diff --git a/numpy-1.6.2/COMPATIBILITY b/numpy-1.6.2/COMPATIBILITY deleted file mode 100644 index d2cd3cd275..0000000000 --- a/numpy-1.6.2/COMPATIBILITY +++ /dev/null @@ -1,59 +0,0 @@ - - -X.flat returns an indexable 1-D iterator (mostly similar to an array -but always 1-d) --- only has .copy and .__array__ attributes of an array!!! - -.typecode() --> .dtype.char - -.iscontiguous() --> .flags['CONTIGUOUS'] or .flags.contiguous - -.byteswapped() -> .byteswap() - -.itemsize() -> .itemsize - -.toscalar() -> .item() - -If you used typecode characters: - -'c' -> 'S1' or 'c' -'b' -> 'B' -'1' -> 'b' -'s' -> 'h' -'w' -> 'H' -'u' -> 'I' - - -C -level - -some API calls that used to take PyObject * now take PyArrayObject * -(this should only cause warnings during compile and not actual problems). - PyArray_Take - -These commands now return a buffer that must be freed once it is used -using PyMemData_FREE(ptr); - -a->descr->zero --> PyArray_Zero(a) -a->descr->one --> PyArray_One(a) - -Numeric/arrayobject.h --> numpy/oldnumeric.h - - -# These will actually work and are defines for PyArray_BYTE, -# but you really should change it in your code -PyArray_CHAR --> PyArray_CHAR - (or PyArray_STRING which is more flexible) -PyArray_SBYTE --> PyArray_BYTE - -Any uses of character codes will need adjusting.... -use PyArray_XXXLTR where XXX is the name of the type. - - -If you used function pointers directly (why did you do that?), -the arguments have changed. Everything that was an int is now an intp. -Also, arrayobjects should be passed in at the end. - -a->descr->cast[i](fromdata, fromstep, todata, tostep, n) -a->descr->cast[i](fromdata, todata, n, PyArrayObject *in, PyArrayObject *out) - anything but single-stepping is not supported by this function - use the PyArray_CastXXXX functions. 
- diff --git a/numpy-1.6.2/DEV_README.txt b/numpy-1.6.2/DEV_README.txt deleted file mode 100644 index 65f84994ca..0000000000 --- a/numpy-1.6.2/DEV_README.txt +++ /dev/null @@ -1,19 +0,0 @@ -Thank you for your willingness to help make NumPy the best array system -available. - -We have a few simple rules: - - * try hard to keep the Git repository in a buildable state and to not - indiscriminately muck with what others have contributed. - - * Simple changes (including bug fixes) and obvious improvements are - always welcome. Changes that fundamentally change behavior need - discussion on numpy-discussions@scipy.org before anything is - done. - - * Please add meaningful comments when you check changes in. These - comments form the basis of the change-log. - - * Add unit tests to exercise new code, and regression tests - whenever you fix a bug. - diff --git a/numpy-1.6.2/INSTALL.txt b/numpy-1.6.2/INSTALL.txt deleted file mode 100644 index d75d4d9d05..0000000000 --- a/numpy-1.6.2/INSTALL.txt +++ /dev/null @@ -1,139 +0,0 @@ -.. -*- rest -*- -.. vim:syntax=rest -.. NB! Keep this document a valid restructured document. - -Building and installing NumPy -+++++++++++++++++++++++++++++ - -:Authors: Numpy Developers -:Discussions to: numpy-discussion@scipy.org - -.. Contents:: - -PREREQUISITES -============= - -Building NumPy requires the following software installed: - -1) Python__ 2.4.x or newer - - On Debian and derivative (Ubuntu): python python-dev - - On Windows: the official python installer on Python__ is enough - - Make sure that the Python package distutils is installed before - continuing. For example, in Debian GNU/Linux, distutils is included - in the python-dev package. - - Python must also be compiled with the zlib module enabled. - -2) nose__ (optional) 0.10.3 or later - - This is required for testing numpy, but not for using it. 
- -Python__ http://www.python.org -nose__ http://somethingaboutorange.com/mrl/projects/nose/ - -Fortran ABI mismatch -==================== - -The two most popular open source fortran compilers are g77 and gfortran. -Unfortunately, they are not ABI compatible, which means that concretely you -should avoid mixing libraries built with one with another. In particular, if -your blas/lapack/atlas is built with g77, you *must* use g77 when building -numpy and scipy; on the contrary, if your atlas is built with gfortran, you -*must* build numpy/scipy with gfortran. - -Choosing the fortran compiler ------------------------------ - -To build with g77: - - python setup.py build --fcompiler=gnu - -To build with gfortran: - - python setup.py build --fcompiler=gnu95 - -How to check the ABI of blas/lapack/atlas ------------------------------------------ - -One relatively simple and reliable way to check for the compiler used to build -a library is to use ldd on the library. If libg2c.so is a dependency, this -means that g77 has been used. If libgfortran.so is a a dependency, gfortran has -been used. If both are dependencies, this means both have been used, which is -almost always a very bad idea. - -Building with ATLAS support -=========================== - -Ubuntu 8.10 (Intrepid) ----------------------- - -You can install the necessary packages for optimized ATLAS with this command: - - sudo apt-get install libatlas-base-dev - -If you have a recent CPU with SIMD suppport (SSE, SSE2, etc...), you should -also install the corresponding package for optimal performances. For example, -for SSE2: - - sudo apt-get install libatlas3gf-sse2 - -*NOTE*: if you build your own atlas, Intrepid changed its default fortran -compiler to gfortran. So you should rebuild everything from scratch, including -lapack, to use it on Intrepid. 
- -Ubuntu 8.04 and lower ---------------------- - -You can install the necessary packages for optimized ATLAS with this command: - - sudo apt-get install atlas3-base-dev - -If you have a recent CPU with SIMD suppport (SSE, SSE2, etc...), you should -also install the corresponding package for optimal performances. For example, -for SSE2: - - sudo apt-get install atlas3-sse2 - -Windows 64 bits notes -===================== - -Note: only AMD64 is supported (IA64 is not) - AMD64 is the version most people -want. - -Free compilers (mingw-w64) --------------------------- - -http://mingw-w64.sourceforge.net/ - -To use the free compilers (mingw-w64), you need to build your own toolchain, as -the mingw project only distribute cross-compilers (cross-compilation is not -supported by numpy). Since this toolchain is still being worked on, serious -compilers bugs can be expected. binutil 2.19 + gcc 4.3.3 + mingw-w64 runtime -gives you a working C compiler (but the C++ is broken). gcc 4.4 will hopefully -be able to run natively. - -This is the only tested way to get a numpy with a FULL blas/lapack (scipy does -not work because of C++). - -MS compilers ------------- - -If you are familiar with MS tools, that's obviously the easiest path, and the -compilers are hopefully more mature (although in my experience, they are quite -fragile, and often segfault on invalid C code). The main drawback is that no -fortran compiler + MS compiler combination has been tested - mingw-w64 gfortran -+ MS compiler does not work at all (it is unclear whether it ever will). - -For python 2.5, you need VS 2005 (MS compiler version 14) targetting -AMD64 bits, or the Platform SDK v6.0 or below (which gives command -line versions of 64 bits target compilers). The PSDK is free. - -For python 2.6, you need VS 2008. The freely available version does not -contains 64 bits compilers (you also need the PSDK, v6.1). - -It is *crucial* to use the right version: python 2.5 -> version 14, python 2.6, -version 15. 
You can check the compiler version with cl.exe /?. Note also that -for python 2.5, 64 bits and 32 bits versions use a different compiler version. diff --git a/numpy-1.6.2/LICENSE.txt b/numpy-1.6.2/LICENSE.txt deleted file mode 100644 index 4371a777b8..0000000000 --- a/numpy-1.6.2/LICENSE.txt +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2005-2009, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/numpy-1.6.2/MANIFEST.in b/numpy-1.6.2/MANIFEST.in deleted file mode 100644 index e3c3316283..0000000000 --- a/numpy-1.6.2/MANIFEST.in +++ /dev/null @@ -1,26 +0,0 @@ -# -# Use .add_data_files and .add_data_dir methods in a appropriate -# setup.py files to include non-python files such as documentation, -# data, etc files to distribution. Avoid using MANIFEST.in for that. -# -include MANIFEST.in -include COMPATIBILITY -include *.txt -include setupscons.py -include setupsconsegg.py -include setupegg.py -include site.cfg.example -include tools/py3tool.py -# Adding scons build related files not found by distutils -recursive-include numpy/core/code_generators *.py *.txt -recursive-include numpy/core *.in *.h -recursive-include numpy SConstruct SConscript -# Add documentation: we don't use add_data_dir since we do not want to include -# this at installation, only for sdist-generated tarballs -include doc/Makefile doc/postprocess.py -recursive-include doc/release * -recursive-include doc/source * -recursive-include doc/sphinxext * -recursive-include doc/cython * -recursive-include doc/pyrex * -recursive-include doc/swig * diff --git a/numpy-1.6.2/PKG-INFO b/numpy-1.6.2/PKG-INFO deleted file mode 100644 index ed5ae3ae75..0000000000 --- a/numpy-1.6.2/PKG-INFO +++ /dev/null @@ -1,38 +0,0 @@ -Metadata-Version: 1.0 -Name: numpy -Version: 1.6.2 -Summary: NumPy: array processing for numbers, strings, records, and objects. -Home-page: http://numpy.scipy.org -Author: NumPy Developers -Author-email: numpy-discussion@scipy.org -License: BSD -Download-URL: http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103 -Description: NumPy is a general-purpose array-processing package designed to - efficiently manipulate large multi-dimensional arrays of arbitrary - records without sacrificing too much speed for small multi-dimensional - arrays. 
NumPy is built on the Numeric code base and adds features - introduced by numarray as well as an extended C-API and the ability to - create arrays of arbitrary type which also makes NumPy suitable for - interfacing with general-purpose data-base applications. - - There are also basic facilities for discrete fourier transform, - basic linear algebra and random number generation. - -Platform: Windows -Platform: Linux -Platform: Solaris -Platform: Mac OS-X -Platform: Unix -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Science/Research -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved -Classifier: Programming Language :: C -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Topic :: Software Development -Classifier: Topic :: Scientific/Engineering -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: POSIX -Classifier: Operating System :: Unix -Classifier: Operating System :: MacOS diff --git a/numpy-1.6.2/README.txt b/numpy-1.6.2/README.txt deleted file mode 100644 index 7ec97e1009..0000000000 --- a/numpy-1.6.2/README.txt +++ /dev/null @@ -1,30 +0,0 @@ -NumPy is the fundamental package needed for scientific computing with Python. -This package contains: - - * a powerful N-dimensional array object - * sophisticated (broadcasting) functions - * tools for integrating C/C++ and Fortran code - * useful linear algebra, Fourier transform, and random number capabilities. - -It derives from the old Numeric code base and can be used as a replacement for Numeric. It also adds the features introduced by numarray and can be used to replace numarray. 
- -More information can be found at the website: - -http://scipy.org/NumPy - -After installation, tests can be run with: - -python -c 'import numpy; numpy.test()' - -When installing a new version of numpy for the first time or before upgrading -to a newer version, it is recommended to turn on deprecation warnings when -running the tests: - -python -Wd -c 'import numpy; numpy.test()' - -The most current development version is always available from our -git repository: - -http://github.com/numpy/numpy - - diff --git a/numpy-1.6.2/THANKS.txt b/numpy-1.6.2/THANKS.txt deleted file mode 100644 index 396f4fba16..0000000000 --- a/numpy-1.6.2/THANKS.txt +++ /dev/null @@ -1,64 +0,0 @@ -Travis Oliphant for the NumPy core, the NumPy guide, various - bug-fixes and code contributions. -Paul Dubois, who implemented the original Masked Arrays. -Pearu Peterson for f2py, numpy.distutils and help with code - organization. -Robert Kern for mtrand, bug fixes, help with distutils, code - organization, strided tricks and much more. -Eric Jones for planning and code contributions. -Fernando Perez for code snippets, ideas, bugfixes, and testing. -Ed Schofield for matrix.py patches, bugfixes, testing, and docstrings. -Robert Cimrman for array set operations and numpy.distutils help. -John Hunter for code snippets from matplotlib. -Chris Hanley for help with records.py, testing, and bug fixes. -Travis Vaught for administration, community coordination and - marketing. -Joe Cooper, Jeff Strunk for administration. -Eric Firing for bugfixes. -Arnd Baecker for 64-bit testing. -David Cooke for many code improvements including the auto-generated C-API, - and optimizations. -Andrew Straw for help with the web-page, documentation, packaging and - testing. -Alexander Belopolsky (Sasha) for Masked array bug-fixes and tests, - rank-0 array improvements, scalar math help and other code additions. -Francesc Altet for unicode, work on nested record arrays, and bug-fixes. 
-Tim Hochberg for getting the build working on MSVC, optimization - improvements, and code review. -Charles (Chuck) Harris for the sorting code originally written for - Numarray and for improvements to polyfit, many bug fixes, delving - into the C code, release management, and documentation. -David Huard for histogram improvements including 2-D and d-D code and - other bug-fixes. -Stefan van der Walt for numerous bug-fixes, testing and documentation. -Albert Strasheim for documentation, bug-fixes, regression tests and - Valgrind expertise. -David Cournapeau for build support, doc-and-bug fixes, and code - contributions including fast_clipping. -Jarrod Millman for release management, community coordination, and code - clean up. -Chris Burns for work on memory mapped arrays and bug-fixes. -Pauli Virtanen for documentation, bug-fixes, lookfor and the - documentation editor. -A.M. Archibald for no-copy-reshape code, strided array tricks, - documentation and bug-fixes. -Pierre Gerard-Marchant for rewriting masked array functionality. -Roberto de Almeida for the buffered array iterator. -Alan McIntyre for updating the NumPy test framework to use nose, improve - the test coverage, and enhancing the test system documentation. -Joe Harrington for administering the 2008 Documentation Sprint. -Mark Wiebe for the new NumPy iterator, the float16 data type, improved - low-level data type operations, and other NumPy core improvements. - -NumPy is based on the Numeric (Jim Hugunin, Paul Dubois, Konrad -Hinsen, and David Ascher) and NumArray (Perry Greenfield, J Todd -Miller, Rick White and Paul Barrett) projects. We thank them for -paving the way ahead. - -Institutions ------------- - -Enthought for providing resources and finances for development of NumPy. -UC Berkeley for providing travel money and hosting numerous sprints. -The University of Central Florida for funding the 2008 Documentation Marathon. -The University of Stellenbosch for hosting the buildbot. 
diff --git a/numpy-1.6.2/benchmarks/benchmark.py b/numpy-1.6.2/benchmarks/benchmark.py deleted file mode 100644 index 526a69d58c..0000000000 --- a/numpy-1.6.2/benchmarks/benchmark.py +++ /dev/null @@ -1,42 +0,0 @@ -from timeit import Timer - -class Benchmark(dict): - """Benchmark a feature in different modules.""" - - def __init__(self,modules,title='',runs=3,reps=1000): - self.module_test = dict((m,'') for m in modules) - self.runs = runs - self.reps = reps - self.title = title - - def __setitem__(self,module,(test_str,setup_str)): - """Set the test code for modules.""" - if module == 'all': - modules = self.module_test.keys() - else: - modules = [module] - - for m in modules: - setup_str = 'import %s; import %s as np; ' % (m,m) \ - + setup_str - self.module_test[m] = Timer(test_str, setup_str) - - def run(self): - """Run the benchmark on the different modules.""" - module_column_len = max(len(mod) for mod in self.module_test) - - if self.title: - print self.title - print 'Doing %d runs, each with %d reps.' % (self.runs,self.reps) - print '-'*79 - - for mod in sorted(self.module_test): - modname = mod.ljust(module_column_len) - try: - print "%s: %s" % (modname, \ - self.module_test[mod].repeat(self.runs,self.reps)) - except Exception, e: - print "%s: Failed to benchmark (%s)." 
% (modname,e) - - print '-'*79 - print diff --git a/numpy-1.6.2/benchmarks/casting.py b/numpy-1.6.2/benchmarks/casting.py deleted file mode 100644 index 5624fddfa1..0000000000 --- a/numpy-1.6.2/benchmarks/casting.py +++ /dev/null @@ -1,17 +0,0 @@ -from benchmark import Benchmark - -modules = ['numpy','Numeric','numarray'] - -b = Benchmark(modules, - title='Casting a (10,10) integer array to float.', - runs=3,reps=10000) - -N = [10,10] -b['numpy'] = ('b = a.astype(int)', - 'a=numpy.zeros(shape=%s,dtype=float)' % N) -b['Numeric'] = ('b = a.astype("l")', - 'a=Numeric.zeros(shape=%s,typecode="d")' % N) -b['numarray'] = ("b = a.astype('l')", - "a=numarray.zeros(shape=%s,typecode='d')" % N) - -b.run() diff --git a/numpy-1.6.2/benchmarks/creating.py b/numpy-1.6.2/benchmarks/creating.py deleted file mode 100644 index 6f8dc0217c..0000000000 --- a/numpy-1.6.2/benchmarks/creating.py +++ /dev/null @@ -1,14 +0,0 @@ -from benchmark import Benchmark - -modules = ['numpy','Numeric','numarray'] - -N = [10,10] -b = Benchmark(modules, - title='Creating %s zeros.' % N, - runs=3,reps=10000) - -b['numpy'] = ('a=np.zeros(shape,type)', 'shape=%s;type=float' % N) -b['Numeric'] = ('a=np.zeros(shape,type)', 'shape=%s;type=np.Float' % N) -b['numarray'] = ('a=np.zeros(shape,type)', "shape=%s;type=np.Float" % N) - -b.run() diff --git a/numpy-1.6.2/benchmarks/simpleindex.py b/numpy-1.6.2/benchmarks/simpleindex.py deleted file mode 100644 index e4e541d96a..0000000000 --- a/numpy-1.6.2/benchmarks/simpleindex.py +++ /dev/null @@ -1,48 +0,0 @@ -import timeit -# This is to show that NumPy is a poorer choice than nested Python lists -# if you are writing nested for loops. -# This is slower than Numeric was but Numeric was slower than Python lists were -# in the first place. 
- -N = 30 - -code2 = r""" -for k in xrange(%d): - for l in xrange(%d): - res = a[k,l].item() + a[l,k].item() -""" % (N,N) - -code3 = r""" -for k in xrange(%d): - for l in xrange(%d): - res = a[k][l] + a[l][k] -""" % (N,N) - -code = r""" -for k in xrange(%d): - for l in xrange(%d): - res = a[k,l] + a[l,k] -""" % (N,N) - -setup3 = r""" -import random -a = [[None for k in xrange(%d)] for l in xrange(%d)] -for k in xrange(%d): - for l in xrange(%d): - a[k][l] = random.random() -""" % (N,N,N,N) - -numpy_timer1 = timeit.Timer(code, 'import numpy as np; a = np.random.rand(%d,%d)' % (N,N)) -numeric_timer = timeit.Timer(code, 'import MLab as np; a=np.rand(%d,%d)' % (N,N)) -numarray_timer = timeit.Timer(code, 'import numarray.mlab as np; a=np.rand(%d,%d)' % (N,N)) -numpy_timer2 = timeit.Timer(code2, 'import numpy as np; a = np.random.rand(%d,%d)' % (N,N)) -python_timer = timeit.Timer(code3, setup3) -numpy_timer3 = timeit.Timer("res = a + a.transpose()","import numpy as np; a=np.random.rand(%d,%d)" % (N,N)) - -print "shape = ", (N,N) -print "NumPy 1: ", numpy_timer1.repeat(3,100) -print "NumPy 2: ", numpy_timer2.repeat(3,100) -print "Numeric: ", numeric_timer.repeat(3,100) -print "Numarray: ", numarray_timer.repeat(3,100) -print "Python: ", python_timer.repeat(3,100) -print "Optimized: ", numpy_timer3.repeat(3,100) diff --git a/numpy-1.6.2/benchmarks/sorting.py b/numpy-1.6.2/benchmarks/sorting.py deleted file mode 100644 index 5a23506b53..0000000000 --- a/numpy-1.6.2/benchmarks/sorting.py +++ /dev/null @@ -1,25 +0,0 @@ -from benchmark import Benchmark - -modules = ['numpy','Numeric','numarray'] -b = Benchmark(modules,runs=3,reps=100) - -N = 10000 -b.title = 'Sorting %d elements' % N -b['numarray'] = ('a=np.array(None,shape=%d,typecode="i");a.sort()'%N,'') -b['numpy'] = ('a=np.empty(shape=%d, dtype="i");a.sort()'%N,'') -b['Numeric'] = ('a=np.empty(shape=%d, typecode="i");np.sort(a)'%N,'') -b.run() - -N1,N2 = 100,100 -b.title = 'Sorting (%d,%d) elements, last axis' % (N1,N2) 
-b['numarray'] = ('a=np.array(None,shape=(%d,%d),typecode="i");a.sort()'%(N1,N2),'') -b['numpy'] = ('a=np.empty(shape=(%d,%d), dtype="i");a.sort()'%(N1,N2),'') -b['Numeric'] = ('a=np.empty(shape=(%d,%d),typecode="i");np.sort(a)'%(N1,N2),'') -b.run() - -N1,N2 = 100,100 -b.title = 'Sorting (%d,%d) elements, first axis' % (N1,N2) -b['numarray'] = ('a=np.array(None,shape=(%d,%d), typecode="i");a.sort(0)'%(N1,N2),'') -b['numpy'] = ('a=np.empty(shape=(%d,%d),dtype="i");np.sort(a,0)'%(N1,N2),'') -b['Numeric'] = ('a=np.empty(shape=(%d,%d),typecode="i");np.sort(a,0)'%(N1,N2),'') -b.run() diff --git a/numpy-1.6.2/doc/Makefile b/numpy-1.6.2/doc/Makefile deleted file mode 100644 index 03b3270791..0000000000 --- a/numpy-1.6.2/doc/Makefile +++ /dev/null @@ -1,168 +0,0 @@ -# Makefile for Sphinx documentation -# - -PYVER = -PYTHON = python$(PYVER) - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = LANG=C sphinx-build -PAPER = - -FILES= - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html web pickle htmlhelp latex changes linkcheck \ - dist dist-build gitwash-update - -#------------------------------------------------------------------------------ - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " pickle to make pickle files (usable by e.g. sphinx-web)" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview over all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " dist PYVER=... to make a distribution-ready tree" - @echo " upload USER=... 
to upload results to docs.scipy.org" - @echo " gitwash-update GITWASH=path/to/gitwash update gitwash developer docs" - -clean: - -rm -rf build/* source/reference/generated - -gitwash-update: - rm -rf source/dev/gitwash - install -d source/dev/gitwash - python $(GITWASH)/gitwash_dumper.py source/dev NumPy \ - --repo-name=numpy \ - --github-user=numpy - cat source/dev/gitwash_links.txt >> source/dev/gitwash/git_links.inc - -#------------------------------------------------------------------------------ -# Automated generation of all documents -#------------------------------------------------------------------------------ - -# Build the current numpy version, and extract docs from it. -# We have to be careful of some issues: -# -# - Everything must be done using the same Python version -# - We must use eggs (otherwise they might override PYTHONPATH on import). -# - Different versions of easy_install install to different directories (!) -# - -INSTALL_DIR = $(CURDIR)/build/inst-dist/ -INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages - -DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" SPHINXOPTS="$(SPHINXOPTS)" - -UPLOAD_TARGET = $(USER)@docs.scipy.org:/home/docserver/www-root/doc/numpy/ - -upload: - @test -e build/dist || { echo "make dist is required first"; exit 1; } - @test output-is-fine -nt build/dist || { \ - echo "Review the output in build/dist, and do 'touch output-is-fine' before uploading."; exit 1; } - rsync -r -z --delete-after -p \ - $(if $(shell test -f build/dist/numpy-ref.pdf && echo "y"),, \ - --exclude '**-ref.pdf' --exclude '**-user.pdf') \ - $(if $(shell test -f build/dist/numpy-chm.zip && echo "y"),, \ - --exclude '**-chm.zip') \ - build/dist/ $(UPLOAD_TARGET) - -dist: - make $(DIST_VARS) 
real-dist - -real-dist: dist-build html - test -d build/latex || make latex - make -C build/latex all-pdf - -test -d build/htmlhelp || make htmlhelp-build - -rm -rf build/dist - cp -r build/html build/dist - perl -pi -e 's#^\s*(
  • NumPy.*?Manual.*?»
  • )#
  • Numpy and Scipy Documentation »
  • #;' build/dist/*.html build/dist/*/*.html build/dist/*/*/*.html - cd build/html && zip -9r ../dist/numpy-html.zip . - cp build/latex/numpy-*.pdf build/dist - -zip build/dist/numpy-chm.zip build/htmlhelp/numpy.chm - cd build/dist && tar czf ../dist.tar.gz * - chmod ug=rwX,o=rX -R build/dist - find build/dist -type d -print0 | xargs -0r chmod g+s - -dist-build: - rm -f ../dist/*.egg - cd .. && $(PYTHON) setupegg.py bdist_egg - install -d $(subst :, ,$(INSTALL_PPH)) - $(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg - - -#------------------------------------------------------------------------------ -# Basic Sphinx generation rules for different formats -#------------------------------------------------------------------------------ - -generate: build/generate-stamp -build/generate-stamp: $(wildcard source/reference/*.rst) - mkdir -p build - touch build/generate-stamp - -html: generate - mkdir -p build/html build/doctrees - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES) - $(PYTHON) postprocess.py html build/html/*.html - @echo - @echo "Build finished. The HTML pages are in build/html." - -pickle: generate - mkdir -p build/pickle build/doctrees - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle $(FILES) - @echo - @echo "Build finished; now you can process the pickle files or run" - @echo " sphinx-web build/pickle" - @echo "to start the sphinx-web server." - -web: pickle - -htmlhelp: generate - mkdir -p build/htmlhelp build/doctrees - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp $(FILES) - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in build/htmlhelp." 
- -htmlhelp-build: htmlhelp build/htmlhelp/numpy.chm -%.chm: %.hhp - -hhc.exe $^ - -qthelp: generate - mkdir -p build/qthelp build/doctrees - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp $(FILES) - -latex: generate - mkdir -p build/latex build/doctrees - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex $(FILES) - $(PYTHON) postprocess.py tex build/latex/*.tex - perl -pi -e 's/\t(latex.*|pdflatex) (.*)/\t-$$1 -interaction batchmode $$2/' build/latex/Makefile - @echo - @echo "Build finished; the LaTeX files are in build/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -coverage: build - mkdir -p build/coverage build/doctrees - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) build/coverage $(FILES) - @echo "Coverage finished; see c.txt and python.txt in build/coverage" - -changes: generate - mkdir -p build/changes build/doctrees - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes $(FILES) - @echo - @echo "The overview file is in build/changes." - -linkcheck: generate - mkdir -p build/linkcheck build/doctrees - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck $(FILES) - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in build/linkcheck/output.txt." diff --git a/numpy-1.6.2/doc/cython/MANIFEST b/numpy-1.6.2/doc/cython/MANIFEST deleted file mode 100644 index feb3ec22aa..0000000000 --- a/numpy-1.6.2/doc/cython/MANIFEST +++ /dev/null @@ -1,2 +0,0 @@ -numpyx.pyx -setup.py diff --git a/numpy-1.6.2/doc/cython/Makefile b/numpy-1.6.2/doc/cython/Makefile deleted file mode 100644 index 7c9c72981e..0000000000 --- a/numpy-1.6.2/doc/cython/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Simple makefile to quickly access handy build commands for Cython extension -# code generation. Note that the actual code to produce the extension lives in -# the setup.py file, this Makefile is just meant as a command -# convenience/reminder while doing development. 
- -help: - @echo "Numpy/Cython tasks. Available tasks:" - @echo "ext -> build the Cython extension module." - @echo "html -> create annotated HTML from the .pyx sources" - @echo "test -> run a simple test demo." - @echo "all -> Call ext, html and finally test." - -all: ext html test - -ext: numpyx.so - -test: ext - python run_test.py - -html: numpyx.pyx.html - -numpyx.so: numpyx.pyx numpyx.c - python setup.py build_ext --inplace - -numpyx.pyx.html: numpyx.pyx - cython -a numpyx.pyx - @echo "Annotated HTML of the C code generated in numpyx.html" - -# Phony targets for cleanup and similar uses - -.PHONY: clean -clean: - rm -rf *~ *.so *.c *.o *.html build - -# Suffix rules -%.c : %.pyx - cython $< diff --git a/numpy-1.6.2/doc/cython/README.txt b/numpy-1.6.2/doc/cython/README.txt deleted file mode 100644 index ff0abb0fe3..0000000000 --- a/numpy-1.6.2/doc/cython/README.txt +++ /dev/null @@ -1,20 +0,0 @@ -================== - NumPy and Cython -================== - -This directory contains a small example of how to use NumPy and Cython -together. While much work is planned for the Summer of 2008 as part of the -Google Summer of Code project to improve integration between the two, even -today Cython can be used effectively to write optimized code that accesses -NumPy arrays. - -The example provided is just a stub showing how to build an extension and -access the array objects; improvements to this to show more sophisticated tasks -are welcome. - -To run it locally, simply type:: - - make help - -which shows you the currently available targets (these are just handy -shorthands for common commands). \ No newline at end of file diff --git a/numpy-1.6.2/doc/cython/c_numpy.pxd b/numpy-1.6.2/doc/cython/c_numpy.pxd deleted file mode 100644 index e178b8e33d..0000000000 --- a/numpy-1.6.2/doc/cython/c_numpy.pxd +++ /dev/null @@ -1,137 +0,0 @@ -# :Author: Travis Oliphant - -# API declaration section. This basically exposes the NumPy C API to -# Pyrex/Cython programs. 
- -cdef extern from "numpy/arrayobject.h": - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - cdef enum requirements: - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum defines: - NPY_MAXDIMS - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef int npy_intp - - ctypedef extern class numpy.dtype [object PyArray_Descr]: - cdef int type_num, elsize, alignment - cdef char type, kind, byteorder - cdef int flags - cdef object fields, typeobj - - ctypedef extern class numpy.ndarray [object PyArrayObject]: - cdef char *data - cdef int nd - cdef npy_intp *dimensions - cdef npy_intp *strides - cdef object base - cdef dtype descr - cdef int flags - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - cdef int nd_m1 - cdef npy_intp index, size - cdef ndarray ao - cdef char *dataptr - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - cdef npy_intp *dimensions - cdef void **iters - - object PyArray_ZEROS(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - object PyArray_EMPTY(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - dtype PyArray_DescrFromTypeNum(NPY_TYPES type_num) - object PyArray_SimpleNew(int ndims, npy_intp* 
dims, NPY_TYPES type_num) - int PyArray_Check(object obj) - object PyArray_ContiguousFromAny(object obj, NPY_TYPES type, - int mindim, int maxdim) - object PyArray_ContiguousFromObject(object obj, NPY_TYPES type, - int mindim, int maxdim) - npy_intp PyArray_SIZE(ndarray arr) - npy_intp PyArray_NBYTES(ndarray arr) - void *PyArray_DATA(ndarray arr) - object PyArray_FromAny(object obj, dtype newtype, int mindim, int maxdim, - int requirements, object context) - object PyArray_FROMANY(object obj, NPY_TYPES type_num, int min, - int max, int requirements) - object PyArray_NewFromDescr(object subtype, dtype newtype, int nd, - npy_intp* dims, npy_intp* strides, void* data, - int flags, object parent) - - object PyArray_FROM_OTF(object obj, NPY_TYPES type, int flags) - object PyArray_EnsureArray(object) - - object PyArray_MultiIterNew(int n, ...) - - char *PyArray_MultiIter_DATA(broadcast multi, int i) - void PyArray_MultiIter_NEXTi(broadcast multi, int i) - void PyArray_MultiIter_NEXT(broadcast multi) - - object PyArray_IterNew(object arr) - void PyArray_ITER_NEXT(flatiter it) - - void import_array() diff --git a/numpy-1.6.2/doc/cython/c_python.pxd b/numpy-1.6.2/doc/cython/c_python.pxd deleted file mode 100644 index 46d2fd1a73..0000000000 --- a/numpy-1.6.2/doc/cython/c_python.pxd +++ /dev/null @@ -1,62 +0,0 @@ -# :Author: Robert Kern -# :Copyright: 2004, Enthought, Inc. -# :License: BSD Style - - -cdef extern from "Python.h": - # Not part of the Python API, but we might as well define it here. - # Note that the exact type doesn't actually matter for Pyrex. 
- ctypedef int size_t - - # Some type declarations we need - ctypedef int Py_intptr_t - - - # String API - char* PyString_AsString(object string) - char* PyString_AS_STRING(object string) - object PyString_FromString(char* c_string) - object PyString_FromStringAndSize(char* c_string, int length) - object PyString_InternFromString(char *v) - - # Float API - object PyFloat_FromDouble(double v) - double PyFloat_AsDouble(object ob) - long PyInt_AsLong(object ob) - - - # Memory API - void* PyMem_Malloc(size_t n) - void* PyMem_Realloc(void* buf, size_t n) - void PyMem_Free(void* buf) - - void Py_DECREF(object obj) - void Py_XDECREF(object obj) - void Py_INCREF(object obj) - void Py_XINCREF(object obj) - - # CObject API - ctypedef void (*destructor1)(void* cobj) - ctypedef void (*destructor2)(void* cobj, void* desc) - int PyCObject_Check(object p) - object PyCObject_FromVoidPtr(void* cobj, destructor1 destr) - object PyCObject_FromVoidPtrAndDesc(void* cobj, void* desc, - destructor2 destr) - void* PyCObject_AsVoidPtr(object self) - void* PyCObject_GetDesc(object self) - int PyCObject_SetVoidPtr(object self, void* cobj) - - # TypeCheck API - int PyFloat_Check(object obj) - int PyInt_Check(object obj) - - # Error API - int PyErr_Occurred() - void PyErr_Clear() - int PyErr_CheckSignals() - -cdef extern from "string.h": - void *memcpy(void *s1, void *s2, int n) - -cdef extern from "math.h": - double fabs(double x) diff --git a/numpy-1.6.2/doc/cython/numpyx.pyx b/numpy-1.6.2/doc/cython/numpyx.pyx deleted file mode 100644 index cbc786ef0c..0000000000 --- a/numpy-1.6.2/doc/cython/numpyx.pyx +++ /dev/null @@ -1,127 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough -"""Cython access to Numpy arrays - simple example. 
-""" - -############################################################################# -# Load C APIs declared in .pxd files via cimport -# -# A 'cimport' is similar to a Python 'import' statement, but it provides access -# to the C part of a library instead of its Python-visible API. See the -# Pyrex/Cython documentation for details. - -cimport c_python as py - -cimport c_numpy as cnp - -# NOTE: numpy MUST be initialized before any other code is executed. -cnp.import_array() - -############################################################################# -# Load Python modules via normal import statements - -import numpy as np - -############################################################################# -# Regular code section begins - -# A 'def' function is visible in the Python-imported module -def print_array_info(cnp.ndarray arr): - """Simple information printer about an array. - - Code meant to illustrate Cython/NumPy integration only.""" - - cdef int i - - print '-='*10 - # Note: the double cast here (void * first, then py.Py_intptr_t) is needed - # in Cython but not in Pyrex, since the casting behavior of cython is - # slightly different (and generally safer) than that of Pyrex. 
In this - # case, we just want the memory address of the actual Array object, so we - # cast it to void before doing the py.Py_intptr_t cast: - print 'Printing array info for ndarray at 0x%0lx'% \ - (arr,) - print 'number of dimensions:',arr.nd - print 'address of strides: 0x%0lx'%(arr.strides,) - print 'strides:' - for i from 0<=iarr.strides[i] - print 'memory dump:' - print_elements( arr.data, arr.strides, arr.dimensions, - arr.nd, sizeof(double), arr.dtype ) - print '-='*10 - print - -# A 'cdef' function is NOT visible to the python side, but it is accessible to -# the rest of this Cython module -cdef print_elements(char *data, - py.Py_intptr_t* strides, - py.Py_intptr_t* dimensions, - int nd, - int elsize, - object dtype): - cdef py.Py_intptr_t i,j - cdef void* elptr - - if dtype not in [np.dtype(np.object_), - np.dtype(np.float64)]: - print ' print_elements() not (yet) implemented for dtype %s'%dtype.name - return - - if nd ==0: - if dtype==np.dtype(np.object_): - elptr = (data)[0] #[0] dereferences pointer in Pyrex - print ' ',elptr - elif dtype==np.dtype(np.float64): - print ' ',(data)[0] - elif nd == 1: - for i from 0<=idata)[0] - print ' ',elptr - elif dtype==np.dtype(np.float64): - print ' ',(data)[0] - data = data + strides[0] - else: - for i from 0<=ind); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":16 */ - __pyx_1 = PyInt_FromLong(((int )__pyx_v_arr->strides)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = 
PyNumber_Remainder(__pyx_k5p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":17 */ - if (__Pyx_PrintItem(__pyx_k6p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;} - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":18 */ - __pyx_3 = __pyx_v_arr->nd; - for (__pyx_v_i = 0; __pyx_v_i < __pyx_3; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":20 */ - __pyx_2 = PyInt_FromLong(__pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - __pyx_1 = PyNumber_Remainder(__pyx_k7p, __pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = PyInt_FromLong(((int )(__pyx_v_arr->strides[__pyx_v_i]))); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;} - } - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":21 */ - if (__Pyx_PrintItem(__pyx_k8p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;} - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;} - - /* 
"/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":22 */ - __pyx_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; goto __pyx_L1;} - __pyx_2 = __pyx_f_6numpyx_print_elements(__pyx_v_arr->data,__pyx_v_arr->strides,__pyx_v_arr->dimensions,__pyx_v_arr->nd,(sizeof(double )),__pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":24 */ - __pyx_1 = PyInt_FromLong(10); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - __pyx_2 = PyNumber_Multiply(__pyx_k9p, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":25 */ - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; goto __pyx_L1;} - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - __Pyx_AddTraceback("numpyx.print_array_info"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static PyObject *__pyx_n_object_; -static PyObject *__pyx_n_float64; -static PyObject *__pyx_n_name; - -static PyObject *__pyx_k10p; -static PyObject *__pyx_k11p; -static PyObject *__pyx_k12p; -static PyObject *__pyx_k13p; -static PyObject *__pyx_k14p; - -static char (__pyx_k10[]) = " print_elements() not (yet) implemented for dtype %s"; -static char (__pyx_k11[]) = " "; -static char (__pyx_k12[]) = " "; -static char (__pyx_k13[]) = " "; -static char (__pyx_k14[]) = " "; - -static PyObject 
*__pyx_f_6numpyx_print_elements(char (*__pyx_v_data),Py_intptr_t (*__pyx_v_strides),Py_intptr_t (*__pyx_v_dimensions),int __pyx_v_nd,int __pyx_v_elsize,PyObject *__pyx_v_dtype) { - Py_intptr_t __pyx_v_i; - void (*__pyx_v_elptr); - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - PyObject *__pyx_3 = 0; - PyObject *__pyx_4 = 0; - int __pyx_5; - Py_intptr_t __pyx_6; - Py_INCREF(__pyx_v_dtype); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":36 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_3 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto 
__pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_2, 0, __pyx_4); - __pyx_4 = 0; - __pyx_4 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyList_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_1, 0, __pyx_3); - PyList_SET_ITEM(__pyx_1, 1, __pyx_4); - __pyx_3 = 0; - __pyx_4 = 0; - __pyx_5 = PySequence_Contains(__pyx_1, __pyx_v_dtype); if (__pyx_5 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; goto __pyx_L1;} - __pyx_5 = !__pyx_5; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":38 */ - __pyx_2 = PyObject_GetAttr(__pyx_v_dtype, __pyx_n_name); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - __pyx_3 = PyNumber_Remainder(__pyx_k10p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":39 */ - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - goto __pyx_L2; - } - __pyx_L2:; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":41 */ - __pyx_5 = (__pyx_v_nd == 0); - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":42 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":43 */ - __pyx_v_elptr = (((void (*(*)))__pyx_v_data)[0]); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":44 */ - if (__Pyx_PrintItem(__pyx_k11p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - __pyx_3 = (PyObject *)__pyx_v_elptr; - Py_INCREF(__pyx_3); - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; goto __pyx_L1;} - goto __pyx_L4; - } - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_3 = 
PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":46 */ - if (__Pyx_PrintItem(__pyx_k12p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - __pyx_3 = PyFloat_FromDouble((((double (*))__pyx_v_data)[0])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; goto __pyx_L1;} - goto __pyx_L4; - } - __pyx_L4:; - goto __pyx_L3; - } - __pyx_5 = (__pyx_v_nd == 1); - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":48 */ - __pyx_6 = (__pyx_v_dimensions[0]); - for (__pyx_v_i = 0; __pyx_v_i < __pyx_6; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":49 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_dtype); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 49; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":50 */ - __pyx_v_elptr = (((void (*(*)))__pyx_v_data)[0]); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":51 */ - if (__Pyx_PrintItem(__pyx_k13p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - __pyx_3 = (PyObject *)__pyx_v_elptr; - Py_INCREF(__pyx_3); - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; goto __pyx_L1;} - goto __pyx_L7; - } - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_4 = PyObject_GetAttr(__pyx_1, __pyx_n_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_2, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - __pyx_1 = PyTuple_New(1); if 
(!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (PyObject_Cmp(__pyx_v_dtype, __pyx_2, &__pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; goto __pyx_L1;} - __pyx_5 = __pyx_5 == 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__pyx_5) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":53 */ - if (__Pyx_PrintItem(__pyx_k14p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - __pyx_3 = PyFloat_FromDouble((((double (*))__pyx_v_data)[0])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; goto __pyx_L1;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":54 */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - goto __pyx_L3; - } - /*else*/ { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":56 */ - __pyx_6 = (__pyx_v_dimensions[0]); - for (__pyx_v_i = 0; __pyx_v_i < __pyx_6; ++__pyx_v_i) { - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":57 */ - __pyx_4 = __pyx_f_6numpyx_print_elements(__pyx_v_data,(__pyx_v_strides + 1),(__pyx_v_dimensions + 1),(__pyx_v_nd - 1),__pyx_v_elsize,__pyx_v_dtype); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":58 */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - } - __pyx_L3:; - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); 
- Py_XDECREF(__pyx_2); - Py_XDECREF(__pyx_3); - Py_XDECREF(__pyx_4); - __Pyx_AddTraceback("numpyx.print_elements"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_dtype); - return __pyx_r; -} - -static PyObject *__pyx_n_any; - -static PyObject *__pyx_k15p; -static PyObject *__pyx_k16p; -static PyObject *__pyx_k17p; - -static char (__pyx_k15[]) = "arr.any() :"; -static char (__pyx_k16[]) = "arr.nd :"; -static char (__pyx_k17[]) = "arr.flags :"; - -static PyObject *__pyx_f_6numpyx_test_methods(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6numpyx_test_methods[] = "Test a few attribute accesses for an array.\n \n This illustrates how the pyrex-visible object is in practice a strange\n hybrid of the C PyArrayObject struct and the python object. Some\n properties (like .nd) are visible here but not in python, while others\n like flags behave very differently: in python flags appears as a separate,\n object while here we see the raw int holding the bit pattern.\n\n This makes sense when we think of how pyrex resolves arr.foo: if foo is\n listed as a field in the c_numpy.ndarray struct description, it will be\n directly accessed as a C variable without going through Python at all.\n This is why for arr.flags, we see the actual int which holds all the flags\n as bit fields. 
However, for any other attribute not listed in the struct,\n it simply forwards the attribute lookup to python at runtime, just like\n python would (which means that AttributeError can be raised for\n non-existent attributes, for example)."; -static PyObject *__pyx_f_6numpyx_test_methods(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_arr = 0; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - static char *__pyx_argnames[] = {"arr",0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_arr)) return 0; - Py_INCREF(__pyx_v_arr); - if (!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_arr), __pyx_ptype_7c_numpy_ndarray, 1, "arr")) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":78 */ - if (__Pyx_PrintItem(__pyx_k15p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_any); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":79 */ - if (__Pyx_PrintItem(__pyx_k16p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - __pyx_1 = PyInt_FromLong(__pyx_v_arr->nd); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 79; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":80 */ - if (__Pyx_PrintItem(__pyx_k17p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(__pyx_v_arr->flags); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - if (__Pyx_PrintItem(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (__Pyx_PrintNewline() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;} - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - __Pyx_AddTraceback("numpyx.test_methods"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static PyObject *__pyx_n_array; -static PyObject *__pyx_n_arange; -static PyObject *__pyx_n_shape; -static PyObject *__pyx_n_one; -static PyObject *__pyx_n_two; - - -static PyObject *__pyx_f_6numpyx_test(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6numpyx_test[] = "this function is pure Python"; -static PyObject *__pyx_f_6numpyx_test(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_arr1; - PyObject *__pyx_v_arr2; - PyObject *__pyx_v_arr3; - PyObject *__pyx_v_four; - PyObject *__pyx_v_arr4; - PyObject *__pyx_v_arr5; - PyObject *__pyx_v_arr; - PyObject *__pyx_r; - PyObject *__pyx_1 = 0; - PyObject *__pyx_2 = 0; - PyObject *__pyx_3 = 0; - PyObject *__pyx_4 = 0; - PyObject *__pyx_5 = 0; - static char *__pyx_argnames[] = {0}; - if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0; - __pyx_v_arr1 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr2 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr3 = Py_None; Py_INCREF(Py_None); - __pyx_v_four = Py_None; Py_INCREF(Py_None); - __pyx_v_arr4 = Py_None; Py_INCREF(Py_None); - __pyx_v_arr5 = Py_None; Py_INCREF(Py_None); - 
__pyx_v_arr = Py_None; Py_INCREF(Py_None); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":84 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_1 = PyFloat_FromDouble((-1e-30)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1); - __pyx_1 = 0; - __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_float64); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyDict_SetItem(__pyx_1, __pyx_n_dtype, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_v_arr1); - __pyx_v_arr1 = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":85 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_5, __pyx_n_array); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_3 = PyFloat_FromDouble(1.0); if (!__pyx_3) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_1 = PyFloat_FromDouble(2.0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_4 = PyFloat_FromDouble(3.0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_5 = PyList_New(3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_5, 0, __pyx_3); - PyList_SET_ITEM(__pyx_5, 1, __pyx_1); - PyList_SET_ITEM(__pyx_5, 2, __pyx_4); - __pyx_3 = 0; - __pyx_1 = 0; - __pyx_4 = 0; - __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_3, 0, __pyx_5); - __pyx_5 = 0; - __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - __pyx_5 = PyObject_GetAttr(__pyx_4, __pyx_n_float64); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - if (PyDict_SetItem(__pyx_1, __pyx_n_dtype, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_v_arr2); - __pyx_v_arr2 = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":87 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_2 = PyObject_GetAttr(__pyx_5, __pyx_n_arange); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_3 = 
PyInt_FromLong(9); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - __pyx_3 = 0; - __pyx_4 = PyDict_New(); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_float64); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - if (PyDict_SetItem(__pyx_4, __pyx_n_dtype, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - __pyx_5 = PyEval_CallObjectWithKeywords(__pyx_2, __pyx_1, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - Py_DECREF(__pyx_1); __pyx_1 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_v_arr3); - __pyx_v_arr3 = __pyx_5; - __pyx_5 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":88 */ - __pyx_3 = PyInt_FromLong(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3); - PyTuple_SET_ITEM(__pyx_1, 1, __pyx_2); - __pyx_3 = 0; - __pyx_2 = 0; - if (PyObject_SetAttr(__pyx_v_arr3, __pyx_n_shape, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":90 */ - __pyx_4 = PyInt_FromLong(4); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;} - 
Py_DECREF(__pyx_v_four); - __pyx_v_four = __pyx_4; - __pyx_4 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":91 */ - __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_5, __pyx_n_array); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_5); __pyx_5 = 0; - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_1 = PyList_New(4); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_INCREF(__pyx_n_one); - PyList_SET_ITEM(__pyx_1, 0, __pyx_n_one); - Py_INCREF(__pyx_n_two); - PyList_SET_ITEM(__pyx_1, 1, __pyx_n_two); - PyList_SET_ITEM(__pyx_1, 2, __pyx_2); - Py_INCREF(__pyx_v_four); - PyList_SET_ITEM(__pyx_1, 3, __pyx_v_four); - __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1); - __pyx_1 = 0; - __pyx_5 = PyDict_New(); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_object_); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - if (PyDict_SetItem(__pyx_5, __pyx_n_dtype, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_2 = PyEval_CallObjectWithKeywords(__pyx_3, __pyx_4, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_5); __pyx_5 = 0; - Py_DECREF(__pyx_v_arr4); - __pyx_v_arr4 = __pyx_2; - __pyx_2 = 0; - - /* 
"/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":93 */ - __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_numpy); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_array); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - __pyx_4 = PyInt_FromLong(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_5 = PyInt_FromLong(2); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_2 = PyInt_FromLong(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - __pyx_1 = PyList_New(3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - PyList_SET_ITEM(__pyx_1, 0, __pyx_4); - PyList_SET_ITEM(__pyx_1, 1, __pyx_5); - PyList_SET_ITEM(__pyx_1, 2, __pyx_2); - __pyx_4 = 0; - __pyx_5 = 0; - __pyx_2 = 0; - __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1); - __pyx_1 = 0; - __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;} - Py_DECREF(__pyx_3); __pyx_3 = 0; - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_v_arr5); - __pyx_v_arr5 = __pyx_5; - __pyx_5 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":95 */ - __pyx_2 = PyList_New(5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - Py_INCREF(__pyx_v_arr1); - PyList_SET_ITEM(__pyx_2, 0, __pyx_v_arr1); - Py_INCREF(__pyx_v_arr2); - PyList_SET_ITEM(__pyx_2, 1, __pyx_v_arr2); - Py_INCREF(__pyx_v_arr3); - PyList_SET_ITEM(__pyx_2, 2, __pyx_v_arr3); - Py_INCREF(__pyx_v_arr4); - PyList_SET_ITEM(__pyx_2, 3, __pyx_v_arr4); - Py_INCREF(__pyx_v_arr5); - PyList_SET_ITEM(__pyx_2, 4, __pyx_v_arr5); - __pyx_1 = PyObject_GetIter(__pyx_2); if (!__pyx_1) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - Py_DECREF(__pyx_2); __pyx_2 = 0; - for (;;) { - __pyx_3 = PyIter_Next(__pyx_1); - if (!__pyx_3) { - if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;} - break; - } - Py_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_3; - __pyx_3 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":96 */ - __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_print_array_info); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - Py_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_arr); - __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;} - Py_DECREF(__pyx_4); __pyx_4 = 0; - Py_DECREF(__pyx_5); __pyx_5 = 0; - Py_DECREF(__pyx_2); __pyx_2 = 0; - } - Py_DECREF(__pyx_1); __pyx_1 = 0; - - __pyx_r = Py_None; Py_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1:; - Py_XDECREF(__pyx_1); - Py_XDECREF(__pyx_2); - Py_XDECREF(__pyx_3); - Py_XDECREF(__pyx_4); - Py_XDECREF(__pyx_5); - __Pyx_AddTraceback("numpyx.test"); - __pyx_r = 0; - __pyx_L0:; - Py_DECREF(__pyx_v_arr1); - Py_DECREF(__pyx_v_arr2); - Py_DECREF(__pyx_v_arr3); - Py_DECREF(__pyx_v_four); - Py_DECREF(__pyx_v_arr4); - Py_DECREF(__pyx_v_arr5); - Py_DECREF(__pyx_v_arr); - return __pyx_r; -} - -static __Pyx_InternTabEntry __pyx_intern_tab[] = { - {&__pyx_n_any, "any"}, - {&__pyx_n_arange, "arange"}, - {&__pyx_n_array, "array"}, - {&__pyx_n_c_numpy, "c_numpy"}, - {&__pyx_n_c_python, "c_python"}, - {&__pyx_n_dtype, "dtype"}, - {&__pyx_n_float64, "float64"}, - {&__pyx_n_name, "name"}, - {&__pyx_n_numpy, "numpy"}, - {&__pyx_n_object_, "object_"}, - {&__pyx_n_one, "one"}, - {&__pyx_n_print_array_info, "print_array_info"}, - {&__pyx_n_shape, "shape"}, - {&__pyx_n_test, "test"}, - {&__pyx_n_test_methods, "test_methods"}, - {&__pyx_n_two, "two"}, - {0, 
0} -}; - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_k2p, __pyx_k2, sizeof(__pyx_k2)}, - {&__pyx_k3p, __pyx_k3, sizeof(__pyx_k3)}, - {&__pyx_k4p, __pyx_k4, sizeof(__pyx_k4)}, - {&__pyx_k5p, __pyx_k5, sizeof(__pyx_k5)}, - {&__pyx_k6p, __pyx_k6, sizeof(__pyx_k6)}, - {&__pyx_k7p, __pyx_k7, sizeof(__pyx_k7)}, - {&__pyx_k8p, __pyx_k8, sizeof(__pyx_k8)}, - {&__pyx_k9p, __pyx_k9, sizeof(__pyx_k9)}, - {&__pyx_k10p, __pyx_k10, sizeof(__pyx_k10)}, - {&__pyx_k11p, __pyx_k11, sizeof(__pyx_k11)}, - {&__pyx_k12p, __pyx_k12, sizeof(__pyx_k12)}, - {&__pyx_k13p, __pyx_k13, sizeof(__pyx_k13)}, - {&__pyx_k14p, __pyx_k14, sizeof(__pyx_k14)}, - {&__pyx_k15p, __pyx_k15, sizeof(__pyx_k15)}, - {&__pyx_k16p, __pyx_k16, sizeof(__pyx_k16)}, - {&__pyx_k17p, __pyx_k17, sizeof(__pyx_k17)}, - {0, 0, 0} -}; - -static struct PyMethodDef __pyx_methods[] = { - {"print_array_info", (PyCFunction)__pyx_f_6numpyx_print_array_info, METH_VARARGS|METH_KEYWORDS, 0}, - {"test_methods", (PyCFunction)__pyx_f_6numpyx_test_methods, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6numpyx_test_methods}, - {"test", (PyCFunction)__pyx_f_6numpyx_test, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6numpyx_test}, - {0, 0, 0, 0} -}; - -static void __pyx_init_filenames(void); /*proto*/ - -PyMODINIT_FUNC initnumpyx(void); /*proto*/ -PyMODINIT_FUNC initnumpyx(void) { - PyObject *__pyx_1 = 0; - __pyx_init_filenames(); - __pyx_m = Py_InitModule4("numpyx", __pyx_methods, 0, 0, PYTHON_API_VERSION); - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - __pyx_b = PyImport_AddModule("__builtin__"); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (__Pyx_InternStrings(__pyx_intern_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; goto __pyx_L1;}; - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 3; goto __pyx_L1;}; - __pyx_ptype_7c_numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr)); if (!__pyx_ptype_7c_numpy_dtype) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 76; goto __pyx_L1;} - __pyx_ptype_7c_numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject)); if (!__pyx_ptype_7c_numpy_ndarray) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 81; goto __pyx_L1;} - __pyx_ptype_7c_numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject)); if (!__pyx_ptype_7c_numpy_flatiter) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 90; goto __pyx_L1;} - __pyx_ptype_7c_numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject)); if (!__pyx_ptype_7c_numpy_broadcast) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 96; goto __pyx_L1;} - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":5 */ - __pyx_1 = __Pyx_Import(__pyx_n_numpy, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; goto __pyx_L1;} - if (PyObject_SetAttr(__pyx_m, __pyx_n_numpy, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; goto __pyx_L1;} - Py_DECREF(__pyx_1); __pyx_1 = 0; - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":8 */ - import_array(); - - /* "/Users/rkern/svn/numpy/numpy/doc/pyrex/numpyx.pyx":82 */ - return; - __pyx_L1:; - Py_XDECREF(__pyx_1); - __Pyx_AddTraceback("numpyx"); -} - -static char *__pyx_filenames[] = { - "numpyx.pyx", - "c_numpy.pxd", -}; - -/* Runtime support code */ - -static void __pyx_init_filenames(void) { - __pyx_f = __pyx_filenames; -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name) { - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if ((none_allowed && obj == Py_None) || PyObject_TypeCheck(obj, type)) - return 1; - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, obj->ob_type->tp_name); - 
return 0; -} - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) { - PyObject *__import__ = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - __import__ = PyObject_GetAttrString(__pyx_b, "__import__"); - if (!__import__) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - module = PyObject_CallFunction(__import__, "OOOO", - name, global_dict, empty_dict, list); -bad: - Py_XDECREF(empty_list); - Py_XDECREF(__import__); - Py_XDECREF(empty_dict); - return module; -} - -static PyObject *__Pyx_GetStdout(void) { - PyObject *f = PySys_GetObject("stdout"); - if (!f) { - PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout"); - } - return f; -} - -static int __Pyx_PrintItem(PyObject *v) { - PyObject *f; - - if (!(f = __Pyx_GetStdout())) - return -1; - if (PyFile_SoftSpace(f, 1)) { - if (PyFile_WriteString(" ", f) < 0) - return -1; - } - if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0) - return -1; - if (PyString_Check(v)) { - char *s = PyString_AsString(v); - int len = PyString_Size(v); - if (len > 0 && - isspace(Py_CHARMASK(s[len-1])) && - s[len-1] != ' ') - PyFile_SoftSpace(f, 0); - } - return 0; -} - -static int __Pyx_PrintNewline(void) { - PyObject *f; - - if (!(f = __Pyx_GetStdout())) - return -1; - if (PyFile_WriteString("\n", f) < 0) - return -1; - PyFile_SoftSpace(f, 0); - return 0; -} - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) - PyErr_SetObject(PyExc_NameError, name); - return result; -} - -static int __Pyx_InternStrings(__Pyx_InternTabEntry *t) { - while (t->p) { - *t->p = PyString_InternFromString(t->s); - if (!*t->p) - return -1; - ++t; - } 
- return 0; -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, - long size) -{ - PyObject *py_module_name = 0; - PyObject *py_class_name = 0; - PyObject *py_name_list = 0; - PyObject *py_module = 0; - PyObject *result = 0; - - py_module_name = PyString_FromString(module_name); - if (!py_module_name) - goto bad; - py_class_name = PyString_FromString(class_name); - if (!py_class_name) - goto bad; - py_name_list = PyList_New(1); - if (!py_name_list) - goto bad; - Py_INCREF(py_class_name); - if (PyList_SetItem(py_name_list, 0, py_class_name) < 0) - goto bad; - py_module = __Pyx_Import(py_module_name, py_name_list); - if (!py_module) - goto bad; - result = PyObject_GetAttr(py_module, py_class_name); - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (((PyTypeObject *)result)->tp_basicsize != size) { - PyErr_Format(PyExc_ValueError, - "%s.%s does not appear to be the correct type object", - module_name, class_name); - goto bad; - } - goto done; -bad: - Py_XDECREF(result); - result = 0; -done: - Py_XDECREF(py_module_name); - Py_XDECREF(py_class_name); - Py_XDECREF(py_name_list); - return (PyTypeObject *)result; -} - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(char *funcname) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyObject *empty_tuple = 0; - PyObject *empty_string = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - py_srcfile = PyString_FromString(__pyx_filename); - if (!py_srcfile) goto bad; - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if 
(!py_globals) goto bad; - empty_tuple = PyTuple_New(0); - if (!empty_tuple) goto bad; - empty_string = PyString_FromString(""); - if (!empty_string) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - empty_string, /*PyObject *code,*/ - empty_tuple, /*PyObject *consts,*/ - empty_tuple, /*PyObject *names,*/ - empty_tuple, /*PyObject *varnames,*/ - empty_tuple, /*PyObject *freevars,*/ - empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - empty_string /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_Get(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(empty_tuple); - Py_XDECREF(empty_string); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} diff --git a/numpy-1.6.2/doc/pyrex/numpyx.pyx b/numpy-1.6.2/doc/pyrex/numpyx.pyx deleted file mode 100644 index 068d251f67..0000000000 --- a/numpy-1.6.2/doc/pyrex/numpyx.pyx +++ /dev/null @@ -1,101 +0,0 @@ -# -*- Mode: Python -*- Not really, but close enough -"""WARNING: this code is deprecated and slated for removal soon. See the -doc/cython directory for the replacement, which uses Cython (the actively -maintained version of Pyrex). 
-""" - -cimport c_python -cimport c_numpy -import numpy - -# Numpy must be initialized -c_numpy.import_array() - -def print_array_info(c_numpy.ndarray arr): - cdef int i - - print '-='*10 - print 'printing array info for ndarray at 0x%0lx'%(arr,) - print 'print number of dimensions:',arr.nd - print 'address of strides: 0x%0lx'%(arr.strides,) - print 'strides:' - for i from 0<=iarr.strides[i] - print 'memory dump:' - print_elements( arr.data, arr.strides, arr.dimensions, - arr.nd, sizeof(double), arr.dtype ) - print '-='*10 - print - -cdef print_elements(char *data, - c_python.Py_intptr_t* strides, - c_python.Py_intptr_t* dimensions, - int nd, - int elsize, - object dtype): - cdef c_python.Py_intptr_t i,j - cdef void* elptr - - if dtype not in [numpy.dtype(numpy.object_), - numpy.dtype(numpy.float64)]: - print ' print_elements() not (yet) implemented for dtype %s'%dtype.name - return - - if nd ==0: - if dtype==numpy.dtype(numpy.object_): - elptr = (data)[0] #[0] dereferences pointer in Pyrex - print ' ',elptr - elif dtype==numpy.dtype(numpy.float64): - print ' ',(data)[0] - elif nd == 1: - for i from 0<=idata)[0] - print ' ',elptr - elif dtype==numpy.dtype(numpy.float64): - print ' ',(data)[0] - data = data + strides[0] - else: - for i from 0<=i()" defining two scalar inputs and one scalar output. - -Another example is (see the GeneralLoopingFunctions page) the function -inner1d(a,b) with a signature of "(i),(i)->()". This applies the inner product -along the last axis of each input, but keeps the remaining indices intact. For -example, where a is of shape (3,5,N) and b is of shape (5,N), this will return -an output of shape (3,5). The underlying elementary function is called 3*5 -times. In the signature, we specify one core dimension "(i)" for each input and -zero core dimensions "()" for the output, since it takes two 1-d arrays and -returns a scalar. 
By using the same name "i", we specify that the two -corresponding dimensions should be of the same size (or one of them is of size -1 and will be broadcasted). - -The dimensions beyond the core dimensions are called "loop" dimensions. In the -above example, this corresponds to (3,5). - -The usual numpy "broadcasting" rules apply, where the signature determines how -the dimensions of each input/output object are split into core and loop -dimensions: - -While an input array has a smaller dimensionality than the corresponding number -of core dimensions, 1's are pre-pended to its shape. The core dimensions are -removed from all inputs and the remaining dimensions are broadcasted; defining -the loop dimensions. The output is given by the loop dimensions plus the -output core dimensions. - -Experimental Windows 64 bits support -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Numpy can now be built on windows 64 bits (amd64 only, not IA64), with both MS -compilers and mingw-w64 compilers: - -This is *highly experimental*: DO NOT USE FOR PRODUCTION USE. See INSTALL.txt, -Windows 64 bits section for more information on limitations and how to build it -by yourself. - -New features -============ - -Formatting issues -~~~~~~~~~~~~~~~~~ - -Float formatting is now handled by numpy instead of the C runtime: this enables -locale independent formatting, more robust fromstring and related methods. -Special values (inf and nan) are also more consistent across platforms (nan vs -IND/NaN, etc...), and more consistent with recent python formatting work (in -2.6 and later). - -Nan handling in max/min -~~~~~~~~~~~~~~~~~~~~~~~ - -The maximum/minimum ufuncs now reliably propagate nans. If one of the -arguments is a nan, then nan is retured. This affects np.min/np.max, amin/amax -and the array methods max/min. New ufuncs fmax and fmin have been added to deal -with non-propagating nans. - -Nan handling in sign -~~~~~~~~~~~~~~~~~~~~ - -The ufunc sign now returns nan for the sign of anan. 
- - -New ufuncs -~~~~~~~~~~ - -#. fmax - same as maximum for integer types and non-nan floats. Returns the - non-nan argument if one argument is nan and returns nan if both arguments - are nan. -#. fmin - same as minimum for integer types and non-nan floats. Returns the - non-nan argument if one argument is nan and returns nan if both arguments - are nan. -#. deg2rad - converts degrees to radians, same as the radians ufunc. -#. rad2deg - converts radians to degrees, same as the degrees ufunc. -#. log2 - base 2 logarithm. -#. exp2 - base 2 exponential. -#. trunc - truncate floats to nearest integer towards zero. -#. logaddexp - add numbers stored as logarithms and return the logarithm - of the result. -#. logaddexp2 - add numbers stored as base 2 logarithms and return the base 2 - logarithm of the result result. - -Masked arrays -~~~~~~~~~~~~~ - -Several new features and bug fixes, including: - - * structured arrays should now be fully supported by MaskedArray - (r6463, r6324, r6305, r6300, r6294...) - * Minor bug fixes (r6356, r6352, r6335, r6299, r6298) - * Improved support for __iter__ (r6326) - * made baseclass, sharedmask and hardmask accesible to the user (but - read-only) - * doc update - -gfortran support on windows -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Gfortran can now be used as a fortran compiler for numpy on windows, even when -the C compiler is Visual Studio (VS 2005 and above; VS 2003 will NOT work). -Gfortran + Visual studio does not work on windows 64 bits (but gcc + gfortran -does). It is unclear whether it will be possible to use gfortran and visual -studio at all on x64. - -Arch option for windows binary -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Automatic arch detection can now be bypassed from the command line for the superpack installed: - - numpy-1.3.0-superpack-win32.exe /arch=nosse - -will install a numpy which works on any x86, even if the running computer -supports SSE set. 
- -Deprecated features -=================== - -Histogram -~~~~~~~~~ - -The semantics of histogram has been modified to fix long-standing issues -with outliers handling. The main changes concern - -#. the definition of the bin edges, now including the rightmost edge, and -#. the handling of upper outliers, now ignored rather than tallied in the - rightmost bin. - -The previous behavior is still accessible using `new=False`, but this is -deprecated, and will be removed entirely in 1.4.0. - -Documentation changes -===================== - -A lot of documentation has been added. Both user guide and references can be -built from sphinx. - -New C API -========= - -Multiarray API -~~~~~~~~~~~~~~ - -The following functions have been added to the multiarray C API: - - * PyArray_GetEndianness: to get runtime endianness - -Ufunc API -~~~~~~~~~~~~~~ - -The following functions have been added to the ufunc API: - - * PyUFunc_FromFuncAndDataAndSignature: to declare a more general ufunc - (generalized ufunc). - - -New defines -~~~~~~~~~~~ - -New public C defines are available for ARCH specific code through numpy/npy_cpu.h: - - * NPY_CPU_X86: x86 arch (32 bits) - * NPY_CPU_AMD64: amd64 arch (x86_64, NOT Itanium) - * NPY_CPU_PPC: 32 bits ppc - * NPY_CPU_PPC64: 64 bits ppc - * NPY_CPU_SPARC: 32 bits sparc - * NPY_CPU_SPARC64: 64 bits sparc - * NPY_CPU_S390: S390 - * NPY_CPU_IA64: ia64 - * NPY_CPU_PARISC: PARISC - -New macros for CPU endianness has been added as well (see internal changes -below for details): - - * NPY_BYTE_ORDER: integer - * NPY_LITTLE_ENDIAN/NPY_BIG_ENDIAN defines - -Those provide portable alternatives to glibc endian.h macros for platforms -without it. - -Portable NAN, INFINITY, etc... 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -npy_math.h now makes available several portable macro to get NAN, INFINITY: - - * NPY_NAN: equivalent to NAN, which is a GNU extension - * NPY_INFINITY: equivalent to C99 INFINITY - * NPY_PZERO, NPY_NZERO: positive and negative zero respectively - -Corresponding single and extended precision macros are available as well. All -references to NAN, or home-grown computation of NAN on the fly have been -removed for consistency. - -Internal changes -================ - -numpy.core math configuration revamp -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This should make the porting to new platforms easier, and more robust. In -particular, the configuration stage does not need to execute any code on the -target platform, which is a first step toward cross-compilation. - -http://projects.scipy.org/numpy/browser/trunk/doc/neps/math_config_clean.txt - -umath refactor -~~~~~~~~~~~~~~ - -A lot of code cleanup for umath/ufunc code (charris). - -Improvements to build warnings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Numpy can now build with -W -Wall without warnings - -http://projects.scipy.org/numpy/browser/trunk/doc/neps/warnfix.txt - -Separate core math library -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The core math functions (sin, cos, etc... for basic C types) have been put into -a separate library; it acts as a compatibility layer, to support most C99 maths -functions (real only for now). The library includes platform-specific fixes for -various maths functions, such as using those versions should be more robust -than using your platform functions directly. The API for existing functions is -exactly the same as the C99 math functions API; the only difference is the npy -prefix (npy_cos vs cos). - -The core library will be made available to any extension in 1.4.0. - -CPU arch detection -~~~~~~~~~~~~~~~~~~ - -npy_cpu.h defines numpy specific CPU defines, such as NPY_CPU_X86, etc... 
-Those are portable across OS and toolchains, and set up when the header is -parsed, so that they can be safely used even in the case of cross-compilation -(the values is not set when numpy is built), or for multi-arch binaries (e.g. -fat binaries on Max OS X). - -npy_endian.h defines numpy specific endianness defines, modeled on the glibc -endian.h. NPY_BYTE_ORDER is equivalent to BYTE_ORDER, and one of -NPY_LITTLE_ENDIAN or NPY_BIG_ENDIAN is defined. As for CPU archs, those are set -when the header is parsed by the compiler, and as such can be used for -cross-compilation and multi-arch binaries. diff --git a/numpy-1.6.2/doc/release/1.4.0-notes.rst b/numpy-1.6.2/doc/release/1.4.0-notes.rst deleted file mode 100644 index 5429f8e76d..0000000000 --- a/numpy-1.6.2/doc/release/1.4.0-notes.rst +++ /dev/null @@ -1,238 +0,0 @@ -========================= -NumPy 1.4.0 Release Notes -========================= - -This minor includes numerous bug fixes, as well as a few new features. It -is backward compatible with 1.3.0 release. - -Highlights -========== - -* New datetime dtype support to deal with dates in arrays - -* Faster import time - -* Extended array wrapping mechanism for ufuncs - -* New Neighborhood iterator (C-level only) - -* C99-like complex functions in npymath - -New features -============ - -Extended array wrapping mechanism for ufuncs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -An __array_prepare__ method has been added to ndarray to provide subclasses -greater flexibility to interact with ufuncs and ufunc-like functions. ndarray -already provided __array_wrap__, which allowed subclasses to set the array type -for the result and populate metadata on the way out of the ufunc (as seen in -the implementation of MaskedArray). For some applications it is necessary to -provide checks and populate metadata *on the way in*. 
__array_prepare__ is -therefore called just after the ufunc has initialized the output array but -before computing the results and populating it. This way, checks can be made -and errors raised before operations which may modify data in place. - -Automatic detection of forward incompatibilities -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Previously, if an extension was built against a version N of NumPy, and used on -a system with NumPy M < N, the import_array was successfull, which could cause -crashes because the version M does not have a function in N. Starting from -NumPy 1.4.0, this will cause a failure in import_array, so the error will be -catched early on. - -New iterators -~~~~~~~~~~~~~ - -A new neighborhood iterator has been added to the C API. It can be used to -iterate over the items in a neighborhood of an array, and can handle boundaries -conditions automatically. Zero and one padding are available, as well as -arbitrary constant value, mirror and circular padding. - -New polynomial support -~~~~~~~~~~~~~~~~~~~~~~ - -New modules chebyshev and polynomial have been added. The new polynomial module -is not compatible with the current polynomial support in numpy, but is much -like the new chebyshev module. The most noticeable difference to most will -be that coefficients are specified from low to high power, that the low -level functions do *not* work with the Chebyshev and Polynomial classes as -arguements, and that the Chebyshev and Polynomial classes include a domain. -Mapping between domains is a linear substitution and the two classes can be -converted one to the other, allowing, for instance, a Chebyshev series in -one domain to be expanded as a polynomial in another domain. The new classes -should generally be used instead of the low level functions, the latter are -provided for those who wish to build their own classes. 
- -The new modules are not automatically imported into the numpy namespace, -they must be explicitly brought in with an "import numpy.polynomial" -statement. - -New C API -~~~~~~~~~ - -The following C functions have been added to the C API: - - #. PyArray_GetNDArrayCFeatureVersion: return the *API* version of the - loaded numpy. - #. PyArray_Correlate2 - like PyArray_Correlate, but implements the usual - definition of correlation. Inputs are not swapped, and conjugate is - taken for complex arrays. - #. PyArray_NeighborhoodIterNew - a new iterator to iterate over a - neighborhood of a point, with automatic boundaries handling. It is - documented in the iterators section of the C-API reference, and you can - find some examples in the multiarray_test.c.src file in numpy.core. - -New ufuncs -~~~~~~~~~~ - -The following ufuncs have been added to the C API: - - #. copysign - return the value of the first argument with the sign copied - from the second argument. - #. nextafter - return the next representable floating point value of the - first argument toward the second argument. - -New defines -~~~~~~~~~~~ - -The alpha processor is now defined and available in numpy/npy_cpu.h. The -failed detection of the PARISC processor has been fixed. The defines are: - - #. NPY_CPU_HPPA: PARISC - #. NPY_CPU_ALPHA: Alpha - -Testing -~~~~~~~ - - #. deprecated decorator: this decorator may be used to avoid cluttering - testing output while testing DeprecationWarning is effectively raised by - the decorated test. - #. assert_array_almost_equal_nulps: new method to compare two arrays of - floating point values. With this function, two values are considered - close if there are not many representable floating point values in - between, thus being more robust than assert_array_almost_equal when the - values fluctuate a lot. - #. assert_array_max_ulp: raise an assertion if there are more than N - representable numbers between two floating point values. - #. 
assert_warns: raise an AssertionError if a callable does not generate a - warning of the appropriate class, without altering the warning state. - -Reusing npymath -~~~~~~~~~~~~~~~ - -In 1.3.0, we started putting portable C math routines in npymath library, so -that people can use those to write portable extensions. Unfortunately, it was -not possible to easily link against this library: in 1.4.0, support has been -added to numpy.distutils so that 3rd party can reuse this library. See coremath -documentation for more information. - -Improved set operations -~~~~~~~~~~~~~~~~~~~~~~~ - -In previous versions of NumPy some set functions (intersect1d, -setxor1d, setdiff1d and setmember1d) could return incorrect results if -the input arrays contained duplicate items. These now work correctly -for input arrays with duplicates. setmember1d has been renamed to -in1d, as with the change to accept arrays with duplicates it is -no longer a set operation, and is conceptually similar to an -elementwise version of the Python operator 'in'. All of these -functions now accept the boolean keyword assume_unique. This is False -by default, but can be set True if the input arrays are known not -to contain duplicates, which can increase the functions' execution -speed. - -Improvements -============ - - #. numpy import is noticeably faster (from 20 to 30 % depending on the - platform and computer) - - #. The sort functions now sort nans to the end. - - * Real sort order is [R, nan] - * Complex sort order is [R + Rj, R + nanj, nan + Rj, nan + nanj] - - Complex numbers with the same nan placements are sorted according to - the non-nan part if it exists. - #. The type comparison functions have been made consistent with the new - sort order of nans. Searchsorted now works with sorted arrays - containing nan values. - #. Complex division has been made more resistent to overflow. - #. Complex floor division has been made more resistent to overflow. 
- -Deprecations -============ - -The following functions are deprecated: - - #. correlate: it takes a new keyword argument old_behavior. When True (the - default), it returns the same result as before. When False, compute the - conventional correlation, and take the conjugate for complex arrays. The - old behavior will be removed in NumPy 1.5, and raises a - DeprecationWarning in 1.4. - - #. unique1d: use unique instead. unique1d raises a deprecation - warning in 1.4, and will be removed in 1.5. - - #. intersect1d_nu: use intersect1d instead. intersect1d_nu raises - a deprecation warning in 1.4, and will be removed in 1.5. - - #. setmember1d: use in1d instead. setmember1d raises a deprecation - warning in 1.4, and will be removed in 1.5. - -The following raise errors: - - #. When operating on 0-d arrays, ``numpy.max`` and other functions accept - only ``axis=0``, ``axis=-1`` and ``axis=None``. Using an out-of-bounds - axes is an indication of a bug, so Numpy raises an error for these cases - now. - - #. Specifying ``axis > MAX_DIMS`` is no longer allowed; Numpy raises now an - error instead of behaving similarly as for ``axis=None``. - -Internal changes -================ - -Use C99 complex functions when available -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The numpy complex types are now guaranteed to be ABI compatible with C99 -complex type, if availble on the platform. Moreoever, the complex ufunc now use -the platform C99 functions intead of our own. - -split multiarray and umath source code -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The source code of multiarray and umath has been split into separate logic -compilation units. This should make the source code more amenable for -newcomers. 
- -Separate compilation -~~~~~~~~~~~~~~~~~~~~ - -By default, every file of multiarray (and umath) is merged into one for -compilation as was the case before, but if NPY_SEPARATE_COMPILATION env -variable is set to a non-negative value, experimental individual compilation of -each file is enabled. This makes the compile/debug cycle much faster when -working on core numpy. - -Separate core math library -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -New functions which have been added: - - * npy_copysign - * npy_nextafter - * npy_cpack - * npy_creal - * npy_cimag - * npy_cabs - * npy_cexp - * npy_clog - * npy_cpow - * npy_csqr - * npy_ccos - * npy_csin diff --git a/numpy-1.6.2/doc/release/1.5.0-notes.rst b/numpy-1.6.2/doc/release/1.5.0-notes.rst deleted file mode 100644 index 94cf89ff74..0000000000 --- a/numpy-1.6.2/doc/release/1.5.0-notes.rst +++ /dev/null @@ -1,131 +0,0 @@ -========================= -NumPy 1.5.0 Release Notes -========================= - - -Highlights -========== - -Python 3 compatibility ----------------------- - -This is the first NumPy release which is compatible with Python 3. Support for -Python 3 and Python 2 is done from a single code base. Extensive notes on -changes can be found at -``_. - -Note that the Numpy testing framework relies on nose, which does not have a -Python 3 compatible release yet. A working Python 3 branch of nose can be found -at ``_ however. - -Porting of SciPy to Python 3 is expected to be completed soon. - -:pep:`3118` compatibility -------------------------- - -The new buffer protocol described by PEP 3118 is fully supported in this -version of Numpy. On Python versions >= 2.6 Numpy arrays expose the buffer -interface, and array(), asarray() and other functions accept new-style buffers -as input. - - -New features -============ - -Warning on casting complex to real ----------------------------------- - -Numpy now emits a `numpy.ComplexWarning` when a complex number is cast -into a real number. 
For example: - - >>> x = np.array([1,2,3]) - >>> x[:2] = np.array([1+2j, 1-2j]) - ComplexWarning: Casting complex values to real discards the imaginary part - -The cast indeed discards the imaginary part, and this may not be the -intended behavior in all cases, hence the warning. This warning can be -turned off in the standard way: - - >>> import warnings - >>> warnings.simplefilter("ignore", np.ComplexWarning) - -Dot method for ndarrays ------------------------ - -Ndarrays now have the dot product also as a method, which allows writing -chains of matrix products as - - >>> a.dot(b).dot(c) - -instead of the longer alternative - - >>> np.dot(a, np.dot(b, c)) - -linalg.slogdet function ------------------------ - -The slogdet function returns the sign and logarithm of the determinant -of a matrix. Because the determinant may involve the product of many -small/large values, the result is often more accurate than that obtained -by simple multiplication. - -new header ----------- - -The new header file ndarraytypes.h contains the symbols from -ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and -NO_IMPORT/_ARRAY macros. Broadly, these symbols are types, typedefs, -and enumerations; the array function calls are left in -ndarrayobject.h. This allows users to include array-related types and -enumerations without needing to concern themselves with the macro -expansions and their side- effects. - - -Changes -======= - -polynomial.polynomial ---------------------- - -* The polyint and polyder functions now check that the specified number - integrations or derivations is a non-negative integer. The number 0 is - a valid value for both functions. -* A degree method has been added to the Polynomial class. -* A trimdeg method has been added to the Polynomial class. It operates like - truncate except that the argument is the desired degree of the result, - not the number of coefficients. -* Polynomial.fit now uses None as the default domain for the fit. 
The default - Polynomial domain can be specified by using [] as the domain value. -* Weights can be used in both polyfit and Polynomial.fit -* A linspace method has been added to the Polynomial class to ease plotting. -* The polymulx function was added. - -polynomial.chebyshev --------------------- - -* The chebint and chebder functions now check that the specified number - integrations or derivations is a non-negative integer. The number 0 is - a valid value for both functions. -* A degree method has been added to the Chebyshev class. -* A trimdeg method has been added to the Chebyshev class. It operates like - truncate except that the argument is the desired degree of the result, - not the number of coefficients. -* Chebyshev.fit now uses None as the default domain for the fit. The default - Chebyshev domain can be specified by using [] as the domain value. -* Weights can be used in both chebfit and Chebyshev.fit -* A linspace method has been added to the Chebyshev class to ease plotting. -* The chebmulx function was added. -* Added functions for the Chebyshev points of the first and second kind. - - -histogram ---------- - -After a two years transition period, the old behavior of the histogram function -has been phased out, and the "new" keyword has been removed. - -correlate ---------- - -The old behavior of correlate was deprecated in 1.4.0, the new behavior (the -usual definition for cross-correlation) is now the default. diff --git a/numpy-1.6.2/doc/release/1.6.0-notes.rst b/numpy-1.6.2/doc/release/1.6.0-notes.rst deleted file mode 100644 index c5f53a0eb3..0000000000 --- a/numpy-1.6.2/doc/release/1.6.0-notes.rst +++ /dev/null @@ -1,177 +0,0 @@ -========================= -NumPy 1.6.0 Release Notes -========================= - -This release includes several new features as well as numerous bug fixes and -improved documentation. It is backward compatible with the 1.5.0 release, and -supports Python 2.4 - 2.7 and 3.1 - 3.2. 
- - -Highlights -========== - -* Re-introduction of datetime dtype support to deal with dates in arrays. - -* A new 16-bit floating point type. - -* A new iterator, which improves performance of many functions. - - -New features -============ - -New 16-bit floating point type ------------------------------- - -This release adds support for the IEEE 754-2008 binary16 format, available as -the data type ``numpy.half``. Within Python, the type behaves similarly to -`float` or `double`, and C extensions can add support for it with the exposed -half-float API. - - -New iterator ------------- - -A new iterator has been added, replacing the functionality of the -existing iterator and multi-iterator with a single object and API. -This iterator works well with general memory layouts different from -C or Fortran contiguous, and handles both standard NumPy and -customized broadcasting. The buffering, automatic data type -conversion, and optional output parameters, offered by -ufuncs but difficult to replicate elsewhere, are now exposed by this -iterator. - - -Legendre, Laguerre, Hermite, HermiteE polynomials in ``numpy.polynomial`` -------------------------------------------------------------------------- - -Extend the number of polynomials available in the polynomial package. In -addition, a new ``window`` attribute has been added to the classes in -order to specify the range the ``domain`` maps to. This is mostly useful -for the Laguerre, Hermite, and HermiteE polynomials whose natural domains -are infinite and provides a more intuitive way to get the correct mapping -of values without playing unnatural tricks with the domain. - - -Fortran assumed shape array and size function support in ``numpy.f2py`` ------------------------------------------------------------------------ - -F2py now supports wrapping Fortran 90 routines that use assumed shape -arrays. 
Before such routines could be called from Python but the -corresponding Fortran routines received assumed shape arrays as zero -length arrays which caused unpredicted results. Thanks to Lorenz -Hüdepohl for pointing out the correct way to interface routines with -assumed shape arrays. - -In addition, f2py supports now automatic wrapping of Fortran routines -that use two argument ``size`` function in dimension specifications. - - -Other new functions -------------------- - -``numpy.ravel_multi_index`` : Converts a multi-index tuple into -an array of flat indices, applying boundary modes to the indices. - -``numpy.einsum`` : Evaluate the Einstein summation convention. Using the -Einstein summation convention, many common multi-dimensional array operations -can be represented in a simple fashion. This function provides a way compute -such summations. - -``numpy.count_nonzero`` : Counts the number of non-zero elements in an array. - -``numpy.result_type`` and ``numpy.min_scalar_type`` : These functions expose -the underlying type promotion used by the ufuncs and other operations to -determine the types of outputs. These improve upon the ``numpy.common_type`` -and ``numpy.mintypecode`` which provide similar functionality but do -not match the ufunc implementation. - - -Changes -======= - -``default error handling`` --------------------------- - -The default error handling has been change from ``print`` to ``warn`` for -all except for ``underflow``, which remains as ``ignore``. - - -``numpy.distutils`` -------------------- - -Several new compilers are supported for building Numpy: the Portland Group -Fortran compiler on OS X, the PathScale compiler suite and the 64-bit Intel C -compiler on Linux. - - -``numpy.testing`` ------------------ - -The testing framework gained ``numpy.testing.assert_allclose``, which provides -a more convenient way to compare floating point arrays than -`assert_almost_equal`, `assert_approx_equal` and `assert_array_almost_equal`. 
- - -``C API`` ---------- - -In addition to the APIs for the new iterator and half data type, a number -of other additions have been made to the C API. The type promotion -mechanism used by ufuncs is exposed via ``PyArray_PromoteTypes``, -``PyArray_ResultType``, and ``PyArray_MinScalarType``. A new enumeration -``NPY_CASTING`` has been added which controls what types of casts are -permitted. This is used by the new functions ``PyArray_CanCastArrayTo`` -and ``PyArray_CanCastTypeTo``. A more flexible way to handle -conversion of arbitrary python objects into arrays is exposed by -``PyArray_GetArrayParamsFromObject``. - - -Deprecated features -=================== - -The "normed" keyword in ``numpy.histogram`` is deprecated. Its functionality -will be replaced by the new "density" keyword. - - -Removed features -================ - -``numpy.fft`` -------------- - -The functions `refft`, `refft2`, `refftn`, `irefft`, `irefft2`, `irefftn`, -which were aliases for the same functions without the 'e' in the name, were -removed. - - -``numpy.memmap`` ----------------- - -The `sync()` and `close()` methods of memmap were removed. Use `flush()` and -"del memmap" instead. - - -``numpy.lib`` -------------- - -The deprecated functions ``numpy.unique1d``, ``numpy.setmember1d``, -``numpy.intersect1d_nu`` and ``numpy.lib.ufunclike.log2`` were removed. - - -``numpy.ma`` ------------- - -Several deprecated items were removed from the ``numpy.ma`` module:: - - * ``numpy.ma.MaskedArray`` "raw_data" method - * ``numpy.ma.MaskedArray`` constructor "flag" keyword - * ``numpy.ma.make_mask`` "flag" keyword - * ``numpy.ma.allclose`` "fill_value" keyword - - -``numpy.distutils`` -------------------- - -The ``numpy.get_numpy_include`` function was removed, use ``numpy.get_include`` -instead. 
diff --git a/numpy-1.6.2/doc/release/1.6.1-notes.rst b/numpy-1.6.2/doc/release/1.6.1-notes.rst deleted file mode 100644 index 5f59cb7438..0000000000 --- a/numpy-1.6.2/doc/release/1.6.1-notes.rst +++ /dev/null @@ -1,22 +0,0 @@ -========================= -NumPy 1.6.1 Release Notes -========================= - -This is a bugfix only release in the 1.6.x series. - - -Issues fixed ------------- - -#1834 einsum fails for specific shapes -#1837 einsum throws nan or freezes python for specific array shapes -#1838 object <-> structured type arrays regression -#1851 regression for SWIG based code in 1.6.0 -#1863 Buggy results when operating on array copied with astype() -#1870 Fix corner case of object array assignment -#1843 Py3k: fix error with recarray -#1885 nditer: Error in detecting double reduction loop -#1874 f2py: fix --include_paths bug -#1749 Fix ctypes.load_library() -#1895/1896 iter: writeonly operands weren't always being buffered correctly - diff --git a/numpy-1.6.2/doc/release/1.6.2-notes.rst b/numpy-1.6.2/doc/release/1.6.2-notes.rst deleted file mode 100644 index 7b62e6c93a..0000000000 --- a/numpy-1.6.2/doc/release/1.6.2-notes.rst +++ /dev/null @@ -1,90 +0,0 @@ -========================= -NumPy 1.6.2 Release Notes -========================= - -This is a bugfix release in the 1.6.x series. Due to the delay of the NumPy -1.7.0 release, this release contains far more fixes than a regular NumPy bugfix -release. It also includes a number of documentation and build improvements. 
- - -``numpy.core`` issues fixed ---------------------------- - -#2063 make unique() return consistent index -#1138 allow creating arrays from empty buffers or empty slices -#1446 correct note about correspondence vstack and concatenate -#1149 make argmin() work for datetime -#1672 fix allclose() to work for scalar inf -#1747 make np.median() work for 0-D arrays -#1776 make complex division by zero to yield inf properly -#1675 add scalar support for the format() function -#1905 explicitly check for NaNs in allclose() -#1952 allow floating ddof in std() and var() -#1948 fix regression for indexing chararrays with empty list -#2017 fix type hashing -#2046 deleting array attributes causes segfault -#2033 a**2.0 has incorrect type -#2045 make attribute/iterator_element deletions not segfault -#2021 fix segfault in searchsorted() -#2073 fix float16 __array_interface__ bug - - -``numpy.lib`` issues fixed --------------------------- - -#2048 break reference cycle in NpzFile -#1573 savetxt() now handles complex arrays -#1387 allow bincount() to accept empty arrays -#1899 fixed histogramdd() bug with empty inputs -#1793 fix failing npyio test under py3k -#1936 fix extra nesting for subarray dtypes -#1848 make tril/triu return the same dtype as the original array -#1918 use Py_TYPE to access ob_type, so it works also on Py3 - - -``numpy.f2py`` changes ----------------------- - -ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args -BLD: Improve reporting of fcompiler value -BUG: Fix f2py test_kind.py test - - -``numpy.poly`` changes ----------------------- - -ENH: Add some tests for polynomial printing -ENH: Add companion matrix functions -DOC: Rearrange the polynomial documents -BUG: Fix up links to classes -DOC: Add version added to some of the polynomial package modules -DOC: Document xxxfit functions in the polynomial package modules -BUG: The polynomial convenience classes let different types interact -DOC: Document the use of the polynomial 
convenience classes -DOC: Improve numpy reference documentation of polynomial classes -ENH: Improve the computation of polynomials from roots -STY: Code cleanup in polynomial [*]fromroots functions -DOC: Remove references to cast and NA, which were added in 1.7 - - -``numpy.distutils`` issues fixed -------------------------------- - -#1261 change compile flag on AIX from -O5 to -O3 -#1377 update HP compiler flags -#1383 provide better support for C++ code on HPUX -#1857 fix build for py3k + pip -BLD: raise a clearer warning in case of building without cleaning up first -BLD: follow build_ext coding convention in build_clib -BLD: fix up detection of Intel CPU on OS X in system_info.py -BLD: add support for the new X11 directory structure on Ubuntu & co. -BLD: add ufsparse to the libraries search path. -BLD: add 'pgfortran' as a valid compiler in the Portland Group -BLD: update version match regexp for IBM AIX Fortran compilers. - - -``numpy.random`` issues fixed ------------------------------ - -BUG: Use npy_intp instead of long in mtrand - diff --git a/numpy-1.6.2/doc/release/2.0.0-notes.rst b/numpy-1.6.2/doc/release/2.0.0-notes.rst deleted file mode 100644 index cb5bdf14b5..0000000000 --- a/numpy-1.6.2/doc/release/2.0.0-notes.rst +++ /dev/null @@ -1,16 +0,0 @@ -========================= -NumPy 2.0.0 Release Notes -========================= - - -Highlights -========== - - -New features -============ - - -Changes -======= - diff --git a/numpy-1.6.2/doc/release/time_based_proposal.rst b/numpy-1.6.2/doc/release/time_based_proposal.rst deleted file mode 100644 index 555be68633..0000000000 --- a/numpy-1.6.2/doc/release/time_based_proposal.rst +++ /dev/null @@ -1,129 +0,0 @@ -.. vim:syntax=rst - -Introduction -============ - -This document proposes some enhancements for numpy and scipy releases. 
-Successive numpy and scipy releases are too far apart from a time point of -view - some people who are in the numpy release team feel that it cannot -improve without a bit more formal release process. The main proposal is to -follow a time-based release, with expected dates for code freeze, beta and rc. -The goal is two folds: make release more predictable, and move the code forward. - -Rationale -========= - -Right now, the release process of numpy is relatively organic. When some -features are there, we may decide to make a new release. Because there is not -fixed schedule, people don't really know when new features and bug fixes will -go into a release. More significantly, having an expected release schedule -helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump -in and put new code, even break things if needed. But after some point, only -bug fixes are accepted: this makes beta and RC releases much easier; calming -things down toward the release date helps focusing on bugs and regressions - -Proposal -======== - -Time schedule -------------- - -The proposed schedule is to release numpy every 9 weeks - the exact period can -be tweaked if it ends up not working as expected. There will be several stages -for the cycle: - - * Development: anything can happen (by anything, we mean as currently - done). The focus is on new features, refactoring, etc... - - * Beta: no new features. No bug fixing which requires heavy changes. - regression fixes which appear on supported platforms and were not - caught earlier. - - * Polish/RC: only docstring changes and blocker regressions are allowed. 
- -The schedule would be as follows: - - +------+-----------------+-----------------+------------------+ - | Week | 1.3.0 | 1.4.0 | Release time | - +======+=================+=================+==================+ - | 1 | Development | | | - +------+-----------------+-----------------+------------------+ - | 2 | Development | | | - +------+-----------------+-----------------+------------------+ - | 3 | Development | | | - +------+-----------------+-----------------+------------------+ - | 4 | Development | | | - +------+-----------------+-----------------+------------------+ - | 5 | Development | | | - +------+-----------------+-----------------+------------------+ - | 6 | Development | | | - +------+-----------------+-----------------+------------------+ - | 7 | Beta | | | - +------+-----------------+-----------------+------------------+ - | 8 | Beta | | | - +------+-----------------+-----------------+------------------+ - | 9 | Beta | | 1.3.0 released | - +------+-----------------+-----------------+------------------+ - | 10 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 11 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 12 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 13 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 14 | | Development | | - +------+-----------------+-----------------+------------------+ - | 15 | | Development | | - +------+-----------------+-----------------+------------------+ - | 16 | | Beta | | - +------+-----------------+-----------------+------------------+ - | 17 | | Beta | | - +------+-----------------+-----------------+------------------+ - | 18 | | Beta | 1.4.0 released | - +------+-----------------+-----------------+------------------+ - -Each stage can be defined as follows: - - 
+------------------+-------------+----------------+----------------+ - | | Development | Beta | Polish | - +==================+=============+================+================+ - | Python Frozen | | slushy | Y | - +------------------+-------------+----------------+----------------+ - | Docstring Frozen | | slushy | thicker slush | - +------------------+-------------+----------------+----------------+ - | C code Frozen | | thicker slush | thicker slush | - +------------------+-------------+----------------+----------------+ - -Terminology: - - * slushy: you can change it if you beg the release team and it's really - important and you coordinate with docs/translations; no "big" - changes. - - * thicker slush: you can change it if it's an open bug marked - showstopper for the Polish release, you beg the release team, the - change is very very small yet very very important, and you feel - extremely guilty about your transgressions. - -The different frozen states are intended to be gradients. The exact meaning is -decided by the release manager: he has the last word on what's go in, what -doesn't. The proposed schedule means that there would be at most 12 weeks -between putting code into the source code repository and being released. - -Release team ------------- - -For every release, there would be at least one release manager. We propose to -rotate the release manager: rotation means it is not always the same person -doing the dirty job, and it should also keep the release manager honest. 
- -References -========== - - * Proposed schedule for Gnome from Havoc Pennington (one of the core - GTK and Gnome manager): - http://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html - The proposed schedule is heavily based on this email - - * http://live.gnome.org/ReleasePlanning/Freezes diff --git a/numpy-1.6.2/doc/source/_static/scipy.css b/numpy-1.6.2/doc/source/_static/scipy.css deleted file mode 100644 index 44ac1a60f7..0000000000 --- a/numpy-1.6.2/doc/source/_static/scipy.css +++ /dev/null @@ -1,183 +0,0 @@ -@import "default.css"; - -/** - * Spacing fixes - */ - -div.body p, div.body dd, div.body li { - line-height: 125%; -} - -ul.simple { - margin-top: 0; - margin-bottom: 0; - padding-top: 0; - padding-bottom: 0; -} - -/* spacing around blockquoted fields in parameters/attributes/returns */ -td.field-body > blockquote { - margin-top: 0.1em; - margin-bottom: 0.5em; -} - -/* spacing around example code */ -div.highlight > pre { - padding: 2px 5px 2px 5px; -} - -/* spacing in see also definition lists */ -dl.last > dd { - margin-top: 1px; - margin-bottom: 5px; - margin-left: 30px; -} - -/** - * Hide dummy toctrees - */ - -ul { - padding-top: 0; - padding-bottom: 0; - margin-top: 0; - margin-bottom: 0; -} -ul li { - padding-top: 0; - padding-bottom: 0; - margin-top: 0; - margin-bottom: 0; -} -ul li a.reference { - padding-top: 0; - padding-bottom: 0; - margin-top: 0; - margin-bottom: 0; -} - -/** - * Make high-level subsections easier to distinguish from top-level ones - */ -div.body h3 { - background-color: transparent; -} - -div.body h4 { - border: none; - background-color: transparent; -} - -/** - * Scipy colors - */ - -body { - background-color: rgb(100,135,220); -} - -div.document { - background-color: rgb(230,230,230); -} - -div.sphinxsidebar { - background-color: rgb(230,230,230); -} - -div.related { - background-color: rgb(100,135,220); -} - -div.sphinxsidebar h3 { - color: rgb(0,102,204); -} - -div.sphinxsidebar h3 a { - color: 
rgb(0,102,204); -} - -div.sphinxsidebar h4 { - color: rgb(0,82,194); -} - -div.sphinxsidebar p { - color: black; -} - -div.sphinxsidebar a { - color: #355f7c; -} - -div.sphinxsidebar ul.want-points { - list-style: disc; -} - -.field-list th { - color: rgb(0,102,204); -} - -/** - * Extra admonitions - */ - -div.tip { - background-color: #ffffe4; - border: 1px solid #ee6; -} - -div.plot-output { - clear-after: both; -} - -div.plot-output .figure { - float: left; - text-align: center; - margin-bottom: 0; - padding-bottom: 0; -} - -div.plot-output .caption { - margin-top: 2; - padding-top: 0; -} - -div.plot-output p.admonition-title { - display: none; -} - -div.plot-output:after { - content: ""; - display: block; - height: 0; - clear: both; -} - - -/* -div.admonition-example { - background-color: #e4ffe4; - border: 1px solid #ccc; -}*/ - - -/** - * Styling for field lists - */ - -table.field-list th { - border-left: 1px solid #aaa !important; - padding-left: 5px; -} - -table.field-list { - border-collapse: separate; - border-spacing: 10px; -} - -/** - * Styling for footnotes - */ - -table.footnote td, table.footnote th { - border: none; -} diff --git a/numpy-1.6.2/doc/source/_templates/autosummary/class.rst b/numpy-1.6.2/doc/source/_templates/autosummary/class.rst deleted file mode 100644 index 0cabe7cd16..0000000000 --- a/numpy-1.6.2/doc/source/_templates/autosummary/class.rst +++ /dev/null @@ -1,23 +0,0 @@ -{% extends "!autosummary/class.rst" %} - -{% block methods %} -{% if methods %} - .. HACK - .. autosummary:: - :toctree: - {% for item in methods %} - {{ name }}.{{ item }} - {%- endfor %} -{% endif %} -{% endblock %} - -{% block attributes %} -{% if attributes %} - .. HACK - .. 
autosummary:: - :toctree: - {% for item in attributes %} - {{ name }}.{{ item }} - {%- endfor %} -{% endif %} -{% endblock %} diff --git a/numpy-1.6.2/doc/source/_templates/indexcontent.html b/numpy-1.6.2/doc/source/_templates/indexcontent.html deleted file mode 100644 index b497c93dd8..0000000000 --- a/numpy-1.6.2/doc/source/_templates/indexcontent.html +++ /dev/null @@ -1,58 +0,0 @@ -{% extends "defindex.html" %} -{% block tables %} -

    Parts of the documentation:

    - - -
    - - - -
    - -

    Indices and tables:

    - - -
    - - - - - - -
    - -

    Meta information:

    - - -
    - - - - - -
    - -

    Acknowledgements

    -

    - Large parts of this manual originate from Travis E. Oliphant's book - "Guide to Numpy" (which generously entered - Public Domain in August 2008). The reference documentation for many of - the functions are written by numerous contributors and developers of - Numpy, both prior to and during the - Numpy Documentation Marathon. -

    -

    - The Documentation Marathon is still ongoing. Please help us write - better documentation for Numpy by joining it! Instructions on how to - join and what to do can be found - on the scipy.org website. -

    -{% endblock %} diff --git a/numpy-1.6.2/doc/source/_templates/indexsidebar.html b/numpy-1.6.2/doc/source/_templates/indexsidebar.html deleted file mode 100644 index 409743a038..0000000000 --- a/numpy-1.6.2/doc/source/_templates/indexsidebar.html +++ /dev/null @@ -1,5 +0,0 @@ -

    Resources

    - diff --git a/numpy-1.6.2/doc/source/_templates/layout.html b/numpy-1.6.2/doc/source/_templates/layout.html deleted file mode 100644 index 27798878e9..0000000000 --- a/numpy-1.6.2/doc/source/_templates/layout.html +++ /dev/null @@ -1,17 +0,0 @@ -{% extends "!layout.html" %} -{% block rootrellink %} -
  • {{ shorttitle }}{{ reldelim1 }}
  • -{% endblock %} - -{% block sidebarsearch %} -{%- if sourcename %} - -{%- endif %} -{{ super() }} -{% endblock %} diff --git a/numpy-1.6.2/doc/source/about.rst b/numpy-1.6.2/doc/source/about.rst deleted file mode 100644 index bcfbe53230..0000000000 --- a/numpy-1.6.2/doc/source/about.rst +++ /dev/null @@ -1,65 +0,0 @@ -About NumPy -=========== - -`NumPy `__ is the fundamental package -needed for scientific computing with Python. This package contains: - -- a powerful N-dimensional :ref:`array object ` -- sophisticated :ref:`(broadcasting) functions ` -- basic :ref:`linear algebra functions ` -- basic :ref:`Fourier transforms ` -- sophisticated :ref:`random number capabilities ` -- tools for integrating Fortran code -- tools for integrating C/C++ code - -Besides its obvious scientific uses, *NumPy* can also be used as an -efficient multi-dimensional container of generic data. Arbitrary -data types can be defined. This allows *NumPy* to seamlessly and -speedily integrate with a wide variety of databases. - -NumPy is a successor for two earlier scientific Python libraries: -NumPy derives from the old *Numeric* code base and can be used -as a replacement for *Numeric*. It also adds the features introduced -by *Numarray* and can also be used to replace *Numarray*. - -NumPy community ---------------- - -Numpy is a distributed, volunteer, open-source project. *You* can help -us make it better; if you believe something should be improved either -in functionality or in documentation, don't hesitate to contact us --- or -even better, contact us and participate in fixing the problem. - -Our main means of communication are: - -- `scipy.org website `__ - -- `Mailing lists `__ - -- `Numpy Trac `__ (bug "tickets" go here) - -More information about the development of Numpy can be found at -http://scipy.org/Developer_Zone - -If you want to fix issues in this documentation, the easiest way -is to participate in `our ongoing documentation marathon -`__. 
- - -About this documentation -======================== - -Conventions ------------ - -Names of classes, objects, constants, etc. are given in **boldface** font. -Often they are also links to a more detailed documentation of the -referred object. - -This manual contains many examples of use, usually prefixed with the -Python prompt ``>>>`` (which is not a part of the example code). The -examples assume that you have first entered:: - ->>> import numpy as np - -before running the examples. diff --git a/numpy-1.6.2/doc/source/bugs.rst b/numpy-1.6.2/doc/source/bugs.rst deleted file mode 100644 index cd2c5d3e85..0000000000 --- a/numpy-1.6.2/doc/source/bugs.rst +++ /dev/null @@ -1,23 +0,0 @@ -************** -Reporting bugs -************** - -File bug reports or feature requests, and make contributions -(e.g. code patches), by submitting a "ticket" on the Trac pages: - -- Numpy Trac: http://scipy.org/scipy/numpy - -Because of spam abuse, you must create an account on our Trac in order -to submit a ticket, then click on the "New Ticket" tab that only -appears when you have logged in. Please give as much information as -you can in the ticket. It is extremely useful if you can supply a -small self-contained code snippet that reproduces the problem. Also -specify the component, the version you are referring to and the -milestone. - -Report bugs to the appropriate Trac instance (there is one for NumPy -and a different one for SciPy). There are also read-only mailing lists -for tracking the status of your bug ticket. - -More information can be found on the http://scipy.org/Developer_Zone -website. 
diff --git a/numpy-1.6.2/doc/source/conf.py b/numpy-1.6.2/doc/source/conf.py deleted file mode 100644 index 4dfec91dba..0000000000 --- a/numpy-1.6.2/doc/source/conf.py +++ /dev/null @@ -1,269 +0,0 @@ -# -*- coding: utf-8 -*- - -import sys, os, re - -# Check Sphinx version -import sphinx -if sphinx.__version__ < "1.0.1": - raise RuntimeError("Sphinx 1.0.1 or newer required") - -needs_sphinx = '1.0' - -# ----------------------------------------------------------------------------- -# General configuration -# ----------------------------------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -sys.path.insert(0, os.path.abspath('../sphinxext')) - -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc', - 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', - 'sphinx.ext.doctest', 'sphinx.ext.autosummary', - 'plot_directive'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -#master_doc = 'index' - -# General substitutions. -project = 'NumPy' -copyright = '2008-2009, The Scipy community' - -# The default replacements for |version| and |release|, also used in various -# other places throughout the built documents. -# -import numpy -# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present) -version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__) -version = re.sub(r'(\.dev\d+).*?$', r'\1', version) -# The full version, including alpha/beta/rc tags. -release = numpy.__version__ -print version, release - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. 
-today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -default_role = "autolink" - -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = False - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - - -# ----------------------------------------------------------------------------- -# HTML output -# ----------------------------------------------------------------------------- - -# The style sheet to use for HTML and HTML Help pages. A file of that name -# must exist either in Sphinx' static/ path, or in one of the custom paths -# given in html_static_path. -html_style = 'scipy.css' - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -html_title = "%s v%s Manual (DRAFT)" % (project, version) - -# The name of an image file (within the static path) to place at the top of -# the sidebar. -html_logo = 'scipyshiny_small.png' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. 
-html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -html_sidebars = { - 'index': 'indexsidebar.html' -} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -html_additional_pages = { - 'index': 'indexcontent.html', -} - -# If false, no module index is generated. -html_use_modindex = True - -# If true, the reST sources are included in the HTML build as _sources/. -#html_copy_source = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".html"). -#html_file_suffix = '.html' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'numpy' - -# Pngmath should try to align formulas properly -pngmath_use_preview = True - - -# ----------------------------------------------------------------------------- -# LaTeX output -# ----------------------------------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, document class [howto/manual]). -_stdauthor = 'Written by the NumPy community' -latex_documents = [ - ('reference/index', 'numpy-ref.tex', 'NumPy Reference', - _stdauthor, 'manual'), - ('user/index', 'numpy-user.tex', 'NumPy User Guide', - _stdauthor, 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. 
-#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -latex_preamble = r''' -\usepackage{amsmath} -\DeclareUnicodeCharacter{00A0}{\nobreakspace} - -% In the parameters section, place a newline after the Parameters -% header -\usepackage{expdlist} -\let\latexdescription=\description -\def\description{\latexdescription{}{} \breaklabel} - -% Make Examples/etc section headers smaller and more compact -\makeatletter -\titleformat{\paragraph}{\normalsize\py@HeaderFamily}% - {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor} -\titlespacing*{\paragraph}{0pt}{1ex}{0pt} -\makeatother - -% Fix footer/header -\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}} -\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}} -''' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. 
-latex_use_modindex = False - - -# ----------------------------------------------------------------------------- -# Intersphinx configuration -# ----------------------------------------------------------------------------- -intersphinx_mapping = {'http://docs.python.org/dev': None} - - -# ----------------------------------------------------------------------------- -# Numpy extensions -# ----------------------------------------------------------------------------- - -# If we want to do a phantom import from an XML file for all autodocs -phantom_import_file = 'dump.xml' - -# Make numpydoc to generate plots for example sections -numpydoc_use_plots = True - -# ----------------------------------------------------------------------------- -# Autosummary -# ----------------------------------------------------------------------------- - -import glob -autosummary_generate = glob.glob("reference/*.rst") - -# ----------------------------------------------------------------------------- -# Coverage checker -# ----------------------------------------------------------------------------- -coverage_ignore_modules = r""" - """.split() -coverage_ignore_functions = r""" - test($|_) (some|all)true bitwise_not cumproduct pkgload - generic\. 
- """.split() -coverage_ignore_classes = r""" - """.split() - -coverage_c_path = [] -coverage_c_regexes = {} -coverage_ignore_c_items = {} - - -# ----------------------------------------------------------------------------- -# Plots -# ----------------------------------------------------------------------------- -plot_pre_code = """ -import numpy as np -np.random.seed(0) -""" -plot_include_source = True -plot_formats = [('png', 100), 'pdf'] - -import math -phi = (math.sqrt(5) + 1)/2 - -import matplotlib -matplotlib.rcParams.update({ - 'font.size': 8, - 'axes.titlesize': 8, - 'axes.labelsize': 8, - 'xtick.labelsize': 8, - 'ytick.labelsize': 8, - 'legend.fontsize': 8, - 'figure.figsize': (3*phi, 3), - 'figure.subplot.bottom': 0.2, - 'figure.subplot.left': 0.2, - 'figure.subplot.right': 0.9, - 'figure.subplot.top': 0.85, - 'figure.subplot.wspace': 0.4, - 'text.usetex': False, -}) diff --git a/numpy-1.6.2/doc/source/contents.rst b/numpy-1.6.2/doc/source/contents.rst deleted file mode 100644 index 04ee229b22..0000000000 --- a/numpy-1.6.2/doc/source/contents.rst +++ /dev/null @@ -1,14 +0,0 @@ -##################### -Numpy manual contents -##################### - -.. 
toctree:: - - user/index - reference/index - dev/index - release - about - bugs - license - glossary diff --git a/numpy-1.6.2/doc/source/dev/gitwash/branch_list.png b/numpy-1.6.2/doc/source/dev/gitwash/branch_list.png deleted file mode 100644 index 1196eb754d..0000000000 Binary files a/numpy-1.6.2/doc/source/dev/gitwash/branch_list.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/dev/gitwash/branch_list_compare.png b/numpy-1.6.2/doc/source/dev/gitwash/branch_list_compare.png deleted file mode 100644 index 336afa3746..0000000000 Binary files a/numpy-1.6.2/doc/source/dev/gitwash/branch_list_compare.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/dev/gitwash/configure_git.rst b/numpy-1.6.2/doc/source/dev/gitwash/configure_git.rst deleted file mode 100644 index 7e8cf8cbd6..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/configure_git.rst +++ /dev/null @@ -1,123 +0,0 @@ -.. _configure-git: - -================= -Git configuration -================= - -.. _git-config-basic: - -Overview -======== - -Your personal git_ configurations are saved in the ``.gitconfig`` file in -your home directory. 
-Here is an example ``.gitconfig`` file:: - - [user] - name = Your Name - email = you@yourdomain.example.com - - [alias] - ci = commit -a - co = checkout - st = status -a - stat = status -a - br = branch - wdiff = diff --color-words - - [core] - editor = vim - - [merge] - summary = true - -You can edit this file directly or you can use the ``git config --global`` -command:: - - git config --global user.name "Your Name" - git config --global user.email you@yourdomain.example.com - git config --global alias.ci "commit -a" - git config --global alias.co checkout - git config --global alias.st "status -a" - git config --global alias.stat "status -a" - git config --global alias.br branch - git config --global alias.wdiff "diff --color-words" - git config --global core.editor vim - git config --global merge.summary true - -To set up on another computer, you can copy your ``~/.gitconfig`` file, -or run the commands above. - -In detail -========= - -user.name and user.email ------------------------- - -It is good practice to tell git_ who you are, for labeling any changes -you make to the code. The simplest way to do this is from the command -line:: - - git config --global user.name "Your Name" - git config --global user.email you@yourdomain.example.com - -This will write the settings into your git configuration file, which -should now contain a user section with your name and email:: - - [user] - name = Your Name - email = you@yourdomain.example.com - -Of course you'll need to replace ``Your Name`` and ``you@yourdomain.example.com`` -with your actual name and email address. - -Aliases -------- - -You might well benefit from some aliases to common commands. - -For example, you might well want to be able to shorten ``git checkout`` -to ``git co``. 
Or you may want to alias ``git diff --color-words`` -(which gives a nicely formatted output of the diff) to ``git wdiff`` - -The following ``git config --global`` commands:: - - git config --global alias.ci "commit -a" - git config --global alias.co checkout - git config --global alias.st "status -a" - git config --global alias.stat "status -a" - git config --global alias.br branch - git config --global alias.wdiff "diff --color-words" - -will create an ``alias`` section in your ``.gitconfig`` file with contents -like this:: - - [alias] - ci = commit -a - co = checkout - st = status -a - stat = status -a - br = branch - wdiff = diff --color-words - -Editor ------- - -You may also want to make sure that your editor of choice is used :: - - git config --global core.editor vim - -Merging -------- - -To enforce summaries when doing merges (``~/.gitconfig`` file again):: - - [merge] - log = true - -Or from the command line:: - - git config --global merge.log true - - -.. include:: git_links.inc diff --git a/numpy-1.6.2/doc/source/dev/gitwash/development_setup.rst b/numpy-1.6.2/doc/source/dev/gitwash/development_setup.rst deleted file mode 100644 index 489a2e75dc..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/development_setup.rst +++ /dev/null @@ -1,113 +0,0 @@ -==================================== -Getting started with Git development -==================================== - -Basic Git setup -############### - -* :ref:`install-git`. -* Introduce yourself to Git:: - - git config --global user.email you@yourdomain.example.com - git config --global user.name "Your Name Comes Here" - -.. _forking: - -Making your own copy (fork) of NumPy -#################################### - -You need to do this only once. The instructions here are very similar -to the instructions at http://help.github.com/forking/ - please see that -page for more detail. We're repeating some of it here just to give the -specifics for the NumPy_ project, and to suggest some default names. 
- -Set up and configure a github_ account -====================================== - -If you don't have a github_ account, go to the github_ page, and make one. - -You then need to configure your account to allow write access - see the -``Generating SSH keys`` help on `github help`_. - -Create your own forked copy of NumPy_ -========================================= - -#. Log into your github_ account. -#. Go to the NumPy_ github home at `NumPy github`_. -#. Click on the *fork* button: - - .. image:: forking_button.png - - Now, after a short pause and some 'Hardcore forking action', you - should find yourself at the home page for your own forked copy of NumPy_. - -.. include:: git_links.inc - - -.. _set-up-fork: - -Set up your fork -################ - -First you follow the instructions for :ref:`forking`. - -Overview -======== - -:: - - git clone git@github.com:your-user-name/numpy.git - cd numpy - git remote add upstream git://github.com/numpy/numpy.git - -In detail -========= - -Clone your fork ---------------- - -#. Clone your fork to the local computer with ``git clone - git@github.com:your-user-name/numpy.git`` -#. Investigate. Change directory to your new repo: ``cd numpy``. Then - ``git branch -a`` to show you all branches. You'll get something - like:: - - * master - remotes/origin/master - - This tells you that you are currently on the ``master`` branch, and - that you also have a ``remote`` connection to ``origin/master``. - What remote repository is ``remote/origin``? Try ``git remote -v`` to - see the URLs for the remote. They will point to your github_ fork. - - Now you want to connect to the upstream `NumPy github`_ repository, so - you can merge in changes from trunk. - -.. 
_linking-to-upstream: - -Linking your repository to the upstream repo --------------------------------------------- - -:: - - cd numpy - git remote add upstream git://github.com/numpy/numpy.git - -``upstream`` here is just the arbitrary name we're using to refer to the -main NumPy_ repository at `NumPy github`_. - -Note that we've used ``git://`` for the URL rather than ``git@``. The -``git://`` URL is read only. This means we that we can't accidentally -(or deliberately) write to the upstream repo, and we are only going to -use it to merge into our own code. - -Just for your own satisfaction, show yourself that you now have a new -'remote', with ``git remote -v show``, giving you something like:: - - upstream git://github.com/numpy/numpy.git (fetch) - upstream git://github.com/numpy/numpy.git (push) - origin git@github.com:your-user-name/numpy.git (fetch) - origin git@github.com:your-user-name/numpy.git (push) - -.. include:: git_links.inc - diff --git a/numpy-1.6.2/doc/source/dev/gitwash/development_workflow.rst b/numpy-1.6.2/doc/source/dev/gitwash/development_workflow.rst deleted file mode 100644 index 75de6ae673..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/development_workflow.rst +++ /dev/null @@ -1,461 +0,0 @@ -.. _development-workflow: - -==================== -Development workflow -==================== - -You already have your own forked copy of the NumPy_ repository, by -following :ref:`forking`, :ref:`set-up-fork`, you have configured git_ -by following :ref:`configure-git`, and have linked the upstream -repository as explained in :ref:`linking-to-upstream`. - -What is described below is a recommended workflow with Git. - -Basic workflow -############## - -In short: - -1. Start a new *feature branch* for each set of edits that you do. - See :ref:`below `. - - Avoid putting new commits in your ``master`` branch. - -2. Hack away! See :ref:`below ` - -3. Avoid merging other branches into your feature branch while you are - working. 
- - You can optionally rebase if really needed, - see :ref:`below `. - -4. When finished: - - - *Contributors*: push your feature branch to your own Github repo, and - :ref:`ask for code review or make a pull request `. - - - *Core developers* (if you want to push changes without - further review):: - - # First, either (i) rebase on upstream -- if you have only few commits - git fetch upstream - git rebase upstream/master - - # or, (ii) merge to upstream -- if you have many related commits - git fetch upstream - git merge --no-ff upstream/master - - # Recheck that what is there is sensible - git log --oneline --graph - git log -p upstream/master.. - - # Finally, push branch to upstream master - git push upstream my-new-feature:master - - See :ref:`below `. - -This way of working helps to keep work well organized and the history -as clear as possible. - -.. note:: - - Do not use ``git pull`` --- this avoids common mistakes if you are - new to Git. Instead, always do ``git fetch`` followed by ``git - rebase``, ``git merge --ff-only`` or ``git merge --no-ff``, - depending on what you intend. - -.. seealso:: - - See discussions on `linux git workflow`_, - and `ipython git workflow `__. - -.. _making-a-new-feature-branch: - -Making a new feature branch -=========================== - -:: - - git branch my-new-feature - git checkout my-new-feature - -or just:: - - git checkout -b my-new-feature upstream/master - -Generally, you will want to keep this also on your public github_ fork -of NumPy_. To do this, you `git push`_ this new branch up to your github_ -repo. Generally (if you followed the instructions in these pages, and -by default), git will have a link to your github_ repo, called -``origin``. 
You push up to your own repo on github_ with:: - - git push origin my-new-feature - -In git >= 1.7 you can ensure that the link is correctly set by using the -``--set-upstream`` option:: - - git push --set-upstream origin my-new-feature - -From now on git_ will know that ``my-new-feature`` is related to the -``my-new-feature`` branch in your own github_ repo. - - -.. _editing-workflow: - -The editing workflow -==================== - -Overview --------- - -:: - - # hack hack - git add my_new_file - git commit -am 'ENH: some message' - - # push the branch to your own Github repo - git push - -In more detail --------------- - -#. Make some changes -#. See which files have changed with ``git status`` (see `git status`_). - You'll see a listing like this one:: - - # On branch my-new-feature - # Changed but not updated: - # (use "git add ..." to update what will be committed) - # (use "git checkout -- ..." to discard changes in working directory) - # - # modified: README - # - # Untracked files: - # (use "git add ..." to include in what will be committed) - # - # INSTALL - no changes added to commit (use "git add" and/or "git commit -a") - -#. Check what the actual changes are with ``git diff`` (`git diff`_). -#. Add any new files to version control ``git add new_file_name`` (see - `git add`_). -#. To commit all modified files into the local copy of your repo,, do - ``git commit -am 'A commit message'``. Note the ``-am`` options to - ``commit``. The ``m`` flag just signals that you're going to type a - message on the command line. The ``a`` flag - you can just take on - faith - or see `why the -a flag?`_ - and the helpful use-case description in - the `tangled working copy problem`_. The `git commit`_ manual - page might also be useful. -#. To push the changes up to your forked repo on github_, do a ``git - push`` (see `git push`). - - -.. 
_rebasing-on-master: - -Rebasing on master -================== - -This updates your feature branch with changes from the upstream `NumPy -github`_ repo. If you do not absolutely need to do this, try to avoid -doing it, except perhaps when you are finished. - -First, it can be useful to update your master branch:: - - # go to the master branch - git checkout master - # pull changes from github - git fetch upstream - # update the master branch - git rebase upstream/master - # push it to your Github repo - git push - -Then, the feature branch:: - - # go to the feature branch - git checkout my-new-feature - # make a backup in case you mess up - git branch tmp my-new-feature - # rebase on master - git rebase master - -If you have made changes to files that have changed also upstream, -this may generate merge conflicts that you need to resolve.:: - - # delete backup branch - git branch -D tmp - -.. _recovering-from-mess-up: - -Recovering from mess-ups ------------------------- - -Sometimes, you mess up merges or rebases. Luckily, in Git it is -relatively straightforward to recover from such mistakes. - -If you mess up during a rebase:: - - git rebase --abort - -If you notice you messed up after the rebase:: - - # reset branch back to the saved point - git reset --hard tmp - -If you forgot to make a backup branch:: - - # look at the reflog of the branch - git reflog show my-feature-branch - - 8630830 my-feature-branch@{0}: commit: BUG: io: close file handles immediately - 278dd2a my-feature-branch@{1}: rebase finished: refs/heads/my-feature-branch onto 11ee694744f2552d - 26aa21a my-feature-branch@{2}: commit: BUG: lib: make seek_gzip_factory not leak gzip obj - ... - - # reset the branch to where it was before the botched rebase - git reset --hard my-feature-branch@{2} - - -.. 
_asking-for-merging: - -Asking for your changes to be merged with the main repo -======================================================= - -When you feel your work is finished, you can ask for code review, or -directly file a pull request. - -Asking for code review ----------------------- - -#. Go to your repo URL - e.g. ``http://github.com/your-user-name/numpy``. -#. Click on the *Branch list* button: - - .. image:: branch_list.png - -#. Click on the *Compare* button for your feature branch - here ``my-new-feature``: - - .. image:: branch_list_compare.png - -#. If asked, select the *base* and *comparison* branch names you want to - compare. Usually these will be ``master`` and ``my-new-feature`` - (where that is your feature branch name). -#. At this point you should get a nice summary of the changes. Copy the - URL for this, and post it to the `NumPy mailing list`_, asking for - review. The URL will look something like: - ``http://github.com/your-user-name/numpy/compare/master...my-new-feature``. - There's an example at - http://github.com/matthew-brett/nipy/compare/master...find-install-data - See: http://github.com/blog/612-introducing-github-compare-view for - more detail. - -The generated comparison, is between your feature branch -``my-new-feature``, and the place in ``master`` from which you branched -``my-new-feature``. In other words, you can keep updating ``master`` -without interfering with the output from the comparison. More detail? -Note the three dots in the URL above (``master...my-new-feature``) and -see :ref:`dot2-dot3`. - -Filing a pull request ---------------------- - -When you are ready to ask for the merge of your code: - -#. Go to the URL of your forked repo, say - ``http://github.com/your-user-name/numpy.git``. -#. Click on the 'Pull request' button: - - .. image:: pull_button.png - - Enter a message; we suggest you select only ``NumPy`` as the - recipient. The message will go to the NumPy core developers. 
Please - feel free to add others from the list as you like. - - -.. _pushing-to-main: - -Pushing changes to the main repo -================================ - -When you have a set of "ready" changes in a feature branch ready for -Numpy's ``master`` or ``maintenance/1.5.x`` branches, you can push -them to ``upstream`` as follows: - -1. First, merge or rebase on the target branch. - - a) Only a few commits: prefer rebasing:: - - git fetch upstream - git rebase upstream/master - - See :ref:`above `. - - b) Many related commits: consider creating a merge commit:: - - git fetch upstream - git merge --no-ff upstream/master - -2. Check that what you are going to push looks sensible:: - - git log -p upstream/master.. - git log --oneline --graph - -3. Push to upstream:: - - git push upstream my-feature-branch:master - -.. note:: - - Avoid using ``git pull`` here. - -Additional things you might want to do -###################################### - -.. _rewriting-commit-history: - -Rewriting commit history -======================== - -.. note:: - - Do this only for your own feature branches. - -There's an embarassing typo in a commit you made? Or perhaps the you -made several false starts you would like the posterity not to see. - -This can be done via *interactive rebasing*. - -Suppose that the commit history looks like this:: - - git log --oneline - eadc391 Fix some remaining bugs - a815645 Modify it so that it works - 2dec1ac Fix a few bugs + disable - 13d7934 First implementation - 6ad92e5 * masked is now an instance of a new object, MaskedConstant - 29001ed Add pre-nep for a copule of structured_array_extensions. - ... - -and ``6ad92e5`` is the last commit in the ``master`` branch. Suppose we -want to make the following changes: - -* Rewrite the commit message for ``13d7934`` to something more sensible. -* Combine the commits ``2dec1ac``, ``a815645``, ``eadc391`` into a single one. 
- -We do as follows:: - - # make a backup of the current state - git branch tmp HEAD - # interactive rebase - git rebase -i 6ad92e5 - -This will open an editor with the following text in it:: - - pick 13d7934 First implementation - pick 2dec1ac Fix a few bugs + disable - pick a815645 Modify it so that it works - pick eadc391 Fix some remaining bugs - - # Rebase 6ad92e5..eadc391 onto 6ad92e5 - # - # Commands: - # p, pick = use commit - # r, reword = use commit, but edit the commit message - # e, edit = use commit, but stop for amending - # s, squash = use commit, but meld into previous commit - # f, fixup = like "squash", but discard this commit's log message - # - # If you remove a line here THAT COMMIT WILL BE LOST. - # However, if you remove everything, the rebase will be aborted. - # - -To achieve what we want, we will make the following changes to it:: - - r 13d7934 First implementation - pick 2dec1ac Fix a few bugs + disable - f a815645 Modify it so that it works - f eadc391 Fix some remaining bugs - -This means that (i) we want to edit the commit message for -``13d7934``, and (ii) collapse the last three commits into one. Now we -save and quit the editor. - -Git will then immediately bring up an editor for editing the commit -message. After revising it, we get the output:: - - [detached HEAD 721fc64] FOO: First implementation - 2 files changed, 199 insertions(+), 66 deletions(-) - [detached HEAD 0f22701] Fix a few bugs + disable - 1 files changed, 79 insertions(+), 61 deletions(-) - Successfully rebased and updated refs/heads/my-feature-branch. - -and the history looks now like this:: - - 0f22701 Fix a few bugs + disable - 721fc64 ENH: Sophisticated feature - 6ad92e5 * masked is now an instance of a new object, MaskedConstant - -If it went wrong, recovery is again possible as explained :ref:`above -`. 
- -Deleting a branch on github_ -============================ - -:: - - git checkout master - # delete branch locally - git branch -D my-unwanted-branch - # delete branch on github - git push origin :my-unwanted-branch - -(Note the colon ``:`` before ``test-branch``. See also: -http://github.com/guides/remove-a-remote-branch - - -Several people sharing a single repository -========================================== - -If you want to work on some stuff with other people, where you are all -committing into the same repository, or even the same branch, then just -share it via github_. - -First fork NumPy into your account, as from :ref:`forking`. - -Then, go to your forked repository github page, say -``http://github.com/your-user-name/numpy`` - -Click on the 'Admin' button, and add anyone else to the repo as a -collaborator: - - .. image:: pull_button.png - -Now all those people can do:: - - git clone git@githhub.com:your-user-name/numpy.git - -Remember that links starting with ``git@`` use the ssh protocol and are -read-write; links starting with ``git://`` are read-only. - -Your collaborators can then commit directly into that repo with the -usual:: - - git commit -am 'ENH - much better code' - git push origin master # pushes directly into your repo - -Exploring your repository -========================= - -To see a graphical representation of the repository branches and -commits:: - - gitk --all - -To see a linear list of commits for this branch:: - - git log - -You can also look at the `network graph visualizer`_ for your github_ -repo. - -.. include:: git_links.inc diff --git a/numpy-1.6.2/doc/source/dev/gitwash/dot2_dot3.rst b/numpy-1.6.2/doc/source/dev/gitwash/dot2_dot3.rst deleted file mode 100644 index 7759e2e60d..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/dot2_dot3.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. 
_dot2-dot3: - -======================================== - Two and three dots in difference specs -======================================== - -Thanks to Yarik Halchenko for this explanation. - -Imagine a series of commits A, B, C, D... Imagine that there are two -branches, *topic* and *master*. You branched *topic* off *master* when -*master* was at commit 'E'. The graph of the commits looks like this:: - - - A---B---C topic - / - D---E---F---G master - -Then:: - - git diff master..topic - -will output the difference from G to C (i.e. with effects of F and G), -while:: - - git diff master...topic - -would output just differences in the topic branch (i.e. only A, B, and -C). diff --git a/numpy-1.6.2/doc/source/dev/gitwash/following_latest.rst b/numpy-1.6.2/doc/source/dev/gitwash/following_latest.rst deleted file mode 100644 index 5388ce1043..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/following_latest.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. _following-latest: - -============================= - Following the latest source -============================= - -These are the instructions if you just want to follow the latest -*NumPy* source, but you don't need to do any development for now. - -The steps are: - -* :ref:`install-git` -* get local copy of the git repository from github_ -* update local copy from time to time - -Get the local copy of the code -============================== - -From the command line:: - - git clone git://github.com/numpy/numpy.git - -You now have a copy of the code tree in the new ``numpy`` directory. - -Updating the code -================= - -From time to time you may want to pull down the latest code. Do this with:: - - cd numpy - git fetch - git merge --ff-only - -The tree in ``numpy`` will now have the latest changes from the initial -repository. - -.. 
include:: git_links.inc diff --git a/numpy-1.6.2/doc/source/dev/gitwash/forking_button.png b/numpy-1.6.2/doc/source/dev/gitwash/forking_button.png deleted file mode 100644 index d0e04134d4..0000000000 Binary files a/numpy-1.6.2/doc/source/dev/gitwash/forking_button.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/dev/gitwash/git_development.rst b/numpy-1.6.2/doc/source/dev/gitwash/git_development.rst deleted file mode 100644 index fb997abec0..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/git_development.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. _git-development: - -===================== - Git for development -===================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - development_setup - configure_git - development_workflow - diff --git a/numpy-1.6.2/doc/source/dev/gitwash/git_intro.rst b/numpy-1.6.2/doc/source/dev/gitwash/git_intro.rst deleted file mode 100644 index 3ce322f8fd..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/git_intro.rst +++ /dev/null @@ -1,42 +0,0 @@ -============ -Introduction -============ - -These pages describe a git_ and github_ workflow for the NumPy_ -project. - -There are several different workflows here, for different ways of -working with *NumPy*. - -This is not a comprehensive git_ reference, it's just a workflow for our -own project. It's tailored to the github_ hosting service. You may well -find better or quicker ways of getting stuff done with git_, but these -should get you started. - -For general resources for learning git_ see :ref:`git-resources`. - -.. _install-git: - -Install git -=========== - -Overview --------- - -================ ============= -Debian / Ubuntu ``sudo apt-get install git-core`` -Fedora ``sudo yum install git-core`` -Windows Download and install msysGit_ -OS X Use the git-osx-installer_ -================ ============= - -In detail ---------- - -See the git_ page for the most recent information. 
- -Have a look at the github_ install help pages available from `github help`_ - -There are good instructions here: http://book.git-scm.com/2_installing_git.html - -.. include:: git_links.inc diff --git a/numpy-1.6.2/doc/source/dev/gitwash/git_links.inc b/numpy-1.6.2/doc/source/dev/gitwash/git_links.inc deleted file mode 100644 index e33dacc2a3..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/git_links.inc +++ /dev/null @@ -1,85 +0,0 @@ -.. This (-*- rst -*-) format file contains commonly used link targets - and name substitutions. It may be included in many files, - therefore it should only contain link targets and name - substitutions. Try grepping for "^\.\. _" to find plausible - candidates for this list. - -.. NOTE: reST targets are - __not_case_sensitive__, so only one target definition is needed for - nipy, NIPY, Nipy, etc... - -.. PROJECTNAME placeholders -.. _PROJECTNAME: http://neuroimaging.scipy.org -.. _`PROJECTNAME github`: http://github.com/nipy -.. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel - -.. nipy -.. _nipy: http://nipy.org/nipy -.. _`nipy github`: http://github.com/nipy/nipy -.. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel - -.. ipython -.. _ipython: http://ipython.scipy.org -.. _`ipython github`: http://github.com/ipython/ipython -.. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev - -.. dipy -.. _dipy: http://nipy.org/dipy -.. _`dipy github`: http://github.com/Garyfallidis/dipy -.. _`dipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel - -.. nibabel -.. _nibabel: http://nipy.org/nibabel -.. _`nibabel github`: http://github.com/nipy/nibabel -.. _`nibabel mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel - -.. marsbar -.. _marsbar: http://marsbar.sourceforge.net -.. _`marsbar github`: http://github.com/matthew-brett/marsbar -.. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users - -.. 
git stuff -.. _git: http://git-scm.com/ -.. _github: http://github.com -.. _github help: http://help.github.com -.. _msysgit: http://code.google.com/p/msysgit/downloads/list -.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list -.. _subversion: http://subversion.tigris.org/ -.. _git cheat sheet: http://github.com/guides/git-cheat-sheet -.. _pro git book: http://progit.org/ -.. _git svn crash course: http://git-scm.com/course/svn.html -.. _learn.github: http://learn.github.com/ -.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer -.. _git user manual: http://www.kernel.org/pub/software/scm/git/docs/user-manual.html -.. _git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html -.. _git community book: http://book.git-scm.com/ -.. _git ready: http://www.gitready.com/ -.. _git casts: http://www.gitcasts.com/ -.. _Fernando's git page: http://www.fperez.org/py4science/git.html -.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html -.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/ -.. _git clone: http://www.kernel.org/pub/software/scm/git/docs/git-clone.html -.. _git checkout: http://www.kernel.org/pub/software/scm/git/docs/git-checkout.html -.. _git commit: http://www.kernel.org/pub/software/scm/git/docs/git-commit.html -.. _git push: http://www.kernel.org/pub/software/scm/git/docs/git-push.html -.. _git pull: http://www.kernel.org/pub/software/scm/git/docs/git-pull.html -.. _git add: http://www.kernel.org/pub/software/scm/git/docs/git-add.html -.. _git status: http://www.kernel.org/pub/software/scm/git/docs/git-status.html -.. _git diff: http://www.kernel.org/pub/software/scm/git/docs/git-diff.html -.. _git log: http://www.kernel.org/pub/software/scm/git/docs/git-log.html -.. _git branch: http://www.kernel.org/pub/software/scm/git/docs/git-branch.html -.. 
_git remote: http://www.kernel.org/pub/software/scm/git/docs/git-remote.html -.. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html -.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html -.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html -.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git -.. _git management: http://kerneltrap.org/Linux/Git_Management -.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html -.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html -.. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html - -.. other stuff -.. _python: http://www.python.org -.. _NumPy: http://numpy.scipy.org -.. _`NumPy github`: http://github.com/numpy/numpy -.. _`NumPy mailing list`: http://scipy.org/Mailing_Lists diff --git a/numpy-1.6.2/doc/source/dev/gitwash/git_resources.rst b/numpy-1.6.2/doc/source/dev/gitwash/git_resources.rst deleted file mode 100644 index ae350806eb..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/git_resources.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. _git-resources: - -================ - git_ resources -================ - -Tutorials and summaries -======================= - -* `github help`_ has an excellent series of how-to guides. -* `learn.github`_ has an excellent series of tutorials -* The `pro git book`_ is a good in-depth book on git. -* A `git cheat sheet`_ is a page giving summaries of common commands. -* The `git user manual`_ -* The `git tutorial`_ -* The `git community book`_ -* `git ready`_ - a nice series of tutorials -* `git casts`_ - video snippets giving git how-tos. -* `git magic`_ - extended introduction with intermediate detail -* The `git parable`_ is an easy read explaining the concepts behind git. -* Our own `git foundation`_ expands on the `git parable`_. 
-* Fernando Perez' git page - `Fernando's git page`_ - many links and tips -* A good but technical page on `git concepts`_ -* `git svn crash course`_: git_ for those of us used to subversion_ - -Advanced git workflow -===================== - -There are many ways of working with git_; here are some posts on the -rules of thumb that other projects have come up with: - -* Linus Torvalds on `git management`_ -* Linus Torvalds on `linux git workflow`_ . Summary; use the git tools - to make the history of your edits as clean as possible; merge from - upstream edits as little as possible in branches where you are doing - active development. - -Manual pages online -=================== - -You can get these on your own machine with (e.g) ``git help push`` or -(same thing) ``git push --help``, but, for convenience, here are the -online manual pages for some common commands: - -* `git add`_ -* `git branch`_ -* `git checkout`_ -* `git clone`_ -* `git commit`_ -* `git config`_ -* `git diff`_ -* `git log`_ -* `git pull`_ -* `git push`_ -* `git remote`_ -* `git status`_ - -.. include:: git_links.inc diff --git a/numpy-1.6.2/doc/source/dev/gitwash/index.rst b/numpy-1.6.2/doc/source/dev/gitwash/index.rst deleted file mode 100644 index edbf4dbc3c..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _using-git: - -Working with *NumPy* source code -====================================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - git_intro - following_latest - patching - git_development - git_resources - - diff --git a/numpy-1.6.2/doc/source/dev/gitwash/patching.rst b/numpy-1.6.2/doc/source/dev/gitwash/patching.rst deleted file mode 100644 index da8b660acb..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash/patching.rst +++ /dev/null @@ -1,132 +0,0 @@ -================ - Making a patch -================ - -You've discovered a bug or something else you want to change in -NumPy_ - excellent! 
- -You've worked out a way to fix it - even better! - -You want to tell us about it - best of all! - -The easiest way is to make a *patch* or set of patches. Here we explain -how. Making a patch is the simplest and quickest, but if you're going -to be doing anything more than simple quick things, please consider -following the :ref:`git-development` model instead. - -.. _making-patches: - -Making patches -============== - -Overview --------- - -:: - - # tell git who you are - git config --global user.email you@yourdomain.example.com - git config --global user.name "Your Name Comes Here" - # get the repository if you don't have it - git clone git://github.com/numpy/numpy.git - # make a branch for your patching - cd numpy - git branch the-fix-im-thinking-of - git checkout the-fix-im-thinking-of - # hack, hack, hack - # Tell git about any new files you've made - git add somewhere/tests/test_my_bug.py - # commit work in progress as you go - git commit -am 'BF - added tests for Funny bug' - # hack hack, hack - git commit -am 'BF - added fix for Funny bug' - # make the patch files - git format-patch -M -C master - -Then, create a ticket in the `Numpy Trac `__, -attach the generated patch files there, and notify the `NumPy mailing list`_ -about your contribution. - -In detail ---------- - -#. Tell git_ who you are so it can label the commits you've made:: - - git config --global user.email you@yourdomain.example.com - git config --global user.name "Your Name Comes Here" - -#. If you don't already have one, clone a copy of the NumPy_ repository:: - - git clone git://github.com/numpy/numpy.git - cd numpy - -#. Make a 'feature branch'. This will be where you work on your bug - fix. It's nice and safe and leaves you with access to an unmodified - copy of the code in the main branch:: - - git branch the-fix-im-thinking-of - git checkout the-fix-im-thinking-of - -#. 
Do some edits, and commit them as you go:: - - # hack, hack, hack - # Tell git about any new files you've made - git add somewhere/tests/test_my_bug.py - # commit work in progress as you go - git commit -am 'BF - added tests for Funny bug' - # hack hack, hack - git commit -am 'BF - added fix for Funny bug' - - Note the ``-am`` options to ``commit``. The ``m`` flag just signals - that you're going to type a message on the command line. The ``a`` - flag - you can just take on faith - or see `why the -a flag?`_. - -#. When you have finished, check you have committed all your changes:: - - git status - -#. Finally, make your commits into patches. You want all the commits - since you branched from the ``master`` branch:: - - git format-patch -M -C master - - You will now have several files named for the commits:: - - 0001-BF-added-tests-for-Funny-bug.patch - 0002-BF-added-fix-for-Funny-bug.patch - - The recommended way to proceed is either to attach these files to - an enhancement ticket in the `Numpy Trac `__ - and send a mail about the enhancement to the `NumPy mailing list`_. - - You can also consider sending your changes via Github, see below and in - :ref:`asking-for-merging`. - -When you are done, to switch back to the main copy of the code, just -return to the ``master`` branch:: - - git checkout master - -Moving from patching to development -=================================== - -If you find you have done some patches, and you have one or more feature -branches, you will probably want to switch to development mode. You can -do this with the repository you have. - -Fork the NumPy_ repository on github_ - :ref:`forking`. 
Then:: - - # checkout and refresh master branch from main repo - git checkout master - git fetch origin - git merge --ff-only origin/master - # rename pointer to main repository to 'upstream' - git remote rename origin upstream - # point your repo to default read / write to your fork on github - git remote add origin git@github.com:your-user-name/numpy.git - # push up any branches you've made and want to keep - git push origin the-fix-im-thinking-of - -Then you can, if you want, follow the :ref:`development-workflow`. - -.. include:: git_links.inc diff --git a/numpy-1.6.2/doc/source/dev/gitwash/pull_button.png b/numpy-1.6.2/doc/source/dev/gitwash/pull_button.png deleted file mode 100644 index e5031681b9..0000000000 Binary files a/numpy-1.6.2/doc/source/dev/gitwash/pull_button.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/dev/gitwash_links.txt b/numpy-1.6.2/doc/source/dev/gitwash_links.txt deleted file mode 100644 index c598ba4aa8..0000000000 --- a/numpy-1.6.2/doc/source/dev/gitwash_links.txt +++ /dev/null @@ -1,3 +0,0 @@ -.. _NumPy: http://numpy.scipy.org -.. _`NumPy github`: http://github.com/numpy/numpy -.. _`NumPy mailing list`: http://scipy.org/Mailing_Lists diff --git a/numpy-1.6.2/doc/source/dev/index.rst b/numpy-1.6.2/doc/source/dev/index.rst deleted file mode 100644 index 2229f3ccb3..0000000000 --- a/numpy-1.6.2/doc/source/dev/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -##################### -Contributing to Numpy -##################### - -.. toctree:: - :maxdepth: 3 - - gitwash/index - -For core developers: see :ref:`development-workflow`. diff --git a/numpy-1.6.2/doc/source/glossary.rst b/numpy-1.6.2/doc/source/glossary.rst deleted file mode 100644 index ffa8f7368c..0000000000 --- a/numpy-1.6.2/doc/source/glossary.rst +++ /dev/null @@ -1,14 +0,0 @@ -******** -Glossary -******** - -.. toctree:: - -.. glossary:: - - .. automodule:: numpy.doc.glossary - -Jargon ------- - -.. 
automodule:: numpy.doc.jargon diff --git a/numpy-1.6.2/doc/source/license.rst b/numpy-1.6.2/doc/source/license.rst deleted file mode 100644 index 2b3b7ebd33..0000000000 --- a/numpy-1.6.2/doc/source/license.rst +++ /dev/null @@ -1,35 +0,0 @@ -************* -Numpy License -************* - -Copyright (c) 2005, NumPy Developers - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -* Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/numpy-1.6.2/doc/source/reference/arrays.classes.rst b/numpy-1.6.2/doc/source/reference/arrays.classes.rst deleted file mode 100644 index b35e365458..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.classes.rst +++ /dev/null @@ -1,423 +0,0 @@ -######################### -Standard array subclasses -######################### - -.. currentmodule:: numpy - -The :class:`ndarray` in NumPy is a "new-style" Python -built-in-type. Therefore, it can be inherited from (in Python or in C) -if desired. Therefore, it can form a foundation for many useful -classes. Often whether to sub-class the array object or to simply use -the core array component as an internal part of a new class is a -difficult decision, and can be simply a matter of choice. NumPy has -several tools for simplifying how your new object interacts with other -array objects, and so the choice may not be significant in the -end. One way to simplify the question is by asking yourself if the -object you are interested in can be replaced as a single array or does -it really require two or more arrays at its core. - -Note that :func:`asarray` always returns the base-class ndarray. If -you are confident that your use of the array object can handle any -subclass of an ndarray, then :func:`asanyarray` can be used to allow -subclasses to propagate more cleanly through your subroutine. In -principle a subclass could redefine any aspect of the array and -therefore, under strict guidelines, :func:`asanyarray` would rarely be -useful. However, most subclasses of the array object will not -redefine certain aspects of the array object such as the buffer -interface, or the attributes of the array. One important example, -however, of why your subroutine may not be able to handle an arbitrary -subclass of an array is that matrices redefine the "*" operator to be -matrix-multiplication, rather than element-by-element multiplication. - - -Special attributes and methods -============================== - -.. 
seealso:: :ref:`Subclassing ndarray ` - -Numpy provides several hooks that subclasses of :class:`ndarray` can -customize: - -.. function:: __array_finalize__(self) - - This method is called whenever the system internally allocates a - new array from *obj*, where *obj* is a subclass (subtype) of the - :class:`ndarray`. It can be used to change attributes of *self* - after construction (so as to ensure a 2-d matrix for example), or - to update meta-information from the "parent." Subclasses inherit - a default implementation of this method that does nothing. - -.. function:: __array_prepare__(array, context=None) - - At the beginning of every :ref:`ufunc `, this - method is called on the input object with the highest array - priority, or the output object if one was specified. The output - array is passed in and whatever is returned is passed to the ufunc. - Subclasses inherit a default implementation of this method which - simply returns the output array unmodified. Subclasses may opt to - use this method to transform the output array into an instance of - the subclass and update metadata before returning the array to the - ufunc for computation. - -.. function:: __array_wrap__(array, context=None) - - At the end of every :ref:`ufunc `, this method - is called on the input object with the highest array priority, or - the output object if one was specified. The ufunc-computed array - is passed in and whatever is returned is passed to the user. - Subclasses inherit a default implementation of this method, which - transforms the array into a new instance of the object's class. - Subclasses may opt to use this method to transform the output array - into an instance of the subclass and update metadata before - returning the array to the user. - -.. data:: __array_priority__ - - The value of this attribute is used to determine what type of - object to return in situations where there is more than one - possibility for the Python type of the returned object. 
Subclasses - inherit a default value of 1.0 for this attribute. - -.. function:: __array__([dtype]) - - If a class having the :obj:`__array__` method is used as the output - object of an :ref:`ufunc `, results will be - written to the object returned by :obj:`__array__`. - -Matrix objects -============== - -.. index:: - single: matrix - -:class:`matrix` objects inherit from the ndarray and therefore, they -have the same attributes and methods of ndarrays. There are six -important differences of matrix objects, however, that may lead to -unexpected results when you use matrices but expect them to act like -arrays: - -1. Matrix objects can be created using a string notation to allow - Matlab-style syntax where spaces separate columns and semicolons - (';') separate rows. - -2. Matrix objects are always two-dimensional. This has far-reaching - implications, in that m.ravel() is still two-dimensional (with a 1 - in the first dimension) and item selection returns two-dimensional - objects so that sequence behavior is fundamentally different than - arrays. - -3. Matrix objects over-ride multiplication to be - matrix-multiplication. **Make sure you understand this for - functions that you may want to receive matrices. Especially in - light of the fact that asanyarray(m) returns a matrix when m is - a matrix.** - -4. Matrix objects over-ride power to be matrix raised to a power. The - same warning about using power inside a function that uses - asanyarray(...) to get an array object holds for this fact. - -5. The default __array_priority\__ of matrix objects is 10.0, and - therefore mixed operations with ndarrays always produce matrices. - -6. Matrices have special attributes which make calculations easier. - These are - - .. autosummary:: - :toctree: generated/ - - matrix.T - matrix.H - matrix.I - matrix.A - -.. warning:: - - Matrix objects over-ride multiplication, '*', and power, '**', to - be matrix-multiplication and matrix power, respectively. 
If your - subroutine can accept sub-classes and you do not convert to base- - class arrays, then you must use the ufuncs multiply and power to - be sure that you are performing the correct operation for all - inputs. - -The matrix class is a Python subclass of the ndarray and can be used -as a reference for how to construct your own subclass of the ndarray. -Matrices can be created from other matrices, strings, and anything -else that can be converted to an ``ndarray``. The name "mat" is an -alias for "matrix" in NumPy. - -.. autosummary:: - :toctree: generated/ - - matrix - asmatrix - bmat - -Example 1: Matrix creation from a string - ->>> a=mat('1 2 3; 4 5 3') ->>> print (a*a.T).I -[[ 0.2924 -0.1345] - [-0.1345 0.0819]] - -Example 2: Matrix creation from nested sequence - ->>> mat([[1,5,10],[1.0,3,4j]]) -matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], - [ 1.+0.j, 3.+0.j, 0.+4.j]]) - -Example 3: Matrix creation from an array - ->>> mat(random.rand(3,3)).T -matrix([[ 0.7699, 0.7922, 0.3294], - [ 0.2792, 0.0101, 0.9219], - [ 0.3398, 0.7571, 0.8197]]) - -Memory-mapped file arrays -========================= - -.. index:: - single: memory maps - -.. currentmodule:: numpy - -Memory-mapped files are useful for reading and/or modifying small -segments of a large file with regular layout, without reading the -entire file into memory. A simple subclass of the ndarray uses a -memory-mapped file for the data buffer of the array. For small files, -the over-head of reading the entire file into memory is typically not -significant, however for large files using memory mapping can save -considerable resources. - -Memory-mapped-file arrays have one additional method (besides those -they inherit from the ndarray): :meth:`.flush() ` which -must be called manually by the user to ensure that any changes to the -array actually get written to disk. - -.. 
note:: - - Memory-mapped arrays use the Python memory-map object which - (prior to Python 2.5) does not allow files to be larger than a - certain size depending on the platform. This size is always - < 2GB even on 64-bit systems. - -.. autosummary:: - :toctree: generated/ - - memmap - memmap.flush - -Example: - ->>> a = memmap('newfile.dat', dtype=float, mode='w+', shape=1000) ->>> a[10] = 10.0 ->>> a[30] = 30.0 ->>> del a ->>> b = fromfile('newfile.dat', dtype=float) ->>> print b[10], b[30] -10.0 30.0 ->>> a = memmap('newfile.dat', dtype=float) ->>> print a[10], a[30] -10.0 30.0 - - -Character arrays (:mod:`numpy.char`) -==================================== - -.. seealso:: :ref:`routines.array-creation.char` - -.. index:: - single: character arrays - -.. note:: - The `chararray` class exists for backwards compatibility with - Numarray, it is not recommended for new development. Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - -These are enhanced arrays of either :class:`string_` type or -:class:`unicode_` type. These arrays inherit from the -:class:`ndarray`, but specially-define the operations ``+``, ``*``, -and ``%`` on a (broadcasting) element-by-element basis. These -operations are not available on the standard :class:`ndarray` of -character type. In addition, the :class:`chararray` has all of the -standard :class:`string ` (and :class:`unicode`) methods, -executing them on an element-by-element basis. Perhaps the easiest -way to create a chararray is to use :meth:`self.view(chararray) -` where *self* is an ndarray of str or unicode -data-type. However, a chararray can also be created using the -:meth:`numpy.chararray` constructor, or via the -:func:`numpy.char.array ` function: - -.. 
autosummary:: - :toctree: generated/ - - chararray - core.defchararray.array - -Another difference with the standard ndarray of str data-type is -that the chararray inherits the feature introduced by Numarray that -white-space at the end of any element in the array will be ignored -on item retrieval and comparison operations. - - -.. _arrays.classes.rec: - -Record arrays (:mod:`numpy.rec`) -================================ - -.. seealso:: :ref:`routines.array-creation.rec`, :ref:`routines.dtype`, - :ref:`arrays.dtypes`. - -Numpy provides the :class:`recarray` class which allows accessing the -fields of a record/structured array as attributes, and a corresponding -scalar data type object :class:`record`. - -.. currentmodule:: numpy - -.. autosummary:: - :toctree: generated/ - - recarray - record - -Masked arrays (:mod:`numpy.ma`) -=============================== - -.. seealso:: :ref:`maskedarray` - -Standard container class -======================== - -.. currentmodule:: numpy - -For backward compatibility and as a standard "container "class, the -UserArray from Numeric has been brought over to NumPy and named -:class:`numpy.lib.user_array.container` The container class is a -Python class whose self.array attribute is an ndarray. Multiple -inheritance is probably easier with numpy.lib.user_array.container -than with the ndarray itself and so it is included by default. It is -not documented here beyond mentioning its existence because you are -encouraged to use the ndarray class directly if you can. - -.. autosummary:: - :toctree: generated/ - - numpy.lib.user_array.container - -.. index:: - single: user_array - single: container class - - -Array Iterators -=============== - -.. currentmodule:: numpy - -.. index:: - single: array iterator - -Iterators are a powerful concept for array processing. Essentially, -iterators implement a generalized for-loop. If *myiter* is an iterator -object, then the Python code:: - - for val in myiter: - ... 
- some code involving val - ... - -calls ``val = myiter.next()`` repeatedly until :exc:`StopIteration` is -raised by the iterator. There are several ways to iterate over an -array that may be useful: default iteration, flat iteration, and -:math:`N`-dimensional enumeration. - - -Default iteration ------------------ - -The default iterator of an ndarray object is the default Python -iterator of a sequence type. Thus, when the array object itself is -used as an iterator. The default behavior is equivalent to:: - - for i in xrange(arr.shape[0]): - val = arr[i] - -This default iterator selects a sub-array of dimension :math:`N-1` -from the array. This can be a useful construct for defining recursive -algorithms. To loop over the entire array requires :math:`N` for-loops. - ->>> a = arange(24).reshape(3,2,4)+10 ->>> for val in a: -... print 'item:', val -item: [[10 11 12 13] - [14 15 16 17]] -item: [[18 19 20 21] - [22 23 24 25]] -item: [[26 27 28 29] - [30 31 32 33]] - - -Flat iteration --------------- - -.. autosummary:: - :toctree: generated/ - - ndarray.flat - -As mentioned previously, the flat attribute of ndarray objects returns -an iterator that will cycle over the entire array in C-style -contiguous order. - ->>> for i, val in enumerate(a.flat): -... if i%5 == 0: print i, val -0 10 -5 15 -10 20 -15 25 -20 30 - -Here, I've used the built-in enumerate iterator to return the iterator -index as well as the value. - - -N-dimensional enumeration -------------------------- - -.. autosummary:: - :toctree: generated/ - - ndenumerate - -Sometimes it may be useful to get the N-dimensional index while -iterating. The ndenumerate iterator can achieve this. - ->>> for i, val in ndenumerate(a): -... if sum(i)%5 == 0: print i, val -(0, 0, 0) 10 -(1, 1, 3) 25 -(2, 0, 3) 29 -(2, 1, 2) 32 - - -Iterator for broadcasting -------------------------- - -.. 
autosummary:: - :toctree: generated/ - - broadcast - -The general concept of broadcasting is also available from Python -using the :class:`broadcast` iterator. This object takes :math:`N` -objects as inputs and returns an iterator that returns tuples -providing each of the input sequence elements in the broadcasted -result. - ->>> for val in broadcast([[1,0],[2,3]],[0,1]): -... print val -(1, 0) -(0, 1) -(2, 0) -(3, 1) diff --git a/numpy-1.6.2/doc/source/reference/arrays.dtypes.rst b/numpy-1.6.2/doc/source/reference/arrays.dtypes.rst deleted file mode 100644 index c89a282f40..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.dtypes.rst +++ /dev/null @@ -1,512 +0,0 @@ -.. currentmodule:: numpy - -.. _arrays.dtypes: - -********************************** -Data type objects (:class:`dtype`) -********************************** - -A data type object (an instance of :class:`numpy.dtype` class) -describes how the bytes in the fixed-size block of memory -corresponding to an array item should be interpreted. It describes the -following aspects of the data: - -1. Type of the data (integer, float, Python object, etc.) -2. Size of the data (how many bytes is in *e.g.* the integer) -3. Byte order of the data (:term:`little-endian` or :term:`big-endian`) -4. If the data type is a :term:`record`, an aggregate of other - data types, (*e.g.*, describing an array item consisting of - an integer and a float), - - 1. what are the names of the ":term:`fields `" of the record, - by which they can be :ref:`accessed `, - 2. what is the data-type of each :term:`field`, and - 3. which part of the memory block each field takes. - -5. If the data is a sub-array, what is its shape and data type. - -.. index:: - pair: dtype; scalar - -To describe the type of scalar data, there are several :ref:`built-in -scalar types ` in Numpy for various precision -of integers, floating-point numbers, *etc*. 
An item extracted from an -array, *e.g.*, by indexing, will be a Python object whose type is the -scalar type associated with the data type of the array. - -Note that the scalar types are not :class:`dtype` objects, even though -they can be used in place of one whenever a data type specification is -needed in Numpy. - -.. index:: - pair: dtype; field - pair: dtype; record - -Record data types are formed by creating a data type whose -:term:`fields` contain other data types. Each field has a name by -which it can be :ref:`accessed `. The parent data -type should be of sufficient size to contain all its fields; the -parent can for example be based on the :class:`void` type which allows -an arbitrary item size. Record data types may also contain other record -types and fixed-size sub-array data types in their fields. - -.. index:: - pair: dtype; sub-array - -Finally, a data type can describe items that are themselves arrays of -items of another data type. These sub-arrays must, however, be of a -fixed size. If an array is created using a data-type describing a -sub-array, the dimensions of the sub-array are appended to the shape -of the array when the array is created. Sub-arrays in a field of a -record behave differently, see :ref:`arrays.indexing.rec`. - -.. admonition:: Example - - A simple data type containing a 32-bit big-endian integer: - (see :ref:`arrays.dtypes.constructing` for details on construction) - - >>> dt = np.dtype('>i4') - >>> dt.byteorder - '>' - >>> dt.itemsize - 4 - >>> dt.name - 'int32' - >>> dt.type is np.int32 - True - - The corresponding array scalar type is :class:`int32`. - -.. 
admonition:: Example - - A record data type containing a 16-character string (in field 'name') - and a sub-array of two 64-bit floating-point number (in field 'grades'): - - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt['name'] - dtype('|S16') - >>> dt['grades'] - dtype(('float64',(2,))) - - Items of an array of this data type are wrapped in an :ref:`array - scalar ` type that also has two fields: - - >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - >>> x[1] - ('John', [6.0, 7.0]) - >>> x[1]['grades'] - array([ 6., 7.]) - >>> type(x[1]) - - >>> type(x[1]['grades']) - - -.. _arrays.dtypes.constructing: - -Specifying and constructing data types -====================================== - -Whenever a data-type is required in a NumPy function or method, either -a :class:`dtype` object or something that can be converted to one can -be supplied. Such conversions are done by the :class:`dtype` -constructor: - -.. autosummary:: - :toctree: generated/ - - dtype - -What can be converted to a data-type object is described below: - -:class:`dtype` object - - .. index:: - triple: dtype; construction; from dtype - - Used as-is. - -:const:`None` - - .. index:: - triple: dtype; construction; from None - - The default data type: :class:`float_`. - -.. index:: - triple: dtype; construction; from type - -Array-scalar types - - The 24 built-in :ref:`array scalar type objects - ` all convert to an associated data-type object. - This is true for their sub-classes as well. - - Note that not all data-type information can be supplied with a - type-object: for example, :term:`flexible` data-types have - a default *itemsize* of 0, and require an explicitly given size - to be useful. - - .. 
admonition:: Example - - >>> dt = np.dtype(np.int32) # 32-bit integer - >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number - -Generic types - - The generic hierarchical type objects convert to corresponding - type objects according to the associations: - - ===================================================== =============== - :class:`number`, :class:`inexact`, :class:`floating` :class:`float` - :class:`complexfloating` :class:`cfloat` - :class:`integer`, :class:`signedinteger` :class:`int\_` - :class:`unsignedinteger` :class:`uint` - :class:`character` :class:`string` - :class:`generic`, :class:`flexible` :class:`void` - ===================================================== =============== - -Built-in Python types - - Several python types are equivalent to a corresponding - array scalar when used to generate a :class:`dtype` object: - - ================ =============== - :class:`int` :class:`int\_` - :class:`bool` :class:`bool\_` - :class:`float` :class:`float\_` - :class:`complex` :class:`cfloat` - :class:`str` :class:`string` - :class:`unicode` :class:`unicode\_` - :class:`buffer` :class:`void` - (all others) :class:`object_` - ================ =============== - - .. admonition:: Example - - >>> dt = np.dtype(float) # Python-compatible floating-point number - >>> dt = np.dtype(int) # Python-compatible integer - >>> dt = np.dtype(object) # Python object - -Types with ``.dtype`` - - Any type object with a ``dtype`` attribute: The attribute will be - accessed and used directly. The attribute must return something - that is convertible into a dtype object. - -.. index:: - triple: dtype; construction; from string - -Several kinds of strings can be converted. Recognized strings can be -prepended with ``'>'`` (:term:`big-endian`), ``'<'`` -(:term:`little-endian`), or ``'='`` (hardware-native, the default), to -specify the byte order. 
- -One-character strings - - Each built-in data-type has a character code - (the updated Numeric typecodes), that uniquely identifies it. - - .. admonition:: Example - - >>> dt = np.dtype('b') # byte, native byte order - >>> dt = np.dtype('>H') # big-endian unsigned short - >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number - -Array-protocol type strings (see :ref:`arrays.interface`) - - The first character specifies the kind of data and the remaining - characters specify how many bytes of data. The supported kinds are - - ================ ======================== - ``'b'`` Boolean - ``'i'`` (signed) integer - ``'u'`` unsigned integer - ``'f'`` floating-point - ``'c'`` complex-floating point - ``'S'``, ``'a'`` string - ``'U'`` unicode - ``'V'`` anything (:class:`void`) - ================ ======================== - - .. admonition:: Example - - >>> dt = np.dtype('i4') # 32-bit signed integer - >>> dt = np.dtype('f8') # 64-bit floating-point number - >>> dt = np.dtype('c16') # 128-bit complex floating-point number - >>> dt = np.dtype('a25') # 25-character string - -String with comma-separated fields - - Numarray introduced a short-hand notation for specifying the format - of a record as a comma-separated string of basic formats. - - A basic format in this context is an optional shape specifier - followed by an array-protocol type string. Parenthesis are required - on the shape if it is greater than 1-d. NumPy allows a modification - on the format in that any string that can uniquely identify the - type can be used to specify the data-type in a field. - The generated data-type fields are named ``'f0'``, ``'f1'``, ..., - ``'f'`` where N (>1) is the number of comma-separated basic - formats in the string. If the optional shape specifier is provided, - then the data-type for the corresponding field describes a sub-array. - - .. 
admonition:: Example - - - field named ``f0`` containing a 32-bit integer - - field named ``f1`` containing a 2 x 3 sub-array - of 64-bit floating-point numbers - - field named ``f2`` containing a 32-bit floating-point number - - >>> dt = np.dtype("i4, (2,3)f8, f4") - - - field named ``f0`` containing a 3-character string - - field named ``f1`` containing a sub-array of shape (3,) - containing 64-bit unsigned integers - - field named ``f2`` containing a 3 x 4 sub-array - containing 10-character strings - - >>> dt = np.dtype("a3, 3u8, (3,4)a10") - -Type strings - - Any string in :obj:`numpy.sctypeDict`.keys(): - - .. admonition:: Example - - >>> dt = np.dtype('uint32') # 32-bit unsigned integer - >>> dt = np.dtype('Float64') # 64-bit floating-point number - -.. index:: - triple: dtype; construction; from tuple - -``(flexible_dtype, itemsize)`` - - The first argument must be an object that is converted to a - flexible data-type object (one whose element size is 0), the - second argument is an integer providing the desired itemsize. - - .. admonition:: Example - - >>> dt = np.dtype((void, 10)) # 10-byte wide data block - >>> dt = np.dtype((str, 35)) # 35-character string - >>> dt = np.dtype(('U', 10)) # 10-character unicode string - -``(fixed_dtype, shape)`` - - .. index:: - pair: dtype; sub-array - - The first argument is any object that can be converted into a - fixed-size data-type object. The second argument is the desired - shape of this type. If the shape parameter is 1, then the - data-type object is equivalent to fixed dtype. If *shape* is a - tuple, then the new dtype defines a sub-array of the given shape. - - .. admonition:: Example - - >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array - >>> dt = np.dtype(('S10', 1)) # 10-character string - >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 record sub-array - -``(base_dtype, new_dtype)`` - - Both arguments must be convertible to data-type objects in this - case. 
The *base_dtype* is the data-type object that the new - data-type builds on. This is how you could assign named fields to - any built-in data-type object. - - .. admonition:: Example - - 32-bit integer, whose first two bytes are interpreted as an integer - via field ``real``, and the following two bytes via field ``imag``. - - >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)}) - - 32-bit integer, which is interpreted as consisting of a sub-array - of shape ``(4,)`` containing 8-bit integers: - - >>> dt = np.dtype((np.int32, (np.int8, 4))) - - 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that - interpret the 4 bytes in the integer as four unsigned integers: - - >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) - -.. index:: - triple: dtype; construction; from list - -``[(field_name, field_dtype, field_shape), ...]`` - - *obj* should be a list of fields where each field is described by a - tuple of length 2 or 3. (Equivalent to the ``descr`` item in the - :obj:`__array_interface__` attribute.) - - The first element, *field_name*, is the field name (if this is - ``''`` then a standard field name, ``'f#'``, is assigned). The - field name may also be a 2-tuple of strings where the first string - is either a "title" (which may be any string or unicode string) or - meta-data for the field which can be any object, and the second - string is the "name" which must be a valid Python identifier. - - The second element, *field_dtype*, can be anything that can be - interpreted as a data-type. - - The optional third element *field_shape* contains the shape if this - field represents an array of the data-type in the second - element. Note that a 3-tuple with a third argument equal to 1 is - equivalent to a 2-tuple. - - This style does not accept *align* in the :class:`dtype` - constructor as it is assumed that all of the memory is accounted - for by the array interface description. - - .. 
admonition:: Example - - Data-type with fields ``big`` (big-endian 32-bit integer) and - ``little`` (little-endian 32-bit integer): - - >>> dt = np.dtype([('big', '>i4'), ('little', '>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) - -.. index:: - triple: dtype; construction; from dict - -``{'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...}`` - - This style has two required and two optional keys. The *names* - and *formats* keys are required. Their respective values are - equal-length lists with the field names and the field formats. - The field names must be strings and the field formats can be any - object accepted by :class:`dtype` constructor. - - The optional keys in the dictionary are *offsets* and *titles* and - their values must each be lists of the same length as the *names* - and *formats* lists. The *offsets* value is a list of byte offsets - (integers) for each field, while the *titles* value is a list of - titles for each field (:const:`None` can be used if no title is - desired for that field). The *titles* can be any :class:`string` - or :class:`unicode` object and will add another entry to the - fields dictionary keyed by the title and referencing the same - field tuple which will contain the title as an additional tuple - member. - - .. admonition:: Example - - Data type with fields ``r``, ``g``, ``b``, ``a``, each being - a 8-bit unsigned integer: - - >>> dt = np.dtype({'names': ['r','g','b','a'], - ... 'formats': [uint8, uint8, uint8, uint8]}) - - Data type with fields ``r`` and ``b`` (with the given titles), - both being 8-bit unsigned integers, the first at byte position - 0 from the start of the field and the second at position 2: - - >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], - ... 'offsets': [0, 2], - ... 'titles': ['Red pixel', 'Blue pixel']}) - - -``{'field1': ..., 'field2': ..., ...}`` - - This style allows passing in the :attr:`fields ` - attribute of a data-type object. 
- - *obj* should contain string or unicode keys that refer to - ``(data-type, offset)`` or ``(data-type, offset, title)`` tuples. - - .. admonition:: Example - - Data type containing field ``col1`` (10-character string at - byte position 0), ``col2`` (32-bit float at byte position 10), - and ``col3`` (integers at byte position 14): - - >>> dt = np.dtype({'col1': ('S10', 0), 'col2': (float32, 10), - 'col3': (int, 14)}) - - -:class:`dtype` -============== - -Numpy data type descriptions are instances of the :class:`dtype` class. - -Attributes ----------- - -The type of the data is described by the following :class:`dtype` attributes: - -.. autosummary:: - :toctree: generated/ - - dtype.type - dtype.kind - dtype.char - dtype.num - dtype.str - -Size of the data is in turn described by: - -.. autosummary:: - :toctree: generated/ - - dtype.name - dtype.itemsize - -Endianness of this data: - -.. autosummary:: - :toctree: generated/ - - dtype.byteorder - -Information about sub-data-types in a :term:`record`: - -.. autosummary:: - :toctree: generated/ - - dtype.fields - dtype.names - -For data types that describe sub-arrays: - -.. autosummary:: - :toctree: generated/ - - dtype.subdtype - dtype.shape - -Attributes providing additional information: - -.. autosummary:: - :toctree: generated/ - - dtype.hasobject - dtype.flags - dtype.isbuiltin - dtype.isnative - dtype.descr - dtype.alignment - - -Methods -------- - -Data types have the following method for changing the byte order: - -.. autosummary:: - :toctree: generated/ - - dtype.newbyteorder - -The following methods implement the pickle protocol: - -.. autosummary:: - :toctree: generated/ - - dtype.__reduce__ - dtype.__setstate__ diff --git a/numpy-1.6.2/doc/source/reference/arrays.indexing.rst b/numpy-1.6.2/doc/source/reference/arrays.indexing.rst deleted file mode 100644 index 8da4ecca7d..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.indexing.rst +++ /dev/null @@ -1,368 +0,0 @@ -.. 
_arrays.indexing: - -Indexing -======== - -.. sectionauthor:: adapted from "Guide to Numpy" by Travis E. Oliphant - -.. currentmodule:: numpy - -.. index:: indexing, slicing - -:class:`ndarrays ` can be indexed using the standard Python -``x[obj]`` syntax, where *x* is the array and *obj* the selection. -There are three kinds of indexing available: record access, basic -slicing, advanced indexing. Which one occurs depends on *obj*. - -.. note:: - - In Python, ``x[(exp1, exp2, ..., expN)]`` is equivalent to - ``x[exp1, exp2, ..., expN]``; the latter is just syntactic sugar - for the former. - - -Basic Slicing -------------- - -Basic slicing extends Python's basic concept of slicing to N -dimensions. Basic slicing occurs when *obj* is a :class:`slice` object -(constructed by ``start:stop:step`` notation inside of brackets), an -integer, or a tuple of slice objects and integers. :const:`Ellipsis` -and :const:`newaxis` objects can be interspersed with these as -well. In order to remain backward compatible with a common usage in -Numeric, basic slicing is also initiated if the selection object is -any sequence (such as a :class:`list`) containing :class:`slice` -objects, the :const:`Ellipsis` object, or the :const:`newaxis` object, -but no integer arrays or other embedded sequences. - -.. index:: - triple: ndarray; special methods; getslice - triple: ndarray; special methods; setslice - single: ellipsis - single: newaxis - -The simplest case of indexing with *N* integers returns an :ref:`array -scalar ` representing the corresponding item. As in -Python, all indices are zero-based: for the *i*-th index :math:`n_i`, -the valid range is :math:`0 \le n_i < d_i` where :math:`d_i` is the -*i*-th element of the shape of the array. Negative indices are -interpreted as counting from the end of the array (*i.e.*, if *i < 0*, -it means :math:`n_i + i`). - - -All arrays generated by basic slicing are always :term:`views ` -of the original array. 
- -The standard rules of sequence slicing apply to basic slicing on a -per-dimension basis (including using a step index). Some useful -concepts to remember include: - -- The basic slice syntax is ``i:j:k`` where *i* is the starting index, - *j* is the stopping index, and *k* is the step (:math:`k\neq0`). - This selects the *m* elements (in the corresponding dimension) with - index values *i*, *i + k*, ..., *i + (m - 1) k* where - :math:`m = q + (r\neq0)` and *q* and *r* are the quotient and remainder - obtained by dividing *j - i* by *k*: *j - i = q k + r*, so that - *i + (m - 1) k < j*. - - .. admonition:: Example - - >>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> x[1:7:2] - array([1, 3, 5]) - -- Negative *i* and *j* are interpreted as *n + i* and *n + j* where - *n* is the number of elements in the corresponding dimension. - Negative *k* makes stepping go towards smaller indices. - - .. admonition:: Example - - >>> x[-2:10] - array([8, 9]) - >>> x[-3:3:-1] - array([7, 6, 5, 4]) - -- Assume *n* is the number of elements in the dimension being - sliced. Then, if *i* is not given it defaults to 0 for *k > 0* and - *n* for *k < 0* . If *j* is not given it defaults to *n* for *k > 0* - and -1 for *k < 0* . If *k* is not given it defaults to 1. Note that - ``::`` is the same as ``:`` and means select all indices along this - axis. - - .. admonition:: Example - - >>> x[5:] - array([5, 6, 7, 8, 9]) - -- If the number of objects in the selection tuple is less than - *N* , then ``:`` is assumed for any subsequent dimensions. - - .. admonition:: Example - - >>> x = np.array([[[1],[2],[3]], [[4],[5],[6]]]) - >>> x.shape - (2, 3, 1) - >>> x[1:2] - array([[[4], - [5], - [6]]]) - -- :const:`Ellipsis` expand to the number of ``:`` objects needed to - make a selection tuple of the same length as ``x.ndim``. Only the - first ellipsis is expanded, any others are interpreted as ``:``. - - .. 
admonition:: Example - - >>> x[...,0] - array([[1, 2, 3], - [4, 5, 6]]) - -- Each :const:`newaxis` object in the selection tuple serves to expand - the dimensions of the resulting selection by one unit-length - dimension. The added dimension is the position of the :const:`newaxis` - object in the selection tuple. - - .. admonition:: Example - - >>> x[:,np.newaxis,:,:].shape - (2, 1, 3, 1) - -- An integer, *i*, returns the same values as ``i:i+1`` - **except** the dimensionality of the returned object is reduced by - 1. In particular, a selection tuple with the *p*-th - element an integer (and all other entries ``:``) returns the - corresponding sub-array with dimension *N - 1*. If *N = 1* - then the returned object is an array scalar. These objects are - explained in :ref:`arrays.scalars`. - -- If the selection tuple has all entries ``:`` except the - *p*-th entry which is a slice object ``i:j:k``, - then the returned array has dimension *N* formed by - concatenating the sub-arrays returned by integer indexing of - elements *i*, *i+k*, ..., *i + (m - 1) k < j*, - -- Basic slicing with more than one non-``:`` entry in the slicing - tuple, acts like repeated application of slicing using a single - non-``:`` entry, where the non-``:`` entries are successively taken - (with all other non-``:`` entries replaced by ``:``). Thus, - ``x[ind1,...,ind2,:]`` acts like ``x[ind1][...,ind2,:]`` under basic - slicing. - - .. warning:: The above is **not** true for advanced slicing. - -- You may use slicing to set values in the array, but (unlike lists) you - can never grow the array. The size of the value to be set in - ``x[obj] = value`` must be (broadcastable) to the same shape as - ``x[obj]``. - -.. index:: - pair: ndarray; view - -.. note:: - - Remember that a slicing tuple can always be constructed as *obj* - and used in the ``x[obj]`` notation. Slice objects can be used in - the construction in place of the ``[start:stop:step]`` - notation. 
For example, ``x[1:10:5,::-1]`` can also be implemented - as ``obj = (slice(1,10,5), slice(None,None,-1)); x[obj]`` . This - can be useful for constructing generic code that works on arrays - of arbitrary dimension. - -.. data:: newaxis - - The :const:`newaxis` object can be used in the basic slicing syntax - discussed above. :const:`None` can also be used instead of - :const:`newaxis`. - - -Advanced indexing ------------------ - -Advanced indexing is triggered when the selection object, *obj*, is a -non-tuple sequence object, an :class:`ndarray` (of data type integer or bool), -or a tuple with at least one sequence object or ndarray (of data type -integer or bool). There are two types of advanced indexing: integer -and Boolean. - -Advanced indexing always returns a *copy* of the data (contrast with -basic slicing that returns a :term:`view`). - -Integer -^^^^^^^ - -Integer indexing allows selection of arbitrary items in the array -based on their *N*-dimensional index. This kind of selection occurs -when advanced indexing is triggered and the selection object is not -an array of data type bool. For the discussion below, when the -selection object is not a tuple, it will be referred to as if it had -been promoted to a 1-tuple, which will be called the selection -tuple. The rules of advanced integer-style indexing are: - -- If the length of the selection tuple is larger than *N* an error is raised. - -- All sequences and scalars in the selection tuple are converted to - :class:`intp` indexing arrays. - -- All selection tuple objects must be convertible to :class:`intp` - arrays, :class:`slice` objects, or the :const:`Ellipsis` object. - -- The first :const:`Ellipsis` object will be expanded, and any other - :const:`Ellipsis` objects will be treated as full slice (``:``) - objects. The expanded :const:`Ellipsis` object is replaced with as - many full slice (``:``) objects as needed to make the length of the - selection tuple :math:`N`. 
- -- If the selection tuple is smaller than *N*, then as many ``:`` - objects as needed are added to the end of the selection tuple so - that the modified selection tuple has length *N*. - -- All the integer indexing arrays must be :ref:`broadcastable - ` to the same shape. - -- The shape of the output (or the needed shape of the object to be used - for setting) is the broadcasted shape. - -- After expanding any ellipses and filling out any missing ``:`` - objects in the selection tuple, then let :math:`N_t` be the number - of indexing arrays, and let :math:`N_s = N - N_t` be the number of - slice objects. Note that :math:`N_t > 0` (or we wouldn't be doing - advanced integer indexing). - -- If :math:`N_s = 0` then the *M*-dimensional result is constructed by - varying the index tuple ``(i_1, ..., i_M)`` over the range - of the result shape and for each value of the index tuple - ``(ind_1, ..., ind_M)``:: - - result[i_1, ..., i_M] == x[ind_1[i_1, ..., i_M], ind_2[i_1, ..., i_M], - ..., ind_N[i_1, ..., i_M]] - - .. admonition:: Example - - Suppose the shape of the broadcasted indexing arrays is 3-dimensional - and *N* is 2. Then the result is found by letting *i, j, k* run over - the shape found by broadcasting ``ind_1`` and ``ind_2``, and each - *i, j, k* yields:: - - result[i,j,k] = x[ind_1[i,j,k], ind_2[i,j,k]] - -- If :math:`N_s > 0`, then partial indexing is done. This can be - somewhat mind-boggling to understand, but if you think in terms of - the shapes of the arrays involved, it can be easier to grasp what - happens. In simple cases (*i.e.* one indexing array and *N - 1* slice - objects) it does exactly what you would expect (concatenation of - repeated application of basic slicing). The rule for partial - indexing is that the shape of the result (or the interpreted shape - of the object to be used in setting) is the shape of *x* with the - indexed subspace replaced with the broadcasted indexing subspace. 
If - the index subspaces are right next to each other, then the - broadcasted indexing space directly replaces all of the indexed - subspaces in *x*. If the indexing subspaces are separated (by slice - objects), then the broadcasted indexing space is first, followed by - the sliced subspace of *x*. - - .. admonition:: Example - - Suppose ``x.shape`` is (10,20,30) and ``ind`` is a (2,3,4)-shaped - indexing :class:`intp` array, then ``result = x[...,ind,:]`` has - shape (10,2,3,4,30) because the (20,)-shaped subspace has been - replaced with a (2,3,4)-shaped broadcasted indexing subspace. If - we let *i, j, k* loop over the (2,3,4)-shaped subspace then - ``result[...,i,j,k,:] = x[...,ind[i,j,k],:]``. This example - produces the same result as :meth:`x.take(ind, axis=-2) `. - - .. admonition:: Example - - Now let ``x.shape`` be (10,20,30,40,50) and suppose ``ind_1`` - and ``ind_2`` are broadcastable to the shape (2,3,4). Then - ``x[:,ind_1,ind_2]`` has shape (10,2,3,4,40,50) because the - (20,30)-shaped subspace from X has been replaced with the - (2,3,4) subspace from the indices. However, - ``x[:,ind_1,:,ind_2]`` has shape (2,3,4,10,30,50) because there - is no unambiguous place to drop in the indexing subspace, thus - it is tacked-on to the beginning. It is always possible to use - :meth:`.transpose() ` to move the subspace - anywhere desired. (Note that this example cannot be replicated - using :func:`take`.) - - -Boolean -^^^^^^^ - -This advanced indexing occurs when obj is an array object of Boolean -type (such as may be returned from comparison operators). It is always -equivalent to (but faster than) ``x[obj.nonzero()]`` where, as -described above, :meth:`obj.nonzero() ` returns a -tuple (of length :attr:`obj.ndim `) of integer index -arrays showing the :const:`True` elements of *obj*. - -The special case when ``obj.ndim == x.ndim`` is worth mentioning. 
In -this case ``x[obj]`` returns a 1-dimensional array filled with the -elements of *x* corresponding to the :const:`True` values of *obj*. -The search order will be C-style (last index varies the fastest). If -*obj* has :const:`True` values at entries that are outside of the -bounds of *x*, then an index error will be raised. - -You can also use Boolean arrays as element of the selection tuple. In -such instances, they will always be interpreted as :meth:`nonzero(obj) -` and the equivalent integer indexing will be -done. - -.. warning:: - - The definition of advanced indexing means that ``x[(1,2,3),]`` is - fundamentally different than ``x[(1,2,3)]``. The latter is - equivalent to ``x[1,2,3]`` which will trigger basic selection while - the former will trigger advanced indexing. Be sure to understand - why this is occurs. - - Also recognize that ``x[[1,2,3]]`` will trigger advanced indexing, - whereas ``x[[1,2,slice(None)]]`` will trigger basic slicing. - -.. _arrays.indexing.rec: - -Record Access -------------- - -.. seealso:: :ref:`arrays.dtypes`, :ref:`arrays.scalars` - -If the :class:`ndarray` object is a record array, *i.e.* its data type -is a :term:`record` data type, the :term:`fields ` of the array -can be accessed by indexing the array with strings, dictionary-like. - -Indexing ``x['field-name']`` returns a new :term:`view` to the array, -which is of the same shape as *x* (except when the field is a -sub-array) but of data type ``x.dtype['field-name']`` and contains -only the part of the data in the specified field. Also record array -scalars can be "indexed" this way. - -If the accessed field is a sub-array, the dimensions of the sub-array -are appended to the shape of the result. - -.. 
admonition:: Example - - >>> x = np.zeros((2,2), dtype=[('a', np.int32), ('b', np.float64, (3,3))]) - >>> x['a'].shape - (2, 2) - >>> x['a'].dtype - dtype('int32') - >>> x['b'].shape - (2, 2, 3, 3) - >>> x['b'].dtype - dtype('float64') - - -Flat Iterator indexing ----------------------- - -:attr:`x.flat ` returns an iterator that will iterate -over the entire array (in C-contiguous style with the last index -varying the fastest). This iterator object can also be indexed using -basic slicing or advanced indexing as long as the selection object is -not a tuple. This should be clear from the fact that :attr:`x.flat -` is a 1-dimensional view. It can be used for integer -indexing with 1-dimensional C-style-flat indices. The shape of any -returned array is therefore the shape of the integer indexing object. - -.. index:: - single: indexing - single: ndarray diff --git a/numpy-1.6.2/doc/source/reference/arrays.interface.rst b/numpy-1.6.2/doc/source/reference/arrays.interface.rst deleted file mode 100644 index 87ba15a9f2..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.interface.rst +++ /dev/null @@ -1,336 +0,0 @@ -.. index:: - pair: array; interface - pair: array; protocol - -.. _arrays.interface: - -******************* -The Array Interface -******************* - -.. note:: - - This page describes the numpy-specific API for accessing the contents of - a numpy array from other C extensions. :pep:`3118` -- - :cfunc:`The Revised Buffer Protocol ` introduces - similar, standardized API to Python 2.6 and 3.0 for any extension - module to use. Cython__'s buffer array support - uses the :pep:`3118` API; see the `Cython numpy - tutorial`__. Cython provides a way to write code that supports the buffer - protocol with Python versions older than 2.6 because it has a - backward-compatible implementation utilizing the legacy array interface - described here. 
- -__ http://cython.org/ -__ http://wiki.cython.org/tutorials/numpy - -:version: 3 - -The array interface (sometimes called array protocol) was created in -2005 as a means for array-like Python objects to re-use each other's -data buffers intelligently whenever possible. The homogeneous -N-dimensional array interface is a default mechanism for objects to -share N-dimensional array memory and information. The interface -consists of a Python-side and a C-side using two attributes. Objects -wishing to be considered an N-dimensional array in application code -should support at least one of these attributes. Objects wishing to -support an N-dimensional array in application code should look for at -least one of these attributes and use the information provided -appropriately. - -This interface describes homogeneous arrays in the sense that each -item of the array has the same "type". This type can be very simple -or it can be a quite arbitrary and complicated C-like structure. - -There are two ways to use the interface: A Python side and a C-side. -Both are separate attributes. - -Python side -=========== - -This approach to the interface consists of the object having an -:data:`__array_interface__` attribute. - -.. data:: __array_interface__ - - A dictionary of items (3 required and 5 optional). The optional - keys in the dictionary have implied defaults if they are not - provided. - - The keys are: - - **shape** (required) - - Tuple whose elements are the array size in each dimension. Each - entry is an integer (a Python int or long). Note that these - integers could be larger than the platform "int" or "long" - could hold (a Python int is a C long). It is up to the code - using this attribute to handle this appropriately; either by - raising an error when overflow is possible, or by using - :cdata:`Py_LONG_LONG` as the C type for the shapes. 
- - **typestr** (required) - - A string providing the basic type of the homogeneous array. The - basic string format consists of 3 parts: a character describing - the byteorder of the data (``<``: little-endian, ``>``: - big-endian, ``|``: not-relevant), a character code giving the - basic type of the array, and an integer providing the number of - bytes the type uses. - - The basic type character codes are: - - ===== ================================================================ - ``t`` Bit field (following integer gives the number of - bits in the bit field). - ``b`` Boolean (integer type where all values are only True or False) - ``i`` Integer - ``u`` Unsigned integer - ``f`` Floating point - ``c`` Complex floating point - ``O`` Object (i.e. the memory contains a pointer to :ctype:`PyObject`) - ``S`` String (fixed-length sequence of char) - ``U`` Unicode (fixed-length sequence of :ctype:`Py_UNICODE`) - ``V`` Other (void \* -- each item is a fixed-size chunk of memory) - ===== ================================================================ - - **descr** (optional) - - A list of tuples providing a more detailed description of the - memory layout for each item in the homogeneous array. Each - tuple in the list has two or three elements. Normally, this - attribute would be used when *typestr* is ``V[0-9]+``, but this is - not a requirement. The only requirement is that the number of - bytes represented in the *typestr* key is the same as the total - number of bytes represented here. The idea is to support - descriptions of C-like structs (records) that make up array - elements. The elements of each tuple in the list are - - 1. A string providing a name associated with this portion of - the record. This could also be a tuple of ``('full name', - 'basic_name')`` where basic name would be a valid Python - variable name representing the full name of the field. - - 2. Either a basic-type description string as in *typestr* or - another list (for nested records) - - 3. 
An optional shape tuple providing how many times this part - of the record should be repeated. No repeats are assumed - if this is not given. Very complicated structures can be - described using this generic interface. Notice, however, - that each element of the array is still of the same - data-type. Some examples of using this interface are given - below. - - **Default**: ``[('', typestr)]`` - - **data** (optional) - - A 2-tuple whose first argument is an integer (a long integer - if necessary) that points to the data-area storing the array - contents. This pointer must point to the first element of - data (in other words any offset is always ignored in this - case). The second entry in the tuple is a read-only flag (true - means the data area is read-only). - - This attribute can also be an object exposing the - :cfunc:`buffer interface <PyObject_AsReadBuffer>` which - will be used to share the data. If this key is not present (or - returns :class:`None`), then memory sharing will be done - through the buffer interface of the object itself. In this - case, the offset key can be used to indicate the start of the - buffer. A reference to the object exposing the array interface - must be stored by the new object if the memory area is to be - secured. - - **Default**: :const:`None` - - **strides** (optional) - - Either :const:`None` to indicate a C-style contiguous array or - a Tuple of strides which provides the number of bytes needed - to jump to the next array element in the corresponding - dimension. Each entry must be an integer (a Python - :const:`int` or :const:`long`). As with shape, the values may - be larger than can be represented by a C "int" or "long"; the - calling code should handle this appropriately, either by - raising an error, or by using :ctype:`Py_LONG_LONG` in C. The - default is :const:`None` which implies a C-style contiguous - memory buffer. In this model, the last dimension of the array - varies the fastest. 
For example, the default strides tuple - for an object whose array entries are 8 bytes long and whose - shape is (10,20,30) would be (4800, 240, 8) - - **Default**: :const:`None` (C-style contiguous) - - **mask** (optional) - - :const:`None` or an object exposing the array interface. All - elements of the mask array should be interpreted only as true - or not true indicating which elements of this array are valid. - The shape of this object should be `"broadcastable" - ` to the shape of the - original array. - - **Default**: :const:`None` (All array values are valid) - - **offset** (optional) - - An integer offset into the array data region. This can only be - used when data is :const:`None` or returns a :class:`buffer` - object. - - **Default**: 0. - - **version** (required) - - An integer showing the version of the interface (i.e. 3 for - this version). Be careful not to use this to invalidate - objects exposing future versions of the interface. - - -C-struct access -=============== - -This approach to the array interface allows for faster access to an -array using only one attribute lookup and a well-defined C-structure. - -.. cvar:: __array_struct__ - - A :ctype:`PyCObject` whose :cdata:`voidptr` member contains a - pointer to a filled :ctype:`PyArrayInterface` structure. Memory - for the structure is dynamically created and the :ctype:`PyCObject` - is also created with an appropriate destructor so the retriever of - this attribute simply has to apply :cfunc:`Py_DECREF()` to the - object returned by this attribute when it is finished. Also, - either the data needs to be copied out, or a reference to the - object exposing this attribute must be held to ensure the data is - not freed. Objects exposing the :obj:`__array_struct__` interface - must also not reallocate their memory if other objects are - referencing them. 
- -The PyArrayInterface structure is defined in ``numpy/ndarrayobject.h`` -as:: - - typedef struct { - int two; /* contains the integer 2 -- simple sanity check */ - int nd; /* number of dimensions */ - char typekind; /* kind in array --- character code of typestr */ - int itemsize; /* size of each element */ - int flags; /* flags indicating how the data should be interpreted */ - /* must set ARR_HAS_DESCR bit to validate descr */ - Py_intptr_t *shape; /* A length-nd array of shape information */ - Py_intptr_t *strides; /* A length-nd array of stride information */ - void *data; /* A pointer to the first element of the array */ - PyObject *descr; /* NULL or data-description (same as descr key - of __array_interface__) -- must set ARR_HAS_DESCR - flag or this will be ignored. */ - } PyArrayInterface; - -The flags member may consist of 5 bits showing how the data should be -interpreted and one bit showing how the Interface should be -interpreted. The data-bits are :const:`CONTIGUOUS` (0x1), -:const:`FORTRAN` (0x2), :const:`ALIGNED` (0x100), :const:`NOTSWAPPED` -(0x200), and :const:`WRITEABLE` (0x400). A final flag -:const:`ARR_HAS_DESCR` (0x800) indicates whether or not this structure -has the arrdescr field. The field should not be accessed unless this -flag is present. - -.. admonition:: New since June 16, 2006: - - In the past most implementations used the "desc" member of the - :ctype:`PyCObject` itself (do not confuse this with the "descr" member of - the :ctype:`PyArrayInterface` structure above --- they are two separate - things) to hold the pointer to the object exposing the interface. - This is now an explicit part of the interface. Be sure to own a - reference to the object when the :ctype:`PyCObject` is created using - :ctype:`PyCObject_FromVoidPtrAndDesc`. - - -Type description examples -========================= - -For clarity it is useful to provide some examples of the type -description and corresponding :data:`__array_interface__` 'descr' -entries. 
Thanks to Scott Gilbert for these examples: - -In every case, the 'descr' key is optional, but of course provides -more information which may be important for various applications:: - - * Float data - typestr == '>f4' - descr == [('','>f4')] - - * Complex double - typestr == '>c8' - descr == [('real','>f4'), ('imag','>f4')] - - * RGB Pixel data - typestr == '|V3' - descr == [('r','|u1'), ('g','|u1'), ('b','|u1')] - - * Mixed endian (weird but could happen). - typestr == '|V8' (or '>u8') - descr == [('big','>i4'), ('little','<i4')] - - * Nested structure - struct { - int ival; - struct { - unsigned short sval; - unsigned char bval; - unsigned char cval; - } sub; - } - typestr == '|V8' (or '<u8') - descr == [('ival','<i4'), ('sub', [('sval','<u2'), ('bval','|u1'), ('cval','|u1') ]) ] - - * Nested array - struct { - int ival; - double data[16*4]; - } - typestr == '|V516' - descr == [('ival','>i4'), ('data','>f8',(16,4))] - - * Padded structure - struct { - int ival; - double dval; - } - typestr == '|V16' - descr == [('ival','>i4'),('','|V4'),('dval','>f8')] - -It should be clear that any record type could be described using this interface. - -Differences with Array interface (Version 2) -============================================ - -The version 2 interface was very similar. The differences were -largely aesthetic. In particular: - -1. The PyArrayInterface structure had no descr member at the end - (and therefore no flag ARR_HAS_DESCR) - -2. The desc member of the PyCObject returned from __array_struct__ was - not specified. Usually, it was the object exposing the array (so - that a reference to it could be kept and destroyed when the - C-object was destroyed). Now it must be a tuple whose first - element is a string with "PyArrayInterface Version #" and whose - second element is the object exposing the array. - -3. The tuple returned from __array_interface__['data'] used to be a - hex-string (now it is an integer or a long integer). - -4. 
There was no __array_interface__ attribute instead all of the keys - (except for version) in the __array_interface__ dictionary were - their own attribute: Thus to obtain the Python-side information you - had to access separately the attributes: - - * __array_data__ - * __array_shape__ - * __array_strides__ - * __array_typestr__ - * __array_descr__ - * __array_offset__ - * __array_mask__ diff --git a/numpy-1.6.2/doc/source/reference/arrays.ndarray.rst b/numpy-1.6.2/doc/source/reference/arrays.ndarray.rst deleted file mode 100644 index 535ce8faa7..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.ndarray.rst +++ /dev/null @@ -1,568 +0,0 @@ -.. _arrays.ndarray: - -****************************************** -The N-dimensional array (:class:`ndarray`) -****************************************** - -.. currentmodule:: numpy - -An :class:`ndarray` is a (usually fixed-size) multidimensional -container of items of the same type and size. The number of dimensions -and items in an array is defined by its :attr:`shape `, -which is a :class:`tuple` of *N* positive integers that specify the -sizes of each dimension. The type of items in the array is specified by -a separate :ref:`data-type object (dtype) `, one of which -is associated with each ndarray. - -As with other container objects in Python, the contents of an -:class:`ndarray` can be accessed and modified by :ref:`indexing or -slicing ` the array (using, for example, *N* integers), -and via the methods and attributes of the :class:`ndarray`. - -.. index:: view, base - -Different :class:`ndarrays ` can share the same data, so that -changes made in one :class:`ndarray` may be visible in another. That -is, an ndarray can be a *"view"* to another ndarray, and the data it -is referring to is taken care of by the *"base"* ndarray. ndarrays can -also be views to memory owned by Python :class:`strings ` or -objects implementing the :class:`buffer` or :ref:`array -` interfaces. - - -.. 
admonition:: Example - - A 2-dimensional array of size 2 x 3, composed of 4-byte integer - elements: - - >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) - >>> type(x) - - >>> x.shape - (2, 3) - >>> x.dtype - dtype('int32') - - The array can be indexed using Python container-like syntax: - - >>> x[1,2] # i.e., the element of x in the *second* row, *third* - column, namely, 6. - - For example :ref:`slicing ` can produce views of - the array: - - >>> y = x[:,1] - >>> y - array([2, 5]) - >>> y[0] = 9 # this also changes the corresponding element in x - >>> y - array([9, 5]) - >>> x - array([[1, 9, 3], - [4, 5, 6]]) - - -Constructing arrays -=================== - -New arrays can be constructed using the routines detailed in -:ref:`routines.array-creation`, and also by using the low-level -:class:`ndarray` constructor: - -.. autosummary:: - :toctree: generated/ - - ndarray - -.. _arrays.ndarray.indexing: - - -Indexing arrays -=============== - -Arrays can be indexed using an extended Python slicing syntax, -``array[selection]``. Similar syntax is also used for accessing -fields in a :ref:`record array `. - -.. seealso:: :ref:`Array Indexing `. - -Internal memory layout of an ndarray -==================================== - -An instance of class :class:`ndarray` consists of a contiguous -one-dimensional segment of computer memory (owned by the array, or by -some other object), combined with an indexing scheme that maps *N* -integers into the location of an item in the block. The ranges in -which the indices can vary is specified by the :obj:`shape -` of the array. How many bytes each item takes and how -the bytes are interpreted is defined by the :ref:`data-type object -` associated with the array. - -.. index:: C-order, Fortran-order, row-major, column-major, stride, - offset - -A segment of memory is inherently 1-dimensional, and there are many -different schemes for arranging the items of an *N*-dimensional array -in a 1-dimensional block. 
Numpy is flexible, and :class:`ndarray` -objects can accommodate any *strided indexing scheme*. In a strided -scheme, the N-dimensional index :math:`(n_0, n_1, ..., n_{N-1})` -corresponds to the offset (in bytes): - -.. math:: n_{\mathrm{offset}} = \sum_{k=0}^{N-1} s_k n_k - -from the beginning of the memory block associated with the -array. Here, :math:`s_k` are integers which specify the :obj:`strides -<ndarray.strides>` of the array. The :term:`column-major` order (used, -for example, in the Fortran language and in *Matlab*) and -:term:`row-major` order (used in C) schemes are just specific kinds of -strided scheme, and correspond to the strides: - -.. math:: - - s_k^{\mathrm{column}} = \mathrm{itemsize} \prod_{j=0}^{k-1} d_j , - \quad s_k^{\mathrm{row}} = \mathrm{itemsize} \prod_{j=k+1}^{N-1} d_j . - -.. index:: single-segment, contiguous, non-contiguous - -where :math:`d_j` = `self.shape[j]`. - -Both the C and Fortran orders are :term:`contiguous`, *i.e.,* -:term:`single-segment`, memory layouts, in which every part of the -memory block can be accessed by some combination of the indices. - -Data in new :class:`ndarrays <ndarray>` is in the :term:`row-major` -(C) order, unless otherwise specified, but, for example, :ref:`basic -array slicing <arrays.indexing>` often produces :term:`views <view>` -in a different scheme. - -.. seealso: :ref:`Indexing <arrays.indexing>`_ - -.. note:: - - Several algorithms in NumPy work on arbitrarily strided arrays. - However, some algorithms require single-segment arrays. When an - irregularly strided array is passed in to such algorithms, a copy - is automatically made. - - -.. _arrays.ndarray.attributes: - -Array attributes -================ - -Array attributes reflect information that is intrinsic to the array -itself. Generally, accessing an array through its attributes allows -you to get and sometimes set intrinsic properties of the array without -creating a new array. 
Information on each attribute is given below. - -Memory layout -------------- - -The following attributes contain information about the memory layout -of the array: - -.. autosummary:: - :toctree: generated/ - - ndarray.flags - ndarray.shape - ndarray.strides - ndarray.ndim - ndarray.data - ndarray.size - ndarray.itemsize - ndarray.nbytes - ndarray.base - -Data type ---------- - -.. seealso:: :ref:`Data type objects ` - -The data type object associated with the array can be found in the -:attr:`dtype ` attribute: - -.. autosummary:: - :toctree: generated/ - - ndarray.dtype - -Other attributes ----------------- - -.. autosummary:: - :toctree: generated/ - - ndarray.T - ndarray.real - ndarray.imag - ndarray.flat - ndarray.ctypes - __array_priority__ - - -.. _arrays.ndarray.array-interface: - -Array interface ---------------- - -.. seealso:: :ref:`arrays.interface`. - -========================== =================================== -:obj:`__array_interface__` Python-side of the array interface -:obj:`__array_struct__` C-side of the array interface -========================== =================================== - -:mod:`ctypes` foreign function interface ----------------------------------------- - -.. autosummary:: - :toctree: generated/ - - ndarray.ctypes - -.. _array.ndarray.methods: - -Array methods -============= - -An :class:`ndarray` object has many methods which operate on or with -the array in some fashion, typically returning an array result. These -methods are briefly explained below. (Each method's docstring has a -more complete description.) 
- -For the following methods there are also corresponding functions in -:mod:`numpy`: :func:`all`, :func:`any`, :func:`argmax`, -:func:`argmin`, :func:`argsort`, :func:`choose`, :func:`clip`, -:func:`compress`, :func:`copy`, :func:`cumprod`, :func:`cumsum`, -:func:`diagonal`, :func:`imag`, :func:`max `, :func:`mean`, -:func:`min `, :func:`nonzero`, :func:`prod`, :func:`ptp`, -:func:`put`, :func:`ravel`, :func:`real`, :func:`repeat`, -:func:`reshape`, :func:`round `, :func:`searchsorted`, -:func:`sort`, :func:`squeeze`, :func:`std`, :func:`sum`, -:func:`swapaxes`, :func:`take`, :func:`trace`, :func:`transpose`, -:func:`var`. - -Array conversion ----------------- - -.. autosummary:: - :toctree: generated/ - - ndarray.item - ndarray.tolist - ndarray.itemset - ndarray.setasflat - ndarray.tostring - ndarray.tofile - ndarray.dump - ndarray.dumps - ndarray.astype - ndarray.byteswap - ndarray.copy - ndarray.view - ndarray.getfield - ndarray.setflags - ndarray.fill - -Shape manipulation ------------------- - -For reshape, resize, and transpose, the single tuple argument may be -replaced with ``n`` integers which will be interpreted as an n-tuple. - -.. autosummary:: - :toctree: generated/ - - ndarray.reshape - ndarray.resize - ndarray.transpose - ndarray.swapaxes - ndarray.flatten - ndarray.ravel - ndarray.squeeze - -Item selection and manipulation -------------------------------- - -For array methods that take an *axis* keyword, it defaults to -:const:`None`. If axis is *None*, then the array is treated as a 1-D -array. Any other value for *axis* represents the dimension along which -the operation should proceed. - -.. autosummary:: - :toctree: generated/ - - ndarray.take - ndarray.put - ndarray.repeat - ndarray.choose - ndarray.sort - ndarray.argsort - ndarray.searchsorted - ndarray.nonzero - ndarray.compress - ndarray.diagonal - -Calculation ------------ - -.. index:: axis - -Many of these methods take an argument named *axis*. 
In such cases, - -- If *axis* is *None* (the default), the array is treated as a 1-D - array and the operation is performed over the entire array. This - behavior is also the default if self is a 0-dimensional array or - array scalar. (An array scalar is an instance of the types/classes - float32, float64, etc., whereas a 0-dimensional array is an ndarray - instance containing precisely one array scalar.) - -- If *axis* is an integer, then the operation is done over the given - axis (for each 1-D subarray that can be created along the given axis). - -.. admonition:: Example of the *axis* argument - - A 3-dimensional array of size 3 x 3 x 3, summed over each of its - three axes - - >>> x - array([[[ 0, 1, 2], - [ 3, 4, 5], - [ 6, 7, 8]], - [[ 9, 10, 11], - [12, 13, 14], - [15, 16, 17]], - [[18, 19, 20], - [21, 22, 23], - [24, 25, 26]]]) - >>> x.sum(axis=0) - array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]) - >>> # for sum, axis is the first keyword, so we may omit it, - >>> # specifying only its value - >>> x.sum(0), x.sum(1), x.sum(2) - (array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]), - array([[ 9, 12, 15], - [36, 39, 42], - [63, 66, 69]]), - array([[ 3, 12, 21], - [30, 39, 48], - [57, 66, 75]])) - -The parameter *dtype* specifies the data type over which a reduction -operation (like summing) should take place. The default reduce data -type is the same as the data type of *self*. To avoid overflow, it can -be useful to perform the reduction using a larger data type. - -For several methods, an optional *out* argument can also be provided -and the result will be placed into the output array given. The *out* -argument must be an :class:`ndarray` and have the same number of -elements. It can have a different data type in which case casting will -be performed. - -.. 
autosummary:: - :toctree: generated/ - - ndarray.argmax - ndarray.min - ndarray.argmin - ndarray.ptp - ndarray.clip - ndarray.conj - ndarray.round - ndarray.trace - ndarray.sum - ndarray.cumsum - ndarray.mean - ndarray.var - ndarray.std - ndarray.prod - ndarray.cumprod - ndarray.all - ndarray.any - -Arithmetic and comparison operations -==================================== - -.. index:: comparison, arithmetic, operation, operator - -Arithmetic and comparison operations on :class:`ndarrays <ndarray>` -are defined as element-wise operations, and generally yield -:class:`ndarray` objects as results. - -Each of the arithmetic operations (``+``, ``-``, ``*``, ``/``, ``//``, -``%``, ``divmod()``, ``**`` or ``pow()``, ``<<``, ``>>``, ``&``, -``^``, ``|``, ``~``) and the comparisons (``==``, ``<``, ``>``, -``<=``, ``>=``, ``!=``) is equivalent to the corresponding -:term:`universal function` (or :term:`ufunc` for short) in Numpy. For -more information, see the section on :ref:`Universal Functions -<ufuncs>`. - -Comparison operators: - -.. autosummary:: - :toctree: generated/ - - ndarray.__lt__ - ndarray.__le__ - ndarray.__gt__ - ndarray.__ge__ - ndarray.__eq__ - ndarray.__ne__ - -Truth value of an array (:func:`bool()`): - -.. autosummary:: - :toctree: generated/ - - ndarray.__nonzero__ - -.. note:: - - Truth-value testing of an array invokes - :meth:`ndarray.__nonzero__`, which raises an error if the number of - elements in the array is larger than 1, because the truth value - of such arrays is ambiguous. Use :meth:`.any() <ndarray.any>` and - :meth:`.all() <ndarray.all>` instead to be clear about what is meant - in such cases. (If the number of elements is 0, the array evaluates - to ``False``.) - - -Unary operations: - -.. autosummary:: - :toctree: generated/ - - ndarray.__neg__ - ndarray.__pos__ - ndarray.__abs__ - ndarray.__invert__ - -Arithmetic: - -.. 
autosummary:: - :toctree: generated/ - - ndarray.__add__ - ndarray.__sub__ - ndarray.__mul__ - ndarray.__div__ - ndarray.__truediv__ - ndarray.__floordiv__ - ndarray.__mod__ - ndarray.__divmod__ - ndarray.__pow__ - ndarray.__lshift__ - ndarray.__rshift__ - ndarray.__and__ - ndarray.__or__ - ndarray.__xor__ - -.. note:: - - - Any third argument to :func:`pow()` is silently ignored, - as the underlying :func:`ufunc ` takes only two arguments. - - - The three division operators are all defined; :obj:`div` is active - by default, :obj:`truediv` is active when - :obj:`__future__` division is in effect. - - - Because :class:`ndarray` is a built-in type (written in C), the - ``__r{op}__`` special methods are not directly defined. - - - The functions called to implement many arithmetic special methods - for arrays can be modified using :func:`set_numeric_ops`. - -Arithmetic, in-place: - -.. autosummary:: - :toctree: generated/ - - ndarray.__iadd__ - ndarray.__isub__ - ndarray.__imul__ - ndarray.__idiv__ - ndarray.__itruediv__ - ndarray.__ifloordiv__ - ndarray.__imod__ - ndarray.__ipow__ - ndarray.__ilshift__ - ndarray.__irshift__ - ndarray.__iand__ - ndarray.__ior__ - ndarray.__ixor__ - -.. warning:: - - In place operations will perform the calculation using the - precision decided by the data type of the two operands, but will - silently downcast the result (if necessary) so it can fit back into - the array. Therefore, for mixed precision calculations, ``A {op}= - B`` can be different than ``A = A {op} B``. For example, suppose - ``a = ones((3,3))``. Then, ``a += 3j`` is different than ``a = a + - 3j``: while they both perform the same computation, ``a += 3`` - casts the result to fit back in ``a``, whereas ``a = a + 3j`` - re-binds the name ``a`` to the result. - - -Special methods -=============== - -For standard library functions: - -.. 
autosummary:: - :toctree: generated/ - - ndarray.__copy__ - ndarray.__deepcopy__ - ndarray.__reduce__ - ndarray.__setstate__ - -Basic customization: - -.. autosummary:: - :toctree: generated/ - - ndarray.__new__ - ndarray.__array__ - ndarray.__array_wrap__ - -Container customization: (see :ref:`Indexing `) - -.. autosummary:: - :toctree: generated/ - - ndarray.__len__ - ndarray.__getitem__ - ndarray.__setitem__ - ndarray.__getslice__ - ndarray.__setslice__ - ndarray.__contains__ - -Conversion; the operations :func:`complex()`, :func:`int()`, -:func:`long()`, :func:`float()`, :func:`oct()`, and -:func:`hex()`. They work only on arrays that have one element in them -and return the appropriate scalar. - -.. autosummary:: - :toctree: generated/ - - ndarray.__int__ - ndarray.__long__ - ndarray.__float__ - ndarray.__oct__ - ndarray.__hex__ - -String representations: - -.. autosummary:: - :toctree: generated/ - - ndarray.__str__ - ndarray.__repr__ diff --git a/numpy-1.6.2/doc/source/reference/arrays.rst b/numpy-1.6.2/doc/source/reference/arrays.rst deleted file mode 100644 index 4204f13a42..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. _arrays: - -************* -Array objects -************* - -.. currentmodule:: numpy - -NumPy provides an N-dimensional array type, the :ref:`ndarray -`, which describes a collection of "items" of the same -type. The items can be :ref:`indexed ` using for -example N integers. - -All ndarrays are :term:`homogenous`: every item takes up the same size -block of memory, and all blocks are interpreted in exactly the same -way. How each item in the array is to be interpreted is specified by a -separate :ref:`data-type object `, one of which is associated -with every array. In addition to basic types (integers, floats, -*etc.*), the data type objects can also represent data structures. 
- -An item extracted from an array, *e.g.*, by indexing, is represented -by a Python object whose type is one of the :ref:`array scalar types -` built in Numpy. The array scalars allow easy manipulation -of also more complicated arrangements of data. - -.. figure:: figures/threefundamental.png - - **Figure** - Conceptual diagram showing the relationship between the three - fundamental objects used to describe the data in an array: 1) the - ndarray itself, 2) the data-type object that describes the layout - of a single fixed-size element of the array, 3) the array-scalar - Python object that is returned when a single element of the array - is accessed. - - - -.. toctree:: - :maxdepth: 2 - - arrays.ndarray - arrays.scalars - arrays.dtypes - arrays.indexing - arrays.classes - maskedarray - arrays.interface diff --git a/numpy-1.6.2/doc/source/reference/arrays.scalars.rst b/numpy-1.6.2/doc/source/reference/arrays.scalars.rst deleted file mode 100644 index 0a0d8045c7..0000000000 --- a/numpy-1.6.2/doc/source/reference/arrays.scalars.rst +++ /dev/null @@ -1,286 +0,0 @@ -.. _arrays.scalars: - -******* -Scalars -******* - -.. currentmodule:: numpy - -Python defines only one type of a particular data class (there is only -one integer type, one floating-point type, etc.). This can be -convenient in applications that don't need to be concerned with all -the ways data can be represented in a computer. For scientific -computing, however, more control is often needed. - -In NumPy, there are 24 new fundamental Python types to describe -different types of scalars. These type descriptors are mostly based on -the types available in the C language that CPython is written in, with -several additional types compatible with Python's types. - -Array scalars have the same attributes and methods as :class:`ndarrays -`. [#]_ This allows one to treat items of an array partly on -the same footing as arrays, smoothing out rough edges that result when -mixing scalar and array operations. 
- -Array scalars live in a hierarchy (see the Figure below) of data -types. They can be detected using the hierarchy: For example, -``isinstance(val, np.generic)`` will return :const:`True` if *val* is -an array scalar object. Alternatively, what kind of array scalar is -present can be determined using other members of the data type -hierarchy. Thus, for example ``isinstance(val, np.complexfloating)`` -will return :const:`True` if *val* is a complex valued type, while -:const:`isinstance(val, np.flexible)` will return true if *val* is one -of the flexible itemsize array types (:class:`string`, -:class:`unicode`, :class:`void`). - -.. figure:: figures/dtype-hierarchy.png - - **Figure:** Hierarchy of type objects representing the array data - types. Not shown are the two integer types :class:`intp` and - :class:`uintp` which just point to the integer type that holds a - pointer for the platform. All the number types can be obtained - using bit-width names as well. - -.. [#] However, array scalars are immutable, so none of the array - scalar attributes are settable. - -.. _arrays.scalars.character-codes: - -.. _arrays.scalars.built-in: - -Built-in scalar types -===================== - -The built-in scalar types are shown below. Along with their (mostly) -C-derived names, the integer, float, and complex data-types are also -available using a bit-width convention so that an array of the right -size can always be ensured (e.g. :class:`int8`, :class:`float64`, -:class:`complex128`). Two aliases (:class:`intp` and :class:`uintp`) -pointing to the integer type that is sufficiently large to hold a C pointer -are also provided. The C-like names are associated with character codes, -which are shown in the table. Use of the character codes, however, -is discouraged. 
- -Five of the scalar types are essentially equivalent to fundamental -Python types and therefore inherit from them as well as from the -generic array scalar type: - -==================== ==================== -Array scalar type Related Python type -==================== ==================== -:class:`int_` :class:`IntType` -:class:`float_` :class:`FloatType` -:class:`complex_` :class:`ComplexType` -:class:`str_` :class:`StringType` -:class:`unicode_` :class:`UnicodeType` -==================== ==================== - -The :class:`bool_` data type is very similar to the Python -:class:`BooleanType` but does not inherit from it because Python's -:class:`BooleanType` does not allow itself to be inherited from, and -on the C-level the size of the actual bool data is not the same as a -Python Boolean scalar. - -.. warning:: - - The :class:`bool_` type is not a subclass of the :class:`int_` type - (the :class:`bool_` is not even a number type). This is different - than Python's default implementation of :class:`bool` as a - sub-class of int. - - -.. tip:: The default data type in Numpy is :class:`float_`. - -In the tables below, ``platform?`` means that the type may not be -available on all platforms. Compatibility with different C or Python -types is indicated: two types are compatible if their data is of the -same size and interpreted in the same way. 
- -Booleans: - -=================== ============================= =============== -Type Remarks Character code -=================== ============================= =============== -:class:`bool_` compatible: Python bool ``'?'`` -:class:`bool8` 8 bits -=================== ============================= =============== - -Integers: - -=================== ============================= =============== -:class:`byte` compatible: C char ``'b'`` -:class:`short` compatible: C short ``'h'`` -:class:`intc` compatible: C int ``'i'`` -:class:`int_` compatible: Python int ``'l'`` -:class:`longlong` compatible: C long long ``'q'`` -:class:`intp` large enough to fit a pointer ``'p'`` -:class:`int8` 8 bits -:class:`int16` 16 bits -:class:`int32` 32 bits -:class:`int64` 64 bits -=================== ============================= =============== - -Unsigned integers: - -=================== ============================= =============== -:class:`ubyte` compatible: C unsigned char ``'B'`` -:class:`ushort` compatible: C unsigned short ``'H'`` -:class:`uintc` compatible: C unsigned int ``'I'`` -:class:`uint` compatible: Python int ``'L'`` -:class:`ulonglong` compatible: C long long ``'Q'`` -:class:`uintp` large enough to fit a pointer ``'P'`` -:class:`uint8` 8 bits -:class:`uint16` 16 bits -:class:`uint32` 32 bits -:class:`uint64` 64 bits -=================== ============================= =============== - -Floating-point numbers: - -=================== ============================= =============== -:class:`half` ``'e'`` -:class:`single` compatible: C float ``'f'`` -:class:`double` compatible: C double -:class:`float_` compatible: Python float ``'d'`` -:class:`longfloat` compatible: C long float ``'g'`` -:class:`float16` 16 bits -:class:`float32` 32 bits -:class:`float64` 64 bits -:class:`float96` 96 bits, platform? -:class:`float128` 128 bits, platform? 
-=================== ============================= =============== - -Complex floating-point numbers: - -=================== ============================= =============== -:class:`csingle` ``'F'`` -:class:`complex_` compatible: Python complex ``'D'`` -:class:`clongfloat` ``'G'`` -:class:`complex64` two 32-bit floats -:class:`complex128` two 64-bit floats -:class:`complex192` two 96-bit floats, - platform? -:class:`complex256` two 128-bit floats, - platform? -=================== ============================= =============== - -Any Python object: - -=================== ============================= =============== -:class:`object_` any Python object ``'O'`` -=================== ============================= =============== - -.. note:: - - The data actually stored in :term:`object arrays ` - (*i.e.*, arrays having dtype :class:`object_`) are references to - Python objects, not the objects themselves. Hence, object arrays - behave more like usual Python :class:`lists `, in the sense - that their contents need not be of the same Python type. - - The object type is also special because an array containing - :class:`object_` items does not return an :class:`object_` object - on item access, but instead returns the actual object that - the array item refers to. - -The following data types are :term:`flexible`. They have no predefined -size: the data they describe can be of different length in different -arrays. (In the character codes ``#`` is an integer denoting how many -elements the data type consists of.) - -=================== ============================= ======== -:class:`str_` compatible: Python str ``'S#'`` -:class:`unicode_` compatible: Python unicode ``'U#'`` -:class:`void` ``'V#'`` -=================== ============================= ======== - - -.. warning:: - - Numeric Compatibility: If you used old typecode characters in your - Numeric code (which was never recommended), you will need to change - some of them to the new characters. 
In particular, the needed - changes are ``c -> S1``, ``b -> B``, ``1 -> b``, ``s -> h``, ``w -> - H``, and ``u -> I``. These changes make the type character - convention more consistent with other Python modules such as the - :mod:`struct` module. - - -Attributes -========== - -The array scalar objects have an :obj:`array priority -<__array_priority__>` of :cdata:`NPY_SCALAR_PRIORITY` -(-1,000,000.0). They also do not (yet) have a :attr:`ctypes ` -attribute. Otherwise, they share the same attributes as arrays: - -.. autosummary:: - :toctree: generated/ - - generic.flags - generic.shape - generic.strides - generic.ndim - generic.data - generic.size - generic.itemsize - generic.base - generic.dtype - generic.real - generic.imag - generic.flat - generic.T - generic.__array_interface__ - generic.__array_struct__ - generic.__array_priority__ - generic.__array_wrap__ - - -Indexing -======== -.. seealso:: :ref:`arrays.indexing`, :ref:`arrays.dtypes` - -Array scalars can be indexed like 0-dimensional arrays: if *x* is an -array scalar, - -- ``x[()]`` returns a 0-dimensional :class:`ndarray` -- ``x['field-name']`` returns the array scalar in the field *field-name*. - (*x* can have fields, for example, when it corresponds to a record data type.) - -Methods -======= - -Array scalars have exactly the same methods as arrays. The default -behavior of these methods is to internally convert the scalar to an -equivalent 0-dimensional array and to call the corresponding array -method. In addition, math operations on array scalars are defined so -that the same hardware flags are set and used to interpret the results -as for :ref:`ufunc `, so that the error state used for ufuncs -also carries over to the math on array scalars. - -The exceptions to the above rules are given below: - -.. 
autosummary:: - :toctree: generated/ - - generic - generic.__array__ - generic.__array_wrap__ - generic.squeeze - generic.byteswap - generic.__reduce__ - generic.__setstate__ - generic.setflags - - -Defining new types -================== - -There are two ways to effectively define a new array scalar type -(apart from composing record :ref:`dtypes ` from the built-in -scalar types): One way is to simply subclass the :class:`ndarray` and -overwrite the methods of interest. This will work to a degree, but -internally certain behaviors are fixed by the data type of the array. -To fully customize the data type of an array you need to define a new -data-type, and register it with NumPy. Such new types can only be -defined in C, using the :ref:`Numpy C-API `. diff --git a/numpy-1.6.2/doc/source/reference/c-api.array.rst b/numpy-1.6.2/doc/source/reference/c-api.array.rst deleted file mode 100644 index f34176a001..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.array.rst +++ /dev/null @@ -1,3118 +0,0 @@ -Array API -========= - -.. sectionauthor:: Travis E. Oliphant - -| The test of a first-rate intelligence is the ability to hold two -| opposed ideas in the mind at the same time, and still retain the -| ability to function. -| --- *F. Scott Fitzgerald* - -| For a successful technology, reality must take precedence over public -| relations, for Nature cannot be fooled. -| --- *Richard P. Feynman* - -.. index:: - pair: ndarray; C-API - pair: C-API; array - - -Array structure and data access -------------------------------- - -These macros all access the :ctype:`PyArrayObject` structure members. The input -argument, obj, can be any :ctype:`PyObject *` that is directly interpretable -as a :ctype:`PyArrayObject *` (any instance of the :cdata:`PyArray_Type` and its -sub-types). - -.. cfunction:: void *PyArray_DATA(PyObject *obj) - -.. cfunction:: char *PyArray_BYTES(PyObject *obj) - - These two macros are similar and obtain the pointer to the - data-buffer for the array. 
The first macro can (and should be) - assigned to a particular pointer where the second is for generic - processing. If you have not guaranteed a contiguous and/or aligned - array then be sure you understand how to access the data in the - array to avoid memory and/or alignment problems. - -.. cfunction:: npy_intp *PyArray_DIMS(PyObject *arr) - -.. cfunction:: npy_intp *PyArray_STRIDES(PyObject* arr) - -.. cfunction:: npy_intp PyArray_DIM(PyObject* arr, int n) - - Return the shape in the *n* :math:`^{\textrm{th}}` dimension. - -.. cfunction:: npy_intp PyArray_STRIDE(PyObject* arr, int n) - - Return the stride in the *n* :math:`^{\textrm{th}}` dimension. - -.. cfunction:: PyObject *PyArray_BASE(PyObject* arr) - -.. cfunction:: PyArray_Descr *PyArray_DESCR(PyObject* arr) - -.. cfunction:: int PyArray_FLAGS(PyObject* arr) - -.. cfunction:: int PyArray_ITEMSIZE(PyObject* arr) - - Return the itemsize for the elements of this array. - -.. cfunction:: int PyArray_TYPE(PyObject* arr) - - Return the (builtin) typenumber for the elements of this array. - -.. cfunction:: PyObject *PyArray_GETITEM(PyObject* arr, void* itemptr) - - Get a Python object from the ndarray, *arr*, at the location - pointed to by itemptr. Return ``NULL`` on failure. - -.. cfunction:: int PyArray_SETITEM(PyObject* arr, void* itemptr, PyObject* obj) - - Convert obj and place it in the ndarray, *arr*, at the place - pointed to by itemptr. Return -1 if an error occurs or 0 on - success. - -.. cfunction:: npy_intp PyArray_SIZE(PyObject* arr) - - Returns the total size (in number of elements) of the array. - -.. cfunction:: npy_intp PyArray_Size(PyObject* obj) - - Returns 0 if *obj* is not a sub-class of bigndarray. Otherwise, - returns the total number of elements in the array. Safer version - of :cfunc:`PyArray_SIZE` (*obj*). - -.. cfunction:: npy_intp PyArray_NBYTES(PyObject* arr) - - Returns the total number of bytes consumed by the array. 
- - -Data access -^^^^^^^^^^^ - -These functions and macros provide easy access to elements of the -ndarray from C. These work for all arrays. You may need to take care -when accessing the data in the array, however, if it is not in machine -byte-order, misaligned, or not writeable. In other words, be sure to -respect the state of the flags unless you know what you are doing, or -have previously guaranteed an array that is writeable, aligned, and in -machine byte-order using :cfunc:`PyArray_FromAny`. If you wish to handle all -types of arrays, the copyswap function for each type is useful for -handling misbehaved arrays. Some platforms (e.g. Solaris) do not like -misaligned data and will crash if you de-reference a misaligned -pointer. Other platforms (e.g. x86 Linux) will just work more slowly -with misaligned data. - -.. cfunction:: void* PyArray_GetPtr(PyArrayObject* aobj, npy_intp* ind) - - Return a pointer to the data of the ndarray, *aobj*, at the - N-dimensional index given by the c-array, *ind*, (which must be - at least *aobj* ->nd in size). You may want to typecast the - returned pointer to the data type of the ndarray. - -.. cfunction:: void* PyArray_GETPTR1(PyObject* obj, i) - -.. cfunction:: void* PyArray_GETPTR2(PyObject* obj, i, j) - -.. cfunction:: void* PyArray_GETPTR3(PyObject* obj, i, j, k) - -.. cfunction:: void* PyArray_GETPTR4(PyObject* obj, i, j, k, l) - - Quick, inline access to the element at the given coordinates in - the ndarray, *obj*, which must have respectively 1, 2, 3, or 4 - dimensions (this is not checked). The corresponding *i*, *j*, - *k*, and *l* coordinates can be any integer but will be - interpreted as ``npy_intp``. You may want to typecast the - returned pointer to the data type of the ndarray. - - -Creating arrays ---------------- - - -From scratch -^^^^^^^^^^^^ - -.. 
cfunction:: PyObject* PyArray_NewFromDescr(PyTypeObject* subtype, PyArray_Descr* descr, int nd, npy_intp* dims, npy_intp* strides, void* data, int flags, PyObject* obj) - - This is the main array creation function. Most new arrays are - created with this flexible function. The returned object is an - object of Python-type *subtype*, which must be a subtype of - :cdata:`PyArray_Type`. The array has *nd* dimensions, described by - *dims*. The data-type descriptor of the new array is *descr*. If - *subtype* is not :cdata:`&PyArray_Type` (*e.g.* a Python subclass of - the ndarray), then *obj* is the object to pass to the - :obj:`__array_finalize__` method of the subclass. If *data* is - ``NULL``, then new memory will be allocated and *flags* can be - non-zero to indicate a Fortran-style contiguous array. If *data* - is not ``NULL``, then it is assumed to point to the memory to be - used for the array and the *flags* argument is used as the new - flags for the array (except the state of :cdata:`NPY_OWNDATA` and - :cdata:`UPDATEIFCOPY` flags of the new array will be reset). In - addition, if *data* is non-NULL, then *strides* can also be - provided. If *strides* is ``NULL``, then the array strides are - computed as C-style contiguous (default) or Fortran-style - contiguous (*flags* is nonzero for *data* = ``NULL`` or *flags* & - :cdata:`NPY_F_CONTIGUOUS` is nonzero non-NULL *data*). Any provided - *dims* and *strides* are copied into newly allocated dimension and - strides arrays for the new array object. - -.. cfunction:: PyObject* PyArray_NewLikeArray(PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, int subok) - - .. versionadded:: 1.6 - - This function steals a reference to *descr* if it is not NULL. - - This array creation routine allows for the convenient creation of - a new array matching an existing array's shapes and memory layout, - possibly changing the layout and/or data type. 
- - When *order* is :cdata:`NPY_ANYORDER`, the result order is - :cdata:`NPY_FORTRANORDER` if *prototype* is a fortran array, - :cdata:`NPY_CORDER` otherwise. When *order* is - :cdata:`NPY_KEEPORDER`, the result order matches that of *prototype*, even - when the axes of *prototype* aren't in C or Fortran order. - - If *descr* is NULL, the data type of *prototype* is used. - - If *subok* is 1, the newly created array will use the sub-type of - *prototype* to create the new array, otherwise it will create a - base-class array. - -.. cfunction:: PyObject* PyArray_New(PyTypeObject* subtype, int nd, npy_intp* dims, int type_num, npy_intp* strides, void* data, int itemsize, int flags, PyObject* obj) - - This is similar to :cfunc:`PyArray_DescrNew` (...) except you - specify the data-type descriptor with *type_num* and *itemsize*, - where *type_num* corresponds to a builtin (or user-defined) - type. If the type always has the same number of bytes, then - itemsize is ignored. Otherwise, itemsize specifies the particular - size of this array. - - - -.. warning:: - - If data is passed to :cfunc:`PyArray_NewFromDescr` or :cfunc:`PyArray_New`, - this memory must not be deallocated until the new array is - deleted. If this data came from another Python object, this can - be accomplished using :cfunc:`Py_INCREF` on that object and setting the - base member of the new array to point to that object. If strides - are passed in they must be consistent with the dimensions, the - itemsize, and the data of the array. - -.. cfunction:: PyObject* PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - - Create a new unitialized array of type, *typenum*, whose size in - each of *nd* dimensions is given by the integer array, *dims*. - This function cannot be used to create a flexible-type array (no - itemsize given). - -.. 
cfunction:: PyObject* PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) - - Create an array wrapper around *data* pointed to by the given - pointer. The array flags will have a default that the data area is - well-behaved and C-style contiguous. The shape of the array is - given by the *dims* c-array of length *nd*. The data-type of the - array is indicated by *typenum*. - -.. cfunction:: PyObject* PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, PyArray_Descr* descr) - - Create a new array with the provided data-type descriptor, *descr* - , of the shape deteremined by *nd* and *dims*. - -.. cfunction:: PyArray_FILLWBYTE(PyObject* obj, int val) - - Fill the array pointed to by *obj* ---which must be a (subclass - of) bigndarray---with the contents of *val* (evaluated as a byte). - -.. cfunction:: PyObject* PyArray_Zeros(int nd, npy_intp* dims, PyArray_Descr* dtype, int fortran) - - Construct a new *nd* -dimensional array with shape given by *dims* - and data type given by *dtype*. If *fortran* is non-zero, then a - Fortran-order array is created, otherwise a C-order array is - created. Fill the memory with zeros (or the 0 object if *dtype* - corresponds to :ctype:`PyArray_OBJECT` ). - -.. cfunction:: PyObject* PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) - - Macro form of :cfunc:`PyArray_Zeros` which takes a type-number instead - of a data-type object. - -.. cfunction:: PyObject* PyArray_Empty(int nd, npy_intp* dims, PyArray_Descr* dtype, int fortran) - - Construct a new *nd* -dimensional array with shape given by *dims* - and data type given by *dtype*. If *fortran* is non-zero, then a - Fortran-order array is created, otherwise a C-order array is - created. The array is uninitialized unless the data type - corresponds to :ctype:`PyArray_OBJECT` in which case the array is - filled with :cdata:`Py_None`. - -.. 
cfunction:: PyObject* PyArray_EMPTY(int nd, npy_intp* dims, int typenum, int fortran) - - Macro form of :cfunc:`PyArray_Empty` which takes a type-number, - *typenum*, instead of a data-type object. - -.. cfunction:: PyObject* PyArray_Arange(double start, double stop, double step, int typenum) - - Construct a new 1-dimensional array of data-type, *typenum*, that - ranges from *start* to *stop* (exclusive) in increments of *step* - . Equivalent to **arange** (*start*, *stop*, *step*, dtype). - -.. cfunction:: PyObject* PyArray_ArangeObj(PyObject* start, PyObject* stop, PyObject* step, PyArray_Descr* descr) - - Construct a new 1-dimensional array of data-type determined by - ``descr``, that ranges from ``start`` to ``stop`` (exclusive) in - increments of ``step``. Equivalent to arange( ``start``, - ``stop``, ``step``, ``typenum`` ). - - -From other objects -^^^^^^^^^^^^^^^^^^ - -.. cfunction:: PyObject* PyArray_FromAny(PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, int requirements, PyObject* context) - - This is the main function used to obtain an array from any nested - sequence, or object that exposes the array interface, *op*. The - parameters allow specification of the required *dtype*, the - minimum (*min_depth*) and maximum (*max_depth*) number of - dimensions acceptable, and other *requirements* for the array. The - *dtype* argument needs to be a :ctype:`PyArray_Descr` structure - indicating the desired data-type (including required - byteorder). The *dtype* argument may be NULL, indicating that any - data-type (and byteorder) is acceptable. Unless ``FORCECAST`` is - present in ``flags``, this call will generate an error if the data - type cannot be safely obtained from the object. If you want to use - ``NULL`` for the *dtype* and ensure the array is notswapped then - use :cfunc:`PyArray_CheckFromAny`. A value of 0 for either of the - depth parameters causes the parameter to be ignored. 
Any of the - following array flags can be added (*e.g.* using \|) to get the - *requirements* argument. If your code can handle general (*e.g.* - strided, byte-swapped, or unaligned arrays) then *requirements* - may be 0. Also, if *op* is not already an array (or does not - expose the array interface), then a new array will be created (and - filled from *op* using the sequence protocol). The new array will - have :cdata:`NPY_DEFAULT` as its flags member. The *context* argument - is passed to the :obj:`__array__` method of *op* and is only used if - the array is constructed that way. Almost always this - parameter is ``NULL``. - - .. cvar:: NPY_C_CONTIGUOUS - - Make sure the returned array is C-style contiguous - - .. cvar:: NPY_F_CONTIGUOUS - - Make sure the returned array is Fortran-style contiguous. - - .. cvar:: NPY_ALIGNED - - Make sure the returned array is aligned on proper boundaries for its - data type. An aligned array has the data pointer and every strides - factor as a multiple of the alignment factor for the data-type- - descriptor. - - .. cvar:: NPY_WRITEABLE - - Make sure the returned array can be written to. - - .. cvar:: NPY_ENSURECOPY - - Make sure a copy is made of *op*. If this flag is not - present, data is not copied if it can be avoided. - - .. cvar:: NPY_ENSUREARRAY - - Make sure the result is a base-class ndarray or bigndarray. By - default, if *op* is an instance of a subclass of the - bigndarray, an instance of that same subclass is returned. If - this flag is set, an ndarray object will be returned instead. - - .. cvar:: NPY_FORCECAST - - Force a cast to the output type even if it cannot be done - safely. Without this flag, a data cast will occur only if it - can be done safely, otherwise an error is reaised. - - .. cvar:: NPY_UPDATEIFCOPY - - If *op* is already an array, but does not satisfy the - requirements, then a copy is made (which will satisfy the - requirements). 
If this flag is present and a copy (of an - object that is already an array) must be made, then the - corresponding :cdata:`NPY_UPDATEIFCOPY` flag is set in the returned - copy and *op* is made to be read-only. When the returned copy - is deleted (presumably after your calculations are complete), - its contents will be copied back into *op* and the *op* array - will be made writeable again. If *op* is not writeable to - begin with, then an error is raised. If *op* is not already an - array, then this flag has no effect. - - .. cvar:: NPY_BEHAVED - - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_WRITEABLE` - - .. cvar:: NPY_CARRAY - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_BEHAVED` - - .. cvar:: NPY_CARRAY_RO - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - - .. cvar:: NPY_FARRAY - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_BEHAVED` - - .. cvar:: NPY_FARRAY_RO - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - - .. cvar:: NPY_DEFAULT - - :cdata:`NPY_CARRAY` - - .. cvar:: NPY_IN_ARRAY - - :cdata:`NPY_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - - .. cvar:: NPY_IN_FARRAY - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - - .. cvar:: NPY_OUT_ARRAY - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_WRITEABLE` \| - :cdata:`NPY_ALIGNED` - - .. cvar:: NPY_OUT_FARRAY - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_WRITEABLE` \| - :cdata:`NPY_ALIGNED` - - .. cvar:: NPY_INOUT_ARRAY - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_WRITEABLE` \| - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_UPDATEIFCOPY` - - .. cvar:: NPY_INOUT_FARRAY - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_WRITEABLE` \| - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_UPDATEIFCOPY` - -.. cfunction:: int PyArray_GetArrayParamsFromObject(PyObject* op, PyArray_Descr* requested_dtype, npy_bool writeable, PyArray_Descr** out_dtype, int* out_ndim, npy_intp* out_dims, PyArrayObject** out_arr, PyObject* context) - - .. versionadded:: 1.6 - - Retrieves the array parameters for viewing/converting an arbitrary - PyObject* to a NumPy array. 
This allows the "innate type and shape" - of Python list-of-lists to be discovered without - actually converting to an array. PyArray_FromAny calls this function - to analyze its input. - - In some cases, such as structured arrays and the __array__ interface, - a data type needs to be used to make sense of the object. When - this is needed, provide a Descr for 'requested_dtype', otherwise - provide NULL. This reference is not stolen. Also, if the requested - dtype doesn't modify the interpretation of the input, out_dtype will - still get the "innate" dtype of the object, not the dtype passed - in 'requested_dtype'. - - If writing to the value in 'op' is desired, set the boolean - 'writeable' to 1. This raises an error when 'op' is a scalar, list - of lists, or other non-writeable 'op'. This differs from passing - NPY_WRITEABLE to PyArray_FromAny, where the writeable array may - be a copy of the input. - - When success (0 return value) is returned, either out_arr - is filled with a non-NULL PyArrayObject and - the rest of the parameters are untouched, or out_arr is - filled with NULL, and the rest of the parameters are filled. - - Typical usage: - - .. code-block:: c - - PyArrayObject *arr = NULL; - PyArray_Descr *dtype = NULL; - int ndim = 0; - npy_intp dims[NPY_MAXDIMS]; - - if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype, - &ndim, &dims, &arr, NULL) < 0) { - return NULL; - } - if (arr == NULL) { - ... validate/change dtype, validate flags, ndim, etc ... - // Could make custom strides here too - arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, - dims, NULL, - fortran ? NPY_F_CONTIGUOUS : 0, - NULL); - if (arr == NULL) { - return NULL; - } - if (PyArray_CopyObject(arr, op) < 0) { - Py_DECREF(arr); - return NULL; - } - } - else { - ... in this case the other parameters weren't filled, just - validate and possibly copy arr itself ... - } - ... use arr ... - -.. 
cfunction:: PyObject* PyArray_CheckFromAny(PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, int requirements, PyObject* context) - - Nearly identical to :cfunc:`PyArray_FromAny` (...) except - *requirements* can contain :cdata:`NPY_NOTSWAPPED` (over-riding the - specification in *dtype*) and :cdata:`NPY_ELEMENTSTRIDES` which - indicates that the array should be aligned in the sense that the - strides are multiples of the element size. - -.. cvar:: NPY_NOTSWAPPED - - Make sure the returned array has a data-type descriptor that is in - machine byte-order, over-riding any specification in the *dtype* - argument. Normally, the byte-order requirement is determined by - the *dtype* argument. If this flag is set and the dtype argument - does not indicate a machine byte-order descriptor (or is NULL and - the object is already an array with a data-type descriptor that is - not in machine byte- order), then a new data-type descriptor is - created and used with its byte-order field set to native. - -.. cvar:: NPY_BEHAVED_NS - - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_WRITEABLE` \| :cdata:`NPY_NOTSWAPPED` - -.. cvar:: NPY_ELEMENTSTRIDES - - Make sure the returned array has strides that are multiples of the - element size. - -.. cfunction:: PyObject* PyArray_FromArray(PyArrayObject* op, PyArray_Descr* newtype, int requirements) - - Special case of :cfunc:`PyArray_FromAny` for when *op* is already an - array but it needs to be of a specific *newtype* (including - byte-order) or has certain *requirements*. - -.. cfunction:: PyObject* PyArray_FromStructInterface(PyObject* op) - - Returns an ndarray object from a Python object that exposes the - :obj:`__array_struct__`` method and follows the array interface - protocol. If the object does not contain this method then a - borrowed reference to :cdata:`Py_NotImplemented` is returned. - -.. 
cfunction:: PyObject* PyArray_FromInterface(PyObject* op) - - Returns an ndarray object from a Python object that exposes the - :obj:`__array_shape__` and :obj:`__array_typestr__` - methods following - the array interface protocol. If the object does not contain one - of these method then a borrowed reference to :cdata:`Py_NotImplemented` - is returned. - -.. cfunction:: PyObject* PyArray_FromArrayAttr(PyObject* op, PyArray_Descr* dtype, PyObject* context) - - Return an ndarray object from a Python object that exposes the - :obj:`__array__` method. The :obj:`__array__` method can take 0, 1, or 2 - arguments ([dtype, context]) where *context* is used to pass - information about where the :obj:`__array__` method is being called - from (currently only used in ufuncs). - -.. cfunction:: PyObject* PyArray_ContiguousFromAny(PyObject* op, int typenum, int min_depth, int max_depth) - - This function returns a (C-style) contiguous and behaved function - array from any nested sequence or array interface exporting - object, *op*, of (non-flexible) type given by the enumerated - *typenum*, of minimum depth *min_depth*, and of maximum depth - *max_depth*. Equivalent to a call to :cfunc:`PyArray_FromAny` with - requirements set to :cdata:`NPY_DEFAULT` and the type_num member of the - type argument set to *typenum*. - -.. cfunction:: PyObject *PyArray_FromObject(PyObject *op, int typenum, int min_depth, int max_depth) - - Return an aligned and in native-byteorder array from any nested - sequence or array-interface exporting object, op, of a type given by - the enumerated typenum. The minimum number of dimensions the array can - have is given by min_depth while the maximum is max_depth. This is - equivalent to a call to :cfunc:`PyArray_FromAny` with requirements set to - BEHAVED. - -.. cfunction:: PyObject* PyArray_EnsureArray(PyObject* op) - - This function **steals a reference** to ``op`` and makes sure that - ``op`` is a base-class ndarray. 
It special cases array scalars, - but otherwise calls :cfunc:`PyArray_FromAny` ( ``op``, NULL, 0, 0, - :cdata:`NPY_ENSUREARRAY`). - -.. cfunction:: PyObject* PyArray_FromString(char* string, npy_intp slen, PyArray_Descr* dtype, npy_intp num, char* sep) - - Construct a one-dimensional ndarray of a single type from a binary - or (ASCII) text ``string`` of length ``slen``. The data-type of - the array to-be-created is given by ``dtype``. If num is -1, then - **copy** the entire string and return an appropriately sized - array, otherwise, ``num`` is the number of items to **copy** from - the string. If ``sep`` is NULL (or ""), then interpret the string - as bytes of binary data, otherwise convert the sub-strings - separated by ``sep`` to items of data-type ``dtype``. Some - data-types may not be readable in text mode and an error will be - raised if that occurs. All errors return NULL. - -.. cfunction:: PyObject* PyArray_FromFile(FILE* fp, PyArray_Descr* dtype, npy_intp num, char* sep) - - Construct a one-dimensional ndarray of a single type from a binary - or text file. The open file pointer is ``fp``, the data-type of - the array to be created is given by ``dtype``. This must match - the data in the file. If ``num`` is -1, then read until the end of - the file and return an appropriately sized array, otherwise, - ``num`` is the number of items to read. If ``sep`` is NULL (or - ""), then read from the file in binary mode, otherwise read from - the file in text mode with ``sep`` providing the item - separator. Some array types cannot be read in text mode in which - case an error is raised. - -.. cfunction:: PyObject* PyArray_FromBuffer(PyObject* buf, PyArray_Descr* dtype, npy_intp count, npy_intp offset) - - Construct a one-dimensional ndarray of a single type from an - object, ``buf``, that exports the (single-segment) buffer protocol - (or has an attribute __buffer\__ that returns an object that - exports the buffer protocol). 
A writeable buffer will be tried - first followed by a read- only buffer. The :cdata:`NPY_WRITEABLE` - flag of the returned array will reflect which one was - successful. The data is assumed to start at ``offset`` bytes from - the start of the memory location for the object. The type of the - data in the buffer will be interpreted depending on the data- type - descriptor, ``dtype.`` If ``count`` is negative then it will be - determined from the size of the buffer and the requested itemsize, - otherwise, ``count`` represents how many elements should be - converted from the buffer. - -.. cfunction:: int PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src) - - Copy from the source array, ``src``, into the destination array, - ``dest``, performing a data-type conversion if necessary. If an - error occurs return -1 (otherwise 0). The shape of ``src`` must be - broadcastable to the shape of ``dest``. The data areas of dest - and src must not overlap. - -.. cfunction:: int PyArray_MoveInto(PyArrayObject* dest, PyArrayObject* src) - - Move data from the source array, ``src``, into the destination - array, ``dest``, performing a data-type conversion if - necessary. If an error occurs return -1 (otherwise 0). The shape - of ``src`` must be broadcastable to the shape of ``dest``. The - data areas of dest and src may overlap. - -.. cfunction:: PyArrayObject* PyArray_GETCONTIGUOUS(PyObject* op) - - If ``op`` is already (C-style) contiguous and well-behaved then - just return a reference, otherwise return a (contiguous and - well-behaved) copy of the array. The parameter op must be a - (sub-class of an) ndarray and no checking for that is done. - -.. cfunction:: PyObject* PyArray_FROM_O(PyObject* obj) - - Convert ``obj`` to an ndarray. The argument can be any nested - sequence or object that exports the array interface. This is a - macro form of :cfunc:`PyArray_FromAny` using ``NULL``, 0, 0, 0 for the - other arguments. 
Your code must be able to handle any data-type - descriptor and any combination of data-flags to use this macro. - -.. cfunction:: PyObject* PyArray_FROM_OF(PyObject* obj, int requirements) - - Similar to :cfunc:`PyArray_FROM_O` except it can take an argument - of *requirements* indicating properties the resulting array must - have. Available requirements that can be enforced are - :cdata:`NPY_CONTIGUOUS`, :cdata:`NPY_F_CONTIGUOUS`, - :cdata:`NPY_ALIGNED`, :cdata:`NPY_WRITEABLE`, - :cdata:`NPY_NOTSWAPPED`, :cdata:`NPY_ENSURECOPY`, - :cdata:`NPY_UPDATEIFCOPY`, :cdata:`NPY_FORCECAST`, and - :cdata:`NPY_ENSUREARRAY`. Standard combinations of flags can also - be used: - -.. cfunction:: PyObject* PyArray_FROM_OT(PyObject* obj, int typenum) - - Similar to :cfunc:`PyArray_FROM_O` except it can take an argument of - *typenum* specifying the type-number the returned array. - -.. cfunction:: PyObject* PyArray_FROM_OTF(PyObject* obj, int typenum, int requirements) - - Combination of :cfunc:`PyArray_FROM_OF` and :cfunc:`PyArray_FROM_OT` - allowing both a *typenum* and a *flags* argument to be provided.. - -.. cfunction:: PyObject* PyArray_FROMANY(PyObject* obj, int typenum, int min, int max, int requirements) - - Similar to :cfunc:`PyArray_FromAny` except the data-type is - specified using a typenumber. :cfunc:`PyArray_DescrFromType` - (*typenum*) is passed directly to :cfunc:`PyArray_FromAny`. This - macro also adds :cdata:`NPY_DEFAULT` to requirements if - :cdata:`NPY_ENSURECOPY` is passed in as requirements. - -.. cfunction:: PyObject *PyArray_CheckAxis(PyObject* obj, int* axis, int requirements) - - Encapsulate the functionality of functions and methods that take - the axis= keyword and work properly with None as the axis - argument. The input array is ``obj``, while ``*axis`` is a - converted integer (so that >=MAXDIMS is the None value), and - ``requirements`` gives the needed properties of ``obj``. 
The - output is a converted version of the input so that requirements - are met and if needed a flattening has occurred. On output - negative values of ``*axis`` are converted and the new value is - checked to ensure consistency with the shape of ``obj``. - - -Dealing with types ------------------- - - -General check of Python Type -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. cfunction:: PyArray_Check(op) - - Evaluates true if *op* is a Python object whose type is a sub-type - of :cdata:`PyArray_Type`. - -.. cfunction:: PyArray_CheckExact(op) - - Evaluates true if *op* is a Python object with type - :cdata:`PyArray_Type`. - -.. cfunction:: PyArray_HasArrayInterface(op, out) - - If ``op`` implements any part of the array interface, then ``out`` - will contain a new reference to the newly created ndarray using - the interface or ``out`` will contain ``NULL`` if an error during - conversion occurs. Otherwise, out will contain a borrowed - reference to :cdata:`Py_NotImplemented` and no error condition is set. - -.. cfunction:: PyArray_HasArrayInterfaceType(op, type, context, out) - - If ``op`` implements any part of the array interface, then ``out`` - will contain a new reference to the newly created ndarray using - the interface or ``out`` will contain ``NULL`` if an error during - conversion occurs. Otherwise, out will contain a borrowed - reference to Py_NotImplemented and no error condition is set. - This version allows setting of the type and context in the part of - the array interface that looks for the :obj:`__array__` attribute. - -.. cfunction:: PyArray_IsZeroDim(op) - - Evaluates true if *op* is an instance of (a subclass of) - :cdata:`PyArray_Type` and has 0 dimensions. - -.. cfunction:: PyArray_IsScalar(op, cls) - - Evaluates true if *op* is an instance of :cdata:`Py{cls}ArrType_Type`. - -.. 
cfunction:: PyArray_CheckScalar(op) - - Evaluates true if *op* is either an array scalar (an instance of a - sub-type of :cdata:`PyGenericArr_Type` ), or an instance of (a - sub-class of) :cdata:`PyArray_Type` whose dimensionality is 0. - -.. cfunction:: PyArray_IsPythonScalar(op) - - Evaluates true if *op* is a builtin Python "scalar" object (int, - float, complex, str, unicode, long, bool). - -.. cfunction:: PyArray_IsAnyScalar(op) - - Evaluates true if *op* is either a Python scalar or an array - scalar (an instance of a sub- type of :cdata:`PyGenericArr_Type` ). - - -Data-type checking -^^^^^^^^^^^^^^^^^^ - -For the typenum macros, the argument is an integer representing an -enumerated array data type. For the array type checking macros the -argument must be a :ctype:`PyObject *` that can be directly interpreted as a -:ctype:`PyArrayObject *`. - -.. cfunction:: PyTypeNum_ISUNSIGNED(num) - -.. cfunction:: PyDataType_ISUNSIGNED(descr) - -.. cfunction:: PyArray_ISUNSIGNED(obj) - - Type represents an unsigned integer. - -.. cfunction:: PyTypeNum_ISSIGNED(num) - -.. cfunction:: PyDataType_ISSIGNED(descr) - -.. cfunction:: PyArray_ISSIGNED(obj) - - Type represents a signed integer. - -.. cfunction:: PyTypeNum_ISINTEGER(num) - -.. cfunction:: PyDataType_ISINTEGER(descr) - -.. cfunction:: PyArray_ISINTEGER(obj) - - Type represents any integer. - -.. cfunction:: PyTypeNum_ISFLOAT(num) - -.. cfunction:: PyDataType_ISFLOAT(descr) - -.. cfunction:: PyArray_ISFLOAT(obj) - - Type represents any floating point number. - -.. cfunction:: PyTypeNum_ISCOMPLEX(num) - -.. cfunction:: PyDataType_ISCOMPLEX(descr) - -.. cfunction:: PyArray_ISCOMPLEX(obj) - - Type represents any complex floating point number. - -.. cfunction:: PyTypeNum_ISNUMBER(num) - -.. cfunction:: PyDataType_ISNUMBER(descr) - -.. cfunction:: PyArray_ISNUMBER(obj) - - Type represents any integer, floating point, or complex floating point - number. - -.. cfunction:: PyTypeNum_ISSTRING(num) - -.. 
cfunction:: PyDataType_ISSTRING(descr) - -.. cfunction:: PyArray_ISSTRING(obj) - - Type represents a string data type. - -.. cfunction:: PyTypeNum_ISPYTHON(num) - -.. cfunction:: PyDataType_ISPYTHON(descr) - -.. cfunction:: PyArray_ISPYTHON(obj) - - Type represents an enumerated type corresponding to one of the - standard Python scalar (bool, int, float, or complex). - -.. cfunction:: PyTypeNum_ISFLEXIBLE(num) - -.. cfunction:: PyDataType_ISFLEXIBLE(descr) - -.. cfunction:: PyArray_ISFLEXIBLE(obj) - - Type represents one of the flexible array types ( :cdata:`NPY_STRING`, - :cdata:`NPY_UNICODE`, or :cdata:`NPY_VOID` ). - -.. cfunction:: PyTypeNum_ISUSERDEF(num) - -.. cfunction:: PyDataType_ISUSERDEF(descr) - -.. cfunction:: PyArray_ISUSERDEF(obj) - - Type represents a user-defined type. - -.. cfunction:: PyTypeNum_ISEXTENDED(num) - -.. cfunction:: PyDataType_ISEXTENDED(descr) - -.. cfunction:: PyArray_ISEXTENDED(obj) - - Type is either flexible or user-defined. - -.. cfunction:: PyTypeNum_ISOBJECT(num) - -.. cfunction:: PyDataType_ISOBJECT(descr) - -.. cfunction:: PyArray_ISOBJECT(obj) - - Type represents object data type. - -.. cfunction:: PyTypeNum_ISBOOL(num) - -.. cfunction:: PyDataType_ISBOOL(descr) - -.. cfunction:: PyArray_ISBOOL(obj) - - Type represents Boolean data type. - -.. cfunction:: PyDataType_HASFIELDS(descr) - -.. cfunction:: PyArray_HASFIELDS(obj) - - Type has fields associated with it. - -.. cfunction:: PyArray_ISNOTSWAPPED(m) - - Evaluates true if the data area of the ndarray *m* is in machine - byte-order according to the array's data-type descriptor. - -.. cfunction:: PyArray_ISBYTESWAPPED(m) - - Evaluates true if the data area of the ndarray *m* is **not** in - machine byte-order according to the array's data-type descriptor. - -.. 
cfunction:: Bool PyArray_EquivTypes(PyArray_Descr* type1, PyArray_Descr* type2) - - Return :cdata:`NPY_TRUE` if *type1* and *type2* actually represent - equivalent types for this platform (the fortran member of each - type is ignored). For example, on 32-bit platforms, - :cdata:`NPY_LONG` and :cdata:`NPY_INT` are equivalent. Otherwise - return :cdata:`NPY_FALSE`. - -.. cfunction:: Bool PyArray_EquivArrTypes(PyArrayObject* a1, PyArrayObject * a2) - - Return :cdata:`NPY_TRUE` if *a1* and *a2* are arrays with equivalent - types for this platform. - -.. cfunction:: Bool PyArray_EquivTypenums(int typenum1, int typenum2) - - Special case of :cfunc:`PyArray_EquivTypes` (...) that does not accept - flexible data types but may be easier to call. - -.. cfunction:: int PyArray_EquivByteorders({byteorder} b1, {byteorder} b2) - - True if byteorder characters ( :cdata:`NPY_LITTLE`, - :cdata:`NPY_BIG`, :cdata:`NPY_NATIVE`, :cdata:`NPY_IGNORE` ) are - either equal or equivalent as to their specification of a native - byte order. Thus, on a little-endian machine :cdata:`NPY_LITTLE` - and :cdata:`NPY_NATIVE` are equivalent where they are not - equivalent on a big-endian machine. - - -Converting data types -^^^^^^^^^^^^^^^^^^^^^ - -.. cfunction:: PyObject* PyArray_Cast(PyArrayObject* arr, int typenum) - - Mainly for backwards compatibility to the Numeric C-API and for - simple casts to non-flexible types. Return a new array object with - the elements of *arr* cast to the data-type *typenum* which must - be one of the enumerated types and not a flexible type. - -.. cfunction:: PyObject* PyArray_CastToType(PyArrayObject* arr, PyArray_Descr* type, int fortran) - - Return a new array of the *type* specified, casting the elements - of *arr* as appropriate. The fortran argument specifies the - ordering of the output array. - -.. 
cfunction:: int PyArray_CastTo(PyArrayObject* out, PyArrayObject* in) - - As of 1.6, this function simply calls :cfunc:`PyArray_CopyInto`, - which handles the casting. - - Cast the elements of the array *in* into the array *out*. The - output array should be writeable, have an integer-multiple of the - number of elements in the input array (more than one copy can be - placed in out), and have a data type that is one of the builtin - types. Returns 0 on success and -1 if an error occurs. - -.. cfunction:: PyArray_VectorUnaryFunc* PyArray_GetCastFunc(PyArray_Descr* from, int totype) - - Return the low-level casting function to cast from the given - descriptor to the builtin type number. If no casting function - exists return ``NULL`` and set an error. Using this function - instead of direct access to *from* ->f->cast will allow support of - any user-defined casting functions added to a descriptor's casting - dictionary. - -.. cfunction:: int PyArray_CanCastSafely(int fromtype, int totype) - - Returns non-zero if an array of data type *fromtype* can be cast - to an array of data type *totype* without losing information. An - exception is that 64-bit integers are allowed to be cast to 64-bit - floating point values even though this can lose precision on large - integers so as not to proliferate the use of long doubles without - explicit requests. Flexible array types are not checked according - to their lengths with this function. - -.. cfunction:: int PyArray_CanCastTo(PyArray_Descr* fromtype, PyArray_Descr* totype) - - :cfunc:`PyArray_CanCastTypeTo` supersedes this function in - NumPy 1.6 and later. - - Equivalent to PyArray_CanCastTypeTo(fromtype, totype, NPY_SAFE_CASTING). - -.. cfunction:: int PyArray_CanCastTypeTo(PyArray_Descr* fromtype, PyArray_Descr* totype, NPY_CASTING casting) - - .. 
versionadded:: 1.6 - - Returns non-zero if an array of data type *fromtype* (which can - include flexible types) can be cast safely to an array of data - type *totype* (which can include flexible types) according to - the casting rule *casting*. For simple types with :cdata:`NPY_SAFE_CASTING`, - this is basically a wrapper around :cfunc:`PyArray_CanCastSafely`, but - for flexible types such as strings or unicode, it produces results - taking into account their sizes. - -.. cfunction:: int PyArray_CanCastArrayTo(PyArrayObject* arr, PyArray_Descr* totype, NPY_CASTING casting) - - .. versionadded:: 1.6 - - Returns non-zero if *arr* can be cast to *totype* according - to the casting rule given in *casting*. If *arr* is an array - scalar, its value is taken into account, and non-zero is also - returned when the value will not overflow or be truncated to - an integer when converting to a smaller type. - - This is almost the same as the result of - PyArray_CanCastTypeTo(PyArray_MinScalarType(arr), totype, casting), - but it also handles a special case arising because the set - of uint values is not a subset of the int values for types with the - same number of bits. - -.. cfunction:: PyArray_Descr* PyArray_MinScalarType(PyArrayObject* arr) - - .. versionadded:: 1.6 - - If *arr* is an array, returns its data type descriptor, but if - *arr* is an array scalar (has 0 dimensions), it finds the data type - of smallest size to which the value may be converted - without overflow or truncation to an integer. - - This function will not demote complex to float or anything to - boolean, but will demote a signed integer to an unsigned integer - when the scalar value is positive. - -.. cfunction:: PyArray_Descr* PyArray_PromoteTypes(PyArray_Descr* type1, PyArray_Descr* type2) - - .. versionadded:: 1.6 - - Finds the data type of smallest size and kind to which *type1* and - *type2* may be safely converted. This function is symmetric and - associative. - -.. 
cfunction:: PyArray_Descr* PyArray_ResultType(npy_intp narrs, PyArrayObject**arrs, npy_intp ndtypes, PyArray_Descr**dtypes) - - .. versionadded:: 1.6 - - This applies type promotion to all the inputs, - using the NumPy rules for combining scalars and arrays, to - determine the output type of a set of operands. This is the - same result type that ufuncs produce. The specific algorithm - used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :cfunc:`PyArray_PromoteTypes` - to produce the return value. - - Otherwise, PyArray_MinScalarType is called on each array, and - the resulting data types are all combined with - :cfunc:`PyArray_PromoteTypes` to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :cfunc:`PyArray_MinScalarType`, but handled as a special case in - PyArray_ResultType. - -.. cfunction:: int PyArray_ObjectType(PyObject* op, int mintype) - - This function is superseded by :cfunc:`PyArray_MinScalarType` and/or - :cfunc:`PyArray_ResultType`. - - This function is useful for determining a common type that two or - more arrays can be converted to. It only works for non-flexible - array types as no itemsize information is passed. The *mintype* - argument represents the minimum type acceptable, and *op* - represents the object that will be converted to an array. The - return value is the enumerated typenumber that represents the - data-type that *op* should have. - -.. cfunction:: void PyArray_ArrayType(PyObject* op, PyArray_Descr* mintype, PyArray_Descr* outtype) - - This function is superseded by :cfunc:`PyArray_ResultType`. 
- - This function works similarly to :cfunc:`PyArray_ObjectType` (...) - except it handles flexible arrays. The *mintype* argument can have - an itemsize member and the *outtype* argument will have an - itemsize member at least as big but perhaps bigger depending on - the object *op*. - -.. cfunction:: PyArrayObject** PyArray_ConvertToCommonType(PyObject* op, int* n) - - The functionality this provides is largely superseded by iterator - :ctype:`NpyIter` introduced in 1.6, with flag - :cdata:`NPY_ITER_COMMON_DTYPE` or with the same dtype parameter for - all operands. - - Convert a sequence of Python objects contained in *op* to an array - of ndarrays each having the same data type. The type is selected - based on the typenumber (larger type number is chosen over a - smaller one) ignoring objects that are only scalars. The length of - the sequence is returned in *n*, and an *n* -length array of - :ctype:`PyArrayObject` pointers is the return value (or ``NULL`` if an - error occurs). The returned array must be freed by the caller of - this routine (using :cfunc:`PyDataMem_FREE` ) and all the array objects - in it ``DECREF`` 'd or a memory-leak will occur. The example - template-code below shows a typical usage: - - .. code-block:: c - - mps = PyArray_ConvertToCommonType(obj, &n); - if (mps==NULL) return NULL; - {code} - - for (i=0; i<n; i++) Py_DECREF(mps[i]); - PyDataMem_FREE(mps); - {return} - -.. cfunction:: char* PyArray_Zero(PyArrayObject* arr) - - A pointer to newly created memory of size *arr* ->itemsize that - holds the representation of 0 for that type. The returned pointer, - *ret*, **must be freed** using :cfunc:`PyDataMem_FREE` (ret) when it is - not needed anymore. - -.. cfunction:: char* PyArray_One(PyArrayObject* arr) - - A pointer to newly created memory of size *arr* ->itemsize that - holds the representation of 1 for that type. The returned pointer, - *ret*, **must be freed** using :cfunc:`PyDataMem_FREE` (ret) when it - is not needed anymore. - -.. cfunction:: int PyArray_ValidType(int typenum) - - Returns :cdata:`NPY_TRUE` if *typenum* represents a valid type-number - (builtin or user-defined or character code). 
Otherwise, this - function returns :cdata:`NPY_FALSE`. - - -New data types -^^^^^^^^^^^^^^ - -.. cfunction:: void PyArray_InitArrFuncs(PyArray_ArrFuncs* f) - - Initialize all function pointers and members to ``NULL``. - -.. cfunction:: int PyArray_RegisterDataType(PyArray_Descr* dtype) - - Register a data-type as a new user-defined data type for - arrays. The type must have most of its entries filled in. This is - not always checked and errors can produce segfaults. In - particular, the typeobj member of the ``dtype`` structure must be - filled with a Python type that has a fixed-size element-size that - corresponds to the elsize member of *dtype*. Also the ``f`` - member must have the required functions: nonzero, copyswap, - copyswapn, getitem, setitem, and cast (some of the cast functions - may be ``NULL`` if no support is desired). To avoid confusion, you - should choose a unique character typecode but this is not enforced - and not relied on internally. - - A user-defined type number is returned that uniquely identifies - the type. A pointer to the new structure can then be obtained from - :cfunc:`PyArray_DescrFromType` using the returned type number. A -1 is - returned if an error occurs. If this *dtype* has already been - registered (checked only by the address of the pointer), then - return the previously-assigned type-number. - -.. cfunction:: int PyArray_RegisterCastFunc(PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc) - - Register a low-level casting function, *castfunc*, to convert - from the data-type, *descr*, to the given data-type number, - *totype*. Any old casting function is over-written. A ``0`` is - returned on success or a ``-1`` on failure. - -.. cfunction:: int PyArray_RegisterCanCast(PyArray_Descr* descr, int totype, PyArray_SCALARKIND scalar) - - Register the data-type number, *totype*, as castable from - data-type object, *descr*, of the given *scalar* kind. 
Use - *scalar* = :cdata:`NPY_NOSCALAR` to register that an array of data-type - *descr* can be cast safely to a data-type whose type_number is - *totype*. - - -Special functions for PyArray_OBJECT -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. cfunction:: int PyArray_INCREF(PyArrayObject* op) - - Used for an array, *op*, that contains any Python objects. It - increments the reference count of every object in the array - according to the data-type of *op*. A -1 is returned if an error - occurs, otherwise 0 is returned. - -.. cfunction:: void PyArray_Item_INCREF(char* ptr, PyArray_Descr* dtype) - - A function to INCREF all the objects at the location *ptr* - according to the data-type *dtype*. If *ptr* is the start of a - record with an object at any offset, then this will (recursively) - increment the reference count of all object-like items in the - record. - -.. cfunction:: int PyArray_XDECREF(PyArrayObject* op) - - Used for an array, *op*, that contains any Python objects. It - decrements the reference count of every object in the array - according to the data-type of *op*. Normal return value is 0. A - -1 is returned if an error occurs. - -.. cfunction:: void PyArray_Item_XDECREF(char* ptr, PyArray_Descr* dtype) - - A function to XDECREF all the object-like items at the location - *ptr* as recorded in the data-type, *dtype*. This works - recursively so that if ``dtype`` itself has fields with data-types - that contain object-like items, all the object-like fields will be - XDECREF ``'d``. - -.. cfunction:: void PyArray_FillObjectArray(PyArrayObject* arr, PyObject* obj) - - Fill a newly created array with a single value obj at all - locations in the structure with object data-types. No checking is - performed but *arr* must be of data-type :ctype:`PyArray_OBJECT` and be - single-segment and uninitialized (no previous objects in - position). 
Use :cfunc:`PyArray_DECREF` (*arr*) if you need to - decrement all the items in the object array prior to calling this - function. - - -Array flags ------------ - -The ``flags`` attribute of the ``PyArrayObject`` structure contains -important information about the memory used by the array (pointed to -by the data member) This flag information must be kept accurate or -strange results and even segfaults may result. - -There are 6 (binary) flags that describe the memory area used by the -data buffer. These constants are defined in ``arrayobject.h`` and -determine the bit-position of the flag. Python exposes a nice -attribute- based interface as well as a dictionary-like interface for -getting (and, if appropriate, setting) these flags. - -Memory areas of all kinds can be pointed to by an ndarray, -necessitating these flags. If you get an arbitrary ``PyArrayObject`` -in C-code, you need to be aware of the flags that are set. If you -need to guarantee a certain kind of array (like :cdata:`NPY_C_CONTIGUOUS` and -:cdata:`NPY_BEHAVED`), then pass these requirements into the -PyArray_FromAny function. - - -Basic Array Flags -^^^^^^^^^^^^^^^^^ - -An ndarray can have a data segment that is not a simple contiguous -chunk of well-behaved memory you can manipulate. It may not be aligned -with word boundaries (very important on some platforms). It might have -its data in a different byte-order than the machine recognizes. It -might not be writeable. It might be in Fortan-contiguous order. The -array flags are used to indicate what can be said about data -associated with an array. - -.. cvar:: NPY_C_CONTIGUOUS - - The data area is in C-style contiguous order (last index varies the - fastest). - -.. cvar:: NPY_F_CONTIGUOUS - - The data area is in Fortran-style contiguous order (first index varies - the fastest). - -Notice that contiguous 1-d arrays are always both Fortran -contiguous and C contiguous. 
Both of these flags can be checked and -are convenience flags only as whether or not an array is -:cdata:`NPY_C_CONTIGUOUS` or :cdata:`NPY_F_CONTIGUOUS` can be determined by the -``strides``, ``dimensions``, and ``itemsize`` attributes. - -.. cvar:: NPY_OWNDATA - - The data area is owned by this array. - -.. cvar:: NPY_ALIGNED - - The data area is aligned appropriately (for all strides). - -.. cvar:: NPY_WRITEABLE - - The data area can be written to. - - Notice that the above 3 flags are are defined so that a new, well- - behaved array has these flags defined as true. - -.. cvar:: NPY_UPDATEIFCOPY - - The data area represents a (well-behaved) copy whose information - should be transferred back to the original when this array is deleted. - - This is a special flag that is set if this array represents a copy - made because a user required certain flags in - :cfunc:`PyArray_FromAny` and a copy had to be made of some other - array (and the user asked for this flag to be set in such a - situation). The base attribute then points to the "misbehaved" - array (which is set read_only). When the array with this flag set - is deallocated, it will copy its contents back to the "misbehaved" - array (casting if necessary) and will reset the "misbehaved" array - to :cdata:`NPY_WRITEABLE`. If the "misbehaved" array was not - :cdata:`NPY_WRITEABLE` to begin with then :cfunc:`PyArray_FromAny` - would have returned an error because :cdata:`NPY_UPDATEIFCOPY` - would not have been possible. - -:cfunc:`PyArray_UpdateFlags` (obj, flags) will update the -``obj->flags`` for ``flags`` which can be any of -:cdata:`NPY_C_CONTIGUOUS`, :cdata:`NPY_F_CONTIGUOUS`, :cdata:`NPY_ALIGNED`, -or :cdata:`NPY_WRITEABLE`. - - -Combinations of array flags -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. cvar:: NPY_BEHAVED - - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_WRITEABLE` - -.. cvar:: NPY_CARRAY - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_BEHAVED` - -.. 
cvar:: NPY_CARRAY_RO - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - -.. cvar:: NPY_FARRAY - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_BEHAVED` - -.. cvar:: NPY_FARRAY_RO - - :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - -.. cvar:: NPY_DEFAULT - - :cdata:`NPY_CARRAY` - -.. cvar:: NPY_UPDATE_ALL - - :cdata:`NPY_C_CONTIGUOUS` \| :cdata:`NPY_F_CONTIGUOUS` \| :cdata:`NPY_ALIGNED` - - -Flag-like constants -^^^^^^^^^^^^^^^^^^^ - -These constants are used in :cfunc:`PyArray_FromAny` (and its macro forms) to -specify desired properties of the new array. - -.. cvar:: NPY_FORCECAST - - Cast to the desired type, even if it can't be done without losing - information. - -.. cvar:: NPY_ENSURECOPY - - Make sure the resulting array is a copy of the original. - -.. cvar:: NPY_ENSUREARRAY - - Make sure the resulting object is an actual ndarray (or bigndarray), - and not a sub-class. - -.. cvar:: NPY_NOTSWAPPED - - Only used in :cfunc:`PyArray_CheckFromAny` to over-ride the byteorder - of the data-type object passed in. - -.. cvar:: NPY_BEHAVED_NS - - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_WRITEABLE` \| :cdata:`NPY_NOTSWAPPED` - - -Flag checking -^^^^^^^^^^^^^ - -For all of these macros *arr* must be an instance of a (subclass of) -:cdata:`PyArray_Type`, but no checking is done. - -.. cfunction:: PyArray_CHKFLAGS(arr, flags) - - The first parameter, arr, must be an ndarray or subclass. The - parameter, *flags*, should be an integer consisting of bitwise - combinations of the possible flags an array can have: - :cdata:`NPY_C_CONTIGUOUS`, :cdata:`NPY_F_CONTIGUOUS`, - :cdata:`NPY_OWNDATA`, :cdata:`NPY_ALIGNED`, - :cdata:`NPY_WRITEABLE`, :cdata:`NPY_UPDATEIFCOPY`. - -.. cfunction:: PyArray_ISCONTIGUOUS(arr) - - Evaluates true if *arr* is C-style contiguous. - -.. cfunction:: PyArray_ISFORTRAN(arr) - - Evaluates true if *arr* is Fortran-style contiguous. - -.. cfunction:: PyArray_ISWRITEABLE(arr) - - Evaluates true if the data area of *arr* can be written to - -.. 
cfunction:: PyArray_ISALIGNED(arr) - - Evaluates true if the data area of *arr* is properly aligned on - the machine. - -.. cfunction:: PyArray_ISBEHAVED(arr) - - Evaluates true if the data area of *arr* is aligned and writeable - and in machine byte-order according to its descriptor. - -.. cfunction:: PyArray_ISBEHAVED_RO(arr) - - Evaluates true if the data area of *arr* is aligned and in machine - byte-order. - -.. cfunction:: PyArray_ISCARRAY(arr) - - Evaluates true if the data area of *arr* is C-style contiguous, - and :cfunc:`PyArray_ISBEHAVED` (*arr*) is true. - -.. cfunction:: PyArray_ISFARRAY(arr) - - Evaluates true if the data area of *arr* is Fortran-style - contiguous and :cfunc:`PyArray_ISBEHAVED` (*arr*) is true. - -.. cfunction:: PyArray_ISCARRAY_RO(arr) - - Evaluates true if the data area of *arr* is C-style contiguous, - aligned, and in machine byte-order. - -.. cfunction:: PyArray_ISFARRAY_RO(arr) - - Evaluates true if the data area of *arr* is Fortran-style - contiguous, aligned, and in machine byte-order. - -.. cfunction:: PyArray_ISONESEGMENT(arr) - - Evaluates true if the data area of *arr* consists of a single - (C-style or Fortran-style) contiguous segment. - -.. cfunction:: void PyArray_UpdateFlags(PyArrayObject* arr, int flagmask) - - The :cdata:`NPY_C_CONTIGUOUS`, :cdata:`NPY_ALIGNED`, and - :cdata:`NPY_F_CONTIGUOUS` array flags can be "calculated" from the - array object itself. This routine updates one or more of these - flags of *arr* as specified in *flagmask* by performing the - required calculation. - - -.. warning:: - - It is important to keep the flags updated (using - :cfunc:`PyArray_UpdateFlags` can help) whenever a manipulation with an - array is performed that might cause them to change. Later - calculations in NumPy that rely on the state of these flags do not - repeat the calculation to update them. - - -Array method alternative API ----------------------------- - - -Conversion -^^^^^^^^^^ - -.. 
cfunction:: PyObject* PyArray_GetField(PyArrayObject* self, PyArray_Descr* dtype, int offset) - - Equivalent to :meth:`ndarray.getfield` (*self*, *dtype*, *offset*). Return - a new array of the given *dtype* using the data in the current - array at a specified *offset* in bytes. The *offset* plus the - itemsize of the new array type must be less than *self* - ->descr->elsize or an error is raised. The same shape and strides - as the original array are used. Therefore, this function has the - effect of returning a field from a record array. But, it can also - be used to select specific bytes or groups of bytes from any array - type. - -.. cfunction:: int PyArray_SetField(PyArrayObject* self, PyArray_Descr* dtype, int offset, PyObject* val) - - Equivalent to :meth:`ndarray.setfield` (*self*, *val*, *dtype*, *offset* - ). Set the field starting at *offset* in bytes and of the given - *dtype* to *val*. The *offset* plus *dtype* ->elsize must be less - than *self* ->descr->elsize or an error is raised. Otherwise, the - *val* argument is converted to an array and copied into the field - pointed to. If necessary, the elements of *val* are repeated to - fill the destination array, But, the number of elements in the - destination must be an integer multiple of the number of elements - in *val*. - -.. cfunction:: PyObject* PyArray_Byteswap(PyArrayObject* self, Bool inplace) - - Equivalent to :meth:`ndarray.byteswap` (*self*, *inplace*). Return an array - whose data area is byteswapped. If *inplace* is non-zero, then do - the byteswap inplace and return a reference to self. Otherwise, - create a byteswapped copy and leave self unchanged. - -.. cfunction:: PyObject* PyArray_NewCopy(PyArrayObject* old, NPY_ORDER order) - - Equivalent to :meth:`ndarray.copy` (*self*, *fortran*). Make a copy of the - *old* array. The returned array is always aligned and writeable - with data interpreted the same as the old array. 
If *order* is - :cdata:`NPY_CORDER`, then a C-style contiguous array is returned. If - *order* is :cdata:`NPY_FORTRANORDER`, then a Fortran-style contiguous - array is returned. If *order is* :cdata:`NPY_ANYORDER`, then the array - returned is Fortran-style contiguous only if the old one is; - otherwise, it is C-style contiguous. - -.. cfunction:: PyObject* PyArray_ToList(PyArrayObject* self) - - Equivalent to :meth:`ndarray.tolist` (*self*). Return a nested Python list - from *self*. - -.. cfunction:: PyObject* PyArray_ToString(PyArrayObject* self, NPY_ORDER order) - - Equivalent to :meth:`ndarray.tostring` (*self*, *order*). Return the bytes - of this array in a Python string. - -.. cfunction:: PyObject* PyArray_ToFile(PyArrayObject* self, FILE* fp, char* sep, char* format) - - Write the contents of *self* to the file pointer *fp* in C-style - contiguous fashion. Write the data as binary bytes if *sep* is the - string ""or ``NULL``. Otherwise, write the contents of *self* as - text using the *sep* string as the item separator. Each item will - be printed to the file. If the *format* string is not ``NULL`` or - "", then it is a Python print statement format string showing how - the items are to be written. - -.. cfunction:: int PyArray_Dump(PyObject* self, PyObject* file, int protocol) - - Pickle the object in *self* to the given *file* (either a string - or a Python file object). If *file* is a Python string it is - considered to be the name of a file which is then opened in binary - mode. The given *protocol* is used (if *protocol* is negative, or - the highest available is used). This is a simple wrapper around - cPickle.dump(*self*, *file*, *protocol*). - -.. cfunction:: PyObject* PyArray_Dumps(PyObject* self, int protocol) - - Pickle the object in *self* to a Python string and return it. Use - the Pickle *protocol* provided (or the highest available if - *protocol* is negative). - -.. 
cfunction:: int PyArray_FillWithScalar(PyArrayObject* arr, PyObject* obj) - - Fill the array, *arr*, with the given scalar object, *obj*. The - object is first converted to the data type of *arr*, and then - copied into every location. A -1 is returned if an error occurs, - otherwise 0 is returned. - -.. cfunction:: PyObject* PyArray_View(PyArrayObject* self, PyArray_Descr* dtype) - - Equivalent to :meth:`ndarray.view` (*self*, *dtype*). Return a new view of - the array *self* as possibly a different data-type, *dtype*. If - *dtype* is ``NULL``, then the returned array will have the same - data type as *self*. The new data-type must be consistent with - the size of *self*. Either the itemsizes must be identical, or - *self* must be single-segment and the total number of bytes must - be the same. In the latter case the dimensions of the returned - array will be altered in the last (or first for Fortran-style - contiguous arrays) dimension. The data area of the returned array - and self is exactly the same. - - -Shape Manipulation -^^^^^^^^^^^^^^^^^^ - -.. cfunction:: PyObject* PyArray_Newshape(PyArrayObject* self, PyArray_Dims* newshape) - - Result will be a new array (pointing to the same memory location - as *self* if possible), but having a shape given by *newshape* - . If the new shape is not compatible with the strides of *self*, - then a copy of the array with the new specified shape will be - returned. - -.. cfunction:: PyObject* PyArray_Reshape(PyArrayObject* self, PyObject* shape) - - Equivalent to :meth:`ndarray.reshape` (*self*, *shape*) where *shape* is a - sequence. Converts *shape* to a :ctype:`PyArray_Dims` structure and - calls :cfunc:`PyArray_Newshape` internally. - -.. cfunction:: PyObject* PyArray_Squeeze(PyArrayObject* self) - - Equivalent to :meth:`ndarray.squeeze` (*self*). Return a new view of *self* - with all of the dimensions of length 1 removed from the shape. - -.. warning:: - - matrix objects are always 2-dimensional. 
Therefore, - :cfunc:`PyArray_Squeeze` has no effect on arrays of matrix sub-class. - -.. cfunction:: PyObject* PyArray_SwapAxes(PyArrayObject* self, int a1, int a2) - - Equivalent to :meth:`ndarray.swapaxes` (*self*, *a1*, *a2*). The returned - array is a new view of the data in *self* with the given axes, - *a1* and *a2*, swapped. - -.. cfunction:: PyObject* PyArray_Resize(PyArrayObject* self, PyArray_Dims* newshape, int refcheck, NPY_ORDER fortran) - - Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, refcheck - ``=`` *refcheck*, order= fortran ). This function only works on - single-segment arrays. It changes the shape of *self* in place and - will reallocate the memory for *self* if *newshape* has a - different total number of elements than the old shape. If - reallocation is necessary, then *self* must own its data, have - *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and - (unless refcheck is 0) not be referenced by any other array. A - reference to the new array is returned. The fortran argument can - be :cdata:`NPY_ANYORDER`, :cdata:`NPY_CORDER`, or - :cdata:`NPY_FORTRANORDER`. It currently has no effect. Eventually - it could be used to determine how the resize operation should view - the data when constructing a differently-dimensioned array. - -.. cfunction:: PyObject* PyArray_Transpose(PyArrayObject* self, PyArray_Dims* permute) - - Equivalent to :meth:`ndarray.transpose` (*self*, *permute*). Permute the - axes of the ndarray object *self* according to the data structure - *permute* and return the result. If *permute* is ``NULL``, then - the resulting array has its axes reversed. For example if *self* - has shape :math:`10\times20\times30`, and *permute* ``.ptr`` is - (0,2,1) the shape of the result is :math:`10\times30\times20.` If - *permute* is ``NULL``, the shape of the result is - :math:`30\times20\times10.` - -.. 
cfunction:: PyObject* PyArray_Flatten(PyArrayObject* self, NPY_ORDER order) - - Equivalent to :meth:`ndarray.flatten` (*self*, *order*). Return a 1-d copy - of the array. If *order* is :cdata:`NPY_FORTRANORDER` the elements are - scanned out in Fortran order (first-dimension varies the - fastest). If *order* is :cdata:`NPY_CORDER`, the elements of ``self`` - are scanned in C-order (last dimension varies the fastest). If - *order* is :cdata:`NPY_ANYORDER`, then the result of - :cfunc:`PyArray_ISFORTRAN` (*self*) is used to determine which order - to flatten. - -.. cfunction:: PyObject* PyArray_Ravel(PyArrayObject* self, NPY_ORDER order) - - Equivalent to *self*.ravel(*order*). Same basic functionality - as :cfunc:`PyArray_Flatten` (*self*, *order*) except if *order* is 0 - and *self* is C-style contiguous, the shape is altered but no copy - is performed. - - -Item selection and manipulation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. cfunction:: PyObject* PyArray_TakeFrom(PyArrayObject* self, PyObject* indices, int axis, PyArrayObject* ret, NPY_CLIPMODE clipmode) - - Equivalent to :meth:`ndarray.take` (*self*, *indices*, *axis*, *ret*, - *clipmode*) except *axis* =None in Python is obtained by setting - *axis* = :cdata:`NPY_MAXDIMS` in C. Extract the items from self - indicated by the integer-valued *indices* along the given *axis.* - The clipmode argument can be :cdata:`NPY_RAISE`, :cdata:`NPY_WRAP`, or - :cdata:`NPY_CLIP` to indicate what to do with out-of-bound indices. The - *ret* argument can specify an output array rather than having one - created internally. - -.. cfunction:: PyObject* PyArray_PutTo(PyArrayObject* self, PyObject* values, PyObject* indices, NPY_CLIPMODE clipmode) - - Equivalent to *self*.put(*values*, *indices*, *clipmode* - ). Put *values* into *self* at the corresponding (flattened) - *indices*. If *values* is too small it will be repeated as - necessary. - -.. 
cfunction:: PyObject* PyArray_PutMask(PyArrayObject* self, PyObject* values, PyObject* mask) - - Place the *values* in *self* wherever corresponding positions - (using a flattened context) in *mask* are true. The *mask* and - *self* arrays must have the same total number of elements. If - *values* is too small, it will be repeated as necessary. - -.. cfunction:: PyObject* PyArray_Repeat(PyArrayObject* self, PyObject* op, int axis) - - Equivalent to :meth:`ndarray.repeat` (*self*, *op*, *axis*). Copy the - elements of *self*, *op* times along the given *axis*. Either - *op* is a scalar integer or a sequence of length *self* - ->dimensions[ *axis* ] indicating how many times to repeat each - item along the axis. - -.. cfunction:: PyObject* PyArray_Choose(PyArrayObject* self, PyObject* op, PyArrayObject* ret, NPY_CLIPMODE clipmode) - - Equivalent to :meth:`ndarray.choose` (*self*, *op*, *ret*, *clipmode*). - Create a new array by selecting elements from the sequence of - arrays in *op* based on the integer values in *self*. The arrays - must all be broadcastable to the same shape and the entries in - *self* should be between 0 and len(*op*). The output is placed - in *ret* unless it is ``NULL`` in which case a new output is - created. The *clipmode* argument determines behavior for when - entries in *self* are not between 0 and len(*op*). - - .. cvar:: NPY_RAISE - - raise a ValueError; - - .. cvar:: NPY_WRAP - - wrap values < 0 by adding len(*op*) and values >=len(*op*) - by subtracting len(*op*) until they are in range; - - .. cvar:: NPY_CLIP - - all values are clipped to the region [0, len(*op*) ). - - -.. cfunction:: PyObject* PyArray_Sort(PyArrayObject* self, int axis) - - Equivalent to :meth:`ndarray.sort` (*self*, *axis*). Return an array with - the items of *self* sorted along *axis*. - -.. cfunction:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis) - - Equivalent to :meth:`ndarray.argsort` (*self*, *axis*). 
Return an array of - indices such that selection of these indices along the given - ``axis`` would return a sorted version of *self*. If *self* - ->descr is a data-type with fields defined, then - self->descr->names is used to determine the sort order. A - comparison where the first field is equal will use the second - field and so on. To alter the sort order of a record array, create - a new data-type with a different order of names and construct a - view of the array with that new data-type. - -.. cfunction:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis) - - Given a sequence of arrays (*sort_keys*) of the same shape, - return an array of indices (similar to :cfunc:`PyArray_ArgSort` (...)) - that would sort the arrays lexicographically. A lexicographic sort - specifies that when two keys are found to be equal, the order is - based on comparison of subsequent keys. A merge sort (which leaves - equal entries unmoved) is required to be defined for the - types. The sort is accomplished by sorting the indices first using - the first *sort_key* and then using the second *sort_key* and so - forth. This is equivalent to the lexsort(*sort_keys*, *axis*) - Python command. Because of the way the merge-sort works, be sure - to understand the order the *sort_keys* must be in (reversed from - the order you would use when comparing two elements). - - If these arrays are all collected in a record array, then - :cfunc:`PyArray_Sort` (...) can also be used to sort the array - directly. - -.. cfunction:: PyObject* PyArray_SearchSorted(PyArrayObject* self, PyObject* values) - - Equivalent to :meth:`ndarray.searchsorted` (*self*, *values*). Assuming - *self* is a 1-d array in ascending order representing bin - boundaries then the output is an array the same shape as *values* - of bin numbers, giving the bin into which each item in *values* - would be placed. No checking is done on whether or not self is in - ascending order. - -.. 
cfunction:: PyObject* PyArray_Diagonal(PyArrayObject* self, int offset, int axis1, int axis2) - - Equivalent to :meth:`ndarray.diagonal` (*self*, *offset*, *axis1*, *axis2* - ). Return the *offset* diagonals of the 2-d arrays defined by - *axis1* and *axis2*. - -.. cfunction:: npy_intp PyArray_CountNonzero(PyArrayObject* self) - - .. versionadded:: 1.6 - - Counts the number of non-zero elements in the array object *self*. - -.. cfunction:: PyObject* PyArray_Nonzero(PyArrayObject* self) - - Equivalent to :meth:`ndarray.nonzero` (*self*). Returns a tuple of index - arrays that select elements of *self* that are nonzero. If (nd= - :cfunc:`PyArray_NDIM` ( ``self`` ))==1, then a single index array is - returned. The index arrays have data type :cdata:`NPY_INTP`. If a - tuple is returned (nd :math:`\neq` 1), then its length is nd. - -.. cfunction:: PyObject* PyArray_Compress(PyArrayObject* self, PyObject* condition, int axis, PyArrayObject* out) - - Equivalent to :meth:`ndarray.compress` (*self*, *condition*, *axis* - ). Return the elements along *axis* corresponding to elements of - *condition* that are true. - - -Calculation -^^^^^^^^^^^ - -.. tip:: - - Pass in :cdata:`NPY_MAXDIMS` for axis in order to achieve the same - effect that is obtained by passing in *axis* = :const:`None` in Python - (treating the array as a 1-d array). - -.. cfunction:: PyObject* PyArray_ArgMax(PyArrayObject* self, int axis) - - Equivalent to :meth:`ndarray.argmax` (*self*, *axis*). Return the index of - the largest element of *self* along *axis*. - -.. cfunction:: PyObject* PyArray_ArgMin(PyArrayObject* self, int axis) - - Equivalent to :meth:`ndarray.argmin` (*self*, *axis*). Return the index of - the smallest element of *self* along *axis*. - -.. cfunction:: PyObject* PyArray_Max(PyArrayObject* self, int axis, PyArrayObject* out) - - Equivalent to :meth:`ndarray.max` (*self*, *axis*). Return the largest - element of *self* along the given *axis*. - -.. 
cfunction:: PyObject* PyArray_Min(PyArrayObject* self, int axis, PyArrayObject* out) - - Equivalent to :meth:`ndarray.min` (*self*, *axis*). Return the smallest - element of *self* along the given *axis*. - -.. cfunction:: PyObject* PyArray_Ptp(PyArrayObject* self, int axis, PyArrayObject* out) - - Equivalent to :meth:`ndarray.ptp` (*self*, *axis*). Return the difference - between the largest element of *self* along *axis* and the - smallest element of *self* along *axis*. - - - -.. note:: - - The rtype argument specifies the data-type the reduction should - take place over. This is important if the data-type of the array - is not "large" enough to handle the output. By default, all - integer data-types are made at least as large as :cdata:`NPY_LONG` - for the "add" and "multiply" ufuncs (which form the basis for - mean, sum, cumsum, prod, and cumprod functions). - -.. cfunction:: PyObject* PyArray_Mean(PyArrayObject* self, int axis, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.mean` (*self*, *axis*, *rtype*). Returns the - mean of the elements along the given *axis*, using the enumerated - type *rtype* as the data type to sum in. Default sum behavior is - obtained using :cdata:`PyArray_NOTYPE` for *rtype*. - -.. cfunction:: PyObject* PyArray_Trace(PyArrayObject* self, int offset, int axis1, int axis2, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.trace` (*self*, *offset*, *axis1*, *axis2*, - *rtype*). Return the sum (using *rtype* as the data type of - summation) over the *offset* diagonal elements of the 2-d arrays - defined by *axis1* and *axis2* variables. A positive offset - chooses diagonals above the main diagonal. A negative offset - selects diagonals below the main diagonal. - -.. cfunction:: PyObject* PyArray_Clip(PyArrayObject* self, PyObject* min, PyObject* max) - - Equivalent to :meth:`ndarray.clip` (*self*, *min*, *max*). 
Clip an array, - *self*, so that values larger than *max* are fixed to *max* and - values less than *min* are fixed to *min*. - -.. cfunction:: PyObject* PyArray_Conjugate(PyArrayObject* self) - - Equivalent to :meth:`ndarray.conjugate` (*self*). - Return the complex conjugate of *self*. If *self* is not of - complex data type, then return *self* with a reference. - -.. cfunction:: PyObject* PyArray_Round(PyArrayObject* self, int decimals, PyArrayObject* out) - - Equivalent to :meth:`ndarray.round` (*self*, *decimals*, *out*). Returns - the array with elements rounded to the nearest decimal place. The - decimal place is defined as the :math:`10^{-\textrm{decimals}}` - digit so that negative *decimals* cause rounding to the nearest 10's, 100's, etc. If out is ``NULL``, then the output array is created, otherwise the output is placed in *out* which must be the correct size and type. - -.. cfunction:: PyObject* PyArray_Std(PyArrayObject* self, int axis, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.std` (*self*, *axis*, *rtype*). Return the - standard deviation using data along *axis* converted to data type - *rtype*. - -.. cfunction:: PyObject* PyArray_Sum(PyArrayObject* self, int axis, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.sum` (*self*, *axis*, *rtype*). Return 1-d - vector sums of elements in *self* along *axis*. Perform the sum - after converting data to data type *rtype*. - -.. cfunction:: PyObject* PyArray_CumSum(PyArrayObject* self, int axis, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.cumsum` (*self*, *axis*, *rtype*). Return - cumulative 1-d sums of elements in *self* along *axis*. Perform - the sum after converting data to data type *rtype*. - -.. cfunction:: PyObject* PyArray_Prod(PyArrayObject* self, int axis, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.prod` (*self*, *axis*, *rtype*). Return 1-d - products of elements in *self* along *axis*. 
Perform the product - after converting data to data type *rtype*. - -.. cfunction:: PyObject* PyArray_CumProd(PyArrayObject* self, int axis, int rtype, PyArrayObject* out) - - Equivalent to :meth:`ndarray.cumprod` (*self*, *axis*, *rtype*). Return - 1-d cumulative products of elements in ``self`` along ``axis``. - Perform the product after converting data to data type ``rtype``. - -.. cfunction:: PyObject* PyArray_All(PyArrayObject* self, int axis, PyArrayObject* out) - - Equivalent to :meth:`ndarray.all` (*self*, *axis*). Return an array with - True elements for every 1-d sub-array of ``self`` defined by - ``axis`` in which all the elements are True. - -.. cfunction:: PyObject* PyArray_Any(PyArrayObject* self, int axis, PyArrayObject* out) - - Equivalent to :meth:`ndarray.any` (*self*, *axis*). Return an array with - True elements for every 1-d sub-array of *self* defined by *axis* - in which any of the elements are True. - -Functions ---------- - - -Array Functions -^^^^^^^^^^^^^^^ - -.. cfunction:: int PyArray_AsCArray(PyObject** op, void* ptr, npy_intp* dims, int nd, int typenum, int itemsize) - - Sometimes it is useful to access a multidimensional array as a - C-style multi-dimensional array so that algorithms can be - implemented using C's a[i][j][k] syntax. This routine returns a - pointer, *ptr*, that simulates this kind of C-style array, for - 1-, 2-, and 3-d ndarrays. - - :param op: - - The address to any Python object. This Python object will be replaced - with an equivalent well-behaved, C-style contiguous, ndarray of the - given data type specified by the last two arguments. Be sure that - stealing a reference in this way to the input object is justified. - - :param ptr: - - The address to a (ctype* for 1-d, ctype** for 2-d or ctype*** for 3-d) - variable where ctype is the equivalent C-type for the data type. On - return, *ptr* will be addressable as a 1-d, 2-d, or 3-d array. 
- - :param dims: - - An output array that contains the shape of the array object. This - array gives boundaries on any looping that will take place. - - :param nd: - - The dimensionality of the array (1, 2, or 3). - - :param typenum: - - The expected data type of the array. - - :param itemsize: - - This argument is only needed when *typenum* represents a - flexible array. Otherwise it should be 0. - -.. note:: - - The simulation of a C-style array is not complete for 2-d and 3-d - arrays. For example, the simulated arrays of pointers cannot be passed - to subroutines expecting specific, statically-defined 2-d and 3-d - arrays. To pass to functions requiring those kind of inputs, you must - statically define the required array and copy data. - -.. cfunction:: int PyArray_Free(PyObject* op, void* ptr) - - Must be called with the same objects and memory locations returned - from :cfunc:`PyArray_AsCArray` (...). This function cleans up memory - that otherwise would get leaked. - -.. cfunction:: PyObject* PyArray_Concatenate(PyObject* obj, int axis) - - Join the sequence of objects in *obj* together along *axis* into a - single array. If the dimensions or types are not compatible an - error is raised. - -.. cfunction:: PyObject* PyArray_InnerProduct(PyObject* obj1, PyObject* obj2) - - Compute a product-sum over the last dimensions of *obj1* and - *obj2*. Neither array is conjugated. - -.. cfunction:: PyObject* PyArray_MatrixProduct(PyObject* obj1, PyObject* obj) - - Compute a product-sum over the last dimension of *obj1* and the - second-to-last dimension of *obj2*. For 2-d arrays this is a - matrix-product. Neither array is conjugated. - -.. cfunction:: PyObject* PyArray_MatrixProduct2(PyObject* obj1, PyObject* obj, PyObject* out) - - .. versionadded:: 1.6 - - Same as PyArray_MatrixProduct, but store the result in *out*. The - output array must have the correct shape, type, and be - C-contiguous, or an exception is raised. - -.. 
cfunction:: PyObject* PyArray_EinsteinSum(char* subscripts, npy_intp nop, PyArrayObject** op_in, PyArray_Descr* dtype, NPY_ORDER order, NPY_CASTING casting, PyArrayObject* out) - - .. versionadded:: 1.6 - - Applies the einstein summation convention to the array operands - provided, returning a new array or placing the result in *out*. - The string in *subscripts* is a comma separated list of index - letters. The number of operands is in *nop*, and *op_in* is an - array containing those operands. The data type of the output can - be forced with *dtype*, the output order can be forced with *order* - (:cdata:`NPY_KEEPORDER` is recommended), and when *dtype* is specified, - *casting* indicates how permissive the data conversion should be. - - See the :func:`einsum` function for more details. - -.. cfunction:: PyObject* PyArray_CopyAndTranspose(PyObject \* op) - - A specialized copy and transpose function that works only for 2-d - arrays. The returned array is a transposed copy of *op*. - -.. cfunction:: PyObject* PyArray_Correlate(PyObject* op1, PyObject* op2, int mode) - - Compute the 1-d correlation of the 1-d arrays *op1* and *op2* - . The correlation is computed at each output point by multiplying - *op1* by a shifted version of *op2* and summing the result. As a - result of the shift, needed values outside of the defined range of - *op1* and *op2* are interpreted as zero. The mode determines how - many shifts to return: 0 - return only shifts that did not need to - assume zero- values; 1 - return an object that is the same size as - *op1*, 2 - return all possible shifts (any overlap at all is - accepted). - - .. rubric:: Notes - - This does not compute the usual correlation: if op2 is larger than op1, the - arguments are swapped, and the conjugate is never taken for complex arrays. - See PyArray_Correlate2 for the usual signal processing correlation. - -.. 
cfunction:: PyObject* PyArray_Correlate2(PyObject* op1, PyObject* op2, int mode) - - Updated version of PyArray_Correlate, which uses the usual definition of - correlation for 1d arrays. The correlation is computed at each output point - by multiplying *op1* by a shifted version of *op2* and summing the result. - As a result of the shift, needed values outside of the defined range of - *op1* and *op2* are interpreted as zero. The mode determines how many - shifts to return: 0 - return only shifts that did not need to assume zero- - values; 1 - return an object that is the same size as *op1*, 2 - return all - possible shifts (any overlap at all is accepted). - - .. rubric:: Notes - - Compute z as follows:: - - z[k] = sum_n op1[n] * conj(op2[n+k]) - -.. cfunction:: PyObject* PyArray_Where(PyObject* condition, PyObject* x, PyObject* y) - - If both ``x`` and ``y`` are ``NULL``, then return - :cfunc:`PyArray_Nonzero` (*condition*). Otherwise, both *x* and *y* - must be given and the object returned is shaped like *condition* - and has elements of *x* and *y* where *condition* is respectively - True or False. - - -Other functions -^^^^^^^^^^^^^^^ - -.. cfunction:: Bool PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp* dims, npy_intp* newstrides) - - Determine if *newstrides* is a strides array consistent with the - memory of an *nd* -dimensional array with shape ``dims`` and - element-size, *elsize*. The *newstrides* array is checked to see - if jumping by the provided number of bytes in each direction will - ever mean jumping more than *numbytes* which is the assumed size - of the available memory segment. If *numbytes* is 0, then an - equivalent *numbytes* is computed assuming *nd*, *dims*, and - *elsize* refer to a single-segment array. Return :cdata:`NPY_TRUE` if - *newstrides* is acceptable, otherwise return :cdata:`NPY_FALSE`. - -.. cfunction:: npy_intp PyArray_MultiplyList(npy_intp* seq, int n) - -.. 
cfunction:: int PyArray_MultiplyIntList(int* seq, int n) - - Both of these routines multiply an *n* -length array, *seq*, of - integers and return the result. No overflow checking is performed. - -.. cfunction:: int PyArray_CompareLists(npy_intp* l1, npy_intp* l2, int n) - - Given two *n* -length arrays of integers, *l1*, and *l2*, return - 1 if the lists are identical; otherwise, return 0. - - -Array Iterators ---------------- - -As of Numpy 1.6, these array iterators are superseded by -the new array iterator, :ctype:`NpyIter`. - -An array iterator is a simple way to access the elements of an -N-dimensional array quickly and efficiently. Section `2 -<#sec-array-iterator>`__ provides more description and examples of -this useful approach to looping over an array. - -.. cfunction:: PyObject* PyArray_IterNew(PyObject* arr) - - Return an array iterator object from the array, *arr*. This is - equivalent to *arr*. **flat**. The array iterator object makes - it easy to loop over an N-dimensional non-contiguous array in - C-style contiguous fashion. - -.. cfunction:: PyObject* PyArray_IterAllButAxis(PyObject* arr, int \*axis) - - Return an array iterator that will iterate over all axes but the - one provided in *\*axis*. The returned iterator cannot be used - with :cfunc:`PyArray_ITER_GOTO1D`. This iterator could be used to - write something similar to what ufuncs do wherein the loop over - the largest axis is done by a separate sub-routine. If *\*axis* is - negative then *\*axis* will be set to the axis having the smallest - stride and that axis will be used. - -.. cfunction:: PyObject *PyArray_BroadcastToShape(PyObject* arr, npy_intp *dimensions, int nd) - - Return an array iterator that is broadcast to iterate as an array - of the shape provided by *dimensions* and *nd*. - -.. cfunction:: int PyArrayIter_Check(PyObject* op) - - Evaluates true if *op* is an array iterator (or instance of a - subclass of the array iterator type). - -.. 
cfunction:: void PyArray_ITER_RESET(PyObject* iterator) - - Reset an *iterator* to the beginning of the array. - -.. cfunction:: void PyArray_ITER_NEXT(PyObject* iterator) - - Increment the index and the dataptr members of the *iterator* to - point to the next element of the array. If the array is not - (C-style) contiguous, also increment the N-dimensional coordinates - array. - -.. cfunction:: void *PyArray_ITER_DATA(PyObject* iterator) - - A pointer to the current element of the array. - -.. cfunction:: void PyArray_ITER_GOTO(PyObject* iterator, npy_intp* destination) - - Set the *iterator* index, dataptr, and coordinates members to the - location in the array indicated by the N-dimensional c-array, - *destination*, which must have size at least *iterator* - ->nd_m1+1. - -.. cfunction:: PyArray_ITER_GOTO1D(PyObject* iterator, npy_intp index) - - Set the *iterator* index and dataptr to the location in the array - indicated by the integer *index* which points to an element in the - C-styled flattened array. - -.. cfunction:: int PyArray_ITER_NOTDONE(PyObject* iterator) - - Evaluates TRUE as long as the iterator has not looped through all of - the elements, otherwise it evaluates FALSE. - - -Broadcasting (multi-iterators) ------------------------------- - -.. cfunction:: PyObject* PyArray_MultiIterNew(int num, ...) - - A simplified interface to broadcasting. This function takes the - number of arrays to broadcast and then *num* extra ( :ctype:`PyObject *` - ) arguments. These arguments are converted to arrays and iterators - are created. :cfunc:`PyArray_Broadcast` is then called on the resulting - multi-iterator object. The resulting, broadcasted multi-iterator - object is then returned. A broadcasted operation can then be - performed using a single loop and using :cfunc:`PyArray_MultiIter_NEXT` - (..) - -.. cfunction:: void PyArray_MultiIter_RESET(PyObject* multi) - - Reset all the iterators to the beginning in a multi-iterator - object, *multi*. - -.. 
cfunction:: void PyArray_MultiIter_NEXT(PyObject* multi) - - Advance each iterator in a multi-iterator object, *multi*, to its - next (broadcasted) element. - -.. cfunction:: void *PyArray_MultiIter_DATA(PyObject* multi, int i) - - Return the data-pointer of the *i* :math:`^{\textrm{th}}` iterator - in a multi-iterator object. - -.. cfunction:: void PyArray_MultiIter_NEXTi(PyObject* multi, int i) - - Advance the pointer of only the *i* :math:`^{\textrm{th}}` iterator. - -.. cfunction:: void PyArray_MultiIter_GOTO(PyObject* multi, npy_intp* destination) - - Advance each iterator in a multi-iterator object, *multi*, to the - given :math:`N` -dimensional *destination* where :math:`N` is the - number of dimensions in the broadcasted array. - -.. cfunction:: void PyArray_MultiIter_GOTO1D(PyObject* multi, npy_intp index) - - Advance each iterator in a multi-iterator object, *multi*, to the - corresponding location of the *index* into the flattened - broadcasted array. - -.. cfunction:: int PyArray_MultiIter_NOTDONE(PyObject* multi) - - Evaluates TRUE as long as the multi-iterator has not looped - through all of the elements (of the broadcasted result), otherwise - it evaluates FALSE. - -.. cfunction:: int PyArray_Broadcast(PyArrayMultiIterObject* mit) - - This function encapsulates the broadcasting rules. The *mit* - container should already contain iterators for all the arrays that - need to be broadcast. On return, these iterators will be adjusted - so that iteration over each simultaneously will accomplish the - broadcasting. A negative number is returned if an error occurs. - -.. cfunction:: int PyArray_RemoveSmallest(PyArrayMultiIterObject* mit) - - This function takes a multi-iterator object that has been - previously "broadcasted," finds the dimension with the smallest - "sum of strides" in the broadcasted result and adapts all the - iterators so as not to iterate over that dimension (by effectively - making them of length-1 in that dimension). 
The corresponding - dimension is returned unless *mit* ->nd is 0, then -1 is - returned. This function is useful for constructing ufunc-like - routines that broadcast their inputs correctly and then call a - strided 1-d version of the routine as the inner-loop. This 1-d - version is usually optimized for speed and for this reason the - loop should be performed over the axis that won't require large - stride jumps. - -Neighborhood iterator ---------------------- - -.. versionadded:: 1.4.0 - -Neighborhood iterators are subclasses of the iterator object, and can be used -to iterate over a neighborhood of a point. For example, you may want to iterate -over every voxel of a 3d image, and for every such voxel, iterate over a -hypercube. Neighborhood iterators automatically handle boundaries, thus making -this kind of code much easier to write than manual boundary handling, at the -cost of a slight overhead. - -.. cfunction:: PyObject* PyArray_NeighborhoodIterNew(PyArrayIterObject* iter, npy_intp bounds, int mode, PyArrayObject* fill_value) - - This function creates a new neighborhood iterator from an existing - iterator. The neighborhood will be computed relative to the position - currently pointed by *iter*, the bounds define the shape of the - neighborhood iterator, and the mode argument the boundaries handling mode. - - The *bounds* argument is expected to be a (2 * iter->ao->nd) array, such - that the range bounds[2*i]->bounds[2*i+1] defines the range where to walk for - dimension i (both bounds are included in the walked coordinates). The - bounds should be ordered for each dimension (bounds[2*i] <= bounds[2*i+1]). - - The mode should be one of: - - * NPY_NEIGHBORHOOD_ITER_ZERO_PADDING: zero padding. Outside bounds values - will be 0. - * NPY_NEIGHBORHOOD_ITER_ONE_PADDING: one padding. Outside bounds values - will be 1. - * NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING: constant padding. Outside bounds - values will be the same as the first item in fill_value. 
- * NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING: mirror padding. Outside bounds - values will be as if the array items were mirrored. For example, for the - array [1, 2, 3, 4], x[-2] will be 2, x[-1] will be 1, x[4] will be 4, - x[5] will be 3, etc... - * NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING: circular padding. Outside bounds - values will be as if the array was repeated. For example, for the - array [1, 2, 3, 4], x[-2] will be 3, x[-1] will be 4, x[4] will be 1, - x[5] will be 2, etc... - - If the mode is constant filling (NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING), - fill_value should point to an array object which holds the filling value - (the first item will be the filling value if the array contains more than - one item). For other cases, fill_value may be NULL. - - - The iterator holds a reference to iter - - Return NULL on failure (in which case the reference count of iter is not - changed) - - iter itself can be a Neighborhood iterator: this can be useful for e.g. - automatic boundary handling - - the object returned by this function should be safe to use as a normal - iterator - - If the position of iter is changed, any subsequent call to - PyArrayNeighborhoodIter_Next is undefined behavior, and - PyArrayNeighborhoodIter_Reset must be called. - - .. code-block:: c - - PyArrayIterObject \*iter; - PyArrayNeighborhoodIterObject \*neigh_iter; - iter = PyArray_IterNew(x); - - //For a 3x3 kernel - bounds = {-1, 1, -1, 1}; - neigh_iter = (PyArrayNeighborhoodIterObject*)PyArrayNeighborhoodIter_New( - iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL); - - for(i = 0; i < iter->size; ++i) { - for (j = 0; j < neigh_iter->size; ++j) { - // Walk around the item currently pointed by iter->dataptr - PyArrayNeighborhoodIter_Next(neigh_iter); - } - - // Move to the next point of iter - PyArrayIter_Next(iter); - PyArrayNeighborhoodIter_Reset(neigh_iter); - } - -.. 
cfunction:: int PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) - - Reset the iterator position to the first point of the neighborhood. This - should be called whenever the iter argument given at - PyArray_NeighborhoodIterObject is changed (see example) - -.. cfunction:: int PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) - - After this call, iter->dataptr points to the next point of the - neighborhood. Calling this function after every point of the - neighborhood has been visited is undefined. - -Array Scalars -------------- - -.. cfunction:: PyObject* PyArray_Return(PyArrayObject* arr) - - This function checks to see if *arr* is a 0-dimensional array and, - if so, returns the appropriate array scalar. It should be used - whenever 0-dimensional arrays could be returned to Python. - -.. cfunction:: PyObject* PyArray_Scalar(void* data, PyArray_Descr* dtype, PyObject* itemsize) - - Return an array scalar object of the given enumerated *typenum* - and *itemsize* by **copying** from memory pointed to by *data* - . If *swap* is nonzero then this function will byteswap the data - if appropriate to the data-type because array scalars are always - in correct machine-byte order. - -.. cfunction:: PyObject* PyArray_ToScalar(void* data, PyArrayObject* arr) - - Return an array scalar object of the type and itemsize indicated - by the array object *arr* copied from the memory pointed to by - *data* and swapping if the data in *arr* is not in machine - byte-order. - -.. cfunction:: PyObject* PyArray_FromScalar(PyObject* scalar, PyArray_Descr* outcode) - - Return a 0-dimensional array of type determined by *outcode* from - *scalar* which should be an array-scalar object. If *outcode* is - NULL, then the type is determined from *scalar*. - -.. cfunction:: void PyArray_ScalarAsCtype(PyObject* scalar, void* ctypeptr) - - Return in *ctypeptr* a pointer to the actual value in an array - scalar. 
There is no error checking so *scalar* must be an - array-scalar object, and ctypeptr must have enough space to hold - the correct type. For flexible-sized types, a pointer to the data - is copied into the memory of *ctypeptr*, for all other types, the - actual data is copied into the address pointed to by *ctypeptr*. - -.. cfunction:: void PyArray_CastScalarToCtype(PyObject* scalar, void* ctypeptr, PyArray_Descr* outcode) - - Return the data (cast to the data type indicated by *outcode*) - from the array-scalar, *scalar*, into the memory pointed to by - *ctypeptr* (which must be large enough to handle the incoming - memory). - -.. cfunction:: PyObject* PyArray_TypeObjectFromType(int type) - - Returns a scalar type-object from a type-number, *type* - . Equivalent to :cfunc:`PyArray_DescrFromType` (*type*)->typeobj - except for reference counting and error-checking. Returns a new - reference to the typeobject on success or ``NULL`` on failure. - -.. cfunction:: NPY_SCALARKIND PyArray_ScalarKind(int typenum, PyArrayObject** arr) - - See the function :cfunc:`PyArray_MinScalarType` for an alternative - mechanism introduced in NumPy 1.6.0. - - Return the kind of scalar represented by *typenum* and the array - in *\*arr* (if *arr* is not ``NULL`` ). The array is assumed to be - rank-0 and only used if *typenum* represents a signed integer. If - *arr* is not ``NULL`` and the first element is negative then - :cdata:`NPY_INTNEG_SCALAR` is returned, otherwise - :cdata:`NPY_INTPOS_SCALAR` is returned. The possible return values - are :cdata:`NPY_{kind}_SCALAR` where ``{kind}`` can be **INTPOS**, - **INTNEG**, **FLOAT**, **COMPLEX**, **BOOL**, or **OBJECT**. - :cdata:`NPY_NOSCALAR` is also an enumerated value - :ctype:`NPY_SCALARKIND` variables can take on. - -.. cfunction:: int PyArray_CanCoerceScalar(char thistype, char neededtype, NPY_SCALARKIND scalar) - - See the function :cfunc:`PyArray_ResultType` for details of - NumPy type promotion, updated in NumPy 1.6.0. 
- - Implements the rules for scalar coercion. Scalars are only - silently coerced from thistype to neededtype if this function - returns nonzero. If scalar is :cdata:`NPY_NOSCALAR`, then this - function is equivalent to :cfunc:`PyArray_CanCastSafely`. The rule is - that scalars of the same KIND can be coerced into arrays of the - same KIND. This rule means that high-precision scalars will never - cause low-precision arrays of the same KIND to be upcast. - - -Data-type descriptors ---------------------- - - - -.. warning:: - - Data-type objects must be reference counted so be aware of the - action on the data-type reference of different C-API calls. The - standard rule is that when a data-type object is returned it is a - new reference. Functions that take :ctype:`PyArray_Descr *` objects and - return arrays steal references to the data-type their inputs - unless otherwise noted. Therefore, you must own a reference to any - data-type object used as input to such a function. - -.. cfunction:: int PyArrayDescr_Check(PyObject* obj) - - Evaluates as true if *obj* is a data-type object ( :ctype:`PyArray_Descr *` ). - -.. cfunction:: PyArray_Descr* PyArray_DescrNew(PyArray_Descr* obj) - - Return a new data-type object copied from *obj* (the fields - reference is just updated so that the new object points to the - same fields dictionary if any). - -.. cfunction:: PyArray_Descr* PyArray_DescrNewFromType(int typenum) - - Create a new data-type object from the built-in (or - user-registered) data-type indicated by *typenum*. All builtin - types should not have any of their fields changed. This creates a - new copy of the :ctype:`PyArray_Descr` structure so that you can fill - it in as appropriate. This function is especially needed for - flexible data-types which need to have a new elsize member in - order to be meaningful in array construction. - -.. 
cfunction:: PyArray_Descr* PyArray_DescrNewByteorder(PyArray_Descr* obj, char newendian) - - Create a new data-type object with the byteorder set according to - *newendian*. All referenced data-type objects (in subdescr and - fields members of the data-type object) are also changed - (recursively). If a byteorder of :cdata:`NPY_IGNORE` is encountered it - is left alone. If newendian is :cdata:`NPY_SWAP`, then all byte-orders - are swapped. Other valid newendian values are :cdata:`NPY_NATIVE`, - :cdata:`NPY_LITTLE`, and :cdata:`NPY_BIG` which all cause the returned - data-typed descriptor (and all it's - referenced data-type descriptors) to have the corresponding byte- - order. - -.. cfunction:: PyArray_Descr* PyArray_DescrFromObject(PyObject* op, PyArray_Descr* mintype) - - Determine an appropriate data-type object from the object *op* - (which should be a "nested" sequence object) and the minimum - data-type descriptor mintype (which can be ``NULL`` ). Similar in - behavior to array(*op*).dtype. Don't confuse this function with - :cfunc:`PyArray_DescrConverter`. This function essentially looks at - all the objects in the (nested) sequence and determines the - data-type from the elements it finds. - -.. cfunction:: PyArray_Descr* PyArray_DescrFromScalar(PyObject* scalar) - - Return a data-type object from an array-scalar object. No checking - is done to be sure that *scalar* is an array scalar. If no - suitable data-type can be determined, then a data-type of - :cdata:`NPY_OBJECT` is returned by default. - -.. cfunction:: PyArray_Descr* PyArray_DescrFromType(int typenum) - - Returns a data-type object corresponding to *typenum*. The - *typenum* can be one of the enumerated types, a character code for - one of the enumerated types, or a user-defined type. - -.. cfunction:: int PyArray_DescrConverter(PyObject* obj, PyArray_Descr** dtype) - - Convert any compatible Python object, *obj*, to a data-type object - in *dtype*. 
A large number of Python objects can be converted to - data-type objects. See :ref:`arrays.dtypes` for a complete - description. This version of the converter converts None objects - to a :cdata:`NPY_DEFAULT_TYPE` data-type object. This function can - be used with the "O&" character code in :cfunc:`PyArg_ParseTuple` - processing. - -.. cfunction:: int PyArray_DescrConverter2(PyObject* obj, PyArray_Descr** dtype) - - Convert any compatible Python object, *obj*, to a data-type - object in *dtype*. This version of the converter converts None - objects so that the returned data-type is ``NULL``. This function - can also be used with the "O&" character in PyArg_ParseTuple - processing. - -.. cfunction:: int Pyarray_DescrAlignConverter(PyObject* obj, PyArray_Descr** dtype) - - Like :cfunc:`PyArray_DescrConverter` except it aligns C-struct-like - objects on word-boundaries as the compiler would. - -.. cfunction:: int Pyarray_DescrAlignConverter2(PyObject* obj, PyArray_Descr** dtype) - - Like :cfunc:`PyArray_DescrConverter2` except it aligns C-struct-like - objects on word-boundaries as the compiler would. - -.. cfunction:: PyObject *PyArray_FieldNames(PyObject* dict) - - Take the fields dictionary, *dict*, such as the one attached to a - data-type object and construct an ordered-list of field names such - as is stored in the names field of the :ctype:`PyArray_Descr` object. - - -Conversion Utilities --------------------- - - -For use with :cfunc:`PyArg_ParseTuple` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All of these functions can be used in :cfunc:`PyArg_ParseTuple` (...) with -the "O&" format specifier to automatically convert any Python object -to the required C-object. All of these functions return -:cdata:`NPY_SUCCEED` if successful and :cdata:`NPY_FAIL` if not. The first -argument to all of these function is a Python object. The second -argument is the **address** of the C-type to convert the Python object -to. - - -.. 
warning:: - - Be sure to understand what steps you should take to manage the - memory when using these conversion functions. These functions can - require freeing memory, and/or altering the reference counts of - specific objects based on your use. - -.. cfunction:: int PyArray_Converter(PyObject* obj, PyObject** address) - - Convert any Python object to a :ctype:`PyArrayObject`. If - :cfunc:`PyArray_Check` (*obj*) is TRUE then its reference count is - incremented and a reference placed in *address*. If *obj* is not - an array, then convert it to an array using :cfunc:`PyArray_FromAny` - . No matter what is returned, you must DECREF the object returned - by this routine in *address* when you are done with it. - -.. cfunction:: int PyArray_OutputConverter(PyObject* obj, PyArrayObject** address) - - This is a default converter for output arrays given to - functions. If *obj* is :cdata:`Py_None` or ``NULL``, then *\*address* - will be ``NULL`` but the call will succeed. If :cfunc:`PyArray_Check` ( - *obj*) is TRUE then it is returned in *\*address* without - incrementing its reference count. - -.. cfunction:: int PyArray_IntpConverter(PyObject* obj, PyArray_Dims* seq) - - Convert any Python sequence, *obj*, smaller than :cdata:`NPY_MAXDIMS` - to a C-array of :ctype:`npy_intp`. The Python object could also be a - single number. The *seq* variable is a pointer to a structure with - members ptr and len. On successful return, *seq* ->ptr contains a - pointer to memory that must be freed to avoid a memory leak. The - restriction on memory size allows this converter to be - conveniently used for sequences intended to be interpreted as - array shapes. - -.. cfunction:: int PyArray_BufferConverter(PyObject* obj, PyArray_Chunk* buf) - - Convert any Python object, *obj*, with a (single-segment) buffer - interface to a variable with members that detail the object's use - of its chunk of memory. 
The *buf* variable is a pointer to a - structure with base, ptr, len, and flags members. The - :ctype:`PyArray_Chunk` structure is binary compatibile with the - Python's buffer object (through its len member on 32-bit platforms - and its ptr member on 64-bit platforms or in Python 2.5). On - return, the base member is set to *obj* (or its base if *obj* is - already a buffer object pointing to another object). If you need - to hold on to the memory be sure to INCREF the base member. The - chunk of memory is pointed to by *buf* ->ptr member and has length - *buf* ->len. The flags member of *buf* is :cdata:`NPY_BEHAVED_RO` with - the :cdata:`NPY_WRITEABLE` flag set if *obj* has a writeable buffer - interface. - -.. cfunction:: int PyArray_AxisConverter(PyObject \* obj, int* axis) - - Convert a Python object, *obj*, representing an axis argument to - the proper value for passing to the functions that take an integer - axis. Specifically, if *obj* is None, *axis* is set to - :cdata:`NPY_MAXDIMS` which is interpreted correctly by the C-API - functions that take axis arguments. - -.. cfunction:: int PyArray_BoolConverter(PyObject* obj, Bool* value) - - Convert any Python object, *obj*, to :cdata:`NPY_TRUE` or - :cdata:`NPY_FALSE`, and place the result in *value*. - -.. cfunction:: int PyArray_ByteorderConverter(PyObject* obj, char* endian) - - Convert Python strings into the corresponding byte-order - character: - '>', '<', 's', '=', or '\|'. - -.. cfunction:: int PyArray_SortkindConverter(PyObject* obj, NPY_SORTKIND* sort) - - Convert Python strings into one of :cdata:`NPY_QUICKSORT` (starts - with 'q' or 'Q') , :cdata:`NPY_HEAPSORT` (starts with 'h' or 'H'), - or :cdata:`NPY_MERGESORT` (starts with 'm' or 'M'). - -.. cfunction:: int PyArray_SearchsideConverter(PyObject* obj, NPY_SEARCHSIDE* side) - - Convert Python strings into one of :cdata:`NPY_SEARCHLEFT` (starts with 'l' - or 'L'), or :cdata:`NPY_SEARCHRIGHT` (starts with 'r' or 'R'). - -.. 
cfunction:: int PyArray_OrderConverter(PyObject* obj, NPY_ORDER* order) - - Convert the Python strings 'C', 'F', 'A', and 'K' into the :ctype:`NPY_ORDER` - enumeration :cdata:`NPY_CORDER`, :cdata:`NPY_FORTRANORDER`, - :cdata:`NPY_ANYORDER`, and :cdata:`NPY_KEEPORDER`. - -.. cfunction:: int PyArray_CastingConverter(PyObject* obj, NPY_CASTING* casting) - - Convert the Python strings 'no', 'equiv', 'safe', 'same_kind', and - 'unsafe' into the :ctype:`NPY_CASTING` enumeration :cdata:`NPY_NO_CASTING`, - :cdata:`NPY_EQUIV_CASTING`, :cdata:`NPY_SAFE_CASTING`, - :cdata:`NPY_SAME_KIND_CASTING`, and :cdata:`NPY_UNSAFE_CASTING`. - -.. cfunction:: int PyArray_ClipmodeConverter(PyObject* object, NPY_CLIPMODE* val) - - Convert the Python strings 'clip', 'wrap', and 'raise' into the - :ctype:`NPY_CLIPMODE` enumeration :cdata:`NPY_CLIP`, :cdata:`NPY_WRAP`, - and :cdata:`NPY_RAISE`. - -.. cfunction:: int PyArray_ConvertClipmodeSequence(PyObject* object, NPY_CLIPMODE* modes, int n) - - Converts either a sequence of clipmodes or a single clipmode into - a C array of :ctype:`NPY_CLIPMODE` values. The number of clipmodes *n* - must be known before calling this function. This function is provided - to help functions allow a different clipmode for each dimension. - -Other conversions -^^^^^^^^^^^^^^^^^ - -.. cfunction:: int PyArray_PyIntAsInt(PyObject* op) - - Convert all kinds of Python objects (including arrays and array - scalars) to a standard integer. On error, -1 is returned and an - exception set. You may find useful the macro: - - .. code-block:: c - - #define error_converting(x) (((x) == -1) && PyErr_Occurred() - -.. cfunction:: npy_intp PyArray_PyIntAsIntp(PyObject* op) - - Convert all kinds of Python objects (including arrays and array - scalars) to a (platform-pointer-sized) integer. On error, -1 is - returned and an exception set. - -.. 
cfunction:: int PyArray_IntpFromSequence(PyObject* seq, npy_intp* vals, int maxvals) - - Convert any Python sequence (or single Python number) passed in as - *seq* to (up to) *maxvals* pointer-sized integers and place them - in the *vals* array. The sequence can be smaller then *maxvals* as - the number of converted objects is returned. - -.. cfunction:: int PyArray_TypestrConvert(int itemsize, int gentype) - - Convert typestring characters (with *itemsize*) to basic - enumerated data types. The typestring character corresponding to - signed and unsigned integers, floating point numbers, and - complex-floating point numbers are recognized and converted. Other - values of gentype are returned. This function can be used to - convert, for example, the string 'f4' to :cdata:`NPY_FLOAT32`. - - -Miscellaneous -------------- - - -Importing the API -^^^^^^^^^^^^^^^^^ - -In order to make use of the C-API from another extension module, the -``import_array`` () command must be used. If the extension module is -self-contained in a single .c file, then that is all that needs to be -done. If, however, the extension module involves multiple files where -the C-API is needed then some additional steps must be taken. - -.. cfunction:: void import_array(void) - - This function must be called in the initialization section of a - module that will make use of the C-API. It imports the module - where the function-pointer table is stored and points the correct - variable to it. - -.. cmacro:: PY_ARRAY_UNIQUE_SYMBOL - -.. cmacro:: NO_IMPORT_ARRAY - - Using these #defines you can use the C-API in multiple files for a - single extension module. In each file you must define - :cmacro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the - C-API (*e.g.* myextension_ARRAY_API). This must be done **before** - including the numpy/arrayobject.h file. In the module - intialization routine you call ``import_array`` (). 
In addition, - in the files that do not have the module initialization - sub_routine define :cmacro:`NO_IMPORT_ARRAY` prior to including - numpy/arrayobject.h. - - Suppose I have two files coolmodule.c and coolhelper.c which need - to be compiled and linked into a single extension module. Suppose - coolmodule.c contains the required initcool module initialization - function (with the import_array() function called). Then, - coolmodule.c would have at the top: - - .. code-block:: c - - #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API - #include numpy/arrayobject.h - - On the other hand, coolhelper.c would contain at the top: - - .. code-block:: c - - #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API - #define NO_IMPORT_ARRAY - #include numpy/arrayobject.h - -Checking the API Version -^^^^^^^^^^^^^^^^^^^^^^^^ - -Because python extensions are not used in the same way as usual libraries on -most platforms, some errors cannot be automatically detected at build time or -even runtime. For example, if you build an extension using a function available -only for numpy >= 1.3.0, and you import the extension later with numpy 1.2, you -will not get an import error (but almost certainly a segmentation fault when -calling the function). That's why several functions are provided to check for -numpy versions. The macros :cdata:`NPY_VERSION` and -:cdata:`NPY_FEATURE_VERSION` corresponds to the numpy version used to build the -extension, whereas the versions returned by the functions -PyArray_GetNDArrayCVersion and PyArray_GetNDArrayCFeatureVersion corresponds to -the runtime numpy's version. - -The rules for ABI and API compatibilities can be summarized as follows: - - * Whenever :cdata:`NPY_VERSION` != PyArray_GetNDArrayCVersion, the - extension has to be recompiled (ABI incompatibility). - * :cdata:`NPY_VERSION` == PyArray_GetNDArrayCVersion and - :cdata:`NPY_FEATURE_VERSION` <= PyArray_GetNDArrayCFeatureVersion means - backward compatible changes. 
- -ABI incompatibility is automatically detected in every numpy's version. API -incompatibility detection was added in numpy 1.4.0. If you want to supported -many different numpy versions with one extension binary, you have to build your -extension with the lowest NPY_FEATURE_VERSION as possible. - -.. cfunction:: unsigned int PyArray_GetNDArrayCVersion(void) - - This just returns the value :cdata:`NPY_VERSION`. :cdata:`NPY_VERSION` - changes whenever a backward incompatible change at the ABI level. Because - it is in the C-API, however, comparing the output of this function from the - value defined in the current header gives a way to test if the C-API has - changed thus requiring a re-compilation of extension modules that use the - C-API. This is automatically checked in the function import_array. - -.. cfunction:: unsigned int PyArray_GetNDArrayCFeatureVersion(void) - - .. versionadded:: 1.4.0 - - This just returns the value :cdata:`NPY_FEATURE_VERSION`. - :cdata:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a - function is added). A changed value does not always require a recompile. - -Internal Flexibility -^^^^^^^^^^^^^^^^^^^^ - -.. cfunction:: int PyArray_SetNumericOps(PyObject* dict) - - NumPy stores an internal table of Python callable objects that are - used to implement arithmetic operations for arrays as well as - certain array calculation methods. This function allows the user - to replace any or all of these Python objects with their own - versions. The keys of the dictionary, *dict*, are the named - functions to replace and the paired value is the Python callable - object to use. Care should be taken that the function used to - replace an internal array operation does not itself call back to - that internal array operation (unless you have designed the - function to handle that), or an unchecked infinite recursion can - result (possibly causing program crash). 
The key names that - represent operations that can be replaced are: - - **add**, **subtract**, **multiply**, **divide**, - **remainder**, **power**, **square**, **reciprocal**, - **ones_like**, **sqrt**, **negative**, **absolute**, - **invert**, **left_shift**, **right_shift**, - **bitwise_and**, **bitwise_xor**, **bitwise_or**, - **less**, **less_equal**, **equal**, **not_equal**, - **greater**, **greater_equal**, **floor_divide**, - **true_divide**, **logical_or**, **logical_and**, - **floor**, **ceil**, **maximum**, **minimum**, **rint**. - - - These functions are included here because they are used at least once - in the array object's methods. The function returns -1 (without - setting a Python Error) if one of the objects being assigned is not - callable. - -.. cfunction:: PyObject* PyArray_GetNumericOps(void) - - Return a Python dictionary containing the callable Python objects - stored in the the internal arithmetic operation table. The keys of - this dictionary are given in the explanation for :cfunc:`PyArray_SetNumericOps`. - -.. cfunction:: void PyArray_SetStringFunction(PyObject* op, int repr) - - This function allows you to alter the tp_str and tp_repr methods - of the array object to any Python function. Thus you can alter - what happens for all arrays when str(arr) or repr(arr) is called - from Python. The function to be called is passed in as *op*. If - *repr* is non-zero, then this function will be called in response - to repr(arr), otherwise the function will be called in response to - str(arr). No check on whether or not *op* is callable is - performed. The callable passed in to *op* should expect an array - argument and should return a string to be printed. - - -Memory management -^^^^^^^^^^^^^^^^^ - -.. cfunction:: char* PyDataMem_NEW(size_t nbytes) - -.. cfunction:: PyDataMem_FREE(char* ptr) - -.. cfunction:: char* PyDataMem_RENEW(void * ptr, size_t newbytes) - - Macros to allocate, free, and reallocate memory. 
These macros are used - internally to create arrays. - -.. cfunction:: npy_intp* PyDimMem_NEW(nd) - -.. cfunction:: PyDimMem_FREE(npy_intp* ptr) - -.. cfunction:: npy_intp* PyDimMem_RENEW(npy_intp* ptr, npy_intp newnd) - - Macros to allocate, free, and reallocate dimension and strides memory. - -.. cfunction:: PyArray_malloc(nbytes) - -.. cfunction:: PyArray_free(ptr) - -.. cfunction:: PyArray_realloc(ptr, nbytes) - - These macros use different memory allocators, depending on the - constant :cdata:`NPY_USE_PYMEM`. The system malloc is used when - :cdata:`NPY_USE_PYMEM` is 0, if :cdata:`NPY_USE_PYMEM` is 1, then - the Python memory allocator is used. - - -Threading support -^^^^^^^^^^^^^^^^^ - -These macros are only meaningful if :cdata:`NPY_ALLOW_THREADS` -evaluates True during compilation of the extension module. Otherwise, -these macros are equivalent to whitespace. Python uses a single Global -Interpreter Lock (GIL) for each Python process so that only a single -thread may excecute at a time (even on multi-cpu machines). When -calling out to a compiled function that may take time to compute (and -does not have side-effects for other threads like updated global -variables), the GIL should be released so that other Python threads -can run while the time-consuming calculations are performed. This can -be accomplished using two groups of macros. Typically, if one macro in -a group is used in a code block, all of them must be used in the same -code block. Currently, :cdata:`NPY_ALLOW_THREADS` is defined to the -python-defined :cdata:`WITH_THREADS` constant unless the environment -variable :cdata:`NPY_NOSMP` is set in which case -:cdata:`NPY_ALLOW_THREADS` is defined to be 0. - -Group 1 -""""""" - - This group is used to call code that may take some time but does not - use any Python C-API calls. Thus, the GIL should be released during - its calculation. - - .. 
cmacro:: NPY_BEGIN_ALLOW_THREADS - - Equivalent to :cmacro:`Py_BEGIN_ALLOW_THREADS` except it uses - :cdata:`NPY_ALLOW_THREADS` to determine if the macro if - replaced with white-space or not. - - .. cmacro:: NPY_END_ALLOW_THREADS - - Equivalent to :cmacro:`Py_END_ALLOW_THREADS` except it uses - :cdata:`NPY_ALLOW_THREADS` to determine if the macro if - replaced with white-space or not. - - .. cmacro:: NPY_BEGIN_THREADS_DEF - - Place in the variable declaration area. This macro sets up the - variable needed for storing the Python state. - - .. cmacro:: NPY_BEGIN_THREADS - - Place right before code that does not need the Python - interpreter (no Python C-API calls). This macro saves the - Python state and releases the GIL. - - .. cmacro:: NPY_END_THREADS - - Place right after code that does not need the Python - interpreter. This macro acquires the GIL and restores the - Python state from the saved variable. - - .. cfunction:: NPY_BEGIN_THREADS_DESCR(PyArray_Descr *dtype) - - Useful to release the GIL only if *dtype* does not contain - arbitrary Python objects which may need the Python interpreter - during execution of the loop. Equivalent to - - .. cfunction:: NPY_END_THREADS_DESCR(PyArray_Descr *dtype) - - Useful to regain the GIL in situations where it was released - using the BEGIN form of this macro. - -Group 2 -""""""" - - This group is used to re-acquire the Python GIL after it has been - released. For example, suppose the GIL has been released (using the - previous calls), and then some path in the code (perhaps in a - different subroutine) requires use of the Python C-API, then these - macros are useful to acquire the GIL. These macros accomplish - essentially a reverse of the previous three (acquire the LOCK saving - what state it had) and then re-release it with the saved state. - - .. cmacro:: NPY_ALLOW_C_API_DEF - - Place in the variable declaration area to set up the necessary - variable. - - .. 
cmacro:: NPY_ALLOW_C_API - - Place before code that needs to call the Python C-API (when it is - known that the GIL has already been released). - - .. cmacro:: NPY_DISABLE_C_API - - Place after code that needs to call the Python C-API (to re-release - the GIL). - -.. tip:: - - Never use semicolons after the threading support macros. - - -Priority -^^^^^^^^ - -.. cvar:: NPY_PRIOIRTY - - Default priority for arrays. - -.. cvar:: NPY_SUBTYPE_PRIORITY - - Default subtype priority. - -.. cvar:: NPY_SCALAR_PRIORITY - - Default scalar priority (very small) - -.. cfunction:: double PyArray_GetPriority(PyObject* obj, double def) - - Return the :obj:`__array_priority__` attribute (converted to a - double) of *obj* or *def* if no attribute of that name - exists. Fast returns that avoid the attribute lookup are provided - for objects of type :cdata:`PyArray_Type`. - - -Default buffers -^^^^^^^^^^^^^^^ - -.. cvar:: NPY_BUFSIZE - - Default size of the user-settable internal buffers. - -.. cvar:: NPY_MIN_BUFSIZE - - Smallest size of user-settable internal buffers. - -.. cvar:: NPY_MAX_BUFSIZE - - Largest size allowed for the user-settable buffers. - - -Other constants -^^^^^^^^^^^^^^^ - -.. cvar:: NPY_NUM_FLOATTYPE - - The number of floating-point types - -.. cvar:: NPY_MAXDIMS - - The maximum number of dimensions allowed in arrays. - -.. cvar:: NPY_VERSION - - The current version of the ndarray object (check to see if this - variable is defined to guarantee the numpy/arrayobject.h header is - being used). - -.. cvar:: NPY_FALSE - - Defined as 0 for use with Bool. - -.. cvar:: NPY_TRUE - - Defined as 1 for use with Bool. - -.. cvar:: NPY_FAIL - - The return value of failed converter functions which are called using - the "O&" syntax in :cfunc:`PyArg_ParseTuple`-like functions. - -.. cvar:: NPY_SUCCEED - - The return value of successful converter functions which are called - using the "O&" syntax in :cfunc:`PyArg_ParseTuple`-like functions. 
- - -Miscellaneous Macros -^^^^^^^^^^^^^^^^^^^^ - -.. cfunction:: PyArray_SAMESHAPE(a1, a2) - - Evaluates as True if arrays *a1* and *a2* have the same shape. - -.. cfunction:: PyArray_MAX(a,b) - - Returns the maximum of *a* and *b*. If (*a*) or (*b*) are - expressions they are evaluated twice. - -.. cfunction:: PyArray_MIN(a,b) - - Returns the minimum of *a* and *b*. If (*a*) or (*b*) are - expressions they are evaluated twice. - -.. cfunction:: PyArray_CLT(a,b) - -.. cfunction:: PyArray_CGT(a,b) - -.. cfunction:: PyArray_CLE(a,b) - -.. cfunction:: PyArray_CGE(a,b) - -.. cfunction:: PyArray_CEQ(a,b) - -.. cfunction:: PyArray_CNE(a,b) - - Implements the complex comparisons between two complex numbers - (structures with a real and imag member) using NumPy's definition - of the ordering which is lexicographic: comparing the real parts - first and then the complex parts if the real parts are equal. - -.. cfunction:: PyArray_REFCOUNT(PyObject* op) - - Returns the reference count of any Python object. - -.. cfunction:: PyArray_XDECREF_ERR(PyObject \*obj) - - DECREF's an array object which may have the :cdata:`NPY_UPDATEIFCOPY` - flag set without causing the contents to be copied back into the - original array. Resets the :cdata:`NPY_WRITEABLE` flag on the base - object. This is useful for recovering from an error condition when - :cdata:`NPY_UPDATEIFCOPY` is used. - - -Enumerated Types -^^^^^^^^^^^^^^^^ - -.. ctype:: NPY_SORTKIND - - A special variable-type which can take on the values :cdata:`NPY_{KIND}` - where ``{KIND}`` is - - **QUICKSORT**, **HEAPSORT**, **MERGESORT** - - .. cvar:: NPY_NSORTS - - Defined to be the number of sorts. - -.. ctype:: NPY_SCALARKIND - - A special variable type indicating the number of "kinds" of - scalars distinguished in determining scalar-coercion rules. 
This - variable can take on the values :cdata:`NPY_{KIND}` where ``{KIND}`` can be - - **NOSCALAR**, **BOOL_SCALAR**, **INTPOS_SCALAR**, - **INTNEG_SCALAR**, **FLOAT_SCALAR**, **COMPLEX_SCALAR**, - **OBJECT_SCALAR** - - .. cvar:: NPY_NSCALARKINDS - - Defined to be the number of scalar kinds - (not including :cdata:`NPY_NOSCALAR`). - -.. ctype:: NPY_ORDER - - An enumeration type indicating the element order that an array should be - interpreted in. When a brand new array is created, generally - only **NPY_CORDER** and **NPY_FORTRANORDER** are used, whereas - when one or more inputs are provided, the order can be based on them. - - .. cvar:: NPY_ANYORDER - - Fortran order if all the inputs are Fortran, C otherwise. - - .. cvar:: NPY_CORDER - - C order. - - .. cvar:: NPY_FORTRANORDER - - Fortran order. - - .. cvar:: NPY_KEEPORDER - - An order as close to the order of the inputs as possible, even - if the input is in neither C nor Fortran order. - -.. ctype:: NPY_CLIPMODE - - A variable type indicating the kind of clipping that should be - applied in certain functions. - - .. cvar:: NPY_RAISE - - The default for most operations, raises an exception if an index - is out of bounds. - - .. cvar:: NPY_CLIP - - Clips an index to the valid range if it is out of bounds. - - .. cvar:: NPY_WRAP - - Wraps an index to the valid range if it is out of bounds. - -.. ctype:: NPY_CASTING - - .. versionadded:: 1.6 - - An enumeration type indicating how permissive data conversions should - be. This is used by the iterator added in NumPy 1.6, and is intended - to be used more broadly in a future version. - - .. cvar:: NPY_NO_CASTING - - Only allow identical types. - - .. cvar:: NPY_EQUIV_CASTING - - Allow identical and casts involving byte swapping. - - .. cvar:: NPY_SAFE_CASTING - - Only allow casts which will not cause values to be rounded, - truncated, or otherwise changed. - - .. cvar:: NPY_SAME_KIND_CASTING - - Allow any safe casts, and casts between types of the same kind. 
- For example, float64 -> float32 is permitted with this rule. - - .. cvar:: NPY_UNSAFE_CASTING - - Allow any cast, no matter what kind of data loss may occur. - -.. index:: - pair: ndarray; C-API diff --git a/numpy-1.6.2/doc/source/reference/c-api.config.rst b/numpy-1.6.2/doc/source/reference/c-api.config.rst deleted file mode 100644 index 0989c53d7e..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.config.rst +++ /dev/null @@ -1,104 +0,0 @@ -System configuration -==================== - -.. sectionauthor:: Travis E. Oliphant - -When NumPy is built, information about system configuration is -recorded, and is made available for extension modules using Numpy's C -API. These are mostly defined in ``numpyconfig.h`` (included in -``ndarrayobject.h``). The public symbols are prefixed by ``NPY_*``. -Numpy also offers some functions for querying information about the -platform in use. - -For private use, Numpy also constructs a ``config.h`` in the NumPy -include directory, which is not exported by Numpy (that is a python -extension which use the numpy C API will not see those symbols), to -avoid namespace pollution. - - -Data type sizes ---------------- - -The :cdata:`NPY_SIZEOF_{CTYPE}` constants are defined so that sizeof -information is available to the pre-processor. - -.. cvar:: NPY_SIZEOF_SHORT - - sizeof(short) - -.. cvar:: NPY_SIZEOF_INT - - sizeof(int) - -.. cvar:: NPY_SIZEOF_LONG - - sizeof(long) - -.. cvar:: NPY_SIZEOF_LONG_LONG - - sizeof(longlong) where longlong is defined appropriately on the - platform (A macro defines **NPY_SIZEOF_LONGLONG** as well.) - -.. cvar:: NPY_SIZEOF_PY_LONG_LONG - - -.. cvar:: NPY_SIZEOF_FLOAT - - sizeof(float) - -.. cvar:: NPY_SIZEOF_DOUBLE - - sizeof(double) - -.. cvar:: NPY_SIZEOF_LONG_DOUBLE - - sizeof(longdouble) (A macro defines **NPY_SIZEOF_LONGDOUBLE** as well.) - -.. cvar:: NPY_SIZEOF_PY_INTPTR_T - - Size of a pointer on this platform (sizeof(void \*)) (A macro defines - NPY_SIZEOF_INTP as well.) 
- - -Platform information --------------------- - -.. cvar:: NPY_CPU_X86 -.. cvar:: NPY_CPU_AMD64 -.. cvar:: NPY_CPU_IA64 -.. cvar:: NPY_CPU_PPC -.. cvar:: NPY_CPU_PPC64 -.. cvar:: NPY_CPU_SPARC -.. cvar:: NPY_CPU_SPARC64 -.. cvar:: NPY_CPU_S390 -.. cvar:: NPY_CPU_PARISC - - .. versionadded:: 1.3.0 - - CPU architecture of the platform; only one of the above is - defined. - - Defined in ``numpy/npy_cpu.h`` - -.. cvar:: NPY_LITTLE_ENDIAN - -.. cvar:: NPY_BIG_ENDIAN - -.. cvar:: NPY_BYTE_ORDER - - .. versionadded:: 1.3.0 - - Portable alternatives to the ``endian.h`` macros of GNU Libc. - If big endian, :cdata:`NPY_BYTE_ORDER` == :cdata:`NPY_BIG_ENDIAN`, and - similarly for little endian architectures. - - Defined in ``numpy/npy_endian.h``. - -.. cfunction:: PyArray_GetEndianness() - - .. versionadded:: 1.3.0 - - Returns the endianness of the current platform. - One of :cdata:`NPY_CPU_BIG`, :cdata:`NPY_CPU_LITTLE`, - or :cdata:`NPY_CPU_UNKNOWN_ENDIAN`. - diff --git a/numpy-1.6.2/doc/source/reference/c-api.coremath.rst b/numpy-1.6.2/doc/source/reference/c-api.coremath.rst deleted file mode 100644 index 6584f216d2..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.coremath.rst +++ /dev/null @@ -1,380 +0,0 @@ -Numpy core libraries -==================== - -.. sectionauthor:: David Cournapeau - -.. versionadded:: 1.3.0 - -Starting from numpy 1.3.0, we are working on separating the pure C, -"computational" code from the python dependent code. The goal is twofolds: -making the code cleaner, and enabling code reuse by other extensions outside -numpy (scipy, etc...). - -Numpy core math library ------------------------ - -The numpy core math library ('npymath') is a first step in this direction. This -library contains most math-related C99 functionality, which can be used on -platforms where C99 is not well supported. The core math functions have the -same API as the C99 ones, except for the npy_* prefix. 
- -The available functions are defined in ``<numpy/npy_math.h>`` - please refer to this header when -in doubt. - -Floating point classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. cvar:: NPY_NAN - - This macro is defined to a NaN (Not a Number), and is guaranteed to have - the signbit unset ('positive' NaN). The corresponding single and extended - precision macro are available with the suffix F and L. - -.. cvar:: NPY_INFINITY - - This macro is defined to a positive inf. The corresponding single and - extended precision macro are available with the suffix F and L. - -.. cvar:: NPY_PZERO - - This macro is defined to positive zero. The corresponding single and - extended precision macro are available with the suffix F and L. - -.. cvar:: NPY_NZERO - - This macro is defined to negative zero (that is with the sign bit set). The - corresponding single and extended precision macro are available with the - suffix F and L. - -.. cfunction:: int npy_isnan(x) - - This is a macro, and is equivalent to C99 isnan: works for single, double - and extended precision, and return a non 0 value if x is a NaN. - -.. cfunction:: int npy_isfinite(x) - - This is a macro, and is equivalent to C99 isfinite: works for single, - double and extended precision, and return a non 0 value if x is neither a - NaN nor an infinity. - -.. cfunction:: int npy_isinf(x) - - This is a macro, and is equivalent to C99 isinf: works for single, double - and extended precision, and return a non 0 value if x is infinite (positive - and negative). - -.. cfunction:: int npy_signbit(x) - - This is a macro, and is equivalent to C99 signbit: works for single, double - and extended precision, and return a non 0 value if x has the signbit set - (that is the number is negative). - -.. cfunction:: double npy_copysign(double x, double y) - - This is a function equivalent to C99 copysign: return x with the same sign - as y. Works for any value, including inf and nan. Single and extended - precisions are available with suffix f and l. 
- - .. versionadded:: 1.4.0 - -Useful math constants -~~~~~~~~~~~~~~~~~~~~~ - -The following math constants are available in npy_math.h. Single and extended -precision are also available by adding the F and L suffixes respectively. - -.. cvar:: NPY_E - - Base of natural logarithm (:math:`e`) - -.. cvar:: NPY_LOG2E - - Logarithm to base 2 of the Euler constant (:math:`\frac{\ln(e)}{\ln(2)}`) - -.. cvar:: NPY_LOG10E - - Logarithm to base 10 of the Euler constant (:math:`\frac{\ln(e)}{\ln(10)}`) - -.. cvar:: NPY_LOGE2 - - Natural logarithm of 2 (:math:`\ln(2)`) - -.. cvar:: NPY_LOGE10 - - Natural logarithm of 10 (:math:`\ln(10)`) - -.. cvar:: NPY_PI - - Pi (:math:`\pi`) - -.. cvar:: NPY_PI_2 - - Pi divided by 2 (:math:`\frac{\pi}{2}`) - -.. cvar:: NPY_PI_4 - - Pi divided by 4 (:math:`\frac{\pi}{4}`) - -.. cvar:: NPY_1_PI - - Reciprocal of pi (:math:`\frac{1}{\pi}`) - -.. cvar:: NPY_2_PI - - Two times the reciprocal of pi (:math:`\frac{2}{\pi}`) - -.. cvar:: NPY_EULER - - The Euler constant - :math:`\lim_{n\rightarrow\infty}({\sum_{k=1}^n{\frac{1}{k}}-\ln n})` - -Low-level floating point manipulation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Those can be useful for precise floating point comparison. - -.. cfunction:: double npy_nextafter(double x, double y) - - This is a function equivalent to C99 nextafter: return next representable - floating point value from x in the direction of y. Single and extended - precisions are available with suffix f and l. - - .. versionadded:: 1.4.0 - -.. cfunction:: double npy_spacing(double x) - - This is a function equivalent to Fortran intrinsic. Return distance between - x and next representable floating point value from x, e.g. spacing(1) == - eps. spacing of nan and +/- inf return nan. Single and extended precisions - are available with suffix f and l. - - .. versionadded:: 1.4.0 - -Complex functions -~~~~~~~~~~~~~~~~~ - -.. versionadded:: 1.4.0 - -C99-like complex functions have been added. 
Those can be used if you wish to -implement portable C extensions. Since we still support platforms without C99 -complex type, you need to restrict to C90-compatible syntax, e.g.: - -.. code-block:: c - - /* a = 1 + 2i \*/ - npy_complex a = npy_cpack(1, 2); - npy_complex b; - - b = npy_log(a); - -Linking against the core math library in an extension -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. versionadded:: 1.4.0 - -To use the core math library in your own extension, you need to add the npymath -compile and link options to your extension in your setup.py: - - >>> from numpy.distutils.misc_util import get_info - >>> info = get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=info) - -In other words, the usage of info is exactly the same as when using blas_info -and co. - -Half-precision functions -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. versionadded:: 2.0.0 - -The header file ``<numpy/halffloat.h>`` provides functions to work with -IEEE 754-2008 16-bit floating point values. While this format is -not typically used for numerical computations, it is useful for -storing values which require floating point but do not need much precision. -It can also be used as an educational tool to understand the nature -of floating point round-off error. - -Like for other types, NumPy includes a typedef npy_half for the 16 bit -float. Unlike for most of the other types, you cannot use this as a -normal type in C, since it is a typedef for npy_uint16. For example, -1.0 looks like 0x3c00 to C, and if you do an equality comparison -between the different signed zeros, you will get -0.0 != 0.0 -(0x8000 != 0x0000), which is incorrect. - -For these reasons, NumPy provides an API to work with npy_half values -accessible by including ``<numpy/halffloat.h>`` and linking to 'npymath'. -For functions that are not provided directly, such as the arithmetic -operations, the preferred method is to convert to float -or double and back again, as in the following example. - -.. 
code-block:: c - - npy_half sum(int n, npy_half *array) { - float ret = 0; - while(n--) { - ret += npy_half_to_float(*array++); - } - return npy_float_to_half(ret); - } - -External Links: - -* `754-2008 IEEE Standard for Floating-Point Arithmetic`__ -* `Half-precision Float Wikipedia Article`__. -* `OpenGL Half Float Pixel Support`__ -* `The OpenEXR image format`__. - -__ http://ieeexplore.ieee.org/servlet/opac?punumber=4610933 -__ http://en.wikipedia.org/wiki/Half_precision_floating-point_format -__ http://www.opengl.org/registry/specs/ARB/half_float_pixel.txt -__ http://www.openexr.com/about.html - -.. cvar:: NPY_HALF_ZERO - - This macro is defined to positive zero. - -.. cvar:: NPY_HALF_PZERO - - This macro is defined to positive zero. - -.. cvar:: NPY_HALF_NZERO - - This macro is defined to negative zero. - -.. cvar:: NPY_HALF_ONE - - This macro is defined to 1.0. - -.. cvar:: NPY_HALF_NEGONE - - This macro is defined to -1.0. - -.. cvar:: NPY_HALF_PINF - - This macro is defined to +inf. - -.. cvar:: NPY_HALF_NINF - - This macro is defined to -inf. - -.. cvar:: NPY_HALF_NAN - - This macro is defined to a NaN value, guaranteed to have its sign bit unset. - -.. cfunction:: float npy_half_to_float(npy_half h) - - Converts a half-precision float to a single-precision float. - -.. cfunction:: double npy_half_to_double(npy_half h) - - Converts a half-precision float to a double-precision float. - -.. cfunction:: npy_half npy_float_to_half(float f) - - Converts a single-precision float to a half-precision float. The - value is rounded to the nearest representable half, with ties going - to the nearest even. If the value is too small or too big, the - system's floating point underflow or overflow bit will be set. - -.. cfunction:: npy_half npy_double_to_half(double d) - - Converts a double-precision float to a half-precision float. The - value is rounded to the nearest representable half, with ties going - to the nearest even. 
If the value is too small or too big, the - system's floating point underflow or overflow bit will be set. - -.. cfunction:: int npy_half_eq(npy_half h1, npy_half h2) - - Compares two half-precision floats (h1 == h2). - -.. cfunction:: int npy_half_ne(npy_half h1, npy_half h2) - - Compares two half-precision floats (h1 != h2). - -.. cfunction:: int npy_half_le(npy_half h1, npy_half h2) - - Compares two half-precision floats (h1 <= h2). - -.. cfunction:: int npy_half_lt(npy_half h1, npy_half h2) - - Compares two half-precision floats (h1 < h2). - -.. cfunction:: int npy_half_ge(npy_half h1, npy_half h2) - - Compares two half-precision floats (h1 >= h2). - -.. cfunction:: int npy_half_gt(npy_half h1, npy_half h2) - - Compares two half-precision floats (h1 > h2). - -.. cfunction:: int npy_half_eq_nonan(npy_half h1, npy_half h2) - - Compares two half-precision floats that are known to not be NaN (h1 == h2). If - a value is NaN, the result is undefined. - -.. cfunction:: int npy_half_lt_nonan(npy_half h1, npy_half h2) - - Compares two half-precision floats that are known to not be NaN (h1 < h2). If - a value is NaN, the result is undefined. - -.. cfunction:: int npy_half_le_nonan(npy_half h1, npy_half h2) - - Compares two half-precision floats that are known to not be NaN (h1 <= h2). If - a value is NaN, the result is undefined. - -.. cfunction:: int npy_half_iszero(npy_half h) - - Tests whether the half-precision float has a value equal to zero. This may be slightly - faster than calling npy_half_eq(h, NPY_HALF_ZERO). - -.. cfunction:: int npy_half_isnan(npy_half h) - - Tests whether the half-precision float is a NaN. - -.. cfunction:: int npy_half_isinf(npy_half h) - - Tests whether the half-precision float is plus or minus Inf. - -.. cfunction:: int npy_half_isfinite(npy_half h) - - Tests whether the half-precision float is finite (not NaN or Inf). - -.. cfunction:: int npy_half_signbit(npy_half h) - - Returns 1 if h is negative, 0 otherwise. - -.. 
cfunction:: npy_half npy_half_copysign(npy_half x, npy_half y) - - Returns the value of x with the sign bit copied from y. Works for any value, - including Inf and NaN. - -.. cfunction:: npy_half npy_half_spacing(npy_half h) - - This is the same for half-precision float as npy_spacing and npy_spacingf - described in the low-level floating point section. - -.. cfunction:: npy_half npy_half_nextafter(npy_half x, npy_half y) - - This is the same for half-precision float as npy_nextafter and npy_nextafterf - described in the low-level floating point section. - -.. cfunction:: npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) - - Low-level function which converts a 32-bit single-precision float, stored - as a uint32, into a 16-bit half-precision float. - -.. cfunction:: npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) - - Low-level function which converts a 64-bit double-precision float, stored - as a uint64, into a 16-bit half-precision float. - -.. cfunction:: npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) - - Low-level function which converts a 16-bit half-precision float - into a 32-bit single-precision float, stored as a uint32. - -.. cfunction:: npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) - - Low-level function which converts a 16-bit half-precision float - into a 64-bit double-precision float, stored as a uint64. - diff --git a/numpy-1.6.2/doc/source/reference/c-api.dtype.rst b/numpy-1.6.2/doc/source/reference/c-api.dtype.rst deleted file mode 100644 index 01f5260de1..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.dtype.rst +++ /dev/null @@ -1,220 +0,0 @@ -Data Type API -============= - -.. sectionauthor:: Travis E. Oliphant - -The standard array can have 24 different data types (and has some -support for adding your own types). These data types all have an -enumerated type, an enumerated type-character, and a corresponding -array scalar Python type object (placed in a hierarchy). 
There are -also standard C typedefs to make it easier to manipulate elements of -the given data type. For the numeric types, there are also bit-width -equivalent C typedefs and named typenumbers that make it easier to -select the precision desired. - -.. warning:: - - The names for the types in c code follows c naming conventions - more closely. The Python names for these types follow Python - conventions. Thus, :cdata:`NPY_FLOAT` picks up a 32-bit float in - C, but :class:`numpy.float_` in Python corresponds to a 64-bit - double. The bit-width names can be used in both Python and C for - clarity. - - -Enumerated Types ----------------- - -There is a list of enumerated types defined providing the basic 24 -data types plus some useful generic names. Whenever the code requires -a type number, one of these enumerated types is requested. The types -are all called :cdata:`NPY_{NAME}` where ``{NAME}`` can be - - **BOOL**, **BYTE**, **UBYTE**, **SHORT**, **USHORT**, **INT**, - **UINT**, **LONG**, **ULONG**, **LONGLONG**, **ULONGLONG**, - **HALF**, **FLOAT**, **DOUBLE**, **LONGDOUBLE**, **CFLOAT**, - **CDOUBLE**, **CLONGDOUBLE**, **DATETIME**, **TIMEDELTA**, - **OBJECT**, **STRING**, **UNICODE**, **VOID** - - **NTYPES**, **NOTYPE**, **USERDEF**, **DEFAULT_TYPE** - -The various character codes indicating certain types are also part of -an enumerated list. References to type characters (should they be -needed at all) should always use these enumerations. 
The form of them -is :cdata:`NPY_{NAME}LTR` where ``{NAME}`` can be - - **BOOL**, **BYTE**, **UBYTE**, **SHORT**, **USHORT**, **INT**, - **UINT**, **LONG**, **ULONG**, **LONGLONG**, **ULONGLONG**, - **HALF**, **FLOAT**, **DOUBLE**, **LONGDOUBLE**, **CFLOAT**, - **CDOUBLE**, **CLONGDOUBLE**, **DATETIME**, **TIMEDELTA**, - **OBJECT**, **STRING**, **VOID** - - **INTP**, **UINTP** - - **GENBOOL**, **SIGNED**, **UNSIGNED**, **FLOATING**, **COMPLEX** - -The latter group of ``{NAME}s`` corresponds to letters used in the array -interface typestring specification. - - -Defines -------- - -Max and min values for integers -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. cvar:: NPY_MAX_INT{bits} - -.. cvar:: NPY_MAX_UINT{bits} - -.. cvar:: NPY_MIN_INT{bits} - - These are defined for ``{bits}`` = 8, 16, 32, 64, 128, and 256 and provide - the maximum (minimum) value of the corresponding (unsigned) integer - type. Note: the actual integer type may not be available on all - platforms (i.e. 128-bit and 256-bit integers are rare). - -.. cvar:: NPY_MIN_{type} - - This is defined for ``{type}`` = **BYTE**, **SHORT**, **INT**, - **LONG**, **LONGLONG**, **INTP** - -.. cvar:: NPY_MAX_{type} - - This is defined for all defined for ``{type}`` = **BYTE**, **UBYTE**, - **SHORT**, **USHORT**, **INT**, **UINT**, **LONG**, **ULONG**, - **LONGLONG**, **ULONGLONG**, **INTP**, **UINTP** - - -Number of bits in data types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All :cdata:`NPY_SIZEOF_{CTYPE}` constants have corresponding -:cdata:`NPY_BITSOF_{CTYPE}` constants defined. The :cdata:`NPY_BITSOF_{CTYPE}` -constants provide the number of bits in the data type. 
Specifically, -the available ``{CTYPE}s`` are - - **BOOL**, **CHAR**, **SHORT**, **INT**, **LONG**, - **LONGLONG**, **FLOAT**, **DOUBLE**, **LONGDOUBLE** - - -Bit-width references to enumerated typenums -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All of the numeric data types (integer, floating point, and complex) -have constants that are defined to be a specific enumerated type -number. Exactly which enumerated type a bit-width type refers to is -platform dependent. In particular, the constants available are -:cdata:`PyArray_{NAME}{BITS}` where ``{NAME}`` is **INT**, **UINT**, -**FLOAT**, **COMPLEX** and ``{BITS}`` can be 8, 16, 32, 64, 80, 96, 128, -160, 192, 256, and 512. Obviously not all bit-widths are available on -all platforms for all the kinds of numeric types. Commonly 8-, 16-, -32-, 64-bit integers; 32-, 64-bit floats; and 64-, 128-bit complex -types are available. - - -Integer that can hold a pointer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The constants **PyArray_INTP** and **PyArray_UINTP** refer to an -enumerated integer type that is large enough to hold a pointer on the -platform. Index arrays should always be converted to **PyArray_INTP** -, because the dimension of the array is of type npy_intp. - - -C-type names ------------- - -There are standard variable types for each of the numeric data types -and the bool data type. Some of these are already available in the -C-specification. You can create variables in extension code with these -types. - - -Boolean -^^^^^^^ - -.. ctype:: npy_bool - - unsigned char; The constants :cdata:`NPY_FALSE` and - :cdata:`NPY_TRUE` are also defined. - - -(Un)Signed Integer -^^^^^^^^^^^^^^^^^^ - -Unsigned versions of the integers can be defined by pre-pending a 'u' -to the front of the integer name. - -.. ctype:: npy_(u)byte - - (unsigned) char - -.. ctype:: npy_(u)short - - (unsigned) short - -.. ctype:: npy_(u)int - - (unsigned) int - -.. ctype:: npy_(u)long - - (unsigned) long int - -.. 
ctype:: npy_(u)longlong - - (unsigned long long int) - -.. ctype:: npy_(u)intp - - (unsigned) Py_intptr_t (an integer that is the size of a pointer on - the platform). - - -(Complex) Floating point -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. ctype:: npy_(c)float - - float - -.. ctype:: npy_(c)double - - double - -.. ctype:: npy_(c)longdouble - - long double - -complex types are structures with **.real** and **.imag** members (in -that order). - - -Bit-width names -^^^^^^^^^^^^^^^ - -There are also typedefs for signed integers, unsigned integers, -floating point, and complex floating point types of specific bit- -widths. The available type names are - - :ctype:`npy_int{bits}`, :ctype:`npy_uint{bits}`, :ctype:`npy_float{bits}`, - and :ctype:`npy_complex{bits}` - -where ``{bits}`` is the number of bits in the type and can be **8**, -**16**, **32**, **64**, 128, and 256 for integer types; 16, **32** -, **64**, 80, 96, 128, and 256 for floating-point types; and 32, -**64**, **128**, 160, 192, and 512 for complex-valued types. Which -bit-widths are available is platform dependent. The bolded bit-widths -are usually available on all platforms. - - -Printf Formatting ------------------ - -For help in printing, the following strings are defined as the correct -format specifier in printf and related commands. 
- - :cdata:`NPY_LONGLONG_FMT`, :cdata:`NPY_ULONGLONG_FMT`, - :cdata:`NPY_INTP_FMT`, :cdata:`NPY_UINTP_FMT`, - :cdata:`NPY_LONGDOUBLE_FMT` diff --git a/numpy-1.6.2/doc/source/reference/c-api.generalized-ufuncs.rst b/numpy-1.6.2/doc/source/reference/c-api.generalized-ufuncs.rst deleted file mode 100644 index 870e5dbc41..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.generalized-ufuncs.rst +++ /dev/null @@ -1,175 +0,0 @@ -================================== -Generalized Universal Function API -================================== - -There is a general need for looping over not only functions on scalars -but also over functions on vectors (or arrays), as explained on -http://scipy.org/scipy/numpy/wiki/GeneralLoopingFunctions. We propose -to realize this concept by generalizing the universal functions -(ufuncs), and provide a C implementation that adds ~500 lines -to the numpy code base. In current (specialized) ufuncs, the elementary -function is limited to element-by-element operations, whereas the -generalized version supports "sub-array" by "sub-array" operations. -The Perl vector library PDL provides a similar functionality and its -terms are re-used in the following. - -Each generalized ufunc has information associated with it that states -what the "core" dimensionality of the inputs is, as well as the -corresponding dimensionality of the outputs (the element-wise ufuncs -have zero core dimensions). The list of the core dimensions for all -arguments is called the "signature" of a ufunc. For example, the -ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs -and one scalar output. - -Another example is (see the GeneralLoopingFunctions page) the function -``inner1d(a,b)`` with a signature of ``(i),(i)->()``. This applies the -inner product along the last axis of each input, but keeps the -remaining indices intact. For example, where ``a`` is of shape ``(3,5,N)`` -and ``b`` is of shape ``(5,N)``, this will return an output of shape ``(3,5)``. 
-The underlying elementary function is called 3*5 times. In the -signature, we specify one core dimension ``(i)`` for each input and zero core -dimensions ``()`` for the output, since it takes two 1-d arrays and -returns a scalar. By using the same name ``i``, we specify that the two -corresponding dimensions should be of the same size (or one of them is -of size 1 and will be broadcasted). - -The dimensions beyond the core dimensions are called "loop" dimensions. In -the above example, this corresponds to ``(3,5)``. - -The usual numpy "broadcasting" rules apply, where the signature -determines how the dimensions of each input/output object are split -into core and loop dimensions: - -#. While an input array has a smaller dimensionality than the corresponding - number of core dimensions, 1's are pre-pended to its shape. -#. The core dimensions are removed from all inputs and the remaining - dimensions are broadcasted; defining the loop dimensions. -#. The output is given by the loop dimensions plus the output core dimensions. - - - -Definitions ------------ - -Elementary Function - Each ufunc consists of an elementary function that performs the - most basic operation on the smallest portion of array arguments - (e.g. adding two numbers is the most basic operation in adding two - arrays). The ufunc applies the elementary function multiple times - on different parts of the arrays. The input/output of elementary - functions can be vectors; e.g., the elementary function of inner1d - takes two vectors as input. - -Signature - A signature is a string describing the input/output dimensions of - the elementary function of a ufunc. See section below for more - details. - -Core Dimension - The dimensionality of each input/output of an elementary function - is defined by its core dimensions (zero core dimensions correspond - to a scalar input/output). The core dimensions are mapped to the - last dimensions of the input/output arrays. 
- -Dimension Name - A dimension name represents a core dimension in the signature. - Different dimensions may share a name, indicating that they are of - the same size (or are broadcastable). - -Dimension Index - A dimension index is an integer representing a dimension name. It - enumerates the dimension names according to the order of the first - occurrence of each name in the signature. - - -Details of Signature --------------------- - -The signature defines "core" dimensionality of input and output -variables, and thereby also defines the contraction of the -dimensions. The signature is represented by a string of the -following format: - -* Core dimensions of each input or output array are represented by a - list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar - input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``, - etc, one can use any valid Python variable name. -* Dimension lists for different arguments are separated by ``","``. - Input/output arguments are separated by ``"->"``. -* If one uses the same dimension name in multiple locations, this - enforces the same size (or broadcastable size) of the corresponding - dimensions. - -The formal syntax of signatures is as follows:: - - ::= "->" - ::= - ::= - ::= nil | | "," - ::= "(" ")" - ::= nil | | - "," - ::= valid Python variable name - - -Notes: - -#. All quotes are for clarity. -#. Core dimensions that share the same name must be broadcastable, as - the two ``i`` in our example above. Each dimension name typically - corresponding to one level of looping in the elementary function's - implementation. -#. White spaces are ignored. 
- -Here are some examples of signatures: - -+-------------+------------------------+-----------------------------------+ -| add | ``(),()->()`` | | -+-------------+------------------------+-----------------------------------+ -| inner1d | ``(i),(i)->()`` | | -+-------------+------------------------+-----------------------------------+ -| sum1d | ``(i)->()`` | | -+-------------+------------------------+-----------------------------------+ -| dot2d | ``(m,n),(n,p)->(m,p)`` | matrix multiplication | -+-------------+------------------------+-----------------------------------+ -| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, | -| | | outer over the second to last, | -| | | and loop/broadcast over the rest. | -+-------------+------------------------+-----------------------------------+ - -C-API for implementing Elementary Functions -------------------------------------------- - -The current interface remains unchanged, and ``PyUFunc_FromFuncAndData`` -can still be used to implement (specialized) ufuncs, consisting of -scalar elementary functions. - -One can use ``PyUFunc_FromFuncAndDataAndSignature`` to declare a more -general ufunc. The argument list is the same as -``PyUFunc_FromFuncAndData``, with an additional argument specifying the -signature as C string. - -Furthermore, the callback function is of the same type as before, -``void (*foo)(char **args, intp *dimensions, intp *steps, void *func)``. -When invoked, ``args`` is a list of length ``nargs`` containing -the data of all input/output arguments. For a scalar elementary -function, ``steps`` is also of length ``nargs``, denoting the strides used -for the arguments. ``dimensions`` is a pointer to a single integer -defining the size of the axis to be looped over. - -For a non-trivial signature, ``dimensions`` will also contain the sizes -of the core dimensions as well, starting at the second entry. 
Only -one size is provided for each unique dimension name and the sizes are -given according to the first occurrence of a dimension name in the -signature. - -The first ``nargs`` elements of ``steps`` remain the same as for scalar -ufuncs. The following elements contain the strides of all core -dimensions for all arguments in order. - -For example, consider a ufunc with signature ``(i,j),(i)->()``. In -this case, ``args`` will contain three pointers to the data of the -input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be -``[N, I, J]`` to define the size of ``N`` of the loop and the sizes ``I`` and ``J`` -for the core dimensions ``i`` and ``j``. Finally, ``steps`` will be -``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all necessary strides. diff --git a/numpy-1.6.2/doc/source/reference/c-api.iterator.rst b/numpy-1.6.2/doc/source/reference/c-api.iterator.rst deleted file mode 100644 index 9e443f2cbb..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.iterator.rst +++ /dev/null @@ -1,1184 +0,0 @@ -Array Iterator API -================== - -.. sectionauthor:: Mark Wiebe - -.. index:: - pair: iterator; C-API - pair: C-API; iterator - -.. versionadded:: 1.6 - -Array Iterator --------------- - -The array iterator encapsulates many of the key features in ufuncs, -allowing user code to support features like output parameters, -preservation of memory layouts, and buffering of data with the wrong -alignment or type, without requiring difficult coding. - -This page documents the API for the iterator. -The C-API naming convention chosen is based on the one in the numpy-refactor -branch, so will integrate naturally into the refactored code base. -The iterator is named ``NpyIter`` and functions are -named ``NpyIter_*``. - -Converting from Previous NumPy Iterators ----------------------------------------- - -The existing iterator API includes functions like PyArrayIter_Check, -PyArray_Iter* and PyArray_ITER_*. 
The multi-iterator array includes -PyArray_MultiIter*, PyArray_Broadcast, and PyArray_RemoveSmallest. The -new iterator design replaces all of this functionality with a single object -and associated API. One goal of the new API is that all uses of the -existing iterator should be replaceable with the new iterator without -significant effort. In 1.6, the major exception to this is the neighborhood -iterator, which does not have corresponding features in this iterator. - -Here is a conversion table for which functions to use with the new iterator: - -===================================== ============================================= -*Iterator Functions* -:cfunc:`PyArray_IterNew` :cfunc:`NpyIter_New` -:cfunc:`PyArray_IterAllButAxis` :cfunc:`NpyIter_New` + ``axes`` parameter **or** - Iterator flag :cdata:`NPY_ITER_EXTERNAL_LOOP` -:cfunc:`PyArray_BroadcastToShape` **NOT SUPPORTED** (Use the support for - multiple operands instead.) -:cfunc:`PyArrayIter_Check` Will need to add this in Python exposure -:cfunc:`PyArray_ITER_RESET` :cfunc:`NpyIter_Reset` -:cfunc:`PyArray_ITER_NEXT` Function pointer from :cfunc:`NpyIter_GetIterNext` -:cfunc:`PyArray_ITER_DATA` :cfunc:`NpyIter_GetDataPtrArray` -:cfunc:`PyArray_ITER_GOTO` :cfunc:`NpyIter_GotoMultiIndex` -:cfunc:`PyArray_ITER_GOTO1D` :cfunc:`NpyIter_GotoIndex` or - :cfunc:`NpyIter_GotoIterIndex` -:cfunc:`PyArray_ITER_NOTDONE` Return value of ``iternext`` function pointer -*Multi-iterator Functions* -:cfunc:`PyArray_MultiIterNew` :cfunc:`NpyIter_MultiNew` -:cfunc:`PyArray_MultiIter_RESET` :cfunc:`NpyIter_Reset` -:cfunc:`PyArray_MultiIter_NEXT` Function pointer from :cfunc:`NpyIter_GetIterNext` -:cfunc:`PyArray_MultiIter_DATA` :cfunc:`NpyIter_GetDataPtrArray` -:cfunc:`PyArray_MultiIter_NEXTi` **NOT SUPPORTED** (always lock-step iteration) -:cfunc:`PyArray_MultiIter_GOTO` :cfunc:`NpyIter_GotoMultiIndex` -:cfunc:`PyArray_MultiIter_GOTO1D` :cfunc:`NpyIter_GotoIndex` or - :cfunc:`NpyIter_GotoIterIndex` 
-:cfunc:`PyArray_MultiIter_NOTDONE` Return value of ``iternext`` function pointer -:cfunc:`PyArray_Broadcast` Handled by :cfunc:`NpyIter_MultiNew` -:cfunc:`PyArray_RemoveSmallest` Iterator flag :cdata:`NPY_ITER_EXTERNAL_LOOP` -*Other Functions* -:cfunc:`PyArray_ConvertToCommonType` Iterator flag :cdata:`NPY_ITER_COMMON_DTYPE` -===================================== ============================================= - -Simple Iteration Example ------------------------- - -The best way to become familiar with the iterator is to look at its -usage within the NumPy codebase itself. For example, here is a slightly -tweaked version of the code for :cfunc:`PyArray_CountNonzero`, which counts the -number of non-zero elements in an array. - -.. code-block:: c - - npy_intp PyArray_CountNonzero(PyArrayObject* self) - { - /* Nonzero boolean function */ - PyArray_NonzeroFunc* nonzero = PyArray_DESCR(self)->f->nonzero; - - NpyIter* iter; - NpyIter_IterNextFunc *iternext; - char** dataptr; - npy_intp nonzero_count = 0; - npy_intp* strideptr,* innersizeptr; - - /* Handle zero-sized arrays specially */ - if (PyArray_SIZE(self) == 0) { - return 0; - } - - /* - * Create and use an iterator to count the nonzeros. - * flag NPY_ITER_READONLY - * - The array is never written to. - * flag NPY_ITER_EXTERNAL_LOOP - * - Inner loop is done outside the iterator for efficiency. - * flag NPY_ITER_REFS_OK - * - Reference types are acceptable. - * order NPY_KEEPORDER - * - Visit elements in memory order, regardless of strides. - * This is good for performance when the specific order - * elements are visited is unimportant. - * casting NPY_NO_CASTING - * - No casting is required for this operation. - */ - iter = NpyIter_New(self, NPY_ITER_READONLY| - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_REFS_OK, - NPY_KEEPORDER, NPY_NO_CASTING, - NULL); - if (iter == NULL) { - return -1; - } - - /* - * The iternext function gets stored in a local variable - * so it can be called repeatedly in an efficient manner. 
- */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - return -1; - } - /* The location of the data pointer which the iterator may update */ - dataptr = NpyIter_GetDataPtrArray(iter); - /* The location of the stride which the iterator may update */ - strideptr = NpyIter_GetInnerStrideArray(iter); - /* The location of the inner loop size which the iterator may update */ - innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); - - /* The iteration loop */ - do { - /* Get the inner loop data/stride/count values */ - char* data = *dataptr; - npy_intp stride = *strideptr; - npy_intp count = *innersizeptr; - - /* This is a typical inner loop for NPY_ITER_EXTERNAL_LOOP */ - while (count--) { - if (nonzero(data, self)) { - ++nonzero_count; - } - data += stride; - } - - /* Increment the iterator to the next inner loop */ - } while(iternext(iter)); - - NpyIter_Deallocate(iter); - - return nonzero_count; - } - -Simple Multi-Iteration Example ------------------------------- - -Here is a simple copy function using the iterator. The ``order`` parameter -is used to control the memory layout of the allocated result, typically -:cdata:`NPY_KEEPORDER` is desired. - -.. code-block:: c - - PyObject *CopyArray(PyObject *arr, NPY_ORDER order) - { - NpyIter *iter; - NpyIter_IterNextFunc *iternext; - PyObject *op[2], *ret; - npy_uint32 flags; - npy_uint32 op_flags[2]; - npy_intp itemsize, *innersizeptr, innerstride; - char **dataptrarray; - - /* - * No inner iteration - inner loop is handled by CopyArray code - */ - flags = NPY_ITER_EXTERNAL_LOOP; - /* - * Tell the constructor to automatically allocate the output. - * The data type of the output will match that of the input. 
-         */
-        op[0] = arr;
-        op[1] = NULL;
-        op_flags[0] = NPY_ITER_READONLY;
-        op_flags[1] = NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE;
-
-        /* Construct the iterator */
-        iter = NpyIter_MultiNew(2, op, flags, order, NPY_NO_CASTING,
-                                op_flags, NULL);
-        if (iter == NULL) {
-            return NULL;
-        }
-
-        /*
-         * Make a copy of the iternext function pointer and
-         * a few other variables the inner loop needs.
-         */
-        iternext = NpyIter_GetIterNext(iter, NULL);
-        innerstride = NpyIter_GetInnerStrideArray(iter)[0];
-        itemsize = NpyIter_GetDescrArray(iter)[0]->elsize;
-        /*
-         * The inner loop size and data pointers may change during the
-         * loop, so just cache the addresses.
-         */
-        innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
-        dataptrarray = NpyIter_GetDataPtrArray(iter);
-
-        /*
-         * Note that because the iterator allocated the output,
-         * it matches the iteration order and is packed tightly,
-         * so we don't need to check it like the input.
-         */
-        if (innerstride == itemsize) {
-            do {
-                memcpy(dataptrarray[1], dataptrarray[0],
-                            itemsize * (*innersizeptr));
-            } while (iternext(iter));
-        } else {
-            /* For efficiency, should specialize this based on item size... */
-            npy_intp i;
-            do {
-                npy_intp size = *innersizeptr;
-                char *src = dataptrarray[0], *dst = dataptrarray[1];
-                for(i = 0; i < size; i++, src += innerstride, dst += itemsize) {
-                    memcpy(dst, src, itemsize);
-                }
-            } while (iternext(iter));
-        }
-
-        /* Get the result from the iterator object array */
-        ret = NpyIter_GetOperandArray(iter)[1];
-        Py_INCREF(ret);
-
-        if (NpyIter_Deallocate(iter) != NPY_SUCCEED) {
-            Py_DECREF(ret);
-            return NULL;
-        }
-
-        return ret;
-    }
-
-
-Iterator Data Types
---------------------
-
-The iterator layout is an internal detail, and user code only sees
-an incomplete struct.
-
-.. ctype:: NpyIter
-
-    This is an opaque pointer type for the iterator. Access to its contents
-    can only be done through the iterator API.
-
-.. ctype:: NpyIter_Type
-
-    This is the type which exposes the iterator to Python.
Currently, no - API is exposed which provides access to the values of a Python-created - iterator. If an iterator is created in Python, it must be used in Python - and vice versa. Such an API will likely be created in a future version. - -.. ctype:: NpyIter_IterNextFunc - - This is a function pointer for the iteration loop, returned by - :cfunc:`NpyIter_GetIterNext`. - -.. ctype:: NpyIter_GetMultiIndexFunc - - This is a function pointer for getting the current iterator multi-index, - returned by :cfunc:`NpyIter_GetGetMultiIndex`. - -Construction and Destruction ----------------------------- - -.. cfunction:: NpyIter* NpyIter_New(PyArrayObject* op, npy_uint32 flags, NPY_ORDER order, NPY_CASTING casting, PyArray_Descr* dtype) - - Creates an iterator for the given numpy array object ``op``. - - Flags that may be passed in ``flags`` are any combination - of the global and per-operand flags documented in - :cfunc:`NpyIter_MultiNew`, except for :cdata:`NPY_ITER_ALLOCATE`. - - Any of the :ctype:`NPY_ORDER` enum values may be passed to ``order``. For - efficient iteration, :ctype:`NPY_KEEPORDER` is the best option, and - the other orders enforce the particular iteration pattern. - - Any of the :ctype:`NPY_CASTING` enum values may be passed to ``casting``. - The values include :cdata:`NPY_NO_CASTING`, :cdata:`NPY_EQUIV_CASTING`, - :cdata:`NPY_SAFE_CASTING`, :cdata:`NPY_SAME_KIND_CASTING`, and - :cdata:`NPY_UNSAFE_CASTING`. To allow the casts to occur, copying or - buffering must also be enabled. - - If ``dtype`` isn't ``NULL``, then it requires that data type. - If copying is allowed, it will make a temporary copy if the data - is castable. If :cdata:`NPY_ITER_UPDATEIFCOPY` is enabled, it will - also copy the data back with another cast upon iterator destruction. - - Returns NULL if there is an error, otherwise returns the allocated - iterator. - - To make an iterator similar to the old iterator, this should work. - - .. 
code-block:: c - - iter = NpyIter_New(op, NPY_ITER_READWRITE, - NPY_CORDER, NPY_NO_CASTING, NULL); - - If you want to edit an array with aligned ``double`` code, - but the order doesn't matter, you would use this. - - .. code-block:: c - - dtype = PyArray_DescrFromType(NPY_DOUBLE); - iter = NpyIter_New(op, NPY_ITER_READWRITE| - NPY_ITER_BUFFERED| - NPY_ITER_NBO| - NPY_ITER_ALIGNED, - NPY_KEEPORDER, - NPY_SAME_KIND_CASTING, - dtype); - Py_DECREF(dtype); - -.. cfunction:: NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes) - - Creates an iterator for broadcasting the ``nop`` array objects provided - in ``op``, using regular NumPy broadcasting rules. - - Any of the :ctype:`NPY_ORDER` enum values may be passed to ``order``. For - efficient iteration, :cdata:`NPY_KEEPORDER` is the best option, and the - other orders enforce the particular iteration pattern. When using - :cdata:`NPY_KEEPORDER`, if you also want to ensure that the iteration is - not reversed along an axis, you should pass the flag - :cdata:`NPY_ITER_DONT_NEGATE_STRIDES`. - - Any of the :ctype:`NPY_CASTING` enum values may be passed to ``casting``. - The values include :cdata:`NPY_NO_CASTING`, :cdata:`NPY_EQUIV_CASTING`, - :cdata:`NPY_SAFE_CASTING`, :cdata:`NPY_SAME_KIND_CASTING`, and - :cdata:`NPY_UNSAFE_CASTING`. To allow the casts to occur, copying or - buffering must also be enabled. - - If ``op_dtypes`` isn't ``NULL``, it specifies a data type or ``NULL`` - for each ``op[i]``. - - Returns NULL if there is an error, otherwise returns the allocated - iterator. - - Flags that may be passed in ``flags``, applying to the whole - iterator, are: - - .. cvar:: NPY_ITER_C_INDEX - - Causes the iterator to track a raveled flat index matching C - order. This option cannot be used with :cdata:`NPY_ITER_F_INDEX`. - - .. 
cvar:: NPY_ITER_F_INDEX - - Causes the iterator to track a raveled flat index matching Fortran - order. This option cannot be used with :cdata:`NPY_ITER_C_INDEX`. - - .. cvar:: NPY_ITER_MULTI_INDEX - - Causes the iterator to track a multi-index. - This prevents the iterator from coalescing axes to - produce bigger inner loops. - - .. cvar:: NPY_ITER_EXTERNAL_LOOP - - Causes the iterator to skip iteration of the innermost - loop, requiring the user of the iterator to handle it. - - This flag is incompatible with :cdata:`NPY_ITER_C_INDEX`, - :cdata:`NPY_ITER_F_INDEX`, and :cdata:`NPY_ITER_MULTI_INDEX`. - - .. cvar:: NPY_ITER_DONT_NEGATE_STRIDES - - This only affects the iterator when :ctype:`NPY_KEEPORDER` is - specified for the order parameter. By default with - :ctype:`NPY_KEEPORDER`, the iterator reverses axes which have - negative strides, so that memory is traversed in a forward - direction. This disables this step. Use this flag if you - want to use the underlying memory-ordering of the axes, - but don't want an axis reversed. This is the behavior of - ``numpy.ravel(a, order='K')``, for instance. - - .. cvar:: NPY_ITER_COMMON_DTYPE - - Causes the iterator to convert all the operands to a common - data type, calculated based on the ufunc type promotion rules. - Copying or buffering must be enabled. - - If the common data type is known ahead of time, don't use this - flag. Instead, set the requested dtype for all the operands. - - .. cvar:: NPY_ITER_REFS_OK - - Indicates that arrays with reference types (object - arrays or structured arrays containing an object type) - may be accepted and used in the iterator. If this flag - is enabled, the caller must be sure to check whether - :cfunc:`NpyIter_IterationNeedsAPI`(iter) is true, in which case - it may not release the GIL during iteration. - - .. cvar:: NPY_ITER_ZEROSIZE_OK - - Indicates that arrays with a size of zero should be permitted. 
- Since the typical iteration loop does not naturally work with - zero-sized arrays, you must check that the IterSize is non-zero - before entering the iteration loop. - - .. cvar:: NPY_ITER_REDUCE_OK - - Permits writeable operands with a dimension with zero - stride and size greater than one. Note that such operands - must be read/write. - - When buffering is enabled, this also switches to a special - buffering mode which reduces the loop length as necessary to - not trample on values being reduced. - - Note that if you want to do a reduction on an automatically - allocated output, you must use :cfunc:`NpyIter_GetOperandArray` - to get its reference, then set every value to the reduction - unit before doing the iteration loop. In the case of a - buffered reduction, this means you must also specify the - flag :cdata:`NPY_ITER_DELAY_BUFALLOC`, then reset the iterator - after initializing the allocated operand to prepare the - buffers. - - .. cvar:: NPY_ITER_RANGED - - Enables support for iteration of sub-ranges of the full - ``iterindex`` range ``[0, NpyIter_IterSize(iter))``. Use - the function :cfunc:`NpyIter_ResetToIterIndexRange` to specify - a range for iteration. - - This flag can only be used with :cdata:`NPY_ITER_EXTERNAL_LOOP` - when :cdata:`NPY_ITER_BUFFERED` is enabled. This is because - without buffering, the inner loop is always the size of the - innermost iteration dimension, and allowing it to get cut up - would require special handling, effectively making it more - like the buffered version. - - .. cvar:: NPY_ITER_BUFFERED - - Causes the iterator to store buffering data, and use buffering - to satisfy data type, alignment, and byte-order requirements. - To buffer an operand, do not specify the :cdata:`NPY_ITER_COPY` - or :cdata:`NPY_ITER_UPDATEIFCOPY` flags, because they will - override buffering. 
Buffering is especially useful for Python - code using the iterator, allowing for larger chunks - of data at once to amortize the Python interpreter overhead. - - If used with :cdata:`NPY_ITER_EXTERNAL_LOOP`, the inner loop - for the caller may get larger chunks than would be possible - without buffering, because of how the strides are laid out. - - Note that if an operand is given the flag :cdata:`NPY_ITER_COPY` - or :cdata:`NPY_ITER_UPDATEIFCOPY`, a copy will be made in preference - to buffering. Buffering will still occur when the array was - broadcast so elements need to be duplicated to get a constant - stride. - - In normal buffering, the size of each inner loop is equal - to the buffer size, or possibly larger if - :cdata:`NPY_ITER_GROWINNER` is specified. If - :cdata:`NPY_ITER_REDUCE_OK` is enabled and a reduction occurs, - the inner loops may become smaller depending - on the structure of the reduction. - - .. cvar:: NPY_ITER_GROWINNER - - When buffering is enabled, this allows the size of the inner - loop to grow when buffering isn't necessary. This option - is best used if you're doing a straight pass through all the - data, rather than anything with small cache-friendly arrays - of temporary values for each inner loop. - - .. cvar:: NPY_ITER_DELAY_BUFALLOC - - When buffering is enabled, this delays allocation of the - buffers until :cfunc:`NpyIter_Reset` or another reset function is - called. This flag exists to avoid wasteful copying of - buffer data when making multiple copies of a buffered - iterator for multi-threaded iteration. - - Another use of this flag is for setting up reduction operations. - After the iterator is created, and a reduction output - is allocated automatically by the iterator (be sure to use - READWRITE access), its value may be initialized to the reduction - unit. Use :cfunc:`NpyIter_GetOperandArray` to get the object. - Then, call :cfunc:`NpyIter_Reset` to allocate and fill the buffers - with their initial values. 
-
-    Flags that may be passed in ``op_flags[i]``, where ``0 <= i < nop``:
-
-    .. cvar:: NPY_ITER_READWRITE
-    .. cvar:: NPY_ITER_READONLY
-    .. cvar:: NPY_ITER_WRITEONLY
-
-        Indicate how the user of the iterator will read or write
-        to ``op[i]``. Exactly one of these flags must be specified
-        per operand.
-
-    .. cvar:: NPY_ITER_COPY
-
-        Allow a copy of ``op[i]`` to be made if it does not
-        meet the data type or alignment requirements as specified
-        by the constructor flags and parameters.
-
-    .. cvar:: NPY_ITER_UPDATEIFCOPY
-
-        Triggers :cdata:`NPY_ITER_COPY`, and when an array operand
-        is flagged for writing and is copied, causes the data
-        in a copy to be copied back to ``op[i]`` when the iterator
-        is destroyed.
-
-        If the operand is flagged as write-only and a copy is needed,
-        an uninitialized temporary array will be created and then copied
-        back to ``op[i]`` on destruction, instead of doing
-        the unnecessary copy operation.
-
-    .. cvar:: NPY_ITER_NBO
-    .. cvar:: NPY_ITER_ALIGNED
-    .. cvar:: NPY_ITER_CONTIG
-
-        Causes the iterator to provide data for ``op[i]``
-        that is in native byte order, aligned according to
-        the dtype requirements, contiguous, or any combination.
-
-        By default, the iterator produces pointers into the
-        arrays provided, which may be aligned or unaligned, and
-        with any byte order. If copying or buffering is not
-        enabled and the operand data doesn't satisfy the constraints,
-        an error will be raised.
-
-        The contiguous constraint applies only to the inner loop,
-        successive inner loops may have arbitrary pointer changes.
-
-        If the requested data type is in non-native byte order,
-        the NBO flag overrides it and the requested data type is
-        converted to be in native byte order.
-
-    .. cvar:: NPY_ITER_ALLOCATE
-
-        This is for output arrays, and requires that the flag
-        :cdata:`NPY_ITER_WRITEONLY` be set.
If ``op[i]`` is NULL,
-        creates a new array with the final broadcast dimensions,
-        and a layout matching the iteration order of the iterator.
-
-        When ``op[i]`` is NULL, the requested data type
-        ``op_dtypes[i]`` may be NULL as well, in which case it is
-        automatically generated from the dtypes of the arrays which
-        are flagged as readable. The rules for generating the dtype
-        are the same as for UFuncs. Of special note is handling
-        of byte order in the selected dtype. If there is exactly
-        one input, the input's dtype is used as is. Otherwise,
-        if more than one input dtypes are combined together, the
-        output will be in native byte order.
-
-        After being allocated with this flag, the caller may retrieve
-        the new array by calling :cfunc:`NpyIter_GetOperandArray` and
-        getting the i-th object in the returned C array. The caller
-        must call Py_INCREF on it to claim a reference to the array.
-
-    .. cvar:: NPY_ITER_NO_SUBTYPE
-
-        For use with :cdata:`NPY_ITER_ALLOCATE`, this flag disables
-        allocating an array subtype for the output, forcing
-        it to be a straight ndarray.
-
-        TODO: Maybe it would be better to introduce a function
-        ``NpyIter_GetWrappedOutput`` and remove this flag?
-
-    .. cvar:: NPY_ITER_NO_BROADCAST
-
-        Ensures that the input or output matches the iteration
-        dimensions exactly.
-
-.. cfunction:: NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes, int oa_ndim, int** op_axes, npy_intp* itershape, npy_intp buffersize)
-
-    Extends :cfunc:`NpyIter_MultiNew` with several advanced options providing
-    more control over broadcasting and buffering.
-
-    If 0/NULL values are passed to ``oa_ndim``, ``op_axes``, ``itershape``,
-    and ``buffersize``, it is equivalent to :cfunc:`NpyIter_MultiNew`.
-
-    The parameter ``oa_ndim``, when non-zero, specifies the number of
-    dimensions that will be iterated with customized broadcasting.
-    If it is provided, ``op_axes`` and/or ``itershape`` must also be provided.
-    The ``op_axes`` parameter lets you control in detail how the
-    axes of the operand arrays get matched together and iterated.
-    In ``op_axes``, you must provide an array of ``nop`` pointers
-    to ``oa_ndim``-sized arrays of type ``npy_intp``. If an entry
-    in ``op_axes`` is NULL, normal broadcasting rules will apply.
-    In ``op_axes[j][i]`` is stored either a valid axis of ``op[j]``, or
-    -1 which means ``newaxis``. Within each ``op_axes[j]`` array, axes
-    may not be repeated. The following example is how normal broadcasting
-    applies to a 3-D array, a 2-D array, a 1-D array and a scalar.
-
-    .. code-block:: c
-
-        int oa_ndim = 3;               /* # iteration axes */
-        int op0_axes[] = {0, 1, 2};    /* 3-D operand */
-        int op1_axes[] = {-1, 0, 1};   /* 2-D operand */
-        int op2_axes[] = {-1, -1, 0};  /* 1-D operand */
-        int op3_axes[] = {-1, -1, -1}; /* 0-D (scalar) operand */
-        int* op_axes[] = {op0_axes, op1_axes, op2_axes, op3_axes};
-
-    The ``itershape`` parameter allows you to force the iterator
-    to have a specific iteration shape. It is an array of length
-    ``oa_ndim``. When an entry is negative, its value is determined
-    from the operands. This parameter allows automatically allocated
-    outputs to get additional dimensions which don't match up with
-    any dimension of an input.
-
-    If ``buffersize`` is zero, a default buffer size is used,
-    otherwise it specifies how big of a buffer to use. Buffers
-    which are powers of 2 such as 4096 or 8192 are recommended.
-
-    Returns NULL if there is an error, otherwise returns the allocated
-    iterator.
-
-.. cfunction:: NpyIter* NpyIter_Copy(NpyIter* iter)
-
-    Makes a copy of the given iterator. This function is provided
-    primarily to enable multi-threaded iteration of the data.
-
-    *TODO*: Move this to a section about multithreaded iteration.
-
-    The recommended approach to multithreaded iteration is to
-    first create an iterator with the flags
-    :cdata:`NPY_ITER_EXTERNAL_LOOP`, :cdata:`NPY_ITER_RANGED`,
-    :cdata:`NPY_ITER_BUFFERED`, :cdata:`NPY_ITER_DELAY_BUFALLOC`, and
-    possibly :cdata:`NPY_ITER_GROWINNER`. Create a copy of this iterator
-    for each thread (minus one for the first iterator). Then, take
-    the iteration index range ``[0, NpyIter_GetIterSize(iter))`` and
-    split it up into tasks, for example using a TBB parallel_for loop.
-    When a thread gets a task to execute, it then uses its copy of
-    the iterator by calling :cfunc:`NpyIter_ResetToIterIndexRange` and
-    iterating over the full range.
-
-    When using the iterator in multi-threaded code or in code not
-    holding the Python GIL, care must be taken to only call functions
-    which are safe in that context. :cfunc:`NpyIter_Copy` cannot be safely
-    called without the Python GIL, because it increments Python
-    references. The ``Reset*`` and some other functions may be safely
-    called by passing in the ``errmsg`` parameter as non-NULL, so that
-    the functions will pass back errors through it instead of setting
-    a Python exception.
-
-.. cfunction:: int NpyIter_RemoveAxis(NpyIter* iter, int axis)
-
-    Removes an axis from iteration. This requires that
-    :cdata:`NPY_ITER_MULTI_INDEX` was set for iterator creation, and does
-    not work if buffering is enabled or an index is being tracked. This
-    function also resets the iterator to its initial state.
-
-    This is useful for setting up an accumulation loop, for example.
-    The iterator can first be created with all the dimensions, including
-    the accumulation axis, so that the output gets created correctly.
-    Then, the accumulation axis can be removed, and the calculation
-    done in a nested fashion.
-
-    **WARNING**: This function may change the internal memory layout of
-    the iterator. Any cached functions or pointers from the iterator
-    must be retrieved again!
- - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - - -.. cfunction:: int NpyIter_RemoveMultiIndex(NpyIter* iter) - - If the iterator is tracking a multi-index, this strips support for them, - and does further iterator optimizations that are possible if multi-indices - are not needed. This function also resets the iterator to its initial - state. - - **WARNING**: This function may change the internal memory layout of - the iterator. Any cached functions or pointers from the iterator - must be retrieved again! - - After calling this function, :cfunc:`NpyIter_HasMultiIndex`(iter) will - return false. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: int NpyIter_EnableExternalLoop(NpyIter* iter) - - If :cfunc:`NpyIter_RemoveMultiIndex` was called, you may want to enable the - flag :cdata:`NPY_ITER_EXTERNAL_LOOP`. This flag is not permitted - together with :cdata:`NPY_ITER_MULTI_INDEX`, so this function is provided - to enable the feature after :cfunc:`NpyIter_RemoveMultiIndex` is called. - This function also resets the iterator to its initial state. - - **WARNING**: This function changes the internal logic of the iterator. - Any cached functions or pointers from the iterator must be retrieved - again! - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: int NpyIter_Deallocate(NpyIter* iter) - - Deallocates the iterator object. This additionally frees any - copies made, triggering UPDATEIFCOPY behavior where necessary. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: int NpyIter_Reset(NpyIter* iter, char** errmsg) - - Resets the iterator back to its initial state, at the beginning - of the iteration range. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL, - no Python exception is set when ``NPY_FAIL`` is returned. - Instead, \*errmsg is set to an error message. When errmsg is - non-NULL, the function may be safely called without holding - the Python GIL. - -.. 
cfunction:: int NpyIter_ResetToIterIndexRange(NpyIter* iter, npy_intp istart, npy_intp iend, char** errmsg) - - Resets the iterator and restricts it to the ``iterindex`` range - ``[istart, iend)``. See :cfunc:`NpyIter_Copy` for an explanation of - how to use this for multi-threaded iteration. This requires that - the flag :cdata:`NPY_ITER_RANGED` was passed to the iterator constructor. - - If you want to reset both the ``iterindex`` range and the base - pointers at the same time, you can do the following to avoid - extra buffer copying (be sure to add the return code error checks - when you copy this code). - - .. code-block:: c - - /* Set to a trivial empty range */ - NpyIter_ResetToIterIndexRange(iter, 0, 0); - /* Set the base pointers */ - NpyIter_ResetBasePointers(iter, baseptrs); - /* Set to the desired range */ - NpyIter_ResetToIterIndexRange(iter, istart, iend); - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL, - no Python exception is set when ``NPY_FAIL`` is returned. - Instead, \*errmsg is set to an error message. When errmsg is - non-NULL, the function may be safely called without holding - the Python GIL. - -.. cfunction:: int NpyIter_ResetBasePointers(NpyIter *iter, char** baseptrs, char** errmsg) - - Resets the iterator back to its initial state, but using the values - in ``baseptrs`` for the data instead of the pointers from the arrays - being iterated. This functions is intended to be used, together with - the ``op_axes`` parameter, by nested iteration code with two or more - iterators. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL, - no Python exception is set when ``NPY_FAIL`` is returned. - Instead, \*errmsg is set to an error message. When errmsg is - non-NULL, the function may be safely called without holding - the Python GIL. - - *TODO*: Move the following into a special section on nested iterators. - - Creating iterators for nested iteration requires some care. 
All
-    the iterator operands must match exactly, or the calls to
-    :cfunc:`NpyIter_ResetBasePointers` will be invalid. This means that
-    automatic copies and output allocation should not be used haphazardly.
-    It is possible to still use the automatic data conversion and casting
-    features of the iterator by creating one of the iterators with
-    all the conversion parameters enabled, then grabbing the allocated
-    operands with the :cfunc:`NpyIter_GetOperandArray` function and passing
-    them into the constructors for the rest of the iterators.
-
-    **WARNING**: When creating iterators for nested iteration,
-    the code must not use a dimension more than once in the different
-    iterators. If this is done, nested iteration will produce
-    out-of-bounds pointers during iteration.
-
-    **WARNING**: When creating iterators for nested iteration, buffering
-    can only be applied to the innermost iterator. If a buffered iterator
-    is used as the source for ``baseptrs``, it will point into a small buffer
-    instead of the array and the inner iteration will be invalid.
-
-    The pattern for using nested iterators is as follows.
-
-    .. code-block:: c
-
-        NpyIter *iter1, *iter2;
-        NpyIter_IterNextFunc *iternext1, *iternext2;
-        char **dataptrs1;
-
-        /*
-         * With the exact same operands, no copies allowed, and
-         * no axis in op_axes used both in iter1 and iter2.
-         * Buffering may be enabled for iter2, but not for iter1.
-         */
-        iter1 = ...; iter2 = ...;
-
-        iternext1 = NpyIter_GetIterNext(iter1, NULL);
-        iternext2 = NpyIter_GetIterNext(iter2, NULL);
-        dataptrs1 = NpyIter_GetDataPtrArray(iter1);
-
-        do {
-            NpyIter_ResetBasePointers(iter2, dataptrs1);
-            do {
-                /* Use the iter2 values */
-            } while (iternext2(iter2));
-        } while (iternext1(iter1));
-
-.. cfunction:: int NpyIter_GotoMultiIndex(NpyIter* iter, npy_intp* multi_index)
-
-    Adjusts the iterator to point to the ``ndim`` indices
-    pointed to by ``multi_index``.
Returns an error if a multi-index - is not being tracked, the indices are out of bounds, - or inner loop iteration is disabled. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: int NpyIter_GotoIndex(NpyIter* iter, npy_intp index) - - Adjusts the iterator to point to the ``index`` specified. - If the iterator was constructed with the flag - :cdata:`NPY_ITER_C_INDEX`, ``index`` is the C-order index, - and if the iterator was constructed with the flag - :cdata:`NPY_ITER_F_INDEX`, ``index`` is the Fortran-order - index. Returns an error if there is no index being tracked, - the index is out of bounds, or inner loop iteration is disabled. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: npy_intp NpyIter_GetIterSize(NpyIter* iter) - - Returns the number of elements being iterated. This is the product - of all the dimensions in the shape. - -.. cfunction:: npy_intp NpyIter_GetIterIndex(NpyIter* iter) - - Gets the ``iterindex`` of the iterator, which is an index matching - the iteration order of the iterator. - -.. cfunction:: void NpyIter_GetIterIndexRange(NpyIter* iter, npy_intp* istart, npy_intp* iend) - - Gets the ``iterindex`` sub-range that is being iterated. If - :cdata:`NPY_ITER_RANGED` was not specified, this always returns the - range ``[0, NpyIter_IterSize(iter))``. - -.. cfunction:: int NpyIter_GotoIterIndex(NpyIter* iter, npy_intp iterindex) - - Adjusts the iterator to point to the ``iterindex`` specified. - The IterIndex is an index matching the iteration order of the iterator. - Returns an error if the ``iterindex`` is out of bounds, - buffering is enabled, or inner loop iteration is disabled. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* iter) - - Returns 1 if the flag :cdata:`NPY_ITER_DELAY_BUFALLOC` was passed - to the iterator constructor, and no call to one of the Reset - functions has been done yet, 0 otherwise. - -.. 
cfunction:: npy_bool NpyIter_HasExternalLoop(NpyIter* iter) - - Returns 1 if the caller needs to handle the inner-most 1-dimensional - loop, or 0 if the iterator handles all looping. This is controlled - by the constructor flag :cdata:`NPY_ITER_EXTERNAL_LOOP` or - :cfunc:`NpyIter_EnableExternalLoop`. - -.. cfunction:: npy_bool NpyIter_HasMultiIndex(NpyIter* iter) - - Returns 1 if the iterator was created with the - :cdata:`NPY_ITER_MULTI_INDEX` flag, 0 otherwise. - -.. cfunction:: npy_bool NpyIter_HasIndex(NpyIter* iter) - - Returns 1 if the iterator was created with the - :cdata:`NPY_ITER_C_INDEX` or :cdata:`NPY_ITER_F_INDEX` - flag, 0 otherwise. - -.. cfunction:: npy_bool NpyIter_RequiresBuffering(NpyIter* iter) - - Returns 1 if the iterator requires buffering, which occurs - when an operand needs conversion or alignment and so cannot - be used directly. - -.. cfunction:: npy_bool NpyIter_IsBuffered(NpyIter* iter) - - Returns 1 if the iterator was created with the - :cdata:`NPY_ITER_BUFFERED` flag, 0 otherwise. - -.. cfunction:: npy_bool NpyIter_IsGrowInner(NpyIter* iter) - - Returns 1 if the iterator was created with the - :cdata:`NPY_ITER_GROWINNER` flag, 0 otherwise. - -.. cfunction:: npy_intp NpyIter_GetBufferSize(NpyIter* iter) - - If the iterator is buffered, returns the size of the buffer - being used, otherwise returns 0. - -.. cfunction:: int NpyIter_GetNDim(NpyIter* iter) - - Returns the number of dimensions being iterated. If a multi-index - was not requested in the iterator constructor, this value - may be smaller than the number of dimensions in the original - objects. - -.. cfunction:: int NpyIter_GetNOp(NpyIter* iter) - - Returns the number of operands in the iterator. - -.. cfunction:: npy_intp* NpyIter_GetAxisStrideArray(NpyIter* iter, int axis) - - Gets the array of strides for the specified axis. Requires that - the iterator be tracking a multi-index, and that buffering not - be enabled. 
- - This may be used when you want to match up operand axes in - some fashion, then remove them with :cfunc:`NpyIter_RemoveAxis` to - handle their processing manually. By calling this function - before removing the axes, you can get the strides for the - manual processing. - - Returns ``NULL`` on error. - -.. cfunction:: int NpyIter_GetShape(NpyIter* iter, npy_intp* outshape) - - Returns the broadcast shape of the iterator in ``outshape``. - This can only be called on an iterator which is tracking a multi-index. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -.. cfunction:: PyArray_Descr** NpyIter_GetDescrArray(NpyIter* iter) - - This gives back a pointer to the ``nop`` data type Descrs for - the objects being iterated. The result points into ``iter``, - so the caller does not gain any references to the Descrs. - - This pointer may be cached before the iteration loop, calling - ``iternext`` will not change it. - -.. cfunction:: PyObject** NpyIter_GetOperandArray(NpyIter* iter) - - This gives back a pointer to the ``nop`` operand PyObjects - that are being iterated. The result points into ``iter``, - so the caller does not gain any references to the PyObjects. - -.. cfunction:: PyObject* NpyIter_GetIterView(NpyIter* iter, npy_intp i) - - This gives back a reference to a new ndarray view, which is a view - into the i-th object in the array :cfunc:`NpyIter_GetOperandArray`(), - whose dimensions and strides match the internal optimized - iteration pattern. A C-order iteration of this view is equivalent - to the iterator's iteration order. - - For example, if an iterator was created with a single array as its - input, and it was possible to rearrange all its axes and then - collapse it into a single strided iteration, this would return - a view that is a one-dimensional array. - -.. cfunction:: void NpyIter_GetReadFlags(NpyIter* iter, char* outreadflags) - - Fills ``nop`` flags. Sets ``outreadflags[i]`` to 1 if - ``op[i]`` can be read from, and to 0 if not. - -.. 
cfunction:: void NpyIter_GetWriteFlags(NpyIter* iter, char* outwriteflags) - - Fills ``nop`` flags. Sets ``outwriteflags[i]`` to 1 if - ``op[i]`` can be written to, and to 0 if not. - -.. cfunction:: int NpyIter_CreateCompatibleStrides(NpyIter* iter, npy_intp itemsize, npy_intp* outstrides) - - Builds a set of strides which are the same as the strides of an - output array created using the :cdata:`NPY_ITER_ALLOCATE` flag, where NULL - was passed for op_axes. This is for data packed contiguously, - but not necessarily in C or Fortran order. This should be used - together with :cfunc:`NpyIter_GetShape` and :cfunc:`NpyIter_GetNDim` - with the flag :cdata:`NPY_ITER_MULTI_INDEX` passed into the constructor. - - A use case for this function is to match the shape and layout of - the iterator and tack on one or more dimensions. For example, - in order to generate a vector per input value for a numerical gradient, - you pass in ndim*itemsize for itemsize, then add another dimension to - the end with size ndim and stride itemsize. To do the Hessian matrix, - you do the same thing but add two dimensions, or take advantage of - the symmetry and pack it into 1 dimension with a particular encoding. - - This function may only be called if the iterator is tracking a multi-index - and if :cdata:`NPY_ITER_DONT_NEGATE_STRIDES` was used to prevent an axis - from being iterated in reverse order. - - If an array is created with this method, simply adding 'itemsize' - for each iteration will traverse the new array matching the - iterator. - - Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. - -Functions For Iteration ------------------------ - -.. cfunction:: NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* iter, char** errmsg) - - Returns a function pointer for iteration. A specialized version - of the function pointer may be calculated by this function - instead of being stored in the iterator structure. 
Thus, to - get good performance, it is required that the function pointer - be saved in a variable rather than retrieved for each loop iteration. - - Returns NULL if there is an error. If errmsg is non-NULL, - no Python exception is set when ``NPY_FAIL`` is returned. - Instead, \*errmsg is set to an error message. When errmsg is - non-NULL, the function may be safely called without holding - the Python GIL. - - The typical looping construct is as follows. - - .. code-block:: c - - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - char** dataptr = NpyIter_GetDataPtrArray(iter); - - do { - /* use the addresses dataptr[0], ... dataptr[nop-1] */ - } while(iternext(iter)); - - When :cdata:`NPY_ITER_EXTERNAL_LOOP` is specified, the typical - inner loop construct is as follows. - - .. code-block:: c - - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - char** dataptr = NpyIter_GetDataPtrArray(iter); - npy_intp* stride = NpyIter_GetInnerStrideArray(iter); - npy_intp* size_ptr = NpyIter_GetInnerLoopSizePtr(iter), size; - npy_intp iop, nop = NpyIter_GetNOp(iter); - - do { - size = *size_ptr; - while (size--) { - /* use the addresses dataptr[0], ... dataptr[nop-1] */ - for (iop = 0; iop < nop; ++iop) { - dataptr[iop] += stride[iop]; - } - } - } while (iternext(iter)); - - Observe that we are using the dataptr array inside the iterator, not - copying the values to a local temporary. This is possible because - when ``iternext()`` is called, these pointers will be overwritten - with fresh values, not incrementally updated. - - If a compile-time fixed buffer is being used (both flags - :cdata:`NPY_ITER_BUFFERED` and :cdata:`NPY_ITER_EXTERNAL_LOOP`), the - inner size may be used as a signal as well. The size is guaranteed - to become zero when ``iternext()`` returns false, enabling the - following loop construct. 
Note that if you use this construct, - you should not pass :cdata:`NPY_ITER_GROWINNER` as a flag, because it - will cause larger sizes under some circumstances. - - .. code-block:: c - - /* The constructor should have buffersize passed as this value */ - #define FIXED_BUFFER_SIZE 1024 - - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - char **dataptr = NpyIter_GetDataPtrArray(iter); - npy_intp *stride = NpyIter_GetInnerStrideArray(iter); - npy_intp *size_ptr = NpyIter_GetInnerLoopSizePtr(iter), size; - npy_intp i, iop, nop = NpyIter_GetNOp(iter); - - /* One loop with a fixed inner size */ - size = *size_ptr; - while (size == FIXED_BUFFER_SIZE) { - /* - * This loop could be manually unrolled by a factor - * which divides into FIXED_BUFFER_SIZE - */ - for (i = 0; i < FIXED_BUFFER_SIZE; ++i) { - /* use the addresses dataptr[0], ... dataptr[nop-1] */ - for (iop = 0; iop < nop; ++iop) { - dataptr[iop] += stride[iop]; - } - } - iternext(iter); - size = *size_ptr; - } - - /* Finish-up loop with variable inner size */ - if (size > 0) do { - size = *size_ptr; - while (size--) { - /* use the addresses dataptr[0], ... dataptr[nop-1] */ - for (iop = 0; iop < nop; ++iop) { - dataptr[iop] += stride[iop]; - } - } - } while (iternext(iter)); - -.. cfunction:: NpyIter_GetMultiIndexFunc *NpyIter_GetGetMultiIndex(NpyIter* iter, char** errmsg) - - Returns a function pointer for getting the current multi-index - of the iterator. Returns NULL if the iterator is not tracking - a multi-index. It is recommended that this function - pointer be cached in a local variable before the iteration - loop. - - Returns NULL if there is an error. If errmsg is non-NULL, - no Python exception is set when ``NPY_FAIL`` is returned. - Instead, \*errmsg is set to an error message. When errmsg is - non-NULL, the function may be safely called without holding - the Python GIL. - -.. 
cfunction:: char** NpyIter_GetDataPtrArray(NpyIter* iter) - - This gives back a pointer to the ``nop`` data pointers. If - :cdata:`NPY_ITER_EXTERNAL_LOOP` was not specified, each data - pointer points to the current data item of the iterator. If - no inner iteration was specified, it points to the first data - item of the inner loop. - - This pointer may be cached before the iteration loop, calling - ``iternext`` will not change it. This function may be safely - called without holding the Python GIL. - -.. cfunction:: char** NpyIter_GetInitialDataPtrArray(NpyIter* iter) - - Gets the array of data pointers directly into the arrays (never - into the buffers), corresponding to iteration index 0. - - These pointers are different from the pointers accepted by - ``NpyIter_ResetBasePointers``, because the direction along - some axes may have been reversed. - - This function may be safely called without holding the Python GIL. - -.. cfunction:: npy_intp* NpyIter_GetIndexPtr(NpyIter* iter) - - This gives back a pointer to the index being tracked, or NULL - if no index is being tracked. It is only useable if one of - the flags :cdata:`NPY_ITER_C_INDEX` or :cdata:`NPY_ITER_F_INDEX` - were specified during construction. - -When the flag :cdata:`NPY_ITER_EXTERNAL_LOOP` is used, the code -needs to know the parameters for doing the inner loop. These -functions provide that information. - -.. cfunction:: npy_intp* NpyIter_GetInnerStrideArray(NpyIter* iter) - - Returns a pointer to an array of the ``nop`` strides, - one for each iterated object, to be used by the inner loop. - - This pointer may be cached before the iteration loop, calling - ``iternext`` will not change it. This function may be safely - called without holding the Python GIL. - -.. cfunction:: npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* iter) - - Returns a pointer to the number of iterations the - inner loop should execute. 
- - This address may be cached before the iteration loop, calling - ``iternext`` will not change it. The value itself may change during - iteration, in particular if buffering is enabled. This function - may be safely called without holding the Python GIL. - -.. cfunction:: void NpyIter_GetInnerFixedStrideArray(NpyIter* iter, npy_intp* out_strides) - - Gets an array of strides which are fixed, or will not change during - the entire iteration. For strides that may change, the value - NPY_MAX_INTP is placed in the stride. - - Once the iterator is prepared for iteration (after a reset if - :cdata:`NPY_DELAY_BUFALLOC` was used), call this to get the strides - which may be used to select a fast inner loop function. For example, - if the stride is 0, that means the inner loop can always load its - value into a variable once, then use the variable throughout the loop, - or if the stride equals the itemsize, a contiguous version for that - operand may be used. - - This function may be safely called without holding the Python GIL. - -.. index:: - pair: iterator; C-API diff --git a/numpy-1.6.2/doc/source/reference/c-api.rst b/numpy-1.6.2/doc/source/reference/c-api.rst deleted file mode 100644 index 7c7775889f..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. _c-api: - -########### -Numpy C-API -########### - -.. sectionauthor:: Travis E. Oliphant - -| Beware of the man who won't be bothered with details. -| --- *William Feather, Sr.* - -| The truth is out there. -| --- *Chris Carter, The X Files* - - -NumPy provides a C-API to enable users to extend the system and get -access to the array object for use in other routines. The best way to -truly understand the C-API is to read the source code. If you are -unfamiliar with (C) source code, however, this can be a daunting -experience at first. Be assured that the task becomes easier with -practice, and you may be surprised at how simple the C-code can be to -understand. 
Even if you don't think you can write C-code from scratch, -it is much easier to understand and modify already-written source code -than create it *de novo*. - -Python extensions are especially straightforward to understand because -they all have a very similar structure. Admittedly, NumPy is not a -trivial extension to Python, and may take a little more snooping to -grasp. This is especially true because of the code-generation -techniques, which simplify maintenance of very similar code, but can -make the code a little less readable to beginners. Still, with a -little persistence, the code can be opened to your understanding. It -is my hope, that this guide to the C-API can assist in the process of -becoming familiar with the compiled-level work that can be done with -NumPy in order to squeeze that last bit of necessary speed out of your -code. - -.. currentmodule:: numpy-c-api - -.. toctree:: - :maxdepth: 2 - - c-api.types-and-structures - c-api.config - c-api.dtype - c-api.array - c-api.iterator - c-api.ufunc - c-api.generalized-ufuncs - c-api.coremath diff --git a/numpy-1.6.2/doc/source/reference/c-api.types-and-structures.rst b/numpy-1.6.2/doc/source/reference/c-api.types-and-structures.rst deleted file mode 100644 index 770aac995a..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.types-and-structures.rst +++ /dev/null @@ -1,1193 +0,0 @@ -***************************** -Python Types and C-Structures -***************************** - -.. sectionauthor:: Travis E. Oliphant - -Several new types are defined in the C-code. Most of these are -accessible from Python, but a few are not exposed due to their limited -use. Every new Python type has an associated :ctype:`PyObject *` with an -internal structure that includes a pointer to a "method table" that -defines how the new object behaves in Python. When you receive a -Python object into C code, you always get a pointer to a -:ctype:`PyObject` structure. 
Because a :ctype:`PyObject` structure is -very generic and defines only :cmacro:`PyObject_HEAD`, by itself it -is not very interesting. However, different objects contain more -details after the :cmacro:`PyObject_HEAD` (but you have to cast to the -correct type to access them --- or use accessor functions or macros). - - -New Python Types Defined -======================== - -Python types are the functional equivalent in C of classes in Python. -By constructing a new Python type you make available a new object for -Python. The ndarray object is an example of a new type defined in C. -New types are defined in C by two basic steps: - -1. creating a C-structure (usually named :ctype:`Py{Name}Object`) that is - binary- compatible with the :ctype:`PyObject` structure itself but holds - the additional information needed for that particular object; - -2. populating the :ctype:`PyTypeObject` table (pointed to by the ob_type - member of the :ctype:`PyObject` structure) with pointers to functions - that implement the desired behavior for the type. - -Instead of special method names which define behavior for Python -classes, there are "function tables" which point to functions that -implement the desired results. Since Python 2.2, the PyTypeObject -itself has become dynamic which allows C types that can be "sub-typed -"from other C-types in C, and sub-classed in Python. The children -types inherit the attributes and methods from their parent(s). - -There are two major new types: the ndarray ( :cdata:`PyArray_Type` ) -and the ufunc ( :cdata:`PyUFunc_Type` ). Additional types play a -supportive role: the :cdata:`PyArrayIter_Type`, the -:cdata:`PyArrayMultiIter_Type`, and the :cdata:`PyArrayDescr_Type` -. The :cdata:`PyArrayIter_Type` is the type for a flat iterator for an -ndarray (the object that is returned when getting the flat -attribute). The :cdata:`PyArrayMultiIter_Type` is the type of the -object returned when calling ``broadcast`` (). 
It handles iteration -and broadcasting over a collection of nested sequences. Also, the -:cdata:`PyArrayDescr_Type` is the data-type-descriptor type whose -instances describe the data. Finally, there are 21 new scalar-array -types which are new Python scalars corresponding to each of the -fundamental data types available for arrays. An additional 10 other -types are place holders that allow the array scalars to fit into a -hierarchy of actual Python types. - - -PyArray_Type ------------- - -.. cvar:: PyArray_Type - - The Python type of the ndarray is :cdata:`PyArray_Type`. In C, every - ndarray is a pointer to a :ctype:`PyArrayObject` structure. The ob_type - member of this structure contains a pointer to the :cdata:`PyArray_Type` - typeobject. - -.. ctype:: PyArrayObject - - The :ctype:`PyArrayObject` C-structure contains all of the required - information for an array. All instances of an ndarray (and its - subclasses) will have this structure. For future compatibility, - these structure members should normally be accessed using the - provided macros. If you need a shorter name, then you can make use - of :ctype:`NPY_AO` which is defined to be equivalent to - :ctype:`PyArrayObject`. - - .. code-block:: c - - typedef struct PyArrayObject { - PyObject_HEAD - char *data; - int nd; - npy_intp *dimensions; - npy_intp *strides; - PyObject *base; - PyArray_Descr *descr; - int flags; - PyObject *weakreflist; - } PyArrayObject; - -.. cmacro:: PyArrayObject.PyObject_HEAD - - This is needed by all Python objects. It consists of (at least) - a reference count member ( ``ob_refcnt`` ) and a pointer to the - typeobject ( ``ob_type`` ). (Other elements may also be present - if Python was compiled with special options see - Include/object.h in the Python source tree for more - information). The ob_type member points to a Python type - object. - -.. cmember:: char *PyArrayObject.data - - A pointer to the first element of the array. 
This pointer can - (and normally should) be recast to the data type of the array. - -.. cmember:: int PyArrayObject.nd - - An integer providing the number of dimensions for this - array. When nd is 0, the array is sometimes called a rank-0 - array. Such arrays have undefined dimensions and strides and - cannot be accessed. :cdata:`NPY_MAXDIMS` is the largest number of - dimensions for any array. - -.. cmember:: npy_intp PyArrayObject.dimensions - - An array of integers providing the shape in each dimension as - long as nd :math:`\geq` 1. The integer is always large enough - to hold a pointer on the platform, so the dimension size is - only limited by memory. - -.. cmember:: npy_intp *PyArrayObject.strides - - An array of integers providing for each dimension the number of - bytes that must be skipped to get to the next element in that - dimension. - -.. cmember:: PyObject *PyArrayObject.base - - This member is used to hold a pointer to another Python object - that is related to this array. There are two use cases: 1) If - this array does not own its own memory, then base points to the - Python object that owns it (perhaps another array object), 2) - If this array has the :cdata:`NPY_UPDATEIFCOPY` flag set, then this - array is a working copy of a "misbehaved" array. As soon as - this array is deleted, the array pointed to by base will be - updated with the contents of this array. - -.. cmember:: PyArray_Descr *PyArrayObject.descr - - A pointer to a data-type descriptor object (see below). The - data-type descriptor object is an instance of a new built-in - type which allows a generic description of memory. There is a - descriptor structure for each data type supported. This - descriptor structure contains useful information about the type - as well as a pointer to a table of function pointers to - implement specific functionality. - -.. cmember:: int PyArrayObject.flags - - Flags indicating how the memory pointed to by data is to be - interpreted. 
Possible flags are :cdata:`NPY_C_CONTIGUOUS`, - :cdata:`NPY_F_CONTIGUOUS`, :cdata:`NPY_OWNDATA`, :cdata:`NPY_ALIGNED`, - :cdata:`NPY_WRITEABLE`, and :cdata:`NPY_UPDATEIFCOPY`. - -.. cmember:: PyObject *PyArrayObject.weakreflist - - This member allows array objects to have weak references (using the - weakref module). - - -PyArrayDescr_Type ------------------ - -.. cvar:: PyArrayDescr_Type - - The :cdata:`PyArrayDescr_Type` is the built-in type of the - data-type-descriptor objects used to describe how the bytes comprising - the array are to be interpreted. There are 21 statically-defined - :ctype:`PyArray_Descr` objects for the built-in data-types. While these - participate in reference counting, their reference count should never - reach zero. There is also a dynamic table of user-defined - :ctype:`PyArray_Descr` objects that is also maintained. Once a - data-type-descriptor object is "registered" it should never be - deallocated either. The function :cfunc:`PyArray_DescrFromType` (...) can - be used to retrieve a :ctype:`PyArray_Descr` object from an enumerated - type-number (either built-in or user- defined). - -.. ctype:: PyArray_Descr - - The format of the :ctype:`PyArray_Descr` structure that lies at the - heart of the :cdata:`PyArrayDescr_Type` is - - .. code-block:: c - - typedef struct { - PyObject_HEAD - PyTypeObject *typeobj; - char kind; - char type; - char byteorder; - char unused; - int flags; - int type_num; - int elsize; - int alignment; - PyArray_ArrayDescr *subarray; - PyObject *fields; - PyArray_ArrFuncs *f; - } PyArray_Descr; - -.. cmember:: PyTypeObject *PyArray_Descr.typeobj - - Pointer to a typeobject that is the corresponding Python type for - the elements of this array. For the builtin types, this points to - the corresponding array scalar. For user-defined types, this - should point to a user-defined typeobject. This typeobject can - either inherit from array scalars or not. 
If it does not inherit - from array scalars, then the :cdata:`NPY_USE_GETITEM` and - :cdata:`NPY_USE_SETITEM` flags should be set in the ``flags`` member. - -.. cmember:: char PyArray_Descr.kind - - A character code indicating the kind of array (using the array - interface typestring notation). A 'b' represents Boolean, a 'i' - represents signed integer, a 'u' represents unsigned integer, 'f' - represents floating point, 'c' represents complex floating point, 'S' - represents 8-bit character string, 'U' represents 32-bit/character - unicode string, and 'V' represents arbitrary. - -.. cmember:: char PyArray_Descr.type - - A traditional character code indicating the data type. - -.. cmember:: char PyArray_Descr.byteorder - - A character indicating the byte-order: '>' (big-endian), '<' (little- - endian), '=' (native), '\|' (irrelevant, ignore). All builtin data- - types have byteorder '='. - -.. cmember:: int PyArray_Descr.flags - - A data-type bit-flag that determines if the data-type exhibits object- - array like behavior. Each bit in this member is a flag which are named - as: - - .. cvar:: NPY_ITEM_REFCOUNT - - .. cvar:: NPY_ITEM_HASOBJECT - - Indicates that items of this data-type must be reference - counted (using :cfunc:`Py_INCREF` and :cfunc:`Py_DECREF` ). - - .. cvar:: NPY_ITEM_LISTPICKLE - - Indicates arrays of this data-type must be converted to a list - before pickling. - - .. cvar:: NPY_ITEM_IS_POINTER - - Indicates the item is a pointer to some other data-type - - .. cvar:: NPY_NEEDS_INIT - - Indicates memory for this data-type must be initialized (set - to 0) on creation. - - .. cvar:: NPY_NEEDS_PYAPI - - Indicates this data-type requires the Python C-API during - access (so don't give up the GIL if array access is going to - be needed). - - .. cvar:: NPY_USE_GETITEM - - On array access use the ``f->getitem`` function pointer - instead of the standard conversion to an array scalar. 
Must - use if you don't define an array scalar to go along with - the data-type. - - .. cvar:: NPY_USE_SETITEM - - When creating a 0-d array from an array scalar use - ``f->setitem`` instead of the standard copy from an array - scalar. Must use if you don't define an array scalar to go - along with the data-type. - - .. cvar:: NPY_FROM_FIELDS - - The bits that are inherited for the parent data-type if these - bits are set in any field of the data-type. Currently ( - :cdata:`NPY_NEEDS_INIT` \| :cdata:`NPY_LIST_PICKLE` \| - :cdata:`NPY_ITEM_REFCOUNT` \| :cdata:`NPY_NEEDS_PYAPI` ). - - .. cvar:: NPY_OBJECT_DTYPE_FLAGS - - Bits set for the object data-type: ( :cdata:`NPY_LIST_PICKLE` - \| :cdata:`NPY_USE_GETITEM` \| :cdata:`NPY_ITEM_IS_POINTER` \| - :cdata:`NPY_REFCOUNT` \| :cdata:`NPY_NEEDS_INIT` \| - :cdata:`NPY_NEEDS_PYAPI`). - - .. cfunction:: PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags) - - Return true if all the given flags are set for the data-type - object. - - .. cfunction:: PyDataType_REFCHK(PyArray_Descr *dtype) - - Equivalent to :cfunc:`PyDataType_FLAGCHK` (*dtype*, - :cdata:`NPY_ITEM_REFCOUNT`). - -.. cmember:: int PyArray_Descr.type_num - - A number that uniquely identifies the data type. For new data-types, - this number is assigned when the data-type is registered. - -.. cmember:: int PyArray_Descr.elsize - - For data types that are always the same size (such as long), this - holds the size of the data type. For flexible data types where - different arrays can have a different elementsize, this should be - 0. - -.. cmember:: int PyArray_Descr.alignment - - A number providing alignment information for this data type. - Specifically, it shows how far from the start of a 2-element - structure (whose first element is a ``char`` ), the compiler - places an item of this type: ``offsetof(struct {char c; type v;}, - v)`` - -.. 
cmember:: PyArray_ArrayDescr *PyArray_Descr.subarray - - If this is non- ``NULL``, then this data-type descriptor is a - C-style contiguous array of another data-type descriptor. In - other-words, each element that this descriptor describes is - actually an array of some other base descriptor. This is most - useful as the data-type descriptor for a field in another - data-type descriptor. The fields member should be ``NULL`` if this - is non- ``NULL`` (the fields member of the base descriptor can be - non- ``NULL`` however). The :ctype:`PyArray_ArrayDescr` structure is - defined using - - .. code-block:: c - - typedef struct { - PyArray_Descr *base; - PyObject *shape; - } PyArray_ArrayDescr; - - The elements of this structure are: - - .. cmember:: PyArray_Descr *PyArray_ArrayDescr.base - - The data-type-descriptor object of the base-type. - - .. cmember:: PyObject *PyArray_ArrayDescr.shape - - The shape (always C-style contiguous) of the sub-array as a Python - tuple. - - -.. cmember:: PyObject *PyArray_Descr.fields - - If this is non-NULL, then this data-type-descriptor has fields - described by a Python dictionary whose keys are names (and also - titles if given) and whose values are tuples that describe the - fields. Recall that a data-type-descriptor always describes a - fixed-length set of bytes. A field is a named sub-region of that - total, fixed-length collection. A field is described by a tuple - composed of another data- type-descriptor and a byte - offset. Optionally, the tuple may contain a title which is - normally a Python string. These tuples are placed in this - dictionary keyed by name (and also title if given). - -.. cmember:: PyArray_ArrFuncs *PyArray_Descr.f - - A pointer to a structure containing functions that the type needs - to implement internal features. These functions are not the same - thing as the universal functions (ufuncs) described later. Their - signatures can vary arbitrarily. - -.. 
ctype:: PyArray_ArrFuncs - - Functions implementing internal features. Not all of these - function pointers must be defined for a given type. The required - members are ``nonzero``, ``copyswap``, ``copyswapn``, ``setitem``, - ``getitem``, and ``cast``. These are assumed to be non- ``NULL`` - and ``NULL`` entries will cause a program crash. The other - functions may be ``NULL`` which will just mean reduced - functionality for that data-type. (Also, the nonzero function will - be filled in with a default function if it is ``NULL`` when you - register a user-defined data-type). - - .. code-block:: c - - typedef struct { - PyArray_VectorUnaryFunc *cast[PyArray_NTYPES]; - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - PyArray_CompareFunc *compare; - PyArray_ArgFunc *argmax; - PyArray_DotFunc *dotfunc; - PyArray_ScanFunc *scanfunc; - PyArray_FromStrFunc *fromstr; - PyArray_NonzeroFunc *nonzero; - PyArray_FillFunc *fill; - PyArray_FillWithScalarFunc *fillwithscalar; - PyArray_SortFunc *sort[PyArray_NSORTS]; - PyArray_ArgSortFunc *argsort[PyArray_NSORTS]; - PyObject *castdict; - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - int listpickle - } PyArray_ArrFuncs; - - The concept of a behaved segment is used in the description of the - function pointers. A behaved segment is one that is aligned and in - native machine byte-order for the data-type. The ``nonzero``, - ``copyswap``, ``copyswapn``, ``getitem``, and ``setitem`` - functions can (and must) deal with mis-behaved arrays. The other - functions require behaved memory segments. - - .. cmember:: void cast(void *from, void *to, npy_intp n, void *fromarr, - void *toarr) - - An array of function pointers to cast from the current type to - all of the other builtin types. 
Each function casts a - contiguous, aligned, and notswapped buffer pointed at by - *from* to a contiguous, aligned, and notswapped buffer pointed - at by *to* The number of items to cast is given by *n*, and - the arguments *fromarr* and *toarr* are interpreted as - PyArrayObjects for flexible arrays to get itemsize - information. - - .. cmember:: PyObject *getitem(void *data, void *arr) - - A pointer to a function that returns a standard Python object - from a single element of the array object *arr* pointed to by - *data*. This function must be able to deal with "misbehaved - "(misaligned and/or swapped) arrays correctly. - - .. cmember:: int setitem(PyObject *item, void *data, void *arr) - - A pointer to a function that sets the Python object *item* - into the array, *arr*, at the position pointed to by *data* - . This function deals with "misbehaved" arrays. If successful, - a zero is returned, otherwise, a negative one is returned (and - a Python error set). - - .. cmember:: void copyswapn(void *dest, npy_intp dstride, void *src, - npy_intp sstride, npy_intp n, int swap, void *arr) - - .. cmember:: void copyswap(void *dest, void *src, int swap, void *arr) - - These members are both pointers to functions to copy data from - *src* to *dest* and *swap* if indicated. The value of arr is - only used for flexible ( :cdata:`NPY_STRING`, :cdata:`NPY_UNICODE`, - and :cdata:`NPY_VOID` ) arrays (and is obtained from - ``arr->descr->elsize`` ). The second function copies a single - value, while the first loops over n values with the provided - strides. These functions can deal with misbehaved *src* - data. If *src* is NULL then no copy is performed. If *swap* is - 0, then no byteswapping occurs. It is assumed that *dest* and - *src* do not overlap. If they overlap, then use ``memmove`` - (...) first followed by ``copyswap(n)`` with NULL valued - ``src``. - - .. 
cmember:: int compare(const void* d1, const void* d2, void* arr) - - A pointer to a function that compares two elements of the - array, ``arr``, pointed to by ``d1`` and ``d2``. This - function requires behaved arrays. The return value is 1 if * - ``d1`` > * ``d2``, 0 if * ``d1`` == * ``d2``, and -1 if * - ``d1`` < * ``d2``. The array object arr is used to retrieve - itemsize and field information for flexible arrays. - - .. cmember:: int argmax(void* data, npy_intp n, npy_intp* max_ind, - void* arr) - - A pointer to a function that retrieves the index of the - largest of ``n`` elements in ``arr`` beginning at the element - pointed to by ``data``. This function requires that the - memory segment be contiguous and behaved. The return value is - always 0. The index of the largest element is returned in - ``max_ind``. - - .. cmember:: void dotfunc(void* ip1, npy_intp is1, void* ip2, npy_intp is2, - void* op, npy_intp n, void* arr) - - A pointer to a function that multiplies two ``n`` -length - sequences together, adds them, and places the result in - element pointed to by ``op`` of ``arr``. The start of the two - sequences are pointed to by ``ip1`` and ``ip2``. To get to - the next element in each sequence requires a jump of ``is1`` - and ``is2`` *bytes*, respectively. This function requires - behaved (though not necessarily contiguous) memory. - - .. cmember:: int scanfunc(FILE* fd, void* ip , void* sep , void* arr) - - A pointer to a function that scans (scanf style) one element - of the corresponding type from the file descriptor ``fd`` into - the array memory pointed to by ``ip``. The array is assumed - to be behaved. If ``sep`` is not NULL, then a separator string - is also scanned from the file before returning. The last - argument ``arr`` is the array to be scanned into. A 0 is - returned if the scan is successful. 
A negative number - indicates something went wrong: -1 means the end of file was - reached before the separator string could be scanned, -4 means - that the end of file was reached before the element could be - scanned, and -3 means that the element could not be - interpreted from the format string. Requires a behaved array. - - .. cmember:: int fromstr(char* str, void* ip, char** endptr, void* arr) - - A pointer to a function that converts the string pointed to by - ``str`` to one element of the corresponding type and places it - in the memory location pointed to by ``ip``. After the - conversion is completed, ``*endptr`` points to the rest of the - string. The last argument ``arr`` is the array into which ip - points (needed for variable-size data- types). Returns 0 on - success or -1 on failure. Requires a behaved array. - - .. cmember:: Bool nonzero(void* data, void* arr) - - A pointer to a function that returns TRUE if the item of - ``arr`` pointed to by ``data`` is nonzero. This function can - deal with misbehaved arrays. - - .. cmember:: void fill(void* data, npy_intp length, void* arr) - - A pointer to a function that fills a contiguous array of given - length with data. The first two elements of the array must - already be filled- in. From these two values, a delta will be - computed and the values from item 3 to the end will be - computed by repeatedly adding this computed delta. The data - buffer must be well-behaved. - - .. cmember:: void fillwithscalar(void* buffer, npy_intp length, - void* value, void* arr) - - A pointer to a function that fills a contiguous ``buffer`` of - the given ``length`` with a single scalar ``value`` whose - address is given. The final argument is the array which is - needed to get the itemsize for variable-length arrays. - - .. cmember:: int sort(void* start, npy_intp length, void* arr) - - An array of function pointers to a particular sorting - algorithms. 
A particular sorting algorithm is obtained using a
- key (so far :cdata:`PyArray_QUICKSORT`, :cdata:`PyArray_HEAPSORT`, and
- :cdata:`PyArray_MERGESORT` are defined). These sorts are done
- in-place assuming contiguous and aligned data.
-
- .. cmember:: int argsort(void* start, npy_intp* result, npy_intp length,
- void* arr)
-
- An array of function pointers to sorting algorithms for this
- data type. The same sorting algorithms as for sort are
- available. The indices producing the sort are returned in
- result (which must be initialized with indices 0 to length-1
- inclusive).
-
- .. cmember:: PyObject *castdict
-
- Either ``NULL`` or a dictionary containing low-level casting
- functions for user- defined data-types. Each function is
- wrapped in a :ctype:`PyCObject *` and keyed by the data-type number.
-
- .. cmember:: PyArray_SCALARKIND scalarkind(PyArrayObject* arr)
-
- A function to determine how scalars of this type should be
- interpreted. The argument is ``NULL`` or a 0-dimensional array
- containing the data (if that is needed to determine the kind
- of scalar). The return value must be of type
- :ctype:`PyArray_SCALARKIND`.
-
- .. cmember:: int **cancastscalarkindto
-
- Either ``NULL`` or an array of :ctype:`PyArray_NSCALARKINDS`
- pointers. These pointers should each be either ``NULL`` or a
- pointer to an array of integers (terminated by
- :cdata:`PyArray_NOTYPE`) indicating data-types that a scalar of
- this data-type of the specified kind can be cast to safely
- (this usually means without losing precision).
-
- .. cmember:: int *cancastto
-
- Either ``NULL`` or an array of integers (terminated by
- :cdata:`PyArray_NOTYPE` ) indicating data-types that this data-type
- can be cast to safely (this usually means without losing
- precision).
-
- .. cmember:: int listpickle
-
- Unused. 
- -The :cdata:`PyArray_Type` typeobject implements many of the features of -Python objects including the tp_as_number, tp_as_sequence, -tp_as_mapping, and tp_as_buffer interfaces. The rich comparison -(tp_richcompare) is also used along with new-style attribute lookup -for methods (tp_methods) and properties (tp_getset). The -:cdata:`PyArray_Type` can also be sub-typed. - -.. tip:: - - The tp_as_number methods use a generic approach to call whatever - function has been registered for handling the operation. The - function PyNumeric_SetOps(..) can be used to register functions to - handle particular mathematical operations (for all arrays). When - the umath module is imported, it sets the numeric operations for - all arrays to the corresponding ufuncs. The tp_str and tp_repr - methods can also be altered using PyString_SetStringFunction(...). - - -PyUFunc_Type ------------- - -.. cvar:: PyUFunc_Type - - The ufunc object is implemented by creation of the - :cdata:`PyUFunc_Type`. It is a very simple type that implements only - basic getattribute behavior, printing behavior, and has call - behavior which allows these objects to act like functions. The - basic idea behind the ufunc is to hold a reference to fast - 1-dimensional (vector) loops for each data type that supports the - operation. These one-dimensional loops all have the same signature - and are the key to creating a new ufunc. They are called by the - generic looping code as appropriate to implement the N-dimensional - function. There are also some generic 1-d loops defined for - floating and complexfloating arrays that allow you to define a - ufunc using a single scalar function (*e.g.* atanh). - - -.. ctype:: PyUFuncObject - - The core of the ufunc is the :ctype:`PyUFuncObject` which contains all - the information needed to call the underlying C-code loops that - perform the actual work. It has the following structure: - - .. 
code-block:: c - - typedef struct { - PyObject_HEAD - int nin; - int nout; - int nargs; - int identity; - PyUFuncGenericFunction *functions; - void **data; - int ntypes; - int check_return; - char *name; - char *types; - char *doc; - void *ptr; - PyObject *obj; - PyObject *userloops; - } PyUFuncObject; - - .. cmacro:: PyUFuncObject.PyObject_HEAD - - required for all Python objects. - - .. cmember:: int PyUFuncObject.nin - - The number of input arguments. - - .. cmember:: int PyUFuncObject.nout - - The number of output arguments. - - .. cmember:: int PyUFuncObject.nargs - - The total number of arguments (*nin* + *nout*). This must be - less than :cdata:`NPY_MAXARGS`. - - .. cmember:: int PyUFuncObject.identity - - Either :cdata:`PyUFunc_One`, :cdata:`PyUFunc_Zero`, or - :cdata:`PyUFunc_None` to indicate the identity for this operation. - It is only used for a reduce-like call on an empty array. - - .. cmember:: void PyUFuncObject.functions(char** args, npy_intp* dims, - npy_intp* steps, void* extradata) - - An array of function pointers --- one for each data type - supported by the ufunc. This is the vector loop that is called - to implement the underlying function *dims* [0] times. The - first argument, *args*, is an array of *nargs* pointers to - behaved memory. Pointers to the data for the input arguments - are first, followed by the pointers to the data for the output - arguments. How many bytes must be skipped to get to the next - element in the sequence is specified by the corresponding entry - in the *steps* array. The last argument allows the loop to - receive extra information. This is commonly used so that a - single, generic vector loop can be used for multiple - functions. In this case, the actual scalar function to call is - passed in as *extradata*. The size of this function pointer - array is ntypes. - - .. cmember:: void **PyUFuncObject.data - - Extra data to be passed to the 1-d vector loops or ``NULL`` if - no extra-data is needed. 
This C-array must be the same size ( - *i.e.* ntypes) as the functions array. ``NULL`` is used if - extra_data is not needed. Several C-API calls for UFuncs are - just 1-d vector loops that make use of this extra data to - receive a pointer to the actual function to call. - - .. cmember:: int PyUFuncObject.ntypes - - The number of supported data types for the ufunc. This number - specifies how many different 1-d loops (of the builtin data types) are - available. - - .. cmember:: int PyUFuncObject.check_return - - Obsolete and unused. However, it is set by the corresponding entry in - the main ufunc creation routine: :cfunc:`PyUFunc_FromFuncAndData` (...). - - .. cmember:: char *PyUFuncObject.name - - A string name for the ufunc. This is used dynamically to build - the __doc\__ attribute of ufuncs. - - .. cmember:: char *PyUFuncObject.types - - An array of *nargs* :math:`\times` *ntypes* 8-bit type_numbers - which contains the type signature for the function for each of - the supported (builtin) data types. For each of the *ntypes* - functions, the corresponding set of type numbers in this array - shows how the *args* argument should be interpreted in the 1-d - vector loop. These type numbers do not have to be the same type - and mixed-type ufuncs are supported. - - .. cmember:: char *PyUFuncObject.doc - - Documentation for the ufunc. Should not contain the function - signature as this is generated dynamically when __doc\__ is - retrieved. - - .. cmember:: void *PyUFuncObject.ptr - - Any dynamically allocated memory. Currently, this is used for dynamic - ufuncs created from a python function to store room for the types, - data, and name members. - - .. cmember:: PyObject *PyUFuncObject.obj - - For ufuncs dynamically created from python functions, this member - holds a reference to the underlying Python function. - - .. cmember:: PyObject *PyUFuncObject.userloops - - A dictionary of user-defined 1-d vector loops (stored as CObject ptrs) - for user-defined types. 
A loop may be registered by the user for any - user-defined type. It is retrieved by type number. User defined type - numbers are always larger than :cdata:`NPY_USERDEF`. - - -PyArrayIter_Type ----------------- - -.. cvar:: PyArrayIter_Type - - This is an iterator object that makes it easy to loop over an N-dimensional - array. It is the object returned from the flat attribute of an - ndarray. It is also used extensively throughout the implementation - internals to loop over an N-dimensional array. The tp_as_mapping - interface is implemented so that the iterator object can be indexed - (using 1-d indexing), and a few methods are implemented through the - tp_methods table. This object implements the next method and can be - used anywhere an iterator can be used in Python. - -.. ctype:: PyArrayIterObject - - The C-structure corresponding to an object of :cdata:`PyArrayIter_Type` is - the :ctype:`PyArrayIterObject`. The :ctype:`PyArrayIterObject` is used to - keep track of a pointer into an N-dimensional array. It contains associated - information used to quickly march through the array. The pointer can - be adjusted in three basic ways: 1) advance to the "next" position in - the array in a C-style contiguous fashion, 2) advance to an arbitrary - N-dimensional coordinate in the array, and 3) advance to an arbitrary - one-dimensional index into the array. The members of the - :ctype:`PyArrayIterObject` structure are used in these - calculations. Iterator objects keep their own dimension and strides - information about an array. This can be adjusted as needed for - "broadcasting," or to loop over only specific dimensions. - - .. 
code-block:: c
-
- typedef struct {
- PyObject_HEAD
- int nd_m1;
- npy_intp index;
- npy_intp size;
- npy_intp coordinates[NPY_MAXDIMS];
- npy_intp dims_m1[NPY_MAXDIMS];
- npy_intp strides[NPY_MAXDIMS];
- npy_intp backstrides[NPY_MAXDIMS];
- npy_intp factors[NPY_MAXDIMS];
- PyArrayObject *ao;
- char *dataptr;
- Bool contiguous;
- } PyArrayIterObject;
-
- .. cmember:: int PyArrayIterObject.nd_m1
-
- :math:`N-1` where :math:`N` is the number of dimensions in the
- underlying array.
-
- .. cmember:: npy_intp PyArrayIterObject.index
-
- The current 1-d index into the array.
-
- .. cmember:: npy_intp PyArrayIterObject.size
-
- The total size of the underlying array.
-
- .. cmember:: npy_intp *PyArrayIterObject.coordinates
-
- An :math:`N` -dimensional index into the array.
-
- .. cmember:: npy_intp *PyArrayIterObject.dims_m1
-
- The size of the array minus 1 in each dimension.
-
- .. cmember:: npy_intp *PyArrayIterObject.strides
-
- The strides of the array. How many bytes needed to jump to the next
- element in each dimension.
-
- .. cmember:: npy_intp *PyArrayIterObject.backstrides
-
- How many bytes needed to jump from the end of a dimension back
- to its beginning. Note that *backstrides* [k] = *strides* [k] *
- *dims_m1* [k], but it is stored here as an optimization.
-
- .. cmember:: npy_intp *PyArrayIterObject.factors
-
- This array is used in computing an N-d index from a 1-d index. It
- contains needed products of the dimensions.
-
- .. cmember:: PyArrayObject *PyArrayIterObject.ao
-
- A pointer to the underlying ndarray this iterator was created to
- represent.
-
- .. cmember:: char *PyArrayIterObject.dataptr
-
- This member points to an element in the ndarray indicated by the
- index.
-
- .. cmember:: Bool PyArrayIterObject.contiguous
-
- This flag is true if the underlying array is
- :cdata:`NPY_C_CONTIGUOUS`. It is used to simplify calculations when
- possible.
-
-
- How to use an array iterator on a C-level is explained more fully in
- later sections. 
Typically, you do not need to concern yourself with -the internal structure of the iterator object, and merely interact -with it through the use of the macros :cfunc:`PyArray_ITER_NEXT` (it), -:cfunc:`PyArray_ITER_GOTO` (it, dest), or :cfunc:`PyArray_ITER_GOTO1D` (it, -index). All of these macros require the argument *it* to be a -:ctype:`PyArrayIterObject *`. - - -PyArrayMultiIter_Type ---------------------- - -.. cvar:: PyArrayMultiIter_Type - - This type provides an iterator that encapsulates the concept of - broadcasting. It allows :math:`N` arrays to be broadcast together - so that the loop progresses in C-style contiguous fashion over the - broadcasted array. The corresponding C-structure is the - :ctype:`PyArrayMultiIterObject` whose memory layout must begin any - object, *obj*, passed in to the :cfunc:`PyArray_Broadcast` (obj) - function. Broadcasting is performed by adjusting array iterators so - that each iterator represents the broadcasted shape and size, but - has its strides adjusted so that the correct element from the array - is used at each iteration. - - -.. ctype:: PyArrayMultiIterObject - - .. code-block:: c - - typedef struct { - PyObject_HEAD - int numiter; - npy_intp size; - npy_intp index; - int nd; - npy_intp dimensions[NPY_MAXDIMS]; - PyArrayIterObject *iters[NPY_MAXDIMS]; - } PyArrayMultiIterObject; - - .. cmacro:: PyArrayMultiIterObject.PyObject_HEAD - - Needed at the start of every Python object (holds reference count and - type identification). - - .. cmember:: int PyArrayMultiIterObject.numiter - - The number of arrays that need to be broadcast to the same shape. - - .. cmember:: npy_intp PyArrayMultiIterObject.size - - The total broadcasted size. - - .. cmember:: npy_intp PyArrayMultiIterObject.index - - The current (1-d) index into the broadcasted result. - - .. cmember:: int PyArrayMultiIterObject.nd - - The number of dimensions in the broadcasted result. - - .. 
cmember:: npy_intp *PyArrayMultiIterObject.dimensions
-
- The shape of the broadcasted result (only ``nd`` slots are used).
-
- .. cmember:: PyArrayIterObject **PyArrayMultiIterObject.iters
-
- An array of iterator objects that holds the iterators for the arrays
- to be broadcast together. On return, the iterators are adjusted for
- broadcasting.
-
- PyArrayNeighborhoodIter_Type
- ----------------------------
-
- .. cvar:: PyArrayNeighborhoodIter_Type
-
- This is an iterator object that makes it easy to loop over an N-dimensional
- neighborhood.
-
- .. ctype:: PyArrayNeighborhoodIterObject
-
- The C-structure corresponding to an object of
- :cdata:`PyArrayNeighborhoodIter_Type` is the
- :ctype:`PyArrayNeighborhoodIterObject`.
-
- PyArrayFlags_Type
- -----------------
-
- .. cvar:: PyArrayFlags_Type
-
- When the flags attribute is retrieved from Python, a special
- builtin object of this type is constructed. This special type makes
- it easier to work with the different flags by accessing them as
- attributes or by accessing them as if the object were a dictionary
- with the flag names as entries.
-
-
- ScalarArrayTypes
- ----------------
-
- There is a Python type for each of the different built-in data types
- that can be present in the array. Most of these are simple wrappers
- around the corresponding data type in C. The C-names for these types
- are :cdata:`Py{TYPE}ArrType_Type` where ``{TYPE}`` can be
-
- **Bool**, **Byte**, **Short**, **Int**, **Long**, **LongLong**,
- **UByte**, **UShort**, **UInt**, **ULong**, **ULongLong**,
- **Half**, **Float**, **Double**, **LongDouble**, **CFloat**, **CDouble**,
- **CLongDouble**, **String**, **Unicode**, **Void**, and
- **Object**.
-
- These type names are part of the C-API and can therefore be created in
- extension C-code. There is also a :cdata:`PyIntpArrType_Type` and a
- :cdata:`PyUIntpArrType_Type` that are simple substitutes for one of the
- integer types that can hold a pointer on the platform. 
The structure -of these scalar objects is not exposed to C-code. The function -:cfunc:`PyArray_ScalarAsCtype` (..) can be used to extract the C-type value -from the array scalar and the function :cfunc:`PyArray_Scalar` (...) can be -used to construct an array scalar from a C-value. - - -Other C-Structures -================== - -A few new C-structures were found to be useful in the development of -NumPy. These C-structures are used in at least one C-API call and are -therefore documented here. The main reason these structures were -defined is to make it easy to use the Python ParseTuple C-API to -convert from Python objects to a useful C-Object. - - -PyArray_Dims ------------- - -.. ctype:: PyArray_Dims - - This structure is very useful when shape and/or strides information is - supposed to be interpreted. The structure is: - - .. code-block:: c - - typedef struct { - npy_intp *ptr; - int len; - } PyArray_Dims; - - The members of this structure are - - .. cmember:: npy_intp *PyArray_Dims.ptr - - A pointer to a list of (:ctype:`npy_intp`) integers which usually - represent array shape or array strides. - - .. cmember:: int PyArray_Dims.len - - The length of the list of integers. It is assumed safe to - access *ptr* [0] to *ptr* [len-1]. - - -PyArray_Chunk -------------- - -.. ctype:: PyArray_Chunk - - This is equivalent to the buffer object structure in Python up to - the ptr member. On 32-bit platforms (*i.e.* if :cdata:`NPY_SIZEOF_INT` - == :cdata:`NPY_SIZEOF_INTP` ) or in Python 2.5, the len member also - matches an equivalent member of the buffer object. It is useful to - represent a generic single- segment chunk of memory. - - .. code-block:: c - - typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; - } PyArray_Chunk; - - The members are - - .. cmacro:: PyArray_Chunk.PyObject_HEAD - - Necessary for all Python objects. 
Included here so that the - :ctype:`PyArray_Chunk` structure matches that of the buffer object - (at least to the len member). - - .. cmember:: PyObject *PyArray_Chunk.base - - The Python object this chunk of memory comes from. Needed so that - memory can be accounted for properly. - - .. cmember:: void *PyArray_Chunk.ptr - - A pointer to the start of the single-segment chunk of memory. - - .. cmember:: npy_intp PyArray_Chunk.len - - The length of the segment in bytes. - - .. cmember:: int PyArray_Chunk.flags - - Any data flags (*e.g.* :cdata:`NPY_WRITEABLE` ) that should be used - to interpret the memory. - - -PyArrayInterface ----------------- - -.. seealso:: :ref:`arrays.interface` - -.. ctype:: PyArrayInterface - - The :ctype:`PyArrayInterface` structure is defined so that NumPy and - other extension modules can use the rapid array interface - protocol. The :obj:`__array_struct__` method of an object that - supports the rapid array interface protocol should return a - :ctype:`PyCObject` that contains a pointer to a :ctype:`PyArrayInterface` - structure with the relevant details of the array. After the new - array is created, the attribute should be ``DECREF``'d which will - free the :ctype:`PyArrayInterface` structure. Remember to ``INCREF`` the - object (whose :obj:`__array_struct__` attribute was retrieved) and - point the base member of the new :ctype:`PyArrayObject` to this same - object. In this way the memory for the array will be managed - correctly. - - .. code-block:: c - - typedef struct { - int two; - int nd; - char typekind; - int itemsize; - int flags; - npy_intp *shape; - npy_intp *strides; - void *data; - PyObject *descr; - } PyArrayInterface; - - .. cmember:: int PyArrayInterface.two - - the integer 2 as a sanity check. - - .. cmember:: int PyArrayInterface.nd - - the number of dimensions in the array. - - .. 
cmember:: char PyArrayInterface.typekind - - A character indicating what kind of array is present according to the - typestring convention with 't' -> bitfield, 'b' -> Boolean, 'i' -> - signed integer, 'u' -> unsigned integer, 'f' -> floating point, 'c' -> - complex floating point, 'O' -> object, 'S' -> string, 'U' -> unicode, - 'V' -> void. - - .. cmember:: int PyArrayInterface.itemsize - - The number of bytes each item in the array requires. - - .. cmember:: int PyArrayInterface.flags - - Any of the bits :cdata:`NPY_C_CONTIGUOUS` (1), - :cdata:`NPY_F_CONTIGUOUS` (2), :cdata:`NPY_ALIGNED` (0x100), - :cdata:`NPY_NOTSWAPPED` (0x200), or :cdata:`NPY_WRITEABLE` - (0x400) to indicate something about the data. The - :cdata:`NPY_ALIGNED`, :cdata:`NPY_C_CONTIGUOUS`, and - :cdata:`NPY_F_CONTIGUOUS` flags can actually be determined from - the other parameters. The flag :cdata:`NPY_ARR_HAS_DESCR` - (0x800) can also be set to indicate to objects consuming the - version 3 array interface that the descr member of the - structure is present (it will be ignored by objects consuming - version 2 of the array interface). - - .. cmember:: npy_intp *PyArrayInterface.shape - - An array containing the size of the array in each dimension. - - .. cmember:: npy_intp *PyArrayInterface.strides - - An array containing the number of bytes to jump to get to the next - element in each dimension. - - .. cmember:: void *PyArrayInterface.data - - A pointer *to* the first element of the array. - - .. cmember:: PyObject *PyArrayInterface.descr - - A Python object describing the data-type in more detail (same - as the *descr* key in :obj:`__array_interface__`). This can be - ``NULL`` if *typekind* and *itemsize* provide enough - information. This field is also ignored unless - :cdata:`ARR_HAS_DESCR` flag is on in *flags*. - - -Internally used structures --------------------------- - -Internally, the code uses some additional Python objects primarily for -memory management. 
These types are not accessible directly from -Python, and are not exposed to the C-API. They are included here only -for completeness and assistance in understanding the code. - - -.. ctype:: PyUFuncLoopObject - - A loose wrapper for a C-structure that contains the information - needed for looping. This is useful if you are trying to understand - the ufunc looping code. The :ctype:`PyUFuncLoopObject` is the associated - C-structure. It is defined in the ``ufuncobject.h`` header. - -.. ctype:: PyUFuncReduceObject - - A loose wrapper for the C-structure that contains the information - needed for reduce-like methods of ufuncs. This is useful if you are - trying to understand the reduce, accumulate, and reduce-at - code. The :ctype:`PyUFuncReduceObject` is the associated C-structure. It - is defined in the ``ufuncobject.h`` header. - -.. ctype:: PyUFunc_Loop1d - - A simple linked-list of C-structures containing the information needed - to define a 1-d loop for a ufunc for every defined signature of a - user-defined data-type. - -.. cvar:: PyArrayMapIter_Type - - Advanced indexing is handled with this Python type. It is simply a - loose wrapper around the C-structure containing the variables - needed for advanced array indexing. The associated C-structure, - :ctype:`PyArrayMapIterObject`, is useful if you are trying to - understand the advanced-index mapping code. It is defined in the - ``arrayobject.h`` header. This type is not exposed to Python and - could be replaced with a C-structure. As a Python type it takes - advantage of reference- counted memory management. diff --git a/numpy-1.6.2/doc/source/reference/c-api.ufunc.rst b/numpy-1.6.2/doc/source/reference/c-api.ufunc.rst deleted file mode 100644 index 45268b261d..0000000000 --- a/numpy-1.6.2/doc/source/reference/c-api.ufunc.rst +++ /dev/null @@ -1,387 +0,0 @@ -UFunc API -========= - -.. sectionauthor:: Travis E. Oliphant - -.. index:: - pair: ufunc; C-API - - -Constants ---------- - -.. 
cvar:: UFUNC_ERR_{HANDLER}
-
- ``{HANDLER}`` can be **IGNORE**, **WARN**, **RAISE**, or **CALL**
-
- .. cvar:: UFUNC_{THING}_{ERR}
-
- ``{THING}`` can be **MASK**, **SHIFT**, or **FPE**, and ``{ERR}`` can
- be **DIVIDEBYZERO**, **OVERFLOW**, **UNDERFLOW**, and **INVALID**.
-
- .. cvar:: PyUFunc_{VALUE}
-
- ``{VALUE}`` can be **One** (1), **Zero** (0), or **None** (-1)
-
-
- Macros
- ------
-
- .. cmacro:: NPY_LOOP_BEGIN_THREADS
-
- Used in universal function code to only release the Python GIL if
- loop->obj is not true (*i.e.* this is not an OBJECT array
- loop). Requires use of :cmacro:`NPY_BEGIN_THREADS_DEF` in variable
- declaration area.
-
- .. cmacro:: NPY_LOOP_END_THREADS
-
- Used in universal function code to re-acquire the Python GIL if it
- was released (because loop->obj was not true).
-
- .. cfunction:: UFUNC_CHECK_ERROR(loop)
-
- A macro used internally to check for errors and goto fail if
- found. This macro requires a fail label in the current code
- block. The *loop* variable must have at least members (obj,
- errormask, and errorobj). If *loop* ->obj is nonzero, then
- :cfunc:`PyErr_Occurred` () is called (meaning the GIL must be held). If
- *loop* ->obj is zero, then if *loop* ->errormask is nonzero,
- :cfunc:`PyUFunc_checkfperr` is called with arguments *loop* ->errormask
- and *loop* ->errobj. If the result of this check of the IEEE
- floating point registers is true then the code redirects to the
- fail label which must be defined.
-
- .. cfunction:: UFUNC_CHECK_STATUS(ret)
-
- A macro that expands to platform-dependent code. The *ret*
- variable can be any integer. The :cdata:`UFUNC_FPE_{ERR}` bits are
- set in *ret* according to the status of the corresponding error
- flags of the floating point processor.
-
-
- Functions
- ---------
-
- .. 
cfunction:: PyObject* PyUFunc_FromFuncAndData(PyUFuncGenericFunction* func,
- void** data, char* types, int ntypes, int nin, int nout, int identity,
- char* name, char* doc, int check_return)
-
- Create a new broadcasting universal function from required variables.
- Each ufunc builds around the notion of an element-by-element
- operation. Each ufunc object contains pointers to 1-d loops
- implementing the basic functionality for each supported type.
-
- .. note::
-
- The *func*, *data*, *types*, *name*, and *doc* arguments are not
- copied by :cfunc:`PyUFunc_FromFuncAndData`. The caller must ensure
- that the memory used by these arrays is not freed as long as the
- ufunc object is alive.
-
- :param func:
- Must point to an array of length *ntypes* containing
- :ctype:`PyUFuncGenericFunction` items. These items are pointers to
- functions that actually implement the underlying
- (element-by-element) function :math:`N` times.
-
- :param data:
- Should be ``NULL`` or a pointer to an array of size *ntypes*
- . This array may contain arbitrary extra-data to be passed to
- the corresponding 1-d loop function in the func array.
-
- :param types:
- Must be of length (*nin* + *nout*) \* *ntypes*, and it
- contains the data-types (built-in only) that the corresponding
- function in the *func* array can deal with.
-
- :param ntypes:
- How many different data-type "signatures" the ufunc has implemented.
-
- :param nin:
- The number of inputs to this operation.
-
- :param nout:
- The number of outputs.
-
- :param name:
- The name for the ufunc. Specifying a name of 'add' or
- 'multiply' enables a special behavior for integer-typed
- reductions when no dtype is given. If the input type is an
- integer (or boolean) data type smaller than the size of the int_
- data type, it will be internally upcast to the int_ (or uint)
- data type.
-
-
- :param doc:
- Allows passing in a documentation string to be stored with the
- ufunc. 
The documentation string should not contain the name - of the function or the calling signature as that will be - dynamically determined from the object and available when - accessing the **__doc__** attribute of the ufunc. - - :param check_return: - Unused and present for backwards compatibility of the C-API. A - corresponding *check_return* integer does exist in the ufunc - structure and it does get set with this value when the ufunc - object is created. - -.. cfunction:: int PyUFunc_RegisterLoopForType(PyUFuncObject* ufunc, - int usertype, PyUFuncGenericFunction function, int* arg_types, void* data) - - This function allows the user to register a 1-d loop with an - already- created ufunc to be used whenever the ufunc is called - with any of its input arguments as the user-defined - data-type. This is needed in order to make ufuncs work with - built-in data-types. The data-type must have been previously - registered with the numpy system. The loop is passed in as - *function*. This loop can take arbitrary data which should be - passed in as *data*. The data-types the loop requires are passed - in as *arg_types* which must be a pointer to memory at least as - large as ufunc->nargs. - -.. cfunction:: int PyUFunc_ReplaceLoopBySignature(PyUFuncObject* ufunc, - PyUFuncGenericFunction newfunc, int* signature, - PyUFuncGenericFunction* oldfunc) - - Replace a 1-d loop matching the given *signature* in the - already-created *ufunc* with the new 1-d loop newfunc. Return the - old 1-d loop function in *oldfunc*. Return 0 on success and -1 on - failure. This function works only with built-in types (use - :cfunc:`PyUFunc_RegisterLoopForType` for user-defined types). A - signature is an array of data-type numbers indicating the inputs - followed by the outputs assumed by the 1-d loop. - -.. cfunction:: int PyUFunc_GenericFunction(PyUFuncObject* self, - PyObject* args, PyObject* kwds, PyArrayObject** mps) - - A generic ufunc call. 
The ufunc is passed in as *self*, the arguments
- to the ufunc as *args* and *kwds*. The *mps* argument is an array of
- :ctype:`PyArrayObject` pointers whose values are discarded and which
- receive the converted input arguments as well as the ufunc outputs
- when success is returned. The user is responsible for managing this
- array and receives a new reference for each array in *mps*. The total
- number of arrays in *mps* is given by *self* ->nin + *self* ->nout.
-
- Returns 0 on success, -1 on error.
-
- .. cfunction:: int PyUFunc_checkfperr(int errmask, PyObject* errobj)
-
- A simple interface to the IEEE error-flag checking support. The
- *errmask* argument is a mask of :cdata:`UFUNC_MASK_{ERR}` bitmasks
- indicating which errors to check for (and how to check for
- them). The *errobj* must be a Python tuple with two elements: a
- string containing the name which will be used in any communication
- of error and either a callable Python object (call-back function)
- or :cdata:`Py_None`. The callable object will only be used if
- :cdata:`UFUNC_ERR_CALL` is set as the desired error checking
- method. This routine manages the GIL and is safe to call even
- after releasing the GIL. If an error in the IEEE-compatible
- hardware is determined a -1 is returned, otherwise a 0 is
- returned.
-
- .. cfunction:: void PyUFunc_clearfperr()
-
- Clear the IEEE error flags.
-
- .. cfunction:: void PyUFunc_GetPyValues(char* name, int* bufsize,
- int* errmask, PyObject** errobj)
-
- Get the Python values used for ufunc processing from the
- thread-local storage area unless the defaults have been set in
- which case the name lookup is bypassed. The name is placed as a
- string in the first element of *\*errobj*. The second element is
- the looked-up function to call on error callback. The value of the
- looked-up buffer-size to use is passed into *bufsize*, and the
- value of the error mask is placed into *errmask*. 
- - -Generic functions ------------------ - -At the core of every ufunc is a collection of type-specific functions -that defines the basic functionality for each of the supported types. -These functions must evaluate the underlying function :math:`N\geq1` -times. Extra-data may be passed in that may be used during the -calculation. This feature allows some general functions to be used as -these basic looping functions. The general function has all the code -needed to point variables to the right place and set up a function -call. The general function assumes that the actual function to call is -passed in as the extra data and calls it with the correct values. All -of these functions are suitable for placing directly in the array of -functions stored in the functions member of the PyUFuncObject -structure. - -.. cfunction:: void PyUFunc_f_f_As_d_d(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_d_d(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_f_f(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_g_g(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_F_F_As_D_D(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_F_F(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_D_D(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_G_G(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_e_e(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_e_e_As_f_f(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. 
cfunction:: void PyUFunc_e_e_As_d_d(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - - Type specific, core 1-d functions for ufuncs where each - calculation is obtained by calling a function taking one input - argument and returning one output. This function is passed in - ``func``. The letters correspond to dtypechar's of the supported - data types ( ``e`` - half, ``f`` - float, ``d`` - double, - ``g`` - long double, ``F`` - cfloat, ``D`` - cdouble, - ``G`` - clongdouble). The argument *func* must support the same - signature. The _As_X_X variants assume ndarray's of one data type - but cast the values to use an underlying function that takes a - different data type. Thus, :cfunc:`PyUFunc_f_f_As_d_d` uses - ndarrays of data type :cdata:`NPY_FLOAT` but calls out to a - C-function that takes double and returns double. - -.. cfunction:: void PyUFunc_ff_f_As_dd_d(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_ff_f(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_dd_d(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_gg_g(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_FF_F_As_DD_D(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_DD_D(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_FF_F(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_GG_G(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_ee_e(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_ee_e_As_ff_f(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. 
cfunction:: void PyUFunc_ee_e_As_dd_d(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - - Type specific, core 1-d functions for ufuncs where each - calculation is obtained by calling a function taking two input - arguments and returning one output. The underlying function to - call is passed in as *func*. The letters correspond to - dtypechar's of the specific data type supported by the - general-purpose function. The argument ``func`` must support the - corresponding signature. The ``_As_XX_X`` variants assume ndarrays - of one data type but cast the values at each iteration of the loop - to use the underlying function that takes a different data type. - -.. cfunction:: void PyUFunc_O_O(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - -.. cfunction:: void PyUFunc_OO_O(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - - One-input, one-output, and two-input, one-output core 1-d functions - for the :cdata:`NPY_OBJECT` data type. These functions handle reference - count issues and return early on error. The actual function to call is - *func* and it must accept calls with the signature ``(PyObject*) - (PyObject*)`` for :cfunc:`PyUFunc_O_O` or ``(PyObject*)(PyObject *, - PyObject *)`` for :cfunc:`PyUFunc_OO_O`. - -.. cfunction:: void PyUFunc_O_O_method(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - - This general purpose 1-d core function assumes that *func* is a string - representing a method of the input object. For each - iteration of the loop, the Python obejct is extracted from the array - and its *func* method is called returning the result to the output array. - -.. cfunction:: void PyUFunc_OO_O_method(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - - This general purpose 1-d core function assumes that *func* is a - string representing a method of the input object that takes one - argument. 
The first argument in *args* is the method whose function is - called, the second argument in *args* is the argument passed to the - function. The output of the function is stored in the third entry - of *args*. - -.. cfunction:: void PyUFunc_On_Om(char** args, npy_intp* dimensions, - npy_intp* steps, void* func) - - This is the 1-d core function used by the dynamic ufuncs created - by umath.frompyfunc(function, nin, nout). In this case *func* is a - pointer to a :ctype:`PyUFunc_PyFuncData` structure which has definition - - .. ctype:: PyUFunc_PyFuncData - - .. code-block:: c - - typedef struct { - int nin; - int nout; - PyObject *callable; - } PyUFunc_PyFuncData; - - At each iteration of the loop, the *nin* input objects are exctracted - from their object arrays and placed into an argument tuple, the Python - *callable* is called with the input arguments, and the nout - outputs are placed into their object arrays. - - -Importing the API ------------------ - -.. cvar:: PY_UFUNC_UNIQUE_SYMBOL - -.. cvar:: NO_IMPORT_UFUNC - -.. cfunction:: void import_ufunc(void) - - These are the constants and functions for accessing the ufunc - C-API from extension modules in precisely the same way as the - array C-API can be accessed. The ``import_ufunc`` () function must - always be called (in the initialization subroutine of the - extension module). If your extension module is in one file then - that is all that is required. The other two constants are useful - if your extension module makes use of multiple files. In that - case, define :cdata:`PY_UFUNC_UNIQUE_SYMBOL` to something unique to - your code and then in source files that do not contain the module - initialization function but still need access to the UFUNC API, - define :cdata:`PY_UFUNC_UNIQUE_SYMBOL` to the same name used previously - and also define :cdata:`NO_IMPORT_UFUNC`. - - The C-API is actually an array of function pointers. This array is - created (and pointed to by a global variable) by import_ufunc. 
The - global variable is either statically defined or allowed to be seen - by other files depending on the state of - :cdata:`Py_UFUNC_UNIQUE_SYMBOL` and :cdata:`NO_IMPORT_UFUNC`. - -.. index:: - pair: ufunc; C-API diff --git a/numpy-1.6.2/doc/source/reference/distutils.rst b/numpy-1.6.2/doc/source/reference/distutils.rst deleted file mode 100644 index 5d11a6d4ce..0000000000 --- a/numpy-1.6.2/doc/source/reference/distutils.rst +++ /dev/null @@ -1,316 +0,0 @@ -********************************** -Packaging (:mod:`numpy.distutils`) -********************************** - -.. module:: numpy.distutils - -NumPy provides enhanced distutils functionality to make it easier to -build and install sub-packages, auto-generate code, and extension -modules that use Fortran-compiled libraries. To use features of NumPy -distutils, use the :func:`setup ` command from -:mod:`numpy.distutils.core`. A useful :class:`Configuration -` class is also provided in -:mod:`numpy.distutils.misc_util` that can make it easier to construct -keyword arguments to pass to the setup function (by passing the -dictionary obtained from the todict() method of the class). More -information is available in the NumPy Distutils Users Guide in -``/numpy/doc/DISTUTILS.txt``. - - -.. index:: - single: distutils - - -Modules in :mod:`numpy.distutils` -================================= - -misc_util ---------- - -.. module:: numpy.distutils.misc_util - -.. autosummary:: - :toctree: generated/ - - get_numpy_include_dirs - dict_append - appendpath - allpath - dot_join - generate_config_py - get_cmd - terminal_has_colors - red_text - green_text - yellow_text - blue_text - cyan_text - cyg2win32 - all_strings - has_f_sources - has_cxx_sources - filter_sources - get_dependencies - is_local_src_dir - get_ext_source_files - get_script_files - - -.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) - - Construct a configuration instance for the given package name. 
If - *parent_name* is not None, then construct the package as a - sub-package of the *parent_name* package. If *top_path* and - *package_path* are None then they are assumed equal to - the path of the file this instance was created in. The setup.py - files in the numpy distribution are good examples of how to use - the :class:`Configuration` instance. - - .. automethod:: todict - - .. automethod:: get_distribution - - .. automethod:: get_subpackage - - .. automethod:: add_subpackage - - .. automethod:: add_data_files - - .. automethod:: add_data_dir - - .. automethod:: add_include_dirs - - .. automethod:: add_headers - - .. automethod:: add_extension - - .. automethod:: add_library - - .. automethod:: add_scripts - - .. automethod:: add_installed_library - - .. automethod:: add_npy_pkg_config - - .. automethod:: paths - - .. automethod:: get_config_cmd - - .. automethod:: get_build_temp_dir - - .. automethod:: have_f77c - - .. automethod:: have_f90c - - .. automethod:: get_version - - .. automethod:: make_svn_version_py - - .. automethod:: make_config_py - - .. automethod:: get_info - -Other modules -------------- - -.. currentmodule:: numpy.distutils - -.. autosummary:: - :toctree: generated/ - - system_info.get_info - system_info.get_standard_file - cpuinfo.cpu - log.set_verbosity - exec_command - -Building Installable C libraries -================================ - -Conventional C libraries (installed through `add_library`) are not installed, and -are just used during the build (they are statically linked). An installable C -library is a pure C library, which does not depend on the python C runtime, and -is installed such that it may be used by third-party packages. 
To build and -install the C library, you just use the method `add_installed_library` instead of -`add_library`, which takes the same arguments except for an additional -``install_dir`` argument:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - -npy-pkg-config files --------------------- - -To make the necessary build options available to third parties, you could use -the `npy-pkg-config` mechanism implemented in `numpy.distutils`. This mechanism is -based on a .ini file which contains all the options. A .ini file is very -similar to .pc files as used by the pkg-config unix utility:: - - [meta] - Name: foo - Version: 1.0 - Description: foo library - - [variables] - prefix = /home/user/local - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -Generally, the file needs to be generated during the build, since it needs some -information known at build time only (e.g. prefix). This is mostly automatic if -one uses the `Configuration` method `add_npy_pkg_config`. Assuming we have a -template file foo.ini.in as follows:: - - [meta] - Name: foo - Version: @version@ - Description: foo library - - [variables] - prefix = @prefix@ - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -and the following code in setup.py:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - >>> subst = {'version': '1.0'} - >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst) - -This will install the file foo.ini into the directory package_dir/lib, and the -foo.ini file will be generated from foo.ini.in, where each ``@version@`` will be -replaced by ``subst_dict['version']``. The dictionary has an additional prefix -substitution rule automatically added, which contains the install prefix (since -this is not easy to get from setup.py). 
npy-pkg-config files can also be -installed at the same location as used for numpy, using the path returned from -`get_npy_pkg_dir` function. - -Reusing a C library from another package ----------------------------------------- - -Info are easily retrieved from the `get_info` function in -`numpy.distutils.misc_util`:: - - >>> info = get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=**info) - -An additional list of paths to look for .ini files can be given to `get_info`. - -Conversion of ``.src`` files -============================ - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. - -.. index:: - single: code generation - -Fortran files -------------- - -This template converter will replicate all **function** and -**subroutine** blocks in the file with names that contain '<...>' -according to the rules in '<...>'. The number of comma-separated words -in '<...>' determines the number of times the block is repeated. What -these words are indicates what that repeat rule, '<...>', should be -replaced with in each block. All of the repeat rules in a block must -contain the same number of comma-separated words indicating the number -of times that block should be repeated. If the word in the repeat rule -needs a comma, leftarrow, or rightarrow, then prepend it with a -backslash ' \'. 
If a word in the repeat rule matches ' \\' then -it will be replaced with the -th word in the same repeat -specification. There are two forms for the repeat rule: named and -short. - - -Named repeat rule -^^^^^^^^^^^^^^^^^ - -A named repeat rule is useful when the same set of repeats must be -used several times in a block. It is specified using , where N is the number of times the block -should be repeated. On each repeat of the block, the entire -expression, '<...>' will be replaced first with item1, and then with -item2, and so forth until N repeats are accomplished. Once a named -repeat specification has been introduced, the same repeat rule may be -used **in the current block** by referring only to the name -(i.e. . - - -Short repeat rule -^^^^^^^^^^^^^^^^^ - -A short repeat rule looks like . The -rule specifies that the entire expression, '<...>' should be replaced -first with item1, and then with item2, and so forth until N repeats -are accomplished. - - -Pre-defined names -^^^^^^^^^^^^^^^^^ - -The following predefined named repeat rules are available: - -- - -- <_c=s,d,c,z> - -- <_t=real, double precision, complex, double complex> - -- - -- - -- - -- - - -Other files ------------ - -Non-Fortran files use a separate syntax for defining template blocks -that should be repeated using a variable expansion similar to the -named repeat rules of the Fortran-specific repeats. The template rules -for these files are: - -1. "/\**begin repeat "on a line by itself marks the beginning of - a segment that should be repeated. - -2. Named variable expansions are defined using #name=item1, item2, item3, - ..., itemN# and placed on successive lines. These variables are - replaced in each repeat block with corresponding word. All named - variables in the same repeat block must define the same number of - words. - -3. In specifying the repeat rule for a named variable, item*N is short- - hand for item, item, ..., item repeated N times. 
In addition, - parenthesis in combination with \*N can be used for grouping several - items that should be repeated. Thus, #name=(item1, item2)*4# is - equivalent to #name=item1, item2, item1, item2, item1, item2, item1, - item2# - -4. "\*/ "on a line by itself marks the end of the the variable expansion - naming. The next line is the first line that will be repeated using - the named rules. - -5. Inside the block to be repeated, the variables that should be expanded - are specified as @name@. - -6. "/\**end repeat**/ "on a line by itself marks the previous line - as the last line of the block to be repeated. diff --git a/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.dia b/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.dia deleted file mode 100644 index 62e925cfd9..0000000000 Binary files a/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.dia and /dev/null differ diff --git a/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.pdf b/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.pdf deleted file mode 100644 index 6ce496a3e1..0000000000 Binary files a/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.pdf and /dev/null differ diff --git a/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.png b/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.png deleted file mode 100644 index 6c45758b16..0000000000 Binary files a/numpy-1.6.2/doc/source/reference/figures/dtype-hierarchy.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/reference/figures/threefundamental.fig b/numpy-1.6.2/doc/source/reference/figures/threefundamental.fig deleted file mode 100644 index 79760c410e..0000000000 --- a/numpy-1.6.2/doc/source/reference/figures/threefundamental.fig +++ /dev/null @@ -1,57 +0,0 @@ -#FIG 3.2 -Landscape -Center -Inches -Letter -100.00 -Single --2 -1200 2 -6 1950 2850 4350 3450 -2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 - 1950 2850 4350 2850 4350 3450 1950 3450 1950 2850 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 
0 2 - 2550 2850 2550 3450 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 - 3150 2850 3150 3450 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 - 3750 2850 3750 3450 --6 -2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 - 5100 2850 7500 2850 7500 3450 5100 3450 5100 2850 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 - 5700 2850 5700 3450 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 - 6300 2850 6300 3450 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 - 6900 2850 6900 3450 -2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5 - 7800 3600 7800 2700 525 2700 525 3600 7800 3600 -2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 - 675 2850 1725 2850 1725 3450 675 3450 675 2850 -2 2 0 4 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 - 5700 2850 6300 2850 6300 3450 5700 3450 5700 2850 -2 2 0 4 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 - 5700 1725 6300 1725 6300 2325 5700 2325 5700 1725 -2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5 - 6450 2475 6450 1275 5550 1275 5550 2475 6450 2475 -2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 - 5700 1350 6300 1350 6300 1575 5700 1575 5700 1350 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 - 2 1 1.00 60.00 120.00 - 900 2850 900 1875 1575 1875 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 - 2 1 1.00 60.00 120.00 - 3375 1800 5550 1800 -2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 - 2 1 1.00 60.00 120.00 - 6000 2850 6000 2325 -2 4 0 1 0 7 50 -1 -1 0.000 0 0 7 0 0 5 - 3375 2100 3375 1575 1575 1575 1575 2100 3375 2100 -4 0 0 50 -1 18 14 0.0000 4 165 720 825 3225 header\001 -4 0 0 50 -1 2 40 0.0000 4 105 450 4500 3225 ...\001 -4 0 0 50 -1 18 14 0.0000 4 210 810 3600 3900 ndarray\001 -4 0 0 50 -1 18 14 0.0000 4 165 630 6600 2175 scalar\001 -4 0 0 50 -1 18 14 0.0000 4 165 540 6600 1950 array\001 -4 0 0 50 -1 16 12 0.0000 4 135 420 5775 1500 head\001 -4 0 0 50 -1 18 14 0.0000 4 210 975 1950 1875 data-type\001 diff --git a/numpy-1.6.2/doc/source/reference/figures/threefundamental.pdf b/numpy-1.6.2/doc/source/reference/figures/threefundamental.pdf deleted file mode 100644 index b89e9f2afa..0000000000 Binary files 
a/numpy-1.6.2/doc/source/reference/figures/threefundamental.pdf and /dev/null differ diff --git a/numpy-1.6.2/doc/source/reference/figures/threefundamental.png b/numpy-1.6.2/doc/source/reference/figures/threefundamental.png deleted file mode 100644 index de252fc9d4..0000000000 Binary files a/numpy-1.6.2/doc/source/reference/figures/threefundamental.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/reference/index.rst b/numpy-1.6.2/doc/source/reference/index.rst deleted file mode 100644 index 2e881542e1..0000000000 --- a/numpy-1.6.2/doc/source/reference/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. _reference: - -############### -NumPy Reference -############### - -:Release: |version| -:Date: |today| - -.. module:: numpy - -This reference manual details functions, modules, and objects -included in Numpy, describing what they are and what they do. -For learning how to use NumPy, see also :ref:`user`. - - -.. toctree:: - :maxdepth: 2 - - arrays - ufuncs - routines - ctypes - distutils - c-api - internals - swig - - -Acknowledgements -================ - -Large parts of this manual originate from Travis E. Oliphant's book -`Guide to Numpy `__ (which generously entered -Public Domain in August 2008). The reference documentation for many of -the functions are written by numerous contributors and developers of -Numpy, both prior to and during the -`Numpy Documentation Marathon -`__. - -Please help to improve NumPy's documentation! Instructions on how to -join the ongoing documentation marathon can be found -`on the scipy.org website `__ diff --git a/numpy-1.6.2/doc/source/reference/internals.code-explanations.rst b/numpy-1.6.2/doc/source/reference/internals.code-explanations.rst deleted file mode 100644 index cceb1a60d4..0000000000 --- a/numpy-1.6.2/doc/source/reference/internals.code-explanations.rst +++ /dev/null @@ -1,666 +0,0 @@ -.. 
currentmodule:: numpy - -************************* -Numpy C Code Explanations -************************* - - Fanaticism consists of redoubling your efforts when you have forgotten - your aim. - --- *George Santayana* - - An authority is a person who can tell you more about something than - you really care to know. - --- *Unknown* - -This Chapter attempts to explain the logic behind some of the new -pieces of code. The purpose behind these explanations is to enable -somebody to be able to understand the ideas behind the implementation -somewhat more easily than just staring at the code. Perhaps in this -way, the algorithms can be improved on, borrowed from, and/or -optimized. - - -Memory model -============ - -.. index:: - pair: ndarray; memory model - -One fundamental aspect of the ndarray is that an array is seen as a -"chunk" of memory starting at some location. The interpretation of -this memory depends on the stride information. For each dimension in -an :math:`N` -dimensional array, an integer (stride) dictates how many -bytes must be skipped to get to the next element in that dimension. -Unless you have a single-segment array, this stride information must -be consulted when traversing through an array. It is not difficult to -write code that accepts strides, you just have to use (char \*) -pointers because strides are in units of bytes. Keep in mind also that -strides do not have to be unit-multiples of the element size. Also, -remember that if the number of dimensions of the array is 0 (sometimes -called a rank-0 array), then the strides and dimensions variables are -NULL. - -Besides the structural information contained in the strides and -dimensions members of the :ctype:`PyArrayObject`, the flags contain important -information about how the data may be accessed. In particular, the -:cdata:`NPY_ALIGNED` flag is set when the memory is on a suitable boundary -according to the data-type array. 
Even if you have a contiguous chunk -of memory, you cannot just assume it is safe to dereference a data- -type-specific pointer to an element. Only if the :cdata:`NPY_ALIGNED` flag is -set is this a safe operation (on some platforms it will work but on -others, like Solaris, it will cause a bus error). The :cdata:`NPY_WRITEABLE` -should also be ensured if you plan on writing to the memory area of -the array. It is also possible to obtain a pointer to an unwriteable -memory area. Sometimes, writing to the memory area when the -:cdata:`NPY_WRITEABLE` flag is not set will just be rude. Other times it can -cause program crashes ( *e.g.* a data-area that is a read-only -memory-mapped file). - - -Data-type encapsulation -======================= - -.. index:: - single: dtype - -The data-type is an important abstraction of the ndarray. Operations -will look to the data-type to provide the key functionality that is -needed to operate on the array. This functionality is provided in the -list of function pointers pointed to by the 'f' member of the -:ctype:`PyArray_Descr` structure. In this way, the number of data-types can be -extended simply by providing a :ctype:`PyArray_Descr` structure with suitable -function pointers in the 'f' member. For built-in types there are some -optimizations that by-pass this mechanism, but the point of the data- -type abstraction is to allow new data-types to be added. - -One of the built-in data-types, the void data-type allows for -arbitrary records containing 1 or more fields as elements of the -array. A field is simply another data-type object along with an offset -into the current record. In order to support arbitrarily nested -fields, several recursive implementations of data-type access are -implemented for the void type. A common idiom is to cycle through the -elements of the dictionary and perform a specific operation based on -the data-type object stored at the given offset. These offsets can be -arbitrary numbers. 
Therefore, the possibility of encountering mis- -aligned data must be recognized and taken into account if necessary. - - -N-D Iterators -============= - -.. index:: - single: array iterator - -A very common operation in much of NumPy code is the need to iterate -over all the elements of a general, strided, N-dimensional array. This -operation of a general-purpose N-dimensional loop is abstracted in the -notion of an iterator object. To write an N-dimensional loop, you only -have to create an iterator object from an ndarray, work with the -dataptr member of the iterator object structure and call the macro -:cfunc:`PyArray_ITER_NEXT` (it) on the iterator object to move to the next -element. The "next" element is always in C-contiguous order. The macro -works by first special casing the C-contiguous, 1-D, and 2-D cases -which work very simply. - -For the general case, the iteration works by keeping track of a list -of coordinate counters in the iterator object. At each iteration, the -last coordinate counter is increased (starting from 0). If this -counter is smaller then one less than the size of the array in that -dimension (a pre-computed and stored value), then the counter is -increased and the dataptr member is increased by the strides in that -dimension and the macro ends. If the end of a dimension is reached, -the counter for the last dimension is reset to zero and the dataptr is -moved back to the beginning of that dimension by subtracting the -strides value times one less than the number of elements in that -dimension (this is also pre-computed and stored in the backstrides -member of the iterator object). In this case, the macro does not end, -but a local dimension counter is decremented so that the next-to-last -dimension replaces the role that the last dimension played and the -previously-described tests are executed again on the next-to-last -dimension. In this way, the dataptr is adjusted appropriately for -arbitrary striding. 
- -The coordinates member of the :ctype:`PyArrayIterObject` structure maintains -the current N-d counter unless the underlying array is C-contiguous in -which case the coordinate counting is by-passed. The index member of -the :ctype:`PyArrayIterObject` keeps track of the current flat index of the -iterator. It is updated by the :cfunc:`PyArray_ITER_NEXT` macro. - - -Broadcasting -============ - -.. index:: - single: broadcasting - -In Numeric, broadcasting was implemented in several lines of code -buried deep in ufuncobject.c. In NumPy, the notion of broadcasting has -been abstracted so that it can be performed in multiple places. -Broadcasting is handled by the function :cfunc:`PyArray_Broadcast`. This -function requires a :ctype:`PyArrayMultiIterObject` (or something that is a -binary equivalent) to be passed in. The :ctype:`PyArrayMultiIterObject` keeps -track of the broadcasted number of dimensions and size in each -dimension along with the total size of the broadcasted result. It also -keeps track of the number of arrays being broadcast and a pointer to -an iterator for each of the arrays being broadcasted. - -The :cfunc:`PyArray_Broadcast` function takes the iterators that have already -been defined and uses them to determine the broadcast shape in each -dimension (to create the iterators at the same time that broadcasting -occurs then use the :cfunc:`PyMultiIter_New` function). Then, the iterators are -adjusted so that each iterator thinks it is iterating over an array -with the broadcasted size. This is done by adjusting the iterators -number of dimensions, and the shape in each dimension. This works -because the iterator strides are also adjusted. Broadcasting only -adjusts (or adds) length-1 dimensions. For these dimensions, the -strides variable is simply set to 0 so that the data-pointer for the -iterator over that array doesn't move as the broadcasting operation -operates over the extended dimension. 
- -Broadcasting was always implemented in Numeric using 0-valued strides -for the extended dimensions. It is done in exactly the same way in -NumPy. The big difference is that now the array of strides is kept -track of in a :ctype:`PyArrayIterObject`, the iterators involved in a -broadcasted result are kept track of in a :ctype:`PyArrayMultiIterObject`, -and the :cfunc:`PyArray_BroadCast` call implements the broad-casting rules. - - -Array Scalars -============= - -.. index:: - single: array scalars - -The array scalars offer a hierarchy of Python types that allow a one- -to-one correspondence between the data-type stored in an array and the -Python-type that is returned when an element is extracted from the -array. An exception to this rule was made with object arrays. Object -arrays are heterogeneous collections of arbitrary Python objects. When -you select an item from an object array, you get back the original -Python object (and not an object array scalar which does exist but is -rarely used for practical purposes). - -The array scalars also offer the same methods and attributes as arrays -with the intent that the same code can be used to support arbitrary -dimensions (including 0-dimensions). The array scalars are read-only -(immutable) with the exception of the void scalar which can also be -written to so that record-array field setting works more naturally -(a[0]['f1'] = ``value`` ). - - -Advanced ("Fancy") Indexing -============================= - -.. index:: - single: indexing - -The implementation of advanced indexing represents some of the most -difficult code to write and explain. In fact, there are two -implementations of advanced indexing. The first works only with 1-D -arrays and is implemented to handle expressions involving a.flat[obj]. -The second is general-purpose that works for arrays of "arbitrary -dimension" (up to a fixed maximum). 
The one-dimensional indexing -approaches were implemented in a rather straightforward fashion, and -so it is the general-purpose indexing code that will be the focus of -this section. - -There is a multi-layer approach to indexing because the indexing code -can at times return an array scalar and at other times return an -array. The functions with "_nice" appended to their name do this -special handling while the function without the _nice appendage always -return an array (perhaps a 0-dimensional array). Some special-case -optimizations (the index being an integer scalar, and the index being -a tuple with as many dimensions as the array) are handled in -array_subscript_nice function which is what Python calls when -presented with the code "a[obj]." These optimizations allow fast -single-integer indexing, and also ensure that a 0-dimensional array is -not created only to be discarded as the array scalar is returned -instead. This provides significant speed-up for code that is selecting -many scalars out of an array (such as in a loop). However, it is still -not faster than simply using a list to store standard Python scalars, -because that is optimized by the Python interpreter itself. - -After these optimizations, the array_subscript function itself is -called. This function first checks for field selection which occurs -when a string is passed as the indexing object. Then, 0-D arrays are -given special-case consideration. Finally, the code determines whether -or not advanced, or fancy, indexing needs to be performed. If fancy -indexing is not needed, then standard view-based indexing is performed -using code borrowed from Numeric which parses the indexing object and -returns the offset into the data-buffer and the dimensions necessary -to create a new view of the array. The strides are also changed by -multiplying each stride by the step-size requested along the -corresponding dimension. 
- - -Fancy-indexing check --------------------- - -The fancy_indexing_check routine determines whether or not to use -standard view-based indexing or new copy-based indexing. If the -indexing object is a tuple, then view-based indexing is assumed by -default. Only if the tuple contains an array object or a sequence -object is fancy-indexing assumed. If the indexing object is an array, -then fancy indexing is automatically assumed. If the indexing object -is any other kind of sequence, then fancy-indexing is assumed by -default. This is over-ridden to simple indexing if the sequence -contains any slice, newaxis, or Ellipsis objects, and no arrays or -additional sequences are also contained in the sequence. The purpose -of this is to allow the construction of "slicing" sequences which is a -common technique for building up code that works in arbitrary numbers -of dimensions. - - -Fancy-indexing implementation ------------------------------ - -The concept of indexing was also abstracted using the idea of an -iterator. If fancy indexing is performed, then a :ctype:`PyArrayMapIterObject` -is created. This internal object is not exposed to Python. It is -created in order to handle the fancy-indexing at a high-level. Both -get and set fancy-indexing operations are implemented using this -object. Fancy indexing is abstracted into three separate operations: -(1) creating the :ctype:`PyArrayMapIterObject` from the indexing object, (2) -binding the :ctype:`PyArrayMapIterObject` to the array being indexed, and (3) -getting (or setting) the items determined by the indexing object. -There is an optimization implemented so that the :ctype:`PyArrayIterObject` -(which has it's own less complicated fancy-indexing) is used for -indexing when possible. 
- - -Creating the mapping object -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The first step is to convert the indexing objects into a standard form -where iterators are created for all of the index array inputs and all -Boolean arrays are converted to equivalent integer index arrays (as if -nonzero(arr) had been called). Finally, all integer arrays are -replaced with the integer 0 in the indexing object and all of the -index-array iterators are "broadcast" to the same shape. - - -Binding the mapping object -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When the mapping object is created it does not know which array it -will be used with so once the index iterators are constructed during -mapping-object creation, the next step is to associate these iterators -with a particular ndarray. This process interprets any ellipsis and -slice objects so that the index arrays are associated with the -appropriate axis (the axis indicated by the iteraxis entry -corresponding to the iterator for the integer index array). This -information is then used to check the indices to be sure they are -within range of the shape of the array being indexed. The presence of -ellipsis and/or slice objects implies a sub-space iteration that is -accomplished by extracting a sub-space view of the array (using the -index object resulting from replacing all the integer index arrays -with 0) and storing the information about where this sub-space starts -in the mapping object. This is used later during mapping-object -iteration to select the correct elements from the underlying array. - - -Getting (or Setting) -^^^^^^^^^^^^^^^^^^^^ - -After the mapping object is successfully bound to a particular array, -the mapping object contains the shape of the resulting item as well as -iterator objects that will walk through the currently-bound array and -either get or set its elements as needed. The walk is implemented -using the :cfunc:`PyArray_MapIterNext` function. 
This function sets the -coordinates of an iterator object into the current array to be the -next coordinate location indicated by all of the indexing-object -iterators while adjusting, if necessary, for the presence of a sub- -space. The result of this function is that the dataptr member of the -mapping object structure is pointed to the next position in the array -that needs to be copied out or set to some value. - -When advanced indexing is used to extract an array, an iterator for -the new array is constructed and advanced in phase with the mapping -object iterator. When advanced indexing is used to place values in an -array, a special "broadcasted" iterator is constructed from the object -being placed into the array so that it will only work if the values -used for setting have a shape that is "broadcastable" to the shape -implied by the indexing object. - - -Universal Functions -=================== - -.. index:: - single: ufunc - -Universal functions are callable objects that take :math:`N` inputs -and produce :math:`M` outputs by wrapping basic 1-D loops that work -element-by-element into full easy-to use functions that seamlessly -implement broadcasting, type-checking and buffered coercion, and -output-argument handling. New universal functions are normally created -in C, although there is a mechanism for creating ufuncs from Python -functions (:func:`frompyfunc`). The user must supply a 1-D loop that -implements the basic function taking the input scalar values and -placing the resulting scalars into the appropriate output slots as -explaine n implementation. - - -Setup ------ - -Every ufunc calculation involves some overhead related to setting up -the calculation. The practical significance of this overhead is that -even though the actual calculation of the ufunc is very fast, you will -be able to write array and type-specific code that will work faster -for small arrays than the ufunc. 
In particular, using ufuncs to -perform many calculations on 0-D arrays will be slower than other -Python-based solutions (the silently-imported scalarmath module exists -precisely to give array scalars the look-and-feel of ufunc-based -calculations with significantly reduced overhead). - -When a ufunc is called, many things must be done. The information -collected from these setup operations is stored in a loop-object. This -loop object is a C-structure (that could become a Python object but is -not initialized as such because it is only used internally). This loop -object has the layout needed to be used with PyArray_Broadcast so that -the broadcasting can be handled in the same way as it is handled in -other sections of code. - -The first thing done is to look-up in the thread-specific global -dictionary the current values for the buffer-size, the error mask, and -the associated error object. The state of the error mask controls what -happens when an error-condiction is found. It should be noted that -checking of the hardware error flags is only performed after each 1-D -loop is executed. This means that if the input and output arrays are -contiguous and of the correct type so that a single 1-D loop is -performed, then the flags may not be checked until all elements of the -array have been calcluated. Looking up these values in a thread- -specific dictionary takes time which is easily ignored for all but -very small arrays. - -After checking, the thread-specific global variables, the inputs are -evaluated to determine how the ufunc should proceed and the input and -output arrays are constructed if necessary. Any inputs which are not -arrays are converted to arrays (using context if necessary). Which of -the inputs are scalars (and therefore converted to 0-D arrays) is -noted. - -Next, an appropriate 1-D loop is selected from the 1-D loops available -to the ufunc based on the input array types. 
This 1-D loop is selected -by trying to match the signature of the data-types of the inputs -against the available signatures. The signatures corresponding to -built-in types are stored in the types member of the ufunc structure. -The signatures corresponding to user-defined types are stored in a -linked-list of function-information with the head element stored as a -``CObject`` in the userloops dictionary keyed by the data-type number -(the first user-defined type in the argument list is used as the key). -The signatures are searched until a signature is found to which the -input arrays can all be cast safely (ignoring any scalar arguments -which are not allowed to determine the type of the result). The -implication of this search procedure is that "lesser types" should be -placed below "larger types" when the signatures are stored. If no 1-D -loop is found, then an error is reported. Otherwise, the argument_list -is updated with the stored signature --- in case casting is necessary -and to fix the output types assumed by the 1-D loop. - -If the ufunc has 2 inputs and 1 output and the second input is an -Object array then a special-case check is performed so that -NotImplemented is returned if the second input is not an ndarray, has -the __array_priority\__ attribute, and has an __r{op}\__ special -method. In this way, Python is signaled to give the other object a -chance to complete the operation instead of using generic object-array -calculations. This allows (for example) sparse matrices to override -the multiplication operator 1-D loop. - -For input arrays that are smaller than the specified buffer size, -copies are made of all non-contiguous, mis-aligned, or out-of- -byteorder arrays to ensure that for small arrays, a single-loop is -used. Then, array iterators are created for all the input arrays and -the resulting collection of iterators is broadcast to a single shape. 
- -The output arguments (if any) are then processed and any missing -return arrays are constructed. If any provided output array doesn't -have the correct type (or is mis-aligned) and is smaller than the -buffer size, then a new output array is constructed with the special -UPDATEIFCOPY flag set so that when it is DECREF'd on completion of the -function, it's contents will be copied back into the output array. -Iterators for the output arguments are then processed. - -Finally, the decision is made about how to execute the looping -mechanism to ensure that all elements of the input arrays are combined -to produce the output arrays of the correct type. The options for loop -execution are one-loop (for contiguous, aligned, and correct data- -type), strided-loop (for non-contiguous but still aligned and correct -data-type), and a buffered loop (for mis-aligned or incorrect data- -type situations). Depending on which execution method is called for, -the loop is then setup and computed. - - -Function call -------------- - -This section describes how the basic universal function computation -loop is setup and executed for each of the three different kinds of -execution possibilities. If :cdata:`NPY_ALLOW_THREADS` is defined during -compilation, then the Python Global Interpreter Lock (GIL) is released -prior to calling all of these loops (as long as they don't involve -object arrays). It is re-acquired if necessary to handle error -conditions. The hardware error flags are checked only after the 1-D -loop is calcluated. - - -One Loop -^^^^^^^^ - -This is the simplest case of all. The ufunc is executed by calling the -underlying 1-D loop exactly once. This is possible only when we have -aligned data of the correct type (including byte-order) for both input -and output and all arrays have uniform strides (either contiguous, -0-D, or 1-D). In this case, the 1-D computational loop is called once -to compute the calculation for the entire array. 
Note that the -hardware error flags are only checked after the entire calculation is -complete. - - -Strided Loop -^^^^^^^^^^^^ - -When the input and output arrays are aligned and of the correct type, -but the striding is not uniform (non-contiguous and 2-D or larger), -then a second looping structure is employed for the calculation. This -approach converts all of the iterators for the input and output -arguments to iterate over all but the largest dimension. The inner -loop is then handled by the underlying 1-D computational loop. The -outer loop is a standard iterator loop on the converted iterators. The -hardware error flags are checked after each 1-D loop is completed. - - -Buffered Loop -^^^^^^^^^^^^^ - -This is the code that handles the situation whenever the input and/or -output arrays are either misaligned or of the wrong data-type -(including being byte-swapped) from what the underlying 1-D loop -expects. The arrays are also assumed to be non-contiguous. The code -works very much like the strided loop except for the inner 1-D loop is -modified so that pre-processing is performed on the inputs and post- -processing is performed on the outputs in bufsize chunks (where -bufsize is a user-settable parameter). The underlying 1-D -computational loop is called on data that is copied over (if it needs -to be). The setup code and the loop code is considerably more -complicated in this case because it has to handle: - -- memory allocation of the temporary buffers - -- deciding whether or not to use buffers on the input and output data - (mis-aligned and/or wrong data-type) - -- copying and possibly casting data for any inputs or outputs for which - buffers are necessary. - -- special-casing Object arrays so that reference counts are properly - handled when copies and/or casts are necessary. - -- breaking up the inner 1-D loop into bufsize chunks (with a possible - remainder). - -Again, the hardware error flags are checked at the end of each 1-D -loop. 
- - -Final output manipulation -------------------------- - -Ufuncs allow other array-like classes to be passed seamlessly through -the interface in that inputs of a particular class will induce the -outputs to be of that same class. The mechanism by which this works is -the following. If any of the inputs are not ndarrays and define the -:obj:`__array_wrap__` method, then the class with the largest -:obj:`__array_priority__` attribute determines the type of all the -outputs (with the exception of any output arrays passed in). The -:obj:`__array_wrap__` method of the input array will be called with the -ndarray being returned from the ufunc as it's input. There are two -calling styles of the :obj:`__array_wrap__` function supported. The first -takes the ndarray as the first argument and a tuple of "context" as -the second argument. The context is (ufunc, arguments, output argument -number). This is the first call tried. If a TypeError occurs, then the -function is called with just the ndarray as the first argument. - - -Methods -------- - -Their are three methods of ufuncs that require calculation similar to -the general-purpose ufuncs. These are reduce, accumulate, and -reduceat. Each of these methods requires a setup command followed by a -loop. There are four loop styles possible for the methods -corresponding to no-elements, one-element, strided-loop, and buffered- -loop. These are the same basic loop styles as implemented for the -general purpose function call except for the no-element and one- -element cases which are special-cases occurring when the input array -objects have 0 and 1 elements respectively. - - -Setup -^^^^^ - -The setup function for all three methods is ``construct_reduce``. -This function creates a reducing loop object and fills it with -parameters needed to complete the loop. All of the methods only work -on ufuncs that take 2-inputs and return 1 output. 
Therefore, the -underlying 1-D loop is selected assuming a signature of [ ``otype``, -``otype``, ``otype`` ] where ``otype`` is the requested reduction -data-type. The buffer size and error handling is then retrieved from -(per-thread) global storage. For small arrays that are mis-aligned or -have incorrect data-type, a copy is made so that the un-buffered -section of code is used. Then, the looping strategy is selected. If -there is 1 element or 0 elements in the array, then a simple looping -method is selected. If the array is not mis-aligned and has the -correct data-type, then strided looping is selected. Otherwise, -buffered looping must be performed. Looping parameters are then -established, and the return array is constructed. The output array is -of a different shape depending on whether the method is reduce, -accumulate, or reduceat. If an output array is already provided, then -it's shape is checked. If the output array is not C-contiguous, -aligned, and of the correct data type, then a temporary copy is made -with the UPDATEIFCOPY flag set. In this way, the methods will be able -to work with a well-behaved output array but the result will be copied -back into the true output array when the method computation is -complete. Finally, iterators are set up to loop over the correct axis -(depending on the value of axis provided to the method) and the setup -routine returns to the actual computation routine. - - -Reduce -^^^^^^ - -.. index:: - triple: ufunc; methods; reduce - -All of the ufunc methods use the same underlying 1-D computational -loops with input and output arguments adjusted so that the appropriate -reduction takes place. For example, the key to the functioning of -reduce is that the 1-D loop is called with the output and the second -input pointing to the same position in memory and both having a step- -size of 0. The first input is pointing to the input array with a step- -size given by the appropriate stride for the selected axis. 
In this -way, the operation performed is - -.. math:: - :nowrap: - - \begin{align*} - o & = & i[0] \\ - o & = & i[k]\textrm{}o\quad k=1\ldots N - \end{align*} - -where :math:`N+1` is the number of elements in the input, :math:`i`, -:math:`o` is the output, and :math:`i[k]` is the -:math:`k^{\textrm{th}}` element of :math:`i` along the selected axis. -This basic operations is repeated for arrays with greater than 1 -dimension so that the reduction takes place for every 1-D sub-array -along the selected axis. An iterator with the selected dimension -removed handles this looping. - -For buffered loops, care must be taken to copy and cast data before -the loop function is called because the underlying loop expects -aligned data of the correct data-type (including byte-order). The -buffered loop must handle this copying and casting prior to calling -the loop function on chunks no greater than the user-specified -bufsize. - - -Accumulate -^^^^^^^^^^ - -.. index:: - triple: ufunc; methods; accumulate - -The accumulate function is very similar to the reduce function in that -the output and the second input both point to the output. The -difference is that the second input points to memory one stride behind -the current output pointer. Thus, the operation performed is - -.. math:: - :nowrap: - - \begin{align*} - o[0] & = & i[0] \\ - o[k] & = & i[k]\textrm{}o[k-1]\quad k=1\ldots N. - \end{align*} - -The output has the same shape as the input and each 1-D loop operates -over :math:`N` elements when the shape in the selected axis is :math:`N+1`. -Again, buffered loops take care to copy and cast the data before -calling the underlying 1-D computational loop. - - -Reduceat -^^^^^^^^ - -.. index:: - triple: ufunc; methods; reduceat - single: ufunc - -The reduceat function is a generalization of both the reduce and -accumulate functions. It implements a reduce over ranges of the input -array specified by indices. 
The extra indices argument is checked to -be sure that every input is not too large for the input array along -the selected dimension before the loop calculations take place. The -loop implementation is handled using code that is very similar to the -reduce code repeated as many times as there are elements in the -indices input. In particular: the first input pointer passed to the -underlying 1-D computational loop points to the input array at the -correct location indicated by the index array. In addition, the output -pointer and the second input pointer passed to the underlying 1-D loop -point to the same position in memory. The size of the 1-D -computational loop is fixed to be the difference between the current -index and the next index (when the current index is the last index, -then the next index is assumed to be the length of the array along the -selected dimension). In this way, the 1-D loop will implement a reduce -over the specified indices. - -Mis-aligned or a loop data-type that does not match the input and/or -output data-type is handled using buffered code where-in data is -copied to a temporary buffer and cast to the correct data-type if -necessary prior to calling the underlying 1-D function. The temporary -buffers are created in (element) sizes no bigger than the user -settable buffer-size value. Thus, the loop must be flexible enough to -call the underlying 1-D computational loop enough times to complete -the total calculation in chunks no bigger than the buffer-size. diff --git a/numpy-1.6.2/doc/source/reference/internals.rst b/numpy-1.6.2/doc/source/reference/internals.rst deleted file mode 100644 index c9716813d1..0000000000 --- a/numpy-1.6.2/doc/source/reference/internals.rst +++ /dev/null @@ -1,9 +0,0 @@ -*************** -Numpy internals -*************** - -.. toctree:: - - internals.code-explanations - -.. 
automodule:: numpy.doc.internals diff --git a/numpy-1.6.2/doc/source/reference/maskedarray.baseclass.rst b/numpy-1.6.2/doc/source/reference/maskedarray.baseclass.rst deleted file mode 100644 index fd1fd7ae61..0000000000 --- a/numpy-1.6.2/doc/source/reference/maskedarray.baseclass.rst +++ /dev/null @@ -1,462 +0,0 @@ -.. currentmodule:: numpy.ma - - -.. _numpy.ma.constants: - -Constants of the :mod:`numpy.ma` module -======================================= - -In addition to the :class:`MaskedArray` class, the :mod:`numpy.ma` module -defines several constants. - -.. data:: masked - - The :attr:`masked` constant is a special case of :class:`MaskedArray`, - with a float datatype and a null shape. It is used to test whether a - specific entry of a masked array is masked, or to mask one or several - entries of a masked array:: - - >>> x = ma.array([1, 2, 3], mask=[0, 1, 0]) - >>> x[1] is ma.masked - True - >>> x[-1] = ma.masked - >>> x - masked_array(data = [1 -- --], - mask = [False True True], - fill_value = 999999) - - -.. data:: nomask - - Value indicating that a masked array has no invalid entry. - :attr:`nomask` is used internally to speed up computations when the mask - is not needed. - - -.. data:: masked_print_options - - String used in lieu of missing data when a masked array is printed. - By default, this string is ``'--'``. - - - - -.. _maskedarray.baseclass: - -The :class:`MaskedArray` class -============================== - - -.. class:: MaskedArray - - A subclass of :class:`~numpy.ndarray` designed to manipulate numerical arrays with missing data. - - - - An instance of :class:`MaskedArray` can be thought as the combination of several elements: - -* The :attr:`~MaskedArray.data`, as a regular :class:`numpy.ndarray` of any shape or datatype (the data). -* A boolean :attr:`~numpy.ma.MaskedArray.mask` with the same shape as the data, where a ``True`` value indicates that the corresponding element of the data is invalid. 
- The special value :const:`nomask` is also acceptable for arrays without named fields, and indicates that no data is invalid. -* A :attr:`~numpy.ma.MaskedArray.fill_value`, a value that may be used to replace the invalid entries in order to return a standard :class:`numpy.ndarray`. - - - -Attributes and properties of masked arrays ------------------------------------------- - -.. seealso:: :ref:`Array Attributes ` - - -.. attribute:: MaskedArray.data - - Returns the underlying data, as a view of the masked array. - If the underlying data is a subclass of :class:`numpy.ndarray`, it is - returned as such. - - >>> x = ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.data - matrix([[1, 2], - [3, 4]]) - - The type of the data can be accessed through the :attr:`baseclass` - attribute. - -.. attribute:: MaskedArray.mask - - Returns the underlying mask, as an array with the same shape and structure - as the data, but where all fields are atomically booleans. - A value of ``True`` indicates an invalid entry. - - -.. attribute:: MaskedArray.recordmask - - Returns the mask of the array if it has no named fields. For structured - arrays, returns a ndarray of booleans where entries are ``True`` if **all** - the fields are masked, ``False`` otherwise:: - - >>> x = ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], - ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], - ... dtype=[('a', int), ('b', int)]) - >>> x.recordmask - array([False, False, True, False, False], dtype=bool) - - -.. attribute:: MaskedArray.fill_value - - Returns the value used to fill the invalid entries of a masked array. - The value is either a scalar (if the masked array has no named fields), - or a 0-D ndarray with the same :attr:`dtype` as the masked array if it has - named fields. - - The default filling value depends on the datatype of the array: - - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' 
- string 'N/A' - ======== ======== - - - -.. attribute:: MaskedArray.baseclass - - Returns the class of the underlying data. - - >>> x = ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 0], [1, 0]]) - >>> x.baseclass - - - -.. attribute:: MaskedArray.sharedmask - - Returns whether the mask of the array is shared between several masked arrays. - If this is the case, any modification to the mask of one array will be - propagated to the others. - - -.. attribute:: MaskedArray.hardmask - - Returns whether the mask is hard (``True``) or soft (``False``). - When the mask is hard, masked entries cannot be unmasked. - - -As :class:`MaskedArray` is a subclass of :class:`~numpy.ndarray`, a masked array also inherits all the attributes and properties of a :class:`~numpy.ndarray` instance. - -.. autosummary:: - :toctree: generated/ - - MaskedArray.base - MaskedArray.ctypes - MaskedArray.dtype - MaskedArray.flags - - MaskedArray.itemsize - MaskedArray.nbytes - MaskedArray.ndim - MaskedArray.shape - MaskedArray.size - MaskedArray.strides - - MaskedArray.imag - MaskedArray.real - - MaskedArray.flat - MaskedArray.__array_priority__ - - - -:class:`MaskedArray` methods -============================ - -.. seealso:: :ref:`Array methods ` - - -Conversion ----------- - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__float__ - MaskedArray.__hex__ - MaskedArray.__int__ - MaskedArray.__long__ - MaskedArray.__oct__ - - MaskedArray.view - MaskedArray.astype - MaskedArray.byteswap - - MaskedArray.compressed - MaskedArray.filled - MaskedArray.tofile - MaskedArray.toflex - MaskedArray.tolist - MaskedArray.torecords - MaskedArray.tostring - - -Shape manipulation ------------------- - -For reshape, resize, and transpose, the single tuple argument may be -replaced with ``n`` integers which will be interpreted as an n-tuple. - -.. 
autosummary:: - :toctree: generated/ - - MaskedArray.flatten - MaskedArray.ravel - MaskedArray.reshape - MaskedArray.resize - MaskedArray.squeeze - MaskedArray.swapaxes - MaskedArray.transpose - MaskedArray.T - - -Item selection and manipulation -------------------------------- - -For array methods that take an *axis* keyword, it defaults to `None`. -If axis is *None*, then the array is treated as a 1-D array. -Any other value for *axis* represents the dimension along which -the operation should proceed. - -.. autosummary:: - :toctree: generated/ - - MaskedArray.argmax - MaskedArray.argmin - MaskedArray.argsort - MaskedArray.choose - MaskedArray.compress - MaskedArray.diagonal - MaskedArray.fill - MaskedArray.item - MaskedArray.nonzero - MaskedArray.put - MaskedArray.repeat - MaskedArray.searchsorted - MaskedArray.sort - MaskedArray.take - - -Pickling and copy ------------------ - -.. autosummary:: - :toctree: generated/ - - MaskedArray.copy - MaskedArray.dump - MaskedArray.dumps - - -Calculations ------------- - -.. autosummary:: - :toctree: generated/ - - MaskedArray.all - MaskedArray.anom - MaskedArray.any - MaskedArray.clip - MaskedArray.conj - MaskedArray.conjugate - MaskedArray.cumprod - MaskedArray.cumsum - MaskedArray.max - MaskedArray.mean - MaskedArray.min - MaskedArray.prod - MaskedArray.product - MaskedArray.ptp - MaskedArray.round - MaskedArray.std - MaskedArray.sum - MaskedArray.trace - MaskedArray.var - - -Arithmetic and comparison operations ------------------------------------- - -.. index:: comparison, arithmetic, operation, operator - -Comparison operators: -~~~~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__lt__ - MaskedArray.__le__ - MaskedArray.__gt__ - MaskedArray.__ge__ - MaskedArray.__eq__ - MaskedArray.__ne__ - -Truth value of an array (:func:`bool()`): -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__nonzero__ - - -Arithmetic: -~~~~~~~~~~~ - -.. 
autosummary:: - :toctree: generated/ - - MaskedArray.__abs__ - MaskedArray.__add__ - MaskedArray.__radd__ - MaskedArray.__sub__ - MaskedArray.__rsub__ - MaskedArray.__mul__ - MaskedArray.__rmul__ - MaskedArray.__div__ - MaskedArray.__rdiv__ - MaskedArray.__truediv__ - MaskedArray.__rtruediv__ - MaskedArray.__floordiv__ - MaskedArray.__rfloordiv__ - MaskedArray.__mod__ - MaskedArray.__rmod__ - MaskedArray.__divmod__ - MaskedArray.__rdivmod__ - MaskedArray.__pow__ - MaskedArray.__rpow__ - MaskedArray.__lshift__ - MaskedArray.__rlshift__ - MaskedArray.__rshift__ - MaskedArray.__rrshift__ - MaskedArray.__and__ - MaskedArray.__rand__ - MaskedArray.__or__ - MaskedArray.__ror__ - MaskedArray.__xor__ - MaskedArray.__rxor__ - - -Arithmetic, in-place: -~~~~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__iadd__ - MaskedArray.__isub__ - MaskedArray.__imul__ - MaskedArray.__idiv__ - MaskedArray.__itruediv__ - MaskedArray.__ifloordiv__ - MaskedArray.__imod__ - MaskedArray.__ipow__ - MaskedArray.__ilshift__ - MaskedArray.__irshift__ - MaskedArray.__iand__ - MaskedArray.__ior__ - MaskedArray.__ixor__ - - -Representation --------------- - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__repr__ - MaskedArray.__str__ - - MaskedArray.ids - MaskedArray.iscontiguous - - -Special methods ---------------- - -For standard library functions: - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__copy__ - MaskedArray.__deepcopy__ - MaskedArray.__getstate__ - MaskedArray.__reduce__ - MaskedArray.__setstate__ - -Basic customization: - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__new__ - MaskedArray.__array__ - MaskedArray.__array_wrap__ - -Container customization: (see :ref:`Indexing `) - -.. 
autosummary:: - :toctree: generated/ - - MaskedArray.__len__ - MaskedArray.__getitem__ - MaskedArray.__setitem__ - MaskedArray.__delitem__ - MaskedArray.__getslice__ - MaskedArray.__setslice__ - MaskedArray.__contains__ - - - -Specific methods ----------------- - -Handling the mask -~~~~~~~~~~~~~~~~~ - -The following methods can be used to access information about the mask or to -manipulate the mask. - -.. autosummary:: - :toctree: generated/ - - MaskedArray.__setmask__ - - MaskedArray.harden_mask - MaskedArray.soften_mask - MaskedArray.unshare_mask - MaskedArray.shrink_mask - - -Handling the `fill_value` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - MaskedArray.get_fill_value - MaskedArray.set_fill_value - - - -Counting the missing elements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - MaskedArray.count diff --git a/numpy-1.6.2/doc/source/reference/maskedarray.generic.rst b/numpy-1.6.2/doc/source/reference/maskedarray.generic.rst deleted file mode 100644 index f753a56f97..0000000000 --- a/numpy-1.6.2/doc/source/reference/maskedarray.generic.rst +++ /dev/null @@ -1,499 +0,0 @@ -.. currentmodule:: numpy.ma - -.. _maskedarray.generic: - - - -The :mod:`numpy.ma` module -========================== - -Rationale ---------- - -Masked arrays are arrays that may have missing or invalid entries. -The :mod:`numpy.ma` module provides a nearly work-alike replacement for numpy -that supports data arrays with masks. - - - -What is a masked array? ------------------------ - -In many circumstances, datasets can be incomplete or tainted by the presence -of invalid data. For example, a sensor may have failed to record a data, or -recorded an invalid value. The :mod:`numpy.ma` module provides a convenient -way to address this issue, by introducing masked arrays. - -A masked array is the combination of a standard :class:`numpy.ndarray` and a -mask. 
A mask is either :attr:`nomask`, indicating that no value of the -associated array is invalid, or an array of booleans that determines for each -element of the associated array whether the value is valid or not. When an -element of the mask is ``False``, the corresponding element of the associated -array is valid and is said to be unmasked. When an element of the mask is -``True``, the corresponding element of the associated array is said to be -masked (invalid). - -The package ensures that masked entries are not used in computations. - -As an illustration, let's consider the following dataset:: - - >>> import numpy as np - >>> import numpy.ma as ma - >>> x = np.array([1, 2, 3, -1, 5]) - -We wish to mark the fourth entry as invalid. The easiest way is to create a masked -array:: - - >>> mx = ma.masked_array(x, mask=[0, 0, 0, 1, 0]) - -We can now compute the mean of the dataset, without taking the invalid data -into account:: - - >>> mx.mean() - 2.75 - - -The :mod:`numpy.ma` module --------------------------- - - -The main feature of the :mod:`numpy.ma` module is the :class:`MaskedArray` -class, which is a subclass of :class:`numpy.ndarray`. The class, its -attributes and methods are described in more detail in the -:ref:`MaskedArray class <maskedarray.baseclass>` section. - -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: :: - - >>> import numpy as np - >>> import numpy.ma as ma - -To create an array with the second element invalid, we would do:: - - >>> y = ma.array([1, 2, 3], mask = [0, 1, 0]) - -To create a masked array where all values close to 1.e20 are invalid, we would -do:: - - >>> z = ma.masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20) - -For a complete discussion of creation methods for masked arrays please see -section :ref:`Constructing masked arrays <maskedarray.generic.constructing>`. - - - - -Using numpy.ma -============== - -.. _maskedarray.generic.constructing: - -Constructing masked arrays --------------------------- - -There are several ways to construct a masked array. 
- -* A first possibility is to directly invoke the :class:`MaskedArray` class. - -* A second possibility is to use the two masked array constructors, - :func:`array` and :func:`masked_array`. - - .. autosummary:: - :toctree: generated/ - - array - masked_array - - -* A third option is to take the view of an existing array. In that case, the - mask of the view is set to :attr:`nomask` if the array has no named fields, - or an array of boolean with the same structure as the array otherwise. - - >>> x = np.array([1, 2, 3]) - >>> x.view(ma.MaskedArray) - masked_array(data = [1 2 3], - mask = False, - fill_value = 999999) - >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) - >>> x.view(ma.MaskedArray) - masked_array(data = [(1, 1.0) (2, 2.0)], - mask = [(False, False) (False, False)], - fill_value = (999999, 1e+20), - dtype = [('a', '>> x = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) - >>> x[~x.mask] - masked_array(data = [1 4], - mask = [False False], - fill_value = 999999) - -Another way to retrieve the valid data is to use the :meth:`compressed` -method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its -subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` -attribute):: - - >>> x.compressed() - array([1, 4]) - -Note that the output of :meth:`compressed` is always 1D. 
- - - -Modifying the mask ------------------- - -Masking an entry -~~~~~~~~~~~~~~~~ - -The recommended way to mark one or several specific entries of a masked array -as invalid is to assign the special value :attr:`masked` to them:: - - >>> x = ma.array([1, 2, 3]) - >>> x[0] = ma.masked - >>> x - masked_array(data = [-- 2 3], - mask = [ True False False], - fill_value = 999999) - >>> y = ma.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - >>> y[(0, 1, 2), (1, 2, 0)] = ma.masked - >>> y - masked_array(data = - [[1 -- 3] - [4 5 --] - [-- 8 9]], - mask = - [[False True False] - [False False True] - [ True False False]], - fill_value = 999999) - >>> z = ma.array([1, 2, 3, 4]) - >>> z[:-2] = ma.masked - >>> z - masked_array(data = [-- -- 3 4], - mask = [ True True False False], - fill_value = 999999) - - -A second possibility is to modify the :attr:`~MaskedArray.mask` directly, -but this usage is discouraged. - -.. note:: - When creating a new masked array with a simple, non-structured datatype, - the mask is initially set to the special value :attr:`nomask`, that - corresponds roughly to the boolean ``False``. Trying to set an element of - :attr:`nomask` will fail with a :exc:`TypeError` exception, as a boolean - does not support item assignment. 
- - -All the entries of an array can be masked at once by assigning ``True`` to the -mask:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) - >>> x.mask = True - >>> x - masked_array(data = [-- -- --], - mask = [ True True True], - fill_value = 999999) - -Finally, specific entries can be masked and/or unmasked by assigning to the -mask a sequence of booleans:: - - >>> x = ma.array([1, 2, 3]) - >>> x.mask = [0, 1, 0] - >>> x - masked_array(data = [1 -- 3], - mask = [False True False], - fill_value = 999999) - -Unmasking an entry -~~~~~~~~~~~~~~~~~~ - -To unmask one or several specific entries, we can just assign one or several -new valid values to them:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) - >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) - >>> x[-1] = 5 - >>> x - masked_array(data = [1 2 5], - mask = [False False False], - fill_value = 999999) - -.. note:: - Unmasking an entry by direct assignment will silently fail if the masked - array has a *hard* mask, as shown by the :attr:`hardmask` attribute. This - feature was introduced to prevent overwriting the mask. To force the - unmasking of an entry where the array has a hard mask, the mask must first - be softened using the :meth:`soften_mask` method before the assignment. 
- It can be re-hardened with :meth:`harden_mask`:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) - >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) - >>> x[-1] = 5 - >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) - >>> x.soften_mask() - >>> x[-1] = 5 - >>> x - masked_array(data = [1 2 5], - mask = [False False False], - fill_value = 999999) - >>> x.harden_mask() - - -To unmask all masked entries of a masked array (provided the mask isn't a hard -mask), the simplest solution is to assign the constant :attr:`nomask` to the -mask:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) - >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) - >>> x.mask = ma.nomask - >>> x - masked_array(data = [1 2 3], - mask = [False False False], - fill_value = 999999) - - - -Indexing and slicing --------------------- - -As a :class:`MaskedArray` is a subclass of :class:`numpy.ndarray`, it inherits -its mechanisms for indexing and slicing. - -When accessing a single entry of a masked array with no named fields, the -output is either a scalar (if the corresponding entry of the mask is -``False``) or the special value :attr:`masked` (if the corresponding entry of -the mask is ``True``):: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) - >>> x[0] - 1 - >>> x[-1] - masked_array(data = --, - mask = True, - fill_value = 1e+20) - >>> x[-1] is ma.masked - True - -If the masked array has named fields, accessing a single entry returns a -:class:`numpy.void` object if none of the fields are masked, or a 0d masked -array with the same dtype as the initial array if at least one of the fields -is masked. - - >>> y = ma.masked_array([(1,2), (3, 4)], - ... mask=[(0, 0), (0, 1)], - ... 
dtype=[('a', int), ('b', int)]) - >>> y[0] - (1, 2) - >>> y[-1] - masked_array(data = (3, --), - mask = (False, True), - fill_value = (999999, 999999), - dtype = [('a', '>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1]) - >>> mx = x[:3] - >>> mx - masked_array(data = [1 -- 3], - mask = [False True False], - fill_value = 999999) - >>> mx[1] = -1 - >>> mx - masked_array(data = [1 -1 3], - mask = [False True False], - fill_value = 999999) - >>> x.mask - array([False, True, False, False, True], dtype=bool) - >>> x.data - array([ 1, -1, 3, 4, 5]) - - -Accessing a field of a masked array with structured datatype returns a -:class:`MaskedArray`. - -Operations on masked arrays ---------------------------- - -Arithmetic and comparison operations are supported by masked arrays. -As much as possible, invalid entries of a masked array are not processed, -meaning that the corresponding :attr:`data` entries *should* be the same -before and after the operation. - -.. warning:: - We need to stress that this behavior may not be systematic, that masked - data may be affected by the operation in some cases and therefore users - should not rely on this data remaining unchanged. - -The :mod:`numpy.ma` module comes with a specific implementation of most -ufuncs. Unary and binary functions that have a validity domain (such as -:func:`~numpy.log` or :func:`~numpy.divide`) return the :data:`masked` -constant whenever the input is masked or falls outside the validity domain:: - - >>> ma.log([-1, 0, 1, 2]) - masked_array(data = [-- -- 0.0 0.69314718056], - mask = [ True True False False], - fill_value = 1e+20) - -Masked arrays also support standard numpy ufuncs. The output is then a masked -array. The result of a unary ufunc is masked wherever the input is masked. The -result of a binary ufunc is masked wherever any of the input is masked. 
If the -ufunc also returns the optional context output (a 3-element tuple containing -the name of the ufunc, its arguments and its domain), the context is processed -and entries of the output masked array are masked wherever the corresponding -input fall outside the validity domain:: - - >>> x = ma.array([-1, 1, 0, 2, 3], mask=[0, 0, 0, 0, 1]) - >>> np.log(x) - masked_array(data = [-- -- 0.0 0.69314718056 --], - mask = [ True True False False True], - fill_value = 1e+20) - - - -Examples -======== - -Data with a given value representing missing data -------------------------------------------------- - -Let's consider a list of elements, ``x``, where values of -9999. represent -missing data. We wish to compute the average value of the data and the vector -of anomalies (deviations from the average):: - - >>> import numpy.ma as ma - >>> x = [0.,1.,-9999.,3.,4.] - >>> mx = ma.masked_values (x, -9999.) - >>> print mx.mean() - 2.0 - >>> print mx - mx.mean() - [-2.0 -1.0 -- 1.0 2.0] - >>> print mx.anom() - [-2.0 -1.0 -- 1.0 2.0] - - -Filling in the missing data ---------------------------- - -Suppose now that we wish to print that same data, but with the missing values -replaced by the average value. - - >>> print mx.filled(mx.mean()) - [ 0. 1. 2. 3. 4.] - - -Numerical operations --------------------- - -Numerical operations can be easily performed without worrying about missing -values, dividing by zero, square roots of negative numbers, etc.:: - - >>> import numpy as np, numpy.ma as ma - >>> x = ma.array([1., -1., 3., 4., 5., 6.], mask=[0,0,0,0,1,0]) - >>> y = ma.array([1., 2., 0., 4., 5., 6.], mask=[0,0,0,0,0,1]) - >>> print np.sqrt(x/y) - [1.0 -- -- 1.0 -- --] - -Four values of the output are invalid: the first one comes from taking the -square root of a negative number, the second from the division by zero, and -the last two where the inputs were masked. 
- - -Ignoring extreme values ------------------------ - -Let's consider an array ``d`` of random floats between 0 and 1. We wish to -compute the average of the values of ``d`` while ignoring any data outside -the range ``[0.1, 0.9]``:: - - >>> print ma.masked_outside(d, 0.1, 0.9).mean() diff --git a/numpy-1.6.2/doc/source/reference/maskedarray.rst b/numpy-1.6.2/doc/source/reference/maskedarray.rst deleted file mode 100644 index c2deb3ba19..0000000000 --- a/numpy-1.6.2/doc/source/reference/maskedarray.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _maskedarray: - -************* -Masked arrays -************* - -Masked arrays are arrays that may have missing or invalid entries. -The :mod:`numpy.ma` module provides a nearly work-alike replacement for numpy -that supports data arrays with masks. - -.. index:: - single: masked arrays - -.. toctree:: - :maxdepth: 2 - - maskedarray.generic - maskedarray.baseclass - routines.ma diff --git a/numpy-1.6.2/doc/source/reference/routines.array-creation.rst b/numpy-1.6.2/doc/source/reference/routines.array-creation.rst deleted file mode 100644 index 23b35243b2..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.array-creation.rst +++ /dev/null @@ -1,103 +0,0 @@ -.. _routines.array-creation: - -Array creation routines -======================= - -.. seealso:: :ref:`Array creation ` - -.. currentmodule:: numpy - -Ones and zeros --------------- -.. autosummary:: - :toctree: generated/ - - empty - empty_like - eye - identity - ones - ones_like - zeros - zeros_like - -From existing data ------------------- -.. autosummary:: - :toctree: generated/ - - array - asarray - asanyarray - ascontiguousarray - asmatrix - copy - frombuffer - fromfile - fromfunction - fromiter - fromstring - loadtxt - -.. _routines.array-creation.rec: - -Creating record arrays (:mod:`numpy.rec`) ------------------------------------------ - -.. note:: :mod:`numpy.rec` is the preferred alias for - :mod:`numpy.core.records`. - -.. 
autosummary:: - :toctree: generated/ - - core.records.array - core.records.fromarrays - core.records.fromrecords - core.records.fromstring - core.records.fromfile - -.. _routines.array-creation.char: - -Creating character arrays (:mod:`numpy.char`) ---------------------------------------------- - -.. note:: :mod:`numpy.char` is the preferred alias for - :mod:`numpy.core.defchararray`. - -.. autosummary:: - :toctree: generated/ - - core.defchararray.array - core.defchararray.asarray - -Numerical ranges ----------------- -.. autosummary:: - :toctree: generated/ - - arange - linspace - logspace - meshgrid - mgrid - ogrid - -Building matrices ------------------ -.. autosummary:: - :toctree: generated/ - - diag - diagflat - tri - tril - triu - vander - -The Matrix class ----------------- -.. autosummary:: - :toctree: generated/ - - mat - bmat diff --git a/numpy-1.6.2/doc/source/reference/routines.array-manipulation.rst b/numpy-1.6.2/doc/source/reference/routines.array-manipulation.rst deleted file mode 100644 index 2c1a5b2006..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.array-manipulation.rst +++ /dev/null @@ -1,104 +0,0 @@ -Array manipulation routines -*************************** - -.. currentmodule:: numpy - -Changing array shape -==================== -.. autosummary:: - :toctree: generated/ - - - reshape - ravel - ndarray.flat - ndarray.flatten - -Transpose-like operations -========================= -.. autosummary:: - :toctree: generated/ - - rollaxis - swapaxes - ndarray.T - transpose - -Changing number of dimensions -============================= -.. autosummary:: - :toctree: generated/ - - atleast_1d - atleast_2d - atleast_3d - broadcast - broadcast_arrays - expand_dims - squeeze - -Changing kind of array -====================== -.. autosummary:: - :toctree: generated/ - - asarray - asanyarray - asmatrix - asfarray - asfortranarray - asscalar - require - -Joining arrays -============== -.. 
autosummary:: - :toctree: generated/ - - column_stack - concatenate - dstack - hstack - vstack - -Splitting arrays -================ -.. autosummary:: - :toctree: generated/ - - array_split - dsplit - hsplit - split - vsplit - -Tiling arrays -============= -.. autosummary:: - :toctree: generated/ - - tile - repeat - -Adding and removing elements -============================ -.. autosummary:: - :toctree: generated/ - - delete - insert - append - resize - trim_zeros - unique - -Rearranging elements -==================== -.. autosummary:: - :toctree: generated/ - - fliplr - flipud - reshape - roll - rot90 diff --git a/numpy-1.6.2/doc/source/reference/routines.bitwise.rst b/numpy-1.6.2/doc/source/reference/routines.bitwise.rst deleted file mode 100644 index 58661abc72..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.bitwise.rst +++ /dev/null @@ -1,31 +0,0 @@ -Binary operations -***************** - -.. currentmodule:: numpy - -Elementwise bit operations --------------------------- -.. autosummary:: - :toctree: generated/ - - bitwise_and - bitwise_or - bitwise_xor - invert - left_shift - right_shift - -Bit packing ------------ -.. autosummary:: - :toctree: generated/ - - packbits - unpackbits - -Output formatting ------------------ -.. autosummary:: - :toctree: generated/ - - binary_repr diff --git a/numpy-1.6.2/doc/source/reference/routines.char.rst b/numpy-1.6.2/doc/source/reference/routines.char.rst deleted file mode 100644 index 41af947c86..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.char.rst +++ /dev/null @@ -1,87 +0,0 @@ -String operations -***************** - -.. currentmodule:: numpy.core.defchararray - -This module provides a set of vectorized string operations for arrays -of type `numpy.string_` or `numpy.unicode_`. All of them are based on -the string methods in the Python standard library. - -String operations ------------------ - -.. 
autosummary:: - :toctree: generated/ - - add - multiply - mod - capitalize - center - decode - encode - join - ljust - lower - lstrip - partition - replace - rjust - rpartition - rsplit - rstrip - split - splitlines - strip - swapcase - title - translate - upper - zfill - -Comparison ----------- - -Unlike the standard numpy comparison operators, the ones in the `char` -module strip trailing whitespace characters before performing the -comparison. - -.. autosummary:: - :toctree: generated/ - - equal - not_equal - greater_equal - less_equal - greater - less - -String information ------------------- - -.. autosummary:: - :toctree: generated/ - - count - find - index - isalpha - isdecimal - isdigit - islower - isnumeric - isspace - istitle - isupper - rfind - rindex - startswith - -Convenience class ------------------ - -.. autosummary:: - :toctree: generated/ - - chararray - diff --git a/numpy-1.6.2/doc/source/reference/routines.ctypeslib.rst b/numpy-1.6.2/doc/source/reference/routines.ctypeslib.rst deleted file mode 100644 index b04713b61b..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.ctypeslib.rst +++ /dev/null @@ -1,11 +0,0 @@ -*********************************************************** -C-Types Foreign Function Interface (:mod:`numpy.ctypeslib`) -*********************************************************** - -.. currentmodule:: numpy.ctypeslib - -.. autofunction:: as_array -.. autofunction:: as_ctypes -.. autofunction:: ctypes_load_library -.. autofunction:: load_library -.. autofunction:: ndpointer diff --git a/numpy-1.6.2/doc/source/reference/routines.dtype.rst b/numpy-1.6.2/doc/source/reference/routines.dtype.rst deleted file mode 100644 index ec8d2981d6..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.dtype.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. _routines.dtype: - -Data type routines -================== - -.. currentmodule:: numpy - -.. 
autosummary:: - :toctree: generated/ - - can_cast - promote_types - min_scalar_type - result_type - common_type - obj2sctype - -Creating data types -------------------- - -.. autosummary:: - :toctree: generated/ - - - dtype - format_parser - -Data type information ---------------------- -.. autosummary:: - :toctree: generated/ - - finfo - iinfo - MachAr - -Data type testing ------------------ -.. autosummary:: - :toctree: generated/ - - issctype - issubdtype - issubsctype - issubclass_ - find_common_type - -Miscellaneous -------------- -.. autosummary:: - :toctree: generated/ - - typename - sctype2char - mintypecode diff --git a/numpy-1.6.2/doc/source/reference/routines.dual.rst b/numpy-1.6.2/doc/source/reference/routines.dual.rst deleted file mode 100644 index 456fc5c027..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.dual.rst +++ /dev/null @@ -1,48 +0,0 @@ -Optionally Scipy-accelerated routines (:mod:`numpy.dual`) -********************************************************* - -.. automodule:: numpy.dual - -Linear algebra --------------- - -.. currentmodule:: numpy.linalg - -.. autosummary:: - - cholesky - det - eig - eigh - eigvals - eigvalsh - inv - lstsq - norm - pinv - solve - svd - -FFT ---- - -.. currentmodule:: numpy.fft - -.. autosummary:: - - fft - fft2 - fftn - ifft - ifft2 - ifftn - -Other ------ - -.. currentmodule:: numpy - -.. autosummary:: - - i0 - diff --git a/numpy-1.6.2/doc/source/reference/routines.emath.rst b/numpy-1.6.2/doc/source/reference/routines.emath.rst deleted file mode 100644 index 9f6c2aaa77..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.emath.rst +++ /dev/null @@ -1,10 +0,0 @@ -Mathematical functions with automatic domain (:mod:`numpy.emath`) -*********************************************************************** - -.. currentmodule:: numpy - -.. note:: :mod:`numpy.emath` is a preferred alias for :mod:`numpy.lib.scimath`, - available after :mod:`numpy` is imported. - -.. 
automodule:: numpy.lib.scimath - diff --git a/numpy-1.6.2/doc/source/reference/routines.err.rst b/numpy-1.6.2/doc/source/reference/routines.err.rst deleted file mode 100644 index b3a7164b98..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.err.rst +++ /dev/null @@ -1,25 +0,0 @@ -Floating point error handling -***************************** - -.. currentmodule:: numpy - -Setting and getting error handling ----------------------------------- - -.. autosummary:: - :toctree: generated/ - - seterr - geterr - seterrcall - geterrcall - errstate - -Internal functions ------------------- - -.. autosummary:: - :toctree: generated/ - - seterrobj - geterrobj diff --git a/numpy-1.6.2/doc/source/reference/routines.fft.rst b/numpy-1.6.2/doc/source/reference/routines.fft.rst deleted file mode 100644 index 6c47925eee..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.fft.rst +++ /dev/null @@ -1,2 +0,0 @@ -.. _routines.fft: -.. automodule:: numpy.fft diff --git a/numpy-1.6.2/doc/source/reference/routines.financial.rst b/numpy-1.6.2/doc/source/reference/routines.financial.rst deleted file mode 100644 index 5f426d7abf..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.financial.rst +++ /dev/null @@ -1,21 +0,0 @@ -Financial functions -******************* - -.. currentmodule:: numpy - -Simple financial functions --------------------------- - -.. autosummary:: - :toctree: generated/ - - fv - pv - npv - pmt - ppmt - ipmt - irr - mirr - nper - rate diff --git a/numpy-1.6.2/doc/source/reference/routines.functional.rst b/numpy-1.6.2/doc/source/reference/routines.functional.rst deleted file mode 100644 index e4aababddc..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.functional.rst +++ /dev/null @@ -1,13 +0,0 @@ -Functional programming -********************** - -.. currentmodule:: numpy - -.. 
autosummary:: - :toctree: generated/ - - apply_along_axis - apply_over_axes - vectorize - frompyfunc - piecewise diff --git a/numpy-1.6.2/doc/source/reference/routines.help.rst b/numpy-1.6.2/doc/source/reference/routines.help.rst deleted file mode 100644 index a41563ccea..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.help.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _routines.help: - -Numpy-specific help functions -============================= - -.. currentmodule:: numpy - -Finding help ------------- - -.. autosummary:: - :toctree: generated/ - - lookfor - - -Reading help ------------- - -.. autosummary:: - :toctree: generated/ - - info - source diff --git a/numpy-1.6.2/doc/source/reference/routines.indexing.rst b/numpy-1.6.2/doc/source/reference/routines.indexing.rst deleted file mode 100644 index 853d24126c..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.indexing.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. _routines.indexing: - -Indexing routines -================= - -.. seealso:: :ref:`Indexing ` - -.. currentmodule:: numpy - -Generating index arrays ------------------------ -.. autosummary:: - :toctree: generated/ - - c_ - r_ - s_ - nonzero - where - indices - ix_ - ogrid - ravel_multi_index - unravel_index - diag_indices - diag_indices_from - mask_indices - tril_indices - tril_indices_from - triu_indices - triu_indices_from - -Indexing-like operations ------------------------- -.. autosummary:: - :toctree: generated/ - - take - choose - compress - diag - diagonal - select - -Inserting data into arrays --------------------------- -.. autosummary:: - :toctree: generated/ - - place - put - putmask - fill_diagonal - -Iterating over arrays ---------------------- -.. 
autosummary:: - :toctree: generated/ - - nditer - ndenumerate - ndindex - flatiter diff --git a/numpy-1.6.2/doc/source/reference/routines.io.rst b/numpy-1.6.2/doc/source/reference/routines.io.rst deleted file mode 100644 index 1293acb485..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.io.rst +++ /dev/null @@ -1,65 +0,0 @@ -Input and output -**************** - -.. currentmodule:: numpy - -NPZ files ---------- -.. autosummary:: - :toctree: generated/ - - load - save - savez - -Text files ----------- -.. autosummary:: - :toctree: generated/ - - loadtxt - savetxt - genfromtxt - fromregex - fromstring - ndarray.tofile - ndarray.tolist - -String formatting ------------------ -.. autosummary:: - :toctree: generated/ - - array_repr - array_str - -Memory mapping files --------------------- -.. autosummary:: - :toctree: generated/ - - memmap - -Text formatting options ------------------------ -.. autosummary:: - :toctree: generated/ - - set_printoptions - get_printoptions - set_string_function - -Base-n representations ----------------------- -.. autosummary:: - :toctree: generated/ - - binary_repr - base_repr - -Data sources ------------- -.. autosummary:: - :toctree: generated/ - - DataSource diff --git a/numpy-1.6.2/doc/source/reference/routines.linalg.rst b/numpy-1.6.2/doc/source/reference/routines.linalg.rst deleted file mode 100644 index 173a6ad532..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.linalg.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. _routines.linalg: - -Linear algebra (:mod:`numpy.linalg`) -************************************ - -.. currentmodule:: numpy - -Matrix and vector products --------------------------- -.. autosummary:: - :toctree: generated/ - - dot - vdot - inner - outer - tensordot - einsum - linalg.matrix_power - kron - -Decompositions --------------- -.. autosummary:: - :toctree: generated/ - - linalg.cholesky - linalg.qr - linalg.svd - -Matrix eigenvalues ------------------- -.. 
autosummary:: - :toctree: generated/ - - linalg.eig - linalg.eigh - linalg.eigvals - linalg.eigvalsh - -Norms and other numbers ------------------------ -.. autosummary:: - :toctree: generated/ - - linalg.norm - linalg.cond - linalg.det - linalg.slogdet - trace - -Solving equations and inverting matrices ----------------------------------------- -.. autosummary:: - :toctree: generated/ - - linalg.solve - linalg.tensorsolve - linalg.lstsq - linalg.inv - linalg.pinv - linalg.tensorinv - -Exceptions ----------- -.. autosummary:: - :toctree: generated/ - - linalg.LinAlgError diff --git a/numpy-1.6.2/doc/source/reference/routines.logic.rst b/numpy-1.6.2/doc/source/reference/routines.logic.rst deleted file mode 100644 index 56e36f49aa..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.logic.rst +++ /dev/null @@ -1,64 +0,0 @@ -Logic functions -*************** - -.. currentmodule:: numpy - -Truth value testing -------------------- -.. autosummary:: - :toctree: generated/ - - all - any - -Array contents --------------- -.. autosummary:: - :toctree: generated/ - - isfinite - isinf - isnan - isneginf - isposinf - -Array type testing ------------------- -.. autosummary:: - :toctree: generated/ - - iscomplex - iscomplexobj - isfortran - isreal - isrealobj - isscalar - -Logical operations ------------------- -.. autosummary:: - :toctree: generated/ - - logical_and - logical_or - logical_not - logical_xor - -Comparison ----------- -.. autosummary:: - :toctree: generated/ - - allclose - array_equal - array_equiv - -.. autosummary:: - :toctree: generated/ - - greater - greater_equal - less - less_equal - equal - not_equal diff --git a/numpy-1.6.2/doc/source/reference/routines.ma.rst b/numpy-1.6.2/doc/source/reference/routines.ma.rst deleted file mode 100644 index 7367553384..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.ma.rst +++ /dev/null @@ -1,404 +0,0 @@ -.. _routines.ma: - -Masked array operations -*********************** - -.. 
currentmodule:: numpy - - -Constants -========= - -.. autosummary:: - :toctree: generated/ - - ma.MaskType - - -Creation -======== - -From existing data -~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - ma.masked_array - ma.array - ma.copy - ma.frombuffer - ma.fromfunction - - ma.MaskedArray.copy - - -Ones and zeros -~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - ma.empty - ma.empty_like - ma.masked_all - ma.masked_all_like - ma.ones - ma.zeros - - -_____ - -Inspecting the array -==================== - -.. autosummary:: - :toctree: generated/ - - ma.all - ma.any - ma.count - ma.count_masked - ma.getmask - ma.getmaskarray - ma.getdata - ma.nonzero - ma.shape - ma.size - - ma.MaskedArray.data - ma.MaskedArray.mask - ma.MaskedArray.recordmask - - ma.MaskedArray.all - ma.MaskedArray.any - ma.MaskedArray.count - ma.MaskedArray.nonzero - ma.shape - ma.size - - -_____ - -Manipulating a MaskedArray -========================== - -Changing the shape -~~~~~~~~~~~~~~~~~~ - -.. autosummary:: - :toctree: generated/ - - ma.ravel - ma.reshape - ma.resize - - ma.MaskedArray.flatten - ma.MaskedArray.ravel - ma.MaskedArray.reshape - ma.MaskedArray.resize - - -Modifying axes -~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.swapaxes - ma.transpose - - ma.MaskedArray.swapaxes - ma.MaskedArray.transpose - - -Changing the number of dimensions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.atleast_1d - ma.atleast_2d - ma.atleast_3d - ma.expand_dims - ma.squeeze - - ma.MaskedArray.squeeze - - ma.column_stack - ma.concatenate - ma.dstack - ma.hstack - ma.hsplit - ma.mr_ - ma.row_stack - ma.vstack - - -Joining arrays -~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.column_stack - ma.concatenate - ma.dstack - ma.hstack - ma.vstack - - -_____ - -Operations on masks -=================== - -Creating a mask -~~~~~~~~~~~~~~~ -.. 
autosummary:: - :toctree: generated/ - - ma.make_mask - ma.make_mask_none - ma.mask_or - ma.make_mask_descr - - -Accessing a mask -~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.getmask - ma.getmaskarray - ma.masked_array.mask - - -Finding masked data -~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.flatnotmasked_contiguous - ma.flatnotmasked_edges - ma.notmasked_contiguous - ma.notmasked_edges - - -Modifying a mask -~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.mask_cols - ma.mask_or - ma.mask_rowcols - ma.mask_rows - ma.harden_mask - ma.soften_mask - - ma.MaskedArray.harden_mask - ma.MaskedArray.soften_mask - ma.MaskedArray.shrink_mask - ma.MaskedArray.unshare_mask - - -_____ - -Conversion operations -====================== - -> to a masked array -~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.asarray - ma.asanyarray - ma.fix_invalid - ma.masked_equal - ma.masked_greater - ma.masked_greater_equal - ma.masked_inside - ma.masked_invalid - ma.masked_less - ma.masked_less_equal - ma.masked_not_equal - ma.masked_object - ma.masked_outside - ma.masked_values - ma.masked_where - - -> to a ndarray -~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.compress_cols - ma.compress_rowcols - ma.compress_rows - ma.compressed - ma.filled - - ma.MaskedArray.compressed - ma.MaskedArray.filled - - -> to another object -~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.MaskedArray.tofile - ma.MaskedArray.tolist - ma.MaskedArray.torecords - ma.MaskedArray.tostring - - -Pickling and unpickling -~~~~~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.dump - ma.dumps - ma.load - ma.loads - - -Filling a masked array -~~~~~~~~~~~~~~~~~~~~~~ -.. 
autosummary:: - :toctree: generated/ - - ma.common_fill_value - ma.default_fill_value - ma.maximum_fill_value - ma.maximum_fill_value - ma.set_fill_value - - ma.MaskedArray.get_fill_value - ma.MaskedArray.set_fill_value - ma.MaskedArray.fill_value - - -_____ - -Masked arrays arithmetics -========================= - -Arithmetics -~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.anom - ma.anomalies - ma.average - ma.conjugate - ma.corrcoef - ma.cov - ma.cumsum - ma.cumprod - ma.mean - ma.median - ma.power - ma.prod - ma.std - ma.sum - ma.var - - ma.MaskedArray.anom - ma.MaskedArray.cumprod - ma.MaskedArray.cumsum - ma.MaskedArray.mean - ma.MaskedArray.prod - ma.MaskedArray.std - ma.MaskedArray.sum - ma.MaskedArray.var - - -Minimum/maximum -~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.argmax - ma.argmin - ma.max - ma.min - ma.ptp - - ma.MaskedArray.argmax - ma.MaskedArray.argmin - ma.MaskedArray.max - ma.MaskedArray.min - ma.MaskedArray.ptp - - -Sorting -~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.argsort - ma.sort - ma.MaskedArray.argsort - ma.MaskedArray.sort - - -Algebra -~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.diag - ma.dot - ma.identity - ma.inner - ma.innerproduct - ma.outer - ma.outerproduct - ma.trace - ma.transpose - - ma.MaskedArray.trace - ma.MaskedArray.transpose - - -Polynomial fit -~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.vander - ma.polyfit - - -Clipping and rounding -~~~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - ma.around - ma.clip - ma.round - - ma.MaskedArray.clip - ma.MaskedArray.round - - -Miscellanea -~~~~~~~~~~~ -.. 
autosummary:: - :toctree: generated/ - - ma.allequal - ma.allclose - ma.apply_along_axis - ma.arange - ma.choose - ma.ediff1d - ma.indices - ma.where - - diff --git a/numpy-1.6.2/doc/source/reference/routines.math.rst b/numpy-1.6.2/doc/source/reference/routines.math.rst deleted file mode 100644 index 7ce77c24d4..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.math.rst +++ /dev/null @@ -1,150 +0,0 @@ -Mathematical functions -********************** - -.. currentmodule:: numpy - -Trigonometric functions ------------------------ -.. autosummary:: - :toctree: generated/ - - sin - cos - tan - arcsin - arccos - arctan - hypot - arctan2 - degrees - radians - unwrap - deg2rad - rad2deg - -Hyperbolic functions --------------------- -.. autosummary:: - :toctree: generated/ - - sinh - cosh - tanh - arcsinh - arccosh - arctanh - -Rounding --------- -.. autosummary:: - :toctree: generated/ - - around - round_ - rint - fix - floor - ceil - trunc - -Sums, products, differences ---------------------------- -.. autosummary:: - :toctree: generated/ - - prod - sum - nansum - cumprod - cumsum - diff - ediff1d - gradient - cross - trapz - -Exponents and logarithms ------------------------- -.. autosummary:: - :toctree: generated/ - - exp - expm1 - exp2 - log - log10 - log2 - log1p - logaddexp - logaddexp2 - -Other special functions ------------------------ -.. autosummary:: - :toctree: generated/ - - i0 - sinc - -Floating point routines ------------------------ -.. autosummary:: - :toctree: generated/ - - signbit - copysign - frexp - ldexp - -Arithmetic operations ---------------------- -.. autosummary:: - :toctree: generated/ - - add - reciprocal - negative - multiply - divide - power - subtract - true_divide - floor_divide - - fmod - mod - modf - remainder - -Handling complex numbers ------------------------- -.. autosummary:: - :toctree: generated/ - - angle - real - imag - conj - - -Miscellaneous -------------- -.. 
autosummary:: - :toctree: generated/ - - convolve - clip - - sqrt - square - - absolute - fabs - sign - maximum - minimum - - nan_to_num - real_if_close - - interp diff --git a/numpy-1.6.2/doc/source/reference/routines.matlib.rst b/numpy-1.6.2/doc/source/reference/routines.matlib.rst deleted file mode 100644 index 7f8a9eabb3..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.matlib.rst +++ /dev/null @@ -1,11 +0,0 @@ -Matrix library (:mod:`numpy.matlib`) -************************************ - -.. currentmodule:: numpy - -This module contains all functions in the :mod:`numpy` namespace, with -the following replacement functions that return :class:`matrices -` instead of :class:`ndarrays `. - -.. automodule:: numpy.matlib - diff --git a/numpy-1.6.2/doc/source/reference/routines.numarray.rst b/numpy-1.6.2/doc/source/reference/routines.numarray.rst deleted file mode 100644 index 36e5aa7645..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.numarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -********************************************** -Numarray compatibility (:mod:`numpy.numarray`) -********************************************** - -.. automodule:: numpy.numarray - diff --git a/numpy-1.6.2/doc/source/reference/routines.oldnumeric.rst b/numpy-1.6.2/doc/source/reference/routines.oldnumeric.rst deleted file mode 100644 index d7f15bcfd9..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.oldnumeric.rst +++ /dev/null @@ -1,8 +0,0 @@ -*************************************************** -Old Numeric compatibility (:mod:`numpy.oldnumeric`) -*************************************************** - -.. currentmodule:: numpy - -.. 
automodule:: numpy.oldnumeric - diff --git a/numpy-1.6.2/doc/source/reference/routines.other.rst b/numpy-1.6.2/doc/source/reference/routines.other.rst deleted file mode 100644 index 354f457338..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.other.rst +++ /dev/null @@ -1,24 +0,0 @@ -Miscellaneous routines -********************** - -.. toctree:: - -.. currentmodule:: numpy - -Buffer objects --------------- -.. autosummary:: - :toctree: generated/ - - getbuffer - newbuffer - -Performance tuning ------------------- -.. autosummary:: - :toctree: generated/ - - alterdot - restoredot - setbufsize - getbufsize diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.chebyshev.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.chebyshev.rst deleted file mode 100644 index 60c816f03d..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.chebyshev.rst +++ /dev/null @@ -1,92 +0,0 @@ -Chebyshev Module (:mod:`numpy.polynomial.chebyshev`) -==================================================== - -.. versionadded:: 1.4.0 - -.. currentmodule:: numpy.polynomial.chebyshev - -This module provides a number of objects (mostly functions) useful for -dealing with Chebyshev series, including a `Chebyshev` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Chebyshev Class ---------------- - -.. autosummary:: - :toctree: generated/ - - Chebyshev - -Basics ------- - -.. autosummary:: - :toctree: generated/ - - chebval - chebval2d - chebval3d - chebgrid2d - chebgrid3d - chebroots - chebfromroots - -Fitting -------- - -.. autosummary:: - :toctree: generated/ - - chebfit - chebvander - chebvander2d - chebvander3d - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - chebder - chebint - -Algebra -------- - -.. 
autosummary:: - :toctree: generated/ - - chebadd - chebsub - chebmul - chebmulx - chebdiv - chebpow - -Quadrature ----------- - -.. autosummary:: - :toctree: generated/ - - chebgauss - chebweight - -Miscellaneous -------------- - -.. autosummary:: - :toctree: generated/ - - chebcompanion - chebdomain - chebzero - chebone - chebx - chebtrim - chebline - cheb2poly - poly2cheb diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.classes.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.classes.rst deleted file mode 100644 index 9139b802e2..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.classes.rst +++ /dev/null @@ -1,316 +0,0 @@ -Using the Convenience Classes -============================= - -The convenience classes provided by the polynomial package are: - - ============ ================ - Name Provides - ============ ================ - Polynomial Power series - Chebyshev Chebyshev series - Legendre Legendre series - Laguerre Laguerre series - Hermite Hermite series - HermiteE HermiteE series - ============ ================ - -The series in this context are finite sums of the corresponding polynomial -basis functions multiplied by coefficients. For instance, a power series -looks like - -.. math:: p(x) = 1 + 2x + 3x^2 - -and has coefficients :math:`[1, 2, 3]`. The Chebyshev series with the -same coefficients looks like - - -.. math:: p(x) = 1 T_0(x) + 2 T_1(x) + 3 T_2(x) - -and more generally - -.. math:: p(x) = \sum_{i=0}^n c_i T_i(x) - -where in this case the :math:`T_n` are the Chebyshev functions of degree -`n`, but could just as easily be the basis functions of any of the other -classes. The convention for all the classes is that the coefficient -c[i] goes with the basis function of degree i. - -All of the classes have the same methods, and especially they implement the -Python numeric operators +, -, \*, //, %, divmod, \*\*, ==, -and !=. The last two can be a bit problematic due to floating point -roundoff errors. 
We now give a quick demonstration of the various -operations using Numpy version 1.7.0. - -Basics ------- - -First we need a polynomial class and a polynomial instance to play with. -The classes can be imported directly from the polynomial package or from -the module of the relevant type. Here we import from the package and use -the conventional Polynomial class because of its familiarity.:: - - >>> from numpy.polynomial import Polynomial as P - >>> p = P([1,2,3]) - >>> p - Polynomial([ 1., 2., 3.], [-1., 1.], [-1., 1.]) - -Note that there are three parts to the long version of the printout. The -first is the coefficients, the second is the domain, and the third is the -window:: - - >>> p.coef - array([ 1., 2., 3.]) - >>> p.domain - array([-1., 1.]) - >>> p.window - array([-1., 1.]) - -Printing a polynomial yields a shorter form without the domain -and window:: - - >>> print p - poly([ 1. 2. 3.]) - -We will deal with the domain and window when we get to fitting, for the moment -we ignore them and run through the basic algebraic and arithmetic operations. - -Addition and Subtraction:: - - >>> p + p - Polynomial([ 2., 4., 6.], [-1., 1.], [-1., 1.]) - >>> p - p - Polynomial([ 0.], [-1., 1.], [-1., 1.]) - -Multiplication:: - - >>> p * p - Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.]) - -Powers:: - - >>> p**2 - Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.]) - -Division: - -Floor_division, '//', is the division operator for the polynomial classes, -polynomials are treated like integers in this regard. For Python versions < -3.x the '/' operator maps to '//', as it does for Python, for later -versions the '/' will only work for division by scalars. 
At some point it -will be deprecated:: - - >>> p // P([-1, 1]) - Polynomial([ 5., 3.], [-1., 1.], [-1., 1.]) - -Remainder:: - - >>> p % P([-1, 1]) - Polynomial([ 6.], [-1., 1.], [-1., 1.]) - -Divmod:: - - >>> quo, rem = divmod(p, P([-1, 1])) - >>> quo - Polynomial([ 5., 3.], [-1., 1.], [-1., 1.]) - >>> rem - Polynomial([ 6.], [-1., 1.], [-1., 1.]) - -Evaluation:: - - >>> x = np.arange(5) - >>> p(x) - array([ 1., 6., 17., 34., 57.]) - >>> x = np.arange(6).reshape(3,2) - >>> p(x) - array([[ 1., 6.], - [ 17., 34.], - [ 57., 86.]]) - -Substitution: - -Substitute a polynomial for x and expand the result. Here we substitute -p in itself leading to a new polynomial of degree 4 after expansion. If -the polynomials are regarded as functions this is composition of -functions:: - - >>> p(p) - Polynomial([ 6., 16., 36., 36., 27.], [-1., 1.], [-1., 1.]) - -Roots:: - - >>> p.roots() - array([-0.33333333-0.47140452j, -0.33333333+0.47140452j]) - - - -It isn't always convenient to explicitly use Polynomial instances, so -tuples, lists, arrays, and scalars are automatically cast in the arithmetic -operations:: - - >>> p + [1, 2, 3] - Polynomial([ 2., 4., 6.], [-1., 1.], [-1., 1.]) - >>> [1, 2, 3] * p - Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.]) - >>> p / 2 - Polynomial([ 0.5, 1. , 1.5], [-1., 1.], [-1., 1.]) - -Polynomials that differ in domain, window, or class can't be mixed in -arithmetic:: - - >>> from numpy.polynomial import Chebyshev as T - >>> p + P([1], domain=[0,1]) - Traceback (most recent call last): - File "", line 1, in - File "", line 213, in __add__ - TypeError: Domains differ - >>> p + P([1], window=[0,1]) - Traceback (most recent call last): - File "", line 1, in - File "", line 215, in __add__ - TypeError: Windows differ - >>> p + T([1]) - Traceback (most recent call last): - File "", line 1, in - File "", line 211, in __add__ - TypeError: Polynomial types differ - - -But different types can be used for substitution. 
In fact, this is how -conversion of Polynomial classes among themselves is done for type, domain, -and window casting:: - - >>> p(T([0, 1])) - Chebyshev([ 2.5, 2. , 1.5], [-1., 1.], [-1., 1.]) - -Which gives the polynomial 'p' in Chebyshev form. This works because -:math:`T_1(x) = x` and substituting :math:`x` for :math:`x` doesn't change -the original polynomial. However, all the multiplications and divisions -will be done using Chebyshev series, hence the type of the result. - -Calculus --------- - -Polynomial instances can be integrated and differentiated.:: - - >>> from numpy.polynomial import Polynomial as P - >>> p = P([2, 6]) - >>> p.integ() - Polynomial([ 0., 2., 3.], [-1., 1.], [-1., 1.]) - >>> p.integ(2) - Polynomial([ 0., 0., 1., 1.], [-1., 1.], [-1., 1.]) - -The first example integrates 'p' once, the second example integrates it -twice. By default, the lower bound of the integration and the integration -constant are 0, but both can be specified.:: - - >>> p.integ(lbnd=-1) - Polynomial([-1., 2., 3.], [-1., 1.], [-1., 1.]) - >>> p.integ(lbnd=-1, k=1) - Polynomial([ 0., 2., 3.], [-1., 1.], [-1., 1.]) - -In the first case the lower bound of the integration is set to -1 and the -integration constant is 0. In the second the constant of integration is set -to 1 as well. Differentiation is simpler since the only option is the -number times the polynomial is differentiated:: - - >>> p = P([1, 2, 3]) - >>> p.deriv(1) - Polynomial([ 2., 6.], [-1., 1.], [-1., 1.]) - >>> p.deriv(2) - Polynomial([ 6.], [-1., 1.], [-1., 1.]) - - -Other Polynomial Constructors ------------------------------ - -Constructing polynomials by specifying coefficients is just one way of -obtaining a polynomial instance, they may also be created by specifying -their roots, by conversion from other polynomial types, and by least -squares fits. 
Fitting is discussed in its own section, the other methods -are demonstrated below.:: - - >>> from numpy.polynomial import Polynomial as P - >>> from numpy.polynomial import Chebyshev as T - >>> p = P.fromroots([1, 2, 3]) - >>> p - Polynomial([ -6., 11., -6., 1.], [-1., 1.], [-1., 1.]) - >>> p.convert(kind=T) - Chebyshev([ -9. , 11.75, -3. , 0.25], [-1., 1.], [-1., 1.]) - -The convert method can also convert domain and window:: - - >>> p.convert(kind=T, domain=[0, 1]) - Chebyshev([-2.4375 , 2.96875, -0.5625 , 0.03125], [ 0., 1.], [-1., 1.]) - >>> p.convert(kind=P, domain=[0, 1]) - Polynomial([-1.875, 2.875, -1.125, 0.125], [ 0., 1.], [-1., 1.]) - -Conversions between types can be useful, but it is *not* recommended -for routine use. The loss of numerical precision in passing from a -Chebyshev series of degree 50 to a Polynomial series of the same degree -can make the results of numerical evaluation essentially random. - -Fitting -------- - -Fitting is the reason that the `domain` and `window` attributes are part of -the convenience classes. To illustrate the problem, the values of the Chebyshev -polynomials up to degree 5 are plotted below. - -.. plot:: - - >>> import matplotlib.pyplot as plt - >>> from numpy.polynomial import Chebyshev as T - >>> x = np.linspace(-1, 1, 100) - >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="T_%d"%i) - ... - >>> plt.legend(loc="upper left") - - >>> plt.show() - -In the range -1 <= x <= 1 they are nice, equiripple functions lying between +/- 1. -The same plots over the range -2 <= x <= 2 look very different: - -.. plot:: - - >>> import matplotlib.pyplot as plt - >>> from numpy.polynomial import Chebyshev as T - >>> x = np.linspace(-2, 2, 100) - >>> for i in range(6): ax = plt.plot(x, T.basis(i)(x), lw=2, label="T_%d"%i) - ... - >>> plt.legend(loc="lower right") - - >>> plt.show() - -As can be seen, the "good" parts have shrunk to insignificance. 
In using -Chebyshev polynomials for fitting we want to use the region where x is -between -1 and 1 and that is what the 'window' specifies. However, it is -unlikely that the data to be fit has all its data points in that interval, -so we use 'domain' to specify the interval where the data points lie. When -the fit is done, the domain is first mapped to the window by a linear -transformation and the usual least squares fit is done using the mapped -data points. The window and domain of the fit are part of the returned series -and are automatically used when computing values, derivatives, and such. If -they aren't specified in the call the fitting routine will use the default -window and the smallest domain that holds all the data points. This is -illustrated below for a fit to a noisy sin curve. - -.. plot:: - - >>> import numpy as np - >>> import matplotlib.pyplot as plt - >>> from numpy.polynomial import Chebyshev as T - >>> np.random.seed(11) - >>> x = np.linspace(0, 2*np.pi, 20) - >>> y = np.sin(x) + np.random.normal(scale=.1, size=x.shape) - >>> p = T.fit(x, y, 5) - >>> plt.plot(x, y, 'o') - [] - >>> xx, yy = p.linspace() - >>> plt.plot(xx, yy, lw=2) - [] - >>> p.domain - array([ 0. , 6.28318531]) - >>> p.window - array([-1., 1.]) - >>> plt.show() - diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.hermite.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.hermite.rst deleted file mode 100644 index 8ee72e97c3..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.hermite.rst +++ /dev/null @@ -1,92 +0,0 @@ -Hermite Module, "Physicists'" (:mod:`numpy.polynomial.hermite`) -=============================================================== - -.. versionadded:: 1.6.0 - -.. currentmodule:: numpy.polynomial.hermite - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite series, including a `Hermite` class that -encapsulates the usual arithmetic operations. 
(General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Hermite Class -------------- - -.. autosummary:: - :toctree: generated/ - - Hermite - -Basics ------- - -.. autosummary:: - :toctree: generated/ - - hermval - hermval2d - hermval3d - hermgrid2d - hermgrid3d - hermroots - hermfromroots - -Fitting -------- - -.. autosummary:: - :toctree: generated/ - - hermfit - hermvander - hermvander2d - hermvander3d - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - hermder - hermint - -Algebra -------- - -.. autosummary:: - :toctree: generated/ - - hermadd - hermsub - hermmul - hermmulx - hermdiv - hermpow - -Quadrature ----------- - -.. autosummary:: - :toctree: generated/ - - hermgauss - hermweight - -Miscellaneous -------------- - -.. autosummary:: - :toctree: generated/ - - hermcompanion - hermdomain - hermzero - hermone - hermx - hermtrim - hermline - herm2poly - poly2herm diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.hermite_e.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.hermite_e.rst deleted file mode 100644 index 33a15bb444..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.hermite_e.rst +++ /dev/null @@ -1,92 +0,0 @@ -HermiteE Module, "Probabilists'" (:mod:`numpy.polynomial.hermite_e`) -==================================================================== - -.. versionadded:: 1.6.0 - -.. currentmodule:: numpy.polynomial.hermite_e - -This module provides a number of objects (mostly functions) useful for -dealing with HermiteE series, including a `HermiteE` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -HermiteE Class --------------- - -.. autosummary:: - :toctree: generated/ - - HermiteE - -Basics ------- - -.. 
autosummary:: - :toctree: generated/ - - hermeval - hermeval2d - hermeval3d - hermegrid2d - hermegrid3d - hermeroots - hermefromroots - -Fitting -------- - -.. autosummary:: - :toctree: generated/ - - hermefit - hermevander - hermevander2d - hermevander3d - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - hermeder - hermeint - -Algebra -------- - -.. autosummary:: - :toctree: generated/ - - hermeadd - hermesub - hermemul - hermemulx - hermediv - hermepow - -Quadrature ----------- - -.. autosummary:: - :toctree: generated/ - - hermegauss - hermeweight - -Miscellaneous -------------- - -.. autosummary:: - :toctree: generated/ - - hermecompanion - hermedomain - hermezero - hermeone - hermex - hermetrim - hermeline - herme2poly - poly2herme diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.laguerre.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.laguerre.rst deleted file mode 100644 index 45e288cb9c..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.laguerre.rst +++ /dev/null @@ -1,92 +0,0 @@ -Laguerre Module (:mod:`numpy.polynomial.laguerre`) -================================================== - -.. versionadded:: 1.6.0 - -.. currentmodule:: numpy.polynomial.laguerre - -This module provides a number of objects (mostly functions) useful for -dealing with Laguerre series, including a `Laguerre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Laguerre Class --------------- - -.. autosummary:: - :toctree: generated/ - - Laguerre - -Basics ------- - -.. autosummary:: - :toctree: generated/ - - lagval - lagval2d - lagval3d - laggrid2d - laggrid3d - lagroots - lagfromroots - -Fitting -------- - -.. autosummary:: - :toctree: generated/ - - lagfit - lagvander - lagvander2d - lagvander3d - -Calculus --------- - -.. 
autosummary:: - :toctree: generated/ - - lagder - lagint - -Algebra -------- - -.. autosummary:: - :toctree: generated/ - - lagadd - lagsub - lagmul - lagmulx - lagdiv - lagpow - -Quadrature ----------- - -.. autosummary:: - :toctree: generated/ - - laggauss - lagweight - -Miscellaneous -------------- - -.. autosummary:: - :toctree: generated/ - - lagcompanion - lagdomain - lagzero - lagone - lagx - lagtrim - lagline - lag2poly - poly2lag diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.legendre.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.legendre.rst deleted file mode 100644 index fe6edc216f..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.legendre.rst +++ /dev/null @@ -1,92 +0,0 @@ -Legendre Module (:mod:`numpy.polynomial.legendre`) -================================================== - -.. versionadded:: 1.6.0 - -.. currentmodule:: numpy.polynomial.legendre - -This module provides a number of objects (mostly functions) useful for -dealing with Legendre series, including a `Legendre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Legendre Class --------------- - -.. autosummary:: - :toctree: generated/ - - Legendre - -Basics ------- - -.. autosummary:: - :toctree: generated/ - - legval - legval2d - legval3d - leggrid2d - leggrid3d - legroots - legfromroots - -Fitting -------- - -.. autosummary:: - :toctree: generated/ - - legfit - legvander - legvander2d - legvander3d - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - legder - legint - -Algebra -------- - -.. autosummary:: - :toctree: generated/ - - legadd - legsub - legmul - legmulx - legdiv - legpow - -Quadrature ----------- - -.. autosummary:: - :toctree: generated/ - - leggauss - legweight - -Miscellaneous -------------- - -.. 
autosummary:: - :toctree: generated/ - - legcompanion - legdomain - legzero - legone - legx - legtrim - legline - leg2poly - poly2leg diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.package.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.package.rst deleted file mode 100644 index b2d357b317..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.package.rst +++ /dev/null @@ -1,17 +0,0 @@ -Polynomial Package -================== - -.. versionadded:: 1.4.0 - -.. currentmodule:: numpy.polynomial - -.. toctree:: - :maxdepth: 2 - - routines.polynomials.classes - routines.polynomials.polynomial - routines.polynomials.chebyshev - routines.polynomials.legendre - routines.polynomials.laguerre - routines.polynomials.hermite - routines.polynomials.hermite_e diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.poly1d.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.poly1d.rst deleted file mode 100644 index 7eef53ce23..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.poly1d.rst +++ /dev/null @@ -1,46 +0,0 @@ -Poly1d -====== - -.. currentmodule:: numpy - -Basics ------- -.. autosummary:: - :toctree: generated/ - - poly1d - polyval - poly - roots - -Fitting -------- -.. autosummary:: - :toctree: generated/ - - polyfit - -Calculus --------- -.. autosummary:: - :toctree: generated/ - - polyder - polyint - -Arithmetic ----------- -.. autosummary:: - :toctree: generated/ - - polyadd - polydiv - polymul - polysub - -Warnings --------- -.. 
autosummary:: - :toctree: generated/ - - RankWarning diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.polynomial.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.polynomial.rst deleted file mode 100644 index 431856622b..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.polynomial.rst +++ /dev/null @@ -1,81 +0,0 @@ -Polynomial Module (:mod:`numpy.polynomial.polynomial`) -====================================================== - -.. versionadded:: 1.4.0 - -.. currentmodule:: numpy.polynomial.polynomial - -This module provides a number of objects (mostly functions) useful for -dealing with Polynomial series, including a `Polynomial` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Polynomial Class ----------------- - -.. autosummary:: - :toctree: generated/ - - Polynomial - -Basics ------- - -.. autosummary:: - :toctree: generated/ - - polyval - polyval2d - polyval3d - polygrid2d - polygrid3d - polyroots - polyfromroots - -Fitting -------- - -.. autosummary:: - :toctree: generated/ - - polyfit - polyvander - polyvander2d - polyvander3d - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - polyder - polyint - -Algebra -------- - -.. autosummary:: - :toctree: generated/ - - polyadd - polysub - polymul - polymulx - polydiv - polypow - -Miscellaneous -------------- - -.. 
autosummary:: - :toctree: generated/ - - polycompanion - polydomain - polyzero - polyone - polyx - polytrim - polyline diff --git a/numpy-1.6.2/doc/source/reference/routines.polynomials.rst b/numpy-1.6.2/doc/source/reference/routines.polynomials.rst deleted file mode 100644 index 94d1af8e7a..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.polynomials.rst +++ /dev/null @@ -1,23 +0,0 @@ -Polynomials -*********** - -The polynomial package is newer and more complete than poly1d and the -convenience classes are better behaved in the numpy environment. When -backwards compatibility is not an issue it should be the package of choice. -Note that the various routines in the polynomial package all deal with -series whose coefficients go from degree zero upward, which is the reverse -of the poly1d convention. The easy way to remember this is that indexes -correspond to degree, i.e., coef[i] is the coefficient of the term of -degree i. - - -.. toctree:: - :maxdepth: 2 - - routines.polynomials.poly1d - - -.. toctree:: - :maxdepth: 3 - - routines.polynomials.package diff --git a/numpy-1.6.2/doc/source/reference/routines.random.rst b/numpy-1.6.2/doc/source/reference/routines.random.rst deleted file mode 100644 index 508c2c96e7..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.random.rst +++ /dev/null @@ -1,77 +0,0 @@ -.. _routines.random: - -Random sampling (:mod:`numpy.random`) -************************************* - -.. currentmodule:: numpy.random - -Simple random data -================== -.. autosummary:: - :toctree: generated/ - - rand - randn - randint - random_integers - random_sample - bytes - -Permutations -============ -.. autosummary:: - :toctree: generated/ - - shuffle - permutation - -Distributions -============= -.. 
autosummary:: - :toctree: generated/ - - beta - binomial - chisquare - mtrand.dirichlet - exponential - f - gamma - geometric - gumbel - hypergeometric - laplace - logistic - lognormal - logseries - multinomial - multivariate_normal - negative_binomial - noncentral_chisquare - noncentral_f - normal - pareto - poisson - power - rayleigh - standard_cauchy - standard_exponential - standard_gamma - standard_normal - standard_t - triangular - uniform - vonmises - wald - weibull - zipf - -Random generator -================ -.. autosummary:: - :toctree: generated/ - - mtrand.RandomState - seed - get_state - set_state diff --git a/numpy-1.6.2/doc/source/reference/routines.rst b/numpy-1.6.2/doc/source/reference/routines.rst deleted file mode 100644 index c44af4427e..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.rst +++ /dev/null @@ -1,47 +0,0 @@ -******** -Routines -******** - -In this chapter routine docstrings are presented, grouped by functionality. -Many docstrings contain example code, which demonstrates basic usage -of the routine. The examples assume that NumPy is imported with:: - - >>> import numpy as np - -A convenient way to execute examples is the ``%doctest_mode`` mode of -IPython, which allows for pasting of multi-line examples and preserves -indentation. - -.. 
toctree:: - :maxdepth: 2 - - routines.array-creation - routines.array-manipulation - routines.indexing - routines.dtype - routines.io - routines.fft - routines.linalg - routines.random - routines.sort - routines.logic - routines.bitwise - routines.statistics - routines.math - routines.functional - routines.polynomials - routines.financial - routines.set - routines.window - routines.err - routines.ma - routines.help - routines.other - routines.testing - routines.emath - routines.matlib - routines.dual - routines.numarray - routines.oldnumeric - routines.ctypeslib - routines.char diff --git a/numpy-1.6.2/doc/source/reference/routines.set.rst b/numpy-1.6.2/doc/source/reference/routines.set.rst deleted file mode 100644 index 27c6aeb898..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.set.rst +++ /dev/null @@ -1,22 +0,0 @@ -Set routines -============ - -.. currentmodule:: numpy - -Making proper sets ------------------- -.. autosummary:: - :toctree: generated/ - - unique - -Boolean operations ------------------- -.. autosummary:: - :toctree: generated/ - - in1d - intersect1d - setdiff1d - setxor1d - union1d diff --git a/numpy-1.6.2/doc/source/reference/routines.sort.rst b/numpy-1.6.2/doc/source/reference/routines.sort.rst deleted file mode 100644 index c10252c694..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.sort.rst +++ /dev/null @@ -1,39 +0,0 @@ -Sorting, searching, and counting -================================ - -.. currentmodule:: numpy - -Sorting -------- -.. autosummary:: - :toctree: generated/ - - sort - lexsort - argsort - ndarray.sort - msort - sort_complex - -Searching ---------- -.. autosummary:: - :toctree: generated/ - - argmax - nanargmax - argmin - nanargmin - argwhere - nonzero - flatnonzero - where - searchsorted - extract - -Counting --------- -.. 
autosummary:: - :toctree: generated/ - - count_nonzero diff --git a/numpy-1.6.2/doc/source/reference/routines.statistics.rst b/numpy-1.6.2/doc/source/reference/routines.statistics.rst deleted file mode 100644 index b41b62839f..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.statistics.rst +++ /dev/null @@ -1,51 +0,0 @@ -Statistics -========== - -.. currentmodule:: numpy - - -Extremal values ---------------- - -.. autosummary:: - :toctree: generated/ - - amin - amax - nanmax - nanmin - ptp - -Averages and variances ----------------------- - -.. autosummary:: - :toctree: generated/ - - average - mean - median - std - var - -Correlating ------------ - -.. autosummary:: - :toctree: generated/ - - corrcoef - correlate - cov - -Histograms ----------- - -.. autosummary:: - :toctree: generated/ - - histogram - histogram2d - histogramdd - bincount - digitize diff --git a/numpy-1.6.2/doc/source/reference/routines.testing.rst b/numpy-1.6.2/doc/source/reference/routines.testing.rst deleted file mode 100644 index 5f92da1634..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.testing.rst +++ /dev/null @@ -1,48 +0,0 @@ -Test Support (:mod:`numpy.testing`) -=================================== - -.. currentmodule:: numpy.testing - -Common test support for all numpy test scripts. - -This single module should provide all the common functionality for numpy -tests in a single location, so that test scripts can just import it and -work right away. - - -Asserts -======= -.. autosummary:: - :toctree: generated/ - - assert_almost_equal - assert_approx_equal - assert_array_almost_equal - assert_array_equal - assert_array_less - assert_equal - assert_raises - assert_warns - assert_string_equal - -Decorators ----------- -.. autosummary:: - :toctree: generated/ - - decorators.deprecated - decorators.knownfailureif - decorators.setastest - decorators.skipif - decorators.slow - decorate_methods - - -Test Running ------------- -.. 
autosummary:: - :toctree: generated/ - - Tester - run_module_suite - rundocs diff --git a/numpy-1.6.2/doc/source/reference/routines.window.rst b/numpy-1.6.2/doc/source/reference/routines.window.rst deleted file mode 100644 index 7f3414815f..0000000000 --- a/numpy-1.6.2/doc/source/reference/routines.window.rst +++ /dev/null @@ -1,16 +0,0 @@ -Window functions -================ - -.. currentmodule:: numpy - -Various windows ---------------- - -.. autosummary:: - :toctree: generated/ - - bartlett - blackman - hamming - hanning - kaiser diff --git a/numpy-1.6.2/doc/source/reference/swig.interface-file.rst b/numpy-1.6.2/doc/source/reference/swig.interface-file.rst deleted file mode 100644 index 53bf687569..0000000000 --- a/numpy-1.6.2/doc/source/reference/swig.interface-file.rst +++ /dev/null @@ -1,930 +0,0 @@ -Numpy.i: a SWIG Interface File for NumPy -======================================== - -Introduction ------------- - -The Simple Wrapper and Interface Generator (or `SWIG -`_) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -`SWIG`_ can parse header files, and using only the code prototypes, -create an interface to the target language. But `SWIG`_ is not -omnipotent. For example, it cannot know from the prototype:: - - double rms(double* seq, int n); - -what exactly ``seq`` is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? `SWIG`_ cannot determine these details, -and does not attempt to do so. - -If we designed ``rms``, we probably made it a routine that takes an -input-only array of length ``n`` of ``double`` values called ``seq`` -and returns the root mean square. The default behavior of `SWIG`_, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended. 
- -For Python, the preferred way of handling contiguous (or technically, -*strided*) blocks of homogeneous data is with NumPy, which provides full -object-oriented access to multidimensial arrays of data. Therefore, the most -logical Python interface for the ``rms`` function would be (including doc -string):: - - def rms(seq): - """ - rms: return the root mean square of a sequence - rms(numpy.ndarray) -> double - rms(list) -> double - rms(tuple) -> double - """ - -where ``seq`` would be a NumPy array of ``double`` values, and its -length ``n`` would be extracted from ``seq`` internally before being -passed to the C routine. Even better, since NumPy supports -construction of arrays from arbitrary Python sequences, ``seq`` -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a ``double``) and the wrapper code would -internally convert it to a NumPy array before extracting its data -and length. - -`SWIG`_ allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use ``numpy.i``, a `SWIG`_ interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the ``rms`` function prototype defined above was in a header file -named ``rms.h``. To obtain the Python interface discussed above, -your `SWIG`_ interface file would need the following:: - - %{ - #define SWIG_FILE_WITH_INIT - #include "rms.h" - %} - - %include "numpy.i" - - %init %{ - import_array(); - %} - - %apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)}; - %include "rms.h" - -Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. We will refer to such lists as -*signatures*. One of the many typemaps defined by ``numpy.i`` is used -above and has the signature ``(double* IN_ARRAY1, int DIM1)``. 
The -argument names are intended to suggest that the ``double*`` argument -is an input array of one dimension and that the ``int`` represents -that dimension. This is precisely the pattern in the ``rms`` -prototype. - -Most likely, no actual prototypes to be wrapped will have the argument -names ``IN_ARRAY1`` and ``DIM1``. We use the ``%apply`` directive to -apply the typemap for one-dimensional input arrays of type ``double`` -to the actual prototype used by ``rms``. Using ``numpy.i`` -effectively, therefore, requires knowing what typemaps are available -and what they do. - -A `SWIG`_ interface file that includes the `SWIG`_ directives given -above will produce wrapper code that looks something like:: - - 1 PyObject *_wrap_rms(PyObject *args) { - 2 PyObject *resultobj = 0; - 3 double *arg1 = (double *) 0 ; - 4 int arg2 ; - 5 double result; - 6 PyArrayObject *array1 = NULL ; - 7 int is_new_object1 = 0 ; - 8 PyObject * obj0 = 0 ; - 9 - 10 if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail; - 11 { - 12 array1 = obj_to_array_contiguous_allow_conversion( - 13 obj0, NPY_DOUBLE, &is_new_object1); - 14 npy_intp size[1] = { - 15 -1 - 16 }; - 17 if (!array1 || !require_dimensions(array1, 1) || - 18 !require_size(array1, size, 1)) SWIG_fail; - 19 arg1 = (double*) array1->data; - 20 arg2 = (int) array1->dimensions[0]; - 21 } - 22 result = (double)rms(arg1,arg2); - 23 resultobj = SWIG_From_double((double)(result)); - 24 { - 25 if (is_new_object1 && array1) Py_DECREF(array1); - 26 } - 27 return resultobj; - 28 fail: - 29 { - 30 if (is_new_object1 && array1) Py_DECREF(array1); - 31 } - 32 return NULL; - 33 } - -The typemaps from ``numpy.i`` are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the ``rms`` -function. From the format string ``"O:rms"``, we can see that the -argument list is expected to be a single Python object (specified -by the ``O`` before the colon) and whose pointer is stored in -``obj0``. 
A number of functions, supplied by ``numpy.i``, are called -to make and check the (possible) conversion from a generic Python -object to a NumPy array. These functions are explained in the -section `Helper Functions`_, but hopefully their names are -self-explanatory. At line 12 we use ``obj0`` to construct a NumPy -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed. - -This code has a significant amount of error handling. Note the -``SWIG_fail`` is a macro for ``goto fail``, refering to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the NumPy array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30. - -Note that if the C function signature was in a different order:: - - double rms(int n, double* seq); - -that `SWIG`_ would not match the typemap signature given above with -the argument list for ``rms``. Fortunately, ``numpy.i`` has a set of -typemaps with the data pointer given last:: - - %apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)}; - -This simply has the effect of switching the definitions of ``arg1`` -and ``arg2`` in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20. - -Using numpy.i -------------- - -The ``numpy.i`` file is currently located in the ``numpy/docs/swig`` -sub-directory under the ``numpy`` installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. 
If it is ever adopted by `SWIG`_ developers, then it -will be installed in a standard place where `SWIG`_ can find it. - -A simple module that only uses a single `SWIG`_ interface file should -include the following:: - - %{ - #define SWIG_FILE_WITH_INIT - %} - %include "numpy.i" - %init %{ - import_array(); - %} - -Within a compiled Python module, ``import_array()`` should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should ``#define SWIG_FILE_WITH_INIT`` or call -``import_array()``. Or, this initialization call could be in a -wrapper file generated by `SWIG`_ from an interface file that has the -``%init`` block as above. If this is the case, and you have more than -one `SWIG`_ interface file, then only one interface file should -``#define SWIG_FILE_WITH_INIT`` and call ``import_array()``. - -Available Typemaps ------------------- - -The typemap directives provided by ``numpy.i`` for arrays of different -data types, say ``double`` and ``int``, and dimensions of different -types, say ``int`` or ``long``, are identical to one another except -for the C and NumPy type specifications. The typemaps are -therefore implemented (typically behind the scenes) via a macro:: - - %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -that can be invoked for appropriate ``(DATA_TYPE, DATA_TYPECODE, -DIM_TYPE)`` triplets. 
For example:: - - %numpy_typemaps(double, NPY_DOUBLE, int) - %numpy_typemaps(int, NPY_INT , int) - -The ``numpy.i`` interface file uses the ``%numpy_typemaps`` macro to -implement typemaps for the following C data types and ``int`` -dimension types: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -In the following descriptions, we reference a generic ``DATA_TYPE``, which -could be any of the C data types listed above, and ``DIM_TYPE`` which -should be one of the many types of integers. - -The typemap signatures are largely differentiated on the name given to -the buffer pointer. Names with ``FARRAY`` are for FORTRAN-ordered -arrays, and names with ``ARRAY`` are for C-ordered (or 1D arrays). - -Input Arrays -```````````` - -Input arrays are defined as arrays of data that are passed into a -routine but are not altered in-place or returned to the user. The -Python input array is therefore allowed to be almost any Python -sequence (such as a list) that can be converted to the requested type -of array. 
The input array signatures are - -1D: - - * ``( DATA_TYPE IN_ARRAY1[ANY] )`` - * ``( DATA_TYPE* IN_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* IN_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )`` - * ``( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )`` - * ``( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )`` - -The first signature listed, ``( DATA_TYPE IN_ARRAY[ANY] )`` is for -one-dimensional arrays with hard-coded dimensions. Likewise, -``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` is for two-dimensional arrays -with hard-coded dimensions, and similarly for three-dimensional. - -In-Place Arrays -``````````````` - -In-place arrays are defined as arrays that are modified in-place. The -input values may or may not be used, but the values at the time the -function returns are significant. The provided Python argument -must therefore be a NumPy array of the required type. 
The in-place -signatures are - -1D: - - * ``( DATA_TYPE INPLACE_ARRAY1[ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )`` - * ``( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )`` - * ``( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )`` - -These typemaps now check to make sure that the ``INPLACE_ARRAY`` -arguments use native byte ordering. If not, an exception is raised. - -Argout Arrays -````````````` - -Argout arrays are arrays that appear in the input arguments in C, but -are in fact output arrays. This pattern occurs often when there is -more than one output variable and the single return argument is -therefore not sufficient. In Python, the convential way to return -multiple arguments is to pack them into a sequence (tuple, list, etc.) -and return the sequence. This is what the argout typemaps do. If a -wrapped function that uses these argout typemaps has more than one -return argument, they are packed into a tuple or list, depending on -the version of Python. The Python user does not pass these -arrays in, they simply get returned. For the case where a dimension -is specified, the python user must provide that dimension as an -argument. 
The argout signatures are - -1D: - - * ``( DATA_TYPE ARGOUT_ARRAY1[ANY] )`` - * ``( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )`` - -3D: - - * ``( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )`` - -These are typically used in situations where in C/C++, you would -allocate a(n) array(s) on the heap, and call the function to fill the -array(s) values. In Python, the arrays are allocated for you and -returned as new array objects. - -Note that we support ``DATA_TYPE*`` argout typemaps in 1D, but not 2D -or 3D. This is because of a quirk with the `SWIG`_ typemap syntax and -cannot be avoided. Note that for these types of 1D typemaps, the -Python function will take a single argument representing ``DIM1``. - -Argoutview Arrays -````````````````` - -Argoutview arrays are for when your C code provides you with a view of -its internal data and does not require any memory to be allocated by -the user. This can be dangerous. There is almost no way to guarantee -that the internal data from the C code will remain in existence for -the entire lifetime of the NumPy array that encapsulates it. If -the user destroys the object that provides the view of the data before -destroying the NumPy array, then using that array my result in bad -memory references or segmentation faults. Nevertheless, there are -situations, working with large data sets, where you simply have no -other choice. - -The C code to be wrapped for argoutview arrays are characterized by -pointers: pointers to the dimensions and double pointers to the data, -so that these values can be passed back to the user. 
The argoutview -typemap signatures are therefore - -1D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )`` - * ``( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)`` - -Note that arrays with hard-coded dimensions are not supported. These -cannot follow the double pointer signatures of these typemaps. - -Output Arrays -````````````` - -The ``numpy.i`` interface file does not support typemaps for output -arrays, for several reasons. First, C/C++ return arguments are -limited to a single value. This prevents obtaining dimension -information in a general way. Second, arrays with hard-coded lengths -are not permitted as return arguments. In other words:: - - double[3] newVector(double x, double y, double z); - -is not legal C/C++ syntax. Therefore, we cannot provide typemaps of -the form:: - - %typemap(out) (TYPE[ANY]); - -If you run into a situation where a function or method is returning a -pointer to an array, your best bet is to write your own version of the -function to be wrapped, either with ``%extend`` for the case of class -methods or ``%ignore`` and ``%rename`` for the case of functions. - -Other Common Types: bool -```````````````````````` - -Note that C++ type ``bool`` is not supported in the list in the -`Available Typemaps`_ section. 
NumPy bools are a single byte, while -the C++ ``bool`` is four bytes (at least on my system). Therefore:: - - %numpy_typemaps(bool, NPY_BOOL, int) - -will result in typemaps that will produce code that reference -improper data lengths. You can implement the following macro -expansion:: - - %numpy_typemaps(bool, NPY_UINT, int) - -to fix the data length problem, and `Input Arrays`_ will work fine, -but `In-Place Arrays`_ might fail type-checking. - -Other Common Types: complex -``````````````````````````` - -Typemap conversions for complex floating-point types is also not -supported automatically. This is because Python and NumPy are -written in C, which does not have native complex types. Both -Python and NumPy implement their own (essentially equivalent) -``struct`` definitions for complex variables:: - - /* Python */ - typedef struct {double real; double imag;} Py_complex; - - /* NumPy */ - typedef struct {float real, imag;} npy_cfloat; - typedef struct {double real, imag;} npy_cdouble; - -We could have implemented:: - - %numpy_typemaps(Py_complex , NPY_CDOUBLE, int) - %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int) - %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int) - -which would have provided automatic type conversions for arrays of -type ``Py_complex``, ``npy_cfloat`` and ``npy_cdouble``. However, it -seemed unlikely that there would be any independent (non-Python, -non-NumPy) application code that people would be using `SWIG`_ to -generate a Python interface to, that also used these definitions -for complex types. More likely, these application codes will define -their own complex types, or in the case of C++, use ``std::complex``. -Assuming these data structures are compatible with Python and -NumPy complex types, ``%numpy_typemap`` expansions as above (with -the user's complex type substituted for the first argument) should -work. - -NumPy Array Scalars and SWIG ----------------------------- - -`SWIG`_ has sophisticated type checking for numerical types. 
For -example, if your C/C++ routine expects an integer as input, the code -generated by `SWIG`_ will check for both Python integers and -Python long integers, and raise an overflow error if the provided -Python integer is too big to cast down to a C integer. With the -introduction of NumPy scalar arrays into your Python code, you -might conceivably extract an integer from a NumPy array and attempt -to pass this to a `SWIG`_-wrapped C/C++ function that expects an -``int``, but the `SWIG`_ type checking will not recognize the NumPy -array scalar as an integer. (Often, this does in fact work -- it -depends on whether NumPy recognizes the integer type you are using -as inheriting from the Python integer type on the platform you are -using. Sometimes, this means that code that works on a 32-bit machine -will fail on a 64-bit machine.) - -If you get a Python error that looks like the following:: - - TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int' - -and the argument you are passing is an integer extracted from a -NumPy array, then you have stumbled upon this problem. The -solution is to modify the `SWIG`_ type conversion system to accept -`Numpy`_ array scalars in addition to the standard integer types. -Fortunately, this capabilitiy has been provided for you. Simply copy -the file:: - - pyfragments.swg - -to the working build directory for you project, and this problem will -be fixed. It is suggested that you do this anyway, as it only -increases the capabilities of your Python interface. - -Why is There a Second File? -``````````````````````````` - -The `SWIG`_ type checking and conversion system is a complicated -combination of C macros, `SWIG`_ macros, `SWIG`_ typemaps and `SWIG`_ -fragments. Fragments are a way to conditionally insert code into your -wrapper file if it is needed, and not insert it if not needed. If -multiple typemaps require the same fragment, the fragment only gets -inserted into your wrapper code once. 
- -There is a fragment for converting a Python integer to a C -``long``. There is a different fragment that converts a Python -integer to a C ``int``, that calls the rountine defined in the -``long`` fragment. We can make the changes we want here by changing -the definition for the ``long`` fragment. `SWIG`_ determines the -active definition for a fragment using a "first come, first served" -system. That is, we need to define the fragment for ``long`` -conversions prior to `SWIG`_ doing it internally. `SWIG`_ allows us -to do this by putting our fragment definitions in the file -``pyfragments.swg``. If we were to put the new fragment definitions -in ``numpy.i``, they would be ignored. - -Helper Functions ----------------- - -The ``numpy.i`` file containes several macros and routines that it -uses internally to build its typemaps. However, these functions may -be useful elsewhere in your interface file. These macros and routines -are implemented as fragments, which are described briefly in the -previous section. If you try to use one or more of the following -macros or functions, but your compiler complains that it does not -recognize the symbol, then you need to force these fragments to appear -in your code using:: - - %fragment("NumPy_Fragments"); - -in your `SWIG`_ interface file. - -Macros -`````` - - **is_array(a)** - Evaluates as true if ``a`` is non-``NULL`` and can be cast to a - ``PyArrayObject*``. - - **array_type(a)** - Evaluates to the integer data type code of ``a``, assuming ``a`` can - be cast to a ``PyArrayObject*``. - - **array_numdims(a)** - Evaluates to the integer number of dimensions of ``a``, assuming - ``a`` can be cast to a ``PyArrayObject*``. - - **array_dimensions(a)** - Evaluates to an array of type ``npy_intp`` and length - ``array_numdims(a)``, giving the lengths of all of the dimensions - of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. 
- - **array_size(a,i)** - Evaluates to the ``i``-th dimension size of ``a``, assuming ``a`` - can be cast to a ``PyArrayObject*``. - - **array_data(a)** - Evaluates to a pointer of type ``void*`` that points to the data - buffer of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_is_contiguous(a)** - Evaluates as true if ``a`` is a contiguous array. Equivalent to - ``(PyArray_ISCONTIGUOUS(a))``. - - **array_is_native(a)** - Evaluates as true if the data buffer of ``a`` uses native byte - order. Equivalent to ``(PyArray_ISNOTSWAPPED(a))``. - - **array_is_fortran(a)** - Evaluates as true if ``a`` is FORTRAN ordered. - -Routines -```````` - - **pytype_string()** - - Return type: ``char*`` - - Arguments: - - * ``PyObject* py_obj``, a general Python object. - - Return a string describing the type of ``py_obj``. - - - **typecode_string()** - - Return type: ``char*`` - - Arguments: - - * ``int typecode``, a NumPy integer typecode. - - Return a string describing the type corresponding to the NumPy - ``typecode``. - - **type_match()** - - Return type: ``int`` - - Arguments: - - * ``int actual_type``, the NumPy typecode of a NumPy array. - - * ``int desired_type``, the desired NumPy typecode. - - Make sure that ``actual_type`` is compatible with - ``desired_type``. For example, this allows character and - byte types, or int and long types, to match. This is now - equivalent to ``PyArray_EquivTypenums()``. - - - **obj_to_array_no_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general Python object. - - * ``int typecode``, the desired NumPy typecode. - - Cast ``input`` to a ``PyArrayObject*`` if legal, and ensure that - it is of type ``typecode``. If ``input`` cannot be cast, or the - ``typecode`` is wrong, set a Python error and return ``NULL``. - - - **obj_to_array_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general Python object. 
- - * ``int typecode``, the desired NumPy typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a NumPy array with the given ``typecode``. - On success, return a valid ``PyArrayObject*`` with the correct - type. On failure, the Python error string will be set and the - routine returns ``NULL``. - - - **make_contiguous()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyArrayObject* ary``, a NumPy array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - * ``int min_dims``, minimum allowable dimensions. - - * ``int max_dims``, maximum allowable dimensions. - - Check to see if ``ary`` is contiguous. If so, return the input - pointer and flag it as not a new object. If it is not contiguous, - create a new ``PyArrayObject*`` using the original data, flag it - as a new object and return the pointer. - - - **obj_to_array_contiguous_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general Python object. - - * ``int typecode``, the desired NumPy typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a contiguous ``PyArrayObject*`` of the - specified type. If the input object is not a contiguous - ``PyArrayObject*``, a new one will be created and the new object - flag will be set. - - - **require_contiguous()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a NumPy array. - - Test whether ``ary`` is contiguous. If so, return 1. Otherwise, - set a Python error and return 0. - - - **require_native()** - - Return type: ``int`` - - Arguments: - - * ``PyArray_Object* ary``, a NumPy array. - - Require that ``ary`` is not byte-swapped. If the array is not - byte-swapped, return 1. Otherwise, set a Python error and - return 0. 
- - **require_dimensions()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a NumPy array. - - * ``int exact_dimensions``, the desired number of dimensions. - - Require ``ary`` to have a specified number of dimensions. If the - array has the specified number of dimensions, return 1. - Otherwise, set a Python error and return 0. - - - **require_dimensions_n()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a NumPy array. - - * ``int* exact_dimensions``, an array of integers representing - acceptable numbers of dimensions. - - * ``int n``, the length of ``exact_dimensions``. - - Require ``ary`` to have one of a list of specified number of - dimensions. If the array has one of the specified number of - dimensions, return 1. Otherwise, set the Python error string - and return 0. - - - **require_size()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a NumPy array. - - * ``npy_int* size``, an array representing the desired lengths of - each dimension. - - * ``int n``, the length of ``size``. - - Require ``ary`` to have a specified shape. If the array has the - specified shape, return 1. Otherwise, set the Python error - string and return 0. - - - **require_fortran()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a NumPy array. - - Require the given ``PyArrayObject`` to to be FORTRAN ordered. If - the the ``PyArrayObject`` is already FORTRAN ordered, do nothing. - Else, set the FORTRAN ordering flag and recompute the strides. - - -Beyond the Provided Typemaps ----------------------------- - -There are many C or C++ array/NumPy array situations not covered by -a simple ``%include "numpy.i"`` and subsequent ``%apply`` directives. 
- -A Common Example -```````````````` - -Consider a reasonable prototype for a dot product function:: - - double dot(int len, double* vec1, double* vec2); - -The Python interface that we want is:: - - def dot(vec1, vec2): - """ - dot(PyObject,PyObject) -> double - """ - -The problem here is that there is one dimension argument and two array -arguments, and our typemaps are set up for dimensions that apply to a -single array (in fact, `SWIG`_ does not provide a mechanism for -associating ``len`` with ``vec2`` that takes two Python input -arguments). The recommended solution is the following:: - - %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1), - (int len2, double* vec2)} - %rename (dot) my_dot; - %exception my_dot { - $action - if (PyErr_Occurred()) SWIG_fail; - } - %inline %{ - double my_dot(int len1, double* vec1, int len2, double* vec2) { - if (len1 != len2) { - PyErr_Format(PyExc_ValueError, - "Arrays of lengths (%d,%d) given", - len1, len2); - return 0.0; - } - return dot(len1, vec1, vec2); - } - %} - -If the header file that contains the prototype for ``double dot()`` -also contains other prototypes that you want to wrap, so that you need -to ``%include`` this header file, then you will also need a ``%ignore -dot;`` directive, placed after the ``%rename`` and before the -``%include`` directives. Or, if the function in question is a class -method, you will want to use ``%extend`` rather than ``%inline`` in -addition to ``%ignore``. - -**A note on error handling:** Note that ``my_dot`` returns a -``double`` but that it can also raise a Python error. The -resulting wrapper function will return a Python float -representation of 0.0 when the vector lengths do not match. Since -this is not ``NULL``, the Python interpreter will not know to check -for an error. For this reason, we add the ``%exception`` directive -above for ``my_dot`` to get the behavior we want (note that -``$action`` is a macro that gets expanded to a valid call to -``my_dot``). 
In general, you will probably want to write a `SWIG`_ -macro to perform this task. - -Other Situations -```````````````` - -There are other wrapping situations in which ``numpy.i`` may be -helpful when you encounter them. - - * In some situations, it is possible that you could use the - ``%numpy_templates`` macro to implement typemaps for your own - types. See the `Other Common Types: bool`_ or `Other Common - Types: complex`_ sections for examples. Another situation is if - your dimensions are of a type other than ``int`` (say ``long`` for - example):: - - %numpy_typemaps(double, NPY_DOUBLE, long) - - * You can use the code in ``numpy.i`` to write your own typemaps. - For example, if you had a four-dimensional array as a function - argument, you could cut-and-paste the appropriate - three-dimensional typemaps into your interface file. The - modifications for the fourth dimension would be trivial. - - * Sometimes, the best approach is to use the ``%extend`` directive - to define new methods for your classes (or overload existing ones) - that take a ``PyObject*`` (that either is or can be converted to a - ``PyArrayObject*``) instead of a pointer to a buffer. In this - case, the helper routines in ``numpy.i`` can be very useful. - - * Writing typemaps can be a bit nonintuitive. If you have specific - questions about writing `SWIG`_ typemaps for NumPy, the - developers of ``numpy.i`` do monitor the - `Numpy-discussion `_ and - `Swig-user `_ mail lists. - -A Final Note -```````````` - -When you use the ``%apply`` directive, as is usually necessary to use -``numpy.i``, it will remain in effect until you tell `SWIG`_ that it -shouldn't be. If the arguments to the functions or methods that you -are wrapping have common names, such as ``length`` or ``vector``, -these typemaps may get applied in situations you do not expect or -want. 
Therefore, it is always a good idea to add a ``%clear`` -directive after you are done with a specific typemap:: - - %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)} - %include "my_header.h" - %clear (double* vector, int length); - -In general, you should target these typemap signatures specifically -where you want them, and then clear them after you are done. - -Summary -------- - -Out of the box, ``numpy.i`` provides typemaps that support conversion -between NumPy arrays and C arrays: - - * That can be one of 12 different scalar types: ``signed char``, - ``unsigned char``, ``short``, ``unsigned short``, ``int``, - ``unsigned int``, ``long``, ``unsigned long``, ``long long``, - ``unsigned long long``, ``float`` and ``double``. - - * That support 41 different argument signatures for each data type, - including: - - + One-dimensional, two-dimensional and three-dimensional arrays. - - + Input-only, in-place, argout and argoutview behavior. - - + Hard-coded dimensions, data-buffer-then-dimensions - specification, and dimensions-then-data-buffer specification. - - + Both C-ordering ("last dimension fastest") or FORTRAN-ordering - ("first dimension fastest") support for 2D and 3D arrays. - -The ``numpy.i`` interface file also provides additional tools for -wrapper developers, including: - - * A `SWIG`_ macro (``%numpy_typemaps``) with three arguments for - implementing the 41 argument signatures for the user's choice of - (1) C data type, (2) NumPy data type (assuming they match), and - (3) dimension type. - - * Nine C macros and 13 C functions that can be used to write - specialized typemaps, extensions, or inlined functions that handle - cases not covered by the provided typemaps. 
- - diff --git a/numpy-1.6.2/doc/source/reference/swig.rst deleted file mode 100644 index 3931b8e110..0000000000 --- a/numpy-1.6.2/doc/source/reference/swig.rst +++ /dev/null @@ -1,12 +0,0 @@ -************** -Numpy and SWIG -************** - -.. sectionauthor:: Bill Spotz - - -.. toctree:: - :maxdepth: 2 - - swig.interface-file - swig.testing diff --git a/numpy-1.6.2/doc/source/reference/swig.testing.rst deleted file mode 100644 index 4aad6bff79..0000000000 --- a/numpy-1.6.2/doc/source/reference/swig.testing.rst +++ /dev/null @@ -1,164 +0,0 @@ -Testing the numpy.i Typemaps -============================ - -Introduction ------------- - -Writing tests for the ``numpy.i`` `SWIG `_ -interface file is a combinatorial headache. At present, 12 different -data types are supported, each with 23 different argument signatures, -for a total of 276 typemaps supported "out of the box". Each of these -typemaps, in turn, might require several unit tests in order to verify -expected behavior for both proper and improper inputs. Currently, -this results in 1,020 individual unit tests that are performed when -``make test`` is run in the ``numpy/docs/swig`` subdirectory. - -To facilitate this many similar unit tests, some high-level -programming techniques are employed, including C and `SWIG`_ macros, -as well as Python inheritance. The purpose of this document is to describe -the testing infrastructure employed to verify that the ``numpy.i`` -typemaps are working as expected. - -Testing Organization --------------------- - -There are three independent testing frameworks supported, for one-, -two-, and three-dimensional arrays respectively. For one-dimensional -arrays, there are two C++ files, a header and a source, named:: - - Vector.h - Vector.cxx - -that contain prototypes and code for a variety of functions that have -one-dimensional arrays as function arguments. 
The file:: - - Vector.i - -is a `SWIG`_ interface file that defines a python module ``Vector`` -that wraps the functions in ``Vector.h`` while utilizing the typemaps -in ``numpy.i`` to correctly handle the C arrays. - -The ``Makefile`` calls ``swig`` to generate ``Vector.py`` and -``Vector_wrap.cxx``, and also executes the ``setup.py`` script that -compiles ``Vector_wrap.cxx`` and links together the extension module -``_Vector.so`` or ``_Vector.dylib``, depending on the platform. This -extension module and the proxy file ``Vector.py`` are both placed in a -subdirectory under the ``build`` directory. - -The actual testing takes place with a Python script named:: - - testVector.py - -that uses the standard Python library module ``unittest``, which -performs several tests of each function defined in ``Vector.h`` for -each data type supported. - -Two-dimensional arrays are tested in exactly the same manner. The -above description applies, but with ``Matrix`` substituted for -``Vector``. For three-dimensional tests, substitute ``Tensor`` for -``Vector``. For the descriptions that follow, we will reference the -``Vector`` tests, but the same information applies to ``Matrix`` and -``Tensor`` tests. - -The command ``make test`` will ensure that all of the test software is -built and then run all three test scripts. - -Testing Header Files --------------------- - -``Vector.h`` is a C++ header file that defines a C macro called -``TEST_FUNC_PROTOS`` that takes two arguments: ``TYPE``, which is a -data type name such as ``unsigned int``; and ``SNAME``, which is a -short name for the same data type with no spaces, e.g. ``uint``. This -macro defines several function prototypes that have the prefix -``SNAME`` and have at least one argument that is an array of type -``TYPE``. Those functions that have return arguments return a -``TYPE`` value. 
- -``TEST_FUNC_PROTOS`` is then implemented for all of the data types -supported by ``numpy.i``: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -Testing Source Files --------------------- - -``Vector.cxx`` is a C++ source file that implements compilable code -for each of the function prototypes specified in ``Vector.h``. It -defines a C macro ``TEST_FUNCS`` that has the same arguments and works -in the same way as ``TEST_FUNC_PROTOS`` does in ``Vector.h``. -``TEST_FUNCS`` is implemented for each of the 12 data types as above. - -Testing SWIG Interface Files ----------------------------- - -``Vector.i`` is a `SWIG`_ interface file that defines python module -``Vector``. It follows the conventions for using ``numpy.i`` as -described in this chapter. It defines a `SWIG`_ macro -``%apply_numpy_typemaps`` that has a single argument ``TYPE``. -It uses the `SWIG`_ directive ``%apply`` to apply the provided -typemaps to the argument signatures found in ``Vector.h``. This macro -is then implemented for all of the data types supported by -``numpy.i``. It then does a ``%include "Vector.h"`` to wrap all of -the function prototypes in ``Vector.h`` using the typemaps in -``numpy.i``. - -Testing Python Scripts ----------------------- - -After ``make`` is used to build the testing extension modules, -``testVector.py`` can be run to execute the tests. As with other -scripts that use ``unittest`` to facilitate unit testing, -``testVector.py`` defines a class that inherits from -``unittest.TestCase``:: - - class VectorTestCase(unittest.TestCase): - -However, this class is not run directly. Rather, it serves as a base -class to several other python classes, each one specific to a -particular data type. 
The ``VectorTestCase`` class stores two strings -for typing information: - - **self.typeStr** - A string that matches one of the ``SNAME`` prefixes used in - ``Vector.h`` and ``Vector.cxx``. For example, ``"double"``. - - **self.typeCode** - A short (typically single-character) string that represents a - data type in numpy and corresponds to ``self.typeStr``. For - example, if ``self.typeStr`` is ``"double"``, then - ``self.typeCode`` should be ``"d"``. - -Each test defined by the ``VectorTestCase`` class extracts the python -function it is trying to test by accessing the ``Vector`` module's -dictionary:: - - length = Vector.__dict__[self.typeStr + "Length"] - -In the case of double precision tests, this will return the python -function ``Vector.doubleLength``. - -We then define a new test case class for each supported data type with -a short definition such as:: - - class doubleTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -Each of these 12 classes is collected into a ``unittest.TestSuite``, -which is then executed. Errors and failures are summed together and -returned as the exit argument. Any non-zero result indicates that at -least one test did not pass. diff --git a/numpy-1.6.2/doc/source/reference/ufuncs.rst b/numpy-1.6.2/doc/source/reference/ufuncs.rst deleted file mode 100644 index 0e7da347ea..0000000000 --- a/numpy-1.6.2/doc/source/reference/ufuncs.rst +++ /dev/null @@ -1,617 +0,0 @@ -.. sectionauthor:: adapted from "Guide to Numpy" by Travis E. Oliphant - -.. _ufuncs: - -************************************ -Universal functions (:class:`ufunc`) -************************************ - -.. note: XXX: section might need to be made more reference-guideish... - -.. currentmodule:: numpy - -.. 
index: ufunc, universal function, arithmetic, operation - -A universal function (or :term:`ufunc` for short) is a function that -operates on :class:`ndarrays ` in an element-by-element fashion, -supporting :ref:`array broadcasting `, :ref:`type -casting `, and several other standard features. That -is, a ufunc is a ":term:`vectorized`" wrapper for a function that -takes a fixed number of scalar inputs and produces a fixed number of -scalar outputs. - -In Numpy, universal functions are instances of the -:class:`numpy.ufunc` class. Many of the built-in functions are -implemented in compiled C code, but :class:`ufunc` instances can also -be produced using the :func:`frompyfunc` factory function. - - -.. _ufuncs.broadcasting: - -Broadcasting -============ - -.. index:: broadcasting - -Each universal function takes array inputs and produces array outputs -by performing the core function element-wise on the inputs. Standard -broadcasting rules are applied so that inputs not sharing exactly the -same shapes can still be usefully operated on. Broadcasting can be -understood by four rules: - -1. All input arrays with :attr:`ndim ` smaller than the - input array of largest :attr:`ndim `, have 1's - prepended to their shapes. - -2. The size in each dimension of the output shape is the maximum of all - the input sizes in that dimension. - -3. An input can be used in the calculation if its size in a particular - dimension either matches the output size in that dimension, or has - value exactly 1. - -4. If an input has a dimension size of 1 in its shape, the first data - entry in that dimension will be used for all calculations along - that dimension. In other words, the stepping machinery of the - :term:`ufunc` will simply not step along that dimension (the - :term:`stride` will be 0 for that dimension). - -Broadcasting is used throughout NumPy to decide how to handle -disparately shaped arrays; for example, all arithmetic operations (``+``, -``-``, ``*``, ...) 
between :class:`ndarrays ` broadcast the -arrays before operation. - -.. _arrays.broadcasting.broadcastable: - -.. index:: broadcastable - -A set of arrays is called ":term:`broadcastable`" to the same shape if -the above rules produce a valid result, *i.e.*, one of the following -is true: - -1. The arrays all have exactly the same shape. - -2. The arrays all have the same number of dimensions and the length of - each dimensions is either a common length or 1. - -3. The arrays that have too few dimensions can have their shapes prepended - with a dimension of length 1 to satisfy property 2. - -.. admonition:: Example - - If ``a.shape`` is (5,1), ``b.shape`` is (1,6), ``c.shape`` is (6,) - and ``d.shape`` is () so that *d* is a scalar, then *a*, *b*, *c*, - and *d* are all broadcastable to dimension (5,6); and - - - *a* acts like a (5,6) array where ``a[:,0]`` is broadcast to the other - columns, - - - *b* acts like a (5,6) array where ``b[0,:]`` is broadcast - to the other rows, - - - *c* acts like a (1,6) array and therefore like a (5,6) array - where ``c[:]`` is broadcast to every row, and finally, - - - *d* acts like a (5,6) array where the single value is repeated. - - -.. _ufuncs.output-type: - -Output type determination -========================= - -The output of the ufunc (and its methods) is not necessarily an -:class:`ndarray`, if all input arguments are not :class:`ndarrays `. - -All output arrays will be passed to the :obj:`__array_prepare__` and -:obj:`__array_wrap__` methods of the input (besides -:class:`ndarrays `, and scalars) that defines it **and** has -the highest :obj:`__array_priority__` of any other input to the -universal function. The default :obj:`__array_priority__` of the -ndarray is 0.0, and the default :obj:`__array_priority__` of a subtype -is 1.0. Matrices have :obj:`__array_priority__` equal to 10.0. - -All ufuncs can also take output arguments. If necessary, output will -be cast to the data-type(s) of the provided output array(s). 
If a class -with an :obj:`__array__` method is used for the output, results will be -written to the object returned by :obj:`__array__`. Then, if the class -also has an :obj:`__array_prepare__` method, it is called so metadata -may be determined based on the context of the ufunc (the context -consisting of the ufunc itself, the arguments passed to the ufunc, and -the ufunc domain.) The array object returned by -:obj:`__array_prepare__` is passed to the ufunc for computation. -Finally, if the class also has an :obj:`__array_wrap__` method, the returned -:class:`ndarray` result will be passed to that method just before -passing control back to the caller. - -Use of internal buffers -======================= - -.. index:: buffers - -Internally, buffers are used for misaligned data, swapped data, and -data that has to be converted from one data type to another. The size -of internal buffers is settable on a per-thread basis. There can -be up to :math:`2 (n_{\mathrm{inputs}} + n_{\mathrm{outputs}})` -buffers of the specified size created to handle the data from all the -inputs and outputs of a ufunc. The default size of a buffer is -10,000 elements. Whenever buffer-based calculation would be needed, -but all input arrays are smaller than the buffer size, those -misbehaved or incorrectly-typed arrays will be copied before the -calculation proceeds. Adjusting the size of the buffer may therefore -alter the speed at which ufunc calculations of various sorts are -completed. A simple interface for setting this variable is accessible -using the function - -.. autosummary:: - :toctree: generated/ - - setbufsize - - -Error handling -============== - -.. index:: error handling - -Universal functions can trip special floating-point status registers -in your hardware (such as divide-by-zero). If available on your -platform, these registers will be regularly checked during -calculation. 
Error handling is controlled on a per-thread basis, -and can be configured using the functions - -.. autosummary:: - :toctree: generated/ - - seterr - seterrcall - -.. _ufuncs.casting: - -Casting Rules -============= - -.. index:: - pair: ufunc; casting rules - -.. note:: - - In NumPy 1.6.0, a type promotion API was created to encapsulate the - mechanism for determining output types. See the functions - :func:`result_type`, :func:`promote_types`, and - :func:`min_scalar_type` for more details. - -At the core of every ufunc is a one-dimensional strided loop that -implements the actual function for a specific type combination. When a -ufunc is created, it is given a static list of inner loops and a -corresponding list of type signatures over which the ufunc operates. -The ufunc machinery uses this list to determine which inner loop to -use for a particular case. You can inspect the :attr:`.types -` attribute for a particular ufunc to see which type -combinations have a defined inner loop and which output type they -produce (:ref:`character codes ` are used -in said output for brevity). - -Casting must be done on one or more of the inputs whenever the ufunc -does not have a core loop implementation for the input types provided. -If an implementation for the input types cannot be found, then the -algorithm searches for an implementation with a type signature to -which all of the inputs can be cast "safely." The first one it finds -in its internal list of loops is selected and performed, after all -necessary type casting. Recall that internal copies during ufuncs (even -for casting) are limited to the size of an internal buffer (which is user -settable). - -.. note:: - - Universal functions in NumPy are flexible enough to have mixed type - signatures. Thus, for example, a universal function could be defined - that works with floating-point and integer values. See :func:`ldexp` - for an example. 
- -By the above description, the casting rules are essentially -implemented by the question of when a data type can be cast "safely" -to another data type. The answer to this question can be determined in -Python with a function call: :func:`can_cast(fromtype, totype) -`. The Figure below shows the results of this call for -the 24 internally supported types on the author's 64-bit system. You -can generate this table for your system with the code given in the Figure. - -.. admonition:: Figure - - Code segment showing the "can cast safely" table for a 32-bit system. - - >>> def print_table(ntypes): - ... print 'X', - ... for char in ntypes: print char, - ... print - ... for row in ntypes: - ... print row, - ... for col in ntypes: - ... print int(np.can_cast(row, col)), - ... print - >>> print_table(np.typecodes['All']) - X ? b h i l q p B H I L Q P e f d g F D G S U V O M m - ? 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - b 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 - h 0 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0 - i 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - l 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - q 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - p 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - B 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 - H 0 0 0 1 1 1 1 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 0 - I 0 0 0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - L 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - Q 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - P 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - e 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 - f 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0 - d 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - g 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 1 1 1 0 0 - F 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 - D 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 - G 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 
1 0 0 - S 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 - U 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 - V 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 - O 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 - M 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 - m 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 - - -You should note that, while included in the table for completeness, -the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also, -note that on a 32-bit system the integer types may have different -sizes, resulting in a slightly altered table. - -Mixed scalar-array operations use a different set of casting rules -that ensure that a scalar cannot "upcast" an array unless the scalar is -of a fundamentally different kind of data (*i.e.*, under a different -hierarchy in the data-type hierarchy) than the array. This rule -enables you to use scalar constants in your code (which, as Python -types, are interpreted accordingly in ufuncs) without worrying about -whether the precision of the scalar constant will cause upcasting on -your large (small precision) array. - -:class:`ufunc` -============== - -Optional keyword arguments --------------------------- - -All ufuncs take optional keyword arguments. Most of these represent -advanced usage and will not typically be used. - -.. index:: - pair: ufunc; keyword arguments - -*out* - - .. versionadded:: 1.6 - - The first output can provided as either a positional or a keyword parameter. - -*casting* - - .. versionadded:: 1.6 - - Provides a policy for what kind of casting is permitted. For compatibility - with previous versions of NumPy, this defaults to 'unsafe'. May be 'no', - 'equiv', 'safe', 'same_kind', or 'unsafe'. See :func:`can_cast` for - explanations of the parameter values. - -*order* - - .. versionadded:: 1.6 - - Specifies the calculation iteration order/memory layout of the output array. - Defaults to 'K'. 
'C' means the output should be C-contiguous, 'F' means - F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous, C-contiguous - otherwise, and 'K' means to match the element ordering of the inputs - as closely as possible. - -*dtype* - - .. versionadded:: 1.6 - - Overrides the dtype of the calculation and output arrays. Similar to *sig*. - -*subok* - - .. versionadded:: 1.6 - - Defaults to true. If set to false, the output will always be a strict - array, not a subtype. - -*sig* - - Either a data-type, a tuple of data-types, or a special signature - string indicating the input and output types of a ufunc. This argument - allows you to provide a specific signature for the 1-d loop to use - in the underlying calculation. If the loop specified does not exist - for the ufunc, then a TypeError is raised. Normally, a suitable loop is - found automatically by comparing the input types with what is - available and searching for a loop with data-types to which all inputs - can be cast safely. This keyword argument lets you bypass that - search and choose a particular loop. A list of available signatures is - provided by the **types** attribute of the ufunc object. - -*extobj* - - a list of length 1, 2, or 3 specifying the ufunc buffer-size, the - error mode integer, and the error call-back function. Normally, these - values are looked up in a thread-specific dictionary. Passing them - here circumvents that look up and uses the low-level specification - provided for the error mode. This may be useful, for example, as an - optimization for calculations requiring many ufunc calls on small arrays - in a loop. - - - -Attributes ----------- - -There are some informational attributes that universal functions -possess. None of the attributes can be set. - -.. index:: - pair: ufunc; attributes - - -============ ================================================================= -**__doc__** A docstring for each ufunc. 
The first part of the docstring is - dynamically generated from the number of outputs, the name, and - the number of inputs. The second part of the docstring is - provided at creation time and stored with the ufunc. - -**__name__** The name of the ufunc. -============ ================================================================= - -.. autosummary:: - :toctree: generated/ - - ufunc.nin - ufunc.nout - ufunc.nargs - ufunc.ntypes - ufunc.types - ufunc.identity - -Methods -------- - -All ufuncs have four methods. However, these methods only make sense on -ufuncs that take two input arguments and return one output argument. -Attempting to call these methods on other ufuncs will cause a -:exc:`ValueError`. The reduce-like methods all take an *axis* keyword -and a *dtype* keyword, and the arrays must all have dimension >= 1. -The *axis* keyword specifies the axis of the array over which the reduction -will take place and may be negative, but must be an integer. The -*dtype* keyword allows you to manage a very common problem that arises -when naively using :ref:`{op}.reduce `. Sometimes you may -have an array of a certain data type and wish to add up all of its -elements, but the result does not fit into the data type of the -array. This commonly happens if you have an array of single-byte -integers. The *dtype* keyword allows you to alter the data type over which -the reduction takes place (and therefore the type of the output). Thus, -you can ensure that the output is a data type with precision large enough -to handle your output. The responsibility of altering the reduce type is -mostly up to you. There is one exception: if no *dtype* is given for a -reduction on the "add" or "multiply" operations, then if the input type is -an integer (or Boolean) data-type and smaller than the size of the -:class:`int_` data type, it will be internally upcast to the :class:`int_` -(or :class:`uint`) data-type. - -.. index:: - pair: ufunc; methods - -.. 
autosummary:: - :toctree: generated/ - - ufunc.reduce - ufunc.accumulate - ufunc.reduceat - ufunc.outer - - -.. warning:: - - A reduce-like operation on an array with a data-type that has a - range "too small" to handle the result will silently wrap. One - should use `dtype` to increase the size of the data-type over which - reduction takes place. - - -Available ufuncs -================ - -There are currently more than 60 universal functions defined in -:mod:`numpy` on one or more types, covering a wide variety of -operations. Some of these ufuncs are called automatically on arrays -when the relevant infix notation is used (*e.g.*, :func:`add(a, b) ` -is called internally when ``a + b`` is written and *a* or *b* is an -:class:`ndarray`). Nevertheless, you may still want to use the ufunc -call in order to use the optional output argument(s) to place the -output(s) in an object (or objects) of your choice. - -Recall that each ufunc operates element-by-element. Therefore, each -ufunc will be described as if acting on a set of scalar inputs to -return a set of scalar outputs. - -.. note:: - - The ufunc still returns its output(s) even if you use the optional - output argument(s). - -Math operations ---------------- - -.. autosummary:: - - add - subtract - multiply - divide - logaddexp - logaddexp2 - true_divide - floor_divide - negative - power - remainder - mod - fmod - absolute - rint - sign - conj - exp - exp2 - log - log2 - log10 - expm1 - log1p - sqrt - square - reciprocal - ones_like - -.. tip:: - - The optional output arguments can be used to help you save memory - for large calculations. If your arrays are large, complicated - expressions can take longer than absolutely necessary due to the - creation and (later) destruction of temporary calculation - spaces. For example, the expression ``G = a * b + c`` is equivalent to - ``t1 = A * B; G = T1 + C; del t1``. 
It will be more quickly executed - as ``G = A * B; add(G, C, G)`` which is the same as - ``G = A * B; G += C``. - - -Trigonometric functions ------------------------ -All trigonometric functions use radians when an angle is called for. -The ratio of degrees to radians is :math:`180^{\circ}/\pi.` - -.. autosummary:: - - sin - cos - tan - arcsin - arccos - arctan - arctan2 - hypot - sinh - cosh - tanh - arcsinh - arccosh - arctanh - deg2rad - rad2deg - -Bit-twiddling functions ------------------------ - -These function all require integer arguments and they manipulate the -bit-pattern of those arguments. - -.. autosummary:: - - bitwise_and - bitwise_or - bitwise_xor - invert - left_shift - right_shift - -Comparison functions --------------------- - -.. autosummary:: - - greater - greater_equal - less - less_equal - not_equal - equal - -.. warning:: - - Do not use the Python keywords ``and`` and ``or`` to combine - logical array expressions. These keywords will test the truth - value of the entire array (not element-by-element as you might - expect). Use the bitwise operators & and \| instead. - -.. autosummary:: - - logical_and - logical_or - logical_xor - logical_not - -.. warning:: - - The bit-wise operators & and \| are the proper way to perform - element-by-element array comparisons. Be sure you understand the - operator precedence: ``(a > 2) & (a < 5)`` is the proper syntax because - ``a > 2 & a < 5`` will result in an error due to the fact that ``2 & a`` - is evaluated first. - -.. autosummary:: - - maximum - -.. tip:: - - The Python function ``max()`` will find the maximum over a one-dimensional - array, but it will do so using a slower sequence interface. The reduce - method of the maximum ufunc is much faster. Also, the ``max()`` method - will not give answers you might expect for arrays with greater than - one dimension. The reduce method of minimum also allows you to compute - a total minimum over an array. - -.. autosummary:: - - minimum - -.. 
warning:: - - the behavior of ``maximum(a, b)`` is different than that of ``max(a, b)``. - As a ufunc, ``maximum(a, b)`` performs an element-by-element comparison - of `a` and `b` and chooses each element of the result according to which - element in the two arrays is larger. In contrast, ``max(a, b)`` treats - the objects `a` and `b` as a whole, looks at the (total) truth value of - ``a > b`` and uses it to return either `a` or `b` (as a whole). A similar - difference exists between ``minimum(a, b)`` and ``min(a, b)``. - - -Floating functions ------------------- - -Recall that all of these functions work element-by-element over an -array, returning an array output. The description details only a -single operation. - -.. autosummary:: - - isreal - iscomplex - isfinite - isinf - isnan - signbit - copysign - nextafter - modf - ldexp - frexp - fmod - floor - ceil - trunc diff --git a/numpy-1.6.2/doc/source/release.rst b/numpy-1.6.2/doc/source/release.rst deleted file mode 100644 index ce50cf2901..0000000000 --- a/numpy-1.6.2/doc/source/release.rst +++ /dev/null @@ -1,5 +0,0 @@ -************* -Release Notes -************* - -.. include:: ../release/1.3.0-notes.rst diff --git a/numpy-1.6.2/doc/source/scipyshiny_small.png b/numpy-1.6.2/doc/source/scipyshiny_small.png deleted file mode 100644 index 7ef81a9e8f..0000000000 Binary files a/numpy-1.6.2/doc/source/scipyshiny_small.png and /dev/null differ diff --git a/numpy-1.6.2/doc/source/user/basics.broadcasting.rst b/numpy-1.6.2/doc/source/user/basics.broadcasting.rst deleted file mode 100644 index 65584b1fd3..0000000000 --- a/numpy-1.6.2/doc/source/user/basics.broadcasting.rst +++ /dev/null @@ -1,7 +0,0 @@ -************ -Broadcasting -************ - -.. seealso:: :class:`numpy.broadcast` - -.. 
automodule:: numpy.doc.broadcasting diff --git a/numpy-1.6.2/doc/source/user/basics.byteswapping.rst b/numpy-1.6.2/doc/source/user/basics.byteswapping.rst deleted file mode 100644 index 4b1008df3a..0000000000 --- a/numpy-1.6.2/doc/source/user/basics.byteswapping.rst +++ /dev/null @@ -1,5 +0,0 @@ -************* -Byte-swapping -************* - -.. automodule:: numpy.doc.byteswapping diff --git a/numpy-1.6.2/doc/source/user/basics.creation.rst b/numpy-1.6.2/doc/source/user/basics.creation.rst deleted file mode 100644 index b3fa810177..0000000000 --- a/numpy-1.6.2/doc/source/user/basics.creation.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _arrays.creation: - -************** -Array creation -************** - -.. seealso:: :ref:`Array creation routines ` - -.. automodule:: numpy.doc.creation diff --git a/numpy-1.6.2/doc/source/user/basics.indexing.rst b/numpy-1.6.2/doc/source/user/basics.indexing.rst deleted file mode 100644 index 8844adcae6..0000000000 --- a/numpy-1.6.2/doc/source/user/basics.indexing.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _basics.indexing: - -******** -Indexing -******** - -.. seealso:: :ref:`Indexing routines ` - -.. automodule:: numpy.doc.indexing diff --git a/numpy-1.6.2/doc/source/user/basics.io.genfromtxt.rst b/numpy-1.6.2/doc/source/user/basics.io.genfromtxt.rst deleted file mode 100644 index 814ba520a2..0000000000 --- a/numpy-1.6.2/doc/source/user/basics.io.genfromtxt.rst +++ /dev/null @@ -1,444 +0,0 @@ -.. sectionauthor:: Pierre Gerard-Marchant - -********************************************* -Importing data with :func:`~numpy.genfromtxt` -********************************************* - -Numpy provides several functions to create arrays from tabular data. -We focus here on the :func:`~numpy.genfromtxt` function. - -In a nutshell, :func:`~numpy.genfromtxt` runs two main loops. -The first loop converts each line of the file in a sequence of strings. -The second loop converts each string to the appropriate data type. 
-This mechanism is slower than a single loop, but gives more flexibility. -In particular, :func:`~numpy.genfromtxt` is able to take missing data into account, when other faster and simpler functions like :func:`~numpy.loadtxt` cannot - - -.. note:: - When giving examples, we will use the following conventions - - >>> import numpy as np - >>> from StringIO import StringIO - - - -Defining the input -================== - -The only mandatory argument of :func:`~numpy.genfromtxt` is the source of the data. -It can be a string corresponding to the name of a local or remote file, or a file-like object with a :meth:`read` method (such as an actual file or a :class:`StringIO.StringIO` object). -If the argument is the URL of a remote file, this latter is automatically downloaded in the current directory. - -The input file can be a text file or an archive. -Currently, the function recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. -The type of the archive is determined by examining the extension of the file: -if the filename ends with ``'.gz'``, a :class:`gzip` archive is expected; if it ends with ``'bz2'``, a :class:`bzip2` archive is assumed. - - - -Splitting the lines into columns -================================ - -The :keyword:`delimiter` argument ---------------------------------- - -Once the file is defined and open for reading, :func:`~numpy.genfromtxt` splits each non-empty line into a sequence of strings. -Empty or commented lines are just skipped. -The :keyword:`delimiter` keyword is used to define how the splitting should take place. - -Quite often, a single character marks the separation between columns. -For example, comma-separated files (CSV) use a comma (``,``) or a semicolon (``;``) as delimiter. - - >>> data = "1, 2, 3\n4, 5, 6" - >>> np.genfromtxt(StringIO(data), delimiter=",") - array([[ 1., 2., 3.], - [ 4., 5., 6.]]) - -Another common separator is ``"\t"``, the tabulation character. 
-However, we are not limited to a single character, any string will do. -By default, :func:`~numpy.genfromtxt` assumes ``delimiter=None``, meaning that the line is split along white spaces (including tabs) and that consecutive white spaces are considered as a single white space. - -Alternatively, we may be dealing with a fixed-width file, where columns are defined as a given number of characters. -In that case, we need to set :keyword:`delimiter` to a single integer (if all the columns have the same size) or to a sequence of integers (if columns can have different sizes). - - >>> data = " 1 2 3\n 4 5 67\n890123 4" - >>> np.genfromtxt(StringIO(data), delimiter=3) - array([[ 1., 2., 3.], - [ 4., 5., 67.], - [ 890., 123., 4.]]) - >>> data = "123456789\n 4 7 9\n 4567 9" - >>> np.genfromtxt(StringIO(data), delimiter=(4, 3, 2)) - array([[ 1234., 567., 89.], - [ 4., 7., 9.], - [ 4., 567., 9.]]) - - -The :keyword:`autostrip` argument ---------------------------------- - -By default, when a line is decomposed into a series of strings, the individual entries are not stripped of leading nor trailing white spaces. -This behavior can be overwritten by setting the optional argument :keyword:`autostrip` to a value of ``True``. - - >>> data = "1, abc , 2\n 3, xxx, 4" - >>> # Without autostrip - >>> np.genfromtxt(StringIO(data), dtype="|S5") - array([['1', ' abc ', ' 2'], - ['3', ' xxx', ' 4']], - dtype='|S5') - >>> # With autostrip - >>> np.genfromtxt(StringIO(data), dtype="|S5", autostrip=True) - array([['1', 'abc', '2'], - ['3', 'xxx', '4']], - dtype='|S5') - - -The :keyword:`comments` argument --------------------------------- - -The optional argument :keyword:`comments` is used to define a character string that marks the beginning of a comment. -By default, :func:`~numpy.genfromtxt` assumes ``comments='#'``. -The comment marker may occur anywhere on the line. -Any character present after the comment marker(s) is simply ignored. - - >>> data = """# - ... # Skip me ! - ... 
# Skip me too ! - ... 1, 2 - ... 3, 4 - ... 5, 6 #This is the third line of the data - ... 7, 8 - ... # And here comes the last line - ... 9, 0 - ... """ - >>> np.genfromtxt(StringIO(data), comments="#", delimiter=",") - [[ 1. 2.] - [ 3. 4.] - [ 5. 6.] - [ 7. 8.] - [ 9. 0.]] - -.. note:: - There is one notable exception to this behavior: if the optional argument ``names=True``, the first commented line will be examined for names. - - - -Skipping lines and choosing columns -=================================== - -The :keyword:`skip_header` and :keyword:`skip_footer` arguments ---------------------------------------------------------------- - -The presence of a header in the file can hinder data processing. -In that case, we need to use the :keyword:`skip_header` optional argument. -The values of this argument must be an integer which corresponds to the number of lines to skip at the beginning of the file, before any other action is performed. -Similarly, we can skip the last ``n`` lines of the file by using the :keyword:`skip_footer` attribute and giving it a value of ``n``. - - >>> data = "\n".join(str(i) for i in range(10)) - >>> np.genfromtxt(StringIO(data),) - array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> np.genfromtxt(StringIO(data), - ... skip_header=3, skip_footer=5) - array([ 3., 4.]) - -By default, ``skip_header=0`` and ``skip_footer=0``, meaning that no lines are skipped. - - -The :keyword:`usecols` argument -------------------------------- - -In some cases, we are not interested in all the columns of the data but only a few of them. -We can select which columns to import with the :keyword:`usecols` argument. -This argument accepts a single integer or a sequence of integers corresponding to the indices of the columns to import. -Remember that by convention, the first column has an index of 0. 
-Negative integers correspond to - -For example, if we want to import only the first and the last columns, we can use ``usecols=(0, -1)``: - >>> data = "1 2 3\n4 5 6" - >>> np.genfromtxt(StringIO(data), usecols=(0, -1)) - array([[ 1., 3.], - [ 4., 6.]]) - -If the columns have names, we can also select which columns to import by giving their name to the :keyword:`usecols` argument, either as a sequence of strings or a comma-separated string. - >>> data = "1 2 3\n4 5 6" - >>> np.genfromtxt(StringIO(data), - ... names="a, b, c", usecols=("a", "c")) - array([(1.0, 3.0), (4.0, 6.0)], - dtype=[('a', '>> np.genfromtxt(StringIO(data), - ... names="a, b, c", usecols=("a, c")) - array([(1.0, 3.0), (4.0, 6.0)], - dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"]) - array([(1, 2, 3), (4, 5, 6)], - dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, names="A, B, C") - array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)], - dtype=[('A', '>> data = StringIO("So it goes\n#a b c\n1 2 3\n 4 5 6") - >>> np.genfromtxt(data, skip_header=1, names=True) - array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)], - dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> ndtype=[('a',int), ('b', float), ('c', int)] - >>> names = ["A", "B", "C"] - >>> np.genfromtxt(data, names=names, dtype=ndtype) - array([(1, 2.0, 3), (4, 5.0, 6)], - dtype=[('A', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int)) - array([(1, 2.0, 3), (4, 5.0, 6)], - dtype=[('f0', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), names="a") - array([(1, 2.0, 3), (4, 5.0, 6)], - dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i") - array([(1, 2.0, 3), (4, 5.0, 6)], - dtype=[('var_00', ',<``. - :keyword:`excludelist` - Gives a list of the names to exclude, such as ``return``, ``file``, ``print``... 
- If one of the input name is part of this list, an underscore character (``'_'``) will be appended to it. - :keyword:`case_sensitive` - Whether the names should be case-sensitive (``case_sensitive=True``), - converted to upper case (``case_sensitive=False`` or ``case_sensitive='upper'``) or to lower case (``case_sensitive='lower'``). - - - -Tweaking the conversion -======================= - -The :keyword:`converters` argument ----------------------------------- - -Usually, defining a dtype is sufficient to define how the sequence of strings must be converted. -However, some additional control may sometimes be required. -For example, we may want to make sure that a date in a format ``YYYY/MM/DD`` is converted to a :class:`datetime` object, or that a string like ``xx%`` is properly converted to a float between 0 and 1. -In such cases, we should define conversion functions with the :keyword:`converters` arguments. - -The value of this argument is typically a dictionary with column indices or column names as keys and a conversion function as values. -These conversion functions can either be actual functions or lambda functions. In any case, they should accept only a string as input and output only a single element of the wanted type. - -In the following example, the second column is converted from as string representing a percentage to a float between 0 and 1 - >>> convertfunc = lambda x: float(x.strip("%"))/100. - >>> data = "1, 2.3%, 45.\n6, 78.9%, 0" - >>> names = ("i", "p", "n") - >>> # General case ..... - >>> np.genfromtxt(StringIO(data), delimiter=",", names=names) - array([(1.0, nan, 45.0), (6.0, nan, 0.0)], - dtype=[('i', '>> # Converted case ... - >>> np.genfromtxt(StringIO(data), delimiter=",", names=names, - ... converters={1: convertfunc}) - array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)], - dtype=[('i', '>> # Using a name for the converter ... - >>> np.genfromtxt(StringIO(data), delimiter=",", names=names, - ... 
converters={"p": convertfunc}) - array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)], - dtype=[('i', '>> data = "1, , 3\n 4, 5, 6" - >>> convert = lambda x: float(x.strip() or -999) - >>> np.genfromtxt(StringIO(data), delimiter=",", - ... converters={1: convert}) - array([[ 1., -999., 3.], - [ 4., 5., 6.]]) - - - - -Using missing and filling values --------------------------------- - -Some entries may be missing in the dataset we are trying to import. -In a previous example, we used a converter to transform an empty string into a float. -However, user-defined converters may rapidly become cumbersome to manage. - -The :func:`~numpy.genfromtxt` function provides two other complementary mechanisms: the :keyword:`missing_values` argument is used to recognize missing data and a second argument, :keyword:`filling_values`, is used to process these missing data. - -:keyword:`missing_values` -------------------------- - -By default, any empty string is marked as missing. -We can also consider more complex strings, such as ``"N/A"`` or ``"???"`` to represent missing or invalid data. -The :keyword:`missing_values` argument accepts three kinds of values: - - a string or a comma-separated string - This string will be used as the marker for missing data for all the columns - a sequence of strings - In that case, each item is associated to a column, in order. - a dictionary - Values of the dictionary are strings or sequence of strings. - The corresponding keys can be column indices (integers) or column names (strings). In addition, the special key ``None`` can be used to define a default applicable to all columns. - - -:keyword:`filling_values` -------------------------- - -We know how to recognize missing data, but we still need to provide a value for these missing entries. 
-By default, this value is determined from the expected dtype according to this table: - -============= ============== -Expected type Default -============= ============== -``bool`` ``False`` -``int`` ``-1`` -``float`` ``np.nan`` -``complex`` ``np.nan+0j`` -``string`` ``'???'`` -============= ============== - -We can get a finer control on the conversion of missing values with the :keyword:`filling_values` optional argument. -Like :keyword:`missing_values`, this argument accepts different kind of values: - - a single value - This will be the default for all columns - a sequence of values - Each entry will be the default for the corresponding column - a dictionary - Each key can be a column index or a column name, and the corresponding value should be a single object. - We can use the special key ``None`` to define a default for all columns. - -In the following example, we suppose that the missing values are flagged with ``"N/A"`` in the first column and by ``"???"`` in the third column. -We wish to transform these missing values to 0 if they occur in the first and second column, and to -999 if they occur in the last column. - ->>> data = "N/A, 2, 3\n4, ,???" ->>> kwargs = dict(delimiter=",", -... dtype=int, -... names="a,b,c", -... missing_values={0:"N/A", 'b':" ", 2:"???"}, -... filling_values={0:0, 'b':0, 2:-999}) ->>> np.genfromtxt(StringIO.StringIO(data), **kwargs) -array([(0, 2, 3), (4, 0, -999)], - dtype=[('a', '` - -.. automodule:: numpy.doc.basics diff --git a/numpy-1.6.2/doc/source/user/c-info.beyond-basics.rst b/numpy-1.6.2/doc/source/user/c-info.beyond-basics.rst deleted file mode 100644 index 5ff92a1220..0000000000 --- a/numpy-1.6.2/doc/source/user/c-info.beyond-basics.rst +++ /dev/null @@ -1,740 +0,0 @@ -***************** -Beyond the Basics -***************** - -| The voyage of discovery is not in seeking new landscapes but in having -| new eyes. 
-| --- *Marcel Proust* - -| Discovery is seeing what everyone else has seen and thinking what no -| one else has thought. -| --- *Albert Szent-Gyorgi* - - -Iterating over elements in the array -==================================== - -.. _`sec:array_iterator`: - -Basic Iteration ---------------- - -One common algorithmic requirement is to be able to walk over all -elements in a multidimensional array. The array iterator object makes -this easy to do in a generic way that works for arrays of any -dimension. Naturally, if you know the number of dimensions you will be -using, then you can always write nested for loops to accomplish the -iteration. If, however, you want to write code that works with any -number of dimensions, then you can make use of the array iterator. An -array iterator object is returned when accessing the .flat attribute -of an array. - -.. index:: - single: array iterator - -Basic usage is to call :cfunc:`PyArray_IterNew` ( ``array`` ) where array -is an ndarray object (or one of its sub-classes). The returned object -is an array-iterator object (the same object returned by the .flat -attribute of the ndarray). This object is usually cast to -PyArrayIterObject* so that its members can be accessed. The only -members that are needed are ``iter->size`` which contains the total -size of the array, ``iter->index``, which contains the current 1-d -index into the array, and ``iter->dataptr`` which is a pointer to the -data for the current element of the array. Sometimes it is also -useful to access ``iter->ao`` which is a pointer to the underlying -ndarray object. - -After processing data at the current element of the array, the next -element of the array can be obtained using the macro -:cfunc:`PyArray_ITER_NEXT` ( ``iter`` ). The iteration always proceeds in a -C-style contiguous fashion (last index varying the fastest). 
The -:cfunc:`PyArray_ITER_GOTO` ( ``iter``, ``destination`` ) can be used to -jump to a particular point in the array, where ``destination`` is an -array of npy_intp data-type with space to handle at least the number -of dimensions in the underlying array. Occasionally it is useful to -use :cfunc:`PyArray_ITER_GOTO1D` ( ``iter``, ``index`` ) which will jump -to the 1-d index given by the value of ``index``. The most common -usage, however, is given in the following example. - -.. code-block:: c - - PyObject *obj; /* assumed to be some ndarray object */ - PyArrayIterObject *iter; - ... - iter = (PyArrayIterObject *)PyArray_IterNew(obj); - if (iter == NULL) goto fail; /* Assume fail has clean-up code */ - while (iter->index < iter->size) { - /* do something with the data at iter->dataptr */ - PyArray_ITER_NEXT(iter); - } - ... - -You can also use :cfunc:`PyArrayIter_Check` ( ``obj`` ) to ensure you have -an iterator object and :cfunc:`PyArray_ITER_RESET` ( ``iter`` ) to reset an -iterator object back to the beginning of the array. - -It should be emphasized at this point that you may not need the array -iterator if your array is already contiguous (using an array iterator -will work but will be slower than the fastest code you could write). -The major purpose of array iterators is to encapsulate iteration over -N-dimensional arrays with arbitrary strides. They are used in many, -many places in the NumPy source code itself. If you already know your -array is contiguous (Fortran or C), then simply adding the element- -size to a running pointer variable will step you through the array -very efficiently. In other words, code like this will probably be -faster for you in the contiguous case (assuming doubles). - -..
code-block:: c - - npy_intp size; - double *dptr; /* could make this any variable type */ - size = PyArray_SIZE(obj); - dptr = PyArray_DATA(obj); - while(size--) { - /* do something with the data at dptr */ - dptr++; - } - - -Iterating over all but one axis -------------------------------- - -A common algorithm is to loop over all elements of an array and -perform some function with each element by issuing a function call. As -function calls can be time consuming, one way to speed up this kind of -algorithm is to write the function so it takes a vector of data and -then write the iteration so the function call is performed for an -entire dimension of data at a time. This increases the amount of work -done per function call, thereby reducing the function-call over-head -to a small(er) fraction of the total time. Even if the interior of the -loop is performed without a function call it can be advantageous to -perform the inner loop over the dimension with the highest number of -elements to take advantage of speed enhancements available on micro- -processors that use pipelining to enhance fundmental operations. - -The :cfunc:`PyArray_IterAllButAxis` ( ``array``, ``&dim`` ) constructs an -iterator object that is modified so that it will not iterate over the -dimension indicated by dim. The only restriction on this iterator -object, is that the :cfunc:`PyArray_Iter_GOTO1D` ( ``it``, ``ind`` ) macro -cannot be used (thus flat indexing won't work either if you pass this -object back to Python --- so you shouldn't do this). Note that the -returned object from this routine is still usually cast to -PyArrayIterObject \*. All that's been done is to modify the strides -and dimensions of the returned iterator to simulate iterating over -array[...,0,...] where 0 is placed on the -:math:`\textrm{dim}^{\textrm{th}}` dimension. If dim is negative, then -the dimension with the largest axis is found and used. 
- - -Iterating over multiple arrays ------------------------------- - -Very often, it is desireable to iterate over several arrays at the -same time. The universal functions are an example of this kind of -behavior. If all you want to do is iterate over arrays with the same -shape, then simply creating several iterator objects is the standard -procedure. For example, the following code iterates over two arrays -assumed to be the same shape and size (actually obj1 just has to have -at least as many total elements as does obj2): - -.. code-block:: c - - /* It is already assumed that obj1 and obj2 - are ndarrays of the same shape and size. - */ - iter1 = (PyArrayIterObject *)PyArray_IterNew(obj1); - if (iter1 == NULL) goto fail; - iter2 = (PyArrayIterObject *)PyArray_IterNew(obj2); - if (iter2 == NULL) goto fail; /* assume iter1 is DECREF'd at fail */ - while (iter2->index < iter2->size) { - /* process with iter1->dataptr and iter2->dataptr */ - PyArray_ITER_NEXT(iter1); - PyArray_ITER_NEXT(iter2); - } - - -Broadcasting over multiple arrays ---------------------------------- - -.. index:: - single: broadcasting - -When multiple arrays are involved in an operation, you may want to use the -same broadcasting rules that the math operations (*i.e.* the ufuncs) use. -This can be done easily using the :ctype:`PyArrayMultiIterObject`. This is -the object returned from the Python command numpy.broadcast and it is almost -as easy to use from C. The function -:cfunc:`PyArray_MultiIterNew` ( ``n``, ``...`` ) is used (with ``n`` input -objects in place of ``...`` ). The input objects can be arrays or anything -that can be converted into an array. A pointer to a PyArrayMultiIterObject is -returned. Broadcasting has already been accomplished which adjusts the -iterators so that all that needs to be done to advance to the next element in -each array is for PyArray_ITER_NEXT to be called for each of the inputs. 
This -incrementing is automatically performed by -:cfunc:`PyArray_MultiIter_NEXT` ( ``obj`` ) macro (which can handle a -multiterator ``obj`` as either a :ctype:`PyArrayMultiObject *` or a -:ctype:`PyObject *`). The data from input number ``i`` is available using -:cfunc:`PyArray_MultiIter_DATA` ( ``obj``, ``i`` ) and the total (broadcasted) -size as :cfunc:`PyArray_MultiIter_SIZE` ( ``obj``). An example of using this -feature follows. - -.. code-block:: c - - mobj = PyArray_MultiIterNew(2, obj1, obj2); - size = PyArray_MultiIter_SIZE(mobj); - while(size--) { - ptr1 = PyArray_MultiIter_DATA(mobj, 0); - ptr2 = PyArray_MultiIter_DATA(mobj, 1); - /* code using contents of ptr1 and ptr2 */ - PyArray_MultiIter_NEXT(mobj); - } - -The function :cfunc:`PyArray_RemoveLargest` ( ``multi`` ) can be used to -take a multi-iterator object and adjust all the iterators so that -iteration does not take place over the largest dimension (it makes -that dimension of size 1). The code being looped over that makes use -of the pointers will very-likely also need the strides data for each -of the iterators. This information is stored in -multi->iters[i]->strides. - -.. index:: - single: array iterator - -There are several examples of using the multi-iterator in the NumPy -source code as it makes N-dimensional broadcasting-code very simple to -write. Browse the source for more examples. - -.. _`sec:Creating-a-new`: - -Creating a new universal function -================================= - -.. index:: - pair: ufunc; adding new - -The umath module is a computer-generated C-module that creates many -ufuncs. It provides a great many examples of how to create a universal -function. Creating your own ufunc that will make use of the ufunc -machinery is not difficult either. Suppose you have a function that -you want to operate element-by-element over its inputs. 
By creating a -new ufunc you will obtain a function that handles - -- broadcasting - -- N-dimensional looping - -- automatic type-conversions with minimal memory usage - -- optional output arrays - -It is not difficult to create your own ufunc. All that is required is -a 1-d loop for each data-type you want to support. Each 1-d loop must -have a specific signature, and only ufuncs for fixed-size data-types -can be used. The function call used to create a new ufunc to work on -built-in data-types is given below. A different mechanism is used to -register ufuncs for user-defined data-types. - -.. cfunction:: PyObject *PyUFunc_FromFuncAndData( PyUFuncGenericFunction* func, - void** data, char* types, int ntypes, int nin, int nout, int identity, - char* name, char* doc, int check_return) - - *func* - - A pointer to an array of 1-d functions to use. This array must be at - least ntypes long. Each entry in the array must be a - ``PyUFuncGenericFunction`` function. This function has the following - signature. An example of a valid 1d loop function is also given. - - .. cfunction:: void loop1d(char** args, npy_intp* dimensions, - npy_intp* steps, void* data) - - *args* - - An array of pointers to the actual data for the input and output - arrays. The input arguments are given first followed by the output - arguments. - - *dimensions* - - A pointer to the size of the dimension over which this function is - looping. - - *steps* - - A pointer to the number of bytes to jump to get to the - next element in this dimension for each of the input and - output arguments. - - *data* - - Arbitrary data (extra arguments, function names, *etc.* ) - that can be stored with the ufunc and will be passed in - when it is called. - - .. 
code-block:: c - - static void - double_add(char *args, npy_intp *dimensions, npy_intp *steps, - void *extra) - { - npy_intp i; - npy_intp is1=steps[0], is2=steps[1]; - npy_intp os=steps[2], n=dimensions[0]; - char *i1=args[0], *i2=args[1], *op=args[2]; - for (i=0; i`__ . - - *arg_types* - - (optional) If given, this should contain an array of integers of at - least size ufunc.nargs containing the data-types expected by the loop - function. The data will be copied into a NumPy-managed structure so - the memory for this argument should be deleted after calling this - function. If this is NULL, then it will be assumed that all data-types - are of type usertype. - - *data* - - (optional) Specify any optional data needed by the function which will - be passed when the function is called. - - .. index:: - pair: dtype; adding new - - -Subtyping the ndarray in C -========================== - -One of the lesser-used features that has been lurking in Python since -2.2 is the ability to sub-class types in C. This facility is one of -the important reasons for basing NumPy off of the Numeric code-base -which was already in C. A sub-type in C allows much more flexibility -with regards to memory management. Sub-typing in C is not difficult -even if you have only a rudimentary understanding of how to create new -types for Python. While it is easiest to sub-type from a single parent -type, sub-typing from multiple parent types is also possible. Multiple -inheritence in C is generally less useful than it is in Python because -a restriction on Python sub-types is that they have a binary -compatible memory layout. Perhaps for this reason, it is somewhat -easier to sub-type from a single parent type. - -.. index:: - pair: ndarray; subtyping - -All C-structures corresponding to Python objects must begin with -:cmacro:`PyObject_HEAD` (or :cmacro:`PyObject_VAR_HEAD`). 
In the same -way, any sub-type must have a C-structure that begins with exactly the -same memory layout as the parent type (or all of the parent types in -the case of multiple-inheritance). The reason for this is that Python -may attempt to access a member of the sub-type structure as if it had -the parent structure ( *i.e.* it will cast a given pointer to a -pointer to the parent structure and then dereference one of it's -members). If the memory layouts are not compatible, then this attempt -will cause unpredictable behavior (eventually leading to a memory -violation and program crash). - -One of the elements in :cmacro:`PyObject_HEAD` is a pointer to a -type-object structure. A new Python type is created by creating a new -type-object structure and populating it with functions and pointers to -describe the desired behavior of the type. Typically, a new -C-structure is also created to contain the instance-specific -information needed for each object of the type as well. For example, -:cdata:`&PyArray_Type` is a pointer to the type-object table for the ndarray -while a :ctype:`PyArrayObject *` variable is a pointer to a particular instance -of an ndarray (one of the members of the ndarray structure is, in -turn, a pointer to the type- object table :cdata:`&PyArray_Type`). Finally -:cfunc:`PyType_Ready` () must be called for -every new Python type. - - -Creating sub-types ------------------- - -To create a sub-type, a similar proceedure must be followed except -only behaviors that are different require new entries in the type- -object structure. All other entires can be NULL and will be filled in -by :cfunc:`PyType_Ready` with appropriate functions from the parent -type(s). In particular, to create a sub-type in C follow these steps: - -1. If needed create a new C-structure to handle each instance of your - type. A typical C-structure would be: - - .. 
code-block:: c - - typedef _new_struct { - PyArrayObject base; - /* new things here */ - } NewArrayObject; - - Notice that the full PyArrayObject is used as the first entry in order - to ensure that the binary layout of instances of the new type is - identical to the PyArrayObject. - -2. Fill in a new Python type-object structure with pointers to new - functions that will over-ride the default behavior while leaving any - function that should remain the same unfilled (or NULL). The tp_name - element should be different. - -3. Fill in the tp_base member of the new type-object structure with a - pointer to the (main) parent type object. For multiple-inheritance, - also fill in the tp_bases member with a tuple containing all of the - parent objects in the order they should be used to define inheritance. - Remember, all parent-types must have the same C-structure for multiple - inheritance to work properly. - -4. Call :cfunc:`PyType_Ready` (). If this function - returns a negative number, a failure occurred and the type is not - initialized. Otherwise, the type is ready to be used. It is - generally important to place a reference to the new type into the - module dictionary so it can be accessed from Python. - -More information on creating sub-types in C can be learned by reading -PEP 253 (available at http://www.python.org/dev/peps/pep-0253). - - -Specific features of ndarray sub-typing ---------------------------------------- - -Some special methods and attributes are used by arrays in order to -facilitate the interoperation of sub-types with the base ndarray type. - -The __array_finalize\__ method -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. attribute:: ndarray.__array_finalize__ - - Several array-creation functions of the ndarray allow - specification of a particular sub-type to be created. This allows - sub-types to be handled seamlessly in many routines. 
When a - sub-type is created in such a fashion, however, neither the - __new_\_ method nor the __init\__ method gets called. Instead, the - sub-type is allocated and the appropriate instance-structure - members are filled in. Finally, the :obj:`__array_finalize__` - attribute is looked-up in the object dictionary. If it is present - and not None, then it can be either a CObject containing a pointer - to a :cfunc:`PyArray_FinalizeFunc` or it can be a method taking a - single argument (which could be None). - - If the :obj:`__array_finalize__` attribute is a CObject, then the pointer - must be a pointer to a function with the signature: - - .. code-block:: c - - (int) (PyArrayObject *, PyObject *) - - The first argument is the newly created sub-type. The second argument - (if not NULL) is the "parent" array (if the array was created using - slicing or some other operation where a clearly-distinguishable parent - is present). This routine can do anything it wants to. It should - return a -1 on error and 0 otherwise. - - If the :obj:`__array_finalize__` attribute is not None nor a CObject, - then it must be a Python method that takes the parent array as an - argument (which could be None if there is no parent), and returns - nothing. Errors in this method will be caught and handled. - - -The __array_priority\__ attribute -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. attribute:: ndarray.__array_priority__ - - This attribute allows simple but flexible determination of which sub- - type should be considered "primary" when an operation involving two or - more sub-types arises. In operations where different sub-types are - being used, the sub-type with the largest :obj:`__array_priority__` - attribute will determine the sub-type of the output(s). If two sub- - types have the same :obj:`__array_priority__` then the sub-type of the - first argument determines the output. 
The default - :obj:`__array_priority__` attribute returns a value of 0.0 for the base - ndarray type and 1.0 for a sub-type. This attribute can also be - defined by objects that are not sub-types of the ndarray and can be - used to determine which :obj:`__array_wrap__` method should be called for - the return output. - -The __array_wrap\__ method -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. attribute:: ndarray.__array_wrap__ - - Any class or type can define this method which should take an ndarray - argument and return an instance of the type. It can be seen as the - opposite of the :obj:`__array__` method. This method is used by the - ufuncs (and other NumPy functions) to allow other objects to pass - through. For Python >2.4, it can also be used to write a decorator - that converts a function that works only with ndarrays to one that - works with any type with :obj:`__array__` and :obj:`__array_wrap__` methods. - -.. index:: - pair: ndarray; subtyping diff --git a/numpy-1.6.2/doc/source/user/c-info.how-to-extend.rst b/numpy-1.6.2/doc/source/user/c-info.how-to-extend.rst deleted file mode 100644 index 6c5e25aff6..0000000000 --- a/numpy-1.6.2/doc/source/user/c-info.how-to-extend.rst +++ /dev/null @@ -1,641 +0,0 @@ -******************* -How to extend NumPy -******************* - -| That which is static and repetitive is boring. That which is dynamic -| and random is confusing. In between lies art. -| --- *John A. Locke* - -| Science is a differential equation. Religion is a boundary condition. -| --- *Alan Turing* - - -.. _`sec:Writing-an-extension`: - -Writing an extension module -=========================== - -While the ndarray object is designed to allow rapid computation in -Python, it is also designed to be general-purpose and satisfy a wide- -variety of computational needs. As a result, if absolute speed is -essential, there is no replacement for a well-crafted, compiled loop -specific to your application and hardware. 
This is one of the reasons -that numpy includes f2py so that an easy-to-use mechanisms for linking -(simple) C/C++ and (arbitrary) Fortran code directly into Python are -available. You are encouraged to use and improve this mechanism. The -purpose of this section is not to document this tool but to document -the more basic steps to writing an extension module that this tool -depends on. - -.. index:: - single: extension module - -When an extension module is written, compiled, and installed to -somewhere in the Python path (sys.path), the code can then be imported -into Python as if it were a standard python file. It will contain -objects and methods that have been defined and compiled in C code. The -basic steps for doing this in Python are well-documented and you can -find more information in the documentation for Python itself available -online at `www.python.org `_ . - -In addition to the Python C-API, there is a full and rich C-API for -NumPy allowing sophisticated manipulations on a C-level. However, for -most applications, only a few API calls will typically be used. If all -you need to do is extract a pointer to memory along with some shape -information to pass to another calculation routine, then you will use -very different calls, then if you are trying to create a new array- -like type or add a new data type for ndarrays. This chapter documents -the API calls and macros that are most commonly used. - - -Required subroutine -=================== - -There is exactly one function that must be defined in your C-code in -order for Python to use it as an extension module. The function must -be called init{name} where {name} is the name of the module from -Python. This function must be declared so that it is visible to code -outside of the routine. Besides adding the methods and constants you -desire, this subroutine must also contain calls to import_array() -and/or import_ufunc() depending on which C-API is needed. 
Forgetting -to place these commands will show itself as an ugly segmentation fault -(crash) as soon as any C-API subroutine is actually called. It is -actually possible to have multiple init{name} functions in a single -file in which case multiple modules will be defined by that file. -However, there are some tricks to get that to work correctly and it is -not covered here. - -A minimal ``init{name}`` method looks like: - -.. code-block:: c - - PyMODINIT_FUNC - init{name}(void) - { - (void)Py_InitModule({name}, mymethods); - import_array(); - } - -The mymethods must be an array (usually statically declared) of -PyMethodDef structures which contain method names, actual C-functions, -a variable indicating whether the method uses keyword arguments or -not, and docstrings. These are explained in the next section. If you -want to add constants to the module, then you store the returned value -from Py_InitModule which is a module object. The most general way to -add itmes to the module is to get the module dictionary using -PyModule_GetDict(module). With the module dictionary, you can add -whatever you like to the module manually. An easier way to add objects -to the module is to use one of three additional Python C-API calls -that do not require a separate extraction of the module dictionary. -These are documented in the Python documentation, but repeated here -for convenience: - -.. cfunction:: int PyModule_AddObject(PyObject* module, char* name, PyObject* value) - -.. cfunction:: int PyModule_AddIntConstant(PyObject* module, char* name, long value) - -.. cfunction:: int PyModule_AddStringConstant(PyObject* module, char* name, char* value) - - All three of these functions require the *module* object (the - return value of Py_InitModule). The *name* is a string that - labels the value in the module. 
Depending on which function is - called, the *value* argument is either a general object - (:cfunc:`PyModule_AddObject` steals a reference to it), an integer - constant, or a string constant. - - -Defining functions -================== - -The second argument passed in to the Py_InitModule function is a -structure that makes it easy to to define functions in the module. In -the example given above, the mymethods structure would have been -defined earlier in the file (usually right before the init{name} -subroutine) to: - -.. code-block:: c - - static PyMethodDef mymethods[] = { - { nokeywordfunc,nokeyword_cfunc, - METH_VARARGS, - Doc string}, - { keywordfunc, keyword_cfunc, - METH_VARARGS|METH_KEYWORDS, - Doc string}, - {NULL, NULL, 0, NULL} /* Sentinel */ - } - -Each entry in the mymethods array is a :ctype:`PyMethodDef` structure -containing 1) the Python name, 2) the C-function that implements the -function, 3) flags indicating whether or not keywords are accepted for -this function, and 4) The docstring for the function. Any number of -functions may be defined for a single module by adding more entries to -this table. The last entry must be all NULL as shown to act as a -sentinel. Python looks for this entry to know that all of the -functions for the module have been defined. - -The last thing that must be done to finish the extension module is to -actually write the code that performs the desired functions. There are -two kinds of functions: those that don't accept keyword arguments, and -those that do. - - -Functions without keyword arguments ------------------------------------ - -Functions that don't accept keyword arguments should be written as: - -.. code-block:: c - - static PyObject* - nokeyword_cfunc (PyObject *dummy, PyObject *args) - { - /* convert Python arguments */ - /* do function */ - /* return something */ - } - -The dummy argument is not used in this context and can be safely -ignored. 
The *args* argument contains all of the arguments passed in -to the function as a tuple. You can do anything you want at this -point, but usually the easiest way to manage the input arguments is to -call :cfunc:`PyArg_ParseTuple` (args, format_string, -addresses_to_C_variables...) or :cfunc:`PyArg_UnpackTuple` (tuple, "name" , -min, max, ...). A good description of how to use the first function is -contained in the Python C-API reference manual under section 5.5 -(Parsing arguments and building values). You should pay particular -attention to the "O&" format which uses converter functions to go -between the Python object and the C object. All of the other format -functions can be (mostly) thought of as special cases of this general -rule. There are several converter functions defined in the NumPy C-API -that may be of use. In particular, the :cfunc:`PyArray_DescrConverter` -function is very useful to support arbitrary data-type specification. -This function transforms any valid data-type Python object into a -:ctype:`PyArray_Descr *` object. Remember to pass in the address of the -C-variables that should be filled in. - -There are lots of examples of how to use :cfunc:`PyArg_ParseTuple` -throughout the NumPy source code. The standard usage is like this: - -.. code-block:: c - - PyObject *input; - PyArray_Descr *dtype; - if (!PyArg_ParseTuple(args, "OO&", &input, - PyArray_DescrConverter, - &dtype)) return NULL; - -It is important to keep in mind that you get a *borrowed* reference to -the object when using the "O" format string. However, the converter -functions usually require some form of memory handling. In this -example, if the conversion is successful, *dtype* will hold a new -reference to a :ctype:`PyArray_Descr *` object, while *input* will hold a -borrowed reference. 
Therefore, if this conversion were mixed with -another conversion (say to an integer) and the data-type conversion -was successful but the integer conversion failed, then you would need -to release the reference count to the data-type object before -returning. A typical way to do this is to set *dtype* to ``NULL`` -before calling :cfunc:`PyArg_ParseTuple` and then use :cfunc:`Py_XDECREF` -on *dtype* before returning. - -After the input arguments are processed, the code that actually does -the work is written (likely calling other functions as needed). The -final step of the C-function is to return something. If an error is -encountered then ``NULL`` should be returned (making sure an error has -actually been set). If nothing should be returned then increment -:cdata:`Py_None` and return it. If a single object should be returned then -it is returned (ensuring that you own a reference to it first). If -multiple objects should be returned then you need to return a tuple. -The :cfunc:`Py_BuildValue` (format_string, c_variables...) function makes -it easy to build tuples of Python objects from C variables. Pay -special attention to the difference between 'N' and 'O' in the format -string or you can easily create memory leaks. The 'O' format string -increments the reference count of the :ctype:`PyObject *` C-variable it -corresponds to, while the 'N' format string steals a reference to the -corresponding :ctype:`PyObject *` C-variable. You should use 'N' if you ave -already created a reference for the object and just want to give that -reference to the tuple. You should use 'O' if you only have a borrowed -reference to an object and need to create one to provide for the -tuple. - - -Functions with keyword arguments --------------------------------- - -These functions are very similar to functions without keyword -arguments. The only difference is that the function signature is: - -.. 
code-block:: c - - static PyObject* - keyword_cfunc (PyObject *dummy, PyObject *args, PyObject *kwds) - { - ... - } - -The kwds argument holds a Python dictionary whose keys are the names -of the keyword arguments and whose values are the corresponding -keyword-argument values. This dictionary can be processed however you -see fit. The easiest way to handle it, however, is to replace the -:cfunc:`PyArg_ParseTuple` (args, format_string, addresses...) function with -a call to :cfunc:`PyArg_ParseTupleAndKeywords` (args, kwds, format_string, -char \*kwlist[], addresses...). The kwlist parameter to this function -is a ``NULL`` -terminated array of strings providing the expected -keyword arguments. There should be one string for each entry in the -format_string. Using this function will raise a TypeError if invalid -keyword arguments are passed in. - -For more help on this function please see section 1.8 (Keyword -Paramters for Extension Functions) of the Extending and Embedding -tutorial in the Python documentation. - - -Reference counting ------------------- - -The biggest difficulty when writing extension modules is reference -counting. It is an important reason for the popularity of f2py, weave, -pyrex, ctypes, etc.... If you mis-handle reference counts you can get -problems from memory-leaks to segmentation faults. The only strategy I -know of to handle reference counts correctly is blood, sweat, and -tears. First, you force it into your head that every Python variable -has a reference count. Then, you understand exactly what each function -does to the reference count of your objects, so that you can properly -use DECREF and INCREF when you need them. Reference counting can -really test the amount of patience and diligence you have towards your -programming craft. 
Despite the grim depiction, most cases of reference -counting are quite straightforward with the most common difficulty -being not using DECREF on objects before exiting early from a routine -due to some error. In second place, is the common error of not owning -the reference on an object that is passed to a function or macro that -is going to steal the reference ( *e.g.* :cfunc:`PyTuple_SET_ITEM`, and -most functions that take :ctype:`PyArray_Descr` objects). - -.. index:: - single: reference counting - -Typically you get a new reference to a variable when it is created or -is the return value of some function (there are some prominent -exceptions, however --- such as getting an item out of a tuple or a -dictionary). When you own the reference, you are responsible to make -sure that :cfunc:`Py_DECREF` (var) is called when the variable is no -longer necessary (and no other function has "stolen" its -reference). Also, if you are passing a Python object to a function -that will "steal" the reference, then you need to make sure you own it -(or use :cfunc:`Py_INCREF` to get your own reference). You will also -encounter the notion of borrowing a reference. A function that borrows -a reference does not alter the reference count of the object and does -not expect to "hold on "to the reference. It's just going to use the -object temporarily. When you use :cfunc:`PyArg_ParseTuple` or -:cfunc:`PyArg_UnpackTuple` you receive a borrowed reference to the -objects in the tuple and should not alter their reference count inside -your function. With practice, you can learn to get reference counting -right, but it can be frustrating at first. - -One common source of reference-count errors is the :cfunc:`Py_BuildValue` -function. Pay careful attention to the difference between the 'N' -format character and the 'O' format character. 
If you create a new -object in your subroutine (such as an output array), and you are -passing it back in a tuple of return values, then you should most- -likely use the 'N' format character in :cfunc:`Py_BuildValue`. The 'O' -character will increase the reference count by one. This will leave -the caller with two reference counts for a brand-new array. When the -variable is deleted and the reference count decremented by one, there -will still be that extra reference count, and the array will never be -deallocated. You will have a reference-counting induced memory leak. -Using the 'N' character will avoid this situation as it will return to -the caller an object (inside the tuple) with a single reference count. - -.. index:: - single: reference counting - - - - -Dealing with array objects -========================== - -Most extension modules for NumPy will need to access the memory for an -ndarray object (or one of it's sub-classes). The easiest way to do -this doesn't require you to know much about the internals of NumPy. -The method is to - -1. Ensure you are dealing with a well-behaved array (aligned, in machine - byte-order and single-segment) of the correct type and number of - dimensions. - - 1. By converting it from some Python object using - :cfunc:`PyArray_FromAny` or a macro built on it. - - 2. By constructing a new ndarray of your desired shape and type - using :cfunc:`PyArray_NewFromDescr` or a simpler macro or function - based on it. - - -2. Get the shape of the array and a pointer to its actual data. - -3. Pass the data and shape information on to a subroutine or other - section of code that actually performs the computation. - -4. If you are writing the algorithm, then I recommend that you use the - stride information contained in the array to access the elements of - the array (the :cfunc:`PyArray_GETPTR` macros make this painless). 
Then, - you can relax your requirements so as not to force a single-segment - array and the data-copying that might result. - -Each of these sub-topics is covered in the following sub-sections. - - -Converting an arbitrary sequence object ---------------------------------------- - -The main routine for obtaining an array from any Python object that -can be converted to an array is :cfunc:`PyArray_FromAny`. This -function is very flexible with many input arguments. Several macros -make it easier to use the basic function. :cfunc:`PyArray_FROM_OTF` is -arguably the most useful of these macros for the most common uses. It -allows you to convert an arbitrary Python object to an array of a -specific builtin data-type ( *e.g.* float), while specifying a -particular set of requirements ( *e.g.* contiguous, aligned, and -writeable). The syntax is - -.. cfunction:: PyObject *PyArray_FROM_OTF(PyObject* obj, int typenum, int requirements) - - Return an ndarray from any Python object, *obj*, that can be - converted to an array. The number of dimensions in the returned - array is determined by the object. The desired data-type of the - returned array is provided in *typenum* which should be one of the - enumerated types. The *requirements* for the returned array can be - any combination of standard array flags. Each of these arguments - is explained in more detail below. You receive a new reference to - the array on success. On failure, ``NULL`` is returned and an - exception is set. - - *obj* - - The object can be any Python object convertable to an ndarray. - If the object is already (a subclass of) the ndarray that - satisfies the requirements then a new reference is returned. - Otherwise, a new array is constructed. The contents of *obj* - are copied to the new array unless the array interface is used - so that data does not have to be copied. 
Objects that can be - converted to an array include: 1) any nested sequence object, - 2) any object exposing the array interface, 3) any object with - an :obj:`__array__` method (which should return an ndarray), - and 4) any scalar object (becomes a zero-dimensional - array). Sub-classes of the ndarray that otherwise fit the - requirements will be passed through. If you want to ensure - a base-class ndarray, then use :cdata:`NPY_ENSUREARRAY` in the - requirements flag. A copy is made only if necessary. If you - want to guarantee a copy, then pass in :cdata:`NPY_ENSURECOPY` - to the requirements flag. - - *typenum* - - One of the enumerated types or :cdata:`NPY_NOTYPE` if the data-type - should be determined from the object itself. The C-based names - can be used: - - :cdata:`NPY_BOOL`, :cdata:`NPY_BYTE`, :cdata:`NPY_UBYTE`, - :cdata:`NPY_SHORT`, :cdata:`NPY_USHORT`, :cdata:`NPY_INT`, - :cdata:`NPY_UINT`, :cdata:`NPY_LONG`, :cdata:`NPY_ULONG`, - :cdata:`NPY_LONGLONG`, :cdata:`NPY_ULONGLONG`, :cdata:`NPY_DOUBLE`, - :cdata:`NPY_LONGDOUBLE`, :cdata:`NPY_CFLOAT`, :cdata:`NPY_CDOUBLE`, - :cdata:`NPY_CLONGDOUBLE`, :cdata:`NPY_OBJECT`. - - Alternatively, the bit-width names can be used as supported on the - platform. For example: - - :cdata:`NPY_INT8`, :cdata:`NPY_INT16`, :cdata:`NPY_INT32`, - :cdata:`NPY_INT64`, :cdata:`NPY_UINT8`, - :cdata:`NPY_UINT16`, :cdata:`NPY_UINT32`, - :cdata:`NPY_UINT64`, :cdata:`NPY_FLOAT32`, - :cdata:`NPY_FLOAT64`, :cdata:`NPY_COMPLEX64`, - :cdata:`NPY_COMPLEX128`. - - The object will be converted to the desired type only if it - can be done without losing precision. Otherwise ``NULL`` will - be returned and an error raised. Use :cdata:`NPY_FORCECAST` in the - requirements flag to override this behavior. - - *requirements* - - The memory model for an ndarray admits arbitrary strides in - each dimension to advance to the next element of the array. 
- Often, however, you need to interface with code that expects a - C-contiguous or a Fortran-contiguous memory layout. In - addition, an ndarray can be misaligned (the address of an - element is not at an integral multiple of the size of the - element) which can cause your program to crash (or at least - work more slowly) if you try and dereference a pointer into - the array data. Both of these problems can be solved by - converting the Python object into an array that is more - "well-behaved" for your specific usage. - - The requirements flag allows specification of what kind of array is - acceptable. If the object passed in does not satisfy this requirements - then a copy is made so that thre returned object will satisfy the - requirements. these ndarray can use a very generic pointer to memory. - This flag allows specification of the desired properties of the - returned array object. All of the flags are explained in the detailed - API chapter. The flags most commonly needed are :cdata:`NPY_IN_ARRAY`, - :cdata:`NPY_OUT_ARRAY`, and :cdata:`NPY_INOUT_ARRAY`: - - .. cvar:: NPY_IN_ARRAY - - Equivalent to :cdata:`NPY_CONTIGUOUS` \| - :cdata:`NPY_ALIGNED`. This combination of flags is useful - for arrays that must be in C-contiguous order and aligned. - These kinds of arrays are usually input arrays for some - algorithm. - - .. cvar:: NPY_OUT_ARRAY - - Equivalent to :cdata:`NPY_CONTIGUOUS` \| - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_WRITEABLE`. This - combination of flags is useful to specify an array that is - in C-contiguous order, is aligned, and can be written to - as well. Such an array is usually returned as output - (although normally such output arrays are created from - scratch). - - .. cvar:: NPY_INOUT_ARRAY - - Equivalent to :cdata:`NPY_CONTIGUOUS` \| - :cdata:`NPY_ALIGNED` \| :cdata:`NPY_WRITEABLE` \| - :cdata:`NPY_UPDATEIFCOPY`. This combination of flags is - useful to specify an array that will be used for both - input and output. 
If a copy is needed, then when the - temporary is deleted (by your use of :cfunc:`Py_DECREF` at - the end of the interface routine), the temporary array - will be copied back into the original array passed in. Use - of the :cdata:`UPDATEIFCOPY` flag requires that the input - object is already an array (because other objects cannot - be automatically updated in this fashion). If an error - occurs use :cfunc:`PyArray_DECREF_ERR` (obj) on an array - with the :cdata:`NPY_UPDATEIFCOPY` flag set. This will - delete the array without causing the contents to be copied - back into the original array. - - - Other useful flags that can be OR'd as additional requirements are: - - .. cvar:: NPY_FORCECAST - - Cast to the desired type, even if it can't be done without losing - information. - - .. cvar:: NPY_ENSURECOPY - - Make sure the resulting array is a copy of the original. - - .. cvar:: NPY_ENSUREARRAY - - Make sure the resulting object is an actual ndarray and not a sub- - class. - -.. note:: - - Whether or not an array is byte-swapped is determined by the - data-type of the array. Native byte-order arrays are always - requested by :cfunc:`PyArray_FROM_OTF` and so there is no need for - a :cdata:`NPY_NOTSWAPPED` flag in the requirements argument. There - is also no way to get a byte-swapped array from this routine. - - -Creating a brand-new ndarray ----------------------------- - -Quite often new arrays must be created from within extension-module -code. Perhaps an output array is needed and you don't want the caller -to have to supply it. Perhaps only a temporary array is needed to hold -an intermediate calculation. Whatever the need there are simple ways -to get an ndarray object of whatever data-type is needed. The most -general function for doing this is :cfunc:`PyArray_NewFromDescr`. All array -creation functions go through this heavily re-used code. Because of -its flexibility, it can be somewhat confusing to use. 
As a result, -simpler forms exist that are easier to use. - -.. cfunction:: PyObject *PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - - This function allocates new memory and places it in an ndarray - with *nd* dimensions whose shape is determined by the array of - at least *nd* items pointed to by *dims*. The memory for the - array is uninitialized (unless typenum is :cdata:`PyArray_OBJECT` in - which case each element in the array is set to NULL). The - *typenum* argument allows specification of any of the builtin - data-types such as :cdata:`PyArray_FLOAT` or :cdata:`PyArray_LONG`. The - memory for the array can be set to zero if desired using - :cfunc:`PyArray_FILLWBYTE` (return_object, 0). - -.. cfunction:: PyObject *PyArray_SimpleNewFromData( int nd, npy_intp* dims, int typenum, void* data) - - Sometimes, you want to wrap memory allocated elsewhere into an - ndarray object for downstream use. This routine makes it - straightforward to do that. The first three arguments are the same - as in :cfunc:`PyArray_SimpleNew`, the final argument is a pointer to a - block of contiguous memory that the ndarray should use as it's - data-buffer which will be interpreted in C-style contiguous - fashion. A new reference to an ndarray is returned, but the - ndarray will not own its data. When this ndarray is deallocated, - the pointer will not be freed. - - You should ensure that the provided memory is not freed while the - returned array is in existence. The easiest way to handle this is - if data comes from another reference-counted Python object. The - reference count on this object should be increased after the - pointer is passed in, and the base member of the returned ndarray - should point to the Python object that owns the data. Then, when - the ndarray is deallocated, the base-member will be DECREF'd - appropriately. If you want the memory to be freed as soon as the - ndarray is deallocated then simply set the OWNDATA flag on the - returned ndarray. 
- - -Getting at ndarray memory and accessing elements of the ndarray ---------------------------------------------------------------- - -If obj is an ndarray (:ctype:`PyArrayObject *`), then the data-area of the -ndarray is pointed to by the void* pointer :cfunc:`PyArray_DATA` (obj) or -the char* pointer :cfunc:`PyArray_BYTES` (obj). Remember that (in general) -this data-area may not be aligned according to the data-type, it may -represent byte-swapped data, and/or it may not be writeable. If the -data area is aligned and in native byte-order, then how to get at a -specific element of the array is determined only by the array of -npy_intp variables, :cfunc:`PyArray_STRIDES` (obj). In particular, this -c-array of integers shows how many **bytes** must be added to the -current element pointer to get to the next element in each dimension. -For arrays less than 4-dimensions there are :cfunc:`PyArray_GETPTR{k}` -(obj, ...) macros where {k} is the integer 1, 2, 3, or 4 that make -using the array strides easier. The arguments .... represent {k} non- -negative integer indices into the array. For example, suppose ``E`` is -a 3-dimensional ndarray. A (void*) pointer to the element ``E[i,j,k]`` -is obtained as :cfunc:`PyArray_GETPTR3` (E, i, j, k). - -As explained previously, C-style contiguous arrays and Fortran-style -contiguous arrays have particular striding patterns. Two array flags -(:cdata:`NPY_C_CONTIGUOUS` and :cdata`NPY_F_CONTIGUOUS`) indicate -whether or not the striding pattern of a particular array matches the -C-style contiguous or Fortran-style contiguous or neither. Whether or -not the striding pattern matches a standard C or Fortran one can be -tested Using :cfunc:`PyArray_ISCONTIGUOUS` (obj) and -:cfunc:`PyArray_ISFORTRAN` (obj) respectively. Most third-party -libraries expect contiguous arrays. But, often it is not difficult to -support general-purpose striding. 
I encourage you to use the striding -information in your own code whenever possible, and reserve -single-segment requirements for wrapping third-party code. Using the -striding information provided with the ndarray rather than requiring a -contiguous striding reduces copying that otherwise must be made. - - -Example -======= - -.. index:: - single: extension module - -The following example shows how you might write a wrapper that accepts -two input arguments (that will be converted to an array) and an output -argument (that must be an array). The function returns None and -updates the output array. - -.. code-block:: c - - static PyObject * - example_wrapper(PyObject *dummy, PyObject *args) - { - PyObject *arg1=NULL, *arg2=NULL, *out=NULL; - PyObject *arr1=NULL, *arr2=NULL, *oarr=NULL; - - if (!PyArg_ParseTuple(args, "OOO!", &arg1, &arg2, - &PyArray_Type, &out)) return NULL; - - arr1 = PyArray_FROM_OTF(arg1, NPY_DOUBLE, NPY_IN_ARRAY); - if (arr1 == NULL) return NULL; - arr2 = PyArray_FROM_OTF(arg2, NPY_DOUBLE, NPY_IN_ARRAY); - if (arr2 == NULL) goto fail; - oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_INOUT_ARRAY); - if (oarr == NULL) goto fail; - - /* code that makes use of arguments */ - /* You will probably need at least - nd = PyArray_NDIM(<..>) -- number of dimensions - dims = PyArray_DIMS(<..>) -- npy_intp array of length nd - showing length in each dim. - dptr = (double *)PyArray_DATA(<..>) -- pointer to data. - - If an error occurs goto fail. 
- */ - - Py_DECREF(arr1); - Py_DECREF(arr2); - Py_DECREF(oarr); - Py_INCREF(Py_None); - return Py_None; - - fail: - Py_XDECREF(arr1); - Py_XDECREF(arr2); - PyArray_XDECREF_ERR(oarr); - return NULL; - } diff --git a/numpy-1.6.2/doc/source/user/c-info.python-as-glue.rst b/numpy-1.6.2/doc/source/user/c-info.python-as-glue.rst deleted file mode 100644 index 6ce2668592..0000000000 --- a/numpy-1.6.2/doc/source/user/c-info.python-as-glue.rst +++ /dev/null @@ -1,1522 +0,0 @@ -******************** -Using Python as glue -******************** - -| There is no conversation more boring than the one where everybody -| agrees. -| --- *Michel de Montaigne* - -| Duct tape is like the force. It has a light side, and a dark side, and -| it holds the universe together. -| --- *Carl Zwanzig* - -Many people like to say that Python is a fantastic glue language. -Hopefully, this Chapter will convince you that this is true. The first -adopters of Python for science were typically people who used it to -glue together large application codes running on super-computers. Not -only was it much nicer to code in Python than in a shell script or -Perl, in addition, the ability to easily extend Python made it -relatively easy to create new classes and types specifically adapted -to the problems being solved. From the interactions of these early -contributors, Numeric emerged as an array-like object that could be -used to pass data between these applications. - -As Numeric has matured and developed into NumPy, people have been able -to write more code directly in NumPy. Often this code is fast-enough -for production use, but there are still times that there is a need to -access compiled code. Either to get that last bit of efficiency out of -the algorithm or to make it easier to access widely-available codes -written in C/C++ or Fortran. - -This chapter will review many of the tools that are available for the -purpose of accessing code written in other compiled languages. 
There -are many resources available for learning to call other compiled -libraries from Python and the purpose of this Chapter is not to make -you an expert. The main goal is to make you aware of some of the -possibilities so that you will know what to "Google" in order to learn more. - -The http://www.scipy.org website also contains a great deal of useful -information about many of these tools. For example, there is a nice -description of using several of the tools explained in this chapter at -http://www.scipy.org/PerformancePython. This link provides several -ways to solve the same problem showing how to use and connect with -compiled code to get the best performance. In the process you can get -a taste for several of the approaches that will be discussed in this -chapter. - - -Calling other compiled libraries from Python -============================================ - -While Python is a great language and a pleasure to code in, its -dynamic nature results in overhead that can cause some code ( *i.e.* -raw computations inside of for loops) to be up 10-100 times slower -than equivalent code written in a static compiled language. In -addition, it can cause memory usage to be larger than necessary as -temporary arrays are created and destroyed during computation. For -many types of computing needs the extra slow-down and memory -consumption can often not be spared (at least for time- or memory- -critical portions of your code). Therefore one of the most common -needs is to call out from Python code to a fast, machine-code routine -(e.g. compiled using C/C++ or Fortran). The fact that this is -relatively easy to do is a big reason why Python is such an excellent -high-level language for scientific and engineering programming. 
- -There are two basic approaches to calling compiled code: writing an -extension module that is then imported to Python using the import -command, or calling a shared-library subroutine directly from Python -using the ctypes module (included in the standard distribution with -Python 2.5). The first method is the most common (but with the -inclusion of ctypes into Python 2.5 this status may change). - -.. warning:: - - Calling C-code from Python can result in Python crashes if you are not - careful. None of the approaches in this chapter are immune. You have - to know something about the way data is handled by both NumPy and by - the third-party library being used. - - -Hand-generated wrappers -======================= - -Extension modules were discussed in Chapter `1 -<#sec-writing-an-extension>`__ . The most basic way to interface with -compiled code is to write an extension module and construct a module -method that calls the compiled code. For improved readability, your -method should take advantage of the PyArg_ParseTuple call to convert -between Python objects and C data-types. For standard C data-types -there is probably already a built-in converter. For others you may -need to write your own converter and use the "O&" format string which -allows you to specify a function that will be used to perform the -conversion from the Python object to whatever C-structures are needed. - -Once the conversions to the appropriate C-structures and C data-types -have been performed, the next step in the wrapper is to call the -underlying function. This is straightforward if the underlying -function is in C or C++. However, in order to call Fortran code you -must be familiar with how Fortran subroutines are called from C/C++ -using your compiler and platform. 
This can vary somewhat platforms and -compilers (which is another reason f2py makes life much simpler for -interfacing Fortran code) but generally involves underscore mangling -of the name and the fact that all variables are passed by reference -(i.e. all arguments are pointers). - -The advantage of the hand-generated wrapper is that you have complete -control over how the C-library gets used and called which can lead to -a lean and tight interface with minimal over-head. The disadvantage is -that you have to write, debug, and maintain C-code, although most of -it can be adapted using the time-honored technique of -"cutting-pasting-and-modifying" from other extension modules. Because, -the procedure of calling out to additional C-code is fairly -regimented, code-generation procedures have been developed to make -this process easier. One of these code- generation techniques is -distributed with NumPy and allows easy integration with Fortran and -(simple) C code. This package, f2py, will be covered briefly in the -next session. - - -f2py -==== - -F2py allows you to automatically construct an extension module that -interfaces to routines in Fortran 77/90/95 code. It has the ability to -parse Fortran 77/90/95 code and automatically generate Python -signatures for the subroutines it encounters, or you can guide how the -subroutine interfaces with Python by constructing an interface-definition-file (or modifying the f2py-produced one). - -.. index:: - single: f2py - -Creating source for a basic extension module --------------------------------------------- - -Probably the easiest way to introduce f2py is to offer a simple -example. Here is one of the subroutines contained in a file named -:file:`add.f`: - -.. 
code-block:: none - - C - SUBROUTINE ZADD(A,B,C,N) - C - DOUBLE COMPLEX A(*) - DOUBLE COMPLEX B(*) - DOUBLE COMPLEX C(*) - INTEGER N - DO 20 J = 1, N - C(J) = A(J)+B(J) - 20 CONTINUE - END - -This routine simply adds the elements in two contiguous arrays and -places the result in a third. The memory for all three arrays must be -provided by the calling routine. A very basic interface to this -routine can be automatically generated by f2py:: - - f2py -m add add.f - -You should be able to run this command assuming your search-path is -set-up properly. This command will produce an extension module named -addmodule.c in the current directory. This extension module can now be -compiled and used from Python just like any other extension module. - - -Creating a compiled extension module ------------------------------------- - -You can also get f2py to compile add.f and also compile its produced -extension module leaving only a shared-library extension file that can -be imported from Python:: - - f2py -c -m add add.f - -This command leaves a file named add.{ext} in the current directory -(where {ext} is the appropriate extension for a python extension -module on your platform --- so, pyd, *etc.* ). This module may then be -imported from Python. It will contain a method for each subroutine in -add (zadd, cadd, dadd, sadd). The docstring of each method contains -information about how the module method may be called: - - >>> import add - >>> print add.zadd.__doc__ - zadd - Function signature: - zadd(a,b,c,n) - Required arguments: - a : input rank-1 array('D') with bounds (*) - b : input rank-1 array('D') with bounds (*) - c : input rank-1 array('D') with bounds (*) - n : input int - - -Improving the basic interface ------------------------------ - -The default interface is a very literal translation of the fortran -code into Python. The Fortran array arguments must now be NumPy arrays -and the integer argument should be an integer. 
The interface will -attempt to convert all arguments to their required types (and shapes) -and issue an error if unsuccessful. However, because it knows nothing -about the semantics of the arguments (such that C is an output and n -should really match the array sizes), it is possible to abuse this -function in ways that can cause Python to crash. For example: - - >>> add.zadd([1,2,3],[1,2],[3,4],1000) - -will cause a program crash on most systems. Under the covers, the -lists are being converted to proper arrays but then the underlying add -loop is told to cycle way beyond the borders of the allocated memory. - -In order to improve the interface, directives should be provided. This -is accomplished by constructing an interface definition file. It is -usually best to start from the interface file that f2py can produce -(where it gets its default behavior from). To get f2py to generate the -interface file use the -h option:: - - f2py -h add.pyf -m add add.f - -This command leaves the file add.pyf in the current directory. The -section of this file corresponding to zadd is: - -.. code-block:: none - - subroutine zadd(a,b,c,n) ! in :add:add.f - double complex dimension(*) :: a - double complex dimension(*) :: b - double complex dimension(*) :: c - integer :: n - end subroutine zadd - -By placing intent directives and checking code, the interface can be -cleaned up quite a bit until the Python module method is both easier -to use and more robust. - -.. code-block:: none - - subroutine zadd(a,b,c,n) ! in :add:add.f - double complex dimension(n) :: a - double complex dimension(n) :: b - double complex intent(out),dimension(n) :: c - integer intent(hide),depend(a) :: n=len(a) - end subroutine zadd - -The intent directive, intent(out) is used to tell f2py that ``c`` is -an output variable and should be created by the interface before being -passed to the underlying code. 
The intent(hide) directive tells f2py -to not allow the user to specify the variable, ``n``, but instead to -get it from the size of ``a``. The depend( ``a`` ) directive is -necessary to tell f2py that the value of n depends on the input a (so -that it won't try to create the variable n until the variable a is -created). - -The new interface has docstring: - - >>> print add.zadd.__doc__ - zadd - Function signature: - c = zadd(a,b) - Required arguments: - a : input rank-1 array('D') with bounds (n) - b : input rank-1 array('D') with bounds (n) - Return objects: - c : rank-1 array('D') with bounds (n) - -Now, the function can be called in a much more robust way: - - >>> add.zadd([1,2,3],[4,5,6]) - array([ 5.+0.j, 7.+0.j, 9.+0.j]) - -Notice the automatic conversion to the correct format that occurred. - - -Inserting directives in Fortran source --------------------------------------- - -The nice interface can also be generated automatically by placing the -variable directives as special comments in the original fortran code. -Thus, if I modify the source code to contain: - -.. code-block:: none - - C - SUBROUTINE ZADD(A,B,C,N) - C - CF2PY INTENT(OUT) :: C - CF2PY INTENT(HIDE) :: N - CF2PY DOUBLE COMPLEX :: A(N) - CF2PY DOUBLE COMPLEX :: B(N) - CF2PY DOUBLE COMPLEX :: C(N) - DOUBLE COMPLEX A(*) - DOUBLE COMPLEX B(*) - DOUBLE COMPLEX C(*) - INTEGER N - DO 20 J = 1, N - C(J) = A(J) + B(J) - 20 CONTINUE - END - -Then, I can compile the extension module using:: - - f2py -c -m add add.f - -The resulting signature for the function add.zadd is exactly the same -one that was created previously. If the original source code had -contained A(N) instead of A(\*) and so forth with B and C, then I -could obtain (nearly) the same interface simply by placing the -INTENT(OUT) :: C comment line in the source code. The only difference -is that N would be an optional input that would default to the length -of A. 
- - -A filtering example -------------------- - -For comparison with the other methods to be discussed. Here is another -example of a function that filters a two-dimensional array of double -precision floating-point numbers using a fixed averaging filter. The -advantage of using Fortran to index into multi-dimensional arrays -should be clear from this example. - -.. code-block:: none - - SUBROUTINE DFILTER2D(A,B,M,N) - C - DOUBLE PRECISION A(M,N) - DOUBLE PRECISION B(M,N) - INTEGER N, M - CF2PY INTENT(OUT) :: B - CF2PY INTENT(HIDE) :: N - CF2PY INTENT(HIDE) :: M - DO 20 I = 2,M-1 - DO 40 J=2,N-1 - B(I,J) = A(I,J) + - $ (A(I-1,J)+A(I+1,J) + - $ A(I,J-1)+A(I,J+1) )*0.5D0 + - $ (A(I-1,J-1) + A(I-1,J+1) + - $ A(I+1,J-1) + A(I+1,J+1))*0.25D0 - 40 CONTINUE - 20 CONTINUE - END - -This code can be compiled and linked into an extension module named -filter using:: - - f2py -c -m filter filter.f - -This will produce an extension module named filter.so in the current -directory with a method named dfilter2d that returns a filtered -version of the input. - - -Calling f2py from Python ------------------------- - -The f2py program is written in Python and can be run from inside your -module. This provides a facility that is somewhat similar to the use -of weave.ext_tools described below. An example of the final interface -executed using Python code is: - -.. code-block:: python - - import numpy.f2py as f2py - fid = open('add.f') - source = fid.read() - fid.close() - f2py.compile(source, modulename='add') - import add - -The source string can be any valid Fortran code. If you want to save -the extension-module source code then a suitable file-name can be -provided by the source_fn keyword to the compile function. - - -Automatic extension module generation -------------------------------------- - -If you want to distribute your f2py extension module, then you only -need to include the .pyf file and the Fortran code. 
The distutils -extensions in NumPy allow you to define an extension module entirely -in terms of this interface file. A valid setup.py file allowing -distribution of the add.f module (as part of the package f2py_examples -so that it would be loaded as f2py_examples.add) is: - -.. code-block:: python - - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_examples',parent_package, top_path) - config.add_extension('add', sources=['add.pyf','add.f']) - return config - - if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - -Installation of the new package is easy using:: - - python setup.py install - -assuming you have the proper permissions to write to the main site- -packages directory for the version of Python you are using. For the -resulting package to work, you need to create a file named __init__.py -(in the same directory as add.pyf). Notice the extension module is -defined entirely in terms of the "add.pyf" and "add.f" files. The -conversion of the .pyf file to a .c file is handled by numpy.distutils. - - -Conclusion ----------- - -The interface definition file (.pyf) is how you can fine-tune the -interface between Python and Fortran. There is decent documentation -for f2py found in the numpy/f2py/docs directory where-ever NumPy is -installed on your system (usually under site-packages). There is also -more information on using f2py (including how to use it to wrap C -codes) at http://www.scipy.org/Cookbook under the "Using NumPy with -Other Languages" heading. - -The f2py method of linking compiled code is currently the most -sophisticated and integrated approach. It allows clean separation of -Python with compiled code while still allowing for separate -distribution of the extension module. The only draw-back is that it -requires the existence of a Fortran compiler in order for a user to -install the code. 
However, with the existence of the free-compilers -g77, gfortran, and g95, as well as high-quality commerical compilers, -this restriction is not particularly onerous. In my opinion, Fortran -is still the easiest way to write fast and clear code for scientific -computing. It handles complex numbers, and multi-dimensional indexing -in the most straightforward way. Be aware, however, that some Fortran -compilers will not be able to optimize code as well as good hand- -written C-code. - -.. index:: - single: f2py - - -weave -===== - -Weave is a scipy package that can be used to automate the process of -extending Python with C/C++ code. It can be used to speed up -evaluation of an array expression that would otherwise create -temporary variables, to directly "inline" C/C++ code into Python, or -to create a fully-named extension module. You must either install -scipy or get the weave package separately and install it using the -standard python setup.py install. You must also have a C/C++-compiler -installed and useable by Python distutils in order to use weave. - -.. index:: - single: weave - -Somewhat dated, but still useful documentation for weave can be found -at the link http://www.scipy/Weave. There are also many examples found -in the examples directory which is installed under the weave directory -in the place where weave is installed on your system. - - -Speed up code involving arrays (also see scipy.numexpr) -------------------------------------------------------- - -This is the easiest way to use weave and requires minimal changes to -your Python code. It involves placing quotes around the expression of -interest and calling weave.blitz. Weave will parse the code and -generate C++ code using Blitz C++ arrays. It will then compile the -code and catalog the shared library so that the next time this exact -string is asked for (and the array types are the same), the already- -compiled shared library will be loaded and used. 
Because Blitz makes -extensive use of C++ templating, it can take a long time to compile -the first time. After that, however, the code should evaluate more -quickly than the equivalent NumPy expression. This is especially true -if your array sizes are large and the expression would require NumPy -to create several temporaries. Only expressions involving basic -arithmetic operations and basic array slicing can be converted to -Blitz C++ code. - -For example, consider the expression:: - - d = 4*a + 5*a*b + 6*b*c - -where a, b, and c are all arrays of the same type and shape. When the -data-type is double-precision and the size is 1000x1000, this -expression takes about 0.5 seconds to compute on an 1.1Ghz AMD Athlon -machine. When this expression is executed instead using blitz: - -.. code-block:: python - - d = empty(a.shape, 'd'); weave.blitz(expr) - -execution time is only about 0.20 seconds (about 0.14 seconds spent in -weave and the rest in allocating space for d). Thus, we've sped up the -code by a factor of 2 using only a simple command (weave.blitz). Your -mileage may vary, but factors of 2-8 speed-ups are possible with this -very simple technique. - -If you are interested in using weave in this way, then you should also -look at scipy.numexpr which is another similar way to speed up -expressions by eliminating the need for temporary variables. Using -numexpr does not require a C/C++ compiler. - - -Inline C-code ------------- - -Probably the most widely-used method of employing weave is to -"in-line" C/C++ code into Python in order to speed up a time-critical -section of Python code. In this method of using weave, you define a -string containing useful C-code and then pass it to the function -**weave.inline** ( ``code_string``, ``variables`` ), where -code_string is a string of valid C/C++ code and variables is a list of -variables that should be passed in from Python. 
The C/C++ code should -refer to the variables with the same names as they are defined with in -Python. If weave.line should return anything the the special value -return_val should be set to whatever object should be returned. The -following example shows how to use weave on basic Python objects: - -.. code-block:: python - - code = r""" - int i; - py::tuple results(2); - for (i=0; ic.data)[i].real = \ - (a.data)[i].real + \ - (b.data)[i].real - (c.data)[i].imag = \ - (a.data)[i].imag + \ - (b.data)[i].imag - return c - -This module shows use of the ``cimport`` statement to load the -definitions from the c_numpy.pxd file. As shown, both versions of the -import statement are supported. It also shows use of the NumPy C-API -to construct NumPy arrays from arbitrary input objects. The array c is -created using PyArray_SimpleNew. Then the c-array is filled by -addition. Casting to a particiular data-type is accomplished using -. Pointers are de-referenced with bracket notation and -members of structures are accessed using '.' notation even if the -object is techinically a pointer to a structure. The use of the -special for loop construct ensures that the underlying code will have -a similar C-loop so the addition calculation will proceed quickly. -Notice that we have not checked for NULL after calling to the C-API ---- a cardinal sin when writing C-code. For routines that return -Python objects, Pyrex inserts the checks for NULL into the C-code for -you and returns with failure if need be. There is also a way to get -Pyrex to automatically check for exceptions when you call functions -that don't return Python objects. See the documentation of Pyrex for -details. - - -Pyrex-filter ------------- - -The two-dimensional example we created using weave is a bit uglier to -implement in Pyrex because two-dimensional indexing using Pyrex is not -as simple. But, it is straightforward (and possibly faster because of -pre-computed indices). Here is the Pyrex-file I named image.pyx. 
- -.. code-block:: none - - cimport c_numpy - from c_numpy cimport import_array, ndarray, npy_intp,\ - NPY_DOUBLE, NPY_CDOUBLE, \ - NPY_FLOAT, NPY_CFLOAT, NPY_ALIGNED \ - - #We need to initialize NumPy - import_array() - def filter(object ao): - cdef ndarray a, b - cdef npy_intp i, j, M, N, oS - cdef npy_intp r,rm1,rp1,c,cm1,cp1 - cdef double value - # Require an ALIGNED array - # (but not necessarily contiguous) - # We will use strides to access the elements. - a = c_numpy.PyArray_FROMANY(ao, NPY_DOUBLE, \ - 2, 2, NPY_ALIGNED) - b = c_numpy.PyArray_SimpleNew(a.nd,a.dimensions, \ - a.descr.type_num) - M = a.dimensions[0] - N = a.dimensions[1] - S0 = a.strides[0] - S1 = a.strides[1] - for i from 1 <= i < M-1: - r = i*S0 - rm1 = r-S0 - rp1 = r+S0 - oS = i*N - for j from 1 <= j < N-1: - c = j*S1 - cm1 = c-S1 - cp1 = c+S1 - (b.data)[oS+j] = \ - ((a.data+r+c))[0] + \ - (((a.data+rm1+c))[0] + \ - ((a.data+rp1+c))[0] + \ - ((a.data+r+cm1))[0] + \ - ((a.data+r+cp1))[0])*0.5 + \ - (((a.data+rm1+cm1))[0] + \ - ((a.data+rp1+cm1))[0] + \ - ((a.data+rp1+cp1))[0] + \ - ((a.data+rm1+cp1))[0])*0.25 - return b - -This 2-d averaging filter runs quickly because the loop is in C and -the pointer computations are done only as needed. However, it is not -particularly easy to understand what is happening. A 2-d image, ``in`` -, can be filtered using this code very quickly using: - -.. code-block:: python - - import image - out = image.filter(in) - - -Conclusion ----------- - -There are several disadvantages of using Pyrex: - -1. The syntax for Pyrex can get a bit bulky, and it can be confusing at - first to understand what kind of objects you are getting and how to - interface them with C-like constructs. - -2. Inappropriate Pyrex syntax or incorrect calls to C-code or type- - mismatches can result in failures such as - - 1. Pyrex failing to generate the extension module source code, - - 2. Compiler failure while generating the extension module binary due to - incorrect C syntax, - - 3. 
Python failure when trying to use the module. - - -3. It is easy to lose a clean separation between Python and C which makes - re-using your C-code for other non-Python-related projects more - difficult. - -4. Multi-dimensional arrays are "bulky" to index (appropriate macros - may be able to fix this). - -5. The C-code generated by Pyrex is hard to read and modify (and typically - compiles with annoying but harmless warnings). - -Writing a good Pyrex extension module still takes a bit of effort -because not only does it require (a little) familiarity with C, but -also with Pyrex's brand of Python-mixed-with C. One big advantage of -Pyrex-generated extension modules is that they are easy to distribute -using distutils. In summary, Pyrex is a very capable tool for either -gluing C-code or generating an extension module quickly and should not -be over-looked. It is especially useful for people that can't or won't -write C-code or Fortran code. But, if you are already able to write -simple subroutines in C or Fortran, then I would use one of the other -approaches such as f2py (for Fortran), ctypes (for C shared- -libraries), or weave (for inline C-code). - -.. index:: - single: pyrex - - - - -ctypes -====== - -Ctypes is a python extension module (downloaded separately for Python -<2.5 and included with Python 2.5) that allows you to call an -arbitrary function in a shared library directly from Python. This -approach allows you to interface with C-code directly from Python. -This opens up an enormous number of libraries for use from Python. The -drawback, however, is that coding mistakes can lead to ugly program -crashes very easily (just as can happen in C) because there is little -type or bounds checking done on the parameters. This is especially -true when array data is passed in as a pointer to a raw memory -location. The responsibility is then on you that the subroutine will -not access memory outside the actual array area. 
But, if you don't -mind living a little dangerously ctypes can be an effective tool for -quickly taking advantage of a large shared library (or writing -extended functionality in your own shared library). - -.. index:: - single: ctypes - -Because the ctypes approach exposes a raw interface to the compiled -code it is not always tolerant of user mistakes. Robust use of the -ctypes module typically involves an additional layer of Python code in -order to check the data types and array bounds of objects passed to -the underlying subroutine. This additional layer of checking (not to -mention the conversion from ctypes objects to C-data-types that ctypes -itself performs), will make the interface slower than a hand-written -extension-module interface. However, this overhead should be negligible -if the C-routine being called is doing any significant amount of work. -If you are a great Python programmer with weak C-skills, ctypes is an -easy way to write a useful interface to a (shared) library of compiled -code. - -To use c-types you must - -1. Have a shared library. - -2. Load the shared library. - -3. Convert the python objects to ctypes-understood arguments. - -4. Call the function from the library with the ctypes arguments. - - -Having a shared library ----------------------- - -There are several requirements for a shared library that can be used -with c-types that are platform specific. This guide assumes you have -some familiarity with making a shared library on your system (or -simply have a shared library available to you). Items to remember are: - -- A shared library must be compiled in a special way ( *e.g.* using - the -shared flag with gcc). - -- On some platforms (*e.g.* Windows) , a shared library requires a - .def file that specifies the functions to be exported. For example a - mylib.def file might contain. 
- - :: - - LIBRARY mylib.dll - EXPORTS - cool_function1 - cool_function2 - - Alternatively, you may be able to use the storage-class specifier - __declspec(dllexport) in the C-definition of the function to avoid the - need for this .def file. - -There is no standard way in Python distutils to create a standard -shared library (an extension module is a "special" shared library -Python understands) in a cross-platform manner. Thus, a big -disadvantage of ctypes at the time of writing this book is that it is -difficult to distribute in a cross-platform manner a Python extension -that uses c-types and includes your own code which should be compiled -as a shared library on the users system. - - -Loading the shared library --------------------------- - -A simple, but robust way to load the shared library is to get the -absolute path name and load it using the cdll object of ctypes.: - -.. code-block:: python - - lib = ctypes.cdll[] - -However, on Windows accessing an attribute of the cdll method will -load the first DLL by that name found in the current directory or on -the PATH. Loading the absolute path name requires a little finesse for -cross-platform work since the extension of shared libraries varies. -There is a ``ctypes.util.find_library`` utility available that can -simplify the process of finding the library to load but it is not -foolproof. Complicating matters, different platforms have different -default extensions used by shared libraries (e.g. .dll -- Windows, .so --- Linux, .dylib -- Mac OS X). This must also be taken into account if -you are using c-types to wrap code that needs to work on several -platforms. - -NumPy provides a convenience function called -:func:`ctypeslib.load_library` (name, path). This function takes the name -of the shared library (including any prefix like 'lib' but excluding -the extension) and a path where the shared library can be located. 
It -returns a ctypes library object or raises an OSError if the library -cannot be found or raises an ImportError if the ctypes module is not -available. (Windows users: the ctypes library object loaded using -:func:`load_library` is always loaded assuming cdecl calling convention. -See the ctypes documentation under ctypes.windll and/or ctypes.oledll -for ways to load libraries under other calling conventions). - -The functions in the shared library are available as attributes of the -ctypes library object (returned from :func:`ctypeslib.load_library`) or -as items using ``lib['func_name']`` syntax. The latter method for -retrieving a function name is particularly useful if the function name -contains characters that are not allowable in Python variable names. - - -Converting arguments --------------------- - -Python ints/longs, strings, and unicode objects are automatically -converted as needed to equivalent c-types arguments The None object is -also converted automatically to a NULL pointer. All other Python -objects must be converted to ctypes-specific types. There are two ways -around this restriction that allow c-types to integrate with other -objects. - -1. Don't set the argtypes attribute of the function object and define an - :obj:`_as_parameter_` method for the object you want to pass in. The - :obj:`_as_parameter_` method must return a Python int which will be passed - directly to the function. - -2. Set the argtypes attribute to a list whose entries contain objects - with a classmethod named from_param that knows how to convert your - object to an object that ctypes can understand (an int/long, string, - unicode, or object with the :obj:`_as_parameter_` attribute). - -NumPy uses both methods with a preference for the second method -because it can be safer. The ctypes attribute of the ndarray returns -an object that has an _as_parameter\_ attribute which returns an -integer representing the address of the ndarray to which it is -associated. 
As a result, one can pass this ctypes attribute object -directly to a function expecting a pointer to the data in your -ndarray. The caller must be sure that the ndarray object is of the -correct type, shape, and has the correct flags set or risk nasty -crashes if the data-pointer to inappropriate arrays are passed in. - -To implement the second method, NumPy provides the class-factory -function :func:`ndpointer` in the :mod:`ctypeslib` module. This -class-factory function produces an appropriate class that can be -placed in an argtypes attribute entry of a ctypes function. The class -will contain a from_param method which ctypes will use to convert any -ndarray passed in to the function to a ctypes-recognized object. In -the process, the conversion will perform checking on any properties of -the ndarray that were specified by the user in the call to :func:`ndpointer`. -Aspects of the ndarray that can be checked include the data-type, the -number-of-dimensions, the shape, and/or the state of the flags on any -array passed. The return value of the from_param method is the ctypes -attribute of the array which (because it contains the _as_parameter\_ -attribute pointing to the array data area) can be used by ctypes -directly. - -The ctypes attribute of an ndarray is also endowed with additional -attributes that may be convenient when passing additional information -about the array into a ctypes function. The attributes **data**, -**shape**, and **strides** can provide c-types compatible types -corresponding to the data-area, the shape, and the strides of the -array. The data attribute returns a ``c_void_p`` representing a -pointer to the data area. The shape and strides attributes each return -an array of ctypes integers (or None representing a NULL pointer, if a -0-d array). The base ctype of the array is a ctype integer of the same -size as a pointer on the platform. There are also methods -data_as({ctype}), shape_as(), and strides_as(). 
These return the data as a ctype object of your choice and -the shape/strides arrays using an underlying base type of your choice. -For convenience, the **ctypeslib** module also contains **c_intp** as -a ctypes integer data-type whose size is the same as the size of -``c_void_p`` on the platform (it's value is None if ctypes is not -installed). - - -Calling the function --------------------- - -The function is accessed as an attribute of or an item from the loaded -shared-library. Thus, if "./mylib.so" has a function named -"cool_function1" , I could access this function either as: - -.. code-block:: python - - lib = numpy.ctypeslib.load_library('mylib','.') - func1 = lib.cool_function1 # or equivalently - func1 = lib['cool_function1'] - -In ctypes, the return-value of a function is set to be 'int' by -default. This behavior can be changed by setting the restype attribute -of the function. Use None for the restype if the function has no -return value ('void'): - -.. code-block:: python - - func1.restype = None - -As previously discussed, you can also set the argtypes attribute of -the function in order to have ctypes check the types of the input -arguments when the function is called. Use the :func:`ndpointer` factory -function to generate a ready-made class for data-type, shape, and -flags checking on your new function. The :func:`ndpointer` function has the -signature - -.. function:: ndpointer(dtype=None, ndim=None, shape=None, flags=None) - - Keyword arguments with the value ``None`` are not checked. - Specifying a keyword enforces checking of that aspect of the - ndarray on conversion to a ctypes-compatible object. The dtype - keyword can be any object understood as a data-type object. The - ndim keyword should be an integer, and the shape keyword should be - an integer or a sequence of integers. The flags keyword specifies - the minimal flags that are required on any array passed in. 
This - can be specified as a string of comma separated requirements, an - integer indicating the requirement bits OR'd together, or a flags - object returned from the flags attribute of an array with the - necessary requirements. - -Using an ndpointer class in the argtypes method can make it -significantly safer to call a C-function using ctypes and the data- -area of an ndarray. You may still want to wrap the function in an -additional Python wrapper to make it user-friendly (hiding some -obvious arguments and making some arguments output arguments). In this -process, the **requires** function in NumPy may be useful to return the right -kind of array from a given input. - - -Complete example ----------------- - -In this example, I will show how the addition function and the filter -function implemented previously using the other approaches can be -implemented using ctypes. First, the C-code which implements the -algorithms contains the functions zadd, dadd, sadd, cadd, and -dfilter2d. The zadd function is: - -.. code-block:: c - - /* Add arrays of contiguous data */ - typedef struct {double real; double imag;} cdouble; - typedef struct {float real; float imag;} cfloat; - void zadd(cdouble *a, cdouble *b, cdouble *c, long n) - { - while (n--) { - c->real = a->real + b->real; - c->imag = a->imag + b->imag; - a++; b++; c++; - } - } - -with similar code for cadd, dadd, and sadd that handles complex float, -double, and float data-types, respectively: - -.. code-block:: c - - void cadd(cfloat *a, cfloat *b, cfloat *c, long n) - { - while (n--) { - c->real = a->real + b->real; - c->imag = a->imag + b->imag; - a++; b++; c++; - } - } - void dadd(double *a, double *b, double *c, long n) - { - while (n--) { - *c++ = *a++ + *b++; - } - } - void sadd(float *a, float *b, float *c, long n) - { - while (n--) { - *c++ = *a++ + *b++; - } - } - -The code.c file also contains the function dfilter2d: - -.. 
code-block:: c - - /* Assumes b is contiguous and - a has strides that are multiples of sizeof(double) - */ - void - dfilter2d(double *a, double *b, int *astrides, int *dims) - { - int i, j, M, N, S0, S1; - int r, c, rm1, rp1, cp1, cm1; - - M = dims[0]; N = dims[1]; - S0 = astrides[0]/sizeof(double); - S1=astrides[1]/sizeof(double); - for (i=1; idimensions[0]; - int dims[1]; - dims[0] = n; - PyArrayObject* ret; - ret = (PyArrayObject*) PyArray_FromDims(1, dims, NPY_DOUBLE); - int i; - char *aj=a->data; - char *bj=b->data; - double *retj = (double *)ret->data; - for (i=0; i < n; i++) { - *retj++ = *((double *)aj) + *((double *)bj); - aj += a->strides[0]; - bj += b->strides[0]; - } - return (PyObject *)ret; - } - """ - import Instant, numpy - ext = Instant.Instant() - ext.create_extension(code=s, headers=["numpy/arrayobject.h"], - include_dirs=[numpy.get_include()], - init_code='import_array();', module="test2b_ext") - import test2b_ext - a = numpy.arange(1000) - b = numpy.arange(1000) - d = test2b_ext.add(a,b) - -Except perhaps for the dependence on SWIG, Instant is a -straightforward utility for writing extension modules. - - -PyInline --------- - -This is a much older module that allows automatic building of -extension modules so that C-code can be included with Python code. -It's latest release (version 0.03) was in 2001, and it appears that it -is not being updated. - - -PyFort ------- - -PyFort is a nice tool for wrapping Fortran and Fortran-like C-code -into Python with support for Numeric arrays. It was written by Paul -Dubois, a distinguished computer scientist and the very first -maintainer of Numeric (now retired). It is worth mentioning in the -hopes that somebody will update PyFort to work with NumPy arrays as -well which now support either Fortran or C-style contiguous arrays. 
diff --git a/numpy-1.6.2/doc/source/user/c-info.rst b/numpy-1.6.2/doc/source/user/c-info.rst deleted file mode 100644 index 086f97c8db..0000000000 --- a/numpy-1.6.2/doc/source/user/c-info.rst +++ /dev/null @@ -1,9 +0,0 @@ -################# -Using Numpy C-API -################# - -.. toctree:: - - c-info.how-to-extend - c-info.python-as-glue - c-info.beyond-basics diff --git a/numpy-1.6.2/doc/source/user/howtofind.rst b/numpy-1.6.2/doc/source/user/howtofind.rst deleted file mode 100644 index 00ed5daa70..0000000000 --- a/numpy-1.6.2/doc/source/user/howtofind.rst +++ /dev/null @@ -1,7 +0,0 @@ -************************* -How to find documentation -************************* - -.. seealso:: :ref:`Numpy-specific help functions ` - -.. automodule:: numpy.doc.howtofind diff --git a/numpy-1.6.2/doc/source/user/index.rst b/numpy-1.6.2/doc/source/user/index.rst deleted file mode 100644 index 022efcaeb4..0000000000 --- a/numpy-1.6.2/doc/source/user/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _user: - -################ -NumPy User Guide -################ - -This guide is intended as an introductory overview of NumPy and -explains how to install and make use of the most important features of -NumPy. For detailed reference documentation of the functions and -classes contained in the package, see the :ref:`reference`. - -.. warning:: - - This "User Guide" is still a work in progress; some of the material - is not organized, and several aspects of NumPy are not yet covered - sufficient detail. We are an open source community continually - working to improve the documentation and eagerly encourage interested - parties to contribute. For information on how to do so, please visit - the NumPy `doc wiki `_. - - More documentation for NumPy can be found on the `numpy.org - `__ website. - - Thanks! - -.. 
toctree:: - :maxdepth: 2 - - introduction - basics - performance - misc - c-info diff --git a/numpy-1.6.2/doc/source/user/install.rst b/numpy-1.6.2/doc/source/user/install.rst deleted file mode 100644 index 18e036ab0d..0000000000 --- a/numpy-1.6.2/doc/source/user/install.rst +++ /dev/null @@ -1,179 +0,0 @@ -***************************** -Building and installing NumPy -***************************** - -Binary installers -================= - -In most use cases the best way to install NumPy on your system is by using an -installable binary package for your operating system. - -Windows -------- - -Good solutions for Windows are, The Enthought Python Distribution `(EPD) -`_ (which provides binary -installers for Windows, OS X and Redhat) and `Python (x, y) -`_. Both of these packages include Python, NumPy and -many additional packages. - -A lightweight alternative is to download the Python -installer from `www.python.org `_ and the NumPy -installer for your Python version from the Sourceforge `download site `_ - -The NumPy installer includes binaries for different CPU's (without SSE -instructions, with SSE2 or with SSE3) and installs the correct one -automatically. If needed, this can be bypassed from the command line with :: - - numpy-<1.y.z>-superpack-win32.exe /arch nosse - -or 'sse2' or 'sse3' instead of 'nosse'. - -Linux ------ - -Most of the major distributions provide packages for NumPy, but these can lag -behind the most recent NumPy release. Pre-built binary packages for Ubuntu are -available on the `scipy ppa -`_. Redhat binaries are -available in the `EPD `_. - -Mac OS X --------- - -A universal binary installer for NumPy is available from the `download site -`_. The `EPD `_ -provides NumPy binaries. - -Building from source -==================== - -A general overview of building NumPy from source is given here, with detailed -instructions for specific platforms given seperately. 
- -Prerequisites -------------- - -Building NumPy requires the following software installed: - -1) Python 2.4.x, 2.5.x or 2.6.x - - On Debian and derivative (Ubuntu): python, python-dev - - On Windows: the official python installer at - `www.python.org `_ is enough - - Make sure that the Python package distutils is installed before - continuing. For example, in Debian GNU/Linux, distutils is included - in the python-dev package. - - Python must also be compiled with the zlib module enabled. - -2) Compilers - - To build any extension modules for Python, you'll need a C compiler. - Various NumPy modules use FORTRAN 77 libraries, so you'll also need a - FORTRAN 77 compiler installed. - - Note that NumPy is developed mainly using GNU compilers. Compilers from - other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland, - Lahey, HP, IBM, Microsoft are only supported in the form of community - feedback, and may not work out of the box. GCC 3.x (and later) compilers - are recommended. - -3) Linear Algebra libraries - - NumPy does not require any external linear algebra libraries to be - installed. However, if these are available, NumPy's setup script can detect - them and use them for building. A number of different LAPACK library setups - can be used, including optimized LAPACK libraries such as ATLAS, MKL or the - Accelerate/vecLib framework on OS X. - -FORTRAN ABI mismatch --------------------- - -The two most popular open source fortran compilers are g77 and gfortran. -Unfortunately, they are not ABI compatible, which means that concretely you -should avoid mixing libraries built with one with another. In particular, if -your blas/lapack/atlas is built with g77, you *must* use g77 when building -numpy and scipy; on the contrary, if your atlas is built with gfortran, you -*must* build numpy/scipy with gfortran. This applies for most other cases -where different FORTRAN compilers might have been used. 
- -Choosing the fortran compiler -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To build with g77:: - - python setup.py build --fcompiler=gnu - -To build with gfortran:: - - python setup.py build --fcompiler=gnu95 - -For more information see:: - - python setup.py build --help-fcompiler - -How to check the ABI of blas/lapack/atlas -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One relatively simple and reliable way to check for the compiler used to build -a library is to use ldd on the library. If libg2c.so is a dependency, this -means that g77 has been used. If libgfortran.so is a a dependency, gfortran -has been used. If both are dependencies, this means both have been used, which -is almost always a very bad idea. - -Disabling ATLAS and other accelerated libraries ------------------------------------------------ - -Usage of ATLAS and other accelerated libraries in Numpy can be disabled -via:: - - BLAS=None LAPACK=None ATLAS=None python setup.py build - - -Supplying additional compiler flags ------------------------------------ - -Additional compiler flags can be supplied by setting the ``OPT``, -``FOPT`` (for Fortran), and ``CC`` environment variables. - - -Building with ATLAS support ---------------------------- - -Ubuntu 8.10 (Intrepid) and 9.04 (Jaunty) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can install the necessary packages for optimized ATLAS with this command:: - - sudo apt-get install libatlas-base-dev - -If you have a recent CPU with SIMD suppport (SSE, SSE2, etc...), you should -also install the corresponding package for optimal performances. For example, -for SSE2:: - - sudo apt-get install libatlas3gf-sse2 - -This package is not available on amd64 platforms. - -*NOTE*: Ubuntu changed its default fortran compiler from g77 in Hardy to -gfortran in Intrepid. If you are building ATLAS from source and are upgrading -from Hardy to Intrepid or later versions, you should rebuild everything from -scratch, including lapack. 
- -Ubuntu 8.04 and lower -~~~~~~~~~~~~~~~~~~~~~ - -You can install the necessary packages for optimized ATLAS with this command:: - - sudo apt-get install atlas3-base-dev - -If you have a recent CPU with SIMD suppport (SSE, SSE2, etc...), you should -also install the corresponding package for optimal performances. For example, -for SSE2:: - - sudo apt-get install atlas3-sse2 diff --git a/numpy-1.6.2/doc/source/user/introduction.rst b/numpy-1.6.2/doc/source/user/introduction.rst deleted file mode 100644 index d29c13b307..0000000000 --- a/numpy-1.6.2/doc/source/user/introduction.rst +++ /dev/null @@ -1,10 +0,0 @@ -************ -Introduction -************ - - -.. toctree:: - - whatisnumpy - install - howtofind diff --git a/numpy-1.6.2/doc/source/user/misc.rst b/numpy-1.6.2/doc/source/user/misc.rst deleted file mode 100644 index 0e1807f3f9..0000000000 --- a/numpy-1.6.2/doc/source/user/misc.rst +++ /dev/null @@ -1,7 +0,0 @@ -************* -Miscellaneous -************* - -.. automodule:: numpy.doc.misc - -.. automodule:: numpy.doc.methods_vs_functions diff --git a/numpy-1.6.2/doc/source/user/performance.rst b/numpy-1.6.2/doc/source/user/performance.rst deleted file mode 100644 index 59f8a2edc9..0000000000 --- a/numpy-1.6.2/doc/source/user/performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -*********** -Performance -*********** - -.. automodule:: numpy.doc.performance diff --git a/numpy-1.6.2/doc/source/user/whatisnumpy.rst b/numpy-1.6.2/doc/source/user/whatisnumpy.rst deleted file mode 100644 index 1c3f96b8b1..0000000000 --- a/numpy-1.6.2/doc/source/user/whatisnumpy.rst +++ /dev/null @@ -1,128 +0,0 @@ -************** -What is NumPy? -************** - -NumPy is the fundamental package for scientific computing in Python. 
-It is a Python library that provides a multidimensional array object, -various derived objects (such as masked arrays and matrices), and an -assortment of routines for fast operations on arrays, including -mathematical, logical, shape manipulation, sorting, selecting, I/O, -discrete Fourier transforms, basic linear algebra, basic statistical -operations, random simulation and much more. - -At the core of the NumPy package, is the `ndarray` object. This -encapsulates *n*-dimensional arrays of homogeneous data types, with -many operations being performed in compiled code for performance. -There are several important differences between NumPy arrays and the -standard Python sequences: - -- NumPy arrays have a fixed size at creation, unlike Python lists - (which can grow dynamically). Changing the size of an `ndarray` will - create a new array and delete the original. - -- The elements in a NumPy array are all required to be of the same - data type, and thus will be the same size in memory. The exception: - one can have arrays of (Python, including NumPy) objects, thereby - allowing for arrays of different sized elements. - -- NumPy arrays facilitate advanced mathematical and other types of - operations on large numbers of data. Typically, such operations are - executed more efficiently and with less code than is possible using - Python's built-in sequences. - -- A growing plethora of scientific and mathematical Python-based - packages are using NumPy arrays; though these typically support - Python-sequence input, they convert such input to NumPy arrays prior - to processing, and they often output NumPy arrays. In other words, - in order to efficiently use much (perhaps even most) of today's - scientific/mathematical Python-based software, just knowing how to - use Python's built-in sequence types is insufficient - one also - needs to know how to use NumPy arrays. - -The points about sequence size and speed are particularly important in -scientific computing. 
As a simple example, consider the case of -multiplying each element in a 1-D sequence with the corresponding -element in another sequence of the same length. If the data are -stored in two Python lists, ``a`` and ``b``, we could iterate over -each element:: - - c = [] - for i in range(len(a)): - c.append(a[i]*b[i]) - -This produces the correct answer, but if ``a`` and ``b`` each contain -millions of numbers, we will pay the price for the inefficiencies of -looping in Python. We could accomplish the same task much more -quickly in C by writing (for clarity we neglect variable declarations -and initializations, memory allocation, etc.) - -:: - - for (i = 0; i < rows; i++): { - c[i] = a[i]*b[i]; - } - -This saves all the overhead involved in interpreting the Python code -and manipulating Python objects, but at the expense of the benefits -gained from coding in Python. Furthermore, the coding work required -increases with the dimensionality of our data. In the case of a 2-D -array, for example, the C code (abridged as before) expands to - -:: - - for (i = 0; i < rows; i++): { - for (j = 0; j < columns; j++): { - c[i][j] = a[i][j]*b[i][j]; - } - } - -NumPy gives us the best of both worlds: element-by-element operations -are the "default mode" when an `ndarray` is involved, but the -element-by-element operation is speedily executed by pre-compiled C -code. In NumPy - -:: - - c = a * b - -does what the earlier examples do, at near-C speeds, but with the code -simplicity we expect from something based on Python (indeed, the NumPy -idiom is even simpler!). This last example illustrates two of NumPy's -features which are the basis of much of its power: vectorization and -broadcasting. - -Vectorization describes the absence of any explicit looping, indexing, -etc., in the code - these things are taking place, of course, just -"behind the scenes" (in optimized, pre-compiled C code). 
Vectorized
-code has many advantages, among which are:
-
-- vectorized code is more concise and easier to read
-
-- fewer lines of code generally means fewer bugs
-
-- the code more closely resembles standard mathematical notation
-  (making it easier, typically, to correctly code mathematical
-  constructs)
-
-- vectorization results in more "Pythonic" code (without
-  vectorization, our code would still be littered with inefficient and
-  difficult to read ``for`` loops).
-
-Broadcasting is the term used to describe the implicit
-element-by-element behavior of operations; generally speaking, in
-NumPy all operations (i.e., not just arithmetic operations, but
-logical, bit-wise, functional, etc.) behave in this implicit
-element-by-element fashion, i.e., they broadcast. Moreover, in the
-example above, ``a`` and ``b`` could be multidimensional arrays of the
-same shape, or a scalar and an array, or even two arrays with
-different shapes. Provided that the smaller array is "expandable" to
-the shape of the larger in such a way that the resulting broadcast is
-unambiguous (for detailed "rules" of broadcasting see
-`numpy.doc.broadcasting`).
-
-NumPy fully supports an object-oriented approach, starting, once
-again, with `ndarray`. For example, `ndarray` is a class, possessing
-numerous methods and attributes. Many of its methods mirror
-functions in the outer-most NumPy namespace, giving the programmer
-complete freedom to code in whichever paradigm she prefers and/or
-which seems most appropriate to the task at hand. 
diff --git a/numpy-1.6.2/doc/sphinxext/LICENSE.txt b/numpy-1.6.2/doc/sphinxext/LICENSE.txt deleted file mode 100644 index e00efc31ec..0000000000 --- a/numpy-1.6.2/doc/sphinxext/LICENSE.txt +++ /dev/null @@ -1,97 +0,0 @@ -------------------------------------------------------------------------------- - The files - - numpydoc.py - - autosummary.py - - autosummary_generate.py - - docscrape.py - - docscrape_sphinx.py - - phantom_import.py - have the following license: - -Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- - The files - - compiler_unparse.py - - comment_eater.py - - traitsdoc.py - have the following license: - -This software is OSI Certified Open Source Software. 
-OSI Certified is a certification mark of the Open Source Initiative. - -Copyright (c) 2006, Enthought, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Enthought, Inc. nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- - The files - - only_directives.py - - plot_directive.py - originate from Matplotlib (http://matplotlib.sf.net/) which has - the following license: - -Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. - -1. This LICENSE AGREEMENT is between John D. 
Hunter ("JDH"), and the Individual or Organization ("Licensee") accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH's License Agreement and JDH's notice of copyright, i.e., "Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved" are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee.
-
-3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
-
-4. JDH is making matplotlib 0.98.3 available to Licensee on an "AS IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. 
This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. - diff --git a/numpy-1.6.2/doc/sphinxext/MANIFEST.in b/numpy-1.6.2/doc/sphinxext/MANIFEST.in deleted file mode 100644 index f88ed785c5..0000000000 --- a/numpy-1.6.2/doc/sphinxext/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include tests *.py -include *.txt diff --git a/numpy-1.6.2/doc/sphinxext/README.txt b/numpy-1.6.2/doc/sphinxext/README.txt deleted file mode 100644 index 6ba63e6d85..0000000000 --- a/numpy-1.6.2/doc/sphinxext/README.txt +++ /dev/null @@ -1,45 +0,0 @@ -===================================== -numpydoc -- Numpy's Sphinx extensions -===================================== - -Numpy's documentation uses several custom extensions to Sphinx. These -are shipped in this ``numpydoc`` package, in case you want to make use -of them in third-party projects. - -The following extensions are available: - - - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add - the code description directives ``np:function``, ``np-c:function``, etc. - that support the Numpy docstring syntax. - - - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes. - - - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::`` - directive. Note that this implementation may still undergo severe - changes or eventually be deprecated. - - -numpydoc -======== - -Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings -following the Numpy/Scipy format to a form palatable to Sphinx. - -Options -------- - -The following options can be set in conf.py: - -- numpydoc_use_plots: bool - - Whether to produce ``plot::`` directives for Examples sections that - contain ``import matplotlib``. 
- -- numpydoc_show_class_members: bool - - Whether to show all members of a class in the Methods and Attributes - sections automatically. - -- numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead) - - Whether to insert an edit link after docstrings. diff --git a/numpy-1.6.2/doc/sphinxext/__init__.py b/numpy-1.6.2/doc/sphinxext/__init__.py deleted file mode 100644 index ae9073bc41..0000000000 --- a/numpy-1.6.2/doc/sphinxext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from numpydoc import setup diff --git a/numpy-1.6.2/doc/sphinxext/comment_eater.py b/numpy-1.6.2/doc/sphinxext/comment_eater.py deleted file mode 100644 index e11eea9021..0000000000 --- a/numpy-1.6.2/doc/sphinxext/comment_eater.py +++ /dev/null @@ -1,158 +0,0 @@ -from cStringIO import StringIO -import compiler -import inspect -import textwrap -import tokenize - -from compiler_unparse import unparse - - -class Comment(object): - """ A comment block. - """ - is_comment = True - def __init__(self, start_lineno, end_lineno, text): - # int : The first line number in the block. 1-indexed. - self.start_lineno = start_lineno - # int : The last line number. Inclusive! - self.end_lineno = end_lineno - # str : The text block including '#' character but not any leading spaces. - self.text = text - - def add(self, string, start, end, line): - """ Add a new comment line. - """ - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - self.text += string - - def __repr__(self): - return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno, self.text) - - -class NonComment(object): - """ A non-comment block of code. - """ - is_comment = False - def __init__(self, start_lineno, end_lineno): - self.start_lineno = start_lineno - self.end_lineno = end_lineno - - def add(self, string, start, end, line): - """ Add lines to the block. - """ - if string.strip(): - # Only add if not entirely whitespace. 
- self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - - def __repr__(self): - return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno) - - -class CommentBlocker(object): - """ Pull out contiguous comment blocks. - """ - def __init__(self): - # Start with a dummy. - self.current_block = NonComment(0, 0) - - # All of the blocks seen so far. - self.blocks = [] - - # The index mapping lines of code to their associated comment blocks. - self.index = {} - - def process_file(self, file): - """ Process a file object. - """ - for token in tokenize.generate_tokens(file.next): - self.process_token(*token) - self.make_index() - - def process_token(self, kind, string, start, end, line): - """ Process a single token. - """ - if self.current_block.is_comment: - if kind == tokenize.COMMENT: - self.current_block.add(string, start, end, line) - else: - self.new_noncomment(start[0], end[0]) - else: - if kind == tokenize.COMMENT: - self.new_comment(string, start, end, line) - else: - self.current_block.add(string, start, end, line) - - def new_noncomment(self, start_lineno, end_lineno): - """ We are transitioning from a noncomment to a comment. - """ - block = NonComment(start_lineno, end_lineno) - self.blocks.append(block) - self.current_block = block - - def new_comment(self, string, start, end, line): - """ Possibly add a new comment. - - Only adds a new comment if this comment is the only thing on the line. - Otherwise, it extends the noncomment block. - """ - prefix = line[:start[1]] - if prefix.strip(): - # Oops! Trailing comment, not a comment block. - self.current_block.add(string, start, end, line) - else: - # A comment block. - block = Comment(start[0], end[0], string) - self.blocks.append(block) - self.current_block = block - - def make_index(self): - """ Make the index mapping lines of actual code to their associated - prefix comments. 
- """ - for prev, block in zip(self.blocks[:-1], self.blocks[1:]): - if not block.is_comment: - self.index[block.start_lineno] = prev - - def search_for_comment(self, lineno, default=None): - """ Find the comment block just before the given line number. - - Returns None (or the specified default) if there is no such block. - """ - if not self.index: - self.make_index() - block = self.index.get(lineno, None) - text = getattr(block, 'text', default) - return text - - -def strip_comment_marker(text): - """ Strip # markers at the front of a block of comment text. - """ - lines = [] - for line in text.splitlines(): - lines.append(line.lstrip('#')) - text = textwrap.dedent('\n'.join(lines)) - return text - - -def get_class_traits(klass): - """ Yield all of the documentation for trait definitions on a class object. - """ - # FIXME: gracefully handle errors here or in the caller? - source = inspect.getsource(klass) - cb = CommentBlocker() - cb.process_file(StringIO(source)) - mod_ast = compiler.parse(source) - class_ast = mod_ast.node.nodes[0] - for node in class_ast.code.nodes: - # FIXME: handle other kinds of assignments? - if isinstance(node, compiler.ast.Assign): - name = node.nodes[0].name - rhs = unparse(node.expr).strip() - doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) - yield name, rhs, doc - diff --git a/numpy-1.6.2/doc/sphinxext/compiler_unparse.py b/numpy-1.6.2/doc/sphinxext/compiler_unparse.py deleted file mode 100644 index ffcf51b353..0000000000 --- a/numpy-1.6.2/doc/sphinxext/compiler_unparse.py +++ /dev/null @@ -1,860 +0,0 @@ -""" Turn compiler.ast structures back into executable python code. - - The unparse method takes a compiler.ast tree and transforms it back into - valid python code. It is incomplete and currently only works for - import statements, function calls, function definitions, assignments, and - basic expressions. 
- - Inspired by python-2.5-svn/Demo/parser/unparse.py - - fixme: We may want to move to using _ast trees because the compiler for - them is about 6 times faster than compiler.compile. -""" - -import sys -import cStringIO -from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add - -def unparse(ast, single_line_functions=False): - s = cStringIO.StringIO() - UnparseCompilerAst(ast, s, single_line_functions) - return s.getvalue().lstrip() - -op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, - 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } - -class UnparseCompilerAst: - """ Methods in this class recursively traverse an AST and - output source code for the abstract syntax; original formatting - is disregarged. - """ - - ######################################################################### - # object interface. - ######################################################################### - - def __init__(self, tree, file = sys.stdout, single_line_functions=False): - """ Unparser(tree, file=sys.stdout) -> None. - - Print the source for tree to file. - """ - self.f = file - self._single_func = single_line_functions - self._do_indent = True - self._indent = 0 - self._dispatch(tree) - self._write("\n") - self.f.flush() - - ######################################################################### - # Unparser private interface. - ######################################################################### - - ### format, output, and dispatch methods ################################ - - def _fill(self, text = ""): - "Indent a piece of text, according to the current indentation level" - if self._do_indent: - self._write("\n"+" "*self._indent + text) - else: - self._write(text) - - def _write(self, text): - "Append a piece of text to the current line." - self.f.write(text) - - def _enter(self): - "Print ':', and increase the indentation." - self._write(": ") - self._indent += 1 - - def _leave(self): - "Decrease the indentation level." 
- self._indent -= 1 - - def _dispatch(self, tree): - "_dispatcher function, _dispatching tree type T to method _T." - if isinstance(tree, list): - for t in tree: - self._dispatch(t) - return - meth = getattr(self, "_"+tree.__class__.__name__) - if tree.__class__.__name__ == 'NoneType' and not self._do_indent: - return - meth(tree) - - - ######################################################################### - # compiler.ast unparsing methods. - # - # There should be one method per concrete grammar type. They are - # organized in alphabetical order. - ######################################################################### - - def _Add(self, t): - self.__binary_op(t, '+') - - def _And(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") and (") - self._write(")") - - def _AssAttr(self, t): - """ Handle assigning an attribute of an object - """ - self._dispatch(t.expr) - self._write('.'+t.attrname) - - def _Assign(self, t): - """ Expression Assignment such as "a = 1". - - This only handles assignment in expressions. Keyword assignment - is handled separately. - """ - self._fill() - for target in t.nodes: - self._dispatch(target) - self._write(" = ") - self._dispatch(t.expr) - if not self._do_indent: - self._write('; ') - - def _AssName(self, t): - """ Name on left hand side of expression. - - Treat just like a name on the right side of an expression. - """ - self._Name(t) - - def _AssTuple(self, t): - """ Tuple on left hand side of an expression. - """ - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - def _AugAssign(self, t): - """ +=,-=,*=,/=,**=, etc. 
operations - """ - - self._fill() - self._dispatch(t.node) - self._write(' '+t.op+' ') - self._dispatch(t.expr) - if not self._do_indent: - self._write(';') - - def _Bitand(self, t): - """ Bit and operation. - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" & ") - - def _Bitor(self, t): - """ Bit or operation - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" | ") - - def _CallFunc(self, t): - """ Function call. - """ - self._dispatch(t.node) - self._write("(") - comma = False - for e in t.args: - if comma: self._write(", ") - else: comma = True - self._dispatch(e) - if t.star_args: - if comma: self._write(", ") - else: comma = True - self._write("*") - self._dispatch(t.star_args) - if t.dstar_args: - if comma: self._write(", ") - else: comma = True - self._write("**") - self._dispatch(t.dstar_args) - self._write(")") - - def _Compare(self, t): - self._dispatch(t.expr) - for op, expr in t.ops: - self._write(" " + op + " ") - self._dispatch(expr) - - def _Const(self, t): - """ A constant value such as an integer value, 3, or a string, "hello". - """ - self._dispatch(t.value) - - def _Decorators(self, t): - """ Handle function decorators (eg. @has_units) - """ - for node in t.nodes: - self._dispatch(node) - - def _Dict(self, t): - self._write("{") - for i, (k, v) in enumerate(t.items): - self._dispatch(k) - self._write(": ") - self._dispatch(v) - if i < len(t.items)-1: - self._write(", ") - self._write("}") - - def _Discard(self, t): - """ Node for when return value is ignored such as in "foo(a)". - """ - self._fill() - self._dispatch(t.expr) - - def _Div(self, t): - self.__binary_op(t, '/') - - def _Ellipsis(self, t): - self._write("...") - - def _From(self, t): - """ Handle "from xyz import foo, bar as baz". - """ - # fixme: Are From and ImportFrom handled differently? 
- self._fill("from ") - self._write(t.modname) - self._write(" import ") - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Function(self, t): - """ Handle function definitions - """ - if t.decorators is not None: - self._fill("@") - self._dispatch(t.decorators) - self._fill("def "+t.name + "(") - defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) - for i, arg in enumerate(zip(t.argnames, defaults)): - self._write(arg[0]) - if arg[1] is not None: - self._write('=') - self._dispatch(arg[1]) - if i < len(t.argnames)-1: - self._write(', ') - self._write(")") - if self._single_func: - self._do_indent = False - self._enter() - self._dispatch(t.code) - self._leave() - self._do_indent = True - - def _Getattr(self, t): - """ Handle getting an attribute of an object - """ - if isinstance(t.expr, (Div, Mul, Sub, Add)): - self._write('(') - self._dispatch(t.expr) - self._write(')') - else: - self._dispatch(t.expr) - - self._write('.'+t.attrname) - - def _If(self, t): - self._fill() - - for i, (compare,code) in enumerate(t.tests): - if i == 0: - self._write("if ") - else: - self._write("elif ") - self._dispatch(compare) - self._enter() - self._fill() - self._dispatch(code) - self._leave() - self._write("\n") - - if t.else_ is not None: - self._write("else") - self._enter() - self._fill() - self._dispatch(t.else_) - self._leave() - self._write("\n") - - def _IfExp(self, t): - self._dispatch(t.then) - self._write(" if ") - self._dispatch(t.test) - - if t.else_ is not None: - self._write(" else (") - self._dispatch(t.else_) - self._write(")") - - def _Import(self, t): - """ Handle "import xyz.foo". 
- """ - self._fill("import ") - - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Keyword(self, t): - """ Keyword value assignment within function calls and definitions. - """ - self._write(t.name) - self._write("=") - self._dispatch(t.expr) - - def _List(self, t): - self._write("[") - for i,node in enumerate(t.nodes): - self._dispatch(node) - if i < len(t.nodes)-1: - self._write(", ") - self._write("]") - - def _Module(self, t): - if t.doc is not None: - self._dispatch(t.doc) - self._dispatch(t.node) - - def _Mul(self, t): - self.__binary_op(t, '*') - - def _Name(self, t): - self._write(t.name) - - def _NoneType(self, t): - self._write("None") - - def _Not(self, t): - self._write('not (') - self._dispatch(t.expr) - self._write(')') - - def _Or(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") or (") - self._write(")") - - def _Pass(self, t): - self._write("pass\n") - - def _Printnl(self, t): - self._fill("print ") - if t.dest: - self._write(">> ") - self._dispatch(t.dest) - self._write(", ") - comma = False - for node in t.nodes: - if comma: self._write(', ') - else: comma = True - self._dispatch(node) - - def _Power(self, t): - self.__binary_op(t, '**') - - def _Return(self, t): - self._fill("return ") - if t.value: - if isinstance(t.value, Tuple): - text = ', '.join([ name.name for name in t.value.asList() ]) - self._write(text) - else: - self._dispatch(t.value) - if not self._do_indent: - self._write('; ') - - def _Slice(self, t): - self._dispatch(t.expr) - self._write("[") - if t.lower: - self._dispatch(t.lower) - self._write(":") - if t.upper: - self._dispatch(t.upper) - #if t.step: - # self._write(":") - # self._dispatch(t.step) - self._write("]") - - def _Sliceobj(self, t): - for i, node in enumerate(t.nodes): - if i != 0: - self._write(":") - if not 
(isinstance(node, Const) and node.value is None): - self._dispatch(node) - - def _Stmt(self, tree): - for node in tree.nodes: - self._dispatch(node) - - def _Sub(self, t): - self.__binary_op(t, '-') - - def _Subscript(self, t): - self._dispatch(t.expr) - self._write("[") - for i, value in enumerate(t.subs): - if i != 0: - self._write(",") - self._dispatch(value) - self._write("]") - - def _TryExcept(self, t): - self._fill("try") - self._enter() - self._dispatch(t.body) - self._leave() - - for handler in t.handlers: - self._fill('except ') - self._dispatch(handler[0]) - if handler[1] is not None: - self._write(', ') - self._dispatch(handler[1]) - self._enter() - self._dispatch(handler[2]) - self._leave() - - if t.else_: - self._fill("else") - self._enter() - self._dispatch(t.else_) - self._leave() - - def _Tuple(self, t): - - if not t.nodes: - # Empty tuple. - self._write("()") - else: - self._write("(") - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - self._write(")") - - def _UnaryAdd(self, t): - self._write("+") - self._dispatch(t.expr) - - def _UnarySub(self, t): - self._write("-") - self._dispatch(t.expr) - - def _With(self, t): - self._fill('with ') - self._dispatch(t.expr) - if t.vars: - self._write(' as ') - self._dispatch(t.vars.name) - self._enter() - self._dispatch(t.body) - self._leave() - self._write('\n') - - def _int(self, t): - self._write(repr(t)) - - def __binary_op(self, t, symbol): - # Check if parenthesis are needed on left side and then dispatch - has_paren = False - left_class = str(t.left.__class__) - if (left_class in op_precedence.keys() and - op_precedence[left_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.left) - if has_paren: - self._write(')') - # Write the appropriate symbol for 
operator - self._write(symbol) - # Check if parenthesis are needed on the right side and then dispatch - has_paren = False - right_class = str(t.right.__class__) - if (right_class in op_precedence.keys() and - op_precedence[right_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.right) - if has_paren: - self._write(')') - - def _float(self, t): - # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' - # We prefer str here. - self._write(str(t)) - - def _str(self, t): - self._write(repr(t)) - - def _tuple(self, t): - self._write(str(t)) - - ######################################################################### - # These are the methods from the _ast modules unparse. - # - # As our needs to handle more advanced code increase, we may want to - # modify some of the methods below so that they work for compiler.ast. - ######################################################################### - -# # stmt -# def _Expr(self, tree): -# self._fill() -# self._dispatch(tree.value) -# -# def _Import(self, t): -# self._fill("import ") -# first = True -# for a in t.names: -# if first: -# first = False -# else: -# self._write(", ") -# self._write(a.name) -# if a.asname: -# self._write(" as "+a.asname) -# -## def _ImportFrom(self, t): -## self._fill("from ") -## self._write(t.module) -## self._write(" import ") -## for i, a in enumerate(t.names): -## if i == 0: -## self._write(", ") -## self._write(a.name) -## if a.asname: -## self._write(" as "+a.asname) -## # XXX(jpe) what is level for? 
-## -# -# def _Break(self, t): -# self._fill("break") -# -# def _Continue(self, t): -# self._fill("continue") -# -# def _Delete(self, t): -# self._fill("del ") -# self._dispatch(t.targets) -# -# def _Assert(self, t): -# self._fill("assert ") -# self._dispatch(t.test) -# if t.msg: -# self._write(", ") -# self._dispatch(t.msg) -# -# def _Exec(self, t): -# self._fill("exec ") -# self._dispatch(t.body) -# if t.globals: -# self._write(" in ") -# self._dispatch(t.globals) -# if t.locals: -# self._write(", ") -# self._dispatch(t.locals) -# -# def _Print(self, t): -# self._fill("print ") -# do_comma = False -# if t.dest: -# self._write(">>") -# self._dispatch(t.dest) -# do_comma = True -# for e in t.values: -# if do_comma:self._write(", ") -# else:do_comma=True -# self._dispatch(e) -# if not t.nl: -# self._write(",") -# -# def _Global(self, t): -# self._fill("global") -# for i, n in enumerate(t.names): -# if i != 0: -# self._write(",") -# self._write(" " + n) -# -# def _Yield(self, t): -# self._fill("yield") -# if t.value: -# self._write(" (") -# self._dispatch(t.value) -# self._write(")") -# -# def _Raise(self, t): -# self._fill('raise ') -# if t.type: -# self._dispatch(t.type) -# if t.inst: -# self._write(", ") -# self._dispatch(t.inst) -# if t.tback: -# self._write(", ") -# self._dispatch(t.tback) -# -# -# def _TryFinally(self, t): -# self._fill("try") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# self._fill("finally") -# self._enter() -# self._dispatch(t.finalbody) -# self._leave() -# -# def _excepthandler(self, t): -# self._fill("except ") -# if t.type: -# self._dispatch(t.type) -# if t.name: -# self._write(", ") -# self._dispatch(t.name) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _ClassDef(self, t): -# self._write("\n") -# self._fill("class "+t.name) -# if t.bases: -# self._write("(") -# for a in t.bases: -# self._dispatch(a) -# self._write(", ") -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# 
self._leave() -# -# def _FunctionDef(self, t): -# self._write("\n") -# for deco in t.decorators: -# self._fill("@") -# self._dispatch(deco) -# self._fill("def "+t.name + "(") -# self._dispatch(t.args) -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _For(self, t): -# self._fill("for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# def _While(self, t): -# self._fill("while ") -# self._dispatch(t.test) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# # expr -# def _Str(self, tree): -# self._write(repr(tree.s)) -## -# def _Repr(self, t): -# self._write("`") -# self._dispatch(t.value) -# self._write("`") -# -# def _Num(self, t): -# self._write(repr(t.n)) -# -# def _ListComp(self, t): -# self._write("[") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write("]") -# -# def _GeneratorExp(self, t): -# self._write("(") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write(")") -# -# def _comprehension(self, t): -# self._write(" for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# for if_clause in t.ifs: -# self._write(" if ") -# self._dispatch(if_clause) -# -# def _IfExp(self, t): -# self._dispatch(t.body) -# self._write(" if ") -# self._dispatch(t.test) -# if t.orelse: -# self._write(" else ") -# self._dispatch(t.orelse) -# -# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} -# def _UnaryOp(self, t): -# self._write(self.unop[t.op.__class__.__name__]) -# self._write("(") -# self._dispatch(t.operand) -# self._write(")") -# -# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", -# "LShift":">>", 
"RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", -# "FloorDiv":"//", "Pow": "**"} -# def _BinOp(self, t): -# self._write("(") -# self._dispatch(t.left) -# self._write(")" + self.binop[t.op.__class__.__name__] + "(") -# self._dispatch(t.right) -# self._write(")") -# -# boolops = {_ast.And: 'and', _ast.Or: 'or'} -# def _BoolOp(self, t): -# self._write("(") -# self._dispatch(t.values[0]) -# for v in t.values[1:]: -# self._write(" %s " % self.boolops[t.op.__class__]) -# self._dispatch(v) -# self._write(")") -# -# def _Attribute(self,t): -# self._dispatch(t.value) -# self._write(".") -# self._write(t.attr) -# -## def _Call(self, t): -## self._dispatch(t.func) -## self._write("(") -## comma = False -## for e in t.args: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## for e in t.keywords: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## if t.starargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("*") -## self._dispatch(t.starargs) -## if t.kwargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("**") -## self._dispatch(t.kwargs) -## self._write(")") -# -# # slice -# def _Index(self, t): -# self._dispatch(t.value) -# -# def _ExtSlice(self, t): -# for i, d in enumerate(t.dims): -# if i != 0: -# self._write(': ') -# self._dispatch(d) -# -# # others -# def _arguments(self, t): -# first = True -# nonDef = len(t.args)-len(t.defaults) -# for a in t.args[0:nonDef]: -# if first:first = False -# else: self._write(", ") -# self._dispatch(a) -# for a,d in zip(t.args[nonDef:], t.defaults): -# if first:first = False -# else: self._write(", ") -# self._dispatch(a), -# self._write("=") -# self._dispatch(d) -# if t.vararg: -# if first:first = False -# else: self._write(", ") -# self._write("*"+t.vararg) -# if t.kwarg: -# if first:first = False -# else: self._write(", ") -# self._write("**"+t.kwarg) -# -## def _keyword(self, t): -## self._write(t.arg) -## self._write("=") 
-## self._dispatch(t.value) -# -# def _Lambda(self, t): -# self._write("lambda ") -# self._dispatch(t.args) -# self._write(": ") -# self._dispatch(t.body) - - - diff --git a/numpy-1.6.2/doc/sphinxext/docscrape.py b/numpy-1.6.2/doc/sphinxext/docscrape.py deleted file mode 100644 index 615ea11f8d..0000000000 --- a/numpy-1.6.2/doc/sphinxext/docscrape.py +++ /dev/null @@ -1,500 +0,0 @@ -"""Extract reference documentation from the NumPy source tree. - -""" - -import inspect -import textwrap -import re -import pydoc -from StringIO import StringIO -from warnings import warn - -class Reader(object): - """A line-based string reader. - - """ - def __init__(self, data): - """ - Parameters - ---------- - data : str - String with lines separated by '\n'. - - """ - if isinstance(data,list): - self._str = data - else: - self._str = data.split('\n') # store string as list of lines - - self.reset() - - def __getitem__(self, n): - return self._str[n] - - def reset(self): - self._l = 0 # current line nr - - def read(self): - if not self.eof(): - out = self[self._l] - self._l += 1 - return out - else: - return '' - - def seek_next_non_empty_line(self): - for l in self[self._l:]: - if l.strip(): - break - else: - self._l += 1 - - def eof(self): - return self._l >= len(self._str) - - def read_to_condition(self, condition_func): - start = self._l - for line in self[start:]: - if condition_func(line): - return self[start:self._l] - self._l += 1 - if self.eof(): - return self[start:self._l+1] - return [] - - def read_to_next_empty_line(self): - self.seek_next_non_empty_line() - def is_empty(line): - return not line.strip() - return self.read_to_condition(is_empty) - - def read_to_next_unindented_line(self): - def is_unindented(line): - return (line.strip() and (len(line.lstrip()) == len(line))) - return self.read_to_condition(is_unindented) - - def peek(self,n=0): - if self._l + n < len(self._str): - return self[self._l + n] - else: - return '' - - def is_empty(self): - return not 
''.join(self._str).strip() - - -class NumpyDocString(object): - def __init__(self, docstring, config={}): - docstring = textwrap.dedent(docstring).split('\n') - - self._doc = Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': [''], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def __getitem__(self,key): - return self._parsed_data[key] - - def __setitem__(self,key,val): - if not self._parsed_data.has_key(key): - warn("Unknown section %s" % key) - else: - self._parsed_data[key] = val - - def _is_at_section(self): - self._doc.seek_next_non_empty_line() - - if self._doc.eof(): - return False - - l1 = self._doc.peek().strip() # e.g. Parameters - - if l1.startswith('.. index::'): - return True - - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - - def _strip(self,doc): - i = 0 - j = 0 - for i,line in enumerate(doc): - if line.strip(): break - - for j,line in enumerate(doc[::-1]): - if line.strip(): break - - return doc[i:len(doc)-j] - - def _read_to_next_section(self): - section = self._doc.read_to_next_empty_line() - - while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty - section += [''] - - section += self._doc.read_to_next_empty_line() - - return section - - def _read_sections(self): - while not self._doc.eof(): - data = self._read_to_next_section() - name = data[0].strip() - - if name.startswith('..'): # index section - yield name, data[1:] - elif len(data) < 2: - yield StopIteration - else: - yield name, self._strip(data[2:]) - - def _parse_param_list(self,content): - r = Reader(content) - params = [] - while not r.eof(): - header = r.read().strip() - if ' : ' in 
header: - arg_name, arg_type = header.split(' : ')[:2] - else: - arg_name, arg_type = header, '' - - desc = r.read_to_next_unindented_line() - desc = dedent_lines(desc) - - params.append((arg_name,arg_type,desc)) - - return params - - - _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" - r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) - def _parse_see_also(self, content): - """ - func_name : Descriptive text - continued text - another_func_name : Descriptive text - func_name1, func_name2, :meth:`func_name`, func_name3 - - """ - items = [] - - def parse_item_name(text): - """Match ':role:`name`' or 'name'""" - m = self._name_rgx.match(text) - if m: - g = m.groups() - if g[1] is None: - return g[3], None - else: - return g[2], g[1] - raise ValueError("%s is not a item name" % text) - - def push_item(name, rest): - if not name: - return - name, role = parse_item_name(name) - items.append((name, list(rest), role)) - del rest[:] - - current_func = None - rest = [] - - for line in content: - if not line.strip(): continue - - m = self._name_rgx.match(line) - if m and line[m.end():].strip().startswith(':'): - push_item(current_func, rest) - current_func, line = line[:m.end()], line[m.end():] - rest = [line.split(':', 1)[1].strip()] - if not rest[0]: - rest = [] - elif not line.startswith(' '): - push_item(current_func, rest) - current_func = None - if ',' in line: - for func in line.split(','): - if func.strip(): - push_item(func, []) - elif line.strip(): - current_func = line - elif current_func is not None: - rest.append(line.strip()) - push_item(current_func, rest) - return items - - def _parse_index(self, section, content): - """ - .. 
index: default - :refguide: something, else, and more - - """ - def strip_each_in(lst): - return [s.strip() for s in lst] - - out = {} - section = section.split('::') - if len(section) > 1: - out['default'] = strip_each_in(section[1].split(','))[0] - for line in content: - line = line.split(':') - if len(line) > 2: - out[line[1]] = strip_each_in(line[2].split(',')) - return out - - def _parse_summary(self): - """Grab signature (if given) and summary""" - if self._is_at_section(): - return - - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: - self['Summary'] = summary - - if not self._is_at_section(): - self['Extended Summary'] = self._read_to_next_section() - - def _parse(self): - self._doc.reset() - self._parse_summary() - - for (section,content) in self._read_sections(): - if not section.startswith('..'): - section = ' '.join([s.capitalize() for s in section.split(' ')]) - if section in ('Parameters', 'Returns', 'Raises', 'Warns', - 'Other Parameters', 'Attributes', 'Methods'): - self[section] = self._parse_param_list(content) - elif section.startswith('.. 
index::'): - self['index'] = self._parse_index(section, content) - elif section == 'See Also': - self['See Also'] = self._parse_see_also(content) - else: - self[section] = content - - # string conversion routines - - def _str_header(self, name, symbol='-'): - return [name, len(name)*symbol] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - if self['Signature']: - return [self['Signature'].replace('*','\*')] + [''] - else: - return [''] - - def _str_summary(self): - if self['Summary']: - return self['Summary'] + [''] - else: - return [] - - def _str_extended_summary(self): - if self['Extended Summary']: - return self['Extended Summary'] + [''] - else: - return [] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_header(name) - for param,param_type,desc in self[name]: - out += ['%s : %s' % (param, param_type)] - out += self._str_indent(desc) - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += self[name] - out += [''] - return out - - def _str_see_also(self, func_role): - if not self['See Also']: return [] - out = [] - out += self._str_header("See Also") - last_had_desc = True - for func, desc, role in self['See Also']: - if role: - link = ':%s:`%s`' % (role, func) - elif func_role: - link = ':%s:`%s`' % (func_role, func) - else: - link = "`%s`_" % func - if desc or last_had_desc: - out += [''] - out += [link] - else: - out[-1] += ", %s" % link - if desc: - out += self._str_indent([' '.join(desc)]) - last_had_desc = True - else: - last_had_desc = False - out += [''] - return out - - def _str_index(self): - idx = self['index'] - out = [] - out += ['.. 
index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - out += [' :%s: %s' % (section, ', '.join(references))] - return out - - def __str__(self, func_role=''): - out = [] - out += self._str_signature() - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_section('Warnings') - out += self._str_see_also(func_role) - for s in ('Notes','References','Examples'): - out += self._str_section(s) - for param_list in ('Attributes', 'Methods'): - out += self._str_param_list(param_list) - out += self._str_index() - return '\n'.join(out) - - -def indent(str,indent=4): - indent_str = ' '*indent - if str is None: - return indent_str - lines = str.split('\n') - return '\n'.join(indent_str + l for l in lines) - -def dedent_lines(lines): - """Deindent a list of lines maximally""" - return textwrap.dedent("\n".join(lines)).split("\n") - -def header(text, style='-'): - return text + '\n' + style*len(text) + '\n' - - -class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func', doc=None, config={}): - self._f = func - self._role = role # e.g. 
"func" or "meth" - - if doc is None: - if func is None: - raise ValueError("No function or docstring given") - doc = inspect.getdoc(func) or '' - NumpyDocString.__init__(self, doc) - - if not self['Signature'] and func is not None: - func, func_name = self.get_func() - try: - # try to read signature - argspec = inspect.getargspec(func) - argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*','\*') - signature = '%s%s' % (func_name, argspec) - except TypeError, e: - signature = '%s()' % func_name - self['Signature'] = signature - - def get_func(self): - func_name = getattr(self._f, '__name__', self.__class__.__name__) - if inspect.isclass(self._f): - func = getattr(self._f, '__call__', self._f.__init__) - else: - func = self._f - return func, func_name - - def __str__(self): - out = '' - - func, func_name = self.get_func() - signature = self['Signature'].replace('*', '\*') - - roles = {'func': 'function', - 'meth': 'method'} - - if self._role: - if not roles.has_key(self._role): - print "Warning: invalid role %s" % self._role - out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), - func_name) - - out += super(FunctionDoc, self).__str__(func_role=self._role) - return out - - -class ClassDoc(NumpyDocString): - def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config={}): - if not inspect.isclass(cls) and cls is not None: - raise ValueError("Expected a class or None, but got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - - if doc is None: - if cls is None: - raise ValueError("No class or documentation string given") - doc = pydoc.getdoc(cls) - - NumpyDocString.__init__(self, doc) - - if config.get('show_class_members', True): - if not self['Methods']: - self['Methods'] = [(name, '', '') - for name in sorted(self.methods)] - if not self['Attributes']: - self['Attributes'] = [(name, '', '') - for name in sorted(self.properties)] - - @property - def methods(self): - if self._cls is None: - return [] - return [name for name,func in inspect.getmembers(self._cls) - if not name.startswith('_') and callable(func)] - - @property - def properties(self): - if self._cls is None: - return [] - return [name for name,func in inspect.getmembers(self._cls) - if not name.startswith('_') and func is None] diff --git a/numpy-1.6.2/doc/sphinxext/docscrape_sphinx.py b/numpy-1.6.2/doc/sphinxext/docscrape_sphinx.py deleted file mode 100644 index e44e770ef8..0000000000 --- a/numpy-1.6.2/doc/sphinxext/docscrape_sphinx.py +++ /dev/null @@ -1,227 +0,0 @@ -import re, inspect, textwrap, pydoc -import sphinx -from docscrape import NumpyDocString, FunctionDoc, ClassDoc - -class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config={}): - self.use_plots = config.get('use_plots', False) - NumpyDocString.__init__(self, docstring, config=config) - - # string conversion routines - def _str_header(self, name, symbol='`'): - return ['.. 
rubric:: ' + name, ''] - - def _str_field_list(self, name): - return [':' + name + ':'] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - return [''] - if self['Signature']: - return ['``%s``' % self['Signature']] + [''] - else: - return [''] - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Extended Summary'] + [''] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_field_list(name) - out += [''] - for param,param_type,desc in self[name]: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) - out += [''] - out += self._str_indent(desc,8) - out += [''] - return out - - @property - def _obj(self): - if hasattr(self, '_cls'): - return self._cls - elif hasattr(self, '_f'): - return self._f - return None - - def _str_member_list(self, name): - """ - Generate a member listing, autosummary:: table where possible, - and a table where not. - - """ - out = [] - if self[name]: - out += ['.. rubric:: %s' % name, ''] - prefix = getattr(self, '_name', '') - - if prefix: - prefix = '~%s.' % prefix - - autosum = [] - others = [] - for param, param_type, desc in self[name]: - param = param.strip() - if not self._obj or hasattr(self._obj, param): - autosum += [" %s%s" % (prefix, param)] - else: - others.append((param, param_type, desc)) - - if autosum: - out += ['.. 
autosummary::', ' :toctree:', ''] - out += autosum - - if others: - maxlen_0 = max([len(x[0]) for x in others]) - maxlen_1 = max([len(x[1]) for x in others]) - hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 - fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) - n_indent = maxlen_0 + maxlen_1 + 4 - out += [hdr] - for param, param_type, desc in others: - out += [fmt % (param.strip(), param_type)] - out += self._str_indent(desc, n_indent) - out += [hdr] - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += [''] - content = textwrap.dedent("\n".join(self[name])).split("\n") - out += content - out += [''] - return out - - def _str_see_also(self, func_role): - out = [] - if self['See Also']: - see_also = super(SphinxDocString, self)._str_see_also(func_role) - out = ['.. seealso::', ''] - out += self._str_indent(see_also[2:]) - return out - - def _str_warnings(self): - out = [] - if self['Warnings']: - out = ['.. warning::', ''] - out += self._str_indent(self['Warnings']) - return out - - def _str_index(self): - idx = self['index'] - out = [] - if len(idx) == 0: - return out - - out += ['.. index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - elif section == 'refguide': - out += [' single: %s' % (', '.join(references))] - else: - out += [' %s: %s' % (section, ','.join(references))] - return out - - def _str_references(self): - out = [] - if self['References']: - out += self._str_header('References') - if isinstance(self['References'], str): - self['References'] = [self['References']] - out.extend(self['References']) - out += [''] - # Latex collects all references to a separate bibliography, - # so we need to insert links to it - if sphinx.__version__ >= "0.6": - out += ['.. only:: latex',''] - else: - out += ['.. latexonly::',''] - items = [] - for line in self['References']: - m = re.match(r'.. 
\[([a-z0-9._-]+)\]', line, re.I) - if m: - items.append(m.group(1)) - out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] - return out - - def _str_examples(self): - examples_str = "\n".join(self['Examples']) - - if (self.use_plots and 'import matplotlib' in examples_str - and 'plot::' not in examples_str): - out = [] - out += self._str_header('Examples') - out += ['.. plot::', ''] - out += self._str_indent(self['Examples']) - out += [''] - return out - else: - return self._str_section('Examples') - - def __str__(self, indent=0, func_role="obj"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_warnings() - out += self._str_see_also(func_role) - out += self._str_section('Notes') - out += self._str_references() - out += self._str_examples() - for param_list in ('Attributes', 'Methods'): - out += self._str_member_list(param_list) - out = self._str_indent(out,indent) - return '\n'.join(out) - -class SphinxFunctionDoc(SphinxDocString, FunctionDoc): - def __init__(self, obj, doc=None, config={}): - self.use_plots = config.get('use_plots', False) - FunctionDoc.__init__(self, obj, doc=doc, config=config) - -class SphinxClassDoc(SphinxDocString, ClassDoc): - def __init__(self, obj, doc=None, func_doc=None, config={}): - self.use_plots = config.get('use_plots', False) - ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - -class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config={}): - self._f = obj - SphinxDocString.__init__(self, doc, config=config) - -def get_doc_object(obj, what=None, doc=None, config={}): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 
'object' - if what == 'class': - return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, - config=config) - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, doc=doc, config=config) - else: - if doc is None: - doc = pydoc.getdoc(obj) - return SphinxObjDoc(obj, doc, config=config) diff --git a/numpy-1.6.2/doc/sphinxext/numpydoc.py b/numpy-1.6.2/doc/sphinxext/numpydoc.py deleted file mode 100644 index 43c67336b5..0000000000 --- a/numpy-1.6.2/doc/sphinxext/numpydoc.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -======== -numpydoc -======== - -Sphinx extension that handles docstrings in the Numpy standard format. [1] - -It will: - -- Convert Parameters etc. sections to field lists. -- Convert See Also section to a See also entry. -- Renumber references. -- Extract the signature from the docstring, if it can't be determined otherwise. - -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard - -""" - -import sphinx - -if sphinx.__version__ < '1.0.1': - raise RuntimeError("Sphinx 1.0.1 or newer is required") - -import os, re, pydoc -from docscrape_sphinx import get_doc_object, SphinxDocString -from sphinx.util.compat import Directive -import inspect - -def mangle_docstrings(app, what, name, obj, options, lines, - reference_offset=[0]): - - cfg = dict(use_plots=app.config.numpydoc_use_plots, - show_class_members=app.config.numpydoc_show_class_members) - - if what == 'module': - # Strip top title - title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', - re.I|re.S) - lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") - else: - doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) - lines[:] = unicode(doc).split(u"\n") - - if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ - obj.__name__: - if hasattr(obj, '__module__'): - v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) - else: - v = dict(full_name=obj.__name__) - lines += [u'', u'.. 
htmlonly::', ''] - lines += [u' %s' % x for x in - (app.config.numpydoc_edit_link % v).split("\n")] - - # replace reference numbers so that there are no duplicates - references = [] - for line in lines: - line = line.strip() - m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) - if m: - references.append(m.group(1)) - - # start renaming from the longest string, to avoid overwriting parts - references.sort(key=lambda x: -len(x)) - if references: - for i, line in enumerate(lines): - for r in references: - if re.match(ur'^\d+$', r): - new_r = u"R%d" % (reference_offset[0] + int(r)) - else: - new_r = u"%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace(u'[%s]_' % r, - u'[%s]_' % new_r) - lines[i] = lines[i].replace(u'.. [%s]' % r, - u'.. [%s]' % new_r) - - reference_offset[0] += len(references) - -def mangle_signature(app, what, name, obj, options, sig, retann): - # Do not try to inspect classes that don't define `__init__` - if (inspect.isclass(obj) and - (not hasattr(obj, '__init__') or - 'initializes x; see ' in pydoc.getdoc(obj.__init__))): - return '', '' - - if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return - if not hasattr(obj, '__doc__'): return - - doc = SphinxDocString(pydoc.getdoc(obj)) - if doc['Signature']: - sig = re.sub(u"^[^(]*", u"", doc['Signature']) - return sig, u'' - -def setup(app, get_doc_object_=get_doc_object): - global get_doc_object - get_doc_object = get_doc_object_ - - app.connect('autodoc-process-docstring', mangle_docstrings) - app.connect('autodoc-process-signature', mangle_signature) - app.add_config_value('numpydoc_edit_link', None, False) - app.add_config_value('numpydoc_use_plots', None, False) - app.add_config_value('numpydoc_show_class_members', True, True) - - # Extra mangling domains - app.add_domain(NumpyPythonDomain) - app.add_domain(NumpyCDomain) - -#------------------------------------------------------------------------------ -# Docstring-mangling domains 
-#------------------------------------------------------------------------------ - -from docutils.statemachine import ViewList -from sphinx.domains.c import CDomain -from sphinx.domains.python import PythonDomain - -class ManglingDomainBase(object): - directive_mangling_map = {} - - def __init__(self, *a, **kw): - super(ManglingDomainBase, self).__init__(*a, **kw) - self.wrap_mangling_directives() - - def wrap_mangling_directives(self): - for name, objtype in self.directive_mangling_map.items(): - self.directives[name] = wrap_mangling_directive( - self.directives[name], objtype) - -class NumpyPythonDomain(ManglingDomainBase, PythonDomain): - name = 'np' - directive_mangling_map = { - 'function': 'function', - 'class': 'class', - 'exception': 'class', - 'method': 'function', - 'classmethod': 'function', - 'staticmethod': 'function', - 'attribute': 'attribute', - } - -class NumpyCDomain(ManglingDomainBase, CDomain): - name = 'np-c' - directive_mangling_map = { - 'function': 'function', - 'member': 'attribute', - 'macro': 'function', - 'type': 'class', - 'var': 'object', - } - -def wrap_mangling_directive(base_directive, objtype): - class directive(base_directive): - def run(self): - env = self.state.document.settings.env - - name = None - if self.arguments: - m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) - name = m.group(2).strip() - - if not name: - name = self.arguments[0] - - lines = list(self.content) - mangle_docstrings(env.app, objtype, name, None, None, lines) - self.content = ViewList(lines, self.content.parent) - - return base_directive.run(self) - - return directive - diff --git a/numpy-1.6.2/doc/sphinxext/phantom_import.py b/numpy-1.6.2/doc/sphinxext/phantom_import.py deleted file mode 100644 index c77eeb544e..0000000000 --- a/numpy-1.6.2/doc/sphinxext/phantom_import.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -============== -phantom_import -============== - -Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar -extensions 
to use docstrings loaded from an XML file. - -This extension loads an XML file in the Pydocweb format [1] and -creates a dummy module that contains the specified docstrings. This -can be used to get the current docstrings from a Pydocweb instance -without needing to rebuild the documented module. - -.. [1] http://code.google.com/p/pydocweb - -""" -import imp, sys, compiler, types, os, inspect, re - -def setup(app): - app.connect('builder-inited', initialize) - app.add_config_value('phantom_import_file', None, True) - -def initialize(app): - fn = app.config.phantom_import_file - if (fn and os.path.isfile(fn)): - print "[numpydoc] Phantom importing modules from", fn, "..." - import_phantom_module(fn) - -#------------------------------------------------------------------------------ -# Creating 'phantom' modules from an XML description -#------------------------------------------------------------------------------ -def import_phantom_module(xml_file): - """ - Insert a fake Python module to sys.modules, based on a XML file. - - The XML file is expected to conform to Pydocweb DTD. The fake - module will contain dummy objects, which guarantee the following: - - - Docstrings are correct. - - Class inheritance relationships are correct (if present in XML). - - Function argspec is *NOT* correct (even if present in XML). - Instead, the function signature is prepended to the function docstring. - - Class attributes are *NOT* correct; instead, they are dummy objects. 
- - Parameters - ---------- - xml_file : str - Name of an XML file to read - - """ - import lxml.etree as etree - - object_cache = {} - - tree = etree.parse(xml_file) - root = tree.getroot() - - # Sort items so that - # - Base classes come before classes inherited from them - # - Modules come before their contents - all_nodes = dict([(n.attrib['id'], n) for n in root]) - - def _get_bases(node, recurse=False): - bases = [x.attrib['ref'] for x in node.findall('base')] - if recurse: - j = 0 - while True: - try: - b = bases[j] - except IndexError: break - if b in all_nodes: - bases.extend(_get_bases(all_nodes[b])) - j += 1 - return bases - - type_index = ['module', 'class', 'callable', 'object'] - - def base_cmp(a, b): - x = cmp(type_index.index(a.tag), type_index.index(b.tag)) - if x != 0: return x - - if a.tag == 'class' and b.tag == 'class': - a_bases = _get_bases(a, recurse=True) - b_bases = _get_bases(b, recurse=True) - x = cmp(len(a_bases), len(b_bases)) - if x != 0: return x - if a.attrib['id'] in b_bases: return -1 - if b.attrib['id'] in a_bases: return 1 - - return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) - - nodes = root.getchildren() - nodes.sort(base_cmp) - - # Create phantom items - for node in nodes: - name = node.attrib['id'] - doc = (node.text or '').decode('string-escape') + "\n" - if doc == "\n": doc = "" - - # create parent, if missing - parent = name - while True: - parent = '.'.join(parent.split('.')[:-1]) - if not parent: break - if parent in object_cache: break - obj = imp.new_module(parent) - object_cache[parent] = obj - sys.modules[parent] = obj - - # create object - if node.tag == 'module': - obj = imp.new_module(name) - obj.__doc__ = doc - sys.modules[name] = obj - elif node.tag == 'class': - bases = [object_cache[b] for b in _get_bases(node) - if b in object_cache] - bases.append(object) - init = lambda self: None - init.__doc__ = doc - obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) - obj.__name__ = 
name.split('.')[-1] - elif node.tag == 'callable': - funcname = node.attrib['id'].split('.')[-1] - argspec = node.attrib.get('argspec') - if argspec: - argspec = re.sub('^[^(]*', '', argspec) - doc = "%s%s\n\n%s" % (funcname, argspec, doc) - obj = lambda: 0 - obj.__argspec_is_invalid_ = True - obj.func_name = funcname - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__objclass__ = object_cache[parent] - else: - class Dummy(object): pass - obj = Dummy() - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__get__ = lambda: None - object_cache[name] = obj - - if parent: - if inspect.ismodule(object_cache[parent]): - obj.__module__ = parent - setattr(object_cache[parent], name.split('.')[-1], obj) - - # Populate items - for node in root: - obj = object_cache.get(node.attrib['id']) - if obj is None: continue - for ref in node.findall('ref'): - if node.tag == 'class': - if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) - else: - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) diff --git a/numpy-1.6.2/doc/sphinxext/plot_directive.py b/numpy-1.6.2/doc/sphinxext/plot_directive.py deleted file mode 100644 index 80801e7986..0000000000 --- a/numpy-1.6.2/doc/sphinxext/plot_directive.py +++ /dev/null @@ -1,636 +0,0 @@ -""" -A special directive for generating a matplotlib plot. - -.. warning:: - - This is a hacked version of plot_directive.py from Matplotlib. - It's very much subject to change! - - -Usage ------ - -Can be used like this:: - - .. plot:: examples/example.py - - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3], [4,5,6]) - - .. plot:: - - A plotting example: - - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3], [4,5,6]) - -The content is interpreted as doctest formatted if it has a line starting -with ``>>>``. 
- -The ``plot`` directive supports the options - - format : {'python', 'doctest'} - Specify the format of the input - - include-source : bool - Whether to display the source code. Default can be changed in conf.py - -and the ``image`` directive options ``alt``, ``height``, ``width``, -``scale``, ``align``, ``class``. - -Configuration options ---------------------- - -The plot directive has the following configuration options: - - plot_include_source - Default value for the include-source option - - plot_pre_code - Code that should be executed before each plot. - - plot_basedir - Base directory, to which plot:: file names are relative to. - (If None or empty, file names are relative to the directoly where - the file containing the directive is.) - - plot_formats - File formats to generate. List of tuples or strings:: - - [(suffix, dpi), suffix, ...] - - that determine the file format and the DPI. For entries whose - DPI was omitted, sensible defaults are chosen. - - plot_html_show_formats - Whether to show links to the files in HTML. - -TODO ----- - -* Refactor Latex output; now it's plain images, but it would be nice - to make them appear side-by-side, or in floats. 
- -""" - -import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback -import sphinx - -import warnings -warnings.warn("A plot_directive module is also available under " - "matplotlib.sphinxext; expect this numpydoc.plot_directive " - "module to be deprecated after relevant features have been " - "integrated there.", - FutureWarning, stacklevel=2) - - -#------------------------------------------------------------------------------ -# Registration hook -#------------------------------------------------------------------------------ - -def setup(app): - setup.app = app - setup.config = app.config - setup.confdir = app.confdir - - app.add_config_value('plot_pre_code', '', True) - app.add_config_value('plot_include_source', False, True) - app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) - app.add_config_value('plot_basedir', None, True) - app.add_config_value('plot_html_show_formats', True, True) - - app.add_directive('plot', plot_directive, True, (0, 1, False), - **plot_directive_options) - -#------------------------------------------------------------------------------ -# plot:: directive -#------------------------------------------------------------------------------ -from docutils.parsers.rst import directives -from docutils import nodes - -def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return run(arguments, content, options, state_machine, state, lineno) -plot_directive.__doc__ = __doc__ - -def _option_boolean(arg): - if not arg or not arg.strip(): - # no argument given, assume used as a flag - return True - elif arg.strip().lower() in ('no', '0', 'false'): - return False - elif arg.strip().lower() in ('yes', '1', 'true'): - return True - else: - raise ValueError('"%s" unknown boolean' % arg) - -def _option_format(arg): - return directives.choice(arg, ('python', 'lisp')) - -def _option_align(arg): - return directives.choice(arg, ("top", "middle", 
"bottom", "left", "center", - "right")) - -plot_directive_options = {'alt': directives.unchanged, - 'height': directives.length_or_unitless, - 'width': directives.length_or_percentage_or_unitless, - 'scale': directives.nonnegative_int, - 'align': _option_align, - 'class': directives.class_option, - 'include-source': _option_boolean, - 'format': _option_format, - } - -#------------------------------------------------------------------------------ -# Generating output -#------------------------------------------------------------------------------ - -from docutils import nodes, utils - -try: - # Sphinx depends on either Jinja or Jinja2 - import jinja2 - def format_template(template, **kw): - return jinja2.Template(template).render(**kw) -except ImportError: - import jinja - def format_template(template, **kw): - return jinja.from_string(template, **kw) - -TEMPLATE = """ -{{ source_code }} - -{{ only_html }} - - {% if source_link or (html_show_formats and not multi_image) %} - ( - {%- if source_link -%} - `Source code <{{ source_link }}>`__ - {%- endif -%} - {%- if html_show_formats and not multi_image -%} - {%- for img in images -%} - {%- for fmt in img.formats -%} - {%- if source_link or not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - {%- endfor -%} - {%- endif -%} - ) - {% endif %} - - {% for img in images %} - .. figure:: {{ build_dir }}/{{ img.basename }}.png - {%- for option in options %} - {{ option }} - {% endfor %} - - {% if html_show_formats and multi_image -%} - ( - {%- for fmt in img.formats -%} - {%- if not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - ) - {%- endif -%} - {% endfor %} - -{{ only_latex }} - - {% for img in images %} - .. 
image:: {{ build_dir }}/{{ img.basename }}.pdf - {% endfor %} - -""" - -class ImageFile(object): - def __init__(self, basename, dirname): - self.basename = basename - self.dirname = dirname - self.formats = [] - - def filename(self, format): - return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) - - def filenames(self): - return [self.filename(fmt) for fmt in self.formats] - -def run(arguments, content, options, state_machine, state, lineno): - if arguments and content: - raise RuntimeError("plot:: directive can't have both args and content") - - document = state_machine.document - config = document.settings.env.config - - options.setdefault('include-source', config.plot_include_source) - - # determine input - rst_file = document.attributes['source'] - rst_dir = os.path.dirname(rst_file) - - if arguments: - if not config.plot_basedir: - source_file_name = os.path.join(rst_dir, - directives.uri(arguments[0])) - else: - source_file_name = os.path.join(setup.confdir, config.plot_basedir, - directives.uri(arguments[0])) - code = open(source_file_name, 'r').read() - output_base = os.path.basename(source_file_name) - else: - source_file_name = rst_file - code = textwrap.dedent("\n".join(map(str, content))) - counter = document.attributes.get('_plot_counter', 0) + 1 - document.attributes['_plot_counter'] = counter - base, ext = os.path.splitext(os.path.basename(source_file_name)) - output_base = '%s-%d.py' % (base, counter) - - base, source_ext = os.path.splitext(output_base) - if source_ext in ('.py', '.rst', '.txt'): - output_base = base - else: - source_ext = '' - - # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace('.', '-') - - # is it in doctest format? 
- is_doctest = contains_doctest(code) - if options.has_key('format'): - if options['format'] == 'python': - is_doctest = False - else: - is_doctest = True - - # determine output directory name fragment - source_rel_name = relpath(source_file_name, setup.confdir) - source_rel_dir = os.path.dirname(source_rel_name) - while source_rel_dir.startswith(os.path.sep): - source_rel_dir = source_rel_dir[1:] - - # build_dir: where to place output files (temporarily) - build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), - 'plot_directive', - source_rel_dir) - if not os.path.exists(build_dir): - os.makedirs(build_dir) - - # output_dir: final location in the builder's directory - dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, - source_rel_dir)) - - # how to link to files from the RST file - dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), - source_rel_dir).replace(os.path.sep, '/') - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') - source_link = dest_dir_link + '/' + output_base + source_ext - - # make figures - try: - results = makefig(code, source_file_name, build_dir, output_base, - config) - errors = [] - except PlotError, err: - reporter = state.memo.reporter - sm = reporter.system_message( - 2, "Exception occurred in plotting %s: %s" % (output_base, err), - line=lineno) - results = [(code, [])] - errors = [sm] - - # generate output restructuredtext - total_lines = [] - for j, (code_piece, images) in enumerate(results): - if options['include-source']: - if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code_piece.split('\n')] - else: - lines = ['.. code-block:: python', ''] - lines += [' %s' % row.rstrip() - for row in code_piece.split('\n')] - source_code = "\n".join(lines) - else: - source_code = "" - - opts = [':%s: %s' % (key, val) for key, val in options.items() - if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] - - only_html = ".. only:: html" - only_latex = ".. 
only:: latex" - - if j == 0: - src_link = source_link - else: - src_link = None - - result = format_template( - TEMPLATE, - dest_dir=dest_dir_link, - build_dir=build_dir_link, - source_link=src_link, - multi_image=len(images) > 1, - only_html=only_html, - only_latex=only_latex, - options=opts, - images=images, - source_code=source_code, - html_show_formats=config.plot_html_show_formats) - - total_lines.extend(result.split("\n")) - total_lines.extend("\n") - - if total_lines: - state_machine.insert_input(total_lines, source=source_file_name) - - # copy image files to builder's output directory - if not os.path.exists(dest_dir): - os.makedirs(dest_dir) - - for code_piece, images in results: - for img in images: - for fn in img.filenames(): - shutil.copyfile(fn, os.path.join(dest_dir, - os.path.basename(fn))) - - # copy script (if necessary) - if source_file_name == rst_file: - target_name = os.path.join(dest_dir, output_base + source_ext) - f = open(target_name, 'w') - f.write(unescape_doctest(code)) - f.close() - - return errors - - -#------------------------------------------------------------------------------ -# Run code and capture figures -#------------------------------------------------------------------------------ - -import matplotlib -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import matplotlib.image as image -from matplotlib import _pylab_helpers - -import exceptions - -def contains_doctest(text): - try: - # check if it's valid Python as-is - compile(text, '', 'exec') - return False - except SyntaxError: - pass - r = re.compile(r'^\s*>>>', re.M) - m = r.search(text) - return bool(m) - -def unescape_doctest(text): - """ - Extract code from a piece of text, which contains either Python code - or doctests. - - """ - if not contains_doctest(text): - return text - - code = "" - for line in text.split("\n"): - m = re.match(r'^\s*(>>>|\.\.\.) 
(.*)$', line) - if m: - code += m.group(2) + "\n" - elif line.strip(): - code += "# " + line.strip() + "\n" - else: - code += "\n" - return code - -def split_code_at_show(text): - """ - Split code at plt.show() - - """ - - parts = [] - is_doctest = contains_doctest(text) - - part = [] - for line in text.split("\n"): - if (not is_doctest and line.strip() == 'plt.show()') or \ - (is_doctest and line.strip() == '>>> plt.show()'): - part.append(line) - parts.append("\n".join(part)) - part = [] - else: - part.append(line) - if "\n".join(part).strip(): - parts.append("\n".join(part)) - return parts - -class PlotError(RuntimeError): - pass - -def run_code(code, code_path, ns=None): - # Change the working directory to the directory of the example, so - # it can get at its data files, if any. - pwd = os.getcwd() - old_sys_path = list(sys.path) - if code_path is not None: - dirname = os.path.abspath(os.path.dirname(code_path)) - os.chdir(dirname) - sys.path.insert(0, dirname) - - # Redirect stdout - stdout = sys.stdout - sys.stdout = cStringIO.StringIO() - - # Reset sys.argv - old_sys_argv = sys.argv - sys.argv = [code_path] - - try: - try: - code = unescape_doctest(code) - if ns is None: - ns = {} - if not ns: - exec setup.config.plot_pre_code in ns - exec code in ns - except (Exception, SystemExit), err: - raise PlotError(traceback.format_exc()) - finally: - os.chdir(pwd) - sys.argv = old_sys_argv - sys.path[:] = old_sys_path - sys.stdout = stdout - return ns - - -#------------------------------------------------------------------------------ -# Generating figures -#------------------------------------------------------------------------------ - -def out_of_date(original, derived): - """ - Returns True if derivative is out-of-date wrt original, - both of which are full file paths. 
- """ - return (not os.path.exists(derived) - or os.stat(derived).st_mtime < os.stat(original).st_mtime) - - -def makefig(code, code_path, output_dir, output_base, config): - """ - Run a pyplot script *code* and save the images under *output_dir* - with file names derived from *output_base* - - """ - - # -- Parse format list - default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} - formats = [] - for fmt in config.plot_formats: - if isinstance(fmt, str): - formats.append((fmt, default_dpi.get(fmt, 80))) - elif type(fmt) in (tuple, list) and len(fmt)==2: - formats.append((str(fmt[0]), int(fmt[1]))) - else: - raise PlotError('invalid image format "%r" in plot_formats' % fmt) - - # -- Try to determine if all images already exist - - code_pieces = split_code_at_show(code) - - # Look for single-figure output files first - all_exists = True - img = ImageFile(output_base, output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - if all_exists: - return [(code, [img])] - - # Then look for multi-figure output files - results = [] - all_exists = True - for i, code_piece in enumerate(code_pieces): - images = [] - for j in xrange(1000): - img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - # assume that if we have one, we have them all - if not all_exists: - all_exists = (j > 0) - break - images.append(img) - if not all_exists: - break - results.append((code_piece, images)) - - if all_exists: - return results - - # -- We didn't find the files, so build them - - results = [] - ns = {} - - for i, code_piece in enumerate(code_pieces): - # Clear between runs - plt.close('all') - - # Run code - run_code(code_piece, code_path, ns) - - # Collect images - images = [] - fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() - for j, 
figman in enumerate(fig_managers): - if len(fig_managers) == 1 and len(code_pieces) == 1: - img = ImageFile(output_base, output_dir) - else: - img = ImageFile("%s_%02d_%02d" % (output_base, i, j), - output_dir) - images.append(img) - for format, dpi in formats: - try: - figman.canvas.figure.savefig(img.filename(format), dpi=dpi) - except exceptions.BaseException, err: - raise PlotError(traceback.format_exc()) - img.formats.append(format) - - # Results - results.append((code_piece, images)) - - return results - - -#------------------------------------------------------------------------------ -# Relative pathnames -#------------------------------------------------------------------------------ - -try: - from os.path import relpath -except ImportError: - # Copied from Python 2.7 - if 'posix' in sys.builtin_module_names: - def relpath(path, start=os.path.curdir): - """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir - - if not path: - raise ValueError("no path specified") - - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - - # Work out how much of the filepath is shared by start and path. 
- i = len(commonprefix([start_list, path_list])) - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) - elif 'nt' in sys.builtin_module_names: - def relpath(path, start=os.path.curdir): - """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir, splitunc - - if not path: - raise ValueError("no path specified") - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - if start_list[0].lower() != path_list[0].lower(): - unc_path, rest = splitunc(path) - unc_start, rest = splitunc(start) - if bool(unc_path) ^ bool(unc_start): - raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" - % (path, start)) - else: - raise ValueError("path is on drive %s, start on drive %s" - % (path_list[0], start_list[0])) - # Work out how much of the filepath is shared by start and path. - for i in range(min(len(start_list), len(path_list))): - if start_list[i].lower() != path_list[i].lower(): - break - else: - i += 1 - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) - else: - raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/numpy-1.6.2/doc/sphinxext/setup.py b/numpy-1.6.2/doc/sphinxext/setup.py deleted file mode 100644 index 76e3fd81bb..0000000000 --- a/numpy-1.6.2/doc/sphinxext/setup.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.core import setup -import setuptools -import sys, os - -version = "0.4" - -setup( - name="numpydoc", - packages=["numpydoc"], - package_dir={"numpydoc": ""}, - version=version, - description="Sphinx extension to support docstrings in Numpy format", - # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers - classifiers=["Development Status :: 3 - Alpha", - "Environment :: Plugins", - "License :: OSI Approved :: BSD License", - "Topic :: Documentation"], - keywords="sphinx numpy", 
- author="Pauli Virtanen and others", - author_email="pav@iki.fi", - url="http://github.com/numpy/numpy/tree/master/doc/sphinxext", - license="BSD", - zip_safe=False, - install_requires=["Sphinx >= 1.0.1"], - package_data={'numpydoc': 'tests', '': ''}, - entry_points={ - "console_scripts": [ - "autosummary_generate = numpydoc.autosummary_generate:main", - ], - }, -) diff --git a/numpy-1.6.2/doc/sphinxext/tests/test_docscrape.py b/numpy-1.6.2/doc/sphinxext/tests/test_docscrape.py deleted file mode 100644 index 6fab79832d..0000000000 --- a/numpy-1.6.2/doc/sphinxext/tests/test_docscrape.py +++ /dev/null @@ -1,615 +0,0 @@ -# -*- encoding:utf-8 -*- - -import sys, os -sys.path.append(os.path.join(os.path.dirname(__file__), '..')) - -from docscrape import NumpyDocString, FunctionDoc, ClassDoc -from docscrape_sphinx import SphinxDocString, SphinxClassDoc -from nose.tools import * - -doc_txt = '''\ - numpy.multivariate_normal(mean, cov, shape=None, spam=None) - - Draw values from a multivariate normal distribution with specified - mean and covariance. - - The multivariate normal or Gaussian distribution is a generalisation - of the one-dimensional normal distribution to higher dimensions. - - Parameters - ---------- - mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - cov : (N,N) ndarray - Covariance matrix of the distribution. - shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - - Returns - ------- - out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - - Other Parameters - ---------------- - spam : parrot - A parrot off its mortal coil. 
- - Raises - ------ - RuntimeError - Some error - - Warns - ----- - RuntimeWarning - Some warning - - Warnings - -------- - Certain warnings apply. - - Notes - ----- - - Instead of specifying the full covariance matrix, popular - approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - - This geometrical property can be seen in two dimensions by plotting - generated data-points: - - >>> mean = [0,0] - >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - - >>> x,y = multivariate_normal(mean,cov,5000).T - >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - - Note that the covariance matrix must be symmetric and non-negative - definite. - - References - ---------- - .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 - .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - - See Also - -------- - some, other, funcs - otherfunc : relationship - - Examples - -------- - >>> mean = (1,2) - >>> cov = [[1,0],[1,0]] - >>> x = multivariate_normal(mean,cov,(3,3)) - >>> print x.shape - (3, 3, 2) - - The following is probably true, given that 0.6 is roughly twice the - standard deviation: - - >>> print list( (x[0,0,:] - mean) < 0.6 ) - [True, True] - - .. 
index:: random - :refguide: random;distributions, random;gauss - - ''' -doc = NumpyDocString(doc_txt) - - -def test_signature(): - assert doc['Signature'].startswith('numpy.multivariate_normal(') - assert doc['Signature'].endswith('spam=None)') - -def test_summary(): - assert doc['Summary'][0].startswith('Draw values') - assert doc['Summary'][-1].endswith('covariance.') - -def test_extended_summary(): - assert doc['Extended Summary'][0].startswith('The multivariate normal') - -def test_parameters(): - assert_equal(len(doc['Parameters']), 3) - assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) - - arg, arg_type, desc = doc['Parameters'][1] - assert_equal(arg_type, '(N,N) ndarray') - assert desc[0].startswith('Covariance matrix') - assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' - -def test_other_parameters(): - assert_equal(len(doc['Other Parameters']), 1) - assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) - arg, arg_type, desc = doc['Other Parameters'][0] - assert_equal(arg_type, 'parrot') - assert desc[0].startswith('A parrot off its mortal coil') - -def test_returns(): - assert_equal(len(doc['Returns']), 1) - arg, arg_type, desc = doc['Returns'][0] - assert_equal(arg, 'out') - assert_equal(arg_type, 'ndarray') - assert desc[0].startswith('The drawn samples') - assert desc[-1].endswith('distribution.') - -def test_notes(): - assert doc['Notes'][0].startswith('Instead') - assert doc['Notes'][-1].endswith('definite.') - assert_equal(len(doc['Notes']), 17) - -def test_references(): - assert doc['References'][0].startswith('..') - assert doc['References'][-1].endswith('2001.') - -def test_examples(): - assert doc['Examples'][0].startswith('>>>') - assert doc['Examples'][-1].endswith('True]') - -def test_index(): - assert_equal(doc['index']['default'], 'random') - print doc['index'] - assert_equal(len(doc['index']), 2) - assert_equal(len(doc['index']['refguide']), 2) - -def non_blank_line_by_line_compare(a,b): - a = [l for l in 
a.split('\n') if l.strip()] - b = [l for l in b.split('\n') if l.strip()] - for n,line in enumerate(a): - if not line == b[n]: - raise AssertionError("Lines %s of a and b differ: " - "\n>>> %s\n<<< %s\n" % - (n,line,b[n])) -def test_str(): - non_blank_line_by_line_compare(str(doc), -"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -Parameters ----------- -mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - -cov : (N,N) ndarray - Covariance matrix of the distribution. -shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -Returns -------- -out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - -Other Parameters ----------------- -spam : parrot - A parrot off its mortal coil. - -Raises ------- -RuntimeError : - Some error - -Warns ------ -RuntimeWarning : - Some warning - -Warnings --------- -Certain warnings apply. 
- -See Also --------- -`some`_, `other`_, `funcs`_ - -`otherfunc`_ - relationship - -Notes ------ -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -References ----------- -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -Examples --------- ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] - -.. index:: random - :refguide: random;distributions, random;gauss""") - - -def test_sphinx_str(): - sphinx_doc = SphinxDocString(doc_txt) - non_blank_line_by_line_compare(str(sphinx_doc), -""" -.. index:: random - single: random;distributions, random;gauss - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -:Parameters: - - **mean** : (N,) ndarray - - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - **cov** : (N,N) ndarray - - Covariance matrix of the distribution. 
- - **shape** : tuple of ints - - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -:Returns: - - **out** : ndarray - - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - -:Other Parameters: - - **spam** : parrot - - A parrot off its mortal coil. - -:Raises: - - **RuntimeError** : - - Some error - -:Warns: - - **RuntimeWarning** : - - Some warning - -.. warning:: - - Certain warnings apply. - -.. seealso:: - - :obj:`some`, :obj:`other`, :obj:`funcs` - - :obj:`otherfunc` - relationship - -.. rubric:: Notes - -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -.. rubric:: References - -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -.. only:: latex - - [1]_, [2]_ - -.. 
rubric:: Examples - ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] -""") - - -doc2 = NumpyDocString(""" - Returns array of indices of the maximum values of along the given axis. - - Parameters - ---------- - a : {array_like} - Array to look in. - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis""") - -def test_parameters_without_extended_description(): - assert_equal(len(doc2['Parameters']), 2) - -doc3 = NumpyDocString(""" - my_signature(*params, **kwds) - - Return this and that. - """) - -def test_escape_stars(): - signature = str(doc3).split('\n')[0] - assert_equal(signature, 'my_signature(\*params, \*\*kwds)') - -doc4 = NumpyDocString( - """a.conj() - - Return an array with all complex-valued elements conjugated.""") - -def test_empty_extended_summary(): - assert_equal(doc4['Extended Summary'], []) - -doc5 = NumpyDocString( - """ - a.something() - - Raises - ------ - LinAlgException - If array is singular. 
- - Warns - ----- - SomeWarning - If needed - """) - -def test_raises(): - assert_equal(len(doc5['Raises']), 1) - name,_,desc = doc5['Raises'][0] - assert_equal(name,'LinAlgException') - assert_equal(desc,['If array is singular.']) - -def test_warns(): - assert_equal(len(doc5['Warns']), 1) - name,_,desc = doc5['Warns'][0] - assert_equal(name,'SomeWarning') - assert_equal(desc,['If needed']) - -def test_see_also(): - doc6 = NumpyDocString( - """ - z(x,theta) - - See Also - -------- - func_a, func_b, func_c - func_d : some equivalent func - foo.func_e : some other func over - multiple lines - func_f, func_g, :meth:`func_h`, func_j, - func_k - :obj:`baz.obj_q` - :class:`class_j`: fubar - foobar - """) - - assert len(doc6['See Also']) == 12 - for func, desc, role in doc6['See Also']: - if func in ('func_a', 'func_b', 'func_c', 'func_f', - 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): - assert(not desc) - else: - assert(desc) - - if func == 'func_h': - assert role == 'meth' - elif func == 'baz.obj_q': - assert role == 'obj' - elif func == 'class_j': - assert role == 'class' - else: - assert role is None - - if func == 'func_d': - assert desc == ['some equivalent func'] - elif func == 'foo.func_e': - assert desc == ['some other func over', 'multiple lines'] - elif func == 'class_j': - assert desc == ['fubar', 'foobar'] - -def test_see_also_print(): - class Dummy(object): - """ - See Also - -------- - func_a, func_b - func_c : some relationship - goes here - func_d - """ - pass - - obj = Dummy() - s = str(FunctionDoc(obj, role='func')) - assert(':func:`func_a`, :func:`func_b`' in s) - assert(' some relationship' in s) - assert(':func:`func_d`' in s) - -doc7 = NumpyDocString(""" - - Doc starts on second line. 
- - """) - -def test_empty_first_line(): - assert doc7['Summary'][0].startswith('Doc starts') - - -def test_no_summary(): - str(SphinxDocString(""" - Parameters - ----------""")) - - -def test_unicode(): - doc = SphinxDocString(""" - öäöäöäöäöåååå - - öäöäöäööäååå - - Parameters - ---------- - ååå : äää - ööö - - Returns - ------- - ååå : ööö - äää - - """) - assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8') - -def test_plot_examples(): - cfg = dict(use_plots=True) - - doc = SphinxDocString(""" - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3],[4,5,6]) - >>> plt.show() - """, config=cfg) - assert 'plot::' in str(doc), str(doc) - - doc = SphinxDocString(""" - Examples - -------- - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3],[4,5,6]) - plt.show() - """, config=cfg) - assert str(doc).count('plot::') == 1, str(doc) - -def test_class_members(): - - class Dummy(object): - """ - Dummy class. - - """ - def spam(self, a, b): - """Spam\n\nSpam spam.""" - pass - def ham(self, c, d): - """Cheese\n\nNo cheese.""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(Dummy, config=dict(show_class_members=False)) - assert 'Methods' not in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' not in str(doc), (cls, str(doc)) - - doc = cls(Dummy, config=dict(show_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. 
autosummary::' in str(doc), str(doc) - -if __name__ == "__main__": - import nose - nose.run() - diff --git a/numpy-1.6.2/doc/sphinxext/traitsdoc.py b/numpy-1.6.2/doc/sphinxext/traitsdoc.py deleted file mode 100644 index 0fcf2c1cd3..0000000000 --- a/numpy-1.6.2/doc/sphinxext/traitsdoc.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -========= -traitsdoc -========= - -Sphinx extension that handles docstrings in the Numpy standard format, [1] -and support Traits [2]. - -This extension can be used as a replacement for ``numpydoc`` when support -for Traits is required. - -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard -.. [2] http://code.enthought.com/projects/traits/ - -""" - -import inspect -import os -import pydoc - -import docscrape -import docscrape_sphinx -from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString - -import numpydoc - -import comment_eater - -class SphinxTraitsDoc(SphinxClassDoc): - def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): - if not inspect.isclass(cls): - raise ValueError("Initialise using a class. Got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - self._name = cls.__name__ - self._func_doc = func_doc - - docstring = pydoc.getdoc(cls) - docstring = docstring.split('\n') - - # De-indent paragraph - try: - indent = min(len(s) - len(s.lstrip()) for s in docstring - if s.strip()) - except ValueError: - indent = 0 - - for n,line in enumerate(docstring): - docstring[n] = docstring[n][indent:] - - self._doc = docscrape.Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': '', - 'Description': [], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Traits': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'References': '', - 'Example': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Description'] + self['Extended Summary'] + [''] - - def __str__(self, indent=0, func_role="func"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Traits', 'Methods', - 'Returns','Raises'): - out += self._str_param_list(param_list) - out += self._str_see_also("obj") - out += self._str_section('Notes') - out += self._str_references() - out += self._str_section('Example') - out += self._str_section('Examples') - out = self._str_indent(out,indent) - return '\n'.join(out) - -def looks_like_issubclass(obj, classname): - """ Return True if the object has a class or superclass with the given class - name. - - Ignores old-style classes. 
- """ - t = obj - if t.__name__ == classname: - return True - for klass in t.__mro__: - if klass.__name__ == classname: - return True - return False - -def get_doc_object(obj, what=None, config=None): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 'object' - if what == 'class': - doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) - if looks_like_issubclass(obj, 'HasTraits'): - for name, trait, comment in comment_eater.get_class_traits(obj): - # Exclude private traits. - if not name.startswith('_'): - doc['Traits'].append((name, trait, comment.splitlines())) - return doc - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '', config=config) - else: - return SphinxDocString(pydoc.getdoc(obj), config=config) - -def setup(app): - # init numpydoc - numpydoc.setup(app, get_doc_object) - diff --git a/numpy-1.6.2/doc/swig/Makefile b/numpy-1.6.2/doc/swig/Makefile deleted file mode 100644 index 79eb33fcc1..0000000000 --- a/numpy-1.6.2/doc/swig/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -# List all of the subdirectories here for recursive make -SUBDIRS = test doc - -# Default target -.PHONY : default -default: - @echo "There is no default make target for this Makefile" - @echo "Valid make targets are:" - @echo " test - Compile and run tests of numpy.i" - @echo " doc - Generate numpy.i documentation" - @echo " all - make test + doc" - @echo " clean - Remove generated files recursively" - -# Target all -.PHONY : all -all: $(SUBDIRS) - -# Target test -.PHONY : test -test: - cd $@ && make $@ - -# Target clean -.PHONY : clean -clean: - @for dir in $(SUBDIRS); do \ - echo ; \ - echo Running \'make clean\' in $$dir; \ - cd $$dir && make clean && cd ..; \ - done; \ - echo diff --git a/numpy-1.6.2/doc/swig/README b/numpy-1.6.2/doc/swig/README deleted file mode 100644 index 4a10e436b6..0000000000 --- 
a/numpy-1.6.2/doc/swig/README +++ /dev/null @@ -1,115 +0,0 @@ -Notes for the numpy/doc/swig directory -====================================== - -This set of files is for developing and testing file numpy.i, which is -intended to be a set of typemaps for helping SWIG interface between C -and C++ code that uses C arrays and the python module NumPy. It is -ultimately hoped that numpy.i will be included as part of the SWIG -distribution. - -Documentation -------------- -Documentation for how to use numpy.i, as well as for the testing system -used here, can be found in the NumPy reference guide. - -Testing -------- -The tests are a good example of what we are trying to do with numpy.i. -The files related to testing are are in the test subdirectory:: - - Vector.h - Vector.cxx - Vector.i - testVector.py - - Matrix.h - Matrix.cxx - Matrix.i - testMatrix.py - - Tensor.h - Tensor.cxx - Tensor.i - testTensor.py - -The header files contain prototypes for functions that illustrate the -wrapping issues we wish to address. Right now, this consists of -functions with argument signatures of the following forms. 
Vector.h:: - - (type IN_ARRAY1[ANY]) - (type* IN_ARRAY1, int DIM1) - (int DIM1, type* IN_ARRAY1) - - (type INPLACE_ARRAY1[ANY]) - (type* INPLACE_ARRAY1, int DIM1) - (int DIM1, type* INPLACE_ARRAY1) - - (type ARGOUT_ARRAY1[ANY]) - (type* ARGOUT_ARRAY1, int DIM1) - (int DIM1, type* ARGOUT_ARRAY1) - -Matrix.h:: - - (type IN_ARRAY2[ANY][ANY]) - (type* IN_ARRAY2, int DIM1, int DIM2) - (int DIM1, int DIM2, type* IN_ARRAY2) - - (type INPLACE_ARRAY2[ANY][ANY]) - (type* INPLACE_ARRAY2, int DIM1, int DIM2) - (int DIM1, int DIM2, type* INPLACE_ARRAY2) - - (type ARGOUT_ARRAY2[ANY][ANY]) - -Tensor.h:: - - (type IN_ARRAY3[ANY][ANY][ANY]) - (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) - (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) - - (type INPLACE_ARRAY3[ANY][ANY][ANY]) - (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) - (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) - - (type ARGOUT_ARRAY3[ANY][ANY][ANY]) - -These function signatures take a pointer to an array of type "type", -whose length is specified by the integer(s) DIM1 (and DIM2, and DIM3). - -The objective for the IN_ARRAY signatures is for SWIG to generate -python wrappers that take a container that constitutes a valid -argument to the numpy array constructor, and can be used to build an -array of type "type". Currently, types "signed char", "unsigned -char", "short", "unsigned short", "int", "unsigned int", "long", -"unsigned long", "long long", "unsigned long long", "float", and -"double" are supported and tested. - -The objective for the INPLACE_ARRAY signatures is for SWIG to generate -python wrappers that accept a numpy array of any of the above-listed -types. - -The source files Vector.cxx, Matrix.cxx and Tensor.cxx contain the -actual implementations of the functions described in Vector.h, -Matrix.h and Tensor.h. The python scripts testVector.py, -testMatrix.py and testTensor.py test the resulting python wrappers -using the unittest module. 
- -The SWIG interface files Vector.i, Matrix.i and Tensor.i are used to -generate the wrapper code. The SWIG_FILE_WITH_INIT macro allows -numpy.i to be used with multiple python modules. If it is specified, -then the %init block found in Vector.i, Matrix.i and Tensor.i are -required. The other things done in Vector.i, Matrix.i and Tensor.i -are the inclusion of the appropriate header file and numpy.i file, and -the "%apply" directives to force the functions to use the typemaps. - -The setup.py script is a standard python distutils script. It defines -_Vector, _Matrix and _Tensor extension modules and Vector, Matrix and -Tensor python modules. The Makefile automates everything, setting up -the dependencies, calling swig to generate the wrappers, and calling -setup.py to compile the wrapper code and generate the shared objects. -Targets "all" (default), "test", "doc" and "clean" are supported. The -"doc" target creates HTML documentation (with make target "html"), and -PDF documentation (with make targets "tex" and "pdf"). 
- -To build and run the test code, simply execute from the shell:: - - $ make test diff --git a/numpy-1.6.2/doc/swig/numpy.i b/numpy-1.6.2/doc/swig/numpy.i deleted file mode 100644 index e3ff236aad..0000000000 --- a/numpy-1.6.2/doc/swig/numpy.i +++ /dev/null @@ -1,1634 +0,0 @@ -/* -*- C -*- (not really, but good for syntax highlighting) */ -#ifdef SWIGPYTHON - -%{ -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -%} - -/**********************************************************************/ - -%fragment("NumPy_Backward_Compatibility", "header") -{ -/* Support older NumPy data type names -*/ -%#if NDARRAY_VERSION < 0x01000000 -%#define NPY_BOOL PyArray_BOOL -%#define NPY_BYTE PyArray_BYTE -%#define NPY_UBYTE PyArray_UBYTE -%#define NPY_SHORT PyArray_SHORT -%#define NPY_USHORT PyArray_USHORT -%#define NPY_INT PyArray_INT -%#define NPY_UINT PyArray_UINT -%#define NPY_LONG PyArray_LONG -%#define NPY_ULONG PyArray_ULONG -%#define NPY_LONGLONG PyArray_LONGLONG -%#define NPY_ULONGLONG PyArray_ULONGLONG -%#define NPY_FLOAT PyArray_FLOAT -%#define NPY_DOUBLE PyArray_DOUBLE -%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -%#define NPY_CFLOAT PyArray_CFLOAT -%#define NPY_CDOUBLE PyArray_CDOUBLE -%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -%#define NPY_OBJECT PyArray_OBJECT -%#define NPY_STRING PyArray_STRING -%#define NPY_UNICODE PyArray_UNICODE -%#define NPY_VOID PyArray_VOID -%#define NPY_NTYPES PyArray_NTYPES -%#define NPY_NOTYPE PyArray_NOTYPE -%#define NPY_CHAR PyArray_CHAR -%#define NPY_USERDEF PyArray_USERDEF -%#define npy_intp intp - -%#define NPY_MAX_BYTE MAX_BYTE -%#define NPY_MIN_BYTE MIN_BYTE -%#define NPY_MAX_UBYTE MAX_UBYTE -%#define NPY_MAX_SHORT MAX_SHORT -%#define NPY_MIN_SHORT MIN_SHORT -%#define NPY_MAX_USHORT MAX_USHORT -%#define NPY_MAX_INT MAX_INT -%#define NPY_MIN_INT MIN_INT -%#define NPY_MAX_UINT MAX_UINT -%#define NPY_MAX_LONG MAX_LONG -%#define NPY_MIN_LONG MIN_LONG -%#define NPY_MAX_ULONG MAX_ULONG 
-%#define NPY_MAX_LONGLONG MAX_LONGLONG -%#define NPY_MIN_LONGLONG MIN_LONGLONG -%#define NPY_MAX_ULONGLONG MAX_ULONGLONG -%#define NPY_MAX_INTP MAX_INTP -%#define NPY_MIN_INTP MIN_INTP - -%#define NPY_FARRAY FARRAY -%#define NPY_F_CONTIGUOUS F_CONTIGUOUS -%#endif -} - -/**********************************************************************/ - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it to fix some minor bugs, upgrade from Numeric to numpy (all - * versions), add some comments and functionality, and convert from - * direct code insertion to SWIG fragments. - */ - -%fragment("NumPy_Macros", "header") -{ -/* Macros to extract array attributes. - */ -%#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -%#define array_type(a) (int)(PyArray_TYPE(a)) -%#define array_numdims(a) (((PyArrayObject *)a)->nd) -%#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -%#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -%#define array_data(a) (((PyArrayObject *)a)->data) -%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -%#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) -%#define array_is_fortran(a) (PyArray_ISFORTRAN(a)) -} - -/**********************************************************************/ - -%fragment("NumPy_Utilities", "header") -{ - /* Given a PyObject, return a string describing its type. 
- */ - const char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; - } - - /* Given a NumPy typecode, return a string describing the type. - */ - const char* typecode_string(int typecode) { - static const char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; - } - - /* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ - int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); - } -} - -/**********************************************************************/ - -%fragment("NumPy_Object_to_Array", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities") -{ - /* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. 
- */ - PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) - { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) - { - ary = (PyArrayObject*) input; - } - else if is_array(input) - { - const char* desired_type = typecode_string(typecode); - const char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else - { - const char * desired_type = typecode_string(typecode); - const char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; - } - - /* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ - PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) - { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) - { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else - { - py_obj = PyArray_FROMANY(input, typecode, 0, 0, NPY_DEFAULT); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; - } - - /* Given a PyArrayObject, check to see if it is contiguous. If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. 
- */ - PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) - { - PyArrayObject* result; - if (array_is_contiguous(ary)) - { - result = ary; - *is_new_object = 0; - } - else - { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; - } - - /* Given a PyArrayObject, check to see if it is Fortran-contiguous. - * If so, return the input pointer, but do not flag it as not a new - * object. If it is not Fortran-contiguous, create a new - * PyArrayObject using the original data, flag it as a new object - * and return the pointer. - */ - PyArrayObject* make_fortran(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) - { - PyArrayObject* result; - if (array_is_fortran(ary)) - { - result = ary; - *is_new_object = 0; - } - else - { - Py_INCREF(ary->descr); - result = (PyArrayObject*) PyArray_FromArray(ary, ary->descr, NPY_FORTRAN); - *is_new_object = 1; - } - return result; - } - - /* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ - PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) - { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, - &is_new1); - if (ary1) - { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) - { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; - } - - /* Convert a given PyObject to a Fortran-ordered PyArrayObject of the - * specified type. If the input object is not a Fortran-ordered - * PyArrayObject, a new one will be created and the new object flag - * will be set. 
- */ - PyArrayObject* obj_to_array_fortran_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) - { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, - &is_new1); - if (ary1) - { - ary2 = make_fortran(ary1, &is_new2, 0, 0); - if (is_new1 && is_new2) - { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; - } - -} /* end fragment */ - - -/**********************************************************************/ - -%fragment("NumPy_Array_Requirements", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros") -{ - /* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ - int require_contiguous(PyArrayObject* ary) - { - int contiguous = 1; - if (!array_is_contiguous(ary)) - { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; - } - - /* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. - */ - int require_native(PyArrayObject* ary) - { - int native = 1; - if (!array_is_native(ary)) - { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. " - "A byte-swapped array was given"); - native = 0; - } - return native; - } - - /* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. Otherwise, set the python error string and return 0. - */ - int require_dimensions(PyArrayObject* ary, int exact_dimensions) - { - int success = 1; - if (array_numdims(ary) != exact_dimensions) - { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. 
Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; - } - - /* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. - */ - int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) - { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) - { - if (array_numdims(ary) == exact_dimensions[i]) - { - success = 1; - } - } - if (!success) - { - for (i = 0; i < n-1; i++) - { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; - } - - /* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. - */ - int require_size(PyArrayObject* ary, npy_intp* size, int n) - { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) - { - if (size[i] != -1 && size[i] != array_size(ary,i)) - { - success = 0; - } - } - if (!success) - { - for (i = 0; i < n; i++) - { - if (size[i] == -1) - { - sprintf(s, "*,"); - } - else - { - sprintf(s, "%ld,", (long int)size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) - { - sprintf(s, "%ld,", (long int)array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must have shape of %s. 
Given array has shape of %s", - desired_dims, actual_dims); - } - return success; - } - - /* Require the given PyArrayObject to to be FORTRAN ordered. If the - * the PyArrayObject is already FORTRAN ordered, do nothing. Else, - * set the FORTRAN ordering flag and recompute the strides. - */ - int require_fortran(PyArrayObject* ary) - { - int success = 1; - int nd = array_numdims(ary); - int i; - if (array_is_fortran(ary)) return success; - /* Set the FORTRAN ordered flag */ - ary->flags = NPY_FARRAY; - /* Recompute the strides */ - ary->strides[0] = ary->strides[nd-1]; - for (i=1; i < nd; ++i) - ary->strides[i] = ary->strides[i-1] * array_size(ary,i-1); - return success; - } -} - -/* Combine all NumPy fragments into one for convenience */ -%fragment("NumPy_Fragments", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities", - fragment="NumPy_Object_to_Array", - fragment="NumPy_Array_Requirements") { } - -/* End John Hunter translation (with modifications by Bill Spotz) - */ - -/* %numpy_typemaps() macro - * - * This macro defines a family of 41 typemaps that allow C arguments - * of the form - * - * (DATA_TYPE IN_ARRAY1[ANY]) - * (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - * - * (DATA_TYPE IN_ARRAY2[ANY][ANY]) - * (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - * (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - * - * (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - * (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) - * (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) - * - * (DATA_TYPE INPLACE_ARRAY1[ANY]) - * (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, 
DATA_TYPE* INPLACE_ARRAY1) - * - * (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - * (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - * (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - * - * (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - * (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) - * (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - * (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) - * - * (DATA_TYPE ARGOUT_ARRAY1[ANY]) - * (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - * (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - * - * (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - * - * (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) - * (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) - * (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) - * - * (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) - * (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - * (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) - * - * where "DATA_TYPE" is any type supported by the NumPy module, and - * "DIM_TYPE" is any int-like type suitable for specifying dimensions. - * The difference between "ARRAY" typemaps and "FARRAY" typemaps is - * that the "FARRAY" typemaps expect FORTRAN ordering of - * multidimensional arrays. 
In python, the dimensions will not need - * to be specified (except for the "DATA_TYPE* ARGOUT_ARRAY1" - * typemaps). The IN_ARRAYs can be a numpy array or any sequence that - * can be converted to a numpy array of the specified type. The - * INPLACE_ARRAYs must be numpy arrays of the appropriate type. The - * ARGOUT_ARRAYs will be returned as new numpy arrays of the - * appropriate type. - * - * These typemaps can be applied to existing functions using the - * %apply directive. For example: - * - * %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)}; - * double prod(double* series, int length); - * - * %apply (int DIM1, int DIM2, double* INPLACE_ARRAY2) - * {(int rows, int cols, double* matrix )}; - * void floor(int rows, int cols, double* matrix, double f); - * - * %apply (double IN_ARRAY3[ANY][ANY][ANY]) - * {(double tensor[2][2][2] )}; - * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) - * {(double low[2][2][2] )}; - * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY]) - * {(double upp[2][2][2] )}; - * void luSplit(double tensor[2][2][2], - * double low[2][2][2], - * double upp[2][2][2] ); - * - * or directly with - * - * double prod(double* IN_ARRAY1, int DIM1); - * - * void floor(int DIM1, int DIM2, double* INPLACE_ARRAY2, double f); - * - * void luSplit(double IN_ARRAY3[ANY][ANY][ANY], - * double ARGOUT_ARRAY3[ANY][ANY][ANY], - * double ARGOUT_ARRAY3[ANY][ANY][ANY]); - */ - -%define %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -/************************/ -/* Input Array Typemaps */ -/************************/ - -/* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY1[ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY1[ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = { $1_dim0 }; - array = obj_to_array_contiguous_allow_conversion($input, 
DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY1[ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = { -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[1] = {-1}; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 1) || - !require_size(array, size, 1)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY]) - */ 
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY2[ANY][ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY2[ANY][ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { $1_dim0, $1_dim1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY2[ANY][ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) - (PyArrayObject* array=NULL, 
int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} -%typemap(freearg) - (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[2] = { -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - 
&is_new_object); - if (!array || !require_dimensions(array, 2) || - !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(freearg) - (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) 
array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} -%typemap(freearg) - (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* IN_ARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3) | !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) 
array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} -%typemap(freearg) - (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* IN_FARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) -{ - $1 = is_array($input) || PySequence_Check($input); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) - (PyArrayObject* array=NULL, int is_new_object=0) -{ - npy_intp size[3] = { -1, -1, -1 }; - array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); - if (!array || !require_dimensions(array, 3) || - !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} -%typemap(freearg) - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) -{ - if (is_new_object$argnum && array$argnum) - { Py_DECREF(array$argnum); } -} - -/***************************/ -/* In-Place Array Typemaps */ -/***************************/ - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY1[ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY1[ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[1] = { $1_dim0 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_size(array, size, 1) || - 
!require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) - (PyArrayObject* array=NULL, int i=1) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = 1; - for (i=0; i < array_numdims(array); ++i) $2 *= array_size(array,i); -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) - (PyArrayObject* array=NULL, int i=0) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,1) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = 1; - for (i=0; i < array_numdims(array); ++i) $1 *= array_size(array,i); - $2 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[2] = { $1_dim0, $1_dim1 }; - array = obj_to_array_no_conversion($input, 
DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_size(array, size, 2) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) || - !require_native(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} 
-%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) - || !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,2) || !require_contiguous(array) || - !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) - (PyArrayObject* array=NULL) -{ - npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_size(array, size, 3) || - !require_contiguous(array) || !require_native(array)) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, 
DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) || - !require_native(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* INPLACE_ARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) - || !require_native(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} - -/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, - * DIM_TYPE DIM3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DATA_TYPE* 
INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) || - !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); - $2 = (DIM_TYPE) array_size(array,0); - $3 = (DIM_TYPE) array_size(array,1); - $4 = (DIM_TYPE) array_size(array,2); -} - -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, - * DATA_TYPE* INPLACE_FARRAY3) - */ -%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) -{ - $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); -} -%typemap(in, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) - (PyArrayObject* array=NULL) -{ - array = obj_to_array_no_conversion($input, DATA_TYPECODE); - if (!array || !require_dimensions(array,3) || !require_contiguous(array) - || !require_native(array) || !require_fortran(array)) SWIG_fail; - $1 = (DIM_TYPE) array_size(array,0); - $2 = (DIM_TYPE) array_size(array,1); - $3 = (DIM_TYPE) array_size(array,2); - $4 = (DATA_TYPE*) array_data(array); -} - -/*************************/ -/* Argout Array Typemaps */ -/*************************/ - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY1[ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[1] = { $1_dim0 }; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY1[ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - */ -%typemap(in,numinputs=1, - 
fragment="NumPy_Fragments") - (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) - (PyObject * array = NULL) -{ - npy_intp dims[1]; - if (!PyInt_Check($input)) - { - const char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", - typestring); - SWIG_fail; - } - $2 = (DIM_TYPE) PyInt_AsLong($input); - dims[0] = (npy_intp) $2; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = (DATA_TYPE*) array_data(array); -} -%typemap(argout) - (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - */ -%typemap(in,numinputs=1, - fragment="NumPy_Fragments") - (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) - (PyObject * array = NULL) -{ - npy_intp dims[1]; - if (!PyInt_Check($input)) - { - const char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", - typestring); - SWIG_fail; - } - $1 = (DIM_TYPE) PyInt_AsLong($input); - dims[0] = (npy_intp) $1; - array = PyArray_SimpleNew(1, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $2 = (DATA_TYPE*) array_data(array); -} -%typemap(argout) - (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") - (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[2] = { $1_dim0, $1_dim1 }; - array = PyArray_SimpleNew(2, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - */ -%typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") 
- (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) - (PyObject * array = NULL) -{ - npy_intp dims[3] = { $1_dim0, $1_dim1, $1_dim2 }; - array = PyArray_SimpleNew(3, dims, DATA_TYPECODE); - if (!array) SWIG_fail; - $1 = ($1_ltype) array_data(array); -} -%typemap(argout) - (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) -{ - $result = SWIG_Python_AppendOutput($result,array$argnum); -} - -/*****************************/ -/* Argoutview Array Typemaps */ -/*****************************/ - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 ) - (DATA_TYPE* data_temp , DIM_TYPE dim_temp) -{ - $1 = &data_temp; - $2 = &dim_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) -{ - npy_intp dims[1] = { *$2 }; - PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DATA_TYPE** ARGOUTVIEW_ARRAY1) - (DIM_TYPE dim_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim_temp; - $2 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) -{ - npy_intp dims[1] = { *$1 }; - PyObject * array = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) - (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* 
DIM1, DIM_TYPE* DIM2) -{ - npy_intp dims[2] = { *$2, *$3 }; - PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_ARRAY2) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) -{ - npy_intp dims[2] = { *$1, *$2 }; - PyObject * array = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 ) - (DATA_TYPE* data_temp , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) -{ - npy_intp dims[2] = { *$2, *$3 }; - PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_FARRAY2) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp ) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &data_temp; -} -%typemap(argout, - 
fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) -{ - npy_intp dims[2] = { *$1, *$2 }; - PyObject * obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3) - */ -%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[3] = { *$2, *$3, *$4 }; - PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, - DATA_TYPE** ARGOUTVIEW_ARRAY3) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) -{ - npy_intp dims[3] = { *$1, *$2, *$3 }; - PyObject * array = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$3)); - if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,array); -} - -/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, - DIM_TYPE* DIM3) - */ 
-%typemap(in,numinputs=0) - (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) - (DATA_TYPE* data_temp, DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp) -{ - $1 = &data_temp; - $2 = &dim1_temp; - $3 = &dim2_temp; - $4 = &dim3_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) -{ - npy_intp dims[3] = { *$2, *$3, *$4 }; - PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, - DATA_TYPE** ARGOUTVIEW_FARRAY3) - */ -%typemap(in,numinputs=0) - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) - (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp) -{ - $1 = &dim1_temp; - $2 = &dim2_temp; - $3 = &dim3_temp; - $4 = &data_temp; -} -%typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") - (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) -{ - npy_intp dims[3] = { *$1, *$2, *$3 }; - PyObject * obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$3)); - PyArrayObject * array = (PyArrayObject*) obj; - if (!array || require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); -} - -%enddef /* %numpy_typemaps() macro */ -/* *************************************************************** */ - -/* Concrete instances of the %numpy_typemaps() macro: Each invocation - * below applies all of the typemaps above to the specified data type. 
- */ -%numpy_typemaps(signed char , NPY_BYTE , int) -%numpy_typemaps(unsigned char , NPY_UBYTE , int) -%numpy_typemaps(short , NPY_SHORT , int) -%numpy_typemaps(unsigned short , NPY_USHORT , int) -%numpy_typemaps(int , NPY_INT , int) -%numpy_typemaps(unsigned int , NPY_UINT , int) -%numpy_typemaps(long , NPY_LONG , int) -%numpy_typemaps(unsigned long , NPY_ULONG , int) -%numpy_typemaps(long long , NPY_LONGLONG , int) -%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int) -%numpy_typemaps(float , NPY_FLOAT , int) -%numpy_typemaps(double , NPY_DOUBLE , int) - -/* *************************************************************** - * The follow macro expansion does not work, because C++ bool is 4 - * bytes and NPY_BOOL is 1 byte - * - * %numpy_typemaps(bool, NPY_BOOL, int) - */ - -/* *************************************************************** - * On my Mac, I get the following warning for this macro expansion: - * 'swig/python detected a memory leak of type 'long double *', no destructor found.' - * - * %numpy_typemaps(long double, NPY_LONGDOUBLE, int) - */ - -/* *************************************************************** - * Swig complains about a syntax error for the following macro - * expansions: - * - * %numpy_typemaps(complex float, NPY_CFLOAT , int) - * - * %numpy_typemaps(complex double, NPY_CDOUBLE, int) - * - * %numpy_typemaps(complex long double, NPY_CLONGDOUBLE, int) - */ - -#endif /* SWIGPYTHON */ diff --git a/numpy-1.6.2/doc/swig/pyfragments.swg b/numpy-1.6.2/doc/swig/pyfragments.swg deleted file mode 100644 index 0deaa61e1b..0000000000 --- a/numpy-1.6.2/doc/swig/pyfragments.swg +++ /dev/null @@ -1,174 +0,0 @@ -/*-*- C -*-*/ - -/**********************************************************************/ - -/* For numpy versions prior to 1.0, the names of certain data types - * are different than in later versions. This fragment provides macro - * substitutions that allow us to support old and new versions of - * numpy. 
- */ - -%fragment("NumPy_Backward_Compatibility", "header") -{ -/* Support older NumPy data type names - */ -%#if NDARRAY_VERSION < 0x01000000 -%#define NPY_BOOL PyArray_BOOL -%#define NPY_BYTE PyArray_BYTE -%#define NPY_UBYTE PyArray_UBYTE -%#define NPY_SHORT PyArray_SHORT -%#define NPY_USHORT PyArray_USHORT -%#define NPY_INT PyArray_INT -%#define NPY_UINT PyArray_UINT -%#define NPY_LONG PyArray_LONG -%#define NPY_ULONG PyArray_ULONG -%#define NPY_LONGLONG PyArray_LONGLONG -%#define NPY_ULONGLONG PyArray_ULONGLONG -%#define NPY_FLOAT PyArray_FLOAT -%#define NPY_DOUBLE PyArray_DOUBLE -%#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -%#define NPY_CFLOAT PyArray_CFLOAT -%#define NPY_CDOUBLE PyArray_CDOUBLE -%#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -%#define NPY_OBJECT PyArray_OBJECT -%#define NPY_STRING PyArray_STRING -%#define NPY_UNICODE PyArray_UNICODE -%#define NPY_VOID PyArray_VOID -%#define NPY_NTYPES PyArray_NTYPES -%#define NPY_NOTYPE PyArray_NOTYPE -%#define NPY_CHAR PyArray_CHAR -%#define NPY_USERDEF PyArray_USERDEF -%#define npy_intp intp - -%#define NPY_MAX_BYTE MAX_BYTE -%#define NPY_MIN_BYTE MIN_BYTE -%#define NPY_MAX_UBYTE MAX_UBYTE -%#define NPY_MAX_SHORT MAX_SHORT -%#define NPY_MIN_SHORT MIN_SHORT -%#define NPY_MAX_USHORT MAX_USHORT -%#define NPY_MAX_INT MAX_INT -%#define NPY_MIN_INT MIN_INT -%#define NPY_MAX_UINT MAX_UINT -%#define NPY_MAX_LONG MAX_LONG -%#define NPY_MIN_LONG MIN_LONG -%#define NPY_MAX_ULONG MAX_ULONG -%#define NPY_MAX_LONGLONG MAX_LONGLONG -%#define NPY_MIN_LONGLONG MIN_LONGLONG -%#define NPY_MAX_ULONGLONG MAX_ULONGLONG -%#define NPY_MAX_INTP MAX_INTP -%#define NPY_MIN_INTP MIN_INTP - -%#define NPY_FARRAY FARRAY -%#define NPY_F_CONTIGUOUS F_CONTIGUOUS -%#endif -} - -/**********************************************************************/ - -/* Override the SWIG_AsVal_frag(long) fragment so that it also checks - * for numpy scalar array types. 
The code through the %#endif is - * essentially cut-and-paste from pyprimtype.swg - */ - -%fragment(SWIG_AsVal_frag(long), "header", - fragment="SWIG_CanCastAsInteger", - fragment="NumPy_Backward_Compatibility") -{ - SWIGINTERN int - SWIG_AsVal_dec(long)(PyObject * obj, long * val) - { - static PyArray_Descr * longDescr = PyArray_DescrNewFromType(NPY_LONG); - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -%#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -%#endif - if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; - PyArray_CastScalarToCtype(obj, (void*)val, longDescr); - return SWIG_OK; - } -} - - -/* Override the SWIG_AsVal_frag(unsigned long) fragment so that it - * also checks for numpy scalar array types. 
The code through the - * %#endif is essentially cut-and-paste from pyprimtype.swg - */ - -%fragment(SWIG_AsVal_frag(unsigned long),"header", - fragment="SWIG_CanCastAsInteger", - fragment="NumPy_Backward_Compatibility") -{ - SWIGINTERN int - SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val) - { - static PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG); - if (PyInt_Check(obj)) { - long v = PyInt_AsLong(obj); - if (v >= 0) { - if (val) *val = v; - return SWIG_OK; - } else { - return SWIG_OverflowError; - } - } else if (PyLong_Check(obj)) { - unsigned long v = PyLong_AsUnsignedLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -%#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - unsigned long v = PyLong_AsUnsignedLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal(double)(obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, 0, ULONG_MAX)) { - if (val) *val = (unsigned long)(d); - return res; - } - } - } -%#endif - if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError; - PyArray_CastScalarToCtype(obj, (void*)val, ulongDescr); - return SWIG_OK; - } -} diff --git a/numpy-1.6.2/doc/swig/test/Array.i b/numpy-1.6.2/doc/swig/test/Array.i deleted file mode 100644 index d56dd2d1c6..0000000000 --- a/numpy-1.6.2/doc/swig/test/Array.i +++ /dev/null @@ -1,107 +0,0 @@ -// -*- c++ -*- - -%module Array - -%{ -#define SWIG_FILE_WITH_INIT -#include "Array1.h" -#include "Array2.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - - // Get the STL typemaps -%include "stl.i" - -// Handle standard exceptions -%include "exception.i" -%exception -{ - try - { - $action - } - catch (const std::invalid_argument& e) - { - SWIG_exception(SWIG_ValueError, e.what()); - } - catch (const std::out_of_range& e) - { - SWIG_exception(SWIG_IndexError, 
e.what()); - } -} -%init %{ - import_array(); -%} - -// Global ignores -%ignore *::operator=; -%ignore *::operator[]; - -// Apply the 1D NumPy typemaps -%apply (int DIM1 , long* INPLACE_ARRAY1) - {(int length, long* data )}; -%apply (long** ARGOUTVIEW_ARRAY1, int* DIM1 ) - {(long** data , int* length)}; - -// Apply the 2D NumPy typemaps -%apply (int DIM1 , int DIM2 , long* INPLACE_ARRAY2) - {(int nrows, int ncols, long* data )}; -%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_ARRAY2) - {(int* nrows, int* ncols, long** data )}; -// Note: the %apply for INPLACE_ARRAY2 above gets successfully applied -// to the constructor Array2(int nrows, int ncols, long* data), but -// does not get applied to the method Array2::resize(int nrows, int -// ncols, long* data). I have no idea why. For this reason the test -// for Apply2.resize(numpy.ndarray) in testArray.py is commented out. - -// Array1 support -%include "Array1.h" -%extend Array1 -{ - void __setitem__(int i, long v) - { - self->operator[](i) = v; - } - - long __getitem__(int i) - { - return self->operator[](i); - } - - int __len__() - { - return self->length(); - } - - std::string __str__() - { - return self->asString(); - } -} - -// Array2 support -%include "Array2.h" -%extend Array2 -{ - void __setitem__(int i, Array1 & v) - { - self->operator[](i) = v; - } - - Array1 & __getitem__(int i) - { - return self->operator[](i); - } - - int __len__() - { - return self->nrows() * self->ncols(); - } - - std::string __str__() - { - return self->asString(); - } -} diff --git a/numpy-1.6.2/doc/swig/test/Array1.cxx b/numpy-1.6.2/doc/swig/test/Array1.cxx deleted file mode 100644 index 0c09e02f92..0000000000 --- a/numpy-1.6.2/doc/swig/test/Array1.cxx +++ /dev/null @@ -1,131 +0,0 @@ -#include "Array1.h" -#include -#include - -// Default/length/array constructor -Array1::Array1(int length, long* data) : - _ownData(false), _length(0), _buffer(0) -{ - resize(length, data); -} - -// Copy constructor -Array1::Array1(const Array1 & 
source) : - _length(source._length) -{ - allocateMemory(); - *this = source; -} - -// Destructor -Array1::~Array1() -{ - deallocateMemory(); -} - -// Assignment operator -Array1 & Array1::operator=(const Array1 & source) -{ - int len = _length < source._length ? _length : source._length; - for (int i=0; i < len; ++i) - { - (*this)[i] = source[i]; - } - return *this; -} - -// Equals operator -bool Array1::operator==(const Array1 & other) const -{ - if (_length != other._length) return false; - for (int i=0; i < _length; ++i) - { - if ((*this)[i] != other[i]) return false; - } - return true; -} - -// Length accessor -int Array1::length() const -{ - return _length; -} - -// Resize array -void Array1::resize(int length, long* data) -{ - if (length < 0) throw std::invalid_argument("Array1 length less than 0"); - if (length == _length) return; - deallocateMemory(); - _length = length; - if (!data) - { - allocateMemory(); - } - else - { - _ownData = false; - _buffer = data; - } -} - -// Set item accessor -long & Array1::operator[](int i) -{ - if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); - return _buffer[i]; -} - -// Get item accessor -const long & Array1::operator[](int i) const -{ - if (i < 0 || i >= _length) throw std::out_of_range("Array1 index out of range"); - return _buffer[i]; -} - -// String output -std::string Array1::asString() const -{ - std::stringstream result; - result << "["; - for (int i=0; i < _length; ++i) - { - result << " " << _buffer[i]; - if (i < _length-1) result << ","; - } - result << " ]"; - return result.str(); -} - -// Get view -void Array1::view(long** data, int* length) const -{ - *data = _buffer; - *length = _length; -} - -// Private methods - void Array1::allocateMemory() - { - if (_length == 0) - { - _ownData = false; - _buffer = 0; - } - else - { - _ownData = true; - _buffer = new long[_length]; - } - } - - void Array1::deallocateMemory() - { - if (_ownData && _length && _buffer) - { - delete [] 
_buffer; - } - _ownData = false; - _length = 0; - _buffer = 0; - } diff --git a/numpy-1.6.2/doc/swig/test/Array1.h b/numpy-1.6.2/doc/swig/test/Array1.h deleted file mode 100644 index 754c248fc2..0000000000 --- a/numpy-1.6.2/doc/swig/test/Array1.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef ARRAY1_H -#define ARRAY1_H - -#include -#include - -class Array1 -{ -public: - - // Default/length/array constructor - Array1(int length = 0, long* data = 0); - - // Copy constructor - Array1(const Array1 & source); - - // Destructor - ~Array1(); - - // Assignment operator - Array1 & operator=(const Array1 & source); - - // Equals operator - bool operator==(const Array1 & other) const; - - // Length accessor - int length() const; - - // Resize array - void resize(int length, long* data = 0); - - // Set item accessor - long & operator[](int i); - - // Get item accessor - const long & operator[](int i) const; - - // String output - std::string asString() const; - - // Get view - void view(long** data, int* length) const; - -private: - // Members - bool _ownData; - int _length; - long * _buffer; - - // Methods - void allocateMemory(); - void deallocateMemory(); -}; - -#endif diff --git a/numpy-1.6.2/doc/swig/test/Array2.cxx b/numpy-1.6.2/doc/swig/test/Array2.cxx deleted file mode 100644 index e3558f7861..0000000000 --- a/numpy-1.6.2/doc/swig/test/Array2.cxx +++ /dev/null @@ -1,168 +0,0 @@ -#include "Array2.h" -#include - -// Default constructor -Array2::Array2() : - _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) -{ } - -// Size/array constructor -Array2::Array2(int nrows, int ncols, long* data) : - _ownData(false), _nrows(0), _ncols(), _buffer(0), _rows(0) -{ - resize(nrows, ncols, data); -} - -// Copy constructor -Array2::Array2(const Array2 & source) : - _nrows(source._nrows), _ncols(source._ncols) -{ - _ownData = true; - allocateMemory(); - *this = source; -} - -// Destructor -Array2::~Array2() -{ - deallocateMemory(); -} - -// Assignment operator -Array2 & 
Array2::operator=(const Array2 & source) -{ - int nrows = _nrows < source._nrows ? _nrows : source._nrows; - int ncols = _ncols < source._ncols ? _ncols : source._ncols; - for (int i=0; i < nrows; ++i) - { - for (int j=0; j < ncols; ++j) - { - (*this)[i][j] = source[i][j]; - } - } - return *this; -} - -// Equals operator -bool Array2::operator==(const Array2 & other) const -{ - if (_nrows != other._nrows) return false; - if (_ncols != other._ncols) return false; - for (int i=0; i < _nrows; ++i) - { - for (int j=0; j < _ncols; ++j) - { - if ((*this)[i][j] != other[i][j]) return false; - } - } - return true; -} - -// Length accessors -int Array2::nrows() const -{ - return _nrows; -} - -int Array2::ncols() const -{ - return _ncols; -} - -// Resize array -void Array2::resize(int nrows, int ncols, long* data) -{ - if (nrows < 0) throw std::invalid_argument("Array2 nrows less than 0"); - if (ncols < 0) throw std::invalid_argument("Array2 ncols less than 0"); - if (nrows == _nrows && ncols == _ncols) return; - deallocateMemory(); - _nrows = nrows; - _ncols = ncols; - if (!data) - { - allocateMemory(); - } - else - { - _ownData = false; - _buffer = data; - allocateRows(); - } -} - -// Set item accessor -Array1 & Array2::operator[](int i) -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); - return _rows[i]; -} - -// Get item accessor -const Array1 & Array2::operator[](int i) const -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Array2 row index out of range"); - return _rows[i]; -} - -// String output -std::string Array2::asString() const -{ - std::stringstream result; - result << "[ "; - for (int i=0; i < _nrows; ++i) - { - if (i > 0) result << " "; - result << (*this)[i].asString(); - if (i < _nrows-1) result << "," << std::endl; - } - result << " ]" << std::endl; - return result.str(); -} - -// Get view -void Array2::view(int* nrows, int* ncols, long** data) const -{ - *nrows = _nrows; - *ncols = _ncols; - *data = _buffer; -} - 
-// Private methods -void Array2::allocateMemory() -{ - if (_nrows * _ncols == 0) - { - _ownData = false; - _buffer = 0; - _rows = 0; - } - else - { - _ownData = true; - _buffer = new long[_nrows*_ncols]; - allocateRows(); - } -} - -void Array2::allocateRows() -{ - _rows = new Array1[_nrows]; - for (int i=0; i < _nrows; ++i) - { - _rows[i].resize(_ncols, &_buffer[i*_ncols]); - } -} - -void Array2::deallocateMemory() -{ - if (_ownData && _nrows*_ncols && _buffer) - { - delete [] _rows; - delete [] _buffer; - } - _ownData = false; - _nrows = 0; - _ncols = 0; - _buffer = 0; - _rows = 0; -} diff --git a/numpy-1.6.2/doc/swig/test/Array2.h b/numpy-1.6.2/doc/swig/test/Array2.h deleted file mode 100644 index a6e5bfc308..0000000000 --- a/numpy-1.6.2/doc/swig/test/Array2.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef ARRAY2_H -#define ARRAY2_H - -#include "Array1.h" -#include -#include - -class Array2 -{ -public: - - // Default constructor - Array2(); - - // Size/array constructor - Array2(int nrows, int ncols, long* data=0); - - // Copy constructor - Array2(const Array2 & source); - - // Destructor - ~Array2(); - - // Assignment operator - Array2 & operator=(const Array2 & source); - - // Equals operator - bool operator==(const Array2 & other) const; - - // Length accessors - int nrows() const; - int ncols() const; - - // Resize array - void resize(int ncols, int nrows, long* data=0); - - // Set item accessor - Array1 & operator[](int i); - - // Get item accessor - const Array1 & operator[](int i) const; - - // String output - std::string asString() const; - - // Get view - void view(int* nrows, int* ncols, long** data) const; - -private: - // Members - bool _ownData; - int _nrows; - int _ncols; - long * _buffer; - Array1 * _rows; - - // Methods - void allocateMemory(); - void allocateRows(); - void deallocateMemory(); -}; - -#endif diff --git a/numpy-1.6.2/doc/swig/test/Farray.cxx b/numpy-1.6.2/doc/swig/test/Farray.cxx deleted file mode 100644 index 3983c333be..0000000000 --- 
a/numpy-1.6.2/doc/swig/test/Farray.cxx +++ /dev/null @@ -1,122 +0,0 @@ -#include "Farray.h" -#include - -// Size constructor -Farray::Farray(int nrows, int ncols) : - _nrows(nrows), _ncols(ncols), _buffer(0) -{ - allocateMemory(); -} - -// Copy constructor -Farray::Farray(const Farray & source) : - _nrows(source._nrows), _ncols(source._ncols) -{ - allocateMemory(); - *this = source; -} - -// Destructor -Farray::~Farray() -{ - delete [] _buffer; -} - -// Assignment operator -Farray & Farray::operator=(const Farray & source) -{ - int nrows = _nrows < source._nrows ? _nrows : source._nrows; - int ncols = _ncols < source._ncols ? _ncols : source._ncols; - for (int i=0; i < nrows; ++i) - { - for (int j=0; j < ncols; ++j) - { - (*this)(i,j) = source(i,j); - } - } - return *this; -} - -// Equals operator -bool Farray::operator==(const Farray & other) const -{ - if (_nrows != other._nrows) return false; - if (_ncols != other._ncols) return false; - for (int i=0; i < _nrows; ++i) - { - for (int j=0; j < _ncols; ++j) - { - if ((*this)(i,j) != other(i,j)) return false; - } - } - return true; -} - -// Length accessors -int Farray::nrows() const -{ - return _nrows; -} - -int Farray::ncols() const -{ - return _ncols; -} - -// Set item accessor -long & Farray::operator()(int i, int j) -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); - if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); - return _buffer[offset(i,j)]; -} - -// Get item accessor -const long & Farray::operator()(int i, int j) const -{ - if (i < 0 || i > _nrows) throw std::out_of_range("Farray row index out of range"); - if (j < 0 || j > _ncols) throw std::out_of_range("Farray col index out of range"); - return _buffer[offset(i,j)]; -} - -// String output -std::string Farray::asString() const -{ - std::stringstream result; - result << "[ "; - for (int i=0; i < _nrows; ++i) - { - if (i > 0) result << " "; - result << "["; - for (int j=0; j < _ncols; 
++j) - { - result << " " << (*this)(i,j); - if (j < _ncols-1) result << ","; - } - result << " ]"; - if (i < _nrows-1) result << "," << std::endl; - } - result << " ]" << std::endl; - return result.str(); -} - -// Get view -void Farray::view(int* nrows, int* ncols, long** data) const -{ - *nrows = _nrows; - *ncols = _ncols; - *data = _buffer; -} - -// Private methods -void Farray::allocateMemory() -{ - if (_nrows <= 0) throw std::invalid_argument("Farray nrows <= 0"); - if (_ncols <= 0) throw std::invalid_argument("Farray ncols <= 0"); - _buffer = new long[_nrows*_ncols]; -} - -inline int Farray::offset(int i, int j) const -{ - return i + j * _nrows; -} diff --git a/numpy-1.6.2/doc/swig/test/Farray.h b/numpy-1.6.2/doc/swig/test/Farray.h deleted file mode 100644 index 4199a287ce..0000000000 --- a/numpy-1.6.2/doc/swig/test/Farray.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef FARRAY_H -#define FARRAY_H - -#include -#include - -class Farray -{ -public: - - // Size constructor - Farray(int nrows, int ncols); - - // Copy constructor - Farray(const Farray & source); - - // Destructor - ~Farray(); - - // Assignment operator - Farray & operator=(const Farray & source); - - // Equals operator - bool operator==(const Farray & other) const; - - // Length accessors - int nrows() const; - int ncols() const; - - // Set item accessor - long & operator()(int i, int j); - - // Get item accessor - const long & operator()(int i, int j) const; - - // String output - std::string asString() const; - - // Get view - void view(int* nrows, int* ncols, long** data) const; - -private: - // Members - int _nrows; - int _ncols; - long * _buffer; - - // Default constructor: not implemented - Farray(); - - // Methods - void allocateMemory(); - int offset(int i, int j) const; -}; - -#endif diff --git a/numpy-1.6.2/doc/swig/test/Farray.i b/numpy-1.6.2/doc/swig/test/Farray.i deleted file mode 100644 index 25f6cd025c..0000000000 --- a/numpy-1.6.2/doc/swig/test/Farray.i +++ /dev/null @@ -1,73 +0,0 @@ -// 
-*- c++ -*- - -%module Farray - -%{ -#define SWIG_FILE_WITH_INIT -#include "Farray.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - - // Get the STL typemaps -%include "stl.i" - -// Handle standard exceptions -%include "exception.i" -%exception -{ - try - { - $action - } - catch (const std::invalid_argument& e) - { - SWIG_exception(SWIG_ValueError, e.what()); - } - catch (const std::out_of_range& e) - { - SWIG_exception(SWIG_IndexError, e.what()); - } -} -%init %{ - import_array(); -%} - -// Global ignores -%ignore *::operator=; -%ignore *::operator(); - -// Apply the 2D NumPy typemaps -%apply (int* DIM1 , int* DIM2 , long** ARGOUTVIEW_FARRAY2) - {(int* nrows, int* ncols, long** data )}; - -// Farray support -%include "Farray.h" -%extend Farray -{ - PyObject * __setitem__(PyObject* index, long v) - { - int i, j; - if (!PyArg_ParseTuple(index, "ii:Farray___setitem__",&i,&j)) return NULL; - self->operator()(i,j) = v; - return Py_BuildValue(""); - } - - PyObject * __getitem__(PyObject * index) - { - int i, j; - if (!PyArg_ParseTuple(index, "ii:Farray___getitem__",&i,&j)) return NULL; - return SWIG_From_long(self->operator()(i,j)); - } - - int __len__() - { - return self->nrows() * self->ncols(); - } - - std::string __str__() - { - return self->asString(); - } -} diff --git a/numpy-1.6.2/doc/swig/test/Fortran.cxx b/numpy-1.6.2/doc/swig/test/Fortran.cxx deleted file mode 100644 index 475d21ddc4..0000000000 --- a/numpy-1.6.2/doc/swig/test/Fortran.cxx +++ /dev/null @@ -1,24 +0,0 @@ -#include -#include -#include -#include "Fortran.h" - -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## SecondElement(TYPE * matrix, int rows, int cols) { \ - TYPE result = matrix[1]; \ - return result; \ -} \ - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long 
long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy-1.6.2/doc/swig/test/Fortran.h b/numpy-1.6.2/doc/swig/test/Fortran.h deleted file mode 100644 index c243bb50fd..0000000000 --- a/numpy-1.6.2/doc/swig/test/Fortran.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef FORTRAN_H -#define FORTRAN_H - -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## SecondElement( TYPE * matrix, int rows, int cols); \ - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy-1.6.2/doc/swig/test/Fortran.i b/numpy-1.6.2/doc/swig/test/Fortran.i deleted file mode 100644 index 131790dd66..0000000000 --- a/numpy-1.6.2/doc/swig/test/Fortran.i +++ /dev/null @@ -1,36 +0,0 @@ -// -*- c++ -*- -%module Fortran - -%{ -#define SWIG_FILE_WITH_INIT -#include "Fortran.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE* IN_FARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped 
-%include "Fortran.h" diff --git a/numpy-1.6.2/doc/swig/test/Makefile b/numpy-1.6.2/doc/swig/test/Makefile deleted file mode 100644 index 5360b1ced5..0000000000 --- a/numpy-1.6.2/doc/swig/test/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# SWIG -INTERFACES = Array.i Farray.i Vector.i Matrix.i Tensor.i Fortran.i -WRAPPERS = $(INTERFACES:.i=_wrap.cxx) -PROXIES = $(INTERFACES:.i=.py ) - -# Default target: build the tests -.PHONY : all -all: $(WRAPPERS) Array1.cxx Array1.h Farray.cxx Farray.h Vector.cxx Vector.h \ - Matrix.cxx Matrix.h Tensor.cxx Tensor.h Fortran.h Fortran.cxx - ./setup.py build_ext -i - -# Test target: run the tests -.PHONY : test -test: all - python testVector.py - python testMatrix.py - python testTensor.py - python testArray.py - python testFarray.py - python testFortran.py - -# Rule: %.i -> %_wrap.cxx -%_wrap.cxx: %.i %.h ../numpy.i - swig -c++ -python $< -%_wrap.cxx: %.i %1.h %2.h ../numpy.i - swig -c++ -python $< - -# Clean target -.PHONY : clean -clean: - $(RM) -r build - $(RM) *.so - $(RM) $(WRAPPERS) - $(RM) $(PROXIES) diff --git a/numpy-1.6.2/doc/swig/test/Matrix.cxx b/numpy-1.6.2/doc/swig/test/Matrix.cxx deleted file mode 100644 index b953d70177..0000000000 --- a/numpy-1.6.2/doc/swig/test/Matrix.cxx +++ /dev/null @@ -1,112 +0,0 @@ -#include -#include -#include -#include "Matrix.h" - -// The following macro defines a family of functions that work with 2D -// arrays with the forms -// -// TYPE SNAMEDet( TYPE matrix[2][2]); -// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); -// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); -// void SNAMEScale( TYPE matrix[3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); -// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil); -// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). 
The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 2D input arrays, hard-coded length -// * 2D input arrays -// * 2D input arrays, data last -// * 2D in-place arrays, hard-coded lengths -// * 2D in-place arrays -// * 2D in-place arrays, data last -// * 2D argout arrays, hard-coded length -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Det(TYPE matrix[2][2]) { \ - return matrix[0][0]*matrix[1][1] - matrix[0][1]*matrix[1][0]; \ -} \ -\ -TYPE SNAME ## Max(TYPE * matrix, int rows, int cols) { \ - int i, j, index; \ - TYPE result = matrix[0]; \ - for (j=0; j result) result = matrix[index]; \ - } \ - } \ - return result; \ -} \ -\ -TYPE SNAME ## Min(int rows, int cols, TYPE * matrix) { \ - int i, j, index; \ - TYPE result = matrix[0]; \ - for (j=0; j ceil) array[index] = ceil; \ - } \ - } \ -} \ -\ -void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]) { \ - for (int i=0; i<3; ++i) { \ - for (int j=0; j<3; ++j) { \ - if (i >= j) { \ - lower[i][j] = matrix[i][j]; \ - upper[i][j] = 0; \ - } else { \ - lower[i][j] = 0; \ - upper[i][j] = matrix[i][j]; \ - } \ - } \ - } \ -} - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char , uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy-1.6.2/doc/swig/test/Matrix.h b/numpy-1.6.2/doc/swig/test/Matrix.h deleted file mode 100644 index f37836cc44..0000000000 --- a/numpy-1.6.2/doc/swig/test/Matrix.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef MATRIX_H -#define MATRIX_H - -// The following macro defines the prototypes for a family of -// functions that work with 2D arrays with the forms -// -// TYPE SNAMEDet( 
TYPE matrix[2][2]); -// TYPE SNAMEMax( TYPE * matrix, int rows, int cols); -// TYPE SNAMEMin( int rows, int cols, TYPE * matrix); -// void SNAMEScale( TYPE array[3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, TYPE floor); -// void SNAMECeil( int rows, int cols, TYPE * array, TYPE ceil ); -// void SNAMELUSplit(TYPE in[3][3], TYPE lower[3][3], TYPE upper[3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 2D input arrays, hard-coded lengths -// * 2D input arrays -// * 2D input arrays, data last -// * 2D in-place arrays, hard-coded lengths -// * 2D in-place arrays -// * 2D in-place arrays, data last -// * 2D argout arrays, hard-coded length -// -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## Det( TYPE matrix[2][2]); \ -TYPE SNAME ## Max( TYPE * matrix, int rows, int cols); \ -TYPE SNAME ## Min( int rows, int cols, TYPE * matrix); \ -void SNAME ## Scale( TYPE array[3][3], TYPE val); \ -void SNAME ## Floor( TYPE * array, int rows, int cols, TYPE floor); \ -void SNAME ## Ceil( int rows, int cols, TYPE * array, TYPE ceil ); \ -void SNAME ## LUSplit(TYPE matrix[3][3], TYPE lower[3][3], TYPE upper[3][3]); - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy-1.6.2/doc/swig/test/Matrix.i b/numpy-1.6.2/doc/swig/test/Matrix.i deleted file mode 
100644 index e721397a04..0000000000 --- a/numpy-1.6.2/doc/swig/test/Matrix.i +++ /dev/null @@ -1,45 +0,0 @@ -// -*- c++ -*- -%module Matrix - -%{ -#define SWIG_FILE_WITH_INIT -#include "Matrix.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE IN_ARRAY2[ANY][ANY]) {(TYPE matrix[ANY][ANY])}; -%apply (TYPE* IN_ARRAY2, int DIM1, int DIM2) {(TYPE* matrix, int rows, int cols)}; -%apply (int DIM1, int DIM2, TYPE* IN_ARRAY2) {(int rows, int cols, TYPE* matrix)}; - -%apply (TYPE INPLACE_ARRAY2[ANY][ANY]) {(TYPE array[3][3])}; -%apply (TYPE* INPLACE_ARRAY2, int DIM1, int DIM2) {(TYPE* array, int rows, int cols)}; -%apply (int DIM1, int DIM2, TYPE* INPLACE_ARRAY2) {(int rows, int cols, TYPE* array)}; - -%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE lower[3][3])}; -%apply (TYPE ARGOUT_ARRAY2[ANY][ANY]) {(TYPE upper[3][3])}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Matrix.h" diff --git a/numpy-1.6.2/doc/swig/test/Tensor.cxx b/numpy-1.6.2/doc/swig/test/Tensor.cxx deleted file mode 100644 index dce5952910..0000000000 --- a/numpy-1.6.2/doc/swig/test/Tensor.cxx +++ /dev/null @@ -1,131 +0,0 @@ -#include -#include -#include -#include "Tensor.h" - -// The following macro defines a family of functions that work with 3D -// arrays with the forms -// -// TYPE SNAMENorm( TYPE tensor[2][2][2]); -// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); -// TYPE SNAMEMin( int rows, int cols, int num, TYPE * 
tensor); -// void SNAMEScale( TYPE tensor[3][3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); -// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil); -// void SNAMELUSplit(TYPE in[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 3D input arrays, hard-coded length -// * 3D input arrays -// * 3D input arrays, data last -// * 3D in-place arrays, hard-coded lengths -// * 3D in-place arrays -// * 3D in-place arrays, data last -// * 3D argout arrays, hard-coded length -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Norm(TYPE tensor[2][2][2]) { \ - double result = 0; \ - for (int k=0; k<2; ++k) \ - for (int j=0; j<2; ++j) \ - for (int i=0; i<2; ++i) \ - result += tensor[i][j][k] * tensor[i][j][k]; \ - return (TYPE)sqrt(result/8); \ -} \ -\ -TYPE SNAME ## Max(TYPE * tensor, int rows, int cols, int num) { \ - int i, j, k, index; \ - TYPE result = tensor[0]; \ - for (k=0; k result) result = tensor[index]; \ - } \ - } \ - } \ - return result; \ -} \ -\ -TYPE SNAME ## Min(int rows, int cols, int num, TYPE * tensor) { \ - int i, j, k, index; \ - TYPE result = tensor[0]; \ - for (k=0; k ceil) array[index] = ceil; \ - } \ - } \ - } \ -} \ -\ -void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], \ - TYPE upper[2][2][2]) { \ - int sum; \ - for (int k=0; k<2; ++k) { \ - for (int j=0; j<2; ++j) { \ - for (int i=0; i<2; ++i) { \ - sum = i + j + k; \ - if (sum < 2) { \ - lower[i][j][k] = tensor[i][j][k]; \ - upper[i][j][k] = 0; \ - } else { \ - upper[i][j][k] = tensor[i][j][k]; \ - lower[i][j][k] = 0; \ - } \ - } \ - } \ - } \ -} - -TEST_FUNCS(signed char , schar ) -TEST_FUNCS(unsigned char 
, uchar ) -TEST_FUNCS(short , short ) -TEST_FUNCS(unsigned short , ushort ) -TEST_FUNCS(int , int ) -TEST_FUNCS(unsigned int , uint ) -TEST_FUNCS(long , long ) -TEST_FUNCS(unsigned long , ulong ) -TEST_FUNCS(long long , longLong ) -TEST_FUNCS(unsigned long long, ulongLong) -TEST_FUNCS(float , float ) -TEST_FUNCS(double , double ) diff --git a/numpy-1.6.2/doc/swig/test/Tensor.h b/numpy-1.6.2/doc/swig/test/Tensor.h deleted file mode 100644 index d60eb2d2ee..0000000000 --- a/numpy-1.6.2/doc/swig/test/Tensor.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef TENSOR_H -#define TENSOR_H - -// The following macro defines the prototypes for a family of -// functions that work with 3D arrays with the forms -// -// TYPE SNAMENorm( TYPE tensor[2][2][2]); -// TYPE SNAMEMax( TYPE * tensor, int rows, int cols, int num); -// TYPE SNAMEMin( int rows, int cols, int num, TYPE * tensor); -// void SNAMEScale( TYPE array[3][3][3]); -// void SNAMEFloor( TYPE * array, int rows, int cols, int num, TYPE floor); -// void SNAMECeil( int rows, int cols, int num, TYPE * array, TYPE ceil ); -// void SNAMELUSplit(TYPE in[3][3][3], TYPE lower[3][3][3], TYPE upper[3][3][3]); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). The macro is then expanded for the given -// TYPE/SNAME pairs. 
The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 3D input arrays, hard-coded lengths -// * 3D input arrays -// * 3D input arrays, data last -// * 3D in-place arrays, hard-coded lengths -// * 3D in-place arrays -// * 3D in-place arrays, data last -// * 3D argout arrays, hard-coded length -// -#define TEST_FUNC_PROTOS(TYPE, SNAME) \ -\ -TYPE SNAME ## Norm( TYPE tensor[2][2][2]); \ -TYPE SNAME ## Max( TYPE * tensor, int rows, int cols, int num); \ -TYPE SNAME ## Min( int rows, int cols, int num, TYPE * tensor); \ -void SNAME ## Scale( TYPE array[3][3][3], TYPE val); \ -void SNAME ## Floor( TYPE * array, int rows, int cols, int num, TYPE floor); \ -void SNAME ## Ceil( int rows, int cols, int num, TYPE * array, TYPE ceil ); \ -void SNAME ## LUSplit(TYPE tensor[2][2][2], TYPE lower[2][2][2], TYPE upper[2][2][2]); - -TEST_FUNC_PROTOS(signed char , schar ) -TEST_FUNC_PROTOS(unsigned char , uchar ) -TEST_FUNC_PROTOS(short , short ) -TEST_FUNC_PROTOS(unsigned short , ushort ) -TEST_FUNC_PROTOS(int , int ) -TEST_FUNC_PROTOS(unsigned int , uint ) -TEST_FUNC_PROTOS(long , long ) -TEST_FUNC_PROTOS(unsigned long , ulong ) -TEST_FUNC_PROTOS(long long , longLong ) -TEST_FUNC_PROTOS(unsigned long long, ulongLong) -TEST_FUNC_PROTOS(float , float ) -TEST_FUNC_PROTOS(double , double ) - -#endif diff --git a/numpy-1.6.2/doc/swig/test/Tensor.i b/numpy-1.6.2/doc/swig/test/Tensor.i deleted file mode 100644 index a1198dc9eb..0000000000 --- a/numpy-1.6.2/doc/swig/test/Tensor.i +++ /dev/null @@ -1,49 +0,0 @@ -// -*- c++ -*- -%module Tensor - -%{ -#define SWIG_FILE_WITH_INIT -#include "Tensor.h" -%} - -// Get the NumPy typemaps -%include "../numpy.i" - -%init %{ - import_array(); -%} - -%define %apply_numpy_typemaps(TYPE) - -%apply (TYPE IN_ARRAY3[ANY][ANY][ANY]) {(TYPE tensor[ANY][ANY][ANY])}; -%apply (TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3) - {(TYPE* tensor, int rows, int cols, int num)}; -%apply (int DIM1, int DIM2, int DIM3, TYPE* IN_ARRAY3) 
- {(int rows, int cols, int num, TYPE* tensor)}; - -%apply (TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) {(TYPE array[3][3][3])}; -%apply (TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) - {(TYPE* array, int rows, int cols, int num)}; -%apply (int DIM1, int DIM2, int DIM3, TYPE* INPLACE_ARRAY3) - {(int rows, int cols, int num, TYPE* array)}; - -%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE lower[2][2][2])}; -%apply (TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) {(TYPE upper[2][2][2])}; - -%enddef /* %apply_numpy_typemaps() macro */ - -%apply_numpy_typemaps(signed char ) -%apply_numpy_typemaps(unsigned char ) -%apply_numpy_typemaps(short ) -%apply_numpy_typemaps(unsigned short ) -%apply_numpy_typemaps(int ) -%apply_numpy_typemaps(unsigned int ) -%apply_numpy_typemaps(long ) -%apply_numpy_typemaps(unsigned long ) -%apply_numpy_typemaps(long long ) -%apply_numpy_typemaps(unsigned long long) -%apply_numpy_typemaps(float ) -%apply_numpy_typemaps(double ) - -// Include the header file to be wrapped -%include "Tensor.h" diff --git a/numpy-1.6.2/doc/swig/test/Vector.cxx b/numpy-1.6.2/doc/swig/test/Vector.cxx deleted file mode 100644 index 2c90404da1..0000000000 --- a/numpy-1.6.2/doc/swig/test/Vector.cxx +++ /dev/null @@ -1,100 +0,0 @@ -#include -#include -#include -#include "Vector.h" - -// The following macro defines a family of functions that work with 1D -// arrays with the forms -// -// TYPE SNAMELength( TYPE vector[3]); -// TYPE SNAMEProd( TYPE * series, int size); -// TYPE SNAMESum( int size, TYPE * series); -// void SNAMEReverse(TYPE array[3]); -// void SNAMEOnes( TYPE * array, int size); -// void SNAMEZeros( int size, TYPE * array); -// void SNAMEEOSplit(TYPE vector[3], TYPE even[3], odd[3]); -// void SNAMETwos( TYPE * twoVec, int size); -// void SNAMEThrees( int size, TYPE * threeVec); -// -// for any specified type TYPE (for example: short, unsigned int, long -// long, etc.) with given short name SNAME (for example: short, uint, -// longLong, etc.). 
The macro is then expanded for the given -// TYPE/SNAME pairs. The resulting functions are for testing numpy -// interfaces, respectively, for: -// -// * 1D input arrays, hard-coded length -// * 1D input arrays -// * 1D input arrays, data last -// * 1D in-place arrays, hard-coded length -// * 1D in-place arrays -// * 1D in-place arrays, data last -// * 1D argout arrays, hard-coded length -// * 1D argout arrays -// * 1D argout arrays, data last -// -#define TEST_FUNCS(TYPE, SNAME) \ -\ -TYPE SNAME ## Length(TYPE vector[3]) { \ - double result = 0; \ - for (int i=0; i<3; ++i) result += vector[i]*vector[i]; \ - return (TYPE)sqrt(result); \ -} \ -\ -TYPE SNAME ## Prod(TYPE * series, int size) { \ - TYPE result = 1; \ - for (int i=0; i>sys.stderr, self.typeStr, "... ", - second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.arange(9).reshape(3, 3).astype(self.typeCode) - self.assertEquals(second(matrix), 3) - - def testSecondElementFortran(self): - "Test luSplit function with a Fortran-array" - print >>sys.stderr, self.typeStr, "... ", - second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.asfortranarray(np.arange(9).reshape(3, 3), - self.typeCode) - self.assertEquals(second(matrix), 3) - - def testSecondElementObject(self): - "Test luSplit function with a Fortran-array" - print >>sys.stderr, self.typeStr, "... 
", - second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.asfortranarray([[0,1,2],[3,4,5],[6,7,8]], self.typeCode) - self.assertEquals(second(matrix), 3) - -###################################################################### - -class scharTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - 
-class ulongTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(FortranTestCase): - def __init__(self, methodName="runTest"): - FortranTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute 
the test suite - print "Testing 2D Functions of Module Matrix" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy-1.6.2/doc/swig/test/testMatrix.py b/numpy-1.6.2/doc/swig/test/testMatrix.py deleted file mode 100755 index 12061702d7..0000000000 --- a/numpy-1.6.2/doc/swig/test/testMatrix.py +++ /dev/null @@ -1,361 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -import os -import sys -import unittest - -# Import NumPy -import numpy as np -major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -import Matrix - -###################################################################### - -class MatrixTestCase(unittest.TestCase): - - def __init__(self, methodName="runTests"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDet(self): - "Test det function" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7],[6,9]] - self.assertEquals(det(matrix), 30) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetBadList(self): - "Test det function with bad list" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7], ["e", "pi"]] - self.assertRaises(BadListError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetWrongDim(self): - "Test det function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [8,7] - self.assertRaises(TypeError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetWrongSize(self): - "Test det function with wrong size" - print >>sys.stderr, self.typeStr, "... 
", - det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7,6], [5,4,3], [2,1,0]] - self.assertRaises(TypeError, det, matrix) - - # Test (type IN_ARRAY2[ANY][ANY]) typemap - def testDetNonContainer(self): - "Test det function with non-container" - print >>sys.stderr, self.typeStr, "... ", - det = Matrix.__dict__[self.typeStr + "Det"] - self.assertRaises(TypeError, det, None) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMax(self): - "Test max function" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,5,4],[3,2,1]] - self.assertEquals(max(matrix), 6) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxBadList(self): - "Test max function with bad list" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,"five",4], ["three", 2, "one"]] - self.assertRaises(BadListError, max, matrix) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxNonContainer(self): - "Test max function with non-container" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, None) - - # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap - def testMaxWrongDim(self): - "Test max function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - max = Matrix.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, 1, 2, 3]) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMin(self): - "Test min function" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [[9,8],[7,6],[5,4]] - self.assertEquals(min(matrix), 4) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinBadList(self): - "Test min function with bad list" - print >>sys.stderr, self.typeStr, "... 
", - min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [["nine","eight"], ["seven","six"]] - self.assertRaises(BadListError, min, matrix) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinWrongDim(self): - "Test min function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [1,3,5,7,9]) - - # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap - def testMinNonContainer(self): - "Test min function with non-container" - print >>sys.stderr, self.typeStr, "... ", - min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, False) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScale(self): - "Test scale function" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode) - scale(matrix,4) - self.assertEquals((matrix == [[4,8,12],[8,4,8],[12,8,4]]).all(), True) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongDim(self): - "Test scale function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([1,2,2,1],self.typeCode) - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongSize(self): - "Test scale function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2],[2,1]],self.typeCode) - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleWrongType(self): - "Test scale function with wrong type" - print >>sys.stderr, self.typeStr, "... 
", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],'c') - self.assertRaises(TypeError, scale, matrix) - - # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap - def testScaleNonArray(self): - "Test scale function with non-array" - print >>sys.stderr, self.typeStr, "... ", - scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = [[1,2,3],[2,1,2],[3,2,1]] - self.assertRaises(TypeError, scale, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloor(self): - "Test floor function" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([[6,7],[8,9]],self.typeCode) - floor(matrix,7) - np.testing.assert_array_equal(matrix, np.array([[7,7],[8,9]])) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorWrongDim(self): - "Test floor function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([6,7,8,9],self.typeCode) - self.assertRaises(TypeError, floor, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorWrongType(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([[6,7], [8,9]],'c') - self.assertRaises(TypeError, floor, matrix) - - # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap - def testFloorNonArray(self): - "Test floor function with non-array" - print >>sys.stderr, self.typeStr, "... ", - floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = [[6,7], [8,9]] - self.assertRaises(TypeError, floor, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeil(self): - "Test ceil function" - print >>sys.stderr, self.typeStr, "... 
", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([[1,2],[3,4]],self.typeCode) - ceil(matrix,3) - np.testing.assert_array_equal(matrix, np.array([[1,2],[3,3]])) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilWrongDim(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([1,2,3,4],self.typeCode) - self.assertRaises(TypeError, ceil, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilWrongType(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([[1,2], [3,4]],'c') - self.assertRaises(TypeError, ceil, matrix) - - # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap - def testCeilNonArray(self): - "Test ceil function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = [[1,2], [3,4]] - self.assertRaises(TypeError, ceil, matrix) - - # Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap - def testLUSplit(self): - "Test luSplit function" - print >>sys.stderr, self.typeStr, "... 
", - luSplit = Matrix.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]]) - self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True) - self.assertEquals((upper == [[0,2,3],[0,0,6],[0,0,0]]).all(), True) - -###################################################################### - -class scharTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class longTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - 
-###################################################################### - -class ulongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(MatrixTestCase): - def __init__(self, methodName="runTest"): - MatrixTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - 
suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 2D Functions of Module Matrix" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy-1.6.2/doc/swig/test/testTensor.py b/numpy-1.6.2/doc/swig/test/testTensor.py deleted file mode 100755 index 3d0ce097e6..0000000000 --- a/numpy-1.6.2/doc/swig/test/testTensor.py +++ /dev/null @@ -1,401 +0,0 @@ -#! /usr/bin/env python - -# System imports -from distutils.util import get_platform -from math import sqrt -import os -import sys -import unittest - -# Import NumPy -import numpy as np -major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -import Tensor - -###################################################################### - -class TensorTestCase(unittest.TestCase): - - def __init__(self, methodName="runTests"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - self.result = sqrt(28.0/8) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNorm(self): - "Test norm function" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1], [2,3]], - [[3,2], [1,0]]] - if isinstance(self.result, int): - self.assertEquals(norm(tensor), self.result) - else: - self.assertAlmostEqual(norm(tensor), self.result, 6) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormBadList(self): - "Test norm function with bad list" - print >>sys.stderr, self.typeStr, "... 
", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,"one"],[2,3]], - [[3,"two"],[1,0]]] - self.assertRaises(BadListError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormWrongDim(self): - "Test norm function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[0,1,2,3], - [3,2,1,0]] - self.assertRaises(TypeError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormWrongSize(self): - "Test norm function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1,0], [2,3,2]], - [[3,2,3], [1,0,1]]] - self.assertRaises(TypeError, norm, tensor) - - # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap - def testNormNonContainer(self): - "Test norm function with non-container" - print >>sys.stderr, self.typeStr, "... ", - norm = Tensor.__dict__[self.typeStr + "Norm"] - self.assertRaises(TypeError, norm, None) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMax(self): - "Test max function" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,2], [3,4]], - [[5,6], [7,8]]] - self.assertEquals(max(tensor), 8) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxBadList(self): - "Test max function with bad list" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,"two"], [3,4]], - [[5,"six"], [7,8]]] - self.assertRaises(BadListError, max, tensor) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxNonContainer(self): - "Test max function with non-container" - print >>sys.stderr, self.typeStr, "... 
", - max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, None) - - # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testMaxWrongDim(self): - "Test max function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, -1, 2, -3]) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMin(self): - "Test min function" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] - self.assertEquals(min(tensor), 2) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinBadList(self): - "Test min function with bad list" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[["nine",8], [7,6]], - [["five",4], [3,2]]] - self.assertRaises(BadListError, min, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinNonContainer(self): - "Test min function with non-container" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, True) - - # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap - def testMinWrongDim(self): - "Test min function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [[1,3],[5,7]]) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScale(self): - "Test scale function" - print >>sys.stderr, self.typeStr, "... 
", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],self.typeCode) - scale(tensor,4) - self.assertEquals((tensor == [[[4,0,4], [0,4,0], [4,0,4]], - [[0,4,0], [4,0,4], [0,4,0]], - [[4,0,4], [0,4,0], [4,0,4]]]).all(), True) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongType(self): - "Test scale function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],'c') - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongDim(self): - "Test scale function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[1,0,1], [0,1,0], [1,0,1], - [0,1,0], [1,0,1], [0,1,0]],self.typeCode) - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleWrongSize(self): - "Test scale function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0], [0,1], [1,0]], - [[0,1], [1,0], [0,1]], - [[1,0], [0,1], [1,0]]],self.typeCode) - self.assertRaises(TypeError, scale, tensor) - - # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap - def testScaleNonArray(self): - "Test scale function with non-array" - print >>sys.stderr, self.typeStr, "... ", - scale = Tensor.__dict__[self.typeStr + "Scale"] - self.assertRaises(TypeError, scale, True) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloor(self): - "Test floor function" - print >>sys.stderr, self.typeStr, "... 
", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[[1,2], [3,4]], - [[5,6], [7,8]]],self.typeCode) - floor(tensor,4) - np.testing.assert_array_equal(tensor, np.array([[[4,4], [4,4]], - [[5,6], [7,8]]])) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorWrongType(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[[1,2], [3,4]], - [[5,6], [7,8]]],'c') - self.assertRaises(TypeError, floor, tensor) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorWrongDim(self): - "Test floor function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode) - self.assertRaises(TypeError, floor, tensor) - - # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap - def testFloorNonArray(self): - "Test floor function with non-array" - print >>sys.stderr, self.typeStr, "... ", - floor = Tensor.__dict__[self.typeStr + "Floor"] - self.assertRaises(TypeError, floor, object) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeil(self): - "Test ceil function" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[[9,8], [7,6]], - [[5,4], [3,2]]],self.typeCode) - ceil(tensor,5) - np.testing.assert_array_equal(tensor, np.array([[[5,5], [5,5]], - [[5,4], [3,2]]])) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilWrongType(self): - "Test ceil function with wrong type" - print >>sys.stderr, self.typeStr, "... 
", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[[9,8], [7,6]], - [[5,4], [3,2]]],'c') - self.assertRaises(TypeError, ceil, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilWrongDim(self): - "Test ceil function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode) - self.assertRaises(TypeError, ceil, tensor) - - # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap - def testCeilNonArray(self): - "Test ceil function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] - self.assertRaises(TypeError, ceil, tensor) - - # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap - def testLUSplit(self): - "Test luSplit function" - print >>sys.stderr, self.typeStr, "... ", - luSplit = Tensor.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[[1,1], [1,1]], - [[1,1], [1,1]]]) - self.assertEquals((lower == [[[1,1], [1,0]], - [[1,0], [0,0]]]).all(), True) - self.assertEquals((upper == [[[0,0], [0,1]], - [[0,1], [1,1]]]).all(), True) - -###################################################################### - -class scharTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - self.result = int(self.result) - -###################################################################### - -class ucharTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - self.result = int(self.result) - -###################################################################### - -class shortTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - 
TensorTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - self.result = int(self.result) - -###################################################################### - -class ushortTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - self.result = int(self.result) - -###################################################################### - -class intTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - self.result = int(self.result) - -###################################################################### - -class uintTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - self.result = int(self.result) - -###################################################################### - -class longTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - self.result = int(self.result) - -###################################################################### - -class ulongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - self.result = int(self.result) - -###################################################################### - -class longLongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - self.result = int(self.result) - -###################################################################### - -class ulongLongTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - 
TensorTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - self.result = int(self.result) - -###################################################################### - -class floatTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(TensorTestCase): - def __init__(self, methodName="runTest"): - TensorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 3D Functions of Module Tensor" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy-1.6.2/doc/swig/test/testVector.py b/numpy-1.6.2/doc/swig/test/testVector.py deleted file mode 100755 index 2ee9183899..0000000000 --- a/numpy-1.6.2/doc/swig/test/testVector.py +++ /dev/null @@ -1,380 +0,0 @@ -#! 
/usr/bin/env python - -# System imports -from distutils.util import get_platform -import os -import sys -import unittest - -# Import NumPy -import numpy as np -major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] -if major == 0: BadListError = TypeError -else: BadListError = ValueError - -import Vector - -###################################################################### - -class VectorTestCase(unittest.TestCase): - - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLength(self): - "Test length function" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertEquals(length([5, 12, 0]), 13) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthBadList(self): - "Test length function with bad list" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(BadListError, length, [5, "twelve", 0]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthWrongSize(self): - "Test length function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [5, 12]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthWrongDim(self): - "Test length function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [[1,2], [3,4]]) - - # Test the (type IN_ARRAY1[ANY]) typemap - def testLengthNonContainer(self): - "Test length function with non-container" - print >>sys.stderr, self.typeStr, "... 
", - length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, None) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProd(self): - "Test prod function" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertEquals(prod([1,2,3,4]), 24) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdBadList(self): - "Test prod function with bad list" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]]) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdWrongDim(self): - "Test prod function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, [[1,2], [8,9]]) - - # Test the (type* IN_ARRAY1, int DIM1) typemap - def testProdNonContainer(self): - "Test prod function with non-container" - print >>sys.stderr, self.typeStr, "... ", - prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, None) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSum(self): - "Test sum function" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertEquals(sum([5,6,7,8]), 26) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumBadList(self): - "Test sum function with bad list" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(BadListError, sum, [3,4, 5, "pi"]) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumWrongDim(self): - "Test sum function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, [[3,4], [5,6]]) - - # Test the (int DIM1, type* IN_ARRAY1) typemap - def testSumNonContainer(self): - "Test sum function with non-container" - print >>sys.stderr, self.typeStr, "... ", - sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, True) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverse(self): - "Test reverse function" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([1,2,4],self.typeCode) - reverse(vector) - self.assertEquals((vector == [4,2,1]).all(), True) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongDim(self): - "Test reverse function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([[1,2], [3,4]],self.typeCode) - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongSize(self): - "Test reverse function with wrong size" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([9,8,7,6,5,4],self.typeCode) - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseWrongType(self): - "Test reverse function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([1,2,4],'c') - self.assertRaises(TypeError, reverse, vector) - - # Test the (type INPLACE_ARRAY1[ANY]) typemap - def testReverseNonArray(self): - "Test reverse function with non-array" - print >>sys.stderr, self.typeStr, "... 
", - reverse = Vector.__dict__[self.typeStr + "Reverse"] - self.assertRaises(TypeError, reverse, [2,4,6]) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnes(self): - "Test ones function" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros(5,self.typeCode) - ones(vector) - np.testing.assert_array_equal(vector, np.array([1,1,1,1,1])) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesWrongDim(self): - "Test ones function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros((5,5),self.typeCode) - self.assertRaises(TypeError, ones, vector) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesWrongType(self): - "Test ones function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros((5,5),'c') - self.assertRaises(TypeError, ones, vector) - - # Test the (type* INPLACE_ARRAY1, int DIM1) typemap - def testOnesNonArray(self): - "Test ones function with non-array" - print >>sys.stderr, self.typeStr, "... ", - ones = Vector.__dict__[self.typeStr + "Ones"] - self.assertRaises(TypeError, ones, [2,4,6,8]) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZeros(self): - "Test zeros function" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones(5,self.typeCode) - zeros(vector) - np.testing.assert_array_equal(vector, np.array([0,0,0,0,0])) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosWrongDim(self): - "Test zeros function with wrong dimensions" - print >>sys.stderr, self.typeStr, "... 
", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones((5,5),self.typeCode) - self.assertRaises(TypeError, zeros, vector) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosWrongType(self): - "Test zeros function with wrong type" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones(6,'c') - self.assertRaises(TypeError, zeros, vector) - - # Test the (int DIM1, type* INPLACE_ARRAY1) typemap - def testZerosNonArray(self): - "Test zeros function with non-array" - print >>sys.stderr, self.typeStr, "... ", - zeros = Vector.__dict__[self.typeStr + "Zeros"] - self.assertRaises(TypeError, zeros, [1,3,5,7,9]) - - # Test the (type ARGOUT_ARRAY1[ANY]) typemap - def testEOSplit(self): - "Test eoSplit function" - print >>sys.stderr, self.typeStr, "... ", - eoSplit = Vector.__dict__[self.typeStr + "EOSplit"] - even, odd = eoSplit([1,2,3]) - self.assertEquals((even == [1,0,3]).all(), True) - self.assertEquals((odd == [0,2,0]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testTwos(self): - "Test twos function" - print >>sys.stderr, self.typeStr, "... ", - twos = Vector.__dict__[self.typeStr + "Twos"] - vector = twos(5) - self.assertEquals((vector == [2,2,2,2,2]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testTwosNonInt(self): - "Test twos function with non-integer dimension" - print >>sys.stderr, self.typeStr, "... ", - twos = Vector.__dict__[self.typeStr + "Twos"] - self.assertRaises(TypeError, twos, 5.0) - - # Test the (int DIM1, type* ARGOUT_ARRAY1) typemap - def testThrees(self): - "Test threes function" - print >>sys.stderr, self.typeStr, "... 
", - threes = Vector.__dict__[self.typeStr + "Threes"] - vector = threes(6) - self.assertEquals((vector == [3,3,3,3,3,3]).all(), True) - - # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap - def testThreesNonInt(self): - "Test threes function with non-integer dimension" - print >>sys.stderr, self.typeStr, "... ", - threes = Vector.__dict__[self.typeStr + "Threes"] - self.assertRaises(TypeError, threes, "threes") - -###################################################################### - -class scharTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "schar" - self.typeCode = "b" - -###################################################################### - -class ucharTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "uchar" - self.typeCode = "B" - -###################################################################### - -class shortTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "short" - self.typeCode = "h" - -###################################################################### - -class ushortTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ushort" - self.typeCode = "H" - -###################################################################### - -class intTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "int" - self.typeCode = "i" - -###################################################################### - -class uintTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "uint" - self.typeCode = "I" - -###################################################################### - -class 
longTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "long" - self.typeCode = "l" - -###################################################################### - -class ulongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ulong" - self.typeCode = "L" - -###################################################################### - -class longLongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "longLong" - self.typeCode = "q" - -###################################################################### - -class ulongLongTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "ulongLong" - self.typeCode = "Q" - -###################################################################### - -class floatTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "float" - self.typeCode = "f" - -###################################################################### - -class doubleTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -###################################################################### - -if __name__ == "__main__": - - # Build the test suite - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) 
- suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) - - # Execute the test suite - print "Testing 1D Functions of Module Vector" - print "NumPy version", np.__version__ - print - result = unittest.TextTestRunner(verbosity=2).run(suite) - sys.exit(len(result.errors) + len(result.failures)) diff --git a/numpy-1.6.2/numpy/__init__.py b/numpy-1.6.2/numpy/__init__.py deleted file mode 100644 index f2ffdf92ce..0000000000 --- a/numpy-1.6.2/numpy/__init__.py +++ /dev/null @@ -1,171 +0,0 @@ -""" -NumPy -===== - -Provides - 1. An array object of arbitrary homogeneous items - 2. Fast mathematical operations over arrays - 3. Linear Algebra, Fourier Transforms, Random Number Generation - -How to use the documentation ----------------------------- -Documentation is available in two forms: docstrings provided -with the code, and a loose standing reference guide, available from -`the NumPy homepage `_. - -We recommend exploring the docstrings using -`IPython `_, an advanced Python shell with -TAB-completion and introspection capabilities. See below for further -instructions. - -The docstring examples assume that `numpy` has been imported as `np`:: - - >>> import numpy as np - -Code snippets are indicated by three greater-than signs:: - - >>> x = 42 - >>> x = x + 1 - -Use the built-in ``help`` function to view a function's docstring:: - - >>> help(np.sort) - ... # doctest: +SKIP - -For some objects, ``np.info(obj)`` may provide additional help. This is -particularly true if you see the line "Help on ufunc object:" at the top -of the help() page. Ufuncs are implemented in C, not Python, for speed. -The native Python help() does not know how to view their help, but our -np.info() function does. - -To search for documents containing a keyword, do:: - - >>> np.lookfor('keyword') - ... 
# doctest: +SKIP - -General-purpose documents like a glossary and help on the basic concepts -of numpy are available under the ``doc`` sub-module:: - - >>> from numpy import doc - >>> help(doc) - ... # doctest: +SKIP - -Available subpackages ---------------------- -doc - Topical documentation on broadcasting, indexing, etc. -lib - Basic functions used by several sub-packages. -random - Core Random Tools -linalg - Core Linear Algebra Tools -fft - Core FFT routines -polynomial - Polynomial tools -testing - Numpy testing tools -f2py - Fortran to Python Interface Generator. -distutils - Enhancements to distutils with support for - Fortran compilers support and more. - -Utilities ---------- -test - Run numpy unittests -show_config - Show numpy build configuration -dual - Overwrite certain functions with high-performance Scipy tools -matlib - Make everything matrices. -__version__ - Numpy version string - -Viewing documentation using IPython ------------------------------------ -Start IPython with the NumPy profile (``ipython -p numpy``), which will -import `numpy` under the alias `np`. Then, use the ``cpaste`` command to -paste examples into the shell. To see which functions are available in -`numpy`, type ``np.`` (where ```` refers to the TAB key), or use -``np.*cos*?`` (where ```` refers to the ENTER key) to narrow -down the list. To view the docstring for a function, use -``np.cos?`` (to view the docstring) and ``np.cos??`` (to view -the source code). - -Copies vs. in-place operation ------------------------------ -Most of the functions in `numpy` return a copy of the array argument -(e.g., `np.sort`). In-place versions of these functions are often -available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. -Exceptions to this rule are documented. - -""" - -# We first need to detect if we're being called as part of the numpy setup -# procedure itself in a reliable manner. 
-try: - __NUMPY_SETUP__ -except NameError: - __NUMPY_SETUP__ = False - - -if __NUMPY_SETUP__: - import sys as _sys - _sys.stderr.write('Running from numpy source directory.') - del _sys -else: - try: - from numpy.__config__ import show as show_config - except ImportError: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python intepreter from there.""" - raise ImportError(msg) - from version import git_revision as __git_revision__ - from version import version as __version__ - - from _import_tools import PackageLoader - - def pkgload(*packages, **options): - loader = PackageLoader(infunc=True) - return loader(*packages, **options) - - import add_newdocs - __all__ = ['add_newdocs'] - - pkgload.__doc__ = PackageLoader.__call__.__doc__ - - from testing import Tester - test = Tester().test - bench = Tester().bench - - import core - from core import * - import compat - import lib - from lib import * - import linalg - import fft - import polynomial - import random - import ctypeslib - import ma - import matrixlib as _mat - from matrixlib import * - - # Make these accessible from numpy name-space - # but not imported in from numpy import * - from __builtin__ import bool, int, long, float, complex, \ - object, unicode, str - from core import round, abs, max, min - - __all__.extend(['__version__', 'pkgload', 'PackageLoader', - 'show_config']) - __all__.extend(core.__all__) - __all__.extend(_mat.__all__) - __all__.extend(lib.__all__) - __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma']) diff --git a/numpy-1.6.2/numpy/_import_tools.py b/numpy-1.6.2/numpy/_import_tools.py deleted file mode 100644 index 38bf712fe3..0000000000 --- a/numpy-1.6.2/numpy/_import_tools.py +++ /dev/null @@ -1,346 +0,0 @@ -import os -import sys - -__all__ = ['PackageLoader'] - -class PackageLoader: - def __init__(self, verbose=False, infunc=False): - """ Manages loading packages. 
- """ - - if infunc: - _level = 2 - else: - _level = 1 - self.parent_frame = frame = sys._getframe(_level) - self.parent_name = eval('__name__',frame.f_globals,frame.f_locals) - parent_path = eval('__path__',frame.f_globals,frame.f_locals) - if isinstance(parent_path, str): - parent_path = [parent_path] - self.parent_path = parent_path - if '__all__' not in frame.f_locals: - exec('__all__ = []',frame.f_globals,frame.f_locals) - self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals) - - self.info_modules = {} - self.imported_packages = [] - self.verbose = None - - def _get_info_files(self, package_dir, parent_path, parent_package=None): - """ Return list of (package name,info.py file) from parent_path subdirectories. - """ - from glob import glob - files = glob(os.path.join(parent_path,package_dir,'info.py')) - for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')): - if info_file[:-1] not in files: - files.append(info_file) - info_files = [] - for info_file in files: - package_name = os.path.dirname(info_file[len(parent_path)+1:])\ - .replace(os.sep,'.') - if parent_package: - package_name = parent_package + '.' + package_name - info_files.append((package_name,info_file)) - info_files.extend(self._get_info_files('*', - os.path.dirname(info_file), - package_name)) - return info_files - - def _init_info_modules(self, packages=None): - """Initialize info_modules = {: }. 
- """ - import imp - info_files = [] - info_modules = self.info_modules - - if packages is None: - for path in self.parent_path: - info_files.extend(self._get_info_files('*',path)) - else: - for package_name in packages: - package_dir = os.path.join(*package_name.split('.')) - for path in self.parent_path: - names_files = self._get_info_files(package_dir, path) - if names_files: - info_files.extend(names_files) - break - else: - try: - exec 'import %s.info as info' % (package_name) - info_modules[package_name] = info - except ImportError, msg: - self.warn('No scipy-style subpackage %r found in %s. '\ - 'Ignoring: %s'\ - % (package_name,':'.join(self.parent_path), msg)) - - for package_name,info_file in info_files: - if package_name in info_modules: - continue - fullname = self.parent_name +'.'+ package_name - if info_file[-1]=='c': - filedescriptor = ('.pyc','rb',2) - else: - filedescriptor = ('.py','U',1) - - try: - info_module = imp.load_module(fullname+'.info', - open(info_file,filedescriptor[1]), - info_file, - filedescriptor) - except Exception,msg: - self.error(msg) - info_module = None - - if info_module is None or getattr(info_module,'ignore',False): - info_modules.pop(package_name,None) - else: - self._init_info_modules(getattr(info_module,'depends',[])) - info_modules[package_name] = info_module - - return - - def _get_sorted_names(self): - """ Return package names sorted in the order as they should be - imported due to dependence relations between packages. 
- """ - - depend_dict = {} - for name,info_module in self.info_modules.items(): - depend_dict[name] = getattr(info_module,'depends',[]) - package_names = [] - - for name in depend_dict.keys(): - if not depend_dict[name]: - package_names.append(name) - del depend_dict[name] - - while depend_dict: - for name, lst in depend_dict.items(): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - package_names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - - return package_names - - def __call__(self,*packages, **options): - """Load one or more packages into parent package top-level namespace. - - This function is intended to shorten the need to import many - subpackages, say of scipy, constantly with statements such as - - import scipy.linalg, scipy.fftpack, scipy.etc... - - Instead, you can say: - - import scipy - scipy.pkgload('linalg','fftpack',...) - - or - - scipy.pkgload() - - to load all of them in one call. - - If a name which doesn't exist in scipy's namespace is - given, a warning is shown. - - Parameters - ---------- - *packages : arg-tuple - the names (one or more strings) of all the modules one - wishes to load into the top-level namespace. - verbose= : integer - verbosity level [default: -1]. - verbose=-1 will suspend also warnings. - force= : bool - when True, force reloading loaded packages [default: False]. 
- postpone= : bool - when True, don't load packages [default: False] - - """ - frame = self.parent_frame - self.info_modules = {} - if options.get('force',False): - self.imported_packages = [] - self.verbose = verbose = options.get('verbose',-1) - postpone = options.get('postpone',None) - self._init_info_modules(packages or None) - - self.log('Imports to %r namespace\n----------------------------'\ - % self.parent_name) - - for package_name in self._get_sorted_names(): - if package_name in self.imported_packages: - continue - info_module = self.info_modules[package_name] - global_symbols = getattr(info_module,'global_symbols',[]) - postpone_import = getattr(info_module,'postpone_import',False) - if (postpone and not global_symbols) \ - or (postpone_import and postpone is not None): - continue - - old_object = frame.f_locals.get(package_name,None) - - cmdstr = 'import '+package_name - if self._execcmd(cmdstr): - continue - self.imported_packages.append(package_name) - - if verbose!=-1: - new_object = frame.f_locals.get(package_name) - if old_object is not None and old_object is not new_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (package_name,self._obj2repr(new_object), - self._obj2repr(old_object))) - - if '.' 
not in package_name: - self.parent_export_names.append(package_name) - - for symbol in global_symbols: - if symbol=='*': - symbols = eval('getattr(%s,"__all__",None)'\ - % (package_name), - frame.f_globals,frame.f_locals) - if symbols is None: - symbols = eval('dir(%s)' % (package_name), - frame.f_globals,frame.f_locals) - symbols = filter(lambda s:not s.startswith('_'),symbols) - else: - symbols = [symbol] - - if verbose!=-1: - old_objects = {} - for s in symbols: - if s in frame.f_locals: - old_objects[s] = frame.f_locals[s] - - cmdstr = 'from '+package_name+' import '+symbol - if self._execcmd(cmdstr): - continue - - if verbose!=-1: - for s,old_object in old_objects.items(): - new_object = frame.f_locals[s] - if new_object is not old_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (s,self._obj2repr(new_object), - self._obj2repr(old_object))) - - if symbol=='*': - self.parent_export_names.extend(symbols) - else: - self.parent_export_names.append(symbol) - - return - - def _execcmd(self,cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - try: - exec (cmdstr, frame.f_globals,frame.f_locals) - except Exception,msg: - self.error('%s -> failed: %s' % (cmdstr,msg)) - return True - else: - self.log('%s -> success' % (cmdstr)) - return - - def _obj2repr(self,obj): - """ Return repr(obj) with""" - module = getattr(obj,'__module__',None) - file = getattr(obj,'__file__',None) - if module is not None: - return repr(obj) + ' from ' + module - if file is not None: - return repr(obj) + ' from ' + file - return repr(obj) - - def log(self,mess): - if self.verbose>1: - print >> sys.stderr, str(mess) - def warn(self,mess): - if self.verbose>=0: - print >> sys.stderr, str(mess) - def error(self,mess): - if self.verbose!=-1: - print >> sys.stderr, str(mess) - - def _get_doc_title(self, info_module): - """ Get the title from a package info.py file. 
- """ - title = getattr(info_module,'__doc_title__',None) - if title is not None: - return title - title = getattr(info_module,'__doc__',None) - if title is not None: - title = title.lstrip().split('\n',1)[0] - return title - return '* Not Available *' - - def _format_titles(self,titles,colsep='---'): - display_window_width = 70 # How to determine the correct value in runtime?? - lengths = [len(name)-name.find('.')-1 for (name,title) in titles]+[0] - max_length = max(lengths) - lines = [] - for (name,title) in titles: - name = name[name.find('.')+1:] - w = max_length - len(name) - words = title.split() - line = '%s%s %s' % (name,w*' ',colsep) - tab = len(line) * ' ' - while words: - word = words.pop(0) - if len(line)+len(word)>display_window_width: - lines.append(line) - line = tab - line += ' ' + word - else: - lines.append(line) - return '\n'.join(lines) - - def get_pkgdocs(self): - """ Return documentation summary of subpackages. - """ - import sys - self.info_modules = {} - self._init_info_modules(None) - - titles = [] - symbols = [] - for package_name, info_module in self.info_modules.items(): - global_symbols = getattr(info_module,'global_symbols',[]) - fullname = self.parent_name +'.'+ package_name - note = '' - if fullname not in sys.modules: - note = ' [*]' - titles.append((fullname,self._get_doc_title(info_module) + note)) - if global_symbols: - symbols.append((package_name,', '.join(global_symbols))) - - retstr = self._format_titles(titles) +\ - '\n [*] - using a package requires explicit import (see pkgload)' - - - if symbols: - retstr += """\n\nGlobal symbols from subpackages"""\ - """\n-------------------------------\n""" +\ - self._format_titles(symbols,'-->') - - return retstr - -class PackageLoaderDebug(PackageLoader): - def _execcmd(self,cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - print 'Executing',`cmdstr`,'...', - sys.stdout.flush() - exec (cmdstr, frame.f_globals,frame.f_locals) - print 'ok' - 
sys.stdout.flush() - return - -if int(os.environ.get('NUMPY_IMPORT_DEBUG','0')): - PackageLoader = PackageLoaderDebug diff --git a/numpy-1.6.2/numpy/add_newdocs.py b/numpy-1.6.2/numpy/add_newdocs.py deleted file mode 100644 index 92b260754e..0000000000 --- a/numpy-1.6.2/numpy/add_newdocs.py +++ /dev/null @@ -1,6900 +0,0 @@ -# This is only meant to add docs to objects defined in C-extension modules. -# The purpose is to allow easier editing of the docstrings without -# requiring a re-compile. - -# NOTE: Many of the methods of ndarray have corresponding functions. -# If you update these docstrings, please keep also the ones in -# core/fromnumeric.py, core/defmatrix.py up-to-date. - -from numpy.lib import add_newdoc - -############################################################################### -# -# flatiter -# -# flatiter needs a toplevel description -# -############################################################################### - -add_newdoc('numpy.core', 'flatiter', - """ - Flat iterator object to iterate over arrays. - - A `flatiter` iterator is returned by ``x.flat`` for any array `x`. - It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. - - Iteration is done in C-contiguous style, with the last index varying the - fastest. The iterator can also be indexed using basic slicing or - advanced indexing. - - See Also - -------- - ndarray.flat : Return a flat iterator over an array. - ndarray.flatten : Returns a flattened copy of an array. - - Notes - ----- - A `flatiter` iterator can not be constructed directly from Python code - by calling the `flatiter` constructor. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> type(fl) - - >>> for item in fl: - ... print item - ... 
- 0 - 1 - 2 - 3 - 4 - 5 - - >>> fl[2:4] - array([2, 3]) - - """) - -# flatiter attributes - -add_newdoc('numpy.core', 'flatiter', ('base', - """ - A reference to the array that is iterated over. - - Examples - -------- - >>> x = np.arange(5) - >>> fl = x.flat - >>> fl.base is x - True - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('coords', - """ - An N-dimensional tuple of current coordinates. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.coords - (0, 0) - >>> fl.next() - 0 - >>> fl.coords - (0, 1) - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('index', - """ - Current flat index into the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.index - 0 - >>> fl.next() - 0 - >>> fl.index - 1 - - """)) - -# flatiter functions - -add_newdoc('numpy.core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator - - """)) - - -add_newdoc('numpy.core', 'flatiter', ('copy', - """ - copy() - - Get a copy of the iterator as a 1-D array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> fl = x.flat - >>> fl.copy() - array([0, 1, 2, 3, 4, 5]) - - """)) - - -############################################################################### -# -# nditer -# -############################################################################### - -add_newdoc('numpy.core', 'nditer', - """ - Efficient multi-dimensional iterator object to iterate over arrays. - - Parameters - ---------- - op : ndarray or sequence of array_like - The array(s) to iterate over. - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * "buffered" enables buffering when required. - * "c_index" causes a C-order index to be tracked. - * "f_index" causes a Fortran-order index to be tracked. - * "multi_index" causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. 
- * "common_dtype" causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * "delay_bufalloc" delays allocation of the buffers until - a reset() call is made. Allows "allocate" operands to - be initialized before their values are copied into the buffers. - * "external_loop" causes the `values` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * "grow_inner" allows the `value` array sizes to be made - larger than the buffer size when both "buffered" and - "external_loop" is used. - * "ranged" allows the iterator to be restricted to a sub-range - of the iterindex values. - * "refs_ok" enables iteration of reference types, such as - object arrays. - * "reduce_ok" enables iteration of "readwrite" operands - which are broadcasted, also known as reduction operands. - * "zerosize_ok" allows `itersize` to be zero. - op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - "readonly", "readwrite", or "writeonly" must be specified. - - * "readonly" indicates the operand will only be read from. - * "readwrite" indicates the operand will be read from and written to. - * "writeonly" indicates the operand will only be written to. - * "no_broadcast" prevents the operand from being broadcasted. - * "contig" forces the operand data to be contiguous. - * "aligned" forces the operand data to be aligned. - * "nbo" forces the operand data to be in native byte order. - * "copy" allows a temporary read-only copy if required. - * "updateifcopy" allows a temporary read-write copy if required. - * "allocate" causes the array to be allocated if it is None - in the `op` parameter. - * "no_subtype" prevents an "allocate" operand from using a subtype. - op_dtypes : dtype or tuple of dtype(s), optional - The required data type(s) of the operands. If copying or buffering - is enabled, the data will be converted to/from their original types. 
- order : {'C', 'F', 'A', or 'K'}, optional - Controls the iteration order. 'C' means C order, 'F' means - Fortran order, 'A' means 'F' order if all the arrays are Fortran - contiguous, 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. This also - affects the element memory order of "allocate" operands, as they - are allocated to be compatible with iteration order. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when making a copy - or buffering. Setting this to 'unsafe' is not recommended, - as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - op_axes : list of list of ints, optional - If provided, is a list of ints or None for each operands. - The list of axes for an operand is a mapping from the dimensions - of the iterator to the dimensions of the operand. A value of - -1 can be placed for entries, causing that dimension to be - treated as "newaxis". - itershape : tuple of ints, optional - The desired shape of the iterator. This allows "allocate" operands - with a dimension mapped by op_axes not corresponding to a dimension - of a different operand to get a value not equal to 1 for that - dimension. - buffersize : int, optional - When buffering is enabled, controls the size of the temporary - buffers. Set to 0 for the default value. - - Attributes - ---------- - dtypes : tuple of dtype(s) - The data types of the values provided in `value`. This may be - different from the operand data types if buffering is enabled. - finished : bool - Whether the iteration over the operands is finished or not. 
- has_delayed_bufalloc : bool - If True, the iterator was created with the "delay_bufalloc" flag, - and no reset() function was called on it yet. - has_index : bool - If True, the iterator was created with either the "c_index" or - the "f_index" flag, and the property `index` can be used to - retrieve it. - has_multi_index : bool - If True, the iterator was created with the "multi_index" flag, - and the property `multi_index` can be used to retrieve it. - index : - When the "c_index" or "f_index" flag was used, this property - provides access to the index. Raises a ValueError if accessed - and `has_index` is False. - iterationneedsapi : bool - Whether iteration requires access to the Python API, for example - if one of the operands is an object array. - iterindex : int - An index which matches the order of iteration. - itersize : int - Size of the iterator. - itviews : - Structured view(s) of `operands` in memory, matching the reordered - and optimized iterator access pattern. - multi_index : - When the "multi_index" flag was used, this property - provides access to the index. Raises a ValueError if accessed - accessed and `has_multi_index` is False. - ndim : int - The iterator's dimension. - nop : int - The number of iterator operands. - operands : tuple of operand(s) - The array(s) to be iterated over. - shape : tuple of ints - Shape tuple, the shape of the iterator. - value : - Value of `operands` at current iteration. Normally, this is a - tuple of array scalars, but if the flag "external_loop" is used, - it is a tuple of one dimensional arrays. - - Notes - ----- - `nditer` supersedes `flatiter`. The iterator implementation behind - `nditer` is also exposed by the Numpy C API. - - The Python exposure supplies two iteration interfaces, one which follows - the Python iterator protocol, and another which mirrors the C-style - do-while pattern. 
The native Python approach is better in most cases, but - if you need the iterator's coordinates or index, use the C-style pattern. - - Examples - -------- - Here is how we might write an ``iter_add`` function, using the - Python iterator protocol:: - - def iter_add_py(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - for (a, b, c) in it: - addop(a, b, out=c) - return it.operands[2] - - Here is the same function, but following the C-style pattern:: - - def iter_add(x, y, out=None): - addop = np.add - - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - - while not it.finished: - addop(it[0], it[1], out=it[2]) - it.iternext() - - return it.operands[2] - - Here is an example outer product function:: - - def outer_it(x, y, out=None): - mulop = np.multiply - - it = np.nditer([x, y, out], ['external_loop'], - [['readonly'], ['readonly'], ['writeonly', 'allocate']], - op_axes=[range(x.ndim)+[-1]*y.ndim, - [-1]*x.ndim+range(y.ndim), - None]) - - for (a, b, c) in it: - mulop(a, b, out=c) - - return it.operands[2] - - >>> a = np.arange(2)+1 - >>> b = np.arange(3)+1 - >>> outer_it(a,b) - array([[1, 2, 3], - [2, 4, 6]]) - - Here is an example function which operates like a "lambda" ufunc:: - - def luf(lamdaexpr, *args, **kwargs): - "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" - nargs = len(args) - op = (kwargs.get('out',None),) + args - it = np.nditer(op, ['buffered','external_loop'], - [['writeonly','allocate','no_broadcast']] + - [['readonly','nbo','aligned']]*nargs, - order=kwargs.get('order','K'), - casting=kwargs.get('casting','safe'), - buffersize=kwargs.get('buffersize',0)) - while not it.finished: - it[0] = lamdaexpr(*it[1:]) - it.iternext() - return it.operands[0] - - >>> a = np.arange(5) - >>> b = np.ones(5) - >>> luf(lambda i,j:i*i + j/2, a, b) - array([ 0.5, 1.5, 4.5, 9.5, 16.5]) - - """) - -# nditer methods 
- -add_newdoc('numpy.core', 'nditer', ('copy', - """ - copy() - - Get a copy of the iterator in its current state. - - Examples - -------- - >>> x = np.arange(10) - >>> y = x + 1 - >>> it = np.nditer([x, y]) - >>> it.next() - (array(0), array(1)) - >>> it2 = it.copy() - >>> it2.next() - (array(1), array(2)) - - """)) - -add_newdoc('numpy.core', 'nditer', ('debug_print', - """ - debug_print() - - Print the current state of the `nditer` instance and debug info to stdout. - - """)) - -add_newdoc('numpy.core', 'nditer', ('enable_external_loop', - """ - enable_external_loop() - - When the "external_loop" was not used during construction, but - is desired, this modifies the iterator to behave as if the flag - was specified. - - """)) - -add_newdoc('numpy.core', 'nditer', ('iternext', - """ - iternext() - - Check whether iterations are left, and perform a single internal iteration - without returning the result. Used in the C-style pattern do-while - pattern. For an example, see `nditer`. - - Returns - ------- - iternext : bool - Whether or not there are iterations left. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_axis', - """ - remove_axis(i) - - Removes axis `i` from the iterator. Requires that the flag "multi_index" - be enabled. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_multi_index', - """ - remove_multi_index() - - When the "multi_index" flag was specified, this removes it, allowing - the internal iteration structure to be optimized further. - - """)) - -add_newdoc('numpy.core', 'nditer', ('reset', - """ - reset() - - Reset the iterator to its initial state. - - """)) - - - -############################################################################### -# -# broadcast -# -############################################################################### - -add_newdoc('numpy.core', 'broadcast', - """ - Produce an object that mimics broadcasting. - - Parameters - ---------- - in1, in2, ... : array_like - Input parameters. 
- - Returns - ------- - b : broadcast object - Broadcast the input parameters against one another, and - return an object that encapsulates the result. - Amongst others, it has ``shape`` and ``nd`` properties, and - may be used as an iterator. - - Examples - -------- - Manually adding two vectors, using broadcasting: - - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - - >>> out = np.empty(b.shape) - >>> out.flat = [u+v for (u,v) in b] - >>> out - array([[ 5., 6., 7.], - [ 6., 7., 8.], - [ 7., 8., 9.]]) - - Compare against built-in broadcasting: - - >>> x + y - array([[5, 6, 7], - [6, 7, 8], - [7, 8, 9]]) - - """) - -# attributes - -add_newdoc('numpy.core', 'broadcast', ('index', - """ - current index in broadcasted result - - Examples - -------- - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> b.next(), b.next(), b.next() - ((1, 4), (1, 5), (1, 6)) - >>> b.index - 3 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('iters', - """ - tuple of iterators along ``self``'s "components." - - Returns a tuple of `numpy.flatiter` objects, one for each "component" - of ``self``. - - See Also - -------- - numpy.flatiter - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> row, col = b.iters - >>> row.next(), col.next() - (1, 4) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('nd', - """ - Number of dimensions of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.nd - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('numiter', - """ - Number of iterators possessed by the broadcasted result. 
- - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.numiter - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('shape', - """ - Shape of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.shape - (3, 3) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('size', - """ - Total size of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.size - 9 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('reset', - """ - reset() - - Reset the broadcasted result's iterator(s). - - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]] - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> b.next(), b.next(), b.next() - ((1, 4), (2, 4), (3, 4)) - >>> b.index - 3 - >>> b.reset() - >>> b.index - 0 - - """)) - -############################################################################### -# -# numpy functions -# -############################################################################### - -add_newdoc('numpy.core.multiarray', 'array', - """ - array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0) - - Create an array. - - Parameters - ---------- - object : array_like - An array, any object exposing the array interface, an - object whose __array__ method returns an array, or any - (nested) sequence. - dtype : data-type, optional - The desired data-type for the array. If not given, then - the type will be determined as the minimum type required - to hold the objects in the sequence. This argument can only - be used to 'upcast' the array. For downcasting, use the - .astype(t) method. - copy : bool, optional - If true (default), then the object is copied. 
Otherwise, a copy - will only be made if __array__ returns a copy, if obj is a - nested sequence, or if a copy is needed to satisfy any of the other - requirements (`dtype`, `order`, etc.). - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). If order is 'A', then the returned array may - be in any order (either C-, Fortran-contiguous, or even - discontiguous). - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array (default). - ndmin : int, optional - Specifies the minimum number of dimensions that the resulting - array should have. Ones will be pre-pended to the shape as - needed to meet this requirement. - - Returns - ------- - out : ndarray - An array object satisfying the specified requirements. 
- - See Also - -------- - empty, empty_like, zeros, zeros_like, ones, ones_like, fill - - Examples - -------- - >>> np.array([1, 2, 3]) - array([1, 2, 3]) - - Upcasting: - - >>> np.array([1, 2, 3.0]) - array([ 1., 2., 3.]) - - More than one dimension: - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - Minimum dimensions 2: - - >>> np.array([1, 2, 3], ndmin=2) - array([[1, 2, 3]]) - - Type provided: - - >>> np.array([1, 2, 3], dtype=complex) - array([ 1.+0.j, 2.+0.j, 3.+0.j]) - - Data-type consisting of more than one element: - - >>> x = np.array([(1,2),(3,4)],dtype=[('a','>> x['a'] - array([1, 3]) - - Creating an array from sub-classes: - - >>> np.array(np.mat('1 2; 3 4')) - array([[1, 2], - [3, 4]]) - - >>> np.array(np.mat('1 2; 3 4'), subok=True) - matrix([[1, 2], - [3, 4]]) - - """) - -add_newdoc('numpy.core.multiarray', 'empty', - """ - empty(shape, dtype=float, order='C') - - Return a new array of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty array - dtype : data-type, optional - Desired output data-type. - order : {'C', 'F'}, optional - Whether to store multi-dimensional data in C (row-major) or - Fortran (column-major) order in memory. - - See Also - -------- - empty_like, zeros, ones - - Notes - ----- - `empty`, unlike `zeros`, does not set the array values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. - - Examples - -------- - >>> np.empty([2, 2]) - array([[ -9.74499359e+001, 6.69583040e-309], - [ 2.13182611e-314, 3.06959433e-309]]) #random - - >>> np.empty([2, 2], dtype=int) - array([[-1073741821, -1067949133], - [ 496041986, 19249760]]) #random - - """) - -add_newdoc('numpy.core.multiarray', 'empty_like', - """ - empty_like(a, dtype=None, order='K', subok=True) - - Return a new array with the same shape and type as a given array. 
- - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of the - returned array. - dtype : data-type, optional - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of ``a`` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data with the same - shape and type as `a`. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. - - Notes - ----- - This function does *not* initialize the returned array; to do that use - `zeros_like` or `ones_like` instead. It may be marginally faster than - the functions that do set the array values. - - Examples - -------- - >>> a = ([1,2,3], [4,5,6]) # a is array-like - >>> np.empty_like(a) - array([[-1073741821, -1073741821, 3], #random - [ 0, 0, -1073741821]]) - >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) - >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random - [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - - """) - - -add_newdoc('numpy.core.multiarray', 'scalar', - """ - scalar(dtype, obj) - - Return a new scalar array of the given type initialized with obj. - - This function is meant mainly for pickle support. `dtype` must be a - valid data-type descriptor. 
If `dtype` corresponds to an object - descriptor, then `obj` can be any object, otherwise `obj` must be a - string. If `obj` is not given, it will be interpreted as None for object - type and as zeros for all other types. - - """) - -add_newdoc('numpy.core.multiarray', 'zeros', - """ - zeros(shape, dtype=float, order='C') - - Return a new array of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional - Whether to store multidimensional data in C- or Fortran-contiguous - (row- or column-wise) order in memory. - - Returns - ------- - out : ndarray - Array of zeros with the given shape, dtype, and order. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - empty_like : Return an empty array with shape and type of input. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. - - Examples - -------- - >>> np.zeros(5) - array([ 0., 0., 0., 0., 0.]) - - >>> np.zeros((5,), dtype=numpy.int) - array([0, 0, 0, 0, 0]) - - >>> np.zeros((2, 1)) - array([[ 0.], - [ 0.]]) - - >>> s = (2,2) - >>> np.zeros(s) - array([[ 0., 0.], - [ 0., 0.]]) - - >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype - array([(0, 0), (0, 0)], - dtype=[('x', '>> np.count_nonzero(np.eye(4)) - 4 - - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]]) - 5 - """) - -add_newdoc('numpy.core.multiarray','set_typeDict', - """set_typeDict(dict) - - Set the internal dictionary that can look up an array type using a - registered code. 
- - """) - -add_newdoc('numpy.core.multiarray', 'fromstring', - """ - fromstring(string, dtype=float, count=-1, sep='') - - A new 1-D array initialized from raw binary or text data in a string. - - Parameters - ---------- - string : str - A string containing the data. - dtype : data-type, optional - The data type of the array; default: float. For binary input data, - the data must be in exactly this format. - count : int, optional - Read this number of `dtype` elements from the data. If this is - negative (the default), the count will be determined from the - length of the data. - sep : str, optional - If not provided or, equivalently, the empty string, the data will - be interpreted as binary data; otherwise, as ASCII text with - decimal numbers. Also in this latter case, this argument is - interpreted as the string separating numbers in the data; extra - whitespace between elements is also ignored. - - Returns - ------- - arr : ndarray - The constructed array. - - Raises - ------ - ValueError - If the string is not the correct size to satisfy the requested - `dtype` and `count`. - - See Also - -------- - frombuffer, fromfile, fromiter - - Examples - -------- - >>> np.fromstring('\\x01\\x02', dtype=np.uint8) - array([1, 2], dtype=uint8) - >>> np.fromstring('1 2', dtype=int, sep=' ') - array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') - array([1, 2]) - >>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) - array([1, 2, 3], dtype=uint8) - - """) - -add_newdoc('numpy.core.multiarray', 'fromiter', - """ - fromiter(iterable, dtype, count=-1) - - Create a new 1-dimensional array from an iterable object. - - Parameters - ---------- - iterable : iterable object - An iterable object providing data for the array. - dtype : data-type - The data-type of the returned array. - count : int, optional - The number of items to read from *iterable*. The default is -1, - which means all data is read. 
- - Returns - ------- - out : ndarray - The output array. - - Notes - ----- - Specify `count` to improve performance. It allows ``fromiter`` to - pre-allocate the output array, instead of resizing it on demand. - - Examples - -------- - >>> iterable = (x*x for x in range(5)) - >>> np.fromiter(iterable, np.float) - array([ 0., 1., 4., 9., 16.]) - - """) - -add_newdoc('numpy.core.multiarray', 'fromfile', - """ - fromfile(file, dtype=float, count=-1, sep='') - - Construct an array from data in a text or binary file. - - A highly efficient way of reading binary data with a known data-type, - as well as parsing simply formatted text files. Data written using the - `tofile` method can be read using this function. - - Parameters - ---------- - file : file or str - Open file object or filename. - dtype : data-type - Data type of the returned array. - For binary files, it is used to determine the size and byte-order - of the items in the file. - count : int - Number of items to read. ``-1`` means all items (i.e., the complete - file). - sep : str - Separator between items if file is a text file. - Empty ("") separator means the file should be treated as binary. - Spaces (" ") in the separator match zero or more whitespace characters. - A separator consisting only of spaces must match at least one - whitespace. - - See also - -------- - load, save - ndarray.tofile - loadtxt : More flexible way of loading data from a text file. - - Notes - ----- - Do not rely on the combination of `tofile` and `fromfile` for - data storage, as the binary files generated are are not platform - independent. In particular, no byte-order or data-type information is - saved. Data can be stored in the platform independent ``.npy`` format - using `save` and `load` instead. - - Examples - -------- - Construct an ndarray: - - >>> dt = np.dtype([('time', [('min', int), ('sec', int)]), - ... 
('temp', float)]) - >>> x = np.zeros((1,), dtype=dt) - >>> x['time']['min'] = 10; x['temp'] = 98.25 - >>> x - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> import os - >>> fname = os.tmpnam() - >>> x.tofile(fname) - - Read the raw data from disk: - - >>> np.fromfile(fname, dtype=dt) - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> np.save(fname, x) - >>> np.load(fname + '.npy') - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> dt = np.dtype(int) - >>> dt = dt.newbyteorder('>') - >>> np.frombuffer(buf, dtype=dt) - - The data of the resulting array will not be byteswapped, but will be - interpreted correctly. - - Examples - -------- - >>> s = 'hello world' - >>> np.frombuffer(s, dtype='S1', count=5, offset=6) - array(['w', 'o', 'r', 'l', 'd'], - dtype='|S1') - - """) - -add_newdoc('numpy.core.multiarray', 'concatenate', - """ - concatenate((a1, a2, ...), axis=0) - - Join a sequence of arrays together. - - Parameters - ---------- - a1, a2, ... : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. Default is 0. - - Returns - ------- - res : ndarray - The concatenated array. - - See Also - -------- - ma.concatenate : Concatenate function that preserves input masks. - array_split : Split an array into multiple sub-arrays of equal or - near-equal size. - split : Split array into a list of multiple sub-arrays of equal size. - hsplit : Split array into multiple sub-arrays horizontally (column wise) - vsplit : Split array into multiple sub-arrays vertically (row wise) - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). 
- hstack : Stack arrays in sequence horizontally (column wise) - vstack : Stack arrays in sequence vertically (row wise) - dstack : Stack arrays in sequence depth wise (along third dimension) - - Notes - ----- - When one or more of the arrays to be concatenated is a MaskedArray, - this function will return a MaskedArray object instead of an ndarray, - but the input masks are *not* preserved. In cases where a MaskedArray - is expected as input, use the ma.concatenate function from the masked - array module instead. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> b = np.array([[5, 6]]) - >>> np.concatenate((a, b), axis=0) - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.concatenate((a, b.T), axis=1) - array([[1, 2, 5], - [3, 4, 6]]) - - This function will not preserve masking of MaskedArray inputs. - - >>> a = np.ma.arange(3) - >>> a[1] = np.ma.masked - >>> b = np.arange(2, 5) - >>> a - masked_array(data = [0 -- 2], - mask = [False True False], - fill_value = 999999) - >>> b - array([2, 3, 4]) - >>> np.concatenate([a, b]) - masked_array(data = [0 1 2 2 3 4], - mask = False, - fill_value = 999999) - >>> np.ma.concatenate([a, b]) - masked_array(data = [0 -- 2 2 3 4], - mask = [False True False False False False], - fill_value = 999999) - - """) - -add_newdoc('numpy.core', 'inner', - """ - inner(a, b) - - Inner product of two arrays. - - Ordinary inner product of vectors for 1-D arrays (without complex - conjugation), in higher dimensions a sum product over the last axes. - - Parameters - ---------- - a, b : array_like - If `a` and `b` are nonscalar, their last dimensions of must match. - - Returns - ------- - out : ndarray - `out.shape = a.shape[:-1] + b.shape[:-1]` - - Raises - ------ - ValueError - If the last dimension of `a` and `b` has different size. - - See Also - -------- - tensordot : Sum products over arbitrary axes. - dot : Generalised matrix product, using second last dimension of `b`. - einsum : Einstein summation convention. 
- - Notes - ----- - For vectors (1-D arrays) it computes the ordinary inner-product:: - - np.inner(a, b) = sum(a[:]*b[:]) - - More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: - - np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) - - or explicitly:: - - np.inner(a, b)[i0,...,ir-1,j0,...,js-1] - = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) - - In addition `a` or `b` may be scalars, in which case:: - - np.inner(a,b) = a*b - - Examples - -------- - Ordinary inner product for vectors: - - >>> a = np.array([1,2,3]) - >>> b = np.array([0,1,0]) - >>> np.inner(a, b) - 2 - - A multidimensional example: - - >>> a = np.arange(24).reshape((2,3,4)) - >>> b = np.arange(4) - >>> np.inner(a, b) - array([[ 14, 38, 62], - [ 86, 110, 134]]) - - An example where `b` is a scalar: - - >>> np.inner(np.eye(2), 7) - array([[ 7., 0.], - [ 0., 7.]]) - - """) - -add_newdoc('numpy.core','fastCopyAndTranspose', - """_fastCopyAndTranspose(a)""") - -add_newdoc('numpy.core.multiarray','correlate', - """cross_correlate(a,v, mode=0)""") - -add_newdoc('numpy.core.multiarray', 'arange', - """ - arange([start,] stop[, step,], dtype=None) - - Return evenly spaced values within a given interval. - - Values are generated within the half-open interval ``[start, stop)`` - (in other words, the interval including `start` but excluding `stop`). - For integer arguments the function is equivalent to the Python built-in - `range `_ function, - but returns a ndarray rather than a list. - - When using a non-integer step, such as 0.1, the results will often not - be consistent. It is better to use ``linspace`` for these cases. - - Parameters - ---------- - start : number, optional - Start of interval. The interval includes this value. The default - start value is 0. - stop : number - End of interval. The interval does not include this value, except - in some cases where `step` is not an integer and floating point - round-off affects the length of `out`. - step : number, optional - Spacing between values. 
For any output `out`, this is the distance - between two adjacent values, ``out[i+1] - out[i]``. The default - step size is 1. If `step` is specified, `start` must also be given. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - Returns - ------- - out : ndarray - Array of evenly spaced values. - - For floating point arguments, the length of the result is - ``ceil((stop - start)/step)``. Because of floating point overflow, - this rule may result in the last element of `out` being greater - than `stop`. - - See Also - -------- - linspace : Evenly spaced numbers with careful handling of endpoints. - ogrid: Arrays of evenly spaced numbers in N-dimensions - mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions - - Examples - -------- - >>> np.arange(3) - array([0, 1, 2]) - >>> np.arange(3.0) - array([ 0., 1., 2.]) - >>> np.arange(3,7) - array([3, 4, 5, 6]) - >>> np.arange(3,7,2) - array([3, 5]) - - """) - -add_newdoc('numpy.core.multiarray','_get_ndarray_c_version', - """_get_ndarray_c_version() - - Return the compile time NDARRAY_VERSION number. - - """) - -add_newdoc('numpy.core.multiarray','_reconstruct', - """_reconstruct(subtype, shape, dtype) - - Construct an empty array. Used by Pickles. - - """) - - -add_newdoc('numpy.core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. - - """) - -add_newdoc('numpy.core.multiarray', 'set_numeric_ops', - """ - set_numeric_ops(op1=func1, op2=func2, ...) - - Set numerical operators for array objects. - - Parameters - ---------- - op1, op2, ... : callable - Each ``op = func`` pair describes an operator to be replaced. - For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace - addition by modulus 5 addition. - - Returns - ------- - saved_ops : list of callables - A list of all operators, stored before making replacements. 
- - Notes - ----- - .. WARNING:: - Use with care! Incorrect usage may lead to memory errors. - - A function replacing an operator cannot make use of that operator. - For example, when replacing add, you may not use ``+``. Instead, - directly call ufuncs. - - Examples - -------- - >>> def add_mod5(x, y): - ... return np.add(x, y) % 5 - ... - >>> old_funcs = np.set_numeric_ops(add=add_mod5) - - >>> x = np.arange(12).reshape((3, 4)) - >>> x + x - array([[0, 2, 4, 1], - [3, 0, 2, 4], - [1, 3, 0, 2]]) - - >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators - - """) - -add_newdoc('numpy.core.multiarray', 'where', - """ - where(condition, [x, y]) - - Return elements, either from `x` or `y`, depending on `condition`. - - If only `condition` is given, return ``condition.nonzero()``. - - Parameters - ---------- - condition : array_like, bool - When True, yield `x`, otherwise yield `y`. - x, y : array_like, optional - Values from which to choose. `x` and `y` need to have the same - shape as `condition`. - - Returns - ------- - out : ndarray or tuple of ndarrays - If both `x` and `y` are specified, the output array contains - elements of `x` where `condition` is True, and elements from - `y` elsewhere. - - If only `condition` is given, return the tuple - ``condition.nonzero()``, the indices where `condition` is True. - - See Also - -------- - nonzero, choose - - Notes - ----- - If `x` and `y` are given and input arrays are 1-D, `where` is - equivalent to:: - - [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] - - Examples - -------- - >>> np.where([[True, False], [True, True]], - ... [[1, 2], [3, 4]], - ... [[9, 8], [7, 6]]) - array([[1, 8], - [3, 4]]) - - >>> np.where([[0, 1], [1, 0]]) - (array([0, 1]), array([1, 0])) - - >>> x = np.arange(9.).reshape(3, 3) - >>> np.where( x > 5 ) - (array([2, 2, 2]), array([0, 1, 2])) - >>> x[np.where( x > 3.0 )] # Note: result is 1D. - array([ 4., 5., 6., 7., 8.]) - >>> np.where(x < 5, x, -1) # Note: broadcasting. 
- array([[ 0., 1., 2.], - [ 3., 4., -1.], - [-1., -1., -1.]]) - - """) - - -add_newdoc('numpy.core.multiarray', 'lexsort', - """ - lexsort(keys, axis=-1) - - Perform an indirect sort using a sequence of keys. - - Given multiple sorting keys, which can be interpreted as columns in a - spreadsheet, lexsort returns an array of integer indices that describes - the sort order by multiple columns. The last key in the sequence is used - for the primary sort order, the second-to-last key for the secondary sort - order, and so on. The keys argument must be a sequence of objects that - can be converted to arrays of the same shape. If a 2D array is provided - for the keys argument, it's rows are interpreted as the sorting keys and - sorting is according to the last row, second last row etc. - - Parameters - ---------- - keys : (k,N) array or tuple containing k (N,)-shaped sequences - The `k` different "columns" to be sorted. The last column (or row if - `keys` is a 2D array) is the primary sort key. - axis : int, optional - Axis to be indirectly sorted. By default, sort over the last axis. - - Returns - ------- - indices : (N,) ndarray of ints - Array of indices that sort the keys along the specified axis. - - See Also - -------- - argsort : Indirect sort. - ndarray.sort : In-place sort. - sort : Return a sorted copy of an array. - - Examples - -------- - Sort names: first by surname, then by name. 
- - >>> surnames = ('Hertz', 'Galilei', 'Hertz') - >>> first_names = ('Heinrich', 'Galileo', 'Gustav') - >>> ind = np.lexsort((first_names, surnames)) - >>> ind - array([1, 2, 0]) - - >>> [surnames[i] + ", " + first_names[i] for i in ind] - ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] - - Sort two columns of numbers: - - >>> a = [1,5,1,4,3,4,4] # First column - >>> b = [9,4,0,4,0,2,1] # Second column - >>> ind = np.lexsort((b,a)) # Sort by a, then by b - >>> print ind - [2 0 4 6 5 3 1] - - >>> [(a[i],b[i]) for i in ind] - [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] - - Note that sorting is first according to the elements of ``a``. - Secondary sorting is according to the elements of ``b``. - - A normal ``argsort`` would have yielded: - - >>> [(a[i],b[i]) for i in np.argsort(a)] - [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] - - Structured arrays are sorted lexically by ``argsort``: - - >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], - ... dtype=np.dtype([('x', int), ('y', int)])) - - >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) - array([2, 0, 4, 6, 5, 3, 1]) - - """) - -add_newdoc('numpy.core.multiarray', 'can_cast', - """ - can_cast(from, totype, casting = 'safe') - - Returns True if cast between data types can occur according to the - casting rule. If from is a scalar or array scalar, also returns - True if the scalar value can be cast without overflow or truncation - to an integer. - - Parameters - ---------- - from : dtype, dtype specifier, scalar, or array - Data type, scalar, or array to cast from. - totype : dtype or dtype specifier - Data type to cast to. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. 
- * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Returns - ------- - out : bool - True if cast can occur according to the casting rule. - - See also - -------- - dtype, result_type - - Examples - -------- - - Basic examples - - >>> np.can_cast(np.int32, np.int64) - True - >>> np.can_cast(np.float64, np.complex) - True - >>> np.can_cast(np.complex, np.float) - False - - >>> np.can_cast('i8', 'f8') - True - >>> np.can_cast('i8', 'f4') - False - >>> np.can_cast('i4', 'S4') - True - - Casting scalars - - >>> np.can_cast(100, 'i1') - True - >>> np.can_cast(150, 'i1') - False - >>> np.can_cast(150, 'u1') - True - - >>> np.can_cast(3.5e100, np.float32) - False - >>> np.can_cast(1000.0, np.float32) - True - - Array scalar checks the value, array does not - - >>> np.can_cast(np.array(1000.0), np.float32) - True - >>> np.can_cast(np.array([1000.0]), np.float32) - False - - Using the casting rules - - >>> np.can_cast('i8', 'i8', 'no') - True - >>> np.can_cast('i8', 'no') - False - - >>> np.can_cast('i8', 'equiv') - True - >>> np.can_cast('i8', 'equiv') - False - - >>> np.can_cast('i8', 'safe') - True - >>> np.can_cast('i4', 'safe') - False - - >>> np.can_cast('i4', 'same_kind') - True - >>> np.can_cast('u4', 'same_kind') - False - - >>> np.can_cast('u4', 'unsafe') - True - - """) - -add_newdoc('numpy.core.multiarray', 'promote_types', - """ - promote_types(type1, type2) - - Returns the data type with the smallest size and smallest scalar - kind to which both ``type1`` and ``type2`` may be safely cast. - The returned data type is always in native byte order. - - This function is symmetric and associative. - - Parameters - ---------- - type1 : dtype or dtype specifier - First data type. - type2 : dtype or dtype specifier - Second data type. - - Returns - ------- - out : dtype - The promoted data type. - - Notes - ----- - .. 
versionadded:: 1.6.0 - - See Also - -------- - result_type, dtype, can_cast - - Examples - -------- - >>> np.promote_types('f4', 'f8') - dtype('float64') - - >>> np.promote_types('i8', 'f4') - dtype('float64') - - >>> np.promote_types('>i8', '>> np.promote_types('i1', 'S8') - Traceback (most recent call last): - File "", line 1, in - TypeError: invalid type promotion - - """) - -add_newdoc('numpy.core.multiarray', 'min_scalar_type', - """ - min_scalar_type(a) - - For scalar ``a``, returns the data type with the smallest size - and smallest scalar kind which can hold its value. For non-scalar - array ``a``, returns the vector's dtype unmodified. - - Floating point values are not demoted to integers, - and complex values are not demoted to floats. - - Parameters - ---------- - a : scalar or array_like - The value whose minimal data type is to be found. - - Returns - ------- - out : dtype - The minimal data type. - - Notes - ----- - .. versionadded:: 1.6.0 - - See Also - -------- - result_type, promote_types, dtype, can_cast - - Examples - -------- - >>> np.min_scalar_type(10) - dtype('uint8') - - >>> np.min_scalar_type(-260) - dtype('int16') - - >>> np.min_scalar_type(3.1) - dtype('float16') - - >>> np.min_scalar_type(1e50) - dtype('float64') - - >>> np.min_scalar_type(np.arange(4,dtype='f8')) - dtype('float64') - - """) - -add_newdoc('numpy.core.multiarray', 'result_type', - """ - result_type(*arrays_and_dtypes) - - Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. 
If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. - - Parameters - ---------- - arrays_and_dtypes : list of arrays and dtypes - The operands of some operation whose result type is needed. - - Returns - ------- - out : dtype - The result type. - - See also - -------- - dtype, promote_types, min_scalar_type, can_cast - - Notes - ----- - .. versionadded:: 1.6.0 - - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each array, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. - - Examples - -------- - >>> np.result_type(3, np.arange(7, dtype='i1')) - dtype('int8') - - >>> np.result_type('i4', 'c8') - dtype('complex128') - - >>> np.result_type(3.0, -2) - dtype('float64') - - """) - -add_newdoc('numpy.core.multiarray','newbuffer', - """newbuffer(size) - - Return a new uninitialized buffer object of size bytes - - """) - -add_newdoc('numpy.core.multiarray', 'getbuffer', - """ - getbuffer(obj [,offset[, size]]) - - Create a buffer object from the given object referencing a slice of - length size starting at offset. - - Default is the entire buffer. 
A read-write buffer is attempted followed - by a read-only buffer. - - Parameters - ---------- - obj : object - - offset : int, optional - - size : int, optional - - Returns - ------- - buffer_obj : buffer - - Examples - -------- - >>> buf = np.getbuffer(np.ones(5), 1, 3) - >>> len(buf) - 3 - >>> buf[0] - '\\x00' - >>> buf - - - """) - -add_newdoc('numpy.core', 'dot', - """ - dot(a, b, out=None) - - Dot product of two arrays. - - For 2-D arrays it is equivalent to matrix multiplication, and for 1-D - arrays to inner product of vectors (without complex conjugation). For - N dimensions it is a sum product over the last axis of `a` and - the second-to-last of `b`:: - - dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) - - Parameters - ---------- - a : array_like - First argument. - b : array_like - Second argument. - out : ndarray, optional - Output argument. This must have the exact kind that would be returned - if it was not used. In particular, it must have the right type, must be - C-contiguous, and its dtype must be the dtype that would be returned - for `dot(a,b)`. This is a performance feature. Therefore, if these - conditions are not met, an exception is raised, instead of attempting - to be flexible. - - Returns - ------- - output : ndarray - Returns the dot product of `a` and `b`. If `a` and `b` are both - scalars or both 1-D arrays then a scalar is returned; otherwise - an array is returned. - If `out` is given, then it is returned. - - Raises - ------ - ValueError - If the last dimension of `a` is not the same size as - the second-to-last dimension of `b`. - - See Also - -------- - vdot : Complex-conjugating dot product. - tensordot : Sum products over arbitrary axes. - einsum : Einstein summation convention. 
- - Examples - -------- - >>> np.dot(3, 4) - 12 - - Neither argument is complex-conjugated: - - >>> np.dot([2j, 3j], [2j, 3j]) - (-13+0j) - - For 2-D arrays it's the matrix product: - - >>> a = [[1, 0], [0, 1]] - >>> b = [[4, 1], [2, 2]] - >>> np.dot(a, b) - array([[4, 1], - [2, 2]]) - - >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) - >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) - >>> np.dot(a, b)[2,3,2,1,2,2] - 499128 - >>> sum(a[2,3,2,:] * b[1,2,:,2]) - 499128 - - """) - -add_newdoc('numpy.core', 'einsum', - """ - einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe') - - Evaluates the Einstein summation convention on the operands. - - Using the Einstein summation convention, many common multi-dimensional - array operations can be represented in a simple fashion. This function - provides a way compute such summations. The best way to understand this - function is to try the examples below, which show how many common NumPy - functions can be implemented as calls to `einsum`. - - Parameters - ---------- - subscripts : str - Specifies the subscripts for summation. - operands : list of array_like - These are the arrays for the operation. - out : ndarray, optional - If provided, the calculation is done into this array. - dtype : data-type, optional - If provided, forces the calculation to use the data type specified. - Note that you may have to also give a more liberal `casting` - parameter to allow the conversions. - order : {'C', 'F', 'A', or 'K'}, optional - Controls the memory layout of the output. 'C' means it should - be C contiguous. 'F' means it should be Fortran contiguous, - 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. - 'K' means it should be as close to the layout as the inputs as - is possible, including arbitrarily permuted axes. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. 
Setting this to - 'unsafe' is not recommended, as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Returns - ------- - output : ndarray - The calculation based on the Einstein summation convention. - - See Also - -------- - dot, inner, outer, tensordot - - Notes - ----- - .. versionadded:: 1.6.0 - - The subscripts string is a comma-separated list of subscript labels, - where each label refers to a dimension of the corresponding operand. - Repeated subscripts labels in one operand take the diagonal. For example, - ``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``. - - Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)`` - is equivalent to ``np.inner(a,b)``. If a label appears only once, - it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` - with no changes. - - The order of labels in the output is by default alphabetical. This - means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while - ``np.einsum('ji', a)`` takes its transpose. - - The output can be controlled by specifying output subscript labels - as well. This specifies the label order, and allows summing to - be disallowed or forced when desired. The call ``np.einsum('i->', a)`` - is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)`` - is like ``np.diag(a)``. The difference is that `einsum` does not - allow broadcasting by default. - - To enable and control broadcasting, use an ellipsis. Default - NumPy-style broadcasting is done by adding an ellipsis - to the left of each term, like ``np.einsum('...ii->...i', a)``. 
- To take the trace along the first and last axes, - you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix - product with the left-most indices instead of rightmost, you can do - ``np.einsum('ij...,jk...->ik...', a, b)``. - - When there is only one operand, no axes are summed, and no output - parameter is provided, a view into the operand is returned instead - of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` - produces a view. - - An alternative way to provide the subscripts and operands is as - ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples - below have corresponding `einsum` calls with the two parameter methods. - - Examples - -------- - >>> a = np.arange(25).reshape(5,5) - >>> b = np.arange(5) - >>> c = np.arange(6).reshape(2,3) - - >>> np.einsum('ii', a) - 60 - >>> np.einsum(a, [0,0]) - 60 - >>> np.trace(a) - 60 - - >>> np.einsum('ii->i', a) - array([ 0, 6, 12, 18, 24]) - >>> np.einsum(a, [0,0], [0]) - array([ 0, 6, 12, 18, 24]) - >>> np.diag(a) - array([ 0, 6, 12, 18, 24]) - - >>> np.einsum('ij,j', a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum(a, [0,1], b, [1]) - array([ 30, 80, 130, 180, 230]) - >>> np.dot(a, b) - array([ 30, 80, 130, 180, 230]) - - >>> np.einsum('ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum(c, [1,0]) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> c.T - array([[0, 3], - [1, 4], - [2, 5]]) - - >>> np.einsum('..., ...', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.multiply(3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - - >>> np.einsum('i,i', b, b) - 30 - >>> np.einsum(b, [0], b, [0]) - 30 - >>> np.inner(b,b) - 30 - - >>> np.einsum('i,j', np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.einsum(np.arange(2)+1, [0], b, [1]) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.outer(np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - - 
>>> np.einsum('i...->...', a) - array([50, 55, 60, 65, 70]) - >>> np.einsum(a, [0,Ellipsis], [Ellipsis]) - array([50, 55, 60, 65, 70]) - >>> np.sum(a, axis=0) - array([50, 55, 60, 65, 70]) - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> np.einsum('ijk,jil->kl', a, b) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - - """) - -add_newdoc('numpy.core', 'alterdot', - """ - Change `dot`, `vdot`, and `innerproduct` to use accelerated BLAS functions. - - Typically, as a user of Numpy, you do not explicitly call this function. If - Numpy is built with an accelerated BLAS, this function is automatically - called when Numpy is imported. - - When Numpy is built with an accelerated BLAS like ATLAS, these functions - are replaced to make use of the faster implementations. The faster - implementations only affect float32, float64, complex64, and complex128 - arrays. Furthermore, the BLAS API only includes matrix-matrix, - matrix-vector, and vector-vector products. Products of arrays with larger - dimensionalities use the built in functions and are not accelerated. - - See Also - -------- - restoredot : `restoredot` undoes the effects of `alterdot`. - - """) - -add_newdoc('numpy.core', 'restoredot', - """ - Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS - implementations. - - Typically, the user will only need to call this when troubleshooting and - installation problem, reproducing the conditions of a build without an - accelerated BLAS, or when being very careful about benchmarking linear - algebra operations. 
- - See Also - -------- - alterdot : `restoredot` undoes the effects of `alterdot`. - - """) - -add_newdoc('numpy.core', 'vdot', - """ - Return the dot product of two vectors. - - The vdot(`a`, `b`) function handles complex numbers differently than - dot(`a`, `b`). If the first argument is complex the complex conjugate - of the first argument is used for the calculation of the dot product. - - Note that `vdot` handles multidimensional arrays differently than `dot`: - it does *not* perform a matrix product, but flattens input arguments - to 1-D vectors first. Consequently, it should only be used for vectors. - - Parameters - ---------- - a : array_like - If `a` is complex the complex conjugate is taken before calculation - of the dot product. - b : array_like - Second argument to the dot product. - - Returns - ------- - output : ndarray - Dot product of `a` and `b`. Can be an int, float, or - complex depending on the types of `a` and `b`. - - See Also - -------- - dot : Return the dot product without using the complex conjugate of the - first argument. - - Examples - -------- - >>> a = np.array([1+2j,3+4j]) - >>> b = np.array([5+6j,7+8j]) - >>> np.vdot(a, b) - (70-8j) - >>> np.vdot(b, a) - (70+8j) - - Note that higher-dimensional arrays are flattened! 
- - >>> a = np.array([[1, 4], [5, 6]]) - >>> b = np.array([[4, 1], [2, 2]]) - >>> np.vdot(a, b) - 30 - >>> np.vdot(b, a) - 30 - >>> 1*4 + 4*1 + 5*2 + 6*2 - 30 - - """) - - -############################################################################## -# -# Documentation for ndarray attributes and methods -# -############################################################################## - - -############################################################################## -# -# ndarray object -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', - """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) - - An array object represents a multidimensional, homogeneous array - of fixed-size items. An associated data-type object describes the - format of each element in the array (its byte-order, how many bytes it - occupies in memory, whether it is an integer, a floating point number, - or something else, etc.) - - Arrays should be constructed using `array`, `zeros` or `empty` (refer - to the See Also section below). The parameters given here refer to - a low-level method (`ndarray(...)`) for instantiating an array. - - For more information, refer to the `numpy` module and examine the - the methods and attributes of an array. - - Parameters - ---------- - (for the __new__ method; see Notes below) - - shape : tuple of ints - Shape of created array. - dtype : data-type, optional - Any object that can be interpreted as a numpy data type. - buffer : object exposing buffer interface, optional - Used to fill the array with data. - offset : int, optional - Offset of array data in buffer. - strides : tuple of ints, optional - Strides of data in memory. - order : {'C', 'F'}, optional - Row-major or column-major order. - - Attributes - ---------- - T : ndarray - Transpose of the array. - data : buffer - The array's elements, in memory. 
- dtype : dtype object - Describes the format of the elements in the array. - flags : dict - Dictionary containing information related to memory use, e.g., - 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. - flat : numpy.flatiter object - Flattened version of the array as an iterator. The iterator - allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for - assignment examples; TODO). - imag : ndarray - Imaginary part of the array. - real : ndarray - Real part of the array. - size : int - Number of elements in the array. - itemsize : int - The memory use of each array element in bytes. - nbytes : int - The total number of bytes required to store the array data, - i.e., ``itemsize * size``. - ndim : int - The array's number of dimensions. - shape : tuple of ints - Shape of the array. - strides : tuple of ints - The step-size required to move from one element to the next in - memory. For example, a contiguous ``(3, 4)`` array of type - ``int16`` in C-order has strides ``(8, 2)``. This implies that - to move from element to element in memory requires jumps of 2 bytes. - To move from row-to-row, one needs to jump 8 bytes at a time - (``2 * 4``). - ctypes : ctypes object - Class containing properties of the array needed for interaction - with ctypes. - base : ndarray - If the array is a view into another array, that array is its `base` - (unless that array is also a view). The `base` array is where the - array data is actually stored. - - See Also - -------- - array : Construct an array. - zeros : Create an array, each element of which is zero. - empty : Create an array, but leave its allocated memory unchanged (i.e., - it contains "garbage"). - dtype : Create a data-type. - - Notes - ----- - There are two modes of creating an array using ``__new__``: - - 1. If `buffer` is None, then only `shape`, `dtype`, and `order` - are used. - 2. If `buffer` is an object exposing the buffer interface, then - all keywords are interpreted. 
- - No ``__init__`` method is needed because the array is fully initialized - after the ``__new__`` method. - - Examples - -------- - These examples illustrate the low-level `ndarray` constructor. Refer - to the `See Also` section above for easier ways of constructing an - ndarray. - - First mode, `buffer` is None: - - >>> np.ndarray(shape=(2,2), dtype=float, order='F') - array([[ -1.13698227e+002, 4.25087011e-303], - [ 2.88528414e-306, 3.27025015e-309]]) #random - - Second mode: - - >>> np.ndarray((2,), buffer=np.array([1,2,3]), - ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element - array([2, 3]) - - """) - - -############################################################################## -# -# ndarray attributes -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', - """Array protocol: Python side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', - """None.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', - """Array priority.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', - """Array protocol: C-struct side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', - """Allow the array to be interpreted as a ctypes object by returning the - data-memory location as an integer - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('base', - """ - Base object if memory is from some other object. - - Examples - -------- - The base of an array that owns its memory is None: - - >>> x = np.array([1,2,3,4]) - >>> x.base is None - True - - Slicing creates a view, whose memory is shared with x: - - >>> y = x[2:] - >>> y.base is x - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', - """ - An object to simplify the interaction of the array with the ctypes - module. 
- - This attribute creates an object that makes it easier to use arrays - when calling shared libraries with the ctypes module. The returned - object has, among others, data, shape, and strides attributes (see - Notes below) which themselves return ctypes objects that can be used - as arguments to a shared library. - - Parameters - ---------- - None - - Returns - ------- - c : Python object - Possessing attributes data, shape, strides, etc. - - See Also - -------- - numpy.ctypeslib - - Notes - ----- - Below are the public attributes of this object which were documented - in "Guide to NumPy" (we have omitted undocumented public attributes, - as well as documented private attributes): - - * data: A pointer to the memory area of the array as a Python integer. - This memory area may contain data that is not aligned, or not in correct - byte-order. The memory area may not even be writeable. The array - flags and data-type of this array should be respected when passing this - attribute to arbitrary C-code to avoid trouble that can include Python - crashing. User Beware! The value of this attribute is exactly the same - as self._array_interface_['data'][0]. - - * shape (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the C-integer corresponding to dtype('p') on this - platform. This base-type could be c_int, c_long, or c_longlong - depending on the platform. The c_intp type is defined accordingly in - numpy.ctypeslib. The ctypes array contains the shape of the underlying - array. - - * strides (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the same as for the shape attribute. This ctypes array - contains the strides information from the underlying array. This strides - information is important for showing how many bytes must be jumped to - get to the next element in the array. - - * data_as(obj): Return the data pointer cast to a particular c-types object. 
- For example, calling self._as_parameter_ is equivalent to - self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a - pointer to a ctypes array of floating-point data: - self.data_as(ctypes.POINTER(ctypes.c_double)). - - * shape_as(obj): Return the shape tuple as an array of some other c-types - type. For example: self.shape_as(ctypes.c_short). - - * strides_as(obj): Return the strides tuple as an array of some other - c-types type. For example: self.strides_as(ctypes.c_longlong). - - Be careful using the ctypes attribute - especially on temporary - arrays or arrays constructed on the fly. For example, calling - ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory - that is invalid because the array created as (a+b) is deallocated - before the next Python statement. You can avoid this problem using - either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will - hold a reference to the array until ct is deleted or re-assigned. - - If the ctypes module is not available, then the ctypes attribute - of array objects still returns something useful, but ctypes objects - are not returned and errors may be raised instead. In particular, - the object will still have the as parameter attribute which will - return an integer equal to the data attribute. 
- - Examples - -------- - >>> import ctypes - >>> x - array([[0, 1], - [2, 3]]) - >>> x.ctypes.data - 30439712 - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) - - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents - c_long(0) - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents - c_longlong(4294967296L) - >>> x.ctypes.shape - - >>> x.ctypes.shape_as(ctypes.c_long) - - >>> x.ctypes.strides - - >>> x.ctypes.strides_as(ctypes.c_longlong) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('data', - """Python buffer object pointing to the start of the array's data.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', - """ - Data-type of the array's elements. - - Parameters - ---------- - None - - Returns - ------- - d : numpy dtype object - - See Also - -------- - numpy.dtype - - Examples - -------- - >>> x - array([[0, 1], - [2, 3]]) - >>> x.dtype - dtype('int32') - >>> type(x.dtype) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', - """ - The imaginary part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.imag - array([ 0. , 0.70710678]) - >>> x.imag.dtype - dtype('float64') - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', - """ - Length of one array element in bytes. - - Examples - -------- - >>> x = np.array([1,2,3], dtype=np.float64) - >>> x.itemsize - 8 - >>> x = np.array([1,2,3], dtype=np.complex128) - >>> x.itemsize - 16 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', - """ - Information about the memory layout of the array. - - Attributes - ---------- - C_CONTIGUOUS (C) - The data is in a single, C-style contiguous segment. - F_CONTIGUOUS (F) - The data is in a single, Fortran-style contiguous segment. - OWNDATA (O) - The array owns the memory it uses or borrows it from another object. - WRITEABLE (W) - The data area can be written to. Setting this to False locks - the data, making it read-only. 
A view (slice, etc.) inherits WRITEABLE - from its base array at creation time, but a view of a writeable - array may be subsequently locked while the base array remains writeable. - (The opposite is not true, in that a view of a locked array may not - be made writeable. However, currently, locking a base object does not - lock any views that already reference it, so under that circumstance it - is possible to alter the contents of a locked array via a previously - created writeable view onto it.) Attempting to change a non-writeable - array raises a RuntimeError exception. - ALIGNED (A) - The data and strides are aligned appropriately for the hardware. - UPDATEIFCOPY (U) - This array is a copy of some other array. When this array is - deallocated, the base array will be updated with the contents of - this array. - - FNC - F_CONTIGUOUS and not C_CONTIGUOUS. - FORC - F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). - BEHAVED (B) - ALIGNED and WRITEABLE. - CARRAY (CA) - BEHAVED and C_CONTIGUOUS. - FARRAY (FA) - BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. - - Notes - ----- - The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), - or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag - names are only supported in dictionary access. - - Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by - the user, via direct assignment to the attribute or dictionary entry, - or by calling `ndarray.setflags`. - - The array flags cannot be set arbitrarily: - - - UPDATEIFCOPY can only be set ``False``. - - ALIGNED can only be set ``True`` if the data is truly aligned. - - WRITEABLE can only be set ``True`` if the array owns its own memory - or the ultimate owner of the memory exposes a writeable buffer - interface or is a string. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', - """ - A 1-D iterator over the array. 
- - This is a `numpy.flatiter` instance, which acts similarly to, but is not - a subclass of, Python's built-in iterator object. - - See Also - -------- - flatten : Return a copy of the array collapsed into one dimension. - - flatiter - - Examples - -------- - >>> x = np.arange(1, 7).reshape(2, 3) - >>> x - array([[1, 2, 3], - [4, 5, 6]]) - >>> x.flat[3] - 4 - >>> x.T - array([[1, 4], - [2, 5], - [3, 6]]) - >>> x.T.flat[3] - 5 - >>> type(x.flat) - - - An assignment example: - - >>> x.flat = 3; x - array([[3, 3, 3], - [3, 3, 3]]) - >>> x.flat[[1,4]] = 1; x - array([[3, 1, 3], - [3, 1, 3]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', - """ - Total bytes consumed by the elements of the array. - - Notes - ----- - Does not include memory consumed by non-element attributes of the - array object. - - Examples - -------- - >>> x = np.zeros((3,5,2), dtype=np.complex128) - >>> x.nbytes - 480 - >>> np.prod(x.shape) * x.itemsize - 480 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', - """ - Number of array dimensions. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> x.ndim - 1 - >>> y = np.zeros((2, 3, 4)) - >>> y.ndim - 3 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('real', - """ - The real part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.real - array([ 1. , 0.70710678]) - >>> x.real.dtype - dtype('float64') - - See Also - -------- - numpy.real : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', - """ - Tuple of array dimensions. 
- - Notes - ----- - May be used to "reshape" the array, as long as this would not - require a change in the total number of elements - - Examples - -------- - >>> x = np.array([1, 2, 3, 4]) - >>> x.shape - (4,) - >>> y = np.zeros((2, 3, 4)) - >>> y.shape - (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: total size of new array must be unchanged - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('size', - """ - Number of elements in the array. - - Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's - dimensions. - - Examples - -------- - >>> x = np.zeros((3, 5, 2), dtype=np.complex128) - >>> x.size - 30 - >>> np.prod(x.shape) - 30 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', - """ - Tuple of bytes to step in each dimension when traversing an array. - - The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` - is:: - - offset = sum(np.array(i) * a.strides) - - A more detailed explanation of strides can be found in the - "ndarray.rst" file in the NumPy reference guide. - - Notes - ----- - Imagine an array of 32-bit integers (each 4 bytes):: - - x = np.array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]], dtype=np.int32) - - This array is stored in memory as 40 bytes, one after the other - (known as a contiguous block of memory). The strides of an array tell - us how many bytes we have to skip in memory to move to the next position - along a certain axis. For example, we have to skip 4 bytes (1 value) to - move to the next column, but 20 bytes (5 values) to get to the same - position in the next row. As such, the strides for the array `x` will be - ``(20, 4)``. 
- - See Also - -------- - numpy.lib.stride_tricks.as_strided - - Examples - -------- - >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) - >>> y - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - >>> y.strides - (48, 16, 4) - >>> y[1,1,1] - 17 - >>> offset=sum(y.strides * np.array((1,1,1))) - >>> offset/y.itemsize - 17 - - >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) - >>> x.strides - (32, 4, 224, 1344) - >>> i = np.array([3,5,2,2]) - >>> offset = sum(i * x.strides) - >>> x[3,5,2,2] - 813 - >>> offset / x.itemsize - 813 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('T', - """ - Same as self.transpose(), except that self is returned if - self.ndim < 2. - - Examples - -------- - >>> x = np.array([[1.,2.],[3.,4.]]) - >>> x - array([[ 1., 2.], - [ 3., 4.]]) - >>> x.T - array([[ 1., 3.], - [ 2., 4.]]) - >>> x = np.array([1.,2.,3.,4.]) - >>> x - array([ 1., 2., 3., 4.]) - >>> x.T - array([ 1., 2., 3., 4.]) - - """)) - - -############################################################################## -# -# ndarray methods -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', - """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. - - Returns either a new reference to self if dtype is not given or a new array - of provided data type if dtype is different from the current dtype of the - array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', - """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', - """a.__array_wrap__(obj) -> Object of same type as ndarray object a. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', - """a.__copy__([order]) - - Return a copy of the array. 
- - Parameters - ---------- - order : {'C', 'F', 'A'}, optional - If order is 'C' (False) then the result is contiguous (default). - If order is 'Fortran' (True) then the result has fortran order. - If order is 'Any' (None) then the result has fortran order - only if the array already is in fortran order. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__() -> Deep copy of array. - - Used if copy.deepcopy is called on an array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', - """a.__reduce__() - - For pickling. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', - """a.__setstate__(version, shape, dtype, isfortran, rawdata) - - For unpickling. - - Parameters - ---------- - version : int - optional pickle version. If omitted defaults to 0. - shape : tuple - dtype : data-type - isFortran : bool - rawdata : string or list - a binary string with the data (or a list if 'a' is an object array) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('all', - """ - a.all(axis=None, out=None) - - Returns True if all elements evaluate to True. - - Refer to `numpy.all` for full documentation. - - See Also - -------- - numpy.all : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('any', - """ - a.any(axis=None, out=None) - - Returns True if any of the elements of `a` evaluate to True. - - Refer to `numpy.any` for full documentation. - - See Also - -------- - numpy.any : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', - """ - a.argmax(axis=None, out=None) - - Return indices of the maximum values along the given axis. - - Refer to `numpy.argmax` for full documentation. 
- - See Also - -------- - numpy.argmax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', - """ - a.argmin(axis=None, out=None) - - Return indices of the minimum values along the given axis of `a`. - - Refer to `numpy.argmin` for detailed documentation. - - See Also - -------- - numpy.argmin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', - """ - a.argsort(axis=-1, kind='quicksort', order=None) - - Returns the indices that would sort this array. - - Refer to `numpy.argsort` for full documentation. - - See Also - -------- - numpy.argsort : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', - """ - a.astype(t) - - Copy of the array, cast to a specified type. - - Parameters - ---------- - t : str or dtype - Typecode or data-type to which the array is cast. - - Raises - ------ - ComplexWarning : - When casting from complex to float or int. To avoid this, - one should use ``a.real.astype(t)``. - - Examples - -------- - >>> x = np.array([1, 2, 2.5]) - >>> x - array([ 1. , 2. , 2.5]) - - >>> x.astype(int) - array([1, 2, 2]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', - """ - a.byteswap(inplace) - - Swap the bytes of the array elements - - Toggle between low-endian and big-endian data representation by - returning a byteswapped array, optionally swapped in-place. - - Parameters - ---------- - inplace: bool, optional - If ``True``, swap bytes in-place, default is ``False``. - - Returns - ------- - out: ndarray - The byteswapped array. If `inplace` is ``True``, this is - a view to self. 
- - Examples - -------- - >>> A = np.array([1, 256, 8755], dtype=np.int16) - >>> map(hex, A) - ['0x1', '0x100', '0x2233'] - >>> A.byteswap(True) - array([ 256, 1, 13090], dtype=int16) - >>> map(hex, A) - ['0x100', '0x1', '0x3322'] - - Arrays of strings are not swapped - - >>> A = np.array(['ceg', 'fac']) - >>> A.byteswap() - array(['ceg', 'fac'], - dtype='|S3') - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', - """ - a.choose(choices, out=None, mode='raise') - - Use an index array to construct a new array from a set of choices. - - Refer to `numpy.choose` for full documentation. - - See Also - -------- - numpy.choose : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', - """ - a.clip(a_min, a_max, out=None) - - Return an array whose values are limited to ``[a_min, a_max]``. - - Refer to `numpy.clip` for full documentation. - - See Also - -------- - numpy.clip : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', - """ - a.compress(condition, axis=None, out=None) - - Return selected slices of this array along given axis. - - Refer to `numpy.compress` for full documentation. - - See Also - -------- - numpy.compress : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', - """ - a.conj() - - Complex-conjugate all elements. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', - """ - a.conjugate() - - Return the complex conjugate, element-wise. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', - """ - a.copy(order='C') - - Return a copy of the array. 
- - Parameters - ---------- - order : {'C', 'F', 'A'}, optional - By default, the result is stored in C-contiguous (row-major) order in - memory. If `order` is `F`, the result has 'Fortran' (column-major) - order. If order is 'A' ('Any'), then the result has the same order - as the input. - - Examples - -------- - >>> x = np.array([[1,2,3],[4,5,6]], order='F') - - >>> y = x.copy() - - >>> x.fill(0) - - >>> x - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y - array([[1, 2, 3], - [4, 5, 6]]) - - >>> y.flags['C_CONTIGUOUS'] - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', - """ - a.cumprod(axis=None, dtype=None, out=None) - - Return the cumulative product of the elements along the given axis. - - Refer to `numpy.cumprod` for full documentation. - - See Also - -------- - numpy.cumprod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', - """ - a.cumsum(axis=None, dtype=None, out=None) - - Return the cumulative sum of the elements along the given axis. - - Refer to `numpy.cumsum` for full documentation. - - See Also - -------- - numpy.cumsum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', - """ - a.diagonal(offset=0, axis1=0, axis2=1) - - Return specified diagonals. - - Refer to `numpy.diagonal` for full documentation. - - See Also - -------- - numpy.diagonal : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', - """ - a.dot(b, out=None) - - Dot product of two arrays. - - Refer to `numpy.dot` for full documentation. 
- - See Also - -------- - numpy.dot : equivalent function - - Examples - -------- - >>> a = np.eye(2) - >>> b = np.ones((2, 2)) * 2 - >>> a.dot(b) - array([[ 2., 2.], - [ 2., 2.]]) - - This array method can be conveniently chained: - - >>> a.dot(b).dot(b) - array([[ 8., 8.], - [ 8., 8.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', - """a.dump(file) - - Dump a pickle of the array to the specified file. - The array can be read back with pickle.load or numpy.load. - - Parameters - ---------- - file : str - A string naming the dump file. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', - """ - a.dumps() - - Returns the pickle of the array as a string. - pickle.loads or numpy.loads will convert the string back to an array. - - Parameters - ---------- - None - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', - """ - a.fill(value) - - Fill the array with a scalar value. - - Parameters - ---------- - value : scalar - All elements of `a` will be assigned this value. - - Examples - -------- - >>> a = np.array([1, 2]) - >>> a.fill(0) - >>> a - array([0, 0]) - >>> a = np.empty(2) - >>> a.fill(1) - >>> a - array([ 1., 1.]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', - """ - a.flatten(order='C') - - Return a copy of the array collapsed into one dimension. - - Parameters - ---------- - order : {'C', 'F', 'A'}, optional - Whether to flatten in C (row-major), Fortran (column-major) order, - or preserve the C/Fortran ordering from `a`. - The default is 'C'. - - Returns - ------- - y : ndarray - A copy of the input array, flattened to one dimension. - - See Also - -------- - ravel : Return a flattened array. - flat : A 1-D flat iterator over the array. 
- - Examples - -------- - >>> a = np.array([[1,2], [3,4]]) - >>> a.flatten() - array([1, 2, 3, 4]) - >>> a.flatten('F') - array([1, 3, 2, 4]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', - """ - a.getfield(dtype, offset) - - Returns a field of the given array as a certain type. - - A field is a view of the array data with each itemsize determined - by the given type and the offset into the current array, i.e. from - ``offset * dtype.itemsize`` to ``(offset+1) * dtype.itemsize``. - - Parameters - ---------- - dtype : str - String denoting the data type of the field. - offset : int - Number of `dtype.itemsize`'s to skip before beginning the element view. - - Examples - -------- - >>> x = np.diag([1.+1.j]*2) - >>> x - array([[ 1.+1.j, 0.+0.j], - [ 0.+0.j, 1.+1.j]]) - >>> x.dtype - dtype('complex128') - - >>> x.getfield('complex64', 0) # Note how this != x - array([[ 0.+1.875j, 0.+0.j ], - [ 0.+0.j , 0.+1.875j]], dtype=complex64) - - >>> x.getfield('complex64',1) # Note how different this is than x - array([[ 0. +5.87173204e-39j, 0. +0.00000000e+00j], - [ 0. +0.00000000e+00j, 0. +5.87173204e-39j]], dtype=complex64) - - >>> x.getfield('complex128', 0) # == x - array([[ 1.+1.j, 0.+0.j], - [ 0.+0.j, 1.+1.j]]) - - If the argument dtype is the same as x.dtype, then offset != 0 raises - a ValueError: - - >>> x.getfield('complex128', 1) - Traceback (most recent call last): - File "", line 1, in - ValueError: Need 0 <= offset <= 0 for requested type but received offset = 1 - - >>> x.getfield('float64', 0) - array([[ 1., 0.], - [ 0., 1.]]) - - >>> x.getfield('float64', 1) - array([[ 1.77658241e-307, 0.00000000e+000], - [ 0.00000000e+000, 1.77658241e-307]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('item', - """ - a.item(*args) - - Copy an element of an array to a standard Python scalar and return it. 
- - Parameters - ---------- - \\*args : Arguments (variable number and type) - - * none: in this case, the method only works for arrays - with one element (`a.size == 1`), which element is - copied into a standard Python scalar object and returned. - - * int_type: this argument is interpreted as a flat index into - the array, specifying which element to copy and return. - - * tuple of int_types: functions as does a single int_type argument, - except that the argument is interpreted as an nd-index into the - array. - - Returns - ------- - z : Standard Python scalar object - A copy of the specified element of the array as a suitable - Python scalar - - Notes - ----- - When the data type of `a` is longdouble or clongdouble, item() returns - a scalar array object because there is no available Python scalar that - would not lose information. Void arrays return a buffer object for item(), - unless fields are defined, in which case a tuple is returned. - - `item` is very similar to a[args], except, instead of an array scalar, - a standard Python scalar is returned. This can be useful for speeding up - access to elements of the array and doing arithmetic on elements of the - array using Python's optimized math. - - Examples - -------- - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[3, 1, 7], - [2, 8, 3], - [8, 5, 3]]) - >>> x.item(3) - 2 - >>> x.item(7) - 5 - >>> x.item((0, 1)) - 1 - >>> x.item((2, 2)) - 3 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', - """ - a.itemset(*args) - - Insert scalar into an array (scalar is cast to array's dtype, if possible) - - There must be at least 1 argument, and define the last argument - as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster - than ``a[args] = item``. The item should be a scalar value and `args` - must select a single item in the array `a`. - - Parameters - ---------- - \*args : Arguments - If one argument: a scalar, only used in case `a` is of size 1. 
- If two arguments: the last argument is the value to be set - and must be a scalar, the first argument specifies a single array - element location. It is either an int or a tuple. - - Notes - ----- - Compared to indexing syntax, `itemset` provides some speed increase - for placing a scalar into a particular location in an `ndarray`, - if you must do this. However, generally this is discouraged: - among other problems, it complicates the appearance of the code. - Also, when using `itemset` (and `item`) inside a loop, be sure - to assign the methods to a local variable to avoid the attribute - look-up at each loop iteration. - - Examples - -------- - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[3, 1, 7], - [2, 8, 3], - [8, 5, 3]]) - >>> x.itemset(4, 0) - >>> x.itemset((2, 2), 9) - >>> x - array([[3, 1, 7], - [2, 0, 3], - [8, 5, 9]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat', - """ - a.setasflat(arr) - - Equivalent to a.flat = arr.flat, but is generally more efficient. - This function does not check for overlap, so if ``arr`` and ``a`` - are viewing the same data with different strides, the results will - be unpredictable. - - Parameters - ---------- - arr : array_like - The array to copy into a. - - Examples - -------- - >>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a - array([[0, 1, 2], - [4, 5, 6]]) - >>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b - array([[ 2., 5.], - [ 1., 4.], - [ 0., 3.]], dtype=float32) - >>> a.setasflat(b) - >>> a - array([[2, 5, 1], - [4, 0, 3]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('max', - """ - a.max(axis=None, out=None) - - Return the maximum along a given axis. - - Refer to `numpy.amax` for full documentation. - - See Also - -------- - numpy.amax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', - """ - a.mean(axis=None, dtype=None, out=None) - - Returns the average of the array elements along given axis. 
- - Refer to `numpy.mean` for full documentation. - - See Also - -------- - numpy.mean : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('min', - """ - a.min(axis=None, out=None) - - Return the minimum along a given axis. - - Refer to `numpy.amin` for full documentation. - - See Also - -------- - numpy.amin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', - """ - arr.newbyteorder(new_order='S') - - Return the array with the same data viewed with a different byte order. - - Equivalent to:: - - arr.view(arr.dtype.newbytorder(new_order)) - - Changes are also made in all fields and sub-arrays of the array data - type. - - - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order specifications - above. `new_order` codes can be any of:: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - - Returns - ------- - new_arr : array - New array object with the dtype reflecting given change to the - byte order. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', - """ - a.nonzero() - - Return the indices of the elements that are non-zero. - - Refer to `numpy.nonzero` for full documentation. - - See Also - -------- - numpy.nonzero : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', - """ - a.prod(axis=None, dtype=None, out=None) - - Return the product of the array elements over the given axis - - Refer to `numpy.prod` for full documentation. 
- - See Also - -------- - numpy.prod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', - """ - a.ptp(axis=None, out=None) - - Peak to peak (maximum - minimum) value along a given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('put', - """ - a.put(indices, values, mode='raise') - - Set ``a.flat[n] = values[n]`` for all `n` in indices. - - Refer to `numpy.put` for full documentation. - - See Also - -------- - numpy.put : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'putmask', - """ - putmask(a, mask, values) - - Changes elements of an array based on conditional and input values. - - Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. - - If `values` is not the same size as `a` and `mask` then it will repeat. - This gives behavior different from ``a[mask] = values``. - - Parameters - ---------- - a : array_like - Target array. - mask : array_like - Boolean mask array. It has to be the same shape as `a`. - values : array_like - Values to put into `a` where `mask` is True. If `values` is smaller - than `a` it will be repeated. - - See Also - -------- - place, put, take - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> np.putmask(x, x>2, x**2) - >>> x - array([[ 0, 1, 2], - [ 9, 16, 25]]) - - If `values` is smaller than `a` it is repeated: - - >>> x = np.arange(5) - >>> np.putmask(x, x>1, [-33, -44]) - >>> x - array([ 0, 1, -33, -44, -33]) - - """) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', - """ - a.ravel([order]) - - Return a flattened array. - - Refer to `numpy.ravel` for full documentation. - - See Also - -------- - numpy.ravel : equivalent function - - ndarray.flat : a flat iterator on the array. 
- - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', - """ - a.repeat(repeats, axis=None) - - Repeat elements of an array. - - Refer to `numpy.repeat` for full documentation. - - See Also - -------- - numpy.repeat : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', - """ - a.reshape(shape, order='C') - - Returns an array containing the same data with a new shape. - - Refer to `numpy.reshape` for full documentation. - - See Also - -------- - numpy.reshape : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', - """ - a.resize(new_shape, refcheck=True) - - Change shape and size of array in-place. - - Parameters - ---------- - new_shape : tuple of ints, or `n` ints - Shape of resized array. - refcheck : bool, optional - If False, reference count will not be checked. Default is True. - - Returns - ------- - None - - Raises - ------ - ValueError - If `a` does not own its own data or references or views to it exist, - and the data memory must be changed. - - SystemError - If the `order` keyword argument is specified. This behaviour is a - bug in NumPy. - - See Also - -------- - resize : Return a new array with the specified shape. - - Notes - ----- - This reallocates space for the data area if necessary. - - Only contiguous arrays (data elements consecutive in memory) can be - resized. - - The purpose of the reference count check is to make sure you - do not use this array as a buffer for another Python object and then - reallocate the memory. However, reference counts can increase in - other ways so if you are sure that you have not shared the memory - for this array with another Python object, then you may safely set - `refcheck` to False. 
- - Examples - -------- - Shrinking an array: array is flattened (in the order that the data are - stored in memory), resized, and reshaped: - - >>> a = np.array([[0, 1], [2, 3]], order='C') - >>> a.resize((2, 1)) - >>> a - array([[0], - [1]]) - - >>> a = np.array([[0, 1], [2, 3]], order='F') - >>> a.resize((2, 1)) - >>> a - array([[0], - [2]]) - - Enlarging an array: as above, but missing entries are filled with zeros: - - >>> b = np.array([[0, 1], [2, 3]]) - >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple - >>> b - array([[0, 1, 2], - [3, 0, 0]]) - - Referencing an array prevents resizing... - - >>> c = a - >>> a.resize((1, 1)) - Traceback (most recent call last): - ... - ValueError: cannot resize an array that has been referenced ... - - Unless `refcheck` is False: - - >>> a.resize((1, 1), refcheck=False) - >>> a - array([[0]]) - >>> c - array([[0]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('round', - """ - a.round(decimals=0, out=None) - - Return `a` with each element rounded to the given number of decimals. - - Refer to `numpy.around` for full documentation. - - See Also - -------- - numpy.around : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', - """ - a.searchsorted(v, side='left') - - Find indices where elements of v should be inserted in a to maintain order. - - For full documentation, see `numpy.searchsorted` - - See Also - -------- - numpy.searchsorted : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', - """ - a.setfield(val, dtype, offset=0) - - Put a value into a specified place in a field defined by a data-type. - - Place `val` into `a`'s field defined by `dtype` and beginning `offset` - bytes into the field. - - Parameters - ---------- - val : object - Value to be placed in field. - dtype : dtype object - Data-type of the field in which to place `val`. 
- offset : int, optional - The number of bytes into the field at which to place `val`. - - Returns - ------- - None - - See Also - -------- - getfield - - Examples - -------- - >>> x = np.eye(3) - >>> x.getfield(np.float64) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> x.setfield(3, np.int32) - >>> x.getfield(np.int32) - array([[3, 3, 3], - [3, 3, 3], - [3, 3, 3]]) - >>> x - array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323], - [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323], - [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]]) - >>> x.setfield(np.eye(3), np.int32) - >>> x - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', - """ - a.setflags(write=None, align=None, uic=None) - - Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively. - - These Boolean-valued flags affect how numpy interprets the memory - area used by `a` (see Notes below). The ALIGNED flag can only - be set to True if the data is actually aligned according to the type. - The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE - can only be set to True if the array owns its own memory, or the - ultimate owner of the memory exposes a writeable buffer interface, - or is a string. (The exception for string is made so that unpickling - can be done without copying memory.) - - Parameters - ---------- - write : bool, optional - Describes whether or not `a` can be written to. - align : bool, optional - Describes whether or not `a` is aligned properly for its type. - uic : bool, optional - Describes whether or not `a` is a copy of another "base" array. - - Notes - ----- - Array flags provide information about how the memory area used - for the array is to be interpreted. There are 6 Boolean flags - in use, only three of which can be changed by the user: - UPDATEIFCOPY, WRITEABLE, and ALIGNED. 
- - WRITEABLE (W) the data area can be written to; - - ALIGNED (A) the data and strides are aligned appropriately for the hardware - (as determined by the compiler); - - UPDATEIFCOPY (U) this array is a copy of some other array (referenced - by .base). When this array is deallocated, the base array will be - updated with the contents of this array. - - All flags can be accessed using their first (upper case) letter as well - as the full name. - - Examples - -------- - >>> y - array([[3, 1, 7], - [2, 0, 0], - [8, 5, 9]]) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - >>> y.setflags(write=0, align=0) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : False - ALIGNED : False - UPDATEIFCOPY : False - >>> y.setflags(uic=1) - Traceback (most recent call last): - File "", line 1, in - ValueError: cannot set UPDATEIFCOPY flag to True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', - """ - a.sort(axis=-1, kind='quicksort', order=None) - - Sort an array, in-place. - - Parameters - ---------- - axis : int, optional - Axis along which to sort. Default is -1, which means sort along the - last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - See Also - -------- - numpy.sort : Return a sorted copy of an array. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in sorted array. - - Notes - ----- - See ``sort`` for notes on the different sorting algorithms. 
- - Examples - -------- - >>> a = np.array([[1,4], [3,1]]) - >>> a.sort(axis=1) - >>> a - array([[1, 4], - [1, 3]]) - >>> a.sort(axis=0) - >>> a - array([[1, 3], - [1, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) - >>> a.sort(order='y') - >>> a - array([('c', 1), ('a', 2)], - dtype=[('x', '|S1'), ('y', '>> a = np.array([1, 2]) - >>> a.tolist() - [1, 2] - >>> a = np.array([[1, 2], [3, 4]]) - >>> list(a) - [array([1, 2]), array([3, 4])] - >>> a.tolist() - [[1, 2], [3, 4]] - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', - """ - a.tostring(order='C') - - Construct a Python string containing the raw data bytes in the array. - - Constructs a Python string showing a copy of the raw contents of - data memory. The string can be produced in either 'C' or 'Fortran', - or 'Any' order (the default is 'C'-order). 'Any' order means C-order - unless the F_CONTIGUOUS flag in the array is set, in which case it - means 'Fortran' order. - - Parameters - ---------- - order : {'C', 'F', None}, optional - Order of the data for multidimensional arrays: - C, Fortran, or the same as for the original array. - - Returns - ------- - s : str - A Python string exhibiting a copy of `a`'s raw data. - - Examples - -------- - >>> x = np.array([[0, 1], [2, 3]]) - >>> x.tostring() - '\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' - >>> x.tostring('C') == x.tostring() - True - >>> x.tostring('F') - '\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', - """ - a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) - - Return the sum along diagonals of the array. - - Refer to `numpy.trace` for full documentation. 
- - See Also - -------- - numpy.trace : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', - """ - a.transpose(*axes) - - Returns a view of the array with axes transposed. - - For a 1-D array, this has no effect. (To change between column and - row vectors, first cast the 1-D array into a matrix object.) - For a 2-D array, this is the usual matrix transpose. - For an n-D array, if axes are given, their order indicates how the - axes are permuted (see Examples). If axes are not provided and - ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then - ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. - - Parameters - ---------- - axes : None, tuple of ints, or `n` ints - - * None or no argument: reverses the order of the axes. - - * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s - `i`-th axis becomes `a.transpose()`'s `j`-th axis. - - * `n` ints: same as an n-tuple of the same ints (this form is - intended simply as a "convenience" alternative to the tuple form) - - Returns - ------- - out : ndarray - View of `a`, with axes suitably permuted. - - See Also - -------- - ndarray.T : Array property returning the array transposed. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> a - array([[1, 2], - [3, 4]]) - >>> a.transpose() - array([[1, 3], - [2, 4]]) - >>> a.transpose((1, 0)) - array([[1, 3], - [2, 4]]) - >>> a.transpose(1, 0) - array([[1, 3], - [2, 4]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('view', - """ - a.view(dtype=None, type=None) - - New view of array with the same data. 
- - Parameters - ---------- - dtype : data-type, optional - Data-type descriptor of the returned view, e.g., float32 or int16. - The default, None, results in the view having the same data-type - as `a`. - type : Python type, optional - Type of the returned view, e.g., ndarray or matrix. Again, the - default None results in type preservation. - - Notes - ----- - ``a.view()`` is used two different ways: - - ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view - of the array's memory with a different data-type. This can cause a - reinterpretation of the bytes of memory. - - ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just - returns an instance of `ndarray_subclass` that looks at the same array - (same shape, dtype, etc.) This does not cause a reinterpretation of the - memory. - - - Examples - -------- - >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - - Viewing array data using a different type and dtype: - - >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> y - matrix([[513]], dtype=int16) - >>> print type(y) - - - Creating a view on a structured array so it can be used in calculations - - >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) - >>> xv = x.view(dtype=np.int8).reshape(-1,2) - >>> xv - array([[1, 2], - [3, 4]], dtype=int8) - >>> xv.mean(0) - array([ 2., 3.]) - - Making changes to the view changes the underlying array - - >>> xv[0,1] = 20 - >>> print x - [(1, 20) (3, 4)] - - Using a view to convert an array to a record array: - - >>> z = x.view(np.recarray) - >>> z.a - array([1], dtype=int8) - - Views share data: - - >>> x[0] = (9, 10) - >>> z[0] - (9, 10) - - """)) - - -############################################################################## -# -# umath functions -# -############################################################################## - -add_newdoc('numpy.core.umath', 'frexp', - """ - Return normalized fraction and exponent of 2 of input array, 
element-wise. - - Returns (`out1`, `out2`) from equation ``x` = out1 * 2**out2``. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - (out1, out2) : tuple of ndarrays, (float, int) - `out1` is a float array with values between -1 and 1. - `out2` is an int array which represent the exponent of 2. - - See Also - -------- - ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`. - - Notes - ----- - Complex dtypes are not supported, they will raise a TypeError. - - Examples - -------- - >>> x = np.arange(9) - >>> y1, y2 = np.frexp(x) - >>> y1 - array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875, - 0.5 ]) - >>> y2 - array([0, 1, 2, 2, 3, 3, 3, 3, 4]) - >>> y1 * 2**y2 - array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.]) - - """) - -add_newdoc('numpy.core.umath', 'frompyfunc', - """ - frompyfunc(func, nin, nout) - - Takes an arbitrary Python function and returns a Numpy ufunc. - - Can be used, for example, to add broadcasting to a built-in Python - function (see Examples section). - - Parameters - ---------- - func : Python function object - An arbitrary Python function. - nin : int - The number of input arguments. - nout : int - The number of objects returned by `func`. - - Returns - ------- - out : ufunc - Returns a Numpy universal function (``ufunc``) object. - - Notes - ----- - The returned ufunc always returns PyObject arrays. - - Examples - -------- - Use frompyfunc to add broadcasting to the Python function ``oct``: - - >>> oct_array = np.frompyfunc(oct, 1, 1) - >>> oct_array(np.array((10, 30, 100))) - array([012, 036, 0144], dtype=object) - >>> np.array((oct(10), oct(30), oct(100))) # for comparison - array(['012', '036', '0144'], - dtype='|S4') - - """) - -add_newdoc('numpy.core.umath', 'ldexp', - """ - Compute y = x1 * 2**x2. - - Parameters - ---------- - x1 : array_like - The array of multipliers. - x2 : array_like - The array of exponents. 
- - Returns - ------- - y : array_like - The output array, the result of ``x1 * 2**x2``. - - See Also - -------- - frexp : Return (y1, y2) from ``x = y1 * 2**y2``, the inverse of `ldexp`. - - Notes - ----- - Complex dtypes are not supported, they will raise a TypeError. - - `ldexp` is useful as the inverse of `frexp`, if used by itself it is - more clear to simply use the expression ``x1 * 2**x2``. - - Examples - -------- - >>> np.ldexp(5, np.arange(4)) - array([ 5., 10., 20., 40.], dtype=float32) - - >>> x = np.arange(6) - >>> np.ldexp(*np.frexp(x)) - array([ 0., 1., 2., 3., 4., 5.]) - - """) - -add_newdoc('numpy.core.umath', 'geterrobj', - """ - geterrobj() - - Return the current object that defines floating-point error handling. - - The error object contains all information that defines the error handling - behavior in Numpy. `geterrobj` is used internally by the other - functions that get and set error handling behavior (`geterr`, `seterr`, - `geterrcall`, `seterrcall`). - - Returns - ------- - errobj : list - The error object, a list containing three elements: - [internal numpy buffer size, error mask, error callback function]. - - The error mask is a single integer that holds the treatment information - on all four floating point errors. The information for each error type - is contained in three bits of the integer. If we print it in base 8, we - can see what treatment is set for "invalid", "under", "over", and - "divide" (in that order). The printed string can be interpreted with - - * 0 : 'ignore' - * 1 : 'warn' - * 2 : 'raise' - * 3 : 'call' - * 4 : 'print' - * 5 : 'log' - - See Also - -------- - seterrobj, seterr, geterr, seterrcall, geterrcall - getbufsize, setbufsize - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterrobj() # first get the defaults - [10000, 0, None] - - >>> def err_handler(type, flag): - ... 
print "Floating point error (%s), with flag %s" % (type, flag) - ... - >>> old_bufsize = np.setbufsize(20000) - >>> old_err = np.seterr(divide='raise') - >>> old_handler = np.seterrcall(err_handler) - >>> np.geterrobj() - [20000, 2, ] - - >>> old_err = np.seterr(all='ignore') - >>> np.base_repr(np.geterrobj()[1], 8) - '0' - >>> old_err = np.seterr(divide='warn', over='log', under='call', - invalid='print') - >>> np.base_repr(np.geterrobj()[1], 8) - '4351' - - """) - -add_newdoc('numpy.core.umath', 'seterrobj', - """ - seterrobj(errobj) - - Set the object that defines floating-point error handling. - - The error object contains all information that defines the error handling - behavior in Numpy. `seterrobj` is used internally by the other - functions that set error handling behavior (`seterr`, `seterrcall`). - - Parameters - ---------- - errobj : list - The error object, a list containing three elements: - [internal numpy buffer size, error mask, error callback function]. - - The error mask is a single integer that holds the treatment information - on all four floating point errors. The information for each error type - is contained in three bits of the integer. If we print it in base 8, we - can see what treatment is set for "invalid", "under", "over", and - "divide" (in that order). The printed string can be interpreted with - - * 0 : 'ignore' - * 1 : 'warn' - * 2 : 'raise' - * 3 : 'call' - * 4 : 'print' - * 5 : 'log' - - See Also - -------- - geterrobj, seterr, geterr, seterrcall, geterrcall - getbufsize, setbufsize - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> old_errobj = np.geterrobj() # first get the defaults - >>> old_errobj - [10000, 0, None] - - >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) - ... 
- >>> new_errobj = [20000, 12, err_handler] - >>> np.seterrobj(new_errobj) - >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') - '14' - >>> np.geterr() - {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} - >>> np.geterrcall() is err_handler - True - - """) - - -############################################################################## -# -# lib._compiled_base functions -# -############################################################################## - -add_newdoc('numpy.lib._compiled_base', 'digitize', - """ - digitize(x, bins) - - Return the indices of the bins to which each value in input array belongs. - - Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if - `bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if - `bins` is monotonically decreasing. If values in `x` are beyond the - bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. - - Parameters - ---------- - x : array_like - Input array to be binned. It has to be 1-dimensional. - bins : array_like - Array of bins. It has to be 1-dimensional and monotonic. - - Returns - ------- - out : ndarray of ints - Output array of indices, of same shape as `x`. - - Raises - ------ - ValueError - If the input is not 1-dimensional, or if `bins` is not monotonic. - TypeError - If the type of the input is complex. - - See Also - -------- - bincount, histogram, unique - - Notes - ----- - If values in `x` are such that they fall outside the bin range, - attempting to index `bins` with the indices that `digitize` returns - will result in an IndexError. - - Examples - -------- - >>> x = np.array([0.2, 6.4, 3.0, 1.6]) - >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) - >>> inds = np.digitize(x, bins) - >>> inds - array([1, 4, 3, 2]) - >>> for n in range(x.size): - ... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]] - ... 
- 0.0 <= 0.2 < 1.0 - 4.0 <= 6.4 < 10.0 - 2.5 <= 3.0 < 4.0 - 1.0 <= 1.6 < 2.5 - - """) - -add_newdoc('numpy.lib._compiled_base', 'bincount', - """ - bincount(x, weights=None, minlength=None) - - Count number of occurrences of each value in array of non-negative ints. - - The number of bins (of size 1) is one larger than the largest value in - `x`. If `minlength` is specified, there will be at least this number - of bins in the output array (though it will be longer if necessary, - depending on the contents of `x`). - Each bin gives the number of occurrences of its index value in `x`. - If `weights` is specified the input array is weighted by it, i.e. if a - value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead - of ``out[n] += 1``. - - Parameters - ---------- - x : array_like, 1 dimension, nonnegative ints - Input array. - weights : array_like, optional - Weights, array of the same shape as `x`. - minlength : int, optional - .. versionadded:: 1.6.0 - - A minimum number of bins for the output array. - - Returns - ------- - out : ndarray of ints - The result of binning the input array. - The length of `out` is equal to ``np.amax(x)+1``. - - Raises - ------ - ValueError - If the input is not 1-dimensional, or contains elements with negative - values, or if `minlength` is non-positive. - TypeError - If the type of the input is float or complex. 
- - See Also - -------- - histogram, digitize, unique - - Examples - -------- - >>> np.bincount(np.arange(5)) - array([1, 1, 1, 1, 1]) - >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) - array([1, 3, 1, 1, 0, 0, 0, 1]) - - >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) - >>> np.bincount(x).size == np.amax(x)+1 - True - - The input array needs to be of integer dtype, otherwise a - TypeError is raised: - - >>> np.bincount(np.arange(5, dtype=np.float)) - Traceback (most recent call last): - File "", line 1, in - TypeError: array cannot be safely cast to required type - - A possible use of ``bincount`` is to perform sums over - variable-size chunks of an array, using the ``weights`` keyword. - - >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights - >>> x = np.array([0, 1, 1, 2, 2, 2]) - >>> np.bincount(x, weights=w) - array([ 0.3, 0.7, 1.1]) - - """) - -add_newdoc('numpy.lib._compiled_base', 'ravel_multi_index', - """ - ravel_multi_index(multi_index, dims, mode='raise', order='C') - - Converts a tuple of index arrays into an array of flat - indices, applying boundary modes to the multi-index. - - Parameters - ---------- - multi_index : tuple of array_like - A tuple of integer arrays, one array for each dimension. - dims : tuple of ints - The shape of array into which the indices from ``multi_index`` apply. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices are handled. Can specify - either one mode or a tuple of modes, one mode per index. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - In 'clip' mode, a negative index which would normally - wrap will clip to 0 instead. - order : {'C', 'F'}, optional - Determines whether the multi-index should be viewed as indexing in - C (row-major) order or FORTRAN (column-major) order. - - Returns - ------- - raveled_indices : ndarray - An array of indices into the flattened version of an array - of dimensions ``dims``. 
- - See Also - -------- - unravel_index - - Notes - ----- - .. versionadded:: 1.6.0 - - Examples - -------- - >>> arr = np.array([[3,6,6],[4,5,1]]) - >>> np.ravel_multi_index(arr, (7,6)) - array([22, 41, 37]) - >>> np.ravel_multi_index(arr, (7,6), order='F') - array([31, 41, 13]) - >>> np.ravel_multi_index(arr, (4,6), mode='clip') - array([22, 23, 19]) - >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) - array([12, 13, 13]) - - >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) - 1621 - """) - -add_newdoc('numpy.lib._compiled_base', 'unravel_index', - """ - unravel_index(indices, dims, order='C') - - Converts a flat index or array of flat indices into a tuple - of coordinate arrays. - - Parameters - ---------- - indices : array_like - An integer array whose elements are indices into the flattened - version of an array of dimensions ``dims``. Before version 1.6.0, - this function accepted just one index value. - dims : tuple of ints - The shape of the array to use for unraveling ``indices``. - order : {'C', 'F'}, optional - .. versionadded:: 1.6.0 - - Determines whether the indices should be viewed as indexing in - C (row-major) order or FORTRAN (column-major) order. - - Returns - ------- - unraveled_coords : tuple of ndarray - Each array in the tuple has the same shape as the ``indices`` - array. - - See Also - -------- - ravel_multi_index - - Examples - -------- - >>> np.unravel_index([22, 41, 37], (7,6)) - (array([3, 6, 6]), array([4, 5, 1])) - >>> np.unravel_index([31, 41, 13], (7,6), order='F') - (array([3, 6, 6]), array([4, 5, 1])) - - >>> np.unravel_index(1621, (6,7,8,9)) - (3, 1, 4, 1) - - """) - -add_newdoc('numpy.lib._compiled_base', 'add_docstring', - """ - docstring(obj, docstring) - - Add a docstring to a built-in obj if possible. 
- If the obj already has a docstring raise a RuntimeError - If this routine does not know how to add a docstring to the object - raise a TypeError - """) - -add_newdoc('numpy.lib._compiled_base', 'packbits', - """ - packbits(myarray, axis=None) - - Packs the elements of a binary-valued array into bits in a uint8 array. - - The result is padded to full bytes by inserting zero bits at the end. - - Parameters - ---------- - myarray : array_like - An integer type array whose elements should be packed to bits. - axis : int, optional - The dimension over which bit-packing is done. - ``None`` implies packing the flattened array. - - Returns - ------- - packed : ndarray - Array of type uint8 whose elements represent bits corresponding to the - logical (0 or nonzero) value of the input elements. The shape of - `packed` has the same number of dimensions as the input (unless `axis` - is None, in which case the output is 1-D). - - See Also - -------- - unpackbits: Unpacks elements of a uint8 array into a binary-valued output - array. - - Examples - -------- - >>> a = np.array([[[1,0,1], - ... [0,1,0]], - ... [[1,1,0], - ... [0,0,1]]]) - >>> b = np.packbits(a, axis=-1) - >>> b - array([[[160],[64]],[[192],[32]]], dtype=uint8) - - Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, - and 32 = 0010 0000. - - """) - -add_newdoc('numpy.lib._compiled_base', 'unpackbits', - """ - unpackbits(myarray, axis=None) - - Unpacks elements of a uint8 array into a binary-valued output array. - - Each element of `myarray` represents a bit-field that should be unpacked - into a binary-valued output array. The shape of the output array is either - 1-D (if `axis` is None) or the same shape as the input array with unpacking - done along the axis specified. - - Parameters - ---------- - myarray : ndarray, uint8 type - Input array. - axis : int, optional - Unpacks along this axis. - - Returns - ------- - unpacked : ndarray, uint8 type - The elements are binary-valued (0 or 1). 
- - See Also - -------- - packbits : Packs the elements of a binary-valued array into bits in a uint8 - array. - - Examples - -------- - >>> a = np.array([[2], [7], [23]], dtype=np.uint8) - >>> a - array([[ 2], - [ 7], - [23]], dtype=uint8) - >>> b = np.unpackbits(a, axis=1) - >>> b - array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) - - """) - - -############################################################################## -# -# Documentation for ufunc attributes and methods -# -############################################################################## - - -############################################################################## -# -# ufunc object -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', - """ - Functions that operate element by element on whole arrays. - - To see the documentation for a specific ufunc, use np.info(). For - example, np.info(np.sin). Because ufuncs are written in C - (for speed) and linked into Python with NumPy's ufunc facility, - Python's help() function finds this page whenever help() is called - on a ufunc. - - A detailed explanation of ufuncs can be found in the "ufuncs.rst" - file in the NumPy reference guide. - - Unary ufuncs: - ============= - - op(X, out=None) - Apply op to X elementwise - - Parameters - ---------- - X : array_like - Input array. - out : array_like - An array to store the output. Must be the same shape as `X`. - - Returns - ------- - r : array_like - `r` will have the same shape as `X`; if out is provided, `r` - will be equal to out. - - Binary ufuncs: - ============== - - op(X, Y, out=None) - Apply `op` to `X` and `Y` elementwise. May "broadcast" to make - the shapes of `X` and `Y` congruent. - - The broadcasting rules are: - - * Dimensions of length 1 may be prepended to either array. - * Arrays may be repeated along dimensions of length 1. 
- - Parameters - ---------- - X : array_like - First input array. - Y : array_like - Second input array. - out : array_like - An array to store the output. Must be the same shape as the - output would have. - - Returns - ------- - r : array_like - The return value; if out is provided, `r` will be equal to out. - - """) - - -############################################################################## -# -# ufunc attributes -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('identity', - """ - The identity value. - - Data attribute containing the identity element for the ufunc, if it has one. - If it does not, the attribute value is None. - - Examples - -------- - >>> np.add.identity - 0 - >>> np.multiply.identity - 1 - >>> np.power.identity - 1 - >>> print np.exp.identity - None - """)) - -add_newdoc('numpy.core', 'ufunc', ('nargs', - """ - The number of arguments. - - Data attribute containing the number of arguments the ufunc takes, including - optional ones. - - Notes - ----- - Typically this value will be one more than what you might expect because all - ufuncs take the optional "out" argument. - - Examples - -------- - >>> np.add.nargs - 3 - >>> np.multiply.nargs - 3 - >>> np.power.nargs - 3 - >>> np.exp.nargs - 2 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nin', - """ - The number of inputs. - - Data attribute containing the number of arguments the ufunc treats as input. - - Examples - -------- - >>> np.add.nin - 2 - >>> np.multiply.nin - 2 - >>> np.power.nin - 2 - >>> np.exp.nin - 1 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nout', - """ - The number of outputs. - - Data attribute containing the number of arguments the ufunc treats as output. - - Notes - ----- - Since all ufuncs can take output arguments, this will always be (at least) 1. 
- - Examples - -------- - >>> np.add.nout - 1 - >>> np.multiply.nout - 1 - >>> np.power.nout - 1 - >>> np.exp.nout - 1 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('ntypes', - """ - The number of types. - - The number of numerical NumPy types - of which there are 18 total - on which - the ufunc can operate. - - See Also - -------- - numpy.ufunc.types - - Examples - -------- - >>> np.add.ntypes - 18 - >>> np.multiply.ntypes - 18 - >>> np.power.ntypes - 17 - >>> np.exp.ntypes - 7 - >>> np.remainder.ntypes - 14 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('types', - """ - Returns a list with types grouped input->output. - - Data attribute listing the data-type "Domain-Range" groupings the ufunc can - deliver. The data-types are given using the character codes. - - See Also - -------- - numpy.ufunc.ntypes - - Examples - -------- - >>> np.add.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.multiply.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.power.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', - 'OO->O'] - - >>> np.exp.types - ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] - - >>> np.remainder.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] - - """)) - - -############################################################################## -# -# ufunc methods -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('reduce', - """ - reduce(a, axis=0, dtype=None, out=None) - - Reduces `a`'s dimension by one, by applying ufunc along 
one axis. - - Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then - :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = - the result of iterating `j` over :math:`range(N_i)`, cumulatively applying - ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. - For a one-dimensional array, reduce produces results equivalent to: - :: - - r = op.identity # op = ufunc - for i in xrange(len(A)): - r = op(r, A[i]) - return r - - For example, add.reduce() is equivalent to sum(). - - Parameters - ---------- - a : array_like - The array to act on. - axis : int, optional - The axis along which to apply the reduction. - dtype : data-type code, optional - The type used to represent the intermediate results. Defaults - to the data-type of the output array if this is provided, or - the data-type of the input array if no output array is provided. - out : ndarray, optional - A location into which the result is stored. If not provided, a - freshly-allocated array is returned. - - Returns - ------- - r : ndarray - The reduced array. If `out` was supplied, `r` is a reference to it. - - Examples - -------- - >>> np.multiply.reduce([2,3,5]) - 30 - - A multi-dimensional array example: - - >>> X = np.arange(8).reshape((2,2,2)) - >>> X - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> np.add.reduce(X, 0) - array([[ 4, 6], - [ 8, 10]]) - >>> np.add.reduce(X) # confirm: default axis value is 0 - array([[ 4, 6], - [ 8, 10]]) - >>> np.add.reduce(X, 1) - array([[ 2, 4], - [10, 12]]) - >>> np.add.reduce(X, 2) - array([[ 1, 5], - [ 9, 13]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('accumulate', - """ - accumulate(array, axis=0, dtype=None, out=None) - - Accumulate the result of applying the operator to all elements. 
- - For a one-dimensional array, accumulate produces results equivalent to:: - - r = np.empty(len(A)) - t = op.identity # op = the ufunc being applied to A's elements - for i in xrange(len(A)): - t = op(t, A[i]) - r[i] = t - return r - - For example, add.accumulate() is equivalent to np.cumsum(). - - For a multi-dimensional array, accumulate is applied along only one - axis (axis zero by default; see Examples below) so repeated use is - necessary if one wants to accumulate over multiple axes. - - Parameters - ---------- - array : array_like - The array to act on. - axis : int, optional - The axis along which to apply the accumulation; default is zero. - dtype : data-type code, optional - The data-type used to represent the intermediate results. Defaults - to the data-type of the output array if such is provided, or the - the data-type of the input array if no output array is provided. - out : ndarray, optional - A location into which the result is stored. If not provided a - freshly-allocated array is returned. - - Returns - ------- - r : ndarray - The accumulated values. If `out` was supplied, `r` is a reference to - `out`. - - Examples - -------- - 1-D array examples: - - >>> np.add.accumulate([2, 3, 5]) - array([ 2, 5, 10]) - >>> np.multiply.accumulate([2, 3, 5]) - array([ 2, 6, 30]) - - 2-D array examples: - - >>> I = np.eye(2) - >>> I - array([[ 1., 0.], - [ 0., 1.]]) - - Accumulate along axis 0 (rows), down columns: - - >>> np.add.accumulate(I, 0) - array([[ 1., 0.], - [ 1., 1.]]) - >>> np.add.accumulate(I) # no axis specified = axis zero - array([[ 1., 0.], - [ 1., 1.]]) - - Accumulate along axis 1 (columns), through rows: - - >>> np.add.accumulate(I, 1) - array([[ 1., 1.], - [ 0., 1.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('reduceat', - """ - reduceat(a, indices, axis=0, dtype=None, out=None) - - Performs a (local) reduce with specified slices over a single axis. 
- - For i in ``range(len(indices))``, `reduceat` computes - ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th - generalized "row" parallel to `axis` in the final result (i.e., in a - 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if - `axis = 1`, it becomes the i-th column). There are two exceptions to this: - - * when ``i = len(indices) - 1`` (so for the last index), - ``indices[i+1] = a.shape[axis]``. - * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is - simply ``a[indices[i]]``. - - The shape of the output depends on the size of `indices`, and may be - larger than `a` (this happens if ``len(indices) > a.shape[axis]``). - - Parameters - ---------- - a : array_like - The array to act on. - indices : array_like - Paired indices, comma separated (not colon), specifying slices to - reduce. - axis : int, optional - The axis along which to apply the reduceat. - dtype : data-type code, optional - The type used to represent the intermediate results. Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. - out : ndarray, optional - A location into which the result is stored. If not provided a - freshly-allocated array is returned. - - Returns - ------- - r : ndarray - The reduced values. If `out` was supplied, `r` is a reference to - `out`. - - Notes - ----- - A descriptive example: - - If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as - ``ufunc.reduceat(a, indices)[::2]`` where `indices` is - ``range(len(array) - 1)`` with a zero placed - in every other element: - ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. - - Don't be fooled by this attribute's name: `reduceat(a)` is not - necessarily smaller than `a`. 
- - Examples - -------- - To take the running sum of four successive values: - - >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] - array([ 6, 10, 14, 18]) - - A 2-D example: - - >>> x = np.linspace(0, 15, 16).reshape(4,4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - - :: - - # reduce such that the result has the following five rows: - # [row1 + row2 + row3] - # [row4] - # [row2] - # [row3] - # [row1 + row2 + row3 + row4] - - >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) - array([[ 12., 15., 18., 21.], - [ 12., 13., 14., 15.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 24., 28., 32., 36.]]) - - :: - - # reduce such that result has the following two columns: - # [col1 * col2 * col3, col4] - - >>> np.multiply.reduceat(x, [0, 3], 1) - array([[ 0., 3.], - [ 120., 7.], - [ 720., 11.], - [ 2184., 15.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('outer', - """ - outer(A, B) - - Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. - - Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of - ``op.outer(A, B)`` is an array of dimension M + N such that: - - .. 
math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = - op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) - - For `A` and `B` one-dimensional, this is equivalent to:: - - r = empty(len(A),len(B)) - for i in xrange(len(A)): - for j in xrange(len(B)): - r[i,j] = op(A[i], B[j]) # op = ufunc in question - - Parameters - ---------- - A : array_like - First array - B : array_like - Second array - - Returns - ------- - r : ndarray - Output array - - See Also - -------- - numpy.outer - - Examples - -------- - >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) - array([[ 4, 5, 6], - [ 8, 10, 12], - [12, 15, 18]]) - - A multi-dimensional example: - - >>> A = np.array([[1, 2, 3], [4, 5, 6]]) - >>> A.shape - (2, 3) - >>> B = np.array([[1, 2, 3, 4]]) - >>> B.shape - (1, 4) - >>> C = np.multiply.outer(A, B) - >>> C.shape; C - (2, 3, 1, 4) - array([[[[ 1, 2, 3, 4]], - [[ 2, 4, 6, 8]], - [[ 3, 6, 9, 12]]], - [[[ 4, 8, 12, 16]], - [[ 5, 10, 15, 20]], - [[ 6, 12, 18, 24]]]]) - - """)) - - -############################################################################## -# -# Documentation for dtype attributes and methods -# -############################################################################## - -############################################################################## -# -# dtype object -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', - """ - dtype(obj, align=False, copy=False) - - Create a data type object. - - A numpy array is homogeneous, and contains elements described by a - dtype object. A dtype object can be constructed from different - combinations of fundamental numeric types. - - Parameters - ---------- - obj - Object to be converted to a data type object. - align : bool, optional - Add padding to the fields to match what a C compiler would output - for a similar C-struct. Can be ``True`` only if `obj` is a dictionary - or a comma-separated string. 
- copy : bool, optional - Make a new copy of the data-type object. If ``False``, the result - may just be a reference to a built-in data-type object. - - See also - -------- - result_type - - Examples - -------- - Using array-scalar type: - - >>> np.dtype(np.int16) - dtype('int16') - - Record, one field name 'f1', containing int16: - - >>> np.dtype([('f1', np.int16)]) - dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) - dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) - dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '>> np.dtype("i4, (2,3)f8") - dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) - dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', '|S1'), ('age', '|u1')]) - - Offsets in bytes, here 0 and 25: - - >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', '|S25'), ('age', '|u1')]) - - """) - -############################################################################## -# -# dtype attributes -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', - """ - The required alignment (bytes) of this data-type according to the compiler. - - More information is available in the C-API section of the manual. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', - """ - A character indicating the byte-order of this data-type object. - - One of: - - === ============== - '=' native - '<' little-endian - '>' big-endian - '|' not applicable - === ============== - - All built-in data-type objects have byteorder either '=' or '|'. 
- - Examples - -------- - - >>> dt = np.dtype('i2') - >>> dt.byteorder - '=' - >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder - '|' - >>> # or ASCII strings - >>> np.dtype('S2').byteorder - '|' - >>> # Even if specific code is given, and it is native - >>> # '=' is the byteorder - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> dt = np.dtype(native_code + 'i2') - >>> dt.byteorder - '=' - >>> # Swapped code shows up as itself - >>> dt = np.dtype(swapped_code + 'i2') - >>> dt.byteorder == swapped_code - True - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('char', - """A unique character code for each of the 21 different built-in types.""")) - -add_newdoc('numpy.core.multiarray', 'dtype', ('descr', - """ - Array-interface compliant full description of the data-type. - - The format is that required by the 'descr' key in the - `__array_interface__` attribute. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('fields', - """ - Dictionary of named fields defined for this data type, or ``None``. - - The dictionary is indexed by keys that are the names of the fields. - Each entry in the dictionary is a tuple fully describing the field:: - - (dtype, offset[, title]) - - If present, the optional title can be any object (if it is a string - or unicode then it will also be a key in the fields dictionary, - otherwise it's meta-data). Notice also that the first two elements - of the tuple can be passed directly as arguments to the ``ndarray.getfield`` - and ``ndarray.setfield`` methods. 
- - See Also - -------- - ndarray.getfield, ndarray.setfield - - Examples - -------- - - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> print dt.fields - {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('flags', - """ - Bit-flags describing how this data type is to be interpreted. - - Bit-masks are in `numpy.core.multiarray` as the constants - `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, - `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation - of these flags is in C-API documentation; they are largely useful - for user-defined data-types. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', - """ - Boolean indicating whether this dtype contains any reference-counted - objects in any fields or sub-dtypes. - - Recall that what is actually in the ndarray memory representing - the Python object is the memory address of that object (a pointer). - Special handling may be required, and this attribute is useful for - distinguishing data types that may contain arbitrary Python objects - and data-types that won't. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', - """ - Integer indicating how this dtype relates to the built-in dtypes. - - Read-only. - - = ======================================================================== - 0 if this is a structured array type, with fields - 1 if this is a dtype compiled into numpy (such as ints, floats etc) - 2 if the dtype is for a user-defined numpy type - A user-defined type uses the numpy C-API machinery to extend - numpy to handle a new array type. See - :ref:`user.user-defined-data-types` in the Numpy manual. 
- = ======================================================================== - - Examples - -------- - >>> dt = np.dtype('i2') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype('f8') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype([('field1', 'f8')]) - >>> dt.isbuiltin - 0 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', - """ - Boolean indicating whether the byte order of this dtype is native - to the platform. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', - """ - The element size of this data-type object. - - For 18 of the 21 types this number is fixed by the data-type. - For the flexible data-types, this number can be anything. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('kind', - """ - A character code (one of 'biufcSUV') identifying the general kind of data. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('name', - """ - A bit-width name for this data-type. - - Un-sized flexible data-type objects do not have this attribute. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('names', - """ - Ordered list of field names, or ``None`` if there are no fields. - - The names are ordered according to increasing byte offset. This can be - used, for example, to walk through all of the named fields in offset order. - - Examples - -------- - - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.names - ('name', 'grades') - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('num', - """ - A unique number for each of the 21 different built-in types. - - These are roughly ordered from least-to-most precision. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('shape', - """ - Shape tuple of the sub-array if this data type describes a sub-array, - and ``()`` otherwise. 
- - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('str', - """The array-protocol typestring of this data-type object.""")) - -add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', - """ - Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and - None otherwise. - - The *shape* is the fixed shape of the sub-array described by this - data type, and *item_dtype* the data type of the array. - - If a field whose dtype object has this attribute is retrieved, - then the extra dimensions implied by *shape* are tacked on to - the end of the retrieved array. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('type', - """The type object used to instantiate a scalar of this data-type.""")) - -############################################################################## -# -# dtype methods -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new dtype with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order - specifications below. The default value ('S') results in - swapping the current byte order. - `new_order` codes can be any of:: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The code does a case-insensitive check on the first letter of - `new_order` for these alternatives. For example, any of '>' - or 'B' or 'b' or 'brian' are valid to specify big-endian. - - Returns - ------- - new_dtype : dtype - New dtype object with the given change to the byte order. - - Notes - ----- - Changes are also made in all fields and sub-arrays of the data type. 
- - Examples - -------- - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> native_dt = np.dtype(native_code+'i2') - >>> swapped_dt = np.dtype(swapped_code+'i2') - >>> native_dt.newbyteorder('S') == swapped_dt - True - >>> native_dt.newbyteorder() == swapped_dt - True - >>> native_dt == swapped_dt.newbyteorder('S') - True - >>> native_dt == swapped_dt.newbyteorder('=') - True - >>> native_dt == swapped_dt.newbyteorder('N') - True - >>> native_dt == native_dt.newbyteorder('|') - True - >>> np.dtype('>> np.dtype('>> np.dtype('>i2') == native_dt.newbyteorder('>') - True - >>> np.dtype('>i2') == native_dt.newbyteorder('B') - True - - """)) - - -############################################################################## -# -# nd_grid instances -# -############################################################################## - -add_newdoc('numpy.lib.index_tricks', 'mgrid', - """ - `nd_grid` instance which returns a dense multi-dimensional "meshgrid". - - An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense - (or fleshed out) mesh-grid when indexed, so that each returned argument - has the same shape. The dimensions and number of the output arrays are - equal to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then - the integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. 
- - Returns - ---------- - mesh-grid `ndarrays` all of the same dimensions - - See Also - -------- - numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects - ogrid : like mgrid but returns open (not fleshed out) mesh grids - r_ : array concatenator - - Examples - -------- - >>> np.mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> np.mgrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - - """) - -add_newdoc('numpy.lib.index_tricks', 'ogrid', - """ - `nd_grid` instance which returns an open multi-dimensional "meshgrid". - - An instance of `numpy.lib.index_tricks.nd_grid` which returns an open - (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension - of each returned array is greater than 1. The dimension and number of the - output arrays are equal to the number of indexing dimensions. If the step - length is not a complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then - the integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - Returns - ---------- - mesh-grid `ndarrays` with only one dimension :math:`\\neq 1` - - See Also - -------- - np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects - mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids - r_ : array concatenator - - Examples - -------- - >>> from numpy import ogrid - >>> ogrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. 
]) - >>> ogrid[0:5,0:5] - [array([[0], - [1], - [2], - [3], - [4]]), array([[0, 1, 2, 3, 4]])] - - """) - - -############################################################################## -# -# Documentation for `generic` attributes and methods -# -############################################################################## - -add_newdoc('numpy.core.numerictypes', 'generic', - """ - Base class for numpy scalar types. - - Class from which most (all?) numpy scalar types are derived. For - consistency, exposes the same API as `ndarray`, despite many - consequent attributes being either "get-only," or completely irrelevant. - This is the class from which it is strongly suggested users should derive - custom scalar types. - - """) - -# Attributes - -add_newdoc('numpy.core.numerictypes', 'generic', ('T', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('base', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('data', - """Pointer to start of data.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', - """Get array data-descriptor.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flags', - """The integer value of flags.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flat', - """A 1-D view of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('imag', - """The imaginary part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', - """The length of one element in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', - """The length of the scalar in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', - """The number of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('real', - """The real part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('shape', - """Tuple of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('size', - """The number of elements in the gentype.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('strides', - """Tuple of bytes steps in each dimension.""")) - -# Methods - -add_newdoc('numpy.core.numerictypes', 'generic', ('all', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('any', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. 
- - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('astype', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('choose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('clip', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('compress', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('copy', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dump', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('fill', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('item', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('max', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('mean', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('min', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new `dtype` with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - The `new_order` code can be any from the following: - - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * 'S' - swap dtype from current to opposite endian - * {'|', 'I'} - ignore (no change to byte order) - - Parameters - ---------- - new_order : str, optional - Byte order to force; a value from the byte order specifications - above. The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. 
For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - - Returns - ------- - new_dtype : dtype - New `dtype` object with the given change to the byte order. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('prod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('put', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. 
- - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('resize', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('round', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('std', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('take', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('trace', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('var', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('view', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - - -############################################################################## -# -# Documentation for other scalar classes -# -############################################################################## - -add_newdoc('numpy.core.numerictypes', 'bool_', - """Numpy's Boolean type. Character code: ``?``. Alias: bool8""") - -add_newdoc('numpy.core.numerictypes', 'complex64', - """ - Complex number type composed of two 32 bit floats. Character code: 'F'. - - """) - -add_newdoc('numpy.core.numerictypes', 'complex128', - """ - Complex number type composed of two 64 bit floats. Character code: 'D'. - Python complex compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'complex256', - """ - Complex number type composed of two 128-bit floats. Character code: 'G'. - - """) - -add_newdoc('numpy.core.numerictypes', 'float32', - """ - 32-bit floating-point number. Character code 'f'. C float compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'float64', - """ - 64-bit floating-point number. Character code 'd'. Python float compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'float96', - """ - """) - -add_newdoc('numpy.core.numerictypes', 'float128', - """ - 128-bit floating-point number. Character code: 'g'. C long float - compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'int8', - """8-bit integer. Character code ``b``. C char compatible.""") - -add_newdoc('numpy.core.numerictypes', 'int16', - """16-bit integer. Character code ``h``. C short compatible.""") - -add_newdoc('numpy.core.numerictypes', 'int32', - """32-bit integer. Character code 'i'. C int compatible.""") - -add_newdoc('numpy.core.numerictypes', 'int64', - """64-bit integer. Character code 'l'. Python int compatible.""") - -add_newdoc('numpy.core.numerictypes', 'object_', - """Any Python object. 
Character code: 'O'.""") diff --git a/numpy-1.6.2/numpy/compat/__init__.py b/numpy-1.6.2/numpy/compat/__init__.py deleted file mode 100644 index 9b42616167..0000000000 --- a/numpy-1.6.2/numpy/compat/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -Compatibility module. - -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -""" -import _inspect -import py3k -from _inspect import getargspec, formatargspec -from py3k import * - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/numpy-1.6.2/numpy/compat/_inspect.py b/numpy-1.6.2/numpy/compat/_inspect.py deleted file mode 100644 index 612d1e1477..0000000000 --- a/numpy-1.6.2/numpy/compat/_inspect.py +++ /dev/null @@ -1,219 +0,0 @@ -"""Subset of inspect module from upstream python - -We use this instead of upstream because upstream inspect is slow to import, and -significanly contributes to numpy import times. Importing this copy has almost -no overhead. -""" - -import types - -__all__ = ['getargspec', 'formatargspec'] - -# ----------------------------------------------------------- type-checking -def ismethod(object): - """Return true if the object is an instance method. - - Instance method objects provide these attributes: - __doc__ documentation string - __name__ name with which this method was defined - im_class class object in which this method belongs - im_func function object containing implementation of method - im_self instance to which this method is bound, or None""" - return isinstance(object, types.MethodType) - -def isfunction(object): - """Return true if the object is a user-defined function. 
- - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - func_code code object containing compiled function bytecode - func_defaults tuple of any default values for arguments - func_doc (same as __doc__) - func_globals global namespace in which this function was defined - func_name (same as __name__)""" - return isinstance(object, types.FunctionType) - -def iscode(object): - """Return true if the object is a code object. - - Code objects provide these attributes: - co_argcount number of arguments (not including * or ** args) - co_code string of raw compiled bytecode - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names of local variables - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables""" - return isinstance(object, types.CodeType) - -# ------------------------------------------------ argument list extraction -# These constants are from Python's compile.h. -CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 - -def getargs(co): - """Get information about the arguments accepted by a code object. 
- - Three things are returned: (args, varargs, varkw), where 'args' is - a list of argument names (possibly containing nested lists), and - 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" - - if not iscode(co): - raise TypeError('arg is not a code object') - - code = co.co_code - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - step = 0 - - # The following acrobatics are for anonymous (tuple) arguments. - for i in range(nargs): - if args[i][:1] in ['', '.']: - stack, remain, count = [], [], [] - while step < len(code): - op = ord(code[step]) - step = step + 1 - if op >= dis.HAVE_ARGUMENT: - opname = dis.opname[op] - value = ord(code[step]) + ord(code[step+1])*256 - step = step + 2 - if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']: - remain.append(value) - count.append(value) - elif opname == 'STORE_FAST': - stack.append(names[value]) - - # Special case for sublists of length 1: def foo((bar)) - # doesn't generate the UNPACK_TUPLE bytecode, so if - # `remain` is empty here, we have such a sublist. - if not remain: - stack[0] = [stack[0]] - break - else: - remain[-1] = remain[-1] - 1 - while remain[-1] == 0: - remain.pop() - size = count.pop() - stack[-size:] = [stack[-size:]] - if not remain: break - remain[-1] = remain[-1] - 1 - if not remain: break - args[i] = stack[0] - - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return args, varargs, varkw - -def getargspec(func): - """Get the names and default values of a function's arguments. - - A tuple of four things is returned: (args, varargs, varkw, defaults). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'defaults' is an n-tuple of the default values of the last n arguments. 
- """ - - if ismethod(func): - func = func.im_func - if not isfunction(func): - raise TypeError('arg is not a Python function') - args, varargs, varkw = getargs(func.func_code) - return args, varargs, varkw, func.func_defaults - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame.""" - args, varargs, varkw = getargs(frame.f_code) - return args, varargs, varkw, frame.f_locals - -def joinseq(seq): - if len(seq) == 1: - return '(' + seq[0] + ',)' - else: - return '(' + ', '.join(seq) + ')' - -def strseq(object, convert, join=joinseq): - """Recursively walk a sequence, stringifying each element.""" - if type(object) in [types.ListType, types.TupleType]: - return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object)) - else: - return convert(object) - -def formatargspec(args, varargs=None, varkw=None, defaults=None, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargspec. - - The first four arguments are (args, varargs, varkw, defaults). The - other four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. 
The ninth - argument is an optional function to format the sequence of arguments.""" - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i in range(len(args)): - spec = strseq(args[i], formatarg, join) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs is not None: - specs.append(formatvarargs(varargs)) - if varkw is not None: - specs.append(formatvarkw(varkw)) - return '(' + ', '.join(specs) + ')' - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments.""" - def convert(name, locals=locals, - formatarg=formatarg, formatvalue=formatvalue): - return formatarg(name) + formatvalue(locals[name]) - specs = [] - for i in range(len(args)): - specs.append(strseq(args[i], convert, join)) - if varargs: - specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) - if varkw: - specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + string.join(specs, ', ') + ')' - -if __name__ == '__main__': - import inspect - def foo(x, y, z=None): - return None - - print inspect.getargs(foo.func_code) - print getargs(foo.func_code) - - print inspect.getargspec(foo) - print getargspec(foo) - - print inspect.formatargspec(*inspect.getargspec(foo)) - print formatargspec(*getargspec(foo)) diff --git a/numpy-1.6.2/numpy/compat/py3k.py b/numpy-1.6.2/numpy/compat/py3k.py deleted file mode 100644 index 001455de5c..0000000000 --- a/numpy-1.6.2/numpy/compat/py3k.py +++ 
/dev/null @@ -1,58 +0,0 @@ -""" -Python 3 compatibility tools. - -""" - -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1'] - -import sys - -if sys.version_info[0] >= 3: - import io - bytes = bytes - unicode = str - asunicode = str - def asbytes(s): - if isinstance(s, bytes): - return s - return s.encode('latin1') - def asstr(s): - if isinstance(s, str): - return s - return s.decode('latin1') - def isfileobj(f): - return isinstance(f, (io.FileIO, io.BufferedReader)) - def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - strchar = 'U' -else: - bytes = str - unicode = unicode - asbytes = str - asstr = str - strchar = 'S' - def isfileobj(f): - return isinstance(f, file) - def asunicode(s): - if isinstance(s, unicode): - return s - return s.decode('ascii') - def open_latin1(filename, mode='r'): - return open(filename, mode=mode) - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) diff --git a/numpy-1.6.2/numpy/compat/setup.py b/numpy-1.6.2/numpy/compat/setup.py deleted file mode 100644 index 4e07810850..0000000000 --- a/numpy-1.6.2/numpy/compat/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('compat',parent_package,top_path) - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/compat/setupscons.py b/numpy-1.6.2/numpy/compat/setupscons.py deleted 
file mode 100644 index e518245b2a..0000000000 --- a/numpy-1.6.2/numpy/compat/setupscons.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python -import os.path - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('compat',parent_package,top_path) - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/core/SConscript b/numpy-1.6.2/numpy/core/SConscript deleted file mode 100644 index 956a887c24..0000000000 --- a/numpy-1.6.2/numpy/core/SConscript +++ /dev/null @@ -1,521 +0,0 @@ -# Last Change: Sun Apr 26 05:00 PM 2009 J -# vim:syntax=python -import os -import sys -from os.path import join as pjoin, basename as pbasename, dirname as pdirname -from copy import deepcopy - -from numscons import get_pythonlib_dir -from numscons import GetNumpyEnvironment -from numscons import CheckCBLAS -from numscons import write_info - -from code_generators.numpy_api import \ - multiarray_api as multiarray_api_dict, \ - ufunc_api as ufunc_api_dict - -from setup_common import * -from scons_support import CheckBrokenMathlib, define_no_smp, \ - check_mlib, check_mlibs, is_npy_no_signal, CheckInline -from scons_support import array_api_gen_bld, ufunc_api_gen_bld, template_bld, \ - umath_bld, CheckGCC4, check_api_version, \ - CheckLongDoubleRepresentation - -import SCons - -# Set to True to enable multiple file compilations (experimental) -try: - os.environ['NPY_SEPARATE_COMPILATION'] - ENABLE_SEPARATE_COMPILATION = True -except KeyError: - ENABLE_SEPARATE_COMPILATION = False -try: - os.environ['NPY_BYPASS_SINGLE_EXTENDED'] - BYPASS_SINGLE_EXTENDED = True -except KeyError: - BYPASS_SINGLE_EXTENDED = False - -env = GetNumpyEnvironment(ARGUMENTS) -env.Append(CPPPATH = env["PYEXTCPPPATH"]) -if os.name == 'nt': - # NT needs the pythonlib to run any code importing Python.h, including - # simple code using only typedef 
and so on, so we need it for configuration - # checks - env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -# Check whether we have a mismatch between the set C API VERSION and the -# actual C API VERSION -check_api_version(C_API_VERSION) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckBrokenMathlib' : CheckBrokenMathlib, - 'CheckCBLAS' : CheckCBLAS, 'CheckInline': CheckInline, 'CheckGCC4' : CheckGCC4, - 'CheckLongDoubleRepresentation': CheckLongDoubleRepresentation}, - config_h = pjoin('config.h')) - -# numpyconfig_sym will keep the values of some configuration variables, the one -# needed for the public numpy API. - -# Convention: list of tuples (definition, value). value: -# - 0: #undef definition -# - 1: #define definition -# - string: #define definition value -numpyconfig_sym = [] - -#--------------- -# Checking Types -#--------------- -if not config.CheckHeader("Python.h"): - errmsg = [] - for line in config.GetLastError(): - errmsg.append("%s " % line) - print """ -Error: Python.h header cannot be compiled (or cannot be found). -On linux, check that you have python-dev/python-devel packages. On windows, -check that you have he platform SDK. You may also use unsupported cflags. 
-Configuration error log says: \n\n%s""" % ''.join(errmsg) - Exit(-1) - -st = config.CheckHeader("endian.h") -if st: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_ENDIAN_H', '#define NPY_HAVE_ENDIAN_H 1')) -else: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_ENDIAN_H', '')) - -def check_type(type, include = None): - st = config.CheckTypeSize(type, includes = include) - type = type.replace(' ', '_') - if st: - numpyconfig_sym.append(('SIZEOF_%s' % type.upper(), '%d' % st)) - else: - numpyconfig_sym.append(('SIZEOF_%s' % type.upper(), 0)) - -for type in ('short', 'int', 'long'): - # SIZEOF_LONG defined on darwin - if type == "long": - if not config.CheckDeclaration("SIZEOF_LONG", includes="#include "): - check_type(type) - else: - numpyconfig_sym.append(('SIZEOF_LONG', 'SIZEOF_LONG')) - else: - check_type(type) - -for type in ('float', 'double', 'long double'): - sz = config.CheckTypeSize(type) - numpyconfig_sym.append(('SIZEOF_%s' % type2def(type), str(sz))) - - # Compute size of corresponding complex type: used to check that our - # definition is binary compatible with C99 complex type (check done at - # build time in npy_common.h) - complex_def = "struct {%s __x; %s __y;}" % (type, type) - sz = config.CheckTypeSize(complex_def) - numpyconfig_sym.append(('SIZEOF_COMPLEX_%s' % type2def(type), str(sz))) - -if sys.platform != 'darwin': - tp = config.CheckLongDoubleRepresentation() - config.Define("HAVE_LDOUBLE_%s" % tp, 1, - "Define for arch-specific long double representation") - -for type in ('Py_intptr_t',): - check_type(type, include = "#include \n") - -# We check declaration AND type because that's how distutils does it. 
-if config.CheckDeclaration('PY_LONG_LONG', includes = '#include \n'): - st = config.CheckTypeSize('PY_LONG_LONG', - includes = '#include \n') - assert not st == 0 - numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG', - '#define NPY_SIZEOF_LONGLONG %d' % st)) - numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_PY_LONG_LONG', - '#define NPY_SIZEOF_PY_LONG_LONG %d' % st)) -else: - numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG', '')) - numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_PY_LONG_LONG', '')) - -if not config.CheckDeclaration('CHAR_BIT', includes= '#include \n'): - raise RuntimeError(\ -"""Config wo CHAR_BIT is not supported with scons: please contact the -maintainer (cdavid)""") - -#---------------------- -# Checking signal stuff -#---------------------- -if is_npy_no_signal(): - numpyconfig_sym.append(('DEFINE_NPY_NO_SIGNAL', '#define NPY_NO_SIGNAL\n')) - config.Define('__NPY_PRIVATE_NO_SIGNAL', - comment = "define to 1 to disable SMP support ") -else: - numpyconfig_sym.append(('DEFINE_NPY_NO_SIGNAL', '')) - -#--------------------- -# Checking SMP option -#--------------------- -if define_no_smp(): - nosmp = 1 -else: - nosmp = 0 -numpyconfig_sym.append(('NPY_NO_SMP', nosmp)) - -#---------------------------------------------- -# Check whether we can use C99 printing formats -#---------------------------------------------- -if config.CheckDeclaration(('PRIdPTR'), includes = '#include '): - numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', '#define NPY_USE_C99_FORMATS 1')) -else: - numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', '')) - -#---------------------- -# Checking the mathlib -#---------------------- -mlibs = [[], ['m'], ['cpml']] -mathlib = os.environ.get('MATHLIB') -if mathlib: - mlibs.insert(0, mathlib) - -mlib = check_mlibs(config, mlibs) - -# XXX: this is ugly: mathlib has nothing to do in a public header file -numpyconfig_sym.append(('MATHLIB', ','.join(mlib))) - -#---------------------------------- -# Checking the math funcs available 
-#---------------------------------- -# Function to check: -mfuncs = ('expl', 'expf', 'log1p', 'expm1', 'asinh', 'atanhf', 'atanhl', - 'rint', 'trunc') - -# Set value to 1 for each defined function (in math lib) -mfuncs_defined = dict([(f, 0) for f in mfuncs]) - -# Check for mandatory funcs: we barf if a single one of those is not there -if not config.CheckFuncsAtOnce(MANDATORY_FUNCS): - raise SystemError("One of the required function to build numpy is not" - " available (the list is %s)." % str(MANDATORY_FUNCS)) - -# Standard functions which may not be available and for which we have a -# replacement implementation -# -def check_funcs(funcs): - # Use check_funcs_once first, and if it does not work, test func per - # func. Return success only if all the functions are available - st = config.CheckFuncsAtOnce(funcs) - if not st: - # Global check failed, check func per func - for f in funcs: - st = config.CheckFunc(f, language = 'C') - -for f in OPTIONAL_STDFUNCS_MAYBE: - if config.CheckDeclaration(fname2def(f), - includes="#include \n#include "): - OPTIONAL_STDFUNCS.remove(f) -check_funcs(OPTIONAL_STDFUNCS) - -# C99 functions: float and long double versions -if not BYPASS_SINGLE_EXTENDED: - check_funcs(C99_FUNCS_SINGLE) - check_funcs(C99_FUNCS_EXTENDED) - -# Normally, isnan and isinf are macro (C99), but some platforms only have -# func, or both func and macro version. Check for macro only, and define -# replacement ones if not found. 
-# Note: including Python.h is necessary because it modifies some math.h -# definitions -for f in ["isnan", "isinf", "signbit", "isfinite"]: - includes = """\ -#include -#include -""" - st = config.CheckDeclaration(f, includes=includes) - if st: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), - '#define NPY_HAVE_DECL_%s' % f.upper())) - else: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), '')) - -inline = config.CheckInline() -config.Define('inline', inline) - - -if ENABLE_SEPARATE_COMPILATION: - config.Define("ENABLE_SEPARATE_COMPILATION", 1) - numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1')) -else: - numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '')) - -#----------------------------- -# Checking for complex support -#----------------------------- -if config.CheckHeader('complex.h'): - numpyconfig_sym.append(('DEFINE_NPY_USE_C99_COMPLEX', '#define NPY_USE_C99_COMPLEX 1')) - - for t in C99_COMPLEX_TYPES: - st = config.CheckType(t, includes='#include ') - if st: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s' % type2def(t), - '#define NPY_HAVE_%s' % type2def(t))) - else: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s' % type2def(t), '')) - - def check_prec(prec): - flist = [f + prec for f in C99_COMPLEX_FUNCS] - st = config.CheckFuncsAtOnce(flist) - if not st: - # Global check failed, check func per func - for f in flist: - config.CheckFunc(f, language='C') - - check_prec('') - check_prec('f') - check_prec('l') - -else: - numpyconfig_sym.append(('DEFINE_NPY_USE_C99_COMPLEX', '')) - for t in C99_COMPLEX_TYPES: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s' % type2def(t), '')) - -def visibility_define(): - if config.CheckGCC4(): - return '__attribute__((visibility("hidden")))' - else: - return '' - -numpyconfig_sym.append(('VISIBILITY_HIDDEN', visibility_define())) - -# Add the C API/ABI versions -numpyconfig_sym.append(('NPY_ABI_VERSION', '0x%.8X' % 
C_ABI_VERSION)) -numpyconfig_sym.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) - -# Check whether we need our own wide character support -if not config.CheckDeclaration('Py_UNICODE_WIDE', includes='#include '): - PYTHON_HAS_UNICODE_WIDE = True -else: - PYTHON_HAS_UNICODE_WIDE = False - -#------------------------------------------------------- -# Define the function PyOS_ascii_strod if not available -#------------------------------------------------------- -if not config.CheckDeclaration('PyOS_ascii_strtod', - includes = "#include "): - if config.CheckFunc('strtod'): - config.Define('PyOS_ascii_strtod', 'strtod', - "Define to a function to use as a replacement for "\ - "PyOS_ascii_strtod if not available in python header") - -#------------------------------------ -# DISTUTILS Hack on AMD64 on windows -#------------------------------------ -# XXX: this is ugly -if sys.platform=='win32' or os.name=='nt': - from distutils.msvccompiler import get_build_architecture - a = get_build_architecture() - print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \ - (a, os.name, sys.platform) - if a == 'AMD64': - distutils_use_sdk = 1 - config.Define('DISTUTILS_USE_SDK', distutils_use_sdk, - "define to 1 to disable SMP support ") - - if a == "Intel": - config.Define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1, - "define to 1 to force long double format string to the" \ - " same as double (Lg -> g)") -#-------------- -# Checking Blas -#-------------- -if config.CheckCBLAS(): - build_blasdot = 1 -else: - build_blasdot = 0 - -config.config_h_text += """ -#ifndef _NPY_NPY_CONFIG_H_ -#error config.h should never be included directly, include npy_config.h instead -#endif -""" - -config.Finish() -write_info(env) - -#========== -# Build -#========== - -# List of headers which need to be "installed " into the build directory for -# proper in-place build support -generated_headers = [] - -#--------------------------------------- -# Generate the public configuration file 
-#--------------------------------------- -config_dict = {} -# XXX: this is ugly, make the API for config.h and numpyconfig.h similar -for key, value in numpyconfig_sym: - config_dict['@%s@' % key] = str(value) -env['SUBST_DICT'] = config_dict - -include_dir = 'include/numpy' -target = env.SubstInFile(pjoin(include_dir, '_numpyconfig.h'), - pjoin(include_dir, '_numpyconfig.h.in')) -generated_headers.append(target[0]) - -env['CONFIG_H_GEN'] = numpyconfig_sym - -#--------------------------- -# Builder for generated code -#--------------------------- -env.Append(BUILDERS = {'GenerateMultiarrayApi' : array_api_gen_bld, - 'GenerateUfuncApi' : ufunc_api_gen_bld, - 'GenerateFromTemplate' : template_bld, - 'GenerateUmath' : umath_bld}) - -#------------------------ -# Generate generated code -#------------------------ -scalartypes_src = env.GenerateFromTemplate( - pjoin('src', 'multiarray', 'scalartypes.c.src')) -umath_funcs_src = env.GenerateFromTemplate(pjoin('src', 'umath', 'funcs.inc.src')) -umath_loops_src = env.GenerateFromTemplate(pjoin('src', 'umath', 'loops.c.src')) -arraytypes_src = env.GenerateFromTemplate( - pjoin('src', 'multiarray', 'arraytypes.c.src')) -nditer_src = env.GenerateFromTemplate( - pjoin('src', 'multiarray', 'nditer.c.src')) -lowlevel_strided_loops_src = env.GenerateFromTemplate( - pjoin('src', 'multiarray', 'lowlevel_strided_loops.c.src')) -einsum_src = env.GenerateFromTemplate(pjoin('src', 'multiarray', 'einsum.c.src')) -sortmodule_src = env.GenerateFromTemplate(pjoin('src', '_sortmodule.c.src')) -umathmodule_src = env.GenerateFromTemplate(pjoin('src', 'umath', - 'umathmodule.c.src')) -umath_tests_src = env.GenerateFromTemplate(pjoin('src', 'umath', - 'umath_tests.c.src')) -multiarray_tests_src = env.GenerateFromTemplate(pjoin('src', 'multiarray', - 'multiarray_tests.c.src')) -scalarmathmodule_src = env.GenerateFromTemplate( - pjoin('src', 'scalarmathmodule.c.src')) - -umath = env.GenerateUmath('__umath_generated', - pjoin('code_generators', 
'generate_umath.py')) - -multiarray_api = env.GenerateMultiarrayApi('include/numpy/multiarray_api', - [SCons.Node.Python.Value(d) for d in multiarray_api_dict]) -generated_headers.append(multiarray_api[0]) - -ufunc_api = env.GenerateUfuncApi('include/numpy/ufunc_api', - [SCons.Node.Python.Value(d) for d in ufunc_api_dict]) -generated_headers.append(ufunc_api[0]) - -# include/numpy is added for compatibility reasons with distutils: this is -# needed for __multiarray_api.c and __ufunc_api.c included from multiarray and -# ufunc. -env.Prepend(CPPPATH = ['src/private', 'include', '.', 'include/numpy']) - -# npymath core lib -npymath_src = [env.GenerateFromTemplate(pjoin('src', 'npymath', 'npy_math.c.src')), - env.GenerateFromTemplate(pjoin('src', 'npymath', 'npy_math_complex.c.src')), - env.GenerateFromTemplate(pjoin('src', 'npymath', 'ieee754.c.src')), - pjoin('src', 'npymath', 'halffloat.c')] -env.DistutilsInstalledStaticExtLibrary("npymath", npymath_src, install_dir='lib') -env.Prepend(LIBS=["npymath"]) -env.Prepend(LIBPATH=["."]) - -subst_dict = {'@prefix@': '$distutils_install_prefix', - '@pkgname@': 'numpy.core', '@sep@': os.path.sep} -npymath_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'npymath.ini'), - 'npymath.ini.in', SUBST_DICT=subst_dict) - -subst_dict = {'@posix_mathlib@': " ".join(['-l%s' % l for l in mlib]), - '@msvc_mathlib@': " ".join(['%s.mlib' % l for l in mlib])} -mlib_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'mlib.ini'), - 'mlib.ini.in', SUBST_DICT=subst_dict) -env.Install('$distutils_installdir/lib/npy-pkg-config', mlib_ini) -env.Install('$distutils_installdir/lib/npy-pkg-config', npymath_ini) - -#----------------- -# Build multiarray -#----------------- -if ENABLE_SEPARATE_COMPILATION: - multiarray_src = [pjoin('src', 'multiarray', 'multiarraymodule.c'), - pjoin('src', 'multiarray', 'hashdescr.c'), - pjoin('src', 'multiarray', 'arrayobject.c'), - pjoin('src', 'multiarray', 'datetime.c'), - pjoin('src', 'multiarray', 
'numpyos.c'), - pjoin('src', 'multiarray', 'flagsobject.c'), - pjoin('src', 'multiarray', 'descriptor.c'), - pjoin('src', 'multiarray', 'iterators.c'), - pjoin('src', 'multiarray', 'mapping.c'), - pjoin('src', 'multiarray', 'number.c'), - pjoin('src', 'multiarray', 'getset.c'), - pjoin('src', 'multiarray', 'sequence.c'), - pjoin('src', 'multiarray', 'methods.c'), - pjoin('src', 'multiarray', 'ctors.c'), - pjoin('src', 'multiarray', 'convert_datatype.c'), - pjoin('src', 'multiarray', 'convert.c'), - pjoin('src', 'multiarray', 'shape.c'), - pjoin('src', 'multiarray', 'item_selection.c'), - pjoin('src', 'multiarray', 'calculation.c'), - pjoin('src', 'multiarray', 'common.c'), - pjoin('src', 'multiarray', 'refcount.c'), - pjoin('src', 'multiarray', 'conversion_utils.c'), - pjoin('src', 'multiarray', 'usertypes.c'), - pjoin('src', 'multiarray', 'buffer.c'), - pjoin('src', 'multiarray', 'numpymemoryview.c'), - pjoin('src', 'multiarray', 'scalarapi.c'), - pjoin('src', 'multiarray', 'nditer_pywrap.c'), - pjoin('src', 'multiarray', 'dtype_transfer.c'), - pjoin("src", "multiarray", "ucsnarrow.c")] - multiarray_src.extend(arraytypes_src) - multiarray_src.extend(scalartypes_src) - multiarray_src.extend(lowlevel_strided_loops_src) - multiarray_src.extend(nditer_src) - multiarray_src.extend(einsum_src) -else: - multiarray_src = [pjoin('src', 'multiarray', 'multiarraymodule_onefile.c')] -multiarray = env.DistutilsPythonExtension('multiarray', source = multiarray_src) -env.DistutilsPythonExtension('multiarray_tests', source=multiarray_tests_src) - -#------------------ -# Build sort module -#------------------ -sort = env.DistutilsPythonExtension('_sort', source = sortmodule_src) - -#------------------- -# Build umath module -#------------------- -if ENABLE_SEPARATE_COMPILATION: - umathmodule_src.extend([pjoin('src', 'umath', 'ufunc_object.c')]) - umathmodule_src.extend(umath_loops_src) -else: - umathmodule_src = [pjoin('src', 'umath', 'umathmodule_onefile.c')] -umathmodule = 
env.DistutilsPythonExtension('umath', source = umathmodule_src) - -#------------------------ -# Build scalarmath module -#------------------------ -scalarmathmodule = env.DistutilsPythonExtension('scalarmath', - source = scalarmathmodule_src) - -#------------------------ -# Build scalarmath module -#------------------------ -umath_tests = env.DistutilsPythonExtension('umath_tests', - source=umath_tests_src) - -#---------------------- -# Build _dotblas module -#---------------------- -if build_blasdot: - dotblas_src = [pjoin('blasdot', i) for i in ['_dotblas.c']] - # because _dotblas does #include CBLAS_HEADER instead of #include - # "cblas.h", scons does not detect the dependency - # XXX: PythonExtension builder does not take the Depends on extension into - # account for some reason, so we first build the object, with forced - # dependency, and then builds the extension. This is more likely a bug in - # our PythonExtension builder, but I cannot see how to solve it. - dotblas_o = env.PythonObject('_dotblas', source = dotblas_src) - env.Depends(dotblas_o, pjoin("blasdot", "cblas.h")) - dotblas = env.DistutilsPythonExtension('_dotblas', dotblas_o) - -# "Install" the header in the build directory, so that in-place build works -for h in generated_headers: - env.Install(pjoin('$distutils_installdir', include_dir), h) diff --git a/numpy-1.6.2/numpy/core/SConstruct b/numpy-1.6.2/numpy/core/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/numpy-1.6.2/numpy/core/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/numpy-1.6.2/numpy/core/__init__.py b/numpy-1.6.2/numpy/core/__init__.py deleted file mode 100644 index 4a9f3ac758..0000000000 --- a/numpy-1.6.2/numpy/core/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ - -from info import __doc__ -from numpy.version import version as __version__ - -import multiarray -import umath -import _internal # for 
freeze programs -import numerictypes as nt -multiarray.set_typeDict(nt.sctypeDict) -import _sort -from numeric import * -from fromnumeric import * -import defchararray as char -import records as rec -from records import * -from memmap import * -from defchararray import chararray -import scalarmath -from function_base import * -from machar import * -from getlimits import * -from shape_base import * -del nt - -from fromnumeric import amax as max, amin as min, \ - round_ as round -from numeric import absolute as abs - -__all__ = ['char','rec','memmap'] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += rec.__all__ -__all__ += ['chararray'] -__all__ += function_base.__all__ -__all__ += machar.__all__ -__all__ += getlimits.__all__ -__all__ += shape_base.__all__ - - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/core/_internal.py b/numpy-1.6.2/numpy/core/_internal.py deleted file mode 100644 index 5298f412b3..0000000000 --- a/numpy-1.6.2/numpy/core/_internal.py +++ /dev/null @@ -1,613 +0,0 @@ -#A place for code to be called from C-code -# that implements more complicated stuff. 
- -import re -import sys -import warnings - -from numpy.compat import asbytes, bytes - -if (sys.byteorder == 'little'): - _nbo = asbytes('<') -else: - _nbo = asbytes('>') - -def _makenames_list(adict): - from multiarray import dtype - allfields = [] - fnames = adict.keys() - for fname in fnames: - obj = adict[fname] - n = len(obj) - if not isinstance(obj, tuple) or n not in [2,3]: - raise ValueError("entry not a 2- or 3- tuple") - if (n > 2) and (obj[2] == fname): - continue - num = int(obj[1]) - if (num < 0): - raise ValueError("invalid offset.") - format = dtype(obj[0]) - if (format.itemsize == 0): - raise ValueError("all itemsizes must be fixed.") - if (n > 2): - title = obj[2] - else: - title = None - allfields.append((fname, format, num, title)) - # sort by offsets - allfields.sort(key=lambda x: x[2]) - names = [x[0] for x in allfields] - formats = [x[1] for x in allfields] - offsets = [x[2] for x in allfields] - titles = [x[3] for x in allfields] - - return names, formats, offsets, titles - -# Called in PyArray_DescrConverter function when -# a dictionary without "names" and "formats" -# fields is used as a data-type descriptor. 
-def _usefields(adict, align): - from multiarray import dtype - try: - names = adict[-1] - except KeyError: - names = None - if names is None: - names, formats, offsets, titles = _makenames_list(adict) - else: - formats = [] - offsets = [] - titles = [] - for name in names: - res = adict[name] - formats.append(res[0]) - offsets.append(res[1]) - if (len(res) > 2): - titles.append(res[2]) - else: - titles.append(None) - - return dtype({"names" : names, - "formats" : formats, - "offsets" : offsets, - "titles" : titles}, align) - - -# construct an array_protocol descriptor list -# from the fields attribute of a descriptor -# This calls itself recursively but should eventually hit -# a descriptor that has no fields and then return -# a simple typestring - -def _array_descr(descriptor): - from multiarray import METADATA_DTSTR - fields = descriptor.fields - if fields is None: - subdtype = descriptor.subdtype - if subdtype is None: - if descriptor.metadata is None: - return descriptor.str - else: - new = descriptor.metadata.copy() - # Eliminate any key related to internal implementation - _ = new.pop(METADATA_DTSTR, None) - return (descriptor.str, new) - else: - return (_array_descr(subdtype[0]), subdtype[1]) - - - names = descriptor.names - ordered_fields = [fields[x] + (x,) for x in names] - result = [] - offset = 0 - for field in ordered_fields: - if field[1] > offset: - num = field[1] - offset - result.append(('','|V%d' % num)) - offset += num - if len(field) > 3: - name = (field[2],field[3]) - else: - name = field[2] - if field[0].subdtype: - tup = (name, _array_descr(field[0].subdtype[0]), - field[0].subdtype[1]) - else: - tup = (name, _array_descr(field[0])) - offset += field[0].itemsize - result.append(tup) - - return result - -# Build a new array from the information in a pickle. 
-# Note that the name numpy.core._internal._reconstruct is embedded in -# pickles of ndarrays made with NumPy before release 1.0 -# so don't remove the name here, or you'll -# break backward compatibilty. -def _reconstruct(subtype, shape, dtype): - from multiarray import ndarray - return ndarray.__new__(subtype, shape, dtype) - - -# format_re and _split were taken from numarray by J. Todd Miller - -def _split(input): - """Split the input formats string into field formats without splitting - the tuple used to specify multi-dimensional arrays.""" - - newlist = [] - hold = asbytes('') - - listinput = input.split(asbytes(',')) - for element in listinput: - if hold != asbytes(''): - item = hold + asbytes(',') + element - else: - item = element - left = item.count(asbytes('(')) - right = item.count(asbytes(')')) - - # if the parenthesis is not balanced, hold the string - if left > right : - hold = item - - # when balanced, append to the output list and reset the hold - elif left == right: - newlist.append(item.strip()) - hold = asbytes('') - - # too many close parenthesis is unacceptable - else: - raise SyntaxError(item) - - # if there is string left over in hold - if hold != asbytes(''): - raise SyntaxError(hold) - - return newlist - -format_datetime = re.compile(asbytes(r""" - (?PM8|m8|datetime64|timedelta64) - ([[] - ((?P\d+)? - (?PY|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as) - (/(?P\d+))? 
- []]) - (//(?P\d+))?)?"""), re.X) - -# Return (baseunit, num, den, events), datetime -# from date-time string -def _datetimestring(astr): - res = format_datetime.match(astr) - if res is None: - raise ValueError("Incorrect date-time string.") - typecode = res.group('typecode') - datetime = (typecode == asbytes('M8') or typecode == asbytes('datetime64')) - defaults = [asbytes('us'), 1, 1, 1] - names = ['baseunit', 'num', 'den', 'events'] - func = [bytes, int, int, int] - dt_tuple = [] - for i, name in enumerate(names): - value = res.group(name) - if value: - dt_tuple.append(func[i](value)) - else: - dt_tuple.append(defaults[i]) - - return tuple(dt_tuple), datetime - -format_re = re.compile(asbytes(r'(?P[<>|=]?)(?P *[(]?[ ,0-9]*[)]? *)(?P[<>|=]?)(?P[A-Za-z0-9.]*)')) - -# astr is a string (perhaps comma separated) - -_convorder = {asbytes('='): _nbo} - -def _commastring(astr): - res = _split(astr) - if (len(res)) < 1: - raise ValueError("unrecognized formant") - result = [] - for k,item in enumerate(res): - # convert item - try: - (order1, repeats, order2, dtype) = format_re.match(item).groups() - except (TypeError, AttributeError): - raise ValueError('format %s is not recognized' % item) - - if order2 == asbytes(''): - order = order1 - elif order1 == asbytes(''): - order = order2 - else: - order1 = _convorder.get(order1, order1) - order2 = _convorder.get(order2, order2) - if (order1 != order2): - raise ValueError('in-consistent byte-order specification %s and %s' % (order1, order2)) - order = order1 - - if order in [asbytes('|'), asbytes('='), _nbo]: - order = asbytes('') - dtype = order + dtype - if (repeats == asbytes('')): - newitem = dtype - else: - newitem = (dtype, eval(repeats)) - result.append(newitem) - - return result - -def _getintp_ctype(): - from multiarray import dtype - val = _getintp_ctype.cache - if val is not None: - return val - char = dtype('p').char - import ctypes - if (char == 'i'): - val = ctypes.c_int - elif char == 'l': - val = ctypes.c_long 
- elif char == 'q': - val = ctypes.c_longlong - else: - val = ctypes.c_long - _getintp_ctype.cache = val - return val -_getintp_ctype.cache = None - -# Used for .ctypes attribute of ndarray - -class _missing_ctypes(object): - def cast(self, num, obj): - return num - - def c_void_p(self, num): - return num - -class _ctypes(object): - def __init__(self, array, ptr=None): - try: - import ctypes - self._ctypes = ctypes - except ImportError: - self._ctypes = _missing_ctypes() - self._arr = array - self._data = ptr - if self._arr.ndim == 0: - self._zerod = True - else: - self._zerod = False - - def data_as(self, obj): - return self._ctypes.cast(self._data, obj) - - def shape_as(self, obj): - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.shape) - - def strides_as(self, obj): - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.strides) - - def get_data(self): - return self._data - - def get_shape(self): - if self._zerod: - return None - return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape) - - def get_strides(self): - if self._zerod: - return None - return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides) - - def get_as_parameter(self): - return self._ctypes.c_void_p(self._data) - - data = property(get_data, None, doc="c-types data") - shape = property(get_shape, None, doc="c-types shape") - strides = property(get_strides, None, doc="c-types strides") - _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") - - -# Given a datatype and an order object -# return a new names tuple -# with the order indicated -def _newnames(datatype, order): - oldnames = datatype.names - nameslist = list(oldnames) - if isinstance(order, str): - order = [order] - if isinstance(order, (list, tuple)): - for name in order: - try: - nameslist.remove(name) - except ValueError: - raise ValueError("unknown field name: %s" % (name,)) - return tuple(list(order) + nameslist) - raise ValueError("unsupported order value: %s" % 
(order,)) - -# Given an array with fields and a sequence of field names -# construct a new array with just those fields copied over -def _index_fields(ary, fields): - from multiarray import empty, dtype - dt = ary.dtype - new_dtype = [(name, dt[name]) for name in fields if name in dt.names] - if ary.flags.f_contiguous: - order = 'F' - else: - order = 'C' - - newarray = empty(ary.shape, dtype=new_dtype, order=order) - - for name in fields: - newarray[name] = ary[name] - - return newarray - -# Given a string containing a PEP 3118 format specifier, -# construct a Numpy dtype - -_pep3118_native_map = { - '?': '?', - 'b': 'b', - 'B': 'B', - 'h': 'h', - 'H': 'H', - 'i': 'i', - 'I': 'I', - 'l': 'l', - 'L': 'L', - 'q': 'q', - 'Q': 'Q', - 'e': 'e', - 'f': 'f', - 'd': 'd', - 'g': 'g', - 'Zf': 'F', - 'Zd': 'D', - 'Zg': 'G', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) - -_pep3118_standard_map = { - '?': '?', - 'b': 'b', - 'B': 'B', - 'h': 'i2', - 'H': 'u2', - 'i': 'i4', - 'I': 'u4', - 'l': 'i4', - 'L': 'u4', - 'q': 'i8', - 'Q': 'u8', - 'e': 'f2', - 'f': 'f', - 'd': 'd', - 'Zf': 'F', - 'Zd': 'D', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) - -def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False): - from numpy.core.multiarray import dtype - - fields = {} - offset = 0 - explicit_name = False - this_explicit_name = False - common_alignment = 1 - is_padding = False - last_offset = 0 - - dummy_name_index = [0] - def next_dummy_name(): - dummy_name_index[0] += 1 - def get_dummy_name(): - while True: - name = 'f%d' % dummy_name_index[0] - if name not in fields: - return name - next_dummy_name() - - # Parse spec - while spec: - value = None - - # End of structure, bail out to upper level - if spec[0] == '}': - spec = spec[1:] - break - - # Sub-arrays (1) - shape = None - if spec[0] == '(': - j = spec.index(')') 
- shape = tuple(map(int, spec[1:j].split(','))) - spec = spec[j+1:] - - # Byte order - if spec[0] in ('@', '=', '<', '>', '^', '!'): - byteorder = spec[0] - if byteorder == '!': - byteorder = '>' - spec = spec[1:] - - # Byte order characters also control native vs. standard type sizes - if byteorder in ('@', '^'): - type_map = _pep3118_native_map - type_map_chars = _pep3118_native_typechars - else: - type_map = _pep3118_standard_map - type_map_chars = _pep3118_standard_typechars - - # Item sizes - itemsize = 1 - if spec[0].isdigit(): - j = 1 - for j in xrange(1, len(spec)): - if not spec[j].isdigit(): - break - itemsize = int(spec[:j]) - spec = spec[j:] - - # Data types - is_padding = False - - if spec[:2] == 'T{': - value, spec, align, next_byteorder = _dtype_from_pep3118( - spec[2:], byteorder=byteorder, is_subdtype=True) - elif spec[0] in type_map_chars: - next_byteorder = byteorder - if spec[0] == 'Z': - j = 2 - else: - j = 1 - typechar = spec[:j] - spec = spec[j:] - is_padding = (typechar == 'x') - dtypechar = type_map[typechar] - if dtypechar in 'USV': - dtypechar += '%d' % itemsize - itemsize = 1 - numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder) - value = dtype(numpy_byteorder + dtypechar) - align = value.alignment - else: - raise ValueError("Unknown PEP 3118 data type specifier %r" % spec) - - # - # Native alignment may require padding - # - # Here we assume that the presence of a '@' character implicitly implies - # that the start of the array is *already* aligned. 
- # - extra_offset = 0 - if byteorder == '@': - start_padding = (-offset) % align - intra_padding = (-value.itemsize) % align - - offset += start_padding - - if intra_padding != 0: - if itemsize > 1 or (shape is not None and _prod(shape) > 1): - # Inject internal padding to the end of the sub-item - value = _add_trailing_padding(value, intra_padding) - else: - # We can postpone the injection of internal padding, - # as the item appears at most once - extra_offset += intra_padding - - # Update common alignment - common_alignment = (align*common_alignment - / _gcd(align, common_alignment)) - - # Convert itemsize to sub-array - if itemsize != 1: - value = dtype((value, (itemsize,))) - - # Sub-arrays (2) - if shape is not None: - value = dtype((value, shape)) - - # Field name - this_explicit_name = False - if spec and spec.startswith(':'): - i = spec[1:].index(':') + 1 - name = spec[1:i] - spec = spec[i+1:] - explicit_name = True - this_explicit_name = True - else: - name = get_dummy_name() - - if not is_padding or this_explicit_name: - if name in fields: - raise RuntimeError("Duplicate field name '%s' in PEP3118 format" - % name) - fields[name] = (value, offset) - last_offset = offset - if not this_explicit_name: - next_dummy_name() - - byteorder = next_byteorder - - offset += value.itemsize - offset += extra_offset - - # Check if this was a simple 1-item type - if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \ - and not is_subdtype: - ret = fields['f0'][0] - else: - ret = dtype(fields) - - # Trailing padding must be explicitly added - padding = offset - ret.itemsize - if byteorder == '@': - padding += (-offset) % common_alignment - if is_padding and not this_explicit_name: - ret = _add_trailing_padding(ret, padding) - - # Finished - if is_subdtype: - return ret, spec, common_alignment, byteorder - else: - return ret - -def _add_trailing_padding(value, padding): - """Inject the specified number of padding bytes at the end of a dtype""" - from 
numpy.core.multiarray import dtype - - if value.fields is None: - vfields = {'f0': (value, 0)} - else: - vfields = dict(value.fields) - - if value.names and value.names[-1] == '' and \ - value[''].char == 'V': - # A trailing padding field is already present - vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding), - vfields[''][1]) - value = dtype(vfields) - else: - # Get a free name for the padding field - j = 0 - while True: - name = 'pad%d' % j - if name not in vfields: - vfields[name] = ('V%d' % padding, value.itemsize) - break - j += 1 - - value = dtype(vfields) - if '' not in vfields: - # Strip out the name of the padding field - names = list(value.names) - names[-1] = '' - value.names = tuple(names) - return value - -def _prod(a): - p = 1 - for x in a: - p *= x - return p - -def _gcd(a, b): - """Calculate the greatest common divisor of a and b""" - while b: - a, b = b, a%b - return a diff --git a/numpy-1.6.2/numpy/core/_mx_datetime_parser.py b/numpy-1.6.2/numpy/core/_mx_datetime_parser.py deleted file mode 100644 index f1b330f007..0000000000 --- a/numpy-1.6.2/numpy/core/_mx_datetime_parser.py +++ /dev/null @@ -1,962 +0,0 @@ -#-*- coding: latin-1 -*- -""" -Date/Time string parsing module. - -This code is a slightly modified version of Parser.py found in mx.DateTime -version 3.0.0 - -As such, it is subject to the terms of the eGenix public license version 1.1.0. - -FIXME: Add license.txt to NumPy -""" - -__all__ = ['date_from_string', 'datetime_from_string'] - -import types -import re -import datetime as dt - -class RangeError(Exception): pass - -# Enable to produce debugging output -_debug = 0 - -# REs for matching date and time parts in a string; These REs -# parse a superset of ARPA, ISO, American and European style dates. -# Timezones are supported via the Timezone submodule. 
- -_year = '(?P-?\d+\d(?!:))' -_fullyear = '(?P-?\d+\d\d(?!:))' -_year_epoch = '(?:' + _year + '(?P *[ABCDE\.]+)?)' -_fullyear_epoch = '(?:' + _fullyear + '(?P *[ABCDE\.]+)?)' -_relyear = '(?:\((?P[-+]?\d+)\))' - -_month = '(?P\d?\d(?!:))' -_fullmonth = '(?P\d\d(?!:))' -_litmonth = ('(?P' - 'jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|' - 'mär|mae|mrz|mai|okt|dez|' - 'fev|avr|juin|juil|aou|aoû|déc|' - 'ene|abr|ago|dic|' - 'out' - ')[a-z,\.;]*') -litmonthtable = { - # English - 'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, - 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12, - # German - 'mär':3, 'mae':3, 'mrz':3, 'mai':5, 'okt':10, 'dez':12, - # French - 'fev':2, 'avr':4, 'juin':6, 'juil':7, 'aou':8, 'aoû':8, - 'déc':12, - # Spanish - 'ene':1, 'abr':4, 'ago':8, 'dic':12, - # Portuguese - 'out':10, - } -_relmonth = '(?:\((?P[-+]?\d+)\))' - -_day = '(?P\d?\d(?!:))' -_usday = '(?P\d?\d(?!:))(?:st|nd|rd|th|[,\.;])?' -_fullday = '(?P\d\d(?!:))' -_litday = ('(?P' - 'mon|tue|wed|thu|fri|sat|sun|' - 'die|mit|don|fre|sam|son|' - 'lun|mar|mer|jeu|ven|sam|dim|' - 'mie|jue|vie|sab|dom|' - 'pri|seg|ter|cua|qui' - ')[a-z]*') -litdaytable = { - # English - 'mon':0, 'tue':1, 'wed':2, 'thu':3, 'fri':4, 'sat':5, 'sun':6, - # German - 'die':1, 'mit':2, 'don':3, 'fre':4, 'sam':5, 'son':6, - # French - 'lun':0, 'mar':1, 'mer':2, 'jeu':3, 'ven':4, 'sam':5, 'dim':6, - # Spanish - 'mie':2, 'jue':3, 'vie':4, 'sab':5, 'dom':6, - # Portuguese - 'pri':0, 'seg':1, 'ter':2, 'cua':3, 'qui':4, - } -_relday = '(?:\((?P[-+]?\d+)\))' - -_hour = '(?P[012]?\d)' -_minute = '(?P[0-6]\d)' -_second = '(?P[0-6]\d(?:[.,]\d+)?)' - -_days = '(?P\d*\d(?:[.,]\d+)?)' -_hours = '(?P\d*\d(?:[.,]\d+)?)' -_minutes = '(?P\d*\d(?:[.,]\d+)?)' -_seconds = '(?P\d*\d(?:[.,]\d+)?)' - -_reldays = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' -_relhours = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' -_relminutes = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' -_relseconds = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' - -_sign = '(?:(?P[-+]) *)' 
-_week = 'W(?P\d?\d)' -_zone = '(?P[A-Z]+|[+-]\d\d?:?(?:\d\d)?)' -_ampm = '(?P[ap][m.]+)' - -_time = (_hour + ':' + _minute + '(?::' + _second + '|[^:]|$) *' - + _ampm + '? *' + _zone + '?') -_isotime = _hour + ':?' + _minute + ':?' + _second + '? *' + _zone + '?' - -_yeardate = _year -_weekdate = _year + '-?(?:' + _week + '-?' + _day + '?)?' -_eurodate = _day + '\.' + _month + '\.' + _year_epoch + '?' -_usdate = _month + '/' + _day + '(?:/' + _year_epoch + '|[^/]|$)' -_altusdate = _month + '-' + _day + '-' + _fullyear_epoch -_isodate = _year + '-' + _month + '-?' + _day + '?(?!:)' -_altisodate = _year + _fullmonth + _fullday + '(?!:)' -_usisodate = _fullyear + '/' + _fullmonth + '/' + _fullday -_litdate = ('(?:'+ _litday + ',? )? *' + - _usday + ' *' + - '[- ] *(?:' + _litmonth + '|'+ _month +') *[- ] *' + - _year_epoch + '?') -_altlitdate = ('(?:'+ _litday + ',? )? *' + - _litmonth + '[ ,.a-z]+' + - _usday + - '(?:[ a-z]+' + _year_epoch + ')?') -_eurlitdate = ('(?:'+ _litday + ',?[ a-z]+)? *' + - '(?:'+ _usday + '[ a-z]+)? *' + - _litmonth + - '(?:[ ,.a-z]+' + _year_epoch + ')?') - -_relany = '[*%?a-zA-Z]+' - -_relisodate = ('(?:(?:' + _relany + '|' + _year + '|' + _relyear + ')-' + - '(?:' + _relany + '|' + _month + '|' + _relmonth + ')-' + - '(?:' + _relany + '|' + _day + '|' + _relday + '))') - -_asctime = ('(?:'+ _litday + ',? )? *' + - _usday + ' *' + - '[- ] *(?:' + _litmonth + '|'+ _month +') *[- ]' + - '(?:[0-9: ]+)' + - _year_epoch + '?') - -_relisotime = ('(?:(?:' + _relany + '|' + _hour + '|' + _relhours + '):' + - '(?:' + _relany + '|' + _minute + '|' + _relminutes + ')' + - '(?::(?:' + _relany + '|' + _second + '|' + _relseconds + '))?)') - -_isodelta1 = (_sign + '?' + - _days + ':' + _hours + ':' + _minutes + ':' + _seconds) -_isodelta2 = (_sign + '?' + - _hours + ':' + _minutes + ':' + _seconds) -_isodelta3 = (_sign + '?' + - _hours + ':' + _minutes) -_litdelta = (_sign + '?' + - '(?:' + _days + ' *d[a-z]*[,; ]*)?' 
+ - '(?:' + _hours + ' *h[a-z]*[,; ]*)?' + - '(?:' + _minutes + ' *m[a-z]*[,; ]*)?' + - '(?:' + _seconds + ' *s[a-z]*[,; ]*)?') -_litdelta2 = (_sign + '?' + - '(?:' + _days + ' *d[a-z]*[,; ]*)?' + - _hours + ':' + _minutes + '(?::' + _seconds + ')?') - -_timeRE = re.compile(_time, re.I) -_isotimeRE = re.compile(_isotime, re.I) -_isodateRE = re.compile(_isodate, re.I) -_altisodateRE = re.compile(_altisodate, re.I) -_usisodateRE = re.compile(_usisodate, re.I) -_yeardateRE = re.compile(_yeardate, re.I) -_eurodateRE = re.compile(_eurodate, re.I) -_usdateRE = re.compile(_usdate, re.I) -_altusdateRE = re.compile(_altusdate, re.I) -_litdateRE = re.compile(_litdate, re.I) -_altlitdateRE = re.compile(_altlitdate, re.I) -_eurlitdateRE = re.compile(_eurlitdate, re.I) -_relisodateRE = re.compile(_relisodate, re.I) -_asctimeRE = re.compile(_asctime, re.I) -_isodelta1RE = re.compile(_isodelta1) -_isodelta2RE = re.compile(_isodelta2) -_isodelta3RE = re.compile(_isodelta3) -_litdeltaRE = re.compile(_litdelta) -_litdelta2RE = re.compile(_litdelta2) -_relisotimeRE = re.compile(_relisotime, re.I) - -# Available date parsers -_date_formats = ('euro', - 'usiso', 'us', 'altus', - 'iso', 'altiso', - 'lit', 'altlit', 'eurlit', - 'year', 'unknown') - -# Available time parsers -_time_formats = ('standard', - 'iso', - 'unknown') - -_zoneoffset = ('(?:' - '(?P[+-])?' - '(?P\d\d?)' - ':?' - '(?P\d\d)?' - '(?P\d+)?' 
- ')' - ) - -_zoneoffsetRE = re.compile(_zoneoffset) - -_zonetable = { - # Timezone abbreviations - # Std Summer - - # Standards - 'UT':0, - 'UTC':0, - 'GMT':0, - - # A few common timezone abbreviations - 'CET':1, 'CEST':2, 'CETDST':2, # Central European - 'MET':1, 'MEST':2, 'METDST':2, # Mean European - 'MEZ':1, 'MESZ':2, # Mitteleuropäische Zeit - 'EET':2, 'EEST':3, 'EETDST':3, # Eastern Europe - 'WET':0, 'WEST':1, 'WETDST':1, # Western Europe - 'MSK':3, 'MSD':4, # Moscow - 'IST':5.5, # India - 'JST':9, # Japan - 'KST':9, # Korea - 'HKT':8, # Hong Kong - - # US time zones - 'AST':-4, 'ADT':-3, # Atlantic - 'EST':-5, 'EDT':-4, # Eastern - 'CST':-6, 'CDT':-5, # Central - 'MST':-7, 'MDT':-6, # Midwestern - 'PST':-8, 'PDT':-7, # Pacific - - # Australian time zones - 'CAST':9.5, 'CADT':10.5, # Central - 'EAST':10, 'EADT':11, # Eastern - 'WAST':8, 'WADT':9, # Western - 'SAST':9.5, 'SADT':10.5, # Southern - - # US military time zones - 'Z': 0, - 'A': 1, - 'B': 2, - 'C': 3, - 'D': 4, - 'E': 5, - 'F': 6, - 'G': 7, - 'H': 8, - 'I': 9, - 'K': 10, - 'L': 11, - 'M': 12, - 'N':-1, - 'O':-2, - 'P':-3, - 'Q':-4, - 'R':-5, - 'S':-6, - 'T':-7, - 'U':-8, - 'V':-9, - 'W':-10, - 'X':-11, - 'Y':-12 - } - - -def utc_offset(zone): - """ utc_offset(zonestring) - - Return the UTC time zone offset in minutes. - - zone must be string and can either be given as +-HH:MM, - +-HHMM, +-HH numeric offset or as time zone - abbreviation. Daylight saving time must be encoded into the - zone offset. - - Timezone abbreviations are treated case-insensitive. 
- - """ - if not zone: - return 0 - uzone = zone.upper() - if uzone in _zonetable: - return _zonetable[uzone]*60 - offset = _zoneoffsetRE.match(zone) - if not offset: - raise ValueError,'wrong format or unkown time zone: "%s"' % zone - zonesign,hours,minutes,extra = offset.groups() - if extra: - raise ValueError,'illegal time zone offset: "%s"' % zone - offset = int(hours or 0) * 60 + int(minutes or 0) - if zonesign == '-': - offset = -offset - return offset - -def add_century(year): - - """ Sliding window approach to the Y2K problem: adds a suitable - century to the given year and returns it as integer. - - The window used depends on the current year. If adding the current - century to the given year gives a year within the range - current_year-70...current_year+30 [both inclusive], then the - current century is added. Otherwise the century (current + 1 or - - 1) producing the least difference is chosen. - - """ - - current_year=dt.datetime.now().year - current_century=(dt.datetime.now().year / 100) * 100 - - if year > 99: - # Take it as-is - return year - year = year + current_century - diff = year - current_year - if diff >= -70 and diff <= 30: - return year - elif diff < -70: - return year + 100 - else: - return year - 100 - - -def _parse_date(text): - """ - Parses the date part given in text and returns a tuple - (text,day,month,year,style) with the following meanings: - - * text gives the original text without the date part - - * day,month,year give the parsed date - - * style gives information about which parser was successful: - 'euro' - the European date parser - 'us' - the US date parser - 'altus' - the alternative US date parser (with '-' instead of '/') - 'iso' - the ISO date parser - 'altiso' - the alternative ISO date parser (without '-') - 'usiso' - US style ISO date parser (yyyy/mm/dd) - 'lit' - the US literal date parser - 'altlit' - the alternative US literal date parser - 'eurlit' - the Eurpean literal date parser - 'unknown' - no date part was 
found, defaultdate was used - - Formats may be set to a tuple of style strings specifying which of the above - parsers to use and in which order to try them. - Default is to try all of them in the above order. - - ``defaultdate`` provides the defaults to use in case no date part is found. - Most other parsers default to the current year January 1 if some of these - date parts are missing. - - If ``'unknown'`` is not given in formats and the date cannot be parsed, - a :exc:`ValueError` is raised. - - """ - match = None - style = '' - - formats = _date_formats - - us_formats=('us', 'altus') - iso_formats=('iso', 'altiso', 'usiso') - - now=dt.datetime.now - - # Apply parsers in the order given in formats - for format in formats: - - if format == 'euro': - # European style date - match = _eurodateRE.search(text) - if match is not None: - day,month,year,epoch = match.groups() - if year: - if len(year) == 2: - # Y2K problem: - year = add_century(int(year)) - else: - year = int(year) - else: - defaultdate = now() - year = defaultdate.year - if epoch and 'B' in epoch: - year = -year + 1 - month = int(month) - day = int(day) - # Could have mistaken euro format for us style date - # which uses month, day order - if month > 12 or month == 0: - match = None - continue - break - - elif format == 'year': - # just a year specified - match = _yeardateRE.match(text) - if match is not None: - year = match.groups()[0] - if year: - if len(year) == 2: - # Y2K problem: - year = add_century(int(year)) - else: - year = int(year) - else: - defaultdate = now() - year = defaultdate.year - day = 1 - month = 1 - break - - elif format in iso_formats: - # ISO style date - if format == 'iso': - match = _isodateRE.search(text) - elif format == 'altiso': - match = _altisodateRE.search(text) - # Avoid mistaking ISO time parts ('Thhmmss') for dates - if match is not None: - left, right = match.span() - if left > 0 and \ - text[left - 1:left] == 'T': - match = None - continue - else: - match = 
_usisodateRE.search(text) - if match is not None: - year,month,day = match.groups() - if len(year) == 2: - # Y2K problem: - year = add_century(int(year)) - else: - year = int(year) - # Default to January 1st - if not month: - month = 1 - else: - month = int(month) - if not day: - day = 1 - else: - day = int(day) - break - - elif format in us_formats: - # US style date - if format == 'us': - match = _usdateRE.search(text) - else: - match = _altusdateRE.search(text) - if match is not None: - month,day,year,epoch = match.groups() - if year: - if len(year) == 2: - # Y2K problem: - year = add_century(int(year)) - else: - year = int(year) - else: - defaultdate = now() - year = defaultdate.year - if epoch and 'B' in epoch: - year = -year + 1 - # Default to 1 if no day is given - if day: - day = int(day) - else: - day = 1 - month = int(month) - # Could have mistaken us format for euro style date - # which uses day, month order - if month > 12 or month == 0: - match = None - continue - break - - elif format == 'lit': - # US style literal date - match = _litdateRE.search(text) - if match is not None: - litday,day,litmonth,month,year,epoch = match.groups() - break - - elif format == 'altlit': - # Alternative US style literal date - match = _altlitdateRE.search(text) - if match is not None: - litday,litmonth,day,year,epoch = match.groups() - month = '' - break - - elif format == 'eurlit': - # European style literal date - match = _eurlitdateRE.search(text) - if match is not None: - litday,day,litmonth,year,epoch = match.groups() - month = '' - break - - elif format == 'unknown': - # No date part: use defaultdate - defaultdate = now() - year = defaultdate.year - month = defaultdate.month - day = defaultdate.day - style = format - break - - # Check success - if match is not None: - # Remove date from text - left, right = match.span() - if 0 and _debug: - print 'parsed date:',repr(text[left:right]),\ - 'giving:',year,month,day - text = text[:left] + text[right:] - style = format 
- - elif not style: - # Not recognized: raise an error - raise ValueError, 'unknown date format: "%s"' % text - - # Literal date post-processing - if style in ('lit', 'altlit', 'eurlit'): - if 0 and _debug: print match.groups() - # Default to current year, January 1st - if not year: - defaultdate = now() - year = defaultdate.year - else: - if len(year) == 2: - # Y2K problem: - year = add_century(int(year)) - else: - year = int(year) - if epoch and 'B' in epoch: - year = -year + 1 - if litmonth: - litmonth = litmonth.lower() - try: - month = litmonthtable[litmonth] - except KeyError: - raise ValueError,\ - 'wrong month name: "%s"' % litmonth - elif month: - month = int(month) - else: - month = 1 - if day: - day = int(day) - else: - day = 1 - - #print '_parse_date:',text,day,month,year,style - return text,day,month,year,style - -def _parse_time(text): - - """ Parses a time part given in text and returns a tuple - (text,hour,minute,second,offset,style) with the following - meanings: - - * text gives the original text without the time part - * hour,minute,second give the parsed time - * offset gives the time zone UTC offset - * style gives information about which parser was successful: - 'standard' - the standard parser - 'iso' - the ISO time format parser - 'unknown' - no time part was found - - formats may be set to a tuple specifying the parsers to use: - 'standard' - standard time format with ':' delimiter - 'iso' - ISO time format (superset of 'standard') - 'unknown' - default to 0:00:00, 0 zone offset - - If 'unknown' is not given in formats and the time cannot be - parsed, a ValueError is raised. 
- - """ - match = None - style = '' - - formats=_time_formats - - # Apply parsers in the order given in formats - for format in formats: - - # Standard format - if format == 'standard': - match = _timeRE.search(text) - if match is not None: - hour,minute,second,ampm,zone = match.groups() - style = 'standard' - break - - # ISO format - if format == 'iso': - match = _isotimeRE.search(text) - if match is not None: - hour,minute,second,zone = match.groups() - ampm = None - style = 'iso' - break - - # Default handling - elif format == 'unknown': - hour,minute,second,offset = 0,0,0.0,0 - style = 'unknown' - break - - if not style: - # If no default handling should be applied, raise an error - raise ValueError, 'unknown time format: "%s"' % text - - # Post-processing - if match is not None: - - if zone: - # Convert to UTC offset - offset = utc_offset(zone) - else: - offset = 0 - - hour = int(hour) - if ampm: - if ampm[0] in ('p', 'P'): - # 12pm = midday - if hour < 12: - hour = hour + 12 - else: - # 12am = midnight - if hour >= 12: - hour = hour - 12 - if minute: - minute = int(minute) - else: - minute = 0 - if not second: - second = 0.0 - else: - if ',' in second: - second = second.replace(',', '.') - second = float(second) - - # Remove time from text - left,right = match.span() - if 0 and _debug: - print 'parsed time:',repr(text[left:right]),\ - 'giving:',hour,minute,second,offset - text = text[:left] + text[right:] - - #print '_parse_time:',text,hour,minute,second,offset,style - return text,hour,minute,second,offset,style - -### - -def datetime_from_string(text): - - """ datetime_from_string(text, [formats, defaultdate]) - - Returns a datetime instance reflecting the date and time given - in text. In case a timezone is given, the returned instance - will point to the corresponding UTC time value. Otherwise, the - value is set as given in the string. 
- - formats may be set to a tuple of strings specifying which of - the following parsers to use and in which order to try - them. Default is to try all of them in the order given below: - - 'euro' - the European date parser - 'us' - the US date parser - 'altus' - the alternative US date parser (with '-' instead of '/') - 'iso' - the ISO date parser - 'altiso' - the alternative ISO date parser (without '-') - 'usiso' - US style ISO date parser (yyyy/mm/dd) - 'lit' - the US literal date parser - 'altlit' - the alternative US literal date parser - 'eurlit' - the Eurpean literal date parser - 'unknown' - if no date part is found, use defaultdate - - defaultdate provides the defaults to use in case no date part - is found. Most of the parsers default to the current year - January 1 if some of these date parts are missing. - - If 'unknown' is not given in formats and the date cannot - be parsed, a ValueError is raised. - - time_formats may be set to a tuple of strings specifying which - of the following parsers to use and in which order to try - them. Default is to try all of them in the order given below: - - 'standard' - standard time format HH:MM:SS (with ':' delimiter) - 'iso' - ISO time format (superset of 'standard') - 'unknown' - default to 00:00:00 in case the time format - cannot be parsed - - Defaults to 00:00:00.00 for time parts that are not included - in the textual representation. - - If 'unknown' is not given in time_formats and the time cannot - be parsed, a ValueError is raised. 
- - """ - origtext = text - - text,hour,minute,second,offset,timestyle = _parse_time(origtext) - text,day,month,year,datestyle = _parse_date(text) - - if 0 and _debug: - print 'tried time/date on %s, date=%s, time=%s' % (origtext, - datestyle, - timestyle) - - # If this fails, try the ISO order (date, then time) - if timestyle in ('iso', 'unknown'): - text,day,month,year,datestyle = _parse_date(origtext) - text,hour,minute,second,offset,timestyle = _parse_time(text) - if 0 and _debug: - print 'tried ISO on %s, date=%s, time=%s' % (origtext, - datestyle, - timestyle) - - try: - microsecond = int(round(1000000 * (second % 1))) - second = int(second) - return dt.datetime(year,month,day,hour,minute,second, microsecond) - \ - dt.timedelta(minutes=offset) - except ValueError, why: - raise RangeError,\ - 'Failed to parse "%s": %s' % (origtext, why) - -def date_from_string(text): - - """ date_from_string(text, [formats, defaultdate]) - - Returns a datetime instance reflecting the date given in - text. A possibly included time part is ignored. - - formats and defaultdate work just like for - datetime_from_string(). - - """ - _text,day,month,year,datestyle = _parse_date(text) - - try: - return dt.datetime(year,month,day) - except ValueError, why: - raise RangeError,\ - 'Failed to parse "%s": %s' % (text, why) - -def validateDateTimeString(text): - - """ validateDateTimeString(text, [formats, defaultdate]) - - Validates the given text and returns 1/0 depending on whether - text includes parseable date and time values or not. - - formats works just like for datetime_from_string() and defines - the order of date/time parsers to apply. It defaults to the - same list of parsers as for datetime_from_string(). - - XXX Undocumented ! 
- - """ - try: - datetime_from_string(text) - except ValueError, why: - return 0 - return 1 - - -def validateDateString(text): - - """ validateDateString(text, [formats, defaultdate]) - - Validates the given text and returns 1/0 depending on whether - text includes a parseable date value or not. - - formats works just like for datetime_from_string() and defines - the order of date/time parsers to apply. It defaults to the - same list of parsers as for datetime_from_string(). - - XXX Undocumented ! - - """ - try: - date_from_string(text) - except ValueError, why: - return 0 - return 1 - -### Tests - -def _test(): - - import sys - - t = dt.datetime.now() - _date = t.strftime('%Y-%m-%d') - - print 'Testing DateTime Parser...' - - l = [ - - # Literal formats - ('Sun Nov 6 08:49:37 1994', '1994-11-06 08:49:37.00'), - ('sun nov 6 08:49:37 1994', '1994-11-06 08:49:37.00'), - ('sUN NOV 6 08:49:37 1994', '1994-11-06 08:49:37.00'), - ('Sunday, 06-Nov-94 08:49:37 GMT', '1994-11-06 08:49:37.00'), - ('Sun, 06 Nov 1994 08:49:37 GMT', '1994-11-06 08:49:37.00'), - ('06-Nov-94 08:49:37', '1994-11-06 08:49:37.00'), - ('06-Nov-94', '1994-11-06 00:00:00.00'), - ('06-NOV-94', '1994-11-06 00:00:00.00'), - ('November 19 08:49:37', '%s-11-19 08:49:37.00' % t.year), - ('Nov. 9', '%s-11-09 00:00:00.00' % t.year), - ('Sonntag, der 6. November 1994, 08:49:37 GMT', '1994-11-06 08:49:37.00'), - ('6. November 2001, 08:49:37', '2001-11-06 08:49:37.00'), - ('sep 6', '%s-09-06 00:00:00.00' % t.year), - ('sep 6 2000', '2000-09-06 00:00:00.00'), - ('September 29', '%s-09-29 00:00:00.00' % t.year), - ('Sep. 
29', '%s-09-29 00:00:00.00' % t.year), - ('6 sep', '%s-09-06 00:00:00.00' % t.year), - ('29 September', '%s-09-29 00:00:00.00' % t.year), - ('29 Sep.', '%s-09-29 00:00:00.00' % t.year), - ('sep 6 2001', '2001-09-06 00:00:00.00'), - ('Sep 6, 2001', '2001-09-06 00:00:00.00'), - ('September 6, 2001', '2001-09-06 00:00:00.00'), - ('sep 6 01', '2001-09-06 00:00:00.00'), - ('Sep 6, 01', '2001-09-06 00:00:00.00'), - ('September 6, 01', '2001-09-06 00:00:00.00'), - ('30 Apr 2006 20:19:00', '2006-04-30 20:19:00.00'), - - # ISO formats - ('1994-11-06 08:49:37', '1994-11-06 08:49:37.00'), - ('010203', '2001-02-03 00:00:00.00'), - ('2001-02-03 00:00:00.00', '2001-02-03 00:00:00.00'), - ('2001-02 00:00:00.00', '2001-02-01 00:00:00.00'), - ('2001-02-03', '2001-02-03 00:00:00.00'), - ('2001-02', '2001-02-01 00:00:00.00'), - ('20000824/2300', '2000-08-24 23:00:00.00'), - ('20000824/0102', '2000-08-24 01:02:00.00'), - ('20000824', '2000-08-24 00:00:00.00'), - ('20000824/020301', '2000-08-24 02:03:01.00'), - ('20000824 020301', '2000-08-24 02:03:01.00'), - ('20000824T020301', '2000-08-24 02:03:01.00'), - ('20000824 020301', '2000-08-24 02:03:01.00'), - ('2000-08-24 02:03:01.00', '2000-08-24 02:03:01.00'), - ('T020311', '%s 02:03:11.00' % _date), - ('2003-12-9', '2003-12-09 00:00:00.00'), - ('03-12-9', '2003-12-09 00:00:00.00'), - ('003-12-9', '0003-12-09 00:00:00.00'), - ('0003-12-9', '0003-12-09 00:00:00.00'), - ('2003-1-9', '2003-01-09 00:00:00.00'), - ('03-1-9', '2003-01-09 00:00:00.00'), - ('003-1-9', '0003-01-09 00:00:00.00'), - ('0003-1-9', '0003-01-09 00:00:00.00'), - - # US formats - ('06/11/94 08:49:37', '1994-06-11 08:49:37.00'), - ('11/06/94 08:49:37', '1994-11-06 08:49:37.00'), - ('9/23/2001', '2001-09-23 00:00:00.00'), - ('9-23-2001', '2001-09-23 00:00:00.00'), - ('9/6', '%s-09-06 00:00:00.00' % t.year), - ('09/6', '%s-09-06 00:00:00.00' % t.year), - ('9/06', '%s-09-06 00:00:00.00' % t.year), - ('09/06', '%s-09-06 00:00:00.00' % t.year), - ('9/6/2001', '2001-09-06 
00:00:00.00'), - ('09/6/2001', '2001-09-06 00:00:00.00'), - ('9/06/2001', '2001-09-06 00:00:00.00'), - ('09/06/2001', '2001-09-06 00:00:00.00'), - ('9-6-2001', '2001-09-06 00:00:00.00'), - ('09-6-2001', '2001-09-06 00:00:00.00'), - ('9-06-2001', '2001-09-06 00:00:00.00'), - ('09-06-2001', '2001-09-06 00:00:00.00'), - ('2002/05/28 13:10:56.114700 GMT+2', '2002-05-28 13:10:56.114700'), - ('1970/01/01', '1970-01-01 00:00:00.00'), - ('20021025 12:00 PM', '2002-10-25 12:00:00.00'), - ('20021025 12:30 PM', '2002-10-25 12:30:00.00'), - ('20021025 12:00 AM', '2002-10-25 00:00:00.00'), - ('20021025 12:30 AM', '2002-10-25 00:30:00.00'), - ('20021025 1:00 PM', '2002-10-25 13:00:00.00'), - ('20021025 2:00 AM', '2002-10-25 02:00:00.00'), - ('Thursday, February 06, 2003 12:40 PM', '2003-02-06 12:40:00.00'), - ('Mon, 18 Sep 2006 23:03:00', '2006-09-18 23:03:00.00'), - - # European formats - ('6.11.2001, 08:49:37', '2001-11-06 08:49:37.00'), - ('06.11.2001, 08:49:37', '2001-11-06 08:49:37.00'), - ('06.11. 
08:49:37', '%s-11-06 08:49:37.00' % t.year), - #('21/12/2002', '2002-12-21 00:00:00.00'), - #('21/08/2002', '2002-08-21 00:00:00.00'), - #('21-08-2002', '2002-08-21 00:00:00.00'), - #('13/01/03', '2003-01-13 00:00:00.00'), - #('13/1/03', '2003-01-13 00:00:00.00'), - #('13/1/3', '2003-01-13 00:00:00.00'), - #('13/01/3', '2003-01-13 00:00:00.00'), - - # Time only formats - ('01:03', '%s 01:03:00.00' % _date), - ('01:03:11', '%s 01:03:11.00' % _date), - ('01:03:11.50', '%s 01:03:11.500000' % _date), - ('01:03:11.50 AM', '%s 01:03:11.500000' % _date), - ('01:03:11.50 PM', '%s 13:03:11.500000' % _date), - ('01:03:11.50 a.m.', '%s 01:03:11.500000' % _date), - ('01:03:11.50 p.m.', '%s 13:03:11.500000' % _date), - - # Invalid formats - ('6..2001, 08:49:37', '%s 08:49:37.00' % _date), - ('9//2001', 'ignore'), - ('06--94 08:49:37', 'ignore'), - ('20-03 00:00:00.00', 'ignore'), - ('9/2001', 'ignore'), - ('9-6', 'ignore'), - ('09-6', 'ignore'), - ('9-06', 'ignore'), - ('09-06', 'ignore'), - ('20000824/23', 'ignore'), - ('November 1994 08:49:37', 'ignore'), - ] - - # Add Unicode versions - try: - unicode - except NameError: - pass - else: - k = [] - for text, result in l: - k.append((unicode(text), result)) - l.extend(k) - - for text, reference in l: - try: - value = datetime_from_string(text) - except: - if reference is None: - continue - else: - value = str(sys.exc_info()[1]) - valid_datetime = validateDateTimeString(text) - valid_date = validateDateString(text) - - if reference[-3:] == '.00': reference = reference[:-3] - - if str(value) != reference and \ - not reference == 'ignore': - print 'Failed to parse "%s"' % text - print ' expected: %s' % (reference or '') - print ' parsed: %s' % value - elif _debug: - print 'Parsed "%s" successfully' % text - if _debug: - if not valid_datetime: - print ' "%s" failed date/time validation' % text - if not valid_date: - print ' "%s" failed date validation' % text - - et = dt.datetime.now() - print 'done. 
(after %f seconds)' % ((et-t).seconds) - -if __name__ == '__main__': - _test() diff --git a/numpy-1.6.2/numpy/core/arrayprint.py b/numpy-1.6.2/numpy/core/arrayprint.py deleted file mode 100644 index ff6d0ae87b..0000000000 --- a/numpy-1.6.2/numpy/core/arrayprint.py +++ /dev/null @@ -1,556 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", "get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -import numerictypes as _nt -from umath import maximum, minimum, absolute, not_equal, isnan, isinf -from multiarray import format_longfloat -from fromnumeric import ravel - - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' - -if sys.version_info[0] >= 3: - from functools import reduce - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). 
- linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - - See Also - -------- - get_printoptions, set_string_function - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) - >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='Inf', - ... linewidth=75, nanstr='NaN', precision=8, - ... suppress=False, threshold=1000) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - -def get_printoptions(): - """ - Return the current print options. 
- - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - For a full description of these options, see `set_printoptions`. - - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str) - return d - -def _leading_trailing(a): - import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: return ' True' - else: return 'False' - - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix=""): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - try: - format_function = a._format - except AttributeError: - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - # make sure True and False line up. 
- format_function = _boolFormatter - elif issubclass(dtypeobj, _nt.integer): - if issubclass(dtypeobj, _nt.timeinteger): - format_function = str - else: - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - format = '%' + str(max_str_len) + 'd' - format_function = lambda x: _formatInteger(x, format) - elif issubclass(dtypeobj, _nt.floating): - if issubclass(dtypeobj, _nt.longfloat): - format_function = LongFloatFormat(precision) - else: - format_function = FloatFormat(data, precision, suppress_small) - elif issubclass(dtypeobj, _nt.complexfloating): - if issubclass(dtypeobj, _nt.clongfloat): - format_function = LongComplexFormat(precision) - else: - format_function = ComplexFormat(data, precision, suppress_small) - elif issubclass(dtypeobj, _nt.unicode_) or \ - issubclass(dtypeobj, _nt.string_): - format_function = repr - else: - format_function = str - - next_line_prefix = " " # skip over "[" - next_line_prefix += " "*len(prefix) # skip over array( - - lst = _formatArray(a, format_function, len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - - return lst - -def _convert_arrays(obj): - import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width = None, precision = None, - suppress_small = None, separator=' ', prefix="", - style=repr): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. 
- suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to (). - - Returns - ------- - array_str : str - String representation of the array. - - See Also - -------- - array_str, array_repr, set_printoptions - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. 
Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in xrange(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in xrange(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in xrange(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in xrange(trailing_items, 1, -1): - if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = 
precision - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - self.max_str_len = 0 - self.fillFormat(data) - - def fillFormat(self, data): - import numeric as _nc - errstate = _nc.seterr(all='ignore') - try: - special = isnan(data) | isinf(data) - non_zero = absolute(data.compress(not_equal(data, 0) & ~special)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. - else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): - self.exp_format = True - finally: - _nc.seterr(**errstate) - - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' - else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) - else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) - else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' - else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) - - self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format - - def __call__(self, x, strip_zeros=True): - import numeric as _nc - err = _nc.seterr(invalid='ignore') - try: - if isnan(x): - if self.sign: - return self.special_fmt % ('+' + _nan_str,) - else: - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - if self.sign: - return self.special_fmt % ('+' + _inf_str,) - else: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + 
_inf_str,) - finally: - _nc.seterr(**err) - - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s - - -def _digits(x, precision, format): - s = format % x - z = s.rstrip('0') - return precision - len(s) + len(z) - - -_MAXINT = sys.maxint -_MININT = -sys.maxint-1 -def _formatInteger(x, format): - if _MININT < x < _MAXINT: - return format % x - else: - return "%s" % x - -class LongFloatFormat(object): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def __init__(self, precision, sign=False): - self.precision = precision - self.sign = sign - - def __call__(self, x): - if isnan(x): - if self.sign: - return '+' + _nan_str - else: - return ' ' + _nan_str - elif isinf(x): - if x > 0: - if self.sign: - return '+' + _inf_str - else: - return ' ' + _inf_str - else: - return '-' + _inf_str - elif x >= 0: - if self.sign: - return '+' + format_longfloat(x, self.precision) - else: - return ' ' + format_longfloat(x, self.precision) - else: - return format_longfloat(x, self.precision) - - -class LongComplexFormat(object): - def __init__(self, precision): - self.real_format = LongFloatFormat(precision) - self.imag_format = LongFloatFormat(precision, sign=True) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - return r + i + 'j' - - -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) - - def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not 
self.imag_format.exp_format: - z = i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) - else: - i = i + 'j' - return r + i - -## end diff --git a/numpy-1.6.2/numpy/core/blasdot/_dotblas.c b/numpy-1.6.2/numpy/core/blasdot/_dotblas.c deleted file mode 100644 index 35db1bf1c3..0000000000 --- a/numpy-1.6.2/numpy/core/blasdot/_dotblas.c +++ /dev/null @@ -1,1257 +0,0 @@ -static char module_doc[] = -"This module provides a BLAS optimized\nmatrix multiply, inner product and dot for numpy arrays"; - -#include "Python.h" -#include "npy_config.h" -#include "numpy/ndarrayobject.h" -#ifndef CBLAS_HEADER -#define CBLAS_HEADER "cblas.h" -#endif -#include CBLAS_HEADER - -#include - -#if (PY_VERSION_HEX < 0x02060000) -#define Py_TYPE(o) (((PyObject*)(o))->ob_type) -#define Py_REFCNT(o) (((PyObject*)(o))->ob_refcnt) -#define Py_SIZE(o) (((PyVarObject*)(o))->ob_size) -#endif - -static PyArray_DotFunc *oldFunctions[PyArray_NTYPES]; - -static void -FLOAT_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - register npy_intp na = stridea / sizeof(float); - register npy_intp nb = strideb / sizeof(float); - - if ((sizeof(float) * na == (size_t)stridea) && - (sizeof(float) * nb == (size_t)strideb) && - (na >= 0) && (nb >= 0)) - *((float *)res) = cblas_sdot((int)n, (float *)a, na, (float *)b, nb); - - else - oldFunctions[PyArray_FLOAT](a, stridea, b, strideb, res, n, tmp); -} - -static void -DOUBLE_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - register int na = stridea / sizeof(double); - register int nb = strideb / sizeof(double); - - if ((sizeof(double) * na == (size_t)stridea) && - (sizeof(double) * nb == (size_t)strideb) && - (na >= 0) && (nb >= 0)) - *((double *)res) = cblas_ddot((int)n, (double *)a, na, (double *)b, nb); - else - oldFunctions[PyArray_DOUBLE](a, stridea, b, strideb, res, n, tmp); -} - -static void -CFLOAT_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, 
- npy_intp n, void *tmp) -{ - - register int na = stridea / sizeof(npy_cfloat); - register int nb = strideb / sizeof(npy_cfloat); - - if ((sizeof(npy_cfloat) * na == (size_t)stridea) && - (sizeof(npy_cfloat) * nb == (size_t)strideb) && - (na >= 0) && (nb >= 0)) - cblas_cdotu_sub((int)n, (float *)a, na, (float *)b, nb, (float *)res); - else - oldFunctions[PyArray_CFLOAT](a, stridea, b, strideb, res, n, tmp); -} - -static void -CDOUBLE_dot(void *a, npy_intp stridea, void *b, npy_intp strideb, void *res, - npy_intp n, void *tmp) -{ - register int na = stridea / sizeof(npy_cdouble); - register int nb = strideb / sizeof(npy_cdouble); - - if ((sizeof(npy_cdouble) * na == (size_t)stridea) && - (sizeof(npy_cdouble) * nb == (size_t)strideb) && - (na >= 0) && (nb >= 0)) - cblas_zdotu_sub((int)n, (double *)a, na, (double *)b, nb, (double *)res); - else - oldFunctions[PyArray_CDOUBLE](a, stridea, b, strideb, res, n, tmp); -} - - -static npy_bool altered=NPY_FALSE; - -/* - * alterdot() changes all dot functions to use blas. 
- */ -static PyObject * -dotblas_alterdot(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyArray_Descr *descr; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - /* Replace the dot functions to the ones using blas */ - - if (!altered) { - descr = PyArray_DescrFromType(PyArray_FLOAT); - oldFunctions[PyArray_FLOAT] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)FLOAT_dot; - - descr = PyArray_DescrFromType(PyArray_DOUBLE); - oldFunctions[PyArray_DOUBLE] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)DOUBLE_dot; - - descr = PyArray_DescrFromType(PyArray_CFLOAT); - oldFunctions[PyArray_CFLOAT] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)CFLOAT_dot; - - descr = PyArray_DescrFromType(PyArray_CDOUBLE); - oldFunctions[PyArray_CDOUBLE] = descr->f->dotfunc; - descr->f->dotfunc = (PyArray_DotFunc *)CDOUBLE_dot; - - altered = NPY_TRUE; - } - - Py_INCREF(Py_None); - return Py_None; -} - -/* - * restoredot() restores dots to defaults. - */ -static PyObject * -dotblas_restoredot(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyArray_Descr *descr; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - if (altered) { - descr = PyArray_DescrFromType(PyArray_FLOAT); - descr->f->dotfunc = oldFunctions[PyArray_FLOAT]; - oldFunctions[PyArray_FLOAT] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(PyArray_DOUBLE); - descr->f->dotfunc = oldFunctions[PyArray_DOUBLE]; - oldFunctions[PyArray_DOUBLE] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(PyArray_CFLOAT); - descr->f->dotfunc = oldFunctions[PyArray_CFLOAT]; - oldFunctions[PyArray_CFLOAT] = NULL; - Py_XDECREF(descr); - - descr = PyArray_DescrFromType(PyArray_CDOUBLE); - descr->f->dotfunc = oldFunctions[PyArray_CDOUBLE]; - oldFunctions[PyArray_CDOUBLE] = NULL; - Py_XDECREF(descr); - - altered = NPY_FALSE; - } - - Py_INCREF(Py_None); - return Py_None; -} - -typedef enum {_scalar, _column, _row, _matrix} MatrixShape; - -static MatrixShape 
-_select_matrix_shape(PyArrayObject *array) -{ - switch (array->nd) { - case 0: - return _scalar; - case 1: - if (array->dimensions[0] > 1) - return _column; - return _scalar; - case 2: - if (array->dimensions[0] > 1) { - if (array->dimensions[1] == 1) - return _column; - else - return _matrix; - } - if (array->dimensions[1] == 1) - return _scalar; - return _row; - } - return _matrix; -} - - -/* This also makes sure that the data segment is aligned with - an itemsize address as well by returning one if not true. -*/ -static int -_bad_strides(PyArrayObject *ap) -{ - register int itemsize = PyArray_ITEMSIZE(ap); - register int i, N=PyArray_NDIM(ap); - register npy_intp *strides = PyArray_STRIDES(ap); - - if (((npy_intp)(ap->data) % itemsize) != 0) - return 1; - for (i=0; ind > 2) || (ap2->nd > 2)) { - /* - * This function doesn't handle dimensions greater than 2 - * (or negative striding) -- other - * than to ensure the dot function is altered - */ - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - ret = (PyArrayObject *)PyArray_MatrixProduct2((PyObject *)ap1, - (PyObject *)ap2, - (PyObject *)out); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (_bad_strides(ap1)) { - op1 = PyArray_NewCopy(ap1, PyArray_ANYORDER); - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - if (ap1 == NULL) { - goto fail; - } - } - if (_bad_strides(ap2)) { - op2 = PyArray_NewCopy(ap2, PyArray_ANYORDER); - Py_DECREF(ap2); - ap2 = (PyArrayObject *)op2; - if (ap2 == NULL) { - goto fail; - } - } - ap1shape = _select_matrix_shape(ap1); - ap2shape = _select_matrix_shape(ap2); - - if (ap1shape == _scalar || ap2shape == _scalar) { - PyArrayObject *oap1, *oap2; - oap1 = ap1; oap2 = ap2; - /* One of ap1 or ap2 is a scalar */ - if (ap1shape == _scalar) { /* Make ap2 the scalar */ - PyArrayObject *t = ap1; - ap1 = ap2; - ap2 = t; - ap1shape = 
ap2shape; - ap2shape = _scalar; - } - - if (ap1shape == _row) { - ap1stride = ap1->strides[1]; - } - else if (ap1->nd > 0) { - ap1stride = ap1->strides[0]; - } - - if (ap1->nd == 0 || ap2->nd == 0) { - npy_intp *thisdims; - if (ap1->nd == 0) { - nd = ap2->nd; - thisdims = ap2->dimensions; - } - else { - nd = ap1->nd; - thisdims = ap1->dimensions; - } - l = 1; - for (j = 0; j < nd; j++) { - dimensions[j] = thisdims[j]; - l *= dimensions[j]; - } - } - else { - l = oap1->dimensions[oap1->nd - 1]; - - if (oap2->dimensions[0] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - nd = ap1->nd + ap2->nd - 2; - /* - * nd = 0 or 1 or 2. If nd == 0 do nothing ... - */ - if (nd == 1) { - /* - * Either ap1->nd is 1 dim or ap2->nd is 1 dim - * and the other is 2-dim - */ - dimensions[0] = (oap1->nd == 2) ? oap1->dimensions[0] : oap2->dimensions[1]; - l = dimensions[0]; - /* - * Fix it so that dot(shape=(N,1), shape=(1,)) - * and dot(shape=(1,), shape=(1,N)) both return - * an (N,) array (but use the fast scalar code) - */ - } - else if (nd == 2) { - dimensions[0] = oap1->dimensions[0]; - dimensions[1] = oap2->dimensions[1]; - /* - * We need to make sure that dot(shape=(1,1), shape=(1,N)) - * and dot(shape=(N,1),shape=(1,1)) uses - * scalar multiplication appropriately - */ - if (ap1shape == _row) { - l = dimensions[1]; - } - else { - l = dimensions[0]; - } - } - - /* Check if the summation dimension is 0-sized */ - if (oap1->dimensions[oap1->nd - 1] == 0) { - l = 0; - } - } - } - else { - /* - * (ap1->nd <= 2 && ap2->nd <= 2) - * Both ap1 and ap2 are vectors or matrices - */ - l = ap1->dimensions[ap1->nd - 1]; - - if (ap2->dimensions[0] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - nd = ap1->nd + ap2->nd - 2; - - if (nd == 1) - dimensions[0] = (ap1->nd == 2) ? 
ap1->dimensions[0] : ap2->dimensions[1]; - else if (nd == 2) { - dimensions[0] = ap1->dimensions[0]; - dimensions[1] = ap2->dimensions[1]; - } - } - - /* Choose which subtype to return */ - if (Py_TYPE(ap1) != Py_TYPE(ap2)) { - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); - } - else { - prior1 = prior2 = 0.0; - subtype = Py_TYPE(ap1); - } - - if (out) { - int d; - /* verify that out is usable */ - if (Py_TYPE(out) != subtype || - PyArray_NDIM(out) != nd || - PyArray_TYPE(out) != typenum || - !PyArray_ISCARRAY(out)) { - - PyErr_SetString(PyExc_ValueError, - "output array is not acceptable " - "(must have the right type, nr dimensions, and be a C-Array)"); - goto fail; - } - for (d = 0; d < nd; ++d) { - if (dimensions[d] != PyArray_DIM(out, d)) { - PyErr_SetString(PyExc_ValueError, - "output array has wrong dimensions"); - goto fail; - } - } - Py_INCREF(out); - ret = out; - } else { - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? 
ap2 : ap1)); - } - - if (ret == NULL) { - goto fail; - } - numbytes = PyArray_NBYTES(ret); - memset(ret->data, 0, numbytes); - if (numbytes==0 || l == 0) { - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (ap2shape == _scalar) { - /* - * Multiplication by a scalar -- Level 1 BLAS - * if ap1shape is a matrix and we are not contiguous, then we can't - * just blast through the entire array using a single striding factor - */ - NPY_BEGIN_ALLOW_THREADS; - - if (typenum == PyArray_DOUBLE) { - if (l == 1) { - *((double *)ret->data) = *((double *)ap2->data) * - *((double *)ap1->data); - } - else if (ap1shape != _matrix) { - cblas_daxpy(l, *((double *)ap2->data), (double *)ap1->data, - ap1stride/sizeof(double), (double *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - double val; - - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - val = *((double *)ap2->data); - a1s = ap1->strides[maxind] / sizeof(double); - rets = ret->strides[maxind] / sizeof(double); - for (i = 0; i < ap1->dimensions[oind]; i++) { - cblas_daxpy(l, val, (double *)ptr, a1s, - (double *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - else if (typenum == PyArray_CDOUBLE) { - if (l == 1) { - npy_cdouble *ptr1, *ptr2, *res; - - ptr1 = (npy_cdouble *)ap2->data; - ptr2 = (npy_cdouble *)ap1->data; - res = (npy_cdouble *)ret->data; - res->real = ptr1->real * ptr2->real - ptr1->imag * ptr2->imag; - res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; - } - else if (ap1shape != _matrix) { - cblas_zaxpy(l, (double *)ap2->data, (double *)ap1->data, - ap1stride/sizeof(npy_cdouble), (double *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - double *pval; - - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 
0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - pval = (double *)ap2->data; - a1s = ap1->strides[maxind] / sizeof(npy_cdouble); - rets = ret->strides[maxind] / sizeof(npy_cdouble); - for (i = 0; i < ap1->dimensions[oind]; i++) { - cblas_zaxpy(l, pval, (double *)ptr, a1s, - (double *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - else if (typenum == PyArray_FLOAT) { - if (l == 1) { - *((float *)ret->data) = *((float *)ap2->data) * - *((float *)ap1->data); - } - else if (ap1shape != _matrix) { - cblas_saxpy(l, *((float *)ap2->data), (float *)ap1->data, - ap1stride/sizeof(float), (float *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - float val; - - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - val = *((float *)ap2->data); - a1s = ap1->strides[maxind] / sizeof(float); - rets = ret->strides[maxind] / sizeof(float); - for (i = 0; i < ap1->dimensions[oind]; i++) { - cblas_saxpy(l, val, (float *)ptr, a1s, - (float *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - else if (typenum == PyArray_CFLOAT) { - if (l == 1) { - npy_cfloat *ptr1, *ptr2, *res; - - ptr1 = (npy_cfloat *)ap2->data; - ptr2 = (npy_cfloat *)ap1->data; - res = (npy_cfloat *)ret->data; - res->real = ptr1->real * ptr2->real - ptr1->imag * ptr2->imag; - res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; - } - else if (ap1shape != _matrix) { - cblas_caxpy(l, (float *)ap2->data, (float *)ap1->data, - ap1stride/sizeof(npy_cfloat), (float *)ret->data, 1); - } - else { - int maxind, oind, i, a1s, rets; - char *ptr, *rptr; - float *pval; - - maxind = (ap1->dimensions[0] >= ap1->dimensions[1] ? 
0 : 1); - oind = 1-maxind; - ptr = ap1->data; - rptr = ret->data; - l = ap1->dimensions[maxind]; - pval = (float *)ap2->data; - a1s = ap1->strides[maxind] / sizeof(npy_cfloat); - rets = ret->strides[maxind] / sizeof(npy_cfloat); - for (i = 0; i < ap1->dimensions[oind]; i++) { - cblas_caxpy(l, pval, (float *)ptr, a1s, - (float *)rptr, rets); - ptr += ap1->strides[oind]; - rptr += ret->strides[oind]; - } - } - } - NPY_END_ALLOW_THREADS; - } - else if ((ap2shape == _column) && (ap1shape != _matrix)) { - int ap1s, ap2s; - NPY_BEGIN_ALLOW_THREADS; - - ap2s = ap2->strides[0] / ap2->descr->elsize; - if (ap1shape == _row) { - ap1s = ap1->strides[1] / ap1->descr->elsize; - } - else { - ap1s = ap1->strides[0] / ap1->descr->elsize; - } - - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - double result = cblas_ddot(l, (double *)ap1->data, ap1s, - (double *)ap2->data, ap2s); - *((double *)ret->data) = result; - } - else if (typenum == PyArray_FLOAT) { - float result = cblas_sdot(l, (float *)ap1->data, ap1s, - (float *)ap2->data, ap2s); - *((float *)ret->data) = result; - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zdotu_sub(l, (double *)ap1->data, ap1s, - (double *)ap2->data, ap2s, (double *)ret->data); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cdotu_sub(l, (float *)ap1->data, ap1s, - (float *)ap2->data, ap2s, (float *)ret->data); - } - NPY_END_ALLOW_THREADS; - } - else if (ap1shape == _matrix && ap2shape != _matrix) { - /* Matrix vector multiplication -- Level 2 BLAS */ - /* lda must be MAX(M,1) */ - enum CBLAS_ORDER Order; - int ap2s; - - if (!PyArray_ISONESEGMENT(ap1)) { - PyObject *new; - new = PyArray_Copy(ap1); - Py_DECREF(ap1); - ap1 = (PyArrayObject *)new; - if (new == NULL) { - goto fail; - } - } - NPY_BEGIN_ALLOW_THREADS - if (PyArray_ISCONTIGUOUS(ap1)) { - Order = CblasRowMajor; - lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - } - else { - Order = CblasColMajor; - lda = (ap1->dimensions[0] > 1 ? 
ap1->dimensions[0] : 1); - } - ap2s = ap2->strides[0] / ap2->descr->elsize; - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(Order, CblasNoTrans, - ap1->dimensions[0], ap1->dimensions[1], - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, ap2s, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(Order, CblasNoTrans, - ap1->dimensions[0], ap1->dimensions[1], - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, ap2s, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(Order, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneD, (double *)ap1->data, lda, - (double *)ap2->data, ap2s, zeroD, - (double *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(Order, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneF, (float *)ap1->data, lda, - (float *)ap2->data, ap2s, zeroF, - (float *)ret->data, 1); - } - NPY_END_ALLOW_THREADS; - } - else if (ap1shape != _matrix && ap2shape == _matrix) { - /* Vector matrix multiplication -- Level 2 BLAS */ - enum CBLAS_ORDER Order; - int ap1s; - - if (!PyArray_ISONESEGMENT(ap2)) { - PyObject *new; - new = PyArray_Copy(ap2); - Py_DECREF(ap2); - ap2 = (PyArrayObject *)new; - if (new == NULL) { - goto fail; - } - } - NPY_BEGIN_ALLOW_THREADS - if (PyArray_ISCONTIGUOUS(ap2)) { - Order = CblasRowMajor; - lda = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1); - } - else { - Order = CblasColMajor; - lda = (ap2->dimensions[0] > 1 ? 
ap2->dimensions[0] : 1); - } - if (ap1shape == _row) { - ap1s = ap1->strides[1] / ap1->descr->elsize; - } - else { - ap1s = ap1->strides[0] / ap1->descr->elsize; - } - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (double *)ap2->data, lda, - (double *)ap1->data, ap1s, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (float *)ap2->data, lda, - (float *)ap1->data, ap1s, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - oneD, (double *)ap2->data, lda, - (double *)ap1->data, ap1s, zeroD, (double *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(Order, - CblasTrans, ap2->dimensions[0], ap2->dimensions[1], - oneF, (float *)ap2->data, lda, - (float *)ap1->data, ap1s, zeroF, (float *)ret->data, 1); - } - NPY_END_ALLOW_THREADS; - } - else { - /* - * (ap1->nd == 2 && ap2->nd == 2) - * Matrix matrix multiplication -- Level 3 BLAS - * L x M multiplied by M x N - */ - enum CBLAS_ORDER Order; - enum CBLAS_TRANSPOSE Trans1, Trans2; - int M, N, L; - - /* Optimization possible: */ - /* - * We may be able to handle single-segment arrays here - * using appropriate values of Order, Trans1, and Trans2. - */ - - if (!PyArray_ISCONTIGUOUS(ap2)) { - PyObject *new = PyArray_Copy(ap2); - - Py_DECREF(ap2); - ap2 = (PyArrayObject *)new; - if (new == NULL) { - goto fail; - } - } - if (!PyArray_ISCONTIGUOUS(ap1)) { - PyObject *new = PyArray_Copy(ap1); - - Py_DECREF(ap1); - ap1 = (PyArrayObject *)new; - if (new == NULL) { - goto fail; - } - } - - NPY_BEGIN_ALLOW_THREADS; - - Order = CblasRowMajor; - Trans1 = CblasNoTrans; - Trans2 = CblasNoTrans; - L = ap1->dimensions[0]; - N = ap2->dimensions[1]; - M = ap2->dimensions[0]; - lda = (ap1->dimensions[1] > 1 ? 
ap1->dimensions[1] : 1); - ldb = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1); - ldc = (ret->dimensions[1] > 1 ? ret->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemm(Order, Trans1, Trans2, - L, N, M, - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - 0.0, (double *)ret->data, ldc); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemm(Order, Trans1, Trans2, - L, N, M, - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - 0.0, (float *)ret->data, ldc); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemm(Order, Trans1, Trans2, - L, N, M, - oneD, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - zeroD, (double *)ret->data, ldc); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemm(Order, Trans1, Trans2, - L, N, M, - oneF, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - zeroF, (float *)ret->data, ldc); - } - NPY_END_ALLOW_THREADS; - } - - - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -/* - * innerproduct(a,b) - * - * Returns the inner product of a and b for arrays of - * floating point types. Like the generic NumPy equivalent the product - * sum is over the last dimension of a and b. - * NB: The first argument is not conjugated. - */ - -static PyObject * -dotblas_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *ap1, *ap2, *ret; - int j, l, lda, ldb, ldc; - int typenum, nd; - npy_intp dimensions[NPY_MAXDIMS]; - static const float oneF[2] = {1.0, 0.0}; - static const float zeroF[2] = {0.0, 0.0}; - static const double oneD[2] = {1.0, 0.0}; - static const double zeroD[2] = {0.0, 0.0}; - PyTypeObject *subtype; - double prior1, prior2; - - if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL; - - /* - * Inner product using the BLAS. The product sum is taken along the last - * dimensions of the two arrays. 
- * Only speeds things up for float double and complex types. - */ - - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - /* This function doesn't handle other types */ - if ((typenum != PyArray_DOUBLE && typenum != PyArray_CDOUBLE && - typenum != PyArray_FLOAT && typenum != PyArray_CFLOAT)) { - return PyArray_Return((PyArrayObject *)PyArray_InnerProduct(op1, op2)); - } - - ret = NULL; - ap1 = (PyArrayObject *)PyArray_ContiguousFromObject(op1, typenum, 0, 0); - if (ap1 == NULL) return NULL; - ap2 = (PyArrayObject *)PyArray_ContiguousFromObject(op2, typenum, 0, 0); - if (ap2 == NULL) goto fail; - - if ((ap1->nd > 2) || (ap2->nd > 2)) { - /* This function doesn't handle dimensions greater than 2 -- other - than to ensure the dot function is altered - */ - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (ap1->nd == 0 || ap2->nd == 0) { - /* One of ap1 or ap2 is a scalar */ - if (ap1->nd == 0) { /* Make ap2 the scalar */ - PyArrayObject *t = ap1; - ap1 = ap2; - ap2 = t; - } - for (l = 1, j = 0; j < ap1->nd; j++) { - dimensions[j] = ap1->dimensions[j]; - l *= dimensions[j]; - } - nd = ap1->nd; - } - else { /* (ap1->nd <= 2 && ap2->nd <= 2) */ - /* Both ap1 and ap2 are vectors or matrices */ - l = ap1->dimensions[ap1->nd-1]; - - if (ap2->dimensions[ap2->nd-1] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - nd = ap1->nd+ap2->nd-2; - - if (nd == 1) - dimensions[0] = (ap1->nd == 2) ? 
ap1->dimensions[0] : ap2->dimensions[0]; - else if (nd == 2) { - dimensions[0] = ap1->dimensions[0]; - dimensions[1] = ap2->dimensions[0]; - } - } - - /* Choose which subtype to return */ - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); - - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *)\ - (prior2 > prior1 ? ap2 : ap1)); - - if (ret == NULL) goto fail; - NPY_BEGIN_ALLOW_THREADS - memset(ret->data, 0, PyArray_NBYTES(ret)); - - if (ap2->nd == 0) { - /* Multiplication by a scalar -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - cblas_daxpy(l, *((double *)ap2->data), (double *)ap1->data, 1, - (double *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zaxpy(l, (double *)ap2->data, (double *)ap1->data, 1, - (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_saxpy(l, *((float *)ap2->data), (float *)ap1->data, 1, - (float *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_caxpy(l, (float *)ap2->data, (float *)ap1->data, 1, - (float *)ret->data, 1); - } - } - else if (ap1->nd == 1 && ap2->nd == 1) { - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - double result = cblas_ddot(l, (double *)ap1->data, 1, - (double *)ap2->data, 1); - *((double *)ret->data) = result; - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zdotu_sub(l, (double *)ap1->data, 1, - (double *)ap2->data, 1, (double *)ret->data); - } - else if (typenum == PyArray_FLOAT) { - float result = cblas_sdot(l, (float *)ap1->data, 1, - (float *)ap2->data, 1); - *((float *)ret->data) = result; - } - else if (typenum == PyArray_CFLOAT) { - cblas_cdotu_sub(l, (float *)ap1->data, 1, - (float *)ap2->data, 1, (float *)ret->data); - } - } - else if (ap1->nd == 2 && ap2->nd == 1) { - /* Matrix-vector multiplication -- Level 2 BLAS */ - lda 
= (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, 1, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneD, (double *)ap1->data, lda, - (double *)ap2->data, 1, zeroD, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, 1, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(CblasRowMajor, - CblasNoTrans, ap1->dimensions[0], ap1->dimensions[1], - oneF, (float *)ap1->data, lda, - (float *)ap2->data, 1, zeroF, (float *)ret->data, 1); - } - } - else if (ap1->nd == 1 && ap2->nd == 2) { - /* Vector matrix multiplication -- Level 2 BLAS */ - lda = (ap2->dimensions[1] > 1 ? 
ap2->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (double *)ap2->data, lda, - (double *)ap1->data, 1, 0.0, (double *)ret->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - oneD, (double *)ap2->data, lda, - (double *)ap1->data, 1, zeroD, (double *)ret->data, 1); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - 1.0, (float *)ap2->data, lda, - (float *)ap1->data, 1, 0.0, (float *)ret->data, 1); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemv(CblasRowMajor, - CblasNoTrans, ap2->dimensions[0], ap2->dimensions[1], - oneF, (float *)ap2->data, lda, - (float *)ap1->data, 1, zeroF, (float *)ret->data, 1); - } - } - else { /* (ap1->nd == 2 && ap2->nd == 2) */ - /* Matrix matrix multiplication -- Level 3 BLAS */ - lda = (ap1->dimensions[1] > 1 ? ap1->dimensions[1] : 1); - ldb = (ap2->dimensions[1] > 1 ? ap2->dimensions[1] : 1); - ldc = (ret->dimensions[1] > 1 ? 
ret->dimensions[1] : 1); - if (typenum == PyArray_DOUBLE) { - cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - 1.0, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - 0.0, (double *)ret->data, ldc); - } - else if (typenum == PyArray_FLOAT) { - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - 1.0, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - 0.0, (float *)ret->data, ldc); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - oneD, (double *)ap1->data, lda, - (double *)ap2->data, ldb, - zeroD, (double *)ret->data, ldc); - } - else if (typenum == PyArray_CFLOAT) { - cblas_cgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ap1->dimensions[0], ap2->dimensions[0], ap1->dimensions[1], - oneF, (float *)ap1->data, lda, - (float *)ap2->data, ldb, - zeroF, (float *)ret->data, ldc); - } - } - NPY_END_ALLOW_THREADS - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -/* - * vdot(a,b) - * - * Returns the dot product of a and b for scalars and vectors of - * floating point and complex types. The first argument, a, is conjugated. - */ -static PyObject *dotblas_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) { - PyObject *op1, *op2; - PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL; - int l; - int typenum; - npy_intp dimensions[NPY_MAXDIMS]; - PyArray_Descr *type; - - if (!PyArg_ParseTuple(args, "OO", &op1, &op2)) return NULL; - - /* - * Conjugating dot product using the BLAS for vectors. - * Multiplies op1 and op2, each of which must be vector. 
- */ - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - type = PyArray_DescrFromType(typenum); - Py_INCREF(type); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0, NULL); - if (ap1==NULL) {Py_DECREF(type); goto fail;} - op1 = PyArray_Flatten(ap1, 0); - if (op1==NULL) {Py_DECREF(type); goto fail;} - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - - ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0, NULL); - if (ap2==NULL) goto fail; - op2 = PyArray_Flatten(ap2, 0); - if (op2 == NULL) goto fail; - Py_DECREF(ap2); - ap2 = (PyArrayObject *)op2; - - if (typenum != PyArray_FLOAT && typenum != PyArray_DOUBLE && - typenum != PyArray_CFLOAT && typenum != PyArray_CDOUBLE) { - if (!altered) { - /* need to alter dot product */ - PyObject *tmp1, *tmp2; - tmp1 = PyTuple_New(0); - tmp2 = dotblas_alterdot(NULL, tmp1); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - } - if (PyTypeNum_ISCOMPLEX(typenum)) { - op1 = PyArray_Conjugate(ap1, NULL); - if (op1==NULL) goto fail; - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - } - ret = (PyArrayObject *)PyArray_InnerProduct((PyObject *)ap1, - (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - } - - if (ap2->dimensions[0] != ap1->dimensions[ap1->nd-1]) { - PyErr_SetString(PyExc_ValueError, "vectors have different lengths"); - goto fail; - } - l = ap1->dimensions[ap1->nd-1]; - - ret = (PyArrayObject *)PyArray_SimpleNew(0, dimensions, typenum); - if (ret == NULL) goto fail; - - NPY_BEGIN_ALLOW_THREADS - - /* Dot product between two vectors -- Level 1 BLAS */ - if (typenum == PyArray_DOUBLE) { - *((double *)ret->data) = cblas_ddot(l, (double *)ap1->data, 1, - (double *)ap2->data, 1); - } - else if (typenum == PyArray_FLOAT) { - *((float *)ret->data) = cblas_sdot(l, (float *)ap1->data, 1, - (float *)ap2->data, 1); - } - else if (typenum == PyArray_CDOUBLE) { - cblas_zdotc_sub(l, (double *)ap1->data, 1, - (double *)ap2->data, 1, (double *)ret->data); - } - 
else if (typenum == PyArray_CFLOAT) { - cblas_cdotc_sub(l, (float *)ap1->data, 1, - (float *)ap2->data, 1, (float *)ret->data); - } - - NPY_END_ALLOW_THREADS - - Py_DECREF(ap1); - Py_DECREF(ap2); - return PyArray_Return(ret); - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -static struct PyMethodDef dotblas_module_methods[] = { - {"dot", (PyCFunction)dotblas_matrixproduct, METH_VARARGS|METH_KEYWORDS, NULL}, - {"inner", (PyCFunction)dotblas_innerproduct, 1, NULL}, - {"vdot", (PyCFunction)dotblas_vdot, 1, NULL}, - {"alterdot", (PyCFunction)dotblas_alterdot, 1, NULL}, - {"restoredot", (PyCFunction)dotblas_restoredot, 1, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_dotblas", - NULL, - -1, - dotblas_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit__dotblas(void) -#else -#define RETVAL -PyMODINIT_FUNC init_dotblas(void) -#endif -{ - int i; - PyObject *d, *s, *m; - - /* Create the module and add the functions */ -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule3("_dotblas", dotblas_module_methods, module_doc); -#endif - - /* Import the array object */ - import_array(); - - /* Initialise the array of dot functions */ - for (i = 0; i < PyArray_NTYPES; i++) - oldFunctions[i] = NULL; - - /* alterdot at load */ - d = PyTuple_New(0); - s = dotblas_alterdot(NULL, d); - Py_DECREF(d); - Py_DECREF(s); - - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/core/blasdot/cblas.h b/numpy-1.6.2/numpy/core/blasdot/cblas.h deleted file mode 100644 index 25de09edfe..0000000000 --- a/numpy-1.6.2/numpy/core/blasdot/cblas.h +++ /dev/null @@ -1,578 +0,0 @@ -#ifndef CBLAS_H -#define CBLAS_H -#include - -/* Allow the use in C++ code. 
*/ -#ifdef __cplusplus -extern "C" -{ -#endif - -/* - * Enumerated and derived types - */ -#define CBLAS_INDEX size_t /* this may vary between platforms */ - -enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102}; -enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113}; -enum CBLAS_UPLO {CblasUpper=121, CblasLower=122}; -enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132}; -enum CBLAS_SIDE {CblasLeft=141, CblasRight=142}; - -/* - * =========================================================================== - * Prototypes for level 1 BLAS functions (complex are recast as routines) - * =========================================================================== - */ -float cblas_sdsdot(const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY); -double cblas_dsdot(const int N, const float *X, const int incX, const float *Y, - const int incY); -float cblas_sdot(const int N, const float *X, const int incX, - const float *Y, const int incY); -double cblas_ddot(const int N, const double *X, const int incX, - const double *Y, const int incY); - -/* - * Functions having prefixes Z and C only - */ -void cblas_cdotu_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotu); -void cblas_cdotc_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotc); - -void cblas_zdotu_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotu); -void cblas_zdotc_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotc); - - -/* - * Functions having prefixes S D SC DZ - */ -float cblas_snrm2(const int N, const float *X, const int incX); -float cblas_sasum(const int N, const float *X, const int incX); - -double cblas_dnrm2(const int N, const double *X, const int incX); -double cblas_dasum(const int N, const double *X, const int incX); - -float cblas_scnrm2(const int N, const void *X, const int 
incX); -float cblas_scasum(const int N, const void *X, const int incX); - -double cblas_dznrm2(const int N, const void *X, const int incX); -double cblas_dzasum(const int N, const void *X, const int incX); - - -/* - * Functions having standard 4 prefixes (S D C Z) - */ -CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX); -CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX); -CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX); -CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX); - -/* - * =========================================================================== - * Prototypes for level 1 BLAS routines - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (s, d, c, z) - */ -void cblas_sswap(const int N, float *X, const int incX, - float *Y, const int incY); -void cblas_scopy(const int N, const float *X, const int incX, - float *Y, const int incY); -void cblas_saxpy(const int N, const float alpha, const float *X, - const int incX, float *Y, const int incY); - -void cblas_dswap(const int N, double *X, const int incX, - double *Y, const int incY); -void cblas_dcopy(const int N, const double *X, const int incX, - double *Y, const int incY); -void cblas_daxpy(const int N, const double alpha, const double *X, - const int incX, double *Y, const int incY); - -void cblas_cswap(const int N, void *X, const int incX, - void *Y, const int incY); -void cblas_ccopy(const int N, const void *X, const int incX, - void *Y, const int incY); -void cblas_caxpy(const int N, const void *alpha, const void *X, - const int incX, void *Y, const int incY); - -void cblas_zswap(const int N, void *X, const int incX, - void *Y, const int incY); -void cblas_zcopy(const int N, const void *X, const int incX, - void *Y, const int incY); -void cblas_zaxpy(const int N, const void *alpha, const void *X, - const int incX, void *Y, const int incY); - - -/* - * 
Routines with S and D prefix only - */ -void cblas_srotg(float *a, float *b, float *c, float *s); -void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P); -void cblas_srot(const int N, float *X, const int incX, - float *Y, const int incY, const float c, const float s); -void cblas_srotm(const int N, float *X, const int incX, - float *Y, const int incY, const float *P); - -void cblas_drotg(double *a, double *b, double *c, double *s); -void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P); -void cblas_drot(const int N, double *X, const int incX, - double *Y, const int incY, const double c, const double s); -void cblas_drotm(const int N, double *X, const int incX, - double *Y, const int incY, const double *P); - - -/* - * Routines with S D C Z CS and ZD prefixes - */ -void cblas_sscal(const int N, const float alpha, float *X, const int incX); -void cblas_dscal(const int N, const double alpha, double *X, const int incX); -void cblas_cscal(const int N, const void *alpha, void *X, const int incX); -void cblas_zscal(const int N, const void *alpha, void *X, const int incX); -void cblas_csscal(const int N, const float alpha, void *X, const int incX); -void cblas_zdscal(const int N, const double alpha, void *X, const int incX); - -/* - * =========================================================================== - * Prototypes for level 2 BLAS - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (S, D, C, Z) - */ -void cblas_sgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *X, const int incX, const float beta, - float *Y, const int incY); -void cblas_sgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const float alpha, - const float *A, const int lda, const 
float *X, - const int incX, const float beta, float *Y, const int incY); -void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *A, const int lda, - float *X, const int incX); -void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const float *A, const int lda, - float *X, const int incX); -void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *Ap, float *X, const int incX); -void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *A, const int lda, float *X, - const int incX); -void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const float *A, const int lda, - float *X, const int incX); -void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *Ap, float *X, const int incX); - -void cblas_dgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const double alpha, const double *A, const int lda, - const double *X, const int incX, const double beta, - double *Y, const int incY); -void cblas_dgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const double alpha, - const double *A, const int lda, const double *X, - const int incX, const double beta, double *Y, const int incY); -void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE 
TransA, const enum CBLAS_DIAG Diag, - const int N, const double *A, const int lda, - double *X, const int incX); -void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const double *A, const int lda, - double *X, const int incX); -void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *Ap, double *X, const int incX); -void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *A, const int lda, double *X, - const int incX); -void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const double *A, const int lda, - double *X, const int incX); -void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *Ap, double *X, const int incX); - -void cblas_cgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *X, const int incX, const void *beta, - void *Y, const int incY); -void cblas_cgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const void *alpha, - const void *A, const int lda, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, - void *X, const int incX); -void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, 
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); -void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, void *X, - const int incX); -void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); - -void cblas_zgemv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *X, const int incX, const void *beta, - void *Y, const int incY); -void cblas_zgbmv(const enum CBLAS_ORDER order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const void *alpha, - const void *A, const int lda, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, - void *X, const int incX); -void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ztpmv(const enum CBLAS_ORDER order, const enum 
CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); -void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, void *X, - const int incX); -void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); - - -/* - * Routines with S and D prefixes only - */ -void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *A, - const int lda, const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const float alpha, const float *A, - const int lda, const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *Ap, - const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N, - const float alpha, const float *X, const int incX, - const float *Y, const int incY, float *A, const int lda); -void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, float *A, const int lda); -void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, float 
*Ap); -void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY, float *A, - const int lda); -void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY, float *A); - -void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *Ap, - const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N, - const double alpha, const double *X, const int incX, - const double *Y, const int incY, double *A, const int lda); -void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, double *A, const int lda); -void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, double *Ap); -void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, const double *Y, const int incY, double *A, - const int lda); -void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, const double *Y, const int incY, double *A); - 
- -/* - * Routines with C and Z prefixes only - */ -void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *Ap, - const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const void *X, const int incX, - void *A, const int lda); -void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const void *X, - const int incX, void *A); -void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *Ap); - -void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); 
-void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *Ap, - const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const void *X, const int incX, - void *A, const int lda); -void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const void *X, - const int incX, void *A); -void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *Ap); - -/* - * =========================================================================== - * Prototypes for level 3 BLAS - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (S, D, C, Z) - */ -void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const float alpha, const float *A, - const int lda, const float 
*B, const int ldb, - const float beta, float *C, const int ldc); -void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, const float beta, - float *C, const int ldc); -void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const float *A, const int lda, - const float beta, float *C, const int ldc); -void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, const float beta, - float *C, const int ldc); -void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const float alpha, const float *A, const int lda, - float *B, const int ldb); -void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const float alpha, const float *A, const int lda, - float *B, const int ldb); - -void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const double alpha, const double *A, - const int lda, const double *B, const int ldb, - const double beta, double *C, const int ldc); -void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const double alpha, const double *A, const int lda, - const double *B, const int ldb, const double beta, - double *C, const int ldc); -void cblas_dsyrk(const enum CBLAS_ORDER 
Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const double *A, const int lda, - const double beta, double *C, const int ldc); -void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const double *A, const int lda, - const double *B, const int ldb, const double beta, - double *C, const int ldc); -void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const double alpha, const double *A, const int lda, - double *B, const int ldb); -void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const double alpha, const double *A, const int lda, - double *B, const int ldb); - -void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const void *alpha, const void *A, - const int lda, const void *B, const int ldb, - const void *beta, void *C, const int ldc); -void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *beta, void *C, const int ldc); -void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const 
int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); -void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); - -void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const void *alpha, const void *A, - const int lda, const void *B, const int ldb, - const void *beta, void *C, const int ldc); -void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *beta, void *C, const int ldc); -void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); -void cblas_ztrsm(const enum 
CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); - - -/* - * Routines with prefixes C and Z only - */ -void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const void *A, const int lda, - const float beta, void *C, const int ldc); -void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const float beta, - void *C, const int ldc); - -void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const void *A, const int lda, - const double beta, void *C, const int ldc); -void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const double beta, - void *C, const int ldc); - -void cblas_xerbla(int p, const char *rout, const char *form, ...); - -#ifdef __cplusplus -} -#endif - -#endif diff --git 
a/numpy-1.6.2/numpy/core/code_generators/__init__.py b/numpy-1.6.2/numpy/core/code_generators/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/core/code_generators/cversions.py b/numpy-1.6.2/numpy/core/code_generators/cversions.py deleted file mode 100644 index 036e923ec9..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/cversions.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Simple script to compute the api hash of the current API as defined by -numpy_api_order and ufunc_api_order.""" -from os.path import join, dirname - -from genapi import fullapi_hash -import numpy_api - -if __name__ == '__main__': - curdir = dirname(__file__) - print fullapi_hash(numpy_api.full_api) diff --git a/numpy-1.6.2/numpy/core/code_generators/cversions.txt b/numpy-1.6.2/numpy/core/code_generators/cversions.txt deleted file mode 100644 index 64754fa87c..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/cversions.txt +++ /dev/null @@ -1,11 +0,0 @@ -# hash below were defined from numpy_api_order.txt and ufunc_api_order.txt -0x00000001 = 603580d224763e58c5e7147f804dc0f5 -0x00000002 = 8ecb29306758515ae69749c803a75da1 -0x00000003 = bf22c0d05b31625d2a7015988d61ce5a -# Starting from here, the hash is defined from numpy_api.full_api dict -# version 4 added neighborhood iterators and PyArray_Correlate2 -0x00000004 = 3d8940bf7b0d2a4e25be4338c14c3c85 -0x00000005 = 77e2e846db87f25d7cf99f9d812076f0 -# Version 6 added new iterator, half float and casting functions, -# PyArray_CountNonzero, PyArray_NewLikeArray and PyArray_MatrixProduct2. -0x00000006 = e61d5dc51fa1c6459328266e215d6987 diff --git a/numpy-1.6.2/numpy/core/code_generators/genapi.py b/numpy-1.6.2/numpy/core/code_generators/genapi.py deleted file mode 100644 index 2f6d0039a2..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/genapi.py +++ /dev/null @@ -1,477 +0,0 @@ -""" -Get API information encoded in C files. 
- -See ``find_function`` for how functions should be formatted, and -``read_order`` for how the order of the functions should be -specified. -""" -import sys, os, re -try: - import hashlib - md5new = hashlib.md5 -except ImportError: - import md5 - md5new = md5.new -if sys.version_info[:2] < (2, 6): - from sets import Set as set -import textwrap - -from os.path import join - -__docformat__ = 'restructuredtext' - -# The files under src/ that are scanned for API functions -API_FILES = [join('multiarray', 'methods.c'), - join('multiarray', 'arrayobject.c'), - join('multiarray', 'flagsobject.c'), - join('multiarray', 'descriptor.c'), - join('multiarray', 'iterators.c'), - join('multiarray', 'getset.c'), - join('multiarray', 'number.c'), - join('multiarray', 'sequence.c'), - join('multiarray', 'ctors.c'), - join('multiarray', 'convert.c'), - join('multiarray', 'shape.c'), - join('multiarray', 'item_selection.c'), - join('multiarray', 'convert_datatype.c'), - join('multiarray', 'arraytypes.c.src'), - join('multiarray', 'multiarraymodule.c'), - join('multiarray', 'scalartypes.c.src'), - join('multiarray', 'scalarapi.c'), - join('multiarray', 'calculation.c'), - join('multiarray', 'usertypes.c'), - join('multiarray', 'refcount.c'), - join('multiarray', 'conversion_utils.c'), - join('multiarray', 'buffer.c'), - join('multiarray', 'datetime.c'), - join('multiarray', 'nditer.c.src'), - join('multiarray', 'nditer_pywrap.c'), - join('multiarray', 'einsum.c.src'), - join('umath', 'ufunc_object.c'), - join('umath', 'loops.c.src'), - ] -THIS_DIR = os.path.dirname(__file__) -API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES] - -def file_in_this_dir(filename): - return os.path.join(THIS_DIR, filename) - -def remove_whitespace(s): - return ''.join(s.split()) - -def _repl(str): - return str.replace('Bool','npy_bool') - -class Function(object): - def __init__(self, name, return_type, args, doc=''): - self.name = name - self.return_type = _repl(return_type) - 
self.args = args - self.doc = doc - - def _format_arg(self, typename, name): - if typename.endswith('*'): - return typename + name - else: - return typename + ' ' + name - - def __str__(self): - argstr = ', '.join([self._format_arg(*a) for a in self.args]) - if self.doc: - doccomment = '/* %s */\n' % self.doc - else: - doccomment = '' - return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr) - - def to_ReST(self): - lines = ['::', '', ' ' + self.return_type] - argstr = ',\000'.join([self._format_arg(*a) for a in self.args]) - name = ' %s' % (self.name,) - s = textwrap.wrap('(%s)' % (argstr,), width=72, - initial_indent=name, - subsequent_indent=' ' * (len(name)+1), - break_long_words=False) - for l in s: - lines.append(l.replace('\000', ' ').rstrip()) - lines.append('') - if self.doc: - lines.append(textwrap.dedent(self.doc)) - return '\n'.join(lines) - - def api_hash(self): - m = md5new() - m.update(remove_whitespace(self.return_type)) - m.update('\000') - m.update(self.name) - m.update('\000') - for typename, name in self.args: - m.update(remove_whitespace(typename)) - m.update('\000') - return m.hexdigest()[:8] - -class ParseError(Exception): - def __init__(self, filename, lineno, msg): - self.filename = filename - self.lineno = lineno - self.msg = msg - - def __str__(self): - return '%s:%s:%s' % (self.filename, self.lineno, self.msg) - -def skip_brackets(s, lbrac, rbrac): - count = 0 - for i, c in enumerate(s): - if c == lbrac: - count += 1 - elif c == rbrac: - count -= 1 - if count == 0: - return i - raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s)) - -def split_arguments(argstr): - arguments = [] - bracket_counts = {'(': 0, '[': 0} - current_argument = [] - state = 0 - i = 0 - def finish_arg(): - if current_argument: - argstr = ''.join(current_argument).strip() - m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr) - if m: - typename = m.group(1).strip() - name = m.group(3) - else: - typename = argstr - name = '' - 
arguments.append((typename, name)) - del current_argument[:] - while i < len(argstr): - c = argstr[i] - if c == ',': - finish_arg() - elif c == '(': - p = skip_brackets(argstr[i:], '(', ')') - current_argument += argstr[i:i+p] - i += p-1 - else: - current_argument += c - i += 1 - finish_arg() - return arguments - - -def find_functions(filename, tag='API'): - """ - Scan the file, looking for tagged functions. - - Assuming ``tag=='API'``, a tagged function looks like:: - - /*API*/ - static returntype* - function_name(argtype1 arg1, argtype2 arg2) - { - } - - where the return type must be on a separate line, the function - name must start the line, and the opening ``{`` must start the line. - - An optional documentation comment in ReST format may follow the tag, - as in:: - - /*API - This function does foo... - */ - """ - fo = open(filename, 'r') - functions = [] - return_type = None - function_name = None - function_args = [] - doclist = [] - SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5) - state = SCANNING - tagcomment = '/*' + tag - for lineno, line in enumerate(fo): - try: - line = line.strip() - if state == SCANNING: - if line.startswith(tagcomment): - if line.endswith('*/'): - state = STATE_RETTYPE - else: - state = STATE_DOC - elif state == STATE_DOC: - if line.startswith('*/'): - state = STATE_RETTYPE - else: - line = line.lstrip(' *') - doclist.append(line) - elif state == STATE_RETTYPE: - # first line of declaration with return type - m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line) - if m: - line = m.group(1) - return_type = line - state = STATE_NAME - elif state == STATE_NAME: - # second line, with function name - m = re.match(r'(\w+)\s*\(', line) - if m: - function_name = m.group(1) - else: - raise ParseError(filename, lineno+1, - 'could not find function name') - function_args.append(line[m.end():]) - state = STATE_ARGS - elif state == STATE_ARGS: - if line.startswith('{'): - # finished - fargs_str = ' '.join(function_args).rstrip(' 
)') - fargs = split_arguments(fargs_str) - f = Function(function_name, return_type, fargs, - '\n'.join(doclist)) - functions.append(f) - return_type = None - function_name = None - function_args = [] - doclist = [] - state = SCANNING - else: - function_args.append(line) - except: - print(filename, lineno+1) - raise - fo.close() - return functions - -def should_rebuild(targets, source_files): - from distutils.dep_util import newer_group - for t in targets: - if not os.path.exists(t): - return True - sources = API_FILES + list(source_files) + [__file__] - if newer_group(sources, targets[0], missing='newer'): - return True - return False - -# Those *Api classes instances know how to output strings for the generated code -class TypeApi: - def __init__(self, name, index, ptr_cast, api_name): - self.index = index - self.name = name - self.ptr_cast = ptr_cast - self.api_name = api_name - - def define_from_array_api_string(self): - return "#define %s (*(%s *)%s[%d])" % (self.name, - self.ptr_cast, - self.api_name, - self.index) - - def array_api_define(self): - return " (void *) &%s" % self.name - - def internal_define(self): - astr = """\ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject %(type)s; -#else - NPY_NO_EXPORT PyTypeObject %(type)s; -#endif -""" % {'type': self.name} - return astr - -class GlobalVarApi: - def __init__(self, name, index, type, api_name): - self.name = name - self.index = index - self.type = type - self.api_name = api_name - - def define_from_array_api_string(self): - return "#define %s (*(%s *)%s[%d])" % (self.name, - self.type, - self.api_name, - self.index) - - def array_api_define(self): - return " (%s *) &%s" % (self.type, self.name) - - def internal_define(self): - astr = """\ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT %(type)s %(name)s; -#else - NPY_NO_EXPORT %(type)s %(name)s; -#endif -""" % {'type': self.type, 'name': self.name} - return astr - -# Dummy to be able to consistently use *Api 
instances for all items in the -# array api -class BoolValuesApi: - def __init__(self, name, index, api_name): - self.name = name - self.index = index - self.type = 'PyBoolScalarObject' - self.api_name = api_name - - def define_from_array_api_string(self): - return "#define %s ((%s *)%s[%d])" % (self.name, - self.type, - self.api_name, - self.index) - - def array_api_define(self): - return " (void *) &%s" % self.name - - def internal_define(self): - astr = """\ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif -""" - return astr - -class FunctionApi: - def __init__(self, name, index, return_type, args, api_name): - self.name = name - self.index = index - self.return_type = return_type - self.args = args - self.api_name = api_name - - def _argtypes_string(self): - if not self.args: - return 'void' - argstr = ', '.join([_repl(a[0]) for a in self.args]) - return argstr - - def define_from_array_api_string(self): - define = """\ -#define %s \\\n (*(%s (*)(%s)) \\ - %s[%d])""" % (self.name, - self.return_type, - self._argtypes_string(), - self.api_name, - self.index) - return define - - def array_api_define(self): - return " (void *) %s" % self.name - - def internal_define(self): - astr = """\ -NPY_NO_EXPORT %s %s \\\n (%s);""" % (self.return_type, - self.name, - self._argtypes_string()) - return astr - -def order_dict(d): - """Order dict by its values.""" - o = d.items() - def _key(x): - return (x[1], x[0]) - return sorted(o, key=_key) - -def merge_api_dicts(dicts): - ret = {} - for d in dicts: - for k, v in d.items(): - ret[k] = v - - return ret - -def check_api_dict(d): - """Check that an api dict is valid (does not use the same index twice).""" - # We have if a same index is used twice: we 'revert' the dict so that index - # become keys. 
If the length is different, it means one index has been used - # at least twice - revert_dict = dict([(v, k) for k, v in d.items()]) - if not len(revert_dict) == len(d): - # We compute a dict index -> list of associated items - doubled = {} - for name, index in d.items(): - try: - doubled[index].append(name) - except KeyError: - doubled[index] = [name] - msg = """\ -Same index has been used twice in api definition: %s -""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \ - if len(names) != 1] - raise ValueError(msg) - - # No 'hole' in the indexes may be allowed, and it must starts at 0 - indexes = set(d.values()) - expected = set(range(len(indexes))) - if not indexes == expected: - diff = expected.symmetric_difference(indexes) - msg = "There are some holes in the API indexing: " \ - "(symmetric diff is %s)" % diff - raise ValueError(msg) - -def get_api_functions(tagname, api_dict): - """Parse source files to get functions tagged by the given tag.""" - functions = [] - for f in API_FILES: - functions.extend(find_functions(f, tagname)) - dfunctions = [] - for func in functions: - o = api_dict[func.name] - dfunctions.append( (o, func) ) - dfunctions.sort() - return [a[1] for a in dfunctions] - -def fullapi_hash(api_dicts): - """Given a list of api dicts defining the numpy C API, compute a checksum - of the list of items in the API (as a string).""" - a = [] - for d in api_dicts: - def sorted_by_values(d): - """Sort a dictionary by its values. Assume the dictionary items is of - the form func_name -> order""" - return sorted(d.items(), key=lambda x_y: (x_y[1], x_y[0])) - for name, index in sorted_by_values(d): - a.extend(name) - a.extend(str(index)) - - return md5new(''.join(a).encode('ascii')).hexdigest() - -# To parse strings like 'hex = checksum' where hex is e.g. 
0x1234567F and -# checksum a 128 bits md5 checksum (hex format as well) -VERRE = re.compile('(^0x[\da-f]{8})\s*=\s*([\da-f]{32})') - -def get_versions_hash(): - d = [] - - file = os.path.join(os.path.dirname(__file__), 'cversions.txt') - fid = open(file, 'r') - try: - for line in fid.readlines(): - m = VERRE.match(line) - if m: - d.append((int(m.group(1), 16), m.group(2))) - finally: - fid.close() - - return dict(d) - -def main(): - tagname = sys.argv[1] - order_file = sys.argv[2] - functions = get_api_functions(tagname, order_file) - m = md5new(tagname) - for func in functions: - print(func) - ah = func.api_hash() - m.update(ah) - print(hex(int(ah,16))) - print(hex(int(m.hexdigest()[:8],16))) - -if __name__ == '__main__': - main() diff --git a/numpy-1.6.2/numpy/core/code_generators/genapi2.py b/numpy-1.6.2/numpy/core/code_generators/genapi2.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/core/code_generators/generate_numpy_api.py b/numpy-1.6.2/numpy/core/code_generators/generate_numpy_api.py deleted file mode 100644 index 2f4316d170..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/generate_numpy_api.py +++ /dev/null @@ -1,252 +0,0 @@ -import os -import genapi - -from genapi import \ - TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi - -import numpy_api - -h_template = r""" -#ifdef _MULTIARRAYMODULE - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif - -%s - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) 
|| defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -%s - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - int st; - PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyArray_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); - return -1; - } - - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "ABI version %%x but this version of numpy is %%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "API version %%x but this version of numpy is %%x", \ - (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); - return -1; - } - - /* - * Perform runtime check of endianness and check it matches the one set by - * the headers (npy_endian.h) as a safeguard - */ - st = 
PyArray_GetEndianness(); - if (st == NPY_CPU_UNKNOWN_ENDIAN) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); - return -1; - } -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN - if (st != NPY_CPU_BIG) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "big endian, but detected different endianness at runtime"); - return -1; - } -#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - if (st != NPY_CPU_LITTLE) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "little endian, but detected different endianness at runtime"); - return -1; - } -#endif - - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_ARRAY_RETVAL NULL -#else -#define NUMPY_IMPORT_ARRAY_RETVAL -#endif - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif -""" - - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyArray_API[] = { -%s -}; -""" - -c_api_header = """ -=========== -Numpy C-API -=========== -""" - -def generate_api(output_dir, force=False): - basename = 'multiarray_api' - - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) - d_file = os.path.join(output_dir, '%s.txt' % basename) - targets = (h_file, c_file, d_file) - - sources = numpy_api.multiarray_api - - if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): - return targets - else: - do_generate_api(targets, sources) - - return targets - -def do_generate_api(targets, 
sources): - header_file = targets[0] - c_file = targets[1] - doc_file = targets[2] - - global_vars = sources[0] - global_vars_types = sources[1] - scalar_bool_values = sources[2] - types_api = sources[3] - multiarray_funcs = sources[4] - - # Remove global_vars_type: not a api dict - multiarray_api = sources[:1] + sources[2:] - - module_list = [] - extension_list = [] - init_list = [] - - # Check multiarray api indexes - multiarray_api_index = genapi.merge_api_dicts(multiarray_api) - genapi.check_api_dict(multiarray_api_index) - - numpyapi_list = genapi.get_api_functions('NUMPY_API', - multiarray_funcs) - ordered_funcs_api = genapi.order_dict(multiarray_funcs) - - # Create dict name -> *Api instance - api_name = 'PyArray_API' - multiarray_api_dict = {} - for f in numpyapi_list: - name = f.name - index = multiarray_funcs[name] - multiarray_api_dict[f.name] = FunctionApi(f.name, index, f.return_type, - f.args, api_name) - - for name, index in global_vars.items(): - type = global_vars_types[name] - multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) - - for name, index in scalar_bool_values.items(): - multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) - - for name, index in types_api.items(): - multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) - - assert len(multiarray_api_dict) == len(multiarray_api_index) - - extension_list = [] - for name, index in genapi.order_dict(multiarray_api_index): - api_item = multiarray_api_dict[name] - extension_list.append(api_item.define_from_array_api_string()) - init_list.append(api_item.array_api_define()) - module_list.append(api_item.internal_define()) - - # Write to header - fid = open(header_file, 'w') - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - fid.write(s) - fid.close() - - # Write to c-code - fid = open(c_file, 'w') - s = c_template % ',\n'.join(init_list) - fid.write(s) - fid.close() - - # write to documentation - fid = open(doc_file, 'w') - 
fid.write(c_api_header) - for func in numpyapi_list: - fid.write(func.to_ReST()) - fid.write('\n\n') - fid.close() - - return targets diff --git a/numpy-1.6.2/numpy/core/code_generators/generate_ufunc_api.py b/numpy-1.6.2/numpy/core/code_generators/generate_ufunc_api.py deleted file mode 100644 index e10b9cd38b..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/generate_ufunc_api.py +++ /dev/null @@ -1,215 +0,0 @@ -import os -import genapi - -import numpy_api - -from genapi import \ - TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi - -h_template = r""" -#ifdef _UMATHMODULE - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#else -NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#endif - -%s - -#else - -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; -#else -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; -#else -static void **PyUFunc_API=NULL; -#endif -#endif - -%s - -static int -_import_umath(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyUFunc_API == NULL) { - 
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); - return -1; - } - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_UMATH_RETVAL NULL -#else -#define NUMPY_IMPORT_UMATH_RETVAL -#endif - -#define import_umath() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return NUMPY_IMPORT_UMATH_RETVAL;\ - }\ - } while(0) - -#define import_umath1(ret) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return ret;\ - }\ - } while(0) - -#define import_umath2(ret, msg) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError, msg);\ - return ret;\ - }\ - } while(0) - -#define import_ufunc() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - }\ - } while(0) - -#endif -""" - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyUFunc_API[] = { -%s -}; -""" - -def generate_api(output_dir, force=False): - basename = 'ufunc_api' - - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) - d_file = os.path.join(output_dir, '%s.txt' % basename) - targets = (h_file, c_file, d_file) - - sources = ['ufunc_api_order.txt'] - - if (not force and not genapi.should_rebuild(targets, sources + [__file__])): - return targets - else: - do_generate_api(targets, sources) - - return targets - -def do_generate_api(targets, sources): - header_file = targets[0] - c_file = targets[1] - doc_file = targets[2] - - ufunc_api_index = genapi.merge_api_dicts(( - numpy_api.ufunc_funcs_api, - numpy_api.ufunc_types_api)) - genapi.check_api_dict(ufunc_api_index) - - ufunc_api_list = 
genapi.get_api_functions('UFUNC_API', numpy_api.ufunc_funcs_api) - - # Create dict name -> *Api instance - ufunc_api_dict = {} - api_name = 'PyUFunc_API' - for f in ufunc_api_list: - name = f.name - index = ufunc_api_index[name] - ufunc_api_dict[name] = FunctionApi(f.name, index, f.return_type, - f.args, api_name) - - for name, index in numpy_api.ufunc_types_api.items(): - ufunc_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) - - # set up object API - module_list = [] - extension_list = [] - init_list = [] - - for name, index in genapi.order_dict(ufunc_api_index): - api_item = ufunc_api_dict[name] - extension_list.append(api_item.define_from_array_api_string()) - init_list.append(api_item.array_api_define()) - module_list.append(api_item.internal_define()) - - # Write to header - fid = open(header_file, 'w') - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - fid.write(s) - fid.close() - - # Write to c-code - fid = open(c_file, 'w') - s = c_template % ',\n'.join(init_list) - fid.write(s) - fid.close() - - # Write to documentation - fid = open(doc_file, 'w') - fid.write(''' -================= -Numpy Ufunc C-API -================= -''') - for func in ufunc_api_list: - fid.write(func.to_ReST()) - fid.write('\n\n') - fid.close() - - return targets diff --git a/numpy-1.6.2/numpy/core/code_generators/generate_umath.py b/numpy-1.6.2/numpy/core/code_generators/generate_umath.py deleted file mode 100644 index 9382b1faeb..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/generate_umath.py +++ /dev/null @@ -1,867 +0,0 @@ -import os -import re -import struct -import sys -import textwrap - -sys.path.insert(0, os.path.dirname(__file__)) -import ufunc_docstrings as docstrings -sys.path.pop(0) - -Zero = "PyUFunc_Zero" -One = "PyUFunc_One" -None_ = "PyUFunc_None" - -# Sentinel value to specify that the loop for the given TypeDescription uses the -# pointer to arrays as its func_data. 
-UsesArraysAsData = object() - - -class TypeDescription(object): - """Type signature for a ufunc. - - Attributes - ---------- - type : str - Character representing the nominal type. - func_data : str or None or UsesArraysAsData, optional - The string representing the expression to insert into the data array, if - any. - in_ : str or None, optional - The typecode(s) of the inputs. - out : str or None, optional - The typecode(s) of the outputs. - astype : dict or None, optional - If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y - instead of PyUFunc_x_x/PyUFunc_xx_x. - """ - def __init__(self, type, f=None, in_=None, out=None, astype=None): - self.type = type - self.func_data = f - if astype is None: - astype = {} - self.astype_dict = astype - if in_ is not None: - in_ = in_.replace('P', type) - self.in_ = in_ - if out is not None: - out = out.replace('P', type) - self.out = out - - def finish_signature(self, nin, nout): - if self.in_ is None: - self.in_ = self.type * nin - assert len(self.in_) == nin - if self.out is None: - self.out = self.type * nout - assert len(self.out) == nout - self.astype = self.astype_dict.get(self.type, None) - -_fdata_map = dict(e='npy_%sf', f='npy_%sf', d='npy_%s', g='npy_%sl', - F='nc_%sf', D='nc_%s', G='nc_%sl') -def build_func_data(types, f): - func_data = [] - for t in types: - d = _fdata_map.get(t, '%s') % (f,) - func_data.append(d) - return func_data - -def TD(types, f=None, astype=None, in_=None, out=None): - if f is not None: - if isinstance(f, str): - func_data = build_func_data(types, f) - else: - assert len(f) == len(types) - func_data = f - else: - func_data = (None,) * len(types) - if isinstance(in_, str): - in_ = (in_,) * len(types) - elif in_ is None: - in_ = (None,) * len(types) - if isinstance(out, str): - out = (out,) * len(types) - elif out is None: - out = (None,) * len(types) - tds = [] - for t, fd, i, o in zip(types, func_data, in_, out): - tds.append(TypeDescription(t, f=fd, in_=i, out=o, 
astype=astype)) - return tds - -class Ufunc(object): - """Description of a ufunc. - - Attributes - ---------- - - nin: number of input arguments - nout: number of output arguments - identity: identity element for a two-argument function - docstring: docstring for the ufunc - type_descriptions: list of TypeDescription objects - """ - def __init__(self, nin, nout, identity, docstring, - *type_descriptions): - self.nin = nin - self.nout = nout - if identity is None: - identity = None_ - self.identity = identity - self.docstring = docstring - self.type_descriptions = [] - for td in type_descriptions: - self.type_descriptions.extend(td) - for td in self.type_descriptions: - td.finish_signature(self.nin, self.nout) - -# String-handling utilities to avoid locale-dependence. - -import string -if sys.version_info[0] < 3: - UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase) -else: - UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"), - bytes(string.ascii_uppercase, "ascii")) - -def english_upper(s): - """ Apply English case rules to convert ASCII strings to all upper case. - - This is an internal utility function to replace calls to str.upper() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - uppered : str - - Examples - -------- - >>> from numpy.lib.utils import english_upper - >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - >>> english_upper('') - '' - """ - uppered = s.translate(UPPER_TABLE) - return uppered - - -#each entry in defdict is a Ufunc object. 
- -#name: [string of chars for which it is defined, -# string of characters using func interface, -# tuple of strings giving funcs for data, -# (in, out), or (instr, outstr) giving the signature as character codes, -# identity, -# docstring, -# output specification (optional) -# ] - -chartoname = {'?': 'bool', - 'b': 'byte', - 'B': 'ubyte', - 'h': 'short', - 'H': 'ushort', - 'i': 'int', - 'I': 'uint', - 'l': 'long', - 'L': 'ulong', - 'q': 'longlong', - 'Q': 'ulonglong', - 'e': 'half', - 'f': 'float', - 'd': 'double', - 'g': 'longdouble', - 'F': 'cfloat', - 'D': 'cdouble', - 'G': 'clongdouble', - 'M': 'datetime', - 'm': 'timedelta', - 'O': 'OBJECT', - # '.' is like 'O', but calls a method of the object instead - # of a function - 'P': 'OBJECT', - } - -all = '?bBhHiIlLqQefdgFDGOMm' -O = 'O' -P = 'P' -ints = 'bBhHiIlLqQ' -times = 'Mm' -intsO = ints + O -bints = '?' + ints -bintsO = bints + O -flts = 'efdg' -fltsO = flts + O -fltsP = flts + P -cmplx = 'FDG' -cmplxO = cmplx + O -cmplxP = cmplx + P -inexact = flts + cmplx -noint = inexact+O -nointP = inexact+P -allP = bints+times+flts+cmplxP -nobool = all[1:] -noobj = all[:-3]+all[-2:] -nobool_or_obj = all[1:-3]+all[-2:] -intflt = ints+flts -intfltcmplx = ints+flts+cmplx -nocmplx = bints+times+flts -nocmplxO = nocmplx+O -nocmplxP = nocmplx+P -notimes_or_obj = bints + inexact - -# Find which code corresponds to int64. -int64 = '' -uint64 = '' -for code in 'bhilq': - if struct.calcsize(code) == 8: - int64 = code - uint64 = english_upper(code) - break - -# This dictionary describes all the ufunc implementations, generating -# all the function names and their corresponding ufunc signatures. TD is -# an object which expands a list of character codes into an array of -# TypeDescriptions. 
-defdict = { -'add' : - Ufunc(2, 1, Zero, - docstrings.get('numpy.core.umath.add'), - TD(notimes_or_obj), - [TypeDescription('M', UsesArraysAsData, 'Mm', 'M'), - TypeDescription('m', UsesArraysAsData, 'mm', 'm'), - TypeDescription('M', UsesArraysAsData, 'mM', 'M'), - ], - TD(O, f='PyNumber_Add'), - ), -'subtract' : - Ufunc(2, 1, Zero, - docstrings.get('numpy.core.umath.subtract'), - TD(notimes_or_obj), - [TypeDescription('M', UsesArraysAsData, 'Mm', 'M'), - TypeDescription('m', UsesArraysAsData, 'mm', 'm'), - TypeDescription('M', UsesArraysAsData, 'MM', 'm'), - ], - TD(O, f='PyNumber_Subtract'), - ), -'multiply' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.multiply'), - TD(notimes_or_obj), - TD(O, f='PyNumber_Multiply'), - ), -'divide' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.divide'), - TD(intfltcmplx), - TD(O, f='PyNumber_Divide'), - ), -'floor_divide' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.floor_divide'), - TD(intfltcmplx), - TD(O, f='PyNumber_FloorDivide'), - ), -'true_divide' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.true_divide'), - TD('bBhH', out='d'), - TD('iIlLqQ', out='d'), - TD(flts+cmplx), - TD(O, f='PyNumber_TrueDivide'), - ), -'conjugate' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.conjugate'), - TD(ints+flts+cmplx), - TD(P, f='conjugate'), - ), -'fmod' : - Ufunc(2, 1, Zero, - docstrings.get('numpy.core.umath.fmod'), - TD(ints), - TD(flts, f='fmod', astype={'e':'f'}), - TD(P, f='fmod'), - ), -'square' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.square'), - TD(ints+inexact), - TD(O, f='Py_square'), - ), -'reciprocal' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.reciprocal'), - TD(ints+inexact), - TD(O, f='Py_reciprocal'), - ), -'ones_like' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.ones_like'), - TD(noobj), - TD(O, f='Py_get_one'), - ), -'power' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.power'), - TD(ints), - TD(inexact, f='pow', 
astype={'e':'f'}), - TD(O, f='npy_ObjectPower'), - ), -'absolute' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.absolute'), - TD(bints+flts+times), - TD(cmplx, out=('f', 'd', 'g')), - TD(O, f='PyNumber_Absolute'), - ), -'_arg' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath._arg'), - TD(cmplx, out=('f', 'd', 'g')), - ), -'negative' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.negative'), - TD(bints+flts+times), - TD(cmplx, f='neg'), - TD(O, f='PyNumber_Negative'), - ), -'sign' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.sign'), - TD(nobool), - ), -'greater' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.greater'), - TD(all, out='?'), - ), -'greater_equal' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.greater_equal'), - TD(all, out='?'), - ), -'less' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.less'), - TD(all, out='?'), - ), -'less_equal' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.less_equal'), - TD(all, out='?'), - ), -'equal' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.equal'), - TD(all, out='?'), - ), -'not_equal' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.not_equal'), - TD(all, out='?'), - ), -'logical_and' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.logical_and'), - TD(noobj, out='?'), - TD(P, f='logical_and'), - ), -'logical_not' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.logical_not'), - TD(noobj, out='?'), - TD(P, f='logical_not'), - ), -'logical_or' : - Ufunc(2, 1, Zero, - docstrings.get('numpy.core.umath.logical_or'), - TD(noobj, out='?'), - TD(P, f='logical_or'), - ), -'logical_xor' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.logical_xor'), - TD(noobj, out='?'), - TD(P, f='logical_xor'), - ), -'maximum' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.maximum'), - TD(noobj), - TD(O, f='npy_ObjectMax') - ), -'minimum' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.minimum'), - 
TD(noobj), - TD(O, f='npy_ObjectMin') - ), -'fmax' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.fmax'), - TD(noobj), - TD(O, f='npy_ObjectMax') - ), -'fmin' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.fmin'), - TD(noobj), - TD(O, f='npy_ObjectMin') - ), -'logaddexp' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.logaddexp'), - TD(flts, f="logaddexp", astype={'e':'f'}) - ), -'logaddexp2' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.logaddexp2'), - TD(flts, f="logaddexp2", astype={'e':'f'}) - ), -# FIXME: decide if the times should have the bitwise operations. -'bitwise_and' : - Ufunc(2, 1, One, - docstrings.get('numpy.core.umath.bitwise_and'), - TD(bints), - TD(O, f='PyNumber_And'), - ), -'bitwise_or' : - Ufunc(2, 1, Zero, - docstrings.get('numpy.core.umath.bitwise_or'), - TD(bints), - TD(O, f='PyNumber_Or'), - ), -'bitwise_xor' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.bitwise_xor'), - TD(bints), - TD(O, f='PyNumber_Xor'), - ), -'invert' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.invert'), - TD(bints), - TD(O, f='PyNumber_Invert'), - ), -'left_shift' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.left_shift'), - TD(ints), - TD(O, f='PyNumber_Lshift'), - ), -'right_shift' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.right_shift'), - TD(ints), - TD(O, f='PyNumber_Rshift'), - ), -'degrees' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.degrees'), - TD(fltsP, f='degrees', astype={'e':'f'}), - ), -'rad2deg' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.rad2deg'), - TD(fltsP, f='rad2deg', astype={'e':'f'}), - ), -'radians' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.radians'), - TD(fltsP, f='radians', astype={'e':'f'}), - ), -'deg2rad' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.deg2rad'), - TD(fltsP, f='deg2rad', astype={'e':'f'}), - ), -'arccos' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.arccos'), - TD(inexact, 
f='acos', astype={'e':'f'}), - TD(P, f='arccos'), - ), -'arccosh' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.arccosh'), - TD(inexact, f='acosh', astype={'e':'f'}), - TD(P, f='arccosh'), - ), -'arcsin' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.arcsin'), - TD(inexact, f='asin', astype={'e':'f'}), - TD(P, f='arcsin'), - ), -'arcsinh' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.arcsinh'), - TD(inexact, f='asinh', astype={'e':'f'}), - TD(P, f='arcsinh'), - ), -'arctan' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.arctan'), - TD(inexact, f='atan', astype={'e':'f'}), - TD(P, f='arctan'), - ), -'arctanh' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.arctanh'), - TD(inexact, f='atanh', astype={'e':'f'}), - TD(P, f='arctanh'), - ), -'cos' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.cos'), - TD(inexact, f='cos', astype={'e':'f'}), - TD(P, f='cos'), - ), -'sin' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.sin'), - TD(inexact, f='sin', astype={'e':'f'}), - TD(P, f='sin'), - ), -'tan' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.tan'), - TD(inexact, f='tan', astype={'e':'f'}), - TD(P, f='tan'), - ), -'cosh' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.cosh'), - TD(inexact, f='cosh', astype={'e':'f'}), - TD(P, f='cosh'), - ), -'sinh' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.sinh'), - TD(inexact, f='sinh', astype={'e':'f'}), - TD(P, f='sinh'), - ), -'tanh' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.tanh'), - TD(inexact, f='tanh', astype={'e':'f'}), - TD(P, f='tanh'), - ), -'exp' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.exp'), - TD(inexact, f='exp', astype={'e':'f'}), - TD(P, f='exp'), - ), -'exp2' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.exp2'), - TD(inexact, f='exp2', astype={'e':'f'}), - TD(P, f='exp2'), - ), -'expm1' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.expm1'), - TD(inexact, 
f='expm1', astype={'e':'f'}), - TD(P, f='expm1'), - ), -'log' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.log'), - TD(inexact, f='log', astype={'e':'f'}), - TD(P, f='log'), - ), -'log2' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.log2'), - TD(inexact, f='log2', astype={'e':'f'}), - TD(P, f='log2'), - ), -'log10' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.log10'), - TD(inexact, f='log10', astype={'e':'f'}), - TD(P, f='log10'), - ), -'log1p' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.log1p'), - TD(inexact, f='log1p', astype={'e':'f'}), - TD(P, f='log1p'), - ), -'sqrt' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.sqrt'), - TD(inexact, f='sqrt', astype={'e':'f'}), - TD(P, f='sqrt'), - ), -'ceil' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.ceil'), - TD(flts, f='ceil', astype={'e':'f'}), - TD(P, f='ceil'), - ), -'trunc' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.trunc'), - TD(flts, f='trunc', astype={'e':'f'}), - TD(P, f='trunc'), - ), -'fabs' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.fabs'), - TD(flts, f='fabs', astype={'e':'f'}), - TD(P, f='fabs'), - ), -'floor' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.floor'), - TD(flts, f='floor', astype={'e':'f'}), - TD(P, f='floor'), - ), -'rint' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.rint'), - TD(inexact, f='rint', astype={'e':'f'}), - TD(P, f='rint'), - ), -'arctan2' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.arctan2'), - TD(flts, f='atan2', astype={'e':'f'}), - TD(P, f='arctan2'), - ), -'remainder' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.remainder'), - TD(intflt), - TD(O, f='PyNumber_Remainder'), - ), -'hypot' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.hypot'), - TD(flts, f='hypot', astype={'e':'f'}), - TD(P, f='hypot'), - ), -'isnan' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.isnan'), - TD(inexact, out='?'), - ), -'isinf' : - 
Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.isinf'), - TD(inexact, out='?'), - ), -'isfinite' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.isfinite'), - TD(inexact, out='?'), - ), -'signbit' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.signbit'), - TD(flts, out='?'), - ), -'copysign' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.copysign'), - TD(flts), - ), -'nextafter' : - Ufunc(2, 1, None, - docstrings.get('numpy.core.umath.nextafter'), - TD(flts), - ), -'spacing' : - Ufunc(1, 1, None, - docstrings.get('numpy.core.umath.spacing'), - TD(flts), - ), -'modf' : - Ufunc(1, 2, None, - docstrings.get('numpy.core.umath.modf'), - TD(flts), - ), -} - -if sys.version_info[0] >= 3: - # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators - del defdict['divide'] - -def indent(st,spaces): - indention = ' '*spaces - indented = indention + st.replace('\n','\n'+indention) - # trim off any trailing spaces - indented = re.sub(r' +$',r'',indented) - return indented - -chartotype1 = {'e': 'e_e', - 'f': 'f_f', - 'd': 'd_d', - 'g': 'g_g', - 'F': 'F_F', - 'D': 'D_D', - 'G': 'G_G', - 'O': 'O_O', - 'P': 'O_O_method'} - -chartotype2 = {'e': 'ee_e', - 'f': 'ff_f', - 'd': 'dd_d', - 'g': 'gg_g', - 'F': 'FF_F', - 'D': 'DD_D', - 'G': 'GG_G', - 'O': 'OO_O', - 'P': 'OO_O_method'} -#for each name -# 1) create functions, data, and signature -# 2) fill in functions and data in InitOperators -# 3) add function. 
- -def make_arrays(funcdict): - # functions array contains an entry for every type implemented - # NULL should be placed where PyUfunc_ style function will be filled in later - # - code1list = [] - code2list = [] - names = list(funcdict.keys()) - names.sort() - for name in names: - uf = funcdict[name] - funclist = [] - datalist = [] - siglist = [] - k = 0 - sub = 0 - - if uf.nin > 1: - assert uf.nin == 2 - thedict = chartotype2 # two inputs and one output - else: - thedict = chartotype1 # one input and one output - - for t in uf.type_descriptions: - if t.func_data not in (None, UsesArraysAsData): - funclist.append('NULL') - astype = '' - if not t.astype is None: - astype = '_As_%s' % thedict[t.astype] - astr = '%s_functions[%d] = PyUFunc_%s%s;' % \ - (name, k, thedict[t.type], astype) - code2list.append(astr) - if t.type == 'O': - astr = '%s_data[%d] = (void *) %s;' % \ - (name, k, t.func_data) - code2list.append(astr) - datalist.append('(void *)NULL') - elif t.type == 'P': - datalist.append('(void *)"%s"' % t.func_data) - else: - astr = '%s_data[%d] = (void *) %s;' % \ - (name, k, t.func_data) - code2list.append(astr) - datalist.append('(void *)NULL') - #datalist.append('(void *)%s' % t.func_data) - sub += 1 - elif t.func_data is UsesArraysAsData: - tname = english_upper(chartoname[t.type]) - datalist.append('(void *)NULL') - funclist.append('%s_%s_%s_%s' % (tname, t.in_, t.out, name)) - code2list.append('PyUFunc_SetUsesArraysAsData(%s_data, %s);' % (name, k)) - else: - datalist.append('(void *)NULL') - tname = english_upper(chartoname[t.type]) - funclist.append('%s_%s' % (tname, name)) - - for x in t.in_ + t.out: - siglist.append('PyArray_%s' % (english_upper(chartoname[x]),)) - - k += 1 - - funcnames = ', '.join(funclist) - signames = ', '.join(siglist) - datanames = ', '.join(datalist) - code1list.append("static PyUFuncGenericFunction %s_functions[] = { %s };" \ - % (name, funcnames)) - code1list.append("static void * %s_data[] = { %s };" \ - % (name, 
datanames)) - code1list.append("static char %s_signatures[] = { %s };" \ - % (name, signames)) - return "\n".join(code1list),"\n".join(code2list) - -def make_ufuncs(funcdict): - code3list = [] - names = list(funcdict.keys()) - names.sort() - for name in names: - uf = funcdict[name] - mlist = [] - docstring = textwrap.dedent(uf.docstring).strip() - if sys.version_info[0] < 3: - docstring = docstring.encode('string-escape') - docstring = docstring.replace(r'"', r'\"') - else: - docstring = docstring.encode('unicode-escape').decode('ascii') - docstring = docstring.replace(r'"', r'\"') - # XXX: I don't understand why the following replace is not - # necessary in the python 2 case. - docstring = docstring.replace(r"'", r"\'") - # Split the docstring because some compilers (like MS) do not like big - # string literal in C code. We split at endlines because textwrap.wrap - # do not play well with \n - docstring = '\\n\"\"'.join(docstring.split(r"\n")) - mlist.append(\ -r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d, - %d, %d, %s, "%s", - "%s", 0);""" % (name, name, name, - len(uf.type_descriptions), - uf.nin, uf.nout, - uf.identity, - name, docstring)) - mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) - mlist.append(r"""Py_DECREF(f);""") - code3list.append('\n'.join(mlist)) - return '\n'.join(code3list) - - -def make_code(funcdict,filename): - code1, code2 = make_arrays(funcdict) - code3 = make_ufuncs(funcdict) - code2 = indent(code2,4) - code3 = indent(code3,4) - code = r""" - -/** Warning this file is autogenerated!!! 
- - Please make changes to the code generator program (%s) -**/ - -%s - -static void -InitOperators(PyObject *dictionary) { - PyObject *f; - -%s -%s -} -""" % (filename, code1, code2, code3) - return code; - - -if __name__ == "__main__": - filename = __file__ - fid = open('__umath_generated.c','w') - code = make_code(defdict, filename) - fid.write(code) - fid.close() diff --git a/numpy-1.6.2/numpy/core/code_generators/numpy_api.py b/numpy-1.6.2/numpy/core/code_generators/numpy_api.py deleted file mode 100644 index db2c368dd1..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/numpy_api.py +++ /dev/null @@ -1,381 +0,0 @@ -"""Here we define the exported functions, types, etc... which need to be -exported through a global C pointer. - -Each dictionary contains name -> index pair. - -Whenever you change one index, you break the ABI (and the ABI version number -should be incremented). Whenever you add an item to one of the dict, the API -needs to be updated. - -When adding a function, make sure to use the next integer not used as an index -(in case you use an existing index or jump, the build will stop and raise an -exception, so it should hopefully not get unnoticed). 
-""" - -multiarray_global_vars = { - 'NPY_NUMUSERTYPES': 7, -} - -multiarray_global_vars_types = { - 'NPY_NUMUSERTYPES': 'int', -} - -multiarray_scalar_bool_values = { - '_PyArrayScalar_BoolValues': 9 -} - -multiarray_types_api = { - 'PyBigArray_Type': 1, - 'PyArray_Type': 2, - 'PyArrayDescr_Type': 3, - 'PyArrayFlags_Type': 4, - 'PyArrayIter_Type': 5, - 'PyArrayMultiIter_Type': 6, - 'PyBoolArrType_Type': 8, - 'PyGenericArrType_Type': 10, - 'PyNumberArrType_Type': 11, - 'PyIntegerArrType_Type': 12, - 'PySignedIntegerArrType_Type': 13, - 'PyUnsignedIntegerArrType_Type': 14, - 'PyInexactArrType_Type': 15, - 'PyFloatingArrType_Type': 16, - 'PyComplexFloatingArrType_Type': 17, - 'PyFlexibleArrType_Type': 18, - 'PyCharacterArrType_Type': 19, - 'PyByteArrType_Type': 20, - 'PyShortArrType_Type': 21, - 'PyIntArrType_Type': 22, - 'PyLongArrType_Type': 23, - 'PyLongLongArrType_Type': 24, - 'PyUByteArrType_Type': 25, - 'PyUShortArrType_Type': 26, - 'PyUIntArrType_Type': 27, - 'PyULongArrType_Type': 28, - 'PyULongLongArrType_Type': 29, - 'PyFloatArrType_Type': 30, - 'PyDoubleArrType_Type': 31, - 'PyLongDoubleArrType_Type': 32, - 'PyCFloatArrType_Type': 33, - 'PyCDoubleArrType_Type': 34, - 'PyCLongDoubleArrType_Type': 35, - 'PyObjectArrType_Type': 36, - 'PyStringArrType_Type': 37, - 'PyUnicodeArrType_Type': 38, - 'PyVoidArrType_Type': 39, - # End 1.5 API - 'PyTimeIntegerArrType_Type': 214, - 'PyDatetimeArrType_Type': 215, - 'PyTimedeltaArrType_Type': 216, - 'PyHalfArrType_Type': 217, - 'NpyIter_Type': 218, -} - -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[6]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8]) - -multiarray_funcs_api = { - 'PyArray_GetNDArrayCVersion': 0, - 'PyArray_SetNumericOps': 40, - 'PyArray_GetNumericOps': 41, - 'PyArray_INCREF': 42, - 'PyArray_XDECREF': 43, - 'PyArray_SetStringFunction': 44, - 'PyArray_DescrFromType': 45, - 'PyArray_TypeObjectFromType': 46, - 
'PyArray_Zero': 47, - 'PyArray_One': 48, - 'PyArray_CastToType': 49, - 'PyArray_CastTo': 50, - 'PyArray_CastAnyTo': 51, - 'PyArray_CanCastSafely': 52, - 'PyArray_CanCastTo': 53, - 'PyArray_ObjectType': 54, - 'PyArray_DescrFromObject': 55, - 'PyArray_ConvertToCommonType': 56, - 'PyArray_DescrFromScalar': 57, - 'PyArray_DescrFromTypeObject': 58, - 'PyArray_Size': 59, - 'PyArray_Scalar': 60, - 'PyArray_FromScalar': 61, - 'PyArray_ScalarAsCtype': 62, - 'PyArray_CastScalarToCtype': 63, - 'PyArray_CastScalarDirect': 64, - 'PyArray_ScalarFromObject': 65, - 'PyArray_GetCastFunc': 66, - 'PyArray_FromDims': 67, - 'PyArray_FromDimsAndDataAndDescr': 68, - 'PyArray_FromAny': 69, - 'PyArray_EnsureArray': 70, - 'PyArray_EnsureAnyArray': 71, - 'PyArray_FromFile': 72, - 'PyArray_FromString': 73, - 'PyArray_FromBuffer': 74, - 'PyArray_FromIter': 75, - 'PyArray_Return': 76, - 'PyArray_GetField': 77, - 'PyArray_SetField': 78, - 'PyArray_Byteswap': 79, - 'PyArray_Resize': 80, - 'PyArray_MoveInto': 81, - 'PyArray_CopyInto': 82, - 'PyArray_CopyAnyInto': 83, - 'PyArray_CopyObject': 84, - 'PyArray_NewCopy': 85, - 'PyArray_ToList': 86, - 'PyArray_ToString': 87, - 'PyArray_ToFile': 88, - 'PyArray_Dump': 89, - 'PyArray_Dumps': 90, - 'PyArray_ValidType': 91, - 'PyArray_UpdateFlags': 92, - 'PyArray_New': 93, - 'PyArray_NewFromDescr': 94, - 'PyArray_DescrNew': 95, - 'PyArray_DescrNewFromType': 96, - 'PyArray_GetPriority': 97, - 'PyArray_IterNew': 98, - 'PyArray_MultiIterNew': 99, - 'PyArray_PyIntAsInt': 100, - 'PyArray_PyIntAsIntp': 101, - 'PyArray_Broadcast': 102, - 'PyArray_FillObjectArray': 103, - 'PyArray_FillWithScalar': 104, - 'PyArray_CheckStrides': 105, - 'PyArray_DescrNewByteorder': 106, - 'PyArray_IterAllButAxis': 107, - 'PyArray_CheckFromAny': 108, - 'PyArray_FromArray': 109, - 'PyArray_FromInterface': 110, - 'PyArray_FromStructInterface': 111, - 'PyArray_FromArrayAttr': 112, - 'PyArray_ScalarKind': 113, - 'PyArray_CanCoerceScalar': 114, - 'PyArray_NewFlagsObject': 115, - 
'PyArray_CanCastScalar': 116, - 'PyArray_CompareUCS4': 117, - 'PyArray_RemoveSmallest': 118, - 'PyArray_ElementStrides': 119, - 'PyArray_Item_INCREF': 120, - 'PyArray_Item_XDECREF': 121, - 'PyArray_FieldNames': 122, - 'PyArray_Transpose': 123, - 'PyArray_TakeFrom': 124, - 'PyArray_PutTo': 125, - 'PyArray_PutMask': 126, - 'PyArray_Repeat': 127, - 'PyArray_Choose': 128, - 'PyArray_Sort': 129, - 'PyArray_ArgSort': 130, - 'PyArray_SearchSorted': 131, - 'PyArray_ArgMax': 132, - 'PyArray_ArgMin': 133, - 'PyArray_Reshape': 134, - 'PyArray_Newshape': 135, - 'PyArray_Squeeze': 136, - 'PyArray_View': 137, - 'PyArray_SwapAxes': 138, - 'PyArray_Max': 139, - 'PyArray_Min': 140, - 'PyArray_Ptp': 141, - 'PyArray_Mean': 142, - 'PyArray_Trace': 143, - 'PyArray_Diagonal': 144, - 'PyArray_Clip': 145, - 'PyArray_Conjugate': 146, - 'PyArray_Nonzero': 147, - 'PyArray_Std': 148, - 'PyArray_Sum': 149, - 'PyArray_CumSum': 150, - 'PyArray_Prod': 151, - 'PyArray_CumProd': 152, - 'PyArray_All': 153, - 'PyArray_Any': 154, - 'PyArray_Compress': 155, - 'PyArray_Flatten': 156, - 'PyArray_Ravel': 157, - 'PyArray_MultiplyList': 158, - 'PyArray_MultiplyIntList': 159, - 'PyArray_GetPtr': 160, - 'PyArray_CompareLists': 161, - 'PyArray_AsCArray': 162, - 'PyArray_As1D': 163, - 'PyArray_As2D': 164, - 'PyArray_Free': 165, - 'PyArray_Converter': 166, - 'PyArray_IntpFromSequence': 167, - 'PyArray_Concatenate': 168, - 'PyArray_InnerProduct': 169, - 'PyArray_MatrixProduct': 170, - 'PyArray_CopyAndTranspose': 171, - 'PyArray_Correlate': 172, - 'PyArray_TypestrConvert': 173, - 'PyArray_DescrConverter': 174, - 'PyArray_DescrConverter2': 175, - 'PyArray_IntpConverter': 176, - 'PyArray_BufferConverter': 177, - 'PyArray_AxisConverter': 178, - 'PyArray_BoolConverter': 179, - 'PyArray_ByteorderConverter': 180, - 'PyArray_OrderConverter': 181, - 'PyArray_EquivTypes': 182, - 'PyArray_Zeros': 183, - 'PyArray_Empty': 184, - 'PyArray_Where': 185, - 'PyArray_Arange': 186, - 'PyArray_ArangeObj': 187, - 
'PyArray_SortkindConverter': 188, - 'PyArray_LexSort': 189, - 'PyArray_Round': 190, - 'PyArray_EquivTypenums': 191, - 'PyArray_RegisterDataType': 192, - 'PyArray_RegisterCastFunc': 193, - 'PyArray_RegisterCanCast': 194, - 'PyArray_InitArrFuncs': 195, - 'PyArray_IntTupleFromIntp': 196, - 'PyArray_TypeNumFromName': 197, - 'PyArray_ClipmodeConverter': 198, - 'PyArray_OutputConverter': 199, - 'PyArray_BroadcastToShape': 200, - '_PyArray_SigintHandler': 201, - '_PyArray_GetSigintBuf': 202, - 'PyArray_DescrAlignConverter': 203, - 'PyArray_DescrAlignConverter2': 204, - 'PyArray_SearchsideConverter': 205, - 'PyArray_CheckAxis': 206, - 'PyArray_OverflowMultiplyList': 207, - 'PyArray_CompareString': 208, - 'PyArray_MultiIterFromObjects': 209, - 'PyArray_GetEndianness': 210, - 'PyArray_GetNDArrayCFeatureVersion': 211, - 'PyArray_Correlate2': 212, - 'PyArray_NeighborhoodIterNew': 213, - # End 1.5 API - 'PyArray_SetDatetimeParseFunction': 219, - 'PyArray_DatetimeToDatetimeStruct': 220, - 'PyArray_TimedeltaToTimedeltaStruct': 221, - 'PyArray_DatetimeStructToDatetime': 222, - 'PyArray_TimedeltaStructToTimedelta': 223, - # New Iterator API - 'NpyIter_New': 224, - 'NpyIter_MultiNew': 225, - 'NpyIter_AdvancedNew': 226, - 'NpyIter_Copy': 227, - 'NpyIter_Deallocate': 228, - 'NpyIter_HasDelayedBufAlloc': 229, - 'NpyIter_HasExternalLoop': 230, - 'NpyIter_EnableExternalLoop': 231, - 'NpyIter_GetInnerStrideArray': 232, - 'NpyIter_GetInnerLoopSizePtr': 233, - 'NpyIter_Reset': 234, - 'NpyIter_ResetBasePointers': 235, - 'NpyIter_ResetToIterIndexRange': 236, - 'NpyIter_GetNDim': 237, - 'NpyIter_GetNOp': 238, - 'NpyIter_GetIterNext': 239, - 'NpyIter_GetIterSize': 240, - 'NpyIter_GetIterIndexRange': 241, - 'NpyIter_GetIterIndex': 242, - 'NpyIter_GotoIterIndex': 243, - 'NpyIter_HasMultiIndex': 244, - 'NpyIter_GetShape': 245, - 'NpyIter_GetGetMultiIndex': 246, - 'NpyIter_GotoMultiIndex': 247, - 'NpyIter_RemoveMultiIndex': 248, - 'NpyIter_HasIndex': 249, - 'NpyIter_IsBuffered': 250, - 
'NpyIter_IsGrowInner': 251, - 'NpyIter_GetBufferSize': 252, - 'NpyIter_GetIndexPtr': 253, - 'NpyIter_GotoIndex': 254, - 'NpyIter_GetDataPtrArray': 255, - 'NpyIter_GetDescrArray': 256, - 'NpyIter_GetOperandArray': 257, - 'NpyIter_GetIterView': 258, - 'NpyIter_GetReadFlags': 259, - 'NpyIter_GetWriteFlags': 260, - 'NpyIter_DebugPrint': 261, - 'NpyIter_IterationNeedsAPI': 262, - 'NpyIter_GetInnerFixedStrideArray': 263, - 'NpyIter_RemoveAxis': 264, - 'NpyIter_GetAxisStrideArray': 265, - 'NpyIter_RequiresBuffering': 266, - 'NpyIter_GetInitialDataPtrArray': 267, - 'NpyIter_CreateCompatibleStrides': 268, - # - 'PyArray_CastingConverter': 269, - 'PyArray_CountNonzero': 270, - 'PyArray_PromoteTypes': 271, - 'PyArray_MinScalarType': 272, - 'PyArray_ResultType': 273, - 'PyArray_CanCastArrayTo': 274, - 'PyArray_CanCastTypeTo': 275, - 'PyArray_EinsteinSum': 276, - 'PyArray_NewLikeArray': 277, - 'PyArray_GetArrayParamsFromObject': 278, - 'PyArray_ConvertClipmodeSequence': 279, - 'PyArray_MatrixProduct2': 280, -} - -ufunc_types_api = { - 'PyUFunc_Type': 0 -} - -ufunc_funcs_api = { - 'PyUFunc_FromFuncAndData': 1, - 'PyUFunc_RegisterLoopForType': 2, - 'PyUFunc_GenericFunction': 3, - 'PyUFunc_f_f_As_d_d': 4, - 'PyUFunc_d_d': 5, - 'PyUFunc_f_f': 6, - 'PyUFunc_g_g': 7, - 'PyUFunc_F_F_As_D_D': 8, - 'PyUFunc_F_F': 9, - 'PyUFunc_D_D': 10, - 'PyUFunc_G_G': 11, - 'PyUFunc_O_O': 12, - 'PyUFunc_ff_f_As_dd_d': 13, - 'PyUFunc_ff_f': 14, - 'PyUFunc_dd_d': 15, - 'PyUFunc_gg_g': 16, - 'PyUFunc_FF_F_As_DD_D': 17, - 'PyUFunc_DD_D': 18, - 'PyUFunc_FF_F': 19, - 'PyUFunc_GG_G': 20, - 'PyUFunc_OO_O': 21, - 'PyUFunc_O_O_method': 22, - 'PyUFunc_OO_O_method': 23, - 'PyUFunc_On_Om': 24, - 'PyUFunc_GetPyValues': 25, - 'PyUFunc_checkfperr': 26, - 'PyUFunc_clearfperr': 27, - 'PyUFunc_getfperr': 28, - 'PyUFunc_handlefperr': 29, - 'PyUFunc_ReplaceLoopBySignature': 30, - 'PyUFunc_FromFuncAndDataAndSignature': 31, - 'PyUFunc_SetUsesArraysAsData': 32, - # End 1.5 API - 'PyUFunc_e_e': 33, - 'PyUFunc_e_e_As_f_f': 34, 
- 'PyUFunc_e_e_As_d_d': 35, - 'PyUFunc_ee_e': 36, - 'PyUFunc_ee_e_As_ff_f': 37, - 'PyUFunc_ee_e_As_dd_d': 38, -} - -# List of all the dicts which define the C API -# XXX: DO NOT CHANGE THE ORDER OF TUPLES BELOW ! -multiarray_api = ( - multiarray_global_vars, - multiarray_global_vars_types, - multiarray_scalar_bool_values, - multiarray_types_api, - multiarray_funcs_api, -) - -ufunc_api = ( - ufunc_funcs_api, - ufunc_types_api -) - -full_api = multiarray_api + ufunc_api diff --git a/numpy-1.6.2/numpy/core/code_generators/ufunc_docstrings.py b/numpy-1.6.2/numpy/core/code_generators/ufunc_docstrings.py deleted file mode 100644 index 591a898ed7..0000000000 --- a/numpy-1.6.2/numpy/core/code_generators/ufunc_docstrings.py +++ /dev/null @@ -1,3299 +0,0 @@ -# Docstrings for generated ufuncs - -docdict = {} - -def get(name): - return docdict.get(name) - -def add_newdoc(place, name, doc): - docdict['.'.join((place, name))] = doc - - -add_newdoc('numpy.core.umath', 'absolute', - """ - Calculate the absolute value element-wise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - absolute : ndarray - An ndarray containing the absolute value of - each element in `x`. For complex input, ``a + ib``, the - absolute value is :math:`\\sqrt{ a^2 + b^2 }`. - - Examples - -------- - >>> x = np.array([-1.2, 1.2]) - >>> np.absolute(x) - array([ 1.2, 1.2]) - >>> np.absolute(1.2 + 1j) - 1.5620499351813308 - - Plot the function over ``[-10, 10]``: - - >>> import matplotlib.pyplot as plt - - >>> x = np.linspace(-10, 10, 101) - >>> plt.plot(x, np.absolute(x)) - >>> plt.show() - - Plot the function over the complex plane: - - >>> xx = x + 1j * x[:, np.newaxis] - >>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10]) - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'add', - """ - Add arguments element-wise. - - Parameters - ---------- - x1, x2 : array_like - The arrays to be added. 
If ``x1.shape != x2.shape``, they must be - broadcastable to a common shape (which may be the shape of one or - the other). - - Returns - ------- - y : ndarray or scalar - The sum of `x1` and `x2`, element-wise. Returns a scalar if - both `x1` and `x2` are scalars. - - Notes - ----- - Equivalent to `x1` + `x2` in terms of array broadcasting. - - Examples - -------- - >>> np.add(1.0, 4.0) - 5.0 - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.add(x1, x2) - array([[ 0., 2., 4.], - [ 3., 5., 7.], - [ 6., 8., 10.]]) - - """) - -add_newdoc('numpy.core.umath', 'arccos', - """ - Trigonometric inverse cosine, element-wise. - - The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``. - - Parameters - ---------- - x : array_like - `x`-coordinate on the unit circle. - For real arguments, the domain is [-1, 1]. - - out : ndarray, optional - Array of the same shape as `a`, to store results in. See - `doc.ufuncs` (Section "Output arguments") for more details. - - Returns - ------- - angle : ndarray - The angle of the ray intersecting the unit circle at the given - `x`-coordinate in radians [0, pi]. If `x` is a scalar then a - scalar is returned, otherwise an array of the same shape as `x` - is returned. - - See Also - -------- - cos, arctan, arcsin, emath.arccos - - Notes - ----- - `arccos` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `cos(z) = x`. The convention is to return - the angle `z` whose real part lies in `[0, pi]`. - - For real-valued input data types, `arccos` always returns real output. - For each value that cannot be expressed as a real number or infinity, - it yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arccos` is a complex analytic function that - has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from - above on the former and from below on the latter. - - The inverse `cos` is also known as `acos` or cos^-1. 
- - References - ---------- - M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ - - Examples - -------- - We expect the arccos of 1 to be 0, and of -1 to be pi: - - >>> np.arccos([1, -1]) - array([ 0. , 3.14159265]) - - Plot arccos: - - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-1, 1, num=100) - >>> plt.plot(x, np.arccos(x)) - >>> plt.axis('tight') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'arccosh', - """ - Inverse hyperbolic cosine, elementwise. - - Parameters - ---------- - x : array_like - Input array. - out : ndarray, optional - Array of the same shape as `x`, to store results in. - See `doc.ufuncs` (Section "Output arguments") for details. - - - Returns - ------- - y : ndarray - Array of the same shape as `x`. - - See Also - -------- - - cosh, arcsinh, sinh, arctanh, tanh - - Notes - ----- - `arccosh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `cosh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi, pi]` and the real part in - ``[0, inf]``. - - For real-valued input data types, `arccosh` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arccosh` is a complex analytical function that - has a branch cut `[-inf, 1]` and is continuous from above on it. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse hyperbolic function", - http://en.wikipedia.org/wiki/Arccosh - - Examples - -------- - >>> np.arccosh([np.e, 10.0]) - array([ 1.65745445, 2.99322285]) - >>> np.arccosh(1) - 0.0 - - """) - -add_newdoc('numpy.core.umath', 'arcsin', - """ - Inverse sine, element-wise. 
- - Parameters - ---------- - x : array_like - `y`-coordinate on the unit circle. - - out : ndarray, optional - Array of the same shape as `x`, in which to store the results. - See `doc.ufuncs` (Section "Output arguments") for more details. - - Returns - ------- - angle : ndarray - The inverse sine of each element in `x`, in radians and in the - closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar - is returned, otherwise an array. - - See Also - -------- - sin, cos, arccos, tan, arctan, arctan2, emath.arcsin - - Notes - ----- - `arcsin` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that :math:`sin(z) = x`. The convention is to - return the angle `z` whose real part lies in [-pi/2, pi/2]. - - For real-valued input data types, *arcsin* always returns real output. - For each value that cannot be expressed as a real number or infinity, - it yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arcsin` is a complex analytic function that - has, by convention, the branch cuts [-inf, -1] and [1, inf] and is - continuous from above on the former and from below on the latter. - - The inverse sine is also known as `asin` or sin^{-1}. - - References - ---------- - Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, - 10th printing, New York: Dover, 1964, pp. 79ff. - http://www.math.sfu.ca/~cbm/aands/ - - Examples - -------- - >>> np.arcsin(1) # pi/2 - 1.5707963267948966 - >>> np.arcsin(-1) # -pi/2 - -1.5707963267948966 - >>> np.arcsin(0) - 0.0 - - """) - -add_newdoc('numpy.core.umath', 'arcsinh', - """ - Inverse hyperbolic sine elementwise. - - Parameters - ---------- - x : array_like - Input array. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See `doc.ufuncs`. - - Returns - ------- - out : ndarray - Array of of the same shape as `x`. 
- - Notes - ----- - `arcsinh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `sinh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi/2, pi/2]`. - - For real-valued input data types, `arcsinh` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - returns ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arccos` is a complex analytical function that - has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from - the right on the former and from the left on the latter. - - The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse hyperbolic function", - http://en.wikipedia.org/wiki/Arcsinh - - Examples - -------- - >>> np.arcsinh(np.array([np.e, 10.0])) - array([ 1.72538256, 2.99822295]) - - """) - -add_newdoc('numpy.core.umath', 'arctan', - """ - Trigonometric inverse tangent, element-wise. - - The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``. - - Parameters - ---------- - x : array_like - Input values. `arctan` is applied to each element of `x`. - - Returns - ------- - out : ndarray - Out has the same shape as `x`. Its real part is in - ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``). - It is a scalar if `x` is a scalar. - - See Also - -------- - arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`) - and the positive `x`-axis. - angle : Argument of complex values. - - Notes - ----- - `arctan` is a multi-valued function: for each `x` there are infinitely - many numbers `z` such that tan(`z`) = `x`. The convention is to return - the angle `z` whose real part lies in [-pi/2, pi/2]. 
- - For real-valued input data types, `arctan` always returns real output. - For each value that cannot be expressed as a real number or infinity, - it yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arctan` is a complex analytic function that - has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous - from the left on the former and from the right on the latter. - - The inverse tangent is also known as `atan` or tan^{-1}. - - References - ---------- - Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, - 10th printing, New York: Dover, 1964, pp. 79. - http://www.math.sfu.ca/~cbm/aands/ - - Examples - -------- - We expect the arctan of 0 to be 0, and of 1 to be pi/4: - - >>> np.arctan([0, 1]) - array([ 0. , 0.78539816]) - - >>> np.pi/4 - 0.78539816339744828 - - Plot arctan: - - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-10, 10) - >>> plt.plot(x, np.arctan(x)) - >>> plt.axis('tight') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'arctan2', - """ - Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly. - - The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is - the signed angle in radians between the ray ending at the origin and - passing through the point (1,0), and the ray ending at the origin and - passing through the point (`x2`, `x1`). (Note the role reversal: the - "`y`-coordinate" is the first function parameter, the "`x`-coordinate" - is the second.) By IEEE convention, this function is defined for - `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see - Notes for specific values). - - This function is not defined for complex-valued arguments; for the - so-called argument of complex values, use `angle`. - - Parameters - ---------- - x1 : array_like, real-valued - `y`-coordinates. - x2 : array_like, real-valued - `x`-coordinates. `x2` must be broadcastable to match the shape of - `x1` or vice versa. 
- - Returns - ------- - angle : ndarray - Array of angles in radians, in the range ``[-pi, pi]``. - - See Also - -------- - arctan, tan, angle - - Notes - ----- - *arctan2* is identical to the `atan2` function of the underlying - C library. The following special values are defined in the C - standard: [1]_ - - ====== ====== ================ - `x1` `x2` `arctan2(x1,x2)` - ====== ====== ================ - +/- 0 +0 +/- 0 - +/- 0 -0 +/- pi - > 0 +/-inf +0 / +pi - < 0 +/-inf -0 / -pi - +/-inf +inf +/- (pi/4) - +/-inf -inf +/- (3*pi/4) - ====== ====== ================ - - Note that +0 and -0 are distinct floating point numbers, as are +inf - and -inf. - - References - ---------- - .. [1] ISO/IEC standard 9899:1999, "Programming language C." - - Examples - -------- - Consider four points in different quadrants: - - >>> x = np.array([-1, +1, +1, -1]) - >>> y = np.array([-1, -1, +1, +1]) - >>> np.arctan2(y, x) * 180 / np.pi - array([-135., -45., 45., 135.]) - - Note the order of the parameters. `arctan2` is defined also when `x2` = 0 - and at several other special points, obtaining values in - the range ``[-pi, pi]``: - - >>> np.arctan2([1., -1.], [0., 0.]) - array([ 1.57079633, -1.57079633]) - >>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf]) - array([ 0. , 3.14159265, 0.78539816]) - - """) - -add_newdoc('numpy.core.umath', '_arg', - """ - DO NOT USE, ONLY FOR TESTING - """) - -add_newdoc('numpy.core.umath', 'arctanh', - """ - Inverse hyperbolic tangent elementwise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Array of the same shape as `x`. - - See Also - -------- - emath.arctanh - - Notes - ----- - `arctanh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `tanh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi/2, pi/2]`. - - For real-valued input data types, `arctanh` always returns real output. 
- For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arctanh` is a complex analytical function that - has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from - above on the former and from below on the latter. - - The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse hyperbolic function", - http://en.wikipedia.org/wiki/Arctanh - - Examples - -------- - >>> np.arctanh([0, -0.5]) - array([ 0. , -0.54930614]) - - """) - -add_newdoc('numpy.core.umath', 'bitwise_and', - """ - Compute the bit-wise AND of two arrays element-wise. - - Computes the bit-wise AND of the underlying binary representation of - the integers in the input arrays. This ufunc implements the C/Python - operator ``&``. - - Parameters - ---------- - x1, x2 : array_like - Only integer types are handled (including booleans). - - Returns - ------- - out : array_like - Result. - - See Also - -------- - logical_and - bitwise_or - bitwise_xor - binary_repr : - Return the binary representation of the input number as a string. - - Examples - -------- - The number 13 is represented by ``00001101``. Likewise, 17 is - represented by ``00010001``. 
The bit-wise AND of 13 and 17 is - therefore ``000000001``, or 1: - - >>> np.bitwise_and(13, 17) - 1 - - >>> np.bitwise_and(14, 13) - 12 - >>> np.binary_repr(12) - '1100' - >>> np.bitwise_and([14,3], 13) - array([12, 1]) - - >>> np.bitwise_and([11,7], [4,25]) - array([0, 1]) - >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) - array([ 2, 4, 16]) - >>> np.bitwise_and([True, True], [False, True]) - array([False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'bitwise_or', - """ - Compute the bit-wise OR of two arrays element-wise. - - Computes the bit-wise OR of the underlying binary representation of - the integers in the input arrays. This ufunc implements the C/Python - operator ``|``. - - Parameters - ---------- - x1, x2 : array_like - Only integer types are handled (including booleans). - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - out : array_like - Result. - - See Also - -------- - logical_or - bitwise_and - bitwise_xor - binary_repr : - Return the binary representation of the input number as a string. - - Examples - -------- - The number 13 has the binaray representation ``00001101``. Likewise, - 16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is - then ``000111011``, or 29: - - >>> np.bitwise_or(13, 16) - 29 - >>> np.binary_repr(29) - '11101' - - >>> np.bitwise_or(32, 2) - 34 - >>> np.bitwise_or([33, 4], 1) - array([33, 5]) - >>> np.bitwise_or([33, 4], [1, 2]) - array([33, 6]) - - >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4])) - array([ 6, 5, 255]) - >>> np.array([2, 5, 255]) | np.array([4, 4, 4]) - array([ 6, 5, 255]) - >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32), - ... 
np.array([4, 4, 4, 2147483647L], dtype=np.int32)) - array([ 6, 5, 255, 2147483647]) - >>> np.bitwise_or([True, True], [False, True]) - array([ True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'bitwise_xor', - """ - Compute the bit-wise XOR of two arrays element-wise. - - Computes the bit-wise XOR of the underlying binary representation of - the integers in the input arrays. This ufunc implements the C/Python - operator ``^``. - - Parameters - ---------- - x1, x2 : array_like - Only integer types are handled (including booleans). - - Returns - ------- - out : array_like - Result. - - See Also - -------- - logical_xor - bitwise_and - bitwise_or - binary_repr : - Return the binary representation of the input number as a string. - - Examples - -------- - The number 13 is represented by ``00001101``. Likewise, 17 is - represented by ``00010001``. The bit-wise XOR of 13 and 17 is - therefore ``00011100``, or 28: - - >>> np.bitwise_xor(13, 17) - 28 - >>> np.binary_repr(28) - '11100' - - >>> np.bitwise_xor(31, 5) - 26 - >>> np.bitwise_xor([31,3], 5) - array([26, 6]) - - >>> np.bitwise_xor([31,3], [5,6]) - array([26, 5]) - >>> np.bitwise_xor([True, True], [False, True]) - array([ True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'ceil', - """ - Return the ceiling of the input, element-wise. - - The ceil of the scalar `x` is the smallest integer `i`, such that - `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - y : {ndarray, scalar} - The ceiling of each element in `x`, with `float` dtype. - - See Also - -------- - floor, trunc, rint - - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.ceil(a) - array([-1., -1., -0., 1., 2., 2., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'trunc', - """ - Return the truncated value of the input, element-wise. 
- - The truncated value of the scalar `x` is the nearest integer `i` which - is closer to zero than `x` is. In short, the fractional part of the - signed number `x` is discarded. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - y : {ndarray, scalar} - The truncated value of each element in `x`. - - See Also - -------- - ceil, floor, rint - - Notes - ----- - .. versionadded:: 1.3.0 - - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.trunc(a) - array([-1., -1., -0., 0., 1., 1., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'conjugate', - """ - Return the complex conjugate, element-wise. - - The complex conjugate of a complex number is obtained by changing the - sign of its imaginary part. - - Parameters - ---------- - x : array_like - Input value. - - Returns - ------- - y : ndarray - The complex conjugate of `x`, with same dtype as `y`. - - Examples - -------- - >>> np.conjugate(1+2j) - (1-2j) - - >>> x = np.eye(2) + 1j * np.eye(2) - >>> np.conjugate(x) - array([[ 1.-1.j, 0.-0.j], - [ 0.-0.j, 1.-1.j]]) - - """) - -add_newdoc('numpy.core.umath', 'cos', - """ - Cosine elementwise. - - Parameters - ---------- - x : array_like - Input array in radians. - out : ndarray, optional - Output array of same shape as `x`. - - Returns - ------- - y : ndarray - The corresponding cosine values. - - Raises - ------ - ValueError: invalid return array shape - if `out` is provided and `out.shape` != `x.shape` (See Examples) - - Notes - ----- - If `out` is provided, the function writes the result into it, - and returns a reference to `out`. (See Examples) - - References - ---------- - M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. - New York, NY: Dover, 1972. 
- - Examples - -------- - >>> np.cos(np.array([0, np.pi/2, np.pi])) - array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) - >>> - >>> # Example of providing the optional output parameter - >>> out2 = np.cos([0.1], out1) - >>> out2 is out1 - True - >>> - >>> # Example of ValueError due to provision of shape mis-matched `out` - >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - ValueError: invalid return array shape - - """) - -add_newdoc('numpy.core.umath', 'cosh', - """ - Hyperbolic cosine, element-wise. - - Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Output array of same shape as `x`. - - Examples - -------- - >>> np.cosh(0) - 1.0 - - The hyperbolic cosine describes the shape of a hanging cable: - - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-4, 4, 1000) - >>> plt.plot(x, np.cosh(x)) - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'degrees', - """ - Convert angles from radians to degrees. - - Parameters - ---------- - x : array_like - Input array in radians. - out : ndarray, optional - Output array of same shape as x. - - Returns - ------- - y : ndarray of floats - The corresponding degree values; if `out` was supplied this is a - reference to it. - - See Also - -------- - rad2deg : equivalent function - - Examples - -------- - Convert a radian array to degrees - - >>> rad = np.arange(12.)*np.pi/6 - >>> np.degrees(rad) - array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., - 270., 300., 330.]) - - >>> out = np.zeros((rad.shape)) - >>> r = degrees(rad, out) - >>> np.all(r == out) - True - - """) - -add_newdoc('numpy.core.umath', 'rad2deg', - """ - Convert angles from radians to degrees. - - Parameters - ---------- - x : array_like - Angle in radians. - out : ndarray, optional - Array into which the output is placed. 
Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - y : ndarray - The corresponding angle in degrees. - - See Also - -------- - deg2rad : Convert angles from degrees to radians. - unwrap : Remove large jumps in angle by wrapping. - - Notes - ----- - .. versionadded:: 1.3.0 - - rad2deg(x) is ``180 * x / pi``. - - Examples - -------- - >>> np.rad2deg(np.pi/2) - 90.0 - - """) - -add_newdoc('numpy.core.umath', 'divide', - """ - Divide arguments element-wise. - - Parameters - ---------- - x1 : array_like - Dividend array. - x2 : array_like - Divisor array. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - y : {ndarray, scalar} - The quotient `x1/x2`, element-wise. Returns a scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - seterr : Set whether to raise or warn on overflow, underflow and division - by zero. - - Notes - ----- - Equivalent to `x1` / `x2` in terms of array-broadcasting. - - Behavior on division by zero can be changed using `seterr`. - - When both `x1` and `x2` are of an integer type, `divide` will return - integers and throw away the fractional part. Moreover, division by zero - always yields zero in integer arithmetic. - - Examples - -------- - >>> np.divide(2.0, 4.0) - 0.5 - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.divide(x1, x2) - array([[ NaN, 1. , 1. ], - [ Inf, 4. , 2.5], - [ Inf, 7. , 4. ]]) - - Note the behavior with integer types: - - >>> np.divide(2, 4) - 0 - >>> np.divide(2, 4.) 
- 0.5 - - Division by zero always yields zero in integer arithmetic, and does not - raise an exception or a warning: - - >>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int)) - array([0, 0]) - - Division by zero can, however, be caught using `seterr`: - - >>> old_err_state = np.seterr(divide='raise') - >>> np.divide(1, 0) - Traceback (most recent call last): - File "", line 1, in - FloatingPointError: divide by zero encountered in divide - - >>> ignored_states = np.seterr(**old_err_state) - >>> np.divide(1, 0) - 0 - - """) - -add_newdoc('numpy.core.umath', 'equal', - """ - Return (x1 == x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - not_equal, greater_equal, less_equal, greater, less - - Examples - -------- - >>> np.equal([0, 1, 3], np.arange(3)) - array([ True, True, False], dtype=bool) - - What is compared are values, not types. So an int (1) and an array of - length one can evaluate as True: - - >>> np.equal(1, np.ones(1)) - array([ True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'exp', - """ - Calculate the exponential of all elements in the input array. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - out : ndarray - Output array, element-wise exponential of `x`. - - See Also - -------- - expm1 : Calculate ``exp(x) - 1`` for all elements in the array. - exp2 : Calculate ``2**x`` for all elements in the array. - - Notes - ----- - The irrational number ``e`` is also known as Euler's number. It is - approximately 2.718281, and is the base of the natural logarithm, - ``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`, - then :math:`e^x = y`. For real input, ``exp(x)`` is always positive. - - For complex arguments, ``x = a + ib``, we can write - :math:`e^x = e^a e^{ib}`. 
The first term, :math:`e^a`, is already - known (it is the real argument, described above). The second term, - :math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude - 1 and a periodic phase. - - References - ---------- - .. [1] Wikipedia, "Exponential function", - http://en.wikipedia.org/wiki/Exponential_function - .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions - with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69, - http://www.math.sfu.ca/~cbm/aands/page_69.htm - - Examples - -------- - Plot the magnitude and phase of ``exp(x)`` in the complex plane: - - >>> import matplotlib.pyplot as plt - - >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) - >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane - >>> out = np.exp(xx) - - >>> plt.subplot(121) - >>> plt.imshow(np.abs(out), - ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]) - >>> plt.title('Magnitude of exp(x)') - - >>> plt.subplot(122) - >>> plt.imshow(np.angle(out), - ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]) - >>> plt.title('Phase (angle) of exp(x)') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'exp2', - """ - Calculate `2**p` for all `p` in the input array. - - Parameters - ---------- - x : array_like - Input values. - - out : ndarray, optional - Array to insert results into. - - Returns - ------- - out : ndarray - Element-wise 2 to the power `x`. - - See Also - -------- - exp : calculate x**p. - - Notes - ----- - .. versionadded:: 1.3.0 - - - - Examples - -------- - >>> np.exp2([2, 3]) - array([ 4., 8.]) - - """) - -add_newdoc('numpy.core.umath', 'expm1', - """ - Calculate ``exp(x) - 1`` for all elements in the array. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - out : ndarray - Element-wise exponential minus one: ``out = exp(x) - 1``. - - See Also - -------- - log1p : ``log(1 + x)``, the inverse of expm1. 
- - - Notes - ----- - This function provides greater precision than the formula ``exp(x) - 1`` - for small values of ``x``. - - Examples - -------- - The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to - about 32 significant digits. This example shows the superiority of - expm1 in this case. - - >>> np.expm1(1e-10) - 1.00000000005e-10 - >>> np.exp(1e-10) - 1 - 1.000000082740371e-10 - - """) - -add_newdoc('numpy.core.umath', 'fabs', - """ - Compute the absolute values elementwise. - - This function returns the absolute values (positive magnitude) of the data - in `x`. Complex values are not handled, use `absolute` to find the - absolute values of complex data. - - Parameters - ---------- - x : array_like - The array of numbers for which the absolute values are required. If - `x` is a scalar, the result `y` will also be a scalar. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - y : {ndarray, scalar} - The absolute values of `x`, the returned values are always floats. - - See Also - -------- - absolute : Absolute values including `complex` types. - - Examples - -------- - >>> np.fabs(-1) - 1.0 - >>> np.fabs([-1.2, 1.2]) - array([ 1.2, 1.2]) - - """) - -add_newdoc('numpy.core.umath', 'floor', - """ - Return the floor of the input, element-wise. - - The floor of the scalar `x` is the largest integer `i`, such that - `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - y : {ndarray, scalar} - The floor of each element in `x`. - - See Also - -------- - ceil, trunc, rint - - Notes - ----- - Some spreadsheet programs calculate the "floor-towards-zero", in other - words ``floor(-2.5) == -2``. NumPy, however, uses the a definition of - `floor` such that `floor(-2.5) == -3`. 
- - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.floor(a) - array([-2., -2., -1., 0., 1., 1., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'floor_divide', - """ - Return the largest integer smaller or equal to the division of the inputs. - - Parameters - ---------- - x1 : array_like - Numerator. - x2 : array_like - Denominator. - - Returns - ------- - y : ndarray - y = floor(`x1`/`x2`) - - - See Also - -------- - divide : Standard division. - floor : Round a number to the nearest integer toward minus infinity. - ceil : Round a number to the nearest integer toward infinity. - - Examples - -------- - >>> np.floor_divide(7,3) - 2 - >>> np.floor_divide([1., 2., 3., 4.], 2.5) - array([ 0., 0., 1., 1.]) - - """) - -add_newdoc('numpy.core.umath', 'fmod', - """ - Return the element-wise remainder of division. - - This is the NumPy implementation of the Python modulo operator `%`. - - Parameters - ---------- - x1 : array_like - Dividend. - x2 : array_like - Divisor. - - Returns - ------- - y : array_like - The remainder of the division of `x1` by `x2`. - - See Also - -------- - remainder : Modulo operation where the quotient is `floor(x1/x2)`. - divide - - Notes - ----- - The result of the modulo operation for negative dividend and divisors is - bound by conventions. In `fmod`, the sign of the remainder is the sign of - the dividend. In `remainder`, the sign of the divisor does not affect the - sign of the result. - - Examples - -------- - >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) - array([-1, 0, -1, 1, 0, 1]) - >>> np.remainder([-3, -2, -1, 1, 2, 3], 2) - array([1, 0, 1, 1, 0, 1]) - - >>> np.fmod([5, 3], [2, 2.]) - array([ 1., 1.]) - >>> a = np.arange(-3, 3).reshape(3, 2) - >>> a - array([[-3, -2], - [-1, 0], - [ 1, 2]]) - >>> np.fmod(a, [2,2]) - array([[-1, 0], - [-1, 0], - [ 1, 0]]) - - """) - -add_newdoc('numpy.core.umath', 'greater', - """ - Return the truth value of (x1 > x2) element-wise. 
- - Parameters - ---------- - x1, x2 : array_like - Input arrays. If ``x1.shape != x2.shape``, they must be - broadcastable to a common shape (which may be the shape of one or - the other). - - Returns - ------- - out : bool or ndarray of bool - Array of bools, or a single bool if `x1` and `x2` are scalars. - - - See Also - -------- - greater_equal, less, less_equal, equal, not_equal - - Examples - -------- - >>> np.greater([4,2],[2,2]) - array([ True, False], dtype=bool) - - If the inputs are ndarrays, then np.greater is equivalent to '>'. - - >>> a = np.array([4,2]) - >>> b = np.array([2,2]) - >>> a > b - array([ True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'greater_equal', - """ - Return the truth value of (x1 >= x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. If ``x1.shape != x2.shape``, they must be - broadcastable to a common shape (which may be the shape of one or - the other). - - Returns - ------- - out : bool or ndarray of bool - Array of bools, or a single bool if `x1` and `x2` are scalars. - - See Also - -------- - greater, less, less_equal, equal, not_equal - - Examples - -------- - >>> np.greater_equal([4, 2, 1], [2, 2, 2]) - array([ True, True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'hypot', - """ - Given the "legs" of a right triangle, return its hypotenuse. - - Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or - `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type), - it is broadcast for use with each element of the other argument. - (See Examples) - - Parameters - ---------- - x1, x2 : array_like - Leg of the triangle(s). - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - z : ndarray - The hypotenuse of the triangle(s). 
- - Examples - -------- - >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) - array([[ 5., 5., 5.], - [ 5., 5., 5.], - [ 5., 5., 5.]]) - - Example showing broadcast of scalar_like argument: - - >>> np.hypot(3*np.ones((3, 3)), [4]) - array([[ 5., 5., 5.], - [ 5., 5., 5.], - [ 5., 5., 5.]]) - - """) - -add_newdoc('numpy.core.umath', 'invert', - """ - Compute bit-wise inversion, or bit-wise NOT, element-wise. - - Computes the bit-wise NOT of the underlying binary representation of - the integers in the input arrays. This ufunc implements the C/Python - operator ``~``. - - For signed integer inputs, the two's complement is returned. - In a two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement - system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. - - Parameters - ---------- - x1 : array_like - Only integer types are handled (including booleans). - - Returns - ------- - out : array_like - Result. - - See Also - -------- - bitwise_and, bitwise_or, bitwise_xor - logical_not - binary_repr : - Return the binary representation of the input number as a string. - - Notes - ----- - `bitwise_not` is an alias for `invert`: - - >>> np.bitwise_not is np.invert - True - - References - ---------- - .. [1] Wikipedia, "Two's complement", - http://en.wikipedia.org/wiki/Two's_complement - - Examples - -------- - We've seen that 13 is represented by ``00001101``. 
- The invert or bit-wise NOT of 13 is then: - - >>> np.invert(np.array([13], dtype=uint8)) - array([242], dtype=uint8) - >>> np.binary_repr(x, width=8) - '00001101' - >>> np.binary_repr(242, width=8) - '11110010' - - The result depends on the bit-width: - - >>> np.invert(np.array([13], dtype=uint16)) - array([65522], dtype=uint16) - >>> np.binary_repr(x, width=16) - '0000000000001101' - >>> np.binary_repr(65522, width=16) - '1111111111110010' - - When using signed integer types the result is the two's complement of - the result for the unsigned type: - - >>> np.invert(np.array([13], dtype=int8)) - array([-14], dtype=int8) - >>> np.binary_repr(-14, width=8) - '11110010' - - Booleans are accepted as well: - - >>> np.invert(array([True, False])) - array([False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'isfinite', - """ - Test element-wise for finite-ness (not infinity or not Not a Number). - - The result is returned as a boolean array. - - Parameters - ---------- - x : array_like - Input values. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See `doc.ufuncs`. - - Returns - ------- - y : ndarray, bool - For scalar input, the result is a new boolean with value True - if the input is finite; otherwise the value is False (input is - either positive infinity, negative infinity or Not a Number). - - For array input, the result is a boolean array with the same - dimensions as the input and the values are True if the corresponding - element of the input is finite; otherwise the values are False (element - is either positive infinity, negative infinity or Not a Number). - - See Also - -------- - isinf, isneginf, isposinf, isnan - - Notes - ----- - Not a Number, positive infinity and negative infinity are considered - to be non-finite. - - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). 
This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - Errors result if the second argument is also supplied when `x` is a scalar - input, or if first and second arguments have different shapes. - - Examples - -------- - >>> np.isfinite(1) - True - >>> np.isfinite(0) - True - >>> np.isfinite(np.nan) - False - >>> np.isfinite(np.inf) - False - >>> np.isfinite(np.NINF) - False - >>> np.isfinite([np.log(-1.),1.,np.log(0)]) - array([False, True, False], dtype=bool) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isfinite(x, y) - array([0, 1, 0]) - >>> y - array([0, 1, 0]) - - """) - -add_newdoc('numpy.core.umath', 'isinf', - """ - Test element-wise for positive or negative infinity. - - Return a bool-type array, the same shape as `x`, True where ``x == - +/-inf``, False everywhere else. - - Parameters - ---------- - x : array_like - Input values - out : array_like, optional - An array with the same shape as `x` to store the result. - - Returns - ------- - y : bool (scalar) or bool-type ndarray - For scalar input, the result is a new boolean with value True - if the input is positive or negative infinity; otherwise the value - is False. - - For array input, the result is a boolean array with the same - shape as the input and the values are True where the - corresponding element of the input is positive or negative - infinity; elsewhere the values are False. If a second argument - was supplied the result is stored there. If the type of that array - is a numeric type the result is represented as zeros and ones, if - the type is boolean then as False and True, respectively. - The return value `y` is then a reference to that array. - - See Also - -------- - isneginf, isposinf, isnan, isfinite - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). 
- - Errors result if the second argument is supplied when the first - argument is a scalar, or if the first and second arguments have - different shapes. - - Examples - -------- - >>> np.isinf(np.inf) - True - >>> np.isinf(np.nan) - False - >>> np.isinf(np.NINF) - True - >>> np.isinf([np.inf, -np.inf, 1.0, np.nan]) - array([ True, True, False, False], dtype=bool) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isinf(x, y) - array([1, 0, 1]) - >>> y - array([1, 0, 1]) - - """) - -add_newdoc('numpy.core.umath', 'isnan', - """ - Test element-wise for Not a Number (NaN), return result as a bool array. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - y : {ndarray, bool} - For scalar input, the result is a new boolean with value True - if the input is NaN; otherwise the value is False. - - For array input, the result is a boolean array with the same - dimensions as the input and the values are True if the corresponding - element of the input is NaN; otherwise the values are False. - - See Also - -------- - isinf, isneginf, isposinf, isfinite - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - Examples - -------- - >>> np.isnan(np.nan) - True - >>> np.isnan(np.inf) - False - >>> np.isnan([np.log(-1.),1.,np.log(0)]) - array([ True, False, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'left_shift', - """ - Shift the bits of an integer to the left. - - Bits are shifted to the left by appending `x2` 0s at the right of `x1`. - Since the internal representation of numbers is in binary format, this - operation is equivalent to multiplying `x1` by ``2**x2``. - - Parameters - ---------- - x1 : array_like of integer type - Input values. - x2 : array_like of integer type - Number of zeros to append to `x1`. Has to be non-negative. 
- - Returns - ------- - out : array of integer type - Return `x1` with bits shifted `x2` times to the left. - - See Also - -------- - right_shift : Shift the bits of an integer to the right. - binary_repr : Return the binary representation of the input number - as a string. - - Examples - -------- - >>> np.binary_repr(5) - '101' - >>> np.left_shift(5, 2) - 20 - >>> np.binary_repr(20) - '10100' - - >>> np.left_shift(5, [1,2,3]) - array([10, 20, 40]) - - """) - -add_newdoc('numpy.core.umath', 'less', - """ - Return the truth value of (x1 < x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. If ``x1.shape != x2.shape``, they must be - broadcastable to a common shape (which may be the shape of one or - the other). - - Returns - ------- - out : bool or ndarray of bool - Array of bools, or a single bool if `x1` and `x2` are scalars. - - See Also - -------- - greater, less_equal, greater_equal, equal, not_equal - - Examples - -------- - >>> np.less([1, 2], [2, 2]) - array([ True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'less_equal', - """ - Return the truth value of (x1 =< x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. If ``x1.shape != x2.shape``, they must be - broadcastable to a common shape (which may be the shape of one or - the other). - - Returns - ------- - out : bool or ndarray of bool - Array of bools, or a single bool if `x1` and `x2` are scalars. - - See Also - -------- - greater, less, greater_equal, equal, not_equal - - Examples - -------- - >>> np.less_equal([4, 2, 1], [2, 2, 2]) - array([False, True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'log', - """ - Natural logarithm, element-wise. - - The natural logarithm `log` is the inverse of the exponential function, - so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`. - - Parameters - ---------- - x : array_like - Input value. 
- - Returns - ------- - y : ndarray - The natural logarithm of `x`, element-wise. - - See Also - -------- - log10, log2, log1p, emath.log - - Notes - ----- - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `exp(z) = x`. The convention is to return the `z` - whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `log` is a complex analytical function that - has a branch cut `[-inf, 0]` and is continuous from above on it. `log` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm - - Examples - -------- - >>> np.log([1, np.e, np.e**2, 0]) - array([ 0., 1., 2., -Inf]) - - """) - -add_newdoc('numpy.core.umath', 'log10', - """ - Return the base 10 logarithm of the input array, element-wise. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - The logarithm to the base 10 of `x`, element-wise. NaNs are - returned where x is negative. - - See Also - -------- - emath.log10 - - Notes - ----- - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `10**z = x`. The convention is to return the `z` - whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log10` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. 
- - For complex-valued input, `log10` is a complex analytical function that - has a branch cut `[-inf, 0]` and is continuous from above on it. `log10` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm - - Examples - -------- - >>> np.log10([1e-15, -3.]) - array([-15., NaN]) - - """) - -add_newdoc('numpy.core.umath', 'log2', - """ - Base-2 logarithm of `x`. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - Base-2 logarithm of `x`. - - See Also - -------- - log, log10, log1p, emath.log2 - - Notes - ----- - .. versionadded:: 1.3.0 - - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `2**z = x`. The convention is to return the `z` - whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log2` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `log2` is a complex analytical function that - has a branch cut `[-inf, 0]` and is continuous from above on it. `log2` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - Examples - -------- - >>> x = np.array([0, 1, 2, 2**4]) - >>> np.log2(x) - array([-Inf, 0., 1., 4.]) - - >>> xi = np.array([0+1.j, 1, 2+0.j, 4.j]) - >>> np.log2(xi) - array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j]) - - """) - -add_newdoc('numpy.core.umath', 'logaddexp', - """ - Logarithm of the sum of exponentiations of the inputs. - - Calculates ``log(exp(x1) + exp(x2))``. 
This function is useful in - statistics where the calculated probabilities of events may be so small - as to exceed the range of normal floating point numbers. In such cases - the logarithm of the calculated probability is stored. This function - allows adding probabilities stored in such a fashion. - - Parameters - ---------- - x1, x2 : array_like - Input values. - - Returns - ------- - result : ndarray - Logarithm of ``exp(x1) + exp(x2)``. - - See Also - -------- - logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2. - - Notes - ----- - .. versionadded:: 1.3.0 - - Examples - -------- - >>> prob1 = np.log(1e-50) - >>> prob2 = np.log(2.5e-50) - >>> prob12 = np.logaddexp(prob1, prob2) - >>> prob12 - -113.87649168120691 - >>> np.exp(prob12) - 3.5000000000000057e-50 - - """) - -add_newdoc('numpy.core.umath', 'logaddexp2', - """ - Logarithm of the sum of exponentiations of the inputs in base-2. - - Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine - learning when the calculated probabilities of events may be so small - as to exceed the range of normal floating point numbers. In such cases - the base-2 logarithm of the calculated probability can be used instead. - This function allows adding probabilities stored in such a fashion. - - Parameters - ---------- - x1, x2 : array_like - Input values. - out : ndarray, optional - Array to store results in. - - Returns - ------- - result : ndarray - Base-2 logarithm of ``2**x1 + 2**x2``. - - See Also - -------- - logaddexp: Logarithm of the sum of exponentiations of the inputs. - - Notes - ----- - .. 
versionadded:: 1.3.0 - - Examples - -------- - >>> prob1 = np.log2(1e-50) - >>> prob2 = np.log2(2.5e-50) - >>> prob12 = np.logaddexp2(prob1, prob2) - >>> prob1, prob2, prob12 - (-166.09640474436813, -164.77447664948076, -164.28904982231052) - >>> 2**prob12 - 3.4999999999999914e-50 - - """) - -add_newdoc('numpy.core.umath', 'log1p', - """ - Return the natural logarithm of one plus the input array, element-wise. - - Calculates ``log(1 + x)``. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - Natural logarithm of `1 + x`, element-wise. - - See Also - -------- - expm1 : ``exp(x) - 1``, the inverse of `log1p`. - - Notes - ----- - For real-valued input, `log1p` is accurate also for `x` so small - that `1 + x == 1` in floating-point accuracy. - - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `exp(z) = 1 + x`. The convention is to return - the `z` whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log1p` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `log1p` is a complex analytical function that - has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm - - Examples - -------- - >>> np.log1p(1e-99) - 1e-99 - >>> np.log(1 + 1e-99) - 0.0 - - """) - -add_newdoc('numpy.core.umath', 'logical_and', - """ - Compute the truth value of x1 AND x2 elementwise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. 
`x1` and `x2` must be of the same shape. - - - Returns - ------- - y : {ndarray, bool} - Boolean result with the same shape as `x1` and `x2` of the logical - AND operation on corresponding elements of `x1` and `x2`. - - See Also - -------- - logical_or, logical_not, logical_xor - bitwise_and - - Examples - -------- - >>> np.logical_and(True, False) - False - >>> np.logical_and([True, False], [False, False]) - array([False, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_and(x>1, x<4) - array([False, False, True, True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'logical_not', - """ - Compute the truth value of NOT x elementwise. - - Parameters - ---------- - x : array_like - Logical NOT is applied to the elements of `x`. - - Returns - ------- - y : bool or ndarray of bool - Boolean result with the same shape as `x` of the NOT operation - on elements of `x`. - - See Also - -------- - logical_and, logical_or, logical_xor - - Examples - -------- - >>> np.logical_not(3) - False - >>> np.logical_not([True, False, 0, 1]) - array([False, True, True, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_not(x<3) - array([False, False, False, True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'logical_or', - """ - Compute the truth value of x1 OR x2 elementwise. - - Parameters - ---------- - x1, x2 : array_like - Logical OR is applied to the elements of `x1` and `x2`. - They have to be of the same shape. - - Returns - ------- - y : {ndarray, bool} - Boolean result with the same shape as `x1` and `x2` of the logical - OR operation on elements of `x1` and `x2`. 
- - See Also - -------- - logical_and, logical_not, logical_xor - bitwise_or - - Examples - -------- - >>> np.logical_or(True, False) - True - >>> np.logical_or([True, False], [False, False]) - array([ True, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_or(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'logical_xor', - """ - Compute the truth value of x1 XOR x2, element-wise. - - Parameters - ---------- - x1, x2 : array_like - Logical XOR is applied to the elements of `x1` and `x2`. They must - be broadcastable to the same shape. - - Returns - ------- - y : bool or ndarray of bool - Boolean result of the logical XOR operation applied to the elements - of `x1` and `x2`; the shape is determined by whether or not - broadcasting of one or both arrays was required. - - See Also - -------- - logical_and, logical_or, logical_not, bitwise_xor - - Examples - -------- - >>> np.logical_xor(True, False) - True - >>> np.logical_xor([True, True, False, False], [True, False, True, False]) - array([False, True, True, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_xor(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) - - Simple example showing support of broadcasting - - >>> np.logical_xor(0, np.eye(2)) - array([[ True, False], - [False, True]], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'maximum', - """ - Element-wise maximum of array elements. - - Compare two arrays and returns a new array containing - the element-wise maxima. If one of the elements being - compared is a nan, then that element is returned. If - both elements are nans then the first is returned. The - latter distinction is important for complex nans, - which are defined as at least one of the real or - imaginary parts being a nan. The net effect is that - nans are propagated. - - Parameters - ---------- - x1, x2 : array_like - The arrays holding the elements to be compared. 
They must have - the same shape, or shapes that can be broadcast to a single shape. - - Returns - ------- - y : {ndarray, scalar} - The maximum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - minimum : - element-wise minimum - - fmax : - element-wise maximum that ignores nans unless both inputs are nans. - - fmin : - element-wise minimum that ignores nans unless both inputs are nans. - - Notes - ----- - Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper - broadcasting. - - Examples - -------- - >>> np.maximum([2, 3, 4], [1, 5, 2]) - array([2, 5, 4]) - - >>> np.maximum(np.eye(2), [0.5, 2]) - array([[ 1. , 2. ], - [ 0.5, 2. ]]) - - >>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan]) - array([ NaN, NaN, NaN]) - >>> np.maximum(np.Inf, 1) - inf - - """) - -add_newdoc('numpy.core.umath', 'minimum', - """ - Element-wise minimum of array elements. - - Compare two arrays and returns a new array containing the element-wise - minima. If one of the elements being compared is a nan, then that element - is returned. If both elements are nans then the first is returned. The - latter distinction is important for complex nans, which are defined as at - least one of the real or imaginary parts being a nan. The net effect is - that nans are propagated. - - Parameters - ---------- - x1, x2 : array_like - The arrays holding the elements to be compared. They must have - the same shape, or shapes that can be broadcast to a single shape. - - Returns - ------- - y : {ndarray, scalar} - The minimum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - maximum : - element-wise minimum that propagates nans. - fmax : - element-wise maximum that ignores nans unless both inputs are nans. - fmin : - element-wise minimum that ignores nans unless both inputs are nans. 
- - Notes - ----- - The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither - x1 nor x2 are nans, but it is faster and does proper broadcasting. - - Examples - -------- - >>> np.minimum([2, 3, 4], [1, 5, 2]) - array([1, 3, 2]) - - >>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting - array([[ 0.5, 0. ], - [ 0. , 1. ]]) - - >>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan]) - array([ NaN, NaN, NaN]) - - """) - -add_newdoc('numpy.core.umath', 'fmax', - """ - Element-wise maximum of array elements. - - Compare two arrays and returns a new array containing the element-wise - maxima. If one of the elements being compared is a nan, then the non-nan - element is returned. If both elements are nans then the first is returned. - The latter distinction is important for complex nans, which are defined as - at least one of the real or imaginary parts being a nan. The net effect is - that nans are ignored when possible. - - Parameters - ---------- - x1, x2 : array_like - The arrays holding the elements to be compared. They must have - the same shape. - - Returns - ------- - y : {ndarray, scalar} - The minimum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - fmin : - element-wise minimum that ignores nans unless both inputs are nans. - maximum : - element-wise maximum that propagates nans. - minimum : - element-wise minimum that propagates nans. - - Notes - ----- - .. versionadded:: 1.3.0 - - The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither - x1 nor x2 are nans, but it is faster and does proper broadcasting. - - Examples - -------- - >>> np.fmax([2, 3, 4], [1, 5, 2]) - array([ 2., 5., 4.]) - - >>> np.fmax(np.eye(2), [0.5, 2]) - array([[ 1. , 2. ], - [ 0.5, 2. ]]) - - >>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan]) - array([ 0., 0., NaN]) - - """) - -add_newdoc('numpy.core.umath', 'fmin', - """ - fmin(x1, x2[, out]) - - Element-wise minimum of array elements. 
- - Compare two arrays and returns a new array containing the element-wise - minima. If one of the elements being compared is a nan, then the non-nan - element is returned. If both elements are nans then the first is returned. - The latter distinction is important for complex nans, which are defined as - at least one of the real or imaginary parts being a nan. The net effect is - that nans are ignored when possible. - - Parameters - ---------- - x1, x2 : array_like - The arrays holding the elements to be compared. They must have - the same shape. - - Returns - ------- - y : {ndarray, scalar} - The minimum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - fmax : - element-wise maximum that ignores nans unless both inputs are nans. - maximum : - element-wise maximum that propagates nans. - minimum : - element-wise minimum that propagates nans. - - Notes - ----- - .. versionadded:: 1.3.0 - - The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither - x1 nor x2 are nans, but it is faster and does proper broadcasting. - - Examples - -------- - >>> np.fmin([2, 3, 4], [1, 5, 2]) - array([2, 5, 4]) - - >>> np.fmin(np.eye(2), [0.5, 2]) - array([[ 1. , 2. ], - [ 0.5, 2. ]]) - - >>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan]) - array([ 0., 0., NaN]) - - """) - -add_newdoc('numpy.core.umath', 'modf', - """ - Return the fractional and integral parts of an array, element-wise. - - The fractional and integral parts are negative if the given number is - negative. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - y1 : ndarray - Fractional part of `x`. - y2 : ndarray - Integral part of `x`. - - Notes - ----- - For integer input the return values are floats. - - Examples - -------- - >>> np.modf([0, 3.5]) - (array([ 0. , 0.5]), array([ 0., 3.])) - >>> np.modf(-0.5) - (-0.5, -0) - - """) - -add_newdoc('numpy.core.umath', 'multiply', - """ - Multiply arguments element-wise. 
- - Parameters - ---------- - x1, x2 : array_like - Input arrays to be multiplied. - - Returns - ------- - y : ndarray - The product of `x1` and `x2`, element-wise. Returns a scalar if - both `x1` and `x2` are scalars. - - Notes - ----- - Equivalent to `x1` * `x2` in terms of array broadcasting. - - Examples - -------- - >>> np.multiply(2.0, 4.0) - 8.0 - - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.multiply(x1, x2) - array([[ 0., 1., 4.], - [ 0., 4., 10.], - [ 0., 7., 16.]]) - - """) - -add_newdoc('numpy.core.umath', 'negative', - """ - Returns an array with the negative of each element of the original array. - - Parameters - ---------- - x : array_like or scalar - Input array. - - Returns - ------- - y : ndarray or scalar - Returned array or scalar: `y = -x`. - - Examples - -------- - >>> np.negative([1.,-1.]) - array([-1., 1.]) - - """) - -add_newdoc('numpy.core.umath', 'not_equal', - """ - Return (x1 != x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. - out : ndarray, optional - A placeholder the same shape as `x1` to store the result. - See `doc.ufuncs` (Section "Output arguments") for more details. - - Returns - ------- - not_equal : ndarray bool, scalar bool - For each element in `x1, x2`, return True if `x1` is not equal - to `x2` and False otherwise. - - - See Also - -------- - equal, greater, greater_equal, less, less_equal - - Examples - -------- - >>> np.not_equal([1.,2.], [1., 3.]) - array([False, True], dtype=bool) - >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) - array([[False, True], - [False, True]], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'ones_like', - """ - Returns an array of ones with the same shape and type as a given array. - - Equivalent to ``a.copy().fill(1)``. - - Please refer to the documentation for `zeros_like` for further details. 
- - See Also - -------- - zeros_like, ones - - Examples - -------- - >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.ones_like(a) - array([[1, 1, 1], - [1, 1, 1]]) - - """) - -add_newdoc('numpy.core.umath', 'power', - """ - First array elements raised to powers from second array, element-wise. - - Raise each base in `x1` to the positionally-corresponding power in - `x2`. `x1` and `x2` must be broadcastable to the same shape. - - Parameters - ---------- - x1 : array_like - The bases. - x2 : array_like - The exponents. - - Returns - ------- - y : ndarray - The bases in `x1` raised to the exponents in `x2`. - - Examples - -------- - Cube each element in a list. - - >>> x1 = range(6) - >>> x1 - [0, 1, 2, 3, 4, 5] - >>> np.power(x1, 3) - array([ 0, 1, 8, 27, 64, 125]) - - Raise the bases to different exponents. - - >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0] - >>> np.power(x1, x2) - array([ 0., 1., 8., 27., 16., 5.]) - - The effect of broadcasting. - - >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) - >>> x2 - array([[1, 2, 3, 3, 2, 1], - [1, 2, 3, 3, 2, 1]]) - >>> np.power(x1, x2) - array([[ 0, 1, 8, 27, 16, 5], - [ 0, 1, 8, 27, 16, 5]]) - - """) - -add_newdoc('numpy.core.umath', 'radians', - """ - Convert angles from degrees to radians. - - Parameters - ---------- - x : array_like - Input array in degrees. - out : ndarray, optional - Output array of same shape as `x`. - - Returns - ------- - y : ndarray - The corresponding radian values. - - See Also - -------- - deg2rad : equivalent function - - Examples - -------- - Convert a degree array to radians - - >>> deg = np.arange(12.) * 30. - >>> np.radians(deg) - array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 , - 2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898, - 5.23598776, 5.75958653]) - - >>> out = np.zeros((deg.shape)) - >>> ret = np.radians(deg, out) - >>> ret is out - True - - """) - -add_newdoc('numpy.core.umath', 'deg2rad', - """ - Convert angles from degrees to radians. 
- - Parameters - ---------- - x : array_like - Angles in degrees. - - Returns - ------- - y : ndarray - The corresponding angle in radians. - - See Also - -------- - rad2deg : Convert angles from radians to degrees. - unwrap : Remove large jumps in angle by wrapping. - - Notes - ----- - .. versionadded:: 1.3.0 - - ``deg2rad(x)`` is ``x * pi / 180``. - - Examples - -------- - >>> np.deg2rad(180) - 3.1415926535897931 - - """) - -add_newdoc('numpy.core.umath', 'reciprocal', - """ - Return the reciprocal of the argument, element-wise. - - Calculates ``1/x``. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - y : ndarray - Return array. - - Notes - ----- - .. note:: - This function is not designed to work with integers. - - For integer arguments with absolute value larger than 1 the result is - always zero because of the way Python handles integer division. - For integer zero the result is an overflow. - - Examples - -------- - >>> np.reciprocal(2.) - 0.5 - >>> np.reciprocal([1, 2., 3.33]) - array([ 1. , 0.5 , 0.3003003]) - - """) - -add_newdoc('numpy.core.umath', 'remainder', - """ - Return element-wise remainder of division. - - Computes ``x1 - floor(x1 / x2) * x2``. - - Parameters - ---------- - x1 : array_like - Dividend array. - x2 : array_like - Divisor array. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - y : ndarray - The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar - if both `x1` and `x2` are scalars. - - See Also - -------- - divide, floor - - Notes - ----- - Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers. 
- - Examples - -------- - >>> np.remainder([4, 7], [2, 3]) - array([0, 1]) - >>> np.remainder(np.arange(7), 5) - array([0, 1, 2, 3, 4, 0, 1]) - - """) - -add_newdoc('numpy.core.umath', 'right_shift', - """ - Shift the bits of an integer to the right. - - Bits are shifted to the right by removing `x2` bits at the right of `x1`. - Since the internal representation of numbers is in binary format, this - operation is equivalent to dividing `x1` by ``2**x2``. - - Parameters - ---------- - x1 : array_like, int - Input values. - x2 : array_like, int - Number of bits to remove at the right of `x1`. - - Returns - ------- - out : ndarray, int - Return `x1` with bits shifted `x2` times to the right. - - See Also - -------- - left_shift : Shift the bits of an integer to the left. - binary_repr : Return the binary representation of the input number - as a string. - - Examples - -------- - >>> np.binary_repr(10) - '1010' - >>> np.right_shift(10, 1) - 5 - >>> np.binary_repr(5) - '101' - - >>> np.right_shift(10, [1,2,3]) - array([5, 2, 1]) - - """) - -add_newdoc('numpy.core.umath', 'rint', - """ - Round elements of the array to the nearest integer. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : {ndarray, scalar} - Output array is same shape and type as `x`. - - See Also - -------- - ceil, floor, trunc - - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.rint(a) - array([-2., -2., -0., 0., 2., 2., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'sign', - """ - Returns an element-wise indication of the sign of a number. - - The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - The sign of `x`. 
- - Examples - -------- - >>> np.sign([-5., 4.5]) - array([-1., 1.]) - >>> np.sign(0) - 0 - - """) - -add_newdoc('numpy.core.umath', 'signbit', - """ - Returns element-wise True where signbit is set (less than zero). - - Parameters - ---------- - x: array_like - The input value(s). - out : ndarray, optional - Array into which the output is placed. Its type is preserved - and it must be of the right shape to hold the output. - See `doc.ufuncs`. - - Returns - ------- - result : ndarray of bool - Output array, or reference to `out` if that was supplied. - - Examples - -------- - >>> np.signbit(-1.2) - True - >>> np.signbit(np.array([1, -2.3, 2.1])) - array([False, True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'copysign', - """ - Change the sign of x1 to that of x2, element-wise. - - If both arguments are arrays or sequences, they have to be of the same - length. If `x2` is a scalar, its sign will be copied to all elements of - `x1`. - - Parameters - ---------- - x1: array_like - Values to change the sign of. - x2: array_like - The sign of `x2` is copied to `x1`. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See doc.ufuncs. - - Returns - ------- - out : array_like - The values of `x1` with the sign of `x2`. - - Examples - -------- - >>> np.copysign(1.3, -1) - -1.3 - >>> 1/np.copysign(0, 1) - inf - >>> 1/np.copysign(0, -1) - -inf - - >>> np.copysign([-1, 0, 1], -1.1) - array([-1., -0., -1.]) - >>> np.copysign([-1, 0, 1], np.arange(3)-1) - array([-1., 0., 1.]) - - """) - -add_newdoc('numpy.core.umath', 'nextafter', - """ - Return the next representable floating-point value after x1 in the direction - of x2 element-wise. - - Parameters - ---------- - x1 : array_like - Values to find the next representable value of. - x2 : array_like - The direction where to look for the next representable value of `x1`. 
- out : ndarray, optional - Array into which the output is placed. Its type is preserved and it - must be of the right shape to hold the output. See `doc.ufuncs`. - - Returns - ------- - out : array_like - The next representable values of `x1` in the direction of `x2`. - - Examples - -------- - >>> eps = np.finfo(np.float64).eps - >>> np.nextafter(1, 2) == eps + 1 - True - >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] - array([ True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'spacing', - """ - Return the distance between x and the nearest adjacent number. - - Parameters - ---------- - x1: array_like - Values to find the spacing of. - - Returns - ------- - out : array_like - The spacing of values of `x1`. - - Notes - ----- - It can be considered as a generalization of EPS: - ``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there - should not be any representable number between ``x + spacing(x)`` and - x for any finite x. - - Spacing of +- inf and nan is nan. - - Examples - -------- - >>> np.spacing(1) == np.finfo(np.float64).eps - True - - """) - -add_newdoc('numpy.core.umath', 'sin', - """ - Trigonometric sine, element-wise. - - Parameters - ---------- - x : array_like - Angle, in radians (:math:`2 \\pi` rad equals 360 degrees). - - Returns - ------- - y : array_like - The sine of each element of x. - - See Also - -------- - arcsin, sinh, cos - - Notes - ----- - The sine is one of the fundamental functions of trigonometry - (the mathematical study of triangles). Consider a circle of radius - 1 centered on the origin. A ray comes in from the :math:`+x` axis, - makes an angle at the origin (measured counter-clockwise from that - axis), and departs from the origin. The :math:`y` coordinate of - the outgoing ray's intersection with the unit circle is the sine - of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to - +1 for :math:`\\pi / 2.` The function has zeroes where the angle is - a multiple of :math:`\\pi`. 
Sines of angles between :math:`\\pi` and - :math:`2\\pi` are negative. The numerous properties of the sine and - related functions are included in any standard trigonometry text. - - Examples - -------- - Print sine of one angle: - - >>> np.sin(np.pi/2.) - 1.0 - - Print sines of an array of angles given in degrees: - - >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. ) - array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ]) - - Plot the sine function: - - >>> import matplotlib.pylab as plt - >>> x = np.linspace(-np.pi, np.pi, 201) - >>> plt.plot(x, np.sin(x)) - >>> plt.xlabel('Angle [rad]') - >>> plt.ylabel('sin(x)') - >>> plt.axis('tight') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'sinh', - """ - Hyperbolic sine, element-wise. - - Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or - ``-1j * np.sin(1j*x)``. - - Parameters - ---------- - x : array_like - Input array. - out : ndarray, optional - Output array of same shape as `x`. - - Returns - ------- - y : ndarray - The corresponding hyperbolic sine values. - - Raises - ------ - ValueError: invalid return array shape - if `out` is provided and `out.shape` != `x.shape` (See Examples) - - Notes - ----- - If `out` is provided, the function writes the result into it, - and returns a reference to `out`. (See Examples) - - References - ---------- - M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. - New York, NY: Dover, 1972, pg. 83. - - Examples - -------- - >>> np.sinh(0) - 0.0 - >>> np.sinh(np.pi*1j/2) - 1j - >>> np.sinh(np.pi*1j) # (exact value is 0) - 1.2246063538223773e-016j - >>> # Discrepancy due to vagaries of floating point arithmetic. 
- - >>> # Example of providing the optional output parameter - >>> out2 = np.sinh([0.1], out1) - >>> out2 is out1 - True - - >>> # Example of ValueError due to provision of shape mis-matched `out` - >>> np.sinh(np.zeros((3,3)),np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - ValueError: invalid return array shape - - """) - -add_newdoc('numpy.core.umath', 'sqrt', - """ - Return the positive square-root of an array, element-wise. - - Parameters - ---------- - x : array_like - The values whose square-roots are required. - out : ndarray, optional - Alternate array object in which to put the result; if provided, it - must have the same shape as `x` - - Returns - ------- - y : ndarray - An array of the same shape as `x`, containing the positive - square-root of each element in `x`. If any element in `x` is - complex, a complex array is returned (and the square-roots of - negative reals are calculated). If all of the elements in `x` - are real, so is `y`, with negative elements returning ``nan``. - If `out` was provided, `y` is a reference to it. - - See Also - -------- - lib.scimath.sqrt - A version which returns complex numbers when given negative reals. - - Notes - ----- - *sqrt* has--consistent with common convention--as its branch cut the - real "interval" [`-inf`, 0), and is continuous from above on it. - (A branch cut is a curve in the complex plane across which a given - complex function fails to be continuous.) - - Examples - -------- - >>> np.sqrt([1,4,9]) - array([ 1., 2., 3.]) - - >>> np.sqrt([4, -1, -3+4J]) - array([ 2.+0.j, 0.+1.j, 1.+2.j]) - - >>> np.sqrt([4, -1, numpy.inf]) - array([ 2., NaN, Inf]) - - """) - -add_newdoc('numpy.core.umath', 'square', - """ - Return the element-wise square of the input. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - out : ndarray - Element-wise `x*x`, of the same shape and dtype as `x`. - Returns scalar if `x` is a scalar. 
- - See Also - -------- - numpy.linalg.matrix_power - sqrt - power - - Examples - -------- - >>> np.square([-1j, 1]) - array([-1.-0.j, 1.+0.j]) - - """) - -add_newdoc('numpy.core.umath', 'subtract', - """ - Subtract arguments, element-wise. - - Parameters - ---------- - x1, x2 : array_like - The arrays to be subtracted from each other. - - Returns - ------- - y : ndarray - The difference of `x1` and `x2`, element-wise. Returns a scalar if - both `x1` and `x2` are scalars. - - Notes - ----- - Equivalent to ``x1 - x2`` in terms of array broadcasting. - - Examples - -------- - >>> np.subtract(1.0, 4.0) - -3.0 - - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.subtract(x1, x2) - array([[ 0., 0., 0.], - [ 3., 3., 3.], - [ 6., 6., 6.]]) - - """) - -add_newdoc('numpy.core.umath', 'tan', - """ - Compute tangent element-wise. - - Equivalent to ``np.sin(x)/np.cos(x)`` element-wise. - - Parameters - ---------- - x : array_like - Input array. - out : ndarray, optional - Output array of same shape as `x`. - - Returns - ------- - y : ndarray - The corresponding tangent values. - - Raises - ------ - ValueError: invalid return array shape - if `out` is provided and `out.shape` != `x.shape` (See Examples) - - Notes - ----- - If `out` is provided, the function writes the result into it, - and returns a reference to `out`. (See Examples) - - References - ---------- - M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. - New York, NY: Dover, 1972. 
- - Examples - -------- - >>> from math import pi - >>> np.tan(np.array([-pi,pi/2,pi])) - array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) - >>> - >>> # Example of providing the optional output parameter illustrating - >>> # that what is returned is a reference to said parameter - >>> out2 = np.cos([0.1], out1) - >>> out2 is out1 - True - >>> - >>> # Example of ValueError due to provision of shape mis-matched `out` - >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - ValueError: invalid return array shape - - """) - -add_newdoc('numpy.core.umath', 'tanh', - """ - Compute hyperbolic tangent element-wise. - - Equivalent to ``np.sinh(x)/np.cosh(x)`` or - ``-1j * np.tan(1j*x)``. - - Parameters - ---------- - x : array_like - Input array. - out : ndarray, optional - Output array of same shape as `x`. - - Returns - ------- - y : ndarray - The corresponding hyperbolic tangent values. - - Raises - ------ - ValueError: invalid return array shape - if `out` is provided and `out.shape` != `x.shape` (See Examples) - - Notes - ----- - If `out` is provided, the function writes the result into it, - and returns a reference to `out`. (See Examples) - - References - ---------- - .. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. - New York, NY: Dover, 1972, pg. 83. - http://www.math.sfu.ca/~cbm/aands/ - - .. [2] Wikipedia, "Hyperbolic function", - http://en.wikipedia.org/wiki/Hyperbolic_function - - Examples - -------- - >>> np.tanh((0, np.pi*1j, np.pi*1j/2)) - array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. 
+1.63317787e+16j]) - - >>> # Example of providing the optional output parameter illustrating - >>> # that what is returned is a reference to said parameter - >>> out2 = np.tanh([0.1], out1) - >>> out2 is out1 - True - - >>> # Example of ValueError due to provision of shape mis-matched `out` - >>> np.tanh(np.zeros((3,3)),np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - ValueError: invalid return array shape - - """) - -add_newdoc('numpy.core.umath', 'true_divide', - """ - Returns a true division of the inputs, element-wise. - - Instead of the Python traditional 'floor division', this returns a true - division. True division adjusts the output type to present the best - answer, regardless of input types. - - Parameters - ---------- - x1 : array_like - Dividend array. - x2 : array_like - Divisor array. - - Returns - ------- - out : ndarray - Result is scalar if both inputs are scalar, ndarray otherwise. - - Notes - ----- - The floor division operator ``//`` was added in Python 2.2 making ``//`` - and ``/`` equivalent operators. The default floor division operation of - ``/`` can be replaced by true division with - ``from __future__ import division``. - - In Python 3.0, ``//`` is the floor division operator and ``/`` the - true division operator. The ``true_divide(x1, x2)`` function is - equivalent to true division in Python. - - Examples - -------- - >>> x = np.arange(5) - >>> np.true_divide(x, 4) - array([ 0. , 0.25, 0.5 , 0.75, 1. ]) - - >>> x/4 - array([0, 0, 0, 0, 1]) - >>> x//4 - array([0, 0, 0, 0, 1]) - - >>> from __future__ import division - >>> x/4 - array([ 0. , 0.25, 0.5 , 0.75, 1. 
]) - >>> x//4 - array([0, 0, 0, 0, 1]) - - """) diff --git a/numpy-1.6.2/numpy/core/defchararray.py b/numpy-1.6.2/numpy/core/defchararray.py deleted file mode 100644 index 029f797e55..0000000000 --- a/numpy-1.6.2/numpy/core/defchararray.py +++ /dev/null @@ -1,2753 +0,0 @@ -""" -This module contains a set of functions for vectorized string -operations and methods. - -.. note:: - The `chararray` class exists for backwards compatibility with - Numarray, it is not recommended for new development. Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - -Some methods will only be available if the corresponding string method is -available in your version of Python. - -The preferred alias for `defchararray` is `numpy.char`. - -""" - -import sys -from numerictypes import string_, unicode_, integer, object_, bool_, character -from numeric import ndarray, compare_chararrays -from numeric import array as narray -from numpy.core.multiarray import _vec_string -from numpy.compat import asbytes -import numpy - -__all__ = ['chararray', - 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less', - 'str_len', 'add', 'multiply', 'mod', 'capitalize', 'center', 'count', - 'decode', 'encode', 'endswith', 'expandtabs', 'find', 'format', - 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', - 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', - 'partition', 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', - 'rsplit', 'rstrip', 'split', 'splitlines', 'startswith', 'strip', - 'swapcase', 'title', 'translate', 'upper', 'zfill', - 'isnumeric', 'isdecimal', - 'array', 'asarray'] - -_globalvar = 0 -if sys.version_info[0] >= 3: - _unicode = str - _bytes = bytes -else: - _unicode = unicode - _bytes = str -_len = len - -def _use_unicode(*args): - """ - Helper function for determining 
the output type of some string - operations. - - For an operation on two ndarrays, if at least one is unicode, the - result should be unicode. - """ - for x in args: - if (isinstance(x, _unicode) - or issubclass(numpy.asarray(x).dtype.type, unicode_)): - return unicode_ - return string_ - -def _to_string_or_unicode_array(result): - """ - Helper function to cast a result back into a string or unicode array - if an object array must be used as an intermediary. - """ - return numpy.asarray(result.tolist()) - -def _clean_args(*args): - """ - Helper function for delegating arguments to Python string - functions. - - Many of the Python string operations that have optional arguments - do not use 'None' to indicate a default value. In these cases, - we need to remove all `None` arguments, and those following them. - """ - newargs = [] - for chk in args: - if chk is None: - break - newargs.append(chk) - return newargs - -def _get_num_chars(a): - """ - Helper function that returns the number of characters per field in - a string or unicode array. This is to abstract out the fact that - for a unicode array this is itemsize / 4. - """ - if issubclass(a.dtype.type, unicode_): - return a.itemsize / 4 - return a.itemsize - - -def equal(x1, x2): - """ - Return (x1 == x2) element-wise. - - Unlike `numpy.equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - not_equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '==', True) - -def not_equal(x1, x2): - """ - Return (x1 != x2) element-wise. 
- - Unlike `numpy.not_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '!=', True) - -def greater_equal(x1, x2): - """ - Return (x1 >= x2) element-wise. - - Unlike `numpy.greater_equal`, this comparison is performed by - first stripping whitespace characters from the end of the string. - This behavior is provided for backward-compatibility with - numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '>=', True) - -def less_equal(x1, x2): - """ - Return (x1 <= x2) element-wise. - - Unlike `numpy.less_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, greater, less - """ - return compare_chararrays(x1, x2, '<=', True) - -def greater(x1, x2): - """ - Return (x1 > x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. 
This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, less - """ - return compare_chararrays(x1, x2, '>', True) - -def less(x1, x2): - """ - Return (x1 < x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, greater - """ - return compare_chararrays(x1, x2, '<', True) - -def str_len(a): - """ - Return len(a) element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of integers - - See also - -------- - __builtin__.len - """ - return _vec_string(a, integer, '__len__') - -def add(x1, x2): - """ - Return (x1 + x2), that is string concatenation, element-wise for a - pair of array_likes of str or unicode. - - Parameters - ---------- - x1 : array_like of str or unicode - x2 : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of `string_` or `unicode_`, depending on input types - """ - arr1 = numpy.asarray(x1) - arr2 = numpy.asarray(x2) - out_size = _get_num_chars(arr1) + _get_num_chars(arr2) - dtype = _use_unicode(arr1, arr2) - return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,)) - -def multiply(a, i): - """ - Return (a * i), that is string multiple concatenation, - element-wise. 
- - Values in `i` of less than 0 are treated as 0 (which yields an - empty string). - - Parameters - ---------- - a : array_like of str or unicode - - i : array_like of ints - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - """ - a_arr = numpy.asarray(a) - i_arr = numpy.asarray(i) - if not issubclass(i_arr.dtype.type, integer): - raise ValueError, "Can only multiply by integers" - out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0) - return _vec_string( - a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) - -def mod(a, values): - """ - Return (a % i), that is pre-Python 2.6 string formatting - (iterpolation), element-wise for a pair of array_likes of str - or unicode. - - Parameters - ---------- - a : array_like of str or unicode - - values : array_like of values - These values will be element-wise interpolated into the string. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.__mod__ - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, '__mod__', (values,))) - -def capitalize(a): - """ - Return a copy of `a` with only the first character of each element - capitalized. - - Calls `str.capitalize` element-wise. - - For 8-bit strings, this method is locale-dependent. 
- - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.capitalize - - Examples - -------- - >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c - array(['a1b2', '1b2a', 'b2a1', '2a1b'], - dtype='|S4') - >>> np.char.capitalize(c) - array(['A1b2', '1b2a', 'B2a1', '2a1b'], - dtype='|S4') - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'capitalize') - -if sys.version_info >= (2, 4): - def center(a, width, fillchar=' '): - """ - Return a copy of `a` with its elements centered in a string of - length `width`. - - Calls `str.center` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The padding character to use (default is space). - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.center - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) -else: - def center(a, width): - """ - Return an array with the elements of `a` centered in a string - of length width. - - Calls `str.center` element-wise. 
- - Parameters - ---------- - a : array_like of str or unicode - width : int - The length of the resulting strings - - Returns - ------- - out : ndarray, str or unicode - Output array of str or unicode, depending on input types - - See also - -------- - str.center - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'center', (width_arr,)) - -def count(a, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - Calls `str.count` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - The substring to search for. - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as slice - notation to specify the range in which to count. - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - str.count - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> np.char.count(c, 'A') - array([3, 1, 1]) - >>> np.char.count(c, 'aA') - array([3, 1, 0]) - >>> np.char.count(c, 'A', start=1, end=4) - array([2, 1, 1]) - >>> np.char.count(c, 'A', start=1, end=3) - array([1, 0, 0]) - - """ - return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) - -def decode(a, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the - :mod:`codecs` module. 
- - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.decode - - Notes - ----- - The type of the result will depend on the encoding specified. - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> np.char.encode(c, encoding='cp037') - array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', - '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], - dtype='|S7') - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) - -def encode(a, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the codecs - module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.encode - - Notes - ----- - The type of the result will depend on the encoding specified. - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'encode', _clean_args(encoding, errors))) - -def endswith(a, suffix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `a` ends with `suffix`, otherwise `False`. - - Calls `str.endswith` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - suffix : str - - start, end : int, optional - With optional `start`, test beginning at that position. With - optional `end`, stop comparing at that position. - - Returns - ------- - out : ndarray - Outputs an array of bools. 
- - See also - -------- - str.endswith - - Examples - -------- - >>> s = np.array(['foo', 'bar']) - >>> s[0] = 'foo' - >>> s[1] = 'bar' - >>> s - array(['foo', 'bar'], - dtype='|S3') - >>> np.char.endswith(s, 'ar') - array([False, True], dtype=bool) - >>> np.char.endswith(s, 'a', start=1, end=2) - array([False, True], dtype=bool) - - """ - return _vec_string( - a, bool_, 'endswith', [suffix, start] + _clean_args(end)) - -def expandtabs(a, tabsize=8): - """ - Return a copy of each string element where all tab characters are - replaced by one or more spaces. - - Calls `str.expandtabs` element-wise. - - Return a copy of each string element where all tab characters are - replaced by one or more spaces, depending on the current column - and the given `tabsize`. The column number is reset to zero after - each newline occurring in the string. If `tabsize` is not given, a - tab size of 8 characters is assumed. This doesn't understand other - non-printing characters or escape sequences. - - Parameters - ---------- - a : array_like of str or unicode - tabsize : int, optional - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.expandtabs - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'expandtabs', (tabsize,))) - -def find(a, sub, start=0, end=None): - """ - For each element, return the lowest index in the string where - substring `sub` is found. - - Calls `str.find` element-wise. - - For each element, return the lowest index in the string where - substring `sub` is found, such that `sub` is contained in the - range [`start`, `end`]. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray or int - Output array of ints. Returns -1 if `sub` is not found. 
- - See also - -------- - str.find - - """ - return _vec_string( - a, integer, 'find', [sub, start] + _clean_args(end)) - -# if sys.version_info >= (2.6): -# def format(a, *args, **kwargs): -# # _vec_string doesn't support kwargs at present -# raise NotImplementedError - -def index(a, sub, start=0, end=None): - """ - Like `find`, but raises `ValueError` when the substring is not found. - - Calls `str.index` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. Returns -1 if `sub` is not found. - - See also - -------- - find, str.find - - """ - return _vec_string( - a, integer, 'index', [sub, start] + _clean_args(end)) - -def isalnum(a): - """ - Returns true for each element if all characters in the string are - alphanumeric and there is at least one character, false otherwise. - - Calls `str.isalnum` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.isalnum - """ - return _vec_string(a, bool_, 'isalnum') - -def isalpha(a): - """ - Returns true for each element if all characters in the string are - alphabetic and there is at least one character, false otherwise. - - Calls `str.isalpha` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isalpha - """ - return _vec_string(a, bool_, 'isalpha') - -def isdigit(a): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - Calls `str.isdigit` element-wise. - - For 8-bit strings, this method is locale-dependent. 
- - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isdigit - """ - return _vec_string(a, bool_, 'isdigit') - -def islower(a): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. - - Calls `str.islower` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.islower - """ - return _vec_string(a, bool_, 'islower') - -def isspace(a): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. - - Calls `str.isspace` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isspace - """ - return _vec_string(a, bool_, 'isspace') - -def istitle(a): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - Call `str.istitle` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.istitle - """ - return _vec_string(a, bool_, 'istitle') - -def isupper(a): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - Call `str.isupper` element-wise. - - For 8-bit strings, this method is locale-dependent. 
- - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isupper - """ - return _vec_string(a, bool_, 'isupper') - -def join(sep, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - Calls `str.join` element-wise. - - Parameters - ---------- - sep : array_like of str or unicode - seq : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.join - """ - return _to_string_or_unicode_array( - _vec_string(sep, object_, 'join', (seq,))) - -if sys.version_info >= (2, 4): - def ljust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` left-justified in a - string of length `width`. - - Calls `str.ljust` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.ljust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar)) -else: - def ljust(a, width): - """ - Return an array with the elements of `a` left-justified in a - string of length `width`. - - Calls `str.ljust` element-wise. 
- - Parameters - ---------- - a : array_like of str or unicode - width : int - The length of the resulting strings - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.ljust - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr,)) - -def lower(a): - """ - Return an array with the elements of `a` converted to lowercase. - - Call `str.lower` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array-like of str or unicode - - Returns - ------- - out : ndarray, str or unicode - Output array of str or unicode, depending on input type - - See also - -------- - str.lower - - Examples - -------- - >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c - array(['A1B C', '1BCA', 'BCA1'], - dtype='|S5') - >>> np.char.lower(c) - array(['a1b c', '1bca', 'bca1'], - dtype='|S5') - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'lower') - -def lstrip(a, chars=None): - """ - For each element in `a`, return a copy with the leading characters - removed. - - Calls `str.lstrip` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - chars : str or unicode, optional - The `chars` argument is a string specifying the set of - characters to be removed. If omitted or None, the `chars` - argument defaults to removing whitespace. The `chars` argument - is not a prefix; rather, all combinations of its values are - stripped. 
- - Returns - ------- - out : ndarray, str or unicode - Output array of str or unicode, depending on input type - - See also - -------- - str.lstrip - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> np.char.lstrip(c, 'a') # 'a' unstripped from c[1] because whitespace leading - array(['AaAaA', ' aA ', 'bBABba'], - dtype='|S7') - >>> np.char.lstrip(c, 'A') # leaves c unchanged - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all() - ... # XXX: is this a regression? this line now returns False - ... # np.char.lstrip(c,'') does not modify c at all. - True - >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all() - True - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,)) - -if sys.version_info >= (2, 5): - def partition(a, sep): - """ - Partition each element in `a` around `sep`. - - Calls `str.partition` element-wise. - - For each element in `a`, split the element as the first - occurrence of `sep`, and return 3 strings containing the part - before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. - - Parameters - ---------- - a : array-like of str or unicode - sep : str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type. - The output array will have an extra dimension with 3 - elements per input element. - - See also - -------- - str.partition - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'partition', (sep,))) - -def replace(a, old, new, count=None): - """ - For each element in `a`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - Calls `str.replace` element-wise. 
- - Parameters - ---------- - a : array-like of str or unicode - - old, new : str or unicode - - count : int, optional - If the optional argument `count` is given, only the first - `count` occurrences are replaced. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.replace - - """ - return _to_string_or_unicode_array( - _vec_string( - a, object_, 'replace', [old, new] +_clean_args(count))) - -def rfind(a, sub, start=0, end=None): - """ - For each element in `a`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - Calls `str.rfind` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray - Output array of ints. Return -1 on failure. - - See also - -------- - str.rfind - - """ - return _vec_string( - a, integer, 'rfind', [sub, start] + _clean_args(end)) - -def rindex(a, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - Calls `str.rindex` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - rfind, str.rindex - - """ - return _vec_string( - a, integer, 'rindex', [sub, start] + _clean_args(end)) - -if sys.version_info >= (2, 4): - def rjust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` right-justified in a - string of length `width`. - - Calls `str.rjust` element-wise. 
- - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rjust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) -else: - def rjust(a, width): - """ - Return an array with the elements of `a` right-justified in a - string of length `width`. - - Calls `str.rjust` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - width : int - The length of the resulting strings - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rjust - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'rjust', (width,)) - -if sys.version_info >= (2, 5): - def rpartition(a, sep): - """ - Partition each element in `a` around `sep`. - - Calls `str.rpartition` element-wise. - - For each element in `a`, split the element as the last - occurrence of `sep`, and return 3 strings containing the part - before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. - - Parameters - ---------- - a : array-like of str or unicode - sep : str or unicode - - Returns - ------- - out : ndarray - Output array of string or unicode, depending on input - type. The output array will have an extra dimension with - 3 elements per input element. 
- - See also - -------- - str.rpartition - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'rpartition', (sep,))) - -if sys.version_info >= (2, 4): - def rsplit(a, sep=None, maxsplit=None): - """ - For each element in `a`, return a list of the words in the - string, using `sep` as the delimiter string. - - Calls `str.rsplit` element-wise. - - Except for splitting from the right, `rsplit` - behaves like `split`. - - Parameters - ---------- - a : array_like of str or unicode - - sep : str or unicode, optional - If `sep` is not specified or `None`, any whitespace string - is a separator. - maxsplit : int, optional - If `maxsplit` is given, at most `maxsplit` splits are done, - the rightmost ones. - - Returns - ------- - out : ndarray - Array of list objects - - See also - -------- - str.rsplit, split - - """ - # This will return an array of lists of different sizes, so we - # leave it as an object array - return _vec_string( - a, object_, 'rsplit', [sep] + _clean_args(maxsplit)) - -def rstrip(a, chars=None): - """ - For each element in `a`, return a copy with the trailing - characters removed. - - Calls `str.rstrip` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - chars : str or unicode, optional - The `chars` argument is a string specifying the set of - characters to be removed. If omitted or None, the `chars` - argument defaults to removing whitespace. The `chars` argument - is not a suffix; rather, all combinations of its values are - stripped. 
- - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rstrip - - Examples - -------- - >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c - array(['aAaAaA', 'abBABba'], - dtype='|S7') - >>> np.char.rstrip(c, 'a') - array(['aAaAaA', 'abBABb'], - dtype='|S7') - >>> np.char.rstrip(c, 'A') - array(['aAaAa', 'abBABba'], - dtype='|S7') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,)) - -def split(a, sep=None, maxsplit=None): - """ - For each element in `a`, return a list of the words in the - string, using `sep` as the delimiter string. - - Calls `str.rsplit` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sep : str or unicode, optional - If `sep` is not specified or `None`, any whitespace string is a - separator. - - maxsplit : int, optional - If `maxsplit` is given, at most `maxsplit` splits are done. - - Returns - ------- - out : ndarray - Array of list objects - - See also - -------- - str.split, rsplit - - """ - # This will return an array of lists of different sizes, so we - # leave it as an object array - return _vec_string( - a, object_, 'split', [sep] + _clean_args(maxsplit)) - -def splitlines(a, keepends=None): - """ - For each element in `a`, return a list of the lines in the - element, breaking at line boundaries. - - Calls `str.splitlines` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - keepends : bool, optional - Line breaks are not included in the resulting list unless - keepends is given and true. - - Returns - ------- - out : ndarray - Array of list objects - - See also - -------- - str.splitlines - - """ - return _vec_string( - a, object_, 'splitlines', _clean_args(keepends)) - -def startswith(a, prefix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `a` starts with `prefix`, otherwise `False`. 
- - Calls `str.startswith` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - suffix : str - - start, end : int, optional - With optional `start`, test beginning at that position. With - optional `end`, stop comparing at that position. - - Returns - ------- - out : ndarray - Array of booleans - - See also - -------- - str.startswith - - """ - return _vec_string( - a, bool_, 'startswith', [prefix, start] + _clean_args(end)) - -def strip(a, chars=None): - """ - For each element in `a`, return a copy with the leading and - trailing characters removed. - - Calls `str.rstrip` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - chars : str or unicode, optional - The `chars` argument is a string specifying the set of - characters to be removed. If omitted or None, the `chars` - argument defaults to removing whitespace. The `chars` argument - is not a prefix or suffix; rather, all combinations of its - values are stripped. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.strip - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> np.char.strip(c) - array(['aAaAaA', 'aA', 'abBABba'], - dtype='|S7') - >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads - array(['AaAaA', ' aA ', 'bBABb'], - dtype='|S7') - >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails - array(['aAaAa', ' aA ', 'abBABba'], - dtype='|S7') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars)) - -def swapcase(a): - """ - For each element in `a`, return a copy of the string with - uppercase characters converted to lowercase and vice versa. - - Calls `str.swapcase` element-wise. - - For 8-bit strings, this method is locale-dependent. 
- - Parameters - ---------- - a : array-like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.swapcase - - Examples - -------- - >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c - array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], - dtype='|S5') - >>> np.char.swapcase(c) - array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], - dtype='|S5') - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'swapcase') - -def title(a): - """ - For each element in `a`, return a titlecased version of the - string: words start with uppercase characters, all remaining cased - characters are lowercase. - - Calls `str.title` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array-like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.title - - Examples - -------- - >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c - array(['a1b c', '1b ca', 'b ca1', 'ca1b'], - dtype='|S5') - >>> np.char.title(c) - array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], - dtype='|S5') - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'title') - -def translate(a, table, deletechars=None): - """ - For each element in `a`, return a copy of the string where all - characters occurring in the optional argument `deletechars` are - removed, and the remaining characters have been mapped through the - given translation table. - - Calls `str.translate` element-wise. 
- - Parameters - ---------- - a : array-like of str or unicode - - table : str of length 256 - - deletechars : str - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.translate - - """ - a_arr = numpy.asarray(a) - if issubclass(a_arr.dtype.type, unicode_): - return _vec_string( - a_arr, a_arr.dtype, 'translate', (table,)) - else: - return _vec_string( - a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars)) - -def upper(a): - """ - Return an array with the elements of `a` converted to uppercase. - - Calls `str.upper` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array-like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.upper - - Examples - -------- - >>> c = np.array(['a1b c', '1bca', 'bca1']); c - array(['a1b c', '1bca', 'bca1'], - dtype='|S5') - >>> np.char.upper(c) - array(['A1B C', '1BCA', 'BCA1'], - dtype='|S5') - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'upper') - -def zfill(a, width): - """ - Return the numeric string left-filled with zeros in a string of - length `width`. - - Calls `str.zfill` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - width : int - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.zfill - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,)) - -def isnumeric(a): - """ - For each element in `a`, return True if there are only numeric - characters in the element. - - Calls `unicode.isnumeric` element-wise. - - Numeric characters include digit characters, and all characters - that have the Unicode numeric value property, e.g. 
``U+2155, - VULGAR FRACTION ONE FIFTH``. - - Parameters - ---------- - a : array-like of unicode - - Returns - ------- - out : ndarray - Array of booleans - - See also - -------- - unicode.isnumeric - """ - if _use_unicode(a) != unicode_: - raise TypeError, "isnumeric is only available for Unicode strings and arrays" - return _vec_string(a, bool_, 'isnumeric') - -def isdecimal(a): - """ - For each element in `a`, return True if there are only decimal - characters in the element. - - Calls `unicode.isdecimal` element-wise. - - Decimal characters include digit characters, and all characters - that that can be used to form decimal-radix numbers, - e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``. - - Parameters - ---------- - a : array-like of unicode - - Returns - ------- - out : ndarray - Array of booleans - - See also - -------- - unicode.isdecimal - """ - if _use_unicode(a) != unicode_: - raise TypeError, "isnumeric is only available for Unicode strings and arrays" - return _vec_string(a, bool_, 'isdecimal') - - -class chararray(ndarray): - """ - chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0, - strides=None, order=None) - - Provides a convenient view on arrays of string and unicode values. - - .. note:: - The `chararray` class exists for backwards compatibility with - Numarray, it is not recommended for new development. Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - - Versus a regular Numpy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `.endswith`) and infix operators (e.g. 
``"+", "*", "%"``) - - chararrays should be created using `numpy.char.array` or - `numpy.char.asarray`, rather than this constructor directly. - - This constructor creates the array, using `buffer` (with `offset` - and `strides`) if it is not ``None``. If `buffer` is ``None``, then - constructs a new array with `strides` in "C order", unless both - ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides` - is in "Fortran order". - - Methods - ------- - astype - argsort - copy - count - decode - dump - dumps - encode - endswith - expandtabs - fill - find - flatten - getfield - index - isalnum - isalpha - isdecimal - isdigit - islower - isnumeric - isspace - istitle - isupper - item - join - ljust - lower - lstrip - nonzero - put - ravel - repeat - replace - reshape - resize - rfind - rindex - rjust - rsplit - rstrip - searchsorted - setfield - setflags - sort - split - splitlines - squeeze - startswith - strip - swapaxes - swapcase - take - title - tofile - tolist - tostring - translate - transpose - upper - view - zfill - - Parameters - ---------- - shape : tuple - Shape of the array. - itemsize : int, optional - Length of each array element, in number of characters. Default is 1. - unicode : bool, optional - Are the array elements of type unicode (True) or string (False). - Default is False. - buffer : int, optional - Memory address of the start of the array data. Default is None, - in which case a new array is created. - offset : int, optional - Fixed stride displacement from the beginning of an axis? - Default is 0. Needs to be >=0. - strides : array_like of ints, optional - Strides for the array (see `ndarray.strides` for full description). - Default is None. - order : {'C', 'F'}, optional - The order in which the array data is stored in memory: 'C' -> - "row major" order (the default), 'F' -> "column major" - (Fortran) order. 
- - Examples - -------- - >>> charar = np.chararray((3, 3)) - >>> charar[:] = 'a' - >>> charar - chararray([['a', 'a', 'a'], - ['a', 'a', 'a'], - ['a', 'a', 'a']], - dtype='|S1') - - >>> charar = np.chararray(charar.shape, itemsize=5) - >>> charar[:] = 'abc' - >>> charar - chararray([['abc', 'abc', 'abc'], - ['abc', 'abc', 'abc'], - ['abc', 'abc', 'abc']], - dtype='|S5') - - """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, - offset=0, strides=None, order='C'): - global _globalvar - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - # force itemsize to be a Python long, since using Numpy integer - # types results in itemsize.itemsize being used as the size of - # strings in the new array. - itemsize = long(itemsize) - - if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): - # On Py3, unicode objects do not have the buffer interface - filler = buffer - buffer = None - else: - filler = None - - _globalvar = 1 - if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - order=order) - else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - buffer=buffer, - offset=offset, strides=strides, - order=order) - if filler is not None: - self[...] = filler - _globalvar = 0 - return self - - def __array_finalize__(self, obj): - # The b is a special case because it is used for reconstructing. - if not _globalvar and self.dtype.char not in 'SUbc': - raise ValueError, "Can only create a chararray from string data." - - def __getitem__(self, obj): - val = ndarray.__getitem__(self, obj) - if issubclass(val.dtype.type, character) and not _len(val) == 0: - temp = val.rstrip() - if _len(temp) == 0: - val = '' - else: - val = temp - return val - - # IMPLEMENTATION NOTE: Most of the methods of this class are - # direct delegations to the free functions in this module. - # However, those that return an array of strings should instead - # return a chararray, so some extra wrapping is required. 
- - def __eq__(self, other): - """ - Return (self == other) element-wise. - - See also - -------- - equal - """ - return equal(self, other) - - def __ne__(self, other): - """ - Return (self != other) element-wise. - - See also - -------- - not_equal - """ - return not_equal(self, other) - - def __ge__(self, other): - """ - Return (self >= other) element-wise. - - See also - -------- - greater_equal - """ - return greater_equal(self, other) - - def __le__(self, other): - """ - Return (self <= other) element-wise. - - See also - -------- - less_equal - """ - return less_equal(self, other) - - def __gt__(self, other): - """ - Return (self > other) element-wise. - - See also - -------- - greater - """ - return greater(self, other) - - def __lt__(self, other): - """ - Return (self < other) element-wise. - - See also - -------- - less - """ - return less(self, other) - - def __add__(self, other): - """ - Return (self + other), that is string concatenation, - element-wise for a pair of array_likes of str or unicode. - - See also - -------- - add - """ - return asarray(add(self, other)) - - def __radd__(self, other): - """ - Return (other + self), that is string concatenation, - element-wise for a pair of array_likes of `string_` or `unicode_`. - - See also - -------- - add - """ - return asarray(add(numpy.asarray(other), self)) - - def __mul__(self, i): - """ - Return (self * i), that is string multiple concatenation, - element-wise. - - See also - -------- - multiply - """ - return asarray(multiply(self, i)) - - def __rmul__(self, i): - """ - Return (self * i), that is string multiple concatenation, - element-wise. - - See also - -------- - multiply - """ - return asarray(multiply(self, i)) - - def __mod__(self, i): - """ - Return (self % i), that is pre-Python 2.6 string formatting - (iterpolation), element-wise for a pair of array_likes of `string_` - or `unicode_`. 
- - See also - -------- - mod - """ - return asarray(mod(self, i)) - - def __rmod__(self, other): - return NotImplemented - - def argsort(self, axis=-1, kind='quicksort', order=None): - """ - Return the indices that sort the array lexicographically. - - For full documentation see `numpy.argsort`, for which this method is - in fact merely a "thin wrapper." - - Examples - -------- - >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') - >>> c = c.view(np.chararray); c - chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], - dtype='|S5') - >>> c[c.argsort()] - chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], - dtype='|S5') - - """ - return self.__array__().argsort(axis, kind, order) - argsort.__doc__ = ndarray.argsort.__doc__ - - def capitalize(self): - """ - Return a copy of `self` with only the first character of each element - capitalized. - - See also - -------- - char.capitalize - - """ - return asarray(capitalize(self)) - - if sys.version_info >= (2, 4): - def center(self, width, fillchar=' '): - """ - Return a copy of `self` with its elements centered in a - string of length `width`. - - See also - -------- - center - """ - return asarray(center(self, width, fillchar)) - else: - def center(self, width): - """ - Return a copy of `self` with its elements centered in a - string of length `width`. - - See also - -------- - center - """ - return asarray(center(self, width)) - - def count(self, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - See also - -------- - char.count - - """ - return count(self, sub, start, end) - - - def decode(self, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - See also - -------- - char.decode - - """ - return decode(self, encoding, errors) - - def encode(self, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. 
- - See also - -------- - char.encode - - """ - return encode(self, encoding, errors) - - def endswith(self, suffix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` ends with `suffix`, otherwise `False`. - - See also - -------- - char.endswith - - """ - return endswith(self, suffix, start, end) - - def expandtabs(self, tabsize=8): - """ - Return a copy of each string element where all tab characters are - replaced by one or more spaces. - - See also - -------- - char.expandtabs - - """ - return asarray(expandtabs(self, tabsize)) - - def find(self, sub, start=0, end=None): - """ - For each element, return the lowest index in the string where - substring `sub` is found. - - See also - -------- - char.find - - """ - return find(self, sub, start, end) - - def index(self, sub, start=0, end=None): - """ - Like `find`, but raises `ValueError` when the substring is not found. - - See also - -------- - char.index - - """ - return index(self, sub, start, end) - - def isalnum(self): - """ - Returns true for each element if all characters in the string - are alphanumeric and there is at least one character, false - otherwise. - - See also - -------- - char.isalnum - - """ - return isalnum(self) - - def isalpha(self): - """ - Returns true for each element if all characters in the string - are alphabetic and there is at least one character, false - otherwise. - - See also - -------- - char.isalpha - - """ - return isalpha(self) - - def isdigit(self): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - See also - -------- - char.isdigit - - """ - return isdigit(self) - - def islower(self): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. 
- - See also - -------- - char.islower - - """ - return islower(self) - - def isspace(self): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. - - See also - -------- - char.isspace - - """ - return isspace(self) - - def istitle(self): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - See also - -------- - char.istitle - - """ - return istitle(self) - - def isupper(self): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - See also - -------- - char.isupper - - """ - return isupper(self) - - def join(self, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - See also - -------- - char.join - - """ - return join(self, seq) - - if sys.version_info >= (2, 4): - def ljust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` left-justified in a - string of length `width`. - - See also - -------- - char.ljust - - """ - return asarray(ljust(self, width, fillchar)) - else: - def ljust(self, width): - """ - Return an array with the elements of `self` left-justified in a - string of length `width`. - - See also - -------- - ljust - """ - return asarray(ljust(self, width)) - - def lower(self): - """ - Return an array with the elements of `self` converted to - lowercase. - - See also - -------- - char.lower - - """ - return asarray(lower(self)) - - def lstrip(self, chars=None): - """ - For each element in `self`, return a copy with the leading characters - removed. - - See also - -------- - char.lstrip - - """ - return asarray(lstrip(self, chars)) - - if sys.version_info >= (2, 5): - def partition(self, sep): - """ - Partition each element in `self` around `sep`. 
- - See also - -------- - partition - """ - return asarray(partition(self, sep)) - - def replace(self, old, new, count=None): - """ - For each element in `self`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - See also - -------- - char.replace - - """ - return asarray(replace(self, old, new, count)) - - def rfind(self, sub, start=0, end=None): - """ - For each element in `self`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - See also - -------- - char.rfind - - """ - return rfind(self, sub, start, end) - - def rindex(self, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - See also - -------- - char.rindex - - """ - return rindex(self, sub, start, end) - - if sys.version_info >= (2, 4): - def rjust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` - right-justified in a string of length `width`. - - See also - -------- - char.rjust - - """ - return asarray(rjust(self, width, fillchar)) - else: - def rjust(self, width): - """ - Return an array with the elements of `self` - right-justified in a string of length `width`. - - See also - -------- - rjust - """ - return asarray(rjust(self, width)) - - if sys.version_info >= (2, 5): - def rpartition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - rpartition - """ - return asarray(rpartition(self, sep)) - - if sys.version_info >= (2, 4): - def rsplit(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in - the string, using `sep` as the delimiter string. - - See also - -------- - char.rsplit - - """ - return rsplit(self, sep, maxsplit) - - def rstrip(self, chars=None): - """ - For each element in `self`, return a copy with the trailing - characters removed. 
- - See also - -------- - char.rstrip - - """ - return asarray(rstrip(self, chars)) - - def split(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in the - string, using `sep` as the delimiter string. - - See also - -------- - char.split - - """ - return split(self, sep, maxsplit) - - def splitlines(self, keepends=None): - """ - For each element in `self`, return a list of the lines in the - element, breaking at line boundaries. - - See also - -------- - char.splitlines - - """ - return splitlines(self, keepends) - - def startswith(self, prefix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` starts with `prefix`, otherwise `False`. - - See also - -------- - char.startswith - - """ - return startswith(self, prefix, start, end) - - def strip(self, chars=None): - """ - For each element in `self`, return a copy with the leading and - trailing characters removed. - - See also - -------- - char.strip - - """ - return asarray(strip(self, chars)) - - def swapcase(self): - """ - For each element in `self`, return a copy of the string with - uppercase characters converted to lowercase and vice versa. - - See also - -------- - char.swapcase - - """ - return asarray(swapcase(self)) - - def title(self): - """ - For each element in `self`, return a titlecased version of the - string: words start with uppercase characters, all remaining cased - characters are lowercase. - - See also - -------- - char.title - - """ - return asarray(title(self)) - - def translate(self, table, deletechars=None): - """ - For each element in `self`, return a copy of the string where - all characters occurring in the optional argument - `deletechars` are removed, and the remaining characters have - been mapped through the given translation table. 
- - See also - -------- - char.translate - - """ - return asarray(translate(self, table, deletechars)) - - def upper(self): - """ - Return an array with the elements of `self` converted to - uppercase. - - See also - -------- - char.upper - - """ - return asarray(upper(self)) - - def zfill(self, width): - """ - Return the numeric string left-filled with zeros in a string of - length `width`. - - See also - -------- - char.zfill - - """ - return asarray(zfill(self, width)) - - def isnumeric(self): - """ - For each element in `self`, return True if there are only - numeric characters in the element. - - See also - -------- - char.isnumeric - - """ - return isnumeric(self) - - def isdecimal(self): - """ - For each element in `self`, return True if there are only - decimal characters in the element. - - See also - -------- - char.isdecimal - - """ - return isdecimal(self) - - -def array(obj, itemsize=None, copy=True, unicode=None, order=None): - """ - Create a `chararray`. - - .. note:: - This class is provided for numarray backward-compatibility. - New code (not concerned with numarray compatibility) should use - arrays of type `string_` or `unicode_` and use the free functions - in :mod:`numpy.char ` for fast - vectorized string operations instead. - - Versus a regular Numpy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``) - - Parameters - ---------- - obj : array of str or unicode-like - - itemsize : int, optional - `itemsize` is the number of characters per scalar in the - resulting array. If `itemsize` is None, and `obj` is an - object array or a Python list, the `itemsize` will be - automatically determined. 
If `itemsize` is provided and `obj` - is of type str or unicode, then the `obj` string will be - chunked into `itemsize` pieces. - - copy : bool, optional - If true (default), then the object is copied. Otherwise, a copy - will only be made if __array__ returns a copy, if obj is a - nested sequence, or if a copy is needed to satisfy any of the other - requirements (`itemsize`, unicode, `order`, etc.). - - unicode : bool, optional - When true, the resulting `chararray` can contain Unicode - characters, when false only 8-bit characters. If unicode is - `None` and `obj` is one of the following: - - - a `chararray`, - - an ndarray of type `str` or `unicode` - - a Python str or unicode object, - - then the unicode setting of the output array will be - automatically determined. - - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). If order is 'A', then the returned array may - be in any order (either C-, Fortran-contiguous, or even - discontiguous). - """ - if isinstance(obj, (_bytes, _unicode)): - if unicode is None: - if isinstance(obj, _unicode): - unicode = True - else: - unicode = False - - if itemsize is None: - itemsize = _len(obj) - shape = _len(obj) // itemsize - - if unicode: - if sys.maxunicode == 0xffff: - # On a narrow Python build, the buffer for Unicode - # strings is UCS2, which doesn't match the buffer for - # Numpy Unicode types, which is ALWAYS UCS4. - # Therefore, we need to convert the buffer. On Python - # 2.6 and later, we can use the utf_32 codec. Earlier - # versions don't have that codec, so we convert to a - # numerical array that matches the input buffer, and - # then use Numpy to convert it to UCS4. All of this - # should happen in native endianness. 
- if sys.hexversion >= 0x2060000: - obj = obj.encode('utf_32') - else: - if isinstance(obj, str): - ascii = numpy.frombuffer(obj, 'u1') - ucs4 = numpy.array(ascii, 'u4') - obj = ucs4.data - else: - ucs2 = numpy.frombuffer(obj, 'u2') - ucs4 = numpy.array(ucs2, 'u4') - obj = ucs4.data - else: - obj = _unicode(obj) - else: - # Let the default Unicode -> string encoding (if any) take - # precedence. - obj = _bytes(obj) - - return chararray(shape, itemsize=itemsize, unicode=unicode, - buffer=obj, order=order) - - if isinstance(obj, (list, tuple)): - obj = numpy.asarray(obj) - - if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character): - # If we just have a vanilla chararray, create a chararray - # view around it. - if not isinstance(obj, chararray): - obj = obj.view(chararray) - - if itemsize is None: - itemsize = obj.itemsize - # itemsize is in 8-bit chars, so for Unicode, we need - # to divide by the size of a single Unicode character, - # which for Numpy is always 4 - if issubclass(obj.dtype.type, unicode_): - itemsize /= 4 - - if unicode is None: - if issubclass(obj.dtype.type, unicode_): - unicode = True - else: - unicode = False - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - if order is not None: - obj = numpy.asarray(obj, order=order) - if (copy - or (itemsize != obj.itemsize) - or (not unicode and isinstance(obj, unicode_)) - or (unicode and isinstance(obj, string_))): - obj = obj.astype((dtype, long(itemsize))) - return obj - - if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object): - if itemsize is None: - # Since no itemsize was specified, convert the input array to - # a list so the ndarray constructor will automatically - # determine the itemsize for us. 
- obj = obj.tolist() - # Fall through to the default case - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - if itemsize is None: - val = narray(obj, dtype=dtype, order=order, subok=True) - else: - val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True) - return val.view(chararray) - - -def asarray(obj, itemsize=None, unicode=None, order=None): - """ - Convert the input to a `chararray`, copying the data only if - necessary. - - Versus a regular Numpy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `str.endswith`) and infix operators (e.g. +, *, %) - - Parameters - ---------- - obj : array of str or unicode-like - - itemsize : int, optional - `itemsize` is the number of characters per scalar in the - resulting array. If `itemsize` is None, and `obj` is an - object array or a Python list, the `itemsize` will be - automatically determined. If `itemsize` is provided and `obj` - is of type str or unicode, then the `obj` string will be - chunked into `itemsize` pieces. - - unicode : bool, optional - When true, the resulting `chararray` can contain Unicode - characters, when false only 8-bit characters. If unicode is - `None` and `obj` is one of the following: - - - a `chararray`, - - an ndarray of type `str` or 'unicode` - - a Python str or unicode object, - - then the unicode setting of the output array will be - automatically determined. - - order : {'C', 'F'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). 
- """ - return array(obj, itemsize, copy=False, - unicode=unicode, order=order) diff --git a/numpy-1.6.2/numpy/core/fromnumeric.py b/numpy-1.6.2/numpy/core/fromnumeric.py deleted file mode 100644 index 602c0ebc52..0000000000 --- a/numpy-1.6.2/numpy/core/fromnumeric.py +++ /dev/null @@ -1,2562 +0,0 @@ -# Module containing non-deprecated functions borrowed from Numeric. -__docformat__ = "restructuredtext en" - -# functions that are now methods -__all__ = ['take', 'reshape', 'choose', 'repeat', 'put', - 'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin', - 'searchsorted', 'alen', - 'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape', - 'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue', - 'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim', - 'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze', - 'amax', 'amin', - ] - -import multiarray as mu -import umath as um -import numerictypes as nt -from numeric import asarray, array, asanyarray, concatenate -_dt_ = nt.sctype2char - -import types - -try: - _gentype = types.GeneratorType -except AttributeError: - _gentype = types.NoneType - -# save away Python sum -_sum_ = sum - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj),method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. - - Parameters - ---------- - a : array_like - The source array. - indices : array_like - The indices of the values to extract. - axis : int, optional - The axis over which to select values. 
By default, the flattened - input array is used. - out : ndarray, optional - If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - Returns - ------- - subarray : ndarray - The returned array has the same type as `a`. - - See Also - -------- - ndarray.take : equivalent method - - Examples - -------- - >>> a = [4, 3, 5, 7, 6, 8] - >>> indices = [0, 1, 4] - >>> np.take(a, indices) - array([4, 3, 6]) - - In this example if `a` is an ndarray, "fancy" indexing can be used. - - >>> a = np.array(a) - >>> a[indices] - array([4, 3, 6]) - - """ - try: - take = a.take - except AttributeError: - return _wrapit(a, 'take', indices, axis, out, mode) - return take(indices, axis, out, mode) - - -# not deprecated --- copy if necessary, view otherwise -def reshape(a, newshape, order='C'): - """ - Gives a new shape to an array without changing its data. - - Parameters - ---------- - a : array_like - Array to be reshaped. - newshape : int or tuple of ints - The new shape should be compatible with the original shape. If - an integer, then the result will be a 1-D array of that length. - One shape dimension can be -1. In this case, the value is inferred - from the length of the array and remaining dimensions. - order : {'C', 'F', 'A'}, optional - Determines whether the array data should be viewed as in C - (row-major) order, FORTRAN (column-major) order, or the C/FORTRAN - order should be preserved. - - Returns - ------- - reshaped_array : ndarray - This will be a new view object if possible; otherwise, it will - be a copy. 
- - - See Also - -------- - ndarray.reshape : Equivalent method. - - Notes - ----- - - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raise if the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - # A transpose make the array non-contiguous - >>> b = a.T - # Taking a view makes it possible to modify the shape without modiying the - # initial object. - >>> c = b.view() - >>> c.shape = (20) - AttributeError: incompatible shape for a non-contiguous array - - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - - """ - try: - reshape = a.reshape - except AttributeError: - return _wrapit(a, 'reshape', newshape, order=order) - return reshape(newshape, order=order) - - -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties. Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. 
Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. 
- - See Also - -------- - ndarray.choose : equivalent method - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... ) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - try: - choose = a.choose - except AttributeError: - return _wrapit(a, 'choose', choices, out=out, mode=mode) - return choose(choices, out=out, mode=mode) - - -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. 
- repeats : {int, array of ints} - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. - axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. - - Examples - -------- - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - try: - repeat = a.repeat - except AttributeError: - return _wrapit(a, 'repeat', repeats, axis) - return repeat(repeats, axis) - - -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. 
- - See Also - -------- - putmask, place - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - return a.put(ind, v, mode) - - -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. - axis2 : int - Second axis. - - Returns - ------- - a_swapped : ndarray - If `a` is an ndarray, then a view of `a` is returned; otherwise - a new array is created. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) - - >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) - >>> x - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - - >>> np.swapaxes(x,0,2) - array([[[0, 4], - [2, 6]], - [[1, 5], - [3, 7]]]) - - """ - try: - swapaxes = a.swapaxes - except AttributeError: - return _wrapit(a, 'swapaxes', axis1, axis2) - return swapaxes(axis1, axis2) - - -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. - axes : list of ints, optional - By default, reverse the dimensions, otherwise permute the axes - according to the values given. - - Returns - ------- - p : ndarray - `a` with its axes permuted. A view is returned whenever - possible. - - See Also - -------- - rollaxis - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.transpose(x) - array([[0, 2], - [1, 3]]) - - >>> x = np.ones((1, 2, 3)) - >>> np.transpose(x, (1, 0, 2)).shape - (2, 1, 3) - - """ - try: - transpose = a.transpose - except AttributeError: - return _wrapit(a, 'transpose', axes) - return transpose(axes) - - -def sort(a, axis=-1, kind='quicksort', order=None): - """ - Return a sorted copy of an array. 
- - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - - Notes - ----- - The various sorting algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following - properties: - - =========== ======= ============= ============ ======= - kind speed worst case work space stable - =========== ======= ============= ============ ======= - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no - =========== ======= ============= ============ ======= - - All the sort algorithms make temporary copies of the data when - sorting along any but the last axis. Consequently, sorting along - the last axis is faster and uses less space than sorting along - any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. 
- - Previous to numpy 1.4.0 sorting real and complex arrays containing nan - values led to undefined behaviour. In numpy versions >= 1.4.0 nan - values are sorted to the end. The extended sort order is: - - * Real: [R, nan] - * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] - - where R is a non-nan real value. Complex values with the same nan - placements are sorted according to the non-nan part if it exists. - Non-nan values are sorted as before. - - Examples - -------- - >>> a = np.array([[1,4],[3,1]]) - >>> np.sort(a) # sort along the last axis - array([[1, 4], - [1, 3]]) - >>> np.sort(a, axis=None) # sort the flattened array - array([1, 1, 3, 4]) - >>> np.sort(a, axis=0) # sort along the first axis - array([[1, 1], - [3, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] - >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), - ... ('Galahad', 1.7, 38)] - >>> a = np.array(values, dtype=dtype) # create a structured array - >>> np.sort(a, order='height') # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.8999999999999999, 38)], - dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), - ('Arthur', 1.8, 41)], - dtype=[('name', '|S10'), ('height', '>> x = np.array([3, 1, 2]) - >>> np.argsort(x) - array([1, 2, 0]) - - Two-dimensional array: - - >>> x = np.array([[0, 3], [2, 2]]) - >>> x - array([[0, 3], - [2, 2]]) - - >>> np.argsort(x, axis=0) - array([[0, 1], - [1, 0]]) - - >>> np.argsort(x, axis=1) - array([[0, 1], - [0, 1]]) - - Sorting with keys: - - >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> x - array([(1, 0), (0, 1)], - dtype=[('x', '>> np.argsort(x, order=('x','y')) - array([1, 0]) - - >>> np.argsort(x, order=('y','x')) - array([0, 1]) - - """ - try: - argsort = a.argsort - except AttributeError: 
- return _wrapit(a, 'argsort', axis, kind, order) - return argsort(axis, kind, order) - - -def argmax(a, axis=None): - """ - Indices of the maximum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmax, argmin - amax : The maximum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - - Notes - ----- - In case of multiple occurrences of the maximum values, the indices - corresponding to the first occurrence are returned. - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argmax(a) - 5 - >>> np.argmax(a, axis=0) - array([1, 1, 1]) - >>> np.argmax(a, axis=1) - array([2, 2]) - - >>> b = np.arange(6) - >>> b[1] = 5 - >>> b - array([0, 5, 2, 3, 4, 5]) - >>> np.argmax(b) # Only the first occurrence is returned. - 1 - - """ - try: - argmax = a.argmax - except AttributeError: - return _wrapit(a, 'argmax', axis) - return argmax(axis) - - -def argmin(a, axis=None): - """ - Return the indices of the minimum values along an axis. - - See Also - -------- - argmax : Similar function. Please refer to `numpy.argmax` for detailed - documentation. - - """ - try: - argmin = a.argmin - except AttributeError: - return _wrapit(a, 'argmin', axis) - return argmin(axis) - - -def searchsorted(a, v, side='left'): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `a` such that, if the corresponding - elements in `v` were inserted before the indices, the order of `a` would - be preserved. - - Parameters - ---------- - a : 1-D array_like - Input array, sorted in ascending order. 
- v : array_like - Values to insert into `a`. - side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. If - 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `a`). - - Returns - ------- - indices : array of ints - Array of insertion points with the same shape as `v`. - - See Also - -------- - sort : Return a sorted copy of an array. - histogram : Produce histogram from 1-D data. - - Notes - ----- - Binary search is used to find the required insertion points. - - As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing - `nan` values. The enhanced sort order is documented in `sort`. - - Examples - -------- - >>> np.searchsorted([1,2,3,4,5], 3) - 2 - >>> np.searchsorted([1,2,3,4,5], 3, side='right') - 3 - >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) - array([0, 5, 1, 2]) - - """ - try: - searchsorted = a.searchsorted - except AttributeError: - return _wrapit(a, 'searchsorted', v, side) - return searchsorted(v, side) - - -def resize(a, new_shape): - """ - Return a new array with the specified shape. - - If the new array is larger than the original array, then the new - array is filled with repeated copies of `a`. Note that this behavior - is different from a.resize(new_shape) which fills with zeros instead - of repeated copies of `a`. - - Parameters - ---------- - a : array_like - Array to be resized. - - new_shape : int or tuple of int - Shape of resized array. - - Returns - ------- - reshaped_array : ndarray - The new array is formed from the data in the old array, repeated - if necessary to fill out the required number of elements. The - data are repeated in the order that they are stored in memory. - - See Also - -------- - ndarray.resize : resize an array in-place. 
- - Examples - -------- - >>> a=np.array([[0,1],[2,3]]) - >>> np.resize(a,(1,4)) - array([[0, 1, 2, 3]]) - >>> np.resize(a,(2,4)) - array([[0, 1, 2, 3], - [0, 1, 2, 3]]) - - """ - if isinstance(new_shape, (int, nt.integer)): - new_shape = (new_shape,) - a = ravel(a) - Na = len(a) - if not Na: return mu.zeros(new_shape, a.dtype.char) - total_size = um.multiply.reduce(new_shape) - n_copies = int(total_size / Na) - extra = total_size % Na - - if total_size == 0: - return a[:0] - - if extra != 0: - n_copies = n_copies+1 - extra = Na-extra - - a = concatenate( (a,)*n_copies) - if extra > 0: - a = a[:-extra] - - return reshape(a, new_shape) - - -def squeeze(a): - """ - Remove single-dimensional entries from the shape of an array. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - squeezed : ndarray - The input array, but with with all dimensions of length 1 - removed. Whenever possible, a view on `a` is returned. - - Examples - -------- - >>> x = np.array([[[0], [1], [2]]]) - >>> x.shape - (1, 3, 1) - >>> np.squeeze(x).shape - (3,) - - """ - try: - squeeze = a.squeeze - except AttributeError: - return _wrapit(a, 'squeeze') - return squeeze() - - -def diagonal(a, offset=0, axis1=0, axis2=1): - """ - Return specified diagonals. - - If `a` is 2-D, returns the diagonal of `a` with the given offset, - i.e., the collection of elements of the form ``a[i, i+offset]``. If - `a` has more than two dimensions, then the axes specified by `axis1` - and `axis2` are used to determine the 2-D sub-array whose diagonal is - returned. The shape of the resulting array can be determined by - removing `axis1` and `axis2` and appending an index to the right equal - to the size of the resulting diagonals. - - Parameters - ---------- - a : array_like - Array from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be positive or - negative. Defaults to main diagonal (0). 
- axis1 : int, optional - Axis to be used as the first axis of the 2-D sub-arrays from which - the diagonals should be taken. Defaults to first axis (0). - axis2 : int, optional - Axis to be used as the second axis of the 2-D sub-arrays from - which the diagonals should be taken. Defaults to second axis (1). - - Returns - ------- - array_of_diagonals : ndarray - If `a` is 2-D, a 1-D array containing the diagonal is returned. - If the dimension of `a` is larger, then an array of diagonals is - returned, "packed" from left-most dimension to right-most (e.g., - if `a` is 3-D, then the diagonals are "packed" along rows). - - Raises - ------ - ValueError - If the dimension of `a` is less than 2. - - See Also - -------- - diag : MATLAB work-a-like for 1-D and 2-D arrays. - diagflat : Create diagonal arrays. - trace : Sum along diagonals. - - Examples - -------- - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> a.diagonal() - array([0, 3]) - >>> a.diagonal(1) - array([1]) - - A 3-D example: - - >>> a = np.arange(8).reshape(2,2,2); a - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> a.diagonal(0, # Main diagonals of two arrays created by skipping - ... 0, # across the outer(left)-most axis last and - ... 1) # the "middle" (row) axis first. - array([[0, 6], - [1, 7]]) - - The sub-arrays whose main diagonals we just obtained; note that each - corresponds to fixing the right-most (column) axis, and that the - diagonals are "packed" in rows. - - >>> a[:,:,0] # main diagonal is [0 6] - array([[0, 2], - [4, 6]]) - >>> a[:,:,1] # main diagonal is [1 7] - array([[1, 3], - [5, 7]]) - - """ - return asarray(a).diagonal(offset, axis1, axis2) - - -def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - Return the sum along diagonals of the array. - - If `a` is 2-D, the sum along its diagonal with the given offset - is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. 
- - If `a` has more than two dimensions, then the axes specified by axis1 and - axis2 are used to determine the 2-D sub-arrays whose traces are returned. - The shape of the resulting array is the same as that of `a` with `axis1` - and `axis2` removed. - - Parameters - ---------- - a : array_like - Input array, from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to 0. - axis1, axis2 : int, optional - Axes to be used as the first and second axis of the 2-D sub-arrays - from which the diagonals should be taken. Defaults are the first two - axes of `a`. - dtype : dtype, optional - Determines the data-type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and `a` is - of integer type of precision less than the default integer - precision, then the default integer precision is used. Otherwise, - the precision is the same as that of `a`. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and - it must be of the right shape to hold the output. - - Returns - ------- - sum_along_diagonals : ndarray - If `a` is 2-D, the sum along the diagonal is returned. If `a` has - larger dimensions, then an array of sums along diagonals is returned. - - See Also - -------- - diag, diagonal, diagflat - - Examples - -------- - >>> np.trace(np.eye(3)) - 3.0 - >>> a = np.arange(8).reshape((2,2,2)) - >>> np.trace(a) - array([6, 8]) - - >>> a = np.arange(24).reshape((2,2,2,3)) - >>> np.trace(a).shape - (2, 3) - - """ - return asarray(a).trace(offset, axis1, axis2, dtype, out) - -def ravel(a, order='C'): - """ - Return a flattened array. - - A 1-D array, containing the elements of the input, is returned. A copy is - made only if needed. - - Parameters - ---------- - a : array_like - Input array. The elements in ``a`` are read in the order specified by - `order`, and packed as a 1-D array. 
- order : {'C','F', 'A', 'K'}, optional - The elements of ``a`` are read in this order. 'C' means to view - the elements in C (row-major) order. 'F' means to view the elements - in Fortran (column-major) order. 'A' means to view the elements - in 'F' order if a is Fortran contiguous, 'C' order otherwise. - 'K' means to view the elements in the order they occur in memory, - except for reversing the data when strides are negative. - By default, 'C' order is used. - - Returns - ------- - 1d_array : ndarray - Output of the same dtype as `a`, and of shape ``(a.size(),)``. - - See Also - -------- - ndarray.flat : 1-D iterator over an array. - ndarray.flatten : 1-D array copy of the elements of an array - in row-major order. - - Notes - ----- - In row-major order, the row index varies the slowest, and the column - index the quickest. This can be generalized to multiple dimensions, - where row-major order implies that the index along the first axis - varies slowest, and the index along the last quickest. The opposite holds - for Fortran-, or column-major, mode. - - Examples - -------- - It is equivalent to ``reshape(-1, order=order)``. 
- - >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> print np.ravel(x) - [1 2 3 4 5 6] - - >>> print x.reshape(-1) - [1 2 3 4 5 6] - - >>> print np.ravel(x, order='F') - [1 4 2 5 3 6] - - When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - - >>> print np.ravel(x.T) - [1 4 2 5 3 6] - >>> print np.ravel(x.T, order='A') - [1 2 3 4 5 6] - - When ``order`` is 'K', it will preserve orderings that are neither 'C' - nor 'F', but won't reverse axes: - - >>> a = np.arange(3)[::-1]; a - array([2, 1, 0]) - >>> a.ravel(order='C') - array([2, 1, 0]) - >>> a.ravel(order='K') - array([2, 1, 0]) - - >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a - array([[[ 0, 2, 4], - [ 1, 3, 5]], - [[ 6, 8, 10], - [ 7, 9, 11]]]) - >>> a.ravel(order='C') - array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) - >>> a.ravel(order='K') - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - - """ - return asarray(a).ravel(order) - - -def nonzero(a): - """ - Return the indices of the elements that are non-zero. - - Returns a tuple of arrays, one for each dimension of `a`, containing - the indices of the non-zero elements in that dimension. The - corresponding non-zero values can be obtained with:: - - a[nonzero(a)] - - To group the indices by element, rather than dimension, use:: - - transpose(nonzero(a)) - - The result of this is always a 2-D array, with a row for - each non-zero element. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. 
- - Examples - -------- - >>> x = np.eye(3) - >>> x - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> np.nonzero(x) - (array([0, 1, 2]), array([0, 1, 2])) - - >>> x[np.nonzero(x)] - array([ 1., 1., 1.]) - >>> np.transpose(np.nonzero(x)) - array([[0, 0], - [1, 1], - [2, 2]]) - - A common use for ``nonzero`` is to find the indices of an array, where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, np.nonzero(a > 3) - yields the indices of the `a` where the condition is true. - - >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]]) - >>> a > 3 - array([[False, False, False], - [ True, True, True], - [ True, True, True]], dtype=bool) - >>> np.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - The ``nonzero`` method of the boolean array can also be called. - - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - try: - nonzero = a.nonzero - except AttributeError: - res = _wrapit(a, 'nonzero') - else: - res = nonzero() - return res - - -def shape(a): - """ - Return the shape of an array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - shape : tuple of ints - The elements of the shape tuple give the lengths of the - corresponding array dimensions. - - See Also - -------- - alen - ndarray.shape : Equivalent array method. - - Examples - -------- - >>> np.shape(np.eye(3)) - (3, 3) - >>> np.shape([[1, 2]]) - (1, 2) - >>> np.shape([0]) - (1,) - >>> np.shape(0) - () - - >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - >>> np.shape(a) - (2,) - >>> a.shape - (2,) - - """ - try: - result = a.shape - except AttributeError: - result = asarray(a).shape - return result - - -def compress(condition, a, axis=None, out=None): - """ - Return selected slices of an array along given axis. 
- - When working along a given axis, a slice along that axis is returned in - `output` for each index where `condition` evaluates to True. When - working on a 1-D array, `compress` is equivalent to `extract`. - - Parameters - ---------- - condition : 1-D array of bools - Array that selects which entries to return. If len(condition) - is less than the size of `a` along the given axis, then output is - truncated to the length of the condition array. - a : array_like - Array from which to extract a part. - axis : int, optional - Axis along which to take slices. If None (default), work on the - flattened array. - out : ndarray, optional - Output array. Its type is preserved and it must be of the right - shape to hold the output. - - Returns - ------- - compressed_array : ndarray - A copy of `a` without the slices along axis for which `condition` - is false. - - See Also - -------- - take, choose, diag, diagonal, select - ndarray.compress : Equivalent method. - numpy.doc.ufuncs : Section "Output arguments" - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4], [5, 6]]) - >>> a - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.compress([0, 1], a, axis=0) - array([[3, 4]]) - >>> np.compress([False, True, True], a, axis=0) - array([[3, 4], - [5, 6]]) - >>> np.compress([False, True], a, axis=1) - array([[2], - [4], - [6]]) - - Working on the flattened array does not return slices along an axis but - selects elements. - - >>> np.compress([False, True], a) - array([2]) - - """ - try: - compress = a.compress - except AttributeError: - return _wrapit(a, 'compress', condition, axis, out) - return compress(condition, axis, out) - - -def clip(a, a_min, a_max, out=None): - """ - Clip (limit) the values in an array. - - Given an interval, values outside the interval are clipped to - the interval edges. For example, if an interval of ``[0, 1]`` - is specified, values smaller than 0 become 0, and values larger - than 1 become 1. 
- - Parameters - ---------- - a : array_like - Array containing elements to clip. - a_min : scalar or array_like - Minimum value. - a_max : scalar or array_like - Maximum value. If `a_min` or `a_max` are array_like, then they will - be broadcasted to the shape of `a`. - out : ndarray, optional - The results will be placed in this array. It may be the input - array for in-place clipping. `out` must be of the right shape - to hold the output. Its type is preserved. - - Returns - ------- - clipped_array : ndarray - An array with the elements of `a`, but where values - < `a_min` are replaced with `a_min`, and those > `a_max` - with `a_max`. - - See Also - -------- - numpy.doc.ufuncs : Section "Output arguments" - - Examples - -------- - >>> a = np.arange(10) - >>> np.clip(a, 1, 8) - array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.clip(a, 3, 6, out=a) - array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) - >>> a = np.arange(10) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8) - array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) - - """ - try: - clip = a.clip - except AttributeError: - return _wrapit(a, 'clip', a_min, a_max, out) - return clip(a_min, a_max, out) - - -def sum(a, axis=None, dtype=None, out=None): - """ - Sum of array elements over a given axis. - - Parameters - ---------- - a : array_like - Elements to sum. - axis : integer, optional - Axis over which the sum is taken. By default `axis` is None, - and all elements are summed. - dtype : dtype, optional - The type of the returned array and of the accumulator in which - the elements are summed. By default, the dtype of `a` is used. - An exception is when `a` has an integer type with less precision - than the default platform integer. In that case, the default - platform integer is used instead. - out : ndarray, optional - Array into which the output is placed. By default, a new array is - created. 
If `out` is given, it must be of the appropriate shape - (the shape of `a` with `axis` removed, i.e., - ``numpy.delete(a.shape, axis)``). Its type is preserved. See - `doc.ufuncs` (Section "Output arguments") for more details. - - Returns - ------- - sum_along_axis : ndarray - An array with the same shape as `a`, with the specified - axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar - is returned. If an output array is specified, a reference to - `out` is returned. - - See Also - -------- - ndarray.sum : Equivalent method. - - cumsum : Cumulative sum of array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - mean, average - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> np.sum([0.5, 1.5]) - 2.0 - >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) - 1 - >>> np.sum([[0, 1], [0, 5]]) - 6 - >>> np.sum([[0, 1], [0, 5]], axis=0) - array([0, 6]) - >>> np.sum([[0, 1], [0, 5]], axis=1) - array([1, 5]) - - If the accumulator is too small, overflow occurs: - - >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - -128 - - """ - if isinstance(a, _gentype): - res = _sum_(a) - if out is not None: - out[...] = res - return out - return res - try: - sum = a.sum - except AttributeError: - return _wrapit(a, 'sum', axis, dtype, out) - return sum(axis, dtype, out) - - -def product (a, axis=None, dtype=None, out=None): - """ - Return the product of array elements over a given axis. - - See Also - -------- - prod : equivalent function; see for details. - - """ - try: - prod = a.prod - except AttributeError: - return _wrapit(a, 'prod', axis, dtype, out) - return prod(axis, dtype, out) - - -def sometrue(a, axis=None, out=None): - """ - Check whether some values are true. - - Refer to `any` for full documentation. 
- - See Also - -------- - any : equivalent function - - """ - try: - any = a.any - except AttributeError: - return _wrapit(a, 'any', axis, out) - return any(axis, out) - - -def alltrue (a, axis=None, out=None): - """ - Check if all elements of input array are true. - - See Also - -------- - numpy.all : Equivalent function; see for details. - - """ - try: - all = a.all - except AttributeError: - return _wrapit(a, 'all', axis, out) - return all(axis, out) - - -def any(a,axis=None, out=None): - """ - Test whether any array element along a given axis evaluates to True. - - Returns single boolean unless `axis` is not ``None`` - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional - Axis along which a logical OR is performed. The default - (`axis` = `None`) is to perform a logical OR over a flattened - input array. `axis` may be negative, in which case it counts - from the last to the first axis. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output and its type is preserved - (e.g., if it is of type float, then it will remain so, returning - 1.0 for True and 0.0 for False, regardless of the type of `a`). - See `doc.ufuncs` (Section "Output arguments") for details. - - Returns - ------- - any : bool or ndarray - A new boolean or `ndarray` is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.any : equivalent method - - all : Test whether all elements along a given axis evaluate to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity evaluate - to `True` because these are not equal to zero. 
- - Examples - -------- - >>> np.any([[True, False], [True, True]]) - True - - >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False], dtype=bool) - - >>> np.any([-1, 0, 5]) - True - - >>> np.any(np.nan) - True - - >>> o=np.array([False]) - >>> z=np.any([-1, 4, 5], out=o) - >>> z, o - (array([ True], dtype=bool), array([ True], dtype=bool)) - >>> # Check now that z is a reference to o - >>> z is o - True - >>> id(z), id(o) # identity of z and o # doctest: +SKIP - (191614240, 191614240) - - """ - try: - any = a.any - except AttributeError: - return _wrapit(a, 'any', axis, out) - return any(axis, out) - - -def all(a,axis=None, out=None): - """ - Test whether all array elements along a given axis evaluate to True. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional - Axis along which a logical AND is performed. - The default (`axis` = `None`) is to perform a logical AND - over a flattened input array. `axis` may be negative, in which - case it counts from the last to the first axis. - out : ndarray, optional - Alternate output array in which to place the result. - It must have the same shape as the expected output and its - type is preserved (e.g., if ``dtype(out)`` is float, the result - will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section - "Output arguments") for more details. - - Returns - ------- - all : ndarray, bool - A new boolean or array is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.all : equivalent method - - any : Test whether any element along a given axis evaluates to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity - evaluate to `True` because these are not equal to zero. 
- - Examples - -------- - >>> np.all([[True,False],[True,True]]) - False - - >>> np.all([[True,False],[True,True]], axis=0) - array([ True, False], dtype=bool) - - >>> np.all([-1, 4, 5]) - True - - >>> np.all([1.0, np.nan]) - True - - >>> o=np.array([False]) - >>> z=np.all([-1, 4, 5], out=o) - >>> id(z), id(o), z # doctest: +SKIP - (28293632, 28293632, array([ True], dtype=bool)) - - """ - try: - all = a.all - except AttributeError: - return _wrapit(a, 'all', axis, out) - return all(axis, out) - - -def cumsum (a, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative sum is computed. The default - (None) is to compute the cumsum over the flattened array. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `doc.ufuncs` - (Section "Output arguments") for more details. - - Returns - ------- - cumsum_along_axis : ndarray. - A new array holding the result is returned unless `out` is - specified, in which case a reference to `out` is returned. The - result has the same size as `a`, and the same shape as `a` if - `axis` is not None or `a` is a 1-d array. - - - See Also - -------- - sum : Sum array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. 
- - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.cumsum(a) - array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) - array([ 1., 3., 6., 10., 15., 21.]) - - >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns - array([[1, 2, 3], - [5, 7, 9]]) - >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows - array([[ 1, 3, 6], - [ 4, 9, 15]]) - - """ - try: - cumsum = a.cumsum - except AttributeError: - return _wrapit(a, 'cumsum', axis, dtype, out) - return cumsum(axis, dtype, out) - - -def cumproduct(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product over the given axis. - - - See Also - -------- - cumprod : equivalent function; see for details. - - """ - try: - cumprod = a.cumprod - except AttributeError: - return _wrapit(a, 'cumprod', axis, dtype, out) - return cumprod(axis, dtype, out) - - -def ptp(a, axis=None, out=None): - """ - Range of values (maximum - minimum) along an axis. - - The name of the function comes from the acronym for 'peak to peak'. - - Parameters - ---------- - a : array_like - Input values. - axis : int, optional - Axis along which to find the peaks. By default, flatten the - array. - out : array_like - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type of the output values will be cast if necessary. - - Returns - ------- - ptp : ndarray - A new array holding the result, unless `out` was - specified, in which case a reference to `out` is returned. 
- - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.ptp(x, axis=0) - array([2, 2]) - - >>> np.ptp(x, axis=1) - array([1, 1]) - - """ - try: - ptp = a.ptp - except AttributeError: - return _wrapit(a, 'ptp', axis, out) - return ptp(axis, out) - - -def amax(a, axis=None, out=None): - """ - Return the maximum of an array or maximum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - out : ndarray, optional - Alternate output array in which to place the result. Must be of - the same shape and buffer length as the expected output. See - `doc.ufuncs` (Section "Output arguments") for more details. - - Returns - ------- - amax : ndarray or scalar - Maximum of `a`. If `axis` is None, the result is a scalar value. - If `axis` is given, the result is an array of dimension - ``a.ndim - 1``. - - See Also - -------- - nanmax : NaN values are ignored instead of being propagated. - fmax : same behavior as the C99 fmax function. - argmax : indices of the maximum values. - - Notes - ----- - NaN values are propagated, that is if at least one item is NaN, the - corresponding max value will be NaN as well. To ignore NaN values - (MATLAB behavior), please use nanmax. - - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amax(a) - 3 - >>> np.amax(a, axis=0) - array([2, 3]) - >>> np.amax(a, axis=1) - array([1, 3]) - - >>> b = np.arange(5, dtype=np.float) - >>> b[2] = np.NaN - >>> np.amax(b) - nan - >>> np.nanmax(b) - 4.0 - - """ - try: - amax = a.max - except AttributeError: - return _wrapit(a, 'max', axis, out) - return amax(axis, out) - - -def amin(a, axis=None, out=None): - """ - Return the minimum of an array or minimum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. 
By default a flattened input is used. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `doc.ufuncs` (Section "Output arguments") for more details. - - Returns - ------- - amin : ndarray - A new array or a scalar array with the result. - - See Also - -------- - nanmin: nan values are ignored instead of being propagated - fmin: same behavior as the C99 fmin function - argmin: Return the indices of the minimum values. - - amax, nanmax, fmax - - Notes - ----- - NaN values are propagated, that is if at least one item is nan, the - corresponding min value will be nan as well. To ignore NaN values (matlab - behavior), please use nanmin. - - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amin(a) # Minimum of the flattened array - 0 - >>> np.amin(a, axis=0) # Minima along the first axis - array([0, 1]) - >>> np.amin(a, axis=1) # Minima along the second axis - array([0, 2]) - - >>> b = np.arange(5, dtype=np.float) - >>> b[2] = np.NaN - >>> np.amin(b) - nan - >>> np.nanmin(b) - 0.0 - - """ - try: - amin = a.min - except AttributeError: - return _wrapit(a, 'min', axis, out) - return amin(axis, out) - - -def alen(a): - """ - Return the length of the first dimension of the input array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - l : int - Length of the first dimension of `a`. - - See Also - -------- - shape, size - - Examples - -------- - >>> a = np.zeros((7,4,5)) - >>> a.shape[0] - 7 - >>> np.alen(a) - 7 - - """ - try: - return len(a) - except TypeError: - return len(array(a,ndmin=1)) - - -def prod(a, axis=None, dtype=None, out=None): - """ - Return the product of array elements over a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis over which the product is taken. By default, the product - of all elements is calculated. 
- dtype : data-type, optional - The data-type of the returned array, as well as of the accumulator - in which the elements are multiplied. By default, if `a` is of - integer type, `dtype` is the default platform integer. (Note: if - the type of `a` is unsigned, then so is `dtype`.) Otherwise, - the dtype is the same as that of `a`. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the - output values will be cast if necessary. - - Returns - ------- - product_along_axis : ndarray, see `dtype` parameter above. - An array shaped as `a` but with the specified axis removed. - Returns a reference to `out` if specified. - - See Also - -------- - ndarray.prod : equivalent method - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. That means that, on a 32-bit platform: - - >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) #random - 16 - - Examples - -------- - By default, calculate the product of all elements: - - >>> np.prod([1.,2.]) - 2.0 - - Even when the input array is two-dimensional: - - >>> np.prod([[1.,2.],[3.,4.]]) - 24.0 - - But we can also specify the axis over which to multiply: - - >>> np.prod([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - If the type of `x` is unsigned, then the output type is - the unsigned platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.uint8) - >>> np.prod(x).dtype == np.uint - True - - If `x` is of a signed integer type, then the output type - is the default platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.int8) - >>> np.prod(x).dtype == np.int - True - - """ - try: - prod = a.prod - except AttributeError: - return _wrapit(a, 'prod', axis, dtype, out) - return prod(axis, dtype, out) - - -def cumprod(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product of elements 
along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative product is computed. By default - the input is flattened. - dtype : dtype, optional - Type of the returned array, as well as of the accumulator in which - the elements are multiplied. If *dtype* is not specified, it - defaults to the dtype of `a`, unless `a` has an integer dtype with - a precision less than that of the default platform integer. In - that case, the default platform integer is used instead. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type of the resulting values will be cast if necessary. - - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case a reference to out is returned. - - See Also - -------- - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([1,2,3]) - >>> np.cumprod(a) # intermediate results 1, 1*2 - ... # total product 1*2*3 = 6 - array([1, 2, 6]) - >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output - array([ 1., 2., 6., 24., 120., 720.]) - - The cumulative product for each column (i.e., over the rows) of `a`: - - >>> np.cumprod(a, axis=0) - array([[ 1, 2, 3], - [ 4, 10, 18]]) - - The cumulative product for each row (i.e. over the columns) of `a`: - - >>> np.cumprod(a,axis=1) - array([[ 1, 2, 6], - [ 4, 20, 120]]) - - """ - try: - cumprod = a.cumprod - except AttributeError: - return _wrapit(a, 'cumprod', axis, dtype, out) - return cumprod(axis, dtype, out) - - -def ndim(a): - """ - Return the number of dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. 
If it is not already an ndarray, a conversion is - attempted. - - Returns - ------- - number_of_dimensions : int - The number of dimensions in `a`. Scalars are zero-dimensional. - - See Also - -------- - ndarray.ndim : equivalent method - shape : dimensions of array - ndarray.shape : dimensions of array - - Examples - -------- - >>> np.ndim([[1,2,3],[4,5,6]]) - 2 - >>> np.ndim(np.array([[1,2,3],[4,5,6]])) - 2 - >>> np.ndim(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def rank(a): - """ - Return the number of dimensions of an array. - - If `a` is not already an array, a conversion is attempted. - Scalars are zero dimensional. - - Parameters - ---------- - a : array_like - Array whose number of dimensions is desired. If `a` is not an array, - a conversion is attempted. - - Returns - ------- - number_of_dimensions : int - The number of dimensions in the array. - - See Also - -------- - ndim : equivalent function - ndarray.ndim : equivalent property - shape : dimensions of array - ndarray.shape : dimensions of array - - Notes - ----- - In the old Numeric package, `rank` was the term used for the number of - dimensions, but in Numpy `ndim` is used instead. - - Examples - -------- - >>> np.rank([1,2,3]) - 1 - >>> np.rank(np.array([[1,2,3],[4,5,6]])) - 2 - >>> np.rank(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def size(a, axis=None): - """ - Return the number of elements along a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which the elements are counted. By default, give - the total number of elements. - - Returns - ------- - element_count : int - Number of elements along the specified axis. 
- - See Also - -------- - shape : dimensions of array - ndarray.shape : dimensions of array - ndarray.size : number of elements in array - - Examples - -------- - >>> a = np.array([[1,2,3],[4,5,6]]) - >>> np.size(a) - 6 - >>> np.size(a,1) - 3 - >>> np.size(a,0) - 2 - - """ - if axis is None: - try: - return a.size - except AttributeError: - return asarray(a).size - else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] - - -def around(a, decimals=0, out=None): - """ - Evenly round to the given number of decimals. - - Parameters - ---------- - a : array_like - Input data. - decimals : int, optional - Number of decimal places to round to (default: 0). If - decimals is negative, it specifies the number of positions to - the left of the decimal point. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. See `doc.ufuncs` (Section - "Output arguments") for details. - - Returns - ------- - rounded_array : ndarray - An array of the same type as `a`, containing the rounded values. - Unless `out` was specified, a new array is created. A reference to - the result is returned. - - The real and imaginary parts of complex numbers are rounded - separately. The result of rounding a float is a float. - - See Also - -------- - ndarray.round : equivalent method - - ceil, fix, floor, rint, trunc - - - Notes - ----- - For values exactly halfway between rounded decimal values, Numpy - rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due - to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling - by powers of ten. - - References - ---------- - .. 
[1] "Lecture Notes on the Status of IEEE 754", William Kahan, - http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF - .. [2] "How Futile are Mindless Assessments of - Roundoff in Floating-Point Computation?", William Kahan, - http://www.cs.berkeley.edu/~wkahan/Mindless.pdf - - Examples - -------- - >>> np.around([0.37, 1.64]) - array([ 0., 2.]) - >>> np.around([0.37, 1.64], decimals=1) - array([ 0.4, 1.6]) - >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value - array([ 0., 2., 2., 4., 4.]) - >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned - array([ 1, 2, 3, 11]) - >>> np.around([1,2,3,11], decimals=-1) - array([ 0, 0, 0, 10]) - - """ - try: - round = a.round - except AttributeError: - return _wrapit(a, 'round', decimals, out) - return round(decimals, out) - - -def round_(a, decimals=0, out=None): - """ - Round an array to the given number of decimals. - - Refer to `around` for full documentation. - - See Also - -------- - around : equivalent function - - """ - try: - round = a.round - except AttributeError: - return _wrapit(a, 'round', decimals, out) - return round(decimals, out) - - -def mean(a, axis=None, dtype=None, out=None): - """ - Compute the arithmetic mean along the specified axis. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for floating point inputs, it is the same as the - input dtype. 
- out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. - See `doc.ufuncs` for details. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - See Also - -------- - average : Weighted average - - Notes - ----- - The arithmetic mean is the sum of the elements along the axis divided - by the number of elements. - - Note that for floating-point input, the mean is computed using the - same precision the input has. Depending on the input data, this can - cause the results to be inaccurate, especially for `float32` (see - example below). Specifying a higher-precision accumulator using the - `dtype` keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.mean(a) - 2.5 - >>> np.mean(a, axis=0) - array([ 2., 3.]) - >>> np.mean(a, axis=1) - array([ 1.5, 3.5]) - - In single precision, `mean` can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.mean(a) - 0.546875 - - Computing the mean in float64 is more accurate: - - >>> np.mean(a, dtype=np.float64) - 0.55000000074505806 - - """ - try: - mean = a.mean - except AttributeError: - return _wrapit(a, 'mean', axis, dtype, out) - return mean(axis, dtype, out) - - -def std(a, axis=None, dtype=None, out=None, ddof=0): - """ - Compute the standard deviation along the specified axis. - - Returns the standard deviation, a measure of the spread of a distribution, - of the array elements. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Calculate the standard deviation of these values. 
- axis : int, optional - Axis along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. - dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float64, for arrays of float types it is - the same as the array type. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type (of the calculated - values) will be cast if necessary. - ddof : int, optional - Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - By default `ddof` is zero. - - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard deviation, - otherwise return a reference to the output array. - - See Also - -------- - var, mean - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as ``x.sum() / N``, where - ``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof`` - is used instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of the infinite population. ``ddof=0`` - provides a maximum likelihood estimate of the variance for normally - distributed variables. The standard deviation computed in this function - is the square root of the estimated variance, so even with ``ddof=1``, it - will not be an unbiased estimate of the standard deviation per se. - - Note that, for complex numbers, `std` takes the absolute - value before squaring, so that the result is always real and nonnegative. 
- - For floating-point input, the *std* is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example below). - Specifying a higher-accuracy accumulator using the `dtype` keyword can - alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.std(a) - 1.1180339887498949 - >>> np.std(a, axis=0) - array([ 1., 1.]) - >>> np.std(a, axis=1) - array([ 0.5, 0.5]) - - In single precision, std() can be inaccurate: - - >>> a = np.zeros((2,512*512), dtype=np.float32) - >>> a[0,:] = 1.0 - >>> a[1,:] = 0.1 - >>> np.std(a) - 0.45172946707416706 - - Computing the standard deviation in float64 is more accurate: - - >>> np.std(a, dtype=np.float64) - 0.44999999925552653 - - """ - try: - std = a.std - except AttributeError: - return _wrapit(a, 'std', axis, dtype, out, ddof) - return std(axis, dtype, out, ddof) - - -def var(a, axis=None, dtype=None, out=None, ddof=0): - """ - Compute the variance along the specified axis. - - Returns the variance of the array elements, a measure of the spread of a - distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the variance is computed. The default is to compute - the variance of the flattened array. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. 
- ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of elements. By - default `ddof` is zero. - - Returns - ------- - variance : ndarray, see dtype parameter above - If ``out=None``, returns a new array containing the variance; - otherwise, a reference to the output array is returned. - - See Also - -------- - std : Standard deviation - mean : Average - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite population. - ``ddof=0`` provides a maximum likelihood estimate of the variance for - normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32` (see example - below). Specifying a higher-accuracy accumulator using the ``dtype`` - keyword can alleviate this issue. 
- - Examples - -------- - >>> a = np.array([[1,2],[3,4]]) - >>> np.var(a) - 1.25 - >>> np.var(a,0) - array([ 1., 1.]) - >>> np.var(a,1) - array([ 0.25, 0.25]) - - In single precision, var() can be inaccurate: - - >>> a = np.zeros((2,512*512), dtype=np.float32) - >>> a[0,:] = 1.0 - >>> a[1,:] = 0.1 - >>> np.var(a) - 0.20405951142311096 - - Computing the standard deviation in float64 is more accurate: - - >>> np.var(a, dtype=np.float64) - 0.20249999932997387 - >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 - 0.20250000000000001 - - """ - try: - var = a.var - except AttributeError: - return _wrapit(a, 'var', axis, dtype, out, ddof) - return var(axis, dtype, out, ddof) diff --git a/numpy-1.6.2/numpy/core/function_base.py b/numpy-1.6.2/numpy/core/function_base.py deleted file mode 100644 index b2f9dc70cf..0000000000 --- a/numpy-1.6.2/numpy/core/function_base.py +++ /dev/null @@ -1,167 +0,0 @@ -__all__ = ['logspace', 'linspace'] - -import numeric as _nx -from numeric import array - -def linspace(start, stop, num=50, endpoint=True, retstep=False): - """ - Return evenly spaced numbers over a specified interval. - - Returns `num` evenly spaced samples, calculated over the - interval [`start`, `stop` ]. - - The endpoint of the interval can optionally be excluded. - - Parameters - ---------- - start : scalar - The starting value of the sequence. - stop : scalar - The end value of the sequence, unless `endpoint` is set to False. - In that case, the sequence consists of all but the last of ``num + 1`` - evenly spaced samples, so that `stop` is excluded. Note that the step - size changes when `endpoint` is False. - num : int, optional - Number of samples to generate. Default is 50. - endpoint : bool, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - retstep : bool, optional - If True, return (`samples`, `step`), where `step` is the spacing - between samples. 
- - Returns - ------- - samples : ndarray - There are `num` equally spaced samples in the closed interval - ``[start, stop]`` or the half-open interval ``[start, stop)`` - (depending on whether `endpoint` is True or False). - step : float (only if `retstep` is True) - Size of spacing between samples. - - - See Also - -------- - arange : Similiar to `linspace`, but uses a step size (instead of the - number of samples). - logspace : Samples uniformly distributed in log space. - - Examples - -------- - >>> np.linspace(2.0, 3.0, num=5) - array([ 2. , 2.25, 2.5 , 2.75, 3. ]) - >>> np.linspace(2.0, 3.0, num=5, endpoint=False) - array([ 2. , 2.2, 2.4, 2.6, 2.8]) - >>> np.linspace(2.0, 3.0, num=5, retstep=True) - (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 8 - >>> y = np.zeros(N) - >>> x1 = np.linspace(0, 10, N, endpoint=True) - >>> x2 = np.linspace(0, 10, N, endpoint=False) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - num = int(num) - if num <= 0: - return array([], float) - if endpoint: - if num == 1: - return array([float(start)]) - step = (stop-start)/float((num-1)) - y = _nx.arange(0, num) * step + start - y[-1] = stop - else: - step = (stop-start)/float(num) - y = _nx.arange(0, num) * step + start - if retstep: - return y, step - else: - return y - -def logspace(start,stop,num=50,endpoint=True,base=10.0): - """ - Return numbers spaced evenly on a log scale. - - In linear space, the sequence starts at ``base ** start`` - (`base` to the power of `start`) and ends with ``base ** stop`` - (see `endpoint` below). - - Parameters - ---------- - start : float - ``base ** start`` is the starting value of the sequence. - stop : float - ``base ** stop`` is the final value of the sequence, unless `endpoint` - is False. 
In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length ``num``) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If true, `stop` is the last sample. Otherwise, it is not included. - Default is True. - base : float, optional - The base of the log space. The step size between the elements in - ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. - Default is 10.0. - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - arange : Similiar to linspace, with the step size specified instead of the - number of samples. Note that, when used with a float endpoint, the - endpoint may or may not be included. - linspace : Similar to logspace, but with the samples uniformly distributed - in linear space, instead of log space. - - Notes - ----- - Logspace is equivalent to the code - - >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) - ... # doctest: +SKIP - >>> power(base, y) - ... # doctest: +SKIP - - Examples - -------- - >>> np.logspace(2.0, 3.0, num=4) - array([ 100. , 215.443469 , 464.15888336, 1000. ]) - >>> np.logspace(2.0, 3.0, num=4, endpoint=False) - array([ 100. , 177.827941 , 316.22776602, 562.34132519]) - >>> np.logspace(2.0, 3.0, num=4, base=2.0) - array([ 4. , 5.0396842 , 6.34960421, 8. 
]) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> x1 = np.logspace(0.1, 1, N, endpoint=True) - >>> x2 = np.logspace(0.1, 1, N, endpoint=False) - >>> y = np.zeros(N) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - y = linspace(start,stop,num=num,endpoint=endpoint) - return _nx.power(base,y) - diff --git a/numpy-1.6.2/numpy/core/getlimits.py b/numpy-1.6.2/numpy/core/getlimits.py deleted file mode 100644 index 53728bc59f..0000000000 --- a/numpy-1.6.2/numpy/core/getlimits.py +++ /dev/null @@ -1,293 +0,0 @@ -""" Machine limits for Float32 and Float64 and (long double) if available... -""" - -__all__ = ['finfo','iinfo'] - -from machar import MachAr -import numeric -import numerictypes as ntypes -from numeric import array - -def _frz(a): - """fix rank-0 --> rank-1""" - if a.ndim == 0: a.shape = (1,) - return a - -_convert_to_float = { - ntypes.csingle: ntypes.single, - ntypes.complex_: ntypes.float_, - ntypes.clongfloat: ntypes.longfloat - } - -class finfo(object): - """ - finfo(dtype) - - Machine limits for floating point types. - - Attributes - ---------- - eps : float - The smallest representable positive number such that - ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating - point type. - epsneg : floating point number of the appropriate type - The smallest representable positive number such that - ``1.0 - epsneg != 1.0``. - iexp : int - The number of bits in the exponent portion of the floating point - representation. - machar : MachAr - The object which calculated these parameters and holds more - detailed information. - machep : int - The exponent that yields `eps`. - max : floating point number of the appropriate type - The largest representable number. - maxexp : int - The smallest positive power of the base (2) that causes overflow. 
- min : floating point number of the appropriate type - The smallest representable number, typically ``-max``. - minexp : int - The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. - negep : int - The exponent that yields `epsneg`. - nexp : int - The number of bits in the exponent including its sign and bias. - nmant : int - The number of bits in the mantissa. - precision : int - The approximate number of decimal digits to which this kind of - float is precise. - resolution : floating point number of the appropriate type - The approximate decimal resolution of this type, i.e., - ``10**-precision``. - tiny : float - The smallest positive usable number. Type of `tiny` is an - appropriate floating point type. - - Parameters - ---------- - dtype : float, dtype, or instance - Kind of floating point data-type about which to get information. - - See Also - -------- - MachAr : The implementation of the tests that produce this information. - iinfo : The equivalent for integer data types. - - Notes - ----- - For developers of NumPy: do not instantiate this at the module level. - The initial calculation of these parameters is expensive and negatively - impacts import times. These objects are cached, so calling ``finfo()`` - repeatedly inside your functions is not a problem. 
- - """ - - _finfo_cache = {} - - def __new__(cls, dtype): - try: - dtype = numeric.dtype(dtype) - except TypeError: - # In case a float instance was given - dtype = numeric.dtype(type(dtype)) - - obj = cls._finfo_cache.get(dtype,None) - if obj is not None: - return obj - dtypes = [dtype] - newdtype = numeric.obj2sctype(dtype) - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - if not issubclass(dtype, numeric.inexact): - raise ValueError, "data type %r not inexact" % (dtype) - obj = cls._finfo_cache.get(dtype,None) - if obj is not None: - return obj - if not issubclass(dtype, numeric.floating): - newdtype = _convert_to_float[dtype] - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - obj = cls._finfo_cache.get(dtype,None) - if obj is not None: - return obj - obj = object.__new__(cls)._init(dtype) - for dt in dtypes: - cls._finfo_cache[dt] = obj - return obj - - def _init(self, dtype): - self.dtype = numeric.dtype(dtype) - if dtype is ntypes.double: - itype = ntypes.int64 - fmt = '%24.16e' - precname = 'double' - elif dtype is ntypes.single: - itype = ntypes.int32 - fmt = '%15.7e' - precname = 'single' - elif dtype is ntypes.longdouble: - itype = ntypes.longlong - fmt = '%s' - precname = 'long double' - elif dtype is ntypes.half: - itype = ntypes.int16 - fmt = '%12.5e' - precname = 'half' - else: - raise ValueError, repr(dtype) - - machar = MachAr(lambda v:array([v], dtype), - lambda v:_frz(v.astype(itype))[0], - lambda v:array(_frz(v)[0], dtype), - lambda v: fmt % array(_frz(v)[0], dtype), - 'numpy %s precision floating point number' % precname) - - for word in ['precision', 'iexp', - 'maxexp','minexp','negep', - 'machep']: - setattr(self,word,getattr(machar, word)) - for word in ['tiny','resolution','epsneg']: - setattr(self,word,getattr(machar, word).flat[0]) - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self.machar = machar 
- self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - return self - - def __str__(self): - return '''\ -Machine parameters for %(dtype)s ---------------------------------------------------------------------- -precision=%(precision)3s resolution= %(_str_resolution)s -machep=%(machep)6s eps= %(_str_eps)s -negep =%(negep)6s epsneg= %(_str_epsneg)s -minexp=%(minexp)6s tiny= %(_str_tiny)s -maxexp=%(maxexp)6s max= %(_str_max)s -nexp =%(nexp)6s min= -max ---------------------------------------------------------------------- -''' % self.__dict__ - - -class iinfo: - """ - iinfo(type) - - Machine limits for integer types. - - Attributes - ---------- - min : int - The smallest integer expressible by the type. - max : int - The largest integer expressible by the type. - - Parameters - ---------- - type : integer type, dtype, or instance - The kind of integer data type to get information about. - - See Also - -------- - finfo : The equivalent for floating point data types. 
- - Examples - -------- - With types: - - >>> ii16 = np.iinfo(np.int16) - >>> ii16.min - -32768 - >>> ii16.max - 32767 - >>> ii32 = np.iinfo(np.int32) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - With instances: - - >>> ii32 = np.iinfo(np.int32(10)) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - """ - - _min_vals = {} - _max_vals = {} - - def __init__(self, int_type): - try: - self.dtype = numeric.dtype(int_type) - except TypeError: - self.dtype = numeric.dtype(type(int_type)) - self.kind = self.dtype.kind - self.bits = self.dtype.itemsize * 8 - self.key = "%s%d" % (self.kind, self.bits) - if not self.kind in 'iu': - raise ValueError("Invalid integer data type.") - - def min(self): - """Minimum value of given dtype.""" - if self.kind == 'u': - return 0 - else: - try: - val = iinfo._min_vals[self.key] - except KeyError: - val = int(-(1L << (self.bits-1))) - iinfo._min_vals[self.key] = val - return val - - min = property(min) - - def max(self): - """Maximum value of given dtype.""" - try: - val = iinfo._max_vals[self.key] - except KeyError: - if self.kind == 'u': - val = int((1L << self.bits) - 1) - else: - val = int((1L << (self.bits-1)) - 1) - iinfo._max_vals[self.key] = val - return val - - max = property(max) - - def __str__(self): - """String representation.""" - return '''\ -Machine parameters for %(dtype)s ---------------------------------------------------------------------- -min = %(min)s -max = %(max)s ---------------------------------------------------------------------- -''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max} - - -if __name__ == '__main__': - f = finfo(ntypes.single) - print 'single epsilon:',f.eps - print 'single tiny:',f.tiny - f = finfo(ntypes.float) - print 'float epsilon:',f.eps - print 'float tiny:',f.tiny - f = finfo(ntypes.longfloat) - print 'longfloat epsilon:',f.eps - print 'longfloat tiny:',f.tiny diff --git a/numpy-1.6.2/numpy/core/include/numpy/_neighborhood_iterator_imp.h 
b/numpy-1.6.2/numpy/core/include/numpy/_neighborhood_iterator_imp.h deleted file mode 100644 index e8860cbc73..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/_neighborhood_iterator_imp.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP -#error You should not include this header directly -#endif -/* - * Private API (here for inline) - */ -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); - -/* - * Update to next item of the iterator - * - * Note: this simply increment the coordinates vector, last dimension - * incremented first , i.e, for dimension 3 - * ... - * -1, -1, -1 - * -1, -1, 0 - * -1, -1, 1 - * .... - * -1, 0, -1 - * -1, 0, 0 - * .... - * 0, -1, -1 - * 0, -1, 0 - * .... - */ -#define _UPDATE_COORD_ITER(c) \ - wb = iter->coordinates[c] < iter->bounds[c][1]; \ - if (wb) { \ - iter->coordinates[c] += 1; \ - return 0; \ - } \ - else { \ - iter->coordinates[c] = iter->bounds[c][0]; \ - } - -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp i, wb; - - for (i = iter->nd - 1; i >= 0; --i) { - _UPDATE_COORD_ITER(i) - } - - return 0; -} - -/* - * Version optimized for 2d arrays, manual loop unrolling - */ -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp wb; - - _UPDATE_COORD_ITER(1) - _UPDATE_COORD_ITER(0) - - return 0; -} -#undef _UPDATE_COORD_ITER - -/* - * Advance to the next neighbour - */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) -{ - _PyArrayNeighborhoodIter_IncrCoord (iter); - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); - - return 0; -} - -/* - * Reset functions - */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp i; - - for (i = 0; i < iter->nd; ++i) { - iter->coordinates[i] = iter->bounds[i][0]; - } - iter->dataptr = 
iter->translate((PyArrayIterObject*)iter, iter->coordinates); - - return 0; -} diff --git a/numpy-1.6.2/numpy/core/include/numpy/_numpyconfig.h.in b/numpy-1.6.2/numpy/core/include/numpy/_numpyconfig.h.in deleted file mode 100644 index 2cd389d44c..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/_numpyconfig.h.in +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _NPY_NUMPYCONFIG_H_ -#error this header should not be included directly, always include numpyconfig.h instead -#endif - -#define NPY_SIZEOF_SHORT @SIZEOF_SHORT@ -#define NPY_SIZEOF_INT @SIZEOF_INT@ -#define NPY_SIZEOF_LONG @SIZEOF_LONG@ -#define NPY_SIZEOF_FLOAT @SIZEOF_FLOAT@ -#define NPY_SIZEOF_DOUBLE @SIZEOF_DOUBLE@ -#define NPY_SIZEOF_LONGDOUBLE @SIZEOF_LONG_DOUBLE@ -#define NPY_SIZEOF_PY_INTPTR_T @SIZEOF_PY_INTPTR_T@ - -#define NPY_SIZEOF_COMPLEX_FLOAT @SIZEOF_COMPLEX_FLOAT@ -#define NPY_SIZEOF_COMPLEX_DOUBLE @SIZEOF_COMPLEX_DOUBLE@ -#define NPY_SIZEOF_COMPLEX_LONGDOUBLE @SIZEOF_COMPLEX_LONG_DOUBLE@ - -@DEFINE_NPY_HAVE_DECL_ISNAN@ -@DEFINE_NPY_HAVE_DECL_ISINF@ -@DEFINE_NPY_HAVE_DECL_ISFINITE@ -@DEFINE_NPY_HAVE_DECL_SIGNBIT@ - -@DEFINE_NPY_NO_SIGNAL@ -#define NPY_NO_SMP @NPY_NO_SMP@ - -/* XXX: this has really nothing to do in a config file... */ -#define NPY_MATHLIB @MATHLIB@ - -@DEFINE_NPY_SIZEOF_LONGLONG@ -@DEFINE_NPY_SIZEOF_PY_LONG_LONG@ - -@DEFINE_NPY_ENABLE_SEPARATE_COMPILATION@ -#define NPY_VISIBILITY_HIDDEN @VISIBILITY_HIDDEN@ - -@DEFINE_NPY_USE_C99_FORMATS@ -@DEFINE_NPY_HAVE_COMPLEX_DOUBLE@ -@DEFINE_NPY_HAVE_COMPLEX_FLOAT@ -@DEFINE_NPY_HAVE_COMPLEX_LONG_DOUBLE@ -@DEFINE_NPY_USE_C99_COMPLEX@ - -#define NPY_ABI_VERSION @NPY_ABI_VERSION@ -#define NPY_API_VERSION @NPY_API_VERSION@ - -@DEFINE_NPY_HAVE_ENDIAN_H@ - -/* Ugly, but we can't test this in a proper manner without requiring a C++ - * compiler at the configuration stage of numpy ? 
*/ -#ifndef __STDC_FORMAT_MACROS - #define __STDC_FORMAT_MACROS 1 -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/arrayobject.h b/numpy-1.6.2/numpy/core/include/numpy/arrayobject.h deleted file mode 100644 index f64d2a6c3b..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/arrayobject.h +++ /dev/null @@ -1,21 +0,0 @@ - -/* This expects the following variables to be defined (besides - the usual ones from pyconfig.h - - SIZEOF_LONG_DOUBLE -- sizeof(long double) or sizeof(double) if no - long double is present on platform. - CHAR_BIT -- number of bits in a char (usually 8) - (should be in limits.h) - -*/ - -#ifndef Py_ARRAYOBJECT_H -#define Py_ARRAYOBJECT_H -#include "ndarrayobject.h" -#ifdef NPY_NO_PREFIX -#include "noprefix.h" -#endif - -#include "npy_interrupt.h" - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/arrayscalars.h b/numpy-1.6.2/numpy/core/include/numpy/arrayscalars.h deleted file mode 100644 index 64450e7132..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/arrayscalars.h +++ /dev/null @@ -1,175 +0,0 @@ -#ifndef _NPY_ARRAYSCALARS_H_ -#define _NPY_ARRAYSCALARS_H_ - -#ifndef _MULTIARRAYMODULE -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; -#endif - - -typedef struct { - PyObject_HEAD - signed char obval; -} PyByteScalarObject; - - -typedef struct { - PyObject_HEAD - short obval; -} PyShortScalarObject; - - -typedef struct { - PyObject_HEAD - int obval; -} PyIntScalarObject; - - -typedef struct { - PyObject_HEAD - long obval; -} PyLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longlong obval; -} PyLongLongScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned char obval; -} PyUByteScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned short obval; -} PyUShortScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned int obval; -} PyUIntScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned long obval; -} PyULongScalarObject; - - -typedef struct { - 
PyObject_HEAD - npy_ulonglong obval; -} PyULongLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_half obval; -} PyHalfScalarObject; - - -typedef struct { - PyObject_HEAD - float obval; -} PyFloatScalarObject; - - -typedef struct { - PyObject_HEAD - double obval; -} PyDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longdouble obval; -} PyLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cfloat obval; -} PyCFloatScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cdouble obval; -} PyCDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_clongdouble obval; -} PyCLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - PyObject * obval; -} PyObjectScalarObject; - -typedef struct { - PyObject_HEAD - npy_datetime obval; - PyArray_DatetimeMetaData obmeta; -} PyDatetimeScalarObject; - -typedef struct { - PyObject_HEAD - npy_timedelta obval; - PyArray_DatetimeMetaData obmeta; -} PyTimedeltaScalarObject; - - -typedef struct { - PyObject_HEAD - char obval; -} PyScalarObject; - -#define PyStringScalarObject PyStringObject -#define PyUnicodeScalarObject PyUnicodeObject - -typedef struct { - PyObject_VAR_HEAD - char *obval; - PyArray_Descr *descr; - int flags; - PyObject *base; -} PyVoidScalarObject; - -/* Macros - PyScalarObject - PyArrType_Type - are defined in ndarrayobject.h -*/ - -#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) -#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) -#define PyArrayScalar_FromLong(i) \ - ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) -#define PyArrayScalar_RETURN_FALSE \ - return Py_INCREF(PyArrayScalar_False), \ - PyArrayScalar_False -#define PyArrayScalar_RETURN_TRUE \ - return Py_INCREF(PyArrayScalar_True), \ - PyArrayScalar_True - -#define PyArrayScalar_New(cls) \ - 
Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) -#define PyArrayScalar_VAL(obj, cls) \ - ((Py##cls##ScalarObject *)obj)->obval -#define PyArrayScalar_ASSIGN(obj, cls, val) \ - PyArrayScalar_VAL(obj, cls) = val - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/fenv/fenv.c b/numpy-1.6.2/numpy/core/include/numpy/fenv/fenv.c deleted file mode 100644 index 9a8d1be100..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/fenv/fenv.c +++ /dev/null @@ -1,38 +0,0 @@ -/*- - * Copyright (c) 2004 David Schultz - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD$ - */ - -#include -#include "fenv.h" - -const fenv_t npy__fe_dfl_env = { - 0xffff0000, - 0xffff0000, - 0xffffffff, - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff } -}; diff --git a/numpy-1.6.2/numpy/core/include/numpy/fenv/fenv.h b/numpy-1.6.2/numpy/core/include/numpy/fenv/fenv.h deleted file mode 100644 index 79a215fc3e..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/fenv/fenv.h +++ /dev/null @@ -1,224 +0,0 @@ -/*- - * Copyright (c) 2004 David Schultz - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD$ - */ - -#ifndef _FENV_H_ -#define _FENV_H_ - -#include -#include - -typedef struct { - __uint32_t __control; - __uint32_t __status; - __uint32_t __tag; - char __other[16]; -} fenv_t; - -typedef __uint16_t fexcept_t; - -/* Exception flags */ -#define FE_INVALID 0x01 -#define FE_DENORMAL 0x02 -#define FE_DIVBYZERO 0x04 -#define FE_OVERFLOW 0x08 -#define FE_UNDERFLOW 0x10 -#define FE_INEXACT 0x20 -#define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \ - FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW) - -/* Rounding modes */ -#define FE_TONEAREST 0x0000 -#define FE_DOWNWARD 0x0400 -#define FE_UPWARD 0x0800 -#define FE_TOWARDZERO 0x0c00 -#define _ROUND_MASK (FE_TONEAREST | FE_DOWNWARD | \ - FE_UPWARD | FE_TOWARDZERO) - -__BEGIN_DECLS - -/* Default floating-point environment */ -extern const fenv_t npy__fe_dfl_env; -#define FE_DFL_ENV (&npy__fe_dfl_env) - -#define __fldcw(__cw) __asm __volatile("fldcw %0" : : "m" (__cw)) -#define __fldenv(__env) __asm __volatile("fldenv %0" : : "m" (__env)) -#define __fnclex() __asm __volatile("fnclex") -#define __fnstenv(__env) __asm __volatile("fnstenv %0" : "=m" (*(__env))) -#define __fnstcw(__cw) __asm __volatile("fnstcw %0" : "=m" (*(__cw))) -#define __fnstsw(__sw) __asm __volatile("fnstsw %0" : "=am" (*(__sw))) -#define __fwait() __asm __volatile("fwait") - -static __inline int -feclearexcept(int __excepts) -{ - fenv_t __env; - - if (__excepts == FE_ALL_EXCEPT) { - __fnclex(); - } else { - __fnstenv(&__env); - __env.__status &= ~__excepts; - __fldenv(__env); - } - return (0); -} - -static __inline int -fegetexceptflag(fexcept_t *__flagp, int __excepts) -{ - __uint16_t __status; - - __fnstsw(&__status); - *__flagp = __status & __excepts; - return (0); -} - -static __inline int -fesetexceptflag(const fexcept_t *__flagp, int __excepts) -{ - fenv_t __env; - - __fnstenv(&__env); - __env.__status &= ~__excepts; - __env.__status |= *__flagp & __excepts; - __fldenv(__env); - return (0); -} - -static __inline int 
-feraiseexcept(int __excepts) -{ - fexcept_t __ex = __excepts; - - fesetexceptflag(&__ex, __excepts); - __fwait(); - return (0); -} - -static __inline int -fetestexcept(int __excepts) -{ - __uint16_t __status; - - __fnstsw(&__status); - return (__status & __excepts); -} - -static __inline int -fegetround(void) -{ - int __control; - - __fnstcw(&__control); - return (__control & _ROUND_MASK); -} - -static __inline int -fesetround(int __round) -{ - int __control; - - if (__round & ~_ROUND_MASK) - return (-1); - __fnstcw(&__control); - __control &= ~_ROUND_MASK; - __control |= __round; - __fldcw(__control); - return (0); -} - -static __inline int -fegetenv(fenv_t *__envp) -{ - int __control; - - /* - * fnstenv masks all exceptions, so we need to save and - * restore the control word to avoid this side effect. - */ - __fnstcw(&__control); - __fnstenv(__envp); - __fldcw(__control); - return (0); -} - -static __inline int -feholdexcept(fenv_t *__envp) -{ - - __fnstenv(__envp); - __fnclex(); - return (0); -} - -static __inline int -fesetenv(const fenv_t *__envp) -{ - - __fldenv(*__envp); - return (0); -} - -static __inline int -feupdateenv(const fenv_t *__envp) -{ - __uint16_t __status; - - __fnstsw(&__status); - __fldenv(*__envp); - feraiseexcept(__status & FE_ALL_EXCEPT); - return (0); -} - -#if __BSD_VISIBLE - -static __inline int -fesetmask(int __mask) -{ - int __control; - - __fnstcw(&__control); - __mask = (__control | FE_ALL_EXCEPT) & ~__mask; - __fldcw(__mask); - return (~__control & FE_ALL_EXCEPT); -} - -static __inline int -fegetmask(void) -{ - int __control; - - __fnstcw(&__control); - return (~__control & FE_ALL_EXCEPT); -} - -#endif /* __BSD_VISIBLE */ - -__END_DECLS - -#endif /* !_FENV_H_ */ diff --git a/numpy-1.6.2/numpy/core/include/numpy/halffloat.h b/numpy-1.6.2/numpy/core/include/numpy/halffloat.h deleted file mode 100644 index c6bb726bc6..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/halffloat.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef 
__NPY_HALFFLOAT_H__ -#define __NPY_HALFFLOAT_H__ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Half-precision routines - */ - -/* Conversions */ -float npy_half_to_float(npy_half h); -double npy_half_to_double(npy_half h); -npy_half npy_float_to_half(float f); -npy_half npy_double_to_half(double d); -/* Comparisons */ -int npy_half_eq(npy_half h1, npy_half h2); -int npy_half_ne(npy_half h1, npy_half h2); -int npy_half_le(npy_half h1, npy_half h2); -int npy_half_lt(npy_half h1, npy_half h2); -int npy_half_ge(npy_half h1, npy_half h2); -int npy_half_gt(npy_half h1, npy_half h2); -/* faster *_nonan variants for when you know h1 and h2 are not NaN */ -int npy_half_eq_nonan(npy_half h1, npy_half h2); -int npy_half_lt_nonan(npy_half h1, npy_half h2); -int npy_half_le_nonan(npy_half h1, npy_half h2); -/* Miscellaneous functions */ -int npy_half_iszero(npy_half h); -int npy_half_isnan(npy_half h); -int npy_half_isinf(npy_half h); -int npy_half_isfinite(npy_half h); -int npy_half_signbit(npy_half h); -npy_half npy_half_copysign(npy_half x, npy_half y); -npy_half npy_half_spacing(npy_half h); -npy_half npy_half_nextafter(npy_half x, npy_half y); - -/* - * Half-precision constants - */ - -#define NPY_HALF_ZERO (0x0000u) -#define NPY_HALF_PZERO (0x0000u) -#define NPY_HALF_NZERO (0x8000u) -#define NPY_HALF_ONE (0x3c00u) -#define NPY_HALF_NEGONE (0xbc00u) -#define NPY_HALF_PINF (0x7c00u) -#define NPY_HALF_NINF (0xfc00u) -#define NPY_HALF_NAN (0x7e00u) - -/* - * Bit-level conversions - */ - -npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); -npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); -npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); -npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/ndarrayobject.h b/numpy-1.6.2/numpy/core/include/numpy/ndarrayobject.h deleted file mode 100644 index 25beceab74..0000000000 --- 
a/numpy-1.6.2/numpy/core/include/numpy/ndarrayobject.h +++ /dev/null @@ -1,233 +0,0 @@ -/* - * DON'T INCLUDE THIS DIRECTLY. - */ - -#ifndef NPY_NDARRAYOBJECT_H -#define NPY_NDARRAYOBJECT_H -#ifdef __cplusplus -#define CONFUSE_EMACS { -#define CONFUSE_EMACS2 } -extern "C" CONFUSE_EMACS -#undef CONFUSE_EMACS -#undef CONFUSE_EMACS2 -/* ... otherwise a semi-smart identer (like emacs) tries to indent - everything when you're typing */ -#endif - -#include "ndarraytypes.h" - -/* Includes the "function" C-API -- these are all stored in a - list of pointers --- one for each file - The two lists are concatenated into one in multiarray. - - They are available as import_array() -*/ - -#include "__multiarray_api.h" - - -/* C-API that requries previous API to be defined */ - -#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type) - -#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) - -#define PyArray_HasArrayInterfaceType(op, type, context, out) \ - ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromArrayAttr(op, type, context)) != \ - Py_NotImplemented)) - -#define PyArray_HasArrayInterface(op, out) \ - PyArray_HasArrayInterfaceType(op, NULL, NULL, out) - -#define PyArray_IsZeroDim(op) (PyArray_Check(op) && (PyArray_NDIM(op) == 0)) - -#define PyArray_IsScalar(obj, cls) \ - (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) - -#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ - PyArray_IsZeroDim(m)) - -#define PyArray_IsPythonNumber(obj) \ - (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ - PyLong_Check(obj) || PyBool_Check(obj)) - -#define PyArray_IsPythonScalar(obj) \ - (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ - PyUnicode_Check(obj)) - -#define PyArray_IsAnyScalar(obj) \ - (PyArray_IsScalar(obj, Generic) || 
PyArray_IsPythonScalar(obj)) - -#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ - PyArray_CheckScalar(obj)) - -#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \ - || PyLong_Check(obj) \ - || PyArray_IsScalar((obj), Integer)) - - -#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ - Py_INCREF(m), (m) : \ - (PyArrayObject *)(PyArray_Copy(m))) - -#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ - PyArray_CompareLists(PyArray_DIMS(a1), \ - PyArray_DIMS(a2), \ - PyArray_NDIM(a1))) - -#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) -#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) -#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) - -#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ - NULL) - -#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ - PyArray_DescrFromType(type), 0, 0, 0, NULL); - -#define PyArray_FROM_OTF(m, type, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ - (((flags) & NPY_ENSURECOPY) ? \ - ((flags) | NPY_DEFAULT) : (flags)), NULL) - -#define PyArray_FROMANY(m, type, min, max, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ - (((flags) & NPY_ENSURECOPY) ? 
\ - (flags) | NPY_DEFAULT : (flags)), NULL) - -#define PyArray_ZEROS(m, dims, type, fortran) \ - PyArray_Zeros(m, dims, PyArray_DescrFromType(type), fortran) - -#define PyArray_EMPTY(m, dims, type, fortran) \ - PyArray_Empty(m, dims, PyArray_DescrFromType(type), fortran) - -#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ - PyArray_NBYTES(obj)) - -#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) -#define NPY_REFCOUNT PyArray_REFCOUNT -#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) - -#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_DEFAULT, NULL) - -#define PyArray_EquivArrTypes(a1, a2) \ - PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) - -#define PyArray_EquivByteorders(b1, b2) \ - (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) - -#define PyArray_SimpleNew(nd, dims, typenum) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) - -#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ - data, 0, NPY_CARRAY, NULL) - -#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ - NULL, NULL, 0, NULL) - -#define PyArray_ToScalar(data, arr) \ - PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) - - -/* These might be faster without the dereferencing of obj - going on inside -- of course an optimizing compiler should - inline the constants inside a for loop making it a moot point -*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0])) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1])) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - 
(k)*PyArray_STRIDES(obj)[2])) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2] + \ - (l)*PyArray_STRIDES(obj)[3])) - -#define PyArray_XDECREF_ERR(obj) \ - if (obj && (PyArray_FLAGS(obj) & NPY_UPDATEIFCOPY)) { \ - PyArray_FLAGS(PyArray_BASE(obj)) |= NPY_WRITEABLE; \ - PyArray_FLAGS(obj) &= ~NPY_UPDATEIFCOPY; \ - } \ - Py_XDECREF(obj) - -#define PyArray_DESCR_REPLACE(descr) do { \ - PyArray_Descr *_new_; \ - _new_ = PyArray_DescrNew(descr); \ - Py_XDECREF(descr); \ - descr = _new_; \ - } while(0) - -/* Copy should always return contiguous array */ -#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) - -#define PyArray_FromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_BEHAVED | NPY_ENSUREARRAY, NULL) - -#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_DEFAULT | NPY_ENSUREARRAY, NULL) - -#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ENSURECOPY | NPY_DEFAULT | \ - NPY_ENSUREARRAY, NULL) - -#define PyArray_Cast(mp, type_num) \ - PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) - -#define PyArray_Take(ap, items, axis) \ - PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) - -#define PyArray_Put(ap, items, values) \ - PyArray_PutTo(ap, items, values, NPY_RAISE) - -/* Compatibility with old Numeric stuff -- don't use in new code */ - -#define PyArray_FromDimsAndData(nd, d, type, data) \ - PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ - data) - -#include "old_defines.h" - -/* - Check to see if this key in the dictionary is the "title" - entry of the tuple (i.e. a duplicate dictionary entry in the fields - dict. 
-*/ - -#define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \ - (PyTuple_GET_ITEM((value), 2) == (key))) - - -/* Define python version independent deprecation macro */ - -#if PY_VERSION_HEX >= 0x02050000 -#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) -#else -#define DEPRECATE(msg) PyErr_Warn(PyExc_DeprecationWarning,msg) -#endif - - -#ifdef __cplusplus -} -#endif - - -#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/numpy-1.6.2/numpy/core/include/numpy/ndarraytypes.h b/numpy-1.6.2/numpy/core/include/numpy/ndarraytypes.h deleted file mode 100644 index 7acc6c46ce..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/ndarraytypes.h +++ /dev/null @@ -1,1499 +0,0 @@ -#ifndef NDARRAYTYPES_H -#define NDARRAYTYPES_H - -/* This is auto-generated by the installer */ -#include "numpyconfig.h" - -#include "npy_common.h" -#include "npy_endian.h" -#include "npy_cpu.h" -#include "utils.h" - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. - * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. 
- */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. - */ -#define NPY_FEATURE_VERSION NPY_API_VERSION - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. - */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define NPY_METADATA_DTSTR "__frequency__" - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - -/* - * We need to match npy_intp to a signed integer of the same size as a - * pointer variable. 
npy_uintp to the equivalent unsigned integer - */ - - -/* - * These characters correspond to the array type and the struct - * module - */ - -/* except 'p' -- signed integer for pointer type */ - -enum NPY_TYPECHAR { NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_HALFLTR = 'e', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_DATETIMELTR = 'M', - NPY_TIMEDELTALTR = 'm', - NPY_CHARLTR = 'c', - - /* - * No Descriptor, just a define -- this let's - * Python users specify an array of integers - * large enough to hold a pointer on the - * platform - */ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -/* For specifying array memory layout or iteration order */ -typedef enum { - /* Fortran order if inputs are all Fortran, C otherwise */ - NPY_ANYORDER=-1, - /* C order */ - NPY_CORDER=0, - /* Fortran order */ - NPY_FORTRANORDER=1, - /* An order as close to the inputs as possible */ - NPY_KEEPORDER=2 -} NPY_ORDER; - -/* For specifying allowed casting in operations 
which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4 -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -typedef enum { - NPY_FR_Y, - NPY_FR_M, - NPY_FR_W, - NPY_FR_B, - NPY_FR_D, - NPY_FR_h, - NPY_FR_m, - NPY_FR_s, - NPY_FR_ms, - NPY_FR_us, - NPY_FR_ns, - NPY_FR_ps, - NPY_FR_fs, - NPY_FR_as -} NPY_DATETIMEUNIT; - -#define NPY_DATETIME_NUMUNITS (NPY_FR_as + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_us - -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_B "B" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - - -/* - * This is to typedef npy_intp to the appropriate pointer size for - * this platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. - */ -typedef Py_intptr_t npy_intp; -typedef Py_uintptr_t npy_uintp; -#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T - -#ifdef constchar -#undef constchar -#endif - -#if (PY_VERSION_HEX < 0x02050000) - #ifndef PY_SSIZE_T_MIN - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #endif -#define NPY_SSIZE_T_PYFMT "i" -#undef PyIndex_Check -#define constchar const char -#define PyIndex_Check(op) 0 -#else -#define NPY_SSIZE_T_PYFMT "n" -#define constchar char -#endif - -/* NPY_INTP_FMT Note: - * Unlike the other NPY_*_FMT macros which are used with - * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and - * PyString_Format. 
These functions use different formatting - * codes which are portably specified according to the Python - * documentation. See ticket #1795. - * - * On Windows x64, the LONGLONG formatter should be used, but - * in Python 2.6 the %lld formatter is not supported. In this - * case we work around the problem by using the %zd formatter. - */ -#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT - #define NPY_INTP NPY_INT - #define NPY_UINTP NPY_UINT - #define PyIntpArrType_Type PyIntArrType_Type - #define PyUIntpArrType_Type PyUIntArrType_Type - #define NPY_MAX_INTP NPY_MAX_INT - #define NPY_MIN_INTP NPY_MIN_INT - #define NPY_MAX_UINTP NPY_MAX_UINT - #define NPY_INTP_FMT "d" -#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG - #define NPY_INTP NPY_LONG - #define NPY_UINTP NPY_ULONG - #define PyIntpArrType_Type PyLongArrType_Type - #define PyUIntpArrType_Type PyULongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONG - #define NPY_MIN_INTP MIN_LONG - #define NPY_MAX_UINTP NPY_MAX_ULONG - #define NPY_INTP_FMT "ld" -#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) - #define NPY_INTP NPY_LONGLONG - #define NPY_UINTP NPY_ULONGLONG - #define PyIntpArrType_Type PyLongLongArrType_Type - #define PyUIntpArrType_Type PyULongLongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONGLONG - #define NPY_MIN_INTP NPY_MIN_LONGLONG - #define NPY_MAX_UINTP NPY_MAX_ULONGLONG - #if (PY_VERSION_HEX >= 0x02070000) - #define NPY_INTP_FMT "lld" - #else - #define NPY_INTP_FMT "zd" - #endif -#endif - -/* - * We can only use C99 formats for npy_int_p if it is the same as - * intp_t, hence the condition on HAVE_UNITPTR_T - */ -#if (NPY_USE_C99_FORMATS) == 1 \ - && (defined HAVE_UINTPTR_T) \ - && (defined HAVE_INTTYPES_H) - #include - #undef NPY_INTP_FMT - #define NPY_INTP_FMT PRIdPTR -#endif - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - 
- /* - * Macros to define how array, and dimension/strides data is - * allocated. - */ - - /* Data buffer */ -#define PyDataMem_NEW(size) ((char *)malloc(size)) -#define PyDataMem_FREE(ptr) free(ptr) -#define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size)) - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* define NPY_IS_COMPLEX */ - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - PyTypeObject *typeobj; /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - char kind; /* kind for this type */ - char type; /* unique-character representing this type */ - char byteorder; /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char flags; /* flags describing data type */ - int type_num; /* number representing this type */ - int elsize; /* element size for this type */ - int alignment; /* alignment needed for this type */ - struct _arr_descr \ - *subarray; /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - PyObject *fields; /* The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - - PyObject *names; /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - - PyArray_ArrFuncs *f; /* - * a table of functions specific for each - * basic data descriptor - */ - - PyObject *metadata; /* Metadata about this dtype */ -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. It is recommended to use the macros - * defined below (PyArray_DATA and friends) access fields here, instead - * of the members themselves. 
- */ - -typedef struct PyArrayObject { - PyObject_HEAD - char *data; /* pointer to raw data buffer */ - int nd; /* number of dimensions, also called ndim */ - npy_intp *dimensions; /* size in each dimension */ - npy_intp *strides; /* - * bytes to jump to get to the - * next element in each dimension - */ - PyObject *base; /* - * This object should be decref'd upon - * deletion of array - * - * For views it points to the original - * array - * - * For creation from buffer object it - * points to an object that shold be - * decref'd on deletion - * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one - */ - PyArray_Descr *descr; /* Pointer to type structure */ - int flags; /* Flags describing array -- see below */ - PyObject *weakreflist; /* For weakreferences */ -} PyArrayObject; - -#define NPY_AO PyArrayObject - -#define fortran fortran_ /* For some compilers */ - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - - -typedef struct { - NPY_DATETIMEUNIT base; - int num; - int den; /* - * Converted to 1 on input for now -- an - * input-only mechanism - */ - int events; -} PyArray_DatetimeMetaData; - -typedef struct { - npy_longlong year; - int month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -typedef struct { - npy_longlong day; - int sec, us, ps, as; -} npy_timedeltastruct; - -#if PY_VERSION_HEX >= 0x03000000 -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ - PyDict_GetItemString( \ - descr->metadata, NPY_METADATA_DTSTR), NULL)))) -#else -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? 
NULL : \ - ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ - PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) -#endif - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - */ -#define NPY_CONTIGUOUS 0x0001 - -/* - * set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - */ -#define NPY_FORTRAN 0x0002 - -#define NPY_C_CONTIGUOUS NPY_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_FORTRAN - -/* - * Note: all 0-d arrays are CONTIGUOUS and FORTRAN contiguous. If a - * 1-d array is CONTIGUOUS it is also FORTRAN contiguous - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - */ -#define NPY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - */ -#define NPY_ENSURECOPY 0x0020 - -/* Make sure the returned array is a base-class ndarray */ -#define NPY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. 
- */ -#define NPY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropiate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - */ -#define NPY_ALIGNED 0x0100 - -/* Array data has the native endianness */ -#define NPY_NOTSWAPPED 0x0200 - -/* Array data is writeable */ -#define NPY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when this array is deallocated - */ -#define NPY_UPDATEIFCOPY 0x1000 - -/* This flag is for the array interface */ -#define NPY_ARR_HAS_DESCR 0x0800 - - -#define NPY_BEHAVED (NPY_ALIGNED | NPY_WRITEABLE) -#define NPY_BEHAVED_NS (NPY_ALIGNED | NPY_WRITEABLE | NPY_NOTSWAPPED) -#define NPY_CARRAY (NPY_CONTIGUOUS | NPY_BEHAVED) -#define NPY_CARRAY_RO (NPY_CONTIGUOUS | NPY_ALIGNED) -#define NPY_FARRAY (NPY_FORTRAN | NPY_BEHAVED) -#define NPY_FARRAY_RO (NPY_FORTRAN | NPY_ALIGNED) -#define NPY_DEFAULT NPY_CARRAY -#define NPY_IN_ARRAY NPY_CARRAY_RO -#define NPY_OUT_ARRAY NPY_CARRAY -#define NPY_INOUT_ARRAY (NPY_CARRAY | NPY_UPDATEIFCOPY) -#define NPY_IN_FARRAY NPY_FARRAY_RO -#define NPY_OUT_FARRAY NPY_FARRAY -#define NPY_INOUT_FARRAY (NPY_FARRAY | NPY_UPDATEIFCOPY) - -#define NPY_UPDATE_ALL (NPY_CONTIGUOUS | NPY_FORTRAN | NPY_ALIGNED) - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(cdouble) -- ususally 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? 
((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined - * here. - */ - - -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject *)(m))->flags & (FLAGS)) == (FLAGS)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_F_CONTIGUOUS) - -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS _save = PyEval_SaveThread(); -#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API __save__ = PyGILState_Ensure(); -#define NPY_DISABLE_C_API PyGILState_Release(__save__); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define 
NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/***************************** - * New iterator object - *****************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be 
read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp 
limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} - -#define _PyArray_ITER_NEXT1(it) { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} - -#define _PyArray_ITER_NEXT2(it) { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} - -#define _PyArray_ITER_NEXT3(it) { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ - } \ -} - -#define PyArray_ITER_NEXT(it) { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += _PyAIT(it)->ao->descr->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - 
else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} - -#define PyArray_ITER_GOTO(it, destination) { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} - -#define PyArray_ITER_GOTO1D(it, ind) { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ - __npy_ind * _PyAIT(it)->ao->descr->elsize; \ - else { \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} - -#define PyArray_MultiIter_NEXT(multi) { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} - -#define PyArray_MultiIter_GOTO(multi, dest) { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} - -#define PyArray_MultiIter_GOTO1D(multi, ind) { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - -/* Store the information needed for fancy-indexing over an array */ - -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current 
index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object - iterators */ - PyArrayIterObject *ait; /* flat Iterator for - underlying array */ - - /* flat iterator for subspace (when numiter < nd) */ - PyArrayIterObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - /* - * if subspace iteration, the these are the coordinates to the - * start of the subspace. - */ - npy_intp bscoord[NPY_MAXDIMS]; - - PyObject *indexobj; /* creating obj */ - int consec; - char *dataptr; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for 
constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE -#define PyArray_DEFAULT NPY_DEFAULT_TYPE - -/* - * All sorts of useful ways to look into a PyArrayObject. These are - * the recommended over casting to PyArrayObject and accessing the - * members directly. - */ - -#define PyArray_NDIM(obj) (((PyArrayObject *)(obj))->nd) -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_FORTRAN)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_FORTRAN) && \ - (PyArray_NDIM(m) > 1)) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_FORTRAN) ? 
\ - NPY_FORTRAN : 0)) - -#define FORTRAN_IF PyArray_FORTRAN_IF -#define PyArray_DATA(obj) ((void *)(((PyArrayObject *)(obj))->data)) -#define PyArray_BYTES(obj) (((PyArrayObject *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject *)(obj))->flags) -#define PyArray_ITEMSIZE(obj) (((PyArrayObject *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) (((PyArrayObject *)(obj))->descr->type_num) - -#define PyArray_GETITEM(obj,itemptr) \ - ((PyArrayObject *)(obj))->descr->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - ((PyArrayObject *)(obj))->descr->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) - - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define 
PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || \ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ - ((type) <=NPY_TIMEDELTA)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) - -#define PyArray_ISBOOL(obj) 
PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - - /* - * FIXME: This should check for a flag on the data-type that - * states whether or not it is variable length. Because the - * ISFLEXIBLE check is hard-coded to the built-in data-types. 
- */ -#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) - -#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) - - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) - - -/* - * This is the form of the struct that's returned pointed by the - * PyCObject attribute of an array __array_struct__. See - * http://numpy.scipy.org/array_interface.shtml for the full - * documentation. - */ -typedef struct { - int two; /* - * contains the integer 2 as a sanity - * check - */ - - int nd; /* number of dimensions */ - - char typekind; /* - * kind in array --- character code of - * typestr - */ - - int itemsize; /* size of each element */ - - int flags; /* - * how should be data interpreted. Valid - * flags are CONTIGUOUS (1), FORTRAN (2), - * ALIGNED (0x100), NOTSWAPPED (0x200), and - * WRITEABLE (0x400). 
ARR_HAS_DESCR (0x800) - * states that arrdescr field is present in - * structure - */ - - npy_intp *shape; /* - * A length-nd array of shape - * information - */ - - npy_intp *strides; /* A length-nd array of stride information */ - - void *data; /* A pointer to the first element of the array */ - - PyObject *descr; /* - * A list of fields or NULL (ignored if flags - * does not have ARR_HAS_DESCR flag set) - */ -} PyArrayInterface; - -#endif /* NPY_ARRAYTYPES_H */ diff --git a/numpy-1.6.2/numpy/core/include/numpy/noprefix.h b/numpy-1.6.2/numpy/core/include/numpy/noprefix.h deleted file mode 100644 index 571c9d0828..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/noprefix.h +++ /dev/null @@ -1,203 +0,0 @@ -#ifndef NPY_NOPREFIX_H -#define NPY_NOPREFIX_H - -/* You can directly include noprefix.h as a backward -compatibility measure*/ -#ifndef NPY_NO_PREFIX -#include "ndarrayobject.h" -#endif - -#define MAX_DIMS NPY_MAXDIMS - -#define longlong npy_longlong -#define ulonglong npy_ulonglong -#define Bool npy_bool -#define longdouble npy_longdouble -#define byte npy_byte - -#ifndef _BSD_SOURCE -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#endif - -#define ubyte npy_ubyte -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#define cfloat npy_cfloat -#define cdouble npy_cdouble -#define clongdouble npy_clongdouble -#define Int8 npy_int8 -#define UInt8 npy_uint8 -#define Int16 npy_int16 -#define UInt16 npy_uint16 -#define Int32 npy_int32 -#define UInt32 npy_uint32 -#define Int64 npy_int64 -#define UInt64 npy_uint64 -#define Int128 npy_int128 -#define UInt128 npy_uint128 -#define Int256 npy_int256 -#define UInt256 npy_uint256 -#define Float16 npy_float16 -#define Complex32 npy_complex32 -#define Float32 npy_float32 -#define Complex64 npy_complex64 -#define Float64 npy_float64 -#define Complex128 npy_complex128 -#define Float80 npy_float80 -#define Complex160 npy_complex160 -#define Float96 npy_float96 -#define 
Complex192 npy_complex192 -#define Float128 npy_float128 -#define Complex256 npy_complex256 -#define intp npy_intp -#define uintp npy_uintp -#define datetime npy_datetime -#define timedelta npy_timedelta - -#define SIZEOF_INTP NPY_SIZEOF_INTP -#define SIZEOF_UINTP NPY_SIZEOF_UINTP -#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME -#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA - -#define LONGLONG_FMT NPY_LONGLONG_FMT -#define ULONGLONG_FMT NPY_ULONGLONG_FMT -#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX -#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX(x) - -#define MAX_INT8 127 -#define MIN_INT8 -128 -#define MAX_UINT8 255 -#define MAX_INT16 32767 -#define MIN_INT16 -32768 -#define MAX_UINT16 65535 -#define MAX_INT32 2147483647 -#define MIN_INT32 (-MAX_INT32 - 1) -#define MAX_UINT32 4294967295U -#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) -#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) -#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) -#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) -#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) -#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) - -#define MAX_BYTE NPY_MAX_BYTE -#define MIN_BYTE NPY_MIN_BYTE -#define MAX_UBYTE NPY_MAX_UBYTE -#define MAX_SHORT NPY_MAX_SHORT -#define MIN_SHORT NPY_MIN_SHORT -#define MAX_USHORT NPY_MAX_USHORT -#define MAX_INT NPY_MAX_INT -#define MIN_INT NPY_MIN_INT -#define MAX_UINT NPY_MAX_UINT -#define MAX_LONG NPY_MAX_LONG -#define MIN_LONG NPY_MIN_LONG -#define MAX_ULONG NPY_MAX_ULONG -#define MAX_LONGLONG NPY_MAX_LONGLONG -#define MIN_LONGLONG NPY_MIN_LONGLONG -#define MAX_ULONGLONG NPY_MAX_ULONGLONG -#define MIN_DATETIME 
NPY_MIN_DATETIME -#define MAX_DATETIME NPY_MAX_DATETIME -#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA -#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA - -#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE -#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG -#define SIZEOF_HALF NPY_SIZEOF_HALF -#define BITSOF_BOOL NPY_BITSOF_BOOL -#define BITSOF_CHAR NPY_BITSOF_CHAR -#define BITSOF_SHORT NPY_BITSOF_SHORT -#define BITSOF_INT NPY_BITSOF_INT -#define BITSOF_LONG NPY_BITSOF_LONG -#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG -#define BITSOF_HALF NPY_BITSOF_HALF -#define BITSOF_FLOAT NPY_BITSOF_FLOAT -#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE -#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE -#define BITSOF_DATETIME NPY_BITSOF_DATETIME -#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA - -#define PyArray_UCS4 npy_ucs4 -#define _pya_malloc PyArray_malloc -#define _pya_free PyArray_free -#define _pya_realloc PyArray_realloc - -#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF -#define BEGIN_THREADS NPY_BEGIN_THREADS -#define END_THREADS NPY_END_THREADS -#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF -#define ALLOW_C_API NPY_ALLOW_C_API -#define DISABLE_C_API NPY_DISABLE_C_API - -#define PY_FAIL NPY_FAIL -#define PY_SUCCEED NPY_SUCCEED - -#ifndef TRUE -#define TRUE NPY_TRUE -#endif - -#ifndef FALSE -#define FALSE NPY_FALSE -#endif - -#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT - -#define CONTIGUOUS NPY_CONTIGUOUS -#define C_CONTIGUOUS NPY_C_CONTIGUOUS -#define FORTRAN NPY_FORTRAN -#define F_CONTIGUOUS NPY_F_CONTIGUOUS -#define OWNDATA NPY_OWNDATA -#define FORCECAST NPY_FORCECAST -#define ENSURECOPY NPY_ENSURECOPY -#define ENSUREARRAY NPY_ENSUREARRAY -#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES -#define ALIGNED NPY_ALIGNED -#define NOTSWAPPED NPY_NOTSWAPPED -#define WRITEABLE NPY_WRITEABLE -#define UPDATEIFCOPY NPY_UPDATEIFCOPY -#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR -#define BEHAVED NPY_BEHAVED -#define BEHAVED_NS NPY_BEHAVED_NS -#define CARRAY NPY_CARRAY -#define CARRAY_RO NPY_CARRAY_RO -#define FARRAY 
NPY_FARRAY -#define FARRAY_RO NPY_FARRAY_RO -#define DEFAULT NPY_DEFAULT -#define IN_ARRAY NPY_IN_ARRAY -#define OUT_ARRAY NPY_OUT_ARRAY -#define INOUT_ARRAY NPY_INOUT_ARRAY -#define IN_FARRAY NPY_IN_FARRAY -#define OUT_FARRAY NPY_OUT_FARRAY -#define INOUT_FARRAY NPY_INOUT_FARRAY -#define UPDATE_ALL NPY_UPDATE_ALL - -#define OWN_DATA NPY_OWNDATA -#define BEHAVED_FLAGS NPY_BEHAVED -#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS -#define CARRAY_FLAGS_RO NPY_CARRAY_RO -#define CARRAY_FLAGS NPY_CARRAY -#define FARRAY_FLAGS NPY_FARRAY -#define FARRAY_FLAGS_RO NPY_FARRAY_RO -#define DEFAULT_FLAGS NPY_DEFAULT -#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS - -#ifndef MIN -#define MIN PyArray_MIN -#endif -#ifndef MAX -#define MAX PyArray_MAX -#endif -#define MAX_INTP NPY_MAX_INTP -#define MIN_INTP NPY_MIN_INTP -#define MAX_UINTP NPY_MAX_UINTP -#define INTP_FMT NPY_INTP_FMT - -#define REFCOUNT PyArray_REFCOUNT -#define MAX_ELSIZE NPY_MAX_ELSIZE - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_3kcompat.h b/numpy-1.6.2/numpy/core/include/numpy/npy_3kcompat.h deleted file mode 100644 index 02355e8522..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_3kcompat.h +++ /dev/null @@ -1,402 +0,0 @@ -/* - * This is a convenience header file providing compatibility utilities - * for supporting Python 2 and Python 3 in the same code base. - * - * If you want to use this for your own projects, it's recommended to make a - * copy of it. Although the stuff below is unlikely to change, we don't provide - * strong backwards compatibility guarantees at the moment. 
- */ - -#ifndef _NPY_3KCOMPAT_H_ -#define _NPY_3KCOMPAT_H_ - -#include -#include - -#if PY_VERSION_HEX >= 0x03000000 -#ifndef NPY_PY3K -#define NPY_PY3K -#endif -#endif - -#include "numpy/npy_common.h" -#include "numpy/ndarrayobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * PyInt -> PyLong - */ - -#if defined(NPY_PY3K) -/* Return True only if the long fits in a C long */ -static NPY_INLINE int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); -} - -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t - -/* NOTE: - * - * Since the PyLong type is very different from the fixed-range PyInt, - * we don't define PyInt_Type -> PyLong_Type. - */ -#endif /* NPY_PY3K */ - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define PyString_FromString PyBytes_FromString -#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define 
PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat -#define PyUString_Concat PyString_Concat -#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#endif /* NPY_PY3K */ - - -static NPY_INLINE void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - PyObject *newobj; - newobj = PyUnicode_Concat(*left, right); - Py_DECREF(*left); - Py_DECREF(right); - *left = newobj; -} - -static NPY_INLINE void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - PyObject *newobj; - newobj = PyUnicode_Concat(*left, right); - Py_DECREF(*left); - *left = newobj; -} - - -/* - * Accessing items of ob_base - */ - -#if (PY_VERSION_HEX < 0x02060000) -#define Py_TYPE(o) (((PyObject*)(o))->ob_type) -#define Py_REFCNT(o) (((PyObject*)(o))->ob_refcnt) -#define Py_SIZE(o) (((PyVarObject*)(o))->ob_size) -#endif - -/* - * PyFile_* 
compatibility - */ -#if defined(NPY_PY3K) - -/* - * Get a FILE* handle to the file represented by the Python object - */ -static NPY_INLINE FILE* -npy_PyFile_Dup(PyObject *file, char *mode) -{ - int fd, fd2; - PyObject *ret, *os; - Py_ssize_t pos; - FILE *handle; - /* Flush first to ensure things end up in the file in the correct order */ - ret = PyObject_CallMethod(file, "flush", ""); - if (ret == NULL) { - return NULL; - } - Py_DECREF(ret); - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return NULL; - } - os = PyImport_ImportModule("os"); - if (os == NULL) { - return NULL; - } - ret = PyObject_CallMethod(os, "dup", "i", fd); - Py_DECREF(os); - if (ret == NULL) { - return NULL; - } - fd2 = PyNumber_AsSsize_t(ret, NULL); - Py_DECREF(ret); -#ifdef _WIN32 - handle = _fdopen(fd2, mode); -#else - handle = fdopen(fd2, mode); -#endif - if (handle == NULL) { - PyErr_SetString(PyExc_IOError, - "Getting a FILE* from a Python file object failed"); - } - ret = PyObject_CallMethod(file, "tell", ""); - if (ret == NULL) { - fclose(handle); - return NULL; - } - pos = PyNumber_AsSsize_t(ret, PyExc_OverflowError); - Py_DECREF(ret); - if (PyErr_Occurred()) { - fclose(handle); - return NULL; - } - fseek(handle, pos, SEEK_SET); - return handle; -} - -/* - * Close the dup-ed file handle, and seek the Python one to the current position - */ -static NPY_INLINE int -npy_PyFile_DupClose(PyObject *file, FILE* handle) -{ - PyObject *ret; - long position; - position = ftell(handle); - fclose(handle); - - ret = PyObject_CallMethod(file, "seek", "li", position, 0); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - -static NPY_INLINE int -npy_PyFile_Check(PyObject *file) -{ - int fd; - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - -#else - -#define npy_PyFile_Dup(file, mode) PyFile_AsFile(file) -#define npy_PyFile_DupClose(file, handle) (0) -#define npy_PyFile_Check PyFile_Check - -#endif - -static 
NPY_INLINE PyObject* -npy_PyFile_OpenFile(PyObject *filename, char *mode) -{ - PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); - if (open == NULL) { - return NULL; - } - return PyObject_CallFunction(open, "Os", filename, mode); -} - -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static NPY_INLINE int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 0) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 0) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 0) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - -/* - * PyCObject functions adapted to PyCapsules. - * - * The main job here is to get rid of the improved error handling - * of PyCapsules. It's a shame... - */ -#if PY_VERSION_HEX >= 0x03000000 - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) -{ - PyObject *ret = PyCapsule_New(ptr, NULL, dtor); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) -{ - PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); - if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { - PyErr_Clear(); - Py_DECREF(ret); - ret = NULL; - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *obj) -{ - void *ret = PyCapsule_GetPointer(obj, NULL); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCapsule_GetContext(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCapsule_CheckExact(ptr); -} - -static void -simple_capsule_dtor(PyObject *cap) -{ - PyArray_free(PyCapsule_GetPointer(cap, NULL)); -} - 
-#else - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) -{ - return PyCObject_FromVoidPtr(ptr, dtor); -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, - void (*dtor)(void *, void *)) -{ - return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor); -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *ptr) -{ - return PyCObject_AsVoidPtr(ptr); -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCObject_GetDesc(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCObject_Check(ptr); -} - -static void -simple_capsule_dtor(void *ptr) -{ - PyArray_free(ptr); -} - -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* _NPY_3KCOMPAT_H_ */ diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_common.h b/numpy-1.6.2/numpy/core/include/numpy/npy_common.h deleted file mode 100644 index 145fe21423..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_common.h +++ /dev/null @@ -1,821 +0,0 @@ -#ifndef _NPY_COMMON_H_ -#define _NPY_COMMON_H_ - -/* This is auto-generated */ -#include "numpyconfig.h" - -#if defined(_MSC_VER) - #define NPY_INLINE __inline -#elif defined(__GNUC__) - #if defined(__STRICT_ANSI__) - #define NPY_INLINE __inline__ - #else - #define NPY_INLINE inline - #endif -#else - #define NPY_INLINE -#endif - -/* enums for detected endianness */ -enum { - NPY_CPU_UNKNOWN_ENDIAN, - NPY_CPU_LITTLE, - NPY_CPU_BIG -}; - -/* Some platforms don't define bool, long long, or long double. - Handle that here. 
-*/ - -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_HALF_FMT "g" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else - /* #define LONGLONG_FMT "lld" Another possible variant - #define ULONGLONG_FMT "llu" - - #define LONGLONG_FMT "qd" -- BSD perhaps? - #define ULONGLONG_FMT "qu" - */ -# define NPY_LONGLONG_FMT "Ld" -# define NPY_ULONGLONG_FMT "Lu" -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - - -#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - -/* These are for completeness */ -typedef float npy_float; -typedef double npy_double; -typedef short npy_short; -typedef int npy_int; -typedef long npy_long; - -/* - * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being - * able to do .real/.imag. Will have to convert code first. 
- */ -#if 0 -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) -typedef complex npy_cdouble; -#else -typedef struct { double real, imag; } npy_cdouble; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) -typedef complex float npy_cfloat; -#else -typedef struct { float real, imag; } npy_cfloat; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) -typedef complex long double npy_clongdouble; -#else -typedef struct {npy_longdouble real, imag;} npy_clongdouble; -#endif -#endif -#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE -#error npy_cdouble definition is not compatible with C99 complex definition ! \ - Please contact Numpy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { double real, imag; } npy_cdouble; - -#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT -#error npy_cfloat definition is not compatible with C99 complex definition ! \ - Please contact Numpy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { float real, imag; } npy_cfloat; - -#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE -#error npy_clongdouble definition is not compatible with C99 complex definition ! 
\ - Please contact Numpy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { npy_longdouble real, imag; } npy_clongdouble; - -/* - * numarray-style bit-width typedefs - */ -#define NPY_MAX_INT8 127 -#define NPY_MIN_INT8 -128 -#define NPY_MAX_UINT8 255 -#define NPY_MAX_INT16 32767 -#define NPY_MIN_INT16 -32768 -#define NPY_MAX_UINT16 65535 -#define NPY_MAX_INT32 2147483647 -#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) -#define NPY_MAX_UINT32 4294967295U -#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) -#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) -#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) -#define NPY_MIN_DATETIME NPY_MIN_INT64 -#define NPY_MAX_DATETIME NPY_MAX_INT64 -#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 -#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 - - /* Need to find the number of bits for each type and - make definitions accordingly. - - C states that sizeof(char) == 1 by definition - - So, just using the sizeof keyword won't help. - - It also looks like Python itself uses sizeof(char) quite a - bit, which by definition should be 1 all the time. 
- - Idea: Make Use of CHAR_BIT which should tell us how many - BITS per CHARACTER - */ - - /* Include platform definitions -- These are in the C89/90 standard */ -#include -#define NPY_MAX_BYTE SCHAR_MAX -#define NPY_MIN_BYTE SCHAR_MIN -#define NPY_MAX_UBYTE UCHAR_MAX -#define NPY_MAX_SHORT SHRT_MAX -#define NPY_MIN_SHORT SHRT_MIN -#define NPY_MAX_USHORT USHRT_MAX -#define NPY_MAX_INT INT_MAX -#ifndef INT_MIN -#define INT_MIN (-INT_MAX - 1) -#endif -#define NPY_MIN_INT INT_MIN -#define NPY_MAX_UINT UINT_MAX -#define NPY_MAX_LONG LONG_MAX -#define NPY_MIN_LONG LONG_MIN -#define NPY_MAX_ULONG ULONG_MAX - -#define NPY_SIZEOF_HALF 2 -#define NPY_SIZEOF_DATETIME 8 -#define NPY_SIZEOF_TIMEDELTA 8 - -#define NPY_BITSOF_BOOL (sizeof(npy_bool)*CHAR_BIT) -#define NPY_BITSOF_CHAR CHAR_BIT -#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) -#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) -#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) -#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) -#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) -#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) -#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) -#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) -#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) - -#if NPY_BITSOF_LONG == 8 -#define NPY_INT8 NPY_LONG -#define NPY_UINT8 NPY_ULONG - typedef long npy_int8; - typedef unsigned long npy_uint8; -#define PyInt8ScalarObject PyLongScalarObject -#define PyInt8ArrType_Type PyLongArrType_Type -#define PyUInt8ScalarObject PyULongScalarObject -#define PyUInt8ArrType_Type PyULongArrType_Type -#define NPY_INT8_FMT NPY_LONG_FMT -#define NPY_UINT8_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 16 -#define NPY_INT16 NPY_LONG -#define NPY_UINT16 NPY_ULONG - typedef long npy_int16; - typedef unsigned long npy_uint16; -#define PyInt16ScalarObject PyLongScalarObject -#define 
PyInt16ArrType_Type PyLongArrType_Type -#define PyUInt16ScalarObject PyULongScalarObject -#define PyUInt16ArrType_Type PyULongArrType_Type -#define NPY_INT16_FMT NPY_LONG_FMT -#define NPY_UINT16_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 32 -#define NPY_INT32 NPY_LONG -#define NPY_UINT32 NPY_ULONG - typedef long npy_int32; - typedef unsigned long npy_uint32; - typedef unsigned long npy_ucs4; -#define PyInt32ScalarObject PyLongScalarObject -#define PyInt32ArrType_Type PyLongArrType_Type -#define PyUInt32ScalarObject PyULongScalarObject -#define PyUInt32ArrType_Type PyULongArrType_Type -#define NPY_INT32_FMT NPY_LONG_FMT -#define NPY_UINT32_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 64 -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG - typedef long npy_int64; - typedef unsigned long npy_uint64; -#define PyInt64ScalarObject PyLongScalarObject -#define PyInt64ArrType_Type PyLongArrType_Type -#define PyUInt64ScalarObject PyULongScalarObject -#define PyUInt64ArrType_Type PyULongArrType_Type -#define NPY_INT64_FMT NPY_LONG_FMT -#define NPY_UINT64_FMT NPY_ULONG_FMT -#define MyPyLong_FromInt64 PyLong_FromLong -#define MyPyLong_AsInt64 PyLong_AsLong -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG -#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT -#endif - -#if NPY_BITSOF_LONGLONG == 8 -# ifndef NPY_INT8 -# define NPY_INT8 NPY_LONGLONG -# define NPY_UINT8 NPY_ULONGLONG - typedef npy_longlong npy_int8; - typedef npy_ulonglong npy_uint8; -# define PyInt8ScalarObject PyLongLongScalarObject -# define PyInt8ArrType_Type PyLongLongArrType_Type -# define PyUInt8ScalarObject PyULongLongScalarObject -# define PyUInt8ArrType_Type 
PyULongLongArrType_Type -#define NPY_INT8_FMT NPY_LONGLONG_FMT -#define NPY_UINT8_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT8 -# define NPY_MIN_LONGLONG NPY_MIN_INT8 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 -#elif NPY_BITSOF_LONGLONG == 16 -# ifndef NPY_INT16 -# define NPY_INT16 NPY_LONGLONG -# define NPY_UINT16 NPY_ULONGLONG - typedef npy_longlong npy_int16; - typedef npy_ulonglong npy_uint16; -# define PyInt16ScalarObject PyLongLongScalarObject -# define PyInt16ArrType_Type PyLongLongArrType_Type -# define PyUInt16ScalarObject PyULongLongScalarObject -# define PyUInt16ArrType_Type PyULongLongArrType_Type -#define NPY_INT16_FMT NPY_LONGLONG_FMT -#define NPY_UINT16_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT16 -# define NPY_MIN_LONGLONG NPY_MIN_INT16 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 -#elif NPY_BITSOF_LONGLONG == 32 -# ifndef NPY_INT32 -# define NPY_INT32 NPY_LONGLONG -# define NPY_UINT32 NPY_ULONGLONG - typedef npy_longlong npy_int32; - typedef npy_ulonglong npy_uint32; - typedef npy_ulonglong npy_ucs4; -# define PyInt32ScalarObject PyLongLongScalarObject -# define PyInt32ArrType_Type PyLongLongArrType_Type -# define PyUInt32ScalarObject PyULongLongScalarObject -# define PyUInt32ArrType_Type PyULongLongArrType_Type -#define NPY_INT32_FMT NPY_LONGLONG_FMT -#define NPY_UINT32_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT32 -# define NPY_MIN_LONGLONG NPY_MIN_INT32 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 -#elif NPY_BITSOF_LONGLONG == 64 -# ifndef NPY_INT64 -# define NPY_INT64 NPY_LONGLONG -# define NPY_UINT64 NPY_ULONGLONG - typedef npy_longlong npy_int64; - typedef npy_ulonglong npy_uint64; -# define PyInt64ScalarObject PyLongLongScalarObject -# define PyInt64ArrType_Type PyLongLongArrType_Type -# define PyUInt64ScalarObject PyULongLongScalarObject -# define PyUInt64ArrType_Type PyULongLongArrType_Type -#define NPY_INT64_FMT NPY_LONGLONG_FMT -#define NPY_UINT64_FMT 
NPY_ULONGLONG_FMT -# define MyPyLong_FromInt64 PyLong_FromLongLong -# define MyPyLong_AsInt64 PyLong_AsLongLong -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT64 -# define NPY_MIN_LONGLONG NPY_MIN_INT64 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 -#endif - -#if NPY_BITSOF_INT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_INT -#define NPY_UINT8 NPY_UINT - typedef int npy_int8; - typedef unsigned int npy_uint8; -# define PyInt8ScalarObject PyIntScalarObject -# define PyInt8ArrType_Type PyIntArrType_Type -# define PyUInt8ScalarObject PyUIntScalarObject -# define PyUInt8ArrType_Type PyUIntArrType_Type -#define NPY_INT8_FMT NPY_INT_FMT -#define NPY_UINT8_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 
NPY_INT -#define NPY_UINT16 NPY_UINT - typedef int npy_int16; - typedef unsigned int npy_uint16; -# define PyInt16ScalarObject PyIntScalarObject -# define PyInt16ArrType_Type PyIntArrType_Type -# define PyUInt16ScalarObject PyIntUScalarObject -# define PyUInt16ArrType_Type PyIntUArrType_Type -#define NPY_INT16_FMT NPY_INT_FMT -#define NPY_UINT16_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT - typedef int npy_int32; - typedef unsigned int npy_uint32; - typedef unsigned int npy_ucs4; -# define PyInt32ScalarObject PyIntScalarObject -# define PyInt32ArrType_Type PyIntArrType_Type -# define PyUInt32ScalarObject PyUIntScalarObject -# define PyUInt32ArrType_Type PyUIntArrType_Type -#define NPY_INT32_FMT NPY_INT_FMT -#define NPY_UINT32_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_INT -#define NPY_UINT64 NPY_UINT - typedef int npy_int64; - typedef unsigned int npy_uint64; -# define PyInt64ScalarObject PyIntScalarObject -# define PyInt64ArrType_Type PyIntArrType_Type -# define PyUInt64ScalarObject PyUIntScalarObject -# define PyUInt64ArrType_Type PyUIntArrType_Type -#define NPY_INT64_FMT NPY_INT_FMT -#define NPY_UINT64_FMT NPY_UINT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT -#endif -#endif - -#if NPY_BITSOF_SHORT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_SHORT -#define NPY_UINT8 NPY_USHORT - typedef short npy_int8; - typedef unsigned short npy_uint8; -# 
define PyInt8ScalarObject PyShortScalarObject -# define PyInt8ArrType_Type PyShortArrType_Type -# define PyUInt8ScalarObject PyUShortScalarObject -# define PyUInt8ArrType_Type PyUShortArrType_Type -#define NPY_INT8_FMT NPY_SHORT_FMT -#define NPY_UINT8_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT - typedef short npy_int16; - typedef unsigned short npy_uint16; -# define PyInt16ScalarObject PyShortScalarObject -# define PyInt16ArrType_Type PyShortArrType_Type -# define PyUInt16ScalarObject PyUShortScalarObject -# define PyUInt16ArrType_Type PyUShortArrType_Type -#define NPY_INT16_FMT NPY_SHORT_FMT -#define NPY_UINT16_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_SHORT -#define NPY_UINT32 NPY_USHORT - typedef short npy_int32; - typedef unsigned short npy_uint32; - typedef unsigned short npy_ucs4; -# define PyInt32ScalarObject PyShortScalarObject -# define PyInt32ArrType_Type PyShortArrType_Type -# define PyUInt32ScalarObject PyUShortScalarObject -# define PyUInt32ArrType_Type PyUShortArrType_Type -#define NPY_INT32_FMT NPY_SHORT_FMT -#define NPY_UINT32_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_SHORT -#define NPY_UINT64 NPY_USHORT - typedef short npy_int64; - typedef unsigned short npy_uint64; -# define PyInt64ScalarObject PyShortScalarObject -# define PyInt64ArrType_Type PyShortArrType_Type -# define PyUInt64ScalarObject PyUShortScalarObject -# define PyUInt64ArrType_Type PyUShortArrType_Type -#define NPY_INT64_FMT NPY_SHORT_FMT -#define NPY_UINT64_FMT NPY_USHORT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_SHORT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject 
PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif -#endif - - -#if NPY_BITSOF_CHAR == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE - typedef signed char npy_int8; - typedef unsigned char npy_uint8; -# define PyInt8ScalarObject PyByteScalarObject -# define PyInt8ArrType_Type PyByteArrType_Type -# define PyUInt8ScalarObject PyUByteScalarObject -# define PyUInt8ArrType_Type PyUByteArrType_Type -#define NPY_INT8_FMT NPY_BYTE_FMT -#define NPY_UINT8_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_BYTE -#define NPY_UINT16 NPY_UBYTE - typedef signed char npy_int16; - typedef unsigned char npy_uint16; -# define PyInt16ScalarObject PyByteScalarObject -# define PyInt16ArrType_Type PyByteArrType_Type -# define PyUInt16ScalarObject PyUByteScalarObject -# define PyUInt16ArrType_Type PyUByteArrType_Type -#define NPY_INT16_FMT NPY_BYTE_FMT -#define NPY_UINT16_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_BYTE -#define NPY_UINT32 NPY_UBYTE - typedef signed char npy_int32; - typedef unsigned char npy_uint32; - typedef unsigned char npy_ucs4; -# define PyInt32ScalarObject PyByteScalarObject -# define PyInt32ArrType_Type PyByteArrType_Type -# define PyUInt32ScalarObject PyUByteScalarObject -# define PyUInt32ArrType_Type PyUByteArrType_Type -#define NPY_INT32_FMT NPY_BYTE_FMT -#define NPY_UINT32_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_BYTE -#define NPY_UINT64 NPY_UBYTE - typedef signed char npy_int64; - typedef unsigned char npy_uint64; -# define PyInt64ScalarObject PyByteScalarObject -# define PyInt64ArrType_Type PyByteArrType_Type -# define PyUInt64ScalarObject PyUByteScalarObject -# define 
PyUInt64ArrType_Type PyUByteArrType_Type -#define NPY_INT64_FMT NPY_BYTE_FMT -#define NPY_UINT64_FMT NPY_UBYTE_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif -#endif - - - -#if NPY_BITSOF_DOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_DOUBLE -#define NPY_COMPLEX64 NPY_CDOUBLE - typedef double npy_float32; - typedef npy_cdouble npy_complex64; -# define PyFloat32ScalarObject PyDoubleScalarObject -# define PyComplex64ScalarObject PyCDoubleScalarObject -# define PyFloat32ArrType_Type PyDoubleArrType_Type -# define PyComplex64ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX128 NPY_CDOUBLE - typedef double npy_float64; - typedef npy_cdouble npy_complex128; -# define PyFloat64ScalarObject PyDoubleScalarObject -# define PyComplex128ScalarObject PyCDoubleScalarObject -# define PyFloat64ArrType_Type PyDoubleArrType_Type -# define PyComplex128ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_DOUBLE -#define NPY_COMPLEX160 NPY_CDOUBLE - typedef double npy_float80; - typedef npy_cdouble npy_complex160; -# define PyFloat80ScalarObject PyDoubleScalarObject -# define PyComplex160ScalarObject PyCDoubleScalarObject -# define 
PyFloat80ArrType_Type PyDoubleArrType_Type -# define PyComplex160ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_DOUBLE -#define NPY_COMPLEX192 NPY_CDOUBLE - typedef double npy_float96; - typedef npy_cdouble npy_complex192; -# define PyFloat96ScalarObject PyDoubleScalarObject -# define PyComplex192ScalarObject PyCDoubleScalarObject -# define PyFloat96ArrType_Type PyDoubleArrType_Type -# define PyComplex192ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_DOUBLE -#define NPY_COMPLEX256 NPY_CDOUBLE - typedef double npy_float128; - typedef npy_cdouble npy_complex256; -# define PyFloat128ScalarObject PyDoubleScalarObject -# define PyComplex256ScalarObject PyCDoubleScalarObject -# define PyFloat128ArrType_Type PyDoubleArrType_Type -# define PyComplex256ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT -#endif -#endif - - - -#if NPY_BITSOF_FLOAT == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_COMPLEX64 NPY_CFLOAT - typedef float npy_float32; - typedef npy_cfloat npy_complex64; -# define PyFloat32ScalarObject PyFloatScalarObject -# define PyComplex64ScalarObject PyCFloatScalarObject -# define PyFloat32ArrType_Type PyFloatArrType_Type -# define PyComplex64ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT32_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_FLOAT -#define NPY_COMPLEX128 NPY_CFLOAT - typedef float npy_float64; - typedef npy_cfloat npy_complex128; -# define PyFloat64ScalarObject PyFloatScalarObject -# define PyComplex128ScalarObject PyCFloatScalarObject -# define 
PyFloat64ArrType_Type PyFloatArrType_Type -# define PyComplex128ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT64_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_FLOAT -#define NPY_COMPLEX160 NPY_CFLOAT - typedef float npy_float80; - typedef npy_cfloat npy_complex160; -# define PyFloat80ScalarObject PyFloatScalarObject -# define PyComplex160ScalarObject PyCFloatScalarObject -# define PyFloat80ArrType_Type PyFloatArrType_Type -# define PyComplex160ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT80_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_FLOAT -#define NPY_COMPLEX192 NPY_CFLOAT - typedef float npy_float96; - typedef npy_cfloat npy_complex192; -# define PyFloat96ScalarObject PyFloatScalarObject -# define PyComplex192ScalarObject PyCFloatScalarObject -# define PyFloat96ArrType_Type PyFloatArrType_Type -# define PyComplex192ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT96_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_FLOAT -#define NPY_COMPLEX256 NPY_CFLOAT - typedef float npy_float128; - typedef npy_cfloat npy_complex256; -# define PyFloat128ScalarObject PyFloatScalarObject -# define PyComplex256ScalarObject PyCFloatScalarObject -# define PyFloat128ArrType_Type PyFloatArrType_Type -# define PyComplex256ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT128_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT -#endif -#endif - -/* half/float16 isn't a floating-point type in C */ -#define NPY_FLOAT16 NPY_HALF -typedef npy_uint16 npy_half; -typedef npy_half npy_float16; - -#if NPY_BITSOF_LONGDOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_LONGDOUBLE -#define NPY_COMPLEX64 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float32; - typedef 
npy_clongdouble npy_complex64; -# define PyFloat32ScalarObject PyLongDoubleScalarObject -# define PyComplex64ScalarObject PyCLongDoubleScalarObject -# define PyFloat32ArrType_Type PyLongDoubleArrType_Type -# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_LONGDOUBLE -#define NPY_COMPLEX128 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float64; - typedef npy_clongdouble npy_complex128; -# define PyFloat64ScalarObject PyLongDoubleScalarObject -# define PyComplex128ScalarObject PyCLongDoubleScalarObject -# define PyFloat64ArrType_Type PyLongDoubleArrType_Type -# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_LONGDOUBLE -#define NPY_COMPLEX160 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float80; - typedef npy_clongdouble npy_complex160; -# define PyFloat80ScalarObject PyLongDoubleScalarObject -# define PyComplex160ScalarObject PyCLongDoubleScalarObject -# define PyFloat80ArrType_Type PyLongDoubleArrType_Type -# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_LONGDOUBLE -#define NPY_COMPLEX192 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float96; - typedef npy_clongdouble npy_complex192; -# define PyFloat96ScalarObject PyLongDoubleScalarObject -# define PyComplex192ScalarObject PyCLongDoubleScalarObject -# define PyFloat96ArrType_Type PyLongDoubleArrType_Type -# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT 
-#endif -#elif NPY_BITSOF_LONGDOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_LONGDOUBLE -#define NPY_COMPLEX256 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float128; - typedef npy_clongdouble npy_complex256; -# define PyFloat128ScalarObject PyLongDoubleScalarObject -# define PyComplex256ScalarObject PyCLongDoubleScalarObject -# define PyFloat128ArrType_Type PyLongDoubleArrType_Type -# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT -#endif - -/* datetime typedefs */ -typedef npy_int64 npy_timedelta; -typedef npy_int64 npy_datetime; -#define NPY_DATETIME_FMT NPY_INT64_FMT -#define NPY_TIMEDELTA_FMT NPY_INT64_FMT - -/* End of typedefs for numarray style bit-width names */ - -#endif - diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_cpu.h b/numpy-1.6.2/numpy/core/include/numpy/npy_cpu.h deleted file mode 100644 index 8a29788065..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_cpu.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * This set (target) cpu specific macros: - * - Possible values: - * NPY_CPU_X86 - * NPY_CPU_AMD64 - * NPY_CPU_PPC - * NPY_CPU_PPC64 - * NPY_CPU_SPARC - * NPY_CPU_S390 - * NPY_CPU_IA64 - * NPY_CPU_HPPA - * NPY_CPU_ALPHA - * NPY_CPU_ARMEL - * NPY_CPU_ARMEB - * NPY_CPU_SH_LE - * NPY_CPU_SH_BE - */ -#ifndef _NPY_CPUARCH_H_ -#define _NPY_CPUARCH_H_ - -#include "numpyconfig.h" - -#if defined( __i386__ ) 
|| defined(i386) || defined(_M_IX86) - /* - * __i386__ is defined by gcc and Intel compiler on Linux, - * _M_IX86 by VS compiler, - * i386 by Sun compilers on opensolaris at least - */ - #define NPY_CPU_X86 -#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) - /* - * both __x86_64__ and __amd64__ are defined by gcc - * __x86_64 defined by sun compiler on opensolaris at least - * _M_AMD64 defined by MS compiler - */ - #define NPY_CPU_AMD64 -#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) - /* - * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, - * but can't find it ATM - * _ARCH_PPC is used by at least gcc on AIX - */ - #define NPY_CPU_PPC -#elif defined(__ppc64__) - #define NPY_CPU_PPC64 -#elif defined(__sparc__) || defined(__sparc) - /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ - #define NPY_CPU_SPARC -#elif defined(__s390__) - #define NPY_CPU_S390 -#elif defined(__ia64) - #define NPY_CPU_IA64 -#elif defined(__hppa) - #define NPY_CPU_HPPA -#elif defined(__alpha__) - #define NPY_CPU_ALPHA -#elif defined(__arm__) && defined(__ARMEL__) - #define NPY_CPU_ARMEL -#elif defined(__arm__) && defined(__ARMEB__) - #define NPY_CPU_ARMEB -#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_SH_LE -#elif defined(__sh__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_SH_BE -#elif defined(__MIPSEL__) - #define NPY_CPU_MIPSEL -#elif defined(__MIPSEB__) - #define NPY_CPU_MIPSEB -#else - #error Unknown CPU, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) -#endif - -/* - This "white-lists" the architectures that we know don't require - pointer alignment. We white-list, since the memcpy version will - work everywhere, whereas assignment will only work where pointer - dereferencing doesn't require alignment. - - TODO: There may be more architectures we can white list. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src))) -#else - #if NPY_SIZEOF_PY_INTPTR_T == 4 - #define NPY_COPY_PYOBJECT_PTR(dst, src) \ - ((char*)(dst))[0] = ((char*)(src))[0]; \ - ((char*)(dst))[1] = ((char*)(src))[1]; \ - ((char*)(dst))[2] = ((char*)(src))[2]; \ - ((char*)(dst))[3] = ((char*)(src))[3]; - #elif NPY_SIZEOF_PY_INTPTR_T == 8 - #define NPY_COPY_PYOBJECT_PTR(dst, src) \ - ((char*)(dst))[0] = ((char*)(src))[0]; \ - ((char*)(dst))[1] = ((char*)(src))[1]; \ - ((char*)(dst))[2] = ((char*)(src))[2]; \ - ((char*)(dst))[3] = ((char*)(src))[3]; \ - ((char*)(dst))[4] = ((char*)(src))[4]; \ - ((char*)(dst))[5] = ((char*)(src))[5]; \ - ((char*)(dst))[6] = ((char*)(src))[6]; \ - ((char*)(dst))[7] = ((char*)(src))[7]; - #else - #error Unknown architecture, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) - #endif -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_endian.h b/numpy-1.6.2/numpy/core/include/numpy/npy_endian.h deleted file mode 100644 index aa5ed8b2bd..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_endian.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef _NPY_ENDIAN_H_ -#define _NPY_ENDIAN_H_ - -/* - * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in - * endian.h - */ - -#ifdef NPY_HAVE_ENDIAN_H - /* Use endian.h if available */ - #include - - #define NPY_BYTE_ORDER __BYTE_ORDER - #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN - #define NPY_BIG_ENDIAN __BIG_ENDIAN -#else - /* Set endianness info using target CPU */ - #include "npy_cpu.h" - - #define NPY_LITTLE_ENDIAN 1234 - #define NPY_BIG_ENDIAN 4321 - - #if defined(NPY_CPU_X86) \ - || defined(NPY_CPU_AMD64) \ - || defined(NPY_CPU_IA64) \ - || defined(NPY_CPU_ALPHA) \ - || defined(NPY_CPU_ARMEL) \ - || defined(NPY_CPU_SH_LE) \ - || defined(NPY_CPU_MIPSEL) - #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN - #elif 
defined(NPY_CPU_PPC) \ - || defined(NPY_CPU_SPARC) \ - || defined(NPY_CPU_S390) \ - || defined(NPY_CPU_HPPA) \ - || defined(NPY_CPU_PPC64) \ - || defined(NPY_CPU_ARMEB) \ - || defined(NPY_CPU_SH_BE) \ - || defined(NPY_CPU_MIPSEB) - #define NPY_BYTE_ORDER NPY_BIG_ENDIAN - #else - #error Unknown CPU: can not set endianness - #endif -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_interrupt.h b/numpy-1.6.2/numpy/core/include/numpy/npy_interrupt.h deleted file mode 100644 index eb72fbaf0b..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_interrupt.h +++ /dev/null @@ -1,117 +0,0 @@ - -/* Signal handling: - -This header file defines macros that allow your code to handle -interrupts received during processing. Interrupts that -could reasonably be handled: - -SIGINT, SIGABRT, SIGALRM, SIGSEGV - -****Warning*************** - -Do not allow code that creates temporary memory or increases reference -counts of Python objects to be interrupted unless you handle it -differently. - -************************** - -The mechanism for handling interrupts is conceptually simple: - - - replace the signal handler with our own home-grown version - and store the old one. - - run the code to be interrupted -- if an interrupt occurs - the handler should basically just cause a return to the - calling function for finish work. - - restore the old signal handler - -Of course, every code that allows interrupts must account for -returning via the interrupt and handle clean-up correctly. But, -even still, the simple paradigm is complicated by at least three -factors. - - 1) platform portability (i.e. Microsoft says not to use longjmp - to return from signal handling. They have a __try and __except - extension to C instead but what about mingw?). - - 2) how to handle threads: apparently whether signals are delivered to - every thread of the process or the "invoking" thread is platform - dependent. --- we don't handle threads for now. 
- - 3) do we need to worry about re-entrance. For now, assume the - code will not call-back into itself. - -Ideas: - - 1) Start by implementing an approach that works on platforms that - can use setjmp and longjmp functionality and does nothing - on other platforms. - - 2) Ignore threads --- i.e. do not mix interrupt handling and threads - - 3) Add a default signal_handler function to the C-API but have the rest - use macros. - - -Simple Interface: - - -In your C-extension: around a block of code you want to be interruptable -with a SIGINT - -NPY_SIGINT_ON -[code] -NPY_SIGINT_OFF - -In order for this to work correctly, the -[code] block must not allocate any memory or alter the reference count of any -Python objects. In other words [code] must be interruptible so that continuation -after NPY_SIGINT_OFF will only be "missing some computations" - -Interrupt handling does not work well with threads. - -*/ - -/* Add signal handling macros - Make the global variable and signal handler part of the C-API -*/ - -#ifndef NPY_INTERRUPT_H -#define NPY_INTERRUPT_H - -#ifndef NPY_NO_SIGNAL - -#include -#include - -#ifndef sigsetjmp - -#define SIGSETJMP(arg1, arg2) setjmp(arg1) -#define SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) -#define SIGJMP_BUF jmp_buf - -#else - -#define SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) -#define SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) -#define SIGJMP_BUF sigjmp_buf - -#endif - -# define NPY_SIGINT_ON { \ - PyOS_sighandler_t _npy_sig_save; \ - _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ - if (SIGSETJMP(*((SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ - 1) == 0) { \ - -# define NPY_SIGINT_OFF } \ - PyOS_setsig(SIGINT, _npy_sig_save); \ - } - -#else /* NPY_NO_SIGNAL */ - -# define NPY_SIGINT_ON -# define NPY_SIGINT_OFF - -#endif /* HAVE_SIGSETJMP */ - -#endif /* NPY_INTERRUPT_H */ diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_math.h b/numpy-1.6.2/numpy/core/include/numpy/npy_math.h deleted file mode 100644 index 
56c1c2b280..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_math.h +++ /dev/null @@ -1,435 +0,0 @@ -#ifndef __NPY_MATH_C99_H_ -#define __NPY_MATH_C99_H_ - -#include -#include - -/* - * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 - * for INFINITY) - * - * XXX: I should test whether INFINITY and NAN are available on the platform - */ -NPY_INLINE static float __npy_inff(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nanf(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_pzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; - return __bint.__f; -} - -#define NPY_INFINITYF __npy_inff() -#define NPY_NANF __npy_nanf() -#define NPY_PZEROF __npy_pzerof() -#define NPY_NZEROF __npy_nzerof() - -#define NPY_INFINITY ((npy_double)NPY_INFINITYF) -#define NPY_NAN ((npy_double)NPY_NANF) -#define NPY_PZERO ((npy_double)NPY_PZEROF) -#define NPY_NZERO ((npy_double)NPY_NZEROF) - -#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) -#define NPY_NANL ((npy_longdouble)NPY_NANF) -#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) -#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) - -/* - * Useful constants - */ -#define NPY_E 2.718281828459045235360287471352662498 /* e */ -#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ -#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ -#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ -#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ -#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ -#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ -#define NPY_PI_4 
0.785398163397448309615660845819875721 /* pi/4 */ -#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ -#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ -#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ -#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ -#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ - -#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ -#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ -#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ -#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ -#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ -#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ -#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ -#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ -#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ -#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ -#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constan*/ -#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ -#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ - -#define NPY_El 2.718281828459045235360287471352662498L /* e */ -#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ -#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ -#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ -#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ -#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ -#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ -#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ -#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ 
-#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ -#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constan*/ -#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ -#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ - -/* - * C99 double math funcs - */ -double npy_sin(double x); -double npy_cos(double x); -double npy_tan(double x); -double npy_sinh(double x); -double npy_cosh(double x); -double npy_tanh(double x); - -double npy_asin(double x); -double npy_acos(double x); -double npy_atan(double x); -double npy_aexp(double x); -double npy_alog(double x); -double npy_asqrt(double x); -double npy_afabs(double x); - -double npy_log(double x); -double npy_log10(double x); -double npy_exp(double x); -double npy_sqrt(double x); - -double npy_fabs(double x); -double npy_ceil(double x); -double npy_fmod(double x, double y); -double npy_floor(double x); - -double npy_expm1(double x); -double npy_log1p(double x); -double npy_hypot(double x, double y); -double npy_acosh(double x); -double npy_asinh(double xx); -double npy_atanh(double x); -double npy_rint(double x); -double npy_trunc(double x); -double npy_exp2(double x); -double npy_log2(double x); - -double npy_atan2(double x, double y); -double npy_pow(double x, double y); -double npy_modf(double x, double* y); - -double npy_copysign(double x, double y); -double npy_nextafter(double x, double y); -double npy_spacing(double x); - -/* - * IEEE 754 fpu handling. 
Those are guaranteed to be macros - */ -#ifndef NPY_HAVE_DECL_ISNAN - #define npy_isnan(x) ((x) != (x)) -#else - #ifdef _MSC_VER - #define npy_isnan(x) _isnan((x)) - #else - #define npy_isnan(x) isnan((x)) - #endif -#endif - -#ifndef NPY_HAVE_DECL_ISFINITE - #ifdef _MSC_VER - #define npy_isfinite(x) _finite((x)) - #else - #define npy_isfinite(x) !npy_isnan((x) + (-x)) - #endif -#else - #define npy_isfinite(x) isfinite((x)) -#endif - -#ifndef NPY_HAVE_DECL_ISINF - #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) -#else - #ifdef _MSC_VER - #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) - #else - #define npy_isinf(x) isinf((x)) - #endif -#endif - -#ifndef NPY_HAVE_DECL_SIGNBIT - int _npy_signbit_f(float x); - int _npy_signbit_d(double x); - int _npy_signbit_ld(long double x); - #define npy_signbit(x) \ - (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ - : _npy_signbit_f (x)) -#else - #define npy_signbit(x) signbit((x)) -#endif - -/* - * float C99 math functions - */ - -float npy_sinf(float x); -float npy_cosf(float x); -float npy_tanf(float x); -float npy_sinhf(float x); -float npy_coshf(float x); -float npy_tanhf(float x); -float npy_fabsf(float x); -float npy_floorf(float x); -float npy_ceilf(float x); -float npy_rintf(float x); -float npy_truncf(float x); -float npy_sqrtf(float x); -float npy_log10f(float x); -float npy_logf(float x); -float npy_expf(float x); -float npy_expm1f(float x); -float npy_asinf(float x); -float npy_acosf(float x); -float npy_atanf(float x); -float npy_asinhf(float x); -float npy_acoshf(float x); -float npy_atanhf(float x); -float npy_log1pf(float x); -float npy_exp2f(float x); -float npy_log2f(float x); - -float npy_atan2f(float x, float y); -float npy_hypotf(float x, float y); -float npy_powf(float x, float y); -float npy_fmodf(float x, float y); - -float npy_modff(float x, float* y); - -float npy_copysignf(float x, float y); -float npy_nextafterf(float x, 
float y); -float npy_spacingf(float x); - -/* - * float C99 math functions - */ - -npy_longdouble npy_sinl(npy_longdouble x); -npy_longdouble npy_cosl(npy_longdouble x); -npy_longdouble npy_tanl(npy_longdouble x); -npy_longdouble npy_sinhl(npy_longdouble x); -npy_longdouble npy_coshl(npy_longdouble x); -npy_longdouble npy_tanhl(npy_longdouble x); -npy_longdouble npy_fabsl(npy_longdouble x); -npy_longdouble npy_floorl(npy_longdouble x); -npy_longdouble npy_ceill(npy_longdouble x); -npy_longdouble npy_rintl(npy_longdouble x); -npy_longdouble npy_truncl(npy_longdouble x); -npy_longdouble npy_sqrtl(npy_longdouble x); -npy_longdouble npy_log10l(npy_longdouble x); -npy_longdouble npy_logl(npy_longdouble x); -npy_longdouble npy_expl(npy_longdouble x); -npy_longdouble npy_expm1l(npy_longdouble x); -npy_longdouble npy_asinl(npy_longdouble x); -npy_longdouble npy_acosl(npy_longdouble x); -npy_longdouble npy_atanl(npy_longdouble x); -npy_longdouble npy_asinhl(npy_longdouble x); -npy_longdouble npy_acoshl(npy_longdouble x); -npy_longdouble npy_atanhl(npy_longdouble x); -npy_longdouble npy_log1pl(npy_longdouble x); -npy_longdouble npy_exp2l(npy_longdouble x); -npy_longdouble npy_log2l(npy_longdouble x); - -npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); - -npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); - -npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_spacingl(npy_longdouble x); - -/* - * Non standard functions - */ -double npy_deg2rad(double x); -double npy_rad2deg(double x); -double npy_logaddexp(double x, double y); -double npy_logaddexp2(double x, double y); - -float npy_deg2radf(float x); -float npy_rad2degf(float x); -float npy_logaddexpf(float x, 
float y); -float npy_logaddexp2f(float x, float y); - -npy_longdouble npy_deg2radl(npy_longdouble x); -npy_longdouble npy_rad2degl(npy_longdouble x); -npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); - -#define npy_degrees npy_rad2deg -#define npy_degreesf npy_rad2degf -#define npy_degreesl npy_rad2degl - -#define npy_radians npy_deg2rad -#define npy_radiansf npy_deg2radf -#define npy_radiansl npy_deg2radl - -/* - * Complex declarations - */ - -/* - * C99 specifies that complex numbers have the same representation as - * an array of two elements, where the first element is the real part - * and the second element is the imaginary part. - */ -#define __NPY_CPACK_IMP(x, y, type, ctype) \ - union { \ - ctype z; \ - type a[2]; \ - } z1;; \ - \ - z1.a[0] = (x); \ - z1.a[1] = (y); \ - \ - return z1.z; - -static NPY_INLINE npy_cdouble npy_cpack(double x, double y) -{ - __NPY_CPACK_IMP(x, y, double, npy_cdouble); -} - -static NPY_INLINE npy_cfloat npy_cpackf(float x, float y) -{ - __NPY_CPACK_IMP(x, y, float, npy_cfloat); -} - -static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) -{ - __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble); -} -#undef __NPY_CPACK_IMP - -/* - * Same remark as above, but in the other direction: extract first/second - * member of complex number, assuming a C99-compatible representation - * - * Those are defineds as static inline, and such as a reasonable compiler would - * most likely compile this to one or two instructions (on CISC at least) - */ -#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \ - union { \ - ctype z; \ - type a[2]; \ - } __z_repr; \ - __z_repr.z = z; \ - \ - return __z_repr.a[index]; - -static NPY_INLINE double npy_creal(npy_cdouble z) -{ - __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble); -} - -static NPY_INLINE double npy_cimag(npy_cdouble z) -{ - __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble); -} - -static 
NPY_INLINE float npy_crealf(npy_cfloat z) -{ - __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat); -} - -static NPY_INLINE float npy_cimagf(npy_cfloat z) -{ - __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat); -} - -static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z) -{ - __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble); -} - -static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z) -{ - __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble); -} -#undef __NPY_CEXTRACT_IMP - -/* - * Double precision complex functions - */ -double npy_cabs(npy_cdouble z); -double npy_carg(npy_cdouble z); - -npy_cdouble npy_cexp(npy_cdouble z); -npy_cdouble npy_clog(npy_cdouble z); -npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); - -npy_cdouble npy_csqrt(npy_cdouble z); - -npy_cdouble npy_ccos(npy_cdouble z); -npy_cdouble npy_csin(npy_cdouble z); - -/* - * Single precision complex functions - */ -float npy_cabsf(npy_cfloat z); -float npy_cargf(npy_cfloat z); - -npy_cfloat npy_cexpf(npy_cfloat z); -npy_cfloat npy_clogf(npy_cfloat z); -npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); - -npy_cfloat npy_csqrtf(npy_cfloat z); - -npy_cfloat npy_ccosf(npy_cfloat z); -npy_cfloat npy_csinf(npy_cfloat z); - -/* - * Extended precision complex functions - */ -npy_longdouble npy_cabsl(npy_clongdouble z); -npy_longdouble npy_cargl(npy_clongdouble z); - -npy_clongdouble npy_cexpl(npy_clongdouble z); -npy_clongdouble npy_clogl(npy_clongdouble z); -npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); - -npy_clongdouble npy_csqrtl(npy_clongdouble z); - -npy_clongdouble npy_ccosl(npy_clongdouble z); -npy_clongdouble npy_csinl(npy_clongdouble z); - -/* - * Functions that set the floating point error - * status word. 
- */ - -void npy_set_floatstatus_divbyzero(void); -void npy_set_floatstatus_overflow(void); -void npy_set_floatstatus_underflow(void); -void npy_set_floatstatus_invalid(void); - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/npy_os.h b/numpy-1.6.2/numpy/core/include/numpy/npy_os.h deleted file mode 100644 index 9228c3916e..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/npy_os.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _NPY_OS_H_ -#define _NPY_OS_H_ - -#if defined(linux) || defined(__linux) || defined(__linux__) - #define NPY_OS_LINUX -#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__OpenBSD__) || defined(__DragonFly__) - #define NPY_OS_BSD - #ifdef __FreeBSD__ - #define NPY_OS_FREEBSD - #elif defined(__NetBSD__) - #define NPY_OS_NETBSD - #elif defined(__OpenBSD__) - #define NPY_OS_OPENBSD - #elif defined(__DragonFly__) - #define NPY_OS_DRAGONFLY - #endif -#elif defined(sun) || defined(__sun) - #define NPY_OS_SOLARIS -#elif defined(__CYGWIN__) - #define NPY_OS_CYGWIN -#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) - #define NPY_OS_WIN32 -#elif defined(__APPLE__) - #define NPY_OS_DARWIN -#else - #define NPY_OS_UNKNOWN -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/include/numpy/numpyconfig.h b/numpy-1.6.2/numpy/core/include/numpy/numpyconfig.h deleted file mode 100644 index ff7938cd96..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/numpyconfig.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _NPY_NUMPYCONFIG_H_ -#define _NPY_NUMPYCONFIG_H_ - -#include "_numpyconfig.h" - -/* - * On Mac OS X, because there is only one configuration stage for all the archs - * in universal builds, any macro which depends on the arch needs to be - * harcoded - */ -#ifdef __APPLE__ - #undef NPY_SIZEOF_LONG - #undef NPY_SIZEOF_PY_INTPTR_T - - #ifdef __LP64__ - #define NPY_SIZEOF_LONG 8 - #define NPY_SIZEOF_PY_INTPTR_T 8 - #else - #define NPY_SIZEOF_LONG 4 - #define NPY_SIZEOF_PY_INTPTR_T 4 - #endif -#endif - -#endif diff --git 
a/numpy-1.6.2/numpy/core/include/numpy/old_defines.h b/numpy-1.6.2/numpy/core/include/numpy/old_defines.h deleted file mode 100644 index 8c83a70a2e..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/old_defines.h +++ /dev/null @@ -1,175 +0,0 @@ -#define NDARRAY_VERSION NPY_VERSION - -#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE -#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE -#define PyArray_BUFSIZE NPY_BUFSIZE - -#define PyArray_PRIORITY NPY_PRIORITY -#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY -#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE - -#define NPY_MAX PyArray_MAX -#define NPY_MIN PyArray_MIN - -#define PyArray_TYPES NPY_TYPES -#define PyArray_BOOL NPY_BOOL -#define PyArray_BYTE NPY_BYTE -#define PyArray_UBYTE NPY_UBYTE -#define PyArray_SHORT NPY_SHORT -#define PyArray_USHORT NPY_USHORT -#define PyArray_INT NPY_INT -#define PyArray_UINT NPY_UINT -#define PyArray_LONG NPY_LONG -#define PyArray_ULONG NPY_ULONG -#define PyArray_LONGLONG NPY_LONGLONG -#define PyArray_ULONGLONG NPY_ULONGLONG -#define PyArray_HALF NPY_HALF -#define PyArray_FLOAT NPY_FLOAT -#define PyArray_DOUBLE NPY_DOUBLE -#define PyArray_LONGDOUBLE NPY_LONGDOUBLE -#define PyArray_CFLOAT NPY_CFLOAT -#define PyArray_CDOUBLE NPY_CDOUBLE -#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE -#define PyArray_OBJECT NPY_OBJECT -#define PyArray_STRING NPY_STRING -#define PyArray_UNICODE NPY_UNICODE -#define PyArray_VOID NPY_VOID -#define PyArray_DATETIME NPY_DATETIME -#define PyArray_TIMEDELTA NPY_TIMEDELTA -#define PyArray_NTYPES NPY_NTYPES -#define PyArray_NOTYPE NPY_NOTYPE -#define PyArray_CHAR NPY_CHAR -#define PyArray_USERDEF NPY_USERDEF -#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES - -#define PyArray_INTP NPY_INTP -#define PyArray_UINTP NPY_UINTP - -#define PyArray_INT8 NPY_INT8 -#define PyArray_UINT8 NPY_UINT8 -#define PyArray_INT16 NPY_INT16 -#define PyArray_UINT16 NPY_UINT16 -#define PyArray_INT32 NPY_INT32 -#define PyArray_UINT32 NPY_UINT32 - -#ifdef NPY_INT64 -#define PyArray_INT64 NPY_INT64 
-#define PyArray_UINT64 NPY_UINT64 -#endif - -#ifdef NPY_INT128 -#define PyArray_INT128 NPY_INT128 -#define PyArray_UINT128 NPY_UINT128 -#endif - -#ifdef NPY_FLOAT16 -#define PyArray_FLOAT16 NPY_FLOAT16 -#define PyArray_COMPLEX32 NPY_COMPLEX32 -#endif - -#ifdef NPY_FLOAT80 -#define PyArray_FLOAT80 NPY_FLOAT80 -#define PyArray_COMPLEX160 NPY_COMPLEX160 -#endif - -#ifdef NPY_FLOAT96 -#define PyArray_FLOAT96 NPY_FLOAT96 -#define PyArray_COMPLEX192 NPY_COMPLEX192 -#endif - -#ifdef NPY_FLOAT128 -#define PyArray_FLOAT128 NPY_FLOAT128 -#define PyArray_COMPLEX256 NPY_COMPLEX256 -#endif - -#define PyArray_FLOAT32 NPY_FLOAT32 -#define PyArray_COMPLEX64 NPY_COMPLEX64 -#define PyArray_FLOAT64 NPY_FLOAT64 -#define PyArray_COMPLEX128 NPY_COMPLEX128 - - -#define PyArray_TYPECHAR NPY_TYPECHAR -#define PyArray_BOOLLTR NPY_BOOLLTR -#define PyArray_BYTELTR NPY_BYTELTR -#define PyArray_UBYTELTR NPY_UBYTELTR -#define PyArray_SHORTLTR NPY_SHORTLTR -#define PyArray_USHORTLTR NPY_USHORTLTR -#define PyArray_INTLTR NPY_INTLTR -#define PyArray_UINTLTR NPY_UINTLTR -#define PyArray_LONGLTR NPY_LONGLTR -#define PyArray_ULONGLTR NPY_ULONGLTR -#define PyArray_LONGLONGLTR NPY_LONGLONGLTR -#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR -#define PyArray_HALFLTR NPY_HALFLTR -#define PyArray_FLOATLTR NPY_FLOATLTR -#define PyArray_DOUBLELTR NPY_DOUBLELTR -#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR -#define PyArray_CFLOATLTR NPY_CFLOATLTR -#define PyArray_CDOUBLELTR NPY_CDOUBLELTR -#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR -#define PyArray_OBJECTLTR NPY_OBJECTLTR -#define PyArray_STRINGLTR NPY_STRINGLTR -#define PyArray_STRINGLTR2 NPY_STRINGLTR2 -#define PyArray_UNICODELTR NPY_UNICODELTR -#define PyArray_VOIDLTR NPY_VOIDLTR -#define PyArray_DATETIMELTR NPY_DATETIMELTR -#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR -#define PyArray_CHARLTR NPY_CHARLTR -#define PyArray_INTPLTR NPY_INTPLTR -#define PyArray_UINTPLTR NPY_UINTPLTR -#define PyArray_GENBOOLLTR NPY_GENBOOLLTR -#define 
PyArray_SIGNEDLTR NPY_SIGNEDLTR -#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR -#define PyArray_FLOATINGLTR NPY_FLOATINGLTR -#define PyArray_COMPLEXLTR NPY_COMPLEXLTR - -#define PyArray_QUICKSORT NPY_QUICKSORT -#define PyArray_HEAPSORT NPY_HEAPSORT -#define PyArray_MERGESORT NPY_MERGESORT -#define PyArray_SORTKIND NPY_SORTKIND -#define PyArray_NSORTS NPY_NSORTS - -#define PyArray_NOSCALAR NPY_NOSCALAR -#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR -#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR -#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR -#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR -#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR -#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR -#define PyArray_SCALARKIND NPY_SCALARKIND -#define PyArray_NSCALARKINDS NPY_NSCALARKINDS - -#define PyArray_ANYORDER NPY_ANYORDER -#define PyArray_CORDER NPY_CORDER -#define PyArray_FORTRANORDER NPY_FORTRANORDER -#define PyArray_ORDER NPY_ORDER - -#define PyDescr_ISBOOL PyDataType_ISBOOL -#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED -#define PyDescr_ISSIGNED PyDataType_ISSIGNED -#define PyDescr_ISINTEGER PyDataType_ISINTEGER -#define PyDescr_ISFLOAT PyDataType_ISFLOAT -#define PyDescr_ISNUMBER PyDataType_ISNUMBER -#define PyDescr_ISSTRING PyDataType_ISSTRING -#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX -#define PyDescr_ISPYTHON PyDataType_ISPYTHON -#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE -#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF -#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED -#define PyDescr_ISOBJECT PyDataType_ISOBJECT -#define PyDescr_HASFIELDS PyDataType_HASFIELDS - -#define PyArray_LITTLE NPY_LITTLE -#define PyArray_BIG NPY_BIG -#define PyArray_NATIVE NPY_NATIVE -#define PyArray_SWAP NPY_SWAP -#define PyArray_IGNORE NPY_IGNORE - -#define PyArray_NATBYTE NPY_NATBYTE -#define PyArray_OPPBYTE NPY_OPPBYTE - -#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE - -#define PyArray_USE_PYMEM NPY_USE_PYMEM - -#define PyArray_RemoveLargest PyArray_RemoveSmallest diff --git 
a/numpy-1.6.2/numpy/core/include/numpy/oldnumeric.h b/numpy-1.6.2/numpy/core/include/numpy/oldnumeric.h deleted file mode 100644 index 51dba29cd4..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/oldnumeric.h +++ /dev/null @@ -1,23 +0,0 @@ -#include "arrayobject.h" - -#ifndef REFCOUNT -# define REFCOUNT NPY_REFCOUNT -# define MAX_ELSIZE 16 -#endif - -#define PyArray_UNSIGNED_TYPES -#define PyArray_SBYTE PyArray_BYTE -#define PyArray_CopyArray PyArray_CopyInto -#define _PyArray_multiply_list PyArray_MultiplyIntList -#define PyArray_ISSPACESAVER(m) NPY_FALSE -#define PyScalarArray_Check PyArray_CheckScalar - -#define CONTIGUOUS NPY_CONTIGUOUS -#define OWN_DIMENSIONS 0 -#define OWN_STRIDES 0 -#define OWN_DATA NPY_OWNDATA -#define SAVESPACE 0 -#define SAVESPACEBIT 0 - -#undef import_array -#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/numpy-1.6.2/numpy/core/include/numpy/ufuncobject.h b/numpy-1.6.2/numpy/core/include/numpy/ufuncobject.h deleted file mode 100644 index 34cd727076..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/ufuncobject.h +++ /dev/null @@ -1,289 +0,0 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *); - -typedef struct { - PyObject_HEAD - /* - * nin: Number of inputs - * nout: Number of outputs - * nargs: Always nin + nout (Why is it stored?) 
- */ - int nin, nout, nargs; - - /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */ - int identity; - - /* Array of one-dimensional core loops */ - PyUFuncGenericFunction *functions; - /* Array of funcdata that gets passed into the functions */ - void **data; - /* The number of elements in 'functions' and 'data' */ - int ntypes; - - /* Does not appear to be used */ - int check_return; - - /* The name of the ufunc */ - char *name; - - /* Array of type numbers, of size ('nargs' * 'ntypes') */ - char *types; - - /* Documentation string */ - char *doc; - - void *ptr; - PyObject *obj; - PyObject *userloops; - - /* generalized ufunc parameters */ - - /* 0 for scalar ufunc; 1 for generalized ufunc */ - int core_enabled; - /* number of distinct dimension names in signature */ - int core_num_dim_ix; - - /* - * dimension indices of input/output argument k are stored in - * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] - */ - - /* numbers of core dimensions of each argument */ - int *core_num_dims; - /* - * dimension indices in a flatted form; indices - * are in the range of [0,core_num_dim_ix) - */ - int *core_dim_ixs; - /* - * positions of 1st core dimensions of each - * argument in core_dim_ixs - */ - int *core_offsets; - /* signature string for printing purpose */ - char *core_signature; -} PyUFuncObject; - -#include "arrayobject.h" - -#define UFUNC_ERR_IGNORE 0 -#define UFUNC_ERR_WARN 1 -#define UFUNC_ERR_RAISE 2 -#define UFUNC_ERR_CALL 3 -#define UFUNC_ERR_PRINT 4 -#define UFUNC_ERR_LOG 5 - - /* Python side integer mask */ - -#define UFUNC_MASK_DIVIDEBYZERO 0x07 -#define UFUNC_MASK_OVERFLOW 0x3f -#define UFUNC_MASK_UNDERFLOW 0x1ff -#define UFUNC_MASK_INVALID 0xfff - -#define UFUNC_SHIFT_DIVIDEBYZERO 0 -#define UFUNC_SHIFT_OVERFLOW 3 -#define UFUNC_SHIFT_UNDERFLOW 6 -#define UFUNC_SHIFT_INVALID 9 - - -/* platform-dependent code translates floating point - status to an integer sum of these values -*/ -#define UFUNC_FPE_DIVIDEBYZERO 1 
-#define UFUNC_FPE_OVERFLOW 2 -#define UFUNC_FPE_UNDERFLOW 4 -#define UFUNC_FPE_INVALID 8 - -/* Error mode that avoids look-up (no checking) */ -#define UFUNC_ERR_DEFAULT 0 - -#define UFUNC_OBJ_ISOBJECT 1 -#define UFUNC_OBJ_NEEDS_API 2 - - /* Default user error mode */ -#define UFUNC_ERR_DEFAULT2 \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) - -#if NPY_ALLOW_THREADS -#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0) -#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0) -#else -#define NPY_LOOP_BEGIN_THREADS -#define NPY_LOOP_END_THREADS -#endif - -#define PyUFunc_One 1 -#define PyUFunc_Zero 0 -#define PyUFunc_None -1 - -#define UFUNC_REDUCE 0 -#define UFUNC_ACCUMULATE 1 -#define UFUNC_REDUCEAT 2 -#define UFUNC_OUTER 3 - - -typedef struct { - int nin; - int nout; - PyObject *callable; -} PyUFunc_PyFuncData; - -/* A linked-list of function information for - user-defined 1-d loops. 
- */ -typedef struct _loop1d_info { - PyUFuncGenericFunction func; - void *data; - int *arg_types; - struct _loop1d_info *next; -} PyUFunc_Loop1d; - - -#include "__ufunc_api.h" - -#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" - -#define UFUNC_CHECK_ERROR(arg) \ - do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \ - ((arg)->errormask && \ - PyUFunc_checkfperr((arg)->errormask, \ - (arg)->errobj, \ - &(arg)->first))) \ - goto fail;} while (0) - -/* This code checks the IEEE status flags in a platform-dependent way */ -/* Adapted from Numarray */ - -#if (defined(__unix__) || defined(unix)) && !defined(USG) -#include -#endif - -/* OSF/Alpha (Tru64) ---------------------------------------------*/ -#if defined(__osf__) && defined(__alpha) - -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - unsigned long fpstatus; \ - \ - fpstatus = ieee_get_fp_control(); \ - /* clear status bits as well as disable exception mode if on */ \ - ieee_set_fp_control( 0 ); \ - ret = ((IEEE_STATUS_DZE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((IEEE_STATUS_OVF & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((IEEE_STATUS_UNF & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((IEEE_STATUS_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } - -/* MS Windows -----------------------------------------------------*/ -#elif defined(_MSC_VER) - -#include - - /* Clear the floating point exception default of Borland C++ */ -#if defined(__BORLANDC__) -#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); -#endif - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus = (int) _clearfp(); \ - \ - ret = ((SW_ZERODIVIDE & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((SW_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((SW_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((SW_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - } - -/* Solaris --------------------------------------------------------*/ -/* --------ignoring SunOS ieee_flags approach, someone else can -** deal with that! 
*/ -#elif defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \ - defined(__NetBSD__) -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus; \ - \ - fpstatus = (int) fpgetsticky(); \ - ret = ((FP_X_DZ & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FP_X_OFL & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FP_X_UFL & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FP_X_INV & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - (void) fpsetsticky(0); \ - } - -#elif defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__CYGWIN__) || defined(__MINGW32__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) - -#if defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__MINGW32__) || defined(__FreeBSD__) -#include -#elif defined(__CYGWIN__) -#include "fenv/fenv.c" -#endif - -#define UFUNC_CHECK_STATUS(ret) { \ - int fpstatus = (int) fetestexcept(FE_DIVBYZERO | FE_OVERFLOW | \ - FE_UNDERFLOW | FE_INVALID); \ - ret = ((FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FE_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - (void) feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | \ - FE_UNDERFLOW | FE_INVALID); \ -} - -#elif defined(_AIX) - -#include -#include - -#define UFUNC_CHECK_STATUS(ret) { \ - fpflag_t fpstatus; \ - \ - fpstatus = fp_read_flag(); \ - ret = ((FP_DIV_BY_ZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ - | ((FP_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ - | ((FP_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ - | ((FP_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ - fp_swap_flag(0); \ -} - -#else - -#define NO_FLOATING_POINT_SUPPORT -#define UFUNC_CHECK_STATUS(ret) { \ - ret = 0; \ - } - -#endif - -/* - * THESE MACROS ARE DEPRECATED. - * Use npy_set_floatstatus_* in the npymath library. 
- */ -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ -#ifndef UFUNC_NOFPE -#define UFUNC_NOFPE -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_UFUNCOBJECT_H */ diff --git a/numpy-1.6.2/numpy/core/include/numpy/utils.h b/numpy-1.6.2/numpy/core/include/numpy/utils.h deleted file mode 100644 index cc968a3544..0000000000 --- a/numpy-1.6.2/numpy/core/include/numpy/utils.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __NUMPY_UTILS_HEADER__ -#define __NUMPY_UTILS_HEADER__ - -#ifndef __COMP_NPY_UNUSED - #if defined(__GNUC__) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - # elif defined(__ICC) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - #else - #define __COMP_NPY_UNUSED - #endif -#endif - -/* Use this to tag a variable as not used. It will remove unused variable - * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable - * to avoid accidental use */ -#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED - -#endif diff --git a/numpy-1.6.2/numpy/core/info.py b/numpy-1.6.2/numpy/core/info.py deleted file mode 100644 index 561e171b03..0000000000 --- a/numpy-1.6.2/numpy/core/info.py +++ /dev/null @@ -1,86 +0,0 @@ -__doc__ = """Defines a multi-dimensional array and useful procedures for Numerical computation. 
- -Functions - -- array - NumPy Array construction -- zeros - Return an array of all zeros -- empty - Return an unitialized array -- shape - Return shape of sequence or array -- rank - Return number of dimensions -- size - Return number of elements in entire array or a - certain dimension -- fromstring - Construct array from (byte) string -- take - Select sub-arrays using sequence of indices -- put - Set sub-arrays using sequence of 1-D indices -- putmask - Set portion of arrays using a mask -- reshape - Return array with new shape -- repeat - Repeat elements of array -- choose - Construct new array from indexed array tuple -- correlate - Correlate two 1-d arrays -- searchsorted - Search for element in 1-d array -- sum - Total sum over a specified dimension -- average - Average, possibly weighted, over axis or array. -- cumsum - Cumulative sum over a specified dimension -- product - Total product over a specified dimension -- cumproduct - Cumulative product over a specified dimension -- alltrue - Logical and over an entire axis -- sometrue - Logical or over an entire axis -- allclose - Tests if sequences are essentially equal - -More Functions: - -- arange - Return regularly spaced array -- asarray - Guarantee NumPy array -- convolve - Convolve two 1-d arrays -- swapaxes - Exchange axes -- concatenate - Join arrays together -- transpose - Permute axes -- sort - Sort elements of array -- argsort - Indices of sorted array -- argmax - Index of largest value -- argmin - Index of smallest value -- inner - Innerproduct of two arrays -- dot - Dot product (matrix multiplication) -- outer - Outerproduct of two arrays -- resize - Return array with arbitrary new shape -- indices - Tuple of indices -- fromfunction - Construct array from universal function -- diagonal - Return diagonal array -- trace - Trace of array -- dump - Dump array to file object (pickle) -- dumps - Return pickled string representing data -- load - Return array stored in file object -- loads - Return 
array from pickled string -- ravel - Return array as 1-D -- nonzero - Indices of nonzero elements for 1-D array -- shape - Shape of array -- where - Construct array from binary result -- compress - Elements of array where condition is true -- clip - Clip array between two values -- ones - Array of all ones -- identity - 2-D identity array (matrix) - -(Universal) Math Functions - - add logical_or exp - subtract logical_xor log - multiply logical_not log10 - divide maximum sin - divide_safe minimum sinh - conjugate bitwise_and sqrt - power bitwise_or tan - absolute bitwise_xor tanh - negative invert ceil - greater left_shift fabs - greater_equal right_shift floor - less arccos arctan2 - less_equal arcsin fmod - equal arctan hypot - not_equal cos around - logical_and cosh sign - arccosh arcsinh arctanh - -""" - -depends = ['testing'] -global_symbols = ['*'] diff --git a/numpy-1.6.2/numpy/core/machar.py b/numpy-1.6.2/numpy/core/machar.py deleted file mode 100644 index 290f33746a..0000000000 --- a/numpy-1.6.2/numpy/core/machar.py +++ /dev/null @@ -1,339 +0,0 @@ -""" -Machine arithmetics - determine the parameters of the -floating-point arithmetic system -""" -# Author: Pearu Peterson, September 2003 - - -__all__ = ['MachAr'] - -from numpy.core.fromnumeric import any -from numpy.core.numeric import seterr - -# Need to speed this up...especially for longfloat - -class MachAr(object): - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, substracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. 
- iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating point number ``beta**minexp`` (the smallest [in - magnitude] usable floating value). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - Same as `xmin`. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. 
- - """ - def __init__(self, float_conv=float,int_conv=int, - float_to_float=float, - float_to_str = lambda v:'%24.16e' % v, - title = 'Python floating point number'): - """ - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the runninng arch. - saverrstate = seterr(under='ignore') - try: - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - finally: - seterr(**saverrstate) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? - # Determine ibeta and beta - a = one - for _ in xrange(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError, msg % (_, one.dtype) - b = one - for _ in xrange(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp-a) - if any(itemp != 0): - break - else: - raise RuntimeError, msg % (_, one.dtype) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in xrange(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError, msg % (_, one.dtype) - - betah = beta / two - a = one - for _ in xrange(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError, msg % (_, one.dtype) - temp = a + betah - irnd = 0 - if any(temp-a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd==0 and any(temp-tempa != zero): - irnd = 2 - - # 
Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in xrange(max_iterN): - temp = one - a - if any(temp-one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError, "could not determine machine tolerance " \ - "for 'negep', locals() -> %s" % (locals()) - else: - raise RuntimeError, msg % (_, one.dtype) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in xrange(max_iterN): - temp = one + a - if any(temp-one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError, msg % (_, one.dtype) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd==0 and any(temp*one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in xrange(max_iterN): - y = z - z = y*y - a = z*one # Check here for underflow - temp = z*t - if any(a+a == zero) or any(abs(z)>=y): - break - temp1 = temp * betain - if any(temp1*beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError, msg % (_, one.dtype) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in xrange(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any(a+a != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1*beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError, msg % (_, one.dtype) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = 
maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax*one != xmax): - xmax = one - beta*epsneg - xmax = xmax / (xmin*beta*beta*beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta==2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - return '''\ -Machine parameters for %(title)s ---------------------------------------------------------------------- -ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s -machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon) -negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg) -minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny) -maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge) ---------------------------------------------------------------------- -''' % self.__dict__ - - -if __name__ == '__main__': - print MachAr() diff --git a/numpy-1.6.2/numpy/core/memmap.py b/numpy-1.6.2/numpy/core/memmap.py deleted file mode 100644 index 844e13c4e9..0000000000 --- a/numpy-1.6.2/numpy/core/memmap.py +++ /dev/null @@ -1,301 +0,0 @@ -__all__ = ['memmap'] - -import warnings -from numeric 
import uint8, ndarray, dtype -import sys - -from numpy.compat import asbytes - -dtypedescr = dtype -valid_filemodes = ["r", "c", "r+", "w+"] -writeable_filemodes = ["r+","w+"] - -mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" - } - -class memmap(ndarray): - """ - Create a memory-map to an array stored in a *binary* file on disk. - - Memory-mapped files are used for accessing small segments of large files - on disk, without reading the entire file into memory. Numpy's - memmap's are array-like objects. This differs from Python's ``mmap`` - module, which uses file-like objects. - - Parameters - ---------- - filename : str or file-like object - The file name or file object to be used as the array data buffer. - dtype : data-type, optional - The data-type used to interpret the file contents. - Default is `uint8`. - mode : {'r+', 'r', 'w+', 'c'}, optional - The file is opened in this mode: - - +------+-------------------------------------------------------------+ - | 'r' | Open existing file for reading only. | - +------+-------------------------------------------------------------+ - | 'r+' | Open existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'w+' | Create or overwrite existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'c' | Copy-on-write: assignments affect data in memory, but | - | | changes are not saved to disk. The file on disk is | - | | read-only. | - +------+-------------------------------------------------------------+ - - Default is 'r+'. - offset : int, optional - In the file, array data starts at this offset. Since `offset` is - measured in bytes, it should be a multiple of the byte-size of - `dtype`. Requires ``shape=None``. The default is 0. - shape : tuple, optional - The desired shape of the array. 
By default, the returned array will be - 1-D with the number of elements determined by file size and data-type. - order : {'C', 'F'}, optional - Specify the order of the ndarray memory layout: C (row-major) or - Fortran (column-major). This only has an effect if the shape is - greater than 1-D. The default order is 'C'. - - Attributes - ---------- - filename : str - Path to the mapped file. - offset : int - Offset position in the file. - mode : str - File mode. - - - Methods - ------- - close - Close the memmap file. - flush - Flush any changes in memory to file on disk. - When you delete a memmap object, flush is called first to write - changes to disk before removing the object. - - Notes - ----- - The memmap object can be used anywhere an ndarray is accepted. - Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns - ``True``. - - Memory-mapped arrays use the Python memory-map object which - (prior to Python 2.5) does not allow files to be larger than a - certain size depending on the platform. This size is always < 2GB - even on 64-bit systems. - - Examples - -------- - >>> data = np.arange(12, dtype='float32') - >>> data.resize((3,4)) - - This example uses a temporary file so that doctest doesn't write - files to your directory. You would use a 'normal' filename. 
- - >>> from tempfile import mkdtemp - >>> import os.path as path - >>> filename = path.join(mkdtemp(), 'newfile.dat') - - Create a memmap with dtype and shape that matches our data: - - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) - >>> fp - memmap([[ 0., 0., 0., 0.], - [ 0., 0., 0., 0.], - [ 0., 0., 0., 0.]], dtype=float32) - - Write data to memmap array: - - >>> fp[:] = data[:] - >>> fp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - >>> fp.filename == path.abspath(filename) - True - - Deletion flushes memory changes to disk before removing the object: - - >>> del fp - - Load the memmap and verify data was stored: - - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> newfp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Read-only memmap: - - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> fpr.flags.writeable - False - - Copy-on-write memmap: - - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) - >>> fpc.flags.writeable - True - - It's possible to assign to copy-on-write array, but values are only - written into the memory copy of the array, and not written to disk: - - >>> fpc - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - >>> fpc[0,:] = 0 - >>> fpc - memmap([[ 0., 0., 0., 0.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - File on disk is unchanged: - - >>> fpr - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Offset into a memmap: - - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) - >>> fpo - memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) - - """ - - __array_priority__ = -100.0 - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, - shape=None, order='C'): - # Import here to minimize 'import numpy' overhead - import mmap - 
import os.path - try: - mode = mode_equivalents[mode] - except KeyError: - if mode not in valid_filemodes: - raise ValueError("mode must be one of %s" % \ - (valid_filemodes + mode_equivalents.keys())) - - if hasattr(filename,'read'): - fid = filename - else: - fid = open(filename, (mode == 'c' and 'r' or mode)+'b') - - if (mode == 'w+') and shape is None: - raise ValueError, "shape must be given" - - fid.seek(0, 2) - flen = fid.tell() - descr = dtypedescr(dtype) - _dbytes = descr.itemsize - - if shape is None: - bytes = flen - offset - if (bytes % _dbytes): - fid.close() - raise ValueError, "Size of available data is not a "\ - "multiple of data-type size." - size = bytes // _dbytes - shape = (size,) - else: - if not isinstance(shape, tuple): - shape = (shape,) - size = 1 - for k in shape: - size *= k - - bytes = long(offset + size*_dbytes) - - if mode == 'w+' or (mode == 'r+' and flen < bytes): - fid.seek(bytes - 1, 0) - fid.write(asbytes('\0')) - fid.flush() - - if mode == 'c': - acc = mmap.ACCESS_COPY - elif mode == 'r': - acc = mmap.ACCESS_READ - else: - acc = mmap.ACCESS_WRITE - - if sys.version_info[:2] >= (2,6): - # The offset keyword in mmap.mmap needs Python >= 2.6 - start = offset - offset % mmap.ALLOCATIONGRANULARITY - bytes -= start - offset -= start - mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - else: - mm = mmap.mmap(fid.fileno(), bytes, access=acc) - - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, - offset=offset, order=order) - self._mmap = mm - self.offset = offset - self.mode = mode - - if isinstance(filename, basestring): - self.filename = os.path.abspath(filename) - elif hasattr(filename, "name"): - self.filename = os.path.abspath(filename.name) - - return self - - def __array_finalize__(self, obj): - if hasattr(obj, '_mmap'): - self._mmap = obj._mmap - self.filename = obj.filename - self.offset = obj.offset - self.mode = obj.mode - else: - self._mmap = None - - def flush(self): - """ - Write any changes in 
the array to the file on disk. - - For further information, see `memmap`. - - Parameters - ---------- - None - - See Also - -------- - memmap - - """ - if self._mmap is not None: - self._mmap.flush() - - def _close(self): - """Close the memmap file. Only do this when deleting the object.""" - if self.base is self._mmap: - # The python mmap probably causes flush on close, but - # we put this here for safety - self._mmap.flush() - self._mmap.close() - self._mmap = None - - def __del__(self): - # We first check if we are the owner of the mmap, rather than - # a view, so deleting a view does not call _close - # on the parent mmap - if self._mmap is self.base: - try: - # First run tell() to see whether file is open - self._mmap.tell() - except ValueError: - pass - else: - self._close() diff --git a/numpy-1.6.2/numpy/core/mlib.ini.in b/numpy-1.6.2/numpy/core/mlib.ini.in deleted file mode 100644 index badaa2ae9d..0000000000 --- a/numpy-1.6.2/numpy/core/mlib.ini.in +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=@posix_mathlib@ -Cflags= - -[msvc] -Libs=@msvc_mathlib@ -Cflags= diff --git a/numpy-1.6.2/numpy/core/npymath.ini.in b/numpy-1.6.2/numpy/core/npymath.ini.in deleted file mode 100644 index a233b8f3bf..0000000000 --- a/numpy-1.6.2/numpy/core/npymath.ini.in +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=@pkgname@ -prefix=${pkgdir} -libdir=${prefix}@sep@lib -includedir=${prefix}@sep@include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib -Cflags=/INCLUDE:${includedir} -Requires=mlib diff --git a/numpy-1.6.2/numpy/core/numeric.py b/numpy-1.6.2/numpy/core/numeric.py deleted file mode 100644 index 97949df3bf..0000000000 --- a/numpy-1.6.2/numpy/core/numeric.py +++ /dev/null @@ -1,2436 +0,0 @@ 
-__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', - 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', - 'dtype', 'fromstring', 'fromfile', 'frombuffer', - 'int_asbuffer', 'where', 'argwhere', - 'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops', - 'can_cast', 'promote_types', 'min_scalar_type', 'result_type', - 'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray', - 'isfortran', 'empty_like', 'zeros_like', - 'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot', - 'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot', - 'array2string', 'get_printoptions', 'set_printoptions', - 'array_repr', 'array_str', 'set_string_function', - 'little_endian', 'require', - 'fromiter', 'array_equal', 'array_equiv', - 'indices', 'fromfunction', - 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', - 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', - 'seterr', 'geterr', 'setbufsize', 'getbufsize', - 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero', - 'Inf', 'inf', 'infty', 'Infinity', - 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', - 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS', - 'ComplexWarning'] - -import sys -import warnings -import multiarray -import umath -from umath import * -import numerictypes -from numerictypes import * - -if sys.version_info[0] < 3: - __all__.extend(['getbuffer', 'newbuffer']) - -class ComplexWarning(RuntimeWarning): - """ - The warning raised when casting a complex dtype to a real dtype. - - As implemented, casting a complex number to a real discards its imaginary - part, but this behavior may not be what the user actually wants. 
- - """ - pass - -bitwise_not = invert - -CLIP = multiarray.CLIP -WRAP = multiarray.WRAP -RAISE = multiarray.RAISE -MAXDIMS = multiarray.MAXDIMS -ALLOW_THREADS = multiarray.ALLOW_THREADS -BUFSIZE = multiarray.BUFSIZE - -ndarray = multiarray.ndarray -flatiter = multiarray.flatiter -nditer = multiarray.nditer -nested_iters = multiarray.nested_iters -broadcast = multiarray.broadcast -dtype = multiarray.dtype -ufunc = type(sin) - - -# originally from Fernando Perez's IPython -def zeros_like(a, dtype=None, order='K', subok=True): - """ - Return an array of zeros with the same shape and type as a given array. - - With default parameters, is equivalent to ``a.copy().fill(0)``. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - - Returns - ------- - out : ndarray - Array of zeros with the same shape and type as `a`. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - empty_like : Return an empty array with shape and type of input. - zeros : Return a new array setting values to zero. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. 
- - Examples - -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.zeros_like(x) - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y = np.arange(3, dtype=np.float) - >>> y - array([ 0., 1., 2.]) - >>> np.zeros_like(y) - array([ 0., 0., 0.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok) - res.fill(0) - return res - -# end Fernando's utilities - - -def extend_all(module): - adict = {} - for a in __all__: - adict[a] = 1 - try: - mall = getattr(module, '__all__') - except AttributeError: - mall = [k for k in module.__dict__.keys() if not k.startswith('_')] - for a in mall: - if a not in adict: - __all__.append(a) - -extend_all(umath) -extend_all(numerictypes) - -newaxis = None - - -arange = multiarray.arange -array = multiarray.array -zeros = multiarray.zeros -count_nonzero = multiarray.count_nonzero -empty = multiarray.empty -empty_like = multiarray.empty_like -fromstring = multiarray.fromstring -fromiter = multiarray.fromiter -fromfile = multiarray.fromfile -frombuffer = multiarray.frombuffer -if sys.version_info[0] < 3: - newbuffer = multiarray.newbuffer - getbuffer = multiarray.getbuffer -int_asbuffer = multiarray.int_asbuffer -where = multiarray.where -concatenate = multiarray.concatenate -fastCopyAndTranspose = multiarray._fastCopyAndTranspose -set_numeric_ops = multiarray.set_numeric_ops -can_cast = multiarray.can_cast -promote_types = multiarray.promote_types -min_scalar_type = multiarray.min_scalar_type -result_type = multiarray.result_type -lexsort = multiarray.lexsort -compare_chararrays = multiarray.compare_chararrays -putmask = multiarray.putmask -einsum = multiarray.einsum - -def asarray(a, dtype=None, order=None): - """ - Convert the input to an array. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. 
- dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('F' for FORTRAN) - memory representation. Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - - See Also - -------- - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asarray(a) - array([1, 2]) - - Existing arrays are not copied: - - >>> a = np.array([1, 2]) - >>> np.asarray(a) is a - True - - If `dtype` is set, array is copied only if dtype does not match: - - >>> a = np.array([1, 2], dtype=np.float32) - >>> np.asarray(a, dtype=np.float32) is a - True - >>> np.asarray(a, dtype=np.float64) is a - False - - Contrary to `asanyarray`, ndarray subclasses are not passed through: - - >>> issubclass(np.matrix, np.ndarray) - True - >>> a = np.matrix([[1, 2]]) - >>> np.asarray(a) is a - False - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order) - -def asanyarray(a, dtype=None, order=None): - """ - Convert the input to an ndarray, but pass ndarray subclasses through. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes scalars, lists, lists of tuples, tuples, tuples of tuples, - tuples of lists, and ndarrays. 
- dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('F') memory - representation. Defaults to 'C'. - - Returns - ------- - out : ndarray or an ndarray subclass - Array interpretation of `a`. If `a` is an ndarray or a subclass - of ndarray, it is returned as-is and no copy is performed. - - See Also - -------- - asarray : Similar function which always returns ndarrays. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asanyarray(a) - array([1, 2]) - - Instances of `ndarray` subclasses are passed through as-is: - - >>> a = np.matrix([1, 2]) - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order, subok=True) - -def ascontiguousarray(a, dtype=None): - """ - Return a contiguous array in memory (C order). - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - Data-type of returned array. - - Returns - ------- - out : ndarray - Contiguous array of same shape and content as `a`, with type `dtype` - if specified. - - See Also - -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. 
- - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> np.ascontiguousarray(x, dtype=np.float32) - array([[ 0., 1., 2.], - [ 3., 4., 5.]], dtype=float32) - >>> x.flags['C_CONTIGUOUS'] - True - - """ - return array(a, dtype, copy=False, order='C', ndmin=1) - -def asfortranarray(a, dtype=None): - """ - Return an array laid out in Fortran order in memory. - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - By default, the data-type is inferred from the input data. - - Returns - ------- - out : ndarray - The input `a` in Fortran, or column-major, order. - - See Also - -------- - ascontiguousarray : Convert input to a contiguous (C order) array. - asanyarray : Convert input to an ndarray with either row or - column-major memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> y = np.asfortranarray(x) - >>> x.flags['F_CONTIGUOUS'] - False - >>> y.flags['F_CONTIGUOUS'] - True - - """ - return array(a, dtype, copy=False, order='F', ndmin=1) - -def require(a, dtype=None, requirements=None): - """ - Return an ndarray of the provided type that satisfies requirements. - - This function is useful to be sure that an array with the correct flags - is returned for passing to compiled code (perhaps through ctypes). - - Parameters - ---------- - a : array_like - The object to be converted to a type-and-requirement-satisfying array. - dtype : data-type - The required data-type, the default data-type is float64). 
- requirements : str or list of str - The requirements list can be any of the following - - * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array - * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array - * 'ALIGNED' ('A') - ensure a data-type aligned array - * 'WRITEABLE' ('W') - ensure a writable array - * 'OWNDATA' ('O') - ensure an array that owns its own data - - See Also - -------- - asarray : Convert input to an ndarray. - asanyarray : Convert to an ndarray, but pass through ndarray subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - ndarray.flags : Information about the memory layout of the array. - - Notes - ----- - The returned array will be guaranteed to have the listed requirements - by making a copy if needed. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : False - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - - >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) - >>> y.flags - C_CONTIGUOUS : False - F_CONTIGUOUS : True - OWNDATA : True - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - - """ - if requirements is None: - requirements = [] - else: - requirements = [x.upper() for x in requirements] - - if not requirements: - return asanyarray(a, dtype=dtype) - - if 'ENSUREARRAY' in requirements or 'E' in requirements: - subok = False - else: - subok = True - - arr = array(a, dtype=dtype, copy=False, subok=subok) - - copychar = 'A' - if 'FORTRAN' in requirements or \ - 'F_CONTIGUOUS' in requirements or \ - 'F' in requirements: - copychar = 'F' - elif 'CONTIGUOUS' in requirements or \ - 'C_CONTIGUOUS' in requirements or \ - 'C' in requirements: - copychar = 'C' - - for prop in requirements: - if not arr.flags[prop]: - arr = arr.copy(copychar) - break - return arr - -def isfortran(a): - """ - Returns True if array is arranged 
in Fortran-order in memory - and dimension > 1. - - Parameters - ---------- - a : ndarray - Input array. - - - Examples - -------- - - np.array allows to specify whether the array is written in C-contiguous - order (last index varies the fastest), or FORTRAN-contiguous order in - memory (first index varies the fastest). - - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - - >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN') - >>> b - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(b) - True - - - The transpose of a C-ordered array is a FORTRAN-ordered array. - - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - >>> b = a.T - >>> b - array([[1, 4], - [2, 5], - [3, 6]]) - >>> np.isfortran(b) - True - - 1-D arrays always evaluate as False. - - >>> np.isfortran(np.array([1, 2], order='FORTRAN')) - False - - """ - return a.flags.fnc - -def argwhere(a): - """ - Find the indices of array elements that are non-zero, grouped by element. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - index_array : ndarray - Indices of elements that are non-zero. Indices are grouped by element. - - See Also - -------- - where, nonzero - - Notes - ----- - ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. - - The output of ``argwhere`` is not suitable for indexing arrays. - For this purpose use ``where(a)`` instead. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argwhere(x>1) - array([[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - """ - return transpose(asanyarray(a).nonzero()) - -def flatnonzero(a): - """ - Return indices that are non-zero in the flattened version of a. - - This is equivalent to a.ravel().nonzero()[0]. - - Parameters - ---------- - a : ndarray - Input array. 
- - Returns - ------- - res : ndarray - Output array, containing the indices of the elements of `a.ravel()` - that are non-zero. - - See Also - -------- - nonzero : Return the indices of the non-zero elements of the input array. - ravel : Return a 1-D array containing the elements of the input array. - - Examples - -------- - >>> x = np.arange(-2, 3) - >>> x - array([-2, -1, 0, 1, 2]) - >>> np.flatnonzero(x) - array([0, 1, 3, 4]) - - Use the indices of the non-zero elements as an index array to extract - these elements: - - >>> x.ravel()[np.flatnonzero(x)] - array([-2, -1, 1, 2]) - - """ - return a.ravel().nonzero()[0] - -_mode_from_name_dict = {'v': 0, - 's' : 1, - 'f' : 2} - -def _mode_from_name(mode): - if isinstance(mode, type("")): - return _mode_from_name_dict[mode.lower()[0]] - return mode - -def correlate(a, v, mode='valid', old_behavior=False): - """ - Cross-correlation of two 1-dimensional sequences. - - This function computes the correlation as generally defined in signal - processing texts:: - - z[k] = sum_n a[n] * conj(v[n+k]) - - with a and v sequences being zero-padded where necessary and conj being - the conjugate. - - Parameters - ---------- - a, v : array_like - Input sequences. - mode : {'valid', 'same', 'full'}, optional - Refer to the `convolve` docstring. Note that the default - is `valid`, unlike `convolve`, which uses `full`. - old_behavior : bool - If True, uses the old behavior from Numeric, (correlate(a,v) == correlate(v, - a), and the conjugate is not taken for complex arrays). If False, uses - the conventional signal processing definition (see note). - - See Also - -------- - convolve : Discrete, linear convolution of two one-dimensional sequences. - - Examples - -------- - >>> np.correlate([1, 2, 3], [0, 1, 0.5]) - array([ 3.5]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") - array([ 2. , 3.5, 3. ]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") - array([ 0.5, 2. , 3.5, 3. , 0. 
]) - - """ - mode = _mode_from_name(mode) -# the old behavior should be made available under a different name, see thread -# http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630 - if old_behavior: - warnings.warn(""" -The old behavior of correlate was deprecated for 1.4.0, and will be completely removed -for NumPy 2.0. - -The new behavior fits the conventional definition of correlation: inputs are -never swapped, and the second argument is conjugated for complex arrays.""", - DeprecationWarning) - return multiarray.correlate(a,v,mode) - else: - return multiarray.correlate2(a,v,mode) - -def convolve(a,v,mode='full'): - """ - Returns the discrete, linear convolution of two one-dimensional sequences. - - The convolution operator is often seen in signal processing, where it - models the effect of a linear time-invariant system on a signal [1]_. In - probability theory, the sum of two independent random variables is - distributed according to the convolution of their individual - distributions. - - Parameters - ---------- - a : (N,) array_like - First one-dimensional input array. - v : (M,) array_like - Second one-dimensional input array. - mode : {'full', 'valid', 'same'}, optional - 'full': - By default, mode is 'full'. This returns the convolution - at each point of overlap, with an output shape of (N+M-1,). At - the end-points of the convolution, the signals do not overlap - completely, and boundary effects may be seen. - - 'same': - Mode `same` returns output of length ``max(M, N)``. Boundary - effects are still visible. - - 'valid': - Mode `valid` returns output of length - ``max(M, N) - min(M, N) + 1``. The convolution product is only given - for points where the signals overlap completely. Values outside - the signal boundary have no effect. - - Returns - ------- - out : ndarray - Discrete, linear convolution of `a` and `v`. - - See Also - -------- - scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier - Transform. 
- scipy.linalg.toeplitz : Used to construct the convolution operator. - - Notes - ----- - The discrete convolution operation is defined as - - .. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m] - - It can be shown that a convolution :math:`x(t) * y(t)` in time/space - is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier - domain, after appropriate padding (padding is necessary to prevent - circular convolution). Since multiplication is more efficient (faster) - than convolution, the function `scipy.signal.fftconvolve` exploits the - FFT to calculate the convolution of large data-sets. - - References - ---------- - .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution. - - Examples - -------- - Note how the convolution operator flips the second array - before "sliding" the two across one another: - - >>> np.convolve([1, 2, 3], [0, 1, 0.5]) - array([ 0. , 1. , 2.5, 4. , 1.5]) - - Only return the middle values of the convolution. - Contains boundary effects, where zeros are taken - into account: - - >>> np.convolve([1,2,3],[0,1,0.5], 'same') - array([ 1. , 2.5, 4. ]) - - The two arrays are of the same length, so there - is only one position where they completely overlap: - - >>> np.convolve([1,2,3],[0,1,0.5], 'valid') - array([ 2.5]) - - """ - a,v = array(a, ndmin=1),array(v, ndmin=1) - if (len(v) > len(a)): - a, v = v, a - if len(a) == 0 : - raise ValueError('a cannot be empty') - if len(v) == 0 : - raise ValueError('v cannot be empty') - mode = _mode_from_name(mode) - return multiarray.correlate(a, v[::-1], mode) - -def outer(a,b): - """ - Compute the outer product of two vectors. - - Given two vectors, ``a = [a0, a1, ..., aM]`` and - ``b = [b0, b1, ..., bN]``, - the outer product [1]_ is:: - - [[a0*b0 a0*b1 ... a0*bN ] - [a1*b0 . - [ ... . - [aM*b0 aM*bN ]] - - Parameters - ---------- - a, b : array_like, shape (M,), (N,) - First and second input vectors. 
Inputs are flattened if they - are not already 1-dimensional. - - Returns - ------- - out : ndarray, shape (M, N) - ``out[i, j] = a[i] * b[j]`` - - See also - -------- - inner, einsum - - References - ---------- - .. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd - ed., Baltimore, MD, Johns Hopkins University Press, 1996, - pg. 8. - - Examples - -------- - Make a (*very* coarse) grid for computing a Mandelbrot set: - - >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) - >>> rl - array([[-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.]]) - >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) - >>> im - array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], - [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], - [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) - >>> grid = rl + im - >>> grid - array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], - [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], - [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], - [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], - [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) - - An example using a "vector" of letters: - - >>> x = np.array(['a', 'b', 'c'], dtype=object) - >>> np.outer(x, [1, 2, 3]) - array([[a, aa, aaa], - [b, bb, bbb], - [c, cc, ccc]], dtype=object) - - """ - a = asarray(a) - b = asarray(b) - return a.ravel()[:,newaxis]*b.ravel()[newaxis,:] - -# try to import blas optimized dot if available -try: - # importing this changes the dot function for basic 4 types - # to blas-optimized versions. 
- from _dotblas import dot, vdot, inner, alterdot, restoredot -except ImportError: - # docstrings are in add_newdocs.py - inner = multiarray.inner - dot = multiarray.dot - def vdot(a, b): - return dot(asarray(a).ravel().conj(), asarray(b).ravel()) - def alterdot(): - pass - def restoredot(): - pass - -def tensordot(a, b, axes=2): - """ - Compute tensor dot product along specified axes for arrays >= 1-D. - - Given two tensors (arrays of dimension greater than or equal to one), - ``a`` and ``b``, and an array_like object containing two array_like - objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s - elements (components) over the axes specified by ``a_axes`` and - ``b_axes``. The third argument can be a single non-negative - integer_like scalar, ``N``; if it is such, then the last ``N`` - dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed - over. - - Parameters - ---------- - a, b : array_like, len(shape) >= 1 - Tensors to "dot". - - axes : variable type - - * integer_like scalar - Number of axes to sum over (applies to both arrays); or - - * array_like, shape = (2,), both elements array_like - Axes to be summed over, first sequence applying to ``a``, second - to ``b``. - - See Also - -------- - dot, einsum - - Notes - ----- - When there is more than one axis to sum over - and they are not the last - (first) axes of ``a`` (``b``) - the argument ``axes`` should consist of - two sequences of the same length, with the first axis to sum over given - first in both sequences, the second axis second, and so forth. - - Examples - -------- - A "traditional" example: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) - >>> c.shape - (5, 2) - >>> c - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> # A slower but equivalent way of computing the same... 
- >>> d = np.zeros((5,2)) - >>> for i in range(5): - ... for j in range(2): - ... for k in range(3): - ... for n in range(4): - ... d[i,j] += a[k,n,i] * b[n,k,j] - >>> c == d - array([[ True, True], - [ True, True], - [ True, True], - [ True, True], - [ True, True]], dtype=bool) - - An extended example taking advantage of the overloading of + and \\*: - - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) - >>> a; A - array([[[1, 2], - [3, 4]], - [[5, 6], - [7, 8]]]) - array([[a, b], - [c, d]], dtype=object) - - >>> np.tensordot(a, A) # third argument default is 2 - array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object) - - >>> np.tensordot(a, A, 1) - array([[[acc, bdd], - [aaacccc, bbbdddd]], - [[aaaaacccccc, bbbbbdddddd], - [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object) - - >>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.) - array([[[[[a, b], - [c, d]], - ... - - >>> np.tensordot(a, A, (0, 1)) - array([[[abbbbb, cddddd], - [aabbbbbb, ccdddddd]], - [[aaabbbbbbb, cccddddddd], - [aaaabbbbbbbb, ccccdddddddd]]], dtype=object) - - >>> np.tensordot(a, A, (2, 1)) - array([[[abb, cdd], - [aaabbbb, cccdddd]], - [[aaaaabbbbbb, cccccdddddd], - [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object) - - >>> np.tensordot(a, A, ((0, 1), (0, 1))) - array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object) - - >>> np.tensordot(a, A, ((2, 1), (1, 0))) - array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object) - - """ - try: - iter(axes) - except: - axes_a = range(-axes,0) - axes_b = range(0,axes) - else: - axes_a, axes_b = axes - try: - na = len(axes_a) - axes_a = list(axes_a) - except TypeError: - axes_a = [axes_a] - na = 1 - try: - nb = len(axes_b) - axes_b = list(axes_b) - except TypeError: - axes_b = [axes_b] - nb = 1 - - a, b = asarray(a), asarray(b) - as_ = a.shape - nda = len(a.shape) - bs = b.shape - ndb = len(b.shape) - equal = True - if (na != nb): 
equal = False - else: - for k in xrange(na): - if as_[axes_a[k]] != bs[axes_b[k]]: - equal = False - break - if axes_a[k] < 0: - axes_a[k] += nda - if axes_b[k] < 0: - axes_b[k] += ndb - if not equal: - raise ValueError, "shape-mismatch for sum" - - # Move the axes to sum over to the end of "a" - # and to the front of "b" - notin = [k for k in range(nda) if k not in axes_a] - newaxes_a = notin + axes_a - N2 = 1 - for axis in axes_a: - N2 *= as_[axis] - newshape_a = (-1, N2) - olda = [as_[axis] for axis in notin] - - notin = [k for k in range(ndb) if k not in axes_b] - newaxes_b = axes_b + notin - N2 = 1 - for axis in axes_b: - N2 *= bs[axis] - newshape_b = (N2, -1) - oldb = [bs[axis] for axis in notin] - - at = a.transpose(newaxes_a).reshape(newshape_a) - bt = b.transpose(newaxes_b).reshape(newshape_b) - res = dot(at, bt) - return res.reshape(olda + oldb) - -def roll(a, shift, axis=None): - """ - Roll array elements along a given axis. - - Elements that roll beyond the last position are re-introduced at - the first. - - Parameters - ---------- - a : array_like - Input array. - shift : int - The number of places by which elements are shifted. - axis : int, optional - The axis along which elements are shifted. By default, the array - is flattened before shifting, after which the original - shape is restored. - - Returns - ------- - res : ndarray - Output array, with the same shape as `a`. - - See Also - -------- - rollaxis : Roll the specified axis backwards, until it lies in a - given position. 
- - Examples - -------- - >>> x = np.arange(10) - >>> np.roll(x, 2) - array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) - - >>> x2 = np.reshape(x, (2,5)) - >>> x2 - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> np.roll(x2, 1) - array([[9, 0, 1, 2, 3], - [4, 5, 6, 7, 8]]) - >>> np.roll(x2, 1, axis=0) - array([[5, 6, 7, 8, 9], - [0, 1, 2, 3, 4]]) - >>> np.roll(x2, 1, axis=1) - array([[4, 0, 1, 2, 3], - [9, 5, 6, 7, 8]]) - - """ - a = asanyarray(a) - if axis is None: - n = a.size - reshape = True - else: - n = a.shape[axis] - reshape = False - shift %= n - indexes = concatenate((arange(n-shift,n),arange(n-shift))) - res = a.take(indexes, axis) - if reshape: - return res.reshape(a.shape) - else: - return res - -def rollaxis(a, axis, start=0): - """ - Roll the specified axis backwards, until it lies in a given position. - - Parameters - ---------- - a : ndarray - Input array. - axis : int - The axis to roll backwards. The positions of the other axes do not - change relative to one another. - start : int, optional - The axis is rolled until it lies before this position. The default, - 0, results in a "complete" roll. - - Returns - ------- - res : ndarray - Output array. - - See Also - -------- - roll : Roll the elements of an array by a number of positions along a - given axis. 
- - Examples - -------- - >>> a = np.ones((3,4,5,6)) - >>> np.rollaxis(a, 3, 1).shape - (3, 6, 4, 5) - >>> np.rollaxis(a, 2).shape - (5, 3, 4, 6) - >>> np.rollaxis(a, 1, 4).shape - (3, 5, 6, 4) - - """ - n = a.ndim - if axis < 0: - axis += n - if start < 0: - start += n - msg = 'rollaxis: %s (%d) must be >=0 and < %d' - if not (0 <= axis < n): - raise ValueError, msg % ('axis', axis, n) - if not (0 <= start < n+1): - raise ValueError, msg % ('start', start, n+1) - if (axis < start): # it's been removed - start -= 1 - if axis==start: - return a - axes = range(0,n) - axes.remove(axis) - axes.insert(start, axis) - return a.transpose(axes) - -# fix hack in scipy which imports this function -def _move_axis_to_0(a, axis): - return rollaxis(a, axis, 0) - -def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): - """ - Return the cross product of two (arrays of) vectors. - - The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular - to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors - are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. - - Parameters - ---------- - a : array_like - Components of the first vector(s). - b : array_like - Components of the second vector(s). - axisa : int, optional - Axis of `a` that defines the vector(s). By default, the last axis. - axisb : int, optional - Axis of `b` that defines the vector(s). By default, the last axis. - axisc : int, optional - Axis of `c` containing the cross product vector(s). By default, the - last axis. - axis : int, optional - If defined, the axis of `a`, `b` and `c` that defines the vector(s) - and cross product(s). Overrides `axisa`, `axisb` and `axisc`. 
- - Returns - ------- - c : ndarray - Vector cross product(s). - - Raises - ------ - ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. - - See Also - -------- - inner : Inner product - outer : Outer product. - ix_ : Construct index arrays. - - Examples - -------- - Vector cross-product. - - >>> x = [1, 2, 3] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([-3, 6, -3]) - - One vector with dimension 2. - - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - - >>> x = [1, 2, 0] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Both vectors with dimension 2. - - >>> x = [1,2] - >>> y = [4,5] - >>> np.cross(x, y) - -3 - - Multiple vector cross-products. Note that the direction of the cross - product vector is defined by the `right-hand rule`. - - >>> x = np.array([[1,2,3], [4,5,6]]) - >>> y = np.array([[4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[-3, 6, -3], - [ 3, -6, 3]]) - - The orientation of `c` can be changed using the `axisc` keyword. - - >>> np.cross(x, y, axisc=0) - array([[-3, 3], - [ 6, -6], - [-3, 3]]) - - Change the vector definition of `x` and `y` using `axisa` and `axisb`. 
- - >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) - >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[ -6, 12, -6], - [ 0, 0, 0], - [ 6, -12, 6]]) - >>> np.cross(x, y, axisa=0, axisb=0) - array([[-24, 48, -24], - [-30, 60, -30], - [-36, 72, -36]]) - - """ - if axis is not None: - axisa,axisb,axisc=(axis,)*3 - a = asarray(a).swapaxes(axisa, 0) - b = asarray(b).swapaxes(axisb, 0) - msg = "incompatible dimensions for cross product\n"\ - "(dimension must be 2 or 3)" - if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]): - raise ValueError(msg) - if a.shape[0] == 2: - if (b.shape[0] == 2): - cp = a[0]*b[1] - a[1]*b[0] - if cp.ndim == 0: - return cp - else: - return cp.swapaxes(0, axisc) - else: - x = a[1]*b[2] - y = -a[0]*b[2] - z = a[0]*b[1] - a[1]*b[0] - elif a.shape[0] == 3: - if (b.shape[0] == 3): - x = a[1]*b[2] - a[2]*b[1] - y = a[2]*b[0] - a[0]*b[2] - z = a[0]*b[1] - a[1]*b[0] - else: - x = -a[2]*b[1] - y = a[2]*b[0] - z = a[0]*b[1] - a[1]*b[0] - cp = array([x,y,z]) - if cp.ndim == 1: - return cp - else: - return cp.swapaxes(0,axisc) - - -#Use numarray's printing function -from arrayprint import array2string, get_printoptions, set_printoptions - -_typelessdata = [int_, float_, complex_] -if issubclass(intc, int): - _typelessdata.append(intc) - -if issubclass(longlong, int): - _typelessdata.append(longlong) - -def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): - """ - Return the string representation of an array. - - Parameters - ---------- - arr : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters split the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing precision - (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero, default is False. 
Very small - is defined by `precision`, if the precision is 8 then - numbers smaller than 5e-9 are represented as zero. - - Returns - ------- - string : str - The string representation of an array. - - See Also - -------- - array_str, array2string, set_printoptions - - Examples - -------- - >>> np.array_repr(np.array([1,2])) - 'array([1, 2])' - >>> np.array_repr(np.ma.array([0.])) - 'MaskedArray([ 0.])' - >>> np.array_repr(np.array([], np.int32)) - 'array([], dtype=int32)' - - >>> x = np.array([1e-6, 4e-7, 2, 3]) - >>> np.array_repr(x, precision=6, suppress_small=True) - 'array([ 0.000001, 0. , 2. , 3. ])' - - """ - if arr.size > 0 or arr.shape==(0,): - lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', "array(") - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - typeless = arr.dtype.type in _typelessdata - - if arr.__class__ is not ndarray: - cName= arr.__class__.__name__ - else: - cName = "array" - if typeless and arr.size: - return cName + "(%s)" % lst - else: - typename=arr.dtype.name - lf = '' - if issubclass(arr.dtype.type, flexible): - if arr.dtype.names: - typename = "%s" % str(arr.dtype) - else: - typename = "'%s'" % str(arr.dtype) - lf = '\n'+' '*len("array(") - return cName + "(%s, %sdtype=%s)" % (lst, lf, typename) - -def array_str(a, max_line_width=None, precision=None, suppress_small=None): - """ - Return a string representation of the data in an array. - - The data in the array is returned as a single string. This function is - similar to `array_repr`, the difference being that `array_repr` also - returns information on the kind of array and its data type. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. The - default is, indirectly, 75. - precision : int, optional - Floating point precision. 
Default is the current printing precision - (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - - See Also - -------- - array2string, array_repr, set_printoptions - - Examples - -------- - >>> np.array_str(np.arange(3)) - '[0 1 2]' - - """ - return array2string(a, max_line_width, precision, suppress_small, ' ', "", str) - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> np.set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> print a - [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> np.set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. 
- - >>> x = np.arange(4) - >>> np.set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([ 0, 1, 2, 3])' - - """ - if f is None: - if repr: - return multiarray.set_string_function(array_repr, 1) - else: - return multiarray.set_string_function(array_str, 0) - else: - return multiarray.set_string_function(f, repr) - -set_string_function(array_str, 0) -set_string_function(array_repr, 1) - -little_endian = (sys.byteorder == 'little') - - -def indices(dimensions, dtype=int): - """ - Return an array representing the indices of a grid. - - Compute an array where the subarrays contain index values 0,1,... - varying only along the corresponding axis. - - Parameters - ---------- - dimensions : sequence of ints - The shape of the grid. - dtype : dtype, optional - Data type of the result. - - Returns - ------- - grid : ndarray - The array of grid indices, - ``grid.shape = (len(dimensions),) + tuple(dimensions)``. - - See Also - -------- - mgrid, meshgrid - - Notes - ----- - The output shape is obtained by prepending the number of dimensions - in front of the tuple of dimensions, i.e. if `dimensions` is a tuple - ``(r0, ..., rN-1)`` of length ``N``, the output shape is - ``(N,r0,...,rN-1)``. - - The subarrays ``grid[k]`` contains the N-D array of indices along the - ``k-th`` axis. Explicitly:: - - grid[k,i0,i1,...,iN-1] = ik - - Examples - -------- - >>> grid = np.indices((2, 3)) - >>> grid.shape - (2, 2, 3) - >>> grid[0] # row indices - array([[0, 0, 0], - [1, 1, 1]]) - >>> grid[1] # column indices - array([[0, 1, 2], - [0, 1, 2]]) - - The indices can be used as an index into an array. - - >>> x = np.arange(20).reshape(5, 4) - >>> row, col = np.indices((2, 3)) - >>> x[row, col] - array([[0, 1, 2], - [4, 5, 6]]) - - Note that it would be more straightforward in the above example to - extract the required elements directly with ``x[:2, :3]``. 
- - """ - dimensions = tuple(dimensions) - N = len(dimensions) - if N == 0: - return array([],dtype=dtype) - res = empty((N,)+dimensions, dtype=dtype) - for i, dim in enumerate(dimensions): - tmp = arange(dim,dtype=dtype) - tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1) - newdim = dimensions[:i] + (1,)+ dimensions[i+1:] - val = zeros(newdim, dtype) - add(tmp, val, res[i]) - return res - -def fromfunction(function, shape, **kwargs): - """ - Construct an array by executing a function over each coordinate. - - The resulting array therefore has a value ``fn(x, y, z)`` at - coordinate ``(x, y, z)``. - - Parameters - ---------- - function : callable - The function is called with N parameters, each of which - represents the coordinates of the array varying along a - specific axis. For example, if `shape` were ``(2, 2)``, then - the parameters would be two arrays, ``[[0, 0], [1, 1]]`` and - ``[[0, 1], [0, 1]]``. `function` must be capable of operating on - arrays, and should return a scalar value. - shape : (N,) tuple of ints - Shape of the output array, which also determines the shape of - the coordinate arrays passed to `function`. - dtype : data-type, optional - Data-type of the coordinate arrays passed to `function`. - By default, `dtype` is float. - - Returns - ------- - out : any - The result of the call to `function` is passed back directly. - Therefore the type and shape of `out` is completely determined by - `function`. - - See Also - -------- - indices, meshgrid - - Notes - ----- - Keywords other than `shape` and `dtype` are passed to `function`. 
- - Examples - -------- - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) - array([[ True, False, False], - [False, True, False], - [False, False, True]], dtype=bool) - - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) - array([[0, 1, 2], - [1, 2, 3], - [2, 3, 4]]) - - """ - dtype = kwargs.pop('dtype', float) - args = indices(shape, dtype=dtype) - return function(*args,**kwargs) - -def isscalar(num): - """ - Returns True if the type of `num` is a scalar type. - - Parameters - ---------- - num : any - Input argument, can be of any type and shape. - - Returns - ------- - val : bool - True if `num` is a scalar type, False if it is not. - - Examples - -------- - >>> np.isscalar(3.1) - True - >>> np.isscalar([3.1]) - False - >>> np.isscalar(False) - True - - """ - if isinstance(num, generic): - return True - else: - return type(num) in ScalarType - -_lkup = { - '0':'0000', - '1':'0001', - '2':'0010', - '3':'0011', - '4':'0100', - '5':'0101', - '6':'0110', - '7':'0111', - '8':'1000', - '9':'1001', - 'a':'1010', - 'b':'1011', - 'c':'1100', - 'd':'1101', - 'e':'1110', - 'f':'1111', - 'A':'1010', - 'B':'1011', - 'C':'1100', - 'D':'1101', - 'E':'1110', - 'F':'1111', - 'L':''} - -def binary_repr(num, width=None): - """ - Return the binary representation of the input number as a string. - - For negative numbers, if width is not given, a minus sign is added to the - front. If width is given, the two's complement of the number is - returned, with respect to that width. - - In a two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement - system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. - - Parameters - ---------- - num : int - Only an integer decimal number can be used. 
- width : int, optional - The length of the returned string if `num` is positive, the length of - the two's complement if `num` is negative. - - Returns - ------- - bin : str - Binary representation of `num` or two's complement of `num`. - - See Also - -------- - base_repr: Return a string representation of a number in the given base - system. - - Notes - ----- - `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x - faster. - - References - ---------- - .. [1] Wikipedia, "Two's complement", - http://en.wikipedia.org/wiki/Two's_complement - - Examples - -------- - >>> np.binary_repr(3) - '11' - >>> np.binary_repr(-3) - '-11' - >>> np.binary_repr(3, width=4) - '0011' - - The two's complement is returned when the input number is negative and - width is specified: - - >>> np.binary_repr(-3, width=4) - '1101' - - """ - sign = '' - if num < 0: - if width is None: - sign = '-' - num = -num - else: - # replace num with its 2-complement - num = 2**width + num - elif num == 0: - return '0'*(width or 1) - ostr = hex(num) - bin = ''.join([_lkup[ch] for ch in ostr[2:]]) - bin = bin.lstrip('0') - if width is not None: - bin = bin.zfill(width) - return sign + bin - -def base_repr(number, base=2, padding=0): - """ - Return a string representation of a number in the given base system. - - Parameters - ---------- - number : int - The value to convert. Only positive values are handled. - base : int, optional - Convert `number` to the `base` number system. The valid range is 2-36, - the default value is 2. - padding : int, optional - Number of zeros padded on the left. Default is 0 (no padding). - - Returns - ------- - out : str - String representation of `number` in `base` system. - - See Also - -------- - binary_repr : Faster version of `base_repr` for base 2. 
- - Examples - -------- - >>> np.base_repr(5) - '101' - >>> np.base_repr(6, 5) - '11' - >>> np.base_repr(7, base=5, padding=3) - '00012' - - >>> np.base_repr(10, base=16) - 'A' - >>> np.base_repr(32, base=16) - '20' - - """ - digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' - if base > len(digits): - raise ValueError("Bases greater than 36 not handled in base_repr.") - - num = abs(number) - res = [] - while num: - res.append(digits[num % base]) - num //= base - if padding: - res.append('0' * padding) - if number < 0: - res.append('-') - return ''.join(reversed(res or '0')) - -from cPickle import load, loads -_cload = load -_file = open - -def load(file): - """ - Wrapper around cPickle.load which accepts either a file-like object or - a filename. - - Note that the NumPy binary format is not based on pickle/cPickle anymore. - For details on the preferred way of loading and saving files, see `load` - and `save`. - - See Also - -------- - load, save - - """ - if isinstance(file, type("")): - file = _file(file,"rb") - return _cload(file) - -# These are all essentially abbreviations -# These might wind up in a special abbreviations module - -def _maketup(descr, val): - dt = dtype(descr) - # Place val in all scalar tuples: - fields = dt.fields - if fields is None: - return val - else: - res = [_maketup(fields[name][0],val) for name in dt.names] - return tuple(res) - -def ones(shape, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with ones. - - Please refer to the documentation for `zeros` for further details. - - See Also - -------- - zeros, ones_like - - Examples - -------- - >>> np.ones(5) - array([ 1., 1., 1., 1., 1.]) - - >>> np.ones((5,), dtype=np.int) - array([1, 1, 1, 1, 1]) - - >>> np.ones((2, 1)) - array([[ 1.], - [ 1.]]) - - >>> s = (2,2) - >>> np.ones(s) - array([[ 1., 1.], - [ 1., 1.]]) - - """ - a = empty(shape, dtype, order) - try: - a.fill(1) - # Above is faster now after addition of fast loops. 
- #a = zeros(shape, dtype, order) - #a+=1 - except TypeError: - obj = _maketup(dtype, 1) - a.fill(obj) - return a - -def identity(n, dtype=None): - """ - Return the identity array. - - The identity array is a square array with ones on - the main diagonal. - - Parameters - ---------- - n : int - Number of rows (and columns) in `n` x `n` output. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. - - Returns - ------- - out : ndarray - `n` x `n` array with its main diagonal set to one, - and all other elements 0. - - Examples - -------- - >>> np.identity(3) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - """ - a = zeros((n,n), dtype=dtype) - a.flat[::n+1] = 1 - return a - -def allclose(a, b, rtol=1.e-5, atol=1.e-8): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - If either array contains one or more NaNs, False is returned. - Infs are treated as equal if they are in the same place and of the same - sign in both arrays. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance; False otherwise. - - See Also - -------- - all, any, alltrue, sometrue - - Notes - ----- - If the following equation is element-wise True, then allclose returns - True. - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - The above equation is not symmetric in `a` and `b`, so that - `allclose(a, b)` might be different from `allclose(b, a)` in - some rare cases. 
- - Examples - -------- - >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) - False - >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) - True - >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) - False - >>> np.allclose([1.0, np.nan], [1.0, np.nan]) - False - - """ - x = array(a, copy=False, ndmin=1) - y = array(b, copy=False, ndmin=1) - - if any(isnan(x)) or any(isnan(y)): - return False - - xinf = isinf(x) - yinf = isinf(y) - if any(xinf) or any(yinf): - # Check that x and y have inf's only in the same positions - if not all(xinf == yinf): - return False - # Check that sign of inf's in x and y is the same - if not all(x[xinf] == y[xinf]): - return False - - x = x[~xinf] - y = y[~xinf] - - return all(less_equal(abs(x-y), atol + rtol * abs(y))) - -def array_equal(a1, a2): - """ - True if two arrays have the same shape and elements, False otherwise. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - b : bool - Returns True if the arrays are equal. - - See Also - -------- - allclose: Returns True if two arrays are element-wise equal within a - tolerance. - array_equiv: Returns True if input arrays are shape consistent and all - elements equal. - - Examples - -------- - >>> np.array_equal([1, 2], [1, 2]) - True - >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) - True - >>> np.array_equal([1, 2], [1, 2, 3]) - False - >>> np.array_equal([1, 2], [1, 4]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except: - return False - if a1.shape != a2.shape: - return False - return bool(logical_and.reduce(equal(a1,a2).ravel())) - -def array_equiv(a1, a2): - """ - Returns True if input arrays are shape consistent and all elements equal. - - Shape consistent means they are either the same shape, or one input array - can be broadcasted to create the same shape as the other one. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - out : bool - True if equivalent, False otherwise. 
- - Examples - -------- - >>> np.array_equiv([1, 2], [1, 2]) - True - >>> np.array_equiv([1, 2], [1, 3]) - False - - Showing the shape equivalence: - - >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) - True - >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) - False - - >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except: - return False - try: - return bool(logical_and.reduce(equal(a1,a2).ravel())) - except ValueError: - return False - - -_errdict = {"ignore":ERR_IGNORE, - "warn":ERR_WARN, - "raise":ERR_RAISE, - "call":ERR_CALL, - "print":ERR_PRINT, - "log":ERR_LOG} - -_errdict_rev = {} -for key in _errdict.keys(): - _errdict_rev[_errdict[key]] = key -del key - -def seterr(all=None, divide=None, over=None, under=None, invalid=None): - """ - Set how floating-point errors are handled. - - Note that operations on integer scalar types (such as `int16`) are - handled like floating point, and are affected by these settings. - - Parameters - ---------- - all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Set treatment for all types of floating-point errors at once: - - - ignore: Take no action when the exception occurs. - - warn: Print a `RuntimeWarning` (via the Python `warnings` module). - - raise: Raise a `FloatingPointError`. - - call: Call a function specified using the `seterrcall` function. - - print: Print a warning directly to ``stdout``. - - log: Record error in a Log object specified by `seterrcall`. - - The default is not to change the current behavior. - divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for division by zero. - over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point overflow. - under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point underflow. 
- invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for invalid floating-point operation. - - Returns - ------- - old_settings : dict - Dictionary containing the old settings. - - See also - -------- - seterrcall : Set a callback function for the 'call' mode. - geterr, geterrcall - - Notes - ----- - The floating-point exceptions are defined in the IEEE 754 standard [1]: - - - Division by zero: infinite result obtained from finite numbers. - - Overflow: result too large to be expressed. - - Underflow: result so close to zero that some precision - was lost. - - Invalid operation: result is not an expressible number, typically - indicates that a NaN was produced. - - .. [1] http://en.wikipedia.org/wiki/IEEE_754 - - Examples - -------- - >>> old_settings = np.seterr(all='ignore') #seterr to known value - >>> np.seterr(over='raise') - {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', - 'under': 'ignore'} - >>> np.seterr(all='ignore') # reset to default - {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} - - >>> np.int16(32000) * np.int16(3) - 30464 - >>> old_settings = np.seterr(all='warn', over='raise') - >>> np.int16(32000) * np.int16(3) - Traceback (most recent call last): - File "", line 1, in - FloatingPointError: overflow encountered in short_scalars - - >>> old_settings = np.seterr(all='print') - >>> np.geterr() - {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'} - >>> np.int16(32000) * np.int16(3) - Warning: overflow encountered in short_scalars - 30464 - - """ - - pyvals = umath.geterrobj() - old = geterr() - - if divide is None: divide = all or old['divide'] - if over is None: over = all or old['over'] - if under is None: under = all or old['under'] - if invalid is None: invalid = all or old['invalid'] - - maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + - (_errdict[over] << SHIFT_OVERFLOW ) + - (_errdict[under] << SHIFT_UNDERFLOW) + - (_errdict[invalid] << 
SHIFT_INVALID)) - - pyvals[1] = maskvalue - umath.seterrobj(pyvals) - return old - - -def geterr(): - """ - Get the current way of handling floating-point errors. - - Returns - ------- - res : dict - A dictionary with keys "divide", "over", "under", and "invalid", - whose values are from the strings "ignore", "print", "log", "warn", - "raise", and "call". The keys represent possible floating-point - exceptions, and the values define how these exceptions are handled. - - See Also - -------- - geterrcall, seterr, seterrcall - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterr() - {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', - 'under': 'ignore'} - >>> np.arange(3.) / np.arange(3.) - array([ NaN, 1., 1.]) - - >>> oldsettings = np.seterr(all='warn', over='raise') - >>> np.geterr() - {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'} - >>> np.arange(3.) / np.arange(3.) - __main__:1: RuntimeWarning: invalid value encountered in divide - array([ NaN, 1., 1.]) - - """ - maskvalue = umath.geterrobj()[1] - mask = 7 - res = {} - val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask - res['divide'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_OVERFLOW) & mask - res['over'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_UNDERFLOW) & mask - res['under'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_INVALID) & mask - res['invalid'] = _errdict_rev[val] - return res - -def setbufsize(size): - """ - Set the size of the buffer used in ufuncs. - - Parameters - ---------- - size : int - Size of buffer. - - """ - if size > 10e6: - raise ValueError, "Buffer size, %s, is too big." % size - if size < 5: - raise ValueError, "Buffer size, %s, is too small." %size - if size % 16 != 0: - raise ValueError, "Buffer size, %s, is not a multiple of 16." 
%size - - pyvals = umath.geterrobj() - old = getbufsize() - pyvals[0] = size - umath.seterrobj(pyvals) - return old - -def getbufsize(): - """Return the size of the buffer used in ufuncs. - """ - return umath.geterrobj()[0] - -def seterrcall(func): - """ - Set the floating-point error callback function or log object. - - There are two ways to capture floating-point error messages. The first - is to set the error-handler to 'call', using `seterr`. Then, set - the function to call using this function. - - The second is to set the error-handler to 'log', using `seterr`. - Floating-point errors then trigger a call to the 'write' method of - the provided object. - - Parameters - ---------- - func : callable f(err, flag) or object with write method - Function to call upon floating-point errors ('call'-mode) or - object whose 'write' method is used to log such message ('log'-mode). - - The call function takes two arguments. The first is the - type of error (one of "divide", "over", "under", or "invalid"), - and the second is the status flag. The flag is a byte, whose - least-significant bits indicate the status:: - - [0 0 0 0 invalid over under invalid] - - In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. - - If an object is provided, its write method should take one argument, - a string. - - Returns - ------- - h : callable, log instance or None - The old error handler. - - See Also - -------- - seterr, geterr, geterrcall - - Examples - -------- - Callback upon error: - - >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) - ... 
- - >>> saved_handler = np.seterrcall(err_handler) - >>> save_err = np.seterr(all='call') - - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([ Inf, Inf, Inf]) - - >>> np.seterrcall(saved_handler) - - >>> np.seterr(**save_err) - {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'} - - Log error message: - - >>> class Log(object): - ... def write(self, msg): - ... print "LOG: %s" % msg - ... - - >>> log = Log() - >>> saved_handler = np.seterrcall(log) - >>> save_err = np.seterr(all='log') - - >>> np.array([1, 2, 3]) / 0.0 - LOG: Warning: divide by zero encountered in divide - - array([ Inf, Inf, Inf]) - - >>> np.seterrcall(saved_handler) - <__main__.Log object at 0x...> - >>> np.seterr(**save_err) - {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'} - - """ - if func is not None and not callable(func): - if not hasattr(func, 'write') or not callable(func.write): - raise ValueError, "Only callable can be used as callback" - pyvals = umath.geterrobj() - old = geterrcall() - pyvals[2] = func - umath.seterrobj(pyvals) - return old - -def geterrcall(): - """ - Return the current callback function used on floating-point errors. - - When the error handling for a floating-point error (one of "divide", - "over", "under", or "invalid") is set to 'call' or 'log', the function - that is called or the log instance that is written to is returned by - `geterrcall`. This function or log instance has been set with - `seterrcall`. - - Returns - ------- - errobj : callable, log instance or None - The current error handler. If no handler was set through `seterrcall`, - ``None`` is returned. - - See Also - -------- - seterrcall, seterr, geterr - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. 
- - Examples - -------- - >>> np.geterrcall() # we did not yet set a handler, returns None - - >>> oldsettings = np.seterr(all='call') - >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) - >>> oldhandler = np.seterrcall(err_handler) - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([ Inf, Inf, Inf]) - - >>> cur_handler = np.geterrcall() - >>> cur_handler is err_handler - True - - """ - return umath.geterrobj()[2] - -class _unspecified(object): - pass -_Unspecified = _unspecified() - -class errstate(object): - """ - errstate(**kwargs) - - Context manager for floating-point error handling. - - Using an instance of `errstate` as a context manager allows statements in - that context to execute with a known error handling behavior. Upon entering - the context the error handling is set with `seterr` and `seterrcall`, and - upon exiting it is reset to what it was before. - - Parameters - ---------- - kwargs : {divide, over, under, invalid} - Keyword arguments. The valid keywords are the possible floating-point - exceptions. Each keyword should have a string value that defines the - treatment for the particular error. Possible values are - {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. - - See Also - -------- - seterr, geterr, seterrcall, geterrcall - - Notes - ----- - The ``with`` statement was introduced in Python 2.5, and can only be used - there by importing it: ``from __future__ import with_statement``. In - earlier Python versions the ``with`` statement is not available. - - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> from __future__ import with_statement # use 'with' in Python 2.5 - >>> olderr = np.seterr(all='ignore') # Set error handling to known state. - - >>> np.arange(3) / 0. - array([ NaN, Inf, Inf]) - >>> with np.errstate(divide='warn'): - ... np.arange(3) / 0. - ... 
- __main__:2: RuntimeWarning: divide by zero encountered in divide - array([ NaN, Inf, Inf]) - - >>> np.sqrt(-1) - nan - >>> with np.errstate(invalid='raise'): - ... np.sqrt(-1) - Traceback (most recent call last): - File "", line 2, in - FloatingPointError: invalid value encountered in sqrt - - Outside the context the error handling behavior has not changed: - - >>> np.geterr() - {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', - 'under': 'ignore'} - - """ - # Note that we don't want to run the above doctests because they will fail - # without a from __future__ import with_statement - def __init__(self, **kwargs): - self.call = kwargs.pop('call',_Unspecified) - self.kwargs = kwargs - def __enter__(self): - self.oldstate = seterr(**self.kwargs) - if self.call is not _Unspecified: - self.oldcall = seterrcall(self.call) - def __exit__(self, *exc_info): - seterr(**self.oldstate) - if self.call is not _Unspecified: - seterrcall(self.oldcall) - -def _setdef(): - defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None] - umath.seterrobj(defval) - -# set the default values -_setdef() - -Inf = inf = infty = Infinity = PINF -nan = NaN = NAN -False_ = bool_(False) -True_ = bool_(True) - -import fromnumeric -from fromnumeric import * -extend_all(fromnumeric) diff --git a/numpy-1.6.2/numpy/core/numerictypes.py b/numpy-1.6.2/numpy/core/numerictypes.py deleted file mode 100644 index 8874667d8b..0000000000 --- a/numpy-1.6.2/numpy/core/numerictypes.py +++ /dev/null @@ -1,1033 +0,0 @@ -""" -numerictypes: Define the numeric type objects - -This module is designed so "from numerictypes import \\*" is safe. 
-Exported symbols include: - - Dictionary with all registered number types (including aliases): - typeDict - - Type objects (not all will be available, depends on platform): - see variable sctypes for which ones you have - - Bit-width names - - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 - datetime64 timedelta64 - - c-based names - - bool_ - - object_ - - void, str_, unicode_ - - byte, ubyte, - short, ushort - intc, uintc, - intp, uintp, - int_, uint, - longlong, ulonglong, - - - single, csingle, - float_, complex_, - longfloat, clongfloat, - - - datetime_, timedelta_, (these inherit from timeinteger which inherits - from signedinteger) - - - As part of the type-hierarchy: xx -- is bit-width - - generic - +-> bool_ (kind=b) - +-> number (kind=i) - | integer - | signedinteger (intxx) - | byte - | short - | intc - | intp int0 - | int_ - | longlong - +-> unsignedinteger (uintxx) (kind=u) - | ubyte - | ushort - | uintc - | uintp uint0 - | uint_ - | ulonglong - +-> inexact - | +-> floating (floatxx) (kind=f) - | | half - | | single - | | float_ (double) - | | longfloat - | \\-> complexfloating (complexxx) (kind=c) - | csingle (singlecomplex) - | complex_ (cfloat, cdouble) - | clongfloat (longcomplex) - +-> flexible - | character - | void (kind=V) - | - | str_ (string_, bytes_) (kind=S) [Python 2] - | unicode_ (kind=U) [Python 2] - | - | bytes_ (string_) (kind=S) [Python 3] - | str_ (unicode_) (kind=U) [Python 3] - | - \\-> object_ (not used much) (kind=O) - -""" - -# we add more at the bottom -__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', - 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', - 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', - 'issubdtype'] - -from numpy.core.multiarray import typeinfo, ndarray, array, empty, dtype -import types as _types -import sys - -# we don't 
export these for import *, but we do want them accessible -# as numerictypes.bool, etc. -from __builtin__ import bool, int, long, float, complex, object, unicode, str -from numpy.compat import bytes - -if sys.version_info[0] >= 3: - # Py3K - class long(int): - # Placeholder class -- this will not escape outside numerictypes.py - pass - -# String-handling utilities to avoid locale-dependence. - -# "import string" is costly to import! -# Construct the translation tables directly -# "A" = chr(65), "a" = chr(97) -_all_chars = map(chr, range(256)) -_ascii_upper = _all_chars[65:65+26] -_ascii_lower = _all_chars[97:97+26] -LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) -UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) - -#import string -# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \ -# LOWER_TABLE) -# assert (string.maketrnas(string_ascii_lowercase, string.ascii_uppercase) == \ -# UPPER_TABLE) -#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase) -#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase) - -def english_lower(s): - """ Apply English case rules to convert ASCII strings to all lower case. - - This is an internal utility function to replace calls to str.lower() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. 
- - Parameters - ---------- - s : str - - Returns - ------- - lowered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_lower - >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' - >>> english_lower('') - '' - """ - lowered = s.translate(LOWER_TABLE) - return lowered - -def english_upper(s): - """ Apply English case rules to convert ASCII strings to all upper case. - - This is an internal utility function to replace calls to str.upper() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - uppered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_upper - >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - >>> english_upper('') - '' - """ - uppered = s.translate(UPPER_TABLE) - return uppered - -def english_capitalize(s): - """ Apply English case rules to convert the first character of an ASCII - string to upper case. - - This is an internal utility function to replace calls to str.capitalize() - such that we can avoid changing behavior with changing locales. 
- - Parameters - ---------- - s : str - - Returns - ------- - capitalized : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_capitalize - >>> english_capitalize('int8') - 'Int8' - >>> english_capitalize('Int8') - 'Int8' - >>> english_capitalize('') - '' - """ - if s: - return english_upper(s[0]) + s[1:] - else: - return s - - -sctypeDict = {} # Contains all leaf-node scalar types with aliases -sctypeNA = {} # Contails all leaf-node types -> numarray type equivalences -allTypes = {} # Collect the types we will add to the module here - -def _evalname(name): - k = 0 - for ch in name: - if ch in '0123456789': - break - k += 1 - try: - bits = int(name[k:]) - except ValueError: - bits = 0 - base = name[:k] - return base, bits - -def bitname(obj): - """Return a bit-width name for a given type object""" - name = obj.__name__ - base = '' - char = '' - try: - if name[-1] == '_': - newname = name[:-1] - else: - newname = name - info = typeinfo[english_upper(newname)] - assert(info[-1] == obj) # sanity check - bits = info[2] - - except KeyError: # bit-width name - base, bits = _evalname(name) - char = base[0] - - if name == 'bool_': - char = 'b' - base = 'bool' - elif name=='void': - char = 'V' - base = 'void' - elif name=='object_': - char = 'O' - base = 'object' - bits = 0 - - if sys.version_info[0] >= 3: - if name=='bytes_': - char = 'S' - base = 'bytes' - elif name=='str_': - char = 'U' - base = 'str' - else: - if name=='string_': - char = 'S' - base = 'string' - elif name=='unicode_': - char = 'U' - base = 'unicode' - - bytes = bits // 8 - - if char != '' and bytes != 0: - char = "%s%d" % (char, bytes) - - return base, bits, char - - -def _add_types(): - for a in typeinfo.keys(): - name = english_lower(a) - if isinstance(typeinfo[a], tuple): - typeobj = typeinfo[a][-1] - - # define C-name and insert typenum and typechar references also - allTypes[name] = typeobj - sctypeDict[name] = typeobj - sctypeDict[typeinfo[a][0]] = typeobj - 
sctypeDict[typeinfo[a][1]] = typeobj - - else: # generic class - allTypes[name] = typeinfo[a] -_add_types() - -def _add_aliases(): - for a in typeinfo.keys(): - name = english_lower(a) - if not isinstance(typeinfo[a], tuple): - continue - typeobj = typeinfo[a][-1] - # insert bit-width version for this class (if relevant) - base, bit, char = bitname(typeobj) - if base[-3:] == 'int' or char[0] in 'ui': continue - if base != '': - myname = "%s%d" % (base, bit) - if (name != 'longdouble' and name != 'clongdouble') or \ - myname not in allTypes.keys(): - allTypes[myname] = typeobj - sctypeDict[myname] = typeobj - if base == 'complex': - na_name = '%s%d' % (english_capitalize(base), bit//2) - elif base == 'bool': - na_name = english_capitalize(base) - sctypeDict[na_name] = typeobj - else: - na_name = "%s%d" % (english_capitalize(base), bit) - sctypeDict[na_name] = typeobj - sctypeNA[na_name] = typeobj - sctypeDict[na_name] = typeobj - sctypeNA[typeobj] = na_name - sctypeNA[typeinfo[a][0]] = na_name - if char != '': - sctypeDict[char] = typeobj - sctypeNA[char] = na_name -_add_aliases() - -# Integers handled so that -# The int32, int64 types should agree exactly with -# PyArray_INT32, PyArray_INT64 in C -# We need to enforce the same checking as is done -# in arrayobject.h where the order of getting a -# bit-width match is: -# long, longlong, int, short, char -# for int8, int16, int32, int64, int128 - -def _add_integer_aliases(): - _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE'] - for ctype in _ctypes: - val = typeinfo[ctype] - bits = val[2] - charname = 'i%d' % (bits//8,) - ucharname = 'u%d' % (bits//8,) - intname = 'int%d' % bits - UIntname = 'UInt%d' % bits - Intname = 'Int%d' % bits - uval = typeinfo['U'+ctype] - typeobj = val[-1] - utypeobj = uval[-1] - if intname not in allTypes.keys(): - uintname = 'uint%d' % bits - allTypes[intname] = typeobj - allTypes[uintname] = utypeobj - sctypeDict[intname] = typeobj - sctypeDict[uintname] = utypeobj - 
sctypeDict[Intname] = typeobj - sctypeDict[UIntname] = utypeobj - sctypeDict[charname] = typeobj - sctypeDict[ucharname] = utypeobj - sctypeNA[Intname] = typeobj - sctypeNA[UIntname] = utypeobj - sctypeNA[charname] = typeobj - sctypeNA[ucharname] = utypeobj - sctypeNA[typeobj] = Intname - sctypeNA[utypeobj] = UIntname - sctypeNA[val[0]] = Intname - sctypeNA[uval[0]] = UIntname -_add_integer_aliases() - -# We use these later -void = allTypes['void'] -generic = allTypes['generic'] - -# -# Rework the Python names (so that float and complex and int are consistent -# with Python usage) -# -def _set_up_aliases(): - type_pairs = [('complex_', 'cdouble'), - ('int0', 'intp'), - ('uint0', 'uintp'), - ('single', 'float'), - ('csingle', 'cfloat'), - ('singlecomplex', 'cfloat'), - ('float_', 'double'), - ('intc', 'int'), - ('uintc', 'uint'), - ('int_', 'long'), - ('uint', 'ulong'), - ('cfloat', 'cdouble'), - ('longfloat', 'longdouble'), - ('clongfloat', 'clongdouble'), - ('longcomplex', 'clongdouble'), - ('bool_', 'bool'), - ('unicode_', 'unicode'), - ('object_', 'object'), - ('timedelta_', 'timedelta'), - ('datetime_', 'datetime')] - if sys.version_info[0] >= 3: - type_pairs.extend([('bytes_', 'string'), - ('str_', 'unicode'), - ('string_', 'string')]) - else: - type_pairs.extend([('str_', 'string'), - ('string_', 'string'), - ('bytes_', 'string')]) - for alias, t in type_pairs: - allTypes[alias] = allTypes[t] - sctypeDict[alias] = sctypeDict[t] - # Remove aliases overriding python types and modules - to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float', - 'complex', 'bool', 'string', 'datetime', 'timedelta'] - if sys.version_info[0] >= 3: - # Py3K - to_remove.append('bytes') - to_remove.append('str') - to_remove.remove('unicode') - to_remove.remove('long') - for t in to_remove: - try: - del allTypes[t] - del sctypeDict[t] - except KeyError: - pass -_set_up_aliases() - -# Now, construct dictionary to lookup character codes from types -_sctype2char_dict = {} -def 
_construct_char_code_lookup(): - for name in typeinfo.keys(): - tup = typeinfo[name] - if isinstance(tup, tuple): - if tup[0] not in ['p','P']: - _sctype2char_dict[tup[-1]] = tup[0] -_construct_char_code_lookup() - - -sctypes = {'int': [], - 'uint':[], - 'float':[], - 'complex':[], - 'others':[bool,object,str,unicode,void]} - -def _add_array_type(typename, bits): - try: - t = allTypes['%s%d' % (typename, bits)] - except KeyError: - pass - else: - sctypes[typename].append(t) - -def _set_array_types(): - ibytes = [1, 2, 4, 8, 16, 32, 64] - fbytes = [2, 4, 8, 10, 12, 16, 32, 64] - for bytes in ibytes: - bits = 8*bytes - _add_array_type('int', bits) - _add_array_type('uint', bits) - for bytes in fbytes: - bits = 8*bytes - _add_array_type('float', bits) - _add_array_type('complex', 2*bits) - _gi = dtype('p') - if _gi.type not in sctypes['int']: - indx = 0 - sz = _gi.itemsize - _lst = sctypes['int'] - while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): - indx += 1 - sctypes['int'].insert(indx, _gi.type) - sctypes['uint'].insert(indx, dtype('P').type) -_set_array_types() - - -genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] - -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. 
- - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> np.maximum_sctype(np.int) - - >>> np.maximum_sctype(np.uint8) - - >>> np.maximum_sctype(np.complex) - - - >>> np.maximum_sctype(str) - - - >>> np.maximum_sctype('i2') - - >>> np.maximum_sctype('f4') - - - """ - g = obj2sctype(t) - if g is None: - return t - t = g - name = t.__name__ - base, bits = _evalname(name) - if bits == 0: - return t - else: - return sctypes[base][-1] - -try: - buffer_type = _types.BufferType -except AttributeError: - # Py3K - buffer_type = memoryview - -_python_types = {int : 'int_', - float: 'float_', - complex: 'complex_', - bool: 'bool_', - bytes: 'bytes_', - unicode: 'unicode_', - buffer_type: 'void', - } - -if sys.version_info[0] >= 3: - def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, type): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] -else: - def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, _types.TypeType): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] - -def issctype(rep): - """ - Determines whether the given object represents a scalar data-type. - - Parameters - ---------- - rep : any - If `rep` is an instance of a scalar dtype, True is returned. If not, - False is returned. - - Returns - ------- - out : bool - Boolean result of check whether `rep` is a scalar dtype. 
- - See Also - -------- - issubsctype, issubdtype, obj2sctype, sctype2char - - Examples - -------- - >>> np.issctype(np.int32) - True - >>> np.issctype(list) - False - >>> np.issctype(1.1) - False - - Strings are also a scalar type: - - >>> np.issctype(np.dtype('str')) - True - - """ - if not isinstance(rep, (type, dtype)): - return False - try: - res = obj2sctype(rep) - if res and res != object_: - return True - return False - except: - return False - -def obj2sctype(rep, default=None): - """ - Return the scalar dtype or NumPy equivalent of Python type of an object. - - Parameters - ---------- - rep : any - The object of which the type is returned. - default : any, optional - If given, this is returned for objects whose types can not be - determined. If not given, None is returned for those objects. - - Returns - ------- - dtype : dtype or Python type - The data type of `rep`. - - See Also - -------- - sctype2char, issctype, issubsctype, issubdtype, maximum_sctype - - Examples - -------- - >>> np.obj2sctype(np.int32) - - >>> np.obj2sctype(np.array([1., 2.])) - - >>> np.obj2sctype(np.array([1.j])) - - - >>> np.obj2sctype(dict) - - >>> np.obj2sctype('string') - - - >>> np.obj2sctype(1, default=list) - - - """ - try: - if issubclass(rep, generic): - return rep - except TypeError: - pass - if isinstance(rep, dtype): - return rep.type - if isinstance(rep, type): - return _python_type(rep) - if isinstance(rep, ndarray): - return rep.dtype.type - try: - res = dtype(rep) - except: - return default - return res.type - - -def issubclass_(arg1, arg2): - """ - Determine if a class is a subclass of a second class. - - `issubclass_` is equivalent to the Python built-in ``issubclass``, - except that it returns False instead of raising a TypeError is one - of the arguments is not a class. - - Parameters - ---------- - arg1 : class - Input class. True is returned if `arg1` is a subclass of `arg2`. - arg2 : class or tuple of classes. - Input class. 
If a tuple of classes, True is returned if `arg1` is a - subclass of any of the tuple elements. - - Returns - ------- - out : bool - Whether `arg1` is a subclass of `arg2` or not. - - See Also - -------- - issubsctype, issubdtype, issctype - - Examples - -------- - >>> np.issubclass_(np.int32, np.int) - True - >>> np.issubclass_(np.int32, np.float) - False - - """ - try: - return issubclass(arg1, arg2) - except TypeError: - return False - -def issubsctype(arg1, arg2): - """ - Determine if the first argument is a subclass of the second argument. - - Parameters - ---------- - arg1, arg2 : dtype or dtype specifier - Data-types. - - Returns - ------- - out : bool - The result. - - See Also - -------- - issctype, issubdtype,obj2sctype - - Examples - -------- - >>> np.issubsctype('S8', str) - True - >>> np.issubsctype(np.array([1]), np.int) - True - >>> np.issubsctype(np.array([1]), np.float) - False - - """ - return issubclass(obj2sctype(arg1), obj2sctype(arg2)) - -def issubdtype(arg1, arg2): - """ - Returns True if first argument is a typecode lower/equal in type hierarchy. - - Parameters - ---------- - arg1, arg2 : dtype_like - dtype or string representing a typecode. - - Returns - ------- - out : bool - - See Also - -------- - issubsctype, issubclass_ - numpy.core.numerictypes : Overview of numpy type hierarchy. - - Examples - -------- - >>> np.issubdtype('S1', str) - True - >>> np.issubdtype(np.float64, np.float32) - False - - """ - if issubclass_(arg2, generic): - return issubclass(dtype(arg1).type, arg2) - mro = dtype(arg2).type.mro() - if len(mro) > 1: - val = mro[1] - else: - val = mro[0] - return issubclass(dtype(arg1).type, val) - - -# This dictionary allows look up based on any alias for an array data-type -class _typedict(dict): - """ - Base object for a dictionary for look-up with any alias for an array dtype. - - Instances of `_typedict` can not be used as dictionaries directly, - first they have to be populated. 
- - """ - def __getitem__(self, obj): - return dict.__getitem__(self, obj2sctype(obj)) - -nbytes = _typedict() -_alignment = _typedict() -_maxvals = _typedict() -_minvals = _typedict() -def _construct_lookups(): - for name, val in typeinfo.iteritems(): - if not isinstance(val, tuple): - continue - obj = val[-1] - nbytes[obj] = val[2] // 8 - _alignment[obj] = val[3] - if (len(val) > 5): - _maxvals[obj] = val[4] - _minvals[obj] = val[5] - else: - _maxvals[obj] = None - _minvals[obj] = None - -_construct_lookups() - -def sctype2char(sctype): - """ - Return the string representation of a scalar dtype. - - Parameters - ---------- - sctype : scalar dtype or object - If a scalar dtype, the corresponding string character is - returned. If an object, `sctype2char` tries to infer its scalar type - and then return the corresponding string character. - - Returns - ------- - typechar : str - The string character corresponding to the scalar type. - - Raises - ------ - ValueError - If `sctype` is an object for which the type can not be inferred. - - See Also - -------- - obj2sctype, issctype, issubsctype, mintypecode - - Examples - -------- - >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]: - ... 
print np.sctype2char(sctype) - l - d - D - S - O - - >>> x = np.array([1., 2-1.j]) - >>> np.sctype2char(x) - 'D' - >>> np.sctype2char(list) - 'O' - - """ - sctype = obj2sctype(sctype) - if sctype is None: - raise ValueError, "unrecognized type" - return _sctype2char_dict[sctype] - -# Create dictionary of casting functions that wrap sequences -# indexed by type or type character - - -cast = _typedict() -try: - ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, - _types.LongType, _types.BooleanType, - _types.StringType, _types.UnicodeType, _types.BufferType] -except AttributeError: - # Py3K - ScalarType = [int, float, complex, long, bool, bytes, str, memoryview] - -ScalarType.extend(_sctype2char_dict.keys()) -ScalarType = tuple(ScalarType) -for key in _sctype2char_dict.keys(): - cast[key] = lambda x, k=key : array(x, copy=False).astype(k) - -# Create the typestring lookup dictionary -_typestr = _typedict() -for key in _sctype2char_dict.keys(): - if issubclass(key, allTypes['flexible']): - _typestr[key] = _sctype2char_dict[key] - else: - _typestr[key] = empty((1,),key).dtype.str[1:] - -# Make sure all typestrings are in sctypeDict -for key, val in _typestr.items(): - if val not in sctypeDict: - sctypeDict[val] = key - -# Add additional strings to the sctypeDict - -if sys.version_info[0] >= 3: - _toadd = ['int', 'float', 'complex', 'bool', 'object', - 'str', 'bytes', 'object', ('a', allTypes['bytes_'])] -else: - _toadd = ['int', 'float', 'complex', 'bool', 'object', 'string', - ('str', allTypes['string_']), - 'unicode', 'object', ('a', allTypes['string_'])] - -for name in _toadd: - if isinstance(name, tuple): - sctypeDict[name[0]] = name[1] - else: - sctypeDict[name] = allTypes['%s_' % name] - -del _toadd, name - -# Now add the types we've determined to this module -for key in allTypes: - globals()[key] = allTypes[key] - __all__.append(key) - -del key - -typecodes = {'Character':'c', - 'Integer':'bhilqp', - 'UnsignedInteger':'BHILQP', - 'Float':'efdg', 
- 'Complex':'FDG', - 'AllInteger':'bBhHiIlLqQpP', - 'AllFloat':'efdgFDG', - 'Datetime': 'Mm', - 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} - -# backwards compatibility --- deprecated name -typeDict = sctypeDict -typeNA = sctypeNA - -# b -> boolean -# u -> unsigned integer -# i -> signed integer -# f -> floating point -# c -> complex -# M -> datetime -# m -> timedelta -# S -> string -# U -> Unicode string -# V -> record -# O -> Python object -_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] - -__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' -__len_test_types = len(__test_types) - -# Keep incrementing until a common type both can be coerced to -# is found. Otherwise, return None -def _find_common_coerce(a, b): - if a > b: - return a - try: - thisind = __test_types.index(a.char) - except ValueError: - return None - return _can_coerce_all([a,b], start=thisind) - -# Find a data-type that all data-types in a list can be coerced to -def _can_coerce_all(dtypelist, start=0): - N = len(dtypelist) - if N == 0: - return None - if N == 1: - return dtypelist[0] - thisind = start - while thisind < __len_test_types: - newdtype = dtype(__test_types[thisind]) - numcoerce = len([x for x in dtypelist if newdtype >= x]) - if numcoerce == N: - return newdtype - thisind += 1 - return None - -def find_common_type(array_types, scalar_types): - """ - Determine common type following standard coercion rules. - - Parameters - ---------- - array_types : sequence - A list of dtypes or dtype convertible objects representing arrays. - scalar_types : sequence - A list of dtypes or dtype convertible objects representing scalars. - - Returns - ------- - datatype : dtype - The common data type, which is the maximum of `array_types` ignoring - `scalar_types`, unless the maximum of `scalar_types` is of a - different kind (`dtype.kind`). If the kind is not understood, then - None is returned. 
- - See Also - -------- - dtype, common_type, can_cast, mintypecode - - Examples - -------- - >>> np.find_common_type([], [np.int64, np.float32, np.complex]) - dtype('complex128') - >>> np.find_common_type([np.int64, np.float32], []) - dtype('float64') - - The standard casting rules ensure that a scalar cannot up-cast an - array unless the scalar is of a fundamentally different kind of data - (i.e. under a different hierarchy in the data type hierarchy) then - the array: - - >>> np.find_common_type([np.float32], [np.int64, np.float64]) - dtype('float32') - - Complex is of a different type, so it up-casts the float in the - `array_types` argument: - - >>> np.find_common_type([np.float32], [np.complex]) - dtype('complex128') - - Type specifier strings are convertible to dtypes and can therefore - be used instead of dtypes: - - >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) - dtype('complex128') - - """ - array_types = [dtype(x) for x in array_types] - scalar_types = [dtype(x) for x in scalar_types] - - maxa = _can_coerce_all(array_types) - maxsc = _can_coerce_all(scalar_types) - - if maxa is None: - return maxsc - - if maxsc is None: - return maxa - - try: - index_a = _kind_list.index(maxa.kind) - index_sc = _kind_list.index(maxsc.kind) - except ValueError: - return None - - if index_sc > index_a: - return _find_common_coerce(maxsc,maxa) - else: - return maxa diff --git a/numpy-1.6.2/numpy/core/records.py b/numpy-1.6.2/numpy/core/records.py deleted file mode 100644 index 58cead0d9a..0000000000 --- a/numpy-1.6.2/numpy/core/records.py +++ /dev/null @@ -1,806 +0,0 @@ -""" -Record Arrays -============= -Record arrays expose the fields of structured arrays as properties. - -Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, -bools etc. 
However, it is possible for elements to be combinations of these, -such as:: - - >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)]) - >>> a - array([(1, 2.0), (1, 2.0)], - dtype=[('x', '>> a['x'] - array([1, 1]) - - >>> a['y'] - array([ 2., 2.]) - -Record arrays allow us to access fields as properties:: - - >>> ar = a.view(np.recarray) - - >>> ar.x - array([1, 1]) - - >>> ar.y - array([ 2., 2.]) - -""" -# All of the functions allow formats to be a dtype -__all__ = ['record', 'recarray', 'format_parser'] - -import numeric as sb -from defchararray import chararray -import numerictypes as nt -import types -import os -import sys - -from numpy.compat import isfileobj, bytes - -ndarray = sb.ndarray - -_byteorderconv = {'b':'>', - 'l':'<', - 'n':'=', - 'B':'>', - 'L':'<', - 'N':'=', - 'S':'s', - 's':'s', - '>':'>', - '<':'<', - '=':'=', - '|':'|', - 'I':'|', - 'i':'|'} - -# formats regular expression -# allows multidimension spec with a tuple syntax in front -# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' -# are equally allowed - -numfmt = nt.typeDict -_typestr = nt._typestr - -def find_duplicate(list): - """Find duplication in a list, return a list of duplicated elements""" - dup = [] - for i in range(len(list)): - if (list[i] in list[i + 1:]): - if (list[i] not in dup): - dup.append(list[i]) - return dup - -class format_parser: - """ - Class to convert formats, names, titles description to a dtype. - - After constructing the format_parser object, the dtype attribute is - the converted data-type: - ``dtype = format_parser(formats, names, titles).dtype`` - - Attributes - ---------- - dtype : dtype - The converted data-type. - - Parameters - ---------- - formats : str or list of str - The format description, either specified as a string with - comma-separated format descriptions in the form ``'f8, i4, a5'``, or - a list of format description strings in the form - ``['f8', 'i4', 'a5']``. 
- names : str or list/tuple of str - The field names, either specified as a comma-separated string in the - form ``'col1, col2, col3'``, or as a list or tuple of strings in the - form ``['col1', 'col2', 'col3']``. - An empty list can be used, in that case default field names - ('f0', 'f1', ...) are used. - titles : sequence - Sequence of title strings. An empty list can be used to leave titles - out. - aligned : bool, optional - If True, align the fields by padding as the C-compiler would. - Default is False. - byteorder : str, optional - If specified, all the fields will be changed to the - provided byte-order. Otherwise, the default byte-order is - used. For all available string specifiers, see `dtype.newbyteorder`. - - See Also - -------- - dtype, typename, sctype2char - - Examples - -------- - >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], - ... ['T1', 'T2', 'T3']).dtype - dtype([(('T1', 'col1'), '>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], - ... []).dtype - dtype([('col1', '>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype - dtype([('f0', ' len(titles)): - self._titles += [None] * (self._nfields - len(titles)) - - def _createdescr(self, byteorder): - descr = sb.dtype({'names':self._names, - 'formats':self._f_formats, - 'offsets':self._offsets, - 'titles':self._titles}) - if (byteorder is not None): - byteorder = _byteorderconv[byteorder[0]] - descr = descr.newbyteorder(byteorder) - - self._descr = descr - -class record(nt.void): - """A data-type scalar that allows field access as attribute lookup. 
- """ - def __repr__(self): - return self.__str__() - - def __str__(self): - return str(self.item()) - - def __getattribute__(self, attr): - if attr in ['setfield', 'getfield', 'dtype']: - return nt.void.__getattribute__(self, attr) - try: - return nt.void.__getattribute__(self, attr) - except AttributeError: - pass - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - obj = self.getfield(*res[:2]) - # if it has fields return a recarray, - # if it's a string ('SU') return a chararray - # otherwise return the object - try: - dt = obj.dtype - except AttributeError: - return obj - if dt.fields: - return obj.view(obj.__class__) - if dt.char in 'SU': - return obj.view(chararray) - return obj - else: - raise AttributeError, "'record' object has no "\ - "attribute '%s'" % attr - - - def __setattr__(self, attr, val): - if attr in ['setfield', 'getfield', 'dtype']: - raise AttributeError, "Cannot set '%s' attribute" % attr - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - return self.setfield(val, *res[:2]) - else: - if getattr(self, attr, None): - return nt.void.__setattr__(self, attr, val) - else: - raise AttributeError, "'record' object has no "\ - "attribute '%s'" % attr - - def pprint(self): - """Pretty-print all fields.""" - # pretty-print all fields - names = self.dtype.names - maxlen = max([len(name) for name in names]) - rows = [] - fmt = '%% %ds: %%s' % maxlen - for name in names: - rows.append(fmt % (name, getattr(self, name))) - return "\n".join(rows) - -# The recarray is almost identical to a standard array (which supports -# named fields already) The biggest difference is that it can use -# attribute-lookup to find the fields and it is constructed using -# a record. 
- -# If byteorder is given it forces a particular byteorder on all -# the fields (and any subfields) - -class recarray(ndarray): - """ - Construct an ndarray that allows field access using attributes. - - Arrays may have a data-types containing fields, analogous - to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, - where each entry in the array is a pair of ``(int, float)``. Normally, - these attributes are accessed using dictionary lookups such as ``arr['x']`` - and ``arr['y']``. Record arrays allow the fields to be accessed as members - of the array, using ``arr.x`` and ``arr.y``. - - Parameters - ---------- - shape : tuple - Shape of output array. - dtype : data-type, optional - The desired data-type. By default, the data-type is determined - from `formats`, `names`, `titles`, `aligned` and `byteorder`. - formats : list of data-types, optional - A list containing the data-types for the different columns, e.g. - ``['i4', 'f8', 'i4']``. `formats` does *not* support the new - convention of using types directly, i.e. ``(int, float, int)``. - Note that `formats` must be a list, not a tuple. - Given that `formats` is somewhat limited, we recommend specifying - `dtype` instead. - names : tuple of str, optional - The name of each column, e.g. ``('x', 'y', 'z')``. - buf : buffer, optional - By default, a new array is created of the given shape and data-type. - If `buf` is specified and is an object exposing the buffer interface, - the array will use the memory from the existing buffer. In this case, - the `offset` and `strides` keywords are available. - - Other Parameters - ---------------- - titles : tuple of str, optional - Aliases for column names. For example, if `names` were - ``('x', 'y', 'z')`` and `titles` is - ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then - ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. - byteorder : {'<', '>', '='}, optional - Byte-order for all fields. 
- aligned : bool, optional - Align the fields in memory as the C-compiler would. - strides : tuple of ints, optional - Buffer (`buf`) is interpreted according to these strides (strides - define how many bytes each array element, row, column, etc. - occupy in memory). - offset : int, optional - Start reading buffer (`buf`) from this offset onwards. - order : {'C', 'F'}, optional - Row-major or column-major order. - - Returns - ------- - rec : recarray - Empty array of the given shape and type. - - See Also - -------- - rec.fromrecords : Construct a record array from data. - record : fundamental data-type for `recarray`. - format_parser : determine a data-type from formats, names, titles. - - Notes - ----- - This constructor can be compared to ``empty``: it creates a new record - array but does not fill it with data. To create a record array from data, - use one of the following methods: - - 1. Create a standard ndarray and convert it to a record array, - using ``arr.view(np.recarray)`` - 2. Use the `buf` keyword. - 3. Use `np.rec.fromrecords`. - - Examples - -------- - Create an array with two fields, ``x`` and ``y``: - - >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]) - >>> x - array([(1.0, 2), (3.0, 4)], - dtype=[('x', '>> x['x'] - array([ 1., 3.]) - - View the array as a record array: - - >>> x = x.view(np.recarray) - - >>> x.x - array([ 1., 3.]) - - >>> x.y - array([2, 4]) - - Create a new, empty record array: - - >>> np.recarray((2,), - ... 
dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP - rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), - (3471280, 1.2134086255804012e-316, 0)], - dtype=[('x', '>> x1=np.array([1,2,3,4]) - >>> x2=np.array(['a','dd','xyz','12']) - >>> x3=np.array([1.1,2,3,4]) - >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') - >>> print r[1] - (2, 'dd', 2.0) - >>> x1[1]=34 - >>> r.a - array([1, 2, 3, 4]) - """ - - arrayList = [sb.asarray(x) for x in arrayList] - - if shape is None or shape == 0: - shape = arrayList[0].shape - - if isinstance(shape, int): - shape = (shape,) - - if formats is None and dtype is None: - # go through each object in the list to see if it is an ndarray - # and determine the formats. - formats = '' - for obj in arrayList: - if not isinstance(obj, ndarray): - raise ValueError, "item in the array list must be an ndarray." - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, nt.flexible): - formats += `obj.itemsize` - formats += ',' - formats = formats[:-1] - - if dtype is not None: - descr = sb.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - - # Determine shape from data-type. - if len(descr) != len(arrayList): - raise ValueError, "mismatch between the number of fields "\ - "and the number of arrays" - - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - - for k, obj in enumerate(arrayList): - nn = len(descr[k].shape) - testshape = obj.shape[:len(obj.shape) - nn] - if testshape != shape: - raise ValueError, "array-shape mismatch in array %d" % k - - _array = recarray(shape, descr) - - # populate the record array (makes a copy) - for i in range(len(arrayList)): - _array[_names[i]] = arrayList[i] - - return _array - -# shape must be 1-d if you use list of lists... 
-def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """ create a recarray from a list of records in text form - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined - - r=fromrecords([(2,3.,'abc')]*100000) - - it can be slow. - - If formats is None, then this will auto-detect formats. Use list of - tuples rather than list of lists for faster processing. - - >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], - ... names='col1,col2,col3') - >>> print r[0] - (456, 'dbe', 1.2) - >>> r.col1 - array([456, 2]) - >>> r.col2 - chararray(['dbe', 'de'], - dtype='|S3') - >>> import cPickle - >>> print cPickle.loads(cPickle.dumps(r)) - [(456, 'dbe', 1.2) (2, 'de', 1.3)] - """ - - nfields = len(recList[0]) - if formats is None and dtype is None: # slower - obj = sb.array(recList, dtype=object) - arrlist = [sb.array(obj[..., i].tolist()) for i in xrange(nfields)] - return fromarrays(arrlist, formats=formats, shape=shape, names=names, - titles=titles, aligned=aligned, byteorder=byteorder) - - if dtype is not None: - descr = sb.dtype((record, dtype)) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - try: - retval = sb.array(recList, dtype=descr) - except TypeError: # list of lists instead of list of tuples - if (shape is None or shape == 0): - shape = len(recList) - if isinstance(shape, (int, long)): - shape = (shape,) - if len(shape) > 1: - raise ValueError, "Can only deal with 1-d array." 
- _array = recarray(shape, descr) - for k in xrange(_array.size): - _array[k] = tuple(recList[k]) - return _array - else: - if shape is not None and retval.shape != shape: - retval.shape = shape - - res = retval.view(recarray) - - return res - - -def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a (read-only) record array from binary data contained in - a string""" - - - if dtype is None and formats is None: - raise ValueError, "Must have dtype= or formats=" - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - if (shape is None or shape == 0 or shape == -1): - shape = (len(datastring) - offset) / itemsize - - _array = recarray(shape, descr, buf=datastring, offset=offset) - return _array - -def get_remaining_size(fd): - try: - fn = fd.fileno() - except AttributeError: - return os.path.getsize(fd.name) - fd.tell() - st = os.fstat(fn) - size = st.st_size - fd.tell() - return size - -def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Create an array from binary file data - - If file is a string then that file is opened, else it is assumed - to be a file object. - - >>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') - >>> a[5] = (0.5,10,'abcde') - >>> - >>> fd=TemporaryFile() - >>> a = a.newbyteorder('<') - >>> a.tofile(fd) - >>> - >>> fd.seek(0) - >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, - ... 
byteorder='<') - >>> print r[5] - (0.5, 10, 'abcde') - >>> r.shape - (10,) - """ - - if (shape is None or shape == 0): - shape = (-1,) - elif isinstance(shape, (int, long)): - shape = (shape,) - - name = 0 - if isinstance(fd, str): - name = 1 - fd = open(fd, 'rb') - if (offset > 0): - fd.seek(offset, 1) - size = get_remaining_size(fd) - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - - shapeprod = sb.array(shape).prod() - shapesize = shapeprod * itemsize - if shapesize < 0: - shape = list(shape) - shape[ shape.index(-1) ] = size / -shapesize - shape = tuple(shape) - shapeprod = sb.array(shape).prod() - - nbytes = shapeprod * itemsize - - if nbytes > size: - raise ValueError( - "Not enough bytes left in file for specified shape and type") - - # create the array - _array = recarray(shape, descr) - nbytesread = fd.readinto(_array.data) - if nbytesread != nbytes: - raise IOError("Didn't read as many bytes as expected") - if name: - fd.close() - - return _array - -def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, copy=True): - """Construct a record array from a wide-variety of objects. 
- """ - - if (isinstance(obj, (type(None), str)) or isfileobj(obj)) \ - and (formats is None) \ - and (dtype is None): - raise ValueError("Must define formats (or dtype) if object is "\ - "None, string, or an open file") - - kwds = {} - if dtype is not None: - dtype = sb.dtype(dtype) - elif formats is not None: - dtype = format_parser(formats, names, titles, - aligned, byteorder)._descr - else: - kwds = {'formats': formats, - 'names' : names, - 'titles' : titles, - 'aligned' : aligned, - 'byteorder' : byteorder - } - - if obj is None: - if shape is None: - raise ValueError("Must define a shape if obj is None") - return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) - - elif isinstance(obj, bytes): - return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) - - elif isinstance(obj, (list, tuple)): - if isinstance(obj[0], (tuple, list)): - return fromrecords(obj, dtype=dtype, shape=shape, **kwds) - else: - return fromarrays(obj, dtype=dtype, shape=shape, **kwds) - - elif isinstance(obj, recarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new - - elif isfileobj(obj): - return fromfile(obj, dtype=dtype, shape=shape, offset=offset) - - elif isinstance(obj, ndarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - res = new.view(recarray) - if issubclass(res.dtype.type, nt.void): - res.dtype = sb.dtype((record, res.dtype)) - return res - - else: - interface = getattr(obj, "__array_interface__", None) - if interface is None or not isinstance(interface, dict): - raise ValueError("Unknown input type") - obj = sb.array(obj) - if dtype is not None and (obj.dtype != dtype): - obj = obj.view(dtype) - res = obj.view(recarray) - if issubclass(res.dtype.type, nt.void): - res.dtype = sb.dtype((record, res.dtype)) - return res diff --git a/numpy-1.6.2/numpy/core/scons_support.py 
b/numpy-1.6.2/numpy/core/scons_support.py deleted file mode 100644 index 048f85db6d..0000000000 --- a/numpy-1.6.2/numpy/core/scons_support.py +++ /dev/null @@ -1,272 +0,0 @@ -#! Last Change: Sun Apr 26 05:00 PM 2009 J - -"""Code to support special facilities to scons which are only useful for -numpy.core, hence not put into numpy.distutils.scons""" - -import sys -import os - -from os.path import join as pjoin, dirname as pdirname, basename as pbasename -from copy import deepcopy - -import code_generators -from code_generators.generate_numpy_api import \ - do_generate_api as nowrap_do_generate_numpy_api -from code_generators.generate_ufunc_api import \ - do_generate_api as nowrap_do_generate_ufunc_api -from setup_common import check_api_version as _check_api_version -from setup_common import \ - LONG_DOUBLE_REPRESENTATION_SRC, pyod, long_double_representation - -from numscons.numdist import process_c_str as process_str - -import SCons.Node -import SCons -from SCons.Builder import Builder -from SCons.Action import Action - -def check_api_version(apiversion): - return _check_api_version(apiversion, pdirname(code_generators.__file__)) - -def split_ext(string): - sp = string.rsplit( '.', 1) - if len(sp) == 1: - return (sp[0], '') - else: - return sp -#------------------------------------ -# Ufunc and multiarray API generators -#------------------------------------ -def do_generate_numpy_api(target, source, env): - nowrap_do_generate_numpy_api([str(i) for i in target], - [s.value for s in source]) - return 0 - -def do_generate_ufunc_api(target, source, env): - nowrap_do_generate_ufunc_api([str(i) for i in target], - [s.value for s in source]) - return 0 - -def generate_api_emitter(target, source, env): - """Returns the list of targets generated by the code generator for array - api and ufunc api.""" - base, ext = split_ext(str(target[0])) - dir = pdirname(base) - ba = pbasename(base) - h = pjoin(dir, '__' + ba + '.h') - c = pjoin(dir, '__' + ba + '.c') - txt = base + 
'.txt' - #print h, c, txt - t = [h, c, txt] - return (t, source) - -#------------------------- -# From template generators -#------------------------- -# XXX: this is general and can be used outside numpy.core. -def do_generate_from_template(targetfile, sourcefile, env): - t = open(targetfile, 'w') - s = open(sourcefile, 'r') - allstr = s.read() - s.close() - writestr = process_str(allstr) - t.write(writestr) - t.close() - return 0 - -def generate_from_template(target, source, env): - for t, s in zip(target, source): - do_generate_from_template(str(t), str(s), env) - -def generate_from_template_emitter(target, source, env): - base, ext = split_ext(pbasename(str(source[0]))) - t = pjoin(pdirname(str(target[0])), base) - return ([t], source) - -#---------------- -# umath generator -#---------------- -def do_generate_umath(targetfile, sourcefile, env): - t = open(targetfile, 'w') - from code_generators import generate_umath - code = generate_umath.make_code(generate_umath.defdict, generate_umath.__file__) - t.write(code) - t.close() - -def generate_umath(target, source, env): - for t, s in zip(target, source): - do_generate_umath(str(t), str(s), env) - -def generate_umath_emitter(target, source, env): - t = str(target[0]) + '.c' - return ([t], source) - -#----------------------------------------- -# Other functions related to configuration -#----------------------------------------- -def CheckGCC4(context): - src = """ -int -main() -{ -#if !(defined __GNUC__ && (__GNUC__ >= 4)) -die from an horrible death -#endif -} -""" - - context.Message("Checking if compiled with gcc 4.x or above ... ") - st = context.TryCompile(src, '.c') - - if st: - context.Result(' yes') - else: - context.Result(' no') - return st == 1 - -def CheckBrokenMathlib(context, mathlib): - src = """ -/* check whether libm is broken */ -#include -int main(int argc, char *argv[]) -{ - return exp(-720.) 
> 1.0; /* typically an IEEE denormal */ -} -""" - - try: - oldLIBS = deepcopy(context.env['LIBS']) - except: - oldLIBS = [] - - try: - context.Message("Checking if math lib %s is usable for numpy ... " % mathlib) - context.env.AppendUnique(LIBS = mathlib) - st = context.TryRun(src, '.c') - finally: - context.env['LIBS'] = oldLIBS - - if st[0]: - context.Result(' Yes !') - else: - context.Result(' No !') - return st[0] - -def check_mlib(config, mlib): - """Return 1 if mlib is available and usable by numpy, 0 otherwise. - - mlib can be a string (one library), or a list of libraries.""" - # Check the libraries in mlib are linkable - if len(mlib) > 0: - # XXX: put an autoadd argument to 0 here and add an autoadd argument to - # CheckBroekenMathlib (otherwise we may add bogus libraries, the ones - # which do not path the CheckBrokenMathlib test). - st = config.CheckLib(mlib) - if not st: - return 0 - # Check the mlib is usable by numpy - return config.CheckBrokenMathlib(mlib) - -def check_mlibs(config, mlibs): - for mlib in mlibs: - if check_mlib(config, mlib): - return mlib - - # No mlib was found. - raise SCons.Errors.UserError("No usable mathlib was found: chose another "\ - "one using the MATHLIB env variable, eg "\ - "'MATHLIB=m python setup.py build'") - - -def is_npy_no_signal(): - """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration - header.""" - return sys.platform == 'win32' - -def define_no_smp(): - """Returns True if we should define NPY_NOSMP, False otherwise.""" - #-------------------------------- - # Checking SMP and thread options - #-------------------------------- - # Python 2.3 causes a segfault when - # trying to re-acquire the thread-state - # which is done in error-handling - # ufunc code. NPY_ALLOW_C_API and friends - # cause the segfault. So, we disable threading - # for now. - if sys.version[:5] < '2.4.2': - nosmp = 1 - else: - # Perhaps a fancier check is in order here. 
- # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. - try: - nosmp = os.environ['NPY_NOSMP'] - nosmp = 1 - except KeyError: - nosmp = 0 - return nosmp == 1 - -# Inline check -def CheckInline(context): - context.Message("Checking for inline keyword... ") - body = """ -#ifndef __cplusplus -static %(inline)s int static_func (void) -{ - return 0; -} -%(inline)s int nostatic_func (void) -{ - return 0; -} -#endif""" - inline = None - for kw in ['inline', '__inline__', '__inline']: - st = context.TryCompile(body % {'inline': kw}, '.c') - if st: - inline = kw - break - - if inline: - context.Result(inline) - else: - context.Result(0) - return inline - -def CheckLongDoubleRepresentation(context): - msg = { - 'INTEL_EXTENDED_12_BYTES_LE': "Intel extended, little endian", - 'INTEL_EXTENDED_16_BYTES_LE': "Intel extended, little endian", - 'IEEE_QUAD_BE': "IEEE Quad precision, big endian", - 'IEEE_QUAD_LE': "IEEE Quad precision, little endian", - 'IEEE_DOUBLE_LE': "IEEE Double precision, little endian", - 'IEEE_DOUBLE_BE': "IEEE Double precision, big endian" - } - - context.Message("Checking for long double representation... 
") - body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} - st = context.TryCompile(body, '.c') - if st: - obj = str(context.sconf.lastTarget) - tp = long_double_representation(pyod(obj)) - context.Result(msg[tp]) - return tp - if not st: - context.Result(0) - -array_api_gen_bld = Builder(action = Action(do_generate_numpy_api, '$ARRAYPIGENCOMSTR'), - emitter = generate_api_emitter) - - -ufunc_api_gen_bld = Builder(action = Action(do_generate_ufunc_api, '$UFUNCAPIGENCOMSTR'), - emitter = generate_api_emitter) - -template_bld = Builder(action = Action(generate_from_template, '$TEMPLATECOMSTR'), - emitter = generate_from_template_emitter) - -umath_bld = Builder(action = Action(generate_umath, '$UMATHCOMSTR'), - emitter = generate_umath_emitter) diff --git a/numpy-1.6.2/numpy/core/setup.py b/numpy-1.6.2/numpy/core/setup.py deleted file mode 100644 index 001d98dcce..0000000000 --- a/numpy-1.6.2/numpy/core/setup.py +++ /dev/null @@ -1,861 +0,0 @@ -import imp -import os -import sys -import shutil -from os.path import join -from numpy.distutils import log -from distutils.dep_util import newer -from distutils.sysconfig import get_config_var -import warnings -import re - -from setup_common import * - -# Set to True to enable multiple file compilations (experimental) -try: - os.environ['NPY_SEPARATE_COMPILATION'] - ENABLE_SEPARATE_COMPILATION = True -except KeyError: - ENABLE_SEPARATE_COMPILATION = False - -# XXX: ugly, we use a class to avoid calling twice some expensive functions in -# config.h/numpyconfig.h. I don't see a better way because distutils force -# config.h generation inside an Extension class, and as such sharing -# configuration informations between extensions is not easy. -# Using a pickled-based memoize does not work because config_cmd is an instance -# method, which cPickle does not like. 
-try: - import cPickle as _pik -except ImportError: - import pickle as _pik -import copy - -class CallOnceOnly(object): - def __init__(self): - self._check_types = None - self._check_ieee_macros = None - self._check_complex = None - - def check_types(self, *a, **kw): - if self._check_types is None: - out = check_types(*a, **kw) - self._check_types = _pik.dumps(out) - else: - out = copy.deepcopy(_pik.loads(self._check_types)) - return out - - def check_ieee_macros(self, *a, **kw): - if self._check_ieee_macros is None: - out = check_ieee_macros(*a, **kw) - self._check_ieee_macros = _pik.dumps(out) - else: - out = copy.deepcopy(_pik.loads(self._check_ieee_macros)) - return out - - def check_complex(self, *a, **kw): - if self._check_complex is None: - out = check_complex(*a, **kw) - self._check_complex = _pik.dumps(out) - else: - out = copy.deepcopy(_pik.loads(self._check_complex)) - return out - -PYTHON_HAS_UNICODE_WIDE = True - -def pythonlib_dir(): - """return path where libpython* is.""" - if sys.platform == 'win32': - return os.path.join(sys.prefix, "libs") - else: - return get_config_var('LIBDIR') - -def is_npy_no_signal(): - """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration - header.""" - return sys.platform == 'win32' - -def is_npy_no_smp(): - """Return True if the NPY_NO_SMP symbol must be defined in public - header (when SMP support cannot be reliably enabled).""" - # Python 2.3 causes a segfault when - # trying to re-acquire the thread-state - # which is done in error-handling - # ufunc code. NPY_ALLOW_C_API and friends - # cause the segfault. So, we disable threading - # for now. - if sys.version[:5] < '2.4.2': - nosmp = 1 - else: - # Perhaps a fancier check is in order here. - # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. 
- try: - nosmp = os.environ['NPY_NOSMP'] - nosmp = 1 - except KeyError: - nosmp = 0 - return nosmp == 1 - -def win32_checks(deflist): - from numpy.distutils.misc_util import get_build_architecture - a = get_build_architecture() - - # Distutils hack on AMD64 on windows - print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \ - (a, os.name, sys.platform)) - if a == 'AMD64': - deflist.append('DISTUTILS_USE_SDK') - - # On win32, force long double format string to be 'g', not - # 'Lg', since the MS runtime does not support long double whose - # size is > sizeof(double) - if a == "Intel" or a == "AMD64": - deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') - -def check_math_capabilities(config, moredefs, mathlibs): - def check_func(func_name): - return config.check_func(func_name, libraries=mathlibs, - decl=True, call=True) - - def check_funcs_once(funcs_name): - decl = dict([(f, True) for f in funcs_name]) - st = config.check_funcs_once(funcs_name, libraries=mathlibs, - decl=decl, call=decl) - if st: - moredefs.extend([fname2def(f) for f in funcs_name]) - return st - - def check_funcs(funcs_name): - # Use check_funcs_once first, and if it does not work, test func per - # func. Return success only if all the functions are available - if not check_funcs_once(funcs_name): - # Global check failed, check func per func - for f in funcs_name: - if check_func(f): - moredefs.append(fname2def(f)) - return 0 - else: - return 1 - - #use_msvc = config.check_decl("_MSC_VER") - - if not check_funcs_once(MANDATORY_FUNCS): - raise SystemError("One of the required function to build numpy is not" - " available (the list is %s)." % str(MANDATORY_FUNCS)) - - # Standard functions which may not be available and for which we have a - # replacement implementation. Note that some of these are C99 functions. - - # XXX: hack to circumvent cpp pollution from python: python put its - # config.h in the public namespace, so we have a clash for the common - # functions we test. 
We remove every function tested by python's - # autoconf, hoping their own test are correct - if sys.version_info[:2] >= (2, 5): - for f in OPTIONAL_STDFUNCS_MAYBE: - if config.check_decl(fname2def(f), - headers=["Python.h", "math.h"]): - OPTIONAL_STDFUNCS.remove(f) - - check_funcs(OPTIONAL_STDFUNCS) - - # C99 functions: float and long double versions - check_funcs(C99_FUNCS_SINGLE) - check_funcs(C99_FUNCS_EXTENDED) - -def check_complex(config, mathlibs): - priv = [] - pub = [] - - try: - if os.uname()[0] == "Interix": - warnings.warn("Disabling broken complex support. See #1365") - return priv, pub - except: - # os.uname not available on all platforms. blanket except ugly but safe - pass - - # Check for complex support - st = config.check_header('complex.h') - if st: - priv.append('HAVE_COMPLEX_H') - pub.append('NPY_USE_C99_COMPLEX') - - for t in C99_COMPLEX_TYPES: - st = config.check_type(t, headers=["complex.h"]) - if st: - pub.append(('NPY_HAVE_%s' % type2def(t), 1)) - - def check_prec(prec): - flist = [f + prec for f in C99_COMPLEX_FUNCS] - decl = dict([(f, True) for f in flist]) - if not config.check_funcs_once(flist, call=decl, decl=decl, - libraries=mathlibs): - for f in flist: - if config.check_func(f, call=True, decl=True, - libraries=mathlibs): - priv.append(fname2def(f)) - else: - priv.extend([fname2def(f) for f in flist]) - - check_prec('') - check_prec('f') - check_prec('l') - - return priv, pub - -def check_ieee_macros(config): - priv = [] - pub = [] - - macros = [] - - def _add_decl(f): - priv.append(fname2def("decl_%s" % f)) - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - - # XXX: hack to circumvent cpp pollution from python: python put its - # config.h in the public namespace, so we have a clash for the common - # functions we test. 
We remove every function tested by python's - # autoconf, hoping their own test are correct - _macros = ["isnan", "isinf", "signbit", "isfinite"] - if sys.version_info[:2] >= (2, 6): - for f in _macros: - py_symbol = fname2def("decl_%s" % f) - already_declared = config.check_decl(py_symbol, - headers=["Python.h", "math.h"]) - if already_declared: - if config.check_macro_true(py_symbol, - headers=["Python.h", "math.h"]): - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - else: - macros.append(f) - else: - macros = _macros[:] - # Normally, isnan and isinf are macro (C99), but some platforms only have - # func, or both func and macro version. Check for macro only, and define - # replacement ones if not found. - # Note: including Python.h is necessary because it modifies some math.h - # definitions - for f in macros: - st = config.check_decl(f, headers = ["Python.h", "math.h"]) - if st: - _add_decl(f) - - return priv, pub - -def check_types(config_cmd, ext, build_dir): - private_defines = [] - public_defines = [] - - # Expected size (in number of bytes) for each type. This is an - # optimization: those are only hints, and an exhaustive search for the size - # is done if the hints are wrong. - expected = {} - expected['short'] = [2] - expected['int'] = [4] - expected['long'] = [8, 4] - expected['float'] = [4] - expected['double'] = [8] - expected['long double'] = [8, 12, 16] - expected['Py_intptr_t'] = [4, 8] - expected['PY_LONG_LONG'] = [8] - expected['long long'] = [8] - - # Check we have the python header (-dev* packages on Linux) - result = config_cmd.check_header('Python.h') - if not result: - raise SystemError( - "Cannot compile 'Python.h'. 
Perhaps you need to "\ - "install python-dev|python-devel.") - res = config_cmd.check_header("endian.h") - if res: - private_defines.append(('HAVE_ENDIAN_H', 1)) - public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) - - # Check basic types sizes - for type in ('short', 'int', 'long'): - res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"]) - if res: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) - else: - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - for type in ('float', 'double', 'long double'): - already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), - headers = ["Python.h"]) - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - if not already_declared and not type == 'long double': - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # Compute size of corresponding complex type: used to check that our - # definition is binary compatible with C99 complex type (check done at - # build time in npy_common.h) - complex_def = "struct {%s __x; %s __y;}" % (type, type) - res = config_cmd.check_type_size(complex_def, expected=2*expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" 
% complex_def) - - - for type in ('Py_intptr_t',): - res = config_cmd.check_type_size(type, headers=["Python.h"], - library_dirs=[pythonlib_dir()], - expected=expected[type]) - - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # We check declaration AND type because that's how distutils does it. - if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], - library_dirs=[pythonlib_dir()], - expected=expected['PY_LONG_LONG']) - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') - - res = config_cmd.check_type_size('long long', - expected=expected['long long']) - if res >= 0: - #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" 
% 'long long') - - if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): - raise RuntimeError( - "Config wo CHAR_BIT is not supported"\ - ", please contact the maintainers") - - return private_defines, public_defines - -def check_mathlib(config_cmd): - # Testing the C math library - mathlibs = [] - mathlibs_choices = [[],['m'],['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0,mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - return mathlibs - -def visibility_define(config): - """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty - string).""" - if config.check_compiler_gcc4(): - return '__attribute__((visibility("hidden")))' - else: - return '' - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration,dot_join - from numpy.distutils.system_info import get_info, default_lib_dirs - - config = Configuration('core',parent_package,top_path) - local_dir = config.local_path - codegen_dir = join(local_dir,'code_generators') - - if is_released(config): - warnings.simplefilter('error', MismatchCAPIWarning) - - # Check whether we have a mismatch between the set C API VERSION and the - # actual C API VERSION - check_api_version(C_API_VERSION, codegen_dir) - - generate_umath_py = join(codegen_dir,'generate_umath.py') - n = dot_join(config.name,'generate_umath') - generate_umath = imp.load_module('_'.join(n.split('.')), - open(generate_umath_py,'U'),generate_umath_py, - ('.py','U',1)) - - header_dir = 'include/numpy' # this is relative to config.path_in_package - - cocache = CallOnceOnly() - - def generate_config_h(ext, build_dir): - target = join(build_dir,header_dir,'config.h') - d = os.path.dirname(target) - if not os.path.exists(d): - 
os.makedirs(d) - - if newer(__file__,target): - config_cmd = config.get_config_cmd() - log.info('Generating %s',target) - - # Check sizeof - moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) - - # Check math library and C99 math funcs availability - mathlibs = check_mathlib(config_cmd) - moredefs.append(('MATHLIB',','.join(mathlibs))) - - check_math_capabilities(config_cmd, moredefs, mathlibs) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) - - # Signal check - if is_npy_no_signal(): - moredefs.append('__NPY_PRIVATE_NO_SIGNAL') - - # Windows checks - if sys.platform=='win32' or os.name=='nt': - win32_checks(moredefs) - - # Inline check - inline = config_cmd.check_inline() - - # Check whether we need our own wide character support - if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']): - PYTHON_HAS_UNICODE_WIDE = True - else: - PYTHON_HAS_UNICODE_WIDE = False - - if ENABLE_SEPARATE_COMPILATION: - moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1)) - - # Get long double representation - if sys.platform != 'darwin': - rep = check_long_double_representation(config_cmd) - if rep in ['INTEL_EXTENDED_12_BYTES_LE', - 'INTEL_EXTENDED_16_BYTES_LE', - 'IEEE_QUAD_LE', 'IEEE_QUAD_BE', - 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE', - 'DOUBLE_DOUBLE_BE']: - moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) - else: - raise ValueError("Unrecognized long double format: %s" % rep) - - # Py3K check - if sys.version_info[0] == 3: - moredefs.append(('NPY_PY3K', 1)) - - # Generate the config.h file from moredefs - target_f = open(target, 'w') - for d in moredefs: - if isinstance(d,str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0],d[1])) - - # define inline to our keyword, or nothing - target_f.write('#ifndef __cplusplus\n') - if inline == 'inline': - target_f.write('/* #undef inline */\n') - else: - target_f.write('#define inline %s\n' % 
inline) - target_f.write('#endif\n') - - # add the guard to make sure config.h is never included directly, - # but always through npy_config.h - target_f.write(""" -#ifndef _NPY_NPY_CONFIG_H_ -#error config.h should never be included directly, include npy_config.h instead -#endif -""") - - target_f.close() - print('File:',target) - target_f = open(target) - print(target_f.read()) - target_f.close() - print('EOF') - else: - mathlibs = [] - target_f = open(target) - for line in target_f.readlines(): - s = '#define MATHLIB' - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - target_f.close() - - # Ugly: this can be called within a library and not an extension, - # in which case there is no libraries attributes (and none is - # needed). - if hasattr(ext, 'libraries'): - ext.libraries.extend(mathlibs) - - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - - return target - - def generate_numpyconfig_h(ext, build_dir): - """Depends on config.h: generate_config_h has to be called before !""" - target = join(build_dir,header_dir,'_numpyconfig.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - if newer(__file__,target): - config_cmd = config.get_config_cmd() - log.info('Generating %s',target) - - # Check sizeof - ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) - - if is_npy_no_signal(): - moredefs.append(('NPY_NO_SIGNAL', 1)) - - if is_npy_no_smp(): - moredefs.append(('NPY_NO_SMP', 1)) - else: - moredefs.append(('NPY_NO_SMP', 0)) - - mathlibs = check_mathlib(config_cmd) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) - - if ENABLE_SEPARATE_COMPILATION: - moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1)) - - # Check wether we can use inttypes (C99) formats - if config_cmd.check_decl('PRIdPTR', headers = 
['inttypes.h']): - moredefs.append(('NPY_USE_C99_FORMATS', 1)) - - # visibility check - hidden_visibility = visibility_define(config_cmd) - moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) - - # Add the C API/ABI versions - moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) - moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) - - # Add moredefs to header - target_f = open(target, 'w') - for d in moredefs: - if isinstance(d,str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0],d[1])) - - # Define __STDC_FORMAT_MACROS - target_f.write(""" -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS 1 -#endif -""") - target_f.close() - - # Dump the numpyconfig.h header to stdout - print('File: %s' % target) - target_f = open(target) - print(target_f.read()) - target_f.close() - print('EOF') - config.add_data_files((header_dir, target)) - return target - - def generate_api_func(module_name): - def generate_api(ext, build_dir): - script = join(codegen_dir, module_name + '.py') - sys.path.insert(0, codegen_dir) - try: - m = __import__(module_name) - log.info('executing %s', script) - h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) - finally: - del sys.path[0] - config.add_data_files((header_dir, h_file), - (header_dir, doc_file)) - return (h_file,) - return generate_api - - generate_numpy_api = generate_api_func('generate_numpy_api') - generate_ufunc_api = generate_api_func('generate_ufunc_api') - - config.add_include_dirs(join(local_dir, "src", "private")) - config.add_include_dirs(join(local_dir, "src")) - config.add_include_dirs(join(local_dir)) - # Multiarray version: this function is needed to build foo.c from foo.c.src - # when foo.c is included in another file and as such not in the src - # argument of build_ext command - def generate_multiarray_templated_sources(ext, build_dir): - from numpy.distutils.misc_util import get_cmd - - subpath = join('src', 
'multiarray') - sources = [join(local_dir, subpath, 'scalartypes.c.src'), - join(local_dir, subpath, 'arraytypes.c.src'), - join(local_dir, subpath, 'nditer.c.src'), - join(local_dir, subpath, 'lowlevel_strided_loops.c.src'), - join(local_dir, subpath, 'einsum.c.src')] - - # numpy.distutils generate .c from .c.src in weird directories, we have - # to add them there as they depend on the build_dir - config.add_include_dirs(join(build_dir, subpath)) - - cmd = get_cmd('build_src') - cmd.ensure_finalized() - - cmd.template_sources(sources, ext) - - # umath version: this function is needed to build foo.c from foo.c.src - # when foo.c is included in another file and as such not in the src - # argument of build_ext command - def generate_umath_templated_sources(ext, build_dir): - from numpy.distutils.misc_util import get_cmd - - subpath = join('src', 'umath') - # NOTE: For manual template conversion of loops.h.src, read the note - # in that file. - sources = [join(local_dir, subpath, 'loops.c.src'), - join(local_dir, subpath, 'umathmodule.c.src')] - - # numpy.distutils generate .c from .c.src in weird directories, we have - # to add them there as they depend on the build_dir - config.add_include_dirs(join(build_dir, subpath)) - - cmd = get_cmd('build_src') - cmd.ensure_finalized() - - cmd.template_sources(sources, ext) - - - def generate_umath_c(ext,build_dir): - target = join(build_dir,header_dir,'__umath_generated.c') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) - script = generate_umath_py - if newer(script,target): - f = open(target,'w') - f.write(generate_umath.make_code(generate_umath.defdict, - generate_umath.__file__)) - f.close() - return [] - - config.add_data_files('include/numpy/*.h') - config.add_include_dirs(join('src', 'npymath')) - config.add_include_dirs(join('src', 'multiarray')) - config.add_include_dirs(join('src', 'umath')) - - config.numpy_include_dirs.extend(config.paths('include')) - - deps = 
[join('src','npymath','_signbit.c'), - join('include','numpy','*object.h'), - 'include/numpy/fenv/fenv.c', - 'include/numpy/fenv/fenv.h', - join(codegen_dir,'genapi.py'), - ] - - # Don't install fenv unless we need them. - if sys.platform == 'cygwin': - config.add_data_dir('include/numpy/fenv') - - config.add_extension('_sort', - sources=[join('src','_sortmodule.c.src'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - ], - libraries=['npymath']) - - # npymath needs the config.h and numpyconfig.h files to be generated, but - # build_clib cannot handle generate_config_h and generate_numpyconfig_h - # (don't ask). Because clib are generated before extensions, we have to - # explicitly add an extension which has generate_config_h and - # generate_numpyconfig_h as sources *before* adding npymath. - - subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) - def get_mathlib_info(*args): - # Another ugly hack: the mathlib info is known once build_src is run, - # but we cannot use add_installed_pkg_config here either, so we only - # updated the substition dictionary during npymath build - config_cmd = config.get_config_cmd() - - # Check that the toolchain works, to fail early if it doesn't - # (avoid late errors with MATHLIB which are confusing if the - # compiler does not work). 
- st = config_cmd.try_link('int main(void) { return 0;}') - if not st: - raise RuntimeError("Broken toolchain: cannot link a simple C program") - mlibs = check_mathlib(config_cmd) - - posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) - msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) - subst_dict["posix_mathlib"] = posix_mlib - subst_dict["msvc_mathlib"] = msvc_mlib - - config.add_installed_library('npymath', - sources=[join('src', 'npymath', 'npy_math.c.src'), - join('src', 'npymath', 'ieee754.c.src'), - join('src', 'npymath', 'npy_math_complex.c.src'), - join('src', 'npymath', 'halffloat.c'), - get_mathlib_info], - install_dir='lib') - config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", - subst_dict) - config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", - subst_dict) - - multiarray_deps = [ - join('src', 'multiarray', 'arrayobject.h'), - join('src', 'multiarray', 'arraytypes.h'), - join('src', 'multiarray', 'buffer.h'), - join('src', 'multiarray', 'calculation.h'), - join('src', 'multiarray', 'common.h'), - join('src', 'multiarray', 'convert_datatype.h'), - join('src', 'multiarray', 'convert.h'), - join('src', 'multiarray', 'conversion_utils.h'), - join('src', 'multiarray', 'ctors.h'), - join('src', 'multiarray', 'descriptor.h'), - join('src', 'multiarray', 'getset.h'), - join('src', 'multiarray', 'hashdescr.h'), - join('src', 'multiarray', 'iterators.h'), - join('src', 'multiarray', 'mapping.h'), - join('src', 'multiarray', 'methods.h'), - join('src', 'multiarray', 'multiarraymodule.h'), - join('src', 'multiarray', 'numpymemoryview.h'), - join('src', 'multiarray', 'number.h'), - join('src', 'multiarray', 'numpyos.h'), - join('src', 'multiarray', 'refcount.h'), - join('src', 'multiarray', 'scalartypes.h'), - join('src', 'multiarray', 'sequence.h'), - join('src', 'multiarray', 'shape.h'), - join('src', 'multiarray', 'ucsnarrow.h'), - join('src', 'multiarray', 'usertypes.h'), - join('src', 'private', 'lowlevel_strided_loops.h')] - - 
multiarray_src = [join('src', 'multiarray', 'multiarraymodule.c'), - join('src', 'multiarray', 'hashdescr.c'), - join('src', 'multiarray', 'arrayobject.c'), - join('src', 'multiarray', 'numpymemoryview.c'), - join('src', 'multiarray', 'buffer.c'), - join('src', 'multiarray', 'datetime.c'), - join('src', 'multiarray', 'numpyos.c'), - join('src', 'multiarray', 'conversion_utils.c'), - join('src', 'multiarray', 'flagsobject.c'), - join('src', 'multiarray', 'descriptor.c'), - join('src', 'multiarray', 'iterators.c'), - join('src', 'multiarray', 'mapping.c'), - join('src', 'multiarray', 'number.c'), - join('src', 'multiarray', 'getset.c'), - join('src', 'multiarray', 'sequence.c'), - join('src', 'multiarray', 'methods.c'), - join('src', 'multiarray', 'ctors.c'), - join('src', 'multiarray', 'convert_datatype.c'), - join('src', 'multiarray', 'convert.c'), - join('src', 'multiarray', 'shape.c'), - join('src', 'multiarray', 'item_selection.c'), - join('src', 'multiarray', 'calculation.c'), - join('src', 'multiarray', 'common.c'), - join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'scalarapi.c'), - join('src', 'multiarray', 'refcount.c'), - join('src', 'multiarray', 'arraytypes.c.src'), - join('src', 'multiarray', 'scalartypes.c.src'), - join('src', 'multiarray', 'nditer.c.src'), - join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), - join('src', 'multiarray', 'dtype_transfer.c'), - join('src', 'multiarray', 'nditer_pywrap.c'), - join('src', 'multiarray', 'einsum.c.src'), - join('src', 'multiarray', 'ucsnarrow.c')] - - - umath_src = [join('src', 'umath', 'umathmodule.c.src'), - join('src', 'umath', 'funcs.inc.src'), - join('src', 'umath', 'loops.c.src'), - join('src', 'umath', 'ufunc_object.c')] - - umath_deps = [generate_umath_py, - join(codegen_dir,'generate_ufunc_api.py')] - - if not ENABLE_SEPARATE_COMPILATION: - multiarray_deps.extend(multiarray_src) - multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')] - 
multiarray_src.append(generate_multiarray_templated_sources) - - umath_deps.extend(umath_src) - umath_src = [join('src', 'umath', 'umathmodule_onefile.c')] - umath_src.append(generate_umath_templated_sources) - umath_src.append(join('src', 'umath', 'funcs.inc.src')) - - config.add_extension('multiarray', - sources = multiarray_src + - [generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir,'generate_numpy_api.py'), - join('*.py')], - depends = deps + multiarray_deps, - libraries=['npymath']) - - config.add_extension('umath', - sources = [generate_config_h, - generate_numpyconfig_h, - generate_umath_c, - generate_ufunc_api, - ] + umath_src, - depends = deps + umath_deps, - libraries=['npymath'], - ) - - config.add_extension('scalarmath', - sources=[join('src','scalarmathmodule.c.src'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - generate_ufunc_api], - libraries=['npymath'], - ) - - # Configure blasdot - blas_info = get_info('blas_opt',0) - #blas_info = {} - def get_dotblas_sources(ext, build_dir): - if blas_info: - if ('NO_ATLAS_INFO',1) in blas_info.get('define_macros',[]): - return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient. 
- return ext.depends[:1] - return None # no extension module will be built - - config.add_extension('_dotblas', - sources = [get_dotblas_sources], - depends=[join('blasdot','_dotblas.c'), - join('blasdot','cblas.h'), - ], - include_dirs = ['blasdot'], - extra_info = blas_info - ) - - config.add_extension('umath_tests', - sources = [join('src','umath', 'umath_tests.c.src')]) - - config.add_extension('multiarray_tests', - sources = [join('src', 'multiarray', 'multiarray_tests.c.src')]) - - config.add_data_dir('tests') - config.add_data_dir('tests/data') - - config.make_svn_version_py() - - return config - -if __name__=='__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/core/setup_common.py b/numpy-1.6.2/numpy/core/setup_common.py deleted file mode 100644 index 33659b8fae..0000000000 --- a/numpy-1.6.2/numpy/core/setup_common.py +++ /dev/null @@ -1,271 +0,0 @@ -# Code shared by distutils and scons builds -import sys -from os.path import join -import warnings -import copy -import binascii - -from distutils.ccompiler import CompileError - -#------------------- -# Versioning support -#------------------- -# How to change C_API_VERSION ? -# - increase C_API_VERSION value -# - record the hash for the new C API with the script cversions.py -# and add the hash to cversions.txt -# The hash values are used to remind developers when the C API number was not -# updated - generates a MismatchCAPIWarning warning which is turned into an -# exception for released version. - -# Binary compatibility version number. This number is increased whenever the -# C-API is changed such that binary compatibility is broken, i.e. whenever a -# recompile of extension modules is needed. -C_ABI_VERSION = 0x01000009 - -# Minor API version. This number is increased whenever a change is made to the -# C-API -- whether it breaks binary compatibility or not. 
Some changes, such -# as adding a function pointer to the end of the function table, can be made -# without breaking binary compatibility. In this case, only the C_API_VERSION -# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is -# broken, both C_API_VERSION and C_ABI_VERSION should be increased. -C_API_VERSION = 0x00000006 - -class MismatchCAPIWarning(Warning): - pass - -def is_released(config): - """Return True if a released version of numpy is detected.""" - from distutils.version import LooseVersion - - v = config.get_version('../version.py') - if v is None: - raise ValueError("Could not get version") - pv = LooseVersion(vstring=v).version - if len(pv) > 3: - return False - return True - -def get_api_versions(apiversion, codegen_dir): - """Return current C API checksum and the recorded checksum for the given - version of the C API version.""" - api_files = [join(codegen_dir, 'numpy_api_order.txt'), - join(codegen_dir, 'ufunc_api_order.txt')] - - # Compute the hash of the current API as defined in the .txt files in - # code_generators - sys.path.insert(0, codegen_dir) - try: - m = __import__('genapi') - numpy_api = __import__('numpy_api') - curapi_hash = m.fullapi_hash(numpy_api.full_api) - apis_hash = m.get_versions_hash() - finally: - del sys.path[0] - - return curapi_hash, apis_hash[apiversion] - -def check_api_version(apiversion, codegen_dir): - """Emits a MismacthCAPIWarning if the C API version needs updating.""" - curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) - - # If different hash, it means that the api .txt files in - # codegen_dir have been updated without the API version being - # updated. Any modification in those .txt files should be reflected - # in the api and eventually abi versions. - # To compute the checksum of the current API, use - # code_generators/cversions.py script - if not curapi_hash == api_hash: - msg = "API mismatch detected, the C API version " \ - "numbers have to be updated. 
Current C api version is %d, " \ - "with checksum %s, but recorded checksum for C API version %d in " \ - "codegen_dir/cversions.txt is %s. If functions were added in the " \ - "C API, you have to update C_API_VERSION in %s." - warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, - __file__), - MismatchCAPIWarning) -# Mandatory functions: if not found, fail the build -MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] - -# Standard functions which may not be available and for which we have a -# replacement implementation. Note that some of these are C99 functions. -OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", - "copysign", "nextafter"] - -# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h -OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot", - "copysign"] - -# C99 functions: float and long double versions -C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", - "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", - "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", - "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', - "exp2", "log2", "copysign", "nextafter"] - -C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] -C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] - -C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double'] - -C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog', - 'ccos', 'csin', 'cpow'] - -def fname2def(name): - return "HAVE_%s" % name.upper() - -def sym2def(symbol): - define = symbol.replace(' ', '') - return define.upper() - -def type2def(symbol): - define = symbol.replace(' ', '_') - return define.upper() - -# Code to detect long double representation taken from MPFR m4 macro 
-def check_long_double_representation(cmd): - cmd._check_compiler() - body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} - - # We need to use _compile because we need the object filename - src, object = cmd._compile(body, None, None, 'c') - try: - type = long_double_representation(pyod(object)) - return type - finally: - cmd._clean() - -LONG_DOUBLE_REPRESENTATION_SRC = r""" -/* "before" is 16 bytes to ensure there's no padding between it and "x". - * We're not expecting any "long double" bigger than 16 bytes or with - * alignment requirements stricter than 16 bytes. */ -typedef %(type)s test_type; - -struct { - char before[16]; - test_type x; - char after[8]; -} foo = { - { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, - -123456789.0, - { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } -}; -""" - -def pyod(filename): - """Python implementation of the od UNIX utility (od -b, more exactly). - - Parameters - ---------- - filename: str - name of the file to get the dump from. - - Returns - ------- - out: seq - list of lines of od output - Note - ---- - We only implement enough to get the necessary information for long double - representation, this is not intended as a compatible replacement for od. 
- """ - def _pyod2(): - out = [] - - fid = open(filename, 'rb') - try: - yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] - for i in range(0, len(yo), 16): - line = ['%07d' % int(oct(i))] - line.extend(['%03d' % c for c in yo[i:i+16]]) - out.append(" ".join(line)) - return out - finally: - fid.close() - - def _pyod3(): - out = [] - - fid = open(filename, 'rb') - try: - yo2 = [oct(o)[2:] for o in fid.read()] - for i in range(0, len(yo2), 16): - line = ['%07d' % int(oct(i)[2:])] - line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) - out.append(" ".join(line)) - return out - finally: - fid.close() - - if sys.version_info[0] < 3: - return _pyod2() - else: - return _pyod3() - -_BEFORE_SEQ = ['000','000','000','000','000','000','000','000', - '001','043','105','147','211','253','315','357'] -_AFTER_SEQ = ['376', '334','272','230','166','124','062','020'] - -_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] -_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] -_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000'] -_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', - '000', '000', '000', '000', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] -_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \ - ['000'] * 8 - -def long_double_representation(lines): - """Given a binary dump as given by GNU od -b, look for long double - representation.""" - - # Read contains a list of 32 items, each item is a byte (in octal - # representation, as a string). 
We 'slide' over the output until read is of - # the form before_seq + content + after_sequence, where content is the long double - # representation: - # - content is 12 bytes: 80 bits Intel representation - # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision - # - content is 8 bytes: same as double (not implemented yet) - read = [''] * 32 - saw = None - for line in lines: - # we skip the first word, as od -b output an index at the beginning of - # each line - for w in line.split()[1:]: - read.pop(0) - read.append(w) - - # If the end of read is equal to the after_sequence, read contains - # the long double - if read[-8:] == _AFTER_SEQ: - saw = copy.copy(read) - if read[:12] == _BEFORE_SEQ[4:]: - if read[12:-8] == _INTEL_EXTENDED_12B: - return 'INTEL_EXTENDED_12_BYTES_LE' - elif read[:8] == _BEFORE_SEQ[8:]: - if read[8:-8] == _INTEL_EXTENDED_16B: - return 'INTEL_EXTENDED_16_BYTES_LE' - elif read[8:-8] == _IEEE_QUAD_PREC_BE: - return 'IEEE_QUAD_BE' - elif read[8:-8] == _IEEE_QUAD_PREC_LE: - return 'IEEE_QUAD_LE' - elif read[8:-8] == _DOUBLE_DOUBLE_BE: - return 'DOUBLE_DOUBLE_BE' - elif read[:16] == _BEFORE_SEQ: - if read[16:-8] == _IEEE_DOUBLE_LE: - return 'IEEE_DOUBLE_LE' - elif read[16:-8] == _IEEE_DOUBLE_BE: - return 'IEEE_DOUBLE_BE' - - if saw is not None: - raise ValueError("Unrecognized format (%s)" % saw) - else: - # We never detected the after_sequence - raise ValueError("Could not lock sequences (%s)" % saw) diff --git a/numpy-1.6.2/numpy/core/setupscons.py b/numpy-1.6.2/numpy/core/setupscons.py deleted file mode 100644 index 4d329fded0..0000000000 --- a/numpy-1.6.2/numpy/core/setupscons.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -import sys -import glob -from os.path import join, basename - -from numpy.distutils import log - -from numscons import get_scons_build_dir - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration,dot_join - from numpy.distutils.command.scons import 
get_scons_pkg_build_dir - from numpy.distutils.system_info import get_info, default_lib_dirs - - config = Configuration('core',parent_package,top_path) - local_dir = config.local_path - - header_dir = 'include/numpy' # this is relative to config.path_in_package - - config.add_subpackage('code_generators') - - # List of files to register to numpy.distutils - dot_blas_src = [join('blasdot', '_dotblas.c'), - join('blasdot', 'cblas.h')] - api_definition = [join('code_generators', 'numpy_api_order.txt'), - join('code_generators', 'ufunc_api_order.txt')] - core_src = [join('src', basename(i)) for i in glob.glob(join(local_dir, - 'src', - '*.c'))] - core_src += [join('src', basename(i)) for i in glob.glob(join(local_dir, - 'src', - '*.src'))] - - source_files = dot_blas_src + api_definition + core_src + \ - [join(header_dir, 'numpyconfig.h.in')] - - # Add generated files to distutils... - def add_config_header(): - scons_build_dir = get_scons_build_dir() - # XXX: I really have to think about how to communicate path info - # between scons and distutils, and set the options at one single - # location. - target = join(get_scons_pkg_build_dir(config.name), 'config.h') - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - - def add_numpyconfig_header(): - scons_build_dir = get_scons_build_dir() - # XXX: I really have to think about how to communicate path info - # between scons and distutils, and set the options at one single - # location. 
- target = join(get_scons_pkg_build_dir(config.name), - 'include/numpy/numpyconfig.h') - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - config.add_data_files((header_dir, target)) - - def add_array_api(): - scons_build_dir = get_scons_build_dir() - # XXX: I really have to think about how to communicate path info - # between scons and distutils, and set the options at one single - # location. - h_file = join(get_scons_pkg_build_dir(config.name), - 'include/numpy/__multiarray_api.h') - t_file = join(get_scons_pkg_build_dir(config.name), - 'include/numpy/multiarray_api.txt') - config.add_data_files((header_dir, h_file), - (header_dir, t_file)) - - def add_ufunc_api(): - scons_build_dir = get_scons_build_dir() - # XXX: I really have to think about how to communicate path info - # between scons and distutils, and set the options at one single - # location. - h_file = join(get_scons_pkg_build_dir(config.name), - 'include/numpy/__ufunc_api.h') - t_file = join(get_scons_pkg_build_dir(config.name), - 'include/numpy/ufunc_api.txt') - config.add_data_files((header_dir, h_file), - (header_dir, t_file)) - - def add_generated_files(*args, **kw): - add_config_header() - add_numpyconfig_header() - add_array_api() - add_ufunc_api() - - config.add_sconscript('SConstruct', - post_hook = add_generated_files, - source_files = source_files) - config.add_scons_installed_library('npymath', 'lib') - - config.add_data_files('include/numpy/*.h') - config.add_include_dirs('src') - - config.numpy_include_dirs.extend(config.paths('include')) - - # Don't install fenv unless we need them. 
- if sys.platform == 'cygwin': - config.add_data_dir('include/numpy/fenv') - - config.add_data_dir('tests') - config.make_svn_version_py() - - return config - -if __name__=='__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/core/shape_base.py b/numpy-1.6.2/numpy/core/shape_base.py deleted file mode 100644 index 93de192995..0000000000 --- a/numpy-1.6.2/numpy/core/shape_base.py +++ /dev/null @@ -1,271 +0,0 @@ -__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack'] - -import numeric as _nx -from numeric import array, asanyarray, newaxis - -def atleast_1d(*arys): - """ - Convert inputs to arrays with at least one dimension. - - Scalar inputs are converted to 1-dimensional arrays, whilst - higher-dimensional inputs are preserved. - - Parameters - ---------- - array1, array2, ... : array_like - One or more input arrays. - - Returns - ------- - ret : ndarray - An array, or sequence of arrays, each with ``a.ndim >= 1``. - Copies are made only if necessary. - - See Also - -------- - atleast_2d, atleast_3d - - Examples - -------- - >>> np.atleast_1d(1.0) - array([ 1.]) - - >>> x = np.arange(9.0).reshape(3,3) - >>> np.atleast_1d(x) - array([[ 0., 1., 2.], - [ 3., 4., 5.], - [ 6., 7., 8.]]) - >>> np.atleast_1d(x) is x - True - - >>> np.atleast_1d(1, [3, 4]) - [array([1]), array([3, 4])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if len(ary.shape) == 0 : - result = ary.reshape(1) - else : - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - -def atleast_2d(*arys): - """ - View inputs as arrays with at least two dimensions. - - Parameters - ---------- - array1, array2, ... : array_like - One or more array-like sequences. Non-array inputs are converted - to arrays. Arrays that already have two or more dimensions are - preserved. - - Returns - ------- - res, res2, ... : ndarray - An array, or tuple of arrays, each with ``a.ndim >= 2``. 
- Copies are avoided where possible, and views with two or more - dimensions are returned. - - See Also - -------- - atleast_1d, atleast_3d - - Examples - -------- - >>> np.atleast_2d(3.0) - array([[ 3.]]) - - >>> x = np.arange(3.0) - >>> np.atleast_2d(x) - array([[ 0., 1., 2.]]) - >>> np.atleast_2d(x).base is x - True - - >>> np.atleast_2d(1, [1, 2], [[1, 2]]) - [array([[1]]), array([[1, 2]]), array([[1, 2]])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if len(ary.shape) == 0 : - result = ary.reshape(1, 1) - elif len(ary.shape) == 1 : - result = ary[newaxis, :] - else : - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - -def atleast_3d(*arys): - """ - View inputs as arrays with at least three dimensions. - - Parameters - ---------- - array1, array2, ... : array_like - One or more array-like sequences. Non-array inputs are converted to - arrays. Arrays that already have three or more dimensions are - preserved. - - Returns - ------- - res1, res2, ... : ndarray - An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are - avoided where possible, and views with three or more dimensions are - returned. For example, a 1-D array of shape ``(N,)`` becomes a view - of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a - view of shape ``(M, N, 1)``. - - See Also - -------- - atleast_1d, atleast_2d - - Examples - -------- - >>> np.atleast_3d(3.0) - array([[[ 3.]]]) - - >>> x = np.arange(3.0) - >>> np.atleast_3d(x).shape - (1, 3, 1) - - >>> x = np.arange(12.0).reshape(4,3) - >>> np.atleast_3d(x).shape - (4, 3, 1) - >>> np.atleast_3d(x).base is x - True - - >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): - ... print arr, arr.shape - ... 
- [[[1] - [2]]] (1, 2, 1) - [[[1] - [2]]] (1, 2, 1) - [[[1 2]]] (1, 1, 2) - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if len(ary.shape) == 0: - result = ary.reshape(1,1,1) - elif len(ary.shape) == 1: - result = ary[newaxis,:,newaxis] - elif len(ary.shape) == 2: - result = ary[:,:,newaxis] - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def vstack(tup): - """ - Stack arrays in sequence vertically (row wise). - - Take a sequence of arrays and stack them vertically to make a single - array. Rebuild arrays divided by `vsplit`. - - Parameters - ---------- - tup : sequence of ndarrays - Tuple containing arrays to be stacked. The arrays must have the same - shape along all but the first axis. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - concatenate : Join a sequence of arrays together. - vsplit : Split array into a list of multiple sub-arrays vertically. - - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that - are at least 2-dimensional. - - Examples - -------- - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.vstack((a,b)) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> a = np.array([[1], [2], [3]]) - >>> b = np.array([[2], [3], [4]]) - >>> np.vstack((a,b)) - array([[1], - [2], - [3], - [2], - [3], - [4]]) - - """ - return _nx.concatenate(map(atleast_2d,tup),0) - -def hstack(tup): - """ - Stack arrays in sequence horizontally (column wise). - - Take a sequence of arrays and stack them horizontally to make - a single array. Rebuild arrays divided by `hsplit`. - - Parameters - ---------- - tup : sequence of ndarrays - All arrays must have the same shape along all but the second axis. 
- - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third axis). - concatenate : Join a sequence of arrays together. - hsplit : Split array along second axis. - - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=1)`` - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.hstack((a,b)) - array([1, 2, 3, 2, 3, 4]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.hstack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - return _nx.concatenate(map(atleast_1d,tup),1) - diff --git a/numpy-1.6.2/numpy/core/src/_sortmodule.c.src b/numpy-1.6.2/numpy/core/src/_sortmodule.c.src deleted file mode 100644 index 527d0c402a..0000000000 --- a/numpy-1.6.2/numpy/core/src/_sortmodule.c.src +++ /dev/null @@ -1,1041 +0,0 @@ -/* -*- c -*- */ - -/* - * The purpose of this module is to add faster sort functions - * that are type-specific. This is done by altering the - * function table for the builtin descriptors. - * - * These sorting functions are copied almost directly from numarray - * with a few modifications (complex comparisons compare the imaginary - * part if the real parts are equal, for example), and the names - * are changed. - * - * The original sorting code is due to Charles R. Harris who wrote - * it for numarray. - */ - -/* - * Quick sort is usually the fastest, but the worst case scenario can - * be slower than the merge and heap sorts. The merge sort requires - * extra memory and so for large arrays may not be useful. - * - * The merge sort is *stable*, meaning that equal components - * are unmoved from their entry versions, so it can be used to - * implement lexigraphic sorting on multiple keys. - * - * The heap sort is included for completeness. 
- */ - - -#include "Python.h" -#include "numpy/noprefix.h" -#include "numpy/npy_math.h" -#include "numpy/halffloat.h" - -#include "npy_config.h" - -#define NOT_USED NPY_UNUSED(unused) -#define PYA_QS_STACK 100 -#define SMALL_QUICKSORT 15 -#define SMALL_MERGESORT 20 -#define SMALL_STRING 16 - -/* - ***************************************************************************** - ** SWAP MACROS ** - ***************************************************************************** - */ - -/**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, - * CDOUBLE,CLONGDOUBLE, INTP# - * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, - * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_half, npy_float, npy_double, npy_longdouble, npy_cfloat, - * npy_cdouble, npy_clongdouble, npy_intp# - */ -#define @TYPE@_SWAP(a,b) {@type@ tmp = (b); (b)=(a); (a) = tmp;} - -/**end repeat**/ - -/* - ***************************************************************************** - ** COMPARISON FUNCTIONS ** - ***************************************************************************** - */ - -/**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong# - */ -NPY_INLINE static int -@TYPE@_LT(@type@ a, @type@ b) -{ - return a < b; -} -/**end repeat**/ - - -/**begin repeat - * - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# - * #type = float, double, longdouble# - */ -NPY_INLINE static int -@TYPE@_LT(@type@ a, @type@ b) -{ - return a < b || (b != b && a == a); -} -/**end repeat**/ - -NPY_INLINE static int -HALF_LT(npy_half a, npy_half b) -{ - int ret; - - if (npy_half_isnan(b)) { - ret = !npy_half_isnan(a); - } else { - ret = !npy_half_isnan(a) && npy_half_lt_nonan(a, b); - } - - return ret; -} - -/* - * For inline functions SUN 
recommends not using a return in the then part - * of an if statement. It's a SUN compiler thing, so assign the return value - * to a variable instead. - */ - -/**begin repeat - * - * #TYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = cfloat, cdouble, clongdouble# - */ -NPY_INLINE static int -@TYPE@_LT(@type@ a, @type@ b) -{ - int ret; - - if (a.real < b.real) { - ret = a.imag == a.imag || b.imag != b.imag; - } - else if (a.real > b.real) { - ret = b.imag != b.imag && a.imag == a.imag; - } - else if (a.real == b.real || (a.real != a.real && b.real != b.real)) { - ret = a.imag < b.imag || (b.imag != b.imag && a.imag == a.imag); - } - else { - ret = b.real != b.real; - } - - return ret; -} -/**end repeat**/ - - -/* The PyObject functions are stubs for later use */ -NPY_INLINE static int -PyObject_LT(PyObject *pa, PyObject *pb) -{ - return 0; -} - - -NPY_INLINE static void -STRING_COPY(char *s1, char *s2, size_t len) -{ - memcpy(s1, s2, len); -} - - -NPY_INLINE static void -STRING_SWAP(char *s1, char *s2, size_t len) -{ - while(len--) { - const char t = *s1; - *s1++ = *s2; - *s2++ = t; - } -} - - -NPY_INLINE static int -STRING_LT(char *s1, char *s2, size_t len) -{ - const unsigned char *c1 = (unsigned char *)s1; - const unsigned char *c2 = (unsigned char *)s2; - size_t i; - int ret = 0; - - for (i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - ret = c1[i] < c2[i]; - break; - } - } - return ret; -} - - -NPY_INLINE static void -UNICODE_COPY(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - while(len--) { - *s1++ = *s2++; - } -} - - -NPY_INLINE static void -UNICODE_SWAP(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - while(len--) { - const npy_ucs4 t = *s1; - *s1++ = *s2; - *s2++ = t; - } -} - - -NPY_INLINE static int -UNICODE_LT(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - size_t i; - int ret = 0; - - for (i = 0; i < len; ++i) { - if (s1[i] != s2[i]) { - ret = s1[i] < s2[i]; - break; - } - } - return ret; -} - - -/* - 
***************************************************************************** - ** NUMERIC SORTS ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, ushort, float, double, longdouble, - * cfloat, cdouble, clongdouble# - */ - - -static int -@TYPE@_quicksort(@type@ *start, npy_intp num, void *NOT_USED) -{ - @type@ *pl = start; - @type@ *pr = start + num - 1; - @type@ vp; - @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk; - - for (;;) { - while ((pr - pl) > SMALL_QUICKSORT) { - /* quicksort partition */ - pm = pl + ((pr - pl) >> 1); - if (@TYPE@_LT(*pm, *pl)) @TYPE@_SWAP(*pm, *pl); - if (@TYPE@_LT(*pr, *pm)) @TYPE@_SWAP(*pr, *pm); - if (@TYPE@_LT(*pm, *pl)) @TYPE@_SWAP(*pm, *pl); - vp = *pm; - pi = pl; - pj = pr - 1; - @TYPE@_SWAP(*pm, *pj); - for (;;) { - do ++pi; while (@TYPE@_LT(*pi, vp)); - do --pj; while (@TYPE@_LT(vp, *pj)); - if (pi >= pj) { - break; - } - @TYPE@_SWAP(*pi,*pj); - } - pk = pr - 1; - @TYPE@_SWAP(*pi, *pk); - /* push largest partition on stack */ - if (pi - pl < pr - pi) { - *sptr++ = pi + 1; - *sptr++ = pr; - pr = pi - 1; - } - else { - *sptr++ = pl; - *sptr++ = pi - 1; - pl = pi + 1; - } - } - - /* insertion sort */ - for (pi = pl + 1; pi <= pr; ++pi) { - vp = *pi; - pj = pi; - pk = pi - 1; - while (pj > pl && @TYPE@_LT(vp, *pk)) { - *pj-- = *pk--; - } - *pj = vp; - } - if (sptr == stack) { - break; - } - pr = *(--sptr); - pl = *(--sptr); - } - - return 0; -} - -static int -@TYPE@_aquicksort(@type@ *v, npy_intp* tosort, npy_intp num, void *NOT_USED) -{ - @type@ vp; - npy_intp *pl, *pr; - npy_intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pk, vi; - - pl = tosort; - pr = tosort + num - 1; - - for (;;) { - 
while ((pr - pl) > SMALL_QUICKSORT) { - /* quicksort partition */ - pm = pl + ((pr - pl) >> 1); - if (@TYPE@_LT(v[*pm],v[*pl])) INTP_SWAP(*pm,*pl); - if (@TYPE@_LT(v[*pr],v[*pm])) INTP_SWAP(*pr,*pm); - if (@TYPE@_LT(v[*pm],v[*pl])) INTP_SWAP(*pm,*pl); - vp = v[*pm]; - pi = pl; - pj = pr - 1; - INTP_SWAP(*pm,*pj); - for (;;) { - do ++pi; while (@TYPE@_LT(v[*pi],vp)); - do --pj; while (@TYPE@_LT(vp,v[*pj])); - if (pi >= pj) { - break; - } - INTP_SWAP(*pi,*pj); - } - pk = pr - 1; - INTP_SWAP(*pi,*pk); - /* push largest partition on stack */ - if (pi - pl < pr - pi) { - *sptr++ = pi + 1; - *sptr++ = pr; - pr = pi - 1; - } - else { - *sptr++ = pl; - *sptr++ = pi - 1; - pl = pi + 1; - } - } - - /* insertion sort */ - for (pi = pl + 1; pi <= pr; ++pi) { - vi = *pi; - vp = v[vi]; - pj = pi; - pk = pi - 1; - while (pj > pl && @TYPE@_LT(vp, v[*pk])) { - *pj-- = *pk--; - } - *pj = vi; - } - if (sptr == stack) { - break; - } - pr = *(--sptr); - pl = *(--sptr); - } - - return 0; -} - - -static int -@TYPE@_heapsort(@type@ *start, npy_intp n, void *NOT_USED) -{ - @type@ tmp, *a; - npy_intp i,j,l; - - /* The array needs to be offset by one for heapsort indexing */ - a = start - 1; - - for (l = n>>1; l > 0; --l) { - tmp = a[l]; - for (i = l, j = l<<1; j <= n;) { - if (j < n && @TYPE@_LT(a[j], a[j+1])) { - j += 1; - } - if (@TYPE@_LT(tmp, a[j])) { - a[i] = a[j]; - i = j; - j += j; - } - else { - break; - } - } - a[i] = tmp; - } - - for (; n > 1;) { - tmp = a[n]; - a[n] = a[1]; - n -= 1; - for (i = 1, j = 2; j <= n;) { - if (j < n && @TYPE@_LT(a[j], a[j+1])) { - j++; - } - if (@TYPE@_LT(tmp, a[j])) { - a[i] = a[j]; - i = j; - j += j; - } - else { - break; - } - } - a[i] = tmp; - } - - return 0; -} - -static int -@TYPE@_aheapsort(@type@ *v, npy_intp *tosort, npy_intp n, void *NOT_USED) -{ - npy_intp *a, i,j,l, tmp; - /* The arrays need to be offset by one for heapsort indexing */ - a = tosort - 1; - - for (l = n>>1; l > 0; --l) { - tmp = a[l]; - for (i = l, j = l<<1; j <= n;) { - if 
(j < n && @TYPE@_LT(v[a[j]], v[a[j+1]])) { - j += 1; - } - if (@TYPE@_LT(v[tmp], v[a[j]])) { - a[i] = a[j]; - i = j; - j += j; - } - else { - break; - } - } - a[i] = tmp; - } - - for (; n > 1;) { - tmp = a[n]; - a[n] = a[1]; - n -= 1; - for (i = 1, j = 2; j <= n;) { - if (j < n && @TYPE@_LT(v[a[j]], v[a[j+1]])) { - j++; - } - if (@TYPE@_LT(v[tmp], v[a[j]])) { - a[i] = a[j]; - i = j; - j += j; - } - else { - break; - } - } - a[i] = tmp; - } - - return 0; -} - -static void -@TYPE@_mergesort0(@type@ *pl, @type@ *pr, @type@ *pw) -{ - @type@ vp, *pi, *pj, *pk, *pm; - - if (pr - pl > SMALL_MERGESORT) { - /* merge sort */ - pm = pl + ((pr - pl) >> 1); - @TYPE@_mergesort0(pl, pm, pw); - @TYPE@_mergesort0(pm, pr, pw); - for (pi = pw, pj = pl; pj < pm;) { - *pi++ = *pj++; - } - pj = pw; - pk = pl; - while (pj < pi && pm < pr) { - if (@TYPE@_LT(*pm,*pj)) { - *pk = *pm++; - } - else { - *pk = *pj++; - } - pk++; - } - while(pj < pi) { - *pk++ = *pj++; - } - } - else { - /* insertion sort */ - for (pi = pl + 1; pi < pr; ++pi) { - vp = *pi; - pj = pi; - pk = pi -1; - while (pj > pl && @TYPE@_LT(vp, *pk)) { - *pj-- = *pk--; - } - *pj = vp; - } - } -} - -static int -@TYPE@_mergesort(@type@ *start, npy_intp num, void *NOT_USED) -{ - @type@ *pl, *pr, *pw; - - pl = start; - pr = pl + num; - pw = (@type@ *) PyDataMem_NEW((num/2)*sizeof(@type@)); - if (!pw) { - PyErr_NoMemory(); - return -1; - } - @TYPE@_mergesort0(pl, pr, pw); - - PyDataMem_FREE(pw); - return 0; -} - -static void -@TYPE@_amergesort0(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw) -{ - @type@ vp; - npy_intp vi, *pi, *pj, *pk, *pm; - - if (pr - pl > SMALL_MERGESORT) { - /* merge sort */ - pm = pl + ((pr - pl + 1)>>1); - @TYPE@_amergesort0(pl,pm-1,v,pw); - @TYPE@_amergesort0(pm,pr,v,pw); - for (pi = pw, pj = pl; pj < pm; ++pi, ++pj) { - *pi = *pj; - } - for (pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) { - if (@TYPE@_LT(v[*pj],v[*pk])) { - *pm = *pj; - ++pj; - } - else { - *pm = *pk; - ++pk; - } - } - for (; pk < pi; 
++pm, ++pk) { - *pm = *pk; - } - } - else { - /* insertion sort */ - for (pi = pl + 1; pi <= pr; ++pi) { - vi = *pi; - vp = v[vi]; - for (pj = pi, pk = pi - 1; pj > pl && @TYPE@_LT(vp, v[*pk]); --pj, --pk) { - *pj = *pk; - } - *pj = vi; - } - } -} - -static int -@TYPE@_amergesort(@type@ *v, npy_intp *tosort, npy_intp num, void *NOT_USED) -{ - npy_intp *pl, *pr, *pw; - - pl = tosort; pr = pl + num - 1; - pw = PyDimMem_NEW((1+num/2)); - - if (!pw) { - PyErr_NoMemory(); - return -1; - } - - @TYPE@_amergesort0(pl, pr, v, pw); - PyDimMem_FREE(pw); - - return 0; -} - - -/**end repeat**/ - -/* - ***************************************************************************** - ** STRING SORTS ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #TYPE = STRING, UNICODE# - * #type = char, PyArray_UCS4# - */ - -static void -@TYPE@_mergesort0(@type@ *pl, @type@ *pr, @type@ *pw, @type@ *vp, size_t len) -{ - @type@ *pi, *pj, *pk, *pm; - - if ((size_t)(pr - pl) > SMALL_MERGESORT*len) { - /* merge sort */ - pm = pl + (((pr - pl)/len) >> 1)*len; - @TYPE@_mergesort0(pl, pm, pw, vp, len); - @TYPE@_mergesort0(pm, pr, pw, vp, len); - @TYPE@_COPY(pw, pl, pm - pl); - pi = pw + (pm - pl); - pj = pw; - pk = pl; - while (pj < pi && pm < pr) { - if (@TYPE@_LT(pm, pj, len)) { - @TYPE@_COPY(pk, pm, len); - pm += len; - } - else { - @TYPE@_COPY(pk, pj, len); - pj += len; - } - pk += len; - } - @TYPE@_COPY(pk, pj, pi - pj); - } - else { - /* insertion sort */ - for (pi = pl + len; pi < pr; pi += len) { - @TYPE@_COPY(vp, pi, len); - pj = pi; - pk = pi - len; - while (pj > pl && @TYPE@_LT(vp, pk, len)) { - @TYPE@_COPY(pj, pk, len); - pj -= len; - pk -= len; - } - @TYPE@_COPY(pj, vp, len); - } - } -} - -static int -@TYPE@_mergesort(@type@ *start, npy_intp num, PyArrayObject *arr) -{ - const size_t elsize = arr->descr->elsize; - const size_t len = elsize / sizeof(@type@); - @type@ *pl, *pr, *pw, *vp; - int err = 0; - - pl = start; - pr = 
pl + num*len; - pw = (@type@ *) PyDataMem_NEW((num/2)*elsize); - if (!pw) { - PyErr_NoMemory(); - err = -1; - goto fail_0; - } - vp = (@type@ *) PyDataMem_NEW(elsize); - if (!vp) { - PyErr_NoMemory(); - err = -1; - goto fail_1; - } - @TYPE@_mergesort0(pl, pr, pw, vp, len); - - PyDataMem_FREE(vp); -fail_1: - PyDataMem_FREE(pw); -fail_0: - return err; -} - -static int -@TYPE@_quicksort(@type@ *start, npy_intp num, PyArrayObject *arr) -{ - const size_t len = arr->descr->elsize/sizeof(@type@); - @type@ *vp = malloc(arr->descr->elsize); - @type@ *pl = start; - @type@ *pr = start + (num - 1)*len; - @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk; - - for (;;) { - while ((size_t)(pr - pl) > SMALL_QUICKSORT*len) { - /* quicksort partition */ - pm = pl + (((pr - pl)/len) >> 1)*len; - if (@TYPE@_LT(pm, pl, len)) @TYPE@_SWAP(pm, pl, len); - if (@TYPE@_LT(pr, pm, len)) @TYPE@_SWAP(pr, pm, len); - if (@TYPE@_LT(pm, pl, len)) @TYPE@_SWAP(pm, pl, len); - @TYPE@_COPY(vp, pm, len); - pi = pl; - pj = pr - len; - @TYPE@_SWAP(pm, pj, len); - for (;;) { - do pi += len; while (@TYPE@_LT(pi, vp, len)); - do pj -= len; while (@TYPE@_LT(vp, pj, len)); - if (pi >= pj) { - break; - } - @TYPE@_SWAP(pi, pj, len); - } - pk = pr - len; - @TYPE@_SWAP(pi, pk, len); - /* push largest partition on stack */ - if (pi - pl < pr - pi) { - *sptr++ = pi + len; - *sptr++ = pr; - pr = pi - len; - } - else { - *sptr++ = pl; - *sptr++ = pi - len; - pl = pi + len; - } - } - - /* insertion sort */ - for (pi = pl + len; pi <= pr; pi += len) { - @TYPE@_COPY(vp, pi, len); - pj = pi; - pk = pi - len; - while (pj > pl && @TYPE@_LT(vp, pk, len)) { - @TYPE@_COPY(pj, pk, len); - pj -= len; - pk -= len; - } - @TYPE@_COPY(pj, vp, len); - } - if (sptr == stack) { - break; - } - pr = *(--sptr); - pl = *(--sptr); - } - - free(vp); - return 0; -} - - -static int -@TYPE@_heapsort(@type@ *start, npy_intp n, PyArrayObject *arr) -{ - size_t len = arr->descr->elsize/sizeof(@type@); - @type@ *tmp = 
malloc(arr->descr->elsize); - @type@ *a = start - len; - npy_intp i,j,l; - - for (l = n>>1; l > 0; --l) { - @TYPE@_COPY(tmp, a + l*len, len); - for (i = l, j = l<<1; j <= n;) { - if (j < n && @TYPE@_LT(a + j*len, a + (j+1)*len, len)) - j += 1; - if (@TYPE@_LT(tmp, a + j*len, len)) { - @TYPE@_COPY(a + i*len, a + j*len, len); - i = j; - j += j; - } - else { - break; - } - } - @TYPE@_COPY(a + i*len, tmp, len); - } - - for (; n > 1;) { - @TYPE@_COPY(tmp, a + n*len, len); - @TYPE@_COPY(a + n*len, a + len, len); - n -= 1; - for (i = 1, j = 2; j <= n;) { - if (j < n && @TYPE@_LT(a + j*len, a + (j+1)*len, len)) - j++; - if (@TYPE@_LT(tmp, a + j*len, len)) { - @TYPE@_COPY(a + i*len, a + j*len, len); - i = j; - j += j; - } - else { - break; - } - } - @TYPE@_COPY(a + i*len, tmp, len); - } - - free(tmp); - return 0; -} - - -static int -@TYPE@_aheapsort(@type@ *v, npy_intp *tosort, npy_intp n, PyArrayObject *arr) -{ - size_t len = arr->descr->elsize/sizeof(@type@); - npy_intp *a, i,j,l, tmp; - - /* The array needs to be offset by one for heapsort indexing */ - a = tosort - 1; - - for (l = n>>1; l > 0; --l) { - tmp = a[l]; - for (i = l, j = l<<1; j <= n;) { - if (j < n && @TYPE@_LT(v + a[j]*len, v + a[j+1]*len, len)) - j += 1; - if (@TYPE@_LT(v + tmp*len, v + a[j]*len, len)) { - a[i] = a[j]; - i = j; - j += j; - } - else { - break; - } - } - a[i] = tmp; - } - - for (; n > 1;) { - tmp = a[n]; - a[n] = a[1]; - n -= 1; - for (i = 1, j = 2; j <= n;) { - if (j < n && @TYPE@_LT(v + a[j]*len, v + a[j+1]*len, len)) - j++; - if (@TYPE@_LT(v + tmp*len, v + a[j]*len, len)) { - a[i] = a[j]; - i = j; - j += j; - } - else { - break; - } - } - a[i] = tmp; - } - - return 0; -} - - -static int -@TYPE@_aquicksort(@type@ *v, npy_intp* tosort, npy_intp num, PyArrayObject *arr) -{ - size_t len = arr->descr->elsize/sizeof(@type@); - @type@ *vp; - npy_intp *pl = tosort; - npy_intp *pr = tosort + num - 1; - npy_intp *stack[PYA_QS_STACK]; - npy_intp **sptr=stack; - npy_intp *pm, *pi, *pj, *pk, vi; - - 
for (;;) { - while ((pr - pl) > SMALL_QUICKSORT) { - /* quicksort partition */ - pm = pl + ((pr - pl) >> 1); - if (@TYPE@_LT(v + (*pm)*len, v + (*pl)*len, len)) INTP_SWAP(*pm, *pl); - if (@TYPE@_LT(v + (*pr)*len, v + (*pm)*len, len)) INTP_SWAP(*pr, *pm); - if (@TYPE@_LT(v + (*pm)*len, v + (*pl)*len, len)) INTP_SWAP(*pm, *pl); - vp = v + (*pm)*len; - pi = pl; - pj = pr - 1; - INTP_SWAP(*pm,*pj); - for (;;) { - do ++pi; while (@TYPE@_LT(v + (*pi)*len, vp, len)); - do --pj; while (@TYPE@_LT(vp, v + (*pj)*len, len)); - if (pi >= pj) { - break; - } - INTP_SWAP(*pi,*pj); - } - pk = pr - 1; - INTP_SWAP(*pi,*pk); - /* push largest partition on stack */ - if (pi - pl < pr - pi) { - *sptr++ = pi + 1; - *sptr++ = pr; - pr = pi - 1; - } - else { - *sptr++ = pl; - *sptr++ = pi - 1; - pl = pi + 1; - } - } - - /* insertion sort */ - for (pi = pl + 1; pi <= pr; ++pi) { - vi = *pi; - vp = v + vi*len; - pj = pi; - pk = pi - 1; - while (pj > pl && @TYPE@_LT(vp, v + (*pk)*len, len)) { - *pj-- = *pk--; - } - *pj = vi; - } - if (sptr == stack) { - break; - } - pr = *(--sptr); - pl = *(--sptr); - } - - return 0; -} - - -static void -@TYPE@_amergesort0(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw, int len) -{ - @type@ *vp; - npy_intp vi, *pi, *pj, *pk, *pm; - - if (pr - pl > SMALL_MERGESORT) { - /* merge sort */ - pm = pl + ((pr - pl) >> 1); - @TYPE@_amergesort0(pl,pm,v,pw,len); - @TYPE@_amergesort0(pm,pr,v,pw,len); - for (pi = pw, pj = pl; pj < pm;) { - *pi++ = *pj++; - } - pj = pw; - pk = pl; - while (pj < pi && pm < pr) { - if (@TYPE@_LT(v + (*pm)*len, v + (*pj)*len, len)) { - *pk = *pm++; - } else { - *pk = *pj++; - } - pk++; - } - while (pj < pi) { - *pk++ = *pj++; - } - } else { - /* insertion sort */ - for (pi = pl + 1; pi < pr; ++pi) { - vi = *pi; - vp = v + vi*len; - pj = pi; - pk = pi -1; - while (pj > pl && @TYPE@_LT(vp, v + (*pk)*len, len)) { - *pj-- = *pk--; - } - *pj = vi; - } - } -} - - -static int -@TYPE@_amergesort(@type@ *v, npy_intp *tosort, npy_intp num, 
PyArrayObject *arr) -{ - const size_t elsize = arr->descr->elsize; - const size_t len = elsize / sizeof(@type@); - npy_intp *pl, *pr, *pw; - - pl = tosort; - pr = pl + num; - pw = PyDimMem_NEW(num/2); - if (!pw) { - PyErr_NoMemory(); - return -1; - } - @TYPE@_amergesort0(pl, pr, v, pw, len); - - PyDimMem_FREE(pw); - return 0; -} -/**end repeat**/ - -static void -add_sortfuncs(void) -{ - PyArray_Descr *descr; - - /**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE# - */ - descr = PyArray_DescrFromType(PyArray_@TYPE@); - descr->f->sort[PyArray_QUICKSORT] = - (PyArray_SortFunc *)@TYPE@_quicksort; - descr->f->argsort[PyArray_QUICKSORT] = - (PyArray_ArgSortFunc *)@TYPE@_aquicksort; - descr->f->sort[PyArray_HEAPSORT] = - (PyArray_SortFunc *)@TYPE@_heapsort; - descr->f->argsort[PyArray_HEAPSORT] = - (PyArray_ArgSortFunc *)@TYPE@_aheapsort; - descr->f->sort[PyArray_MERGESORT] = - (PyArray_SortFunc *)@TYPE@_mergesort; - descr->f->argsort[PyArray_MERGESORT] = - (PyArray_ArgSortFunc *)@TYPE@_amergesort; - /**end repeat**/ - -} - -static struct PyMethodDef methods[] = { - {NULL, NULL, 0, NULL} -}; - - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_sort", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if defined(NPY_PY3K) -PyObject *PyInit__sort(void) { - PyObject *m; - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } - import_array(); - add_sortfuncs(); - return m; -} -#else -PyMODINIT_FUNC -init_sort(void) { - Py_InitModule("_sort", methods); - - import_array(); - add_sortfuncs(); -} -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/_datetime.h b/numpy-1.6.2/numpy/core/src/multiarray/_datetime.h deleted file mode 100644 index 9be7e56734..0000000000 --- 
a/numpy-1.6.2/numpy/core/src/multiarray/_datetime.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _NPY_PRIVATE__DATETIME_H_ -#define _NPY_PRIVATE__DATETIME_H_ - -NPY_NO_EXPORT void -PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result); - -NPY_NO_EXPORT void -PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr, - npy_timedeltastruct *result); - -NPY_NO_EXPORT npy_datetime -PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d); - -NPY_NO_EXPORT npy_datetime -PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/arrayobject.c b/numpy-1.6.2/numpy/core/src/multiarray/arrayobject.c deleted file mode 100644 index af5a908cd8..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/arrayobject.c +++ /dev/null @@ -1,1453 +0,0 @@ -/* - Provide multidimensional arrays as a basic object type in python. - - Based on Original Numeric implementation - Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - - with contributions from many Numeric Python developers 1995-2004 - - Heavily modified in 2005 with inspiration from Numarray - - by - - Travis Oliphant, oliphant@ee.byu.edu - Brigham Young Univeristy - - -maintainer email: oliphant.travis@ieee.org - - Numarray design (which provided guidance) by - Space Science Telescope Institute - (J. 
Todd Miller, Perry Greenfield, Rick White) -*/ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -/*#include */ -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" - -#include "number.h" -#include "usertypes.h" -#include "arraytypes.h" -#include "scalartypes.h" -#include "arrayobject.h" -#include "ctors.h" -#include "methods.h" -#include "descriptor.h" -#include "iterators.h" -#include "mapping.h" -#include "getset.h" -#include "sequence.h" -#include "buffer.h" - -/*NUMPY_API - Compute the size of an array (in number of items) -*/ -NPY_NO_EXPORT npy_intp -PyArray_Size(PyObject *op) -{ - if (PyArray_Check(op)) { - return PyArray_SIZE((PyArrayObject *)op); - } - else { - return 0; - } -} - -/*NUMPY_API*/ -NPY_NO_EXPORT int -PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) -{ - int ret; - PyArrayObject *src; - PyArray_Descr *dtype = NULL; - int ndim = 0; - npy_intp dims[NPY_MAXDIMS]; - - Py_INCREF(src_object); - /* - * Special code to mimic Numeric behavior for - * character arrays. - */ - if (dest->descr->type == PyArray_CHARLTR && dest->nd > 0 \ - && PyString_Check(src_object)) { - npy_intp n_new, n_old; - char *new_string; - PyObject *tmp; - - n_new = dest->dimensions[dest->nd-1]; - n_old = PyString_Size(src_object); - if (n_new > n_old) { - new_string = (char *)malloc(n_new); - memmove(new_string, PyString_AS_STRING(src_object), n_old); - memset(new_string + n_old, ' ', n_new - n_old); - tmp = PyString_FromStringAndSize(new_string, n_new); - free(new_string); - Py_DECREF(src_object); - src_object = tmp; - } - } - - /* - * Get either an array object we can copy from, or its parameters - * if there isn't a convenient array available. 
- */ - if (PyArray_GetArrayParamsFromObject(src_object, PyArray_DESCR(dest), - 0, &dtype, &ndim, dims, &src, NULL) < 0) { - Py_DECREF(src_object); - return -1; - } - - /* If it's not an array, either assign from a sequence or as a scalar */ - if (src == NULL) { - /* If the input is scalar */ - if (ndim == 0) { - /* If there's one dest element and src is a Python scalar */ - if (PyArray_IsScalar(src_object, Generic)) { - src = (PyArrayObject *)PyArray_FromScalar(src_object, dtype); - if (src == NULL) { - Py_DECREF(src_object); - return -1; - } - } - else { - if (PyArray_SIZE(dest) == 1) { - Py_DECREF(dtype); - return PyArray_DESCR(dest)->f->setitem(src_object, - PyArray_DATA(dest), dest); - } - else { - src = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtype, 0, NULL, NULL, - NULL, 0, NULL); - if (src == NULL) { - Py_DECREF(src_object); - return -1; - } - if (PyArray_DESCR(src)->f->setitem(src_object, - PyArray_DATA(src), src) < 0) { - Py_DECREF(src_object); - Py_DECREF(src); - return -1; - } - } - } - } - else { - /* - * If there are more than enough dims, use AssignFromSequence - * because it can handle this style of broadcasting. 
- */ - if (ndim >= PyArray_NDIM(dest)) { - int res; - Py_DECREF(dtype); - res = PyArray_AssignFromSequence(dest, src_object); - Py_DECREF(src_object); - return res; - } - /* Otherwise convert to an array and do an array-based copy */ - src = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtype, ndim, dims, NULL, NULL, - PyArray_ISFORTRAN(dest), NULL); - if (src == NULL) { - Py_DECREF(src_object); - return -1; - } - if (PyArray_AssignFromSequence(src, src_object) < 0) { - Py_DECREF(src); - Py_DECREF(src_object); - return -1; - } - } - } - - /* If it's an array, do a move (handling possible overlapping data) */ - ret = PyArray_MoveInto(dest, src); - Py_DECREF(src); - Py_DECREF(src_object); - return ret; -} - - -/* returns an Array-Scalar Object of the type of arr - from the given pointer to memory -- main Scalar creation function - default new method calls this. -*/ - -/* Ideally, here the descriptor would contain all the information needed. - So, that we simply need the data and the descriptor, and perhaps - a flag -*/ - - -/* - Given a string return the type-number for - the data-type with that string as the type-object name. - Returns PyArray_NOTYPE without setting an error if no type can be - found. Only works for user-defined data-types. 
-*/ - -/*NUMPY_API - */ -NPY_NO_EXPORT int -PyArray_TypeNumFromName(char *str) -{ - int i; - PyArray_Descr *descr; - - for (i = 0; i < NPY_NUMUSERTYPES; i++) { - descr = userdescrs[i]; - if (strcmp(descr->typeobj->tp_name, str) == 0) { - return descr->type_num; - } - } - return PyArray_NOTYPE; -} - -/*********************** end C-API functions **********************/ - -/* array object functions */ - -static void -array_dealloc(PyArrayObject *self) { - - _array_dealloc_buffer_info(self); - - if (self->weakreflist != NULL) { - PyObject_ClearWeakRefs((PyObject *)self); - } - if (self->base) { - /* - * UPDATEIFCOPY means that base points to an - * array that should be updated with the contents - * of this array upon destruction. - * self->base->flags must have been WRITEABLE - * (checked previously) and it was locked here - * thus, unlock it. - */ - if (self->flags & UPDATEIFCOPY) { - ((PyArrayObject *)self->base)->flags |= WRITEABLE; - Py_INCREF(self); /* hold on to self in next call */ - if (PyArray_CopyAnyInto((PyArrayObject *)self->base, self) < 0) { - PyErr_Print(); - PyErr_Clear(); - } - /* - * Don't need to DECREF -- because we are deleting - *self already... - */ - } - /* - * In any case base is pointing to something that we need - * to DECREF -- either a view or a buffer object - */ - Py_DECREF(self->base); - } - - if ((self->flags & OWNDATA) && self->data) { - /* Free internal references if an Object array */ - if (PyDataType_FLAGCHK(self->descr, NPY_ITEM_REFCOUNT)) { - Py_INCREF(self); /*hold on to self */ - PyArray_XDECREF(self); - /* - * Don't need to DECREF -- because we are deleting - * self already... 
- */ - } - PyDataMem_FREE(self->data); - } - - PyDimMem_FREE(self->dimensions); - Py_DECREF(self->descr); - Py_TYPE(self)->tp_free((PyObject *)self); -} - -static int -dump_data(char **string, int *n, int *max_n, char *data, int nd, - npy_intp *dimensions, npy_intp *strides, PyArrayObject* self) -{ - PyArray_Descr *descr=self->descr; - PyObject *op, *sp; - char *ostring; - npy_intp i, N; - -#define CHECK_MEMORY do { if (*n >= *max_n-16) { \ - *max_n *= 2; \ - *string = (char *)_pya_realloc(*string, *max_n); \ - }} while (0) - - if (nd == 0) { - if ((op = descr->f->getitem(data, self)) == NULL) { - return -1; - } - sp = PyObject_Repr(op); - if (sp == NULL) { - Py_DECREF(op); - return -1; - } - ostring = PyString_AsString(sp); - N = PyString_Size(sp)*sizeof(char); - *n += N; - CHECK_MEMORY; - memmove(*string + (*n - N), ostring, N); - Py_DECREF(sp); - Py_DECREF(op); - return 0; - } - else { - CHECK_MEMORY; - (*string)[*n] = '['; - *n += 1; - for (i = 0; i < dimensions[0]; i++) { - if (dump_data(string, n, max_n, - data + (*strides)*i, - nd - 1, dimensions + 1, - strides + 1, self) < 0) { - return -1; - } - CHECK_MEMORY; - if (i < dimensions[0] - 1) { - (*string)[*n] = ','; - (*string)[*n+1] = ' '; - *n += 2; - } - } - CHECK_MEMORY; - (*string)[*n] = ']'; - *n += 1; - return 0; - } - -#undef CHECK_MEMORY -} - -static PyObject * -array_repr_builtin(PyArrayObject *self, int repr) -{ - PyObject *ret; - char *string; - int n, max_n; - - max_n = PyArray_NBYTES(self)*4*sizeof(char) + 7; - - if ((string = (char *)_pya_malloc(max_n)) == NULL) { - PyErr_SetString(PyExc_MemoryError, "out of memory"); - return NULL; - } - - if (repr) { - n = 6; - sprintf(string, "array("); - } - else { - n = 0; - } - if (dump_data(&string, &n, &max_n, self->data, - self->nd, self->dimensions, - self->strides, self) < 0) { - _pya_free(string); - return NULL; - } - - if (repr) { - if (PyArray_ISEXTENDED(self)) { - char buf[100]; - PyOS_snprintf(buf, sizeof(buf), "%d", self->descr->elsize); - 
sprintf(string+n, ", '%c%s')", self->descr->type, buf); - ret = PyUString_FromStringAndSize(string, n + 6 + strlen(buf)); - } - else { - sprintf(string+n, ", '%c')", self->descr->type); - ret = PyUString_FromStringAndSize(string, n+6); - } - } - else { - ret = PyUString_FromStringAndSize(string, n); - } - - _pya_free(string); - return ret; -} - -static PyObject *PyArray_StrFunction = NULL; -static PyObject *PyArray_ReprFunction = NULL; -static PyObject *PyArray_DatetimeParseFunction = NULL; - -/*NUMPY_API - * Set the array print function to be a Python function. - */ -NPY_NO_EXPORT void -PyArray_SetStringFunction(PyObject *op, int repr) -{ - if (repr) { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_ReprFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_ReprFunction = op; - } - else { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_StrFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_StrFunction = op; - } -} - -/*NUMPY_API - * Set the date time print function to be a Python function. 
- */ -NPY_NO_EXPORT void -PyArray_SetDatetimeParseFunction(PyObject *op) -{ - /* Dispose of previous callback */ - Py_XDECREF(PyArray_DatetimeParseFunction); - /* Add a reference to the new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_DatetimeParseFunction = op; -} - - -static PyObject * -array_repr(PyArrayObject *self) -{ - PyObject *s, *arglist; - - if (PyArray_ReprFunction == NULL) { - s = array_repr_builtin(self, 1); - } - else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_ReprFunction, arglist); - Py_DECREF(arglist); - } - return s; -} - -static PyObject * -array_str(PyArrayObject *self) -{ - PyObject *s, *arglist; - - if (PyArray_StrFunction == NULL) { - s = array_repr_builtin(self, 0); - } - else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_StrFunction, arglist); - Py_DECREF(arglist); - } - return s; -} - - - -/*NUMPY_API - */ -NPY_NO_EXPORT int -PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - PyArray_UCS4 c1, c2; - while(len-- > 0) { - c1 = *s1++; - c2 = *s2++; - if (c1 != c2) { - return (c1 < c2) ? -1 : 1; - } - } - return 0; -} - -/*NUMPY_API - */ -NPY_NO_EXPORT int -PyArray_CompareString(char *s1, char *s2, size_t len) -{ - const unsigned char *c1 = (unsigned char *)s1; - const unsigned char *c2 = (unsigned char *)s2; - size_t i; - - for(i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - return (c1[i] > c2[i]) ? 1 : -1; - } - } - return 0; -} - - -/* This also handles possibly mis-aligned data */ -/* Compare s1 and s2 which are not necessarily NULL-terminated. - s1 is of length len1 - s2 is of length len2 - If they are NULL terminated, then stop comparison. 
-*/ -static int -_myunincmp(PyArray_UCS4 *s1, PyArray_UCS4 *s2, int len1, int len2) -{ - PyArray_UCS4 *sptr; - PyArray_UCS4 *s1t=s1, *s2t=s2; - int val; - npy_intp size; - int diff; - - if ((npy_intp)s1 % sizeof(PyArray_UCS4) != 0) { - size = len1*sizeof(PyArray_UCS4); - s1t = malloc(size); - memcpy(s1t, s1, size); - } - if ((npy_intp)s2 % sizeof(PyArray_UCS4) != 0) { - size = len2*sizeof(PyArray_UCS4); - s2t = malloc(size); - memcpy(s2t, s2, size); - } - val = PyArray_CompareUCS4(s1t, s2t, MIN(len1,len2)); - if ((val != 0) || (len1 == len2)) { - goto finish; - } - if (len2 > len1) { - sptr = s2t+len1; - val = -1; - diff = len2-len1; - } - else { - sptr = s1t+len2; - val = 1; - diff=len1-len2; - } - while (diff--) { - if (*sptr != 0) { - goto finish; - } - sptr++; - } - val = 0; - - finish: - if (s1t != s1) { - free(s1t); - } - if (s2t != s2) { - free(s2t); - } - return val; -} - - - - -/* - * Compare s1 and s2 which are not necessarily NULL-terminated. - * s1 is of length len1 - * s2 is of length len2 - * If they are NULL terminated, then stop comparison. - */ -static int -_mystrncmp(char *s1, char *s2, int len1, int len2) -{ - char *sptr; - int val; - int diff; - - val = memcmp(s1, s2, MIN(len1, len2)); - if ((val != 0) || (len1 == len2)) { - return val; - } - if (len2 > len1) { - sptr = s2 + len1; - val = -1; - diff = len2 - len1; - } - else { - sptr = s1 + len2; - val = 1; - diff = len1 - len2; - } - while (diff--) { - if (*sptr != 0) { - return val; - } - sptr++; - } - return 0; /* Only happens if NULLs are everywhere */ -} - -/* Borrowed from Numarray */ - -#define SMALL_STRING 2048 - -#if defined(isspace) -#undef isspace -#define isspace(c) ((c==' ')||(c=='\t')||(c=='\n')||(c=='\r')||(c=='\v')||(c=='\f')) -#endif - -static void _rstripw(char *s, int n) -{ - int i; - for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. 
*/ - int c = s[i]; - - if (!c || isspace(c)) { - s[i] = 0; - } - else { - break; - } - } -} - -static void _unistripw(PyArray_UCS4 *s, int n) -{ - int i; - for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */ - PyArray_UCS4 c = s[i]; - if (!c || isspace(c)) { - s[i] = 0; - } - else { - break; - } - } -} - - -static char * -_char_copy_n_strip(char *original, char *temp, int nc) -{ - if (nc > SMALL_STRING) { - temp = malloc(nc); - if (!temp) { - PyErr_NoMemory(); - return NULL; - } - } - memcpy(temp, original, nc); - _rstripw(temp, nc); - return temp; -} - -static void -_char_release(char *ptr, int nc) -{ - if (nc > SMALL_STRING) { - free(ptr); - } -} - -static char * -_uni_copy_n_strip(char *original, char *temp, int nc) -{ - if (nc*sizeof(PyArray_UCS4) > SMALL_STRING) { - temp = malloc(nc*sizeof(PyArray_UCS4)); - if (!temp) { - PyErr_NoMemory(); - return NULL; - } - } - memcpy(temp, original, nc*sizeof(PyArray_UCS4)); - _unistripw((PyArray_UCS4 *)temp, nc); - return temp; -} - -static void -_uni_release(char *ptr, int nc) -{ - if (nc*sizeof(PyArray_UCS4) > SMALL_STRING) { - free(ptr); - } -} - - -/* End borrowed from numarray */ - -#define _rstrip_loop(CMP) { \ - void *aptr, *bptr; \ - char atemp[SMALL_STRING], btemp[SMALL_STRING]; \ - while(size--) { \ - aptr = stripfunc(iself->dataptr, atemp, N1); \ - if (!aptr) return -1; \ - bptr = stripfunc(iother->dataptr, btemp, N2); \ - if (!bptr) { \ - relfunc(aptr, N1); \ - return -1; \ - } \ - val = compfunc(aptr, bptr, N1, N2); \ - *dptr = (val CMP 0); \ - PyArray_ITER_NEXT(iself); \ - PyArray_ITER_NEXT(iother); \ - dptr += 1; \ - relfunc(aptr, N1); \ - relfunc(bptr, N2); \ - } \ - } - -#define _reg_loop(CMP) { \ - while(size--) { \ - val = compfunc((void *)iself->dataptr, \ - (void *)iother->dataptr, \ - N1, N2); \ - *dptr = (val CMP 0); \ - PyArray_ITER_NEXT(iself); \ - PyArray_ITER_NEXT(iother); \ - dptr += 1; \ - } \ - } - -#define _loop(CMP) if (rstrip) _rstrip_loop(CMP) \ - else _reg_loop(CMP) - -static 
int -_compare_strings(PyObject *result, PyArrayMultiIterObject *multi, - int cmp_op, void *func, int rstrip) -{ - PyArrayIterObject *iself, *iother; - Bool *dptr; - npy_intp size; - int val; - int N1, N2; - int (*compfunc)(void *, void *, int, int); - void (*relfunc)(char *, int); - char* (*stripfunc)(char *, char *, int); - - compfunc = func; - dptr = (Bool *)PyArray_DATA(result); - iself = multi->iters[0]; - iother = multi->iters[1]; - size = multi->size; - N1 = iself->ao->descr->elsize; - N2 = iother->ao->descr->elsize; - if ((void *)compfunc == (void *)_myunincmp) { - N1 >>= 2; - N2 >>= 2; - stripfunc = _uni_copy_n_strip; - relfunc = _uni_release; - } - else { - stripfunc = _char_copy_n_strip; - relfunc = _char_release; - } - switch (cmp_op) { - case Py_EQ: - _loop(==) - break; - case Py_NE: - _loop(!=) - break; - case Py_LT: - _loop(<) - break; - case Py_LE: - _loop(<=) - break; - case Py_GT: - _loop(>) - break; - case Py_GE: - _loop(>=) - break; - default: - PyErr_SetString(PyExc_RuntimeError, "bad comparison operator"); - return -1; - } - return 0; -} - -#undef _loop -#undef _reg_loop -#undef _rstrip_loop -#undef SMALL_STRING - -NPY_NO_EXPORT PyObject * -_strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, - int rstrip) -{ - PyObject *result; - PyArrayMultiIterObject *mit; - int val; - - /* Cast arrays to a common type */ - if (self->descr->type_num != other->descr->type_num) { -#if defined(NPY_PY3K) - /* - * Comparison between Bytes and Unicode is not defined in Py3K; - * we follow. 
- */ - result = Py_NotImplemented; - Py_INCREF(result); - return result; -#else - PyObject *new; - if (self->descr->type_num == PyArray_STRING && - other->descr->type_num == PyArray_UNICODE) { - PyArray_Descr* unicode = PyArray_DescrNew(other->descr); - unicode->elsize = self->descr->elsize << 2; - new = PyArray_FromAny((PyObject *)self, unicode, - 0, 0, 0, NULL); - if (new == NULL) { - return NULL; - } - Py_INCREF(other); - self = (PyArrayObject *)new; - } - else if (self->descr->type_num == PyArray_UNICODE && - other->descr->type_num == PyArray_STRING) { - PyArray_Descr* unicode = PyArray_DescrNew(self->descr); - unicode->elsize = other->descr->elsize << 2; - new = PyArray_FromAny((PyObject *)other, unicode, - 0, 0, 0, NULL); - if (new == NULL) { - return NULL; - } - Py_INCREF(self); - other = (PyArrayObject *)new; - } - else { - PyErr_SetString(PyExc_TypeError, - "invalid string data-types " - "in comparison"); - return NULL; - } -#endif - } - else { - Py_INCREF(self); - Py_INCREF(other); - } - - /* Broad-cast the arrays to a common shape */ - mit = (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, self, other); - Py_DECREF(self); - Py_DECREF(other); - if (mit == NULL) { - return NULL; - } - - result = PyArray_NewFromDescr(&PyArray_Type, - PyArray_DescrFromType(PyArray_BOOL), - mit->nd, - mit->dimensions, - NULL, NULL, 0, - NULL); - if (result == NULL) { - goto finish; - } - - if (self->descr->type_num == PyArray_UNICODE) { - val = _compare_strings(result, mit, cmp_op, _myunincmp, rstrip); - } - else { - val = _compare_strings(result, mit, cmp_op, _mystrncmp, rstrip); - } - - if (val < 0) { - Py_DECREF(result); result = NULL; - } - - finish: - Py_DECREF(mit); - return result; -} - -/* - * VOID-type arrays can only be compared equal and not-equal - * in which case the fields are all compared by extracting the fields - * and testing one at a time... - * equality testing is performed using logical_ands on all the fields. 
- * in-equality testing is performed using logical_ors on all the fields. - * - * VOID-type arrays without fields are compared for equality by comparing their - * memory at each location directly (using string-code). - */ -static PyObject * -_void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op) -{ - if (!(cmp_op == Py_EQ || cmp_op == Py_NE)) { - PyErr_SetString(PyExc_ValueError, - "Void-arrays can only be compared for equality."); - return NULL; - } - if (PyArray_HASFIELDS(self)) { - PyObject *res = NULL, *temp, *a, *b; - PyObject *key, *value, *temp2; - PyObject *op; - Py_ssize_t pos = 0; - npy_intp result_ndim = PyArray_NDIM(self) > PyArray_NDIM(other) ? - PyArray_NDIM(self) : PyArray_NDIM(other); - - op = (cmp_op == Py_EQ ? n_ops.logical_and : n_ops.logical_or); - while (PyDict_Next(self->descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - a = PyArray_EnsureAnyArray(array_subscript(self, key)); - if (a == NULL) { - Py_XDECREF(res); - return NULL; - } - b = array_subscript(other, key); - if (b == NULL) { - Py_XDECREF(res); - Py_DECREF(a); - return NULL; - } - temp = array_richcompare((PyArrayObject *)a,b,cmp_op); - Py_DECREF(a); - Py_DECREF(b); - if (temp == NULL) { - Py_XDECREF(res); - return NULL; - } - - /* - * If the field type has a non-trivial shape, additional - * dimensions will have been appended to `a` and `b`. - * In that case, reduce them using `op`. 
- */ - if (PyArray_Check(temp) && PyArray_NDIM(temp) > result_ndim) { - /* If the type was multidimensional, collapse that part to 1-D - */ - if (PyArray_NDIM(temp) != result_ndim+1) { - npy_intp dimensions[NPY_MAXDIMS]; - PyArray_Dims newdims; - - newdims.ptr = dimensions; - newdims.len = result_ndim+1; - memcpy(dimensions, PyArray_DIMS(temp), - sizeof(intp)*result_ndim); - dimensions[result_ndim] = -1; - temp2 = PyArray_Newshape((PyArrayObject *)temp, - &newdims, PyArray_ANYORDER); - if (temp2 == NULL) { - Py_DECREF(temp); - Py_XDECREF(res); - return NULL; - } - Py_DECREF(temp); - temp = temp2; - } - /* Reduce the extra dimension of `temp` using `op` */ - temp2 = PyArray_GenericReduceFunction((PyArrayObject *)temp, - op, result_ndim, - PyArray_BOOL, NULL); - if (temp2 == NULL) { - Py_DECREF(temp); - Py_XDECREF(res); - return NULL; - } - Py_DECREF(temp); - temp = temp2; - } - - if (res == NULL) { - res = temp; - } - else { - temp2 = PyObject_CallFunction(op, "OO", res, temp); - Py_DECREF(temp); - Py_DECREF(res); - if (temp2 == NULL) { - return NULL; - } - res = temp2; - } - } - if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "No fields found."); - } - return res; - } - else { - /* - * compare as a string. 
Assumes self and - * other have same descr->type - */ - return _strings_richcompare(self, other, cmp_op, 0); - } -} - -NPY_NO_EXPORT PyObject * -array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) -{ - PyObject *array_other, *result = NULL; - int typenum; - - switch (cmp_op) { - case Py_LT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less); - break; - case Py_LE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less_equal); - break; - case Py_EQ: - if (other == Py_None) { - Py_INCREF(Py_False); - return Py_False; - } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* - * If not successful, indicate that the items cannot be compared - * this way. - */ - if ((array_other == NULL) || - (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - } - else { - Py_INCREF(other); - array_other = other; - } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; - - _res = PyObject_RichCompareBool - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare - (self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; - } - /* - * If the comparison results in NULL, then the - * two array objects can not be compared together; - * indicate that - */ - Py_DECREF(array_other); - if (result == NULL) { - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - break; - case Py_NE: - if (other == Py_None) { - Py_INCREF(Py_True); - 
return Py_True; - } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, typenum, 0, 0); - /* - * If not successful, then objects cannot be - * compared this way - */ - if ((array_other == NULL) || (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - } - else { - Py_INCREF(other); - array_other = other; - } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.not_equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; - - _res = PyObject_RichCompareBool( - (PyObject *)self->descr, - (PyObject *) - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare( - self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; - } - - Py_DECREF(array_other); - if (result == NULL) { - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - break; - case Py_GT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); - break; - case Py_GE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); - break; - default: - result = Py_NotImplemented; - Py_INCREF(result); - } - if (result == Py_NotImplemented) { - /* Try to handle string comparisons */ - if (self->descr->type_num == PyArray_OBJECT) { - return result; - } - array_other = PyArray_FromObject(other,PyArray_NOTYPE, 0, 0); - if (PyArray_ISSTRING(self) && PyArray_ISSTRING(array_other)) { - Py_DECREF(result); - result = _strings_richcompare(self, (PyArrayObject *) - array_other, cmp_op, 0); - } - Py_DECREF(array_other); - } - return result; -} - -/*NUMPY_API - */ -NPY_NO_EXPORT int 
-PyArray_ElementStrides(PyObject *arr) -{ - int itemsize = PyArray_ITEMSIZE(arr); - int i, N = PyArray_NDIM(arr); - npy_intp *strides = PyArray_STRIDES(arr); - - for (i = 0; i < N; i++) { - if ((strides[i] % itemsize) != 0) { - return 0; - } - } - return 1; -} - -/* - * This routine checks to see if newstrides (of length nd) will not - * ever be able to walk outside of the memory implied numbytes and offset. - * - * The available memory is assumed to start at -offset and proceed - * to numbytes-offset. The strides are checked to ensure - * that accessing memory using striding will not try to reach beyond - * this memory for any of the axes. - * - * If numbytes is 0 it will be calculated using the dimensions and - * element-size. - * - * This function checks for walking beyond the beginning and right-end - * of the buffer and therefore works for any integer stride (positive - * or negative). - */ - -/*NUMPY_API*/ -NPY_NO_EXPORT Bool -PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp offset, - npy_intp *dims, npy_intp *newstrides) -{ - int i; - npy_intp byte_begin; - npy_intp begin; - npy_intp end; - - if (numbytes == 0) { - numbytes = PyArray_MultiplyList(dims, nd) * elsize; - } - begin = -offset; - end = numbytes - offset - elsize; - for (i = 0; i < nd; i++) { - byte_begin = newstrides[i]*(dims[i] - 1); - if ((byte_begin < begin) || (byte_begin > end)) { - return FALSE; - } - } - return TRUE; -} - - -static PyObject * -array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"shape", "dtype", "buffer", "offset", "strides", - "order", NULL}; - PyArray_Descr *descr = NULL; - int itemsize; - PyArray_Dims dims = {NULL, 0}; - PyArray_Dims strides = {NULL, 0}; - PyArray_Chunk buffer; - longlong offset = 0; - NPY_ORDER order = PyArray_CORDER; - int fortran = 0; - PyArrayObject *ret; - - buffer.ptr = NULL; - /* - * Usually called with shape and type but can also be called with buffer, - * strides, and swapped info 
For now, let's just use this to create an - * empty, contiguous array of a specific type and shape. - */ - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&LO&O&", - kwlist, PyArray_IntpConverter, - &dims, - PyArray_DescrConverter, - &descr, - PyArray_BufferConverter, - &buffer, - &offset, - &PyArray_IntpConverter, - &strides, - &PyArray_OrderConverter, - &order)) { - goto fail; - } - if (order == PyArray_FORTRANORDER) { - fortran = 1; - } - if (descr == NULL) { - descr = PyArray_DescrFromType(PyArray_DEFAULT); - } - - itemsize = descr->elsize; - if (itemsize == 0) { - PyErr_SetString(PyExc_ValueError, - "data-type with unspecified variable length"); - goto fail; - } - - if (strides.ptr != NULL) { - npy_intp nb, off; - if (strides.len != dims.len) { - PyErr_SetString(PyExc_ValueError, - "strides, if given, must be " \ - "the same length as shape"); - goto fail; - } - - if (buffer.ptr == NULL) { - nb = 0; - off = 0; - } - else { - nb = buffer.len; - off = (npy_intp) offset; - } - - - if (!PyArray_CheckStrides(itemsize, dims.len, - nb, off, - dims.ptr, strides.ptr)) { - PyErr_SetString(PyExc_ValueError, - "strides is incompatible " \ - "with shape of requested " \ - "array and size of buffer"); - goto fail; - } - } - - if (buffer.ptr == NULL) { - ret = (PyArrayObject *) - PyArray_NewFromDescr(subtype, descr, - (int)dims.len, - dims.ptr, - strides.ptr, NULL, fortran, NULL); - if (ret == NULL) { - descr = NULL; - goto fail; - } - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)) { - /* place Py_None in object positions */ - PyArray_FillObjectArray(ret, Py_None); - if (PyErr_Occurred()) { - descr = NULL; - goto fail; - } - } - } - else { - /* buffer given -- use it */ - if (dims.len == 1 && dims.ptr[0] == -1) { - dims.ptr[0] = (buffer.len-(npy_intp)offset) / itemsize; - } - else if ((strides.ptr == NULL) && - (buffer.len < (offset + (((npy_intp)itemsize)* - PyArray_MultiplyList(dims.ptr, - dims.len))))) { - PyErr_SetString(PyExc_TypeError, - "buffer is too small 
for " \ - "requested array"); - goto fail; - } - /* get writeable and aligned */ - if (fortran) { - buffer.flags |= FORTRAN; - } - ret = (PyArrayObject *)\ - PyArray_NewFromDescr(subtype, descr, - dims.len, dims.ptr, - strides.ptr, - offset + (char *)buffer.ptr, - buffer.flags, NULL); - if (ret == NULL) { - descr = NULL; - goto fail; - } - PyArray_UpdateFlags(ret, UPDATE_ALL); - ret->base = buffer.base; - Py_INCREF(buffer.base); - } - - PyDimMem_FREE(dims.ptr); - if (strides.ptr) { - PyDimMem_FREE(strides.ptr); - } - return (PyObject *)ret; - - fail: - Py_XDECREF(descr); - if (dims.ptr) { - PyDimMem_FREE(dims.ptr); - } - if (strides.ptr) { - PyDimMem_FREE(strides.ptr); - } - return NULL; -} - - -static PyObject * -array_iter(PyArrayObject *arr) -{ - if (arr->nd == 0) { - PyErr_SetString(PyExc_TypeError, - "iteration over a 0-d array"); - return NULL; - } - return PySeqIter_New((PyObject *)arr); -} - -static PyObject * -array_alloc(PyTypeObject *type, Py_ssize_t NPY_UNUSED(nitems)) -{ - PyObject *obj; - /* nitems will always be 0 */ - obj = (PyObject *)_pya_malloc(sizeof(PyArrayObject)); - PyObject_Init(obj, type); - return obj; -} - - -NPY_NO_EXPORT PyTypeObject PyArray_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.ndarray", /* tp_name */ - sizeof(PyArrayObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)array_dealloc, /* tp_dealloc */ - (printfunc)NULL, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - (reprfunc)array_repr, /* tp_repr */ - &array_as_number, /* tp_as_number */ - &array_as_sequence, /* tp_as_sequence */ - &array_as_mapping, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)array_str, /* tp_str */ - (getattrofunc)0, /* tp_getattro */ - (setattrofunc)0, /* tp_setattro */ - &array_as_buffer, /* 
tp_as_buffer */ - (Py_TPFLAGS_DEFAULT -#if !defined(NPY_PY3K) - | Py_TPFLAGS_CHECKTYPES -#endif -#if (PY_VERSION_HEX >= 0x02060000) && (PY_VERSION_HEX < 0x03000000) - | Py_TPFLAGS_HAVE_NEWBUFFER -#endif - | Py_TPFLAGS_BASETYPE), /* tp_flags */ - 0, /* tp_doc */ - - (traverseproc)0, /* tp_traverse */ - (inquiry)0, /* tp_clear */ - (richcmpfunc)array_richcompare, /* tp_richcompare */ - offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */ - (getiterfunc)array_iter, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - array_methods, /* tp_methods */ - 0, /* tp_members */ - array_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - array_alloc, /* tp_alloc */ - (newfunc)array_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; diff --git a/numpy-1.6.2/numpy/core/src/multiarray/arrayobject.h b/numpy-1.6.2/numpy/core/src/multiarray/arrayobject.h deleted file mode 100644 index ec33614357..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/arrayobject.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _NPY_INTERNAL_ARRAYOBJECT_H_ -#define _NPY_INTERNAL_ARRAYOBJECT_H_ - -#ifndef _MULTIARRAYMODULE -#error You should not include this -#endif - -NPY_NO_EXPORT PyObject * -_strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, - int rstrip); - -NPY_NO_EXPORT PyObject * -array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/arraytypes.c.src b/numpy-1.6.2/numpy/core/src/multiarray/arraytypes.c.src deleted file mode 100644 index de0c7ffcbf..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/arraytypes.c.src +++ /dev/null @@ -1,4233 +0,0 @@ -/* -*- c -*- */ -#define 
PY_SSIZE_T_CLEAN -#include "Python.h" -#include "datetime.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_3kcompat.h" - -#include "numpy/npy_math.h" -#include "numpy/halffloat.h" - -#include "common.h" -#include "ctors.h" -#include "usertypes.h" -#include "npy_config.h" -#include "_datetime.h" - -#include "numpyos.h" - - -/* - ***************************************************************************** - ** PYTHON TYPES TO C TYPES ** - ***************************************************************************** - */ - -static double -MyPyFloat_AsDouble(PyObject *obj) -{ - double ret = 0; - PyObject *num; - - if (obj == Py_None) { - return NPY_NAN; - } - num = PyNumber_Float(obj); - if (num == NULL) { - return NPY_NAN; - } - ret = PyFloat_AsDouble(num); - Py_DECREF(num); - return ret; -} - -static npy_half -MyPyFloat_AsHalf(PyObject *obj) -{ - return npy_double_to_half(MyPyFloat_AsDouble(obj)); -} - -static PyObject * -MyPyFloat_FromHalf(npy_half h) -{ - return PyFloat_FromDouble(npy_half_to_double(h)); -} - - -/**begin repeat - * #type = long, longlong# - * #Type = Long, LongLong# - */ -static @type@ -MyPyLong_As@Type@ (PyObject *obj) -{ - @type@ ret; - PyObject *num = PyNumber_Long(obj); - - if (num == NULL) { - return -1; - } - ret = PyLong_As@Type@(num); - Py_DECREF(num); - return ret; -} - -static u@type@ -MyPyLong_AsUnsigned@Type@ (PyObject *obj) -{ - u@type@ ret; - PyObject *num = PyNumber_Long(obj); - - if (num == NULL) { - return -1; - } - ret = PyLong_AsUnsigned@Type@(num); - if (PyErr_Occurred()) { - PyErr_Clear(); - ret = PyLong_As@Type@(num); - } - Py_DECREF(num); - return ret; -} - -/**end repeat**/ - - -/* - ***************************************************************************** - ** GETITEM AND SETITEM ** - ***************************************************************************** - */ - - -static char * 
_SEQUENCE_MESSAGE = "error setting an array element with a sequence"; - -/**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, LONG, UINT, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE# - * #func1 = PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, - * PyLong_FromLongLong, PyLong_FromUnsignedLongLong, - * MyPyFloat_FromHalf, PyFloat_FromDouble*2# - * #func2 = PyObject_IsTrue, MyPyLong_AsLong*6, MyPyLong_AsUnsignedLong*2, - * MyPyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, - * MyPyFloat_AsHalf, MyPyFloat_AsDouble*2# - * #type = Bool, byte, ubyte, short, ushort, int, long, uint, ulong, - * longlong, ulonglong, npy_half, float, double# - * #type1 = long*7, ulong*2, longlong, ulonglong, npy_half, float, double# - * #kind = Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, - * LongLong, ULongLong, Half, Float, Double# -*/ -static PyObject * -@TYPE@_getitem(char *ip, PyArrayObject *ap) { - @type@ t1; - - if ((ap == NULL) || PyArray_ISBEHAVED_RO(ap)) { - t1 = *((@type@ *)ip); - return @func1@((@type1@)t1); - } - else { - ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap); - return @func1@((@type1@)t1); - } -} - -static int -@TYPE@_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - @type@ temp; /* ensures alignment */ - - if (PyArray_IsScalar(op, @kind@)) { - temp = ((Py@kind@ScalarObject *)op)->obval; - } - else { - temp = (@type@)@func2@(op); - } - if (PyErr_Occurred()) { - if (PySequence_Check(op) && !PyString_Check(op) && - !PyUnicode_Check(op)) { - PyErr_Clear(); - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence."); - } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) - *((@type@ *)ov)=temp; - else { - ap->descr->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); - } - return 0; -} - -/**end repeat**/ - -/**begin repeat - * - * #TYPE = CFLOAT, CDOUBLE# - * #type = float, double# - */ -static PyObject * -@TYPE@_getitem(char *ip, PyArrayObject *ap) { - @type@ t1, 
t2; - - if ((ap == NULL) || PyArray_ISBEHAVED_RO(ap)) { - return PyComplex_FromDoubles((double)((@type@ *)ip)[0], - (double)((@type@ *)ip)[1]); - } - else { - int size = sizeof(@type@); - Bool swap = !PyArray_ISNOTSWAPPED(ap); - copy_and_swap(&t1, ip, size, 1, 0, swap); - copy_and_swap(&t2, ip + size, size, 1, 0, swap); - return PyComplex_FromDoubles((double)t1, (double)t2); - } -} - -/**end repeat**/ - - - -/**begin repeat - * - * #TYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = float, double, longdouble# - * #kind = CFloat, CDouble, CLongDouble# - */ -static int -@TYPE@_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - Py_complex oop; - PyObject *op2; - c@type@ temp; - int rsize; - - if (!(PyArray_IsScalar(op, @kind@))) { - if (PyArray_Check(op) && (PyArray_NDIM(op) == 0)) { - op2 = ((PyArrayObject *)op)->descr->f->getitem - (((PyArrayObject *)op)->data, (PyArrayObject *)op); - } - else { - op2 = op; Py_INCREF(op); - } - if (op2 == Py_None) { - oop.real = NPY_NAN; - oop.imag = NPY_NAN; - } - else { - oop = PyComplex_AsCComplex (op2); - } - Py_DECREF(op2); - if (PyErr_Occurred()) { - return -1; - } - temp.real = (@type@) oop.real; - temp.imag = (@type@) oop.imag; - } - else { - temp = ((Py@kind@ScalarObject *)op)->obval; - } - memcpy(ov, &temp, ap->descr->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) { - byte_swap_vector(ov, 2, sizeof(@type@)); - } - rsize = sizeof(@type@); - copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap)); - return 0; -} - -/**end repeat**/ - -/* - * These return array scalars which are different than other date-types. 
- */ - -static PyObject * -LONGDOUBLE_getitem(char *ip, PyArrayObject *ap) -{ - return PyArray_Scalar(ip, ap->descr, NULL); -} - -static int -LONGDOUBLE_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - /* ensure alignment */ - longdouble temp; - - if (PyArray_IsScalar(op, LongDouble)) { - temp = ((PyLongDoubleScalarObject *)op)->obval; - } - else { - temp = (longdouble) MyPyFloat_AsDouble(op); - } - if (PyErr_Occurred()) { - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((longdouble *)ov) = temp; - } - else { - copy_and_swap(ov, &temp, ap->descr->elsize, 1, 0, - !PyArray_ISNOTSWAPPED(ap)); - } - return 0; -} - -static PyObject * -CLONGDOUBLE_getitem(char *ip, PyArrayObject *ap) -{ - return PyArray_Scalar(ip, ap->descr, NULL); -} - -/* UNICODE */ -static PyObject * -UNICODE_getitem(char *ip, PyArrayObject *ap) -{ - Py_ssize_t size = PyArray_ITEMSIZE(ap); - int swap = !PyArray_ISNOTSWAPPED(ap); - int align = !PyArray_ISALIGNED(ap); - - return (PyObject *)PyUnicode_FromUCS4(ip, size, swap, align); -} - -static int -UNICODE_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - PyObject *temp; - Py_UNICODE *ptr; - int datalen; -#ifndef Py_UNICODE_WIDE - char *buffer; -#endif - - if (!PyBytes_Check(op) && !PyUnicode_Check(op) && - PySequence_Check(op) && PySequence_Size(op) > 0) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence"); - return -1; - } - /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) { - PyErr_Clear(); - } -#if defined(NPY_PY3K) - if (PyBytes_Check(op)) { - /* Try to decode from ASCII */ - temp = PyUnicode_FromEncodedObject(op, "ASCII", "strict"); - if (temp == NULL) { - return -1; - } - } - else if ((temp=PyObject_Str(op)) == NULL) { -#else - if ((temp=PyObject_Unicode(op)) == NULL) { -#endif - return -1; - } - ptr = PyUnicode_AS_UNICODE(temp); - if ((ptr == NULL) || (PyErr_Occurred())) { - Py_DECREF(temp); - return -1; - } - datalen = PyUnicode_GET_DATA_SIZE(temp); - -#ifdef 
Py_UNICODE_WIDE - memcpy(ov, ptr, MIN(ap->descr->elsize, datalen)); -#else - if (!PyArray_ISALIGNED(ap)) { - buffer = _pya_malloc(ap->descr->elsize); - if (buffer == NULL) { - Py_DECREF(temp); - PyErr_NoMemory(); - return -1; - } - } - else { - buffer = ov; - } - datalen = PyUCS2Buffer_AsUCS4(ptr, (PyArray_UCS4 *)buffer, - datalen >> 1, ap->descr->elsize >> 2); - datalen <<= 2; - if (!PyArray_ISALIGNED(ap)) { - memcpy(ov, buffer, datalen); - _pya_free(buffer); - } -#endif - /* Fill in the rest of the space with 0 */ - if (ap->descr->elsize > datalen) { - memset(ov + datalen, 0, (ap->descr->elsize - datalen)); - } - if (!PyArray_ISNOTSWAPPED(ap)) { - byte_swap_vector(ov, ap->descr->elsize >> 2, 4); - } - Py_DECREF(temp); - return 0; -} - -/* STRING - * - * can handle both NULL-terminated and not NULL-terminated cases - * will truncate all ending NULLs in returned string. - */ -static PyObject * -STRING_getitem(char *ip, PyArrayObject *ap) -{ - /* Will eliminate NULLs at the end */ - char *ptr; - int size = ap->descr->elsize; - - ptr = ip + size - 1; - while (*ptr-- == '\0' && size > 0) { - size--; - } - return PyBytes_FromStringAndSize(ip,size); -} - -static int -STRING_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - char *ptr; - Py_ssize_t len; - PyObject *temp = NULL; - - /* Handle case of assigning from an array scalar */ - if (PyArray_Check(op) && PyArray_NDIM(op) == 0) { - temp = PyArray_ToScalar(PyArray_DATA(op), op); - if (temp == NULL) { - return -1; - } - else { - int res = STRING_setitem(temp, ov, ap); - Py_DECREF(temp); - return res; - } - } - - if (!PyBytes_Check(op) && !PyUnicode_Check(op) - && PySequence_Check(op) && PySequence_Size(op) != 0) { - PyErr_SetString(PyExc_ValueError, - "cannot set an array element with a sequence"); - return -1; - } -#if defined(NPY_PY3K) - if (PyUnicode_Check(op)) { - /* Assume ASCII codec -- function similarly as Python 2 */ - temp = PyUnicode_AsASCIIString(op); - if (temp == NULL) return -1; - } - else if 
(PyBytes_Check(op) || PyMemoryView_Check(op)) { - temp = PyObject_Bytes(op); - if (temp == NULL) { - return -1; - } - } - else { - /* Emulate similar casting behavior as on Python 2 */ - PyObject *str; - str = PyObject_Str(op); - if (str == NULL) { - return -1; - } - temp = PyUnicode_AsASCIIString(str); - Py_DECREF(str); - if (temp == NULL) { - return -1; - } - } -#else - if ((temp = PyObject_Str(op)) == NULL) { - return -1; - } -#endif - if (PyBytes_AsStringAndSize(temp, &ptr, &len) == -1) { - Py_DECREF(temp); - return -1; - } - memcpy(ov, ptr, MIN(ap->descr->elsize,len)); - /* - * If string lenth is smaller than room in array - * Then fill the rest of the element size with NULL - */ - if (ap->descr->elsize > len) { - memset(ov + len, 0, (ap->descr->elsize - len)); - } - Py_DECREF(temp); - return 0; -} - -/* OBJECT */ - -#define __ALIGNED(obj, sz) ((((size_t) obj) % (sz))==0) - -static PyObject * -OBJECT_getitem(char *ip, PyArrayObject *ap) -{ - PyObject *obj; - NPY_COPY_PYOBJECT_PTR(&obj, ip); - if (obj == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - else { - Py_INCREF(obj); - return obj; - } -} - - -static int -OBJECT_setitem(PyObject *op, char *ov, PyArrayObject *ap) -{ - PyObject *obj; - - NPY_COPY_PYOBJECT_PTR(&obj, ov); - Py_XDECREF(obj); - - Py_INCREF(op); - NPY_COPY_PYOBJECT_PTR(ov, &op); - - return PyErr_Occurred() ? 
-1 : 0; -} - -/* VOID */ - -static PyObject * -VOID_getitem(char *ip, PyArrayObject *ap) -{ - PyObject *u = NULL; - PyArray_Descr* descr; - int itemsize; - - descr = ap->descr; - if (descr->names != NULL) { - PyObject *key; - PyObject *names; - int i, n; - PyObject *ret; - PyObject *tup, *title; - PyArray_Descr *new; - int offset; - int savedflags; - - /* get the names from the fields dictionary*/ - names = descr->names; - n = PyTuple_GET_SIZE(names); - ret = PyTuple_New(n); - savedflags = ap->flags; - for (i = 0; i < n; i++) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { - Py_DECREF(ret); - ap->descr = descr; - return NULL; - } - ap->descr = new; - /* update alignment based on offset */ - if ((new->alignment > 1) - && ((((intp)(ip+offset)) % new->alignment) != 0)) { - ap->flags &= ~ALIGNED; - } - else { - ap->flags |= ALIGNED; - } - PyTuple_SET_ITEM(ret, i, new->f->getitem(ip+offset, ap)); - ap->flags = savedflags; - } - ap->descr = descr; - return ret; - } - - if (descr->subarray) { - /* return an array of the basic type */ - PyArray_Dims shape = {NULL, -1}; - PyObject *ret; - - if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { - PyDimMem_FREE(shape.ptr); - PyErr_SetString(PyExc_ValueError, - "invalid shape in fixed-type tuple."); - return NULL; - } - Py_INCREF(descr->subarray->base); - ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, shape.len, shape.ptr, - NULL, ip, ap->flags&(~NPY_F_CONTIGUOUS), NULL); - PyDimMem_FREE(shape.ptr); - if (!ret) { - return NULL; - } - PyArray_BASE(ret) = (PyObject *)ap; - Py_INCREF(ap); - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - return ret; - } - - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) - || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "tried to get void-array with object members as buffer."); - return NULL; - } - itemsize = 
ap->descr->elsize; -#if defined(NPY_PY3K) - /* - * Return a byte array; there are no plain buffer objects on Py3 - */ - { - intp dims[1], strides[1]; - PyArray_Descr *descr; - dims[0] = itemsize; - strides[0] = 1; - descr = PyArray_DescrNewFromType(PyArray_BYTE); - u = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, strides, - ip, - PyArray_ISWRITEABLE(ap) ? NPY_WRITEABLE : 0, - NULL); - ((PyArrayObject*)u)->base = ap; - Py_INCREF(ap); - } -#else - /* - * default is to return buffer object pointing to - * current item a view of it - */ - if (PyArray_ISWRITEABLE(ap)) { - u = PyBuffer_FromReadWriteMemory(ip, itemsize); - } - else { - u = PyBuffer_FromMemory(ip, itemsize); - } -#endif - if (u == NULL) { - goto fail; - } - return u; - -fail: - return NULL; -} - - -NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *); - -static int -VOID_setitem(PyObject *op, char *ip, PyArrayObject *ap) -{ - PyArray_Descr* descr; - int itemsize=ap->descr->elsize; - int res; - - descr = ap->descr; - if (descr->names && PyTuple_Check(op)) { - PyObject *key; - PyObject *names; - int i, n; - PyObject *tup, *title; - PyArray_Descr *new; - int offset; - int savedflags; - - res = -1; - /* get the names from the fields dictionary*/ - names = descr->names; - n = PyTuple_GET_SIZE(names); - if (PyTuple_GET_SIZE(op) != n) { - PyErr_SetString(PyExc_ValueError, - "size of tuple must match number of fields."); - return -1; - } - savedflags = ap->flags; - for (i = 0; i < n; i++) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { - ap->descr = descr; - return -1; - } - ap->descr = new; - /* remember to update alignment flags */ - if ((new->alignment > 1) - && ((((intp)(ip+offset)) % new->alignment) != 0)) { - ap->flags &= ~ALIGNED; - } - else { - ap->flags |= ALIGNED; - } - res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap); - ap->flags = savedflags; - if (res < 0) { - break; - } - 
} - ap->descr = descr; - return res; - } - - if (descr->subarray) { - /* copy into an array of the same basic type */ - PyArray_Dims shape = {NULL, -1}; - PyObject *ret; - if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { - PyDimMem_FREE(shape.ptr); - PyErr_SetString(PyExc_ValueError, - "invalid shape in fixed-type tuple."); - return -1; - } - Py_INCREF(descr->subarray->base); - ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, shape.len, shape.ptr, - NULL, ip, ap->flags, NULL); - PyDimMem_FREE(shape.ptr); - if (!ret) { - return -1; - } - PyArray_BASE(ret) = (PyObject *)ap; - Py_INCREF(ap); - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - res = PyArray_CopyObject((PyArrayObject *)ret, op); - Py_DECREF(ret); - return res; - } - - /* Default is to use buffer interface to set item */ - { - const void *buffer; - Py_ssize_t buflen; - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) - || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "Setting void-array with object members using buffer."); - return -1; - } - res = PyObject_AsReadBuffer(op, &buffer, &buflen); - if (res == -1) { - goto fail; - } - memcpy(ip, buffer, NPY_MIN(buflen, itemsize)); - if (itemsize > buflen) { - memset(ip + buflen, 0, itemsize - buflen); - } - } - return 0; - -fail: - return -1; -} - -/* - * Acknowledgement: Example code contributed by Marty Fuhr sponsored by - * Google Summer of Code 2009 was used to integrate and adapt the mxDateTime - * parser - */ - -/* #include "datetime.c" --- now included in multiarray_onefile */ - - -/* DateTime Objects in Python only keep microsecond resolution. 
- * - * When converting from datetime objects with an event component return a - * tuple: * (baseunit, number of event) where baseunit follows is a datetime - * type and number of events is a Python integer - */ - - -/* - * Return a Python Datetime Object from a number representing the number of - * units since the epoch (1970-01-01T00:00:00Z) ignoring leap seconds. - */ - -NPY_NO_EXPORT PyObject * -PyDateTime_FromNormalized(npy_datetime val, NPY_DATETIMEUNIT base) -{ - npy_datetimestruct ydate; - - /* Must be here to use PyDateTime_FromDateAndTime */ - PyDateTime_IMPORT; - - /* We just truncate the unused variables and don't wory about overflow */ - PyArray_DatetimeToDatetimeStruct(val, base, &ydate); - - /* FIXME?: We discard ydate.ns, ydate.ps, ydate.fs, and ydate.as */ - return PyDateTime_FromDateAndTime(ydate.year, ydate.month, ydate.day, - ydate.hour, ydate.min, ydate.sec, - ydate.us); -} - -/* - * We also can lose precision and range here. Ignored. - * Don't use this function if you care. 
- */ - -NPY_NO_EXPORT PyObject * -PyTimeDelta_FromNormalized(npy_timedelta val, NPY_DATETIMEUNIT base) -{ - npy_timedeltastruct td; - - PyDateTime_IMPORT; - PyArray_TimedeltaToTimedeltaStruct(val, base, &td); - - /* We discard td.ps and td.as */ - return PyDelta_FromDSU(td.day, td.sec, td.us); -} - - -NPY_NO_EXPORT PyObject * -PyDateTime_FromInt64(datetime val, PyArray_Descr *descr) -{ - PyArray_DatetimeMetaData *meta; - - meta = PyDataType_GetDatetimeMetaData(descr); - if (meta == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "metadata not set for descriptor"); - return NULL; - } - - if (meta->events > 1) { - int events, rem, div; - PyObject *obj; - - obj = PyTuple_New(2); - events = meta->events; - div = val/events; - rem = val % events; - PyTuple_SET_ITEM(obj, 1, PyInt_FromLong(rem)); - /* This resets meta->events for recursive call */ - meta->events = 1; - PyTuple_SET_ITEM(obj, 0, PyDateTime_FromInt64(div, descr)); - meta->events = events; - if (PyErr_Occurred()) { - Py_DECREF(obj); - return NULL; - } - return obj; - } - - /* - * We normalize the number to a base-unit and then return a - * Python Datetime Object - * - * FIXME? : We silently truncate if it doesn't fit, either too - * wide (e.g. 10 BC) or too narrow (nanoseconds) - */ - - /* Normalization and then conversion to Datetime */ - /* FIXME? : Check for Overflow... 
*/ - return PyDateTime_FromNormalized(val*meta->num, meta->base); -} - - -NPY_NO_EXPORT PyObject * -PyTimeDelta_FromInt64(timedelta val, PyArray_Descr *descr) -{ - PyArray_DatetimeMetaData *meta; - meta = PyDataType_GetDatetimeMetaData(descr); - if (meta == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "metadata not set for descriptor"); - return NULL; - } - - if (meta->events > 1) { - int events, rem, div; - PyObject *obj; - - obj = PyTuple_New(2); - events = meta->events; - div = val/events; - rem = val % events; - PyTuple_SET_ITEM(obj, 1, PyInt_FromLong(rem)); - /* This resets meta->events for recursive call */ - meta->events = 1; - PyTuple_SET_ITEM(obj, 0, PyTimeDelta_FromInt64(div, descr)); - meta->events = events; - if (PyErr_Occurred()) { - Py_DECREF(obj); - return NULL; - } - return obj; - } - - /* FIXME? : Check for Overflow */ - return PyTimeDelta_FromNormalized(val*meta->num, meta->base); -} - - - -NPY_NO_EXPORT npy_datetime -PyDateTime_AsNormalized(PyObject *obj, NPY_DATETIMEUNIT base) -{ - npy_datetimestruct ydate; - - /* Must be here to use PyDateTime_FromDateAndTime */ - PyDateTime_IMPORT; - - if (!PyDateTime_Check(obj) && !PyDate_Check(obj)) { - PyErr_SetString(PyExc_ValueError, - "Must be a datetime.date or datetime.datetime object"); - return -1; - } - - ydate.year = PyDateTime_GET_YEAR(obj); - ydate.month = PyDateTime_GET_MONTH(obj); - ydate.day = PyDateTime_GET_DAY(obj); - - if (PyDateTime_Check(obj)) { - ydate.hour = PyDateTime_DATE_GET_HOUR(obj); - ydate.min = PyDateTime_DATE_GET_MINUTE(obj); - ydate.sec = PyDateTime_DATE_GET_SECOND(obj); - ydate.us = PyDateTime_DATE_GET_MICROSECOND(obj); - } - else { - ydate.hour = 0; - ydate.min = 0; - ydate.sec = 0; - ydate.us = 0; - } - - ydate.ps = 0; - ydate.as = 0; - - /* We just truncate the unused variables and don't wory about overflow */ - return PyArray_DatetimeStructToDatetime(base, &ydate); -} - -NPY_NO_EXPORT npy_timedelta -PyTimeDelta_AsNormalized(PyObject *obj, NPY_DATETIMEUNIT base) -{ - 
npy_timedeltastruct td; - - PyDateTime_IMPORT; - - if (!PyDelta_Check(obj)) { - PyErr_SetString(PyExc_ValueError, - "Must be a datetime.timedelta object"); - return -1; - } - - td.day = ((PyDateTime_Delta *)obj)->days; - td.sec = ((PyDateTime_Delta *)obj)->seconds; - td.us = ((PyDateTime_Delta *)obj)->microseconds; - td.ps = 0; - td.as = 0; - - return PyArray_TimedeltaStructToTimedelta(base, &td); -} - - -/* - * These expect a 2-tuple if meta->events > 1 (baseobj, num-counts) - * where baseobj is a datetime object or a timedelta object respectively. - * - */ - -NPY_NO_EXPORT npy_datetime -PyDateTime_AsInt64(PyObject *obj, PyArray_Descr *descr) -{ - PyArray_DatetimeMetaData *meta; - npy_datetime res; - - meta = PyDataType_GetDatetimeMetaData(descr); - if (meta == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "metadata not set for descriptor"); - return -1; - } - - - if (meta->events > 1) { - datetime tmp; - int events; - - if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 2) { - PyErr_SetString(PyExc_ValueError, - "need a 2-tuple on setting if events > 1"); - return -1; - } - /* Alter the dictionary and call again */ - /* FIXME: not thread safe */ - events = meta->events; - meta->events = 1; - tmp = PyDateTime_AsInt64(PyTuple_GET_ITEM(obj, 0), descr); - meta->events = events; - if (PyErr_Occurred()) { - return -1; - } - /* FIXME: Check for overflow */ - tmp *= events; - tmp += MyPyLong_AsLongLong(PyTuple_GET_ITEM(obj, 1)); - if (PyErr_Occurred()) { - return -1; - } - return tmp; - } - - res = PyDateTime_AsNormalized(obj, meta->base); - return res/meta->num; -} - - -NPY_NO_EXPORT timedelta -PyTimeDelta_AsInt64(PyObject *obj, PyArray_Descr *descr) -{ - PyArray_DatetimeMetaData *meta; - npy_timedelta res; - - meta = PyDataType_GetDatetimeMetaData(descr); - if (meta == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "metadata not set for descriptor"); - return -1; - } - - if (meta->events > 1) { - timedelta tmp; - int events; - - if (!PyTuple_Check(obj) || 
PyTuple_GET_SIZE(obj) != 2) { - PyErr_SetString(PyExc_ValueError, - "need a 2-tuple on setting if events > 1"); - return -1; - } - /* Alter the dictionary and call again (not thread safe) */ - events = meta->events; - meta->events = 1; - tmp = PyTimeDelta_AsInt64(PyTuple_GET_ITEM(obj, 0), descr); - meta->events = events; - if (PyErr_Occurred()) { - return -1; - } - /* FIXME: Check for overflow */ - tmp *= events; - tmp += MyPyLong_AsLongLong(PyTuple_GET_ITEM(obj, 1)); - if (PyErr_Occurred()) { - return -1; - } - return tmp; - } - - res = PyTimeDelta_AsNormalized(obj, meta->base); - return res / meta->num; -} - - -/* - * Always return DateTime Object after normalizing to basic units (or a tuple - * if meta->events > 1): - * - * Problem: DateTime does not support all the resolutions (ns) nor the - * dynamic range (pre 1 AD) of NumPy Date-times. - * - * getitem is not used that much --- if losing resolution hurts, stick - * with the array scalar versions of the date-time. - * - * considered returning array scalars here just like longdouble. This has the - * problem of recursion in some cases (because in a few places the code - * expects getitem to return a Python-system object) - * - * considered returning different things depending on the resolution but this - * would make it hard to write generic code --- but do you need to write - * generic code on all the frequencies because they cover a wide range. - * - * Solution: The use-case of actually wanting a date-time object when the - * resolution and dynamic range match, make it the compelling default. When it - * does fails, there are alternatives for the programmer to use. - * - * New question: Should we change (c)longdouble at this point? to return Python Float? 
- */ - -static PyObject * -DATETIME_getitem(char *ip, PyArrayObject *ap) { - datetime t1; - - if ((ap == NULL) || PyArray_ISBEHAVED_RO(ap)) { - t1 = *((datetime *)ip); - return PyDateTime_FromInt64((datetime)t1, ap->descr); - } - else { - ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap); - return PyDateTime_FromInt64((datetime)t1, ap->descr); - } -} - - -static PyObject * -TIMEDELTA_getitem(char *ip, PyArrayObject *ap) { - timedelta t1; - - if ((ap == NULL) || PyArray_ISBEHAVED_RO(ap)) { - t1 = *((timedelta *)ip); - return PyTimeDelta_FromInt64((timedelta)t1, ap->descr); - } - else { - ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap); - return PyTimeDelta_FromInt64((timedelta)t1, ap->descr); - } -} - -/* FIXME: - * This needs to take - * 1) Integers and Longs (anything that can be converted to an Int) - * 2) Strings (ISO-style dates) - * 3) Datetime Scalars (that it converts based on scalar dtype. - * 4) Datetime and Date objects - * Plus a tuple for meta->events > 1 - * - * 3) is partially implemented, 4) is implemented - */ - -static int -DATETIME_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - /* ensure alignment */ - datetime temp; - - if (PyArray_IsScalar(op, Datetime)) { - /* This needs to convert based on type */ - temp = ((PyDatetimeScalarObject *)op)->obval; - } -#if defined(NPY_PY3K) - else if (PyUString_Check(op)) { -#else - else if (PyUString_Check(op) || PyUnicode_Check(op)) { -#endif - /* FIXME: Converts to DateTime first and therefore does not handle extended notation */ - /* import _mx_datetime_parser - * res = _mx_datetime_parser(name) - * Convert from datetime to Int - */ - PyObject *res, *module; - - module = PyImport_ImportModule("numpy.core._mx_datetime_parser"); - if (module == NULL) { return -1; } - res = PyObject_CallMethod(module, "datetime_from_string", "O", op); - Py_DECREF(module); - if (res == NULL) { return -1; } - temp = PyDateTime_AsInt64(res, ap->descr); - Py_DECREF(res); - if (PyErr_Occurred()) 
return -1; - } - else if (PyInt_Check(op)) { - temp = PyInt_AS_LONG(op); - } - else if (PyLong_Check(op)) { - temp = PyLong_AsLongLong(op); - } - else { - temp = PyDateTime_AsInt64(op, ap->descr); - } - if (PyErr_Occurred()) { - if (PySequence_Check(op)) { - PyErr_Clear(); - PyErr_SetString(PyExc_ValueError, _SEQUENCE_MESSAGE); - } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) - *((datetime *)ov)=temp; - else { - ap->descr->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); - } - return 0; -} - -/* FIXME: This needs to take - * 1) Integers and Longs (anything that can be converted to an Int) - * 2) Timedelta scalar objects (with resolution conversion) - * 3) Python Timedelta objects - * - * Plus a tuple for meta->events > 1 - */ - -static int -TIMEDELTA_setitem(PyObject *op, char *ov, PyArrayObject *ap) { - /* ensure alignment */ - timedelta temp; - - if (PyArray_IsScalar(op, Timedelta)) { - temp = ((PyTimedeltaScalarObject *)op)->obval; - } - else if (PyInt_Check(op)) { - temp = PyInt_AS_LONG(op); - } - else if (PyLong_Check(op)) { - temp = PyLong_AsLongLong(op); - } - else { - temp = PyTimeDelta_AsInt64(op, ap->descr); - } - if (PyErr_Occurred()) { - if (PySequence_Check(op)) { - PyErr_Clear(); - PyErr_SetString(PyExc_ValueError, _SEQUENCE_MESSAGE); - } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) - *((timedelta *)ov)=temp; - else { - ap->descr->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); - } - return 0; -} - - -/* - ***************************************************************************** - ** TYPE TO TYPE CONVERSIONS ** - ***************************************************************************** - */ - - -/* Assumes contiguous, and aligned, from and to */ - - -/**begin repeat - * - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, DATETIME, - * TIMEDELTA# - * #totype = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, 
float, double, longdouble, datetime, - * timedelta# - */ - -/**begin repeat1 - * - * #FROMTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, DATETIME, - * TIMEDELTA# - * #fromtype = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, float, double, longdouble, datetime, - * timedelta# - */ -static void -@FROMTYPE@_to_@TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (@totype@)*ip++; - } -} -/**end repeat1**/ - -/**begin repeat1 - * - * #FROMTYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #fromtype = float, double, longdouble# - */ -static void -@FROMTYPE@_to_@TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (@totype@)*ip; - ip += 2; - } -} -/**end repeat1**/ - -/**end repeat**/ - - -/**begin repeat - * - * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, LONGDOUBLE, DATETIME, - * TIMEDELTA# - * #type = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, longdouble, datetime, - * timedelta# - */ - -static void -@TYPE@_to_HALF(@type@ *ip, npy_half *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = npy_float_to_half((float)(*ip++)); - } -} - -static void -HALF_to_@TYPE@(npy_half *ip, @type@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (@type@)npy_half_to_float(*ip++); - } -} - -/**end repeat**/ -#if SIZEOF_SHORT == 2 -#define HALF_to_HALF SHORT_to_SHORT -#elif SIZEOF_INT == 2 -#define HALF_to_HALF INT_to_INT -#endif - -/**begin repeat - * - * #TYPE = FLOAT, DOUBLE, CFLOAT, CDOUBLE# - * #type = float, double, float, double# - * #itype = npy_uint32, npy_uint64, npy_uint32, npy_uint64# - * #iscomplex = 0, 0, 1, 1# - */ - -static void 
-@TYPE@_to_HALF(@itype@ *ip, npy_half *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = npy_@type@bits_to_halfbits(*ip); -#if @iscomplex@ - ip += 2; -#else - ip++; -#endif - } -} - -static void -HALF_to_@TYPE@(npy_half *ip, @itype@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = npy_halfbits_to_@type@bits(*ip++); -#if @iscomplex@ - *op++ = 0; -#endif - } -} - -/**end repeat**/ - -static void -CLONGDOUBLE_to_HALF(longdouble *ip, npy_half *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = npy_double_to_half((double) (*ip++)); - ip += 2; - } -} - -static void -HALF_to_CLONGDOUBLE(npy_half *ip, longdouble *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = npy_half_to_double(*ip++); - *op++ = 0; - } -} - -/**begin repeat - * - * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, DATETIME, - * TIMEDELTA# - * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, float, double, longdouble, datetime, - * timedelta# -*/ -static void -@FROMTYPE@_to_BOOL(@fromtype@ *ip, Bool *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (Bool)(*ip++ != FALSE); - } -} -/**end repeat**/ - -static void -HALF_to_BOOL(npy_half *ip, Bool *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (Bool)(!npy_half_iszero(*ip++)); - } -} - -/**begin repeat - * - * #FROMTYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #fromtype = cfloat, cdouble, clongdouble# -*/ -static void -@FROMTYPE@_to_BOOL(@fromtype@ *ip, Bool *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op = (Bool)(((*ip).real != FALSE) || ((*ip).imag 
!= FALSE)); - op++; - ip++; - } -} -/**end repeat**/ - -/**begin repeat - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, DATETIME, - * TIMEDELTA# - * #totype = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, datetime, - * timedelta# - * #one = 1*10, NPY_HALF_ONE, 1*5# - * #zero = 0*10, NPY_HALF_ZERO, 0*5# - */ -static void -BOOL_to_@TOTYPE@(Bool *ip, @totype@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (@totype@)((*ip++ != FALSE) ? @one@ : @zero@); - } -} -/**end repeat**/ - -/**begin repeat - * - * #TOTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# - * #totype = float, double, longdouble# - */ - -/**begin repeat1 - * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, DATETIME, - * TIMEDELTA# - * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, float, double, longdouble, datetime, - * timedelta# - */ -static void -@FROMTYPE@_to_@TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - while (n--) { - *op++ = (@totype@)*ip++; - *op++ = 0.0; - } - -} -/**end repeat1**/ -/**end repeat**/ - -/**begin repeat - * - * #TOTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# - * #totype = float, double, longdouble# - */ - -/**begin repeat1 - * #FROMTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# - * #fromtype = float, double, longdouble# - */ -static void -@FROMTYPE@_to_@TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, - PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) -{ - n <<= 1; - while (n--) { - *op++ = (@totype@)*ip++; - } -} - -/**end repeat1**/ -/**end repeat**/ - -/**begin repeat - * - * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * 
CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT, - * DATETIME, TIMEDELTA# - * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * cfloat, cdouble, clongdouble, char, char, char, PyObject *, - * datetime, timedelta# - * #skip = 1*18, aip->descr->elsize*3, 1*3# - */ -static void -@FROMTYPE@_to_OBJECT(@fromtype@ *ip, PyObject **op, intp n, PyArrayObject *aip, - PyArrayObject *NPY_UNUSED(aop)) -{ - intp i; - int skip = @skip@; - for (i = 0; i < n; i++, ip +=skip, op++) { - Py_XDECREF(*op); - *op = @FROMTYPE@_getitem((char *)ip, aip); - } -} -/**end repeat**/ - -#define _NPY_UNUSEDBOOL NPY_UNUSED -#define _NPY_UNUSEDBYTE NPY_UNUSED -#define _NPY_UNUSEDUBYTE NPY_UNUSED -#define _NPY_UNUSEDSHORT NPY_UNUSED -#define _NPY_UNUSEDUSHORT NPY_UNUSED -#define _NPY_UNUSEDINT NPY_UNUSED -#define _NPY_UNUSEDUINT NPY_UNUSED -#define _NPY_UNUSEDLONG NPY_UNUSED -#define _NPY_UNUSEDULONG NPY_UNUSED -#define _NPY_UNUSEDLONGLONG NPY_UNUSED -#define _NPY_UNUSEDULONGLONG NPY_UNUSED -#define _NPY_UNUSEDHALF NPY_UNUSED -#define _NPY_UNUSEDFLOAT NPY_UNUSED -#define _NPY_UNUSEDDOUBLE NPY_UNUSED -#define _NPY_UNUSEDLONGDOUBLE NPY_UNUSED -#define _NPY_UNUSEDCFLOAT NPY_UNUSED -#define _NPY_UNUSEDCDOUBLE NPY_UNUSED -#define _NPY_UNUSEDCLONGDOUBLE NPY_UNUSED -#define _NPY_UNUSEDDATETIME NPY_UNUSED -#define _NPY_UNUSEDTIMEDELTA NPY_UNUSED -#define _NPY_UNUSEDHALF NPY_UNUSED -#define _NPY_UNUSEDSTRING -#define _NPY_UNUSEDVOID -#define _NPY_UNUSEDUNICODE - -/**begin repeat - * - * #TOTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID, DATETIME, - * TIMEDELTA# - * #totype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * cfloat, cdouble, clongdouble, char, char, char, datetime, - * timedelta# - * #skip = 
1*18, aop->descr->elsize*3, 1*2# - */ -static void -OBJECT_to_@TOTYPE@(PyObject **ip, @totype@ *op, intp n, - PyArrayObject *_NPY_UNUSED@TOTYPE@(aip), PyArrayObject *aop) -{ - intp i; - int skip = @skip@; - - for (i = 0; i < n; i++, ip++, op += skip) { - if (*ip == NULL) { - @TOTYPE@_setitem(Py_False, (char *)op, aop); - } - else { - @TOTYPE@_setitem(*ip, (char *)op, aop); - } - } -} -/**end repeat**/ - - -/**begin repeat - * - * #from = STRING*23, UNICODE*23, VOID*23# - * #fromtyp = char*69# - * #to = (BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID, DATETIME, TIMEDELTA)*3# - * #totyp = (Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, npy_half, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char, datetime, timedelta)*3# - * #oskip = (1*18,aop->descr->elsize*3,1*2)*3# - * #convert = 1*18, 0*3, 1*2, 1*18, 0*3, 1*2, 0*23# - * #convstr = (Int*9, Long*2, Float*4, Complex*3, Tuple*3, Long*2)*3# - */ -static void -@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, - PyArrayObject *aop) -{ - intp i; - PyObject *temp = NULL; - int skip = aip->descr->elsize; - int oskip = @oskip@; - - for (i = 0; i < n; i++, ip+=skip, op+=oskip) { - temp = @from@_getitem((char *)ip, aip); - if (temp == NULL) { - return; - } - /* convert from Python object to needed one */ -#if @convert@ - { - PyObject *new, *args; - /* call out to the Python builtin given by convstr */ - args = Py_BuildValue("(N)", temp); -#if defined(NPY_PY3K) -#define PyInt_Type PyLong_Type -#endif - new = Py@convstr@_Type.tp_new(&Py@convstr@_Type, args, NULL); -#if defined(NPY_PY3K) -#undef PyInt_Type -#endif - Py_DECREF(args); - temp = new; - if (temp == NULL) { - return; - } - } -#endif /* @convert@ */ - if (@to@_setitem(temp,(char *)op, aop)) { - Py_DECREF(temp); - return; - } - Py_DECREF(temp); - } -} -/**end repeat**/ - - 
-/**begin repeat - * - * #to = STRING*20, UNICODE*20, VOID*20# - * #totyp = char*20, char*20, char*20# - * #from = (BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA)*3# - * #fromtyp = (Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * cfloat, cdouble, clongdouble, datetime, timedelta)*3# - */ -static void -@from@_to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, - PyArrayObject *aop) -{ - intp i; - PyObject *temp = NULL; - int skip = 1; - int oskip = aop->descr->elsize; - for (i = 0; i < n; i++, ip += skip, op += oskip) { - temp = @from@_getitem((char *)ip, aip); - if (temp == NULL) { - Py_INCREF(Py_False); - temp = Py_False; - } - if (@to@_setitem(temp,(char *)op, aop)) { - Py_DECREF(temp); - return; - } - Py_DECREF(temp); - } -} - -/**end repeat**/ - - -/* - ***************************************************************************** - ** SCAN ** - ***************************************************************************** - */ - - -/* - * The first ignore argument is for backwards compatibility. - * Should be removed when the API version is bumped up. 
- */ - -/**begin repeat - * #fname = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# - * #type = short, ushort, int, uint, long, ulong, longlong, ulonglong# - * #format = "hd", "hu", "d", "u", "ld", "lu", LONGLONG_FMT, ULONGLONG_FMT# - */ -static int -@fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) -{ - return fscanf(fp, "%"@format@, ip); -} -/**end repeat**/ - -/**begin repeat - * #fname = FLOAT, DOUBLE, LONGDOUBLE# - * #type = float, double, longdouble# - */ -static int -@fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) -{ - double result; - int ret; - - ret = NumPyOS_ascii_ftolf(fp, &result); - *ip = (@type@) result; - return ret; -} -/**end repeat**/ - -static int -HALF_scan(FILE *fp, npy_half *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) -{ - double result; - int ret; - - ret = NumPyOS_ascii_ftolf(fp, &result); - *ip = npy_double_to_half(result); - return ret; -} - -/**begin repeat - * #fname = BYTE, UBYTE# - * #type = byte, ubyte# - * #btype = int, uint# - * #format = "d", "u"# - */ -static int -@fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) -{ - @btype@ temp; - int num; - - num = fscanf(fp, "%"@format@, &temp); - *ip = (@type@) temp; - return num; -} -/**end repeat**/ - -static int -BOOL_scan(FILE *fp, Bool *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) -{ - double result; - int ret; - - ret = NumPyOS_ascii_ftolf(fp, &result); - *ip = (Bool) (result != 0.0); - return ret; -} - -/**begin repeat - * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID, - * DATETIME, TIMEDELTA# - */ -#define @fname@_scan NULL -/**end repeat**/ - - -/* - ***************************************************************************** - ** FROMSTR ** - ***************************************************************************** - */ - - -/**begin repeat - * #fname = 
BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, - * ULONGLONG, DATETIME, TIMEDELTA# - * #type = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, - * ulonglong, datetime, timedelta# - * #func = (l, ul)*5, l, l# - * #btype = (long, ulong)*5, long, long# - */ -static int -@fname@_fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) -{ - @btype@ result; - - result = PyOS_strto@func@(str, endptr, 10); - *ip = (@type@) result; - return 0; -} -/**end repeat**/ - -/**begin repeat - * - * #fname=FLOAT,DOUBLE,LONGDOUBLE# - * #type=float,double,longdouble# - */ -static int -@fname@_fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) -{ - double result; - - result = NumPyOS_ascii_strtod(str, endptr); - *ip = (@type@) result; - return 0; -} -/**end repeat**/ - -static int -HALF_fromstr(char *str, npy_half *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) -{ - double result; - - result = NumPyOS_ascii_strtod(str, endptr); - *ip = npy_double_to_half(result); - return 0; -} - -static int -BOOL_fromstr(char *str, Bool *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) -{ - double result; - - result = NumPyOS_ascii_strtod(str, endptr); - *ip = (Bool) (result != 0.0); - return 0; -} - -/**begin repeat - * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID# - */ -#define @fname@_fromstr NULL -/**end repeat**/ - - -/* - ***************************************************************************** - ** COPYSWAPN ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #fname = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, HALF, FLOAT, - * DOUBLE, LONGDOUBLE, DATETIME, TIMEDELTA# - * #fsize = SHORT, SHORT, INT, INT, LONG, LONG, LONGLONG, LONGLONG, HALF, FLOAT, - * DOUBLE, LONGDOUBLE, DATETIME, TIMEDELTA# - * #type = short, ushort, int, uint, long, ulong, longlong, ulonglong, npy_half, float, - * double, 
longdouble, datetime, timedelta# - */ -static void -@fname@_copyswapn (void *dst, intp dstride, void *src, intp sstride, - intp n, int swap, void *NPY_UNUSED(arr)) -{ - if (src != NULL) { - if (sstride == sizeof(@type@) && dstride == sizeof(@type@)) { - memcpy(dst, src, n*sizeof(@type@)); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, sstride, - n, sizeof(@type@)); - } - } - if (swap) { - _strided_byte_swap(dst, dstride, n, sizeof(@type@)); - } -} - -static void -@fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) -{ - - if (src != NULL) { - /* copy first if needed */ - memcpy(dst, src, sizeof(@type@)); - } - if (swap) { - char *a, *b, c; - - a = (char *)dst; -#if SIZEOF_@fsize@ == 2 - b = a + 1; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 4 - b = a + 3; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 8 - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 10 - b = a + 9; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 12 - b = a + 11; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 16 - b = a + 15; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#else - { - int i, nn; - - b = a + (SIZEOF_@fsize@-1); - nn = SIZEOF_@fsize@ / 2; - for (i = 0; i < nn; i++) { - c = *a; - *a++ = *b; - *b-- = c; - } - } -#endif - } -} - -/**end repeat**/ - -/**begin repeat - * - * #fname = BOOL, BYTE, UBYTE# - * #type = 
Bool, byte, ubyte# - */ -static void -@fname@_copyswapn (void *dst, intp dstride, void *src, intp sstride, intp n, - int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) -{ - if (src != NULL) { - if (sstride == sizeof(@type@) && dstride == sizeof(@type@)) { - memcpy(dst, src, n*sizeof(@type@)); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, sstride, - n, sizeof(@type@)); - } - } - /* ignore swap */ -} - -static void -@fname@_copyswap (void *dst, void *src, int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) -{ - if (src != NULL) { - /* copy first if needed */ - memcpy(dst, src, sizeof(@type@)); - } - /* ignore swap */ -} - -/**end repeat**/ - - - -/**begin repeat - * - * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = cfloat, cdouble, clongdouble# - * #fsize = FLOAT, DOUBLE, LONGDOUBLE# -*/ -static void -@fname@_copyswapn (void *dst, intp dstride, void *src, intp sstride, intp n, - int swap, void *NPY_UNUSED(arr)) -{ - - if (src != NULL) { - /* copy first if needed */ - if (sstride == sizeof(@type@) && dstride == sizeof(@type@)) { - memcpy(dst, src, n*sizeof(@type@)); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, sstride, n, - sizeof(@type@)); - } - } - - if (swap) { - _strided_byte_swap(dst, dstride, n, SIZEOF_@fsize@); - _strided_byte_swap(((char *)dst + SIZEOF_@fsize@), dstride, - n, SIZEOF_@fsize@); - } -} - -static void -@fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) -{ - if (src != NULL) /* copy first if needed */ - memcpy(dst, src, sizeof(@type@)); - - if (swap) { - char *a, *b, c; - a = (char *)dst; -#if SIZEOF_@fsize@ == 4 - b = a + 3; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; - a += 2; - b = a + 3; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 8 - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; - a += 4; - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; 
*b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 10 - b = a + 9; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; - a += 5; - b = a + 9; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 12 - b = a + 11; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; - a += 6; - b = a + 11; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#elif SIZEOF_@fsize@ == 16 - b = a + 15; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; - a += 8; - b = a + 15; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; -#else - { - int i, nn; - - b = a + (SIZEOF_@fsize@ - 1); - nn = SIZEOF_@fsize@ / 2; - for (i = 0; i < nn; i++) { - c = *a; - *a++ = *b; - *b-- = c; - } - a += nn / 2; - b = a + (SIZEOF_@fsize@ - 1); - nn = SIZEOF_@fsize@ / 2; - for (i = 0; i < nn; i++) { - c = *a; - *a++ = *b; - *b-- = c; - } - } -#endif - } -} - -/**end repeat**/ - -static void -OBJECT_copyswapn (PyObject **dst, intp dstride, PyObject **src, intp sstride, - intp n, int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) -{ - intp i; - if (src != NULL) { - if (__ALIGNED(dst, sizeof(PyObject **)) - && 
__ALIGNED(src, sizeof(PyObject **)) - && __ALIGNED(dstride, sizeof(PyObject **)) - && __ALIGNED(sstride, sizeof(PyObject **))) { - dstride /= sizeof(PyObject **); - sstride /= sizeof(PyObject **); - for (i = 0; i < n; i++) { - Py_XINCREF(*src); - Py_XDECREF(*dst); - *dst = *src; - dst += dstride; - src += sstride; - } - } - else { - unsigned char *dstp, *srcp; - PyObject *tmp; - dstp = (unsigned char*)dst; - srcp = (unsigned char*)src; - for (i = 0; i < n; i++) { - NPY_COPY_PYOBJECT_PTR(&tmp, dstp); - Py_XDECREF(tmp); - NPY_COPY_PYOBJECT_PTR(&tmp, srcp); - Py_XINCREF(tmp); - NPY_COPY_PYOBJECT_PTR(dstp, srcp); - dstp += dstride; - srcp += sstride; - } - } - } - /* ignore swap */ - return; -} - -static void -OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) -{ - - if (src != NULL) { - if (__ALIGNED(dst,sizeof(PyObject **)) && __ALIGNED(src,sizeof(PyObject **))) { - Py_XINCREF(*src); - Py_XDECREF(*dst); - *dst = *src; - } - else { - PyObject *tmp; - NPY_COPY_PYOBJECT_PTR(&tmp, dst); - Py_XDECREF(tmp); - NPY_COPY_PYOBJECT_PTR(&tmp, src); - Py_XINCREF(tmp); - NPY_COPY_PYOBJECT_PTR(dst, src); - } - } -} - -/* ignore swap */ -static void -STRING_copyswapn (char *dst, intp dstride, char *src, intp sstride, - intp n, int NPY_UNUSED(swap), PyArrayObject *arr) -{ - if (src != NULL && arr != NULL) { - int itemsize = arr->descr->elsize; - - if (dstride == itemsize && sstride == itemsize) { - memcpy(dst, src, itemsize * n); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, sstride, n, - itemsize); - } - } - return; -} - -/* */ -static void -VOID_copyswapn (char *dst, intp dstride, char *src, intp sstride, - intp n, int swap, PyArrayObject *arr) -{ - if (arr == NULL) { - return; - } - if (PyArray_HASFIELDS(arr)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new, *descr; - int offset; - Py_ssize_t pos = 0; - - descr = arr->descr; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, 
value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - arr->descr = descr; - return; - } - arr->descr = new; - new->f->copyswapn(dst+offset, dstride, - (src != NULL ? src+offset : NULL), - sstride, n, swap, arr); - } - arr->descr = descr; - return; - } - if (swap && arr->descr->subarray != NULL) { - PyArray_Descr *descr, *new; - npy_intp num; - npy_intp i; - int subitemsize; - char *dstptr, *srcptr; - - descr = arr->descr; - new = descr->subarray->base; - arr->descr = new; - dstptr = dst; - srcptr = src; - subitemsize = new->elsize; - num = descr->elsize / subitemsize; - for (i = 0; i < n; i++) { - new->f->copyswapn(dstptr, subitemsize, srcptr, - subitemsize, num, swap, arr); - dstptr += dstride; - if (srcptr) { - srcptr += sstride; - } - } - arr->descr = descr; - return; - } - if (src != NULL) { - memcpy(dst, src, arr->descr->elsize * n); - } - return; -} - -static void -VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) -{ - if (arr == NULL) { - return; - } - if (PyArray_HASFIELDS(arr)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new, *descr; - int offset; - Py_ssize_t pos = 0; - - descr = arr->descr; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - arr->descr = descr; - return; - } - arr->descr = new; - new->f->copyswap(dst+offset, - (src != NULL ? 
src+offset : NULL), - swap, arr); - } - arr->descr = descr; - return; - } - if (swap && arr->descr->subarray != NULL) { - PyArray_Descr *descr, *new; - npy_intp num; - int itemsize; - - descr = arr->descr; - new = descr->subarray->base; - arr->descr = new; - itemsize = new->elsize; - num = descr->elsize / itemsize; - new->f->copyswapn(dst, itemsize, src, - itemsize, num, swap, arr); - arr->descr = descr; - return; - } - if (src != NULL) { - memcpy(dst, src, arr->descr->elsize); - } - return; -} - - -static void -UNICODE_copyswapn (char *dst, intp dstride, char *src, intp sstride, - intp n, int swap, PyArrayObject *arr) -{ - int itemsize; - - if (arr == NULL) { - return; - } - itemsize = arr->descr->elsize; - if (src != NULL) { - if (dstride == itemsize && sstride == itemsize) { - memcpy(dst, src, n * itemsize); - } - else { - _unaligned_strided_byte_copy(dst, dstride, src, - sstride, n, itemsize); - } - } - - n *= itemsize; - if (swap) { - char *a, *b, c; - - /* n is the number of unicode characters to swap */ - n >>= 2; - for (a = (char *)dst; n > 0; n--) { - b = a + 3; - c = *a; - *a++ = *b; - *b-- = c; - c = *a; - *a++ = *b; - *b-- = c; - a += 2; - } - } -} - - -static void -STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr) -{ - if (src != NULL && arr != NULL) { - memcpy(dst, src, arr->descr->elsize); - } -} - -static void -UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) -{ - int itemsize; - - if (arr == NULL) { - return; - } - itemsize = arr->descr->elsize; - if (src != NULL) { - memcpy(dst, src, itemsize); - } - - if (swap) { - char *a, *b, c; - itemsize >>= 2; - for (a = (char *)dst; itemsize>0; itemsize--) { - b = a + 3; - c = *a; - *a++ = *b; - *b-- = c; - c = *a; - *a++ = *b; - *b-- = c; - a += 2; - } - } -} - - -/* - ***************************************************************************** - ** NONZERO ** - ***************************************************************************** - */ - -#define 
_NONZERO(a) ((a) != 0) - -/**begin repeat - * - * #fname = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * DATETIME, TIMEDELTA# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * datetime, timedelta# - * #isfloat = 0*11, 1*4, 0*2# - * #nonzero = _NONZERO*11, !npy_half_iszero, _NONZERO*5# - */ -static Bool -@fname@_nonzero (char *ip, PyArrayObject *ap) -{ - if (ap == NULL || PyArray_ISBEHAVED_RO(ap)) { - @type@ *ptmp = (@type@ *)ip; - return (Bool) @nonzero@(*ptmp); - } - else { - /* - * Don't worry about swapping for integer types, - * since we are just testing for equality with 0. - * For float types, the signed zeros require us to swap. - */ - @type@ tmp; -#if @isfloat@ - ap->descr->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap); -#else - memcpy(&tmp, ip, sizeof(@type@)); -#endif - return (Bool) @nonzero@(tmp); - } -} -/**end repeat**/ - -/**begin repeat - * - * #fname=CFLOAT,CDOUBLE,CLONGDOUBLE# - * #type=cfloat, cdouble, clongdouble# - */ -static Bool -@fname@_nonzero (char *ip, PyArrayObject *ap) -{ - if (ap == NULL || PyArray_ISBEHAVED_RO(ap)) { - @type@ *ptmp = (@type@ *)ip; - return (Bool) ((ptmp->real != 0) || (ptmp->imag != 0)); - } - else { - @type@ tmp; - ap->descr->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap); - return (Bool) ((tmp.real != 0) || (tmp.imag != 0)); - } -} -/**end repeat**/ - - -#define WHITESPACE " \t\n\r\v\f" -#define WHITELEN 6 - -static Bool -Py_STRING_ISSPACE(char ch) -{ - char white[] = WHITESPACE; - int j; - Bool space = FALSE; - - for (j = 0; j < WHITELEN; j++) { - if (ch == white[j]) { - space = TRUE; - break; - } - } - return space; -} - -static Bool -STRING_nonzero (char *ip, PyArrayObject *ap) -{ - int len = ap->descr->elsize; - int i; - Bool nonz = FALSE; - - for (i = 0; i < len; i++) { - if (!Py_STRING_ISSPACE(*ip)) { - nonz = TRUE; - break; - } - ip++; - } 
- return nonz; -} - -#ifdef Py_UNICODE_WIDE -#define PyArray_UCS4_ISSPACE Py_UNICODE_ISSPACE -#else -#define PyArray_UCS4_ISSPACE(ch) Py_STRING_ISSPACE((char)ch) -#endif - -static Bool -UNICODE_nonzero (PyArray_UCS4 *ip, PyArrayObject *ap) -{ - int len = ap->descr->elsize >> 2; - int i; - Bool nonz = FALSE; - char *buffer = NULL; - - if ((!PyArray_ISNOTSWAPPED(ap)) || (!PyArray_ISALIGNED(ap))) { - buffer = _pya_malloc(ap->descr->elsize); - if (buffer == NULL) { - return nonz; - } - memcpy(buffer, ip, ap->descr->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) { - byte_swap_vector(buffer, len, 4); - } - ip = (PyArray_UCS4 *)buffer; - } - - for (i = 0; i < len; i++) { - if (!PyArray_UCS4_ISSPACE(*ip)) { - nonz = TRUE; - break; - } - ip++; - } - _pya_free(buffer); - return nonz; -} - -static Bool -OBJECT_nonzero (PyObject **ip, PyArrayObject *ap) -{ - - if (PyArray_ISALIGNED(ap)) { - if (*ip == NULL) { - return FALSE; - } - return (Bool) PyObject_IsTrue(*ip); - } - else { - PyObject *obj; - NPY_COPY_PYOBJECT_PTR(&obj, ip); - if (obj == NULL) { - return FALSE; - } - return (Bool) PyObject_IsTrue(obj); - } -} - -/* - * if we have fields, then nonzero only if all sub-fields are nonzero. 
- */ -static Bool -VOID_nonzero (char *ip, PyArrayObject *ap) -{ - int i; - int len; - Bool nonz = FALSE; - - if (PyArray_HASFIELDS(ap)) { - PyArray_Descr *descr, *new; - PyObject *key, *value, *title; - int savedflags, offset; - Py_ssize_t pos = 0; - - descr = ap->descr; - savedflags = ap->flags; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - PyErr_Clear(); - continue; - } - ap->descr = new; - ap->flags = savedflags; - if ((new->alignment > 1) && !__ALIGNED(ip+offset, new->alignment)) { - ap->flags &= ~ALIGNED; - } - else { - ap->flags |= ALIGNED; - } - if (new->f->nonzero(ip+offset, ap)) { - nonz = TRUE; - break; - } - } - ap->descr = descr; - ap->flags = savedflags; - return nonz; - } - len = ap->descr->elsize; - for (i = 0; i < len; i++) { - if (*ip != '\0') { - nonz = TRUE; - break; - } - ip++; - } - return nonz; -} - -#undef __ALIGNED - - -/* - ***************************************************************************** - ** COMPARE ** - ***************************************************************************** - */ - - -/* boolean type */ - -static int -BOOL_compare(Bool *ip1, Bool *ip2, PyArrayObject *NPY_UNUSED(ap)) -{ - return (*ip1 ? (*ip2 ? 0 : 1) : (*ip2 ? -1 : 0)); -} - - -/* integer types */ - -/**begin repeat - * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, DATETIME, TIMEDELTA# - * #type = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, datetime, timedelta# - */ - -static int -@TYPE@_compare (@type@ *pa, @type@ *pb, PyArrayObject *NPY_UNUSED(ap)) -{ - const @type@ a = *pa; - const @type@ b = *pb; - - return a < b ? -1 : a == b ? 0 : 1; -} - -/**end repeat**/ - - -/* float types */ - -/* - * The real/complex comparison functions are compatible with the new sort - * order for nans introduced in numpy 1.4.0. 
All nan values now compare - * larger than non-nan values and are sorted to the end. The comparison - * order is: - * - * Real: [R, nan] - * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] - * - * where complex values with the same nan placements are sorted according - * to the non-nan part if it exists. If both the real and imaginary parts - * of complex types are non-nan the order is the same as the real parts - * unless they happen to be equal, in which case the order is that of the - * imaginary parts. - */ - -/**begin repeat - * - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# - * #type = float, double, longdouble# - */ - -#define LT(a,b) ((a) < (b) || ((b) != (b) && (a) ==(a))) - -static int -@TYPE@_compare(@type@ *pa, @type@ *pb) -{ - const @type@ a = *pa; - const @type@ b = *pb; - int ret; - - if (LT(a,b)) { - ret = -1; - } - else if (LT(b,a)) { - ret = 1; - } - else { - ret = 0; - } - return ret; -} - - -static int -C@TYPE@_compare(@type@ *pa, @type@ *pb) -{ - const @type@ ar = pa[0]; - const @type@ ai = pa[1]; - const @type@ br = pb[0]; - const @type@ bi = pb[1]; - int ret; - - if (ar < br) { - if (ai == ai || bi != bi) { - ret = -1; - } - else { - ret = 1; - } - } - else if (br < ar) { - if (bi == bi || ai != ai) { - ret = 1; - } - else { - ret = -1; - } - } - else if (ar == br || (ar != ar && br != br)) { - if (LT(ai,bi)) { - ret = -1; - } - else if (LT(bi,ai)) { - ret = 1; - } - else { - ret = 0; - } - } - else if (ar == ar) { - ret = -1; - } - else { - ret = 1; - } - - return ret; -} - -#undef LT - -/**end repeat**/ - -static int -HALF_compare (npy_half *pa, npy_half *pb, PyArrayObject *NPY_UNUSED(ap)) -{ - npy_half a = *pa, b = *pb; - Bool a_isnan, b_isnan; - int ret; - - a_isnan = npy_half_isnan(a); - b_isnan = npy_half_isnan(b); - - if (a_isnan) { - ret = b_isnan ? 
0 : -1; - } else if (b_isnan) { - ret = 1; - } else if(npy_half_lt_nonan(a, b)) { - ret = -1; - } else if(npy_half_lt_nonan(b, a)) { - ret = 1; - } else { - ret = 0; - } - - return ret; -} - - -/* object type */ - -static int -OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap)) -{ - /* - * ALIGNMENT NOTE: It seems that PyArray_Sort is already handling - * the alignment of pointers, so it doesn't need to be handled - * here. - */ - if ((*ip1 == NULL) || (*ip2 == NULL)) { - if (ip1 == ip2) { - return 1; - } - if (ip1 == NULL) { - return -1; - } - return 1; - } -#if defined(NPY_PY3K) - if (PyObject_RichCompareBool(*ip1, *ip2, Py_LT) == 1) { - return -1; - } - else if (PyObject_RichCompareBool(*ip1, *ip2, Py_GT) == 1) { - return 1; - } - else { - return 0; - } -#else - return PyObject_Compare(*ip1, *ip2); -#endif -} - - -/* string type */ - -static int -STRING_compare(char *ip1, char *ip2, PyArrayObject *ap) -{ - const unsigned char *c1 = (unsigned char *)ip1; - const unsigned char *c2 = (unsigned char *)ip2; - const size_t len = ap->descr->elsize; - size_t i; - - for(i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - return (c1[i] > c2[i]) ? 1 : -1; - } - } - return 0; -} - - -/* unicode type */ - -static int -UNICODE_compare(PyArray_UCS4 *ip1, PyArray_UCS4 *ip2, - PyArrayObject *ap) -{ - int itemsize = ap->descr->elsize; - - if (itemsize < 0) { - return 0; - } - itemsize >>= 2; - while (itemsize-- > 0) { - PyArray_UCS4 c1 = *ip1++; - PyArray_UCS4 c2 = *ip2++; - if (c1 != c2) { - return (c1 < c2) ? -1 : 1; - } - } - return 0; -} - - -/* void type */ - -/* - * If fields are defined, then compare on first field and if equal - * compare on second field. Continue until done or comparison results - * in not_equal. - * - * Must align data passed on to sub-comparisons. - * Also must swap data based on to sub-comparisons. 
- */ -static int -VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) -{ - PyArray_Descr *descr, *new; - PyObject *names, *key; - PyObject *tup, *title; - char *nip1, *nip2; - int i, offset, res = 0, swap=0; - - if (!PyArray_HASFIELDS(ap)) { - return STRING_compare(ip1, ip2, ap); - } - descr = ap->descr; - /* - * Compare on the first-field. If equal, then - * compare on the second-field, etc. - */ - names = descr->names; - for (i = 0; i < PyTuple_GET_SIZE(names); i++) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { - goto finish; - } - ap->descr = new; - swap = PyArray_ISBYTESWAPPED(ap); - nip1 = ip1+offset; - nip2 = ip2+offset; - if ((swap) || (new->alignment > 1)) { - if ((swap) || (((intp)(nip1) % new->alignment) != 0)) { - /* create buffer and copy */ - nip1 = _pya_malloc(new->elsize); - if (nip1 == NULL) { - goto finish; - } - memcpy(nip1, ip1+offset, new->elsize); - if (swap) - new->f->copyswap(nip1, NULL, swap, ap); - } - if ((swap) || (((intp)(nip2) % new->alignment) != 0)) { - /* copy data to a buffer */ - nip2 = _pya_malloc(new->elsize); - if (nip2 == NULL) { - if (nip1 != ip1+offset) { - _pya_free(nip1); - } - goto finish; - } - memcpy(nip2, ip2+offset, new->elsize); - if (swap) - new->f->copyswap(nip2, NULL, swap, ap); - } - } - res = new->f->compare(nip1, nip2, ap); - if ((swap) || (new->alignment > 1)) { - if (nip1 != ip1+offset) { - _pya_free(nip1); - } - if (nip2 != ip2+offset) { - _pya_free(nip2); - } - } - if (res != 0) { - break; - } - } - -finish: - ap->descr = descr; - return res; -} - - -/* - ***************************************************************************** - ** ARGFUNC ** - ***************************************************************************** - */ - -#define _LESS_THAN_OR_EQUAL(a,b) ((a) <= (b)) - -/**begin repeat - * - * #fname = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, 
DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * float, double, longdouble, datetime, timedelta# - * #isfloat = 0*11, 1*7, 0*2# - * #isnan = nop*11, npy_half_isnan, npy_isnan*6, nop*2# - * #le = _LESS_THAN_OR_EQUAL*11, npy_half_le, _LESS_THAN_OR_EQUAL*8# - * #iscomplex = 0*15, 1*3, 0*2# - * #incr = ip++*15, ip+=2*3, ip++*2# - */ -static int -@fname@_argmax(@type@ *ip, intp n, intp *max_ind, PyArrayObject *NPY_UNUSED(aip)) -{ - intp i; - @type@ mp = *ip; -#if @iscomplex@ - @type@ mp_im = ip[1]; -#endif - - *max_ind = 0; - -#if @isfloat@ - if (@isnan@(mp)) { - /* nan encountered; it's maximal */ - return 0; - } -#endif -#if @iscomplex@ - if (@isnan@(mp_im)) { - /* nan encountered; it's maximal */ - return 0; - } -#endif - - for (i = 1; i < n; i++) { - @incr@; - /* - * Propagate nans, similarly as max() and min() - */ -#if @iscomplex@ - /* Lexical order for complex numbers */ - if ((ip[0] > mp) || ((ip[0] == mp) && (ip[1] > mp_im)) - || @isnan@(ip[0]) || @isnan@(ip[1])) { - mp = ip[0]; - mp_im = ip[1]; - *max_ind = i; - if (@isnan@(mp) || @isnan@(mp_im)) { - /* nan encountered, it's maximal */ - break; - } - } -#else - if (!@le@(*ip, mp)) { /* negated, for correct nan handling */ - mp = *ip; - *max_ind = i; -#if @isfloat@ - if (@isnan@(mp)) { - /* nan encountered, it's maximal */ - break; - } -#endif - } -#endif - } - return 0; -} - -/**end repeat**/ - -/**begin repeat - * - * #fname = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * float, double, longdouble, datetime, timedelta# - * #isfloat = 0*11, 1*7, 0*2# - * #isnan = nop*11, npy_half_isnan, 
npy_isnan*6, nop*2# - * #le = _LESS_THAN_OR_EQUAL*11, npy_half_le, _LESS_THAN_OR_EQUAL*8# - * #iscomplex = 0*15, 1*3, 0*2# - * #incr = ip++*15, ip+=2*3, ip++*2# - */ -static int -@fname@_argmin(@type@ *ip, intp n, intp *min_ind, PyArrayObject *NPY_UNUSED(aip)) -{ - intp i; - @type@ mp = *ip; -#if @iscomplex@ - @type@ mp_im = ip[1]; -#endif - - *min_ind = 0; - -#if @isfloat@ - if (@isnan@(mp)) { - /* nan encountered; it's minimal */ - return 0; - } -#endif -#if @iscomplex@ - if (@isnan@(mp_im)) { - /* nan encountered; it's minimal */ - return 0; - } -#endif - - for (i = 1; i < n; i++) { - @incr@; - /* - * Propagate nans, similarly as max() and min() - */ -#if @iscomplex@ - /* Lexical order for complex numbers */ - if ((mp > ip[0]) || ((ip[0] == mp) && (mp_im > ip[1])) - || @isnan@(ip[0]) || @isnan@(ip[1])) { - mp = ip[0]; - mp_im = ip[1]; - *min_ind = i; - if (@isnan@(mp) || @isnan@(mp_im)) { - /* nan encountered, it's minimal */ - break; - } - } -#else - if (!@le@(mp, *ip)) { /* negated, for correct nan handling */ - mp = *ip; - *min_ind = i; -#if @isfloat@ - if (@isnan@(mp)) { - /* nan encountered, it's minimal */ - break; - } -#endif - } -#endif - } - return 0; -} - -/**end repeat**/ - -#undef _LESS_THAN_OR_EQUAL - -static int -OBJECT_argmax(PyObject **ip, intp n, intp *max_ind, PyArrayObject *NPY_UNUSED(aip)) -{ - intp i; - PyObject *mp = ip[0]; - - *max_ind = 0; - i = 1; - while (i < n && mp == NULL) { - mp = ip[i]; - i++; - } - for (; i < n; i++) { - ip++; -#if defined(NPY_PY3K) - if (*ip != NULL && PyObject_RichCompareBool(*ip, mp, Py_GT) == 1) { -#else - if (*ip != NULL && PyObject_Compare(*ip, mp) > 0) { -#endif - mp = *ip; - *max_ind = i; - } - } - return 0; -} - -/**begin repeat - * - * #fname = STRING, UNICODE# - * #type = char, PyArray_UCS4# - */ -static int -@fname@_argmax(@type@ *ip, intp n, intp *max_ind, PyArrayObject *aip) -{ - intp i; - int elsize = aip->descr->elsize; - @type@ *mp = (@type@ *)_pya_malloc(elsize); - - if (mp==NULL) return 0; - 
memcpy(mp, ip, elsize); - *max_ind = 0; - for(i=1; i 0) { - memcpy(mp, ip, elsize); - *max_ind=i; - } - } - _pya_free(mp); - return 0; -} - -/**end repeat**/ - -#define VOID_argmax NULL - -static int -OBJECT_argmin(PyObject **ip, intp n, intp *min_ind, PyArrayObject *NPY_UNUSED(aip)) -{ - intp i; - PyObject *mp = ip[0]; - - *min_ind = 0; - i = 1; - while (i < n && mp == NULL) { - mp = ip[i]; - i++; - } - for (; i < n; i++) { - ip++; -#if defined(NPY_PY3K) - if (*ip != NULL && PyObject_RichCompareBool(mp, *ip, Py_GT) == 1) { -#else - if (*ip != NULL && PyObject_Compare(mp, *ip) > 0) { -#endif - mp = *ip; - *min_ind = i; - } - } - return 0; -} - -/**begin repeat - * - * #fname = STRING, UNICODE# - * #type = char, PyArray_UCS4# - */ -static int -@fname@_argmin(@type@ *ip, intp n, intp *min_ind, PyArrayObject *aip) -{ - intp i; - int elsize = PyArray_DESCR(aip)->elsize; - @type@ *mp = (@type@ *)PyArray_malloc(elsize); - - if (mp==NULL) return 0; - memcpy(mp, ip, elsize); - *min_ind = 0; - for(i=1; i 0) { - memcpy(mp, ip, elsize); - *min_ind=i; - } - } - PyArray_free(mp); - return 0; -} - -/**end repeat**/ - - -#define VOID_argmin NULL - - -/* - ***************************************************************************** - ** DOT ** - ***************************************************************************** - */ - -/* - * dot means inner product - */ - -static void -BOOL_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n, - void *NPY_UNUSED(ignore)) -{ - Bool tmp = FALSE; - intp i; - - for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { - if ((*((Bool *)ip1) != 0) && (*((Bool *)ip2) != 0)) { - tmp = TRUE; - break; - } - } - *((Bool *)op) = tmp; -} - -/**begin repeat - * - * #name = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, - * DATETIME, TIMEDELTA# - * #type = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, float, double, longdouble, - * datetime, timedelta# - * #out 
= long, ulong, long, ulong, long, ulong, long, ulong, - * longlong, ulonglong, float, double, longdouble, - * datetime, timedelta# - */ -static void -@name@_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n, - void *NPY_UNUSED(ignore)) -{ - @out@ tmp = (@out@)0; - intp i; - - for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { - tmp += (@out@)(*((@type@ *)ip1)) * - (@out@)(*((@type@ *)ip2)); - } - *((@type@ *)op) = (@type@) tmp; -} -/**end repeat**/ - -static void -HALF_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, - void *NPY_UNUSED(ignore)) -{ - float tmp = 0.0f; - npy_intp i; - - for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { - tmp += npy_half_to_float(*((npy_half *)ip1)) * - npy_half_to_float(*((npy_half *)ip2)); - } - *((npy_half *)op) = npy_float_to_half(tmp); -} - -/**begin repeat - * - * #name = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = float, double, longdouble# - */ -static void @name@_dot(char *ip1, intp is1, char *ip2, intp is2, - char *op, intp n, void *NPY_UNUSED(ignore)) -{ - @type@ tmpr = (@type@)0.0, tmpi=(@type@)0.0; - intp i; - - for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { - tmpr += ((@type@ *)ip1)[0] * ((@type@ *)ip2)[0] - - ((@type@ *)ip1)[1] * ((@type@ *)ip2)[1]; - tmpi += ((@type@ *)ip1)[1] * ((@type@ *)ip2)[0] - + ((@type@ *)ip1)[0] * ((@type@ *)ip2)[1]; - } - ((@type@ *)op)[0] = tmpr; ((@type@ *)op)[1] = tmpi; -} - -/**end repeat**/ - -static void -OBJECT_dot(char *ip1, intp is1, char *ip2, intp is2, char *op, intp n, - void *NPY_UNUSED(ignore)) -{ - /* - * ALIGNMENT NOTE: np.dot, np.inner etc. enforce that the array is - * BEHAVED before getting to this point, so unaligned pointers aren't - * handled here. 
- */ - intp i; - PyObject *tmp1, *tmp2, *tmp = NULL; - PyObject **tmp3; - for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { - if ((*((PyObject **)ip1) == NULL) || (*((PyObject **)ip2) == NULL)) { - tmp1 = Py_False; - Py_INCREF(Py_False); - } - else { - tmp1 = PyNumber_Multiply(*((PyObject **)ip1), *((PyObject **)ip2)); - if (!tmp1) { - Py_XDECREF(tmp); - return; - } - } - if (i == 0) { - tmp = tmp1; - } - else { - tmp2 = PyNumber_Add(tmp, tmp1); - Py_XDECREF(tmp); - Py_XDECREF(tmp1); - if (!tmp2) { - return; - } - tmp = tmp2; - } - } - tmp3 = (PyObject**) op; - tmp2 = *tmp3; - *((PyObject **)op) = tmp; - Py_XDECREF(tmp2); -} - - -/* - ***************************************************************************** - ** FILL ** - ***************************************************************************** - */ - - -#define BOOL_fill NULL - -/* this requires buffer to be filled with objects or NULL */ -static void -OBJECT_fill(PyObject **buffer, intp length, void *NPY_UNUSED(ignored)) -{ - intp i; - PyObject *start = buffer[0]; - PyObject *delta = buffer[1]; - - delta = PyNumber_Subtract(delta, start); - if (!delta) { - return; - } - start = PyNumber_Add(start, delta); - if (!start) { - goto finish; - } - buffer += 2; - - for (i = 2; i < length; i++, buffer++) { - start = PyNumber_Add(start, delta); - if (!start) { - goto finish; - } - Py_XDECREF(*buffer); - *buffer = start; - } - -finish: - Py_DECREF(delta); - return; -} - -/**begin repeat - * - * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, - * DATETIME, TIMEDELTA# - * #typ = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, float, double, longdouble, - * datetime, timedelta# -*/ -static void -@NAME@_fill(@typ@ *buffer, intp length, void *NPY_UNUSED(ignored)) -{ - intp i; - @typ@ start = buffer[0]; - @typ@ delta = buffer[1]; - - delta -= start; - for (i = 2; i < length; ++i) { - buffer[i] = start + i*delta; - } -} -/**end 
repeat**/ - -static void -HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored)) -{ - npy_intp i; - float start = npy_half_to_float(buffer[0]); - float delta = npy_half_to_float(buffer[1]); - - delta -= start; - for (i = 2; i < length; ++i) { - buffer[i] = npy_float_to_half(start + i*delta); - } -} - -/**begin repeat - * - * #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #typ = cfloat, cdouble, clongdouble# -*/ -static void -@NAME@_fill(@typ@ *buffer, intp length, void *NPY_UNUSED(ignore)) -{ - intp i; - @typ@ start; - @typ@ delta; - - start.real = buffer->real; - start.imag = buffer->imag; - delta.real = buffer[1].real; - delta.imag = buffer[1].imag; - delta.real -= start.real; - delta.imag -= start.imag; - buffer += 2; - for (i = 2; i < length; i++, buffer++) { - buffer->real = start.real + i*delta.real; - buffer->imag = start.imag + i*delta.imag; - } -} -/**end repeat**/ - - -/* this requires buffer to be filled with objects or NULL */ -static void -OBJECT_fillwithscalar(PyObject **buffer, intp length, PyObject **value, void *NPY_UNUSED(ignored)) -{ - intp i; - PyObject *val = *value; - for (i = 0; i < length; i++) { - Py_XDECREF(buffer[i]); - Py_XINCREF(val); - buffer[i] = val; - } -} -/**begin repeat - * - * #NAME = BOOL, BYTE, UBYTE# - * #typ = Bool, byte, ubyte# - */ -static void -@NAME@_fillwithscalar(@typ@ *buffer, intp length, @typ@ *value, void *NPY_UNUSED(ignored)) -{ - memset(buffer, *value, length); -} -/**end repeat**/ - -/**begin repeat - * - * #NAME = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, - * DATETIME, TIMEDELTA# - * #typ = short, ushort, int, uint, long, ulong, longlong, ulonglong, - * npy_half, float, double, longdouble, cfloat, cdouble, clongdouble, - * datetime, timedelta# - */ -static void -@NAME@_fillwithscalar(@typ@ *buffer, intp length, @typ@ *value, void *NPY_UNUSED(ignored)) -{ - intp i; - @typ@ val = *value; - - for (i = 0; i < length; 
++i) { - buffer[i] = val; - } -} -/**end repeat**/ - - -/* - ***************************************************************************** - ** FASTCLIP ** - ***************************************************************************** - */ - -#define _LESS_THAN(a, b) ((a) < (b)) -#define _GREATER_THAN(a, b) ((a) > (b)) -/* - * In fastclip, 'b' was already checked for NaN, so the half comparison - * only needs to check 'a' for NaN. - */ -#define _HALF_LESS_THAN(a, b) (!npy_half_isnan(a) && npy_half_lt_nonan(a, b)) -#define _HALF_GREATER_THAN(a, b) (!npy_half_isnan(a) && npy_half_lt_nonan(b, a)) - -/**begin repeat - * - * #name = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * DATETIME, TIMEDELTA# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * datetime, timedelta# - * #isfloat = 0*11, 1*4, 0*2# - * #isnan = nop*11, npy_half_isnan, npy_isnan*3, nop*2# - * #lt = _LESS_THAN*11, _HALF_LESS_THAN, _LESS_THAN*5# - * #gt = _GREATER_THAN*11, _HALF_GREATER_THAN, _GREATER_THAN*5# - */ -static void -@name@_fastclip(@type@ *in, intp ni, @type@ *min, @type@ *max, @type@ *out) -{ - npy_intp i; - @type@ max_val = 0, min_val = 0; - - if (max != NULL) { - max_val = *max; -#if @isfloat@ - /* NaNs result in no clipping, so optimize the case away */ - if (@isnan@(max_val)) { - if (min == NULL) { - return; - } - max = NULL; - } -#endif - } - if (min != NULL) { - min_val = *min; -#if @isfloat@ - if (@isnan@(min_val)) { - if (max == NULL) { - return; - } - min = NULL; - } -#endif - } - if (max == NULL) { - for (i = 0; i < ni; i++) { - if (@lt@(in[i], min_val)) { - out[i] = min_val; - } - } - } - else if (min == NULL) { - for (i = 0; i < ni; i++) { - if (@gt@(in[i], max_val)) { - out[i] = max_val; - } - } - } - else { - for (i = 0; i < ni; i++) { - if (@lt@(in[i], min_val)) { - out[i] = min_val; - } - else if (@gt@(in[i], max_val)) 
{ - out[i] = max_val; - } - } - } -} -/**end repeat**/ - -#undef _LESS_THAN -#undef _GREATER_THAN -#undef _HALF_LESS_THAN -#undef _HALF_GREATER_THAN - -/**begin repeat - * - * #name = CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = cfloat, cdouble, clongdouble# - */ -static void -@name@_fastclip(@type@ *in, intp ni, @type@ *min, @type@ *max, @type@ *out) -{ - npy_intp i; - @type@ max_val, min_val; - - min_val = *min; - max_val = *max; - if (max != NULL) { - max_val = *max; - } - if (min != NULL) { - min_val = *min; - } - if (max == NULL) { - for (i = 0; i < ni; i++) { - if (PyArray_CLT(in[i],min_val)) { - out[i] = min_val; - } - } - } - else if (min == NULL) { - for (i = 0; i < ni; i++) { - if (PyArray_CGT(in[i], max_val)) { - out[i] = max_val; - } - } - } - else { - for (i = 0; i < ni; i++) { - if (PyArray_CLT(in[i], min_val)) { - out[i] = min_val; - } - else if (PyArray_CGT(in[i], max_val)) { - out[i] = max_val; - } - } - } -} - -/**end repeat**/ - -#define OBJECT_fastclip NULL - - -/* - ***************************************************************************** - ** FASTPUTMASK ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #name = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * cfloat, cdouble, clongdouble, datetime, timedelta# -*/ -static void -@name@_fastputmask(@type@ *in, Bool *mask, intp ni, @type@ *vals, intp nv) -{ - npy_intp i; - @type@ s_val; - - if (nv == 1) { - s_val = *vals; - for (i = 0; i < ni; i++) { - if (mask[i]) { - in[i] = s_val; - } - } - } - else { - for (i = 0; i < ni; i++) { - if (mask[i]) { - in[i] = vals[i%nv]; - } - } - } - return; -} -/**end repeat**/ - -#define OBJECT_fastputmask NULL - - -/* - 
***************************************************************************** - ** FASTTAKE ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #name = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA# - * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * cfloat, cdouble, clongdouble, datetime, timedelta# -*/ -static int -@name@_fasttake(@type@ *dest, @type@ *src, intp *indarray, - intp nindarray, intp n_outer, - intp m_middle, intp nelem, - NPY_CLIPMODE clipmode) -{ - intp i, j, k, tmp; - - switch(clipmode) { - case NPY_RAISE: - for (i = 0; i < n_outer; i++) { - for (j = 0; j < m_middle; j++) { - tmp = indarray[j]; - if (tmp < 0) { - tmp = tmp+nindarray; - } - if ((tmp < 0) || (tmp >= nindarray)) { - PyErr_SetString(PyExc_IndexError, - "index out of range for array"); - return 1; - } - if (nelem == 1) { - *dest++ = *(src+tmp); - } - else { - for (k = 0; k < nelem; k++) { - *dest++ = *(src + tmp*nelem + k); - } - } - } - src += nelem*nindarray; - } - break; - case NPY_WRAP: - for (i = 0; i < n_outer; i++) { - for (j = 0; j < m_middle; j++) { - tmp = indarray[j]; - if (tmp < 0) { - while (tmp < 0) { - tmp += nindarray; - } - } - else if (tmp >= nindarray) { - while (tmp >= nindarray) { - tmp -= nindarray; - } - } - if (nelem == 1) { - *dest++ = *(src+tmp); - } - else { - for (k = 0; k < nelem; k++) { - *dest++ = *(src+tmp*nelem+k); - } - } - } - src += nelem*nindarray; - } - break; - case NPY_CLIP: - for (i = 0; i < n_outer; i++) { - for (j = 0; j < m_middle; j++) { - tmp = indarray[j]; - if (tmp < 0) { - tmp = 0; - } - else if (tmp >= nindarray) { - tmp = nindarray - 1; - } - if (nelem == 1) { - *dest++ = *(src+tmp); - } - else { - for (k = 0; k < nelem; k++) { - *dest++ = *(src + tmp*nelem + k); - } - 
} - } - src += nelem*nindarray; - } - break; - } - return 0; -} -/**end repeat**/ - -#define OBJECT_fasttake NULL - - -/* - ***************************************************************************** - ** SETUP FUNCTION POINTERS ** - ***************************************************************************** - */ - - -#define _ALIGN(type) offsetof(struct {char c; type v;}, v) -/* - * Disable harmless compiler warning "4116: unnamed type definition in - * parentheses" which is caused by the _ALIGN macro. - */ -#if defined(_MSC_VER) -#pragma warning(disable:4116) -#endif - - -/**begin repeat - * - * #from = VOID, STRING, UNICODE# - * #align = char, char, PyArray_UCS4# - * #NAME = Void, String, Unicode# - * #endian = |, |, =# -*/ -static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { - { - (PyArray_VectorUnaryFunc*)@from@_to_BOOL, - (PyArray_VectorUnaryFunc*)@from@_to_BYTE, - (PyArray_VectorUnaryFunc*)@from@_to_UBYTE, - (PyArray_VectorUnaryFunc*)@from@_to_SHORT, - (PyArray_VectorUnaryFunc*)@from@_to_USHORT, - (PyArray_VectorUnaryFunc*)@from@_to_INT, - (PyArray_VectorUnaryFunc*)@from@_to_UINT, - (PyArray_VectorUnaryFunc*)@from@_to_LONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONG, - (PyArray_VectorUnaryFunc*)@from@_to_LONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_FLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_DOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_LONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CFLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_CDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CLONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_OBJECT, - (PyArray_VectorUnaryFunc*)@from@_to_STRING, - (PyArray_VectorUnaryFunc*)@from@_to_UNICODE, - (PyArray_VectorUnaryFunc*)@from@_to_VOID - }, - (PyArray_GetItemFunc*)@from@_getitem, - (PyArray_SetItemFunc*)@from@_setitem, - (PyArray_CopySwapNFunc*)@from@_copyswapn, - (PyArray_CopySwapFunc*)@from@_copyswap, - (PyArray_CompareFunc*)@from@_compare, - 
(PyArray_ArgFunc*)@from@_argmax, - (PyArray_DotFunc*)NULL, - (PyArray_ScanFunc*)@from@_scan, - (PyArray_FromStrFunc*)@from@_fromstr, - (PyArray_NonzeroFunc*)@from@_nonzero, - (PyArray_FillFunc*)NULL, - (PyArray_FillWithScalarFunc*)NULL, - { - NULL, NULL, NULL - }, - { - NULL, NULL, NULL - }, - NULL, - (PyArray_ScalarKindFunc*)NULL, - NULL, - NULL, - (PyArray_FastClipFunc *)NULL, - (PyArray_FastPutmaskFunc *)NULL, - (PyArray_FastTakeFunc *)NULL, - (PyArray_ArgFunc*)@from@_argmin -}; - -/* - * FIXME: check for PY3K - */ -static PyArray_Descr @from@_Descr = { - PyObject_HEAD_INIT(&PyArrayDescr_Type) - &Py@NAME@ArrType_Type, - PyArray_@from@LTR, - PyArray_@from@LTR, - '@endian@', - 0, - PyArray_@from@, - 0, - _ALIGN(@align@), - NULL, - NULL, - NULL, - &_Py@NAME@_ArrFuncs, - NULL, -}; - -/**end repeat**/ - -/**begin repeat - * - * #from = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, DATETIME, TIMEDELTA# - * #num = 1*15, 2*3, 1*3# - * #fromtyp = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, npy_half, float, double, longdouble, - * float, double, longdouble, PyObject *, datetime, timedelta# - * #NAME = Bool, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, - * LongLong, ULongLong, Half, Float, Double, LongDouble, - * CFloat, CDouble, CLongDouble, Object, Datetime, Timedelta# - * #kind = GENBOOL, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, SIGNED, UNSIGNED, - * SIGNED, UNSIGNED, FLOATING, FLOATING, FLOATING, FLOATING, - * COMPLEX, COMPLEX, COMPLEX, OBJECT, DATETIME, TIMEDELTA# - * #endian = |*3, =*15, |, =*2# - * #isobject= 0*18,NPY_OBJECT_DTYPE_FLAGS,0*2# - */ -static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { - { - (PyArray_VectorUnaryFunc*)@from@_to_BOOL, - (PyArray_VectorUnaryFunc*)@from@_to_BYTE, - (PyArray_VectorUnaryFunc*)@from@_to_UBYTE, - (PyArray_VectorUnaryFunc*)@from@_to_SHORT, - 
(PyArray_VectorUnaryFunc*)@from@_to_USHORT, - (PyArray_VectorUnaryFunc*)@from@_to_INT, - (PyArray_VectorUnaryFunc*)@from@_to_UINT, - (PyArray_VectorUnaryFunc*)@from@_to_LONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONG, - (PyArray_VectorUnaryFunc*)@from@_to_LONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_ULONGLONG, - (PyArray_VectorUnaryFunc*)@from@_to_FLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_DOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_LONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CFLOAT, - (PyArray_VectorUnaryFunc*)@from@_to_CDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_CLONGDOUBLE, - (PyArray_VectorUnaryFunc*)@from@_to_OBJECT, - (PyArray_VectorUnaryFunc*)@from@_to_STRING, - (PyArray_VectorUnaryFunc*)@from@_to_UNICODE, - (PyArray_VectorUnaryFunc*)@from@_to_VOID - }, - (PyArray_GetItemFunc*)@from@_getitem, - (PyArray_SetItemFunc*)@from@_setitem, - (PyArray_CopySwapNFunc*)@from@_copyswapn, - (PyArray_CopySwapFunc*)@from@_copyswap, - (PyArray_CompareFunc*)@from@_compare, - (PyArray_ArgFunc*)@from@_argmax, - (PyArray_DotFunc*)@from@_dot, - (PyArray_ScanFunc*)@from@_scan, - (PyArray_FromStrFunc*)@from@_fromstr, - (PyArray_NonzeroFunc*)@from@_nonzero, - (PyArray_FillFunc*)@from@_fill, - (PyArray_FillWithScalarFunc*)@from@_fillwithscalar, - { - NULL, NULL, NULL - }, - { - NULL, NULL, NULL - }, - NULL, - (PyArray_ScalarKindFunc*)NULL, - NULL, - NULL, - (PyArray_FastClipFunc*)@from@_fastclip, - (PyArray_FastPutmaskFunc*)@from@_fastputmask, - (PyArray_FastTakeFunc*)@from@_fasttake, - (PyArray_ArgFunc*)@from@_argmin -}; - -/* - * FIXME: check for PY3K - */ -NPY_NO_EXPORT PyArray_Descr @from@_Descr = { - PyObject_HEAD_INIT(&PyArrayDescr_Type) - &Py@NAME@ArrType_Type, - PyArray_@kind@LTR, - PyArray_@from@LTR, - '@endian@', - @isobject@, - PyArray_@from@, - @num@*sizeof(@fromtyp@), - _ALIGN(@fromtyp@), - NULL, - NULL, - NULL, - &_Py@NAME@_ArrFuncs, - NULL, -}; - -/**end repeat**/ - -static void -_init_datetime_descr(PyArray_Descr *descr) -{ - PyArray_DatetimeMetaData 
*dt_data; - PyObject *cobj; - - dt_data = _pya_malloc(sizeof(PyArray_DatetimeMetaData)); - dt_data->base = NPY_FR_us; - dt_data->num = 1; - dt_data->den = 1; - dt_data->events = 1; - -/* FIXME - * There is no error check here and no way to indicate an error - * until the metadata turns up NULL. - */ - cobj = NpyCapsule_FromVoidPtr((void *)dt_data, simple_capsule_dtor); - descr->metadata = PyDict_New(); - PyDict_SetItemString(descr->metadata, NPY_METADATA_DTSTR, cobj); - Py_DECREF(cobj); - -} - -#define _MAX_LETTER 128 -static char _letter_to_num[_MAX_LETTER]; - -static PyArray_Descr *_builtin_descrs[] = { - &BOOL_Descr, - &BYTE_Descr, - &UBYTE_Descr, - &SHORT_Descr, - &USHORT_Descr, - &INT_Descr, - &UINT_Descr, - &LONG_Descr, - &ULONG_Descr, - &LONGLONG_Descr, - &ULONGLONG_Descr, - &FLOAT_Descr, - &DOUBLE_Descr, - &LONGDOUBLE_Descr, - &CFLOAT_Descr, - &CDOUBLE_Descr, - &CLONGDOUBLE_Descr, - &OBJECT_Descr, - &STRING_Descr, - &UNICODE_Descr, - &VOID_Descr, - &DATETIME_Descr, - &TIMEDELTA_Descr, - &HALF_Descr -}; - -/*NUMPY_API - * Get the PyArray_Descr structure for a type. 
- */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrFromType(int type) -{ - PyArray_Descr *ret = NULL; - - if (type < PyArray_NTYPES) { - ret = _builtin_descrs[type]; - } - else if (type == PyArray_NOTYPE) { - /* - * This needs to not raise an error so - * that PyArray_DescrFromType(PyArray_NOTYPE) - * works for backwards-compatible C-API - */ - return NULL; - } - else if ((type == PyArray_CHAR) || (type == PyArray_CHARLTR)) { - ret = PyArray_DescrNew(_builtin_descrs[PyArray_STRING]); - if (ret == NULL) { - return NULL; - } - ret->elsize = 1; - ret->type = PyArray_CHARLTR; - return ret; - } - else if (PyTypeNum_ISUSERDEF(type)) { - ret = userdescrs[type - PyArray_USERDEF]; - } - else { - int num = PyArray_NTYPES; - if (type < _MAX_LETTER) { - num = (int) _letter_to_num[type]; - } - if (num >= PyArray_NTYPES) { - ret = NULL; - } - else { - ret = _builtin_descrs[num]; - } - } - if (ret == NULL) { - PyErr_SetString(PyExc_ValueError, - "Invalid data-type for array"); - } - else { - Py_INCREF(ret); - } - - /* Make sure dtype metadata is initialized for DATETIME */ - if (PyTypeNum_ISDATETIME(type)) { - if (ret->metadata == NULL) { - _init_datetime_descr(ret); - } - } - - return ret; -} - - -/* - ***************************************************************************** - ** SETUP TYPE INFO ** - ***************************************************************************** - */ - - -NPY_NO_EXPORT int -set_typeinfo(PyObject *dict) -{ - PyObject *infodict, *s; - int i; - - PyArray_Descr *dtype; - PyObject *cobj, *key; - - /* Add cast functions for the new types */ -/**begin repeat - * - * #name1 = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID, - * DATETIME,TIMEDELTA# - */ -/**begin repeat1 - * - * #name2 = HALF, DATETIME, TIMEDELTA# - */ - dtype = _builtin_descrs[NPY_@name1@]; - if (dtype->f->castdict == NULL) { - dtype->f->castdict = 
PyDict_New(); - if (dtype->f->castdict == NULL) { - return -1; - } - } - key = PyInt_FromLong(NPY_@name2@); - if (key == NULL) { - return -1; - } - cobj = NpyCapsule_FromVoidPtr((void *)@name1@_to_@name2@, NULL); - if (cobj == NULL) { - Py_DECREF(key); - return -1; - } - if (PyDict_SetItem(dtype->f->castdict, key, cobj) < 0) { - Py_DECREF(key); - Py_DECREF(cobj); - return -1; - } - Py_DECREF(key); - Py_DECREF(cobj); -/**end repeat1**/ -/**end repeat**/ - - for (i = 0; i < _MAX_LETTER; i++) { - _letter_to_num[i] = NPY_NTYPES; - } - -/**begin repeat - * - * #name = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, INTP, UINTP, - * LONG, ULONG, LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID, - * DATETIME,TIMEDELTA# - */ - _letter_to_num[PyArray_@name@LTR] = PyArray_@name@; -/**end repeat**/ - _letter_to_num[PyArray_STRINGLTR2] = PyArray_STRING; - -/**begin repeat - * #name = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID, - * DATETIME, TIMEDELTA# -*/ - @name@_Descr.fields = Py_None; -/**end repeat**/ - - /* Set a dictionary with type information */ - infodict = PyDict_New(); - if (infodict == NULL) return -1; - -#define BITSOF_INTP CHAR_BIT*SIZEOF_PY_INTPTR_T -#define BITSOF_BYTE CHAR_BIT - -/**begin repeat - * - * #name = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, INTP, UINTP, - * LONG, ULONG, LONGLONG, ULONGLONG# - * #uname = BOOL, BYTE*2, SHORT*2, INT*2, INTP*2, LONG*2, LONGLONG*2# - * #Name = Bool, Byte, UByte, Short, UShort, Int, UInt, Intp, UIntp, - * Long, ULong, LongLong, ULongLong# - * #type = Bool, byte, ubyte, short, ushort, int, uint, intp, uintp, - * long, ulong, longlong, ulonglong# - * #max= 1, MAX_BYTE, MAX_UBYTE, MAX_SHORT, MAX_USHORT, MAX_INT, - * PyLong_FromUnsignedLong(MAX_UINT), PyLong_FromLongLong((longlong) MAX_INTP), - * 
PyLong_FromUnsignedLongLong((ulonglong) MAX_UINTP), MAX_LONG, - * PyLong_FromUnsignedLong((unsigned long) MAX_ULONG), - * PyLong_FromLongLong((longlong) MAX_LONGLONG), - * PyLong_FromUnsignedLongLong((ulonglong) MAX_ULONGLONG)# - * #min = 0, MIN_BYTE, 0, MIN_SHORT, 0, MIN_INT, 0, - * PyLong_FromLongLong((longlong) MIN_INTP), 0, MIN_LONG, 0, - * PyLong_FromLongLong((longlong) MIN_LONGLONG),0# - * #cx = i*6, N, N, N, l, N, N, N# - * #cn = i*7, N, i, l, i, N, i# -*/ - PyDict_SetItemString(infodict, "@name@", -#if defined(NPY_PY3K) - s = Py_BuildValue("Ciii@cx@@cn@O", -#else - s = Py_BuildValue("ciii@cx@@cn@O", -#endif - PyArray_@name@LTR, - PyArray_@name@, - BITSOF_@uname@, - _ALIGN(@type@), - @max@, - @min@, - (PyObject *) &Py@Name@ArrType_Type)); - Py_DECREF(s); -/**end repeat**/ - -#define BITSOF_CFLOAT 2*BITSOF_FLOAT -#define BITSOF_CDOUBLE 2*BITSOF_DOUBLE -#define BITSOF_CLONGDOUBLE 2*BITSOF_LONGDOUBLE - -/**begin repeat - * - * #type = npy_half, float, double, longdouble, cfloat, cdouble, clongdouble# - * #name = HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# - * #Name = Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - */ - PyDict_SetItemString(infodict, "@name@", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", PyArray_@name@LTR, -#else - s = Py_BuildValue("ciiiO", PyArray_@name@LTR, -#endif - PyArray_@name@, - BITSOF_@name@, - _ALIGN(@type@), - (PyObject *) &Py@Name@ArrType_Type)); - Py_DECREF(s); -/**end repeat**/ - - PyDict_SetItemString(infodict, "OBJECT", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", PyArray_OBJECTLTR, -#else - s = Py_BuildValue("ciiiO", PyArray_OBJECTLTR, -#endif - PyArray_OBJECT, - sizeof(PyObject *) * CHAR_BIT, - _ALIGN(PyObject *), - (PyObject *) &PyObjectArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "STRING", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", PyArray_STRINGLTR, -#else - s = Py_BuildValue("ciiiO", PyArray_STRINGLTR, -#endif - PyArray_STRING, - 0, - 
_ALIGN(char), - (PyObject *) &PyStringArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "UNICODE", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", PyArray_UNICODELTR, -#else - s = Py_BuildValue("ciiiO", PyArray_UNICODELTR, -#endif - PyArray_UNICODE, - 0, - _ALIGN(PyArray_UCS4), - (PyObject *) &PyUnicodeArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "VOID", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiO", PyArray_VOIDLTR, -#else - s = Py_BuildValue("ciiiO", PyArray_VOIDLTR, -#endif - PyArray_VOID, - 0, - _ALIGN(char), - (PyObject *) &PyVoidArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "DATETIME", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiNNO", PyArray_DATETIMELTR, -#else - s = Py_BuildValue("ciiiNNO", PyArray_DATETIMELTR, -#endif - PyArray_DATETIME, - sizeof(npy_datetime) * CHAR_BIT, - _ALIGN(npy_datetime), - MyPyLong_FromInt64(MAX_DATETIME), - MyPyLong_FromInt64(MIN_DATETIME), - (PyObject *) &PyDatetimeArrType_Type)); - Py_DECREF(s); - PyDict_SetItemString(infodict, "TIMEDELTA", -#if defined(NPY_PY3K) - s = Py_BuildValue("CiiiNNO", PyArray_TIMEDELTALTR, -#else - s = Py_BuildValue("ciiiNNO",PyArray_TIMEDELTALTR, -#endif - PyArray_TIMEDELTA, - sizeof(npy_timedelta) * CHAR_BIT, - _ALIGN(npy_timedelta), - MyPyLong_FromInt64(MAX_TIMEDELTA), - MyPyLong_FromInt64(MIN_TIMEDELTA), - (PyObject *)&PyTimedeltaArrType_Type)); - Py_DECREF(s); - -#define SETTYPE(name) \ - Py_INCREF(&Py##name##ArrType_Type); \ - PyDict_SetItemString(infodict, #name, \ - (PyObject *)&Py##name##ArrType_Type) - - SETTYPE(Generic); - SETTYPE(Number); - SETTYPE(Integer); - SETTYPE(Inexact); - SETTYPE(SignedInteger); - SETTYPE(TimeInteger); - SETTYPE(UnsignedInteger); - SETTYPE(Floating); - SETTYPE(ComplexFloating); - SETTYPE(Flexible); - SETTYPE(Character); - -#undef SETTYPE - - PyDict_SetItemString(dict, "typeinfo", infodict); - Py_DECREF(infodict); - return 0; -} - -#undef _MAX_LETTER diff --git 
a/numpy-1.6.2/numpy/core/src/multiarray/arraytypes.h b/numpy-1.6.2/numpy/core/src/multiarray/arraytypes.h deleted file mode 100644 index ff7d4ae408..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/arraytypes.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _NPY_ARRAYTYPES_H_ -#define _NPY_ARRAYTYPES_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyArray_Descr LONGLONG_Descr; -extern NPY_NO_EXPORT PyArray_Descr LONG_Descr; -extern NPY_NO_EXPORT PyArray_Descr INT_Descr; -#endif - -NPY_NO_EXPORT int -set_typeinfo(PyObject *dict); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/buffer.c b/numpy-1.6.2/numpy/core/src/multiarray/buffer.c deleted file mode 100644 index 9bc45a76f3..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/buffer.c +++ /dev/null @@ -1,810 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "buffer.h" -#include "numpyos.h" - -/************************************************************************* - **************** Implement Buffer Protocol **************************** - *************************************************************************/ - -/* removed multiple segment interface */ - -static Py_ssize_t -array_getsegcount(PyArrayObject *self, Py_ssize_t *lenp) -{ - if (lenp) { - *lenp = PyArray_NBYTES(self); - } - if (PyArray_ISONESEGMENT(self)) { - return 1; - } - if (lenp) { - *lenp = 0; - } - return 0; -} - -static Py_ssize_t -array_getreadbuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr) -{ - if (segment != 0) { - PyErr_SetString(PyExc_ValueError, - "accessing non-existing array segment"); - return -1; - } - if (PyArray_ISONESEGMENT(self)) { - *ptrptr = self->data; - return PyArray_NBYTES(self); - } - PyErr_SetString(PyExc_ValueError, "array is not a single segment"); - *ptrptr = NULL; 
- return -1; -} - - -static Py_ssize_t -array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr) -{ - if (PyArray_CHKFLAGS(self, WRITEABLE)) { - return array_getreadbuf(self, segment, (void **) ptrptr); - } - else { - PyErr_SetString(PyExc_ValueError, "array cannot be " - "accessed as a writeable buffer"); - return -1; - } -} - -static Py_ssize_t -array_getcharbuf(PyArrayObject *self, Py_ssize_t segment, constchar **ptrptr) -{ - return array_getreadbuf(self, segment, (void **) ptrptr); -} - - -/************************************************************************* - * PEP 3118 buffer protocol - * - * Implementing PEP 3118 is somewhat convoluted because of the desirata: - * - * - Don't add new members to ndarray or descr structs, to preserve binary - * compatibility. (Also, adding the items is actually not very useful, - * since mutability issues prevent an 1 to 1 relationship between arrays - * and buffer views.) - * - * - Don't use bf_releasebuffer, because it prevents PyArg_ParseTuple("s#", ... - * from working. Breaking this would cause several backward compatibility - * issues already on Python 2.6. - * - * - Behave correctly when array is reshaped in-place, or it's dtype is - * altered. - * - * The solution taken below is to manually track memory allocated for - * Py_buffers. - *************************************************************************/ - -#if PY_VERSION_HEX >= 0x02060000 - -/* - * Format string translator - * - * Translate PyArray_Descr to a PEP 3118 format string. 
- */ - -/* Fast string 'class' */ -typedef struct { - char *s; - int allocated; - int pos; -} _tmp_string_t; - -static int -_append_char(_tmp_string_t *s, char c) -{ - char *p; - if (s->s == NULL) { - s->s = (char*)malloc(16); - s->pos = 0; - s->allocated = 16; - } - if (s->pos >= s->allocated) { - p = (char*)realloc(s->s, 2*s->allocated); - if (p == NULL) { - PyErr_SetString(PyExc_MemoryError, "memory allocation failed"); - return -1; - } - s->s = p; - s->allocated *= 2; - } - s->s[s->pos] = c; - ++s->pos; - return 0; -} - -static int -_append_str(_tmp_string_t *s, char *c) -{ - while (*c != '\0') { - if (_append_char(s, *c)) return -1; - ++c; - } - return 0; -} - -/* - * Return non-zero if a type is aligned in each item in the given array, - * AND, the descr element size is a multiple of the alignment, - * AND, the array data is positioned to alignment granularity. - */ -static int -_is_natively_aligned_at(PyArray_Descr *descr, - PyArrayObject *arr, Py_ssize_t offset) -{ - int k; - - if ((Py_ssize_t)(arr->data) % descr->alignment != 0) { - return 0; - } - - if (offset % descr->alignment != 0) { - return 0; - } - - if (descr->elsize % descr->alignment) { - return 0; - } - - for (k = 0; k < arr->nd; ++k) { - if (arr->dimensions[k] > 1) { - if (arr->strides[k] % descr->alignment != 0) { - return 0; - } - } - } - - return 1; -} - -static int -_buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, - PyArrayObject* arr, Py_ssize_t *offset, - char *active_byteorder) -{ - int k; - char _active_byteorder = '@'; - Py_ssize_t _offset = 0; - - if (active_byteorder == NULL) { - active_byteorder = &_active_byteorder; - } - if (offset == NULL) { - offset = &_offset; - } - - if (descr->subarray) { - PyObject *item; - Py_ssize_t total_count = 1; - Py_ssize_t dim_size; - char buf[128]; - int old_offset; - int ret; - - _append_char(str, '('); - for (k = 0; k < PyTuple_GET_SIZE(descr->subarray->shape); ++k) { - if (k > 0) { - _append_char(str, ','); - } - item = 
PyTuple_GET_ITEM(descr->subarray->shape, k); - dim_size = PyNumber_AsSsize_t(item, NULL); - - PyOS_snprintf(buf, sizeof(buf), "%ld", (long)dim_size); - _append_str(str, buf); - total_count *= dim_size; - } - _append_char(str, ')'); - old_offset = *offset; - ret = _buffer_format_string(descr->subarray->base, str, arr, offset, - active_byteorder); - *offset = old_offset + (*offset - old_offset) * total_count; - return ret; - } - else if (PyDataType_HASFIELDS(descr)) { - int base_offset = *offset; - - _append_str(str, "T{"); - for (k = 0; k < PyTuple_GET_SIZE(descr->names); ++k) { - PyObject *name, *item, *offset_obj, *tmp; - PyArray_Descr *child; - char *p; - Py_ssize_t len; - int new_offset; - - name = PyTuple_GET_ITEM(descr->names, k); - item = PyDict_GetItem(descr->fields, name); - - child = (PyArray_Descr*)PyTuple_GetItem(item, 0); - offset_obj = PyTuple_GetItem(item, 1); - new_offset = base_offset + PyInt_AsLong(offset_obj); - - /* Insert padding manually */ - if (*offset > new_offset) { - PyErr_SetString(PyExc_RuntimeError, - "This should never happen: Invalid offset in " - "buffer format string generation. Please " - "report a bug to the Numpy developers."); - return -1; - } - while (*offset < new_offset) { - _append_char(str, 'x'); - ++*offset; - } - - /* Insert child item */ - _buffer_format_string(child, str, arr, offset, - active_byteorder); - - /* Insert field name */ -#if defined(NPY_PY3K) - /* FIXME: XXX -- should it use UTF-8 here? 
*/ - tmp = PyUnicode_AsUTF8String(name); -#else - tmp = name; -#endif - if (tmp == NULL || PyBytes_AsStringAndSize(tmp, &p, &len) < 0) { - PyErr_SetString(PyExc_ValueError, "invalid field name"); - return -1; - } - _append_char(str, ':'); - while (len > 0) { - if (*p == ':') { - Py_DECREF(tmp); - PyErr_SetString(PyExc_ValueError, - "':' is not an allowed character in buffer " - "field names"); - return -1; - } - _append_char(str, *p); - ++p; - --len; - } - _append_char(str, ':'); -#if defined(NPY_PY3K) - Py_DECREF(tmp); -#endif - } - _append_char(str, '}'); - } - else { - int is_standard_size = 1; - int is_native_only_type = (descr->type_num == NPY_LONGDOUBLE || - descr->type_num == NPY_CLONGDOUBLE); -#if NPY_SIZEOF_LONG_LONG != 8 - is_native_only_type = is_native_only_type || ( - descr->type_num == NPY_LONGLONG || - descr->type_num == NPY_ULONGLONG); -#endif - - *offset += descr->elsize; - - if (descr->byteorder == '=' && - _is_natively_aligned_at(descr, arr, *offset)) { - /* Prefer native types, to cater for Cython */ - is_standard_size = 0; - if (*active_byteorder != '@') { - _append_char(str, '@'); - *active_byteorder = '@'; - } - } - else if (descr->byteorder == '=' && is_native_only_type) { - /* Data types that have no standard size */ - is_standard_size = 0; - if (*active_byteorder != '^') { - _append_char(str, '^'); - *active_byteorder = '^'; - } - } - else if (descr->byteorder == '<' || descr->byteorder == '>' || - descr->byteorder == '=') { - is_standard_size = 1; - if (*active_byteorder != descr->byteorder) { - _append_char(str, descr->byteorder); - *active_byteorder = descr->byteorder; - } - - if (is_native_only_type) { - /* It's not possible to express native-only data types - in non-native byte orders */ - PyErr_Format(PyExc_ValueError, - "cannot expose native-only dtype '%c' in " - "non-native byte order '%c' via buffer interface", - descr->type, descr->byteorder); - } - } - - switch (descr->type_num) { - case NPY_BOOL: if (_append_char(str, '?')) 
return -1; break; - case NPY_BYTE: if (_append_char(str, 'b')) return -1; break; - case NPY_UBYTE: if (_append_char(str, 'B')) return -1; break; - case NPY_SHORT: if (_append_char(str, 'h')) return -1; break; - case NPY_USHORT: if (_append_char(str, 'H')) return -1; break; - case NPY_INT: if (_append_char(str, 'i')) return -1; break; - case NPY_UINT: if (_append_char(str, 'I')) return -1; break; - case NPY_LONG: - if (is_standard_size && (NPY_SIZEOF_LONG == 8)) { - if (_append_char(str, 'q')) return -1; - } - else { - if (_append_char(str, 'l')) return -1; - } - break; - case NPY_ULONG: - if (is_standard_size && (NPY_SIZEOF_LONG == 8)) { - if (_append_char(str, 'Q')) return -1; - } - else { - if (_append_char(str, 'L')) return -1; - } - break; - case NPY_LONGLONG: if (_append_char(str, 'q')) return -1; break; - case NPY_ULONGLONG: if (_append_char(str, 'Q')) return -1; break; - case NPY_HALF: if (_append_char(str, 'e')) return -1; break; - case NPY_FLOAT: if (_append_char(str, 'f')) return -1; break; - case NPY_DOUBLE: if (_append_char(str, 'd')) return -1; break; - case NPY_LONGDOUBLE: if (_append_char(str, 'g')) return -1; break; - case NPY_CFLOAT: if (_append_str(str, "Zf")) return -1; break; - case NPY_CDOUBLE: if (_append_str(str, "Zd")) return -1; break; - case NPY_CLONGDOUBLE: if (_append_str(str, "Zg")) return -1; break; - /* XXX: datetime */ - /* XXX: timedelta */ - case NPY_OBJECT: if (_append_char(str, 'O')) return -1; break; - case NPY_STRING: { - char buf[128]; - PyOS_snprintf(buf, sizeof(buf), "%ds", descr->elsize); - if (_append_str(str, buf)) return -1; - break; - } - case NPY_UNICODE: { - /* Numpy Unicode is always 4-byte */ - char buf[128]; - assert(descr->elsize % 4 == 0); - PyOS_snprintf(buf, sizeof(buf), "%dw", descr->elsize / 4); - if (_append_str(str, buf)) return -1; - break; - } - case NPY_VOID: { - /* Insert padding bytes */ - char buf[128]; - PyOS_snprintf(buf, sizeof(buf), "%dx", descr->elsize); - if (_append_str(str, buf)) return -1; - 
break; - } - default: - PyErr_Format(PyExc_ValueError, - "cannot include dtype '%c' in a buffer", - descr->type); - return -1; - } - } - - return 0; -} - - -/* - * Global information about all active buffers - * - * Note: because for backward compatibility we cannot define bf_releasebuffer, - * we must manually keep track of the additional data required by the buffers. - */ - -/* Additional per-array data required for providing the buffer interface */ -typedef struct { - char *format; - int ndim; - Py_ssize_t *strides; - Py_ssize_t *shape; -} _buffer_info_t; - -/* - * { id(array): [list of pointers to _buffer_info_t, the last one is latest] } - * - * Because shape, strides, and format can be different for different buffers, - * we may need to keep track of multiple buffer infos for each array. - * - * However, when none of them has changed, the same buffer info may be reused. - * - * Thread-safety is provided by GIL. - */ -static PyObject *_buffer_info_cache = NULL; - -/* Fill in the info structure */ -static _buffer_info_t* -_buffer_info_new(PyArrayObject *arr) -{ - _buffer_info_t *info; - _tmp_string_t fmt = {0,0,0}; - int k; - - info = (_buffer_info_t*)malloc(sizeof(_buffer_info_t)); - - /* Fill in format */ - if (_buffer_format_string(PyArray_DESCR(arr), &fmt, arr, NULL, NULL) != 0) { - free(info); - return NULL; - } - _append_char(&fmt, '\0'); - info->format = fmt.s; - - /* Fill in shape and strides */ - info->ndim = PyArray_NDIM(arr); - - if (info->ndim == 0) { - info->shape = NULL; - info->strides = NULL; - } - else { - info->shape = (Py_ssize_t*)malloc(sizeof(Py_ssize_t) - * PyArray_NDIM(arr) * 2 + 1); - info->strides = info->shape + PyArray_NDIM(arr); - for (k = 0; k < PyArray_NDIM(arr); ++k) { - info->shape[k] = PyArray_DIMS(arr)[k]; - info->strides[k] = PyArray_STRIDES(arr)[k]; - } - } - - return info; -} - -/* Compare two info structures */ -static Py_ssize_t -_buffer_info_cmp(_buffer_info_t *a, _buffer_info_t *b) -{ - Py_ssize_t c; - int k; - - c = 
strcmp(a->format, b->format); - if (c != 0) return c; - - c = a->ndim - b->ndim; - if (c != 0) return c; - - for (k = 0; k < a->ndim; ++k) { - c = a->shape[k] - b->shape[k]; - if (c != 0) return c; - c = a->strides[k] - b->strides[k]; - if (c != 0) return c; - } - - return 0; -} - -static void -_buffer_info_free(_buffer_info_t *info) -{ - if (info->format) { - free(info->format); - } - if (info->shape) { - free(info->shape); - } - free(info); -} - -/* Get buffer info from the global dictionary */ -static _buffer_info_t* -_buffer_get_info(PyObject *arr) -{ - PyObject *key, *item_list, *item; - _buffer_info_t *info = NULL, *old_info = NULL; - - if (_buffer_info_cache == NULL) { - _buffer_info_cache = PyDict_New(); - if (_buffer_info_cache == NULL) { - return NULL; - } - } - - /* Compute information */ - info = _buffer_info_new((PyArrayObject*)arr); - if (info == NULL) { - return NULL; - } - - /* Check if it is identical with an old one; reuse old one, if yes */ - key = PyLong_FromVoidPtr((void*)arr); - item_list = PyDict_GetItem(_buffer_info_cache, key); - - if (item_list != NULL) { - Py_INCREF(item_list); - if (PyList_GET_SIZE(item_list) > 0) { - item = PyList_GetItem(item_list, PyList_GET_SIZE(item_list) - 1); - old_info = (_buffer_info_t*)PyLong_AsVoidPtr(item); - - if (_buffer_info_cmp(info, old_info) == 0) { - _buffer_info_free(info); - info = old_info; - } - } - } - else { - item_list = PyList_New(0); - PyDict_SetItem(_buffer_info_cache, key, item_list); - } - - if (info != old_info) { - /* Needs insertion */ - item = PyLong_FromVoidPtr((void*)info); - PyList_Append(item_list, item); - Py_DECREF(item); - } - - Py_DECREF(item_list); - Py_DECREF(key); - return info; -} - -/* Clear buffer info from the global dictionary */ -static void -_buffer_clear_info(PyObject *arr) -{ - PyObject *key, *item_list, *item; - _buffer_info_t *info; - int k; - - if (_buffer_info_cache == NULL) { - return; - } - - key = PyLong_FromVoidPtr((void*)arr); - item_list = 
PyDict_GetItem(_buffer_info_cache, key); - if (item_list != NULL) { - for (k = 0; k < PyList_GET_SIZE(item_list); ++k) { - item = PyList_GET_ITEM(item_list, k); - info = (_buffer_info_t*)PyLong_AsVoidPtr(item); - _buffer_info_free(info); - } - PyDict_DelItem(_buffer_info_cache, key); - } - - Py_DECREF(key); -} - -/* - * Retrieving buffers - */ - -static int -array_getbuffer(PyObject *obj, Py_buffer *view, int flags) -{ - PyArrayObject *self; - _buffer_info_t *info = NULL; - - self = (PyArrayObject*)obj; - - /* Check whether we can provide the wanted properties */ - if ((flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS && - !PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)) { - PyErr_SetString(PyExc_ValueError, "ndarray is not C-contiguous"); - goto fail; - } - if ((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS && - !PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)) { - PyErr_SetString(PyExc_ValueError, "ndarray is not Fortran contiguous"); - goto fail; - } - if ((flags & PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS - && !PyArray_ISONESEGMENT(self)) { - PyErr_SetString(PyExc_ValueError, "ndarray is not contiguous"); - goto fail; - } - if ((flags & PyBUF_STRIDES) != PyBUF_STRIDES && - (flags & PyBUF_ND) == PyBUF_ND && - !PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)) { - /* Non-strided N-dim buffers must be C-contiguous */ - PyErr_SetString(PyExc_ValueError, "ndarray is not C-contiguous"); - goto fail; - } - if ((flags & PyBUF_WRITEABLE) == PyBUF_WRITEABLE && - !PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_ValueError, "ndarray is not writeable"); - goto fail; - } - - if (view == NULL) { - PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); - goto fail; - } - - /* Fill in information */ - info = _buffer_get_info(obj); - if (info == NULL) { - goto fail; - } - - view->buf = PyArray_DATA(self); - view->suboffsets = NULL; - view->itemsize = PyArray_ITEMSIZE(self); - view->readonly = !PyArray_ISWRITEABLE(self); - view->internal = NULL; - view->len = PyArray_NBYTES(self); 
- if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) { - view->format = info->format; - } else { - view->format = NULL; - } - if ((flags & PyBUF_ND) == PyBUF_ND) { - view->ndim = info->ndim; - view->shape = info->shape; - } - else { - view->ndim = 0; - view->shape = NULL; - } - if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) { - view->strides = info->strides; - } - else { - view->strides = NULL; - } - view->obj = (PyObject*)self; - - Py_INCREF(self); - return 0; - -fail: - return -1; -} - - -/* - * NOTE: for backward compatibility (esp. with PyArg_ParseTuple("s#", ...)) - * we do *not* define bf_releasebuffer at all. - * - * Instead, any extra data allocated with the buffer is released only in - * array_dealloc. - * - * Ensuring that the buffer stays in place is taken care by refcounting; - * ndarrays do not reallocate if there are references to them, and a buffer - * view holds one reference. - */ - -NPY_NO_EXPORT void -_array_dealloc_buffer_info(PyArrayObject *self) -{ - int reset_error_state = 0; - PyObject *ptype, *pvalue, *ptraceback; - - /* This function may be called when processing an exception -- - * we need to stash the error state to avoid confusing PyDict - */ - - if (PyErr_Occurred()) { - reset_error_state = 1; - PyErr_Fetch(&ptype, &pvalue, &ptraceback); - } - - _buffer_clear_info((PyObject*)self); - - if (reset_error_state) { - PyErr_Restore(ptype, pvalue, ptraceback); - } -} - -#else - -NPY_NO_EXPORT void -_array_dealloc_buffer_info(PyArrayObject *self) -{ -} - -#endif - -/*************************************************************************/ - -NPY_NO_EXPORT PyBufferProcs array_as_buffer = { -#if !defined(NPY_PY3K) -#if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ -#else - (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - 
(getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ - (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ -#endif -#endif -#if PY_VERSION_HEX >= 0x02060000 - (getbufferproc)array_getbuffer, - (releasebufferproc)0, -#endif -}; - - -/************************************************************************* - * Convert PEP 3118 format string to PyArray_Descr - */ -#if PY_VERSION_HEX >= 0x02060000 - -NPY_NO_EXPORT PyArray_Descr* -_descriptor_from_pep3118_format(char *s) -{ - char *buf, *p; - int in_name = 0; - PyObject *descr; - PyObject *str; - PyObject *_numpy_internal; - - if (s == NULL) { - return PyArray_DescrNewFromType(PyArray_BYTE); - } - - /* Strip whitespace, except from field names */ - buf = (char*)malloc(strlen(s) + 1); - p = buf; - while (*s != '\0') { - if (*s == ':') { - in_name = !in_name; - *p = *s; - } - else if (in_name || !NumPyOS_ascii_isspace(*s)) { - *p = *s; - } - ++p; - ++s; - } - *p = '\0'; - - str = PyUString_FromStringAndSize(buf, strlen(buf)); - free(buf); - if (str == NULL) { - return NULL; - } - - /* Convert */ - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - Py_DECREF(str); - return NULL; - } - descr = PyObject_CallMethod( - _numpy_internal, "_dtype_from_pep3118", "O", str); - Py_DECREF(str); - Py_DECREF(_numpy_internal); - if (descr == NULL) { - PyErr_Format(PyExc_ValueError, - "'%s' is not a valid PEP 3118 buffer format string", buf); - return NULL; - } - if (!PyArray_DescrCheck(descr)) { - PyErr_Format(PyExc_RuntimeError, - "internal error: numpy.core._internal._dtype_from_pep3118 " - "did not return a valid dtype, got %s", buf); - return NULL; - } - return (PyArray_Descr*)descr; -} - -#else - -NPY_NO_EXPORT PyArray_Descr* -_descriptor_from_pep3118_format(char *s) -{ - PyErr_SetString(PyExc_RuntimeError, - "PEP 3118 is not supported on Python versions < 2.6"); - return NULL; -} - -#endif diff --git 
a/numpy-1.6.2/numpy/core/src/multiarray/buffer.h b/numpy-1.6.2/numpy/core/src/multiarray/buffer.h deleted file mode 100644 index c0a1f8e260..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/buffer.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _NPY_PRIVATE_BUFFER_H_ -#define _NPY_PRIVATE_BUFFER_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyBufferProcs array_as_buffer; -#else -NPY_NO_EXPORT PyBufferProcs array_as_buffer; -#endif - -NPY_NO_EXPORT void -_array_dealloc_buffer_info(PyArrayObject *self); - -NPY_NO_EXPORT PyArray_Descr* -_descriptor_from_pep3118_format(char *s); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/calculation.c b/numpy-1.6.2/numpy/core/src/multiarray/calculation.c deleted file mode 100644 index 41d96df01c..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/calculation.c +++ /dev/null @@ -1,1162 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "number.h" - -#include "calculation.h" - -/* FIXME: just remove _check_axis ? */ -#define _check_axis PyArray_CheckAxis -#define PyAO PyArrayObject - -static double -power_of_ten(int n) -{ - static const double p10[] = {1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8}; - double ret; - if (n < 9) { - ret = p10[n]; - } - else { - ret = 1e9; - while (n-- > 9) { - ret *= 10.; - } - } - return ret; -} - -/*NUMPY_API - * ArgMax - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) -{ - PyArrayObject *ap = NULL, *rp = NULL; - PyArray_ArgFunc* arg_func; - char *ip; - intp *rptr; - intp i, n, m; - int elsize; - int copyret = 0; - NPY_BEGIN_THREADS_DEF; - - if ((ap=(PyAO *)_check_axis(op, &axis, 0)) == NULL) { - return NULL; - } - /* - * We need to permute the array so that axis is placed at the end. 
- * And all other dimensions are shifted left. - */ - if (axis != ap->nd-1) { - PyArray_Dims newaxes; - intp dims[MAX_DIMS]; - int i; - - newaxes.ptr = dims; - newaxes.len = ap->nd; - for (i = 0; i < axis; i++) dims[i] = i; - for (i = axis; i < ap->nd - 1; i++) dims[i] = i + 1; - dims[ap->nd - 1] = axis; - op = (PyAO *)PyArray_Transpose(ap, &newaxes); - Py_DECREF(ap); - if (op == NULL) { - return NULL; - } - } - else { - op = ap; - } - - /* Will get native-byte order contiguous copy. */ - ap = (PyArrayObject *) - PyArray_ContiguousFromAny((PyObject *)op, - op->descr->type_num, 1, 0); - Py_DECREF(op); - if (ap == NULL) { - return NULL; - } - arg_func = ap->descr->f->argmax; - if (arg_func == NULL) { - PyErr_SetString(PyExc_TypeError, "data type not ordered"); - goto fail; - } - elsize = ap->descr->elsize; - m = ap->dimensions[ap->nd-1]; - if (m == 0) { - PyErr_SetString(PyExc_ValueError, - "attempt to get argmax/argmin "\ - "of an empty sequence"); - goto fail; - } - - if (!out) { - rp = (PyArrayObject *)PyArray_New(Py_TYPE(ap), ap->nd-1, - ap->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, - (PyObject *)ap); - if (rp == NULL) { - goto fail; - } - } - else { - if (PyArray_SIZE(out) != - PyArray_MultiplyList(ap->dimensions, ap->nd - 1)) { - PyErr_SetString(PyExc_TypeError, - "invalid shape for output array."); - } - rp = (PyArrayObject *)\ - PyArray_FromArray(out, - PyArray_DescrFromType(PyArray_INTP), - NPY_CARRAY | NPY_UPDATEIFCOPY); - if (rp == NULL) { - goto fail; - } - if (rp != out) { - copyret = 1; - } - } - - NPY_BEGIN_THREADS_DESCR(ap->descr); - n = PyArray_SIZE(ap)/m; - rptr = (intp *)rp->data; - for (ip = ap->data, i = 0; i < n; i++, ip += elsize*m) { - arg_func(ip, m, rptr, ap); - rptr += 1; - } - NPY_END_THREADS_DESCR(ap->descr); - - Py_DECREF(ap); - if (copyret) { - PyArrayObject *obj; - obj = (PyArrayObject *)rp->base; - Py_INCREF(obj); - Py_DECREF(rp); - rp = obj; - } - return (PyObject *)rp; - - fail: - Py_DECREF(ap); - Py_XDECREF(rp); - return NULL; 
-} - -/*NUMPY_API - * ArgMin - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) -{ - PyArrayObject *ap = NULL, *rp = NULL; - PyArray_ArgFunc* arg_func; - char *ip; - intp *rptr; - intp i, n, m; - int elsize; - NPY_BEGIN_THREADS_DEF; - - if ((ap=(PyArrayObject *)PyArray_CheckAxis(op, &axis, 0)) == NULL) { - return NULL; - } - /* - * We need to permute the array so that axis is placed at the end. - * And all other dimensions are shifted left. - */ - if (axis != PyArray_NDIM(ap)-1) { - PyArray_Dims newaxes; - intp dims[MAX_DIMS]; - int i; - - newaxes.ptr = dims; - newaxes.len = PyArray_NDIM(ap); - for (i = 0; i < axis; i++) dims[i] = i; - for (i = axis; i < PyArray_NDIM(ap) - 1; i++) dims[i] = i + 1; - dims[PyArray_NDIM(ap) - 1] = axis; - op = (PyArrayObject *)PyArray_Transpose(ap, &newaxes); - Py_DECREF(ap); - if (op == NULL) { - return NULL; - } - } - else { - op = ap; - } - - /* Will get native-byte order contiguous copy. */ - ap = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)op, - PyArray_DESCR(op)->type_num, 1, 0); - Py_DECREF(op); - if (ap == NULL) { - return NULL; - } - arg_func = PyArray_DESCR(ap)->f->argmin; - if (arg_func == NULL) { - PyErr_SetString(PyExc_TypeError, "data type not ordered"); - goto fail; - } - elsize = PyArray_DESCR(ap)->elsize; - m = PyArray_DIMS(ap)[PyArray_NDIM(ap)-1]; - if (m == 0) { - PyErr_SetString(PyExc_ValueError, - "attempt to get argmax/argmin "\ - "of an empty sequence"); - goto fail; - } - - if (!out) { - rp = (PyArrayObject *)PyArray_New(Py_TYPE(ap), PyArray_NDIM(ap)-1, - PyArray_DIMS(ap), PyArray_INTP, - NULL, NULL, 0, 0, - (PyObject *)ap); - if (rp == NULL) { - goto fail; - } - } - else { - if (PyArray_SIZE(out) != - PyArray_MultiplyList(PyArray_DIMS(ap), PyArray_NDIM(ap) - 1)) { - PyErr_SetString(PyExc_TypeError, - "invalid shape for output array."); - } - rp = (PyArrayObject *)PyArray_FromArray(out, - PyArray_DescrFromType(PyArray_INTP), - NPY_CARRAY | 
NPY_UPDATEIFCOPY); - if (rp == NULL) { - goto fail; - } - } - - NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap)); - n = PyArray_SIZE(ap)/m; - rptr = (intp *)PyArray_DATA(rp); - for (ip = PyArray_DATA(ap), i = 0; i < n; i++, ip += elsize*m) { - arg_func(ip, m, rptr, ap); - rptr += 1; - } - NPY_END_THREADS_DESCR(PyArray_DESCR(ap)); - - Py_DECREF(ap); - /* Trigger the UPDATEIFCOPY if necessary */ - if (out != NULL && out != rp) { - Py_DECREF(rp); - rp = out; - Py_INCREF(rp); - } - return (PyObject *)rp; - - fail: - Py_DECREF(ap); - Py_XDECREF(rp); - return NULL; -} - -/*NUMPY_API - * Max - */ -NPY_NO_EXPORT PyObject * -PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyArrayObject *arr; - PyObject *ret; - - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction(arr, n_ops.maximum, axis, - arr->descr->type_num, out); - Py_DECREF(arr); - return ret; -} - -/*NUMPY_API - * Min - */ -NPY_NO_EXPORT PyObject * -PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyArrayObject *arr; - PyObject *ret; - - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction(arr, n_ops.minimum, axis, - arr->descr->type_num, out); - Py_DECREF(arr); - return ret; -} - -/*NUMPY_API - * Ptp - */ -NPY_NO_EXPORT PyObject * -PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) -{ - PyArrayObject *arr; - PyObject *ret; - PyObject *obj1 = NULL, *obj2 = NULL; - - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { - return NULL; - } - obj1 = PyArray_Max(arr, axis, out); - if (obj1 == NULL) { - goto fail; - } - obj2 = PyArray_Min(arr, axis, NULL); - if (obj2 == NULL) { - goto fail; - } - Py_DECREF(arr); - if (out) { - ret = PyObject_CallFunction(n_ops.subtract, "OOO", out, obj2, out); - } - else { - ret = PyNumber_Subtract(obj1, obj2); - } - Py_DECREF(obj1); - Py_DECREF(obj2); - return ret; - - fail: - Py_XDECREF(arr); - 
Py_XDECREF(obj1); - Py_XDECREF(obj2); - return NULL; -} - - - -/*NUMPY_API - * Set variance to 1 to by-pass square-root calculation and return variance - * Std - */ -NPY_NO_EXPORT PyObject * -PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, - int variance) -{ - return __New_PyArray_Std(self, axis, rtype, out, variance, 0); -} - -NPY_NO_EXPORT PyObject * -__New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, - int variance, double num) -{ - PyObject *obj1 = NULL, *obj2 = NULL, *obj3 = NULL, *new = NULL; - PyObject *ret = NULL, *newshape = NULL; - double scl; - int i, n; - intp val; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - /* Compute and reshape mean */ - obj1 = PyArray_EnsureAnyArray(PyArray_Mean((PyAO *)new, axis, rtype, NULL)); - if (obj1 == NULL) { - Py_DECREF(new); - return NULL; - } - n = PyArray_NDIM(new); - newshape = PyTuple_New(n); - if (newshape == NULL) { - Py_DECREF(obj1); - Py_DECREF(new); - return NULL; - } - for (i = 0; i < n; i++) { - if (i == axis) { - val = 1; - } - else { - val = PyArray_DIM(new,i); - } - PyTuple_SET_ITEM(newshape, i, PyInt_FromLong((long)val)); - } - obj2 = PyArray_Reshape((PyAO *)obj1, newshape); - Py_DECREF(obj1); - Py_DECREF(newshape); - if (obj2 == NULL) { - Py_DECREF(new); - return NULL; - } - - /* Compute x = x - mx */ - obj1 = PyArray_EnsureAnyArray(PyNumber_Subtract((PyObject *)new, obj2)); - Py_DECREF(obj2); - if (obj1 == NULL) { - Py_DECREF(new); - return NULL; - } - /* Compute x * x */ - if (PyArray_ISCOMPLEX(obj1)) { - obj3 = PyArray_Conjugate((PyAO *)obj1, NULL); - } - else { - obj3 = obj1; - Py_INCREF(obj1); - } - if (obj3 == NULL) { - Py_DECREF(new); - return NULL; - } - obj2 = PyArray_EnsureAnyArray \ - (PyArray_GenericBinaryFunction((PyAO *)obj1, obj3, n_ops.multiply)); - Py_DECREF(obj1); - Py_DECREF(obj3); - if (obj2 == NULL) { - Py_DECREF(new); - return NULL; - } - if (PyArray_ISCOMPLEX(obj2)) { - obj3 = 
PyObject_GetAttrString(obj2, "real"); - switch(rtype) { - case NPY_CDOUBLE: - rtype = NPY_DOUBLE; - break; - case NPY_CFLOAT: - rtype = NPY_FLOAT; - break; - case NPY_CLONGDOUBLE: - rtype = NPY_LONGDOUBLE; - break; - } - } - else { - obj3 = obj2; - Py_INCREF(obj2); - } - if (obj3 == NULL) { - Py_DECREF(new); - return NULL; - } - /* Compute add.reduce(x*x,axis) */ - obj1 = PyArray_GenericReduceFunction((PyAO *)obj3, n_ops.add, - axis, rtype, NULL); - Py_DECREF(obj3); - Py_DECREF(obj2); - if (obj1 == NULL) { - Py_DECREF(new); - return NULL; - } - n = PyArray_DIM(new,axis); - Py_DECREF(new); - scl = n - num; - if (scl <= 0) { - scl = NPY_NAN; - } - obj2 = PyFloat_FromDouble(1.0/scl); - if (obj2 == NULL) { - Py_DECREF(obj1); - return NULL; - } - ret = PyNumber_Multiply(obj1, obj2); - Py_DECREF(obj1); - Py_DECREF(obj2); - - if (!variance) { - obj1 = PyArray_EnsureAnyArray(ret); - /* sqrt() */ - ret = PyArray_GenericUnaryFunction((PyAO *)obj1, n_ops.sqrt); - Py_DECREF(obj1); - } - if (ret == NULL) { - return NULL; - } - if (PyArray_CheckExact(self)) { - goto finish; - } - if (PyArray_Check(self) && Py_TYPE(self) == Py_TYPE(ret)) { - goto finish; - } - obj1 = PyArray_EnsureArray(ret); - if (obj1 == NULL) { - return NULL; - } - ret = PyArray_View((PyAO *)obj1, NULL, Py_TYPE(self)); - Py_DECREF(obj1); - -finish: - if (out) { - if (PyArray_CopyAnyInto(out, (PyArrayObject *)ret) < 0) { - Py_DECREF(ret); - return NULL; - } - Py_DECREF(ret); - Py_INCREF(out); - return (PyObject *)out; - } - return ret; -} - - -/*NUMPY_API - *Sum - */ -NPY_NO_EXPORT PyObject * -PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*NUMPY_API - * Prod - */ -NPY_NO_EXPORT PyObject * -PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ 
- PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.multiply, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*NUMPY_API - *CumSum - */ -NPY_NO_EXPORT PyObject * -PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.add, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*NUMPY_API - * CumProd - */ -NPY_NO_EXPORT PyObject * -PyArray_CumProd(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - - ret = PyArray_GenericAccumulateFunction((PyAO *)new, - n_ops.multiply, axis, - rtype, out); - Py_DECREF(new); - return ret; -} - -/*NUMPY_API - * Round - */ -NPY_NO_EXPORT PyObject * -PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) -{ - PyObject *f, *ret = NULL, *tmp, *op1, *op2; - int ret_int=0; - PyArray_Descr *my_descr; - if (out && (PyArray_SIZE(out) != PyArray_SIZE(a))) { - PyErr_SetString(PyExc_ValueError, - "invalid output shape"); - return NULL; - } - if (PyArray_ISCOMPLEX(a)) { - PyObject *part; - PyObject *round_part; - PyObject *new; - int res; - - if (out) { - new = (PyObject *)out; - Py_INCREF(new); - } - else { - new = PyArray_Copy(a); - if (new == NULL) { - return NULL; - } - } - - /* new.real = a.real.round(decimals) */ - part = PyObject_GetAttrString(new, "real"); - if (part == NULL) { - Py_DECREF(new); - return NULL; - } - part = PyArray_EnsureAnyArray(part); - round_part = PyArray_Round((PyArrayObject *)part, - decimals, NULL); - Py_DECREF(part); - if (round_part == NULL) { - Py_DECREF(new); - return NULL; - } - res = PyObject_SetAttrString(new, "real", round_part); - Py_DECREF(round_part); - if (res < 0) { - Py_DECREF(new); - 
return NULL; - } - - /* new.imag = a.imag.round(decimals) */ - part = PyObject_GetAttrString(new, "imag"); - if (part == NULL) { - Py_DECREF(new); - return NULL; - } - part = PyArray_EnsureAnyArray(part); - round_part = PyArray_Round((PyArrayObject *)part, - decimals, NULL); - Py_DECREF(part); - if (round_part == NULL) { - Py_DECREF(new); - return NULL; - } - res = PyObject_SetAttrString(new, "imag", round_part); - Py_DECREF(round_part); - if (res < 0) { - Py_DECREF(new); - return NULL; - } - return new; - } - /* do the most common case first */ - if (decimals >= 0) { - if (PyArray_ISINTEGER(a)) { - if (out) { - if (PyArray_CopyAnyInto(out, a) < 0) { - return NULL; - } - Py_INCREF(out); - return (PyObject *)out; - } - else { - Py_INCREF(a); - return (PyObject *)a; - } - } - if (decimals == 0) { - if (out) { - return PyObject_CallFunction(n_ops.rint, "OO", a, out); - } - return PyObject_CallFunction(n_ops.rint, "O", a); - } - op1 = n_ops.multiply; - op2 = n_ops.true_divide; - } - else { - op1 = n_ops.true_divide; - op2 = n_ops.multiply; - decimals = -decimals; - } - if (!out) { - if (PyArray_ISINTEGER(a)) { - ret_int = 1; - my_descr = PyArray_DescrFromType(NPY_DOUBLE); - } - else { - Py_INCREF(a->descr); - my_descr = a->descr; - } - out = (PyArrayObject *)PyArray_Empty(a->nd, a->dimensions, - my_descr, - PyArray_ISFORTRAN(a)); - if (out == NULL) { - return NULL; - } - } - else { - Py_INCREF(out); - } - f = PyFloat_FromDouble(power_of_ten(decimals)); - if (f == NULL) { - return NULL; - } - ret = PyObject_CallFunction(op1, "OOO", a, f, out); - if (ret == NULL) { - goto finish; - } - tmp = PyObject_CallFunction(n_ops.rint, "OO", ret, ret); - if (tmp == NULL) { - Py_DECREF(ret); - ret = NULL; - goto finish; - } - Py_DECREF(tmp); - tmp = PyObject_CallFunction(op2, "OOO", ret, f, ret); - if (tmp == NULL) { - Py_DECREF(ret); - ret = NULL; - goto finish; - } - Py_DECREF(tmp); - - finish: - Py_DECREF(f); - Py_DECREF(out); - if (ret_int) { - Py_INCREF(a->descr); - tmp = 
PyArray_CastToType((PyArrayObject *)ret, - a->descr, PyArray_ISFORTRAN(a)); - Py_DECREF(ret); - return tmp; - } - return ret; -} - - -/*NUMPY_API - * Mean - */ -NPY_NO_EXPORT PyObject * -PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) -{ - PyObject *obj1 = NULL, *obj2 = NULL; - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - obj1 = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, - rtype, out); - obj2 = PyFloat_FromDouble((double) PyArray_DIM(new,axis)); - Py_DECREF(new); - if (obj1 == NULL || obj2 == NULL) { - Py_XDECREF(obj1); - Py_XDECREF(obj2); - return NULL; - } - if (!out) { -#if defined(NPY_PY3K) - ret = PyNumber_TrueDivide(obj1, obj2); -#else - ret = PyNumber_Divide(obj1, obj2); -#endif - } - else { - ret = PyObject_CallFunction(n_ops.divide, "OOO", out, obj2, out); - } - Py_DECREF(obj1); - Py_DECREF(obj2); - return ret; -} - -/*NUMPY_API - * Any - */ -NPY_NO_EXPORT PyObject * -PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction((PyAO *)new, - n_ops.logical_or, axis, - PyArray_BOOL, out); - Py_DECREF(new); - return ret; -} - -/*NUMPY_API - * All - */ -NPY_NO_EXPORT PyObject * -PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) -{ - PyObject *new, *ret; - - if ((new = _check_axis(self, &axis, 0)) == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction((PyAO *)new, - n_ops.logical_and, axis, - PyArray_BOOL, out); - Py_DECREF(new); - return ret; -} - - -static PyObject * -_GenericBinaryOutFunction(PyArrayObject *m1, PyObject *m2, PyArrayObject *out, - PyObject *op) -{ - if (out == NULL) { - return PyObject_CallFunction(op, "OO", m1, m2); - } - else { - return PyObject_CallFunction(op, "OOO", m1, m2, out); - } -} - -static PyObject * -_slow_array_clip(PyArrayObject *self, PyObject *min, PyObject *max, 
PyArrayObject *out) -{ - PyObject *res1=NULL, *res2=NULL; - - if (max != NULL) { - res1 = _GenericBinaryOutFunction(self, max, out, n_ops.minimum); - if (res1 == NULL) { - return NULL; - } - } - else { - res1 = (PyObject *)self; - Py_INCREF(res1); - } - - if (min != NULL) { - res2 = _GenericBinaryOutFunction((PyArrayObject *)res1, - min, out, n_ops.maximum); - if (res2 == NULL) { - Py_XDECREF(res1); - return NULL; - } - } - else { - res2 = res1; - Py_INCREF(res2); - } - Py_DECREF(res1); - return res2; -} - -/*NUMPY_API - * Clip - */ -NPY_NO_EXPORT PyObject * -PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *out) -{ - PyArray_FastClipFunc *func; - int outgood = 0, ingood = 0; - PyArrayObject *maxa = NULL; - PyArrayObject *mina = NULL; - PyArrayObject *newout = NULL, *newin = NULL; - PyArray_Descr *indescr, *newdescr; - char *max_data, *min_data; - PyObject *zero; - - if ((max == NULL) && (min == NULL)) { - PyErr_SetString(PyExc_ValueError, "array_clip: must set either max "\ - "or min"); - return NULL; - } - - func = self->descr->f->fastclip; - if (func == NULL || (min != NULL && !PyArray_CheckAnyScalar(min)) || - (max != NULL && !PyArray_CheckAnyScalar(max))) { - return _slow_array_clip(self, min, max, out); - } - /* Use the fast scalar clip function */ - - /* First we need to figure out the correct type */ - indescr = NULL; - if (min != NULL) { - indescr = PyArray_DescrFromObject(min, NULL); - if (indescr == NULL) { - return NULL; - } - } - if (max != NULL) { - newdescr = PyArray_DescrFromObject(max, indescr); - Py_XDECREF(indescr); - if (newdescr == NULL) { - return NULL; - } - } - else { - /* Steal the reference */ - newdescr = indescr; - } - - - /* - * Use the scalar descriptor only if it is of a bigger - * KIND than the input array (and then find the - * type that matches both). 
- */ - if (PyArray_ScalarKind(newdescr->type_num, NULL) > - PyArray_ScalarKind(self->descr->type_num, NULL)) { - indescr = PyArray_PromoteTypes(newdescr, self->descr); - func = indescr->f->fastclip; - if (func == NULL) { - return _slow_array_clip(self, min, max, out); - } - } - else { - indescr = self->descr; - Py_INCREF(indescr); - } - Py_DECREF(newdescr); - - if (!PyDataType_ISNOTSWAPPED(indescr)) { - PyArray_Descr *descr2; - descr2 = PyArray_DescrNewByteorder(indescr, '='); - Py_DECREF(indescr); - if (descr2 == NULL) { - goto fail; - } - indescr = descr2; - } - - /* Convert max to an array */ - if (max != NULL) { - maxa = (NPY_AO *)PyArray_FromAny(max, indescr, 0, 0, - NPY_DEFAULT, NULL); - if (maxa == NULL) { - return NULL; - } - } - else { - /* Side-effect of PyArray_FromAny */ - Py_DECREF(indescr); - } - - /* - * If we are unsigned, then make sure min is not < 0 - * This is to match the behavior of _slow_array_clip - * - * We allow min and max to go beyond the limits - * for other data-types in which case they - * are interpreted as their modular counterparts. 
- */ - if (min != NULL) { - if (PyArray_ISUNSIGNED(self)) { - int cmp; - zero = PyInt_FromLong(0); - cmp = PyObject_RichCompareBool(min, zero, Py_LT); - if (cmp == -1) { - Py_DECREF(zero); - goto fail; - } - if (cmp == 1) { - min = zero; - } - else { - Py_DECREF(zero); - Py_INCREF(min); - } - } - else { - Py_INCREF(min); - } - - /* Convert min to an array */ - Py_INCREF(indescr); - mina = (NPY_AO *)PyArray_FromAny(min, indescr, 0, 0, - NPY_DEFAULT, NULL); - Py_DECREF(min); - if (mina == NULL) { - goto fail; - } - } - - - /* - * Check to see if input is single-segment, aligned, - * and in native byteorder - */ - if (PyArray_ISONESEGMENT(self) && PyArray_CHKFLAGS(self, ALIGNED) && - PyArray_ISNOTSWAPPED(self) && (self->descr == indescr)) { - ingood = 1; - } - if (!ingood) { - int flags; - - if (PyArray_ISFORTRAN(self)) { - flags = NPY_FARRAY; - } - else { - flags = NPY_CARRAY; - } - Py_INCREF(indescr); - newin = (NPY_AO *)PyArray_FromArray(self, indescr, flags); - if (newin == NULL) { - goto fail; - } - } - else { - newin = self; - Py_INCREF(newin); - } - - /* - * At this point, newin is a single-segment, aligned, and correct - * byte-order array of the correct type - * - * if ingood == 0, then it is a copy, otherwise, - * it is the original input. 
- */ - - /* - * If we have already made a copy of the data, then use - * that as the output array - */ - if (out == NULL && !ingood) { - out = newin; - } - - /* - * Now, we know newin is a usable array for fastclip, - * we need to make sure the output array is available - * and usable - */ - if (out == NULL) { - Py_INCREF(indescr); - out = (NPY_AO*)PyArray_NewFromDescr(Py_TYPE(self), - indescr, self->nd, - self->dimensions, - NULL, NULL, - PyArray_ISFORTRAN(self), - (PyObject *)self); - if (out == NULL) { - goto fail; - } - outgood = 1; - } - else Py_INCREF(out); - /* Input is good at this point */ - if (out == newin) { - outgood = 1; - } - if (!outgood && PyArray_ISONESEGMENT(out) && - PyArray_CHKFLAGS(out, ALIGNED) && PyArray_ISNOTSWAPPED(out) && - PyArray_EquivTypes(out->descr, indescr)) { - outgood = 1; - } - - /* - * Do we still not have a suitable output array? - * Create one, now - */ - if (!outgood) { - int oflags; - if (PyArray_ISFORTRAN(out)) - oflags = NPY_FARRAY; - else - oflags = NPY_CARRAY; - oflags |= NPY_UPDATEIFCOPY | NPY_FORCECAST; - Py_INCREF(indescr); - newout = (NPY_AO*)PyArray_FromArray(out, indescr, oflags); - if (newout == NULL) { - goto fail; - } - } - else { - newout = out; - Py_INCREF(newout); - } - - /* make sure the shape of the output array is the same */ - if (!PyArray_SAMESHAPE(newin, newout)) { - PyErr_SetString(PyExc_ValueError, "clip: Output array must have the" - "same shape as the input."); - goto fail; - } - if (newout->data != newin->data) { - memcpy(newout->data, newin->data, PyArray_NBYTES(newin)); - } - - /* Now we can call the fast-clip function */ - min_data = max_data = NULL; - if (mina != NULL) { - min_data = mina->data; - } - if (maxa != NULL) { - max_data = maxa->data; - } - func(newin->data, PyArray_SIZE(newin), min_data, max_data, newout->data); - - /* Clean up temporary variables */ - Py_XDECREF(mina); - Py_XDECREF(maxa); - Py_DECREF(newin); - /* Copy back into out if out was not already a nice array. 
*/ - Py_DECREF(newout); - return (PyObject *)out; - - fail: - Py_XDECREF(maxa); - Py_XDECREF(mina); - Py_XDECREF(newin); - PyArray_XDECREF_ERR(newout); - return NULL; -} - - -/*NUMPY_API - * Conjugate - */ -NPY_NO_EXPORT PyObject * -PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) -{ - if (PyArray_ISCOMPLEX(self)) { - if (out == NULL) { - return PyArray_GenericUnaryFunction(self, - n_ops.conjugate); - } - else { - return PyArray_GenericBinaryFunction(self, - (PyObject *)out, - n_ops.conjugate); - } - } - else { - PyArrayObject *ret; - if (out) { - if (PyArray_CopyAnyInto(out, self) < 0) { - return NULL; - } - ret = out; - } - else { - ret = self; - } - Py_INCREF(ret); - return (PyObject *)ret; - } -} - -/*NUMPY_API - * Trace - */ -NPY_NO_EXPORT PyObject * -PyArray_Trace(PyArrayObject *self, int offset, int axis1, int axis2, - int rtype, PyArrayObject *out) -{ - PyObject *diag = NULL, *ret = NULL; - - diag = PyArray_Diagonal(self, offset, axis1, axis2); - if (diag == NULL) { - return NULL; - } - ret = PyArray_GenericReduceFunction((PyAO *)diag, n_ops.add, -1, rtype, out); - Py_DECREF(diag); - return ret; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/calculation.h b/numpy-1.6.2/numpy/core/src/multiarray/calculation.h deleted file mode 100644 index 0d6633ae5b..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/calculation.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef _NPY_CALCULATION_H_ -#define _NPY_CALCULATION_H_ - -NPY_NO_EXPORT PyObject* -PyArray_ArgMax(PyArrayObject* self, int axis, PyArrayObject *out); - -NPY_NO_EXPORT PyObject* -PyArray_ArgMin(PyArrayObject* self, int axis, PyArrayObject *out); - -NPY_NO_EXPORT PyObject* -PyArray_Max(PyArrayObject* self, int axis, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Min(PyArrayObject* self, int axis, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Ptp(PyArrayObject* self, int axis, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Mean(PyArrayObject* self, int axis, int 
rtype, PyArrayObject* out); - -NPY_NO_EXPORT PyObject * -PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out); - -NPY_NO_EXPORT PyObject* -PyArray_Trace(PyArrayObject* self, int offset, int axis1, int axis2, - int rtype, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Clip(PyArrayObject* self, PyObject* min, PyObject* max, PyArrayObject *out); - -NPY_NO_EXPORT PyObject* -PyArray_Conjugate(PyArrayObject* self, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Round(PyArrayObject* self, int decimals, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Std(PyArrayObject* self, int axis, int rtype, PyArrayObject* out, - int variance); - -NPY_NO_EXPORT PyObject * -__New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, - int variance, double num); - -NPY_NO_EXPORT PyObject* -PyArray_Sum(PyArrayObject* self, int axis, int rtype, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_CumSum(PyArrayObject* self, int axis, int rtype, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Prod(PyArrayObject* self, int axis, int rtype, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_CumProd(PyArrayObject* self, int axis, int rtype, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_All(PyArrayObject* self, int axis, PyArrayObject* out); - -NPY_NO_EXPORT PyObject* -PyArray_Any(PyArrayObject* self, int axis, PyArrayObject* out); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/common.c b/numpy-1.6.2/numpy/core/src/multiarray/common.c deleted file mode 100644 index b98565ead6..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/common.c +++ /dev/null @@ -1,417 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" - -#include "npy_config.h" -#include "numpy/npy_3kcompat.h" - -#include "usertypes.h" - -#include "common.h" -#include "buffer.h" - - -NPY_NO_EXPORT PyArray_Descr * -_array_find_python_scalar_type(PyObject 
*op) -{ - if (PyFloat_Check(op)) { - return PyArray_DescrFromType(PyArray_DOUBLE); - } - else if (PyComplex_Check(op)) { - return PyArray_DescrFromType(PyArray_CDOUBLE); - } - else if (PyInt_Check(op)) { - /* bools are a subclass of int */ - if (PyBool_Check(op)) { - return PyArray_DescrFromType(PyArray_BOOL); - } - else { - return PyArray_DescrFromType(PyArray_LONG); - } - } - else if (PyLong_Check(op)) { - /* if integer can fit into a longlong then return that*/ - if ((PyLong_AsLongLong(op) == -1) && PyErr_Occurred()) { - PyErr_Clear(); - return PyArray_DescrFromType(PyArray_OBJECT); - } - return PyArray_DescrFromType(PyArray_LONGLONG); - } - return NULL; -} - -static PyArray_Descr * -_use_default_type(PyObject *op) -{ - int typenum, l; - PyObject *type; - - typenum = -1; - l = 0; - type = (PyObject *)Py_TYPE(op); - while (l < PyArray_NUMUSERTYPES) { - if (type == (PyObject *)(userdescrs[l]->typeobj)) { - typenum = l + PyArray_USERDEF; - break; - } - l++; - } - if (typenum == -1) { - typenum = PyArray_OBJECT; - } - return PyArray_DescrFromType(typenum); -} - - -/* - * op is an object to be converted to an ndarray. - * - * minitype is the minimum type-descriptor needed. - * - * max is the maximum number of dimensions -- used for recursive call - * to avoid infinite recursion... - */ -NPY_NO_EXPORT PyArray_Descr * -_array_find_type(PyObject *op, PyArray_Descr *minitype, int max) -{ - int l; - PyObject *ip; - PyArray_Descr *chktype = NULL; - PyArray_Descr *outtype; -#if PY_VERSION_HEX >= 0x02060000 - Py_buffer buffer_view; -#endif - - /* - * These need to come first because if op already carries - * a descr structure, then we want it to be the result if minitype - * is NULL. 
- */ - if (PyArray_Check(op)) { - chktype = PyArray_DESCR(op); - Py_INCREF(chktype); - if (minitype == NULL) { - return chktype; - } - Py_INCREF(minitype); - goto finish; - } - - if (PyArray_IsScalar(op, Generic)) { - chktype = PyArray_DescrFromScalar(op); - if (minitype == NULL) { - return chktype; - } - Py_INCREF(minitype); - goto finish; - } - - if (minitype == NULL) { - minitype = PyArray_DescrFromType(PyArray_BOOL); - } - else { - Py_INCREF(minitype); - } - if (max < 0) { - goto deflt; - } - chktype = _array_find_python_scalar_type(op); - if (chktype) { - goto finish; - } - - if (PyBytes_Check(op)) { - chktype = PyArray_DescrNewFromType(PyArray_STRING); - chktype->elsize = PyString_GET_SIZE(op); - goto finish; - } - - if (PyUnicode_Check(op)) { - chktype = PyArray_DescrNewFromType(PyArray_UNICODE); - chktype->elsize = PyUnicode_GET_DATA_SIZE(op); -#ifndef Py_UNICODE_WIDE - chktype->elsize <<= 1; -#endif - goto finish; - } - -#if PY_VERSION_HEX >= 0x02060000 - /* PEP 3118 buffer interface */ - memset(&buffer_view, 0, sizeof(Py_buffer)); - if (PyObject_GetBuffer(op, &buffer_view, PyBUF_FORMAT|PyBUF_STRIDES) == 0 || - PyObject_GetBuffer(op, &buffer_view, PyBUF_FORMAT) == 0) { - - PyErr_Clear(); - chktype = _descriptor_from_pep3118_format(buffer_view.format); - PyBuffer_Release(&buffer_view); - if (chktype) { - goto finish; - } - } - else if (PyObject_GetBuffer(op, &buffer_view, PyBUF_STRIDES) == 0 || - PyObject_GetBuffer(op, &buffer_view, PyBUF_SIMPLE) == 0) { - - PyErr_Clear(); - chktype = PyArray_DescrNewFromType(PyArray_VOID); - chktype->elsize = buffer_view.itemsize; - PyBuffer_Release(&buffer_view); - goto finish; - } - else { - PyErr_Clear(); - } -#endif - - if ((ip=PyObject_GetAttrString(op, "__array_interface__"))!=NULL) { - if (PyDict_Check(ip)) { - PyObject *new; - new = PyDict_GetItemString(ip, "typestr"); - if (new && PyString_Check(new)) { - chktype =_array_typedescr_fromstr(PyString_AS_STRING(new)); - } - } - Py_DECREF(ip); - if (chktype) { - goto 
finish; - } - } - else { - PyErr_Clear(); - } - if ((ip=PyObject_GetAttrString(op, "__array_struct__")) != NULL) { - PyArrayInterface *inter; - char buf[40]; - - if (NpyCapsule_Check(ip)) { - inter = (PyArrayInterface *)NpyCapsule_AsVoidPtr(ip); - if (inter->two == 2) { - PyOS_snprintf(buf, sizeof(buf), - "|%c%d", inter->typekind, inter->itemsize); - chktype = _array_typedescr_fromstr(buf); - } - } - Py_DECREF(ip); - if (chktype) { - goto finish; - } - } - else { - PyErr_Clear(); - } - -#if !defined(NPY_PY3K) - if (PyBuffer_Check(op)) { - chktype = PyArray_DescrNewFromType(PyArray_VOID); - chktype->elsize = Py_TYPE(op)->tp_as_sequence->sq_length(op); - PyErr_Clear(); - goto finish; - } -#endif - - if (PyObject_HasAttrString(op, "__array__")) { - ip = PyObject_CallMethod(op, "__array__", NULL); - if(ip && PyArray_Check(ip)) { - chktype = PyArray_DESCR(ip); - Py_INCREF(chktype); - Py_DECREF(ip); - goto finish; - } - Py_XDECREF(ip); - if (PyErr_Occurred()) PyErr_Clear(); - } - -#if defined(NPY_PY3K) - /* FIXME: XXX -- what is the correct thing to do here? 
*/ -#else - if (PyInstance_Check(op)) { - goto deflt; - } -#endif - if (PySequence_Check(op)) { - l = PyObject_Length(op); - if (l < 0 && PyErr_Occurred()) { - PyErr_Clear(); - goto deflt; - } - if (l == 0 && minitype->type_num == PyArray_BOOL) { - Py_DECREF(minitype); - minitype = PyArray_DescrFromType(PyArray_DEFAULT); - if (minitype == NULL) { - return NULL; - } - } - while (--l >= 0) { - PyArray_Descr *newtype; - ip = PySequence_GetItem(op, l); - if (ip==NULL) { - PyErr_Clear(); - goto deflt; - } - chktype = _array_find_type(ip, minitype, max-1); - if (chktype == NULL) { - Py_DECREF(minitype); - return NULL; - } - newtype = PyArray_PromoteTypes(chktype, minitype); - Py_DECREF(minitype); - minitype = newtype; - Py_DECREF(chktype); - Py_DECREF(ip); - } - chktype = minitype; - Py_INCREF(minitype); - goto finish; - } - - - deflt: - chktype = _use_default_type(op); - - finish: - outtype = PyArray_PromoteTypes(chktype, minitype); - Py_DECREF(chktype); - Py_DECREF(minitype); - if (outtype == NULL) { - return NULL; - } - /* - * VOID Arrays should not occur by "default" - * unless input was already a VOID - */ - if (outtype->type_num == PyArray_VOID && - minitype->type_num != PyArray_VOID) { - Py_DECREF(outtype); - return PyArray_DescrFromType(PyArray_OBJECT); - } - return outtype; -} - -/* new reference */ -NPY_NO_EXPORT PyArray_Descr * -_array_typedescr_fromstr(char *c_str) -{ - PyArray_Descr *descr = NULL; - PyObject *stringobj = PyString_FromString(c_str); - - if (stringobj == NULL) { - return NULL; - } - if (PyArray_DescrConverter(stringobj, &descr) != NPY_SUCCEED) { - Py_DECREF(stringobj); - return NULL; - } - Py_DECREF(stringobj); - return descr; -} - -NPY_NO_EXPORT char * -index2ptr(PyArrayObject *mp, intp i) -{ - intp dim0; - - if (mp->nd == 0) { - PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed"); - return NULL; - } - dim0 = mp->dimensions[0]; - if (i < 0) { - i += dim0; - } - if (i == 0 && dim0 > 0) { - return mp->data; - } - if (i > 0 && i < 
dim0) { - return mp->data+i*mp->strides[0]; - } - PyErr_SetString(PyExc_IndexError,"index out of bounds"); - return NULL; -} - -NPY_NO_EXPORT int -_zerofill(PyArrayObject *ret) -{ - if (PyDataType_REFCHK(ret->descr)) { - PyObject *zero = PyInt_FromLong(0); - PyArray_FillObjectArray(ret, zero); - Py_DECREF(zero); - if (PyErr_Occurred()) { - Py_DECREF(ret); - return -1; - } - } - else { - intp n = PyArray_NBYTES(ret); - memset(ret->data, 0, n); - } - return 0; -} - -NPY_NO_EXPORT int -_IsAligned(PyArrayObject *ap) -{ - int i, alignment, aligned = 1; - intp ptr; - - /* The special casing for STRING and VOID types was removed - * in accordance with http://projects.scipy.org/numpy/ticket/1227 - * It used to be that IsAligned always returned True for these - * types, which is indeed the case when they are created using - * PyArray_DescrConverter(), but not necessarily when using - * PyArray_DescrAlignConverter(). */ - - alignment = ap->descr->alignment; - if (alignment == 1) { - return 1; - } - ptr = (intp) ap->data; - aligned = (ptr % alignment) == 0; - for (i = 0; i < ap->nd; i++) { - aligned &= ((ap->strides[i] % alignment) == 0); - } - return aligned != 0; -} - -NPY_NO_EXPORT Bool -_IsWriteable(PyArrayObject *ap) -{ - PyObject *base=ap->base; - void *dummy; - Py_ssize_t n; - - /* If we own our own data, then no-problem */ - if ((base == NULL) || (ap->flags & OWNDATA)) { - return TRUE; - } - /* - * Get to the final base object - * If it is a writeable array, then return TRUE - * If we can find an array object - * or a writeable buffer object as the final base object - * or a string object (for pickling support memory savings). - * - this last could be removed if a proper pickleable - * buffer was added to Python. 
- */ - - while(PyArray_Check(base)) { - if (PyArray_CHKFLAGS(base, OWNDATA)) { - return (Bool) (PyArray_ISWRITEABLE(base)); - } - base = PyArray_BASE(base); - } - - /* - * here so pickle support works seamlessly - * and unpickled array can be set and reset writeable - * -- could be abused -- - */ - if (PyString_Check(base)) { - return TRUE; - } - if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) { - return FALSE; - } - return TRUE; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/common.h b/numpy-1.6.2/numpy/core/src/multiarray/common.h deleted file mode 100644 index 23b24881a7..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/common.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _NPY_PRIVATE_COMMON_H_ -#define _NPY_PRIVATE_COMMON_H_ - -#define error_converting(x) (((x) == -1) && PyErr_Occurred()) - -NPY_NO_EXPORT PyArray_Descr * -_array_find_type(PyObject *op, PyArray_Descr *minitype, int max); - -NPY_NO_EXPORT PyArray_Descr * -_array_find_python_scalar_type(PyObject *op); - -NPY_NO_EXPORT PyArray_Descr * -_array_typedescr_fromstr(char *str); - -NPY_NO_EXPORT char * -index2ptr(PyArrayObject *mp, intp i); - -NPY_NO_EXPORT int -_zerofill(PyArrayObject *ret); - -NPY_NO_EXPORT int -_IsAligned(PyArrayObject *ap); - -NPY_NO_EXPORT Bool -_IsWriteable(PyArrayObject *ap); - -#include "ucsnarrow.h" - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/conversion_utils.c b/numpy-1.6.2/numpy/core/src/multiarray/conversion_utils.c deleted file mode 100644 index e8ac992732..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/conversion_utils.c +++ /dev/null @@ -1,778 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "arraytypes.h" - -#include "conversion_utils.h" - -/**************************************************************** -* 
Useful function for conversion when used with PyArg_ParseTuple -****************************************************************/ - -/*NUMPY_API - * - * Useful to pass as converter function for O& processing in PyArgs_ParseTuple. - * - * This conversion function can be used with the "O&" argument for - * PyArg_ParseTuple. It will immediately return an object of array type - * or will convert to a CARRAY any other object. - * - * If you use PyArray_Converter, you must DECREF the array when finished - * as you get a new reference to it. - */ -NPY_NO_EXPORT int -PyArray_Converter(PyObject *object, PyObject **address) -{ - if (PyArray_Check(object)) { - *address = object; - Py_INCREF(object); - return PY_SUCCEED; - } - else { - *address = PyArray_FromAny(object, NULL, 0, 0, CARRAY, NULL); - if (*address == NULL) { - return PY_FAIL; - } - return PY_SUCCEED; - } -} - -/*NUMPY_API - * Useful to pass as converter function for O& processing in - * PyArgs_ParseTuple for output arrays - */ -NPY_NO_EXPORT int -PyArray_OutputConverter(PyObject *object, PyArrayObject **address) -{ - if (object == NULL || object == Py_None) { - *address = NULL; - return PY_SUCCEED; - } - if (PyArray_Check(object)) { - *address = (PyArrayObject *)object; - return PY_SUCCEED; - } - else { - PyErr_SetString(PyExc_TypeError, - "output must be an array"); - *address = NULL; - return PY_FAIL; - } -} - -/*NUMPY_API - * Get intp chunk from sequence - * - * This function takes a Python sequence object and allocates and - * fills in an intp array with the converted values. 
- * - * Remember to free the pointer seq.ptr when done using - * PyDimMem_FREE(seq.ptr)** - */ -NPY_NO_EXPORT int -PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) -{ - int len; - int nd; - - seq->ptr = NULL; - seq->len = 0; - if (obj == Py_None) { - return PY_SUCCEED; - } - len = PySequence_Size(obj); - if (len == -1) { - /* Check to see if it is a number */ - if (PyNumber_Check(obj)) { - len = 1; - } - } - if (len < 0) { - PyErr_SetString(PyExc_TypeError, - "expected sequence object with len >= 0"); - return PY_FAIL; - } - if (len > MAX_DIMS) { - PyErr_Format(PyExc_ValueError, "sequence too large; " \ - "must be smaller than %d", MAX_DIMS); - return PY_FAIL; - } - if (len > 0) { - seq->ptr = PyDimMem_NEW(len); - if (seq->ptr == NULL) { - PyErr_NoMemory(); - return PY_FAIL; - } - } - seq->len = len; - nd = PyArray_IntpFromSequence(obj, (npy_intp *)seq->ptr, len); - if (nd == -1 || nd != len) { - PyDimMem_FREE(seq->ptr); - seq->ptr = NULL; - return PY_FAIL; - } - return PY_SUCCEED; -} - -/*NUMPY_API - * Get buffer chunk from object - * - * this function takes a Python object which exposes the (single-segment) - * buffer interface and returns a pointer to the data segment - * - * You should increment the reference count by one of buf->base - * if you will hang on to a reference - * - * You only get a borrowed reference to the object. Do not free the - * memory... 
- */ -NPY_NO_EXPORT int -PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) -{ - Py_ssize_t buflen; - - buf->ptr = NULL; - buf->flags = BEHAVED; - buf->base = NULL; - if (obj == Py_None) { - return PY_SUCCEED; - } - if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) { - PyErr_Clear(); - buf->flags &= ~WRITEABLE; - if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr), - &buflen) < 0) { - return PY_FAIL; - } - } - buf->len = (npy_intp) buflen; - - /* Point to the base of the buffer object if present */ -#if defined(NPY_PY3K) - if (PyMemoryView_Check(obj)) { - buf->base = PyMemoryView_GET_BASE(obj); - } -#else - if (PyBuffer_Check(obj)) { - buf->base = ((PyArray_Chunk *)obj)->base; - } -#endif - if (buf->base == NULL) { - buf->base = obj; - } - return PY_SUCCEED; -} - -/*NUMPY_API - * Get axis from an object (possibly None) -- a converter function, - */ -NPY_NO_EXPORT int -PyArray_AxisConverter(PyObject *obj, int *axis) -{ - if (obj == Py_None) { - *axis = MAX_DIMS; - } - else { - *axis = (int) PyInt_AsLong(obj); - if (PyErr_Occurred()) { - return PY_FAIL; - } - } - return PY_SUCCEED; -} - -/*NUMPY_API - * Convert an object to true / false - */ -NPY_NO_EXPORT int -PyArray_BoolConverter(PyObject *object, Bool *val) -{ - if (PyObject_IsTrue(object)) { - *val = TRUE; - } - else { - *val = FALSE; - } - if (PyErr_Occurred()) { - return PY_FAIL; - } - return PY_SUCCEED; -} - -/*NUMPY_API - * Convert object to endian - */ -NPY_NO_EXPORT int -PyArray_ByteorderConverter(PyObject *obj, char *endian) -{ - char *str; - PyObject *tmp = NULL; - - if (PyUnicode_Check(obj)) { - obj = tmp = PyUnicode_AsASCIIString(obj); - } - - *endian = PyArray_SWAP; - str = PyBytes_AsString(obj); - if (!str) { - Py_XDECREF(tmp); - return PY_FAIL; - } - if (strlen(str) < 1) { - PyErr_SetString(PyExc_ValueError, - "Byteorder string must be at least length 1"); - Py_XDECREF(tmp); - return PY_FAIL; - } - *endian = str[0]; - if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE - && 
str[0] != PyArray_NATIVE && str[0] != PyArray_IGNORE) { - if (str[0] == 'b' || str[0] == 'B') { - *endian = PyArray_BIG; - } - else if (str[0] == 'l' || str[0] == 'L') { - *endian = PyArray_LITTLE; - } - else if (str[0] == 'n' || str[0] == 'N') { - *endian = PyArray_NATIVE; - } - else if (str[0] == 'i' || str[0] == 'I') { - *endian = PyArray_IGNORE; - } - else if (str[0] == 's' || str[0] == 'S') { - *endian = PyArray_SWAP; - } - else { - PyErr_Format(PyExc_ValueError, - "%s is an unrecognized byteorder", - str); - Py_XDECREF(tmp); - return PY_FAIL; - } - } - Py_XDECREF(tmp); - return PY_SUCCEED; -} - -/*NUMPY_API - * Convert object to sort kind - */ -NPY_NO_EXPORT int -PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) -{ - char *str; - PyObject *tmp = NULL; - - if (PyUnicode_Check(obj)) { - obj = tmp = PyUnicode_AsASCIIString(obj); - } - - *sortkind = PyArray_QUICKSORT; - str = PyBytes_AsString(obj); - if (!str) { - Py_XDECREF(tmp); - return PY_FAIL; - } - if (strlen(str) < 1) { - PyErr_SetString(PyExc_ValueError, - "Sort kind string must be at least length 1"); - Py_XDECREF(tmp); - return PY_FAIL; - } - if (str[0] == 'q' || str[0] == 'Q') { - *sortkind = PyArray_QUICKSORT; - } - else if (str[0] == 'h' || str[0] == 'H') { - *sortkind = PyArray_HEAPSORT; - } - else if (str[0] == 'm' || str[0] == 'M') { - *sortkind = PyArray_MERGESORT; - } - else { - PyErr_Format(PyExc_ValueError, - "%s is an unrecognized kind of sort", - str); - Py_XDECREF(tmp); - return PY_FAIL; - } - Py_XDECREF(tmp); - return PY_SUCCEED; -} - -/*NUMPY_API - * Convert object to searchsorted side - */ -NPY_NO_EXPORT int -PyArray_SearchsideConverter(PyObject *obj, void *addr) -{ - NPY_SEARCHSIDE *side = (NPY_SEARCHSIDE *)addr; - char *str; - PyObject *tmp = NULL; - - if (PyUnicode_Check(obj)) { - obj = tmp = PyUnicode_AsASCIIString(obj); - } - - str = PyBytes_AsString(obj); - if (!str || strlen(str) < 1) { - PyErr_SetString(PyExc_ValueError, - "expected nonempty string for keyword 
'side'"); - Py_XDECREF(tmp); - return PY_FAIL; - } - - if (str[0] == 'l' || str[0] == 'L') { - *side = NPY_SEARCHLEFT; - } - else if (str[0] == 'r' || str[0] == 'R') { - *side = NPY_SEARCHRIGHT; - } - else { - PyErr_Format(PyExc_ValueError, - "'%s' is an invalid value for keyword 'side'", str); - Py_XDECREF(tmp); - return PY_FAIL; - } - Py_XDECREF(tmp); - return PY_SUCCEED; -} - -/***************************** -* Other conversion functions -*****************************/ - -/*NUMPY_API*/ -NPY_NO_EXPORT int -PyArray_PyIntAsInt(PyObject *o) -{ - long long_value = -1; - PyObject *obj; - static char *msg = "an integer is required"; - PyObject *arr; - PyArray_Descr *descr; - int ret; - - - if (!o) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - if (PyInt_Check(o)) { - long_value = (long) PyInt_AS_LONG(o); - goto finish; - } else if (PyLong_Check(o)) { - long_value = (long) PyLong_AsLong(o); - goto finish; - } - - descr = &INT_Descr; - arr = NULL; - if (PyArray_Check(o)) { - if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - Py_INCREF(descr); - arr = PyArray_CastToType((PyArrayObject *)o, descr, 0); - } - if (PyArray_IsScalar(o, Integer)) { - Py_INCREF(descr); - arr = PyArray_FromScalar(o, descr); - } - if (arr != NULL) { - ret = *((int *)PyArray_DATA(arr)); - Py_DECREF(arr); - return ret; - } -#if (PY_VERSION_HEX >= 0x02050000) - if (PyIndex_Check(o)) { - PyObject* value = PyNumber_Index(o); - long_value = (longlong) PyInt_AsSsize_t(value); - goto finish; - } -#endif - if (Py_TYPE(o)->tp_as_number != NULL && \ - Py_TYPE(o)->tp_as_number->nb_int != NULL) { - obj = Py_TYPE(o)->tp_as_number->nb_int(o); - if (obj == NULL) { - return -1; - } - long_value = (long) PyLong_AsLong(obj); - Py_DECREF(obj); - } -#if !defined(NPY_PY3K) - else if (Py_TYPE(o)->tp_as_number != NULL && \ - Py_TYPE(o)->tp_as_number->nb_long != NULL) { - obj = Py_TYPE(o)->tp_as_number->nb_long(o); - if (obj == NULL) { - return -1; - 
} - long_value = (long) PyLong_AsLong(obj); - Py_DECREF(obj); - } -#endif - else { - PyErr_SetString(PyExc_NotImplementedError,""); - } - - finish: - if error_converting(long_value) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - -#if (SIZEOF_LONG > SIZEOF_INT) - if ((long_value < INT_MIN) || (long_value > INT_MAX)) { - PyErr_SetString(PyExc_ValueError, "integer won't fit into a C int"); - return -1; - } -#endif - return (int) long_value; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT npy_intp -PyArray_PyIntAsIntp(PyObject *o) -{ - longlong long_value = -1; - PyObject *obj; - static char *msg = "an integer is required"; - PyObject *arr; - PyArray_Descr *descr; - npy_intp ret; - - if (!o) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - if (PyInt_Check(o)) { - long_value = (longlong) PyInt_AS_LONG(o); - goto finish; - } else if (PyLong_Check(o)) { - long_value = (longlong) PyLong_AsLongLong(o); - goto finish; - } - -#if SIZEOF_INTP == SIZEOF_LONG - descr = &LONG_Descr; -#elif SIZEOF_INTP == SIZEOF_INT - descr = &INT_Descr; -#else - descr = &LONGLONG_Descr; -#endif - arr = NULL; - - if (PyArray_Check(o)) { - if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - Py_INCREF(descr); - arr = PyArray_CastToType((PyArrayObject *)o, descr, 0); - } - else if (PyArray_IsScalar(o, Integer)) { - Py_INCREF(descr); - arr = PyArray_FromScalar(o, descr); - } - if (arr != NULL) { - ret = *((npy_intp *)PyArray_DATA(arr)); - Py_DECREF(arr); - return ret; - } - -#if (PY_VERSION_HEX >= 0x02050000) - if (PyIndex_Check(o)) { - PyObject* value = PyNumber_Index(o); - if (value == NULL) { - return -1; - } - long_value = (longlong) PyInt_AsSsize_t(value); - goto finish; - } -#endif -#if !defined(NPY_PY3K) - if (Py_TYPE(o)->tp_as_number != NULL && \ - Py_TYPE(o)->tp_as_number->nb_long != NULL) { - obj = Py_TYPE(o)->tp_as_number->nb_long(o); - if (obj != NULL) { - long_value = (longlong) PyLong_AsLongLong(obj); - 
Py_DECREF(obj); - } - } - else -#endif - if (Py_TYPE(o)->tp_as_number != NULL && \ - Py_TYPE(o)->tp_as_number->nb_int != NULL) { - obj = Py_TYPE(o)->tp_as_number->nb_int(o); - if (obj != NULL) { - long_value = (longlong) PyLong_AsLongLong(obj); - Py_DECREF(obj); - } - } - else { - PyErr_SetString(PyExc_NotImplementedError,""); - } - - finish: - if error_converting(long_value) { - PyErr_SetString(PyExc_TypeError, msg); - return -1; - } - -#if (SIZEOF_LONGLONG > SIZEOF_INTP) - if ((long_value < MIN_INTP) || (long_value > MAX_INTP)) { - PyErr_SetString(PyExc_ValueError, - "integer won't fit into a C intp"); - return -1; - } -#endif - return (npy_intp) long_value; -} - -/*NUMPY_API - * PyArray_IntpFromSequence - * Returns the number of dimensions or -1 if an error occurred. - * vals must be large enough to hold maxvals - */ -NPY_NO_EXPORT int -PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) -{ - int nd, i; - PyObject *op, *err; - - /* - * Check to see if sequence is a single integer first. 
- * or, can be made into one - */ - if ((nd=PySequence_Length(seq)) == -1) { - if (PyErr_Occurred()) PyErr_Clear(); -#if SIZEOF_LONG >= SIZEOF_INTP && !defined(NPY_PY3K) - if (!(op = PyNumber_Int(seq))) { - return -1; - } -#else - if (!(op = PyNumber_Long(seq))) { - return -1; - } -#endif - nd = 1; -#if SIZEOF_LONG >= SIZEOF_INTP - vals[0] = (npy_intp ) PyInt_AsLong(op); -#else - vals[0] = (npy_intp ) PyLong_AsLongLong(op); -#endif - Py_DECREF(op); - - /* - * Check wether there was an error - if the error was an overflow, raise - * a ValueError instead to be more helpful - */ - if(vals[0] == -1) { - err = PyErr_Occurred(); - if (err && - PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) { - PyErr_SetString(PyExc_ValueError, - "Maximum allowed dimension exceeded"); - } - if(err != NULL) { - return -1; - } - } - } - else { - for (i = 0; i < MIN(nd,maxvals); i++) { - op = PySequence_GetItem(seq, i); - if (op == NULL) { - return -1; - } -#if SIZEOF_LONG >= SIZEOF_INTP - vals[i]=(npy_intp )PyInt_AsLong(op); -#else - vals[i]=(npy_intp )PyLong_AsLongLong(op); -#endif - Py_DECREF(op); - - /* - * Check wether there was an error - if the error was an overflow, - * raise a ValueError instead to be more helpful - */ - if(vals[0] == -1) { - err = PyErr_Occurred(); - if (err && - PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) { - PyErr_SetString(PyExc_ValueError, - "Maximum allowed dimension exceeded"); - } - if(err != NULL) { - return -1; - } - } - } - } - return nd; -} - -/*NUMPY_API - * Typestr converter - */ -NPY_NO_EXPORT int -PyArray_TypestrConvert(int itemsize, int gentype) -{ - int newtype = gentype; - - if (gentype == PyArray_GENBOOLLTR) { - if (itemsize == 1) { - newtype = PyArray_BOOL; - } - else { - newtype = PyArray_NOTYPE; - } - } - else if (gentype == PyArray_SIGNEDLTR) { - switch(itemsize) { - case 1: - newtype = PyArray_INT8; - break; - case 2: - newtype = PyArray_INT16; - break; - case 4: - newtype = PyArray_INT32; - break; - case 8: - newtype = 
PyArray_INT64; - break; -#ifdef PyArray_INT128 - case 16: - newtype = PyArray_INT128; - break; -#endif - default: - newtype = PyArray_NOTYPE; - } - } - else if (gentype == PyArray_UNSIGNEDLTR) { - switch(itemsize) { - case 1: - newtype = PyArray_UINT8; - break; - case 2: - newtype = PyArray_UINT16; - break; - case 4: - newtype = PyArray_UINT32; - break; - case 8: - newtype = PyArray_UINT64; - break; -#ifdef PyArray_INT128 - case 16: - newtype = PyArray_UINT128; - break; -#endif - default: - newtype = PyArray_NOTYPE; - break; - } - } - else if (gentype == PyArray_FLOATINGLTR) { - switch(itemsize) { - case 2: - newtype = PyArray_FLOAT16; - break; - case 4: - newtype = PyArray_FLOAT32; - break; - case 8: - newtype = PyArray_FLOAT64; - break; -#ifdef PyArray_FLOAT80 - case 10: - newtype = PyArray_FLOAT80; - break; -#endif -#ifdef PyArray_FLOAT96 - case 12: - newtype = PyArray_FLOAT96; - break; -#endif -#ifdef PyArray_FLOAT128 - case 16: - newtype = PyArray_FLOAT128; - break; -#endif - default: - newtype = PyArray_NOTYPE; - } - } - else if (gentype == PyArray_COMPLEXLTR) { - switch(itemsize) { - case 8: - newtype = PyArray_COMPLEX64; - break; - case 16: - newtype = PyArray_COMPLEX128; - break; -#ifdef PyArray_FLOAT80 - case 20: - newtype = PyArray_COMPLEX160; - break; -#endif -#ifdef PyArray_FLOAT96 - case 24: - newtype = PyArray_COMPLEX192; - break; -#endif -#ifdef PyArray_FLOAT128 - case 32: - newtype = PyArray_COMPLEX256; - break; -#endif - default: - newtype = PyArray_NOTYPE; - } - } - return newtype; -} - -/* Lifted from numarray */ -/* TODO: not documented */ -/*NUMPY_API - PyArray_IntTupleFromIntp -*/ -NPY_NO_EXPORT PyObject * -PyArray_IntTupleFromIntp(int len, npy_intp *vals) -{ - int i; - PyObject *intTuple = PyTuple_New(len); - - if (!intTuple) { - goto fail; - } - for (i = 0; i < len; i++) { -#if SIZEOF_INTP <= SIZEOF_LONG - PyObject *o = PyInt_FromLong((long) vals[i]); -#else - PyObject *o = PyLong_FromLongLong((longlong) vals[i]); -#endif - if (!o) { - 
Py_DECREF(intTuple); - intTuple = NULL; - goto fail; - } - PyTuple_SET_ITEM(intTuple, i, o); - } - - fail: - return intTuple; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/conversion_utils.h b/numpy-1.6.2/numpy/core/src/multiarray/conversion_utils.h deleted file mode 100644 index 64b26b23ef..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/conversion_utils.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _NPY_PRIVATE_CONVERSION_UTILS_H_ -#define _NPY_PRIVATE_CONVERSION_UTILS_H_ - -NPY_NO_EXPORT int -PyArray_Converter(PyObject *object, PyObject **address); - -NPY_NO_EXPORT int -PyArray_OutputConverter(PyObject *object, PyArrayObject **address); - -NPY_NO_EXPORT int -PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq); - -NPY_NO_EXPORT int -PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf); - -NPY_NO_EXPORT int -PyArray_BoolConverter(PyObject *object, Bool *val); - -NPY_NO_EXPORT int -PyArray_ByteorderConverter(PyObject *obj, char *endian); - -NPY_NO_EXPORT int -PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind); - -NPY_NO_EXPORT int -PyArray_SearchsideConverter(PyObject *obj, void *addr); - -NPY_NO_EXPORT int -PyArray_PyIntAsInt(PyObject *o); - -NPY_NO_EXPORT intp -PyArray_PyIntAsIntp(PyObject *o); - -NPY_NO_EXPORT int -PyArray_IntpFromSequence(PyObject *seq, intp *vals, int maxvals); - -NPY_NO_EXPORT int -PyArray_TypestrConvert(int itemsize, int gentype); - -NPY_NO_EXPORT PyObject * -PyArray_IntTupleFromIntp(int len, intp *vals); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/convert.c b/numpy-1.6.2/numpy/core/src/multiarray/convert.c deleted file mode 100644 index ffeb96ba93..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/convert.c +++ /dev/null @@ -1,523 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - 
-#include "arrayobject.h" -#include "mapping.h" -#include "lowlevel_strided_loops.h" - -#include "convert.h" - -/* - * Converts a subarray of 'self' into lists, with starting data pointer - * 'dataptr' and from dimension 'startdim' to the last dimension of 'self'. - * - * Returns a new reference. - */ -static PyObject * -recursive_tolist(PyArrayObject *self, char *dataptr, int startdim) -{ - npy_intp i, n, stride; - PyObject *ret, *item; - - /* Base case */ - if (startdim >= PyArray_NDIM(self)) { - return PyArray_DESCR(self)->f->getitem(dataptr,self); - } - - n = PyArray_DIM(self, startdim); - stride = PyArray_STRIDE(self, startdim); - - ret = PyList_New(n); - if (ret == NULL) { - return NULL; - } - - for (i = 0; i < n; ++i) { - item = recursive_tolist(self, dataptr, startdim+1); - if (item == NULL) { - Py_DECREF(ret); - return NULL; - } - PyList_SET_ITEM(ret, i, item); - - dataptr += stride; - } - - return ret; -} - -/*NUMPY_API - * To List - */ -NPY_NO_EXPORT PyObject * -PyArray_ToList(PyArrayObject *self) -{ - return recursive_tolist(self, PyArray_DATA(self), 0); -} - -/* XXX: FIXME --- add ordering argument to - Allow Fortran ordering on write - This will need the addition of a Fortran-order iterator. - */ - -/*NUMPY_API - To File -*/ -NPY_NO_EXPORT int -PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) -{ - npy_intp size; - npy_intp n, n2; - size_t n3, n4; - PyArrayIterObject *it; - PyObject *obj, *strobj, *tupobj, *byteobj; - - n3 = (sep ? strlen((const char *)sep) : 0); - if (n3 == 0) { - /* binary data */ - if (PyDataType_FLAGCHK(self->descr, NPY_LIST_PICKLE)) { - PyErr_SetString(PyExc_ValueError, "cannot write " \ - "object arrays to a file in " \ - "binary mode"); - return -1; - } - - if (PyArray_ISCONTIGUOUS(self)) { - size = PyArray_SIZE(self); - NPY_BEGIN_ALLOW_THREADS; - -#if defined (_MSC_VER) && defined(_WIN64) - /* Workaround Win64 fwrite() bug. 
Ticket #1660 */ - { - npy_intp maxsize = 2147483648 / self->descr->elsize; - npy_intp chunksize; - - n = 0; - while (size > 0) { - chunksize = (size > maxsize) ? maxsize : size; - n2 = fwrite((const void *) - ((char *)self->data + (n * self->descr->elsize)), - (size_t) self->descr->elsize, - (size_t) chunksize, fp); - if (n2 < chunksize) { - break; - } - n += n2; - size -= chunksize; - } - size = PyArray_SIZE(self); - } -#else - n = fwrite((const void *)self->data, - (size_t) self->descr->elsize, - (size_t) size, fp); -#endif - NPY_END_ALLOW_THREADS; - if (n < size) { - PyErr_Format(PyExc_ValueError, - "%ld requested and %ld written", - (long) size, (long) n); - return -1; - } - } - else { - NPY_BEGIN_THREADS_DEF; - - it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); - NPY_BEGIN_THREADS; - while (it->index < it->size) { - if (fwrite((const void *)it->dataptr, - (size_t) self->descr->elsize, - 1, fp) < 1) { - NPY_END_THREADS; - PyErr_Format(PyExc_IOError, - "problem writing element"\ - " %"INTP_FMT" to file", - it->index); - Py_DECREF(it); - return -1; - } - PyArray_ITER_NEXT(it); - } - NPY_END_THREADS; - Py_DECREF(it); - } - } - else { - /* - * text data - */ - - it = (PyArrayIterObject *) - PyArray_IterNew((PyObject *)self); - n4 = (format ? 
strlen((const char *)format) : 0); - while (it->index < it->size) { - obj = self->descr->f->getitem(it->dataptr, self); - if (obj == NULL) { - Py_DECREF(it); - return -1; - } - if (n4 == 0) { - /* - * standard writing - */ - strobj = PyObject_Str(obj); - Py_DECREF(obj); - if (strobj == NULL) { - Py_DECREF(it); - return -1; - } - } - else { - /* - * use format string - */ - tupobj = PyTuple_New(1); - if (tupobj == NULL) { - Py_DECREF(it); - return -1; - } - PyTuple_SET_ITEM(tupobj,0,obj); - obj = PyUString_FromString((const char *)format); - if (obj == NULL) { - Py_DECREF(tupobj); - Py_DECREF(it); - return -1; - } - strobj = PyUString_Format(obj, tupobj); - Py_DECREF(obj); - Py_DECREF(tupobj); - if (strobj == NULL) { - Py_DECREF(it); - return -1; - } - } -#if defined(NPY_PY3K) - byteobj = PyUnicode_AsASCIIString(strobj); -#else - byteobj = strobj; -#endif - NPY_BEGIN_ALLOW_THREADS; - n2 = PyBytes_GET_SIZE(byteobj); - n = fwrite(PyBytes_AS_STRING(byteobj), 1, n2, fp); - NPY_END_ALLOW_THREADS; -#if defined(NPY_PY3K) - Py_DECREF(byteobj); -#endif - if (n < n2) { - PyErr_Format(PyExc_IOError, - "problem writing element %"INTP_FMT\ - " to file", it->index); - Py_DECREF(strobj); - Py_DECREF(it); - return -1; - } - /* write separator for all but last one */ - if (it->index != it->size-1) { - if (fwrite(sep, 1, n3, fp) < n3) { - PyErr_Format(PyExc_IOError, - "problem writing "\ - "separator to file"); - Py_DECREF(strobj); - Py_DECREF(it); - return -1; - } - } - Py_DECREF(strobj); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return 0; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_ToString(PyArrayObject *self, NPY_ORDER order) -{ - intp numbytes; - intp index; - char *dptr; - int elsize; - PyObject *ret; - PyArrayIterObject *it; - - if (order == NPY_ANYORDER) - order = PyArray_ISFORTRAN(self); - - /* if (PyArray_TYPE(self) == PyArray_OBJECT) { - PyErr_SetString(PyExc_ValueError, "a string for the data" \ - "in an object array is not appropriate"); - return 
NULL; - } - */ - - numbytes = PyArray_NBYTES(self); - if ((PyArray_ISCONTIGUOUS(self) && (order == NPY_CORDER)) - || (PyArray_ISFORTRAN(self) && (order == NPY_FORTRANORDER))) { - ret = PyBytes_FromStringAndSize(self->data, (Py_ssize_t) numbytes); - } - else { - PyObject *new; - if (order == NPY_FORTRANORDER) { - /* iterators are always in C-order */ - new = PyArray_Transpose(self, NULL); - if (new == NULL) { - return NULL; - } - } - else { - Py_INCREF(self); - new = (PyObject *)self; - } - it = (PyArrayIterObject *)PyArray_IterNew(new); - Py_DECREF(new); - if (it == NULL) { - return NULL; - } - ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); - if (ret == NULL) { - Py_DECREF(it); - return NULL; - } - dptr = PyBytes_AS_STRING(ret); - index = it->size; - elsize = self->descr->elsize; - while (index--) { - memcpy(dptr, it->dataptr, elsize); - dptr += elsize; - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return ret; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT int -PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) -{ - PyObject *newarr; - int itemsize, swap; - void *fromptr; - PyArray_Descr *descr; - intp size; - PyArray_CopySwapFunc *copyswap; - - itemsize = arr->descr->elsize; - if (PyArray_ISOBJECT(arr)) { - fromptr = &obj; - swap = 0; - newarr = NULL; - } - else { - descr = PyArray_DESCR(arr); - Py_INCREF(descr); - newarr = PyArray_FromAny(obj, descr, 0,0, ALIGNED, NULL); - if (newarr == NULL) { - return -1; - } - fromptr = PyArray_DATA(newarr); - swap = (PyArray_ISNOTSWAPPED(arr) != PyArray_ISNOTSWAPPED(newarr)); - } - size=PyArray_SIZE(arr); - copyswap = arr->descr->f->copyswap; - if (PyArray_ISONESEGMENT(arr)) { - char *toptr=PyArray_DATA(arr); - PyArray_FillWithScalarFunc* fillwithscalar = - arr->descr->f->fillwithscalar; - if (fillwithscalar && PyArray_ISALIGNED(arr)) { - copyswap(fromptr, NULL, swap, newarr); - fillwithscalar(toptr, size, fromptr, arr); - } - else { - while (size--) { - copyswap(toptr, fromptr, swap, arr); - toptr += 
itemsize; - } - } - } - else { - PyArrayIterObject *iter; - - iter = (PyArrayIterObject *)\ - PyArray_IterNew((PyObject *)arr); - if (iter == NULL) { - Py_XDECREF(newarr); - return -1; - } - while (size--) { - copyswap(iter->dataptr, fromptr, swap, arr); - PyArray_ITER_NEXT(iter); - } - Py_DECREF(iter); - } - Py_XDECREF(newarr); - return 0; -} - -/* - * Fills an array with zeros. - * - * Returns 0 on success, -1 on failure. - */ -NPY_NO_EXPORT int -PyArray_FillWithZero(PyArrayObject *a) -{ - PyArray_StridedTransferFn *stransfer = NULL; - void *transferdata = NULL; - PyArray_Descr *dtype = PyArray_DESCR(a); - NpyIter *iter; - - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp stride, *countptr; - int needs_api; - - NPY_BEGIN_THREADS_DEF; - - if (!PyArray_ISWRITEABLE(a)) { - PyErr_SetString(PyExc_RuntimeError, "cannot write to array"); - return -1; - } - - /* A zero-sized array needs no zeroing */ - if (PyArray_SIZE(a) == 0) { - return 0; - } - - /* If it's possible to do a simple memset, do so */ - if (!PyDataType_REFCHK(dtype) && (PyArray_ISCONTIGUOUS(a) || - PyArray_ISFORTRAN(a))) { - memset(PyArray_DATA(a), 0, PyArray_NBYTES(a)); - return 0; - } - - /* Use an iterator to go through all the data */ - iter = NpyIter_New(a, NPY_ITER_WRITEONLY|NPY_ITER_EXTERNAL_LOOP, - NPY_KEEPORDER, NPY_NO_CASTING, NULL); - - if (iter == NULL) { - return -1; - } - - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - return -1; - } - dataptr = NpyIter_GetDataPtrArray(iter); - stride = NpyIter_GetInnerStrideArray(iter)[0]; - countptr = NpyIter_GetInnerLoopSizePtr(iter); - - needs_api = NpyIter_IterationNeedsAPI(iter); - - /* - * Because buffering is disabled in the iterator, the inner loop - * strides will be the same throughout the iteration loop. Thus, - * we can pass them to this function to take advantage of - * contiguous strides, etc. 
- * - * By setting the src_dtype to NULL, we get a function which sets - * the destination to zeros. - */ - if (PyArray_GetDTypeTransferFunction( - PyArray_ISALIGNED(a), - 0, stride, - NULL, PyArray_DESCR(a), - 0, - &stransfer, &transferdata, - &needs_api) != NPY_SUCCEED) { - NpyIter_Deallocate(iter); - return -1; - } - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - do { - stransfer(*dataptr, stride, NULL, 0, - *countptr, 0, transferdata); - } while(iternext(iter)); - - if (!needs_api) { - NPY_END_THREADS; - } - - PyArray_FreeStridedTransferData(transferdata); - NpyIter_Deallocate(iter); - - return 0; -} - -/*NUMPY_API - * Copy an array. - */ -NPY_NO_EXPORT PyObject * -PyArray_NewCopy(PyArrayObject *m1, NPY_ORDER order) -{ - PyArrayObject *ret = (PyArrayObject *)PyArray_NewLikeArray( - m1, order, NULL, 1); - if (ret == NULL) { - return NULL; - } - - if (PyArray_CopyInto(ret, m1) == -1) { - Py_DECREF(ret); - return NULL; - } - - return (PyObject *)ret; -} - -/*NUMPY_API - * View - * steals a reference to type -- accepts NULL - */ -NPY_NO_EXPORT PyObject * -PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) -{ - PyObject *new = NULL; - PyTypeObject *subtype; - - if (pytype) { - subtype = pytype; - } - else { - subtype = Py_TYPE(self); - } - Py_INCREF(self->descr); - new = PyArray_NewFromDescr(subtype, - self->descr, - self->nd, self->dimensions, - self->strides, - self->data, - self->flags, (PyObject *)self); - if (new == NULL) { - return NULL; - } - Py_INCREF(self); - PyArray_BASE(new) = (PyObject *)self; - - if (type != NULL) { - if (PyObject_SetAttrString(new, "dtype", - (PyObject *)type) < 0) { - Py_DECREF(new); - Py_DECREF(type); - return NULL; - } - Py_DECREF(type); - } - return new; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/convert.h b/numpy-1.6.2/numpy/core/src/multiarray/convert.h deleted file mode 100644 index 1a34cfc52b..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/convert.h +++ /dev/null @@ -1,7 +0,0 @@ 
-#ifndef _NPY_ARRAYOBJECT_CONVERT_H_ -#define _NPY_ARRAYOBJECT_CONVERT_H_ - -NPY_NO_EXPORT int -PyArray_FillWithZero(PyArrayObject *a); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/convert_datatype.c b/numpy-1.6.2/numpy/core/src/multiarray/convert_datatype.c deleted file mode 100644 index aed578613a..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/convert_datatype.c +++ /dev/null @@ -1,1600 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "scalartypes.h" -#include "mapping.h" - -#include "convert_datatype.h" - -/*NUMPY_API - * For backward compatibility - * - * Cast an array using typecode structure. - * steals reference to at --- cannot be NULL - * - * This function always makes a copy of arr, even if the dtype - * doesn't change. - */ -NPY_NO_EXPORT PyObject * -PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int fortran) -{ - PyObject *out; - PyArray_Descr *arr_dtype; - - arr_dtype = PyArray_DESCR(arr); - - if (dtype->elsize == 0) { - PyArray_DESCR_REPLACE(dtype); - if (dtype == NULL) { - return NULL; - } - - if (arr_dtype->type_num == dtype->type_num) { - dtype->elsize = arr_dtype->elsize; - } - else if (arr_dtype->type_num == NPY_STRING && - dtype->type_num == NPY_UNICODE) { - dtype->elsize = arr_dtype->elsize * 4; - } - else if (arr_dtype->type_num == NPY_UNICODE && - dtype->type_num == NPY_STRING) { - dtype->elsize = arr_dtype->elsize / 4; - } - else if (dtype->type_num == NPY_VOID) { - dtype->elsize = arr_dtype->elsize; - } - } - - out = PyArray_NewFromDescr(Py_TYPE(arr), dtype, - arr->nd, - arr->dimensions, - NULL, NULL, - fortran, - (PyObject *)arr); - - if (out == NULL) { - return NULL; - } - - if (PyArray_CopyInto((PyArrayObject *)out, arr) < 0) { - Py_DECREF(out); - return NULL; - } - - 
return out; -} - -/*NUMPY_API - * Get a cast function to cast from the input descriptor to the - * output type_number (must be a registered data-type). - * Returns NULL if un-successful. - */ -NPY_NO_EXPORT PyArray_VectorUnaryFunc * -PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) -{ - PyArray_VectorUnaryFunc *castfunc = NULL; - - if (type_num < NPY_NTYPES_ABI_COMPATIBLE) { - castfunc = descr->f->cast[type_num]; - } - else { - PyObject *obj = descr->f->castdict; - if (obj && PyDict_Check(obj)) { - PyObject *key; - PyObject *cobj; - - key = PyInt_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); - Py_DECREF(key); - if (NpyCapsule_Check(cobj)) { - castfunc = NpyCapsule_AsVoidPtr(cobj); - } - } - } - if (PyTypeNum_ISCOMPLEX(descr->type_num) && - !PyTypeNum_ISCOMPLEX(type_num) && - PyTypeNum_ISNUMBER(type_num) && - !PyTypeNum_ISBOOL(type_num)) { - PyObject *cls = NULL, *obj = NULL; - int ret; - obj = PyImport_ImportModule("numpy.core"); - if (obj) { - cls = PyObject_GetAttrString(obj, "ComplexWarning"); - Py_DECREF(obj); - } -#if PY_VERSION_HEX >= 0x02050000 - ret = PyErr_WarnEx(cls, - "Casting complex values to real discards " - "the imaginary part", 1); -#else - ret = PyErr_Warn(cls, - "Casting complex values to real discards " - "the imaginary part"); -#endif - Py_XDECREF(cls); - if (ret < 0) { - return NULL; - } - } - if (castfunc) { - return castfunc; - } - - PyErr_SetString(PyExc_ValueError, "No cast function available."); - return NULL; -} - -/* - * Must be broadcastable. - * This code is very similar to PyArray_CopyInto/PyArray_MoveInto - * except casting is done --- PyArray_BUFSIZE is used - * as the size of the casting buffer. - */ - -/*NUMPY_API - * Cast to an already created array. - */ -NPY_NO_EXPORT int -PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp) -{ - /* CopyInto handles the casting now */ - return PyArray_CopyInto(out, mp); -} - -/*NUMPY_API - * Cast to an already created array. 
Arrays don't have to be "broadcastable" - * Only requirement is they have the same number of elements. - */ -NPY_NO_EXPORT int -PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) -{ - /* CopyAnyInto handles the casting now */ - return PyArray_CopyAnyInto(out, mp); -} - -/*NUMPY_API - *Check the type coercion rules. - */ -NPY_NO_EXPORT int -PyArray_CanCastSafely(int fromtype, int totype) -{ - PyArray_Descr *from; - - /* Fast table lookup for small type numbers */ - if ((unsigned int)fromtype < NPY_NTYPES && - (unsigned int)totype < NPY_NTYPES) { - return _npy_can_cast_safely_table[fromtype][totype]; - } - - /* Identity */ - if (fromtype == totype) { - return 1; - } - /* Special-cases for some types */ - switch (fromtype) { - case PyArray_DATETIME: - case PyArray_TIMEDELTA: - case PyArray_OBJECT: - case PyArray_VOID: - return 0; - case PyArray_BOOL: - return 1; - } - switch (totype) { - case PyArray_BOOL: - case PyArray_DATETIME: - case PyArray_TIMEDELTA: - return 0; - case PyArray_OBJECT: - case PyArray_VOID: - return 1; - } - - from = PyArray_DescrFromType(fromtype); - /* - * cancastto is a PyArray_NOTYPE terminated C-int-array of types that - * the data-type can be cast to safely. - */ - if (from->f->cancastto) { - int *curtype = from->f->cancastto; - - while (*curtype != PyArray_NOTYPE) { - if (*curtype++ == totype) { - return 1; - } - } - } - return 0; -} - -/*NUMPY_API - * leaves reference count alone --- cannot be NULL - * - * PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting' - * parameter. 
- */ -NPY_NO_EXPORT npy_bool -PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) -{ - int fromtype=from->type_num; - int totype=to->type_num; - npy_bool ret; - - ret = (npy_bool) PyArray_CanCastSafely(fromtype, totype); - if (ret) { - /* Check String and Unicode more closely */ - if (fromtype == PyArray_STRING) { - if (totype == PyArray_STRING) { - ret = (from->elsize <= to->elsize); - } - else if (totype == PyArray_UNICODE) { - ret = (from->elsize << 2 <= to->elsize); - } - } - else if (fromtype == PyArray_UNICODE) { - if (totype == PyArray_UNICODE) { - ret = (from->elsize <= to->elsize); - } - } - /* - * TODO: If totype is STRING or unicode - * see if the length is long enough to hold the - * stringified value of the object. - */ - } - return ret; -} - -/* Provides an ordering for the dtype 'kind' character codes */ -static int -dtype_kind_to_ordering(char kind) -{ - switch (kind) { - /* Boolean kind */ - case 'b': - return 0; - /* Unsigned int kind */ - case 'u': - return 1; - /* Signed int kind */ - case 'i': - return 2; - /* Float kind */ - case 'f': - return 4; - /* Complex kind */ - case 'c': - return 5; - /* String kind */ - case 'S': - case 'a': - return 6; - /* Unicode kind */ - case 'U': - return 7; - /* Void kind */ - case 'V': - return 8; - /* Object kind */ - case 'O': - return 9; - /* Anything else - ideally shouldn't happen... */ - default: - return 10; - } -} - -/* Converts a type number from unsigned to signed */ -static int -type_num_unsigned_to_signed(int type_num) -{ - switch (type_num) { - case NPY_UBYTE: - return NPY_BYTE; - case NPY_USHORT: - return NPY_SHORT; - case NPY_UINT: - return NPY_INT; - case NPY_ULONG: - return NPY_LONG; - case NPY_ULONGLONG: - return NPY_LONGLONG; - default: - return type_num; - } -} - -/*NUMPY_API - * Returns true if data of type 'from' may be cast to data of type - * 'to' according to the rule 'casting'. 
- */ -NPY_NO_EXPORT npy_bool -PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, - NPY_CASTING casting) -{ - /* If unsafe casts are allowed */ - if (casting == NPY_UNSAFE_CASTING) { - return 1; - } - /* Equivalent types can be cast with any value of 'casting' */ - else if (PyArray_EquivTypenums(from->type_num, to->type_num)) { - /* For complicated case, use EquivTypes (for now) */ - if (PyTypeNum_ISUSERDEF(from->type_num) || - PyDataType_HASFIELDS(from) || - from->subarray != NULL) { - int ret; - - /* Only NPY_NO_CASTING prevents byte order conversion */ - if ((casting != NPY_NO_CASTING) && - (!PyArray_ISNBO(from->byteorder) || - !PyArray_ISNBO(to->byteorder))) { - PyArray_Descr *nbo_from, *nbo_to; - - nbo_from = PyArray_DescrNewByteorder(from, NPY_NATIVE); - nbo_to = PyArray_DescrNewByteorder(to, NPY_NATIVE); - if (nbo_from == NULL || nbo_to == NULL) { - Py_XDECREF(nbo_from); - Py_XDECREF(nbo_to); - PyErr_Clear(); - return 0; - } - ret = PyArray_EquivTypes(nbo_from, nbo_to); - Py_DECREF(nbo_from); - Py_DECREF(nbo_to); - } - else { - ret = PyArray_EquivTypes(from, to); - } - return ret; - } - - switch (casting) { - case NPY_NO_CASTING: - return (from->elsize == to->elsize) && - PyArray_ISNBO(from->byteorder) == - PyArray_ISNBO(to->byteorder); - case NPY_EQUIV_CASTING: - return (from->elsize == to->elsize); - case NPY_SAFE_CASTING: - return (from->elsize <= to->elsize); - default: - return 1; - } - } - /* If safe or same-kind casts are allowed */ - else if (casting == NPY_SAFE_CASTING || casting == NPY_SAME_KIND_CASTING) { - if (PyArray_CanCastTo(from, to)) { - return 1; - } - else if(casting == NPY_SAME_KIND_CASTING) { - /* - * Also allow casting from lower to higher kinds, according - * to the ordering provided by dtype_kind_to_ordering. 
- */ - return dtype_kind_to_ordering(from->kind) <= - dtype_kind_to_ordering(to->kind); - } - else { - return 0; - } - } - /* NPY_NO_CASTING or NPY_EQUIV_CASTING was specified */ - else { - return 0; - } -} - -/* CanCastArrayTo needs this function */ -static int min_scalar_type_num(char *valueptr, int type_num, - int *is_small_unsigned); - -/*NUMPY_API - * Returns 1 if the array object may be cast to the given data type using - * the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in - * that it handles scalar arrays (0 dimensions) specially, by checking - * their value. - */ -NPY_NO_EXPORT npy_bool -PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, - NPY_CASTING casting) -{ - PyArray_Descr *from = PyArray_DESCR(arr); - - /* If it's not a scalar, use the standard rules */ - if (PyArray_NDIM(arr) > 0 || !PyTypeNum_ISNUMBER(from->type_num)) { - return PyArray_CanCastTypeTo(from, to, casting); - } - /* Otherwise, check the value */ - else { - int swap = !PyArray_ISNBO(from->byteorder); - int is_small_unsigned = 0, type_num; - npy_bool ret; - PyArray_Descr *dtype; - - /* An aligned memory buffer large enough to hold any type */ - npy_longlong value[4]; - - from->f->copyswap(&value, PyArray_BYTES(arr), swap, NULL); - - type_num = min_scalar_type_num((char *)&value, from->type_num, - &is_small_unsigned); - - /* - * If we've got a small unsigned scalar, and the 'to' type - * is not unsigned, then make it signed to allow the value - * to be cast more appropriately. 
- */ - if (is_small_unsigned && !(PyTypeNum_ISUNSIGNED(to->type_num))) { - type_num = type_num_unsigned_to_signed(type_num); - } - - dtype = PyArray_DescrFromType(type_num); - if (dtype == NULL) { - return 0; - } -#if 0 - printf("min scalar cast "); - PyObject_Print(dtype, stdout, 0); - printf(" to "); - PyObject_Print(to, stdout, 0); - printf("\n"); -#endif - ret = PyArray_CanCastTypeTo(dtype, to, casting); - Py_DECREF(dtype); - return ret; - } -} - -/*NUMPY_API - * See if array scalars can be cast. - * - * TODO: For NumPy 2.0, add a NPY_CASTING parameter. - */ -NPY_NO_EXPORT npy_bool -PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) -{ - int fromtype; - int totype; - - fromtype = _typenum_fromtypeobj((PyObject *)from, 0); - totype = _typenum_fromtypeobj((PyObject *)to, 0); - if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) { - return FALSE; - } - return (npy_bool) PyArray_CanCastSafely(fromtype, totype); -} - -/* - * Internal promote types function which handles unsigned integers which - * fit in same-sized signed integers specially. 
- */ -static PyArray_Descr * -promote_types(PyArray_Descr *type1, PyArray_Descr *type2, - int is_small_unsigned1, int is_small_unsigned2) -{ - if (is_small_unsigned1) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num2 < NPY_NTYPES && !(PyTypeNum_ISBOOL(type_num2) || - PyTypeNum_ISUNSIGNED(type_num2))) { - /* Convert to the equivalent-sized signed integer */ - type_num1 = type_num_unsigned_to_signed(type_num1); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else if (is_small_unsigned2) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num1 < NPY_NTYPES && !(PyTypeNum_ISBOOL(type_num1) || - PyTypeNum_ISUNSIGNED(type_num1))) { - /* Convert to the equivalent-sized signed integer */ - type_num2 = type_num_unsigned_to_signed(type_num2); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else { - return PyArray_PromoteTypes(type1, type2); - } - -} - -/*NUMPY_API - * Produces the smallest size and lowest kind type to which both - * input types can be cast. 
- */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) -{ - int type_num1, type_num2, ret_type_num; - - type_num1 = type1->type_num; - type_num2 = type2->type_num; - - /* If they're built-in types, use the promotion table */ - if (type_num1 < NPY_NTYPES && type_num2 < NPY_NTYPES) { - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - /* If one or both are user defined, calculate it */ - else { - int skind1 = NPY_NOSCALAR, skind2 = NPY_NOSCALAR, skind; - - if (PyArray_CanCastTo(type2, type1)) { - /* Promoted types are always native byte order */ - if (PyArray_ISNBO(type1->byteorder)) { - Py_INCREF(type1); - return type1; - } - else { - return PyArray_DescrNewByteorder(type1, NPY_NATIVE); - } - } - else if (PyArray_CanCastTo(type1, type2)) { - /* Promoted types are always native byte order */ - if (PyArray_ISNBO(type2->byteorder)) { - Py_INCREF(type2); - return type2; - } - else { - return PyArray_DescrNewByteorder(type2, NPY_NATIVE); - } - } - - /* Convert the 'kind' char into a scalar kind */ - switch (type1->kind) { - case 'b': - skind1 = NPY_BOOL_SCALAR; - break; - case 'u': - skind1 = NPY_INTPOS_SCALAR; - break; - case 'i': - skind1 = NPY_INTNEG_SCALAR; - break; - case 'f': - skind1 = NPY_FLOAT_SCALAR; - break; - case 'c': - skind1 = NPY_COMPLEX_SCALAR; - break; - } - switch (type2->kind) { - case 'b': - skind2 = NPY_BOOL_SCALAR; - break; - case 'u': - skind2 = NPY_INTPOS_SCALAR; - break; - case 'i': - skind2 = NPY_INTNEG_SCALAR; - break; - case 'f': - skind2 = NPY_FLOAT_SCALAR; - break; - case 'c': - skind2 = NPY_COMPLEX_SCALAR; - break; - } - - /* If both are scalars, there may be a promotion possible */ - if (skind1 != NPY_NOSCALAR && skind2 != NPY_NOSCALAR) { - - /* Start with the larger scalar kind */ - skind = (skind1 > skind2) ? 
skind1 : skind2; - ret_type_num = _npy_smallest_type_of_kind_table[skind]; - - for (;;) { - - /* If there is no larger type of this kind, try a larger kind */ - if (ret_type_num < 0) { - ++skind; - /* Use -1 to signal no promoted type found */ - if (skind < NPY_NSCALARKINDS) { - ret_type_num = _npy_smallest_type_of_kind_table[skind]; - } - else { - break; - } - } - - /* If we found a type to which we can promote both, done! */ - if (PyArray_CanCastSafely(type_num1, ret_type_num) && - PyArray_CanCastSafely(type_num2, ret_type_num)) { - return PyArray_DescrFromType(ret_type_num); - } - - /* Try the next larger type of this kind */ - ret_type_num = _npy_next_larger_type_table[ret_type_num]; - } - - } - - PyErr_SetString(PyExc_TypeError, - "invalid type promotion with custom data type"); - return NULL; - } - - switch (type_num1) { - /* BOOL can convert to anything */ - case NPY_BOOL: - Py_INCREF(type2); - return type2; - /* For strings and unicodes, take the larger size */ - case NPY_STRING: - if (type_num2 == NPY_STRING) { - if (type1->elsize > type2->elsize) { - Py_INCREF(type1); - return type1; - } - else { - Py_INCREF(type2); - return type2; - } - } - else if (type_num2 == NPY_UNICODE) { - if (type2->elsize >= type1->elsize * 4) { - Py_INCREF(type2); - return type2; - } - else { - PyArray_Descr *d = PyArray_DescrNewFromType(NPY_UNICODE); - if (d == NULL) { - return NULL; - } - d->elsize = type1->elsize * 4; - return d; - } - } - /* Allow NUMBER -> STRING */ - else if (PyTypeNum_ISNUMBER(type_num2)) { - Py_INCREF(type1); - return type1; - } - case NPY_UNICODE: - if (type_num2 == NPY_UNICODE) { - if (type1->elsize > type2->elsize) { - Py_INCREF(type1); - return type1; - } - else { - Py_INCREF(type2); - return type2; - } - } - else if (type_num2 == NPY_STRING) { - if (type1->elsize >= type2->elsize * 4) { - Py_INCREF(type1); - return type1; - } - else { - PyArray_Descr *d = PyArray_DescrNewFromType(NPY_UNICODE); - if (d == NULL) { - return NULL; - } - d->elsize = 
type2->elsize * 4; - return d; - } - } - /* Allow NUMBER -> UNICODE */ - else if (PyTypeNum_ISNUMBER(type_num2)) { - Py_INCREF(type1); - return type1; - } - break; - } - - switch (type_num2) { - /* BOOL can convert to anything */ - case NPY_BOOL: - Py_INCREF(type1); - return type1; - case NPY_STRING: - /* Allow NUMBER -> STRING */ - if (PyTypeNum_ISNUMBER(type_num1)) { - Py_INCREF(type2); - return type2; - } - case NPY_UNICODE: - /* Allow NUMBER -> UNICODE */ - if (PyTypeNum_ISNUMBER(type_num1)) { - Py_INCREF(type2); - return type2; - } - break; - } - - /* For equivalent types we can return either */ - if (PyArray_EquivTypes(type1, type2)) { - Py_INCREF(type1); - return type1; - } - - /* TODO: Also combine fields, subarrays, strings, etc */ - - /* - printf("invalid type promotion: "); - PyObject_Print(type1, stdout, 0); - printf(" "); - PyObject_Print(type2, stdout, 0); - printf("\n"); - */ - PyErr_SetString(PyExc_TypeError, "invalid type promotion"); - return NULL; -} - -/* - * NOTE: While this is unlikely to be a performance problem, if - * it is it could be reverted to a simple positive/negative - * check as the previous system used. - * - * The is_small_unsigned output flag indicates whether it's an unsigned integer, - * and would fit in a signed integer of the same bit size. 
- */ -static int min_scalar_type_num(char *valueptr, int type_num, - int *is_small_unsigned) -{ - switch (type_num) { - case NPY_BOOL: { - return NPY_BOOL; - } - case NPY_UBYTE: { - npy_ubyte value = *(npy_ubyte *)valueptr; - if (value <= NPY_MAX_BYTE) { - *is_small_unsigned = 1; - } - return NPY_UBYTE; - } - case NPY_BYTE: { - npy_byte value = *(npy_byte *)valueptr; - if (value >= 0) { - *is_small_unsigned = 1; - return NPY_UBYTE; - } - break; - } - case NPY_USHORT: { - npy_ushort value = *(npy_ushort *)valueptr; - if (value <= NPY_MAX_UBYTE) { - if (value <= NPY_MAX_BYTE) { - *is_small_unsigned = 1; - } - return NPY_UBYTE; - } - - if (value <= NPY_MAX_SHORT) { - *is_small_unsigned = 1; - } - break; - } - case NPY_SHORT: { - npy_short value = *(npy_short *)valueptr; - if (value >= 0) { - return min_scalar_type_num(valueptr, NPY_USHORT, is_small_unsigned); - } - else if (value >= NPY_MIN_BYTE) { - return NPY_BYTE; - } - break; - } -#if NPY_SIZEOF_LONG == NPY_SIZEOF_INT - case NPY_ULONG: -#endif - case NPY_UINT: { - npy_uint value = *(npy_uint *)valueptr; - if (value <= NPY_MAX_UBYTE) { - if (value < NPY_MAX_BYTE) { - *is_small_unsigned = 1; - } - return NPY_UBYTE; - } - else if (value <= NPY_MAX_USHORT) { - if (value <= NPY_MAX_SHORT) { - *is_small_unsigned = 1; - } - return NPY_USHORT; - } - - if (value <= NPY_MAX_INT) { - *is_small_unsigned = 1; - } - break; - } -#if NPY_SIZEOF_LONG == NPY_SIZEOF_INT - case NPY_LONG: -#endif - case NPY_INT: { - npy_int value = *(npy_int *)valueptr; - if (value >= 0) { - return min_scalar_type_num(valueptr, NPY_UINT, is_small_unsigned); - } - else if (value >= NPY_MIN_BYTE) { - return NPY_BYTE; - } - else if (value >= NPY_MIN_SHORT) { - return NPY_SHORT; - } - break; - } -#if NPY_SIZEOF_LONG != NPY_SIZEOF_INT && NPY_SIZEOF_LONG != NPY_SIZEOF_LONGLONG - case NPY_ULONG: { - npy_ulong value = *(npy_ulong *)valueptr; - if (value <= NPY_MAX_UBYTE) { - if (value <= NPY_MAX_BYTE) { - *is_small_unsigned = 1; - } - return NPY_UBYTE; - } - 
else if (value <= NPY_MAX_USHORT) { - if (value <= NPY_MAX_SHORT) { - *is_small_unsigned = 1; - } - return NPY_USHORT; - } - else if (value <= NPY_MAX_UINT) { - if (value <= NPY_MAX_INT) { - *is_small_unsigned = 1; - } - return NPY_UINT; - } - - if (value <= NPY_MAX_LONG) { - *is_small_unsigned = 1; - } - break; - } - case NPY_LONG: { - npy_long value = *(npy_long *)valueptr; - if (value >= 0) { - return min_scalar_type_num(valueptr, NPY_ULONG, is_small_unsigned); - } - else if (value >= NPY_MIN_BYTE) { - return NPY_BYTE; - } - else if (value >= NPY_MIN_SHORT) { - return NPY_SHORT; - } - else if (value >= NPY_MIN_INT) { - return NPY_INT; - } - break; - } -#endif -#if NPY_SIZEOF_LONG == NPY_SIZEOF_LONGLONG - case NPY_ULONG: -#endif - case NPY_ULONGLONG: { - npy_ulonglong value = *(npy_ulonglong *)valueptr; - if (value <= NPY_MAX_UBYTE) { - if (value <= NPY_MAX_BYTE) { - *is_small_unsigned = 1; - } - return NPY_UBYTE; - } - else if (value <= NPY_MAX_USHORT) { - if (value <= NPY_MAX_SHORT) { - *is_small_unsigned = 1; - } - return NPY_USHORT; - } - else if (value <= NPY_MAX_UINT) { - if (value <= NPY_MAX_INT) { - *is_small_unsigned = 1; - } - return NPY_UINT; - } -#if NPY_SIZEOF_LONG != NPY_SIZEOF_INT && NPY_SIZEOF_LONG != NPY_SIZEOF_LONGLONG - else if (value <= NPY_MAX_ULONG) { - if (value <= NPY_MAX_LONG) { - *is_small_unsigned = 1; - } - return NPY_ULONG; - } -#endif - - if (value <= NPY_MAX_LONGLONG) { - *is_small_unsigned = 1; - } - break; - } -#if NPY_SIZEOF_LONG == NPY_SIZEOF_LONGLONG - case NPY_LONG: -#endif - case NPY_LONGLONG: { - npy_longlong value = *(npy_longlong *)valueptr; - if (value >= 0) { - return min_scalar_type_num(valueptr, NPY_ULONGLONG, is_small_unsigned); - } - else if (value >= NPY_MIN_BYTE) { - return NPY_BYTE; - } - else if (value >= NPY_MIN_SHORT) { - return NPY_SHORT; - } - else if (value >= NPY_MIN_INT) { - return NPY_INT; - } -#if NPY_SIZEOF_LONG != NPY_SIZEOF_INT && NPY_SIZEOF_LONG != NPY_SIZEOF_LONGLONG - else if (value >= 
NPY_MIN_LONG) { - return NPY_LONG; - } -#endif - break; - } - /* - * Float types aren't allowed to be demoted to integer types, - * but precision loss is allowed. - */ - case NPY_HALF: { - return NPY_HALF; - } - case NPY_FLOAT: { - float value = *(float *)valueptr; - if (value > -65000 && value < 65000) { - return NPY_HALF; - } - break; - } - case NPY_DOUBLE: { - double value = *(double *)valueptr; - if (value > -65000 && value < 65000) { - return NPY_HALF; - } - else if (value > -3.4e38 && value < 3.4e38) { - return NPY_FLOAT; - } - break; - } - case NPY_LONGDOUBLE: { - npy_longdouble value = *(npy_longdouble *)valueptr; - if (value > -65000 && value < 65000) { - return NPY_HALF; - } - else if (value > -3.4e38 && value < 3.4e38) { - return NPY_FLOAT; - } - else if (value > -1.7e308 && value < 1.7e308) { - return NPY_DOUBLE; - } - break; - } - /* - * The code to demote complex to float is disabled for now, - * as forcing complex by adding 0j is probably desireable. - */ - case NPY_CFLOAT: { - /* - npy_cfloat value = *(npy_cfloat *)valueptr; - if (value.imag == 0) { - return min_scalar_type_num((char *)&value.real, - NPY_FLOAT, is_small_unsigned); - } - */ - break; - } - case NPY_CDOUBLE: { - npy_cdouble value = *(npy_cdouble *)valueptr; - /* - if (value.imag == 0) { - return min_scalar_type_num((char *)&value.real, - NPY_DOUBLE, is_small_unsigned); - } - */ - if (value.real > -3.4e38 && value.real < 3.4e38 && - value.imag > -3.4e38 && value.imag < 3.4e38) { - return NPY_CFLOAT; - } - break; - } - case NPY_CLONGDOUBLE: { - npy_cdouble value = *(npy_cdouble *)valueptr; - /* - if (value.imag == 0) { - return min_scalar_type_num((char *)&value.real, - NPY_LONGDOUBLE, is_small_unsigned); - } - */ - if (value.real > -3.4e38 && value.real < 3.4e38 && - value.imag > -3.4e38 && value.imag < 3.4e38) { - return NPY_CFLOAT; - } - else if (value.real > -1.7e308 && value.real < 1.7e308 && - value.imag > -1.7e308 && value.imag < 1.7e308) { - return NPY_CDOUBLE; - } - break; - } - 
} - - return type_num; -} - -/*NUMPY_API - * If arr is a scalar (has 0 dimensions) with a built-in number data type, - * finds the smallest type size/kind which can still represent its data. - * Otherwise, returns the array's data type. - * - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType(PyArrayObject *arr) -{ - PyArray_Descr *dtype = PyArray_DESCR(arr); - if (PyArray_NDIM(arr) > 0 || !PyTypeNum_ISNUMBER(dtype->type_num)) { - Py_INCREF(dtype); - return dtype; - } - else { - char *data = PyArray_BYTES(arr); - int swap = !PyArray_ISNBO(dtype->byteorder); - int is_small_unsigned = 0; - /* An aligned memory buffer large enough to hold any type */ - npy_longlong value[4]; - dtype->f->copyswap(&value, data, swap, NULL); - - return PyArray_DescrFromType( - min_scalar_type_num((char *)&value, - dtype->type_num, &is_small_unsigned)); - - } -} - -/* - * Provides an ordering for the dtype 'kind' character codes, to help - * determine when to use the min_scalar_type function. This groups - * 'kind' into boolean, integer, floating point, and everything else. - */ -static int -dtype_kind_to_simplified_ordering(char kind) -{ - switch (kind) { - /* Boolean kind */ - case 'b': - return 0; - /* Unsigned int kind */ - case 'u': - /* Signed int kind */ - case 'i': - return 1; - /* Float kind */ - case 'f': - /* Complex kind */ - case 'c': - return 2; - /* Anything else */ - default: - return 3; - } -} - -/*NUMPY_API - * Produces the result type of a bunch of inputs, using the UFunc - * type promotion rules. Use this function when you have a set of - * input arrays, and need to determine an output array dtype. - * - * If all the inputs are scalars (have 0 dimensions) or the maximum "kind" - * of the scalars is greater than the maximum "kind" of the arrays, does - * a regular type promotion. - * - * Otherwise, does a type promotion on the MinScalarType - * of all the inputs. Data types passed directly are treated as array - * types. 
- * - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, - npy_intp ndtypes, PyArray_Descr **dtypes) -{ - npy_intp i; - int use_min_scalar = 0; - PyArray_Descr *ret = NULL, *tmpret; - int ret_is_small_unsigned = 0; - - /* If there's just one type, pass it through */ - if (narrs + ndtypes == 1) { - if (narrs == 1) { - ret = PyArray_DESCR(arr[0]); - } - else { - ret = dtypes[0]; - } - Py_INCREF(ret); - return ret; - } - - /* - * Determine if there are any scalars, and if so, whether - * the maximum "kind" of the scalars surpasses the maximum - * "kind" of the arrays - */ - if (narrs > 0) { - int all_scalars, max_scalar_kind = -1, max_array_kind = -1; - int kind; - - all_scalars = (ndtypes > 0) ? 0 : 1; - - /* Compute the maximum "kinds" and whether everything is scalar */ - for (i = 0; i < narrs; ++i) { - if (PyArray_NDIM(arr[i]) == 0) { - kind = dtype_kind_to_simplified_ordering( - PyArray_DESCR(arr[i])->kind); - if (kind > max_scalar_kind) { - max_scalar_kind = kind; - } - } - else { - all_scalars = 0; - kind = dtype_kind_to_simplified_ordering( - PyArray_DESCR(arr[i])->kind); - if (kind > max_array_kind) { - max_array_kind = kind; - } - } - } - /* - * If the max scalar kind is bigger than the max array kind, - * finish computing the max array kind - */ - for (i = 0; i < ndtypes; ++i) { - kind = dtype_kind_to_simplified_ordering(dtypes[i]->kind); - if (kind > max_array_kind) { - max_array_kind = kind; - } - } - - /* Indicate whether to use the min_scalar_type function */ - if (!all_scalars && max_array_kind >= max_scalar_kind) { - use_min_scalar = 1; - } - } - - /* Loop through all the types, promoting them */ - if (!use_min_scalar) { - for (i = 0; i < narrs; ++i) { - PyArray_Descr *tmp = PyArray_DESCR(arr[i]); - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - /* Only call promote if the types aren't the same dtype */ - if (tmp != ret || 
!PyArray_ISNBO(ret->byteorder)) { - tmpret = PyArray_PromoteTypes(tmp, ret); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return NULL; - } - } - } - } - - for (i = 0; i < ndtypes; ++i) { - PyArray_Descr *tmp = dtypes[i]; - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - /* Only call promote if the types aren't the same dtype */ - if (tmp != ret || !PyArray_ISNBO(tmp->byteorder)) { - tmpret = PyArray_PromoteTypes(tmp, ret); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return NULL; - } - } - } - } - } - else { - for (i = 0; i < narrs; ++i) { - /* Get the min scalar type for the array */ - PyArray_Descr *tmp = PyArray_DESCR(arr[i]); - int tmp_is_small_unsigned = 0; - /* - * If it's a scalar, find the min scalar type. The function - * is expanded here so that we can flag whether we've got an - * unsigned integer which would fit an a signed integer - * of the same size, something not exposed in the public API. 
- */ - if (PyArray_NDIM(arr[i]) == 0 && - PyTypeNum_ISNUMBER(tmp->type_num)) { - char *data = PyArray_BYTES(arr[i]); - int swap = !PyArray_ISNBO(tmp->byteorder); - int type_num; - /* An aligned memory buffer large enough to hold any type */ - npy_longlong value[4]; - tmp->f->copyswap(&value, data, swap, NULL); - type_num = min_scalar_type_num((char *)&value, - tmp->type_num, &tmp_is_small_unsigned); - tmp = PyArray_DescrFromType(type_num); - if (tmp == NULL) { - Py_XDECREF(ret); - return NULL; - } - } - else { - Py_INCREF(tmp); - } - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - ret_is_small_unsigned = tmp_is_small_unsigned; - } - else { -#if 0 - printf("promoting type "); - PyObject_Print(tmp, stdout, 0); - printf(" (%d) ", tmp_is_small_unsigned); - PyObject_Print(ret, stdout, 0); - printf(" (%d) ", ret_is_small_unsigned); - printf("\n"); -#endif - /* If they point to the same type, don't call promote */ - if (tmp == ret && PyArray_ISNBO(tmp->byteorder)) { - Py_DECREF(tmp); - } - else { - tmpret = promote_types(tmp, ret, tmp_is_small_unsigned, - ret_is_small_unsigned); - if (tmpret == NULL) { - Py_DECREF(tmp); - Py_DECREF(ret); - return NULL; - } - Py_DECREF(tmp); - Py_DECREF(ret); - ret = tmpret; - } - ret_is_small_unsigned = tmp_is_small_unsigned && - ret_is_small_unsigned; - } - } - - for (i = 0; i < ndtypes; ++i) { - PyArray_Descr *tmp = dtypes[i]; - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - /* Only call promote if the types aren't the same dtype */ - if (tmp != ret || !PyArray_ISNBO(tmp->byteorder)) { - if (ret_is_small_unsigned) { - tmpret = promote_types(tmp, ret, 0, - ret_is_small_unsigned); - if (tmpret == NULL) { - Py_DECREF(tmp); - Py_DECREF(ret); - return NULL; - } - } - else { - tmpret = PyArray_PromoteTypes(tmp, ret); - } - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return NULL; - } - } - } - } - } - - if (ret == NULL) { - 
PyErr_SetString(PyExc_TypeError, - "no arrays or types available to calculate result type"); - } - - return ret; -} - -/*NUMPY_API - * Is the typenum valid? - */ -NPY_NO_EXPORT int -PyArray_ValidType(int type) -{ - PyArray_Descr *descr; - int res=TRUE; - - descr = PyArray_DescrFromType(type); - if (descr == NULL) { - res = FALSE; - } - Py_DECREF(descr); - return res; -} - -/* Backward compatibility only */ -/* In both Zero and One - -***You must free the memory once you are done with it -using PyDataMem_FREE(ptr) or you create a memory leak*** - -If arr is an Object array you are getting a -BORROWED reference to Zero or One. -Do not DECREF. -Please INCREF if you will be hanging on to it. - -The memory for the ptr still must be freed in any case; -*/ - -static int -_check_object_rec(PyArray_Descr *descr) -{ - if (PyDataType_HASFIELDS(descr) && PyDataType_REFCHK(descr)) { - PyErr_SetString(PyExc_TypeError, "Not supported for this data-type."); - return -1; - } - return 0; -} - -/*NUMPY_API - Get pointer to zero of correct type for array. 
-*/ -NPY_NO_EXPORT char * -PyArray_Zero(PyArrayObject *arr) -{ - char *zeroval; - int ret, storeflags; - PyObject *obj; - - if (_check_object_rec(arr->descr) < 0) { - return NULL; - } - zeroval = PyDataMem_NEW(arr->descr->elsize); - if (zeroval == NULL) { - PyErr_SetNone(PyExc_MemoryError); - return NULL; - } - - obj=PyInt_FromLong((long) 0); - if (PyArray_ISOBJECT(arr)) { - memcpy(zeroval, &obj, sizeof(PyObject *)); - Py_DECREF(obj); - return zeroval; - } - storeflags = arr->flags; - arr->flags |= BEHAVED; - ret = arr->descr->f->setitem(obj, zeroval, arr); - arr->flags = storeflags; - Py_DECREF(obj); - if (ret < 0) { - PyDataMem_FREE(zeroval); - return NULL; - } - return zeroval; -} - -/*NUMPY_API - Get pointer to one of correct type for array -*/ -NPY_NO_EXPORT char * -PyArray_One(PyArrayObject *arr) -{ - char *oneval; - int ret, storeflags; - PyObject *obj; - - if (_check_object_rec(arr->descr) < 0) { - return NULL; - } - oneval = PyDataMem_NEW(arr->descr->elsize); - if (oneval == NULL) { - PyErr_SetNone(PyExc_MemoryError); - return NULL; - } - - obj = PyInt_FromLong((long) 1); - if (PyArray_ISOBJECT(arr)) { - memcpy(oneval, &obj, sizeof(PyObject *)); - Py_DECREF(obj); - return oneval; - } - - storeflags = arr->flags; - arr->flags |= BEHAVED; - ret = arr->descr->f->setitem(obj, oneval, arr); - arr->flags = storeflags; - Py_DECREF(obj); - if (ret < 0) { - PyDataMem_FREE(oneval); - return NULL; - } - return oneval; -} - -/* End deprecated */ - -/*NUMPY_API - * Return the typecode of the array a Python object would be converted to - */ -NPY_NO_EXPORT int -PyArray_ObjectType(PyObject *op, int minimum_type) -{ - PyArray_Descr *intype; - PyArray_Descr *outtype; - int ret; - - intype = PyArray_DescrFromType(minimum_type); - if (intype == NULL) { - PyErr_Clear(); - } - outtype = _array_find_type(op, intype, MAX_DIMS); - ret = outtype->type_num; - Py_DECREF(outtype); - Py_XDECREF(intype); - return ret; -} - -/* Raises error when len(op) == 0 */ - -/*NUMPY_API*/ 
-NPY_NO_EXPORT PyArrayObject ** -PyArray_ConvertToCommonType(PyObject *op, int *retn) -{ - int i, n, allscalars = 0; - PyArrayObject **mps = NULL; - PyObject *otmp; - PyArray_Descr *intype = NULL, *stype = NULL; - PyArray_Descr *newtype = NULL; - NPY_SCALARKIND scalarkind = NPY_NOSCALAR, intypekind = NPY_NOSCALAR; - - *retn = n = PySequence_Length(op); - if (n == 0) { - PyErr_SetString(PyExc_ValueError, "0-length sequence."); - } - if (PyErr_Occurred()) { - *retn = 0; - return NULL; - } - mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *)); - if (mps == NULL) { - *retn = 0; - return (void*)PyErr_NoMemory(); - } - - if (PyArray_Check(op)) { - for (i = 0; i < n; i++) { - mps[i] = (PyArrayObject *) array_big_item((PyArrayObject *)op, i); - } - if (!PyArray_ISCARRAY(op)) { - for (i = 0; i < n; i++) { - PyObject *obj; - obj = PyArray_NewCopy(mps[i], NPY_CORDER); - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)obj; - } - } - return mps; - } - - for (i = 0; i < n; i++) { - mps[i] = NULL; - } - - for (i = 0; i < n; i++) { - otmp = PySequence_GetItem(op, i); - if (!PyArray_CheckAnyScalar(otmp)) { - newtype = PyArray_DescrFromObject(otmp, intype); - Py_XDECREF(intype); - if (newtype == NULL) { - goto fail; - } - intype = newtype; - intypekind = PyArray_ScalarKind(intype->type_num, NULL); - } - else { - newtype = PyArray_DescrFromObject(otmp, stype); - Py_XDECREF(stype); - if (newtype == NULL) { - goto fail; - } - stype = newtype; - scalarkind = PyArray_ScalarKind(newtype->type_num, NULL); - mps[i] = (PyArrayObject *)Py_None; - Py_INCREF(Py_None); - } - Py_XDECREF(otmp); - } - if (intype == NULL) { - /* all scalars */ - allscalars = 1; - intype = stype; - Py_INCREF(intype); - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - mps[i] = NULL; - } - } - else if ((stype != NULL) && (intypekind != scalarkind)) { - /* - * we need to upconvert to type that - * handles both intype and stype - * also don't forcecast the scalars. 
- */ - if (!PyArray_CanCoerceScalar(stype->type_num, - intype->type_num, - scalarkind)) { - newtype = PyArray_PromoteTypes(intype, stype); - Py_XDECREF(intype); - intype = newtype; - } - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - mps[i] = NULL; - } - } - - - /* Make sure all arrays are actual array objects. */ - for (i = 0; i < n; i++) { - int flags = CARRAY; - - if ((otmp = PySequence_GetItem(op, i)) == NULL) { - goto fail; - } - if (!allscalars && ((PyObject *)(mps[i]) == Py_None)) { - /* forcecast scalars */ - flags |= FORCECAST; - Py_DECREF(Py_None); - } - Py_INCREF(intype); - mps[i] = (PyArrayObject*) - PyArray_FromAny(otmp, intype, 0, 0, flags, NULL); - Py_DECREF(otmp); - if (mps[i] == NULL) { - goto fail; - } - } - Py_DECREF(intype); - Py_XDECREF(stype); - return mps; - - fail: - Py_XDECREF(intype); - Py_XDECREF(stype); - *retn = 0; - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - } - PyDataMem_FREE(mps); - return NULL; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/convert_datatype.h b/numpy-1.6.2/numpy/core/src/multiarray/convert_datatype.h deleted file mode 100644 index 844cce0c96..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/convert_datatype.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _NPY_ARRAY_CONVERT_DATATYPE_H_ -#define _NPY_ARRAY_CONVERT_DATATYPE_H_ - -NPY_NO_EXPORT PyArray_VectorUnaryFunc * -PyArray_GetCastFunc(PyArray_Descr *descr, int type_num); - -NPY_NO_EXPORT int -PyArray_ObjectType(PyObject *op, int minimum_type); - -NPY_NO_EXPORT PyArrayObject ** -PyArray_ConvertToCommonType(PyObject *op, int *retn); - -NPY_NO_EXPORT int -PyArray_ValidType(int type); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/ctors.c b/numpy-1.6.2/numpy/core/src/multiarray/ctors.c deleted file mode 100644 index d5b2837cc0..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/ctors.c +++ /dev/null @@ -1,3815 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX 
-#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" - -#include "ctors.h" - -#include "shape.h" - -#include "buffer.h" - -#include "numpymemoryview.h" - -#include "lowlevel_strided_loops.h" - -/* - * Reading from a file or a string. - * - * As much as possible, we try to use the same code for both files and strings, - * so the semantics for fromstring and fromfile are the same, especially with - * regards to the handling of text representations. - */ - -typedef int (*next_element)(void **, void *, PyArray_Descr *, void *); -typedef int (*skip_separator)(void **, const char *, void *); - -static int -fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype, - const char *end) -{ - int r = dtype->f->fromstr(*s, dptr, s, dtype); - if (end != NULL && *s > end) { - return -1; - } - return r; -} - -static int -fromfile_next_element(FILE **fp, void *dptr, PyArray_Descr *dtype, - void *NPY_UNUSED(stream_data)) -{ - /* the NULL argument is for backwards-compatibility */ - return dtype->f->scanfunc(*fp, dptr, NULL, dtype); -} - -/* - * Remove multiple whitespace from the separator, and add a space to the - * beginning and end. This simplifies the separator-skipping code below. - */ -static char * -swab_separator(char *sep) -{ - int skip_space = 0; - char *s, *start; - - s = start = malloc(strlen(sep)+3); - /* add space to front if there isn't one */ - if (*sep != '\0' && !isspace(*sep)) { - *s = ' '; s++; - } - while (*sep != '\0') { - if (isspace(*sep)) { - if (skip_space) { - sep++; - } - else { - *s = ' '; - s++; - sep++; - skip_space = 1; - } - } - else { - *s = *sep; - s++; - sep++; - skip_space = 0; - } - } - /* add space to end if there isn't one */ - if (s != start && s[-1] == ' ') { - *s = ' '; - s++; - } - *s = '\0'; - return start; -} - -/* - * Assuming that the separator is the next bit in the string (file), skip it. 
- * - * Single spaces in the separator are matched to arbitrary-long sequences - * of whitespace in the input. If the separator consists only of spaces, - * it matches one or more whitespace characters. - * - * If we can't match the separator, return -2. - * If we hit the end of the string (file), return -1. - * Otherwise, return 0. - */ -static int -fromstr_skip_separator(char **s, const char *sep, const char *end) -{ - char *string = *s; - int result = 0; - while (1) { - char c = *string; - if (c == '\0' || (end != NULL && string >= end)) { - result = -1; - break; - } - else if (*sep == '\0') { - if (string != *s) { - /* matched separator */ - result = 0; - break; - } - else { - /* separator was whitespace wildcard that didn't match */ - result = -2; - break; - } - } - else if (*sep == ' ') { - /* whitespace wildcard */ - if (!isspace(c)) { - sep++; - continue; - } - } - else if (*sep != c) { - result = -2; - break; - } - else { - sep++; - } - string++; - } - *s = string; - return result; -} - -static int -fromfile_skip_separator(FILE **fp, const char *sep, void *NPY_UNUSED(stream_data)) -{ - int result = 0; - const char *sep_start = sep; - - while (1) { - int c = fgetc(*fp); - - if (c == EOF) { - result = -1; - break; - } - else if (*sep == '\0') { - ungetc(c, *fp); - if (sep != sep_start) { - /* matched separator */ - result = 0; - break; - } - else { - /* separator was whitespace wildcard that didn't match */ - result = -2; - break; - } - } - else if (*sep == ' ') { - /* whitespace wildcard */ - if (!isspace(c)) { - sep++; - sep_start++; - ungetc(c, *fp); - } - else if (sep == sep_start) { - sep_start--; - } - } - else if (*sep != c) { - ungetc(c, *fp); - result = -2; - break; - } - else { - sep++; - } - } - return result; -} - -/* - * Change a sub-array field to the base descriptor - * and update the dimensions and strides - * appropriately. Dimensions and strides are added - * to the end. - * - * Strides are only added if given (because data is given). 
- */ -static int -_update_descr_and_dimensions(PyArray_Descr **des, npy_intp *newdims, - npy_intp *newstrides, int oldnd) -{ - PyArray_Descr *old; - int newnd; - int numnew; - npy_intp *mydim; - int i; - int tuple; - - old = *des; - *des = old->subarray->base; - - - mydim = newdims + oldnd; - tuple = PyTuple_Check(old->subarray->shape); - if (tuple) { - numnew = PyTuple_GET_SIZE(old->subarray->shape); - } - else { - numnew = 1; - } - - - newnd = oldnd + numnew; - if (newnd > MAX_DIMS) { - goto finish; - } - if (tuple) { - for (i = 0; i < numnew; i++) { - mydim[i] = (npy_intp) PyInt_AsLong( - PyTuple_GET_ITEM(old->subarray->shape, i)); - } - } - else { - mydim[0] = (npy_intp) PyInt_AsLong(old->subarray->shape); - } - - if (newstrides) { - npy_intp tempsize; - npy_intp *mystrides; - - mystrides = newstrides + oldnd; - /* Make new strides -- alwasy C-contiguous */ - tempsize = (*des)->elsize; - for (i = numnew - 1; i >= 0; i--) { - mystrides[i] = tempsize; - tempsize *= mydim[i] ? mydim[i] : 1; - } - } - - finish: - Py_INCREF(*des); - Py_DECREF(old); - return newnd; -} - -NPY_NO_EXPORT void -_unaligned_strided_byte_copy(char *dst, npy_intp outstrides, char *src, - npy_intp instrides, npy_intp N, int elsize) -{ - npy_intp i; - char *tout = dst; - char *tin = src; - -#define _COPY_N_SIZE(size) \ - for(i=0; i 0; n--, a += stride - 1) { - b = a + 3; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a = *b; *b = c; - } - break; - case 8: - for (a = (char*)p; n > 0; n--, a += stride - 3) { - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a = *b; *b = c; - } - break; - case 2: - for (a = (char*)p; n > 0; n--, a += stride) { - b = a + 1; - c = *a; *a = *b; *b = c; - } - break; - default: - m = size/2; - for (a = (char *)p; n > 0; n--, a += stride - m) { - b = a + (size - 1); - for (j = 0; j < m; j++) { - c=*a; *a++ = *b; *b-- = c; - } - } - break; - } -} - -NPY_NO_EXPORT void -byte_swap_vector(void *p, npy_intp n, 
int size) -{ - _strided_byte_swap(p, (npy_intp) size, n, size); - return; -} - -/* If numitems > 1, then dst must be contiguous */ -NPY_NO_EXPORT void -copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems, - npy_intp srcstrides, int swap) -{ - npy_intp i; - char *s1 = (char *)src; - char *d1 = (char *)dst; - - - if ((numitems == 1) || (itemsize == srcstrides)) { - memcpy(d1, s1, itemsize*numitems); - } - else { - for (i = 0; i < numitems; i++) { - memcpy(d1, s1, itemsize); - d1 += itemsize; - s1 += srcstrides; - } - } - - if (swap) { - byte_swap_vector(d1, numitems, itemsize); - } -} - -/* Gets a half-open range [start, end) which contains the array data */ -NPY_NO_EXPORT void -_get_array_memory_extents(PyArrayObject *arr, - npy_uintp *out_start, npy_uintp *out_end) -{ - npy_uintp start, end; - npy_intp idim, ndim = PyArray_NDIM(arr); - npy_intp *dimensions = PyArray_DIMS(arr), - *strides = PyArray_STRIDES(arr); - - /* Calculate with a closed range [start, end] */ - start = end = (npy_uintp)PyArray_DATA(arr); - for (idim = 0; idim < ndim; ++idim) { - npy_intp stride = strides[idim], dim = dimensions[idim]; - /* If the array size is zero, return an empty range */ - if (dim == 0) { - *out_start = *out_end = (npy_uintp)PyArray_DATA(arr); - return; - } - /* Expand either upwards or downwards depending on stride */ - else { - if (stride > 0) { - end += stride*(dim-1); - } - else if (stride < 0) { - start += stride*(dim-1); - } - } - } - - /* Return a half-open range */ - *out_start = start; - *out_end = end + arr->descr->elsize; -} - -/* Returns 1 if the arrays have overlapping data, 0 otherwise */ -NPY_NO_EXPORT int -_arrays_overlap(PyArrayObject *arr1, PyArrayObject *arr2) -{ - npy_uintp start1 = 0, start2 = 0, end1 = 0, end2 = 0; - - _get_array_memory_extents(arr1, &start1, &end1); - _get_array_memory_extents(arr2, &start2, &end2); - - return (start1 < end2) && (start2 < end1); -} - -/*NUMPY_API - * Move the memory of one array into another, allowing 
for overlapping data. - * - * This is in general a difficult problem to solve efficiently, because - * strides can be negative. Consider "a = np.arange(3); a[::-1] = a", which - * previously produced the incorrect [0, 1, 0]. - * - * Instead of trying to be fancy, we simply check for overlap and make - * a temporary copy when one exists. - * - * Returns 0 on success, negative on failure. - */ -NPY_NO_EXPORT int -PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) -{ - /* - * Performance fix for expresions like "a[1000:6000] += x". In this - * case, first an in-place add is done, followed by an assignment, - * equivalently expressed like this: - * - * tmp = a[1000:6000] # Calls array_subscript_nice in mapping.c - * np.add(tmp, x, tmp) - * a[1000:6000] = tmp # Calls array_ass_sub in mapping.c - * - * In the assignment the underlying data type, shape, strides, and - * data pointers are identical, but src != dst because they are separately - * generated slices. By detecting this and skipping the redundant - * copy of values to themselves, we potentially give a big speed boost. - * - * Note that we don't call EquivTypes, because usually the exact same - * dtype object will appear, and we don't want to slow things down - * with a complicated comparison. The comparisons are ordered to - * try and reject this with as little work as possible. - */ - if (PyArray_DATA(src) == PyArray_DATA(dst) && - PyArray_DESCR(src) == PyArray_DESCR(dst) && - PyArray_NDIM(src) == PyArray_NDIM(dst) && - PyArray_CompareLists(PyArray_DIMS(src), - PyArray_DIMS(dst), - PyArray_NDIM(src)) && - PyArray_CompareLists(PyArray_STRIDES(src), - PyArray_STRIDES(dst), - PyArray_NDIM(src))) { - /*printf("Redundant copy operation detected\n");*/ - return 0; - } - - /* - * A special case is when there is just one dimension with positive - * strides, and we pass that to CopyInto, which correctly handles - * it for most cases. 
It may still incorrectly handle copying of - * partially-overlapping data elements, where the data pointer was offset - * by a fraction of the element size. - */ - if ((PyArray_NDIM(dst) == 1 && - PyArray_NDIM(src) == 1 && - PyArray_STRIDE(dst, 0) > 0 && - PyArray_STRIDE(src, 0) > 0) || - !_arrays_overlap(dst, src)) { - return PyArray_CopyInto(dst, src); - } - else { - PyArrayObject *tmp; - int ret; - - /* - * Allocate a temporary copy array. - */ - tmp = (PyArrayObject *)PyArray_NewLikeArray(dst, - NPY_KEEPORDER, NULL, 0); - if (tmp == NULL) { - return -1; - } - ret = PyArray_CopyInto(tmp, src); - if (ret == 0) { - ret = PyArray_CopyInto(dst, tmp); - } - Py_DECREF(tmp); - return ret; - } -} - - - -/* adapted from Numarray */ -static int -setArrayFromSequence(PyArrayObject *a, PyObject *s, int dim, npy_intp offset) -{ - Py_ssize_t i, slen; - int res = -1; - - /* - * This code is to ensure that the sequence access below will - * return a lower-dimensional sequence. - */ - - /* INCREF on entry DECREF on exit */ - Py_INCREF(s); - - if (PyArray_Check(s) && !(PyArray_CheckExact(s))) { - /* - * FIXME: This could probably copy the entire subarray at once here using - * a faster algorithm. Right now, just make sure a base-class array is - * used so that the dimensionality reduction assumption is correct. - */ - /* This will DECREF(s) if replaced */ - s = PyArray_EnsureArray(s); - if (s == NULL) { - goto fail; - } - } - - if (dim > a->nd) { - PyErr_Format(PyExc_ValueError, - "setArrayFromSequence: sequence/array dimensions mismatch."); - goto fail; - } - - slen = PySequence_Length(s); - if (slen < 0) { - goto fail; - } - /* - * Either the dimensions match, or the sequence has length 1 and can - * be broadcast to the destination. 
- */ - if (slen != a->dimensions[dim] && slen != 1) { - PyErr_Format(PyExc_ValueError, - "cannot copy sequence with size %d to array axis " - "with dimension %d", (int)slen, (int)a->dimensions[dim]); - goto fail; - } - - /* Broadcast the one element from the sequence to all the outputs */ - if (slen == 1) { - PyObject *o; - npy_intp alen = a->dimensions[dim]; - - o = PySequence_GetItem(s, 0); - if (o == NULL) { - goto fail; - } - for (i = 0; i < alen; i++) { - if ((a->nd - dim) > 1) { - res = setArrayFromSequence(a, o, dim+1, offset); - } - else { - res = a->descr->f->setitem(o, (a->data + offset), a); - } - if (res < 0) { - Py_DECREF(o); - goto fail; - } - offset += a->strides[dim]; - } - Py_DECREF(o); - } - /* Copy element by element */ - else { - for (i = 0; i < slen; i++) { - PyObject *o = PySequence_GetItem(s, i); - if (o == NULL) { - goto fail; - } - if ((a->nd - dim) > 1) { - res = setArrayFromSequence(a, o, dim+1, offset); - } - else { - res = a->descr->f->setitem(o, (a->data + offset), a); - } - Py_DECREF(o); - if (res < 0) { - goto fail; - } - offset += a->strides[dim]; - } - } - - Py_DECREF(s); - return 0; - - fail: - Py_DECREF(s); - return res; -} - -NPY_NO_EXPORT int -PyArray_AssignFromSequence(PyArrayObject *self, PyObject *v) -{ - if (!PySequence_Check(v)) { - PyErr_SetString(PyExc_ValueError, - "assignment from non-sequence"); - return -1; - } - if (self->nd == 0) { - PyErr_SetString(PyExc_ValueError, - "assignment to 0-d array"); - return -1; - } - return setArrayFromSequence(self, v, 0, 0); -} - -/* - * The rest of this code is to build the right kind of array - * from a python object. 
- */ - -static int -discover_itemsize(PyObject *s, int nd, int *itemsize) -{ - int n, r, i; - - if (PyArray_Check(s)) { - *itemsize = MAX(*itemsize, PyArray_ITEMSIZE(s)); - return 0; - } - - if ((nd == 0) || PyString_Check(s) || -#if defined(NPY_PY3K) - PyMemoryView_Check(s) || -#else - PyBuffer_Check(s) || -#endif - PyUnicode_Check(s)) { - - /* If an object has no length, leave it be */ - n = PyObject_Length(s); - if (n == -1) { - PyErr_Clear(); - } - else { - *itemsize = MAX(*itemsize, n); - } - return 0; - } - - n = PySequence_Length(s); - for (i = 0; i < n; i++) { - PyObject *e = PySequence_GetItem(s,i); - - if (e == NULL) { - return -1; - } - - r = discover_itemsize(e,nd-1,itemsize); - Py_DECREF(e); - if (r == -1) { - return -1; - } - } - - return 0; -} - -/* - * Take an arbitrary object and discover how many dimensions it - * has, filling in the dimensions as we go. - */ -static int -discover_dimensions(PyObject *s, int *maxndim, npy_intp *d, int check_it, - int stop_at_string, int stop_at_tuple, - int *out_is_object) -{ - PyObject *e; - int r, n, i; -#if PY_VERSION_HEX >= 0x02060000 - Py_buffer buffer_view; -#endif - - if (*maxndim == 0) { - return 0; - } - - /* s is an Array */ - if (PyArray_Check(s)) { - if (PyArray_NDIM(s) < *maxndim) { - *maxndim = PyArray_NDIM(s); - } - - for (i=0; i<*maxndim; i++) { - d[i] = PyArray_DIM(s,i); - } - return 0; - } - - /* s is a Scalar */ - if (PyArray_IsScalar(s, Generic)) { - *maxndim = 0; - return 0; - } - - /* s is not a Sequence */ - if (!PySequence_Check(s) || -#if defined(NPY_PY3K) - /* FIXME: XXX -- what is the correct thing to do here? 
*/ -#else - PyInstance_Check(s) || -#endif - PySequence_Length(s) < 0) { - *maxndim = 0; - PyErr_Clear(); - return 0; - } - - /* s is a String */ - if (PyString_Check(s) || -#if defined(NPY_PY3K) -#else - PyBuffer_Check(s) || -#endif - PyUnicode_Check(s)) { - if (stop_at_string) { - *maxndim = 0; - } - else { - d[0] = PySequence_Length(s); - *maxndim = 1; - } - return 0; - } - - /* s is a Tuple, but tuples aren't expanded */ - if (stop_at_tuple && PyTuple_Check(s)) { - *maxndim = 0; - return 0; - } - - /* s is a PEP 3118 buffer */ -#if PY_VERSION_HEX >= 0x02060000 - /* PEP 3118 buffer interface */ - memset(&buffer_view, 0, sizeof(Py_buffer)); - if (PyObject_GetBuffer(s, &buffer_view, PyBUF_STRIDES) == 0 || - PyObject_GetBuffer(s, &buffer_view, PyBUF_ND) == 0) { - int nd = buffer_view.ndim; - if (nd < *maxndim) { - *maxndim = nd; - } - for (i=0; i<*maxndim; i++) { - d[i] = buffer_view.shape[i]; - } - PyBuffer_Release(&buffer_view); - return 0; - } - else if (PyObject_GetBuffer(s, &buffer_view, PyBUF_SIMPLE) == 0) { - d[0] = buffer_view.len; - *maxndim = 1; - PyBuffer_Release(&buffer_view); - return 0; - } - else { - PyErr_Clear(); - } -#endif - - /* s has the __array_struct__ interface */ - if ((e = PyObject_GetAttrString(s, "__array_struct__")) != NULL) { - int nd = -1; - if (NpyCapsule_Check(e)) { - PyArrayInterface *inter; - inter = (PyArrayInterface *)NpyCapsule_AsVoidPtr(e); - if (inter->two == 2) { - nd = inter->nd; - if (nd >= 0) { - if (nd < *maxndim) { - *maxndim = nd; - } - for (i=0; i<*maxndim; i++) { - d[i] = inter->shape[i]; - } - } - } - } - Py_DECREF(e); - if (nd >= 0) { - return 0; - } - } - else { - PyErr_Clear(); - } - - /* s has the __array_interface__ interface */ - if ((e = PyObject_GetAttrString(s, "__array_interface__")) != NULL) { - int nd = -1; - if (PyDict_Check(e)) { - PyObject *new; - new = PyDict_GetItemString(e, "shape"); - if (new && PyTuple_Check(new)) { - nd = PyTuple_GET_SIZE(new); - if (nd < *maxndim) { - *maxndim = nd; - } - for 
(i=0; i<*maxndim; i++) { -#if (PY_VERSION_HEX >= 0x02050000) - d[i] = PyInt_AsSsize_t(PyTuple_GET_ITEM(new, i)); -#else - d[i] = PyInt_AsLong(PyTuple_GET_ITEM(new, i)); -#endif - if (d[i] < 0) { - PyErr_SetString(PyExc_RuntimeError, - "Invalid shape in __array_interface__"); - Py_DECREF(e); - return -1; - } - } - } - } - Py_DECREF(e); - if (nd >= 0) { - return 0; - } - } - else { - PyErr_Clear(); - } - - n = PySequence_Size(s); - - if (n < 0) { - return -1; - } - - d[0] = n; - - /* 1-dimensional sequence */ - if (n == 0 || *maxndim == 1) { - *maxndim = 1; - return 0; - } - else { - npy_intp dtmp[NPY_MAXDIMS]; - int j, maxndim_m1 = *maxndim - 1; - - if ((e = PySequence_GetItem(s, 0)) == NULL) { - /* - * PySequence_Check detects whether an old type object is a - * sequence by the presence of the __getitem__ attribute, and - * for new type objects that aren't dictionaries by the - * presence of the __len__ attribute as well. In either case it - * is possible to have an object that tests as a sequence but - * doesn't behave as a sequence and consequently, the - * PySequence_GetItem call can fail. When that happens and the - * object looks like a dictionary, we truncate the dimensions - * and set the object creation flag, otherwise we pass the - * error back up the call chain. 
- */ - if (PyErr_ExceptionMatches(PyExc_KeyError)) { - PyErr_Clear(); - *maxndim = 0; - *out_is_object = 1; - return 0; - } - else { - return -1; - } - } - r = discover_dimensions(e, &maxndim_m1, d + 1, check_it, - stop_at_string, stop_at_tuple, - out_is_object); - Py_DECREF(e); - if (r < 0) { - return r; - } - - /* For the dimension truncation check below */ - *maxndim = maxndim_m1 + 1; - for (i = 1; i < n; ++i) { - /* Get the dimensions of the first item */ - if ((e = PySequence_GetItem(s, i)) == NULL) { - /* see comment above */ - if (PyErr_ExceptionMatches(PyExc_KeyError)) { - PyErr_Clear(); - *maxndim = 0; - *out_is_object = 1; - return 0; - } - else { - return -1; - } - } - r = discover_dimensions(e, &maxndim_m1, dtmp, check_it, - stop_at_string, stop_at_tuple, - out_is_object); - Py_DECREF(e); - if (r < 0) { - return r; - } - - /* Reduce max_ndim_m1 to just items which match */ - for (j = 0; j < maxndim_m1; ++j) { - if (dtmp[j] != d[j+1]) { - maxndim_m1 = j; - break; - } - } - } - /* - * If the dimensions are truncated, need to produce - * an object array. - */ - if (maxndim_m1 + 1 < *maxndim) { - *out_is_object = 1; - *maxndim = maxndim_m1 + 1; - } - } - - return 0; -} - -/*NUMPY_API - * Generic new array creation routine. 
- * - * steals a reference to descr (even on failure) - */ -NPY_NO_EXPORT PyObject * -PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd, - npy_intp *dims, npy_intp *strides, void *data, - int flags, PyObject *obj) -{ - PyArrayObject *self; - int i; - size_t sd; - npy_intp largest; - npy_intp size; - - if (descr->subarray) { - PyObject *ret; - npy_intp newdims[2*NPY_MAXDIMS]; - npy_intp *newstrides = NULL; - memcpy(newdims, dims, nd*sizeof(npy_intp)); - if (strides) { - newstrides = newdims + NPY_MAXDIMS; - memcpy(newstrides, strides, nd*sizeof(npy_intp)); - } - nd =_update_descr_and_dimensions(&descr, newdims, - newstrides, nd); - ret = PyArray_NewFromDescr(subtype, descr, nd, newdims, - newstrides, - data, flags, obj); - return ret; - } - - if ((unsigned int)nd > (unsigned int)NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "number of dimensions must be within [0, %d]", - NPY_MAXDIMS); - Py_DECREF(descr); - return NULL; - } - - /* Check dimensions */ - size = 1; - sd = (size_t) descr->elsize; - if (sd == 0) { - if (!PyDataType_ISSTRING(descr)) { - PyErr_SetString(PyExc_TypeError, "Empty data-type"); - Py_DECREF(descr); - return NULL; - } - PyArray_DESCR_REPLACE(descr); - if (descr->type_num == NPY_STRING) { - sd = descr->elsize = 1; - } - else { - sd = descr->elsize = sizeof(PyArray_UCS4); - } - } - - largest = NPY_MAX_INTP / sd; - for (i = 0; i < nd; i++) { - npy_intp dim = dims[i]; - - if (dim == 0) { - /* - * Compare to PyArray_OverflowMultiplyList that - * returns 0 in this case. 
- */ - continue; - } - - if (dim < 0) { - PyErr_SetString(PyExc_ValueError, - "negative dimensions " \ - "are not allowed"); - Py_DECREF(descr); - return NULL; - } - - if (dim > largest) { - PyErr_SetString(PyExc_ValueError, - "array is too big."); - Py_DECREF(descr); - return NULL; - } - size *= dim; - largest /= dim; - } - - self = (PyArrayObject *) subtype->tp_alloc(subtype, 0); - if (self == NULL) { - Py_DECREF(descr); - return NULL; - } - self->nd = nd; - self->dimensions = NULL; - self->data = NULL; - if (data == NULL) { - self->flags = DEFAULT; - if (flags) { - self->flags |= NPY_F_CONTIGUOUS; - if (nd > 1) { - self->flags &= ~NPY_C_CONTIGUOUS; - } - flags = NPY_F_CONTIGUOUS; - } - } - else { - self->flags = (flags & ~NPY_UPDATEIFCOPY); - } - self->descr = descr; - self->base = (PyObject *)NULL; - self->weakreflist = (PyObject *)NULL; - - if (nd > 0) { - self->dimensions = PyDimMem_NEW(2*nd); - if (self->dimensions == NULL) { - PyErr_NoMemory(); - goto fail; - } - self->strides = self->dimensions + nd; - memcpy(self->dimensions, dims, sizeof(npy_intp)*nd); - if (strides == NULL) { /* fill it in */ - sd = _array_fill_strides(self->strides, dims, nd, sd, - flags, &(self->flags)); - } - else { - /* - * we allow strides even when we create - * the memory, but be careful with this... - */ - memcpy(self->strides, strides, sizeof(npy_intp)*nd); - sd *= size; - } - } - else { - self->dimensions = self->strides = NULL; - self->flags |= NPY_F_CONTIGUOUS; - } - - if (data == NULL) { - /* - * Allocate something even for zero-space arrays - * e.g. shape=(0,) -- otherwise buffer exposure - * (a.data) doesn't work as it should. 
- */ - - if (sd == 0) { - sd = descr->elsize; - } - data = PyDataMem_NEW(sd); - if (data == NULL) { - PyErr_NoMemory(); - goto fail; - } - self->flags |= OWNDATA; - - /* - * It is bad to have unitialized OBJECT pointers - * which could also be sub-fields of a VOID array - */ - if (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { - memset(data, 0, sd); - } - } - else { - /* - * If data is passed in, this object won't own it by default. - * Caller must arrange for this to be reset if truly desired - */ - self->flags &= ~OWNDATA; - } - self->data = data; - - /* - * If the strides were provided to the function, need to - * update the flags to get the right CONTIGUOUS, ALIGN properties - */ - if (strides != NULL) { - PyArray_UpdateFlags(self, UPDATE_ALL); - } - - /* - * call the __array_finalize__ - * method if a subtype. - * If obj is NULL, then call method with Py_None - */ - if ((subtype != &PyArray_Type)) { - PyObject *res, *func, *args; - - func = PyObject_GetAttrString((PyObject *)self, "__array_finalize__"); - if (func && func != Py_None) { - if (NpyCapsule_Check(func)) { - /* A C-function is stored here */ - PyArray_FinalizeFunc *cfunc; - cfunc = NpyCapsule_AsVoidPtr(func); - Py_DECREF(func); - if (cfunc(self, obj) < 0) { - goto fail; - } - } - else { - args = PyTuple_New(1); - if (obj == NULL) { - obj=Py_None; - } - Py_INCREF(obj); - PyTuple_SET_ITEM(args, 0, obj); - res = PyObject_Call(func, args, NULL); - Py_DECREF(args); - Py_DECREF(func); - if (res == NULL) { - goto fail; - } - else { - Py_DECREF(res); - } - } - } - else Py_XDECREF(func); - } - return (PyObject *)self; - - fail: - Py_DECREF(self); - return NULL; -} - -/*NUMPY_API - * Creates a new array with the same shape as the provided one, - * with possible memory layout order and data type changes. - * - * prototype - The array the new one should be like. - * order - NPY_CORDER - C-contiguous result. - * NPY_FORTRANORDER - Fortran-contiguous result. 
- * NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. - * NPY_KEEPORDER - Keeps the axis ordering of prototype. - * dtype - If not NULL, overrides the data type of the result. - * subok - If 1, use the prototype's array subtype, otherwise - * always create a base-class array. - * - * NOTE: If dtype is not NULL, steals the dtype reference. - */ -NPY_NO_EXPORT PyObject * -PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order, - PyArray_Descr *dtype, int subok) -{ - PyObject *ret = NULL; - int ndim = PyArray_NDIM(prototype); - - /* If no override data type, use the one from the prototype */ - if (dtype == NULL) { - dtype = PyArray_DESCR(prototype); - Py_INCREF(dtype); - } - - /* Handle ANYORDER and simple KEEPORDER cases */ - switch (order) { - case NPY_ANYORDER: - order = PyArray_ISFORTRAN(prototype) ? - NPY_FORTRANORDER : NPY_CORDER; - break; - case NPY_KEEPORDER: - if (PyArray_IS_C_CONTIGUOUS(prototype) || ndim <= 1) { - order = NPY_CORDER; - break; - } - else if (PyArray_IS_F_CONTIGUOUS(prototype)) { - order = NPY_FORTRANORDER; - break; - } - break; - default: - break; - } - - /* If it's not KEEPORDER, this is simple */ - if (order != NPY_KEEPORDER) { - ret = PyArray_NewFromDescr(subok ? Py_TYPE(prototype) : &PyArray_Type, - dtype, - ndim, - PyArray_DIMS(prototype), - NULL, - NULL, - order, - subok ? (PyObject *)prototype : NULL); - } - /* KEEPORDER needs some analysis of the strides */ - else { - npy_intp strides[NPY_MAXDIMS], stride; - npy_intp *shape = PyArray_DIMS(prototype); - _npy_stride_sort_item strideperm[NPY_MAXDIMS]; - int i; - - PyArray_CreateSortedStridePerm(prototype, strideperm); - - /* Build the new strides */ - stride = dtype->elsize; - for (i = ndim-1; i >= 0; --i) { - npy_intp i_perm = strideperm[i].perm; - strides[i_perm] = stride; - stride *= shape[i_perm]; - } - - /* Finally, allocate the array */ - ret = PyArray_NewFromDescr( subok ? 
Py_TYPE(prototype) : &PyArray_Type, - dtype, - ndim, - shape, - strides, - NULL, - 0, - subok ? (PyObject *)prototype : NULL); - } - - return ret; -} - -/*NUMPY_API - * Generic new array creation routine. - */ -NPY_NO_EXPORT PyObject * -PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int type_num, - npy_intp *strides, void *data, int itemsize, int flags, - PyObject *obj) -{ - PyArray_Descr *descr; - PyObject *new; - - descr = PyArray_DescrFromType(type_num); - if (descr == NULL) { - return NULL; - } - if (descr->elsize == 0) { - if (itemsize < 1) { - PyErr_SetString(PyExc_ValueError, - "data type must provide an itemsize"); - Py_DECREF(descr); - return NULL; - } - PyArray_DESCR_REPLACE(descr); - descr->elsize = itemsize; - } - new = PyArray_NewFromDescr(subtype, descr, nd, dims, strides, - data, flags, obj); - return new; -} - - -NPY_NO_EXPORT int -_array_from_buffer_3118(PyObject *obj, PyObject **out) -{ -#if PY_VERSION_HEX >= 0x02060000 - /* PEP 3118 */ - PyObject *memoryview; - Py_buffer *view; - PyArray_Descr *descr = NULL; - PyObject *r; - int nd, flags, k; - Py_ssize_t d; - npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS]; - - memoryview = PyMemoryView_FromObject(obj); - if (memoryview == NULL) { - PyErr_Clear(); - return -1; - } - - view = PyMemoryView_GET_BUFFER(memoryview); - if (view->format != NULL) { - descr = _descriptor_from_pep3118_format(view->format); - if (descr == NULL) { - PyObject *msg; - msg = PyBytes_FromFormat("Invalid PEP 3118 format string: '%s'", - view->format); - PyErr_WarnEx(PyExc_RuntimeWarning, PyBytes_AS_STRING(msg), 0); - Py_DECREF(msg); - goto fail; - } - - /* Sanity check */ - if (descr->elsize != view->itemsize) { - PyErr_WarnEx(PyExc_RuntimeWarning, - "Item size computed from the PEP 3118 buffer format " - "string does not match the actual item size.", - 0); - goto fail; - } - } - else { - descr = PyArray_DescrNewFromType(PyArray_STRING); - descr->elsize = view->itemsize; - } - - if (view->shape != NULL) { - nd = 
view->ndim; - if (nd >= NPY_MAXDIMS || nd < 0) { - goto fail; - } - for (k = 0; k < nd; ++k) { - if (k >= NPY_MAXDIMS) { - goto fail; - } - shape[k] = view->shape[k]; - } - if (view->strides != NULL) { - for (k = 0; k < nd; ++k) { - strides[k] = view->strides[k]; - } - } - else { - d = view->len; - for (k = 0; k < nd; ++k) { - d /= view->shape[k]; - strides[k] = d; - } - } - } - else { - nd = 1; - shape[0] = view->len / view->itemsize; - strides[0] = view->itemsize; - } - - flags = BEHAVED & (view->readonly ? ~NPY_WRITEABLE : ~0); - r = PyArray_NewFromDescr(&PyArray_Type, descr, - nd, shape, strides, view->buf, - flags, NULL); - ((PyArrayObject *)r)->base = memoryview; - PyArray_UpdateFlags((PyArrayObject *)r, UPDATE_ALL); - - *out = r; - return 0; - -fail: - Py_XDECREF(descr); - Py_DECREF(memoryview); - return -1; - -#else - return -1; -#endif -} - -/*NUMPY_API - * Retrieves the array parameters for viewing/converting an arbitrary - * PyObject* to a NumPy array. This allows the "innate type and shape" - * of Python list-of-lists to be discovered without - * actually converting to an array. - * - * In some cases, such as structured arrays and the __array__ interface, - * a data type needs to be used to make sense of the object. When - * this is needed, provide a Descr for 'requested_dtype', otherwise - * provide NULL. This reference is not stolen. Also, if the requested - * dtype doesn't modify the interpretation of the input, out_dtype will - * still get the "innate" dtype of the object, not the dtype passed - * in 'requested_dtype'. - * - * If writing to the value in 'op' is desired, set the boolean - * 'writeable' to 1. This raises an error when 'op' is a scalar, list - * of lists, or other non-writeable 'op'. - * - * Result: When success (0 return value) is returned, either out_arr - * is filled with a non-NULL PyArrayObject and - * the rest of the parameters are untouched, or out_arr is - * filled with NULL, and the rest of the parameters are - * filled. 
- * - * Typical usage: - * - * PyArrayObject *arr = NULL; - * PyArray_Descr *dtype = NULL; - * int ndim = 0; - * npy_intp dims[NPY_MAXDIMS]; - * - * if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype, - * &ndim, &dims, &arr, NULL) < 0) { - * return NULL; - * } - * if (arr == NULL) { - * ... validate/change dtype, validate flags, ndim, etc ... - * // Could make custom strides here too - * arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, - * dims, NULL, - * fortran ? NPY_F_CONTIGUOUS : 0, - * NULL); - * if (arr == NULL) { - * return NULL; - * } - * if (PyArray_CopyObject(arr, op) < 0) { - * Py_DECREF(arr); - * return NULL; - * } - * } - * else { - * ... in this case the other parameters weren't filled, just - * validate and possibly copy arr itself ... - * } - * ... use arr ... - */ -NPY_NO_EXPORT int -PyArray_GetArrayParamsFromObject(PyObject *op, - PyArray_Descr *requested_dtype, - npy_bool writeable, - PyArray_Descr **out_dtype, - int *out_ndim, npy_intp *out_dims, - PyArrayObject **out_arr, PyObject *context) -{ - PyObject *tmp; - - /* If op is an array */ - if (PyArray_Check(op)) { - if (writeable && !PyArray_ISWRITEABLE((PyArrayObject *)op)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array"); - return -1; - } - Py_INCREF(op); - *out_arr = (PyArrayObject *)op; - return 0; - } - - /* If op is a NumPy scalar */ - if (PyArray_IsScalar(op, Generic)) { - if (writeable) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to scalar"); - return -1; - } - *out_dtype = PyArray_DescrFromScalar(op); - if (*out_dtype == NULL) { - return -1; - } - *out_ndim = 0; - *out_arr = NULL; - return 0; - } - - /* If op is a Python scalar */ - *out_dtype = _array_find_python_scalar_type(op); - if (*out_dtype != NULL) { - if (writeable) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to scalar"); - Py_DECREF(*out_dtype); - return -1; - } - *out_ndim = 0; - *out_arr = NULL; - return 0; - } - - /* If op supports the PEP 3118 buffer interface */ - if 
(!PyBytes_Check(op) && !PyUnicode_Check(op) && - _array_from_buffer_3118(op, (PyObject **)out_arr) == 0) { - if (writeable && !PyArray_ISWRITEABLE(*out_arr)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to PEP 3118 buffer"); - Py_DECREF(*out_arr); - return -1; - } - return (*out_arr) == NULL ? -1 : 0; - } - - /* If op supports the __array_struct__ or __array_interface__ interface */ - tmp = PyArray_FromStructInterface(op); - if (tmp == Py_NotImplemented) { - tmp = PyArray_FromInterface(op); - } - if (tmp != Py_NotImplemented) { - if (writeable && !PyArray_ISWRITEABLE(tmp)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array interface object"); - Py_DECREF(tmp); - return -1; - } - *out_arr = (PyArrayObject *)tmp; - return (*out_arr) == NULL ? -1 : 0; - } - - /* - * If op supplies the __array__ function. - * The documentation says this should produce a copy, so - * we skip this method if writeable is true, because the intent - * of writeable is to modify the operand. - * XXX: If the implementation is wrong, and/or if actual - * usage requires this behave differently, - * this should be changed! - */ - if (!writeable) { - tmp = PyArray_FromArrayAttr(op, requested_dtype, context); - if (tmp != Py_NotImplemented) { - if (writeable && !PyArray_ISWRITEABLE(tmp)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array interface object"); - Py_DECREF(tmp); - return -1; - } - *out_arr = (PyArrayObject *)tmp; - return (*out_arr) == NULL ? 
-1 : 0; - } - } - - /* Try to treat op as a list of lists */ - if (!writeable && PySequence_Check(op)) { - int check_it, stop_at_string, stop_at_tuple, is_object; - int type_num, type; - - /* - * Determine the type, using the requested data type if - * it will affect how the array is retrieved - */ - if (requested_dtype != NULL && ( - requested_dtype->type_num == NPY_STRING || - requested_dtype->type_num == NPY_UNICODE || - (requested_dtype->type_num == NPY_VOID && - (requested_dtype->names || requested_dtype->subarray)) || - requested_dtype->type == NPY_CHARLTR || - requested_dtype->type_num == NPY_OBJECT)) { - Py_INCREF(requested_dtype); - *out_dtype = requested_dtype; - } - else { - *out_dtype = _array_find_type(op, NULL, MAX_DIMS); - if (*out_dtype == NULL) { - if (PyErr_Occurred() && - PyErr_GivenExceptionMatches(PyErr_Occurred(), - PyExc_MemoryError)) { - return -1; - } - /* Say it's an OBJECT array if there's an error */ - PyErr_Clear(); - *out_dtype = PyArray_DescrFromType(NPY_OBJECT); - if (*out_dtype == NULL) { - return -1; - } - } - } - - type_num = (*out_dtype)->type_num; - type = (*out_dtype)->type; - - check_it = (type != NPY_CHARLTR); - stop_at_string = (type_num != NPY_STRING) || - (type == NPY_STRINGLTR); - stop_at_tuple = (type_num == NPY_VOID && - ((*out_dtype)->names || (*out_dtype)->subarray)); - - *out_ndim = NPY_MAXDIMS; - is_object = 0; - if (discover_dimensions(op, out_ndim, out_dims, check_it, - stop_at_string, stop_at_tuple, - &is_object) < 0) { - Py_DECREF(*out_dtype); - if (PyErr_Occurred()) { - return -1; - } - *out_dtype = PyArray_DescrFromType(NPY_OBJECT); - if (*out_dtype == NULL) { - return -1; - } - *out_ndim = 0; - *out_arr = NULL; - return 0; - } - /* If object arrays are forced */ - if (is_object) { - Py_DECREF(*out_dtype); - *out_dtype = PyArray_DescrFromType(NPY_OBJECT); - if (*out_dtype == NULL) { - return -1; - } - } - - if ((*out_dtype)->type == NPY_CHARLTR && (*out_ndim) > 0 && - out_dims[(*out_ndim) - 1] == 1) { - 
(*out_ndim) -= 1; - } - - /* If the type is flexible, determine its size */ - if ((*out_dtype)->elsize == 0 && - PyTypeNum_ISEXTENDED((*out_dtype)->type_num)) { - int itemsize = 0; - if (discover_itemsize(op, *out_ndim, &itemsize) < 0) { - Py_DECREF(*out_dtype); - if (PyErr_Occurred() && - PyErr_GivenExceptionMatches(PyErr_Occurred(), - PyExc_MemoryError)) { - return -1; - } - /* Say it's an OBJECT scalar if there's an error */ - PyErr_Clear(); - *out_dtype = PyArray_DescrFromType(NPY_OBJECT); - *out_ndim = 0; - *out_arr = NULL; - return 0; - } - if ((*out_dtype)->type_num == NPY_UNICODE) { - itemsize *= 4; - } - - if (itemsize != (*out_dtype)->elsize) { - PyArray_DESCR_REPLACE(*out_dtype); - (*out_dtype)->elsize = itemsize; - } - } - - *out_arr = NULL; - return 0; - } - - /* Anything can be viewed as an object, unless it needs to be writeable */ - if (!writeable) { - *out_dtype = PyArray_DescrFromType(NPY_OBJECT); - if (*out_dtype == NULL) { - return -1; - } - *out_ndim = 0; - *out_arr = NULL; - return 0; - } - - PyErr_SetString(PyExc_RuntimeError, - "object cannot be viewed as a writeable numpy array"); - return -1; -} - -/*NUMPY_API - * Does not check for NPY_ENSURECOPY and NPY_NOTSWAPPED in flags - * Steals a reference to newtype --- which can be NULL - */ -NPY_NO_EXPORT PyObject * -PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, - int max_depth, int flags, PyObject *context) -{ - /* - * This is the main code to make a NumPy array from a Python - * Object. It is called from many different places. 
- */ - PyArrayObject *arr = NULL, *ret; - PyArray_Descr *dtype = NULL; - int ndim = 0; - npy_intp dims[NPY_MAXDIMS]; - - /* Get either the array or its parameters if it isn't an array */ - if (PyArray_GetArrayParamsFromObject(op, newtype, - 0, &dtype, - &ndim, dims, &arr, context) < 0) { - Py_XDECREF(newtype); - ret = NULL; - return NULL; - } - - /* If the requested dtype is flexible, adjust its size */ - if (newtype != NULL && newtype->elsize == 0) { - PyArray_DESCR_REPLACE(newtype); - if (newtype == NULL) { - ret = NULL; - return NULL; - } - if (arr != NULL) { - dtype = PyArray_DESCR(arr); - } - - if (newtype->type_num == dtype->type_num) { - newtype->elsize = dtype->elsize; - } - else { - switch(newtype->type_num) { - case NPY_STRING: - if (dtype->type_num == NPY_UNICODE) { - newtype->elsize = dtype->elsize >> 2; - } - else { - newtype->elsize = dtype->elsize; - } - break; - case NPY_UNICODE: - newtype->elsize = dtype->elsize << 2; - break; - case NPY_VOID: - newtype->elsize = dtype->elsize; - break; - } - } - } - - /* If we got dimensions and dtype instead of an array */ - if (arr == NULL) { - if (flags&NPY_UPDATEIFCOPY) { - Py_XDECREF(newtype); - PyErr_SetString(PyExc_TypeError, - "UPDATEIFCOPY used for non-array input."); - return NULL; - } - else if (min_depth != 0 && ndim < min_depth) { - Py_DECREF(dtype); - Py_XDECREF(newtype); - PyErr_SetString(PyExc_ValueError, - "object of too small depth for desired array"); - ret = NULL; - } - else if (max_depth != 0 && ndim > max_depth) { - Py_DECREF(dtype); - Py_XDECREF(newtype); - PyErr_SetString(PyExc_ValueError, - "object too deep for desired array"); - ret = NULL; - } - else if (ndim == 0 && PyArray_IsScalar(op, Generic)) { - ret = (PyArrayObject *)PyArray_FromScalar(op, newtype); - Py_DECREF(dtype); - } - else { - if (newtype == NULL) { - newtype = dtype; - } - else { - /* - * TODO: would be nice to do this too, but it's - * a behavior change. 
It's also a bit tricky - * for downcasting to small integer and float - * types, and might be better to modify - * PyArray_AssignFromSequence and descr->f->setitem - * to have a 'casting' parameter and - * to check each value with scalar rules like - * in PyArray_MinScalarType. - */ - /* - if (!(flags&NPY_FORCECAST) && ndim > 0 && - !PyArray_CanCastTo(dtype, newtype)) { - Py_DECREF(dtype); - Py_XDECREF(newtype); - PyErr_SetString(PyExc_TypeError, - "object cannot be safely cast to array " - "of required type"); - return NULL; - } - */ - Py_DECREF(dtype); - } - - /* Create an array and copy the data */ - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, newtype, - ndim, dims, - NULL, NULL, - flags&NPY_F_CONTIGUOUS, NULL); - if (ret != NULL) { - if (ndim > 0) { - if (PyArray_AssignFromSequence(ret, op) < 0) { - Py_DECREF(ret); - ret = NULL; - } - } - else { - if (PyArray_DESCR(ret)->f->setitem(op, - PyArray_DATA(ret), ret) < 0) { - Py_DECREF(ret); - ret = NULL; - } - } - } - } - } - else { - if (min_depth != 0 && PyArray_NDIM(arr) < min_depth) { - PyErr_SetString(PyExc_ValueError, - "object of too small depth for desired array"); - Py_DECREF(arr); - ret = NULL; - } - else if (max_depth != 0 && PyArray_NDIM(arr) > max_depth) { - PyErr_SetString(PyExc_ValueError, - "object too deep for desired array"); - Py_DECREF(arr); - ret = NULL; - } - else { - ret = (PyArrayObject *)PyArray_FromArray(arr, newtype, flags); - Py_DECREF(arr); - } - } - - return (PyObject *)ret; -} - -/* - * flags is any of - * NPY_C_CONTIGUOUS (CONTIGUOUS), - * NPY_F_CONTIGUOUS (FORTRAN), - * NPY_ALIGNED, - * NPY_WRITEABLE, - * NPY_NOTSWAPPED, - * NPY_ENSURECOPY, - * NPY_UPDATEIFCOPY, - * NPY_FORCECAST, - * NPY_ENSUREARRAY, - * NPY_ELEMENTSTRIDES - * - * or'd (|) together - * - * Any of these flags present means that the returned array should - * guarantee that aspect of the array. 
Otherwise the returned array - * won't guarantee it -- it will depend on the object as to whether or - * not it has such features. - * - * Note that NPY_ENSURECOPY is enough - * to guarantee NPY_C_CONTIGUOUS, NPY_ALIGNED and NPY_WRITEABLE - * and therefore it is redundant to include those as well. - * - * NPY_BEHAVED == NPY_ALIGNED | NPY_WRITEABLE - * NPY_CARRAY = NPY_C_CONTIGUOUS | NPY_BEHAVED - * NPY_FARRAY = NPY_F_CONTIGUOUS | NPY_BEHAVED - * - * NPY_F_CONTIGUOUS can be set in the FLAGS to request a FORTRAN array. - * Fortran arrays are always behaved (aligned, - * notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). - * - * NPY_UPDATEIFCOPY flag sets this flag in the returned array if a copy is - * made and the base argument points to the (possibly) misbehaved array. - * When the new array is deallocated, the original array held in base - * is updated with the contents of the new array. - * - * NPY_FORCECAST will cause a cast to occur regardless of whether or not - * it is safe. - */ - -/*NUMPY_API - * steals a reference to descr -- accepts NULL - */ -NPY_NO_EXPORT PyObject * -PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context) -{ - PyObject *obj; - if (requires & NPY_NOTSWAPPED) { - if (!descr && PyArray_Check(op) && - !PyArray_ISNBO(PyArray_DESCR(op)->byteorder)) { - descr = PyArray_DescrNew(PyArray_DESCR(op)); - } - else if (descr && !PyArray_ISNBO(descr->byteorder)) { - PyArray_DESCR_REPLACE(descr); - } - if (descr) { - descr->byteorder = PyArray_NATIVE; - } - } - - obj = PyArray_FromAny(op, descr, min_depth, max_depth, requires, context); - if (obj == NULL) { - return NULL; - } - if ((requires & NPY_ELEMENTSTRIDES) && - !PyArray_ElementStrides(obj)) { - PyObject *new; - new = PyArray_NewCopy((PyArrayObject *)obj, NPY_ANYORDER); - Py_DECREF(obj); - obj = new; - } - return obj; -} - -/*NUMPY_API - * steals reference to newtype --- acc. 
NULL - */ -NPY_NO_EXPORT PyObject * -PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) -{ - - PyArrayObject *ret = NULL; - int itemsize; - int copy = 0; - int arrflags; - PyArray_Descr *oldtype; - char *msg = "cannot copy back to a read-only array"; - PyTypeObject *subtype; - - oldtype = PyArray_DESCR(arr); - subtype = Py_TYPE(arr); - if (newtype == NULL) { - newtype = oldtype; Py_INCREF(oldtype); - } - itemsize = newtype->elsize; - if (itemsize == 0) { - PyArray_DESCR_REPLACE(newtype); - if (newtype == NULL) { - return NULL; - } - newtype->elsize = oldtype->elsize; - itemsize = newtype->elsize; - } - - /* - * Can't cast unless ndim-0 array, NPY_FORCECAST is specified - * or the cast is safe. - */ - if (!(flags & NPY_FORCECAST) && !PyArray_NDIM(arr) == 0 && - !PyArray_CanCastTo(oldtype, newtype)) { - Py_DECREF(newtype); - PyErr_SetString(PyExc_TypeError, - "array cannot be safely cast " \ - "to required type"); - return NULL; - } - - /* Don't copy if sizes are compatible */ - if ((flags & NPY_ENSURECOPY) || PyArray_EquivTypes(oldtype, newtype)) { - arrflags = arr->flags; - if (arr->nd <= 1 && (flags & NPY_F_CONTIGUOUS)) { - flags |= NPY_C_CONTIGUOUS; - } - copy = (flags & NPY_ENSURECOPY) || - ((flags & NPY_C_CONTIGUOUS) && (!(arrflags & NPY_C_CONTIGUOUS))) - || ((flags & NPY_ALIGNED) && (!(arrflags & NPY_ALIGNED))) - || (arr->nd > 1 && - ((flags & NPY_F_CONTIGUOUS) && - (!(arrflags & NPY_F_CONTIGUOUS)))) - || ((flags & NPY_WRITEABLE) && (!(arrflags & NPY_WRITEABLE))); - - if (copy) { - if ((flags & NPY_UPDATEIFCOPY) && - (!PyArray_ISWRITEABLE(arr))) { - Py_DECREF(newtype); - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if ((flags & NPY_ENSUREARRAY)) { - subtype = &PyArray_Type; - } - ret = (PyArrayObject *) - PyArray_NewFromDescr(subtype, newtype, - arr->nd, - arr->dimensions, - NULL, NULL, - flags & NPY_F_CONTIGUOUS, - (PyObject *)arr); - if (ret == NULL) { - return NULL; - } - if (PyArray_CopyInto(ret, arr) == -1) { - 
Py_DECREF(ret); - return NULL; - } - if (flags & NPY_UPDATEIFCOPY) { - ret->flags |= NPY_UPDATEIFCOPY; - ret->base = (PyObject *)arr; - PyArray_FLAGS(ret->base) &= ~NPY_WRITEABLE; - Py_INCREF(arr); - } - } - /* - * If no copy then just increase the reference - * count and return the input - */ - else { - Py_DECREF(newtype); - if ((flags & NPY_ENSUREARRAY) && - !PyArray_CheckExact(arr)) { - Py_INCREF(arr->descr); - ret = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, - arr->descr, - arr->nd, - arr->dimensions, - arr->strides, - arr->data, - arr->flags,NULL); - if (ret == NULL) { - return NULL; - } - ret->base = (PyObject *)arr; - } - else { - ret = arr; - } - Py_INCREF(arr); - } - } - - /* - * The desired output type is different than the input - * array type and copy was not specified - */ - else { - if ((flags & NPY_UPDATEIFCOPY) && - (!PyArray_ISWRITEABLE(arr))) { - Py_DECREF(newtype); - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if ((flags & NPY_ENSUREARRAY)) { - subtype = &PyArray_Type; - } - ret = (PyArrayObject *) - PyArray_NewFromDescr(subtype, newtype, - arr->nd, arr->dimensions, - NULL, NULL, - flags & NPY_F_CONTIGUOUS, - (PyObject *)arr); - if (ret == NULL) { - return NULL; - } - if (PyArray_CastTo(ret, arr) < 0) { - Py_DECREF(ret); - return NULL; - } - if (flags & NPY_UPDATEIFCOPY) { - ret->flags |= NPY_UPDATEIFCOPY; - ret->base = (PyObject *)arr; - PyArray_FLAGS(ret->base) &= ~NPY_WRITEABLE; - Py_INCREF(arr); - } - } - return (PyObject *)ret; -} - -/*NUMPY_API */ -NPY_NO_EXPORT PyObject * -PyArray_FromStructInterface(PyObject *input) -{ - PyArray_Descr *thetype = NULL; - char buf[40]; - PyArrayInterface *inter; - PyObject *attr, *r; - char endian = PyArray_NATBYTE; - - attr = PyObject_GetAttrString(input, "__array_struct__"); - if (attr == NULL) { - PyErr_Clear(); - return Py_NotImplemented; - } - if (!NpyCapsule_Check(attr)) { - goto fail; - } - inter = NpyCapsule_AsVoidPtr(attr); - if (inter->two != 2) { - goto fail; - } - 
if ((inter->flags & NPY_NOTSWAPPED) != NPY_NOTSWAPPED) { - endian = PyArray_OPPBYTE; - inter->flags &= ~NPY_NOTSWAPPED; - } - - if (inter->flags & ARR_HAS_DESCR) { - if (PyArray_DescrConverter(inter->descr, &thetype) == PY_FAIL) { - thetype = NULL; - PyErr_Clear(); - } - } - - if (thetype == NULL) { - PyOS_snprintf(buf, sizeof(buf), - "%c%c%d", endian, inter->typekind, inter->itemsize); - if (!(thetype=_array_typedescr_fromstr(buf))) { - Py_DECREF(attr); - return NULL; - } - } - - r = PyArray_NewFromDescr(&PyArray_Type, thetype, - inter->nd, inter->shape, - inter->strides, inter->data, - inter->flags, NULL); - Py_INCREF(input); - PyArray_BASE(r) = input; - Py_DECREF(attr); - PyArray_UpdateFlags((PyArrayObject *)r, UPDATE_ALL); - return r; - - fail: - PyErr_SetString(PyExc_ValueError, "invalid __array_struct__"); - Py_DECREF(attr); - return NULL; -} - -#define PyIntOrLong_Check(obj) (PyInt_Check(obj) || PyLong_Check(obj)) - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_FromInterface(PyObject *input) -{ - PyObject *attr = NULL, *item = NULL; - PyObject *tstr = NULL, *shape = NULL; - PyObject *inter = NULL; - PyObject *base = NULL; - PyArrayObject *ret; - PyArray_Descr *type=NULL; - char *data; - Py_ssize_t buffer_len; - int res, i, n; - intp dims[MAX_DIMS], strides[MAX_DIMS]; - int dataflags = BEHAVED; - - /* Get the memory from __array_data__ and __array_offset__ */ - /* Get the shape */ - /* Get the typestring -- ignore array_descr */ - /* Get the strides */ - - inter = PyObject_GetAttrString(input, "__array_interface__"); - if (inter == NULL) { - PyErr_Clear(); - return Py_NotImplemented; - } - if (!PyDict_Check(inter)) { - Py_DECREF(inter); - return Py_NotImplemented; - } - shape = PyDict_GetItemString(inter, "shape"); - if (shape == NULL) { - Py_DECREF(inter); - return Py_NotImplemented; - } - tstr = PyDict_GetItemString(inter, "typestr"); - if (tstr == NULL) { - Py_DECREF(inter); - return Py_NotImplemented; - } - - attr = PyDict_GetItemString(inter, 
"data"); - base = input; - if ((attr == NULL) || (attr==Py_None) || (!PyTuple_Check(attr))) { - if (attr && (attr != Py_None)) { - item = attr; - } - else { - item = input; - } - res = PyObject_AsWriteBuffer(item, (void **)&data, &buffer_len); - if (res < 0) { - PyErr_Clear(); - res = PyObject_AsReadBuffer( - item, (const void **)&data, &buffer_len); - if (res < 0) { - goto fail; - } - dataflags &= ~NPY_WRITEABLE; - } - attr = PyDict_GetItemString(inter, "offset"); - if (attr) { - longlong num = PyLong_AsLongLong(attr); - if (error_converting(num)) { - PyErr_SetString(PyExc_TypeError, - "__array_interface__ offset must be an integer"); - goto fail; - } - data += num; - } - base = item; - } - else { - PyObject *dataptr; - if (PyTuple_GET_SIZE(attr) != 2) { - PyErr_SetString(PyExc_TypeError, - "__array_interface__ data must be a 2-tuple with " - "(data pointer integer, read-only flag)"); - goto fail; - } - dataptr = PyTuple_GET_ITEM(attr, 0); - if (PyString_Check(dataptr)) { - res = sscanf(PyString_AsString(dataptr), - "%p", (void **)&data); - if (res < 1) { - PyErr_SetString(PyExc_TypeError, - "__array_interface__ data string cannot be converted"); - goto fail; - } - } - else if (PyIntOrLong_Check(dataptr)) { - data = PyLong_AsVoidPtr(dataptr); - } - else { - PyErr_SetString(PyExc_TypeError, - "first element of __array_interface__ data tuple " - "must be integer or string."); - goto fail; - } - if (PyObject_IsTrue(PyTuple_GET_ITEM(attr,1))) { - dataflags &= ~NPY_WRITEABLE; - } - } - attr = tstr; -#if defined(NPY_PY3K) - if (PyUnicode_Check(tstr)) { - /* Allow unicode type strings */ - attr = PyUnicode_AsASCIIString(tstr); - } -#endif - if (!PyBytes_Check(attr)) { - PyErr_SetString(PyExc_TypeError, "typestr must be a string"); - goto fail; - } - type = _array_typedescr_fromstr(PyString_AS_STRING(attr)); -#if defined(NPY_PY3K) - if (attr != tstr) { - Py_DECREF(attr); - } -#endif - if (type == NULL) { - goto fail; - } - attr = shape; - if (!PyTuple_Check(attr)) { - 
PyErr_SetString(PyExc_TypeError, - "shape must be a tuple"); - Py_DECREF(type); - goto fail; - } - n = PyTuple_GET_SIZE(attr); - for (i = 0; i < n; i++) { - item = PyTuple_GET_ITEM(attr, i); - dims[i] = PyArray_PyIntAsIntp(item); - if (error_converting(dims[i])) { - break; - } - } - - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, type, - n, dims, - NULL, data, - dataflags, NULL); - if (ret == NULL) { - return NULL; - } - Py_INCREF(base); - ret->base = base; - - attr = PyDict_GetItemString(inter, "strides"); - if (attr != NULL && attr != Py_None) { - if (!PyTuple_Check(attr)) { - PyErr_SetString(PyExc_TypeError, - "strides must be a tuple"); - Py_DECREF(ret); - return NULL; - } - if (n != PyTuple_GET_SIZE(attr)) { - PyErr_SetString(PyExc_ValueError, - "mismatch in length of strides and shape"); - Py_DECREF(ret); - return NULL; - } - for (i = 0; i < n; i++) { - item = PyTuple_GET_ITEM(attr, i); - strides[i] = PyArray_PyIntAsIntp(item); - if (error_converting(strides[i])) { - break; - } - } - if (PyErr_Occurred()) { - PyErr_Clear(); - } - memcpy(ret->strides, strides, n*sizeof(npy_intp)); - } - else PyErr_Clear(); - PyArray_UpdateFlags(ret, UPDATE_ALL); - Py_DECREF(inter); - return (PyObject *)ret; - - fail: - Py_XDECREF(inter); - return NULL; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) -{ - PyObject *new; - PyObject *array_meth; - - array_meth = PyObject_GetAttrString(op, "__array__"); - if (array_meth == NULL) { - PyErr_Clear(); - return Py_NotImplemented; - } - if (context == NULL) { - if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, NULL); - } - else { - new = PyObject_CallFunction(array_meth, "O", typecode); - } - } - else { - if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, "OO", Py_None, context); - if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - new = PyObject_CallFunction(array_meth, ""); - } - } - 
else { - new = PyObject_CallFunction(array_meth, "OO", typecode, context); - if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - new = PyObject_CallFunction(array_meth, "O", typecode); - } - } - } - Py_DECREF(array_meth); - if (new == NULL) { - return NULL; - } - if (!PyArray_Check(new)) { - PyErr_SetString(PyExc_ValueError, - "object __array__ method not " \ - "producing an array"); - Py_DECREF(new); - return NULL; - } - return new; -} - -/*NUMPY_API -* new reference -- accepts NULL for mintype -*/ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) -{ - return _array_find_type(op, mintype, MAX_DIMS); -} - -/* These are also old calls (should use PyArray_NewFromDescr) */ - -/* They all zero-out the memory as previously done */ - -/* steals reference to descr -- and enforces native byteorder on it.*/ -/*NUMPY_API - Like FromDimsAndData but uses the Descr structure instead of typecode - as input. -*/ -NPY_NO_EXPORT PyObject * -PyArray_FromDimsAndDataAndDescr(int nd, int *d, - PyArray_Descr *descr, - char *data) -{ - PyObject *ret; - int i; - npy_intp newd[MAX_DIMS]; - char msg[] = "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr."; - - if (DEPRECATE(msg) < 0) { - return NULL; - } - if (!PyArray_ISNBO(descr->byteorder)) - descr->byteorder = '='; - for (i = 0; i < nd; i++) { - newd[i] = (npy_intp) d[i]; - } - ret = PyArray_NewFromDescr(&PyArray_Type, descr, - nd, newd, - NULL, data, - (data ? CARRAY : 0), NULL); - return ret; -} - -/*NUMPY_API - Construct an empty array from dimensions and typenum -*/ -NPY_NO_EXPORT PyObject * -PyArray_FromDims(int nd, int *d, int type) -{ - PyObject *ret; - char msg[] = "PyArray_FromDims: use PyArray_SimpleNew."; - - if (DEPRECATE(msg) < 0) { - return NULL; - } - ret = PyArray_FromDimsAndDataAndDescr(nd, d, - PyArray_DescrFromType(type), - NULL); - /* - * Old FromDims set memory to zero --- some algorithms - * relied on that. Better keep it the same. 
If - * Object type, then it's already been set to zero, though. - */ - if (ret && (PyArray_DESCR(ret)->type_num != PyArray_OBJECT)) { - memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); - } - return ret; -} - -/* end old calls */ - -/*NUMPY_API - * This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY) - * that special cases Arrays and PyArray_Scalars up front - * It *steals a reference* to the object - * It also guarantees that the result is PyArray_Type - * Because it decrefs op if any conversion needs to take place - * so it can be used like PyArray_EnsureArray(some_function(...)) - */ -NPY_NO_EXPORT PyObject * -PyArray_EnsureArray(PyObject *op) -{ - PyObject *new; - - if ((op == NULL) || (PyArray_CheckExact(op))) { - new = op; - Py_XINCREF(new); - } - else if (PyArray_Check(op)) { - new = PyArray_View((PyArrayObject *)op, NULL, &PyArray_Type); - } - else if (PyArray_IsScalar(op, Generic)) { - new = PyArray_FromScalar(op, NULL); - } - else { - new = PyArray_FromAny(op, NULL, 0, 0, NPY_ENSUREARRAY, NULL); - } - Py_XDECREF(op); - return new; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_EnsureAnyArray(PyObject *op) -{ - if (op && PyArray_Check(op)) { - return op; - } - return PyArray_EnsureArray(op); -} - -/* TODO: Put the order parameter in PyArray_CopyAnyInto and remove this */ -NPY_NO_EXPORT int -PyArray_CopyAnyIntoOrdered(PyArrayObject *dst, PyArrayObject *src, - NPY_ORDER order) -{ - PyArray_StridedTransferFn *stransfer = NULL; - void *transferdata = NULL; - NpyIter *dst_iter, *src_iter; - - NpyIter_IterNextFunc *dst_iternext, *src_iternext; - char **dst_dataptr, **src_dataptr; - npy_intp dst_stride, src_stride; - npy_intp *dst_countptr, *src_countptr; - - char *dst_data, *src_data; - npy_intp dst_count, src_count, count; - npy_intp src_itemsize; - npy_intp dst_size, src_size; - int needs_api; - - NPY_BEGIN_THREADS_DEF; - - if (!PyArray_ISWRITEABLE(dst)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array"); - 
return -1; - } - - /* - * If the shapes match and a particular order is forced - * for both, use the more efficient CopyInto - */ - if (order != NPY_ANYORDER && order != NPY_KEEPORDER && - PyArray_NDIM(dst) == PyArray_NDIM(src) && - PyArray_CompareLists(PyArray_DIMS(dst), PyArray_DIMS(src), - PyArray_NDIM(dst))) { - return PyArray_CopyInto(dst, src); - } - - dst_size = PyArray_SIZE(dst); - src_size = PyArray_SIZE(src); - if (dst_size != src_size) { - PyErr_SetString(PyExc_ValueError, - "arrays must have the same number of elements" - " for copy"); - return -1; - } - - /* Zero-sized arrays require nothing be done */ - if (dst_size == 0) { - return 0; - } - - - /* - * This copy is based on matching C-order traversals of src and dst. - * By using two iterators, we can find maximal sub-chunks that - * can be processed at once. - */ - dst_iter = NpyIter_New(dst, NPY_ITER_WRITEONLY| - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_DONT_NEGATE_STRIDES| - NPY_ITER_REFS_OK, - order, - NPY_NO_CASTING, - NULL); - if (dst_iter == NULL) { - return -1; - } - src_iter = NpyIter_New(src, NPY_ITER_READONLY| - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_DONT_NEGATE_STRIDES| - NPY_ITER_REFS_OK, - order, - NPY_NO_CASTING, - NULL); - if (src_iter == NULL) { - NpyIter_Deallocate(dst_iter); - return -1; - } - - /* Get all the values needed for the inner loop */ - dst_iternext = NpyIter_GetIterNext(dst_iter, NULL); - dst_dataptr = NpyIter_GetDataPtrArray(dst_iter); - /* Since buffering is disabled, we can cache the stride */ - dst_stride = *NpyIter_GetInnerStrideArray(dst_iter); - dst_countptr = NpyIter_GetInnerLoopSizePtr(dst_iter); - - src_iternext = NpyIter_GetIterNext(src_iter, NULL); - src_dataptr = NpyIter_GetDataPtrArray(src_iter); - /* Since buffering is disabled, we can cache the stride */ - src_stride = *NpyIter_GetInnerStrideArray(src_iter); - src_countptr = NpyIter_GetInnerLoopSizePtr(src_iter); - - if (dst_iternext == NULL || src_iternext == NULL) { - NpyIter_Deallocate(dst_iter); - 
NpyIter_Deallocate(src_iter); - return -1; - } - - src_itemsize = PyArray_DESCR(src)->elsize; - - needs_api = NpyIter_IterationNeedsAPI(dst_iter) || - NpyIter_IterationNeedsAPI(src_iter); - - /* - * Because buffering is disabled in the iterator, the inner loop - * strides will be the same throughout the iteration loop. Thus, - * we can pass them to this function to take advantage of - * contiguous strides, etc. - */ - if (PyArray_GetDTypeTransferFunction( - PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst), - src_stride, dst_stride, - PyArray_DESCR(src), PyArray_DESCR(dst), - 0, - &stransfer, &transferdata, - &needs_api) != NPY_SUCCEED) { - NpyIter_Deallocate(dst_iter); - NpyIter_Deallocate(src_iter); - return -1; - } - - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - dst_count = *dst_countptr; - src_count = *src_countptr; - dst_data = *dst_dataptr; - src_data = *src_dataptr; - /* - * The tests did not trigger this code, so added a new function - * ndarray.setasflat to the Python exposure in order to test it. - */ - for(;;) { - /* Transfer the biggest amount that fits both */ - count = (src_count < dst_count) ? src_count : dst_count; - stransfer(dst_data, dst_stride, - src_data, src_stride, - count, src_itemsize, transferdata); - - /* If we exhausted the dst block, refresh it */ - if (dst_count == count) { - if (!dst_iternext(dst_iter)) { - break; - } - dst_count = *dst_countptr; - dst_data = *dst_dataptr; - } - else { - dst_count -= count; - dst_data += count*dst_stride; - } - - /* If we exhausted the src block, refresh it */ - if (src_count == count) { - if (!src_iternext(src_iter)) { - break; - } - src_count = *src_countptr; - src_data = *src_dataptr; - } - else { - src_count -= count; - src_data += count*src_stride; - } - } - - if (!needs_api) { - NPY_END_THREADS; - } - - PyArray_FreeStridedTransferData(transferdata); - NpyIter_Deallocate(dst_iter); - NpyIter_Deallocate(src_iter); - - return PyErr_Occurred() ? 
-1 : 0; -} - -/*NUMPY_API - * Copy an Array into another array -- memory must not overlap - * Does not require src and dest to have "broadcastable" shapes - * (only the same number of elements). - * - * TODO: For NumPy 2.0, this could accept an order parameter which - * only allows NPY_CORDER and NPY_FORDER. Could also rename - * this to CopyAsFlat to make the name more intuitive. - * - * Returns 0 on success, -1 on error. - */ -NPY_NO_EXPORT int -PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) -{ - return PyArray_CopyAnyIntoOrdered(dst, src, NPY_CORDER); -} - -/*NUMPY_API - * Copy an Array into another array -- memory must not overlap. - * Broadcast to the destination shape if necessary. - * - * Returns 0 on success, -1 on failure. - */ -NPY_NO_EXPORT int -PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) -{ - PyArray_StridedTransferFn *stransfer = NULL; - void *transferdata = NULL; - NPY_BEGIN_THREADS_DEF; - - if (!PyArray_ISWRITEABLE(dst)) { - PyErr_SetString(PyExc_RuntimeError, - "cannot write to array"); - return -1; - } - - if (PyArray_NDIM(dst) >= PyArray_NDIM(src) && - PyArray_TRIVIALLY_ITERABLE_PAIR(dst, src)) { - char *dst_data, *src_data; - npy_intp count, dst_stride, src_stride, src_itemsize; - - int needs_api = 0; - - PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(dst, src, count, - dst_data, src_data, dst_stride, src_stride); - - /* - * Check for overlap with positive strides, and if found, - * possibly reverse the order - */ - if (dst_data > src_data && src_stride > 0 && dst_stride > 0 && - (dst_data < src_data+src_stride*count) && - (src_data < dst_data+dst_stride*count)) { - dst_data += dst_stride*(count-1); - src_data += src_stride*(count-1); - dst_stride = -dst_stride; - src_stride = -src_stride; - } - - if (PyArray_GetDTypeTransferFunction( - PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst), - src_stride, dst_stride, - PyArray_DESCR(src), PyArray_DESCR(dst), - 0, - &stransfer, &transferdata, - &needs_api) != NPY_SUCCEED) { - return 
-1; - } - - src_itemsize = PyArray_DESCR(src)->elsize; - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - stransfer(dst_data, dst_stride, src_data, src_stride, - count, src_itemsize, transferdata); - - if (!needs_api) { - NPY_END_THREADS; - } - - PyArray_FreeStridedTransferData(transferdata); - - return PyErr_Occurred() ? -1 : 0; - } - else { - PyArrayObject *op[2]; - npy_uint32 op_flags[2]; - NpyIter *iter; - - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *stride; - npy_intp *countptr; - npy_intp src_itemsize; - int needs_api; - - op[0] = dst; - op[1] = src; - /* - * TODO: In NumPy 2.0, renable NPY_ITER_NO_BROADCAST. This - * was removed during NumPy 1.6 testing for compatibility - * with NumPy 1.5, as per Travis's -10 veto power. - */ - /*op_flags[0] = NPY_ITER_WRITEONLY|NPY_ITER_NO_BROADCAST;*/ - op_flags[0] = NPY_ITER_WRITEONLY; - op_flags[1] = NPY_ITER_READONLY; - - iter = NpyIter_MultiNew(2, op, - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_REFS_OK| - NPY_ITER_ZEROSIZE_OK, - NPY_KEEPORDER, - NPY_NO_CASTING, - op_flags, - NULL); - if (iter == NULL) { - return -1; - } - - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - return -1; - } - dataptr = NpyIter_GetDataPtrArray(iter); - stride = NpyIter_GetInnerStrideArray(iter); - countptr = NpyIter_GetInnerLoopSizePtr(iter); - src_itemsize = PyArray_DESCR(src)->elsize; - - needs_api = NpyIter_IterationNeedsAPI(iter); - - /* - * Because buffering is disabled in the iterator, the inner loop - * strides will be the same throughout the iteration loop. Thus, - * we can pass them to this function to take advantage of - * contiguous strides, etc. 
- */ - if (PyArray_GetDTypeTransferFunction( - PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst), - stride[1], stride[0], - PyArray_DESCR(src), PyArray_DESCR(dst), - 0, - &stransfer, &transferdata, - &needs_api) != NPY_SUCCEED) { - NpyIter_Deallocate(iter); - return -1; - } - - - if (NpyIter_GetIterSize(iter) != 0) { - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - do { - stransfer(dataptr[0], stride[0], - dataptr[1], stride[1], - *countptr, src_itemsize, transferdata); - } while(iternext(iter)); - - if (!needs_api) { - NPY_END_THREADS; - } - } - - PyArray_FreeStridedTransferData(transferdata); - NpyIter_Deallocate(iter); - - return PyErr_Occurred() ? -1 : 0; - } -} - - -/*NUMPY_API - PyArray_CheckAxis - - check that axis is valid - convert 0-d arrays to 1-d arrays -*/ -NPY_NO_EXPORT PyObject * -PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags) -{ - PyObject *temp1, *temp2; - int n = arr->nd; - - if (*axis == MAX_DIMS || n == 0) { - if (n != 1) { - temp1 = PyArray_Ravel(arr,0); - if (temp1 == NULL) { - *axis = 0; - return NULL; - } - if (*axis == MAX_DIMS) { - *axis = PyArray_NDIM(temp1)-1; - } - } - else { - temp1 = (PyObject *)arr; - Py_INCREF(temp1); - *axis = 0; - } - if (!flags && *axis == 0) { - return temp1; - } - } - else { - temp1 = (PyObject *)arr; - Py_INCREF(temp1); - } - if (flags) { - temp2 = PyArray_CheckFromAny((PyObject *)temp1, NULL, - 0, 0, flags, NULL); - Py_DECREF(temp1); - if (temp2 == NULL) { - return NULL; - } - } - else { - temp2 = (PyObject *)temp1; - } - n = PyArray_NDIM(temp2); - if (*axis < 0) { - *axis += n; - } - if ((*axis < 0) || (*axis >= n)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", *axis); - Py_DECREF(temp2); - return NULL; - } - return temp2; -} - -/*NUMPY_API - * Zeros - * - * steal a reference - * accepts NULL type - */ -NPY_NO_EXPORT PyObject * -PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int fortran) -{ - PyArrayObject *ret; - - if (!type) { - type = 
PyArray_DescrFromType(PyArray_DEFAULT); - } - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - type, - nd, dims, - NULL, NULL, - fortran, NULL); - if (ret == NULL) { - return NULL; - } - if (_zerofill(ret) < 0) { - return NULL; - } - return (PyObject *)ret; - -} - -/*NUMPY_API - * Empty - * - * accepts NULL type - * steals referenct to type - */ -NPY_NO_EXPORT PyObject * -PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int fortran) -{ - PyArrayObject *ret; - - if (!type) type = PyArray_DescrFromType(PyArray_DEFAULT); - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - type, nd, dims, - NULL, NULL, - fortran, NULL); - if (ret == NULL) { - return NULL; - } - if (PyDataType_REFCHK(type)) { - PyArray_FillObjectArray(ret, Py_None); - if (PyErr_Occurred()) { - Py_DECREF(ret); - return NULL; - } - } - return (PyObject *)ret; -} - -/* - * Like ceil(value), but check for overflow. - * - * Return 0 on success, -1 on failure. In case of failure, set a PyExc_Overflow - * exception - */ -static int _safe_ceil_to_intp(double value, npy_intp* ret) -{ - double ivalue; - - ivalue = npy_ceil(value); - if (ivalue < NPY_MIN_INTP || ivalue > NPY_MAX_INTP) { - return -1; - } - - *ret = (npy_intp)ivalue; - return 0; -} - - -/*NUMPY_API - Arange, -*/ -NPY_NO_EXPORT PyObject * -PyArray_Arange(double start, double stop, double step, int type_num) -{ - npy_intp length; - PyObject *range; - PyArray_ArrFuncs *funcs; - PyObject *obj; - int ret; - - if (_safe_ceil_to_intp((stop - start)/step, &length)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); - } - - if (length <= 0) { - length = 0; - return PyArray_New(&PyArray_Type, 1, &length, type_num, - NULL, NULL, 0, 0, NULL); - } - range = PyArray_New(&PyArray_Type, 1, &length, type_num, - NULL, NULL, 0, 0, NULL); - if (range == NULL) { - return NULL; - } - funcs = PyArray_DESCR(range)->f; - - /* - * place start in the buffer and the next value in the second position - * if 
length > 2, then call the inner loop, otherwise stop - */ - obj = PyFloat_FromDouble(start); - ret = funcs->setitem(obj, PyArray_DATA(range), (PyArrayObject *)range); - Py_DECREF(obj); - if (ret < 0) { - goto fail; - } - if (length == 1) { - return range; - } - obj = PyFloat_FromDouble(start + step); - ret = funcs->setitem(obj, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), - (PyArrayObject *)range); - Py_DECREF(obj); - if (ret < 0) { - goto fail; - } - if (length == 2) { - return range; - } - if (!funcs->fill) { - PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); - Py_DECREF(range); - return NULL; - } - funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) { - goto fail; - } - return range; - - fail: - Py_DECREF(range); - return NULL; -} - -/* - * the formula is len = (intp) ceil((start - stop) / step); - */ -static npy_intp -_calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx) -{ - npy_intp len, tmp; - PyObject *val; - double value; - - *next = PyNumber_Subtract(stop, start); - if (!(*next)) { - if (PyTuple_Check(stop)) { - PyErr_Clear(); - PyErr_SetString(PyExc_TypeError, - "arange: scalar arguments expected "\ - "instead of a tuple."); - } - return -1; - } - val = PyNumber_TrueDivide(*next, step); - Py_DECREF(*next); - *next = NULL; - if (!val) { - return -1; - } - if (cmplx && PyComplex_Check(val)) { - value = PyComplex_RealAsDouble(val); - if (error_converting(value)) { - Py_DECREF(val); - return -1; - } - if (_safe_ceil_to_intp(value, &len)) { - Py_DECREF(val); - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); - return -1; - } - value = PyComplex_ImagAsDouble(val); - Py_DECREF(val); - if (error_converting(value)) { - return -1; - } - if (_safe_ceil_to_intp(value, &tmp)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); - return -1; - } - len = MIN(len, tmp); - } - else { - value = 
PyFloat_AsDouble(val); - Py_DECREF(val); - if (error_converting(value)) { - return -1; - } - if (_safe_ceil_to_intp(value, &len)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); - return -1; - } - } - if (len > 0) { - *next = PyNumber_Add(start, step); - if (!next) { - return -1; - } - } - return len; -} - -/*NUMPY_API - * - * ArangeObj, - * - * this doesn't change the references - */ -NPY_NO_EXPORT PyObject * -PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr *dtype) -{ - PyObject *range; - PyArray_ArrFuncs *funcs; - PyObject *next, *err; - npy_intp length; - PyArray_Descr *native = NULL; - int swap; - - if (!dtype) { - PyArray_Descr *deftype; - PyArray_Descr *newtype; - - /* intentionally made to be at least NPY_LONG */ - deftype = PyArray_DescrFromType(NPY_LONG); - newtype = PyArray_DescrFromObject(start, deftype); - Py_DECREF(deftype); - if (newtype == NULL) { - return NULL; - } - deftype = newtype; - if (stop && stop != Py_None) { - newtype = PyArray_DescrFromObject(stop, deftype); - Py_DECREF(deftype); - if (newtype == NULL) { - return NULL; - } - deftype = newtype; - } - if (step && step != Py_None) { - newtype = PyArray_DescrFromObject(step, deftype); - Py_DECREF(deftype); - if (newtype == NULL) { - return NULL; - } - deftype = newtype; - } - dtype = deftype; - } - else { - Py_INCREF(dtype); - } - if (!step || step == Py_None) { - step = PyInt_FromLong(1); - } - else { - Py_XINCREF(step); - } - if (!stop || stop == Py_None) { - stop = start; - start = PyInt_FromLong(0); - } - else { - Py_INCREF(start); - } - /* calculate the length and next = start + step*/ - length = _calc_length(start, stop, step, &next, - PyTypeNum_ISCOMPLEX(dtype->type_num)); - err = PyErr_Occurred(); - if (err) { - Py_DECREF(dtype); - if (err && PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) { - PyErr_SetString(PyExc_ValueError, "Maximum allowed size exceeded"); - } - goto fail; - } - if (length <= 0) { - 
length = 0; - range = PyArray_SimpleNewFromDescr(1, &length, dtype); - Py_DECREF(step); - Py_DECREF(start); - return range; - } - - /* - * If dtype is not in native byte-order then get native-byte - * order version. And then swap on the way out. - */ - if (!PyArray_ISNBO(dtype->byteorder)) { - native = PyArray_DescrNewByteorder(dtype, PyArray_NATBYTE); - swap = 1; - } - else { - native = dtype; - swap = 0; - } - - range = PyArray_SimpleNewFromDescr(1, &length, native); - if (range == NULL) { - goto fail; - } - - /* - * place start in the buffer and the next value in the second position - * if length > 2, then call the inner loop, otherwise stop - */ - funcs = PyArray_DESCR(range)->f; - if (funcs->setitem( - start, PyArray_DATA(range), (PyArrayObject *)range) < 0) { - goto fail; - } - if (length == 1) { - goto finish; - } - if (funcs->setitem(next, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), - (PyArrayObject *)range) < 0) { - goto fail; - } - if (length == 2) { - goto finish; - } - if (!funcs->fill) { - PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); - Py_DECREF(range); - goto fail; - } - funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) { - goto fail; - } - finish: - if (swap) { - PyObject *new; - new = PyArray_Byteswap((PyArrayObject *)range, 1); - Py_DECREF(new); - Py_DECREF(PyArray_DESCR(range)); - PyArray_DESCR(range) = dtype; /* steals the reference */ - } - Py_DECREF(start); - Py_DECREF(step); - Py_DECREF(next); - return range; - - fail: - Py_DECREF(start); - Py_DECREF(step); - Py_XDECREF(next); - return NULL; -} - -static PyArrayObject * -array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nread) -{ - PyArrayObject *r; - npy_intp start, numbytes; - - if (num < 0) { - int fail = 0; - -#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) - /* Workaround Win64 fwrite() bug. 
Ticket #1660 */ - start = (npy_intp )_ftelli64(fp); - if (start < 0) { - fail = 1; - } - if (_fseeki64(fp, 0, SEEK_END) < 0) { - fail = 1; - } - numbytes = (npy_intp) _ftelli64(fp); - if (numbytes < 0) { - fail = 1; - } - numbytes -= start; - if (_fseeki64(fp, start, SEEK_SET) < 0) { - fail = 1; - } -#else - start = (npy_intp)ftell(fp); - if (start < 0) { - fail = 1; - } - if (fseek(fp, 0, SEEK_END) < 0) { - fail = 1; - } - numbytes = (npy_intp) ftell(fp); - if (numbytes < 0) { - fail = 1; - } - numbytes -= start; - if (fseek(fp, start, SEEK_SET) < 0) { - fail = 1; - } -#endif - if (fail) { - PyErr_SetString(PyExc_IOError, - "could not seek in file"); - Py_DECREF(dtype); - return NULL; - } - num = numbytes / dtype->elsize; - } - r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtype, - 1, &num, - NULL, NULL, - 0, NULL); - if (r == NULL) { - return NULL; - } - NPY_BEGIN_ALLOW_THREADS; - *nread = fread(r->data, dtype->elsize, num, fp); - NPY_END_ALLOW_THREADS; - return r; -} - -/* - * Create an array by reading from the given stream, using the passed - * next_element and skip_separator functions. - */ -#define FROM_BUFFER_SIZE 4096 -static PyArrayObject * -array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, - void *stream, next_element next, skip_separator skip_sep, - void *stream_data) -{ - PyArrayObject *r; - npy_intp i; - char *dptr, *clean_sep, *tmp; - int err = 0; - npy_intp thisbuf = 0; - npy_intp size; - npy_intp bytes, totalbytes; - - size = (num >= 0) ? 
num : FROM_BUFFER_SIZE; - r = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, - dtype, - 1, &size, - NULL, NULL, - 0, NULL); - if (r == NULL) { - return NULL; - } - clean_sep = swab_separator(sep); - NPY_BEGIN_ALLOW_THREADS; - totalbytes = bytes = size * dtype->elsize; - dptr = r->data; - for (i= 0; num < 0 || i < num; i++) { - if (next(&stream, dptr, dtype, stream_data) < 0) { - break; - } - *nread += 1; - thisbuf += 1; - dptr += dtype->elsize; - if (num < 0 && thisbuf == size) { - totalbytes += bytes; - tmp = PyDataMem_RENEW(r->data, totalbytes); - if (tmp == NULL) { - err = 1; - break; - } - r->data = tmp; - dptr = tmp + (totalbytes - bytes); - thisbuf = 0; - } - if (skip_sep(&stream, clean_sep, stream_data) < 0) { - break; - } - } - if (num < 0) { - tmp = PyDataMem_RENEW(r->data, NPY_MAX(*nread,1)*dtype->elsize); - if (tmp == NULL) { - err = 1; - } - else { - PyArray_DIM(r,0) = *nread; - r->data = tmp; - } - } - NPY_END_ALLOW_THREADS; - free(clean_sep); - if (err == 1) { - PyErr_NoMemory(); - } - if (PyErr_Occurred()) { - Py_DECREF(r); - return NULL; - } - return r; -} -#undef FROM_BUFFER_SIZE - -/*NUMPY_API - * - * Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an - * array corresponding to the data encoded in that file. - * - * If the dtype is NULL, the default array type is used (double). - * If non-null, the reference is stolen. - * - * The number of elements to read is given as ``num``; if it is < 0, then - * then as many as possible are read. - * - * If ``sep`` is NULL or empty, then binary data is assumed, else - * text data, with ``sep`` as the separator between elements. Whitespace in - * the separator matches any length of whitespace in the text, and a match - * for whitespace around the separator is added. - * - * For memory-mapped files, use the buffer interface. No more data than - * necessary is read by this routine. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) -{ - PyArrayObject *ret; - size_t nread = 0; - - if (PyDataType_REFCHK(dtype)) { - PyErr_SetString(PyExc_ValueError, - "Cannot read into object array"); - Py_DECREF(dtype); - return NULL; - } - if (dtype->elsize == 0) { - PyErr_SetString(PyExc_ValueError, - "The elements are 0-sized."); - Py_DECREF(dtype); - return NULL; - } - if ((sep == NULL) || (strlen(sep) == 0)) { - ret = array_fromfile_binary(fp, dtype, num, &nread); - } - else { - if (dtype->f->scanfunc == NULL) { - PyErr_SetString(PyExc_ValueError, - "Unable to read character files of that array type"); - Py_DECREF(dtype); - return NULL; - } - ret = array_from_text(dtype, num, sep, &nread, fp, - (next_element) fromfile_next_element, - (skip_separator) fromfile_skip_separator, NULL); - } - if (ret == NULL) { - Py_DECREF(dtype); - return NULL; - } - if (((npy_intp) nread) < num) { - /* Realloc memory for smaller number of elements */ - const size_t nsize = NPY_MAX(nread,1)*ret->descr->elsize; - char *tmp; - - if((tmp = PyDataMem_RENEW(ret->data, nsize)) == NULL) { - Py_DECREF(ret); - return PyErr_NoMemory(); - } - ret->data = tmp; - PyArray_DIM(ret,0) = nread; - } - return (PyObject *)ret; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, - npy_intp count, npy_intp offset) -{ - PyArrayObject *ret; - char *data; - Py_ssize_t ts; - npy_intp s, n; - int itemsize; - int writeable = 1; - - - if (PyDataType_REFCHK(type)) { - PyErr_SetString(PyExc_ValueError, - "cannot create an OBJECT array from memory"\ - " buffer"); - Py_DECREF(type); - return NULL; - } - if (type->elsize == 0) { - PyErr_SetString(PyExc_ValueError, - "itemsize cannot be zero in type"); - Py_DECREF(type); - return NULL; - } - if (Py_TYPE(buf)->tp_as_buffer == NULL -#if defined(NPY_PY3K) - || Py_TYPE(buf)->tp_as_buffer->bf_getbuffer == NULL -#else - || 
(Py_TYPE(buf)->tp_as_buffer->bf_getwritebuffer == NULL - && Py_TYPE(buf)->tp_as_buffer->bf_getreadbuffer == NULL) -#endif - ) { - PyObject *newbuf; - newbuf = PyObject_GetAttrString(buf, "__buffer__"); - if (newbuf == NULL) { - Py_DECREF(type); - return NULL; - } - buf = newbuf; - } - else { - Py_INCREF(buf); - } - - if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts) == -1) { - writeable = 0; - PyErr_Clear(); - if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) { - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - } - - if ((offset < 0) || (offset > ts)) { - PyErr_Format(PyExc_ValueError, - "offset must be non-negative and no greater than buffer "\ - "length (%" INTP_FMT ")", (npy_intp)ts); - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - - data += offset; - s = (npy_intp)ts - offset; - n = (npy_intp)count; - itemsize = type->elsize; - if (n < 0 ) { - if (s % itemsize != 0) { - PyErr_SetString(PyExc_ValueError, - "buffer size must be a multiple"\ - " of element size"); - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - n = s/itemsize; - } - else { - if (s < n*itemsize) { - PyErr_SetString(PyExc_ValueError, - "buffer is smaller than requested"\ - " size"); - Py_DECREF(buf); - Py_DECREF(type); - return NULL; - } - } - - if ((ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - type, - 1, &n, - NULL, data, - DEFAULT, - NULL)) == NULL) { - Py_DECREF(buf); - return NULL; - } - - if (!writeable) { - ret->flags &= ~NPY_WRITEABLE; - } - /* Store a reference for decref on deallocation */ - ret->base = buf; - PyArray_UpdateFlags(ret, NPY_ALIGNED); - return (PyObject *)ret; -} - -/*NUMPY_API - * - * Given a pointer to a string ``data``, a string length ``slen``, and - * a ``PyArray_Descr``, return an array corresponding to the data - * encoded in that string. - * - * If the dtype is NULL, the default array type is used (double). - * If non-null, the reference is stolen. 
- * - * If ``slen`` is < 0, then the end of string is used for text data. - * It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs - * would be the norm). - * - * The number of elements to read is given as ``num``; if it is < 0, then - * then as many as possible are read. - * - * If ``sep`` is NULL or empty, then binary data is assumed, else - * text data, with ``sep`` as the separator between elements. Whitespace in - * the separator matches any length of whitespace in the text, and a match - * for whitespace around the separator is added. - */ -NPY_NO_EXPORT PyObject * -PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype, - npy_intp num, char *sep) -{ - int itemsize; - PyArrayObject *ret; - Bool binary; - - if (dtype == NULL) { - dtype=PyArray_DescrFromType(PyArray_DEFAULT); - } - if (PyDataType_FLAGCHK(dtype, NPY_ITEM_IS_POINTER) || - PyDataType_REFCHK(dtype)) { - PyErr_SetString(PyExc_ValueError, - "Cannot create an object array from" \ - " a string"); - Py_DECREF(dtype); - return NULL; - } - itemsize = dtype->elsize; - if (itemsize == 0) { - PyErr_SetString(PyExc_ValueError, "zero-valued itemsize"); - Py_DECREF(dtype); - return NULL; - } - - binary = ((sep == NULL) || (strlen(sep) == 0)); - if (binary) { - if (num < 0 ) { - if (slen % itemsize != 0) { - PyErr_SetString(PyExc_ValueError, - "string size must be a "\ - "multiple of element size"); - Py_DECREF(dtype); - return NULL; - } - num = slen/itemsize; - } - else { - if (slen < num*itemsize) { - PyErr_SetString(PyExc_ValueError, - "string is smaller than " \ - "requested size"); - Py_DECREF(dtype); - return NULL; - } - } - ret = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, dtype, - 1, &num, NULL, NULL, - 0, NULL); - if (ret == NULL) { - return NULL; - } - memcpy(ret->data, data, num*dtype->elsize); - } - else { - /* read from character-based string */ - size_t nread = 0; - char *end; - - if (dtype->f->scanfunc == NULL) { - PyErr_SetString(PyExc_ValueError, - 
"don't know how to read " \ - "character strings with that " \ - "array type"); - Py_DECREF(dtype); - return NULL; - } - if (slen < 0) { - end = NULL; - } - else { - end = data + slen; - } - ret = array_from_text(dtype, num, sep, &nread, - data, - (next_element) fromstr_next_element, - (skip_separator) fromstr_skip_separator, - end); - } - return (PyObject *)ret; -} - -/*NUMPY_API - * - * steals a reference to dtype (which cannot be NULL) - */ -NPY_NO_EXPORT PyObject * -PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) -{ - PyObject *value; - PyObject *iter = PyObject_GetIter(obj); - PyArrayObject *ret = NULL; - npy_intp i, elsize, elcount; - char *item, *new_data; - - if (iter == NULL) { - goto done; - } - elcount = (count < 0) ? 0 : count; - if ((elsize=dtype->elsize) == 0) { - PyErr_SetString(PyExc_ValueError, "Must specify length "\ - "when using variable-size data-type."); - goto done; - } - - /* - * We would need to alter the memory RENEW code to decrement any - * reference counts before throwing away any memory. - */ - if (PyDataType_REFCHK(dtype)) { - PyErr_SetString(PyExc_ValueError, "cannot create "\ - "object arrays from iterator"); - goto done; - } - - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, - &elcount, NULL,NULL, 0, NULL); - dtype = NULL; - if (ret == NULL) { - goto done; - } - for (i = 0; (i < count || count == -1) && - (value = PyIter_Next(iter)); i++) { - if (i >= elcount) { - /* - Grow ret->data: - this is similar for the strategy for PyListObject, but we use - 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... - */ - elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (elcount <= NPY_MAX_INTP/elsize) { - new_data = PyDataMem_RENEW(ret->data, elcount * elsize); - } - else { - new_data = NULL; - } - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); - Py_DECREF(value); - goto done; - } - ret->data = new_data; - } - ret->dimensions[0] = i + 1; - - if (((item = index2ptr(ret, i)) == NULL) - || (ret->descr->f->setitem(value, item, ret) == -1)) { - Py_DECREF(value); - goto done; - } - Py_DECREF(value); - } - - if (i < count) { - PyErr_SetString(PyExc_ValueError, "iterator too short"); - goto done; - } - - /* - * Realloc the data so that don't keep extra memory tied up - * (assuming realloc is reasonably good about reusing space...) - */ - if (i == 0) { - i = 1; - } - new_data = PyDataMem_RENEW(ret->data, i * elsize); - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); - goto done; - } - ret->data = new_data; - - done: - Py_XDECREF(iter); - Py_XDECREF(dtype); - if (PyErr_Occurred()) { - Py_XDECREF(ret); - return NULL; - } - return (PyObject *)ret; -} - -/* - * This is the main array creation routine. - * - * Flags argument has multiple related meanings - * depending on data and strides: - * - * If data is given, then flags is flags associated with data. - * If strides is not given, then a contiguous strides array will be created - * and the NPY_C_CONTIGUOUS bit will be set. If the flags argument - * has the NPY_F_CONTIGUOUS bit set, then a FORTRAN-style strides array will be - * created (and of course the NPY_F_CONTIGUOUS flag bit will be set). - * - * If data is not given but created here, then flags will be DEFAULT - * and a non-zero flags argument can be used to indicate a FORTRAN style - * array is desired. 
- */ - -NPY_NO_EXPORT size_t -_array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize, - int inflag, int *objflags) -{ - int i; - /* Only make Fortran strides if not contiguous as well */ - if ((inflag & (NPY_F_CONTIGUOUS|NPY_C_CONTIGUOUS)) == NPY_F_CONTIGUOUS) { - for (i = 0; i < nd; i++) { - strides[i] = itemsize; - itemsize *= dims[i] ? dims[i] : 1; - } - if (nd > 1) { - *objflags = ((*objflags)|NPY_F_CONTIGUOUS) & ~NPY_C_CONTIGUOUS; - } - else { - *objflags |= (NPY_F_CONTIGUOUS|NPY_C_CONTIGUOUS); - } - } - else { - for (i = nd - 1; i >= 0; i--) { - strides[i] = itemsize; - itemsize *= dims[i] ? dims[i] : 1; - } - if (nd > 1) { - *objflags = ((*objflags)|NPY_C_CONTIGUOUS) & ~NPY_F_CONTIGUOUS; - } - else { - *objflags |= (NPY_C_CONTIGUOUS|NPY_F_CONTIGUOUS); - } - } - return itemsize; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/ctors.h b/numpy-1.6.2/numpy/core/src/multiarray/ctors.h deleted file mode 100644 index 13f5d0da6e..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/ctors.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _NPY_ARRAY_CTORS_H_ -#define _NPY_ARRAY_CTORS_H_ - -NPY_NO_EXPORT PyObject * -PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd, - intp *dims, intp *strides, void *data, - int flags, PyObject *obj); - -NPY_NO_EXPORT PyObject *PyArray_New(PyTypeObject *, int nd, intp *, - int, intp *, void *, int, int, PyObject *); - -NPY_NO_EXPORT PyObject * -PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, - int max_depth, int flags, PyObject *context); - -NPY_NO_EXPORT PyObject * -PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context); - -NPY_NO_EXPORT PyObject * -PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags); - -NPY_NO_EXPORT PyObject * -PyArray_FromStructInterface(PyObject *input); - -NPY_NO_EXPORT PyObject * -PyArray_FromInterface(PyObject *input); - -NPY_NO_EXPORT PyObject * 
-PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, - PyObject *context); - -NPY_NO_EXPORT PyObject * -PyArray_EnsureArray(PyObject *op); - -NPY_NO_EXPORT PyObject * -PyArray_EnsureAnyArray(PyObject *op); - -NPY_NO_EXPORT int -PyArray_MoveInto(PyArrayObject *dest, PyArrayObject *src); - -NPY_NO_EXPORT int -PyArray_CopyAnyInto(PyArrayObject *dest, PyArrayObject *src); - -NPY_NO_EXPORT PyObject * -PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags); - -/* TODO: Put the order parameter in PyArray_CopyAnyInto and remove this */ -NPY_NO_EXPORT int -PyArray_CopyAnyIntoOrdered(PyArrayObject *dst, PyArrayObject *src, - NPY_ORDER order); - -/* FIXME: remove those from here */ -NPY_NO_EXPORT size_t -_array_fill_strides(intp *strides, intp *dims, int nd, size_t itemsize, - int inflag, int *objflags); - -NPY_NO_EXPORT void -_unaligned_strided_byte_copy(char *dst, intp outstrides, char *src, - intp instrides, intp N, int elsize); - -NPY_NO_EXPORT void -_strided_byte_swap(void *p, intp stride, intp n, int size); - -NPY_NO_EXPORT void -copy_and_swap(void *dst, void *src, int itemsize, intp numitems, - intp srcstrides, int swap); - -NPY_NO_EXPORT void -byte_swap_vector(void *p, intp n, int size); - -NPY_NO_EXPORT int -PyArray_AssignFromSequence(PyArrayObject *self, PyObject *v); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/datetime.c b/numpy-1.6.2/numpy/core/src/multiarray/datetime.c deleted file mode 100644 index ad1d982705..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/datetime.c +++ /dev/null @@ -1,922 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include - -#include - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "_datetime.h" - -/* For defaults and errors */ -#define NPY_FR_ERR -1 - -/* Offset for number of days between Dec 31, 1969 and Jan 1, 0001 -* Assuming Gregorian calendar was always in effect (proleptic Gregorian calendar) -*/ - -/* 
Calendar Structure for Parsing Long -> Date */ -typedef struct { - int year, month, day; -} ymdstruct; - -typedef struct { - int hour, min, sec; -} hmsstruct; - - -/* - ==================================================== - == Beginning of section borrowed from mx.DateTime == - ==================================================== -*/ - -/* - * Functions in the following section are borrowed from mx.DateTime version - * 2.0.6, and hence this code is subject to the terms of the egenix public - * license version 1.0.0 - */ - -#define Py_AssertWithArg(x,errortype,errorstr,a1) {if (!(x)) {PyErr_Format(errortype,errorstr,a1);goto onError;}} - -/* Table with day offsets for each month (0-based, without and with leap) */ -static int month_offset[2][13] = { - { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, - { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } -}; - -/* Table of number of days in a month (0-based, without and with leap) */ -static int days_in_month[2][12] = { - { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, - { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 } -}; - -/* Return 1/0 iff year points to a leap year in calendar. */ -static int -is_leapyear(long year) -{ - return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); -} - - -/* - * Return the day of the week for the given absolute date. - * Monday is 0 and Sunday is 6 - */ -static int -day_of_week(npy_longlong absdate) -{ - /* Add in four for the Thursday on Jan 1, 1970 (epoch offset)*/ - absdate += 4; - - if (absdate >= 0) { - return absdate % 7; - } - else { - return 6 + (absdate + 1) % 7; - } -} - -/* - * Return the year offset, that is the absolute date of the day - * 31.12.(year-1) since 31.12.1969 in the proleptic Gregorian calendar. 
- */ -static npy_longlong -year_offset(npy_longlong year) -{ - /* Note that 477 == 1969/4 - 1969/100 + 1969/400 */ - year--; - if (year >= 0 || -1/4 == -1) - return (year-1969)*365 + year/4 - year/100 + year/400 - 477; - else - return (year-1969)*365 + (year-3)/4 - (year-99)/100 + (year-399)/400 - 477; -} - -/* - * Modified version of mxDateTime function - * Returns absolute number of days since Jan 1, 1970 - * assuming a proleptic Gregorian Calendar - * Raises a ValueError if out of range month or day - * day -1 is Dec 31, 1969, day 0 is Jan 1, 1970, day 1 is Jan 2, 1970 - */ -static npy_longlong -days_from_ymd(int year, int month, int day) -{ - - /* Calculate the absolute date */ - int leap; - npy_longlong yearoffset, absdate; - - /* Is it a leap year ? */ - leap = is_leapyear(year); - - /* Negative month values indicate months relative to the years end */ - if (month < 0) month += 13; - Py_AssertWithArg(month >= 1 && month <= 12, - PyExc_ValueError, - "month out of range (1-12): %i", - month); - - /* Negative values indicate days relative to the months end */ - if (day < 0) day += days_in_month[leap][month - 1] + 1; - Py_AssertWithArg(day >= 1 && day <= days_in_month[leap][month - 1], - PyExc_ValueError, - "day out of range: %i", - day); - - /* - * Number of days between Dec 31, (year - 1) and Dec 31, 1969 - * (can be negative). - */ - yearoffset = year_offset(year); - - if (PyErr_Occurred()) goto onError; - - /* - * Calculate the number of days using yearoffset - * Jan 1, 1970 is day 0 and thus Dec. 31, 1969 is day -1 - */ - absdate = day-1 + month_offset[leap][month - 1] + yearoffset; - - return absdate; - - onError: - return 0; - -} - -/* Returns absolute seconds from an hour, minute, and second - */ -#define secs_from_hms(hour, min, sec, multiplier) (\ - ((hour)*3600 + (min)*60 + (sec)) * (npy_int64)(multiplier)\ -) - -/* - * Takes a number of days since Jan 1, 1970 (positive or negative) - * and returns the year. 
month, and day in the proleptic - * Gregorian calendar - * - * Examples: - * - * -1 returns 1969, 12, 31 - * 0 returns 1970, 1, 1 - * 1 returns 1970, 1, 2 - */ - -static ymdstruct -days_to_ymdstruct(npy_datetime dlong) -{ - ymdstruct ymd; - long year; - npy_longlong yearoffset; - int leap, dayoffset; - int month = 1, day = 1; - int *monthoffset; - - dlong += 1; - - /* Approximate year */ - year = 1970 + dlong / 365.2425; - - /* Apply corrections to reach the correct year */ - while (1) { - /* Calculate the year offset */ - yearoffset = year_offset(year); - - /* - * Backward correction: absdate must be greater than the - * yearoffset - */ - if (yearoffset >= dlong) { - year--; - continue; - } - - dayoffset = dlong - yearoffset; - leap = is_leapyear(year); - - /* Forward correction: non leap years only have 365 days */ - if (dayoffset > 365 && !leap) { - year++; - continue; - } - break; - } - - /* Now iterate to find the month */ - monthoffset = month_offset[leap]; - for (month = 1; month < 13; month++) { - if (monthoffset[month] >= dayoffset) - break; - } - day = dayoffset - month_offset[leap][month-1]; - - ymd.year = year; - ymd.month = month; - ymd.day = day; - - return ymd; -} - -/* - * Converts an integer number of seconds in a day to hours minutes seconds. - * It assumes seconds is between 0 and 86399. 
- */ - -static hmsstruct -seconds_to_hmsstruct(npy_longlong dlong) -{ - int hour, minute, second; - hmsstruct hms; - - hour = dlong / 3600; - minute = (dlong % 3600) / 60; - second = dlong - (hour*3600 + minute*60); - - hms.hour = hour; - hms.min = minute; - hms.sec = second; - - return hms; -} - -/* - ==================================================== - == End of section adapted from mx.DateTime == - ==================================================== -*/ - - -/*================================================== -// Parsing DateTime struct and returns a date-time number -// ================================================= - - Structure is assumed to be already normalized -*/ - -/*NUMPY_API - * Create a datetime value from a filled datetime struct and resolution unit. - */ -NPY_NO_EXPORT npy_datetime -PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) -{ - npy_datetime ret; - npy_longlong days = 0; /* The absolute number of days since Jan 1, 1970 */ - - if (fr > NPY_FR_M) { - days = days_from_ymd(d->year, d->month, d->day); - } - if (fr == NPY_FR_Y) { - ret = d->year - 1970; - } - else if (fr == NPY_FR_M) { - ret = (d->year - 1970) * 12 + d->month - 1; - } - else if (fr == NPY_FR_W) { - /* This is just 7-days for now. 
*/ - if (days >= 0) { - ret = days / 7; - } - else { - ret = (days - 6) / 7; - } - } - else if (fr == NPY_FR_B) { - npy_longlong x; - int dotw = day_of_week(days); - - if (dotw > 4) { - /* Invalid business day */ - ret = 0; - } - else { - if (days >= 0) { - /* offset to adjust first week */ - x = days - 4; - } - else { - x = days - 2; - } - ret = 2 + (x / 7) * 5 + x % 7; - } - } - else if (fr == NPY_FR_D) { - ret = days; - } - else if (fr == NPY_FR_h) { - ret = days * 24 + d->hour; - } - else if (fr == NPY_FR_m) { - ret = days * 1440 + d->hour * 60 + d->min; - } - else if (fr == NPY_FR_s) { - ret = days * (npy_int64)(86400) + - secs_from_hms(d->hour, d->min, d->sec, 1); - } - else if (fr == NPY_FR_ms) { - ret = days * (npy_int64)(86400000) - + secs_from_hms(d->hour, d->min, d->sec, 1000) - + (d->us / 1000); - } - else if (fr == NPY_FR_us) { - npy_int64 num = 86400 * 1000; - num *= (npy_int64)(1000); - ret = days * num + secs_from_hms(d->hour, d->min, d->sec, 1000000) - + d->us; - } - else if (fr == NPY_FR_ns) { - npy_int64 num = 86400 * 1000; - num *= (npy_int64)(1000 * 1000); - ret = days * num + secs_from_hms(d->hour, d->min, d->sec, 1000000000) - + d->us * (npy_int64)(1000) + (d->ps / 1000); - } - else if (fr == NPY_FR_ps) { - npy_int64 num2 = 1000 * 1000; - npy_int64 num1; - - num2 *= (npy_int64)(1000 * 1000); - num1 = (npy_int64)(86400) * num2; - ret = days * num1 + secs_from_hms(d->hour, d->min, d->sec, num2) - + d->us * (npy_int64)(1000000) + d->ps; - } - else if (fr == NPY_FR_fs) { - /* only 2.6 hours */ - npy_int64 num2 = 1000000; - num2 *= (npy_int64)(1000000); - num2 *= (npy_int64)(1000); - - /* get number of seconds as a postive or negative number */ - if (days >= 0) { - ret = secs_from_hms(d->hour, d->min, d->sec, 1); - } - else { - ret = ((d->hour - 24)*3600 + d->min*60 + d->sec); - } - ret = ret * num2 + d->us * (npy_int64)(1000000000) - + d->ps * (npy_int64)(1000) + (d->as / 1000); - } - else if (fr == NPY_FR_as) { - /* only 9.2 secs */ - npy_int64 
num1, num2; - - num1 = 1000000; - num1 *= (npy_int64)(1000000); - num2 = num1 * (npy_int64)(1000000); - - if (days >= 0) { - ret = d->sec; - } - else { - ret = d->sec - 60; - } - ret = ret * num2 + d->us * num1 + d->ps * (npy_int64)(1000000) - + d->as; - } - else { - /* Shouldn't get here */ - PyErr_SetString(PyExc_ValueError, "invalid internal frequency"); - ret = -1; - } - - return ret; -} - -/* Uses Average values when frequency is Y, M, or B */ - -#define _DAYS_PER_MONTH 30.436875 -#define _DAYS_PER_YEAR 365.2425 - -/*NUMPY_API - * Create a timdelta value from a filled timedelta struct and resolution unit. - */ -NPY_NO_EXPORT npy_datetime -PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d) -{ - npy_datetime ret; - - if (fr == NPY_FR_Y) { - ret = d->day / _DAYS_PER_YEAR; - } - else if (fr == NPY_FR_M) { - ret = d->day / _DAYS_PER_MONTH; - } - else if (fr == NPY_FR_W) { - /* This is just 7-days for now. */ - if (d->day >= 0) { - ret = d->day / 7; - } - else { - ret = (d->day - 6) / 7; - } - } - else if (fr == NPY_FR_B) { - /* - * What is the meaning of a relative Business day? - * - * This assumes you want to take the day difference and - * convert it to business-day difference (removing 2 every 7). 
- */ - ret = (d->day / 7) * 5 + d->day % 7; - } - else if (fr == NPY_FR_D) { - ret = d->day; - } - else if (fr == NPY_FR_h) { - ret = d->day + d->sec / 3600; - } - else if (fr == NPY_FR_m) { - ret = d->day * (npy_int64)(1440) + d->sec / 60; - } - else if (fr == NPY_FR_s) { - ret = d->day * (npy_int64)(86400) + d->sec; - } - else if (fr == NPY_FR_ms) { - ret = d->day * (npy_int64)(86400000) + d->sec * 1000 + d->us / 1000; - } - else if (fr == NPY_FR_us) { - npy_int64 num = 86400000; - num *= (npy_int64)(1000); - ret = d->day * num + d->sec * (npy_int64)(1000000) + d->us; - } - else if (fr == NPY_FR_ns) { - npy_int64 num = 86400000; - num *= (npy_int64)(1000000); - ret = d->day * num + d->sec * (npy_int64)(1000000000) - + d->us * (npy_int64)(1000) + (d->ps / 1000); - } - else if (fr == NPY_FR_ps) { - npy_int64 num2, num1; - - num2 = 1000000; - num2 *= (npy_int64)(1000000); - num1 = (npy_int64)(86400) * num2; - - ret = d->day * num1 + d->sec * num2 + d->us * (npy_int64)(1000000) - + d->ps; - } - else if (fr == NPY_FR_fs) { - /* only 2.6 hours */ - npy_int64 num2 = 1000000000; - num2 *= (npy_int64)(1000000); - ret = d->sec * num2 + d->us * (npy_int64)(1000000000) - + d->ps * (npy_int64)(1000) + (d->as / 1000); - } - else if (fr == NPY_FR_as) { - /* only 9.2 secs */ - npy_int64 num1, num2; - - num1 = 1000000; - num1 *= (npy_int64)(1000000); - num2 = num1 * (npy_int64)(1000000); - ret = d->sec * num2 + d->us * num1 + d->ps * (npy_int64)(1000000) - + d->as; - } - else { - /* Shouldn't get here */ - PyErr_SetString(PyExc_ValueError, "invalid internal frequency"); - ret = -1; - } - - return ret; -} - - - -/*NUMPY_API - * Fill the datetime struct from the value and resolution unit. 
- */ -NPY_NO_EXPORT void -PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result) -{ - int year = 1970, month = 1, day = 1, - hour = 0, min = 0, sec = 0, - us = 0, ps = 0, as = 0; - - npy_int64 tmp; - ymdstruct ymd; - hmsstruct hms; - - /* - * Note that what looks like val / N and val % N for positive numbers maps to - * [val - (N-1)] / N and [N-1 + (val+1) % N] for negative numbers (with the 2nd - * value, the remainder, being positive in both cases). - */ - if (fr == NPY_FR_Y) { - year = 1970 + val; - } - else if (fr == NPY_FR_M) { - if (val >= 0) { - year = 1970 + val / 12; - month = val % 12 + 1; - } - else { - year = 1969 + (val + 1) / 12; - month = 12 + (val + 1)% 12; - } - } - else if (fr == NPY_FR_W) { - /* A week is the same as 7 days */ - ymd = days_to_ymdstruct(val * 7); - year = ymd.year; - month = ymd.month; - day = ymd.day; - } - else if (fr == NPY_FR_B) { - /* Number of business days since Thursday, 1-1-70 */ - npy_longlong absdays; - /* - * A buisness day is M T W Th F (i.e. all but Sat and Sun.) - * Convert the business day to the number of actual days. - * - * Must convert [0,1,2,3,4,5,6,7,...] to - * [0,1,4,5,6,7,8,11,...] 
- * and [...,-9,-8,-7,-6,-5,-4,-3,-2,-1,0] to - * [...,-13,-10,-9,-8,-7,-6,-3,-2,-1,0] - */ - if (val >= 0) { - absdays = 7 * ((val + 3) / 5) + ((val + 3) % 5) - 3; - } - else { - /* Recall how C computes / and % with negative numbers */ - absdays = 7 * ((val - 1) / 5) + ((val - 1) % 5) + 1; - } - ymd = days_to_ymdstruct(absdays); - year = ymd.year; - month = ymd.month; - day = ymd.day; - } - else if (fr == NPY_FR_D) { - ymd = days_to_ymdstruct(val); - year = ymd.year; - month = ymd.month; - day = ymd.day; - } - else if (fr == NPY_FR_h) { - if (val >= 0) { - ymd = days_to_ymdstruct(val / 24); - hour = val % 24; - } - else { - ymd = days_to_ymdstruct((val - 23) / 24); - hour = 23 + (val + 1) % 24; - } - year = ymd.year; - month = ymd.month; - day = ymd.day; - } - else if (fr == NPY_FR_m) { - if (val >= 0) { - ymd = days_to_ymdstruct(val / 1440); - min = val % 1440; - } - else { - ymd = days_to_ymdstruct((val - 1439) / 1440); - min = 1439 + (val + 1) % 1440; - } - hms = seconds_to_hmsstruct(min * 60); - year = ymd.year; - month = ymd.month; - day = ymd.day; - hour = hms.hour; - min = hms.min; - } - else if (fr == NPY_FR_s) { - if (val >= 0) { - ymd = days_to_ymdstruct(val / 86400); - sec = val % 86400; - } - else { - ymd = days_to_ymdstruct((val - 86399) / 86400); - sec = 86399 + (val + 1) % 86400; - } - hms = seconds_to_hmsstruct(sec); - year = ymd.year; - month = ymd.month; - day = ymd.day; - hour = hms.hour; - min = hms.min; - sec = hms.sec; - } - else if (fr == NPY_FR_ms) { - if (val >= 0) { - ymd = days_to_ymdstruct(val / 86400000); - tmp = val % 86400000; - } - else { - ymd = days_to_ymdstruct((val - 86399999) / 86400000); - tmp = 86399999 + (val + 1) % 86399999; - } - hms = seconds_to_hmsstruct(tmp / 1000); - us = (tmp % 1000)*1000; - year = ymd.year; - month = ymd.month; - day = ymd.day; - hour = hms.hour; - min = hms.min; - sec = hms.sec; - } - else if (fr == NPY_FR_us) { - npy_int64 num1, num2; - num1 = 86400000; - num1 *= 1000; - num2 = num1 - 1; - if (val 
>= 0) { - ymd = days_to_ymdstruct(val / num1); - tmp = val % num1; - } - else { - ymd = days_to_ymdstruct((val - num2)/ num1); - tmp = num2 + (val + 1) % num1; - } - hms = seconds_to_hmsstruct(tmp / 1000000); - us = tmp % 1000000; - year = ymd.year; - month = ymd.month; - day = ymd.day; - hour = hms.hour; - min = hms.min; - sec = hms.sec; - } - else if (fr == NPY_FR_ns) { - npy_int64 num1, num2, num3; - num1 = 86400000; - num1 *= 1000000000; - num2 = num1 - 1; - num3 = 1000000; - num3 *= 1000000; - if (val >= 0) { - ymd = days_to_ymdstruct(val / num1); - tmp = val % num1; - } - else { - ymd = days_to_ymdstruct((val - num2)/ num1); - tmp = num2 + (val + 1) % num1; - } - hms = seconds_to_hmsstruct(tmp / 1000000000); - tmp = tmp % 1000000000; - us = tmp / 1000; - ps = (tmp % 1000) * (npy_int64)(1000); - year = ymd.year; - month = ymd.month; - day = ymd.day; - hour = hms.hour; - min = hms.min; - sec = hms.sec; - } - else if (fr == NPY_FR_ps) { - npy_int64 num1, num2, num3; - num3 = 1000000000; - num3 *= (npy_int64)(1000); - num1 = (npy_int64)(86400) * num3; - num2 = num1 - 1; - - if (val >= 0) { - ymd = days_to_ymdstruct(val / num1); - tmp = val % num1; - } - else { - ymd = days_to_ymdstruct((val - num2) / num1); - tmp = num2 + (val + 1) % num1; - } - hms = seconds_to_hmsstruct(tmp / num3); - tmp = tmp % num3; - us = tmp / 1000000; - ps = tmp % 1000000; - year = ymd.year; - month = ymd.month; - day = ymd.day; - hour = hms.hour; - min = hms.min; - sec = hms.sec; - } - else if (fr == NPY_FR_fs) { - /* entire range is only += 2.6 hours */ - npy_int64 num1, num2; - num1 = 1000000000; - num1 *= (npy_int64)(1000); - num2 = num1 * (npy_int64)(1000); - - if (val >= 0) { - sec = val / num2; - tmp = val % num2; - hms = seconds_to_hmsstruct(sec); - hour = hms.hour; - min = hms.min; - sec = hms.sec; - } - else { - /* tmp (number of fs) will be positive after this segment */ - year = 1969; - day = 31; - month = 12; - sec = (val - (num2-1))/num2; - tmp = (num2-1) + (val + 1) % num2; 
- if (sec == 0) { - /* we are at the last second */ - hour = 23; - min = 59; - sec = 59; - } - else { - hour = 24 + (sec - 3599)/3600; - sec = 3599 + (sec+1)%3600; - min = sec / 60; - sec = sec % 60; - } - } - us = tmp / 1000000000; - tmp = tmp % 1000000000; - ps = tmp / 1000; - as = (tmp % 1000) * (npy_int64)(1000); - } - else if (fr == NPY_FR_as) { - /* entire range is only += 9.2 seconds */ - npy_int64 num1, num2, num3; - num1 = 1000000; - num2 = num1 * (npy_int64)(1000000); - num3 = num2 * (npy_int64)(1000000); - if (val >= 0) { - hour = 0; - min = 0; - sec = val / num3; - tmp = val % num3; - } - else { - year = 1969; - day = 31; - month = 12; - hour = 23; - min = 59; - sec = 60 + (val - (num3-1)) / num3; - tmp = (num3-1) + (val+1) % num3; - } - us = tmp / num2; - tmp = tmp % num2; - ps = tmp / num1; - as = tmp % num1; - } - else { - PyErr_SetString(PyExc_RuntimeError, "invalid internal time resolution"); - } - - result->year = year; - result->month = month; - result->day = day; - result->hour = hour; - result->min = min; - result->sec = sec; - result->us = us; - result->ps = ps; - result->as = as; - - return; -} - -/* - * FIXME: Overflow is not handled at all - * To convert from Years, Months, and Business Days, multiplication by the average is done - */ - -/*NUMPY_API - * Fill the timedelta struct from the timedelta value and resolution unit. - */ -NPY_NO_EXPORT void -PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr, - npy_timedeltastruct *result) -{ - npy_longlong day=0; - int sec=0, us=0, ps=0, as=0; - npy_bool negative=0; - - /* - * Note that what looks like val / N and val % N for positive numbers maps to - * [val - (N-1)] / N and [N-1 + (val+1) % N] for negative numbers (with the 2nd - * value, the remainder, being positive in both cases). 
- */ - - if (val < 0) { - val = -val; - negative = 1; - } - if (fr == NPY_FR_Y) { - day = val * _DAYS_PER_YEAR; - } - else if (fr == NPY_FR_M) { - day = val * _DAYS_PER_MONTH; - } - else if (fr == NPY_FR_W) { - day = val * 7; - } - else if (fr == NPY_FR_B) { - /* Number of business days since Thursday, 1-1-70 */ - day = (val * 7) / 5; - } - else if (fr == NPY_FR_D) { - day = val; - } - else if (fr == NPY_FR_h) { - day = val / 24; - sec = (val % 24)*3600; - } - else if (fr == NPY_FR_m) { - day = val / 1440; - sec = (val % 1440)*60; - } - else if (fr == NPY_FR_s) { - day = val / (86400); - sec = val % 86400; - } - else if (fr == NPY_FR_ms) { - day = val / 86400000; - val = val % 86400000; - sec = val / 1000; - us = (val % 1000)*1000; - } - else if (fr == NPY_FR_us) { - npy_int64 num1; - num1 = 86400000; - num1 *= 1000; - day = val / num1; - us = val % num1; - sec = us / 1000000; - us = us % 1000000; - } - else if (fr == NPY_FR_ns) { - npy_int64 num1; - num1 = 86400000; - num1 *= 1000000; - day = val / num1; - val = val % num1; - sec = val / 1000000000; - val = val % 1000000000; - us = val / 1000; - ps = (val % 1000) * (npy_int64)(1000); - } - else if (fr == NPY_FR_ps) { - npy_int64 num1, num2; - num2 = 1000000000; - num2 *= (npy_int64)(1000); - num1 = (npy_int64)(86400) * num2; - - day = val / num1; - ps = val % num1; - sec = ps / num2; - ps = ps % num2; - us = ps / 1000000; - ps = ps % 1000000; - } - else if (fr == NPY_FR_fs) { - /* entire range is only += 9.2 hours */ - npy_int64 num1, num2; - num1 = 1000000000; - num2 = num1 * (npy_int64)(1000000); - - day = 0; - sec = val / num2; - val = val % num2; - us = val / num1; - val = val % num1; - ps = val / 1000; - as = (val % 1000) * (npy_int64)(1000); - } - else if (fr == NPY_FR_as) { - /* entire range is only += 2.6 seconds */ - npy_int64 num1, num2, num3; - num1 = 1000000; - num2 = num1 * (npy_int64)(1000000); - num3 = num2 * (npy_int64)(1000000); - day = 0; - sec = val / num3; - as = val % num3; - us = as / num2; - 
as = as % num2; - ps = as / num1; - as = as % num1; - } - else { - PyErr_SetString(PyExc_RuntimeError, "invalid internal time resolution"); - } - - if (negative) { - result->day = -day; - result->sec = -sec; - result->us = -us; - result->ps = -ps; - result->as = -as; - } - else { - result->day = day; - result->sec = sec; - result->us = us; - result->ps = ps; - result->as = as; - } - return; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/descriptor.c b/numpy-1.6.2/numpy/core/src/multiarray/descriptor.c deleted file mode 100644 index 3d95583346..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/descriptor.c +++ /dev/null @@ -1,3025 +0,0 @@ -/* Array Descr Object */ - -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" - -#define _chk_byteorder(arg) (arg == '>' || arg == '<' || \ - arg == '|' || arg == '=') - -static PyObject *typeDict = NULL; /* Must be explicitly loaded */ - -static PyArray_Descr * -_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag); - -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_fromobj(PyObject *obj) -{ - PyObject *dtypedescr; - PyArray_Descr *new; - int ret; - - dtypedescr = PyObject_GetAttrString(obj, "dtype"); - PyErr_Clear(); - if (dtypedescr) { - ret = PyArray_DescrConverter(dtypedescr, &new); - Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) { - return new; - } - PyErr_Clear(); - } - /* Understand basic ctypes */ - dtypedescr = PyObject_GetAttrString(obj, "_type_"); - PyErr_Clear(); - if (dtypedescr) { - ret = PyArray_DescrConverter(dtypedescr, &new); - Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) { - PyObject *length; - length = PyObject_GetAttrString(obj, "_length_"); - PyErr_Clear(); - if (length) { - /* derived type */ - PyObject *newtup; - PyArray_Descr *derived; - newtup = Py_BuildValue("NO", new, 
length); - ret = PyArray_DescrConverter(newtup, &derived); - Py_DECREF(newtup); - if (ret == PY_SUCCEED) { - return derived; - } - PyErr_Clear(); - return NULL; - } - return new; - } - PyErr_Clear(); - return NULL; - } - /* Understand ctypes structures -- - bit-fields are not supported - automatically aligns */ - dtypedescr = PyObject_GetAttrString(obj, "_fields_"); - PyErr_Clear(); - if (dtypedescr) { - ret = PyArray_DescrAlignConverter(dtypedescr, &new); - Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) { - return new; - } - PyErr_Clear(); - } - return NULL; -} - -NPY_NO_EXPORT PyObject * -array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args) -{ - PyObject *dict; - - if (!PyArg_ParseTuple(args, "O", &dict)) { - return NULL; - } - /* Decrement old reference (if any)*/ - Py_XDECREF(typeDict); - typeDict = dict; - /* Create an internal reference to it */ - Py_INCREF(dict); - Py_INCREF(Py_None); - return Py_None; -} - -static int -_check_for_commastring(char *type, int len) -{ - int i; - - /* Check for ints at start of string */ - if ((type[0] >= '0' - && type[0] <= '9') - || ((len > 1) - && _chk_byteorder(type[0]) - && (type[1] >= '0' - && type[1] <= '9'))) { - return 1; - } - /* Check for empty tuple */ - if (((len > 1) - && (type[0] == '(' - && type[1] == ')')) - || ((len > 3) - && _chk_byteorder(type[0]) - && (type[1] == '(' - && type[2] == ')'))) { - return 1; - } - /* Check for presence of commas */ - for (i = 1; i < len; i++) { - if (type[i] == ',') { - return 1; - } - } - return 0; -} - -static int -_check_for_datetime(char *type, int len) -{ - if (len < 1) { - return 0; - } - if (type[1] == '8' && (type[0] == 'M' || type[0] == 'm')) { - return 1; - } - if (len < 10) { - return 0; - } - if (strncmp(type, "datetime64", 10) == 0) { - return 1; - } - if (len < 11) { - return 0; - } - if (strncmp(type, "timedelta64", 11) == 0) { - return 1; - } - return 0; -} - - - -#undef _chk_byteorder - -static PyArray_Descr * -_convert_from_tuple(PyObject *obj) -{ 
- PyArray_Descr *type, *res; - PyObject *val; - int errflag; - - if (PyTuple_GET_SIZE(obj) != 2) { - return NULL; - } - if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) { - return NULL; - } - val = PyTuple_GET_ITEM(obj,1); - /* try to interpret next item as a type */ - res = _use_inherit(type, val, &errflag); - if (res || errflag) { - Py_DECREF(type); - if (res) { - return res; - } - else { - return NULL; - } - } - PyErr_Clear(); - /* - * We get here if res was NULL but errflag wasn't set - * --- i.e. the conversion to a data-descr failed in _use_inherit - */ - if (type->elsize == 0) { - /* interpret next item as a typesize */ - int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { - PyErr_SetString(PyExc_ValueError, - "invalid itemsize in generic type tuple"); - goto fail; - } - PyArray_DESCR_REPLACE(type); - if (type->type_num == PyArray_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } - } - else if (PyDict_Check(val)) { - /* Assume it's a metadata dictionary */ - if (PyDict_Merge(type->metadata, val, 0) == -1) { - Py_DECREF(type); - return NULL; - } - } - else { - /* - * interpret next item as shape (if it's a tuple) - * and reset the type to PyArray_VOID with - * a new fields attribute. - */ - PyArray_Dims shape = {NULL, -1}; - PyArray_Descr *newdescr; - - if (!(PyArray_IntpConverter(val, &shape)) || (shape.len > MAX_DIMS)) { - PyDimMem_FREE(shape.ptr); - PyErr_SetString(PyExc_ValueError, - "invalid shape in fixed-type tuple."); - goto fail; - } - /* - * If (type, 1) was given, it is equivalent to type... - * or (type, ()) was given it is equivalent to type... 
- */ - if ((shape.len == 1 - && shape.ptr[0] == 1 - && PyNumber_Check(val)) - || (shape.len == 0 - && PyTuple_Check(val))) { - PyDimMem_FREE(shape.ptr); - return type; - } - newdescr = PyArray_DescrNewFromType(PyArray_VOID); - if (newdescr == NULL) { - PyDimMem_FREE(shape.ptr); - goto fail; - } - newdescr->elsize = type->elsize; - newdescr->elsize *= PyArray_MultiplyList(shape.ptr, shape.len); - PyDimMem_FREE(shape.ptr); - newdescr->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); - newdescr->flags = type->flags; - newdescr->subarray->base = type; - type = NULL; - Py_XDECREF(newdescr->fields); - Py_XDECREF(newdescr->names); - newdescr->fields = NULL; - newdescr->names = NULL; - /* Force subarray->shape to always be a tuple */ - if (PyTuple_Check(val)) { - Py_INCREF(val); - newdescr->subarray->shape = val; - } else { - newdescr->subarray->shape = Py_BuildValue("(O)", val); - if (newdescr->subarray->shape == NULL) { - Py_DECREF(newdescr); - goto fail; - } - } - type = newdescr; - } - return type; - - fail: - Py_XDECREF(type); - return NULL; -} - -/* - * obj is a list. Each item is a tuple with - * - * (field-name, data-type (either a list or a string), and an optional - * shape parameter). 
- * - * field-name can be a string or a 2-tuple - * data-type can now be a list, string, or 2-tuple (string, metadata dictionary)) - */ - -static PyArray_Descr * -_convert_from_array_descr(PyObject *obj, int align) -{ - int n, i, totalsize; - int ret; - PyObject *fields, *item, *newobj; - PyObject *name, *tup, *title; - PyObject *nameslist; - PyArray_Descr *new; - PyArray_Descr *conv; - char dtypeflags = 0; - int maxalign = 0; - - n = PyList_GET_SIZE(obj); - nameslist = PyTuple_New(n); - if (!nameslist) { - return NULL; - } - totalsize = 0; - fields = PyDict_New(); - for (i = 0; i < n; i++) { - item = PyList_GET_ITEM(obj, i); - if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { - goto fail; - } - name = PyTuple_GET_ITEM(item, 0); - if (PyUString_Check(name)) { - title = NULL; - } - else if (PyTuple_Check(name)) { - if (PyTuple_GET_SIZE(name) != 2) { - goto fail; - } - title = PyTuple_GET_ITEM(name, 0); - name = PyTuple_GET_ITEM(name, 1); - if (!PyUString_Check(name)) { - goto fail; - } - } - else { - goto fail; - } - - /* Insert name into nameslist */ - Py_INCREF(name); - - if (PyUString_GET_SIZE(name) == 0) { - Py_DECREF(name); - if (title == NULL) { - name = PyUString_FromFormat("f%d", i); - } -#if defined(NPY_PY3K) - /* On Py3, allow only non-empty Unicode strings as field names */ - else if (PyUString_Check(title) && PyUString_GET_SIZE(title) > 0) { - name = title; - Py_INCREF(name); - } - else { - goto fail; - } -#else - else { - name = title; - Py_INCREF(name); - } -#endif - } - PyTuple_SET_ITEM(nameslist, i, name); - - /* Process rest */ - - if (PyTuple_GET_SIZE(item) == 2) { - ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1), &conv); - if (ret == PY_FAIL) { - PyObject_Print(PyTuple_GET_ITEM(item, 1), stderr, 0); - } - } - else if (PyTuple_GET_SIZE(item) == 3) { - newobj = PyTuple_GetSlice(item, 1, 3); - ret = PyArray_DescrConverter(newobj, &conv); - Py_DECREF(newobj); - } - else { - goto fail; - } - if (ret == PY_FAIL) { - goto fail; - } - if 
((PyDict_GetItem(fields, name) != NULL) -#if defined(NPY_PY3K) - || (title - && PyUString_Check(title) - && (PyDict_GetItem(fields, title) != NULL))) { -#else - || (title - && (PyUString_Check(title) || PyUnicode_Check(title)) - && (PyDict_GetItem(fields, title) != NULL))) { -#endif - PyErr_SetString(PyExc_ValueError, - "two fields with the same name"); - goto fail; - } - dtypeflags |= (conv->flags & NPY_FROM_FIELDS); - tup = PyTuple_New((title == NULL ? 2 : 3)); - PyTuple_SET_ITEM(tup, 0, (PyObject *)conv); - if (align) { - int _align; - - _align = conv->alignment; - if (_align > 1) { - totalsize = ((totalsize + _align - 1)/_align)*_align; - } - maxalign = MAX(maxalign, _align); - } - PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); - - PyDict_SetItem(fields, name, tup); - - /* - * Title can be "meta-data". Only insert it - * into the fields dictionary if it is a string - * and if it is not the same as the name. - */ - if (title != NULL) { - Py_INCREF(title); - PyTuple_SET_ITEM(tup, 2, title); -#if defined(NPY_PY3K) - if (PyUString_Check(title)) { -#else - if (PyUString_Check(title) || PyUnicode_Check(title)) { -#endif - if (PyDict_GetItem(fields, title) != NULL) { - PyErr_SetString(PyExc_ValueError, - "title already used as a name or title."); - Py_DECREF(tup); - goto fail; - } - PyDict_SetItem(fields, title, tup); - } - } - totalsize += conv->elsize; - Py_DECREF(tup); - } - new = PyArray_DescrNewFromType(PyArray_VOID); - new->fields = fields; - new->names = nameslist; - new->elsize = totalsize; - new->flags = dtypeflags; - - /* Structured arrays get a sticky aligned bit */ - if (align) { - new->alignment = maxalign; - } - return new; - - fail: - Py_DECREF(fields); - Py_DECREF(nameslist); - return NULL; - -} - -/* - * a list specifying a data-type can just be - * a list of formats. The names for the fields - * will default to f0, f1, f2, and so forth. 
- */ -static PyArray_Descr * -_convert_from_list(PyObject *obj, int align) -{ - int n, i; - int totalsize; - PyObject *fields; - PyArray_Descr *conv = NULL; - PyArray_Descr *new; - PyObject *key, *tup; - PyObject *nameslist = NULL; - int ret; - int maxalign = 0; - char dtypeflags = 0; - - n = PyList_GET_SIZE(obj); - /* - * Ignore any empty string at end which _internal._commastring - * can produce - */ - key = PyList_GET_ITEM(obj, n-1); - if (PyBytes_Check(key) && PyBytes_GET_SIZE(key) == 0) { - n = n - 1; - } - /* End ignore code.*/ - totalsize = 0; - if (n == 0) { - return NULL; - } - nameslist = PyTuple_New(n); - if (!nameslist) { - return NULL; - } - fields = PyDict_New(); - for (i = 0; i < n; i++) { - tup = PyTuple_New(2); - key = PyUString_FromFormat("f%d", i); - ret = PyArray_DescrConverter(PyList_GET_ITEM(obj, i), &conv); - if (ret == PY_FAIL) { - Py_DECREF(tup); - Py_DECREF(key); - goto fail; - } - dtypeflags |= (conv->flags & NPY_FROM_FIELDS); - PyTuple_SET_ITEM(tup, 0, (PyObject *)conv); - if (align) { - int _align; - - _align = conv->alignment; - if (_align > 1) { - totalsize = ((totalsize + _align - 1)/_align)*_align; - } - maxalign = MAX(maxalign, _align); - } - PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); - PyDict_SetItem(fields, key, tup); - Py_DECREF(tup); - PyTuple_SET_ITEM(nameslist, i, key); - totalsize += conv->elsize; - } - new = PyArray_DescrNewFromType(PyArray_VOID); - new->fields = fields; - new->names = nameslist; - new->flags=dtypeflags; - if (maxalign > 1) { - totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; - } - if (align) { - new->alignment = maxalign; - } - new->elsize = totalsize; - return new; - - fail: - Py_DECREF(nameslist); - Py_DECREF(fields); - return NULL; -} - -/* Exported as DATETIMEUNITS in multiarraymodule.c */ -NPY_NO_EXPORT char *_datetime_strings[] = { - NPY_STR_Y, - NPY_STR_M, - NPY_STR_W, - NPY_STR_B, - NPY_STR_D, - NPY_STR_h, - NPY_STR_m, - NPY_STR_s, - NPY_STR_ms, - NPY_STR_us, - NPY_STR_ns, - 
NPY_STR_ps, - NPY_STR_fs, - NPY_STR_as -}; - -static NPY_DATETIMEUNIT - _unit_from_str(char *base) -{ - NPY_DATETIMEUNIT unit; - - if (base == NULL) { - return NPY_DATETIME_DEFAULTUNIT; - } - - unit = NPY_FR_Y; - while (unit < NPY_DATETIME_NUMUNITS) { - if (strcmp(base, _datetime_strings[unit]) == 0) { - break; - } - unit++; - } - if (unit == NPY_DATETIME_NUMUNITS) { - return NPY_DATETIME_DEFAULTUNIT; - } - - return unit; -} - -static NPY_DATETIMEUNIT _multiples_table[16][4] = { - {12, 52, 365}, /* NPY_FR_Y */ - {NPY_FR_M, NPY_FR_W, NPY_FR_D}, - {4, 30, 720}, /* NPY_FR_M */ - {NPY_FR_W, NPY_FR_D, NPY_FR_h}, - {5, 7, 168, 10080}, /* NPY_FR_W */ - {NPY_FR_B, NPY_FR_D, NPY_FR_h, NPY_FR_m}, - {24, 1440, 86400}, /* NPY_FR_B */ - {NPY_FR_h, NPY_FR_m, NPY_FR_s}, - {24, 1440, 86400}, /* NPY_FR_D */ - {NPY_FR_h, NPY_FR_m, NPY_FR_s}, - {60, 3600}, /* NPY_FR_h */ - {NPY_FR_m, NPY_FR_s}, - {60, 60000}, /* NPY_FR_m */ - {NPY_FR_s, NPY_FR_ms}, - {1000, 1000000}, /* >=NPY_FR_s */ - {0, 0} -}; - - -/* Translate divisors into multiples of smaller units */ -static int -_convert_divisor_to_multiple(PyArray_DatetimeMetaData *meta) -{ - int i, num, ind; - NPY_DATETIMEUNIT *totry; - NPY_DATETIMEUNIT *baseunit; - int q, r; - - ind = ((int)meta->base - (int)NPY_FR_Y)*2; - totry = _multiples_table[ind]; - baseunit = _multiples_table[ind + 1]; - - num = 3; - if (meta->base == NPY_FR_W) { - num = 4; - } - else if (meta->base > NPY_FR_D) { - num = 2; - } - if (meta->base >= NPY_FR_s) { - ind = ((int)NPY_FR_s - (int)NPY_FR_Y)*2; - totry = _multiples_table[ind]; - baseunit = _multiples_table[ind + 1]; - baseunit[0] = meta->base + 1; - baseunit[1] = meta->base + 2; - if (meta->base == NPY_DATETIME_NUMUNITS - 2) { - num = 1; - } - if (meta->base == NPY_DATETIME_NUMUNITS - 1) { - num = 0; - } - } - - for (i = 0; i < num; i++) { - q = totry[i] / meta->den; - r = totry[i] % meta->den; - if (r == 0) { - break; - } - } - if (i == num) { - PyErr_Format(PyExc_ValueError, - "divisor (%d) is not a 
multiple of a lower-unit", meta->den); - return -1; - } - meta->base = baseunit[i]; - meta->den = 1; - meta->num *= q; - - return 0; -} - - -static PyObject * -_get_datetime_tuple_from_cobj(PyObject *cobj) -{ - PyArray_DatetimeMetaData *dt_data; - PyObject *dt_tuple; - - dt_data = NpyCapsule_AsVoidPtr(cobj); - dt_tuple = PyTuple_New(4); - - PyTuple_SET_ITEM(dt_tuple, 0, - PyBytes_FromString(_datetime_strings[dt_data->base])); - PyTuple_SET_ITEM(dt_tuple, 1, - PyInt_FromLong(dt_data->num)); - PyTuple_SET_ITEM(dt_tuple, 2, - PyInt_FromLong(dt_data->den)); - PyTuple_SET_ITEM(dt_tuple, 3, - PyInt_FromLong(dt_data->events)); - - return dt_tuple; -} - -static PyObject * -_convert_datetime_tuple_to_cobj(PyObject *tuple) -{ - PyArray_DatetimeMetaData *dt_data; - PyObject *ret; - - dt_data = _pya_malloc(sizeof(PyArray_DatetimeMetaData)); - dt_data->base = _unit_from_str( - PyBytes_AsString(PyTuple_GET_ITEM(tuple, 0))); - - /* Assumes other objects are Python integers */ - dt_data->num = PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 1)); - dt_data->den = PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 2)); - dt_data->events = PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3)); - - if (dt_data->den > 1) { - if (_convert_divisor_to_multiple(dt_data) < 0) { - return NULL; - } - } - -/* FIXME - * There is no error handling here. 
- */ - ret = NpyCapsule_FromVoidPtr((void *)dt_data, simple_capsule_dtor); - return ret; -} - -static PyArray_Descr * -_convert_from_datetime_tuple(PyObject *obj) -{ - PyArray_Descr *new; - PyObject *dt_tuple; - PyObject *dt_cobj; - PyObject *datetime_flag; - - if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj)!=2) { - PyErr_SetString(PyExc_RuntimeError, - "_datetimestring is not returning a tuple with length 2"); - return NULL; - } - - dt_tuple = PyTuple_GET_ITEM(obj, 0); - datetime_flag = PyTuple_GET_ITEM(obj, 1); - if (!PyTuple_Check(dt_tuple) - || PyTuple_GET_SIZE(dt_tuple) != 4 - || !PyInt_Check(datetime_flag)) { - PyErr_SetString(PyExc_RuntimeError, - "_datetimestring is not returning a length 4 tuple"\ - " and an integer"); - return NULL; - } - - /* Create new timedelta or datetime dtype */ - if (PyObject_IsTrue(datetime_flag)) { - new = PyArray_DescrNewFromType(PyArray_DATETIME); - } - else { - new = PyArray_DescrNewFromType(PyArray_TIMEDELTA); - } - - if (new == NULL) { - return NULL; - } - /* - * Remove any reference to old metadata dictionary - * And create a new one for this new dtype - */ - Py_XDECREF(new->metadata); - if ((new->metadata = PyDict_New()) == NULL) { - return NULL; - } - dt_cobj = _convert_datetime_tuple_to_cobj(dt_tuple); - if (dt_cobj == NULL) { - /* Failure in conversion */ - Py_DECREF(new); - return NULL; - } - - /* Assume this sets a new reference to dt_cobj */ - PyDict_SetItemString(new->metadata, NPY_METADATA_DTSTR, dt_cobj); - Py_DECREF(dt_cobj); - return new; -} - - -static PyArray_Descr * -_convert_from_datetime(PyObject *obj) -{ - PyObject *tupleobj; - PyArray_Descr *res; - PyObject *_numpy_internal; - - if (!PyBytes_Check(obj)) { - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - tupleobj = PyObject_CallMethod(_numpy_internal, - "_datetimestring", "O", obj); - Py_DECREF(_numpy_internal); - if (!tupleobj) { - return NULL; - } - /* - * tuple 
of a standard tuple (baseunit, num, den, events) and a timedelta - * boolean - */ - res = _convert_from_datetime_tuple(tupleobj); - Py_DECREF(tupleobj); - if (!res && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "invalid data-type"); - return NULL; - } - return res; -} - - -/* - * comma-separated string - * this is the format developed by the numarray records module and implemented - * by the format parser in that module this is an alternative implementation - * found in the _internal.py file patterned after that one -- the approach is - * to try to convert to a list (with tuples if any repeat information is - * present) and then call the _convert_from_list) - */ -static PyArray_Descr * -_convert_from_commastring(PyObject *obj, int align) -{ - PyObject *listobj; - PyArray_Descr *res; - PyObject *_numpy_internal; - - if (!PyBytes_Check(obj)) { - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - listobj = PyObject_CallMethod(_numpy_internal, "_commastring", "O", obj); - Py_DECREF(_numpy_internal); - if (!listobj) { - return NULL; - } - if (!PyList_Check(listobj) || PyList_GET_SIZE(listobj) < 1) { - PyErr_SetString(PyExc_RuntimeError, - "_commastring is not returning a list with len >= 1"); - return NULL; - } - if (PyList_GET_SIZE(listobj) == 1) { - if (PyArray_DescrConverter( - PyList_GET_ITEM(listobj, 0), &res) == NPY_FAIL) { - res = NULL; - } - } - else { - res = _convert_from_list(listobj, align); - } - Py_DECREF(listobj); - if (!res && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "invalid data-type"); - return NULL; - } - return res; -} - -static int -_is_tuple_of_integers(PyObject *obj) -{ - int i; - - if (!PyTuple_Check(obj)) { - return 0; - } - for (i = 0; i < PyTuple_GET_SIZE(obj); i++) { - if (!PyArray_IsIntegerScalar(PyTuple_GET_ITEM(obj, i))) { - return 0; - } - } - return 1; -} - -/* - * A tuple type would be either (generic typeobject, 
typesize) - * or (fixed-length data-type, shape) - * - * or (inheriting data-type, new-data-type) - * The new data-type must have the same itemsize as the inheriting data-type - * unless the latter is 0 - * - * Thus (int32, {'real':(int16,0),'imag',(int16,2)}) - * - * is one way to specify a descriptor that will give - * a['real'] and a['imag'] to an int32 array. - * - * leave type reference alone - */ -static PyArray_Descr * -_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag) -{ - PyArray_Descr *new; - PyArray_Descr *conv; - - *errflag = 0; - if (PyArray_IsScalar(newobj, Integer) - || _is_tuple_of_integers(newobj) - || !PyArray_DescrConverter(newobj, &conv)) { - return NULL; - } - *errflag = 1; - new = PyArray_DescrNew(type); - if (new == NULL) { - goto fail; - } - if (new->elsize && new->elsize != conv->elsize) { - PyErr_SetString(PyExc_ValueError, - "mismatch in size of old and new data-descriptor"); - goto fail; - } - new->elsize = conv->elsize; - if (conv->names) { - new->fields = conv->fields; - Py_XINCREF(new->fields); - new->names = conv->names; - Py_XINCREF(new->names); - } - new->flags = conv->flags; - Py_DECREF(conv); - *errflag = 0; - return new; - - fail: - Py_DECREF(conv); - return NULL; -} - -/* - * a dictionary specifying a data-type - * must have at least two and up to four - * keys These must all be sequences of the same length. - * - * can also have an additional key called "metadata" which can be any dictionary - * - * "names" --- field names - * "formats" --- the data-type descriptors for the field. - * - * Optional: - * - * "offsets" --- integers indicating the offset into the - * record of the start of the field. - * if not given, then "consecutive offsets" - * will be assumed and placed in the dictionary. - * - * "titles" --- Allows the use of an additional key - * for the fields dictionary.(if these are strings - * or unicode objects) or - * this can also be meta-data to - * be passed around with the field description. 
- * - * Attribute-lookup-based field names merely has to query the fields - * dictionary of the data-descriptor. Any result present can be used - * to return the correct field. - * - * So, the notion of what is a name and what is a title is really quite - * arbitrary. - * - * What does distinguish a title, however, is that if it is not None, - * it will be placed at the end of the tuple inserted into the - * fields dictionary.and can therefore be used to carry meta-data around. - * - * If the dictionary does not have "names" and "formats" entries, - * then it will be checked for conformity and used directly. - */ -static PyArray_Descr * -_use_fields_dict(PyObject *obj, int align) -{ - PyObject *_numpy_internal; - PyArray_Descr *res; - - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; -} - -static PyArray_Descr * -_convert_from_dict(PyObject *obj, int align) -{ - PyArray_Descr *new; - PyObject *fields = NULL; - PyObject *names, *offsets, *descrs, *titles; - PyObject *metadata; - int n, i; - int totalsize; - int maxalign = 0; - char dtypeflags = 0; - int has_out_of_order_fields = 0; - - fields = PyDict_New(); - if (fields == NULL) { - return (PyArray_Descr *)PyErr_NoMemory(); - } - names = PyDict_GetItemString(obj, "names"); - descrs = PyDict_GetItemString(obj, "formats"); - if (!names || !descrs) { - Py_DECREF(fields); - return _use_fields_dict(obj, align); - } - n = PyObject_Length(names); - offsets = PyDict_GetItemString(obj, "offsets"); - titles = PyDict_GetItemString(obj, "titles"); - if ((n > PyObject_Length(descrs)) - || (offsets && (n > PyObject_Length(offsets))) - || (titles && (n > PyObject_Length(titles)))) { - PyErr_SetString(PyExc_ValueError, - "all items in the dictionary must have the same length."); - goto fail; - } - - totalsize = 0; - for (i = 
0; i < n; i++) { - PyObject *tup, *descr, *index, *item, *name, *off; - int len, ret, _align = 1; - PyArray_Descr *newdescr; - - /* Build item to insert (descr, offset, [title])*/ - len = 2; - item = NULL; - index = PyInt_FromLong(i); - if (titles) { - item=PyObject_GetItem(titles, index); - if (item && item != Py_None) { - len = 3; - } - else { - Py_XDECREF(item); - } - PyErr_Clear(); - } - tup = PyTuple_New(len); - descr = PyObject_GetItem(descrs, index); - if (!descr) { - goto fail; - } - ret = PyArray_DescrConverter(descr, &newdescr); - Py_DECREF(descr); - if (ret == PY_FAIL) { - Py_DECREF(tup); - Py_DECREF(index); - goto fail; - } - PyTuple_SET_ITEM(tup, 0, (PyObject *)newdescr); - if (align) { - _align = newdescr->alignment; - maxalign = MAX(maxalign,_align); - } - if (offsets) { - long offset; - off = PyObject_GetItem(offsets, index); - if (!off) { - goto fail; - } - offset = PyInt_AsLong(off); - PyTuple_SET_ITEM(tup, 1, off); - if (offset < totalsize) { - PyErr_SetString(PyExc_ValueError, - "invalid offset (must be ordered)"); - ret = PY_FAIL; - } - if (offset > totalsize) { - totalsize = offset; - } - } - else { - if (align && _align > 1) { - totalsize = ((totalsize + _align - 1)/_align)*_align; - } - PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize)); - } - if (len == 3) { - PyTuple_SET_ITEM(tup, 2, item); - } - name = PyObject_GetItem(names, index); - if (!name) { - goto fail; - } - Py_DECREF(index); -#if defined(NPY_PY3K) - if (!PyUString_Check(name)) { -#else - if (!(PyUString_Check(name) || PyUnicode_Check(name))) { -#endif - PyErr_SetString(PyExc_ValueError, - "field names must be strings"); - ret = PY_FAIL; - } - - /* Insert into dictionary */ - if (PyDict_GetItem(fields, name) != NULL) { - PyErr_SetString(PyExc_ValueError, - "name already used as a name or title"); - ret = PY_FAIL; - } - PyDict_SetItem(fields, name, tup); - Py_DECREF(name); - if (len == 3) { -#if defined(NPY_PY3K) - if (PyUString_Check(item)) { -#else - if (PyUString_Check(item) 
|| PyUnicode_Check(item)) { -#endif - if (PyDict_GetItem(fields, item) != NULL) { - PyErr_SetString(PyExc_ValueError, - "title already used as a name or title."); - ret=PY_FAIL; - } - PyDict_SetItem(fields, item, tup); - } - } - Py_DECREF(tup); - if ((ret == PY_FAIL) || (newdescr->elsize == 0)) { - goto fail; - } - dtypeflags |= (newdescr->flags & NPY_FROM_FIELDS); - totalsize += newdescr->elsize; - } - - new = PyArray_DescrNewFromType(PyArray_VOID); - if (new == NULL) { - goto fail; - } - if (maxalign > 1) { - totalsize = ((totalsize + maxalign - 1)/maxalign)*maxalign; - } - if (align) { - new->alignment = maxalign; - } - new->elsize = totalsize; - if (!PyTuple_Check(names)) { - names = PySequence_Tuple(names); - } - else { - Py_INCREF(names); - } - new->names = names; - new->fields = fields; - new->flags = dtypeflags; - - metadata = PyDict_GetItemString(obj, "metadata"); - - if (new->metadata == NULL) { - new->metadata = metadata; - Py_XINCREF(new->metadata); - } - else if (metadata != NULL) { - if (PyDict_Merge(new->metadata, metadata, 0) == -1) { - Py_DECREF(new); - return NULL; - } - } - return new; - - fail: - Py_XDECREF(fields); - return NULL; -} - - -/*NUMPY_API*/ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrNewFromType(int type_num) -{ - PyArray_Descr *old; - PyArray_Descr *new; - - old = PyArray_DescrFromType(type_num); - new = PyArray_DescrNew(old); - Py_DECREF(old); - return new; -} - -/*NUMPY_API - * Get typenum from an object -- None goes to NULL - */ -NPY_NO_EXPORT int -PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) -{ - if (obj == Py_None) { - *at = NULL; - return PY_SUCCEED; - } - else { - return PyArray_DescrConverter(obj, at); - } -} - -/*NUMPY_API - * Get typenum from an object -- None goes to PyArray_DEFAULT - * This function takes a Python object representing a type and converts it - * to a the correct PyArray_Descr * structure to describe the type. 
- * - * Many objects can be used to represent a data-type which in NumPy is - * quite a flexible concept. - * - * This is the central code that converts Python objects to - * Type-descriptor objects that are used throughout numpy. - * new reference in *at - */ -NPY_NO_EXPORT int -PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) -{ - char *type; - int check_num = PyArray_NOTYPE + 10; - int len; - PyObject *item; - int elsize = 0; - char endian = '='; - - *at = NULL; - /* default */ - if (obj == Py_None) { - *at = PyArray_DescrFromType(PyArray_DEFAULT); - return PY_SUCCEED; - } - if (PyArray_DescrCheck(obj)) { - *at = (PyArray_Descr *)obj; - Py_INCREF(*at); - return PY_SUCCEED; - } - - if (PyType_Check(obj)) { - if (PyType_IsSubtype((PyTypeObject *)obj, &PyGenericArrType_Type)) { - *at = PyArray_DescrFromTypeObject(obj); - if (*at) { - return PY_SUCCEED; - } - else { - return PY_FAIL; - } - } - check_num = PyArray_OBJECT; -#if !defined(NPY_PY3K) - if (obj == (PyObject *)(&PyInt_Type)) { - check_num = PyArray_LONG; - } - else if (obj == (PyObject *)(&PyLong_Type)) { - check_num = PyArray_LONGLONG; - } -#else - if (obj == (PyObject *)(&PyLong_Type)) { - check_num = PyArray_LONG; - } -#endif - else if (obj == (PyObject *)(&PyFloat_Type)) { - check_num = PyArray_DOUBLE; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - check_num = PyArray_CDOUBLE; - } - else if (obj == (PyObject *)(&PyBool_Type)) { - check_num = PyArray_BOOL; - } - else if (obj == (PyObject *)(&PyBytes_Type)) { - check_num = PyArray_STRING; - } - else if (obj == (PyObject *)(&PyUnicode_Type)) { - check_num = PyArray_UNICODE; - } -#if defined(NPY_PY3K) - else if (obj == (PyObject *)(&PyMemoryView_Type)) { - check_num = PyArray_VOID; - } -#else - else if (obj == (PyObject *)(&PyBuffer_Type)) { - check_num = PyArray_VOID; - } -#endif - else { - *at = _arraydescr_fromobj(obj); - if (*at) { - return PY_SUCCEED; - } - } - goto finish; - } - - /* or a typecode string */ - - if 
(PyUnicode_Check(obj)) { - /* Allow unicode format strings: convert to bytes */ - int retval; - PyObject *obj2; - obj2 = PyUnicode_AsASCIIString(obj); - if (obj2 == NULL) { - return PY_FAIL; - } - retval = PyArray_DescrConverter(obj2, at); - Py_DECREF(obj2); - return retval; - } - - if (PyBytes_Check(obj)) { - /* Check for a string typecode. */ - type = PyBytes_AS_STRING(obj); - len = PyBytes_GET_SIZE(obj); - if (len <= 0) { - goto fail; - } - /* check for datetime format */ - if ((len > 1) && _check_for_datetime(type, len)) { - *at = _convert_from_datetime(obj); - if (*at) { - return PY_SUCCEED; - } - return PY_FAIL; - } - /* check for commas present or first (or second) element a digit */ - if (_check_for_commastring(type, len)) { - *at = _convert_from_commastring(obj, 0); - if (*at) { - return PY_SUCCEED; - } - return PY_FAIL; - } - check_num = (int) type[0]; - if ((char) check_num == '>' - || (char) check_num == '<' - || (char) check_num == '|' - || (char) check_num == '=') { - if (len <= 1) { - goto fail; - } - endian = (char) check_num; - type++; len--; - check_num = (int) type[0]; - if (endian == '|') { - endian = '='; - } - } - if (len > 1) { - elsize = atoi(type + 1); - if (elsize == 0) { - check_num = PyArray_NOTYPE+10; - } - /* - * When specifying length of UNICODE - * the number of characters is given to match - * the STRING interface. Each character can be - * more than one byte and itemsize must be - * the number of bytes. 
- */ - else if (check_num == PyArray_UNICODELTR) { - elsize <<= 2; - } - /* Support for generic processing c4, i4, f8, etc...*/ - else if ((check_num != PyArray_STRINGLTR) - && (check_num != PyArray_VOIDLTR) - && (check_num != PyArray_STRINGLTR2)) { - check_num = PyArray_TypestrConvert(elsize, check_num); - if (check_num == PyArray_NOTYPE) { - check_num += 10; - } - elsize = 0; - } - } - } - else if (PyTuple_Check(obj)) { - /* or a tuple */ - *at = _convert_from_tuple(obj); - if (*at == NULL){ - if (PyErr_Occurred()) { - return PY_FAIL; - } - goto fail; - } - return PY_SUCCEED; - } - else if (PyList_Check(obj)) { - /* or a list */ - *at = _convert_from_array_descr(obj,0); - if (*at == NULL) { - if (PyErr_Occurred()) { - return PY_FAIL; - } - goto fail; - } - return PY_SUCCEED; - } - else if (PyDict_Check(obj)) { - /* or a dictionary */ - *at = _convert_from_dict(obj,0); - if (*at == NULL) { - if (PyErr_Occurred()) { - return PY_FAIL; - } - goto fail; - } - return PY_SUCCEED; - } - else if (PyArray_Check(obj)) { - goto fail; - } - else { - *at = _arraydescr_fromobj(obj); - if (*at) { - return PY_SUCCEED; - } - if (PyErr_Occurred()) { - return PY_FAIL; - } - goto fail; - } - if (PyErr_Occurred()) { - goto fail; - } - /* if (check_num == PyArray_NOTYPE) { - return PY_FAIL; - } - */ - - finish: - if ((check_num == PyArray_NOTYPE + 10) - || (*at = PyArray_DescrFromType(check_num)) == NULL) { - PyErr_Clear(); - /* Now check to see if the object is registered in typeDict */ - if (typeDict != NULL) { - item = PyDict_GetItem(typeDict, obj); -#if defined(NPY_PY3K) - if (!item && PyBytes_Check(obj)) { - PyObject *tmp; - tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict"); - if (tmp != NULL) { - item = PyDict_GetItem(typeDict, tmp); - Py_DECREF(tmp); - } - } -#endif - if (item) { - return PyArray_DescrConverter(item, at); - } - } - goto fail; - } - - if (((*at)->elsize == 0) && (elsize != 0)) { - PyArray_DESCR_REPLACE(*at); - (*at)->elsize = elsize; - } - if (endian != 
'=' && PyArray_ISNBO(endian)) { - endian = '='; - } - if (endian != '=' && (*at)->byteorder != '|' - && (*at)->byteorder != endian) { - PyArray_DESCR_REPLACE(*at); - (*at)->byteorder = endian; - } - return PY_SUCCEED; - - fail: - PyErr_SetString(PyExc_TypeError, "data type not understood"); - *at = NULL; - return PY_FAIL; -} - -/** Array Descr Objects for dynamic types **/ - -/* - * There are some statically-defined PyArray_Descr objects corresponding - * to the basic built-in types. - * These can and should be DECREF'd and INCREF'd as appropriate, anyway. - * If a mistake is made in reference counting, deallocation on these - * builtins will be attempted leading to problems. - * - * This let's us deal with all PyArray_Descr objects using reference - * counting (regardless of whether they are statically or dynamically - * allocated). - */ - -/*NUMPY_API - * base cannot be NULL - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrNew(PyArray_Descr *base) -{ - PyArray_Descr *new = PyObject_New(PyArray_Descr, &PyArrayDescr_Type); - - if (new == NULL) { - return NULL; - } - /* Don't copy PyObject_HEAD part */ - memcpy((char *)new + sizeof(PyObject), - (char *)base + sizeof(PyObject), - sizeof(PyArray_Descr) - sizeof(PyObject)); - - if (new->fields == Py_None) { - new->fields = NULL; - } - Py_XINCREF(new->fields); - Py_XINCREF(new->names); - if (new->subarray) { - new->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); - memcpy(new->subarray, base->subarray, sizeof(PyArray_ArrayDescr)); - Py_INCREF(new->subarray->shape); - Py_INCREF(new->subarray->base); - } - Py_XINCREF(new->typeobj); - Py_XINCREF(new->metadata); - - return new; -} - -/* - * should never be called for builtin-types unless - * there is a reference-count problem - */ -static void -arraydescr_dealloc(PyArray_Descr *self) -{ - if (self->fields == Py_None) { - fprintf(stderr, "*** Reference count error detected: \n" \ - "an attempt was made to deallocate %d (%c) ***\n", - self->type_num, self->type); - 
Py_INCREF(self); - Py_INCREF(self); - return; - } - Py_XDECREF(self->typeobj); - Py_XDECREF(self->names); - Py_XDECREF(self->fields); - if (self->subarray) { - Py_XDECREF(self->subarray->shape); - Py_DECREF(self->subarray->base); - _pya_free(self->subarray); - } - Py_XDECREF(self->metadata); - Py_TYPE(self)->tp_free((PyObject *)self); -} - -/* - * we need to be careful about setting attributes because these - * objects are pointed to by arrays that depend on them for interpreting - * data. Currently no attributes of data-type objects can be set - * directly except names. - */ -static PyMemberDef arraydescr_members[] = { - {"type", - T_OBJECT, offsetof(PyArray_Descr, typeobj), READONLY, NULL}, - {"kind", - T_CHAR, offsetof(PyArray_Descr, kind), READONLY, NULL}, - {"char", - T_CHAR, offsetof(PyArray_Descr, type), READONLY, NULL}, - {"num", - T_INT, offsetof(PyArray_Descr, type_num), READONLY, NULL}, - {"byteorder", - T_CHAR, offsetof(PyArray_Descr, byteorder), READONLY, NULL}, - {"itemsize", - T_INT, offsetof(PyArray_Descr, elsize), READONLY, NULL}, - {"alignment", - T_INT, offsetof(PyArray_Descr, alignment), READONLY, NULL}, - {"flags", - T_BYTE, offsetof(PyArray_Descr, flags), READONLY, NULL}, - {NULL, 0, 0, 0, NULL}, -}; - -static PyObject * -arraydescr_subdescr_get(PyArray_Descr *self) -{ - if (self->subarray == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - return Py_BuildValue("OO", - (PyObject *)self->subarray->base, self->subarray->shape); -} - -static PyObject * -_append_to_datetime_typestr(PyArray_Descr *self, PyObject *ret) -{ - PyObject *tmp; - PyObject *res; - int num, den, events; - char *basestr; - PyArray_DatetimeMetaData *dt_data; - - /* This shouldn't happen */ - if (self->metadata == NULL) { - return ret; - } - tmp = PyDict_GetItemString(self->metadata, NPY_METADATA_DTSTR); - dt_data = NpyCapsule_AsVoidPtr(tmp); - num = dt_data->num; - den = dt_data->den; - events = dt_data->events; - basestr = _datetime_strings[dt_data->base]; - - if (num 
== 1) { - tmp = PyUString_FromString(basestr); - } - else { - tmp = PyUString_FromFormat("%d%s", num, basestr); - } - if (den != 1) { - res = PyUString_FromFormat("/%d", den); - PyUString_ConcatAndDel(&tmp, res); - } - - res = PyUString_FromString("["); - PyUString_ConcatAndDel(&res, tmp); - PyUString_ConcatAndDel(&res, PyUString_FromString("]")); - if (events != 1) { - tmp = PyUString_FromFormat("//%d", events); - PyUString_ConcatAndDel(&res, tmp); - } - PyUString_ConcatAndDel(&ret, res); - return ret; -} - -NPY_NO_EXPORT PyObject * -arraydescr_protocol_typestr_get(PyArray_Descr *self) -{ - char basic_ = self->kind; - char endian = self->byteorder; - int size = self->elsize; - PyObject *ret; - - if (endian == '=') { - endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) { - endian = '>'; - } - } - if (self->type_num == PyArray_UNICODE) { - size >>= 2; - } - - ret = PyUString_FromFormat("%c%c%d", endian, basic_, size); - if (PyDataType_ISDATETIME(self)) { - ret = _append_to_datetime_typestr(self, ret); - } - - return ret; -} - -static PyObject * -arraydescr_typename_get(PyArray_Descr *self) -{ - int len; - PyTypeObject *typeobj = self->typeobj; - PyObject *res; - char *s; - /* fixme: not reentrant */ - static int prefix_len = 0; - - if (PyTypeNum_ISUSERDEF(self->type_num)) { - s = strrchr(typeobj->tp_name, '.'); - if (s == NULL) { - res = PyUString_FromString(typeobj->tp_name); - } - else { - res = PyUString_FromStringAndSize(s + 1, strlen(s) - 1); - } - return res; - } - else { - if (prefix_len == 0) { - prefix_len = strlen("numpy."); - } - len = strlen(typeobj->tp_name); - if (*(typeobj->tp_name + (len-1)) == '_') { - len -= 1; - } - len -= prefix_len; - res = PyUString_FromStringAndSize(typeobj->tp_name+prefix_len, len); - } - if (PyTypeNum_ISFLEXIBLE(self->type_num) && self->elsize != 0) { - PyObject *p; - p = PyUString_FromFormat("%d", self->elsize * 8); - PyUString_ConcatAndDel(&res, p); - } - if (PyDataType_ISDATETIME(self)) { - res = 
_append_to_datetime_typestr(self, res); - } - - return res; -} - -static PyObject * -arraydescr_base_get(PyArray_Descr *self) -{ - if (self->subarray == NULL) { - Py_INCREF(self); - return (PyObject *)self; - } - Py_INCREF(self->subarray->base); - return (PyObject *)(self->subarray->base); -} - -static PyObject * -arraydescr_shape_get(PyArray_Descr *self) -{ - if (self->subarray == NULL) { - return PyTuple_New(0); - } - /*TODO - * self->subarray->shape should always be a tuple, - * so this check should be unnecessary - */ - if (PyTuple_Check(self->subarray->shape)) { - Py_INCREF(self->subarray->shape); - return (PyObject *)(self->subarray->shape); - } - return Py_BuildValue("(O)", self->subarray->shape); -} - -NPY_NO_EXPORT PyObject * -arraydescr_protocol_descr_get(PyArray_Descr *self) -{ - PyObject *dobj, *res; - PyObject *_numpy_internal; - - if (self->names == NULL) { - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) { - return NULL; - } - PyTuple_SET_ITEM(dobj, 0, PyUString_FromString("")); - PyTuple_SET_ITEM(dobj, 1, arraydescr_protocol_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) { - Py_DECREF(dobj); - return NULL; - } - PyList_SET_ITEM(res, 0, dobj); - return res; - } - - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - res = PyObject_CallMethod(_numpy_internal, "_array_descr", "O", self); - Py_DECREF(_numpy_internal); - return res; -} - -/* - * returns 1 for a builtin type - * and 2 for a user-defined data-type descriptor - * return 0 if neither (i.e. 
it's a copy of one) - */ -static PyObject * -arraydescr_isbuiltin_get(PyArray_Descr *self) -{ - long val; - val = 0; - if (self->fields == Py_None) { - val = 1; - } - if (PyTypeNum_ISUSERDEF(self->type_num)) { - val = 2; - } - return PyInt_FromLong(val); -} - -static int -_arraydescr_isnative(PyArray_Descr *self) -{ - if (self->names == NULL) { - return PyArray_ISNBO(self->byteorder); - } - else { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - while (PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - return -1; - } - if (!_arraydescr_isnative(new)) { - return 0; - } - } - } - return 1; -} - -/* - * return Py_True if this data-type descriptor - * has native byteorder if no fields are defined - * - * or if all sub-fields have native-byteorder if - * fields are defined - */ -static PyObject * -arraydescr_isnative_get(PyArray_Descr *self) -{ - PyObject *ret; - int retval; - retval = _arraydescr_isnative(self); - if (retval == -1) { - return NULL; - } - ret = retval ? 
Py_True : Py_False; - Py_INCREF(ret); - return ret; -} - -static PyObject * -arraydescr_fields_get(PyArray_Descr *self) -{ - if (self->names == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - return PyDictProxy_New(self->fields); -} - -static PyObject * -arraydescr_metadata_get(PyArray_Descr *self) -{ - if (self->metadata == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - return PyDictProxy_New(self->metadata); -} - -static PyObject * -arraydescr_hasobject_get(PyArray_Descr *self) -{ - if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) { - Py_RETURN_TRUE; - } - else { - Py_RETURN_FALSE; - } -} - -static PyObject * -arraydescr_names_get(PyArray_Descr *self) -{ - if (self->names == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - Py_INCREF(self->names); - return self->names; -} - -static int -arraydescr_names_set(PyArray_Descr *self, PyObject *val) -{ - int N = 0; - int i; - PyObject *new_names; - PyObject *new_fields; - - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete dtype names attribute"); - return -1; - } - if (!PyDataType_HASFIELDS(self)) { - PyErr_SetString(PyExc_ValueError, - "there are no fields defined"); - return -1; - } - - N = PyTuple_GET_SIZE(self->names); - if (!PySequence_Check(val) || PyObject_Size((PyObject *)val) != N) { - PyErr_Format(PyExc_ValueError, - "must replace all names at once with a sequence of length %d", - N); - return -1; - } - /* Make sure all entries are strings */ - for (i = 0; i < N; i++) { - PyObject *item; - int valid = 1; - item = PySequence_GetItem(val, i); - valid = PyUString_Check(item); - Py_DECREF(item); - if (!valid) { - PyErr_Format(PyExc_ValueError, - "item #%d of names is of type %s and not string", - i, Py_TYPE(item)->tp_name); - return -1; - } - } - /* Update dictionary keys in fields */ - new_names = PySequence_Tuple(val); - new_fields = PyDict_New(); - for (i = 0; i < N; i++) { - PyObject *key; - PyObject *item; - PyObject *new_key; - int ret; - key = 
PyTuple_GET_ITEM(self->names, i); - /* Borrowed references to item and new_key */ - item = PyDict_GetItem(self->fields, key); - new_key = PyTuple_GET_ITEM(new_names, i); - /* Check for duplicates */ - ret = PyDict_Contains(new_fields, new_key); - if (ret != 0) { - if (ret < 0) { - PyErr_Clear(); - } - PyErr_SetString(PyExc_ValueError, "Duplicate field names given."); - Py_DECREF(new_names); - Py_DECREF(new_fields); - return -1; - } - PyDict_SetItem(new_fields, new_key, item); - } - - /* Replace names */ - Py_DECREF(self->names); - self->names = new_names; - - /* Replace fields */ - Py_DECREF(self->fields); - self->fields = new_fields; - - return 0; -} - -static PyGetSetDef arraydescr_getsets[] = { - {"subdtype", - (getter)arraydescr_subdescr_get, - NULL, NULL, NULL}, - {"descr", - (getter)arraydescr_protocol_descr_get, - NULL, NULL, NULL}, - {"str", - (getter)arraydescr_protocol_typestr_get, - NULL, NULL, NULL}, - {"name", - (getter)arraydescr_typename_get, - NULL, NULL, NULL}, - {"base", - (getter)arraydescr_base_get, - NULL, NULL, NULL}, - {"shape", - (getter)arraydescr_shape_get, - NULL, NULL, NULL}, - {"isbuiltin", - (getter)arraydescr_isbuiltin_get, - NULL, NULL, NULL}, - {"isnative", - (getter)arraydescr_isnative_get, - NULL, NULL, NULL}, - {"fields", - (getter)arraydescr_fields_get, - NULL, NULL, NULL}, - {"metadata", - (getter)arraydescr_metadata_get, - NULL, NULL, NULL}, - {"names", - (getter)arraydescr_names_get, - (setter)arraydescr_names_set, - NULL, NULL}, - {"hasobject", - (getter)arraydescr_hasobject_get, - NULL, NULL, NULL}, - {NULL, NULL, NULL, NULL, NULL}, -}; - -static int -_invalid_metadata_check(PyObject *metadata) -{ - PyObject *res; - - /* borrowed reference */ - res = PyDict_GetItemString(metadata, NPY_METADATA_DTSTR); - if (res == NULL) { - return 0; - } - else { - PyErr_SetString(PyExc_ValueError, - "cannot set " NPY_METADATA_DTSTR "in dtype metadata"); - return 1; - } -} - -static PyObject * -arraydescr_new(PyTypeObject 
*NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) -{ - PyObject *odescr, *ometadata=NULL; - PyArray_Descr *descr, *conv; - Bool align = FALSE; - Bool copy = FALSE; - Bool copied = FALSE; - static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O!", kwlist, - &odescr, PyArray_BoolConverter, &align, - PyArray_BoolConverter, ©, - &PyDict_Type, &ometadata)) { - return NULL; - } - - if ((ometadata != NULL) && (_invalid_metadata_check(ometadata))) { - return NULL; - } - if (align) { - if (!PyArray_DescrAlignConverter(odescr, &conv)) { - return NULL; - } - } - else if (!PyArray_DescrConverter(odescr, &conv)) { - return NULL; - } - /* Get a new copy of it unless it's already a copy */ - if (copy && conv->fields == Py_None) { - descr = PyArray_DescrNew(conv); - Py_DECREF(conv); - conv = descr; - copied = TRUE; - } - - if ((ometadata != NULL)) { - /* - * We need to be sure to make a new copy of the data-type and any - * underlying dictionary - */ - if (!copied) { - descr = PyArray_DescrNew(conv); - Py_DECREF(conv); - conv = descr; - } - if ((conv->metadata != NULL)) { - /* - * Make a copy of the metadata before merging with ometadata - * so that this data-type descriptor has it's own copy - */ - /* Save a reference */ - odescr = conv->metadata; - conv->metadata = PyDict_Copy(odescr); - /* Decrement the old reference */ - Py_DECREF(odescr); - - /* - * Update conv->metadata with anything new in metadata - * keyword, but do not over-write anything already there - */ - if (PyDict_Merge(conv->metadata, ometadata, 0) != 0) { - Py_DECREF(conv); - return NULL; - } - } - else { - /* Make a copy of the input dictionary */ - conv->metadata = PyDict_Copy(ometadata); - } - } - - return (PyObject *)conv; -} - -/* - * Return a tuple of - * (cleaned metadata dictionary, tuple with (str, num, events)) - */ -static PyObject * -_get_pickleabletype_from_metadata(PyObject *metadata) -{ - PyObject *newdict; - PyObject 
*newtup, *dt_tuple; - PyObject *cobj; - - newdict = PyDict_Copy(metadata); - PyDict_DelItemString(newdict, NPY_METADATA_DTSTR); - newtup = PyTuple_New(2); - PyTuple_SET_ITEM(newtup, 0, newdict); - - cobj = PyDict_GetItemString(metadata, NPY_METADATA_DTSTR); - dt_tuple = _get_datetime_tuple_from_cobj(cobj); - - PyTuple_SET_ITEM(newtup, 1, dt_tuple); - - return newtup; -} - - -/* return a tuple of (callable object, args, state). */ -static PyObject * -arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) -{ - /* - * version number of this pickle type. Increment if we need to - * change the format. Be sure to handle the old versions in - * arraydescr_setstate. - */ - const int version = 4; - PyObject *ret, *mod, *obj; - PyObject *state; - char endian; - int elsize, alignment; - - ret = PyTuple_New(3); - if (ret == NULL) { - return NULL; - } - mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) { - Py_DECREF(ret); - return NULL; - } - obj = PyObject_GetAttrString(mod, "dtype"); - Py_DECREF(mod); - if (obj == NULL) { - Py_DECREF(ret); - return NULL; - } - PyTuple_SET_ITEM(ret, 0, obj); - if (PyTypeNum_ISUSERDEF(self->type_num) - || ((self->type_num == PyArray_VOID - && self->typeobj != &PyVoidArrType_Type))) { - obj = (PyObject *)self->typeobj; - Py_INCREF(obj); - } - else { - elsize = self->elsize; - if (self->type_num == PyArray_UNICODE) { - elsize >>= 2; - } - obj = PyUString_FromFormat("%c%d",self->kind, elsize); - } - PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1)); - - /* - * Now return the state which is at least byteorder, - * subarray, and fields - */ - endian = self->byteorder; - if (endian == '=') { - endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) { - endian = '>'; - } - } - if (self->metadata) { - state = PyTuple_New(9); - PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); - if (PyDataType_ISDATETIME(self)) { - PyObject *newobj; - /* Handle CObject in NPY_METADATA_DTSTR key separately */ - /* - * 
newobj is a tuple of cleaned metadata dictionary - * and tuple of date_time info (str, num, den, events) - */ - newobj = _get_pickleabletype_from_metadata(self->metadata); - PyTuple_SET_ITEM(state, 8, newobj); - } - else { - Py_INCREF(self->metadata); - PyTuple_SET_ITEM(state, 8, self->metadata); - } - } - else { /* Use version 3 pickle format */ - state = PyTuple_New(8); - PyTuple_SET_ITEM(state, 0, PyInt_FromLong(3)); - } - - PyTuple_SET_ITEM(state, 1, PyUString_FromFormat("%c", endian)); - PyTuple_SET_ITEM(state, 2, arraydescr_subdescr_get(self)); - if (self->names) { - Py_INCREF(self->names); - Py_INCREF(self->fields); - PyTuple_SET_ITEM(state, 3, self->names); - PyTuple_SET_ITEM(state, 4, self->fields); - } - else { - PyTuple_SET_ITEM(state, 3, Py_None); - PyTuple_SET_ITEM(state, 4, Py_None); - Py_INCREF(Py_None); - Py_INCREF(Py_None); - } - - /* for extended types it also includes elsize and alignment */ - if (PyTypeNum_ISEXTENDED(self->type_num)) { - elsize = self->elsize; - alignment = self->alignment; - } - else { - elsize = -1; - alignment = -1; - } - PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize)); - PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment)); - PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->flags)); - - PyTuple_SET_ITEM(ret, 2, state); - return ret; -} - -/* - * returns NPY_OBJECT_DTYPE_FLAGS if this data-type has an object portion used - * when setting the state because hasobject is not stored. 
- */ -static char -_descr_find_object(PyArray_Descr *self) -{ - if (self->flags - || self->type_num == PyArray_OBJECT - || self->kind == 'O') { - return NPY_OBJECT_DTYPE_FLAGS; - } - if (PyDescr_HASFIELDS(self)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - - while (PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - PyErr_Clear(); - return 0; - } - if (_descr_find_object(new)) { - new->flags = NPY_OBJECT_DTYPE_FLAGS; - return NPY_OBJECT_DTYPE_FLAGS; - } - } - } - return 0; -} - -/* - * state is at least byteorder, subarray, and fields but could include elsize - * and alignment for EXTENDED arrays - */ -static PyObject * -arraydescr_setstate(PyArray_Descr *self, PyObject *args) -{ - int elsize = -1, alignment = -1; - int version = 4; -#if defined(NPY_PY3K) - int endian; -#else - char endian; -#endif - PyObject *subarray, *fields, *names = NULL, *metadata=NULL; - int incref_names = 1; - int int_dtypeflags = 0; - char dtypeflags; - - if (self->fields == Py_None) { - Py_INCREF(Py_None); - return Py_None; - } - if (PyTuple_GET_SIZE(args) != 1 - || !(PyTuple_Check(PyTuple_GET_ITEM(args, 0)))) { - PyErr_BadInternalCall(); - return NULL; - } - switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { - case 9: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOOiiiO)" -#else -#define _ARGSTR_ "(icOOOiiiO)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags, &metadata)) { - return NULL; -#undef _ARGSTR_ - } - break; - case 8: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOOiii)" -#else -#define _ARGSTR_ "(icOOOiii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags)) { - return NULL; -#undef _ARGSTR_ - } - break; - case 7: -#if defined(NPY_PY3K) 
-#define _ARGSTR_ "(iCOOOii)" -#else -#define _ARGSTR_ "(icOOOii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment)) { - return NULL; -#undef _ARGSTR_ - } - break; - case 6: -#if defined(NPY_PY3K) -#define _ARGSTR_ "(iCOOii)" -#else -#define _ARGSTR_ "(icOOii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, &version, - &endian, &subarray, &fields, - &elsize, &alignment)) { - PyErr_Clear(); -#undef _ARGSTR_ - } - break; - case 5: - version = 0; -#if defined(NPY_PY3K) -#define _ARGSTR_ "(COOii)" -#else -#define _ARGSTR_ "(cOOii)" -#endif - if (!PyArg_ParseTuple(args, _ARGSTR_, - &endian, &subarray, &fields, &elsize, - &alignment)) { -#undef _ARGSTR_ - return NULL; - } - break; - default: - /* raise an error */ - if (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0)) > 5) { - version = PyInt_AsLong(PyTuple_GET_ITEM(args, 0)); - } - else { - version = -1; - } - } - - /* - * If we ever need another pickle format, increment the version - * number. But we should still be able to handle the old versions. 
- */ - if (version < 0 || version > 4) { - PyErr_Format(PyExc_ValueError, - "can't handle version %d of numpy.dtype pickle", - version); - return NULL; - } - - if (version == 1 || version == 0) { - if (fields != Py_None) { - PyObject *key, *list; - key = PyInt_FromLong(-1); - list = PyDict_GetItem(fields, key); - if (!list) { - return NULL; - } - Py_INCREF(list); - names = list; - PyDict_DelItem(fields, key); - incref_names = 0; - } - else { - names = Py_None; - } - } - - - if ((fields == Py_None && names != Py_None) || - (names == Py_None && fields != Py_None)) { - PyErr_Format(PyExc_ValueError, - "inconsistent fields and names"); - return NULL; - } - - if (endian != '|' && PyArray_IsNativeByteOrder(endian)) { - endian = '='; - } - self->byteorder = endian; - if (self->subarray) { - Py_XDECREF(self->subarray->base); - Py_XDECREF(self->subarray->shape); - _pya_free(self->subarray); - } - self->subarray = NULL; - - if (subarray != Py_None) { - PyObject *subarray_shape; - - /* - * Ensure that subarray[0] is an ArrayDescr and - * that subarray_shape obtained from subarray[1] is a tuple of integers. 
- */ - if (!(PyTuple_Check(subarray) && - PyTuple_Size(subarray) == 2 && - PyArray_DescrCheck(PyTuple_GET_ITEM(subarray, 0)))) { - PyErr_Format(PyExc_ValueError, - "incorrect subarray in __setstate__"); - return NULL; - } - subarray_shape = PyTuple_GET_ITEM(subarray, 1); - if (PyNumber_Check(subarray_shape)) { - PyObject *tmp; -#if defined(NPY_PY3K) - tmp = PyNumber_Long(subarray_shape); -#else - tmp = PyNumber_Int(subarray_shape); -#endif - if (tmp == NULL) { - return NULL; - } - subarray_shape = Py_BuildValue("(O)", tmp); - Py_DECREF(tmp); - if (subarray_shape == NULL) { - return NULL; - } - } - else if (_is_tuple_of_integers(subarray_shape)) { - Py_INCREF(subarray_shape); - } - else { - PyErr_Format(PyExc_ValueError, - "incorrect subarray shape in __setstate__"); - return NULL; - } - - self->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); - self->subarray->base = (PyArray_Descr *)PyTuple_GET_ITEM(subarray, 0); - Py_INCREF(self->subarray->base); - self->subarray->shape = subarray_shape; - } - - if (fields != Py_None) { - Py_XDECREF(self->fields); - self->fields = fields; - Py_INCREF(fields); - Py_XDECREF(self->names); - self->names = names; - if (incref_names) { - Py_INCREF(names); - } - } - - if (PyTypeNum_ISEXTENDED(self->type_num)) { - self->elsize = elsize; - self->alignment = alignment; - } - - /* - * We use an integer converted to char for backward compatibility with - * pickled arrays. 
Pickled arrays created with previous versions encoded - * flags as an int even though it actually was a char in the PyArray_Descr - * structure - */ - dtypeflags = int_dtypeflags; - if (dtypeflags != int_dtypeflags) { - PyErr_Format(PyExc_ValueError, - "incorrect value for flags variable (overflow)"); - return NULL; - } - else { - self->flags = dtypeflags; - } - - if (version < 3) { - self->flags = _descr_find_object(self); - } - - Py_XDECREF(self->metadata); - if (PyDataType_ISDATETIME(self) - && (metadata != Py_None) - && (metadata != NULL)) { - PyObject *cobj; - self->metadata = PyTuple_GET_ITEM(metadata, 0); - Py_INCREF(self->metadata); - cobj = _convert_datetime_tuple_to_cobj(PyTuple_GET_ITEM(metadata, 1)); - PyDict_SetItemString(self->metadata, NPY_METADATA_DTSTR, cobj); - Py_DECREF(cobj); - } - else { - /* - * We have a borrowed reference to metadata so no need - * to alter reference count - */ - if (metadata == Py_None) { - metadata = NULL; - } - self->metadata = metadata; - Py_XINCREF(metadata); - } - - Py_INCREF(Py_None); - return Py_None; -} - -/*NUMPY_API - * - * Get type-descriptor from an object forcing alignment if possible - * None goes to DEFAULT type. - * - * any object with the .fields attribute and/or .itemsize attribute (if the - *.fields attribute does not give the total size -- i.e. a partial record - * naming). If itemsize is given it must be >= size computed from fields - * - * The .fields attribute must return a convertible dictionary if present. - * Result inherits from PyArray_VOID. 
-*/ -NPY_NO_EXPORT int -PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) -{ - if (PyDict_Check(obj)) { - *at = _convert_from_dict(obj, 1); - } - else if (PyBytes_Check(obj)) { - *at = _convert_from_commastring(obj, 1); - } - else if (PyUnicode_Check(obj)) { - PyObject *tmp; - tmp = PyUnicode_AsASCIIString(obj); - *at = _convert_from_commastring(tmp, 1); - Py_DECREF(tmp); - } - else if (PyList_Check(obj)) { - *at = _convert_from_array_descr(obj, 1); - } - else { - return PyArray_DescrConverter(obj, at); - } - if (*at == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); - } - return PY_FAIL; - } - return PY_SUCCEED; -} - -/*NUMPY_API - * - * Get type-descriptor from an object forcing alignment if possible - * None goes to NULL. - */ -NPY_NO_EXPORT int -PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) -{ - if (PyDict_Check(obj)) { - *at = _convert_from_dict(obj, 1); - } - else if (PyBytes_Check(obj)) { - *at = _convert_from_commastring(obj, 1); - } - else if (PyUnicode_Check(obj)) { - PyObject *tmp; - tmp = PyUnicode_AsASCIIString(obj); - *at = _convert_from_commastring(tmp, 1); - Py_DECREF(tmp); - } - else if (PyList_Check(obj)) { - *at = _convert_from_array_descr(obj, 1); - } - else { - return PyArray_DescrConverter2(obj, at); - } - if (*at == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); - } - return PY_FAIL; - } - return PY_SUCCEED; -} - - - -/*NUMPY_API - * - * returns a copy of the PyArray_Descr structure with the byteorder - * altered: - * no arguments: The byteorder is swapped (in all subfields as well) - * single argument: The byteorder is forced to the given state - * (in all subfields as well) - * - * Valid states: ('big', '>') or ('little' or '<') - * ('native', or '=') - * - * If a descr structure with | is encountered it's own - * byte-order is not changed but any fields are: - * - * - * Deep 
bytorder change of a data-type descriptor - * *** Leaves reference count of self unchanged --- does not DECREF self *** - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) -{ - PyArray_Descr *new; - char endian; - - new = PyArray_DescrNew(self); - endian = new->byteorder; - if (endian != PyArray_IGNORE) { - if (newendian == PyArray_SWAP) { - /* swap byteorder */ - if PyArray_ISNBO(endian) { - endian = PyArray_OPPBYTE; - } - else { - endian = PyArray_NATBYTE; - } - new->byteorder = endian; - } - else if (newendian != PyArray_IGNORE) { - new->byteorder = newendian; - } - } - if (new->names) { - PyObject *newfields; - PyObject *key, *value; - PyObject *newvalue; - PyObject *old; - PyArray_Descr *newdescr; - Py_ssize_t pos = 0; - int len, i; - - newfields = PyDict_New(); - /* make new dictionary with replaced PyArray_Descr Objects */ - while (PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyUString_Check(key) || !PyTuple_Check(value) || - ((len=PyTuple_GET_SIZE(value)) < 2)) { - continue; - } - old = PyTuple_GET_ITEM(value, 0); - if (!PyArray_DescrCheck(old)) { - continue; - } - newdescr = PyArray_DescrNewByteorder( - (PyArray_Descr *)old, newendian); - if (newdescr == NULL) { - Py_DECREF(newfields); Py_DECREF(new); - return NULL; - } - newvalue = PyTuple_New(len); - PyTuple_SET_ITEM(newvalue, 0, (PyObject *)newdescr); - for (i = 1; i < len; i++) { - old = PyTuple_GET_ITEM(value, i); - Py_INCREF(old); - PyTuple_SET_ITEM(newvalue, i, old); - } - PyDict_SetItem(newfields, key, newvalue); - Py_DECREF(newvalue); - } - Py_DECREF(new->fields); - new->fields = newfields; - } - if (new->subarray) { - Py_DECREF(new->subarray->base); - new->subarray->base = PyArray_DescrNewByteorder( - self->subarray->base, newendian); - } - return new; -} - - -static PyObject * -arraydescr_newbyteorder(PyArray_Descr *self, PyObject *args) -{ - char endian=PyArray_SWAP; - - if 
(!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) { - return NULL; - } - return (PyObject *)PyArray_DescrNewByteorder(self, endian); -} - -static PyMethodDef arraydescr_methods[] = { - /* for pickling */ - {"__reduce__", - (PyCFunction)arraydescr_reduce, - METH_VARARGS, NULL}, - {"__setstate__", - (PyCFunction)arraydescr_setstate, - METH_VARARGS, NULL}, - {"newbyteorder", - (PyCFunction)arraydescr_newbyteorder, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -static PyObject * -arraydescr_str(PyArray_Descr *self) -{ - PyObject *sub; - - if (self->names) { - PyObject *lst; - lst = arraydescr_protocol_descr_get(self); - if (!lst) { - sub = PyUString_FromString(""); - PyErr_Clear(); - } - else { - sub = PyObject_Str(lst); - } - Py_XDECREF(lst); - if (self->type_num != PyArray_VOID) { - PyObject *p, *t; - t=PyUString_FromString("'"); - p = arraydescr_protocol_typestr_get(self); - PyUString_Concat(&p, t); - PyUString_ConcatAndDel(&t, p); - p = PyUString_FromString("("); - PyUString_ConcatAndDel(&p, t); - PyUString_ConcatAndDel(&p, PyUString_FromString(", ")); - PyUString_ConcatAndDel(&p, sub); - PyUString_ConcatAndDel(&p, PyUString_FromString(")")); - sub = p; - } - } - else if (self->subarray) { - PyObject *p; - PyObject *t = PyUString_FromString("("); - PyObject *sh; - p = arraydescr_str(self->subarray->base); - if (!self->subarray->base->names && !self->subarray->base->subarray) { - PyObject *tmp=PyUString_FromString("'"); - PyUString_Concat(&p, tmp); - PyUString_ConcatAndDel(&tmp, p); - p = tmp; - } - PyUString_ConcatAndDel(&t, p); - PyUString_ConcatAndDel(&t, PyUString_FromString(",")); - /*TODO - * self->subarray->shape should always be a tuple, - * so this check should be unnecessary - */ - if (!PyTuple_Check(self->subarray->shape)) { - sh = Py_BuildValue("(O)", self->subarray->shape); - } - else { - sh = self->subarray->shape; - Py_INCREF(sh); - } - PyUString_ConcatAndDel(&t, PyObject_Str(sh)); - Py_DECREF(sh); - 
PyUString_ConcatAndDel(&t, PyUString_FromString(")")); - sub = t; - } - else if (PyDataType_ISFLEXIBLE(self) || !PyArray_ISNBO(self->byteorder)) { - sub = arraydescr_protocol_typestr_get(self); - } - else { - sub = arraydescr_typename_get(self); - } - return sub; -} - -static PyObject * -arraydescr_repr(PyArray_Descr *self) -{ - PyObject *sub, *s; - s = PyUString_FromString("dtype("); - sub = arraydescr_str(self); - if (sub == NULL) { - return sub; - } - if (!self->names && !self->subarray) { - PyObject *t=PyUString_FromString("'"); - PyUString_Concat(&sub, t); - PyUString_ConcatAndDel(&t, sub); - sub = t; - } - PyUString_ConcatAndDel(&s, sub); - sub = PyUString_FromString(")"); - PyUString_ConcatAndDel(&s, sub); - return s; -} - -static PyObject * -arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) -{ - PyArray_Descr *new = NULL; - PyObject *result = Py_NotImplemented; - if (!PyArray_DescrCheck(other)) { - if (PyArray_DescrConverter(other, &new) == PY_FAIL) { - return NULL; - } - } - else { - new = (PyArray_Descr *)other; - Py_INCREF(new); - } - switch (cmp_op) { - case Py_LT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_LE: - if (PyArray_CanCastTo(self, new)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_EQ: - if (PyArray_EquivTypes(self, new)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_NE: - if (PyArray_EquivTypes(self, new)) - result = Py_False; - else - result = Py_True; - break; - case Py_GT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_GE: - if (PyArray_CanCastTo(new, self)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - default: - result = Py_NotImplemented; - } - - Py_XDECREF(new); - Py_INCREF(result); - return result; -} - 
-/************************************************************************* - **************** Implement Mapping Protocol *************************** - *************************************************************************/ - -static Py_ssize_t -descr_length(PyObject *self0) -{ - PyArray_Descr *self = (PyArray_Descr *)self0; - - if (self->names) { - return PyTuple_GET_SIZE(self->names); - } - else { - return 0; - } -} - -static PyObject * -descr_repeat(PyObject *self, Py_ssize_t length) -{ - PyObject *tup; - PyArray_Descr *new; - if (length < 0) { - return PyErr_Format(PyExc_ValueError, - "Array length must be >= 0, not %"INTP_FMT, length); - } - tup = Py_BuildValue("O" NPY_SSIZE_T_PYFMT, self, length); - if (tup == NULL) { - return NULL; - } - PyArray_DescrConverter(tup, &new); - Py_DECREF(tup); - return (PyObject *)new; -} - -static PyObject * -descr_subscript(PyArray_Descr *self, PyObject *op) -{ - PyObject *retval; - - if (!self->names) { - PyObject *astr = arraydescr_str(self); -#if defined(NPY_PY3K) - PyObject *bstr = PyUnicode_AsUnicodeEscapeString(astr); - Py_DECREF(astr); - astr = bstr; -#endif - PyErr_Format(PyExc_KeyError, - "There are no fields in dtype %s.", PyBytes_AsString(astr)); - Py_DECREF(astr); - return NULL; - } -#if defined(NPY_PY3K) - if (PyUString_Check(op)) { -#else - if (PyUString_Check(op) || PyUnicode_Check(op)) { -#endif - PyObject *obj = PyDict_GetItem(self->fields, op); - PyObject *descr; - PyObject *s; - - if (obj == NULL) { - if (PyUnicode_Check(op)) { - s = PyUnicode_AsUnicodeEscapeString(op); - } - else { - s = op; - } - - PyErr_Format(PyExc_KeyError, - "Field named \'%s\' not found.", PyBytes_AsString(s)); - if (s != op) { - Py_DECREF(s); - } - return NULL; - } - descr = PyTuple_GET_ITEM(obj, 0); - Py_INCREF(descr); - retval = descr; - } - else if (PyInt_Check(op)) { - PyObject *name; - int size = PyTuple_GET_SIZE(self->names); - int value = PyArray_PyIntAsInt(op); - - if (PyErr_Occurred()) { - return NULL; - } - if (value < 
0) { - value += size; - } - if (value < 0 || value >= size) { - PyErr_Format(PyExc_IndexError, - "Field index out of range."); - return NULL; - } - name = PyTuple_GET_ITEM(self->names, value); - retval = descr_subscript(self, name); - } - else { - PyErr_SetString(PyExc_ValueError, - "Field key must be an integer, string, or unicode."); - return NULL; - } - return retval; -} - -static PySequenceMethods descr_as_sequence = { - descr_length, - (binaryfunc)NULL, - descr_repeat, - NULL, NULL, - NULL, /* sq_ass_item */ - NULL, /* ssizessizeobjargproc sq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ -}; - -static PyMappingMethods descr_as_mapping = { - descr_length, /* mp_length*/ - (binaryfunc)descr_subscript, /* mp_subscript*/ - (objobjargproc)NULL, /* mp_ass_subscript*/ -}; - -/****************** End of Mapping Protocol ******************************/ - -NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.dtype", /* tp_name */ - sizeof(PyArray_Descr), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arraydescr_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - (void *)0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - (reprfunc)arraydescr_repr, /* tp_repr */ - 0, /* tp_as_number */ - &descr_as_sequence, /* tp_as_sequence */ - &descr_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arraydescr_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - arraydescr_methods, /* tp_methods */ - arraydescr_members, /* 
tp_members */ - arraydescr_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arraydescr_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; diff --git a/numpy-1.6.2/numpy/core/src/multiarray/descriptor.h b/numpy-1.6.2/numpy/core/src/multiarray/descriptor.h deleted file mode 100644 index acb80eec69..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/descriptor.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _NPY_ARRAYDESCR_H_ -#define _NPY_ARRAYDESCR_H_ - -NPY_NO_EXPORT PyObject *arraydescr_protocol_typestr_get(PyArray_Descr *); -NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get(PyArray_Descr *self); - -NPY_NO_EXPORT PyObject * -array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args); - -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_fromobj(PyObject *obj); - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT char *_datetime_strings[]; -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/dtype_transfer.c b/numpy-1.6.2/numpy/core/src/multiarray/dtype_transfer.c deleted file mode 100644 index f04dbe8ebf..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/dtype_transfer.c +++ /dev/null @@ -1,2995 +0,0 @@ -/* - * This file contains low-level loops for data type transfers. - * In particular the function PyArray_GetDTypeTransferFunction is - * implemented here. - * - * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia - * - * See LICENSE.txt for the license. 
- - */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#include -#include -#include - -#include "lowlevel_strided_loops.h" - -#define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128 - -/********** PRINTF DEBUG TRACING **************/ -#define NPY_DT_DBG_TRACING 0 -/* Tracing incref/decref can be very noisy */ -#define NPY_DT_REF_DBG_TRACING 0 - -#if NPY_DT_REF_DBG_TRACING -#define NPY_DT_DBG_REFTRACE(msg, ref) \ - printf("%-12s %20p %s%d%s\n", msg, ref, \ - ref ? "(refcnt " : "", \ - ref ? (int)ref->ob_refcnt : 0, \ - ref ? ((ref->ob_refcnt <= 0) ? \ - ") <- BIG PROBLEM!!!!" : ")") : ""); \ - fflush(stdout); -#else -#define NPY_DT_DBG_REFTRACE(msg, ref) -#endif -/**********************************************/ - -/* - * Returns a transfer function which DECREFs any references in src_type. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -static int -get_decsrcref_transfer_function(int aligned, - npy_intp src_stride, - PyArray_Descr *src_dtype, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api); - -/* - * Returns a transfer function which zeros out the dest values. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -static int -get_setdstzero_transfer_function(int aligned, - npy_intp dst_stride, - PyArray_Descr *dst_dtype, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api); - -/* - * Returns a transfer function which sets a boolean type to ones. - * - * Returns NPY_SUCCEED or NPY_FAIL. 
- */ -NPY_NO_EXPORT int -get_bool_setdstone_transfer_function(npy_intp dst_stride, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *NPY_UNUSED(out_needs_api)); - -/*************************** COPY REFERENCES *******************************/ - -/* Moves references from src to dst */ -static void -_strided_to_strided_move_references(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - PyObject *src_ref = NULL, *dst_ref = NULL; - while (N > 0) { - NPY_COPY_PYOBJECT_PTR(&src_ref, src); - NPY_COPY_PYOBJECT_PTR(&dst_ref, dst); - - /* Release the reference in dst */ - NPY_DT_DBG_REFTRACE("dec dst ref", dst_ref); - Py_XDECREF(dst_ref); - /* Move the reference */ - NPY_DT_DBG_REFTRACE("move src ref", src_ref); - NPY_COPY_PYOBJECT_PTR(dst, &src_ref); - /* Set the source reference to NULL */ - src_ref = NULL; - NPY_COPY_PYOBJECT_PTR(src, &src_ref); - - src += src_stride; - dst += dst_stride; - --N; - } -} - -/* Copies references from src to dst */ -static void -_strided_to_strided_copy_references(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - PyObject *src_ref = NULL, *dst_ref = NULL; - while (N > 0) { - NPY_COPY_PYOBJECT_PTR(&src_ref, src); - NPY_COPY_PYOBJECT_PTR(&dst_ref, dst); - - /* Release the reference in dst */ - NPY_DT_DBG_REFTRACE("dec dst ref", dst_ref); - Py_XDECREF(dst_ref); - /* Copy the reference */ - NPY_DT_DBG_REFTRACE("copy src ref", src_ref); - NPY_COPY_PYOBJECT_PTR(dst, &src_ref); - /* Claim the reference */ - Py_XINCREF(src_ref); - - src += src_stride; - dst += dst_stride; - --N; - } -} - -/************************** ZERO-PADDED COPY ******************************/ - -typedef void (*free_strided_transfer_data)(void *); -typedef void *(*copy_strided_transfer_data)(void *); - -/* Does a zero-padded copy */ -typedef struct { - free_strided_transfer_data freefunc; - 
copy_strided_transfer_data copyfunc; - npy_intp dst_itemsize; -} _strided_zero_pad_data; - -/* zero-padded data copy function */ -void *_strided_zero_pad_data_copy(void *data) -{ - _strided_zero_pad_data *newdata = - (_strided_zero_pad_data *)PyArray_malloc( - sizeof(_strided_zero_pad_data)); - if (newdata == NULL) { - return NULL; - } - - memcpy(newdata, data, sizeof(_strided_zero_pad_data)); - - return newdata; -} - -/* - * Does a strided to strided zero-padded copy for the case where - * dst_itemsize > src_itemsize - */ -static void -_strided_to_strided_zero_pad_copy(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _strided_zero_pad_data *d = (_strided_zero_pad_data *)data; - npy_intp dst_itemsize = d->dst_itemsize; - npy_intp zero_size = dst_itemsize-src_itemsize; - - while (N > 0) { - memcpy(dst, src, src_itemsize); - memset(dst + src_itemsize, 0, zero_size); - src += src_stride; - dst += dst_stride; - --N; - } -} - -/* - * Does a strided to strided zero-padded copy for the case where - * dst_itemsize < src_itemsize - */ -static void -_strided_to_strided_truncate_copy(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _strided_zero_pad_data *d = (_strided_zero_pad_data *)data; - npy_intp dst_itemsize = d->dst_itemsize; - - while (N > 0) { - memcpy(dst, src, dst_itemsize); - src += src_stride; - dst += dst_stride; - --N; - } -} - -NPY_NO_EXPORT int -PyArray_GetStridedZeroPadCopyFn(int aligned, - npy_intp src_stride, npy_intp dst_stride, - npy_intp src_itemsize, npy_intp dst_itemsize, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata) -{ - if (src_itemsize == dst_itemsize) { - *out_stransfer = PyArray_GetStridedCopyFn(aligned, src_stride, - dst_stride, src_itemsize); - *out_transferdata = NULL; - return (*out_stransfer == NULL) ? 
NPY_FAIL : NPY_SUCCEED; - } - else { - _strided_zero_pad_data *d = PyArray_malloc( - sizeof(_strided_zero_pad_data)); - if (d == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - d->dst_itemsize = dst_itemsize; - d->freefunc = &PyArray_free; - d->copyfunc = &_strided_zero_pad_data_copy; - - if (src_itemsize < dst_itemsize) { - *out_stransfer = &_strided_to_strided_zero_pad_copy; - } - else { - *out_stransfer = &_strided_to_strided_truncate_copy; - } - - *out_transferdata = d; - return NPY_SUCCEED; - } -} - -/***************** WRAP ALIGNED CONTIGUOUS TRANSFER FUNCTION **************/ - -/* Wraps a transfer function + data in alignment code */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - PyArray_StridedTransferFn *wrapped, - *tobuffer, *frombuffer; - void *wrappeddata, *todata, *fromdata; - npy_intp src_itemsize, dst_itemsize; - char *bufferin, *bufferout; -} _align_wrap_data; - -/* transfer data free function */ -void _align_wrap_data_free(void *data) -{ - _align_wrap_data *d = (_align_wrap_data *)data; - PyArray_FreeStridedTransferData(d->wrappeddata); - PyArray_FreeStridedTransferData(d->todata); - PyArray_FreeStridedTransferData(d->fromdata); - PyArray_free(data); -} - -/* transfer data copy function */ -void *_align_wrap_data_copy(void *data) -{ - _align_wrap_data *d = (_align_wrap_data *)data; - _align_wrap_data *newdata; - npy_intp basedatasize, datasize; - - /* Round up the structure size to 16-byte boundary */ - basedatasize = (sizeof(_align_wrap_data)+15)&(-0x10); - /* Add space for two low level buffers */ - datasize = basedatasize + - NPY_LOWLEVEL_BUFFER_BLOCKSIZE*d->src_itemsize + - NPY_LOWLEVEL_BUFFER_BLOCKSIZE*d->dst_itemsize; - - /* Allocate the data, and populate it */ - newdata = (_align_wrap_data *)PyArray_malloc(datasize); - if (newdata == NULL) { - return NULL; - } - memcpy(newdata, data, basedatasize); - newdata->bufferin = (char *)newdata + basedatasize; - newdata->bufferout = 
newdata->bufferin + - NPY_LOWLEVEL_BUFFER_BLOCKSIZE*newdata->src_itemsize; - if (newdata->wrappeddata != NULL) { - newdata->wrappeddata = - PyArray_CopyStridedTransferData(d->wrappeddata); - if (newdata->wrappeddata == NULL) { - PyArray_free(newdata); - return NULL; - } - } - if (newdata->todata != NULL) { - newdata->todata = PyArray_CopyStridedTransferData(d->todata); - if (newdata->todata == NULL) { - PyArray_FreeStridedTransferData(newdata->wrappeddata); - PyArray_free(newdata); - return NULL; - } - } - if (newdata->fromdata != NULL) { - newdata->fromdata = PyArray_CopyStridedTransferData(d->fromdata); - if (newdata->fromdata == NULL) { - PyArray_FreeStridedTransferData(newdata->wrappeddata); - PyArray_FreeStridedTransferData(newdata->todata); - PyArray_free(newdata); - return NULL; - } - } - - return (void *)newdata; -} - -static void -_strided_to_strided_contig_align_wrap(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _align_wrap_data *d = (_align_wrap_data *)data; - PyArray_StridedTransferFn *wrapped = d->wrapped, - *tobuffer = d->tobuffer, - *frombuffer = d->frombuffer; - npy_intp dst_itemsize = d->dst_itemsize; - void *wrappeddata = d->wrappeddata, - *todata = d->todata, - *fromdata = d->fromdata; - char *bufferin = d->bufferin, *bufferout = d->bufferout; - - for(;;) { - if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) { - tobuffer(bufferin, src_itemsize, src, src_stride, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - src_itemsize, todata); - wrapped(bufferout, dst_itemsize, bufferin, src_itemsize, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - src_itemsize, wrappeddata); - frombuffer(dst, dst_stride, bufferout, dst_itemsize, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - dst_itemsize, fromdata); - N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE; - src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride; - dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride; - } - else { - tobuffer(bufferin, src_itemsize, src, src_stride, N, - src_itemsize, todata); - 
wrapped(bufferout, dst_itemsize, bufferin, src_itemsize, N, - src_itemsize, wrappeddata); - frombuffer(dst, dst_stride, bufferout, dst_itemsize, N, - dst_itemsize, fromdata); - return; - } - } -} - -static void -_strided_to_strided_contig_align_wrap_init_dest(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _align_wrap_data *d = (_align_wrap_data *)data; - PyArray_StridedTransferFn *wrapped = d->wrapped, - *tobuffer = d->tobuffer, - *frombuffer = d->frombuffer; - npy_intp dst_itemsize = d->dst_itemsize; - void *wrappeddata = d->wrappeddata, - *todata = d->todata, - *fromdata = d->fromdata; - char *bufferin = d->bufferin, *bufferout = d->bufferout; - - for(;;) { - if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) { - tobuffer(bufferin, src_itemsize, src, src_stride, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - src_itemsize, todata); - memset(bufferout, 0, dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE); - wrapped(bufferout, dst_itemsize, bufferin, src_itemsize, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - src_itemsize, wrappeddata); - frombuffer(dst, dst_stride, bufferout, dst_itemsize, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - dst_itemsize, fromdata); - N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE; - src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride; - dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride; - } - else { - tobuffer(bufferin, src_itemsize, src, src_stride, N, - src_itemsize, todata); - memset(bufferout, 0, dst_itemsize*N); - wrapped(bufferout, dst_itemsize, bufferin, src_itemsize, N, - src_itemsize, wrappeddata); - frombuffer(dst, dst_stride, bufferout, dst_itemsize, N, - dst_itemsize, fromdata); - return; - } - } -} - -/* - * Wraps an aligned contig to contig transfer function between either - * copies or byte swaps to temporary buffers. - * - * src_itemsize/dst_itemsize - The sizes of the src and dst datatypes. - * tobuffer - copy/swap function from src to an aligned contiguous buffer. 
- * todata - data for tobuffer - * frombuffer - copy/swap function from an aligned contiguous buffer to dst. - * fromdata - data for frombuffer - * wrapped - contig to contig transfer function being wrapped - * wrappeddata - data for wrapped - * init_dest - 1 means to memset the dest buffer to 0 before calling wrapped. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -NPY_NO_EXPORT int -wrap_aligned_contig_transfer_function( - npy_intp src_itemsize, npy_intp dst_itemsize, - PyArray_StridedTransferFn *tobuffer, void *todata, - PyArray_StridedTransferFn *frombuffer, void *fromdata, - PyArray_StridedTransferFn *wrapped, void *wrappeddata, - int init_dest, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata) -{ - _align_wrap_data *data; - npy_intp basedatasize, datasize; - - /* Round up the structure size to 16-byte boundary */ - basedatasize = (sizeof(_align_wrap_data)+15)&(-0x10); - /* Add space for two low level buffers */ - datasize = basedatasize + - NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize + - NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_itemsize; - - /* Allocate the data, and populate it */ - data = (_align_wrap_data *)PyArray_malloc(datasize); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - data->freefunc = &_align_wrap_data_free; - data->copyfunc = &_align_wrap_data_copy; - data->tobuffer = tobuffer; - data->todata = todata; - data->frombuffer = frombuffer; - data->fromdata = fromdata; - data->wrapped = wrapped; - data->wrappeddata = wrappeddata; - data->src_itemsize = src_itemsize; - data->dst_itemsize = dst_itemsize; - data->bufferin = (char *)data + basedatasize; - data->bufferout = data->bufferin + - NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize; - - /* Set the function and data */ - if (init_dest) { - *out_stransfer = &_strided_to_strided_contig_align_wrap_init_dest; - } - else { - *out_stransfer = &_strided_to_strided_contig_align_wrap; - } - *out_transferdata = data; - - return NPY_SUCCEED; -} - -/*************************** WRAP 
DTYPE COPY/SWAP *************************/ -/* Wraps the dtype copy swap function */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - PyArray_CopySwapNFunc *copyswapn; - int swap; - PyArrayObject *arr; -} _wrap_copy_swap_data; - -/* wrap copy swap data free function */ -void _wrap_copy_swap_data_free(void *data) -{ - _wrap_copy_swap_data *d = (_wrap_copy_swap_data *)data; - Py_DECREF(d->arr); - PyArray_free(data); -} - -/* wrap copy swap data copy function */ -void *_wrap_copy_swap_data_copy(void *data) -{ - _wrap_copy_swap_data *newdata = - (_wrap_copy_swap_data *)PyArray_malloc(sizeof(_wrap_copy_swap_data)); - if (newdata == NULL) { - return NULL; - } - - memcpy(newdata, data, sizeof(_wrap_copy_swap_data)); - Py_INCREF(newdata->arr); - - return (void *)newdata; -} - -static void -_strided_to_strided_wrap_copy_swap(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _wrap_copy_swap_data *d = (_wrap_copy_swap_data *)data; - - d->copyswapn(dst, dst_stride, src, src_stride, N, d->swap, d->arr); -} - -/* This only gets used for custom data types */ -static int -wrap_copy_swap_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *dtype, - int should_swap, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata) -{ - _wrap_copy_swap_data *data; - npy_intp shape = 1; - - /* Allocate the data for the copy swap */ - data = (_wrap_copy_swap_data *)PyArray_malloc(sizeof(_wrap_copy_swap_data)); - if (data == NULL) { - PyErr_NoMemory(); - *out_stransfer = NULL; - *out_transferdata = NULL; - return NPY_FAIL; - } - - data->freefunc = &_wrap_copy_swap_data_free; - data->copyfunc = &_wrap_copy_swap_data_copy; - data->copyswapn = dtype->f->copyswapn; - data->swap = should_swap; - - /* - * TODO: This is a hack so the copyswap functions have an array. - * The copyswap functions shouldn't need that. 
- */ - Py_INCREF(dtype); - data->arr = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, - 1, &shape, NULL, NULL, 0, NULL); - if (data->arr == NULL) { - PyArray_free(data); - return NPY_FAIL; - } - - *out_stransfer = &_strided_to_strided_wrap_copy_swap; - *out_transferdata = data; - - return NPY_SUCCEED; -} - -/*************************** DTYPE CAST FUNCTIONS *************************/ - -/* Does a simple aligned cast */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - PyArray_VectorUnaryFunc *castfunc; - PyArrayObject *aip, *aop; -} _strided_cast_data; - -/* strided cast data free function */ -void _strided_cast_data_free(void *data) -{ - _strided_cast_data *d = (_strided_cast_data *)data; - Py_DECREF(d->aip); - Py_DECREF(d->aop); - PyArray_free(data); -} - -/* strided cast data copy function */ -void *_strided_cast_data_copy(void *data) -{ - _strided_cast_data *newdata = - (_strided_cast_data *)PyArray_malloc(sizeof(_strided_cast_data)); - if (newdata == NULL) { - return NULL; - } - - memcpy(newdata, data, sizeof(_strided_cast_data)); - Py_INCREF(newdata->aip); - Py_INCREF(newdata->aop); - - return (void *)newdata; -} - -static void -_aligned_strided_to_strided_cast(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _strided_cast_data *d = (_strided_cast_data *)data; - PyArray_VectorUnaryFunc *castfunc = d->castfunc; - PyArrayObject *aip = d->aip, *aop = d->aop; - - while (N > 0) { - castfunc(src, dst, 1, aip, aop); - dst += dst_stride; - src += src_stride; - --N; - } -} - -/* This one requires src be of type NPY_OBJECT */ -static void -_aligned_strided_to_strided_cast_decref_src(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _strided_cast_data *d = (_strided_cast_data *)data; - PyArray_VectorUnaryFunc *castfunc = d->castfunc; - PyArrayObject *aip = d->aip, *aop 
= d->aop; - PyObject *src_ref; - - while (N > 0) { - castfunc(src, dst, 1, aip, aop); - - /* After casting, decrement the source ref */ - NPY_COPY_PYOBJECT_PTR(&src_ref, src); - NPY_DT_DBG_REFTRACE("dec src ref (cast object -> not object)", src_ref); - Py_XDECREF(src_ref); - - dst += dst_stride; - src += src_stride; - --N; - } -} - -static void -_aligned_contig_to_contig_cast(char *dst, npy_intp NPY_UNUSED(dst_stride), - char *src, npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(itemsize), - void *data) -{ - _strided_cast_data *d = (_strided_cast_data *)data; - - d->castfunc(src, dst, N, d->aip, d->aop); -} - -static int -get_nbo_cast_numeric_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - int src_type_num, int dst_type_num, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata) -{ - /* Emit a warning if complex imaginary is being cast away */ - if (PyTypeNum_ISCOMPLEX(src_type_num) && - !PyTypeNum_ISCOMPLEX(dst_type_num) && - !PyTypeNum_ISBOOL(dst_type_num)) { - PyObject *cls = NULL, *obj = NULL; - int ret; - obj = PyImport_ImportModule("numpy.core"); - if (obj) { - cls = PyObject_GetAttrString(obj, "ComplexWarning"); - Py_DECREF(obj); - } -#if PY_VERSION_HEX >= 0x02050000 - ret = PyErr_WarnEx(cls, - "Casting complex values to real discards " - "the imaginary part", 1); -#else - ret = PyErr_Warn(cls, - "Casting complex values to real discards " - "the imaginary part"); -#endif - Py_XDECREF(cls); - if (ret < 0) { - return NPY_FAIL; - } - } - - *out_stransfer = PyArray_GetStridedNumericCastFn(aligned, - src_stride, dst_stride, - src_type_num, dst_type_num); - *out_transferdata = NULL; - if (*out_stransfer == NULL) { - PyErr_SetString(PyExc_ValueError, - "unexpected error in GetStridedNumericCastFn"); - return NPY_FAIL; - } - - return NPY_SUCCEED; -} - -static int -get_nbo_cast_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr 
*dst_dtype, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api, - int *out_needs_wrap) -{ - _strided_cast_data *data; - PyArray_VectorUnaryFunc *castfunc; - PyArray_Descr *tmp_dtype; - npy_intp shape = 1, src_itemsize = src_dtype->elsize, - dst_itemsize = dst_dtype->elsize; - - if (PyTypeNum_ISNUMBER(src_dtype->type_num) && - PyTypeNum_ISNUMBER(dst_dtype->type_num)) { - *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) || - !PyArray_ISNBO(dst_dtype->byteorder); - return get_nbo_cast_numeric_transfer_function(aligned, - src_stride, dst_stride, - src_dtype->type_num, dst_dtype->type_num, - out_stransfer, out_transferdata); - } - - *out_needs_wrap = !aligned || - !PyArray_ISNBO(src_dtype->byteorder) || - !PyArray_ISNBO(dst_dtype->byteorder); - - /* Check the data types whose casting functions use API calls */ - switch (src_dtype->type_num) { - case NPY_OBJECT: - case NPY_STRING: - case NPY_UNICODE: - case NPY_VOID: - if (out_needs_api) { - *out_needs_api = 1; - } - break; - } - switch (dst_dtype->type_num) { - case NPY_OBJECT: - case NPY_STRING: - case NPY_UNICODE: - case NPY_VOID: - if (out_needs_api) { - *out_needs_api = 1; - } - break; - } - - /* Get the cast function */ - castfunc = PyArray_GetCastFunc(src_dtype, dst_dtype->type_num); - if (!castfunc) { - *out_stransfer = NULL; - *out_transferdata = NULL; - return NPY_FAIL; - } - - /* Allocate the data for the casting */ - data = (_strided_cast_data *)PyArray_malloc(sizeof(_strided_cast_data)); - if (data == NULL) { - PyErr_NoMemory(); - *out_stransfer = NULL; - *out_transferdata = NULL; - return NPY_FAIL; - } - data->freefunc = &_strided_cast_data_free; - data->copyfunc = &_strided_cast_data_copy; - data->castfunc = castfunc; - /* - * TODO: This is a hack so the cast functions have an array. - * The cast functions shouldn't need that. Also, since we - * always handle byte order conversions, this array should - * have native byte order. 
- */ - if (PyArray_ISNBO(src_dtype->byteorder)) { - tmp_dtype = src_dtype; - Py_INCREF(tmp_dtype); - } - else { - tmp_dtype = PyArray_DescrNewByteorder(src_dtype, NPY_NATIVE); - if (tmp_dtype == NULL) { - PyArray_free(data); - return NPY_FAIL; - } - } - data->aip = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, tmp_dtype, - 1, &shape, NULL, NULL, 0, NULL); - if (data->aip == NULL) { - PyArray_free(data); - return NPY_FAIL; - } - /* - * TODO: This is a hack so the cast functions have an array. - * The cast functions shouldn't need that. Also, since we - * always handle byte order conversions, this array should - * have native byte order. - */ - if (PyArray_ISNBO(dst_dtype->byteorder)) { - tmp_dtype = dst_dtype; - Py_INCREF(tmp_dtype); - } - else { - tmp_dtype = PyArray_DescrNewByteorder(dst_dtype, NPY_NATIVE); - if (tmp_dtype == NULL) { - Py_DECREF(data->aip); - PyArray_free(data); - return NPY_FAIL; - } - } - data->aop = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, tmp_dtype, - 1, &shape, NULL, NULL, 0, NULL); - if (data->aop == NULL) { - Py_DECREF(data->aip); - PyArray_free(data); - return NPY_FAIL; - } - - /* If it's aligned and all native byte order, we're all done */ - if (move_references && src_dtype->type_num == NPY_OBJECT) { - *out_stransfer = _aligned_strided_to_strided_cast_decref_src; - } - else { - /* - * Use the contig version if the strides are contiguous or - * we're telling the caller to wrap the return, because - * the wrapping uses a contiguous buffer. 
- */ - if ((src_stride == src_itemsize && dst_stride == dst_itemsize) || - *out_needs_wrap) { - *out_stransfer = _aligned_contig_to_contig_cast; - } - else { - *out_stransfer = _aligned_strided_to_strided_cast; - } - } - *out_transferdata = data; - - return NPY_SUCCEED; -} - -static int -get_cast_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyArray_StridedTransferFn *caststransfer; - void *castdata, *todata = NULL, *fromdata = NULL; - int needs_wrap = 0; - npy_intp src_itemsize = src_dtype->elsize, - dst_itemsize = dst_dtype->elsize; - - if (src_dtype->type_num == dst_dtype->type_num) { - PyErr_SetString(PyExc_ValueError, - "low level cast function is for unequal type numbers"); - return NPY_FAIL; - } - - if (get_nbo_cast_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - &caststransfer, - &castdata, - out_needs_api, - &needs_wrap) != NPY_SUCCEED) { - return NPY_FAIL; - } - - /* - * If all native byte order and doesn't need alignment wrapping, - * return the function - */ - if (!needs_wrap) { - *out_stransfer = caststransfer; - *out_transferdata = castdata; - - return NPY_SUCCEED; - } - /* Otherwise, we have to copy and/or swap to aligned temporaries */ - else { - PyArray_StridedTransferFn *tobuffer, *frombuffer; - - /* Get the copy/swap operation from src */ - - /* If it's a custom data type, wrap its copy swap function */ - if (src_dtype->type_num >= NPY_NTYPES) { - tobuffer = NULL; - wrap_copy_swap_function(aligned, - src_stride, src_itemsize, - src_dtype, - !PyArray_ISNBO(src_dtype->byteorder), - &tobuffer, &todata); - } - /* A straight copy */ - else if (src_itemsize == 1 || PyArray_ISNBO(src_dtype->byteorder)) { - tobuffer = PyArray_GetStridedCopyFn(aligned, - src_stride, src_itemsize, - src_itemsize); - } - /* 
If it's not complex, one swap */ - else if(src_dtype->kind != 'c') { - tobuffer = PyArray_GetStridedCopySwapFn(aligned, - src_stride, src_itemsize, - src_itemsize); - } - /* If complex, a paired swap */ - else { - tobuffer = PyArray_GetStridedCopySwapPairFn(aligned, - src_stride, src_itemsize, - src_itemsize); - } - - /* Get the copy/swap operation to dst */ - - /* If it's a custom data type, wrap its copy swap function */ - if (dst_dtype->type_num >= NPY_NTYPES) { - frombuffer = NULL; - wrap_copy_swap_function(aligned, - dst_itemsize, dst_stride, - dst_dtype, - !PyArray_ISNBO(dst_dtype->byteorder), - &frombuffer, &fromdata); - } - /* A straight copy */ - else if (dst_itemsize == 1 || PyArray_ISNBO(dst_dtype->byteorder)) { - if (dst_dtype->type_num == NPY_OBJECT) { - frombuffer = &_strided_to_strided_move_references; - } - else { - frombuffer = PyArray_GetStridedCopyFn(aligned, - dst_itemsize, dst_stride, - dst_itemsize); - } - } - /* If it's not complex, one swap */ - else if(dst_dtype->kind != 'c') { - frombuffer = PyArray_GetStridedCopySwapFn(aligned, - dst_itemsize, dst_stride, - dst_itemsize); - } - /* If complex, a paired swap */ - else { - frombuffer = PyArray_GetStridedCopySwapPairFn(aligned, - dst_itemsize, dst_stride, - dst_itemsize); - } - - if (frombuffer == NULL || tobuffer == NULL) { - PyArray_FreeStridedTransferData(castdata); - PyArray_FreeStridedTransferData(todata); - PyArray_FreeStridedTransferData(fromdata); - return NPY_FAIL; - } - - *out_stransfer = caststransfer; - - /* Wrap it all up in a new transfer function + data */ - if (wrap_aligned_contig_transfer_function( - src_itemsize, dst_itemsize, - tobuffer, todata, - frombuffer, fromdata, - caststransfer, castdata, - PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT), - out_stransfer, out_transferdata) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(castdata); - PyArray_FreeStridedTransferData(todata); - PyArray_FreeStridedTransferData(fromdata); - return NPY_FAIL; - } - - return NPY_SUCCEED; - 
} -} - -/**************************** COPY 1 TO N CONTIGUOUS ************************/ - -/* Copies 1 element to N contiguous elements */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - PyArray_StridedTransferFn *stransfer; - void *data; - npy_intp N, dst_itemsize; - /* If this is non-NULL the source type has references needing a decref */ - PyArray_StridedTransferFn *stransfer_finish_src; - void *data_finish_src; -} _one_to_n_data; - -/* transfer data free function */ -void _one_to_n_data_free(void *data) -{ - _one_to_n_data *d = (_one_to_n_data *)data; - PyArray_FreeStridedTransferData(d->data); - PyArray_FreeStridedTransferData(d->data_finish_src); - PyArray_free(data); -} - -/* transfer data copy function */ -void *_one_to_n_data_copy(void *data) -{ - _one_to_n_data *d = (_one_to_n_data *)data; - _one_to_n_data *newdata; - - /* Allocate the data, and populate it */ - newdata = (_one_to_n_data *)PyArray_malloc(sizeof(_one_to_n_data)); - if (newdata == NULL) { - return NULL; - } - memcpy(newdata, data, sizeof(_one_to_n_data)); - if (d->data != NULL) { - newdata->data = PyArray_CopyStridedTransferData(d->data); - if (newdata->data == NULL) { - PyArray_free(newdata); - return NULL; - } - } - if (d->data_finish_src != NULL) { - newdata->data_finish_src = - PyArray_CopyStridedTransferData(d->data_finish_src); - if (newdata->data_finish_src == NULL) { - PyArray_FreeStridedTransferData(newdata->data); - PyArray_free(newdata); - return NULL; - } - } - - return (void *)newdata; -} - -static void -_strided_to_strided_one_to_n(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _one_to_n_data *d = (_one_to_n_data *)data; - PyArray_StridedTransferFn *subtransfer = d->stransfer; - void *subdata = d->data; - npy_intp subN = d->N, dst_itemsize = d->dst_itemsize; - - while (N > 0) { - subtransfer(dst, dst_itemsize, - src, 0, - subN, src_itemsize, - subdata); - 
- src += src_stride; - dst += dst_stride; - --N; - } -} - -static void -_strided_to_strided_one_to_n_with_finish(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _one_to_n_data *d = (_one_to_n_data *)data; - PyArray_StridedTransferFn *subtransfer = d->stransfer, - *stransfer_finish_src = d->stransfer_finish_src; - void *subdata = d->data, *data_finish_src = data_finish_src; - npy_intp subN = d->N, dst_itemsize = d->dst_itemsize; - - while (N > 0) { - subtransfer(dst, dst_itemsize, - src, 0, - subN, src_itemsize, - subdata); - - - stransfer_finish_src(NULL, 0, - src, 0, - 1, src_itemsize, - data_finish_src); - - src += src_stride; - dst += dst_stride; - --N; - } -} - -/* - * Wraps a transfer function to produce one that copies one element - * of src to N contiguous elements of dst. If stransfer_finish_src is - * not NULL, it should be a transfer function which just affects - * src, for example to do a final DECREF operation for references. 
- */ -static int -wrap_transfer_function_one_to_n( - PyArray_StridedTransferFn *stransfer_inner, - void *data_inner, - PyArray_StridedTransferFn *stransfer_finish_src, - void *data_finish_src, - npy_intp dst_itemsize, - npy_intp N, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata) -{ - _one_to_n_data *data; - - - data = PyArray_malloc(sizeof(_one_to_n_data)); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - - data->freefunc = &_one_to_n_data_free; - data->copyfunc = &_one_to_n_data_copy; - data->stransfer = stransfer_inner; - data->data = data_inner; - data->stransfer_finish_src = stransfer_finish_src; - data->data_finish_src = data_finish_src; - data->N = N; - data->dst_itemsize = dst_itemsize; - - if (stransfer_finish_src == NULL) { - *out_stransfer = &_strided_to_strided_one_to_n; - } - else { - *out_stransfer = &_strided_to_strided_one_to_n_with_finish; - } - *out_transferdata = data; - - return NPY_SUCCEED; -} - -static int -get_one_to_n_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - npy_intp N, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyArray_StridedTransferFn *stransfer, *stransfer_finish_src = NULL; - void *data, *data_finish_src = NULL; - - /* - * move_references is set to 0, handled in the wrapping transfer fn, - * src_stride is set to zero, because its 1 to N copying, - * and dst_stride is set to contiguous, because subarrays are always - * contiguous. 
- */ - if (PyArray_GetDTypeTransferFunction(aligned, - 0, dst_dtype->elsize, - src_dtype, dst_dtype, - 0, - &stransfer, &data, - out_needs_api) != NPY_SUCCEED) { - return NPY_FAIL; - } - - /* If the src object will need a DECREF, set src_dtype */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - if (get_decsrcref_transfer_function(aligned, - src_stride, - src_dtype, - &stransfer_finish_src, - &data_finish_src, - out_needs_api) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(data); - return NPY_FAIL; - } - } - - if (wrap_transfer_function_one_to_n(stransfer, data, - stransfer_finish_src, data_finish_src, - dst_dtype->elsize, - N, - out_stransfer, out_transferdata) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(data); - PyArray_FreeStridedTransferData(data_finish_src); - return NPY_FAIL; - } - - return NPY_SUCCEED; -} - -/**************************** COPY N TO N CONTIGUOUS ************************/ - -/* Copies N contiguous elements to N contiguous elements */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - PyArray_StridedTransferFn *stransfer; - void *data; - npy_intp N, src_itemsize, dst_itemsize; -} _n_to_n_data; - -/* transfer data free function */ -void _n_to_n_data_free(void *data) -{ - _n_to_n_data *d = (_n_to_n_data *)data; - PyArray_FreeStridedTransferData(d->data); - PyArray_free(data); -} - -/* transfer data copy function */ -void *_n_to_n_data_copy(void *data) -{ - _n_to_n_data *d = (_n_to_n_data *)data; - _n_to_n_data *newdata; - - /* Allocate the data, and populate it */ - newdata = (_n_to_n_data *)PyArray_malloc(sizeof(_n_to_n_data)); - if (newdata == NULL) { - return NULL; - } - memcpy(newdata, data, sizeof(_n_to_n_data)); - if (newdata->data != NULL) { - newdata->data = PyArray_CopyStridedTransferData(d->data); - if (newdata->data == NULL) { - PyArray_free(newdata); - return NULL; - } - } - - return (void *)newdata; -} - -static void -_strided_to_strided_n_to_n(char *dst, npy_intp 
dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *data) -{ - _n_to_n_data *d = (_n_to_n_data *)data; - PyArray_StridedTransferFn *subtransfer = d->stransfer; - void *subdata = d->data; - npy_intp subN = d->N, src_subitemsize = d->src_itemsize, - dst_subitemsize = d->dst_itemsize; - - while (N > 0) { - subtransfer(dst, dst_subitemsize, - src, src_subitemsize, - subN, src_subitemsize, - subdata); - - src += src_stride; - dst += dst_stride; - --N; - } -} - -static void -_contig_to_contig_n_to_n(char *dst, npy_intp NPY_UNUSED(dst_stride), - char *src, npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _n_to_n_data *d = (_n_to_n_data *)data; - PyArray_StridedTransferFn *subtransfer = d->stransfer; - void *subdata = d->data; - npy_intp subN = d->N, src_subitemsize = d->src_itemsize, - dst_subitemsize = d->dst_itemsize; - - subtransfer(dst, dst_subitemsize, - src, src_subitemsize, - subN*N, src_subitemsize, - subdata); -} - -/* - * Wraps a transfer function to produce one that copies N contiguous elements - * of src to N contiguous elements of dst. - */ -static int -wrap_transfer_function_n_to_n( - PyArray_StridedTransferFn *stransfer_inner, - void *data_inner, - npy_intp src_stride, npy_intp dst_stride, - npy_intp src_itemsize, npy_intp dst_itemsize, - npy_intp N, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata) -{ - _n_to_n_data *data; - - data = PyArray_malloc(sizeof(_n_to_n_data)); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - - data->freefunc = &_n_to_n_data_free; - data->copyfunc = &_n_to_n_data_copy; - data->stransfer = stransfer_inner; - data->data = data_inner; - data->N = N; - data->src_itemsize = src_itemsize; - data->dst_itemsize = dst_itemsize; - - /* - * If the N subarray elements exactly fit in the strides, - * then can do a faster contiguous transfer. 
- */ - if (src_stride == N * src_itemsize && - dst_stride == N * dst_itemsize) { - *out_stransfer = &_contig_to_contig_n_to_n; - } - else { - *out_stransfer = &_strided_to_strided_n_to_n; - } - *out_transferdata = data; - - return NPY_SUCCEED; -} - -static int -get_n_to_n_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - npy_intp N, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyArray_StridedTransferFn *stransfer; - void *data; - - /* - * src_stride and dst_stride are set to contiguous, because - * subarrays are always contiguous. - */ - if (PyArray_GetDTypeTransferFunction(aligned, - src_dtype->elsize, dst_dtype->elsize, - src_dtype, dst_dtype, - move_references, - &stransfer, &data, - out_needs_api) != NPY_SUCCEED) { - return NPY_FAIL; - } - - if (wrap_transfer_function_n_to_n(stransfer, data, - src_stride, dst_stride, - src_dtype->elsize, dst_dtype->elsize, - N, - out_stransfer, - out_transferdata) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(data); - return NPY_FAIL; - } - - return NPY_SUCCEED; -} - -/********************** COPY WITH SUBARRAY BROADCAST ************************/ - -typedef struct { - npy_intp offset, count; -} _subarray_broadcast_offsetrun; - -/* Copies element with subarray broadcasting */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - PyArray_StridedTransferFn *stransfer; - void *data; - npy_intp src_N, dst_N, src_itemsize, dst_itemsize; - PyArray_StridedTransferFn *stransfer_decsrcref; - void *data_decsrcref; - PyArray_StridedTransferFn *stransfer_decdstref; - void *data_decdstref; - /* This gets a run-length encoded representation of the transfer */ - npy_intp run_count; - _subarray_broadcast_offsetrun offsetruns; -} _subarray_broadcast_data; - -/* transfer data free function */ -void _subarray_broadcast_data_free(void *data) -{ 
- _subarray_broadcast_data *d = (_subarray_broadcast_data *)data; - PyArray_FreeStridedTransferData(d->data); - PyArray_FreeStridedTransferData(d->data_decsrcref); - PyArray_FreeStridedTransferData(d->data_decdstref); - PyArray_free(data); -} - -/* transfer data copy function */ -void *_subarray_broadcast_data_copy( void *data) -{ - _subarray_broadcast_data *d = (_subarray_broadcast_data *)data; - _subarray_broadcast_data *newdata; - npy_intp run_count = d->run_count, structsize; - - structsize = sizeof(_subarray_broadcast_data) + - run_count*sizeof(_subarray_broadcast_offsetrun); - - /* Allocate the data and populate it */ - newdata = (_subarray_broadcast_data *)PyArray_malloc(structsize); - if (newdata == NULL) { - return NULL; - } - memcpy(newdata, data, structsize); - if (d->data != NULL) { - newdata->data = PyArray_CopyStridedTransferData(d->data); - if (newdata->data == NULL) { - PyArray_free(newdata); - return NULL; - } - } - if (d->data_decsrcref != NULL) { - newdata->data_decsrcref = - PyArray_CopyStridedTransferData(d->data_decsrcref); - if (newdata->data_decsrcref == NULL) { - PyArray_FreeStridedTransferData(newdata->data); - PyArray_free(newdata); - return NULL; - } - } - if (d->data_decdstref != NULL) { - newdata->data_decdstref = - PyArray_CopyStridedTransferData(d->data_decdstref); - if (newdata->data_decdstref == NULL) { - PyArray_FreeStridedTransferData(newdata->data); - PyArray_FreeStridedTransferData(newdata->data_decsrcref); - PyArray_free(newdata); - return NULL; - } - } - - return newdata; -} - -static void -_strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _subarray_broadcast_data *d = (_subarray_broadcast_data *)data; - PyArray_StridedTransferFn *subtransfer = d->stransfer; - void *subdata = d->data; - npy_intp run, run_count = d->run_count, - src_subitemsize = d->src_itemsize, - dst_subitemsize = d->dst_itemsize; - 
npy_intp loop_index, offset, count; - char *dst_ptr; - _subarray_broadcast_offsetrun *offsetruns = &d->offsetruns; - - while (N > 0) { - loop_index = 0; - for (run = 0; run < run_count; ++run) { - offset = offsetruns[run].offset; - count = offsetruns[run].count; - dst_ptr = dst + loop_index*dst_subitemsize; - if (offset != -1) { - subtransfer(dst_ptr, dst_subitemsize, - src + offset, src_subitemsize, - count, src_subitemsize, - subdata); - } - else { - memset(dst_ptr, 0, count*dst_subitemsize); - } - loop_index += count; - } - - src += src_stride; - dst += dst_stride; - --N; - } -} - - -static void -_strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _subarray_broadcast_data *d = (_subarray_broadcast_data *)data; - PyArray_StridedTransferFn *subtransfer = d->stransfer; - void *subdata = d->data; - PyArray_StridedTransferFn *stransfer_decsrcref = d->stransfer_decsrcref; - void *data_decsrcref = d->data_decsrcref; - PyArray_StridedTransferFn *stransfer_decdstref = d->stransfer_decdstref; - void *data_decdstref = d->data_decdstref; - npy_intp run, run_count = d->run_count, - src_subitemsize = d->src_itemsize, - dst_subitemsize = d->dst_itemsize, - src_subN = d->src_N; - npy_intp loop_index, offset, count; - char *dst_ptr; - _subarray_broadcast_offsetrun *offsetruns = &d->offsetruns; - - while (N > 0) { - loop_index = 0; - for (run = 0; run < run_count; ++run) { - offset = offsetruns[run].offset; - count = offsetruns[run].count; - dst_ptr = dst + loop_index*dst_subitemsize; - if (offset != -1) { - subtransfer(dst_ptr, dst_subitemsize, - src + offset, src_subitemsize, - count, src_subitemsize, - subdata); - } - else { - if (stransfer_decdstref != NULL) { - stransfer_decdstref(NULL, 0, dst_ptr, dst_subitemsize, - count, dst_subitemsize, - data_decdstref); - } - memset(dst_ptr, 0, count*dst_subitemsize); - } - loop_index += count; - } - - if 
(stransfer_decsrcref != NULL) { - stransfer_decsrcref(NULL, 0, src, src_subitemsize, - src_subN, src_subitemsize, - data_decsrcref); - } - - src += src_stride; - dst += dst_stride; - --N; - } -} - - -static int -get_subarray_broadcast_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - npy_intp src_size, npy_intp dst_size, - PyArray_Dims src_shape, PyArray_Dims dst_shape, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - _subarray_broadcast_data *data; - npy_intp structsize, loop_index, run, run_size, - src_index, dst_index, i, ndim; - _subarray_broadcast_offsetrun *offsetruns; - - structsize = sizeof(_subarray_broadcast_data) + - dst_size*sizeof(_subarray_broadcast_offsetrun); - - /* Allocate the data and populate it */ - data = (_subarray_broadcast_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - - /* - * move_references is set to 0, handled in the wrapping transfer fn, - * src_stride and dst_stride are set to contiguous, as N will always - * be 1 when it's called. 
- */ - if (PyArray_GetDTypeTransferFunction(aligned, - src_dtype->elsize, dst_dtype->elsize, - src_dtype, dst_dtype, - 0, - &data->stransfer, &data->data, - out_needs_api) != NPY_SUCCEED) { - PyArray_free(data); - return NPY_FAIL; - } - data->freefunc = &_subarray_broadcast_data_free; - data->copyfunc = &_subarray_broadcast_data_copy; - data->src_N = src_size; - data->dst_N = dst_size; - data->src_itemsize = src_dtype->elsize; - data->dst_itemsize = dst_dtype->elsize; - - /* If the src object will need a DECREF */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - if (PyArray_GetDTypeTransferFunction(aligned, - src_dtype->elsize, 0, - src_dtype, NULL, - 1, - &data->stransfer_decsrcref, - &data->data_decsrcref, - out_needs_api) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(data->data); - PyArray_free(data); - return NPY_FAIL; - } - } - else { - data->stransfer_decsrcref = NULL; - data->data_decsrcref = NULL; - } - - /* If the dst object needs a DECREF to set it to NULL */ - if (PyDataType_REFCHK(dst_dtype)) { - if (PyArray_GetDTypeTransferFunction(aligned, - dst_dtype->elsize, 0, - dst_dtype, NULL, - 1, - &data->stransfer_decdstref, - &data->data_decdstref, - out_needs_api) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(data->data); - PyArray_FreeStridedTransferData(data->data_decsrcref); - PyArray_free(data); - return NPY_FAIL; - } - } - else { - data->stransfer_decdstref = NULL; - data->data_decdstref = NULL; - } - - /* Calculate the broadcasting and set the offsets */ - offsetruns = &data->offsetruns; - ndim = (src_shape.len > dst_shape.len) ? 
src_shape.len : dst_shape.len; - for (loop_index = 0; loop_index < dst_size; ++loop_index) { - npy_intp src_factor = 1; - - dst_index = loop_index; - src_index = 0; - for (i = ndim-1; i >= 0; --i) { - npy_intp coord = 0, shape; - - /* Get the dst coord of this index for dimension i */ - if (i >= ndim - dst_shape.len) { - shape = dst_shape.ptr[i-(ndim-dst_shape.len)]; - coord = dst_index % shape; - dst_index /= shape; - } - - /* Translate it into a src coord and update src_index */ - if (i >= ndim - src_shape.len) { - shape = src_shape.ptr[i-(ndim-src_shape.len)]; - if (shape == 1) { - coord = 0; - } - else { - if (coord < shape) { - src_index += src_factor*coord; - src_factor *= shape; - } - else { - /* Out of bounds, flag with -1 */ - src_index = -1; - break; - } - } - } - } - /* Set the offset */ - if (src_index == -1) { - offsetruns[loop_index].offset = -1; - } - else { - offsetruns[loop_index].offset = src_index; - } - } - - /* Run-length encode the result */ - run = 0; - run_size = 1; - for (loop_index = 1; loop_index < dst_size; ++loop_index) { - if (offsetruns[run].offset == -1) { - /* Stop the run when there's a valid index again */ - if (offsetruns[loop_index].offset != -1) { - offsetruns[run].count = run_size; - run++; - run_size = 1; - offsetruns[run].offset = offsetruns[loop_index].offset; - } - else { - run_size++; - } - } - else { - /* Stop the run when there's a valid index again */ - if (offsetruns[loop_index].offset != - offsetruns[loop_index-1].offset + 1) { - offsetruns[run].count = run_size; - run++; - run_size = 1; - offsetruns[run].offset = offsetruns[loop_index].offset; - } - else { - run_size++; - } - } - } - offsetruns[run].count = run_size; - run++; - data->run_count = run; - - /* Multiply all the offsets by the src item size */ - while (run--) { - if (offsetruns[run].offset != -1) { - offsetruns[run].offset *= src_dtype->elsize; - } - } - - if (data->stransfer_decsrcref == NULL && - data->stransfer_decdstref == NULL) { - *out_stransfer = 
&_strided_to_strided_subarray_broadcast; - } - else { - *out_stransfer = &_strided_to_strided_subarray_broadcast_withrefs; - } - *out_transferdata = data; - - return NPY_SUCCEED; -} - -/* - * Handles subarray transfer. To call this, at least one of the dtype's - * subarrays must be non-NULL - */ -static int -get_subarray_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyArray_Dims src_shape = {NULL, -1}, dst_shape = {NULL, -1}; - npy_intp src_size = 1, dst_size = 1; - - /* Get the subarray shapes and sizes */ - if (src_dtype->subarray != NULL) { - if (!(PyArray_IntpConverter(src_dtype->subarray->shape, - &src_shape))) { - PyErr_SetString(PyExc_ValueError, - "invalid subarray shape"); - return NPY_FAIL; - } - src_size = PyArray_MultiplyList(src_shape.ptr, src_shape.len); - src_dtype = src_dtype->subarray->base; - } - if (dst_dtype->subarray != NULL) { - if (!(PyArray_IntpConverter(dst_dtype->subarray->shape, - &dst_shape))) { - if (src_shape.ptr != NULL) { - PyDimMem_FREE(src_shape.ptr); - } - PyErr_SetString(PyExc_ValueError, - "invalid subarray shape"); - return NPY_FAIL; - } - dst_size = PyArray_MultiplyList(dst_shape.ptr, dst_shape.len); - dst_dtype = dst_dtype->subarray->base; - } - - /* - * Just a straight one-element copy. 
- */ - if (dst_size == 1 && src_size == 1) { - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); - - return PyArray_GetDTypeTransferFunction(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - out_stransfer, out_transferdata, - out_needs_api); - } - /* Copy the src value to all the dst values */ - else if (src_size == 1) { - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); - - return get_one_to_n_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - dst_size, - out_stransfer, out_transferdata, - out_needs_api); - } - /* If the shapes match exactly, do an n to n copy */ - else if (src_shape.len == dst_shape.len && - PyArray_CompareLists(src_shape.ptr, dst_shape.ptr, - src_shape.len)) { - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); - - return get_n_to_n_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - src_size, - out_stransfer, out_transferdata, - out_needs_api); - } - /* - * Copy the subarray with broadcasting, truncating, and zero-padding - * as necessary. 
- */ - else { - int ret = get_subarray_broadcast_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - src_size, dst_size, - src_shape, dst_shape, - move_references, - out_stransfer, out_transferdata, - out_needs_api); - - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); - return ret; - } -} - -/**************************** COPY FIELDS *******************************/ -typedef struct { - npy_intp src_offset, dst_offset, src_itemsize; - PyArray_StridedTransferFn *stransfer; - void *data; -} _single_field_transfer; - -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - npy_intp field_count; - - _single_field_transfer fields; -} _field_transfer_data; - -/* transfer data free function */ -void _field_transfer_data_free(void *data) -{ - _field_transfer_data *d = (_field_transfer_data *)data; - npy_intp i, field_count; - _single_field_transfer *fields; - - field_count = d->field_count; - fields = &d->fields; - - for (i = 0; i < field_count; ++i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(d); -} - -/* transfer data copy function */ -void *_field_transfer_data_copy(void *data) -{ - _field_transfer_data *d = (_field_transfer_data *)data; - _field_transfer_data *newdata; - npy_intp i, field_count = d->field_count, structsize; - _single_field_transfer *fields, *newfields; - - structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - - /* Allocate the data and populate it */ - newdata = (_field_transfer_data *)PyArray_malloc(structsize); - if (newdata == NULL) { - return NULL; - } - memcpy(newdata, d, structsize); - /* Copy all the fields transfer data */ - fields = &d->fields; - newfields = &newdata->fields; - for (i = 0; i < field_count; ++i) { - if (fields[i].data != NULL) { - newfields[i].data = - PyArray_CopyStridedTransferData(fields[i].data); - if (newfields[i].data == NULL) { - for (i = i-1; i >= 0; --i) { - 
PyArray_FreeStridedTransferData(newfields[i].data); - } - PyArray_free(newdata); - return NULL; - } - } - - } - - return (void *)newdata; -} - -static void -_strided_to_strided_field_transfer(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _field_transfer_data *d = (_field_transfer_data *)data; - npy_intp i, field_count = d->field_count; - _single_field_transfer *field; - - /* Do the transfer a block at a time */ - for (;;) { - field = &d->fields; - if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) { - for (i = 0; i < field_count; ++i, ++field) { - field->stransfer(dst + field->dst_offset, dst_stride, - src + field->src_offset, src_stride, - NPY_LOWLEVEL_BUFFER_BLOCKSIZE, - field->src_itemsize, - field->data); - } - N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE; - src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride; - dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride; - } - else { - for (i = 0; i < field_count; ++i, ++field) { - field->stransfer(dst + field->dst_offset, dst_stride, - src + field->src_offset, src_stride, - N, - field->src_itemsize, - field->data); - } - return; - } - } -} - -/* - * Handles fields transfer. 
To call this, at least one of the dtypes - * must have fields - */ -static int -get_fields_transfer_function(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyObject *names, *key, *tup, *title; - PyArray_Descr *src_fld_dtype, *dst_fld_dtype; - npy_int i, names_size, field_count, structsize; - int src_offset, dst_offset; - _field_transfer_data *data; - _single_field_transfer *fields; - - /* Copy the src value to all the fields of dst */ - if (!PyDescr_HASFIELDS(src_dtype)) { - names = dst_dtype->names; - names_size = PyTuple_GET_SIZE(dst_dtype->names); - - field_count = names_size; - structsize = sizeof(_field_transfer_data) + - (field_count + 1) * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ - data = (_field_transfer_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - data->freefunc = &_field_transfer_data_free; - data->copyfunc = &_field_transfer_data_copy; - fields = &data->fields; - - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dst_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, - &dst_offset, &title)) { - PyArray_free(data); - return NPY_FAIL; - } - if (PyArray_GetDTypeTransferFunction(0, - src_stride, dst_stride, - src_dtype, dst_fld_dtype, - 0, - &fields[i].stransfer, - &fields[i].data, - out_needs_api) != NPY_SUCCEED) { - for (i = i-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[i].src_offset = 0; - fields[i].dst_offset = dst_offset; - fields[i].src_itemsize = src_dtype->elsize; - } - - /* - * If the references should be removed from src, add - * another transfer function to do that. 
- */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - for (i = 0; i < field_count; ++i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = 0; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_dtype->elsize; - field_count++; - } - data->field_count = field_count; - - *out_stransfer = &_strided_to_strided_field_transfer; - *out_transferdata = data; - - return NPY_SUCCEED; - } - /* Copy the value of the first field to dst */ - else if (!PyDescr_HASFIELDS(dst_dtype)) { - names = src_dtype->names; - names_size = PyTuple_GET_SIZE(src_dtype->names); - - /* - * If DECREF is needed on source fields, may need - * to process all the fields - */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - field_count = names_size + 1; - } - else { - field_count = 1; - } - structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ - data = (_field_transfer_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - data->freefunc = &_field_transfer_data_free; - data->copyfunc = &_field_transfer_data_copy; - fields = &data->fields; - - key = PyTuple_GET_ITEM(names, 0); - tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - PyArray_free(data); - return NPY_FAIL; - } - field_count = 0; - /* - * Special case bool type, the existence of fields implies True - * - * TODO: Perhaps a better behavior would be to combine all the - * input fields with an OR? The same would apply to subarrays. 
- */ - if (dst_dtype->type_num == NPY_BOOL) { - if (get_bool_setdstone_transfer_function(dst_stride, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = 0; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = 0; - field_count++; - - /* If the src field has references, may need to clear them */ - if (move_references && PyDataType_REFCHK(src_fld_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(fields[0].data); - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_fld_dtype->elsize; - field_count++; - } - } - /* Transfer the first field to the output */ - else { - if (PyArray_GetDTypeTransferFunction(0, - src_stride, dst_stride, - src_fld_dtype, dst_dtype, - move_references, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_fld_dtype->elsize; - field_count++; - } - - /* - * If the references should be removed from src, add - * more transfer functions to decrement the references - * for all the other fields. 
- */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - for (i = 1; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - return NPY_FAIL; - } - if (PyDataType_REFCHK(src_fld_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - for (i = field_count-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_fld_dtype->elsize; - field_count++; - } - } - } - - data->field_count = field_count; - - *out_stransfer = &_strided_to_strided_field_transfer; - *out_transferdata = data; - - return NPY_SUCCEED; - } - /* Match up the fields to copy */ - else { - /* Keeps track of the names we already used */ - PyObject *used_names_dict = NULL; - - names = dst_dtype->names; - names_size = PyTuple_GET_SIZE(dst_dtype->names); - - /* - * If DECREF is needed on source fields, will need - * to also go through its fields. 
- */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - field_count = names_size + PyTuple_GET_SIZE(src_dtype->names); - used_names_dict = PyDict_New(); - if (used_names_dict == NULL) { - return NPY_FAIL; - } - } - else { - field_count = names_size; - } - structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ - data = (_field_transfer_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - data->freefunc = &_field_transfer_data_free; - data->copyfunc = &_field_transfer_data_copy; - fields = &data->fields; - - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dst_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, - &dst_offset, &title)) { - for (i = i-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - tup = PyDict_GetItem(src_dtype->fields, key); - if (tup != NULL) { - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - for (i = i-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - if (PyArray_GetDTypeTransferFunction(0, - src_stride, dst_stride, - src_fld_dtype, dst_fld_dtype, - move_references, - &fields[i].stransfer, - &fields[i].data, - out_needs_api) != NPY_SUCCEED) { - for (i = i-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - fields[i].src_offset = src_offset; - fields[i].dst_offset = dst_offset; - fields[i].src_itemsize = src_fld_dtype->elsize; - - if (used_names_dict != NULL) { - PyDict_SetItem(used_names_dict, key, Py_True); - } - } - else { - if (get_setdstzero_transfer_function(0, - dst_stride, - 
dst_fld_dtype, - &fields[i].stransfer, - &fields[i].data, - out_needs_api) != NPY_SUCCEED) { - for (i = i-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - fields[i].src_offset = 0; - fields[i].dst_offset = dst_offset; - fields[i].src_itemsize = 0; - } - } - - if (move_references && PyDataType_REFCHK(src_dtype)) { - /* Use field_count to track additional functions added */ - field_count = names_size; - - names = src_dtype->names; - names_size = PyTuple_GET_SIZE(src_dtype->names); - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - if (PyDict_GetItem(used_names_dict, key) == NULL) { - tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - for (i = field_count-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - if (PyDataType_REFCHK(src_fld_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - for (i = field_count-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = - src_fld_dtype->elsize; - field_count++; - } - } - } - } - - Py_XDECREF(used_names_dict); - - data->field_count = field_count; - - *out_stransfer = &_strided_to_strided_field_transfer; - *out_transferdata = data; - - return NPY_SUCCEED; - } -} - -static int -get_decsrcref_fields_transfer_function(int aligned, - npy_intp src_stride, - PyArray_Descr *src_dtype, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyObject *names, *key, *tup, *title; - 
PyArray_Descr *src_fld_dtype; - npy_int i, names_size, field_count, structsize; - int src_offset; - _field_transfer_data *data; - _single_field_transfer *fields; - - names = src_dtype->names; - names_size = PyTuple_GET_SIZE(src_dtype->names); - - field_count = names_size; - structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ - data = (_field_transfer_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - data->freefunc = &_field_transfer_data_free; - data->copyfunc = &_field_transfer_data_copy; - fields = &data->fields; - - field_count = 0; - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - PyArray_free(data); - return NPY_FAIL; - } - if (PyDataType_REFCHK(src_fld_dtype)) { - if (out_needs_api) { - *out_needs_api = 1; - } - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - for (i = field_count-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_dtype->elsize; - field_count++; - } - } - - data->field_count = field_count; - - *out_stransfer = &_strided_to_strided_field_transfer; - *out_transferdata = data; - - return NPY_SUCCEED; -} - -static int -get_setdestzero_fields_transfer_function(int aligned, - npy_intp dst_stride, - PyArray_Descr *dst_dtype, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - PyObject *names, *key, *tup, *title; - PyArray_Descr *dst_fld_dtype; - npy_int i, names_size, field_count, structsize; - int dst_offset; - 
_field_transfer_data *data; - _single_field_transfer *fields; - - names = dst_dtype->names; - names_size = PyTuple_GET_SIZE(dst_dtype->names); - - field_count = names_size; - structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ - data = (_field_transfer_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - data->freefunc = &_field_transfer_data_free; - data->copyfunc = &_field_transfer_data_copy; - fields = &data->fields; - - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dst_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, - &dst_offset, &title)) { - PyArray_free(data); - return NPY_FAIL; - } - if (get_setdstzero_transfer_function(0, - dst_stride, - dst_fld_dtype, - &fields[i].stransfer, - &fields[i].data, - out_needs_api) != NPY_SUCCEED) { - for (i = i-1; i >= 0; --i) { - PyArray_FreeStridedTransferData(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[i].src_offset = 0; - fields[i].dst_offset = dst_offset; - fields[i].src_itemsize = 0; - } - - data->field_count = field_count; - - *out_stransfer = &_strided_to_strided_field_transfer; - *out_transferdata = data; - - return NPY_SUCCEED; -} - -/************************* DEST BOOL SETONE *******************************/ - -static void -_null_to_strided_set_bool_one(char *dst, - npy_intp dst_stride, - char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ - /* bool type is one byte, so can just use the char */ - - while (N > 0) { - *dst = 1; - - dst += dst_stride; - --N; - } -} - -static void -_null_to_contig_set_bool_one(char *dst, - npy_intp NPY_UNUSED(dst_stride), - char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ - /* bool type is 
one byte, so can just use the char */ - - memset(dst, 1, N); -} - -/* Only for the bool type, sets the destination to 1 */ -NPY_NO_EXPORT int -get_bool_setdstone_transfer_function(npy_intp dst_stride, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *NPY_UNUSED(out_needs_api)) -{ - if (dst_stride == 1) { - *out_stransfer = &_null_to_contig_set_bool_one; - } - else { - *out_stransfer = &_null_to_strided_set_bool_one; - } - *out_transferdata = NULL; - - return NPY_SUCCEED; -} - -/*************************** DEST SETZERO *******************************/ - -/* Sets dest to zero */ -typedef struct { - free_strided_transfer_data freefunc; - copy_strided_transfer_data copyfunc; - npy_intp dst_itemsize; -} _dst_memset_zero_data; - -/* zero-padded data copy function */ -void *_dst_memset_zero_data_copy(void *data) -{ - _dst_memset_zero_data *newdata = - (_dst_memset_zero_data *)PyArray_malloc( - sizeof(_dst_memset_zero_data)); - if (newdata == NULL) { - return NULL; - } - - memcpy(newdata, data, sizeof(_dst_memset_zero_data)); - - return newdata; -} - -static void -_null_to_strided_memset_zero(char *dst, - npy_intp dst_stride, - char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _dst_memset_zero_data *d = (_dst_memset_zero_data *)data; - npy_intp dst_itemsize = d->dst_itemsize; - - while (N > 0) { - memset(dst, 0, dst_itemsize); - dst += dst_stride; - --N; - } -} - -static void -_null_to_contig_memset_zero(char *dst, - npy_intp dst_stride, - char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *data) -{ - _dst_memset_zero_data *d = (_dst_memset_zero_data *)data; - npy_intp dst_itemsize = d->dst_itemsize; - - memset(dst, 0, N*dst_itemsize); -} - -static void -_null_to_strided_reference_setzero(char *dst, - npy_intp dst_stride, - char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp 
NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ - PyObject *dst_ref = NULL; - - while (N > 0) { - NPY_COPY_PYOBJECT_PTR(&dst_ref, dst); - - /* Release the reference in dst */ - NPY_DT_DBG_REFTRACE("dec dest ref (to set zero)", dst_ref); - Py_XDECREF(dst_ref); - - /* Set it to zero */ - dst_ref = NULL; - NPY_COPY_PYOBJECT_PTR(dst, &dst_ref); - - dst += dst_stride; - --N; - } -} - -NPY_NO_EXPORT int -get_setdstzero_transfer_function(int aligned, - npy_intp dst_stride, - PyArray_Descr *dst_dtype, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - _dst_memset_zero_data *data; - - /* If there are no references, just set the whole thing to zero */ - if (!PyDataType_REFCHK(dst_dtype)) { - data = (_dst_memset_zero_data *) - PyArray_malloc(sizeof(_dst_memset_zero_data)); - if (data == NULL) { - PyErr_NoMemory(); - return NPY_FAIL; - } - - data->freefunc = &PyArray_free; - data->copyfunc = &_dst_memset_zero_data_copy; - data->dst_itemsize = dst_dtype->elsize; - - if (dst_stride == data->dst_itemsize) { - *out_stransfer = &_null_to_contig_memset_zero; - } - else { - *out_stransfer = &_null_to_strided_memset_zero; - } - *out_transferdata = data; - } - /* If it's exactly one reference, use the decref function */ - else if (dst_dtype->type_num == NPY_OBJECT) { - if (out_needs_api) { - *out_needs_api = 1; - } - - *out_stransfer = &_null_to_strided_reference_setzero; - *out_transferdata = NULL; - } - /* If there are subarrays, need to wrap it */ - else if (dst_dtype->subarray != NULL) { - PyArray_Dims dst_shape = {NULL, -1}; - npy_intp dst_size = 1; - PyArray_StridedTransferFn *contig_stransfer; - void *contig_data; - - if (out_needs_api) { - *out_needs_api = 1; - } - - if (!(PyArray_IntpConverter(dst_dtype->subarray->shape, - &dst_shape))) { - PyErr_SetString(PyExc_ValueError, - "invalid subarray shape"); - return NPY_FAIL; - } - dst_size = PyArray_MultiplyList(dst_shape.ptr, dst_shape.len); - PyDimMem_FREE(dst_shape.ptr); - 
- /* Get a function for contiguous dst of the subarray type */ - if (get_setdstzero_transfer_function(aligned, - dst_dtype->subarray->base->elsize, - dst_dtype->subarray->base, - &contig_stransfer, &contig_data, - out_needs_api) != NPY_SUCCEED) { - return NPY_FAIL; - } - - if (wrap_transfer_function_n_to_n(contig_stransfer, contig_data, - 0, dst_stride, - 0, dst_dtype->subarray->base->elsize, - dst_size, - out_stransfer, out_transferdata) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(contig_data); - return NPY_FAIL; - } - } - /* If there are fields, need to do each field */ - else if (PyDataType_HASFIELDS(dst_dtype)) { - if (out_needs_api) { - *out_needs_api = 1; - } - - return get_setdestzero_fields_transfer_function(aligned, - dst_stride, dst_dtype, - out_stransfer, - out_transferdata, - out_needs_api); - } - - return NPY_SUCCEED; -} - -static void -_dec_src_ref_nop(char *NPY_UNUSED(dst), - npy_intp NPY_UNUSED(dst_stride), - char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride), - npy_intp NPY_UNUSED(N), - npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ - /* NOP */ -} - -static void -_strided_to_null_dec_src_ref_reference(char *NPY_UNUSED(dst), - npy_intp NPY_UNUSED(dst_stride), - char *src, npy_intp src_stride, - npy_intp N, - npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ - PyObject *src_ref = NULL; - while (N > 0) { - NPY_COPY_PYOBJECT_PTR(&src_ref, src); - - /* Release the reference in src */ - NPY_DT_DBG_REFTRACE("dec src ref (null dst)", src_ref); - Py_XDECREF(src_ref); - - src += src_stride; - --N; - } -} - - -NPY_NO_EXPORT int -get_decsrcref_transfer_function(int aligned, - npy_intp src_stride, - PyArray_Descr *src_dtype, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - /* If there are no references, it's a nop */ - if (!PyDataType_REFCHK(src_dtype)) { - *out_stransfer = &_dec_src_ref_nop; - *out_transferdata = NULL; - - return NPY_SUCCEED; - } - /* If it's a single 
reference, it's one decref */ - else if (src_dtype->type_num == NPY_OBJECT) { - if (out_needs_api) { - *out_needs_api = 1; - } - - *out_stransfer = &_strided_to_null_dec_src_ref_reference; - *out_transferdata = NULL; - - return NPY_SUCCEED; - } - /* If there are subarrays, need to wrap it */ - else if (src_dtype->subarray != NULL) { - PyArray_Dims src_shape = {NULL, -1}; - npy_intp src_size = 1; - PyArray_StridedTransferFn *stransfer; - void *data; - - if (out_needs_api) { - *out_needs_api = 1; - } - - if (!(PyArray_IntpConverter(src_dtype->subarray->shape, - &src_shape))) { - PyErr_SetString(PyExc_ValueError, - "invalid subarray shape"); - return NPY_FAIL; - } - src_size = PyArray_MultiplyList(src_shape.ptr, src_shape.len); - PyDimMem_FREE(src_shape.ptr); - - /* Get a function for contiguous src of the subarray type */ - if (get_decsrcref_transfer_function(aligned, - src_dtype->subarray->base->elsize, - src_dtype->subarray->base, - &stransfer, &data, - out_needs_api) != NPY_SUCCEED) { - return NPY_FAIL; - } - - if (wrap_transfer_function_n_to_n(stransfer, data, - src_stride, 0, - src_dtype->subarray->base->elsize, 0, - src_size, - out_stransfer, out_transferdata) != NPY_SUCCEED) { - PyArray_FreeStridedTransferData(data); - return NPY_FAIL; - } - - return NPY_SUCCEED; - } - /* If there are fields, need to do each field */ - else { - if (out_needs_api) { - *out_needs_api = 1; - } - - return get_decsrcref_fields_transfer_function(aligned, - src_stride, src_dtype, - out_stransfer, - out_transferdata, - out_needs_api); - } -} - -/********************* MAIN DTYPE TRANSFER FUNCTION ***********************/ - -NPY_NO_EXPORT int -PyArray_GetDTypeTransferFunction(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api) -{ - npy_intp src_itemsize, dst_itemsize; - int src_type_num, dst_type_num; - -#if 
NPY_DT_DBG_TRACING - printf("Calculating dtype transfer from "); - PyObject_Print((PyObject *)src_dtype, stdout, 0); - printf(" to "); - PyObject_Print((PyObject *)dst_dtype, stdout, 0); - printf("\n"); -#endif - - /* - * If one of the dtypes is NULL, we give back either a src decref - * function or a dst setzero function - */ - if (dst_dtype == NULL) { - if (move_references) { - return get_decsrcref_transfer_function(aligned, - src_dtype->elsize, - src_dtype, - out_stransfer, out_transferdata, - out_needs_api); - } - else { - *out_stransfer = &_dec_src_ref_nop; - *out_transferdata = NULL; - return NPY_SUCCEED; - } - } - else if (src_dtype == NULL) { - return get_setdstzero_transfer_function(aligned, - dst_dtype->elsize, - dst_dtype, - out_stransfer, out_transferdata, - out_needs_api); - } - - src_itemsize = src_dtype->elsize; - dst_itemsize = dst_dtype->elsize; - src_type_num = src_dtype->type_num; - dst_type_num = dst_dtype->type_num; - - /* Common special case - number -> number NBO cast */ - if (PyTypeNum_ISNUMBER(src_type_num) && - PyTypeNum_ISNUMBER(dst_type_num) && - PyArray_ISNBO(src_dtype->byteorder) && - PyArray_ISNBO(dst_dtype->byteorder)) { - if (PyArray_EquivTypenums(src_type_num, dst_type_num)) { - *out_stransfer = PyArray_GetStridedCopyFn(aligned, - src_stride, dst_stride, - src_itemsize); - *out_transferdata = NULL; - return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED; - } - else { - return get_nbo_cast_numeric_transfer_function (aligned, - src_stride, dst_stride, - src_type_num, dst_type_num, - out_stransfer, out_transferdata); - } - } - - /* - * If there are no references and the data types are equivalent, - * return a simple copy - */ - if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) && - PyArray_EquivTypes(src_dtype, dst_dtype)) { - /* - * We can't pass through the aligned flag because it's not - * appropriate. 
Consider a size-8 string, it will say it's - * aligned because strings only need alignment 1, but the - * copy function wants to know if it's alignment 8. - * - * TODO: Change align from a flag to a "best power of 2 alignment" - * which holds the strongest alignment value for all - * the data which will be used. - */ - *out_stransfer = PyArray_GetStridedCopyFn(0, - src_stride, dst_stride, - src_dtype->elsize); - *out_transferdata = NULL; - return NPY_SUCCEED; - } - - /* First look at the possibilities of just a copy or swap */ - if (src_itemsize == dst_itemsize && src_dtype->kind == dst_dtype->kind && - !PyDataType_HASFIELDS(src_dtype) && - !PyDataType_HASFIELDS(dst_dtype) && - src_dtype->subarray == NULL && dst_dtype->subarray == NULL) { - /* A custom data type requires that we use its copy/swap */ - if (src_type_num >= NPY_NTYPES || dst_type_num >= NPY_NTYPES) { - /* - * If the sizes and kinds are identical, but they're different - * custom types, then get a cast function - */ - if (src_type_num != dst_type_num) { - return get_cast_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - out_stransfer, out_transferdata, - out_needs_api); - } - else { - return wrap_copy_swap_function(aligned, - src_stride, dst_stride, - src_dtype, - PyArray_ISNBO(src_dtype->byteorder) != - PyArray_ISNBO(dst_dtype->byteorder), - out_stransfer, out_transferdata); - } - - - } - - /* The special types, which have no byte-order */ - switch (src_type_num) { - case NPY_VOID: - case NPY_STRING: - case NPY_UNICODE: - *out_stransfer = PyArray_GetStridedCopyFn(0, - src_stride, dst_stride, - src_itemsize); - *out_transferdata = NULL; - return NPY_SUCCEED; - case NPY_OBJECT: - if (out_needs_api) { - *out_needs_api = 1; - } - if (move_references) { - *out_stransfer = &_strided_to_strided_move_references; - *out_transferdata = NULL; - } - else { - *out_stransfer = &_strided_to_strided_copy_references; - *out_transferdata = NULL; - } - return NPY_SUCCEED; 
- } - - /* This is a straight copy */ - if (src_itemsize == 1 || PyArray_ISNBO(src_dtype->byteorder) == - PyArray_ISNBO(dst_dtype->byteorder)) { - *out_stransfer = PyArray_GetStridedCopyFn(aligned, - src_stride, dst_stride, - src_itemsize); - *out_transferdata = NULL; - return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED; - } - /* This is a straight copy + byte swap */ - else if (!PyTypeNum_ISCOMPLEX(src_type_num)) { - *out_stransfer = PyArray_GetStridedCopySwapFn(aligned, - src_stride, dst_stride, - src_itemsize); - *out_transferdata = NULL; - return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED; - } - /* This is a straight copy + element pair byte swap */ - else { - *out_stransfer = PyArray_GetStridedCopySwapPairFn(aligned, - src_stride, dst_stride, - src_itemsize); - *out_transferdata = NULL; - return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED; - } - } - - /* Handle subarrays */ - if (src_dtype->subarray != NULL || dst_dtype->subarray != NULL) { - return get_subarray_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - out_stransfer, out_transferdata, - out_needs_api); - } - - /* Handle fields */ - if ((PyDataType_HASFIELDS(src_dtype) || PyDataType_HASFIELDS(dst_dtype)) && - src_type_num != NPY_OBJECT && dst_type_num != NPY_OBJECT) { - return get_fields_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - out_stransfer, out_transferdata, - out_needs_api); - } - - /* Check for different-sized strings, unicodes, or voids */ - if (src_type_num == dst_type_num) { - switch (src_type_num) { - case NPY_STRING: - case NPY_UNICODE: - case NPY_VOID: - return PyArray_GetStridedZeroPadCopyFn(0, - src_stride, dst_stride, - src_dtype->elsize, dst_dtype->elsize, - out_stransfer, out_transferdata); - } - } - - /* Otherwise a cast is necessary */ - return get_cast_transfer_function(aligned, - src_stride, dst_stride, - src_dtype, dst_dtype, - move_references, - out_stransfer, 
out_transferdata, - out_needs_api); -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/einsum.c.src b/numpy-1.6.2/numpy/core/src/multiarray/einsum.c.src deleted file mode 100644 index 42f92f1944..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/einsum.c.src +++ /dev/null @@ -1,3133 +0,0 @@ -/* - * This file contains the implementation of the 'einsum' function, - * which provides an einstein-summation operation. - * - * Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia - * - * See LICENSE.txt for the license. - */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#include -#include -#include - -#include - -#include "convert.h" - -#ifdef __SSE__ -#define EINSUM_USE_SSE1 1 -#else -#define EINSUM_USE_SSE1 0 -#endif - -/* - * TODO: Only some SSE2 for float64 is implemented. - */ -#ifdef __SSE2__ -#define EINSUM_USE_SSE2 1 -#else -#define EINSUM_USE_SSE2 0 -#endif - -#if EINSUM_USE_SSE1 -#include -#endif - -#if EINSUM_USE_SSE2 -#include -#endif - -#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0) - -/********** PRINTF DEBUG TRACING **************/ -#define NPY_EINSUM_DBG_TRACING 0 - -#if NPY_EINSUM_DBG_TRACING -#define NPY_EINSUM_DBG_PRINT(s) printf("%s", s); -#define NPY_EINSUM_DBG_PRINT1(s, p1) printf(s, p1); -#define NPY_EINSUM_DBG_PRINT2(s, p1, p2) printf(s, p1, p2); -#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) printf(s); -#else -#define NPY_EINSUM_DBG_PRINT(s) -#define NPY_EINSUM_DBG_PRINT1(s, p1) -#define NPY_EINSUM_DBG_PRINT2(s, p1, p2) -#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) -#endif -/**********************************************/ - -typedef enum { - BROADCAST_NONE, - BROADCAST_LEFT, - BROADCAST_RIGHT, - BROADCAST_MIDDLE -} EINSUM_BROADCAST; - -/**begin repeat - * #name = byte, short, int, long, longlong, - * ubyte, ushort, uint, ulong, ulonglong, - * half, float, double, longdouble, - * cfloat, cdouble, clongdouble# - * #temp = byte, short, 
int, long, longlong, - * ubyte, ushort, uint, ulong, ulonglong, - * float, float, double, longdouble, - * float, double, longdouble# - * #to = ,,,,, - * ,,,,, - * npy_float_to_half,,,, - * ,,# - * #from = ,,,,, - * ,,,,, - * npy_half_to_float,,,, - * ,,# - * #complex = 0*5, - * 0*5, - * 0*4, - * 1*3# - * #float32 = 0*5, - * 0*5, - * 0,1,0,0, - * 0*3# - * #float64 = 0*5, - * 0*5, - * 0,0,1,0, - * 0*3# - */ - -/**begin repeat1 - * #nop = 1, 2, 3, 1000# - * #noplabel = one, two, three, any# - */ -static void -@name@_sum_of_products_@noplabel@(int nop, char **dataptr, - npy_intp *strides, npy_intp count) -{ -#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@) - char *data0 = dataptr[0]; - npy_intp stride0 = strides[0]; -#endif -#if (@nop@ == 2 || @nop@ == 3) && !@complex@ - char *data1 = dataptr[1]; - npy_intp stride1 = strides[1]; -#endif -#if (@nop@ == 3) && !@complex@ - char *data2 = dataptr[2]; - npy_intp stride2 = strides[2]; -#endif -#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@) - char *data_out = dataptr[@nop@]; - npy_intp stride_out = strides[@nop@]; -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_@noplabel@ (%d)\n", (int)count); - - while (count--) { -#if !@complex@ -# if @nop@ == 1 - *(npy_@name@ *)data_out = @to@(@from@(*(npy_@name@ *)data0) + - @from@(*(npy_@name@ *)data_out)); - data0 += stride0; - data_out += stride_out; -# elif @nop@ == 2 - *(npy_@name@ *)data_out = @to@(@from@(*(npy_@name@ *)data0) * - @from@(*(npy_@name@ *)data1) + - @from@(*(npy_@name@ *)data_out)); - data0 += stride0; - data1 += stride1; - data_out += stride_out; -# elif @nop@ == 3 - *(npy_@name@ *)data_out = @to@(@from@(*(npy_@name@ *)data0) * - @from@(*(npy_@name@ *)data1) * - @from@(*(npy_@name@ *)data2) + - @from@(*(npy_@name@ *)data_out)); - data0 += stride0; - data1 += stride1; - data2 += stride2; - data_out += stride_out; -# else - npy_@temp@ temp = @from@(*(npy_@name@ *)dataptr[0]); - int i; - for (i = 1; i < nop; ++i) { - temp *= @from@(*(npy_@name@ *)dataptr[i]); - } 
- *(npy_@name@ *)dataptr[nop] = @to@(temp + - @from@(*(npy_@name@ *)dataptr[i])); - for (i = 0; i <= nop; ++i) { - dataptr[i] += strides[i]; - } -# endif -#else /* complex */ -# if @nop@ == 1 - ((npy_@temp@ *)data_out)[0] = ((npy_@temp@ *)data0)[0] + - ((npy_@temp@ *)data_out)[0]; - ((npy_@temp@ *)data_out)[1] = ((npy_@temp@ *)data0)[1] + - ((npy_@temp@ *)data_out)[1]; - data0 += stride0; - data_out += stride_out; -# else -# if @nop@ <= 3 -#define _SUMPROD_NOP @nop@ -# else -#define _SUMPROD_NOP nop -# endif - npy_@temp@ re, im, tmp; - int i; - re = ((npy_@temp@ *)dataptr[0])[0]; - im = ((npy_@temp@ *)dataptr[0])[1]; - for (i = 1; i < _SUMPROD_NOP; ++i) { - tmp = re * ((npy_@temp@ *)dataptr[i])[0] - - im * ((npy_@temp@ *)dataptr[i])[1]; - im = re * ((npy_@temp@ *)dataptr[i])[1] + - im * ((npy_@temp@ *)dataptr[i])[0]; - re = tmp; - } - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[0] = re + - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[0]; - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[1] = im + - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[1]; - - for (i = 0; i <= _SUMPROD_NOP; ++i) { - dataptr[i] += strides[i]; - } -#undef _SUMPROD_NOP -# endif -#endif - } -} - -#if @nop@ == 1 - -static void -@name@_sum_of_products_contig_one(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; - npy_@name@ *data_out = (npy_@name@ *)dataptr[1]; - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_one (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: -#if !@complex@ - data_out[@i@] = @to@(@from@(data0[@i@]) + - @from@(data_out[@i@])); -#else - ((npy_@temp@ *)data_out + 2*@i@)[0] = - ((npy_@temp@ *)data0 + 2*@i@)[0] + - ((npy_@temp@ *)data_out + 2*@i@)[0]; - ((npy_@temp@ *)data_out + 2*@i@)[1] = - ((npy_@temp@ *)data0 + 2*@i@)[1] + - ((npy_@temp@ *)data_out + 2*@i@)[1]; 
-#endif -/**end repeat2**/ - case 0: - return; - } - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ -#if !@complex@ - data_out[@i@] = @to@(@from@(data0[@i@]) + - @from@(data_out[@i@])); -#else /* complex */ - ((npy_@temp@ *)data_out + 2*@i@)[0] = - ((npy_@temp@ *)data0 + 2*@i@)[0] + - ((npy_@temp@ *)data_out + 2*@i@)[0]; - ((npy_@temp@ *)data_out + 2*@i@)[1] = - ((npy_@temp@ *)data0 + 2*@i@)[1] + - ((npy_@temp@ *)data_out + 2*@i@)[1]; -#endif -/**end repeat2**/ - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - -#elif @nop@ == 2 && !@complex@ - -static void -@name@_sum_of_products_contig_two(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; - npy_@name@ *data1 = (npy_@name@ *)dataptr[1]; - npy_@name@ *data_out = (npy_@name@ *)dataptr[2]; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b; -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(@from@(data0[@i@]) * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) && - EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } 
-#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(@from@(data0[@i@]) * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data0 += 8; - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - -/* Some extra specializations for the two operand case */ -static void -@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@temp@ value0 = @from@(*(npy_@name@ *)dataptr[0]); - npy_@name@ *data1 = (npy_@name@ *)dataptr[1]; - npy_@name@ *data_out = (npy_@name@ *)dataptr[2]; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b, value0_sse; -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, b, value0_sse; -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(value0 * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - value0_sse = _mm_set_ps1(value0); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data1 += 8; - 
data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } - else { - return; - } - } -#elif EINSUM_USE_SSE2 && @float64@ - value0_sse = _mm_set1_pd(value0); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_load_pd(data_out+@i@)); - _mm_store_pd(data_out+@i@, b); -/**end repeat2**/ - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } - else { - return; - } - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@)); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@)); - b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@)); - _mm_storeu_pd(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(value0 * - @from@(data1[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data1 += 8; - data_out += 8; - } - - /* Finish off the loop */ - if (count > 0) { - goto finish_after_unrolled_loop; - } -} - -static void -@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; - npy_@temp@ value1 = @from@(*(npy_@name@ *)dataptr[1]); - npy_@name@ *data_out = (npy_@name@ *)dataptr[2]; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, b, value1_sse; -#endif - - 
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - data_out[@i@] = @to@(@from@(data0[@i@])* - value1 + - @from@(data_out[@i@])); -/**end repeat2**/ - case 0: - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - value1_sse = _mm_set_ps1(value1); - - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse); - b = _mm_add_ps(a, _mm_load_ps(data_out+@i@)); - _mm_store_ps(data_out+@i@, b); -/**end repeat2**/ - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse); - b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@)); - _mm_storeu_ps(data_out+@i@, b); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(@from@(data0[@i@])* - value1 + - @from@(data_out[@i@])); -/**end repeat2**/ -#endif - data0 += 8; - data_out += 8; - } - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - -static void -@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; - npy_@name@ *data1 = (npy_@name@ *)dataptr[1]; - npy_@temp@ accum = 0; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, accum_sse = _mm_setzero_pd(); -#endif - - 
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - accum += @from@(data0[@i@]) * @from@(data1[@i@]); -/**end repeat2**/ - case 0: - *(npy_@name@ *)dataptr[2] += @to@(accum); - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@)); - accum_sse = _mm_add_ps(accum_sse, a); -/**end repeat2**/ - data0 += 8; - data1 += 8; - } - - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@)); - accum_sse = _mm_add_pd(accum_sse, a); -/**end repeat2**/ - data0 += 8; - data1 += 8; - } - - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@)); - accum_sse = _mm_add_ps(accum_sse, a); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - _mm_prefetch(data1 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@)); - accum_sse = _mm_add_pd(accum_sse, a); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - accum += @from@(data0[@i@]) * @from@(data1[@i@]); -/**end repeat2**/ -#endif - data0 += 8; - data1 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#elif EINSUM_USE_SSE2 && @float64@ - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - -static void -@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@temp@ value0 = @from@(*(npy_@name@ *)dataptr[0]); - npy_@name@ *data1 = (npy_@name@ *)dataptr[1]; - npy_@temp@ accum = 0; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - accum += @from@(data1[@i@]); -/**end repeat2**/ - case 0: - *(npy_@name@ *)dataptr[2] += @to@(value0 * accum); - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data1)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so 
will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data1+@i@)); -/**end repeat2**/ - data1 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@)); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - accum += @from@(data1[@i@]); -/**end repeat2**/ -#endif - data1 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - -static void -@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; - npy_@temp@ value1 = @from@(*(npy_@name@ *)dataptr[1]); - npy_@temp@ accum = 0; - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ 
-finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: - accum += @from@(data0[@i@]); -/**end repeat2**/ - case 0: - *(npy_@name@ *)dataptr[2] += @to@(accum * value1); - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@)); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - accum += @from@(data0[@i@]); -/**end repeat2**/ -#endif - data0 += 8; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - -#elif @nop@ == 3 && !@complex@ - -static void -@name@_sum_of_products_contig_three(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; - npy_@name@ *data1 = (npy_@name@ *)dataptr[1]; - npy_@name@ *data2 = (npy_@name@ *)dataptr[2]; - npy_@name@ *data_out = (npy_@name@ *)dataptr[3]; - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - data_out[@i@] = @to@(@from@(data0[@i@]) * - @from@(data1[@i@]) * - @from@(data2[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ - data0 += 8; - data1 += 8; - data2 += 8; - data_out += 8; - } - - /* Finish off the loop */ - -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - if (count-- == 0) { - return; - } - data_out[@i@] = @to@(@from@(data0[@i@]) * - @from@(data1[@i@]) * - @from@(data2[@i@]) + - @from@(data_out[@i@])); -/**end repeat2**/ -} - -#else /* @nop@ > 3 || @complex */ - -static void -@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr, - npy_intp *NPY_UNUSED(strides), npy_intp count) -{ - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n", - (int)count); - - while (count--) { -#if !@complex@ - npy_@temp@ temp = @from@(*(npy_@name@ *)dataptr[0]); - int i; - for (i = 1; i < nop; ++i) { - temp *= @from@(*(npy_@name@ 
*)dataptr[i]); - } - *(npy_@name@ *)dataptr[nop] = @to@(temp + - @from@(*(npy_@name@ *)dataptr[i])); - for (i = 0; i <= nop; ++i) { - dataptr[i] += sizeof(npy_@name@); - } -#else /* complex */ -# if @nop@ <= 3 -# define _SUMPROD_NOP @nop@ -# else -# define _SUMPROD_NOP nop -# endif - npy_@temp@ re, im, tmp; - int i; - re = ((npy_@temp@ *)dataptr[0])[0]; - im = ((npy_@temp@ *)dataptr[0])[1]; - for (i = 1; i < _SUMPROD_NOP; ++i) { - tmp = re * ((npy_@temp@ *)dataptr[i])[0] - - im * ((npy_@temp@ *)dataptr[i])[1]; - im = re * ((npy_@temp@ *)dataptr[i])[1] + - im * ((npy_@temp@ *)dataptr[i])[0]; - re = tmp; - } - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[0] = re + - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[0]; - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[1] = im + - ((npy_@temp@ *)dataptr[_SUMPROD_NOP])[1]; - - for (i = 0; i <= _SUMPROD_NOP; ++i) { - dataptr[i] += sizeof(npy_@name@); - } -# undef _SUMPROD_NOP -#endif - } -} - -#endif /* functions for various @nop@ */ - -#if @nop@ == 1 - -static void -@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr, - npy_intp *strides, npy_intp count) -{ -#if @complex@ - npy_@temp@ accum_re = 0, accum_im = 0; - npy_@temp@ *data0 = (npy_@temp@ *)dataptr[0]; -#else - npy_@temp@ accum = 0; - npy_@name@ *data0 = (npy_@name@ *)dataptr[0]; -#endif - -#if EINSUM_USE_SSE1 && @float32@ - __m128 a, accum_sse = _mm_setzero_ps(); -#elif EINSUM_USE_SSE2 && @float64@ - __m128d a, accum_sse = _mm_setzero_pd(); -#endif - - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n", - (int)count); - -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat2 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: -#if !@complex@ - accum += @from@(data0[@i@]); -#else /* complex */ - accum_re += data0[2*@i@+0]; - accum_im += data0[2*@i@+1]; -#endif -/**end repeat2**/ - case 0: -#if @complex@ - ((npy_@temp@ *)dataptr[1])[0] += accum_re; - ((npy_@temp@ 
*)dataptr[1])[1] += accum_im; -#else - *((npy_@name@ *)dataptr[1]) = @to@(accum + - @from@(*((npy_@name@ *)dataptr[1]))); -#endif - return; - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#elif EINSUM_USE_SSE2 && @float64@ - /* Use aligned instructions if possible */ - if (EINSUM_IS_SSE_ALIGNED(data0)) { - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. 
- */ - accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@)); -/**end repeat2**/ - data0 += 8; - } - - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); - - /* Finish off the loop */ - goto finish_after_unrolled_loop; - } -#endif - - /* Unroll the loop by 8 */ - while (count >= 8) { - count -= 8; - -#if EINSUM_USE_SSE1 && @float32@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 4# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@)); -/**end repeat2**/ -#elif EINSUM_USE_SSE2 && @float64@ - _mm_prefetch(data0 + 512, _MM_HINT_T0); - -/**begin repeat2 - * #i = 0, 2, 4, 6# - */ - /* - * NOTE: This accumulation changes the order, so will likely - * produce slightly different results. - */ - accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@)); -/**end repeat2**/ -#else -/**begin repeat2 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ -# if !@complex@ - accum += @from@(data0[@i@]); -# else /* complex */ - accum_re += data0[2*@i@+0]; - accum_im += data0[2*@i@+1]; -# endif -/**end repeat2**/ -#endif - -#if !@complex@ - data0 += 8; -#else - data0 += 8*2; -#endif - } - -#if EINSUM_USE_SSE1 && @float32@ - /* Add the four SSE values and put in accum */ - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1)); - accum_sse = _mm_add_ps(a, accum_sse); - a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2)); - accum_sse = _mm_add_ps(a, accum_sse); - _mm_store_ss(&accum, accum_sse); -#elif EINSUM_USE_SSE2 && @float64@ - /* Add the two SSE2 values and put in accum */ - a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1)); - accum_sse = _mm_add_pd(a, accum_sse); - _mm_store_sd(&accum, accum_sse); -#endif - - /* Finish off the loop */ - goto finish_after_unrolled_loop; -} - 
-#endif /* @nop@ == 1 */ - -static void -@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr, - npy_intp *strides, npy_intp count) -{ -#if @complex@ - npy_@temp@ accum_re = 0, accum_im = 0; -#else - npy_@temp@ accum = 0; -#endif - -#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@) - char *data0 = dataptr[0]; - npy_intp stride0 = strides[0]; -#endif -#if (@nop@ == 2 || @nop@ == 3) && !@complex@ - char *data1 = dataptr[1]; - npy_intp stride1 = strides[1]; -#endif -#if (@nop@ == 3) && !@complex@ - char *data2 = dataptr[2]; - npy_intp stride2 = strides[2]; -#endif - - NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_outstride0_@noplabel@ (%d)\n", - (int)count); - - while (count--) { -#if !@complex@ -# if @nop@ == 1 - accum += @from@(*(npy_@name@ *)data0); - data0 += stride0; -# elif @nop@ == 2 - accum += @from@(*(npy_@name@ *)data0) * - @from@(*(npy_@name@ *)data1); - data0 += stride0; - data1 += stride1; -# elif @nop@ == 3 - accum += @from@(*(npy_@name@ *)data0) * - @from@(*(npy_@name@ *)data1) * - @from@(*(npy_@name@ *)data2); - data0 += stride0; - data1 += stride1; - data2 += stride2; -# else - npy_@temp@ temp = @from@(*(npy_@name@ *)dataptr[0]); - int i; - for (i = 1; i < nop; ++i) { - temp *= @from@(*(npy_@name@ *)dataptr[i]); - } - accum += temp; - for (i = 0; i < nop; ++i) { - dataptr[i] += strides[i]; - } -# endif -#else /* complex */ -# if @nop@ == 1 - accum_re += ((npy_@temp@ *)data0)[0]; - accum_im += ((npy_@temp@ *)data0)[1]; - data0 += stride0; -# else -# if @nop@ <= 3 -#define _SUMPROD_NOP @nop@ -# else -#define _SUMPROD_NOP nop -# endif - npy_@temp@ re, im, tmp; - int i; - re = ((npy_@temp@ *)dataptr[0])[0]; - im = ((npy_@temp@ *)dataptr[0])[1]; - for (i = 1; i < _SUMPROD_NOP; ++i) { - tmp = re * ((npy_@temp@ *)dataptr[i])[0] - - im * ((npy_@temp@ *)dataptr[i])[1]; - im = re * ((npy_@temp@ *)dataptr[i])[1] + - im * ((npy_@temp@ *)dataptr[i])[0]; - re = tmp; - } - accum_re += re; - accum_im += im; - for (i = 0; i < _SUMPROD_NOP; ++i) { - 
dataptr[i] += strides[i]; - } -#undef _SUMPROD_NOP -# endif -#endif - } - -#if @complex@ -# if @nop@ <= 3 - ((npy_@temp@ *)dataptr[@nop@])[0] += accum_re; - ((npy_@temp@ *)dataptr[@nop@])[1] += accum_im; -# else - ((npy_@temp@ *)dataptr[nop])[0] += accum_re; - ((npy_@temp@ *)dataptr[nop])[1] += accum_im; -# endif -#else -# if @nop@ <= 3 - *((npy_@name@ *)dataptr[@nop@]) = @to@(accum + - @from@(*((npy_@name@ *)dataptr[@nop@]))); -# else - *((npy_@name@ *)dataptr[nop]) = @to@(accum + - @from@(*((npy_@name@ *)dataptr[nop]))); -# endif -#endif - -} - -/**end repeat1**/ - -/**end repeat**/ - - -/* Do OR of ANDs for the boolean type */ - -/**begin repeat - * #nop = 1, 2, 3, 1000# - * #noplabel = one, two, three, any# - */ - -static void -bool_sum_of_products_@noplabel@(int nop, char **dataptr, - npy_intp *strides, npy_intp count) -{ -#if (@nop@ <= 3) - char *data0 = dataptr[0]; - npy_intp stride0 = strides[0]; -#endif -#if (@nop@ == 2 || @nop@ == 3) - char *data1 = dataptr[1]; - npy_intp stride1 = strides[1]; -#endif -#if (@nop@ == 3) - char *data2 = dataptr[2]; - npy_intp stride2 = strides[2]; -#endif -#if (@nop@ <= 3) - char *data_out = dataptr[@nop@]; - npy_intp stride_out = strides[@nop@]; -#endif - - while (count--) { -#if @nop@ == 1 - *(npy_bool *)data_out = *(npy_bool *)data0 || - *(npy_bool *)data_out; - data0 += stride0; - data_out += stride_out; -#elif @nop@ == 2 - *(npy_bool *)data_out = (*(npy_bool *)data0 && - *(npy_bool *)data1) || - *(npy_bool *)data_out; - data0 += stride0; - data1 += stride1; - data_out += stride_out; -#elif @nop@ == 3 - *(npy_bool *)data_out = (*(npy_bool *)data0 && - *(npy_bool *)data1 && - *(npy_bool *)data2) || - *(npy_bool *)data_out; - data0 += stride0; - data1 += stride1; - data2 += stride2; - data_out += stride_out; -#else - npy_bool temp = *(npy_bool *)dataptr[0]; - int i; - for (i = 1; i < nop; ++i) { - temp = temp && *(npy_bool *)dataptr[i]; - } - *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i]; - for (i = 0; i <= 
nop; ++i) { - dataptr[i] += strides[i]; - } -#endif - } -} - -static void -bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr, - npy_intp *strides, npy_intp count) -{ -#if (@nop@ <= 3) - char *data0 = dataptr[0]; -#endif -#if (@nop@ == 2 || @nop@ == 3) - char *data1 = dataptr[1]; -#endif -#if (@nop@ == 3) - char *data2 = dataptr[2]; -#endif -#if (@nop@ <= 3) - char *data_out = dataptr[@nop@]; -#endif - -#if (@nop@ <= 3) -/* This is placed before the main loop to make small counts faster */ -finish_after_unrolled_loop: - switch (count) { -/**begin repeat1 - * #i = 6, 5, 4, 3, 2, 1, 0# - */ - case @i@+1: -# if @nop@ == 1 - *((npy_bool *)data_out + @i@) = (*((npy_bool *)data0 + @i@)) || - (*((npy_bool *)data_out + @i@)); - data0 += 8*sizeof(npy_bool); - data_out += 8*sizeof(npy_bool); -# elif @nop@ == 2 - *((npy_bool *)data_out + @i@) = - ((*((npy_bool *)data0 + @i@)) && - (*((npy_bool *)data1 + @i@))) || - (*((npy_bool *)data_out + @i@)); - data0 += 8*sizeof(npy_bool); - data1 += 8*sizeof(npy_bool); - data_out += 8*sizeof(npy_bool); -# elif @nop@ == 3 - *((npy_bool *)data_out + @i@) = - ((*((npy_bool *)data0 + @i@)) && - (*((npy_bool *)data1 + @i@)) && - (*((npy_bool *)data2 + @i@))) || - (*((npy_bool *)data_out + @i@)); - data0 += 8*sizeof(npy_bool); - data1 += 8*sizeof(npy_bool); - data2 += 8*sizeof(npy_bool); - data_out += 8*sizeof(npy_bool); -# endif -/**end repeat1**/ - case 0: - return; - } -#endif - -/* Unroll the loop by 8 for fixed-size nop */ -#if (@nop@ <= 3) - while (count >= 8) { - count -= 8; -#else - while (count--) { -#endif - -# if @nop@ == 1 -/**begin repeat1 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - *((npy_bool *)data_out + @i@) = (*((npy_bool *)data0 + @i@)) || - (*((npy_bool *)data_out + @i@)); -/**end repeat1**/ - data0 += 8*sizeof(npy_bool); - data_out += 8*sizeof(npy_bool); -# elif @nop@ == 2 -/**begin repeat1 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - *((npy_bool *)data_out + @i@) = - ((*((npy_bool *)data0 + @i@)) && - (*((npy_bool 
*)data1 + @i@))) || - (*((npy_bool *)data_out + @i@)); -/**end repeat1**/ - data0 += 8*sizeof(npy_bool); - data1 += 8*sizeof(npy_bool); - data_out += 8*sizeof(npy_bool); -# elif @nop@ == 3 -/**begin repeat1 - * #i = 0, 1, 2, 3, 4, 5, 6, 7# - */ - *((npy_bool *)data_out + @i@) = - ((*((npy_bool *)data0 + @i@)) && - (*((npy_bool *)data1 + @i@)) && - (*((npy_bool *)data2 + @i@))) || - (*((npy_bool *)data_out + @i@)); -/**end repeat1**/ - data0 += 8*sizeof(npy_bool); - data1 += 8*sizeof(npy_bool); - data2 += 8*sizeof(npy_bool); - data_out += 8*sizeof(npy_bool); -# else - npy_bool temp = *(npy_bool *)dataptr[0]; - int i; - for (i = 1; i < nop; ++i) { - temp = temp && *(npy_bool *)dataptr[i]; - } - *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i]; - for (i = 0; i <= nop; ++i) { - dataptr[i] += sizeof(npy_bool); - } -# endif - } - - /* If the loop was unrolled, we need to finish it off */ -#if (@nop@ <= 3) - goto finish_after_unrolled_loop; -#endif -} - -static void -bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr, - npy_intp *strides, npy_intp count) -{ - npy_bool accum = 0; - -#if (@nop@ <= 3) - char *data0 = dataptr[0]; - npy_intp stride0 = strides[0]; -#endif -#if (@nop@ == 2 || @nop@ == 3) - char *data1 = dataptr[1]; - npy_intp stride1 = strides[1]; -#endif -#if (@nop@ == 3) - char *data2 = dataptr[2]; - npy_intp stride2 = strides[2]; -#endif - - while (count--) { -#if @nop@ == 1 - accum = *(npy_bool *)data0 || accum; - data0 += stride0; -#elif @nop@ == 2 - accum = (*(npy_bool *)data0 && *(npy_bool *)data1) || accum; - data0 += stride0; - data1 += stride1; -#elif @nop@ == 3 - accum = (*(npy_bool *)data0 && - *(npy_bool *)data1 && - *(npy_bool *)data2) || accum; - data0 += stride0; - data1 += stride1; - data2 += stride2; -#else - npy_bool temp = *(npy_bool *)dataptr[0]; - int i; - for (i = 1; i < nop; ++i) { - temp = temp && *(npy_bool *)dataptr[i]; - } - accum = temp || accum; - for (i = 0; i <= nop; ++i) { - dataptr[i] += strides[i]; - 
} -#endif - } - -# if @nop@ <= 3 - *((npy_bool *)dataptr[@nop@]) = accum || *((npy_bool *)dataptr[@nop@]); -# else - *((npy_bool *)dataptr[nop]) = accum || *((npy_bool *)dataptr[nop]); -# endif -} - -/**end repeat**/ - -typedef void (*sum_of_products_fn)(int, char **, npy_intp *, npy_intp); - -/* These tables need to match up with the type enum */ -static sum_of_products_fn -_contig_outstride0_unary_specialization_table[NPY_NTYPES] = { -/**begin repeat - * #name = bool, - * byte, ubyte, - * short, ushort, - * int, uint, - * long, ulong, - * longlong, ulonglong, - * float, double, longdouble, - * cfloat, cdouble, clongdouble, - * object, string, unicode, void, - * datetime, timedelta, half# - * #use = 0, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, 1, - * 1, 1, 1, - * 0, 0, 0, 0, - * 0, 0, 1# - */ -#if @use@ - &@name@_sum_of_products_contig_outstride0_one, -#else - NULL, -#endif -/**end repeat**/ -}; /* End of _contig_outstride0_unary_specialization_table */ - -static sum_of_products_fn _binary_specialization_table[NPY_NTYPES][5] = { -/**begin repeat - * #name = bool, - * byte, ubyte, - * short, ushort, - * int, uint, - * long, ulong, - * longlong, ulonglong, - * float, double, longdouble, - * cfloat, cdouble, clongdouble, - * object, string, unicode, void, - * datetime, timedelta, half# - * #use = 0, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, 1, - * 0, 0, 0, - * 0, 0, 0, 0, - * 0, 0, 1# - */ -#if @use@ -{ - &@name@_sum_of_products_stride0_contig_outstride0_two, - &@name@_sum_of_products_stride0_contig_outcontig_two, - &@name@_sum_of_products_contig_stride0_outstride0_two, - &@name@_sum_of_products_contig_stride0_outcontig_two, - &@name@_sum_of_products_contig_contig_outstride0_two, -}, -#else - {NULL, NULL, NULL, NULL, NULL}, -#endif -/**end repeat**/ -}; /* End of _binary_specialization_table */ - -static sum_of_products_fn _outstride0_specialized_table[NPY_NTYPES][4] = { -/**begin repeat - * #name = bool, - * byte, ubyte, - * 
short, ushort, - * int, uint, - * long, ulong, - * longlong, ulonglong, - * float, double, longdouble, - * cfloat, cdouble, clongdouble, - * object, string, unicode, void, - * datetime, timedelta, half# - * #use = 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, 1, - * 1, 1, 1, - * 0, 0, 0, 0, - * 0, 0, 1# - */ -#if @use@ -{ - &@name@_sum_of_products_outstride0_any, - &@name@_sum_of_products_outstride0_one, - &@name@_sum_of_products_outstride0_two, - &@name@_sum_of_products_outstride0_three -}, -#else - {NULL, NULL, NULL, NULL}, -#endif -/**end repeat**/ -}; /* End of _outstride0_specialized_table */ - -static sum_of_products_fn _allcontig_specialized_table[NPY_NTYPES][4] = { -/**begin repeat - * #name = bool, - * byte, ubyte, - * short, ushort, - * int, uint, - * long, ulong, - * longlong, ulonglong, - * float, double, longdouble, - * cfloat, cdouble, clongdouble, - * object, string, unicode, void, - * datetime, timedelta, half# - * #use = 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, 1, - * 1, 1, 1, - * 0, 0, 0, 0, - * 0, 0, 1# - */ -#if @use@ -{ - &@name@_sum_of_products_contig_any, - &@name@_sum_of_products_contig_one, - &@name@_sum_of_products_contig_two, - &@name@_sum_of_products_contig_three -}, -#else - {NULL, NULL, NULL, NULL}, -#endif -/**end repeat**/ -}; /* End of _allcontig_specialized_table */ - -static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = { -/**begin repeat - * #name = bool, - * byte, ubyte, - * short, ushort, - * int, uint, - * long, ulong, - * longlong, ulonglong, - * float, double, longdouble, - * cfloat, cdouble, clongdouble, - * object, string, unicode, void, - * datetime, timedelta, half# - * #use = 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, - * 1, 1, 1, - * 1, 1, 1, - * 0, 0, 0, 0, - * 0, 0, 1# - */ -#if @use@ -{ - &@name@_sum_of_products_any, - &@name@_sum_of_products_one, - &@name@_sum_of_products_two, - &@name@_sum_of_products_three -}, -#else - {NULL, NULL, NULL, NULL}, -#endif 
-/**end repeat**/ -}; /* End of _unnspecialized_table */ - -static sum_of_products_fn -get_sum_of_products_function(int nop, int type_num, - npy_intp itemsize, npy_intp *fixed_strides) -{ - int iop; - - if (type_num >= NPY_NTYPES) { - return NULL; - } - - /* contiguous reduction */ - if (nop == 1 && fixed_strides[0] == itemsize && fixed_strides[1] == 0) { - sum_of_products_fn ret = - _contig_outstride0_unary_specialization_table[type_num]; - if (ret != NULL) { - return ret; - } - } - - /* nop of 2 has more specializations */ - if (nop == 2) { - /* Encode the zero/contiguous strides */ - int code; - code = (fixed_strides[0] == 0) ? 0 : - (fixed_strides[0] == itemsize) ? 2*2*1 : 8; - code += (fixed_strides[1] == 0) ? 0 : - (fixed_strides[1] == itemsize) ? 2*1 : 8; - code += (fixed_strides[2] == 0) ? 0 : - (fixed_strides[2] == itemsize) ? 1 : 8; - if (code >= 2 && code < 7) { - sum_of_products_fn ret = - _binary_specialization_table[type_num][code-2]; - if (ret != NULL) { - return ret; - } - } - } - - /* Inner loop with an output stride of 0 */ - if (fixed_strides[nop] == 0) { - return _outstride0_specialized_table[type_num][nop <= 3 ? nop : 0]; - } - - /* Check for all contiguous */ - for (iop = 0; iop < nop; ++iop) { - if (fixed_strides[iop] != itemsize) { - break; - } - } - - /* Contiguous loop */ - if (iop == nop) { - return _allcontig_specialized_table[type_num][nop <= 3 ? nop : 0]; - } - - /* None of the above specializations caught it, general loops */ - return _unspecialized_table[type_num][nop <= 3 ? 
nop : 0]; -} - -/* - * Parses the subscripts for one operand into an output - * of 'ndim' labels - */ -static int -parse_operand_subscripts(char *subscripts, int length, - int ndim, - int iop, char *out_labels, - char *out_label_counts, - int *out_min_label, - int *out_max_label, - int *out_num_labels, - EINSUM_BROADCAST *out_broadcast) -{ - int i, idim, ndim_left, label; - int left_labels = 0, right_labels = 0, ellipsis = 0; - - /* Process the labels from the end until the ellipsis */ - idim = ndim-1; - for (i = length-1; i >= 0; --i) { - label = subscripts[i]; - /* A label for an axis */ - if (label > 0 && isalpha(label)) { - if (idim >= 0) { - out_labels[idim--] = label; - /* Calculate the min and max labels */ - if (label < *out_min_label) { - *out_min_label = label; - } - if (label > *out_max_label) { - *out_max_label = label; - } - /* If it's the first time we see this label, count it */ - if (out_label_counts[label] == 0) { - (*out_num_labels)++; - } - out_label_counts[label]++; - right_labels = 1; - } - else { - PyErr_Format(PyExc_ValueError, - "einstein sum subscripts string contains " - "too many subscripts for operand %d", iop); - return 0; - } - } - /* The end of the ellipsis */ - else if (label == '.') { - /* A valid ellipsis */ - if (i >= 2 && subscripts[i-1] == '.' && subscripts[i-2] == '.') { - ellipsis = 1; - length = i-2; - break; - } - else { - PyErr_SetString(PyExc_ValueError, - "einstein sum subscripts string contains a " - "'.' that is not part of an ellipsis ('...')"); - return 0; - - } - } - else if (label != ' ') { - PyErr_Format(PyExc_ValueError, - "invalid subscript '%c' in einstein sum " - "subscripts string, subscripts must " - "be letters", (char)label); - return 0; - } - } - - if (!ellipsis && idim != -1) { - PyErr_Format(PyExc_ValueError, - "operand has more dimensions than subscripts " - "given in einstein sum, but no '...' 
ellipsis " - "provided to broadcast the extra dimensions."); - return 0; - } - - /* Reduce ndim to just the dimensions left to fill at the beginning */ - ndim_left = idim+1; - idim = 0; - - /* - * If we stopped because of an ellipsis, start again from the beginning. - * The length was truncated to end at the ellipsis in this case. - */ - if (i > 0) { - for (i = 0; i < length; ++i) { - label = subscripts[i]; - /* A label for an axis */ - if (label > 0 && isalnum(label)) { - if (idim < ndim_left) { - out_labels[idim++] = label; - /* Calculate the min and max labels */ - if (label < *out_min_label) { - *out_min_label = label; - } - if (label > *out_max_label) { - *out_max_label = label; - } - /* If it's the first time we see this label, count it */ - if (out_label_counts[label] == 0) { - (*out_num_labels)++; - } - out_label_counts[label]++; - left_labels = 1; - } - else { - PyErr_Format(PyExc_ValueError, - "einstein sum subscripts string contains " - "too many subscripts for operand %d", iop); - return 0; - } - } - else if (label != ' ') { - PyErr_Format(PyExc_ValueError, - "invalid subscript '%c' in einstein sum " - "subscripts string, subscripts must " - "be letters", (char)label); - return 0; - } - } - } - - /* Set the remaining labels to 0 */ - while (idim < ndim_left) { - out_labels[idim++] = 0; - } - - /* - * Find any labels duplicated for this operand, and turn them - * into negative offets to the axis to merge with. - * - * In C, the char type may be signed or unsigned, but with - * twos complement arithmetic the char is ok either way here, and - * later where it matters the char is cast to a signed char. 
- */ - for (idim = 0; idim < ndim-1; ++idim) { - char *next; - /* If this is a proper label, find any duplicates of it */ - label = out_labels[idim]; - if (label > 0) { - /* Search for the next matching label */ - next = (char *)memchr(out_labels+idim+1, label, - ndim-idim-1); - while (next != NULL) { - /* The offset from next to out_labels[idim] (negative) */ - *next = (char)((out_labels+idim)-next); - /* Search for the next matching label */ - next = (char *)memchr(next+1, label, - out_labels+ndim-1-next); - } - } - } - - if (!ellipsis) { - *out_broadcast = BROADCAST_NONE; - } - else if (left_labels && right_labels) { - *out_broadcast = BROADCAST_MIDDLE; - } - else if (!left_labels) { - *out_broadcast = BROADCAST_RIGHT; - } - else { - *out_broadcast = BROADCAST_LEFT; - } - - return 1; -} - -/* - * Parses the subscripts for the output operand into an output - * that requires 'ndim_broadcast' unlabeled dimensions, returning - * the number of output dimensions. Returns -1 if there is an error. 
- */ -static int -parse_output_subscripts(char *subscripts, int length, - int ndim_broadcast, - const char *label_counts, - char *out_labels, - EINSUM_BROADCAST *out_broadcast) -{ - int i, nlabels, label, idim, ndim, ndim_left; - int left_labels = 0, right_labels = 0, ellipsis = 0; - - /* Count the labels, making sure they're all unique and valid */ - nlabels = 0; - for (i = 0; i < length; ++i) { - label = subscripts[i]; - if (label > 0 && isalpha(label)) { - /* Check if it occurs again */ - if (memchr(subscripts+i+1, label, length-i-1) == NULL) { - /* Check that it was used in the inputs */ - if (label_counts[label] == 0) { - PyErr_Format(PyExc_ValueError, - "einstein sum subscripts string included " - "output subscript '%c' which never appeared " - "in an input", (char)label); - return -1; - } - - nlabels++; - } - else { - PyErr_Format(PyExc_ValueError, - "einstein sum subscripts string includes " - "output subscript '%c' multiple times", - (char)label); - return -1; - } - } - else if (label != '.' && label != ' ') { - PyErr_Format(PyExc_ValueError, - "invalid subscript '%c' in einstein sum " - "subscripts string, subscripts must " - "be letters", (char)label); - return -1; - } - } - - /* The number of output dimensions */ - ndim = ndim_broadcast + nlabels; - - /* Process the labels from the end until the ellipsis */ - idim = ndim-1; - for (i = length-1; i >= 0; --i) { - label = subscripts[i]; - /* A label for an axis */ - if (label != '.' && label != ' ') { - if (idim >= 0) { - out_labels[idim--] = label; - } - else { - PyErr_Format(PyExc_ValueError, - "einstein sum subscripts string contains " - "too many output subscripts"); - return -1; - } - right_labels = 1; - } - /* The end of the ellipsis */ - else if (label == '.') { - /* A valid ellipsis */ - if (i >= 2 && subscripts[i-1] == '.' && subscripts[i-2] == '.') { - ellipsis = 1; - length = i-2; - break; - } - else { - PyErr_SetString(PyExc_ValueError, - "einstein sum subscripts string contains a " - "'.' 
that is not part of an ellipsis ('...')"); - return -1; - - } - } - } - - if (!ellipsis && idim != -1) { - PyErr_SetString(PyExc_ValueError, - "output has more dimensions than subscripts " - "given in einstein sum, but no '...' ellipsis " - "provided to broadcast the extra dimensions."); - return 0; - } - - /* Reduce ndim to just the dimensions left to fill at the beginning */ - ndim_left = idim+1; - idim = 0; - - /* - * If we stopped because of an ellipsis, start again from the beginning. - * The length was truncated to end at the ellipsis in this case. - */ - if (i > 0) { - for (i = 0; i < length; ++i) { - label = subscripts[i]; - /* A label for an axis */ - if (label != '.' && label != ' ') { - if (idim < ndim_left) { - out_labels[idim++] = label; - } - else { - PyErr_Format(PyExc_ValueError, - "einstein sum subscripts string contains " - "too many subscripts for the output"); - return -1; - } - left_labels = 1; - } - else { - PyErr_SetString(PyExc_ValueError, - "einstein sum subscripts string contains a " - "'.' that is not part of an ellipsis ('...')"); - return -1; - } - } - } - - /* Set the remaining output labels to 0 */ - while (idim < ndim_left) { - out_labels[idim++] = 0; - } - - if (!ellipsis) { - *out_broadcast = BROADCAST_NONE; - } - else if (left_labels && right_labels) { - *out_broadcast = BROADCAST_MIDDLE; - } - else if (!left_labels) { - *out_broadcast = BROADCAST_RIGHT; - } - else { - *out_broadcast = BROADCAST_LEFT; - } - - return ndim; -} - - -/* - * When there's just one operand and no reduction, we - * can return a view into op. This calculates the view - * if possible. 
- */ -static int -get_single_op_view(PyArrayObject *op, int iop, char *labels, - int ndim_output, char *output_labels, - PyArrayObject **ret) -{ - npy_intp new_strides[NPY_MAXDIMS]; - npy_intp new_dims[NPY_MAXDIMS]; - char *out_label; - int label, i, idim, ndim, ibroadcast = 0; - - ndim = PyArray_NDIM(op); - - /* Initialize the dimensions and strides to zero */ - for (idim = 0; idim < ndim_output; ++idim) { - new_dims[idim] = 0; - new_strides[idim] = 0; - } - - /* Match the labels in the operand with the output labels */ - for (idim = 0; idim < ndim; ++idim) { - /* - * The char type may be either signed or unsigned, we - * need it to be signed here. - */ - label = (signed char)labels[idim]; - /* If this label says to merge axes, get the actual label */ - if (label < 0) { - label = labels[idim+label]; - } - /* If the label is 0, it's an unlabeled broadcast dimension */ - if (label == 0) { - /* The next output label that's a broadcast dimension */ - for (; ibroadcast < ndim_output; ++ibroadcast) { - if (output_labels[ibroadcast] == 0) { - break; - } - } - if (ibroadcast == ndim_output) { - PyErr_SetString(PyExc_ValueError, - "output had too few broadcast dimensions"); - return 0; - } - new_dims[ibroadcast] = PyArray_DIM(op, idim); - new_strides[ibroadcast] = PyArray_STRIDE(op, idim); - ++ibroadcast; - } - else { - /* Find the position for this dimension in the output */ - out_label = (char *)memchr(output_labels, label, - ndim_output); - /* If it's not found, reduction -> can't return a view */ - if (out_label == NULL) { - break; - } - /* Update the dimensions and strides of the output */ - i = out_label - output_labels; - if (new_dims[i] != 0 && - new_dims[i] != PyArray_DIM(op, idim)) { - PyErr_Format(PyExc_ValueError, - "dimensions in operand %d for collapsing " - "index '%c' don't match (%d != %d)", - iop, label, (int)new_dims[i], - (int)PyArray_DIM(op, idim)); - return 0; - } - new_dims[i] = PyArray_DIM(op, idim); - new_strides[i] += PyArray_STRIDE(op, idim); - } 
- } - /* If we processed all the input axes, return a view */ - if (idim == ndim) { - Py_INCREF(PyArray_DESCR(op)); - *ret = (PyArrayObject *)PyArray_NewFromDescr( - Py_TYPE(op), - PyArray_DESCR(op), - ndim_output, new_dims, new_strides, - PyArray_DATA(op), - 0, (PyObject *)op); - - if (*ret == NULL) { - return 0; - } - if (!PyArray_Check(*ret)) { - Py_DECREF(*ret); - *ret = NULL; - PyErr_SetString(PyExc_RuntimeError, - "NewFromDescr failed to return an array"); - return 0; - } - PyArray_UpdateFlags(*ret, - NPY_C_CONTIGUOUS|NPY_ALIGNED|NPY_F_CONTIGUOUS); - Py_INCREF(op); - PyArray_BASE(*ret) = (PyObject *)op; - return 1; - } - - /* Return success, but that we couldn't make a view */ - *ret = NULL; - return 1; -} - -static PyArrayObject * -get_combined_dims_view(PyArrayObject *op, int iop, char *labels) -{ - npy_intp new_strides[NPY_MAXDIMS]; - npy_intp new_dims[NPY_MAXDIMS]; - int i, idim, ndim, icombine, combineoffset, label; - int icombinemap[NPY_MAXDIMS]; - - PyArrayObject *ret = NULL; - - ndim = PyArray_NDIM(op); - - /* Initialize the dimensions and strides to zero */ - for (idim = 0; idim < ndim; ++idim) { - new_dims[idim] = 0; - new_strides[idim] = 0; - } - - /* Copy the dimensions and strides, except when collapsing */ - icombine = 0; - for (idim = 0; idim < ndim; ++idim) { - /* - * The char type may be either signed or unsigned, we - * need it to be signed here. 
- */ - label = (signed char)labels[idim]; - /* If this label says to merge axes, get the actual label */ - if (label < 0) { - combineoffset = label; - label = labels[idim+label]; - } - else { - combineoffset = 0; - if (icombine != idim) { - labels[icombine] = labels[idim]; - } - icombinemap[idim] = icombine; - } - /* If the label is 0, it's an unlabeled broadcast dimension */ - if (label == 0) { - new_dims[icombine] = PyArray_DIM(op, idim); - new_strides[icombine] = PyArray_STRIDE(op, idim); - } - else { - /* Update the combined axis dimensions and strides */ - i = idim + combineoffset; - if (combineoffset < 0 && - new_dims[i] != PyArray_DIM(op, idim)) { - PyErr_Format(PyExc_ValueError, - "dimensions in operand %d for collapsing " - "index '%c' don't match (%d != %d)", - iop, label, (int)new_dims[i], - (int)PyArray_DIM(op, idim)); - return NULL; - } - i = icombinemap[i]; - new_dims[i] = PyArray_DIM(op, idim); - new_strides[i] += PyArray_STRIDE(op, idim); - } - - /* If the label didn't say to combine axes, increment dest i */ - if (combineoffset == 0) { - icombine++; - } - } - - /* The compressed number of dimensions */ - ndim = icombine; - - Py_INCREF(PyArray_DESCR(op)); - ret = (PyArrayObject *)PyArray_NewFromDescr( - Py_TYPE(op), - PyArray_DESCR(op), - ndim, new_dims, new_strides, - PyArray_DATA(op), - PyArray_ISWRITEABLE(op) ? 
NPY_WRITEABLE : 0, - (PyObject *)op); - - if (ret == NULL) { - return NULL; - } - if (!PyArray_Check(ret)) { - Py_DECREF(ret); - PyErr_SetString(PyExc_RuntimeError, - "NewFromDescr failed to return an array"); - return NULL; - } - PyArray_UpdateFlags(ret, - NPY_C_CONTIGUOUS|NPY_ALIGNED|NPY_F_CONTIGUOUS); - Py_INCREF(op); - PyArray_BASE(ret) = (PyObject *)op; - - return ret; -} - -static int -prepare_op_axes(int ndim, int iop, char *labels, int *axes, - int ndim_iter, char *iter_labels, EINSUM_BROADCAST broadcast) -{ - int i, label, ibroadcast; - - /* Regular broadcasting */ - if (broadcast == BROADCAST_RIGHT) { - /* broadcast dimensions get placed in rightmost position */ - ibroadcast = ndim-1; - for (i = ndim_iter-1; i >= 0; --i) { - label = iter_labels[i]; - /* - * If it's an unlabeled broadcast dimension, choose - * the next broadcast dimension from the operand. - */ - if (label == 0) { - while (ibroadcast >= 0 && labels[ibroadcast] != 0) { - --ibroadcast; - } - /* - * If we used up all the operand broadcast dimensions, - * extend it with a "newaxis" - */ - if (ibroadcast < 0) { - axes[i] = -1; - } - /* Otherwise map to the broadcast axis */ - else { - axes[i] = ibroadcast; - --ibroadcast; - } - } - /* It's a labeled dimension, find the matching one */ - else { - char *match = memchr(labels, label, ndim); - /* If the op doesn't have the label, broadcast it */ - if (match == NULL) { - axes[i] = -1; - } - /* Otherwise use it */ - else { - axes[i] = match - labels; - } - } - } - } - /* Reverse broadcasting */ - else if (broadcast == BROADCAST_LEFT) { - /* broadcast dimensions get placed in leftmost position */ - ibroadcast = 0; - for (i = 0; i < ndim_iter; ++i) { - label = iter_labels[i]; - /* - * If it's an unlabeled broadcast dimension, choose - * the next broadcast dimension from the operand. 
- */ - if (label == 0) { - while (ibroadcast < ndim && labels[ibroadcast] != 0) { - ++ibroadcast; - } - /* - * If we used up all the operand broadcast dimensions, - * extend it with a "newaxis" - */ - if (ibroadcast >= ndim) { - axes[i] = -1; - } - /* Otherwise map to the broadcast axis */ - else { - axes[i] = ibroadcast; - ++ibroadcast; - } - } - /* It's a labeled dimension, find the matching one */ - else { - char *match = memchr(labels, label, ndim); - /* If the op doesn't have the label, broadcast it */ - if (match == NULL) { - axes[i] = -1; - } - /* Otherwise use it */ - else { - axes[i] = match - labels; - } - } - } - } - /* Middle or None broadcasting */ - else { - /* broadcast dimensions get placed in leftmost position */ - ibroadcast = 0; - for (i = 0; i < ndim_iter; ++i) { - label = iter_labels[i]; - /* - * If it's an unlabeled broadcast dimension, choose - * the next broadcast dimension from the operand. - */ - if (label == 0) { - while (ibroadcast < ndim && labels[ibroadcast] != 0) { - ++ibroadcast; - } - /* - * If we used up all the operand broadcast dimensions, - * it's an error - */ - if (ibroadcast >= ndim) { - PyErr_Format(PyExc_ValueError, - "operand %d did not have enough dimensions " - "to match the broadcasting, and couldn't be " - "extended because einstein sum subscripts " - "were specified at both the start and end", - iop); - return 0; - } - /* Otherwise map to the broadcast axis */ - else { - axes[i] = ibroadcast; - ++ibroadcast; - } - } - /* It's a labeled dimension, find the matching one */ - else { - char *match = memchr(labels, label, ndim); - /* If the op doesn't have the label, broadcast it */ - if (match == NULL) { - axes[i] = -1; - } - /* Otherwise use it */ - else { - axes[i] = match - labels; - } - } - } - } - - return 1; -} - -static int -unbuffered_loop_nop1_ndim2(NpyIter *iter) -{ - npy_intp coord, shape[2], strides[2][2]; - char *ptrs[2][2], *ptr; - sum_of_products_fn sop; - -#if NPY_EINSUM_DBG_TRACING - 
NpyIter_DebugPrint(iter); -#endif - NPY_EINSUM_DBG_PRINT("running hand-coded 1-op 2-dim loop\n"); - - NpyIter_GetShape(iter, shape); - memcpy(strides[0], NpyIter_GetAxisStrideArray(iter, 0), - 2*sizeof(npy_intp)); - memcpy(strides[1], NpyIter_GetAxisStrideArray(iter, 1), - 2*sizeof(npy_intp)); - memcpy(ptrs[0], NpyIter_GetInitialDataPtrArray(iter), - 2*sizeof(char *)); - memcpy(ptrs[1], ptrs[0], 2*sizeof(char*)); - - sop = get_sum_of_products_function(1, - NpyIter_GetDescrArray(iter)[0]->type_num, - NpyIter_GetDescrArray(iter)[0]->elsize, - strides[0]); - - if (sop == NULL) { - PyErr_SetString(PyExc_TypeError, - "invalid data type for einsum"); - return -1; - } - - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. - */ - for (coord = shape[1]; coord > 0; --coord) { - sop(1, ptrs[0], strides[0], shape[0]); - - ptr = ptrs[1][0] + strides[1][0]; - ptrs[0][0] = ptrs[1][0] = ptr; - ptr = ptrs[1][1] + strides[1][1]; - ptrs[0][1] = ptrs[1][1] = ptr; - } - - return 0; -} - -static int -unbuffered_loop_nop1_ndim3(NpyIter *iter) -{ - npy_intp coords[2], shape[3], strides[3][2]; - char *ptrs[3][2], *ptr; - sum_of_products_fn sop; - -#if NPY_EINSUM_DBG_TRACING - NpyIter_DebugPrint(iter); -#endif - NPY_EINSUM_DBG_PRINT("running hand-coded 1-op 3-dim loop\n"); - - NpyIter_GetShape(iter, shape); - memcpy(strides[0], NpyIter_GetAxisStrideArray(iter, 0), - 2*sizeof(npy_intp)); - memcpy(strides[1], NpyIter_GetAxisStrideArray(iter, 1), - 2*sizeof(npy_intp)); - memcpy(strides[2], NpyIter_GetAxisStrideArray(iter, 2), - 2*sizeof(npy_intp)); - memcpy(ptrs[0], NpyIter_GetInitialDataPtrArray(iter), - 2*sizeof(char *)); - memcpy(ptrs[1], ptrs[0], 2*sizeof(char*)); - memcpy(ptrs[2], ptrs[0], 2*sizeof(char*)); - - sop = get_sum_of_products_function(1, - NpyIter_GetDescrArray(iter)[0]->type_num, - NpyIter_GetDescrArray(iter)[0]->elsize, - strides[0]); - - if (sop == NULL) { - PyErr_SetString(PyExc_TypeError, - "invalid data type 
for einsum"); - return -1; - } - - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. - */ - for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) { - for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) { - sop(1, ptrs[0], strides[0], shape[0]); - - ptr = ptrs[1][0] + strides[1][0]; - ptrs[0][0] = ptrs[1][0] = ptr; - ptr = ptrs[1][1] + strides[1][1]; - ptrs[0][1] = ptrs[1][1] = ptr; - } - ptr = ptrs[2][0] + strides[2][0]; - ptrs[0][0] = ptrs[1][0] = ptrs[2][0] = ptr; - ptr = ptrs[2][1] + strides[2][1]; - ptrs[0][1] = ptrs[1][1] = ptrs[2][1] = ptr; - } - - return 0; -} - -static int -unbuffered_loop_nop2_ndim2(NpyIter *iter) -{ - npy_intp coord, shape[2], strides[2][3]; - char *ptrs[2][3], *ptr; - sum_of_products_fn sop; - -#if NPY_EINSUM_DBG_TRACING - NpyIter_DebugPrint(iter); -#endif - NPY_EINSUM_DBG_PRINT("running hand-coded 2-op 2-dim loop\n"); - - NpyIter_GetShape(iter, shape); - memcpy(strides[0], NpyIter_GetAxisStrideArray(iter, 0), - 3*sizeof(npy_intp)); - memcpy(strides[1], NpyIter_GetAxisStrideArray(iter, 1), - 3*sizeof(npy_intp)); - memcpy(ptrs[0], NpyIter_GetInitialDataPtrArray(iter), - 3*sizeof(char *)); - memcpy(ptrs[1], ptrs[0], 3*sizeof(char*)); - - sop = get_sum_of_products_function(2, - NpyIter_GetDescrArray(iter)[0]->type_num, - NpyIter_GetDescrArray(iter)[0]->elsize, - strides[0]); - - if (sop == NULL) { - PyErr_SetString(PyExc_TypeError, - "invalid data type for einsum"); - return -1; - } - - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. 
- */ - for (coord = shape[1]; coord > 0; --coord) { - sop(2, ptrs[0], strides[0], shape[0]); - - ptr = ptrs[1][0] + strides[1][0]; - ptrs[0][0] = ptrs[1][0] = ptr; - ptr = ptrs[1][1] + strides[1][1]; - ptrs[0][1] = ptrs[1][1] = ptr; - ptr = ptrs[1][2] + strides[1][2]; - ptrs[0][2] = ptrs[1][2] = ptr; - } - - return 0; -} - -static int -unbuffered_loop_nop2_ndim3(NpyIter *iter) -{ - npy_intp coords[2], shape[3], strides[3][3]; - char *ptrs[3][3], *ptr; - sum_of_products_fn sop; - -#if NPY_EINSUM_DBG_TRACING - NpyIter_DebugPrint(iter); -#endif - NPY_EINSUM_DBG_PRINT("running hand-coded 2-op 3-dim loop\n"); - - NpyIter_GetShape(iter, shape); - memcpy(strides[0], NpyIter_GetAxisStrideArray(iter, 0), - 3*sizeof(npy_intp)); - memcpy(strides[1], NpyIter_GetAxisStrideArray(iter, 1), - 3*sizeof(npy_intp)); - memcpy(strides[2], NpyIter_GetAxisStrideArray(iter, 2), - 3*sizeof(npy_intp)); - memcpy(ptrs[0], NpyIter_GetInitialDataPtrArray(iter), - 3*sizeof(char *)); - memcpy(ptrs[1], ptrs[0], 3*sizeof(char*)); - memcpy(ptrs[2], ptrs[0], 3*sizeof(char*)); - - sop = get_sum_of_products_function(2, - NpyIter_GetDescrArray(iter)[0]->type_num, - NpyIter_GetDescrArray(iter)[0]->elsize, - strides[0]); - - if (sop == NULL) { - PyErr_SetString(PyExc_TypeError, - "invalid data type for einsum"); - return -1; - } - - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. 
- */ - for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) { - for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) { - sop(2, ptrs[0], strides[0], shape[0]); - - ptr = ptrs[1][0] + strides[1][0]; - ptrs[0][0] = ptrs[1][0] = ptr; - ptr = ptrs[1][1] + strides[1][1]; - ptrs[0][1] = ptrs[1][1] = ptr; - ptr = ptrs[1][2] + strides[1][2]; - ptrs[0][2] = ptrs[1][2] = ptr; - } - ptr = ptrs[2][0] + strides[2][0]; - ptrs[0][0] = ptrs[1][0] = ptrs[2][0] = ptr; - ptr = ptrs[2][1] + strides[2][1]; - ptrs[0][1] = ptrs[1][1] = ptrs[2][1] = ptr; - ptr = ptrs[2][2] + strides[2][2]; - ptrs[0][2] = ptrs[1][2] = ptrs[2][2] = ptr; - } - - return 0; -} - - -/*NUMPY_API - * This function provides summation of array elements according to - * the Einstein summation convention. For example: - * - trace(a) -> einsum("ii", a) - * - transpose(a) -> einsum("ji", a) - * - multiply(a,b) -> einsum(",", a, b) - * - inner(a,b) -> einsum("i,i", a, b) - * - outer(a,b) -> einsum("i,j", a, b) - * - matvec(a,b) -> einsum("ij,j", a, b) - * - matmat(a,b) -> einsum("ij,jk", a, b) - * - * subscripts: The string of subscripts for einstein summation. - * nop: The number of operands - * op_in: The array of operands - * dtype: Either NULL, or the data type to force the calculation as. - * order: The order for the calculation/the output axes. - * casting: What kind of casts should be permitted. - * out: Either NULL, or an array into which the output should be placed. - * - * By default, the labels get placed in alphabetical order - * at the end of the output. So, if c = einsum("i,j", a, b) - * then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) - * then c[i,j] = a[j]*b[i]. - * - * Alternatively, you can control the output order or prevent - * an axis from being summed/force an axis to be summed by providing - * indices for the output. This allows us to turn 'trace' into - * 'diag', for example. 
- * - diag(a) -> einsum("ii->i", a) - * - sum(a, axis=0) -> einsum("i...->", a) - * - * Subscripts at the beginning and end may be specified by - * putting an ellipsis "..." in the middle. For example, - * the function einsum("i...i", a) takes the diagonal of - * the first and last dimensions of the operand, and - * einsum("ij...,jk...->ik...") takes the matrix product using - * the first two indices of each operand instead of the last two. - * - * When there is only one operand, no axes being summed, and - * no output parameter, this function returns a view - * into the operand instead of making a copy. - */ -NPY_NO_EXPORT PyArrayObject * -PyArray_EinsteinSum(char *subscripts, npy_intp nop, - PyArrayObject **op_in, - PyArray_Descr *dtype, - NPY_ORDER order, NPY_CASTING casting, - PyArrayObject *out) -{ - int iop, label, min_label = 127, max_label = 0, num_labels; - char label_counts[128]; - char op_labels[NPY_MAXARGS][NPY_MAXDIMS]; - char output_labels[NPY_MAXDIMS], *iter_labels; - int idim, ndim_output, ndim_broadcast, ndim_iter; - - EINSUM_BROADCAST broadcast[NPY_MAXARGS]; - PyArrayObject *op[NPY_MAXARGS], *ret = NULL; - PyArray_Descr *op_dtypes_array[NPY_MAXARGS], **op_dtypes; - - int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS]; - int *op_axes[NPY_MAXARGS]; - npy_uint32 op_flags[NPY_MAXARGS]; - - NpyIter *iter; - sum_of_products_fn sop; - npy_intp fixed_strides[NPY_MAXARGS]; - - /* nop+1 (+1 is for the output) must fit in NPY_MAXARGS */ - if (nop >= NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, - "too many operands provided to einstein sum function"); - return NULL; - } - else if (nop < 1) { - PyErr_SetString(PyExc_ValueError, - "not enough operands provided to einstein sum function"); - return NULL; - } - - /* Parse the subscripts string into label_counts and op_labels */ - memset(label_counts, 0, sizeof(label_counts)); - num_labels = 0; - for (iop = 0; iop < nop; ++iop) { - int length = (int)strcspn(subscripts, ",-"); - - if (iop == nop-1 && 
subscripts[length] == ',') { - PyErr_SetString(PyExc_ValueError, - "more operands provided to einstein sum function " - "than specified in the subscripts string"); - return NULL; - } - else if(iop < nop-1 && subscripts[length] != ',') { - PyErr_SetString(PyExc_ValueError, - "fewer operands provided to einstein sum function " - "than specified in the subscripts string"); - return NULL; - } - - if (!parse_operand_subscripts(subscripts, length, - PyArray_NDIM(op_in[iop]), - iop, op_labels[iop], label_counts, - &min_label, &max_label, &num_labels, - &broadcast[iop])) { - return NULL; - } - - /* Move subscripts to the start of the labels for the next op */ - subscripts += length; - if (iop < nop-1) { - subscripts++; - } - } - - /* - * Find the number of broadcast dimensions, which is the maximum - * number of labels == 0 in an op_labels array. - */ - ndim_broadcast = 0; - for (iop = 0; iop < nop; ++iop) { - npy_intp count_zeros = 0; - int ndim; - char *labels = op_labels[iop]; - - ndim = PyArray_NDIM(op_in[iop]); - for (idim = 0; idim < ndim; ++idim) { - if (labels[idim] == 0) { - ++count_zeros; - } - } - - if (count_zeros > ndim_broadcast) { - ndim_broadcast = count_zeros; - } - } - - /* - * If there is no output signature, create one using each label - * that appeared once, in alphabetical order - */ - if (subscripts[0] == '\0') { - char outsubscripts[NPY_MAXDIMS + 3]; - int length; - /* If no output was specified, always broadcast left (like normal) */ - outsubscripts[0] = '.'; - outsubscripts[1] = '.'; - outsubscripts[2] = '.'; - length = 3; - for (label = min_label; label <= max_label; ++label) { - if (label_counts[label] == 1) { - if (length < NPY_MAXDIMS-1) { - outsubscripts[length++] = label; - } - else { - PyErr_SetString(PyExc_ValueError, - "einstein sum subscript string has too many " - "distinct labels"); - return NULL; - } - } - } - /* Parse the output subscript string */ - ndim_output = parse_output_subscripts(outsubscripts, length, - ndim_broadcast, 
label_counts, - output_labels, &broadcast[nop]); - } - else { - if (subscripts[0] != '-' || subscripts[1] != '>') { - PyErr_SetString(PyExc_ValueError, - "einstein sum subscript string does not " - "contain proper '->' output specified"); - return NULL; - } - subscripts += 2; - - /* Parse the output subscript string */ - ndim_output = parse_output_subscripts(subscripts, strlen(subscripts), - ndim_broadcast, label_counts, - output_labels, &broadcast[nop]); - } - if (ndim_output < 0) { - return NULL; - } - - if (out != NULL && PyArray_NDIM(out) != ndim_output) { - PyErr_Format(PyExc_ValueError, - "out parameter does not have the correct number of " - "dimensions, has %d but should have %d", - (int)PyArray_NDIM(out), (int)ndim_output); - return NULL; - } - - /* Set all the op references to NULL */ - for (iop = 0; iop < nop; ++iop) { - op[iop] = NULL; - } - - /* - * Process all the input ops, combining dimensions into their - * diagonal where specified. - */ - for (iop = 0; iop < nop; ++iop) { - char *labels = op_labels[iop]; - int combine, ndim; - - ndim = PyArray_NDIM(op_in[iop]); - - /* - * If there's just one operand and no output parameter, - * first try remapping the axes to the output to return - * a view instead of a copy. - */ - if (iop == 0 && nop == 1 && out == NULL) { - ret = NULL; - - if (!get_single_op_view(op_in[iop], iop, labels, - ndim_output, output_labels, - &ret)) { - return NULL; - } - - if (ret != NULL) { - return ret; - } - } - - /* - * Check whether any dimensions need to be combined - * - * The char type may be either signed or unsigned, we - * need it to be signed here. 
- */ - combine = 0; - for (idim = 0; idim < ndim; ++idim) { - if ((signed char)labels[idim] < 0) { - combine = 1; - } - } - - /* If any dimensions are combined, create a view which combines them */ - if (combine) { - op[iop] = get_combined_dims_view(op_in[iop], iop, labels); - if (op[iop] == NULL) { - goto fail; - } - } - /* No combining needed */ - else { - Py_INCREF(op_in[iop]); - op[iop] = op_in[iop]; - } - } - - /* Set the output op */ - op[nop] = out; - - /* - * Set up the labels for the iterator (output + combined labels). - * Can just share the output_labels memory, because iter_labels - * is output_labels with some more labels appended. - */ - iter_labels = output_labels; - ndim_iter = ndim_output; - for (label = min_label; label <= max_label; ++label) { - if (label_counts[label] > 0 && - memchr(output_labels, label, ndim_output) == NULL) { - if (ndim_iter >= NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "too many subscripts in einsum"); - goto fail; - } - iter_labels[ndim_iter++] = label; - } - } - - /* Set up the op_axes for the iterator */ - for (iop = 0; iop < nop; ++iop) { - op_axes[iop] = op_axes_arrays[iop]; - - if (!prepare_op_axes(PyArray_NDIM(op[iop]), iop, op_labels[iop], - op_axes[iop], ndim_iter, iter_labels, broadcast[iop])) { - goto fail; - } - } - - /* Set up the op_dtypes if dtype was provided */ - if (dtype == NULL) { - op_dtypes = NULL; - } - else { - op_dtypes = op_dtypes_array; - for (iop = 0; iop <= nop; ++iop) { - op_dtypes[iop] = dtype; - } - } - - /* Set the op_axes for the output */ - op_axes[nop] = op_axes_arrays[nop]; - for (idim = 0; idim < ndim_output; ++idim) { - op_axes[nop][idim] = idim; - } - for (idim = ndim_output; idim < ndim_iter; ++idim) { - op_axes[nop][idim] = -1; - } - - /* Set the iterator per-op flags */ - - for (iop = 0; iop < nop; ++iop) { - op_flags[iop] = NPY_ITER_READONLY| - NPY_ITER_NBO| - NPY_ITER_ALIGNED; - } - op_flags[nop] = NPY_ITER_READWRITE| - NPY_ITER_NBO| - NPY_ITER_ALIGNED| - 
NPY_ITER_ALLOCATE| - NPY_ITER_NO_BROADCAST; - - /* Allocate the iterator */ - iter = NpyIter_AdvancedNew(nop+1, op, NPY_ITER_EXTERNAL_LOOP| - ((dtype != NULL) ? 0 : NPY_ITER_COMMON_DTYPE)| - NPY_ITER_BUFFERED| - NPY_ITER_DELAY_BUFALLOC| - NPY_ITER_GROWINNER| - NPY_ITER_REDUCE_OK| - NPY_ITER_REFS_OK| - NPY_ITER_ZEROSIZE_OK, - order, casting, - op_flags, op_dtypes, - ndim_iter, op_axes, NULL, 0); - - if (iter == NULL) { - goto fail; - } - - /* Initialize the output to all zeros and reset the iterator */ - ret = NpyIter_GetOperandArray(iter)[nop]; - Py_INCREF(ret); - PyArray_FillWithZero(ret); - - - /***************************/ - /* - * Acceleration for some specific loop structures. Note - * that with axis coalescing, inputs with more dimensions can - * be reduced to fit into these patterns. - */ - if (!NpyIter_RequiresBuffering(iter)) { - int ndim = NpyIter_GetNDim(iter); - switch (nop) { - case 1: - if (ndim == 2) { - if (unbuffered_loop_nop1_ndim2(iter) < 0) { - Py_DECREF(ret); - ret = NULL; - goto fail; - } - goto finish; - } - else if (ndim == 3) { - if (unbuffered_loop_nop1_ndim3(iter) < 0) { - Py_DECREF(ret); - ret = NULL; - goto fail; - } - goto finish; - } - break; - case 2: - if (ndim == 2) { - if (unbuffered_loop_nop2_ndim2(iter) < 0) { - Py_DECREF(ret); - ret = NULL; - goto fail; - } - goto finish; - } - else if (ndim == 3) { - if (unbuffered_loop_nop2_ndim3(iter) < 0) { - Py_DECREF(ret); - ret = NULL; - goto fail; - } - goto finish; - } - break; - } - } - /***************************/ - - if (NpyIter_Reset(iter, NULL) != NPY_SUCCEED) { - Py_DECREF(ret); - goto fail; - } - - /* - * Get an inner loop function, specializing it based on - * the strides that are fixed for the whole loop. 
- */ - NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); - sop = get_sum_of_products_function(nop, - NpyIter_GetDescrArray(iter)[0]->type_num, - NpyIter_GetDescrArray(iter)[0]->elsize, - fixed_strides); - -#if NPY_EINSUM_DBG_TRACING - NpyIter_DebugPrint(iter); -#endif - - /* Finally, the main loop */ - if (sop == NULL) { - PyErr_SetString(PyExc_TypeError, - "invalid data type for einsum"); - Py_DECREF(ret); - ret = NULL; - } - else if (NpyIter_GetIterSize(iter) != 0) { - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *stride; - npy_intp *countptr; - int needs_api = NpyIter_IterationNeedsAPI(iter); - NPY_BEGIN_THREADS_DEF; - - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - Py_DECREF(ret); - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - stride = NpyIter_GetInnerStrideArray(iter); - countptr = NpyIter_GetInnerLoopSizePtr(iter); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - NPY_EINSUM_DBG_PRINT("Einsum loop\n"); - do { - sop(nop, dataptr, stride, *countptr); - } while(iternext(iter)); - if (!needs_api) { - NPY_END_THREADS; - } - - /* If the API was needed, it may have thrown an error */ - if (needs_api && PyErr_Occurred()) { - Py_DECREF(ret); - ret = NULL; - } - } - -finish: - NpyIter_Deallocate(iter); - for (iop = 0; iop < nop; ++iop) { - Py_DECREF(op[iop]); - } - - return ret; - -fail: - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - } - - return NULL; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/flagsobject.c b/numpy-1.6.2/numpy/core/src/multiarray/flagsobject.c deleted file mode 100644 index cca064a347..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/flagsobject.c +++ /dev/null @@ -1,699 +0,0 @@ -/* Array Flags Object */ - -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include 
"numpy/npy_3kcompat.h" - -#include "common.h" - -static int -_IsContiguous(PyArrayObject *ap); - -static int -_IsFortranContiguous(PyArrayObject *ap); - -/*NUMPY_API - * - * Get New ArrayFlagsObject - */ -NPY_NO_EXPORT PyObject * -PyArray_NewFlagsObject(PyObject *obj) -{ - PyObject *flagobj; - int flags; - if (obj == NULL) { - flags = CONTIGUOUS | OWNDATA | FORTRAN | ALIGNED; - } - else { - flags = PyArray_FLAGS(obj); - } - flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) { - return NULL; - } - Py_XINCREF(obj); - ((PyArrayFlagsObject *)flagobj)->arr = obj; - ((PyArrayFlagsObject *)flagobj)->flags = flags; - return flagobj; -} - -/*NUMPY_API - * Update Several Flags at once. - */ -NPY_NO_EXPORT void -PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) -{ - - if (flagmask & FORTRAN) { - if (_IsFortranContiguous(ret)) { - ret->flags |= FORTRAN; - if (ret->nd > 1) { - ret->flags &= ~CONTIGUOUS; - } - } - else { - ret->flags &= ~FORTRAN; - } - } - if (flagmask & CONTIGUOUS) { - if (_IsContiguous(ret)) { - ret->flags |= CONTIGUOUS; - if (ret->nd > 1) { - ret->flags &= ~FORTRAN; - } - } - else { - ret->flags &= ~CONTIGUOUS; - } - } - if (flagmask & ALIGNED) { - if (_IsAligned(ret)) { - ret->flags |= ALIGNED; - } - else { - ret->flags &= ~ALIGNED; - } - } - /* - * This is not checked by default WRITEABLE is not - * part of UPDATE_ALL - */ - if (flagmask & WRITEABLE) { - if (_IsWriteable(ret)) { - ret->flags |= WRITEABLE; - } - else { - ret->flags &= ~WRITEABLE; - } - } - return; -} - -/* - * Check whether the given array is stored contiguously - * (row-wise) in memory. 
- * - * 0-strided arrays are not contiguous (even if dimension == 1) - */ -static int -_IsContiguous(PyArrayObject *ap) -{ - intp sd; - intp dim; - int i; - - if (ap->nd == 0) { - return 1; - } - sd = ap->descr->elsize; - if (ap->nd == 1) { - return ap->dimensions[0] == 1 || sd == ap->strides[0]; - } - for (i = ap->nd - 1; i >= 0; --i) { - dim = ap->dimensions[i]; - /* contiguous by definition */ - if (dim == 0) { - return 1; - } - if (ap->strides[i] != sd) { - return 0; - } - sd *= dim; - } - return 1; -} - - -/* 0-strided arrays are not contiguous (even if dimension == 1) */ -static int -_IsFortranContiguous(PyArrayObject *ap) -{ - intp sd; - intp dim; - int i; - - if (ap->nd == 0) { - return 1; - } - sd = ap->descr->elsize; - if (ap->nd == 1) { - return ap->dimensions[0] == 1 || sd == ap->strides[0]; - } - for (i = 0; i < ap->nd; ++i) { - dim = ap->dimensions[i]; - /* fortran contiguous by definition */ - if (dim == 0) { - return 1; - } - if (ap->strides[i] != sd) { - return 0; - } - sd *= dim; - } - return 1; -} - -static void -arrayflags_dealloc(PyArrayFlagsObject *self) -{ - Py_XDECREF(self->arr); - Py_TYPE(self)->tp_free((PyObject *)self); -} - - -#define _define_get(UPPER, lower) \ - static PyObject * \ - arrayflags_ ## lower ## _get(PyArrayFlagsObject *self) \ - { \ - PyObject *item; \ - item = ((self->flags & (UPPER)) == (UPPER)) ? 
Py_True : Py_False; \ - Py_INCREF(item); \ - return item; \ - } - -_define_get(CONTIGUOUS, contiguous) -_define_get(FORTRAN, fortran) -_define_get(UPDATEIFCOPY, updateifcopy) -_define_get(OWNDATA, owndata) -_define_get(ALIGNED, aligned) -_define_get(WRITEABLE, writeable) - -_define_get(ALIGNED|WRITEABLE, behaved) -_define_get(ALIGNED|WRITEABLE|CONTIGUOUS, carray) - -static PyObject * -arrayflags_forc_get(PyArrayFlagsObject *self) -{ - PyObject *item; - - if (((self->flags & FORTRAN) == FORTRAN) || - ((self->flags & CONTIGUOUS) == CONTIGUOUS)) { - item = Py_True; - } - else { - item = Py_False; - } - Py_INCREF(item); - return item; -} - -static PyObject * -arrayflags_fnc_get(PyArrayFlagsObject *self) -{ - PyObject *item; - - if (((self->flags & FORTRAN) == FORTRAN) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { - item = Py_True; - } - else { - item = Py_False; - } - Py_INCREF(item); - return item; -} - -static PyObject * -arrayflags_farray_get(PyArrayFlagsObject *self) -{ - PyObject *item; - - if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == - (ALIGNED|WRITEABLE|FORTRAN)) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { - item = Py_True; - } - else { - item = Py_False; - } - Py_INCREF(item); - return item; -} - -static PyObject * -arrayflags_num_get(PyArrayFlagsObject *self) -{ - return PyInt_FromLong(self->flags); -} - -/* relies on setflags order being write, align, uic */ -static int -arrayflags_updateifcopy_set(PyArrayFlagsObject *self, PyObject *obj) -{ - PyObject *res; - - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete flags updateifcopy attribute"); - return -1; - } - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot set flags on array scalars."); - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, - (PyObject_IsTrue(obj) ? 
Py_True : Py_False)); - if (res == NULL) { - return -1; - } - Py_DECREF(res); - return 0; -} - -static int -arrayflags_aligned_set(PyArrayFlagsObject *self, PyObject *obj) -{ - PyObject *res; - - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete flags aligned attribute"); - return -1; - } - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot set flags on array scalars."); - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, - (PyObject_IsTrue(obj) ? Py_True : Py_False), - Py_None); - if (res == NULL) { - return -1; - } - Py_DECREF(res); - return 0; -} - -static int -arrayflags_writeable_set(PyArrayFlagsObject *self, PyObject *obj) -{ - PyObject *res; - - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete flags writeable attribute"); - return -1; - } - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot set flags on array scalars."); - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", - (PyObject_IsTrue(obj) ? 
Py_True : Py_False), - Py_None, Py_None); - if (res == NULL) { - return -1; - } - Py_DECREF(res); - return 0; -} - - -static PyGetSetDef arrayflags_getsets[] = { - {"contiguous", - (getter)arrayflags_contiguous_get, - NULL, - NULL, NULL}, - {"c_contiguous", - (getter)arrayflags_contiguous_get, - NULL, - NULL, NULL}, - {"f_contiguous", - (getter)arrayflags_fortran_get, - NULL, - NULL, NULL}, - {"fortran", - (getter)arrayflags_fortran_get, - NULL, - NULL, NULL}, - {"updateifcopy", - (getter)arrayflags_updateifcopy_get, - (setter)arrayflags_updateifcopy_set, - NULL, NULL}, - {"owndata", - (getter)arrayflags_owndata_get, - NULL, - NULL, NULL}, - {"aligned", - (getter)arrayflags_aligned_get, - (setter)arrayflags_aligned_set, - NULL, NULL}, - {"writeable", - (getter)arrayflags_writeable_get, - (setter)arrayflags_writeable_set, - NULL, NULL}, - {"fnc", - (getter)arrayflags_fnc_get, - NULL, - NULL, NULL}, - {"forc", - (getter)arrayflags_forc_get, - NULL, - NULL, NULL}, - {"behaved", - (getter)arrayflags_behaved_get, - NULL, - NULL, NULL}, - {"carray", - (getter)arrayflags_carray_get, - NULL, - NULL, NULL}, - {"farray", - (getter)arrayflags_farray_get, - NULL, - NULL, NULL}, - {"num", - (getter)arrayflags_num_get, - NULL, - NULL, NULL}, - {NULL, NULL, NULL, NULL, NULL}, -}; - -static PyObject * -arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind) -{ - char *key = NULL; - char buf[16]; - int n; - if (PyUnicode_Check(ind)) { - PyObject *tmp_str; - tmp_str = PyUnicode_AsASCIIString(ind); - if (tmp_str == NULL) { - return NULL; - } - key = PyBytes_AS_STRING(tmp_str); - n = PyBytes_GET_SIZE(tmp_str); - if (n > 16) { - Py_DECREF(tmp_str); - goto fail; - } - memcpy(buf, key, n); - Py_DECREF(tmp_str); - key = buf; - } - else if (PyBytes_Check(ind)) { - key = PyBytes_AS_STRING(ind); - n = PyBytes_GET_SIZE(ind); - } - else { - goto fail; - } - switch(n) { - case 1: - switch(key[0]) { - case 'C': - return arrayflags_contiguous_get(self); - case 'F': - return 
arrayflags_fortran_get(self); - case 'W': - return arrayflags_writeable_get(self); - case 'B': - return arrayflags_behaved_get(self); - case 'O': - return arrayflags_owndata_get(self); - case 'A': - return arrayflags_aligned_get(self); - case 'U': - return arrayflags_updateifcopy_get(self); - default: - goto fail; - } - break; - case 2: - if (strncmp(key, "CA", n) == 0) { - return arrayflags_carray_get(self); - } - if (strncmp(key, "FA", n) == 0) { - return arrayflags_farray_get(self); - } - break; - case 3: - if (strncmp(key, "FNC", n) == 0) { - return arrayflags_fnc_get(self); - } - break; - case 4: - if (strncmp(key, "FORC", n) == 0) { - return arrayflags_forc_get(self); - } - break; - case 6: - if (strncmp(key, "CARRAY", n) == 0) { - return arrayflags_carray_get(self); - } - if (strncmp(key, "FARRAY", n) == 0) { - return arrayflags_farray_get(self); - } - break; - case 7: - if (strncmp(key,"FORTRAN",n) == 0) { - return arrayflags_fortran_get(self); - } - if (strncmp(key,"BEHAVED",n) == 0) { - return arrayflags_behaved_get(self); - } - if (strncmp(key,"OWNDATA",n) == 0) { - return arrayflags_owndata_get(self); - } - if (strncmp(key,"ALIGNED",n) == 0) { - return arrayflags_aligned_get(self); - } - break; - case 9: - if (strncmp(key,"WRITEABLE",n) == 0) { - return arrayflags_writeable_get(self); - } - break; - case 10: - if (strncmp(key,"CONTIGUOUS",n) == 0) { - return arrayflags_contiguous_get(self); - } - break; - case 12: - if (strncmp(key, "UPDATEIFCOPY", n) == 0) { - return arrayflags_updateifcopy_get(self); - } - if (strncmp(key, "C_CONTIGUOUS", n) == 0) { - return arrayflags_contiguous_get(self); - } - if (strncmp(key, "F_CONTIGUOUS", n) == 0) { - return arrayflags_fortran_get(self); - } - break; - } - - fail: - PyErr_SetString(PyExc_KeyError, "Unknown flag"); - return NULL; -} - -static int -arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item) -{ - char *key; - char buf[16]; - int n; - if (PyUnicode_Check(ind)) { - PyObject *tmp_str; 
- tmp_str = PyUnicode_AsASCIIString(ind); - key = PyBytes_AS_STRING(tmp_str); - n = PyBytes_GET_SIZE(tmp_str); - if (n > 16) n = 16; - memcpy(buf, key, n); - Py_DECREF(tmp_str); - key = buf; - } - else if (PyBytes_Check(ind)) { - key = PyBytes_AS_STRING(ind); - n = PyBytes_GET_SIZE(ind); - } - else { - goto fail; - } - if (((n==9) && (strncmp(key, "WRITEABLE", n) == 0)) || - ((n==1) && (strncmp(key, "W", n) == 0))) { - return arrayflags_writeable_set(self, item); - } - else if (((n==7) && (strncmp(key, "ALIGNED", n) == 0)) || - ((n==1) && (strncmp(key, "A", n) == 0))) { - return arrayflags_aligned_set(self, item); - } - else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n) == 0)) || - ((n==1) && (strncmp(key, "U", n) == 0))) { - return arrayflags_updateifcopy_set(self, item); - } - - fail: - PyErr_SetString(PyExc_KeyError, "Unknown flag"); - return -1; -} - -static char * -_torf_(int flags, int val) -{ - if ((flags & val) == val) { - return "True"; - } - else { - return "False"; - } -} - -static PyObject * -arrayflags_print(PyArrayFlagsObject *self) -{ - int fl = self->flags; - - return PyUString_FromFormat(" %s : %s\n %s : %s\n %s : %s\n"\ - " %s : %s\n %s : %s\n %s : %s", - "C_CONTIGUOUS", _torf_(fl, CONTIGUOUS), - "F_CONTIGUOUS", _torf_(fl, FORTRAN), - "OWNDATA", _torf_(fl, OWNDATA), - "WRITEABLE", _torf_(fl, WRITEABLE), - "ALIGNED", _torf_(fl, ALIGNED), - "UPDATEIFCOPY", _torf_(fl, UPDATEIFCOPY)); -} - - -static int -arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) -{ - if (self->flags == other->flags) { - return 0; - } - else if (self->flags < other->flags) { - return -1; - } - else { - return 1; - } -} - - -static PyObject* -arrayflags_richcompare(PyObject *self, PyObject *other, int cmp_op) -{ - PyObject *result = Py_NotImplemented; - int cmp; - - if (cmp_op != Py_EQ && cmp_op != Py_NE) { - PyErr_SetString(PyExc_TypeError, - "undefined comparison for flag object"); - return NULL; - } - - if (PyObject_TypeCheck(other, 
&PyArrayFlags_Type)) { - cmp = arrayflags_compare((PyArrayFlagsObject *)self, - (PyArrayFlagsObject *)other); - - if (cmp_op == Py_EQ) { - result = (cmp == 0) ? Py_True : Py_False; - } - else if (cmp_op == Py_NE) { - result = (cmp != 0) ? Py_True : Py_False; - } - } - - Py_INCREF(result); - return result; -} - -static PyMappingMethods arrayflags_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)NULL, /*mp_length*/ -#else - (inquiry)NULL, /*mp_length*/ -#endif - (binaryfunc)arrayflags_getitem, /*mp_subscript*/ - (objobjargproc)arrayflags_setitem, /*mp_ass_subscript*/ -}; - - -static PyObject * -arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNUSED(kwds)) -{ - PyObject *arg=NULL; - if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) { - return NULL; - } - if ((arg != NULL) && PyArray_Check(arg)) { - return PyArray_NewFlagsObject(arg); - } - else { - return PyArray_NewFlagsObject(NULL); - } -} - -NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.flagsobj", - sizeof(PyArrayFlagsObject), - 0, /* tp_itemsize */ - /* methods */ - (destructor)arrayflags_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - (cmpfunc)arrayflags_compare, /* tp_compare */ -#endif - (reprfunc)arrayflags_print, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &arrayflags_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arrayflags_print, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - arrayflags_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 
arrayflags_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arrayflags_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; diff --git a/numpy-1.6.2/numpy/core/src/multiarray/getset.c b/numpy-1.6.2/numpy/core/src/multiarray/getset.c deleted file mode 100644 index 12e608f332..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/getset.c +++ /dev/null @@ -1,958 +0,0 @@ -/* Array Descr Object */ - -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "scalartypes.h" -#include "descriptor.h" -#include "getset.h" - -/******************* array attribute get and set routines ******************/ - -static PyObject * -array_ndim_get(PyArrayObject *self) -{ - return PyInt_FromLong(self->nd); -} - -static PyObject * -array_flags_get(PyArrayObject *self) -{ - return PyArray_NewFlagsObject((PyObject *)self); -} - -static PyObject * -array_shape_get(PyArrayObject *self) -{ - return PyArray_IntTupleFromIntp(self->nd, self->dimensions); -} - - -static int -array_shape_set(PyArrayObject *self, PyObject *val) -{ - int nd; - PyObject *ret; - - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array shape"); - return -1; - } - /* Assumes C-order */ - ret = PyArray_Reshape(self, val); - if (ret == NULL) { - return -1; - } - if (PyArray_DATA(ret) != PyArray_DATA(self)) { - Py_DECREF(ret); - PyErr_SetString(PyExc_AttributeError, - "incompatible shape for a non-contiguous "\ - "array"); - return -1; - } - - /* Free old dimensions and strides */ - 
PyDimMem_FREE(self->dimensions); - nd = PyArray_NDIM(ret); - self->nd = nd; - if (nd > 0) { - /* create new dimensions and strides */ - self->dimensions = PyDimMem_NEW(2*nd); - if (self->dimensions == NULL) { - Py_DECREF(ret); - PyErr_SetString(PyExc_MemoryError,""); - return -1; - } - self->strides = self->dimensions + nd; - memcpy(self->dimensions, PyArray_DIMS(ret), nd*sizeof(intp)); - memcpy(self->strides, PyArray_STRIDES(ret), nd*sizeof(intp)); - } - else { - self->dimensions = NULL; - self->strides = NULL; - } - Py_DECREF(ret); - PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); - return 0; -} - - -static PyObject * -array_strides_get(PyArrayObject *self) -{ - return PyArray_IntTupleFromIntp(self->nd, self->strides); -} - -static int -array_strides_set(PyArrayObject *self, PyObject *obj) -{ - PyArray_Dims newstrides = {NULL, 0}; - PyArrayObject *new; - intp numbytes = 0; - intp offset = 0; - Py_ssize_t buf_len; - char *buf; - - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array strides"); - return -1; - } - if (!PyArray_IntpConverter(obj, &newstrides) || - newstrides.ptr == NULL) { - PyErr_SetString(PyExc_TypeError, "invalid strides"); - return -1; - } - if (newstrides.len != self->nd) { - PyErr_Format(PyExc_ValueError, "strides must be " \ - " same length as shape (%d)", self->nd); - goto fail; - } - new = self; - while(new->base && PyArray_Check(new->base)) { - new = (PyArrayObject *)(new->base); - } - /* - * Get the available memory through the buffer interface on - * new->base or if that fails from the current new - */ - if (new->base && PyObject_AsReadBuffer(new->base, - (const void **)&buf, - &buf_len) >= 0) { - offset = self->data - buf; - numbytes = buf_len + offset; - } - else { - PyErr_Clear(); - numbytes = PyArray_MultiplyList(new->dimensions, - new->nd)*new->descr->elsize; - offset = self->data - new->data; - } - - if (!PyArray_CheckStrides(self->descr->elsize, self->nd, numbytes, - offset, - self->dimensions, 
newstrides.ptr)) { - PyErr_SetString(PyExc_ValueError, "strides is not "\ - "compatible with available memory"); - goto fail; - } - memcpy(self->strides, newstrides.ptr, sizeof(intp)*newstrides.len); - PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); - PyDimMem_FREE(newstrides.ptr); - return 0; - - fail: - PyDimMem_FREE(newstrides.ptr); - return -1; -} - - - -static PyObject * -array_priority_get(PyArrayObject *self) -{ - if (PyArray_CheckExact(self)) { - return PyFloat_FromDouble(PyArray_PRIORITY); - } - else { - return PyFloat_FromDouble(PyArray_SUBTYPE_PRIORITY); - } -} - -static PyObject * -array_typestr_get(PyArrayObject *self) -{ - return arraydescr_protocol_typestr_get(self->descr); -} - -static PyObject * -array_descr_get(PyArrayObject *self) -{ - Py_INCREF(self->descr); - return (PyObject *)self->descr; -} - -static PyObject * -array_protocol_descr_get(PyArrayObject *self) -{ - PyObject *res; - PyObject *dobj; - - res = arraydescr_protocol_descr_get(self->descr); - if (res) { - return res; - } - PyErr_Clear(); - - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) { - return NULL; - } - PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); - PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) { - Py_DECREF(dobj); - return NULL; - } - PyList_SET_ITEM(res, 0, dobj); - return res; -} - -static PyObject * -array_protocol_strides_get(PyArrayObject *self) -{ - if PyArray_ISCONTIGUOUS(self) { - Py_INCREF(Py_None); - return Py_None; - } - return PyArray_IntTupleFromIntp(self->nd, self->strides); -} - - - -static PyObject * -array_dataptr_get(PyArrayObject *self) -{ - return Py_BuildValue("NO", - PyLong_FromVoidPtr(self->data), - (self->flags & WRITEABLE ? 
Py_False : - Py_True)); -} - -static PyObject * -array_ctypes_get(PyArrayObject *self) -{ - PyObject *_numpy_internal; - PyObject *ret; - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - ret = PyObject_CallMethod(_numpy_internal, "_ctypes", "ON", self, - PyLong_FromVoidPtr(self->data)); - Py_DECREF(_numpy_internal); - return ret; -} - -static PyObject * -array_interface_get(PyArrayObject *self) -{ - PyObject *dict; - PyObject *obj; - - dict = PyDict_New(); - if (dict == NULL) { - return NULL; - } - - /* dataptr */ - obj = array_dataptr_get(self); - PyDict_SetItemString(dict, "data", obj); - Py_DECREF(obj); - - obj = array_protocol_strides_get(self); - PyDict_SetItemString(dict, "strides", obj); - Py_DECREF(obj); - - obj = array_protocol_descr_get(self); - PyDict_SetItemString(dict, "descr", obj); - Py_DECREF(obj); - - obj = arraydescr_protocol_typestr_get(self->descr); - PyDict_SetItemString(dict, "typestr", obj); - Py_DECREF(obj); - - obj = array_shape_get(self); - PyDict_SetItemString(dict, "shape", obj); - Py_DECREF(obj); - - obj = PyInt_FromLong(3); - PyDict_SetItemString(dict, "version", obj); - Py_DECREF(obj); - - return dict; -} - -static PyObject * -array_data_get(PyArrayObject *self) -{ -#if defined(NPY_PY3K) - return PyMemoryView_FromObject(self); -#else - intp nbytes; - if (!(PyArray_ISONESEGMENT(self))) { - PyErr_SetString(PyExc_AttributeError, "cannot get single-"\ - "segment buffer for discontiguous array"); - return NULL; - } - nbytes = PyArray_NBYTES(self); - if (PyArray_ISWRITEABLE(self)) { - return PyBuffer_FromReadWriteObject((PyObject *)self, 0, (Py_ssize_t) nbytes); - } - else { - return PyBuffer_FromObject((PyObject *)self, 0, (Py_ssize_t) nbytes); - } -#endif -} - -static int -array_data_set(PyArrayObject *self, PyObject *op) -{ - void *buf; - Py_ssize_t buf_len; - int writeable=1; - - if (op == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array data"); 
- return -1; - } - if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) { - writeable = 0; - if (PyObject_AsReadBuffer(op, (const void **)&buf, &buf_len) < 0) { - PyErr_SetString(PyExc_AttributeError, - "object does not have single-segment " \ - "buffer interface"); - return -1; - } - } - if (!PyArray_ISONESEGMENT(self)) { - PyErr_SetString(PyExc_AttributeError, "cannot set single-" \ - "segment buffer for discontiguous array"); - return -1; - } - if (PyArray_NBYTES(self) > buf_len) { - PyErr_SetString(PyExc_AttributeError, "not enough data for array"); - return -1; - } - if (self->flags & OWNDATA) { - PyArray_XDECREF(self); - PyDataMem_FREE(self->data); - } - if (self->base) { - if (self->flags & UPDATEIFCOPY) { - ((PyArrayObject *)self->base)->flags |= WRITEABLE; - self->flags &= ~UPDATEIFCOPY; - } - Py_DECREF(self->base); - } - Py_INCREF(op); - self->base = op; - self->data = buf; - self->flags = CARRAY; - if (!writeable) { - self->flags &= ~WRITEABLE; - } - return 0; -} - - -static PyObject * -array_itemsize_get(PyArrayObject *self) -{ - return PyInt_FromLong((long) self->descr->elsize); -} - -static PyObject * -array_size_get(PyArrayObject *self) -{ - intp size=PyArray_SIZE(self); -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) size); -#else - if (size > MAX_LONG || size < MIN_LONG) { - return PyLong_FromLongLong(size); - } - else { - return PyInt_FromLong((long) size); - } -#endif -} - -static PyObject * -array_nbytes_get(PyArrayObject *self) -{ - intp nbytes = PyArray_NBYTES(self); -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) nbytes); -#else - if (nbytes > MAX_LONG || nbytes < MIN_LONG) { - return PyLong_FromLongLong(nbytes); - } - else { - return PyInt_FromLong((long) nbytes); - } -#endif -} - - -/* - * If the type is changed. 
- * Also needing change: strides, itemsize - * - * Either itemsize is exactly the same or the array is single-segment - * (contiguous or fortran) with compatibile dimensions The shape and strides - * will be adjusted in that case as well. - */ - -static int -array_descr_set(PyArrayObject *self, PyObject *arg) -{ - PyArray_Descr *newtype = NULL; - intp newdim; - int index; - char *msg = "new type not compatible with array."; - - if (arg == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array dtype"); - return -1; - } - - if (!(PyArray_DescrConverter(arg, &newtype)) || - newtype == NULL) { - PyErr_SetString(PyExc_TypeError, "invalid data-type for array"); - return -1; - } - if (PyDataType_FLAGCHK(newtype, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(newtype, NPY_ITEM_IS_POINTER) || - PyDataType_FLAGCHK(self->descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(self->descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_TypeError, \ - "Cannot change data-type for object " \ - "array."); - Py_DECREF(newtype); - return -1; - } - - if (newtype->elsize == 0) { - /* Allow a void view */ - if (newtype->type_num == NPY_VOID) { - PyArray_DESCR_REPLACE(newtype); - if (newtype == NULL) { - return -1; - } - newtype->elsize = self->descr->elsize; - } - /* But no other flexible types */ - else { - PyErr_SetString(PyExc_TypeError, - "data-type must not be 0-sized"); - Py_DECREF(newtype); - return -1; - } - } - - - if ((newtype->elsize != self->descr->elsize) && - (self->nd == 0 || !PyArray_ISONESEGMENT(self) || - newtype->subarray)) { - goto fail; - } - if (PyArray_ISCONTIGUOUS(self)) { - index = self->nd - 1; - } - else { - index = 0; - } - if (newtype->elsize < self->descr->elsize) { - /* - * if it is compatible increase the size of the - * dimension at end (or at the front for FORTRAN) - */ - if (self->descr->elsize % newtype->elsize != 0) { - goto fail; - } - newdim = self->descr->elsize / newtype->elsize; - self->dimensions[index] *= newdim; - 
self->strides[index] = newtype->elsize; - } - else if (newtype->elsize > self->descr->elsize) { - /* - * Determine if last (or first if FORTRAN) dimension - * is compatible - */ - newdim = self->dimensions[index] * self->descr->elsize; - if ((newdim % newtype->elsize) != 0) { - goto fail; - } - self->dimensions[index] = newdim / newtype->elsize; - self->strides[index] = newtype->elsize; - } - - /* fall through -- adjust type*/ - Py_DECREF(self->descr); - if (newtype->subarray) { - /* - * create new array object from data and update - * dimensions, strides and descr from it - */ - PyArrayObject *temp; - /* - * We would decref newtype here. - * temp will steal a reference to it - */ - temp = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, newtype, self->nd, - self->dimensions, self->strides, - self->data, self->flags, NULL); - if (temp == NULL) { - return -1; - } - PyDimMem_FREE(self->dimensions); - self->dimensions = temp->dimensions; - self->nd = temp->nd; - self->strides = temp->strides; - newtype = temp->descr; - Py_INCREF(temp->descr); - /* Fool deallocator not to delete these*/ - temp->nd = 0; - temp->dimensions = NULL; - Py_DECREF(temp); - } - - self->descr = newtype; - PyArray_UpdateFlags(self, UPDATE_ALL); - return 0; - - fail: - PyErr_SetString(PyExc_ValueError, msg); - Py_DECREF(newtype); - return -1; -} - -static PyObject * -array_struct_get(PyArrayObject *self) -{ - PyArrayInterface *inter; - PyObject *ret; - - inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - if (inter==NULL) { - return PyErr_NoMemory(); - } - inter->two = 2; - inter->nd = self->nd; - inter->typekind = self->descr->kind; - inter->itemsize = self->descr->elsize; - inter->flags = self->flags; - /* reset unused flags */ - inter->flags &= ~(UPDATEIFCOPY | OWNDATA); - if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NOTSWAPPED; - /* - * Copy shape and strides over since these can be reset - *when the array is "reshaped". 
- */ - if (self->nd > 0) { - inter->shape = (intp *)_pya_malloc(2*sizeof(intp)*self->nd); - if (inter->shape == NULL) { - _pya_free(inter); - return PyErr_NoMemory(); - } - inter->strides = inter->shape + self->nd; - memcpy(inter->shape, self->dimensions, sizeof(intp)*self->nd); - memcpy(inter->strides, self->strides, sizeof(intp)*self->nd); - } - else { - inter->shape = NULL; - inter->strides = NULL; - } - inter->data = self->data; - if (self->descr->names) { - inter->descr = arraydescr_protocol_descr_get(self->descr); - if (inter->descr == NULL) { - PyErr_Clear(); - } - else { - inter->flags &= ARR_HAS_DESCR; - } - } - else { - inter->descr = NULL; - } - Py_INCREF(self); - ret = NpyCapsule_FromVoidPtrAndDesc(inter, self, gentype_struct_free); - return ret; -} - -static PyObject * -array_base_get(PyArrayObject *self) -{ - if (self->base == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - else { - Py_INCREF(self->base); - return self->base; - } -} - -/* - * Create a view of a complex array with an equivalent data-type - * except it is real instead of complex. - */ -static PyArrayObject * -_get_part(PyArrayObject *self, int imag) -{ - int float_type_num; - PyArray_Descr *type; - PyArrayObject *ret; - int offset; - - switch (self->descr->type_num) { - case PyArray_CFLOAT: - float_type_num = PyArray_FLOAT; - break; - case PyArray_CDOUBLE: - float_type_num = PyArray_DOUBLE; - break; - case PyArray_CLONGDOUBLE: - float_type_num = PyArray_LONGDOUBLE; - break; - default: - PyErr_Format(PyExc_ValueError, - "Cannot convert complex type number %d to float", - self->descr->type_num); - return NULL; - - } - type = PyArray_DescrFromType(float_type_num); - - offset = (imag ? 
type->elsize : 0); - - if (!PyArray_ISNBO(self->descr->byteorder)) { - PyArray_Descr *new; - new = PyArray_DescrNew(type); - new->byteorder = self->descr->byteorder; - Py_DECREF(type); - type = new; - } - ret = (PyArrayObject *) - PyArray_NewFromDescr(Py_TYPE(self), - type, - self->nd, - self->dimensions, - self->strides, - self->data + offset, - self->flags, (PyObject *)self); - if (ret == NULL) { - return NULL; - } - ret->flags &= ~CONTIGUOUS; - ret->flags &= ~FORTRAN; - Py_INCREF(self); - ret->base = (PyObject *)self; - return ret; -} - -/* For Object arrays, we need to get and set the - real part of each element. - */ - -static PyObject * -array_real_get(PyArrayObject *self) -{ - PyArrayObject *ret; - - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - return (PyObject *)ret; - } - else { - Py_INCREF(self); - return (PyObject *)self; - } -} - - -static int -array_real_set(PyArrayObject *self, PyObject *val) -{ - PyArrayObject *ret; - PyArrayObject *new; - int rint; - - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array real part"); - return -1; - } - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - if (ret == NULL) { - return -1; - } - } - else { - Py_INCREF(self); - ret = self; - } - new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) { - Py_DECREF(ret); - return -1; - } - rint = PyArray_MoveInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return rint; -} - -/* For Object arrays we need to get - and set the imaginary part of - each element -*/ - -static PyObject * -array_imag_get(PyArrayObject *self) -{ - PyArrayObject *ret; - - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 1); - } - else { - Py_INCREF(self->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), - self->descr, - self->nd, - self->dimensions, - NULL, NULL, - PyArray_ISFORTRAN(self), - (PyObject *)self); - if (ret == NULL) { - return NULL; - } - if (_zerofill(ret) < 0) { - return 
NULL; - } - ret->flags &= ~WRITEABLE; - } - return (PyObject *) ret; -} - -static int -array_imag_set(PyArrayObject *self, PyObject *val) -{ - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array imaginary part"); - return -1; - } - if (PyArray_ISCOMPLEX(self)) { - PyArrayObject *ret; - PyArrayObject *new; - int rint; - - ret = _get_part(self, 1); - if (ret == NULL) { - return -1; - } - new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) { - Py_DECREF(ret); - return -1; - } - rint = PyArray_MoveInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return rint; - } - else { - PyErr_SetString(PyExc_TypeError, - "array does not have imaginary part to set"); - return -1; - } -} - -static PyObject * -array_flat_get(PyArrayObject *self) -{ - return PyArray_IterNew((PyObject *)self); -} - -static int -array_flat_set(PyArrayObject *self, PyObject *val) -{ - PyObject *arr = NULL; - int retval = -1; - PyArrayIterObject *selfit = NULL, *arrit = NULL; - PyArray_Descr *typecode; - int swap; - PyArray_CopySwapFunc *copyswap; - - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array flat iterator"); - return -1; - } - typecode = PyArray_DESCR(self); - Py_INCREF(typecode); - arr = PyArray_FromAny(val, typecode, - 0, 0, FORCECAST | FORTRAN_IF(self), NULL); - if (arr == NULL) { - return -1; - } - arrit = (PyArrayIterObject *)PyArray_IterNew(arr); - if (arrit == NULL) { - goto exit; - } - selfit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (selfit == NULL) { - goto exit; - } - if (arrit->size == 0) { - retval = 0; - goto exit; - } - swap = PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(arr); - copyswap = self->descr->f->copyswap; - if (PyDataType_REFCHK(self->descr)) { - while (selfit->index < selfit->size) { - PyArray_Item_XDECREF(selfit->dataptr, self->descr); - PyArray_Item_INCREF(arrit->dataptr, PyArray_DESCR(arr)); - memmove(selfit->dataptr, arrit->dataptr, 
sizeof(PyObject **)); - if (swap) { - copyswap(selfit->dataptr, NULL, swap, self); - } - PyArray_ITER_NEXT(selfit); - PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) { - PyArray_ITER_RESET(arrit); - } - } - retval = 0; - goto exit; - } - - while(selfit->index < selfit->size) { - memmove(selfit->dataptr, arrit->dataptr, self->descr->elsize); - if (swap) { - copyswap(selfit->dataptr, NULL, swap, self); - } - PyArray_ITER_NEXT(selfit); - PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) { - PyArray_ITER_RESET(arrit); - } - } - retval = 0; - - exit: - Py_XDECREF(selfit); - Py_XDECREF(arrit); - Py_XDECREF(arr); - return retval; -} - -static PyObject * -array_transpose_get(PyArrayObject *self) -{ - return PyArray_Transpose(self, NULL); -} - -/* If this is None, no function call is made - --- default sub-class behavior -*/ -static PyObject * -array_finalize_get(PyArrayObject *NPY_UNUSED(self)) -{ - Py_INCREF(Py_None); - return Py_None; -} - -NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { - {"ndim", - (getter)array_ndim_get, - NULL, - NULL, NULL}, - {"flags", - (getter)array_flags_get, - NULL, - NULL, NULL}, - {"shape", - (getter)array_shape_get, - (setter)array_shape_set, - NULL, NULL}, - {"strides", - (getter)array_strides_get, - (setter)array_strides_set, - NULL, NULL}, - {"data", - (getter)array_data_get, - (setter)array_data_set, - NULL, NULL}, - {"itemsize", - (getter)array_itemsize_get, - NULL, - NULL, NULL}, - {"size", - (getter)array_size_get, - NULL, - NULL, NULL}, - {"nbytes", - (getter)array_nbytes_get, - NULL, - NULL, NULL}, - {"base", - (getter)array_base_get, - NULL, - NULL, NULL}, - {"dtype", - (getter)array_descr_get, - (setter)array_descr_set, - NULL, NULL}, - {"real", - (getter)array_real_get, - (setter)array_real_set, - NULL, NULL}, - {"imag", - (getter)array_imag_get, - (setter)array_imag_set, - NULL, NULL}, - {"flat", - (getter)array_flat_get, - (setter)array_flat_set, - NULL, NULL}, - {"ctypes", - (getter)array_ctypes_get, - 
NULL, - NULL, NULL}, - {"T", - (getter)array_transpose_get, - NULL, - NULL, NULL}, - {"__array_interface__", - (getter)array_interface_get, - NULL, - NULL, NULL}, - {"__array_struct__", - (getter)array_struct_get, - NULL, - NULL, NULL}, - {"__array_priority__", - (getter)array_priority_get, - NULL, - NULL, NULL}, - {"__array_finalize__", - (getter)array_finalize_get, - NULL, - NULL, NULL}, - {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ -}; - -/****************** end of attribute get and set routines *******************/ diff --git a/numpy-1.6.2/numpy/core/src/multiarray/getset.h b/numpy-1.6.2/numpy/core/src/multiarray/getset.h deleted file mode 100644 index 98bd217f72..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/getset.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _NPY_ARRAY_GETSET_H_ -#define _NPY_ARRAY_GETSET_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/hashdescr.c b/numpy-1.6.2/numpy/core/src/multiarray/hashdescr.c deleted file mode 100644 index ef12364831..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/hashdescr.c +++ /dev/null @@ -1,318 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#define _MULTIARRAYMODULE -#include - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "hashdescr.h" - -/* - * How does this work ? The hash is computed from a list which contains all the - * information specific to a type. The hard work is to build the list - * (_array_descr_walk). The list is built as follows: - * * If the dtype is builtin (no fields, no subarray), then the list - * contains 6 items which uniquely define one dtype (_array_descr_builtin) - * * If the dtype is a compound array, one walk on each field. 
For each - * field, we append title, names, offset to the final list used for - * hashing, and then append the list recursively built for each - * corresponding dtype (_array_descr_walk_fields) - * * If the dtype is a subarray, one adds the shape tuple to the list, and - * then append the list recursively built for each corresponding dtype - * (_array_descr_walk_subarray) - * - */ - -static int _is_array_descr_builtin(PyArray_Descr* descr); -static int _array_descr_walk(PyArray_Descr* descr, PyObject *l); -static int _array_descr_walk_fields(PyObject* fields, PyObject* l); -static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l); - -/* - * normalize endian character: always return 'I', '<' or '>' - */ -static char _normalize_byteorder(char byteorder) -{ - switch(byteorder) { - case '=': - if (PyArray_GetEndianness() == NPY_CPU_BIG) { - return '>'; - } - else { - return '<'; - } - default: - return byteorder; - } -} - -/* - * Return true if descr is a builtin type - */ -static int _is_array_descr_builtin(PyArray_Descr* descr) -{ - if (descr->fields != NULL && descr->fields != Py_None) { - return 0; - } - if (descr->subarray != NULL) { - return 0; - } - return 1; -} - -/* - * Add to l all the items which uniquely define a builtin type - */ -static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l) -{ - Py_ssize_t i; - PyObject *t, *item; - char nbyteorder = _normalize_byteorder(descr->byteorder); - - /* - * For builtin type, hash relies on : kind + byteorder + flags + - * type_num + elsize + alignment - */ - t = Py_BuildValue("(cccii)", descr->kind, nbyteorder, - descr->flags, descr->elsize, descr->alignment); - - for(i = 0; i < PyTuple_Size(t); ++i) { - item = PyTuple_GetItem(t, i); - if (item == NULL) { - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while computing builting hash"); - goto clean_t; - } - Py_INCREF(item); - PyList_Append(l, item); - } - - Py_DECREF(t); - return 0; - -clean_t: - Py_DECREF(t); - return -1; -} - -/* - * Walk 
inside the fields and add every item which will be used for hashing - * into the list l - * - * Return 0 on success - */ -static int _array_descr_walk_fields(PyObject* fields, PyObject* l) -{ - PyObject *key, *value, *foffset, *fdescr; - Py_ssize_t pos = 0; - int st; - - while (PyDict_Next(fields, &pos, &key, &value)) { - /* - * For each field, add the key + descr + offset to l - */ - - /* XXX: are those checks necessary ? */ - if (!PyUString_Check(key)) { - PyErr_SetString(PyExc_SystemError, - "(Hash) key of dtype dict not a string ???"); - return -1; - } - if (!PyTuple_Check(value)) { - PyErr_SetString(PyExc_SystemError, - "(Hash) value of dtype dict not a dtype ???"); - return -1; - } - if (PyTuple_Size(value) < 2) { - PyErr_SetString(PyExc_SystemError, - "(Hash) Less than 2 items in dtype dict ???"); - return -1; - } - Py_INCREF(key); - PyList_Append(l, key); - - fdescr = PyTuple_GetItem(value, 0); - if (!PyArray_DescrCheck(fdescr)) { - PyErr_SetString(PyExc_SystemError, - "(Hash) First item in compound dtype tuple not a descr ???"); - return -1; - } - else { - Py_INCREF(fdescr); - st = _array_descr_walk((PyArray_Descr*)fdescr, l); - Py_DECREF(fdescr); - if (st) { - return -1; - } - } - - foffset = PyTuple_GetItem(value, 1); - if (!PyInt_Check(foffset)) { - PyErr_SetString(PyExc_SystemError, - "(Hash) Second item in compound dtype tuple not an int ???"); - return -1; - } - else { - Py_INCREF(foffset); - PyList_Append(l, foffset); - } - } - - return 0; -} - -/* - * Walk into subarray, and add items for hashing in l - * - * Return 0 on success - */ -static int _array_descr_walk_subarray(PyArray_ArrayDescr* adescr, PyObject *l) -{ - PyObject *item; - Py_ssize_t i; - int st; - - /* - * Add shape and descr itself to the list of object to hash - */ - if (PyTuple_Check(adescr->shape)) { - for(i = 0; i < PyTuple_Size(adescr->shape); ++i) { - item = PyTuple_GetItem(adescr->shape, i); - if (item == NULL) { - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while 
getting shape item of subarray dtype ???"); - return -1; - } - Py_INCREF(item); - PyList_Append(l, item); - } - } - else if (PyInt_Check(adescr->shape)) { - Py_INCREF(adescr->shape); - PyList_Append(l, adescr->shape); - } - else { - PyErr_SetString(PyExc_SystemError, - "(Hash) Shape of subarray dtype neither a tuple or int ???"); - return -1; - } - - Py_INCREF(adescr->base); - st = _array_descr_walk(adescr->base, l); - Py_DECREF(adescr->base); - - return st; -} - -/* - * 'Root' function to walk into a dtype. May be called recursively - */ -static int _array_descr_walk(PyArray_Descr* descr, PyObject *l) -{ - int st; - - if (_is_array_descr_builtin(descr)) { - return _array_descr_builtin(descr, l); - } - else { - if(descr->fields != NULL && descr->fields != Py_None) { - if (!PyDict_Check(descr->fields)) { - PyErr_SetString(PyExc_SystemError, - "(Hash) fields is not a dict ???"); - return -1; - } - st = _array_descr_walk_fields(descr->fields, l); - if (st) { - return -1; - } - } - if(descr->subarray != NULL) { - st = _array_descr_walk_subarray(descr->subarray, l); - if (st) { - return -1; - } - } - } - - return 0; -} - -/* - * Return 0 if successfull - */ -static int _PyArray_DescrHashImp(PyArray_Descr *descr, long *hash) -{ - PyObject *l, *tl, *item; - Py_ssize_t i; - int st; - - l = PyList_New(0); - if (l == NULL) { - return -1; - } - - st = _array_descr_walk(descr, l); - if (st) { - goto clean_l; - } - - /* - * Convert the list to tuple and compute the tuple hash using python - * builtin function - */ - tl = PyTuple_New(PyList_Size(l)); - for(i = 0; i < PyList_Size(l); ++i) { - item = PyList_GetItem(l, i); - if (item == NULL) { - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while translating the list into a tuple " \ - "(NULL item)"); - goto clean_tl; - } - PyTuple_SetItem(tl, i, item); - } - - *hash = PyObject_Hash(tl); - if (*hash == -1) { - /* XXX: does PyObject_Hash set an exception on failure ? 
*/ -#if 0 - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while hashing final tuple"); -#endif - goto clean_tl; - } - Py_DECREF(tl); - Py_DECREF(l); - - return 0; - -clean_tl: - Py_DECREF(tl); -clean_l: - Py_DECREF(l); - return -1; -} - -NPY_NO_EXPORT long -PyArray_DescrHash(PyObject* odescr) -{ - PyArray_Descr *descr; - int st; - long hash; - - if (!PyArray_DescrCheck(odescr)) { - PyErr_SetString(PyExc_ValueError, - "PyArray_DescrHash argument must be a type descriptor"); - return -1; - } - descr = (PyArray_Descr*)odescr; - - st = _PyArray_DescrHashImp(descr, &hash); - if (st) { - return -1; - } - - return hash; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/hashdescr.h b/numpy-1.6.2/numpy/core/src/multiarray/hashdescr.h deleted file mode 100644 index af0ec13b99..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/hashdescr.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _NPY_HASHDESCR_H_ -#define _NPY_HASHDESCR_H_ - -NPY_NO_EXPORT long -PyArray_DescrHash(PyObject* odescr); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/item_selection.c b/numpy-1.6.2/numpy/core/src/multiarray/item_selection.c deleted file mode 100644 index 58871131f2..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/item_selection.c +++ /dev/null @@ -1,1933 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "ctors.h" -#include "lowlevel_strided_loops.h" - -#define PyAO PyArrayObject -#define _check_axis PyArray_CheckAxis - -/*NUMPY_API - * Take - */ -NPY_NO_EXPORT PyObject * -PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, - PyArrayObject *ret, NPY_CLIPMODE clipmode) -{ - PyArray_FastTakeFunc *func; - PyArrayObject *self, *indices; - intp nd, i, j, n, m, max_item, tmp, chunk, nelem; - intp 
shape[MAX_DIMS]; - char *src, *dest; - int copyret = 0; - int err; - - indices = NULL; - self = (PyAO *)_check_axis(self0, &axis, CARRAY); - if (self == NULL) { - return NULL; - } - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - PyArray_INTP, - 1, 0); - if (indices == NULL) { - Py_XINCREF(ret); - goto fail; - } - n = m = chunk = 1; - nd = self->nd + indices->nd - 1; - for (i = 0; i < nd; i++) { - if (i < axis) { - shape[i] = self->dimensions[i]; - n *= shape[i]; - } - else { - if (i < axis+indices->nd) { - shape[i] = indices->dimensions[i-axis]; - m *= shape[i]; - } - else { - shape[i] = self->dimensions[i-indices->nd+1]; - chunk *= shape[i]; - } - } - } - Py_INCREF(self->descr); - if (!ret) { - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), - self->descr, - nd, shape, - NULL, NULL, 0, - (PyObject *)self); - - if (ret == NULL) { - goto fail; - } - } - else { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; - - if ((ret->nd != nd) || - !PyArray_CompareLists(ret->dimensions, shape, nd)) { - PyErr_SetString(PyExc_ValueError, - "bad shape in output array"); - ret = NULL; - Py_DECREF(self->descr); - goto fail; - } - - if (clipmode == NPY_RAISE) { - /* - * we need to make sure and get a copy - * so the input array is not changed - * before the error is called - */ - flags |= NPY_ENSURECOPY; - } - obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, - flags); - if (obj != ret) { - copyret = 1; - } - ret = obj; - if (ret == NULL) { - goto fail; - } - } - - max_item = self->dimensions[axis]; - nelem = chunk; - chunk = chunk * ret->descr->elsize; - src = self->data; - dest = ret->data; - - func = self->descr->f->fasttake; - if (func == NULL) { - switch(clipmode) { - case NPY_RAISE: - for (i = 0; i < n; i++) { - for (j = 0; j < m; j++) { - tmp = ((intp *)(indices->data))[j]; - if (tmp < 0) { - tmp = tmp + max_item; - } - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of range "\ - 
"for array"); - goto fail; - } - memmove(dest, src + tmp*chunk, chunk); - dest += chunk; - } - src += chunk*max_item; - } - break; - case NPY_WRAP: - for (i = 0; i < n; i++) { - for (j = 0; j < m; j++) { - tmp = ((intp *)(indices->data))[j]; - if (tmp < 0) { - while (tmp < 0) { - tmp += max_item; - } - } - else if (tmp >= max_item) { - while (tmp >= max_item) { - tmp -= max_item; - } - } - memmove(dest, src + tmp*chunk, chunk); - dest += chunk; - } - src += chunk*max_item; - } - break; - case NPY_CLIP: - for (i = 0; i < n; i++) { - for (j = 0; j < m; j++) { - tmp = ((intp *)(indices->data))[j]; - if (tmp < 0) { - tmp = 0; - } - else if (tmp >= max_item) { - tmp = max_item - 1; - } - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; - } - src += chunk*max_item; - } - break; - } - } - else { - err = func(dest, src, (intp *)(indices->data), - max_item, n, m, nelem, clipmode); - if (err) { - goto fail; - } - } - - PyArray_INCREF(ret); - Py_XDECREF(indices); - Py_XDECREF(self); - if (copyret) { - PyObject *obj; - obj = ret->base; - Py_INCREF(obj); - Py_DECREF(ret); - ret = (PyArrayObject *)obj; - } - return (PyObject *)ret; - - fail: - PyArray_XDECREF_ERR(ret); - Py_XDECREF(indices); - Py_XDECREF(self); - return NULL; -} - -/*NUMPY_API - * Put values into an array - */ -NPY_NO_EXPORT PyObject * -PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, - NPY_CLIPMODE clipmode) -{ - PyArrayObject *indices, *values; - intp i, chunk, ni, max_item, nv, tmp; - char *src, *dest; - int copied = 0; - - indices = NULL; - values = NULL; - if (!PyArray_Check(self)) { - PyErr_SetString(PyExc_TypeError, - "put: first argument must be an array"); - return NULL; - } - if (!PyArray_ISCONTIGUOUS(self)) { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; - - if (clipmode == NPY_RAISE) { - flags |= NPY_ENSURECOPY; - } - Py_INCREF(self->descr); - obj = (PyArrayObject *)PyArray_FromArray(self, - self->descr, flags); - if (obj != self) { - copied = 1; - } 
- self = obj; - } - max_item = PyArray_SIZE(self); - dest = self->data; - chunk = self->descr->elsize; - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - PyArray_INTP, 0, 0); - if (indices == NULL) { - goto fail; - } - ni = PyArray_SIZE(indices); - Py_INCREF(self->descr); - values = (PyArrayObject *)PyArray_FromAny(values0, self->descr, 0, 0, - DEFAULT | FORCECAST, NULL); - if (values == NULL) { - goto fail; - } - nv = PyArray_SIZE(values); - if (nv <= 0) { - goto finish; - } - if (PyDataType_REFCHK(self->descr)) { - switch(clipmode) { - case NPY_RAISE: - for (i = 0; i < ni; i++) { - src = values->data + chunk*(i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) { - tmp = tmp + max_item; - } - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); - goto fail; - } - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp*chunk, src, chunk); - } - break; - case NPY_WRAP: - for (i = 0; i < ni; i++) { - src = values->data + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) { - while (tmp < 0) { - tmp += max_item; - } - } - else if (tmp >= max_item) { - while (tmp >= max_item) { - tmp -= max_item; - } - } - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_CLIP: - for (i = 0; i < ni; i++) { - src = values->data + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) { - tmp = 0; - } - else if (tmp >= max_item) { - tmp = max_item - 1; - } - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); - } - break; - } - } - else { - switch(clipmode) { - case NPY_RAISE: - for (i = 0; i < ni; i++) { - src = values->data + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) { - tmp = tmp + 
max_item; - } - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); - goto fail; - } - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_WRAP: - for (i = 0; i < ni; i++) { - src = values->data + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) { - while (tmp < 0) { - tmp += max_item; - } - } - else if (tmp >= max_item) { - while (tmp >= max_item) { - tmp -= max_item; - } - } - memmove(dest + tmp * chunk, src, chunk); - } - break; - case NPY_CLIP: - for (i = 0; i < ni; i++) { - src = values->data + chunk * (i % nv); - tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) { - tmp = 0; - } - else if (tmp >= max_item) { - tmp = max_item - 1; - } - memmove(dest + tmp * chunk, src, chunk); - } - break; - } - } - - finish: - Py_XDECREF(values); - Py_XDECREF(indices); - if (copied) { - Py_DECREF(self); - } - Py_INCREF(Py_None); - return Py_None; - - fail: - Py_XDECREF(indices); - Py_XDECREF(values); - if (copied) { - PyArray_XDECREF_ERR(self); - } - return NULL; -} - -/*NUMPY_API - * Put values into an array according to a mask. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) -{ - PyArray_FastPutmaskFunc *func; - PyArrayObject *mask, *values; - intp i, chunk, ni, max_item, nv, tmp; - char *src, *dest; - int copied = 0; - - mask = NULL; - values = NULL; - if (!PyArray_Check(self)) { - PyErr_SetString(PyExc_TypeError, - "putmask: first argument must "\ - "be an array"); - return NULL; - } - if (!PyArray_ISCONTIGUOUS(self)) { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; - - Py_INCREF(self->descr); - obj = (PyArrayObject *)PyArray_FromArray(self, - self->descr, flags); - if (obj != self) { - copied = 1; - } - self = obj; - } - - max_item = PyArray_SIZE(self); - dest = self->data; - chunk = self->descr->elsize; - mask = (PyArrayObject *)\ - PyArray_FROM_OTF(mask0, PyArray_BOOL, CARRAY | FORCECAST); - if (mask == NULL) { - goto fail; - } - ni = PyArray_SIZE(mask); - if (ni != max_item) { - PyErr_SetString(PyExc_ValueError, - "putmask: mask and data must be "\ - "the same size"); - goto fail; - } - Py_INCREF(self->descr); - values = (PyArrayObject *)\ - PyArray_FromAny(values0, self->descr, 0, 0, NPY_CARRAY, NULL); - if (values == NULL) { - goto fail; - } - nv = PyArray_SIZE(values); /* zero if null array */ - if (nv <= 0) { - Py_XDECREF(values); - Py_XDECREF(mask); - Py_INCREF(Py_None); - return Py_None; - } - if (PyDataType_REFCHK(self->descr)) { - for (i = 0; i < ni; i++) { - tmp = ((Bool *)(mask->data))[i]; - if (tmp) { - src = values->data + chunk * (i % nv); - PyArray_Item_INCREF(src, self->descr); - PyArray_Item_XDECREF(dest+i*chunk, self->descr); - memmove(dest + i * chunk, src, chunk); - } - } - } - else { - func = self->descr->f->fastputmask; - if (func == NULL) { - for (i = 0; i < ni; i++) { - tmp = ((Bool *)(mask->data))[i]; - if (tmp) { - src = values->data + chunk*(i % nv); - memmove(dest + i*chunk, src, chunk); - } - } - } - else { - func(dest, mask->data, ni, values->data, nv); - } - } - - 
Py_XDECREF(values); - Py_XDECREF(mask); - if (copied) { - Py_DECREF(self); - } - Py_INCREF(Py_None); - return Py_None; - - fail: - Py_XDECREF(mask); - Py_XDECREF(values); - if (copied) { - PyArray_XDECREF_ERR(self); - } - return NULL; -} - -/*NUMPY_API - * Repeat the array. - */ -NPY_NO_EXPORT PyObject * -PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) -{ - intp *counts; - intp n, n_outer, i, j, k, chunk, total; - intp tmp; - int nd; - PyArrayObject *repeats = NULL; - PyObject *ap = NULL; - PyArrayObject *ret = NULL; - char *new_data, *old_data; - - repeats = (PyAO *)PyArray_ContiguousFromAny(op, PyArray_INTP, 0, 1); - if (repeats == NULL) { - return NULL; - } - nd = repeats->nd; - counts = (intp *)repeats->data; - - if ((ap=_check_axis(aop, &axis, CARRAY))==NULL) { - Py_DECREF(repeats); - return NULL; - } - - aop = (PyAO *)ap; - if (nd == 1) { - n = repeats->dimensions[0]; - } - else { - /* nd == 0 */ - n = aop->dimensions[axis]; - } - if (aop->dimensions[axis] != n) { - PyErr_SetString(PyExc_ValueError, - "a.shape[axis] != len(repeats)"); - goto fail; - } - - if (nd == 0) { - total = counts[0]*n; - } - else { - - total = 0; - for (j = 0; j < n; j++) { - if (counts[j] < 0) { - PyErr_SetString(PyExc_ValueError, "count < 0"); - goto fail; - } - total += counts[j]; - } - } - - - /* Construct new array */ - aop->dimensions[axis] = total; - Py_INCREF(aop->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(aop), - aop->descr, - aop->nd, - aop->dimensions, - NULL, NULL, 0, - (PyObject *)aop); - aop->dimensions[axis] = n; - if (ret == NULL) { - goto fail; - } - new_data = ret->data; - old_data = aop->data; - - chunk = aop->descr->elsize; - for(i = axis + 1; i < aop->nd; i++) { - chunk *= aop->dimensions[i]; - } - - n_outer = 1; - for (i = 0; i < axis; i++) { - n_outer *= aop->dimensions[i]; - } - for (i = 0; i < n_outer; i++) { - for (j = 0; j < n; j++) { - tmp = nd ? 
counts[j] : counts[0]; - for (k = 0; k < tmp; k++) { - memcpy(new_data, old_data, chunk); - new_data += chunk; - } - old_data += chunk; - } - } - - Py_DECREF(repeats); - PyArray_INCREF(ret); - Py_XDECREF(aop); - return (PyObject *)ret; - - fail: - Py_DECREF(repeats); - Py_XDECREF(aop); - Py_XDECREF(ret); - return NULL; -} - -/*NUMPY_API - */ -NPY_NO_EXPORT PyObject * -PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *ret, - NPY_CLIPMODE clipmode) -{ - int n, elsize; - intp i; - char *ret_data; - PyArrayObject **mps, *ap; - PyArrayMultiIterObject *multi = NULL; - intp mi; - int copyret = 0; - ap = NULL; - - /* - * Convert all inputs to arrays of a common type - * Also makes them C-contiguous - */ - mps = PyArray_ConvertToCommonType(op, &n); - if (mps == NULL) { - return NULL; - } - for (i = 0; i < n; i++) { - if (mps[i] == NULL) { - goto fail; - } - } - ap = (PyArrayObject *)PyArray_FROM_OT((PyObject *)ip, NPY_INTP); - if (ap == NULL) { - goto fail; - } - /* Broadcast all arrays to each other, index array at the end. 
*/ - multi = (PyArrayMultiIterObject *) - PyArray_MultiIterFromObjects((PyObject **)mps, n, 1, ap); - if (multi == NULL) { - goto fail; - } - /* Set-up return array */ - if (!ret) { - Py_INCREF(mps[0]->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(ap), - mps[0]->descr, - multi->nd, - multi->dimensions, - NULL, NULL, 0, - (PyObject *)ap); - } - else { - PyArrayObject *obj; - int flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; - - if ((PyArray_NDIM(ret) != multi->nd) - || !PyArray_CompareLists( - PyArray_DIMS(ret), multi->dimensions, multi->nd)) { - PyErr_SetString(PyExc_TypeError, - "invalid shape for output array."); - ret = NULL; - goto fail; - } - if (clipmode == NPY_RAISE) { - /* - * we need to make sure and get a copy - * so the input array is not changed - * before the error is called - */ - flags |= NPY_ENSURECOPY; - } - Py_INCREF(mps[0]->descr); - obj = (PyArrayObject *)PyArray_FromArray(ret, mps[0]->descr, flags); - if (obj != ret) { - copyret = 1; - } - ret = obj; - } - - if (ret == NULL) { - goto fail; - } - elsize = ret->descr->elsize; - ret_data = ret->data; - - while (PyArray_MultiIter_NOTDONE(multi)) { - mi = *((intp *)PyArray_MultiIter_DATA(multi, n)); - if (mi < 0 || mi >= n) { - switch(clipmode) { - case NPY_RAISE: - PyErr_SetString(PyExc_ValueError, - "invalid entry in choice "\ - "array"); - goto fail; - case NPY_WRAP: - if (mi < 0) { - while (mi < 0) { - mi += n; - } - } - else { - while (mi >= n) { - mi -= n; - } - } - break; - case NPY_CLIP: - if (mi < 0) { - mi = 0; - } - else if (mi >= n) { - mi = n - 1; - } - break; - } - } - memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); - ret_data += elsize; - PyArray_MultiIter_NEXT(multi); - } - - PyArray_INCREF(ret); - Py_DECREF(multi); - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - } - Py_DECREF(ap); - PyDataMem_FREE(mps); - if (copyret) { - PyObject *obj; - obj = ret->base; - Py_INCREF(obj); - Py_DECREF(ret); - ret = (PyArrayObject *)obj; - } - return 
(PyObject *)ret; - - fail: - Py_XDECREF(multi); - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - } - Py_XDECREF(ap); - PyDataMem_FREE(mps); - PyArray_XDECREF_ERR(ret); - return NULL; -} - -/* - * These algorithms use special sorting. They are not called unless the - * underlying sort function for the type is available. Note that axis is - * already valid. The sort functions require 1-d contiguous and well-behaved - * data. Therefore, a copy will be made of the data if needed before handing - * it to the sorting routine. An iterator is constructed and adjusted to walk - * over all but the desired sorting axis. - */ -static int -_new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayIterObject *it; - int needcopy = 0, swap; - intp N, size; - int elsize; - intp astride; - PyArray_SortFunc *sort; - BEGIN_THREADS_DEF; - - it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis); - swap = !PyArray_ISNOTSWAPPED(op); - if (it == NULL) { - return -1; - } - - NPY_BEGIN_THREADS_DESCR(op->descr); - sort = op->descr->f->sort[which]; - size = it->size; - N = op->dimensions[axis]; - elsize = op->descr->elsize; - astride = op->strides[axis]; - - needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) || swap; - if (needcopy) { - char *buffer = PyDataMem_NEW(N*elsize); - - while (size--) { - _unaligned_strided_byte_copy(buffer, (intp) elsize, it->dataptr, - astride, N, elsize); - if (swap) { - _strided_byte_swap(buffer, (intp) elsize, N, elsize); - } - if (sort(buffer, N, op) < 0) { - PyDataMem_FREE(buffer); - goto fail; - } - if (swap) { - _strided_byte_swap(buffer, (intp) elsize, N, elsize); - } - _unaligned_strided_byte_copy(it->dataptr, astride, buffer, - (intp) elsize, N, elsize); - PyArray_ITER_NEXT(it); - } - PyDataMem_FREE(buffer); - } - else { - while (size--) { - if (sort(it->dataptr, N, op) < 0) { - goto fail; - } - PyArray_ITER_NEXT(it); - } - } - NPY_END_THREADS_DESCR(op->descr); - Py_DECREF(it); - return 0; - - fail: - 
NPY_END_THREADS; - Py_DECREF(it); - return 0; -} - -static PyObject* -_new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - - PyArrayIterObject *it = NULL; - PyArrayIterObject *rit = NULL; - PyObject *ret; - int needcopy = 0, i; - intp N, size; - int elsize, swap; - intp astride, rstride, *iptr; - PyArray_ArgSortFunc *argsort; - BEGIN_THREADS_DEF; - - ret = PyArray_New(Py_TYPE(op), op->nd, - op->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) { - return NULL; - } - it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis); - rit = (PyArrayIterObject *)PyArray_IterAllButAxis(ret, &axis); - if (rit == NULL || it == NULL) { - goto fail; - } - swap = !PyArray_ISNOTSWAPPED(op); - - NPY_BEGIN_THREADS_DESCR(op->descr); - argsort = op->descr->f->argsort[which]; - size = it->size; - N = op->dimensions[axis]; - elsize = op->descr->elsize; - astride = op->strides[axis]; - rstride = PyArray_STRIDE(ret,axis); - - needcopy = swap || !(op->flags & ALIGNED) || (astride != (intp) elsize) || - (rstride != sizeof(intp)); - if (needcopy) { - char *valbuffer, *indbuffer; - - valbuffer = PyDataMem_NEW(N*elsize); - indbuffer = PyDataMem_NEW(N*sizeof(intp)); - while (size--) { - _unaligned_strided_byte_copy(valbuffer, (intp) elsize, it->dataptr, - astride, N, elsize); - if (swap) { - _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); - } - iptr = (intp *)indbuffer; - for (i = 0; i < N; i++) { - *iptr++ = i; - } - if (argsort(valbuffer, (intp *)indbuffer, N, op) < 0) { - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - goto fail; - } - _unaligned_strided_byte_copy(rit->dataptr, rstride, indbuffer, - sizeof(intp), N, sizeof(intp)); - PyArray_ITER_NEXT(it); - PyArray_ITER_NEXT(rit); - } - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - } - else { - while (size--) { - iptr = (intp *)rit->dataptr; - for (i = 0; i < N; i++) { - *iptr++ = i; - } - if (argsort(it->dataptr, (intp *)rit->dataptr, N, op) < 0) { - 
goto fail; - } - PyArray_ITER_NEXT(it); - PyArray_ITER_NEXT(rit); - } - } - - NPY_END_THREADS_DESCR(op->descr); - - Py_DECREF(it); - Py_DECREF(rit); - return ret; - - fail: - NPY_END_THREADS; - Py_DECREF(ret); - Py_XDECREF(it); - Py_XDECREF(rit); - return NULL; -} - - -/* Be sure to save this global_compare when necessary */ -static PyArrayObject *global_obj; - -static int -qsortCompare (const void *a, const void *b) -{ - return global_obj->descr->f->compare(a,b,global_obj); -} - -/* - * Consumes reference to ap (op gets it) op contains a version of - * the array with axes swapped if local variable axis is not the - * last dimension. Origin must be defined locally. - */ -#define SWAPAXES(op, ap) { \ - orign = (ap)->nd-1; \ - if (axis != orign) { \ - (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \ - Py_DECREF((ap)); \ - if ((op) == NULL) return NULL; \ - } \ - else (op) = (ap); \ - } - -/* - * Consumes reference to ap (op gets it) origin must be previously - * defined locally. SWAPAXES must have been called previously. - * op contains the swapped version of the array. 
- */ -#define SWAPBACK(op, ap) { \ - if (axis != orign) { \ - (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \ - Py_DECREF((ap)); \ - if ((op) == NULL) return NULL; \ - } \ - else (op) = (ap); \ - } - -/* These swap axes in-place if necessary */ -#define SWAPINTP(a,b) {intp c; c=(a); (a) = (b); (b) = c;} -#define SWAPAXES2(ap) { \ - orign = (ap)->nd-1; \ - if (axis != orign) { \ - SWAPINTP(ap->dimensions[axis], ap->dimensions[orign]); \ - SWAPINTP(ap->strides[axis], ap->strides[orign]); \ - PyArray_UpdateFlags(ap, CONTIGUOUS | FORTRAN); \ - } \ - } - -#define SWAPBACK2(ap) { \ - if (axis != orign) { \ - SWAPINTP(ap->dimensions[axis], ap->dimensions[orign]); \ - SWAPINTP(ap->strides[axis], ap->strides[orign]); \ - PyArray_UpdateFlags(ap, CONTIGUOUS | FORTRAN); \ - } \ - } - -/*NUMPY_API - * Sort an array in-place - */ -NPY_NO_EXPORT int -PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *ap = NULL, *store_arr = NULL; - char *ip; - int i, n, m, elsize, orign; - - n = op->nd; - if ((n == 0) || (PyArray_SIZE(op) == 1)) { - return 0; - } - if (axis < 0) { - axis += n; - } - if ((axis < 0) || (axis >= n)) { - PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis); - return -1; - } - if (!PyArray_ISWRITEABLE(op)) { - PyErr_SetString(PyExc_RuntimeError, - "attempted sort on unwriteable array."); - return -1; - } - - /* Determine if we should use type-specific algorithm or not */ - if (op->descr->f->sort[which] != NULL) { - return _new_sort(op, axis, which); - } - if ((which != PyArray_QUICKSORT) - || op->descr->f->compare == NULL) { - PyErr_SetString(PyExc_TypeError, - "desired sort not supported for this type"); - return -1; - } - - SWAPAXES2(op); - - ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op, - NULL, 1, 0, - DEFAULT | UPDATEIFCOPY, NULL); - if (ap == NULL) { - goto fail; - } - elsize = ap->descr->elsize; - m = ap->dimensions[ap->nd-1]; - if (m == 0) { - goto finish; - } - n = PyArray_SIZE(ap)/m; - - /* Store global 
-- allows re-entry -- restore before leaving*/ - store_arr = global_obj; - global_obj = ap; - for (ip = ap->data, i = 0; i < n; i++, ip += elsize*m) { - qsort(ip, m, elsize, qsortCompare); - } - global_obj = store_arr; - - if (PyErr_Occurred()) { - goto fail; - } - - finish: - Py_DECREF(ap); /* Should update op if needed */ - SWAPBACK2(op); - return 0; - - fail: - Py_XDECREF(ap); - SWAPBACK2(op); - return -1; -} - - -static char *global_data; - -static int -argsort_static_compare(const void *ip1, const void *ip2) -{ - int isize = global_obj->descr->elsize; - const intp *ipa = ip1; - const intp *ipb = ip2; - return global_obj->descr->f->compare(global_data + (isize * *ipa), - global_data + (isize * *ipb), - global_obj); -} - -/*NUMPY_API - * ArgSort an array - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *ap = NULL, *ret = NULL, *store, *op2; - intp *ip; - intp i, j, n, m, orign; - int argsort_elsize; - char *store_ptr; - - n = op->nd; - if ((n == 0) || (PyArray_SIZE(op) == 1)) { - ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), op->nd, - op->dimensions, - PyArray_INTP, - NULL, NULL, 0, 0, - (PyObject *)op); - if (ret == NULL) { - return NULL; - } - *((intp *)ret->data) = 0; - return (PyObject *)ret; - } - - /* Creates new reference op2 */ - if ((op2=(PyAO *)_check_axis(op, &axis, 0)) == NULL) { - return NULL; - } - /* Determine if we should use new algorithm or not */ - if (op2->descr->f->argsort[which] != NULL) { - ret = (PyArrayObject *)_new_argsort(op2, axis, which); - Py_DECREF(op2); - return (PyObject *)ret; - } - - if ((which != PyArray_QUICKSORT) || op2->descr->f->compare == NULL) { - PyErr_SetString(PyExc_TypeError, - "requested sort not available for type"); - Py_DECREF(op2); - op = NULL; - goto fail; - } - - /* ap will contain the reference to op2 */ - SWAPAXES(ap, op2); - op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap, - PyArray_NOTYPE, - 1, 0); - Py_DECREF(ap); - if (op 
== NULL) { - return NULL; - } - ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), op->nd, - op->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) { - goto fail; - } - ip = (intp *)ret->data; - argsort_elsize = op->descr->elsize; - m = op->dimensions[op->nd-1]; - if (m == 0) { - goto finish; - } - n = PyArray_SIZE(op)/m; - store_ptr = global_data; - global_data = op->data; - store = global_obj; - global_obj = op; - for (i = 0; i < n; i++, ip += m, global_data += m*argsort_elsize) { - for (j = 0; j < m; j++) { - ip[j] = j; - } - qsort((char *)ip, m, sizeof(intp), argsort_static_compare); - } - global_data = store_ptr; - global_obj = store; - - finish: - Py_DECREF(op); - SWAPBACK(op, ret); - return (PyObject *)op; - - fail: - Py_XDECREF(op); - Py_XDECREF(ret); - return NULL; - -} - - -/*NUMPY_API - *LexSort an array providing indices that will sort a collection of arrays - *lexicographically. The first key is sorted on first, followed by the second key - *-- requires that arg"merge"sort is available for each sort_key - * - *Returns an index array that shows the indexes for the lexicographic sort along - *the given axis. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_LexSort(PyObject *sort_keys, int axis) -{ - PyArrayObject **mps; - PyArrayIterObject **its; - PyArrayObject *ret = NULL; - PyArrayIterObject *rit = NULL; - int n; - int nd; - int needcopy = 0, i,j; - intp N, size; - int elsize; - int maxelsize; - intp astride, rstride, *iptr; - int object = 0; - PyArray_ArgSortFunc *argsort; - NPY_BEGIN_THREADS_DEF; - - if (!PySequence_Check(sort_keys) - || ((n = PySequence_Size(sort_keys)) <= 0)) { - PyErr_SetString(PyExc_TypeError, - "need sequence of keys with len > 0 in lexsort"); - return NULL; - } - mps = (PyArrayObject **) _pya_malloc(n*sizeof(PyArrayObject)); - if (mps == NULL) { - return PyErr_NoMemory(); - } - its = (PyArrayIterObject **) _pya_malloc(n*sizeof(PyArrayIterObject)); - if (its == NULL) { - _pya_free(mps); - return PyErr_NoMemory(); - } - for (i = 0; i < n; i++) { - mps[i] = NULL; - its[i] = NULL; - } - for (i = 0; i < n; i++) { - PyObject *obj; - obj = PySequence_GetItem(sort_keys, i); - mps[i] = (PyArrayObject *)PyArray_FROM_O(obj); - Py_DECREF(obj); - if (mps[i] == NULL) { - goto fail; - } - if (i > 0) { - if ((mps[i]->nd != mps[0]->nd) - || (!PyArray_CompareLists(mps[i]->dimensions, - mps[0]->dimensions, - mps[0]->nd))) { - PyErr_SetString(PyExc_ValueError, - "all keys need to be the same shape"); - goto fail; - } - } - if (!mps[i]->descr->f->argsort[PyArray_MERGESORT]) { - PyErr_Format(PyExc_TypeError, - "merge sort not available for item %d", i); - goto fail; - } - if (!object - && PyDataType_FLAGCHK(mps[i]->descr, NPY_NEEDS_PYAPI)) { - object = 1; - } - its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis( - (PyObject *)mps[i], &axis); - if (its[i] == NULL) { - goto fail; - } - } - - /* Now we can check the axis */ - nd = mps[0]->nd; - if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) { - /* single element case */ - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, - mps[0]->dimensions, - PyArray_INTP, - NULL, NULL, 0, 0, NULL); - - if (ret == NULL) { - 
goto fail; - } - *((intp *)(ret->data)) = 0; - goto finish; - } - if (axis < 0) { - axis += nd; - } - if ((axis < 0) || (axis >= nd)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); - goto fail; - } - - /* Now do the sorting */ - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, - mps[0]->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, NULL); - if (ret == NULL) { - goto fail; - } - rit = (PyArrayIterObject *) - PyArray_IterAllButAxis((PyObject *)ret, &axis); - if (rit == NULL) { - goto fail; - } - if (!object) { - NPY_BEGIN_THREADS; - } - size = rit->size; - N = mps[0]->dimensions[axis]; - rstride = PyArray_STRIDE(ret, axis); - maxelsize = mps[0]->descr->elsize; - needcopy = (rstride != sizeof(intp)); - for (j = 0; j < n; j++) { - needcopy = needcopy - || PyArray_ISBYTESWAPPED(mps[j]) - || !(mps[j]->flags & ALIGNED) - || (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize); - if (mps[j]->descr->elsize > maxelsize) { - maxelsize = mps[j]->descr->elsize; - } - } - - if (needcopy) { - char *valbuffer, *indbuffer; - int *swaps; - - valbuffer = PyDataMem_NEW(N*maxelsize); - indbuffer = PyDataMem_NEW(N*sizeof(intp)); - swaps = malloc(n*sizeof(int)); - for (j = 0; j < n; j++) { - swaps[j] = PyArray_ISBYTESWAPPED(mps[j]); - } - while (size--) { - iptr = (intp *)indbuffer; - for (i = 0; i < N; i++) { - *iptr++ = i; - } - for (j = 0; j < n; j++) { - elsize = mps[j]->descr->elsize; - astride = mps[j]->strides[axis]; - argsort = mps[j]->descr->f->argsort[PyArray_MERGESORT]; - _unaligned_strided_byte_copy(valbuffer, (intp) elsize, - its[j]->dataptr, astride, N, elsize); - if (swaps[j]) { - _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); - } - if (argsort(valbuffer, (intp *)indbuffer, N, mps[j]) < 0) { - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - free(swaps); - goto fail; - } - PyArray_ITER_NEXT(its[j]); - } - _unaligned_strided_byte_copy(rit->dataptr, rstride, indbuffer, - sizeof(intp), N, sizeof(intp)); - 
PyArray_ITER_NEXT(rit); - } - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); - free(swaps); - } - else { - while (size--) { - iptr = (intp *)rit->dataptr; - for (i = 0; i < N; i++) { - *iptr++ = i; - } - for (j = 0; j < n; j++) { - argsort = mps[j]->descr->f->argsort[PyArray_MERGESORT]; - if (argsort(its[j]->dataptr, (intp *)rit->dataptr, - N, mps[j]) < 0) { - goto fail; - } - PyArray_ITER_NEXT(its[j]); - } - PyArray_ITER_NEXT(rit); - } - } - - if (!object) { - NPY_END_THREADS; - } - - finish: - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - Py_XDECREF(its[i]); - } - Py_XDECREF(rit); - _pya_free(mps); - _pya_free(its); - return (PyObject *)ret; - - fail: - NPY_END_THREADS; - Py_XDECREF(rit); - Py_XDECREF(ret); - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - Py_XDECREF(its[i]); - } - _pya_free(mps); - _pya_free(its); - return NULL; -} - - -/** @brief Use bisection of sorted array to find first entries >= keys. - * - * For each key use bisection to find the first index i s.t. key <= arr[i]. - * When there is no such index i, set i = len(arr). Return the results in ret. - * All arrays are assumed contiguous on entry and both arr and key must be of - * the same comparable type. - * - * @param arr contiguous sorted array to be searched. - * @param key contiguous array of keys. - * @param ret contiguous array of intp for returned indices. 
- * @return void - */ -static void -local_search_left(PyArrayObject *arr, PyArrayObject *key, PyArrayObject *ret) -{ - PyArray_CompareFunc *compare = key->descr->f->compare; - intp nelts = arr->dimensions[arr->nd - 1]; - intp nkeys = PyArray_SIZE(key); - char *parr = arr->data; - char *pkey = key->data; - intp *pret = (intp *)ret->data; - int elsize = arr->descr->elsize; - intp i; - - for (i = 0; i < nkeys; ++i) { - intp imin = 0; - intp imax = nelts; - while (imin < imax) { - intp imid = imin + ((imax - imin) >> 1); - if (compare(parr + elsize*imid, pkey, key) < 0) { - imin = imid + 1; - } - else { - imax = imid; - } - } - *pret = imin; - pret += 1; - pkey += elsize; - } -} - - -/** @brief Use bisection of sorted array to find first entries > keys. - * - * For each key use bisection to find the first index i s.t. key < arr[i]. - * When there is no such index i, set i = len(arr). Return the results in ret. - * All arrays are assumed contiguous on entry and both arr and key must be of - * the same comparable type. - * - * @param arr contiguous sorted array to be searched. - * @param key contiguous array of keys. - * @param ret contiguous array of intp for returned indices. - * @return void - */ -static void -local_search_right(PyArrayObject *arr, PyArrayObject *key, PyArrayObject *ret) -{ - PyArray_CompareFunc *compare = key->descr->f->compare; - intp nelts = arr->dimensions[arr->nd - 1]; - intp nkeys = PyArray_SIZE(key); - char *parr = arr->data; - char *pkey = key->data; - intp *pret = (intp *)ret->data; - int elsize = arr->descr->elsize; - intp i; - - for(i = 0; i < nkeys; ++i) { - intp imin = 0; - intp imax = nelts; - while (imin < imax) { - intp imid = imin + ((imax - imin) >> 1); - if (compare(parr + elsize*imid, pkey, key) <= 0) { - imin = imid + 1; - } - else { - imax = imid; - } - } - *pret = imin; - pret += 1; - pkey += elsize; - } -} - -/*NUMPY_API - * - * Search the sorted array op1 for the location of the items in op2. 
The - * result is an array of indexes, one for each element in op2, such that if - * the item were to be inserted in op1 just before that index the array - * would still be in sorted order. - * - * Parameters - * ---------- - * op1 : PyArrayObject * - * Array to be searched, must be 1-D. - * op2 : PyObject * - * Array of items whose insertion indexes in op1 are wanted - * side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT} - * If NPY_SEARCHLEFT, return first valid insertion indexes - * If NPY_SEARCHRIGHT, return last valid insertion indexes - * - * Returns - * ------- - * ret : PyObject * - * New reference to npy_intp array containing indexes where items in op2 - * could be validly inserted into op1. NULL on error. - * - * Notes - * ----- - * Binary search is used to find the indexes. - */ -NPY_NO_EXPORT PyObject * -PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE side) -{ - PyArrayObject *ap1 = NULL; - PyArrayObject *ap2 = NULL; - PyArrayObject *ret = NULL; - PyArray_Descr *dtype; - NPY_BEGIN_THREADS_DEF; - - /* Find common type */ - dtype = PyArray_DescrFromObject((PyObject *)op2, PyArray_DESCR(op1)); - if (dtype == NULL) { - return NULL; - } - - /* need ap1 as contiguous array and of right type */ - Py_INCREF(dtype); - ap1 = (PyArrayObject *)PyArray_CheckFromAny((PyObject *)op1, dtype, - 1, 1, NPY_DEFAULT | NPY_NOTSWAPPED, NULL); - if (ap1 == NULL) { - Py_DECREF(dtype); - return NULL; - } - - /* need ap2 as contiguous array and of right type */ - ap2 = (PyArrayObject *)PyArray_CheckFromAny(op2, dtype, - 0, 0, NPY_DEFAULT | NPY_NOTSWAPPED, NULL); - if (ap2 == NULL) { - goto fail; - } - - /* ret is a contiguous array of intp type to hold returned indices */ - ret = (PyArrayObject *)PyArray_New(Py_TYPE(ap2), ap2->nd, - ap2->dimensions, PyArray_INTP, - NULL, NULL, 0, 0, (PyObject *)ap2); - if (ret == NULL) { - goto fail; - } - /* check that comparison function exists */ - if (ap2->descr->f->compare == NULL) { - PyErr_SetString(PyExc_TypeError, - "compare 
not supported for type"); - goto fail; - } - - if (side == NPY_SEARCHLEFT) { - NPY_BEGIN_THREADS_DESCR(ap2->descr); - local_search_left(ap1, ap2, ret); - NPY_END_THREADS_DESCR(ap2->descr); - } - else if (side == NPY_SEARCHRIGHT) { - NPY_BEGIN_THREADS_DESCR(ap2->descr); - local_search_right(ap1, ap2, ret); - NPY_END_THREADS_DESCR(ap2->descr); - } - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -/*NUMPY_API - * Diagonal - */ -NPY_NO_EXPORT PyObject * -PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) -{ - int n = self->nd; - PyObject *new; - PyArray_Dims newaxes; - intp dims[MAX_DIMS]; - int i, pos; - - newaxes.ptr = dims; - if (n < 2) { - PyErr_SetString(PyExc_ValueError, - "array.ndim must be >= 2"); - return NULL; - } - if (axis1 < 0) { - axis1 += n; - } - if (axis2 < 0) { - axis2 += n; - } - if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || - (axis2 < 0) || (axis2 >= n)) { - PyErr_Format(PyExc_ValueError, "axis1(=%d) and axis2(=%d) "\ - "must be different and within range (nd=%d)", - axis1, axis2, n); - return NULL; - } - - newaxes.len = n; - /* insert at the end */ - newaxes.ptr[n-2] = axis1; - newaxes.ptr[n-1] = axis2; - pos = 0; - for (i = 0; i < n; i++) { - if ((i==axis1) || (i==axis2)) { - continue; - } - newaxes.ptr[pos++] = i; - } - new = PyArray_Transpose(self, &newaxes); - if (new == NULL) { - return NULL; - } - self = (PyAO *)new; - - if (n == 2) { - PyObject *a = NULL, *indices= NULL, *ret = NULL; - intp n1, n2, start, stop, step, count; - intp *dptr; - - n1 = self->dimensions[0]; - n2 = self->dimensions[1]; - step = n2 + 1; - if (offset < 0) { - start = -n2 * offset; - stop = MIN(n2, n1+offset)*(n2+1) - n2*offset; - } - else { - start = offset; - stop = MIN(n1, n2-offset)*(n2+1) + offset; - } - - /* count = ceil((stop-start)/step) */ - count = ((stop-start) / step) + (((stop-start) % step) != 0); - indices = 
PyArray_New(&PyArray_Type, 1, &count, - PyArray_INTP, NULL, NULL, 0, 0, NULL); - if (indices == NULL) { - Py_DECREF(self); - return NULL; - } - dptr = (intp *)PyArray_DATA(indices); - for (n1 = start; n1 < stop; n1 += step) { - *dptr++ = n1; - } - a = PyArray_IterNew((PyObject *)self); - Py_DECREF(self); - if (a == NULL) { - Py_DECREF(indices); - return NULL; - } - ret = PyObject_GetItem(a, indices); - Py_DECREF(a); - Py_DECREF(indices); - return ret; - } - - else { - /* - * my_diagonal = [] - * for i in range (s [0]) : - * my_diagonal.append (diagonal (a [i], offset)) - * return array (my_diagonal) - */ - PyObject *mydiagonal = NULL, *ret = NULL, *sel = NULL; - intp n1; - int res; - PyArray_Descr *typecode; - - new = NULL; - - typecode = self->descr; - mydiagonal = PyList_New(0); - if (mydiagonal == NULL) { - Py_DECREF(self); - return NULL; - } - n1 = self->dimensions[0]; - for (i = 0; i < n1; i++) { - new = PyInt_FromLong((long) i); - sel = PyArray_EnsureAnyArray(PyObject_GetItem((PyObject *)self, new)); - Py_DECREF(new); - if (sel == NULL) { - Py_DECREF(self); - Py_DECREF(mydiagonal); - return NULL; - } - new = PyArray_Diagonal((PyAO *)sel, offset, n-3, n-2); - Py_DECREF(sel); - if (new == NULL) { - Py_DECREF(self); - Py_DECREF(mydiagonal); - return NULL; - } - res = PyList_Append(mydiagonal, new); - Py_DECREF(new); - if (res < 0) { - Py_DECREF(self); - Py_DECREF(mydiagonal); - return NULL; - } - } - Py_DECREF(self); - Py_INCREF(typecode); - ret = PyArray_FromAny(mydiagonal, typecode, 0, 0, 0, NULL); - Py_DECREF(mydiagonal); - return ret; - } -} - -/*NUMPY_API - * Compress - */ -NPY_NO_EXPORT PyObject * -PyArray_Compress(PyArrayObject *self, PyObject *condition, int axis, - PyArrayObject *out) -{ - PyArrayObject *cond; - PyObject *res, *ret; - - cond = (PyAO *)PyArray_FROM_O(condition); - if (cond == NULL) { - return NULL; - } - if (cond->nd != 1) { - Py_DECREF(cond); - PyErr_SetString(PyExc_ValueError, - "condition must be 1-d array"); - return NULL; - } - - 
res = PyArray_Nonzero(cond); - Py_DECREF(cond); - if (res == NULL) { - return res; - } - ret = PyArray_TakeFrom(self, PyTuple_GET_ITEM(res, 0), axis, - out, NPY_RAISE); - Py_DECREF(res); - return ret; -} - -/*NUMPY_API - * Counts the number of non-zero elements in the array - * - * Returns -1 on error. - */ -NPY_NO_EXPORT npy_intp -PyArray_CountNonzero(PyArrayObject *self) -{ - PyArray_NonzeroFunc *nonzero = self->descr->f->nonzero; - char *data; - npy_intp stride, count; - npy_intp nonzero_count = 0; - - NpyIter *iter; - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *strideptr, *innersizeptr; - - /* If it's a trivial one-dimensional loop, don't use an iterator */ - if (PyArray_TRIVIALLY_ITERABLE(self)) { - PyArray_PREPARE_TRIVIAL_ITERATION(self, count, data, stride); - - while (count--) { - if (nonzero(data, self)) { - ++nonzero_count; - } - data += stride; - } - - return nonzero_count; - } - - /* - * If the array has size zero, return zero (the iterator rejects - * size zero arrays) - */ - if (PyArray_SIZE(self) == 0) { - return 0; - } - - /* Otherwise create and use an iterator to count the nonzeros */ - iter = NpyIter_New(self, NPY_ITER_READONLY| - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_REFS_OK, - NPY_KEEPORDER, NPY_NO_CASTING, - NULL); - if (iter == NULL) { - return -1; - } - - /* Get the pointers for inner loop iteration */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - return -1; - } - dataptr = NpyIter_GetDataPtrArray(iter); - strideptr = NpyIter_GetInnerStrideArray(iter); - innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); - - /* Iterate over all the elements to count the nonzeros */ - do { - data = *dataptr; - stride = *strideptr; - count = *innersizeptr; - - while (count--) { - if (nonzero(data, self)) { - ++nonzero_count; - } - data += stride; - } - - } while(iternext(iter)); - - NpyIter_Deallocate(iter); - - return nonzero_count; -} - -/*NUMPY_API - * Nonzero - * - * TODO: In NumPy 2.0, 
should make the iteration order a parameter. - */ -NPY_NO_EXPORT PyObject * -PyArray_Nonzero(PyArrayObject *self) -{ - int i, ndim = PyArray_NDIM(self); - PyArrayObject *ret = NULL; - PyObject *ret_tuple; - npy_intp ret_dims[2]; - PyArray_NonzeroFunc *nonzero = self->descr->f->nonzero; - char *data; - npy_intp stride, count; - npy_intp nonzero_count = PyArray_CountNonzero(self); - npy_intp *multi_index; - - NpyIter *iter; - NpyIter_IterNextFunc *iternext; - NpyIter_GetMultiIndexFunc *get_multi_index; - char **dataptr; - - /* Allocate the result as a 2D array */ - ret_dims[0] = nonzero_count; - ret_dims[1] = (ndim == 0) ? 1 : ndim; - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, 2, ret_dims, - NPY_INTP, NULL, NULL, 0, 0, - NULL); - if (ret == NULL) { - return NULL; - } - - /* If it's a one-dimensional result, don't use an iterator */ - if (ndim <= 1) { - npy_intp j; - - multi_index = (npy_intp *)PyArray_DATA(ret); - data = PyArray_BYTES(self); - stride = (ndim == 0) ? 0 : PyArray_STRIDE(self, 0); - count = (ndim == 0) ? 
1 : PyArray_DIM(self, 0); - - for (j = 0; j < count; ++j) { - if (nonzero(data, self)) { - *multi_index++ = j; - } - data += stride; - } - - goto finish; - } - - /* Build an iterator tracking a multi-index, in C order */ - iter = NpyIter_New(self, NPY_ITER_READONLY| - NPY_ITER_MULTI_INDEX| - NPY_ITER_ZEROSIZE_OK| - NPY_ITER_REFS_OK, - NPY_CORDER, NPY_NO_CASTING, - NULL); - - if (iter == NULL) { - Py_DECREF(ret); - return NULL; - } - - if (NpyIter_GetIterSize(iter) != 0) { - /* Get the pointers for inner loop iteration */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - Py_DECREF(ret); - return NULL; - } - get_multi_index = NpyIter_GetGetMultiIndex(iter, NULL); - if (get_multi_index == NULL) { - NpyIter_Deallocate(iter); - Py_DECREF(ret); - return NULL; - } - dataptr = NpyIter_GetDataPtrArray(iter); - - multi_index = (npy_intp *)PyArray_DATA(ret); - - /* Get the multi-index for each non-zero element */ - do { - if (nonzero(*dataptr, self)) { - get_multi_index(iter, multi_index); - multi_index += ndim; - } - } while(iternext(iter)); - } - - NpyIter_Deallocate(iter); - -finish: - /* Treat zero-dimensional as shape (1,) */ - if (ndim == 0) { - ndim = 1; - } - - ret_tuple = PyTuple_New(ndim); - if (ret_tuple == NULL) { - Py_DECREF(ret); - return NULL; - } - - /* Create views into ret, one for each dimension */ - if (ndim == 1) { - /* Directly switch to one dimensions (dimension 1 is 1 anyway) */ - ret->nd = 1; - PyTuple_SET_ITEM(ret_tuple, 0, (PyObject *)ret); - } - else { - for (i = 0; i < ndim; ++i) { - PyArrayObject *view; - stride = ndim*NPY_SIZEOF_INTP; - - view = (PyArrayObject *)PyArray_New(Py_TYPE(self), 1, - &nonzero_count, - NPY_INTP, &stride, - PyArray_BYTES(ret) + i*NPY_SIZEOF_INTP, - 0, 0, (PyObject *)self); - if (view == NULL) { - Py_DECREF(ret); - Py_DECREF(ret_tuple); - return NULL; - } - Py_INCREF(ret); - view->base = (PyObject *)ret; - PyTuple_SET_ITEM(ret_tuple, i, (PyObject *)view); - } - - 
Py_DECREF(ret); - } - - return ret_tuple; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/iterators.c b/numpy-1.6.2/numpy/core/src/multiarray/iterators.c deleted file mode 100644 index 0960b152a1..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/iterators.c +++ /dev/null @@ -1,2123 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "arrayobject.h" -#include "iterators.h" -#include "ctors.h" -#include "common.h" - -#define PseudoIndex -1 -#define RubberIndex -2 -#define SingleIndex -3 - -NPY_NO_EXPORT npy_intp -parse_subindex(PyObject *op, npy_intp *step_size, npy_intp *n_steps, npy_intp max) -{ - npy_intp index; - - if (op == Py_None) { - *n_steps = PseudoIndex; - index = 0; - } - else if (op == Py_Ellipsis) { - *n_steps = RubberIndex; - index = 0; - } - else if (PySlice_Check(op)) { - npy_intp stop; - if (slice_GetIndices((PySliceObject *)op, max, - &index, &stop, step_size, n_steps) < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, - "invalid slice"); - } - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - index = 0; - } - } - else { - index = PyArray_PyIntAsIntp(op); - if (error_converting(index)) { - PyErr_SetString(PyExc_IndexError, - "each subindex must be either a "\ - "slice, an integer, Ellipsis, or "\ - "newaxis"); - goto fail; - } - *n_steps = SingleIndex; - *step_size = 0; - if (index < 0) { - index += max; - } - if (index >= max || index < 0) { - PyErr_SetString(PyExc_IndexError, "invalid index"); - goto fail; - } - } - return index; - - fail: - return -1; -} - - -NPY_NO_EXPORT int -parse_index(PyArrayObject *self, PyObject *op, - npy_intp *dimensions, npy_intp *strides, npy_intp *offset_ptr) -{ - int i, j, n; - int nd_old, nd_new, n_add, n_pseudo; - npy_intp n_steps, start, offset, 
step_size; - PyObject *op1 = NULL; - int is_slice; - - if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) { - n = 1; - op1 = op; - Py_INCREF(op); - /* this relies on the fact that n==1 for loop below */ - is_slice = 1; - } - else { - if (!PySequence_Check(op)) { - PyErr_SetString(PyExc_IndexError, - "index must be either an int "\ - "or a sequence"); - return -1; - } - n = PySequence_Length(op); - is_slice = 0; - } - - nd_old = nd_new = 0; - - offset = 0; - for (i = 0; i < n; i++) { - if (!is_slice) { - if (!(op1=PySequence_GetItem(op, i))) { - PyErr_SetString(PyExc_IndexError, - "invalid index"); - return -1; - } - } - start = parse_subindex(op1, &step_size, &n_steps, - nd_old < self->nd ? - self->dimensions[nd_old] : 0); - Py_DECREF(op1); - if (start == -1) { - break; - } - if (n_steps == PseudoIndex) { - dimensions[nd_new] = 1; strides[nd_new] = 0; - nd_new++; - } - else { - if (n_steps == RubberIndex) { - for (j = i + 1, n_pseudo = 0; j < n; j++) { - op1 = PySequence_GetItem(op, j); - if (op1 == Py_None) { - n_pseudo++; - } - Py_DECREF(op1); - } - n_add = self->nd-(n-i-n_pseudo-1+nd_old); - if (n_add < 0) { - PyErr_SetString(PyExc_IndexError, - "too many indices"); - return -1; - } - for (j = 0; j < n_add; j++) { - dimensions[nd_new] = \ - self->dimensions[nd_old]; - strides[nd_new] = \ - self->strides[nd_old]; - nd_new++; nd_old++; - } - } - else { - if (nd_old >= self->nd) { - PyErr_SetString(PyExc_IndexError, - "too many indices"); - return -1; - } - offset += self->strides[nd_old]*start; - nd_old++; - if (n_steps != SingleIndex) { - dimensions[nd_new] = n_steps; - strides[nd_new] = step_size * \ - self->strides[nd_old-1]; - nd_new++; - } - } - } - } - if (i < n) { - return -1; - } - n_add = self->nd-nd_old; - for (j = 0; j < n_add; j++) { - dimensions[nd_new] = self->dimensions[nd_old]; - strides[nd_new] = self->strides[nd_old]; - nd_new++; - nd_old++; - } - *offset_ptr = offset; - return nd_new; -} - -static int -slice_coerce_index(PyObject *o, 
npy_intp *v)
{
    /* Coerce an arbitrary Python index object to npy_intp.
     * Returns 1 on success, 0 on failure (with the conversion error
     * cleared so the caller can raise its own). */
    *v = PyArray_PyIntAsIntp(o);
    if (error_converting(*v)) {
        PyErr_Clear();
        return 0;
    }
    return 1;
}

/* This is basically PySlice_GetIndicesEx, but with our coercion
 * of indices to integers (plus, that function is new in Python 2.3)
 *
 * Clamps start/stop into valid range for a sequence of the given
 * length, resolves negative indices, and computes the number of
 * elements the slice selects.  Returns 0 on success, -1 on error
 * (zero step, or an index that cannot be coerced to an integer). */
NPY_NO_EXPORT int
slice_GetIndices(PySliceObject *r, npy_intp length,
                 npy_intp *start, npy_intp *stop, npy_intp *step,
                 npy_intp *slicelength)
{
    npy_intp defstop;

    if (r->step == Py_None) {
        *step = 1;
    }
    else {
        if (!slice_coerce_index(r->step, step)) {
            return -1;
        }
        if (*step == 0) {
            PyErr_SetString(PyExc_ValueError,
                            "slice step cannot be zero");
            return -1;
        }
    }
    /* defstart = *step < 0 ? length - 1 : 0; */
    defstop = *step < 0 ? -1 : length;
    if (r->start == Py_None) {
        *start = *step < 0 ? length-1 : 0;
    }
    else {
        if (!slice_coerce_index(r->start, start)) {
            return -1;
        }
        /* negative indices count from the end */
        if (*start < 0) {
            *start += length;
        }
        /* clamp to [0, length] (or [-1, length-1] for negative step) */
        if (*start < 0) {
            *start = (*step < 0) ? -1 : 0;
        }
        if (*start >= length) {
            *start = (*step < 0) ? length - 1 : length;
        }
    }

    if (r->stop == Py_None) {
        *stop = defstop;
    }
    else {
        if (!slice_coerce_index(r->stop, stop)) {
            return -1;
        }
        if (*stop < 0) {
            *stop += length;
        }
        if (*stop < 0) {
            *stop = -1;
        }
        if (*stop > length) {
            *stop = length;
        }
    }

    /* empty slice when the bounds are inverted w.r.t. the step sign */
    if ((*step < 0 && *stop >= *start) ||
        (*step > 0 && *start >= *stop)) {
        *slicelength = 0;
    }
    else if (*step < 0) {
        *slicelength = (*stop - *start + 1) / (*step) + 1;
    }
    else {
        *slicelength = (*stop - *start - 1) / (*step) + 1;
    }

    return 0;
}

/*********************** Element-wise Array Iterator ***********************/
/* Aided by Peter J.
Verveer's nd_image package and numpy's arraymap ****/
/* and Python's array iterator ***/

/* Get the dataptr from its current coordinates for simple iterator.
 * Plain strided address computation: base + sum(coord[i] * stride[i]). */
static char*
get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
{
    npy_intp i;
    char *ret;

    ret = iter->ao->data;

    for(i = 0; i < iter->ao->nd; ++i) {
        ret += coordinates[i] * iter->strides[i];
    }

    return ret;
}

/*
 * This is common initialization code between PyArrayIterObject and
 * PyArrayNeighborhoodIterObject
 *
 * Increase ao refcount
 *
 * Fills in size, dims_m1, strides, backstrides, factors (row-major
 * index -> coordinate divisors) and the bounds/limits arrays used by
 * the neighborhood iterator.
 */
static PyObject *
array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
{
    int nd, i;

    nd = ao->nd;
    /* refresh the CONTIGUOUS flag before caching it on the iterator */
    PyArray_UpdateFlags(ao, CONTIGUOUS);
    if (PyArray_ISCONTIGUOUS(ao)) {
        it->contiguous = 1;
    }
    else {
        it->contiguous = 0;
    }
    Py_INCREF(ao);
    it->ao = ao;
    it->size = PyArray_SIZE(ao);
    it->nd_m1 = nd - 1;
    it->factors[nd-1] = 1;
    for (i = 0; i < nd; i++) {
        it->dims_m1[i] = ao->dimensions[i] - 1;
        it->strides[i] = ao->strides[i];
        /* backstride rewinds a whole axis in one subtraction */
        it->backstrides[i] = it->strides[i] * it->dims_m1[i];
        if (i > 0) {
            it->factors[nd-i-1] = it->factors[nd-i] * ao->dimensions[nd-i];
        }
        it->bounds[i][0] = 0;
        it->bounds[i][1] = ao->dimensions[i] - 1;
        it->limits[i][0] = 0;
        it->limits[i][1] = ao->dimensions[i] - 1;
        it->limits_sizes[i] = it->limits[i][1] - it->limits[i][0] + 1;
    }

    it->translate = &get_ptr_simple;
    PyArray_ITER_RESET(it);

    return (PyObject *)it;
}

/* Release the reference taken on the underlying array by
 * array_iter_base_init. */
static void
array_iter_base_dealloc(PyArrayIterObject *it)
{
    Py_XDECREF(it->ao);
}

/*NUMPY_API
 * Get Iterator.
- */ -NPY_NO_EXPORT PyObject * -PyArray_IterNew(PyObject *obj) -{ - PyArrayIterObject *it; - PyArrayObject *ao = (PyArrayObject *)obj; - - if (!PyArray_Check(ao)) { - PyErr_BadInternalCall(); - return NULL; - } - - it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ - if (it == NULL) { - return NULL; - } - - array_iter_base_init(it, ao); - return (PyObject *)it; -} - -/*NUMPY_API - * Get Iterator broadcast to a particular shape - */ -NPY_NO_EXPORT PyObject * -PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) -{ - PyArrayIterObject *it; - int i, diff, j, compat, k; - PyArrayObject *ao = (PyArrayObject *)obj; - - if (ao->nd > nd) { - goto err; - } - compat = 1; - diff = j = nd - ao->nd; - for (i = 0; i < ao->nd; i++, j++) { - if (ao->dimensions[i] == 1) { - continue; - } - if (ao->dimensions[i] != dims[j]) { - compat = 0; - break; - } - } - if (!compat) { - goto err; - } - it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - - if (it == NULL) { - return NULL; - } - PyArray_UpdateFlags(ao, CONTIGUOUS); - if (PyArray_ISCONTIGUOUS(ao)) { - it->contiguous = 1; - } - else { - it->contiguous = 0; - } - Py_INCREF(ao); - it->ao = ao; - it->size = PyArray_MultiplyList(dims, nd); - it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; - for (i = 0; i < nd; i++) { - it->dims_m1[i] = dims[i] - 1; - k = i - diff; - if ((k < 0) || ao->dimensions[k] != dims[i]) { - it->contiguous = 0; - it->strides[i] = 0; - } - else { - it->strides[i] = ao->strides[k]; - } - it->backstrides[i] = it->strides[i] * it->dims_m1[i]; - if (i > 0) { - it->factors[nd-i-1] = it->factors[nd-i] * dims[nd-i]; - } - } - PyArray_ITER_RESET(it); - return (PyObject *)it; - - err: - PyErr_SetString(PyExc_ValueError, "array is not broadcastable to "\ - "correct shape"); - return NULL; -} - - - - - -/*NUMPY_API - * 
Get Iterator that iterates over all but one axis (don't use this with - * PyArray_ITER_GOTO1D). The axis will be over-written if negative - * with the axis having the smallest stride. - */ -NPY_NO_EXPORT PyObject * -PyArray_IterAllButAxis(PyObject *obj, int *inaxis) -{ - PyArrayIterObject *it; - int axis; - it = (PyArrayIterObject *)PyArray_IterNew(obj); - if (it == NULL) { - return NULL; - } - if (PyArray_NDIM(obj)==0) { - return (PyObject *)it; - } - if (*inaxis < 0) { - int i, minaxis = 0; - npy_intp minstride = 0; - i = 0; - while (minstride == 0 && i < PyArray_NDIM(obj)) { - minstride = PyArray_STRIDE(obj,i); - i++; - } - for (i = 1; i < PyArray_NDIM(obj); i++) { - if (PyArray_STRIDE(obj,i) > 0 && - PyArray_STRIDE(obj, i) < minstride) { - minaxis = i; - minstride = PyArray_STRIDE(obj,i); - } - } - *inaxis = minaxis; - } - axis = *inaxis; - /* adjust so that will not iterate over axis */ - it->contiguous = 0; - if (it->size != 0) { - it->size /= PyArray_DIM(obj,axis); - } - it->dims_m1[axis] = 0; - it->backstrides[axis] = 0; - - /* - * (won't fix factors so don't use - * PyArray_ITER_GOTO1D with this iterator) - */ - return (PyObject *)it; -} - -/*NUMPY_API - * Adjusts previously broadcasted iterators so that the axis with - * the smallest sum of iterator strides is not iterated over. - * Returns dimension which is smallest in the range [0,multi->nd). - * A -1 is returned if multi->nd == 0. 
 *
 * don't use with PyArray_ITER_GOTO1D because factors are not adjusted
 */
NPY_NO_EXPORT int
PyArray_RemoveSmallest(PyArrayMultiIterObject *multi)
{
    PyArrayIterObject *it;
    int i, j;
    int axis;
    npy_intp smallest;
    npy_intp sumstrides[NPY_MAXDIMS];

    if (multi->nd == 0) {
        return -1;
    }
    /* total stride per axis summed over all member iterators */
    for (i = 0; i < multi->nd; i++) {
        sumstrides[i] = 0;
        for (j = 0; j < multi->numiter; j++) {
            sumstrides[i] += multi->iters[j]->strides[i];
        }
    }
    axis = 0;
    smallest = sumstrides[0];
    /* Find the axis with the smallest stride sum */
    for (i = 1; i < multi->nd; i++) {
        if (sumstrides[i] < smallest) {
            axis = i;
            smallest = sumstrides[i];
        }
    }
    /* collapse that axis on every member iterator so the caller can
     * handle it with an inner loop instead */
    for(i = 0; i < multi->numiter; i++) {
        it = multi->iters[i];
        it->contiguous = 0;
        if (it->size != 0) {
            it->size /= (it->dims_m1[axis]+1);
        }
        it->dims_m1[axis] = 0;
        it->backstrides[axis] = 0;
    }
    multi->size = multi->iters[0]->size;
    return axis;
}

/* Returns an array scalar holding the element desired */

static PyObject *
arrayiter_next(PyArrayIterObject *it)
{
    PyObject *ret;

    /* NULL return (no exception set) signals iterator exhaustion */
    if (it->index < it->size) {
        ret = PyArray_ToScalar(it->dataptr, it->ao);
        PyArray_ITER_NEXT(it);
        return ret;
    }
    return NULL;
}

static void
arrayiter_dealloc(PyArrayIterObject *it)
{
    array_iter_base_dealloc(it);
    _pya_free(it);
}

/* len() of a flat iterator == total number of elements */
static Py_ssize_t
iter_length(PyArrayIterObject *self)
{
    return self->size;
}


/* Boolean-mask subscript on a flat iterator: returns a new 1-d array
 * containing the elements where the 1-d boolean index array is true. */
static PyObject *
iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind)
{
    npy_intp index, strides;
    int itemsize;
    npy_intp count = 0;
    char *dptr, *optr;
    PyObject *r;
    int swap;
    PyArray_CopySwapFunc *copyswap;


    if (ind->nd != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "boolean index array should have 1 dimension");
        return NULL;
    }
    index = ind->dimensions[0];
    if (index > self->size) {
        PyErr_SetString(PyExc_ValueError,
                        "too many boolean indices");
        return NULL;
    }

    strides = ind->strides[0];
    dptr = ind->data;
    /* Get size of return array */
    while
(index--) { - if (*((Bool *)dptr) != 0) { - count++; - } - dptr += strides; - } - itemsize = self->ao->descr->elsize; - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(Py_TYPE(self->ao), - self->ao->descr, 1, &count, - NULL, NULL, - 0, (PyObject *)self->ao); - if (r == NULL) { - return NULL; - } - /* Set up loop */ - optr = PyArray_DATA(r); - index = ind->dimensions[0]; - dptr = ind->data; - copyswap = self->ao->descr->f->copyswap; - /* Loop over Boolean array */ - swap = (PyArray_ISNOTSWAPPED(self->ao) != PyArray_ISNOTSWAPPED(r)); - while (index--) { - if (*((Bool *)dptr) != 0) { - copyswap(optr, self->dataptr, swap, self->ao); - optr += itemsize; - } - dptr += strides; - PyArray_ITER_NEXT(self); - } - PyArray_ITER_RESET(self); - return r; -} - -static PyObject * -iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind) -{ - npy_intp num; - PyObject *r; - PyArrayIterObject *ind_it; - int itemsize; - int swap; - char *optr; - npy_intp index; - PyArray_CopySwapFunc *copyswap; - - itemsize = self->ao->descr->elsize; - if (ind->nd == 0) { - num = *((npy_intp *)ind->data); - if (num < 0) { - num += self->size; - } - if (num < 0 || num >= self->size) { - PyErr_Format(PyExc_IndexError, - "index %"INTP_FMT" out of bounds" \ - " 0<=index<%"INTP_FMT, - num, self->size); - r = NULL; - } - else { - PyArray_ITER_GOTO1D(self, num); - r = PyArray_ToScalar(self->dataptr, self->ao); - } - PyArray_ITER_RESET(self); - return r; - } - - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(Py_TYPE(self->ao), self->ao->descr, - ind->nd, ind->dimensions, - NULL, NULL, - 0, (PyObject *)self->ao); - if (r == NULL) { - return NULL; - } - optr = PyArray_DATA(r); - ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) { - Py_DECREF(r); - return NULL; - } - index = ind_it->size; - copyswap = PyArray_DESCR(r)->f->copyswap; - swap = (PyArray_ISNOTSWAPPED(r) != PyArray_ISNOTSWAPPED(self->ao)); - while (index--) { - num = *((npy_intp 
*)(ind_it->dataptr)); - if (num < 0) { - num += self->size; - } - if (num < 0 || num >= self->size) { - PyErr_Format(PyExc_IndexError, - "index %"INTP_FMT" out of bounds" \ - " 0<=index<%"INTP_FMT, - num, self->size); - Py_DECREF(ind_it); - Py_DECREF(r); - PyArray_ITER_RESET(self); - return NULL; - } - PyArray_ITER_GOTO1D(self, num); - copyswap(optr, self->dataptr, swap, r); - optr += itemsize; - PyArray_ITER_NEXT(ind_it); - } - Py_DECREF(ind_it); - PyArray_ITER_RESET(self); - return r; -} - -/* Always returns arrays */ -NPY_NO_EXPORT PyObject * -iter_subscript(PyArrayIterObject *self, PyObject *ind) -{ - PyArray_Descr *indtype = NULL; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *r; - char *dptr; - int size; - PyObject *obj = NULL; - PyArray_CopySwapFunc *copyswap; - - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; - } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto fail; - } - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); - } - - /* - * Tuples >1d not accepted --- i.e. 
no newaxis - * Could implement this with adjusted strides and dimensions in iterator - * Check for Boolean -- this is first becasue Bool is a subclass of Int - */ - PyArray_ITER_RESET(self); - - if (PyBool_Check(ind)) { - if (PyObject_IsTrue(ind)) { - return PyArray_ToScalar(self->dataptr, self->ao); - } - else { /* empty array */ - npy_intp ii = 0; - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(Py_TYPE(self->ao), - self->ao->descr, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return r; - } - } - - /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, - self->size); - if (start == -1) { - goto fail; - } - if (n_steps == RubberIndex || n_steps == PseudoIndex) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; - } - PyArray_ITER_GOTO1D(self, start) - if (n_steps == SingleIndex) { /* Integer */ - r = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - return r; - } - size = self->ao->descr->elsize; - Py_INCREF(self->ao->descr); - r = PyArray_NewFromDescr(Py_TYPE(self->ao), - self->ao->descr, - 1, &n_steps, - NULL, NULL, - 0, (PyObject *)self->ao); - if (r == NULL) { - goto fail; - } - dptr = PyArray_DATA(r); - copyswap = PyArray_DESCR(r)->f->copyswap; - while (n_steps--) { - copyswap(dptr, self->dataptr, 0, r); - start += step_size; - PyArray_ITER_GOTO1D(self, start) - dptr += size; - } - PyArray_ITER_RESET(self); - return r; - } - - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - if (obj == NULL) { - goto fail; - } - } - else { - Py_INCREF(ind); - obj = ind; - } - - if (PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE(obj)==PyArray_BOOL) { - r = 
iter_subscript_Bool(self, (PyArrayObject *)obj); - Py_DECREF(indtype); - } - /* Check for integer array */ - else if (PyArray_ISINTEGER(obj)) { - PyObject *new; - new = PyArray_FromAny(obj, indtype, 0, 0, - FORCECAST | ALIGNED, NULL); - if (new == NULL) { - goto fail; - } - Py_DECREF(obj); - obj = new; - r = iter_subscript_int(self, (PyArrayObject *)obj); - } - else { - goto fail; - } - Py_DECREF(obj); - return r; - } - else { - Py_DECREF(indtype); - } - - - fail: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - } - Py_XDECREF(indtype); - Py_XDECREF(obj); - return NULL; - -} - - -static int -iter_ass_sub_Bool(PyArrayIterObject *self, PyArrayObject *ind, - PyArrayIterObject *val, int swap) -{ - npy_intp index, strides; - char *dptr; - PyArray_CopySwapFunc *copyswap; - - if (ind->nd != 1) { - PyErr_SetString(PyExc_ValueError, - "boolean index array should have 1 dimension"); - return -1; - } - - index = ind->dimensions[0]; - if (index > self->size) { - PyErr_SetString(PyExc_ValueError, - "boolean index array has too many values"); - return -1; - } - - strides = ind->strides[0]; - dptr = ind->data; - PyArray_ITER_RESET(self); - /* Loop over Boolean array */ - copyswap = self->ao->descr->f->copyswap; - while (index--) { - if (*((Bool *)dptr) != 0) { - copyswap(self->dataptr, val->dataptr, swap, self->ao); - PyArray_ITER_NEXT(val); - if (val->index == val->size) { - PyArray_ITER_RESET(val); - } - } - dptr += strides; - PyArray_ITER_NEXT(self); - } - PyArray_ITER_RESET(self); - return 0; -} - -static int -iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, - PyArrayIterObject *val, int swap) -{ - npy_intp num; - PyArrayIterObject *ind_it; - npy_intp index; - PyArray_CopySwapFunc *copyswap; - - copyswap = self->ao->descr->f->copyswap; - if (ind->nd == 0) { - num = *((npy_intp *)ind->data); - PyArray_ITER_GOTO1D(self, num); - copyswap(self->dataptr, val->dataptr, swap, self->ao); - return 0; - } - ind_it = 
(PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) { - return -1; - } - index = ind_it->size; - while (index--) { - num = *((npy_intp *)(ind_it->dataptr)); - if (num < 0) { - num += self->size; - } - if ((num < 0) || (num >= self->size)) { - PyErr_Format(PyExc_IndexError, - "index %"INTP_FMT" out of bounds" \ - " 0<=index<%"INTP_FMT, num, - self->size); - Py_DECREF(ind_it); - return -1; - } - PyArray_ITER_GOTO1D(self, num); - copyswap(self->dataptr, val->dataptr, swap, self->ao); - PyArray_ITER_NEXT(ind_it); - PyArray_ITER_NEXT(val); - if (val->index == val->size) { - PyArray_ITER_RESET(val); - } - } - Py_DECREF(ind_it); - return 0; -} - -NPY_NO_EXPORT int -iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) -{ - PyObject *arrval = NULL; - PyArrayIterObject *val_it = NULL; - PyArray_Descr *type; - PyArray_Descr *indtype = NULL; - int swap, retval = -1; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *obj = NULL; - PyArray_CopySwapFunc *copyswap; - - - if (val == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete iterator elements"); - return -1; - } - - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; - } - - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto finish; - } - ind = PyTuple_GET_ITEM(ind, 0); - } - - type = self->ao->descr; - - /* - * Check for Boolean -- this is first becasue - * Bool is a subclass of Int - */ - if (PyBool_Check(ind)) { - retval = 0; - if (PyObject_IsTrue(ind)) { - retval = type->f->setitem(val, self->dataptr, self->ao); - } - goto finish; - } - - if (PySequence_Check(ind) || PySlice_Check(ind)) { - goto skip; - } - start = PyArray_PyIntAsIntp(ind); - if (start==-1 && PyErr_Occurred()) { - PyErr_Clear(); - } - else { - if (start < -self->size || start >= self->size) { - PyErr_Format(PyExc_ValueError, - "index (%" NPY_INTP_FMT \ - 
") out of range", start); - goto finish; - } - retval = 0; - PyArray_ITER_GOTO1D(self, start); - retval = type->f->setitem(val, self->dataptr, self->ao); - PyArray_ITER_RESET(self); - if (retval < 0) { - PyErr_SetString(PyExc_ValueError, - "Error setting single item of array."); - } - goto finish; - } - - skip: - Py_INCREF(type); - arrval = PyArray_FromAny(val, type, 0, 0, 0, NULL); - if (arrval == NULL) { - return -1; - } - val_it = (PyArrayIterObject *)PyArray_IterNew(arrval); - if (val_it == NULL) { - goto finish; - } - if (val_it->size == 0) { - retval = 0; - goto finish; - } - - copyswap = PyArray_DESCR(arrval)->f->copyswap; - swap = (PyArray_ISNOTSWAPPED(self->ao)!=PyArray_ISNOTSWAPPED(arrval)); - - /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, self->size); - if (start == -1) { - goto finish; - } - if (n_steps == RubberIndex || n_steps == PseudoIndex) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto finish; - } - PyArray_ITER_GOTO1D(self, start); - if (n_steps == SingleIndex) { - /* Integer */ - copyswap(self->dataptr, PyArray_DATA(arrval), swap, arrval); - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } - while (n_steps--) { - copyswap(self->dataptr, val_it->dataptr, swap, arrval); - start += step_size; - PyArray_ITER_GOTO1D(self, start); - PyArray_ITER_NEXT(val_it); - if (val_it->index == val_it->size) { - PyArray_ITER_RESET(val_it); - } - } - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } - - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); - if (PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; - } - - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE(obj)==PyArray_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, swap) < 0) { 
- goto finish; - } - retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER(obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - FORCECAST | BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new == NULL) { - goto finish; - } - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, swap) < 0) { - goto finish; - } - retval = 0; - } - } - - finish: - if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - } - Py_XDECREF(indtype); - Py_XDECREF(obj); - Py_XDECREF(val_it); - Py_XDECREF(arrval); - return retval; - -} - - -static PyMappingMethods iter_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)iter_length, /*mp_length*/ -#else - (inquiry)iter_length, /*mp_length*/ -#endif - (binaryfunc)iter_subscript, /*mp_subscript*/ - (objobjargproc)iter_ass_subscript, /*mp_ass_subscript*/ -}; - - - -static PyObject * -iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op)) -{ - - PyObject *r; - npy_intp size; - - /* Any argument ignored */ - - /* Two options: - * 1) underlying array is contiguous - * -- return 1-d wrapper around it - * 2) underlying array is not contiguous - * -- make new 1-d contiguous array with updateifcopy flag set - * to copy back to the old array - */ - size = PyArray_SIZE(it->ao); - Py_INCREF(it->ao->descr); - if (PyArray_ISCONTIGUOUS(it->ao)) { - r = PyArray_NewFromDescr(&PyArray_Type, - it->ao->descr, - 1, &size, - NULL, it->ao->data, - it->ao->flags, - (PyObject *)it->ao); - if (r == NULL) { - return NULL; - } - } - else { - r = PyArray_NewFromDescr(&PyArray_Type, - it->ao->descr, - 1, &size, - NULL, NULL, - 0, (PyObject *)it->ao); - if (r == NULL) { - return NULL; - } - if (PyArray_CopyAnyInto((PyArrayObject *)r, it->ao) < 0) { - Py_DECREF(r); - return NULL; - } - PyArray_FLAGS(r) |= UPDATEIFCOPY; - it->ao->flags &= ~WRITEABLE; - } - Py_INCREF(it->ao); - PyArray_BASE(r) = (PyObject *)it->ao; - return r; - -} - 
-static PyObject * -iter_copy(PyArrayIterObject *it, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - return PyArray_Flatten(it->ao, 0); -} - -static PyMethodDef iter_methods[] = { - /* to get array */ - {"__array__", - (PyCFunction)iter_array, - METH_VARARGS, NULL}, - {"copy", - (PyCFunction)iter_copy, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -static PyObject * -iter_richcompare(PyArrayIterObject *self, PyObject *other, int cmp_op) -{ - PyArrayObject *new; - PyObject *ret; - new = (PyArrayObject *)iter_array(self, NULL); - if (new == NULL) { - return NULL; - } - ret = array_richcompare(new, other, cmp_op); - Py_DECREF(new); - return ret; -} - - -static PyMemberDef iter_members[] = { - {"base", - T_OBJECT, - offsetof(PyArrayIterObject, ao), - READONLY, NULL}, - {"index", - T_INT, - offsetof(PyArrayIterObject, index), - READONLY, NULL}, - {NULL, 0, 0, 0, NULL}, -}; - -static PyObject * -iter_coords_get(PyArrayIterObject *self) -{ - int nd; - nd = self->ao->nd; - if (self->contiguous) { - /* - * coordinates not kept track of --- - * need to generate from index - */ - npy_intp val; - int i; - val = self->index; - for (i = 0; i < nd; i++) { - if (self->factors[i] != 0) { - self->coordinates[i] = val / self->factors[i]; - val = val % self->factors[i]; - } else { - self->coordinates[i] = 0; - } - } - } - return PyArray_IntTupleFromIntp(nd, self->coordinates); -} - -static PyGetSetDef iter_getsets[] = { - {"coords", - (getter)iter_coords_get, - NULL, - NULL, NULL}, - {NULL, NULL, NULL, NULL, NULL}, -}; - -NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.flatiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arrayiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if 
defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &iter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)iter_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arrayiter_next, /* tp_iternext */ - iter_methods, /* tp_methods */ - iter_members, /* tp_members */ - iter_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - -/** END of Array Iterator **/ - -/* Adjust dimensionality and strides for index object iterators - --- i.e. 
broadcast
*/
/*NUMPY_API
 * Adjust the iterators in `mit` to a common broadcast shape.
 * Computes the broadcast number of dimensions and shape, then resets
 * each member iterator with stride 0 on axes that were added or had
 * length 1 in its underlying array.  Returns 0 on success, -1 on a
 * shape mismatch or size overflow.
 */
NPY_NO_EXPORT int
PyArray_Broadcast(PyArrayMultiIterObject *mit)
{
    int i, nd, k, j;
    npy_intp tmp;
    PyArrayIterObject *it;

    /* Discover the broadcast number of dimensions */
    for (i = 0, nd = 0; i < mit->numiter; i++) {
        nd = MAX(nd, mit->iters[i]->ao->nd);
    }
    mit->nd = nd;

    /* Discover the broadcast shape in each dimension */
    for (i = 0; i < nd; i++) {
        mit->dimensions[i] = 1;
        for (j = 0; j < mit->numiter; j++) {
            it = mit->iters[j];
            /* This prepends 1 to shapes not already equal to nd */
            k = i + it->ao->nd - nd;
            if (k >= 0) {
                tmp = it->ao->dimensions[k];
                if (tmp == 1) {
                    continue;
                }
                if (mit->dimensions[i] == 1) {
                    mit->dimensions[i] = tmp;
                }
                else if (mit->dimensions[i] != tmp) {
                    PyErr_SetString(PyExc_ValueError,
                                    "shape mismatch: objects" \
                                    " cannot be broadcast" \
                                    " to a single shape");
                    return -1;
                }
            }
        }
    }

    /*
     * Reset the iterator dimensions and strides of each iterator
     * object -- using 0 valued strides for broadcasting
     * Need to check for overflow
     */
    tmp = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd);
    if (tmp < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "broadcast dimensions too large.");
        return -1;
    }
    mit->size = tmp;
    for (i = 0; i < mit->numiter; i++) {
        it = mit->iters[i];
        it->nd_m1 = mit->nd - 1;
        it->size = tmp;
        nd = it->ao->nd;
        it->factors[mit->nd-1] = 1;
        for (j = 0; j < mit->nd; j++) {
            it->dims_m1[j] = mit->dimensions[j] - 1;
            k = j + nd - mit->nd;
            /*
             * If this dimension was added or shape of
             * underlying array was 1
             */
            if ((k < 0) ||
                it->ao->dimensions[k] != mit->dimensions[j]) {
                it->contiguous = 0;
                it->strides[j] = 0;
            }
            else {
                it->strides[j] = it->ao->strides[k];
            }
            it->backstrides[j] = it->strides[j] * it->dims_m1[j];
            if (j > 0)
                it->factors[mit->nd-j-1] =
                    it->factors[mit->nd-j] * mit->dimensions[mit->nd-j];
        }
        PyArray_ITER_RESET(it);
    }
    return 0;
}

/*NUMPY_API
 * Get
MultiIterator from array of Python objects and any additional - * - * PyObject **mps -- array of PyObjects - * int n - number of PyObjects in the array - * int nadd - number of additional arrays to include in the iterator. - * - * Returns a multi-iterator object. - */ -NPY_NO_EXPORT PyObject * -PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...) -{ - va_list va; - PyArrayMultiIterObject *multi; - PyObject *current; - PyObject *arr; - - int i, ntot, err=0; - - ntot = n + nadd; - if (ntot < 2 || ntot > NPY_MAXARGS) { - PyErr_Format(PyExc_ValueError, - "Need between 2 and (%d) " \ - "array objects (inclusive).", NPY_MAXARGS); - return NULL; - } - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) { - return PyErr_NoMemory(); - } - PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - - for (i = 0; i < ntot; i++) { - multi->iters[i] = NULL; - } - multi->numiter = ntot; - multi->index = 0; - - va_start(va, nadd); - for (i = 0; i < ntot; i++) { - if (i < n) { - current = mps[i]; - } - else { - current = va_arg(va, PyObject *); - } - arr = PyArray_FROM_O(current); - if (arr == NULL) { - err = 1; - break; - } - else { - multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); - Py_DECREF(arr); - } - } - va_end(va); - - if (!err && PyArray_Broadcast(multi) < 0) { - err = 1; - } - if (err) { - Py_DECREF(multi); - return NULL; - } - PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; -} - -/*NUMPY_API - * Get MultiIterator, - */ -NPY_NO_EXPORT PyObject * -PyArray_MultiIterNew(int n, ...) 
-{ - va_list va; - PyArrayMultiIterObject *multi; - PyObject *current; - PyObject *arr; - - int i, err = 0; - - if (n < 2 || n > NPY_MAXARGS) { - PyErr_Format(PyExc_ValueError, - "Need between 2 and (%d) " \ - "array objects (inclusive).", NPY_MAXARGS); - return NULL; - } - - /* fprintf(stderr, "multi new...");*/ - - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) { - return PyErr_NoMemory(); - } - PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - - for (i = 0; i < n; i++) { - multi->iters[i] = NULL; - } - multi->numiter = n; - multi->index = 0; - - va_start(va, n); - for (i = 0; i < n; i++) { - current = va_arg(va, PyObject *); - arr = PyArray_FROM_O(current); - if (arr == NULL) { - err = 1; - break; - } - else { - multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); - Py_DECREF(arr); - } - } - va_end(va); - - if (!err && PyArray_Broadcast(multi) < 0) { - err = 1; - } - if (err) { - Py_DECREF(multi); - return NULL; - } - PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; -} - -static PyObject * -arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) -{ - - Py_ssize_t n, i; - PyArrayMultiIterObject *multi; - PyObject *arr; - - if (kwds != NULL) { - PyErr_SetString(PyExc_ValueError, - "keyword arguments not accepted."); - return NULL; - } - - n = PyTuple_Size(args); - if (n < 2 || n > NPY_MAXARGS) { - if (PyErr_Occurred()) { - return NULL; - } - PyErr_Format(PyExc_ValueError, - "Need at least two and fewer than (%d) " \ - "array objects.", NPY_MAXARGS); - return NULL; - } - - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) { - return PyErr_NoMemory(); - } - PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - - multi->numiter = n; - multi->index = 0; - for (i = 0; i < n; i++) { - multi->iters[i] = NULL; - } - for (i = 0; i < n; i++) { - arr = PyArray_FromAny(PyTuple_GET_ITEM(args, i), NULL, 0, 0, 0, NULL); - if (arr == NULL) { - goto fail; - } - if 
((multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr)) - == NULL) { - goto fail; - } - Py_DECREF(arr); - } - if (PyArray_Broadcast(multi) < 0) { - goto fail; - } - PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; - - fail: - Py_DECREF(multi); - return NULL; -} - -static PyObject * -arraymultiter_next(PyArrayMultiIterObject *multi) -{ - PyObject *ret; - int i, n; - - n = multi->numiter; - ret = PyTuple_New(n); - if (ret == NULL) { - return NULL; - } - if (multi->index < multi->size) { - for (i = 0; i < n; i++) { - PyArrayIterObject *it=multi->iters[i]; - PyTuple_SET_ITEM(ret, i, - PyArray_ToScalar(it->dataptr, it->ao)); - PyArray_ITER_NEXT(it); - } - multi->index++; - return ret; - } - return NULL; -} - -static void -arraymultiter_dealloc(PyArrayMultiIterObject *multi) -{ - int i; - - for (i = 0; i < multi->numiter; i++) { - Py_XDECREF(multi->iters[i]); - } - Py_TYPE(multi)->tp_free((PyObject *)multi); -} - -static PyObject * -arraymultiter_size_get(PyArrayMultiIterObject *self) -{ -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) self->size); -#else - if (self->size < MAX_LONG) { - return PyInt_FromLong((long) self->size); - } - else { - return PyLong_FromLongLong((longlong) self->size); - } -#endif -} - -static PyObject * -arraymultiter_index_get(PyArrayMultiIterObject *self) -{ -#if SIZEOF_INTP <= SIZEOF_LONG - return PyInt_FromLong((long) self->index); -#else - if (self->size < MAX_LONG) { - return PyInt_FromLong((long) self->index); - } - else { - return PyLong_FromLongLong((longlong) self->index); - } -#endif -} - -static PyObject * -arraymultiter_shape_get(PyArrayMultiIterObject *self) -{ - return PyArray_IntTupleFromIntp(self->nd, self->dimensions); -} - -static PyObject * -arraymultiter_iters_get(PyArrayMultiIterObject *self) -{ - PyObject *res; - int i, n; - - n = self->numiter; - res = PyTuple_New(n); - if (res == NULL) { - return res; - } - for (i = 0; i < n; i++) { - Py_INCREF(self->iters[i]); - PyTuple_SET_ITEM(res, i, 
(PyObject *)self->iters[i]);
    }
    return res;
}

static PyGetSetDef arraymultiter_getsetlist[] = {
    {"size",
        (getter)arraymultiter_size_get,
        NULL,
        NULL, NULL},
    {"index",
        (getter)arraymultiter_index_get,
        NULL,
        NULL, NULL},
    {"shape",
        (getter)arraymultiter_shape_get,
        NULL,
        NULL, NULL},
    {"iters",
        (getter)arraymultiter_iters_get,
        NULL,
        NULL, NULL},
    {NULL, NULL, NULL, NULL, NULL},
};

static PyMemberDef arraymultiter_members[] = {
    {"numiter",
        T_INT,
        offsetof(PyArrayMultiIterObject, numiter),
        READONLY, NULL},
    {"nd",
        T_INT,
        offsetof(PyArrayMultiIterObject, nd),
        READONLY, NULL},
    {NULL, 0, 0, 0, NULL},
};

/* broadcast.reset(): rewind all member iterators to the start */
static PyObject *
arraymultiter_reset(PyArrayMultiIterObject *self, PyObject *args)
{
    if (!PyArg_ParseTuple(args, "")) {
        return NULL;
    }
    PyArray_MultiIter_RESET(self);
    Py_INCREF(Py_None);
    return Py_None;
}

static PyMethodDef arraymultiter_methods[] = {
    {"reset",
        (PyCFunction) arraymultiter_reset,
        METH_VARARGS, NULL},
    {NULL, NULL, 0, NULL},      /* sentinel */
};

NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = {
#if defined(NPY_PY3K)
    PyVarObject_HEAD_INIT(NULL, 0)
#else
    PyObject_HEAD_INIT(NULL)
    0,                                          /* ob_size */
#endif
    "numpy.broadcast",                          /* tp_name */
    sizeof(PyArrayMultiIterObject),             /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    (destructor)arraymultiter_dealloc,          /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
#if defined(NPY_PY3K)
    0,                                          /* tp_reserved */
#else
    0,                                          /* tp_compare */
#endif
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    0,                                          /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                         /* tp_flags */
    0,                                          /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    (iternextfunc)arraymultiter_next,           /* tp_iternext */
    arraymultiter_methods,                      /* tp_methods */
    arraymultiter_members,                      /* tp_members */
    arraymultiter_getsetlist,                   /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    (initproc)0,                                /* tp_init */
    0,                                          /* tp_alloc */
    arraymultiter_new,                          /* tp_new */
    0,                                          /* tp_free */
    0,                                          /* tp_is_gc */
    0,                                          /* tp_bases */
    0,                                          /* tp_mro */
    0,                                          /* tp_cache */
    0,                                          /* tp_subclasses */
    0,                                          /* tp_weaklist */
    0,                                          /* tp_del */
#if PY_VERSION_HEX >= 0x02060000
    0,                                          /* tp_version_tag */
#endif
};

/*========================= Neighborhood iterator ======================*/

static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter);

/* Allocate and fill one element's worth of memory with the fill value
 * used for out-of-bounds neighborhood accesses.  Caller owns the
 * returned buffer (PyDataMem_FREE).  Returns NULL on failure. */
static char* _set_constant(PyArrayNeighborhoodIterObject* iter,
                           PyArrayObject *fill)
{
    char *ret;
    PyArrayIterObject *ar = iter->_internal_iter;
    int storeflags, st;

    ret = PyDataMem_NEW(ar->ao->descr->elsize);
    if (ret == NULL) {
        PyErr_SetNone(PyExc_MemoryError);
        return NULL;
    }

    if (PyArray_ISOBJECT(ar->ao)) {
        /* object arrays store a PyObject*; keep a reference to it */
        memcpy(ret, fill->data, sizeof(PyObject*));
        Py_INCREF(*(PyObject**)ret);
    } else {
        /* Non-object types */

        /* temporarily force BEHAVED so setitem can write the buffer */
        storeflags = ar->ao->flags;
        ar->ao->flags |= BEHAVED;
        st = ar->ao->descr->f->setitem((PyObject*)fill, ret, ar->ao);
        ar->ao->flags = storeflags;

        if (st < 0) {
            PyDataMem_FREE(ret);
            return NULL;
        }
    }

    return ret;
}

/* Translate neighborhood offset `c`; early-return the constant fill
 * pointer as soon as any coordinate falls outside the limits. */
#define _INF_SET_PTR(c) \
    bd = coordinates[c] + p->coordinates[c]; \
    if (bd < p->limits[c][0] || bd > p->limits[c][1]) { \
        return niter->constant; \
    } \
    _coordinates[c] = bd;

/* set the dataptr from its current coordinates */
static char*
get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates)
{
    int i;
    npy_intp bd, _coordinates[NPY_MAXDIMS];
    PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter;
    PyArrayIterObject *p = niter->_internal_iter;

    for(i = 0; i < niter->nd; ++i) {
        _INF_SET_PTR(i)
    }

    /* in-bounds: delegate to the inner iterator's own translate */
    return p->translate(p, _coordinates);
}
-#undef _INF_SET_PTR - -#define _NPY_IS_EVEN(x) ((x) % 2 == 0) - -/* For an array x of dimension n, and given index i, returns j, 0 <= j < n - * such as x[i] = x[j], with x assumed to be mirrored. For example, for x = - * {1, 2, 3} (n = 3) - * - * index -5 -4 -3 -2 -1 0 1 2 3 4 5 6 - * value 2 3 3 2 1 1 2 3 3 2 1 1 - * - * _npy_pos_index_mirror(4, 3) will return 1, because x[4] = x[1]*/ -NPY_INLINE static npy_intp -__npy_pos_remainder(npy_intp i, npy_intp n) -{ - npy_intp k, l, j; - - /* Mirror i such as it is guaranteed to be positive */ - if (i < 0) { - i = - i - 1; - } - - /* compute k and l such as i = k * n + l, 0 <= l < k */ - k = i / n; - l = i - k * n; - - if (_NPY_IS_EVEN(k)) { - j = l; - } else { - j = n - 1 - l; - } - return j; -} -#undef _NPY_IS_EVEN - -#define _INF_SET_PTR_MIRROR(c) \ - lb = p->limits[c][0]; \ - bd = coordinates[c] + p->coordinates[c] - lb; \ - _coordinates[c] = lb + __npy_pos_remainder(bd, p->limits_sizes[c]); - -/* set the dataptr from its current coordinates */ -static char* -get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates) -{ - int i; - npy_intp bd, _coordinates[NPY_MAXDIMS], lb; - PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter; - PyArrayIterObject *p = niter->_internal_iter; - - for(i = 0; i < niter->nd; ++i) { - _INF_SET_PTR_MIRROR(i) - } - - return p->translate(p, _coordinates); -} -#undef _INF_SET_PTR_MIRROR - -/* compute l such as i = k * n + l, 0 <= l < |k| */ -NPY_INLINE static npy_intp -__npy_euclidean_division(npy_intp i, npy_intp n) -{ - npy_intp l; - - l = i % n; - if (l < 0) { - l += n; - } - return l; -} - -#define _INF_SET_PTR_CIRCULAR(c) \ - lb = p->limits[c][0]; \ - bd = coordinates[c] + p->coordinates[c] - lb; \ - _coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]); - -static char* -get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) -{ - int i; - npy_intp bd, _coordinates[NPY_MAXDIMS], lb; - PyArrayNeighborhoodIterObject *niter = 
(PyArrayNeighborhoodIterObject*)_iter; - PyArrayIterObject *p = niter->_internal_iter; - - for(i = 0; i < niter->nd; ++i) { - _INF_SET_PTR_CIRCULAR(i) - } - return p->translate(p, _coordinates); -} - -#undef _INF_SET_PTR_CIRCULAR - -/* - * fill and x->ao should have equivalent types - */ -/*NUMPY_API - * A Neighborhood Iterator object. -*/ -NPY_NO_EXPORT PyObject* -PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds, - int mode, PyArrayObject* fill) -{ - int i; - PyArrayNeighborhoodIterObject *ret; - - ret = _pya_malloc(sizeof(*ret)); - if (ret == NULL) { - return NULL; - } - PyObject_Init((PyObject *)ret, &PyArrayNeighborhoodIter_Type); - - array_iter_base_init((PyArrayIterObject*)ret, x->ao); - Py_INCREF(x); - ret->_internal_iter = x; - - ret->nd = x->ao->nd; - - for (i = 0; i < ret->nd; ++i) { - ret->dimensions[i] = x->ao->dimensions[i]; - } - - /* Compute the neighborhood size and copy the shape */ - ret->size = 1; - for (i = 0; i < ret->nd; ++i) { - ret->bounds[i][0] = bounds[2 * i]; - ret->bounds[i][1] = bounds[2 * i + 1]; - ret->size *= (ret->bounds[i][1] - ret->bounds[i][0]) + 1; - - /* limits keep track of valid ranges for the neighborhood: if a bound - * of the neighborhood is outside the array, then limits is the same as - * boundaries. On the contrary, if a bound is strictly inside the - * array, then limits correspond to the array range. For example, for - * an array [1, 2, 3], if bounds are [-1, 3], limits will be [-1, 3], - * but if bounds are [1, 2], then limits will be [0, 2]. - * - * This is used by neighborhood iterators stacked on top of this one */ - ret->limits[i][0] = ret->bounds[i][0] < 0 ? ret->bounds[i][0] : 0; - ret->limits[i][1] = ret->bounds[i][1] >= ret->dimensions[i] - 1 ? 
- ret->bounds[i][1] : - ret->dimensions[i] - 1; - ret->limits_sizes[i] = (ret->limits[i][1] - ret->limits[i][0]) + 1; - } - - switch (mode) { - case NPY_NEIGHBORHOOD_ITER_ZERO_PADDING: - ret->constant = PyArray_Zero(x->ao); - ret->mode = mode; - ret->translate = &get_ptr_constant; - break; - case NPY_NEIGHBORHOOD_ITER_ONE_PADDING: - ret->constant = PyArray_One(x->ao); - ret->mode = mode; - ret->translate = &get_ptr_constant; - break; - case NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING: - /* New reference in returned value of _set_constant if array - * object */ - assert(PyArray_EquivArrTypes(x->ao, fill) == NPY_TRUE); - ret->constant = _set_constant(ret, fill); - if (ret->constant == NULL) { - goto clean_x; - } - ret->mode = mode; - ret->translate = &get_ptr_constant; - break; - case NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING: - ret->mode = mode; - ret->constant = NULL; - ret->translate = &get_ptr_mirror; - break; - case NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING: - ret->mode = mode; - ret->constant = NULL; - ret->translate = &get_ptr_circular; - break; - default: - PyErr_SetString(PyExc_ValueError, "Unsupported padding mode"); - goto clean_x; - } - - /* - * XXX: we force x iterator to be non contiguous because we need - * coordinates... 
Modifying the iterator here is not great - */ - x->contiguous = 0; - - PyArrayNeighborhoodIter_Reset(ret); - - return (PyObject*)ret; - -clean_x: - Py_DECREF(ret->_internal_iter); - array_iter_base_dealloc((PyArrayIterObject*)ret); - _pya_free((PyArrayObject*)ret); - return NULL; -} - -static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter) -{ - if (iter->mode == NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING) { - if (PyArray_ISOBJECT(iter->_internal_iter->ao)) { - Py_DECREF(*(PyObject**)iter->constant); - } - } - if (iter->constant != NULL) { - PyDataMem_FREE(iter->constant); - } - Py_DECREF(iter->_internal_iter); - - array_iter_base_dealloc((PyArrayIterObject*)iter); - _pya_free((PyArrayObject*)iter); -} - -NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.neigh_internal_iter", /* tp_name*/ - sizeof(PyArrayNeighborhoodIterObject), /* tp_basicsize*/ - 0, /* tp_itemsize*/ - (destructor)neighiter_dealloc, /* tp_dealloc*/ - 0, /* tp_print*/ - 0, /* tp_getattr*/ - 0, /* tp_setattr*/ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr*/ - 0, /* tp_as_number*/ - 0, /* tp_as_sequence*/ - 0, /* tp_as_mapping*/ - 0, /* tp_hash */ - 0, /* tp_call*/ - 0, /* tp_str*/ - 0, /* tp_getattro*/ - 0, /* tp_setattro*/ - 0, /* tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /* tp_flags*/ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses 
*/ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; diff --git a/numpy-1.6.2/numpy/core/src/multiarray/iterators.h b/numpy-1.6.2/numpy/core/src/multiarray/iterators.h deleted file mode 100644 index 3099425c55..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/iterators.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _NPY_ARRAYITERATORS_H_ -#define _NPY_ARRAYITERATORS_H_ - -NPY_NO_EXPORT intp -parse_subindex(PyObject *op, intp *step_size, intp *n_steps, intp max); - -NPY_NO_EXPORT int -parse_index(PyArrayObject *self, PyObject *op, - intp *dimensions, intp *strides, intp *offset_ptr); - -NPY_NO_EXPORT PyObject -*iter_subscript(PyArrayIterObject *, PyObject *); - -NPY_NO_EXPORT int -iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *); - -NPY_NO_EXPORT int -slice_GetIndices(PySliceObject *r, intp length, - intp *start, intp *stop, intp *step, - intp *slicelength); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy-1.6.2/numpy/core/src/multiarray/lowlevel_strided_loops.c.src deleted file mode 100644 index fc8d71f46a..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ /dev/null @@ -1,1184 +0,0 @@ -/* - * This file contains low-level loops for copying and byte-swapping - * strided data. - * - * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia - * - * See LICENSE.txt for the license. - */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#include -#include -#include -#include - -#include "lowlevel_strided_loops.h" - -/* - * x86 platform may work with unaligned access, except when the - * compiler uses aligned SSE instructions, which gcc does in some - * cases. This is disabled for the time being. 
- */ -#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif - -#define _NPY_NOP1(x) (x) -#define _NPY_NOP2(x) (x) -#define _NPY_NOP4(x) (x) -#define _NPY_NOP8(x) (x) - -#define _NPY_SWAP2(x) (((((npy_uint16)x)&0xffu) << 8) | \ - (((npy_uint16)x) >> 8)) - -#define _NPY_SWAP4(x) (((((npy_uint32)x)&0xffu) << 24) | \ - ((((npy_uint32)x)&0xff00u) << 8) | \ - ((((npy_uint32)x)&0xff0000u) >> 8) | \ - (((npy_uint32)x) >> 24)) - -#define _NPY_SWAP_PAIR4(x) (((((npy_uint32)x)&0xffu) << 8) | \ - ((((npy_uint32)x)&0xff00u) >> 8) | \ - ((((npy_uint32)x)&0xff0000u) << 8) | \ - ((((npy_uint32)x)&0xff000000u) >> 8)) - -#define _NPY_SWAP8(x) (((((npy_uint64)x)&0xffULL) << 56) | \ - ((((npy_uint64)x)&0xff00ULL) << 40) | \ - ((((npy_uint64)x)&0xff0000ULL) << 24) | \ - ((((npy_uint64)x)&0xff000000ULL) << 8) | \ - ((((npy_uint64)x)&0xff00000000ULL) >> 8) | \ - ((((npy_uint64)x)&0xff0000000000ULL) >> 24) | \ - ((((npy_uint64)x)&0xff000000000000ULL) >> 40) | \ - (((npy_uint64)x) >> 56)) - -#define _NPY_SWAP_PAIR8(x) (((((npy_uint64)x)&0xffULL) << 24) | \ - ((((npy_uint64)x)&0xff00ULL) << 8) | \ - ((((npy_uint64)x)&0xff0000ULL) >> 8) | \ - ((((npy_uint64)x)&0xff000000ULL) >> 24) | \ - ((((npy_uint64)x)&0xff00000000ULL) << 24) | \ - ((((npy_uint64)x)&0xff0000000000ULL) << 8) | \ - ((((npy_uint64)x)&0xff000000000000ULL) >> 8) | \ - ((((npy_uint64)x)&0xff00000000000000ULL) >> 24)) - -#define _NPY_SWAP_INPLACE2(x) { \ - char a = (x)[0]; (x)[0] = (x)[1]; (x)[1] = a; \ - } - -#define _NPY_SWAP_INPLACE4(x) { \ - char a = (x)[0]; (x)[0] = (x)[3]; (x)[3] = a; \ - a = (x)[1]; (x)[1] = (x)[2]; (x)[2] = a; \ - } - -#define _NPY_SWAP_INPLACE8(x) { \ - char a = (x)[0]; (x)[0] = (x)[7]; (x)[7] = a; \ - a = (x)[1]; (x)[1] = (x)[6]; (x)[6] = a; \ - a = (x)[2]; (x)[2] = (x)[5]; (x)[5] = a; \ - a = (x)[3]; (x)[3] = (x)[4]; (x)[4] = a; \ - } - -#define _NPY_SWAP_INPLACE16(x) { \ - char a = (x)[0]; (x)[0] = (x)[15]; 
(x)[15] = a; \ - a = (x)[1]; (x)[1] = (x)[14]; (x)[14] = a; \ - a = (x)[2]; (x)[2] = (x)[13]; (x)[13] = a; \ - a = (x)[3]; (x)[3] = (x)[12]; (x)[12] = a; \ - a = (x)[4]; (x)[4] = (x)[11]; (x)[11] = a; \ - a = (x)[5]; (x)[5] = (x)[10]; (x)[10] = a; \ - a = (x)[6]; (x)[6] = (x)[9]; (x)[9] = a; \ - a = (x)[7]; (x)[7] = (x)[8]; (x)[8] = a; \ - } - -/************* STRIDED COPYING/SWAPPING SPECIALIZED FUNCTIONS *************/ - -/**begin repeat - * #elsize = 1, 2, 4, 8, 16# - * #elsize_half = 0, 1, 2, 4, 8# - * #type = npy_uint8, npy_uint16, npy_uint32, npy_uint64, npy_uint128# - */ -/**begin repeat1 - * #oper = strided_to_strided, strided_to_contig, - * contig_to_strided, contig_to_contig# - * #src_contig = 0, 0, 1 ,1# - * #dst_contig = 0, 1, 0 ,1# - */ -/**begin repeat2 - * #swap = _NPY_NOP, _NPY_NOP, _NPY_SWAP_INPLACE, _NPY_SWAP, - * _NPY_SWAP_INPLACE, _NPY_SWAP_PAIR# - * #prefix = , _aligned, _swap, _aligned_swap, _swap_pair, _aligned_swap_pair# - * #is_aligned = 0, 1, 0, 1, 0, 1# - * #minelsize = 1, 1, 2, 2, 4, 4# - * #is_swap = 0, 0, 1, 1, 2, 2# - */ - -#if (@elsize@ >= @minelsize@) && \ - (@elsize@ > 1 || @is_aligned@) && \ - (!NPY_USE_UNALIGNED_ACCESS || @is_aligned@) - - -#if @is_swap@ || @src_contig@ == 0 || @dst_contig@ == 0 -static void -@prefix@_@oper@_size@elsize@(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ - /*printf("fn @prefix@_@oper@_size@elsize@\n");*/ - while (N > 0) { -#if @is_aligned@ - - /* aligned copy and swap */ -# if @elsize@ != 16 - (*((@type@ *)dst)) = @swap@@elsize@(*((@type@ *)src)); -# else -# if @is_swap@ == 0 - (*((npy_uint64 *)dst)) = (*((npy_uint64 *)src)); - (*((npy_uint64 *)dst + 1)) = (*((npy_uint64 *)src + 1)); -# elif @is_swap@ == 1 - (*((npy_uint64 *)dst)) = _NPY_SWAP8(*((npy_uint64 *)src + 1)); - (*((npy_uint64 *)dst + 1)) = _NPY_SWAP8(*((npy_uint64 *)src)); -# elif @is_swap@ == 2 - (*((npy_uint64 *)dst)) = 
_NPY_SWAP8(*((npy_uint64 *)src)); - (*((npy_uint64 *)dst + 1)) = _NPY_SWAP8(*((npy_uint64 *)src + 1)); -# endif -# endif - -#else - - /* unaligned copy and swap */ - memcpy(dst, src, @elsize@); -# if @is_swap@ == 1 - @swap@@elsize@(dst); -# elif @is_swap@ == 2 - @swap@@elsize_half@(dst); - @swap@@elsize_half@(dst + @elsize_half@); -# endif - -#endif - -#if @dst_contig@ - dst += @elsize@; -#else - dst += dst_stride; -#endif - -#if @src_contig@ - src += @elsize@; -#else - src += src_stride; -#endif - - --N; - } -} -#endif - - -/* specialized copy and swap for source stride 0 */ -#if (@src_contig@ == 0) && @is_aligned@ -static void -@prefix@_@oper@_size@elsize@_srcstride0(char *dst, - npy_intp dst_stride, - char *src, npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ -#if @elsize@ != 16 - @type@ temp = @swap@@elsize@(*((@type@ *)src)); -#else - npy_uint64 temp0, temp1; -# if @is_swap@ == 0 - temp0 = (*((npy_uint64 *)src)); - temp1 = (*((npy_uint64 *)src + 1)); -# elif @is_swap@ == 1 - temp0 = _NPY_SWAP8(*((npy_uint64 *)src + 1)); - temp1 = _NPY_SWAP8(*((npy_uint64 *)src)); -# elif @is_swap@ == 2 - temp0 = _NPY_SWAP8(*((npy_uint64 *)src)); - temp1 = _NPY_SWAP8(*((npy_uint64 *)src + 1)); -# endif -#endif - - while (N > 0) { -#if @elsize@ != 16 - *((@type@ *)dst) = temp; -#else - *((npy_uint64 *)dst) = temp0; - *((npy_uint64 *)dst + 1) = temp1; -#endif -#if @dst_contig@ - dst += @elsize@; -#else - dst += dst_stride; -#endif - --N; - } -} -#endif - -#endif/* @elsize@ >= @minelsize@ */ - -/**end repeat2**/ -/**end repeat1**/ -/**end repeat**/ - -static void -_strided_to_strided(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *NPY_UNUSED(data)) -{ - while (N > 0) { - memcpy(dst, src, src_itemsize); - dst += dst_stride; - src += src_stride; - --N; - } -} - -static void -_swap_strided_to_strided(char *dst, npy_intp dst_stride, - char *src, npy_intp 
src_stride, - npy_intp N, npy_intp src_itemsize, - void *NPY_UNUSED(data)) -{ - char *a, *b, c; - - while (N > 0) { - memcpy(dst, src, src_itemsize); - /* general in-place swap */ - a = dst; - b = dst + src_itemsize - 1; - while (a < b) { - c = *a; - *a = *b; - *b = c; - ++a; --b; - } - dst += dst_stride; - src += src_stride; - --N; - } -} - -static void -_swap_pair_strided_to_strided(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *NPY_UNUSED(data)) -{ - char *a, *b, c; - npy_intp itemsize_half = src_itemsize / 2; - - while (N > 0) { - memcpy(dst, src, src_itemsize); - /* general in-place swap */ - a = dst; - b = dst + itemsize_half - 1; - while (a < b) { - c = *a; - *a = *b; - *b = c; - ++a; --b; - } - /* general in-place swap */ - a = dst + itemsize_half; - b = dst + 2*itemsize_half - 1; - while (a < b) { - c = *a; - *a = *b; - *b = c; - ++a; --b; - } - dst += dst_stride; - src += src_stride; - --N; - } -} - -static void -_contig_to_contig(char *dst, npy_intp NPY_UNUSED(dst_stride), - char *src, npy_intp NPY_UNUSED(src_stride), - npy_intp N, npy_intp src_itemsize, - void *NPY_UNUSED(data)) -{ - memcpy(dst, src, src_itemsize*N); -} - - -NPY_NO_EXPORT PyArray_StridedTransferFn * -PyArray_GetStridedCopyFn(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, npy_intp itemsize) -{ -/* - * Skip the "unaligned" versions on CPUs which support unaligned - * memory accesses. 
- */ -#if !NPY_USE_UNALIGNED_ACCESS - if (aligned) { -#endif/*!NPY_USE_UNALIGNED_ACCESS*/ - - /* contiguous dst */ - if (itemsize != 0 && dst_stride == itemsize) { - /* constant src */ - if (src_stride == 0) { - switch (itemsize) { -/**begin repeat - * #elsize = 1, 2, 4, 8, 16# - */ - case @elsize@: - return - &_aligned_strided_to_contig_size@elsize@_srcstride0; -/**end repeat**/ - } - } - /* contiguous src */ - else if (src_stride == itemsize) { - return &_contig_to_contig; - } - /* general src */ - else { - switch (itemsize) { -/**begin repeat - * #elsize = 1, 2, 4, 8, 16# - */ - case @elsize@: - return &_aligned_strided_to_contig_size@elsize@; -/**end repeat**/ - } - } - - return &_strided_to_strided; - } - /* general dst */ - else { - /* constant src */ - if (src_stride == 0) { - switch (itemsize) { -/**begin repeat - * #elsize = 1, 2, 4, 8, 16# - */ - case @elsize@: - return - &_aligned_strided_to_strided_size@elsize@_srcstride0; -/**end repeat**/ - } - } - /* contiguous src */ - else if (src_stride == itemsize) { - switch (itemsize) { -/**begin repeat - * #elsize = 1, 2, 4, 8, 16# - */ - case @elsize@: - return &_aligned_contig_to_strided_size@elsize@; -/**end repeat**/ - } - - return &_strided_to_strided; - } - else { - switch (itemsize) { -/**begin repeat - * #elsize = 1, 2, 4, 8, 16# - */ - case @elsize@: - return &_aligned_strided_to_strided_size@elsize@; -/**end repeat**/ - } - } - } - -#if !NPY_USE_UNALIGNED_ACCESS - } - else { - /* contiguous dst */ - if (itemsize != 0 && dst_stride == itemsize) { - /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { - return &_contig_to_contig; - } - /* general src */ - else { - switch (itemsize) { - case 1: - return &_aligned_strided_to_contig_size1; -/**begin repeat - * #elsize = 2, 4, 8, 16# - */ - case @elsize@: - return &_strided_to_contig_size@elsize@; -/**end repeat**/ - } - } - - return &_strided_to_strided; - } - /* general dst */ - else { - /* contiguous src */ - if (itemsize != 0 && 
src_stride == itemsize) { - switch (itemsize) { - case 1: - return &_aligned_contig_to_strided_size1; -/**begin repeat - * #elsize = 2, 4, 8, 16# - */ - case @elsize@: - return &_contig_to_strided_size@elsize@; -/**end repeat**/ - } - - return &_strided_to_strided; - } - /* general src */ - else { - switch (itemsize) { - case 1: - return &_aligned_strided_to_strided_size1; -/**begin repeat - * #elsize = 2, 4, 8, 16# - */ - case @elsize@: - return &_strided_to_strided_size@elsize@; -/**end repeat**/ - } - } - } - } -#endif/*!NPY_USE_UNALIGNED_ACCESS*/ - - return &_strided_to_strided; -} - -/* - * PyArray_GetStridedCopySwapFn and PyArray_GetStridedCopySwapPairFn are - * nearly identical, so can do a repeat for them. - */ -/**begin repeat - * #function = PyArray_GetStridedCopySwapFn, PyArray_GetStridedCopySwapPairFn# - * #tag = , _pair# - * #not_pair = 1, 0# - */ - -NPY_NO_EXPORT PyArray_StridedTransferFn * -@function@(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, npy_intp itemsize) -{ -/* - * Skip the "unaligned" versions on CPUs which support unaligned - * memory accesses. 
- */ -#if !NPY_USE_UNALIGNED_ACCESS - if (aligned) { -#endif/*!NPY_USE_UNALIGNED_ACCESS*/ - - /* contiguous dst */ - if (itemsize != 0 && dst_stride == itemsize) { - /* constant src */ - if (src_stride == 0) { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return - &_aligned_swap@tag@_strided_to_contig_size@elsize@_srcstride0; -#endif -/**end repeat1**/ - } - } - /* contiguous src */ - else if (src_stride == itemsize) { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_aligned_swap@tag@_contig_to_contig_size@elsize@; -#endif -/**end repeat1**/ - } - } - /* general src */ - else { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_aligned_swap@tag@_strided_to_contig_size@elsize@; -#endif -/**end repeat1**/ - } - } - } - /* general dst */ - else { - /* constant src */ - if (src_stride == 0) { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return - &_aligned_swap@tag@_strided_to_strided_size@elsize@_srcstride0; -#endif -/**end repeat1**/ - } - } - /* contiguous src */ - else if (src_stride == itemsize) { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_aligned_swap@tag@_contig_to_strided_size@elsize@; -#endif -/**end repeat1**/ - } - - return &_strided_to_strided; - } - else { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_aligned_swap@tag@_strided_to_strided_size@elsize@; -#endif -/**end repeat1**/ - } - } - } - -#if !NPY_USE_UNALIGNED_ACCESS - } - else { - /* contiguous dst */ - if (itemsize != 0 && dst_stride == itemsize) { - /* contiguous src */ - if (itemsize != 0 && src_stride == 
itemsize) { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_swap@tag@_contig_to_contig_size@elsize@; -#endif -/**end repeat1**/ - } - } - /* general src */ - else { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_swap@tag@_strided_to_contig_size@elsize@; -#endif -/**end repeat1**/ - } - } - - return &_strided_to_strided; - } - /* general dst */ - else { - /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_swap@tag@_contig_to_strided_size@elsize@; -#endif -/**end repeat1**/ - } - - return &_strided_to_strided; - } - /* general src */ - else { - switch (itemsize) { -/**begin repeat1 - * #elsize = 2, 4, 8, 16# - */ -#if @not_pair@ || @elsize@ > 2 - case @elsize@: - return &_swap@tag@_strided_to_strided_size@elsize@; -#endif -/**end repeat1**/ - } - } - } - } -#endif/*!NPY_USE_UNALIGNED_ACCESS*/ - - return &_swap@tag@_strided_to_strided; -} - -/**end repeat**/ - -/************* STRIDED CASTING SPECIALIZED FUNCTIONS *************/ - -/**begin repeat - * - * #NAME1 = BOOL, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #name1 = bool, - * ubyte, ushort, uint, ulong, ulonglong, - * byte, short, int, long, longlong, - * half, float, double, longdouble, - * cfloat, cdouble, clongdouble# - * #rname1 = bool, - * ubyte, ushort, uint, ulong, ulonglong, - * byte, short, int, long, longlong, - * half, float, double, longdouble, - * float, double, longdouble# - * #is_bool1 = 1, 0*17# - * #is_half1 = 0*11, 1, 0*6# - * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# - * #is_double1 = 0*13, 1, 0, 0, 1, 0# - * #is_complex1 = 0*15, 1*3# - */ - -/**begin repeat1 - * - * #NAME2 = 
BOOL, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #name2 = bool, - * ubyte, ushort, uint, ulong, ulonglong, - * byte, short, int, long, longlong, - * half, float, double, longdouble, - * cfloat, cdouble, clongdouble# - * #rname2 = bool, - * ubyte, ushort, uint, ulong, ulonglong, - * byte, short, int, long, longlong, - * half, float, double, longdouble, - * float, double, longdouble# - * #is_bool2 = 1, 0*17# - * #is_half2 = 0*11, 1, 0*6# - * #is_float2 = 0*12, 1, 0, 0, 1, 0, 0# - * #is_double2 = 0*13, 1, 0, 0, 1, 0# - * #is_complex2 = 0*15, 1*3# - */ - -/**begin repeat2 - * #prefix = _aligned,,_aligned_contig,_contig# - * #aligned = 1,0,1,0# - * #contig = 0,0,1,1# - */ - -#if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) - -/* For half types, don't use actual double/float types in conversion */ -#if @is_half1@ || @is_half2@ - -# if @is_float1@ -# define _TYPE1 npy_uint32 -# elif @is_double1@ -# define _TYPE1 npy_uint64 -# else -# define _TYPE1 npy_@rname1@ -# endif - -# if @is_float2@ -# define _TYPE2 npy_uint32 -# elif @is_double2@ -# define _TYPE2 npy_uint64 -# else -# define _TYPE2 npy_@rname2@ -# endif - -#else - -#define _TYPE1 npy_@rname1@ -#define _TYPE2 npy_@rname2@ - -#endif - -/* Determine an appropriate casting conversion function */ -#if @is_half1@ - -# if @is_float2@ -# define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) -# elif @is_double2@ -# define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) -# elif @is_half2@ -# define _CONVERT_FN(x) (x) -# elif @is_bool2@ -# define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) -# else -# define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) -# endif - -#elif @is_half2@ - -# if @is_float1@ -# define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) -# elif @is_double1@ -# define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) -# else -# define _CONVERT_FN(x) npy_float_to_half((float)x) -# endif - -#else - -# if 
@is_bool2@ -# define _CONVERT_FN(x) ((npy_bool)(x != 0)) -# else -# define _CONVERT_FN(x) ((_TYPE2)x) -# endif - -#endif - -static void -@prefix@_cast_@name1@_to_@name2@( - char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp NPY_UNUSED(src_itemsize), - void *NPY_UNUSED(data)) -{ -#if @is_complex1@ - _TYPE1 src_value[2]; -#elif !@aligned@ - _TYPE1 src_value; -#endif -#if @is_complex2@ - _TYPE2 dst_value[2]; -#elif !@aligned@ - _TYPE2 dst_value; -#endif - - /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/ - - while (N--) { -#if @aligned@ -# if @is_complex1@ - src_value[0] = ((_TYPE1 *)src)[0]; - src_value[1] = ((_TYPE1 *)src)[1]; -# elif !@aligned@ - src_value = *((_TYPE1 *)src); -# endif -#else - memcpy(&src_value, src, sizeof(src_value)); -#endif - -/* Do the cast */ -#if @is_complex1@ -# if @is_complex2@ - dst_value[0] = _CONVERT_FN(src_value[0]); - dst_value[1] = _CONVERT_FN(src_value[1]); -# elif !@aligned@ - dst_value = _CONVERT_FN(src_value[0]); -# else - *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]); -# endif -#else -# if @is_complex2@ -# if !@aligned@ - dst_value[0] = _CONVERT_FN(src_value); -# else - dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src); -# endif - dst_value[1] = 0; -# elif !@aligned@ - dst_value = _CONVERT_FN(src_value); -# else - *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src); -# endif -#endif - -#if @aligned@ -# if @is_complex2@ - ((_TYPE2 *)dst)[0] = dst_value[0]; - ((_TYPE2 *)dst)[1] = dst_value[1]; -# elif !@aligned@ - *((_TYPE2 *)dst) = dst_value; -# endif -#else - memcpy(dst, &dst_value, sizeof(dst_value)); -#endif - -#if @contig@ - dst += sizeof(npy_@name2@); - src += sizeof(npy_@name1@); -#else - dst += dst_stride; - src += src_stride; -#endif - } -} - -#undef _CONVERT_FN -#undef _TYPE2 -#undef _TYPE1 - -#endif - -/**end repeat2**/ - -/**end repeat1**/ - -/**end repeat**/ - -NPY_NO_EXPORT PyArray_StridedTransferFn * -PyArray_GetStridedNumericCastFn(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, 
- int src_type_num, int dst_type_num) -{ - switch (src_type_num) { -/**begin repeat - * - * #NAME1 = BOOL, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #name1 = bool, - * ubyte, ushort, uint, ulong, ulonglong, - * byte, short, int, long, longlong, - * half, float, double, longdouble, - * cfloat, cdouble, clongdouble# - */ - - case NPY_@NAME1@: - /*printf("test fn %d - second %d\n", NPY_@NAME1@, dst_type_num);*/ - switch (dst_type_num) { -/**begin repeat1 - * - * #NAME2 = BOOL, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #name2 = bool, - * ubyte, ushort, uint, ulong, ulonglong, - * byte, short, int, long, longlong, - * half, float, double, longdouble, - * cfloat, cdouble, clongdouble# - */ - - case NPY_@NAME2@: - /*printf("ret fn %d %d\n", NPY_@NAME1@, NPY_@NAME2@);*/ -# if NPY_USE_UNALIGNED_ACCESS - if (src_stride == sizeof(npy_@name1@) && - dst_stride == sizeof(npy_@name2@)) { - return &_aligned_contig_cast_@name1@_to_@name2@; - } - else { - return &_aligned_cast_@name1@_to_@name2@; - } -# else - if (src_stride == sizeof(npy_@name1@) && - dst_stride == sizeof(npy_@name2@)) { - return aligned ? - &_aligned_contig_cast_@name1@_to_@name2@ : - &_contig_cast_@name1@_to_@name2@; - } - else { - return aligned ? 
&_aligned_cast_@name1@_to_@name2@ : - &_cast_@name1@_to_@name2@; - } -# endif - -/**end repeat1**/ - } - /*printf("switched test fn %d - second %d\n", NPY_@NAME1@, dst_type_num);*/ - -/**end repeat**/ - } - - return NULL; -} - - -/************** STRIDED TRANSFER FUNCTION MEMORY MANAGEMENT **************/ - -typedef void (*_npy_stridedtransfer_dealloc)(void *); -NPY_NO_EXPORT void -PyArray_FreeStridedTransferData(void *transferdata) -{ - if (transferdata != NULL) { - _npy_stridedtransfer_dealloc dealloc = - *((_npy_stridedtransfer_dealloc *)transferdata); - dealloc(transferdata); - } -} - -typedef void *(*_npy_stridedtransfer_copy)(void *); -NPY_NO_EXPORT void * -PyArray_CopyStridedTransferData(void *transferdata) -{ - if (transferdata != NULL) { - _npy_stridedtransfer_copy copy = - *((_npy_stridedtransfer_copy *)transferdata + 1); - return copy(transferdata); - } - - return NULL; -} - -/****************** PRIMITIVE FLAT TO/FROM NDIM FUNCTIONS ******************/ - -NPY_NO_EXPORT npy_intp -PyArray_TransferNDimToStrided(npy_intp ndim, - char *dst, npy_intp dst_stride, - char *src, npy_intp *src_strides, npy_intp src_strides_inc, - npy_intp *coords, npy_intp coords_inc, - npy_intp *shape, npy_intp shape_inc, - npy_intp count, npy_intp src_itemsize, - PyArray_StridedTransferFn *stransfer, - void *data) -{ - npy_intp i, M, N, coord0, shape0, src_stride0, coord1, shape1, src_stride1; - - /* Finish off dimension 0 */ - coord0 = coords[0]; - shape0 = shape[0]; - src_stride0 = src_strides[0]; - N = shape0 - coord0; - if (N >= count) { - stransfer(dst, dst_stride, src, src_stride0, count, src_itemsize, data); - return 0; - } - stransfer(dst, dst_stride, src, src_stride0, N, src_itemsize, data); - count -= N; - - /* If it's 1-dimensional, there's no more to copy */ - if (ndim == 1) { - return count; - } - - /* Adjust the src and dst pointers */ - coord1 = (coords + coords_inc)[0]; - shape1 = (shape + shape_inc)[0]; - src_stride1 = (src_strides + src_strides_inc)[0]; - src = 
src - coord0*src_stride0 + src_stride1; - dst += N*dst_stride; - - /* Finish off dimension 1 */ - M = (shape1 - coord1 - 1); - N = shape0*M; - for (i = 0; i < M; ++i) { - if (shape0 >= count) { - stransfer(dst, dst_stride, src, src_stride0, - count, src_itemsize, data); - return 0; - } - else { - stransfer(dst, dst_stride, src, src_stride0, - shape0, src_itemsize, data); - } - count -= shape0; - src += src_stride1; - dst += shape0*dst_stride; - } - - /* If it's 2-dimensional, there's no more to copy */ - if (ndim == 2) { - return count; - } - - /* General-case loop for everything else */ - else { - /* Iteration structure for dimensions 2 and up */ - struct { - npy_intp coord, shape, src_stride; - } it[NPY_MAXDIMS]; - - /* Copy the coordinates and shape */ - coords += 2*coords_inc; - shape += 2*shape_inc; - src_strides += 2*src_strides_inc; - for (i = 0; i < ndim-2; ++i) { - it[i].coord = coords[0]; - it[i].shape = shape[0]; - it[i].src_stride = src_strides[0]; - coords += coords_inc; - shape += shape_inc; - src_strides += src_strides_inc; - } - - for (;;) { - /* Adjust the src pointer from the dimension 0 and 1 loop */ - src = src - shape1*src_stride1; - - /* Increment to the next coordinate */ - for (i = 0; i < ndim-2; ++i) { - src += it[i].src_stride; - if (++it[i].coord >= it[i].shape) { - it[i].coord = 0; - src -= it[i].src_stride*it[i].shape; - } - else { - break; - } - } - /* If the last dimension rolled over, we're done */ - if (i == ndim-2) { - return count; - } - - /* A loop for dimensions 0 and 1 */ - for (i = 0; i < shape1; ++i) { - if (shape0 >= count) { - stransfer(dst, dst_stride, src, src_stride0, - count, src_itemsize, data); - return 0; - } - else { - stransfer(dst, dst_stride, src, src_stride0, - shape0, src_itemsize, data); - } - count -= shape0; - src += src_stride1; - dst += shape0*dst_stride; - } - } - } -} - -NPY_NO_EXPORT npy_intp -PyArray_TransferStridedToNDim(npy_intp ndim, - char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc, - 
char *src, npy_intp src_stride, - npy_intp *coords, npy_intp coords_inc, - npy_intp *shape, npy_intp shape_inc, - npy_intp count, npy_intp src_itemsize, - PyArray_StridedTransferFn *stransfer, - void *data) -{ - npy_intp i, M, N, coord0, shape0, dst_stride0, coord1, shape1, dst_stride1; - - /* Finish off dimension 0 */ - coord0 = coords[0]; - shape0 = shape[0]; - dst_stride0 = dst_strides[0]; - N = shape0 - coord0; - if (N >= count) { - stransfer(dst, dst_stride0, src, src_stride, count, src_itemsize, data); - return 0; - } - stransfer(dst, dst_stride0, src, src_stride, N, src_itemsize, data); - count -= N; - - /* If it's 1-dimensional, there's no more to copy */ - if (ndim == 1) { - return count; - } - - /* Adjust the src and dst pointers */ - coord1 = (coords + coords_inc)[0]; - shape1 = (shape + shape_inc)[0]; - dst_stride1 = (dst_strides + dst_strides_inc)[0]; - dst = dst - coord0*dst_stride0 + dst_stride1; - src += N*src_stride; - - /* Finish off dimension 1 */ - M = (shape1 - coord1 - 1); - N = shape0*M; - for (i = 0; i < M; ++i) { - if (shape0 >= count) { - stransfer(dst, dst_stride0, src, src_stride, - count, src_itemsize, data); - return 0; - } - else { - stransfer(dst, dst_stride0, src, src_stride, - shape0, src_itemsize, data); - } - count -= shape0; - dst += dst_stride1; - src += shape0*src_stride; - } - - /* If it's 2-dimensional, there's no more to copy */ - if (ndim == 2) { - return count; - } - - /* General-case loop for everything else */ - else { - /* Iteration structure for dimensions 2 and up */ - struct { - npy_intp coord, shape, dst_stride; - } it[NPY_MAXDIMS]; - - /* Copy the coordinates and shape */ - coords += 2*coords_inc; - shape += 2*shape_inc; - dst_strides += 2*dst_strides_inc; - for (i = 0; i < ndim-2; ++i) { - it[i].coord = coords[0]; - it[i].shape = shape[0]; - it[i].dst_stride = dst_strides[0]; - coords += coords_inc; - shape += shape_inc; - dst_strides += dst_strides_inc; - } - - for (;;) { - /* Adjust the dst pointer from the 
dimension 0 and 1 loop */ - dst = dst - shape1*dst_stride1; - - /* Increment to the next coordinate */ - for (i = 0; i < ndim-2; ++i) { - dst += it[i].dst_stride; - if (++it[i].coord >= it[i].shape) { - it[i].coord = 0; - dst -= it[i].dst_stride*it[i].shape; - } - else { - break; - } - } - /* If the last dimension rolled over, we're done */ - if (i == ndim-2) { - return count; - } - - /* A loop for dimensions 0 and 1 */ - for (i = 0; i < shape1; ++i) { - if (shape0 >= count) { - stransfer(dst, dst_stride0, src, src_stride, - count, src_itemsize, data); - return 0; - } - else { - stransfer(dst, dst_stride0, src, src_stride, - shape0, src_itemsize, data); - } - count -= shape0; - dst += dst_stride1; - src += shape0*src_stride; - } - } - } -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/mapping.c b/numpy-1.6.2/numpy/core/src/multiarray/mapping.c deleted file mode 100644 index 66117cfa39..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/mapping.c +++ /dev/null @@ -1,1711 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -/*#include */ -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "iterators.h" -#include "mapping.h" - -#define SOBJ_NOTFANCY 0 -#define SOBJ_ISFANCY 1 -#define SOBJ_BADARRAY 2 -#define SOBJ_TOOMANY 3 -#define SOBJ_LISTTUP 4 - -static PyObject * -array_subscript_simple(PyArrayObject *self, PyObject *op); - -/****************************************************************************** - *** IMPLEMENT MAPPING PROTOCOL *** - *****************************************************************************/ - -NPY_NO_EXPORT Py_ssize_t -array_length(PyArrayObject *self) -{ - if (self->nd != 0) { - return self->dimensions[0]; - } else { - PyErr_SetString(PyExc_TypeError, "len() of unsized object"); - return -1; - } -} - -NPY_NO_EXPORT PyObject * -array_big_item(PyArrayObject *self, intp i) -{ - char 
*item; - PyArrayObject *r; - - if(self->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed"); - return NULL; - } - if ((item = index2ptr(self, i)) == NULL) { - return NULL; - } - Py_INCREF(self->descr); - r = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), - self->descr, - self->nd-1, - self->dimensions+1, - self->strides+1, item, - self->flags, - (PyObject *)self); - if (r == NULL) { - return NULL; - } - Py_INCREF(self); - r->base = (PyObject *)self; - PyArray_UpdateFlags(r, CONTIGUOUS | FORTRAN); - return (PyObject *)r; -} - -NPY_NO_EXPORT int -_array_ass_item(PyArrayObject *self, Py_ssize_t i, PyObject *v) -{ - return array_ass_big_item(self, (intp) i, v); -} -/* contains optimization for 1-d arrays */ -NPY_NO_EXPORT PyObject * -array_item_nice(PyArrayObject *self, Py_ssize_t i) -{ - if (self->nd == 1) { - char *item; - if ((item = index2ptr(self, i)) == NULL) { - return NULL; - } - return PyArray_Scalar(item, self->descr, (PyObject *)self); - } - else { - return PyArray_Return( - (PyArrayObject *) array_big_item(self, (intp) i)); - } -} - -NPY_NO_EXPORT int -array_ass_big_item(PyArrayObject *self, intp i, PyObject *v) -{ - PyArrayObject *tmp; - char *item; - int ret; - - if (v == NULL) { - PyErr_SetString(PyExc_ValueError, - "can't delete array elements"); - return -1; - } - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "array is not writeable"); - return -1; - } - if (self->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); - return -1; - } - - - if (self->nd > 1) { - if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL) { - return -1; - } - ret = PyArray_CopyObject(tmp, v); - Py_DECREF(tmp); - return ret; - } - - if ((item = index2ptr(self, i)) == NULL) { - return -1; - } - if (self->descr->f->setitem(v, item, self) == -1) { - return -1; - } - return 0; -} - -/* -------------------------------------------------------------- */ - -static void 
-_swap_axes(PyArrayMapIterObject *mit, PyArrayObject **ret, int getmap) -{ - PyObject *new; - int n1, n2, n3, val, bnd; - int i; - PyArray_Dims permute; - intp d[MAX_DIMS]; - PyArrayObject *arr; - - permute.ptr = d; - permute.len = mit->nd; - - /* - * arr might not have the right number of dimensions - * and need to be reshaped first by pre-pending ones - */ - arr = *ret; - if (arr->nd != mit->nd) { - for (i = 1; i <= arr->nd; i++) { - permute.ptr[mit->nd-i] = arr->dimensions[arr->nd-i]; - } - for (i = 0; i < mit->nd-arr->nd; i++) { - permute.ptr[i] = 1; - } - new = PyArray_Newshape(arr, &permute, PyArray_ANYORDER); - Py_DECREF(arr); - *ret = (PyArrayObject *)new; - if (new == NULL) { - return; - } - } - - /* - * Setting and getting need to have different permutations. - * On the get we are permuting the returned object, but on - * setting we are permuting the object-to-be-set. - * The set permutation is the inverse of the get permutation. - */ - - /* - * For getting the array the tuple for transpose is - * (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1) - * n1 is the number of dimensions of the broadcast index array - * n2 is the number of dimensions skipped at the start - * n3 is the number of dimensions of the result - */ - - /* - * For setting the array the tuple for transpose is - * (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1) - */ - n1 = mit->iters[0]->nd_m1 + 1; - n2 = mit->iteraxes[0]; - n3 = mit->nd; - - /* use n1 as the boundary if getting but n2 if setting */ - bnd = getmap ? 
n1 : n2; - val = bnd; - i = 0; - while (val < n1 + n2) { - permute.ptr[i++] = val++; - } - val = 0; - while (val < bnd) { - permute.ptr[i++] = val++; - } - val = n1 + n2; - while (val < n3) { - permute.ptr[i++] = val++; - } - new = PyArray_Transpose(*ret, &permute); - Py_DECREF(*ret); - *ret = (PyArrayObject *)new; -} - -static PyObject * -PyArray_GetMap(PyArrayMapIterObject *mit) -{ - - PyArrayObject *ret, *temp; - PyArrayIterObject *it; - npy_intp index; - int swap; - PyArray_CopySwapFunc *copyswap; - - /* Unbound map iterator --- Bind should have been called */ - if (mit->ait == NULL) { - return NULL; - } - - /* This relies on the map iterator object telling us the shape - of the new array in nd and dimensions. - */ - temp = mit->ait->ao; - Py_INCREF(temp->descr); - ret = (PyArrayObject *) - PyArray_NewFromDescr(Py_TYPE(temp), - temp->descr, - mit->nd, mit->dimensions, - NULL, NULL, - PyArray_ISFORTRAN(temp), - (PyObject *)temp); - if (ret == NULL) { - return NULL; - } - - /* - * Now just iterate through the new array filling it in - * with the next object from the original array as - * defined by the mapping iterator - */ - - if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret)) == NULL) { - Py_DECREF(ret); - return NULL; - } - index = it->size; - swap = (PyArray_ISNOTSWAPPED(temp) != PyArray_ISNOTSWAPPED(ret)); - copyswap = ret->descr->f->copyswap; - PyArray_MapIterReset(mit); - while (index--) { - copyswap(it->dataptr, mit->dataptr, swap, ret); - PyArray_MapIterNext(mit); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - - /* check for consecutive axes */ - if ((mit->subspace != NULL) && (mit->consec)) { - if (mit->iteraxes[0] > 0) { /* then we need to swap */ - _swap_axes(mit, &ret, 1); - } - } - return (PyObject *)ret; -} - -static int -PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op) -{ - PyObject *arr = NULL; - PyArrayIterObject *it; - npy_intp index; - int swap; - PyArray_CopySwapFunc *copyswap; - PyArray_Descr *descr; - - /* Unbound Map 
Iterator */ - if (mit->ait == NULL) { - return -1; - } - descr = mit->ait->ao->descr; - Py_INCREF(descr); - arr = PyArray_FromAny(op, descr, 0, 0, FORCECAST, NULL); - if (arr == NULL) { - return -1; - } - if ((mit->subspace != NULL) && (mit->consec)) { - if (mit->iteraxes[0] > 0) { /* then we need to swap */ - _swap_axes(mit, (PyArrayObject **)&arr, 0); - if (arr == NULL) { - return -1; - } - } - } - - /* Be sure values array is "broadcastable" - to shape of mit->dimensions, mit->nd */ - - if ((it = (PyArrayIterObject *)\ - PyArray_BroadcastToShape(arr, mit->dimensions, mit->nd))==NULL) { - Py_DECREF(arr); - return -1; - } - - index = mit->size; - swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != - (PyArray_ISNOTSWAPPED(arr))); - copyswap = PyArray_DESCR(arr)->f->copyswap; - PyArray_MapIterReset(mit); - /* Need to decref arrays with objects in them */ - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)) { - while (index--) { - PyArray_Item_INCREF(it->dataptr, PyArray_DESCR(arr)); - PyArray_Item_XDECREF(mit->dataptr, PyArray_DESCR(arr)); - memmove(mit->dataptr, it->dataptr, PyArray_ITEMSIZE(arr)); - /* ignored unless VOID array with object's */ - if (swap) { - copyswap(mit->dataptr, NULL, swap, arr); - } - PyArray_MapIterNext(mit); - PyArray_ITER_NEXT(it); - } - Py_DECREF(arr); - Py_DECREF(it); - return 0; - } - while(index--) { - memmove(mit->dataptr, it->dataptr, PyArray_ITEMSIZE(arr)); - if (swap) { - copyswap(mit->dataptr, NULL, swap, arr); - } - PyArray_MapIterNext(mit); - PyArray_ITER_NEXT(it); - } - Py_DECREF(arr); - Py_DECREF(it); - return 0; -} - -NPY_NO_EXPORT int -count_new_axes_0d(PyObject *tuple) -{ - int i, argument_count; - int ellipsis_count = 0; - int newaxis_count = 0; - - argument_count = PyTuple_GET_SIZE(tuple); - for (i = 0; i < argument_count; ++i) { - PyObject *arg = PyTuple_GET_ITEM(tuple, i); - if (arg == Py_Ellipsis && !ellipsis_count) { - ellipsis_count++; - } - else if (arg == Py_None) { - newaxis_count++; - } - else { - break; - } - } - if 
(i < argument_count) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can only use a single ()" - " or a list of newaxes (and a single ...)" - " as an index"); - return -1; - } - if (newaxis_count > MAX_DIMS) { - PyErr_SetString(PyExc_IndexError, "too many dimensions"); - return -1; - } - return newaxis_count; -} - -NPY_NO_EXPORT PyObject * -add_new_axes_0d(PyArrayObject *arr, int newaxis_count) -{ - PyArrayObject *other; - intp dimensions[MAX_DIMS]; - int i; - - for (i = 0; i < newaxis_count; ++i) { - dimensions[i] = 1; - } - Py_INCREF(arr->descr); - if ((other = (PyArrayObject *) - PyArray_NewFromDescr(Py_TYPE(arr), arr->descr, - newaxis_count, dimensions, - NULL, arr->data, - arr->flags, - (PyObject *)arr)) == NULL) - return NULL; - other->base = (PyObject *)arr; - Py_INCREF(arr); - return (PyObject *)other; -} - - -/* This checks the args for any fancy indexing objects */ - -static int -fancy_indexing_check(PyObject *args) -{ - int i, n; - PyObject *obj; - int retval = SOBJ_NOTFANCY; - - if (PyTuple_Check(args)) { - n = PyTuple_GET_SIZE(args); - if (n >= MAX_DIMS) { - return SOBJ_TOOMANY; - } - for (i = 0; i < n; i++) { - obj = PyTuple_GET_ITEM(args,i); - if (PyArray_Check(obj)) { - if (PyArray_ISINTEGER(obj) || - PyArray_ISBOOL(obj)) { - retval = SOBJ_ISFANCY; - } - else { - retval = SOBJ_BADARRAY; - break; - } - } - else if (PySequence_Check(obj)) { - retval = SOBJ_ISFANCY; - } - } - } - else if (PyArray_Check(args)) { - if ((PyArray_TYPE(args)==PyArray_BOOL) || - (PyArray_ISINTEGER(args))) { - return SOBJ_ISFANCY; - } - else { - return SOBJ_BADARRAY; - } - } - else if (PySequence_Check(args)) { - /* - * Sequences < MAX_DIMS with any slice objects - * or newaxis, or Ellipsis is considered standard - * as long as there are also no Arrays and or additional - * sequences embedded. 
- */ - retval = SOBJ_ISFANCY; - n = PySequence_Size(args); - if (n < 0 || n >= MAX_DIMS) { - return SOBJ_ISFANCY; - } - for (i = 0; i < n; i++) { - obj = PySequence_GetItem(args, i); - if (obj == NULL) { - return SOBJ_ISFANCY; - } - if (PyArray_Check(obj)) { - if (PyArray_ISINTEGER(obj) || PyArray_ISBOOL(obj)) { - retval = SOBJ_LISTTUP; - } - else { - retval = SOBJ_BADARRAY; - } - } - else if (PySequence_Check(obj)) { - retval = SOBJ_LISTTUP; - } - else if (PySlice_Check(obj) || obj == Py_Ellipsis || - obj == Py_None) { - retval = SOBJ_NOTFANCY; - } - Py_DECREF(obj); - if (retval > SOBJ_ISFANCY) { - return retval; - } - } - } - return retval; -} - -/* - * Called when treating array object like a mapping -- called first from - * Python when using a[object] unless object is a standard slice object - * (not an extended one). - * - * There are two situations: - * - * 1 - the subscript is a standard view and a reference to the - * array can be returned - * - * 2 - the subscript uses Boolean masks or integer indexing and - * therefore a new array is created and returned. 
- */ - -NPY_NO_EXPORT PyObject * -array_subscript_simple(PyArrayObject *self, PyObject *op) -{ - intp dimensions[MAX_DIMS], strides[MAX_DIMS]; - intp offset; - int nd; - PyArrayObject *other; - intp value; - - value = PyArray_PyIntAsIntp(op); - if (!PyErr_Occurred()) { - return array_big_item(self, value); - } - PyErr_Clear(); - - /* Standard (view-based) Indexing */ - if ((nd = parse_index(self, op, dimensions, strides, &offset)) == -1) { - return NULL; - } - /* This will only work if new array will be a view */ - Py_INCREF(self->descr); - if ((other = (PyArrayObject *) - PyArray_NewFromDescr(Py_TYPE(self), self->descr, - nd, dimensions, - strides, self->data+offset, - self->flags, - (PyObject *)self)) == NULL) { - return NULL; - } - other->base = (PyObject *)self; - Py_INCREF(self); - PyArray_UpdateFlags(other, UPDATE_ALL); - return (PyObject *)other; -} - -NPY_NO_EXPORT PyObject * -array_subscript(PyArrayObject *self, PyObject *op) -{ - int nd, fancy; - PyArrayObject *other; - PyArrayMapIterObject *mit; - PyObject *obj; - - if (PyString_Check(op) || PyUnicode_Check(op)) { - PyObject *temp; - - if (self->descr->names) { - obj = PyDict_GetItem(self->descr->fields, op); - if (obj != NULL) { - PyArray_Descr *descr; - int offset; - PyObject *title; - - if (PyArg_ParseTuple(obj, "Oi|O", &descr, &offset, &title)) { - Py_INCREF(descr); - return PyArray_GetField(self, descr, offset); - } - } - } - - temp = op; - if (PyUnicode_Check(op)) { - temp = PyUnicode_AsUnicodeEscapeString(op); - } - PyErr_Format(PyExc_ValueError, - "field named %s not found.", - PyBytes_AsString(temp)); - if (temp != op) { - Py_DECREF(temp); - } - return NULL; - } - - /* Check for multiple field access */ - if (self->descr->names && PySequence_Check(op) && !PyTuple_Check(op)) { - int seqlen, i; - seqlen = PySequence_Size(op); - for (i = 0; i < seqlen; i++) { - obj = PySequence_GetItem(op, i); - if (!PyString_Check(obj) && !PyUnicode_Check(obj)) { - Py_DECREF(obj); - break; - } - Py_DECREF(obj); - 
} - /* - * extract multiple fields if all elements in sequence - * are either string or unicode (i.e. no break occurred). - */ - fancy = ((seqlen > 0) && (i == seqlen)); - if (fancy) { - PyObject *_numpy_internal; - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - obj = PyObject_CallMethod(_numpy_internal, - "_index_fields", "OO", self, op); - Py_DECREF(_numpy_internal); - return obj; - } - } - - if (op == Py_Ellipsis) { - Py_INCREF(self); - return (PyObject *)self; - } - - if (self->nd == 0) { - if (op == Py_None) { - return add_new_axes_0d(self, 1); - } - if (PyTuple_Check(op)) { - if (0 == PyTuple_GET_SIZE(op)) { - Py_INCREF(self); - return (PyObject *)self; - } - if ((nd = count_new_axes_0d(op)) == -1) { - return NULL; - } - return add_new_axes_0d(self, nd); - } - /* Allow Boolean mask selection also */ - if ((PyArray_Check(op) && (PyArray_DIMS(op)==0) - && PyArray_ISBOOL(op))) { - if (PyObject_IsTrue(op)) { - Py_INCREF(self); - return (PyObject *)self; - } - else { - intp oned = 0; - Py_INCREF(self->descr); - return PyArray_NewFromDescr(Py_TYPE(self), - self->descr, - 1, &oned, - NULL, NULL, - NPY_DEFAULT, - NULL); - } - } - PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed."); - return NULL; - } - - fancy = fancy_indexing_check(op); - if (fancy != SOBJ_NOTFANCY) { - int oned; - - oned = ((self->nd == 1) && - !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1)); - - /* wrap arguments into a mapiter object */ - mit = (PyArrayMapIterObject *) PyArray_MapIterNew(op, oned, fancy); - if (mit == NULL) { - return NULL; - } - if (oned) { - PyArrayIterObject *it; - PyObject *rval; - it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); - if (it == NULL) { - Py_DECREF(mit); - return NULL; - } - rval = iter_subscript(it, mit->indexobj); - Py_DECREF(it); - Py_DECREF(mit); - return rval; - } - PyArray_MapIterBind(mit, self); - other = (PyArrayObject *)PyArray_GetMap(mit); - 
Py_DECREF(mit); - return (PyObject *)other; - } - - return array_subscript_simple(self, op); -} - - -/* - * Another assignment hacked by using CopyObject. - * This only works if subscript returns a standard view. - * Again there are two cases. In the first case, PyArray_CopyObject - * can be used. In the second case, a new indexing function has to be - * used. - */ - -static int -array_ass_sub_simple(PyArrayObject *self, PyObject *index, PyObject *op) -{ - int ret; - PyArrayObject *tmp; - intp value; - - value = PyArray_PyIntAsIntp(index); - if (!error_converting(value)) { - return array_ass_big_item(self, value, op); - } - PyErr_Clear(); - - /* Rest of standard (view-based) indexing */ - - if (PyArray_CheckExact(self)) { - tmp = (PyArrayObject *)array_subscript_simple(self, index); - if (tmp == NULL) { - return -1; - } - } - else { - PyObject *tmp0; - - /* - * Note: this code path should never be reached with an index that - * produces scalars -- those are handled earlier in array_ass_sub - */ - - tmp0 = PyObject_GetItem((PyObject *)self, index); - if (tmp0 == NULL) { - return -1; - } - if (!PyArray_Check(tmp0)) { - PyErr_SetString(PyExc_RuntimeError, - "Getitem not returning array."); - Py_DECREF(tmp0); - return -1; - } - tmp = (PyArrayObject *)tmp0; - } - - if (PyArray_ISOBJECT(self) && (tmp->nd == 0)) { - ret = tmp->descr->f->setitem(op, tmp->data, tmp); - } - else { - ret = PyArray_CopyObject(tmp, op); - } - Py_DECREF(tmp); - return ret; -} - - -/* return -1 if tuple-object seq is not a tuple of integers. 
- otherwise fill vals with converted integers -*/ -static int -_tuple_of_integers(PyObject *seq, intp *vals, int maxvals) -{ - int i; - PyObject *obj; - intp temp; - - for(i=0; i 0) - || PyList_Check(obj)) { - return -1; - } - temp = PyArray_PyIntAsIntp(obj); - if (error_converting(temp)) { - return -1; - } - vals[i] = temp; - } - return 0; -} - - -static int -array_ass_sub(PyArrayObject *self, PyObject *index, PyObject *op) -{ - int ret, oned, fancy; - PyArrayMapIterObject *mit; - intp vals[MAX_DIMS]; - - if (op == NULL) { - PyErr_SetString(PyExc_ValueError, - "cannot delete array elements"); - return -1; - } - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "array is not writeable"); - return -1; - } - - if (PyInt_Check(index) || PyArray_IsScalar(index, Integer) || - PyLong_Check(index) || (PyIndex_Check(index) && - !PySequence_Check(index))) { - intp value; - value = PyArray_PyIntAsIntp(index); - if (PyErr_Occurred()) { - PyErr_Clear(); - } - else { - return array_ass_big_item(self, value, op); - } - } - - if (PyString_Check(index) || PyUnicode_Check(index)) { - if (self->descr->names) { - PyObject *obj; - - obj = PyDict_GetItem(self->descr->fields, index); - if (obj != NULL) { - PyArray_Descr *descr; - int offset; - PyObject *title; - - if (PyArg_ParseTuple(obj, "Oi|O", &descr, &offset, &title)) { - Py_INCREF(descr); - return PyArray_SetField(self, descr, offset, op); - } - } - } - -#if defined(NPY_PY3K) - PyErr_Format(PyExc_ValueError, - "field named %S not found.", - index); -#else - PyErr_Format(PyExc_ValueError, - "field named %s not found.", - PyString_AsString(index)); -#endif - return -1; - } - - if (index == Py_Ellipsis) { - /* - * Doing "a[...] += 1" triggers assigning an array to itself, - * so this check is needed. 
- */ - if ((PyObject *)self == op) { - return 0; - } - else { - return PyArray_CopyObject(self, op); - } - } - - if (self->nd == 0) { - /* - * Several different exceptions to the 0-d no-indexing rule - * - * 1) ellipses (handled above generally) - * 2) empty tuple - * 3) Using newaxis (None) - * 4) Boolean mask indexing - */ - if (index == Py_None || (PyTuple_Check(index) && - (0 == PyTuple_GET_SIZE(index) || - count_new_axes_0d(index) > 0))) { - return self->descr->f->setitem(op, self->data, self); - } - if (PyBool_Check(index) || PyArray_IsScalar(index, Bool) || - (PyArray_Check(index) && (PyArray_DIMS(index)==0) && - PyArray_ISBOOL(index))) { - if (PyObject_IsTrue(index)) { - return self->descr->f->setitem(op, self->data, self); - } - else { /* don't do anything */ - return 0; - } - } - PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed."); - return -1; - } - - /* Integer-tuple */ - if (PyTuple_Check(index) && (PyTuple_GET_SIZE(index) == self->nd) - && (_tuple_of_integers(index, vals, self->nd) >= 0)) { - int i; - char *item; - - for (i = 0; i < self->nd; i++) { - if (vals[i] < 0) { - vals[i] += self->dimensions[i]; - } - if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { - PyErr_Format(PyExc_IndexError, - "index (%"INTP_FMT") out of range "\ - "(0<=index<%"INTP_FMT") in dimension %d", - vals[i], self->dimensions[i], i); - return -1; - } - } - item = PyArray_GetPtr(self, vals); - return self->descr->f->setitem(op, item, self); - } - PyErr_Clear(); - - fancy = fancy_indexing_check(index); - if (fancy != SOBJ_NOTFANCY) { - oned = ((self->nd == 1) && - !(PyTuple_Check(index) && PyTuple_GET_SIZE(index) > 1)); - mit = (PyArrayMapIterObject *) PyArray_MapIterNew(index, oned, fancy); - if (mit == NULL) { - return -1; - } - if (oned) { - PyArrayIterObject *it; - int rval; - - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) { - Py_DECREF(mit); - return -1; - } - rval = iter_ass_subscript(it, mit->indexobj, op); - 
Py_DECREF(it); - Py_DECREF(mit); - return rval; - } - PyArray_MapIterBind(mit, self); - ret = PyArray_SetMap(mit, op); - Py_DECREF(mit); - return ret; - } - - return array_ass_sub_simple(self, index, op); -} - - -/* - * There are places that require that array_subscript return a PyArrayObject - * and not possibly a scalar. Thus, this is the function exposed to - * Python so that 0-dim arrays are passed as scalars - */ - - -static PyObject * -array_subscript_nice(PyArrayObject *self, PyObject *op) -{ - - PyArrayObject *mp; - intp vals[MAX_DIMS]; - - if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) || - PyLong_Check(op) || (PyIndex_Check(op) && - !PySequence_Check(op))) { - intp value; - value = PyArray_PyIntAsIntp(op); - if (PyErr_Occurred()) { - PyErr_Clear(); - } - else { - return array_item_nice(self, (Py_ssize_t) value); - } - } - /* optimization for a tuple of integers */ - if (self->nd > 1 && PyTuple_Check(op) && - (PyTuple_GET_SIZE(op) == self->nd) - && (_tuple_of_integers(op, vals, self->nd) >= 0)) { - int i; - char *item; - - for (i = 0; i < self->nd; i++) { - if (vals[i] < 0) { - vals[i] += self->dimensions[i]; - } - if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { - PyErr_Format(PyExc_IndexError, - "index (%"INTP_FMT") out of range "\ - "(0<=index<%"INTP_FMT") in dimension %d", - vals[i], self->dimensions[i], i); - return NULL; - } - } - item = PyArray_GetPtr(self, vals); - return PyArray_Scalar(item, self->descr, (PyObject *)self); - } - PyErr_Clear(); - - mp = (PyArrayObject *)array_subscript(self, op); - /* - * mp could be a scalar if op is not an Int, Scalar, Long or other Index - * object and still convertable to an integer (so that the code goes to - * array_subscript_simple). So, this cast is a bit dangerous.. - */ - - /* - * The following is just a copy of PyArray_Return with an - * additional logic in the nd == 0 case. 
- */ - - if (mp == NULL) { - return NULL; - } - if (PyErr_Occurred()) { - Py_XDECREF(mp); - return NULL; - } - if (PyArray_Check(mp) && mp->nd == 0) { - Bool noellipses = TRUE; - if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) { - noellipses = FALSE; - } - else if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) || - (PyArray_Check(op) && (PyArray_DIMS(op)==0) && - PyArray_ISBOOL(op))) { - noellipses = FALSE; - } - else if (PySequence_Check(op)) { - Py_ssize_t n, i; - PyObject *temp; - - n = PySequence_Size(op); - i = 0; - while (i < n && noellipses) { - temp = PySequence_GetItem(op, i); - if (temp == Py_Ellipsis) { - noellipses = FALSE; - } - Py_DECREF(temp); - i++; - } - } - if (noellipses) { - PyObject *ret; - ret = PyArray_ToScalar(mp->data, mp); - Py_DECREF(mp); - return ret; - } - } - return (PyObject *)mp; -} - - -NPY_NO_EXPORT PyMappingMethods array_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)array_length, /*mp_length*/ -#else - (inquiry)array_length, /*mp_length*/ -#endif - (binaryfunc)array_subscript_nice, /*mp_subscript*/ - (objobjargproc)array_ass_sub, /*mp_ass_subscript*/ -}; - -/****************** End of Mapping Protocol ******************************/ - -/*********************** Subscript Array Iterator ************************* - * * - * This object handles subscript behavior for array objects. 
* - * It is an iterator object with a next method * - * It abstracts the n-dimensional mapping behavior to make the looping * - * code more understandable (maybe) * - * and so that indexing can be set up ahead of time * - */ - -/* - * This function takes a Boolean array and constructs index objects and - * iterators as if nonzero(Bool) had been called - */ -static int -_nonzero_indices(PyObject *myBool, PyArrayIterObject **iters) -{ - PyArray_Descr *typecode; - PyArrayObject *ba = NULL, *new = NULL; - int nd, j; - intp size, i, count; - Bool *ptr; - intp coords[MAX_DIMS], dims_m1[MAX_DIMS]; - intp *dptr[MAX_DIMS]; - - typecode=PyArray_DescrFromType(PyArray_BOOL); - ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0, - CARRAY, NULL); - if (ba == NULL) { - return -1; - } - nd = ba->nd; - for (j = 0; j < nd; j++) { - iters[j] = NULL; - } - size = PyArray_SIZE(ba); - ptr = (Bool *)ba->data; - count = 0; - - /* pre-determine how many nonzero entries there are */ - for (i = 0; i < size; i++) { - if (*(ptr++)) { - count++; - } - } - - /* create count-sized index arrays for each dimension */ - for (j = 0; j < nd; j++) { - new = (PyArrayObject *)PyArray_New(&PyArray_Type, 1, &count, - PyArray_INTP, NULL, NULL, - 0, 0, NULL); - if (new == NULL) { - goto fail; - } - iters[j] = (PyArrayIterObject *) - PyArray_IterNew((PyObject *)new); - Py_DECREF(new); - if (iters[j] == NULL) { - goto fail; - } - dptr[j] = (intp *)iters[j]->ao->data; - coords[j] = 0; - dims_m1[j] = ba->dimensions[j]-1; - } - ptr = (Bool *)ba->data; - if (count == 0) { - goto finish; - } - - /* - * Loop through the Boolean array and copy coordinates - * for non-zero entries - */ - for (i = 0; i < size; i++) { - if (*(ptr++)) { - for (j = 0; j < nd; j++) { - *(dptr[j]++) = coords[j]; - } - } - /* Borrowed from ITER_NEXT macro */ - for (j = nd - 1; j >= 0; j--) { - if (coords[j] < dims_m1[j]) { - coords[j]++; - break; - } - else { - coords[j] = 0; - } - } - } - - finish: - Py_DECREF(ba); - return nd; - 
- fail: - for (j = 0; j < nd; j++) { - Py_XDECREF(iters[j]); - } - Py_XDECREF(ba); - return -1; -} - -/* convert an indexing object to an INTP indexing array iterator - if possible -- otherwise, it is a Slice or Ellipsis object - and has to be interpreted on bind to a particular - array so leave it NULL for now. -*/ -static int -_convert_obj(PyObject *obj, PyArrayIterObject **iter) -{ - PyArray_Descr *indtype; - PyObject *arr; - - if (PySlice_Check(obj) || (obj == Py_Ellipsis)) { - return 0; - } - else if (PyArray_Check(obj) && PyArray_ISBOOL(obj)) { - return _nonzero_indices(obj, iter); - } - else { - indtype = PyArray_DescrFromType(PyArray_INTP); - arr = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) { - return -1; - } - *iter = (PyArrayIterObject *)PyArray_IterNew(arr); - Py_DECREF(arr); - if (*iter == NULL) { - return -1; - } - } - return 1; -} - -/* Reset the map iterator to the beginning */ -NPY_NO_EXPORT void -PyArray_MapIterReset(PyArrayMapIterObject *mit) -{ - int i,j; intp coord[MAX_DIMS]; - PyArrayIterObject *it; - PyArray_CopySwapFunc *copyswap; - - mit->index = 0; - - copyswap = mit->iters[0]->ao->descr->f->copyswap; - - if (mit->subspace != NULL) { - memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); - PyArray_ITER_RESET(mit->subspace); - for (i = 0; i < mit->numiter; i++) { - it = mit->iters[i]; - PyArray_ITER_RESET(it); - j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->subspace->dataptr = mit->ait->dataptr; - mit->dataptr = mit->subspace->dataptr; - } - else { - for (i = 0; i < mit->numiter; i++) { - it = mit->iters[i]; - if (it->size != 0) { - PyArray_ITER_RESET(it); - copyswap(coord+i,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - else { - coord[i] = 0; - } - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->dataptr = mit->ait->dataptr; - } - return; -} - -/* - * This function needs to update the state of 
the map iterator - * and point mit->dataptr to the memory-location of the next object - */ -NPY_NO_EXPORT void -PyArray_MapIterNext(PyArrayMapIterObject *mit) -{ - int i, j; - intp coord[MAX_DIMS]; - PyArrayIterObject *it; - PyArray_CopySwapFunc *copyswap; - - mit->index += 1; - if (mit->index >= mit->size) { - return; - } - copyswap = mit->iters[0]->ao->descr->f->copyswap; - /* Sub-space iteration */ - if (mit->subspace != NULL) { - PyArray_ITER_NEXT(mit->subspace); - if (mit->subspace->index >= mit->subspace->size) { - /* reset coord to coordinates of beginning of the subspace */ - memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); - PyArray_ITER_RESET(mit->subspace); - for (i = 0; i < mit->numiter; i++) { - it = mit->iters[i]; - PyArray_ITER_NEXT(it); - j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->subspace->dataptr = mit->ait->dataptr; - } - mit->dataptr = mit->subspace->dataptr; - } - else { - for (i = 0; i < mit->numiter; i++) { - it = mit->iters[i]; - PyArray_ITER_NEXT(it); - copyswap(coord+i,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), - it->ao); - } - PyArray_ITER_GOTO(mit->ait, coord); - mit->dataptr = mit->ait->dataptr; - } - return; -} - -/* - * Bind a mapiteration to a particular array - * - * Determine if subspace iteration is necessary. If so, - * 1) Fill in mit->iteraxes - * 2) Create subspace iterator - * 3) Update nd, dimensions, and size. - * - * Subspace iteration is necessary if: arr->nd > mit->numiter - * - * Need to check for index-errors somewhere. - * - * Let's do it at bind time and also convert all <0 values to >0 here - * as well. 
- */ -NPY_NO_EXPORT void -PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr) -{ - int subnd; - PyObject *sub, *obj = NULL; - int i, j, n, curraxis, ellipexp, noellip; - PyArrayIterObject *it; - intp dimsize; - intp *indptr; - - subnd = arr->nd - mit->numiter; - if (subnd < 0) { - PyErr_SetString(PyExc_ValueError, - "too many indices for array"); - return; - } - - mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr); - if (mit->ait == NULL) { - return; - } - /* no subspace iteration needed. Finish up and Return */ - if (subnd == 0) { - n = arr->nd; - for (i = 0; i < n; i++) { - mit->iteraxes[i] = i; - } - goto finish; - } - - /* - * all indexing arrays have been converted to 0 - * therefore we can extract the subspace with a simple - * getitem call which will use view semantics - * - * But, be sure to do it with a true array. - */ - if (PyArray_CheckExact(arr)) { - sub = array_subscript_simple(arr, mit->indexobj); - } - else { - Py_INCREF(arr); - obj = PyArray_EnsureArray((PyObject *)arr); - if (obj == NULL) { - goto fail; - } - sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj); - Py_DECREF(obj); - } - - if (sub == NULL) { - goto fail; - } - mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub); - Py_DECREF(sub); - if (mit->subspace == NULL) { - goto fail; - } - /* Expand dimensions of result */ - n = mit->subspace->ao->nd; - for (i = 0; i < n; i++) { - mit->dimensions[mit->nd+i] = mit->subspace->ao->dimensions[i]; - } - mit->nd += n; - - /* - * Now, we still need to interpret the ellipsis and slice objects - * to determine which axes the indexing arrays are referring to - */ - n = PyTuple_GET_SIZE(mit->indexobj); - /* The number of dimensions an ellipsis takes up */ - ellipexp = arr->nd - n + 1; - /* - * Now fill in iteraxes -- remember indexing arrays have been - * converted to 0's in mit->indexobj - */ - curraxis = 0; - j = 0; - /* Only expand the first ellipsis */ - noellip = 1; - memset(mit->bscoord, 0, 
sizeof(intp)*arr->nd); - for (i = 0; i < n; i++) { - /* - * We need to fill in the starting coordinates for - * the subspace - */ - obj = PyTuple_GET_ITEM(mit->indexobj, i); - if (PyInt_Check(obj) || PyLong_Check(obj)) { - mit->iteraxes[j++] = curraxis++; - } - else if (noellip && obj == Py_Ellipsis) { - curraxis += ellipexp; - noellip = 0; - } - else { - intp start = 0; - intp stop, step; - /* Should be slice object or another Ellipsis */ - if (obj == Py_Ellipsis) { - mit->bscoord[curraxis] = 0; - } - else if (!PySlice_Check(obj) || - (slice_GetIndices((PySliceObject *)obj, - arr->dimensions[curraxis], - &start, &stop, &step, - &dimsize) < 0)) { - PyErr_Format(PyExc_ValueError, - "unexpected object " \ - "(%s) in selection position %d", - Py_TYPE(obj)->tp_name, i); - goto fail; - } - else { - mit->bscoord[curraxis] = start; - } - curraxis += 1; - } - } - - finish: - /* Here check the indexes (now that we have iteraxes) */ - mit->size = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); - if (mit->size < 0) { - PyErr_SetString(PyExc_ValueError, - "dimensions too large in fancy indexing"); - goto fail; - } - if (mit->ait->size == 0 && mit->size != 0) { - PyErr_SetString(PyExc_ValueError, - "invalid index into a 0-size array"); - goto fail; - } - - for (i = 0; i < mit->numiter; i++) { - intp indval; - it = mit->iters[i]; - PyArray_ITER_RESET(it); - dimsize = arr->dimensions[mit->iteraxes[i]]; - while (it->index < it->size) { - indptr = ((intp *)it->dataptr); - indval = *indptr; - if (indval < 0) { - indval += dimsize; - } - if (indval < 0 || indval >= dimsize) { - PyErr_Format(PyExc_IndexError, - "index (%"INTP_FMT") out of range "\ - "(0<=index<%"INTP_FMT") in dimension %d", - indval, (dimsize-1), mit->iteraxes[i]); - goto fail; - } - PyArray_ITER_NEXT(it); - } - PyArray_ITER_RESET(it); - } - return; - - fail: - Py_XDECREF(mit->subspace); - Py_XDECREF(mit->ait); - mit->subspace = NULL; - mit->ait = NULL; - return; -} - - -NPY_NO_EXPORT PyObject * 
-PyArray_MapIterNew(PyObject *indexobj, int oned, int fancy) -{ - PyArrayMapIterObject *mit; - PyArray_Descr *indtype; - PyObject *arr = NULL; - int i, n, started, nonindex; - - if (fancy == SOBJ_BADARRAY) { - PyErr_SetString(PyExc_IndexError, \ - "arrays used as indices must be of " \ - "integer (or boolean) type"); - return NULL; - } - if (fancy == SOBJ_TOOMANY) { - PyErr_SetString(PyExc_IndexError, "too many indices"); - return NULL; - } - - mit = (PyArrayMapIterObject *)_pya_malloc(sizeof(PyArrayMapIterObject)); - PyObject_Init((PyObject *)mit, &PyArrayMapIter_Type); - if (mit == NULL) { - return NULL; - } - for (i = 0; i < MAX_DIMS; i++) { - mit->iters[i] = NULL; - } - mit->index = 0; - mit->ait = NULL; - mit->subspace = NULL; - mit->numiter = 0; - mit->consec = 1; - Py_INCREF(indexobj); - mit->indexobj = indexobj; - - if (fancy == SOBJ_LISTTUP) { - PyObject *newobj; - newobj = PySequence_Tuple(indexobj); - if (newobj == NULL) { - goto fail; - } - Py_DECREF(indexobj); - indexobj = newobj; - mit->indexobj = indexobj; - } - -#undef SOBJ_NOTFANCY -#undef SOBJ_ISFANCY -#undef SOBJ_BADARRAY -#undef SOBJ_TOOMANY -#undef SOBJ_LISTTUP - - if (oned) { - return (PyObject *)mit; - } - /* - * Must have some kind of fancy indexing if we are here - * indexobj is either a list, an arrayobject, or a tuple - * (with at least 1 list or arrayobject or Bool object) - */ - - /* convert all inputs to iterators */ - if (PyArray_Check(indexobj) && (PyArray_TYPE(indexobj) == PyArray_BOOL)) { - mit->numiter = _nonzero_indices(indexobj, mit->iters); - if (mit->numiter < 0) { - goto fail; - } - mit->nd = 1; - mit->dimensions[0] = mit->iters[0]->dims_m1[0]+1; - Py_DECREF(mit->indexobj); - mit->indexobj = PyTuple_New(mit->numiter); - if (mit->indexobj == NULL) { - goto fail; - } - for (i = 0; i < mit->numiter; i++) { - PyTuple_SET_ITEM(mit->indexobj, i, PyInt_FromLong(0)); - } - } - - else if (PyArray_Check(indexobj) || !PyTuple_Check(indexobj)) { - mit->numiter = 1; - indtype = 
PyArray_DescrFromType(PyArray_INTP); - arr = PyArray_FromAny(indexobj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) { - goto fail; - } - mit->iters[0] = (PyArrayIterObject *)PyArray_IterNew(arr); - if (mit->iters[0] == NULL) { - Py_DECREF(arr); - goto fail; - } - mit->nd = PyArray_NDIM(arr); - memcpy(mit->dimensions, PyArray_DIMS(arr), mit->nd*sizeof(intp)); - mit->size = PyArray_SIZE(arr); - Py_DECREF(arr); - Py_DECREF(mit->indexobj); - mit->indexobj = Py_BuildValue("(N)", PyInt_FromLong(0)); - } - else { - /* must be a tuple */ - PyObject *obj; - PyArrayIterObject **iterp; - PyObject *new; - int numiters, j, n2; - /* - * Make a copy of the tuple -- we will be replacing - * index objects with 0's - */ - n = PyTuple_GET_SIZE(indexobj); - n2 = n; - new = PyTuple_New(n2); - if (new == NULL) { - goto fail; - } - started = 0; - nonindex = 0; - j = 0; - for (i = 0; i < n; i++) { - obj = PyTuple_GET_ITEM(indexobj,i); - iterp = mit->iters + mit->numiter; - if ((numiters=_convert_obj(obj, iterp)) < 0) { - Py_DECREF(new); - goto fail; - } - if (numiters > 0) { - started = 1; - if (nonindex) { - mit->consec = 0; - } - mit->numiter += numiters; - if (numiters == 1) { - PyTuple_SET_ITEM(new,j++, PyInt_FromLong(0)); - } - else { - /* - * we need to grow the new indexing object and fill - * it with 0s for each of the iterators produced - */ - int k; - n2 += numiters - 1; - if (_PyTuple_Resize(&new, n2) < 0) { - goto fail; - } - for (k = 0; k < numiters; k++) { - PyTuple_SET_ITEM(new, j++, PyInt_FromLong(0)); - } - } - } - else { - if (started) { - nonindex = 1; - } - Py_INCREF(obj); - PyTuple_SET_ITEM(new,j++,obj); - } - } - Py_DECREF(mit->indexobj); - mit->indexobj = new; - /* - * Store the number of iterators actually converted - * These will be mapped to actual axes at bind time - */ - if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) { - goto fail; - } - } - - return (PyObject *)mit; - - fail: - Py_DECREF(mit); - return NULL; -} - - -static void 
-arraymapiter_dealloc(PyArrayMapIterObject *mit) -{ - int i; - Py_XDECREF(mit->indexobj); - Py_XDECREF(mit->ait); - Py_XDECREF(mit->subspace); - for (i = 0; i < mit->numiter; i++) { - Py_XDECREF(mit->iters[i]); - } - _pya_free(mit); -} - -/* - * The mapiter object must be created new each time. It does not work - * to bind to a new array, and continue. - * - * This was the orginal intention, but currently that does not work. - * Do not expose the MapIter_Type to Python. - * - * It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); - * mapiter is equivalent to a[indexobj].flat but the latter gets to use - * slice syntax. - */ -NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.mapiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)arraymapiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* 
tp_version_tag */ -#endif -}; - -/** END of Subscript Iterator **/ diff --git a/numpy-1.6.2/numpy/core/src/multiarray/mapping.h b/numpy-1.6.2/numpy/core/src/multiarray/mapping.h deleted file mode 100644 index d5ac74735e..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/mapping.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef _NPY_ARRAYMAPPING_H_ -#define _NPY_ARRAYMAPPING_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; -#else -NPY_NO_EXPORT PyMappingMethods array_as_mapping; -#endif - -NPY_NO_EXPORT PyObject * -array_big_item(PyArrayObject *self, intp i); - -NPY_NO_EXPORT Py_ssize_t -array_length(PyArrayObject *self); - -NPY_NO_EXPORT PyObject * -array_item_nice(PyArrayObject *self, Py_ssize_t i); - -NPY_NO_EXPORT PyObject * -array_subscript(PyArrayObject *self, PyObject *op); - -NPY_NO_EXPORT int -array_ass_big_item(PyArrayObject *self, intp i, PyObject *v); - -#if PY_VERSION_HEX < 0x02050000 - #if SIZEOF_INT == SIZEOF_INTP - #define array_ass_item array_ass_big_item - #endif -#else - #if SIZEOF_SIZE_T == SIZEOF_INTP - #define array_ass_item array_ass_big_item - #endif -#endif -#ifndef array_ass_item -NPY_NO_EXPORT int -_array_ass_item(PyArrayObject *self, Py_ssize_t i, PyObject *v); -#define array_ass_item _array_ass_item -#endif - -NPY_NO_EXPORT PyObject * -add_new_axes_0d(PyArrayObject *, int); - -NPY_NO_EXPORT int -count_new_axes_0d(PyObject *tuple); - -/* - * Prototypes for Mapping calls --- not part of the C-API - * because only useful as part of a getitem call. 
- */ -NPY_NO_EXPORT void -PyArray_MapIterReset(PyArrayMapIterObject *mit); - -NPY_NO_EXPORT void -PyArray_MapIterNext(PyArrayMapIterObject *mit); - -NPY_NO_EXPORT void -PyArray_MapIterBind(PyArrayMapIterObject *, PyArrayObject *); - -NPY_NO_EXPORT PyObject* -PyArray_MapIterNew(PyObject *, int, int); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/methods.c b/numpy-1.6.2/numpy/core/src/multiarray/methods.c deleted file mode 100644 index 53dc671000..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/methods.c +++ /dev/null @@ -1,2331 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "ctors.h" -#include "calculation.h" - -#include "methods.h" - - -/* NpyArg_ParseKeywords - * - * Utility function that provides the keyword parsing functionality of - * PyArg_ParseTupleAndKeywords without having to have an args argument. - * - */ -static int -NpyArg_ParseKeywords(PyObject *keys, const char *format, char **kwlist, ...) 
-{ - PyObject *args = PyTuple_New(0); - int ret; - va_list va; - - if (args == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "Failed to allocate new tuple"); - return 0; - } - va_start(va, kwlist); - ret = PyArg_VaParseTupleAndKeywords(args, keys, format, kwlist, va); - va_end(va); - Py_DECREF(args); - return ret; -} - -/* Should only be used if x is known to be an nd-array */ -#define _ARET(x) PyArray_Return((PyArrayObject *)(x)) - -static PyObject * -array_take(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int dimension = MAX_DIMS; - PyObject *indices; - PyArrayObject *out = NULL; - NPY_CLIPMODE mode = NPY_RAISE; - static char *kwlist[] = {"indices", "axis", "out", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O&", kwlist, - &indices, - PyArray_AxisConverter, &dimension, - PyArray_OutputConverter, &out, - PyArray_ClipmodeConverter, &mode)) - return NULL; - - return _ARET(PyArray_TakeFrom(self, indices, dimension, out, mode)); -} - -static PyObject * -array_fill(PyArrayObject *self, PyObject *args) -{ - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - if (PyArray_FillWithScalar(self, obj) < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_put(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *indices, *values; - NPY_CLIPMODE mode = NPY_RAISE; - static char *kwlist[] = {"indices", "values", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&", kwlist, - &indices, - &values, - PyArray_ClipmodeConverter, &mode)) - return NULL; - return PyArray_PutTo(self, values, indices, mode); -} - -static PyObject * -array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *keywords[] = {"order", NULL}; - PyArray_Dims newshape; - PyObject *ret; - PyArray_ORDER order = PyArray_CORDER; - Py_ssize_t n = PyTuple_Size(args); - - if (!NpyArg_ParseKeywords(kwds, "|O&", keywords, - PyArray_OrderConverter, 
&order)) { - return NULL; - } - - if (n <= 1) { - if (PyTuple_GET_ITEM(args, 0) == Py_None) { - return PyArray_View(self, NULL, NULL); - } - if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) { - return NULL; - } - } - else { - if (!PyArray_IntpConverter(args, &newshape)) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "invalid shape"); - } - goto fail; - } - } - ret = PyArray_Newshape(self, &newshape, order); - PyDimMem_FREE(newshape.ptr); - return ret; - - fail: - PyDimMem_FREE(newshape.ptr); - return NULL; -} - -static PyObject * -array_squeeze(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - return PyArray_Squeeze(self); -} - -static PyObject * -array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *out_dtype = NULL; - PyObject *out_type = NULL; - PyArray_Descr *dtype = NULL; - - static char *kwlist[] = {"dtype", "type", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, - &out_dtype, - &out_type)) - return NULL; - - /* If user specified a positional argument, guess whether it - represents a type or a dtype for backward compatibility. */ - if (out_dtype) { - /* type specified? 
*/ - if (PyType_Check(out_dtype) && - PyType_IsSubtype((PyTypeObject *)out_dtype, - &PyArray_Type)) { - if (out_type) { - PyErr_SetString(PyExc_ValueError, - "Cannot specify output type twice."); - return NULL; - } - out_type = out_dtype; - out_dtype = NULL; - } - } - - if ((out_type) && (!PyType_Check(out_type) || - !PyType_IsSubtype((PyTypeObject *)out_type, - &PyArray_Type))) { - PyErr_SetString(PyExc_ValueError, - "Type must be a sub-type of ndarray type"); - return NULL; - } - - if ((out_dtype) && - (PyArray_DescrConverter(out_dtype, &dtype) == PY_FAIL)) { - PyErr_SetString(PyExc_ValueError, - "Dtype must be a numpy data-type"); - return NULL; - } - - return PyArray_View(self, dtype, (PyTypeObject*)out_type); -} - -static PyObject * -array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return _ARET(PyArray_ArgMax(self, axis, out)); -} - -static PyObject * -array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return _ARET(PyArray_ArgMin(self, axis, out)); -} - -static PyObject * -array_max(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return PyArray_Max(self, axis, out); -} - -static PyObject * -array_ptp(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = 
MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return PyArray_Ptp(self, axis, out); -} - - -static PyObject * -array_min(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return PyArray_Min(self, axis, out); -} - -static PyObject * -array_swapaxes(PyArrayObject *self, PyObject *args) -{ - int axis1, axis2; - - if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) { - return NULL; - } - return PyArray_SwapAxes(self, axis1, axis2); -} - - -/* steals typed reference */ -/*NUMPY_API - Get a subset of bytes from each element of the array -*/ -NPY_NO_EXPORT PyObject * -PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) -{ - PyObject *ret = NULL; - - if (offset < 0 || (offset + typed->elsize) > self->descr->elsize) { - PyErr_Format(PyExc_ValueError, - "Need 0 <= offset <= %d for requested type " \ - "but received offset = %d", - self->descr->elsize-typed->elsize, offset); - Py_DECREF(typed); - return NULL; - } - ret = PyArray_NewFromDescr(Py_TYPE(self), - typed, - self->nd, self->dimensions, - self->strides, - self->data + offset, - self->flags&(~NPY_F_CONTIGUOUS), - (PyObject *)self); - if (ret == NULL) { - return NULL; - } - Py_INCREF(self); - ((PyArrayObject *)ret)->base = (PyObject *)self; - - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - return ret; -} - -static PyObject * -array_getfield(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - - PyArray_Descr *dtype = NULL; - int offset = 0; - static char *kwlist[] = {"dtype", "offset", 0}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, 
"O&|i", kwlist, - PyArray_DescrConverter, &dtype, - &offset)) { - Py_XDECREF(dtype); - return NULL; - } - - return PyArray_GetField(self, dtype, offset); -} - - -/*NUMPY_API - Set a subset of bytes from each element of the array -*/ -NPY_NO_EXPORT int -PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, - int offset, PyObject *val) -{ - PyObject *ret = NULL; - int retval = 0; - - if (offset < 0 || (offset + dtype->elsize) > self->descr->elsize) { - PyErr_Format(PyExc_ValueError, - "Need 0 <= offset <= %d for requested type " \ - "but received offset = %d", - self->descr->elsize-dtype->elsize, offset); - Py_DECREF(dtype); - return -1; - } - ret = PyArray_NewFromDescr(Py_TYPE(self), - dtype, self->nd, self->dimensions, - self->strides, self->data + offset, - self->flags, (PyObject *)self); - if (ret == NULL) { - return -1; - } - Py_INCREF(self); - ((PyArrayObject *)ret)->base = (PyObject *)self; - - PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); - retval = PyArray_CopyObject((PyArrayObject *)ret, val); - Py_DECREF(ret); - return retval; -} - -static PyObject * -array_setfield(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_Descr *dtype = NULL; - int offset = 0; - PyObject *value; - static char *kwlist[] = {"value", "dtype", "offset", 0}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, - &value, - PyArray_DescrConverter, &dtype, - &offset)) { - Py_XDECREF(dtype); - return NULL; - } - - if (PyDataType_REFCHK(PyArray_DESCR(self))) { - PyErr_SetString(PyExc_RuntimeError, - "cannot call setfield on an object array"); - Py_DECREF(dtype); - return NULL; - } - - if (PyArray_SetField(self, dtype, offset, value) < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - -/* This doesn't change the descriptor just the actual data... 
- */ - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_Byteswap(PyArrayObject *self, Bool inplace) -{ - PyArrayObject *ret; - intp size; - PyArray_CopySwapNFunc *copyswapn; - PyArrayIterObject *it; - - copyswapn = self->descr->f->copyswapn; - if (inplace) { - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "Cannot byte-swap in-place on a " \ - "read-only array"); - return NULL; - } - size = PyArray_SIZE(self); - if (PyArray_ISONESEGMENT(self)) { - copyswapn(self->data, self->descr->elsize, NULL, -1, size, 1, self); - } - else { /* Use iterator */ - int axis = -1; - intp stride; - it = (PyArrayIterObject *) \ - PyArray_IterAllButAxis((PyObject *)self, &axis); - stride = self->strides[axis]; - size = self->dimensions[axis]; - while (it->index < it->size) { - copyswapn(it->dataptr, stride, NULL, -1, size, 1, self); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - - Py_INCREF(self); - return (PyObject *)self; - } - else { - PyObject *new; - if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL) { - return NULL; - } - new = PyArray_Byteswap(ret, TRUE); - Py_DECREF(new); - return (PyObject *)ret; - } -} - - -static PyObject * -array_byteswap(PyArrayObject *self, PyObject *args) -{ - Bool inplace = FALSE; - - if (!PyArg_ParseTuple(args, "|O&", - PyArray_BoolConverter, &inplace)) { - return NULL; - } - return PyArray_Byteswap(self, inplace); -} - -static PyObject * -array_tolist(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - return PyArray_ToList(self); -} - - -static PyObject * -array_tostring(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - NPY_ORDER order = NPY_CORDER; - static char *kwlist[] = {"order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, - PyArray_OrderConverter, &order)) { - return NULL; - } - return PyArray_ToString(self, order); -} - - -/* This should grow an order= keyword to be consistent - */ - -static PyObject * 
-array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int ret, ret2; - PyObject *file; - FILE *fd; - char *sep = ""; - char *format = ""; - static char *kwlist[] = {"file", "sep", "format", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|ss", kwlist, - &file, - &sep, - &format)) { - return NULL; - } - - if (PyBytes_Check(file) || PyUnicode_Check(file)) { - file = npy_PyFile_OpenFile(file, "wb"); - if (file == NULL) { - return NULL; - } - } - else { - Py_INCREF(file); - } - fd = npy_PyFile_Dup(file, "wb"); - if (fd == NULL) { - PyErr_SetString(PyExc_IOError, "first argument must be a " \ - "string or open file"); - Py_DECREF(file); - return NULL; - } - ret = PyArray_ToFile(self, fd, sep, format); - ret2 = npy_PyFile_DupClose(file, fd); - Py_DECREF(file); - if (ret < 0 || ret2 < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_toscalar(PyArrayObject *self, PyObject *args) { - int n, nd; - n = PyTuple_GET_SIZE(args); - - if (n == 1) { - PyObject *obj; - obj = PyTuple_GET_ITEM(args, 0); - if (PyTuple_Check(obj)) { - args = obj; - n = PyTuple_GET_SIZE(args); - } - } - - if (n == 0) { - if (self->nd == 0 || PyArray_SIZE(self) == 1) - return self->descr->f->getitem(self->data, self); - else { - PyErr_SetString(PyExc_ValueError, - "can only convert an array " \ - " of size 1 to a Python scalar"); - return NULL; - } - } - else if (n != self->nd && (n > 1 || self->nd == 0)) { - PyErr_SetString(PyExc_ValueError, - "incorrect number of indices for " \ - "array"); - return NULL; - } - else if (n == 1) { /* allows for flat getting as well as 1-d case */ - intp value, loc, index, factor; - intp factors[MAX_DIMS]; - value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0)); - if (error_converting(value)) { - PyErr_SetString(PyExc_ValueError, "invalid integer"); - return NULL; - } - factor = PyArray_SIZE(self); - if (value < 0) value += factor; - if ((value >= factor) || (value < 0)) { - 
PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - if (self->nd == 1) { - value *= self->strides[0]; - return self->descr->f->getitem(self->data + value, - self); - } - nd = self->nd; - factor = 1; - while (nd--) { - factors[nd] = factor; - factor *= self->dimensions[nd]; - } - loc = 0; - for (nd = 0; nd < self->nd; nd++) { - index = value / factors[nd]; - value = value % factors[nd]; - loc += self->strides[nd]*index; - } - - return self->descr->f->getitem(self->data + loc, - self); - - } - else { - intp loc, index[MAX_DIMS]; - nd = PyArray_IntpFromSequence(args, index, MAX_DIMS); - if (nd < n) { - return NULL; - } - loc = 0; - while (nd--) { - if (index[nd] < 0) { - index[nd] += self->dimensions[nd]; - } - if (index[nd] < 0 || - index[nd] >= self->dimensions[nd]) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - loc += self->strides[nd]*index[nd]; - } - return self->descr->f->getitem(self->data + loc, self); - } -} - -static PyObject * -array_setscalar(PyArrayObject *self, PyObject *args) { - int n, nd; - int ret = -1; - PyObject *obj; - n = PyTuple_GET_SIZE(args) - 1; - - if (n < 0) { - PyErr_SetString(PyExc_ValueError, - "itemset must have at least one argument"); - return NULL; - } - obj = PyTuple_GET_ITEM(args, n); - if (n == 0) { - if (self->nd == 0 || PyArray_SIZE(self) == 1) { - ret = self->descr->f->setitem(obj, self->data, self); - } - else { - PyErr_SetString(PyExc_ValueError, - "can only place a scalar for an " - " array of size 1"); - return NULL; - } - } - else if (n != self->nd && (n > 1 || self->nd == 0)) { - PyErr_SetString(PyExc_ValueError, - "incorrect number of indices for " \ - "array"); - return NULL; - } - else if (n == 1) { /* allows for flat setting as well as 1-d case */ - intp value, loc, index, factor; - intp factors[MAX_DIMS]; - PyObject *indobj; - - indobj = PyTuple_GET_ITEM(args, 0); - if (PyTuple_Check(indobj)) { - PyObject *res; - PyObject *newargs; - PyObject *tmp; - 
int i, nn; - nn = PyTuple_GET_SIZE(indobj); - newargs = PyTuple_New(nn+1); - Py_INCREF(obj); - for (i = 0; i < nn; i++) { - tmp = PyTuple_GET_ITEM(indobj, i); - Py_INCREF(tmp); - PyTuple_SET_ITEM(newargs, i, tmp); - } - PyTuple_SET_ITEM(newargs, nn, obj); - /* Call with a converted set of arguments */ - res = array_setscalar(self, newargs); - Py_DECREF(newargs); - return res; - } - value = PyArray_PyIntAsIntp(indobj); - if (error_converting(value)) { - PyErr_SetString(PyExc_ValueError, "invalid integer"); - return NULL; - } - if (value >= PyArray_SIZE(self)) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - if (self->nd == 1) { - value *= self->strides[0]; - ret = self->descr->f->setitem(obj, self->data + value, - self); - goto finish; - } - nd = self->nd; - factor = 1; - while (nd--) { - factors[nd] = factor; - factor *= self->dimensions[nd]; - } - loc = 0; - for (nd = 0; nd < self->nd; nd++) { - index = value / factors[nd]; - value = value % factors[nd]; - loc += self->strides[nd]*index; - } - - ret = self->descr->f->setitem(obj, self->data + loc, self); - } - else { - intp loc, index[MAX_DIMS]; - PyObject *tupargs; - tupargs = PyTuple_GetSlice(args, 0, n); - nd = PyArray_IntpFromSequence(tupargs, index, MAX_DIMS); - Py_DECREF(tupargs); - if (nd < n) { - return NULL; - } - loc = 0; - while (nd--) { - if (index[nd] < 0) { - index[nd] += self->dimensions[nd]; - } - if (index[nd] < 0 || - index[nd] >= self->dimensions[nd]) { - PyErr_SetString(PyExc_ValueError, - "index out of bounds"); - return NULL; - } - loc += self->strides[nd]*index[nd]; - } - ret = self->descr->f->setitem(obj, self->data + loc, self); - } - - finish: - if (ret < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - -/* Sets the array values from another array as if they were flat */ -static PyObject * -array_setasflat(PyArrayObject *self, PyObject *args) -{ - PyObject *arr_in; - PyArrayObject *arr; - - if (!PyArg_ParseTuple(args, "O", &arr_in)) 
{ - return NULL; - } - - arr = (PyArrayObject *)PyArray_FromAny(arr_in, NULL, 0, 0, 0, NULL); - if (arr == NULL) { - return NULL; - } - - if (PyArray_CopyAnyInto(self, arr) != 0) { - Py_DECREF(arr); - return NULL; - } - - Py_DECREF(arr); - Py_RETURN_NONE; -} - -static PyObject * -array_astype(PyArrayObject *self, PyObject *args) -{ - PyArray_Descr *descr = NULL; - - if (!PyArg_ParseTuple(args, "O&", PyArray_DescrConverter, - &descr)) { - Py_XDECREF(descr); - return NULL; - } - - return PyArray_CastToType(self, descr, PyArray_ISFORTRAN(self)); -} - -/* default sub-type implementation */ - - -static PyObject * -array_wraparray(PyArrayObject *self, PyObject *args) -{ - PyObject *arr; - PyObject *ret; - - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument"); - return NULL; - } - arr = PyTuple_GET_ITEM(args, 0); - if (arr == NULL) { - return NULL; - } - if (!PyArray_Check(arr)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; - } - - if (Py_TYPE(self) != Py_TYPE(arr)){ - Py_INCREF(PyArray_DESCR(arr)); - ret = PyArray_NewFromDescr(Py_TYPE(self), - PyArray_DESCR(arr), - PyArray_NDIM(arr), - PyArray_DIMS(arr), - PyArray_STRIDES(arr), PyArray_DATA(arr), - PyArray_FLAGS(arr), (PyObject *)self); - if (ret == NULL) { - return NULL; - } - Py_INCREF(arr); - PyArray_BASE(ret) = arr; - return ret; - } else { - /*The type was set in __array_prepare__*/ - Py_INCREF(arr); - return arr; - } -} - - -static PyObject * -array_preparearray(PyArrayObject *self, PyObject *args) -{ - PyObject *arr; - PyObject *ret; - - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument"); - return NULL; - } - arr = PyTuple_GET_ITEM(args, 0); - if (!PyArray_Check(arr)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; - } - - if (Py_TYPE(self) == Py_TYPE(arr)) { - /* No need to create a new view */ - Py_INCREF(arr); - return arr; - } - 
- Py_INCREF(PyArray_DESCR(arr)); - ret = PyArray_NewFromDescr(Py_TYPE(self), - PyArray_DESCR(arr), - PyArray_NDIM(arr), - PyArray_DIMS(arr), - PyArray_STRIDES(arr), PyArray_DATA(arr), - PyArray_FLAGS(arr), (PyObject *)self); - if (ret == NULL) { - return NULL; - } - Py_INCREF(arr); - PyArray_BASE(ret) = arr; - return ret; -} - - -static PyObject * -array_getarray(PyArrayObject *self, PyObject *args) -{ - PyArray_Descr *newtype = NULL; - PyObject *ret; - - if (!PyArg_ParseTuple(args, "|O&", - PyArray_DescrConverter, &newtype)) { - Py_XDECREF(newtype); - return NULL; - } - - /* convert to PyArray_Type */ - if (!PyArray_CheckExact(self)) { - PyObject *new; - PyTypeObject *subtype = &PyArray_Type; - - if (!PyType_IsSubtype(Py_TYPE(self), &PyArray_Type)) { - subtype = &PyArray_Type; - } - - Py_INCREF(PyArray_DESCR(self)); - new = PyArray_NewFromDescr(subtype, - PyArray_DESCR(self), - PyArray_NDIM(self), - PyArray_DIMS(self), - PyArray_STRIDES(self), - PyArray_DATA(self), - PyArray_FLAGS(self), NULL); - if (new == NULL) { - return NULL; - } - Py_INCREF(self); - PyArray_BASE(new) = (PyObject *)self; - self = (PyArrayObject *)new; - } - else { - Py_INCREF(self); - } - - if ((newtype == NULL) || - PyArray_EquivTypes(self->descr, newtype)) { - return (PyObject *)self; - } - else { - ret = PyArray_CastToType(self, newtype, 0); - Py_DECREF(self); - return ret; - } -} - - -static PyObject * -array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_ORDER order = PyArray_CORDER; - static char *kwlist[] = {"order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, - PyArray_OrderConverter, &order)) { - return NULL; - } - - return PyArray_NewCopy(self, order); -} - -#include -static PyObject * -array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"refcheck", NULL}; - Py_ssize_t size = PyTuple_Size(args); - int refcheck = 1; - PyArray_Dims newshape; - PyObject *ret, *obj; - - - if 
(!NpyArg_ParseKeywords(kwds, "|i", kwlist, &refcheck)) { - return NULL; - } - - if (size == 0) { - Py_INCREF(Py_None); - return Py_None; - } - else if (size == 1) { - obj = PyTuple_GET_ITEM(args, 0); - if (obj == Py_None) { - Py_INCREF(Py_None); - return Py_None; - } - args = obj; - } - if (!PyArray_IntpConverter(args, &newshape)) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, "invalid shape"); - } - return NULL; - } - - ret = PyArray_Resize(self, &newshape, refcheck, PyArray_CORDER); - PyDimMem_FREE(newshape.ptr); - if (ret == NULL) { - return NULL; - } - Py_DECREF(ret); - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_repeat(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *repeats; - int axis = MAX_DIMS; - static char *kwlist[] = {"repeats", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, - &repeats, - PyArray_AxisConverter, &axis)) { - return NULL; - } - return _ARET(PyArray_Repeat(self, repeats, axis)); -} - -static PyObject * -array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *keywords[] = {"out", "mode", NULL}; - PyObject *choices; - PyArrayObject *out = NULL; - NPY_CLIPMODE clipmode = NPY_RAISE; - Py_ssize_t n = PyTuple_Size(args); - - if (n <= 1) { - if (!PyArg_ParseTuple(args, "O", &choices)) { - return NULL; - } - } - else { - choices = args; - } - - if (!NpyArg_ParseKeywords(kwds, "|O&O&", keywords, - PyArray_OutputConverter, &out, - PyArray_ClipmodeConverter, &clipmode)) { - return NULL; - } - - return _ARET(PyArray_Choose(self, choices, out, clipmode)); -} - -static PyObject * -array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis=-1; - int val; - PyArray_SORTKIND sortkind = PyArray_QUICKSORT; - PyObject *order = NULL; - PyArray_Descr *saved = NULL; - PyArray_Descr *newd; - static char *kwlist[] = {"axis", "kind", "order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&O", kwlist, - &axis, - 
PyArray_SortkindConverter, &sortkind, - &order)) { - return NULL; - } - if (order == Py_None) { - order = NULL; - } - if (order != NULL) { - PyObject *new_name; - PyObject *_numpy_internal; - saved = self->descr; - if (saved->names == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ - "order when the array has no fields."); - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - new_name = PyObject_CallMethod(_numpy_internal, "_newnames", - "OO", saved, order); - Py_DECREF(_numpy_internal); - if (new_name == NULL) { - return NULL; - } - newd = PyArray_DescrNew(saved); - newd->names = new_name; - self->descr = newd; - } - - val = PyArray_Sort(self, axis, sortkind); - if (order != NULL) { - Py_XDECREF(self->descr); - self->descr = saved; - } - if (val < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = -1; - PyArray_SORTKIND sortkind = PyArray_QUICKSORT; - PyObject *order = NULL, *res; - PyArray_Descr *newd, *saved=NULL; - static char *kwlist[] = {"axis", "kind", "order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O", kwlist, - PyArray_AxisConverter, &axis, - PyArray_SortkindConverter, &sortkind, - &order)) { - return NULL; - } - if (order == Py_None) { - order = NULL; - } - if (order != NULL) { - PyObject *new_name; - PyObject *_numpy_internal; - saved = self->descr; - if (saved->names == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ - "order when the array has no fields."); - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - new_name = PyObject_CallMethod(_numpy_internal, "_newnames", - "OO", saved, order); - Py_DECREF(_numpy_internal); - if (new_name == NULL) { - return NULL; - } - newd = PyArray_DescrNew(saved); - 
newd->names = new_name; - self->descr = newd; - } - - res = PyArray_ArgSort(self, axis, sortkind); - if (order != NULL) { - Py_XDECREF(self->descr); - self->descr = saved; - } - return _ARET(res); -} - -static PyObject * -array_searchsorted(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"keys", "side", NULL}; - PyObject *keys; - NPY_SEARCHSIDE side = NPY_SEARCHLEFT; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:searchsorted", - kwlist, &keys, - PyArray_SearchsideConverter, &side)) { - return NULL; - } - return _ARET(PyArray_SearchSorted(self, keys, side)); -} - -static void -_deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, - PyObject *deepcopy, PyObject *visit) -{ - if (!PyDataType_REFCHK(dtype)) { - return; - } - else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - return; - } - _deepcopy_call(iptr + offset, optr + offset, new, - deepcopy, visit); - } - } - else { - PyObject *itemp, *otemp; - PyObject *res; - NPY_COPY_PYOBJECT_PTR(&itemp, iptr); - NPY_COPY_PYOBJECT_PTR(&otemp, optr); - Py_XINCREF(itemp); - /* call deepcopy on this argument */ - res = PyObject_CallFunctionObjArgs(deepcopy, itemp, visit, NULL); - Py_XDECREF(itemp); - Py_XDECREF(otemp); - NPY_COPY_PYOBJECT_PTR(optr, &res); - } - -} - - -static PyObject * -array_deepcopy(PyArrayObject *self, PyObject *args) -{ - PyObject* visit; - char *optr; - PyArrayIterObject *it; - PyObject *copy, *ret, *deepcopy; - - if (!PyArg_ParseTuple(args, "O", &visit)) { - return NULL; - } - ret = PyArray_Copy(self); - if (PyDataType_REFCHK(self->descr)) { - copy = PyImport_ImportModule("copy"); - if (copy == NULL) { - return NULL; - } - deepcopy = PyObject_GetAttrString(copy, "deepcopy"); - Py_DECREF(copy); - if 
(deepcopy == NULL) { - return NULL; - } - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) { - Py_DECREF(deepcopy); - return NULL; - } - optr = PyArray_DATA(ret); - while(it->index < it->size) { - _deepcopy_call(it->dataptr, optr, self->descr, deepcopy, visit); - optr += self->descr->elsize; - PyArray_ITER_NEXT(it); - } - Py_DECREF(deepcopy); - Py_DECREF(it); - } - return _ARET(ret); -} - -/* Convert Array to flat list (using getitem) */ -static PyObject * -_getlist_pkl(PyArrayObject *self) -{ - PyObject *theobject; - PyArrayIterObject *iter = NULL; - PyObject *list; - PyArray_GetItemFunc *getitem; - - getitem = self->descr->f->getitem; - iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) { - return NULL; - } - list = PyList_New(iter->size); - if (list == NULL) { - Py_DECREF(iter); - return NULL; - } - while (iter->index < iter->size) { - theobject = getitem(iter->dataptr, self); - PyList_SET_ITEM(list, (int) iter->index, theobject); - PyArray_ITER_NEXT(iter); - } - Py_DECREF(iter); - return list; -} - -static int -_setlist_pkl(PyArrayObject *self, PyObject *list) -{ - PyObject *theobject; - PyArrayIterObject *iter = NULL; - PyArray_SetItemFunc *setitem; - - setitem = self->descr->f->setitem; - iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) { - return -1; - } - while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, (int) iter->index); - setitem(theobject, iter->dataptr, self); - PyArray_ITER_NEXT(iter); - } - Py_XDECREF(iter); - return 0; -} - - -static PyObject * -array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args)) -{ - /* version number of this pickle type. Increment if we need to - change the format. Be sure to handle the old versions in - array_setstate. 
*/ - const int version = 1; - PyObject *ret = NULL, *state = NULL, *obj = NULL, *mod = NULL; - PyObject *mybool, *thestr = NULL; - PyArray_Descr *descr; - - /* Return a tuple of (callable object, arguments, object's state) */ - /* We will put everything in the object's state, so that on UnPickle - it can use the string object as memory without a copy */ - - ret = PyTuple_New(3); - if (ret == NULL) { - return NULL; - } - mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) { - Py_DECREF(ret); - return NULL; - } - obj = PyObject_GetAttrString(mod, "_reconstruct"); - Py_DECREF(mod); - PyTuple_SET_ITEM(ret, 0, obj); - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("ONc", - (PyObject *)Py_TYPE(self), - Py_BuildValue("(N)", - PyInt_FromLong(0)), - /* dummy data-type */ - 'b')); - - /* Now fill in object's state. This is a tuple with - 5 arguments - - 1) an integer with the pickle version. - 2) a Tuple giving the shape - 3) a PyArray_Descr Object (with correct bytorder set) - 4) a Bool stating if Fortran or not - 5) a Python object representing the data (a string, or - a list or any user-defined object). - - Notice because Python does not describe a mechanism to write - raw data to the pickle, this performs a copy to a string first - */ - - state = PyTuple_New(5); - if (state == NULL) { - Py_DECREF(ret); - return NULL; - } - PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); - PyTuple_SET_ITEM(state, 1, PyObject_GetAttrString((PyObject *)self, - "shape")); - descr = self->descr; - Py_INCREF(descr); - PyTuple_SET_ITEM(state, 2, (PyObject *)descr); - mybool = (PyArray_ISFORTRAN(self) ? 
Py_True : Py_False); - Py_INCREF(mybool); - PyTuple_SET_ITEM(state, 3, mybool); - if (PyDataType_FLAGCHK(self->descr, NPY_LIST_PICKLE)) { - thestr = _getlist_pkl(self); - } - else { - thestr = PyArray_ToString(self, NPY_ANYORDER); - } - if (thestr == NULL) { - Py_DECREF(ret); - Py_DECREF(state); - return NULL; - } - PyTuple_SET_ITEM(state, 4, thestr); - PyTuple_SET_ITEM(ret, 2, state); - return ret; -} - -static PyObject * -array_setstate(PyArrayObject *self, PyObject *args) -{ - PyObject *shape; - PyArray_Descr *typecode; - int version = 1; - int fortran; - PyObject *rawdata; - char *datastr; - Py_ssize_t len; - intp size, dimensions[MAX_DIMS]; - int nd; - int incref_base = 1; - - /* This will free any memory associated with a and - use the string in setstate as the (writeable) memory. - */ - if (!PyArg_ParseTuple(args, "(iO!O!iO)", - &version, - &PyTuple_Type, &shape, - &PyArrayDescr_Type, &typecode, - &fortran, - &rawdata)) { - PyErr_Clear(); - version = 0; - if (!PyArg_ParseTuple(args, "(O!O!iO)", - &PyTuple_Type, &shape, - &PyArrayDescr_Type, &typecode, - &fortran, - &rawdata)) { - return NULL; - } - } - - /* If we ever need another pickle format, increment the version - number. But we should still be able to handle the old versions. - We've only got one right now. 
*/ - if (version != 1 && version != 0) { - PyErr_Format(PyExc_ValueError, - "can't handle version %d of numpy.ndarray pickle", - version); - return NULL; - } - - Py_XDECREF(self->descr); - self->descr = typecode; - Py_INCREF(typecode); - nd = PyArray_IntpFromSequence(shape, dimensions, MAX_DIMS); - if (nd < 0) { - return NULL; - } - size = PyArray_MultiplyList(dimensions, nd); - if (self->descr->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "Invalid data-type size."); - return NULL; - } - if (size < 0 || size > MAX_INTP / self->descr->elsize) { - PyErr_NoMemory(); - return NULL; - } - - if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { - if (!PyList_Check(rawdata)) { - PyErr_SetString(PyExc_TypeError, - "object pickle not returning list"); - return NULL; - } - } - else { -#if defined(NPY_PY3K) - /* Backward compatibility with Python 2 Numpy pickles */ - if (PyUnicode_Check(rawdata)) { - PyObject *tmp; - tmp = PyUnicode_AsLatin1String(rawdata); - rawdata = tmp; - incref_base = 0; - } -#endif - - if (!PyBytes_Check(rawdata)) { - PyErr_SetString(PyExc_TypeError, - "pickle not returning string"); - return NULL; - } - - if (PyBytes_AsStringAndSize(rawdata, &datastr, &len)) - return NULL; - - if ((len != (self->descr->elsize * size))) { - PyErr_SetString(PyExc_ValueError, - "buffer size does not" \ - " match array size"); - return NULL; - } - } - - if ((self->flags & OWNDATA)) { - if (self->data != NULL) { - PyDataMem_FREE(self->data); - } - self->flags &= ~OWNDATA; - } - Py_XDECREF(self->base); - - self->flags &= ~UPDATEIFCOPY; - - if (self->dimensions != NULL) { - PyDimMem_FREE(self->dimensions); - self->dimensions = NULL; - } - - self->flags = DEFAULT; - - self->nd = nd; - - if (nd > 0) { - self->dimensions = PyDimMem_NEW(nd * 2); - self->strides = self->dimensions + nd; - memcpy(self->dimensions, dimensions, sizeof(intp)*nd); - (void) _array_fill_strides(self->strides, dimensions, nd, - (size_t) self->descr->elsize, - (fortran ? 
FORTRAN : CONTIGUOUS), - &(self->flags)); - } - - if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { - int swap=!PyArray_ISNOTSWAPPED(self); - self->data = datastr; - if (!_IsAligned(self) || swap) { - intp num = PyArray_NBYTES(self); - self->data = PyDataMem_NEW(num); - if (self->data == NULL) { - self->nd = 0; - PyDimMem_FREE(self->dimensions); - return PyErr_NoMemory(); - } - if (swap) { /* byte-swap on pickle-read */ - intp numels = num / self->descr->elsize; - self->descr->f->copyswapn(self->data, self->descr->elsize, - datastr, self->descr->elsize, - numels, 1, self); - if (!PyArray_ISEXTENDED(self)) { - self->descr = PyArray_DescrFromType(self->descr->type_num); - } - else { - self->descr = PyArray_DescrNew(typecode); - if (self->descr->byteorder == PyArray_BIG) { - self->descr->byteorder = PyArray_LITTLE; - } - else if (self->descr->byteorder == PyArray_LITTLE) { - self->descr->byteorder = PyArray_BIG; - } - } - Py_DECREF(typecode); - } - else { - memcpy(self->data, datastr, num); - } - self->flags |= OWNDATA; - self->base = NULL; - } - else { - self->base = rawdata; - if (incref_base) { - Py_INCREF(self->base); - } - } - } - else { - self->data = PyDataMem_NEW(PyArray_NBYTES(self)); - if (self->data == NULL) { - self->nd = 0; - self->data = PyDataMem_NEW(self->descr->elsize); - if (self->dimensions) { - PyDimMem_FREE(self->dimensions); - } - return PyErr_NoMemory(); - } - if (PyDataType_FLAGCHK(self->descr, NPY_NEEDS_INIT)) { - memset(self->data, 0, PyArray_NBYTES(self)); - } - self->flags |= OWNDATA; - self->base = NULL; - if (_setlist_pkl(self, rawdata) < 0) { - return NULL; - } - } - - PyArray_UpdateFlags(self, UPDATE_ALL); - - Py_INCREF(Py_None); - return Py_None; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT int -PyArray_Dump(PyObject *self, PyObject *file, int protocol) -{ - PyObject *cpick = NULL; - PyObject *ret; - if (protocol < 0) { - protocol = 2; - } - -#if defined(NPY_PY3K) - cpick = PyImport_ImportModule("pickle"); -#else - cpick = 
PyImport_ImportModule("cPickle"); -#endif - if (cpick == NULL) { - return -1; - } - if (PyBytes_Check(file) || PyUnicode_Check(file)) { - file = npy_PyFile_OpenFile(file, "wb"); - if (file == NULL) { - return -1; - } - } - else { - Py_INCREF(file); - } - ret = PyObject_CallMethod(cpick, "dump", "OOi", self, file, protocol); - Py_XDECREF(ret); - Py_DECREF(file); - Py_DECREF(cpick); - if (PyErr_Occurred()) { - return -1; - } - return 0; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT PyObject * -PyArray_Dumps(PyObject *self, int protocol) -{ - PyObject *cpick = NULL; - PyObject *ret; - if (protocol < 0) { - protocol = 2; - } -#if defined(NPY_PY3K) - cpick = PyImport_ImportModule("pickle"); -#else - cpick = PyImport_ImportModule("cPickle"); -#endif - if (cpick == NULL) { - return NULL; - } - ret = PyObject_CallMethod(cpick, "dumps", "Oi", self, protocol); - Py_DECREF(cpick); - return ret; -} - - -static PyObject * -array_dump(PyArrayObject *self, PyObject *args) -{ - PyObject *file = NULL; - int ret; - - if (!PyArg_ParseTuple(args, "O", &file)) { - return NULL; - } - ret = PyArray_Dump((PyObject *)self, file, 2); - if (ret < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_dumps(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - return PyArray_Dumps((PyObject *)self, 2); -} - - -static PyObject * -array_transpose(PyArrayObject *self, PyObject *args) -{ - PyObject *shape = Py_None; - Py_ssize_t n = PyTuple_Size(args); - PyArray_Dims permute; - PyObject *ret; - - if (n > 1) { - shape = args; - } - else if (n == 1) { - shape = PyTuple_GET_ITEM(args, 0); - } - - if (shape == Py_None) { - ret = PyArray_Transpose(self, NULL); - } - else { - if (!PyArray_IntpConverter(shape, &permute)) { - return NULL; - } - ret = PyArray_Transpose(self, &permute); - PyDimMem_FREE(permute.ptr); - } - - return ret; -} - -/* Return typenumber from dtype2 unless it is NULL, then return - NPY_DOUBLE if 
dtype1->type_num is integer or bool - and dtype1->type_num otherwise. -*/ -static int -_get_type_num_double(PyArray_Descr *dtype1, PyArray_Descr *dtype2) -{ - if (dtype2 != NULL) { - return dtype2->type_num; - } - /* For integer or bool data-types */ - if (dtype1->type_num < NPY_FLOAT) { - return NPY_DOUBLE; - } - else { - return dtype1->type_num; - } -} - -#define _CHKTYPENUM(typ) ((typ) ? (typ)->type_num : PyArray_NOTYPE) - -static PyObject * -array_mean(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int num; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(dtype); - return NULL; - } - - num = _get_type_num_double(self->descr, dtype); - Py_XDECREF(dtype); - return PyArray_Mean(self, axis, num, out); -} - -static PyObject * -array_sum(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int rtype; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(dtype); - return NULL; - } - - rtype = _CHKTYPENUM(dtype); - Py_XDECREF(dtype); - return PyArray_Sum(self, axis, rtype, out); -} - - -static PyObject * -array_cumsum(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int rtype; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(dtype); - 
return NULL; - } - - rtype = _CHKTYPENUM(dtype); - Py_XDECREF(dtype); - return PyArray_CumSum(self, axis, rtype, out); -} - -static PyObject * -array_prod(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int rtype; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(dtype); - return NULL; - } - - rtype = _CHKTYPENUM(dtype); - Py_XDECREF(dtype); - return PyArray_Prod(self, axis, rtype, out); -} - -static PyObject * -array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int rtype; - static char *kwlist[] = {"axis", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(dtype); - return NULL; - } - - rtype = _CHKTYPENUM(dtype); - Py_XDECREF(dtype); - return PyArray_CumProd(self, axis, rtype, out); -} - - -static PyObject * -array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *b; - static PyObject *numpycore = NULL; - - if (!PyArg_ParseTuple(args, "O", &b)) { - return NULL; - } - - /* Since blas-dot is exposed only on the Python side, we need to grab it - * from there */ - if (numpycore == NULL) { - numpycore = PyImport_ImportModule("numpy.core"); - if (numpycore == NULL) { - return NULL; - } - } - - return PyObject_CallMethod(numpycore, "dot", "OO", self, b); -} - - -static PyObject * -array_any(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - 
PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return PyArray_Any(self, axis, out); -} - - -static PyObject * -array_all(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArrayObject *out = NULL; - static char *kwlist[] = {"axis", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) - return NULL; - - return PyArray_All(self, axis, out); -} - - -static PyObject * -array_stddev(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int num; - double ddof = 0; - static char *kwlist[] = {"axis", "dtype", "out", "ddof", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&d", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out, - &ddof)) { - Py_XDECREF(dtype); - return NULL; - } - - num = _get_type_num_double(self->descr, dtype); - Py_XDECREF(dtype); - return __New_PyArray_Std(self, axis, num, out, 0, ddof); -} - - -static PyObject * -array_variance(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int num; - double ddof = 0; - static char *kwlist[] = {"axis", "dtype", "out", "ddof", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O&d", kwlist, - PyArray_AxisConverter, &axis, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out, - &ddof)) { - Py_XDECREF(dtype); - return NULL; - } - - num = _get_type_num_double(self->descr, dtype); - Py_XDECREF(dtype); - return __New_PyArray_Std(self, axis, num, out, 1, ddof); -} - - -static PyObject * -array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis = MAX_DIMS; - PyObject *condition; - PyArrayObject *out = NULL; - static char *kwlist[] = {"condition", "axis", "out", NULL}; - - 
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&", kwlist, - &condition, - PyArray_AxisConverter, &axis, - PyArray_OutputConverter, &out)) { - return NULL; - } - return _ARET(PyArray_Compress(self, condition, axis, out)); -} - - -static PyObject * -array_nonzero(PyArrayObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - return PyArray_Nonzero(self); -} - - -static PyObject * -array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis1 = 0, axis2 = 1, offset = 0; - PyArray_Descr *dtype = NULL; - PyArrayObject *out = NULL; - int rtype; - static char *kwlist[] = {"offset", "axis1", "axis2", "dtype", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiO&O&", kwlist, - &offset, - &axis1, - &axis2, - PyArray_DescrConverter2, &dtype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(dtype); - return NULL; - } - - rtype = _CHKTYPENUM(dtype); - Py_XDECREF(dtype); - return _ARET(PyArray_Trace(self, offset, axis1, axis2, rtype, out)); -} - -#undef _CHKTYPENUM - - -static PyObject * -array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *min = NULL, *max = NULL; - PyArrayObject *out = NULL; - static char *kwlist[] = {"min", "max", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO&", kwlist, - &min, - &max, - PyArray_OutputConverter, &out)) { - return NULL; - } - if (max == NULL && min == NULL) { - PyErr_SetString(PyExc_ValueError, "One of max or min must be given."); - return NULL; - } - return _ARET(PyArray_Clip(self, min, max, out)); -} - - -static PyObject * -array_conjugate(PyArrayObject *self, PyObject *args) -{ - - PyArrayObject *out = NULL; - if (!PyArg_ParseTuple(args, "|O&", - PyArray_OutputConverter, - &out)) { - return NULL; - } - return PyArray_Conjugate(self, out); -} - - -static PyObject * -array_diagonal(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int axis1 = 0, axis2 = 1, offset = 0; - static char *kwlist[] = {"offset", "axis1", 
"axis2", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwlist, - &offset, - &axis1, - &axis2)) { - return NULL; - } - return _ARET(PyArray_Diagonal(self, offset, axis1, axis2)); -} - - -static PyObject * -array_flatten(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_ORDER order = PyArray_CORDER; - static char *kwlist[] = {"order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, - PyArray_OrderConverter, &order)) { - return NULL; - } - return PyArray_Flatten(self, order); -} - - -static PyObject * -array_ravel(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_ORDER order = PyArray_CORDER; - static char *kwlist[] = {"order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, - PyArray_OrderConverter, &order)) { - return NULL; - } - return PyArray_Ravel(self, order); -} - - -static PyObject * -array_round(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - int decimals = 0; - PyArrayObject *out = NULL; - static char *kwlist[] = {"decimals", "out", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&", kwlist, - &decimals, - PyArray_OutputConverter, &out)) { - return NULL; - } - return _ARET(PyArray_Round(self, decimals, out)); -} - - - -static PyObject * -array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"write", "align", "uic", NULL}; - PyObject *write = Py_None; - PyObject *align = Py_None; - PyObject *uic = Py_None; - int flagback = self->flags; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist, - &write, - &align, - &uic)) - return NULL; - - if (align != Py_None) { - if (PyObject_Not(align)) { - self->flags &= ~ALIGNED; - } - else if (_IsAligned(self)) { - self->flags |= ALIGNED; - } - else { - PyErr_SetString(PyExc_ValueError, - "cannot set aligned flag of mis-"\ - "aligned array to True"); - return NULL; - } - } - - if (uic != Py_None) { - if (PyObject_IsTrue(uic)) { - self->flags = flagback; 
- PyErr_SetString(PyExc_ValueError, - "cannot set UPDATEIFCOPY " \ - "flag to True"); - return NULL; - } - else { - self->flags &= ~UPDATEIFCOPY; - Py_XDECREF(self->base); - self->base = NULL; - } - } - - if (write != Py_None) { - if (PyObject_IsTrue(write)) - if (_IsWriteable(self)) { - self->flags |= WRITEABLE; - } - else { - self->flags = flagback; - PyErr_SetString(PyExc_ValueError, - "cannot set WRITEABLE " \ - "flag to True of this " \ - "array"); \ - return NULL; - } - else - self->flags &= ~WRITEABLE; - } - - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -array_newbyteorder(PyArrayObject *self, PyObject *args) -{ - char endian = PyArray_SWAP; - PyArray_Descr *new; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) { - return NULL; - } - new = PyArray_DescrNewByteorder(self->descr, endian); - if (!new) { - return NULL; - } - return PyArray_View(self, new, NULL); - -} - -NPY_NO_EXPORT PyMethodDef array_methods[] = { - - /* for subtypes */ - {"__array__", - (PyCFunction)array_getarray, - METH_VARARGS, NULL}, - {"__array_prepare__", - (PyCFunction)array_preparearray, - METH_VARARGS, NULL}, - {"__array_wrap__", - (PyCFunction)array_wraparray, - METH_VARARGS, NULL}, - - /* for the copy module */ - {"__copy__", - (PyCFunction)array_copy, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"__deepcopy__", - (PyCFunction)array_deepcopy, - METH_VARARGS, NULL}, - - /* for Pickling */ - {"__reduce__", - (PyCFunction) array_reduce, - METH_VARARGS, NULL}, - {"__setstate__", - (PyCFunction) array_setstate, - METH_VARARGS, NULL}, - {"dumps", - (PyCFunction) array_dumps, - METH_VARARGS, NULL}, - {"dump", - (PyCFunction) array_dump, - METH_VARARGS, NULL}, - - /* Original and Extended methods added 2005 */ - {"all", - (PyCFunction)array_all, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"any", - (PyCFunction)array_any, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"argmax", - (PyCFunction)array_argmax, - METH_VARARGS | METH_KEYWORDS, NULL}, - 
{"argmin", - (PyCFunction)array_argmin, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"argsort", - (PyCFunction)array_argsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"astype", - (PyCFunction)array_astype, - METH_VARARGS, NULL}, - {"byteswap", - (PyCFunction)array_byteswap, - METH_VARARGS, NULL}, - {"choose", - (PyCFunction)array_choose, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"clip", - (PyCFunction)array_clip, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compress", - (PyCFunction)array_compress, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"conj", - (PyCFunction)array_conjugate, - METH_VARARGS, NULL}, - {"conjugate", - (PyCFunction)array_conjugate, - METH_VARARGS, NULL}, - {"copy", - (PyCFunction)array_copy, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"cumprod", - (PyCFunction)array_cumprod, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"cumsum", - (PyCFunction)array_cumsum, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"diagonal", - (PyCFunction)array_diagonal, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"dot", - (PyCFunction)array_dot, - METH_VARARGS, NULL}, - {"fill", - (PyCFunction)array_fill, - METH_VARARGS, NULL}, - {"flatten", - (PyCFunction)array_flatten, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"getfield", - (PyCFunction)array_getfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"item", - (PyCFunction)array_toscalar, - METH_VARARGS, NULL}, - {"itemset", - (PyCFunction) array_setscalar, - METH_VARARGS, NULL}, - {"setasflat", - (PyCFunction) array_setasflat, - METH_VARARGS, NULL}, - {"max", - (PyCFunction)array_max, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"mean", - (PyCFunction)array_mean, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"min", - (PyCFunction)array_min, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"newbyteorder", - (PyCFunction)array_newbyteorder, - METH_VARARGS, NULL}, - {"nonzero", - (PyCFunction)array_nonzero, - METH_VARARGS, NULL}, - {"prod", - (PyCFunction)array_prod, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"ptp", - (PyCFunction)array_ptp, - METH_VARARGS | 
METH_KEYWORDS, NULL}, - {"put", - (PyCFunction)array_put, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"ravel", - (PyCFunction)array_ravel, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"repeat", - (PyCFunction)array_repeat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"reshape", - (PyCFunction)array_reshape, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"resize", - (PyCFunction)array_resize, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"round", - (PyCFunction)array_round, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"searchsorted", - (PyCFunction)array_searchsorted, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setfield", - (PyCFunction)array_setfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setflags", - (PyCFunction)array_setflags, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"sort", - (PyCFunction)array_sort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"squeeze", - (PyCFunction)array_squeeze, - METH_VARARGS, NULL}, - {"std", - (PyCFunction)array_stddev, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"sum", - (PyCFunction)array_sum, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"swapaxes", - (PyCFunction)array_swapaxes, - METH_VARARGS, NULL}, - {"take", - (PyCFunction)array_take, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"tofile", - (PyCFunction)array_tofile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"tolist", - (PyCFunction)array_tolist, - METH_VARARGS, NULL}, - {"tostring", - (PyCFunction)array_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"trace", - (PyCFunction)array_trace, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"transpose", - (PyCFunction)array_transpose, - METH_VARARGS, NULL}, - {"var", - (PyCFunction)array_variance, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"view", - (PyCFunction)array_view, - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -#undef _ARET diff --git a/numpy-1.6.2/numpy/core/src/multiarray/methods.h b/numpy-1.6.2/numpy/core/src/multiarray/methods.h deleted file mode 100644 index 642265ccdc..0000000000 --- 
a/numpy-1.6.2/numpy/core/src/multiarray/methods.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _NPY_ARRAY_METHODS_H_ -#define _NPY_ARRAY_METHODS_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyMethodDef array_methods[]; -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy-1.6.2/numpy/core/src/multiarray/multiarray_tests.c.src deleted file mode 100644 index d6340025c2..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/multiarray_tests.c.src +++ /dev/null @@ -1,419 +0,0 @@ -#include -#include "numpy/ndarrayobject.h" - -#include "numpy/npy_3kcompat.h" - -/* - * TODO: - * - Handle mode - */ - -/**begin repeat - * #type = double, int# - * #typenum = NPY_DOUBLE, NPY_INT# - */ -static int copy_@type@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx, - npy_intp *bounds, - PyObject **out) -{ - npy_intp i, j; - @type@ *ptr; - npy_intp odims[NPY_MAXDIMS]; - PyArrayObject *aout; - - /* - * For each point in itx, copy the current neighborhood into an array which - * is appended at the output list - */ - for (i = 0; i < itx->size; ++i) { - PyArrayNeighborhoodIter_Reset(niterx); - - for (j = 0; j < itx->ao->nd; ++j) { - odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1; - } - aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, @typenum@); - if (aout == NULL) { - return -1; - } - - ptr = (@type@*)aout->data; - - for (j = 0; j < niterx->size; ++j) { - *ptr = *((@type@*)niterx->dataptr); - PyArrayNeighborhoodIter_Next(niterx); - ptr += 1; - } - - PyList_Append(*out, (PyObject*)aout); - Py_DECREF(aout); - PyArray_ITER_NEXT(itx); - } - - return 0; -} -/**end repeat**/ - -static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx, - npy_intp *bounds, - PyObject **out) -{ - npy_intp i, j; - npy_intp odims[NPY_MAXDIMS]; - PyArrayObject *aout; - PyArray_CopySwapFunc *copyswap = itx->ao->descr->f->copyswap; - npy_int itemsize = PyArray_ITEMSIZE(itx->ao); - - /* - * For each 
point in itx, copy the current neighborhood into an array which - * is appended at the output list - */ - for (i = 0; i < itx->size; ++i) { - PyArrayNeighborhoodIter_Reset(niterx); - - for (j = 0; j < itx->ao->nd; ++j) { - odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1; - } - aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, NPY_OBJECT); - if (aout == NULL) { - return -1; - } - - for (j = 0; j < niterx->size; ++j) { - copyswap(aout->data + j * itemsize, niterx->dataptr, 0, NULL); - PyArrayNeighborhoodIter_Next(niterx); - } - - PyList_Append(*out, (PyObject*)aout); - Py_DECREF(aout); - PyArray_ITER_NEXT(itx); - } - - return 0; -} - -static PyObject* -test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args) -{ - PyObject *x, *fill, *out, *b; - PyArrayObject *ax, *afill; - PyArrayIterObject *itx; - int i, typenum, mode, st; - npy_intp bounds[NPY_MAXDIMS*2]; - PyArrayNeighborhoodIterObject *niterx; - - if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) { - return NULL; - } - - if (!PySequence_Check(b)) { - return NULL; - } - - typenum = PyArray_ObjectType(x, 0); - typenum = PyArray_ObjectType(fill, typenum); - - ax = (PyArrayObject*)PyArray_FromObject(x, typenum, 1, 10); - if (ax == NULL) { - return NULL; - } - if (PySequence_Size(b) != 2 * ax->nd) { - PyErr_SetString(PyExc_ValueError, - "bounds sequence size not compatible with x input"); - goto clean_ax; - } - - out = PyList_New(0); - if (out == NULL) { - goto clean_ax; - } - - itx = (PyArrayIterObject*)PyArray_IterNew(x); - if (itx == NULL) { - goto clean_out; - } - - /* Compute boundaries for the neighborhood iterator */ - for (i = 0; i < 2 * ax->nd; ++i) { - PyObject* bound; - bound = PySequence_GetItem(b, i); - if (bounds == NULL) { - goto clean_itx; - } - if (!PyInt_Check(bound)) { - PyErr_SetString(PyExc_ValueError, "bound not long"); - Py_DECREF(bound); - goto clean_itx; - } - bounds[i] = PyInt_AsLong(bound); - Py_DECREF(bound); - } - - /* Create the neighborhood iterator */ 
- afill = NULL; - if (mode == NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING) { - afill = (PyArrayObject *)PyArray_FromObject(fill, typenum, 0, 0); - if (afill == NULL) { - goto clean_itx; - } - } - - niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( - (PyArrayIterObject*)itx, bounds, mode, afill); - if (niterx == NULL) { - goto clean_afill; - } - - switch (typenum) { - case NPY_OBJECT: - st = copy_object(itx, niterx, bounds, &out); - break; - case NPY_INT: - st = copy_int(itx, niterx, bounds, &out); - break; - case NPY_DOUBLE: - st = copy_double(itx, niterx, bounds, &out); - break; - default: - PyErr_SetString(PyExc_ValueError, "Type not supported"); - goto clean_niterx; - } - - if (st) { - goto clean_niterx; - } - - Py_DECREF(niterx); - Py_XDECREF(afill); - Py_DECREF(itx); - - Py_DECREF(ax); - - return out; - -clean_niterx: - Py_DECREF(niterx); -clean_afill: - Py_XDECREF(afill); -clean_itx: - Py_DECREF(itx); -clean_out: - Py_DECREF(out); -clean_ax: - Py_DECREF(ax); - return NULL; -} - -static int -copy_double_double(PyArrayNeighborhoodIterObject *itx, - PyArrayNeighborhoodIterObject *niterx, - npy_intp *bounds, - PyObject **out) -{ - npy_intp i, j; - double *ptr; - npy_intp odims[NPY_MAXDIMS]; - PyArrayObject *aout; - - /* - * For each point in itx, copy the current neighborhood into an array which - * is appended at the output list - */ - PyArrayNeighborhoodIter_Reset(itx); - for (i = 0; i < itx->size; ++i) { - for (j = 0; j < itx->ao->nd; ++j) { - odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1; - } - aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, NPY_DOUBLE); - if (aout == NULL) { - return -1; - } - - ptr = (double*)aout->data; - - PyArrayNeighborhoodIter_Reset(niterx); - for (j = 0; j < niterx->size; ++j) { - *ptr = *((double*)niterx->dataptr); - ptr += 1; - PyArrayNeighborhoodIter_Next(niterx); - } - PyList_Append(*out, (PyObject*)aout); - Py_DECREF(aout); - PyArrayNeighborhoodIter_Next(itx); - } - return 0; -} - -static PyObject* 
-test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args) -{ - PyObject *x, *out, *b1, *b2; - PyArrayObject *ax; - PyArrayIterObject *itx; - int i, typenum, mode1, mode2, st; - npy_intp bounds[NPY_MAXDIMS*2]; - PyArrayNeighborhoodIterObject *niterx1, *niterx2; - - if (!PyArg_ParseTuple(args, "OOiOi", &x, &b1, &mode1, &b2, &mode2)) { - return NULL; - } - - if (!PySequence_Check(b1) || !PySequence_Check(b2)) { - return NULL; - } - - typenum = PyArray_ObjectType(x, 0); - - ax = (PyArrayObject*)PyArray_FromObject(x, typenum, 1, 10); - if (ax == NULL) { - return NULL; - } - if (PySequence_Size(b1) != 2 * ax->nd) { - PyErr_SetString(PyExc_ValueError, - "bounds sequence 1 size not compatible with x input"); - goto clean_ax; - } - if (PySequence_Size(b2) != 2 * ax->nd) { - PyErr_SetString(PyExc_ValueError, - "bounds sequence 2 size not compatible with x input"); - goto clean_ax; - } - - out = PyList_New(0); - if (out == NULL) { - goto clean_ax; - } - - itx = (PyArrayIterObject*)PyArray_IterNew(x); - if (itx == NULL) { - goto clean_out; - } - - /* Compute boundaries for the neighborhood iterator */ - for (i = 0; i < 2 * ax->nd; ++i) { - PyObject* bound; - bound = PySequence_GetItem(b1, i); - if (bounds == NULL) { - goto clean_itx; - } - if (!PyInt_Check(bound)) { - PyErr_SetString(PyExc_ValueError, "bound not long"); - Py_DECREF(bound); - goto clean_itx; - } - bounds[i] = PyInt_AsLong(bound); - Py_DECREF(bound); - } - - /* Create the neighborhood iterator */ - niterx1 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( - (PyArrayIterObject*)itx, bounds, - mode1, NULL); - if (niterx1 == NULL) { - goto clean_out; - } - - for (i = 0; i < 2 * ax->nd; ++i) { - PyObject* bound; - bound = PySequence_GetItem(b2, i); - if (bounds == NULL) { - goto clean_itx; - } - if (!PyInt_Check(bound)) { - PyErr_SetString(PyExc_ValueError, "bound not long"); - Py_DECREF(bound); - goto clean_itx; - } - bounds[i] = PyInt_AsLong(bound); - Py_DECREF(bound); - } - - 
niterx2 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( - (PyArrayIterObject*)niterx1, bounds, - mode2, NULL); - if (niterx1 == NULL) { - goto clean_niterx1; - } - - switch (typenum) { - case NPY_DOUBLE: - st = copy_double_double(niterx1, niterx2, bounds, &out); - break; - default: - PyErr_SetString(PyExc_ValueError, "Type not supported"); - goto clean_niterx2; - } - - if (st) { - goto clean_niterx2; - } - - Py_DECREF(niterx2); - Py_DECREF(niterx1); - Py_DECREF(itx); - Py_DECREF(ax); - return out; - -clean_niterx2: - Py_DECREF(niterx2); -clean_niterx1: - Py_DECREF(niterx1); -clean_itx: - Py_DECREF(itx); -clean_out: - Py_DECREF(out); -clean_ax: - Py_DECREF(ax); - return NULL; -} - -static PyMethodDef Multiarray_TestsMethods[] = { - {"test_neighborhood_iterator", - test_neighborhood_iterator, - METH_VARARGS, NULL}, - {"test_neighborhood_iterator_oob", - test_neighborhood_iterator_oob, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit_multiarray_tests(void) -#else -#define RETVAL -PyMODINIT_FUNC -initmultiarray_tests(void) -#endif -{ - PyObject *m; - -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("multiarray_tests", Multiarray_TestsMethods); -#endif - if (m == NULL) { - return RETVAL; - } - import_array(); - if (PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load umath_tests module."); - } - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule.c b/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule.c deleted file mode 100644 index f121f61477..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule.c +++ /dev/null @@ -1,3868 +0,0 @@ -/* - Python Multiarray Module -- A useful 
collection of functions for creating and - using ndarrays - - Original file - Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - - Modified for numpy in 2005 - - Travis E. Oliphant - oliphant@ee.byu.edu - Brigham Young University -*/ - -/* $Id: multiarraymodule.c,v 1.36 2005/09/14 00:14:00 teoliphant Exp $ */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; - -#define PyAO PyArrayObject - -/* Internal APIs */ -#include "arraytypes.h" -#include "arrayobject.h" -#include "hashdescr.h" -#include "descriptor.h" -#include "calculation.h" -#include "number.h" -#include "scalartypes.h" -#include "numpymemoryview.h" -#include "convert_datatype.h" -#include "nditer_pywrap.h" - -/* Only here for API compatibility */ -NPY_NO_EXPORT PyTypeObject PyBigArray_Type; - -/*NUMPY_API - * Get Priority from object - */ -NPY_NO_EXPORT double -PyArray_GetPriority(PyObject *obj, double default_) -{ - PyObject *ret; - double priority = PyArray_PRIORITY; - - if (PyArray_CheckExact(obj)) - return priority; - - ret = PyObject_GetAttrString(obj, "__array_priority__"); - if (ret != NULL) { - priority = PyFloat_AsDouble(ret); - } - if (PyErr_Occurred()) { - PyErr_Clear(); - priority = default_; - } - Py_XDECREF(ret); - return priority; -} - -/*NUMPY_API - * Multiply a List of ints - */ -NPY_NO_EXPORT int -PyArray_MultiplyIntList(int *l1, int n) -{ - int s = 1; - - while (n--) { - s *= (*l1++); - } - return s; -} - -/*NUMPY_API - * Multiply a List - */ -NPY_NO_EXPORT npy_intp -PyArray_MultiplyList(npy_intp *l1, int n) -{ - npy_intp s = 1; - - while (n--) { - s *= (*l1++); - } - return s; -} - -/*NUMPY_API - * Multiply a List of Non-negative numbers with over-flow detection. 
- */ -NPY_NO_EXPORT npy_intp -PyArray_OverflowMultiplyList(npy_intp *l1, int n) -{ - npy_intp prod = 1; - npy_intp imax = NPY_MAX_INTP; - int i; - - for (i = 0; i < n; i++) { - npy_intp dim = l1[i]; - - if (dim == 0) { - return 0; - } - if (dim > imax) { - return -1; - } - imax /= dim; - prod *= dim; - } - return prod; -} - -/*NUMPY_API - * Produce a pointer into array - */ -NPY_NO_EXPORT void * -PyArray_GetPtr(PyArrayObject *obj, npy_intp* ind) -{ - int n = obj->nd; - npy_intp *strides = obj->strides; - char *dptr = obj->data; - - while (n--) { - dptr += (*strides++) * (*ind++); - } - return (void *)dptr; -} - -/*NUMPY_API - * Compare Lists - */ -NPY_NO_EXPORT int -PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n) -{ - int i; - - for (i = 0; i < n; i++) { - if (l1[i] != l2[i]) { - return 0; - } - } - return 1; -} - -/* - * simulates a C-style 1-3 dimensional array which can be accesed using - * ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation - * for 2-d and 3-d. - * - * For 2-d and up, ptr is NOT equivalent to a statically defined - * 2-d or 3-d array. In particular, it cannot be passed into a - * function that requires a true pointer to a fixed-size array. 
- */ - -/*NUMPY_API - * Simulate a C-array - * steals a reference to typedescr -- can be NULL - */ -NPY_NO_EXPORT int -PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, - PyArray_Descr* typedescr) -{ - PyArrayObject *ap; - npy_intp n, m, i, j; - char **ptr2; - char ***ptr3; - - if ((nd < 1) || (nd > 3)) { - PyErr_SetString(PyExc_ValueError, - "C arrays of only 1-3 dimensions available"); - Py_XDECREF(typedescr); - return -1; - } - if ((ap = (PyArrayObject*)PyArray_FromAny(*op, typedescr, nd, nd, - CARRAY, NULL)) == NULL) { - return -1; - } - switch(nd) { - case 1: - *((char **)ptr) = ap->data; - break; - case 2: - n = ap->dimensions[0]; - ptr2 = (char **)_pya_malloc(n * sizeof(char *)); - if (!ptr2) { - goto fail; - } - for (i = 0; i < n; i++) { - ptr2[i] = ap->data + i*ap->strides[0]; - } - *((char ***)ptr) = ptr2; - break; - case 3: - n = ap->dimensions[0]; - m = ap->dimensions[1]; - ptr3 = (char ***)_pya_malloc(n*(m+1) * sizeof(char *)); - if (!ptr3) { - goto fail; - } - for (i = 0; i < n; i++) { - ptr3[i] = ptr3[n + (m-1)*i]; - for (j = 0; j < m; j++) { - ptr3[i][j] = ap->data + i*ap->strides[0] + j*ap->strides[1]; - } - } - *((char ****)ptr) = ptr3; - } - memcpy(dims, ap->dimensions, nd*sizeof(npy_intp)); - *op = (PyObject *)ap; - return 0; - - fail: - PyErr_SetString(PyExc_MemoryError, "no memory"); - return -1; -} - -/* Deprecated --- Use PyArray_AsCArray instead */ - -/*NUMPY_API - * Convert to a 1D C-array - */ -NPY_NO_EXPORT int -PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) -{ - npy_intp newd1; - PyArray_Descr *descr; - char msg[] = "PyArray_As1D: use PyArray_AsCArray."; - - if (DEPRECATE(msg) < 0) { - return -1; - } - descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) { - return -1; - } - *d1 = (int) newd1; - return 0; -} - -/*NUMPY_API - * Convert to a 2D C-array - */ -NPY_NO_EXPORT int -PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) -{ - 
npy_intp newdims[2]; - PyArray_Descr *descr; - char msg[] = "PyArray_As1D: use PyArray_AsCArray."; - - if (DEPRECATE(msg) < 0) { - return -1; - } - descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) { - return -1; - } - *d1 = (int ) newdims[0]; - *d2 = (int ) newdims[1]; - return 0; -} - -/* End Deprecated */ - -/*NUMPY_API - * Free pointers created if As2D is called - */ -NPY_NO_EXPORT int -PyArray_Free(PyObject *op, void *ptr) -{ - PyArrayObject *ap = (PyArrayObject *)op; - - if ((ap->nd < 1) || (ap->nd > 3)) { - return -1; - } - if (ap->nd >= 2) { - _pya_free(ptr); - } - Py_DECREF(ap); - return 0; -} - - -static PyObject * -_swap_and_concat(PyObject *op, int axis, int n) -{ - PyObject *newtup = NULL; - PyObject *otmp, *arr; - int i; - - newtup = PyTuple_New(n); - if (newtup == NULL) { - return NULL; - } - for (i = 0; i < n; i++) { - otmp = PySequence_GetItem(op, i); - arr = PyArray_FROM_O(otmp); - Py_DECREF(otmp); - if (arr == NULL) { - goto fail; - } - otmp = PyArray_SwapAxes((PyArrayObject *)arr, axis, 0); - Py_DECREF(arr); - if (otmp == NULL) { - goto fail; - } - PyTuple_SET_ITEM(newtup, i, otmp); - } - otmp = PyArray_Concatenate(newtup, 0); - Py_DECREF(newtup); - if (otmp == NULL) { - return NULL; - } - arr = PyArray_SwapAxes((PyArrayObject *)otmp, axis, 0); - Py_DECREF(otmp); - return arr; - - fail: - Py_DECREF(newtup); - return NULL; -} - -/*NUMPY_API - * Concatenate - * - * Concatenate an arbitrary Python sequence into an array. - * op is a python object supporting the sequence interface. - * Its elements will be concatenated together to form a single - * multidimensional array. 
If axis is MAX_DIMS or bigger, then - * each sequence object will be flattened before concatenation -*/ -NPY_NO_EXPORT PyObject * -PyArray_Concatenate(PyObject *op, int axis) -{ - PyArrayObject *ret, **mps; - PyObject *otmp; - int i, n, tmp, nd = 0, new_dim; - char *data; - PyTypeObject *subtype; - double prior1, prior2; - npy_intp numbytes; - - n = PySequence_Length(op); - if (n == -1) { - return NULL; - } - if (n == 0) { - PyErr_SetString(PyExc_ValueError, - "concatenation of zero-length sequences is "\ - "impossible"); - return NULL; - } - - if ((axis < 0) || ((0 < axis) && (axis < MAX_DIMS))) { - return _swap_and_concat(op, axis, n); - } - mps = PyArray_ConvertToCommonType(op, &n); - if (mps == NULL) { - return NULL; - } - - /* - * Make sure these arrays are legal to concatenate. - * Must have same dimensions except d0 - */ - prior1 = PyArray_PRIORITY; - subtype = &PyArray_Type; - ret = NULL; - for (i = 0; i < n; i++) { - if (axis >= MAX_DIMS) { - otmp = PyArray_Ravel(mps[i],0); - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)otmp; - } - if (Py_TYPE(mps[i]) != subtype) { - prior2 = PyArray_GetPriority((PyObject *)(mps[i]), 0.0); - if (prior2 > prior1) { - prior1 = prior2; - subtype = Py_TYPE(mps[i]); - } - } - } - - new_dim = 0; - for (i = 0; i < n; i++) { - if (mps[i] == NULL) { - goto fail; - } - if (i == 0) { - nd = mps[i]->nd; - } - else { - if (nd != mps[i]->nd) { - PyErr_SetString(PyExc_ValueError, - "arrays must have same "\ - "number of dimensions"); - goto fail; - } - if (!PyArray_CompareLists(mps[0]->dimensions+1, - mps[i]->dimensions+1, - nd-1)) { - PyErr_SetString(PyExc_ValueError, - "array dimensions must "\ - "agree except for d_0"); - goto fail; - } - } - if (nd == 0) { - PyErr_SetString(PyExc_ValueError, - "0-d arrays can't be concatenated"); - goto fail; - } - new_dim += mps[i]->dimensions[0]; - } - tmp = mps[0]->dimensions[0]; - mps[0]->dimensions[0] = new_dim; - Py_INCREF(mps[0]->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, 
- mps[0]->descr, nd, - mps[0]->dimensions, - NULL, NULL, 0, - (PyObject *)ret); - mps[0]->dimensions[0] = tmp; - - if (ret == NULL) { - goto fail; - } - data = ret->data; - for (i = 0; i < n; i++) { - numbytes = PyArray_NBYTES(mps[i]); - memcpy(data, mps[i]->data, numbytes); - data += numbytes; - } - - PyArray_INCREF(ret); - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - } - PyDataMem_FREE(mps); - return (PyObject *)ret; - - fail: - Py_XDECREF(ret); - for (i = 0; i < n; i++) { - Py_XDECREF(mps[i]); - } - PyDataMem_FREE(mps); - return NULL; -} - -static int -_signbit_set(PyArrayObject *arr) -{ - static char bitmask = (char) 0x80; - char *ptr; /* points to the byte to test */ - char byteorder; - int elsize; - - elsize = arr->descr->elsize; - byteorder = arr->descr->byteorder; - ptr = arr->data; - if (elsize > 1 && - (byteorder == PyArray_LITTLE || - (byteorder == PyArray_NATIVE && - PyArray_ISNBO(PyArray_LITTLE)))) { - ptr += elsize - 1; - } - return ((*ptr & bitmask) != 0); -} - - -/*NUMPY_API - * ScalarKind - * - * Returns the scalar kind of a type number, with an - * optional tweak based on the scalar value itself. - * If no scalar is provided, it returns INTPOS_SCALAR - * for both signed and unsigned integers, otherwise - * it checks the sign of any signed integer to choose - * INTNEG_SCALAR when appropriate. - */ -NPY_NO_EXPORT NPY_SCALARKIND -PyArray_ScalarKind(int typenum, PyArrayObject **arr) -{ - NPY_SCALARKIND ret = PyArray_NOSCALAR; - - if ((unsigned int)typenum < NPY_NTYPES) { - ret = _npy_scalar_kinds_table[typenum]; - /* Signed integer types are INTNEG in the table */ - if (ret == PyArray_INTNEG_SCALAR) { - if (!arr || !_signbit_set(*arr)) { - ret = PyArray_INTPOS_SCALAR; - } - } - } else if (PyTypeNum_ISUSERDEF(typenum)) { - PyArray_Descr* descr = PyArray_DescrFromType(typenum); - - if (descr->f->scalarkind) { - ret = descr->f->scalarkind((arr ? 
*arr : NULL)); - } - Py_DECREF(descr); - } - - return ret; -} - -/*NUMPY_API - * - * Determines whether the data type 'thistype', with - * scalar kind 'scalar', can be coerced into 'neededtype'. - */ -NPY_NO_EXPORT int -PyArray_CanCoerceScalar(int thistype, int neededtype, - NPY_SCALARKIND scalar) -{ - PyArray_Descr* from; - int *castlist; - - /* If 'thistype' is not a scalar, it must be safely castable */ - if (scalar == PyArray_NOSCALAR) { - return PyArray_CanCastSafely(thistype, neededtype); - } - if ((unsigned int)neededtype < NPY_NTYPES) { - NPY_SCALARKIND neededscalar; - - if (scalar == PyArray_OBJECT_SCALAR) { - return PyArray_CanCastSafely(thistype, neededtype); - } - - /* - * The lookup table gives us exactly what we need for - * this comparison, which PyArray_ScalarKind would not. - * - * The rule is that positive scalars can be coerced - * to a signed ints, but negative scalars cannot be coerced - * to unsigned ints. - * _npy_scalar_kinds_table[int]==NEGINT > POSINT, - * so 1 is returned, but - * _npy_scalar_kinds_table[uint]==POSINT < NEGINT, - * so 0 is returned, as required. - * - */ - neededscalar = _npy_scalar_kinds_table[neededtype]; - if (neededscalar >= scalar) { - return 1; - } - if (!PyTypeNum_ISUSERDEF(thistype)) { - return 0; - } - } - - from = PyArray_DescrFromType(thistype); - if (from->f->cancastscalarkindto - && (castlist = from->f->cancastscalarkindto[scalar])) { - while (*castlist != PyArray_NOTYPE) { - if (*castlist++ == neededtype) { - Py_DECREF(from); - return 1; - } - } - } - Py_DECREF(from); - - return 0; -} - -/* - * Make a new empty array, of the passed size, of a type that takes the - * priority of ap1 and ap2 into account. 
- */ -static PyArrayObject * -new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, - int nd, npy_intp dimensions[], int typenum) -{ - PyArrayObject *ret; - PyTypeObject *subtype; - double prior1, prior2; - /* - * Need to choose an output array that can hold a sum - * -- use priority to determine which subtype. - */ - if (Py_TYPE(ap2) != Py_TYPE(ap1)) { - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); - } - else { - prior1 = prior2 = 0.0; - subtype = Py_TYPE(ap1); - } - if (out) { - int d; - /* verify that out is usable */ - if (Py_TYPE(out) != subtype || - PyArray_NDIM(out) != nd || - PyArray_TYPE(out) != typenum || - !PyArray_ISCARRAY(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not acceptable " - "(must have the right type, nr dimensions, and be a C-Array)"); - return 0; - } - for (d = 0; d < nd; ++d) { - if (dimensions[d] != PyArray_DIM(out, d)) { - PyErr_SetString(PyExc_ValueError, - "output array has wrong dimensions"); - return 0; - } - } - Py_INCREF(out); - return out; - } - - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? 
ap2 : ap1)); - return ret; -} - -/* Could perhaps be redone to not make contiguous arrays */ - -/*NUMPY_API - * Numeric.innerproduct(a,v) - */ -NPY_NO_EXPORT PyObject * -PyArray_InnerProduct(PyObject *op1, PyObject *op2) -{ - PyArrayObject *ap1, *ap2, *ret = NULL; - PyArrayIterObject *it1, *it2; - npy_intp i, j, l; - int typenum, nd, axis; - npy_intp is1, is2, os; - char *op; - npy_intp dimensions[MAX_DIMS]; - PyArray_DotFunc *dot; - PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - typec = PyArray_DescrFromType(typenum); - Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, ALIGNED, NULL); - if (ap1 == NULL) { - Py_DECREF(typec); - return NULL; - } - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, ALIGNED, NULL); - if (ap2 == NULL) { - goto fail; - } - if (ap1->nd == 0 || ap2->nd == 0) { - ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)Py_TYPE(ret)->tp_as_number->nb_multiply( - (PyObject *)ap1, (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - } - - l = ap1->dimensions[ap1->nd - 1]; - if (ap2->dimensions[ap2->nd - 1] != l) { - PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); - goto fail; - } - - nd = ap1->nd + ap2->nd - 2; - j = 0; - for (i = 0; i < ap1->nd - 1; i++) { - dimensions[j++] = ap1->dimensions[i]; - } - for (i = 0; i < ap2->nd - 1; i++) { - dimensions[j++] = ap2->dimensions[i]; - } - - /* - * Need to choose an output array that can hold a sum - * -- use priority to determine which subtype. 
- */ - ret = new_array_for_sum(ap1, ap2, NULL, nd, dimensions, typenum); - if (ret == NULL) { - goto fail; - } - dot = (ret->descr->f->dotfunc); - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "dot not available for this type"); - goto fail; - } - is1 = ap1->strides[ap1->nd - 1]; - is2 = ap2->strides[ap2->nd - 1]; - op = ret->data; os = ret->descr->elsize; - axis = ap1->nd - 1; - it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); - axis = ap2->nd - 1; - it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &axis); - NPY_BEGIN_THREADS_DESCR(ap2->descr); - while (1) { - while (it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) { - break; - } - PyArray_ITER_RESET(it2); - } - NPY_END_THREADS_DESCR(ap2->descr); - Py_DECREF(it1); - Py_DECREF(it2); - if (PyErr_Occurred()) { - goto fail; - } - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -/*NUMPY_API - * Numeric.matrixproduct(a,v,out) - * just like inner product but does the swapaxes stuff on the fly - */ -NPY_NO_EXPORT PyObject * -PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) -{ - PyArrayObject *ap1, *ap2, *ret = NULL; - PyArrayIterObject *it1, *it2; - npy_intp i, j, l; - int typenum, nd, axis, matchDim; - npy_intp is1, is2, os; - char *op; - npy_intp dimensions[MAX_DIMS]; - PyArray_DotFunc *dot; - PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - typec = PyArray_DescrFromType(typenum); - - Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, ALIGNED, NULL); - if (ap1 == NULL) { - Py_DECREF(typec); - return NULL; - } - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, ALIGNED, NULL); - if (ap2 == NULL) 
{ - goto fail; - } - if (ap1->nd == 0 || ap2->nd == 0) { - ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)Py_TYPE(ret)->tp_as_number->nb_multiply( - (PyObject *)ap1, (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - } - l = ap1->dimensions[ap1->nd - 1]; - if (ap2->nd > 1) { - matchDim = ap2->nd - 2; - } - else { - matchDim = 0; - } - if (ap2->dimensions[matchDim] != l) { - PyErr_SetString(PyExc_ValueError, "objects are not aligned"); - goto fail; - } - nd = ap1->nd + ap2->nd - 2; - if (nd > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, "dot: too many dimensions in result"); - goto fail; - } - j = 0; - for (i = 0; i < ap1->nd - 1; i++) { - dimensions[j++] = ap1->dimensions[i]; - } - for (i = 0; i < ap2->nd - 2; i++) { - dimensions[j++] = ap2->dimensions[i]; - } - if(ap2->nd > 1) { - dimensions[j++] = ap2->dimensions[ap2->nd-1]; - } - /* - fprintf(stderr, "nd=%d dimensions=", nd); - for(i=0; istrides[ap1->nd-1]; is2 = ap2->strides[matchDim]; - /* Choose which subtype to return */ - ret = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum); - if (ret == NULL) { - goto fail; - } - /* Ensure that multiarray.dot(,<0xM>) -> zeros((N,M)) */ - if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) { - memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); - } - else { - /* Ensure that multiarray.dot([],[]) -> 0 */ - memset(PyArray_DATA(ret), 0, PyArray_ITEMSIZE(ret)); - } - - dot = ret->descr->f->dotfunc; - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "dot not available for this type"); - goto fail; - } - - op = ret->data; os = ret->descr->elsize; - axis = ap1->nd-1; - it1 = (PyArrayIterObject *) - PyArray_IterAllButAxis((PyObject *)ap1, &axis); - it2 = (PyArrayIterObject *) - PyArray_IterAllButAxis((PyObject *)ap2, &matchDim); - NPY_BEGIN_THREADS_DESCR(ap2->descr); - while (1) { - while (it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - 
PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) { - break; - } - PyArray_ITER_RESET(it2); - } - NPY_END_THREADS_DESCR(ap2->descr); - Py_DECREF(it1); - Py_DECREF(it2); - if (PyErr_Occurred()) { - /* only for OBJECT arrays */ - goto fail; - } - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - -/*NUMPY_API - *Numeric.matrixproduct(a,v) - * just like inner product but does the swapaxes stuff on the fly - */ -NPY_NO_EXPORT PyObject * -PyArray_MatrixProduct(PyObject *op1, PyObject *op2) -{ - return PyArray_MatrixProduct2(op1, op2, NULL); -} - -/*NUMPY_API - * Copy and Transpose - * - * Could deprecate this function, as there isn't a speed benefit over - * calling Transpose and then Copy. - */ -NPY_NO_EXPORT PyObject * -PyArray_CopyAndTranspose(PyObject *op) -{ - PyArrayObject *arr, *tmp, *ret; - int i; - npy_intp new_axes_values[NPY_MAXDIMS]; - PyArray_Dims new_axes; - - /* Make sure we have an array */ - arr = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, NULL); - if (arr == NULL) { - return NULL; - } - - if (PyArray_NDIM(arr) > 1) { - /* Set up the transpose operation */ - new_axes.len = PyArray_NDIM(arr); - for (i = 0; i < new_axes.len; ++i) { - new_axes_values[i] = new_axes.len - i - 1; - } - new_axes.ptr = new_axes_values; - - /* Do the transpose (always returns a view) */ - tmp = (PyArrayObject *)PyArray_Transpose(arr, &new_axes); - if (tmp == NULL) { - Py_DECREF(arr); - return NULL; - } - } - else { - tmp = arr; - arr = NULL; - } - - /* TODO: Change this to NPY_KEEPORDER for NumPy 2.0 */ - ret = (PyArrayObject *)PyArray_NewCopy(tmp, NPY_CORDER); - - Py_XDECREF(arr); - Py_DECREF(tmp); - return (PyObject *)ret; -} - -/* - * Implementation which is common between PyArray_Correlate and PyArray_Correlate2 - * - * inverted is set to 1 if computed correlate(ap2, ap1), 0 otherwise - */ -static PyArrayObject* -_pyarray_correlate(PyArrayObject *ap1, PyArrayObject 
*ap2, int typenum, - int mode, int *inverted) -{ - PyArrayObject *ret; - npy_intp length; - npy_intp i, n1, n2, n, n_left, n_right; - npy_intp is1, is2, os; - char *ip1, *ip2, *op; - PyArray_DotFunc *dot; - - NPY_BEGIN_THREADS_DEF; - - n1 = ap1->dimensions[0]; - n2 = ap2->dimensions[0]; - if (n1 < n2) { - ret = ap1; - ap1 = ap2; - ap2 = ret; - ret = NULL; - i = n1; - n1 = n2; - n2 = i; - *inverted = 1; - } else { - *inverted = 0; - } - - length = n1; - n = n2; - switch(mode) { - case 0: - length = length - n + 1; - n_left = n_right = 0; - break; - case 1: - n_left = (npy_intp)(n/2); - n_right = n - n_left - 1; - break; - case 2: - n_right = n - 1; - n_left = n - 1; - length = length + n - 1; - break; - default: - PyErr_SetString(PyExc_ValueError, "mode must be 0, 1, or 2"); - return NULL; - } - - /* - * Need to choose an output array that can hold a sum - * -- use priority to determine which subtype. - */ - ret = new_array_for_sum(ap1, ap2, NULL, 1, &length, typenum); - if (ret == NULL) { - return NULL; - } - dot = ret->descr->f->dotfunc; - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "function not available for this data type"); - goto clean_ret; - } - - NPY_BEGIN_THREADS_DESCR(ret->descr); - is1 = ap1->strides[0]; - is2 = ap2->strides[0]; - op = ret->data; - os = ret->descr->elsize; - ip1 = ap1->data; - ip2 = ap2->data + n_left*is2; - n = n - n_left; - for (i = 0; i < n_left; i++) { - dot(ip1, is1, ip2, is2, op, n, ret); - n++; - ip2 -= is2; - op += os; - } - for (i = 0; i < (n1 - n2 + 1); i++) { - dot(ip1, is1, ip2, is2, op, n, ret); - ip1 += is1; - op += os; - } - for (i = 0; i < n_right; i++) { - n--; - dot(ip1, is1, ip2, is2, op, n, ret); - ip1 += is1; - op += os; - } - - NPY_END_THREADS_DESCR(ret->descr); - if (PyErr_Occurred()) { - goto clean_ret; - } - - return ret; - -clean_ret: - Py_DECREF(ret); - return NULL; -} - -/* - * Revert a one dimensional array in-place - * - * Return 0 on success, other value on failure - */ -static int 
-_pyarray_revert(PyArrayObject *ret) -{ - npy_intp length; - npy_intp i; - PyArray_CopySwapFunc *copyswap; - char *tmp = NULL, *sw1, *sw2; - npy_intp os; - char *op; - - length = ret->dimensions[0]; - copyswap = ret->descr->f->copyswap; - - tmp = PyArray_malloc(ret->descr->elsize); - if (tmp == NULL) { - return -1; - } - - os = ret->descr->elsize; - op = ret->data; - sw1 = op; - sw2 = op + (length - 1) * os; - if (PyArray_ISFLEXIBLE(ret) || PyArray_ISOBJECT(ret)) { - for(i = 0; i < length/2; ++i) { - memmove(tmp, sw1, os); - copyswap(tmp, NULL, 0, NULL); - memmove(sw1, sw2, os); - copyswap(sw1, NULL, 0, NULL); - memmove(sw2, tmp, os); - copyswap(sw2, NULL, 0, NULL); - sw1 += os; - sw2 -= os; - } - } else { - for(i = 0; i < length/2; ++i) { - memcpy(tmp, sw1, os); - memcpy(sw1, sw2, os); - memcpy(sw2, tmp, os); - sw1 += os; - sw2 -= os; - } - } - - PyArray_free(tmp); - return 0; -} - -/*NUMPY_API - * correlate(a1,a2,mode) - * - * This function computes the usual correlation (correlate(a1, a2) != - * correlate(a2, a1), and conjugate the second argument for complex inputs - */ -NPY_NO_EXPORT PyObject * -PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) -{ - PyArrayObject *ap1, *ap2, *ret = NULL; - int typenum; - PyArray_Descr *typec; - int inverted; - int st; - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - typec = PyArray_DescrFromType(typenum); - Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1, DEFAULT, NULL); - if (ap1 == NULL) { - Py_DECREF(typec); - return NULL; - } - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1, DEFAULT, NULL); - if (ap2 == NULL) { - goto clean_ap1; - } - - if (PyArray_ISCOMPLEX(ap2)) { - PyArrayObject *cap2; - cap2 = (PyArrayObject *)PyArray_Conjugate(ap2, NULL); - if (cap2 == NULL) { - goto clean_ap2; - } - Py_DECREF(ap2); - ap2 = cap2; - } - - ret = _pyarray_correlate(ap1, ap2, typenum, mode, &inverted); - if (ret == NULL) { - goto clean_ap2; - } - - /* 
- * If we inverted input orders, we need to reverse the output array (i.e. - * ret = ret[::-1]) - */ - if (inverted) { - st = _pyarray_revert(ret); - if(st) { - goto clean_ret; - } - } - - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - -clean_ret: - Py_DECREF(ret); -clean_ap2: - Py_DECREF(ap2); -clean_ap1: - Py_DECREF(ap1); - return NULL; -} - -/*NUMPY_API - * Numeric.correlate(a1,a2,mode) - */ -NPY_NO_EXPORT PyObject * -PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) -{ - PyArrayObject *ap1, *ap2, *ret = NULL; - int typenum; - int unused; - PyArray_Descr *typec; - - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - typec = PyArray_DescrFromType(typenum); - Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1, DEFAULT, NULL); - if (ap1 == NULL) { - Py_DECREF(typec); - return NULL; - } - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1, DEFAULT, NULL); - if (ap2 == NULL) { - goto fail; - } - - ret = _pyarray_correlate(ap1, ap2, typenum, mode, &unused); - if(ret == NULL) { - goto fail; - } - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - -fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -static PyObject * -array_putmask(PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwds) -{ - PyObject *mask, *values; - PyObject *array; - - static char *kwlist[] = {"arr", "mask", "values", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!OO:putmask", kwlist, - &PyArray_Type, &array, &mask, &values)) { - return NULL; - } - return PyArray_PutMask((PyArrayObject *)array, values, mask); -} - -/*NUMPY_API - * Convert an object to FORTRAN / C / ANY / KEEP - */ -NPY_NO_EXPORT int -PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) -{ - char *str; - /* Leave the desired default from the caller for NULL/Py_None */ - if (object == NULL || object == Py_None) { - return PY_SUCCEED; - } - else if (PyUnicode_Check(object)) { - 
PyObject *tmp; - int ret; - tmp = PyUnicode_AsASCIIString(object); - ret = PyArray_OrderConverter(tmp, val); - Py_DECREF(tmp); - return ret; - } - else if (!PyBytes_Check(object) || PyBytes_GET_SIZE(object) < 1) { - if (PyObject_IsTrue(object)) { - *val = NPY_FORTRANORDER; - } - else { - *val = NPY_CORDER; - } - if (PyErr_Occurred()) { - return PY_FAIL; - } - return PY_SUCCEED; - } - else { - str = PyBytes_AS_STRING(object); - if (str[0] == 'C' || str[0] == 'c') { - *val = NPY_CORDER; - } - else if (str[0] == 'F' || str[0] == 'f') { - *val = NPY_FORTRANORDER; - } - else if (str[0] == 'A' || str[0] == 'a') { - *val = NPY_ANYORDER; - } - else if (str[0] == 'K' || str[0] == 'k') { - *val = NPY_KEEPORDER; - } - else { - PyErr_SetString(PyExc_TypeError, - "order not understood"); - return PY_FAIL; - } - } - return PY_SUCCEED; -} - -/*NUMPY_API - * Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP - */ -NPY_NO_EXPORT int -PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) -{ - if (object == NULL || object == Py_None) { - *val = NPY_RAISE; - } - else if (PyBytes_Check(object)) { - char *str; - str = PyBytes_AS_STRING(object); - if (str[0] == 'C' || str[0] == 'c') { - *val = NPY_CLIP; - } - else if (str[0] == 'W' || str[0] == 'w') { - *val = NPY_WRAP; - } - else if (str[0] == 'R' || str[0] == 'r') { - *val = NPY_RAISE; - } - else { - PyErr_SetString(PyExc_TypeError, - "clipmode not understood"); - return PY_FAIL; - } - } - else if (PyUnicode_Check(object)) { - PyObject *tmp; - int ret; - tmp = PyUnicode_AsASCIIString(object); - ret = PyArray_ClipmodeConverter(tmp, val); - Py_DECREF(tmp); - return ret; - } - else { - int number = PyInt_AsLong(object); - if (number == -1 && PyErr_Occurred()) { - goto fail; - } - if (number <= (int) NPY_RAISE - && number >= (int) NPY_CLIP) { - *val = (NPY_CLIPMODE) number; - } - else { - goto fail; - } - } - return PY_SUCCEED; - - fail: - PyErr_SetString(PyExc_TypeError, - "clipmode not understood"); - return PY_FAIL; -} - 
-/*NUMPY_API - * Convert an object to an array of n NPY_CLIPMODE values. - * This is intended to be used in functions where a different mode - * could be applied to each axis, like in ravel_multi_index. - */ -NPY_NO_EXPORT int -PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE *modes, int n) -{ - int i; - /* Get the clip mode(s) */ - if (object && (PyTuple_Check(object) || PyList_Check(object))) { - if (PySequence_Size(object) != n) { - PyErr_Format(PyExc_ValueError, - "list of clipmodes has wrong length (%d instead of %d)", - (int)PySequence_Size(object), n); - return PY_FAIL; - } - - for (i = 0; i < n; ++i) { - PyObject *item = PySequence_GetItem(object, i); - if(item == NULL) { - return PY_FAIL; - } - - if(PyArray_ClipmodeConverter(item, &modes[i]) != PY_SUCCEED) { - Py_DECREF(item); - return PY_FAIL; - } - - Py_DECREF(item); - } - } - else if (PyArray_ClipmodeConverter(object, &modes[0]) == PY_SUCCEED) { - for (i = 1; i < n; ++i) { - modes[i] = modes[0]; - } - } - else { - return PY_FAIL; - } - return PY_SUCCEED; -} - -/* - * Compare the field dictionaries for two types. - * - * Return 1 if the contents are the same, 0 if not. 
- */ -static int -_equivalent_fields(PyObject *field1, PyObject *field2) { - - int same, val; - - if (field1 == field2) { - return 1; - } - if (field1 == NULL || field2 == NULL) { - return 0; - } -#if defined(NPY_PY3K) - val = PyObject_RichCompareBool(field1, field2, Py_EQ); - if (val != 1 || PyErr_Occurred()) { -#else - val = PyObject_Compare(field1, field2); - if (val != 0 || PyErr_Occurred()) { -#endif - same = 0; - } - else { - same = 1; - } - PyErr_Clear(); - return same; -} - -/* - * compare the metadata for two date-times - * return 1 if they are the same - * or 0 if not - */ -static int -_equivalent_units(PyObject *meta1, PyObject *meta2) -{ - PyObject *cobj1, *cobj2; - PyArray_DatetimeMetaData *data1, *data2; - - /* Same meta object */ - if (meta1 == meta2) { - return 1; - } - - cobj1 = PyDict_GetItemString(meta1, NPY_METADATA_DTSTR); - cobj2 = PyDict_GetItemString(meta2, NPY_METADATA_DTSTR); - if (cobj1 == cobj2) { - return 1; - } - -/* FIXME - * There is no err handling here. - */ - data1 = NpyCapsule_AsVoidPtr(cobj1); - data2 = NpyCapsule_AsVoidPtr(cobj2); - return ((data1->base == data2->base) - && (data1->num == data2->num) - && (data1->den == data2->den) - && (data1->events == data2->events)); -} - -/* - * Compare the subarray data for two types. - * Return 1 if they are the same, 0 if not. - */ -static int -_equivalent_subarrays(PyArray_ArrayDescr *sub1, PyArray_ArrayDescr *sub2) -{ - int val; - - if (sub1 == sub2) { - return 1; - - } - if (sub1 == NULL || sub2 == NULL) { - return 0; - } - -#if defined(NPY_PY3K) - val = PyObject_RichCompareBool(sub1->shape, sub2->shape, Py_EQ); - if (val != 1 || PyErr_Occurred()) { -#else - val = PyObject_Compare(sub1->shape, sub2->shape); - if (val != 0 || PyErr_Occurred()) { -#endif - PyErr_Clear(); - return 0; - } - - return PyArray_EquivTypes(sub1->base, sub2->base); -} - - -/*NUMPY_API - * - * This function returns true if the two typecodes are - * equivalent (same basic kind and same itemsize). 
- */ -NPY_NO_EXPORT unsigned char -PyArray_EquivTypes(PyArray_Descr *typ1, PyArray_Descr *typ2) -{ - int typenum1, typenum2, size1, size2; - - if (typ1 == typ2) { - return TRUE; - } - - typenum1 = typ1->type_num; - typenum2 = typ2->type_num; - size1 = typ1->elsize; - size2 = typ2->elsize; - - if (size1 != size2) { - return FALSE; - } - if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder)) { - return FALSE; - } - if (typ1->subarray || typ2->subarray) { - return ((typenum1 == typenum2) - && _equivalent_subarrays(typ1->subarray, typ2->subarray)); - } - if (typenum1 == PyArray_VOID - || typenum2 == PyArray_VOID) { - return ((typenum1 == typenum2) - && _equivalent_fields(typ1->fields, typ2->fields)); - } - if (typenum1 == PyArray_DATETIME - || typenum1 == PyArray_DATETIME - || typenum2 == PyArray_TIMEDELTA - || typenum2 == PyArray_TIMEDELTA) { - return ((typenum1 == typenum2) - && _equivalent_units(typ1->metadata, typ2->metadata)); - } - return typ1->kind == typ2->kind; -} - -/*NUMPY_API*/ -NPY_NO_EXPORT unsigned char -PyArray_EquivTypenums(int typenum1, int typenum2) -{ - PyArray_Descr *d1, *d2; - Bool ret; - - d1 = PyArray_DescrFromType(typenum1); - d2 = PyArray_DescrFromType(typenum2); - ret = PyArray_EquivTypes(d1, d2); - Py_DECREF(d1); - Py_DECREF(d2); - return ret; -} - -/*** END C-API FUNCTIONS **/ - -static PyObject * -_prepend_ones(PyArrayObject *arr, int nd, int ndmin) -{ - npy_intp newdims[MAX_DIMS]; - npy_intp newstrides[MAX_DIMS]; - int i, k, num; - PyObject *ret; - - num = ndmin - nd; - for (i = 0; i < num; i++) { - newdims[i] = 1; - newstrides[i] = arr->descr->elsize; - } - for (i = num; i < ndmin; i++) { - k = i - num; - newdims[i] = arr->dimensions[k]; - newstrides[i] = arr->strides[k]; - } - Py_INCREF(arr->descr); - ret = PyArray_NewFromDescr(Py_TYPE(arr), arr->descr, ndmin, - newdims, newstrides, arr->data, arr->flags, (PyObject *)arr); - /* steals a reference to arr --- so don't increment here */ - PyArray_BASE(ret) = (PyObject *)arr; 
- return ret; -} - - -#define _ARET(x) PyArray_Return((PyArrayObject *)(x)) - -#define STRIDING_OK(op, order) ((order) == NPY_ANYORDER || \ - ((order) == NPY_CORDER && \ - PyArray_ISCONTIGUOUS(op)) || \ - ((order) == NPY_FORTRANORDER && \ - PyArray_ISFORTRAN(op))) - -static PyObject * -_array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) -{ - PyObject *op, *ret = NULL; - static char *kwd[]= {"object", "dtype", "copy", "order", "subok", - "ndmin", NULL}; - Bool subok = FALSE; - Bool copy = TRUE; - int ndmin = 0, nd; - PyArray_Descr *type = NULL; - PyArray_Descr *oldtype = NULL; - NPY_ORDER order = NPY_ANYORDER; - int flags = 0; - - if (PyTuple_GET_SIZE(args) > 2) { - PyErr_SetString(PyExc_ValueError, - "only 2 non-keyword arguments accepted"); - return NULL; - } - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i", kwd, &op, - PyArray_DescrConverter2, &type, - PyArray_BoolConverter, ©, - PyArray_OrderConverter, &order, - PyArray_BoolConverter, &subok, - &ndmin)) { - goto clean_type; - } - - if (ndmin > NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "ndmin bigger than allowable number of dimensions "\ - "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); - goto clean_type; - } - /* fast exit if simple call */ - if ((subok && PyArray_Check(op)) - || (!subok && PyArray_CheckExact(op))) { - if (type == NULL) { - if (!copy && STRIDING_OK(op, order)) { - Py_INCREF(op); - ret = op; - goto finish; - } - else { - ret = PyArray_NewCopy((PyArrayObject*)op, order); - goto finish; - } - } - /* One more chance */ - oldtype = PyArray_DESCR(op); - if (PyArray_EquivTypes(oldtype, type)) { - if (!copy && STRIDING_OK(op, order)) { - Py_INCREF(op); - ret = op; - goto finish; - } - else { - ret = PyArray_NewCopy((PyArrayObject*)op, order); - if (oldtype == type) { - goto finish; - } - Py_INCREF(oldtype); - Py_DECREF(PyArray_DESCR(ret)); - PyArray_DESCR(ret) = oldtype; - goto finish; - } - } - } - - if (copy) { - flags = ENSURECOPY; - } - if (order == NPY_CORDER) { - 
flags |= CONTIGUOUS; - } - else if ((order == NPY_FORTRANORDER) - /* order == NPY_ANYORDER && */ - || (PyArray_Check(op) && PyArray_ISFORTRAN(op))) { - flags |= FORTRAN; - } - if (!subok) { - flags |= ENSUREARRAY; - } - - flags |= NPY_FORCECAST; - Py_XINCREF(type); - ret = PyArray_CheckFromAny(op, type, 0, 0, flags, NULL); - - finish: - Py_XDECREF(type); - if (!ret) { - return ret; - } - else if ((nd=PyArray_NDIM(ret)) >= ndmin) { - return ret; - } - /* - * create a new array from the same data with ones in the shape - * steals a reference to ret - */ - return _prepend_ones((PyArrayObject *)ret, nd, ndmin); - -clean_type: - Py_XDECREF(type); - return NULL; -} - -static PyObject * -array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) -{ - - static char *kwlist[] = {"shape","dtype","order",NULL}; - PyArray_Descr *typecode = NULL; - PyArray_Dims shape = {NULL, 0}; - NPY_ORDER order = NPY_CORDER; - Bool fortran; - PyObject *ret = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", kwlist, - PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &typecode, - PyArray_OrderConverter, &order)) { - goto fail; - } - - switch (order) { - case NPY_CORDER: - fortran = FALSE; - break; - case NPY_FORTRANORDER: - fortran = TRUE; - break; - default: - PyErr_SetString(PyExc_ValueError, - "only 'C' or 'F' order is permitted"); - goto fail; - } - - ret = PyArray_Empty(shape.len, shape.ptr, typecode, fortran); - PyDimMem_FREE(shape.ptr); - return ret; - - fail: - Py_XDECREF(typecode); - PyDimMem_FREE(shape.ptr); - return NULL; -} - -static PyObject * -array_empty_like(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) -{ - - static char *kwlist[] = {"prototype","dtype","order","subok",NULL}; - PyArrayObject *prototype = NULL; - PyArray_Descr *dtype = NULL; - NPY_ORDER order = NPY_KEEPORDER; - PyObject *ret = NULL; - int subok = 1; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&i", kwlist, - PyArray_Converter, &prototype, - 
PyArray_DescrConverter2, &dtype, - PyArray_OrderConverter, &order, - &subok)) { - goto fail; - } - /* steals the reference to dtype if it's not NULL */ - ret = PyArray_NewLikeArray(prototype, order, dtype, subok); - Py_DECREF(prototype); - return ret; - - fail: - Py_XDECREF(prototype); - Py_XDECREF(dtype); - return NULL; -} - -/* - * This function is needed for supporting Pickles of - * numpy scalar objects. - */ -static PyObject * -array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) -{ - - static char *kwlist[] = {"dtype","obj", NULL}; - PyArray_Descr *typecode; - PyObject *obj = NULL; - int alloc = 0; - void *dptr; - PyObject *ret; - - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|O", kwlist, - &PyArrayDescr_Type, &typecode, &obj)) { - return NULL; - } - if (typecode->elsize == 0) { - PyErr_SetString(PyExc_ValueError, - "itemsize cannot be zero"); - return NULL; - } - - if (PyDataType_FLAGCHK(typecode, NPY_ITEM_IS_POINTER)) { - if (obj == NULL) { - obj = Py_None; - } - dptr = &obj; - } - else { - if (obj == NULL) { - dptr = _pya_malloc(typecode->elsize); - if (dptr == NULL) { - return PyErr_NoMemory(); - } - memset(dptr, '\0', typecode->elsize); - alloc = 1; - } - else { - if (!PyString_Check(obj)) { - PyErr_SetString(PyExc_TypeError, - "initializing object must be a string"); - return NULL; - } - if (PyString_GET_SIZE(obj) < typecode->elsize) { - PyErr_SetString(PyExc_ValueError, - "initialization string is too small"); - return NULL; - } - dptr = PyString_AS_STRING(obj); - } - } - ret = PyArray_Scalar(dptr, typecode, NULL); - - /* free dptr which contains zeros */ - if (alloc) { - _pya_free(dptr); - } - return ret; -} - -static PyObject * -array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"shape","dtype","order",NULL}; /* XXX ? 
*/ - PyArray_Descr *typecode = NULL; - PyArray_Dims shape = {NULL, 0}; - NPY_ORDER order = NPY_CORDER; - Bool fortran = FALSE; - PyObject *ret = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", kwlist, - PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &typecode, - PyArray_OrderConverter, &order)) { - goto fail; - } - - switch (order) { - case NPY_CORDER: - fortran = FALSE; - break; - case NPY_FORTRANORDER: - fortran = TRUE; - break; - default: - PyErr_SetString(PyExc_ValueError, - "only 'C' or 'F' order is permitted"); - goto fail; - } - - ret = PyArray_Zeros(shape.len, shape.ptr, typecode, (int) fortran); - PyDimMem_FREE(shape.ptr); - return ret; - - fail: - Py_XDECREF(typecode); - PyDimMem_FREE(shape.ptr); - return ret; -} - -static PyObject * -array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *array_in; - PyArrayObject *array; - npy_intp count; - - if (!PyArg_ParseTuple(args, "O", &array_in)) { - return NULL; - } - - array = (PyArrayObject *)PyArray_FromAny(array_in, NULL, 0, 0, 0, NULL); - if (array == NULL) { - return NULL; - } - - count = PyArray_CountNonzero(array); - - Py_DECREF(array); - -#if defined(NPY_PY3K) - return (count == -1) ? NULL : PyLong_FromSsize_t(count); -#elif PY_VERSION_HEX >= 0x02050000 - return (count == -1) ? NULL : PyInt_FromSsize_t(count); -#else - if ((npy_intp)((long)count) == count) { - return (count == -1) ? NULL : PyInt_FromLong(count); - } - else { - return (count == -1) ? 
NULL : PyLong_FromVoidPtr((void*)count); - } -#endif -} - -static PyObject * -array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) -{ - char *data; - Py_ssize_t nin = -1; - char *sep = NULL; - Py_ssize_t s; - static char *kwlist[] = {"string", "dtype", "count", "sep", NULL}; - PyArray_Descr *descr = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, - "s#|O&" NPY_SSIZE_T_PYFMT "s", kwlist, - &data, &s, PyArray_DescrConverter, &descr, &nin, &sep)) { - Py_XDECREF(descr); - return NULL; - } - return PyArray_FromString(data, (npy_intp)s, descr, (npy_intp)nin, sep); -} - - - -static PyObject * -array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) -{ - PyObject *file = NULL, *ret; - int ok; - FILE *fp; - char *sep = ""; - Py_ssize_t nin = -1; - static char *kwlist[] = {"file", "dtype", "count", "sep", NULL}; - PyArray_Descr *type = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, - "O|O&" NPY_SSIZE_T_PYFMT "s", kwlist, - &file, PyArray_DescrConverter, &type, &nin, &sep)) { - Py_XDECREF(type); - return NULL; - } - if (PyString_Check(file) || PyUnicode_Check(file)) { - file = npy_PyFile_OpenFile(file, "rb"); - if (file == NULL) { - return NULL; - } - } - else { - Py_INCREF(file); - } - fp = npy_PyFile_Dup(file, "rb"); - if (fp == NULL) { - PyErr_SetString(PyExc_IOError, - "first argument must be an open file"); - Py_DECREF(file); - return NULL; - } - if (type == NULL) { - type = PyArray_DescrFromType(PyArray_DEFAULT); - } - ret = PyArray_FromFile(fp, type, (npy_intp) nin, sep); - ok = npy_PyFile_DupClose(file, fp); - Py_DECREF(file); - if (ok < 0) { - Py_DECREF(ret); - return NULL; - } - return ret; -} - -static PyObject * -array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) -{ - PyObject *iter; - Py_ssize_t nin = -1; - static char *kwlist[] = {"iter", "dtype", "count", NULL}; - PyArray_Descr *descr = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, - "OO&|" 
NPY_SSIZE_T_PYFMT, kwlist, - &iter, PyArray_DescrConverter, &descr, &nin)) { - Py_XDECREF(descr); - return NULL; - } - return PyArray_FromIter(iter, descr, (npy_intp)nin); -} - -static PyObject * -array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) -{ - PyObject *obj = NULL; - Py_ssize_t nin = -1, offset = 0; - static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL}; - PyArray_Descr *type = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, keywds, - "O|O&" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT, kwlist, - &obj, PyArray_DescrConverter, &type, &nin, &offset)) { - Py_XDECREF(type); - return NULL; - } - if (type == NULL) { - type = PyArray_DescrFromType(PyArray_DEFAULT); - } - return PyArray_FromBuffer(obj, type, (npy_intp)nin, (npy_intp)offset); -} - -static PyObject * -array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *a0; - int axis = 0; - static char *kwlist[] = {"seq", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, - &a0, PyArray_AxisConverter, &axis)) { - return NULL; - } - return PyArray_Concatenate(a0, axis); -} - -static PyObject * -array_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *b0, *a0; - - if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) { - return NULL; - } - return _ARET(PyArray_InnerProduct(a0, b0)); -} - -static PyObject * -array_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwds) -{ - PyObject *v, *a, *o = NULL; - char* kwlist[] = {"a", "b", "out", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O", kwlist, &a, &v, &o)) { - return NULL; - } - if (o == Py_None) { - o = NULL; - } - if (o != NULL && !PyArray_Check(o)) { - PyErr_SetString(PyExc_TypeError, - "'out' must be an array"); - return NULL; - } - return _ARET(PyArray_MatrixProduct2(a, v, (PyArrayObject *)o)); -} - -static int -einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, - PyArrayObject 
**op) -{ - int i, nop; - PyObject *subscripts_str; - - nop = PyTuple_GET_SIZE(args) - 1; - if (nop <= 0) { - PyErr_SetString(PyExc_ValueError, - "must specify the einstein sum subscripts string " - "and at least one operand"); - return -1; - } - else if (nop >= NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, "too many operands"); - return -1; - } - - /* Get the subscripts string */ - subscripts_str = PyTuple_GET_ITEM(args, 0); - if (PyUnicode_Check(subscripts_str)) { - *str_obj = PyUnicode_AsASCIIString(subscripts_str); - if (*str_obj == NULL) { - return -1; - } - subscripts_str = *str_obj; - } - - *subscripts = PyBytes_AsString(subscripts_str); - if (subscripts == NULL) { - Py_XDECREF(*str_obj); - *str_obj = NULL; - return -1; - } - - /* Set the operands to NULL */ - for (i = 0; i < nop; ++i) { - op[i] = NULL; - } - - /* Get the operands */ - for (i = 0; i < nop; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, i+1); - - op[i] = (PyArrayObject *)PyArray_FromAny(obj, - NULL, 0, 0, NPY_ENSUREARRAY, NULL); - if (op[i] == NULL) { - goto fail; - } - } - - return nop; - -fail: - for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - op[i] = NULL; - } - - return -1; -} - -/* - * Converts a list of subscripts to a string. - * - * Returns -1 on error, the number of characters placed in subscripts - * otherwise. 
- */ -static int -einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) -{ - int ellipsis = 0, subindex = 0; - npy_intp i, size; - PyObject *item; - - obj = PySequence_Fast(obj, "the subscripts for each operand must " - "be a list or a tuple"); - if (obj == NULL) { - return -1; - } - size = PySequence_Size(obj); - - - for (i = 0; i < size; ++i) { - item = PySequence_Fast_GET_ITEM(obj, i); - /* Ellipsis */ - if (item == Py_Ellipsis) { - if (ellipsis) { - PyErr_SetString(PyExc_ValueError, - "each subscripts list may have only one ellipsis"); - Py_DECREF(obj); - return -1; - } - if (subindex + 3 >= subsize) { - PyErr_SetString(PyExc_ValueError, - "subscripts list is too long"); - Py_DECREF(obj); - return -1; - } - subscripts[subindex++] = '.'; - subscripts[subindex++] = '.'; - subscripts[subindex++] = '.'; - ellipsis = 1; - } - /* Subscript */ - else if (PyInt_Check(item) || PyLong_Check(item)) { - long s = PyInt_AsLong(item); - if ( s < 0 || s > 2*26) { - PyErr_SetString(PyExc_ValueError, - "subscript is not within the valid range [0, 52]"); - Py_DECREF(obj); - return -1; - } - if (s < 26) { - subscripts[subindex++] = 'A' + s; - } - else { - subscripts[subindex++] = 'a' + s; - } - if (subindex >= subsize) { - PyErr_SetString(PyExc_ValueError, - "subscripts list is too long"); - Py_DECREF(obj); - return -1; - } - } - /* Invalid */ - else { - PyErr_SetString(PyExc_ValueError, - "each subscript must be either an integer " - "or an ellipsis"); - Py_DECREF(obj); - return -1; - } - } - - Py_DECREF(obj); - - return subindex; -} - -/* - * Fills in the subscripts, with maximum size subsize, and op, - * with the values in the tuple 'args'. - * - * Returns -1 on error, number of operands placed in op otherwise. 
- */ -static int -einsum_sub_op_from_lists(PyObject *args, - char *subscripts, int subsize, PyArrayObject **op) -{ - int subindex = 0; - npy_intp i, nop; - - nop = PyTuple_Size(args)/2; - - if (nop == 0) { - PyErr_SetString(PyExc_ValueError, "must provide at least an " - "operand and a subscripts list to einsum"); - return -1; - } - else if(nop >= NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, "too many operands"); - return -1; - } - - /* Set the operands to NULL */ - for (i = 0; i < nop; ++i) { - op[nop] = NULL; - } - - /* Get the operands and build the subscript string */ - for (i = 0; i < nop; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, 2*i); - int n; - - /* Comma between the subscripts for each operand */ - if (i != 0) { - subscripts[subindex++] = ','; - if (subindex >= subsize) { - PyErr_SetString(PyExc_ValueError, - "subscripts list is too long"); - goto fail; - } - } - - op[i] = (PyArrayObject *)PyArray_FromAny(obj, - NULL, 0, 0, NPY_ENSUREARRAY, NULL); - if (op[i] == NULL) { - goto fail; - } - - obj = PyTuple_GET_ITEM(args, 2*i+1); - n = einsum_list_to_subscripts(obj, subscripts+subindex, - subsize-subindex); - if (n < 0) { - goto fail; - } - subindex += n; - } - - /* Add the '->' to the string if provided */ - if (PyTuple_Size(args) == 2*nop+1) { - PyObject *obj; - int n; - - if (subindex + 2 >= subsize) { - PyErr_SetString(PyExc_ValueError, - "subscripts list is too long"); - goto fail; - } - subscripts[subindex++] = '-'; - subscripts[subindex++] = '>'; - - obj = PyTuple_GET_ITEM(args, 2*nop); - n = einsum_list_to_subscripts(obj, subscripts+subindex, - subsize-subindex); - if (n < 0) { - goto fail; - } - subindex += n; - } - - /* NULL-terminate the subscripts string */ - subscripts[subindex] = '\0'; - - return nop; - -fail: - for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - op[i] = NULL; - } - - return -1; -} - -static PyObject * -array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - char *subscripts = NULL, 
subscripts_buffer[256]; - PyObject *str_obj = NULL, *str_key_obj = NULL; - PyObject *arg0; - int i, nop; - PyArrayObject *op[NPY_MAXARGS]; - NPY_ORDER order = NPY_KEEPORDER; - NPY_CASTING casting = NPY_SAFE_CASTING; - PyArrayObject *out = NULL; - PyArray_Descr *dtype = NULL; - PyObject *ret = NULL; - - if (PyTuple_GET_SIZE(args) < 1) { - PyErr_SetString(PyExc_ValueError, - "must specify the einstein sum subscripts string " - "and at least one operand, or at least one operand " - "and its corresponding subscripts list"); - return NULL; - } - arg0 = PyTuple_GET_ITEM(args, 0); - - /* einsum('i,j', a, b), einsum('i,j->ij', a, b) */ - if (PyString_Check(arg0) || PyUnicode_Check(arg0)) { - nop = einsum_sub_op_from_str(args, &str_obj, &subscripts, op); - } - /* einsum(a, [0], b, [1]), einsum(a, [0], b, [1], [0,1]) */ - else { - nop = einsum_sub_op_from_lists(args, subscripts_buffer, - sizeof(subscripts_buffer), op); - subscripts = subscripts_buffer; - } - if (nop <= 0) { - goto finish; - } - - /* Get the keyword arguments */ - if (kwds != NULL) { - PyObject *key, *value; - Py_ssize_t pos = 0; - while (PyDict_Next(kwds, &pos, &key, &value)) { - char *str = NULL; - -#if defined(NPY_PY3K) - Py_XDECREF(str_key_obj); - str_key_obj = PyUnicode_AsASCIIString(key); - if (str_key_obj != NULL) { - key = str_key_obj; - } -#endif - - str = PyBytes_AsString(key); - - if (str == NULL) { - PyErr_Clear(); - PyErr_SetString(PyExc_TypeError, "invalid keyword"); - goto finish; - } - - if (strcmp(str,"out") == 0) { - if (PyArray_Check(value)) { - out = (PyArrayObject *)value; - } - else { - PyErr_SetString(PyExc_TypeError, - "keyword parameter out must be an " - "array for einsum"); - goto finish; - } - } - else if (strcmp(str,"order") == 0) { - if (!PyArray_OrderConverter(value, &order)) { - goto finish; - } - } - else if (strcmp(str,"casting") == 0) { - if (!PyArray_CastingConverter(value, &casting)) { - goto finish; - } - } - else if (strcmp(str,"dtype") == 0) { - if 
(!PyArray_DescrConverter2(value, &dtype)) { - goto finish; - } - } - else { - PyErr_Format(PyExc_TypeError, - "'%s' is an invalid keyword for einsum", - str); - goto finish; - } - } - } - - ret = (PyObject *)PyArray_EinsteinSum(subscripts, nop, op, dtype, - order, casting, out); - - /* If no output was supplied, possibly convert to a scalar */ - if (ret != NULL && out == NULL) { - ret = _ARET(ret); - } - -finish: - for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - } - Py_XDECREF(dtype); - Py_XDECREF(str_obj); - Py_XDECREF(str_key_obj); - /* out is a borrowed reference */ - - return ret; -} - -static PyObject * -array_fastCopyAndTranspose(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *a0; - - if (!PyArg_ParseTuple(args, "O", &a0)) { - return NULL; - } - return _ARET(PyArray_CopyAndTranspose(a0)); -} - -static PyObject * -array_correlate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *shape, *a0; - int mode = 0; - static char *kwlist[] = {"a", "v", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, - &a0, &shape, &mode)) { - return NULL; - } - return PyArray_Correlate(a0, shape, mode); -} - -static PyObject* -array_correlate2(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *shape, *a0; - int mode = 0; - static char *kwlist[] = {"a", "v", "mode", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, - &a0, &shape, &mode)) { - return NULL; - } - return PyArray_Correlate2(a0, shape, mode); -} - -static PyObject * -array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { - PyObject *o_start = NULL, *o_stop = NULL, *o_step = NULL, *range=NULL; - static char *kwd[]= {"start", "stop", "step", "dtype", NULL}; - PyArray_Descr *typecode = NULL; - - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&", kwd, - &o_start, &o_stop, &o_step, - PyArray_DescrConverter2, &typecode)) { - Py_XDECREF(typecode); - return NULL; - } - range = 
PyArray_ArangeObj(o_start, o_stop, o_step, typecode); - Py_XDECREF(typecode); - return range; -} - -/*NUMPY_API - * - * Included at the very first so not auto-grabbed and thus not labeled. - */ -NPY_NO_EXPORT unsigned int -PyArray_GetNDArrayCVersion(void) -{ - return (unsigned int)NPY_ABI_VERSION; -} - -/*NUMPY_API - * Returns the built-in (at compilation time) C API version - */ -NPY_NO_EXPORT unsigned int -PyArray_GetNDArrayCFeatureVersion(void) -{ - return (unsigned int)NPY_API_VERSION; -} - -static PyObject * -array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {NULL}; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) { - return NULL; - } - return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() ); -} - -/*NUMPY_API -*/ -NPY_NO_EXPORT int -PyArray_GetEndianness(void) -{ - const union { - npy_uint32 i; - char c[4]; - } bint = {0x01020304}; - - if (bint.c[0] == 1) { - return NPY_CPU_BIG; - } - else if (bint.c[0] == 4) { - return NPY_CPU_LITTLE; - } - else { - return NPY_CPU_UNKNOWN_ENDIAN; - } -} - -static PyObject * -array__reconstruct(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - - PyObject *ret; - PyTypeObject *subtype; - PyArray_Dims shape = {NULL, 0}; - PyArray_Descr *dtype = NULL; - - if (!PyArg_ParseTuple(args, "O!O&O&", - &PyType_Type, &subtype, - PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &dtype)) { - goto fail; - } - if (!PyType_IsSubtype(subtype, &PyArray_Type)) { - PyErr_SetString(PyExc_TypeError, - "_reconstruct: First argument must be a sub-type of ndarray"); - goto fail; - } - ret = PyArray_NewFromDescr(subtype, dtype, - (int)shape.len, shape.ptr, NULL, NULL, 0, NULL); - if (shape.ptr) { - PyDimMem_FREE(shape.ptr); - } - return ret; - - fail: - Py_XDECREF(dtype); - if (shape.ptr) { - PyDimMem_FREE(shape.ptr); - } - return NULL; -} - -static PyObject * -array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args, - PyObject *kwds) -{ - 
PyObject *op = NULL; - int repr = 1; - static char *kwlist[] = {"f", "repr", NULL}; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi", kwlist, &op, &repr)) { - return NULL; - } - /* reset the array_repr function to built-in */ - if (op == Py_None) { - op = NULL; - } - if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); - return NULL; - } - PyArray_SetStringFunction(op, repr); - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), - PyObject *kwds) -{ - PyObject *oldops = NULL; - - if ((oldops = PyArray_GetNumericOps()) == NULL) { - return NULL; - } - /* - * Should probably ensure that objects are at least callable - * Leave this to the caller for now --- error will be raised - * later when use is attempted - */ - if (kwds && PyArray_SetNumericOps(kwds) == -1) { - Py_DECREF(oldops); - PyErr_SetString(PyExc_ValueError, - "one or more objects not callable"); - return NULL; - } - return oldops; -} - -static PyObject * -array_set_datetimeparse_function(PyObject *NPY_UNUSED(self), PyObject *args, - PyObject *kwds) -{ - PyObject *op = NULL; - static char *kwlist[] = {"f", NULL}; - PyObject *_numpy_internal; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwlist, &op)) { - return NULL; - } - /* reset the array_repr function to built-in */ - if (op == Py_None) { - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - op = PyObject_GetAttrString(_numpy_internal, "datetime_from_string"); - } - else { /* Must balance reference count increment in both branches */ - if (!PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); - return NULL; - } - Py_INCREF(op); - } - PyArray_SetDatetimeParseFunction(op); - Py_DECREF(op); - Py_INCREF(Py_None); - return Py_None; -} - - -/*NUMPY_API - * Where - */ -NPY_NO_EXPORT PyObject * 
-PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) -{ - PyArrayObject *arr; - PyObject *tup = NULL, *obj = NULL; - PyObject *ret = NULL, *zero = NULL; - - arr = (PyArrayObject *)PyArray_FromAny(condition, NULL, 0, 0, 0, NULL); - if (arr == NULL) { - return NULL; - } - if ((x == NULL) && (y == NULL)) { - ret = PyArray_Nonzero(arr); - Py_DECREF(arr); - return ret; - } - if ((x == NULL) || (y == NULL)) { - Py_DECREF(arr); - PyErr_SetString(PyExc_ValueError, - "either both or neither of x and y should be given"); - return NULL; - } - - - zero = PyInt_FromLong((long) 0); - obj = PyArray_EnsureAnyArray(PyArray_GenericBinaryFunction(arr, zero, - n_ops.not_equal)); - Py_DECREF(zero); - Py_DECREF(arr); - if (obj == NULL) { - return NULL; - } - tup = Py_BuildValue("(OO)", y, x); - if (tup == NULL) { - Py_DECREF(obj); - return NULL; - } - ret = PyArray_Choose((PyAO *)obj, tup, NULL, NPY_RAISE); - Py_DECREF(obj); - Py_DECREF(tup); - return ret; -} - -static PyObject * -array_where(PyObject *NPY_UNUSED(ignored), PyObject *args) -{ - PyObject *obj = NULL, *x = NULL, *y = NULL; - - if (!PyArg_ParseTuple(args, "O|OO", &obj, &x, &y)) { - return NULL; - } - return PyArray_Where(obj, x, y); -} - -static PyObject * -array_lexsort(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) -{ - int axis = -1; - PyObject *obj; - static char *kwlist[] = {"keys", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|i", kwlist, &obj, &axis)) { - return NULL; - } - return _ARET(PyArray_LexSort(obj, axis)); -} - -#undef _ARET - -static PyObject * -array_can_cast_safely(PyObject *NPY_UNUSED(self), PyObject *args, - PyObject *kwds) -{ - PyObject *from_obj = NULL; - PyArray_Descr *d1 = NULL; - PyArray_Descr *d2 = NULL; - Bool ret; - PyObject *retobj = NULL; - NPY_CASTING casting = NPY_SAFE_CASTING; - static char *kwlist[] = {"from", "to", "casting", NULL}; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&", kwlist, - &from_obj, - PyArray_DescrConverter2, &d2, - 
PyArray_CastingConverter, &casting)) { - goto finish; - } - if (d2 == NULL) { - PyErr_SetString(PyExc_TypeError, - "did not understand one of the types; 'None' not accepted"); - goto finish; - } - - /* If the first parameter is an object or scalar, use CanCastArrayTo */ - if (PyArray_Check(from_obj)) { - ret = PyArray_CanCastArrayTo((PyArrayObject *)from_obj, d2, casting); - } - else if (PyArray_IsScalar(from_obj, Generic) || - PyArray_IsPythonNumber(from_obj)) { - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FromAny(from_obj, - NULL, 0, 0, 0, NULL); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); - } - /* Otherwise use CanCastTypeTo */ - else { - if (!PyArray_DescrConverter2(from_obj, &d1) || d1 == NULL) { - PyErr_SetString(PyExc_TypeError, - "did not understand one of the types; 'None' not accepted"); - goto finish; - } - ret = PyArray_CanCastTypeTo(d1, d2, casting); - } - - retobj = ret ? Py_True : Py_False; - Py_INCREF(retobj); - - finish: - Py_XDECREF(d1); - Py_XDECREF(d2); - return retobj; -} - -static PyObject * -array_promote_types(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyArray_Descr *d1 = NULL; - PyArray_Descr *d2 = NULL; - PyObject *ret = NULL; - if(!PyArg_ParseTuple(args, "O&O&", - PyArray_DescrConverter2, &d1, PyArray_DescrConverter2, &d2)) { - goto finish; - } - - if (d1 == NULL || d2 == NULL) { - PyErr_SetString(PyExc_TypeError, - "did not understand one of the types"); - goto finish; - } - - ret = (PyObject *)PyArray_PromoteTypes(d1, d2); - - finish: - Py_XDECREF(d1); - Py_XDECREF(d2); - return ret; -} - -static PyObject * -array_min_scalar_type(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *array_in = NULL; - PyArrayObject *array; - PyObject *ret = NULL; - - if(!PyArg_ParseTuple(args, "O", &array_in)) { - return NULL; - } - - array = (PyArrayObject *)PyArray_FromAny(array_in, NULL, 0, 0, 0, NULL); - if (array == NULL) { - return NULL; - } - - ret = (PyObject 
*)PyArray_MinScalarType(array); - Py_DECREF(array); - return ret; -} - -static PyObject * -array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - npy_intp i, len, narr = 0, ndtypes = 0; - PyArrayObject *arr[NPY_MAXARGS]; - PyArray_Descr *dtypes[NPY_MAXARGS]; - PyObject *ret = NULL; - - len = PyTuple_GET_SIZE(args); - if (len == 0) { - PyErr_SetString(PyExc_ValueError, - "at least one array or dtype is required"); - goto finish; - } - - for (i = 0; i < len; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, i); - if (PyArray_Check(obj)) { - if (narr == NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, - "too many arguments"); - goto finish; - } - Py_INCREF(obj); - arr[narr] = (PyArrayObject *)obj; - ++narr; - } - else if (PyArray_IsScalar(obj, Generic) || - PyArray_IsPythonNumber(obj)) { - if (narr == NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, - "too many arguments"); - goto finish; - } - arr[narr] = (PyArrayObject *)PyArray_FromAny(obj, - NULL, 0, 0, 0, NULL); - if (arr[narr] == NULL) { - goto finish; - } - ++narr; - } - else { - if (ndtypes == NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, - "too many arguments"); - goto finish; - } - if (!PyArray_DescrConverter2(obj, &dtypes[ndtypes])) { - goto finish; - } - ++ndtypes; - } - } - - ret = (PyObject *)PyArray_ResultType(narr, arr, ndtypes, dtypes); - -finish: - for (i = 0; i < narr; ++i) { - Py_DECREF(arr[i]); - } - for (i = 0; i < ndtypes; ++i) { - Py_DECREF(dtypes[i]); - } - return ret; -} - -#if !defined(NPY_PY3K) -static PyObject * -new_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - int size; - - if(!PyArg_ParseTuple(args, "i", &size)) { - return NULL; - } - return PyBuffer_New(size); -} - -static PyObject * -buffer_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *obj; - Py_ssize_t offset = 0, n; - Py_ssize_t size = Py_END_OF_BUFFER; - void *unused; - static char *kwlist[] = {"object", "offset", "size", NULL}; - - if 
(!PyArg_ParseTupleAndKeywords(args, kwds, - "O|" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT, kwlist, - &obj, &offset, &size)) { - return NULL; - } - if (PyObject_AsWriteBuffer(obj, &unused, &n) < 0) { - PyErr_Clear(); - return PyBuffer_FromObject(obj, offset, size); - } - else { - return PyBuffer_FromReadWriteObject(obj, offset, size); - } -} -#endif - -#ifndef _MSC_VER -#include -#include -jmp_buf _NPY_SIGSEGV_BUF; -static void -_SigSegv_Handler(int signum) -{ - longjmp(_NPY_SIGSEGV_BUF, signum); -} -#endif - -#define _test_code() { \ - test = *((char*)memptr); \ - if (!ro) { \ - *((char *)memptr) = '\0'; \ - *((char *)memptr) = test; \ - } \ - test = *((char*)memptr+size-1); \ - if (!ro) { \ - *((char *)memptr+size-1) = '\0'; \ - *((char *)memptr+size-1) = test; \ - } \ - } - -static PyObject * -as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *mem; - Py_ssize_t size; - Bool ro = FALSE, check = TRUE; - void *memptr; - static char *kwlist[] = {"mem", "size", "readonly", "check", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O" NPY_SSIZE_T_PYFMT "|O&O&", kwlist, - &mem, &size, PyArray_BoolConverter, &ro, - PyArray_BoolConverter, &check)) { - return NULL; - } - memptr = PyLong_AsVoidPtr(mem); - if (memptr == NULL) { - return NULL; - } - if (check) { - /* - * Try to dereference the start and end of the memory region - * Catch segfault and report error if it occurs - */ - char test; - int err = 0; - -#ifdef _MSC_VER - __try { - _test_code(); - } - __except(1) { - err = 1; - } -#else - PyOS_sighandler_t _npy_sig_save; - _npy_sig_save = PyOS_setsig(SIGSEGV, _SigSegv_Handler); - if (setjmp(_NPY_SIGSEGV_BUF) == 0) { - _test_code(); - } - else { - err = 1; - } - PyOS_setsig(SIGSEGV, _npy_sig_save); -#endif - if (err) { - PyErr_SetString(PyExc_ValueError, - "cannot use memory location as a buffer."); - return NULL; - } - } - - -#if defined(NPY_PY3K) - PyErr_SetString(PyExc_RuntimeError, - "XXX -- not implemented!"); - return NULL; 
-#else - if (ro) { - return PyBuffer_FromMemory(memptr, size); - } - return PyBuffer_FromReadWriteMemory(memptr, size); -#endif -} - -#undef _test_code - -static PyObject * -format_longfloat(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *obj; - unsigned int precision; - longdouble x; - static char *kwlist[] = {"x", "precision", NULL}; - static char repr[100]; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OI", kwlist, - &obj, &precision)) { - return NULL; - } - if (!PyArray_IsScalar(obj, LongDouble)) { - PyErr_SetString(PyExc_TypeError, - "not a longfloat"); - return NULL; - } - x = ((PyLongDoubleScalarObject *)obj)->obval; - if (precision > 70) { - precision = 70; - } - format_longdouble(repr, 100, x, precision); - return PyUString_FromString(repr); -} - -static PyObject * -compare_chararrays(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyObject *array; - PyObject *other; - PyArrayObject *newarr, *newoth; - int cmp_op; - Bool rstrip; - char *cmp_str; - Py_ssize_t strlength; - PyObject *res = NULL; - static char msg[] = "comparision must be '==', '!=', '<', '>', '<=', '>='"; - static char *kwlist[] = {"a1", "a2", "cmp", "rstrip", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOs#O&", kwlist, - &array, &other, &cmp_str, &strlength, - PyArray_BoolConverter, &rstrip)) { - return NULL; - } - if (strlength < 1 || strlength > 2) { - goto err; - } - if (strlength > 1) { - if (cmp_str[1] != '=') { - goto err; - } - if (cmp_str[0] == '=') { - cmp_op = Py_EQ; - } - else if (cmp_str[0] == '!') { - cmp_op = Py_NE; - } - else if (cmp_str[0] == '<') { - cmp_op = Py_LE; - } - else if (cmp_str[0] == '>') { - cmp_op = Py_GE; - } - else { - goto err; - } - } - else { - if (cmp_str[0] == '<') { - cmp_op = Py_LT; - } - else if (cmp_str[0] == '>') { - cmp_op = Py_GT; - } - else { - goto err; - } - } - - newarr = (PyArrayObject *)PyArray_FROM_O(array); - if (newarr == NULL) { - return NULL; - } - newoth = (PyArrayObject 
*)PyArray_FROM_O(other); - if (newoth == NULL) { - Py_DECREF(newarr); - return NULL; - } - if (PyArray_ISSTRING(newarr) && PyArray_ISSTRING(newoth)) { - res = _strings_richcompare(newarr, newoth, cmp_op, rstrip != 0); - } - else { - PyErr_SetString(PyExc_TypeError, - "comparison of non-string arrays"); - } - Py_DECREF(newarr); - Py_DECREF(newoth); - return res; - - err: - PyErr_SetString(PyExc_ValueError, msg); - return NULL; -} - -static PyObject * -_vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type, - PyObject* method, PyObject* args) -{ - PyObject* broadcast_args[NPY_MAXARGS]; - PyArrayMultiIterObject* in_iter = NULL; - PyArrayObject* result = NULL; - PyArrayIterObject* out_iter = NULL; - PyObject* args_tuple = NULL; - Py_ssize_t i, n, nargs; - - nargs = PySequence_Size(args) + 1; - if (nargs == -1 || nargs > NPY_MAXARGS) { - PyErr_Format(PyExc_ValueError, - "len(args) must be < %d", NPY_MAXARGS - 1); - goto err; - } - - broadcast_args[0] = (PyObject*)char_array; - for (i = 1; i < nargs; i++) { - PyObject* item = PySequence_GetItem(args, i-1); - if (item == NULL) { - goto err; - } - broadcast_args[i] = item; - Py_DECREF(item); - } - in_iter = (PyArrayMultiIterObject*)PyArray_MultiIterFromObjects - (broadcast_args, nargs, 0); - if (in_iter == NULL) { - goto err; - } - n = in_iter->numiter; - - result = (PyArrayObject*)PyArray_SimpleNewFromDescr(in_iter->nd, - in_iter->dimensions, type); - if (result == NULL) { - goto err; - } - - out_iter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)result); - if (out_iter == NULL) { - goto err; - } - - args_tuple = PyTuple_New(n); - if (args_tuple == NULL) { - goto err; - } - - while (PyArray_MultiIter_NOTDONE(in_iter)) { - PyObject* item_result; - - for (i = 0; i < n; i++) { - PyArrayIterObject* it = in_iter->iters[i]; - PyObject* arg = PyArray_ToScalar(PyArray_ITER_DATA(it), it->ao); - if (arg == NULL) { - goto err; - } - /* Steals ref to arg */ - PyTuple_SetItem(args_tuple, i, arg); - } - - item_result = 
PyObject_CallObject(method, args_tuple); - if (item_result == NULL) { - goto err; - } - - if (PyArray_SETITEM(result, PyArray_ITER_DATA(out_iter), item_result)) { - Py_DECREF(item_result); - PyErr_SetString( PyExc_TypeError, - "result array type does not match underlying function"); - goto err; - } - Py_DECREF(item_result); - - PyArray_MultiIter_NEXT(in_iter); - PyArray_ITER_NEXT(out_iter); - } - - Py_DECREF(in_iter); - Py_DECREF(out_iter); - Py_DECREF(args_tuple); - - return (PyObject*)result; - - err: - Py_XDECREF(in_iter); - Py_XDECREF(out_iter); - Py_XDECREF(args_tuple); - Py_XDECREF(result); - - return 0; -} - -static PyObject * -_vec_string_no_args(PyArrayObject* char_array, - PyArray_Descr* type, PyObject* method) -{ - /* - * This is a faster version of _vec_string_args to use when there - * are no additional arguments to the string method. This doesn't - * require a broadcast iterator (and broadcast iterators don't work - * with 1 argument anyway). - */ - PyArrayIterObject* in_iter = NULL; - PyArrayObject* result = NULL; - PyArrayIterObject* out_iter = NULL; - - in_iter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)char_array); - if (in_iter == NULL) { - goto err; - } - - result = (PyArrayObject*)PyArray_SimpleNewFromDescr( - PyArray_NDIM(char_array), PyArray_DIMS(char_array), type); - if (result == NULL) { - goto err; - } - - out_iter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)result); - if (out_iter == NULL) { - goto err; - } - - while (PyArray_ITER_NOTDONE(in_iter)) { - PyObject* item_result; - PyObject* item = PyArray_ToScalar(in_iter->dataptr, in_iter->ao); - if (item == NULL) { - goto err; - } - - item_result = PyObject_CallFunctionObjArgs(method, item, NULL); - Py_DECREF(item); - if (item_result == NULL) { - goto err; - } - - if (PyArray_SETITEM(result, PyArray_ITER_DATA(out_iter), item_result)) { - Py_DECREF(item_result); - PyErr_SetString( PyExc_TypeError, - "result array type does not match underlying function"); - goto err; - } - 
Py_DECREF(item_result); - - PyArray_ITER_NEXT(in_iter); - PyArray_ITER_NEXT(out_iter); - } - - Py_DECREF(in_iter); - Py_DECREF(out_iter); - - return (PyObject*)result; - - err: - Py_XDECREF(in_iter); - Py_XDECREF(out_iter); - Py_XDECREF(result); - - return 0; -} - -static PyObject * -_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) -{ - PyArrayObject* char_array = NULL; - PyArray_Descr *type = NULL; - PyObject* method_name; - PyObject* args_seq = NULL; - - PyObject* method = NULL; - PyObject* result = NULL; - - if (!PyArg_ParseTuple(args, "O&O&O|O", - PyArray_Converter, &char_array, - PyArray_DescrConverter, &type, - &method_name, &args_seq)) { - goto err; - } - - if (PyArray_TYPE(char_array) == NPY_STRING) { - method = PyObject_GetAttr((PyObject *)&PyString_Type, method_name); - } - else if (PyArray_TYPE(char_array) == NPY_UNICODE) { - method = PyObject_GetAttr((PyObject *)&PyUnicode_Type, method_name); - } - else { - PyErr_SetString(PyExc_TypeError, - "string operation on non-string array"); - goto err; - } - if (method == NULL) { - goto err; - } - - if (args_seq == NULL - || (PySequence_Check(args_seq) && PySequence_Size(args_seq) == 0)) { - result = _vec_string_no_args(char_array, type, method); - } - else if (PySequence_Check(args_seq)) { - result = _vec_string_with_args(char_array, type, method, args_seq); - } - else { - PyErr_SetString(PyExc_TypeError, - "'args' must be a sequence of arguments"); - goto err; - } - if (result == NULL) { - goto err; - } - - Py_DECREF(char_array); - Py_DECREF(method); - - return (PyObject*)result; - - err: - Py_XDECREF(char_array); - Py_XDECREF(method); - - return 0; -} - -#ifndef __NPY_PRIVATE_NO_SIGNAL - -SIGJMP_BUF _NPY_SIGINT_BUF; - -/*NUMPY_API - */ -NPY_NO_EXPORT void -_PyArray_SigintHandler(int signum) -{ - PyOS_setsig(signum, SIG_IGN); - SIGLONGJMP(_NPY_SIGINT_BUF, signum); -} - -/*NUMPY_API - */ -NPY_NO_EXPORT void* -_PyArray_GetSigintBuf(void) -{ - return (void *)&_NPY_SIGINT_BUF; -} - -#else - 
-NPY_NO_EXPORT void -_PyArray_SigintHandler(int signum) -{ - return; -} - -NPY_NO_EXPORT void* -_PyArray_GetSigintBuf(void) -{ - return NULL; -} - -#endif - - -static PyObject * -test_interrupt(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int kind = 0; - int a = 0; - - if (!PyArg_ParseTuple(args, "|i", &kind)) { - return NULL; - } - if (kind) { - Py_BEGIN_ALLOW_THREADS; - while (a >= 0) { - if ((a % 1000 == 0) && PyOS_InterruptOccurred()) { - break; - } - a += 1; - } - Py_END_ALLOW_THREADS; - } - else { - NPY_SIGINT_ON - while(a >= 0) { - a += 1; - } - NPY_SIGINT_OFF - } - return PyInt_FromLong(a); -} - -static struct PyMethodDef array_module_methods[] = { - {"_get_ndarray_c_version", - (PyCFunction)array__get_ndarray_c_version, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"_reconstruct", - (PyCFunction)array__reconstruct, - METH_VARARGS, NULL}, - {"set_string_function", - (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_numeric_ops", - (PyCFunction)array_set_ops_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_datetimeparse_function", - (PyCFunction)array_set_datetimeparse_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_typeDict", - (PyCFunction)array_set_typeDict, - METH_VARARGS, NULL}, - {"array", - (PyCFunction)_array_fromobject, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"nested_iters", - (PyCFunction)NpyIter_NestedIters, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"arange", - (PyCFunction)array_arange, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"zeros", - (PyCFunction)array_zeros, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"count_nonzero", - (PyCFunction)array_count_nonzero, - METH_VARARGS, NULL}, - {"empty", - (PyCFunction)array_empty, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"empty_like", - (PyCFunction)array_empty_like, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"scalar", - (PyCFunction)array_scalar, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"where", - (PyCFunction)array_where, - METH_VARARGS, NULL}, - {"lexsort", - 
(PyCFunction)array_lexsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"putmask", - (PyCFunction)array_putmask, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromstring", - (PyCFunction)array_fromstring, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"fromiter", - (PyCFunction)array_fromiter, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"concatenate", - (PyCFunction)array_concatenate, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"inner", - (PyCFunction)array_innerproduct, - METH_VARARGS, NULL}, - {"dot", - (PyCFunction)array_matrixproduct, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"einsum", - (PyCFunction)array_einsum, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"_fastCopyAndTranspose", - (PyCFunction)array_fastCopyAndTranspose, - METH_VARARGS, NULL}, - {"correlate", - (PyCFunction)array_correlate, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"correlate2", - (PyCFunction)array_correlate2, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"frombuffer", - (PyCFunction)array_frombuffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromfile", - (PyCFunction)array_fromfile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"can_cast", - (PyCFunction)array_can_cast_safely, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"promote_types", - (PyCFunction)array_promote_types, - METH_VARARGS, NULL}, - {"min_scalar_type", - (PyCFunction)array_min_scalar_type, - METH_VARARGS, NULL}, - {"result_type", - (PyCFunction)array_result_type, - METH_VARARGS, NULL}, -#if !defined(NPY_PY3K) - {"newbuffer", - (PyCFunction)new_buffer, - METH_VARARGS, NULL}, - {"getbuffer", - (PyCFunction)buffer_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, -#endif - {"int_asbuffer", - (PyCFunction)as_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"format_longfloat", - (PyCFunction)format_longfloat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compare_chararrays", - (PyCFunction)compare_chararrays, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"_vec_string", - (PyCFunction)_vec_string, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"test_interrupt", - 
(PyCFunction)test_interrupt, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -#include "__multiarray_api.c" - -/* Establish scalar-type hierarchy - * - * For dual inheritance we need to make sure that the objects being - * inherited from have the tp->mro object initialized. This is - * not necessarily true for the basic type objects of Python (it is - * checked for single inheritance but not dual in PyType_Ready). - * - * Thus, we call PyType_Ready on the standard Python Types, here. - */ -static int -setup_scalartypes(PyObject *NPY_UNUSED(dict)) -{ - initialize_casting_tables(); - initialize_numeric_types(); - - if (PyType_Ready(&PyBool_Type) < 0) { - return -1; - } -#if !defined(NPY_PY3K) - if (PyType_Ready(&PyInt_Type) < 0) { - return -1; - } -#endif - if (PyType_Ready(&PyFloat_Type) < 0) { - return -1; - } - if (PyType_Ready(&PyComplex_Type) < 0) { - return -1; - } - if (PyType_Ready(&PyString_Type) < 0) { - return -1; - } - if (PyType_Ready(&PyUnicode_Type) < 0) { - return -1; - } - -#define SINGLE_INHERIT(child, parent) \ - Py##child##ArrType_Type.tp_base = &Py##parent##ArrType_Type; \ - if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \ - PyErr_Print(); \ - PyErr_Format(PyExc_SystemError, \ - "could not initialize Py%sArrType_Type", \ - #child); \ - return -1; \ - } - - if (PyType_Ready(&PyGenericArrType_Type) < 0) { - return -1; - } - SINGLE_INHERIT(Number, Generic); - SINGLE_INHERIT(Integer, Number); - SINGLE_INHERIT(Inexact, Number); - SINGLE_INHERIT(SignedInteger, Integer); - SINGLE_INHERIT(UnsignedInteger, Integer); - SINGLE_INHERIT(Floating, Inexact); - SINGLE_INHERIT(ComplexFloating, Inexact); - SINGLE_INHERIT(Flexible, Generic); - SINGLE_INHERIT(Character, Flexible); - -#define DUAL_INHERIT(child, parent1, parent2) \ - Py##child##ArrType_Type.tp_base = &Py##parent2##ArrType_Type; \ - Py##child##ArrType_Type.tp_bases = \ - Py_BuildValue("(OO)", &Py##parent2##ArrType_Type, \ - &Py##parent1##_Type); \ - if 
(PyType_Ready(&Py##child##ArrType_Type) < 0) { \ - PyErr_Print(); \ - PyErr_Format(PyExc_SystemError, \ - "could not initialize Py%sArrType_Type", \ - #child); \ - return -1; \ - } \ - Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; - -#if defined(NPY_PY3K) -#define DUAL_INHERIT_COMPARE(child, parent1, parent2) -#else -#define DUAL_INHERIT_COMPARE(child, parent1, parent2) \ - Py##child##ArrType_Type.tp_compare = \ - Py##parent1##_Type.tp_compare; -#endif - -#define DUAL_INHERIT2(child, parent1, parent2) \ - Py##child##ArrType_Type.tp_base = &Py##parent1##_Type; \ - Py##child##ArrType_Type.tp_bases = \ - Py_BuildValue("(OO)", &Py##parent1##_Type, \ - &Py##parent2##ArrType_Type); \ - Py##child##ArrType_Type.tp_richcompare = \ - Py##parent1##_Type.tp_richcompare; \ - DUAL_INHERIT_COMPARE(child, parent1, parent2) \ - Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; \ - if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \ - PyErr_Print(); \ - PyErr_Format(PyExc_SystemError, \ - "could not initialize Py%sArrType_Type", \ - #child); \ - return -1; \ - } - - SINGLE_INHERIT(Bool, Generic); - SINGLE_INHERIT(Byte, SignedInteger); - SINGLE_INHERIT(Short, SignedInteger); -#if SIZEOF_INT == SIZEOF_LONG && !defined(NPY_PY3K) - DUAL_INHERIT(Int, Int, SignedInteger); -#else - SINGLE_INHERIT(Int, SignedInteger); -#endif -#if !defined(NPY_PY3K) - DUAL_INHERIT(Long, Int, SignedInteger); -#else - SINGLE_INHERIT(Long, SignedInteger); -#endif -#if SIZEOF_LONGLONG == SIZEOF_LONG && !defined(NPY_PY3K) - DUAL_INHERIT(LongLong, Int, SignedInteger); -#else - SINGLE_INHERIT(LongLong, SignedInteger); -#endif - - SINGLE_INHERIT(TimeInteger, SignedInteger); - SINGLE_INHERIT(Datetime, TimeInteger); - SINGLE_INHERIT(Timedelta, TimeInteger); - - /* - fprintf(stderr, - "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", - PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, - PySignedIntegerArrType_Type.tp_free); - */ - SINGLE_INHERIT(UByte, 
UnsignedInteger); - SINGLE_INHERIT(UShort, UnsignedInteger); - SINGLE_INHERIT(UInt, UnsignedInteger); - SINGLE_INHERIT(ULong, UnsignedInteger); - SINGLE_INHERIT(ULongLong, UnsignedInteger); - - SINGLE_INHERIT(Half, Floating); - SINGLE_INHERIT(Float, Floating); - DUAL_INHERIT(Double, Float, Floating); - SINGLE_INHERIT(LongDouble, Floating); - - SINGLE_INHERIT(CFloat, ComplexFloating); - DUAL_INHERIT(CDouble, Complex, ComplexFloating); - SINGLE_INHERIT(CLongDouble, ComplexFloating); - - DUAL_INHERIT2(String, String, Character); - DUAL_INHERIT2(Unicode, Unicode, Character); - - SINGLE_INHERIT(Void, Flexible); - - SINGLE_INHERIT(Object, Generic); - - return 0; - -#undef SINGLE_INHERIT -#undef DUAL_INHERIT - - /* - * Clean up string and unicode array types so they act more like - * strings -- get their tables from the standard types. - */ -} - -/* place a flag dictionary in d */ - -static void -set_flaginfo(PyObject *d) -{ - PyObject *s; - PyObject *newd; - - newd = PyDict_New(); - -#define _addnew(val, one) \ - PyDict_SetItemString(newd, #val, s=PyInt_FromLong(val)); \ - Py_DECREF(s); \ - PyDict_SetItemString(newd, #one, s=PyInt_FromLong(val)); \ - Py_DECREF(s) - -#define _addone(val) \ - PyDict_SetItemString(newd, #val, s=PyInt_FromLong(val)); \ - Py_DECREF(s) - - _addnew(OWNDATA, O); - _addnew(FORTRAN, F); - _addnew(CONTIGUOUS, C); - _addnew(ALIGNED, A); - _addnew(UPDATEIFCOPY, U); - _addnew(WRITEABLE, W); - _addone(C_CONTIGUOUS); - _addone(F_CONTIGUOUS); - -#undef _addone -#undef _addnew - - PyDict_SetItemString(d, "_flagdict", newd); - Py_DECREF(newd); - return; -} - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "multiarray", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit_multiarray(void) { -#else -#define RETVAL -PyMODINIT_FUNC initmultiarray(void) { -#endif - PyObject *m, *d, *s; 
- PyObject *c_api; - - /* Create the module and add the functions */ -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("multiarray", array_module_methods); -#endif - if (!m) { - goto err; - } - -#if defined(MS_WIN64) && defined(__GNUC__) - PyErr_WarnEx(PyExc_Warning, - "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ - "and only available for \n" \ - "testing. You are advised not to use it for production. \n\n" \ - "CRASHES ARE TO BE EXPECTED - PLEASE REPORT THEM TO NUMPY DEVELOPERS", - 1); -#endif - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - if (!d) { - goto err; - } - PyArray_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArray_Type) < 0) { - return RETVAL; - } - if (setup_scalartypes(d) < 0) { - goto err; - } - PyArrayIter_Type.tp_iter = PyObject_SelfIter; - NpyIter_Type.tp_iter = PyObject_SelfIter; - PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; - PyArrayMultiIter_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArrayIter_Type) < 0) { - return RETVAL; - } - if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - return RETVAL; - } - if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - return RETVAL; - } - PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; - if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - return RETVAL; - } - if (PyType_Ready(&NpyIter_Type) < 0) { - return RETVAL; - } - - PyArrayDescr_Type.tp_hash = PyArray_DescrHash; - if (PyType_Ready(&PyArrayDescr_Type) < 0) { - return RETVAL; - } - if (PyType_Ready(&PyArrayFlags_Type) < 0) { - return RETVAL; - } -/* FIXME - * There is no error handling here - */ - c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL); - PyDict_SetItemString(d, "_ARRAY_API", c_api); - Py_DECREF(c_api); - if (PyErr_Occurred()) { - goto err; - } - - /* Initialize types in numpymemoryview.c */ - if (_numpymemoryview_init(&s) < 0) { - return RETVAL; - } - if (s != NULL) { - PyDict_SetItemString(d, "memorysimpleview", s); - } - - 
/* - * PyExc_Exception should catch all the standard errors that are - * now raised instead of the string exception "multiarray.error" - - * This is for backward compatibility with existing code. - */ - PyDict_SetItemString (d, "error", PyExc_Exception); - - s = PyUString_FromString("3.1"); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - - s = PyUString_InternFromString(NPY_METADATA_DTSTR); - PyDict_SetItemString(d, "METADATA_DTSTR", s); - Py_DECREF(s); - -/* FIXME - * There is no error handling here - */ - s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL); - PyDict_SetItemString(d, "DATETIMEUNITS", s); - Py_DECREF(s); - -#define ADDCONST(NAME) \ - s = PyInt_FromLong(NPY_##NAME); \ - PyDict_SetItemString(d, #NAME, s); \ - Py_DECREF(s) - - - ADDCONST(ALLOW_THREADS); - ADDCONST(BUFSIZE); - ADDCONST(CLIP); - - ADDCONST(ITEM_HASOBJECT); - ADDCONST(LIST_PICKLE); - ADDCONST(ITEM_IS_POINTER); - ADDCONST(NEEDS_INIT); - ADDCONST(NEEDS_PYAPI); - ADDCONST(USE_GETITEM); - ADDCONST(USE_SETITEM); - - ADDCONST(RAISE); - ADDCONST(WRAP); - ADDCONST(MAXDIMS); -#undef ADDCONST - - Py_INCREF(&PyArray_Type); - PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type); - Py_INCREF(&PyArrayIter_Type); - PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type); - Py_INCREF(&PyArrayMultiIter_Type); - PyDict_SetItemString(d, "nditer", (PyObject *)&NpyIter_Type); - Py_INCREF(&NpyIter_Type); - PyDict_SetItemString(d, "broadcast", - (PyObject *)&PyArrayMultiIter_Type); - Py_INCREF(&PyArrayDescr_Type); - PyDict_SetItemString(d, "dtype", (PyObject *)&PyArrayDescr_Type); - - Py_INCREF(&PyArrayFlags_Type); - PyDict_SetItemString(d, "flagsobj", (PyObject *)&PyArrayFlags_Type); - - set_flaginfo(d); - - if (set_typeinfo(d) != 0) { - goto err; - } - return RETVAL; - - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - return RETVAL; -} diff --git 
a/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule.h b/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule.h deleted file mode 100644 index 5a3b14b0b5..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _NPY_MULTIARRAY_H_ -#define _NPY_MULTIARRAY_H_ - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule_onefile.c b/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule_onefile.c deleted file mode 100644 index 0acbaf515e..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/multiarraymodule_onefile.c +++ /dev/null @@ -1,49 +0,0 @@ -/* - * This file includes all the .c files needed for a complete multiarray module. - * This is used in the case where separate compilation is not enabled - * - * Note that the order of the includs matters - */ - -#include "common.c" - -#include "scalartypes.c" -#include "scalarapi.c" - -#include "datetime.c" -#include "arraytypes.c" - -#include "hashdescr.c" -#include "numpyos.c" - -#include "descriptor.c" -#include "flagsobject.c" -#include "ctors.c" -#include "iterators.c" -#include "mapping.c" -#include "number.c" -#include "getset.c" -#include "sequence.c" -#include "methods.c" -#include "convert_datatype.c" -#include "convert.c" -#include "shape.c" -#include "item_selection.c" -#include "calculation.c" -#include "usertypes.c" -#include "refcount.c" -#include "conversion_utils.c" -#include "buffer.c" - -#include "nditer.c" -#include "nditer_pywrap.c" -#include "lowlevel_strided_loops.c" -#include "dtype_transfer.c" -#include "einsum.c" -#include "ucsnarrow.c" - -#include "arrayobject.c" - -#include "numpymemoryview.c" - -#include "multiarraymodule.c" diff --git a/numpy-1.6.2/numpy/core/src/multiarray/nditer.c.src b/numpy-1.6.2/numpy/core/src/multiarray/nditer.c.src deleted file mode 100644 index ee7b887a04..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/nditer.c.src +++ /dev/null @@ -1,6422 +0,0 @@ -/* - * This file implements a 
highly flexible iterator for NumPy. - * - * Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia - * - * See LICENSE.txt for the license. - */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#include -#include -#include "convert_datatype.h" - -#include "lowlevel_strided_loops.h" - -/********** ITERATOR CONSTRUCTION TIMING **************/ -#define NPY_IT_CONSTRUCTION_TIMING 0 - -#if NPY_IT_CONSTRUCTION_TIMING -#define NPY_IT_TIME_POINT(var) { \ - unsigned int hi, lo; \ - __asm__ __volatile__ ( \ - "rdtsc" \ - : "=d" (hi), "=a" (lo)); \ - var = (((unsigned long long)hi) << 32) | lo; \ - } -#define NPY_IT_PRINT_TIME_START(var) { \ - printf("%30s: start\n", #var); \ - c_temp = var; \ - } -#define NPY_IT_PRINT_TIME_VAR(var) { \ - printf("%30s: %6.0f clocks\n", #var, \ - ((double)(var-c_temp))); \ - c_temp = var; \ - } -#else -#define NPY_IT_TIME_POINT(var) -#endif - -/******************************************************/ - -/********** PRINTF DEBUG TRACING **************/ -#define NPY_IT_DBG_TRACING 0 - -#if NPY_IT_DBG_TRACING -#define NPY_IT_DBG_PRINT(s) printf("%s", s) -#define NPY_IT_DBG_PRINT1(s, p1) printf(s, p1) -#define NPY_IT_DBG_PRINT2(s, p1, p2) printf(s, p1, p2) -#define NPY_IT_DBG_PRINT3(s, p1, p2, p3) printf(s, p1, p2, p3) -#else -#define NPY_IT_DBG_PRINT(s) -#define NPY_IT_DBG_PRINT1(s, p1) -#define NPY_IT_DBG_PRINT2(s, p1, p2) -#define NPY_IT_DBG_PRINT3(s, p1, p2, p3) -#endif -/**********************************************/ - -/* Rounds up a number of bytes to be divisible by sizeof intp */ -#if NPY_SIZEOF_INTP == 4 -#define NPY_INTP_ALIGNED(size) ((size + 0x3)&(-0x4)) -#else -#define NPY_INTP_ALIGNED(size) ((size + 0x7)&(-0x8)) -#endif - -/* Internal iterator flags */ - -/* The perm is the identity */ -#define NPY_ITFLAG_IDENTPERM 0x0001 -/* The perm has negative entries (indicating flipped axes) */ -#define NPY_ITFLAG_NEGPERM 0x0002 -/* The iterator 
is tracking an index */ -#define NPY_ITFLAG_HASINDEX 0x0004 -/* The iterator is tracking a multi-index */ -#define NPY_ITFLAG_HASMULTIINDEX 0x0008 -/* The iteration order was forced on construction */ -#define NPY_ITFLAG_FORCEDORDER 0x0010 -/* The inner loop is handled outside the iterator */ -#define NPY_ITFLAG_EXLOOP 0x0020 -/* The iterator is ranged */ -#define NPY_ITFLAG_RANGE 0x0040 -/* The iterator is buffered */ -#define NPY_ITFLAG_BUFFER 0x0080 -/* The iterator should grow the buffered inner loop when possible */ -#define NPY_ITFLAG_GROWINNER 0x0100 -/* There is just one iteration, can specialize iternext for that */ -#define NPY_ITFLAG_ONEITERATION 0x0200 -/* Delay buffer allocation until first Reset* call */ -#define NPY_ITFLAG_DELAYBUF 0x0400 -/* Iteration needs API access during iternext */ -#define NPY_ITFLAG_NEEDSAPI 0x0800 -/* Iteration includes one or more operands being reduced */ -#define NPY_ITFLAG_REDUCE 0x1000 -/* Reduce iteration doesn't need to recalculate reduce loops next time */ -#define NPY_ITFLAG_REUSE_REDUCE_LOOPS 0x2000 - -/* Internal iterator per-operand iterator flags */ - -/* The operand will be written to */ -#define NPY_OP_ITFLAG_WRITE 0x01 -/* The operand will be read from */ -#define NPY_OP_ITFLAG_READ 0x02 -/* The operand needs type conversion/byte swapping/alignment */ -#define NPY_OP_ITFLAG_CAST 0x04 -/* The operand never needs buffering */ -#define NPY_OP_ITFLAG_BUFNEVER 0x08 -/* The operand is aligned */ -#define NPY_OP_ITFLAG_ALIGNED 0x10 -/* The operand is being reduced */ -#define NPY_OP_ITFLAG_REDUCE 0x20 - -/* - * The data layout of the iterator is fully specified by - * a triple (itflags, ndim, nop). These three variables - * are expected to exist in all functions calling these macros, - * either as true variables initialized to the correct values - * from the iterator, or as constants in the case of specialized - * functions such as the various iternext functions. 
- */ - -struct NpyIter_InternalOnly { - /* Initial fixed position data */ - npy_uint32 itflags; - npy_uint16 ndim, nop; - npy_intp itersize, iterstart, iterend; - /* iterindex is only used if RANGED or BUFFERED is set */ - npy_intp iterindex; - /* The rest is variable */ - char iter_flexdata; -}; - -typedef struct NpyIter_AD NpyIter_AxisData; -typedef struct NpyIter_BD NpyIter_BufferData; - -/* Byte sizes of the iterator members */ -#define NIT_PERM_SIZEOF(itflags, ndim, nop) \ - NPY_INTP_ALIGNED(NPY_MAXDIMS) -#define NIT_DTYPES_SIZEOF(itflags, ndim, nop) \ - ((NPY_SIZEOF_INTP)*(nop)) -#define NIT_RESETDATAPTR_SIZEOF(itflags, ndim, nop) \ - ((NPY_SIZEOF_INTP)*(nop+1)) -#define NIT_BASEOFFSETS_SIZEOF(itflags, ndim, nop) \ - ((NPY_SIZEOF_INTP)*(nop+1)) -#define NIT_OPERANDS_SIZEOF(itflags, ndim, nop) \ - ((NPY_SIZEOF_INTP)*(nop)) -#define NIT_OPITFLAGS_SIZEOF(itflags, ndim, nop) \ - (NPY_INTP_ALIGNED(nop)) -#define NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop) \ - ((itflags&NPY_ITFLAG_BUFFER) ? 
((NPY_SIZEOF_INTP)*(6 + 9*nop)) : 0) - -/* Byte offsets of the iterator members starting from iter->iter_flexdata */ -#define NIT_PERM_OFFSET() \ - (0) -#define NIT_DTYPES_OFFSET(itflags, ndim, nop) \ - (NIT_PERM_OFFSET() + \ - NIT_PERM_SIZEOF(itflags, ndim, nop)) -#define NIT_RESETDATAPTR_OFFSET(itflags, ndim, nop) \ - (NIT_DTYPES_OFFSET(itflags, ndim, nop) + \ - NIT_DTYPES_SIZEOF(itflags, ndim, nop)) -#define NIT_BASEOFFSETS_OFFSET(itflags, ndim, nop) \ - (NIT_RESETDATAPTR_OFFSET(itflags, ndim, nop) + \ - NIT_RESETDATAPTR_SIZEOF(itflags, ndim, nop)) -#define NIT_OPERANDS_OFFSET(itflags, ndim, nop) \ - (NIT_BASEOFFSETS_OFFSET(itflags, ndim, nop) + \ - NIT_BASEOFFSETS_SIZEOF(itflags, ndim, nop)) -#define NIT_OPITFLAGS_OFFSET(itflags, ndim, nop) \ - (NIT_OPERANDS_OFFSET(itflags, ndim, nop) + \ - NIT_OPERANDS_SIZEOF(itflags, ndim, nop)) -#define NIT_BUFFERDATA_OFFSET(itflags, ndim, nop) \ - (NIT_OPITFLAGS_OFFSET(itflags, ndim, nop) + \ - NIT_OPITFLAGS_SIZEOF(itflags, ndim, nop)) -#define NIT_AXISDATA_OFFSET(itflags, ndim, nop) \ - (NIT_BUFFERDATA_OFFSET(itflags, ndim, nop) + \ - NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop)) - -/* Internal-only ITERATOR DATA MEMBER ACCESS */ -#define NIT_ITFLAGS(iter) \ - ((iter)->itflags) -#define NIT_NDIM(iter) \ - ((iter)->ndim) -#define NIT_NOP(iter) \ - ((iter)->nop) -#define NIT_ITERSIZE(iter) \ - (iter->itersize) -#define NIT_ITERSTART(iter) \ - (iter->iterstart) -#define NIT_ITEREND(iter) \ - (iter->iterend) -#define NIT_ITERINDEX(iter) \ - (iter->iterindex) -#define NIT_PERM(iter) ((npy_int8 *)( \ - &(iter)->iter_flexdata + NIT_PERM_OFFSET())) -#define NIT_DTYPES(iter) ((PyArray_Descr **)( \ - &(iter)->iter_flexdata + NIT_DTYPES_OFFSET(itflags, ndim, nop))) -#define NIT_RESETDATAPTR(iter) ((char **)( \ - &(iter)->iter_flexdata + NIT_RESETDATAPTR_OFFSET(itflags, ndim, nop))) -#define NIT_BASEOFFSETS(iter) ((npy_intp *)( \ - &(iter)->iter_flexdata + NIT_BASEOFFSETS_OFFSET(itflags, ndim, nop))) -#define NIT_OPERANDS(iter) 
((PyArrayObject **)( \ - &(iter)->iter_flexdata + NIT_OPERANDS_OFFSET(itflags, ndim, nop))) -#define NIT_OPITFLAGS(iter) ( \ - &(iter)->iter_flexdata + NIT_OPITFLAGS_OFFSET(itflags, ndim, nop)) -#define NIT_BUFFERDATA(iter) ((NpyIter_BufferData *)( \ - &(iter)->iter_flexdata + NIT_BUFFERDATA_OFFSET(itflags, ndim, nop))) -#define NIT_AXISDATA(iter) ((NpyIter_AxisData *)( \ - &(iter)->iter_flexdata + NIT_AXISDATA_OFFSET(itflags, ndim, nop))) - -/* Internal-only BUFFERDATA MEMBER ACCESS */ -struct NpyIter_BD { - npy_intp buffersize, size, bufiterend, - reduce_pos, reduce_outersize, reduce_outerdim; - npy_intp bd_flexdata; -}; -#define NBF_BUFFERSIZE(bufferdata) ((bufferdata)->buffersize) -#define NBF_SIZE(bufferdata) ((bufferdata)->size) -#define NBF_BUFITEREND(bufferdata) ((bufferdata)->bufiterend) -#define NBF_REDUCE_POS(bufferdata) ((bufferdata)->reduce_pos) -#define NBF_REDUCE_OUTERSIZE(bufferdata) ((bufferdata)->reduce_outersize) -#define NBF_REDUCE_OUTERDIM(bufferdata) ((bufferdata)->reduce_outerdim) -#define NBF_STRIDES(bufferdata) ( \ - &(bufferdata)->bd_flexdata + 0) -#define NBF_PTRS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 1*(nop))) -#define NBF_REDUCE_OUTERSTRIDES(bufferdata) ( \ - (&(bufferdata)->bd_flexdata + 2*(nop))) -#define NBF_REDUCE_OUTERPTRS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 3*(nop))) -#define NBF_READTRANSFERFN(bufferdata) ((PyArray_StridedTransferFn **) \ - (&(bufferdata)->bd_flexdata + 4*(nop))) -#define NBF_READTRANSFERDATA(bufferdata) ((void **) \ - (&(bufferdata)->bd_flexdata + 5*(nop))) -#define NBF_WRITETRANSFERFN(bufferdata) ((PyArray_StridedTransferFn **) \ - (&(bufferdata)->bd_flexdata + 6*(nop))) -#define NBF_WRITETRANSFERDATA(bufferdata) ((void **) \ - (&(bufferdata)->bd_flexdata + 7*(nop))) -#define NBF_BUFFERS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 8*(nop))) - -/* Internal-only AXISDATA MEMBER ACCESS. 
*/ -struct NpyIter_AD { - npy_intp shape, index; - npy_intp ad_flexdata; -}; -#define NAD_SHAPE(axisdata) ((axisdata)->shape) -#define NAD_INDEX(axisdata) ((axisdata)->index) -#define NAD_STRIDES(axisdata) ( \ - &(axisdata)->ad_flexdata + 0) -#define NAD_PTRS(axisdata) ((char **) \ - &(axisdata)->ad_flexdata + 1*(nop+1)) - -#define NAD_NSTRIDES() \ - ((nop) + ((itflags&NPY_ITFLAG_HASINDEX) ? 1 : 0)) - -/* Size of one AXISDATA struct within the iterator */ -#define NIT_AXISDATA_SIZEOF(itflags, ndim, nop) (( \ - /* intp shape */ \ - 1 + \ - /* intp index */ \ - 1 + \ - /* intp stride[nop+1] AND char* ptr[nop+1] */ \ - 2*((nop)+1) \ - )*NPY_SIZEOF_INTP ) - -/* - * Macro to advance an AXISDATA pointer by a specified count. - * Requires that sizeof_axisdata be previously initialized - * to NIT_AXISDATA_SIZEOF(itflags, ndim, nop). - */ -#define NIT_INDEX_AXISDATA(axisdata, index) ((NpyIter_AxisData *) \ - (((char *)(axisdata)) + (index)*sizeof_axisdata)) -#define NIT_ADVANCE_AXISDATA(axisdata, count) \ - axisdata = NIT_INDEX_AXISDATA(axisdata, count) - -/* Size of the whole iterator */ -#define NIT_SIZEOF_ITERATOR(itflags, ndim, nop) ( \ - sizeof(struct NpyIter_InternalOnly) + \ - NIT_AXISDATA_OFFSET(itflags, ndim, nop) + \ - NIT_AXISDATA_SIZEOF(itflags, ndim, nop)*(ndim)) - -/* Internal helper functions */ -static int -npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags); -static int -npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape); -static int -npyiter_calculate_ndim(int nop, PyArrayObject **op_in, - int oa_ndim); -static int -npyiter_check_per_op_flags(npy_uint32 flags, char *op_itflags); -static int -npyiter_prepare_one_operand(PyArrayObject **op, - char **op_dataptr, - PyArray_Descr *op_request_dtype, - PyArray_Descr** op_dtype, - npy_uint32 flags, - npy_uint32 op_flags, char *op_itflags); -static int -npyiter_prepare_operands(int nop, PyArrayObject **op_in, - PyArrayObject **op, - char **op_dataptr, - PyArray_Descr 
**op_request_dtypes, - PyArray_Descr **op_dtype, - npy_uint32 flags, - npy_uint32 *op_flags, char *op_itflags); -static int -npyiter_check_casting(int nop, PyArrayObject **op, - PyArray_Descr **op_dtype, - NPY_CASTING casting, - char *op_itflags); -static int -npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, char *op_itflags, - char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, - npy_intp *itershape, - int output_scalars); -static void -npyiter_replace_axisdata(NpyIter *iter, int iop, - PyArrayObject *op, - int op_ndim, char *op_dataptr, - int *op_axes); -static void -npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags); -static void -npyiter_apply_forced_iteration_order(NpyIter *iter, NPY_ORDER order); - -static void -npyiter_flip_negative_strides(NpyIter *iter); -static void -npyiter_reverse_axis_ordering(NpyIter *iter); -static void -npyiter_find_best_axis_ordering(NpyIter *iter); -static void -npyiter_coalesce_axes(NpyIter *iter); - -static PyArray_Descr * -npyiter_get_common_dtype(int nop, PyArrayObject **op, - char *op_itflags, PyArray_Descr **op_dtype, - PyArray_Descr **op_request_dtypes, - int only_inputs, int output_scalars); - -static PyArrayObject * -npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, - npy_uint32 flags, char *op_itflags, - int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes); -static int -npyiter_allocate_arrays(NpyIter *iter, - npy_uint32 flags, - PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, char *op_itflags, - int **op_axes, int output_scalars); -static void -npyiter_get_priority_subtype(int nop, PyArrayObject **op, - char *op_itflags, - double *subtype_priority, PyTypeObject **subtype); - -static int -npyiter_allocate_transfer_functions(NpyIter *iter); -static int -npyiter_allocate_buffers(NpyIter *iter, char **errmsg); -static void npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex); -static void -npyiter_copy_from_buffers(NpyIter *iter); -static 
void -npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs); -static npy_intp -npyiter_checkreducesize(NpyIter *iter, npy_intp count, - npy_intp *reduce_innersize, - npy_intp *reduce_outerdim); - -/*NUMPY_API - * Allocate a new iterator for multiple array objects, and advanced - * options for controlling the broadcasting, shape, and buffer size. - */ -NPY_NO_EXPORT NpyIter * -NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, - NPY_ORDER order, NPY_CASTING casting, - npy_uint32 *op_flags, - PyArray_Descr **op_request_dtypes, - int oa_ndim, int **op_axes, npy_intp *itershape, - npy_intp buffersize) -{ - npy_uint32 itflags = NPY_ITFLAG_IDENTPERM; - int idim, ndim; - int iop; - - /* The iterator being constructed */ - NpyIter *iter; - - /* Per-operand values */ - PyArrayObject **op; - PyArray_Descr **op_dtype; - char *op_itflags; - char **op_dataptr; - - npy_int8 *perm; - NpyIter_BufferData *bufferdata = NULL; - int any_allocate = 0, any_missing_dtypes = 0, - output_scalars = 0, need_subtype = 0; - - /* The subtype for automatically allocated outputs */ - double subtype_priority = NPY_PRIORITY; - PyTypeObject *subtype = &PyArray_Type; - -#if NPY_IT_CONSTRUCTION_TIMING - npy_intp c_temp, - c_start, - c_check_op_axes, - c_check_global_flags, - c_calculate_ndim, - c_malloc, - c_prepare_operands, - c_fill_axisdata, - c_compute_index_strides, - c_apply_forced_iteration_order, - c_find_best_axis_ordering, - c_get_priority_subtype, - c_find_output_common_dtype, - c_check_casting, - c_allocate_arrays, - c_coalesce_axes, - c_prepare_buffers; -#endif - - NPY_IT_TIME_POINT(c_start); - - if (nop > NPY_MAXARGS) { - PyErr_Format(PyExc_ValueError, - "Cannot construct an iterator with more than %d operands " - "(%d were requested)", (int)NPY_MAXARGS, (int)nop); - return NULL; - } - - /* Error check 'oa_ndim' and 'op_axes', which must be used together */ - if (!npyiter_check_op_axes(nop, oa_ndim, op_axes, itershape)) { - return NULL; - } - - 
NPY_IT_TIME_POINT(c_check_op_axes); - - /* Check the global iterator flags */ - if (!npyiter_check_global_flags(flags, &itflags)) { - return NULL; - } - - NPY_IT_TIME_POINT(c_check_global_flags); - - /* Calculate how many dimensions the iterator should have */ - ndim = npyiter_calculate_ndim(nop, op_in, oa_ndim); - - /* If 'ndim' is zero, any outputs should be scalars */ - if (ndim == 0) { - output_scalars = 1; - ndim = 1; - } - - NPY_IT_TIME_POINT(c_calculate_ndim); - - /* Allocate memory for the iterator */ - iter = (NpyIter*) - PyArray_malloc(NIT_SIZEOF_ITERATOR(itflags, ndim, nop)); - - NPY_IT_TIME_POINT(c_malloc); - - /* Fill in the basic data */ - NIT_ITFLAGS(iter) = itflags; - NIT_NDIM(iter) = ndim; - NIT_NOP(iter) = nop; - NIT_ITERINDEX(iter) = 0; - memset(NIT_BASEOFFSETS(iter), 0, (nop+1)*NPY_SIZEOF_INTP); - - op = NIT_OPERANDS(iter); - op_dtype = NIT_DTYPES(iter); - op_itflags = NIT_OPITFLAGS(iter); - op_dataptr = NIT_RESETDATAPTR(iter); - - /* Prepare all the operands */ - if (!npyiter_prepare_operands(nop, op_in, op, op_dataptr, - op_request_dtypes, op_dtype, - flags, - op_flags, op_itflags)) { - PyArray_free(iter); - return NULL; - } - /* Set resetindex to zero as well (it's just after the resetdataptr) */ - op_dataptr[nop] = 0; - - NPY_IT_TIME_POINT(c_prepare_operands); - - /* - * Initialize buffer data (must set the buffers and transferdata - * to NULL before we might deallocate the iterator). 
- */ - if (itflags&NPY_ITFLAG_BUFFER) { - bufferdata = NIT_BUFFERDATA(iter); - NBF_SIZE(bufferdata) = 0; - memset(NBF_BUFFERS(bufferdata), 0, nop*NPY_SIZEOF_INTP); - memset(NBF_READTRANSFERDATA(bufferdata), 0, nop*NPY_SIZEOF_INTP); - memset(NBF_WRITETRANSFERDATA(bufferdata), 0, nop*NPY_SIZEOF_INTP); - } - - /* Fill in the AXISDATA arrays and set the ITERSIZE field */ - if (!npyiter_fill_axisdata(iter, flags, op_itflags, op_dataptr, - op_flags, op_axes, itershape, - output_scalars)) { - NpyIter_Deallocate(iter); - return NULL; - } - - NPY_IT_TIME_POINT(c_fill_axisdata); - - if (itflags&NPY_ITFLAG_BUFFER) { - /* - * If buffering is enabled and no buffersize was given, use a default - * chosen to be big enough to get some amortization benefits, but - * small enough to be cache-friendly. - */ - if (buffersize <= 0) { - buffersize = NPY_BUFSIZE; - } - /* No point in a buffer bigger than the iteration size */ - if (buffersize > NIT_ITERSIZE(iter)) { - buffersize = NIT_ITERSIZE(iter); - } - NBF_BUFFERSIZE(bufferdata) = buffersize; - } - - /* - * If an index was requested, compute the strides for it. - * Note that we must do this before changing the order of the - * axes - */ - npyiter_compute_index_strides(iter, flags); - - NPY_IT_TIME_POINT(c_compute_index_strides); - - /* Initialize the perm to the identity */ - perm = NIT_PERM(iter); - for(idim = 0; idim < ndim; ++idim) { - perm[idim] = (npy_int8)idim; - } - - /* - * If an iteration order is being forced, apply it. 
- */ - npyiter_apply_forced_iteration_order(iter, order); - itflags = NIT_ITFLAGS(iter); - - NPY_IT_TIME_POINT(c_apply_forced_iteration_order); - - /* Set some flags for allocated outputs */ - for (iop = 0; iop < nop; ++iop) { - if (op[iop] == NULL) { - /* Flag this so later we can avoid flipping axes */ - any_allocate = 1; - /* If a subtype may be used, indicate so */ - if (!(op_flags[iop]&NPY_ITER_NO_SUBTYPE)) { - need_subtype = 1; - } - /* - * If the data type wasn't provided, will need to - * calculate it. - */ - if (op_dtype[iop] == NULL) { - any_missing_dtypes = 1; - } - } - } - - /* - * If the ordering was not forced, reorder the axes - * and flip negative strides to find the best one. - */ - if (!(itflags&NPY_ITFLAG_FORCEDORDER)) { - if (ndim > 1) { - npyiter_find_best_axis_ordering(iter); - } - /* - * If there's an output being allocated, we must not negate - * any strides. - */ - if (!any_allocate && !(flags&NPY_ITER_DONT_NEGATE_STRIDES)) { - npyiter_flip_negative_strides(iter); - } - itflags = NIT_ITFLAGS(iter); - } - - NPY_IT_TIME_POINT(c_find_best_axis_ordering); - - if (need_subtype) { - npyiter_get_priority_subtype(nop, op, op_itflags, - &subtype_priority, &subtype); - } - - NPY_IT_TIME_POINT(c_get_priority_subtype); - - /* - * If an automatically allocated output didn't have a specified - * dtype, we need to figure it out now, before allocating the outputs. 
- */ - if (any_missing_dtypes || (flags&NPY_ITER_COMMON_DTYPE)) { - PyArray_Descr *dtype; - int only_inputs = !(flags&NPY_ITER_COMMON_DTYPE); - - op = NIT_OPERANDS(iter); - op_dtype = NIT_DTYPES(iter); - - dtype = npyiter_get_common_dtype(nop, op, - op_itflags, op_dtype, - op_request_dtypes, - only_inputs, - output_scalars); - if (dtype == NULL) { - NpyIter_Deallocate(iter); - return NULL; - } - if (flags&NPY_ITER_COMMON_DTYPE) { - NPY_IT_DBG_PRINT("Iterator: Replacing all data types\n"); - /* Replace all the data types */ - for (iop = 0; iop < nop; ++iop) { - if (op_dtype[iop] != dtype) { - Py_XDECREF(op_dtype[iop]); - Py_INCREF(dtype); - op_dtype[iop] = dtype; - } - } - } - else { - NPY_IT_DBG_PRINT("Iterator: Setting unset output data types\n"); - /* Replace the NULL data types */ - for (iop = 0; iop < nop; ++iop) { - if (op_dtype[iop] == NULL) { - Py_INCREF(dtype); - op_dtype[iop] = dtype; - } - } - } - Py_DECREF(dtype); - } - - NPY_IT_TIME_POINT(c_find_output_common_dtype); - - /* - * All of the data types have been settled, so it's time - * to check that data type conversions are following the - * casting rules. - */ - if (!npyiter_check_casting(nop, op, op_dtype, casting, op_itflags)) { - NpyIter_Deallocate(iter); - return NULL; - } - - NPY_IT_TIME_POINT(c_check_casting); - - /* - * At this point, the iteration order has been finalized. so - * any allocation of ops that were NULL, or any temporary - * copying due to casting/byte order/alignment can be - * done now using a memory layout matching the iterator. - */ - if (!npyiter_allocate_arrays(iter, flags, op_dtype, subtype, op_flags, - op_itflags, op_axes, output_scalars)) { - NpyIter_Deallocate(iter); - return NULL; - } - - NPY_IT_TIME_POINT(c_allocate_arrays); - - /* - * Finally, if a multi-index wasn't requested, - * it may be possible to coalesce some axes together. 
- */ - if (ndim > 1 && !(itflags&NPY_ITFLAG_HASMULTIINDEX)) { - npyiter_coalesce_axes(iter); - /* - * The operation may have changed the layout, so we have to - * get the internal pointers again. - */ - itflags = NIT_ITFLAGS(iter); - ndim = NIT_NDIM(iter); - op = NIT_OPERANDS(iter); - op_dtype = NIT_DTYPES(iter); - op_itflags = NIT_OPITFLAGS(iter); - op_dataptr = NIT_RESETDATAPTR(iter); - } - - NPY_IT_TIME_POINT(c_coalesce_axes); - - /* - * Now that the axes are finished, check whether we can apply - * the single iteration optimization to the iternext function. - */ - if (!(itflags&NPY_ITFLAG_BUFFER)) { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - if (itflags&NPY_ITFLAG_EXLOOP) { - if (NIT_ITERSIZE(iter) == NAD_SHAPE(axisdata)) { - NIT_ITFLAGS(iter) |= NPY_ITFLAG_ONEITERATION; - } - } - else if (NIT_ITERSIZE(iter) == 1) { - NIT_ITFLAGS(iter) |= NPY_ITFLAG_ONEITERATION; - } - } - - /* - * If REFS_OK was specified, check whether there are any - * reference arrays and flag it if so. 
- */ - if (flags&NPY_ITER_REFS_OK) { - for (iop = 0; iop < nop; ++iop) { - PyArray_Descr *rdt = op_dtype[iop]; - if ((rdt->flags&(NPY_ITEM_REFCOUNT| - NPY_ITEM_IS_POINTER| - NPY_NEEDS_PYAPI)) != 0) { - /* Iteration needs API access */ - NIT_ITFLAGS(iter) |= NPY_ITFLAG_NEEDSAPI; - } - } - } - - /* If buffering is set without delayed allocation */ - if (itflags&NPY_ITFLAG_BUFFER) { - if (!npyiter_allocate_transfer_functions(iter)) { - NpyIter_Deallocate(iter); - return NULL; - } - if (itflags&NPY_ITFLAG_DELAYBUF) { - bufferdata = NIT_BUFFERDATA(iter); - /* Make the data pointers NULL */ - memset(NBF_PTRS(bufferdata), 0, nop*NPY_SIZEOF_INTP); - } - else { - /* Allocate the buffers */ - if (!npyiter_allocate_buffers(iter, NULL)) { - NpyIter_Deallocate(iter); - return NULL; - } - - /* Prepare the next buffers and set iterend/size */ - npyiter_copy_to_buffers(iter, NULL); - } - } - - NPY_IT_TIME_POINT(c_prepare_buffers); - -#if NPY_IT_CONSTRUCTION_TIMING - printf("\nIterator construction timing:\n"); - NPY_IT_PRINT_TIME_START(c_start); - NPY_IT_PRINT_TIME_VAR(c_check_op_axes); - NPY_IT_PRINT_TIME_VAR(c_check_global_flags); - NPY_IT_PRINT_TIME_VAR(c_calculate_ndim); - NPY_IT_PRINT_TIME_VAR(c_malloc); - NPY_IT_PRINT_TIME_VAR(c_prepare_operands); - NPY_IT_PRINT_TIME_VAR(c_fill_axisdata); - NPY_IT_PRINT_TIME_VAR(c_compute_index_strides); - NPY_IT_PRINT_TIME_VAR(c_apply_forced_iteration_order); - NPY_IT_PRINT_TIME_VAR(c_find_best_axis_ordering); - NPY_IT_PRINT_TIME_VAR(c_get_priority_subtype); - NPY_IT_PRINT_TIME_VAR(c_find_output_common_dtype); - NPY_IT_PRINT_TIME_VAR(c_check_casting); - NPY_IT_PRINT_TIME_VAR(c_allocate_arrays); - NPY_IT_PRINT_TIME_VAR(c_coalesce_axes); - NPY_IT_PRINT_TIME_VAR(c_prepare_buffers); - printf("\n"); -#endif - - return iter; -} - -/*NUMPY_API - * Allocate a new iterator for more than one array object, using - * standard NumPy broadcasting rules and the default buffer size. 
- */ -NPY_NO_EXPORT NpyIter * -NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32 flags, - NPY_ORDER order, NPY_CASTING casting, - npy_uint32 *op_flags, - PyArray_Descr **op_request_dtypes) -{ - return NpyIter_AdvancedNew(nop, op_in, flags, order, casting, - op_flags, op_request_dtypes, - 0, NULL, NULL, 0); -} - -/*NUMPY_API - * Allocate a new iterator for one array object. - */ -NPY_NO_EXPORT NpyIter * -NpyIter_New(PyArrayObject *op, npy_uint32 flags, - NPY_ORDER order, NPY_CASTING casting, - PyArray_Descr* dtype) -{ - /* Split the flags into separate global and op flags */ - npy_uint32 op_flags = flags&NPY_ITER_PER_OP_FLAGS; - flags &= NPY_ITER_GLOBAL_FLAGS; - - return NpyIter_AdvancedNew(1, &op, flags, order, casting, - &op_flags, &dtype, - 0, NULL, NULL, 0); -} - -/*NUMPY_API - * Makes a copy of the iterator - */ -NPY_NO_EXPORT NpyIter * -NpyIter_Copy(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - int out_of_memory = 0; - - npy_intp size; - NpyIter *newiter; - PyArrayObject **objects; - PyArray_Descr **dtypes; - - /* Allocate memory for the new iterator */ - size = NIT_SIZEOF_ITERATOR(itflags, ndim, nop); - newiter = (NpyIter*)PyArray_malloc(size); - - /* Copy the raw values to the new iterator */ - memcpy(newiter, iter, size); - - /* Take ownership of references to the operands and dtypes */ - objects = NIT_OPERANDS(newiter); - dtypes = NIT_DTYPES(newiter); - for (iop = 0; iop < nop; ++iop) { - Py_INCREF(objects[iop]); - Py_INCREF(dtypes[iop]); - } - - /* Allocate buffers and make copies of the transfer data if necessary */ - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata; - npy_intp buffersize, itemsize; - char **buffers; - void **readtransferdata, **writetransferdata; - - bufferdata = NIT_BUFFERDATA(newiter); - buffers = NBF_BUFFERS(bufferdata); - readtransferdata = NBF_READTRANSFERDATA(bufferdata); - writetransferdata = NBF_WRITETRANSFERDATA(bufferdata); 
- buffersize = NBF_BUFFERSIZE(bufferdata); - - for (iop = 0; iop < nop; ++iop) { - if (buffers[iop] != NULL) { - if (out_of_memory) { - buffers[iop] = NULL; - } - else { - itemsize = dtypes[iop]->elsize; - buffers[iop] = PyArray_malloc(itemsize*buffersize); - if (buffers[iop] == NULL) { - out_of_memory = 1; - } - } - } - - if (readtransferdata[iop] != NULL) { - if (out_of_memory) { - readtransferdata[iop] = NULL; - } - else { - readtransferdata[iop] = - PyArray_CopyStridedTransferData(readtransferdata[iop]); - if (readtransferdata[iop] == NULL) { - out_of_memory = 1; - } - } - } - - if (writetransferdata[iop] != NULL) { - if (out_of_memory) { - writetransferdata[iop] = NULL; - } - else { - writetransferdata[iop] = - PyArray_CopyStridedTransferData(writetransferdata[iop]); - if (writetransferdata[iop] == NULL) { - out_of_memory = 1; - } - } - } - } - - /* Initialize the buffers to the current iterindex */ - if (!out_of_memory && NBF_SIZE(bufferdata) > 0) { - npyiter_goto_iterindex(newiter, NIT_ITERINDEX(newiter)); - - /* Prepare the next buffers and set iterend/size */ - npyiter_copy_to_buffers(newiter, NULL); - } - } - - if (out_of_memory) { - NpyIter_Deallocate(newiter); - PyErr_NoMemory(); - return NULL; - } - - return newiter; -} - -/*NUMPY_API - * Deallocate an iterator - */ -NPY_NO_EXPORT int -NpyIter_Deallocate(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int iop, nop = NIT_NOP(iter); - - PyArray_Descr **dtype = NIT_DTYPES(iter); - PyArrayObject **object = NIT_OPERANDS(iter); - - /* Deallocate any buffers and buffering data */ - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - char **buffers; - void **transferdata; - - /* buffers */ - buffers = NBF_BUFFERS(bufferdata); - for(iop = 0; iop < nop; ++iop, ++buffers) { - if (*buffers) { - PyArray_free(*buffers); - } - } - /* read bufferdata */ - transferdata = NBF_READTRANSFERDATA(bufferdata); - for(iop = 0; iop < nop; 
++iop, ++transferdata) { - if (*transferdata) { - PyArray_FreeStridedTransferData(*transferdata); - } - } - /* write bufferdata */ - transferdata = NBF_WRITETRANSFERDATA(bufferdata); - for(iop = 0; iop < nop; ++iop, ++transferdata) { - if (*transferdata) { - PyArray_FreeStridedTransferData(*transferdata); - } - } - } - - /* Deallocate all the dtypes and objects that were iterated */ - for(iop = 0; iop < nop; ++iop, ++dtype, ++object) { - Py_XDECREF(*dtype); - Py_XDECREF(*object); - } - - /* Deallocate the iterator memory */ - PyArray_free(iter); - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX - * was set for iterator creation, and does not work if buffering is - * enabled. This function also resets the iterator to its initial state. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -NPY_NO_EXPORT int -NpyIter_RemoveAxis(NpyIter *iter, int axis) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - int xdim = 0; - npy_int8 *perm = NIT_PERM(iter); - NpyIter_AxisData *axisdata_del = NIT_AXISDATA(iter), *axisdata; - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - npy_intp *baseoffsets = NIT_BASEOFFSETS(iter); - char **resetdataptr = NIT_RESETDATAPTR(iter); - - if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator RemoveAxis may only be called " - "if a multi-index is being tracked"); - return NPY_FAIL; - } - else if (itflags&NPY_ITFLAG_HASINDEX) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator RemoveAxis may not be called on " - "an index is being tracked"); - return NPY_FAIL; - } - else if (itflags&NPY_ITFLAG_BUFFER) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator RemoveAxis may not be called on " - "a buffered iterator"); - return NPY_FAIL; - } - else if (axis < 0 || axis >= ndim) { - PyErr_SetString(PyExc_ValueError, - "axis out of bounds in iterator RemoveAxis"); - 
return NPY_FAIL; - } - - /* Reverse axis, since the iterator treats them that way */ - axis = ndim - 1 - axis; - - /* First find the axis in question */ - for (idim = 0; idim < ndim; ++idim) { - /* If this is it, and it's iterated forward, done */ - if (perm[idim] == axis) { - xdim = idim; - break; - } - /* If this is it, but it's iterated backward, must reverse the axis */ - else if (-1 - perm[idim] == axis) { - npy_intp *strides = NAD_STRIDES(axisdata_del); - npy_intp shape = NAD_SHAPE(axisdata_del), offset; - - xdim = idim; - - /* - * Adjust baseoffsets and resetbaseptr back to the start of - * this axis. - */ - for (iop = 0; iop < nop; ++iop) { - offset = (shape-1)*strides[iop]; - baseoffsets[iop] += offset; - resetdataptr[iop] += offset; - } - break; - } - - NIT_ADVANCE_AXISDATA(axisdata_del, 1); - } - - if (idim == ndim) { - PyErr_SetString(PyExc_RuntimeError, - "internal error in iterator perm"); - return NPY_FAIL; - } - - if (NAD_SHAPE(axisdata_del) == 0) { - PyErr_SetString(PyExc_ValueError, - "cannot remove a zero-sized axis from an iterator"); - return NPY_FAIL; - } - - /* Adjust the permutation */ - for (idim = 0; idim < ndim-1; ++idim) { - npy_int8 p = (idim < xdim) ? 
perm[idim] : perm[idim+1]; - if (p >= 0) { - if (p > axis) { - --p; - } - } - else if (p <= 0) { - if (p < -1-axis) { - ++p; - } - } - perm[idim] = p; - } - - /* Adjust the iteration size */ - NIT_ITERSIZE(iter) /= NAD_SHAPE(axisdata_del); - - /* Shift all the axisdata structures by one */ - axisdata = NIT_INDEX_AXISDATA(axisdata_del, 1); - memmove(axisdata_del, axisdata, (ndim-1-xdim)*sizeof_axisdata); - - /* If there is more than one dimension, shrink the iterator */ - if (ndim > 1) { - NIT_NDIM(iter) = ndim-1; - } - /* Otherwise convert it to a singleton dimension */ - else { - npy_intp *strides = NAD_STRIDES(axisdata_del); - NAD_SHAPE(axisdata_del) = 1; - for (iop = 0; iop < nop; ++iop) { - strides[iop] = 0; - } - NIT_ITFLAGS(iter) |= NPY_ITFLAG_ONEITERATION; - } - - return NpyIter_Reset(iter, NULL); -} - -/*NUMPY_API - * Removes multi-index support from an iterator. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -NPY_NO_EXPORT int -NpyIter_RemoveMultiIndex(NpyIter *iter) -{ - npy_uint32 itflags; - - /* Make sure the iterator is reset */ - if (NpyIter_Reset(iter, NULL) != NPY_SUCCEED) { - return NPY_FAIL; - } - - itflags = NIT_ITFLAGS(iter); - if (itflags&NPY_ITFLAG_HASMULTIINDEX) { - NIT_ITFLAGS(iter) = itflags & ~NPY_ITFLAG_HASMULTIINDEX; - npyiter_coalesce_axes(iter); - } - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Removes the inner loop handling (so HasExternalLoop returns true) - */ -NPY_NO_EXPORT int -NpyIter_EnableExternalLoop(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - /* Check conditions under which this can be done */ - if (itflags&(NPY_ITFLAG_HASINDEX|NPY_ITFLAG_HASMULTIINDEX)) { - PyErr_SetString(PyExc_ValueError, - "Iterator flag EXTERNAL_LOOP cannot be used " - "if an index or multi-index is being tracked"); - return NPY_FAIL; - } - if ((itflags&(NPY_ITFLAG_BUFFER|NPY_ITFLAG_RANGE|NPY_ITFLAG_EXLOOP)) - == (NPY_ITFLAG_RANGE|NPY_ITFLAG_EXLOOP)) { - 
PyErr_SetString(PyExc_ValueError, - "Iterator flag EXTERNAL_LOOP cannot be used " - "with ranged iteration unless buffering is also enabled"); - return NPY_FAIL; - } - /* Set the flag */ - if (!(itflags&NPY_ITFLAG_EXLOOP)) { - itflags |= NPY_ITFLAG_EXLOOP; - NIT_ITFLAGS(iter) = itflags; - - /* - * Check whether we can apply the single iteration - * optimization to the iternext function. - */ - if (!(itflags&NPY_ITFLAG_BUFFER)) { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - if (NIT_ITERSIZE(iter) == NAD_SHAPE(axisdata)) { - NIT_ITFLAGS(iter) |= NPY_ITFLAG_ONEITERATION; - } - } - } - - /* Reset the iterator */ - return NpyIter_Reset(iter, NULL); -} - -/*NUMPY_API - * Resets the iterator to its initial state - * - * If errmsg is non-NULL, it should point to a variable which will - * receive the error message, and no Python exception will be set. - * This is so that the function can be called from code not holding - * the GIL. - */ -NPY_NO_EXPORT int -NpyIter_Reset(NpyIter *iter, char **errmsg) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata; - - /* If buffer allocation was delayed, do it now */ - if (itflags&NPY_ITFLAG_DELAYBUF) { - if (!npyiter_allocate_buffers(iter, errmsg)) { - return NPY_FAIL; - } - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF; - } - else { - /* - * If the iterindex is already right, no need to - * do anything - */ - bufferdata = NIT_BUFFERDATA(iter); - if (NIT_ITERINDEX(iter) == NIT_ITERSTART(iter) && - NBF_BUFITEREND(bufferdata) <= NIT_ITEREND(iter) && - NBF_SIZE(bufferdata) > 0) { - return NPY_SUCCEED; - } - - /* Copy any data from the buffers back to the arrays */ - npyiter_copy_from_buffers(iter); - } - } - - npyiter_goto_iterindex(iter, NIT_ITERSTART(iter)); - - if (itflags&NPY_ITFLAG_BUFFER) { - /* Prepare the next buffers and set iterend/size */ - npyiter_copy_to_buffers(iter, NULL); - } - - return 
NPY_SUCCEED; -} - -/*NUMPY_API - * Resets the iterator to its initial state, with new base data pointers - * - * If errmsg is non-NULL, it should point to a variable which will - * receive the error message, and no Python exception will be set. - * This is so that the function can be called from code not holding - * the GIL. - */ -NPY_NO_EXPORT int -NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int iop, nop = NIT_NOP(iter); - - char **resetdataptr = NIT_RESETDATAPTR(iter); - npy_intp *baseoffsets = NIT_BASEOFFSETS(iter); - - if (itflags&NPY_ITFLAG_BUFFER) { - /* If buffer allocation was delayed, do it now */ - if (itflags&NPY_ITFLAG_DELAYBUF) { - if (!npyiter_allocate_buffers(iter, errmsg)) { - return NPY_FAIL; - } - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF; - } - else { - /* Copy any data from the buffers back to the arrays */ - npyiter_copy_from_buffers(iter); - } - } - - /* The new data pointers for resetting */ - for (iop = 0; iop < nop; ++iop) { - resetdataptr[iop] = baseptrs[iop] + baseoffsets[iop]; - } - - npyiter_goto_iterindex(iter, NIT_ITERSTART(iter)); - - if (itflags&NPY_ITFLAG_BUFFER) { - /* Prepare the next buffers and set iterend/size */ - npyiter_copy_to_buffers(iter, NULL); - } - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Resets the iterator to a new iterator index range - * - * If errmsg is non-NULL, it should point to a variable which will - * receive the error message, and no Python exception will be set. - * This is so that the function can be called from code not holding - * the GIL. 
- */ -NPY_NO_EXPORT int -NpyIter_ResetToIterIndexRange(NpyIter *iter, - npy_intp istart, npy_intp iend, char **errmsg) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - /*int nop = NIT_NOP(iter);*/ - - if (!(itflags&NPY_ITFLAG_RANGE)) { - if (errmsg == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot call ResetToIterIndexRange on an iterator without " - "requesting ranged iteration support in the constructor"); - } - else { - *errmsg = "Cannot call ResetToIterIndexRange on an iterator " - "without requesting ranged iteration support in the " - "constructor"; - } - return NPY_FAIL; - } - - if (istart < 0 || iend > NIT_ITERSIZE(iter)) { - if (errmsg == NULL) { - PyErr_Format(PyExc_ValueError, - "Out-of-bounds range [%d, %d) passed to " - "ResetToIterIndexRange", (int)istart, (int)iend); - } - else { - *errmsg = "Out-of-bounds range passed to ResetToIterIndexRange"; - } - return NPY_FAIL; - } - else if (iend < istart) { - if (errmsg == NULL) { - PyErr_Format(PyExc_ValueError, - "Invalid range [%d, %d) passed to ResetToIterIndexRange", - (int)istart, (int)iend); - } - else { - *errmsg = "Invalid range passed to ResetToIterIndexRange"; - } - return NPY_FAIL; - } - - NIT_ITERSTART(iter) = istart; - NIT_ITEREND(iter) = iend; - - return NpyIter_Reset(iter, errmsg); -} - -/*NUMPY_API - * Sets the iterator to the specified multi-index, which must have the - * correct number of entries for 'ndim'. It is only valid - * when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation - * fails if the multi-index is out of bounds. - * - * Returns NPY_SUCCEED on success, NPY_FAIL on failure. 
- */ -NPY_NO_EXPORT int -NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp *multi_index) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp iterindex, factor; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - npy_int8 *perm; - - if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoMultiIndex on an iterator without " - "requesting a multi-index in the constructor"); - return NPY_FAIL; - } - - if (itflags&NPY_ITFLAG_BUFFER) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoMultiIndex on an iterator which " - "is buffered"); - return NPY_FAIL; - } - - if (itflags&NPY_ITFLAG_EXLOOP) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoMultiIndex on an iterator which " - "has the flag EXTERNAL_LOOP"); - return NPY_FAIL; - } - - perm = NIT_PERM(iter); - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - /* Compute the iterindex corresponding to the multi-index */ - iterindex = 0; - factor = 1; - for (idim = 0; idim < ndim; ++idim) { - npy_int8 p = perm[idim]; - npy_intp i, shape; - - shape = NAD_SHAPE(axisdata); - if (p < 0) { - /* If the perm entry is negative, reverse the index */ - i = shape - multi_index[ndim+p] - 1; - } - else { - i = multi_index[ndim-p-1]; - } - - /* Bounds-check this index */ - if (i >= 0 && i < shape) { - iterindex += factor * i; - factor *= shape; - } - else { - PyErr_SetString(PyExc_IndexError, - "Iterator GotoMultiIndex called with an out-of-bounds " - "multi-index"); - return NPY_FAIL; - } - - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - - if (iterindex < NIT_ITERSTART(iter) || iterindex >= NIT_ITEREND(iter)) { - PyErr_SetString(PyExc_IndexError, - "Iterator GotoMultiIndex called with a multi-index outside the " - "restricted iteration range"); - return NPY_FAIL; - } - - npyiter_goto_iterindex(iter, iterindex); - - return NPY_SUCCEED; -} - -/*NUMPY_API - * If the iterator 
is tracking an index, sets the iterator - * to the specified index. - * - * Returns NPY_SUCCEED on success, NPY_FAIL on failure. - */ -NPY_NO_EXPORT int -NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp iterindex, factor; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - - if (!(itflags&NPY_ITFLAG_HASINDEX)) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoIndex on an iterator without " - "requesting a C or Fortran index in the constructor"); - return NPY_FAIL; - } - - if (itflags&NPY_ITFLAG_BUFFER) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoIndex on an iterator which " - "is buffered"); - return NPY_FAIL; - } - - if (itflags&NPY_ITFLAG_EXLOOP) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoIndex on an iterator which " - "has the flag EXTERNAL_LOOP"); - return NPY_FAIL; - } - - if (flat_index < 0 || flat_index >= NIT_ITERSIZE(iter)) { - PyErr_SetString(PyExc_IndexError, - "Iterator GotoIndex called with an out-of-bounds " - "index"); - return NPY_FAIL; - } - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - /* Compute the iterindex corresponding to the flat_index */ - iterindex = 0; - factor = 1; - for (idim = 0; idim < ndim; ++idim) { - npy_intp i, shape, iterstride; - - iterstride = NAD_STRIDES(axisdata)[nop]; - shape = NAD_SHAPE(axisdata); - - /* Extract the index from the flat_index */ - if (iterstride == 0) { - i = 0; - } - else if (iterstride < 0) { - i = shape - (flat_index/(-iterstride))%shape - 1; - } - else { - i = (flat_index/iterstride)%shape; - } - - /* Add its contribution to iterindex */ - iterindex += factor * i; - factor *= shape; - - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - - - if (iterindex < NIT_ITERSTART(iter) || iterindex >= NIT_ITEREND(iter)) { - PyErr_SetString(PyExc_IndexError, - "Iterator GotoIndex called with an index outside 
the " - "restricted iteration range."); - return NPY_FAIL; - } - - npyiter_goto_iterindex(iter, iterindex); - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Sets the iterator position to the specified iterindex, - * which matches the iteration order of the iterator. - * - * Returns NPY_SUCCEED on success, NPY_FAIL on failure. - */ -NPY_NO_EXPORT int -NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int iop, nop = NIT_NOP(iter); - - if (itflags&NPY_ITFLAG_EXLOOP) { - PyErr_SetString(PyExc_ValueError, - "Cannot call GotoIterIndex on an iterator which " - "has the flag EXTERNAL_LOOP"); - return NPY_FAIL; - } - - if (iterindex < NIT_ITERSTART(iter) || iterindex >= NIT_ITEREND(iter)) { - PyErr_SetString(PyExc_IndexError, - "Iterator GotoIterIndex called with an iterindex outside the " - "iteration range."); - return NPY_FAIL; - } - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - npy_intp bufiterend, size; - - size = NBF_SIZE(bufferdata); - bufiterend = NBF_BUFITEREND(bufferdata); - /* Check if the new iterindex is already within the buffer */ - if (!(itflags&NPY_ITFLAG_REDUCE) && iterindex < bufiterend && - iterindex >= bufiterend - size) { - npy_intp *strides, delta; - char **ptrs; - - strides = NBF_STRIDES(bufferdata); - ptrs = NBF_PTRS(bufferdata); - delta = iterindex - NIT_ITERINDEX(iter); - - for (iop = 0; iop < nop; ++iop) { - ptrs[iop] += delta * strides[iop]; - } - - NIT_ITERINDEX(iter) = iterindex; - } - /* Start the buffer at the provided iterindex */ - else { - /* Write back to the arrays */ - npyiter_copy_from_buffers(iter); - - npyiter_goto_iterindex(iter, iterindex); - - /* Prepare the next buffers and set iterend/size */ - npyiter_copy_to_buffers(iter, NULL); - } - } - else { - npyiter_goto_iterindex(iter, iterindex); - } - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Gets the current iteration index - */ -NPY_NO_EXPORT 
npy_intp -NpyIter_GetIterIndex(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - /* iterindex is only used if NPY_ITER_RANGED or NPY_ITER_BUFFERED was set */ - if (itflags&(NPY_ITFLAG_RANGE|NPY_ITFLAG_BUFFER)) { - return NIT_ITERINDEX(iter); - } - else { - npy_intp iterindex; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - - iterindex = 0; - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - axisdata = NIT_INDEX_AXISDATA(NIT_AXISDATA(iter), ndim-1); - - for (idim = ndim-2; idim >= 0; --idim) { - iterindex += NAD_INDEX(axisdata); - NIT_ADVANCE_AXISDATA(axisdata, -1); - iterindex *= NAD_SHAPE(axisdata); - } - iterindex += NAD_INDEX(axisdata); - - return iterindex; - } -} - -/* SPECIALIZED iternext functions that handle the non-buffering part */ - -/**begin repeat - * #const_itflags = 0, - * NPY_ITFLAG_HASINDEX, - * NPY_ITFLAG_EXLOOP, - * NPY_ITFLAG_RANGE, - * NPY_ITFLAG_RANGE|NPY_ITFLAG_HASINDEX# - * #tag_itflags = 0, IND, NOINN, RNG, RNGuIND# - */ -/**begin repeat1 - * #const_ndim = 1, 2, NPY_MAXDIMS# - * #tag_ndim = 1, 2, ANY# - */ -/**begin repeat2 - * #const_nop = 1, 2, NPY_MAXDIMS# - * #tag_nop = 1, 2, ANY# - */ - -/* Specialized iternext (@const_itflags@,@tag_ndim@,@tag_nop@) */ -static int -npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@( - NpyIter *iter) -{ -#if !(@const_itflags@&NPY_ITFLAG_EXLOOP) || (@const_ndim@ > 1) - const npy_uint32 itflags = @const_itflags@; -# if @const_ndim@ >= NPY_MAXDIMS - int idim, ndim = NIT_NDIM(iter); -# endif -# if @const_nop@ < NPY_MAXDIMS - const int nop = @const_nop@; -# else - int nop = NIT_NOP(iter); -# endif - - NpyIter_AxisData *axisdata0; - npy_intp istrides, nstrides = NAD_NSTRIDES(); -#endif -#if @const_ndim@ > 1 - NpyIter_AxisData *axisdata1; - npy_intp sizeof_axisdata; -#endif -#if @const_ndim@ > 2 - NpyIter_AxisData *axisdata2; -#endif - -#if (@const_itflags@&NPY_ITFLAG_RANGE) - /* When ranged 
iteration is enabled, use the iterindex */ - if (++NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) { - return 0; - } -#endif - -#if @const_ndim@ > 1 - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); -#endif - -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) || (@const_ndim@ > 1) - axisdata0 = NIT_AXISDATA(iter); -# endif -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) - /* Increment index 0 */ - NAD_INDEX(axisdata0)++; - /* Increment pointer 0 */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata0)[istrides] += NAD_STRIDES(axisdata0)[istrides]; - } -# endif - -#if @const_ndim@ == 1 - -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) - /* Finished when the index equals the shape */ - return NAD_INDEX(axisdata0) < NAD_SHAPE(axisdata0); -# else - return 0; -# endif - -#else - -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) - if (NAD_INDEX(axisdata0) < NAD_SHAPE(axisdata0)) { - return 1; - } -# endif - - axisdata1 = NIT_INDEX_AXISDATA(axisdata0, 1); - /* Increment index 1 */ - NAD_INDEX(axisdata1)++; - /* Increment pointer 1 */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata1)[istrides] += NAD_STRIDES(axisdata1)[istrides]; - } - - if (NAD_INDEX(axisdata1) < NAD_SHAPE(axisdata1)) { - /* Reset the 1st index to 0 */ - NAD_INDEX(axisdata0) = 0; - /* Reset the 1st pointer to the value of the 2nd */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata1)[istrides]; - } - return 1; - } - -# if @const_ndim@ == 2 - return 0; -# else - - axisdata2 = NIT_INDEX_AXISDATA(axisdata1, 1); - /* Increment index 2 */ - NAD_INDEX(axisdata2)++; - /* Increment pointer 2 */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata2)[istrides] += NAD_STRIDES(axisdata2)[istrides]; - } - - if (NAD_INDEX(axisdata2) < NAD_SHAPE(axisdata2)) { - /* Reset the 1st and 2nd indices to 0 */ - NAD_INDEX(axisdata0) = 0; - NAD_INDEX(axisdata1) = 0; - /* Reset the 1st and 2nd pointers to the value of 
the 3nd */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata2)[istrides]; - NAD_PTRS(axisdata1)[istrides] = NAD_PTRS(axisdata2)[istrides]; - } - return 1; - } - - for (idim = 3; idim < ndim; ++idim) { - NIT_ADVANCE_AXISDATA(axisdata2, 1); - /* Increment the index */ - NAD_INDEX(axisdata2)++; - /* Increment the pointer */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata2)[istrides] += NAD_STRIDES(axisdata2)[istrides]; - } - - - if (NAD_INDEX(axisdata2) < NAD_SHAPE(axisdata2)) { - /* Reset the indices and pointers of all previous axisdatas */ - axisdata1 = axisdata2; - do { - NIT_ADVANCE_AXISDATA(axisdata1, -1); - /* Reset the index to 0 */ - NAD_INDEX(axisdata1) = 0; - /* Reset the pointer to the updated value */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata1)[istrides] = - NAD_PTRS(axisdata2)[istrides]; - } - } while (axisdata1 != axisdata0); - - return 1; - } - } - - return 0; - -# endif /* ndim != 2 */ - -#endif /* ndim != 1 */ -} - -/**end repeat2**/ -/**end repeat1**/ -/**end repeat**/ - - -/**begin repeat - * #const_nop = 1, 2, 3, 4, NPY_MAXDIMS# - * #tag_nop = 1, 2, 3, 4, ANY# - */ - -/* - * Iternext function that handles the reduction buffering part. This - * is done with a double loop to avoid frequent re-buffering. 
- */ -static int -npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ -#if @const_nop@ >= NPY_MAXDIMS - int nop = NIT_NOP(iter); -#else - const int nop = @const_nop@; -#endif - - int iop; - - NpyIter_AxisData *axisdata; - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - char **ptrs; - char *prev_dataptrs[NPY_MAXARGS]; - - ptrs = NBF_PTRS(bufferdata); - - /* - * If the iterator handles the inner loop, need to increment all - * the indices and pointers - */ - if (!(itflags&NPY_ITFLAG_EXLOOP)) { - /* Increment within the buffer */ - if (++NIT_ITERINDEX(iter) < NBF_BUFITEREND(bufferdata)) { - npy_intp *strides; - - strides = NBF_STRIDES(bufferdata); - for (iop = 0; iop < nop; ++iop) { - ptrs[iop] += strides[iop]; - } - return 1; - } - } - else { - NIT_ITERINDEX(iter) += NBF_SIZE(bufferdata); - } - - NPY_IT_DBG_PRINT1("Iterator: Finished iteration %d of outer reduce loop\n", - (int)NBF_REDUCE_POS(bufferdata)); - /* The outer increment for the reduce double loop */ - if (++NBF_REDUCE_POS(bufferdata) < NBF_REDUCE_OUTERSIZE(bufferdata)) { - npy_intp *reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - char **reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata); - for (iop = 0; iop < nop; ++iop) { - char *ptr = reduce_outerptrs[iop] + reduce_outerstrides[iop]; - ptrs[iop] = ptr; - reduce_outerptrs[iop] = ptr; - } - NBF_BUFITEREND(bufferdata) = NIT_ITERINDEX(iter) + NBF_SIZE(bufferdata); - return 1; - } - - /* Save the previously used data pointers */ - axisdata = NIT_AXISDATA(iter); - memcpy(prev_dataptrs, NAD_PTRS(axisdata), NPY_SIZEOF_INTP*nop); - - /* Write back to the arrays */ - npyiter_copy_from_buffers(iter); - - /* Check if we're past the end */ - if (NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) { - NBF_SIZE(bufferdata) = 0; - return 0; - } - /* Increment to the next buffer */ - else { - npyiter_goto_iterindex(iter, NIT_ITERINDEX(iter)); - } - - /* Prepare the next 
buffers and set iterend/size */ - npyiter_copy_to_buffers(iter, prev_dataptrs); - - return 1; -} - -/**end repeat**/ - -/* iternext function that handles the buffering part */ -static int -npyiter_buffered_iternext(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - - /* - * If the iterator handles the inner loop, need to increment all - * the indices and pointers - */ - if (!(itflags&NPY_ITFLAG_EXLOOP)) { - /* Increment within the buffer */ - if (++NIT_ITERINDEX(iter) < NBF_BUFITEREND(bufferdata)) { - int iop; - npy_intp *strides; - char **ptrs; - - strides = NBF_STRIDES(bufferdata); - ptrs = NBF_PTRS(bufferdata); - for (iop = 0; iop < nop; ++iop) { - ptrs[iop] += strides[iop]; - } - return 1; - } - } - else { - NIT_ITERINDEX(iter) += NBF_SIZE(bufferdata); - } - - /* Write back to the arrays */ - npyiter_copy_from_buffers(iter); - - /* Check if we're past the end */ - if (NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) { - NBF_SIZE(bufferdata) = 0; - return 0; - } - /* Increment to the next buffer */ - else { - npyiter_goto_iterindex(iter, NIT_ITERINDEX(iter)); - } - - /* Prepare the next buffers and set iterend/size */ - npyiter_copy_to_buffers(iter, NULL); - - return 1; -} - -/**end repeat2**/ -/**end repeat1**/ -/**end repeat**/ - -/* Specialization of iternext for when the iteration size is 1 */ -static int -npyiter_iternext_sizeone(NpyIter *iter) -{ - return 0; -} - -/*NUMPY_API - * Compute the specialized iteration function for an iterator - * - * If errmsg is non-NULL, it should point to a variable which will - * receive the error message, and no Python exception will be set. - * This is so that the function can be called from code not holding - * the GIL. 
- */ -NPY_NO_EXPORT NpyIter_IterNextFunc * -NpyIter_GetIterNext(NpyIter *iter, char **errmsg) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - /* - * When there is just one iteration and buffering is disabled - * the iternext function is very simple. - */ - if (itflags&NPY_ITFLAG_ONEITERATION) { - return &npyiter_iternext_sizeone; - } - - /* - * If buffering is enabled. - */ - if (itflags&NPY_ITFLAG_BUFFER) { - if (itflags&NPY_ITFLAG_REDUCE) { - switch (nop) { - case 1: - return &npyiter_buffered_reduce_iternext_iters1; - case 2: - return &npyiter_buffered_reduce_iternext_iters2; - case 3: - return &npyiter_buffered_reduce_iternext_iters3; - case 4: - return &npyiter_buffered_reduce_iternext_iters4; - default: - return &npyiter_buffered_reduce_iternext_itersANY; - } - } - else { - return &npyiter_buffered_iternext; - } - } - - /* - * Ignore all the flags that don't affect the iterator memory - * layout or the iternext function. Currently only HASINDEX, - * EXLOOP, and RANGE affect them here. 
- */ - itflags &= (NPY_ITFLAG_HASINDEX|NPY_ITFLAG_EXLOOP|NPY_ITFLAG_RANGE); - - /* Switch statements let the compiler optimize this most effectively */ - switch (itflags) { - /* - * The combinations HASINDEX|EXLOOP and RANGE|EXLOOP are excluded - * by the New functions - */ -/**begin repeat - * #const_itflags = 0, - * NPY_ITFLAG_HASINDEX, - * NPY_ITFLAG_EXLOOP, - * NPY_ITFLAG_RANGE, - * NPY_ITFLAG_RANGE|NPY_ITFLAG_HASINDEX# - * #tag_itflags = 0, IND, NOINN, RNG, RNGuIND# - */ - case @const_itflags@: - switch (ndim) { -/**begin repeat1 - * #const_ndim = 1, 2# - * #tag_ndim = 1, 2# - */ - case @const_ndim@: - switch (nop) { -/**begin repeat2 - * #const_nop = 1, 2# - * #tag_nop = 1, 2# - */ - case @const_nop@: - return &npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@; -/**end repeat2**/ - /* Not specialized on nop */ - default: - return &npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_itersANY; - } -/**end repeat1**/ - /* Not specialized on ndim */ - default: - switch (nop) { -/**begin repeat1 - * #const_nop = 1, 2# - * #tag_nop = 1, 2# - */ - case @const_nop@: - return &npyiter_iternext_itflags@tag_itflags@_dimsANY_iters@tag_nop@; -/**end repeat1**/ - /* Not specialized on nop */ - default: - return &npyiter_iternext_itflags@tag_itflags@_dimsANY_itersANY; - } - } -/**end repeat**/ - } - /* The switch above should have caught all the possibilities. 
*/ - if (errmsg == NULL) { - PyErr_Format(PyExc_ValueError, - "GetIterNext internal iterator error - unexpected " - "itflags/ndim/nop combination (%04x/%d/%d)", - (int)itflags, (int)ndim, (int)nop); - } - else { - *errmsg = "GetIterNext internal iterator error - unexpected " - "itflags/ndim/nop combination"; - } - return NULL; -} - - -/* SPECIALIZED getindex functions */ - -/**begin repeat - * #const_itflags = 0, - * NPY_ITFLAG_HASINDEX, - * NPY_ITFLAG_IDENTPERM, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_IDENTPERM, - * NPY_ITFLAG_NEGPERM, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_NEGPERM, - * NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_NEGPERM|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_NEGPERM|NPY_ITFLAG_BUFFER# - * #tag_itflags = 0, IND, IDP, INDuIDP, NEGP, INDuNEGP, - * BUF, INDuBUF, IDPuBUF, INDuIDPuBUF, NEGPuBUF, INDuNEGPuBUF# - */ -static void -npyiter_get_multi_index_itflags@tag_itflags@( - NpyIter *iter, npy_intp *out_multi_index) -{ - const npy_uint32 itflags = @const_itflags@; - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp sizeof_axisdata; - NpyIter_AxisData *axisdata; -#if !((@const_itflags@)&NPY_ITFLAG_IDENTPERM) - npy_int8 *perm = NIT_PERM(iter); -#endif - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); -#if ((@const_itflags@)&NPY_ITFLAG_IDENTPERM) - out_multi_index += ndim-1; - for(idim = 0; idim < ndim; ++idim, --out_multi_index, - NIT_ADVANCE_AXISDATA(axisdata, 1)) { - *out_multi_index = NAD_INDEX(axisdata); - } -#elif !((@const_itflags@)&NPY_ITFLAG_NEGPERM) - for(idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_int8 p = perm[idim]; - out_multi_index[ndim-p-1] = NAD_INDEX(axisdata); - } -#else - for(idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_int8 p = perm[idim]; - if (p < 0) { - /* If 
the perm entry is negative, reverse the index */ - out_multi_index[ndim+p] = NAD_SHAPE(axisdata) - NAD_INDEX(axisdata) - 1; - } - else { - out_multi_index[ndim-p-1] = NAD_INDEX(axisdata); - } - } -#endif /* not ident perm */ -} -/**end repeat**/ - -/*NUMPY_API - * Compute a specialized get_multi_index function for the iterator - * - * If errmsg is non-NULL, it should point to a variable which will - * receive the error message, and no Python exception will be set. - * This is so that the function can be called from code not holding - * the GIL. - */ -NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * -NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - /* These flags must be correct */ - if ((itflags&(NPY_ITFLAG_HASMULTIINDEX|NPY_ITFLAG_DELAYBUF)) != - NPY_ITFLAG_HASMULTIINDEX) { - if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) { - if (errmsg == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot retrieve a GetMultiIndex function for an " - "iterator that doesn't track a multi-index."); - } - else { - *errmsg = "Cannot retrieve a GetMultiIndex function for an " - "iterator that doesn't track a multi-index."; - } - return NULL; - } - else { - if (errmsg == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot retrieve a GetMultiIndex function for an " - "iterator that used DELAY_BUFALLOC before a Reset call"); - } - else { - *errmsg = "Cannot retrieve a GetMultiIndex function for an " - "iterator that used DELAY_BUFALLOC before a " - "Reset call"; - } - return NULL; - } - } - - /* - * Only these flags affect the iterator memory layout or - * the get_multi_index behavior. IDENTPERM and NEGPERM are mutually - * exclusive, so that reduces the number of cases slightly. 
- */ - itflags &= (NPY_ITFLAG_HASINDEX | - NPY_ITFLAG_IDENTPERM | - NPY_ITFLAG_NEGPERM | - NPY_ITFLAG_BUFFER); - - switch (itflags) { -/**begin repeat - * #const_itflags = 0, - * NPY_ITFLAG_HASINDEX, - * NPY_ITFLAG_IDENTPERM, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_IDENTPERM, - * NPY_ITFLAG_NEGPERM, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_NEGPERM, - * NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_NEGPERM|NPY_ITFLAG_BUFFER, - * NPY_ITFLAG_HASINDEX|NPY_ITFLAG_NEGPERM|NPY_ITFLAG_BUFFER# - * #tag_itflags = 0, IND, IDP, INDuIDP, NEGP, INDuNEGP, - * BUF, INDuBUF, IDPuBUF, INDuIDPuBUF, NEGPuBUF, INDuNEGPuBUF# - */ - case @const_itflags@: - return npyiter_get_multi_index_itflags@tag_itflags@; -/**end repeat**/ - } - /* The switch above should have caught all the possibilities. */ - if (errmsg == NULL) { - PyErr_Format(PyExc_ValueError, - "GetGetMultiIndex internal iterator error - unexpected " - "itflags/ndim/nop combination (%04x/%d/%d)", - (int)itflags, (int)ndim, (int)nop); - } - else { - *errmsg = "GetGetMultiIndex internal iterator error - unexpected " - "itflags/ndim/nop combination"; - } - return NULL; - -} - -/*NUMPY_API - * Whether the buffer allocation is being delayed - */ -NPY_NO_EXPORT npy_bool -NpyIter_HasDelayedBufAlloc(NpyIter *iter) -{ - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_DELAYBUF) != 0; -} - -/*NUMPY_API - * Whether the iterator handles the inner loop - */ -NPY_NO_EXPORT npy_bool -NpyIter_HasExternalLoop(NpyIter *iter) -{ - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_EXLOOP) != 0; -} - -/*NUMPY_API - * Whether the iterator is tracking a multi-index - */ -NPY_NO_EXPORT npy_bool -NpyIter_HasMultiIndex(NpyIter *iter) -{ - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_HASMULTIINDEX) != 0; -} - -/*NUMPY_API - * Whether the iterator is tracking an index - */ -NPY_NO_EXPORT npy_bool -NpyIter_HasIndex(NpyIter *iter) -{ - return 
(NIT_ITFLAGS(iter)&NPY_ITFLAG_HASINDEX) != 0; -} - -/*NUMPY_API - * Whether the iteration could be done with no buffering. - */ -NPY_NO_EXPORT npy_bool -NpyIter_RequiresBuffering(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int iop, nop = NIT_NOP(iter); - - char *op_itflags; - - if (!(itflags&NPY_ITFLAG_BUFFER)) { - return 0; - } - - op_itflags = NIT_OPITFLAGS(iter); - - /* If any operand requires a cast, buffering is mandatory */ - for (iop = 0; iop < nop; ++iop) { - if (op_itflags[iop]&NPY_OP_ITFLAG_CAST) { - return 1; - } - } - - return 0; -} - -/*NUMPY_API - * Whether the iteration loop, and in particular the iternext() - * function, needs API access. If this is true, the GIL must - * be retained while iterating. - */ -NPY_NO_EXPORT npy_bool -NpyIter_IterationNeedsAPI(NpyIter *iter) -{ - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_NEEDSAPI) != 0; -} - -/*NUMPY_API - * Gets the number of dimensions being iterated - */ -NPY_NO_EXPORT int -NpyIter_GetNDim(NpyIter *iter) -{ - return NIT_NDIM(iter); -} - -/*NUMPY_API - * Gets the number of operands being iterated - */ -NPY_NO_EXPORT int -NpyIter_GetNOp(NpyIter *iter) -{ - return NIT_NOP(iter); -} - -/*NUMPY_API - * Gets the number of elements being iterated - */ -NPY_NO_EXPORT npy_intp -NpyIter_GetIterSize(NpyIter *iter) -{ - return NIT_ITERSIZE(iter); -} - -/*NUMPY_API - * Whether the iterator is buffered - */ -NPY_NO_EXPORT npy_bool -NpyIter_IsBuffered(NpyIter *iter) -{ - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_BUFFER) != 0; -} - -/*NUMPY_API - * Whether the inner loop can grow if buffering is unneeded - */ -NPY_NO_EXPORT npy_bool -NpyIter_IsGrowInner(NpyIter *iter) -{ - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_GROWINNER) != 0; -} - -/*NUMPY_API - * Gets the size of the buffer, or 0 if buffering is not enabled - */ -NPY_NO_EXPORT npy_intp -NpyIter_GetBufferSize(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); 
- - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - return NBF_BUFFERSIZE(bufferdata); - } - else { - return 0; - } - -} - -/*NUMPY_API - * Gets the range of iteration indices being iterated - */ -NPY_NO_EXPORT void -NpyIter_GetIterIndexRange(NpyIter *iter, - npy_intp *istart, npy_intp *iend) -{ - *istart = NIT_ITERSTART(iter); - *iend = NIT_ITEREND(iter); -} - -/*NUMPY_API - * Gets the broadcast shape if a multi-index is being tracked by the iterator, - * otherwise gets the shape of the iteration as Fortran-order - * (fastest-changing index first). - * - * The reason Fortran-order is returned when a multi-index - * is not enabled is that this is providing a direct view into how - * the iterator traverses the n-dimensional space. The iterator organizes - * its memory from fastest index to slowest index, and when - * a multi-index is enabled, it uses a permutation to recover the original - * order. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -NPY_NO_EXPORT int -NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - int idim, sizeof_axisdata; - NpyIter_AxisData *axisdata; - npy_int8 *perm; - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - if (itflags&NPY_ITFLAG_HASMULTIINDEX) { - perm = NIT_PERM(iter); - for(idim = 0; idim < ndim; ++idim) { - npy_int8 p = perm[idim]; - if (p < 0) { - outshape[ndim+p] = NAD_SHAPE(axisdata); - } - else { - outshape[ndim-p-1] = NAD_SHAPE(axisdata); - } - - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - } - else { - for(idim = 0; idim < ndim; ++idim) { - outshape[idim] = NAD_SHAPE(axisdata); - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - } - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Builds a set of strides which are the same as the strides of an - * output array created using the NPY_ITER_ALLOCATE flag, where NULL - * was passed for op_axes. 
This is for data packed contiguously, - * but not necessarily in C or Fortran order. This should be used - * together with NpyIter_GetShape and NpyIter_GetNDim. - * - * A use case for this function is to match the shape and layout of - * the iterator and tack on one or more dimensions. For example, - * in order to generate a vector per input value for a numerical gradient, - * you pass in ndim*itemsize for itemsize, then add another dimension to - * the end with size ndim and stride itemsize. To do the Hessian matrix, - * you do the same thing but add two dimensions, or take advantage of - * the symmetry and pack it into 1 dimension with a particular encoding. - * - * This function may only be called if the iterator is tracking a multi-index - * and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from - * being iterated in reverse order. - * - * If an array is created with this method, simply adding 'itemsize' - * for each iteration will traverse the new array matching the - * iterator. - * - * Returns NPY_SUCCEED or NPY_FAIL. 
- */ -NPY_NO_EXPORT int -NpyIter_CreateCompatibleStrides(NpyIter *iter, - npy_intp itemsize, npy_intp *outstrides) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp sizeof_axisdata; - NpyIter_AxisData *axisdata; - npy_int8 *perm; - - if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator CreateCompatibleStrides may only be called " - "if a multi-index is being tracked"); - return NPY_FAIL; - } - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - perm = NIT_PERM(iter); - for(idim = 0; idim < ndim; ++idim) { - npy_int8 p = perm[idim]; - if (p < 0) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator CreateCompatibleStrides may only be called " - "if DONT_NEGATE_STRIDES was used to prevent reverse " - "iteration of an axis"); - return NPY_FAIL; - } - else { - outstrides[ndim-p-1] = itemsize; - } - - itemsize *= NAD_SHAPE(axisdata); - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - - return NPY_SUCCEED; -} - -/*NUMPY_API - * Get the array of data pointers (1 per object being iterated) - * - * This function may be safely called without holding the Python GIL. - */ -NPY_NO_EXPORT char ** -NpyIter_GetDataPtrArray(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - return NBF_PTRS(bufferdata); - } - else { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - return NAD_PTRS(axisdata); - } -} - -/*NUMPY_API - * Get the array of data pointers (1 per object being iterated), - * directly into the arrays (never pointing to a buffer), for starting - * unbuffered iteration. This always returns the addresses for the - * iterator position as reset to iterator index 0. 
- * - * These pointers are different from the pointers accepted by - * NpyIter_ResetBasePointers, because the direction along some - * axes may have been reversed, requiring base offsets. - * - * This function may be safely called without holding the Python GIL. - */ -NPY_NO_EXPORT char ** -NpyIter_GetInitialDataPtrArray(NpyIter *iter) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - return NIT_RESETDATAPTR(iter); -} - -/*NUMPY_API - * Get the array of data type pointers (1 per object being iterated) - */ -NPY_NO_EXPORT PyArray_Descr ** -NpyIter_GetDescrArray(NpyIter *iter) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - /*int ndim = NIT_NDIM(iter);*/ - /*int nop = NIT_NOP(iter);*/ - - return NIT_DTYPES(iter); -} - -/*NUMPY_API - * Get the array of objects being iterated - */ -NPY_NO_EXPORT PyArrayObject ** -NpyIter_GetOperandArray(NpyIter *iter) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - return NIT_OPERANDS(iter); -} - -/*NUMPY_API - * Returns a view to the i-th object with the iterator's internal axes - */ -NPY_NO_EXPORT PyArrayObject * -NpyIter_GetIterView(NpyIter *iter, npy_intp i) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS]; - PyArrayObject *obj, *view; - PyArray_Descr *dtype; - char *dataptr; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - int writeable; - - if (i < 0 || i >= nop) { - PyErr_SetString(PyExc_IndexError, - "index provided for an iterator view was out of bounds"); - return NULL; - } - - /* Don't provide views if buffering is enabled */ - if (itflags&NPY_ITFLAG_BUFFER) { - PyErr_SetString(PyExc_ValueError, - "cannot provide an iterator view when buffering is enabled"); - return NULL; - } - - obj = NIT_OPERANDS(iter)[i]; - dtype = PyArray_DESCR(obj); - writeable = 
NIT_OPITFLAGS(iter)[i]&NPY_OP_ITFLAG_WRITE; - dataptr = NIT_RESETDATAPTR(iter)[i]; - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - /* Retrieve the shape and strides from the axisdata */ - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - shape[ndim-idim-1] = NAD_SHAPE(axisdata); - strides[ndim-idim-1] = NAD_STRIDES(axisdata)[i]; - } - - Py_INCREF(dtype); - view = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, - shape, strides, dataptr, - writeable ? NPY_WRITEABLE : 0, - NULL); - if (view == NULL) { - return NULL; - } - /* Tell the view who owns the data */ - Py_INCREF(obj); - view->base = (PyObject *)obj; - /* Make sure all the flags are good */ - PyArray_UpdateFlags(view, NPY_UPDATE_ALL); - - return view; -} - -/*NUMPY_API - * Get a pointer to the index, if it is being tracked - */ -NPY_NO_EXPORT npy_intp * -NpyIter_GetIndexPtr(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - - if (itflags&NPY_ITFLAG_HASINDEX) { - /* The index is just after the data pointers */ - return (npy_intp*)NAD_PTRS(axisdata) + nop; - } - else { - return NULL; - } -} - -/*NUMPY_API - * Gets an array of read flags (1 per object being iterated) - */ -NPY_NO_EXPORT void -NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - /*int ndim = NIT_NDIM(iter);*/ - int iop, nop = NIT_NOP(iter); - - char *op_itflags = NIT_OPITFLAGS(iter); - - for (iop = 0; iop < nop; ++iop) { - outreadflags[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_READ) != 0; - } -} - -/*NUMPY_API - * Gets an array of write flags (1 per object being iterated) - */ -NPY_NO_EXPORT void -NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - /*int ndim = NIT_NDIM(iter);*/ - int iop, nop = NIT_NOP(iter); - - char 
*op_itflags = NIT_OPITFLAGS(iter); - - for (iop = 0; iop < nop; ++iop) { - outwriteflags[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) != 0; - } -} - - -/*NUMPY_API - * Get the array of strides for the inner loop (when HasExternalLoop is true) - * - * This function may be safely called without holding the Python GIL. - */ -NPY_NO_EXPORT npy_intp * -NpyIter_GetInnerStrideArray(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *data = NIT_BUFFERDATA(iter); - return NBF_STRIDES(data); - } - else { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - return NAD_STRIDES(axisdata); - } -} - -/*NUMPY_API - * Gets the array of strides for the specified axis. - * If the iterator is tracking a multi-index, gets the strides - * for the axis specified, otherwise gets the strides for - * the iteration axis as Fortran order (fastest-changing axis first). - * - * Returns NULL if an error occurs. 
- */ -NPY_NO_EXPORT npy_intp * -NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_int8 *perm = NIT_PERM(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - if (axis < 0 || axis >= ndim) { - PyErr_SetString(PyExc_ValueError, - "axis out of bounds in iterator GetStrideAxisArray"); - return NULL; - } - - if (itflags&NPY_ITFLAG_HASMULTIINDEX) { - /* Reverse axis, since the iterator treats them that way */ - axis = ndim-1-axis; - - /* First find the axis in question */ - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - if (perm[idim] == axis || -1 - perm[idim] == axis) { - return NAD_STRIDES(axisdata); - } - } - } - else { - return NAD_STRIDES(NIT_INDEX_AXISDATA(axisdata, axis)); - } - - PyErr_SetString(PyExc_RuntimeError, - "internal error in iterator perm"); - return NULL; -} - -/*NUMPY_API - * Get an array of strides which are fixed. Any strides which may - * change during iteration receive the value NPY_MAX_INTP. Once - * the iterator is ready to iterate, call this to get the strides - * which will always be fixed in the inner loop, then choose optimized - * inner loop functions which take advantage of those fixed strides. - * - * This function may be safely called without holding the Python GIL. 
- */ -NPY_NO_EXPORT void -NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - NpyIter_AxisData *axisdata0 = NIT_AXISDATA(iter); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *data = NIT_BUFFERDATA(iter); - char *op_itflags = NIT_OPITFLAGS(iter); - npy_intp stride, *strides = NBF_STRIDES(data), - *ad_strides = NAD_STRIDES(axisdata0); - PyArray_Descr **dtypes = NIT_DTYPES(iter); - - for (iop = 0; iop < nop; ++iop) { - stride = strides[iop]; - /* - * Operands which are always/never buffered have fixed strides, - * and everything has fixed strides when ndim is 0 or 1 - */ - if (ndim <= 1 || (op_itflags[iop]& - (NPY_OP_ITFLAG_CAST|NPY_OP_ITFLAG_BUFNEVER))) { - out_strides[iop] = stride; - } - /* If it's a reduction, 0-stride inner loop may have fixed stride */ - else if (stride == 0 && (itflags&NPY_ITFLAG_REDUCE)) { - /* If it's a reduction operand, definitely fixed stride */ - if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) { - out_strides[iop] = stride; - } - /* - * Otherwise it's a fixed stride if the stride is 0 - * for all inner dimensions of the reduction double loop - */ - else { - NpyIter_AxisData *axisdata = axisdata0; - int idim, - reduce_outerdim = NBF_REDUCE_OUTERDIM(data); - for (idim = 0; idim < reduce_outerdim; ++idim) { - if (NAD_STRIDES(axisdata)[iop] != 0) { - break; - } - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - /* If all the strides were 0, the stride won't change */ - if (idim == reduce_outerdim) { - out_strides[iop] = stride; - } - else { - out_strides[iop] = NPY_MAX_INTP; - } - } - } - /* - * Inner loop contiguous array means its stride won't change when - * switching between buffering and not buffering - */ - else if (ad_strides[iop] == dtypes[iop]->elsize) { - out_strides[iop] = ad_strides[iop]; - } - /* - * Otherwise the strides can change if 
the operand is sometimes - * buffered, sometimes not. - */ - else { - out_strides[iop] = NPY_MAX_INTP; - } - } - } - else { - /* If there's no buffering, the strides are always fixed */ - memcpy(out_strides, NAD_STRIDES(axisdata0), nop*NPY_SIZEOF_INTP); - } -} - - -/*NUMPY_API - * Get a pointer to the size of the inner loop (when HasExternalLoop is true) - * - * This function may be safely called without holding the Python GIL. - */ -NPY_NO_EXPORT npy_intp * -NpyIter_GetInnerLoopSizePtr(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int nop = NIT_NOP(iter); - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *data = NIT_BUFFERDATA(iter); - return &NBF_SIZE(data); - } - else { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - return &NAD_SHAPE(axisdata); - } -} - -/* Checks 'flags' for (C|F)_ORDER_INDEX, MULTI_INDEX, and EXTERNAL_LOOP, - * setting the appropriate internal flags in 'itflags'. - * - * Returns 1 on success, 0 on error. - */ -static int -npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags) -{ - if ((flags&NPY_ITER_PER_OP_FLAGS) != 0) { - PyErr_SetString(PyExc_ValueError, - "A per-operand flag was passed as a global flag " - "to the iterator constructor"); - return 0; - } - - /* Check for an index */ - if (flags&(NPY_ITER_C_INDEX | NPY_ITER_F_INDEX)) { - if ((flags&(NPY_ITER_C_INDEX | NPY_ITER_F_INDEX)) == - (NPY_ITER_C_INDEX | NPY_ITER_F_INDEX)) { - PyErr_SetString(PyExc_ValueError, - "Iterator flags C_INDEX and " - "F_INDEX cannot both be specified"); - return 0; - } - (*itflags) |= NPY_ITFLAG_HASINDEX; - } - /* Check if a multi-index was requested */ - if (flags&NPY_ITER_MULTI_INDEX) { - /* - * This flag primarily disables dimension manipulations that - * would produce an incorrect multi-index. 
- */ - (*itflags) |= NPY_ITFLAG_HASMULTIINDEX; - } - /* Check if the caller wants to handle inner iteration */ - if (flags&NPY_ITER_EXTERNAL_LOOP) { - if ((*itflags)&(NPY_ITFLAG_HASINDEX|NPY_ITFLAG_HASMULTIINDEX)) { - PyErr_SetString(PyExc_ValueError, - "Iterator flag EXTERNAL_LOOP cannot be used " - "if an index or multi-index is being tracked"); - return 0; - } - (*itflags) |= NPY_ITFLAG_EXLOOP; - } - /* Ranged */ - if (flags&NPY_ITER_RANGED) { - (*itflags) |= NPY_ITFLAG_RANGE; - if ((flags&NPY_ITER_EXTERNAL_LOOP) && - !(flags&NPY_ITER_BUFFERED)) { - PyErr_SetString(PyExc_ValueError, - "Iterator flag RANGED cannot be used with " - "the flag EXTERNAL_LOOP unless " - "BUFFERED is also enabled"); - return 0; - } - } - /* Buffering */ - if (flags&NPY_ITER_BUFFERED) { - (*itflags) |= NPY_ITFLAG_BUFFER; - if (flags&NPY_ITER_GROWINNER) { - (*itflags) |= NPY_ITFLAG_GROWINNER; - } - if (flags&NPY_ITER_DELAY_BUFALLOC) { - (*itflags) |= NPY_ITFLAG_DELAYBUF; - } - } - - return 1; -} - -static int -npyiter_calculate_ndim(int nop, PyArrayObject **op_in, - int oa_ndim) -{ - /* If 'op_axes' is being used, force 'ndim' */ - if (oa_ndim > 0 ) { - return oa_ndim; - } - /* Otherwise it's the maximum 'ndim' from the operands */ - else { - int ndim = 0, iop; - - for (iop = 0; iop < nop; ++iop) { - if (op_in[iop] != NULL) { - int ondim = PyArray_NDIM(op_in[iop]); - if (ondim > ndim) { - ndim = ondim; - } - } - - } - - return ndim; - } -} - -static int -npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape) -{ - char axes_dupcheck[NPY_MAXDIMS]; - int iop, idim; - - if (oa_ndim == 0 && (op_axes != NULL || itershape != NULL)) { - PyErr_Format(PyExc_ValueError, - "If 'op_axes' or 'itershape' is not NULL in the" - "iterator constructor, 'oa_ndim' must be greater than zero"); - return 0; - } - else if (oa_ndim > 0) { - if (oa_ndim > NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "Cannot construct an iterator with more than %d dimensions " - "(%d were requested 
for op_axes)", - (int)NPY_MAXDIMS, oa_ndim); - return 0; - } - else if (op_axes == NULL) { - PyErr_Format(PyExc_ValueError, - "If 'oa_ndim' is greater than zero in the iterator " - "constructor, then op_axes cannot be NULL"); - return 0; - } - - /* Check that there are no duplicates in op_axes */ - for (iop = 0; iop < nop; ++iop) { - int *axes = op_axes[iop]; - if (axes != NULL) { - memset(axes_dupcheck, 0, NPY_MAXDIMS); - for (idim = 0; idim < oa_ndim; ++idim) { - npy_intp i = axes[idim]; - if (i >= 0) { - if (i >= NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "The 'op_axes' provided to the iterator " - "constructor for operand %d " - "contained invalid " - "values %d", (int)iop, (int)i); - return 0; - } else if(axes_dupcheck[i] == 1) { - PyErr_Format(PyExc_ValueError, - "The 'op_axes' provided to the iterator " - "constructor for operand %d " - "contained duplicate " - "value %d", (int)iop, (int)i); - return 0; - } - else { - axes_dupcheck[i] = 1; - } - } - } - } - } - } - - return 1; -} - -/* - * Checks the per-operand input flags, and fills in op_itflags. - * - * Returns 1 on success, 0 on failure. 
- */ -static int -npyiter_check_per_op_flags(npy_uint32 op_flags, char *op_itflags) -{ - if ((op_flags&NPY_ITER_GLOBAL_FLAGS) != 0) { - PyErr_SetString(PyExc_ValueError, - "A global iterator flag was passed as a per-operand flag " - "to the iterator constructor"); - return 0; - } - - /* Check the read/write flags */ - if (op_flags&NPY_ITER_READONLY) { - /* The read/write flags are mutually exclusive */ - if (op_flags&(NPY_ITER_READWRITE|NPY_ITER_WRITEONLY)) { - PyErr_SetString(PyExc_ValueError, - "Only one of the iterator flags READWRITE, " - "READONLY, and WRITEONLY may be " - "specified for an operand"); - return 0; - } - - *op_itflags = NPY_OP_ITFLAG_READ; - } - else if (op_flags&NPY_ITER_READWRITE) { - /* The read/write flags are mutually exclusive */ - if (op_flags&NPY_ITER_WRITEONLY) { - PyErr_SetString(PyExc_ValueError, - "Only one of the iterator flags READWRITE, " - "READONLY, and WRITEONLY may be " - "specified for an operand"); - return 0; - } - - *op_itflags = NPY_OP_ITFLAG_READ|NPY_OP_ITFLAG_WRITE; - } - else if(op_flags&NPY_ITER_WRITEONLY) { - *op_itflags = NPY_OP_ITFLAG_WRITE; - } - else { - PyErr_SetString(PyExc_ValueError, - "None of the iterator flags READWRITE, " - "READONLY, or WRITEONLY were " - "specified for an operand"); - return 0; - } - - /* Check the flags for temporary copies */ - if (((*op_itflags)&NPY_OP_ITFLAG_WRITE) && - (op_flags&(NPY_ITER_COPY| - NPY_ITER_UPDATEIFCOPY)) == NPY_ITER_COPY) { - PyErr_SetString(PyExc_ValueError, - "If an iterator operand is writeable, must use " - "the flag UPDATEIFCOPY instead of " - "COPY"); - return 0; - } - - return 1; -} - -/* - * Prepares a a constructor operand. Assumes a reference to 'op' - * is owned, and that 'op' may be replaced. Fills in 'op_dtype' - * and 'ndim'. - * - * Returns 1 on success, 0 on failure. 
- */ -static int -npyiter_prepare_one_operand(PyArrayObject **op, - char **op_dataptr, - PyArray_Descr *op_request_dtype, - PyArray_Descr **op_dtype, - npy_uint32 flags, - npy_uint32 op_flags, char *op_itflags) -{ - /* NULL operands must be automatically allocated outputs */ - if (*op == NULL) { - /* ALLOCATE should be enabled */ - if (!(op_flags&NPY_ITER_ALLOCATE)) { - PyErr_SetString(PyExc_ValueError, - "Iterator operand was NULL, but automatic allocation as an " - "output wasn't requested"); - return 0; - } - /* Writing should be enabled */ - if (!((*op_itflags)&NPY_OP_ITFLAG_WRITE)) { - PyErr_SetString(PyExc_ValueError, - "Automatic allocation was requested for an iterator " - "operand, but it wasn't flagged for writing"); - return 0; - } - /* - * Reading should be disabled if buffering is enabled without - * also enabling NPY_ITER_DELAY_BUFALLOC. In all other cases, - * the caller may initialize the allocated operand to a value - * before beginning iteration. - */ - if (((flags&(NPY_ITER_BUFFERED| - NPY_ITER_DELAY_BUFALLOC)) == NPY_ITER_BUFFERED) && - ((*op_itflags)&NPY_OP_ITFLAG_READ)) { - PyErr_SetString(PyExc_ValueError, - "Automatic allocation was requested for an iterator " - "operand, and it was flagged as readable, but buffering " - " without delayed allocation was enabled"); - return 0; - } - *op_dataptr = NULL; - /* If a requested dtype was provided, use it, otherwise NULL */ - Py_XINCREF(op_request_dtype); - *op_dtype = op_request_dtype; - - return 1; - } - - if (PyArray_Check(*op)) { - if (((*op_itflags)&NPY_OP_ITFLAG_WRITE) && - (!PyArray_CHKFLAGS(*op, NPY_WRITEABLE))) { - PyErr_SetString(PyExc_ValueError, - "Iterator operand was a non-writeable array, but was " - "flagged as writeable"); - return 0; - } - if (!(flags&NPY_ITER_ZEROSIZE_OK) && PyArray_SIZE(*op) == 0) { - PyErr_SetString(PyExc_ValueError, - "Iteration of zero-sized operands is not enabled"); - return 0; - } - *op_dataptr = PyArray_BYTES(*op); - /* PyArray_DESCR does not give us a 
reference */ - *op_dtype = PyArray_DESCR(*op); - if (*op_dtype == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator input array object has no dtype descr"); - return 0; - } - Py_INCREF(*op_dtype); - /* - * If references weren't specifically allowed, make sure there - * are no references in the inputs or requested dtypes. - */ - if (!(flags&NPY_ITER_REFS_OK)) { - PyArray_Descr *dt = PyArray_DESCR(*op); - if (((dt->flags&(NPY_ITEM_REFCOUNT| - NPY_ITEM_IS_POINTER)) != 0) || - (dt != *op_dtype && - (((*op_dtype)->flags&(NPY_ITEM_REFCOUNT| - NPY_ITEM_IS_POINTER))) != 0)) { - PyErr_SetString(PyExc_TypeError, - "Iterator operand or requested dtype holds " - "references, but the REFS_OK flag was not enabled"); - return 0; - } - } - /* - * Checking whether casts are valid is done later, once the - * final data types have been selected. For now, just store the - * requested type. - */ - if (op_request_dtype != NULL) { - /* We just have a borrowed reference to op_request_dtype */ - Py_INCREF(op_request_dtype); - /* If it's a data type without a size, set the size */ - if (op_request_dtype->elsize == 0) { - PyArray_DESCR_REPLACE(op_request_dtype); - if (op_request_dtype == NULL) { - return 0; - } - - if (op_request_dtype->type_num == NPY_STRING) { - switch((*op_dtype)->type_num) { - case NPY_STRING: - op_request_dtype->elsize = (*op_dtype)->elsize; - break; - case NPY_UNICODE: - op_request_dtype->elsize = (*op_dtype)->elsize >> 2; - break; - } - } - else if (op_request_dtype->type_num == NPY_UNICODE) { - switch((*op_dtype)->type_num) { - case NPY_STRING: - op_request_dtype->elsize = (*op_dtype)->elsize << 2; - break; - case NPY_UNICODE: - op_request_dtype->elsize = (*op_dtype)->elsize; - break; - } - } - else if (op_request_dtype->type_num == NPY_VOID) { - op_request_dtype->elsize = (*op_dtype)->elsize; - } - } - /* Store the requested dtype */ - Py_DECREF(*op_dtype); - *op_dtype = op_request_dtype; - } - - /* Check if the operand is in the byte order requested */ - if 
(op_flags&NPY_ITER_NBO) { - /* Check byte order */ - if (!PyArray_ISNBO((*op_dtype)->byteorder)) { - PyArray_Descr *nbo_dtype; - - /* Replace with a new descr which is in native byte order */ - nbo_dtype = PyArray_DescrNewByteorder(*op_dtype, NPY_NATIVE); - Py_DECREF(*op_dtype); - *op_dtype = nbo_dtype; - - NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST " - "because of NPY_ITER_NBO\n"); - /* Indicate that byte order or alignment needs fixing */ - *op_itflags |= NPY_OP_ITFLAG_CAST; - } - } - /* Check if the operand is aligned */ - if (op_flags&NPY_ITER_ALIGNED) { - /* Check alignment */ - if (!PyArray_ISALIGNED(*op)) { - NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST " - "because of NPY_ITER_ALIGNED\n"); - *op_itflags |= NPY_OP_ITFLAG_CAST; - } - } - /* - * The check for NPY_ITER_CONTIG can only be done later, - * once the final iteration order is settled. - */ - } - else { - PyErr_SetString(PyExc_ValueError, - "Iterator inputs must be ndarrays"); - return 0; - } - - return 1; -} - -/* - * Process all the operands, copying new references so further processing - * can replace the arrays if copying is necessary. - */ -static int -npyiter_prepare_operands(int nop, PyArrayObject **op_in, - PyArrayObject **op, - char **op_dataptr, - PyArray_Descr **op_request_dtypes, - PyArray_Descr **op_dtype, - npy_uint32 flags, - npy_uint32 *op_flags, char *op_itflags) -{ - int iop, i; - - for (iop = 0; iop < nop; ++iop) { - op[iop] = op_in[iop]; - Py_XINCREF(op[iop]); - op_dtype[iop] = NULL; - - /* Check the readonly/writeonly flags, and fill in op_itflags */ - if (!npyiter_check_per_op_flags(op_flags[iop], &op_itflags[iop])) { - for (i = 0; i <= iop; ++i) { - Py_XDECREF(op[i]); - Py_XDECREF(op_dtype[i]); - } - return 0; - } - - /* - * Prepare the operand. This produces an op_dtype[iop] reference - * on success. - */ - if (!npyiter_prepare_one_operand(&op[iop], - &op_dataptr[iop], - op_request_dtypes ? 
op_request_dtypes[iop] : NULL, - &op_dtype[iop], - flags, - op_flags[iop], &op_itflags[iop])) { - for (i = 0; i <= iop; ++i) { - Py_XDECREF(op[i]); - Py_XDECREF(op_dtype[i]); - } - return 0; - } - } - - - /* If all the operands were NULL, it's an error */ - if (op[0] == NULL) { - int all_null = 1; - for (iop = 1; iop < nop; ++iop) { - if (op[iop] != NULL) { - all_null = 0; - break; - } - } - if (all_null) { - for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - Py_XDECREF(op_dtype[i]); - } - PyErr_SetString(PyExc_ValueError, - "At least one iterator input must be non-NULL"); - return 0; - } - } - - return 1; -} - -static const char * -npyiter_casting_to_string(NPY_CASTING casting) -{ - switch (casting) { - case NPY_NO_CASTING: - return "'no'"; - case NPY_EQUIV_CASTING: - return "'equiv'"; - case NPY_SAFE_CASTING: - return "'safe'"; - case NPY_SAME_KIND_CASTING: - return "'same_kind'"; - case NPY_UNSAFE_CASTING: - return "'unsafe'"; - default: - return ""; - } -} - -static int -npyiter_check_casting(int nop, PyArrayObject **op, - PyArray_Descr **op_dtype, - NPY_CASTING casting, - char *op_itflags) -{ - int iop; - - for(iop = 0; iop < nop; ++iop) { - NPY_IT_DBG_PRINT1("Iterator: Checking casting for operand %d\n", - (int)iop); -#if NPY_IT_DBG_TRACING - printf("op: "); - if (op[iop] != NULL) { - PyObject_Print((PyObject *)PyArray_DESCR(op[iop]), stdout, 0); - } - else { - printf(""); - } - printf(", iter: "); - PyObject_Print((PyObject *)op_dtype[iop], stdout, 0); - printf("\n"); -#endif - /* If the types aren't equivalent, a cast is necessary */ - if (op[iop] != NULL && !PyArray_EquivTypes(PyArray_DESCR(op[iop]), - op_dtype[iop])) { - /* Check read (op -> temp) casting */ - if ((op_itflags[iop]&NPY_OP_ITFLAG_READ) && - !PyArray_CanCastArrayTo(op[iop], - op_dtype[iop], - casting)) { - PyErr_Format(PyExc_TypeError, - "Iterator operand %d dtype could not be cast " - "to the requested dtype, according to " - "the casting rule given, %s", (int)iop, - 
npyiter_casting_to_string(casting)); - return 0; - } - /* Check write (temp -> op) casting */ - if ((op_itflags[iop]&NPY_OP_ITFLAG_WRITE) && - !PyArray_CanCastTypeTo(op_dtype[iop], - PyArray_DESCR(op[iop]), - casting)) { - PyErr_Format(PyExc_TypeError, - "Iterator requested dtype could not be cast " - "to the operand %d dtype, according to " - "the casting rule given, %s", (int)iop, - npyiter_casting_to_string(casting)); - return 0; - } - - NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST " - "because the types aren't equivalent\n"); - /* Indicate that this operand needs casting */ - op_itflags[iop] |= NPY_OP_ITFLAG_CAST; - } - } - - return 1; -} - -static PyObject * -npyiter_shape_string(npy_intp n, npy_intp *vals, char *ending) -{ - npy_intp i; - PyObject *ret, *tmp; - - /* - * Negative dimension indicates "newaxis", which can - * be discarded for printing if its a leading dimension. - * Find the first non-"newaxis" dimension. - */ - i = 0; - while (i < n && vals[i] < 0) { - ++i; - } - - if (i == n) { - return PyUString_FromFormat("()%s", ending); - } - else { - ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]); - if (ret == NULL) { - return NULL; - } - } - - for (; i < n; ++i) { - if (vals[i] < 0) { - tmp = PyUString_FromString(",newaxis"); - } - else { - tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]); - } - if (tmp == NULL) { - Py_DECREF(ret); - return NULL; - } - - PyUString_ConcatAndDel(&ret, tmp); - if (ret == NULL) { - return NULL; - } - } - - tmp = PyUString_FromFormat(")%s", ending); - PyUString_ConcatAndDel(&ret, tmp); - return ret; -} - -/* - * Fills in the AXISDATA for the 'nop' operands, broadcasting - * the dimensionas as necessary. Also fills - * in the ITERSIZE data member. - * - * If op_axes is not NULL, it should point to an array of ndim-sized - * arrays, one for each op. - * - * Returns 1 on success, 0 on failure. 
- */ -static int -npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, char *op_itflags, - char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, - npy_intp *itershape, - int output_scalars) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - int ondim; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - PyArrayObject **op = NIT_OPERANDS(iter), *op_cur; - npy_intp broadcast_shape[NPY_MAXDIMS]; - - /* First broadcast the shapes together */ - if (itershape == NULL) { - for (idim = 0; idim < ndim; ++idim) { - broadcast_shape[idim] = 1; - } - } - else { - for (idim = 0; idim < ndim; ++idim) { - broadcast_shape[idim] = itershape[idim]; - /* Negative shape entries are deduced from the operands */ - if (broadcast_shape[idim] < 0) { - broadcast_shape[idim] = 1; - } - } - } - for (iop = 0; iop < nop; ++iop) { - op_cur = op[iop]; - if (op_cur != NULL) { - npy_intp *shape = PyArray_DIMS(op_cur); - ondim = PyArray_NDIM(op_cur); - - if (op_axes == NULL || op_axes[iop] == NULL) { - /* - * Possible if op_axes are being used, but - * op_axes[iop] is NULL - */ - if (ondim > ndim) { - PyErr_SetString(PyExc_ValueError, - "input operand has more dimensions than allowed " - "by the axis remapping"); - return 0; - } - for (idim = 0; idim < ondim; ++idim) { - npy_intp bshape = broadcast_shape[idim+ndim-ondim], - op_shape = shape[idim]; - if (bshape == 1) { - broadcast_shape[idim+ndim-ondim] = op_shape; - } - else if (bshape != op_shape && op_shape != 1) { - goto broadcast_error; - } - } - } - else { - int *axes = op_axes[iop]; - for (idim = 0; idim < ndim; ++idim) { - int i = axes[idim]; - if (i >= 0) { - if (i < ondim) { - npy_intp bshape = broadcast_shape[idim], - op_shape = shape[i]; - if (bshape == 1) { - broadcast_shape[idim] = op_shape; - } - else if (bshape != op_shape && op_shape != 1) { - goto broadcast_error; - } - } - else { - PyErr_Format(PyExc_ValueError, - "Iterator input op_axes[%d][%d] (==%d) " - 
"is not a valid axis of op[%d], which " - "has %d dimensions ", - (int)iop, (int)(ndim-idim-1), (int)i, - (int)iop, (int)ondim); - return 0; - } - } - } - } - } - } - /* - * If a shape was provided with a 1 entry, make sure that entry didn't - * get expanded by broadcasting. - */ - if (itershape != NULL) { - for (idim = 0; idim < ndim; ++idim) { - if (itershape[idim] == 1 && broadcast_shape[idim] != 1) { - goto broadcast_error; - } - } - } - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - /* Now process the operands, filling in the axisdata */ - for (idim = 0; idim < ndim; ++idim) { - npy_intp bshape = broadcast_shape[ndim-idim-1]; - npy_intp *strides = NAD_STRIDES(axisdata); - - NAD_SHAPE(axisdata) = bshape; - NAD_INDEX(axisdata) = 0; - memcpy(NAD_PTRS(axisdata), op_dataptr, NPY_SIZEOF_INTP*nop); - - for (iop = 0; iop < nop; ++iop) { - op_cur = op[iop]; - - if (op_axes == NULL || op_axes[iop] == NULL) { - if (op_cur == NULL) { - strides[iop] = 0; - } - else { - ondim = PyArray_NDIM(op_cur); - if (bshape == 1) { - strides[iop] = 0; - if (idim >= ondim && !output_scalars && - (op_flags[iop]&NPY_ITER_NO_BROADCAST)) { - goto operand_different_than_broadcast; - } - } - else if (idim >= ondim || - PyArray_DIM(op_cur, ondim-idim-1) == 1) { - strides[iop] = 0; - if (op_flags[iop]&NPY_ITER_NO_BROADCAST) { - goto operand_different_than_broadcast; - } - /* If it's writeable, this means a reduction */ - if (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) { - if (!(flags&NPY_ITER_REDUCE_OK)) { - PyErr_SetString(PyExc_ValueError, - "output operand requires a reduction, but " - "reduction is not enabled"); - return 0; - } - if (!(op_itflags[iop]&NPY_OP_ITFLAG_READ)) { - PyErr_SetString(PyExc_ValueError, - "output operand requires a reduction, but " - "is flagged as write-only, not " - "read-write"); - return 0; - } - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REDUCE; - op_itflags[iop] |= NPY_OP_ITFLAG_REDUCE; - } - } - else { - strides[iop] = 
PyArray_STRIDE(op_cur, ondim-idim-1); - } - } - } - else { - int *axes = op_axes[iop]; - int i = axes[ndim-idim-1]; - if (i >= 0) { - if (bshape == 1 || op_cur == NULL) { - strides[iop] = 0; - } - else if (PyArray_DIM(op_cur, i) == 1) { - strides[iop] = 0; - if (op_flags[iop]&NPY_ITER_NO_BROADCAST) { - goto operand_different_than_broadcast; - } - /* If it's writeable, this means a reduction */ - if (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) { - if (!(flags&NPY_ITER_REDUCE_OK)) { - PyErr_SetString(PyExc_ValueError, - "output operand requires a reduction, but " - "reduction is not enabled"); - return 0; - } - if (!(op_itflags[iop]&NPY_OP_ITFLAG_READ)) { - PyErr_SetString(PyExc_ValueError, - "output operand requires a reduction, but " - "is flagged as write-only, not " - "read-write"); - return 0; - } - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REDUCE; - op_itflags[iop] |= NPY_OP_ITFLAG_REDUCE; - } - } - else { - strides[iop] = PyArray_STRIDE(op_cur, i); - } - } - else if (bshape == 1) { - strides[iop] = 0; - } - else { - strides[iop] = 0; - /* If it's writeable, this means a reduction */ - if (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) { - if (!(flags&NPY_ITER_REDUCE_OK)) { - PyErr_SetString(PyExc_ValueError, - "output operand requires a reduction, but " - "reduction is not enabled"); - return 0; - } - if (!(op_itflags[iop]&NPY_OP_ITFLAG_READ)) { - PyErr_SetString(PyExc_ValueError, - "output operand requires a reduction, but " - "is flagged as write-only, not " - "read-write"); - return 0; - } - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REDUCE; - op_itflags[iop] |= NPY_OP_ITFLAG_REDUCE; - } - } - } - } - - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - - /* Now fill in the ITERSIZE member */ - NIT_ITERSIZE(iter) = broadcast_shape[0]; - for (idim = 1; idim < ndim; ++idim) { - NIT_ITERSIZE(iter) *= broadcast_shape[idim]; - } - /* The range defaults to everything */ - NIT_ITERSTART(iter) = 0; - NIT_ITEREND(iter) = NIT_ITERSIZE(iter); - - return 1; - -broadcast_error: { - PyObject *errmsg, *tmp; - npy_intp 
remdims[NPY_MAXDIMS]; - char *tmpstr; - - if (op_axes == NULL) { - errmsg = PyUString_FromString("operands could not be broadcast " - "together with shapes "); - if (errmsg == NULL) { - return 0; - } - for (iop = 0; iop < nop; ++iop) { - if (op[iop] != NULL) { - tmp = npyiter_shape_string(PyArray_NDIM(op[iop]), - PyArray_DIMS(op[iop]), - " "); - if (tmp == NULL) { - Py_DECREF(errmsg); - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - } - } - if (itershape != NULL) { - tmp = PyUString_FromString("and requested shape "); - if (tmp == NULL) { - Py_DECREF(errmsg); - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - tmp = npyiter_shape_string(ndim, itershape, ""); - if (tmp == NULL) { - Py_DECREF(errmsg); - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - } - PyErr_SetObject(PyExc_ValueError, errmsg); - } - else { - errmsg = PyUString_FromString("operands could not be broadcast " - "together with remapped shapes " - "[original->remapped]: "); - for (iop = 0; iop < nop; ++iop) { - if (op[iop] != NULL) { - int *axes = op_axes[iop]; - - tmpstr = (axes == NULL) ? 
" " : "->"; - tmp = npyiter_shape_string(PyArray_NDIM(op[iop]), - PyArray_DIMS(op[iop]), - tmpstr); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - if (axes != NULL) { - for (idim = 0; idim < ndim; ++idim) { - npy_intp i = axes[idim]; - - if (i >= 0 && i < PyArray_NDIM(op[iop])) { - remdims[idim] = PyArray_DIM(op[iop], i); - } - else { - remdims[idim] = -1; - } - } - tmp = npyiter_shape_string(ndim, remdims, " "); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - } - } - } - if (itershape != NULL) { - tmp = PyUString_FromString("and requested shape "); - if (tmp == NULL) { - Py_DECREF(errmsg); - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - tmp = npyiter_shape_string(ndim, itershape, ""); - if (tmp == NULL) { - Py_DECREF(errmsg); - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - } - PyErr_SetObject(PyExc_ValueError, errmsg); - } - - return 0; - } - -operand_different_than_broadcast: { - npy_intp remdims[NPY_MAXDIMS]; - PyObject *errmsg, *tmp; - - /* Start of error message */ - if (op_flags[iop]&NPY_ITER_READONLY) { - errmsg = PyUString_FromString("non-broadcastable operand " - "with shape "); - } - else { - errmsg = PyUString_FromString("non-broadcastable output " - "operand with shape "); - } - if (errmsg == NULL) { - return 0; - } - - /* Operand shape */ - tmp = npyiter_shape_string(PyArray_NDIM(op[iop]), - PyArray_DIMS(op[iop]), ""); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - /* Remapped operand shape */ - if (op_axes != NULL && op_axes[iop] != NULL) { - int *axes = op_axes[iop]; - - for (idim = 0; idim < ndim; ++idim) { - npy_intp i = axes[ndim-idim-1]; - - if (i >= 0 && i < PyArray_NDIM(op[iop])) { - remdims[idim] = PyArray_DIM(op[iop], i); - } - 
else { - remdims[idim] = -1; - } - } - - tmp = PyUString_FromString(" [remapped to "); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - tmp = npyiter_shape_string(ndim, remdims, "]"); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - } - - tmp = PyUString_FromString(" doesn't match the broadcast shape "); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - /* Fill in the broadcast shape */ - axisdata = NIT_AXISDATA(iter); - for (idim = 0; idim < ndim; ++idim) { - remdims[ndim-idim-1] = NAD_SHAPE(axisdata); - - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - - /* Broadcast shape */ - tmp = npyiter_shape_string(ndim, remdims, ""); - if (tmp == NULL) { - return 0; - } - PyUString_ConcatAndDel(&errmsg, tmp); - if (errmsg == NULL) { - return 0; - } - - PyErr_SetObject(PyExc_ValueError, errmsg); - - return 0; - } -} - -/* - * Replaces the AXISDATA for the iop'th operand, broadcasting - * the dimensions as necessary. Assumes the replacement array is - * exactly the same shape as the original array used when - * npy_fill_axisdata was called. - * - * If op_axes is not NULL, it should point to an ndim-sized - * array. - */ -static void -npyiter_replace_axisdata(NpyIter *iter, int iop, - PyArrayObject *op, - int op_ndim, char *op_dataptr, - int *op_axes) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - NpyIter_AxisData *axisdata0, *axisdata; - npy_intp sizeof_axisdata; - npy_int8 *perm; - npy_intp baseoffset = 0; - - perm = NIT_PERM(iter); - axisdata0 = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - /* - * Replace just the strides which were non-zero, and compute - * the base data address. 
- */ - axisdata = axisdata0; - - if (op_axes != NULL) { - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_int8 p; - int i; - npy_intp shape; - - /* Apply the perm to get the original axis */ - p = perm[idim]; - if (p < 0) { - i = op_axes[ndim+p]; - } - else { - i = op_axes[ndim-p-1]; - } - - if (0 <= i && i < op_ndim) { - shape = PyArray_DIM(op, i); - if (shape != 1) { - npy_intp stride = PyArray_STRIDE(op, i); - if (p < 0) { - /* If the perm entry is negative, flip the axis */ - NAD_STRIDES(axisdata)[iop] = -stride; - baseoffset += stride*(shape-1); - } - else { - NAD_STRIDES(axisdata)[iop] = stride; - } - } - } - } - } - else { - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_int8 p; - int i; - npy_intp shape; - - /* Apply the perm to get the original axis */ - p = perm[idim]; - if (p < 0) { - i = op_ndim+p; - } - else { - i = op_ndim-p-1; - } - - if (i >= 0) { - shape = PyArray_DIM(op, i); - if (shape != 1) { - npy_intp stride = PyArray_STRIDE(op, i); - if (p < 0) { - /* If the perm entry is negative, flip the axis */ - NAD_STRIDES(axisdata)[iop] = -stride; - baseoffset += stride*(shape-1); - } - else { - NAD_STRIDES(axisdata)[iop] = stride; - } - } - } - } - } - - op_dataptr += baseoffset; - - /* Now the base data pointer is calculated, set it everywhere it's needed */ - NIT_RESETDATAPTR(iter)[iop] = op_dataptr; - NIT_BASEOFFSETS(iter)[iop] = baseoffset; - axisdata = axisdata0; - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - NAD_PTRS(axisdata)[iop] = op_dataptr; - } -} - -/* - * Computes the iterator's index strides and initializes the index values - * to zero. - * - * This must be called before the axes (i.e. the AXISDATA array) may - * be reordered. 
- */ -static void -npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp indexstride; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - - /* - * If there is only one element being iterated, we just have - * to touch the first AXISDATA because nothing will ever be - * incremented. - */ - if (NIT_ITERSIZE(iter) == 1) { - if (itflags&NPY_ITFLAG_HASINDEX) { - axisdata = NIT_AXISDATA(iter); - NAD_PTRS(axisdata)[nop] = 0; - } - return; - } - - if (flags&NPY_ITER_C_INDEX) { - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - axisdata = NIT_AXISDATA(iter); - indexstride = 1; - for(idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_intp shape = NAD_SHAPE(axisdata); - - if (shape == 1) { - NAD_STRIDES(axisdata)[nop] = 0; - } - else { - NAD_STRIDES(axisdata)[nop] = indexstride; - } - NAD_PTRS(axisdata)[nop] = 0; - indexstride *= shape; - } - } - else if (flags&NPY_ITER_F_INDEX) { - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - axisdata = NIT_INDEX_AXISDATA(NIT_AXISDATA(iter), ndim-1); - indexstride = 1; - for(idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, -1)) { - npy_intp shape = NAD_SHAPE(axisdata); - - if (shape == 1) { - NAD_STRIDES(axisdata)[nop] = 0; - } - else { - NAD_STRIDES(axisdata)[nop] = indexstride; - } - NAD_PTRS(axisdata)[nop] = 0; - indexstride *= shape; - } - } -} - -/* - * If the order is NPY_KEEPORDER, lets the iterator find the best - * iteration order, otherwise forces it. Indicates in the itflags that - * whether the iteration order was forced. 
- */ -static void -npyiter_apply_forced_iteration_order(NpyIter *iter, NPY_ORDER order) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - int ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - switch (order) { - case NPY_CORDER: - NIT_ITFLAGS(iter) |= NPY_ITFLAG_FORCEDORDER; - break; - case NPY_FORTRANORDER: - NIT_ITFLAGS(iter) |= NPY_ITFLAG_FORCEDORDER; - /* Only need to actually do something if there is more than 1 dim */ - if (ndim > 1) { - npyiter_reverse_axis_ordering(iter); - } - break; - case NPY_ANYORDER: - NIT_ITFLAGS(iter) |= NPY_ITFLAG_FORCEDORDER; - /* Only need to actually do something if there is more than 1 dim */ - if (ndim > 1) { - PyArrayObject **op = NIT_OPERANDS(iter); - int forder = 1; - - /* Check that all the array inputs are fortran order */ - for (iop = 0; iop < nop; ++iop, ++op) { - if (*op && !PyArray_CHKFLAGS(*op, NPY_F_CONTIGUOUS)) { - forder = 0; - break; - } - } - - if (forder) { - npyiter_reverse_axis_ordering(iter); - } - } - break; - case NPY_KEEPORDER: - /* Don't set the forced order flag here... */ - break; - } -} - - -/* - * This function negates any strides in the iterator - * which are negative. When iterating more than one - * object, it only flips strides when they are all - * negative or zero. - */ -static void -npyiter_flip_negative_strides(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - npy_intp istrides, nstrides = NAD_NSTRIDES(); - NpyIter_AxisData *axisdata, *axisdata0; - npy_intp *baseoffsets; - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - int any_flipped = 0; - - axisdata0 = axisdata = NIT_AXISDATA(iter); - baseoffsets = NIT_BASEOFFSETS(iter); - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_intp *strides = NAD_STRIDES(axisdata); - int any_negative = 0; - - /* - * Check the signs of all the strides, excluding - * the index stride at the end. 
- */ - for (iop = 0; iop < nop; ++iop) { - if (strides[iop] < 0) { - any_negative = 1; - } - else if (strides[iop] != 0) { - break; - } - } - /* - * If at least on stride is negative and none are positive, - * flip all the strides for this dimension. - */ - if (any_negative && iop == nop) { - npy_intp shapem1 = NAD_SHAPE(axisdata) - 1; - - for (istrides = 0; istrides < nstrides; ++istrides) { - npy_intp stride = strides[istrides]; - - /* Adjust the base pointers to start at the end */ - baseoffsets[istrides] += shapem1 * stride; - /* Flip the stride */ - strides[istrides] = -stride; - } - /* - * Make the perm entry negative so get_multi_index - * knows it's flipped - */ - NIT_PERM(iter)[idim] = -1-NIT_PERM(iter)[idim]; - - any_flipped = 1; - } - } - - /* - * If any strides were flipped, the base pointers were adjusted - * in the first AXISDATA, and need to be copied to all the rest - */ - if (any_flipped) { - char **resetdataptr = NIT_RESETDATAPTR(iter); - - for (istrides = 0; istrides < nstrides; ++istrides) { - resetdataptr[istrides] += baseoffsets[istrides]; - } - axisdata = axisdata0; - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - char **ptrs = NAD_PTRS(axisdata); - for (istrides = 0; istrides < nstrides; ++istrides) { - ptrs[istrides] = resetdataptr[istrides]; - } - } - /* - * Indicate that some of the perm entries are negative, - * and that it's not (strictly speaking) the identity perm. 
- */ - NIT_ITFLAGS(iter) = (NIT_ITFLAGS(iter)|NPY_ITFLAG_NEGPERM) & - ~NPY_ITFLAG_IDENTPERM; - } -} - -static void -npyiter_reverse_axis_ordering(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp i, temp, size; - npy_intp *first, *last; - npy_int8 *perm; - - size = NIT_AXISDATA_SIZEOF(itflags, ndim, nop)/NPY_SIZEOF_INTP; - first = (npy_intp*)NIT_AXISDATA(iter); - last = first + (ndim-1)*size; - - /* This loop reverses the order of the AXISDATA array */ - while (first < last) { - for (i = 0; i < size; ++i) { - temp = first[i]; - first[i] = last[i]; - last[i] = temp; - } - first += size; - last -= size; - } - - /* Store the perm we applied */ - perm = NIT_PERM(iter); - for(i = ndim-1; i >= 0; --i, ++perm) { - *perm = (npy_int8)i; - } - - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_IDENTPERM; -} - -static npy_intp intp_abs(npy_intp x) -{ - return (x < 0) ? -x : x; -} - -static void -npyiter_find_best_axis_ordering(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - npy_intp ax_i0, ax_i1, ax_ipos; - npy_int8 ax_j0, ax_j1; - npy_int8 *perm; - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - int permuted = 0; - - perm = NIT_PERM(iter); - - /* - * Do a custom stable insertion sort. Note that because - * the AXISDATA has been reversed from C order, this - * is sorting from smallest stride to biggest stride. 
- */ - for (ax_i0 = 1; ax_i0 < ndim; ++ax_i0) { - npy_intp *strides0; - - /* 'ax_ipos' is where perm[ax_i0] will get inserted */ - ax_ipos = ax_i0; - ax_j0 = perm[ax_i0]; - - strides0 = NAD_STRIDES(NIT_INDEX_AXISDATA(axisdata, ax_j0)); - for (ax_i1 = ax_i0-1; ax_i1 >= 0; --ax_i1) { - int ambig = 1, shouldswap = 0; - npy_intp *strides1; - - ax_j1 = perm[ax_i1]; - - strides1 = NAD_STRIDES(NIT_INDEX_AXISDATA(axisdata, ax_j1)); - - for (iop = 0; iop < nop; ++iop) { - if (strides0[iop] != 0 && strides1[iop] != 0) { - if (intp_abs(strides1[iop]) <= - intp_abs(strides0[iop])) { - /* - * Set swap even if it's not ambiguous already, - * because in the case of conflicts between - * different operands, C-order wins. - */ - shouldswap = 0; - } - else { - /* Only set swap if it's still ambiguous */ - if (ambig) { - shouldswap = 1; - } - } - - /* - * A comparison has been done, so it's - * no longer ambiguous - */ - ambig = 0; - } - } - /* - * If the comparison was unambiguous, either shift - * 'ax_ipos' to 'ax_i1' or stop looking for an insertion - * point - */ - if (!ambig) { - if (shouldswap) { - ax_ipos = ax_i1; - } - else { - break; - } - } - } - - /* Insert perm[ax_i0] into the right place */ - if (ax_ipos != ax_i0) { - for (ax_i1 = ax_i0; ax_i1 > ax_ipos; --ax_i1) { - perm[ax_i1] = perm[ax_i1-1]; - } - perm[ax_ipos] = ax_j0; - permuted = 1; - } - } - - /* Apply the computed permutation to the AXISDATA array */ - if (permuted == 1) { - npy_intp i, size = sizeof_axisdata/NPY_SIZEOF_INTP; - NpyIter_AxisData *ad_i; - - /* Use the index as a flag, set each to 1 */ - ad_i = axisdata; - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(ad_i, 1)) { - NAD_INDEX(ad_i) = 1; - } - /* Apply the permutation by following the cycles */ - for (idim = 0; idim < ndim; ++idim) { - ad_i = NIT_INDEX_AXISDATA(axisdata, idim); - - /* If this axis hasn't been touched yet, process it */ - if (NAD_INDEX(ad_i) == 1) { - npy_int8 pidim = perm[idim]; - npy_intp tmp; - NpyIter_AxisData *ad_p, 
*ad_q; - - if (pidim != idim) { - /* Follow the cycle, copying the data */ - for (i = 0; i < size; ++i) { - pidim = perm[idim]; - ad_q = ad_i; - tmp = *((npy_intp*)ad_q + i); - while (pidim != idim) { - ad_p = NIT_INDEX_AXISDATA(axisdata, pidim); - *((npy_intp*)ad_q + i) = *((npy_intp*)ad_p + i); - - ad_q = ad_p; - pidim = perm[(int)pidim]; - } - *((npy_intp*)ad_q + i) = tmp; - } - /* Follow the cycle again, marking it as done */ - pidim = perm[idim]; - - while (pidim != idim) { - NAD_INDEX(NIT_INDEX_AXISDATA(axisdata, pidim)) = 0; - pidim = perm[(int)pidim]; - } - } - NAD_INDEX(ad_i) = 0; - } - } - /* Clear the identity perm flag */ - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_IDENTPERM; - } -} - -static void -npyiter_coalesce_axes(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_intp istrides, nstrides = NAD_NSTRIDES(); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - NpyIter_AxisData *ad_compress; - npy_intp new_ndim = 1; - - /* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */ - NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX); - - axisdata = NIT_AXISDATA(iter); - ad_compress = axisdata; - - for (idim = 0; idim < ndim-1; ++idim) { - int can_coalesce = 1; - npy_intp shape0 = NAD_SHAPE(ad_compress); - npy_intp shape1 = NAD_SHAPE(NIT_INDEX_AXISDATA(axisdata, 1)); - npy_intp *strides0 = NAD_STRIDES(ad_compress); - npy_intp *strides1 = NAD_STRIDES(NIT_INDEX_AXISDATA(axisdata, 1)); - - /* Check that all the axes can be coalesced */ - for (istrides = 0; istrides < nstrides; ++istrides) { - if (!((shape0 == 1 && strides0[istrides] == 0) || - (shape1 == 1 && strides1[istrides] == 0)) && - (strides0[istrides]*shape0 != strides1[istrides])) { - can_coalesce = 0; - break; - } - } - - if (can_coalesce) { - npy_intp *strides = NAD_STRIDES(ad_compress); - - NIT_ADVANCE_AXISDATA(axisdata, 1); - 
NAD_SHAPE(ad_compress) *= NAD_SHAPE(axisdata); - for (istrides = 0; istrides < nstrides; ++istrides) { - if (strides[istrides] == 0) { - strides[istrides] = NAD_STRIDES(axisdata)[istrides]; - } - } - } - else { - NIT_ADVANCE_AXISDATA(axisdata, 1); - NIT_ADVANCE_AXISDATA(ad_compress, 1); - if (ad_compress != axisdata) { - memcpy(ad_compress, axisdata, sizeof_axisdata); - } - ++new_ndim; - } - } - - /* - * If the number of axes shrunk, reset the perm and - * compress the data into the new layout. - */ - if (new_ndim < ndim) { - npy_int8 *perm = NIT_PERM(iter); - - /* Reset to an identity perm */ - for (idim = 0; idim < new_ndim; ++idim) { - perm[idim] = (npy_int8)idim; - } - NIT_NDIM(iter) = new_ndim; - } -} - -/* - * Allocates a temporary array which can be used to replace op - * in the iteration. Its dtype will be op_dtype. - * - * The result array has a memory ordering which matches the iterator, - * which may or may not match that of op. The parameter 'shape' may be - * NULL, in which case it is filled in from the iterator's shape. - * - * This function must be called before any axes are coalesced. 
- */ -static PyArrayObject * -npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, - npy_uint32 flags, char *op_itflags, - int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - npy_int8 *perm = NIT_PERM(iter); - npy_intp new_shape[NPY_MAXDIMS], strides[NPY_MAXDIMS], - stride = op_dtype->elsize; - char reversestride[NPY_MAXDIMS], anyreverse = 0; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - npy_intp i; - - PyArrayObject *ret; - - /* If it's a scalar, don't need to check the axes */ - if (op_ndim == 0) { - Py_INCREF(op_dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, op_dtype, 0, - NULL, NULL, NULL, 0, NULL); - - /* Double-check that the subtype didn't mess with the dimensions */ - if (PyArray_NDIM(ret) != 0) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator automatic output has an array subtype " - "which changed the dimensions of the output"); - Py_DECREF(ret); - return NULL; - } - - return ret; - } - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - - memset(reversestride, 0, NPY_MAXDIMS); - /* Initialize the strides to invalid values */ - for (i = 0; i < NPY_MAXDIMS; ++i) { - strides[i] = NPY_MAX_INTP; - } - - if (op_axes != NULL) { - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_int8 p; - - /* Apply the perm to get the original axis */ - p = perm[idim]; - if (p < 0) { - i = op_axes[ndim+p]; - } - else { - i = op_axes[ndim-p-1]; - } - - if (i >= 0) { - NPY_IT_DBG_PRINT3("Iterator: Setting allocated stride %d " - "for iterator dimension %d to %d\n", (int)i, - (int)idim, (int)stride); - strides[i] = stride; - if (p < 0) { - reversestride[i] = 1; - anyreverse = 1; - } - else { - reversestride[i] = 0; - } - if (shape == NULL) { - new_shape[i] = NAD_SHAPE(axisdata); - stride *= new_shape[i]; - if (i >= ndim) { - 
PyErr_SetString(PyExc_ValueError, - "automatically allocated output array " - "specified with an inconsistent axis mapping"); - return NULL; - } - } - else { - stride *= shape[i]; - } - } - else { - if (shape == NULL) { - /* - * If deleting this axis produces a reduction, but - * reduction wasn't enabled, throw an error - */ - if (NAD_SHAPE(axisdata) != 1) { - if (!(flags&NPY_ITER_REDUCE_OK)) { - PyErr_SetString(PyExc_ValueError, - "output requires a reduction, but " - "reduction is not enabled"); - return NULL; - } - if (!((*op_itflags)&NPY_OP_ITFLAG_READ)) { - PyErr_SetString(PyExc_ValueError, - "output requires a reduction, but " - "is flagged as write-only, not read-write"); - return NULL; - } - - NPY_IT_DBG_PRINT("Iterator: Indicating that a " - "reduction is occurring\n"); - /* Indicate that a reduction is occurring */ - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REDUCE; - (*op_itflags) |= NPY_OP_ITFLAG_REDUCE; - } - } - } - } - } - else { - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - npy_int8 p; - - /* Apply the perm to get the original axis */ - p = perm[idim]; - if (p < 0) { - i = op_ndim + p; - } - else { - i = op_ndim - p - 1; - } - - if (i >= 0) { - NPY_IT_DBG_PRINT3("Iterator: Setting allocated stride %d " - "for iterator dimension %d to %d\n", (int)i, - (int)idim, (int)stride); - strides[i] = stride; - if (p < 0) { - reversestride[i] = 1; - anyreverse = 1; - } - else { - reversestride[i] = 0; - } - if (shape == NULL) { - new_shape[i] = NAD_SHAPE(axisdata); - stride *= new_shape[i]; - } - else { - stride *= shape[i]; - } - } - } - } - - /* - * If custom axes were specified, some dimensions may not have been used. - * Add the REDUCE itflag if this creates a reduction situation. 
- */ - if (shape == NULL) { - /* Ensure there are no dimension gaps in op_axes, and find op_ndim */ - op_ndim = ndim; - if (op_axes != NULL) { - for (i = 0; i < ndim; ++i) { - if (strides[i] == NPY_MAX_INTP) { - if (op_ndim == ndim) { - op_ndim = i; - } - } - /* - * If there's a gap in the array's dimensions, it's an error. - * For example, op_axes of [0,2] for the automatically - * allocated output. - */ - else if (op_ndim != ndim) { - PyErr_SetString(PyExc_ValueError, - "automatically allocated output array " - "specified with an inconsistent axis mapping"); - return NULL; - } - } - } - } - else { - for (i = 0; i < op_ndim; ++i) { - if (strides[i] == NPY_MAX_INTP) { - npy_intp factor, new_strides[NPY_MAXDIMS], - itemsize; - - /* Fill in the missing strides in C order */ - factor = 1; - itemsize = op_dtype->elsize; - for (i = op_ndim-1; i >= 0; --i) { - if (strides[i] == NPY_MAX_INTP) { - new_strides[i] = factor * itemsize; - factor *= shape[i]; - } - } - - /* - * Copy the missing strides, and multiply the existing strides - * by the calculated factor. This way, the missing strides - * are tighter together in memory, which is good for nested - * loops. 
- */ - for (i = 0; i < op_ndim; ++i) { - if (strides[i] == NPY_MAX_INTP) { - strides[i] = new_strides[i]; - } - else { - strides[i] *= factor; - } - } - - break; - } - } - } - - /* If shape was NULL, set it to the shape we calculated */ - if (shape == NULL) { - shape = new_shape; - } - - /* Allocate the temporary array */ - Py_INCREF(op_dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, op_dtype, op_ndim, - shape, strides, NULL, 0, NULL); - if (ret == NULL) { - return NULL; - } - - /* If there are any reversed axes, create a view that reverses them */ - if (anyreverse) { - char *dataptr = PyArray_DATA(ret); - PyArrayObject *newret; - - for (idim = 0; idim < op_ndim; ++idim) { - if (reversestride[idim]) { - dataptr += strides[idim]*(shape[idim]-1); - strides[idim] = -strides[idim]; - } - } - Py_INCREF(op_dtype); - newret = (PyArrayObject *)PyArray_NewFromDescr(subtype, - op_dtype, op_ndim, - shape, strides, dataptr, - NPY_WRITEABLE, NULL); - if (newret == NULL) { - Py_DECREF(ret); - return NULL; - } - newret->base = (PyObject *)ret; - ret = newret; - } - - /* Make sure all the flags are good */ - PyArray_UpdateFlags(ret, NPY_UPDATE_ALL); - - /* Double-check that the subtype didn't mess with the dimensions */ - if (subtype != &PyArray_Type) { - if (PyArray_NDIM(ret) != op_ndim || - !PyArray_CompareLists(shape, PyArray_DIMS(ret), op_ndim)) { - PyErr_SetString(PyExc_RuntimeError, - "Iterator automatic output has an array subtype " - "which changed the dimensions of the output"); - Py_DECREF(ret); - return NULL; - } - } - - return ret; -} - -static int -npyiter_allocate_arrays(NpyIter *iter, - npy_uint32 flags, - PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, char *op_itflags, - int **op_axes, int output_scalars) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - NpyIter_BufferData *bufferdata = NULL; - PyArrayObject **op = NIT_OPERANDS(iter); - - if 
(itflags&NPY_ITFLAG_BUFFER) { - bufferdata = NIT_BUFFERDATA(iter); - } - - - for (iop = 0; iop < nop; ++iop) { - /* NULL means an output the iterator should allocate */ - if (op[iop] == NULL) { - PyArrayObject *out; - PyTypeObject *op_subtype; - int ondim = output_scalars ? 0 : ndim; - - /* Check whether the subtype was disabled */ - op_subtype = (op_flags[iop]&NPY_ITER_NO_SUBTYPE) ? - &PyArray_Type : subtype; - - /* Allocate the output array */ - out = npyiter_new_temp_array(iter, op_subtype, - flags, &op_itflags[iop], - ondim, - NULL, - op_dtype[iop], - op_axes ? op_axes[iop] : NULL); - if (out == NULL) { - return 0; - } - - op[iop] = out; - - /* - * Now we need to replace the pointers and strides with values - * from the new array. - */ - npyiter_replace_axisdata(iter, iop, op[iop], ondim, - PyArray_DATA(op[iop]), op_axes ? op_axes[iop] : NULL); - - /* New arrays are aligned and need no cast */ - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST; - } - /* - * If casting is required, the operand is read-only, and - * it's an array scalar, make a copy whether or not the - * copy flag is enabled. - */ - else if ((op_itflags[iop]&(NPY_OP_ITFLAG_CAST| - NPY_OP_ITFLAG_READ| - NPY_OP_ITFLAG_WRITE)) == (NPY_OP_ITFLAG_CAST| - NPY_OP_ITFLAG_READ) && - PyArray_NDIM(op[iop]) == 0) { - PyArrayObject *temp; - Py_INCREF(op_dtype[iop]); - temp = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, op_dtype[iop], - 0, NULL, NULL, NULL, 0, NULL); - if (temp == NULL) { - return 0; - } - if (PyArray_CopyInto(temp, op[iop]) != 0) { - Py_DECREF(temp); - return 0; - } - Py_DECREF(op[iop]); - op[iop] = temp; - - /* - * Now we need to replace the pointers and strides with values - * from the temporary array. 
- */ - npyiter_replace_axisdata(iter, iop, op[iop], 0, - PyArray_DATA(op[iop]), NULL); - - /* - * New arrays are aligned need no cast, and in the case - * of scalars, always have stride 0 so never need buffering - */ - op_itflags[iop] |= (NPY_OP_ITFLAG_ALIGNED| - NPY_OP_ITFLAG_BUFNEVER); - op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST; - if (itflags&NPY_ITFLAG_BUFFER) { - NBF_STRIDES(bufferdata)[iop] = 0; - } - } - /* If casting is required and permitted */ - else if ((op_itflags[iop]&NPY_OP_ITFLAG_CAST) && - (op_flags[iop]&(NPY_ITER_COPY|NPY_ITER_UPDATEIFCOPY))) { - PyArrayObject *temp; - int ondim = PyArray_NDIM(op[iop]); - - /* Allocate the temporary array, if possible */ - temp = npyiter_new_temp_array(iter, &PyArray_Type, - flags, &op_itflags[iop], - ondim, - PyArray_DIMS(op[iop]), - op_dtype[iop], - op_axes ? op_axes[iop] : NULL); - if (temp == NULL) { - return 0; - } - - /* If the data will be read, copy it into temp */ - if (op_itflags[iop]&NPY_OP_ITFLAG_READ) { - if (PyArray_CopyInto(temp, op[iop]) != 0) { - Py_DECREF(temp); - return 0; - } - } - /* If the data will be written to, set UPDATEIFCOPY */ - if (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) { - PyArray_FLAGS(temp) |= NPY_UPDATEIFCOPY; - PyArray_FLAGS(op[iop]) &= ~NPY_WRITEABLE; - Py_INCREF(op[iop]); - temp->base = (PyObject *)op[iop]; - } - - Py_DECREF(op[iop]); - op[iop] = temp; - - /* - * Now we need to replace the pointers and strides with values - * from the temporary array. - */ - npyiter_replace_axisdata(iter, iop, op[iop], ondim, - PyArray_DATA(op[iop]), op_axes ? op_axes[iop] : NULL); - - /* The temporary copy is aligned and needs no cast */ - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST; - } - else { - /* - * Buffering must be enabled for casting/conversion if copy - * wasn't specified. 
- */ - if ((op_itflags[iop]&NPY_OP_ITFLAG_CAST) && - !(itflags&NPY_ITFLAG_BUFFER)) { - PyErr_SetString(PyExc_TypeError, - "Iterator operand required copying or buffering, " - "but neither copying nor buffering was enabled"); - return 0; - } - - /* - * If the operand is aligned, any buffering can use aligned - * optimizations. - */ - if (PyArray_ISALIGNED(op[iop])) { - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - } - } - - /* Here we can finally check for contiguous iteration */ - if (op_flags[iop]&NPY_ITER_CONTIG) { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - npy_intp stride = NAD_STRIDES(axisdata)[iop]; - - if (stride != op_dtype[iop]->elsize) { - NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST " - "because of NPY_ITER_CONTIG\n"); - op_itflags[iop] |= NPY_OP_ITFLAG_CAST; - if (!(itflags&NPY_ITFLAG_BUFFER)) { - PyErr_SetString(PyExc_TypeError, - "Iterator operand required buffering, " - "to be contiguous as requested, but " - "buffering is not enabled"); - return 0; - } - } - } - - /* - * If no alignment, byte swap, or casting is needed, and - * the inner stride of this operand works for the whole - * array, we can set NPY_OP_ITFLAG_BUFNEVER. 
- */ - if ((itflags&NPY_ITFLAG_BUFFER) && !(op_itflags[iop]&NPY_OP_ITFLAG_CAST)) { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - if (ndim == 1) { - op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER; - NBF_STRIDES(bufferdata)[iop] = NAD_STRIDES(axisdata)[iop]; - } - else if (PyArray_NDIM(op[iop]) > 0) { - npy_intp stride, shape, innerstride = 0, innershape; - npy_intp sizeof_axisdata = - NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - /* Find stride of the first non-empty shape */ - for (idim = 0; idim < ndim; ++idim) { - innershape = NAD_SHAPE(axisdata); - if (innershape != 1) { - innerstride = NAD_STRIDES(axisdata)[iop]; - break; - } - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - ++idim; - NIT_ADVANCE_AXISDATA(axisdata, 1); - /* Check that everything could have coalesced together */ - for (; idim < ndim; ++idim) { - stride = NAD_STRIDES(axisdata)[iop]; - shape = NAD_SHAPE(axisdata); - if (shape != 1) { - /* - * If N times the inner stride doesn't equal this - * stride, the multi-dimensionality is needed. - */ - if (innerstride*innershape != stride) { - break; - } - else { - innershape *= shape; - } - } - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - /* - * If we looped all the way to the end, one stride works. - * Set that stride, because it may not belong to the first - * dimension. - */ - if (idim == ndim) { - op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER; - NBF_STRIDES(bufferdata)[iop] = innerstride; - } - } - } - } - - return 1; -} - -/* - * The __array_priority__ attribute of the inputs determines - * the subtype of any output arrays. This function finds the - * subtype of the input array with highest priority. 
- */ -static void -npyiter_get_priority_subtype(int nop, PyArrayObject **op, - char *op_itflags, - double *subtype_priority, - PyTypeObject **subtype) -{ - int iop; - - for (iop = 0; iop < nop; ++iop) { - if (op[iop] != NULL && op_itflags[iop]&NPY_OP_ITFLAG_READ) { - double priority = PyArray_GetPriority((PyObject *)op[iop], 0.0); - if (priority > *subtype_priority) { - *subtype_priority = priority; - *subtype = Py_TYPE(op[iop]); - } - } - } -} - -/* - * Calculates a dtype that all the types can be promoted to, using the - * ufunc rules. If only_inputs is 1, it leaves any operands that - * are not read from out of the calculation. - */ -static PyArray_Descr * -npyiter_get_common_dtype(int nop, PyArrayObject **op, - char *op_itflags, PyArray_Descr **op_dtype, - PyArray_Descr **op_request_dtypes, - int only_inputs, int output_scalars) -{ - int iop; - npy_intp narrs = 0, ndtypes = 0; - PyArrayObject *arrs[NPY_MAXARGS]; - PyArray_Descr *dtypes[NPY_MAXARGS]; - PyArray_Descr *ret; - - NPY_IT_DBG_PRINT("Iterator: Getting a common data type from operands\n"); - - for (iop = 0; iop < nop; ++iop) { - if (op_dtype[iop] != NULL && - (!only_inputs || (op_itflags[iop]&NPY_OP_ITFLAG_READ))) { - /* If no dtype was requested and the op is a scalar, pass the op */ - if ((op_request_dtypes == NULL || - op_request_dtypes[iop] == NULL) && - PyArray_NDIM(op[iop]) == 0) { - arrs[narrs++] = op[iop]; - } - /* Otherwise just pass in the dtype */ - else { - dtypes[ndtypes++] = op_dtype[iop]; - } - } - } - - if (narrs == 0) { - npy_intp i; - ret = dtypes[0]; - for (i = 1; i < ndtypes; ++i) { - if (ret != dtypes[i]) - break; - } - if (i == ndtypes) { - if (ndtypes == 1 || PyArray_ISNBO(ret->byteorder)) { - Py_INCREF(ret); - } - else { - ret = PyArray_DescrNewByteorder(ret, NPY_NATIVE); - } - } - else { - ret = PyArray_ResultType(narrs, arrs, ndtypes, dtypes); - } - } - else { - ret = PyArray_ResultType(narrs, arrs, ndtypes, dtypes); - } - - return ret; -} - -static int 
-npyiter_allocate_transfer_functions(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - /*int ndim = NIT_NDIM(iter);*/ - int iop = 0, nop = NIT_NOP(iter); - - npy_intp i; - char *op_itflags = NIT_OPITFLAGS(iter); - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - PyArrayObject **op = NIT_OPERANDS(iter); - PyArray_Descr **op_dtype = NIT_DTYPES(iter); - npy_intp *strides = NAD_STRIDES(axisdata), op_stride; - PyArray_StridedTransferFn **readtransferfn = NBF_READTRANSFERFN(bufferdata), - **writetransferfn = NBF_WRITETRANSFERFN(bufferdata); - void **readtransferdata = NBF_READTRANSFERDATA(bufferdata), - **writetransferdata = NBF_WRITETRANSFERDATA(bufferdata); - - PyArray_StridedTransferFn *stransfer = NULL; - void *transferdata = NULL; - int needs_api = 0; - - for (iop = 0; iop < nop; ++iop) { - char flags = op_itflags[iop]; - /* - * Reduction operands may be buffered with a different stride, - * so we must pass NPY_MAX_INTP to the transfer function factory. - */ - op_stride = (flags&NPY_OP_ITFLAG_REDUCE) ? 
NPY_MAX_INTP : - strides[iop]; - - /* - * If we have determined that a buffer may be needed, - * allocate the appropriate transfer functions - */ - if (!(flags&NPY_OP_ITFLAG_BUFNEVER)) { - if (flags&NPY_OP_ITFLAG_READ) { - int move_references = 0; - if (PyArray_GetDTypeTransferFunction( - (flags&NPY_OP_ITFLAG_ALIGNED) != 0, - op_stride, - op_dtype[iop]->elsize, - PyArray_DESCR(op[iop]), - op_dtype[iop], - move_references, - &stransfer, - &transferdata, - &needs_api) != NPY_SUCCEED) { - goto fail; - } - readtransferfn[iop] = stransfer; - readtransferdata[iop] = transferdata; - } - else { - readtransferfn[iop] = NULL; - } - if (flags&NPY_OP_ITFLAG_WRITE) { - int move_references = 1; - if (PyArray_GetDTypeTransferFunction( - (flags&NPY_OP_ITFLAG_ALIGNED) != 0, - op_dtype[iop]->elsize, - op_stride, - op_dtype[iop], - PyArray_DESCR(op[iop]), - move_references, - &stransfer, - &transferdata, - &needs_api) != NPY_SUCCEED) { - goto fail; - } - writetransferfn[iop] = stransfer; - writetransferdata[iop] = transferdata; - } - /* If no write back but there are references make a decref fn */ - else if (PyDataType_REFCHK(op_dtype[iop])) { - /* - * By passing NULL to dst_type and setting move_references - * to 1, we get back a function that just decrements the - * src references. 
- */ - if (PyArray_GetDTypeTransferFunction( - (flags&NPY_OP_ITFLAG_ALIGNED) != 0, - op_dtype[iop]->elsize, 0, - op_dtype[iop], NULL, - 1, - &stransfer, - &transferdata, - &needs_api) != NPY_SUCCEED) { - goto fail; - } - writetransferfn[iop] = stransfer; - writetransferdata[iop] = transferdata; - } - else { - writetransferfn[iop] = NULL; - } - } - else { - readtransferfn[iop] = NULL; - writetransferfn[iop] = NULL; - } - } - - /* If any of the dtype transfer functions needed the API, flag it */ - if (needs_api) { - NIT_ITFLAGS(iter) |= NPY_ITFLAG_NEEDSAPI; - } - - return 1; - -fail: - for (i = 0; i < iop; ++i) { - if (readtransferdata[iop] != NULL) { - PyArray_FreeStridedTransferData(readtransferdata[iop]); - readtransferdata[iop] = NULL; - } - if (writetransferdata[iop] != NULL) { - PyArray_FreeStridedTransferData(writetransferdata[iop]); - writetransferdata[iop] = NULL; - } - } - return 0; -} - -/* - * - * If errmsg is non-NULL, it should point to a variable which will - * receive the error message, and no Python exception will be set. - * This is so that the function can be called from code not holding - * the GIL. - */ -static int -npyiter_allocate_buffers(NpyIter *iter, char **errmsg) -{ - /*npy_uint32 itflags = NIT_ITFLAGS(iter);*/ - /*int ndim = NIT_NDIM(iter);*/ - int iop = 0, nop = NIT_NOP(iter); - - npy_intp i; - char *op_itflags = NIT_OPITFLAGS(iter); - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - PyArray_Descr **op_dtype = NIT_DTYPES(iter); - npy_intp buffersize = NBF_BUFFERSIZE(bufferdata); - char *buffer, **buffers = NBF_BUFFERS(bufferdata); - - for (iop = 0; iop < nop; ++iop) { - char flags = op_itflags[iop]; - - /* - * If we have determined that a buffer may be needed, - * allocate one. 
- */ - if (!(flags&NPY_OP_ITFLAG_BUFNEVER)) { - npy_intp itemsize = op_dtype[iop]->elsize; - buffer = PyArray_malloc(itemsize*buffersize); - if (buffer == NULL) { - if (errmsg == NULL) { - PyErr_NoMemory(); - } - else { - *errmsg = "out of memory"; - } - goto fail; - } - buffers[iop] = buffer; - } - } - - return 1; - -fail: - for (i = 0; i < iop; ++i) { - if (buffers[i] != NULL) { - PyArray_free(buffers[i]); - buffers[i] = NULL; - } - } - return 0; -} - -/* - * This sets the AXISDATA portion of the iterator to the specified - * iterindex, updating the pointers as well. This function does - * no error checking. - */ -static void -npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int nop = NIT_NOP(iter); - - char **dataptr; - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - npy_intp istrides, nstrides, i, shape; - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - nstrides = NAD_NSTRIDES(); - - NIT_ITERINDEX(iter) = iterindex; - - if (iterindex == 0) { - dataptr = NIT_RESETDATAPTR(iter); - - for (idim = 0; idim < ndim; ++idim) { - char **ptrs; - NAD_INDEX(axisdata) = 0; - ptrs = NAD_PTRS(axisdata); - for (istrides = 0; istrides < nstrides; ++istrides) { - ptrs[istrides] = dataptr[istrides]; - } - - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - } - else { - /* - * Set the multi-index, from the fastest-changing to the - * slowest-changing. 
- */ - axisdata = NIT_AXISDATA(iter); - shape = NAD_SHAPE(axisdata); - i = iterindex; - iterindex /= shape; - NAD_INDEX(axisdata) = i - iterindex * shape; - for (idim = 0; idim < ndim-1; ++idim) { - NIT_ADVANCE_AXISDATA(axisdata, 1); - - shape = NAD_SHAPE(axisdata); - i = iterindex; - iterindex /= shape; - NAD_INDEX(axisdata) = i - iterindex * shape; - } - - dataptr = NIT_RESETDATAPTR(iter); - - /* - * Accumulate the successive pointers with their - * offsets in the opposite order, starting from the - * original data pointers. - */ - for (idim = 0; idim < ndim; ++idim) { - npy_intp *strides; - char **ptrs; - - strides = NAD_STRIDES(axisdata); - ptrs = NAD_PTRS(axisdata); - - i = NAD_INDEX(axisdata); - - for (istrides = 0; istrides < nstrides; ++istrides) { - ptrs[istrides] = dataptr[istrides] + i*strides[istrides]; - } - - dataptr = ptrs; - - NIT_ADVANCE_AXISDATA(axisdata, -1); - } - } -} - -/* - * This gets called after the the buffers have been exhausted, and - * their data needs to be written back to the arrays. The multi-index - * must be positioned for the beginning of the buffer. 
- */ -static void -npyiter_copy_from_buffers(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - char *op_itflags = NIT_OPITFLAGS(iter); - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter), - *reduce_outeraxisdata = NULL; - - PyArray_Descr **dtypes = NIT_DTYPES(iter); - npy_intp transfersize = NBF_SIZE(bufferdata), - buffersize = NBF_BUFFERSIZE(bufferdata); - npy_intp *strides = NBF_STRIDES(bufferdata), - *ad_strides = NAD_STRIDES(axisdata); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - char **ptrs = NBF_PTRS(bufferdata), **ad_ptrs = NAD_PTRS(axisdata); - char **buffers = NBF_BUFFERS(bufferdata); - char *buffer; - - npy_intp reduce_outerdim = 0; - npy_intp *reduce_outerstrides = NULL; - - PyArray_StridedTransferFn *stransfer = NULL; - void *transferdata = NULL; - - npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) / - NPY_SIZEOF_INTP; - - /* If we're past the end, nothing to copy */ - if (NBF_SIZE(bufferdata) == 0) { - return; - } - - NPY_IT_DBG_PRINT("Iterator: Copying buffers to outputs\n"); - - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata); - reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim); - transfersize *= NBF_REDUCE_OUTERSIZE(bufferdata); - } - - for (iop = 0; iop < nop; ++iop) { - stransfer = NBF_WRITETRANSFERFN(bufferdata)[iop]; - transferdata = NBF_WRITETRANSFERDATA(bufferdata)[iop]; - buffer = buffers[iop]; - /* - * Copy the data back to the arrays. If the type has refs, - * this function moves them so the buffer's refs are released. 
- */ - if ((stransfer != NULL) && (op_itflags[iop]&NPY_OP_ITFLAG_WRITE)) { - /* Copy back only if the pointer was pointing to the buffer */ - npy_intp delta = (ptrs[iop] - buffer); - if (0 <= delta && delta <= buffersize*dtypes[iop]->elsize) { - npy_intp op_transfersize; - - npy_intp src_stride, *dst_strides, *dst_coords, *dst_shape; - int ndim_transfer; - - NPY_IT_DBG_PRINT1("Iterator: Operand %d was buffered\n", - (int)iop); - - /* - * If this operand is being reduced in the inner loop, - * its buffering stride was set to zero, and just - * one element was copied. - */ - if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) { - if (strides[iop] == 0) { - if (reduce_outerstrides[iop] == 0) { - op_transfersize = 1; - src_stride = 0; - dst_strides = &src_stride; - dst_coords = &NAD_INDEX(reduce_outeraxisdata); - dst_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = 1; - } - else { - op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata); - src_stride = reduce_outerstrides[iop]; - dst_strides = - &NAD_STRIDES(reduce_outeraxisdata)[iop]; - dst_coords = &NAD_INDEX(reduce_outeraxisdata); - dst_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = ndim - reduce_outerdim; - } - } - else { - if (reduce_outerstrides[iop] == 0) { - op_transfersize = NBF_SIZE(bufferdata); - src_stride = strides[iop]; - dst_strides = &ad_strides[iop]; - dst_coords = &NAD_INDEX(axisdata); - dst_shape = &NAD_SHAPE(axisdata); - ndim_transfer = reduce_outerdim ? 
- reduce_outerdim : 1; - } - else { - op_transfersize = transfersize; - src_stride = strides[iop]; - dst_strides = &ad_strides[iop]; - dst_coords = &NAD_INDEX(axisdata); - dst_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } - } - } - else { - op_transfersize = transfersize; - src_stride = strides[iop]; - dst_strides = &ad_strides[iop]; - dst_coords = &NAD_INDEX(axisdata); - dst_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } - - NPY_IT_DBG_PRINT2("Iterator: Copying buffer to " - "operand %d (%d items)\n", - (int)iop, (int)op_transfersize); - - PyArray_TransferStridedToNDim(ndim_transfer, - ad_ptrs[iop], dst_strides, axisdata_incr, - buffer, src_stride, - dst_coords, axisdata_incr, - dst_shape, axisdata_incr, - op_transfersize, dtypes[iop]->elsize, - stransfer, - transferdata); - } - } - /* If there's no copy back, we may have to decrement refs. In - * this case, the transfer function has a 'decsrcref' transfer - * function, so we can use it to do the decrement. - */ - else if (stransfer != NULL) { - /* Decrement refs only if the pointer was pointing to the buffer */ - npy_intp delta = (ptrs[iop] - buffer); - if (0 <= delta && delta <= transfersize*dtypes[iop]->elsize) { - NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer " - "of operand %d\n", (int)iop); - /* Decrement refs */ - stransfer(NULL, 0, buffer, dtypes[iop]->elsize, - transfersize, dtypes[iop]->elsize, - transferdata); - /* - * Zero out the memory for safety. For instance, - * if during iteration some Python code copied an - * array pointing into the buffer, it will get None - * values for its references after this. - */ - memset(buffer, 0, dtypes[iop]->elsize*transfersize); - } - } - } - - NPY_IT_DBG_PRINT("Iterator: Finished copying buffers to outputs\n"); -} - -/* - * This gets called after the iterator has been positioned to a multi-index - * for the start of a buffer. It decides which operands need a buffer, - * and copies the data into the buffers. 
- */ -static void -npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - char *op_itflags = NIT_OPITFLAGS(iter); - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter), - *reduce_outeraxisdata = NULL; - - PyArray_Descr **dtypes = NIT_DTYPES(iter); - PyArrayObject **operands = NIT_OPERANDS(iter); - npy_intp *strides = NBF_STRIDES(bufferdata), - *ad_strides = NAD_STRIDES(axisdata); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - char **ptrs = NBF_PTRS(bufferdata), **ad_ptrs = NAD_PTRS(axisdata); - char **buffers = NBF_BUFFERS(bufferdata); - npy_intp iterindex, iterend, transfersize, - singlestridesize, reduce_innersize = 0, reduce_outerdim = 0; - int is_onestride = 0, any_buffered = 0; - - npy_intp *reduce_outerstrides = NULL; - char **reduce_outerptrs = NULL; - - PyArray_StridedTransferFn *stransfer = NULL; - void *transferdata = NULL; - - /* - * Have to get this flag before npyiter_checkreducesize sets - * it for the next iteration. 
- */ - npy_bool reuse_reduce_loops = (prev_dataptrs != NULL) && - ((itflags&NPY_ITFLAG_REUSE_REDUCE_LOOPS) != 0); - - npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) / - NPY_SIZEOF_INTP; - - NPY_IT_DBG_PRINT("Iterator: Copying inputs to buffers\n"); - - /* Calculate the size if using any buffers */ - iterindex = NIT_ITERINDEX(iter); - iterend = NIT_ITEREND(iter); - transfersize = NBF_BUFFERSIZE(bufferdata); - if (transfersize > iterend - iterindex) { - transfersize = iterend - iterindex; - } - - /* If last time around, the reduce loop structure was full, we reuse it */ - if (reuse_reduce_loops) { - npy_intp full_transfersize; - - reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata); - reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata); - reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim); - reduce_innersize = NBF_SIZE(bufferdata); - NBF_REDUCE_POS(bufferdata) = 0; - /* - * Try to do make the outersize as big as possible. This allows - * it to shrink when processing the last bit of the outer reduce loop, - * then grow again at the beginnning of the next outer reduce loop. 
- */ - NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)- - NAD_INDEX(reduce_outeraxisdata)); - full_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata)*reduce_innersize; - /* If the full transfer size doesn't fit in the buffer, truncate it */ - if (full_transfersize > NBF_BUFFERSIZE(bufferdata)) { - NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize; - transfersize = NBF_REDUCE_OUTERSIZE(bufferdata)*reduce_innersize; - } - else { - transfersize = full_transfersize; - } - NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize; - - NPY_IT_DBG_PRINT3("Reused reduce transfersize: %d innersize: %d " - "itersize: %d\n", - (int)transfersize, - (int)reduce_innersize, - (int)NpyIter_GetIterSize(iter)); - NPY_IT_DBG_PRINT1("Reduced reduce outersize: %d", - (int)NBF_REDUCE_OUTERSIZE(bufferdata)); - } - /* - * If there are any reduction operands, we may have to make - * the size smaller so we don't copy the same value into - * a buffer twice, as the buffering does not have a mechanism - * to combine values itself. 
- */ - else if (itflags&NPY_ITFLAG_REDUCE) { - NPY_IT_DBG_PRINT("Iterator: Calculating reduce loops\n"); - transfersize = npyiter_checkreducesize(iter, transfersize, - &reduce_innersize, - &reduce_outerdim); - NPY_IT_DBG_PRINT3("Reduce transfersize: %d innersize: %d " - "itersize: %d\n", - (int)transfersize, - (int)reduce_innersize, - (int)NpyIter_GetIterSize(iter)); - - reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata); - reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim); - NBF_SIZE(bufferdata) = reduce_innersize; - NBF_REDUCE_POS(bufferdata) = 0; - NBF_REDUCE_OUTERDIM(bufferdata) = reduce_outerdim; - NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize; - if (reduce_innersize == 0) { - NBF_REDUCE_OUTERSIZE(bufferdata) = 0; - return; - } - else { - NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize; - } - } - else { - NBF_SIZE(bufferdata) = transfersize; - NBF_BUFITEREND(bufferdata) = iterindex + transfersize; - } - - /* Calculate the maximum size if using a single stride and no buffers */ - singlestridesize = NAD_SHAPE(axisdata)-NAD_INDEX(axisdata); - if (singlestridesize > iterend - iterindex) { - singlestridesize = iterend - iterindex; - } - if (singlestridesize >= transfersize) { - is_onestride = 1; - } - - for (iop = 0; iop < nop; ++iop) { - /* - * If the buffer is write-only, these two are NULL, and the buffer - * pointers will be set up but the read copy won't be done - */ - stransfer = NBF_READTRANSFERFN(bufferdata)[iop]; - transferdata = NBF_READTRANSFERDATA(bufferdata)[iop]; - switch (op_itflags[iop]& - (NPY_OP_ITFLAG_BUFNEVER| - NPY_OP_ITFLAG_CAST| - NPY_OP_ITFLAG_REDUCE)) { - /* Never need to buffer this operand */ - case NPY_OP_ITFLAG_BUFNEVER: - ptrs[iop] = ad_ptrs[iop]; - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerstrides[iop] = reduce_innersize * - strides[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - } - /* - * Should not adjust the stride - 
ad_strides[iop] - * could be zero, but strides[iop] was initialized - * to the first non-trivial stride. - */ - stransfer = NULL; - break; - /* Never need to buffer this operand */ - case NPY_OP_ITFLAG_BUFNEVER|NPY_OP_ITFLAG_REDUCE: - ptrs[iop] = ad_ptrs[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - reduce_outerstrides[iop] = 0; - /* - * Should not adjust the stride - ad_strides[iop] - * could be zero, but strides[iop] was initialized - * to the first non-trivial stride. - */ - stransfer = NULL; - break; - /* Just a copy */ - case 0: - /* - * No copyswap or cast was requested, so all we're - * doing is copying the data to fill the buffer and - * produce a single stride. If the underlying data - * already does that, no need to copy it. - */ - if (is_onestride) { - ptrs[iop] = ad_ptrs[iop]; - strides[iop] = ad_strides[iop]; - stransfer = NULL; - } - /* If some other op is reduced, we have a double reduce loop */ - else if ((itflags&NPY_ITFLAG_REDUCE) && - (reduce_outerdim == 1) && - (transfersize/reduce_innersize <= - NAD_SHAPE(reduce_outeraxisdata) - - NAD_INDEX(reduce_outeraxisdata))) { - ptrs[iop] = ad_ptrs[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - strides[iop] = ad_strides[iop]; - reduce_outerstrides[iop] = - NAD_STRIDES(reduce_outeraxisdata)[iop]; - stransfer = NULL; - } - else { - /* In this case, the buffer is being used */ - ptrs[iop] = buffers[iop]; - strides[iop] = dtypes[iop]->elsize; - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerstrides[iop] = reduce_innersize * - strides[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - } - } - break; - /* Just a copy, but with a reduction */ - case NPY_OP_ITFLAG_REDUCE: - if (ad_strides[iop] == 0) { - strides[iop] = 0; - /* It's all in one stride in the inner loop dimension */ - if (is_onestride) { - NPY_IT_DBG_PRINT1("reduce op %d all one stride\n", (int)iop); - ptrs[iop] = ad_ptrs[iop]; - reduce_outerstrides[iop] = 0; - stransfer = NULL; - } - /* It's all in one stride in the reduce outer loop */ - else if 
((reduce_outerdim > 0) && - (transfersize/reduce_innersize <= - NAD_SHAPE(reduce_outeraxisdata) - - NAD_INDEX(reduce_outeraxisdata))) { - NPY_IT_DBG_PRINT1("reduce op %d all one outer stride\n", - (int)iop); - ptrs[iop] = ad_ptrs[iop]; - /* Outer reduce loop advances by one item */ - reduce_outerstrides[iop] = - NAD_STRIDES(reduce_outeraxisdata)[iop]; - stransfer = NULL; - } - /* In this case, the buffer is being used */ - else { - NPY_IT_DBG_PRINT1("reduce op %d must buffer\n", (int)iop); - ptrs[iop] = buffers[iop]; - /* Both outer and inner reduce loops have stride 0 */ - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - reduce_outerstrides[iop] = 0; - } - /* Outer reduce loop advances by one item */ - else { - reduce_outerstrides[iop] = dtypes[iop]->elsize; - } - } - - } - else if (is_onestride) { - NPY_IT_DBG_PRINT1("reduce op %d all one stride in dim 0\n", (int)iop); - ptrs[iop] = ad_ptrs[iop]; - strides[iop] = ad_strides[iop]; - reduce_outerstrides[iop] = 0; - stransfer = NULL; - } - else { - /* It's all in one stride in the reduce outer loop */ - if ((reduce_outerdim > 0) && - (transfersize/reduce_innersize <= - NAD_SHAPE(reduce_outeraxisdata) - - NAD_INDEX(reduce_outeraxisdata))) { - ptrs[iop] = ad_ptrs[iop]; - strides[iop] = ad_strides[iop]; - /* Outer reduce loop advances by one item */ - reduce_outerstrides[iop] = - NAD_STRIDES(reduce_outeraxisdata)[iop]; - stransfer = NULL; - } - /* In this case, the buffer is being used */ - else { - ptrs[iop] = buffers[iop]; - strides[iop] = dtypes[iop]->elsize; - - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - /* Reduction in outer reduce loop */ - reduce_outerstrides[iop] = 0; - } - else { - /* Advance to next items in outer reduce loop */ - reduce_outerstrides[iop] = reduce_innersize * - dtypes[iop]->elsize; - } - } - } - reduce_outerptrs[iop] = ptrs[iop]; - break; - default: - /* In this case, the buffer is always being used */ - any_buffered = 1; - - if (!(op_itflags[iop]&NPY_OP_ITFLAG_REDUCE)) { - 
ptrs[iop] = buffers[iop]; - strides[iop] = dtypes[iop]->elsize; - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerstrides[iop] = reduce_innersize * - strides[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - } - } - /* The buffer is being used with reduction */ - else { - ptrs[iop] = buffers[iop]; - if (ad_strides[iop] == 0) { - NPY_IT_DBG_PRINT1("cast op %d has innermost stride 0\n", (int)iop); - strides[iop] = 0; - /* Both outer and inner reduce loops have stride 0 */ - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride 0\n", (int)iop); - reduce_outerstrides[iop] = 0; - } - /* Outer reduce loop advances by one item */ - else { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride !=0\n", (int)iop); - reduce_outerstrides[iop] = dtypes[iop]->elsize; - } - } - else { - NPY_IT_DBG_PRINT1("cast op %d has innermost stride !=0\n", (int)iop); - strides[iop] = dtypes[iop]->elsize; - - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride 0\n", (int)iop); - /* Reduction in outer reduce loop */ - reduce_outerstrides[iop] = 0; - } - else { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride !=0\n", (int)iop); - /* Advance to next items in outer reduce loop */ - reduce_outerstrides[iop] = reduce_innersize * - dtypes[iop]->elsize; - } - } - reduce_outerptrs[iop] = ptrs[iop]; - } - break; - } - - if (stransfer != NULL) { - npy_intp src_itemsize = PyArray_DESCR(operands[iop])->elsize; - npy_intp op_transfersize; - - npy_intp dst_stride, *src_strides, *src_coords, *src_shape; - int ndim_transfer; - - npy_bool skip_transfer = 0; - - /* If stransfer wasn't set to NULL, buffering is required */ - any_buffered = 1; - - /* - * If this operand is being reduced in the inner loop, - * set its buffering stride to zero, and just copy - * one element. 
- */ - if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) { - if (ad_strides[iop] == 0) { - strides[iop] = 0; - if (reduce_outerstrides[iop] == 0) { - op_transfersize = 1; - dst_stride = 0; - src_strides = &dst_stride; - src_coords = &NAD_INDEX(reduce_outeraxisdata); - src_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = 1; - - /* - * When we're reducing a single element, and - * it's still the same element, don't overwrite - * it even when reuse reduce loops is unset. - * This preserves the precision of the - * intermediate calculation. - */ - if (prev_dataptrs && - prev_dataptrs[iop] == ad_ptrs[iop]) { - NPY_IT_DBG_PRINT1("Iterator: skipping operand %d" - " copy because it's a 1-element reduce\n", - (int)iop); - - skip_transfer = 1; - } - } - else { - op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata); - dst_stride = reduce_outerstrides[iop]; - src_strides = &NAD_STRIDES(reduce_outeraxisdata)[iop]; - src_coords = &NAD_INDEX(reduce_outeraxisdata); - src_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = ndim - reduce_outerdim; - } - } - else { - if (reduce_outerstrides[iop] == 0) { - op_transfersize = NBF_SIZE(bufferdata); - dst_stride = strides[iop]; - src_strides = &ad_strides[iop]; - src_coords = &NAD_INDEX(axisdata); - src_shape = &NAD_SHAPE(axisdata); - ndim_transfer = reduce_outerdim ? reduce_outerdim : 1; - } - else { - op_transfersize = transfersize; - dst_stride = strides[iop]; - src_strides = &ad_strides[iop]; - src_coords = &NAD_INDEX(axisdata); - src_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } - } - } - else { - op_transfersize = transfersize; - dst_stride = strides[iop]; - src_strides = &ad_strides[iop]; - src_coords = &NAD_INDEX(axisdata); - src_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } - - /* - * If the whole buffered loop structure remains the same, - * and the source pointer for this data didn't change, - * we don't have to copy the data again. 
- */ - if (reuse_reduce_loops && prev_dataptrs[iop] == ad_ptrs[iop]) { - NPY_IT_DBG_PRINT2("Iterator: skipping operands %d " - "copy (%d items) because loops are reused and the data " - "pointer didn't change\n", - (int)iop, (int)op_transfersize); - skip_transfer = 1; - } - - /* If the data type requires zero-inititialization */ - if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { - NPY_IT_DBG_PRINT("Iterator: Buffer requires init, " - "memsetting to 0\n"); - memset(ptrs[iop], 0, dtypes[iop]->elsize*op_transfersize); - /* Can't skip the transfer in this case */ - skip_transfer = 0; - } - - if (!skip_transfer) { - NPY_IT_DBG_PRINT2("Iterator: Copying operand %d to " - "buffer (%d items)\n", - (int)iop, (int)op_transfersize); - - PyArray_TransferNDimToStrided(ndim_transfer, - ptrs[iop], dst_stride, - ad_ptrs[iop], src_strides, axisdata_incr, - src_coords, axisdata_incr, - src_shape, axisdata_incr, - op_transfersize, src_itemsize, - stransfer, - transferdata); - } - } - else if (ptrs[iop] == buffers[iop]) { - /* If the data type requires zero-inititialization */ - if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) { - NPY_IT_DBG_PRINT1("Iterator: Write-only buffer for " - "operand %d requires init, " - "memsetting to 0\n", (int)iop); - memset(ptrs[iop], 0, dtypes[iop]->elsize*transfersize); - } - } - - } - - /* - * If buffering wasn't needed, we can grow the inner - * loop to as large as possible. - * - * TODO: Could grow REDUCE loop too with some more logic above. 
- */ - if (!any_buffered && (itflags&NPY_ITFLAG_GROWINNER) && - !(itflags&NPY_ITFLAG_REDUCE)) { - if (singlestridesize > transfersize) { - NPY_IT_DBG_PRINT2("Iterator: Expanding inner loop size " - "from %d to %d since buffering wasn't needed\n", - (int)NBF_SIZE(bufferdata), (int)singlestridesize); - NBF_SIZE(bufferdata) = singlestridesize; - NBF_BUFITEREND(bufferdata) = iterindex + singlestridesize; - } - } - - NPY_IT_DBG_PRINT1("Any buffering needed: %d\n", any_buffered); - - NPY_IT_DBG_PRINT1("Iterator: Finished copying inputs to buffers " - "(buffered size is %d)\n", (int)NBF_SIZE(bufferdata)); -} - -/* - * This checks how much space can be buffered without encountering the - * same value twice, or for operands whose innermost stride is zero, - * without encountering a different value. By reducing the buffered - * amount to this size, reductions can be safely buffered. - * - * Reductions are buffered with two levels of looping, to avoid - * frequent copying to the buffers. The return value is the over-all - * buffer size, and when the flag NPY_ITFLAG_REDUCE is set, reduce_innersize - * receives the size of the inner of the two levels of looping. - * - * The value placed in reduce_outerdim is the index into the AXISDATA - * for where the second level of the double loop begins. - * - * The return value is always a multiple of the value placed in - * reduce_innersize. 
- */ -static npy_intp -npyiter_checkreducesize(NpyIter *iter, npy_intp count, - npy_intp *reduce_innersize, - npy_intp *reduce_outerdim) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - npy_intp coord, shape, *strides; - npy_intp reducespace = 1, factor; - npy_bool nonzerocoord; - - char *op_itflags = NIT_OPITFLAGS(iter); - char stride0op[NPY_MAXARGS]; - - /* Default to no outer axis */ - *reduce_outerdim = 0; - - /* If there's only one dimension, no need to calculate anything */ - if (ndim == 1) { - *reduce_innersize = count; - return count; - } - - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - axisdata = NIT_AXISDATA(iter); - - /* Indicate which REDUCE operands have stride 0 in the inner loop */ - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - stride0op[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) && - (strides[iop] == 0); - NPY_IT_DBG_PRINT2("Iterator: Operand %d has stride 0 in " - "the inner loop? %d\n", iop, (int)stride0op[iop]); - } - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - reducespace += (shape-coord-1); - factor = shape; - NIT_ADVANCE_AXISDATA(axisdata, 1); - - /* Initialize nonzerocoord based on the first coordinate */ - nonzerocoord = (coord != 0); - - /* Go forward through axisdata, calculating the space available */ - for (idim = 1; idim < ndim && reducespace < count; - ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - NPY_IT_DBG_PRINT2("Iterator: inner loop reducespace %d, count %d\n", - (int)reducespace, (int)count); - - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - /* - * If a reduce stride switched from zero to non-zero, or - * vice versa, that's the point where the data will stop - * being the same element or will repeat, and if the - * buffer starts with an all zero multi-index up to this - * point, gives us the reduce_innersize. 
- */ - if((stride0op[iop] && (strides[iop] != 0)) || - (!stride0op[iop] && - (strides[iop] == 0) && - (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE))) { - NPY_IT_DBG_PRINT1("Iterator: Reduce operation limits " - "buffer to %d\n", (int)reducespace); - /* - * If we already found more elements than count, or - * the starting coordinate wasn't zero, the two-level - * looping is unnecessary/can't be done, so return. - */ - if (count <= reducespace) { - *reduce_innersize = count; - return count; - } - else if (nonzerocoord) { - if (reducespace < count) { - count = reducespace; - } - *reduce_innersize = count; - return count; - } - else { - *reduce_innersize = reducespace; - break; - } - } - } - /* If we broke out of the loop early, we found reduce_innersize */ - if (iop != nop) { - NPY_IT_DBG_PRINT2("Iterator: Found first dim not " - "reduce (%d of %d)\n", iop, nop); - break; - } - - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - if (coord != 0) { - nonzerocoord = 1; - } - reducespace += (shape-coord-1) * factor; - factor *= shape; - } - - /* - * If there was any non-zero coordinate, the reduction inner - * loop doesn't fit in the buffersize, or the reduction inner loop - * covered the entire iteration size, can't do the double loop. - */ - if (nonzerocoord || count < reducespace || idim == ndim) { - if (reducespace < count) { - count = reducespace; - } - *reduce_innersize = count; - /* In this case, we can't reuse the reduce loops */ - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS; - return count; - } - - /* In this case, we can reuse the reduce loops */ - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS; - - *reduce_innersize = reducespace; - count /= reducespace; - - NPY_IT_DBG_PRINT2("Iterator: reduce_innersize %d count /ed %d\n", - (int)reducespace, (int)count); - - /* - * Continue through the rest of the dimensions. If there are - * two separated reduction axes, we may have to cut the buffer - * short again. 
- */ - *reduce_outerdim = idim; - reducespace = 1; - factor = 1; - /* Indicate which REDUCE operands have stride 0 at the current level */ - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - stride0op[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) && - (strides[iop] == 0); - NPY_IT_DBG_PRINT2("Iterator: Operand %d has stride 0 in " - "the outer loop? %d\n", iop, (int)stride0op[iop]); - } - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - reducespace += (shape-coord-1) * factor; - factor *= shape; - NIT_ADVANCE_AXISDATA(axisdata, 1); - ++idim; - - for (; idim < ndim && reducespace < count; - ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - NPY_IT_DBG_PRINT2("Iterator: outer loop reducespace %d, count %d\n", - (int)reducespace, (int)count); - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - /* - * If a reduce stride switched from zero to non-zero, or - * vice versa, that's the point where the data will stop - * being the same element or will repeat, and if the - * buffer starts with an all zero multi-index up to this - * point, gives us the reduce_innersize. - */ - if((stride0op[iop] && (strides[iop] != 0)) || - (!stride0op[iop] && - (strides[iop] == 0) && - (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE))) { - NPY_IT_DBG_PRINT1("Iterator: Reduce operation limits " - "buffer to %d\n", (int)reducespace); - /* - * This terminates the outer level of our double loop. 
- */ - if (count <= reducespace) { - return count * (*reduce_innersize); - } - else { - return reducespace * (*reduce_innersize); - } - } - } - - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - if (coord != 0) { - nonzerocoord = 1; - } - reducespace += (shape-coord-1) * factor; - factor *= shape; - } - - if (reducespace < count) { - count = reducespace; - } - return count * (*reduce_innersize); -} - - - -/*NUMPY_API - * For debugging - */ -NPY_NO_EXPORT void -NpyIter_DebugPrint(NpyIter *iter) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - - PyGILState_STATE gilstate = PyGILState_Ensure(); - - printf("\n------ BEGIN ITERATOR DUMP ------\n"); - printf("| Iterator Address: %p\n", (void *)iter); - printf("| ItFlags: "); - if (itflags&NPY_ITFLAG_IDENTPERM) - printf("IDENTPERM "); - if (itflags&NPY_ITFLAG_NEGPERM) - printf("NEGPERM "); - if (itflags&NPY_ITFLAG_HASINDEX) - printf("HASINDEX "); - if (itflags&NPY_ITFLAG_HASMULTIINDEX) - printf("HASMULTIINDEX "); - if (itflags&NPY_ITFLAG_FORCEDORDER) - printf("FORCEDORDER "); - if (itflags&NPY_ITFLAG_EXLOOP) - printf("EXLOOP "); - if (itflags&NPY_ITFLAG_RANGE) - printf("RANGE "); - if (itflags&NPY_ITFLAG_BUFFER) - printf("BUFFER "); - if (itflags&NPY_ITFLAG_GROWINNER) - printf("GROWINNER "); - if (itflags&NPY_ITFLAG_ONEITERATION) - printf("ONEITERATION "); - if (itflags&NPY_ITFLAG_DELAYBUF) - printf("DELAYBUF "); - if (itflags&NPY_ITFLAG_NEEDSAPI) - printf("NEEDSAPI "); - if (itflags&NPY_ITFLAG_REDUCE) - printf("REDUCE "); - if (itflags&NPY_ITFLAG_REUSE_REDUCE_LOOPS) - printf("REUSE_REDUCE_LOOPS "); - printf("\n"); - printf("| NDim: %d\n", (int)ndim); - printf("| NOp: %d\n", (int)nop); - printf("| IterSize: %d\n", (int)NIT_ITERSIZE(iter)); - printf("| IterStart: %d\n", (int)NIT_ITERSTART(iter)); - printf("| IterEnd: %d\n", (int)NIT_ITEREND(iter)); - printf("| IterIndex: %d\n", 
(int)NIT_ITERINDEX(iter)); - printf("| Iterator SizeOf: %d\n", - (int)NIT_SIZEOF_ITERATOR(itflags, ndim, nop)); - printf("| BufferData SizeOf: %d\n", - (int)NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop)); - printf("| AxisData SizeOf: %d\n", - (int)NIT_AXISDATA_SIZEOF(itflags, ndim, nop)); - printf("|\n"); - - printf("| Perm: "); - for (idim = 0; idim < ndim; ++idim) { - printf("%d ", (int)NIT_PERM(iter)[idim]); - } - printf("\n"); - printf("| DTypes: "); - for (iop = 0; iop < nop; ++iop) { - printf("%p ", (void *)NIT_DTYPES(iter)[iop]); - } - printf("\n"); - printf("| DTypes: "); - for (iop = 0; iop < nop; ++iop) { - if (NIT_DTYPES(iter)[iop] != NULL) - PyObject_Print((PyObject*)NIT_DTYPES(iter)[iop], stdout, 0); - else - printf("(nil) "); - printf(" "); - } - printf("\n"); - printf("| InitDataPtrs: "); - for (iop = 0; iop < nop; ++iop) { - printf("%p ", (void *)NIT_RESETDATAPTR(iter)[iop]); - } - printf("\n"); - printf("| BaseOffsets: "); - for (iop = 0; iop < nop; ++iop) { - printf("%i ", (int)NIT_BASEOFFSETS(iter)[iop]); - } - printf("\n"); - if (itflags&NPY_ITFLAG_HASINDEX) { - printf("| InitIndex: %d\n", - (int)(npy_intp)NIT_RESETDATAPTR(iter)[nop]); - } - printf("| Operands: "); - for (iop = 0; iop < nop; ++iop) { - printf("%p ", (void *)NIT_OPERANDS(iter)[iop]); - } - printf("\n"); - printf("| Operand DTypes: "); - for (iop = 0; iop < nop; ++iop) { - PyArray_Descr *dtype; - if (NIT_OPERANDS(iter)[iop] != NULL) { - dtype = PyArray_DESCR(NIT_OPERANDS(iter)[iop]); - if (dtype != NULL) - PyObject_Print((PyObject *)dtype, stdout, 0); - else - printf("(nil) "); - } - else { - printf("(op nil) "); - } - printf(" "); - } - printf("\n"); - printf("| OpItFlags:\n"); - for (iop = 0; iop < nop; ++iop) { - printf("| Flags[%d]: ", (int)iop); - if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_READ) - printf("READ "); - if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_WRITE) - printf("WRITE "); - if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_CAST) - printf("CAST "); - if 
((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_BUFNEVER) - printf("BUFNEVER "); - if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_ALIGNED) - printf("ALIGNED "); - if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_REDUCE) - printf("REDUCE "); - printf("\n"); - } - printf("|\n"); - - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - printf("| BufferData:\n"); - printf("| BufferSize: %d\n", (int)NBF_BUFFERSIZE(bufferdata)); - printf("| Size: %d\n", (int)NBF_SIZE(bufferdata)); - printf("| BufIterEnd: %d\n", (int)NBF_BUFITEREND(bufferdata)); - if (itflags&NPY_ITFLAG_REDUCE) { - printf("| REDUCE Pos: %d\n", - (int)NBF_REDUCE_POS(bufferdata)); - printf("| REDUCE OuterSize: %d\n", - (int)NBF_REDUCE_OUTERSIZE(bufferdata)); - printf("| REDUCE OuterDim: %d\n", - (int)NBF_REDUCE_OUTERDIM(bufferdata)); - } - printf("| Strides: "); - for (iop = 0; iop < nop; ++iop) - printf("%d ", (int)NBF_STRIDES(bufferdata)[iop]); - printf("\n"); - /* Print the fixed strides when there's no inner loop */ - if (itflags&NPY_ITFLAG_EXLOOP) { - npy_intp fixedstrides[NPY_MAXDIMS]; - printf("| Fixed Strides: "); - NpyIter_GetInnerFixedStrideArray(iter, fixedstrides); - for (iop = 0; iop < nop; ++iop) - printf("%d ", (int)fixedstrides[iop]); - printf("\n"); - } - printf("| Ptrs: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_PTRS(bufferdata)[iop]); - printf("\n"); - if (itflags&NPY_ITFLAG_REDUCE) { - printf("| REDUCE Outer Strides: "); - for (iop = 0; iop < nop; ++iop) - printf("%d ", (int)NBF_REDUCE_OUTERSTRIDES(bufferdata)[iop]); - printf("\n"); - printf("| REDUCE Outer Ptrs: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_REDUCE_OUTERPTRS(bufferdata)[iop]); - printf("\n"); - } - printf("| ReadTransferFn: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_READTRANSFERFN(bufferdata)[iop]); - printf("\n"); - printf("| ReadTransferData: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void 
*)NBF_READTRANSFERDATA(bufferdata)[iop]); - printf("\n"); - printf("| WriteTransferFn: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_WRITETRANSFERFN(bufferdata)[iop]); - printf("\n"); - printf("| WriteTransferData: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_WRITETRANSFERDATA(bufferdata)[iop]); - printf("\n"); - printf("| Buffers: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_BUFFERS(bufferdata)[iop]); - printf("\n"); - printf("|\n"); - } - - axisdata = NIT_AXISDATA(iter); - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - printf("| AxisData[%d]:\n", (int)idim); - printf("| Shape: %d\n", (int)NAD_SHAPE(axisdata)); - printf("| Index: %d\n", (int)NAD_INDEX(axisdata)); - printf("| Strides: "); - for (iop = 0; iop < nop; ++iop) { - printf("%d ", (int)NAD_STRIDES(axisdata)[iop]); - } - printf("\n"); - if (itflags&NPY_ITFLAG_HASINDEX) { - printf("| Index Stride: %d\n", (int)NAD_STRIDES(axisdata)[nop]); - } - printf("| Ptrs: "); - for (iop = 0; iop < nop; ++iop) { - printf("%p ", (void *)NAD_PTRS(axisdata)[iop]); - } - printf("\n"); - if (itflags&NPY_ITFLAG_HASINDEX) { - printf("| Index Value: %d\n", - (int)((npy_intp*)NAD_PTRS(axisdata))[nop]); - } - } - - printf("------- END ITERATOR DUMP -------\n"); - - PyGILState_Release(gilstate); -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/nditer_pywrap.c b/numpy-1.6.2/numpy/core/src/multiarray/nditer_pywrap.c deleted file mode 100644 index 3ee5223f75..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/nditer_pywrap.c +++ /dev/null @@ -1,2504 +0,0 @@ -/* - * This file implements the CPython wrapper of the new NumPy iterator. - * - * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia - * - * See LICENSE.txt for the license. 
- */ -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#define _MULTIARRAYMODULE -#include -#include - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -typedef struct NewNpyArrayIterObject_tag NewNpyArrayIterObject; - -struct NewNpyArrayIterObject_tag { - PyObject_HEAD - /* The iterator */ - NpyIter *iter; - /* Flag indicating iteration started/stopped */ - char started, finished; - /* Child to update for nested iteration */ - NewNpyArrayIterObject *nested_child; - /* Cached values from the iterator */ - NpyIter_IterNextFunc *iternext; - NpyIter_GetMultiIndexFunc *get_multi_index; - char **dataptrs; - PyArray_Descr **dtypes; - PyArrayObject **operands; - npy_intp *innerstrides, *innerloopsizeptr; - char readflags[NPY_MAXARGS]; - char writeflags[NPY_MAXARGS]; -}; - -void npyiter_cache_values(NewNpyArrayIterObject *self) -{ - NpyIter *iter = self->iter; - - /* iternext and get_multi_index functions */ - self->iternext = NpyIter_GetIterNext(iter, NULL); - if (NpyIter_HasMultiIndex(iter) && !NpyIter_HasDelayedBufAlloc(iter)) { - self->get_multi_index = NpyIter_GetGetMultiIndex(iter, NULL); - } - else { - self->get_multi_index = NULL; - } - - /* Internal data pointers */ - self->dataptrs = NpyIter_GetDataPtrArray(iter); - self->dtypes = NpyIter_GetDescrArray(iter); - self->operands = NpyIter_GetOperandArray(iter); - - if (NpyIter_HasExternalLoop(iter)) { - self->innerstrides = NpyIter_GetInnerStrideArray(iter); - self->innerloopsizeptr = NpyIter_GetInnerLoopSizePtr(iter); - } - else { - self->innerstrides = NULL; - self->innerloopsizeptr = NULL; - } - - /* The read/write settings */ - NpyIter_GetReadFlags(iter, self->readflags); - NpyIter_GetWriteFlags(iter, self->writeflags); -} - -static PyObject * -npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) -{ - NewNpyArrayIterObject *self; - - self = (NewNpyArrayIterObject *)subtype->tp_alloc(subtype, 0); - if (self != NULL) { - self->iter = NULL; - self->nested_child 
= NULL; - } - - return (PyObject *)self; -} - -static int -NpyIter_GlobalFlagsConverter(PyObject *flags_in, npy_uint32 *flags) -{ - npy_uint32 tmpflags = 0; - int iflags, nflags; - - PyObject *f; - char *str = NULL; - Py_ssize_t length = 0; - npy_uint32 flag; - - if (flags_in == NULL || flags_in == Py_None) { - *flags = 0; - return 1; - } - - if (!PyTuple_Check(flags_in) && !PyList_Check(flags_in)) { - PyErr_SetString(PyExc_ValueError, - "Iterator global flags must be a list or tuple of strings"); - return 0; - } - - nflags = PySequence_Size(flags_in); - - for (iflags = 0; iflags < nflags; ++iflags) { - f = PySequence_GetItem(flags_in, iflags); - if (f == NULL) { - return 0; - } - - if (PyUnicode_Check(f)) { - /* accept unicode input */ - PyObject *f_str; - f_str = PyUnicode_AsASCIIString(f); - if (f_str == NULL) { - Py_DECREF(f); - return 0; - } - Py_DECREF(f); - f = f_str; - } - - if (PyBytes_AsStringAndSize(f, &str, &length) == -1) { - Py_DECREF(f); - return 0; - } - /* Use switch statements to quickly isolate the right flag */ - flag = 0; - switch (str[0]) { - case 'b': - if (strcmp(str, "buffered") == 0) { - flag = NPY_ITER_BUFFERED; - } - break; - case 'c': - if (length >= 6) switch (str[5]) { - case 'e': - if (strcmp(str, "c_index") == 0) { - flag = NPY_ITER_C_INDEX; - } - break; - case 'n': - if (strcmp(str, "common_dtype") == 0) { - flag = NPY_ITER_COMMON_DTYPE; - } - break; - } - break; - case 'd': - if (strcmp(str, "delay_bufalloc") == 0) { - flag = NPY_ITER_DELAY_BUFALLOC; - } - break; - case 'e': - if (strcmp(str, "external_loop") == 0) { - flag = NPY_ITER_EXTERNAL_LOOP; - } - break; - case 'f': - if (strcmp(str, "f_index") == 0) { - flag = NPY_ITER_F_INDEX; - } - break; - case 'g': - if (strcmp(str, "growinner") == 0) { - flag = NPY_ITER_GROWINNER; - } - break; - case 'm': - if (strcmp(str, "multi_index") == 0) { - flag = NPY_ITER_MULTI_INDEX; - } - break; - case 'r': - if (strcmp(str, "ranged") == 0) { - flag = NPY_ITER_RANGED; - } - else if 
(strcmp(str, "refs_ok") == 0) { - flag = NPY_ITER_REFS_OK; - } - else if (strcmp(str, "reduce_ok") == 0) { - flag = NPY_ITER_REDUCE_OK; - } - break; - case 'z': - if (strcmp(str, "zerosize_ok") == 0) { - flag = NPY_ITER_ZEROSIZE_OK; - } - break; - } - if (flag == 0) { - PyErr_Format(PyExc_ValueError, - "Unexpected iterator global flag \"%s\"", str); - Py_DECREF(f); - return 0; - } - else { - tmpflags |= flag; - } - Py_DECREF(f); - } - - *flags |= tmpflags; - return 1; -} - -/* TODO: Use PyArray_OrderConverter once 'K' is added there */ -static int -npyiter_order_converter(PyObject *order_in, NPY_ORDER *order) -{ - char *str = NULL; - Py_ssize_t length = 0; - - if (PyUnicode_Check(order_in)) { - /* accept unicode input */ - PyObject *str_obj; - int ret; - str_obj = PyUnicode_AsASCIIString(order_in); - if (str_obj == NULL) { - return 0; - } - ret = npyiter_order_converter(str_obj, order); - Py_DECREF(str_obj); - return ret; - } - - if (PyBytes_AsStringAndSize(order_in, &str, &length) == -1) { - return 0; - } - - if (length == 1) switch (str[0]) { - case 'C': - *order = NPY_CORDER; - return 1; - case 'F': - *order = NPY_FORTRANORDER; - return 1; - case 'A': - *order = NPY_ANYORDER; - return 1; - case 'K': - *order = NPY_KEEPORDER; - return 1; - } - - PyErr_SetString(PyExc_ValueError, - "order must be one of 'C', 'F', 'A', or 'K'"); - return 0; -} - -/*NUMPY_API - * Convert any Python object, *obj*, to an NPY_CASTING enum. 
- * TODO: Move elsewhere - */ -NPY_NO_EXPORT int -PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) -{ - char *str = NULL; - Py_ssize_t length = 0; - - if (PyUnicode_Check(obj)) { - PyObject *str_obj; - int ret; - str_obj = PyUnicode_AsASCIIString(obj); - if (str_obj == NULL) { - return 0; - } - ret = PyArray_CastingConverter(str_obj, casting); - Py_DECREF(str_obj); - return ret; - } - - if (PyBytes_AsStringAndSize(obj, &str, &length) == -1) { - return 0; - } - - if (length >= 2) switch (str[2]) { - case 0: - if (strcmp(str, "no") == 0) { - *casting = NPY_NO_CASTING; - return 1; - } - break; - case 'u': - if (strcmp(str, "equiv") == 0) { - *casting = NPY_EQUIV_CASTING; - return 1; - } - break; - case 'f': - if (strcmp(str, "safe") == 0) { - *casting = NPY_SAFE_CASTING; - return 1; - } - break; - case 'm': - if (strcmp(str, "same_kind") == 0) { - *casting = NPY_SAME_KIND_CASTING; - return 1; - } - break; - case 's': - if (strcmp(str, "unsafe") == 0) { - *casting = NPY_UNSAFE_CASTING; - return 1; - } - break; - } - - PyErr_SetString(PyExc_ValueError, - "casting must be one of 'no', 'equiv', 'safe', " - "'same_kind', or 'unsafe'"); - return 0; -} - -static int -NpyIter_OpFlagsConverter(PyObject *op_flags_in, - npy_uint32 *op_flags) -{ - int iflags, nflags; - npy_uint32 flag; - - if (!PyTuple_Check(op_flags_in) && !PyList_Check(op_flags_in)) { - PyErr_SetString(PyExc_ValueError, - "op_flags must be a tuple or array of per-op flag-tuples"); - return 0; - } - - nflags = PySequence_Size(op_flags_in); - - *op_flags = 0; - for (iflags = 0; iflags < nflags; ++iflags) { - PyObject *f; - char *str = NULL; - Py_ssize_t length = 0; - - f = PySequence_GetItem(op_flags_in, iflags); - if (f == NULL) { - return 0; - } - - if (PyUnicode_Check(f)) { - /* accept unicode input */ - PyObject *f_str; - f_str = PyUnicode_AsASCIIString(f); - if (f_str == NULL) { - Py_DECREF(f); - return 0; - } - Py_DECREF(f); - f = f_str; - } - - if (PyBytes_AsStringAndSize(f, &str, &length) == 
-1) { - Py_DECREF(f); - PyErr_SetString(PyExc_ValueError, - "op_flags must be a tuple or array of per-op flag-tuples"); - return 0; - } - - /* Use switch statements to quickly isolate the right flag */ - flag = 0; - switch (str[0]) { - case 'a': - if (strcmp(str, "allocate") == 0) { - flag = NPY_ITER_ALLOCATE; - } - if (strcmp(str, "aligned") == 0) { - flag = NPY_ITER_ALIGNED; - } - break; - case 'c': - if (strcmp(str, "copy") == 0) { - flag = NPY_ITER_COPY; - } - if (strcmp(str, "contig") == 0) { - flag = NPY_ITER_CONTIG; - } - break; - case 'n': - switch (str[1]) { - case 'b': - if (strcmp(str, "nbo") == 0) { - flag = NPY_ITER_NBO; - } - break; - case 'o': - if (strcmp(str, "no_subtype") == 0) { - flag = NPY_ITER_NO_SUBTYPE; - } - else if (strcmp(str, "no_broadcast") == 0) { - flag = NPY_ITER_NO_BROADCAST; - } - break; - } - break; - case 'r': - if (length > 4) switch (str[4]) { - case 'o': - if (strcmp(str, "readonly") == 0) { - flag = NPY_ITER_READONLY; - } - break; - case 'w': - if (strcmp(str, "readwrite") == 0) { - flag = NPY_ITER_READWRITE; - } - break; - } - break; - case 'u': - if (strcmp(str, "updateifcopy") == 0) { - flag = NPY_ITER_UPDATEIFCOPY; - } - break; - case 'w': - if (strcmp(str, "writeonly") == 0) { - flag = NPY_ITER_WRITEONLY; - } - break; - } - if (flag == 0) { - PyErr_Format(PyExc_ValueError, - "Unexpected per-op iterator flag \"%s\"", str); - Py_DECREF(f); - return 0; - } - else { - *op_flags |= flag; - } - Py_DECREF(f); - } - - return 1; -} - -static int -npyiter_convert_op_flags_array(PyObject *op_flags_in, - npy_uint32 *op_flags_array, npy_intp nop) -{ - npy_intp iop; - - if (!PyTuple_Check(op_flags_in) && !PyList_Check(op_flags_in)) { - PyErr_SetString(PyExc_ValueError, - "op_flags must be a tuple or array of per-op flag-tuples"); - return 0; - } - - if (PySequence_Size(op_flags_in) != nop) { - goto try_single_flags; - } - - for (iop = 0; iop < nop; ++iop) { - PyObject *f = PySequence_GetItem(op_flags_in, iop); - if (f == NULL) { - 
return 0; - } - /* If the first item is a string, try as one set of flags */ - if (iop == 0 && (PyBytes_Check(f) || PyUnicode_Check(f))) { - Py_DECREF(f); - goto try_single_flags; - } - if (NpyIter_OpFlagsConverter(f, - &op_flags_array[iop]) != 1) { - Py_DECREF(f); - return 0; - } - - Py_DECREF(f); - } - - return 1; - -try_single_flags: - if (NpyIter_OpFlagsConverter(op_flags_in, - &op_flags_array[0]) != 1) { - return 0; - } - - for (iop = 1; iop < nop; ++iop) { - op_flags_array[iop] = op_flags_array[0]; - } - - return 1; -} - -static int -npyiter_convert_dtypes(PyObject *op_dtypes_in, - PyArray_Descr **op_dtypes, - npy_intp nop) -{ - npy_intp iop; - - /* - * If the input isn't a tuple of dtypes, try converting it as-is - * to a dtype, and replicating to all operands. - */ - if ((!PyTuple_Check(op_dtypes_in) && !PyList_Check(op_dtypes_in)) || - PySequence_Size(op_dtypes_in) != nop) { - goto try_single_dtype; - } - - for (iop = 0; iop < nop; ++iop) { - PyObject *dtype = PySequence_GetItem(op_dtypes_in, iop); - if (dtype == NULL) { - npy_intp i; - for (i = 0; i < iop; ++i ) { - Py_XDECREF(op_dtypes[i]); - } - return 0; - } - - /* Try converting the object to a descr */ - if (PyArray_DescrConverter2(dtype, &op_dtypes[iop]) != 1) { - npy_intp i; - for (i = 0; i < iop; ++i ) { - Py_XDECREF(op_dtypes[i]); - } - Py_DECREF(dtype); - PyErr_Clear(); - goto try_single_dtype; - } - - Py_DECREF(dtype); - } - - return 1; - -try_single_dtype: - if (PyArray_DescrConverter2(op_dtypes_in, &op_dtypes[0]) == 1) { - for (iop = 1; iop < nop; ++iop) { - op_dtypes[iop] = op_dtypes[0]; - Py_XINCREF(op_dtypes[iop]); - } - return 1; - } - - return 0; -} - -static int -npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop, - int **op_axes, int *oa_ndim) -{ - PyObject *a; - int iop; - - if ((!PyTuple_Check(op_axes_in) && !PyList_Check(op_axes_in)) || - PySequence_Size(op_axes_in) != nop) { - PyErr_SetString(PyExc_ValueError, - "op_axes must be a tuple/list matching the number of ops"); - 
return 0; - } - - *oa_ndim = 0; - - /* Copy the tuples into op_axes */ - for (iop = 0; iop < nop; ++iop) { - int idim; - a = PySequence_GetItem(op_axes_in, iop); - if (a == NULL) { - return 0; - } - if (a == Py_None) { - op_axes[iop] = NULL; - } else { - if (!PyTuple_Check(a) && !PyList_Check(a)) { - PyErr_SetString(PyExc_ValueError, - "Each entry of op_axes must be None " - "or a tuple/list"); - Py_DECREF(a); - return 0; - } - if (*oa_ndim == 0) { - *oa_ndim = PySequence_Size(a); - if (*oa_ndim == 0) { - PyErr_SetString(PyExc_ValueError, - "op_axes must have at least one dimension"); - return 0; - } - if (*oa_ndim > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "Too many dimensions in op_axes"); - return 0; - } - } - if (PySequence_Size(a) != *oa_ndim) { - PyErr_SetString(PyExc_ValueError, - "Each entry of op_axes must have the same size"); - Py_DECREF(a); - return 0; - } - for (idim = 0; idim < *oa_ndim; ++idim) { - PyObject *v = PySequence_GetItem(a, idim); - if (v == NULL) { - Py_DECREF(a); - return 0; - } - /* numpy.newaxis is None */ - if (v == Py_None) { - op_axes[iop][idim] = -1; - } - else { - op_axes[iop][idim] = PyInt_AsLong(v); - if (op_axes[iop][idim]==-1 && - PyErr_Occurred()) { - Py_DECREF(a); - Py_DECREF(v); - return 0; - } - } - Py_DECREF(v); - } - Py_DECREF(a); - } - } - - if (*oa_ndim == 0) { - PyErr_SetString(PyExc_ValueError, - "If op_axes is provided, at least one list of axes " - "must be contained within it"); - return 0; - } - - return 1; -} - -/* - * Converts the operand array and op_flags array into the form NpyIter_AdvancedNew - * needs. Sets nop, and on success, each op[i] owns a reference - * to an array object. 
- */ -static int -npyiter_convert_ops(PyObject *op_in, PyObject *op_flags_in, - PyArrayObject **op, npy_uint32 *op_flags, - int *nop_out) -{ - int iop, nop; - - /* nop and op */ - if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - nop = PySequence_Size(op_in); - if (nop == 0) { - PyErr_SetString(PyExc_ValueError, - "Must provide at least one operand"); - return 0; - } - if (nop > NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, "Too many operands"); - return 0; - } - - for (iop = 0; iop < nop; ++iop) { - PyObject *item = PySequence_GetItem(op_in, iop); - if (item == NULL) { - npy_intp i; - for (i = 0; i < iop; ++i) { - Py_XDECREF(op[i]); - } - return 0; - } - else if (item == Py_None) { - Py_DECREF(item); - item = NULL; - } - /* This is converted to an array after op flags are retrieved */ - op[iop] = (PyArrayObject *)item; - } - } - else { - nop = 1; - /* Is converted to an array after op flags are retrieved */ - Py_INCREF(op_in); - op[0] = (PyArrayObject *)op_in; - } - - *nop_out = nop; - - /* op_flags */ - if (op_flags_in == NULL || op_flags_in == Py_None) { - for (iop = 0; iop < nop; ++iop) { - /* - * By default, make NULL operands writeonly and flagged for - * allocation, and everything else readonly. To write - * to a provided operand, you must specify the write flag manually. 
- */ - if (op[iop] == NULL) { - op_flags[iop] = NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE; - } - else { - op_flags[iop] = NPY_ITER_READONLY; - } - } - } - else if (npyiter_convert_op_flags_array(op_flags_in, - op_flags, nop) != 1) { - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - } - *nop_out = 0; - return 0; - } - - /* Now that we have the flags - convert all the ops to arrays */ - for (iop = 0; iop < nop; ++iop) { - if (op[iop] != NULL) { - PyArrayObject *ao; - int fromanyflags = 0; - - if (op_flags[iop]&(NPY_ITER_READWRITE|NPY_ITER_WRITEONLY)) { - fromanyflags = NPY_UPDATEIFCOPY; - } - ao = (PyArrayObject *)PyArray_FromAny((PyObject *)op[iop], - NULL, 0, 0, fromanyflags, NULL); - if (ao == NULL) { - if (PyErr_Occurred() && - PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_SetString(PyExc_TypeError, - "Iterator operand is flagged as writeable, " - "but is an object which cannot be written " - "back to via UPDATEIFCOPY"); - } - for (iop = 0; iop < nop; ++iop) { - Py_DECREF(op[iop]); - } - *nop_out = 0; - return 0; - } - Py_DECREF(op[iop]); - op[iop] = ao; - } - } - - return 1; -} - -static int -npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"op", "flags", "op_flags", "op_dtypes", - "order", "casting", "op_axes", "itershape", - "buffersize", - NULL}; - - PyObject *op_in = NULL, *op_flags_in = NULL, - *op_dtypes_in = NULL, *op_axes_in = NULL; - - int iop, nop = 0; - PyArrayObject *op[NPY_MAXARGS]; - npy_uint32 flags = 0; - NPY_ORDER order = NPY_KEEPORDER; - NPY_CASTING casting = NPY_SAFE_CASTING; - npy_uint32 op_flags[NPY_MAXARGS]; - PyArray_Descr *op_request_dtypes[NPY_MAXARGS]; - int oa_ndim = 0; - int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS]; - int *op_axes[NPY_MAXARGS]; - PyArray_Dims itershape = {NULL, 0}; - int buffersize = 0; - - if (self->iter != NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator was already initialized"); - return -1; - } - - if (!PyArg_ParseTupleAndKeywords(args, 
kwds, "O|O&OOO&O&OO&i", kwlist, - &op_in, - NpyIter_GlobalFlagsConverter, &flags, - &op_flags_in, - &op_dtypes_in, - npyiter_order_converter, &order, - PyArray_CastingConverter, &casting, - &op_axes_in, - PyArray_IntpConverter, &itershape, - &buffersize)) { - if (itershape.ptr != NULL) { - PyDimMem_FREE(itershape.ptr); - } - return -1; - } - - /* Set the dtypes and ops to all NULL to start */ - memset(op_request_dtypes, 0, sizeof(op_request_dtypes)); - - /* op and op_flags */ - if (npyiter_convert_ops(op_in, op_flags_in, op, op_flags, &nop) - != 1) { - goto fail; - } - - /* op_request_dtypes */ - if (op_dtypes_in != NULL && op_dtypes_in != Py_None && - npyiter_convert_dtypes(op_dtypes_in, - op_request_dtypes, nop) != 1) { - goto fail; - } - - /* op_axes */ - if (op_axes_in != NULL && op_axes_in != Py_None) { - /* Initialize to point to the op_axes arrays */ - for (iop = 0; iop < nop; ++iop) { - op_axes[iop] = op_axes_arrays[iop]; - } - - if (npyiter_convert_op_axes(op_axes_in, nop, - op_axes, &oa_ndim) != 1) { - goto fail; - } - } - - if (itershape.len > 0) { - if (oa_ndim == 0) { - oa_ndim = itershape.len; - memset(op_axes, 0, sizeof(op_axes[0])*oa_ndim); - } - else if (oa_ndim != itershape.len) { - PyErr_SetString(PyExc_ValueError, - "'op_axes' and 'itershape' must have the same number " - "of entries equal to the iterator ndim"); - goto fail; - } - } - else if (itershape.ptr != NULL) { - PyDimMem_FREE(itershape.ptr); - itershape.ptr = NULL; - } - - self->iter = NpyIter_AdvancedNew(nop, op, flags, order, casting, op_flags, - op_request_dtypes, - oa_ndim, oa_ndim > 0 ? 
op_axes : NULL, - itershape.ptr, - buffersize); - - if (self->iter == NULL) { - goto fail; - } - - /* Cache some values for the member functions to use */ - npyiter_cache_values(self); - - if (NpyIter_GetIterSize(self->iter) == 0) { - self->started = 1; - self->finished = 1; - } - else { - self->started = 0; - self->finished = 0; - } - - if (itershape.ptr != NULL) { - PyDimMem_FREE(itershape.ptr); - } - - /* Release the references we got to the ops and dtypes */ - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - Py_XDECREF(op_request_dtypes[iop]); - } - - return 0; - -fail: - if (itershape.ptr != NULL) { - PyDimMem_FREE(itershape.ptr); - } - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - Py_XDECREF(op_request_dtypes[iop]); - } - return -1; -} - -NPY_NO_EXPORT PyObject * -NpyIter_NestedIters(PyObject *NPY_UNUSED(self), - PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"op", "axes", "flags", "op_flags", - "op_dtypes", "order", - "casting", "buffersize", - NULL}; - - PyObject *op_in = NULL, *axes_in = NULL, - *op_flags_in = NULL, *op_dtypes_in = NULL; - - int iop, nop = 0, inest, nnest = 0; - PyArrayObject *op[NPY_MAXARGS]; - npy_uint32 flags = 0, flags_inner = 0; - NPY_ORDER order = NPY_KEEPORDER; - NPY_CASTING casting = NPY_SAFE_CASTING; - npy_uint32 op_flags[NPY_MAXARGS], op_flags_inner[NPY_MAXARGS]; - PyArray_Descr *op_request_dtypes[NPY_MAXARGS], - *op_request_dtypes_inner[NPY_MAXARGS]; - int op_axes_data[NPY_MAXDIMS]; - int *nested_op_axes[NPY_MAXDIMS]; - int nested_naxes[NPY_MAXDIMS], iaxes, naxes; - int negones[NPY_MAXDIMS]; - char used_axes[NPY_MAXDIMS]; - int buffersize = 0; - - PyObject *ret = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&OOO&O&i", kwlist, - &op_in, - &axes_in, - NpyIter_GlobalFlagsConverter, &flags, - &op_flags_in, - &op_dtypes_in, - npyiter_order_converter, &order, - PyArray_CastingConverter, &casting, - &buffersize)) { - return NULL; - } - - /* axes */ - if (!PyTuple_Check(axes_in) && 
!PyList_Check(axes_in)) { - PyErr_SetString(PyExc_ValueError, - "axes must be a tuple of axis arrays"); - return NULL; - } - nnest = PySequence_Size(axes_in); - if (nnest < 2) { - PyErr_SetString(PyExc_ValueError, - "axes must have at least 2 entries for nested iteration"); - return NULL; - } - naxes = 0; - memset(used_axes, 0, NPY_MAXDIMS); - for (inest = 0; inest < nnest; ++inest) { - PyObject *item = PySequence_GetItem(axes_in, inest); - npy_intp i; - if (item == NULL) { - return NULL; - } - if (!PyTuple_Check(item) && !PyList_Check(item)) { - PyErr_SetString(PyExc_ValueError, - "Each item in axes must be a an integer tuple"); - Py_DECREF(item); - return NULL; - } - nested_naxes[inest] = PySequence_Size(item); - if (naxes + nested_naxes[inest] > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "Too many axes given"); - Py_DECREF(item); - return NULL; - } - for (i = 0; i < nested_naxes[inest]; ++i) { - PyObject *v = PySequence_GetItem(item, i); - npy_intp axis; - if (v == NULL) { - Py_DECREF(item); - return NULL; - } - axis = PyInt_AsLong(v); - Py_DECREF(v); - if (axis < 0 || axis >= NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "An axis is out of bounds"); - Py_DECREF(item); - return NULL; - } - /* - * This check is very important, without it out of bounds - * data accesses are possible. 
- */ - if (used_axes[axis] != 0) { - PyErr_SetString(PyExc_ValueError, - "An axis is used more than once"); - Py_DECREF(item); - return NULL; - } - used_axes[axis] = 1; - op_axes_data[naxes+i] = axis; - } - nested_op_axes[inest] = &op_axes_data[naxes]; - naxes += nested_naxes[inest]; - Py_DECREF(item); - } - - /* op and op_flags */ - if (npyiter_convert_ops(op_in, op_flags_in, op, op_flags, &nop) - != 1) { - return NULL; - } - - /* Set the dtypes to all NULL to start as well */ - memset(op_request_dtypes, 0, sizeof(op_request_dtypes[0])*nop); - memset(op_request_dtypes_inner, 0, - sizeof(op_request_dtypes_inner[0])*nop); - - /* op_request_dtypes */ - if (op_dtypes_in != NULL && op_dtypes_in != Py_None && - npyiter_convert_dtypes(op_dtypes_in, - op_request_dtypes, nop) != 1) { - goto fail; - } - - ret = PyTuple_New(nnest); - if (ret == NULL) { - goto fail; - } - - /* For broadcasting allocated arrays */ - for (iaxes = 0; iaxes < naxes; ++iaxes) { - negones[iaxes] = -1; - } - - /* - * Clear any unnecessary ALLOCATE flags, so we can use them - * to indicate exactly the allocated outputs. Also, separate - * the inner loop flags. - */ - for (iop = 0; iop < nop; ++iop) { - if ((op_flags[iop]&NPY_ITER_ALLOCATE) && op[iop] != NULL) { - op_flags[iop] &= ~NPY_ITER_ALLOCATE; - } - - /* - * Clear any flags allowing copies or output allocation for - * the inner loop. - */ - op_flags_inner[iop] = op_flags[iop] & ~(NPY_ITER_COPY| - NPY_ITER_UPDATEIFCOPY| - NPY_ITER_ALLOCATE); - /* - * If buffering is enabled and copying is not, - * clear the nbo_aligned flag and strip the data type - * for the outer loops. 
- */ - if ((flags&(NPY_ITER_BUFFERED)) && - !(op_flags[iop]&(NPY_ITER_COPY| - NPY_ITER_UPDATEIFCOPY| - NPY_ITER_ALLOCATE))) { - op_flags[iop] &= ~(NPY_ITER_NBO|NPY_ITER_ALIGNED|NPY_ITER_CONTIG); - op_request_dtypes_inner[iop] = op_request_dtypes[iop]; - op_request_dtypes[iop] = NULL; - } - } - - /* Only the inner loop gets the buffering and no inner flags */ - flags_inner = flags&~NPY_ITER_COMMON_DTYPE; - flags &= ~(NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_BUFFERED); - - for (inest = 0; inest < nnest; ++inest) { - NewNpyArrayIterObject *iter; - int *op_axes_nop[NPY_MAXARGS]; - - /* - * All the operands' op_axes are the same, except for - * allocated outputs. - */ - for (iop = 0; iop < nop; ++iop) { - if (op_flags[iop]&NPY_ITER_ALLOCATE) { - if (inest == 0) { - op_axes_nop[iop] = NULL; - } - else { - op_axes_nop[iop] = negones; - } - } - else { - op_axes_nop[iop] = nested_op_axes[inest]; - } - } - - /* - printf("\n"); - for (iop = 0; iop < nop; ++iop) { - npy_intp i; - - for (i = 0; i < nested_naxes[inest]; ++i) { - printf("%d ", (int)op_axes_nop[iop][i]); - } - printf("\n"); - } - */ - - /* Allocate the iterator */ - iter = (NewNpyArrayIterObject *)npyiter_new(&NpyIter_Type, NULL, NULL); - if (iter == NULL) { - Py_DECREF(ret); - goto fail; - } - - if (inest < nnest-1) { - iter->iter = NpyIter_AdvancedNew(nop, op, flags, order, - casting, op_flags, op_request_dtypes, - nested_naxes[inest], op_axes_nop, - NULL, - 0); - } - else { - iter->iter = NpyIter_AdvancedNew(nop, op, flags_inner, order, - casting, op_flags_inner, - op_request_dtypes_inner, - nested_naxes[inest], op_axes_nop, - NULL, - buffersize); - } - - if (iter->iter == NULL) { - Py_DECREF(ret); - goto fail; - } - - /* Cache some values for the member functions to use */ - npyiter_cache_values(iter); - - if (NpyIter_GetIterSize(iter->iter) == 0) { - iter->started = 1; - iter->finished = 1; - } - else { - iter->started = 0; - iter->finished = 0; - } - - /* - * If there are any allocated outputs or any copies were 
made, - * adjust op so that the other iterators use the same ones. - */ - if (inest == 0) { - PyArrayObject **operands = NpyIter_GetOperandArray(iter->iter); - for (iop = 0; iop < nop; ++iop) { - if (op[iop] != operands[iop]) { - Py_XDECREF(op[iop]); - op[iop] = operands[iop]; - Py_INCREF(op[iop]); - } - - /* - * Clear any flags allowing copies for - * the rest of the iterators - */ - op_flags[iop] &= ~(NPY_ITER_COPY| - NPY_ITER_UPDATEIFCOPY); - } - /* Clear the common dtype flag for the rest of the iterators */ - flags &= ~NPY_ITER_COMMON_DTYPE; - } - - PyTuple_SET_ITEM(ret, inest, (PyObject *)iter); - } - - /* Release our references to the ops and dtypes */ - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - Py_XDECREF(op_request_dtypes[iop]); - Py_XDECREF(op_request_dtypes_inner[iop]); - } - - /* Set up the nested child references */ - for (inest = 0; inest < nnest-1; ++inest) { - NewNpyArrayIterObject *iter; - iter = (NewNpyArrayIterObject *)PyTuple_GET_ITEM(ret, inest); - /* - * Indicates which iterator to reset with new base pointers - * each iteration step. 
- */ - iter->nested_child = - (NewNpyArrayIterObject *)PyTuple_GET_ITEM(ret, inest+1); - Py_INCREF(iter->nested_child); - /* - * Need to do a nested reset so all the iterators point - * at the right data - */ - if (NpyIter_ResetBasePointers(iter->nested_child->iter, - iter->dataptrs, NULL) != NPY_SUCCEED) { - Py_DECREF(ret); - return NULL; - } - } - - return ret; - -fail: - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - Py_XDECREF(op_request_dtypes[iop]); - Py_XDECREF(op_request_dtypes_inner[iop]); - } - return NULL; -} - -static void -npyiter_dealloc(NewNpyArrayIterObject *self) -{ - if (self->iter) { - NpyIter_Deallocate(self->iter); - self->iter = NULL; - Py_XDECREF(self->nested_child); - self->nested_child = NULL; - } - Py_TYPE(self)->tp_free((PyObject*)self); -} - -static int -npyiter_resetbasepointers(NewNpyArrayIterObject *self) -{ - while (self->nested_child) { - if (NpyIter_ResetBasePointers(self->nested_child->iter, - self->dataptrs, NULL) != NPY_SUCCEED) { - return NPY_FAIL; - } - self = self->nested_child; - if (NpyIter_GetIterSize(self->iter) == 0) { - self->started = 1; - self->finished = 1; - } - else { - self->started = 0; - self->finished = 0; - } - } - - return NPY_SUCCEED; -} - -static PyObject * -npyiter_reset(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - if (NpyIter_Reset(self->iter, NULL) != NPY_SUCCEED) { - return NULL; - } - if (NpyIter_GetIterSize(self->iter) == 0) { - self->started = 1; - self->finished = 1; - } - else { - self->started = 0; - self->finished = 0; - } - - if (self->get_multi_index == NULL && NpyIter_HasMultiIndex(self->iter)) { - self->get_multi_index = NpyIter_GetGetMultiIndex(self->iter, NULL); - } - - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return NULL; - } - - Py_RETURN_NONE; -} - -/* - * Makes a copy of the iterator. 
Note that the nesting is not - * copied. - */ -static PyObject * -npyiter_copy(NewNpyArrayIterObject *self) -{ - NewNpyArrayIterObject *iter; - - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - /* Allocate the iterator */ - iter = (NewNpyArrayIterObject *)npyiter_new(&NpyIter_Type, NULL, NULL); - if (iter == NULL) { - return NULL; - } - - /* Copy the C iterator */ - iter->iter = NpyIter_Copy(self->iter); - if (iter->iter == NULL) { - Py_DECREF(iter); - return NULL; - } - - /* Cache some values for the member functions to use */ - npyiter_cache_values(iter); - - iter->started = self->started; - iter->finished = self->finished; - - return (PyObject *)iter; -} - -static PyObject * -npyiter_iternext(NewNpyArrayIterObject *self) -{ - if (self->iter != NULL && self->iternext != NULL && - !self->finished && self->iternext(self->iter)) { - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return NULL; - } - - Py_RETURN_TRUE; - } - else { - self->finished = 1; - Py_RETURN_FALSE; - } -} - -static PyObject * -npyiter_remove_axis(NewNpyArrayIterObject *self, PyObject *args) -{ - int axis = 0; - - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - if (!PyArg_ParseTuple(args, "i", &axis)) { - return NULL; - } - - if (NpyIter_RemoveAxis(self->iter, axis) != NPY_SUCCEED) { - return NULL; - } - /* RemoveAxis invalidates cached values */ - npyiter_cache_values(self); - /* RemoveAxis also resets the iterator */ - if (NpyIter_GetIterSize(self->iter) == 0) { - self->started = 1; - self->finished = 1; - } - else { - self->started = 0; - self->finished = 0; - } - - Py_RETURN_NONE; -} - -static PyObject * -npyiter_remove_multi_index(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - 
NpyIter_RemoveMultiIndex(self->iter); - /* RemoveMultiIndex invalidates cached values */ - npyiter_cache_values(self); - /* RemoveMultiIndex also resets the iterator */ - if (NpyIter_GetIterSize(self->iter) == 0) { - self->started = 1; - self->finished = 1; - } - else { - self->started = 0; - self->finished = 0; - } - - Py_RETURN_NONE; -} - -static PyObject * -npyiter_enable_external_loop(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - NpyIter_EnableExternalLoop(self->iter); - /* EnableExternalLoop invalidates cached values */ - npyiter_cache_values(self); - /* EnableExternalLoop also resets the iterator */ - if (NpyIter_GetIterSize(self->iter) == 0) { - self->started = 1; - self->finished = 1; - } - else { - self->started = 0; - self->finished = 0; - } - - Py_RETURN_NONE; -} - -static PyObject * -npyiter_debug_print(NewNpyArrayIterObject *self) -{ - if (self->iter != NULL) { - NpyIter_DebugPrint(self->iter); - } - else { - printf("Iterator: (nil)\n"); - } - - Py_RETURN_NONE; -} - -NPY_NO_EXPORT PyObject * -npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i); - -static PyObject *npyiter_value_get(NewNpyArrayIterObject *self) -{ - PyObject *ret; - - npy_intp iop, nop; - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - nop = NpyIter_GetNOp(self->iter); - - /* Return an array or tuple of arrays with the values */ - if (nop == 1) { - ret = npyiter_seq_item(self, 0); - } - else { - ret = PyTuple_New(nop); - if (ret == NULL) { - return NULL; - } - for (iop = 0; iop < nop; ++iop) { - PyObject *a = npyiter_seq_item(self, iop); - if (a == NULL) { - Py_DECREF(ret); - return NULL; - } - PyTuple_SET_ITEM(ret, iop, a); - } - } - - return ret; -} - -static PyObject *npyiter_operands_get(NewNpyArrayIterObject *self) -{ - PyObject *ret; - - npy_intp iop, nop; - PyArrayObject **operands; - - 
if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - nop = NpyIter_GetNOp(self->iter); - operands = self->operands; - - ret = PyTuple_New(nop); - if (ret == NULL) { - return NULL; - } - for (iop = 0; iop < nop; ++iop) { - PyObject *operand = (PyObject *)operands[iop]; - - Py_INCREF(operand); - PyTuple_SET_ITEM(ret, iop, operand); - } - - return ret; -} - -static PyObject *npyiter_itviews_get(NewNpyArrayIterObject *self) -{ - PyObject *ret; - - npy_intp iop, nop; - - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - nop = NpyIter_GetNOp(self->iter); - - ret = PyTuple_New(nop); - if (ret == NULL) { - return NULL; - } - for (iop = 0; iop < nop; ++iop) { - PyArrayObject *view = NpyIter_GetIterView(self->iter, iop); - - if (view == NULL) { - Py_DECREF(ret); - return NULL; - } - PyTuple_SET_ITEM(ret, iop, (PyObject *)view); - } - - return ret; -} - -static PyObject * -npyiter_next(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL || self->iternext == NULL || self->finished) { - return NULL; - } - - /* - * Use the started flag for the Python iteration protocol to work - * when buffering is enabled. 
- */ - if (self->started) { - if (!self->iternext(self->iter)) { - self->finished = 1; - return NULL; - } - - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return NULL; - } - } - self->started = 1; - - return npyiter_value_get(self); -}; - -static PyObject *npyiter_shape_get(NewNpyArrayIterObject *self) -{ - PyObject *ret; - npy_intp idim, ndim, shape[NPY_MAXDIMS]; - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - if (NpyIter_GetShape(self->iter, shape) == NPY_SUCCEED) { - ndim = NpyIter_GetNDim(self->iter); - ret = PyTuple_New(ndim); - if (ret != NULL) { - for (idim = 0; idim < ndim; ++idim) { - PyTuple_SET_ITEM(ret, idim, - PyInt_FromLong(shape[idim])); - } - return ret; - } - } - - return NULL; -} - -static PyObject *npyiter_multi_index_get(NewNpyArrayIterObject *self) -{ - PyObject *ret; - npy_intp idim, ndim, multi_index[NPY_MAXDIMS]; - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - if (self->get_multi_index != NULL) { - ndim = NpyIter_GetNDim(self->iter); - self->get_multi_index(self->iter, multi_index); - ret = PyTuple_New(ndim); - for (idim = 0; idim < ndim; ++idim) { - PyTuple_SET_ITEM(ret, idim, - PyInt_FromLong(multi_index[idim])); - } - return ret; - } - else { - if (!NpyIter_HasMultiIndex(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator is not tracking a multi-index"); - return NULL; - } - else if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return NULL; - } - else { - PyErr_SetString(PyExc_ValueError, - "Iterator is in an invalid state"); - return NULL; - } - } -} - -static int npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value) -{ - npy_intp 
idim, ndim, multi_index[NPY_MAXDIMS]; - - if (value == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete nditer multi_index"); - return -1; - } - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return -1; - } - - if (NpyIter_HasMultiIndex(self->iter)) { - ndim = NpyIter_GetNDim(self->iter); - if (!PySequence_Check(value)) { - PyErr_SetString(PyExc_ValueError, - "multi_index must be set with a sequence"); - return -1; - } - if (PySequence_Size(value) != ndim) { - PyErr_SetString(PyExc_ValueError, - "Wrong number of indices"); - return -1; - } - for (idim = 0; idim < ndim; ++idim) { - PyObject *v = PySequence_GetItem(value, idim); - multi_index[idim] = PyInt_AsLong(v); - if (multi_index[idim]==-1 && PyErr_Occurred()) { - return -1; - } - } - if (NpyIter_GotoMultiIndex(self->iter, multi_index) != NPY_SUCCEED) { - return -1; - } - self->started = 0; - self->finished = 0; - - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return -1; - } - - return 0; - } - else { - PyErr_SetString(PyExc_ValueError, - "Iterator is not tracking a multi-index"); - return -1; - } -} - -static PyObject *npyiter_index_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - if (NpyIter_HasIndex(self->iter)) { - npy_intp ind = *NpyIter_GetIndexPtr(self->iter); - return PyInt_FromLong(ind); - } - else { - PyErr_SetString(PyExc_ValueError, - "Iterator does not have an index"); - return NULL; - } -} - -static int npyiter_index_set(NewNpyArrayIterObject *self, PyObject *value) -{ - if (value == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete nditer index"); - return -1; - } - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return -1; - } - - if (NpyIter_HasIndex(self->iter)) { - npy_intp ind; - 
ind = PyInt_AsLong(value); - if (ind==-1 && PyErr_Occurred()) { - return -1; - } - if (NpyIter_GotoIndex(self->iter, ind) != NPY_SUCCEED) { - return -1; - } - self->started = 0; - self->finished = 0; - - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return -1; - } - - return 0; - } - else { - PyErr_SetString(PyExc_ValueError, - "Iterator does not have an index"); - return -1; - } -} - -static PyObject *npyiter_iterindex_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - return PyInt_FromLong(NpyIter_GetIterIndex(self->iter)); -} - -static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value) -{ - npy_intp iterindex; - - if (value == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete nditer iterindex"); - return -1; - } - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return -1; - } - - iterindex = PyInt_AsLong(value); - if (iterindex==-1 && PyErr_Occurred()) { - return -1; - } - if (NpyIter_GotoIterIndex(self->iter, iterindex) != NPY_SUCCEED) { - return -1; - } - self->started = 0; - self->finished = 0; - - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return -1; - } - - return 0; -} - -static PyObject *npyiter_iterrange_get(NewNpyArrayIterObject *self) -{ - npy_intp istart = 0, iend = 0; - PyObject *ret; - - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - NpyIter_GetIterIndexRange(self->iter, &istart, &iend); - - ret = PyTuple_New(2); - if (ret == NULL) { - return NULL; - } - - PyTuple_SET_ITEM(ret, 0, PyInt_FromLong(istart)); - PyTuple_SET_ITEM(ret, 1, PyInt_FromLong(iend)); - - return ret; -} - -static int npyiter_iterrange_set(NewNpyArrayIterObject *self, 
PyObject *value) -{ -#if PY_VERSION_HEX >= 0x02050000 - npy_intp istart = 0, iend = 0; -#else - long istart = 0, iend = 0; -#endif - - if (value == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete nditer iterrange"); - return -1; - } - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return -1; - } - -#if PY_VERSION_HEX >= 0x02050000 - if (!PyArg_ParseTuple(value, "nn", &istart, &iend)) { -#else - if (!PyArg_ParseTuple(value, "ll", &istart, &iend)) { -#endif - return -1; - } - - if (NpyIter_ResetToIterIndexRange(self->iter, istart, iend, NULL) - != NPY_SUCCEED) { - return -1; - } - if (istart < iend) { - self->started = self->finished = 0; - } - else { - self->started = self->finished = 1; - } - - if (self->get_multi_index == NULL && NpyIter_HasMultiIndex(self->iter)) { - self->get_multi_index = NpyIter_GetGetMultiIndex(self->iter, NULL); - } - - /* If there is nesting, the nested iterators should be reset */ - if (npyiter_resetbasepointers(self) != NPY_SUCCEED) { - return -1; - } - - return 0; -} - -static PyObject *npyiter_has_delayed_bufalloc_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - Py_RETURN_TRUE; - } - else { - Py_RETURN_FALSE; - } -} - -static PyObject *npyiter_iterationneedsapi_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - if (NpyIter_IterationNeedsAPI(self->iter)) { - Py_RETURN_TRUE; - } - else { - Py_RETURN_FALSE; - } -} - -static PyObject *npyiter_has_multi_index_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - if (NpyIter_HasMultiIndex(self->iter)) { - Py_RETURN_TRUE; - } - else { - Py_RETURN_FALSE; - } -} - -static PyObject 
*npyiter_has_index_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - if (NpyIter_HasIndex(self->iter)) { - Py_RETURN_TRUE; - } - else { - Py_RETURN_FALSE; - } -} - -static PyObject *npyiter_dtypes_get(NewNpyArrayIterObject *self) -{ - PyObject *ret; - - npy_intp iop, nop; - PyArray_Descr **dtypes; - - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - nop = NpyIter_GetNOp(self->iter); - - ret = PyTuple_New(nop); - if (ret == NULL) { - return NULL; - } - dtypes = self->dtypes; - for (iop = 0; iop < nop; ++iop) { - PyArray_Descr *dtype = dtypes[iop]; - - Py_INCREF(dtype); - PyTuple_SET_ITEM(ret, iop, (PyObject *)dtype); - } - - return ret; -} - -static PyObject *npyiter_ndim_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - return PyInt_FromLong(NpyIter_GetNDim(self->iter)); -} - -static PyObject *npyiter_nop_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - return PyInt_FromLong(NpyIter_GetNOp(self->iter)); -} - -static PyObject *npyiter_itersize_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator is invalid"); - return NULL; - } - - return PyInt_FromLong(NpyIter_GetIterSize(self->iter)); -} - -static PyObject *npyiter_finished_get(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL || !self->finished) { - Py_RETURN_FALSE; - } - else { - Py_RETURN_TRUE; - } -} - -NPY_NO_EXPORT Py_ssize_t -npyiter_seq_length(NewNpyArrayIterObject *self) -{ - if (self->iter == NULL) { - return 0; - } - else { - return NpyIter_GetNOp(self->iter); - } -} - -NPY_NO_EXPORT PyObject * -npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i) -{ - PyObject *ret; - - 
npy_intp ret_ndim; - npy_intp nop, innerloopsize, innerstride; - char *dataptr; - PyArray_Descr *dtype; - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return NULL; - } - - nop = NpyIter_GetNOp(self->iter); - if (i < 0 || i >= nop) { - PyErr_Format(PyExc_IndexError, - "Iterator operand index %d is out of bounds", (int)i); - return NULL; - } - -#if 0 - /* - * This check is disabled because it prevents things like - * np.add(it[0], it[1], it[2]), where it[2] is a write-only - * parameter. When write-only, the value of it[i] is - * likely random junk, as if it were allocated with an - * np.empty(...) call. - */ - if (!self->readflags[i]) { - PyErr_Format(PyExc_RuntimeError, - "Iterator operand %d is write-only", (int)i); - return NULL; - } -#endif - - dataptr = self->dataptrs[i]; - dtype = self->dtypes[i]; - - if (NpyIter_HasExternalLoop(self->iter)) { - innerloopsize = *self->innerloopsizeptr; - innerstride = self->innerstrides[i]; - ret_ndim = 1; - } - else { - innerloopsize = 1; - innerstride = 0; - /* If the iterator is going over every element, return array scalars */ - ret_ndim = 0; - } - - Py_INCREF(dtype); - ret = (PyObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, - ret_ndim, &innerloopsize, - &innerstride, dataptr, - self->writeflags[i] ? 
NPY_WRITEABLE : 0, NULL); - Py_INCREF(self); - ((PyArrayObject *)ret)->base = (PyObject *)self; - - PyArray_UpdateFlags((PyArrayObject *)ret, NPY_UPDATE_ALL); - - return ret; -} - -NPY_NO_EXPORT PyObject * -npyiter_seq_slice(NewNpyArrayIterObject *self, - Py_ssize_t ilow, Py_ssize_t ihigh) -{ - PyObject *ret; - npy_intp nop; - Py_ssize_t i; - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return NULL; - } - - nop = NpyIter_GetNOp(self->iter); - if (ilow < 0) { - ilow = 0; - } - else if (ilow >= nop) { - ilow = nop-1; - } - if (ihigh < ilow) { - ihigh = ilow; - } - else if (ihigh > nop) { - ihigh = nop; - } - - ret = PyTuple_New(ihigh-ilow); - if (ret == NULL) { - return NULL; - } - for (i = ilow; i < ihigh ; ++i) { - PyObject *item = npyiter_seq_item(self, i); - if (item == NULL) { - Py_DECREF(ret); - return NULL; - } - PyTuple_SET_ITEM(ret, i-ilow, item); - } - return ret; -} - -NPY_NO_EXPORT int -npyiter_seq_ass_item(NewNpyArrayIterObject *self, Py_ssize_t i, PyObject *v) -{ - - npy_intp nop, innerloopsize, innerstride; - char *dataptr; - PyArray_Descr *dtype; - PyArrayObject *tmp; - int ret; - - if (v == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete iterator elements"); - return -1; - } - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return -1; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return -1; - } - - nop = NpyIter_GetNOp(self->iter); - if (i < 0 || i >= nop) { - PyErr_Format(PyExc_IndexError, - "Iterator operand index %d is out of bounds", (int)i); - return -1; - } - if 
(!self->writeflags[i]) { - PyErr_Format(PyExc_RuntimeError, - "Iterator operand %d is not writeable", (int)i); - return -1; - } - - dataptr = self->dataptrs[i]; - dtype = self->dtypes[i]; - - if (NpyIter_HasExternalLoop(self->iter)) { - innerloopsize = *self->innerloopsizeptr; - innerstride = self->innerstrides[i]; - } - else { - innerloopsize = 1; - innerstride = 0; - } - - /* TODO - there should be a better way than this... */ - Py_INCREF(dtype); - tmp = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, - 1, &innerloopsize, - &innerstride, dataptr, - NPY_WRITEABLE, NULL); - if (tmp == NULL) { - return -1; - } - PyArray_UpdateFlags(tmp, NPY_UPDATE_ALL); - ret = PyArray_CopyObject(tmp, v); - Py_DECREF(tmp); - return ret; -} - -static int -npyiter_seq_ass_slice(NewNpyArrayIterObject *self, Py_ssize_t ilow, - Py_ssize_t ihigh, PyObject *v) -{ - npy_intp nop; - Py_ssize_t i; - - if (v == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete iterator elements"); - return -1; - } - - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return -1; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return -1; - } - - nop = NpyIter_GetNOp(self->iter); - if (ilow < 0) { - ilow = 0; - } - else if (ilow >= nop) { - ilow = nop-1; - } - if (ihigh < ilow) { - ihigh = ilow; - } - else if (ihigh > nop) { - ihigh = nop; - } - - if (!PySequence_Check(v) || PySequence_Size(v) != ihigh-ilow) { - PyErr_SetString(PyExc_ValueError, - "Wrong size to assign to iterator slice"); - return -1; - } - - for (i = ilow; i < ihigh ; ++i) { - PyObject *item = PySequence_GetItem(v, i-ilow); - if (item == NULL) { - return -1; - } - if (npyiter_seq_ass_item(self, i, item) < 0) { - Py_DECREF(item); - return -1; - } - Py_DECREF(item); - } - - return 0; -} - -static PyObject * 
-npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op) -{ - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return NULL; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return NULL; - } - - if (PyInt_Check(op) || PyLong_Check(op) || - (PyIndex_Check(op) && !PySequence_Check(op))) { - npy_intp i = PyArray_PyIntAsIntp(op); - if (i == -1 && PyErr_Occurred()) { - return NULL; - } - return npyiter_seq_item(self, i); - } - else if (PySlice_Check(op)) { - Py_ssize_t istart = 0, iend = 0, istep = 0; - if (PySlice_GetIndices((PySliceObject *)op, - NpyIter_GetNOp(self->iter), - &istart, &iend, &istep) < 0) { - return NULL; - } - if (istep != 1) { - PyErr_SetString(PyExc_ValueError, - "Iterator slicing only supports a step of 1"); - return NULL; - } - return npyiter_seq_slice(self, istart, iend); - } - - PyErr_SetString(PyExc_TypeError, - "invalid index type for iterator indexing"); - return NULL; -} - -static int -npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op, - PyObject *value) -{ - if (value == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete iterator elements"); - return -1; - } - if (self->iter == NULL || self->finished) { - PyErr_SetString(PyExc_ValueError, - "Iterator is past the end"); - return -1; - } - - if (NpyIter_HasDelayedBufAlloc(self->iter)) { - PyErr_SetString(PyExc_ValueError, - "Iterator construction used delayed buffer allocation, " - "and no reset has been done yet"); - return -1; - } - - if (PyInt_Check(op) || PyLong_Check(op) || - (PyIndex_Check(op) && !PySequence_Check(op))) { - npy_intp i = PyArray_PyIntAsIntp(op); - if (i == -1 && PyErr_Occurred()) { - return -1; - } - return npyiter_seq_ass_item(self, i, value); - } - else if (PySlice_Check(op)) { - Py_ssize_t istart = 0, iend = 0, istep = 0; - if 
(PySlice_GetIndices((PySliceObject *)op, - NpyIter_GetNOp(self->iter), - &istart, &iend, &istep) < 0) { - return -1; - } - if (istep != 1) { - PyErr_SetString(PyExc_ValueError, - "Iterator slice assignment only supports a step of 1"); - return -1; - } - return npyiter_seq_ass_slice(self, istart, iend, value); - } - - PyErr_SetString(PyExc_TypeError, - "invalid index type for iterator indexing"); - return -1; -} - -static PyMethodDef npyiter_methods[] = { - {"reset", (PyCFunction)npyiter_reset, METH_NOARGS, NULL}, - {"copy", (PyCFunction)npyiter_copy, METH_NOARGS, NULL}, - {"__copy__", (PyCFunction)npyiter_copy, METH_NOARGS, NULL}, - {"iternext", (PyCFunction)npyiter_iternext, METH_NOARGS, NULL}, - {"remove_axis", (PyCFunction)npyiter_remove_axis, METH_VARARGS, NULL}, - {"remove_multi_index", (PyCFunction)npyiter_remove_multi_index, - METH_NOARGS, NULL}, - {"enable_external_loop", (PyCFunction)npyiter_enable_external_loop, - METH_NOARGS, NULL}, - {"debug_print", (PyCFunction)npyiter_debug_print, METH_NOARGS, NULL}, - {NULL, NULL, 0, NULL}, -}; - -static PyMemberDef npyiter_members[] = { - {NULL, 0, 0, 0, NULL}, -}; - -static PyGetSetDef npyiter_getsets[] = { - {"value", - (getter)npyiter_value_get, - NULL, NULL, NULL}, - {"shape", - (getter)npyiter_shape_get, - NULL, NULL, NULL}, - {"multi_index", - (getter)npyiter_multi_index_get, - (setter)npyiter_multi_index_set, - NULL, NULL}, - {"index", - (getter)npyiter_index_get, - (setter)npyiter_index_set, - NULL, NULL}, - {"iterindex", - (getter)npyiter_iterindex_get, - (setter)npyiter_iterindex_set, - NULL, NULL}, - {"iterrange", - (getter)npyiter_iterrange_get, - (setter)npyiter_iterrange_set, - NULL, NULL}, - {"operands", - (getter)npyiter_operands_get, - NULL, NULL, NULL}, - {"itviews", - (getter)npyiter_itviews_get, - NULL, NULL, NULL}, - {"has_delayed_bufalloc", - (getter)npyiter_has_delayed_bufalloc_get, - NULL, NULL, NULL}, - {"iterationneedsapi", - (getter)npyiter_iterationneedsapi_get, - NULL, NULL, NULL}, - 
{"has_multi_index", - (getter)npyiter_has_multi_index_get, - NULL, NULL, NULL}, - {"has_index", - (getter)npyiter_has_index_get, - NULL, NULL, NULL}, - {"dtypes", - (getter)npyiter_dtypes_get, - NULL, NULL, NULL}, - {"ndim", - (getter)npyiter_ndim_get, - NULL, NULL, NULL}, - {"nop", - (getter)npyiter_nop_get, - NULL, NULL, NULL}, - {"itersize", - (getter)npyiter_itersize_get, - NULL, NULL, NULL}, - {"finished", - (getter)npyiter_finished_get, - NULL, NULL, NULL}, - - {NULL, NULL, NULL, NULL, NULL}, -}; - -NPY_NO_EXPORT PySequenceMethods npyiter_as_sequence = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)npyiter_seq_length, /*sq_length*/ - (binaryfunc)NULL, /*sq_concat*/ - (ssizeargfunc)NULL, /*sq_repeat*/ - (ssizeargfunc)npyiter_seq_item, /*sq_item*/ - (ssizessizeargfunc)npyiter_seq_slice, /*sq_slice*/ - (ssizeobjargproc)npyiter_seq_ass_item, /*sq_ass_item*/ - (ssizessizeobjargproc)npyiter_seq_ass_slice,/*sq_ass_slice*/ - (objobjproc)NULL, /*sq_contains */ - (binaryfunc)NULL, /*sq_inplace_concat */ - (ssizeargfunc)NULL, /*sq_inplace_repeat */ -#else - (inquiry)npyiter_seq_length, /*sq_length*/ - (binaryfunc)NULL, /*sq_concat is handled by nb_add*/ - (intargfunc)NULL, /*sq_repeat is handled nb_multiply*/ - (intargfunc)npyiter_seq_item, /*sq_item*/ - (intintargfunc)npyiter_seq_slice, /*sq_slice*/ - (intobjargproc)npyiter_seq_ass_item, /*sq_ass_item*/ - (intintobjargproc)npyiter_seq_ass_slice,/*sq_ass_slice*/ - (objobjproc)NULL, /*sq_contains */ - (binaryfunc)NULL, /*sg_inplace_concat */ - (intargfunc)NULL /*sg_inplace_repeat */ -#endif -}; - -NPY_NO_EXPORT PyMappingMethods npyiter_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)npyiter_seq_length, /*mp_length*/ -#else - (inquiry)npyiter_seq_length, /*mp_length*/ -#endif - (binaryfunc)npyiter_subscript, /*mp_subscript*/ - (objobjargproc)npyiter_ass_subscript, /*mp_ass_subscript*/ -}; - -NPY_NO_EXPORT PyTypeObject NpyIter_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - 
PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.nditer", /* tp_name */ - sizeof(NewNpyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)npyiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - &npyiter_as_sequence, /* tp_as_sequence */ - &npyiter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)npyiter_next, /* tp_iternext */ - npyiter_methods, /* tp_methods */ - npyiter_members, /* tp_members */ - npyiter_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)npyiter_init, /* tp_init */ - 0, /* tp_alloc */ - npyiter_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; diff --git a/numpy-1.6.2/numpy/core/src/multiarray/nditer_pywrap.h b/numpy-1.6.2/numpy/core/src/multiarray/nditer_pywrap.h deleted file mode 100644 index 49eb5d89de..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/nditer_pywrap.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __NDITER_PYWRAP_H -#define __NDITER_PYWRAP_H - -NPY_NO_EXPORT PyObject * -NpyIter_NestedIters(PyObject *NPY_UNUSED(self), - PyObject *args, PyObject *kwds); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/number.c b/numpy-1.6.2/numpy/core/src/multiarray/number.c deleted file mode 100644 index d5c09379d7..0000000000 --- 
a/numpy-1.6.2/numpy/core/src/multiarray/number.c +++ /dev/null @@ -1,899 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -/*#include */ -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "number.h" - -/************************************************************************* - **************** Implement Number Protocol **************************** - *************************************************************************/ - -NPY_NO_EXPORT NumericOps n_ops; /* NB: static objects initialized to zero */ - -/* - * Dictionary can contain any of the numeric operations, by name. - * Those not present will not be changed - */ - -/* FIXME - macro contains a return */ -#define SET(op) temp = PyDict_GetItemString(dict, #op); \ - if (temp != NULL) { \ - if (!(PyCallable_Check(temp))) { \ - return -1; \ - } \ - Py_INCREF(temp); \ - Py_XDECREF(n_ops.op); \ - n_ops.op = temp; \ - } - - -/*NUMPY_API - *Set internal structure with number functions that all arrays will use - */ -NPY_NO_EXPORT int -PyArray_SetNumericOps(PyObject *dict) -{ - PyObject *temp = NULL; - SET(add); - SET(subtract); - SET(multiply); - SET(divide); - SET(remainder); - SET(power); - SET(square); - SET(reciprocal); - SET(ones_like); - SET(sqrt); - SET(negative); - SET(absolute); - SET(invert); - SET(left_shift); - SET(right_shift); - SET(bitwise_and); - SET(bitwise_or); - SET(bitwise_xor); - SET(less); - SET(less_equal); - SET(equal); - SET(not_equal); - SET(greater); - SET(greater_equal); - SET(floor_divide); - SET(true_divide); - SET(logical_or); - SET(logical_and); - SET(floor); - SET(ceil); - SET(maximum); - SET(minimum); - SET(rint); - SET(conjugate); - return 0; -} - -/* FIXME - macro contains goto */ -#define GET(op) if (n_ops.op && \ - (PyDict_SetItemString(dict, #op, n_ops.op)==-1)) \ - goto fail; - -/*NUMPY_API - Get dictionary showing number functions that all arrays will 
use -*/ -NPY_NO_EXPORT PyObject * -PyArray_GetNumericOps(void) -{ - PyObject *dict; - if ((dict = PyDict_New())==NULL) - return NULL; - GET(add); - GET(subtract); - GET(multiply); - GET(divide); - GET(remainder); - GET(power); - GET(square); - GET(reciprocal); - GET(ones_like); - GET(sqrt); - GET(negative); - GET(absolute); - GET(invert); - GET(left_shift); - GET(right_shift); - GET(bitwise_and); - GET(bitwise_or); - GET(bitwise_xor); - GET(less); - GET(less_equal); - GET(equal); - GET(not_equal); - GET(greater); - GET(greater_equal); - GET(floor_divide); - GET(true_divide); - GET(logical_or); - GET(logical_and); - GET(floor); - GET(ceil); - GET(maximum); - GET(minimum); - GET(rint); - GET(conjugate); - return dict; - - fail: - Py_DECREF(dict); - return NULL; -} - -static PyObject * -_get_keywords(int rtype, PyArrayObject *out) -{ - PyObject *kwds = NULL; - if (rtype != PyArray_NOTYPE || out != NULL) { - kwds = PyDict_New(); - if (rtype != PyArray_NOTYPE) { - PyArray_Descr *descr; - descr = PyArray_DescrFromType(rtype); - if (descr) { - PyDict_SetItemString(kwds, "dtype", (PyObject *)descr); - Py_DECREF(descr); - } - } - if (out != NULL) { - PyDict_SetItemString(kwds, "out", (PyObject *)out); - } - } - return kwds; -} - -NPY_NO_EXPORT PyObject * -PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, - int rtype, PyArrayObject *out) -{ - PyObject *args, *ret = NULL, *meth; - PyObject *kwds; - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - args = Py_BuildValue("(Oi)", m1, axis); - kwds = _get_keywords(rtype, out); - meth = PyObject_GetAttrString(op, "reduce"); - if (meth && PyCallable_Check(meth)) { - ret = PyObject_Call(meth, args, kwds); - } - Py_DECREF(args); - Py_DECREF(meth); - Py_XDECREF(kwds); - return ret; -} - - -NPY_NO_EXPORT PyObject * -PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, - int rtype, PyArrayObject *out) -{ - PyObject *args, *ret = NULL, *meth; - PyObject 
*kwds; - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - args = Py_BuildValue("(Oi)", m1, axis); - kwds = _get_keywords(rtype, out); - meth = PyObject_GetAttrString(op, "accumulate"); - if (meth && PyCallable_Check(meth)) { - ret = PyObject_Call(meth, args, kwds); - } - Py_DECREF(args); - Py_DECREF(meth); - Py_XDECREF(kwds); - return ret; -} - - -NPY_NO_EXPORT PyObject * -PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "OO", m1, m2); -} - -NPY_NO_EXPORT PyObject * -PyArray_GenericUnaryFunction(PyArrayObject *m1, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "(O)", m1); -} - -static PyObject * -PyArray_GenericInplaceBinaryFunction(PyArrayObject *m1, - PyObject *m2, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "OOO", m1, m2, m1); -} - -static PyObject * -PyArray_GenericInplaceUnaryFunction(PyArrayObject *m1, PyObject *op) -{ - if (op == NULL) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - return PyObject_CallFunction(op, "OO", m1, m1); -} - -static PyObject * -array_add(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.add); -} - -static PyObject * -array_subtract(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.subtract); -} - -static PyObject * -array_multiply(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.multiply); -} - -static PyObject * -array_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.divide); -} - -static PyObject * -array_remainder(PyArrayObject *m1, PyObject *m2) -{ - return 
PyArray_GenericBinaryFunction(m1, m2, n_ops.remainder); -} - -static int -array_power_is_scalar(PyObject *o2, double* out_exponent) -{ - PyObject *temp; - const int optimize_fpexps = 1; - - if (PyInt_Check(o2)) { - *out_exponent = (double)PyInt_AsLong(o2); - return NPY_INTPOS_SCALAR; - } - if (optimize_fpexps && PyFloat_Check(o2)) { - *out_exponent = PyFloat_AsDouble(o2); - return NPY_FLOAT_SCALAR; - } - if ((PyArray_IsZeroDim(o2) && - ((PyArray_ISINTEGER((PyArrayObject *)o2) || - (optimize_fpexps && PyArray_ISFLOAT((PyArrayObject *)o2))))) || - PyArray_IsScalar(o2, Integer) || - (optimize_fpexps && PyArray_IsScalar(o2, Floating))) { - temp = Py_TYPE(o2)->tp_as_number->nb_float(o2); - if (temp != NULL) { - *out_exponent = PyFloat_AsDouble(o2); - Py_DECREF(temp); - if (PyArray_IsZeroDim(o2)) { - if (PyArray_ISINTEGER((PyArrayObject *)o2)) { - return NPY_INTPOS_SCALAR; - } - else { /* ISFLOAT */ - return NPY_FLOAT_SCALAR; - } - } - else if PyArray_IsScalar(o2, Integer) { - return NPY_INTPOS_SCALAR; - } - else { /* IsScalar(o2, Floating) */ - return NPY_FLOAT_SCALAR; - } - } - } -#if (PY_VERSION_HEX >= 0x02050000) - if (PyIndex_Check(o2)) { - PyObject* value = PyNumber_Index(o2); - Py_ssize_t val; - if (value==NULL) { - if (PyErr_Occurred()) { - PyErr_Clear(); - } - return NPY_NOSCALAR; - } - val = PyInt_AsSsize_t(value); - if (val == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return NPY_NOSCALAR; - } - *out_exponent = (double) val; - return NPY_INTPOS_SCALAR; - } -#endif - return NPY_NOSCALAR; -} - -static PyObject * -fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace) -{ - double exponent; - NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */ - - if (PyArray_Check(a1) && ((kind=array_power_is_scalar(o2, &exponent))>0)) { - PyObject *fastop = NULL; - if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { - if (exponent == 1.0) { - /* we have to do this one special, as the - "copy" method of array objects isn't set - up early enough to be added - by 
PyArray_SetNumericOps. - */ - if (inplace) { - Py_INCREF(a1); - return (PyObject *)a1; - } else { - return PyArray_Copy(a1); - } - } - else if (exponent == -1.0) { - fastop = n_ops.reciprocal; - } - else if (exponent == 0.0) { - fastop = n_ops.ones_like; - } - else if (exponent == 0.5) { - fastop = n_ops.sqrt; - } - else if (exponent == 2.0) { - fastop = n_ops.square; - } - else { - return NULL; - } - - if (inplace) { - return PyArray_GenericInplaceUnaryFunction(a1, fastop); - } else { - return PyArray_GenericUnaryFunction(a1, fastop); - } - } - /* Because this is called with all arrays, we need to - * change the output if the kind of the scalar is different - * than that of the input and inplace is not on --- - * (thus, the input should be up-cast) - */ - else if (exponent == 2.0) { - fastop = n_ops.multiply; - if (inplace) { - return PyArray_GenericInplaceBinaryFunction - (a1, (PyObject *)a1, fastop); - } - else { - PyArray_Descr *dtype = NULL; - PyObject *res; - - /* We only special-case the FLOAT_SCALAR and integer types */ - if (kind == NPY_FLOAT_SCALAR && PyArray_ISINTEGER(a1)) { - dtype = PyArray_DescrFromType(NPY_DOUBLE); - a1 = (PyArrayObject *)PyArray_CastToType(a1, dtype, - PyArray_ISFORTRAN(a1)); - if (a1 == NULL) { - return NULL; - } - } - else { - Py_INCREF(a1); - } - res = PyArray_GenericBinaryFunction(a1, (PyObject *)a1, fastop); - Py_DECREF(a1); - return res; - } - } - } - return NULL; -} - - -static PyObject * -array_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo)) -{ - /* modulo is ignored! 
*/ - PyObject *value; - value = fast_scalar_power(a1, o2, 0); - if (!value) { - value = PyArray_GenericBinaryFunction(a1, o2, n_ops.power); - } - return value; -} - - -static PyObject * -array_negative(PyArrayObject *m1) -{ - return PyArray_GenericUnaryFunction(m1, n_ops.negative); -} - -static PyObject * -array_absolute(PyArrayObject *m1) -{ - return PyArray_GenericUnaryFunction(m1, n_ops.absolute); -} - -static PyObject * -array_invert(PyArrayObject *m1) -{ - return PyArray_GenericUnaryFunction(m1, n_ops.invert); -} - -static PyObject * -array_left_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.left_shift); -} - -static PyObject * -array_right_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.right_shift); -} - -static PyObject * -array_bitwise_and(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_and); -} - -static PyObject * -array_bitwise_or(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_or); -} - -static PyObject * -array_bitwise_xor(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.bitwise_xor); -} - -static PyObject * -array_inplace_add(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.add); -} - -static PyObject * -array_inplace_subtract(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.subtract); -} - -static PyObject * -array_inplace_multiply(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply); -} - -static PyObject * -array_inplace_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.divide); -} - -static PyObject * -array_inplace_remainder(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, 
n_ops.remainder); -} - -static PyObject * -array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo)) -{ - /* modulo is ignored! */ - PyObject *value; - value = fast_scalar_power(a1, o2, 1); - if (!value) { - value = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power); - } - return value; -} - -static PyObject * -array_inplace_left_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.left_shift); -} - -static PyObject * -array_inplace_right_shift(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.right_shift); -} - -static PyObject * -array_inplace_bitwise_and(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_and); -} - -static PyObject * -array_inplace_bitwise_or(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_or); -} - -static PyObject * -array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_xor); -} - -static PyObject * -array_floor_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.floor_divide); -} - -static PyObject * -array_true_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericBinaryFunction(m1, m2, n_ops.true_divide); -} - -static PyObject * -array_inplace_floor_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, - n_ops.floor_divide); -} - -static PyObject * -array_inplace_true_divide(PyArrayObject *m1, PyObject *m2) -{ - return PyArray_GenericInplaceBinaryFunction(m1, m2, - n_ops.true_divide); -} - -static int -_array_nonzero(PyArrayObject *mp) -{ - intp n; - - n = PyArray_SIZE(mp); - if (n == 1) { - return mp->descr->f->nonzero(mp->data, mp); - } - else if (n == 0) { - return 0; - } - else { - PyErr_SetString(PyExc_ValueError, - "The truth 
value of an array " \ - "with more than one element is ambiguous. " \ - "Use a.any() or a.all()"); - return -1; - } -} - - - -static PyObject * -array_divmod(PyArrayObject *op1, PyObject *op2) -{ - PyObject *divp, *modp, *result; - - divp = array_floor_divide(op1, op2); - if (divp == NULL) { - return NULL; - } - else if(divp == Py_NotImplemented) { - return divp; - } - modp = array_remainder(op1, op2); - if (modp == NULL) { - Py_DECREF(divp); - return NULL; - } - else if(modp == Py_NotImplemented) { - Py_DECREF(divp); - return modp; - } - result = Py_BuildValue("OO", divp, modp); - Py_DECREF(divp); - Py_DECREF(modp); - return result; -} - - -NPY_NO_EXPORT PyObject * -array_int(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be"\ - " converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_int == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to int"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && PyDataType_REFCHK(PyArray_DESCR(pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - return NULL; - } - - pv2 = Py_TYPE(pv)->tp_as_number->nb_int(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_float(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to a "\ - "float; scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_float == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to float"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. - */ - if (PyArray_Check(pv) && PyDataType_REFCHK(PyArray_DESCR(pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_float(pv); - Py_DECREF(pv); - return pv2; -} - -#if !defined(NPY_PY3K) - -static PyObject * -array_long(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_long == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to long"); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && PyDataType_REFCHK(PyArray_DESCR(pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_long(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_oct(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_oct == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to oct"); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. - */ - if (PyArray_Check(pv) && PyDataType_REFCHK(PyArray_DESCR(pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_oct(pv); - Py_DECREF(pv); - return pv2; -} - -static PyObject * -array_hex(PyArrayObject *v) -{ - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = v->descr->f->getitem(v->data, v); - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_hex == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to hex"); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && PyDataType_REFCHK(PyArray_DESCR(pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_hex(pv); - Py_DECREF(pv); - return pv2; -} - -#endif - -static PyObject * -_array_copy_nice(PyArrayObject *self) -{ - return PyArray_Return((PyArrayObject *) PyArray_Copy(self)); -} - -#if PY_VERSION_HEX >= 0x02050000 -static PyObject * -array_index(PyArrayObject *v) -{ - if (!PyArray_ISINTEGER(v) || PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only integer arrays with " \ - "one element can be converted to an index"); - return NULL; - } - return v->descr->f->getitem(v->data, v); -} -#endif - - -NPY_NO_EXPORT PyNumberMethods array_as_number = { - (binaryfunc)array_add, /*nb_add*/ - (binaryfunc)array_subtract, /*nb_subtract*/ - (binaryfunc)array_multiply, /*nb_multiply*/ -#if defined(NPY_PY3K) -#else - (binaryfunc)array_divide, /*nb_divide*/ -#endif - (binaryfunc)array_remainder, /*nb_remainder*/ - (binaryfunc)array_divmod, /*nb_divmod*/ - (ternaryfunc)array_power, /*nb_power*/ - (unaryfunc)array_negative, /*nb_neg*/ - (unaryfunc)_array_copy_nice, /*nb_pos*/ - (unaryfunc)array_absolute, /*(unaryfunc)array_abs,*/ - (inquiry)_array_nonzero, /*nb_nonzero*/ - (unaryfunc)array_invert, /*nb_invert*/ - (binaryfunc)array_left_shift, /*nb_lshift*/ - (binaryfunc)array_right_shift, /*nb_rshift*/ - (binaryfunc)array_bitwise_and, /*nb_and*/ - (binaryfunc)array_bitwise_xor, /*nb_xor*/ - (binaryfunc)array_bitwise_or, /*nb_or*/ -#if defined(NPY_PY3K) -#else - 0, /*nb_coerce*/ -#endif - (unaryfunc)array_int, /*nb_int*/ -#if defined(NPY_PY3K) - 0, /*nb_reserved*/ -#else - (unaryfunc)array_long, /*nb_long*/ -#endif - (unaryfunc)array_float, /*nb_float*/ -#if defined(NPY_PY3K) -#else - (unaryfunc)array_oct, /*nb_oct*/ - (unaryfunc)array_hex, /*nb_hex*/ -#endif - - /* - * This code adds augmented assignment functionality - * that was made available in Python 2.0 - */ - 
(binaryfunc)array_inplace_add, /*inplace_add*/ - (binaryfunc)array_inplace_subtract, /*inplace_subtract*/ - (binaryfunc)array_inplace_multiply, /*inplace_multiply*/ -#if defined(NPY_PY3K) -#else - (binaryfunc)array_inplace_divide, /*inplace_divide*/ -#endif - (binaryfunc)array_inplace_remainder, /*inplace_remainder*/ - (ternaryfunc)array_inplace_power, /*inplace_power*/ - (binaryfunc)array_inplace_left_shift, /*inplace_lshift*/ - (binaryfunc)array_inplace_right_shift, /*inplace_rshift*/ - (binaryfunc)array_inplace_bitwise_and, /*inplace_and*/ - (binaryfunc)array_inplace_bitwise_xor, /*inplace_xor*/ - (binaryfunc)array_inplace_bitwise_or, /*inplace_or*/ - - (binaryfunc)array_floor_divide, /*nb_floor_divide*/ - (binaryfunc)array_true_divide, /*nb_true_divide*/ - (binaryfunc)array_inplace_floor_divide, /*nb_inplace_floor_divide*/ - (binaryfunc)array_inplace_true_divide, /*nb_inplace_true_divide*/ - -#if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)array_index, /* nb_index */ -#endif - -}; diff --git a/numpy-1.6.2/numpy/core/src/multiarray/number.h b/numpy-1.6.2/numpy/core/src/multiarray/number.h deleted file mode 100644 index 8f1cb3b913..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/number.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef _NPY_ARRAY_NUMBER_H_ -#define _NPY_ARRAY_NUMBER_H_ - -typedef struct { - PyObject *add; - PyObject *subtract; - PyObject *multiply; - PyObject *divide; - PyObject *remainder; - PyObject *power; - PyObject *square; - PyObject *reciprocal; - PyObject *ones_like; - PyObject *sqrt; - PyObject *negative; - PyObject *absolute; - PyObject *invert; - PyObject *left_shift; - PyObject *right_shift; - PyObject *bitwise_and; - PyObject *bitwise_xor; - PyObject *bitwise_or; - PyObject *less; - PyObject *less_equal; - PyObject *equal; - PyObject *not_equal; - PyObject *greater; - PyObject *greater_equal; - PyObject *floor_divide; - PyObject *true_divide; - PyObject *logical_or; - PyObject *logical_and; - PyObject *floor; - PyObject *ceil; - PyObject 
*maximum; - PyObject *minimum; - PyObject *rint; - PyObject *conjugate; -} NumericOps; - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT NumericOps n_ops; -extern NPY_NO_EXPORT PyNumberMethods array_as_number; -#else -NPY_NO_EXPORT NumericOps n_ops; -NPY_NO_EXPORT PyNumberMethods array_as_number; -#endif - -NPY_NO_EXPORT PyObject * -array_int(PyArrayObject *v); - -NPY_NO_EXPORT int -PyArray_SetNumericOps(PyObject *dict); - -NPY_NO_EXPORT PyObject * -PyArray_GetNumericOps(void); - -NPY_NO_EXPORT PyObject * -PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op); - -NPY_NO_EXPORT PyObject * -PyArray_GenericUnaryFunction(PyArrayObject *m1, PyObject *op); - -NPY_NO_EXPORT PyObject * -PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, - int rtype, PyArrayObject *out); - -NPY_NO_EXPORT PyObject * -PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, - int rtype, PyArrayObject *out); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/numpymemoryview.c b/numpy-1.6.2/numpy/core/src/multiarray/numpymemoryview.c deleted file mode 100644 index 97d20577ed..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/numpymemoryview.c +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Simple PyMemoryView'ish object for Python 2.6 compatibility. - * - * On Python >= 2.7, we can use the actual PyMemoryView objects. - * - * Some code copied from the CPython implementation. 
- */ - -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" -#include "numpy/npy_3kcompat.h" - -#include "numpymemoryview.h" - - -#if (PY_VERSION_HEX >= 0x02060000) && (PY_VERSION_HEX < 0x02070000) - -/* - * Memory allocation - */ - -static int -memorysimpleview_traverse(PyMemorySimpleViewObject *self, - visitproc visit, void *arg) -{ - if (self->base != NULL) - Py_VISIT(self->base); - if (self->view.obj != NULL) - Py_VISIT(self->view.obj); - return 0; -} - -static int -memorysimpleview_clear(PyMemorySimpleViewObject *self) -{ - Py_CLEAR(self->base); - PyBuffer_Release(&self->view); - self->view.obj = NULL; - return 0; -} - -static void -memorysimpleview_dealloc(PyMemorySimpleViewObject *self) -{ - PyObject_GC_UnTrack(self); - Py_CLEAR(self->base); - if (self->view.obj != NULL) { - PyBuffer_Release(&self->view); - self->view.obj = NULL; - } - PyObject_GC_Del(self); -} - -static PyObject * -memorysimpleview_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) -{ - PyObject *obj; - static char *kwlist[] = {"object", 0}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:memorysimpleview", kwlist, - &obj)) { - return NULL; - } - return PyMemorySimpleView_FromObject(obj); -} - - -/* - * Buffer interface - */ - -static int -memorysimpleview_getbuffer(PyMemorySimpleViewObject *self, - Py_buffer *view, int flags) -{ - return PyObject_GetBuffer(self->base, view, flags); -} - -static void -memorysimpleview_releasebuffer(PyMemorySimpleViewObject *self, - Py_buffer *view) -{ - PyBuffer_Release(view); -} - -static PyBufferProcs memorysimpleview_as_buffer = { - (readbufferproc)0, /*bf_getreadbuffer*/ - (writebufferproc)0, /*bf_getwritebuffer*/ - (segcountproc)0, /*bf_getsegcount*/ - (charbufferproc)0, /*bf_getcharbuffer*/ - (getbufferproc)memorysimpleview_getbuffer, /* bf_getbuffer */ - 
(releasebufferproc)memorysimpleview_releasebuffer, /* bf_releasebuffer */ -}; - - -/* - * Getters - */ - -static PyObject * -_IntTupleFromSsizet(int len, Py_ssize_t *vals) -{ - int i; - PyObject *o; - PyObject *intTuple; - - if (vals == NULL) { - Py_INCREF(Py_None); - return Py_None; - } - intTuple = PyTuple_New(len); - if (!intTuple) return NULL; - for(i=0; iview.format); -} - -static PyObject * -memorysimpleview_itemsize_get(PyMemorySimpleViewObject *self) -{ - return PyLong_FromSsize_t(self->view.itemsize); -} - -static PyObject * -memorysimpleview_shape_get(PyMemorySimpleViewObject *self) -{ - return _IntTupleFromSsizet(self->view.ndim, self->view.shape); -} - -static PyObject * -memorysimpleview_strides_get(PyMemorySimpleViewObject *self) -{ - return _IntTupleFromSsizet(self->view.ndim, self->view.strides); -} - -static PyObject * -memorysimpleview_suboffsets_get(PyMemorySimpleViewObject *self) -{ - return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets); -} - -static PyObject * -memorysimpleview_readonly_get(PyMemorySimpleViewObject *self) -{ - return PyBool_FromLong(self->view.readonly); -} - -static PyObject * -memorysimpleview_ndim_get(PyMemorySimpleViewObject *self) -{ - return PyLong_FromLong(self->view.ndim); -} - - -static PyGetSetDef memorysimpleview_getsets[] = -{ - {"format", (getter)memorysimpleview_format_get, NULL, NULL, NULL}, - {"itemsize", (getter)memorysimpleview_itemsize_get, NULL, NULL, NULL}, - {"shape", (getter)memorysimpleview_shape_get, NULL, NULL, NULL}, - {"strides", (getter)memorysimpleview_strides_get, NULL, NULL, NULL}, - {"suboffsets", (getter)memorysimpleview_suboffsets_get, NULL, NULL, NULL}, - {"readonly", (getter)memorysimpleview_readonly_get, NULL, NULL, NULL}, - {"ndim", (getter)memorysimpleview_ndim_get, NULL, NULL, NULL}, - {NULL, NULL, NULL, NULL} -}; - -NPY_NO_EXPORT PyTypeObject PyMemorySimpleView_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* 
ob_size */ -#endif - "numpy.memorysimpleview", - sizeof(PyMemorySimpleViewObject), - 0, /* tp_itemsize */ - /* methods */ - (destructor)memorysimpleview_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - (cmpfunc)0, /* tp_compare */ -#endif - (reprfunc)0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - &memorysimpleview_as_buffer, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC - | Py_TPFLAGS_HAVE_NEWBUFFER, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)memorysimpleview_traverse, /* tp_traverse */ - (inquiry)memorysimpleview_clear, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - memorysimpleview_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - memorysimpleview_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - - -/* - * Factory - */ -NPY_NO_EXPORT PyObject * -PyMemorySimpleView_FromObject(PyObject *base) -{ - PyMemorySimpleViewObject *mview = NULL; - Py_buffer view; - - if (Py_TYPE(base)->tp_as_buffer == NULL || - Py_TYPE(base)->tp_as_buffer->bf_getbuffer == NULL) { - - PyErr_SetString(PyExc_TypeError, - "cannot make memory view because object does " - "not have the buffer interface"); - return NULL; - } - - memset(&view, 0, sizeof(Py_buffer)); - if (PyObject_GetBuffer(base, &view, PyBUF_FULL_RO) < 0) - return NULL; - - mview = (PyMemorySimpleViewObject *) - 
PyObject_GC_New(PyMemorySimpleViewObject, &PyMemorySimpleView_Type); - if (mview == NULL) { - PyBuffer_Release(&view); - return NULL; - } - memcpy(&mview->view, &view, sizeof(Py_buffer)); - mview->base = base; - Py_INCREF(base); - - PyObject_GC_Track(mview); - return (PyObject *)mview; -} - - -/* - * Module initialization - */ - -NPY_NO_EXPORT int -_numpymemoryview_init(PyObject **typeobject) -{ - if (PyType_Ready(&PyMemorySimpleView_Type) < 0) { - return -1; - } - *typeobject = (PyObject*)&PyMemorySimpleView_Type; - return 0; -} - -#else - -NPY_NO_EXPORT int -_numpymemoryview_init(PyObject **typeobject) -{ - *typeobject = NULL; - return 0; -} - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/numpymemoryview.h b/numpy-1.6.2/numpy/core/src/multiarray/numpymemoryview.h deleted file mode 100644 index 3a26617543..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/numpymemoryview.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _NPY_PRIVATE_NUMPYMEMORYVIEW_H_ -#define _NPY_PRIVATE_NUMPYMEMORYVIEW_H_ - -/* - * Memoryview is introduced to 2.x series only in 2.7, so for supporting 2.6, - * we need to have a minimal implementation here. 
- */ -#if (PY_VERSION_HEX >= 0x02060000) && (PY_VERSION_HEX < 0x02070000) - -typedef struct { - PyObject_HEAD - PyObject *base; - Py_buffer view; -} PyMemorySimpleViewObject; - -NPY_NO_EXPORT PyObject * -PyMemorySimpleView_FromObject(PyObject *base); - -#define PyMemorySimpleView_GET_BUFFER(op) (&((PyMemorySimpleViewObject *)(op))->view) - -#define PyMemoryView_FromObject PyMemorySimpleView_FromObject -#define PyMemoryView_GET_BUFFER PyMemorySimpleView_GET_BUFFER - -#endif - -NPY_NO_EXPORT int -_numpymemoryview_init(PyObject **typeobject); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/numpyos.c b/numpy-1.6.2/numpy/core/src/multiarray/numpyos.c deleted file mode 100644 index d7f23cfaf4..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/numpyos.c +++ /dev/null @@ -1,690 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include - -#include -#include - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -/* - * From the C99 standard, section 7.19.6: The exponent always contains at least - * two digits, and only as many more digits as necessary to represent the - * exponent. - */ - -/* We force 3 digits on windows for python < 2.6 for compatibility reason */ -#if defined(MS_WIN32) && (PY_VERSION_HEX < 0x02060000) -#define MIN_EXPONENT_DIGITS 3 -#else -#define MIN_EXPONENT_DIGITS 2 -#endif - -/* - * Ensure that any exponent, if present, is at least MIN_EXPONENT_DIGITS - * in length. - */ -static void -_ensure_minimum_exponent_length(char* buffer, size_t buf_size) -{ - char *p = strpbrk(buffer, "eE"); - if (p && (*(p + 1) == '-' || *(p + 1) == '+')) { - char *start = p + 2; - int exponent_digit_cnt = 0; - int leading_zero_cnt = 0; - int in_leading_zeros = 1; - int significant_digit_cnt; - - /* Skip over the exponent and the sign. */ - p += 2; - - /* Find the end of the exponent, keeping track of leading zeros. 
*/ - while (*p && isdigit(Py_CHARMASK(*p))) { - if (in_leading_zeros && *p == '0') { - ++leading_zero_cnt; - } - if (*p != '0') { - in_leading_zeros = 0; - } - ++p; - ++exponent_digit_cnt; - } - - significant_digit_cnt = exponent_digit_cnt - leading_zero_cnt; - if (exponent_digit_cnt == MIN_EXPONENT_DIGITS) { - /* - * If there are 2 exactly digits, we're done, - * regardless of what they contain - */ - } - else if (exponent_digit_cnt > MIN_EXPONENT_DIGITS) { - int extra_zeros_cnt; - - /* - * There are more than 2 digits in the exponent. See - * if we can delete some of the leading zeros - */ - if (significant_digit_cnt < MIN_EXPONENT_DIGITS) { - significant_digit_cnt = MIN_EXPONENT_DIGITS; - } - extra_zeros_cnt = exponent_digit_cnt - significant_digit_cnt; - - /* - * Delete extra_zeros_cnt worth of characters from the - * front of the exponent - */ - assert(extra_zeros_cnt >= 0); - - /* - * Add one to significant_digit_cnt to copy the - * trailing 0 byte, thus setting the length - */ - memmove(start, start + extra_zeros_cnt, significant_digit_cnt + 1); - } - else { - /* - * If there are fewer than 2 digits, add zeros - * until there are 2, if there's enough room - */ - int zeros = MIN_EXPONENT_DIGITS - exponent_digit_cnt; - if (start + zeros + exponent_digit_cnt + 1 < buffer + buf_size) { - memmove(start + zeros, start, exponent_digit_cnt + 1); - memset(start, '0', zeros); - } - } - } -} - -/* - * Ensure that buffer has a decimal point in it. The decimal point - * will not be in the current locale, it will always be '.' - */ -static void -_ensure_decimal_point(char* buffer, size_t buf_size) -{ - int insert_count = 0; - char* chars_to_insert; - - /* search for the first non-digit character */ - char *p = buffer; - if (*p == '-' || *p == '+') - /* - * Skip leading sign, if present. I think this could only - * ever be '-', but it can't hurt to check for both. 
- */ - ++p; - while (*p && isdigit(Py_CHARMASK(*p))) { - ++p; - } - if (*p == '.') { - if (isdigit(Py_CHARMASK(*(p+1)))) { - /* - * Nothing to do, we already have a decimal - * point and a digit after it. - */ - } - else { - /* - * We have a decimal point, but no following - * digit. Insert a zero after the decimal. - */ - ++p; - chars_to_insert = "0"; - insert_count = 1; - } - } - else { - chars_to_insert = ".0"; - insert_count = 2; - } - if (insert_count) { - size_t buf_len = strlen(buffer); - if (buf_len + insert_count + 1 >= buf_size) { - /* - * If there is not enough room in the buffer - * for the additional text, just skip it. It's - * not worth generating an error over. - */ - } - else { - memmove(p + insert_count, p, buffer + strlen(buffer) - p + 1); - memcpy(p, chars_to_insert, insert_count); - } - } -} - -/* see FORMATBUFLEN in unicodeobject.c */ -#define FLOAT_FORMATBUFLEN 120 - -/* - * Given a string that may have a decimal point in the current - * locale, change it back to a dot. Since the string cannot get - * longer, no need for a maximum buffer size parameter. - */ -static void -_change_decimal_from_locale_to_dot(char* buffer) -{ - struct lconv *locale_data = localeconv(); - const char *decimal_point = locale_data->decimal_point; - - if (decimal_point[0] != '.' 
|| decimal_point[1] != 0) { - size_t decimal_point_len = strlen(decimal_point); - - if (*buffer == '+' || *buffer == '-') { - buffer++; - } - while (isdigit(Py_CHARMASK(*buffer))) { - buffer++; - } - if (strncmp(buffer, decimal_point, decimal_point_len) == 0) { - *buffer = '.'; - buffer++; - if (decimal_point_len > 1) { - /* buffer needs to get smaller */ - size_t rest_len = strlen(buffer + (decimal_point_len - 1)); - memmove(buffer, buffer + (decimal_point_len - 1), rest_len); - buffer[rest_len] = 0; - } - } - } -} - -/* - * Check that the format string is a valid one for NumPyOS_ascii_format* - */ -static int -_check_ascii_format(const char *format) -{ - char format_char; - size_t format_len = strlen(format); - - /* The last character in the format string must be the format char */ - format_char = format[format_len - 1]; - - if (format[0] != '%') { - return -1; - } - - /* - * I'm not sure why this test is here. It's ensuring that the format - * string after the first character doesn't have a single quote, a - * lowercase l, or a percent. This is the reverse of the commented-out - * test about 10 lines ago. - */ - if (strpbrk(format + 1, "'l%")) { - return -1; - } - - /* - * Also curious about this function is that it accepts format strings - * like "%xg", which are invalid for floats. In general, the - * interface to this function is not very good, but changing it is - * difficult because it's a public API. 
- */ - if (!(format_char == 'e' || format_char == 'E' - || format_char == 'f' || format_char == 'F' - || format_char == 'g' || format_char == 'G')) { - return -1; - } - - return 0; -} - -/* - * Fix the generated string: make sure the decimal is ., that exponent has a - * minimal number of digits, and that it has a decimal + one digit after that - * decimal if decimal argument != 0 (Same effect that 'Z' format in - * PyOS_ascii_formatd - */ -static char* -_fix_ascii_format(char* buf, size_t buflen, int decimal) -{ - /* - * Get the current locale, and find the decimal point string. - * Convert that string back to a dot. - */ - _change_decimal_from_locale_to_dot(buf); - - /* - * If an exponent exists, ensure that the exponent is at least - * MIN_EXPONENT_DIGITS digits, providing the buffer is large enough - * for the extra zeros. Also, if there are more than - * MIN_EXPONENT_DIGITS, remove as many zeros as possible until we get - * back to MIN_EXPONENT_DIGITS - */ - _ensure_minimum_exponent_length(buf, buflen); - - if (decimal != 0) { - _ensure_decimal_point(buf, buflen); - } - - return buf; -} - -/* - * NumPyOS_ascii_format*: - * - buffer: A buffer to place the resulting string in - * - buf_size: The length of the buffer. - * - format: The printf()-style format to use for the code to use for - * converting. - * - value: The value to convert - * - decimal: if != 0, always has a decimal, and at leasat one digit after - * the decimal. This has the same effect as passing 'Z' in the origianl - * PyOS_ascii_formatd - * - * This is similar to PyOS_ascii_formatd in python > 2.6, except that it does - * not handle 'n', and handles nan / inf. - * - * Converts a #gdouble to a string, using the '.' as decimal point. To format - * the number you pass in a printf()-style format string. Allowed conversion - * specifiers are 'e', 'E', 'f', 'F', 'g', 'G'. - * - * Return value: The pointer to the buffer with the converted string. 
- */ -#define _ASCII_FORMAT(type, suffix, print_type) \ - NPY_NO_EXPORT char* \ - NumPyOS_ascii_format ## suffix(char *buffer, size_t buf_size, \ - const char *format, \ - type val, int decimal) \ - { \ - if (npy_isfinite(val)) { \ - if(_check_ascii_format(format)) { \ - return NULL; \ - } \ - PyOS_snprintf(buffer, buf_size, format, (print_type)val); \ - return _fix_ascii_format(buffer, buf_size, decimal); \ - } \ - else if (npy_isnan(val)){ \ - if (buf_size < 4) { \ - return NULL; \ - } \ - strcpy(buffer, "nan"); \ - } \ - else { \ - if (npy_signbit(val)) { \ - if (buf_size < 5) { \ - return NULL; \ - } \ - strcpy(buffer, "-inf"); \ - } \ - else { \ - if (buf_size < 4) { \ - return NULL; \ - } \ - strcpy(buffer, "inf"); \ - } \ - } \ - return buffer; \ - } - -_ASCII_FORMAT(float, f, float) -_ASCII_FORMAT(double, d, double) -#ifndef FORCE_NO_LONG_DOUBLE_FORMATTING -_ASCII_FORMAT(long double, l, long double) -#else -_ASCII_FORMAT(long double, l, double) -#endif - -/* - * NumPyOS_ascii_isspace: - * - * Same as isspace under C locale - */ -NPY_NO_EXPORT int -NumPyOS_ascii_isspace(char c) -{ - return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' - || c == '\v'; -} - - -/* - * NumPyOS_ascii_isalpha: - * - * Same as isalpha under C locale - */ -static int -NumPyOS_ascii_isalpha(char c) -{ - return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); -} - - -/* - * NumPyOS_ascii_isdigit: - * - * Same as isdigit under C locale - */ -static int -NumPyOS_ascii_isdigit(char c) -{ - return (c >= '0' && c <= '9'); -} - - -/* - * NumPyOS_ascii_isalnum: - * - * Same as isalnum under C locale - */ -static int -NumPyOS_ascii_isalnum(char c) -{ - return NumPyOS_ascii_isdigit(c) || NumPyOS_ascii_isalpha(c); -} - - -/* - * NumPyOS_ascii_tolower: - * - * Same as tolower under C locale - */ -static char -NumPyOS_ascii_tolower(char c) -{ - if (c >= 'A' && c <= 'Z') { - return c + ('a'-'A'); - } - return c; -} - - -/* - * NumPyOS_ascii_strncasecmp: - * - * Same as strncasecmp 
under C locale - */ -static int -NumPyOS_ascii_strncasecmp(const char* s1, const char* s2, size_t len) -{ - int diff; - while (len > 0 && *s1 != '\0' && *s2 != '\0') { - diff = ((int)NumPyOS_ascii_tolower(*s1)) - - ((int)NumPyOS_ascii_tolower(*s2)); - if (diff != 0) { - return diff; - } - ++s1; - ++s2; - --len; - } - if (len > 0) { - return ((int)*s1) - ((int)*s2); - } - return 0; -} - -/* - * _NumPyOS_ascii_strtod_plain: - * - * PyOS_ascii_strtod work-alike, with no enhanced features, - * for forward compatibility with Python >= 2.7 - */ -static double -NumPyOS_ascii_strtod_plain(const char *s, char** endptr) -{ - double result; -#if PY_VERSION_HEX >= 0x02070000 - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API - result = PyOS_string_to_double(s, endptr, NULL); - if (PyErr_Occurred()) { - if (endptr) { - *endptr = (char*)s; - } - PyErr_Clear(); - } - NPY_DISABLE_C_API -#else - result = PyOS_ascii_strtod(s, endptr); -#endif - return result; -} - -/* - * NumPyOS_ascii_strtod: - * - * Work around bugs in PyOS_ascii_strtod - */ -NPY_NO_EXPORT double -NumPyOS_ascii_strtod(const char *s, char** endptr) -{ - struct lconv *locale_data = localeconv(); - const char *decimal_point = locale_data->decimal_point; - size_t decimal_point_len = strlen(decimal_point); - - char buffer[FLOAT_FORMATBUFLEN+1]; - const char *p; - char *q; - size_t n; - double result; - - while (NumPyOS_ascii_isspace(*s)) { - ++s; - } - - /* - * ##1 - * - * Recognize POSIX inf/nan representations on all platforms. 
- */ - p = s; - result = 1.0; - if (*p == '-') { - result = -1.0; - ++p; - } - else if (*p == '+') { - ++p; - } - if (NumPyOS_ascii_strncasecmp(p, "nan", 3) == 0) { - p += 3; - if (*p == '(') { - ++p; - while (NumPyOS_ascii_isalnum(*p) || *p == '_') { - ++p; - } - if (*p == ')') { - ++p; - } - } - if (endptr != NULL) { - *endptr = (char*)p; - } - return NPY_NAN; - } - else if (NumPyOS_ascii_strncasecmp(p, "inf", 3) == 0) { - p += 3; - if (NumPyOS_ascii_strncasecmp(p, "inity", 5) == 0) { - p += 5; - } - if (endptr != NULL) { - *endptr = (char*)p; - } - return result*NPY_INFINITY; - } - /* End of ##1 */ - - /* - * ## 2 - * - * At least Python versions <= 2.5.2 and <= 2.6.1 - * - * Fails to do best-efforts parsing of strings of the form "1234" - * where is the decimal point under the foreign locale. - */ - if (decimal_point[0] != '.' || decimal_point[1] != 0) { - p = s; - if (*p == '+' || *p == '-') { - ++p; - } - while (*p >= '0' && *p <= '9') { - ++p; - } - if (strncmp(p, decimal_point, decimal_point_len) == 0) { - n = (size_t)(p - s); - if (n > FLOAT_FORMATBUFLEN) { - n = FLOAT_FORMATBUFLEN; - } - memcpy(buffer, s, n); - buffer[n] = '\0'; - result = NumPyOS_ascii_strtod_plain(buffer, &q); - if (endptr != NULL) { - *endptr = (char*)(s + (q - buffer)); - } - return result; - } - } - /* End of ##2 */ - - return NumPyOS_ascii_strtod_plain(s, endptr); -} - - -/* - * NumPyOS_ascii_ftolf: - * * fp: FILE pointer - * * value: Place to store the value read - * - * Similar to PyOS_ascii_strtod, except that it reads input from a file. - * - * Similarly to fscanf, this function always consumes leading whitespace, - * and any text that could be the leading part in valid input. - * - * Return value: similar to fscanf. - * * 0 if no number read, - * * 1 if a number read, - * * EOF if end-of-file met before reading anything. 
- */ -NPY_NO_EXPORT int -NumPyOS_ascii_ftolf(FILE *fp, double *value) -{ - char buffer[FLOAT_FORMATBUFLEN + 1]; - char *endp; - char *p; - int c; - int ok; - - /* - * Pass on to PyOS_ascii_strtod the leftmost matching part in regexp - * - * \s*[+-]? ( [0-9]*\.[0-9]+([eE][+-]?[0-9]+) - * | nan ( \([:alphanum:_]*\) )? - * | inf(inity)? - * ) - * - * case-insensitively. - * - * The "do { ... } while (0)" wrapping in macros ensures that they behave - * properly eg. in "if ... else" structures. - */ - -#define END_MATCH() \ - goto buffer_filled - -#define NEXT_CHAR() \ - do { \ - if (c == EOF || endp >= buffer + FLOAT_FORMATBUFLEN) \ - END_MATCH(); \ - *endp++ = (char)c; \ - c = getc(fp); \ - } while (0) - -#define MATCH_ALPHA_STRING_NOCASE(string) \ - do { \ - for (p=(string); *p!='\0' && (c==*p || c+('a'-'A')==*p); ++p) \ - NEXT_CHAR(); \ - if (*p != '\0') END_MATCH(); \ - } while (0) - -#define MATCH_ONE_OR_NONE(condition) \ - do { if (condition) NEXT_CHAR(); } while (0) - -#define MATCH_ONE_OR_MORE(condition) \ - do { \ - ok = 0; \ - while (condition) { NEXT_CHAR(); ok = 1; } \ - if (!ok) END_MATCH(); \ - } while (0) - -#define MATCH_ZERO_OR_MORE(condition) \ - while (condition) { NEXT_CHAR(); } - - /* 1. emulate fscanf EOF handling */ - c = getc(fp); - if (c == EOF) { - return EOF; - } - /* 2. consume leading whitespace unconditionally */ - while (NumPyOS_ascii_isspace(c)) { - c = getc(fp); - } - - /* 3. 
start reading matching input to buffer */ - endp = buffer; - - /* 4.1 sign (optional) */ - MATCH_ONE_OR_NONE(c == '+' || c == '-'); - - /* 4.2 nan, inf, infinity; [case-insensitive] */ - if (c == 'n' || c == 'N') { - NEXT_CHAR(); - MATCH_ALPHA_STRING_NOCASE("an"); - - /* accept nan([:alphanum:_]*), similarly to strtod */ - if (c == '(') { - NEXT_CHAR(); - MATCH_ZERO_OR_MORE(NumPyOS_ascii_isalnum(c) || c == '_'); - if (c == ')') { - NEXT_CHAR(); - } - } - END_MATCH(); - } - else if (c == 'i' || c == 'I') { - NEXT_CHAR(); - MATCH_ALPHA_STRING_NOCASE("nfinity"); - END_MATCH(); - } - - /* 4.3 mantissa */ - MATCH_ZERO_OR_MORE(NumPyOS_ascii_isdigit(c)); - - if (c == '.') { - NEXT_CHAR(); - MATCH_ONE_OR_MORE(NumPyOS_ascii_isdigit(c)); - } - - /* 4.4 exponent */ - if (c == 'e' || c == 'E') { - NEXT_CHAR(); - MATCH_ONE_OR_NONE(c == '+' || c == '-'); - MATCH_ONE_OR_MORE(NumPyOS_ascii_isdigit(c)); - } - - END_MATCH(); - -buffer_filled: - - ungetc(c, fp); - *endp = '\0'; - - /* 5. try to convert buffer. */ - *value = NumPyOS_ascii_strtod(buffer, &p); - - /* return 1 if something read, else 0 */ - return (buffer == p) ? 
0 : 1; -} - -#undef END_MATCH -#undef NEXT_CHAR -#undef MATCH_ALPHA_STRING_NOCASE -#undef MATCH_ONE_OR_NONE -#undef MATCH_ONE_OR_MORE -#undef MATCH_ZERO_OR_MORE diff --git a/numpy-1.6.2/numpy/core/src/multiarray/numpyos.h b/numpy-1.6.2/numpy/core/src/multiarray/numpyos.h deleted file mode 100644 index 6f247e6085..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/numpyos.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _NPY_NUMPYOS_H_ -#define _NPY_NUMPYOS_H_ - -NPY_NO_EXPORT char* -NumPyOS_ascii_formatd(char *buffer, size_t buf_size, - const char *format, - double val, int decimal); - -NPY_NO_EXPORT char* -NumPyOS_ascii_formatf(char *buffer, size_t buf_size, - const char *format, - float val, int decimal); - -NPY_NO_EXPORT char* -NumPyOS_ascii_formatl(char *buffer, size_t buf_size, - const char *format, - long double val, int decimal); - -NPY_NO_EXPORT double -NumPyOS_ascii_strtod(const char *s, char** endptr); - -NPY_NO_EXPORT int -NumPyOS_ascii_ftolf(FILE *fp, double *value); - -NPY_NO_EXPORT int -NumPyOS_ascii_isspace(char c); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/refcount.c b/numpy-1.6.2/numpy/core/src/multiarray/refcount.c deleted file mode 100644 index c9024a0836..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/refcount.c +++ /dev/null @@ -1,282 +0,0 @@ -/* - * This module corresponds to the `Special functions for PyArray_OBJECT` - * section in the numpy reference for C-API. 
- */ - -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -static void -_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype); - -/* Incref all objects found at this record */ -/*NUMPY_API - */ -NPY_NO_EXPORT void -PyArray_Item_INCREF(char *data, PyArray_Descr *descr) -{ - PyObject *temp; - - if (!PyDataType_REFCHK(descr)) { - return; - } - if (descr->type_num == PyArray_OBJECT) { - NPY_COPY_PYOBJECT_PTR(&temp, data); - Py_XINCREF(temp); - } - else if (PyDescr_HASFIELDS(descr)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - return; - } - PyArray_Item_INCREF(data + offset, new); - } - } - return; -} - -/* XDECREF all objects found at this record */ -/*NUMPY_API - */ -NPY_NO_EXPORT void -PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) -{ - PyObject *temp; - - if (!PyDataType_REFCHK(descr)) { - return; - } - - if (descr->type_num == PyArray_OBJECT) { - NPY_COPY_PYOBJECT_PTR(&temp, data); - Py_XDECREF(temp); - } - else if PyDescr_HASFIELDS(descr) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - - while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { - return; - } - PyArray_Item_XDECREF(data + offset, new); - } - } - return; -} - -/* Used for arrays of python objects to increment the reference count of */ -/* every python object in the array. */ -/*NUMPY_API - For object arrays, increment all internal references. 
-*/ -NPY_NO_EXPORT int -PyArray_INCREF(PyArrayObject *mp) -{ - intp i, n; - PyObject **data; - PyObject *temp; - PyArrayIterObject *it; - - if (!PyDataType_REFCHK(mp->descr)) { - return 0; - } - if (mp->descr->type_num != PyArray_OBJECT) { - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) { - return -1; - } - while(it->index < it->size) { - PyArray_Item_INCREF(it->dataptr, mp->descr); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - return 0; - } - - if (PyArray_ISONESEGMENT(mp)) { - data = (PyObject **)mp->data; - n = PyArray_SIZE(mp); - if (PyArray_ISALIGNED(mp)) { - for (i = 0; i < n; i++, data++) { - Py_XINCREF(*data); - } - } - else { - for( i = 0; i < n; i++, data++) { - NPY_COPY_PYOBJECT_PTR(&temp, data); - Py_XINCREF(temp); - } - } - } - else { /* handles misaligned data too */ - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) { - return -1; - } - while(it->index < it->size) { - NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr); - Py_XINCREF(temp); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return 0; -} - -/*NUMPY_API - Decrement all internal references for object arrays. 
- (or arrays with object fields) -*/ -NPY_NO_EXPORT int -PyArray_XDECREF(PyArrayObject *mp) -{ - intp i, n; - PyObject **data; - PyObject *temp; - PyArrayIterObject *it; - - if (!PyDataType_REFCHK(mp->descr)) { - return 0; - } - if (mp->descr->type_num != PyArray_OBJECT) { - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) { - return -1; - } - while(it->index < it->size) { - PyArray_Item_XDECREF(it->dataptr, mp->descr); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - return 0; - } - - if (PyArray_ISONESEGMENT(mp)) { - data = (PyObject **)mp->data; - n = PyArray_SIZE(mp); - if (PyArray_ISALIGNED(mp)) { - for (i = 0; i < n; i++, data++) Py_XDECREF(*data); - } - else { - for (i = 0; i < n; i++, data++) { - NPY_COPY_PYOBJECT_PTR(&temp, data); - Py_XDECREF(temp); - } - } - } - else { /* handles misaligned data too */ - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) { - return -1; - } - while(it->index < it->size) { - NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr); - Py_XDECREF(temp); - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - } - return 0; -} - -/*NUMPY_API - * Assumes contiguous - */ -NPY_NO_EXPORT void -PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) -{ - intp i,n; - n = PyArray_SIZE(arr); - if (arr->descr->type_num == PyArray_OBJECT) { - PyObject **optr; - optr = (PyObject **)(arr->data); - n = PyArray_SIZE(arr); - if (obj == NULL) { - for (i = 0; i < n; i++) { - *optr++ = NULL; - } - } - else { - for (i = 0; i < n; i++) { - Py_INCREF(obj); - *optr++ = obj; - } - } - } - else { - char *optr; - optr = arr->data; - for (i = 0; i < n; i++) { - _fillobject(optr, obj, arr->descr); - optr += arr->descr->elsize; - } - } -} - -static void -_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) -{ - if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - if ((obj == Py_None) || (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) { - return; - } - else { - PyObject *arr; - Py_INCREF(dtype); - arr = 
PyArray_NewFromDescr(&PyArray_Type, dtype, - 0, NULL, NULL, NULL, - 0, NULL); - if (arr!=NULL) { - dtype->f->setitem(obj, optr, arr); - } - Py_XDECREF(arr); - } - } - else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - - while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - return; - } - _fillobject(optr + offset, obj, new); - } - } - else { - Py_XINCREF(obj); - NPY_COPY_PYOBJECT_PTR(optr, &obj); - return; - } -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/refcount.h b/numpy-1.6.2/numpy/core/src/multiarray/refcount.h deleted file mode 100644 index 761d53dd0d..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/refcount.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _NPY_PRIVATE_REFCOUNT_H_ -#define _NPY_PRIVATE_REFCOUNT_H_ - -NPY_NO_EXPORT void -PyArray_Item_INCREF(char *data, PyArray_Descr *descr); - -NPY_NO_EXPORT void -PyArray_Item_XDECREF(char *data, PyArray_Descr *descr); - -NPY_NO_EXPORT int -PyArray_INCREF(PyArrayObject *mp); - -NPY_NO_EXPORT int -PyArray_XDECREF(PyArrayObject *mp); - -NPY_NO_EXPORT void -PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/scalarapi.c b/numpy-1.6.2/numpy/core/src/multiarray/scalarapi.c deleted file mode 100644 index 54872a8fe2..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/scalarapi.c +++ /dev/null @@ -1,827 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "ctors.h" -#include "descriptor.h" -#include "scalartypes.h" - -#include "common.h" - -static PyArray_Descr * -_descr_from_subtype(PyObject *type) -{ 
- PyObject *mro; - mro = ((PyTypeObject *)type)->tp_mro; - if (PyTuple_GET_SIZE(mro) < 2) { - return PyArray_DescrFromType(PyArray_OBJECT); - } - return PyArray_DescrFromTypeObject(PyTuple_GET_ITEM(mro, 1)); -} - -NPY_NO_EXPORT void * -scalar_value(PyObject *scalar, PyArray_Descr *descr) -{ - int type_num; - int align; - intp memloc; - if (descr == NULL) { - descr = PyArray_DescrFromScalar(scalar); - type_num = descr->type_num; - Py_DECREF(descr); - } - else { - type_num = descr->type_num; - } - switch (type_num) { -#define CASE(ut,lt) case NPY_##ut: return &(((Py##lt##ScalarObject *)scalar)->obval) - CASE(BOOL, Bool); - CASE(BYTE, Byte); - CASE(UBYTE, UByte); - CASE(SHORT, Short); - CASE(USHORT, UShort); - CASE(INT, Int); - CASE(UINT, UInt); - CASE(LONG, Long); - CASE(ULONG, ULong); - CASE(LONGLONG, LongLong); - CASE(ULONGLONG, ULongLong); - CASE(HALF, Half); - CASE(FLOAT, Float); - CASE(DOUBLE, Double); - CASE(LONGDOUBLE, LongDouble); - CASE(CFLOAT, CFloat); - CASE(CDOUBLE, CDouble); - CASE(CLONGDOUBLE, CLongDouble); - CASE(OBJECT, Object); - CASE(DATETIME, Datetime); - CASE(TIMEDELTA, Timedelta); -#undef CASE - case NPY_STRING: - return (void *)PyString_AS_STRING(scalar); - case NPY_UNICODE: - return (void *)PyUnicode_AS_DATA(scalar); - case NPY_VOID: - return ((PyVoidScalarObject *)scalar)->obval; - } - - /* - * Must be a user-defined type --- check to see which - * scalar it inherits from. 
- */ - -#define _CHK(cls) (PyObject_IsInstance(scalar, \ - (PyObject *)&Py##cls##ArrType_Type)) -#define _OBJ(lt) &(((Py##lt##ScalarObject *)scalar)->obval) -#define _IFCASE(cls) if _CHK(cls) return _OBJ(cls) - - if _CHK(Number) { - if _CHK(Integer) { - if _CHK(SignedInteger) { - _IFCASE(Byte); - _IFCASE(Short); - _IFCASE(Int); - _IFCASE(Long); - _IFCASE(LongLong); - if _CHK(TimeInteger) { - _IFCASE(Datetime); - _IFCASE(Timedelta); - } - } - else { - /* Unsigned Integer */ - _IFCASE(UByte); - _IFCASE(UShort); - _IFCASE(UInt); - _IFCASE(ULong); - _IFCASE(ULongLong); - } - } - else { - /* Inexact */ - if _CHK(Floating) { - _IFCASE(Half); - _IFCASE(Float); - _IFCASE(Double); - _IFCASE(LongDouble); - } - else { - /*ComplexFloating */ - _IFCASE(CFloat); - _IFCASE(CDouble); - _IFCASE(CLongDouble); - } - } - } - else if (_CHK(Bool)) { - return _OBJ(Bool); - } - else if (_CHK(Flexible)) { - if (_CHK(String)) { - return (void *)PyString_AS_STRING(scalar); - } - if (_CHK(Unicode)) { - return (void *)PyUnicode_AS_DATA(scalar); - } - if (_CHK(Void)) { - return ((PyVoidScalarObject *)scalar)->obval; - } - } - else { - _IFCASE(Object); - } - - - /* - * Use the alignment flag to figure out where the data begins - * after a PyObject_HEAD - */ - memloc = (intp)scalar; - memloc += sizeof(PyObject); - /* now round-up to the nearest alignment value */ - align = descr->alignment; - if (align > 1) { - memloc = ((memloc + align - 1)/align)*align; - } - return (void *)memloc; -#undef _IFCASE -#undef _OBJ -#undef _CHK -} - -/*NUMPY_API - * Convert to c-type - * - * no error checking is performed -- ctypeptr must be same type as scalar - * in case of flexible type, the data is not copied - * into ctypeptr which is expected to be a pointer to pointer - */ -NPY_NO_EXPORT void -PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) -{ - PyArray_Descr *typecode; - void *newptr; - typecode = PyArray_DescrFromScalar(scalar); - newptr = scalar_value(scalar, typecode); - - if 
(PyTypeNum_ISEXTENDED(typecode->type_num)) { - void **ct = (void **)ctypeptr; - *ct = newptr; - } - else { - memcpy(ctypeptr, newptr, typecode->elsize); - } - Py_DECREF(typecode); - return; -} - -/*NUMPY_API - * Cast Scalar to c-type - * - * The output buffer must be large-enough to receive the value - * Even for flexible types which is different from ScalarAsCtype - * where only a reference for flexible types is returned - * - * This may not work right on narrow builds for NumPy unicode scalars. - */ -NPY_NO_EXPORT int -PyArray_CastScalarToCtype(PyObject *scalar, void *ctypeptr, - PyArray_Descr *outcode) -{ - PyArray_Descr* descr; - PyArray_VectorUnaryFunc* castfunc; - - descr = PyArray_DescrFromScalar(scalar); - castfunc = PyArray_GetCastFunc(descr, outcode->type_num); - if (castfunc == NULL) { - return -1; - } - if (PyTypeNum_ISEXTENDED(descr->type_num) || - PyTypeNum_ISEXTENDED(outcode->type_num)) { - PyArrayObject *ain, *aout; - - ain = (PyArrayObject *)PyArray_FromScalar(scalar, NULL); - if (ain == NULL) { - Py_DECREF(descr); - return -1; - } - aout = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, - outcode, - 0, NULL, - NULL, ctypeptr, - CARRAY, NULL); - if (aout == NULL) { - Py_DECREF(ain); - return -1; - } - castfunc(ain->data, aout->data, 1, ain, aout); - Py_DECREF(ain); - Py_DECREF(aout); - } - else { - castfunc(scalar_value(scalar, descr), ctypeptr, 1, NULL, NULL); - } - Py_DECREF(descr); - return 0; -} - -/*NUMPY_API - * Cast Scalar to c-type - */ -NPY_NO_EXPORT int -PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr *indescr, - void *ctypeptr, int outtype) -{ - PyArray_VectorUnaryFunc* castfunc; - void *ptr; - castfunc = PyArray_GetCastFunc(indescr, outtype); - if (castfunc == NULL) { - return -1; - } - ptr = scalar_value(scalar, indescr); - castfunc(ptr, ctypeptr, 1, NULL, NULL); - return 0; -} - -/*NUMPY_API - * Get 0-dim array from scalar - * - * 0-dim array from array-scalar object - * always contains a copy of the data - * unless 
outcode is NULL, it is of void type and the referrer does - * not own it either. - * - * steals reference to outcode - */ -NPY_NO_EXPORT PyObject * -PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) -{ - PyArray_Descr *typecode; - PyObject *r; - char *memptr; - PyObject *ret; - - /* convert to 0-dim array of scalar typecode */ - typecode = PyArray_DescrFromScalar(scalar); - if ((typecode->type_num == PyArray_VOID) && - !(((PyVoidScalarObject *)scalar)->flags & OWNDATA) && - outcode == NULL) { - r = PyArray_NewFromDescr(&PyArray_Type, - typecode, - 0, NULL, NULL, - ((PyVoidScalarObject *)scalar)->obval, - ((PyVoidScalarObject *)scalar)->flags, - NULL); - PyArray_BASE(r) = (PyObject *)scalar; - Py_INCREF(scalar); - return r; - } - - r = PyArray_NewFromDescr(&PyArray_Type, - typecode, - 0, NULL, - NULL, NULL, 0, NULL); - if (r==NULL) { - Py_XDECREF(outcode); - return NULL; - } - if (PyDataType_FLAGCHK(typecode, NPY_USE_SETITEM)) { - if (typecode->f->setitem(scalar, PyArray_DATA(r), r) < 0) { - Py_XDECREF(outcode); Py_DECREF(r); - return NULL; - } - goto finish; - } - - memptr = scalar_value(scalar, typecode); - -#ifndef Py_UNICODE_WIDE - if (typecode->type_num == PyArray_UNICODE) { - PyUCS2Buffer_AsUCS4((Py_UNICODE *)memptr, - (PyArray_UCS4 *)PyArray_DATA(r), - PyUnicode_GET_SIZE(scalar), - PyArray_ITEMSIZE(r) >> 2); - } - else -#endif - { - memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r)); - if (PyDataType_FLAGCHK(typecode, NPY_ITEM_HASOBJECT)) { - /* Need to INCREF just the PyObject portion */ - PyArray_Item_INCREF(memptr, typecode); - } - } - -finish: - if (outcode == NULL) { - return r; - } - if (outcode->type_num == typecode->type_num) { - if (!PyTypeNum_ISEXTENDED(typecode->type_num) - || (outcode->elsize == typecode->elsize)) { - return r; - } - } - - /* cast if necessary to desired output typecode */ - ret = PyArray_CastToType((PyArrayObject *)r, outcode, 0); - Py_DECREF(r); - return ret; -} - -/*NUMPY_API - * Get an Array Scalar From a Python 
Object - * - * Returns NULL if unsuccessful but error is only set if another error occurred. - * Currently only Numeric-like object supported. - */ -NPY_NO_EXPORT PyObject * -PyArray_ScalarFromObject(PyObject *object) -{ - PyObject *ret=NULL; - if (PyArray_IsZeroDim(object)) { - return PyArray_ToScalar(PyArray_DATA(object), object); - } - /* - * Booleans in Python are implemented as a subclass of integers, - * so PyBool_Check must be called before PyInt_Check. - */ - if (PyBool_Check(object)) { - if (object == Py_True) { - PyArrayScalar_RETURN_TRUE; - } - else { - PyArrayScalar_RETURN_FALSE; - } - } - else if (PyInt_Check(object)) { - ret = PyArrayScalar_New(Long); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object); - } - else if (PyFloat_Check(object)) { - ret = PyArrayScalar_New(Double); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object); - } - else if (PyComplex_Check(object)) { - ret = PyArrayScalar_New(CDouble); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_VAL(ret, CDouble).real = PyComplex_RealAsDouble(object); - PyArrayScalar_VAL(ret, CDouble).imag = PyComplex_ImagAsDouble(object); - } - else if (PyLong_Check(object)) { - longlong val; - val = PyLong_AsLongLong(object); - if (val==-1 && PyErr_Occurred()) { - PyErr_Clear(); - return NULL; - } - ret = PyArrayScalar_New(LongLong); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_VAL(ret, LongLong) = val; - } - return ret; -} - -/*New reference */ -/*NUMPY_API - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrFromTypeObject(PyObject *type) -{ - int typenum; - PyArray_Descr *new, *conv = NULL; - - /* if it's a builtin type, then use the typenumber */ - typenum = _typenum_fromtypeobj(type,1); - if (typenum != PyArray_NOTYPE) { - new = PyArray_DescrFromType(typenum); - return new; - } - - /* Check the generic types */ - if ((type == (PyObject *) &PyNumberArrType_Type) || - (type == (PyObject *) 
&PyInexactArrType_Type) || - (type == (PyObject *) &PyFloatingArrType_Type)) { - typenum = PyArray_DOUBLE; - } - else if (type == (PyObject *)&PyComplexFloatingArrType_Type) { - typenum = PyArray_CDOUBLE; - } - else if ((type == (PyObject *)&PyIntegerArrType_Type) || - (type == (PyObject *)&PySignedIntegerArrType_Type)) { - typenum = PyArray_LONG; - } - else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) { - typenum = PyArray_ULONG; - } - else if (type == (PyObject *) &PyCharacterArrType_Type) { - typenum = PyArray_STRING; - } - else if ((type == (PyObject *) &PyGenericArrType_Type) || - (type == (PyObject *) &PyFlexibleArrType_Type)) { - typenum = PyArray_VOID; - } - - if (typenum != PyArray_NOTYPE) { - return PyArray_DescrFromType(typenum); - } - - /* - * Otherwise --- type is a sub-type of an array scalar - * not corresponding to a registered data-type object. - */ - - /* Do special thing for VOID sub-types */ - if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { - new = PyArray_DescrNewFromType(PyArray_VOID); - conv = _arraydescr_fromobj(type); - if (conv) { - new->fields = conv->fields; - Py_INCREF(new->fields); - new->names = conv->names; - Py_INCREF(new->names); - new->elsize = conv->elsize; - new->subarray = conv->subarray; - conv->subarray = NULL; - Py_DECREF(conv); - } - Py_XDECREF(new->typeobj); - new->typeobj = (PyTypeObject *)type; - Py_INCREF(type); - return new; - } - return _descr_from_subtype(type); -} - -/*NUMPY_API - * Return the tuple of ordered field names from a dictionary. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_FieldNames(PyObject *fields) -{ - PyObject *tup; - PyObject *ret; - PyObject *_numpy_internal; - - if (!PyDict_Check(fields)) { - PyErr_SetString(PyExc_TypeError, - "Fields must be a dictionary"); - return NULL; - } - _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) { - return NULL; - } - tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "O", fields); - Py_DECREF(_numpy_internal); - if (tup == NULL) { - return NULL; - } - ret = PyTuple_GET_ITEM(tup, 0); - ret = PySequence_Tuple(ret); - Py_DECREF(tup); - return ret; -} - -/*NUMPY_API - * Return descr object from array scalar. - * - * New reference - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_DescrFromScalar(PyObject *sc) -{ - int type_num; - PyArray_Descr *descr; - - if (PyArray_IsScalar(sc, Void)) { - descr = ((PyVoidScalarObject *)sc)->descr; - Py_INCREF(descr); - return descr; - } - - if (PyArray_IsScalar(sc, TimeInteger)) { - PyObject *cobj; - PyArray_DatetimeMetaData *dt_data; - - dt_data = _pya_malloc(sizeof(PyArray_DatetimeMetaData)); - if (PyArray_IsScalar(sc, Datetime)) { - descr = PyArray_DescrNewFromType(PyArray_DATETIME); - memcpy(dt_data, &((PyDatetimeScalarObject *)sc)->obmeta, - sizeof(PyArray_DatetimeMetaData)); - } - else { - /* Timedelta */ - descr = PyArray_DescrNewFromType(PyArray_TIMEDELTA); - memcpy(dt_data, &((PyTimedeltaScalarObject *)sc)->obmeta, - sizeof(PyArray_DatetimeMetaData)); - } - cobj = NpyCapsule_FromVoidPtr((void *)dt_data, simple_capsule_dtor); - - /* Add correct meta-data to the data-type */ - if (descr == NULL) { - Py_DECREF(cobj); - return NULL; - } - Py_XDECREF(descr->metadata); - if ((descr->metadata = PyDict_New()) == NULL) { - Py_DECREF(descr); - Py_DECREF(cobj); - return NULL; - } - - /* Assume this sets a new reference to cobj */ - PyDict_SetItemString(descr->metadata, NPY_METADATA_DTSTR, cobj); - Py_DECREF(cobj); - return descr; - } - - descr = 
PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc)); - if (descr->elsize == 0) { - PyArray_DESCR_REPLACE(descr); - type_num = descr->type_num; - if (type_num == PyArray_STRING) { - descr->elsize = PyString_GET_SIZE(sc); - } - else if (type_num == PyArray_UNICODE) { - descr->elsize = PyUnicode_GET_DATA_SIZE(sc); -#ifndef Py_UNICODE_WIDE - descr->elsize <<= 1; -#endif - } - else { - descr->elsize = Py_SIZE((PyVoidScalarObject *)sc); - descr->fields = PyObject_GetAttrString(sc, "fields"); - if (!descr->fields - || !PyDict_Check(descr->fields) - || (descr->fields == Py_None)) { - Py_XDECREF(descr->fields); - descr->fields = NULL; - } - if (descr->fields) { - descr->names = PyArray_FieldNames(descr->fields); - } - PyErr_Clear(); - } - } - return descr; -} - -/*NUMPY_API - * Get a typeobject from a type-number -- can return NULL. - * - * New reference - */ -NPY_NO_EXPORT PyObject * -PyArray_TypeObjectFromType(int type) -{ - PyArray_Descr *descr; - PyObject *obj; - - descr = PyArray_DescrFromType(type); - if (descr == NULL) { - return NULL; - } - obj = (PyObject *)descr->typeobj; - Py_XINCREF(obj); - Py_DECREF(descr); - return obj; -} - -/* Does nothing with descr (cannot be NULL) */ -/*NUMPY_API - Get scalar-equivalent to a region of memory described by a descriptor. 
-*/ -NPY_NO_EXPORT PyObject * -PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) -{ - PyTypeObject *type; - PyObject *obj; - void *destptr; - PyArray_CopySwapFunc *copyswap; - int type_num; - int itemsize; - int swap; - - type_num = descr->type_num; - if (type_num == PyArray_BOOL) { - PyArrayScalar_RETURN_BOOL_FROM_LONG(*(Bool*)data); - } - else if (PyDataType_FLAGCHK(descr, NPY_USE_GETITEM)) { - return descr->f->getitem(data, base); - } - itemsize = descr->elsize; - copyswap = descr->f->copyswap; - type = descr->typeobj; - swap = !PyArray_ISNBO(descr->byteorder); - if PyTypeNum_ISSTRING(type_num) { - /* Eliminate NULL bytes */ - char *dptr = data; - - dptr += itemsize - 1; - while(itemsize && *dptr-- == 0) { - itemsize--; - } - if (type_num == PyArray_UNICODE && itemsize) { - /* - * make sure itemsize is a multiple of 4 - * so round up to nearest multiple - */ - itemsize = (((itemsize - 1) >> 2) + 1) << 2; - } - } - if (type->tp_itemsize != 0) { - /* String type */ - obj = type->tp_alloc(type, itemsize); - } - else { - obj = type->tp_alloc(type, 0); - } - if (obj == NULL) { - return NULL; - } - if (PyTypeNum_ISDATETIME(type_num)) { - /* - * We need to copy the resolution information over to the scalar - * Get the void * from the metadata dictionary - */ - PyObject *cobj; - PyArray_DatetimeMetaData *dt_data; - cobj = PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR); - -/* FIXME - * There is no error handling here. 
- */ - dt_data = NpyCapsule_AsVoidPtr(cobj); - memcpy(&(((PyDatetimeScalarObject *)obj)->obmeta), dt_data, - sizeof(PyArray_DatetimeMetaData)); - } - if (PyTypeNum_ISFLEXIBLE(type_num)) { - if (type_num == PyArray_STRING) { - destptr = PyString_AS_STRING(obj); - ((PyStringObject *)obj)->ob_shash = -1; -#if !defined(NPY_PY3K) - ((PyStringObject *)obj)->ob_sstate = SSTATE_NOT_INTERNED; -#endif - memcpy(destptr, data, itemsize); - return obj; - } - else if (type_num == PyArray_UNICODE) { - /* tp_alloc inherited from Python PyBaseObject_Type */ - PyUnicodeObject *uni = (PyUnicodeObject*)obj; - size_t length = itemsize >> 2; - Py_UNICODE *dst; -#ifndef Py_UNICODE_WIDE - char *buffer; - Py_UNICODE *tmp; - int alloc = 0; - - length *= 2; -#endif - /* Set uni->str so that object can be deallocated on failure */ - uni->str = NULL; - uni->defenc = NULL; - uni->hash = -1; - dst = PyObject_MALLOC(sizeof(Py_UNICODE) * (length + 1)); - if (dst == NULL) { - Py_DECREF(obj); - PyErr_NoMemory(); - return NULL; - } -#ifdef Py_UNICODE_WIDE - memcpy(dst, data, itemsize); - if (swap) { - byte_swap_vector(dst, length, 4); - } - uni->str = dst; - uni->str[length] = 0; - uni->length = length; -#else - /* need aligned data buffer */ - if ((swap) || ((((intp)data) % descr->alignment) != 0)) { - buffer = malloc(itemsize); - if (buffer == NULL) { - PyObject_FREE(dst); - Py_DECREF(obj); - PyErr_NoMemory(); - } - alloc = 1; - memcpy(buffer, data, itemsize); - if (swap) { - byte_swap_vector(buffer, itemsize >> 2, 4); - } - } - else { - buffer = data; - } - - /* - * Allocated enough for 2-characters per itemsize. 
- * Now convert from the data-buffer - */ - length = PyUCS2Buffer_FromUCS4(dst, - (npy_ucs4 *)buffer, itemsize >> 2); - if (alloc) { - free(buffer); - } - /* Resize the unicode result */ - tmp = PyObject_REALLOC(dst, sizeof(Py_UNICODE)*(length + 1)); - if (tmp == NULL) { - PyObject_FREE(dst); - Py_DECREF(obj); - return NULL; - } - uni->str = tmp; - uni->str[length] = 0; - uni->length = length; -#endif - return obj; - } - else { - PyVoidScalarObject *vobj = (PyVoidScalarObject *)obj; - vobj->base = NULL; - vobj->descr = descr; - Py_INCREF(descr); - vobj->obval = NULL; - Py_SIZE(vobj) = itemsize; - vobj->flags = BEHAVED | OWNDATA; - swap = 0; - if (descr->names) { - if (base) { - Py_INCREF(base); - vobj->base = base; - vobj->flags = PyArray_FLAGS(base); - vobj->flags &= ~OWNDATA; - vobj->obval = data; - return obj; - } - } - destptr = PyDataMem_NEW(itemsize); - if (destptr == NULL) { - Py_DECREF(obj); - return PyErr_NoMemory(); - } - vobj->obval = destptr; - } - } - else { - destptr = scalar_value(obj, descr); - } - /* copyswap for OBJECT increments the reference count */ - copyswap(destptr, data, swap, base); - return obj; -} - -/* Return Array Scalar if 0-d array object is encountered */ - -/*NUMPY_API - * - *Return either an array or the appropriate Python object if the array - *is 0d and matches a Python type. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_Return(PyArrayObject *mp) -{ - - if (mp == NULL) { - return NULL; - } - if (PyErr_Occurred()) { - Py_XDECREF(mp); - return NULL; - } - if (!PyArray_Check(mp)) { - return (PyObject *)mp; - } - if (mp->nd == 0) { - PyObject *ret; - ret = PyArray_ToScalar(mp->data, mp); - Py_DECREF(mp); - return ret; - } - else { - return (PyObject *)mp; - } -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/scalartypes.c.src b/numpy-1.6.2/numpy/core/src/multiarray/scalartypes.c.src deleted file mode 100644 index 58dc398616..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/scalartypes.c.src +++ /dev/null @@ -1,3879 +0,0 @@ -/* -*- c -*- */ -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - -#ifndef _MULTIARRAYMODULE -#define _MULTIARRAYMODULE -#endif -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/npy_math.h" -#include "numpy/halffloat.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_3kcompat.h" - -#include "npy_config.h" -#include "mapping.h" -#include "ctors.h" -#include "usertypes.h" -#include "numpyos.h" -#include "common.h" -#include "scalartypes.h" - -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[] = { - {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, - {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, -}; - -/* - * Inheritance is established later when tp_bases is set (or tp_base for - * single inheritance) - */ - -/**begin repeat - * #name = number, integer, signedinteger, unsignedinteger, inexact, - * floating, complexfloating, flexible, character, timeinteger# - * #NAME = Number, Integer, SignedInteger, UnsignedInteger, Inexact, - * Floating, ComplexFloating, Flexible, Character, TimeInteger# - */ -NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.@name@", /* tp_name*/ - sizeof(PyObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - /* 
methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; -/**end repeat**/ - -static PyObject * -gentype_alloc(PyTypeObject *type, Py_ssize_t nitems) -{ - PyObject *obj; - const size_t size = _PyObject_VAR_SIZE(type, nitems + 1); - - obj = (PyObject *)_pya_malloc(size); - memset(obj, 0, size); - if (type->tp_itemsize == 0) { - PyObject_INIT(obj, type); - } - else { - (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems); - } - return obj; -} - -static void -gentype_dealloc(PyObject *v) -{ - Py_TYPE(v)->tp_free(v); -} - - -static PyObject * -gentype_power(PyObject *m1, PyObject *m2, PyObject *NPY_UNUSED(m3)) -{ - PyObject *arr, *ret, *arg2; - char *msg="unsupported operand type(s) for ** or pow()"; - - if (!PyArray_IsScalar(m1,Generic)) { - if (PyArray_Check(m1)) { - ret = Py_TYPE(m1)->tp_as_number->nb_power(m1,m2, Py_None); - } - else { - if (!PyArray_IsScalar(m2,Generic)) { - PyErr_SetString(PyExc_TypeError, msg); - return NULL; - } - arr = PyArray_FromScalar(m2, NULL); - if (arr == NULL) 
{ - return NULL; - } - ret = Py_TYPE(arr)->tp_as_number->nb_power(m1, arr, Py_None); - Py_DECREF(arr); - } - return ret; - } - if (!PyArray_IsScalar(m2, Generic)) { - if (PyArray_Check(m2)) { - ret = Py_TYPE(m2)->tp_as_number->nb_power(m1,m2, Py_None); - } - else { - if (!PyArray_IsScalar(m1, Generic)) { - PyErr_SetString(PyExc_TypeError, msg); - return NULL; - } - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) { - return NULL; - } - ret = Py_TYPE(arr)->tp_as_number->nb_power(arr, m2, Py_None); - Py_DECREF(arr); - } - return ret; - } - arr = arg2 = NULL; - arr = PyArray_FromScalar(m1, NULL); - arg2 = PyArray_FromScalar(m2, NULL); - if (arr == NULL || arg2 == NULL) { - Py_XDECREF(arr); - Py_XDECREF(arg2); - return NULL; - } - ret = Py_TYPE(arr)->tp_as_number->nb_power(arr, arg2, Py_None); - Py_DECREF(arr); - Py_DECREF(arg2); - return ret; -} - -static PyObject * -gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, - char *str) -{ - PyObject *arr, *meth, *ret; - - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; - } - meth = PyObject_GetAttrString(arr, str); - if (meth == NULL) { - Py_DECREF(arr); - return NULL; - } - if (kwds == NULL) { - ret = PyObject_CallObject(meth, args); - } - else { - ret = PyObject_Call(meth, args, kwds); - } - Py_DECREF(meth); - Py_DECREF(arr); - if (ret && PyArray_Check(ret)) { - return PyArray_Return((PyArrayObject *)ret); - } - else { - return ret; - } -} - -/**begin repeat - * - * #name = add, subtract, remainder, divmod, lshift, rshift, - * and, xor, or, floor_divide, true_divide# - */ -static PyObject * -gentype_@name@(PyObject *m1, PyObject *m2) -{ - return PyArray_Type.tp_as_number->nb_@name@(m1, m2); -} - -/**end repeat**/ - -#if !defined(NPY_PY3K) -/**begin repeat - * - * #name = divide# - */ -static PyObject * -gentype_@name@(PyObject *m1, PyObject *m2) -{ - return PyArray_Type.tp_as_number->nb_@name@(m1, m2); -} -/**end repeat**/ -#endif - -static PyObject * 
-gentype_multiply(PyObject *m1, PyObject *m2) -{ - PyObject *ret = NULL; - long repeat; - - if (!PyArray_IsScalar(m1, Generic) && - ((Py_TYPE(m1)->tp_as_number == NULL) || - (Py_TYPE(m1)->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m2 to an int and try sequence repeat */ - repeat = PyInt_AsLong(m2); - if (repeat == -1 && PyErr_Occurred()) { - return NULL; - } - ret = PySequence_Repeat(m1, (int) repeat); - } - else if (!PyArray_IsScalar(m2, Generic) && - ((Py_TYPE(m2)->tp_as_number == NULL) || - (Py_TYPE(m2)->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m1 to an int and try sequence repeat */ - repeat = PyInt_AsLong(m1); - if (repeat == -1 && PyErr_Occurred()) { - return NULL; - } - ret = PySequence_Repeat(m2, (int) repeat); - } - if (ret == NULL) { - PyErr_Clear(); /* no effect if not set */ - ret = PyArray_Type.tp_as_number->nb_multiply(m1, m2); - } - return ret; -} - -/**begin repeat - * - * #name=positive, negative, absolute, invert, int, float# - */ -static PyObject * -gentype_@name@(PyObject *m1) -{ - PyObject *arr, *ret; - - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) { - return NULL; - } - ret = Py_TYPE(arr)->tp_as_number->nb_@name@(arr); - Py_DECREF(arr); - return ret; -} -/**end repeat**/ - -#if !defined(NPY_PY3K) -/**begin repeat - * - * #name=long, oct, hex# - */ -static PyObject * -gentype_@name@(PyObject *m1) -{ - PyObject *arr, *ret; - - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) { - return NULL; - } - ret = Py_TYPE(arr)->tp_as_number->nb_@name@(arr); - Py_DECREF(arr); - return ret; -} -/**end repeat**/ -#endif - -static int -gentype_nonzero_number(PyObject *m1) -{ - PyObject *arr; - int ret; - - arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) { - return -1; - } -#if defined(NPY_PY3K) - ret = Py_TYPE(arr)->tp_as_number->nb_bool(arr); -#else - ret = Py_TYPE(arr)->tp_as_number->nb_nonzero(arr); -#endif - Py_DECREF(arr); - return ret; -} - -static PyObject * -gentype_str(PyObject *self) -{ - 
PyArrayObject *arr; - PyObject *ret; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; - } - ret = PyObject_Str((PyObject *)arr); - Py_DECREF(arr); - return ret; -} - - -static PyObject * -gentype_repr(PyObject *self) -{ - PyArrayObject *arr; - PyObject *ret; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; - } - ret = PyObject_Str((PyObject *)arr); - Py_DECREF(arr); - return ret; -} - -#if PY_VERSION_HEX >= 0x02060000 -/* - * The __format__ method for PEP 3101. - */ -static PyObject * -gentype_format(PyObject *self, PyObject *args) -{ - PyObject *format_spec; - PyObject *obj, *ret; - -#if defined(NPY_PY3K) - if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) { - return NULL; - } -#else - if (!PyArg_ParseTuple(args, "O:__format__", &format_spec)) { - return NULL; - } - - if (!PyUnicode_Check(format_spec) && !PyString_Check(format_spec)) { - PyErr_SetString(PyExc_TypeError, - "format must be a string"); - return NULL; - } -#endif - - /* - * Convert to an appropriate Python type and call its format. - * TODO: For some types, like long double, this isn't right, - * because it throws away precision. 
- */ - if (Py_TYPE(self) == &PyBoolArrType_Type) { - obj = PyBool_FromLong(((PyBoolScalarObject *)self)->obval); - } - else if (PyArray_IsScalar(self, Integer)) { -#if defined(NPY_PY3K) - obj = Py_TYPE(self)->tp_as_number->nb_int(self); -#else - obj = Py_TYPE(self)->tp_as_number->nb_long(self); -#endif - } - else if (PyArray_IsScalar(self, Floating)) { - obj = Py_TYPE(self)->tp_as_number->nb_float(self); - } - else if (PyArray_IsScalar(self, ComplexFloating)) { - double val[2]; - PyArray_Descr *dtype = PyArray_DescrFromScalar(self); - - if (dtype == NULL) { - return NULL; - } - if (PyArray_CastScalarDirect(self, dtype, &val[0], NPY_CDOUBLE) < 0) { - Py_DECREF(dtype); - return NULL; - } - obj = PyComplex_FromDoubles(val[0], val[1]); - Py_DECREF(dtype); - } - else { - obj = self; - Py_INCREF(obj); - } - - if (obj == NULL) { - return NULL; - } - - ret = PyObject_Format(obj, format_spec); - Py_DECREF(obj); - return ret; -} -#endif - -#ifdef FORCE_NO_LONG_DOUBLE_FORMATTING -#undef NPY_LONGDOUBLE_FMT -#define NPY_LONGDOUBLE_FMT NPY_DOUBLE_FMT -#endif - -/**begin repeat - * #name = float, double, longdouble# - * #NAME = FLOAT, DOUBLE, LONGDOUBLE# - * #type = f, d, l# - */ - -#define _FMT1 "%%.%i" NPY_@NAME@_FMT -#define _FMT2 "%%+.%i" NPY_@NAME@_FMT - -NPY_NO_EXPORT void -format_@name@(char *buf, size_t buflen, @name@ val, unsigned int prec) -{ - /* XXX: Find a correct size here for format string */ - char format[64], *res; - size_t i, cnt; - - PyOS_snprintf(format, sizeof(format), _FMT1, prec); - res = NumPyOS_ascii_format@type@(buf, buflen, format, val, 0); - if (res == NULL) { - fprintf(stderr, "Error while formatting\n"); - return; - } - - /* If nothing but digits after sign, append ".0" */ - cnt = strlen(buf); - for (i = (val < 0) ? 
1 : 0; i < cnt; ++i) { - if (!isdigit(Py_CHARMASK(buf[i]))) { - break; - } - } - if (i == cnt && buflen >= cnt + 3) { - strcpy(&buf[cnt],".0"); - } -} - -static void -format_c@name@(char *buf, size_t buflen, c@name@ val, unsigned int prec) -{ - /* XXX: Find a correct size here for format string */ - char format[64]; - char *res; - - /* - * Ideally, we should handle this nan/inf stuff in NumpyOS_ascii_format* - */ -#if PY_VERSION_HEX >= 0x02070000 - if (val.real == 0.0 && npy_signbit(val.real) == 0) { -#else - if (val.real == 0.0) { -#endif - PyOS_snprintf(format, sizeof(format), _FMT1, prec); - res = NumPyOS_ascii_format@type@(buf, buflen-1, format, val.imag, 0); - if (res == NULL) { - fprintf(stderr, "Error while formatting\n"); - return; - } -#if PY_VERSION_HEX >= 0x02060000 - if (!npy_isfinite(val.imag)) { - strncat(buf, "*", 1); - } -#endif - strncat(buf, "j", 1); - } - else { - char re[64], im[64]; - if (npy_isfinite(val.real)) { - PyOS_snprintf(format, sizeof(format), _FMT1, prec); - res = NumPyOS_ascii_format@type@(re, sizeof(re), format, val.real, 0); - if (res == NULL) { - fprintf(stderr, "Error while formatting\n"); - return; - } - } else { - if (npy_isnan(val.real)) { - strcpy(re, "nan"); - } else if (val.real > 0){ - strcpy(re, "inf"); - } else { - strcpy(re, "-inf"); - } - } - - - if (npy_isfinite(val.imag)) { - PyOS_snprintf(format, sizeof(format), _FMT2, prec); - res = NumPyOS_ascii_format@type@(im, sizeof(im), format, val.imag, 0); - if (res == NULL) { - fprintf(stderr, "Error while formatting\n"); - return; - } - } else { - if (npy_isnan(val.imag)) { - strcpy(im, "+nan"); - } else if (val.imag > 0){ - strcpy(im, "+inf"); - } else { - strcpy(im, "-inf"); - } - #if PY_VERSION_HEX >= 0x02060000 - if (!npy_isfinite(val.imag)) { - strncat(im, "*", 1); - } - #endif - } - PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); - } -} - -#undef _FMT1 -#undef _FMT2 - -/**end repeat**/ - -NPY_NO_EXPORT void -format_half(char *buf, size_t buflen, npy_half val, 
unsigned int prec) -{ - format_float(buf, buflen, npy_half_to_float(val), prec); -} - -/* - * over-ride repr and str of array-scalar strings and unicode to - * remove NULL bytes and then call the corresponding functions - * of string and unicode. - */ - -/**begin repeat - * #name = string*2,unicode*2# - * #form = (repr,str)*2# - * #Name = String*2,Unicode*2# - * #NAME = STRING*2,UNICODE*2# - * #extra = AndSize*2,,# - * #type = char*2, Py_UNICODE*2# - */ -static PyObject * -@name@type_@form@(PyObject *self) -{ - const @type@ *dptr, *ip; - int len; - PyObject *new; - PyObject *ret; - - ip = dptr = Py@Name@_AS_@NAME@(self); - len = Py@Name@_GET_SIZE(self); - dptr += len-1; - while(len > 0 && *dptr-- == 0) { - len--; - } - new = Py@Name@_From@Name@@extra@(ip, len); - if (new == NULL) { - return PyUString_FromString(""); - } - ret = Py@Name@_Type.tp_@form@(new); - Py_DECREF(new); - return ret; -} -/**end repeat**/ - -/* The REPR values are finfo.precision + 2 */ -#define HALFPREC_REPR 5 -#define HALFPREC_STR 5 -#define FLOATPREC_REPR 8 -#define FLOATPREC_STR 6 -#define DOUBLEPREC_REPR 17 -#define DOUBLEPREC_STR 12 -#if SIZEOF_LONGDOUBLE == SIZEOF_DOUBLE -#define LONGDOUBLEPREC_REPR DOUBLEPREC_REPR -#define LONGDOUBLEPREC_STR DOUBLEPREC_STR -#else /* More than probably needed on Intel FP */ -#define LONGDOUBLEPREC_REPR 20 -#define LONGDOUBLEPREC_STR 12 -#endif - -/* - * float type str and repr - * - * These functions will return NULL if PyString creation fails. 
- */ - -/**begin repeat - * #name = half, float, double, longdouble# - * #Name = Half, Float, Double, LongDouble# - * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE# - * #hascomplex = 0, 1, 1, 1# - */ -/**begin repeat1 - * #kind = str, repr# - * #KIND = STR, REPR# - */ - -#define PREC @NAME@PREC_@KIND@ - -static PyObject * -@name@type_@kind@(PyObject *self) -{ - char buf[100]; - npy_@name@ val = ((Py@Name@ScalarObject *)self)->obval; - - format_@name@(buf, sizeof(buf), val, PREC); - return PyUString_FromString(buf); -} - -#if @hascomplex@ -static PyObject * -c@name@type_@kind@(PyObject *self) -{ - char buf[202]; - c@name@ val = ((PyC@Name@ScalarObject *)self)->obval; - - format_c@name@(buf, sizeof(buf), val, PREC); - return PyUString_FromString(buf); -} -#endif - -#undef PREC - -/**end repeat1**/ -/**end repeat**/ - -/* - * float type print (control print a, where a is a float type instance) - */ -/**begin repeat - * #name = half, float, double, longdouble# - * #Name = Half, Float, Double, LongDouble# - * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE# - * #hascomplex = 0, 1, 1, 1# - */ - -static int -@name@type_print(PyObject *v, FILE *fp, int flags) -{ - char buf[100]; - npy_@name@ val = ((Py@Name@ScalarObject *)v)->obval; - - format_@name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME@PREC_STR : @NAME@PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; -} - -#if @hascomplex@ -static int -c@name@type_print(PyObject *v, FILE *fp, int flags) -{ - /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ - char buf[202]; - c@name@ val = ((PyC@Name@ScalarObject *)v)->obval; - - format_c@name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME@PREC_STR : @NAME@PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; -} -#endif - -/**end repeat**/ - - -/* - * Could improve this with a PyLong_FromLongDouble(longdouble ldval) - * but this would need some more work... 
- */ - -/**begin repeat - * - * #name = (int, float)*2# - * #KIND = (Long, Float)*2# - * #char = ,,c*2# - * #CHAR = ,,C*2# - * #POST = ,,.real*2# - */ -static PyObject * -@char@longdoubletype_@name@(PyObject *self) -{ - double dval; - PyObject *obj, *ret; - - dval = (double)(((Py@CHAR@LongDoubleScalarObject *)self)->obval)@POST@; - obj = Py@KIND@_FromDouble(dval); - ret = Py_TYPE(obj)->tp_as_number->nb_@name@(obj); - Py_DECREF(obj); - return ret; -} -/**end repeat**/ - -#if !defined(NPY_PY3K) - -/**begin repeat - * - * #name = (long, hex, oct)*2# - * #KIND = (Long*3)*2# - * #char = ,,,c*3# - * #CHAR = ,,,C*3# - * #POST = ,,,.real*3# - */ -static PyObject * -@char@longdoubletype_@name@(PyObject *self) -{ - double dval; - PyObject *obj, *ret; - - dval = (double)(((Py@CHAR@LongDoubleScalarObject *)self)->obval)@POST@; - obj = Py@KIND@_FromDouble(dval); - ret = Py_TYPE(obj)->tp_as_number->nb_@name@(obj); - Py_DECREF(obj); - return ret; -} -/**end repeat**/ - -#endif /* !defined(NPY_PY3K) */ - -static PyNumberMethods gentype_as_number = { - (binaryfunc)gentype_add, /*nb_add*/ - (binaryfunc)gentype_subtract, /*nb_subtract*/ - (binaryfunc)gentype_multiply, /*nb_multiply*/ -#if defined(NPY_PY3K) -#else - (binaryfunc)gentype_divide, /*nb_divide*/ -#endif - (binaryfunc)gentype_remainder, /*nb_remainder*/ - (binaryfunc)gentype_divmod, /*nb_divmod*/ - (ternaryfunc)gentype_power, /*nb_power*/ - (unaryfunc)gentype_negative, - (unaryfunc)gentype_positive, /*nb_pos*/ - (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ - (inquiry)gentype_nonzero_number, /*nb_nonzero*/ - (unaryfunc)gentype_invert, /*nb_invert*/ - (binaryfunc)gentype_lshift, /*nb_lshift*/ - (binaryfunc)gentype_rshift, /*nb_rshift*/ - (binaryfunc)gentype_and, /*nb_and*/ - (binaryfunc)gentype_xor, /*nb_xor*/ - (binaryfunc)gentype_or, /*nb_or*/ -#if defined(NPY_PY3K) -#else - 0, /*nb_coerce*/ -#endif - (unaryfunc)gentype_int, /*nb_int*/ -#if defined(NPY_PY3K) - 0, /*nb_reserved*/ -#else - (unaryfunc)gentype_long, 
/*nb_long*/ -#endif - (unaryfunc)gentype_float, /*nb_float*/ -#if defined(NPY_PY3K) -#else - (unaryfunc)gentype_oct, /*nb_oct*/ - (unaryfunc)gentype_hex, /*nb_hex*/ -#endif - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ -#if defined(NPY_PY3K) -#else - 0, /*inplace_divide*/ -#endif - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ - (binaryfunc)gentype_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ -#if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)NULL, /*nb_index*/ -#endif -}; - - -static PyObject * -gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) -{ - PyObject *arr, *ret; - - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; - } - ret = Py_TYPE(arr)->tp_richcompare(arr, other, cmp_op); - Py_DECREF(arr); - return ret; -} - -static PyObject * -gentype_ndim_get(PyObject *NPY_UNUSED(self)) -{ - return PyInt_FromLong(0); -} - -static PyObject * -gentype_flags_get(PyObject *NPY_UNUSED(self)) -{ - return PyArray_NewFlagsObject(NULL); -} - -static PyObject * -voidtype_flags_get(PyVoidScalarObject *self) -{ - PyObject *flagobj; - flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) { - return NULL; - } - ((PyArrayFlagsObject *)flagobj)->arr = NULL; - ((PyArrayFlagsObject *)flagobj)->flags = self->flags; - return flagobj; -} - -static PyObject * -voidtype_dtypedescr_get(PyVoidScalarObject *self) -{ - Py_INCREF(self->descr); - return (PyObject *)self->descr; -} - - -static PyObject * -gentype_data_get(PyObject *self) -{ -#if defined(NPY_PY3K) - return PyMemoryView_FromObject(self); -#else - return PyBuffer_FromObject(self, 0, Py_END_OF_BUFFER); -#endif -} - - -static PyObject * -gentype_itemsize_get(PyObject *self) -{ - PyArray_Descr *typecode; - PyObject *ret; - int 
elsize; - - typecode = PyArray_DescrFromScalar(self); - elsize = typecode->elsize; -#ifndef Py_UNICODE_WIDE - if (typecode->type_num == NPY_UNICODE) { - elsize >>= 1; - } -#endif - ret = PyInt_FromLong((long) elsize); - Py_DECREF(typecode); - return ret; -} - -static PyObject * -gentype_size_get(PyObject *NPY_UNUSED(self)) -{ - return PyInt_FromLong(1); -} - -#if PY_VERSION_HEX >= 0x03000000 -NPY_NO_EXPORT void -gentype_struct_free(PyObject *ptr) -{ - PyArrayInterface *arrif; - PyObject *context; - - arrif = (PyArrayInterface*)PyCapsule_GetPointer(ptr, NULL); - context = (PyObject *)PyCapsule_GetContext(ptr); - Py_DECREF(context); - Py_XDECREF(arrif->descr); - _pya_free(arrif->shape); - _pya_free(arrif); -} -#else -NPY_NO_EXPORT void -gentype_struct_free(void *ptr, void *arg) -{ - PyArrayInterface *arrif = (PyArrayInterface *)ptr; - Py_DECREF((PyObject *)arg); - Py_XDECREF(arrif->descr); - _pya_free(arrif->shape); - _pya_free(arrif); -} -#endif - -static PyObject * -gentype_struct_get(PyObject *self) -{ - PyArrayObject *arr; - PyArrayInterface *inter; - PyObject *ret; - - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - inter->two = 2; - inter->nd = 0; - inter->flags = arr->flags; - inter->flags &= ~(UPDATEIFCOPY | OWNDATA); - inter->flags |= NPY_NOTSWAPPED; - inter->typekind = arr->descr->kind; - inter->itemsize = arr->descr->elsize; - inter->strides = NULL; - inter->shape = NULL; - inter->data = arr->data; - inter->descr = NULL; - - ret = NpyCapsule_FromVoidPtrAndDesc(inter, arr, gentype_struct_free); - return ret; -} - -static PyObject * -gentype_priority_get(PyObject *NPY_UNUSED(self)) -{ - return PyFloat_FromDouble(NPY_SCALAR_PRIORITY); -} - -static PyObject * -gentype_shape_get(PyObject *NPY_UNUSED(self)) -{ - return PyTuple_New(0); -} - - -static PyObject * -gentype_interface_get(PyObject *self) -{ - PyArrayObject *arr; - PyObject *inter; - - arr = (PyArrayObject 
*)PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; - } - inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) { - PyDict_SetItemString(inter, "__ref", (PyObject *)arr); - } - Py_DECREF(arr); - return inter; -} - - - -static PyObject * -gentype_typedescr_get(PyObject *self) -{ - return (PyObject *)PyArray_DescrFromScalar(self); -} - - -static PyObject * -gentype_base_get(PyObject *NPY_UNUSED(self)) -{ - Py_INCREF(Py_None); - return Py_None; -} - - -static PyArray_Descr * -_realdescr_fromcomplexscalar(PyObject *self, int *typenum) -{ - if (PyArray_IsScalar(self, CDouble)) { - *typenum = PyArray_CDOUBLE; - return PyArray_DescrFromType(PyArray_DOUBLE); - } - if (PyArray_IsScalar(self, CFloat)) { - *typenum = PyArray_CFLOAT; - return PyArray_DescrFromType(PyArray_FLOAT); - } - if (PyArray_IsScalar(self, CLongDouble)) { - *typenum = PyArray_CLONGDOUBLE; - return PyArray_DescrFromType(PyArray_LONGDOUBLE); - } - return NULL; -} - -static PyObject * -gentype_real_get(PyObject *self) -{ - PyArray_Descr *typecode; - PyObject *ret; - int typenum; - - if (PyArray_IsScalar(self, ComplexFloating)) { - void *ptr; - typecode = _realdescr_fromcomplexscalar(self, &typenum); - ptr = scalar_value(self, NULL); - ret = PyArray_Scalar(ptr, typecode, NULL); - Py_DECREF(typecode); - return ret; - } - else if (PyArray_IsScalar(self, Object)) { - PyObject *obj = ((PyObjectScalarObject *)self)->obval; - ret = PyObject_GetAttrString(obj, "real"); - if (ret != NULL) { - return ret; - } - PyErr_Clear(); - } - Py_INCREF(self); - return (PyObject *)self; -} - -static PyObject * -gentype_imag_get(PyObject *self) -{ - PyArray_Descr *typecode=NULL; - PyObject *ret; - int typenum; - - if (PyArray_IsScalar(self, ComplexFloating)) { - char *ptr; - typecode = _realdescr_fromcomplexscalar(self, &typenum); - ptr = (char *)scalar_value(self, NULL); - ret = PyArray_Scalar(ptr + typecode->elsize, typecode, NULL); - } - else if (PyArray_IsScalar(self, 
Object)) { - PyObject *obj = ((PyObjectScalarObject *)self)->obval; - PyArray_Descr *newtype; - ret = PyObject_GetAttrString(obj, "imag"); - if (ret == NULL) { - PyErr_Clear(); - obj = PyInt_FromLong(0); - newtype = PyArray_DescrFromType(PyArray_OBJECT); - ret = PyArray_Scalar((char *)&obj, newtype, NULL); - Py_DECREF(newtype); - Py_DECREF(obj); - } - } - else { - char *temp; - int elsize; - typecode = PyArray_DescrFromScalar(self); - elsize = typecode->elsize; - temp = PyDataMem_NEW(elsize); - memset(temp, '\0', elsize); - ret = PyArray_Scalar(temp, typecode, NULL); - PyDataMem_FREE(temp); - } - - Py_XDECREF(typecode); - return ret; -} - -static PyObject * -gentype_flat_get(PyObject *self) -{ - PyObject *ret, *arr; - - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; - } - ret = PyArray_IterNew(arr); - Py_DECREF(arr); - return ret; -} - - -static PyObject * -gentype_transpose_get(PyObject *self) -{ - Py_INCREF(self); - return self; -} - - -static PyGetSetDef gentype_getsets[] = { - {"ndim", - (getter)gentype_ndim_get, - (setter) 0, - "number of array dimensions", - NULL}, - {"flags", - (getter)gentype_flags_get, - (setter)0, - "integer value of flags", - NULL}, - {"shape", - (getter)gentype_shape_get, - (setter)0, - "tuple of array dimensions", - NULL}, - {"strides", - (getter)gentype_shape_get, - (setter) 0, - "tuple of bytes steps in each dimension", - NULL}, - {"data", - (getter)gentype_data_get, - (setter) 0, - "pointer to start of data", - NULL}, - {"itemsize", - (getter)gentype_itemsize_get, - (setter)0, - "length of one element in bytes", - NULL}, - {"size", - (getter)gentype_size_get, - (setter)0, - "number of elements in the gentype", - NULL}, - {"nbytes", - (getter)gentype_itemsize_get, - (setter)0, - "length of item in bytes", - NULL}, - {"base", - (getter)gentype_base_get, - (setter)0, - "base object", - NULL}, - {"dtype", - (getter)gentype_typedescr_get, - NULL, - "get array data-descriptor", - NULL}, - {"real", - 
(getter)gentype_real_get, - (setter)0, - "real part of scalar", - NULL}, - {"imag", - (getter)gentype_imag_get, - (setter)0, - "imaginary part of scalar", - NULL}, - {"flat", - (getter)gentype_flat_get, - (setter)0, - "a 1-d view of scalar", - NULL}, - {"T", - (getter)gentype_transpose_get, - (setter)0, - "transpose", - NULL}, - {"__array_interface__", - (getter)gentype_interface_get, - NULL, - "Array protocol: Python side", - NULL}, - {"__array_struct__", - (getter)gentype_struct_get, - NULL, - "Array protocol: struct", - NULL}, - {"__array_priority__", - (getter)gentype_priority_get, - NULL, - "Array priority.", - NULL}, - {NULL, NULL, NULL, NULL, NULL} /* Sentinel */ -}; - - -/* 0-dim array from scalar object */ - -static char doc_getarray[] = "sc.__array__(|type) return 0-dim array"; - -static PyObject * -gentype_getarray(PyObject *scalar, PyObject *args) -{ - PyArray_Descr *outcode=NULL; - PyObject *ret; - - if (!PyArg_ParseTuple(args, "|O&", &PyArray_DescrConverter, - &outcode)) { - Py_XDECREF(outcode); - return NULL; - } - ret = PyArray_FromScalar(scalar, outcode); - return ret; -} - -static char doc_sc_wraparray[] = "sc.__array_wrap__(obj) return scalar from array"; - -static PyObject * -gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args) -{ - PyObject *arr; - - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument."); - return NULL; - } - arr = PyTuple_GET_ITEM(args, 0); - if (!PyArray_Check(arr)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; - } - - return PyArray_Scalar(PyArray_DATA(arr), PyArray_DESCR(arr), arr); -} - -/* - * These gentype_* functions do not take keyword arguments. - * The proper flag is METH_VARARGS. 
- */ -/**begin repeat - * - * #name = tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, - * view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, - * transpose, newbyteorder# - */ -static PyObject * -gentype_@name@(PyObject *self, PyObject *args) -{ - return gentype_generic_method(self, args, NULL, "@name@"); -} -/**end repeat**/ - -static PyObject * -gentype_itemset(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) -{ - PyErr_SetString(PyExc_ValueError, "array-scalars are immutable"); - return NULL; -} - -static PyObject * -gentype_squeeze(PyObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - Py_INCREF(self); - return self; -} - -static Py_ssize_t -gentype_getreadbuf(PyObject *, Py_ssize_t, void **); - -static PyObject * -gentype_byteswap(PyObject *self, PyObject *args) -{ - Bool inplace=FALSE; - - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) { - return NULL; - } - if (inplace) { - PyErr_SetString(PyExc_ValueError, - "cannot byteswap a scalar in-place"); - return NULL; - } - else { - /* get the data, copyswap it and pass it to a new Array scalar */ - char *data; - PyArray_Descr *descr; - PyObject *new; - char *newmem; - - gentype_getreadbuf(self, 0, (void **)&data); - descr = PyArray_DescrFromScalar(self); - newmem = _pya_malloc(descr->elsize); - if (newmem == NULL) { - Py_DECREF(descr); - return PyErr_NoMemory(); - } - else { - descr->f->copyswap(newmem, data, 1, NULL); - } - new = PyArray_Scalar(newmem, descr, NULL); - _pya_free(newmem); - Py_DECREF(descr); - return new; - } -} - - -/* - * These gentype_* functions take keyword arguments. - * The proper flag is METH_VARARGS | METH_KEYWORDS. 
- */ -/**begin repeat - * - * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, - * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, - * round, argmax, argmin, max, min, ptp, any, all, resize, reshape, - * choose# - */ -static PyObject * -gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds) -{ - return gentype_generic_method(self, args, kwds, "@name@"); -} -/**end repeat**/ - -static PyObject * -voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *ret, *newargs; - - newargs = PyTuple_GetSlice(args, 0, 2); - if (newargs == NULL) { - return NULL; - } - ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); - Py_DECREF(newargs); - if (!ret) { - return ret; - } - if (PyArray_IsScalar(ret, Generic) && \ - (!PyArray_IsScalar(ret, Void))) { - PyArray_Descr *new; - void *ptr; - if (!PyArray_ISNBO(self->descr->byteorder)) { - new = PyArray_DescrFromScalar(ret); - ptr = scalar_value(ret, new); - byte_swap_vector(ptr, 1, new->elsize); - Py_DECREF(new); - } - } - return ret; -} - -static PyObject * -gentype_setfield(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) -{ - PyErr_SetString(PyExc_TypeError, - "Can't set fields in a non-void array scalar."); - return NULL; -} - -static PyObject * -voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) -{ - PyArray_Descr *typecode = NULL; - int offset = 0; - PyObject *value, *src; - int mysize; - char *dptr; - static char *kwlist[] = {"value", "dtype", "offset", 0}; - - if ((self->flags & WRITEABLE) != WRITEABLE) { - PyErr_SetString(PyExc_RuntimeError, "Can't write to memory"); - return NULL; - } - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, - &value, - PyArray_DescrConverter, - &typecode, &offset)) { - Py_XDECREF(typecode); - return NULL; - } - - mysize = Py_SIZE(self); - - if (offset < 0 || (offset + typecode->elsize) > mysize) { - 
PyErr_Format(PyExc_ValueError, - "Need 0 <= offset <= %d for requested type " \ - "but received offset = %d", - mysize-typecode->elsize, offset); - Py_DECREF(typecode); - return NULL; - } - - dptr = self->obval + offset; - - if (typecode->type_num == PyArray_OBJECT) { - PyObject *temp; - Py_INCREF(value); - NPY_COPY_PYOBJECT_PTR(&temp, dptr); - Py_XDECREF(temp); - NPY_COPY_PYOBJECT_PTR(dptr, &value); - Py_DECREF(typecode); - } - else { - /* Copy data from value to correct place in dptr */ - src = PyArray_FromAny(value, typecode, 0, 0, CARRAY, NULL); - if (src == NULL) { - return NULL; - } - typecode->f->copyswap(dptr, PyArray_DATA(src), - !PyArray_ISNBO(self->descr->byteorder), - src); - Py_DECREF(src); - } - Py_INCREF(Py_None); - return Py_None; -} - - -static PyObject * -gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) -{ - PyObject *ret = NULL, *obj = NULL, *mod = NULL; - const char *buffer; - Py_ssize_t buflen; - - /* Return a tuple of (callable object, arguments) */ - ret = PyTuple_New(2); - if (ret == NULL) { - return NULL; - } -#if defined(NPY_PY3K) - if (PyArray_IsScalar(self, Unicode)) { - /* Unicode on Python 3 does not expose the buffer interface */ - buffer = PyUnicode_AS_DATA(self); - buflen = PyUnicode_GET_DATA_SIZE(self); - } - else -#endif - if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) { - Py_DECREF(ret); - return NULL; - } - mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) { - return NULL; - } - obj = PyObject_GetAttrString(mod, "scalar"); - Py_DECREF(mod); - if (obj == NULL) { - return NULL; - } - PyTuple_SET_ITEM(ret, 0, obj); - obj = PyObject_GetAttrString((PyObject *)self, "dtype"); - if (PyArray_IsScalar(self, Object)) { - mod = ((PyObjectScalarObject *)self)->obval; - PyTuple_SET_ITEM(ret, 1, Py_BuildValue("NO", obj, mod)); - } - else { -#ifndef Py_UNICODE_WIDE - /* - * We need to expand the buffer so that we always write - * UCS4 to disk for pickle of unicode scalars. 
- * - * This could be in a unicode_reduce function, but - * that would require re-factoring. - */ - int alloc = 0; - char *tmp; - int newlen; - - if (PyArray_IsScalar(self, Unicode)) { - tmp = _pya_malloc(buflen*2); - if (tmp == NULL) { - Py_DECREF(ret); - return PyErr_NoMemory(); - } - alloc = 1; - newlen = PyUCS2Buffer_AsUCS4((Py_UNICODE *)buffer, - (PyArray_UCS4 *)tmp, - buflen / 2, buflen / 2); - buflen = newlen*4; - buffer = tmp; - } -#endif - mod = PyBytes_FromStringAndSize(buffer, buflen); - if (mod == NULL) { - Py_DECREF(ret); -#ifndef Py_UNICODE_WIDE - ret = NULL; - goto fail; -#else - return NULL; -#endif - } - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("NN", obj, mod)); -#ifndef Py_UNICODE_WIDE -fail: - if (alloc) _pya_free((char *)buffer); -#endif - } - return ret; -} - -/* ignores everything */ -static PyObject * -gentype_setstate(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) -{ - Py_INCREF(Py_None); - return (Py_None); -} - -static PyObject * -gentype_dump(PyObject *self, PyObject *args) -{ - PyObject *file = NULL; - int ret; - - if (!PyArg_ParseTuple(args, "O", &file)) { - return NULL; - } - ret = PyArray_Dump(self, file, 2); - if (ret < 0) { - return NULL; - } - Py_INCREF(Py_None); - return Py_None; -} - -static PyObject * -gentype_dumps(PyObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - return PyArray_Dumps(self, 2); -} - - -/* setting flags cannot be done for scalars */ -static PyObject * -gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), - PyObject *NPY_UNUSED(kwds)) -{ - Py_INCREF(Py_None); - return Py_None; -} - -/* casting complex numbers (that don't inherit from Python complex) - * to Python complex */ - -/**begin repeat - * #name=cfloat,clongdouble# - * #Name=CFloat,CLongDouble# - */ -static PyObject * -@name@_complex(PyObject *self, PyObject *NPY_UNUSED(args), - PyObject *NPY_UNUSED(kwds)) -{ - return PyComplex_FromDoubles(PyArrayScalar_VAL(self, @Name@).real, - 
PyArrayScalar_VAL(self, @Name@).imag); -} -/**end repeat**/ - -/* need to fill in doc-strings for these methods on import -- copy from - array docstrings -*/ -static PyMethodDef gentype_methods[] = { - {"tolist", - (PyCFunction)gentype_tolist, - METH_VARARGS, NULL}, - {"item", - (PyCFunction)gentype_item, - METH_VARARGS, NULL}, - {"itemset", - (PyCFunction)gentype_itemset, - METH_VARARGS, NULL}, - {"tofile", - (PyCFunction)gentype_tofile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"tostring", - (PyCFunction)gentype_tostring, - METH_VARARGS, NULL}, - {"byteswap", - (PyCFunction)gentype_byteswap, - METH_VARARGS, NULL}, - {"astype", - (PyCFunction)gentype_astype, - METH_VARARGS, NULL}, - {"getfield", - (PyCFunction)gentype_getfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setfield", - (PyCFunction)gentype_setfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"copy", - (PyCFunction)gentype_copy, - METH_VARARGS, NULL}, - {"resize", - (PyCFunction)gentype_resize, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"__array__", - (PyCFunction)gentype_getarray, - METH_VARARGS, doc_getarray}, - {"__array_wrap__", - (PyCFunction)gentype_wraparray, - METH_VARARGS, doc_sc_wraparray}, - - /* for the copy module */ - {"__copy__", - (PyCFunction)gentype_copy, - METH_VARARGS, NULL}, - {"__deepcopy__", - (PyCFunction)gentype___deepcopy__, - METH_VARARGS, NULL}, - - {"__reduce__", - (PyCFunction) gentype_reduce, - METH_VARARGS, NULL}, - /* For consistency does nothing */ - {"__setstate__", - (PyCFunction) gentype_setstate, - METH_VARARGS, NULL}, - - {"dumps", - (PyCFunction) gentype_dumps, - METH_VARARGS, NULL}, - {"dump", - (PyCFunction) gentype_dump, - METH_VARARGS, NULL}, - - /* Methods for array */ - {"fill", - (PyCFunction)gentype_fill, - METH_VARARGS, NULL}, - {"transpose", - (PyCFunction)gentype_transpose, - METH_VARARGS, NULL}, - {"take", - (PyCFunction)gentype_take, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"put", - (PyCFunction)gentype_put, - METH_VARARGS | METH_KEYWORDS, NULL}, - 
{"repeat", - (PyCFunction)gentype_repeat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"choose", - (PyCFunction)gentype_choose, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"sort", - (PyCFunction)gentype_sort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"argsort", - (PyCFunction)gentype_argsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"searchsorted", - (PyCFunction)gentype_searchsorted, - METH_VARARGS, NULL}, - {"argmax", - (PyCFunction)gentype_argmax, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"argmin", - (PyCFunction)gentype_argmin, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"reshape", - (PyCFunction)gentype_reshape, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"squeeze", - (PyCFunction)gentype_squeeze, - METH_VARARGS, NULL}, - {"view", - (PyCFunction)gentype_view, - METH_VARARGS, NULL}, - {"swapaxes", - (PyCFunction)gentype_swapaxes, - METH_VARARGS, NULL}, - {"max", - (PyCFunction)gentype_max, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"min", - (PyCFunction)gentype_min, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"ptp", - (PyCFunction)gentype_ptp, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"mean", - (PyCFunction)gentype_mean, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"trace", - (PyCFunction)gentype_trace, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"diagonal", - (PyCFunction)gentype_diagonal, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"clip", - (PyCFunction)gentype_clip, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"conj", - (PyCFunction)gentype_conj, - METH_VARARGS, NULL}, - {"conjugate", - (PyCFunction)gentype_conjugate, - METH_VARARGS, NULL}, - {"nonzero", - (PyCFunction)gentype_nonzero, - METH_VARARGS, NULL}, - {"std", - (PyCFunction)gentype_std, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"var", - (PyCFunction)gentype_var, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"sum", - (PyCFunction)gentype_sum, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"cumsum", - (PyCFunction)gentype_cumsum, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"prod", - (PyCFunction)gentype_prod, - METH_VARARGS | 
METH_KEYWORDS, NULL}, - {"cumprod", - (PyCFunction)gentype_cumprod, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"all", - (PyCFunction)gentype_all, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"any", - (PyCFunction)gentype_any, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compress", - (PyCFunction)gentype_compress, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"flatten", - (PyCFunction)gentype_flatten, - METH_VARARGS, NULL}, - {"ravel", - (PyCFunction)gentype_ravel, - METH_VARARGS, NULL}, - {"round", - (PyCFunction)gentype_round, - METH_VARARGS | METH_KEYWORDS, NULL}, -#if defined(NPY_PY3K) - /* Hook for the round() builtin */ - {"__round__", - (PyCFunction)gentype_round, - METH_VARARGS | METH_KEYWORDS, NULL}, -#endif -#if PY_VERSION_HEX >= 0x02060000 - /* For the format function */ - {"__format__", - gentype_format, - METH_VARARGS, - "NumPy array scalar formatter"}, -#endif - {"setflags", - (PyCFunction)gentype_setflags, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"newbyteorder", - (PyCFunction)gentype_newbyteorder, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - -static PyGetSetDef voidtype_getsets[] = { - {"flags", - (getter)voidtype_flags_get, - (setter)0, - "integer value of flags", - NULL}, - {"dtype", - (getter)voidtype_dtypedescr_get, - (setter)0, - "dtype object", - NULL}, - {NULL, NULL, NULL, NULL, NULL} -}; - -static PyMethodDef voidtype_methods[] = { - {"getfield", - (PyCFunction)voidtype_getfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"setfield", - (PyCFunction)voidtype_setfield, - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL, 0, NULL} -}; - -/**begin repeat - * #name=cfloat,clongdouble# - */ -static PyMethodDef @name@type_methods[] = { - {"__complex__", - (PyCFunction)@name@_complex, - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL, 0, NULL} -}; -/**end repeat**/ - -/************* As_mapping functions for void array scalar ************/ - -static Py_ssize_t -voidtype_length(PyVoidScalarObject *self) -{ - if (!self->descr->names) 
{ - return 0; - } - else { /* return the number of fields */ - return (Py_ssize_t) PyTuple_GET_SIZE(self->descr->names); - } -} - -static PyObject * -voidtype_item(PyVoidScalarObject *self, Py_ssize_t n) -{ - intp m; - PyObject *flist=NULL, *fieldinfo; - - if (!(PyDescr_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return NULL; - } - flist = self->descr->names; - m = PyTuple_GET_SIZE(flist); - if (n < 0) { - n += m; - } - if (n < 0 || n >= m) { - PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); - return NULL; - } - fieldinfo = PyDict_GetItem(self->descr->fields, - PyTuple_GET_ITEM(flist, n)); - return voidtype_getfield(self, fieldinfo, NULL); -} - - -/* get field by name or number */ -static PyObject * -voidtype_subscript(PyVoidScalarObject *self, PyObject *ind) -{ - intp n; - PyObject *fieldinfo; - - if (!(PyDescr_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return NULL; - } - -#if defined(NPY_PY3K) - if (PyUString_Check(ind)) { -#else - if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { -#endif - /* look up in fields */ - fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) { - goto fail; - } - return voidtype_getfield(self, fieldinfo, NULL); - } - - /* try to convert it to a number */ - n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) { - goto fail; - } - return voidtype_item(self, (Py_ssize_t)n); - -fail: - PyErr_SetString(PyExc_IndexError, "invalid index"); - return NULL; -} - -static int -voidtype_ass_item(PyVoidScalarObject *self, Py_ssize_t n, PyObject *val) -{ - intp m; - PyObject *flist=NULL, *fieldinfo, *newtup; - PyObject *res; - - if (!(PyDescr_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return -1; - } - - flist = self->descr->names; - m = PyTuple_GET_SIZE(flist); - if (n < 0) { - n += m; - } - if (n < 0 || n >= m) { - 
goto fail; - } - fieldinfo = PyDict_GetItem(self->descr->fields, - PyTuple_GET_ITEM(flist, n)); - newtup = Py_BuildValue("(OOO)", val, - PyTuple_GET_ITEM(fieldinfo, 0), - PyTuple_GET_ITEM(fieldinfo, 1)); - res = voidtype_setfield(self, newtup, NULL); - Py_DECREF(newtup); - if (!res) { - return -1; - } - Py_DECREF(res); - return 0; - -fail: - PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); - return -1; -} - -static int -voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val) -{ - intp n; - char *msg = "invalid index"; - PyObject *fieldinfo, *newtup; - PyObject *res; - - if (!PyDescr_HASFIELDS(self->descr)) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return -1; - } - -#if defined(NPY_PY3K) - if (PyUString_Check(ind)) { -#else - if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { -#endif - /* look up in fields */ - fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) { - goto fail; - } - newtup = Py_BuildValue("(OOO)", val, - PyTuple_GET_ITEM(fieldinfo, 0), - PyTuple_GET_ITEM(fieldinfo, 1)); - res = voidtype_setfield(self, newtup, NULL); - Py_DECREF(newtup); - if (!res) { - return -1; - } - Py_DECREF(res); - return 0; - } - - /* try to convert it to a number */ - n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) { - goto fail; - } - return voidtype_ass_item(self, (Py_ssize_t)n, val); - -fail: - PyErr_SetString(PyExc_IndexError, msg); - return -1; -} - -static PyMappingMethods voidtype_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*mp_length*/ -#else - (inquiry)voidtype_length, /*mp_length*/ -#endif - (binaryfunc)voidtype_subscript, /*mp_subscript*/ - (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ -}; - - -static PySequenceMethods voidtype_as_sequence = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (ssizeargfunc)voidtype_item, /*sq_item*/ - 0, 
/*sq_slice*/ - (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ -#else - (inquiry)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (intargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ -#endif - 0, /* ssq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ -}; - - -static Py_ssize_t -gentype_getreadbuf(PyObject *self, Py_ssize_t segment, void **ptrptr) -{ - int numbytes; - PyArray_Descr *outcode; - - if (segment != 0) { - PyErr_SetString(PyExc_SystemError, - "Accessing non-existent array segment"); - return -1; - } - - outcode = PyArray_DescrFromScalar(self); - numbytes = outcode->elsize; - *ptrptr = (void *)scalar_value(self, outcode); - -#ifndef Py_UNICODE_WIDE - if (outcode->type_num == NPY_UNICODE) { - numbytes >>= 1; - } -#endif - Py_DECREF(outcode); - return numbytes; -} - -static Py_ssize_t -gentype_getsegcount(PyObject *self, Py_ssize_t *lenp) -{ - PyArray_Descr *outcode; - - outcode = PyArray_DescrFromScalar(self); - if (lenp) { - *lenp = outcode->elsize; -#ifndef Py_UNICODE_WIDE - if (outcode->type_num == NPY_UNICODE) { - *lenp >>= 1; - } -#endif - } - Py_DECREF(outcode); - return 1; -} - -static Py_ssize_t -gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr) -{ - if (PyArray_IsScalar(self, String) || - PyArray_IsScalar(self, Unicode)) { - return gentype_getreadbuf(self, segment, (void **)ptrptr); - } - else { - PyErr_SetString(PyExc_TypeError, - "Non-character array cannot be interpreted "\ - "as character buffer."); - return -1; - } -} - -#if PY_VERSION_HEX >= 0x02060000 - -static int -gentype_getbuffer(PyObject *self, Py_buffer *view, int flags) -{ - Py_ssize_t len; - void *buf; - - /* FIXME: XXX: the format is not implemented! 
-- this needs more work */ - - len = gentype_getreadbuf(self, 0, &buf); - return PyBuffer_FillInfo(view, self, buf, len, 1, flags); -} - -/* releasebuffer is not needed */ - -#endif - -static PyBufferProcs gentype_as_buffer = { -#if !defined(NPY_PY3K) - gentype_getreadbuf, /* bf_getreadbuffer*/ - NULL, /* bf_getwritebuffer*/ - gentype_getsegcount, /* bf_getsegcount*/ - gentype_getcharbuf, /* bf_getcharbuffer*/ -#endif -#if PY_VERSION_HEX >= 0x02060000 - gentype_getbuffer, /* bf_getbuffer */ - NULL, /* bf_releasebuffer */ -#endif -}; - - -#if defined(NPY_PY3K) -#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE -#define LEAFFLAGS Py_TPFLAGS_DEFAULT -#else -#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES -#define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES -#endif - -NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.generic", /* tp_name*/ - sizeof(PyObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* 
tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - -static void -void_dealloc(PyVoidScalarObject *v) -{ - if (v->flags & OWNDATA) { - PyDataMem_FREE(v->obval); - } - Py_XDECREF(v->descr); - Py_XDECREF(v->base); - Py_TYPE(v)->tp_free(v); -} - -static void -object_arrtype_dealloc(PyObject *v) -{ - Py_XDECREF(((PyObjectScalarObject *)v)->obval); - Py_TYPE(v)->tp_free(v); -} - -/* - * string and unicode inherit from Python Type first and so GET_ITEM - * is different to get to the Python Type. - * - * ok is a work-around for a bug in complex_new that doesn't allocate - * memory from the sub-types memory allocator. - */ - -#define _WORK(num) \ - if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) { \ - PyTypeObject *sup; \ - /* We are inheriting from a Python type as well so \ - give it first dibs on conversion */ \ - sup = (PyTypeObject *)PyTuple_GET_ITEM(type->tp_bases, num); \ - robj = sup->tp_new(type, args, kwds); \ - if (robj != NULL) goto finish; \ - if (PyTuple_GET_SIZE(args)!=1) return NULL; \ - PyErr_Clear(); \ - /* now do default conversion */ \ - } - -#define _WORK1 _WORK(1) -#define _WORKz _WORK(0) -#define _WORK0 - -/**begin repeat - * #name = byte, short, int, long, longlong, ubyte, ushort, uint, ulong, - * ulonglong, half, float, double, longdouble, cfloat, cdouble, - * clongdouble, string, unicode, object, datetime, timedelta# - * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, - * ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, - * CLONGDOUBLE, STRING, UNICODE, OBJECT, DATETIME, TIMEDELTA# - * #work = 0,0,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,z,z,0,0,0# - * #default = 0*17,1*2,2,0*2# - */ - -#define _NPY_UNUSED2_1 -#define _NPY_UNUSED2_z -#define _NPY_UNUSED2_0 NPY_UNUSED -#define _NPY_UNUSED1_0 -#define _NPY_UNUSED1_1 -#define _NPY_UNUSED1_2 NPY_UNUSED - -static PyObject * -@name@_arrtype_new(PyTypeObject 
*_NPY_UNUSED1_@default@(type), PyObject *args, PyObject *_NPY_UNUSED2_@work@(kwds)) -{ - PyObject *obj = NULL; - PyObject *robj; - PyObject *arr; - PyArray_Descr *typecode = NULL; -#if !(@default@ == 2) - int itemsize; - void *dest, *src; -#endif - - /* - * allow base-class (if any) to do conversion - * If successful, this will jump to finish: - */ - _WORK@work@ - - if (!PyArg_ParseTuple(args, "|O", &obj)) { - return NULL; - } - typecode = PyArray_DescrFromType(PyArray_@TYPE@); - /* - * typecode is new reference and stolen by - * PyArray_FromAny but not PyArray_Scalar - */ - if (obj == NULL) { -#if @default@ == 0 - char *mem = malloc(sizeof(npy_@name@)); - - memset(mem, 0, sizeof(npy_@name@)); - robj = PyArray_Scalar(mem, typecode, NULL); - free(mem); -#elif @default@ == 1 - robj = PyArray_Scalar(NULL, typecode, NULL); -#elif @default@ == 2 - Py_INCREF(Py_None); - robj = Py_None; -#endif - Py_DECREF(typecode); - goto finish; - } - - /* - * It is expected at this point that robj is a PyArrayScalar - * (even for Object Data Type) - */ - arr = PyArray_FromAny(obj, typecode, 0, 0, FORCECAST, NULL); - if ((arr == NULL) || (PyArray_NDIM(arr) > 0)) { - return arr; - } - /* 0-d array */ - robj = PyArray_ToScalar(PyArray_DATA(arr), (NPY_AO *)arr); - Py_DECREF(arr); - -finish: - /* - * In OBJECT case, robj is no longer a - * PyArrayScalar at this point but the - * remaining code assumes it is - */ -#if @default@ == 2 - return robj; -#else - /* Normal return */ - if ((robj == NULL) || (Py_TYPE(robj) == type)) { - return robj; - } - - /* - * This return path occurs when the requested type is not created - * but another scalar object is created instead (i.e. 
when - * the base-class does the conversion in _WORK macro) - */ - - /* Need to allocate new type and copy data-area over */ - if (type->tp_itemsize) { - itemsize = PyBytes_GET_SIZE(robj); - } - else { - itemsize = 0; - } - obj = type->tp_alloc(type, itemsize); - if (obj == NULL) { - Py_DECREF(robj); - return NULL; - } - /* typecode will be NULL */ - typecode = PyArray_DescrFromType(PyArray_@TYPE@); - dest = scalar_value(obj, typecode); - src = scalar_value(robj, typecode); - Py_DECREF(typecode); -#if @default@ == 0 - *((npy_@name@ *)dest) = *((npy_@name@ *)src); -#elif @default@ == 1 /* unicode and strings */ - if (itemsize == 0) { /* unicode */ - itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); - } - memcpy(dest, src, itemsize); - /* @default@ == 2 won't get here */ -#endif - Py_DECREF(robj); - return obj; -#endif -} -/**end repeat**/ - -#undef _WORK1 -#undef _WORKz -#undef _WORK0 -#undef _WORK - -/* bool->tp_new only returns Py_True or Py_False */ -static PyObject * -bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *NPY_UNUSED(kwds)) -{ - PyObject *obj = NULL; - PyObject *arr; - - if (!PyArg_ParseTuple(args, "|O", &obj)) { - return NULL; - } - if (obj == NULL) { - PyArrayScalar_RETURN_FALSE; - } - if (obj == Py_False) { - PyArrayScalar_RETURN_FALSE; - } - if (obj == Py_True) { - PyArrayScalar_RETURN_TRUE; - } - arr = PyArray_FROM_OTF(obj, PyArray_BOOL, FORCECAST); - if (arr && 0 == PyArray_NDIM(arr)) { - Bool val = *((Bool *)PyArray_DATA(arr)); - Py_DECREF(arr); - PyArrayScalar_RETURN_BOOL_FROM_LONG(val); - } - return PyArray_Return((PyArrayObject *)arr); -} - -static PyObject * -bool_arrtype_and(PyObject *a, PyObject *b) -{ - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { - PyArrayScalar_RETURN_BOOL_FROM_LONG - ((a == PyArrayScalar_True)&(b == PyArrayScalar_True)); - } - return PyGenericArrType_Type.tp_as_number->nb_and(a, b); -} - -static PyObject * -bool_arrtype_or(PyObject *a, PyObject *b) -{ - if 
(PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { - PyArrayScalar_RETURN_BOOL_FROM_LONG - ((a == PyArrayScalar_True)|(b == PyArrayScalar_True)); - } - return PyGenericArrType_Type.tp_as_number->nb_or(a, b); -} - -static PyObject * -bool_arrtype_xor(PyObject *a, PyObject *b) -{ - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { - PyArrayScalar_RETURN_BOOL_FROM_LONG - ((a == PyArrayScalar_True)^(b == PyArrayScalar_True)); - } - return PyGenericArrType_Type.tp_as_number->nb_xor(a, b); -} - -static int -bool_arrtype_nonzero(PyObject *a) -{ - return a == PyArrayScalar_True; -} - -#if PY_VERSION_HEX >= 0x02050000 -/**begin repeat - * #name = byte, short, int, long, ubyte, ushort, longlong, uint, ulong, - * ulonglong# - * #Name = Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, - * ULongLong# - * #type = PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, - * PyLong_FromUnsignedLongLong# - */ -static PyNumberMethods @name@_arrtype_as_number; -static PyObject * -@name@_index(PyObject *self) -{ - return @type@(PyArrayScalar_VAL(self, @Name@)); -} -/**end repeat**/ - -static PyObject * -bool_index(PyObject *a) -{ - return PyInt_FromLong(PyArrayScalar_VAL(a, Bool)); -} -#endif - -/* Arithmetic methods -- only so we can override &, |, ^. 
*/ -NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = { - 0, /* nb_add */ - 0, /* nb_subtract */ - 0, /* nb_multiply */ -#if defined(NPY_PY3K) -#else - 0, /* nb_divide */ -#endif - 0, /* nb_remainder */ - 0, /* nb_divmod */ - 0, /* nb_power */ - 0, /* nb_negative */ - 0, /* nb_positive */ - 0, /* nb_absolute */ - (inquiry)bool_arrtype_nonzero, /* nb_nonzero / nb_bool */ - 0, /* nb_invert */ - 0, /* nb_lshift */ - 0, /* nb_rshift */ - (binaryfunc)bool_arrtype_and, /* nb_and */ - (binaryfunc)bool_arrtype_xor, /* nb_xor */ - (binaryfunc)bool_arrtype_or, /* nb_or */ -#if defined(NPY_PY3K) -#else - 0, /* nb_coerce */ -#endif - 0, /* nb_int */ -#if defined(NPY_PY3K) - 0, /* nb_reserved */ -#else - 0, /* nb_long */ -#endif - 0, /* nb_float */ -#if defined(NPY_PY3K) -#else - 0, /* nb_oct */ - 0, /* nb_hex */ -#endif - /* Added in release 2.0 */ - 0, /* nb_inplace_add */ - 0, /* nb_inplace_subtract */ - 0, /* nb_inplace_multiply */ -#if defined(NPY_PY3K) -#else - 0, /* nb_inplace_divide */ -#endif - 0, /* nb_inplace_remainder */ - 0, /* nb_inplace_power */ - 0, /* nb_inplace_lshift */ - 0, /* nb_inplace_rshift */ - 0, /* nb_inplace_and */ - 0, /* nb_inplace_xor */ - 0, /* nb_inplace_or */ - /* Added in release 2.2 */ - /* The following require the Py_TPFLAGS_HAVE_CLASS flag */ - 0, /* nb_floor_divide */ - 0, /* nb_true_divide */ - 0, /* nb_inplace_floor_divide */ - 0, /* nb_inplace_true_divide */ - /* Added in release 2.5 */ -#if PY_VERSION_HEX >= 0x02050000 - 0, /* nb_index */ -#endif -}; - -static PyObject * -void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) -{ - PyObject *obj, *arr; - ulonglong memu = 1; - PyObject *new = NULL; - char *destptr; - - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - /* - * For a VOID scalar first see if obj is an integer or long - * and create new memory of that size (filled with 0) for the scalar - */ - if (PyLong_Check(obj) || PyInt_Check(obj) || - PyArray_IsScalar(obj, Integer) || - 
(PyArray_Check(obj) && PyArray_NDIM(obj)==0 && - PyArray_ISINTEGER(obj))) { -#if defined(NPY_PY3K) - new = Py_TYPE(obj)->tp_as_number->nb_int(obj); -#else - new = Py_TYPE(obj)->tp_as_number->nb_long(obj); -#endif - } - if (new && PyLong_Check(new)) { - PyObject *ret; - memu = PyLong_AsUnsignedLongLong(new); - Py_DECREF(new); - if (PyErr_Occurred() || (memu > MAX_INT)) { - PyErr_Clear(); - PyErr_Format(PyExc_OverflowError, - "size must be smaller than %d", - (int) MAX_INT); - return NULL; - } - destptr = PyDataMem_NEW((int) memu); - if (destptr == NULL) { - return PyErr_NoMemory(); - } - ret = type->tp_alloc(type, 0); - if (ret == NULL) { - PyDataMem_FREE(destptr); - return PyErr_NoMemory(); - } - ((PyVoidScalarObject *)ret)->obval = destptr; - Py_SIZE((PyVoidScalarObject *)ret) = (int) memu; - ((PyVoidScalarObject *)ret)->descr = - PyArray_DescrNewFromType(PyArray_VOID); - ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu; - ((PyVoidScalarObject *)ret)->flags = BEHAVED | OWNDATA; - ((PyVoidScalarObject *)ret)->base = NULL; - memset(destptr, '\0', (size_t) memu); - return ret; - } - - arr = PyArray_FROM_OTF(obj, PyArray_VOID, FORCECAST); - return PyArray_Return((PyArrayObject *)arr); -} - - -/**************** Define Hash functions ********************/ - -/**begin repeat - * #lname = bool,ubyte,ushort# - * #name = Bool,UByte, UShort# - */ -static long -@lname@_arrtype_hash(PyObject *obj) -{ - return (long)(((Py@name@ScalarObject *)obj)->obval); -} -/**end repeat**/ - -/**begin repeat - * #lname=byte,short,uint,ulong# - * #name=Byte,Short,UInt,ULong# - */ -static long -@lname@_arrtype_hash(PyObject *obj) -{ - long x = (long)(((Py@name@ScalarObject *)obj)->obval); - if (x == -1) { - x = -2; - } - return x; -} -/**end repeat**/ - -#if (SIZEOF_INT != SIZEOF_LONG) || defined(NPY_PY3K) -static long -int_arrtype_hash(PyObject *obj) -{ - long x = (long)(((PyIntScalarObject *)obj)->obval); - if (x == -1) { - x = -2; - } - return x; -} -#endif - -/**begin repeat - * 
#char = ,u# - * #Char = ,U# - * #ext = && (x >= LONG_MIN),# - */ -#if SIZEOF_LONG != SIZEOF_LONGLONG -/* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */ -static long -@char@longlong_arrtype_hash(PyObject *obj) -{ - long y; - @char@longlong x = (((Py@Char@LongLongScalarObject *)obj)->obval); - - if ((x <= LONG_MAX)@ext@) { - y = (long) x; - } - else { - union Mask { - long hashvals[2]; - @char@longlong v; - } both; - - both.v = x; - y = both.hashvals[0] + (1000003)*both.hashvals[1]; - } - if (y == -1) { - y = -2; - } - return y; -} -#else - -static long -@char@longlong_arrtype_hash(PyObject *obj) -{ - long x = (long)(((Py@Char@LongLongScalarObject *)obj)->obval); - if (x == -1) { - x = -2; - } - return x; -} - -#endif -/**end repeat**/ - - -/**begin repeat - * #lname=datetime, timedelta# - * #name=Datetime,Timedelta# - */ -#if SIZEOF_LONG==SIZEOF_DATETIME -static long -@lname@_arrtype_hash(PyObject *obj) -{ - long x = (long)(((Py@name@ScalarObject *)obj)->obval); - if (x == -1) { - x = -2; - } - return x; -} -#elif SIZEOF_LONGLONG==SIZEOF_DATETIME -static long -@lname@_arrtype_hash(PyObject *obj) -{ - long y; - longlong x = (((Py@name@ScalarObject *)obj)->obval); - - if ((x <= LONG_MAX)) { - y = (long) x; - } - else { - union Mask { - long hashvals[2]; - longlong v; - } both; - - both.v = x; - y = both.hashvals[0] + (1000003)*both.hashvals[1]; - } - if (y == -1) { - y = -2; - } - return y; -} -#endif -/**end repeat**/ - - - -/* Wrong thing to do for longdouble, but....*/ - -/**begin repeat - * #lname = float, longdouble# - * #name = Float, LongDouble# - */ -static long -@lname@_arrtype_hash(PyObject *obj) -{ - return _Py_HashDouble((double) ((Py@name@ScalarObject *)obj)->obval); -} - -/* borrowed from complex_hash */ -static long -c@lname@_arrtype_hash(PyObject *obj) -{ - long hashreal, hashimag, combined; - hashreal = _Py_HashDouble((double) - (((PyC@name@ScalarObject *)obj)->obval).real); - - if (hashreal == -1) { - return -1; - } - hashimag = _Py_HashDouble((double) - 
(((PyC@name@ScalarObject *)obj)->obval).imag); - if (hashimag == -1) { - return -1; - } - combined = hashreal + 1000003 * hashimag; - if (combined == -1) { - combined = -2; - } - return combined; -} -/**end repeat**/ - -static long -half_arrtype_hash(PyObject *obj) -{ - return _Py_HashDouble(npy_half_to_double(((PyHalfScalarObject *)obj)->obval)); -} - -static long -object_arrtype_hash(PyObject *obj) -{ - return PyObject_Hash(((PyObjectScalarObject *)obj)->obval); -} - -/* just hash the pointer */ -static long -void_arrtype_hash(PyObject *obj) -{ - return _Py_HashPointer((void *)(((PyVoidScalarObject *)obj)->obval)); -} - -/*object arrtype getattro and setattro */ -static PyObject * -object_arrtype_getattro(PyObjectScalarObject *obj, PyObject *attr) { - PyObject *res; - - /* first look in object and then hand off to generic type */ - - res = PyObject_GenericGetAttr(obj->obval, attr); - if (res) { - return res; - } - PyErr_Clear(); - return PyObject_GenericGetAttr((PyObject *)obj, attr); -} - -static int -object_arrtype_setattro(PyObjectScalarObject *obj, PyObject *attr, PyObject *val) { - int res; - /* first look in object and then hand off to generic type */ - - res = PyObject_GenericSetAttr(obj->obval, attr, val); - if (res >= 0) { - return res; - } - PyErr_Clear(); - return PyObject_GenericSetAttr((PyObject *)obj, attr, val); -} - -static PyObject * -object_arrtype_concat(PyObjectScalarObject *self, PyObject *other) -{ - return PySequence_Concat(self->obval, other); -} - -static Py_ssize_t -object_arrtype_length(PyObjectScalarObject *self) -{ - return PyObject_Length(self->obval); -} - -static PyObject * -object_arrtype_repeat(PyObjectScalarObject *self, Py_ssize_t count) -{ - return PySequence_Repeat(self->obval, count); -} - -static PyObject * -object_arrtype_subscript(PyObjectScalarObject *self, PyObject *key) -{ - return PyObject_GetItem(self->obval, key); -} - -static int -object_arrtype_ass_subscript(PyObjectScalarObject *self, PyObject *key, - PyObject 
*value) -{ - return PyObject_SetItem(self->obval, key, value); -} - -static int -object_arrtype_contains(PyObjectScalarObject *self, PyObject *ob) -{ - return PySequence_Contains(self->obval, ob); -} - -static PyObject * -object_arrtype_inplace_concat(PyObjectScalarObject *self, PyObject *o) -{ - return PySequence_InPlaceConcat(self->obval, o); -} - -static PyObject * -object_arrtype_inplace_repeat(PyObjectScalarObject *self, Py_ssize_t count) -{ - return PySequence_InPlaceRepeat(self->obval, count); -} - -static PySequenceMethods object_arrtype_as_sequence = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ -#else - (inquiry)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (intargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ -#endif -}; - -static PyMappingMethods object_arrtype_as_mapping = { -#if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, - (binaryfunc)object_arrtype_subscript, - (objobjargproc)object_arrtype_ass_subscript, -#else - (inquiry)object_arrtype_length, - (binaryfunc)object_arrtype_subscript, - (objobjargproc)object_arrtype_ass_subscript, -#endif -}; - -#if !defined(NPY_PY3K) -static Py_ssize_t -object_arrtype_getsegcount(PyObjectScalarObject *self, Py_ssize_t *lenp) -{ - Py_ssize_t newlen; - int cnt; - 
PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer; - - if (pb == NULL || - pb->bf_getsegcount == NULL || - (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) { - return 0; - } - if (lenp) { - *lenp = newlen; - } - return cnt; -} - -static Py_ssize_t -object_arrtype_getreadbuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr) -{ - PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer; - - if (pb == NULL || - pb->bf_getreadbuffer == NULL || - pb->bf_getsegcount == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a readable buffer object"); - return -1; - } - return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr); -} - -static Py_ssize_t -object_arrtype_getwritebuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr) -{ - PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer; - - if (pb == NULL || - pb->bf_getwritebuffer == NULL || - pb->bf_getsegcount == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a writeable buffer object"); - return -1; - } - return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr); -} - -static Py_ssize_t -object_arrtype_getcharbuf(PyObjectScalarObject *self, Py_ssize_t segment, - constchar **ptrptr) -{ - PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer; - - if (pb == NULL || - pb->bf_getcharbuffer == NULL || - pb->bf_getsegcount == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a character buffer object"); - return -1; - } - return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr); -} -#endif - -#if PY_VERSION_HEX >= 0x02060000 -static int -object_arrtype_getbuffer(PyObjectScalarObject *self, Py_buffer *view, int flags) -{ - PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer; - if (pb == NULL || pb->bf_getbuffer == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a readable buffer object"); - return -1; - } - return (*pb->bf_getbuffer)(self->obval, view, flags); -} - -static void -object_arrtype_releasebuffer(PyObjectScalarObject *self, Py_buffer *view) 
-{ - PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer; - if (pb == NULL) { - PyErr_SetString(PyExc_TypeError, - "expected a readable buffer object"); - return; - } - if (pb->bf_releasebuffer != NULL) { - (*pb->bf_releasebuffer)(self->obval, view); - } -} -#endif - -static PyBufferProcs object_arrtype_as_buffer = { -#if !defined(NPY_PY3K) -#if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)object_arrtype_getreadbuf, - (writebufferproc)object_arrtype_getwritebuf, - (segcountproc)object_arrtype_getsegcount, - (charbufferproc)object_arrtype_getcharbuf, -#else - (getreadbufferproc)object_arrtype_getreadbuf, - (getwritebufferproc)object_arrtype_getwritebuf, - (getsegcountproc)object_arrtype_getsegcount, - (getcharbufferproc)object_arrtype_getcharbuf, -#endif -#endif -#if PY_VERSION_HEX >= 0x02060000 - (getbufferproc)object_arrtype_getbuffer, - (releasebufferproc)object_arrtype_releasebuffer, -#endif -}; - -static PyObject * -object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds) -{ - return PyObject_Call(obj->obval, args, kwds); -} - -NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.object_", /* tp_name*/ - sizeof(PyObjectScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - (destructor)object_arrtype_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - &object_arrtype_as_sequence, /* tp_as_sequence */ - &object_arrtype_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)object_arrtype_call, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)object_arrtype_getattro, /* tp_getattro */ - (setattrofunc)object_arrtype_setattro, /* tp_setattro */ - &object_arrtype_as_buffer, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* 
tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - -static PyObject * -gen_arrtype_subscript(PyObject *self, PyObject *key) -{ - /* - * Only [...], [...,], [, ...], - * is allowed for indexing a scalar - * - * These return a new N-d array with a copy of - * the data where N is the number of None's in . - */ - PyObject *res, *ret; - int N; - - if (key == Py_Ellipsis || key == Py_None || - PyTuple_Check(key)) { - res = PyArray_FromScalar(self, NULL); - } - else { - PyErr_SetString(PyExc_IndexError, - "invalid index to scalar variable."); - return NULL; - } - if (key == Py_Ellipsis) { - return res; - } - if (key == Py_None) { - ret = add_new_axes_0d((PyArrayObject *)res, 1); - Py_DECREF(res); - return ret; - } - /* Must be a Tuple */ - N = count_new_axes_0d(key); - if (N < 0) { - return NULL; - } - ret = add_new_axes_0d((PyArrayObject *)res, N); - Py_DECREF(res); - return ret; -} - - -#define NAME_bool "bool" -#define NAME_void "void" -#if defined(NPY_PY3K) -#define NAME_string "bytes" -#define NAME_unicode "str" -#else -#define NAME_string "string" -#define NAME_unicode "unicode" -#endif - -/**begin repeat - * #name = bool, string, unicode, void# - * #NAME = Bool, String, Unicode, Void# - * #ex = _,_,_,# - */ -NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy." 
NAME_@name@ "@ex@", /* tp_name*/ - sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; -/**end repeat**/ - -#undef NAME_bool -#undef NAME_void -#undef NAME_string -#undef NAME_unicode - -/**begin repeat - * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, - * ULongLong, Half, Float, Double, LongDouble, Datetime, Timedelta# - * #name = int*5, uint*5, float*4, datetime, timedelta# - * #CNAME = (CHAR, SHORT, INT, LONG, LONGLONG)*2, HALF, FLOAT, DOUBLE, - * LONGDOUBLE, DATETIME, TIMEDELTA# - */ -#if BITSOF_@CNAME@ == 8 -#define _THIS_SIZE "8" -#elif BITSOF_@CNAME@ == 16 -#define _THIS_SIZE "16" -#elif BITSOF_@CNAME@ == 32 -#define _THIS_SIZE "32" -#elif BITSOF_@CNAME@ == 64 -#define _THIS_SIZE "64" -#elif BITSOF_@CNAME@ == 80 -#define _THIS_SIZE "80" -#elif BITSOF_@CNAME@ == 96 -#define _THIS_SIZE "96" -#elif BITSOF_@CNAME@ == 128 -#define _THIS_SIZE "128" -#elif BITSOF_@CNAME@ == 256 -#define _THIS_SIZE "256" 
-#endif -NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.@name@" _THIS_SIZE, /* tp_name*/ - sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - -#undef _THIS_SIZE -/**end repeat**/ - - -static PyMappingMethods gentype_as_mapping = { - NULL, - (binaryfunc)gen_arrtype_subscript, - NULL -}; - - -/**begin repeat - * #NAME = CFloat, CDouble, CLongDouble# - * #name = complex*3# - * #CNAME = FLOAT, DOUBLE, LONGDOUBLE# - */ -#if BITSOF_@CNAME@ == 16 -#define _THIS_SIZE2 "16" -#define _THIS_SIZE1 "32" -#elif BITSOF_@CNAME@ == 32 -#define _THIS_SIZE2 "32" -#define _THIS_SIZE1 "64" -#elif BITSOF_@CNAME@ == 64 -#define _THIS_SIZE2 "64" -#define _THIS_SIZE1 "128" -#elif BITSOF_@CNAME@ == 80 -#define _THIS_SIZE2 "80" -#define _THIS_SIZE1 "160" -#elif BITSOF_@CNAME@ == 96 -#define _THIS_SIZE2 "96" -#define 
_THIS_SIZE1 "192" -#elif BITSOF_@CNAME@ == 128 -#define _THIS_SIZE2 "128" -#define _THIS_SIZE1 "256" -#elif BITSOF_@CNAME@ == 256 -#define _THIS_SIZE2 "256" -#define _THIS_SIZE1 "512" -#endif - -#define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats" - -NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(0, 0) -#else - PyObject_HEAD_INIT(0) - 0, /* ob_size */ -#endif - "numpy.@name@" _THIS_SIZE1, /* tp_name*/ - sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/ - 0, /* tp_itemsize*/ - 0, /* tp_dealloc*/ - 0, /* tp_print*/ - 0, /* tp_getattr*/ - 0, /* tp_setattr*/ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - 0, /* tp_repr*/ - 0, /* tp_as_number*/ - 0, /* tp_as_sequence*/ - 0, /* tp_as_mapping*/ - 0, /* tp_hash */ - 0, /* tp_call*/ - 0, /* tp_str*/ - 0, /* tp_getattro*/ - 0, /* tp_setattro*/ - 0, /* tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /* tp_flags*/ - _THIS_DOC, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; -#undef _THIS_SIZE1 -#undef _THIS_SIZE2 -#undef _THIS_DOC - -/**end repeat**/ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -/* - * This table maps the built-in type numbers to their scalar - * type numbers. Note that signed integers are mapped to INTNEG_SCALAR, - * which is different than what PyArray_ScalarKind returns. 
- */ -NPY_NO_EXPORT signed char -_npy_scalar_kinds_table[NPY_NTYPES]; - -/* - * This table maps a scalar kind (excluding NPY_NOSCALAR) - * to the smallest type number of that kind. - */ -NPY_NO_EXPORT signed char -_npy_smallest_type_of_kind_table[NPY_NSCALARKINDS]; - -/* - * This table gives the type of the same kind, but next in the sequence - * of sizes. - */ -NPY_NO_EXPORT signed char -_npy_next_larger_type_table[NPY_NTYPES]; - -/* - * This table describes safe casting for small type numbers, - * and is used by PyArray_CanCastSafely. - */ -NPY_NO_EXPORT unsigned char -_npy_can_cast_safely_table[NPY_NTYPES][NPY_NTYPES]; - -/* - * This table gives the smallest-size and smallest-kind type to which - * the input types may be safely cast, according to _npy_can_cast_safely. - */ -NPY_NO_EXPORT signed char -_npy_type_promotion_table[NPY_NTYPES][NPY_NTYPES]; -#endif - -NPY_NO_EXPORT void -initialize_casting_tables(void) -{ - int i, j; - - _npy_smallest_type_of_kind_table[NPY_BOOL_SCALAR] = NPY_BOOL; - _npy_smallest_type_of_kind_table[NPY_INTPOS_SCALAR] = NPY_UBYTE; - _npy_smallest_type_of_kind_table[NPY_INTNEG_SCALAR] = NPY_BYTE; - _npy_smallest_type_of_kind_table[NPY_FLOAT_SCALAR] = NPY_HALF; - _npy_smallest_type_of_kind_table[NPY_COMPLEX_SCALAR] = NPY_CFLOAT; - _npy_smallest_type_of_kind_table[NPY_OBJECT_SCALAR] = NPY_OBJECT; - - /* Default for built-in types is object scalar */ - memset(_npy_scalar_kinds_table, PyArray_OBJECT_SCALAR, - sizeof(_npy_scalar_kinds_table)); - /* Default for next largest type is -1, signalling no bigger */ - memset(_npy_next_larger_type_table, -1, - sizeof(_npy_next_larger_type_table)); - - /* Compile-time loop of scalar kinds */ -/**begin repeat - * #NAME = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #BIGGERTYPE = -1, NPY_SHORT, NPY_USHORT, NPY_INT, NPY_UINT, - * NPY_LONG, NPY_ULONG, NPY_LONGLONG, NPY_ULONGLONG, - * -1, -1, 
NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, -1, - * NPY_CDOUBLE, NPY_CLONGDOUBLE, -1# - * #SCKIND = BOOL, (INTNEG, INTPOS)*5, FLOAT*4, - * COMPLEX*3# - */ - _npy_scalar_kinds_table[NPY_@NAME@] = PyArray_@SCKIND@_SCALAR; - _npy_next_larger_type_table[NPY_@NAME@] = @BIGGERTYPE@; -/**end repeat**/ - - memset(_npy_can_cast_safely_table, 0, sizeof(_npy_can_cast_safely_table)); - - for (i = 0; i < NPY_NTYPES; ++i) { - /* Identity */ - _npy_can_cast_safely_table[i][i] = 1; - /* Bool -> */ - _npy_can_cast_safely_table[NPY_BOOL][i] = 1; - /* DateTime sits out for these... */ - if (i != PyArray_DATETIME && i != PyArray_TIMEDELTA) { - /* -> Object */ - _npy_can_cast_safely_table[i][NPY_OBJECT] = 1; - /* -> Void */ - _npy_can_cast_safely_table[i][NPY_VOID] = 1; - } - } - - _npy_can_cast_safely_table[NPY_STRING][NPY_UNICODE] = 1; - -#ifndef NPY_SIZEOF_BYTE -#define NPY_SIZEOF_BYTE 1 -#endif - - /* Compile-time loop of casting rules */ -/**begin repeat - * #FROM_NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #FROM_BASENAME = BYTE, BYTE, SHORT, SHORT, INT, INT, LONG, LONG, - * LONGLONG, LONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * FLOAT, DOUBLE, LONGDOUBLE# - * #from_isint = 1, 0, 1, 0, 1, 0, 1, 0, - * 1, 0, 0, 0, 0, 0, - * 0, 0, 0# - * #from_isuint = 0, 1, 0, 1, 0, 1, 0, 1, - * 0, 1, 0, 0, 0, 0, - * 0, 0, 0# - * #from_isfloat = 0, 0, 0, 0, 0, 0, 0, 0, - * 0, 0, 1, 1, 1, 1, - * 0, 0, 0# - * #from_iscomplex = 0, 0, 0, 0, 0, 0, 0, 0, - * 0, 0, 0, 0, 0, 0, - * 1, 1, 1# - */ -#define _FROM_BSIZE NPY_SIZEOF_@FROM_BASENAME@ -#define _FROM_NUM (NPY_@FROM_NAME@) - - _npy_can_cast_safely_table[_FROM_NUM][PyArray_STRING] = 1; - _npy_can_cast_safely_table[_FROM_NUM][PyArray_UNICODE] = 1; - -/**begin repeat1 - * #TO_NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# - * #TO_BASENAME = BYTE, 
BYTE, SHORT, SHORT, INT, INT, LONG, LONG, - * LONGLONG, LONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * FLOAT, DOUBLE, LONGDOUBLE# - * #to_isint = 1, 0, 1, 0, 1, 0, 1, 0, - * 1, 0, 0, 0, 0, 0, - * 0, 0, 0# - * #to_isuint = 0, 1, 0, 1, 0, 1, 0, 1, - * 0, 1, 0, 0, 0, 0, - * 0, 0, 0# - * #to_isfloat = 0, 0, 0, 0, 0, 0, 0, 0, - * 0, 0, 1, 1, 1, 1, - * 0, 0, 0# - * #to_iscomplex = 0, 0, 0, 0, 0, 0, 0, 0, - * 0, 0, 0, 0, 0, 0, - * 1, 1, 1# - */ -#define _TO_BSIZE NPY_SIZEOF_@TO_BASENAME@ -#define _TO_NUM (NPY_@TO_NAME@) - -/* - * NOTE: _FROM_BSIZE and _TO_BSIZE are the sizes of the "base type" - * which is the same as the size of the type except for - * complex, where it is the size of the real type. - */ - -#if @from_isint@ - -# if @to_isint@ && (_TO_BSIZE >= _FROM_BSIZE) - /* int -> int */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_isfloat@ && (_FROM_BSIZE < 8) && (_TO_BSIZE > _FROM_BSIZE) - /* int -> float */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_isfloat@ && (_FROM_BSIZE >= 8) && (_TO_BSIZE >= _FROM_BSIZE) - /* int -> float */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_iscomplex@ && (_FROM_BSIZE < 8) && (_TO_BSIZE > _FROM_BSIZE) - /* int -> complex */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_iscomplex@ && (_FROM_BSIZE >= 8) && (_TO_BSIZE >= _FROM_BSIZE) - /* int -> complex */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# endif - -#elif @from_isuint@ - -# if @to_isint@ && (_TO_BSIZE > _FROM_BSIZE) - /* uint -> int */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_isuint@ && (_TO_BSIZE >= _FROM_BSIZE) - /* uint -> uint */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_isfloat@ && (_FROM_BSIZE < 8) && (_TO_BSIZE > _FROM_BSIZE) - /* uint -> float */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_isfloat@ && (_FROM_BSIZE >= 8) && (_TO_BSIZE >= _FROM_BSIZE) - /* uint -> float */ - 
_npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_iscomplex@ && (_FROM_BSIZE < 8) && (_TO_BSIZE > _FROM_BSIZE) - /* uint -> complex */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_iscomplex@ && (_FROM_BSIZE >= 8) && (_TO_BSIZE >= _FROM_BSIZE) - /* uint -> complex */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# endif - - -#elif @from_isfloat@ - -# if @to_isfloat@ && (_TO_BSIZE >= _FROM_BSIZE) - /* float -> float */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# elif @to_iscomplex@ && (_TO_BSIZE >= _FROM_BSIZE) - /* float -> complex */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# endif - -#elif @from_iscomplex@ - -# if @to_iscomplex@ && (_TO_BSIZE >= _FROM_BSIZE) - /* complex -> complex */ - _npy_can_cast_safely_table[_FROM_NUM][_TO_NUM] = 1; -# endif - -#endif - -#undef _TO_NUM -#undef _TO_BSIZE - -/**end repeat1**/ - -#undef _FROM_NUM -#undef _FROM_BSIZE - -/**end repeat**/ - - /* - * Now that the _can_cast_safely table is finished, we can - * use it to build the _type_promotion table - */ - for (i = 0; i < NPY_NTYPES; ++i) { - _npy_type_promotion_table[i][i] = i; - /* Don't let number promote to string/unicode/void */ - if (i == NPY_STRING || i == NPY_UNICODE || i == NPY_VOID) { - /* Promoting these types requires examining their contents */ - _npy_type_promotion_table[i][i] = -1; - for (j = i+1; j < NPY_NTYPES; ++j) { - _npy_type_promotion_table[i][j] = -1; - _npy_type_promotion_table[j][i] = -1; - } - /* Except they can convert to OBJECT */ - _npy_type_promotion_table[i][NPY_OBJECT] = NPY_OBJECT; - _npy_type_promotion_table[NPY_OBJECT][i] = NPY_OBJECT; - } - else { - for (j = i+1; j < NPY_NTYPES; ++j) { - /* Don't let number promote to string/unicode/void */ - if (j == NPY_STRING || j == NPY_UNICODE || j == NPY_VOID) { - _npy_type_promotion_table[i][j] = -1; - _npy_type_promotion_table[j][i] = -1; - } - else if (_npy_can_cast_safely_table[i][j]) { - _npy_type_promotion_table[i][j] = j; - 
_npy_type_promotion_table[j][i] = j; - } - else if (_npy_can_cast_safely_table[j][i]) { - _npy_type_promotion_table[i][j] = i; - _npy_type_promotion_table[j][i] = i; - } - else { - int k, iskind, jskind, skind; - iskind = _npy_scalar_kinds_table[i]; - jskind = _npy_scalar_kinds_table[j]; - /* If there's no kind (void/string/etc) */ - if (iskind == NPY_NOSCALAR || jskind == NPY_NOSCALAR) { - k = -1; - } - else { - /* Start with the type of larger kind */ - if (iskind > jskind) { - skind = iskind; - k = i; - } - else { - skind = jskind; - k = j; - } - for (;;) { - /* Try the next larger type of this kind */ - k = _npy_next_larger_type_table[k]; - - /* If there is no larger, try a larger kind */ - if (k < 0) { - ++skind; - /* Use -1 to signal no promoted type found */ - if (skind < NPY_NSCALARKINDS) { - k = _npy_smallest_type_of_kind_table[skind]; - } - else { - k = -1; - break; - } - } - - if (_npy_can_cast_safely_table[i][k] && - _npy_can_cast_safely_table[j][k]) { - break; - } - } - } - _npy_type_promotion_table[i][j] = k; - _npy_type_promotion_table[j][i] = k; - } - } - } - } - /* Special case date-time */ - _npy_type_promotion_table[NPY_DATETIME][NPY_TIMEDELTA] = NPY_DATETIME; - _npy_type_promotion_table[NPY_TIMEDELTA][NPY_DATETIME] = NPY_DATETIME; -} - - -static PyNumberMethods longdoubletype_as_number; -static PyNumberMethods clongdoubletype_as_number; - - -NPY_NO_EXPORT void -initialize_numeric_types(void) -{ - PyGenericArrType_Type.tp_dealloc = (destructor)gentype_dealloc; - PyGenericArrType_Type.tp_as_number = &gentype_as_number; - PyGenericArrType_Type.tp_as_buffer = &gentype_as_buffer; - PyGenericArrType_Type.tp_as_mapping = &gentype_as_mapping; - PyGenericArrType_Type.tp_flags = BASEFLAGS; - PyGenericArrType_Type.tp_methods = gentype_methods; - PyGenericArrType_Type.tp_getset = gentype_getsets; - PyGenericArrType_Type.tp_new = NULL; - PyGenericArrType_Type.tp_alloc = gentype_alloc; - PyGenericArrType_Type.tp_free = _pya_free; - 
PyGenericArrType_Type.tp_repr = gentype_repr; - PyGenericArrType_Type.tp_str = gentype_str; - PyGenericArrType_Type.tp_richcompare = gentype_richcompare; - - PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number; -#if PY_VERSION_HEX >= 0x02050000 - /* - * need to add dummy versions with filled-in nb_index - * in-order for PyType_Ready to fill in .__index__() method - */ - /**begin repeat - * #name = byte, short, int, long, longlong, ubyte, ushort, - * uint, ulong, ulonglong# - * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, - * UInt, ULong, ULongLong# - */ - Py@NAME@ArrType_Type.tp_as_number = &@name@_arrtype_as_number; - Py@NAME@ArrType_Type.tp_as_number->nb_index = (unaryfunc)@name@_index; - - /**end repeat**/ - PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index; -#endif - - PyStringArrType_Type.tp_alloc = NULL; - PyStringArrType_Type.tp_free = NULL; - - PyStringArrType_Type.tp_repr = stringtype_repr; - PyStringArrType_Type.tp_str = stringtype_str; - - PyUnicodeArrType_Type.tp_repr = unicodetype_repr; - PyUnicodeArrType_Type.tp_str = unicodetype_str; - - PyVoidArrType_Type.tp_methods = voidtype_methods; - PyVoidArrType_Type.tp_getset = voidtype_getsets; - PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping; - PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; - - /**begin repeat - * #NAME= Number, Integer, SignedInteger, UnsignedInteger, Inexact, - * Floating, ComplexFloating, Flexible, Character, TimeInteger# - */ - Py@NAME@ArrType_Type.tp_flags = BASEFLAGS; - /**end repeat**/ - - /**begin repeat - * #name = bool, byte, short, int, long, longlong, ubyte, ushort, uint, - * ulong, ulonglong, half, float, double, longdouble, cfloat, - * cdouble, clongdouble, string, unicode, void, object, datetime, - * timedelta# - * #NAME = Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, - * ULong, ULongLong, Half, Float, Double, LongDouble, CFloat, - * CDouble, CLongDouble, String, Unicode, Void, Object, Datetime, - * 
Timedelta# - */ - Py@NAME@ArrType_Type.tp_flags = BASEFLAGS; - Py@NAME@ArrType_Type.tp_new = @name@_arrtype_new; - Py@NAME@ArrType_Type.tp_richcompare = gentype_richcompare; - /**end repeat**/ - - /**begin repeat - * #name = bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, - * half, float, longdouble, cfloat, clongdouble, void, object, - * datetime, timedelta# - * #NAME = Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, - * Half, Float, LongDouble, CFloat, CLongDouble, Void, Object, - * Datetime, Timedelta# - */ - Py@NAME@ArrType_Type.tp_hash = @name@_arrtype_hash; - /**end repeat**/ - - /**begin repeat - * #name = cfloat, clongdouble# - * #NAME = CFloat, CLongDouble# - */ - Py@NAME@ArrType_Type.tp_methods = @name@type_methods; - /**end repeat**/ - -#if (SIZEOF_INT != SIZEOF_LONG) || defined(NPY_PY3K) - /* We won't be inheriting from Python Int type. */ - PyIntArrType_Type.tp_hash = int_arrtype_hash; -#endif - -#if defined(NPY_PY3K) - /* We won't be inheriting from Python Int type. */ - PyLongArrType_Type.tp_hash = int_arrtype_hash; -#endif - -#if (SIZEOF_LONG != SIZEOF_LONGLONG) || defined(NPY_PY3K) - /* We won't be inheriting from Python Int type. 
*/ - PyLongLongArrType_Type.tp_hash = longlong_arrtype_hash; -#endif - - /**begin repeat - * #name = repr, str# - */ - PyHalfArrType_Type.tp_@name@ = halftype_@name@; - - PyFloatArrType_Type.tp_@name@ = floattype_@name@; - PyCFloatArrType_Type.tp_@name@ = cfloattype_@name@; - - PyDoubleArrType_Type.tp_@name@ = doubletype_@name@; - PyCDoubleArrType_Type.tp_@name@ = cdoubletype_@name@; - /**end repeat**/ - - PyHalfArrType_Type.tp_print = halftype_print; - PyFloatArrType_Type.tp_print = floattype_print; - PyDoubleArrType_Type.tp_print = doubletype_print; - PyLongDoubleArrType_Type.tp_print = longdoubletype_print; - - PyCFloatArrType_Type.tp_print = cfloattype_print; - PyCDoubleArrType_Type.tp_print = cdoubletype_print; - PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; - - /* - * These need to be coded specially because getitem does not - * return a normal Python type - */ - PyLongDoubleArrType_Type.tp_as_number = &longdoubletype_as_number; - PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number; - - /**begin repeat - * #name = int, float, repr, str# - * #kind = tp_as_number->nb*2, tp*2# - */ - PyLongDoubleArrType_Type.@kind@_@name@ = longdoubletype_@name@; - PyCLongDoubleArrType_Type.@kind@_@name@ = clongdoubletype_@name@; - /**end repeat**/ - -#if !defined(NPY_PY3K) - /**begin repeat - * #name = long, hex, oct# - * #kind = tp_as_number->nb*3# - */ - PyLongDoubleArrType_Type.@kind@_@name@ = longdoubletype_@name@; - PyCLongDoubleArrType_Type.@kind@_@name@ = clongdoubletype_@name@; - /**end repeat**/ - -#endif - - PyStringArrType_Type.tp_itemsize = sizeof(char); - PyVoidArrType_Type.tp_dealloc = (destructor) void_dealloc; - - PyArrayIter_Type.tp_iter = PyObject_SelfIter; - PyArrayMapIter_Type.tp_iter = PyObject_SelfIter; -} - - -/* the order of this table is important */ -static PyTypeObject *typeobjects[] = { - &PyBoolArrType_Type, - &PyByteArrType_Type, - &PyUByteArrType_Type, - &PyShortArrType_Type, - &PyUShortArrType_Type, - 
&PyIntArrType_Type, - &PyUIntArrType_Type, - &PyLongArrType_Type, - &PyULongArrType_Type, - &PyLongLongArrType_Type, - &PyULongLongArrType_Type, - &PyFloatArrType_Type, - &PyDoubleArrType_Type, - &PyLongDoubleArrType_Type, - &PyCFloatArrType_Type, - &PyCDoubleArrType_Type, - &PyCLongDoubleArrType_Type, - &PyObjectArrType_Type, - &PyStringArrType_Type, - &PyUnicodeArrType_Type, - &PyVoidArrType_Type, - &PyDatetimeArrType_Type, - &PyTimedeltaArrType_Type, - &PyHalfArrType_Type -}; - -NPY_NO_EXPORT int -_typenum_fromtypeobj(PyObject *type, int user) -{ - int typenum, i; - - typenum = PyArray_NOTYPE; - i = 0; - while(i < PyArray_NTYPES) { - if (type == (PyObject *)typeobjects[i]) { - typenum = i; - break; - } - i++; - } - - if (!user) { - return typenum; - } - /* Search any registered types */ - i = 0; - while (i < PyArray_NUMUSERTYPES) { - if (type == (PyObject *)(userdescrs[i]->typeobj)) { - typenum = i + PyArray_USERDEF; - break; - } - i++; - } - return typenum; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/scalartypes.h b/numpy-1.6.2/numpy/core/src/multiarray/scalartypes.h deleted file mode 100644 index 7397a97e0f..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/scalartypes.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef _NPY_SCALARTYPES_H_ -#define _NPY_SCALARTYPES_H_ - -/* Internal look-up tables */ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT unsigned char -_npy_can_cast_safely_table[NPY_NTYPES][NPY_NTYPES]; -extern NPY_NO_EXPORT signed char -_npy_scalar_kinds_table[NPY_NTYPES]; -extern NPY_NO_EXPORT signed char -_npy_type_promotion_table[NPY_NTYPES][NPY_NTYPES]; -extern NPY_NO_EXPORT signed char -_npy_smallest_type_of_kind_table[NPY_NSCALARKINDS]; -extern NPY_NO_EXPORT signed char -_npy_next_larger_type_table[NPY_NTYPES]; -#else -NPY_NO_EXPORT unsigned char -_npy_can_cast_safely_table[NPY_NTYPES][NPY_NTYPES]; -NPY_NO_EXPORT signed char -_npy_scalar_kinds_table[NPY_NTYPES]; -NPY_NO_EXPORT signed char 
-_npy_type_promotion_table[NPY_NTYPES][NPY_NTYPES]; -NPY_NO_EXPORT signed char -_npy_smallest_type_of_kind_table[NPY_NSCALARKINDS]; -NPY_NO_EXPORT signed char -_npy_next_larger_type_table[NPY_NTYPES]; -#endif - -NPY_NO_EXPORT void -initialize_casting_tables(void); - -NPY_NO_EXPORT void -initialize_numeric_types(void); - -NPY_NO_EXPORT void -format_longdouble(char *buf, size_t buflen, longdouble val, unsigned int prec); - -#if PY_VERSION_HEX >= 0x03000000 -NPY_NO_EXPORT void -gentype_struct_free(PyObject *ptr); -#else -NPY_NO_EXPORT void -gentype_struct_free(void *ptr, void *arg); -#endif - -NPY_NO_EXPORT int -_typenum_fromtypeobj(PyObject *type, int user); - -NPY_NO_EXPORT void * -scalar_value(PyObject *scalar, PyArray_Descr *descr); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/sequence.c b/numpy-1.6.2/numpy/core/src/multiarray/sequence.c deleted file mode 100644 index dd2ea48eb9..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/sequence.c +++ /dev/null @@ -1,184 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "common.h" -#include "mapping.h" - -#include "sequence.h" - -static int -array_any_nonzero(PyArrayObject *mp); - -/************************************************************************* - **************** Implement Sequence Protocol ************************** - *************************************************************************/ - -/* Some of this is repeated in the array_as_mapping protocol. 
But - we fill it in here so that PySequence_XXXX calls work as expected -*/ - - -static PyObject * -array_slice(PyArrayObject *self, Py_ssize_t ilow, - Py_ssize_t ihigh) -{ - PyArrayObject *r; - Py_ssize_t l; - char *data; - - if (self->nd == 0) { - PyErr_SetString(PyExc_ValueError, "cannot slice a 0-d array"); - return NULL; - } - - l=self->dimensions[0]; - if (ilow < 0) { - ilow = 0; - } - else if (ilow > l) { - ilow = l; - } - if (ihigh < ilow) { - ihigh = ilow; - } - else if (ihigh > l) { - ihigh = l; - } - - if (ihigh != ilow) { - data = index2ptr(self, ilow); - if (data == NULL) { - return NULL; - } - } - else { - data = self->data; - } - - self->dimensions[0] = ihigh-ilow; - Py_INCREF(self->descr); - r = (PyArrayObject *) \ - PyArray_NewFromDescr(Py_TYPE(self), self->descr, - self->nd, self->dimensions, - self->strides, data, - self->flags, (PyObject *)self); - self->dimensions[0] = l; - if (r == NULL) { - return NULL; - } - r->base = (PyObject *)self; - Py_INCREF(self); - PyArray_UpdateFlags(r, UPDATE_ALL); - return (PyObject *)r; -} - - -static int -array_ass_slice(PyArrayObject *self, Py_ssize_t ilow, - Py_ssize_t ihigh, PyObject *v) { - int ret; - PyArrayObject *tmp; - - if (v == NULL) { - PyErr_SetString(PyExc_ValueError, - "cannot delete array elements"); - return -1; - } - if (!PyArray_ISWRITEABLE(self)) { - PyErr_SetString(PyExc_RuntimeError, - "array is not writeable"); - return -1; - } - if ((tmp = (PyArrayObject *)array_slice(self, ilow, ihigh)) == NULL) { - return -1; - } - ret = PyArray_CopyObject(tmp, v); - Py_DECREF(tmp); - - return ret; -} - -static int -array_contains(PyArrayObject *self, PyObject *el) -{ - /* equivalent to (self == el).any() */ - - PyObject *res; - int ret; - - res = PyArray_EnsureAnyArray(PyObject_RichCompare((PyObject *)self, - el, Py_EQ)); - if (res == NULL) { - return -1; - } - ret = array_any_nonzero((PyArrayObject *)res); - Py_DECREF(res); - return ret; -} - -NPY_NO_EXPORT PySequenceMethods array_as_sequence = { -#if 
PY_VERSION_HEX >= 0x02050000 - (lenfunc)array_length, /*sq_length*/ - (binaryfunc)NULL, /*sq_concat is handled by nb_add*/ - (ssizeargfunc)NULL, - (ssizeargfunc)array_item_nice, - (ssizessizeargfunc)array_slice, - (ssizeobjargproc)array_ass_item, /*sq_ass_item*/ - (ssizessizeobjargproc)array_ass_slice, /*sq_ass_slice*/ - (objobjproc) array_contains, /*sq_contains */ - (binaryfunc) NULL, /*sg_inplace_concat */ - (ssizeargfunc)NULL, -#else - (inquiry)array_length, /*sq_length*/ - (binaryfunc)NULL, /*sq_concat is handled by nb_add*/ - (intargfunc)NULL, /*sq_repeat is handled nb_multiply*/ - (intargfunc)array_item_nice, /*sq_item*/ - (intintargfunc)array_slice, /*sq_slice*/ - (intobjargproc)array_ass_item, /*sq_ass_item*/ - (intintobjargproc)array_ass_slice, /*sq_ass_slice*/ - (objobjproc) array_contains, /*sq_contains */ - (binaryfunc) NULL, /*sg_inplace_concat */ - (intargfunc) NULL /*sg_inplace_repeat */ -#endif -}; - - -/****************** End of Sequence Protocol ****************************/ - -/* - * Helpers - */ - -/* Array evaluates as "TRUE" if any of the elements are non-zero*/ -static int -array_any_nonzero(PyArrayObject *mp) -{ - intp index; - PyArrayIterObject *it; - Bool anyTRUE = FALSE; - - it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it == NULL) { - return anyTRUE; - } - index = it->size; - while(index--) { - if (mp->descr->f->nonzero(it->dataptr, mp)) { - anyTRUE = TRUE; - break; - } - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); - return anyTRUE; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/sequence.h b/numpy-1.6.2/numpy/core/src/multiarray/sequence.h deleted file mode 100644 index 321c0200fc..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/sequence.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _NPY_ARRAY_SEQUENCE_H_ -#define _NPY_ARRAY_SEQUENCE_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PySequenceMethods array_as_sequence; -#else -NPY_NO_EXPORT PySequenceMethods array_as_sequence; -#endif - 
-#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/shape.c b/numpy-1.6.2/numpy/core/src/multiarray/shape.c deleted file mode 100644 index 1672ce394c..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/shape.c +++ /dev/null @@ -1,926 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -#include "ctors.h" - -#include "shape.h" - -#define PyAO PyArrayObject - -static int -_check_ones(PyArrayObject *self, int newnd, intp* newdims, intp *strides); - -static int -_fix_unknown_dimension(PyArray_Dims *newshape, intp s_original); - -static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, intp* newdims, - intp *newstrides, int fortran); - -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); - -/*NUMPY_API - * Resize (reallocate data). Only works if nothing else is referencing this - * array and it is contiguous. If refcheck is 0, then the reference count is - * not checked and assumed to be 1. You still must own this data and have no - * weak-references and no base object. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER fortran) -{ - intp oldsize, newsize; - int new_nd=newshape->len, k, n, elsize; - int refcnt; - intp* new_dimensions=newshape->ptr; - intp new_strides[MAX_DIMS]; - size_t sd; - intp *dimptr; - char *new_data; - intp largest; - - if (!PyArray_ISONESEGMENT(self)) { - PyErr_SetString(PyExc_ValueError, - "resize only works on single-segment arrays"); - return NULL; - } - - if (self->descr->elsize == 0) { - PyErr_SetString(PyExc_ValueError, - "Bad data-type size."); - return NULL; - } - newsize = 1; - largest = MAX_INTP / self->descr->elsize; - for(k = 0; k < new_nd; k++) { - if (new_dimensions[k] == 0) { - break; - } - if (new_dimensions[k] < 0) { - PyErr_SetString(PyExc_ValueError, - "negative dimensions not allowed"); - return NULL; - } - newsize *= new_dimensions[k]; - if (newsize <= 0 || newsize > largest) { - return PyErr_NoMemory(); - } - } - oldsize = PyArray_SIZE(self); - - if (oldsize != newsize) { - if (!(self->flags & OWNDATA)) { - PyErr_SetString(PyExc_ValueError, - "cannot resize this array: it does not own its data"); - return NULL; - } - - if (refcheck) { - refcnt = REFCOUNT(self); - } - else { - refcnt = 1; - } - if ((refcnt > 2) - || (self->base != NULL) - || (self->weakreflist != NULL)) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array references or is referenced\n"\ - "by another array in this way. 
Use the resize function"); - return NULL; - } - - if (newsize == 0) { - sd = self->descr->elsize; - } - else { - sd = newsize*self->descr->elsize; - } - /* Reallocate space if needed */ - new_data = PyDataMem_RENEW(self->data, sd); - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate memory for array"); - return NULL; - } - self->data = new_data; - } - - if ((newsize > oldsize) && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros */ - elsize = self->descr->elsize; - if (PyDataType_FLAGCHK(self->descr, NPY_ITEM_REFCOUNT)) { - PyObject *zero = PyInt_FromLong(0); - char *optr; - optr = self->data + oldsize*elsize; - n = newsize - oldsize; - for (k = 0; k < n; k++) { - _putzero((char *)optr, zero, self->descr); - optr += elsize; - } - Py_DECREF(zero); - } - else{ - memset(self->data+oldsize*elsize, 0, (newsize-oldsize)*elsize); - } - } - - if (self->nd != new_nd) { - /* Different number of dimensions. */ - self->nd = new_nd; - /* Need new dimensions and strides arrays */ - dimptr = PyDimMem_RENEW(self->dimensions, 2*new_nd); - if (dimptr == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate memory for array"); - return NULL; - } - self->dimensions = dimptr; - self->strides = dimptr + new_nd; - } - - /* make new_strides variable */ - sd = (size_t) self->descr->elsize; - sd = (size_t) _array_fill_strides(new_strides, new_dimensions, new_nd, sd, - self->flags, &(self->flags)); - memmove(self->dimensions, new_dimensions, new_nd*sizeof(intp)); - memmove(self->strides, new_strides, new_nd*sizeof(intp)); - Py_INCREF(Py_None); - return Py_None; -} - -/* - * Returns a new array - * with the new shape from the data - * in the old array --- order-perspective depends on fortran argument. 
- * copy-only-if-necessary - */ - -/*NUMPY_API - * New shape for an array - */ -NPY_NO_EXPORT PyObject * -PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, - NPY_ORDER order) -{ - intp i; - intp *dimensions = newdims->ptr; - PyArrayObject *ret; - int n = newdims->len; - Bool same, incref = TRUE; - intp *strides = NULL; - intp newstrides[MAX_DIMS]; - int flags; - - if (order == PyArray_ANYORDER) { - order = PyArray_ISFORTRAN(self); - } - /* Quick check to make sure anything actually needs to be done */ - if (n == self->nd) { - same = TRUE; - i = 0; - while (same && i < n) { - if (PyArray_DIM(self,i) != dimensions[i]) { - same=FALSE; - } - i++; - } - if (same) { - return PyArray_View(self, NULL, NULL); - } - } - - /* - * Returns a pointer to an appropriate strides array - * if all we are doing is inserting ones into the shape, - * or removing ones from the shape - * or doing a combination of the two - * In this case we don't need to do anything but update strides and - * dimensions. So, we can handle non single-segment cases. - */ - i = _check_ones(self, n, dimensions, newstrides); - if (i == 0) { - strides = newstrides; - } - flags = self->flags; - - if (strides == NULL) { - /* - * we are really re-shaping not just adding ones to the shape somewhere - * fix any -1 dimensions and check new-dimensions against old size - */ - if (_fix_unknown_dimension(newdims, PyArray_SIZE(self)) < 0) { - return NULL; - } - /* - * sometimes we have to create a new copy of the array - * in order to get the right orientation and - * because we can't just re-use the buffer with the - * data in the order it is in. 
- */ - if (!(PyArray_ISONESEGMENT(self)) || - (((PyArray_CHKFLAGS(self, NPY_CONTIGUOUS) && - order == NPY_FORTRANORDER) || - (PyArray_CHKFLAGS(self, NPY_FORTRAN) && - order == NPY_CORDER)) && (self->nd > 1))) { - int success = 0; - success = _attempt_nocopy_reshape(self,n,dimensions, - newstrides,order); - if (success) { - /* no need to copy the array after all */ - strides = newstrides; - flags = self->flags; - } - else { - PyObject *new; - new = PyArray_NewCopy(self, order); - if (new == NULL) { - return NULL; - } - incref = FALSE; - self = (PyArrayObject *)new; - flags = self->flags; - } - } - - /* We always have to interpret the contiguous buffer correctly */ - - /* Make sure the flags argument is set. */ - if (n > 1) { - if (order == NPY_FORTRANORDER) { - flags &= ~NPY_CONTIGUOUS; - flags |= NPY_FORTRAN; - } - else { - flags &= ~NPY_FORTRAN; - flags |= NPY_CONTIGUOUS; - } - } - } - else if (n > 0) { - /* - * replace any 0-valued strides with - * appropriate value to preserve contiguousness - */ - if (order == NPY_FORTRANORDER) { - if (strides[0] == 0) { - strides[0] = self->descr->elsize; - } - for (i = 1; i < n; i++) { - if (strides[i] == 0) { - strides[i] = strides[i-1] * dimensions[i-1]; - } - } - } - else { - if (strides[n-1] == 0) { - strides[n-1] = self->descr->elsize; - } - for (i = n - 2; i > -1; i--) { - if (strides[i] == 0) { - strides[i] = strides[i+1] * dimensions[i+1]; - } - } - } - } - - Py_INCREF(self->descr); - ret = (PyAO *)PyArray_NewFromDescr(Py_TYPE(self), - self->descr, - n, dimensions, - strides, - self->data, - flags, (PyObject *)self); - - if (ret == NULL) { - goto fail; - } - if (incref) { - Py_INCREF(self); - } - ret->base = (PyObject *)self; - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return (PyObject *)ret; - - fail: - if (!incref) { - Py_DECREF(self); - } - return NULL; -} - - - -/* For back-ward compatability -- Not recommended */ - -/*NUMPY_API - * Reshape - */ -NPY_NO_EXPORT PyObject * -PyArray_Reshape(PyArrayObject 
*self, PyObject *shape) -{ - PyObject *ret; - PyArray_Dims newdims; - - if (!PyArray_IntpConverter(shape, &newdims)) { - return NULL; - } - ret = PyArray_Newshape(self, &newdims, PyArray_CORDER); - PyDimMem_FREE(newdims.ptr); - return ret; -} - -/* inserts 0 for strides where dimension will be 1 */ -static int -_check_ones(PyArrayObject *self, int newnd, intp* newdims, intp *strides) -{ - int nd; - intp *dims; - Bool done=FALSE; - int j, k; - - nd = self->nd; - dims = self->dimensions; - - for (k = 0, j = 0; !done && (j < nd || k < newnd);) { - if ((jstrides[j]; - j++; - k++; - } - else if ((k < newnd) && (newdims[k] == 1)) { - strides[k] = 0; - k++; - } - else if ((jelsize); - } - else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - return; - } - _putzero(optr + offset, zero, new); - } - } - else { - Py_INCREF(zero); - NPY_COPY_PYOBJECT_PTR(optr, &zero); - } - return; -} - - -/* - * attempt to reshape an array without copying data - * - * This function should correctly handle all reshapes, including - * axes of length 1. Zero strides should work but are untested. - * - * If a copy is needed, returns 0 - * If no copy is needed, returns 1 and fills newstrides - * with appropriate strides - * - * The "fortran" argument describes how the array should be viewed - * during the reshape, not how it is stored in memory (that - * information is in self->strides). - * - * If some output dimensions have length 1, the strides assigned to - * them are arbitrary. In the current implementation, they are the - * stride of the next-fastest index. 
- */ -static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, intp* newdims, - intp *newstrides, int fortran) -{ - int oldnd; - intp olddims[MAX_DIMS]; - intp oldstrides[MAX_DIMS]; - int oi, oj, ok, ni, nj, nk; - int np, op; - - oldnd = 0; - for (oi = 0; oi < self->nd; oi++) { - if (self->dimensions[oi]!= 1) { - olddims[oldnd] = self->dimensions[oi]; - oldstrides[oldnd] = self->strides[oi]; - oldnd++; - } - } - - /* - fprintf(stderr, "_attempt_nocopy_reshape( ("); - for (oi=0; oi ("); - for (ni=0; ni ni; nk--) { - newstrides[nk - 1] = newstrides[nk]*newdims[nk]; - } - } - ni = nj++; - oi = oj++; - } - - /* - fprintf(stderr, "success: _attempt_nocopy_reshape ("); - for (oi=0; oi ("); - for (ni=0; niptr; - n = newshape->len; - s_known = 1; - i_unknown = -1; - - for (i = 0; i < n; i++) { - if (dimensions[i] < 0) { - if (i_unknown == -1) { - i_unknown = i; - } - else { - PyErr_SetString(PyExc_ValueError, - "can only specify one" \ - " unknown dimension"); - return -1; - } - } - else { - s_known *= dimensions[i]; - } - } - - if (i_unknown >= 0) { - if ((s_known == 0) || (s_original % s_known != 0)) { - PyErr_SetString(PyExc_ValueError, msg); - return -1; - } - dimensions[i_unknown] = s_original/s_known; - } - else { - if (s_original != s_known) { - PyErr_SetString(PyExc_ValueError, msg); - return -1; - } - } - return 0; -} - -/*NUMPY_API - * - * return a new view of the array object with all of its unit-length - * dimensions squeezed out if needed, otherwise - * return the same array. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_Squeeze(PyArrayObject *self) -{ - int nd = self->nd; - int newnd = nd; - intp dimensions[MAX_DIMS]; - intp strides[MAX_DIMS]; - int i, j; - PyObject *ret; - - if (nd == 0) { - Py_INCREF(self); - return (PyObject *)self; - } - for (j = 0, i = 0; i < nd; i++) { - if (self->dimensions[i] == 1) { - newnd -= 1; - } - else { - dimensions[j] = self->dimensions[i]; - strides[j++] = self->strides[i]; - } - } - - Py_INCREF(self->descr); - ret = PyArray_NewFromDescr(Py_TYPE(self), - self->descr, - newnd, dimensions, - strides, self->data, - self->flags, - (PyObject *)self); - if (ret == NULL) { - return NULL; - } - PyArray_FLAGS(ret) &= ~OWNDATA; - PyArray_BASE(ret) = (PyObject *)self; - Py_INCREF(self); - return (PyObject *)ret; -} - -/*NUMPY_API - * SwapAxes - */ -NPY_NO_EXPORT PyObject * -PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) -{ - PyArray_Dims new_axes; - intp dims[MAX_DIMS]; - int n, i, val; - PyObject *ret; - - if (a1 == a2) { - Py_INCREF(ap); - return (PyObject *)ap; - } - - n = ap->nd; - if (n <= 1) { - Py_INCREF(ap); - return (PyObject *)ap; - } - - if (a1 < 0) { - a1 += n; - } - if (a2 < 0) { - a2 += n; - } - if ((a1 < 0) || (a1 >= n)) { - PyErr_SetString(PyExc_ValueError, - "bad axis1 argument to swapaxes"); - return NULL; - } - if ((a2 < 0) || (a2 >= n)) { - PyErr_SetString(PyExc_ValueError, - "bad axis2 argument to swapaxes"); - return NULL; - } - new_axes.ptr = dims; - new_axes.len = n; - - for (i = 0; i < n; i++) { - if (i == a1) { - val = a2; - } - else if (i == a2) { - val = a1; - } - else { - val = i; - } - new_axes.ptr[i] = val; - } - ret = PyArray_Transpose(ap, &new_axes); - return ret; -} - -/*NUMPY_API - * Return Transpose. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute) -{ - intp *axes, axis; - intp i, n; - intp permutation[MAX_DIMS], reverse_permutation[MAX_DIMS]; - PyArrayObject *ret = NULL; - - if (permute == NULL) { - n = ap->nd; - for (i = 0; i < n; i++) { - permutation[i] = n-1-i; - } - } - else { - n = permute->len; - axes = permute->ptr; - if (n != ap->nd) { - PyErr_SetString(PyExc_ValueError, - "axes don't match array"); - return NULL; - } - for (i = 0; i < n; i++) { - reverse_permutation[i] = -1; - } - for (i = 0; i < n; i++) { - axis = axes[i]; - if (axis < 0) { - axis = ap->nd + axis; - } - if (axis < 0 || axis >= ap->nd) { - PyErr_SetString(PyExc_ValueError, - "invalid axis for this array"); - return NULL; - } - if (reverse_permutation[axis] != -1) { - PyErr_SetString(PyExc_ValueError, - "repeated axis in transpose"); - return NULL; - } - reverse_permutation[axis] = i; - permutation[i] = axis; - } - for (i = 0; i < n; i++) { - } - } - - /* - * this allocates memory for dimensions and strides (but fills them - * incorrectly), sets up descr, and points data at ap->data. - */ - Py_INCREF(ap->descr); - ret = (PyArrayObject *)\ - PyArray_NewFromDescr(Py_TYPE(ap), - ap->descr, - n, ap->dimensions, - NULL, ap->data, ap->flags, - (PyObject *)ap); - if (ret == NULL) { - return NULL; - } - /* point at true owner of memory: */ - ret->base = (PyObject *)ap; - Py_INCREF(ap); - - /* fix the dimensions and strides of the return-array */ - for (i = 0; i < n; i++) { - ret->dimensions[i] = ap->dimensions[permutation[i]]; - ret->strides[i] = ap->strides[permutation[i]]; - } - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return (PyObject *)ret; -} - -/* - * Sorts items so stride is descending, because C-order - * is the default in the face of ambiguity. 
- */ -int _npy_stride_sort_item_comparator(const void *a, const void *b) -{ - npy_intp astride = ((_npy_stride_sort_item *)a)->stride, - bstride = ((_npy_stride_sort_item *)b)->stride; - - /* Sort the absolute value of the strides */ - if (astride < 0) { - astride = -astride; - } - if (bstride < 0) { - bstride = -bstride; - } - - if (astride > bstride) { - return -1; - } - else if (astride == bstride) { - /* - * Make the qsort stable by next comparing the perm order. - * (Note that two perm entries will never be equal) - */ - npy_intp aperm = ((_npy_stride_sort_item *)a)->perm, - bperm = ((_npy_stride_sort_item *)b)->perm; - return (aperm < bperm) ? -1 : 1; - } - else { - return 1; - } -} - -/* - * This function populates the first PyArray_NDIM(arr) elements - * of strideperm with sorted descending by their absolute values. - * For example, the stride array (4, -2, 12) becomes - * [(2, 12), (0, 4), (1, -2)]. - */ -NPY_NO_EXPORT void -PyArray_CreateSortedStridePerm(PyArrayObject *arr, - _npy_stride_sort_item *strideperm) -{ - int i, ndim = PyArray_NDIM(arr); - - /* Set up the strideperm values */ - for (i = 0; i < ndim; ++i) { - strideperm[i].perm = i; - strideperm[i].stride = PyArray_STRIDE(arr, i); - } - - /* Sort them */ - qsort(strideperm, ndim, sizeof(_npy_stride_sort_item), - &_npy_stride_sort_item_comparator); -} - -/*NUMPY_API - * Ravel - * Returns a contiguous array - */ -NPY_NO_EXPORT PyObject * -PyArray_Ravel(PyArrayObject *a, NPY_ORDER order) -{ - PyArray_Dims newdim = {NULL,1}; - intp val[1] = {-1}; - - newdim.ptr = val; - - if (order == NPY_ANYORDER) { - order = PyArray_ISFORTRAN(a) ? 
NPY_FORTRANORDER : NPY_CORDER; - } - else if (order == NPY_KEEPORDER) { - if (PyArray_IS_C_CONTIGUOUS(a)) { - order = NPY_CORDER; - } - else if (PyArray_IS_F_CONTIGUOUS(a)) { - order = NPY_FORTRANORDER; - } - } - - if (order == NPY_CORDER && PyArray_ISCONTIGUOUS(a)) { - return PyArray_Newshape(a, &newdim, NPY_CORDER); - } - else if (order == NPY_FORTRANORDER && PyArray_ISFORTRAN(a)) { - return PyArray_Newshape(a, &newdim, NPY_FORTRANORDER); - } - /* For KEEPORDER, check if we can make a flattened view */ - else if (order == NPY_KEEPORDER) { - _npy_stride_sort_item strideperm[NPY_MAXDIMS]; - npy_intp stride; - int i, ndim = PyArray_NDIM(a); - - PyArray_CreateSortedStridePerm(a, strideperm); - - stride = PyArray_DESCR(a)->elsize; - for (i = ndim-1; i >= 0; --i) { - if (strideperm[i].stride != stride) { - break; - } - stride *= PyArray_DIM(a, strideperm[i].perm); - } - - /* If all the strides matched a contiguous layout, return a view */ - if (i < 0) { - PyObject *ret; - npy_intp stride = PyArray_DESCR(a)->elsize; - - val[0] = PyArray_SIZE(a); - - Py_INCREF(PyArray_DESCR(a)); - ret = PyArray_NewFromDescr(Py_TYPE(a), - PyArray_DESCR(a), - 1, val, - &stride, - PyArray_BYTES(a), - PyArray_FLAGS(a), - (PyObject *)a); - - if (ret != NULL) { - PyArray_UpdateFlags((PyArrayObject *)ret, - NPY_CONTIGUOUS|NPY_FORTRAN); - Py_INCREF(a); - PyArray_BASE(ret) = (PyObject *)a; - } - return ret; - } - - } - - return PyArray_Flatten(a, order); -} - -/*NUMPY_API - * Flatten - */ -NPY_NO_EXPORT PyObject * -PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) -{ - PyArrayObject *ret; - intp size; - - if (order == NPY_ANYORDER) { - order = PyArray_ISFORTRAN(a) ? 
NPY_FORTRANORDER : NPY_CORDER; - } - - size = PyArray_SIZE(a); - Py_INCREF(a->descr); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(a), - a->descr, - 1, &size, - NULL, - NULL, - 0, (PyObject *)a); - - if (ret == NULL) { - return NULL; - } - if (PyArray_CopyAnyIntoOrdered(ret, a, order) < 0) { - Py_DECREF(ret); - return NULL; - } - return (PyObject *)ret; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/shape.h b/numpy-1.6.2/numpy/core/src/multiarray/shape.h deleted file mode 100644 index 8038a9f25f..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/shape.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _NPY_ARRAY_SHAPE_H_ -#define _NPY_ARRAY_SHAPE_H_ - -typedef struct { - npy_intp perm, stride; -} _npy_stride_sort_item; - -/* - * This function populates the first PyArray_NDIM(arr) elements - * of strideperm with sorted descending by their absolute values. - * For example, the stride array (4, -2, 12) becomes - * [(2, 12), (0, 4), (1, -2)]. - */ -NPY_NO_EXPORT void -PyArray_CreateSortedStridePerm(PyArrayObject *arr, - _npy_stride_sort_item *strideperm); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/multiarray/ucsnarrow.c b/numpy-1.6.2/numpy/core/src/multiarray/ucsnarrow.c deleted file mode 100644 index fa6ca7d299..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/ucsnarrow.c +++ /dev/null @@ -1,170 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include - -#include -#include - -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/npy_math.h" - -#include "npy_config.h" - -#include "numpy/npy_3kcompat.h" - -/* Functions only needed on narrow builds of Python - for converting back and forth between the NumPy Unicode data-type - (always 4-byte) - and the Python Unicode scalar (2-bytes on a narrow build). -*/ - -/* the ucs2 buffer must be large enough to hold 2*ucs4length characters - due to the use of surrogate pairs. 
- - The return value is the number of ucs2 bytes used-up which - is ucs4length + number of surrogate pairs found. - - values above 0xffff are converted to surrogate pairs. -*/ -NPY_NO_EXPORT int -PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, PyArray_UCS4 *ucs4, int ucs4length) -{ - int i; - int numucs2 = 0; - PyArray_UCS4 chr; - for (i=0; i 0xffff) { - numucs2++; - chr -= 0x10000L; - *ucs2++ = 0xD800 + (Py_UNICODE) (chr >> 10); - *ucs2++ = 0xDC00 + (Py_UNICODE) (chr & 0x03FF); - } - else { - *ucs2++ = (Py_UNICODE) chr; - } - numucs2++; - } - return numucs2; -} - - -/* This converts a UCS2 buffer of the given length to UCS4 buffer. - It converts up to ucs4len characters of UCS2 - - It returns the number of characters converted which can - be less than ucs2len if there are surrogate pairs in ucs2. - - The return value is the actual size of the used part of the ucs4 buffer. -*/ - -NPY_NO_EXPORT int -PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, PyArray_UCS4 *ucs4, int ucs2len, int ucs4len) -{ - int i; - PyArray_UCS4 chr; - Py_UNICODE ch; - int numchars=0; - - for (i=0; (i < ucs2len) && (numchars < ucs4len); i++) { - ch = *ucs2++; - if (ch >= 0xd800 && ch <= 0xdfff) { - /* surrogate pair */ - chr = ((PyArray_UCS4)(ch-0xd800)) << 10; - chr += *ucs2++ + 0x2400; /* -0xdc00 + 0x10000 */ - i++; - } - else { - chr = (PyArray_UCS4) ch; - } - *ucs4++ = chr; - numchars++; - } - return numchars; -} - -/* - * Returns a PyUnicodeObject initialized from a buffer containing - * UCS4 unicode. - * - * Parameters - * ---------- - * src: char * - * Pointer to buffer containing UCS4 unicode. - * size: Py_ssize_t - * Size of buffer in bytes. - * swap: int - * If true, the data will be swapped. - * align: int - * If true, the data will be aligned. 
- * - * Returns - * ------- - * new_reference: PyUnicodeObject - */ -NPY_NO_EXPORT PyUnicodeObject * -PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align) -{ - Py_ssize_t ucs4len = size / sizeof(npy_ucs4); - npy_ucs4 *buf = (npy_ucs4 *)src; - int alloc = 0; - PyUnicodeObject *ret; - - /* swap and align if needed */ - if (swap || align) { - buf = (npy_ucs4 *)malloc(size); - if (buf == NULL) { - PyErr_NoMemory(); - goto fail; - } - alloc = 1; - memcpy(buf, src, size); - if (swap) { - byte_swap_vector(buf, ucs4len, sizeof(npy_ucs4)); - } - } - - /* trim trailing zeros */ - while (ucs4len > 0 && buf[ucs4len - 1] == 0) { - ucs4len--; - } - - /* produce PyUnicode object */ -#ifdef Py_UNICODE_WIDE - { - ret = (PyUnicodeObject *)PyUnicode_FromUnicode(buf, (Py_ssize_t) ucs4len); - if (ret == NULL) { - goto fail; - } - } -#else - { - Py_ssize_t tmpsiz = 2 * sizeof(Py_UNICODE) * ucs4len; - Py_ssize_t ucs2len; - Py_UNICODE *tmp; - - if ((tmp = (Py_UNICODE *)malloc(tmpsiz)) == NULL) { - PyErr_NoMemory(); - goto fail; - } - ucs2len = PyUCS2Buffer_FromUCS4(tmp, buf, ucs4len); - ret = (PyUnicodeObject *)PyUnicode_FromUnicode(tmp, (Py_ssize_t) ucs2len); - free(tmp); - if (ret == NULL) { - goto fail; - } - } -#endif - - if (alloc) { - free(buf); - } - return ret; - -fail: - if (alloc) { - free(buf); - } - return NULL; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/ucsnarrow.h b/numpy-1.6.2/numpy/core/src/multiarray/ucsnarrow.h deleted file mode 100644 index 2866f3332a..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/ucsnarrow.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _NPY_UCSNARROW_H_ -#define _NPY_UCSNARROW_H_ - -NPY_NO_EXPORT int -PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, PyArray_UCS4 *ucs4, int ucs4length); - -NPY_NO_EXPORT int -PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, PyArray_UCS4 *ucs4, int ucs2len, int ucs4len); - -NPY_NO_EXPORT PyUnicodeObject * -PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align); - -#endif diff --git 
a/numpy-1.6.2/numpy/core/src/multiarray/usertypes.c b/numpy-1.6.2/numpy/core/src/multiarray/usertypes.c deleted file mode 100644 index 204afa6184..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/usertypes.c +++ /dev/null @@ -1,275 +0,0 @@ -/* - Provide multidimensional arrays as a basic object type in python. - - Based on Original Numeric implementation - Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - - with contributions from many Numeric Python developers 1995-2004 - - Heavily modified in 2005 with inspiration from Numarray - - by - - Travis Oliphant, oliphant@ee.byu.edu - Brigham Young Univeristy - - -maintainer email: oliphant.travis@ieee.org - - Numarray design (which provided guidance) by - Space Science Telescope Institute - (J. Todd Miller, Perry Greenfield, Rick White) -*/ -#define PY_SSIZE_T_CLEAN -#include -#include "structmember.h" - -/*#include */ -#define _MULTIARRAYMODULE -#define NPY_NO_PREFIX -#include "numpy/arrayobject.h" -#include "numpy/arrayscalars.h" - -#include "npy_config.h" - -#include "common.h" - -#include "numpy/npy_3kcompat.h" - -#include "usertypes.h" - -NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL; - -static int * -_append_new(int *types, int insert) -{ - int n = 0; - int *newtypes; - - while (types[n] != PyArray_NOTYPE) { - n++; - } - newtypes = (int *)realloc(types, (n + 2)*sizeof(int)); - newtypes[n] = insert; - newtypes[n + 1] = PyArray_NOTYPE; - return newtypes; -} - -static Bool -_default_nonzero(void *ip, void *arr) -{ - int elsize = PyArray_ITEMSIZE(arr); - char *ptr = ip; - while (elsize--) { - if (*ptr++ != 0) { - return TRUE; - } - } - return FALSE; -} - -static void -_default_copyswapn(void *dst, npy_intp dstride, void *src, - npy_intp sstride, npy_intp n, int swap, void *arr) -{ - npy_intp i; - PyArray_CopySwapFunc *copyswap; - char *dstptr = dst; - char *srcptr = src; - - copyswap = PyArray_DESCR(arr)->f->copyswap; - - for (i = 0; i < n; i++) { - copyswap(dstptr, srcptr, swap, arr); - dstptr += 
dstride; - srcptr += sstride; - } -} - -/*NUMPY_API - Initialize arrfuncs to NULL -*/ -NPY_NO_EXPORT void -PyArray_InitArrFuncs(PyArray_ArrFuncs *f) -{ - int i; - - for(i = 0; i < NPY_NTYPES_ABI_COMPATIBLE; i++) { - f->cast[i] = NULL; - } - f->getitem = NULL; - f->setitem = NULL; - f->copyswapn = NULL; - f->copyswap = NULL; - f->compare = NULL; - f->argmax = NULL; - f->argmin = NULL; - f->dotfunc = NULL; - f->scanfunc = NULL; - f->fromstr = NULL; - f->nonzero = NULL; - f->fill = NULL; - f->fillwithscalar = NULL; - for(i = 0; i < PyArray_NSORTS; i++) { - f->sort[i] = NULL; - f->argsort[i] = NULL; - } - f->castdict = NULL; - f->scalarkind = NULL; - f->cancastscalarkindto = NULL; - f->cancastto = NULL; -} - -/* - returns typenum to associate with this type >=PyArray_USERDEF. - needs the userdecrs table and PyArray_NUMUSER variables - defined in arraytypes.inc -*/ -/*NUMPY_API - Register Data type - Does not change the reference count of descr -*/ -NPY_NO_EXPORT int -PyArray_RegisterDataType(PyArray_Descr *descr) -{ - PyArray_Descr *descr2; - int typenum; - int i; - PyArray_ArrFuncs *f; - - /* See if this type is already registered */ - for (i = 0; i < NPY_NUMUSERTYPES; i++) { - descr2 = userdescrs[i]; - if (descr2 == descr) { - return descr->type_num; - } - } - typenum = PyArray_USERDEF + NPY_NUMUSERTYPES; - descr->type_num = typenum; - if (descr->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "cannot register a" \ - "flexible data-type"); - return -1; - } - f = descr->f; - if (f->nonzero == NULL) { - f->nonzero = _default_nonzero; - } - if (f->copyswapn == NULL) { - f->copyswapn = _default_copyswapn; - } - if (f->copyswap == NULL || f->getitem == NULL || - f->setitem == NULL) { - PyErr_SetString(PyExc_ValueError, "a required array function" \ - " is missing."); - return -1; - } - if (descr->typeobj == NULL) { - PyErr_SetString(PyExc_ValueError, "missing typeobject"); - return -1; - } - userdescrs = realloc(userdescrs, - (NPY_NUMUSERTYPES+1)*sizeof(void *)); - if 
(userdescrs == NULL) { - PyErr_SetString(PyExc_MemoryError, "RegisterDataType"); - return -1; - } - userdescrs[NPY_NUMUSERTYPES++] = descr; - return typenum; -} - -/*NUMPY_API - Register Casting Function - Replaces any function currently stored. -*/ -NPY_NO_EXPORT int -PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype, - PyArray_VectorUnaryFunc *castfunc) -{ - PyObject *cobj, *key; - int ret; - - if (totype < NPY_NTYPES_ABI_COMPATIBLE) { - descr->f->cast[totype] = castfunc; - return 0; - } - if (totype >= NPY_NTYPES && !PyTypeNum_ISUSERDEF(totype)) { - PyErr_SetString(PyExc_TypeError, "invalid type number."); - return -1; - } - if (descr->f->castdict == NULL) { - descr->f->castdict = PyDict_New(); - if (descr->f->castdict == NULL) { - return -1; - } - } - key = PyInt_FromLong(totype); - if (PyErr_Occurred()) { - return -1; - } - cobj = NpyCapsule_FromVoidPtr((void *)castfunc, NULL); - if (cobj == NULL) { - Py_DECREF(key); - return -1; - } - ret = PyDict_SetItem(descr->f->castdict, key, cobj); - Py_DECREF(key); - Py_DECREF(cobj); - return ret; -} - -/*NUMPY_API - * Register a type number indicating that a descriptor can be cast - * to it safely - */ -NPY_NO_EXPORT int -PyArray_RegisterCanCast(PyArray_Descr *descr, int totype, - NPY_SCALARKIND scalar) -{ - /* - * If we were to allow this, the casting lookup table for - * built-in types needs to be modified, as cancastto is - * not checked for them. 
- */ - if (!PyTypeNum_ISUSERDEF(descr->type_num) && - !PyTypeNum_ISUSERDEF(totype)) { - PyErr_SetString(PyExc_ValueError, - "At least one of the types provided to" - "RegisterCanCast must be user-defined."); - return -1; - } - - if (scalar == PyArray_NOSCALAR) { - /* - * register with cancastto - * These lists won't be freed once created - * -- they become part of the data-type - */ - if (descr->f->cancastto == NULL) { - descr->f->cancastto = (int *)malloc(1*sizeof(int)); - descr->f->cancastto[0] = PyArray_NOTYPE; - } - descr->f->cancastto = _append_new(descr->f->cancastto, - totype); - } - else { - /* register with cancastscalarkindto */ - if (descr->f->cancastscalarkindto == NULL) { - int i; - descr->f->cancastscalarkindto = - (int **)malloc(PyArray_NSCALARKINDS* sizeof(int*)); - for (i = 0; i < PyArray_NSCALARKINDS; i++) { - descr->f->cancastscalarkindto[i] = NULL; - } - } - if (descr->f->cancastscalarkindto[scalar] == NULL) { - descr->f->cancastscalarkindto[scalar] = - (int *)malloc(1*sizeof(int)); - descr->f->cancastscalarkindto[scalar][0] = - PyArray_NOTYPE; - } - descr->f->cancastscalarkindto[scalar] = - _append_new(descr->f->cancastscalarkindto[scalar], totype); - } - return 0; -} diff --git a/numpy-1.6.2/numpy/core/src/multiarray/usertypes.h b/numpy-1.6.2/numpy/core/src/multiarray/usertypes.h deleted file mode 100644 index 51f6a8720c..0000000000 --- a/numpy-1.6.2/numpy/core/src/multiarray/usertypes.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _NPY_PRIVATE_USERTYPES_H_ -#define _NPY_PRIVATE_USERTYPES_H_ - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyArray_Descr **userdescrs; -#else -NPY_NO_EXPORT PyArray_Descr **userdescrs; -#endif - -NPY_NO_EXPORT void -PyArray_InitArrFuncs(PyArray_ArrFuncs *f); - -NPY_NO_EXPORT int -PyArray_RegisterCanCast(PyArray_Descr *descr, int totype, - NPY_SCALARKIND scalar); - -NPY_NO_EXPORT int -PyArray_RegisterDataType(PyArray_Descr *descr); - -NPY_NO_EXPORT int -PyArray_RegisterCastFunc(PyArray_Descr *descr, int 
totype, - PyArray_VectorUnaryFunc *castfunc); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/npymath/_signbit.c b/numpy-1.6.2/numpy/core/src/npymath/_signbit.c deleted file mode 100644 index a2ad381627..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/_signbit.c +++ /dev/null @@ -1,32 +0,0 @@ -/* Adapted from cephes */ - -int -_npy_signbit_d(double x) -{ - union - { - double d; - short s[4]; - int i[2]; - } u; - - u.d = x; - -#if SIZEOF_INT == 4 - -#ifdef WORDS_BIGENDIAN /* defined in pyconfig.h */ - return u.i[0] < 0; -#else - return u.i[1] < 0; -#endif - -#else /* SIZEOF_INT != 4 */ - -#ifdef WORDS_BIGENDIAN - return u.s[0] < 0; -#else - return u.s[3] < 0; -#endif - -#endif /* SIZEOF_INT */ -} diff --git a/numpy-1.6.2/numpy/core/src/npymath/halffloat.c b/numpy-1.6.2/numpy/core/src/npymath/halffloat.c deleted file mode 100644 index a49e3b02ec..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/halffloat.c +++ /dev/null @@ -1,529 +0,0 @@ -#include "numpy/halffloat.h" - -/* - * This chooses between 'ties to even' and 'ties away from zero'. - */ -#define NPY_HALF_ROUND_TIES_TO_EVEN 1 -/* - * If these are 1, the conversions try to trigger underflow, - * overflow, and invalid exceptions in the FP system when needed. 
- */ -#define NPY_HALF_GENERATE_OVERFLOW 1 -#define NPY_HALF_GENERATE_UNDERFLOW 1 -#define NPY_HALF_GENERATE_INVALID 1 - -/* - ******************************************************************** - * HALF-PRECISION ROUTINES * - ******************************************************************** - */ - -float npy_half_to_float(npy_half h) -{ - union { float ret; npy_uint32 retbits; } conv; - conv.retbits = npy_halfbits_to_floatbits(h); - return conv.ret; -} - -double npy_half_to_double(npy_half h) -{ - union { double ret; npy_uint64 retbits; } conv; - conv.retbits = npy_halfbits_to_doublebits(h); - return conv.ret; -} - -npy_half npy_float_to_half(float f) -{ - union { float f; npy_uint32 fbits; } conv; - conv.f = f; - return npy_floatbits_to_halfbits(conv.fbits); -} - -npy_half npy_double_to_half(double d) -{ - union { double d; npy_uint64 dbits; } conv; - conv.d = d; - return npy_doublebits_to_halfbits(conv.dbits); -} - -int npy_half_iszero(npy_half h) -{ - return (h&0x7fff) == 0; -} - -int npy_half_isnan(npy_half h) -{ - return ((h&0x7c00u) == 0x7c00u) && ((h&0x03ffu) != 0x0000u); -} - -int npy_half_isinf(npy_half h) -{ - return ((h&0x7fffu) == 0x7c00u); -} - -int npy_half_isfinite(npy_half h) -{ - return ((h&0x7c00u) != 0x7c00u); -} - -int npy_half_signbit(npy_half h) -{ - return (h&0x8000u) != 0; -} - -npy_half npy_half_spacing(npy_half h) -{ - npy_half ret; - npy_uint16 h_exp = h&0x7c00u; - npy_uint16 h_sig = h&0x03ffu; - if (h_exp == 0x7c00u) { -#if NPY_HALF_GENERATE_INVALID - npy_set_floatstatus_invalid(); -#endif - ret = NPY_HALF_NAN; - } else if (h == 0x7bffu) { -#if NPY_HALF_GENERATE_OVERFLOW - npy_set_floatstatus_overflow(); -#endif - ret = NPY_HALF_PINF; - } else if ((h&0x8000u) && h_sig == 0) { /* Negative boundary case */ - if (h_exp > 0x2c00u) { /* If result is normalized */ - ret = h_exp - 0x2c00u; - } else if(h_exp > 0x0400u) { /* The result is a subnormal, but not the smallest */ - ret = 1 << ((h_exp >> 10) - 2); - } else { - ret = 0x0001u; /* 
Smallest subnormal half */ - } - } else if (h_exp > 0x2800u) { /* If result is still normalized */ - ret = h_exp - 0x2800u; - } else if (h_exp > 0x0400u) { /* The result is a subnormal, but not the smallest */ - ret = 1 << ((h_exp >> 10) - 1); - } else { - ret = 0x0001u; - } - - return ret; -} - -npy_half npy_half_copysign(npy_half x, npy_half y) -{ - return (x&0x7fffu) | (y&0x8000u); -} - -npy_half npy_half_nextafter(npy_half x, npy_half y) -{ - npy_half ret; - - if (!npy_half_isfinite(x) || npy_half_isnan(y)) { -#if NPY_HALF_GENERATE_INVALID - npy_set_floatstatus_invalid(); -#endif - ret = NPY_HALF_NAN; - } else if (npy_half_eq_nonan(x, y)) { - ret = x; - } else if (npy_half_iszero(x)) { - ret = (y&0x8000u) + 1; /* Smallest subnormal half */ - } else if (!(x&0x8000u)) { /* x > 0 */ - if ((npy_int16)x > (npy_int16)y) { /* x > y */ - ret = x-1; - } else { - ret = x+1; - } - } else { - if (!(y&0x8000u) || (x&0x7fffu) > (y&0x7fffu)) { /* x < y */ - ret = x-1; - } else { - ret = x+1; - } - } -#ifdef NPY_HALF_GENERATE_OVERFLOW - if (npy_half_isinf(ret)) { - npy_set_floatstatus_overflow(); - } -#endif - - return ret; -} - -int npy_half_eq_nonan(npy_half h1, npy_half h2) -{ - return (h1 == h2 || ((h1 | h2) & 0x7fff) == 0); -} - -int npy_half_eq(npy_half h1, npy_half h2) -{ - /* - * The equality cases are as follows: - * - If either value is NaN, never equal. - * - If the values are equal, equal. - * - If the values are both signed zeros, equal. 
- */ - return (!npy_half_isnan(h1) && !npy_half_isnan(h2)) && - (h1 == h2 || ((h1 | h2) & 0x7fff) == 0); -} - -int npy_half_ne(npy_half h1, npy_half h2) -{ - return !npy_half_eq(h1, h2); -} - -int npy_half_lt_nonan(npy_half h1, npy_half h2) -{ - if (h1&0x8000u) { - if (h2&0x8000u) { - return (h1&0x7fffu) > (h2&0x7fffu); - } else { - /* Signed zeros are equal, have to check for it */ - return (h1 != 0x8000u) || (h2 != 0x0000u); - } - } else { - if (h2&0x8000u) { - return 0; - } else { - return (h1&0x7fffu) < (h2&0x7fffu); - } - } -} - -int npy_half_lt(npy_half h1, npy_half h2) -{ - return (!npy_half_isnan(h1) && !npy_half_isnan(h2)) && npy_half_lt_nonan(h1, h2); -} - -int npy_half_gt(npy_half h1, npy_half h2) -{ - return npy_half_lt(h2, h1); -} - -int npy_half_le_nonan(npy_half h1, npy_half h2) -{ - if (h1&0x8000u) { - if (h2&0x8000u) { - return (h1&0x7fffu) >= (h2&0x7fffu); - } else { - return 1; - } - } else { - if (h2&0x8000u) { - /* Signed zeros are equal, have to check for it */ - return (h1 == 0x0000u) && (h2 == 0x8000u); - } else { - return (h1&0x7fffu) <= (h2&0x7fffu); - } - } -} - -int npy_half_le(npy_half h1, npy_half h2) -{ - return (!npy_half_isnan(h1) && !npy_half_isnan(h2)) && npy_half_le_nonan(h1, h2); -} - -int npy_half_ge(npy_half h1, npy_half h2) -{ - return npy_half_le(h2, h1); -} - - - -/* - ******************************************************************** - * BIT-LEVEL CONVERSIONS * - ******************************************************************** - */ - -npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) -{ - npy_uint32 f_exp, f_sig; - npy_uint16 h_sgn, h_exp, h_sig; - - h_sgn = (npy_uint16) ((f&0x80000000u) >> 16); - f_exp = (f&0x7f800000u); - - /* Exponent overflow/NaN converts to signed inf/NaN */ - if (f_exp >= 0x47800000u) { - if (f_exp == 0x7f800000u) { - /* Inf or NaN */ - f_sig = (f&0x007fffffu); - if (f_sig != 0) { - /* NaN - propagate the flag in the significand... 
*/ - npy_uint16 ret = (npy_uint16) (0x7c00u + (f_sig >> 13)); - /* ...but make sure it stays a NaN */ - if (ret == 0x7c00u) { - ret++; - } - return h_sgn + ret; - } else { - /* signed inf */ - return (npy_uint16) (h_sgn + 0x7c00u); - } - } else { - /* overflow to signed inf */ -#if NPY_HALF_GENERATE_OVERFLOW - npy_set_floatstatus_overflow(); -#endif - return (npy_uint16) (h_sgn + 0x7c00u); - } - } - - /* Exponent underflow converts to a subnormal half or signed zero */ - if (f_exp <= 0x38000000u) { - /* - * Signed zeros, subnormal floats, and floats with small - * exponents all convert to signed zero halfs. - */ - if (f_exp < 0x33000000u) { -#if NPY_HALF_GENERATE_UNDERFLOW - /* If f != 0, it underflowed to 0 */ - if ((f&0x7fffffff) != 0) { - npy_set_floatstatus_underflow(); - } -#endif - return h_sgn; - } - /* Make the subnormal significand */ - f_exp >>= 23; - f_sig = (0x00800000u + (f&0x007fffffu)); -#if NPY_HALF_GENERATE_UNDERFLOW - /* If it's not exactly represented, it underflowed */ - if ((f_sig&(((npy_uint32)1 << (126 - f_exp)) - 1)) != 0) { - npy_set_floatstatus_underflow(); - } -#endif - f_sig >>= (113 - f_exp); - /* Handle rounding by adding 1 to the bit beyond half precision */ -#if NPY_HALF_ROUND_TIES_TO_EVEN - /* - * If the last bit in the half significand is 0 (already even), and - * the remaining bit pattern is 1000...0, then we do not add one - * to the bit after the half significand. In all other cases, we do. - */ - if ((f_sig&0x00003fffu) != 0x00001000u) { - f_sig += 0x00001000u; - } -#else - f_sig += 0x00001000u; -#endif - h_sig = (npy_uint16) (f_sig >> 13); - /* - * If the rounding causes a bit to spill into h_exp, it will - * increment h_exp from zero to one and h_sig will be zero. - * This is the correct result. 
- */ - return (npy_uint16) (h_sgn + h_sig); - } - - /* Regular case with no overflow or underflow */ - h_exp = (npy_uint16) ((f_exp - 0x38000000u) >> 13); - /* Handle rounding by adding 1 to the bit beyond half precision */ - f_sig = (f&0x007fffffu); -#if NPY_HALF_ROUND_TIES_TO_EVEN - /* - * If the last bit in the half significand is 0 (already even), and - * the remaining bit pattern is 1000...0, then we do not add one - * to the bit after the half significand. In all other cases, we do. - */ - if ((f_sig&0x00003fffu) != 0x00001000u) { - f_sig += 0x00001000u; - } -#else - f_sig += 0x00001000u; -#endif - h_sig = (npy_uint16) (f_sig >> 13); - /* - * If the rounding causes a bit to spill into h_exp, it will - * increment h_exp by one and h_sig will be zero. This is the - * correct result. h_exp may increment to 15, at greatest, in - * which case the result overflows to a signed inf. - */ -#if NPY_HALF_GENERATE_OVERFLOW - h_sig += h_exp; - if (h_sig == 0x7c00u) { - npy_set_floatstatus_overflow(); - } - return h_sgn + h_sig; -#else - return h_sgn + h_exp + h_sig; -#endif -} - -npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) -{ - npy_uint64 d_exp, d_sig; - npy_uint16 h_sgn, h_exp, h_sig; - - h_sgn = (d&0x8000000000000000ULL) >> 48; - d_exp = (d&0x7ff0000000000000ULL); - - /* Exponent overflow/NaN converts to signed inf/NaN */ - if (d_exp >= 0x40f0000000000000ULL) { - if (d_exp == 0x7ff0000000000000ULL) { - /* Inf or NaN */ - d_sig = (d&0x000fffffffffffffULL); - if (d_sig != 0) { - /* NaN - propagate the flag in the significand... 
*/ - npy_uint16 ret = (npy_uint16) (0x7c00u + (d_sig >> 42)); - /* ...but make sure it stays a NaN */ - if (ret == 0x7c00u) { - ret++; - } - return h_sgn + ret; - } else { - /* signed inf */ - return h_sgn + 0x7c00u; - } - } else { - /* overflow to signed inf */ -#if NPY_HALF_GENERATE_OVERFLOW - npy_set_floatstatus_overflow(); -#endif - return h_sgn + 0x7c00u; - } - } - - /* Exponent underflow converts to subnormal half or signed zero */ - if (d_exp <= 0x3f00000000000000ULL) { - /* - * Signed zeros, subnormal floats, and floats with small - * exponents all convert to signed zero halfs. - */ - if (d_exp < 0x3e60000000000000ULL) { -#if NPY_HALF_GENERATE_UNDERFLOW - /* If d != 0, it underflowed to 0 */ - if ((d&0x7fffffffffffffffULL) != 0) { - npy_set_floatstatus_underflow(); - } -#endif - return h_sgn; - } - /* Make the subnormal significand */ - d_exp >>= 52; - d_sig = (0x0010000000000000ULL + (d&0x000fffffffffffffULL)); -#if NPY_HALF_GENERATE_UNDERFLOW - /* If it's not exactly represented, it underflowed */ - if ((d_sig&(((npy_uint64)1 << (1051 - d_exp)) - 1)) != 0) { - npy_set_floatstatus_underflow(); - } -#endif - d_sig >>= (1009 - d_exp); - /* Handle rounding by adding 1 to the bit beyond half precision */ -#if NPY_HALF_ROUND_TIES_TO_EVEN - /* - * If the last bit in the half significand is 0 (already even), and - * the remaining bit pattern is 1000...0, then we do not add one - * to the bit after the half significand. In all other cases, we do. - */ - if ((d_sig&0x000007ffffffffffULL) != 0x0000020000000000ULL) { - d_sig += 0x0000020000000000ULL; - } -#else - d_sig += 0x0000020000000000ULL; -#endif - h_sig = (npy_uint16) (d_sig >> 42); - /* - * If the rounding causes a bit to spill into h_exp, it will - * increment h_exp from zero to one and h_sig will be zero. - * This is the correct result. 
- */ - return h_sgn + h_sig; - } - - /* Regular case with no overflow or underflow */ - h_exp = (npy_uint16) ((d_exp - 0x3f00000000000000ULL) >> 42); - /* Handle rounding by adding 1 to the bit beyond half precision */ - d_sig = (d&0x000fffffffffffffULL); -#if NPY_HALF_ROUND_TIES_TO_EVEN - /* - * If the last bit in the half significand is 0 (already even), and - * the remaining bit pattern is 1000...0, then we do not add one - * to the bit after the half significand. In all other cases, we do. - */ - if ((d_sig&0x000007ffffffffffULL) != 0x0000020000000000ULL) { - d_sig += 0x0000020000000000ULL; - } -#else - d_sig += 0x0000020000000000ULL; -#endif - h_sig = (npy_uint16) (d_sig >> 42); - - /* - * If the rounding causes a bit to spill into h_exp, it will - * increment h_exp by one and h_sig will be zero. This is the - * correct result. h_exp may increment to 15, at greatest, in - * which case the result overflows to a signed inf. - */ -#if NPY_HALF_GENERATE_OVERFLOW - h_sig += h_exp; - if (h_sig == 0x7c00u) { - npy_set_floatstatus_overflow(); - } - return h_sgn + h_sig; -#else - return h_sgn + h_exp + h_sig; -#endif -} - -npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) -{ - npy_uint16 h_exp, h_sig; - npy_uint32 f_sgn, f_exp, f_sig; - - h_exp = (h&0x7c00u); - f_sgn = ((npy_uint32)h&0x8000u) << 16; - switch (h_exp) { - case 0x0000u: /* 0 or subnormal */ - h_sig = (h&0x03ffu); - /* Signed zero */ - if (h_sig == 0) { - return f_sgn; - } - /* Subnormal */ - h_sig <<= 1; - while ((h_sig&0x0400u) == 0) { - h_sig <<= 1; - h_exp++; - } - f_exp = ((npy_uint32)(127 - 15 - h_exp)) << 23; - f_sig = ((npy_uint32)(h_sig&0x03ffu)) << 13; - return f_sgn + f_exp + f_sig; - case 0x7c00u: /* inf or NaN */ - /* All-ones exponent and a copy of the significand */ - return f_sgn + 0x7f800000u + (((npy_uint32)(h&0x03ffu)) << 13); - default: /* normalized */ - /* Just need to adjust the exponent and shift */ - return f_sgn + (((npy_uint32)(h&0x7fffu) + 0x1c000u) << 13); - } -} - -npy_uint64 
npy_halfbits_to_doublebits(npy_uint16 h) -{ - npy_uint16 h_exp, h_sig; - npy_uint64 d_sgn, d_exp, d_sig; - - h_exp = (h&0x7c00u); - d_sgn = ((npy_uint64)h&0x8000u) << 48; - switch (h_exp) { - case 0x0000u: /* 0 or subnormal */ - h_sig = (h&0x03ffu); - /* Signed zero */ - if (h_sig == 0) { - return d_sgn; - } - /* Subnormal */ - h_sig <<= 1; - while ((h_sig&0x0400u) == 0) { - h_sig <<= 1; - h_exp++; - } - d_exp = ((npy_uint64)(1023 - 15 - h_exp)) << 52; - d_sig = ((npy_uint64)(h_sig&0x03ffu)) << 42; - return d_sgn + d_exp + d_sig; - case 0x7c00u: /* inf or NaN */ - /* All-ones exponent and a copy of the significand */ - return d_sgn + 0x7ff0000000000000ULL + - (((npy_uint64)(h&0x03ffu)) << 42); - default: /* normalized */ - /* Just need to adjust the exponent and shift */ - return d_sgn + (((npy_uint64)(h&0x7fffu) + 0xfc000u) << 42); - } -} - diff --git a/numpy-1.6.2/numpy/core/src/npymath/ieee754.c.src b/numpy-1.6.2/numpy/core/src/npymath/ieee754.c.src deleted file mode 100644 index 47e7d0688d..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/ieee754.c.src +++ /dev/null @@ -1,674 +0,0 @@ -/* -*- c -*- */ -/* - * vim:syntax=c - * - * Low-level routines related to IEEE-754 format - */ -#include "npy_math_common.h" -#include "npy_math_private.h" - -#ifndef HAVE_COPYSIGN -double npy_copysign(double x, double y) -{ - npy_uint32 hx, hy; - GET_HIGH_WORD(hx, x); - GET_HIGH_WORD(hy, y); - SET_HIGH_WORD(x, (hx & 0x7fffffff) | (hy & 0x80000000)); - return x; -} -#endif - -#if !defined(HAVE_DECL_SIGNBIT) -#include "_signbit.c" - -int _npy_signbit_f(float x) -{ - return _npy_signbit_d((double) x); -} - -int _npy_signbit_ld(long double x) -{ - return _npy_signbit_d((double) x); -} -#endif - -/* - * FIXME: There is a lot of redundancy between _next* and npy_nextafter*. 
- * refactor this at some point - * - * p >= 0, returnx x + nulp - * p < 0, returnx x - nulp - */ -double _next(double x, int p) -{ - volatile double t; - npy_int32 hx, hy, ix; - npy_uint32 lx; - - EXTRACT_WORDS(hx, lx, x); - ix = hx & 0x7fffffff; /* |x| */ - - if (((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0)) /* x is nan */ - return x; - if ((ix | lx) == 0) { /* x == 0 */ - if (p >= 0) { - INSERT_WORDS(x, 0x0, 1); /* return +minsubnormal */ - } else { - INSERT_WORDS(x, 0x80000000, 1); /* return -minsubnormal */ - } - t = x * x; - if (t == x) - return t; - else - return x; /* raise underflow flag */ - } - if (p < 0) { /* x -= ulp */ - if (lx == 0) - hx -= 1; - lx -= 1; - } else { /* x += ulp */ - lx += 1; - if (lx == 0) - hx += 1; - } - hy = hx & 0x7ff00000; - if (hy >= 0x7ff00000) - return x + x; /* overflow */ - if (hy < 0x00100000) { /* underflow */ - t = x * x; - if (t != x) { /* raise underflow flag */ - INSERT_WORDS(x, hx, lx); - return x; - } - } - INSERT_WORDS(x, hx, lx); - return x; -} - -float _nextf(float x, int p) -{ - volatile float t; - npy_int32 hx, hy, ix; - - GET_FLOAT_WORD(hx, x); - ix = hx & 0x7fffffff; /* |x| */ - - if ((ix > 0x7f800000)) /* x is nan */ - return x; - if (ix == 0) { /* x == 0 */ - if (p >= 0) { - SET_FLOAT_WORD(x, 0x0 | 1); /* return +minsubnormal */ - } else { - SET_FLOAT_WORD(x, 0x80000000 | 1); /* return -minsubnormal */ - } - t = x * x; - if (t == x) - return t; - else - return x; /* raise underflow flag */ - } - if (p < 0) { /* x -= ulp */ - hx -= 1; - } else { /* x += ulp */ - hx += 1; - } - hy = hx & 0x7f800000; - if (hy >= 0x7f800000) - return x + x; /* overflow */ - if (hy < 0x00800000) { /* underflow */ - t = x * x; - if (t != x) { /* raise underflow flag */ - SET_FLOAT_WORD(x, hx); - return x; - } - } - SET_FLOAT_WORD(x, hx); - return x; -} - -#ifdef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE - -/* - * FIXME: this is ugly and untested. 
The asm part only works with gcc, and we - * should consolidate the GET_LDOUBLE* / SET_LDOUBLE macros - */ -#define math_opt_barrier(x) \ - ({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; }) -#define math_force_eval(x) __asm __volatile ("" : : "m" (x)) - -/* only works for big endian */ -typedef union -{ - npy_longdouble value; - struct - { - npy_uint64 msw; - npy_uint64 lsw; - } parts64; - struct - { - npy_uint32 w0, w1, w2, w3; - } parts32; -} ieee854_long_double_shape_type; - -/* Get two 64 bit ints from a long double. */ - -#define GET_LDOUBLE_WORDS64(ix0,ix1,d) \ -do { \ - ieee854_long_double_shape_type qw_u; \ - qw_u.value = (d); \ - (ix0) = qw_u.parts64.msw; \ - (ix1) = qw_u.parts64.lsw; \ -} while (0) - -/* Set a long double from two 64 bit ints. */ - -#define SET_LDOUBLE_WORDS64(d,ix0,ix1) \ -do { \ - ieee854_long_double_shape_type qw_u; \ - qw_u.parts64.msw = (ix0); \ - qw_u.parts64.lsw = (ix1); \ - (d) = qw_u.value; \ -} while (0) - -npy_longdouble _nextl(npy_longdouble x, int p) -{ - npy_int64 hx,ihx,ilx; - npy_uint64 lx; - - GET_LDOUBLE_WORDS64(hx, lx, x); - ihx = hx & 0x7fffffffffffffffLL; /* |hx| */ - ilx = lx & 0x7fffffffffffffffLL; /* |lx| */ - - if(((ihx & 0x7ff0000000000000LL)==0x7ff0000000000000LL)&& - ((ihx & 0x000fffffffffffffLL)!=0)) { - return x; /* signal the nan */ - } - if(ihx == 0 && ilx == 0) { /* x == 0 */ - npy_longdouble u; - SET_LDOUBLE_WORDS64(x, p, 0ULL);/* return +-minsubnormal */ - u = x * x; - if (u == x) { - return u; - } else { - return x; /* raise underflow flag */ - } - } - - npy_longdouble u; - if(p < 0) { /* p < 0, x -= ulp */ - if((hx==0xffefffffffffffffLL)&&(lx==0xfc8ffffffffffffeLL)) - return x+x; /* overflow, return -inf */ - if (hx >= 0x7ff0000000000000LL) { - SET_LDOUBLE_WORDS64(u,0x7fefffffffffffffLL,0x7c8ffffffffffffeLL); - return u; - } - if(ihx <= 0x0360000000000000LL) { /* x <= LDBL_MIN */ - u = math_opt_barrier (x); - x -= __LDBL_DENORM_MIN__; - if (ihx < 0x0360000000000000LL - || (hx > 0 && (npy_int64) 
lx <= 0) - || (hx < 0 && (npy_int64) lx > 1)) { - u = u * u; - math_force_eval (u); /* raise underflow flag */ - } - return x; - } - if (ihx < 0x06a0000000000000LL) { /* ulp will denormal */ - SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL),0ULL); - u *= 0x1.0000000000000p-105L; - } else - SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL)-0x0690000000000000LL,0ULL); - return x - u; - } else { /* p >= 0, x += ulp */ - if((hx==0x7fefffffffffffffLL)&&(lx==0x7c8ffffffffffffeLL)) - return x+x; /* overflow, return +inf */ - if ((npy_uint64) hx >= 0xfff0000000000000ULL) { - SET_LDOUBLE_WORDS64(u,0xffefffffffffffffLL,0xfc8ffffffffffffeLL); - return u; - } - if(ihx <= 0x0360000000000000LL) { /* x <= LDBL_MIN */ - u = math_opt_barrier (x); - x += __LDBL_DENORM_MIN__; - if (ihx < 0x0360000000000000LL - || (hx > 0 && (npy_int64) lx < 0 && lx != 0x8000000000000001LL) - || (hx < 0 && (npy_int64) lx >= 0)) { - u = u * u; - math_force_eval (u); /* raise underflow flag */ - } - if (x == 0.0L) /* handle negative __LDBL_DENORM_MIN__ case */ - x = -0.0L; - return x; - } - if (ihx < 0x06a0000000000000LL) { /* ulp will denormal */ - SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL),0ULL); - u *= 0x1.0000000000000p-105L; - } else - SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL)-0x0690000000000000LL,0ULL); - return x + u; - } -} -#else -npy_longdouble _nextl(npy_longdouble x, int p) -{ - volatile npy_longdouble t; - union IEEEl2bitsrep ux; - - ux.e = x; - - if ((GET_LDOUBLE_EXP(ux) == 0x7fff && - ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) | GET_LDOUBLE_MANL(ux)) != 0)) { - return ux.e; /* x is nan */ - } - if (ux.e == 0.0) { - SET_LDOUBLE_MANH(ux, 0); /* return +-minsubnormal */ - SET_LDOUBLE_MANL(ux, 1); - if (p >= 0) { - SET_LDOUBLE_SIGN(ux, 0); - } else { - SET_LDOUBLE_SIGN(ux, 1); - } - t = ux.e * ux.e; - if (t == ux.e) { - return t; - } else { - return ux.e; /* raise underflow flag */ - } - } - if (p < 0) { /* x -= ulp */ - if (GET_LDOUBLE_MANL(ux) == 0) { - if ((GET_LDOUBLE_MANH(ux) & 
~LDBL_NBIT) == 0) { - SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) - 1); - } - SET_LDOUBLE_MANH(ux, - (GET_LDOUBLE_MANH(ux) - 1) | - (GET_LDOUBLE_MANH(ux) & LDBL_NBIT)); - } - SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) - 1); - } else { /* x += ulp */ - SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) + 1); - if (GET_LDOUBLE_MANL(ux) == 0) { - SET_LDOUBLE_MANH(ux, - (GET_LDOUBLE_MANH(ux) + 1) | - (GET_LDOUBLE_MANH(ux) & LDBL_NBIT)); - if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) { - SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) + 1); - } - } - } - if (GET_LDOUBLE_EXP(ux) == 0x7fff) { - return ux.e + ux.e; /* overflow */ - } - if (GET_LDOUBLE_EXP(ux) == 0) { /* underflow */ - if (LDBL_NBIT) { - SET_LDOUBLE_MANH(ux, GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT); - } - t = ux.e * ux.e; - if (t != ux.e) { /* raise underflow flag */ - return ux.e; - } - } - - return ux.e; -} -#endif - -/* - * nextafter code taken from BSD math lib, the code contains the following - * notice: - * - * ==================================================== - * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. - * - * Developed at SunPro, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. 
- * ==================================================== - */ - -#ifndef HAVE_NEXTAFTER -double npy_nextafter(double x, double y) -{ - volatile double t; - npy_int32 hx, hy, ix, iy; - npy_uint32 lx, ly; - - EXTRACT_WORDS(hx, lx, x); - EXTRACT_WORDS(hy, ly, y); - ix = hx & 0x7fffffff; /* |x| */ - iy = hy & 0x7fffffff; /* |y| */ - - if (((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0) || /* x is nan */ - ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0)) /* y is nan */ - return x + y; - if (x == y) - return y; /* x=y, return y */ - if ((ix | lx) == 0) { /* x == 0 */ - INSERT_WORDS(x, hy & 0x80000000, 1); /* return +-minsubnormal */ - t = x * x; - if (t == x) - return t; - else - return x; /* raise underflow flag */ - } - if (hx >= 0) { /* x > 0 */ - if (hx > hy || ((hx == hy) && (lx > ly))) { /* x > y, x -= ulp */ - if (lx == 0) - hx -= 1; - lx -= 1; - } else { /* x < y, x += ulp */ - lx += 1; - if (lx == 0) - hx += 1; - } - } else { /* x < 0 */ - if (hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))) { /* x < y, x -= ulp */ - if (lx == 0) - hx -= 1; - lx -= 1; - } else { /* x > y, x += ulp */ - lx += 1; - if (lx == 0) - hx += 1; - } - } - hy = hx & 0x7ff00000; - if (hy >= 0x7ff00000) - return x + x; /* overflow */ - if (hy < 0x00100000) { /* underflow */ - t = x * x; - if (t != x) { /* raise underflow flag */ - INSERT_WORDS(y, hx, lx); - return y; - } - } - INSERT_WORDS(x, hx, lx); - return x; -} -#endif - -#ifndef HAVE_NEXTAFTERF -float npy_nextafterf(float x, float y) -{ - volatile float t; - npy_int32 hx, hy, ix, iy; - - GET_FLOAT_WORD(hx, x); - GET_FLOAT_WORD(hy, y); - ix = hx & 0x7fffffff; /* |x| */ - iy = hy & 0x7fffffff; /* |y| */ - - if ((ix > 0x7f800000) || /* x is nan */ - (iy > 0x7f800000)) /* y is nan */ - return x + y; - if (x == y) - return y; /* x=y, return y */ - if (ix == 0) { /* x == 0 */ - SET_FLOAT_WORD(x, (hy & 0x80000000) | 1); /* return +-minsubnormal */ - t = x * x; - if (t == x) - return t; - else - return x; /* raise underflow flag 
*/ - } - if (hx >= 0) { /* x > 0 */ - if (hx > hy) { /* x > y, x -= ulp */ - hx -= 1; - } else { /* x < y, x += ulp */ - hx += 1; - } - } else { /* x < 0 */ - if (hy >= 0 || hx > hy) { /* x < y, x -= ulp */ - hx -= 1; - } else { /* x > y, x += ulp */ - hx += 1; - } - } - hy = hx & 0x7f800000; - if (hy >= 0x7f800000) - return x + x; /* overflow */ - if (hy < 0x00800000) { /* underflow */ - t = x * x; - if (t != x) { /* raise underflow flag */ - SET_FLOAT_WORD(y, hx); - return y; - } - } - SET_FLOAT_WORD(x, hx); - return x; -} -#endif - -#ifndef HAVE_NEXTAFTERL -npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y) -{ - volatile npy_longdouble t; - union IEEEl2bitsrep ux; - union IEEEl2bitsrep uy; - - ux.e = x; - uy.e = y; - - if ((GET_LDOUBLE_EXP(ux) == 0x7fff && - ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) | GET_LDOUBLE_MANL(ux)) != 0) || - (GET_LDOUBLE_EXP(uy) == 0x7fff && - ((GET_LDOUBLE_MANH(uy) & ~LDBL_NBIT) | GET_LDOUBLE_MANL(uy)) != 0)) { - return ux.e + uy.e; /* x or y is nan */ - } - if (ux.e == uy.e) { - return uy.e; /* x=y, return y */ - } - if (ux.e == 0.0) { - SET_LDOUBLE_MANH(ux, 0); /* return +-minsubnormal */ - SET_LDOUBLE_MANL(ux, 1); - SET_LDOUBLE_SIGN(ux, GET_LDOUBLE_SIGN(uy)); - t = ux.e * ux.e; - if (t == ux.e) { - return t; - } else { - return ux.e; /* raise underflow flag */ - } - } - if ((ux.e > 0.0) ^ (ux.e < uy.e)) { /* x -= ulp */ - if (GET_LDOUBLE_MANL(ux) == 0) { - if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) { - SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) - 1); - } - SET_LDOUBLE_MANH(ux, - (GET_LDOUBLE_MANH(ux) - 1) | - (GET_LDOUBLE_MANH(ux) & LDBL_NBIT)); - } - SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) - 1); - } else { /* x += ulp */ - SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) + 1); - if (GET_LDOUBLE_MANL(ux) == 0) { - SET_LDOUBLE_MANH(ux, - (GET_LDOUBLE_MANH(ux) + 1) | - (GET_LDOUBLE_MANH(ux) & LDBL_NBIT)); - if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) { - SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) + 1); - } - } - } - if 
(GET_LDOUBLE_EXP(ux) == 0x7fff) { - return ux.e + ux.e; /* overflow */ - } - if (GET_LDOUBLE_EXP(ux) == 0) { /* underflow */ - if (LDBL_NBIT) { - SET_LDOUBLE_MANH(ux, GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT); - } - t = ux.e * ux.e; - if (t != ux.e) { /* raise underflow flag */ - return ux.e; - } - } - - return ux.e; -} -#endif - -/**begin repeat - * #suff = f,,l# - * #SUFF = F,,L# - * #type = float, double, npy_longdouble# - */ -@type@ npy_spacing@suff@(@type@ x) -{ - /* XXX: npy isnan/isinf may be optimized by bit twiddling */ - if (npy_isinf(x)) { - return NPY_NAN@SUFF@; - } - - return _next@suff@(x, 1) - x; -} -/**end repeat**/ - -/* - * Decorate all the math functions which are available on the current platform - */ - -#ifdef HAVE_NEXTAFTERF -float npy_nextafterf(float x, float y) -{ - return nextafterf(x, y); -} -#endif - -#ifdef HAVE_NEXTAFTER -double npy_nextafter(double x, double y) -{ - return nextafter(x, y); -} -#endif - -#ifdef HAVE_NEXTAFTERL -npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y) -{ - return nextafterl(x, y); -} -#endif - -/* - * Functions to set the floating point status word. 
- */ - -#if defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \ - defined(__NetBSD__) -#include - -void npy_set_floatstatus_divbyzero(void) -{ - fpsetsticky(FP_X_DZ); -} - -void npy_set_floatstatus_overflow(void) -{ - fpsetsticky(FP_X_OFL); -} - -void npy_set_floatstatus_underflow(void) -{ - fpsetsticky(FP_X_UFL); -} - -void npy_set_floatstatus_invalid(void) -{ - fpsetsticky(FP_X_INV); -} - - -#elif defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__CYGWIN__) || defined(__MINGW32__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) - -# if defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__MINGW32__) || defined(__FreeBSD__) -# include -# elif defined(__CYGWIN__) -# include "numpy/fenv/fenv.h" -# endif - -void npy_set_floatstatus_divbyzero(void) -{ - feraiseexcept(FE_DIVBYZERO); -} - -void npy_set_floatstatus_overflow(void) -{ - feraiseexcept(FE_OVERFLOW); -} - -void npy_set_floatstatus_underflow(void) -{ - feraiseexcept(FE_UNDERFLOW); -} - -void npy_set_floatstatus_invalid(void) -{ - feraiseexcept(FE_INVALID); -} - -#elif defined(_AIX) -#include -#include - -void npy_set_floatstatus_divbyzero(void) -{ - fp_raise_xcp(FP_DIV_BY_ZERO); -} - -void npy_set_floatstatus_overflow(void) -{ - fp_raise_xcp(FP_OVERFLOW); -} - -void npy_set_floatstatus_underflow(void) -{ - fp_raise_xcp(FP_UNDERFLOW); -} - -void npy_set_floatstatus_invalid(void) -{ - fp_raise_xcp(FP_INVALID); -} - -#else - -/* - * By using a volatile floating point value, - * the compiler is forced to actually do the requested - * operations because of potential concurrency. - * - * We shouldn't write multiple values to a single - * global here, because that would cause - * a race condition. 
- */ -static volatile double _npy_floatstatus_x, - _npy_floatstatus_zero = 0.0, _npy_floatstatus_big = 1e300, - _npy_floatstatus_small = 1e-300, _npy_floatstatus_inf; - -void npy_set_floatstatus_divbyzero(void) -{ - _npy_floatstatus_x = 1.0 / _npy_floatstatus_zero; -} - -void npy_set_floatstatus_overflow(void) -{ - _npy_floatstatus_x = _npy_floatstatus_big * 1e300; -} - -void npy_set_floatstatus_underflow(void) -{ - _npy_floatstatus_x = _npy_floatstatus_small * 1e-300; -} - -void npy_set_floatstatus_invalid(void) -{ - _npy_floatstatus_inf = NPY_INFINITY; - _npy_floatstatus_x = _npy_floatstatus_inf - NPY_INFINITY; -} - -#endif - diff --git a/numpy-1.6.2/numpy/core/src/npymath/npy_math.c.src b/numpy-1.6.2/numpy/core/src/npymath/npy_math.c.src deleted file mode 100644 index 04a09bcba1..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/npy_math.c.src +++ /dev/null @@ -1,490 +0,0 @@ -/* - * vim:syntax=c - * A small module to implement missing C99 math capabilities required by numpy - * - * Please keep this independant of python ! Only basic types (npy_longdouble) - * can be used, otherwise, pure C, without any use of Python facilities - * - * How to add a function to this section - * ------------------------------------- - * - * Say you want to add `foo`, these are the steps and the reasons for them. - * - * 1) Add foo to the appropriate list in the configuration system. The - * lists can be found in numpy/core/setup.py lines 63-105. Read the - * comments that come with them, they are very helpful. - * - * 2) The configuration system will define a macro HAVE_FOO if your function - * can be linked from the math library. The result can depend on the - * optimization flags as well as the compiler, so can't be known ahead of - * time. If the function can't be linked, then either it is absent, defined - * as a macro, or is an intrinsic (hardware) function. 
- * - * i) Undefine any possible macros: - * - * #ifdef foo - * #undef foo - * #endif - * - * ii) Avoid as much as possible to declare any function here. Declaring - * functions is not portable: some platforms define some function inline - * with a non standard identifier, for example, or may put another - * idendifier which changes the calling convention of the function. If you - * really have to, ALWAYS declare it for the one platform you are dealing - * with: - * - * Not ok: - * double exp(double a); - * - * Ok: - * #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM - * double exp(double); - * #endif - * - * Some of the code is taken from msun library in FreeBSD, with the following - * notice: - * - * ==================================================== - * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. - * - * Developed at SunPro, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ -#include "npy_math_private.h" - -/* - ***************************************************************************** - ** BASIC MATH FUNCTIONS ** - ***************************************************************************** - */ - -/* Original code by Konrad Hinsen. */ -#ifndef HAVE_EXPM1 -double npy_expm1(double x) -{ - const double u = npy_exp(x); - - if (u == 1.0) { - return x; - } else if (u - 1.0 == -1.0) { - return -1; - } else { - return (u - 1.0) * x/npy_log(u); - } -} -#endif - -#ifndef HAVE_LOG1P -double npy_log1p(double x) -{ - const double u = 1. 
+ x; - const double d = u - 1.; - - if (d == 0) { - return x; - } else { - return npy_log(u) * x / d; - } -} -#endif - -/* Taken from FreeBSD mlib, adapted for numpy - * - * XXX: we could be a bit faster by reusing high/low words for inf/nan - * classification instead of calling npy_isinf/npy_isnan: we should have some - * macros for this, though, instead of doing it manually - */ -#ifndef HAVE_ATAN2 -/* XXX: we should have this in npy_math.h */ -#define NPY_DBL_EPSILON 1.2246467991473531772E-16 -double npy_atan2(double y, double x) -{ - npy_int32 k, m, iy, ix, hx, hy; - npy_uint32 lx,ly; - double z; - - EXTRACT_WORDS(hx, lx, x); - ix = hx & 0x7fffffff; - EXTRACT_WORDS(hy, ly, y); - iy = hy & 0x7fffffff; - - /* if x or y is nan, return nan */ - if (npy_isnan(x * y)) { - return x + y; - } - - if (x == 1.0) { - return npy_atan(y); - } - - m = 2 * npy_signbit(x) + npy_signbit(y); - if (y == 0.0) { - switch(m) { - case 0: - case 1: return y; /* atan(+-0,+anything)=+-0 */ - case 2: return NPY_PI;/* atan(+0,-anything) = pi */ - case 3: return -NPY_PI;/* atan(-0,-anything) =-pi */ - } - } - - if (x == 0.0) { - return y > 0 ? NPY_PI_2 : -NPY_PI_2; - } - - if (npy_isinf(x)) { - if (npy_isinf(y)) { - switch(m) { - case 0: return NPY_PI_4;/* atan(+INF,+INF) */ - case 1: return -NPY_PI_4;/* atan(-INF,+INF) */ - case 2: return 3.0*NPY_PI_4;/*atan(+INF,-INF)*/ - case 3: return -3.0*NPY_PI_4;/*atan(-INF,-INF)*/ - } - } else { - switch(m) { - case 0: return NPY_PZERO; /* atan(+...,+INF) */ - case 1: return NPY_NZERO; /* atan(-...,+INF) */ - case 2: return NPY_PI; /* atan(+...,-INF) */ - case 3: return -NPY_PI; /* atan(-...,-INF) */ - } - } - } - - if (npy_isinf(y)) { - return y > 0 ? 
NPY_PI_2 : -NPY_PI_2; - } - - /* compute y/x */ - k = (iy - ix) >> 20; - if (k > 60) { /* |y/x| > 2**60 */ - z = NPY_PI_2 + 0.5 * NPY_DBL_EPSILON; - m &= 1; - } else if (hx < 0 && k < -60) { - z = 0.0; /* 0 > |y|/x > -2**-60 */ - } else { - z = npy_atan(npy_fabs(y/x)); /* safe to do y/x */ - } - - switch (m) { - case 0: return z ; /* atan(+,+) */ - case 1: return -z ; /* atan(-,+) */ - case 2: return NPY_PI - (z - NPY_DBL_EPSILON);/* atan(+,-) */ - default: /* case 3 */ - return (z - NPY_DBL_EPSILON) - NPY_PI;/* atan(-,-) */ - } -} - -#endif - -#ifndef HAVE_HYPOT -double npy_hypot(double x, double y) -{ - double yx; - - /* Handle the case where x or y is a NaN */ - if (npy_isnan(x * y)) { - if (npy_isinf(x) || npy_isinf(y)) { - return NPY_INFINITY; - } else { - return NPY_NAN; - } - } - - x = npy_fabs(x); - y = npy_fabs(y); - if (x < y) { - double temp = x; - x = y; - y = temp; - } - if (x == 0.) { - return 0.; - } - else { - yx = y/x; - return x*npy_sqrt(1.+yx*yx); - } -} -#endif - -#ifndef HAVE_ACOSH -double npy_acosh(double x) -{ - return 2*npy_log(npy_sqrt((x + 1.0)/2) + npy_sqrt((x - 1.0)/2)); -} -#endif - -#ifndef HAVE_ASINH -double npy_asinh(double xx) -{ - double x, d; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if (x > 1e8) { - d = x; - } else { - d = npy_sqrt(x*x + 1); - } - return sign*npy_log1p(x*(1.0 + x/(d+1))); -} -#endif - -#ifndef HAVE_ATANH -double npy_atanh(double x) -{ - if (x > 0) { - return -0.5*npy_log1p(-2.0*x/(1.0 + x)); - } - else { - return 0.5*npy_log1p(2.0*x/(1.0 - x)); - } -} -#endif - -#ifndef HAVE_RINT -double npy_rint(double x) -{ - double y, r; - - y = npy_floor(x); - r = x - y; - - if (r > 0.5) { - y += 1.0; - } - - /* Round to nearest even */ - if (r == 0.5) { - r = y - 2.0*npy_floor(0.5*y); - if (r == 1.0) { - y += 1.0; - } - } - return y; -} -#endif - -#ifndef HAVE_TRUNC -double npy_trunc(double x) -{ - return x < 0 ? 
npy_ceil(x) : npy_floor(x); -} -#endif - -#ifndef HAVE_EXP2 -double npy_exp2(double x) -{ - return npy_exp(NPY_LOGE2*x); -} -#endif - -#ifndef HAVE_LOG2 -double npy_log2(double x) -{ - return NPY_LOG2E*npy_log(x); -} -#endif - -/* - * if C99 extensions not available then define dummy functions that use the - * double versions for - * - * sin, cos, tan - * sinh, cosh, tanh, - * fabs, floor, ceil, rint, trunc - * sqrt, log10, log, exp, expm1 - * asin, acos, atan, - * asinh, acosh, atanh - * - * hypot, atan2, pow, fmod, modf - * - * We assume the above are always available in their double versions. - * - * NOTE: some facilities may be available as macro only instead of functions. - * For simplicity, we define our own functions and undef the macros. We could - * instead test for the macro, but I am lazy to do that for now. - */ - -/**begin repeat - * #type = npy_longdouble, float# - * #TYPE = NPY_LONGDOUBLE, FLOAT# - * #c = l,f# - * #C = L,F# - */ - -/**begin repeat1 - * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# - * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, - * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# - */ - -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_@KIND@@C@ -@type@ npy_@kind@@c@(@type@ x) -{ - return (@type@) npy_@kind@((double)x); -} -#endif - -/**end repeat1**/ - -/**begin repeat1 - * #kind = atan2,hypot,pow,fmod,copysign# - * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN# - */ -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_@KIND@@C@ -@type@ npy_@kind@@c@(@type@ x, @type@ y) -{ - return (@type@) npy_@kind@((double)x, (double) y); -} -#endif -/**end repeat1**/ - -#ifdef modf@c@ -#undef modf@c@ -#endif -#ifndef HAVE_MODF@C@ -@type@ npy_modf@c@(@type@ x, @type@ *iptr) -{ - double niptr; - double y = npy_modf((double)x, &niptr); - *iptr = (@type@) niptr; - return (@type@) y; -} -#endif - -/**end 
repeat**/ - - -/* - * Decorate all the math functions which are available on the current platform - */ - -/**begin repeat - * #type = npy_longdouble,double,float# - * #c = l,,f# - * #C = L,,F# - */ -/**begin repeat1 - * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# - * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, - * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# - */ -#ifdef HAVE_@KIND@@C@ -@type@ npy_@kind@@c@(@type@ x) -{ - return @kind@@c@(x); -} -#endif - -/**end repeat1**/ - -/**begin repeat1 - * #kind = atan2,hypot,pow,fmod,copysign# - * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN# - */ -#ifdef HAVE_@KIND@@C@ -@type@ npy_@kind@@c@(@type@ x, @type@ y) -{ - return @kind@@c@(x, y); -} -#endif -/**end repeat1**/ - -#ifdef HAVE_MODF@C@ -@type@ npy_modf@c@(@type@ x, @type@ *iptr) -{ - return modf@c@(x, iptr); -} -#endif - -/**end repeat**/ - - -/* - * Non standard functions - */ - -/**begin repeat - * #type = float, double, npy_longdouble# - * #c = f, ,l# - * #C = F, ,L# - */ - -#define LOGE2 NPY_LOGE2@c@ -#define LOG2E NPY_LOG2E@c@ -#define RAD2DEG (180.0@c@/NPY_PI@c@) -#define DEG2RAD (NPY_PI@c@/180.0@c@) - -@type@ npy_rad2deg@c@(@type@ x) -{ - return x*RAD2DEG; -} - -@type@ npy_deg2rad@c@(@type@ x) -{ - return x*DEG2RAD; -} - -@type@ npy_log2_1p@c@(@type@ x) -{ - return LOG2E*npy_log1p@c@(x); -} - -@type@ npy_exp2_m1@c@(@type@ x) -{ - return npy_expm1@c@(LOGE2*x); -} - -@type@ npy_logaddexp@c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + npy_log1p@c@(npy_exp@c@(-tmp)); - } - else if (tmp <= 0) { - return y + npy_log1p@c@(npy_exp@c@(tmp)); - } - else { - /* NaNs, or infinities of the same sign involved */ - return x + y; - } -} - -@type@ npy_logaddexp2@c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + npy_log2_1p@c@(npy_exp2@c@(-tmp)); - } - else if (tmp <= 0) 
{ - return y + npy_log2_1p@c@(npy_exp2@c@(tmp)); - } - else { - /* NaNs, or infinities of the same sign involved */ - return x + y; - } -} - -#undef LOGE2 -#undef LOG2E -#undef RAD2DEG -#undef DEG2RAD - -/**end repeat**/ diff --git a/numpy-1.6.2/numpy/core/src/npymath/npy_math_common.h b/numpy-1.6.2/numpy/core/src/npymath/npy_math_common.h deleted file mode 100644 index 1f555a90a6..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/npy_math_common.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Common headers needed by every npy math compilation unit - */ -#include -#include -#include - -#include "npy_config.h" -#include "numpy/npy_math.h" diff --git a/numpy-1.6.2/numpy/core/src/npymath/npy_math_complex.c.src b/numpy-1.6.2/numpy/core/src/npymath/npy_math_complex.c.src deleted file mode 100644 index 718de5b6cf..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/npy_math_complex.c.src +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Implement some C99-compatible complex math functions - * - * Most of the code is taken from the msun library in FreeBSD (HEAD @ 30th June - * 2009), under the following license: - * - * Copyright (c) 2007 David Schultz - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -#include "npy_math_common.h" -#include "npy_math_private.h" - -/*========================================================== - * Custom implementation of missing complex C99 functions - *=========================================================*/ - -/**begin repeat - * #type = float,double,npy_longdouble# - * #ctype = npy_cfloat,npy_cdouble,npy_clongdouble# - * #c = f, , l# - * #C = F, , L# - * #TMAX = FLT_MAX, DBL_MAX, LDBL_MAX# - */ -#ifndef HAVE_CABS@C@ -@type@ npy_cabs@c@(@ctype@ z) -{ - return npy_hypot@c@(npy_creal@c@(z), npy_cimag@c@(z)); -} -#endif - -#ifndef HAVE_CARG@C@ -@type@ npy_carg@c@(@ctype@ z) -{ - return npy_atan2@c@(npy_cimag@c@(z), npy_creal@c@(z)); -} -#endif - -#ifndef HAVE_CEXP@C@ -@ctype@ npy_cexp@c@(@ctype@ z) -{ - @type@ x, c, s; - @type@ r, i; - @ctype@ ret; - - r = npy_creal@c@(z); - i = npy_cimag@c@(z); - - if (npy_isfinite(r)) { - x = npy_exp@c@(r); - - c = npy_cos@c@(i); - s = npy_sin@c@(i); - - if (npy_isfinite(i)) { - ret = npy_cpack@c@(x * c, x * s); - } else { - ret = npy_cpack@c@(NPY_NAN, npy_copysign@c@(NPY_NAN, i)); - } - - } else if (npy_isnan(r)) { - /* r is nan */ - if (i == 0) { - ret = npy_cpack@c@(r, 0); - } else { - ret = npy_cpack@c@(r, npy_copysign@c@(NPY_NAN, i)); - } - } else { - /* r is +- inf */ - if (r > 0) { - if (i == 0) { - ret = npy_cpack@c@(r, i); - } else if (npy_isfinite(i)) { - c = npy_cos@c@(i); - s = npy_sin@c@(i); - - ret = npy_cpack@c@(r * c, r * s); - } else { - /* x = 
+inf, y = +-inf | nan */ - ret = npy_cpack@c@(r, NPY_NAN); - } - } else { - if (npy_isfinite(i)) { - x = npy_exp@c@(r); - c = npy_cos@c@(i); - s = npy_sin@c@(i); - - ret = npy_cpack@c@(x * c, x * s); - } else { - /* x = -inf, y = nan | +i inf */ - ret = npy_cpack@c@(0, 0); - } - } - } - - return ret; -} -#endif - -#ifndef HAVE_CLOG@C@ -@ctype@ npy_clog@c@(@ctype@ z) -{ - return npy_cpack@c@(npy_log@c@ (npy_cabs@c@ (z)), npy_carg@c@ (z)); -} -#endif - -#ifndef HAVE_CSQRT@C@ - -/* We risk spurious overflow for components >= DBL_MAX / (1 + sqrt(2)). */ -#define THRESH (@TMAX@ / (1 + NPY_SQRT2@c@)) - -@ctype@ npy_csqrt@c@(@ctype@ z) -{ - @ctype@ result; - @type@ a, b; - @type@ t; - int scale; - - a = npy_creal@c@(z); - b = npy_cimag@c@(z); - - /* Handle special cases. */ - if (a == 0 && b == 0) - return (npy_cpack@c@(0, b)); - if (npy_isinf(b)) - return (npy_cpack@c@(NPY_INFINITY, b)); - if (npy_isnan(a)) { - t = (b - b) / (b - b); /* raise invalid if b is not a NaN */ - return (npy_cpack@c@(a, t)); /* return NaN + NaN i */ - } - if (npy_isinf(a)) { - /* - * csqrt(inf + NaN i) = inf + NaN i - * csqrt(inf + y i) = inf + 0 i - * csqrt(-inf + NaN i) = NaN +- inf i - * csqrt(-inf + y i) = 0 + inf i - */ - if (npy_signbit(a)) - return (npy_cpack@c@(npy_fabs@c@(b - b), npy_copysign@c@(a, b))); - else - return (npy_cpack@c@(a, npy_copysign@c@(b - b, b))); - } - /* - * The remaining special case (b is NaN) is handled just fine by - * the normal code path below. - */ - - /* Scale to avoid overflow. */ - if (npy_fabs@c@(a) >= THRESH || npy_fabs@c@(b) >= THRESH) { - a *= 0.25; - b *= 0.25; - scale = 1; - } else { - scale = 0; - } - - /* Algorithm 312, CACM vol 10, Oct 1967. */ - if (a >= 0) { - t = npy_sqrt@c@((a + npy_hypot@c@(a, b)) * 0.5); - result = npy_cpack@c@(t, b / (2 * t)); - } else { - t = npy_sqrt@c@((-a + npy_hypot@c@(a, b)) * 0.5); - result = npy_cpack@c@(npy_fabs@c@(b) / (2 * t), npy_copysign@c@(t, b)); - } - - /* Rescale. 
*/ - if (scale) - return (npy_cpack@c@(npy_creal@c@(result) * 2, npy_cimag@c@(result))); - else - return (result); -} -#undef THRESH -#endif - -#ifndef HAVE_CPOW@C@ -@ctype@ npy_cpow@c@ (@ctype@ x, @ctype@ y) -{ - @ctype@ b; - @type@ br, bi, yr, yi; - - yr = npy_creal@c@(y); - yi = npy_cimag@c@(y); - b = npy_clog@c@(x); - br = npy_creal@c@(b); - bi = npy_cimag@c@(b); - - return npy_cexp@c@(npy_cpack@c@(br * yr - bi * yi, br * yi + bi * yr)); -} -#endif - -#ifndef HAVE_CCOS@C@ -@ctype@ npy_ccos@c@(@ctype@ z) -{ - @type@ x, y; - x = npy_creal@c@(z); - y = npy_cimag@c@(z); - return npy_cpack@c@(npy_cos@c@(x) * npy_cosh@c@(y), -(npy_sin@c@(x) * npy_sinh@c@(y))); -} -#endif - -#ifndef HAVE_CSIN@C@ -@ctype@ npy_csin@c@(@ctype@ z) -{ - @type@ x, y; - x = npy_creal@c@(z); - y = npy_cimag@c@(z); - return npy_cpack@c@(npy_sin@c@(x) * npy_cosh@c@(y), npy_cos@c@(x) * npy_sinh@c@(y)); -} -#endif -/**end repeat**/ - -/*========================================================== - * Decorate all the functions which are available natively - *=========================================================*/ - -/**begin repeat - * #type = float, double, npy_longdouble# - * #ctype = npy_cfloat, npy_cdouble, npy_clongdouble# - * #c = f, , l# - * #C = F, , L# - */ - -/**begin repeat1 - * #kind = cabs,carg# - * #KIND = CABS,CARG# - */ -#ifdef HAVE_@KIND@@C@ -@type@ npy_@kind@@c@(@ctype@ z) -{ - __@ctype@_to_c99_cast z1 = {z}; - return @kind@@c@(z1.c99_z); -} -#endif -/**end repeat1**/ - -/**begin repeat1 - * #kind = cexp,clog,csqrt,ccos,csin# - * #KIND = CEXP,CLOG,CSQRT,CCOS,CSIN# - */ -#ifdef HAVE_@KIND@@C@ -@ctype@ npy_@kind@@c@(@ctype@ z) -{ - __@ctype@_to_c99_cast z1 = {z}; - __@ctype@_to_c99_cast ret; - ret.c99_z = @kind@@c@(z1.c99_z); - return ret.npy_z; -} -#endif -/**end repeat1**/ - -/**begin repeat1 - * #kind = cpow# - * #KIND = CPOW# - */ -#ifdef HAVE_@KIND@@C@ -@ctype@ npy_@kind@@c@(@ctype@ x, @ctype@ y) -{ - __@ctype@_to_c99_cast xcast = {x}; - __@ctype@_to_c99_cast ycast = {y}; - 
__@ctype@_to_c99_cast ret; - ret.c99_z = @kind@@c@(xcast.c99_z, ycast.c99_z); - return ret.npy_z; -} -#endif -/**end repeat1**/ - -/**end repeat**/ diff --git a/numpy-1.6.2/numpy/core/src/npymath/npy_math_private.h b/numpy-1.6.2/numpy/core/src/npymath/npy_math_private.h deleted file mode 100644 index 722d03f94b..0000000000 --- a/numpy-1.6.2/numpy/core/src/npymath/npy_math_private.h +++ /dev/null @@ -1,481 +0,0 @@ -/* - * - * ==================================================== - * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. - * - * Developed at SunPro, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ - -/* - * from: @(#)fdlibm.h 5.1 93/09/24 - * $FreeBSD$ - */ - -#ifndef _NPY_MATH_PRIVATE_H_ -#define _NPY_MATH_PRIVATE_H_ - -#include -#include - -#include "npy_config.h" -#include "npy_fpmath.h" - -#include "numpy/npy_math.h" -#include "numpy/npy_cpu.h" -#include "numpy/npy_endian.h" -#include "numpy/npy_common.h" - -/* - * The original fdlibm code used statements like: - * n0 = ((*(int*)&one)>>29)^1; * index of high word * - * ix0 = *(n0+(int*)&x); * high word of x * - * ix1 = *((1-n0)+(int*)&x); * low word of x * - * to dig two 32 bit words out of the 64 bit IEEE floating point - * value. That is non-ANSI, and, moreover, the gcc instruction - * scheduler gets it wrong. We instead use the following macros. - * Unlike the original code, we determine the endianness at compile - * time, not at run time; I don't see much benefit to selecting - * endianness at run time. - */ - -/* - * A union which permits us to convert between a double and two 32 bit - * ints. - */ - -/* XXX: not really, but we already make this assumption elsewhere. 
Will have to - * fix this at some point */ -#define IEEE_WORD_ORDER NPY_BYTE_ORDER - -#if IEEE_WORD_ORDER == NPY_BIG_ENDIAN - -typedef union -{ - double value; - struct - { - npy_uint32 msw; - npy_uint32 lsw; - } parts; -} ieee_double_shape_type; - -#endif - -#if IEEE_WORD_ORDER == NPY_LITTLE_ENDIAN - -typedef union -{ - double value; - struct - { - npy_uint32 lsw; - npy_uint32 msw; - } parts; -} ieee_double_shape_type; - -#endif - -/* Get two 32 bit ints from a double. */ - -#define EXTRACT_WORDS(ix0,ix1,d) \ -do { \ - ieee_double_shape_type ew_u; \ - ew_u.value = (d); \ - (ix0) = ew_u.parts.msw; \ - (ix1) = ew_u.parts.lsw; \ -} while (0) - -/* Get the more significant 32 bit int from a double. */ - -#define GET_HIGH_WORD(i,d) \ -do { \ - ieee_double_shape_type gh_u; \ - gh_u.value = (d); \ - (i) = gh_u.parts.msw; \ -} while (0) - -/* Get the less significant 32 bit int from a double. */ - -#define GET_LOW_WORD(i,d) \ -do { \ - ieee_double_shape_type gl_u; \ - gl_u.value = (d); \ - (i) = gl_u.parts.lsw; \ -} while (0) - -/* Set the more significant 32 bits of a double from an int. */ - -#define SET_HIGH_WORD(d,v) \ -do { \ - ieee_double_shape_type sh_u; \ - sh_u.value = (d); \ - sh_u.parts.msw = (v); \ - (d) = sh_u.value; \ -} while (0) - -/* Set the less significant 32 bits of a double from an int. */ - -#define SET_LOW_WORD(d,v) \ -do { \ - ieee_double_shape_type sl_u; \ - sl_u.value = (d); \ - sl_u.parts.lsw = (v); \ - (d) = sl_u.value; \ -} while (0) - -/* Set a double from two 32 bit ints. */ - -#define INSERT_WORDS(d,ix0,ix1) \ -do { \ - ieee_double_shape_type iw_u; \ - iw_u.parts.msw = (ix0); \ - iw_u.parts.lsw = (ix1); \ - (d) = iw_u.value; \ -} while (0) - -/* - * A union which permits us to convert between a float and a 32 bit - * int. - */ - -typedef union -{ - float value; - /* FIXME: Assumes 32 bit int. */ - npy_uint32 word; -} ieee_float_shape_type; - -/* Get a 32 bit int from a float. 
*/ - -#define GET_FLOAT_WORD(i,d) \ -do { \ - ieee_float_shape_type gf_u; \ - gf_u.value = (d); \ - (i) = gf_u.word; \ -} while (0) - -/* Set a float from a 32 bit int. */ - -#define SET_FLOAT_WORD(d,i) \ -do { \ - ieee_float_shape_type sf_u; \ - sf_u.word = (i); \ - (d) = sf_u.value; \ -} while (0) - -#ifdef NPY_USE_C99_COMPLEX -#include -#endif - -/* - * Long double support - */ -#if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) - /* - * Intel extended 80 bits precision. Bit representation is - * | junk | s |eeeeeeeeeeeeeee|mmmmmmmm................mmmmmmm| - * | 16 bits| 1 bit | 15 bits | 64 bits | - * | a[2] | a[1] | a[0] | - * - * 16 stronger bits of a[2] are junk - */ - typedef npy_uint32 IEEEl2bitsrep_part; - -/* my machine */ - - union IEEEl2bitsrep { - npy_longdouble e; - IEEEl2bitsrep_part a[3]; - }; - - #define LDBL_MANL_INDEX 0 - #define LDBL_MANL_MASK 0xFFFFFFFF - #define LDBL_MANL_SHIFT 0 - - #define LDBL_MANH_INDEX 1 - #define LDBL_MANH_MASK 0xFFFFFFFF - #define LDBL_MANH_SHIFT 0 - - #define LDBL_EXP_INDEX 2 - #define LDBL_EXP_MASK 0x7FFF - #define LDBL_EXP_SHIFT 0 - - #define LDBL_SIGN_INDEX 2 - #define LDBL_SIGN_MASK 0x8000 - #define LDBL_SIGN_SHIFT 15 - - #define LDBL_NBIT 0x80000000 - - typedef npy_uint32 ldouble_man_t; - typedef npy_uint32 ldouble_exp_t; - typedef npy_uint32 ldouble_sign_t; -#elif defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) - /* - * Intel extended 80 bits precision, 16 bytes alignment.. 
Bit representation is - * | junk | s |eeeeeeeeeeeeeee|mmmmmmmm................mmmmmmm| - * | 16 bits| 1 bit | 15 bits | 64 bits | - * | a[2] | a[1] | a[0] | - * - * a[3] and 16 stronger bits of a[2] are junk - */ - typedef npy_uint32 IEEEl2bitsrep_part; - - union IEEEl2bitsrep { - npy_longdouble e; - IEEEl2bitsrep_part a[4]; - }; - - #define LDBL_MANL_INDEX 0 - #define LDBL_MANL_MASK 0xFFFFFFFF - #define LDBL_MANL_SHIFT 0 - - #define LDBL_MANH_INDEX 1 - #define LDBL_MANH_MASK 0xFFFFFFFF - #define LDBL_MANH_SHIFT 0 - - #define LDBL_EXP_INDEX 2 - #define LDBL_EXP_MASK 0x7FFF - #define LDBL_EXP_SHIFT 0 - - #define LDBL_SIGN_INDEX 2 - #define LDBL_SIGN_MASK 0x8000 - #define LDBL_SIGN_SHIFT 15 - - #define LDBL_NBIT 0x800000000 - - typedef npy_uint32 ldouble_man_t; - typedef npy_uint32 ldouble_exp_t; - typedef npy_uint32 ldouble_sign_t; -#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \ - defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE) - /* 64 bits IEEE double precision aligned on 16 bytes: used by ppc arch on - * Mac OS X */ - - /* - * IEEE double precision. Bit representation is - * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm| - * |1 bit| 11 bits | 52 bits | - * | a[0] | a[1] | - */ - typedef npy_uint32 IEEEl2bitsrep_part; - - union IEEEl2bitsrep { - npy_longdouble e; - IEEEl2bitsrep_part a[2]; - }; - - #define LDBL_MANL_INDEX 1 - #define LDBL_MANL_MASK 0xFFFFFFFF - #define LDBL_MANL_SHIFT 0 - - #define LDBL_MANH_INDEX 0 - #define LDBL_MANH_MASK 0x000FFFFF - #define LDBL_MANH_SHIFT 0 - - #define LDBL_EXP_INDEX 0 - #define LDBL_EXP_MASK 0x7FF00000 - #define LDBL_EXP_SHIFT 20 - - #define LDBL_SIGN_INDEX 0 - #define LDBL_SIGN_MASK 0x80000000 - #define LDBL_SIGN_SHIFT 31 - - #define LDBL_NBIT 0 - - typedef npy_uint32 ldouble_man_t; - typedef npy_uint32 ldouble_exp_t; - typedef npy_uint32 ldouble_sign_t; -#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_LE) - /* 64 bits IEEE double precision, Little Endian. */ - - /* - * IEEE double precision. 
Bit representation is - * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm| - * |1 bit| 11 bits | 52 bits | - * | a[1] | a[0] | - */ - typedef npy_uint32 IEEEl2bitsrep_part; - - union IEEEl2bitsrep { - npy_longdouble e; - IEEEl2bitsrep_part a[2]; - }; - - #define LDBL_MANL_INDEX 0 - #define LDBL_MANL_MASK 0xFFFFFFFF - #define LDBL_MANL_SHIFT 0 - - #define LDBL_MANH_INDEX 1 - #define LDBL_MANH_MASK 0x000FFFFF - #define LDBL_MANH_SHIFT 0 - - #define LDBL_EXP_INDEX 1 - #define LDBL_EXP_MASK 0x7FF00000 - #define LDBL_EXP_SHIFT 20 - - #define LDBL_SIGN_INDEX 1 - #define LDBL_SIGN_MASK 0x80000000 - #define LDBL_SIGN_SHIFT 31 - - #define LDBL_NBIT 0x00000080 - - typedef npy_uint32 ldouble_man_t; - typedef npy_uint32 ldouble_exp_t; - typedef npy_uint32 ldouble_sign_t; -#elif defined(HAVE_LDOUBLE_IEEE_QUAD_BE) - /* - * IEEE quad precision, Big Endian. Bit representation is - * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm| - * |1 bit| 15 bits | 112 bits | - * | a[0] | a[1] | - */ - typedef npy_uint64 IEEEl2bitsrep_part; - - union IEEEl2bitsrep { - npy_longdouble e; - IEEEl2bitsrep_part a[2]; - }; - - #define LDBL_MANL_INDEX 1 - #define LDBL_MANL_MASK 0xFFFFFFFFFFFFFFFF - #define LDBL_MANL_SHIFT 0 - - #define LDBL_MANH_INDEX 0 - #define LDBL_MANH_MASK 0x0000FFFFFFFFFFFF - #define LDBL_MANH_SHIFT 0 - - #define LDBL_EXP_INDEX 0 - #define LDBL_EXP_MASK 0x7FFF000000000000 - #define LDBL_EXP_SHIFT 48 - - #define LDBL_SIGN_INDEX 0 - #define LDBL_SIGN_MASK 0x8000000000000000 - #define LDBL_SIGN_SHIFT 63 - - #define LDBL_NBIT 0 - - typedef npy_uint64 ldouble_man_t; - typedef npy_uint64 ldouble_exp_t; - typedef npy_uint32 ldouble_sign_t; -#elif defined(HAVE_LDOUBLE_IEEE_QUAD_LE) - /* - * IEEE quad precision, Little Endian. 
Bit representation is - * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm| - * |1 bit| 15 bits | 112 bits | - * | a[1] | a[0] | - */ - typedef npy_uint64 IEEEl2bitsrep_part; - - union IEEEl2bitsrep { - npy_longdouble e; - IEEEl2bitsrep_part a[2]; - }; - - #define LDBL_MANL_INDEX 0 - #define LDBL_MANL_MASK 0xFFFFFFFFFFFFFFFF - #define LDBL_MANL_SHIFT 0 - - #define LDBL_MANH_INDEX 1 - #define LDBL_MANH_MASK 0x0000FFFFFFFFFFFF - #define LDBL_MANH_SHIFT 0 - - #define LDBL_EXP_INDEX 1 - #define LDBL_EXP_MASK 0x7FFF000000000000 - #define LDBL_EXP_SHIFT 48 - - #define LDBL_SIGN_INDEX 1 - #define LDBL_SIGN_MASK 0x8000000000000000 - #define LDBL_SIGN_SHIFT 63 - - #define LDBL_NBIT 0 - - typedef npy_uint64 ldouble_man_t; - typedef npy_uint64 ldouble_exp_t; - typedef npy_uint32 ldouble_sign_t; -#endif - -#ifndef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE -/* Get the sign bit of x. x should be of type IEEEl2bitsrep */ -#define GET_LDOUBLE_SIGN(x) \ - (((x).a[LDBL_SIGN_INDEX] & LDBL_SIGN_MASK) >> LDBL_SIGN_SHIFT) - -/* Set the sign bit of x to v. x should be of type IEEEl2bitsrep */ -#define SET_LDOUBLE_SIGN(x, v) \ - ((x).a[LDBL_SIGN_INDEX] = \ - ((x).a[LDBL_SIGN_INDEX] & ~LDBL_SIGN_MASK) | \ - (((IEEEl2bitsrep_part)(v) << LDBL_SIGN_SHIFT) & LDBL_SIGN_MASK)) - -/* Get the exp bits of x. x should be of type IEEEl2bitsrep */ -#define GET_LDOUBLE_EXP(x) \ - (((x).a[LDBL_EXP_INDEX] & LDBL_EXP_MASK) >> LDBL_EXP_SHIFT) - -/* Set the exp bit of x to v. x should be of type IEEEl2bitsrep */ -#define SET_LDOUBLE_EXP(x, v) \ - ((x).a[LDBL_EXP_INDEX] = \ - ((x).a[LDBL_EXP_INDEX] & ~LDBL_EXP_MASK) | \ - (((IEEEl2bitsrep_part)(v) << LDBL_EXP_SHIFT) & LDBL_EXP_MASK)) - -/* Get the manl bits of x. x should be of type IEEEl2bitsrep */ -#define GET_LDOUBLE_MANL(x) \ - (((x).a[LDBL_MANL_INDEX] & LDBL_MANL_MASK) >> LDBL_MANL_SHIFT) - -/* Set the manl bit of x to v. 
x should be of type IEEEl2bitsrep */ -#define SET_LDOUBLE_MANL(x, v) \ - ((x).a[LDBL_MANL_INDEX] = \ - ((x).a[LDBL_MANL_INDEX] & ~LDBL_MANL_MASK) | \ - (((IEEEl2bitsrep_part)(v) << LDBL_MANL_SHIFT) & LDBL_MANL_MASK)) - -/* Get the manh bits of x. x should be of type IEEEl2bitsrep */ -#define GET_LDOUBLE_MANH(x) \ - (((x).a[LDBL_MANH_INDEX] & LDBL_MANH_MASK) >> LDBL_MANH_SHIFT) - -/* Set the manh bit of x to v. x should be of type IEEEl2bitsrep */ -#define SET_LDOUBLE_MANH(x, v) \ - ((x).a[LDBL_MANH_INDEX] = \ - ((x).a[LDBL_MANH_INDEX] & ~LDBL_MANH_MASK) | \ - (((IEEEl2bitsrep_part)(v) << LDBL_MANH_SHIFT) & LDBL_MANH_MASK)) - -#endif /* #ifndef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE */ - -/* - * Those unions are used to convert a pointer of npy_cdouble to native C99 - * complex or our own complex type independently on whether C99 complex - * support is available - */ -#ifdef NPY_USE_C99_COMPLEX -typedef union { - npy_cdouble npy_z; - complex double c99_z; -} __npy_cdouble_to_c99_cast; - -typedef union { - npy_cfloat npy_z; - complex float c99_z; -} __npy_cfloat_to_c99_cast; - -typedef union { - npy_clongdouble npy_z; - complex long double c99_z; -} __npy_clongdouble_to_c99_cast; -#else -typedef union { - npy_cdouble npy_z; - npy_cdouble c99_z; -} __npy_cdouble_to_c99_cast; - -typedef union { - npy_cfloat npy_z; - npy_cfloat c99_z; -} __npy_cfloat_to_c99_cast; - -typedef union { - npy_clongdouble npy_z; - npy_clongdouble c99_z; -} __npy_clongdouble_to_c99_cast; -#endif - -#endif /* !_NPY_MATH_PRIVATE_H_ */ diff --git a/numpy-1.6.2/numpy/core/src/private/lowlevel_strided_loops.h b/numpy-1.6.2/numpy/core/src/private/lowlevel_strided_loops.h deleted file mode 100644 index 5fc42bc406..0000000000 --- a/numpy-1.6.2/numpy/core/src/private/lowlevel_strided_loops.h +++ /dev/null @@ -1,397 +0,0 @@ -#ifndef __LOWLEVEL_STRIDED_LOOPS_H -#define __LOWLEVEL_STRIDED_LOOPS_H - -/* - * NOTE: This API should remain private for the time being, to allow - * for further refinement. 
I think the 'aligned' mechanism - * needs changing, for example. - */ - -/* - * This function pointer is for functions that transfer an arbitrarily strided - * input to a an arbitrarily strided output. It may be a fully general - * function, or a specialized function when the strides or item size - * have special values. - * - * Examples of transfer functions are a straight copy, a byte-swap, - * and a casting operation, - * - * The 'transferdata' parameter is slightly special, and must always contain - * pointer to deallocation and copying routines at its beginning. The function - * PyArray_FreeStridedTransferData should be used to deallocate such - * pointers, and calls the first function pointer, while the function - * PyArray_CopyStridedTransferData should be used to copy it. - * - */ -typedef void (PyArray_StridedTransferFn)(char *dst, npy_intp dst_stride, - char *src, npy_intp src_stride, - npy_intp N, npy_intp src_itemsize, - void *transferdata); - -/* - * Deallocates a PyArray_StridedTransferFunction data object. See - * the comment with the function typedef for more details. - */ -NPY_NO_EXPORT void -PyArray_FreeStridedTransferData(void *transferdata); - -/* - * Copies a PyArray_StridedTransferFunction data object. See - * the comment with the function typedef for more details. - */ -NPY_NO_EXPORT void * -PyArray_CopyStridedTransferData(void *transferdata); - -/* - * Gives back a function pointer to a specialized function for copying - * strided memory. Returns NULL if there is a problem with the inputs. - * - * aligned: - * Should be 1 if the src and dst pointers are always aligned, - * 0 otherwise. - * src_stride: - * Should be the src stride if it will always be the same, - * NPY_MAX_INTP otherwise. - * dst_stride: - * Should be the dst stride if it will always be the same, - * NPY_MAX_INTP otherwise. - * itemsize: - * Should be the item size if it will always be the same, 0 otherwise. 
- * - */ -NPY_NO_EXPORT PyArray_StridedTransferFn * -PyArray_GetStridedCopyFn(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, npy_intp itemsize); - -/* - * Gives back a function pointer to a specialized function for copying - * and swapping strided memory. This assumes each element is a single - * value to be swapped. - * - * For information on the 'aligned', 'src_stride' and 'dst_stride' parameters - * see above. - * - * Parameters are as for PyArray_GetStridedCopyFn. - */ -NPY_NO_EXPORT PyArray_StridedTransferFn * -PyArray_GetStridedCopySwapFn(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, npy_intp itemsize); - -/* - * Gives back a function pointer to a specialized function for copying - * and swapping strided memory. This assumes each element is a pair - * of values, each of which needs to be swapped. - * - * For information on the 'aligned', 'src_stride' and 'dst_stride' parameters - * see above. - * - * Parameters are as for PyArray_GetStridedCopyFn. - */ -NPY_NO_EXPORT PyArray_StridedTransferFn * -PyArray_GetStridedCopySwapPairFn(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, npy_intp itemsize); - -/* - * Gives back a transfer function and transfer data pair which copies - * the data from source to dest, truncating it if the data doesn't - * fit, and padding with zero bytes if there's too much space. - * - * For information on the 'aligned', 'src_stride' and 'dst_stride' parameters - * see above. - * - * Returns NPY_SUCCEED or NPY_FAIL - */ -NPY_NO_EXPORT int -PyArray_GetStridedZeroPadCopyFn(int aligned, - npy_intp src_stride, npy_intp dst_stride, - npy_intp src_itemsize, npy_intp dst_itemsize, - PyArray_StridedTransferFn **outstransfer, - void **outtransferdata); - -/* - * For casts between built-in numeric types, - * this produces a function pointer for casting from src_type_num - * to dst_type_num. If a conversion is unsupported, returns NULL - * without setting a Python exception. 
- */ -NPY_NO_EXPORT PyArray_StridedTransferFn * -PyArray_GetStridedNumericCastFn(npy_intp aligned, npy_intp src_stride, - npy_intp dst_stride, - int src_type_num, int dst_type_num); - -/* - * If it's possible, gives back a transfer function which casts and/or - * byte swaps data with the dtype 'src_dtype' into data with the dtype - * 'dst_dtype'. If the outtransferdata is populated with a non-NULL value, - * it must be deallocated with the ``PyArray_FreeStridedTransferData`` - * function when the transfer function is no longer required. - * - * aligned: - * Should be 1 if the src and dst pointers are always aligned, - * 0 otherwise. - * src_stride: - * Should be the src stride if it will always be the same, - * NPY_MAX_INTP otherwise. - * dst_stride: - * Should be the dst stride if it will always be the same, - * NPY_MAX_INTP otherwise. - * src_dtype: - * The data type of source data. If this is NULL, a transfer - * function which sets the destination to zeros is produced. - * dst_dtype: - * The data type of destination data. If this is NULL and - * move_references is 1, a transfer function which decrements - * source data references is produced. - * move_references: - * If 0, the destination data gets new reference ownership. - * If 1, the references from the source data are moved to - * the destination data. - * out_stransfer: - * The resulting transfer function is placed here. - * out_transferdata: - * The auxiliary data for the transfer function is placed here. - * When finished with the transfer function, the caller must call - * ``PyArray_FreeStridedTransferData`` on this data. - * out_needs_api: - * If this is non-NULL, and the transfer function produced needs - * to call into the (Python) API, this gets set to 1. This - * remains untouched if no API access is required. - * - * WARNING: If you set move_references to 1, it is best that src_stride is - * never zero when calling the transfer function. 
Otherwise, the - * first destination reference will get the value and all the rest - * will get NULL. - * - * Returns NPY_SUCCEED or NPY_FAIL. - */ -NPY_NO_EXPORT int -PyArray_GetDTypeTransferFunction(int aligned, - npy_intp src_stride, npy_intp dst_stride, - PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, - int move_references, - PyArray_StridedTransferFn **out_stransfer, - void **out_transferdata, - int *out_needs_api); - -/* - * These two functions copy or convert the data of an n-dimensional array - * to/from a 1-dimensional strided buffer. These functions will only call - * 'stransfer' with the provided dst_stride/src_stride and - * dst_strides[0]/src_strides[0], so the caller can use those values to - * specialize the function. - * - * The return value is the number of elements it couldn't copy. A return value - * of 0 means all elements were copied, a larger value means the end of - * the n-dimensional array was reached before 'count' elements were copied. - * - * ndim: - * The number of dimensions of the n-dimensional array. - * dst/src: - * The destination or src starting pointer. - * dst_stride/src_stride: - * The stride of the 1-dimensional strided buffer - * dst_strides/src_strides: - * The strides of the n-dimensional array. - * dst_strides_inc/src_strides_inc: - * How much to add to the ..._strides pointer to get to the next stride. - * coords: - * The starting coordinates in the n-dimensional array. - * coords_inc: - * How much to add to the coords pointer to get to the next coordinate. - * shape: - * The shape of the n-dimensional array. - * shape_inc: - * How much to add to the shape pointer to get to the next shape entry. - * count: - * How many elements to transfer - * src_itemsize: - * How big each element is. If transfering between elements of different - * sizes, for example a casting operation, the 'stransfer' function - * should be specialized for that, in which case 'stransfer' will use - * this parameter as the source item size. 
- * stransfer: - * The strided transfer function. - * transferdata: - * An auxiliary data pointer passed to the strided transfer function. - * If a non-NULL value is returned, it must be deallocated with the - * function PyArray_FreeStridedTransferData. - */ -NPY_NO_EXPORT npy_intp -PyArray_TransferNDimToStrided(npy_intp ndim, - char *dst, npy_intp dst_stride, - char *src, npy_intp *src_strides, npy_intp src_strides_inc, - npy_intp *coords, npy_intp coords_inc, - npy_intp *shape, npy_intp shape_inc, - npy_intp count, npy_intp src_itemsize, - PyArray_StridedTransferFn *stransfer, - void *transferdata); - -NPY_NO_EXPORT npy_intp -PyArray_TransferStridedToNDim(npy_intp ndim, - char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc, - char *src, npy_intp src_stride, - npy_intp *coords, npy_intp coords_inc, - npy_intp *shape, npy_intp shape_inc, - npy_intp count, npy_intp src_itemsize, - PyArray_StridedTransferFn *stransfer, - void *transferdata); - -/* - * TRIVIAL ITERATION - * - * In some cases when the iteration order isn't important, iteration over - * arrays is trivial. This is the case when: - * * The array has 0 or 1 dimensions. - * * The array is C or Fortran contiguous. - * Use of an iterator can be skipped when this occurs. These macros assist - * in detecting and taking advantage of the situation. Note that it may - * be worthwhile to further check if the stride is a contiguous stride - * and take advantage of that. - * - * Here is example code for a single array: - * - * if (PyArray_TRIVIALLY_ITERABLE(self) { - * char *data; - * npy_intp count, stride; - * - * PyArray_PREPARE_TRIVIAL_ITERATION(self, count, data, stride); - * - * while (count--) { - * // Use the data pointer - * - * data += stride; - * } - * } - * else { - * // Create iterator, etc... 
- * } - * - * Here is example code for a pair of arrays: - * - * if (PyArray_TRIVIALLY_ITERABLE_PAIR(a1, a2) { - * char *data1, *data2; - * npy_intp count, stride1, stride2; - * - * PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(a1, a2, count, - * data1, data2, stride1, stride2); - * - * while (count--) { - * // Use the data1 and data2 pointers - * - * data1 += stride1; - * data2 += stride2; - * } - * } - * else { - * // Create iterator, etc... - * } - */ - -/* - * Note: Equivalently iterable macro requires one of arr1 or arr2 be - * trivially iterable to be valid. - */ -#define PyArray_EQUIVALENTLY_ITERABLE(arr1, arr2) ( \ - PyArray_NDIM(arr1) == PyArray_NDIM(arr2) && \ - PyArray_CompareLists(PyArray_DIMS(arr1), \ - PyArray_DIMS(arr2), \ - PyArray_NDIM(arr1)) && \ - (arr1->flags&(NPY_CONTIGUOUS|NPY_FORTRAN)) == \ - (arr2->flags&(NPY_CONTIGUOUS|NPY_FORTRAN)) \ - ) - -#define PyArray_TRIVIALLY_ITERABLE(arr) ( \ - PyArray_NDIM(arr) <= 1 || \ - PyArray_CHKFLAGS(arr, NPY_CONTIGUOUS) || \ - PyArray_CHKFLAGS(arr, NPY_FORTRAN) \ - ) -#define PyArray_PREPARE_TRIVIAL_ITERATION(arr, count, data, stride) \ - count = PyArray_SIZE(arr), \ - data = PyArray_BYTES(arr), \ - stride = ((PyArray_NDIM(arr) == 0) ? 0 : \ - (PyArray_CHKFLAGS(arr, NPY_FORTRAN) ? \ - PyArray_STRIDE(arr, 0) : \ - PyArray_STRIDE(arr, \ - PyArray_NDIM(arr)-1))) - -#define PyArray_TRIVIALLY_ITERABLE_PAIR(arr1, arr2) (\ - PyArray_TRIVIALLY_ITERABLE(arr1) && \ - (PyArray_NDIM(arr2) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE(arr1, arr2) || \ - (PyArray_NDIM(arr1) == 0 && \ - PyArray_TRIVIALLY_ITERABLE(arr2) \ - ) \ - ) \ - ) -#define PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(arr1, arr2, \ - count, \ - data1, data2, \ - stride1, stride2) { \ - npy_intp size1 = PyArray_SIZE(arr1); \ - npy_intp size2 = PyArray_SIZE(arr2); \ - count = ((size1 > size2) || size1 == 0) ? size1 : size2; \ - data1 = PyArray_BYTES(arr1); \ - data2 = PyArray_BYTES(arr2); \ - stride1 = (size1 == 1 ? 0 : \ - (PyArray_CHKFLAGS(arr1, NPY_FORTRAN) ? 
\ - PyArray_STRIDE(arr1, 0) : \ - PyArray_STRIDE(arr1, \ - PyArray_NDIM(arr1)-1))); \ - stride2 = (size2 == 1 ? 0 : \ - (PyArray_CHKFLAGS(arr2, NPY_FORTRAN) ? \ - PyArray_STRIDE(arr2, 0) : \ - PyArray_STRIDE(arr2, \ - PyArray_NDIM(arr2)-1))); \ - } - -#define PyArray_TRIVIALLY_ITERABLE_TRIPLE(arr1, arr2, arr3) (\ - PyArray_TRIVIALLY_ITERABLE(arr1) && \ - ((PyArray_NDIM(arr2) == 0 && \ - (PyArray_NDIM(arr3) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE(arr1, arr3) \ - ) \ - ) || \ - (PyArray_EQUIVALENTLY_ITERABLE(arr1, arr2) && \ - (PyArray_NDIM(arr3) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE(arr1, arr3) \ - ) \ - ) || \ - (PyArray_NDIM(arr1) == 0 && \ - PyArray_TRIVIALLY_ITERABLE(arr2) && \ - (PyArray_NDIM(arr3) == 0 || \ - PyArray_EQUIVALENTLY_ITERABLE(arr2, arr3) \ - ) \ - ) \ - ) \ - ) - -#define PyArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(arr1, arr2, arr3, \ - count, \ - data1, data2, data3, \ - stride1, stride2, stride3) { \ - npy_intp size1 = PyArray_SIZE(arr1); \ - npy_intp size2 = PyArray_SIZE(arr2); \ - npy_intp size3 = PyArray_SIZE(arr3); \ - count = ((size1 > size2) || size1 == 0) ? size1 : size2; \ - count = ((size3 > count) || size3 == 0) ? size3 : count; \ - data1 = PyArray_BYTES(arr1); \ - data2 = PyArray_BYTES(arr2); \ - data3 = PyArray_BYTES(arr3); \ - stride1 = (size1 == 1 ? 0 : \ - (PyArray_CHKFLAGS(arr1, NPY_FORTRAN) ? \ - PyArray_STRIDE(arr1, 0) : \ - PyArray_STRIDE(arr1, \ - PyArray_NDIM(arr1)-1))); \ - stride2 = (size2 == 1 ? 0 : \ - (PyArray_CHKFLAGS(arr2, NPY_FORTRAN) ? \ - PyArray_STRIDE(arr2, 0) : \ - PyArray_STRIDE(arr2, \ - PyArray_NDIM(arr2)-1))); \ - stride3 = (size3 == 1 ? 0 : \ - (PyArray_CHKFLAGS(arr3, NPY_FORTRAN) ? 
\ - PyArray_STRIDE(arr3, 0) : \ - PyArray_STRIDE(arr3, \ - PyArray_NDIM(arr3)-1))); \ - } - -#endif diff --git a/numpy-1.6.2/numpy/core/src/private/npy_config.h b/numpy-1.6.2/numpy/core/src/private/npy_config.h deleted file mode 100644 index b4842b8320..0000000000 --- a/numpy-1.6.2/numpy/core/src/private/npy_config.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _NPY_NPY_CONFIG_H_ -#define _NPY_NPY_CONFIG_H_ - -#include "config.h" - -/* Disable broken MS math functions */ -#if defined(_MSC_VER) || defined(__MINGW32_VERSION) -#undef HAVE_ATAN2 -#undef HAVE_HYPOT -#endif - -/* Safe to use ldexp and frexp for long double for MSVC builds */ -#if (SIZEOF_LONG_DOUBLE == SIZEOF_DOUBLE) || defined(_MSC_VER) - #ifdef HAVE_LDEXP - #define HAVE_LDEXPL 1 - #endif - #ifdef HAVE_FREXP - #define HAVE_FREXPL 1 - #endif -#endif - -/* Disable broken Sun Workshop Pro math functions */ -#ifdef __SUNPRO_C -#undef HAVE_ATAN2 -#endif - -/* - * On Mac OS X, because there is only one configuration stage for all the archs - * in universal builds, any macro which depends on the arch needs to be - * harcoded - */ -#ifdef __APPLE__ - #undef SIZEOF_LONG - #undef SIZEOF_PY_INTPTR_T - - #ifdef __LP64__ - #define SIZEOF_LONG 8 - #define SIZEOF_PY_INTPTR_T 8 - #else - #define SIZEOF_LONG 4 - #define SIZEOF_PY_INTPTR_T 4 - #endif -#endif -#endif diff --git a/numpy-1.6.2/numpy/core/src/private/npy_fpmath.h b/numpy-1.6.2/numpy/core/src/private/npy_fpmath.h deleted file mode 100644 index 92338e4c7f..0000000000 --- a/numpy-1.6.2/numpy/core/src/private/npy_fpmath.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef _NPY_NPY_FPMATH_H_ -#define _NPY_NPY_FPMATH_H_ - -#include "npy_config.h" - -#include "numpy/npy_os.h" -#include "numpy/npy_cpu.h" -#include "numpy/npy_common.h" - -#ifdef NPY_OS_DARWIN - /* This hardcoded logic is fragile, but universal builds makes it - * difficult to detect arch-specific features */ - - /* MAC OS X < 10.4 and gcc < 4 does not support proper long double, and - * is the same as double on 
those platforms */ - #if NPY_BITSOF_LONGDOUBLE == NPY_BITSOF_DOUBLE - /* This assumes that FPU and ALU have the same endianness */ - #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - #define HAVE_LDOUBLE_IEEE_DOUBLE_LE - #elif NPY_BYTE_ORDER == NPY_BIG_ENDIAN - #define HAVE_LDOUBLE_IEEE_DOUBLE_BE - #else - #error Endianness undefined ? - #endif - #else - #if defined(NPY_CPU_X86) - #define HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE - #elif defined(NPY_CPU_AMD64) - #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE - #elif defined(NPY_CPU_PPC) || defined(NPY_CPU_PPC64) - #define HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE - #endif - #endif -#endif - -#if !(defined(HAVE_LDOUBLE_IEEE_QUAD_BE) || \ - defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || \ - defined(HAVE_LDOUBLE_IEEE_DOUBLE_LE) || \ - defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE) || \ - defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \ - defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ - defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ - defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE)) - #error No long double representation defined -#endif - -#endif diff --git a/numpy-1.6.2/numpy/core/src/scalarmathmodule.c.src b/numpy-1.6.2/numpy/core/src/scalarmathmodule.c.src deleted file mode 100644 index 56f1bc238f..0000000000 --- a/numpy-1.6.2/numpy/core/src/scalarmathmodule.c.src +++ /dev/null @@ -1,1656 +0,0 @@ -/* -*- c -*- */ - -/* The purpose of this module is to add faster math for array scalars - that does not go through the ufunc machinery - - but still supports error-modes. -*/ - -#include "Python.h" -#include "numpy/noprefix.h" -#include "numpy/ufuncobject.h" -#include "numpy/arrayscalars.h" - -#include "numpy/npy_3kcompat.h" - -#include "numpy/halffloat.h" - -/** numarray adapted routines.... 
**/ - -#if SIZEOF_LONGLONG == 64 || SIZEOF_LONGLONG == 128 -static int ulonglong_overflow(ulonglong a, ulonglong b) -{ - ulonglong ah, al, bh, bl, w, x, y, z; - -#if SIZEOF_LONGLONG == 64 - ah = (a >> 32); - al = (a & 0xFFFFFFFFL); - bh = (b >> 32); - bl = (b & 0xFFFFFFFFL); -#elif SIZEOF_LONGLONG == 128 - ah = (a >> 64); - al = (a & 0xFFFFFFFFFFFFFFFFL); - bh = (b >> 64); - bl = (b & 0xFFFFFFFFFFFFFFFFL); -#else - ah = al = bh = bl = 0; -#endif - - /* 128-bit product: z*2**64 + (x+y)*2**32 + w */ - w = al*bl; - x = bh*al; - y = ah*bl; - z = ah*bh; - - /* *c = ((x + y)<<32) + w; */ -#if SIZEOF_LONGLONG == 64 - return z || (x>>32) || (y>>32) || - (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 32); -#elif SIZEOF_LONGLONG == 128 - return z || (x>>64) || (y>>64) || - (((x & 0xFFFFFFFFFFFFFFFFL) + (y & 0xFFFFFFFFFFFFFFFFL) + (w >> 64)) >> 64); -#else - return 0; -#endif - -} -#else -static int ulonglong_overflow(ulonglong NPY_UNUSED(a), ulonglong NPY_UNUSED(b)) -{ - return 0; -} -#endif - -static int slonglong_overflow(longlong a0, longlong b0) -{ - ulonglong a, b; - ulonglong ah, al, bh, bl, w, x, y, z; - - /* Convert to non-negative quantities */ - if (a0 < 0) { - a = -a0; - } - else { - a = a0; - } - if (b0 < 0) { - b = -b0; - } - else { - b = b0; - } - - -#if SIZEOF_LONGLONG == 64 - ah = (a >> 32); - al = (a & 0xFFFFFFFFL); - bh = (b >> 32); - bl = (b & 0xFFFFFFFFL); -#elif SIZEOF_LONGLONG == 128 - ah = (a >> 64); - al = (a & 0xFFFFFFFFFFFFFFFFL); - bh = (b >> 64); - bl = (b & 0xFFFFFFFFFFFFFFFFL); -#else - ah = al = bh = bl = 0; -#endif - - w = al*bl; - x = bh*al; - y = ah*bl; - z = ah*bh; - - /* - ulonglong c = ((x + y)<<32) + w; - if ((a0 < 0) ^ (b0 < 0)) - *c = -c; - else - *c = c - */ - -#if SIZEOF_LONGLONG == 64 - return z || (x>>31) || (y>>31) || - (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 31); -#elif SIZEOF_LONGLONG == 128 - return z || (x>>63) || (y>>63) || - (((x & 0xFFFFFFFFFFFFFFFFL) + (y & 0xFFFFFFFFFFFFFFFFL) + (w >> 64)) >> 63); 
-#else - return 0; -#endif -} -/** end direct numarray code **/ - - -/* Basic operations: - * - * BINARY: - * - * add, subtract, multiply, divide, remainder, divmod, power, - * floor_divide, true_divide - * - * lshift, rshift, and, or, xor (integers only) - * - * UNARY: - * - * negative, positive, absolute, nonzero, invert, int, long, float, oct, hex - * - */ - -/**begin repeat - * #name = byte, short, int, long, longlong# - */ -static void -@name@_ctype_add(@name@ a, @name@ b, @name@ *out) { - *out = a + b; - if ((*out^a) >= 0 || (*out^b) >= 0) { - return; - } - npy_set_floatstatus_overflow(); - return; -} -static void -@name@_ctype_subtract(@name@ a, @name@ b, @name@ *out) { - *out = a - b; - if ((*out^a) >= 0 || (*out^~b) >= 0) { - return; - } - npy_set_floatstatus_overflow(); - return; -} -/**end repeat**/ - -/**begin repeat - * #name = ubyte, ushort, uint, ulong, ulonglong# - */ -static void -@name@_ctype_add(@name@ a, @name@ b, @name@ *out) { - *out = a + b; - if (*out >= a && *out >= b) { - return; - } - npy_set_floatstatus_overflow(); - return; -} -static void -@name@_ctype_subtract(@name@ a, @name@ b, @name@ *out) { - *out = a - b; - if (a >= b) { - return; - } - npy_set_floatstatus_overflow(); - return; -} -/**end repeat**/ - -#ifndef SIZEOF_BYTE -#define SIZEOF_BYTE 1 -#endif - -/**begin repeat - * - * #name = byte, ubyte, short, ushort, int, uint, long, ulong# - * #big = (int,uint)*2, (longlong,ulonglong)*2# - * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG# - * #SIZENAME = BYTE*2, SHORT*2, INT*2, LONG*2# - * #SIZE = INT*4,LONGLONG*4# - * #neg = (1,0)*4# - */ -#if SIZEOF_@SIZE@ > SIZEOF_@SIZENAME@ -static void -@name@_ctype_multiply(@name@ a, @name@ b, @name@ *out) { - @big@ temp; - temp = ((@big@) a) * ((@big@) b); - *out = (@name@) temp; -#if @neg@ - if (temp > MAX_@NAME@ || temp < MIN_@NAME@) -#else - if (temp > MAX_@NAME@) -#endif - npy_set_floatstatus_overflow(); - return; -} -#endif -/**end repeat**/ - -/**begin repeat - * - * #name = 
int, uint, long, ulong, longlong, ulonglong# - * #SIZE = INT*2, LONG*2, LONGLONG*2# - * #char = (s,u)*3# - */ -#if SIZEOF_LONGLONG == SIZEOF_@SIZE@ -static void -@name@_ctype_multiply(@name@ a, @name@ b, @name@ *out) { - *out = a * b; - if (@char@longlong_overflow(a, b)) { - npy_set_floatstatus_overflow(); - } - return; -} -#endif -/**end repeat**/ - -/**begin repeat - * - * #name = byte, ubyte, short, ushort, int, uint, long, - * ulong, longlong, ulonglong# - * #neg = (1,0)*5# - */ -static void -@name@_ctype_divide(@name@ a, @name@ b, @name@ *out) { - if (b == 0) { - npy_set_floatstatus_divbyzero(); - *out = 0; - } -#if @neg@ - else if (b == -1 && a < 0 && a == -a) { - npy_set_floatstatus_overflow(); - *out = a / b; - } -#endif - else { -#if @neg@ - @name@ tmp; - tmp = a / b; - if (((a > 0) != (b > 0)) && (a % b != 0)) { - tmp--; - } - *out = tmp; -#else - *out = a / b; -#endif - } -} - -#define @name@_ctype_floor_divide @name@_ctype_divide -static void -@name@_ctype_remainder(@name@ a, @name@ b, @name@ *out) { - if (a == 0 || b == 0) { - if (b == 0) npy_set_floatstatus_divbyzero(); - *out = 0; - return; - } -#if @neg@ - else if ((a > 0) == (b > 0)) { - *out = a % b; - } - else { - /* handled like Python does */ - *out = a % b; - if (*out) *out += b; - } -#else - *out = a % b; -#endif -} -/**end repeat**/ - -/**begin repeat - * - * #name = byte, ubyte, short, ushort, int, uint, long, - * ulong, longlong, ulonglong# - * #otyp = float*4, double*6# - */ -#define @name@_ctype_true_divide(a, b, out) \ - *(out) = ((@otyp@) (a)) / ((@otyp@) (b)); -/**end repeat**/ - -/* b will always be positive in this call */ -/**begin repeat - * - * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# - * #upc = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# - */ -static void -@name@_ctype_power(@name@ a, @name@ b, @name@ *out) { - @name@ temp, ix, mult; - /* code from Python's intobject.c, with overflow checking removed. 
*/ - temp = a; - ix = 1; - while (b > 0) { - if (b & 1) { - @name@_ctype_multiply(ix, temp, &mult); - ix = mult; - if (temp == 0) { - break; - } - } - b >>= 1; /* Shift exponent down by 1 bit */ - if (b==0) { - break; - } - /* Square the value of temp */ - @name@_ctype_multiply(temp, temp, &mult); - temp = mult; - } - *out = ix; -} -/**end repeat**/ - - - -/* QUESTION: Should we check for overflow / underflow in (l,r)shift? */ - -/**begin repeat - * #name = (byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*5# - * #oper = and*10, xor*10, or*10, lshift*10, rshift*10# - * #op = &*10, ^*10, |*10, <<*10, >>*10# - */ -#define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2) -/**end repeat**/ - -/**begin repeat - * #name = float, double, longdouble# - */ -static @name@ (*_basic_@name@_floor)(@name@); -static @name@ (*_basic_@name@_sqrt)(@name@); -static @name@ (*_basic_@name@_fmod)(@name@, @name@); -#define @name@_ctype_add(a, b, outp) *(outp) = a + b -#define @name@_ctype_subtract(a, b, outp) *(outp) = a - b -#define @name@_ctype_multiply(a, b, outp) *(outp) = a * b -#define @name@_ctype_divide(a, b, outp) *(outp) = a / b -#define @name@_ctype_true_divide @name@_ctype_divide -#define @name@_ctype_floor_divide(a, b, outp) \ - *(outp) = _basic_@name@_floor((a) / (b)) -/**end repeat**/ - -static npy_half (*_basic_half_floor)(npy_half); -static npy_half (*_basic_half_sqrt)(npy_half); -static npy_half (*_basic_half_fmod)(npy_half, npy_half); -#define half_ctype_add(a, b, outp) *(outp) = npy_float_to_half(npy_half_to_float(a) + npy_half_to_float(b)) -#define half_ctype_subtract(a, b, outp) *(outp) = npy_float_to_half(npy_half_to_float(a) - npy_half_to_float(b)) -#define half_ctype_multiply(a, b, outp) *(outp) = npy_float_to_half(npy_half_to_float(a) * npy_half_to_float(b)) -#define half_ctype_divide(a, b, outp) *(outp) = npy_float_to_half(npy_half_to_float(a) / npy_half_to_float(b)) -#define half_ctype_true_divide half_ctype_divide -#define 
half_ctype_floor_divide(a, b, outp) \ - *(outp) = npy_float_to_half(_basic_float_floor(npy_half_to_float(a) / npy_half_to_float(b))) - -/**begin repeat - * #name = cfloat, cdouble, clongdouble# - * #rtype = float, double, longdouble# - * #c = f,,l# - */ -#define @name@_ctype_add(a, b, outp) do{ \ - (outp)->real = (a).real + (b).real; \ - (outp)->imag = (a).imag + (b).imag; \ - } while(0) -#define @name@_ctype_subtract(a, b, outp) do{ \ - (outp)->real = (a).real - (b).real; \ - (outp)->imag = (a).imag - (b).imag; \ - } while(0) -#define @name@_ctype_multiply(a, b, outp) do{ \ - (outp)->real = (a).real * (b).real - (a).imag * (b).imag; \ - (outp)->imag = (a).real * (b).imag + (a).imag * (b).real; \ - } while(0) -/* Note: complex division by zero must yield some complex inf */ -#define @name@_ctype_divide(a, b, outp) do{ \ - @rtype@ d = (b).real*(b).real + (b).imag*(b).imag; \ - if (d != 0) { \ - (outp)->real = ((a).real*(b).real + (a).imag*(b).imag)/d; \ - (outp)->imag = ((a).imag*(b).real - (a).real*(b).imag)/d; \ - } \ - else { \ - (outp)->real = (a).real/d; \ - (outp)->imag = (a).imag/d; \ - } \ - } while(0) -#define @name@_ctype_true_divide @name@_ctype_divide -#define @name@_ctype_floor_divide(a, b, outp) do { \ - (outp)->real = _basic_@rtype@_floor \ - (((a).real*(b).real + (a).imag*(b).imag) / \ - ((b).real*(b).real + (b).imag*(b).imag)); \ - (outp)->imag = 0; \ - } while(0) -/**end repeat**/ - -/**begin repeat - * #name = float, double, longdouble# - */ -static void -@name@_ctype_remainder(@name@ a, @name@ b, @name@ *out) { - @name@ mod; - mod = _basic_@name@_fmod(a, b); - if (mod && (((b < 0) != (mod < 0)))) { - mod += b; - } - *out = mod; -} -/**end repeat**/ - -static void -half_ctype_remainder(npy_half a, npy_half b, npy_half *out) { - float mod, fa = npy_half_to_float(a), fb = npy_half_to_float(b); - mod = _basic_float_fmod(fa, fb); - if (mod && (((fb < 0) != (mod < 0)))) { - mod += fb; - } - *out = npy_float_to_half(mod); -} - - -/**begin repeat - * 
#name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, - * ulonglong, half, float, double, longdouble, cfloat, cdouble, clongdouble# - */ -#define @name@_ctype_divmod(a, b, out, out2) { \ - @name@_ctype_floor_divide(a, b, out); \ - @name@_ctype_remainder(a, b, out2); \ - } -/**end repeat**/ - -/**begin repeat - * #name = half, float, double, longdouble# - */ -static npy_@name@ (*_basic_@name@_pow)(npy_@name@ a, npy_@name@ b); -static void -@name@_ctype_power(npy_@name@ a, npy_@name@ b, npy_@name@ *out) { - *out = _basic_@name@_pow(a, b); -} -/**end repeat**/ - -/**begin repeat - * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, - * ulonglong, float, double, longdouble# - * #uns = (0,1)*5,0*3# - */ -static void -@name@_ctype_negative(npy_@name@ a, npy_@name@ *out) -{ -#if @uns@ - npy_set_floatstatus_overflow(); -#endif - *out = -a; -} -/**end repeat**/ - -static void -half_ctype_negative(npy_half a, npy_half *out) -{ - *out = a^0x8000u; -} - - -/**begin repeat - * #name = cfloat, cdouble, clongdouble# - */ -static void -@name@_ctype_negative(@name@ a, @name@ *out) -{ - out->real = -a.real; - out->imag = -a.imag; -} -/**end repeat**/ - -/**begin repeat - * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, - * ulonglong, half, float, double, longdouble# - */ -static void -@name@_ctype_positive(npy_@name@ a, npy_@name@ *out) -{ - *out = a; -} -/**end repeat**/ - -/* - * Get the nc_powf, nc_pow, and nc_powl functions from - * the data area of the power ufunc in umathmodule. 
- */ - -/**begin repeat - * #name = cfloat, cdouble, clongdouble# - */ -static void -@name@_ctype_positive(@name@ a, @name@ *out) -{ - out->real = a.real; - out->imag = a.imag; -} -static void (*_basic_@name@_pow)(@name@ *, @name@ *, @name@ *); -static void -@name@_ctype_power(@name@ a, @name@ b, @name@ *out) -{ - _basic_@name@_pow(&a, &b, out); -} -/**end repeat**/ - - -/**begin repeat - * #name = ubyte, ushort, uint, ulong, ulonglong# - */ -#define @name@_ctype_absolute @name@_ctype_positive -/**end repeat**/ - - -/**begin repeat - * #name = byte, short, int, long, longlong, float, double, longdouble# - */ -static void -@name@_ctype_absolute(@name@ a, @name@ *out) -{ - *out = (a < 0 ? -a : a); -} -/**end repeat**/ - -static void -half_ctype_absolute(npy_half a, npy_half *out) -{ - *out = a&0x7fffu; -} - -/**begin repeat - * #name = cfloat, cdouble, clongdouble# - * #rname = float, double, longdouble# - */ -static void -@name@_ctype_absolute(@name@ a, @rname@ *out) -{ - *out = _basic_@rname@_sqrt(a.real*a.real + a.imag*a.imag); -} -/**end repeat**/ - -/**begin repeat - * #name = byte, ubyte, short, ushort, int, uint, long, - * ulong, longlong, ulonglong# - */ -#define @name@_ctype_invert(a, out) *(out) = ~a; -/**end repeat**/ - -/*** END OF BASIC CODE **/ - - -/* The general strategy for commutative binary operators is to - * - * 1) Convert the types to the common type if both are scalars (0 return) - * 2) If both are not scalars use ufunc machinery (-2 return) - * 3) If both are scalars but cannot be cast to the right type - * return NotImplmented (-1 return) - * - * 4) Perform the function on the C-type. - * 5) If an error condition occurred, check to see - * what the current error-handling is and handle the error. - * - * 6) Construct and return the output scalar. 
- */ - -/**begin repeat - * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, - * ulonglong, half, float, double, longdouble, cfloat, cdouble, clongdouble# - * #Name = Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, - * ULongLong, Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, - * ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# - */ - -static int -_@name@_convert_to_ctype(PyObject *a, npy_@name@ *arg1) -{ - PyObject *temp; - - if (PyArray_IsScalar(a, @Name@)) { - *arg1 = PyArrayScalar_VAL(a, @Name@); - return 0; - } - else if (PyArray_IsScalar(a, Generic)) { - PyArray_Descr *descr1; - - if (!PyArray_IsScalar(a, Number)) { - return -1; - } - descr1 = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(a)); - if (PyArray_CanCastSafely(descr1->type_num, PyArray_@NAME@)) { - PyArray_CastScalarDirect(a, descr1, arg1, PyArray_@NAME@); - Py_DECREF(descr1); - return 0; - } - else { - Py_DECREF(descr1); - return -1; - } - } - else if (PyArray_GetPriority(a, PyArray_SUBTYPE_PRIORITY) > - PyArray_SUBTYPE_PRIORITY) { - return -2; - } - else if ((temp = PyArray_ScalarFromObject(a)) != NULL) { - int retval = _@name@_convert_to_ctype(temp, arg1); - - Py_DECREF(temp); - return retval; - } - return -2; -} - -/**end repeat**/ - - -/**begin repeat - * #name = byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, half, float, double, cfloat, cdouble# - */ -static int -_@name@_convert2_to_ctypes(PyObject *a, npy_@name@ *arg1, - PyObject *b, npy_@name@ *arg2) -{ - int ret; - ret = _@name@_convert_to_ctype(a, arg1); - if (ret < 0) { - return ret; - } - ret = _@name@_convert_to_ctype(b, arg2); - if (ret < 0) { - return ret; - } - return 0; -} -/**end repeat**/ - -/**begin repeat - * #name = longdouble, clongdouble# - */ - -static int -_@name@_convert2_to_ctypes(PyObject *a, @name@ *arg1, - PyObject *b, @name@ *arg2) -{ - 
int ret; - ret = _@name@_convert_to_ctype(a, arg1); - if (ret < 0) { - return ret; - } - ret = _@name@_convert_to_ctype(b, arg2); - if (ret == -2) { - ret = -3; - } - if (ret < 0) { - return ret; - } - return 0; -} - -/**end repeat**/ - - -#if defined(NPY_PY3K) -#define CODEGEN_SKIP_divide_FLAG -#endif - -/**begin repeat - * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*13, - * (half, float, double, longdouble, cfloat, cdouble, clongdouble)*6, - * (half, float, double, longdouble)*2# - * #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong)*13, - * (Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*6, - * (Half, Float, Double, LongDouble)*2# - * #oper=add*10, subtract*10, multiply*10, divide*10, remainder*10, - * divmod*10, floor_divide*10, lshift*10, rshift*10, and*10, - * or*10, xor*10, true_divide*10, - * add*7, subtract*7, multiply*7, divide*7, floor_divide*7, true_divide*7, - * divmod*4, remainder*4# - * #fperr=1*70,0*50,1*10, - * 1*42, - * 1*8# - * #twoout=0*50,1*10,0*70, - * 0*42, - * 1*4,0*4# - * #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*12, - * float*4, double*6, - * (half, float, double, longdouble, cfloat, cdouble, clongdouble)*6, - * (half, float, double, longdouble)*2# - * #OName=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong)*12, - * Float*4, Double*6, - * (Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*6, - * (Half, Float, Double, LongDouble)*2# - */ - -#if !defined(CODEGEN_SKIP_@oper@_FLAG) - -static PyObject * -@name@_@oper@(PyObject *a, PyObject *b) -{ - PyObject *ret; - npy_@name@ arg1, arg2; - /* - * NOTE: In gcc >= 4.1, the compiler will reorder floating point - * operations and floating point error state checks. In - * particular, the arithmetic operations were being reordered - * so that the errors weren't caught. Declaring this output - * variable volatile was the minimal fix for the issue. 
- * (Ticket #1671) - */ - volatile npy_@otyp@ out; -#if @twoout@ - npy_@otyp@ out2; - PyObject *obj; -#endif - -#if @fperr@ - int retstatus; - int first; -#endif - - switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: - /* one of them can't be cast safely must be mixed-types*/ - return PyArray_Type.tp_as_number->nb_@oper@(a,b); - case -2: - /* use default handling */ - if (PyErr_Occurred()) { - return NULL; - } - return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); - case -3: - /* - * special case for longdouble and clongdouble - * because they have a recursive getitem in their dtype - */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - -#if @fperr@ - PyUFunc_clearfperr(); -#endif - - /* - * here we do the actual calculation with arg1 and arg2 - * as a function call. - */ -#if @twoout@ - @name@_ctype_@oper@(arg1, arg2, (npy_@otyp@ *)&out, &out2); -#else - @name@_ctype_@oper@(arg1, arg2, (npy_@otyp@ *)&out); -#endif - -#if @fperr@ - /* Check status flag. 
If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); - if (retstatus) { - int bufsize, errmask; - PyObject *errobj; - - if (PyUFunc_GetPyValues("@name@_scalars", &bufsize, &errmask, - &errobj) < 0) { - return NULL; - } - first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { - Py_XDECREF(errobj); - return NULL; - } - Py_XDECREF(errobj); - } -#endif - - -#if @twoout@ - ret = PyTuple_New(2); - if (ret == NULL) { - return NULL; - } - obj = PyArrayScalar_New(@OName@); - if (obj == NULL) { - Py_DECREF(ret); - return NULL; - } - PyArrayScalar_ASSIGN(obj, @OName@, out); - PyTuple_SET_ITEM(ret, 0, obj); - obj = PyArrayScalar_New(@OName@); - if (obj == NULL) { - Py_DECREF(ret); - return NULL; - } - PyArrayScalar_ASSIGN(obj, @OName@, out2); - PyTuple_SET_ITEM(ret, 1, obj); -#else - ret = PyArrayScalar_New(@OName@); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_ASSIGN(ret, @OName@, out); -#endif - return ret; -} -#endif - -/**end repeat**/ - -#undef CODEGEN_SKIP_divide_FLAG - -#define _IS_ZERO(x) (x ==0) -/**begin repeat - * #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, - * half, float, double, longdouble, cfloat, cdouble, clongdouble# - * #Name=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, - * Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - * #otyp=float*4, double*6, half, float, double, longdouble, cfloat, cdouble, clongdouble# - * #OName=Float*4, Double*6, Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - * #isint=(1,0)*5,0*7# - * #cmplx=0*14,1*3# - * #iszero=_IS_ZERO*10, npy_half_iszero, _IS_ZERO*6# - * #zero=0*10, NPY_HALF_ZERO, 0*6# - * #one=1*10, NPY_HALF_ONE, 1*6# - */ - -static PyObject * -@name@_power(PyObject *a, PyObject *b, PyObject *NPY_UNUSED(c)) -{ - PyObject *ret; - npy_@name@ arg1, arg2; - int retstatus; - int first; - -#if @cmplx@ - npy_@name@ out = {@zero@,@zero@}; - npy_@otyp@ out1; - out1.real = out.imag = 
@zero@; -#else - npy_@name@ out = @zero@; - npy_@otyp@ out1 = @zero@; -#endif - - switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: - /* can't cast both safely mixed-types? */ - return PyArray_Type.tp_as_number->nb_power(a,b,NULL); - case -2: - /* use default handling */ - if (PyErr_Occurred()) { - return NULL; - } - return PyGenericArrType_Type.tp_as_number->nb_power(a,b,NULL); - case -3: - /* - * special case for longdouble and clongdouble - * because they have a recursive getitem in their dtype - */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - - PyUFunc_clearfperr(); - - /* - * here we do the actual calculation with arg1 and arg2 - * as a function call. - */ -#if @cmplx@ - if (@iszero@(arg2.real) && @iszero@(arg2.imag)) { - out1.real = out.real = @one@; - out1.imag = out.imag = @zero@; - } -#else - if (@iszero@(arg2)) { - out1 = out = @one@; - } -#endif -#if @isint@ - else if (arg2 < 0) { - @name@_ctype_power(arg1, -arg2, &out); - out1 = (@otyp@) (1.0 / out); - } -#endif - else { - @name@_ctype_power(arg1, arg2, &out); - } - - /* Check status flag. 
If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); - if (retstatus) { - int bufsize, errmask; - PyObject *errobj; - - if (PyUFunc_GetPyValues("@name@_scalars", &bufsize, &errmask, - &errobj) < 0) { - return NULL; - } - first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { - Py_XDECREF(errobj); - return NULL; - } - Py_XDECREF(errobj); - } - -#if @isint@ - if (arg2 < 0) { - ret = PyArrayScalar_New(@OName@); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_ASSIGN(ret, @OName@, out1); - } - else { - ret = PyArrayScalar_New(@Name@); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_ASSIGN(ret, @Name@, out); - } -#else - ret = PyArrayScalar_New(@Name@); - if (ret == NULL) { - return NULL; - } - PyArrayScalar_ASSIGN(ret, @Name@, out); -#endif - - return ret; -} -/**end repeat**/ -#undef _IS_ZERO - - -/**begin repeat - * #name = (cfloat,cdouble,clongdouble)*2# - * #oper = divmod*3,remainder*3# - */ -#define @name@_@oper@ NULL -/**end repeat**/ - -/**begin repeat - * #name = (half,float,double,longdouble,cfloat,cdouble,clongdouble)*5# - * #oper = lshift*7, rshift*7, and*7, or*7, xor*7# - */ -#define @name@_@oper@ NULL -/**end repeat**/ - - -/**begin repeat - * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble)*3, - * byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - * #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble)*2, - * byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,float,double,longdouble, - * byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - * #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*2, - Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Half, Float, Double, 
LongDouble, Float, Double, LongDouble, - Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong# - * #oper=negative*17, positive*17, absolute*17, invert*10# - */ -static PyObject * -@name@_@oper@(PyObject *a) -{ - npy_@name@ arg1; - npy_@otyp@ out; - PyObject *ret; - - switch(_@name@_convert_to_ctype(a, &arg1)) { - case 0: - break; - case -1: - /* can't cast both safely use different add function */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - case -2: - /* use default handling */ - if (PyErr_Occurred()) { - return NULL; - } - return PyGenericArrType_Type.tp_as_number->nb_@oper@(a); - } - - /* - * here we do the actual calculation with arg1 and arg2 - * make it a function call. - */ - - @name@_ctype_@oper@(arg1, &out); - - ret = PyArrayScalar_New(@OName@); - PyArrayScalar_ASSIGN(ret, @OName@, out); - - return ret; -} -/**end repeat**/ - -/**begin repeat - * #name = half, float, double, longdouble, cfloat, cdouble, clongdouble# - */ -#define @name@_invert NULL -/**end repeat**/ - -#if defined(NPY_PY3K) -#define NONZERO_NAME(prefix) prefix##bool -#else -#define NONZERO_NAME(prefix) prefix##nonzero -#endif - -#define _IS_NONZERO(x) (x != 0) -/**begin repeat - * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, - * ulonglong, half, float, double, longdouble, cfloat, cdouble, clongdouble# - * #simp=1*14,0*3# - * #nonzero=_IS_NONZERO*10, !npy_half_iszero, _IS_NONZERO*6# - */ -static int -NONZERO_NAME(@name@_)(PyObject *a) -{ - int ret; - npy_@name@ arg1; - - if (_@name@_convert_to_ctype(a, &arg1) < 0) { - if (PyErr_Occurred()) { - return -1; - } - return PyGenericArrType_Type.tp_as_number->NONZERO_NAME(nb_)(a); - } - - /* - * here we do the actual calculation with arg1 and arg2 - * make it a function call. 
- */ - -#if @simp@ - ret = @nonzero@(arg1); -#else - ret = (@nonzero@(arg1.real) || @nonzero@(arg1.imag)); -#endif - - return ret; -} -/**end repeat**/ -#undef _IS_NONZERO - - -static int -emit_complexwarning() -{ - static PyObject *cls = NULL; - if (cls == NULL) { - PyObject *mod; - mod = PyImport_ImportModule("numpy.core"); - assert(mod != NULL); - cls = PyObject_GetAttrString(mod, "ComplexWarning"); - assert(cls != NULL); - Py_DECREF(mod); - } -#if PY_VERSION_HEX >= 0x02050000 - return PyErr_WarnEx(cls, - "Casting complex values to real discards the imaginary " - "part", 1); -#else - return PyErr_Warn(cls, - "Casting complex values to real discards the imaginary " - "part"); -#endif -} - -/**begin repeat - * - * #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble# - * #Name=Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Half,Float,Double,LongDouble,CFloat,CDouble,CLongDouble# - * #cmplx=0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1# - * #sign=(signed,unsigned)*5,,,,,,,# - * #unsigntyp=0,1,0,1,0,1,0,1,0,1,0*7# - * #ctype=long*8,PY_LONG_LONG*2,double*7# - * #to_ctype=,,,,,,,,,,npy_half_to_double,,,,,,# - * #realtyp=0*10,1*7# - * #func=(PyLong_FromLong,PyLong_FromUnsignedLong)*4,PyLong_FromLongLong,PyLong_FromUnsignedLongLong,PyLong_FromDouble*7# - */ -static PyObject * -@name@_int(PyObject *obj) -{ -#if @cmplx@ - @sign@ @ctype@ x= @to_ctype@(PyArrayScalar_VAL(obj, @Name@).real); - int ret; -#else - @sign@ @ctype@ x= @to_ctype@(PyArrayScalar_VAL(obj, @Name@)); -#endif -#if @realtyp@ - double ix; - modf(x, &ix); - x = ix; -#endif -#if @cmplx@ - ret = emit_complexwarning(); - if (ret < 0) { - return NULL; - } -#endif - -#if @unsigntyp@ - if(x < LONG_MAX) - return PyInt_FromLong(x); -#else - if(LONG_MIN < x && x < LONG_MAX) - return PyInt_FromLong(x); -#endif - return @func@(x); -} -/**end repeat**/ - -/**begin repeat - * - * 
#name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - * #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Half,Float,Double,LongDouble,CFloat,CDouble,CLongDouble)*2# - * #cmplx=(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1)*2# - * #to_ctype=(,,,,,,,,,,npy_half_to_double,,,,,,)*2# - * #which=long*17,float*17# - * #func=(PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,PyLong_FromDouble*7,PyFloat_FromDouble*17# - */ -static PyObject * -@name@_@which@(PyObject *obj) -{ -#if @cmplx@ - int ret; - ret = emit_complexwarning(); - if (ret < 0) { - return NULL; - } - return @func@(@to_ctype@((PyArrayScalar_VAL(obj, @Name@)).real)); -#else - return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); -#endif -} -/**end repeat**/ - -#if !defined(NPY_PY3K) - -/**begin repeat - * - * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - * #oper=oct*17, hex*17# - * #kind=(int*5, long*5, int*2, long*2, int, long*2)*2# - * #cap=(Int*5, Long*5, Int*2, Long*2, Int, Long*2)*2# - */ -static PyObject * -@name@_@oper@(PyObject *obj) -{ - PyObject *pyint; - pyint = @name@_@kind@(obj); - if (pyint == NULL) return NULL; - return Py@cap@_Type.tp_as_number->nb_@oper@(pyint); -} -/**end repeat**/ - -#endif - -/**begin repeat - * #oper=le,ge,lt,gt,eq,ne# - * #op=<=,>=,<,>,==,!=# - * #halfop=npy_half_le,npy_half_ge,npy_half_lt,npy_half_gt,npy_half_eq,npy_half_ne# - */ -#define def_cmp_@oper@(arg1, arg2) (arg1 @op@ arg2) -#define cmplx_cmp_@oper@(arg1, arg2) ((arg1.real == arg2.real) ? 
\ - arg1.imag @op@ arg2.imag : \ - arg1.real @op@ arg2.real) -#define def_half_cmp_@oper@(arg1, arg2) @halfop@(arg1, arg2) -/**end repeat**/ - -/**begin repeat - * #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble# - * #simp=def*10,def_half,def*3,cmplx*3# - */ -static PyObject* -@name@_richcompare(PyObject *self, PyObject *other, int cmp_op) -{ - npy_@name@ arg1, arg2; - int out=0; - - switch(_@name@_convert2_to_ctypes(self, &arg1, other, &arg2)) { - case 0: - break; - case -1: /* can't cast both safely use different add function */ - case -2: /* use ufunc */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); - case -3: /* special case for longdouble and clongdouble - because they have a recursive getitem in their dtype */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - - /* here we do the actual calculation with arg1 and arg2 */ - switch (cmp_op) { - case Py_EQ: - out = @simp@_cmp_eq(arg1, arg2); - break; - case Py_NE: - out = @simp@_cmp_ne(arg1, arg2); - break; - case Py_LE: - out = @simp@_cmp_le(arg1, arg2); - break; - case Py_GE: - out = @simp@_cmp_ge(arg1, arg2); - break; - case Py_LT: - out = @simp@_cmp_lt(arg1, arg2); - break; - case Py_GT: - out = @simp@_cmp_gt(arg1, arg2); - break; - } - - if (out) { - PyArrayScalar_RETURN_TRUE; - } - else { - PyArrayScalar_RETURN_FALSE; - } -} -/**end repeat**/ - - -/**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,half,float,double,longdouble,cfloat,cdouble,clongdouble# -**/ -static PyNumberMethods @name@_as_number = { - (binaryfunc)@name@_add, /*nb_add*/ - (binaryfunc)@name@_subtract, /*nb_subtract*/ - (binaryfunc)@name@_multiply, /*nb_multiply*/ -#if defined(NPY_PY3K) -#else - (binaryfunc)@name@_divide, /*nb_divide*/ -#endif - (binaryfunc)@name@_remainder, /*nb_remainder*/ - (binaryfunc)@name@_divmod, /*nb_divmod*/ - 
(ternaryfunc)@name@_power, /*nb_power*/ - (unaryfunc)@name@_negative, - (unaryfunc)@name@_positive, /*nb_pos*/ - (unaryfunc)@name@_absolute, /*nb_abs*/ -#if defined(NPY_PY3K) - (inquiry)@name@_bool, /*nb_bool*/ -#else - (inquiry)@name@_nonzero, /*nb_nonzero*/ -#endif - (unaryfunc)@name@_invert, /*nb_invert*/ - (binaryfunc)@name@_lshift, /*nb_lshift*/ - (binaryfunc)@name@_rshift, /*nb_rshift*/ - (binaryfunc)@name@_and, /*nb_and*/ - (binaryfunc)@name@_xor, /*nb_xor*/ - (binaryfunc)@name@_or, /*nb_or*/ -#if defined(NPY_PY3K) -#else - 0, /*nb_coerce*/ -#endif - (unaryfunc)@name@_int, /*nb_int*/ -#if defined(NPY_PY3K) - (unaryfunc)0, /*nb_reserved*/ -#else - (unaryfunc)@name@_long, /*nb_long*/ -#endif - (unaryfunc)@name@_float, /*nb_float*/ -#if defined(NPY_PY3K) -#else - (unaryfunc)@name@_oct, /*nb_oct*/ - (unaryfunc)@name@_hex, /*nb_hex*/ -#endif - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ -#if defined(NPY_PY3K) -#else - 0, /*inplace_divide*/ -#endif - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)@name@_floor_divide, /*nb_floor_divide*/ - (binaryfunc)@name@_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ -#if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)NULL, /*nb_index*/ -#endif -}; -/**end repeat**/ - -static void *saved_tables_arrtype[9]; - -static void -add_scalarmath(void) -{ - /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, half, float, double, longdouble, cfloat, cdouble, clongdouble# - #NAME=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Half, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - **/ -#if PY_VERSION_HEX >= 0x02050000 - @name@_as_number.nb_index = Py@NAME@ArrType_Type.tp_as_number->nb_index; -#endif - Py@NAME@ArrType_Type.tp_as_number = &(@name@_as_number); - 
Py@NAME@ArrType_Type.tp_richcompare = @name@_richcompare; - /**end repeat**/ - - saved_tables_arrtype[0] = PyLongArrType_Type.tp_as_number; -#if !defined(NPY_PY3K) - saved_tables_arrtype[1] = PyLongArrType_Type.tp_compare; -#endif - saved_tables_arrtype[2] = PyLongArrType_Type.tp_richcompare; - saved_tables_arrtype[3] = PyDoubleArrType_Type.tp_as_number; -#if !defined(NPY_PY3K) - saved_tables_arrtype[4] = PyDoubleArrType_Type.tp_compare; -#endif - saved_tables_arrtype[5] = PyDoubleArrType_Type.tp_richcompare; - saved_tables_arrtype[6] = PyCDoubleArrType_Type.tp_as_number; -#if !defined(NPY_PY3K) - saved_tables_arrtype[7] = PyCDoubleArrType_Type.tp_compare; -#endif - saved_tables_arrtype[8] = PyCDoubleArrType_Type.tp_richcompare; -} - -static int -get_functions(void) -{ - PyObject *mm, *obj; - void **funcdata; - char *signatures; - int i, j; - int ret = -1; - - /* Get the nc_pow functions */ - /* Get the pow functions */ - mm = PyImport_ImportModule("numpy.core.umath"); - if (mm == NULL) return -1; - - obj = PyObject_GetAttrString(mm, "power"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=3; j++;} - _basic_half_pow = funcdata[j-1]; - _basic_float_pow = funcdata[j]; - _basic_double_pow = funcdata[j+1]; - _basic_longdouble_pow = funcdata[j+2]; - _basic_cfloat_pow = funcdata[j+3]; - _basic_cdouble_pow = funcdata[j+4]; - _basic_clongdouble_pow = funcdata[j+5]; - Py_DECREF(obj); - - /* Get the floor functions */ - obj = PyObject_GetAttrString(mm, "floor"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=2; j++;} - _basic_half_floor = funcdata[j-1]; - _basic_float_floor = funcdata[j]; - _basic_double_floor = funcdata[j+1]; - _basic_longdouble_floor = funcdata[j+2]; - Py_DECREF(obj); - - /* Get the sqrt 
functions */ - obj = PyObject_GetAttrString(mm, "sqrt"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=2; j++;} - _basic_half_sqrt = funcdata[j-1]; - _basic_float_sqrt = funcdata[j]; - _basic_double_sqrt = funcdata[j+1]; - _basic_longdouble_sqrt = funcdata[j+2]; - Py_DECREF(obj); - - /* Get the fmod functions */ - obj = PyObject_GetAttrString(mm, "fmod"); - if (obj == NULL) goto fail; - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - i = 0; - j = 0; - while(signatures[i] != PyArray_FLOAT) {i+=3; j++;} - _basic_half_fmod = funcdata[j-1]; - _basic_float_fmod = funcdata[j]; - _basic_double_fmod = funcdata[j+1]; - _basic_longdouble_fmod = funcdata[j+2]; - Py_DECREF(obj); - return - - ret = 0; - fail: - Py_DECREF(mm); - return ret; -} - -static void *saved_tables[9]; - -char doc_alterpyscalars[] = ""; - -static PyObject * -alter_pyscalars(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); -#if !defined(NPY_PY3K) - if (obj == (PyObject *)(&PyInt_Type)) { - PyInt_Type.tp_as_number = PyLongArrType_Type.tp_as_number; - PyInt_Type.tp_compare = PyLongArrType_Type.tp_compare; - PyInt_Type.tp_richcompare = PyLongArrType_Type.tp_richcompare; - } - else -#endif - if (obj == (PyObject *)(&PyFloat_Type)) { - PyFloat_Type.tp_as_number = PyDoubleArrType_Type.tp_as_number; -#if !defined(NPY_PY3K) - PyFloat_Type.tp_compare = PyDoubleArrType_Type.tp_compare; -#endif - PyFloat_Type.tp_richcompare = PyDoubleArrType_Type.tp_richcompare; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyComplex_Type.tp_as_number = PyCDoubleArrType_Type.tp_as_number; -#if !defined(NPY_PY3K) - PyComplex_Type.tp_compare = PyCDoubleArrType_Type.tp_compare; -#endif - PyComplex_Type.tp_richcompare = \ - 
PyCDoubleArrType_Type.tp_richcompare; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -char doc_restorepyscalars[] = ""; -static PyObject * -restore_pyscalars(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); -#if !defined(NPY_PY3K) - if (obj == (PyObject *)(&PyInt_Type)) { - PyInt_Type.tp_as_number = saved_tables[0]; - PyInt_Type.tp_compare = saved_tables[1]; - PyInt_Type.tp_richcompare = saved_tables[2]; - } - else -#endif - if (obj == (PyObject *)(&PyFloat_Type)) { - PyFloat_Type.tp_as_number = saved_tables[3]; -#if !defined(NPY_PY3K) - PyFloat_Type.tp_compare = saved_tables[4]; -#endif - PyFloat_Type.tp_richcompare = saved_tables[5]; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyComplex_Type.tp_as_number = saved_tables[6]; -#if !defined(NPY_PY3K) - PyComplex_Type.tp_compare = saved_tables[7]; -#endif - PyComplex_Type.tp_richcompare = saved_tables[8]; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -char doc_usepythonmath[] = ""; -static PyObject * -use_pythonmath(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); -#if !defined(NPY_PY3K) - if (obj == (PyObject *)(&PyInt_Type)) { - PyLongArrType_Type.tp_as_number = saved_tables[0]; - PyLongArrType_Type.tp_compare = saved_tables[1]; - PyLongArrType_Type.tp_richcompare = saved_tables[2]; - } - else -#endif - if (obj == (PyObject *)(&PyFloat_Type)) { - PyDoubleArrType_Type.tp_as_number = saved_tables[3]; -#if !defined(NPY_PY3K) - PyDoubleArrType_Type.tp_compare = saved_tables[4]; -#endif - PyDoubleArrType_Type.tp_richcompare = saved_tables[5]; - } - else if (obj == 
(PyObject *)(&PyComplex_Type)) { - PyCDoubleArrType_Type.tp_as_number = saved_tables[6]; -#if !defined(NPY_PY3K) - PyCDoubleArrType_Type.tp_compare = saved_tables[7]; -#endif - PyCDoubleArrType_Type.tp_richcompare = saved_tables[8]; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -char doc_usescalarmath[] = ""; -static PyObject * -use_scalarmath(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - int n; - PyObject *obj; - n = PyTuple_GET_SIZE(args); - while(n--) { - obj = PyTuple_GET_ITEM(args, n); -#if !defined(NPY_PY3K) - if (obj == (PyObject *)(&PyInt_Type)) { - PyLongArrType_Type.tp_as_number = saved_tables_arrtype[0]; - PyLongArrType_Type.tp_compare = saved_tables_arrtype[1]; - PyLongArrType_Type.tp_richcompare = saved_tables_arrtype[2]; - } - else -#endif - if (obj == (PyObject *)(&PyFloat_Type)) { - PyDoubleArrType_Type.tp_as_number = saved_tables_arrtype[3]; -#if !defined(NPY_PY3K) - PyDoubleArrType_Type.tp_compare = saved_tables_arrtype[4]; -#endif - PyDoubleArrType_Type.tp_richcompare = saved_tables_arrtype[5]; - } - else if (obj == (PyObject *)(&PyComplex_Type)) { - PyCDoubleArrType_Type.tp_as_number = saved_tables_arrtype[6]; -#if !defined(NPY_PY3K) - PyCDoubleArrType_Type.tp_compare = saved_tables_arrtype[7]; -#endif - PyCDoubleArrType_Type.tp_richcompare = saved_tables_arrtype[8]; - } - else { - PyErr_SetString(PyExc_ValueError, - "arguments must be int, float, or complex"); - return NULL; - } - } - Py_INCREF(Py_None); - return Py_None; -} - -static struct PyMethodDef methods[] = { - {"alter_pythonmath", (PyCFunction) alter_pyscalars, - METH_VARARGS, doc_alterpyscalars}, - {"restore_pythonmath", (PyCFunction) restore_pyscalars, - METH_VARARGS, doc_restorepyscalars}, - {"use_pythonmath", (PyCFunction) use_pythonmath, - METH_VARARGS, doc_usepythonmath}, - {"use_scalarmath", (PyCFunction) use_scalarmath, - METH_VARARGS, doc_usescalarmath}, - 
{NULL, NULL, 0, NULL} -}; - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "scalarmath", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit_scalarmath(void) -#else -#define RETVAL -PyMODINIT_FUNC -initscalarmath(void) -#endif -{ -#if defined(NPY_PY3K) - PyObject *m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } -#else - Py_InitModule("scalarmath", methods); -#endif - - import_array(); - import_umath(); - - if (get_functions() < 0) return RETVAL; - - add_scalarmath(); - -#if !defined(NPY_PY3K) - saved_tables[0] = PyInt_Type.tp_as_number; - saved_tables[1] = PyInt_Type.tp_compare; - saved_tables[2] = PyInt_Type.tp_richcompare; -#endif - saved_tables[3] = PyFloat_Type.tp_as_number; -#if !defined(NPY_PY3K) - saved_tables[4] = PyFloat_Type.tp_compare; -#endif - saved_tables[5] = PyFloat_Type.tp_richcompare; - saved_tables[6] = PyComplex_Type.tp_as_number; -#if !defined(NPY_PY3K) - saved_tables[7] = PyComplex_Type.tp_compare; -#endif - saved_tables[8] = PyComplex_Type.tp_richcompare; - - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/core/src/umath/funcs.inc.src b/numpy-1.6.2/numpy/core/src/umath/funcs.inc.src deleted file mode 100644 index 0c9fe131e3..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/funcs.inc.src +++ /dev/null @@ -1,635 +0,0 @@ -/* -*- c -*- */ - -/* - * This file is for the definitions of the non-c99 functions used in ufuncs. - * All the complex ufuncs are defined here along with a smattering of real and - * object functions. 
- */ - -#include "numpy/npy_3kcompat.h" - - -/* - ***************************************************************************** - ** PYTHON OBJECT FUNCTIONS ** - ***************************************************************************** - */ - -static PyObject * -Py_square(PyObject *o) -{ - return PyNumber_Multiply(o, o); -} - -static PyObject * -Py_get_one(PyObject *NPY_UNUSED(o)) -{ - return PyInt_FromLong(1); -} - -static PyObject * -Py_reciprocal(PyObject *o) -{ - PyObject *one = PyInt_FromLong(1); - PyObject *result; - - if (!one) { - return NULL; - } -#if defined(NPY_PY3K) - result = PyNumber_TrueDivide(one, o); -#else - result = PyNumber_Divide(one, o); -#endif - Py_DECREF(one); - return result; -} - -/* - * Define numpy version of PyNumber_Power as binary function. - */ -static PyObject * -npy_ObjectPower(PyObject *x, PyObject *y) -{ - return PyNumber_Power(x, y, Py_None); -} - - -#if defined(NPY_PY3K) -/**begin repeat - * #Kind = Max, Min# - * #OP = Py_GE, Py_LE# - */ -static PyObject * -npy_Object@Kind@(PyObject *i1, PyObject *i2) -{ - PyObject *result; - int cmp; - - cmp = PyObject_RichCompareBool(i1, i2, @OP@); - if (cmp < 0) { - return NULL; - } - if (cmp == 1) { - result = i1; - } - else { - result = i2; - } - Py_INCREF(result); - return result; -} -/**end repeat**/ - -#else -/**begin repeat - * #Kind = Max, Min# - * #OP = >=, <=# - */ -static PyObject * -npy_Object@Kind@(PyObject *i1, PyObject *i2) -{ - PyObject *result; - int cmp; - - if (PyObject_Cmp(i1, i2, &cmp) < 0) { - return NULL; - } - if (cmp @OP@ 0) { - result = i1; - } - else { - result = i2; - } - Py_INCREF(result); - return result; -} -/**end repeat**/ -#endif - - - -/* - ***************************************************************************** - ** COMPLEX FUNCTIONS ** - ***************************************************************************** - */ - - -/* - * Don't pass structures between functions (only pointers) because how - * structures are passed is compiler dependent 
and could cause segfaults if - * umath_ufunc_object.inc is compiled with a different compiler than an - * extension that makes use of the UFUNC API - */ - -/**begin repeat - * - * #typ = float, double, longdouble# - * #c = f, ,l# - * #C = F, ,L# - * #precision = 1,2,4# - */ - -/* - * Perform the operation result := 1 + coef * x * result, - * with real coefficient `coef`. - */ -#define SERIES_HORNER_TERM@c@(result, x, coef) \ - do { \ - nc_prod@c@((result), (x), (result)); \ - (result)->real *= (coef); \ - (result)->imag *= (coef); \ - nc_sum@c@((result), &nc_1@c@, (result)); \ - } while(0) - -/* constants */ -static c@typ@ nc_1@c@ = {1., 0.}; -static c@typ@ nc_half@c@ = {0.5, 0.}; -static c@typ@ nc_i@c@ = {0., 1.}; -static c@typ@ nc_i2@c@ = {0., 0.5}; -/* - * static c@typ@ nc_mi@c@ = {0.0@c@, -1.0@c@}; - * static c@typ@ nc_pi2@c@ = {NPY_PI_2@c@., 0.0@c@}; - */ - - -static void -nc_sum@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - r->real = a->real + b->real; - r->imag = a->imag + b->imag; - return; -} - -static void -nc_diff@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - r->real = a->real - b->real; - r->imag = a->imag - b->imag; - return; -} - -static void -nc_neg@c@(c@typ@ *a, c@typ@ *r) -{ - r->real = -a->real; - r->imag = -a->imag; - return; -} - -static void -nc_prod@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - r->real = ar*br - ai*bi; - r->imag = ar*bi + ai*br; - return; -} - -static void -nc_quot@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - - @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - @typ@ d = br*br + bi*bi; - r->real = (ar*br + ai*bi)/d; - r->imag = (ai*br - ar*bi)/d; - return; -} - -static void -nc_sqrt@c@(c@typ@ *x, c@typ@ *r) -{ - *r = npy_csqrt@c@(*x); - return; -} - -static void -nc_rint@c@(c@typ@ *x, c@typ@ *r) -{ - r->real = npy_rint@c@(x->real); - r->imag = npy_rint@c@(x->imag); -} - -static void -nc_log@c@(c@typ@ *x, c@typ@ *r) -{ - *r = npy_clog@c@(*x); - return; -} - -static void 
-nc_log1p@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ l = npy_hypot@c@(x->real + 1,x->imag); - r->imag = npy_atan2@c@(x->imag, x->real + 1); - r->real = npy_log@c@(l); - return; -} - -static void -nc_exp@c@(c@typ@ *x, c@typ@ *r) -{ - *r = npy_cexp@c@(*x); - return; -} - -static void -nc_exp2@c@(c@typ@ *x, c@typ@ *r) -{ - c@typ@ a; - a.real = x->real*NPY_LOGE2@c@; - a.imag = x->imag*NPY_LOGE2@c@; - nc_exp@c@(&a, r); - return; -} - -static void -nc_expm1@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ a = npy_exp@c@(x->real); - r->real = a*npy_cos@c@(x->imag) - 1.0@c@; - r->imag = a*npy_sin@c@(x->imag); - return; -} - -static void -nc_pow@c@(c@typ@ *a, c@typ@ *b, c@typ@ *r) -{ - intp n; - @typ@ ar = npy_creal@c@(*a); - @typ@ br = npy_creal@c@(*b); - @typ@ ai = npy_cimag@c@(*a); - @typ@ bi = npy_cimag@c@(*b); - - if (br == 0. && bi == 0.) { - *r = npy_cpack@c@(1., 0.); - return; - } - if (ar == 0. && ai == 0.) { - if (br > 0 && bi == 0) { - *r = npy_cpack@c@(0., 0.); - } - else { - /* NB: there are four complex zeros; c0 = (+-0, +-0), so that unlike - * for reals, c0**p, with `p` negative is in general - * ill-defined. - * - * c0**z with z complex is also ill-defined. 
- */ - *r = npy_cpack@c@(NPY_NAN, NPY_NAN); - - /* Raise invalid */ - ar = NPY_INFINITY; - ar = ar - ar; - } - return; - } - if (bi == 0 && (n=(intp)br) == br) { - if (n == 1) { - /* unroll: handle inf better */ - *r = npy_cpack@c@(ar, ai); - return; - } - else if (n == 2) { - /* unroll: handle inf better */ - nc_prod@c@(a, a, r); - return; - } - else if (n == 3) { - /* unroll: handle inf better */ - nc_prod@c@(a, a, r); - nc_prod@c@(a, r, r); - return; - } - else if (n > -100 && n < 100) { - c@typ@ p, aa; - intp mask = 1; - if (n < 0) n = -n; - aa = nc_1@c@; - p = npy_cpack@c@(ar, ai); - while (1) { - if (n & mask) - nc_prod@c@(&aa,&p,&aa); - mask <<= 1; - if (n < mask || mask <= 0) break; - nc_prod@c@(&p,&p,&p); - } - *r = npy_cpack@c@(npy_creal@c@(aa), npy_cimag@c@(aa)); - if (br < 0) nc_quot@c@(&nc_1@c@, r, r); - return; - } - } - - *r = npy_cpow@c@(*a, *b); - return; -} - - -static void -nc_prodi@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr = x->real; - r->real = -x->imag; - r->imag = xr; - return; -} - - -static void -nc_acos@c@(c@typ@ *x, c@typ@ *r) -{ - /* - * return nc_neg(nc_prodi(nc_log(nc_sum(x,nc_prod(nc_i, - * nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))))); - */ - nc_prod@c@(x,x,r); - nc_diff@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_prodi@c@(r, r); - nc_sum@c@(x, r, r); - nc_log@c@(r, r); - nc_prodi@c@(r, r); - nc_neg@c@(r, r); - return; -} - -static void -nc_acosh@c@(c@typ@ *x, c@typ@ *r) -{ - /* - * return nc_log(nc_sum(x, - * nc_prod(nc_sqrt(nc_sum(x,nc_1)), nc_sqrt(nc_diff(x,nc_1))))); - */ - c@typ@ t; - - nc_sum@c@(x, &nc_1@c@, &t); - nc_sqrt@c@(&t, &t); - nc_diff@c@(x, &nc_1@c@, r); - nc_sqrt@c@(r, r); - nc_prod@c@(&t, r, r); - nc_sum@c@(x, r, r); - nc_log@c@(r, r); - return; -} - -static void -nc_asin@c@(c@typ@ *x, c@typ@ *r) -{ - /* - * return nc_neg(nc_prodi(nc_log(nc_sum(nc_prod(nc_i,x), - * nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))))); - */ - if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) { - c@typ@ a, *pa=&a; - nc_prod@c@(x, x, r); - 
nc_diff@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_prodi@c@(x, pa); - nc_sum@c@(pa, r, r); - nc_log@c@(r, r); - nc_prodi@c@(r, r); - nc_neg@c@(r, r); - } - else { - /* - * Small arguments: series expansion, to avoid loss of precision - * asin(x) = x [1 + (1/6) x^2 [1 + (9/20) x^2 [1 + ...]]] - * - * |x| < 1e-3 => |rel. error| < 1e-18 (f), 1e-24, 1e-36 (l) - */ - c@typ@ x2; - nc_prod@c@(x, x, &x2); - - *r = nc_1@c@; -#if @precision@ >= 3 - SERIES_HORNER_TERM@c@(r, &x2, 81.0@C@/110); - SERIES_HORNER_TERM@c@(r, &x2, 49.0@C@/72); -#endif -#if @precision@ >= 2 - SERIES_HORNER_TERM@c@(r, &x2, 25.0@C@/42); -#endif - SERIES_HORNER_TERM@c@(r, &x2, 9.0@C@/20); - SERIES_HORNER_TERM@c@(r, &x2, 1.0@C@/6); - nc_prod@c@(r, x, r); - } - return; -} - - -static void -nc_asinh@c@(c@typ@ *x, c@typ@ *r) -{ - /* - * return nc_log(nc_sum(nc_sqrt(nc_sum(nc_1,nc_prod(x,x))),x)); - */ - if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) { - nc_prod@c@(x, x, r); - nc_sum@c@(&nc_1@c@, r, r); - nc_sqrt@c@(r, r); - nc_sum@c@(r, x, r); - nc_log@c@(r, r); - } - else { - /* - * Small arguments: series expansion, to avoid loss of precision - * asinh(x) = x [1 - (1/6) x^2 [1 - (9/20) x^2 [1 - ...]]] - * - * |x| < 1e-3 => |rel. 
error| < 1e-18 (f), 1e-24, 1e-36 (l) - */ - c@typ@ x2; - nc_prod@c@(x, x, &x2); - - *r = nc_1@c@; -#if @precision@ >= 3 - SERIES_HORNER_TERM@c@(r, &x2, -81.0@C@/110); - SERIES_HORNER_TERM@c@(r, &x2, -49.0@C@/72); -#endif -#if @precision@ >= 2 - SERIES_HORNER_TERM@c@(r, &x2, -25.0@C@/42); -#endif - SERIES_HORNER_TERM@c@(r, &x2, -9.0@C@/20); - SERIES_HORNER_TERM@c@(r, &x2, -1.0@C@/6); - nc_prod@c@(r, x, r); - } - return; -} - -static void -nc_atan@c@(c@typ@ *x, c@typ@ *r) -{ - /* - * return nc_prod(nc_i2,nc_log(nc_quot(nc_sum(nc_i,x),nc_diff(nc_i,x)))); - */ - if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) { - c@typ@ a, *pa=&a; - nc_diff@c@(&nc_i@c@, x, pa); - nc_sum@c@(&nc_i@c@, x, r); - nc_quot@c@(r, pa, r); - nc_log@c@(r,r); - nc_prod@c@(&nc_i2@c@, r, r); - } - else { - /* - * Small arguments: series expansion, to avoid loss of precision - * atan(x) = x [1 - (1/3) x^2 [1 - (3/5) x^2 [1 - ...]]] - * - * |x| < 1e-3 => |rel. error| < 1e-18 (f), 1e-24, 1e-36 (l) - */ - c@typ@ x2; - nc_prod@c@(x, x, &x2); - - *r = nc_1@c@; -#if @precision@ >= 3 - SERIES_HORNER_TERM@c@(r, &x2, -9.0@C@/11); - SERIES_HORNER_TERM@c@(r, &x2, -7.0@C@/9); -#endif -#if @precision@ >= 2 - SERIES_HORNER_TERM@c@(r, &x2, -5.0@C@/7); -#endif - SERIES_HORNER_TERM@c@(r, &x2, -3.0@C@/5); - SERIES_HORNER_TERM@c@(r, &x2, -1.0@C@/3); - nc_prod@c@(r, x, r); - } - return; -} - -static void -nc_atanh@c@(c@typ@ *x, c@typ@ *r) -{ - /* - * return nc_prod(nc_half,nc_log(nc_quot(nc_sum(nc_1,x),nc_diff(nc_1,x)))); - */ - if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) { - c@typ@ a, *pa=&a; - nc_diff@c@(&nc_1@c@, x, r); - nc_sum@c@(&nc_1@c@, x, pa); - nc_quot@c@(pa, r, r); - nc_log@c@(r, r); - nc_prod@c@(&nc_half@c@, r, r); - } - else { - /* - * Small arguments: series expansion, to avoid loss of precision - * atan(x) = x [1 + (1/3) x^2 [1 + (3/5) x^2 [1 + ...]]] - * - * |x| < 1e-3 => |rel. 
error| < 1e-18 (f), 1e-24, 1e-36 (l) - */ - c@typ@ x2; - nc_prod@c@(x, x, &x2); - - *r = nc_1@c@; -#if @precision@ >= 3 - SERIES_HORNER_TERM@c@(r, &x2, 9.0@C@/11); - SERIES_HORNER_TERM@c@(r, &x2, 7.0@C@/9); -#endif -#if @precision@ >= 2 - SERIES_HORNER_TERM@c@(r, &x2, 5.0@C@/7); -#endif - SERIES_HORNER_TERM@c@(r, &x2, 3.0@C@/5); - SERIES_HORNER_TERM@c@(r, &x2, 1.0@C@/3); - nc_prod@c@(r, x, r); - } - return; -} - -static void -nc_cos@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = npy_cos@c@(xr)*npy_cosh@c@(xi); - r->imag = -npy_sin@c@(xr)*npy_sinh@c@(xi); - return; -} - -static void -nc_cosh@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = npy_cos@c@(xi)*npy_cosh@c@(xr); - r->imag = npy_sin@c@(xi)*npy_sinh@c@(xr); - return; -} - -static void -nc_log10@c@(c@typ@ *x, c@typ@ *r) -{ - nc_log@c@(x, r); - r->real *= NPY_LOG10E@c@; - r->imag *= NPY_LOG10E@c@; - return; -} - -static void -nc_log2@c@(c@typ@ *x, c@typ@ *r) -{ - nc_log@c@(x, r); - r->real *= NPY_LOG2E@c@; - r->imag *= NPY_LOG2E@c@; - return; -} - -static void -nc_sin@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = npy_sin@c@(xr)*npy_cosh@c@(xi); - r->imag = npy_cos@c@(xr)*npy_sinh@c@(xi); - return; -} - -static void -nc_sinh@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = npy_cos@c@(xi)*npy_sinh@c@(xr); - r->imag = npy_sin@c@(xi)*npy_cosh@c@(xr); - return; -} - -static void -nc_tan@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ sr,cr,shi,chi; - @typ@ rs,is,rc,ic; - @typ@ d; - @typ@ xr=x->real, xi=x->imag; - sr = npy_sin@c@(xr); - cr = npy_cos@c@(xr); - shi = npy_sinh@c@(xi); - chi = npy_cosh@c@(xi); - rs = sr*chi; - is = cr*shi; - rc = cr*chi; - ic = -sr*shi; - d = rc*rc + ic*ic; - r->real = (rs*rc+is*ic)/d; - r->imag = (is*rc-rs*ic)/d; - return; -} - -static void -nc_tanh@c@(c@typ@ *x, c@typ@ *r) -{ - @typ@ si,ci,shr,chr; - @typ@ rs,is,rc,ic; - @typ@ d; - @typ@ xr=x->real, xi=x->imag; - si = npy_sin@c@(xi); - ci = 
npy_cos@c@(xi); - shr = npy_sinh@c@(xr); - chr = npy_cosh@c@(xr); - rs = ci*shr; - is = si*chr; - rc = ci*chr; - ic = si*shr; - d = rc*rc + ic*ic; - r->real = (rs*rc+is*ic)/d; - r->imag = (is*rc-rs*ic)/d; - return; -} - -#undef SERIES_HORNER_TERM@c@ - -/**end repeat**/ diff --git a/numpy-1.6.2/numpy/core/src/umath/loops.c.src b/numpy-1.6.2/numpy/core/src/umath/loops.c.src deleted file mode 100644 index 54e5ac9849..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/loops.c.src +++ /dev/null @@ -1,2136 +0,0 @@ -/* -*- c -*- */ - -#define _UMATHMODULE - -#include "Python.h" - -#include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY -#endif - -#include "numpy/noprefix.h" -#include "numpy/ufuncobject.h" -#include "numpy/npy_math.h" -#include "numpy/halffloat.h" - -#include "numpy/npy_3kcompat.h" - -#include "ufunc_object.h" - - -/* - ***************************************************************************** - ** UFUNC LOOPS ** - ***************************************************************************** - */ - -#define IS_BINARY_REDUCE ((args[0] == args[2])\ - && (steps[0] == steps[2])\ - && (steps[0] == 0)) - -#define OUTPUT_LOOP\ - char *op1 = args[1];\ - intp os1 = steps[1];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, op1 += os1) - -#define UNARY_LOOP\ - char *ip1 = args[0], *op1 = args[1];\ - intp is1 = steps[0], os1 = steps[1];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += is1, op1 += os1) - -#define UNARY_LOOP_TWO_OUT\ - char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\ - intp is1 = steps[0], os1 = steps[1], os2 = steps[2];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2) - -#define BINARY_LOOP\ - char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ - intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += 
is1, ip2 += is2, op1 += os1) - -#define BINARY_REDUCE_LOOP_INNER\ - char *ip2 = args[1]; \ - intp is2 = steps[1]; \ - intp n = dimensions[0]; \ - intp i; \ - for(i = 0; i < n; i++, ip2 += is2) - -#define BINARY_REDUCE_LOOP(TYPE)\ - char *iop1 = args[0]; \ - TYPE io1 = *(TYPE *)iop1; \ - BINARY_REDUCE_LOOP_INNER - -#define BINARY_LOOP_TWO_OUT\ - char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\ - intp is1 = steps[0], is2 = steps[1], os1 = steps[2], os2 = steps[3];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2) - -/****************************************************************************** - ** GENERIC FLOAT LOOPS ** - *****************************************************************************/ - - -typedef float halfUnaryFunc(npy_half x); -typedef float floatUnaryFunc(float x); -typedef double doubleUnaryFunc(double x); -typedef longdouble longdoubleUnaryFunc(longdouble x); -typedef npy_half halfBinaryFunc(npy_half x, npy_half y); -typedef float floatBinaryFunc(float x, float y); -typedef double doubleBinaryFunc(double x, double y); -typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); - - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_e_e(char **args, intp *dimensions, intp *steps, void *func) -{ - halfUnaryFunc *f = (halfUnaryFunc *)func; - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *(npy_half *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_e_e_As_f_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatUnaryFunc *f = (floatUnaryFunc *)func; - UNARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - *(npy_half *)op1 = npy_float_to_half(f(in1)); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_e_e_As_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - const double in1 = npy_half_to_double(*(npy_half *)ip1); - *(npy_half 
*)op1 = npy_double_to_half(f(in1)); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatUnaryFunc *f = (floatUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = (float)f((double)in1); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_ee_e(char **args, intp *dimensions, intp *steps, void *func) -{ - halfBinaryFunc *f = (halfBinaryFunc *)func; - BINARY_LOOP { - npy_half in1 = *(npy_half *)ip1; - npy_half in2 = *(npy_half *)ip2; - *(npy_half *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_ee_e_As_ff_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatBinaryFunc *f = (floatBinaryFunc *)func; - BINARY_LOOP { - float in1 = npy_half_to_float(*(npy_half *)ip1); - float in2 = npy_half_to_float(*(npy_half *)ip2); - *(npy_half *)op1 = npy_float_to_half(f(in1, in2)); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_ee_e_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - double in1 = npy_half_to_double(*(npy_half *)ip1); - double in2 = npy_half_to_double(*(npy_half *)ip2); - *(npy_half *)op1 = npy_double_to_half(f(in1, in2)); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatBinaryFunc *f = (floatBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float 
*)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = (double)f((double)in1, (double)in2); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - double in1 = *(double *)ip1; - *(double *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - double in1 = *(double *)ip1; - double in2 = *(double *)ip2; - *(double *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; - UNARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - *(longdouble *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; - BINARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - longdouble in2 = *(longdouble *)ip2; - *(longdouble *)op1 = f(in1, in2); - } -} - - - -/****************************************************************************** - ** GENERIC COMPLEX LOOPS ** - *****************************************************************************/ - - -typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); -typedef void cfloatUnaryFunc(cfloat *x, cfloat *r); -typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); -typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); -typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); -typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, - clongdouble *r); - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; - UNARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat *out = 
(cfloat *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - cdouble tmp, out; - tmp.real = (double)((float *)ip1)[0]; - tmp.imag = (double)((float *)ip1)[1]; - f(&tmp, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; - BINARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat in2 = *(cfloat *)ip2; - cfloat *out = (cfloat *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - cdouble tmp1, tmp2, out; - tmp1.real = (double)((float *)ip1)[0]; - tmp1.imag = (double)((float *)ip1)[1]; - tmp2.real = (double)((float *)ip2)[0]; - tmp2.imag = (double)((float *)ip2)[1]; - f(&tmp1, &tmp2, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble *out = (cdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble in2 = *(cdouble *)ip2; - cdouble *out = (cdouble *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; - UNARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - 
clongdouble *out = (clongdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; - BINARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble in2 = *(clongdouble *)ip2; - clongdouble *out = (clongdouble *)op1; - f(&in1, &in2, out); - } -} - - -/****************************************************************************** - ** GENERIC OBJECT lOOPS ** - *****************************************************************************/ - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) -{ - unaryfunc f = (unaryfunc)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1 ? in1 : Py_None); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1 ? in1 : Py_None, meth, NULL); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) -{ - binaryfunc f = (binaryfunc)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1 ? in1 : Py_None, in2 ? 
in2 : Py_None); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1 ? in1 : Py_None, - meth, "(O)", in2); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/* - * A general-purpose ufunc that deals with general-purpose Python callable. - * func is a structure with nin, nout, and a Python callable function - */ - -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) -{ - intp n = dimensions[0]; - PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; - int nin = data->nin; - int nout = data->nout; - PyObject *tocall = data->callable; - char *ptrs[NPY_MAXARGS]; - PyObject *arglist, *result; - PyObject *in, **op; - intp i, j, ntot; - - ntot = nin+nout; - - for(j = 0; j < ntot; j++) { - ptrs[j] = args[j]; - } - for(i = 0; i < n; i++) { - arglist = PyTuple_New(nin); - if (arglist == NULL) { - return; - } - for(j = 0; j < nin; j++) { - in = *((PyObject **)ptrs[j]); - if (in == NULL) { - in = Py_None; - } - PyTuple_SET_ITEM(arglist, j, in); - Py_INCREF(in); - } - result = PyEval_CallObject(tocall, arglist); - Py_DECREF(arglist); - if (result == NULL) { - return; - } - if (PyTuple_Check(result)) { - if (nout != PyTuple_Size(result)) { - Py_DECREF(result); - return; - } - for(j = 0; j < nout; j++) { - op = (PyObject **)ptrs[j+nin]; - Py_XDECREF(*op); - *op = PyTuple_GET_ITEM(result, j); - Py_INCREF(*op); - } - Py_DECREF(result); - } - else { - op = (PyObject **)ptrs[nin]; - Py_XDECREF(*op); - *op = result; - } - for(j = 0; j < ntot; j++) { - ptrs[j] += steps[j]; - } - } -} - -/* - ***************************************************************************** - ** 
BOOLEAN LOOPS ** - ***************************************************************************** - */ - -/**begin repeat - * #kind = equal, not_equal, greater, greater_equal, less, less_equal# - * #OP = ==, !=, >, >=, <, <=# - **/ - -NPY_NO_EXPORT void -BOOL_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - Bool in1 = *((Bool *)ip1) != 0; - Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op1)= in1 @OP@ in2; - } -} -/**end repeat**/ - - -/**begin repeat - * #kind = logical_and, logical_or# - * #OP = &&, ||# - * #SC = ==, !=# - **/ - -NPY_NO_EXPORT void -BOOL_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - if(IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(Bool) { - const Bool in2 = *(Bool *)ip2; - io1 = io1 @OP@ in2; - if (io1 @SC@ 0) { - break; - } - } - *((Bool *)iop1) = io1; - } - else { - BINARY_LOOP { - const Bool in1 = *(Bool *)ip1; - const Bool in2 = *(Bool *)ip2; - *((Bool *)op1) = in1 @OP@ in2; - } - } -} -/**end repeat**/ - - -NPY_NO_EXPORT void -BOOL_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - Bool in1 = *((Bool *)ip1) != 0; - Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -/**begin repeat - * #kind = maximum, minimum# - * #OP = >, <# - **/ -NPY_NO_EXPORT void -BOOL_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - Bool in1 = *((Bool *)ip1) != 0; - Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op1) = (in1 @OP@ in2) ? 
in1 : in2; - } -} -/**end repeat**/ - -/**begin repeat - * #kind = absolute, logical_not# - * #OP = !=, ==# - **/ -NPY_NO_EXPORT void -BOOL_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - Bool in1 = *(Bool *)ip1; - *((Bool *)op1) = in1 @OP@ 0; - } -} -/**end repeat**/ - -NPY_NO_EXPORT void -BOOL_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((Bool *)op1) = 1; - } -} - - -/* - ***************************************************************************** - ** INTEGER LOOPS - ***************************************************************************** - */ - -/**begin repeat - * #type = byte, short, int, long, longlong# - * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# - * #ftype = float, float, double, double, double# - */ - -/**begin repeat1 - * both signed and unsigned integer types - * #s = , u# - * #S = , U# - */ - -#define @S@@TYPE@_floor_divide @S@@TYPE@_divide -#define @S@@TYPE@_fmax @S@@TYPE@_maximum -#define @S@@TYPE@_fmin @S@@TYPE@_minimum - -NPY_NO_EXPORT void -@S@@TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((@s@@type@ *)op1) = 1; - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = in1*in1; - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = (@s@@type@)(1.0/in1); - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = in1; - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - 
const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = (@s@@type@)(-(@type@)in1); - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((Bool *)op1) = !in1; - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = ~in1; - } -} - -/**begin repeat2 - * Arithmetic - * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, - * left_shift, right_shift# - * #OP = +, -,*, &, |, ^, <<, >># - */ -NPY_NO_EXPORT void -@S@@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - if(IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@s@@type@) { - io1 @OP@= *(@s@@type@ *)ip2; - } - *((@s@@type@ *)iop1) = io1; - } - else { - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((@s@@type@ *)op1) = in1 @OP@ in2; - } - } -} -/**end repeat2**/ - -/**begin repeat2 - * #kind = equal, not_equal, greater, greater_equal, less, less_equal, - * logical_and, logical_or# - * #OP = ==, !=, >, >=, <, <=, &&, ||# - */ -NPY_NO_EXPORT void -@S@@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((Bool *)op1) = in1 @OP@ in2; - } -} -/**end repeat2**/ - -NPY_NO_EXPORT void -@S@@TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -/**begin repeat2 - * #kind = maximum, minimum# - * #OP = >, <# - **/ -NPY_NO_EXPORT void -@S@@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - 
if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@s@@type@) { - const @s@@type@ in2 = *(@type@ *)ip2; - io1 = (io1 @OP@ in2) ? io1 : in2; - } - *((@s@@type@ *)iop1) = io1; - } - else { - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((@s@@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2; - } - } -} -/**end repeat2**/ - -NPY_NO_EXPORT void -@S@@TYPE@_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const double in1 = (double)(*(@s@@type@ *)ip1); - const double in2 = (double)(*(@s@@type@ *)ip2); - *((double *)op1) = in1/in2; - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @ftype@ in1 = (@ftype@)*(@s@@type@ *)ip1; - const @ftype@ in2 = (@ftype@)*(@s@@type@ *)ip2; - *((@s@@type@ *)op1) = (@s@@type@) pow(in1, in2); - } -} - -NPY_NO_EXPORT void -@S@@TYPE@_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - *((@s@@type@ *)op1) = 0; - } - else { - *((@s@@type@ *)op1)= in1 % in2; - } - - } -} - -/**end repeat1**/ - -NPY_NO_EXPORT void -U@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const u@type@ in1 = *(u@type@ *)ip1; - *((u@type@ *)op1) = in1; - } -} - -NPY_NO_EXPORT void -@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1; - } -} - -NPY_NO_EXPORT void -U@TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const u@type@ in1 = *(u@type@ *)ip1; - *((u@type@ *)op1) = in1 > 0 ? 
1 : 0; - } -} - -NPY_NO_EXPORT void -@TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); - } -} - -NPY_NO_EXPORT void -@TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (in2 == 0 || (in1 == NPY_MIN_@TYPE@ && in2 == -1)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = in1/in2 - 1; - } - else { - *((@type@ *)op1) = in1/in2; - } - } -} - -NPY_NO_EXPORT void -U@TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const u@type@ in1 = *(u@type@ *)ip1; - const u@type@ in2 = *(u@type@ *)ip2; - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - *((u@type@ *)op1) = 0; - } - else { - *((u@type@ *)op1)= in1/in2; - } - } -} - -NPY_NO_EXPORT void -@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else { - /* handle mixed case the way Python does */ - const @type@ rem = in1 % in2; - if ((in1 > 0) == (in2 > 0) || rem == 0) { - *((@type@ *)op1) = rem; - } - else { - *((@type@ *)op1) = rem + in2; - } - } - } -} - -NPY_NO_EXPORT void -U@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)) -{ - BINARY_LOOP { - const u@type@ in1 = *(u@type@ *)ip1; - const u@type@ in2 = *(u@type@ *)ip2; - if (in2 == 0) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } - else { - *((@type@ *)op1) = in1 % in2; - } - } -} - -/**end repeat**/ - -/* - ***************************************************************************** - ** DATETIME LOOPS ** - ***************************************************************************** - */ - -/**begin repeat - * #type = datetime, timedelta# - * #TYPE = DATETIME, TIMEDELTA# - * #ftype = double, double# - */ - -NPY_NO_EXPORT void -@TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((@type@ *)op1) = 1; - } -} - -NPY_NO_EXPORT void -@TYPE@_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = (@type@)(-(@type@)in1); - } -} - -NPY_NO_EXPORT void -@TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op1) = !in1; - } -} - - -/**begin repeat1 - * #kind = equal, not_equal, greater, greater_equal, less, less_equal, - * logical_and, logical_or# - * #OP = ==, !=, >, >=, <, <=, &&, ||# - */ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op1) = in1 @OP@ in2; - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -@TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -/**begin repeat1 - * #kind = maximum, minimum# - * #OP = >, <# - **/ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)) -{ - if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - const @type@ in2 = *(@type@ *)ip2; - io1 = (io1 @OP@ in2) ? io1 : in2; - } - *((@type@ *)iop1) = io1; - } - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2; - } - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1; - } -} - -NPY_NO_EXPORT void -@TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); - } -} - -/**end repeat**/ - -/* FIXME: implement the following correctly using the metadata: data is the - sequence of ndarrays in the same order as args. - */ -NPY_NO_EXPORT void -DATETIME_Mm_M_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - BINARY_LOOP { - const datetime in1 = *(datetime *)ip1; - const timedelta in2 = *(timedelta *)ip2; - *((datetime *)op1) = in1 + in2; - } -} - -NPY_NO_EXPORT void -DATETIME_mM_M_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const timedelta in1 = *(timedelta *)ip1; - const datetime in2 = *(datetime *)ip2; - *((datetime *)op1) = in1 + in2; - } -} - -NPY_NO_EXPORT void -TIMEDELTA_mm_m_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const timedelta in1 = *(timedelta *)ip1; - const timedelta in2 = *(timedelta *)ip2; - *((timedelta *)op1) = in1 + in2; - } -} - -NPY_NO_EXPORT void -DATETIME_Mm_M_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const datetime in1 = *(datetime *)ip1; - const timedelta in2 = *(timedelta *)ip2; - *((datetime *)op1) = in1 - in2; - } -} - -NPY_NO_EXPORT void 
-DATETIME_MM_m_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const datetime in1 = *(datetime *)ip1; - const datetime in2 = *(datetime *)ip2; - *((timedelta *)op1) = in1 - in2; - } -} - -NPY_NO_EXPORT void -TIMEDELTA_mm_m_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const timedelta in1 = *(timedelta *)ip1; - const timedelta in2 = *(timedelta *)ip2; - *((timedelta *)op1) = in1 - in2; - } -} - - -/* - ***************************************************************************** - ** FLOAT LOOPS ** - ***************************************************************************** - */ - - -/**begin repeat - * Float types - * #type = float, double, longdouble# - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# - * #c = f, , l# - * #C = F, , L# - */ - - -/**begin repeat1 - * Arithmetic - * # kind = add, subtract, multiply, divide# - * # OP = +, -, *, /# - */ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - if(IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - io1 @OP@= *(@type@ *)ip2; - } - *((@type@ *)iop1) = io1; - } - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = in1 @OP@ in2; - } - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind = equal, not_equal, less, less_equal, greater, greater_equal, - * logical_and, logical_or# - * #OP = ==, !=, <, <=, >, >=, &&, ||# - */ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op1) = in1 @OP@ in2; - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -@TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op1)= (in1 && !in2) 
|| (!in1 && in2); - } -} - -NPY_NO_EXPORT void -@TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op1) = !in1; - } -} - -/**begin repeat1 - * #kind = isnan, isinf, isfinite, signbit# - * #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit# - **/ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op1) = @func@(in1) != 0; - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -@TYPE@_spacing(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = npy_spacing@c@(in1); - } -} - -NPY_NO_EXPORT void -@TYPE@_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1)= npy_copysign@c@(in1, in2); - } -} - -NPY_NO_EXPORT void -@TYPE@_nextafter(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1)= npy_nextafter@c@(in1, in2); - } -} - -/**begin repeat1 - * #kind = maximum, minimum# - * #OP = >=, <=# - **/ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* */ - if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - const @type@ in2 = *(@type@ *)ip2; - io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2; - } - *((@type@ *)iop1) = io1; - } - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in1)) ? 
in1 : in2; - } - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind = fmax, fmin# - * #OP = >=, <=# - **/ -NPY_NO_EXPORT void -@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* */ - if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - const @type@ in2 = *(@type@ *)ip2; - io1 = (io1 @OP@ in2 || npy_isnan(in2)) ? io1 : in2; - } - *((@type@ *)iop1) = io1; - } - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2; - } - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -@TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = npy_floor@c@(in1/in2); - } -} - -NPY_NO_EXPORT void -@TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - const @type@ res = npy_fmod@c@(in1,in2); - if (res && ((in2 < 0) != (res < 0))) { - *((@type@ *)op1) = res + in2; - } - else { - *((@type@ *)op1) = res; - } - } -} - -NPY_NO_EXPORT void -@TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1*in1; - } -} - -NPY_NO_EXPORT void -@TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = 1/in1; - } -} - -NPY_NO_EXPORT void -@TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((@type@ *)op1) = 1; - } -} - -NPY_NO_EXPORT void -@TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1; - } -} - -NPY_NO_EXPORT void 
-@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ tmp = in1 > 0 ? in1 : -in1; - /* add 0 to clear -0.0 */ - *((@type@ *)op1) = tmp + 0; - } -} - -NPY_NO_EXPORT void -@TYPE@_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = -in1; - } -} - -NPY_NO_EXPORT void -@TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* Sign of nan is nan */ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : (in1 == 0 ? 0 : in1)); - } -} - -NPY_NO_EXPORT void -@TYPE@_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP_TWO_OUT { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = npy_modf@c@(in1, (@type@ *)op2); - } -} - -#ifdef HAVE_FREXP@C@ -NPY_NO_EXPORT void -@TYPE@_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP_TWO_OUT { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = frexp@c@(in1, (int *)op2); - } -} -#endif - -#ifdef HAVE_LDEXP@C@ -NPY_NO_EXPORT void -@TYPE@_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const int in2 = *(int *)ip2; - *((@type@ *)op1) = ldexp@c@(in1, in2); - } -} - -NPY_NO_EXPORT void -@TYPE@_ldexp_long(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* - * Additional loop to handle long integer inputs (cf. #866, #1633). - * long != int on many 64-bit platforms, so we need this second loop - * to handle the default integer type. 
- */ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const long in2 = *(long *)ip2; - if (((int)in2) == in2) { - /* Range OK */ - *((@type@ *)op1) = ldexp@c@(in1, ((int)in2)); - } - else { - /* - * Outside int range -- also ldexp will overflow in this case, - * given that exponent has less bits than int. - */ - if (in2 > 0) { - *((@type@ *)op1) = ldexp@c@(in1, NPY_MAX_INT); - } - else { - *((@type@ *)op1) = ldexp@c@(in1, NPY_MIN_INT); - } - } - } -} -#endif - -#define @TYPE@_true_divide @TYPE@_divide - -/**end repeat**/ - -/* - ***************************************************************************** - ** HALF-FLOAT LOOPS ** - ***************************************************************************** - */ - - -/**begin repeat - * Arithmetic - * # kind = add, subtract, multiply, divide# - * # OP = +, -, *, /# - */ -NPY_NO_EXPORT void -HALF_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - if(IS_BINARY_REDUCE) { - char *iop1 = args[0]; - float io1 = npy_half_to_float(*(npy_half *)iop1); - BINARY_REDUCE_LOOP_INNER { - io1 @OP@= npy_half_to_float(*(npy_half *)ip2); - } - *((npy_half *)iop1) = npy_float_to_half(io1); - } - else { - BINARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - const float in2 = npy_half_to_float(*(npy_half *)ip2); - *((npy_half *)op1) = npy_float_to_half(in1 @OP@ in2); - } - } -} -/**end repeat**/ - -#define _HALF_LOGICAL_AND(a,b) (!npy_half_iszero(a) && !npy_half_iszero(b)) -#define _HALF_LOGICAL_OR(a,b) (!npy_half_iszero(a) || !npy_half_iszero(b)) -/**begin repeat - * #kind = equal, not_equal, less, less_equal, greater, greater_equal, - * logical_and, logical_or# - * #OP = npy_half_eq, npy_half_ne, npy_half_lt, npy_half_le, npy_half_gt, npy_half_ge, _HALF_LOGICAL_AND, _HALF_LOGICAL_OR# - */ -NPY_NO_EXPORT void -HALF_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - const npy_half in2 = *(npy_half 
*)ip2; - *((Bool *)op1) = @OP@(in1, in2); - } -} -/**end repeat**/ -#undef _HALF_LOGICAL_AND -#undef _HALF_LOGICAL_OR - -NPY_NO_EXPORT void -HALF_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const int in1 = !npy_half_iszero(*(npy_half *)ip1); - const int in2 = !npy_half_iszero(*(npy_half *)ip2); - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -NPY_NO_EXPORT void -HALF_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((Bool *)op1) = npy_half_iszero(in1); - } -} - -/**begin repeat - * #kind = isnan, isinf, isfinite, signbit# - * #func = npy_half_isnan, npy_half_isinf, npy_half_isfinite, npy_half_signbit# - **/ -NPY_NO_EXPORT void -HALF_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((Bool *)op1) = @func@(in1) != 0; - } -} -/**end repeat**/ - -NPY_NO_EXPORT void -HALF_spacing(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((npy_half *)op1) = npy_half_spacing(in1); - } -} - -NPY_NO_EXPORT void -HALF_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - const npy_half in2 = *(npy_half *)ip2; - *((npy_half *)op1)= npy_half_copysign(in1, in2); - } -} - -NPY_NO_EXPORT void -HALF_nextafter(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - const npy_half in2 = *(npy_half *)ip2; - *((npy_half *)op1)= npy_half_nextafter(in1, in2); - } -} - -/**begin repeat - * #kind = maximum, minimum# - * #OP = npy_half_ge, npy_half_le# - **/ -NPY_NO_EXPORT void -HALF_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* */ - BINARY_LOOP { - const npy_half in1 = *(npy_half 
*)ip1; - const npy_half in2 = *(npy_half *)ip2; - *((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in1)) ? in1 : in2; - } -} -/**end repeat**/ - -/**begin repeat - * #kind = fmax, fmin# - * #OP = npy_half_ge, npy_half_le# - **/ -NPY_NO_EXPORT void -HALF_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* */ - BINARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - const npy_half in2 = *(npy_half *)ip2; - *((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2; - } -} -/**end repeat**/ - -NPY_NO_EXPORT void -HALF_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - const float in2 = npy_half_to_float(*(npy_half *)ip2); - *((npy_half *)op1) = npy_float_to_half(npy_floorf(in1/in2)); - } -} - -NPY_NO_EXPORT void -HALF_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - const float in2 = npy_half_to_float(*(npy_half *)ip2); - const float res = npy_fmodf(in1,in2); - if (res && ((in2 < 0) != (res < 0))) { - *((npy_half *)op1) = npy_float_to_half(res + in2); - } - else { - *((npy_half *)op1) = npy_float_to_half(res); - } - } -} - -NPY_NO_EXPORT void -HALF_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - *((npy_half *)op1) = npy_float_to_half(in1*in1); - } -} - -NPY_NO_EXPORT void -HALF_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - *((npy_half *)op1) = npy_float_to_half(1/in1); - } -} - -NPY_NO_EXPORT void -HALF_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((npy_half *)op1) = NPY_HALF_ONE; - } -} - -NPY_NO_EXPORT void -HALF_conjugate(char **args, intp 
*dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((npy_half *)op1) = in1; - } -} - -NPY_NO_EXPORT void -HALF_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((npy_half *)op1) = in1&0x7fffu; - } -} - -NPY_NO_EXPORT void -HALF_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((npy_half *)op1) = in1^0x8000u; - } -} - -NPY_NO_EXPORT void -HALF_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* Sign of nan is nan */ - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((npy_half *)op1) = npy_half_isnan(in1) ? in1 : - (((in1&0x7fffu) == 0) ? 0 : - (((in1&0x8000u) == 0) ? NPY_HALF_ONE : NPY_HALF_NEGONE)); - } -} - -NPY_NO_EXPORT void -HALF_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - float temp; - - UNARY_LOOP_TWO_OUT { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - *((npy_half *)op1) = npy_float_to_half(npy_modff(in1, &temp)); - *((npy_half *)op2) = npy_float_to_half(temp); - } -} - -#ifdef HAVE_FREXPF -NPY_NO_EXPORT void -HALF_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP_TWO_OUT { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - *((npy_half *)op1) = npy_float_to_half(frexpf(in1, (int *)op2)); - } -} -#endif - -#ifdef HAVE_LDEXPF -NPY_NO_EXPORT void -HALF_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - const int in2 = *(int *)ip2; - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, in2)); - } -} - -NPY_NO_EXPORT void -HALF_ldexp_long(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* - * Additional loop to handle long integer inputs (cf. #866, #1633). 
- * long != int on many 64-bit platforms, so we need this second loop - * to handle the default integer type. - */ - BINARY_LOOP { - const float in1 = npy_half_to_float(*(npy_half *)ip1); - const long in2 = *(long *)ip2; - if (((int)in2) == in2) { - /* Range OK */ - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, ((int)in2))); - } - else { - /* - * Outside int range -- also ldexp will overflow in this case, - * given that exponent has less bits than int. - */ - if (in2 > 0) { - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, NPY_MAX_INT)); - } - else { - *((npy_half *)op1) = npy_float_to_half(ldexpf(in1, NPY_MIN_INT)); - } - } - } -} -#endif - -#define HALF_true_divide HALF_divide - - -/* - ***************************************************************************** - ** COMPLEX LOOPS ** - ***************************************************************************** - */ - -#define CGE(xr,xi,yr,yi) ((xr > yr && !npy_isnan(xi) && !npy_isnan(yi)) \ - || (xr == yr && xi >= yi)) -#define CLE(xr,xi,yr,yi) ((xr < yr && !npy_isnan(xi) && !npy_isnan(yi)) \ - || (xr == yr && xi <= yi)) -#define CGT(xr,xi,yr,yi) ((xr > yr && !npy_isnan(xi) && !npy_isnan(yi)) \ - || (xr == yr && xi > yi)) -#define CLT(xr,xi,yr,yi) ((xr < yr && !npy_isnan(xi) && !npy_isnan(yi)) \ - || (xr == yr && xi < yi)) -#define CEQ(xr,xi,yr,yi) (xr == yr && xi == yi) -#define CNE(xr,xi,yr,yi) (xr != yr || xi != yi) - -/**begin repeat - * complex types - * #type = float, double, longdouble# - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# - * #c = f, , l# - * #C = F, , L# - */ - -/**begin repeat1 - * arithmetic - * #kind = add, subtract# - * #OP = +, -# - */ -NPY_NO_EXPORT void -C@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - ((@type@ *)op1)[0] = in1r @OP@ in2r; - ((@type@ *)op1)[1] = in1i 
@OP@ in2i; - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -C@TYPE@_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - ((@type@ *)op1)[0] = in1r*in2r - in1i*in2i; - ((@type@ *)op1)[1] = in1r*in2i + in1i*in2r; - } -} - -NPY_NO_EXPORT void -C@TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - const @type@ in2r_abs = npy_fabs@c@(in2r); - const @type@ in2i_abs = npy_fabs@c@(in2i); - if (in2r_abs >= in2i_abs) { - if (in2r_abs == 0 && in2i_abs == 0) { - /* divide by zero should yield a complex inf or nan */ - ((@type@ *)op1)[0] = in1r/in2r_abs; - ((@type@ *)op1)[1] = in1i/in2i_abs; - } - else { - const @type@ rat = in2i/in2r; - const @type@ scl = 1.0@c@/(in2r + in2i*rat); - ((@type@ *)op1)[0] = (in1r + in1i*rat)*scl; - ((@type@ *)op1)[1] = (in1i - in1r*rat)*scl; - } - } - else { - const @type@ rat = in2r/in2i; - const @type@ scl = 1.0@c@/(in2i + in2r*rat); - ((@type@ *)op1)[0] = (in1r*rat + in1i)*scl; - ((@type@ *)op1)[1] = (in1i*rat - in1r)*scl; - } - } -} - -NPY_NO_EXPORT void -C@TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - if (npy_fabs@c@(in2r) >= npy_fabs@c@(in2i)) { - const @type@ rat = in2i/in2r; - ((@type@ *)op1)[0] = npy_floor@c@((in1r + in1i*rat)/(in2r + in2i*rat)); - ((@type@ *)op1)[1] = 0; - } - else { - const @type@ rat = in2r/in2i; - ((@type@ *)op1)[0] = npy_floor@c@((in1r*rat + in1i)/(in2i + in2r*rat)); - 
((@type@ *)op1)[1] = 0; - } - } -} - -/**begin repeat1 - * #kind= greater, greater_equal, less, less_equal, equal, not_equal# - * #OP = CGT, CGE, CLT, CLE, CEQ, CNE# - */ -NPY_NO_EXPORT void -C@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - *((Bool *)op1) = @OP@(in1r,in1i,in2r,in2i); - } -} -/**end repeat1**/ - -/**begin repeat1 - #kind = logical_and, logical_or# - #OP1 = ||, ||# - #OP2 = &&, ||# -*/ -NPY_NO_EXPORT void -C@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - *((Bool *)op1) = (in1r @OP1@ in1i) @OP2@ (in2r @OP1@ in2i); - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -C@TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - const Bool tmp1 = (in1r || in1i); - const Bool tmp2 = (in2r || in2i); - *((Bool *)op1) = (tmp1 && !tmp2) || (!tmp1 && tmp2); - } -} - -NPY_NO_EXPORT void -C@TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - *((Bool *)op1) = !(in1r || in1i); - } -} - -/**begin repeat1 - * #kind = isnan, isinf, isfinite# - * #func = npy_isnan, npy_isinf, npy_isfinite# - * #OP = ||, ||, &&# - **/ -NPY_NO_EXPORT void -C@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const 
@type@ in1i = ((@type@ *)ip1)[1]; - *((Bool *)op1) = @func@(in1r) @OP@ @func@(in1i); - } -} -/**end repeat1**/ - -NPY_NO_EXPORT void -C@TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op1)[0] = in1r*in1r - in1i*in1i; - ((@type@ *)op1)[1] = in1r*in1i + in1i*in1r; - } -} - -NPY_NO_EXPORT void -C@TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - if (npy_fabs@c@(in1i) <= npy_fabs@c@(in1r)) { - const @type@ r = in1i/in1r; - const @type@ d = in1r + in1i*r; - ((@type@ *)op1)[0] = 1/d; - ((@type@ *)op1)[1] = -r/d; - } else { - const @type@ r = in1r/in1i; - const @type@ d = in1r*r + in1i; - ((@type@ *)op1)[0] = r/d; - ((@type@ *)op1)[1] = -1/d; - } - } -} - -NPY_NO_EXPORT void -C@TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - ((@type@ *)op1)[0] = 1; - ((@type@ *)op1)[1] = 0; - } -} - -NPY_NO_EXPORT void -C@TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op1)[0] = in1r; - ((@type@ *)op1)[1] = -in1i; - } -} - -NPY_NO_EXPORT void -C@TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - *((@type@ *)op1) = npy_hypot@c@(in1r, in1i); - } -} - -NPY_NO_EXPORT void -C@TYPE@__arg(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - *((@type@ *)op1) = npy_atan2@c@(in1i, in1r); - } -} - -NPY_NO_EXPORT void -C@TYPE@_sign(char **args, intp *dimensions, 
intp *steps, void *NPY_UNUSED(func)) -{ - /* fixme: sign of nan is currently 0 */ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op1)[0] = CGT(in1r, in1i, 0.0, 0.0) ? 1 : - (CLT(in1r, in1i, 0.0, 0.0) ? -1 : - (CEQ(in1r, in1i, 0.0, 0.0) ? 0 : NPY_NAN@C@)); - ((@type@ *)op1)[1] = 0; - } -} - -/**begin repeat1 - * #kind = maximum, minimum# - * #OP = CGE, CLE# - */ -NPY_NO_EXPORT void -C@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i)) { - ((@type@ *)op1)[0] = in1r; - ((@type@ *)op1)[1] = in1i; - } - else { - ((@type@ *)op1)[0] = in2r; - ((@type@ *)op1)[1] = in2i; - } - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind = fmax, fmin# - * #OP = CGE, CLE# - */ -NPY_NO_EXPORT void -C@TYPE@_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in2r) || npy_isnan(in2i)) { - ((@type@ *)op1)[0] = in1r; - ((@type@ *)op1)[1] = in1i; - } - else { - ((@type@ *)op1)[0] = in2r; - ((@type@ *)op1)[1] = in2i; - } - } -} -/**end repeat1**/ - -#define C@TYPE@_true_divide C@TYPE@_divide - -/**end repeat**/ - -#undef CGE -#undef CLE -#undef CGT -#undef CLT -#undef CEQ -#undef CNE - -/* - ***************************************************************************** - ** OBJECT LOOPS ** - ***************************************************************************** - */ - -/**begin repeat - * #kind = equal, not_equal, greater, greater_equal, less, less_equal# - * #OP = EQ, NE, GT, GE, 
LT, LE# - */ -NPY_NO_EXPORT void -OBJECT_@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - int ret = PyObject_RichCompareBool( - in1 ? in1 : Py_None, - in2 ? in2 : Py_None, Py_@OP@); - if (ret == -1) { - return; - } - *((Bool *)op1) = (Bool)ret; - } -} -/**end repeat**/ - -NPY_NO_EXPORT void -OBJECT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ -#if defined(NPY_PY3K) - PyObject *zero = PyLong_FromLong(0); - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - int v; - PyObject *ret; - PyObject_Cmp(in1 ? in1 : Py_None, zero, &v); - ret = PyLong_FromLong(v); - if (PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } - Py_DECREF(zero); -#else - PyObject *zero = PyInt_FromLong(0); - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyInt_FromLong( - PyObject_Compare(in1 ? in1 : Py_None, zero)); - if (PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } - Py_DECREF(zero); -#endif -} - -/* - ***************************************************************************** - ** END LOOPS ** - ***************************************************************************** - */ diff --git a/numpy-1.6.2/numpy/core/src/umath/loops.h b/numpy-1.6.2/numpy/core/src/umath/loops.h deleted file mode 100644 index abd8de23e7..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/loops.h +++ /dev/null @@ -1,2718 +0,0 @@ - -/* - ***************************************************************************** - ** This file was autogenerated from a template DO NOT EDIT!!!! 
** - ** Changes should be made to the original source (.src) file ** - ***************************************************************************** - */ - -#line 1 -/* -*- c -*- */ -/* - * vim:syntax=c - */ - -/* - ***************************************************************************** - ** IMPORTANT NOTE for loops.h.src -> loops.h ** - ***************************************************************************** - * The template file loops.h.src is not automatically converted into - * loops.h by the build system. If you edit this file, you must manually - * do the conversion using numpy/distutils/conv_template.py from the - * command line as follows: - * - * $ cd - * $ python numpy/distutils/conv_template.py numpy/core/src/umath/loops.h.src - * $ - */ - -#ifndef _NPY_UMATH_LOOPS_H_ -#define _NPY_UMATH_LOOPS_H_ - -#define BOOL_invert BOOL_logical_not -#define BOOL_negative BOOL_logical_not -#define BOOL_add BOOL_logical_or -#define BOOL_bitwise_and BOOL_logical_and -#define BOOL_bitwise_or BOOL_logical_or -#define BOOL_bitwise_xor BOOL_logical_xor -#define BOOL_multiply BOOL_logical_and -#define BOOL_subtract BOOL_logical_xor -#define BOOL_fmax BOOL_maximum -#define BOOL_fmin BOOL_minimum - -/* - ***************************************************************************** - ** BOOLEAN LOOPS ** - ***************************************************************************** - */ - -#line 46 - -NPY_NO_EXPORT void -BOOL_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - 
-#line 46 - -NPY_NO_EXPORT void -BOOL_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 46 - -NPY_NO_EXPORT void -BOOL_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 55 -NPY_NO_EXPORT void -BOOL_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 55 -NPY_NO_EXPORT void -BOOL_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 63 -NPY_NO_EXPORT void -BOOL_absolute(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 63 -NPY_NO_EXPORT void -BOOL_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -BOOL_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -/* - ***************************************************************************** - ** INTEGER LOOPS - ***************************************************************************** - */ - -#line 81 - -#line 87 - -#define BYTE_floor_divide BYTE_divide -#define BYTE_fmax BYTE_maximum -#define BYTE_fmin BYTE_minimum - -NPY_NO_EXPORT void -BYTE_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -BYTE_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -BYTE_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -BYTE_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -BYTE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_bitwise_xor(char **args, intp *dimensions, intp 
*steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -BYTE_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -BYTE_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -BYTE_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -BYTE_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -BYTE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -BYTE_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -BYTE_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 87 - -#define UBYTE_floor_divide UBYTE_divide -#define UBYTE_fmax UBYTE_maximum -#define UBYTE_fmin 
UBYTE_minimum - -NPY_NO_EXPORT void -UBYTE_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -UBYTE_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -UBYTE_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -UBYTE_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -UBYTE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UBYTE_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -UBYTE_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UBYTE_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT 
void -UBYTE_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UBYTE_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UBYTE_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UBYTE_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UBYTE_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UBYTE_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -UBYTE_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -UBYTE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -UBYTE_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -UBYTE_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -UBYTE_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -BYTE_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT 
void -BYTE_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UBYTE_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 81 - -#line 87 - -#define SHORT_floor_divide SHORT_divide -#define SHORT_fmax SHORT_maximum -#define SHORT_fmin SHORT_minimum - -NPY_NO_EXPORT void -SHORT_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -SHORT_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -SHORT_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -SHORT_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -SHORT_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -SHORT_right_shift(char **args, intp 
*dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -SHORT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -SHORT_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -SHORT_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -SHORT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -SHORT_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -SHORT_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 87 - -#define USHORT_floor_divide USHORT_divide -#define USHORT_fmax USHORT_maximum -#define USHORT_fmin USHORT_minimum - -NPY_NO_EXPORT void -USHORT_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -USHORT_square(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(data)); - -NPY_NO_EXPORT void -USHORT_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -USHORT_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -USHORT_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -USHORT_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -USHORT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_greater_equal(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -USHORT_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -USHORT_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -USHORT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -USHORT_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -USHORT_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -USHORT_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -SHORT_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -USHORT_remainder(char **args, intp *dimensions, intp *steps, 
void *NPY_UNUSED(func)); - -#line 81 - -#line 87 - -#define INT_floor_divide INT_divide -#define INT_fmax INT_maximum -#define INT_fmin INT_minimum - -NPY_NO_EXPORT void -INT_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -INT_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -INT_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -INT_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -INT_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -INT_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -INT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void 
-INT_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -INT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -INT_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -INT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -INT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -INT_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -INT_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -INT_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -INT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -INT_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -INT_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 87 - -#define UINT_floor_divide UINT_divide -#define UINT_fmax UINT_maximum -#define UINT_fmin UINT_minimum - -NPY_NO_EXPORT void -UINT_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -UINT_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -UINT_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -UINT_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - 
-NPY_NO_EXPORT void -UINT_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -UINT_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -UINT_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -UINT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UINT_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UINT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UINT_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UINT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UINT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 
-NPY_NO_EXPORT void -UINT_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -UINT_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -UINT_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -UINT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -UINT_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -UINT_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -UINT_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -INT_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -UINT_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 81 - -#line 87 - -#define LONG_floor_divide LONG_divide -#define LONG_fmax LONG_maximum -#define LONG_fmin LONG_minimum - -NPY_NO_EXPORT void -LONG_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONG_square(char 
**args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONG_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONG_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -LONG_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONG_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -LONG_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_greater_equal(char **args, intp *dimensions, intp 
*steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONG_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONG_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -LONG_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -LONG_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONG_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 87 - -#define ULONG_floor_divide ULONG_divide -#define ULONG_fmax ULONG_maximum -#define ULONG_fmin ULONG_minimum - -NPY_NO_EXPORT void -ULONG_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -ULONG_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -ULONG_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -ULONG_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_invert(char **args, intp 
*dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -ULONG_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONG_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -ULONG_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONG_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void 
-ULONG_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -ULONG_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -ULONG_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -ULONG_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -ULONG_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONG_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONG_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 81 - -#line 87 - -#define LONGLONG_floor_divide LONGLONG_divide -#define LONGLONG_fmax LONGLONG_maximum -#define LONGLONG_fmin LONGLONG_minimum - -NPY_NO_EXPORT void -LONGLONG_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONGLONG_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONGLONG_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void 
-LONGLONG_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -LONGLONG_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -LONGLONG_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -LONGLONG_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_less(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -LONGLONG_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONGLONG_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -LONGLONG_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -LONGLONG_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONGLONG_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 87 - -#define ULONGLONG_floor_divide ULONGLONG_divide -#define ULONGLONG_fmax ULONGLONG_maximum -#define ULONGLONG_fmin ULONGLONG_minimum - -NPY_NO_EXPORT void -ULONGLONG_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -ULONGLONG_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -ULONGLONG_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -ULONGLONG_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_invert(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_bitwise_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_bitwise_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_bitwise_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_left_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 119 -NPY_NO_EXPORT void -ULONGLONG_right_shift(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 129 -NPY_NO_EXPORT void -ULONGLONG_logical_or(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -ULONGLONG_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -ULONGLONG_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 140 -NPY_NO_EXPORT void -ULONGLONG_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -ULONGLONG_true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -ULONGLONG_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGLONG_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -ULONGLONG_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -/* - ***************************************************************************** - ** FLOAT LOOPS ** - ***************************************************************************** - */ - - -#line 194 - - -#line 201 -NPY_NO_EXPORT void -HALF_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -HALF_subtract(char **args, intp *dimensions, intp 
*steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -HALF_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -HALF_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 210 -NPY_NO_EXPORT void -HALF_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -HALF_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -HALF_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -HALF_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_copysign(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_nextafter(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -HALF_spacing(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 232 -NPY_NO_EXPORT void -HALF_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 232 -NPY_NO_EXPORT void -HALF_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 240 -NPY_NO_EXPORT void -HALF_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 240 -NPY_NO_EXPORT void -HALF_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -HALF_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -HALF_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -HALF_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -HALF_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - - -NPY_NO_EXPORT void -HALF_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -HALF_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -HALF_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -HALF_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -HALF_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -HALF_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#ifdef HAVE_FREXPF -NPY_NO_EXPORT void -HALF_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#ifdef HAVE_LDEXPF -NPY_NO_EXPORT void -HALF_ldexp(char **args, intp *dimensions, 
intp *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -HALF_ldexp_long(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#define HALF_true_divide HALF_divide - - -#line 194 - - -#line 201 -NPY_NO_EXPORT void -FLOAT_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -FLOAT_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -FLOAT_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -FLOAT_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 210 -NPY_NO_EXPORT void -FLOAT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -FLOAT_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -FLOAT_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -FLOAT_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -FLOAT_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void 
-FLOAT_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -FLOAT_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -FLOAT_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -FLOAT_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -FLOAT_nextafter(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -FLOAT_spacing(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 232 -NPY_NO_EXPORT void -FLOAT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 232 -NPY_NO_EXPORT void -FLOAT_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 240 -NPY_NO_EXPORT void -FLOAT_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 240 -NPY_NO_EXPORT void -FLOAT_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -FLOAT_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -FLOAT_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -FLOAT_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -FLOAT_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - - -NPY_NO_EXPORT void -FLOAT_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -FLOAT_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -FLOAT_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -FLOAT_negative(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -FLOAT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -FLOAT_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#ifdef HAVE_FREXPF -NPY_NO_EXPORT void -FLOAT_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#ifdef HAVE_LDEXPF -NPY_NO_EXPORT void -FLOAT_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -FLOAT_ldexp_long(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#define FLOAT_true_divide FLOAT_divide - - -#line 194 - - -#line 201 -NPY_NO_EXPORT void -DOUBLE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -DOUBLE_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -DOUBLE_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -DOUBLE_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 210 -NPY_NO_EXPORT void -DOUBLE_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -DOUBLE_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -DOUBLE_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -DOUBLE_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -DOUBLE_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -DOUBLE_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -DOUBLE_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void 
-DOUBLE_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -DOUBLE_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_nextafter(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -DOUBLE_spacing(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 232 -NPY_NO_EXPORT void -DOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 232 -NPY_NO_EXPORT void -DOUBLE_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 240 -NPY_NO_EXPORT void -DOUBLE_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 240 -NPY_NO_EXPORT void -DOUBLE_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -DOUBLE_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -DOUBLE_reciprocal(char **args, intp 
*dimensions, intp *steps, void *NPY_UNUSED(data)); - - -NPY_NO_EXPORT void -DOUBLE_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -DOUBLE_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DOUBLE_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -DOUBLE_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -DOUBLE_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#ifdef HAVE_FREXP -NPY_NO_EXPORT void -DOUBLE_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#ifdef HAVE_LDEXP -NPY_NO_EXPORT void -DOUBLE_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -DOUBLE_ldexp_long(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#define DOUBLE_true_divide DOUBLE_divide - - -#line 194 - - -#line 201 -NPY_NO_EXPORT void -LONGDOUBLE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -LONGDOUBLE_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -LONGDOUBLE_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 201 -NPY_NO_EXPORT void -LONGDOUBLE_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 
-NPY_NO_EXPORT void -LONGDOUBLE_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 210 -NPY_NO_EXPORT void -LONGDOUBLE_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONGDOUBLE_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGDOUBLE_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_nextafter(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 224 -NPY_NO_EXPORT void -LONGDOUBLE_spacing(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 232 -NPY_NO_EXPORT void -LONGDOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 232 -NPY_NO_EXPORT void -LONGDOUBLE_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 240 -NPY_NO_EXPORT void 
-LONGDOUBLE_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 240 -NPY_NO_EXPORT void -LONGDOUBLE_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONGDOUBLE_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGDOUBLE_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGDOUBLE_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONGDOUBLE_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - - -NPY_NO_EXPORT void -LONGDOUBLE_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -LONGDOUBLE_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGDOUBLE_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -LONGDOUBLE_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONGDOUBLE_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -LONGDOUBLE_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#ifdef HAVE_FREXPL -NPY_NO_EXPORT void -LONGDOUBLE_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#ifdef HAVE_LDEXPL -NPY_NO_EXPORT void -LONGDOUBLE_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -NPY_NO_EXPORT void -LONGDOUBLE_ldexp_long(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#endif - -#define LONGDOUBLE_true_divide LONGDOUBLE_divide - - - - -/* - ***************************************************************************** - ** COMPLEX LOOPS ** - ***************************************************************************** - */ - -#define CGE(xr,xi,yr,yi) 
(xr > yr || (xr == yr && xi >= yi)); -#define CLE(xr,xi,yr,yi) (xr < yr || (xr == yr && xi <= yi)); -#define CGT(xr,xi,yr,yi) (xr > yr || (xr == yr && xi > yi)); -#define CLT(xr,xi,yr,yi) (xr < yr || (xr == yr && xi < yi)); -#define CEQ(xr,xi,yr,yi) (xr == yr && xi == yi); -#define CNE(xr,xi,yr,yi) (xr != yr || xi != yi); - -#line 314 - -#line 320 -NPY_NO_EXPORT void -CFLOAT_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 320 -NPY_NO_EXPORT void -CFLOAT_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -CFLOAT_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CFLOAT_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CFLOAT_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CFLOAT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CFLOAT_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CFLOAT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CFLOAT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CFLOAT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CFLOAT_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 347 -NPY_NO_EXPORT void -CFLOAT_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 347 -NPY_NO_EXPORT void -CFLOAT_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -CFLOAT_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void 
-CFLOAT_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#line 361 -NPY_NO_EXPORT void -CFLOAT_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 361 -NPY_NO_EXPORT void -CFLOAT_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 361 -NPY_NO_EXPORT void -CFLOAT_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -CFLOAT_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CFLOAT_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CFLOAT_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CFLOAT_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CFLOAT_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CFLOAT__arg(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CFLOAT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 390 -NPY_NO_EXPORT void -CFLOAT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 390 -NPY_NO_EXPORT void -CFLOAT_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 398 -NPY_NO_EXPORT void -CFLOAT_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 398 -NPY_NO_EXPORT void -CFLOAT_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#define CFLOAT_true_divide CFLOAT_divide - - -#line 314 - -#line 320 -NPY_NO_EXPORT void -CDOUBLE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 320 -NPY_NO_EXPORT void -CDOUBLE_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void 
-CDOUBLE_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CDOUBLE_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CDOUBLE_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CDOUBLE_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CDOUBLE_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CDOUBLE_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CDOUBLE_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CDOUBLE_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CDOUBLE_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 347 -NPY_NO_EXPORT void -CDOUBLE_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 347 -NPY_NO_EXPORT void -CDOUBLE_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -CDOUBLE_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CDOUBLE_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#line 361 -NPY_NO_EXPORT void -CDOUBLE_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 361 -NPY_NO_EXPORT void -CDOUBLE_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 361 -NPY_NO_EXPORT void -CDOUBLE_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -CDOUBLE_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void 
-CDOUBLE_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CDOUBLE_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CDOUBLE_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CDOUBLE_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CDOUBLE__arg(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CDOUBLE_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 390 -NPY_NO_EXPORT void -CDOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 390 -NPY_NO_EXPORT void -CDOUBLE_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 398 -NPY_NO_EXPORT void -CDOUBLE_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 398 -NPY_NO_EXPORT void -CDOUBLE_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#define CDOUBLE_true_divide CDOUBLE_divide - - -#line 314 - -#line 320 -NPY_NO_EXPORT void -CLONGDOUBLE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 320 -NPY_NO_EXPORT void -CLONGDOUBLE_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - - -NPY_NO_EXPORT void -CLONGDOUBLE_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CLONGDOUBLE_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CLONGDOUBLE_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CLONGDOUBLE_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CLONGDOUBLE_greater_equal(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CLONGDOUBLE_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CLONGDOUBLE_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CLONGDOUBLE_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 338 -NPY_NO_EXPORT void -CLONGDOUBLE_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 347 -NPY_NO_EXPORT void -CLONGDOUBLE_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 347 -NPY_NO_EXPORT void -CLONGDOUBLE_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -CLONGDOUBLE_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CLONGDOUBLE_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); -#line 361 -NPY_NO_EXPORT void -CLONGDOUBLE_isnan(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 361 -NPY_NO_EXPORT void -CLONGDOUBLE_isinf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 361 -NPY_NO_EXPORT void -CLONGDOUBLE_isfinite(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -CLONGDOUBLE_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CLONGDOUBLE_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CLONGDOUBLE_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)); - -NPY_NO_EXPORT void -CLONGDOUBLE_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CLONGDOUBLE_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CLONGDOUBLE__arg(char **args, intp 
*dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -CLONGDOUBLE_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 390 -NPY_NO_EXPORT void -CLONGDOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 390 -NPY_NO_EXPORT void -CLONGDOUBLE_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#line 398 -NPY_NO_EXPORT void -CLONGDOUBLE_fmax(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 398 -NPY_NO_EXPORT void -CLONGDOUBLE_fmin(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -#define CLONGDOUBLE_true_divide CLONGDOUBLE_divide - - - -#undef CGE -#undef CLE -#undef CGT -#undef CLT -#undef CEQ -#undef CNE - -/* - ***************************************************************************** - ** DATETIME LOOPS ** - ***************************************************************************** - */ - -#line 422 -#define DATETIME_fmax DATETIME_maximum -#define DATETIME_fmin DATETIME_minimum - -#line 422 -#define TIMEDELTA_fmax TIMEDELTA_maximum -#define TIMEDELTA_fmin TIMEDELTA_minimum - - -#line 431 -NPY_NO_EXPORT void -DATETIME_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_not_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - 
-NPY_NO_EXPORT void -TIMEDELTA_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_logical_and(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_logical_or(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 
-NPY_NO_EXPORT void -DATETIME_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_minimum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 431 -NPY_NO_EXPORT void -DATETIME_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -DATETIME_Mm_M_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DATETIME_mM_M_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DATETIME_Mm_M_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -DATETIME_MM_m_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_mm_m_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -NPY_NO_EXPORT void -TIMEDELTA_mm_m_subtract(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -/* - ***************************************************************************** - ** OBJECT LOOPS ** - ***************************************************************************** - */ - -#line 466 -NPY_NO_EXPORT void -OBJECT_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 466 -NPY_NO_EXPORT void -OBJECT_not_equal(char **args, intp *dimensions, intp *steps, void 
*NPY_UNUSED(func)); - -#line 466 -NPY_NO_EXPORT void -OBJECT_greater(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 466 -NPY_NO_EXPORT void -OBJECT_greater_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 466 -NPY_NO_EXPORT void -OBJECT_less(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -#line 466 -NPY_NO_EXPORT void -OBJECT_less_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - - -NPY_NO_EXPORT void -OBJECT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)); - -/* - ***************************************************************************** - ** END LOOPS ** - ***************************************************************************** - */ - -#endif diff --git a/numpy-1.6.2/numpy/core/src/umath/ufunc_object.c b/numpy-1.6.2/numpy/core/src/umath/ufunc_object.c deleted file mode 100644 index 930c91ca1d..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/ufunc_object.c +++ /dev/null @@ -1,5110 +0,0 @@ -/* - * Python Universal Functions Object -- Math for all types, plus fast - * arrays math - * - * Full description - * - * This supports mathematical (and Boolean) functions on arrays and other python - * objects. Math on large arrays of basic C types is rather efficient. - * - * Travis E. Oliphant 2005, 2006 oliphant@ee.byu.edu (oliphant.travis@ieee.org) - * Brigham Young University - * - * based on the - * - * Original Implementation: - * Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu - * - * with inspiration and code from - * Numarray - * Space Science Telescope Institute - * J. 
Todd Miller - * Perry Greenfield - * Rick White - * - */ -#define _UMATHMODULE - -#include "Python.h" - -#include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#define NO_IMPORT_ARRAY -#endif - -#include "numpy/npy_3kcompat.h" - -#include "numpy/noprefix.h" -#include "numpy/ufuncobject.h" -#include "lowlevel_strided_loops.h" - -#include "ufunc_object.h" - -/********** PRINTF DEBUG TRACING **************/ -#define NPY_UF_DBG_TRACING 0 - -#if NPY_UF_DBG_TRACING -#define NPY_UF_DBG_PRINT(s) printf("%s", s) -#define NPY_UF_DBG_PRINT1(s, p1) printf(s, p1) -#define NPY_UF_DBG_PRINT2(s, p1, p2) printf(s, p1, p2) -#define NPY_UF_DBG_PRINT3(s, p1, p2, p3) printf(s, p1, p2, p3) -#else -#define NPY_UF_DBG_PRINT(s) -#define NPY_UF_DBG_PRINT1(s, p1) -#define NPY_UF_DBG_PRINT2(s, p1, p2) -#define NPY_UF_DBG_PRINT3(s, p1, p2, p3) -#endif -/**********************************************/ - - -/********************/ -#define USE_USE_DEFAULTS 1 -#define USE_NEW_ITERATOR_GENFUNC 1 -/********************/ - -/* ---------------------------------------------------------------- */ - -static int -_does_loop_use_arrays(void *data); - -/* - * fpstatus is the ufunc_formatted hardware status - * errmask is the handling mask specified by the user. - * errobj is a Python object with (string, callable object or None) - * or NULL - */ - -/* - * 2. for each of the flags - * determine whether to ignore, warn, raise error, or call Python function. 
- * If ignore, do nothing - * If warn, print a warning and continue - * If raise return an error - * If call, call a user-defined function with string - */ - -static int -_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) -{ - PyObject *pyfunc, *ret, *args; - char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0)); - char msg[100]; - ALLOW_C_API_DEF; - - ALLOW_C_API; - switch(method) { - case UFUNC_ERR_WARN: - PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { - goto fail; - } - break; - case UFUNC_ERR_RAISE: - PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s", - errtype, name); - goto fail; - case UFUNC_ERR_CALL: - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "python callback specified for %s (in " \ - " %s) but no function found.", - errtype, name); - goto fail; - } - args = Py_BuildValue("NN", PyUString_FromString(errtype), - PyInt_FromLong((long) retstatus)); - if (args == NULL) { - goto fail; - } - ret = PyObject_CallObject(pyfunc, args); - Py_DECREF(args); - if (ret == NULL) { - goto fail; - } - Py_DECREF(ret); - break; - case UFUNC_ERR_PRINT: - if (*first) { - fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); - *first = 0; - } - break; - case UFUNC_ERR_LOG: - if (first) { - *first = 0; - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "log specified for %s (in %s) but no " \ - "object with write method found.", - errtype, name); - goto fail; - } - PyOS_snprintf(msg, sizeof(msg), - "Warning: %s encountered in %s\n", errtype, name); - ret = PyObject_CallMethod(pyfunc, "write", "s", msg); - if (ret == NULL) { - goto fail; - } - Py_DECREF(ret); - } - break; - } - DISABLE_C_API; - return 0; - -fail: - DISABLE_C_API; - return -1; -} - - -/*UFUNC_API*/ -NPY_NO_EXPORT int -PyUFunc_getfperr(void) -{ - int retstatus; - 
UFUNC_CHECK_STATUS(retstatus); - return retstatus; -} - -#define HANDLEIT(NAME, str) {if (retstatus & UFUNC_FPE_##NAME) { \ - handle = errmask & UFUNC_MASK_##NAME; \ - if (handle && \ - _error_handler(handle >> UFUNC_SHIFT_##NAME, \ - errobj, str, retstatus, first) < 0) \ - return -1; \ - }} - -/*UFUNC_API*/ -NPY_NO_EXPORT int -PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int *first) -{ - int handle; - if (errmask && retstatus) { - HANDLEIT(DIVIDEBYZERO, "divide by zero"); - HANDLEIT(OVERFLOW, "overflow"); - HANDLEIT(UNDERFLOW, "underflow"); - HANDLEIT(INVALID, "invalid value"); - } - return 0; -} - -#undef HANDLEIT - - -/*UFUNC_API*/ -NPY_NO_EXPORT int -PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) -{ - int retstatus; - - /* 1. check hardware flag --- this is platform dependent code */ - retstatus = PyUFunc_getfperr(); - return PyUFunc_handlefperr(errmask, errobj, retstatus, first); -} - - -/* Checking the status flag clears it */ -/*UFUNC_API*/ -NPY_NO_EXPORT void -PyUFunc_clearfperr() -{ - PyUFunc_getfperr(); -} - - -#define NO_UFUNCLOOP 0 -#define ZERO_EL_REDUCELOOP 0 -#define ONE_UFUNCLOOP 1 -#define ONE_EL_REDUCELOOP 1 -#define NOBUFFER_UFUNCLOOP 2 -#define NOBUFFER_REDUCELOOP 2 -#define BUFFER_UFUNCLOOP 3 -#define BUFFER_REDUCELOOP 3 -#define SIGNATURE_NOBUFFER_UFUNCLOOP 4 - - -/* - * This function analyzes the input arguments - * and determines an appropriate __array_prepare__ function to call - * for the outputs. - * - * If an output argument is provided, then it is prepped - * with its own __array_prepare__ not with the one determined by - * the input arguments. - * - * if the provided output argument is already an ndarray, - * the prepping function is None (which means no prepping will - * be done --- not even PyArray_Return). - * - * A NULL is placed in output_prep for outputs that - * should just have PyArray_Return called. 
 */
static void
_find_array_prepare(PyObject *args, PyObject *kwds,
                    PyObject **output_prep, int nin, int nout)
{
    Py_ssize_t nargs;
    int i;
    int np = 0;   /* count of inputs that supplied a callable prep */
    PyObject *with_prep[NPY_MAXARGS], *preps[NPY_MAXARGS];
    PyObject *obj, *prep = NULL;

    /* If a 'subok' parameter is passed and isn't True, don't wrap */
    if (kwds != NULL && (obj = PyDict_GetItemString(kwds, "subok")) != NULL) {
        if (obj != Py_True) {
            /* NULL entry: caller should just do PyArray_Return */
            for (i = 0; i < nout; i++) {
                output_prep[i] = NULL;
            }
            return;
        }
    }

    nargs = PyTuple_GET_SIZE(args);
    /*
     * Collect callable __array_prepare__ attributes from the inputs.
     * Exact ndarrays and scalars never contribute one.
     */
    for (i = 0; i < nin; i++) {
        obj = PyTuple_GET_ITEM(args, i);
        if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) {
            continue;
        }
        prep = PyObject_GetAttrString(obj, "__array_prepare__");
        if (prep) {
            if (PyCallable_Check(prep)) {
                with_prep[np] = obj;
                preps[np] = prep;   /* preps[] owns this new reference */
                ++np;
            }
            else {
                /* non-callable attribute: ignore it */
                Py_DECREF(prep);
                prep = NULL;
            }
        }
        else {
            /* a missing attribute is not an error */
            PyErr_Clear();
        }
    }
    if (np > 0) {
        /* If we have some preps defined, find the one of highest priority */
        prep = preps[0];
        if (np > 1) {
            double maxpriority = PyArray_GetPriority(with_prep[0],
                                                     PyArray_SUBTYPE_PRIORITY);
            for (i = 1; i < np; ++i) {
                double priority = PyArray_GetPriority(with_prep[i],
                                                      PyArray_SUBTYPE_PRIORITY);
                if (priority > maxpriority) {
                    maxpriority = priority;
                    Py_DECREF(prep);
                    prep = preps[i];
                }
                else {
                    /* drop the reference held for the losing candidate */
                    Py_DECREF(preps[i]);
                }
            }
        }
    }

    /*
     * Here prep is the prepping function determined from the
     * input arrays (could be NULL).
     *
     * For all the output arrays decide what to do.
     *
     * 1) Use the prep function determined from the input arrays
     *    This is the default if the output array is not
     *    passed in.
     *
     * 2) Use the __array_prepare__ method of the output object.
     *    This is special cased for
     *    exact ndarray so that no PyArray_Return is
     *    done in that case.
     */
    for (i = 0; i < nout; i++) {
        int j = nin + i;
        int incref = 1;   /* does output_prep[i] still need an INCREF? */
        output_prep[i] = prep;
        obj = NULL;
        if (j < nargs) {
            obj = PyTuple_GET_ITEM(args, j);
            /* Output argument one may also be in a keyword argument */
            if (i == 0 && obj == Py_None && kwds != NULL) {
                obj = PyDict_GetItemString(kwds, "out");
            }
        }
        /* Output argument one may also be in a keyword argument */
        else if (i == 0 && kwds != NULL) {
            obj = PyDict_GetItemString(kwds, "out");
        }

        if (obj != Py_None && obj != NULL) {
            if (PyArray_CheckExact(obj)) {
                /* None signals to not call any wrapping */
                output_prep[i] = Py_None;
            }
            else {
                PyObject *oprep = PyObject_GetAttrString(obj,
                                                         "__array_prepare__");
                incref = 0;   /* oprep is already a fresh reference */
                if (!(oprep) || !(PyCallable_Check(oprep))) {
                    /* fall back to the input-derived prep function */
                    Py_XDECREF(oprep);
                    oprep = prep;
                    incref = 1;
                    PyErr_Clear();
                }
                output_prep[i] = oprep;
            }
        }

        if (incref) {
            Py_XINCREF(output_prep[i]);
        }
    }
    /* release this function's own reference to the winning prep */
    Py_XDECREF(prep);
    return;
}

#if USE_USE_DEFAULTS==1
/*
 * When zero, PyUFunc_GetPyValues skips the per-thread lookup entirely
 * and uses the compiled-in defaults.
 */
static int PyUFunc_NUM_NODEFAULTS = 0;
#endif
/* interned copy of UFUNC_PYVALS_NAME, created lazily */
static PyObject *PyUFunc_PYVALS_NAME = NULL;


/*
 * Extracts some values from the global pyvals tuple.
- * ref - should hold the global tuple - * name - is the name of the ufunc (ufuncobj->name) - * bufsize - receives the buffer size to use - * errmask - receives the bitmask for error handling - * errobj - receives the python object to call with the error, - * if an error handling method is 'call' - */ -static int -_extract_pyvals(PyObject *ref, char *name, int *bufsize, - int *errmask, PyObject **errobj) -{ - PyObject *retval; - - *errobj = NULL; - if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { - PyErr_Format(PyExc_TypeError, "%s must be a length 3 list.", - UFUNC_PYVALS_NAME); - return -1; - } - - *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); - if ((*bufsize == -1) && PyErr_Occurred()) { - return -1; - } - if ((*bufsize < PyArray_MIN_BUFSIZE) - || (*bufsize > PyArray_MAX_BUFSIZE) - || (*bufsize % 16 != 0)) { - PyErr_Format(PyExc_ValueError, - "buffer size (%d) is not in range " - "(%"INTP_FMT" - %"INTP_FMT") or not a multiple of 16", - *bufsize, (intp) PyArray_MIN_BUFSIZE, - (intp) PyArray_MAX_BUFSIZE); - return -1; - } - - *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); - if (*errmask < 0) { - if (PyErr_Occurred()) { - return -1; - } - PyErr_Format(PyExc_ValueError, - "invalid error mask (%d)", - *errmask); - return -1; - } - - retval = PyList_GET_ITEM(ref, 2); - if (retval != Py_None && !PyCallable_Check(retval)) { - PyObject *temp; - temp = PyObject_GetAttrString(retval, "write"); - if (temp == NULL || !PyCallable_Check(temp)) { - PyErr_SetString(PyExc_TypeError, - "python object must be callable or have " \ - "a callable write method"); - Py_XDECREF(temp); - return -1; - } - Py_DECREF(temp); - } - - *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval); - if (*errobj == NULL) { - return -1; - } - return 0; -} - - - -/*UFUNC_API - * - * On return, if errobj is populated with a non-NULL value, the caller - * owns a new reference to errobj. 
 */
NPY_NO_EXPORT int
PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject **errobj)
{
    PyObject *thedict;
    PyObject *ref = NULL;

#if USE_USE_DEFAULTS==1
    /* Fast path: nothing has overridden the defaults, skip the lookup. */
    if (PyUFunc_NUM_NODEFAULTS != 0) {
#endif
        if (PyUFunc_PYVALS_NAME == NULL) {
            PyUFunc_PYVALS_NAME = PyUString_InternFromString(UFUNC_PYVALS_NAME);
        }
        /* Overrides are stored in the thread-state dict (or builtins). */
        thedict = PyThreadState_GetDict();
        if (thedict == NULL) {
            thedict = PyEval_GetBuiltins();
        }
        ref = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME);  /* borrowed */
#if USE_USE_DEFAULTS==1
    }
#endif
    if (ref == NULL) {
        /* No pyvals entry found: report the compiled-in defaults. */
        *errmask = UFUNC_ERR_DEFAULT;
        *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None);
        *bufsize = PyArray_BUFSIZE;
        return 0;
    }
    return _extract_pyvals(ref, name, bufsize, errmask, errobj);
}

/*
 * Expands to: if 'name' equals the given ufunc name, return whether
 * 'op' exposes the corresponding reflected (__r<op>__) method.
 */
#define _GETATTR_(str, rstr) do {if (strcmp(name, #str) == 0) \
        return PyObject_HasAttrString(op, "__" #rstr "__");} while (0);

/*
 * Return nonzero if 'op' defines the reflected special method for the
 * binary ufunc called 'name'; 0 for unrecognized names or when the
 * method is absent.
 */
static int
_has_reflected_op(PyObject *op, char *name)
{
    _GETATTR_(add, radd);
    _GETATTR_(subtract, rsub);
    _GETATTR_(multiply, rmul);
    _GETATTR_(divide, rdiv);
    _GETATTR_(true_divide, rtruediv);
    _GETATTR_(floor_divide, rfloordiv);
    _GETATTR_(remainder, rmod);
    _GETATTR_(power, rpow);
    _GETATTR_(left_shift, rlshift);
    _GETATTR_(right_shift, rrshift);
    _GETATTR_(bitwise_and, rand);
    _GETATTR_(bitwise_xor, rxor);
    _GETATTR_(bitwise_or, ror);
    return 0;
}

#undef _GETATTR_


/* Return the position of next non-white-space char in the string */
static int
_next_non_white_space(const char* str, int offset)
{
    int ret = offset;
    /* only spaces and tabs count as white space here */
    while (str[ret] == ' ' || str[ret] == '\t') {
        ret++;
    }
    return ret;
}

/* True for characters that may start an identifier: [A-Za-z_] */
static int
_is_alpha_underscore(char ch)
{
    return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_';
}

/* True for characters that may continue an identifier: [A-Za-z0-9_] */
static int
_is_alnum_underscore(char ch)
{
    return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9');
}

/*
 * Return the ending position of a variable name
 */
static int
_get_end_of_name(const char* str, int offset)
{
int ret = offset; - while (_is_alnum_underscore(str[ret])) { - ret++; - } - return ret; -} - -/* - * Returns 1 if the dimension names pointed by s1 and s2 are the same, - * otherwise returns 0. - */ -static int -_is_same_name(const char* s1, const char* s2) -{ - while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { - if (*s1 != *s2) { - return 0; - } - s1++; - s2++; - } - return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); -} - -/* - * Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, - * and core_signature in PyUFuncObject "self". Returns 0 unless an - * error occured. - */ -static int -_parse_signature(PyUFuncObject *self, const char *signature) -{ - size_t len; - char const **var_names; - int nd = 0; /* number of dimension of the current argument */ - int cur_arg = 0; /* index into core_num_dims&core_offsets */ - int cur_core_dim = 0; /* index into core_dim_ixs */ - int i = 0; - char *parse_error = NULL; - - if (signature == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "_parse_signature with NULL signature"); - return -1; - } - - len = strlen(signature); - self->core_signature = _pya_malloc(sizeof(char) * (len+1)); - if (self->core_signature) { - strcpy(self->core_signature, signature); - } - /* Allocate sufficient memory to store pointers to all dimension names */ - var_names = _pya_malloc(sizeof(char const*) * len); - if (var_names == NULL) { - PyErr_NoMemory(); - return -1; - } - - self->core_enabled = 1; - self->core_num_dim_ix = 0; - self->core_num_dims = _pya_malloc(sizeof(int) * self->nargs); - self->core_dim_ixs = _pya_malloc(sizeof(int) * len); /* shrink this later */ - self->core_offsets = _pya_malloc(sizeof(int) * self->nargs); - if (self->core_num_dims == NULL || self->core_dim_ixs == NULL - || self->core_offsets == NULL) { - PyErr_NoMemory(); - goto fail; - } - - i = _next_non_white_space(signature, 0); - while (signature[i] != '\0') { - /* loop over input/output arguments */ - if (cur_arg == self->nin) { - 
/* expect "->" */ - if (signature[i] != '-' || signature[i+1] != '>') { - parse_error = "expect '->'"; - goto fail; - } - i = _next_non_white_space(signature, i + 2); - } - - /* - * parse core dimensions of one argument, - * e.g. "()", "(i)", or "(i,j)" - */ - if (signature[i] != '(') { - parse_error = "expect '('"; - goto fail; - } - i = _next_non_white_space(signature, i + 1); - while (signature[i] != ')') { - /* loop over core dimensions */ - int j = 0; - if (!_is_alpha_underscore(signature[i])) { - parse_error = "expect dimension name"; - goto fail; - } - while (j < self->core_num_dim_ix) { - if (_is_same_name(signature+i, var_names[j])) { - break; - } - j++; - } - if (j >= self->core_num_dim_ix) { - var_names[j] = signature+i; - self->core_num_dim_ix++; - } - self->core_dim_ixs[cur_core_dim] = j; - cur_core_dim++; - nd++; - i = _get_end_of_name(signature, i); - i = _next_non_white_space(signature, i); - if (signature[i] != ',' && signature[i] != ')') { - parse_error = "expect ',' or ')'"; - goto fail; - } - if (signature[i] == ',') - { - i = _next_non_white_space(signature, i + 1); - if (signature[i] == ')') { - parse_error = "',' must not be followed by ')'"; - goto fail; - } - } - } - self->core_num_dims[cur_arg] = nd; - self->core_offsets[cur_arg] = cur_core_dim-nd; - cur_arg++; - nd = 0; - - i = _next_non_white_space(signature, i + 1); - if (cur_arg != self->nin && cur_arg != self->nargs) { - /* - * The list of input arguments (or output arguments) was - * only read partially - */ - if (signature[i] != ',') { - parse_error = "expect ','"; - goto fail; - } - i = _next_non_white_space(signature, i + 1); - } - } - if (cur_arg != self->nargs) { - parse_error = "incomplete signature: not all arguments found"; - goto fail; - } - self->core_dim_ixs = _pya_realloc(self->core_dim_ixs, - sizeof(int)*cur_core_dim); - /* check for trivial core-signature, e.g. 
"(),()->()" */ - if (cur_core_dim == 0) { - self->core_enabled = 0; - } - _pya_free((void*)var_names); - return 0; - -fail: - _pya_free((void*)var_names); - if (parse_error) { - char *buf = _pya_malloc(sizeof(char) * (len + 200)); - if (buf) { - sprintf(buf, "%s at position %d in \"%s\"", - parse_error, i, signature); - PyErr_SetString(PyExc_ValueError, signature); - _pya_free(buf); - } - else { - PyErr_NoMemory(); - } - } - return -1; -} - - -/********* GENERIC UFUNC USING ITERATOR *********/ - -/* - * Parses the positional and keyword arguments for a generic ufunc call. - * - * Note that if an error is returned, the caller must free the - * non-zero references in out_op. This - * function does not do its own clean-up. - */ -static int get_ufunc_arguments(PyUFuncObject *self, - PyObject *args, PyObject *kwds, - PyArrayObject **out_op, - NPY_ORDER *out_order, - NPY_CASTING *out_casting, - PyObject **out_extobj, - PyObject **out_typetup, - int *out_subok, - int *out_any_object) -{ - npy_intp i, nargs, nin = self->nin; - PyObject *obj, *context; - PyObject *str_key_obj = NULL; - char *ufunc_name; - - int any_flexible = 0, any_object = 0; - - ufunc_name = self->name ? self->name : ""; - - /* Check number of arguments */ - nargs = PyTuple_Size(args); - if ((nargs < nin) || (nargs > self->nargs)) { - PyErr_SetString(PyExc_ValueError, "invalid number of arguments"); - return -1; - } - - /* Get input arguments */ - for(i = 0; i < nin; ++i) { - obj = PyTuple_GET_ITEM(args, i); - if (!PyArray_Check(obj) && !PyArray_IsScalar(obj, Generic)) { - /* - * TODO: There should be a comment here explaining what - * context does. 
- */ - context = Py_BuildValue("OOi", self, args, i); - if (context == NULL) { - return -1; - } - } - else { - context = NULL; - } - out_op[i] = (PyArrayObject *)PyArray_FromAny(obj, - NULL, 0, 0, 0, context); - Py_XDECREF(context); - if (out_op[i] == NULL) { - return -1; - } - if (!any_flexible && - PyTypeNum_ISFLEXIBLE(PyArray_DESCR(out_op[i])->type_num)) { - any_flexible = 1; - } - if (!any_object && - PyTypeNum_ISOBJECT(PyArray_DESCR(out_op[i])->type_num)) { - any_object = 1; - } - } - - /* - * Indicate not implemented if there are flexible objects (structured - * type or string) but no object types. - * - * Not sure - adding this increased to 246 errors, 150 failures. - */ - if (any_flexible && !any_object) { - return -2; - - } - - /* Get positional output arguments */ - for (i = nin; i < nargs; ++i) { - obj = PyTuple_GET_ITEM(args, i); - /* Translate None to NULL */ - if (obj == Py_None) { - continue; - } - /* If it's an array, can use it */ - if (PyArray_Check(obj)) { - if (!PyArray_ISWRITEABLE(obj)) { - PyErr_SetString(PyExc_ValueError, - "return array is not writeable"); - return -1; - } - Py_INCREF(obj); - out_op[i] = (PyArrayObject *)obj; - } - else { - PyErr_SetString(PyExc_TypeError, - "return arrays must be " - "of ArrayType"); - return -1; - } - } - - /* - * Get keyword output and other arguments. - * Raise an error if anything else is present in the - * keyword dictionary. 
- */ - if (kwds != NULL) { - PyObject *key, *value; - Py_ssize_t pos = 0; - while (PyDict_Next(kwds, &pos, &key, &value)) { - Py_ssize_t length = 0; - char *str = NULL; - int bad_arg = 1; - -#if defined(NPY_PY3K) - Py_XDECREF(str_key_obj); - str_key_obj = PyUnicode_AsASCIIString(key); - if (str_key_obj != NULL) { - key = str_key_obj; - } -#endif - - if (PyBytes_AsStringAndSize(key, &str, &length) == -1) { - PyErr_SetString(PyExc_TypeError, "invalid keyword argument"); - goto fail; - } - - switch (str[0]) { - case 'c': - /* Provides a policy for allowed casting */ - if (strncmp(str,"casting",7) == 0) { - if (!PyArray_CastingConverter(value, out_casting)) { - goto fail; - } - bad_arg = 0; - } - break; - case 'e': - /* - * Overrides the global parameters buffer size, - * error mask, and error object - */ - if (strncmp(str,"extobj",6) == 0) { - *out_extobj = value; - bad_arg = 0; - } - break; - case 'o': - /* First output may be specified as a keyword parameter */ - if (strncmp(str,"out",3) == 0) { - if (out_op[nin] != NULL) { - PyErr_SetString(PyExc_ValueError, - "cannot specify 'out' as both a " - "positional and keyword argument"); - goto fail; - } - - if (PyArray_Check(value)) { - if (!PyArray_ISWRITEABLE(value)) { - PyErr_SetString(PyExc_ValueError, - "return array is not writeable"); - goto fail; - } - Py_INCREF(value); - out_op[nin] = (PyArrayObject *)value; - } - else { - PyErr_SetString(PyExc_TypeError, - "return arrays must be " - "of ArrayType"); - goto fail; - } - bad_arg = 0; - } - /* Allows the default output layout to be overridden */ - else if (strncmp(str,"order",5) == 0) { - if (!PyArray_OrderConverter(value, out_order)) { - goto fail; - } - bad_arg = 0; - } - break; - case 's': - /* Allows a specific function inner loop to be selected */ - if (strncmp(str,"sig",3) == 0) { - if (*out_typetup != NULL) { - PyErr_SetString(PyExc_RuntimeError, - "cannot specify both 'sig' and 'dtype'"); - goto fail; - } - *out_typetup = value; - Py_INCREF(value); - 
bad_arg = 0; - } - else if (strncmp(str,"subok",5) == 0) { - if (!PyBool_Check(value)) { - PyErr_SetString(PyExc_TypeError, - "'subok' must be a boolean"); - goto fail; - } - *out_subok = (value == Py_True); - bad_arg = 0; - } - break; - case 'd': - /* Another way to specify 'sig' */ - if (strncmp(str,"dtype",5) == 0) { - /* Allow this parameter to be None */ - PyArray_Descr *dtype; - if (!PyArray_DescrConverter2(value, &dtype)) { - goto fail; - } - if (dtype != NULL) { - if (*out_typetup != NULL) { - PyErr_SetString(PyExc_RuntimeError, - "cannot specify both 'sig' and 'dtype'"); - goto fail; - } - *out_typetup = Py_BuildValue("(N)", dtype); - } - bad_arg = 0; - } - } - - if (bad_arg) { - char *format = "'%s' is an invalid keyword to ufunc '%s'"; - PyErr_Format(PyExc_TypeError, format, str, ufunc_name); - goto fail; - } - } - } - - *out_any_object = any_object; - - Py_XDECREF(str_key_obj); - return 0; - -fail: - Py_XDECREF(str_key_obj); - return -1; -} - -static const char * -_casting_to_string(NPY_CASTING casting) -{ - switch (casting) { - case NPY_NO_CASTING: - return "no"; - case NPY_EQUIV_CASTING: - return "equiv"; - case NPY_SAFE_CASTING: - return "safe"; - case NPY_SAME_KIND_CASTING: - return "same_kind"; - case NPY_UNSAFE_CASTING: - return "unsafe"; - default: - return ""; - } -} - - -static int -ufunc_loop_matches(PyUFuncObject *self, - PyArrayObject **op, - NPY_CASTING input_casting, - NPY_CASTING output_casting, - int any_object, - int use_min_scalar, - int *types, - int *out_no_castable_output, - char *out_err_src_typecode, - char *out_err_dst_typecode) -{ - npy_intp i, nin = self->nin, nop = nin + self->nout; - - /* - * First check if all the inputs can be safely cast - * to the types for this function - */ - for (i = 0; i < nin; ++i) { - PyArray_Descr *tmp; - - /* - * If no inputs are objects and there are more than one - * loop, don't allow conversion to object. The rationale - * behind this is mostly performance. 
Except for custom - * ufuncs built with just one object-parametered inner loop, - * only the types that are supported are implemented. Trying - * the object version of logical_or on float arguments doesn't - * seem right. - */ - if (types[i] == NPY_OBJECT && !any_object && self->ntypes > 1) { - return 0; - } - - tmp = PyArray_DescrFromType(types[i]); - if (tmp == NULL) { - return -1; - } - -#if NPY_UF_DBG_TRACING - printf("Checking type for op %d, type %d: ", (int)i, (int)types[i]); - PyObject_Print((PyObject *)tmp, stdout, 0); - printf(", operand type: "); - PyObject_Print((PyObject *)PyArray_DESCR(op[i]), stdout, 0); - printf("\n"); -#endif - /* - * If all the inputs are scalars, use the regular - * promotion rules, not the special value-checking ones. - */ - if (!use_min_scalar) { - if (!PyArray_CanCastTypeTo(PyArray_DESCR(op[i]), tmp, - input_casting)) { - Py_DECREF(tmp); - return 0; - } - } - else { - if (!PyArray_CanCastArrayTo(op[i], tmp, input_casting)) { - Py_DECREF(tmp); - return 0; - } - } - Py_DECREF(tmp); - } - NPY_UF_DBG_PRINT("The inputs all worked\n"); - - /* - * If all the inputs were ok, then check casting back to the - * outputs. 
- */ - for (i = nin; i < nop; ++i) { - if (op[i] != NULL) { - PyArray_Descr *tmp = PyArray_DescrFromType(types[i]); - if (tmp == NULL) { - return -1; - } - if (!PyArray_CanCastTypeTo(tmp, PyArray_DESCR(op[i]), - output_casting)) { - if (!(*out_no_castable_output)) { - *out_no_castable_output = 1; - *out_err_src_typecode = tmp->type; - *out_err_dst_typecode = PyArray_DESCR(op[i])->type; - } - Py_DECREF(tmp); - return 0; - } - Py_DECREF(tmp); - } - } - NPY_UF_DBG_PRINT("The outputs all worked\n"); - - return 1; -} - -static int -set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op, - PyArray_Descr **out_dtype, - int *types, - npy_intp buffersize, int *out_trivial_loop_ok) -{ - npy_intp i, nin = self->nin, nop = nin + self->nout; - - *out_trivial_loop_ok = 1; - /* Fill the dtypes array */ - for (i = 0; i < nop; ++i) { - out_dtype[i] = PyArray_DescrFromType(types[i]); - if (out_dtype[i] == NULL) { - return -1; - } - /* - * If the dtype doesn't match, or the array isn't aligned, - * indicate that the trivial loop can't be done. - */ - if (*out_trivial_loop_ok && op[i] != NULL && - (!PyArray_ISALIGNED(op[i]) || - !PyArray_EquivTypes(out_dtype[i], PyArray_DESCR(op[i])) - )) { - /* - * If op[j] is a scalar or small one dimensional - * array input, make a copy to keep the opportunity - * for a trivial loop. 
- */ - if (i < nin && (PyArray_NDIM(op[i]) == 0 || - (PyArray_NDIM(op[i]) == 1 && - PyArray_DIM(op[i],0) <= buffersize))) { - PyArrayObject *tmp; - Py_INCREF(out_dtype[i]); - tmp = (PyArrayObject *) - PyArray_CastToType(op[i], out_dtype[i], 0); - if (tmp == NULL) { - return -1; - } - Py_DECREF(op[i]); - op[i] = tmp; - } - else { - *out_trivial_loop_ok = 0; - } - } - } - - return 0; -} - -/* - * Does a search through the arguments and the loops - */ -static int -find_ufunc_matching_userloop(PyUFuncObject *self, - PyArrayObject **op, - NPY_CASTING input_casting, - NPY_CASTING output_casting, - npy_intp buffersize, - int any_object, - int use_min_scalar, - PyArray_Descr **out_dtype, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_trivial_loop_ok, - int *out_no_castable_output, - char *out_err_src_typecode, - char *out_err_dst_typecode) -{ - npy_intp i, nin = self->nin; - PyUFunc_Loop1d *funcdata; - - /* Use this to try to avoid repeating the same userdef loop search */ - int last_userdef = -1; - - for (i = 0; i < nin; ++i) { - int type_num = PyArray_DESCR(op[i])->type_num; - if (type_num != last_userdef && PyTypeNum_ISUSERDEF(type_num)) { - PyObject *key, *obj; - - last_userdef = type_num; - - key = PyInt_FromLong(type_num); - if (key == NULL) { - return -1; - } - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj == NULL) { - continue; - } - funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); - while (funcdata != NULL) { - int *types = funcdata->arg_types; - switch (ufunc_loop_matches(self, op, - input_casting, output_casting, - any_object, use_min_scalar, - types, - out_no_castable_output, out_err_src_typecode, - out_err_dst_typecode)) { - /* Error */ - case -1: - return -1; - /* Found a match */ - case 1: - set_ufunc_loop_data_types(self, op, out_dtype, types, - buffersize, out_trivial_loop_ok); - - /* Save the inner loop and its data */ - *out_innerloop = funcdata->func; - *out_innerloopdata = 
funcdata->data; - - NPY_UF_DBG_PRINT("Returning userdef inner " - "loop successfully\n"); - - return 0; - } - - funcdata = funcdata->next; - } - } - } - - /* Didn't find a match */ - return 0; -} - -/* - * Does a search through the arguments and the loops - */ -static int -find_ufunc_specified_userloop(PyUFuncObject *self, - int n_specified, - int *specified_types, - PyArrayObject **op, - NPY_CASTING casting, - npy_intp buffersize, - int any_object, - int use_min_scalar, - PyArray_Descr **out_dtype, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_trivial_loop_ok) -{ - npy_intp i, j, nin = self->nin, nop = nin + self->nout; - PyUFunc_Loop1d *funcdata; - - /* Use this to try to avoid repeating the same userdef loop search */ - int last_userdef = -1; - - int no_castable_output = 0; - char err_src_typecode = '-', err_dst_typecode = '-'; - - for (i = 0; i < nin; ++i) { - int type_num = PyArray_DESCR(op[i])->type_num; - if (type_num != last_userdef && PyTypeNum_ISUSERDEF(type_num)) { - PyObject *key, *obj; - - last_userdef = type_num; - - key = PyInt_FromLong(type_num); - if (key == NULL) { - return -1; - } - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj == NULL) { - continue; - } - funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); - while (funcdata != NULL) { - int *types = funcdata->arg_types; - int matched = 1; - - if (n_specified == nop) { - for (j = 0; j < nop; ++j) { - if (types[j] != specified_types[j]) { - matched = 0; - break; - } - } - } else { - if (types[nin] != specified_types[0]) { - matched = 0; - } - } - if (!matched) { - continue; - } - - switch (ufunc_loop_matches(self, op, - casting, casting, - any_object, use_min_scalar, - types, - &no_castable_output, &err_src_typecode, - &err_dst_typecode)) { - /* It works */ - case 1: - set_ufunc_loop_data_types(self, op, out_dtype, types, - buffersize, out_trivial_loop_ok); - - /* Save the inner loop and its data */ - *out_innerloop = funcdata->func; 
- *out_innerloopdata = funcdata->data; - - NPY_UF_DBG_PRINT("Returning userdef inner " - "loop successfully\n"); - - return 0; - /* Didn't match */ - case 0: - PyErr_Format(PyExc_TypeError, - "found a user loop for ufunc '%s' " - "matching the type-tuple, " - "but the inputs and/or outputs could not be " - "cast according to the casting rule", - self->name ? self->name : "(unknown)"); - return -1; - /* Error */ - case -1: - return -1; - } - - funcdata = funcdata->next; - } - } - } - - /* Didn't find a match */ - return 0; -} - -/* - * Provides an ordering for the dtype 'kind' character codes, to help - * determine when to use the min_scalar_type function. This groups - * 'kind' into boolean, integer, floating point, and everything else. - */ - -static int -dtype_kind_to_simplified_ordering(char kind) -{ - switch (kind) { - /* Boolean kind */ - case 'b': - return 0; - /* Unsigned int kind */ - case 'u': - /* Signed int kind */ - case 'i': - return 1; - /* Float kind */ - case 'f': - /* Complex kind */ - case 'c': - return 2; - /* Anything else */ - default: - return 3; - } -} - -static int -should_use_min_scalar(PyArrayObject **op, int nop) -{ - int i, use_min_scalar, kind; - int all_scalars = 1, max_scalar_kind = -1, max_array_kind = -1; - - /* - * Determine if there are any scalars, and if so, whether - * the maximum "kind" of the scalars surpasses the maximum - * "kind" of the arrays - */ - use_min_scalar = 0; - if (nop > 1) { - for(i = 0; i < nop; ++i) { - kind = dtype_kind_to_simplified_ordering( - PyArray_DESCR(op[i])->kind); - if (PyArray_NDIM(op[i]) == 0) { - if (kind > max_scalar_kind) { - max_scalar_kind = kind; - } - } - else { - all_scalars = 0; - if (kind > max_array_kind) { - max_array_kind = kind; - } - - } - } - - /* Indicate whether to use the min_scalar_type function */ - if (!all_scalars && max_array_kind >= max_scalar_kind) { - use_min_scalar = 1; - } - } - - return use_min_scalar; -} - -/* - * Does a linear search for the best inner loop of the 
ufunc. - * When op[i] is a scalar or a one dimensional array smaller than - * the buffersize, and needs a dtype conversion, this function - * may substitute op[i] with a version cast to the correct type. This way, - * the later trivial loop detection has a higher chance of being triggered. - * - * Note that if an error is returned, the caller must free the non-zero - * references in out_dtype. This function does not do its own clean-up. - */ -static int -find_best_ufunc_inner_loop(PyUFuncObject *self, - PyArrayObject **op, - NPY_CASTING input_casting, - NPY_CASTING output_casting, - npy_intp buffersize, - int any_object, - PyArray_Descr **out_dtype, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_trivial_loop_ok) -{ - npy_intp i, j, nin = self->nin, nop = nin + self->nout; - int types[NPY_MAXARGS]; - char *ufunc_name; - int no_castable_output, use_min_scalar; - - /* For making a better error message on coercion error */ - char err_dst_typecode = '-', err_src_typecode = '-'; - - ufunc_name = self->name ? self->name : "(unknown)"; - - use_min_scalar = should_use_min_scalar(op, nin); - - /* If the ufunc has userloops, search for them. */ - if (self->userloops) { - switch (find_ufunc_matching_userloop(self, op, - input_casting, output_casting, - buffersize, any_object, use_min_scalar, - out_dtype, out_innerloop, out_innerloopdata, - out_trivial_loop_ok, - &no_castable_output, &err_src_typecode, - &err_dst_typecode)) { - /* Error */ - case -1: - return -1; - /* A loop was found */ - case 1: - return 0; - } - } - - /* - * Determine the UFunc loop. This could in general be *much* faster, - * and a better way to implement it might be for the ufunc to - * provide a function which gives back the result type and inner - * loop function. - * - * A default fast mechanism could be provided for functions which - * follow the most typical pattern, when all functions have signatures - * "xx...x -> x" for some built-in data type x, as follows. 
- * - Use PyArray_ResultType to get the output type - * - Look up the inner loop in a table based on the output type_num - * - * The method for finding the loop in the previous code did not - * appear consistent (as noted by some asymmetry in the generated - * coercion tables for np.add). - */ - no_castable_output = 0; - for (i = 0; i < self->ntypes; ++i) { - char *orig_types = self->types + i*self->nargs; - - /* Copy the types into an int array for matching */ - for (j = 0; j < nop; ++j) { - types[j] = orig_types[j]; - } - - NPY_UF_DBG_PRINT1("Trying function loop %d\n", (int)i); - switch (ufunc_loop_matches(self, op, - input_casting, output_casting, - any_object, use_min_scalar, - types, - &no_castable_output, &err_src_typecode, - &err_dst_typecode)) { - /* Error */ - case -1: - return -1; - /* Found a match */ - case 1: - set_ufunc_loop_data_types(self, op, out_dtype, types, - buffersize, out_trivial_loop_ok); - - /* Save the inner loop and its data */ - *out_innerloop = self->functions[i]; - *out_innerloopdata = self->data[i]; - - NPY_UF_DBG_PRINT("Returning inner loop successfully\n"); - - return 0; - } - - } - - /* If no function was found, throw an error */ - NPY_UF_DBG_PRINT("No loop was found\n"); - if (no_castable_output) { - PyErr_Format(PyExc_TypeError, - "ufunc '%s' output (typecode '%c') could not be coerced to " - "provided output parameter (typecode '%c') according " - "to the casting rule '%s'", - ufunc_name, err_src_typecode, err_dst_typecode, - _casting_to_string(output_casting)); - } - else { - /* - * TODO: We should try again if the casting rule is same_kind - * or unsafe, and look for a function more liberally. 
- */ - PyErr_Format(PyExc_TypeError, - "ufunc '%s' not supported for the input types, and the " - "inputs could not be safely coerced to any supported " - "types according to the casting rule '%s'", - ufunc_name, - _casting_to_string(input_casting)); - } - - return -1; -} - -/* - * Does a linear search for the inner loop of the ufunc specified by type_tup. - * When op[i] is a scalar or a one dimensional array smaller than - * the buffersize, and needs a dtype conversion, this function - * may substitute op[i] with a version cast to the correct type. This way, - * the later trivial loop detection has a higher chance of being triggered. - * - * Note that if an error is returned, the caller must free the non-zero - * references in out_dtype. This function does not do its own clean-up. - */ -static int -find_specified_ufunc_inner_loop(PyUFuncObject *self, - PyObject *type_tup, - PyArrayObject **op, - NPY_CASTING casting, - npy_intp buffersize, - int any_object, - PyArray_Descr **out_dtype, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_trivial_loop_ok) -{ - npy_intp i, j, n, nin = self->nin, nop = nin + self->nout; - int n_specified = 0; - int specified_types[NPY_MAXARGS], types[NPY_MAXARGS]; - char *ufunc_name; - int no_castable_output, use_min_scalar; - - /* For making a better error message on coercion error */ - char err_dst_typecode = '-', err_src_typecode = '-'; - - ufunc_name = self->name ? self->name : "(unknown)"; - - use_min_scalar = should_use_min_scalar(op, nin); - - /* Fill in specified_types from the tuple or string */ - if (PyTuple_Check(type_tup)) { - n = PyTuple_GET_SIZE(type_tup); - if (n != 1 && n != nop) { - PyErr_Format(PyExc_ValueError, - "a type-tuple must be specified " \ - "of length 1 or %d for ufunc '%s'", (int)nop, - self->name ? 
self->name : "(unknown)"); - return -1; - } - - for (i = 0; i < n; ++i) { - PyArray_Descr *dtype = NULL; - if (!PyArray_DescrConverter(PyTuple_GET_ITEM(type_tup, i), - &dtype)) { - return -1; - } - specified_types[i] = dtype->type_num; - Py_DECREF(dtype); - } - - n_specified = n; - } - else if (PyBytes_Check(type_tup) || PyUnicode_Check(type_tup)) { - Py_ssize_t length; - char *str; - PyObject *str_obj = NULL; - - if (PyUnicode_Check(type_tup)) { - str_obj = PyUnicode_AsASCIIString(type_tup); - if (str_obj == NULL) { - return -1; - } - type_tup = str_obj; - } - - if (!PyBytes_AsStringAndSize(type_tup, &str, &length) < 0) { - Py_XDECREF(str_obj); - return -1; - } - if (length != 1 && (length != nop + 2 || - str[nin] != '-' || str[nin+1] != '>')) { - PyErr_Format(PyExc_ValueError, - "a type-string for %s, " \ - "requires 1 typecode, or " - "%d typecode(s) before " \ - "and %d after the -> sign", - self->name ? self->name : "(unknown)", - self->nin, self->nout); - Py_XDECREF(str_obj); - return -1; - } - if (length == 1) { - PyArray_Descr *dtype; - n_specified = 1; - dtype = PyArray_DescrFromType(str[0]); - if (dtype == NULL) { - Py_XDECREF(str_obj); - return -1; - } - NPY_UF_DBG_PRINT2("signature character '%c', type num %d\n", - str[0], dtype->type_num); - specified_types[0] = dtype->type_num; - Py_DECREF(dtype); - } - else { - PyArray_Descr *dtype; - n_specified = (int)nop; - - for (i = 0; i < nop; ++i) { - npy_intp istr = i < nin ? i : i+2; - - dtype = PyArray_DescrFromType(str[istr]); - if (dtype == NULL) { - Py_XDECREF(str_obj); - return -1; - } - NPY_UF_DBG_PRINT2("signature character '%c', type num %d\n", - str[istr], dtype->type_num); - specified_types[i] = dtype->type_num; - Py_DECREF(dtype); - } - } - Py_XDECREF(str_obj); - } - - /* If the ufunc has userloops, search for them. 
*/ - if (self->userloops) { - NPY_UF_DBG_PRINT("Searching user loops for specified sig\n"); - switch (find_ufunc_specified_userloop(self, - n_specified, specified_types, - op, casting, - buffersize, any_object, use_min_scalar, - out_dtype, out_innerloop, out_innerloopdata, - out_trivial_loop_ok)) { - /* Error */ - case -1: - return -1; - /* Found matching loop */ - case 1: - return 0; - } - } - - NPY_UF_DBG_PRINT("Searching loops for specified sig\n"); - for (i = 0; i < self->ntypes; ++i) { - char *orig_types = self->types + i*self->nargs; - int matched = 1; - - NPY_UF_DBG_PRINT1("Trying function loop %d\n", (int)i); - - /* Copy the types into an int array for matching */ - for (j = 0; j < nop; ++j) { - types[j] = orig_types[j]; - } - - if (n_specified == nop) { - for (j = 0; j < nop; ++j) { - if (types[j] != specified_types[j]) { - matched = 0; - break; - } - } - } else { - NPY_UF_DBG_PRINT2("Specified type: %d, first output type: %d\n", - specified_types[0], types[nin]); - if (types[nin] != specified_types[0]) { - matched = 0; - } - } - if (!matched) { - continue; - } - - NPY_UF_DBG_PRINT("It matches, confirming type casting\n"); - switch (ufunc_loop_matches(self, op, - casting, casting, - any_object, use_min_scalar, - types, - &no_castable_output, &err_src_typecode, - &err_dst_typecode)) { - /* Error */ - case -1: - return -1; - /* It worked */ - case 1: - set_ufunc_loop_data_types(self, op, out_dtype, types, - buffersize, out_trivial_loop_ok); - - /* Save the inner loop and its data */ - *out_innerloop = self->functions[i]; - *out_innerloopdata = self->data[i]; - - NPY_UF_DBG_PRINT("Returning specified inner loop successfully\n"); - - return 0; - /* Didn't work */ - case 0: - PyErr_Format(PyExc_TypeError, - "found a loop for ufunc '%s' " - "matching the type-tuple, " - "but the inputs and/or outputs could not be " - "cast according to the casting rule", - ufunc_name); - return -1; - } - - } - - /* If no function was found, throw an error */ - 
NPY_UF_DBG_PRINT("No specified loop was found\n"); - - PyErr_Format(PyExc_TypeError, - "No loop matching the specified signature was found " - "for ufunc %s", ufunc_name); - - return -1; -} - -static void -trivial_two_operand_loop(PyArrayObject **op, - PyUFuncGenericFunction innerloop, - void *innerloopdata) -{ - char *data[2]; - npy_intp count[2], stride[2]; - int needs_api; - NPY_BEGIN_THREADS_DEF; - - needs_api = PyDataType_REFCHK(PyArray_DESCR(op[0])) || - PyDataType_REFCHK(PyArray_DESCR(op[1])); - - PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(op[0], op[1], - count[0], - data[0], data[1], - stride[0], stride[1]); - count[1] = count[0]; - NPY_UF_DBG_PRINT1("two operand loop count %d\n", (int)count[0]); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - innerloop(data, count, stride, innerloopdata); - - if (!needs_api) { - NPY_END_THREADS; - } -} - -static void -trivial_three_operand_loop(PyArrayObject **op, - PyUFuncGenericFunction innerloop, - void *innerloopdata) -{ - char *data[3]; - npy_intp count[3], stride[3]; - int needs_api; - NPY_BEGIN_THREADS_DEF; - - needs_api = PyDataType_REFCHK(PyArray_DESCR(op[0])) || - PyDataType_REFCHK(PyArray_DESCR(op[1])) || - PyDataType_REFCHK(PyArray_DESCR(op[2])); - - PyArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(op[0], op[1], op[2], - count[0], - data[0], data[1], data[2], - stride[0], stride[1], stride[2]); - count[1] = count[0]; - count[2] = count[0]; - NPY_UF_DBG_PRINT1("three operand loop count %d\n", (int)count[0]); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - innerloop(data, count, stride, innerloopdata); - - if (!needs_api) { - NPY_END_THREADS; - } -} - -/* - * Calls the given __array_prepare__ function on the operand *op, - * substituting it in place if a new array is returned and matches - * the old one. - * - * This requires that the dimensions, strides and data type remain - * exactly the same, which may be more strict than before. 
- */ -static int -prepare_ufunc_output(PyUFuncObject *self, - PyArrayObject **op, - PyObject *arr_prep, - PyObject *arr_prep_args, - int i) -{ - if (arr_prep != NULL && arr_prep != Py_None) { - PyObject *res; - - res = PyObject_CallFunction(arr_prep, "O(OOi)", - *op, self, arr_prep_args, i); - if ((res == NULL) || (res == Py_None) || !PyArray_Check(res)) { - if (!PyErr_Occurred()){ - PyErr_SetString(PyExc_TypeError, - "__array_prepare__ must return an " - "ndarray or subclass thereof"); - } - Py_XDECREF(res); - return -1; - } - - /* If the same object was returned, nothing to do */ - if (res == (PyObject *)*op) { - Py_DECREF(res); - } - /* If the result doesn't match, throw an error */ - else if (PyArray_NDIM(res) != PyArray_NDIM(*op) || - !PyArray_CompareLists(PyArray_DIMS(res), - PyArray_DIMS(*op), - PyArray_NDIM(res)) || - !PyArray_CompareLists(PyArray_STRIDES(res), - PyArray_STRIDES(*op), - PyArray_NDIM(res)) || - !PyArray_EquivTypes(PyArray_DESCR(res), - PyArray_DESCR(*op))) { - PyErr_SetString(PyExc_TypeError, - "__array_prepare__ must return an " - "ndarray or subclass thereof which is " - "otherwise identical to its input"); - Py_DECREF(res); - return -1; - } - /* Replace the op value */ - else { - Py_DECREF(*op); - *op = (PyArrayObject *)res; - } - } - - return 0; -} - -static int -iterator_loop(PyUFuncObject *self, - PyArrayObject **op, - PyArray_Descr **dtype, - NPY_ORDER order, - npy_intp buffersize, - PyObject **arr_prep, - PyObject *arr_prep_args, - PyUFuncGenericFunction innerloop, - void *innerloopdata) -{ - npy_intp i, nin = self->nin, nout = self->nout; - npy_intp nop = nin + nout; - npy_uint32 op_flags[NPY_MAXARGS]; - NpyIter *iter; - char *baseptrs[NPY_MAXARGS]; - int needs_api; - - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *stride; - npy_intp *count_ptr; - - PyArrayObject **op_it; - - NPY_BEGIN_THREADS_DEF; - - /* Set up the flags */ - for (i = 0; i < nin; ++i) { - op_flags[i] = NPY_ITER_READONLY| - NPY_ITER_ALIGNED; - } - 
for (i = nin; i < nop; ++i) { - op_flags[i] = NPY_ITER_WRITEONLY| - NPY_ITER_ALIGNED| - NPY_ITER_ALLOCATE| - NPY_ITER_NO_BROADCAST| - NPY_ITER_NO_SUBTYPE; - } - - /* - * Allocate the iterator. Because the types of the inputs - * were already checked, we use the casting rule 'unsafe' which - * is faster to calculate. - */ - iter = NpyIter_AdvancedNew(nop, op, - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_REFS_OK| - NPY_ITER_ZEROSIZE_OK| - NPY_ITER_BUFFERED| - NPY_ITER_GROWINNER| - NPY_ITER_DELAY_BUFALLOC, - order, NPY_UNSAFE_CASTING, - op_flags, dtype, - 0, NULL, NULL, buffersize); - if (iter == NULL) { - return -1; - } - - needs_api = NpyIter_IterationNeedsAPI(iter); - - /* Copy any allocated outputs */ - op_it = NpyIter_GetOperandArray(iter); - for (i = nin; i < nop; ++i) { - if (op[i] == NULL) { - op[i] = op_it[i]; - Py_INCREF(op[i]); - } - } - - /* Call the __array_prepare__ functions where necessary */ - for (i = 0; i < nout; ++i) { - if (prepare_ufunc_output(self, &op[nin+i], - arr_prep[i], arr_prep_args, i) < 0) { - NpyIter_Deallocate(iter); - return -1; - } - } - - /* Only do the loop if the iteration size is non-zero */ - if (NpyIter_GetIterSize(iter) != 0) { - - /* Reset the iterator with the base pointers from the wrapped outputs */ - for (i = 0; i < nin; ++i) { - baseptrs[i] = PyArray_BYTES(op_it[i]); - } - for (i = nin; i < nop; ++i) { - baseptrs[i] = PyArray_BYTES(op[i]); - } - if (NpyIter_ResetBasePointers(iter, baseptrs, NULL) != NPY_SUCCEED) { - NpyIter_Deallocate(iter); - return -1; - } - - /* Get the variables needed for the loop */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - return -1; - } - dataptr = NpyIter_GetDataPtrArray(iter); - stride = NpyIter_GetInnerStrideArray(iter); - count_ptr = NpyIter_GetInnerLoopSizePtr(iter); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - /* Execute the loop */ - do { - NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*count_ptr); - innerloop(dataptr, 
count_ptr, stride, innerloopdata); - } while (iternext(iter)); - - if (!needs_api) { - NPY_END_THREADS; - } - } - - NpyIter_Deallocate(iter); - return 0; -} - -/* - * trivial_loop_ok - 1 if no alignment, data conversion, etc required - * nin - number of inputs - * nout - number of outputs - * op - the operands (nin + nout of them) - * order - the loop execution order/output memory order - * buffersize - how big of a buffer to use - * arr_prep - the __array_prepare__ functions for the outputs - * innerloop - the inner loop function - * innerloopdata - data to pass to the inner loop - */ -static int -execute_ufunc_loop(PyUFuncObject *self, - int trivial_loop_ok, - PyArrayObject **op, - PyArray_Descr **dtype, - NPY_ORDER order, - npy_intp buffersize, - PyObject **arr_prep, - PyObject *arr_prep_args, - PyUFuncGenericFunction innerloop, - void *innerloopdata) -{ - npy_intp nin = self->nin, nout = self->nout; - - /* First check for the trivial cases that don't need an iterator */ - if (trivial_loop_ok) { - if (nin == 1 && nout == 1) { - if (op[1] == NULL && - (order == NPY_ANYORDER || order == NPY_KEEPORDER) && - PyArray_TRIVIALLY_ITERABLE(op[0])) { - Py_INCREF(dtype[1]); - op[1] = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtype[1], - PyArray_NDIM(op[0]), - PyArray_DIMS(op[0]), - NULL, NULL, - PyArray_ISFORTRAN(op[0]) ? 
NPY_F_CONTIGUOUS : 0, - NULL); - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(self, &op[1], - arr_prep[0], arr_prep_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 1 input with allocated output\n"); - trivial_two_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - else if (op[1] != NULL && - PyArray_NDIM(op[1]) >= PyArray_NDIM(op[0]) && - PyArray_TRIVIALLY_ITERABLE_PAIR(op[0], op[1])) { - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(self, &op[1], - arr_prep[0], arr_prep_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 1 input\n"); - trivial_two_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - } - else if (nin == 2 && nout == 1) { - if (op[2] == NULL && - (order == NPY_ANYORDER || order == NPY_KEEPORDER) && - PyArray_TRIVIALLY_ITERABLE_PAIR(op[0], op[1])) { - PyArrayObject *tmp; - /* - * Have to choose the input with more dimensions to clone, as - * one of them could be a scalar. - */ - if (PyArray_NDIM(op[0]) >= PyArray_NDIM(op[1])) { - tmp = op[0]; - } - else { - tmp = op[1]; - } - Py_INCREF(dtype[2]); - op[2] = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - dtype[2], - PyArray_NDIM(tmp), - PyArray_DIMS(tmp), - NULL, NULL, - PyArray_ISFORTRAN(tmp) ? 
NPY_F_CONTIGUOUS : 0, - NULL); - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(self, &op[2], - arr_prep[0], arr_prep_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 2 input with allocated output\n"); - trivial_three_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - else if (op[2] != NULL && - PyArray_NDIM(op[2]) >= PyArray_NDIM(op[0]) && - PyArray_NDIM(op[2]) >= PyArray_NDIM(op[1]) && - PyArray_TRIVIALLY_ITERABLE_TRIPLE(op[0], op[1], op[2])) { - - /* Call the __prepare_array__ if necessary */ - if (prepare_ufunc_output(self, &op[2], - arr_prep[0], arr_prep_args, 0) < 0) { - return -1; - } - - NPY_UF_DBG_PRINT("trivial 2 input\n"); - trivial_three_operand_loop(op, innerloop, innerloopdata); - - return 0; - } - } - } - - /* - * If no trivial loop matched, an iterator is required to - * resolve broadcasting, etc - */ - - NPY_UF_DBG_PRINT("iterator loop\n"); - if (iterator_loop(self, op, dtype, order, - buffersize, arr_prep, arr_prep_args, - innerloop, innerloopdata) < 0) { - return -1; - } - - return 0; -} - -static PyObject * -make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds) -{ - PyObject *out = kwds ? 
PyDict_GetItemString(kwds, "out") : NULL; - PyObject *arr_prep_args; - - if (out == NULL) { - Py_INCREF(args); - return args; - } - else { - npy_intp i, nargs = PyTuple_GET_SIZE(args), n; - n = nargs; - if (n < nin + 1) { - n = nin + 1; - } - arr_prep_args = PyTuple_New(n); - if (arr_prep_args == NULL) { - return NULL; - } - /* Copy the tuple, but set the nin-th item to the keyword arg */ - for (i = 0; i < nin; ++i) { - PyObject *item = PyTuple_GET_ITEM(args, i); - Py_INCREF(item); - PyTuple_SET_ITEM(arr_prep_args, i, item); - } - Py_INCREF(out); - PyTuple_SET_ITEM(arr_prep_args, nin, out); - for (i = nin+1; i < n; ++i) { - PyObject *item = PyTuple_GET_ITEM(args, i); - Py_INCREF(item); - PyTuple_SET_ITEM(arr_prep_args, i, item); - } - - return arr_prep_args; - } -} - -static int -PyUFunc_GeneralizedFunction(PyUFuncObject *self, - PyObject *args, PyObject *kwds, - PyArrayObject **op) -{ - int nin, nout; - int i, idim, nop; - char *ufunc_name; - int retval = -1, any_object = 0, subok = 1; - NPY_CASTING input_casting; - - PyArray_Descr *dtype[NPY_MAXARGS]; - - /* Use remapped axes for generalized ufunc */ - int broadcast_ndim, op_ndim; - int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS]; - int *op_axes[NPY_MAXARGS]; - - npy_uint32 op_flags[NPY_MAXARGS]; - - NpyIter *iter = NULL; - - /* These parameters come from extobj= or from a TLS global */ - int buffersize = 0, errormask = 0; - PyObject *errobj = NULL; - int first_error = 1; - - /* The selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; - /* The dimensions which get passed to the inner loop */ - npy_intp inner_dimensions[NPY_MAXDIMS+1]; - /* The strides which get passed to the inner loop */ - npy_intp *inner_strides = NULL; - - npy_intp *inner_strides_tmp, *ax_strides_tmp[NPY_MAXDIMS]; - int core_dim_ixs_size, *core_dim_ixs; - - /* The __array_prepare__ function to call for each output */ - PyObject *arr_prep[NPY_MAXARGS]; - /* - * This is either args, or args with the out= 
parameter from - * kwds added appropriately. - */ - PyObject *arr_prep_args = NULL; - - int trivial_loop_ok = 0; - - NPY_ORDER order = NPY_KEEPORDER; - /* - * Many things in NumPy do unsafe casting (doing int += float, etc). - * The strictness should probably become a state parameter, similar - * to the seterr/geterr. - */ - NPY_CASTING casting = NPY_UNSAFE_CASTING; - /* When provided, extobj and typetup contain borrowed references */ - PyObject *extobj = NULL, *type_tup = NULL; - - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return -1; - } - - nin = self->nin; - nout = self->nout; - nop = nin + nout; - - ufunc_name = self->name ? self->name : ""; - - NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name); - - /* Initialize all the operands and dtypes to NULL */ - for (i = 0; i < nop; ++i) { - op[i] = NULL; - dtype[i] = NULL; - arr_prep[i] = NULL; - } - - NPY_UF_DBG_PRINT("Getting arguments\n"); - - /* Get all the arguments */ - retval = get_ufunc_arguments(self, args, kwds, - op, &order, &casting, &extobj, &type_tup, &subok, &any_object); - if (retval < 0) { - goto fail; - } - - /* Figure out the number of dimensions needed by the iterator */ - broadcast_ndim = 0; - for (i = 0; i < nin; ++i) { - int n = PyArray_NDIM(op[i]) - self->core_num_dims[i]; - if (n > broadcast_ndim) { - broadcast_ndim = n; - } - } - op_ndim = broadcast_ndim + self->core_num_dim_ix; - if (op_ndim > NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "too many dimensions for generalized ufunc %s", - ufunc_name); - retval = -1; - goto fail; - } - - /* Fill in op_axes for all the operands */ - core_dim_ixs_size = 0; - core_dim_ixs = self->core_dim_ixs; - for (i = 0; i < nop; ++i) { - int n; - if (op[i]) { - /* - * Note that n may be negative if broadcasting - * extends into the core dimensions. 
- */ - n = PyArray_NDIM(op[i]) - self->core_num_dims[i]; - } - else { - n = broadcast_ndim; - } - /* Broadcast all the unspecified dimensions normally */ - for (idim = 0; idim < broadcast_ndim; ++idim) { - if (idim >= broadcast_ndim - n) { - op_axes_arrays[i][idim] = idim - (broadcast_ndim - n); - } - else { - op_axes_arrays[i][idim] = -1; - } - } - /* Use the signature information for the rest */ - for (idim = broadcast_ndim; idim < op_ndim; ++idim) { - op_axes_arrays[i][idim] = -1; - } - for (idim = 0; idim < self->core_num_dims[i]; ++idim) { - if (n + idim >= 0) { - op_axes_arrays[i][broadcast_ndim + core_dim_ixs[idim]] = - n + idim; - } - else { - op_axes_arrays[i][broadcast_ndim + core_dim_ixs[idim]] = -1; - } - } - core_dim_ixs_size += self->core_num_dims[i]; - core_dim_ixs += self->core_num_dims[i]; - op_axes[i] = op_axes_arrays[i]; - } - - /* Get the buffersize, errormask, and error object globals */ - if (extobj == NULL) { - if (PyUFunc_GetPyValues(ufunc_name, - &buffersize, &errormask, &errobj) < 0) { - retval = -1; - goto fail; - } - } - else { - if (_extract_pyvals(extobj, ufunc_name, - &buffersize, &errormask, &errobj) < 0) { - retval = -1; - goto fail; - } - } - - NPY_UF_DBG_PRINT("Finding inner loop\n"); - - /* - * Decide the casting rules for inputs and outputs. We want - * NPY_SAFE_CASTING or stricter, so that the loop selection code - * doesn't choose an integer loop for float inputs, for example. - */ - input_casting = (casting > NPY_SAFE_CASTING) ? 
NPY_SAFE_CASTING : casting; - - if (type_tup == NULL) { - /* Find the best ufunc inner loop, and fill in the dtypes */ - retval = find_best_ufunc_inner_loop(self, op, input_casting, casting, - buffersize, any_object, dtype, - &innerloop, &innerloopdata, &trivial_loop_ok); - } else { - /* Find the specified ufunc inner loop, and fill in the dtypes */ - retval = find_specified_ufunc_inner_loop(self, type_tup, - op, casting, - buffersize, any_object, dtype, - &innerloop, &innerloopdata, &trivial_loop_ok); - } - if (retval < 0) { - goto fail; - } - - /* - * FAIL with NotImplemented if the other object has - * the __r__ method and has __array_priority__ as - * an attribute (signalling it can handle ndarray's) - * and is not already an ndarray or a subtype of the same type. - */ - if (nin == 2 && nout == 1 && dtype[1]->type_num == NPY_OBJECT) { - PyObject *_obj = PyTuple_GET_ITEM(args, 1); - if (!PyArray_CheckExact(_obj) - /* If both are same subtype of object arrays, then proceed */ - && !(Py_TYPE(_obj) == Py_TYPE(PyTuple_GET_ITEM(args, 0))) - && PyObject_HasAttrString(_obj, "__array_priority__") - && _has_reflected_op(_obj, ufunc_name)) { - retval = -2; - goto fail; - } - } - -#if NPY_UF_DBG_TRACING - printf("input types:\n"); - for (i = 0; i < nin; ++i) { - PyObject_Print((PyObject *)dtype[i], stdout, 0); - printf(" "); - } - printf("\noutput types:\n"); - for (i = nin; i < nop; ++i) { - PyObject_Print((PyObject *)dtype[i], stdout, 0); - printf(" "); - } - printf("\n"); -#endif - - if (subok) { - /* - * Get the appropriate __array_prepare__ function to call - * for each output - */ - _find_array_prepare(args, kwds, arr_prep, nin, nout); - - /* Set up arr_prep_args if a prep function was needed */ - for (i = 0; i < nout; ++i) { - if (arr_prep[i] != NULL && arr_prep[i] != Py_None) { - arr_prep_args = make_arr_prep_args(nin, args, kwds); - break; - } - } - } - - /* If the loop wants the arrays, provide them */ - if (_does_loop_use_arrays(innerloopdata)) { - innerloopdata 
= (void*)op; - } - - /* - * Set up the iterator per-op flags. For generalized ufuncs, we - * can't do buffering, so must COPY or UPDATEIFCOPY. - */ - for (i = 0; i < nin; ++i) { - op_flags[i] = NPY_ITER_READONLY| - NPY_ITER_COPY| - NPY_ITER_ALIGNED; - } - for (i = nin; i < nop; ++i) { - op_flags[i] = NPY_ITER_READWRITE| - NPY_ITER_UPDATEIFCOPY| - NPY_ITER_ALIGNED| - NPY_ITER_ALLOCATE| - NPY_ITER_NO_BROADCAST; - } - - /* Create the iterator */ - iter = NpyIter_AdvancedNew(nop, op, NPY_ITER_MULTI_INDEX| - NPY_ITER_REFS_OK| - NPY_ITER_REDUCE_OK, - order, NPY_UNSAFE_CASTING, op_flags, - dtype, op_ndim, op_axes, NULL, 0); - if (iter == NULL) { - retval = -1; - goto fail; - } - - /* Fill in any allocated outputs */ - for (i = nin; i < nop; ++i) { - if (op[i] == NULL) { - op[i] = NpyIter_GetOperandArray(iter)[i]; - Py_INCREF(op[i]); - } - } - - /* - * Set up the inner strides array. Because we're not doing - * buffering, the strides are fixed throughout the looping. - */ - inner_strides = (npy_intp *)_pya_malloc( - NPY_SIZEOF_INTP * (nop+core_dim_ixs_size)); - /* The strides after the first nop match core_dim_ixs */ - core_dim_ixs = self->core_dim_ixs; - inner_strides_tmp = inner_strides + nop; - for (idim = 0; idim < self->core_num_dim_ix; ++idim) { - ax_strides_tmp[idim] = NpyIter_GetAxisStrideArray(iter, - broadcast_ndim+idim); - if (ax_strides_tmp[idim] == NULL) { - retval = -1; - goto fail; - } - } - for (i = 0; i < nop; ++i) { - for (idim = 0; idim < self->core_num_dims[i]; ++idim) { - inner_strides_tmp[idim] = ax_strides_tmp[core_dim_ixs[idim]][i]; - } - - core_dim_ixs += self->core_num_dims[i]; - inner_strides_tmp += self->core_num_dims[i]; - } - - /* Set up the inner dimensions array */ - if (NpyIter_GetShape(iter, inner_dimensions) != NPY_SUCCEED) { - retval = -1; - goto fail; - } - /* Move the core dimensions to start at the second element */ - memmove(&inner_dimensions[1], &inner_dimensions[broadcast_ndim], - NPY_SIZEOF_INTP * self->core_num_dim_ix); - - /* 
Remove all the core dimensions from the iterator */ - for (i = 0; i < self->core_num_dim_ix; ++i) { - if (NpyIter_RemoveAxis(iter, broadcast_ndim) != NPY_SUCCEED) { - retval = -1; - goto fail; - } - } - if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) { - retval = -1; - goto fail; - } - if (NpyIter_EnableExternalLoop(iter) != NPY_SUCCEED) { - retval = -1; - goto fail; - } - - /* - * The first nop strides are for the inner loop (but only can - * copy them after removing the core axes - */ - memcpy(inner_strides, NpyIter_GetInnerStrideArray(iter), - NPY_SIZEOF_INTP * nop); - -#if 0 - printf("strides: "); - for (i = 0; i < nop+core_dim_ixs_size; ++i) { - printf("%d ", (int)inner_strides[i]); - } - printf("\n"); -#endif - - /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); - - NPY_UF_DBG_PRINT("Executing inner loop\n"); - - /* Do the ufunc loop */ - if (NpyIter_GetIterSize(iter) != 0) { - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *count_ptr; - - /* Get the variables needed for the loop */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(iter); - retval = -1; - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - count_ptr = NpyIter_GetInnerLoopSizePtr(iter); - - do { - inner_dimensions[0] = *count_ptr; - innerloop(dataptr, inner_dimensions, inner_strides, innerloopdata); - } while (iternext(iter)); - } - - /* Check whether any errors occurred during the loop */ - if (PyErr_Occurred() || (errormask && - PyUFunc_checkfperr(errormask, errobj, &first_error))) { - retval = -1; - goto fail; - } - - _pya_free(inner_strides); - NpyIter_Deallocate(iter); - /* The caller takes ownership of all the references in op */ - for (i = 0; i < nop; ++i) { - Py_XDECREF(dtype[i]); - Py_XDECREF(arr_prep[i]); - } - Py_XDECREF(errobj); - Py_XDECREF(type_tup); - Py_XDECREF(arr_prep_args); - - NPY_UF_DBG_PRINT("Returning Success\n"); - - return 0; - -fail: - NPY_UF_DBG_PRINT1("Returning 
failure code %d\n", retval); - if (inner_strides) { - _pya_free(inner_strides); - } - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - op[i] = NULL; - Py_XDECREF(dtype[i]); - Py_XDECREF(arr_prep[i]); - } - Py_XDECREF(errobj); - Py_XDECREF(type_tup); - Py_XDECREF(arr_prep_args); - - return retval; -} - -/*UFUNC_API - * - * This generic function is called with the ufunc object, the arguments to it, - * and an array of (pointers to) PyArrayObjects which are NULL. - */ -NPY_NO_EXPORT int -PyUFunc_GenericFunction(PyUFuncObject *self, - PyObject *args, PyObject *kwds, - PyArrayObject **op) -{ - int nin, nout; - int i, nop; - char *ufunc_name; - int retval = -1, any_object = 0, subok = 1; - NPY_CASTING input_casting; - - PyArray_Descr *dtype[NPY_MAXARGS]; - - /* These parameters come from extobj= or from a TLS global */ - int buffersize = 0, errormask = 0; - PyObject *errobj = NULL; - int first_error = 1; - - /* The selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; - - /* The __array_prepare__ function to call for each output */ - PyObject *arr_prep[NPY_MAXARGS]; - /* - * This is either args, or args with the out= parameter from - * kwds added appropriately. - */ - PyObject *arr_prep_args = NULL; - - int trivial_loop_ok = 0; - - /* TODO: For 1.6, the default should probably be NPY_CORDER */ - NPY_ORDER order = NPY_KEEPORDER; - /* - * Many things in NumPy do unsafe casting (doing int += float, etc). - * The strictness should probably become a state parameter, similar - * to the seterr/geterr. 
- */ - NPY_CASTING casting = NPY_UNSAFE_CASTING; - /* When provided, extobj and typetup contain borrowed references */ - PyObject *extobj = NULL, *type_tup = NULL; - - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return -1; - } - - /* TODO: support generalized ufunc */ - if (self->core_enabled) { - return PyUFunc_GeneralizedFunction(self, args, kwds, op); - } - - nin = self->nin; - nout = self->nout; - nop = nin + nout; - - ufunc_name = self->name ? self->name : ""; - - NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name); - - /* Initialize all the operands and dtypes to NULL */ - for (i = 0; i < nop; ++i) { - op[i] = NULL; - dtype[i] = NULL; - arr_prep[i] = NULL; - } - - NPY_UF_DBG_PRINT("Getting arguments\n"); - - /* Get all the arguments */ - retval = get_ufunc_arguments(self, args, kwds, - op, &order, &casting, &extobj, &type_tup, &subok, &any_object); - if (retval < 0) { - goto fail; - } - - /* Get the buffersize, errormask, and error object globals */ - if (extobj == NULL) { - if (PyUFunc_GetPyValues(ufunc_name, - &buffersize, &errormask, &errobj) < 0) { - retval = -1; - goto fail; - } - } - else { - if (_extract_pyvals(extobj, ufunc_name, - &buffersize, &errormask, &errobj) < 0) { - retval = -1; - goto fail; - } - } - - NPY_UF_DBG_PRINT("Finding inner loop\n"); - - /* - * Decide the casting rules for inputs and outputs. We want - * NPY_SAFE_CASTING or stricter, so that the loop selection code - * doesn't choose an integer loop for float inputs, for example. - */ - input_casting = (casting > NPY_SAFE_CASTING) ? 
NPY_SAFE_CASTING : casting; - - if (type_tup == NULL) { - /* Find the best ufunc inner loop, and fill in the dtypes */ - retval = find_best_ufunc_inner_loop(self, op, input_casting, casting, - buffersize, any_object, dtype, - &innerloop, &innerloopdata, &trivial_loop_ok); - } else { - /* Find the specified ufunc inner loop, and fill in the dtypes */ - retval = find_specified_ufunc_inner_loop(self, type_tup, - op, casting, - buffersize, any_object, dtype, - &innerloop, &innerloopdata, &trivial_loop_ok); - } - if (retval < 0) { - goto fail; - } - - /* - * FAIL with NotImplemented if the other object has - * the __r__ method and has __array_priority__ as - * an attribute (signalling it can handle ndarray's) - * and is not already an ndarray or a subtype of the same type. - */ - if (nin == 2 && nout == 1 && dtype[1]->type_num == NPY_OBJECT) { - PyObject *_obj = PyTuple_GET_ITEM(args, 1); - if (!PyArray_CheckExact(_obj) - /* If both are same subtype of object arrays, then proceed */ - && !(Py_TYPE(_obj) == Py_TYPE(PyTuple_GET_ITEM(args, 0))) - && PyObject_HasAttrString(_obj, "__array_priority__") - && _has_reflected_op(_obj, ufunc_name)) { - retval = -2; - goto fail; - } - } - -#if NPY_UF_DBG_TRACING - printf("input types:\n"); - for (i = 0; i < nin; ++i) { - PyObject_Print((PyObject *)dtype[i], stdout, 0); - printf(" "); - } - printf("\noutput types:\n"); - for (i = nin; i < nop; ++i) { - PyObject_Print((PyObject *)dtype[i], stdout, 0); - printf(" "); - } - printf("\n"); -#endif - - if (subok) { - /* - * Get the appropriate __array_prepare__ function to call - * for each output - */ - _find_array_prepare(args, kwds, arr_prep, nin, nout); - - /* Set up arr_prep_args if a prep function was needed */ - for (i = 0; i < nout; ++i) { - if (arr_prep[i] != NULL && arr_prep[i] != Py_None) { - arr_prep_args = make_arr_prep_args(nin, args, kwds); - break; - } - } - } - - /* If the loop wants the arrays, provide them */ - if (_does_loop_use_arrays(innerloopdata)) { - innerloopdata 
= (void*)op; - } - - /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); - - NPY_UF_DBG_PRINT("Executing inner loop\n"); - - /* Do the ufunc loop */ - retval = execute_ufunc_loop(self, trivial_loop_ok, op, dtype, order, - buffersize, arr_prep, arr_prep_args, - innerloop, innerloopdata); - if (retval < 0) { - goto fail; - } - - /* Check whether any errors occurred during the loop */ - if (PyErr_Occurred() || (errormask && - PyUFunc_checkfperr(errormask, errobj, &first_error))) { - retval = -1; - goto fail; - } - - /* The caller takes ownership of all the references in op */ - for (i = 0; i < nop; ++i) { - Py_XDECREF(dtype[i]); - Py_XDECREF(arr_prep[i]); - } - Py_XDECREF(errobj); - Py_XDECREF(type_tup); - Py_XDECREF(arr_prep_args); - - NPY_UF_DBG_PRINT("Returning Success\n"); - - return 0; - -fail: - NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval); - for (i = 0; i < nop; ++i) { - Py_XDECREF(op[i]); - op[i] = NULL; - Py_XDECREF(dtype[i]); - Py_XDECREF(arr_prep[i]); - } - Py_XDECREF(errobj); - Py_XDECREF(type_tup); - Py_XDECREF(arr_prep_args); - - return retval; -} - -/* - * Given the output type, finds the specified binary op. The - * ufunc must have nin==2 and nout==1. The function may modify - * otype if the given type isn't found. - * - * Returns 0 on success, -1 on failure. 
- */ -static int -get_binary_op_function(PyUFuncObject *self, int *otype, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata) -{ - int i; - PyUFunc_Loop1d *funcdata; - - NPY_UF_DBG_PRINT1("Getting binary op function for type number %d\n", - *otype); - - /* If the type is custom and there are userloops, search for it here */ - if (self->userloops != NULL && PyTypeNum_ISUSERDEF(*otype)) { - PyObject *key, *obj; - key = PyInt_FromLong(*otype); - if (key == NULL) { - return -1; - } - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj != NULL) { - funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); - while (funcdata != NULL) { - int *types = funcdata->arg_types; - - if (types[0] == *otype && types[1] == *otype && - types[2] == *otype) { - *out_innerloop = funcdata->func; - *out_innerloopdata = funcdata->data; - return 0; - } - - funcdata = funcdata->next; - } - } - } - - /* Search for a function with compatible inputs */ - for (i = 0; i < self->ntypes; ++i) { - char *types = self->types + i*self->nargs; - - NPY_UF_DBG_PRINT3("Trying loop with signature %d %d -> %d\n", - types[0], types[1], types[2]); - - if (PyArray_CanCastSafely(*otype, types[0]) && - types[0] == types[1] && - (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) { - /* If the signature is "xx->x", we found the loop */ - if (types[2] == types[0]) { - *out_innerloop = self->functions[i]; - *out_innerloopdata = self->data[i]; - *otype = types[0]; - return 0; - } - /* - * Otherwise, we found the natural type of the reduction, - * replace otype and search again - */ - else { - *otype = types[2]; - break; - } - } - } - - /* Search for the exact function */ - for (i = 0; i < self->ntypes; ++i) { - char *types = self->types + i*self->nargs; - - if (PyArray_CanCastSafely(*otype, types[0]) && - types[0] == types[1] && - types[1] == types[2] && - (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) { - /* Since the signature is "xx->x", we found the loop */ - *out_innerloop = 
self->functions[i]; - *out_innerloopdata = self->data[i]; - *otype = types[0]; - return 0; - } - } - - return -1; -} - -/* - * The implementation of the reduction operators with the new iterator - * turned into a bit of a long function here, but I think the design - * of this part needs to be changed to be more like einsum, so it may - * not be worth refactoring it too much. Consider this timing: - * - * >>> a = arange(10000) - * - * >>> timeit sum(a) - * 10000 loops, best of 3: 17 us per loop - * - * >>> timeit einsum("i->",a) - * 100000 loops, best of 3: 13.5 us per loop - * - */ -static PyObject * -PyUFunc_ReductionOp(PyUFuncObject *self, PyArrayObject *arr, - PyArrayObject *out, - int axis, int otype, int operation, char *opname) -{ - PyArrayObject *op[2]; - PyArray_Descr *op_dtypes[2] = {NULL, NULL}; - int op_axes_arrays[2][NPY_MAXDIMS]; - int *op_axes[2] = {op_axes_arrays[0], op_axes_arrays[1]}; - npy_uint32 op_flags[2]; - int i, idim, ndim, otype_final; - int needs_api, need_outer_iterator; - - NpyIter *iter = NULL, *iter_inner = NULL; - - /* The selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; - - char *ufunc_name = self->name ? 
self->name : "(unknown)"; - - /* These parameters come from extobj= or from a TLS global */ - int buffersize = 0, errormask = 0; - PyObject *errobj = NULL; - - NPY_BEGIN_THREADS_DEF; - - NPY_UF_DBG_PRINT2("\nEvaluating ufunc %s.%s\n", ufunc_name, opname); - -#if 0 - printf("Doing %s.%s on array with dtype : ", ufunc_name, opname); - PyObject_Print((PyObject *)PyArray_DESCR(arr), stdout, 0); - printf("\n"); -#endif - - if (PyUFunc_GetPyValues(opname, &buffersize, &errormask, &errobj) < 0) { - return NULL; - } - - /* Take a reference to out for later returning */ - Py_XINCREF(out); - - otype_final = otype; - if (get_binary_op_function(self, &otype_final, - &innerloop, &innerloopdata) < 0) { - PyArray_Descr *dtype = PyArray_DescrFromType(otype); - PyErr_Format(PyExc_ValueError, - "could not find a matching type for %s.%s, " - "requested type has type code '%c'", - ufunc_name, opname, dtype ? dtype->type : '-'); - Py_XDECREF(dtype); - goto fail; - } - - ndim = PyArray_NDIM(arr); - - /* Set up the output data type */ - op_dtypes[0] = PyArray_DescrFromType(otype_final); - if (op_dtypes[0] == NULL) { - goto fail; - } - -#if NPY_UF_DBG_TRACING - printf("Found %s.%s inner loop with dtype : ", ufunc_name, opname); - PyObject_Print((PyObject *)op_dtypes[0], stdout, 0); - printf("\n"); -#endif - - /* Set up the op_axes for the outer loop */ - if (operation == UFUNC_REDUCE) { - for (i = 0, idim = 0; idim < ndim; ++idim) { - if (idim != axis) { - op_axes_arrays[0][i] = i; - op_axes_arrays[1][i] = idim; - i++; - } - } - } - else if (operation == UFUNC_ACCUMULATE) { - for (idim = 0; idim < ndim; ++idim) { - op_axes_arrays[0][idim] = idim; - op_axes_arrays[1][idim] = idim; - } - } - else { - PyErr_Format(PyExc_RuntimeError, - "invalid reduction operation %s.%s", ufunc_name, opname); - goto fail; - } - - /* The per-operand flags for the outer loop */ - op_flags[0] = NPY_ITER_READWRITE| - NPY_ITER_NO_BROADCAST| - NPY_ITER_ALLOCATE| - NPY_ITER_NO_SUBTYPE; - op_flags[1] = 
NPY_ITER_READONLY; - - op[0] = out; - op[1] = arr; - - need_outer_iterator = (ndim > 1); - if (operation == UFUNC_ACCUMULATE) { - /* This is because we can't buffer, so must do UPDATEIFCOPY */ - if (!PyArray_ISALIGNED(arr) || (out && !PyArray_ISALIGNED(out)) || - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr)) || - (out && - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(out)))) { - need_outer_iterator = 1; - } - } - - if (need_outer_iterator) { - int ndim_iter = 0; - npy_uint32 flags = NPY_ITER_ZEROSIZE_OK| - NPY_ITER_REFS_OK; - PyArray_Descr **op_dtypes_param = NULL; - - if (operation == UFUNC_REDUCE) { - ndim_iter = ndim - 1; - if (out == NULL) { - op_dtypes_param = op_dtypes; - } - } - else if (operation == UFUNC_ACCUMULATE) { - /* - * The way accumulate is set up, we can't do buffering, - * so make a copy instead when necessary. - */ - ndim_iter = ndim; - flags |= NPY_ITER_MULTI_INDEX; - /* Add some more flags */ - op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED; - op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED; - op_dtypes_param = op_dtypes; - op_dtypes[1] = op_dtypes[0]; - } - NPY_UF_DBG_PRINT("Allocating outer iterator\n"); - iter = NpyIter_AdvancedNew(2, op, flags, - NPY_KEEPORDER, NPY_UNSAFE_CASTING, - op_flags, - op_dtypes_param, - ndim_iter, op_axes, NULL, 0); - if (iter == NULL) { - goto fail; - } - - if (operation == UFUNC_ACCUMULATE) { - /* In case COPY or UPDATEIFCOPY occurred */ - op[0] = NpyIter_GetOperandArray(iter)[0]; - op[1] = NpyIter_GetOperandArray(iter)[1]; - - if (PyArray_SIZE(op[0]) == 0) { - if (out == NULL) { - out = op[0]; - Py_INCREF(out); - } - goto finish; - } - - if (NpyIter_RemoveAxis(iter, axis) != NPY_SUCCEED) { - goto fail; - } - if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) { - goto fail; - } - } - } - - /* Get the output */ - if (out == NULL) { - if (iter) { - op[0] = out = NpyIter_GetOperandArray(iter)[0]; - Py_INCREF(out); - } - else { - PyArray_Descr *dtype = op_dtypes[0]; - Py_INCREF(dtype); - if 
(operation == UFUNC_REDUCE) { - op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, dtype, - 0, NULL, NULL, NULL, - 0, NULL); - } - else if (operation == UFUNC_ACCUMULATE) { - op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, dtype, - ndim, PyArray_DIMS(op[1]), NULL, NULL, - 0, NULL); - } - if (out == NULL) { - goto fail; - } - } - } - - /* - * If the reduction unit has size zero, either return the reduction - * unit for UFUNC_REDUCE, or return the zero-sized output array - * for UFUNC_ACCUMULATE. - */ - if (PyArray_DIM(op[1], axis) == 0) { - if (operation == UFUNC_REDUCE) { - if (self->identity == PyUFunc_None) { - PyErr_Format(PyExc_ValueError, - "zero-size array to %s.%s " - "without identity", ufunc_name, opname); - goto fail; - } - if (self->identity == PyUFunc_One) { - PyObject *obj = PyInt_FromLong((long) 1); - if (obj == NULL) { - goto fail; - } - PyArray_FillWithScalar(op[0], obj); - Py_DECREF(obj); - } else { - PyObject *obj = PyInt_FromLong((long) 0); - if (obj == NULL) { - goto fail; - } - PyArray_FillWithScalar(op[0], obj); - Py_DECREF(obj); - } - } - - goto finish; - } - else if (PyArray_SIZE(op[0]) == 0) { - goto finish; - } - - /* Only allocate an inner iterator if it's necessary */ - if (!PyArray_ISALIGNED(op[1]) || !PyArray_ISALIGNED(op[0]) || - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(op[1])) || - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(op[0]))) { - /* Also set the dtype for buffering arr */ - op_dtypes[1] = op_dtypes[0]; - - NPY_UF_DBG_PRINT("Allocating inner iterator\n"); - if (operation == UFUNC_REDUCE) { - /* The per-operand flags for the inner loop */ - op_flags[0] = NPY_ITER_READWRITE| - NPY_ITER_ALIGNED; - op_flags[1] = NPY_ITER_READONLY| - NPY_ITER_ALIGNED; - - op_axes[0][0] = -1; - op_axes[1][0] = axis; - - iter_inner = NpyIter_AdvancedNew(2, op, NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_BUFFERED| - NPY_ITER_DELAY_BUFALLOC| - NPY_ITER_GROWINNER| - NPY_ITER_REDUCE_OK| - NPY_ITER_REFS_OK, - 
NPY_CORDER, NPY_UNSAFE_CASTING, - op_flags, op_dtypes, - 1, op_axes, NULL, buffersize); - } - /* Should never get an inner iterator for ACCUMULATE */ - else { - PyErr_SetString(PyExc_RuntimeError, - "internal ufunc reduce error, should not need inner iterator"); - goto fail; - } - if (iter_inner == NULL) { - goto fail; - } - } - - if (iter && NpyIter_GetIterSize(iter) != 0) { - char *dataptr_copy[3]; - npy_intp stride_copy[3]; - - NpyIter_IterNextFunc *iternext; - char **dataptr; - - int itemsize = op_dtypes[0]->elsize; - - /* Get the variables needed for the loop */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - - - /* Execute the loop with two nested iterators */ - if (iter_inner) { - /* Only UFUNC_REDUCE uses iter_inner */ - NpyIter_IterNextFunc *iternext_inner; - char **dataptr_inner; - npy_intp *stride_inner; - npy_intp count, *count_ptr_inner; - - NPY_UF_DBG_PRINT("UFunc: Reduce loop with two nested iterators\n"); - iternext_inner = NpyIter_GetIterNext(iter_inner, NULL); - if (iternext_inner == NULL) { - goto fail; - } - dataptr_inner = NpyIter_GetDataPtrArray(iter_inner); - stride_inner = NpyIter_GetInnerStrideArray(iter_inner); - count_ptr_inner = NpyIter_GetInnerLoopSizePtr(iter_inner); - - needs_api = NpyIter_IterationNeedsAPI(iter) || - NpyIter_IterationNeedsAPI(iter_inner); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - do { - int first = 1; - - /* Reset the inner iterator to the outer's data */ - if (NpyIter_ResetBasePointers(iter_inner, dataptr, NULL) - != NPY_SUCCEED) { - goto fail; - } - - /* Copy the first element to start the reduction */ - if (otype == NPY_OBJECT) { - Py_XDECREF(*(PyObject **)dataptr_inner[0]); - *(PyObject **)dataptr_inner[0] = - *(PyObject **)dataptr_inner[1]; - Py_XINCREF(*(PyObject **)dataptr_inner[0]); - } - else { - memcpy(dataptr_inner[0], dataptr_inner[1], itemsize); - } - - stride_copy[0] = 0; - stride_copy[2] = 0; - do { - count = 
*count_ptr_inner; - /* Turn the two items into three for the inner loop */ - dataptr_copy[0] = dataptr_inner[0]; - dataptr_copy[1] = dataptr_inner[1]; - dataptr_copy[2] = dataptr_inner[0]; - if (first) { - --count; - dataptr_copy[1] += stride_inner[1]; - first = 0; - } - stride_copy[1] = stride_inner[1]; - NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); - } while(iternext_inner(iter_inner)); - } while (iternext(iter)); - - if (!needs_api) { - NPY_END_THREADS; - } - } - /* Execute the loop with just the outer iterator */ - else { - npy_intp count_m1 = PyArray_DIM(op[1], axis)-1; - npy_intp stride0 = 0, stride1 = PyArray_STRIDE(op[1], axis); - - NPY_UF_DBG_PRINT("UFunc: Reduce loop with just outer iterator\n"); - - if (operation == UFUNC_ACCUMULATE) { - stride0 = PyArray_STRIDE(op[0], axis); - } - - stride_copy[0] = stride0; - stride_copy[1] = stride1; - stride_copy[2] = stride0; - - needs_api = NpyIter_IterationNeedsAPI(iter); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - do { - - dataptr_copy[0] = dataptr[0]; - dataptr_copy[1] = dataptr[1]; - dataptr_copy[2] = dataptr[0]; - - /* Copy the first element to start the reduction */ - if (otype == NPY_OBJECT) { - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; - Py_XINCREF(*(PyObject **)dataptr_copy[0]); - } - else { - memcpy(dataptr_copy[0], dataptr_copy[1], itemsize); - } - - if (count_m1 > 0) { - /* Turn the two items into three for the inner loop */ - if (operation == UFUNC_REDUCE) { - dataptr_copy[1] += stride1; - } - else if (operation == UFUNC_ACCUMULATE) { - dataptr_copy[1] += stride1; - dataptr_copy[2] += stride0; - } - NPY_UF_DBG_PRINT1("iterator loop count %d\n", - (int)count_m1); - innerloop(dataptr_copy, &count_m1, - stride_copy, innerloopdata); - } - } while (iternext(iter)); - - if (!needs_api) { - NPY_END_THREADS; - } - } - } - else if (iter == NULL) { - char 
*dataptr_copy[3]; - npy_intp stride_copy[3]; - - int itemsize = op_dtypes[0]->elsize; - - /* Execute the loop with just the inner iterator */ - if (iter_inner) { - /* Only UFUNC_REDUCE uses iter_inner */ - NpyIter_IterNextFunc *iternext_inner; - char **dataptr_inner; - npy_intp *stride_inner; - npy_intp count, *count_ptr_inner; - int first = 1; - - NPY_UF_DBG_PRINT("UFunc: Reduce loop with just inner iterator\n"); - - iternext_inner = NpyIter_GetIterNext(iter_inner, NULL); - if (iternext_inner == NULL) { - goto fail; - } - dataptr_inner = NpyIter_GetDataPtrArray(iter_inner); - stride_inner = NpyIter_GetInnerStrideArray(iter_inner); - count_ptr_inner = NpyIter_GetInnerLoopSizePtr(iter_inner); - - /* Reset the inner iterator to prepare the buffers */ - if (NpyIter_Reset(iter_inner, NULL) != NPY_SUCCEED) { - goto fail; - } - - needs_api = NpyIter_IterationNeedsAPI(iter_inner); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - /* Copy the first element to start the reduction */ - if (otype == NPY_OBJECT) { - Py_XDECREF(*(PyObject **)dataptr_inner[0]); - *(PyObject **)dataptr_inner[0] = - *(PyObject **)dataptr_inner[1]; - Py_XINCREF(*(PyObject **)dataptr_inner[0]); - } - else { - memcpy(dataptr_inner[0], dataptr_inner[1], itemsize); - } - - stride_copy[0] = 0; - stride_copy[2] = 0; - do { - count = *count_ptr_inner; - /* Turn the two items into three for the inner loop */ - dataptr_copy[0] = dataptr_inner[0]; - dataptr_copy[1] = dataptr_inner[1]; - dataptr_copy[2] = dataptr_inner[0]; - if (first) { - --count; - dataptr_copy[1] += stride_inner[1]; - first = 0; - } - stride_copy[1] = stride_inner[1]; - NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); - } while(iternext_inner(iter_inner)); - - if (!needs_api) { - NPY_END_THREADS; - } - } - /* Execute the loop with no iterators */ - else { - npy_intp count = PyArray_DIM(op[1], axis); - npy_intp stride0 = 0, stride1 = PyArray_STRIDE(op[1], axis); - - 
NPY_UF_DBG_PRINT("UFunc: Reduce loop with no iterators\n"); - - if (operation == UFUNC_REDUCE) { - if (PyArray_NDIM(op[0]) != 0) { - PyErr_SetString(PyExc_ValueError, - "provided out is the wrong size " - "for the reduction"); - goto fail; - } - } - else if (operation == UFUNC_ACCUMULATE) { - if (PyArray_NDIM(op[0]) != PyArray_NDIM(op[1]) || - !PyArray_CompareLists(PyArray_DIMS(op[0]), - PyArray_DIMS(op[1]), - PyArray_NDIM(op[0]))) { - PyErr_SetString(PyExc_ValueError, - "provided out is the wrong size " - "for the reduction"); - goto fail; - } - stride0 = PyArray_STRIDE(op[0], axis); - } - - stride_copy[0] = stride0; - stride_copy[1] = stride1; - stride_copy[2] = stride0; - - /* Turn the two items into three for the inner loop */ - dataptr_copy[0] = PyArray_BYTES(op[0]); - dataptr_copy[1] = PyArray_BYTES(op[1]); - dataptr_copy[2] = PyArray_BYTES(op[0]); - - /* Copy the first element to start the reduction */ - if (otype == NPY_OBJECT) { - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; - Py_XINCREF(*(PyObject **)dataptr_copy[0]); - } - else { - memcpy(dataptr_copy[0], dataptr_copy[1], itemsize); - } - - if (count > 1) { - --count; - if (operation == UFUNC_REDUCE) { - dataptr_copy[1] += stride1; - } - else if (operation == UFUNC_ACCUMULATE) { - dataptr_copy[1] += stride1; - dataptr_copy[2] += stride0; - } - - NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - - needs_api = PyDataType_REFCHK(op_dtypes[0]); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); - - if (!needs_api) { - NPY_END_THREADS; - } - } - } - } - -finish: - Py_XDECREF(op_dtypes[0]); - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - if (iter_inner != NULL) { - NpyIter_Deallocate(iter_inner); - } - - Py_XDECREF(errobj); - - return (PyObject *)out; - -fail: - Py_XDECREF(out); - Py_XDECREF(op_dtypes[0]); - - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - if 
(iter_inner != NULL) { - NpyIter_Deallocate(iter_inner); - } - - Py_XDECREF(errobj); - - return NULL; -} - -/* - * We have two basic kinds of loops. One is used when arr is not-swapped - * and aligned and output type is the same as input type. The other uses - * buffers when one of these is not satisfied. - * - * Zero-length and one-length axes-to-be-reduced are handled separately. - */ -static PyObject * -PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) -{ - return PyUFunc_ReductionOp(self, arr, out, axis, otype, - UFUNC_REDUCE, "reduce"); -} - - -static PyObject * -PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) -{ - return PyUFunc_ReductionOp(self, arr, out, axis, otype, - UFUNC_ACCUMULATE, "accumulate"); -} - -/* - * Reduceat performs a reduce over an axis using the indices as a guide - * - * op.reduceat(array,indices) computes - * op.reduce(array[indices[i]:indices[i+1]] - * for i=0..end with an implicit indices[i+1]=len(array) - * assumed when i=end-1 - * - * if indices[i+1] <= indices[i]+1 - * then the result is array[indices[i]] for that value - * - * op.accumulate(array) is the same as - * op.reduceat(array,indices)[::2] - * where indices is range(len(array)-1) with a zero placed in every other sample - * indices = zeros(len(array)*2-1) - * indices[1::2] = range(1,len(array)) - * - * output shape is based on the size of indices - */ -static PyObject * -PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind, - PyArrayObject *out, int axis, int otype) -{ - PyArrayObject *op[3]; - PyArray_Descr *op_dtypes[3] = {NULL, NULL, NULL}; - int op_axes_arrays[3][NPY_MAXDIMS]; - int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1], - op_axes_arrays[2]}; - npy_uint32 op_flags[3]; - int i, idim, ndim, otype_final; - int needs_api, need_outer_iterator; - - NpyIter *iter = NULL; - - /* The reduceat indices - ind must be validated outside this 
call */ - npy_intp *reduceat_ind; - npy_intp ind_size, red_axis_size; - /* The selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; - - char *ufunc_name = self->name ? self->name : "(unknown)"; - char *opname = "reduceat"; - - /* These parameters come from extobj= or from a TLS global */ - int buffersize = 0, errormask = 0; - PyObject *errobj = NULL; - - NPY_BEGIN_THREADS_DEF; - - reduceat_ind = (npy_intp *)PyArray_DATA(ind); - ind_size = PyArray_DIM(ind, 0); - red_axis_size = PyArray_DIM(arr, axis); - - /* Check for out-of-bounds values in indices array */ - for (i = 0; i < ind_size; ++i) { - if (reduceat_ind[i] < 0 || reduceat_ind[i] >= red_axis_size) { - PyErr_Format(PyExc_IndexError, - "index %d out-of-bounds in %s.%s [0, %d)", - (int)reduceat_ind[i], ufunc_name, opname, (int)red_axis_size); - return NULL; - } - } - - NPY_UF_DBG_PRINT2("\nEvaluating ufunc %s.%s\n", ufunc_name, opname); - -#if 0 - printf("Doing %s.%s on array with dtype : ", ufunc_name, opname); - PyObject_Print((PyObject *)PyArray_DESCR(arr), stdout, 0); - printf("\n"); - printf("Index size is %d\n", (int)ind_size); -#endif - - if (PyUFunc_GetPyValues(opname, &buffersize, &errormask, &errobj) < 0) { - return NULL; - } - - /* Take a reference to out for later returning */ - Py_XINCREF(out); - - otype_final = otype; - if (get_binary_op_function(self, &otype_final, - &innerloop, &innerloopdata) < 0) { - PyArray_Descr *dtype = PyArray_DescrFromType(otype); - PyErr_Format(PyExc_ValueError, - "could not find a matching type for %s.%s, " - "requested type has type code '%c'", - ufunc_name, opname, dtype ? 
dtype->type : '-'); - Py_XDECREF(dtype); - goto fail; - } - - ndim = PyArray_NDIM(arr); - - /* Set up the output data type */ - op_dtypes[0] = PyArray_DescrFromType(otype_final); - if (op_dtypes[0] == NULL) { - goto fail; - } - -#if NPY_UF_DBG_TRACING - printf("Found %s.%s inner loop with dtype : ", ufunc_name, opname); - PyObject_Print((PyObject *)op_dtypes[0], stdout, 0); - printf("\n"); -#endif - - /* Set up the op_axes for the outer loop */ - for (i = 0, idim = 0; idim < ndim; ++idim) { - /* Use the i-th iteration dimension to match up ind */ - if (idim == axis) { - op_axes_arrays[0][idim] = axis; - op_axes_arrays[1][idim] = -1; - op_axes_arrays[2][idim] = 0; - } - else { - op_axes_arrays[0][idim] = idim; - op_axes_arrays[1][idim] = idim; - op_axes_arrays[2][idim] = -1; - } - } - - op[0] = out; - op[1] = arr; - op[2] = ind; - - /* Likewise with accumulate, must do UPDATEIFCOPY */ - if (out != NULL || ndim > 1 || !PyArray_ISALIGNED(arr) || - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr))) { - need_outer_iterator = 1; - } - - if (need_outer_iterator) { - npy_uint32 flags = NPY_ITER_ZEROSIZE_OK| - NPY_ITER_REFS_OK| - NPY_ITER_MULTI_INDEX; - - /* - * The way reduceat is set up, we can't do buffering, - * so make a copy instead when necessary. 
- */ - - /* The per-operand flags for the outer loop */ - op_flags[0] = NPY_ITER_READWRITE| - NPY_ITER_NO_BROADCAST| - NPY_ITER_ALLOCATE| - NPY_ITER_NO_SUBTYPE| - NPY_ITER_UPDATEIFCOPY| - NPY_ITER_ALIGNED; - op_flags[1] = NPY_ITER_READONLY| - NPY_ITER_COPY| - NPY_ITER_ALIGNED; - op_flags[2] = NPY_ITER_READONLY; - - op_dtypes[1] = op_dtypes[0]; - - NPY_UF_DBG_PRINT("Allocating outer iterator\n"); - iter = NpyIter_AdvancedNew(3, op, flags, - NPY_KEEPORDER, NPY_UNSAFE_CASTING, - op_flags, - op_dtypes, - ndim, op_axes, NULL, 0); - if (iter == NULL) { - goto fail; - } - - /* Remove the inner loop axis from the outer iterator */ - if (NpyIter_RemoveAxis(iter, axis) != NPY_SUCCEED) { - goto fail; - } - if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) { - goto fail; - } - - /* In case COPY or UPDATEIFCOPY occurred */ - op[0] = NpyIter_GetOperandArray(iter)[0]; - op[1] = NpyIter_GetOperandArray(iter)[1]; - - if (out == NULL) { - out = op[0]; - Py_INCREF(out); - } - } - /* Allocate the output for when there's no outer iterator */ - else if (out == NULL) { - Py_INCREF(op_dtypes[0]); - op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, op_dtypes[0], - 1, &ind_size, NULL, NULL, - 0, NULL); - if (out == NULL) { - goto fail; - } - } - - /* - * If the output has zero elements, return now. 
- */ - if (PyArray_SIZE(op[0]) == 0) { - goto finish; - } - - if (iter && NpyIter_GetIterSize(iter) != 0) { - char *dataptr_copy[3]; - npy_intp stride_copy[3]; - - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp count_m1; - npy_intp stride0, stride1; - npy_intp stride0_ind = PyArray_STRIDE(op[0], axis); - - int itemsize = op_dtypes[0]->elsize; - - /* Get the variables needed for the loop */ - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - - /* Execute the loop with just the outer iterator */ - count_m1 = PyArray_DIM(op[1], axis)-1; - stride0 = 0; - stride1 = PyArray_STRIDE(op[1], axis); - - NPY_UF_DBG_PRINT("UFunc: Reduce loop with just outer iterator\n"); - - stride_copy[0] = stride0; - stride_copy[1] = stride1; - stride_copy[2] = stride0; - - needs_api = NpyIter_IterationNeedsAPI(iter); - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - do { - - for (i = 0; i < ind_size; ++i) { - npy_intp start = reduceat_ind[i], - end = (i == ind_size-1) ? 
count_m1+1 : - reduceat_ind[i+1]; - npy_intp count = end - start; - - dataptr_copy[0] = dataptr[0] + stride0_ind*i; - dataptr_copy[1] = dataptr[1] + stride1*start; - dataptr_copy[2] = dataptr[0] + stride0_ind*i; - - /* Copy the first element to start the reduction */ - if (otype == NPY_OBJECT) { - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; - Py_XINCREF(*(PyObject **)dataptr_copy[0]); - } - else { - memcpy(dataptr_copy[0], dataptr_copy[1], itemsize); - } - - if (count > 1) { - /* Inner loop like REDUCE */ - --count; - dataptr_copy[1] += stride1; - NPY_UF_DBG_PRINT1("iterator loop count %d\n", - (int)count); - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); - } - } - } while (iternext(iter)); - - if (!needs_api) { - NPY_END_THREADS; - } - } - else if (iter == NULL) { - char *dataptr_copy[3]; - npy_intp stride_copy[3]; - - int itemsize = op_dtypes[0]->elsize; - - npy_intp stride0_ind = PyArray_STRIDE(op[0], axis); - - /* Execute the loop with no iterators */ - npy_intp stride0 = 0, stride1 = PyArray_STRIDE(op[1], axis); - - needs_api = PyDataType_REFCHK(op_dtypes[0]); - - NPY_UF_DBG_PRINT("UFunc: Reduce loop with no iterators\n"); - - stride_copy[0] = stride0; - stride_copy[1] = stride1; - stride_copy[2] = stride0; - - if (!needs_api) { - NPY_BEGIN_THREADS; - } - - for (i = 0; i < ind_size; ++i) { - npy_intp start = reduceat_ind[i], - end = (i == ind_size-1) ? 
PyArray_DIM(arr,axis) : - reduceat_ind[i+1]; - npy_intp count = end - start; - - dataptr_copy[0] = PyArray_BYTES(op[0]) + stride0_ind*i; - dataptr_copy[1] = PyArray_BYTES(op[1]) + stride1*start; - dataptr_copy[2] = PyArray_BYTES(op[0]) + stride0_ind*i; - - /* Copy the first element to start the reduction */ - if (otype == NPY_OBJECT) { - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; - Py_XINCREF(*(PyObject **)dataptr_copy[0]); - } - else { - memcpy(dataptr_copy[0], dataptr_copy[1], itemsize); - } - - if (count > 1) { - /* Inner loop like REDUCE */ - --count; - dataptr_copy[1] += stride1; - NPY_UF_DBG_PRINT1("iterator loop count %d\n", - (int)count); - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); - } - } - - if (!needs_api) { - NPY_END_THREADS; - } - } - -finish: - Py_XDECREF(op_dtypes[0]); - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - - Py_XDECREF(errobj); - - return (PyObject *)out; - -fail: - Py_XDECREF(out); - Py_XDECREF(op_dtypes[0]); - - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - - Py_XDECREF(errobj); - - return NULL; -} - - -/* - * This code handles reduce, reduceat, and accumulate - * (accumulate and reduce are special cases of the more general reduceat - * but they are handled separately for speed) - */ -static PyObject * -PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args, - PyObject *kwds, int operation) -{ - int axis=0; - PyArrayObject *mp, *ret = NULL; - PyObject *op, *res = NULL; - PyObject *obj_ind, *context; - PyArrayObject *indices = NULL; - PyArray_Descr *otype = NULL; - PyArrayObject *out = NULL; - static char *kwlist1[] = {"array", "axis", "dtype", "out", NULL}; - static char *kwlist2[] = {"array", "indices", "axis", "dtype", "out", NULL}; - static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL}; - - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return NULL; - } - if 
(self->core_enabled) { - PyErr_Format(PyExc_RuntimeError, - "Reduction not defined on ufunc with signature"); - return NULL; - } - if (self->nin != 2) { - PyErr_Format(PyExc_ValueError, - "%s only supported for binary functions", - _reduce_type[operation]); - return NULL; - } - if (self->nout != 1) { - PyErr_Format(PyExc_ValueError, - "%s only supported for functions " \ - "returning a single value", - _reduce_type[operation]); - return NULL; - } - - if (operation == UFUNC_REDUCEAT) { - PyArray_Descr *indtype; - indtype = PyArray_DescrFromType(PyArray_INTP); - if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO|iO&O&", kwlist2, - &op, &obj_ind, &axis, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(otype); - return NULL; - } - indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, - 1, 1, CARRAY, NULL); - if (indices == NULL) { - Py_XDECREF(otype); - return NULL; - } - } - else { - if(!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O&", kwlist1, - &op, &axis, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(otype); - return NULL; - } - } - /* Ensure input is an array */ - if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) { - context = Py_BuildValue("O(O)i", self, op, 0); - } - else { - context = NULL; - } - mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); - Py_XDECREF(context); - if (mp == NULL) { - return NULL; - } - /* Check to see if input is zero-dimensional */ - if (mp->nd == 0) { - PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", - _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; - } - /* Check to see that type (and otype) is not FLEXIBLE */ - if (PyArray_ISFLEXIBLE(mp) || - (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) { - PyErr_Format(PyExc_TypeError, - "cannot perform %s with flexible type", - _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; - } - - if (axis < 0) { - axis += mp->nd; - } - if 
(axis < 0 || axis >= mp->nd) { - PyErr_SetString(PyExc_ValueError, "axis not in array"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; - } - /* - * If out is specified it determines otype - * unless otype already specified. - */ - if (otype == NULL && out != NULL) { - otype = out->descr; - Py_INCREF(otype); - } - if (otype == NULL) { - /* - * For integer types --- make sure at least a long - * is used for add and multiply reduction to avoid overflow - */ - int typenum = PyArray_TYPE(mp); - if ((PyTypeNum_ISBOOL(typenum) || PyTypeNum_ISINTEGER(typenum)) - && ((strcmp(self->name,"add") == 0) - || (strcmp(self->name,"multiply") == 0))) { - if (PyTypeNum_ISBOOL(typenum)) { - typenum = PyArray_LONG; - } - else if ((size_t)mp->descr->elsize < sizeof(long)) { - if (PyTypeNum_ISUNSIGNED(typenum)) { - typenum = PyArray_ULONG; - } - else { - typenum = PyArray_LONG; - } - } - } - otype = PyArray_DescrFromType(typenum); - } - - - switch(operation) { - case UFUNC_REDUCE: - ret = (PyArrayObject *)PyUFunc_Reduce(self, mp, out, axis, - otype->type_num); - break; - case UFUNC_ACCUMULATE: - ret = (PyArrayObject *)PyUFunc_Accumulate(self, mp, out, axis, - otype->type_num); - break; - case UFUNC_REDUCEAT: - ret = (PyArrayObject *)PyUFunc_Reduceat(self, mp, indices, out, - axis, otype->type_num); - Py_DECREF(indices); - break; - } - Py_DECREF(mp); - Py_DECREF(otype); - if (ret == NULL) { - return NULL; - } - if (Py_TYPE(op) != Py_TYPE(ret)) { - res = PyObject_CallMethod(op, "__array_wrap__", "O", ret); - if (res == NULL) { - PyErr_Clear(); - } - else if (res == Py_None) { - Py_DECREF(res); - } - else { - Py_DECREF(ret); - return res; - } - } - return PyArray_Return(ret); -} - -/* - * This function analyzes the input arguments - * and determines an appropriate __array_wrap__ function to call - * for the outputs. - * - * If an output argument is provided, then it is wrapped - * with its own __array_wrap__ not with the one determined by - * the input arguments. 
- * - * if the provided output argument is already an array, - * the wrapping function is None (which means no wrapping will - * be done --- not even PyArray_Return). - * - * A NULL is placed in output_wrap for outputs that - * should just have PyArray_Return called. - */ -static void -_find_array_wrap(PyObject *args, PyObject *kwds, - PyObject **output_wrap, int nin, int nout) -{ - Py_ssize_t nargs; - int i; - int np = 0; - PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS]; - PyObject *obj, *wrap = NULL; - - /* If a 'subok' parameter is passed and isn't True, don't wrap */ - if (kwds != NULL && (obj = PyDict_GetItemString(kwds, "subok")) != NULL) { - if (obj != Py_True) { - for (i = 0; i < nout; i++) { - output_wrap[i] = NULL; - } - return; - } - } - - nargs = PyTuple_GET_SIZE(args); - for (i = 0; i < nin; i++) { - obj = PyTuple_GET_ITEM(args, i); - if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) { - continue; - } - wrap = PyObject_GetAttrString(obj, "__array_wrap__"); - if (wrap) { - if (PyCallable_Check(wrap)) { - with_wrap[np] = obj; - wraps[np] = wrap; - ++np; - } - else { - Py_DECREF(wrap); - wrap = NULL; - } - } - else { - PyErr_Clear(); - } - } - if (np > 0) { - /* If we have some wraps defined, find the one of highest priority */ - wrap = wraps[0]; - if (np > 1) { - double maxpriority = PyArray_GetPriority(with_wrap[0], - PyArray_SUBTYPE_PRIORITY); - for (i = 1; i < np; ++i) { - double priority = PyArray_GetPriority(with_wrap[i], - PyArray_SUBTYPE_PRIORITY); - if (priority > maxpriority) { - maxpriority = priority; - Py_DECREF(wrap); - wrap = wraps[i]; - } - else { - Py_DECREF(wraps[i]); - } - } - } - } - - /* - * Here wrap is the wrapping function determined from the - * input arrays (could be NULL). - * - * For all the output arrays decide what to do. - * - * 1) Use the wrap function determined from the input arrays - * This is the default if the output array is not - * passed in. 
- * - * 2) Use the __array_wrap__ method of the output object - * passed in. -- this is special cased for - * exact ndarray so that no PyArray_Return is - * done in that case. - */ - for (i = 0; i < nout; i++) { - int j = nin + i; - int incref = 1; - output_wrap[i] = wrap; - obj = NULL; - if (j < nargs) { - obj = PyTuple_GET_ITEM(args, j); - /* Output argument one may also be in a keyword argument */ - if (i == 0 && obj == Py_None && kwds != NULL) { - obj = PyDict_GetItemString(kwds, "out"); - } - } - /* Output argument one may also be in a keyword argument */ - else if (i == 0 && kwds != NULL) { - obj = PyDict_GetItemString(kwds, "out"); - } - - if (obj != Py_None && obj != NULL) { - if (PyArray_CheckExact(obj)) { - /* None signals to not call any wrapping */ - output_wrap[i] = Py_None; - } - else { - PyObject *owrap = PyObject_GetAttrString(obj,"__array_wrap__"); - incref = 0; - if (!(owrap) || !(PyCallable_Check(owrap))) { - Py_XDECREF(owrap); - owrap = wrap; - incref = 1; - PyErr_Clear(); - } - output_wrap[i] = owrap; - } - } - - if (incref) { - Py_XINCREF(output_wrap[i]); - } - } - Py_XDECREF(wrap); - return; -} - - -static PyObject * -ufunc_generic_call(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - int i; - PyTupleObject *ret; - PyArrayObject *mps[NPY_MAXARGS]; - PyObject *retobj[NPY_MAXARGS]; - PyObject *wraparr[NPY_MAXARGS]; - PyObject *res; - int errval; - - /* - * Initialize all array objects to NULL to make cleanup easier - * if something goes wrong. 
- */ - for(i = 0; i < self->nargs; i++) { - mps[i] = NULL; - } - errval = PyUFunc_GenericFunction(self, args, kwds, mps); - if (errval < 0) { - for (i = 0; i < self->nargs; i++) { - PyArray_XDECREF_ERR(mps[i]); - } - if (errval == -1) - return NULL; - else if (self->nin == 2 && self->nout == 1) { - /* To allow the other argument to be given a chance - */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - else { - PyErr_SetString(PyExc_NotImplementedError, - "Not implemented for this type"); - return NULL; - } - } - - /* Free the input references */ - for (i = 0; i < self->nin; i++) { - Py_DECREF(mps[i]); - } - - /* - * Use __array_wrap__ on all outputs - * if present on one of the input arguments. - * If present for multiple inputs: - * use __array_wrap__ of input object with largest - * __array_priority__ (default = 0.0) - * - * Exception: we should not wrap outputs for items already - * passed in as output-arguments. These items should either - * be left unwrapped or wrapped by calling their own __array_wrap__ - * routine. - * - * For each output argument, wrap will be either - * NULL --- call PyArray_Return() -- default if no output arguments given - * None --- array-object passed in don't call PyArray_Return - * method --- the __array_wrap__ method to call. 
- */ - _find_array_wrap(args, kwds, wraparr, self->nin, self->nout); - - /* wrap outputs */ - for (i = 0; i < self->nout; i++) { - int j = self->nin+i; - PyObject *wrap = wraparr[i]; - - if (wrap != NULL) { - if (wrap == Py_None) { - Py_DECREF(wrap); - retobj[i] = (PyObject *)mps[j]; - continue; - } - res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], self, args, i); - if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL); - } - Py_DECREF(wrap); - if (res == NULL) { - goto fail; - } - else if (res == Py_None) { - Py_DECREF(res); - } - else { - Py_DECREF(mps[j]); - retobj[i] = res; - continue; - } - } - /* default behavior */ - retobj[i] = PyArray_Return(mps[j]); - } - - if (self->nout == 1) { - return retobj[0]; - } - else { - ret = (PyTupleObject *)PyTuple_New(self->nout); - for (i = 0; i < self->nout; i++) { - PyTuple_SET_ITEM(ret, i, retobj[i]); - } - return (PyObject *)ret; - } - -fail: - for (i = self->nin; i < self->nargs; i++) { - Py_XDECREF(mps[i]); - } - return NULL; -} - -NPY_NO_EXPORT PyObject * -ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *thedict; - PyObject *res; - - if (!PyArg_ParseTuple(args, "")) { - return NULL; - } - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = PyUString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - res = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME); - if (res != NULL) { - Py_INCREF(res); - return res; - } - /* Construct list of defaults */ - res = PyList_New(3); - if (res == NULL) { - return NULL; - } - PyList_SET_ITEM(res, 0, PyInt_FromLong(PyArray_BUFSIZE)); - PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT)); - PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None); - return res; -} - -#if USE_USE_DEFAULTS==1 -/* - * This is a strategy to buy a little speed up and avoid the dictionary - * look-up in 
the default case. It should work in the presence of - * threads. If it is deemed too complicated or it doesn't actually work - * it could be taken out. - */ -static int -ufunc_update_use_defaults(void) -{ - PyObject *errobj = NULL; - int errmask, bufsize; - int res; - - PyUFunc_NUM_NODEFAULTS += 1; - res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj); - PyUFunc_NUM_NODEFAULTS -= 1; - if (res < 0) { - Py_XDECREF(errobj); - return -1; - } - if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != PyArray_BUFSIZE) - || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { - PyUFunc_NUM_NODEFAULTS += 1; - } - else if (PyUFunc_NUM_NODEFAULTS > 0) { - PyUFunc_NUM_NODEFAULTS -= 1; - } - Py_XDECREF(errobj); - return 0; -} -#endif - -NPY_NO_EXPORT PyObject * -ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *thedict; - int res; - PyObject *val; - static char *msg = "Error object must be a list of length 3"; - - if (!PyArg_ParseTuple(args, "O", &val)) { - return NULL; - } - if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = PyUString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - res = PyDict_SetItem(thedict, PyUFunc_PYVALS_NAME, val); - if (res < 0) { - return NULL; - } -#if USE_USE_DEFAULTS==1 - if (ufunc_update_use_defaults() < 0) { - return NULL; - } -#endif - Py_INCREF(Py_None); - return Py_None; -} - - - -/*UFUNC_API*/ -NPY_NO_EXPORT int -PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func, - PyUFuncGenericFunction newfunc, - int *signature, - PyUFuncGenericFunction *oldfunc) -{ - int i, j; - int res = -1; - /* Find the location of the matching signature */ - for (i = 0; i < func->ntypes; i++) { - for (j = 0; j < func->nargs; j++) { - if (signature[j] != func->types[i*func->nargs+j]) { - break; - } - } - if (j < func->nargs) { - 
continue; - } - if (oldfunc != NULL) { - *oldfunc = func->functions[i]; - } - func->functions[i] = newfunc; - res = 0; - break; - } - return res; -} - -/*UFUNC_API*/ -NPY_NO_EXPORT PyObject * -PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data, - char *types, int ntypes, - int nin, int nout, int identity, - char *name, char *doc, int check_return) -{ - return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, - nin, nout, identity, name, doc, check_return, NULL); -} - -/*UFUNC_API*/ -NPY_NO_EXPORT PyObject * -PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, - char *types, int ntypes, - int nin, int nout, int identity, - char *name, char *doc, - int check_return, const char *signature) -{ - PyUFuncObject *self; - - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) { - return NULL; - } - PyObject_Init((PyObject *)self, &PyUFunc_Type); - - self->nin = nin; - self->nout = nout; - self->nargs = nin+nout; - self->identity = identity; - - self->functions = func; - self->data = data; - self->types = types; - self->ntypes = ntypes; - self->check_return = check_return; - self->ptr = NULL; - self->obj = NULL; - self->userloops=NULL; - - if (name == NULL) { - self->name = "?"; - } - else { - self->name = name; - } - if (doc == NULL) { - self->doc = "NULL"; - } - else { - self->doc = doc; - } - - /* generalized ufunc */ - self->core_enabled = 0; - self->core_num_dim_ix = 0; - self->core_num_dims = NULL; - self->core_dim_ixs = NULL; - self->core_offsets = NULL; - self->core_signature = NULL; - if (signature != NULL) { - if (_parse_signature(self, signature) != 0) { - Py_DECREF(self); - return NULL; - } - } - return (PyObject *)self; -} - -/* Specify that the loop specified by the given index should use the array of - * input and arrays as the data pointer to the loop. 
- */ -/*UFUNC_API*/ -NPY_NO_EXPORT int -PyUFunc_SetUsesArraysAsData(void **data, size_t i) -{ - data[i] = (void*)PyUFunc_SetUsesArraysAsData; - return 0; -} - -/* Return 1 if the given data pointer for the loop specifies that it needs the - * arrays as the data pointer. - */ -static int -_does_loop_use_arrays(void *data) -{ - return (data == PyUFunc_SetUsesArraysAsData); -} - - -/* - * This is the first-part of the CObject structure. - * - * I don't think this will change, but if it should, then - * this needs to be fixed. The exposed C-API was insufficient - * because I needed to replace the pointer and it wouldn't - * let me with a destructor set (even though it works fine - * with the destructor). - */ -typedef struct { - PyObject_HEAD - void *c_obj; -} _simple_cobj; - -#define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) - -/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 */ -static int -cmp_arg_types(int *arg1, int *arg2, int n) -{ - for (; n > 0; n--, arg1++, arg2++) { - if (PyArray_EquivTypenums(*arg1, *arg2)) { - continue; - } - if (PyArray_CanCastSafely(*arg1, *arg2)) { - return -1; - } - return 1; - } - return 0; -} - -/* - * This frees the linked-list structure when the CObject - * is destroyed (removed from the internal dictionary) -*/ -static NPY_INLINE void -_free_loop1d_list(PyUFunc_Loop1d *data) -{ - while (data != NULL) { - PyUFunc_Loop1d *next = data->next; - _pya_free(data->arg_types); - _pya_free(data); - data = next; - } -} - -#if PY_VERSION_HEX >= 0x03000000 -static void -_loop1d_list_free(PyObject *ptr) -{ - PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)PyCapsule_GetPointer(ptr, NULL); - _free_loop1d_list(data); -} -#else -static void -_loop1d_list_free(void *ptr) -{ - PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)ptr; - _free_loop1d_list(data); -} -#endif - - -/*UFUNC_API*/ -NPY_NO_EXPORT int -PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, - int usertype, - PyUFuncGenericFunction function, - int *arg_types, - void 
*data) -{ - PyArray_Descr *descr; - PyUFunc_Loop1d *funcdata; - PyObject *key, *cobj; - int i; - int *newtypes=NULL; - - descr=PyArray_DescrFromType(usertype); - if ((usertype < PyArray_USERDEF) || (descr==NULL)) { - PyErr_SetString(PyExc_TypeError, "unknown user-defined type"); - return -1; - } - Py_DECREF(descr); - - if (ufunc->userloops == NULL) { - ufunc->userloops = PyDict_New(); - } - key = PyInt_FromLong((long) usertype); - if (key == NULL) { - return -1; - } - funcdata = _pya_malloc(sizeof(PyUFunc_Loop1d)); - if (funcdata == NULL) { - goto fail; - } - newtypes = _pya_malloc(sizeof(int)*ufunc->nargs); - if (newtypes == NULL) { - goto fail; - } - if (arg_types != NULL) { - for (i = 0; i < ufunc->nargs; i++) { - newtypes[i] = arg_types[i]; - } - } - else { - for (i = 0; i < ufunc->nargs; i++) { - newtypes[i] = usertype; - } - } - - funcdata->func = function; - funcdata->arg_types = newtypes; - funcdata->data = data; - funcdata->next = NULL; - - /* Get entry for this user-defined type*/ - cobj = PyDict_GetItem(ufunc->userloops, key); - /* If it's not there, then make one and return. */ - if (cobj == NULL) { - cobj = NpyCapsule_FromVoidPtr((void *)funcdata, _loop1d_list_free); - if (cobj == NULL) { - goto fail; - } - PyDict_SetItem(ufunc->userloops, key, cobj); - Py_DECREF(cobj); - Py_DECREF(key); - return 0; - } - else { - PyUFunc_Loop1d *current, *prev = NULL; - int cmp = 1; - /* - * There is already at least 1 loop. Place this one in - * lexicographic order. If the next one signature - * is exactly like this one, then just replace. - * Otherwise insert. 
- */ - current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj); - while (current != NULL) { - cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs); - if (cmp >= 0) { - break; - } - prev = current; - current = current->next; - } - if (cmp == 0) { - /* just replace it with new function */ - current->func = function; - current->data = data; - _pya_free(newtypes); - _pya_free(funcdata); - } - else { - /* - * insert it before the current one by hacking the internals - * of cobject to replace the function pointer --- can't use - * CObject API because destructor is set. - */ - funcdata->next = current; - if (prev == NULL) { - /* place this at front */ - _SETCPTR(cobj, funcdata); - } - else { - prev->next = funcdata; - } - } - } - Py_DECREF(key); - return 0; - - fail: - Py_DECREF(key); - _pya_free(funcdata); - _pya_free(newtypes); - if (!PyErr_Occurred()) PyErr_NoMemory(); - return -1; -} - -#undef _SETCPTR - - -static void -ufunc_dealloc(PyUFuncObject *self) -{ - if (self->core_num_dims) { - _pya_free(self->core_num_dims); - } - if (self->core_dim_ixs) { - _pya_free(self->core_dim_ixs); - } - if (self->core_offsets) { - _pya_free(self->core_offsets); - } - if (self->core_signature) { - _pya_free(self->core_signature); - } - if (self->ptr) { - _pya_free(self->ptr); - } - Py_XDECREF(self->userloops); - Py_XDECREF(self->obj); - _pya_free(self); -} - -static PyObject * -ufunc_repr(PyUFuncObject *self) -{ - char buf[100]; - - sprintf(buf, "", self->name); - return PyUString_FromString(buf); -} - - -/****************************************************************************** - *** UFUNC METHODS *** - *****************************************************************************/ - - -/* - * op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) - * where a has b.ndim NewAxis terms appended. 
- * - * The result has dimensions a.ndim + b.ndim - */ -static PyObject * -ufunc_outer(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - int i; - PyObject *ret; - PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL; - PyObject *new_args, *tmp; - PyObject *shape1, *shape2, *newshape; - - if (self->core_enabled) { - PyErr_Format(PyExc_TypeError, - "method outer is not allowed in ufunc with non-trivial"\ - " signature"); - return NULL; - } - - if(self->nin != 2) { - PyErr_SetString(PyExc_ValueError, - "outer product only supported "\ - "for binary functions"); - return NULL; - } - - if (PySequence_Length(args) != 2) { - PyErr_SetString(PyExc_TypeError, "exactly two arguments expected"); - return NULL; - } - - tmp = PySequence_GetItem(args, 0); - if (tmp == NULL) { - return NULL; - } - ap1 = (PyArrayObject *) PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); - Py_DECREF(tmp); - if (ap1 == NULL) { - return NULL; - } - tmp = PySequence_GetItem(args, 1); - if (tmp == NULL) { - return NULL; - } - ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); - Py_DECREF(tmp); - if (ap2 == NULL) { - Py_DECREF(ap1); - return NULL; - } - /* Construct new shape tuple */ - shape1 = PyTuple_New(ap1->nd); - if (shape1 == NULL) { - goto fail; - } - for (i = 0; i < ap1->nd; i++) { - PyTuple_SET_ITEM(shape1, i, - PyLong_FromLongLong((longlong)ap1->dimensions[i])); - } - shape2 = PyTuple_New(ap2->nd); - for (i = 0; i < ap2->nd; i++) { - PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); - } - if (shape2 == NULL) { - Py_DECREF(shape1); - goto fail; - } - newshape = PyNumber_Add(shape1, shape2); - Py_DECREF(shape1); - Py_DECREF(shape2); - if (newshape == NULL) { - goto fail; - } - ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); - Py_DECREF(newshape); - if (ap_new == NULL) { - goto fail; - } - new_args = Py_BuildValue("(OO)", ap_new, ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - Py_DECREF(ap_new); - ret = ufunc_generic_call(self, new_args, kwds); - 
Py_DECREF(new_args); - return ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ap_new); - return NULL; -} - - -static PyObject * -ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE); -} - -static PyObject * -ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE); -} - -static PyObject * -ufunc_reduceat(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCEAT); -} - - -static struct PyMethodDef ufunc_methods[] = { - {"reduce", - (PyCFunction)ufunc_reduce, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"accumulate", - (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"reduceat", - (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"outer", - (PyCFunction)ufunc_outer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - -/****************************************************************************** - *** UFUNC GETSET *** - *****************************************************************************/ - - -/* construct the string y1,y2,...,yn */ -static PyObject * -_makeargs(int num, char *ltr, int null_if_none) -{ - PyObject *str; - int i; - - switch (num) { - case 0: - if (null_if_none) { - return NULL; - } - return PyString_FromString(""); - case 1: - return PyString_FromString(ltr); - } - str = PyString_FromFormat("%s1, %s2", ltr, ltr); - for (i = 3; i <= num; ++i) { - PyString_ConcatAndDel(&str, PyString_FromFormat(", %s%d", ltr, i)); - } - return str; -} - -static char -_typecharfromnum(int num) { - PyArray_Descr *descr; - char ret; - - descr = PyArray_DescrFromType(num); - ret = descr->type; - Py_DECREF(descr); - return ret; -} - -static PyObject * -ufunc_get_doc(PyUFuncObject *self) -{ - /* - * Put docstring first or FindMethod finds 
it... could so some - * introspection on name and nin + nout to automate the first part - * of it the doc string shouldn't need the calling convention - * construct name(x1, x2, ...,[ out1, out2, ...]) __doc__ - */ - PyObject *outargs, *inargs, *doc; - outargs = _makeargs(self->nout, "out", 1); - inargs = _makeargs(self->nin, "x", 0); - if (outargs == NULL) { - doc = PyUString_FromFormat("%s(%s)\n\n%s", - self->name, - PyString_AS_STRING(inargs), - self->doc); - } - else { - doc = PyUString_FromFormat("%s(%s[, %s])\n\n%s", - self->name, - PyString_AS_STRING(inargs), - PyString_AS_STRING(outargs), - self->doc); - Py_DECREF(outargs); - } - Py_DECREF(inargs); - return doc; -} - -static PyObject * -ufunc_get_nin(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nin); -} - -static PyObject * -ufunc_get_nout(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nout); -} - -static PyObject * -ufunc_get_nargs(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nargs); -} - -static PyObject * -ufunc_get_ntypes(PyUFuncObject *self) -{ - return PyInt_FromLong(self->ntypes); -} - -static PyObject * -ufunc_get_types(PyUFuncObject *self) -{ - /* return a list with types grouped input->output */ - PyObject *list; - PyObject *str; - int k, j, n, nt = self->ntypes; - int ni = self->nin; - int no = self->nout; - char *t; - list = PyList_New(nt); - if (list == NULL) { - return NULL; - } - t = _pya_malloc(no+ni+2); - n = 0; - for (k = 0; k < nt; k++) { - for (j = 0; jtypes[n]); - n++; - } - t[ni] = '-'; - t[ni+1] = '>'; - for (j = 0; j < no; j++) { - t[ni + 2 + j] = _typecharfromnum(self->types[n]); - n++; - } - str = PyUString_FromStringAndSize(t, no + ni + 2); - PyList_SET_ITEM(list, k, str); - } - _pya_free(t); - return list; -} - -static PyObject * -ufunc_get_name(PyUFuncObject *self) -{ - return PyUString_FromString(self->name); -} - -static PyObject * -ufunc_get_identity(PyUFuncObject *self) -{ - switch(self->identity) { - case PyUFunc_One: - return PyInt_FromLong(1); - 
case PyUFunc_Zero: - return PyInt_FromLong(0); - } - return Py_None; -} - -static PyObject * -ufunc_get_signature(PyUFuncObject *self) -{ - if (!self->core_enabled) { - Py_RETURN_NONE; - } - return PyUString_FromString(self->core_signature); -} - -#undef _typecharfromnum - -/* - * Docstring is now set from python - * static char *Ufunctype__doc__ = NULL; - */ -static PyGetSetDef ufunc_getset[] = { - {"__doc__", - (getter)ufunc_get_doc, - NULL, NULL, NULL}, - {"nin", - (getter)ufunc_get_nin, - NULL, NULL, NULL}, - {"nout", - (getter)ufunc_get_nout, - NULL, NULL, NULL}, - {"nargs", - (getter)ufunc_get_nargs, - NULL, NULL, NULL}, - {"ntypes", - (getter)ufunc_get_ntypes, - NULL, NULL, NULL}, - {"types", - (getter)ufunc_get_types, - NULL, NULL, NULL}, - {"__name__", - (getter)ufunc_get_name, - NULL, NULL, NULL}, - {"identity", - (getter)ufunc_get_identity, - NULL, NULL, NULL}, - {"signature", - (getter)ufunc_get_signature, - NULL, NULL, NULL}, - {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ -}; - - -/****************************************************************************** - *** UFUNC TYPE OBJECT *** - *****************************************************************************/ - -NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - "numpy.ufunc", /* tp_name */ - sizeof(PyUFuncObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)ufunc_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if defined(NPY_PY3K) - 0, /* tp_reserved */ -#else - 0, /* tp_compare */ -#endif - (reprfunc)ufunc_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)ufunc_generic_call, /* tp_call */ - (reprfunc)ufunc_repr, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 
0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - ufunc_methods, /* tp_methods */ - 0, /* tp_members */ - ufunc_getset, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - -/* End of code for ufunc objects */ diff --git a/numpy-1.6.2/numpy/core/src/umath/ufunc_object.h b/numpy-1.6.2/numpy/core/src/umath/ufunc_object.h deleted file mode 100644 index a8886be057..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/ufunc_object.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _NPY_UMATH_UFUNC_OBJECT_H_ -#define _NPY_UMATH_UFUNC_OBJECT_H_ - -NPY_NO_EXPORT PyObject * -ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args); - -NPY_NO_EXPORT PyObject * -ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args); - -#endif diff --git a/numpy-1.6.2/numpy/core/src/umath/umath_tests.c.src b/numpy-1.6.2/numpy/core/src/umath/umath_tests.c.src deleted file mode 100644 index cb1d541f5c..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/umath_tests.c.src +++ /dev/null @@ -1,341 +0,0 @@ -/* -*- c -*- */ - -/* - ***************************************************************************** - ** INCLUDES ** - ***************************************************************************** - */ -#include "Python.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" - -#include "numpy/npy_3kcompat.h" - -#include "npy_config.h" - -/* - ***************************************************************************** - ** BASICS ** - ***************************************************************************** - */ - 
-typedef npy_intp intp; - -#define INIT_OUTER_LOOP_1 \ - intp dN = *dimensions++; \ - intp N_; \ - intp s0 = *steps++; - -#define INIT_OUTER_LOOP_2 \ - INIT_OUTER_LOOP_1 \ - intp s1 = *steps++; - -#define INIT_OUTER_LOOP_3 \ - INIT_OUTER_LOOP_2 \ - intp s2 = *steps++; - -#define INIT_OUTER_LOOP_4 \ - INIT_OUTER_LOOP_3 \ - intp s3 = *steps++; - -#define BEGIN_OUTER_LOOP_3 \ - for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2) { - -#define BEGIN_OUTER_LOOP_4 \ - for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2, args[3] += s3) { - -#define END_OUTER_LOOP } - - -/* - ***************************************************************************** - ** UFUNC LOOPS ** - ***************************************************************************** - */ - -char *inner1d_signature = "(i),(i)->()"; - -/**begin repeat - - #TYPE=LONG,DOUBLE# - #typ=npy_long, npy_double# -*/ - -/* - * This implements the function - * out[n] = sum_i { in1[n, i] * in2[n, i] }. - */ -static void -@TYPE@_inner1d(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - INIT_OUTER_LOOP_3 - intp di = dimensions[0]; - intp i; - intp is1=steps[0], is2=steps[1]; - BEGIN_OUTER_LOOP_3 - char *ip1=args[0], *ip2=args[1], *op=args[2]; - @typ@ sum = 0; - for (i = 0; i < di; i++) { - sum += (*(@typ@ *)ip1) * (*(@typ@ *)ip2); - ip1 += is1; - ip2 += is2; - } - *(@typ@ *)op = sum; - END_OUTER_LOOP -} - -/**end repeat**/ - -char *innerwt_signature = "(i),(i),(i)->()"; - -/**begin repeat - - #TYPE=LONG,DOUBLE# - #typ=npy_long, npy_double# -*/ - - -/* - * This implements the function - * out[n] = sum_i { in1[n, i] * in2[n, i] * in3[n, i] }. 
- */ - -static void -@TYPE@_innerwt(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - INIT_OUTER_LOOP_4 - intp di = dimensions[0]; - intp i; - intp is1=steps[0], is2=steps[1], is3=steps[2]; - BEGIN_OUTER_LOOP_4 - char *ip1=args[0], *ip2=args[1], *ip3=args[2], *op=args[3]; - @typ@ sum = 0; - for (i = 0; i < di; i++) { - sum += (*(@typ@ *)ip1) * (*(@typ@ *)ip2) * (*(@typ@ *)ip3); - ip1 += is1; - ip2 += is2; - ip3 += is3; - } - *(@typ@ *)op = sum; - END_OUTER_LOOP -} - -/**end repeat**/ - -char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)"; - -/**begin repeat - - #TYPE=FLOAT,DOUBLE,LONG# - #typ=npy_float, npy_double,npy_long# -*/ - -/* - * This implements the function - * out[k, m, p] = sum_n { in1[k, m, n] * in2[k, n, p] }. - */ - - -static void -@TYPE@_matrix_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* no BLAS is available */ - INIT_OUTER_LOOP_3 - intp dm = dimensions[0]; - intp dn = dimensions[1]; - intp dp = dimensions[2]; - intp m,n,p; - intp is1_m=steps[0], is1_n=steps[1], is2_n=steps[2], is2_p=steps[3], - os_m=steps[4], os_p=steps[5]; - intp ib1_n = is1_n*dn; - intp ib2_n = is2_n*dn; - intp ib2_p = is2_p*dp; - intp ob_p = os_p *dp; - BEGIN_OUTER_LOOP_3 - char *ip1=args[0], *ip2=args[1], *op=args[2]; - for (m = 0; m < dm; m++) { - for (n = 0; n < dn; n++) { - @typ@ val1 = (*(@typ@ *)ip1); - for (p = 0; p < dp; p++) { - if (n == 0) *(@typ@ *)op = 0; - *(@typ@ *)op += val1 * (*(@typ@ *)ip2); - ip2 += is2_p; - op += os_p; - } - ip2 -= ib2_p; - op -= ob_p; - ip1 += is1_n; - ip2 += is2_n; - } - ip1 -= ib1_n; - ip2 -= ib2_n; - ip1 += is1_m; - op += os_m; - } - END_OUTER_LOOP -} - -/**end repeat**/ - -/* The following lines were generated using a slightly modified - version of code_generators/generate_umath.py and adding these - lines to defdict: - -defdict = { -'inner1d' : - Ufunc(2, 1, None_, - r'''inner on the last dimension and broadcast on the rest \n" - " \"(i),(i)->()\" \n''', - TD('ld'), - ), 
-'innerwt' : - Ufunc(3, 1, None_, - r'''inner1d with a weight argument \n" - " \"(i),(i),(i)->()\" \n''', - TD('ld'), - ), -} - -*/ - -static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d }; -static void * inner1d_data[] = { (void *)NULL, (void *)NULL }; -static char inner1d_signatures[] = { PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE }; -static PyUFuncGenericFunction innerwt_functions[] = { LONG_innerwt, DOUBLE_innerwt }; -static void * innerwt_data[] = { (void *)NULL, (void *)NULL }; -static char innerwt_signatures[] = { PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE }; -static PyUFuncGenericFunction matrix_multiply_functions[] = { LONG_matrix_multiply, FLOAT_matrix_multiply, DOUBLE_matrix_multiply }; -static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; -static char matrix_multiply_signatures[] = { PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE }; - -static void -addUfuncs(PyObject *dictionary) { - PyObject *f; - - f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data, inner1d_signatures, 2, - 2, 1, PyUFunc_None, "inner1d", - "inner on the last dimension and broadcast on the rest \n"\ - " \"(i),(i)->()\" \n", - 0, inner1d_signature); - PyDict_SetItemString(dictionary, "inner1d", f); - Py_DECREF(f); - f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data, innerwt_signatures, 2, - 3, 1, PyUFunc_None, "innerwt", - "inner1d with a weight argument \n"\ - " \"(i),(i),(i)->()\" \n", - 0, innerwt_signature); - PyDict_SetItemString(dictionary, "innerwt", f); - Py_DECREF(f); - f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions, - matrix_multiply_data, matrix_multiply_signatures, - 3, 2, 1, PyUFunc_None, "matrix_multiply", - "matrix multiplication on last two 
dimensions \n"\ - " \"(m,n),(n,p)->(m,p)\" \n", - 0, matrix_multiply_signature); - PyDict_SetItemString(dictionary, "matrix_multiply", f); - Py_DECREF(f); -} - -/* - End of auto-generated code. -*/ - - - -static PyObject * -UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - int nin, nout; - PyObject *signature, *sig_str; - PyObject *f; - int core_enabled; - - if (!PyArg_ParseTuple(args, "iiO", &nin, &nout, &signature)) return NULL; - - - if (PyString_Check(signature)) { - sig_str = signature; - } else if (PyUnicode_Check(signature)) { - sig_str = PyUnicode_AsUTF8String(signature); - } else { - PyErr_SetString(PyExc_ValueError, "signature should be a string"); - return NULL; - } - - f = PyUFunc_FromFuncAndDataAndSignature(NULL, NULL, NULL, - 0, nin, nout, PyUFunc_None, "no name", - "doc:none", - 1, PyString_AS_STRING(sig_str)); - if (sig_str != signature) { - Py_DECREF(sig_str); - } - if (f == NULL) return NULL; - core_enabled = ((PyUFuncObject*)f)->core_enabled; - Py_DECREF(f); - return Py_BuildValue("i", core_enabled); -} - -static PyMethodDef UMath_TestsMethods[] = { - {"test_signature", UMath_Tests_test_signature, METH_VARARGS, - "Test signature parsing of ufunc. \n" - "Arguments: nin nout signature \n" - "If fails, it returns NULL. Otherwise it will returns 0 for scalar ufunc " - "and 1 for generalized ufunc. 
\n", - }, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "umath_tests", - NULL, - -1, - UMath_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit_umath_tests(void) -#else -#define RETVAL -PyMODINIT_FUNC -initumath_tests(void) -#endif -{ - PyObject *m; - PyObject *d; - PyObject *version; - -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("umath_tests", UMath_TestsMethods); -#endif - if (m == NULL) - return RETVAL; - - import_array(); - import_ufunc(); - - d = PyModule_GetDict(m); - - version = PyString_FromString("0.1"); - PyDict_SetItemString(d, "__version__", version); - Py_DECREF(version); - - /* Load the ufunc operators into the module's namespace */ - addUfuncs(d); - - if (PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load umath_tests module."); - } - - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/core/src/umath/umathmodule.c.src b/numpy-1.6.2/numpy/core/src/umath/umathmodule.c.src deleted file mode 100644 index 8d081f85b2..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/umathmodule.c.src +++ /dev/null @@ -1,397 +0,0 @@ -/* -*- c -*- */ - -/* - * vim:syntax=c - */ - -/* - ***************************************************************************** - ** INCLUDES ** - ***************************************************************************** - */ - -/* - * _UMATHMODULE IS needed in __ufunc_api.h, included from numpy/ufuncobject.h. - * This is a mess and it would be nice to fix it. 
It has nothing to do with - * __ufunc_api.c - */ -#define _UMATHMODULE - -#include "Python.h" - -#include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION -#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#endif - -#include "numpy/noprefix.h" -#include "numpy/ufuncobject.h" -#include "abstract.h" - -#include "numpy/npy_math.h" - -/* - ***************************************************************************** - ** INCLUDE GENERATED CODE ** - ***************************************************************************** - */ -#include "funcs.inc" -#include "loops.h" -#include "ufunc_object.h" -#include "__umath_generated.c" -#include "__ufunc_api.c" - -static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; - -static PyObject * -ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { - /* Keywords are ignored for now */ - - PyObject *function, *pyname = NULL; - int nin, nout, i; - PyUFunc_PyFuncData *fdata; - PyUFuncObject *self; - char *fname, *str; - Py_ssize_t fname_len = -1; - int offset[2]; - - if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) { - return NULL; - } - if (!PyCallable_Check(function)) { - PyErr_SetString(PyExc_TypeError, "function must be callable"); - return NULL; - } - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) { - return NULL; - } - PyObject_Init((PyObject *)self, &PyUFunc_Type); - - self->userloops = NULL; - self->nin = nin; - self->nout = nout; - self->nargs = nin + nout; - self->identity = PyUFunc_None; - self->functions = pyfunc_functions; - self->ntypes = 1; - self->check_return = 0; - - /* generalized ufunc */ - self->core_enabled = 0; - self->core_num_dim_ix = 0; - self->core_num_dims = NULL; - self->core_dim_ixs = NULL; - self->core_offsets = NULL; - self->core_signature = NULL; - - pyname = PyObject_GetAttrString(function, "__name__"); - if (pyname) { - (void) PyString_AsStringAndSize(pyname, &fname, &fname_len); - } - if (PyErr_Occurred()) { - 
fname = "?"; - fname_len = 1; - PyErr_Clear(); - } - - /* - * self->ptr holds a pointer for enough memory for - * self->data[0] (fdata) - * self->data - * self->name - * self->types - * - * To be safest, all of these need their memory aligned on void * pointers - * Therefore, we may need to allocate extra space. - */ - offset[0] = sizeof(PyUFunc_PyFuncData); - i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *)); - if (i) { - offset[0] += (sizeof(void *) - i); - } - offset[1] = self->nargs; - i = (self->nargs % sizeof(void *)); - if (i) { - offset[1] += (sizeof(void *)-i); - } - self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + - (fname_len + 14)); - if (self->ptr == NULL) { - Py_XDECREF(pyname); - return PyErr_NoMemory(); - } - Py_INCREF(function); - self->obj = function; - fdata = (PyUFunc_PyFuncData *)(self->ptr); - fdata->nin = nin; - fdata->nout = nout; - fdata->callable = function; - - self->data = (void **)(((char *)self->ptr) + offset[0]); - self->data[0] = (void *)fdata; - self->types = (char *)self->data + sizeof(void *); - for (i = 0; i < self->nargs; i++) { - self->types[i] = PyArray_OBJECT; - } - str = self->types + offset[1]; - memcpy(str, fname, fname_len); - memcpy(str+fname_len, " (vectorized)", 14); - self->name = str; - - Py_XDECREF(pyname); - - /* Do a better job someday */ - self->doc = "dynamic ufunc based on a python function"; - - return (PyObject *)self; -} - -/* - ***************************************************************************** - ** SETUP UFUNCS ** - ***************************************************************************** - */ - -/* Less automated additions to the ufuncs */ - -static PyUFuncGenericFunction frexp_functions[] = { -#ifdef HAVE_FREXPF - HALF_frexp, - FLOAT_frexp, -#endif - DOUBLE_frexp -#ifdef HAVE_FREXPL - ,LONGDOUBLE_frexp -#endif -}; - -static void * blank3_data[] = { (void *)NULL, (void *)NULL, (void *)NULL}; -static void * blank6_data[] = { (void *)NULL, (void *)NULL, (void *)NULL, - (void 
*)NULL, (void *)NULL, (void *)NULL}; -static char frexp_signatures[] = { -#ifdef HAVE_FREXPF - PyArray_HALF, PyArray_HALF, PyArray_INT, - PyArray_FLOAT, PyArray_FLOAT, PyArray_INT, -#endif - PyArray_DOUBLE, PyArray_DOUBLE, PyArray_INT -#ifdef HAVE_FREXPL - ,PyArray_LONGDOUBLE, PyArray_LONGDOUBLE, PyArray_INT -#endif -}; - -#if NPY_SIZEOF_LONG == NPY_SIZEOF_INT -#define LDEXP_LONG(typ) typ##_ldexp -#else -#define LDEXP_LONG(typ) typ##_ldexp_long -#endif - -static PyUFuncGenericFunction ldexp_functions[] = { -#ifdef HAVE_LDEXPF - HALF_ldexp, - FLOAT_ldexp, - LDEXP_LONG(HALF), - LDEXP_LONG(FLOAT), -#endif - DOUBLE_ldexp, - LDEXP_LONG(DOUBLE) -#ifdef HAVE_LDEXPL - , - LONGDOUBLE_ldexp, - LDEXP_LONG(LONGDOUBLE) -#endif -}; - -static char ldexp_signatures[] = { -#ifdef HAVE_LDEXPF - PyArray_HALF, PyArray_INT, PyArray_HALF, - PyArray_FLOAT, PyArray_INT, PyArray_FLOAT, - PyArray_HALF, PyArray_LONG, PyArray_HALF, - PyArray_FLOAT, PyArray_LONG, PyArray_FLOAT, -#endif - PyArray_DOUBLE, PyArray_INT, PyArray_DOUBLE, - PyArray_DOUBLE, PyArray_LONG, PyArray_DOUBLE -#ifdef HAVE_LDEXPL - ,PyArray_LONGDOUBLE, PyArray_INT, PyArray_LONGDOUBLE - ,PyArray_LONGDOUBLE, PyArray_LONG, PyArray_LONGDOUBLE -#endif -}; - -static void -InitOtherOperators(PyObject *dictionary) { - PyObject *f; - int num; - - num = sizeof(frexp_functions) / sizeof(frexp_functions[0]); - f = PyUFunc_FromFuncAndData(frexp_functions, blank3_data, - frexp_signatures, num, - 1, 2, PyUFunc_None, "frexp", - "Split the number, x, into a normalized"\ - " fraction (y1) and exponent (y2)",0); - PyDict_SetItemString(dictionary, "frexp", f); - Py_DECREF(f); - - num = sizeof(ldexp_functions) / sizeof(ldexp_functions[0]); - f = PyUFunc_FromFuncAndData(ldexp_functions, blank6_data, ldexp_signatures, num, - 2, 1, PyUFunc_None, "ldexp", - "Compute y = x1 * 2**x2.",0); - PyDict_SetItemString(dictionary, "ldexp", f); - Py_DECREF(f); - -#if defined(NPY_PY3K) - f = PyDict_GetItemString(dictionary, "true_divide"); - 
PyDict_SetItemString(dictionary, "divide", f); -#endif - return; -} - -/* Setup the umath module */ -/* Remove for time being, it is declared in __ufunc_api.h */ -/*static PyTypeObject PyUFunc_Type;*/ - -static struct PyMethodDef methods[] = { - {"frompyfunc", (PyCFunction) ufunc_frompyfunc, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"seterrobj", (PyCFunction) ufunc_seterr, - METH_VARARGS, NULL}, - {"geterrobj", (PyCFunction) ufunc_geterr, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "umath", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#include - -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit_umath(void) -#else -#define RETVAL -PyMODINIT_FUNC initumath(void) -#endif -{ - PyObject *m, *d, *s, *s2, *c_api; - int UFUNC_FLOATING_POINT_SUPPORT = 1; - -#ifdef NO_UFUNC_FLOATING_POINT_SUPPORT - UFUNC_FLOATING_POINT_SUPPORT = 0; -#endif - /* Create the module and add the functions */ -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("umath", methods); -#endif - if (!m) { - return RETVAL; - } - - /* Import the array */ - if (_import_array() < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, - "umath failed: Could not import array core."); - } - return RETVAL; - } - - /* Initialize the types */ - if (PyType_Ready(&PyUFunc_Type) < 0) - return RETVAL; - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - - c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL); - if (PyErr_Occurred()) { - goto err; - } - PyDict_SetItemString(d, "_UFUNC_API", c_api); - Py_DECREF(c_api); - if (PyErr_Occurred()) { - goto err; - } - - s = PyString_FromString("0.4.0"); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - - /* Load the ufunc operators into the array module's namespace */ - InitOperators(d); - - InitOtherOperators(d); - - 
PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI)); - Py_DECREF(s); - PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(exp(1.0))); - Py_DECREF(s); - -#define ADDCONST(str) PyModule_AddIntConstant(m, #str, UFUNC_##str) -#define ADDSCONST(str) PyModule_AddStringConstant(m, "UFUNC_" #str, UFUNC_##str) - - ADDCONST(ERR_IGNORE); - ADDCONST(ERR_WARN); - ADDCONST(ERR_CALL); - ADDCONST(ERR_RAISE); - ADDCONST(ERR_PRINT); - ADDCONST(ERR_LOG); - ADDCONST(ERR_DEFAULT); - ADDCONST(ERR_DEFAULT2); - - ADDCONST(SHIFT_DIVIDEBYZERO); - ADDCONST(SHIFT_OVERFLOW); - ADDCONST(SHIFT_UNDERFLOW); - ADDCONST(SHIFT_INVALID); - - ADDCONST(FPE_DIVIDEBYZERO); - ADDCONST(FPE_OVERFLOW); - ADDCONST(FPE_UNDERFLOW); - ADDCONST(FPE_INVALID); - - ADDCONST(FLOATING_POINT_SUPPORT); - - ADDSCONST(PYVALS_NAME); - -#undef ADDCONST -#undef ADDSCONST - PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)PyArray_BUFSIZE); - - PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY)); - PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY)); - PyModule_AddObject(m, "PZERO", PyFloat_FromDouble(NPY_PZERO)); - PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); - PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - - s = PyDict_GetItemString(d, "conjugate"); - s2 = PyDict_GetItemString(d, "remainder"); - /* Setup the array object's numerical structures with appropriate - ufuncs in d*/ - PyArray_SetNumericOps(d); - - PyDict_SetItemString(d, "conj", s); - PyDict_SetItemString(d, "mod", s2); - - return RETVAL; - - err: - /* Check for errors */ - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load umath module."); - } - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/core/src/umath/umathmodule_onefile.c b/numpy-1.6.2/numpy/core/src/umath/umathmodule_onefile.c deleted file mode 100644 index 722f74eec5..0000000000 --- a/numpy-1.6.2/numpy/core/src/umath/umathmodule_onefile.c +++ /dev/null @@ -1,4 +0,0 @@ -#include "loops.c" - -#include 
"ufunc_object.c" -#include "umathmodule.c" diff --git a/numpy-1.6.2/numpy/core/tests/data/astype_copy.pkl b/numpy-1.6.2/numpy/core/tests/data/astype_copy.pkl deleted file mode 100644 index 7397c97829..0000000000 Binary files a/numpy-1.6.2/numpy/core/tests/data/astype_copy.pkl and /dev/null differ diff --git a/numpy-1.6.2/numpy/core/tests/data/recarray_from_file.fits b/numpy-1.6.2/numpy/core/tests/data/recarray_from_file.fits deleted file mode 100644 index ca48ee8515..0000000000 Binary files a/numpy-1.6.2/numpy/core/tests/data/recarray_from_file.fits and /dev/null differ diff --git a/numpy-1.6.2/numpy/core/tests/test_api.py b/numpy-1.6.2/numpy/core/tests/test_api.py deleted file mode 100644 index 255ef45655..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_api.py +++ /dev/null @@ -1,28 +0,0 @@ -import sys - -import numpy as np -from numpy.testing import * -from numpy.testing.utils import WarningManager -import warnings - -def test_fastCopyAndTranspose(): - # 0D array - a = np.array(2) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 1D array - a = np.array([3,2,7,0]) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 2D array - a = np.arange(6).reshape(2,3) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_arrayprint.py b/numpy-1.6.2/numpy/core/tests/test_arrayprint.py deleted file mode 100644 index c7b69a09f5..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_arrayprint.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np -from numpy.testing import * - -class TestArrayRepr(object): - def test_nan_inf(self): - x = np.array([np.nan, np.inf]) - assert_equal(repr(x), 'array([ nan, inf])') - -class TestComplexArray(TestCase): - def test_str(self): - rvals = [0, 1, -1, np.inf, -np.inf, np.nan] - cvals = [complex(rp, ip) for rp in rvals for ip in rvals] - dtypes 
= [np.complex64, np.cdouble, np.clongdouble] - actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] - wanted = [ - '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]', - '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]', - '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]', - '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]', - '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]', - '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]', - '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]', - '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]', - '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]', - '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]', - '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]', - '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]', - '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]', - '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]', - '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]', - '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]', - '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]', - '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]', - '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]', - '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]', - '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]', - '[ inf+infj]', '[ inf+infj]', '[ inf+infj]', - '[ inf-infj]', '[ inf-infj]', '[ inf-infj]', - '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]', - '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]', - '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]', - '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]', - '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', - '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', - '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', - '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]', - '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]', - '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]', - '[ nan+infj]', '[ nan+infj]', '[ nan+infj]', - '[ nan-infj]', '[ nan-infj]', '[ nan-infj]', - '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]'] - - for res, val in zip(actual, wanted): - assert_(res == val) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_blasdot.py b/numpy-1.6.2/numpy/core/tests/test_blasdot.py deleted file mode 100644 index 
73c3c4a05a..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_blasdot.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np -import sys -from numpy.core import zeros, float64 -from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \ - assert_raises, assert_array_equal, assert_allclose, assert_equal -from numpy.core.multiarray import inner as inner_ - -DECPREC = 14 - -class TestInner(TestCase): - def test_vecself(self): - """Ticket 844.""" - # Inner product of a vector with itself segfaults or give meaningless - # result - a = zeros(shape = (1, 80), dtype = float64) - p = inner_(a, a) - assert_almost_equal(p, 0, decimal = DECPREC) - -try: - import numpy.core._dotblas as _dotblas -except ImportError: - _dotblas = None - -@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") -def test_blasdot_used(): - from numpy.core import dot, vdot, inner, alterdot, restoredot - assert_(dot is _dotblas.dot) - assert_(vdot is _dotblas.vdot) - assert_(inner is _dotblas.inner) - assert_(alterdot is _dotblas.alterdot) - assert_(restoredot is _dotblas.restoredot) - - -def test_dot_2args(): - from numpy.core import dot - - a = np.array([[1, 2], [3, 4]], dtype=float) - b = np.array([[1, 0], [1, 1]], dtype=float) - c = np.array([[3, 2], [7, 4]], dtype=float) - - d = dot(a, b) - assert_allclose(c, d) - -def test_dot_3args(): - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 32)) - for i in xrange(12): - np.dot(f,v,r) - assert_equal(sys.getrefcount(r), 2) - r2 = np.dot(f,v,out=None) - assert_array_equal(r2, r) - assert_(r is np.dot(f,v,out=r)) - - v = v[:,0].copy() # v.shape == (16,) - r = r[:,0].copy() # r.shape == (1024,) - r2 = np.dot(f,v) - assert_(r is np.dot(f,v,r)) - assert_array_equal(r2, r) - -def test_dot_3args_errors(): - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 31)) - 
assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((1024,)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((32,)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((32, 1024)) - assert_raises(ValueError, np.dot, f, v, r) - assert_raises(ValueError, np.dot, f, v, r.T) - - r = np.empty((1024, 64)) - assert_raises(ValueError, np.dot, f, v, r[:,::2]) - assert_raises(ValueError, np.dot, f, v, r[:,:32]) - - r = np.empty((1024, 32), dtype=np.float32) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((1024, 32), dtype=int) - assert_raises(ValueError, np.dot, f, v, r) - - diff --git a/numpy-1.6.2/numpy/core/tests/test_datetime.py b/numpy-1.6.2/numpy/core/tests/test_datetime.py deleted file mode 100644 index 9cbfde1349..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_datetime.py +++ /dev/null @@ -1,79 +0,0 @@ -from os import path -import numpy as np -from numpy.testing import * - -class TestDateTime(TestCase): - def test_creation(self): - for unit in ['Y', 'M', 'W', 'B', 'D', - 'h', 'm', 's', 'ms', 'us', - 'ns', 'ps', 'fs', 'as']: - dt1 = np.dtype('M8[750%s]'%unit) - assert dt1 == np.dtype('datetime64[750%s]' % unit) - dt2 = np.dtype('m8[%s]' % unit) - assert dt2 == np.dtype('timedelta64[%s]' % unit) - - - def test_hours(self): - t = np.ones(3, dtype='M8[s]') - t[0] = 60*60*24 + 60*60*10 - assert t[0].item().hour == 10 - - def test_divisor_conversion_year(self): - assert np.dtype('M8[Y/4]') == np.dtype('M8[3M]') - assert np.dtype('M8[Y/13]') == np.dtype('M8[4W]') - assert np.dtype('M8[3Y/73]') == np.dtype('M8[15D]') - - def test_divisor_conversion_month(self): - assert np.dtype('M8[M/2]') == np.dtype('M8[2W]') - assert np.dtype('M8[M/15]') == np.dtype('M8[2D]') - assert np.dtype('M8[3M/40]') == np.dtype('M8[54h]') - - def test_divisor_conversion_week(self): - assert np.dtype('m8[W/5]') == np.dtype('m8[B]') - assert np.dtype('m8[W/7]') == np.dtype('m8[D]') - assert np.dtype('m8[3W/14]') == np.dtype('m8[36h]') - assert 
np.dtype('m8[5W/140]') == np.dtype('m8[360m]') - - def test_divisor_conversion_bday(self): - assert np.dtype('M8[B/12]') == np.dtype('M8[2h]') - assert np.dtype('M8[B/120]') == np.dtype('M8[12m]') - assert np.dtype('M8[3B/960]') == np.dtype('M8[270s]') - - def test_divisor_conversion_day(self): - assert np.dtype('M8[D/12]') == np.dtype('M8[2h]') - assert np.dtype('M8[D/120]') == np.dtype('M8[12m]') - assert np.dtype('M8[3D/960]') == np.dtype('M8[270s]') - - def test_divisor_conversion_hour(self): - assert np.dtype('m8[h/30]') == np.dtype('m8[2m]') - assert np.dtype('m8[3h/300]') == np.dtype('m8[36s]') - - def test_divisor_conversion_minute(self): - assert np.dtype('m8[m/30]') == np.dtype('m8[2s]') - assert np.dtype('m8[3m/300]') == np.dtype('m8[600ms]') - - def test_divisor_conversion_second(self): - assert np.dtype('m8[s/100]') == np.dtype('m8[10ms]') - assert np.dtype('m8[3s/10000]') == np.dtype('m8[300us]') - - def test_divisor_conversion_fs(self): - assert np.dtype('M8[fs/100]') == np.dtype('M8[10as]') - self.assertRaises(ValueError, lambda : np.dtype('M8[3fs/10000]')) - - def test_divisor_conversion_as(self): - self.assertRaises(ValueError, lambda : np.dtype('M8[as/10]')) - - def test_creation_overflow(self): - date = '1980-03-23 20:00:00' - timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) - for unit in ['ms', 'us', 'ns']: - timesteps *= 1000 - x = np.array([date], dtype='datetime64[%s]' % unit) - - assert_equal(timesteps, x[0].astype(np.int64), - err_msg='Datetime conversion error for unit %s' % unit) - - assert_equal(x[0].astype(np.int64), 322689600000000000) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_defchararray.py b/numpy-1.6.2/numpy/core/tests/test_defchararray.py deleted file mode 100644 index 7ee6921baf..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_defchararray.py +++ /dev/null @@ -1,642 +0,0 @@ -from numpy.testing import * -from numpy.core import * -import numpy as np 
-import sys -from numpy.core.multiarray import _vec_string - -from numpy.compat import asbytes, asbytes_nested - -kw_unicode_true = {'unicode': True} # make 2to3 work properly -kw_unicode_false = {'unicode': False} - -class TestBasic(TestCase): - def test_from_object_array(self): - A = np.array([['abc', 2], - ['long ', '0123456789']], dtype='O') - B = np.char.array(A) - assert_equal(B.dtype.itemsize, 10) - assert_array_equal(B, asbytes_nested([['abc', '2'], - ['long', '0123456789']])) - - def test_from_object_array_unicode(self): - A = np.array([['abc', u'Sigma \u03a3'], - ['long ', '0123456789']], dtype='O') - self.assertRaises(ValueError, np.char.array, (A,)) - B = np.char.array(A, **kw_unicode_true) - assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) - assert_array_equal(B, [['abc', u'Sigma \u03a3'], - ['long', '0123456789']]) - - def test_from_string_array(self): - A = np.array(asbytes_nested([['abc', 'foo'], - ['long ', '0123456789']])) - assert_equal(A.dtype.type, np.string_) - B = np.char.array(A) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - B[0,0] = 'changed' - assert B[0,0] != A[0,0] - C = np.char.asarray(A) - assert_array_equal(C, A) - assert_equal(C.dtype, A.dtype) - C[0,0] = 'changed again' - assert C[0,0] != B[0,0] - assert C[0,0] == A[0,0] - - def test_from_unicode_array(self): - A = np.array([['abc', u'Sigma \u03a3'], - ['long ', '0123456789']]) - assert_equal(A.dtype.type, np.unicode_) - B = np.char.array(A) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - B = np.char.array(A, **kw_unicode_true) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - def fail(): - B = np.char.array(A, **kw_unicode_false) - self.assertRaises(UnicodeEncodeError, fail) - - def test_unicode_upconvert(self): - A = np.char.array(['abc']) - B = np.char.array([u'\u03a3']) - assert issubclass((A + B).dtype.type, 
np.unicode_) - - def test_from_string(self): - A = np.char.array(asbytes('abc')) - assert_equal(len(A), 1) - assert_equal(len(A[0]), 3) - assert issubclass(A.dtype.type, np.string_) - - def test_from_unicode(self): - A = np.char.array(u'\u03a3') - assert_equal(len(A), 1) - assert_equal(len(A[0]), 1) - assert_equal(A.itemsize, 4) - assert issubclass(A.dtype.type, np.unicode_) - -class TestVecString(TestCase): - def test_non_existent_method(self): - def fail(): - _vec_string('a', np.string_, 'bogus') - self.assertRaises(AttributeError, fail) - - def test_non_string_array(self): - def fail(): - _vec_string(1, np.string_, 'strip') - self.assertRaises(TypeError, fail) - - def test_invalid_args_tuple(self): - def fail(): - _vec_string(['a'], np.string_, 'strip', 1) - self.assertRaises(TypeError, fail) - - def test_invalid_type_descr(self): - def fail(): - _vec_string(['a'], 'BOGUS', 'strip') - self.assertRaises(TypeError, fail) - - def test_invalid_function_args(self): - def fail(): - _vec_string(['a'], np.string_, 'strip', (1,)) - self.assertRaises(TypeError, fail) - - def test_invalid_result_type(self): - def fail(): - _vec_string(['a'], np.integer, 'strip') - self.assertRaises(TypeError, fail) - - def test_broadcast_error(self): - def fail(): - _vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],)) - self.assertRaises(ValueError, fail) - - -class TestWhitespace(TestCase): - def setUp(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - - def test1(self): - assert all(self.A == self.B) - assert all(self.A >= self.B) - assert all(self.A <= self.B) - assert all(negative(self.A > self.B)) - assert all(negative(self.A < self.B)) - assert all(negative(self.A != self.B)) - -class TestChar(TestCase): - def setUp(self): - self.A = np.array('abc1', dtype='c').view(np.chararray) - - def test_it(self): - assert_equal(self.A.shape, (4,)) - 
assert_equal(self.A.upper()[:2].tostring(), asbytes('AB')) - -class TestComparisons(TestCase): - def setUp(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']]).view(np.chararray) - - def test_not_equal(self): - assert_array_equal((self.A != self.B), [[True, False], [True, True]]) - - def test_equal(self): - assert_array_equal((self.A == self.B), [[False, True], [False, False]]) - - def test_greater_equal(self): - assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) - - def test_less_equal(self): - assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) - - def test_greater(self): - assert_array_equal((self.A > self.B), [[False, False], [True, True]]) - - def test_less(self): - assert_array_equal((self.A < self.B), [[True, False], [False, False]]) - -class TestComparisonsMixed1(TestComparisons): - """Ticket #1276""" - - def setUp(self): - TestComparisons.setUp(self) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']], np.unicode_).view(np.chararray) - -class TestComparisonsMixed2(TestComparisons): - """Ticket #1276""" - - def setUp(self): - TestComparisons.setUp(self) - self.A = np.array([['abc', '123'], - ['789', 'xyz']], np.unicode_).view(np.chararray) - -class TestInformation(TestCase): - def setUp(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) - self.B = np.array([[u' \u03a3 ', u''], - [u'12345', u'MixedCase'], - [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) - - def test_len(self): - assert issubclass(np.char.str_len(self.A).dtype.type, np.integer) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) - - def test_count(self): - assert issubclass(self.A.count('').dtype.type, np.integer) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - 
assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) - # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) - - def test_endswith(self): - assert issubclass(self.A.endswith('').dtype.type, np.bool_) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) - def fail(): - self.A.endswith('3', 'fdjk') - self.assertRaises(TypeError, fail) - - def test_find(self): - assert issubclass(self.A.find('a').dtype.type, np.integer) - assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) - assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) - - def test_index(self): - def fail(): - self.A.index('a') - self.assertRaises(ValueError, fail) - assert np.char.index('abcba', 'b') == 1 - assert issubclass(np.char.index('abcba', 'b').dtype.type, np.integer) - - def test_isalnum(self): - assert issubclass(self.A.isalnum().dtype.type, np.bool_) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) - - def test_isalpha(self): - assert issubclass(self.A.isalpha().dtype.type, np.bool_) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) - - def test_isdigit(self): - assert issubclass(self.A.isdigit().dtype.type, np.bool_) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) - - def test_islower(self): - assert issubclass(self.A.islower().dtype.type, np.bool_) - 
assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) - - def test_isspace(self): - assert issubclass(self.A.isspace().dtype.type, np.bool_) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) - - def test_istitle(self): - assert issubclass(self.A.istitle().dtype.type, np.bool_) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) - - def test_isupper(self): - assert issubclass(self.A.isupper().dtype.type, np.bool_) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) - - def test_rfind(self): - assert issubclass(self.A.rfind('a').dtype.type, np.integer) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) - - def test_rindex(self): - def fail(): - self.A.rindex('a') - self.assertRaises(ValueError, fail) - assert np.char.rindex('abcba', 'b') == 3 - assert issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer) - - def test_startswith(self): - assert issubclass(self.A.startswith('').dtype.type, np.bool_) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) - def fail(): - self.A.startswith('3', 'fdjk') - self.assertRaises(TypeError, fail) - - -class TestMethods(TestCase): - def setUp(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.chararray) - self.B = np.array([[u' \u03a3 ', u''], - [u'12345', u'MixedCase'], - [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) - - def test_capitalize(self): - assert issubclass(self.A.capitalize().dtype.type, np.string_) - assert_array_equal(self.A.capitalize(), asbytes_nested([ - [' 
abc ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']])) - assert issubclass(self.B.capitalize().dtype.type, np.unicode_) - assert_array_equal(self.B.capitalize(), [ - [u' \u03c3 ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']]) - - def test_center(self): - assert issubclass(self.A.center(10).dtype.type, np.string_) - widths = np.array([[10, 20]]) - C = self.A.center([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, asbytes('#')) - assert np.all(C.startswith(asbytes('#'))) - assert np.all(C.endswith(asbytes('#'))) - C = np.char.center(asbytes('FOO'), [[10, 20], [15, 8]]) - assert issubclass(C.dtype.type, np.string_) - assert_array_equal(C, asbytes_nested([ - [' FOO ', ' FOO '], - [' FOO ', ' FOO ']])) - - def test_decode(self): - if sys.version_info[0] >= 3: - A = np.char.array([asbytes('\\u03a3')]) - assert A.decode('unicode-escape')[0] == '\u03a3' - else: - A = np.char.array(['736563726574206d657373616765']) - assert A.decode('hex_codec')[0] == 'secret message' - - def test_encode(self): - B = self.B.encode('unicode_escape') - assert B[0][0] == asbytes(r' \u03a3 ') - - def test_expandtabs(self): - T = self.A.expandtabs() - assert T[2][0] == asbytes('123 345') - - def test_join(self): - if sys.version_info[0] >= 3: - # NOTE: list(b'123') == [49, 50, 51] - # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') - else: - A0 = self.A - - A = np.char.join([',', '#'], A0) - if sys.version_info[0] >= 3: - assert issubclass(A.dtype.type, np.unicode_) - else: - assert issubclass(A.dtype.type, np.string_) - assert_array_equal(np.char.join([',', '#'], A0), - [ - [' ,a,b,c, ', ''], - ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], - ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) - - def test_ljust(self): - assert issubclass(self.A.ljust(10).dtype.type, np.string_) - widths = np.array([[10, 20]]) - C = self.A.ljust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 
20], [10, 20], [12, 20]]) - C = self.A.ljust(20, asbytes('#')) - assert_array_equal(C.startswith(asbytes('#')), [ - [False, True], [False, False], [False, False]]) - assert np.all(C.endswith(asbytes('#'))) - C = np.char.ljust(asbytes('FOO'), [[10, 20], [15, 8]]) - assert issubclass(C.dtype.type, np.string_) - assert_array_equal(C, asbytes_nested([ - ['FOO ', 'FOO '], - ['FOO ', 'FOO ']])) - - def test_lower(self): - assert issubclass(self.A.lower().dtype.type, np.string_) - assert_array_equal(self.A.lower(), asbytes_nested([ - [' abc ', ''], - ['12345', 'mixedcase'], - ['123 \t 345 \0 ', 'upper']])) - assert issubclass(self.B.lower().dtype.type, np.unicode_) - assert_array_equal(self.B.lower(), [ - [u' \u03c3 ', u''], - [u'12345', u'mixedcase'], - [u'123 \t 345 \0 ', u'upper']]) - - def test_lstrip(self): - assert issubclass(self.A.lstrip().dtype.type, np.string_) - assert_array_equal(self.A.lstrip(), asbytes_nested([ - ['abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']])) - assert_array_equal(self.A.lstrip(asbytes_nested(['1', 'M'])), - asbytes_nested([ - [' abc', ''], - ['2345', 'ixedCase'], - ['23 \t 345 \x00', 'UPPER']])) - assert issubclass(self.B.lstrip().dtype.type, np.unicode_) - assert_array_equal(self.B.lstrip(), [ - [u'\u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) - - def test_partition(self): - if sys.version_info >= (2, 5): - P = self.A.partition(asbytes_nested(['3', 'M'])) - assert issubclass(P.dtype.type, np.string_) - assert_array_equal(P, asbytes_nested([ - [(' abc ', '', ''), ('', '', '')], - [('12', '3', '45'), ('', 'M', 'ixedCase')], - [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]])) - - def test_replace(self): - R = self.A.replace(asbytes_nested(['3', 'a']), - asbytes_nested(['##########', '@'])) - assert issubclass(R.dtype.type, np.string_) - assert_array_equal(R, asbytes_nested([ - [' abc ', ''], - ['12##########45', 'MixedC@se'], - ['12########## \t ##########45 \x00', 'UPPER']])) - - if 
sys.version_info[0] < 3: - # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 - R = self.A.replace(asbytes('a'), u'\u03a3') - assert issubclass(R.dtype.type, np.unicode_) - assert_array_equal(R, [ - [u' \u03a3bc ', ''], - ['12345', u'MixedC\u03a3se'], - ['123 \t 345 \x00', 'UPPER']]) - - def test_rjust(self): - assert issubclass(self.A.rjust(10).dtype.type, np.string_) - widths = np.array([[10, 20]]) - C = self.A.rjust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.rjust(20, asbytes('#')) - assert np.all(C.startswith(asbytes('#'))) - assert_array_equal(C.endswith(asbytes('#')), - [[False, True], [False, False], [False, False]]) - C = np.char.rjust(asbytes('FOO'), [[10, 20], [15, 8]]) - assert issubclass(C.dtype.type, np.string_) - assert_array_equal(C, asbytes_nested([ - [' FOO', ' FOO'], - [' FOO', ' FOO']])) - - def test_rpartition(self): - if sys.version_info >= (2, 5): - P = self.A.rpartition(asbytes_nested(['3', 'M'])) - assert issubclass(P.dtype.type, np.string_) - assert_array_equal(P, asbytes_nested([ - [('', '', ' abc '), ('', '', '')], - [('12', '3', '45'), ('', 'M', 'ixedCase')], - [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]])) - - def test_rsplit(self): - A = self.A.rsplit(asbytes('3')) - assert issubclass(A.dtype.type, np.object_) - assert_equal(A.tolist(), asbytes_nested([ - [[' abc '], ['']], - [['12', '45'], ['MixedCase']], - [['12', ' \t ', '45 \x00 '], ['UPPER']]])) - - def test_rstrip(self): - assert issubclass(self.A.rstrip().dtype.type, np.string_) - assert_array_equal(self.A.rstrip(), asbytes_nested([ - [' abc', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']])) - assert_array_equal(self.A.rstrip(asbytes_nested(['5', 'ER'])), - asbytes_nested([ - [' abc ', ''], - ['1234', 'MixedCase'], - ['123 \t 345 \x00', 'UPP']])) - assert issubclass(self.B.rstrip().dtype.type, np.unicode_) - assert_array_equal(self.B.rstrip(), [ - [u' \u03a3', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 
'UPPER']]) - - def test_strip(self): - assert issubclass(self.A.strip().dtype.type, np.string_) - assert_array_equal(self.A.strip(), asbytes_nested([ - ['abc', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']])) - assert_array_equal(self.A.strip(asbytes_nested(['15', 'EReM'])), - asbytes_nested([ - [' abc ', ''], - ['234', 'ixedCas'], - ['23 \t 345 \x00', 'UPP']])) - assert issubclass(self.B.strip().dtype.type, np.unicode_) - assert_array_equal(self.B.strip(), [ - [u'\u03a3', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']]) - - def test_split(self): - A = self.A.split(asbytes('3')) - assert issubclass(A.dtype.type, np.object_) - assert_equal(A.tolist(), asbytes_nested([ - [[' abc '], ['']], - [['12', '45'], ['MixedCase']], - [['12', ' \t ', '45 \x00 '], ['UPPER']]])) - - def test_splitlines(self): - A = np.char.array(['abc\nfds\nwer']).splitlines() - assert issubclass(A.dtype.type, np.object_) - assert A.shape == (1,) - assert len(A[0]) == 3 - - def test_swapcase(self): - assert issubclass(self.A.swapcase().dtype.type, np.string_) - assert_array_equal(self.A.swapcase(), asbytes_nested([ - [' ABC ', ''], - ['12345', 'mIXEDcASE'], - ['123 \t 345 \0 ', 'upper']])) - assert issubclass(self.B.swapcase().dtype.type, np.unicode_) - assert_array_equal(self.B.swapcase(), [ - [u' \u03c3 ', u''], - [u'12345', u'mIXEDcASE'], - [u'123 \t 345 \0 ', u'upper']]) - - def test_title(self): - assert issubclass(self.A.title().dtype.type, np.string_) - assert_array_equal(self.A.title(), asbytes_nested([ - [' Abc ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']])) - assert issubclass(self.B.title().dtype.type, np.unicode_) - assert_array_equal(self.B.title(), [ - [u' \u03a3 ', u''], - [u'12345', u'Mixedcase'], - [u'123 \t 345 \0 ', u'Upper']]) - - def test_upper(self): - assert issubclass(self.A.upper().dtype.type, np.string_) - assert_array_equal(self.A.upper(), asbytes_nested([ - [' ABC ', ''], - ['12345', 'MIXEDCASE'], - ['123 \t 345 \0 ', 'UPPER']])) 
- assert issubclass(self.B.upper().dtype.type, np.unicode_) - assert_array_equal(self.B.upper(), [ - [u' \u03a3 ', u''], - [u'12345', u'MIXEDCASE'], - [u'123 \t 345 \0 ', u'UPPER']]) - - def test_isnumeric(self): - def fail(): - self.A.isnumeric() - self.assertRaises(TypeError, fail) - assert issubclass(self.B.isnumeric().dtype.type, np.bool_) - assert_array_equal(self.B.isnumeric(), [ - [False, False], [True, False], [False, False]]) - - def test_isdecimal(self): - def fail(): - self.A.isdecimal() - self.assertRaises(TypeError, fail) - assert issubclass(self.B.isdecimal().dtype.type, np.bool_) - assert_array_equal(self.B.isdecimal(), [ - [False, False], [True, False], [False, False]]) - - -class TestOperations(TestCase): - def setUp(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.chararray) - - def test_add(self): - AB = np.array([['abcefg', '123456'], - ['789051', 'xyztuv']]).view(np.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert len((self.A + self.B)[0][0]) == 6 - - def test_radd(self): - QA = np.array([['qabc', 'q123'], - ['q789', 'qxyz']]).view(np.chararray) - assert_array_equal(QA, ('q' + self.A)) - - def test_mul(self): - A = self.A - for r in (2,3,5,7,197): - Ar = np.array([[A[0,0]*r, A[0,1]*r], - [A[1,0]*r, A[1,1]*r]]).view(np.chararray) - - assert_array_equal(Ar, (self.A * r)) - - for ob in [object(), 'qrs']: - try: - A * ob - except ValueError: - pass - else: - self.fail("chararray can only be multiplied by integers") - - def test_rmul(self): - A = self.A - for r in (2,3,5,7,197): - Ar = np.array([[A[0,0]*r, A[0,1]*r], - [A[1,0]*r, A[1,1]*r]]).view(np.chararray) - assert_array_equal(Ar, (r * self.A)) - - for ob in [object(), 'qrs']: - try: - ob * A - except ValueError: - pass - else: - self.fail("chararray can only be multiplied by integers") - - def test_mod(self): - """Ticket #856""" - F = np.array([['%d', 
'%f'],['%s','%r']]).view(np.chararray) - C = np.array([[3,7],[19,1]]) - FC = np.array([['3', '7.000000'], - ['19', '1']]).view(np.chararray) - assert_array_equal(FC, F % C) - - A = np.array([['%.3f','%d'],['%s','%r']]).view(np.chararray) - A1 = np.array([['1.000','1'],['1','1']]).view(np.chararray) - assert_array_equal(A1, (A % 1)) - - A2 = np.array([['1.000','2'],['3','4']]).view(np.chararray) - assert_array_equal(A2, (A % [[1,2],[3,4]])) - - def test_rmod(self): - assert ("%s" % self.A) == str(self.A) - assert ("%r" % self.A) == repr(self.A) - - for ob in [42, object()]: - try: - ob % self.A - except TypeError: - pass - else: - self.fail("chararray __rmod__ should fail with " \ - "non-string objects") - - -def test_empty_indexing(): - """Regression test for ticket 1948.""" - # Check that indexing a chararray with an empty list/array returns an - # empty chararray instead of a chararray with a single empty string in it. - s = np.chararray((4,)) - assert_(s[[]].size == 0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_dtype.py b/numpy-1.6.2/numpy/core/tests/test_dtype.py deleted file mode 100644 index 1c0658f844..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_dtype.py +++ /dev/null @@ -1,173 +0,0 @@ -import numpy as np -from numpy.testing import * - -def assert_dtype_equal(a, b): - assert_equal(a, b) - assert_equal(hash(a), hash(b), - "two equivalent types do not hash to the same value !") - -def assert_dtype_not_equal(a, b): - assert_(a != b) - assert_(hash(a) != hash(b), - "two different types hash to the same value !") - -class TestBuiltin(TestCase): - def test_run(self): - """Only test hash runs at all.""" - for t in [np.int, np.float, np.complex, np.int32, np.str, np.object, - np.unicode]: - dt = np.dtype(t) - hash(dt) - - def test_dtype(self): - # Make sure equivalent byte order char hash the same (e.g. 
< and = on - # little endian) - for t in [np.int, np.float]: - dt = np.dtype(t) - dt2 = dt.newbyteorder("<") - dt3 = dt.newbyteorder(">") - if dt == dt2: - self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test") - assert_dtype_equal(dt, dt2) - else: - self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test") - assert_dtype_equal(dt, dt3) - - def test_equivalent_dtype_hashing(self): - # Make sure equivalent dtypes with different type num hash equal - uintp = np.dtype(np.uintp) - if uintp.itemsize == 4: - left = uintp - right = np.dtype(np.uint32) - else: - left = uintp - right = np.dtype(np.ulonglong) - self.assertTrue(left == right) - self.assertTrue(hash(left) == hash(right)) - -class TestRecord(TestCase): - def test_equivalent_record(self): - """Test whether equivalent record dtypes hash the same.""" - a = np.dtype([('yo', np.int)]) - b = np.dtype([('yo', np.int)]) - assert_dtype_equal(a, b) - - def test_different_names(self): - # In theory, they may hash the same (collision) ? - a = np.dtype([('yo', np.int)]) - b = np.dtype([('ye', np.int)]) - assert_dtype_not_equal(a, b) - - def test_different_titles(self): - # In theory, they may hash the same (collision) ? - a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], - 'titles': ['Red pixel', 'Blue pixel']}) - b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], - 'titles': ['RRed pixel', 'Blue pixel']}) - assert_dtype_not_equal(a, b) - - def test_not_lists(self): - """Test if an appropriate exception is raised when passing bad values to - the dtype constructor. 
- """ - self.assertRaises(TypeError, np.dtype, - dict(names=set(['A', 'B']), formats=['f8', 'i4'])) - self.assertRaises(TypeError, np.dtype, - dict(names=['A', 'B'], formats=set(['f8', 'i4']))) - -class TestSubarray(TestCase): - def test_single_subarray(self): - a = np.dtype((np.int, (2))) - b = np.dtype((np.int, (2,))) - assert_dtype_equal(a, b) - - assert_equal(type(a.subdtype[1]), tuple) - assert_equal(type(b.subdtype[1]), tuple) - - def test_equivalent_record(self): - """Test whether equivalent subarray dtypes hash the same.""" - a = np.dtype((np.int, (2, 3))) - b = np.dtype((np.int, (2, 3))) - assert_dtype_equal(a, b) - - def test_nonequivalent_record(self): - """Test whether different subarray dtypes hash differently.""" - a = np.dtype((np.int, (2, 3))) - b = np.dtype((np.int, (3, 2))) - assert_dtype_not_equal(a, b) - - a = np.dtype((np.int, (2, 3))) - b = np.dtype((np.int, (2, 2))) - assert_dtype_not_equal(a, b) - - a = np.dtype((np.int, (1, 2, 3))) - b = np.dtype((np.int, (1, 2))) - assert_dtype_not_equal(a, b) - - def test_shape_equal(self): - """Test some data types that are equal""" - assert_dtype_equal(np.dtype('f8'), np.dtype(('f8',tuple()))) - assert_dtype_equal(np.dtype('f8'), np.dtype(('f8',1))) - assert_dtype_equal(np.dtype((np.int,2)), np.dtype((np.int,(2,)))) - assert_dtype_equal(np.dtype(('..j", [0,0]) - assert_raises(ValueError, np.einsum, "j->.j...", [0,0]) - - # invalid subscript character - assert_raises(ValueError, np.einsum, "i%...", [0,0]) - assert_raises(ValueError, np.einsum, "...j$", [0,0]) - assert_raises(ValueError, np.einsum, "i->&", [0,0]) - - # output subscripts must appear in input - assert_raises(ValueError, np.einsum, "i->ij", [0,0]) - - # output subscripts may only be specified once - assert_raises(ValueError, np.einsum, "ij->jij", [[0,0],[0,0]]) - - # dimensions much match when being collapsed - assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2,3)) - assert_raises(ValueError, np.einsum, "ii->i", 
np.arange(6).reshape(2,3)) - - # broadcasting to new dimensions must be enabled explicitly - assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2,3)) - assert_raises(ValueError, np.einsum, "i->i", [[0,1],[0,1]], - out=np.arange(4).reshape(2,2)) - - def test_einsum_views(self): - # pass-through - a = np.arange(6).reshape(2,3) - - b = np.einsum("...", a) - assert_(b.base is a) - - b = np.einsum(a, [Ellipsis]) - assert_(b.base is a) - - b = np.einsum("ij", a) - assert_(b.base is a) - assert_equal(b, a) - - b = np.einsum(a, [0,1]) - assert_(b.base is a) - assert_equal(b, a) - - # transpose - a = np.arange(6).reshape(2,3) - - b = np.einsum("ji", a) - assert_(b.base is a) - assert_equal(b, a.T) - - b = np.einsum(a, [1,0]) - assert_(b.base is a) - assert_equal(b, a.T) - - # diagonal - a = np.arange(9).reshape(3,3) - - b = np.einsum("ii->i", a) - assert_(b.base is a) - assert_equal(b, [a[i,i] for i in range(3)]) - - b = np.einsum(a, [0,0], [0]) - assert_(b.base is a) - assert_equal(b, [a[i,i] for i in range(3)]) - - # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27).reshape(3,3,3) - - b = np.einsum("...ii->...i", a) - assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] for x in a]) - - b = np.einsum(a, [Ellipsis,0,0], [Ellipsis,0]) - assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] for x in a]) - - b = np.einsum("ii...->...i", a) - assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(2,0,1)]) - - b = np.einsum(a, [0,0,Ellipsis], [Ellipsis,0]) - assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(2,0,1)]) - - b = np.einsum("...ii->i...", a) - assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) - - b = np.einsum(a, [Ellipsis,0,0], [0,Ellipsis]) - assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) - - b = np.einsum("jii->ij", a) - assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i 
in range(3)]) - - b = np.einsum(a, [1,0,0], [0,1]) - assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) - - b = np.einsum("ii...->i...", a) - assert_(b.base is a) - assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)]) - - b = np.einsum(a, [0,0,Ellipsis], [0,Ellipsis]) - assert_(b.base is a) - assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)]) - - b = np.einsum("i...i->i...", a) - assert_(b.base is a) - assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)]) - - b = np.einsum(a, [0,Ellipsis,0], [0,Ellipsis]) - assert_(b.base is a) - assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)]) - - b = np.einsum("i...i->...i", a) - assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(1,0,2)]) - - b = np.einsum(a, [0,Ellipsis,0], [Ellipsis,0]) - assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(1,0,2)]) - - # triple diagonal - a = np.arange(27).reshape(3,3,3) - - b = np.einsum("iii->i", a) - assert_(b.base is a) - assert_equal(b, [a[i,i,i] for i in range(3)]) - - b = np.einsum(a, [0,0,0], [0]) - assert_(b.base is a) - assert_equal(b, [a[i,i,i] for i in range(3)]) - - # swap axes - a = np.arange(24).reshape(2,3,4) - - b = np.einsum("ijk->jik", a) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0,1)) - - b = np.einsum(a, [0,1,2], [1,0,2]) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0,1)) - - def check_einsum_sums(self, dtype): - # Check various sums. Does many sizes to exercise unrolled loops. 
- - # sum(a, axis=-1) - for n in range(1,17): - a = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [0], []), - np.sum(a, axis=-1).astype(dtype)) - - for n in range(1,17): - a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) - assert_equal(np.einsum("...i->...", a), - np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [Ellipsis,0], [Ellipsis]), - np.sum(a, axis=-1).astype(dtype)) - - # sum(a, axis=0) - for n in range(1,17): - a = np.arange(2*n, dtype=dtype).reshape(2,n) - assert_equal(np.einsum("i...->...", a), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]), - np.sum(a, axis=0).astype(dtype)) - - for n in range(1,17): - a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) - assert_equal(np.einsum("i...->...", a), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]), - np.sum(a, axis=0).astype(dtype)) - - # trace(a) - for n in range(1,17): - a = np.arange(n*n, dtype=dtype).reshape(n,n) - assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype)) - assert_equal(np.einsum(a, [0,0]), np.trace(a).astype(dtype)) - - # multiply(a, b) - for n in range(1,17): - a = np.arange(3*n, dtype=dtype).reshape(3,n) - b = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) - assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b)) - assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]), - np.multiply(a, b)) - - # inner(a,b) - for n in range(1,17): - a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b)) - assert_equal(np.einsum(a, [Ellipsis,0], b, [Ellipsis,0]), - np.inner(a, b)) - - for n in range(1,11): - a = np.arange(n*3*2, dtype=dtype).reshape(n,3,2) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T) - assert_equal(np.einsum(a, [0,Ellipsis], b, [0,Ellipsis]), - np.inner(a.T, b.T).T) - - # 
outer(a,b) - for n in range(1,17): - a = np.arange(3, dtype=dtype)+1 - b = np.arange(n, dtype=dtype)+1 - assert_equal(np.einsum("i,j", a, b), np.outer(a, b)) - assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b)) - - # Suppress the complex warnings for the 'as f8' tests - ctx = WarningManager() - ctx.__enter__() - try: - warnings.simplefilter('ignore', np.ComplexWarning) - - # matvec(a,b) / a.dot(b) where a is matrix, b is vector - for n in range(1,17): - a = np.arange(4*n, dtype=dtype).reshape(4,n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ij, j", a, b), np.dot(a, b)) - assert_equal(np.einsum(a, [0,1], b, [1]), np.dot(a, b)) - - c = np.arange(4, dtype=dtype) - np.einsum("ij,j", a, b, out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0,1], b, [1], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - for n in range(1,17): - a = np.arange(4*n, dtype=dtype).reshape(4,n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T)) - assert_equal(np.einsum(a.T, [1,0], b.T, [1]), np.dot(b.T, a.T)) - - c = np.arange(4, dtype=dtype) - np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - c[...] 
= 0 - np.einsum(a.T, [1,0], b.T, [1], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - - # matmat(a,b) / a.dot(b) where a is matrix, b is matrix - for n in range(1,17): - if n < 8 or dtype != 'f2': - a = np.arange(4*n, dtype=dtype).reshape(4,n) - b = np.arange(n*6, dtype=dtype).reshape(n,6) - assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b)) - assert_equal(np.einsum(a, [0,1], b, [1,2]), np.dot(a, b)) - - for n in range(1,17): - a = np.arange(4*n, dtype=dtype).reshape(4,n) - b = np.arange(n*6, dtype=dtype).reshape(n,6) - c = np.arange(24, dtype=dtype).reshape(4,6) - np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0,1], b, [1,2], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - # matrix triple product (note this is not currently an efficient - # way to multiply 3 matrices) - a = np.arange(12, dtype=dtype).reshape(3,4) - b = np.arange(20, dtype=dtype).reshape(4,5) - c = np.arange(30, dtype=dtype).reshape(5,6) - if dtype != 'f2': - assert_equal(np.einsum("ij,jk,kl", a, b, c), - a.dot(b).dot(c)) - assert_equal(np.einsum(a, [0,1], b, [1,2], c, [2,3]), - a.dot(b).dot(c)) - - d = np.arange(18, dtype=dtype).reshape(3,6) - np.einsum("ij,jk,kl", a, b, c, out=d, - dtype='f8', casting='unsafe') - assert_equal(d, a.astype('f8').dot(b.astype('f8') - ).dot(c.astype('f8')).astype(dtype)) - d[...] 
= 0 - np.einsum(a, [0,1], b, [1,2], c, [2,3], out=d, - dtype='f8', casting='unsafe') - assert_equal(d, a.astype('f8').dot(b.astype('f8') - ).dot(c.astype('f8')).astype(dtype)) - - # tensordot(a, b) - if np.dtype(dtype) != np.dtype('f2'): - a = np.arange(60, dtype=dtype).reshape(3,4,5) - b = np.arange(24, dtype=dtype).reshape(4,3,2) - assert_equal(np.einsum("ijk, jil -> kl", a, b), - np.tensordot(a,b, axes=([1,0],[0,1]))) - assert_equal(np.einsum(a, [0,1,2], b, [1,0,3], [2,3]), - np.tensordot(a,b, axes=([1,0],[0,1]))) - - c = np.arange(10, dtype=dtype).reshape(5,2) - np.einsum("ijk,jil->kl", a, b, out=c, - dtype='f8', casting='unsafe') - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1,0],[0,1])).astype(dtype)) - c[...] = 0 - np.einsum(a, [0,1,2], b, [1,0,3], [2,3], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1,0],[0,1])).astype(dtype)) - finally: - ctx.__exit__() - - # logical_and(logical_and(a!=0, b!=0), c!=0) - a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) - b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) - c = np.array([True,True,False,True,True,False,True,True]) - assert_equal(np.einsum("i,i,i->i", a, b, c, - dtype='?', casting='unsafe'), - np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) - assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], - dtype='?', casting='unsafe'), - np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) - - a = np.arange(9, dtype=dtype) - assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) - assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) - assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) - assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) - - # Various stride0, contiguous, and SSE aligned variants - for n in range(1,25): - a = np.arange(n, dtype=dtype) - if np.dtype(dtype).itemsize > 1: - assert_equal(np.einsum("...,...",a,a), np.multiply(a,a)) - assert_equal(np.einsum("i,i", a, a), np.dot(a,a)) - 
assert_equal(np.einsum("i,->i", a, 2), 2*a) - assert_equal(np.einsum(",i->i", 2, a), 2*a) - assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a)) - assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a)) - - assert_equal(np.einsum("...,...",a[1:],a[:-1]), - np.multiply(a[1:],a[:-1])) - assert_equal(np.einsum("i,i", a[1:], a[:-1]), - np.dot(a[1:],a[:-1])) - assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:]) - assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:]) - assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:])) - assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:])) - - # An object array, summed as the data type - a = np.arange(9, dtype=object) - - b = np.einsum("i->", a, dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - # A case which was failing (ticket #1885) - p = np.arange(2) + 1 - q = np.arange(4).reshape(2,2) + 3 - r = np.arange(4).reshape(2,2) + 7 - assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) - - def test_einsum_sums_int8(self): - self.check_einsum_sums('i1'); - - def test_einsum_sums_uint8(self): - self.check_einsum_sums('u1'); - - def test_einsum_sums_int16(self): - self.check_einsum_sums('i2'); - - def test_einsum_sums_uint16(self): - self.check_einsum_sums('u2'); - - def test_einsum_sums_int32(self): - self.check_einsum_sums('i4'); - - def test_einsum_sums_uint32(self): - self.check_einsum_sums('u4'); - - def test_einsum_sums_int64(self): - self.check_einsum_sums('i8'); - - def test_einsum_sums_uint64(self): - self.check_einsum_sums('u8'); - - def test_einsum_sums_float16(self): - self.check_einsum_sums('f2'); - - def test_einsum_sums_float32(self): - self.check_einsum_sums('f4'); - - def test_einsum_sums_float64(self): - self.check_einsum_sums('f8'); - - def test_einsum_sums_longdouble(self): - self.check_einsum_sums(np.longdouble); - - def 
test_einsum_sums_cfloat64(self): - self.check_einsum_sums('c8'); - - def test_einsum_sums_cfloat128(self): - self.check_einsum_sums('c16'); - - def test_einsum_sums_clongdouble(self): - self.check_einsum_sums(np.clongdouble); - - def test_einsum_misc(self): - # This call used to crash because of a bug in - # PyArray_FillWithZero - a = np.ones((1,2)) - b = np.ones((2,2,1)) - assert_equal(np.einsum('ij...,j...->i...',a,b), [[[2],[2]]]) - - # The iterator had an issue with buffering this reduction - a = np.ones((5, 12, 4, 2, 3), np.int64) - b = np.ones((5, 12, 11), np.int64) - assert_equal(np.einsum('ijklm,ijn,ijn->',a,b,b), - np.einsum('ijklm,ijn->',a,b)) - - # Issue #2027, was a problem in the contiguous 3-argument - # inner loop implementation - a = np.arange(1, 3) - b = np.arange(1, 5).reshape(2, 2) - c = np.arange(1, 9).reshape(4, 2) - assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), - [[[1, 3], [3, 9], [5, 15], [7, 21]], - [[8, 16], [16, 32], [24, 48], [32, 64]]]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_errstate.py b/numpy-1.6.2/numpy/core/tests/test_errstate.py deleted file mode 100644 index 732bb3e93b..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_errstate.py +++ /dev/null @@ -1,57 +0,0 @@ -# The following exec statement (or something like it) is needed to -# prevent SyntaxError on Python < 2.5. Even though this is a test, -# SyntaxErrors are not acceptable; on Debian systems, they block -# byte-compilation during install and thus cause the package to fail -# to install. - -import sys -if sys.version_info[:2] >= (2, 5): - exec """ -from __future__ import with_statement -from numpy.core import * -from numpy.random import rand, randint -from numpy.testing import * - -class TestErrstate(TestCase): - def test_invalid(self): - with errstate(all='raise', under='ignore'): - a = -arange(3) - # This should work - with errstate(invalid='ignore'): - sqrt(a) - # While this should fail! 
- try: - sqrt(a) - except FloatingPointError: - pass - else: - self.fail("Did not raise an invalid error") - - def test_divide(self): - with errstate(all='raise', under='ignore'): - a = -arange(3) - # This should work - with errstate(divide='ignore'): - a // 0 - # While this should fail! - try: - a // 0 - except FloatingPointError: - pass - else: - self.fail("Did not raise divide by zero error") - - def test_errcall(self): - def foo(*args): - print(args) - olderrcall = geterrcall() - with errstate(call=foo): - assert(geterrcall() is foo), 'call is not foo' - with errstate(call=None): - assert(geterrcall() is None), 'call is not None' - assert(geterrcall() is olderrcall), 'call is not olderrcall' - -""" - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_function_base.py b/numpy-1.6.2/numpy/core/tests/test_function_base.py deleted file mode 100644 index 67ce8953f1..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_function_base.py +++ /dev/null @@ -1,37 +0,0 @@ - -from numpy.testing import * -from numpy import logspace, linspace - -class TestLogspace(TestCase): - def test_basic(self): - y = logspace(0,6) - assert(len(y)==50) - y = logspace(0,6,num=100) - assert(y[-1] == 10**6) - y = logspace(0,6,endpoint=0) - assert(y[-1] < 10**6) - y = logspace(0,6,num=7) - assert_array_equal(y,[1,10,100,1e3,1e4,1e5,1e6]) - -class TestLinspace(TestCase): - def test_basic(self): - y = linspace(0,10) - assert(len(y)==50) - y = linspace(2,10,num=100) - assert(y[-1] == 10) - y = linspace(2,10,endpoint=0) - assert(y[-1] < 10) - - def test_corner(self): - y = list(linspace(0,1,1)) - assert y == [0.0], y - y = list(linspace(0,1,2.5)) - assert y == [0.0, 1.0] - - def test_type(self): - t1 = linspace(0,1,0).dtype - t2 = linspace(0,1,1).dtype - t3 = linspace(0,1,2).dtype - assert_equal(t1, t2) - assert_equal(t2, t3) - diff --git a/numpy-1.6.2/numpy/core/tests/test_getlimits.py b/numpy-1.6.2/numpy/core/tests/test_getlimits.py deleted file mode 
100644 index 569dc0cc62..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_getlimits.py +++ /dev/null @@ -1,64 +0,0 @@ -""" Test functions for limits module. -""" - -from numpy.testing import * - -from numpy.core import finfo, iinfo -from numpy import half, single, double, longdouble -import numpy as np - -################################################## - -class TestPythonFloat(TestCase): - def test_singleton(self): - ftype = finfo(float) - ftype2 = finfo(float) - assert_equal(id(ftype),id(ftype2)) - -class TestHalf(TestCase): - def test_singleton(self): - ftype = finfo(half) - ftype2 = finfo(half) - assert_equal(id(ftype),id(ftype2)) - -class TestSingle(TestCase): - def test_singleton(self): - ftype = finfo(single) - ftype2 = finfo(single) - assert_equal(id(ftype),id(ftype2)) - -class TestDouble(TestCase): - def test_singleton(self): - ftype = finfo(double) - ftype2 = finfo(double) - assert_equal(id(ftype),id(ftype2)) - -class TestLongdouble(TestCase): - def test_singleton(self,level=2): - ftype = finfo(longdouble) - ftype2 = finfo(longdouble) - assert_equal(id(ftype),id(ftype2)) - -class TestIinfo(TestCase): - def test_basic(self): - dts = zip(['i1', 'i2', 'i4', 'i8', - 'u1', 'u2', 'u4', 'u8'], - [np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64]) - for dt1, dt2 in dts: - assert_equal(iinfo(dt1).min, iinfo(dt2).min) - assert_equal(iinfo(dt1).max, iinfo(dt2).max) - self.assertRaises(ValueError, iinfo, 'f4') - - def test_unsigned_max(self): - types = np.sctypes['uint'] - for T in types: - assert_equal(iinfo(T).max, T(-1)) - - -def test_instances(): - iinfo(10) - finfo(3.0) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_half.py b/numpy-1.6.2/numpy/core/tests/test_half.py deleted file mode 100644 index 191ec28a05..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_half.py +++ /dev/null @@ -1,442 +0,0 @@ -import warnings -import numpy as np -from numpy import uint16, float16, 
float32, float64 -from numpy.testing import TestCase, run_module_suite, assert_, assert_equal - - -def assert_raises_fpe(strmatch, callable, *args, **kwargs): - try: - callable(*args, **kwargs) - except FloatingPointError, exc: - assert_(str(exc).find(strmatch) >= 0, - "Did not raise floating point %s error" % strmatch) - else: - assert_(False, - "Did not raise floating point %s error" % strmatch) - -class TestHalf(TestCase): - def setUp(self): - # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) - - # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( - (np.arange(0xfc00,0x7fff,-1, dtype=uint16), - np.arange(0x0000,0x7c01,1, dtype=uint16)) - ) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] - - def test_half_conversions(self): - """Checks that all 16-bit values survive conversion - to/from 32-bit and 64-bit float""" - # Because the underlying routines preserve the NaN bits, every - # value is preserved when converting to/from other floats. - - # Convert from float32 back to float16 - b = np.array(self.all_f32, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert from float64 back to float16 - b = np.array(self.all_f64, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert float16 to longdouble and back - # This doesn't necessarily preserve the extra NaN bits, - # so exclude NaNs. 
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) - b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Check the range for which all integers can be represented - i_int = np.arange(-2048,2049) - i_f16 = np.array(i_int, dtype=float16) - j = np.array(i_f16, dtype=np.int) - assert_equal(i_int,j) - - def test_nans_infs(self): - oldsettings = np.seterr(all='ignore') - try: - # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) - assert_equal(np.spacing(float16(65504)), np.inf) - - # Check comparisons of all values with NaN - nan = float16(np.nan) - - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) - - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) - - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) - - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) - - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) - - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) - finally: - np.seterr(**oldsettings) - - - def test_half_values(self): - """Confirms a small number of known half values""" - a = np.array([1.0, -1.0, - 2.0, -2.0, - 0.0999755859375, 0.333251953125, # 1/10, 1/3 - 65504, -65504, # Maximum magnitude - 2.0**(-14), -2.0**(-14), # Minimum normal - 2.0**(-24), -2.0**(-24), # Minimum subnormal - 0, -1/1e1000, # Signed zeros - np.inf, -np.inf]) - b = np.array([0x3c00, 0xbc00, - 0x4000, 0xc000, - 0x2e66, 0x3555, - 0x7bff, 0xfbff, - 0x0400, 0x8400, - 0x0001, 0x8001, - 0x0000, 0x8000, - 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 - assert_equal(a, b) - - def 
test_half_rounding(self): - """Checks that rounding when converting to half is correct""" - a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal - 2.0**-25, # Underflows to zero (nearest even mode) - 2.0**-26, # Underflows to zero - 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) - 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) - 1.0+2.0**-12, # rounds to 1.0 - 65519, # rounds to 65504 - 65520], # rounds to inf - dtype=float64) - rounded = [2.0**-24, - 0.0, - 0.0, - 1.0+2.0**(-10), - 1.0, - 1.0, - 65504, - np.inf] - - # Check float64->float16 rounding - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - # Check float32->float16 rounding - a = np.array(a, dtype=float32) - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - def test_half_correctness(self): - """Take every finite float16, and check the casting functions with - a manual conversion.""" - - # Create an array of all finite float16s - a_f16 = self.finite_f16 - a_bits = a_f16.view(dtype=uint16) - - # Convert to 64-bit float manually - a_sgn = (-1.0)**((a_bits&0x8000) >> 15) - a_exp = np.array((a_bits&0x7c00) >> 10, dtype=np.int32) - 15 - a_man = (a_bits&0x03ff) * 2.0**(-10) - # Implicit bit of normalized floats - a_man[a_exp!=-15] += 1 - # Denormalized exponent is -14 - a_exp[a_exp==-15] = -14 - - a_manual = a_sgn * a_man * 2.0**a_exp - - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] - if len(a32_fail) != 0: - bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, - "First non-equal is half value %x -> %g != %g" % - (a[bad_index], - self.finite_f32[bad_index], - a_manual[bad_index])) - - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] - if len(a64_fail) != 0: - bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, - "First non-equal is half value %x -> %g != %g" % - (a[bad_index], - self.finite_f64[bad_index], - a_manual[bad_index])) - - def test_half_ordering(self): - """Make sure comparisons are working right""" - - # All non-NaN 
float16 values in reverse order - a = self.nonan_f16[::-1].copy() - - # 32-bit float copy - b = np.array(a, dtype=float32) - - # Should sort the same - a.sort() - b.sort() - assert_equal(a, b) - - # Comparisons should work - assert_((a[:-1] <= a[1:]).all()) - assert_(not (a[:-1] > a[1:]).any()) - assert_((a[1:] >= a[:-1]).all()) - assert_(not (a[1:] < a[:-1]).any()) - # All != except for +/-0 - assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) - assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) - - def test_half_funcs(self): - """Test the various ArrFuncs""" - - # fill - assert_equal(np.arange(10, dtype=float16), - np.arange(10, dtype=float32)) - - # fillwithscalar - a = np.zeros((5,), dtype=float16) - a.fill(1) - assert_equal(a, np.ones((5,), dtype=float16)) - - # nonzero and copyswap - a = np.array([0,0,-1,-1/1e20,0,2.0**-24, 7.629e-6], dtype=float16) - assert_equal(a.nonzero()[0], - [2,5,6]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], - [2,5,6]) - - # dot - a = np.arange(0, 10, 0.5, dtype=float16) - b = np.ones((20,), dtype=float16) - assert_equal(np.dot(a,b), - 95) - - # argmax - a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 4) - a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 5) - - # getitem - a = np.arange(10, dtype=float16) - for i in range(10): - assert_equal(a.item(i),i) - - def test_spacing_nextafter(self): - """Test np.spacing and np.nextafter""" - # All non-negative finite #'s - a = np.arange(0x7c00, dtype=uint16) - hinf = np.array((np.inf,), dtype=float16) - a_f16 = a.view(dtype=float16) - - assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) - - assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) - assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) - - # switch to negatives - a |= 0x8000 - - 
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) - assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) - - assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) - assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) - - - def test_half_ufuncs(self): - """Test the various ufuncs""" - - a = np.array([0,1,2,4,2], dtype=float16) - b = np.array([-2,5,1,4,3], dtype=float16) - c = np.array([0,-1,-np.inf,np.nan,6], dtype=float16) - - assert_equal(np.add(a,b), [-2,6,3,8,5]) - assert_equal(np.subtract(a,b), [2,-4,1,0,-1]) - assert_equal(np.multiply(a,b), [0,5,2,16,6]) - assert_equal(np.divide(a,b), [0,0.199951171875,2,1,0.66650390625]) - - assert_equal(np.equal(a,b), [False,False,False,True,False]) - assert_equal(np.not_equal(a,b), [True,True,True,False,True]) - assert_equal(np.less(a,b), [False,True,False,False,True]) - assert_equal(np.less_equal(a,b), [False,True,False,True,True]) - assert_equal(np.greater(a,b), [True,False,True,False,False]) - assert_equal(np.greater_equal(a,b), [True,False,True,True,False]) - assert_equal(np.logical_and(a,b), [False,True,True,True,True]) - assert_equal(np.logical_or(a,b), [True,True,True,True,True]) - assert_equal(np.logical_xor(a,b), [True,False,False,False,False]) - assert_equal(np.logical_not(a), [True,False,False,False,False]) - - assert_equal(np.isnan(c), [False,False,False,True,False]) - assert_equal(np.isinf(c), [False,False,True,False,False]) - assert_equal(np.isfinite(c), [True,True,False,False,True]) - assert_equal(np.signbit(b), [True,False,False,False,False]) - - assert_equal(np.copysign(b,a), [2,5,1,4,3]) - - assert_equal(np.maximum(a,b), [0,5,2,4,3]) - x = np.maximum(b,c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [0,5,1,0,6]) - assert_equal(np.minimum(a,b), [-2,1,1,4,2]) - x = np.minimum(b,c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [-2,-1,-np.inf,0,3]) - assert_equal(np.fmax(a,b), [0,5,2,4,3]) - assert_equal(np.fmax(b,c), 
[0,5,1,4,6]) - assert_equal(np.fmin(a,b), [-2,1,1,4,2]) - assert_equal(np.fmin(b,c), [-2,-1,-np.inf,4,3]) - - assert_equal(np.floor_divide(a,b), [0,0,2,1,0]) - assert_equal(np.remainder(a,b), [0,1,0,0,2]) - assert_equal(np.square(b), [4,25,1,16,9]) - assert_equal(np.reciprocal(b), [-0.5,0.199951171875,1,0.25,0.333251953125]) - assert_equal(np.ones_like(b), [1,1,1,1,1]) - assert_equal(np.conjugate(b), b) - assert_equal(np.absolute(b), [2,5,1,4,3]) - assert_equal(np.negative(b), [2,-5,-1,-4,-3]) - assert_equal(np.sign(b), [-1,1,1,1,1]) - assert_equal(np.modf(b), ([0,0,0,0,0],b)) - assert_equal(np.frexp(b), ([-0.5,0.625,0.5,0.5,0.75],[2,3,1,3,2])) - assert_equal(np.ldexp(b,[0,1,2,4,2]), [-2,10,4,64,12]) - - def test_half_coercion(self): - """Test that half gets coerced properly with the other types""" - a16 = np.array((1,),dtype=float16) - a32 = np.array((1,),dtype=float32) - b16 = float16(1) - b32 = float32(1) - - assert_equal(np.power(a16,2).dtype, float16) - assert_equal(np.power(a16,2.0).dtype, float16) - assert_equal(np.power(a16,b16).dtype, float16) - assert_equal(np.power(a16,b32).dtype, float16) - assert_equal(np.power(a16,a16).dtype, float16) - assert_equal(np.power(a16,a32).dtype, float32) - - assert_equal(np.power(b16,2).dtype, float64) - assert_equal(np.power(b16,2.0).dtype, float64) - assert_equal(np.power(b16,b16).dtype, float16) - assert_equal(np.power(b16,b32).dtype, float32) - assert_equal(np.power(b16,a16).dtype, float16) - assert_equal(np.power(b16,a32).dtype, float32) - - assert_equal(np.power(a32,a16).dtype, float32) - assert_equal(np.power(a32,b16).dtype, float32) - assert_equal(np.power(b32,a16).dtype, float16) - assert_equal(np.power(b32,b16).dtype, float32) - - def test_half_fpe(self): - """Test that half raises the correct underflows and overflows""" - oldsettings = np.seterr(all='raise') - try: - sx16 = np.array((1e-4,),dtype=float16) - bx16 = np.array((1e4,),dtype=float16) - sy16 = float16(1e-4) - by16 = float16(1e4) - - # Underflow errors 
- assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sx16) - assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sy16) - assert_raises_fpe('underflow', lambda a,b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a,b:a*b, sy16, sy16) - assert_raises_fpe('underflow', lambda a,b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a,b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a,b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a,b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a,b:a/b, - float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a,b:a/b, - float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a,b:a/b, - float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a,b:a/b, - float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a,b:a/b, - float16(2.**-14+2**-23), float16(4)) - - # Overflow errors - assert_raises_fpe('overflow', lambda a,b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a,b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a,b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a,b:a*b, by16, by16) - assert_raises_fpe('overflow', lambda a,b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a,b:a/b, bx16, sy16) - assert_raises_fpe('overflow', lambda a,b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a,b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a,b:a+b, - float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a,b:a-b, - float16(-65504), float16(17)) - assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) - assert_raises_fpe('overflow', np.spacing, float16(65504)) - - # Invalid value errors - assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.inf)) - assert_raises_fpe('invalid', 
np.spacing, float16(np.nan)) - assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan)) - - # These should not raise - float16(65472)+float16(32) - float16(2**-13)/float16(2) - float16(2**-14)/float16(2**10) - np.spacing(float16(-65504)) - np.nextafter(float16(65504), float16(-np.inf)) - np.nextafter(float16(-65504), float16(np.inf)) - float16(2**-14)/float16(2**10) - float16(-2**-14)/float16(2**10) - float16(2**-14+2**-23)/float16(2) - float16(-2**-14-2**-23)/float16(2) - finally: - np.seterr(**oldsettings) - - def test_half_array_interface(self): - """Test that half is compatible with __array_interface__""" - class Dummy: - pass - - a = np.ones((1,), dtype=float16) - b = Dummy() - b.__array_interface__ = a.__array_interface__ - c = np.array(b) - assert_(c.dtype == float16) - assert_equal(a, c) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_iterator.py b/numpy-1.6.2/numpy/core/tests/test_iterator.py deleted file mode 100644 index 2fbbd4bfa9..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_iterator.py +++ /dev/null @@ -1,2302 +0,0 @@ -import numpy as np -from numpy import array, arange, nditer, all -from numpy.compat import asbytes -from numpy.testing import * -import sys, warnings - -import warnings - -def iter_multi_index(i): - ret = [] - while not i.finished: - ret.append(i.multi_index) - i.iternext() - return ret - -def iter_indices(i): - ret = [] - while not i.finished: - ret.append(i.index) - i.iternext() - return ret - -def iter_iterindices(i): - ret = [] - while not i.finished: - ret.append(i.iterindex) - i.iternext() - return ret - -def test_iter_refcount(): - # Make sure the iterator doesn't leak - - # Basic - a = arange(6) - dt = np.dtype('f4').newbyteorder() - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - it = nditer(a, [], - 
[['readwrite','updateifcopy']], - casting='unsafe', - op_dtypes=[dt]) - assert_(not it.iterationneedsapi) - assert_(sys.getrefcount(a) > rc_a) - assert_(sys.getrefcount(dt) > rc_dt) - it = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - - # With a copy - a = arange(6, dtype='f4') - dt = np.dtype('f4') - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - it = nditer(a, [], - [['readwrite']], - op_dtypes=[dt]) - rc2_a = sys.getrefcount(a) - rc2_dt = sys.getrefcount(dt) - it2 = it.copy() - assert_(sys.getrefcount(a) > rc2_a) - assert_(sys.getrefcount(dt) > rc2_dt) - it = None - assert_equal(sys.getrefcount(a), rc2_a) - assert_equal(sys.getrefcount(dt), rc2_dt) - it2 = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - -def test_iter_best_order(): - # The iterator should always find the iteration order - # with increasing memory addresses - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, [], [['readonly']]) - assert_equal([x for x in i], a) - # Fortran-order - i = nditer(aview.T, [], [['readonly']]) - assert_equal([x for x in i], a) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), [], [['readonly']]) - assert_equal([x for x in i], a) - -def test_iter_c_order(): - # Test forcing C order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = 
[slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='C') - assert_equal([x for x in i], aview.ravel(order='C')) - # Fortran-order - i = nditer(aview.T, order='C') - assert_equal([x for x in i], aview.T.ravel(order='C')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), order='C') - assert_equal([x for x in i], - aview.swapaxes(0,1).ravel(order='C')) - -def test_iter_f_order(): - # Test forcing F order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='F') - assert_equal([x for x in i], aview.ravel(order='F')) - # Fortran-order - i = nditer(aview.T, order='F') - assert_equal([x for x in i], aview.T.ravel(order='F')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), order='F') - assert_equal([x for x in i], - aview.swapaxes(0,1).ravel(order='F')) - -def test_iter_c_or_f_order(): - # Test forcing any contiguous (C or F) order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='A') - assert_equal([x for x in i], 
aview.ravel(order='A')) - # Fortran-order - i = nditer(aview.T, order='A') - assert_equal([x for x in i], aview.T.ravel(order='A')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), order='A') - assert_equal([x for x in i], - aview.swapaxes(0,1).ravel(order='A')) - -def test_iter_best_order_multi_index_1d(): - # The multi-indices should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a,['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,),(1,),(2,),(3,)]) - # 1D reversed order - i = nditer(a[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(3,),(2,),(1,),(0,)]) - -def test_iter_best_order_multi_index_2d(): - # The multi-indices should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2,3),['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2)]) - # 2D Fortran-order - i = nditer(a.reshape(2,3).copy(order='F'),['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,0),(1,0),(0,1),(1,1),(0,2),(1,2)]) - # 2D reversed C-order - i = nditer(a.reshape(2,3)[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,0),(1,1),(1,2),(0,0),(0,1),(0,2)]) - i = nditer(a.reshape(2,3)[:,::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,2),(0,1),(0,0),(1,2),(1,1),(1,0)]) - i = nditer(a.reshape(2,3)[::-1,::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,2),(1,1),(1,0),(0,2),(0,1),(0,0)]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2,3).copy(order='F')[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,0),(0,0),(1,1),(0,1),(1,2),(0,2)]) - i = nditer(a.reshape(2,3).copy(order='F')[:,::-1], - ['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,2),(1,2),(0,1),(1,1),(0,0),(1,0)]) - i = nditer(a.reshape(2,3).copy(order='F')[::-1,::-1], - 
['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,2),(0,2),(1,1),(0,1),(1,0),(0,0)]) - -def test_iter_best_order_multi_index_3d(): - # The multi-indices should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2,3,2),['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(0,0,0),(0,0,1),(0,1,0),(0,1,1),(0,2,0),(0,2,1), - (1,0,0),(1,0,1),(1,1,0),(1,1,1),(1,2,0),(1,2,1)]) - # 3D Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F'),['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(0,0,0),(1,0,0),(0,1,0),(1,1,0),(0,2,0),(1,2,0), - (0,0,1),(1,0,1),(0,1,1),(1,1,1),(0,2,1),(1,2,1)]) - # 3D reversed C-order - i = nditer(a.reshape(2,3,2)[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(1,0,0),(1,0,1),(1,1,0),(1,1,1),(1,2,0),(1,2,1), - (0,0,0),(0,0,1),(0,1,0),(0,1,1),(0,2,0),(0,2,1)]) - i = nditer(a.reshape(2,3,2)[:,::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(0,2,0),(0,2,1),(0,1,0),(0,1,1),(0,0,0),(0,0,1), - (1,2,0),(1,2,1),(1,1,0),(1,1,1),(1,0,0),(1,0,1)]) - i = nditer(a.reshape(2,3,2)[:,:,::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(0,0,1),(0,0,0),(0,1,1),(0,1,0),(0,2,1),(0,2,0), - (1,0,1),(1,0,0),(1,1,1),(1,1,0),(1,2,1),(1,2,0)]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F')[::-1], - ['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(1,0,0),(0,0,0),(1,1,0),(0,1,0),(1,2,0),(0,2,0), - (1,0,1),(0,0,1),(1,1,1),(0,1,1),(1,2,1),(0,2,1)]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,::-1], - ['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - [(0,2,0),(1,2,0),(0,1,0),(1,1,0),(0,0,0),(1,0,0), - (0,2,1),(1,2,1),(0,1,1),(1,1,1),(0,0,1),(1,0,1)]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,:,::-1], - ['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), - 
[(0,0,1),(1,0,1),(0,1,1),(1,1,1),(0,2,1),(1,2,1), - (0,0,0),(1,0,0),(0,1,0),(1,1,0),(0,2,0),(1,2,0)]) - -def test_iter_best_order_c_index_1d(): - # The C index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a,['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3]) - # 1D reversed order - i = nditer(a[::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,2,1,0]) - -def test_iter_best_order_c_index_2d(): - # The C index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2,3),['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3,4,5]) - # 2D Fortran-order - i = nditer(a.reshape(2,3).copy(order='F'), - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,3,1,4,2,5]) - # 2D reversed C-order - i = nditer(a.reshape(2,3)[::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,4,5,0,1,2]) - i = nditer(a.reshape(2,3)[:,::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [2,1,0,5,4,3]) - i = nditer(a.reshape(2,3)[::-1,::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,4,3,2,1,0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2,3).copy(order='F')[::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,0,4,1,5,2]) - i = nditer(a.reshape(2,3).copy(order='F')[:,::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [2,5,1,4,0,3]) - i = nditer(a.reshape(2,3).copy(order='F')[::-1,::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,2,4,1,3,0]) - -def test_iter_best_order_c_index_3d(): - # The C index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2,3,2),['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [0,1,2,3,4,5,6,7,8,9,10,11]) - # 3D Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F'), - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [0,6,2,8,4,10,1,7,3,9,5,11]) - # 
3D reversed C-order - i = nditer(a.reshape(2,3,2)[::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [6,7,8,9,10,11,0,1,2,3,4,5]) - i = nditer(a.reshape(2,3,2)[:,::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [4,5,2,3,0,1,10,11,8,9,6,7]) - i = nditer(a.reshape(2,3,2)[:,:,::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [1,0,3,2,5,4,7,6,9,8,11,10]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F')[::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [6,0,8,2,10,4,7,1,9,3,11,5]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [4,10,2,8,0,6,5,11,3,9,1,7]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,:,::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), - [1,7,3,9,5,11,0,6,2,8,4,10]) - -def test_iter_best_order_f_index_1d(): - # The Fortran index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a,['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3]) - # 1D reversed order - i = nditer(a[::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,2,1,0]) - -def test_iter_best_order_f_index_2d(): - # The Fortran index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2,3),['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,2,4,1,3,5]) - # 2D Fortran-order - i = nditer(a.reshape(2,3).copy(order='F'), - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3,4,5]) - # 2D reversed C-order - i = nditer(a.reshape(2,3)[::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [1,3,5,0,2,4]) - i = nditer(a.reshape(2,3)[:,::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [4,2,0,5,3,1]) - i = nditer(a.reshape(2,3)[::-1,::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,3,1,4,2,0]) - # 2D reversed Fortran-order - i = 
nditer(a.reshape(2,3).copy(order='F')[::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [1,0,3,2,5,4]) - i = nditer(a.reshape(2,3).copy(order='F')[:,::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [4,5,2,3,0,1]) - i = nditer(a.reshape(2,3).copy(order='F')[::-1,::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,4,3,2,1,0]) - -def test_iter_best_order_f_index_3d(): - # The Fortran index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2,3,2),['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [0,6,2,8,4,10,1,7,3,9,5,11]) - # 3D Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F'), - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [0,1,2,3,4,5,6,7,8,9,10,11]) - # 3D reversed C-order - i = nditer(a.reshape(2,3,2)[::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [1,7,3,9,5,11,0,6,2,8,4,10]) - i = nditer(a.reshape(2,3,2)[:,::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [4,10,2,8,0,6,5,11,3,9,1,7]) - i = nditer(a.reshape(2,3,2)[:,:,::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [6,0,8,2,10,4,7,1,9,3,11,5]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F')[::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [1,0,3,2,5,4,7,6,9,8,11,10]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [4,5,2,3,0,1,10,11,8,9,6,7]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,:,::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), - [6,7,8,9,10,11,0,1,2,3,4,5]) - -def test_iter_no_inner_full_coalesce(): - # Check no_inner iterators which coalesce into a single inner loop - - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: - size = np.prod(shape) - a = arange(size) - # Test each combination of forward and backwards indexing - for dirs in 
range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Fortran-order - i = nditer(aview.T, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), - ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - -def test_iter_no_inner_dim_coalescing(): - # Check no_inner iterators whose dimensions may not coalesce completely - - # Skipping the last element in a dimension prevents coalescing - # with the next-bigger dimension - a = arange(24).reshape(2,3,4)[:,:,:-1] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2,3,4)[:,:-1,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2,3,4)[:-1,:,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (12,)) - - # Even with lots of 1-sized dimensions, should still coalesce - a = arange(24).reshape(1,1,2,1,1,3,1,1,4,1,1) - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (24,)) - -def test_iter_dim_coalescing(): - # Check that the correct number of dimensions are coalesced - - # Tracking a multi-index disables coalescing - a = arange(24).reshape(2,3,4) - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # A tracked index can allow coalescing if it's compatible with the array - a3d = arange(24).reshape(2,3,4) - i = nditer(a3d, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 
1) - i = nditer(a3d.swapaxes(0,1), ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['f_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.T.swapaxes(0,1), ['f_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # When C or F order is forced, coalescing may still occur - a3d = arange(24).reshape(2,3,4) - i = nditer(a3d, order='C') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='C') - assert_equal(i.ndim, 3) - i = nditer(a3d, order='F') - assert_equal(i.ndim, 3) - i = nditer(a3d.T, order='F') - assert_equal(i.ndim, 1) - i = nditer(a3d, order='A') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='A') - assert_equal(i.ndim, 1) - -def test_iter_broadcasting(): - # Standard NumPy broadcasting rules - - # 1D with scalar - i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (6,)) - - # 2D with scalar - i = nditer([arange(6).reshape(2,3), np.int32(2)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) - # 2D with 1D - i = nditer([arange(6).reshape(2,3), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) - i = nditer([arange(2).reshape(2,1), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) - # 2D with 2D - i = nditer([arange(2).reshape(2,1), arange(3).reshape(1,3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) - - # 3D with scalar - i = nditer([np.int32(2), arange(24).reshape(4,2,3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - # 3D with 1D - i = nditer([arange(3), arange(24).reshape(4,2,3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = 
nditer([arange(3), arange(8).reshape(4,2,1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - # 3D with 2D - i = nditer([arange(6).reshape(2,3), arange(24).reshape(4,2,3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(2).reshape(2,1), arange(24).reshape(4,2,3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(3).reshape(1,3), arange(8).reshape(4,2,1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - # 3D with 3D - i = nditer([arange(2).reshape(1,2,1), arange(3).reshape(1,1,3), - arange(4).reshape(4,1,1)], - ['multi_index'], [['readonly']]*3) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(6).reshape(1,2,3), arange(4).reshape(4,1,1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(24).reshape(4,2,3), arange(12).reshape(4,1,3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - -def test_iter_itershape(): - # Check that allocated outputs work with a specified shape - a = np.arange(6, dtype='i2').reshape(2,3) - i = nditer([a, None], [], [['readonly'], ['writeonly','allocate']], - op_axes=[[0,1,None], None], - itershape=(-1,-1,4)) - assert_equal(i.operands[1].shape, (2,3,4)) - assert_equal(i.operands[1].strides, (24,8,2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']], - op_axes=[[0,1,None], None], - itershape=(-1,-1,4)) - assert_equal(i.operands[1].shape, (3,2,4)) - assert_equal(i.operands[1].strides, (8,24,2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']], - order='F', - op_axes=[[0,1,None], None], - itershape=(-1,-1,4)) - assert_equal(i.operands[1].shape, (3,2,4)) - 
assert_equal(i.operands[1].strides, (2,6,12)) - - # If we specify 1 in the itershape, it shouldn't allow broadcasting - # of that dimension to a bigger value - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['writeonly','allocate']], - op_axes=[[0,1,None], None], - itershape=(-1,1,4)) - -def test_iter_broadcasting_errors(): - # Check that errors are thrown for bad broadcasting shapes - - # 1D with 1D - assert_raises(ValueError, nditer, [arange(2), arange(3)], - [], [['readonly']]*2) - # 2D with 1D - assert_raises(ValueError, nditer, - [arange(6).reshape(2,3), arange(2)], - [], [['readonly']]*2) - # 2D with 2D - assert_raises(ValueError, nditer, - [arange(6).reshape(2,3), arange(9).reshape(3,3)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(6).reshape(2,3), arange(4).reshape(2,2)], - [], [['readonly']]*2) - # 3D with 3D - assert_raises(ValueError, nditer, - [arange(36).reshape(3,3,4), arange(24).reshape(2,3,4)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(8).reshape(2,4,1), arange(24).reshape(2,3,4)], - [], [['readonly']]*2) - - # Verify that the error message mentions the right shapes - try: - i = nditer([arange(2).reshape(1,2,1), - arange(3).reshape(1,3), - arange(6).reshape(2,3)], - [], - [['readonly'], ['readonly'], ['writeonly','no_broadcast']]) - assert_(False, 'Should have raised a broadcast error') - except ValueError, e: - msg = str(e) - # The message should contain the shape of the 3rd operand - assert_(msg.find('(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(1,2,3)') >= 0, - 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) - - try: - i = nditer([arange(6).reshape(2,3), arange(2)], [], - [['readonly'],['readonly']], - op_axes=[[0,1], [0,np.newaxis]], - itershape=(4,3)) - assert_(False, 'Should have raised a broadcast error') - except ValueError, e: - msg = str(e) - # The 
message should contain "shape->remappedshape" for each operand - assert_(msg.find('(2,3)->(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) - assert_(msg.find('(2)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' + - '(2)->(2,newaxis)') % msg) - # The message should contain the itershape parameter - assert_(msg.find('(4,3)') >= 0, - 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) - -def test_iter_flags_errors(): - # Check that bad combinations of flags produce errors - - a = arange(6) - - # Not enough operands - assert_raises(ValueError, nditer, [], [], []) - # Too many operands - assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) - # Bad global flag - assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) - # Bad op flag - assert_raises(ValueError, nditer, [a], [], [['readonly','bad flag']]) - # Bad order parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') - # Bad casting parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') - # op_flags must match ops - assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) - # Cannot track both a C and an F index - assert_raises(ValueError, nditer, a, - ['c_index','f_index'], [['readonly']]) - # Inner iteration and multi-indices/indices are incompatible - assert_raises(ValueError, nditer, a, - ['external_loop','multi_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop','c_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop','f_index'], [['readonly']]) - # Must specify exactly one of readwrite/readonly/writeonly per operand - assert_raises(ValueError, nditer, a, [], [[]]) - assert_raises(ValueError, nditer, a, [], [['readonly','writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readonly','readwrite']]) - assert_raises(ValueError, nditer, a, [], [['writeonly','readwrite']]) - 
assert_raises(ValueError, nditer, a, - [], [['readonly','writeonly','readwrite']]) - # Python scalars are always readonly - assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) - assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) - # Array scalars are always readonly - assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) - assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) - # Check readonly array - a.flags.writeable = False - assert_raises(ValueError, nditer, a, [], [['writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readwrite']]) - a.flags.writeable = True - # Multi-indices available only with the multi_index flag - i = nditer(arange(6), [], [['readonly']]) - assert_raises(ValueError, lambda i:i.multi_index, i) - # Index available only with an index flag - assert_raises(ValueError, lambda i:i.index, i) - # GotoCoords and GotoIndex incompatible with buffering or no_inner - def assign_multi_index(i): - i.multi_index = (0,) - def assign_index(i): - i.index = 0 - def assign_iterindex(i): - i.iterindex = 0; - def assign_iterrange(i): - i.iterrange = (0,1); - i = nditer(arange(6), ['external_loop']) - assert_raises(ValueError, assign_multi_index, i) - assert_raises(ValueError, assign_index, i) - assert_raises(ValueError, assign_iterindex, i) - assert_raises(ValueError, assign_iterrange, i) - i = nditer(arange(6), ['buffered']) - assert_raises(ValueError, assign_multi_index, i) - assert_raises(ValueError, assign_index, i) - assert_raises(ValueError, assign_iterrange, i) - # Can't iterate if size is zero - assert_raises(ValueError, nditer, np.array([])) - -def test_iter_slice(): - a, b, c = np.arange(3), np.arange(3), np.arange(3.) 
- i = nditer([a,b,c], [], ['readwrite']) - i[0:2] = (3,3) - assert_equal(a, [3,1,2]) - assert_equal(b, [3,1,2]) - assert_equal(c, [0,1,2]) - i[1] = 12 - assert_equal(i[0:2], [3,12]) - -def test_iter_nbo_align_contig(): - # Check that byte order, alignment, and contig changes work - - # Byte order change by requesting a specific dtype - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - i = nditer(au, [], [['readwrite','updateifcopy']], - casting='equiv', - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) - assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) - assert_equal(i.operands[0], a) - i.operands[0][:] = 2 - i = None - assert_equal(au, [2]*6) - - # Byte order change by requesting NBO - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - i = nditer(au, [], [['readwrite','updateifcopy','nbo']], casting='equiv') - assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) - assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) - assert_equal(i.operands[0], a) - i.operands[0][:] = 2 - i = None - assert_equal(au, [2]*6) - - # Unaligned input - a = np.zeros((6*4+1,), dtype='i1')[1:] - a.dtype = 'f4' - a[:] = np.arange(6, dtype='f4') - assert_(not a.flags.aligned) - # Without 'aligned', shouldn't copy - i = nditer(a, [], [['readonly']]) - assert_(not i.operands[0].flags.aligned) - assert_equal(i.operands[0], a); - # With 'aligned', should make a copy - i = nditer(a, [], [['readwrite','updateifcopy','aligned']]) - assert_(i.operands[0].flags.aligned) - assert_equal(i.operands[0], a); - i.operands[0][:] = 3 - i = None - assert_equal(a, [3]*6) - - # Discontiguous input - a = arange(12) - # If it is contiguous, shouldn't copy - i = nditer(a[:6], [], [['readonly']]) - assert_(i.operands[0].flags.contiguous) - assert_equal(i.operands[0], a[:6]); - # If it isn't contiguous, should buffer - i = 
nditer(a[::2], ['buffered','external_loop'], - [['readonly','contig']], - buffersize=10) - assert_(i[0].flags.contiguous) - assert_equal(i[0], a[::2]) - -def test_iter_array_cast(): - # Check that arrays are cast as requested - - # No cast 'f4' -> 'f4' - a = np.arange(6, dtype='f4').reshape(2,3) - i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - - # Byte-order cast ' '>f4' - a = np.arange(6, dtype='f4')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('>f4')) - - # Safe case 'f4' -> 'f8' - a = np.arange(24, dtype='f4').reshape(2,3,4).swapaxes(1,2) - i = nditer(a, [], [['readonly','copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f8')) - # The memory layout of the temporary should match a (a is (48,4,16)) - assert_equal(i.operands[0].strides, (96,8,32)) - a = a[::-1,:,::-1] - i = nditer(a, [], [['readonly','copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f8')) - assert_equal(i.operands[0].strides, (-96,8,-32)) - - # Same-kind cast 'f8' -> 'f4' -> 'f8' - a = np.arange(24, dtype='f8').reshape(2,3,4).T - i = nditer(a, [], - [['readwrite','updateifcopy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - assert_equal(i.operands[0].strides, (4, 16, 48)) - # Check that UPDATEIFCOPY is activated - i.operands[0][2,1,1] = -12.5 - assert_(a[2,1,1] != -12.5) - i = None - assert_equal(a[2,1,1], -12.5) - - # Unsafe cast 'f4' -> 'i4' - a = np.arange(6, dtype='i4')[::-2] - i = nditer(a, [], - [['writeonly','updateifcopy']], - casting='unsafe', - op_dtypes=[np.dtype('f4')]) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - assert_equal(i.operands[0].strides, (-4,)) - i.operands[0][:] = 1 
- i = None - assert_equal(a, [1,1,1]) - -def test_iter_array_cast_errors(): - # Check that invalid casts are caught - - # Need to enable copying for casts to occur - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly']], op_dtypes=[np.dtype('f8')]) - # Also need to allow casting for casts to occur - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly','copy']], casting='no', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly','copy']], casting='equiv', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2,dtype='f8'), [], - [['writeonly','updateifcopy']], - casting='no', - op_dtypes=[np.dtype('f4')]) - assert_raises(TypeError, nditer, arange(2,dtype='f8'), [], - [['writeonly','updateifcopy']], - casting='equiv', - op_dtypes=[np.dtype('f4')]) - # ' '>f4' should not work with casting='no' - assert_raises(TypeError, nditer, arange(2,dtype='f4')]) - # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readwrite','updateifcopy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2,dtype='f8'), [], - [['readwrite','updateifcopy']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly','copy']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - assert_raises(TypeError, nditer, arange(2,dtype='i4'), [], - [['writeonly','updateifcopy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - -def test_iter_scalar_cast(): - # Check that scalars are cast as requested - - # No cast 'f4' -> 'f4' - i = nditer(np.float32(2.5), [], [['readonly']], - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Safe cast 'f4' -> 'f8' - i = 
nditer(np.float32(2.5), [], - [['readonly','copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.value.dtype, np.dtype('f8')) - assert_equal(i.value, 2.5) - # Same-kind cast 'f8' -> 'f4' - i = nditer(np.float64(2.5), [], - [['readonly','copy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Unsafe cast 'f8' -> 'i4' - i = nditer(np.float64(3.0), [], - [['readonly','copy']], - casting='unsafe', - op_dtypes=[np.dtype('i4')]) - assert_equal(i.dtypes[0], np.dtype('i4')) - assert_equal(i.value.dtype, np.dtype('i4')) - assert_equal(i.value, 3) - # Readonly scalars may be cast even without setting COPY or BUFFERED - i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) - assert_equal(i[0].dtype, np.dtype('f8')) - assert_equal(i[0], 3.) - -def test_iter_scalar_cast_errors(): - # Check that invalid casts are caught - - # Need to allow copying/buffering for write casts of scalars to occur - assert_raises(TypeError, nditer, np.float32(2), [], - [['readwrite']], op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, 2.5, [], - [['readwrite']], op_dtypes=[np.dtype('f4')]) - # 'f8' -> 'f4' isn't a safe cast if the value would overflow - assert_raises(TypeError, nditer, np.float64(1e60), [], - [['readonly']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, np.float32(2), [], - [['readonly']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - -def test_iter_object_arrays_basic(): - # Check that object arrays work - - obj = {'a':3,'b':'d'} - a = np.array([[1,2,3], None, obj, None], dtype='O') - rc = sys.getrefcount(obj) - - # Need to allow references for object arrays - assert_raises(TypeError, nditer, a) - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a, ['refs_ok'], 
['readonly']) - vals = [x[()] for x in i] - assert_equal(np.array(vals, dtype='O'), a) - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2,2).T, ['refs_ok','buffered'], - ['readonly'], order='C') - assert_(i.iterationneedsapi) - vals = [x[()] for x in i] - assert_equal(np.array(vals, dtype='O'), a.reshape(2,2).ravel(order='F')) - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2,2).T, ['refs_ok','buffered'], - ['readwrite'], order='C') - for x in i: - x[...] = None - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(obj), rc-1) - assert_equal(a, np.array([None]*4, dtype='O')) - -def test_iter_object_arrays_conversions(): - # Conversions to/from objects - a = np.arange(6, dtype='O') - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - a = np.arange(6, dtype='i4') - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - # Non-contiguous object array - a = np.zeros((6,), dtype=[('p','i1'),('a','O')]) - a = a['a'] - a[:] = np.arange(6) - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - #Non-contiguous value array - a = np.zeros((6,), dtype=[('p','i1'),('a','i4')]) - a = a['a'] - a[:] = np.arange(6) + 98172488 - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - ob = i[0][()] - rc = sys.getrefcount(ob) - for x in i: - x[...] 
+= 1 - assert_equal(sys.getrefcount(ob), rc-1) - assert_equal(a, np.arange(6)+98172489) - -def test_iter_common_dtype(): - # Check that the iterator finds a common data type correctly - - i = nditer([array([3],dtype='f4'),array([0],dtype='f8')], - ['common_dtype'], - [['readonly','copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')); - assert_equal(i.dtypes[1], np.dtype('f8')); - i = nditer([array([3],dtype='i4'),array([0],dtype='f4')], - ['common_dtype'], - [['readonly','copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')); - assert_equal(i.dtypes[1], np.dtype('f8')); - i = nditer([array([3],dtype='f4'),array(0,dtype='f8')], - ['common_dtype'], - [['readonly','copy']]*2, - casting='same_kind') - assert_equal(i.dtypes[0], np.dtype('f4')); - assert_equal(i.dtypes[1], np.dtype('f4')); - i = nditer([array([3],dtype='u4'),array(0,dtype='i4')], - ['common_dtype'], - [['readonly','copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('u4')); - assert_equal(i.dtypes[1], np.dtype('u4')); - i = nditer([array([3],dtype='u4'),array(-12,dtype='i4')], - ['common_dtype'], - [['readonly','copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i8')); - assert_equal(i.dtypes[1], np.dtype('i8')); - i = nditer([array([3],dtype='u4'),array(-12,dtype='i4'), - array([2j],dtype='c8'),array([9],dtype='f8')], - ['common_dtype'], - [['readonly','copy']]*4, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')); - assert_equal(i.dtypes[1], np.dtype('c16')); - assert_equal(i.dtypes[2], np.dtype('c16')); - assert_equal(i.dtypes[3], np.dtype('c16')); - assert_equal(i.value, (3,-12,2j,9)) - - # When allocating outputs, other outputs aren't factored in - i = nditer([array([3],dtype='i4'),None,array([2j],dtype='c16')], [], - [['readonly','copy'], - ['writeonly','allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i4')); - assert_equal(i.dtypes[1], np.dtype('i4')); - assert_equal(i.dtypes[2], 
np.dtype('c16')); - # But, if common data types are requested, they are - i = nditer([array([3],dtype='i4'),None,array([2j],dtype='c16')], - ['common_dtype'], - [['readonly','copy'], - ['writeonly','allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')); - assert_equal(i.dtypes[1], np.dtype('c16')); - assert_equal(i.dtypes[2], np.dtype('c16')); - -def test_iter_op_axes(): - # Check that custom axes work - - # Reverse the axes - a = arange(6).reshape(2,3) - i = nditer([a,a.T], [], [['readonly']]*2, op_axes=[[0,1],[1,0]]) - assert_(all([x==y for (x,y) in i])) - a = arange(24).reshape(2,3,4) - i = nditer([a.T,a], [], [['readonly']]*2, op_axes=[[2,1,0],None]) - assert_(all([x==y for (x,y) in i])) - - # Broadcast 1D to any dimension - a = arange(1,31).reshape(2,3,5) - b = arange(1,3) - i = nditer([a,b], [], [['readonly']]*2, op_axes=[None,[0,-1,-1]]) - assert_equal([x*y for (x,y) in i], (a*b.reshape(2,1,1)).ravel()) - b = arange(1,4) - i = nditer([a,b], [], [['readonly']]*2, op_axes=[None,[-1,0,-1]]) - assert_equal([x*y for (x,y) in i], (a*b.reshape(1,3,1)).ravel()) - b = arange(1,6) - i = nditer([a,b], [], [['readonly']]*2, - op_axes=[None,[np.newaxis,np.newaxis,0]]) - assert_equal([x*y for (x,y) in i], (a*b.reshape(1,1,5)).ravel()) - - # Inner product-style broadcasting - a = arange(24).reshape(2,3,4) - b = arange(40).reshape(5,2,4) - i = nditer([a,b], ['multi_index'], [['readonly']]*2, - op_axes=[[0,1,-1,-1],[-1,-1,0,1]]) - assert_equal(i.shape, (2,3,5,2)) - - # Matrix product-style broadcasting - a = arange(12).reshape(3,4) - b = arange(20).reshape(4,5) - i = nditer([a,b], ['multi_index'], [['readonly']]*2, - op_axes=[[0,-1],[-1,1]]) - assert_equal(i.shape, (3,5)) - -def test_iter_op_axes_errors(): - # Check that custom axes throws errors for bad inputs - - # Wrong number of items in op_axes - a = arange(6).reshape(2,3) - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0],[1],[0]]) - # Out of bounds 
items in op_axes - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[2,1],[0,1]]) - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[2,-1]]) - # Duplicate items in op_axes - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,0],[0,1]]) - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[1,1]]) - - # Different sized arrays in op_axes - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[0,1,0]]) - - # Non-broadcastable dimensions in the result - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[1,0]]) - -def test_iter_copy(): - # Check that copying the iterator works correctly - a = arange(24).reshape(2,3,4) - - # Simple iterator - i = nditer(a) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterindex = 3 - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - # Buffered iterator - i = nditer(a, ['buffered','ranged'], order='F', buffersize=3) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterindex = 3 - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterrange = (3,9) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterrange = (2,18) - i.next(); i.next() - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - # Casting iterator - i = nditer(a, ['buffered'], order='F', casting='unsafe', - op_dtypes='f8', buffersize=5) - j = i.copy() - i = None - assert_equal([x[()] for x in j], a.ravel(order='F')) - - a = arange(24, dtype='cast->swap - - a = np.arange(10, dtype='f4').newbyteorder().byteswap() - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f8').newbyteorder()], - buffersize=3) - for v in i: - v[...] 
*= 2 - - assert_equal(a, 2*np.arange(10, dtype='f4')) - - try: - warnings.simplefilter("ignore", np.ComplexWarning) - - a = np.arange(10, dtype='f8').newbyteorder().byteswap() - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], - casting='unsafe', - op_dtypes=[np.dtype('c8').newbyteorder()], - buffersize=3) - for v in i: - v[...] *= 2 - - assert_equal(a, 2*np.arange(10, dtype='f8')) - finally: - warnings.simplefilter("default", np.ComplexWarning) - -def test_iter_buffered_cast_byteswapped_complex(): - # Test that buffering can handle a cast which requires swap->cast->copy - - a = np.arange(10, dtype='c8').newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype='c8') - a += 2j - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16').newbyteorder()], - buffersize=3) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) - - a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f4')], - buffersize=7) - for v in i: - v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) - -def test_iter_buffered_cast_structured_type(): - # Tests buffering of structured types - - # simple -> struct type (duplicates the value) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')] - a = np.arange(3, dtype='f4') + 0.5 - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [np.array(x) for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - - # object -> struct type - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')] - a = np.zeros((3,), dtype='O') - a[0] = (0.5,0.5,[[0.5,0.5,0.5],[0.5,0.5,0.5]],0.5) - a[1] = (1.5,1.5,[[1.5,1.5,1.5],[1.5,1.5,1.5]],1.5) - a[2] = (2.5,2.5,[[2.5,2.5,2.5],[2.5,2.5,2.5]],2.5) - rc = sys.getrefcount(a[0]) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [x.copy() for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(a[0]), rc) - - # struct type -> simple (takes the first value) - sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - a = np.array([(5.5,7,'test'),(8,10,11)], dtype=sdt) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes='i4') - assert_equal([x[()] for x in i], [5, 8]) - - # struct type -> struct type (field-wise copy) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = 
[('d', 'u2'), ('a', 'O'), ('b', 'f8')] - a = np.array([(1,2,3),(4,5,6)], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - assert_equal([np.array(x) for x in i], - [np.array((3,1,2), dtype=sdt2), - np.array((6,4,5), dtype=sdt2)]) - - # struct type -> struct type (field gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1,2,3),(4,5,6)], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1), dtype=sdt2), - np.array((5,4), dtype=sdt2)]) - assert_equal(a, np.array([(5,2,None),(8,5,None)], dtype=sdt1)) - - # struct type -> struct type (structured field gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'),('b','i4')])] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1,2,(0,9)),(4,5,(20,21))], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1), dtype=sdt2), - np.array((5,4), dtype=sdt2)]) - assert_equal(a, np.array([(5,2,(0,0)),(8,5,(0,0))], dtype=sdt1)) - - # struct type -> struct type (structured field w/ ref gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'),('b','O')])] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1,2,(0,9)),(4,5,(20,21))], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1), dtype=sdt2), - np.array((5,4), dtype=sdt2)]) - assert_equal(a, 
np.array([(5,2,(0,None)),(8,5,(0,None))], dtype=sdt1)) - - # struct type -> struct type back (structured field w/ ref gets discarded) - sdt1 = [('b', 'O'), ('a', 'f8')] - sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'),('b','O')])] - a = np.array([(1,2),(4,5)], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - assert_equal(x['d'], np.array((0, None), dtype=[('a','i2'),('b','O')])) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1,(0,None)), dtype=sdt2), - np.array((5,4,(0,None)), dtype=sdt2)]) - assert_equal(a, np.array([(1,4),(4,7)], dtype=sdt1)) - -def test_iter_buffered_cast_subarray(): - # Tests buffering of subarrays - - # one element -> many (copies it to all) - sdt1 = [('a', 'f4')] - sdt2 = [('a', 'f8', (3,2,2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - for x, count in zip(i, range(6)): - assert_(np.all(x['a'] == count)) - - # one element -> many -> back (copies it to all) - sdt1 = [('a', 'O', (1,1))] - sdt2 = [('a', 'O', (3,2,2))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_(np.all(x['a'] == count)) - x['a'][0] += 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6,1,1)+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'O', (3,2,2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - x['a'] 
+= 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6,1,1,1)*np.ones((1,3,2,2))+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'f8', (3,2,2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> one element (copies just element 0) - sdt1 = [('a', 'O', (3,2,2))] - sdt2 = [('a', 'f4', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> matching shape (straightforward copy) - sdt1 = [('a', 'O', (3,2,2))] - sdt2 = [('a', 'f4', (3,2,2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6,3,2,2) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a']) - count += 1 - - # vector -> smaller vector (truncates) - sdt1 = [('a', 'f8', (6,))] - sdt2 = [('a', 'f4', (2,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6,6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a'][:2]) - count += 1 - - # vector -> bigger vector (pads with zeros) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (6,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6,2) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - 
assert_equal(x['a'][:2], a[count]['a']) - assert_equal(x['a'][2:], [0,0,0,0]) - count += 1 - - # vector -> matrix (broadcasts) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (2,2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6,2) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][0], a[count]['a']) - assert_equal(x['a'][1], a[count]['a']) - count += 1 - - # vector -> matrix (broadcasts and zero-pads) - sdt1 = [('a', 'f8', (2,1))] - sdt2 = [('a', 'f4', (3,2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6,2,1) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2,0], a[count]['a'][:,0]) - assert_equal(x['a'][:2,1], a[count]['a'][:,0]) - assert_equal(x['a'][2,:], [0,0]) - count += 1 - - # matrix -> matrix (truncates and zero-pads) - sdt1 = [('a', 'f8', (2,3))] - sdt2 = [('a', 'f4', (3,2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2*3).reshape(6,2,3) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2,0], a[count]['a'][:,0]) - assert_equal(x['a'][:2,1], a[count]['a'][:,1]) - assert_equal(x['a'][2,:], [0,0]) - count += 1 - -def test_iter_buffering_badwriteback(): - # Writing back from a buffer cannot combine elements - - # a needs write buffering, but had a broadcast dimension - a = np.arange(6).reshape(2,3,1) - b = np.arange(12).reshape(2,3,2) - assert_raises(ValueError,nditer,[a,b], - ['buffered','external_loop'], - [['readwrite'],['writeonly']], - order='C') - - # But if a is readonly, it's fine - i = nditer([a,b],['buffered','external_loop'], - [['readonly'],['writeonly']], - order='C') - - # If a has just one 
element, it's fine too (constant 0 stride, a reduction) - a = np.arange(1).reshape(1,1,1) - i = nditer([a,b],['buffered','external_loop','reduce_ok'], - [['readwrite'],['writeonly']], - order='C') - - # check that it fails on other dimensions too - a = np.arange(6).reshape(1,3,2) - assert_raises(ValueError,nditer,[a,b], - ['buffered','external_loop'], - [['readwrite'],['writeonly']], - order='C') - a = np.arange(4).reshape(2,1,2) - assert_raises(ValueError,nditer,[a,b], - ['buffered','external_loop'], - [['readwrite'],['writeonly']], - order='C') - -def test_iter_buffering_string(): - # Safe casting disallows shrinking strings - a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) - assert_equal(a.dtype, np.dtype('S4')); - assert_raises(TypeError,nditer,a,['buffered'],['readonly'], - op_dtypes='S2') - i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') - assert_equal(i[0], asbytes('abc')) - assert_equal(i[0].dtype, np.dtype('S6')) - - a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode) - assert_equal(a.dtype, np.dtype('U4')); - assert_raises(TypeError,nditer,a,['buffered'],['readonly'], - op_dtypes='U2') - i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') - assert_equal(i[0], u'abc') - assert_equal(i[0].dtype, np.dtype('U6')) - -def test_iter_buffering_growinner(): - # Test that the inner loop grows when no buffering is needed - a = np.arange(30) - i = nditer(a, ['buffered','growinner','external_loop'], - buffersize=5) - # Should end up with just one inner loop here - assert_equal(i[0].size, a.size) - -def test_iter_no_broadcast(): - # Test that the no_broadcast flag works - a = np.arange(24).reshape(2,3,4) - b = np.arange(6).reshape(2,3,1) - c = np.arange(12).reshape(3,4) - - i = nditer([a,b,c], [], - [['readonly','no_broadcast'],['readonly'],['readonly']]) - assert_raises(ValueError, nditer, [a,b,c], [], - [['readonly'],['readonly','no_broadcast'],['readonly']]) - assert_raises(ValueError, nditer, [a,b,c], [], - 
[['readonly'],['readonly'],['readonly','no_broadcast']]) - -def test_iter_nested_iters_basic(): - # Test nested iteration basic usage - a = arange(12).reshape(2,3,2) - - i, j = np.nested_iters(a, [[0],[1,2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5],[6,7,8,9,10,11]]) - - i, j = np.nested_iters(a, [[0,1],[2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]) - - i, j = np.nested_iters(a, [[0,2],[1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) - -def test_iter_nested_iters_reorder(): - # Test nested iteration basic usage - a = arange(12).reshape(2,3,2) - - # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0],[2,1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5],[6,7,8,9,10,11]]) - - i, j = np.nested_iters(a, [[1,0],[2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]) - - i, j = np.nested_iters(a, [[2,0],[1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) - - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0],[2,1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,2,4,1,3,5],[6,8,10,7,9,11]]) - - i, j = np.nested_iters(a, [[1,0],[2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1],[6,7],[2,3],[8,9],[4,5],[10,11]]) - - i, j = np.nested_iters(a, [[2,0],[1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[6,8,10],[1,3,5],[7,9,11]]) - -def test_iter_nested_iters_flip_axes(): - # Test nested iteration with negative axes - a = arange(12).reshape(2,3,2)[::-1,::-1,::-1] - - # In 'K' order (default), the axes all get flipped - i, j = 
np.nested_iters(a, [[0],[1,2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5],[6,7,8,9,10,11]]) - - i, j = np.nested_iters(a, [[0,1],[2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]) - - i, j = np.nested_iters(a, [[0,2],[1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) - - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0],[1,2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11,10,9,8,7,6],[5,4,3,2,1,0]]) - - i, j = np.nested_iters(a, [[0,1],[2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11,10],[9,8],[7,6],[5,4],[3,2],[1,0]]) - - i, j = np.nested_iters(a, [[0,2],[1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11,9,7],[10,8,6],[5,3,1],[4,2,0]]) - -def test_iter_nested_iters_broadcast(): - # Test nested iteration with broadcasting - a = arange(2).reshape(2,1) - b = arange(3).reshape(1,3) - - i, j = np.nested_iters([a,b], [[0],[1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0,0],[0,1],[0,2]],[[1,0],[1,1],[1,2]]]) - - i, j = np.nested_iters([a,b], [[1],[0]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0,0],[1,0]],[[0,1],[1,1]],[[0,2],[1,2]]]) - -def test_iter_nested_iters_dtype_copy(): - # Test nested iteration with a copy to change dtype - - # copy - a = arange(6, dtype='i4').reshape(2,3) - i, j = np.nested_iters(a, [[0],[1]], - op_flags=['readonly','copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0,1,2],[3,4,5]]) - vals = None - - # updateifcopy - a = arange(6, dtype='f4').reshape(2,3) - i, j = np.nested_iters(a, [[0],[1]], - 
op_flags=['readwrite','updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0,1,2],[3,4,5]]) - i, j, x, y = (None,)*4 # force the updateifcopy - assert_equal(a, [[1,2,3],[4,5,6]]) - -def test_iter_nested_iters_dtype_buffered(): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2,3) - i, j = np.nested_iters(a, [[0],[1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[1,2,3],[4,5,6]]) - -def test_iter_reduction_error(): - - a = np.arange(6) - assert_raises(ValueError, nditer, [a,None], [], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0],[-1]]) - - a = np.arange(6).reshape(2,3) - assert_raises(ValueError, nditer, [a,None], ['external_loop'], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0,1],[-1,-1]]) - -def test_iter_reduction(): - # Test doing reductions with the iterator - - a = np.arange(6) - i = nditer([a,None], ['reduce_ok'], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0],[-1]]) - # Need to initialize the output operand to the addition unit - i.operands[1][...] = 0 - # Do the reduction - for x, y in i: - y[...] += x - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - a = np.arange(6).reshape(2,3) - i = nditer([a,None], ['reduce_ok','external_loop'], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0,1],[-1,-1]]) - # Need to initialize the output operand to the addition unit - i.operands[1][...] = 0 - # Reduction shape/strides for the output - assert_equal(i[1].shape, (6,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - y[...] 
+= x - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - # This is a tricky reduction case for the buffering double loop - # to handle - a = np.ones((2,3,5)) - it1 = nditer([a,None], ['reduce_ok','external_loop'], - [['readonly'], ['readwrite','allocate']], - op_axes=[None,[0,-1,1]]) - it2 = nditer([a,None], ['reduce_ok','external_loop', - 'buffered','delay_bufalloc'], - [['readonly'], ['readwrite','allocate']], - op_axes=[None,[0,-1,1]], buffersize=10) - it1.operands[1].fill(0) - it2.operands[1].fill(0) - it2.reset() - for x in it1: - x[1][...] += x[0] - for x in it2: - x[1][...] += x[0] - assert_equal(it1.operands[1], it2.operands[1]) - assert_equal(it2.operands[1].sum(), a.size) - -def test_iter_buffering_reduction(): - # Test doing buffered reductions with the iterator - - a = np.arange(6) - b = np.array(0., dtype='f8').byteswap().newbyteorder() - i = nditer([a,b], ['reduce_ok', 'buffered'], - [['readonly'], ['readwrite','nbo']], - op_axes=[[0],[-1]]) - assert_equal(i[1].dtype, np.dtype('f8')) - assert_(i[1].dtype != b.dtype) - # Do the reduction - for x, y in i: - y[...] += x - # Since no axes were specified, should have allocated a scalar - assert_equal(b, np.sum(a)) - - a = np.arange(6).reshape(2,3) - b = np.array([0,0], dtype='f8').byteswap().newbyteorder() - i = nditer([a,b], ['reduce_ok','external_loop', 'buffered'], - [['readonly'], ['readwrite','nbo']], - op_axes=[[0,1],[0,-1]]) - # Reduction shape/strides for the output - assert_equal(i[1].shape, (3,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - y[...] 
+= x - assert_equal(b, np.sum(a, axis=1)) - - # Iterator inner double loop was wrong on this one - p = np.arange(2) + 1 - it = np.nditer([p,None], - ['delay_bufalloc','reduce_ok','buffered','external_loop'], - [['readonly'],['readwrite','allocate']], - op_axes=[[-1,0],[-1,-1]], - itershape=(2,2)) - it.operands[1].fill(0) - it.reset() - assert_equal(it[0], [1,2,1,2]) - -def test_iter_buffering_reduction_reuse_reduce_loops(): - # There was a bug triggering reuse of the reduce loop inappropriately, - # which caused processing to happen in unnecessarily small chunks - # and overran the buffer. - - a = np.zeros((2,7)) - b = np.zeros((1,7)) - it = np.nditer([a,b], flags=['reduce_ok', 'external_loop', 'buffered'], - op_flags=[['readonly'], ['readwrite']], - buffersize = 5) - - bufsizes = [] - for x, y in it: - bufsizes.append(x.shape[0]) - assert_equal(bufsizes, [5,2,5,2]) - assert_equal(sum(bufsizes), a.size) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_machar.py b/numpy-1.6.2/numpy/core/tests/test_machar.py deleted file mode 100644 index 4175ceeac5..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_machar.py +++ /dev/null @@ -1,31 +0,0 @@ -from numpy.testing import * - -from numpy.core.machar import MachAr -import numpy.core.numerictypes as ntypes -from numpy import seterr, array - -class TestMachAr(TestCase): - def _run_machar_highprec(self): - # Instanciate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - machar = MachAr(lambda v:array([v], hiprec)) - except AttributeError: - "Skipping test: no nyptes.float96 available on this platform." - - def test_underlow(self): - """Regression testing for #759: instanciating MachAr for dtype = - np.float96 raises spurious warning.""" - serrstate = seterr(all='raise') - try: - try: - self._run_machar_highprec() - except FloatingPointError, e: - self.fail("Caught %s exception, should not have been raised." 
% e) - finally: - seterr(**serrstate) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_memmap.py b/numpy-1.6.2/numpy/core/tests/test_memmap.py deleted file mode 100644 index 18b356ce28..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_memmap.py +++ /dev/null @@ -1,91 +0,0 @@ -from tempfile import NamedTemporaryFile, mktemp -import os -import warnings - -from numpy import memmap -from numpy import arange, allclose -from numpy.testing import TestCase, assert_, assert_array_equal - -class TestMemmap(TestCase): - def setUp(self): - self.tmpfp = NamedTemporaryFile(prefix='mmap') - self.shape = (3,4) - self.dtype = 'float32' - self.data = arange(12, dtype=self.dtype) - self.data.resize(self.shape) - - def tearDown(self): - self.tmpfp.close() - - def test_roundtrip(self): - # Write data to file - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp # Test __del__ machinery, which handles cleanup - - # Read data back from file - newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', - shape=self.shape) - assert_(allclose(self.data, newfp)) - assert_array_equal(self.data, newfp) - - def test_open_with_filename(self): - tmpname = mktemp('','mmap') - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp - os.unlink(tmpname) - - def test_attributes(self): - offset = 1 - mode = "w+" - fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, - shape=self.shape, offset=offset) - self.assertEquals(offset, fp.offset) - self.assertEquals(mode, fp.mode) - del fp - - def test_filename(self): - tmpname = mktemp('','mmap') - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - abspath = os.path.abspath(tmpname) - fp[:] = self.data[:] - self.assertEquals(abspath, fp.filename) - b = fp[:1] - self.assertEquals(abspath, b.filename) - del b - del fp - os.unlink(tmpname) - - def test_filename_fileobj(self): - fp = 
memmap(self.tmpfp, dtype=self.dtype, mode="w+", - shape=self.shape) - self.assertEquals(fp.filename, self.tmpfp.name) - - def test_flush(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - fp.flush() - - def test_del(self): - # Make sure a view does not delete the underlying mmap - fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp_view = fp_base[:] - class ViewCloseError(Exception): - pass - _close = memmap._close - def replace_close(self): - raise ViewCloseError('View should not call _close on memmap') - try: - memmap._close = replace_close - del fp_view - finally: - memmap._close = _close - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_multiarray.py b/numpy-1.6.2/numpy/core/tests/test_multiarray.py deleted file mode 100644 index cee88547e4..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_multiarray.py +++ /dev/null @@ -1,2290 +0,0 @@ -import tempfile -import sys -import os -import numpy as np -from numpy.testing import * -from numpy.core import * -from numpy.core.multiarray_tests import test_neighborhood_iterator, test_neighborhood_iterator_oob - -# Need to test an object that does not fully implement math interface -from datetime import timedelta - -from numpy.compat import asbytes, getexception, strchar - -from test_print import in_foreign_locale - -class TestFlags(TestCase): - def setUp(self): - self.a = arange(10) - - def test_writeable(self): - mydict = locals() - self.a.flags.writeable = False - self.assertRaises(RuntimeError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 - - def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - 
assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.updateifcopy, False) - - -class TestAttributes(TestCase): - def setUp(self): - self.one = arange(10) - self.two = arange(20).reshape(4,5) - self.three = arange(60,dtype=float64).reshape(2,5,6) - - def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4,5)) - assert_equal(self.three.shape, (2,5,6)) - self.three.shape = (10,3,2) - assert_equal(self.three.shape, (10,3,2)) - self.three.shape = (2,5,6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, arange(20)) - - def test_dtypeattr(self): - assert_equal(self.one.dtype, dtype(int_)) - assert_equal(self.three.dtype, dtype(float_)) - assert_equal(self.one.dtype.char, 'l') - assert_equal(self.three.dtype.char, 'd') - self.assertTrue(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') - - def test_stridesattr(self): - x = self.one - def make_array(size, offset, strides): - return ndarray([size], buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) - assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - self.assertRaises(ValueError, make_array, 4, 4, -2) - self.assertRaises(ValueError, make_array, 4, 2, -1) - self.assertRaises(ValueError, make_array, 8, 3, 1) - #self.assertRaises(ValueError, make_array, 8, 3, 0) - #self.assertRaises(ValueError, lambda: ndarray([1], strides=4)) - - - def test_set_stridesattr(self): - x = 
self.one - def make_array(size, offset, strides): - try: - r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) - except: - raise RuntimeError(getexception()) - r.strides = strides=strides*x.itemsize - return r - assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - assert_equal(make_array(7,3,1), array([3, 4, 5, 6, 7, 8, 9])) - self.assertRaises(ValueError, make_array, 4, 4, -2) - self.assertRaises(ValueError, make_array, 4, 2, -1) - self.assertRaises(RuntimeError, make_array, 8, 3, 1) - #self.assertRaises(ValueError, make_array, 8, 3, 0) - - def test_fill(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = empty((3,2,1), t) - y = empty((3,2,1), t) - x.fill(1) - y[...] = 1 - assert_equal(x,y) - - x = array([(0,0.0), (1,1.0)], dtype='i4,f8') - x.fill(x[0]) - assert_equal(x['f1'][1], x['f1'][0]) - -class TestAssignment(TestCase): - def test_assignment_broadcasting(self): - a = np.arange(6).reshape(2,3) - - # Broadcasting the input to the output - a[...] = np.arange(3) - assert_equal(a, [[0,1,2],[0,1,2]]) - a[...] = np.arange(2).reshape(2,1) - assert_equal(a, [[0,0,0],[1,1,1]]) - - # For compatibility with <= 1.5, a limited version of broadcasting - # the output to the input. - # - # This behavior is inconsistent with NumPy broadcasting - # in general, because it only uses one of the two broadcasting - # rules (adding a new "1" dimension to the left of the shape), - # applied to the output instead of an input. In NumPy 2.0, this kind - # of broadcasting assignment will likely be disallowed. - a[...] = np.arange(6)[::-1].reshape(1,2,3) - assert_equal(a, [[5,4,3],[2,1,0]]) - # The other type of broadcasting would require a reduction operation. - def assign(a,b): - a[...] 
= b - assert_raises(ValueError, assign, a, np.arange(12).reshape(2,2,3)) - -class TestDtypedescr(TestCase): - def test_construction(self): - d1 = dtype('i4') - assert_equal(d1, dtype(int32)) - d2 = dtype('f8') - assert_equal(d2, dtype(float64)) - -class TestZeroRank(TestCase): - def setUp(self): - self.d = array(0), array('x', object) - - def test_ellipsis_subscript(self): - a,b = self.d - self.assertEqual(a[...], 0) - self.assertEqual(b[...], 'x') - self.assertTrue(a[...] is a) - self.assertTrue(b[...] is b) - - def test_empty_subscript(self): - a,b = self.d - self.assertEqual(a[()], 0) - self.assertEqual(b[()], 'x') - self.assertTrue(type(a[()]) is a.dtype.type) - self.assertTrue(type(b[()]) is str) - - def test_invalid_subscript(self): - a,b = self.d - self.assertRaises(IndexError, lambda x: x[0], a) - self.assertRaises(IndexError, lambda x: x[0], b) - self.assertRaises(IndexError, lambda x: x[array([], int)], a) - self.assertRaises(IndexError, lambda x: x[array([], int)], b) - - def test_ellipsis_subscript_assignment(self): - a,b = self.d - a[...] = 42 - self.assertEqual(a, 42) - b[...] 
= '' - self.assertEqual(b.item(), '') - - def test_empty_subscript_assignment(self): - a,b = self.d - a[()] = 42 - self.assertEqual(a, 42) - b[()] = '' - self.assertEqual(b.item(), '') - - def test_invalid_subscript_assignment(self): - a,b = self.d - def assign(x, i, v): - x[i] = v - self.assertRaises(IndexError, assign, a, 0, 42) - self.assertRaises(IndexError, assign, b, 0, '') - self.assertRaises(ValueError, assign, a, (), '') - - def test_newaxis(self): - a,b = self.d - self.assertEqual(a[newaxis].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ...].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1)) - self.assertEqual(a[..., newaxis, newaxis].shape, (1,1)) - self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1)) - self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) - - def test_invalid_newaxis(self): - a,b = self.d - def subscript(x, i): x[i] - self.assertRaises(IndexError, subscript, a, (newaxis, 0)) - self.assertRaises(IndexError, subscript, a, (newaxis,)*50) - - def test_constructor(self): - x = ndarray(()) - x[()] = 5 - self.assertEqual(x[()], 5) - y = ndarray((),buffer=x) - y[()] = 6 - self.assertEqual(x[()], 6) - - def test_output(self): - x = array(2) - self.assertRaises(ValueError, add, x, [1], x) - - -class TestScalarIndexing(TestCase): - def setUp(self): - self.d = array([0,1])[0] - - def test_ellipsis_subscript(self): - a = self.d - self.assertEqual(a[...], 0) - self.assertEqual(a[...].shape,()) - - def test_empty_subscript(self): - a = self.d - self.assertEqual(a[()], 0) - self.assertEqual(a[()].shape,()) - - def test_invalid_subscript(self): - a = self.d - self.assertRaises(IndexError, lambda x: x[0], a) - self.assertRaises(IndexError, lambda x: x[array([], int)], a) - - def test_invalid_subscript_assignment(self): - a = self.d - def assign(x, i, v): - x[i] = v - self.assertRaises(TypeError, assign, a, 0, 42) - - def test_newaxis(self): - a 
= self.d - self.assertEqual(a[newaxis].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ...].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1)) - self.assertEqual(a[..., newaxis, newaxis].shape, (1,1)) - self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1)) - self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) - - def test_invalid_newaxis(self): - a = self.d - def subscript(x, i): x[i] - self.assertRaises(IndexError, subscript, a, (newaxis, 0)) - self.assertRaises(IndexError, subscript, a, (newaxis,)*50) - - def test_overlapping_assignment(self): - # With positive strides - a = np.arange(4) - a[:-1] = a[1:] - assert_equal(a, [1,2,3,3]) - - a = np.arange(4) - a[1:] = a[:-1] - assert_equal(a, [0,0,1,2]) - - # With positive and negative strides - a = np.arange(4) - a[:] = a[::-1] - assert_equal(a, [3,2,1,0]) - - a = np.arange(6).reshape(2,3) - a[::-1,:] = a[:,::-1] - assert_equal(a, [[5,4,3],[2,1,0]]) - - a = np.arange(6).reshape(2,3) - a[::-1,::-1] = a[:,::-1] - assert_equal(a, [[3,4,5],[0,1,2]]) - - # With just one element overlapping - a = np.arange(5) - a[:3] = a[2:] - assert_equal(a, [2,3,4,3,4]) - - a = np.arange(5) - a[2:] = a[:3] - assert_equal(a, [0,1,0,1,2]) - - a = np.arange(5) - a[2::-1] = a[2:] - assert_equal(a, [4,3,2,3,4]) - - a = np.arange(5) - a[2:] = a[2::-1] - assert_equal(a, [0,1,2,1,0]) - - a = np.arange(5) - a[2::-1] = a[:1:-1] - assert_equal(a, [2,3,4,3,4]) - - a = np.arange(5) - a[:1:-1] = a[2::-1] - assert_equal(a, [0,1,0,1,2]) - -class TestCreation(TestCase): - def test_from_attribute(self): - class x(object): - def __array__(self, dtype=None): - pass - self.assertRaises(ValueError, array, x()) - - def test_from_string(self) : - types = np.typecodes['AllInteger'] + np.typecodes['Float'] - nstr = ['123','123'] - result = array([123, 123], dtype=int) - for type in types : - msg = 'String conversion for %s' % type - 
assert_equal(array(nstr, dtype=type), result, err_msg=msg) - - def test_non_sequence_sequence(self): - """Should not segfault. - - Class Fail breaks the sequence protocol for new style classes, i.e., - those derived from object. Class Map is a mapping type indicated by - raising a ValueError. At some point we may raise a warning instead - of an error in the Fail case. - - """ - class Fail(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise ValueError() - - class Map(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise KeyError() - - a = np.array([Map()]) - assert_(a.shape == (1,)) - assert_(a.dtype == np.dtype(object)) - assert_raises(ValueError, np.array, [Fail()]) - - -class TestStructured(TestCase): - def test_subarray_field_access(self): - a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) - a['a'] = np.arange(60).reshape(3, 5, 2, 2) - - # Since the subarray is always in C-order, these aren't equal - assert_(np.any(a['a'].T != a.T['a'])) - - # In Fortran order, the subarray gets appended - # like in all other cases, not prepended as a special case - b = a.copy(order='F') - assert_equal(a['a'].shape, b['a'].shape) - assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) - - - def test_subarray_comparison(self): - # Check that comparisons between record arrays with - # multi-dimensional field types work properly - a = np.rec.fromrecords( - [([1,2,3],'a', [[1,2],[3,4]]),([3,3,3],'b',[[0,0],[0,0]])], - dtype=[('a', ('f4',3)), ('b', np.object), ('c', ('i4',(2,2)))]) - b = a.copy() - assert_equal(a==b, [True,True]) - assert_equal(a!=b, [False,False]) - b[1].b = 'c' - assert_equal(a==b, [True,False]) - assert_equal(a!=b, [False,True]) - for i in range(3): - b[0].a = a[0].a - b[0].a[i] = 5 - assert_equal(a==b, [False,False]) - assert_equal(a!=b, [True,True]) - for i in range(2): - for j in range(2): - b = a.copy() - b[0].c[i,j] = 10 - assert_equal(a==b, [False,True]) - assert_equal(a!=b, [True,False]) - - # 
Check that broadcasting with a subarray works - a = np.array([[(0,)],[(1,)]],dtype=[('a','f8')]) - b = np.array([(0,),(0,),(1,)],dtype=[('a','f8')]) - assert_equal(a==b, [[True, True, False], [False, False, True]]) - assert_equal(b==a, [[True, True, False], [False, False, True]]) - a = np.array([[(0,)],[(1,)]],dtype=[('a','f8',(1,))]) - b = np.array([(0,),(0,),(1,)],dtype=[('a','f8',(1,))]) - assert_equal(a==b, [[True, True, False], [False, False, True]]) - assert_equal(b==a, [[True, True, False], [False, False, True]]) - a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))]) - b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))]) - assert_equal(a==b, [[True, False, False], [False, False, True]]) - assert_equal(b==a, [[True, False, False], [False, False, True]]) - - # Check that broadcasting Fortran-style arrays with a subarray work - a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))], order='F') - b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))]) - assert_equal(a==b, [[True, False, False], [False, False, True]]) - assert_equal(b==a, [[True, False, False], [False, False, True]]) - - # Check that incompatible sub-array shapes don't result to broadcasting - x = np.zeros((1,), dtype=[('a', ('f4', (1,2))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - assert_equal(x == y, False) - - x = np.zeros((1,), dtype=[('a', ('f4', (2,1))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - assert_equal(x == y, False) - - -class TestBool(TestCase): - def test_test_interning(self): - a0 = bool_(0) - b0 = bool_(False) - self.assertTrue(a0 is b0) - a1 = bool_(1) - b1 = bool_(True) - self.assertTrue(a1 is b1) - self.assertTrue(array([True])[0] is a1) - self.assertTrue(array(True)[()] is a1) - - -class TestMethods(TestCase): - def test_test_round(self): - assert_equal(array([1.2,1.5]).round(), [1,2]) - assert_equal(array(1.5).round(), 2) - 
assert_equal(array([12.2,15.5]).round(-1), [10,20]) - assert_equal(array([12.15,15.51]).round(1), [12.2,15.5]) - - def test_transpose(self): - a = array([[1,2],[3,4]]) - assert_equal(a.transpose(), [[1,3],[2,4]]) - self.assertRaises(ValueError, lambda: a.transpose(0)) - self.assertRaises(ValueError, lambda: a.transpose(0,0)) - self.assertRaises(ValueError, lambda: a.transpose(0,1,2)) - - def test_sort(self): - # test ordering for floats and complex containing nans. It is only - # necessary to check the lessthan comparison, so sorts that - # only follow the insertion sort path are sufficient. We only - # test doubles and complex doubles as the logic is the same. - - # check doubles - msg = "Test real sort order with nans" - a = np.array([np.nan, 1, 0]) - b = sort(a) - assert_equal(b, a[::-1], msg) - # check complex - msg = "Test complex sort order with nans" - a = np.zeros(9, dtype=np.complex128) - a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] - a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] - b = sort(a) - assert_equal(b, a[::-1], msg) - - # all c scalar sorts use the same code with different types - # so it suffices to run a quick check with one type. The number - # of sorted items must be greater than ~50 to check the actual - # algorithm because quick and merge sort fall over to insertion - # sort for small arrays. - a = np.arange(100) - b = a[::-1].copy() - for kind in ['q','m','h'] : - msg = "scalar sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test complex sorts. These use the same code as the scalars - # but the compare fuction differs. 
- ai = a*1j + 1 - bi = b*1j + 1 - for kind in ['q','m','h'] : - msg = "complex sort, real part == 1, kind=%s" % kind - c = ai.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - ai = a + 1j - bi = b + 1j - for kind in ['q','m','h'] : - msg = "complex sort, imag part == 1, kind=%s" % kind - c = ai.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - - # test string sorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(100)]) - b = a[::-1].copy() - for kind in ['q', 'm', 'h'] : - msg = "string sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test unicode sort. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(100)], dtype=np.unicode) - b = a[::-1].copy() - for kind in ['q', 'm', 'h'] : - msg = "unicode sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # todo, check object array sorts. - - # check axis handling. 
This should be the same for all type - # specific sorts, so we only check it for one type and one kind - a = np.array([[3,2],[1,0]]) - b = np.array([[1,0],[3,2]]) - c = np.array([[2,3],[0,1]]) - d = a.copy() - d.sort(axis=0) - assert_equal(d, b, "test sort with axis=0") - d = a.copy() - d.sort(axis=1) - assert_equal(d, c, "test sort with axis=1") - d = a.copy() - d.sort() - assert_equal(d, c, "test sort with default axis") - # using None is known fail at this point - # d = a.copy() - # d.sort(axis=None) - #assert_equal(d, c, "test sort with axis=None") - - - def test_sort_order(self): - # Test sorting an array with fields - x1=np.array([21,32,14]) - x2=np.array(['my','first','name']) - x3=np.array([3.1,4.5,6.2]) - r=np.rec.fromarrays([x1,x2,x3],names='id,word,number') - - r.sort(order=['id']) - assert_equal(r.id, array([14,21,32])) - assert_equal(r.word, array(['name','my','first'])) - assert_equal(r.number, array([6.2,3.1,4.5])) - - r.sort(order=['word']) - assert_equal(r.id, array([32,21,14])) - assert_equal(r.word, array(['first','my','name'])) - assert_equal(r.number, array([4.5,3.1,6.2])) - - r.sort(order=['number']) - assert_equal(r.id, array([21,32,14])) - assert_equal(r.word, array(['my','first','name'])) - assert_equal(r.number, array([3.1,4.5,6.2])) - - if sys.byteorder == 'little': - strtype = '>i2' - else: - strtype = 'i4') - b = a.searchsorted(np.array(128,dtype='>i4')) - assert_equal(b, 1, msg) - - def test_searchsorted_unicode(self): - # Test searchsorted on unicode strings. - - # 1.6.1 contained a string length miscalculation in - # arraytypes.c.src:UNICODE_compare() which manifested as - # incorrect/inconsistent results from searchsorted. 
- a = np.array([u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1', - u'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1']) - assert_equal([a.searchsorted(v, 'left') for v in a], np.arange(len(a))) - assert_equal([a.searchsorted(v, 'right') for v in a], np.arange(len(a)) + 1) - assert_equal([a.searchsorted(a[i], 'left') for i in range(len(a))], np.arange(len(a))) - assert_equal([a.searchsorted(a[i], 'right') for i in range(len(a))], np.arange(len(a)) + 1) - - def test_flatten(self): - x0 = np.array([[1,2,3],[4,5,6]], np.int32) - x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32) - y0 = np.array([1,2,3,4,5,6], np.int32) - y0f = np.array([1,4,2,5,3,6], np.int32) - y1 = np.array([1,2,3,4,5,6,7,8], np.int32) - y1f = np.array([1,5,3,7,2,6,4,8], np.int32) - assert_equal(x0.flatten(), y0) - assert_equal(x0.flatten('F'), y0f) - assert_equal(x0.flatten('F'), x0.T.flatten()) - assert_equal(x1.flatten(), y1) - assert_equal(x1.flatten('F'), y1f) - assert_equal(x1.flatten('F'), x1.T.flatten()) - - def test_dot(self): - a = np.array([[1, 0], [0, 1]]) - b = np.array([[0, 1], [1, 0]]) - c = np.array([[9, 1], [1, -9]]) - - assert_equal(np.dot(a, b), a.dot(b)) - assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) - - def test_ravel(self): - a = np.array([[0,1],[2,3]]) - assert_equal(a.ravel(), [0,1,2,3]) - assert_(not a.ravel().flags.owndata) - assert_equal(a.ravel('F'), [0,2,1,3]) - assert_equal(a.ravel(order='C'), 
[0,1,2,3]) - assert_equal(a.ravel(order='F'), [0,2,1,3]) - assert_equal(a.ravel(order='A'), [0,1,2,3]) - assert_(not a.ravel(order='A').flags.owndata) - assert_equal(a.ravel(order='K'), [0,1,2,3]) - assert_(not a.ravel(order='K').flags.owndata) - assert_equal(a.ravel(), a.reshape(-1)) - - a = np.array([[0,1],[2,3]], order='F') - assert_equal(a.ravel(), [0,1,2,3]) - assert_equal(a.ravel(order='A'), [0,2,1,3]) - assert_equal(a.ravel(order='K'), [0,2,1,3]) - assert_(not a.ravel(order='A').flags.owndata) - assert_(not a.ravel(order='K').flags.owndata) - assert_equal(a.ravel(), a.reshape(-1)) - assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) - - a = np.array([[0,1],[2,3]])[::-1,:] - assert_equal(a.ravel(), [2,3,0,1]) - assert_equal(a.ravel(order='C'), [2,3,0,1]) - assert_equal(a.ravel(order='F'), [2,0,3,1]) - assert_equal(a.ravel(order='A'), [2,3,0,1]) - # 'K' doesn't reverse the axes of negative strides - assert_equal(a.ravel(order='K'), [2,3,0,1]) - assert_(a.ravel(order='K').flags.owndata) - - def test_setasflat(self): - # In this case, setasflat can treat a as a flat array, - # and must treat b in chunks of 3 - a = np.arange(3*3*4).reshape(3,3,4) - b = np.arange(3*4*3, dtype='f4').reshape(3,4,3).T - - assert_(not np.all(a.ravel() == b.ravel())) - a.setasflat(b) - assert_equal(a.ravel(), b.ravel()) - - # A case where the strides of neither a nor b can be collapsed - a = np.arange(3*2*4).reshape(3,2,4)[:,:,:-1] - b = np.arange(3*3*3, dtype='f4').reshape(3,3,3).T[:,:,:-1] - - assert_(not np.all(a.ravel() == b.ravel())) - a.setasflat(b) - assert_equal(a.ravel(), b.ravel()) - -class TestSubscripting(TestCase): - def test_test_zero_rank(self): - x = array([1,2,3]) - self.assertTrue(isinstance(x[0], np.int_)) - if sys.version_info[0] < 3: - self.assertTrue(isinstance(x[0], int)) - self.assertTrue(type(x[0, ...]) is ndarray) - - -class TestPickling(TestCase): - def test_roundtrip(self): - import pickle - carray = array([[2,9],[7,0],[3,8]]) - DATA = [ - carray, - 
transpose(carray), - array([('xxx', 1, 2.0)], dtype=[('a', (str,3)), ('b', int), - ('c', float)]) - ] - - for a in DATA: - assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) - - def _loads(self, obj): - if sys.version_info[0] >= 3: - return loads(obj, encoding='latin1') - else: - return loads(obj) - - # version 0 pickles, using protocol=2 to pickle - # version 0 doesn't have a version field - def test_version0_int8(self): - s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' - a = array([1,2,3,4], dtype=int8) - p = self._loads(asbytes(s)) - assert_equal(a, p) - - def test_version0_float32(self): - s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) - - def test_mixed(self): - g1 = array(["spam","spa","spammer","and eggs"]) - g2 = "spam" - assert_array_equal(g1 == g2, [x == g2 for x in g1]) - assert_array_equal(g1 != g2, [x != g2 for x in g1]) - assert_array_equal(g1 < g2, [x < g2 for x in g1]) - assert_array_equal(g1 > g2, [x > g2 for x in g1]) - assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) - assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) - - - def test_unicode(self): - g1 = array([u"This",u"is",u"example"]) - g2 = array([u"This",u"was",u"example"]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) - 
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) - - -class TestArgmax(TestCase): - - nan_arr = [ - ([0, 1, 2, 3, np.nan], 4), - ([0, 1, 2, np.nan, 3], 3), - ([np.nan, 0, 1, 2, 3], 0), - ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0,np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan,0)], 4), - ([0, 1, 2, complex(np.nan,0), 3], 3), - ([0, 1, 2, complex(0,np.nan), 3], 3), - ([complex(0,np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), - - ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), - ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), - ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), - - ([np.datetime64('1923-04-14T12:43:12'), - np.datetime64('1994-06-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('1995-11-25T16:02:16'), - np.datetime64('2005-01-04T03:14:12'), - np.datetime64('2041-12-03T14:05:03')], 5), - ([np.datetime64('1935-09-14T04:40:11'), - np.datetime64('1949-10-12T12:32:11'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('2015-11-20T12:20:59'), - np.datetime64('1932-09-23T10:10:13'), - np.datetime64('2014-10-10T03:50:30')], 3), - ([np.datetime64('2059-03-14T12:43:12'), - np.datetime64('1996-09-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('2022-12-25T16:02:16'), - np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 0), - - ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), - timedelta(days=-1, seconds=23)], 0), - ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), - timedelta(days=5, seconds=14)], 1), - ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), - timedelta(days=10, seconds=43)], 2), - - # Can't reduce a "flexible type" - #(['a', 'z', 'aa', 'zz'], 3), - #(['zz', 
'a', 'aa', 'a'], 0), - #(['aa', 'z', 'zz', 'a'], 2), - ] - - def test_all(self): - a = np.random.normal(0,1,(4,5,6,7,8)) - for i in xrange(a.ndim): - amax = a.max(i) - aargmax = a.argmax(i) - axes = range(a.ndim) - axes.remove(i) - assert all(amax == aargmax.choose(*a.transpose(i,*axes))) - - def test_combinations(self): - for arr, pos in self.nan_arr: - assert_equal(np.argmax(arr), pos, err_msg="%r"%arr) - assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr) - - -class TestArgmin(TestCase): - - nan_arr = [ - ([0, 1, 2, 3, np.nan], 4), - ([0, 1, 2, np.nan, 3], 3), - ([np.nan, 0, 1, 2, 3], 0), - ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0,np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan,0)], 4), - ([0, 1, 2, complex(np.nan,0), 3], 3), - ([0, 1, 2, complex(0,np.nan), 3], 3), - ([complex(0,np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), - - ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), - ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), - ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), - - ([np.datetime64('1923-04-14T12:43:12'), - np.datetime64('1994-06-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('1995-11-25T16:02:16'), - np.datetime64('2005-01-04T03:14:12'), - np.datetime64('2041-12-03T14:05:03')], 0), - ([np.datetime64('1935-09-14T04:40:11'), - np.datetime64('1949-10-12T12:32:11'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('2014-11-20T12:20:59'), - np.datetime64('2015-09-23T10:10:13'), - np.datetime64('1932-10-10T03:50:30')], 5), - ([np.datetime64('2059-03-14T12:43:12'), - np.datetime64('1996-09-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('2022-12-25T16:02:16'), - np.datetime64('1963-10-04T03:14:12'), - 
np.datetime64('2013-05-08T18:15:23')], 4), - - ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), - timedelta(days=-1, seconds=23)], 2), - ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), - timedelta(days=5, seconds=14)], 0), - ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), - timedelta(days=10, seconds=43)], 1), - - # Can't reduce a "flexible type" - #(['a', 'z', 'aa', 'zz'], 0), - #(['zz', 'a', 'aa', 'a'], 1), - #(['aa', 'z', 'zz', 'a'], 3), - ] - - def test_all(self): - a = np.random.normal(0,1,(4,5,6,7,8)) - for i in xrange(a.ndim): - amin = a.min(i) - aargmin = a.argmin(i) - axes = range(a.ndim) - axes.remove(i) - assert_(all(amin == aargmin.choose(*a.transpose(i,*axes)))) - - def test_combinations(self): - for arr, pos in self.nan_arr: - assert_equal(np.argmin(arr), pos, err_msg="%r"%arr) - assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr) - - - -class TestMinMax(TestCase): - def test_scalar(self): - assert_raises(ValueError, np.amax, 1, 1) - assert_raises(ValueError, np.amin, 1, 1) - - assert_equal(np.amax(1, axis=0), 1) - assert_equal(np.amin(1, axis=0), 1) - assert_equal(np.amax(1, axis=None), 1) - assert_equal(np.amin(1, axis=None), 1) - - def test_axis(self): - assert_raises(ValueError, np.amax, [1,2,3], 1000) - assert_equal(np.amax([[1,2,3]], axis=1), 3) - -class TestNewaxis(TestCase): - def test_basic(self): - sk = array([0,-0.1,0.1]) - res = 250*sk[:,newaxis] - assert_almost_equal(res.ravel(),250*sk) - - -class TestClip(TestCase): - def _check_range(self,x,cmin,cmax): - assert np.all(x >= cmin) - assert np.all(x <= cmax) - - def _clip_type(self,type_group,array_max, - clip_min,clip_max,inplace=False, - expected_min=None,expected_max=None): - if expected_min is None: - expected_min = clip_min - if expected_max is None: - expected_max = clip_max - - for T in np.sctypes[type_group]: - if sys.byteorder == 'little': - byte_orders = ['=','>'] - else: - byte_orders = ['<','='] - - for byteorder in 
byte_orders: - dtype = np.dtype(T).newbyteorder(byteorder) - - x = (np.random.random(1000) * array_max).astype(dtype) - if inplace: - x.clip(clip_min,clip_max,x) - else: - x = x.clip(clip_min,clip_max) - byteorder = '=' - - if x.dtype.byteorder == '|': byteorder = '|' - assert_equal(x.dtype.byteorder,byteorder) - self._check_range(x,expected_min,expected_max) - return x - - def test_basic(self): - for inplace in [False, True]: - self._clip_type('float',1024,-12.8,100.2, inplace=inplace) - self._clip_type('float',1024,0,0, inplace=inplace) - - self._clip_type('int',1024,-120,100.5, inplace=inplace) - self._clip_type('int',1024,0,0, inplace=inplace) - - x = self._clip_type('uint',1024,-120,100,expected_min=0, inplace=inplace) - x = self._clip_type('uint',1024,0,0, inplace=inplace) - - def test_record_array(self): - rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], - dtype=[('x', '= 3) - x = val.clip(min=3) - assert np.all(x >= 3) - x = val.clip(max=4) - assert np.all(x <= 4) - - -class TestPutmask: - def tst_basic(self,x,T,mask,val): - np.putmask(x,mask,val) - assert np.all(x[mask] == T(val)) - assert x.dtype == T - - def test_ip_types(self): - unchecked_types = [str, unicode, np.void, object] - - x = np.random.random(1000)*100 - mask = x < 40 - - for val in [-100,0,15]: - for types in np.sctypes.itervalues(): - for T in types: - if T not in unchecked_types: - yield self.tst_basic,x.copy().astype(T),T,mask,val - - def test_mask_size(self): - assert_raises(ValueError, np.putmask, np.array([1,2,3]), [True], 5) - - def tst_byteorder(self,dtype): - x = np.array([1,2,3],dtype) - np.putmask(x,[True,False,True],-1) - assert_array_equal(x,[-1,2,-1]) - - def test_ip_byteorder(self): - for dtype in ('>i4','f8'), ('z', 'i4','f8'), ('z', ' 1 minute on mechanical hard drive - def test_big_binary(self): - """Test workarounds for 32-bit limited fwrite, fseek, and ftell - calls in windows. These normally would hang doing something like this. 
- See http://projects.scipy.org/numpy/ticket/1660""" - if sys.platform != 'win32': - return - try: - # before workarounds, only up to 2**32-1 worked - fourgbplus = 2**32 + 2**16 - testbytes = np.arange(8, dtype=np.int8) - n = len(testbytes) - flike = tempfile.NamedTemporaryFile() - f = flike.file - np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) - flike.seek(0) - a = np.fromfile(f, dtype=np.int8) - flike.close() - assert_(len(a) == fourgbplus) - # check only start and end for speed: - assert_((a[:n] == testbytes).all()) - assert_((a[-n:] == testbytes).all()) - except (MemoryError, ValueError): - pass - - def test_string(self): - self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',') - - def test_counted_string(self): - self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') - self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',') - self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') - - def test_string_with_ws(self): - self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') - - def test_counted_string_with_ws(self): - self._check_from('1 2 3 4 ', [1,2,3], count=3, dtype=int, - sep=' ') - - def test_ascii(self): - self._check_from('1 , 2 , 3 , 4', [1.,2.,3.,4.], sep=',') - self._check_from('1,2,3,4', [1.,2.,3.,4.], dtype=float, sep=',') - - def test_malformed(self): - self._check_from('1.234 1,234', [1.234, 1.], sep=' ') - - def test_long_sep(self): - self._check_from('1_x_3_x_4_x_5', [1,3,4,5], sep='_x_') - - def test_dtype(self): - v = np.array([1,2,3,4], dtype=np.int_) - self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) - - def test_dtype_bool(self): - # can't use _check_from because fromstring can't handle True/False - v = np.array([True, False, True, False], dtype=np.bool_) - s = '1,0,-2.3,0' - f = open(self.filename, 'wb') - f.write(asbytes(s)) - f.close() - y = np.fromfile(self.filename, sep=',', dtype=np.bool_) - assert_(y.dtype == '?') - assert_array_equal(y, v) - - def test_tofile_sep(self): - x = 
np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - assert_equal(s, '1.51,2.0,3.51,4.0') - os.unlink(self.filename) - - def test_tofile_format(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',', format='%.2f') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - assert_equal(s, '1.51,2.00,3.51,4.00') - - def test_locale(self): - in_foreign_locale(self.test_numbers)() - in_foreign_locale(self.test_nan)() - in_foreign_locale(self.test_inf)() - in_foreign_locale(self.test_counted_string)() - in_foreign_locale(self.test_ascii)() - in_foreign_locale(self.test_malformed)() - in_foreign_locale(self.test_tofile_sep)() - in_foreign_locale(self.test_tofile_format)() - - -class TestFromBuffer: - def tst_basic(self,buffer,expected,kwargs): - assert_array_equal(np.frombuffer(buffer,**kwargs),expected) - - def test_ip_basic(self): - for byteorder in ['<','>']: - for dtype in [float,int,np.complex]: - dt = np.dtype(dtype).newbyteorder(byteorder) - x = (np.random.random((4,7))*5).astype(dt) - buf = x.tostring() - yield self.tst_basic,buf,x.flat,{'dtype':dt} - - def test_empty(self): - yield self.tst_basic, asbytes(''), np.array([]), {} - - -class TestResize(TestCase): - def test_basic(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - x.resize((5,5)) - assert_array_equal(x.flat[:9],np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) - assert_array_equal(x[9:].flat,0) - - def test_check_reference(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - y = x - self.assertRaises(ValueError,x.resize,(5,1)) - - def test_int_shape(self): - x = np.eye(3) - x.resize(3) - assert_array_equal(x, np.eye(3)[0,:]) - - def test_none_shape(self): - x = np.eye(3) - x.resize(None) - assert_array_equal(x, np.eye(3)) - x.resize() - assert_array_equal(x, np.eye(3)) - - def test_invalid_arguements(self): 
- self.assertRaises(TypeError, np.eye(3).resize, 'hi') - self.assertRaises(ValueError, np.eye(3).resize, -1) - self.assertRaises(TypeError, np.eye(3).resize, order=1) - self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') - - def test_freeform_shape(self): - x = np.eye(3) - x.resize(3,2,1) - assert_(x.shape == (3,2,1)) - - def test_zeros_appended(self): - x = np.eye(3) - x.resize(2,3,3) - assert_array_equal(x[0], np.eye(3)) - assert_array_equal(x[1], np.zeros((3,3))) - - -class TestRecord(TestCase): - def test_field_rename(self): - dt = np.dtype([('f',float),('i',int)]) - dt.names = ['p','q'] - assert_equal(dt.names,['p','q']) - - if sys.version_info[0] >= 3: - def test_bytes_fields(self): - # Bytes are not allowed in field names and not recognized in titles - # on Py3 - assert_raises(TypeError, np.dtype, [(asbytes('a'), int)]) - assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)]) - - dt = np.dtype([((asbytes('a'), 'b'), int)]) - assert_raises(ValueError, dt.__getitem__, asbytes('a')) - - x = np.array([(1,), (2,), (3,)], dtype=dt) - assert_raises(ValueError, x.__getitem__, asbytes('a')) - - y = x[0] - assert_raises(IndexError, y.__getitem__, asbytes('a')) - else: - def test_unicode_field_titles(self): - # Unicode field titles are added to field dict on Py2 - title = unicode('b') - dt = np.dtype([((title, 'a'), int)]) - dt[title] - dt['a'] - x = np.array([(1,), (2,), (3,)], dtype=dt) - x[title] - x['a'] - y = x[0] - y[title] - y['a'] - - def test_unicode_field_names(self): - # Unicode field names are not allowed on Py2 - title = unicode('b') - assert_raises(TypeError, np.dtype, [(title, int)]) - assert_raises(TypeError, np.dtype, [(('a', title), int)]) - -class TestView(TestCase): - def test_basic(self): - x = np.array([(1,2,3,4),(5,6,7,8)],dtype=[('r',np.int8),('g',np.int8), - ('b',np.int8),('a',np.int8)]) - # We must be specific about the endianness here: - y = x.view(dtype='= (2, 6): - - if sys.version_info[:2] == (2, 6): - from 
numpy.core.multiarray import memorysimpleview as memoryview - - from numpy.core._internal import _dtype_from_pep3118 - - class TestPEP3118Dtype(object): - def _check(self, spec, wanted): - dt = np.dtype(wanted) - if isinstance(wanted, list) and isinstance(wanted[-1], tuple): - if wanted[-1][0] == '': - names = list(dt.names) - names[-1] = '' - dt.names = tuple(names) - assert_equal(_dtype_from_pep3118(spec), dt, - err_msg="spec %r != dtype %r" % (spec, wanted)) - - def test_native_padding(self): - align = np.dtype('i').alignment - for j in xrange(8): - if j == 0: - s = 'bi' - else: - s = 'b%dxi' % j - self._check('@'+s, {'f0': ('i1', 0), - 'f1': ('i', align*(1 + j//align))}) - self._check('='+s, {'f0': ('i1', 0), - 'f1': ('i', 1+j)}) - - def test_native_padding_2(self): - # Native padding should work also for structs and sub-arrays - self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) - self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) - - def test_trailing_padding(self): - # Trailing padding should be included, *and*, the item size - # should match the alignment if in aligned mode - align = np.dtype('i').alignment - def VV(n): - return 'V%d' % (align*(1 + (n-1)//align)) - - self._check('ix', [('f0', 'i'), ('', VV(1))]) - self._check('ixx', [('f0', 'i'), ('', VV(2))]) - self._check('ixxx', [('f0', 'i'), ('', VV(3))]) - self._check('ixxxx', [('f0', 'i'), ('', VV(4))]) - self._check('i7x', [('f0', 'i'), ('', VV(7))]) - - self._check('^ix', [('f0', 'i'), ('', 'V1')]) - self._check('^ixx', [('f0', 'i'), ('', 'V2')]) - self._check('^ixxx', [('f0', 'i'), ('', 'V3')]) - self._check('^ixxxx', [('f0', 'i'), ('', 'V4')]) - self._check('^i7x', [('f0', 'i'), ('', 'V7')]) - - def test_native_padding_3(self): - dt = np.dtype([('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) - self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) - - dt = np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', 
np.dtype('b,i', align=True))]) - self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) - - def test_padding_with_array_inside_struct(self): - dt = np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) - self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) - - def test_byteorder_inside_struct(self): - # The byte order after @T{=i} should be '=', not '@'. - # Check this by noting the absence of native alignment. - self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), - 'f1': ('i', 5)}) - - def test_intra_padding(self): - # Natively aligned sub-arrays may require some internal padding - align = np.dtype('i').alignment - def VV(n): - return 'V%d' % (align*(1 + (n-1)//align)) - - self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) - - class TestNewBufferProtocol(object): - def _check_roundtrip(self, obj): - obj = np.asarray(obj) - x = memoryview(obj) - y = np.asarray(x) - y2 = np.array(x) - assert not y.flags.owndata - assert y2.flags.owndata - assert_equal(y.dtype, obj.dtype) - assert_array_equal(obj, y) - assert_equal(y2.dtype, obj.dtype) - assert_array_equal(obj, y2) - - def test_roundtrip(self): - x = np.array([1,2,3,4,5], dtype='i4') - self._check_roundtrip(x) - - x = np.array([[1,2],[3,4]], dtype=np.float64) - self._check_roundtrip(x) - - x = np.zeros((3,3,3), dtype=np.float32)[:,0,:] - self._check_roundtrip(x) - - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - ('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array([(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)], - dtype=dt) - self._check_roundtrip(x) - - x = np.array(([[1,2],[3,4]],), dtype=[('a', (int, (2,2)))]) - self._check_roundtrip(x) - - x = 
np.array([1,2,3], dtype='>i2') - self._check_roundtrip(x) - - x = np.array([1,2,3], dtype='i') - else: - assert_equal(y.format, 'i') - - x = np.array([1,2,3], dtype='= 0, - "Type %s raised wrong fpe error '%s'." % (ftype, exc)) - - def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): - """Check that fpe exception is raised. - - Given a floating operation `flop` and two scalar values, check that - the operation raises the floating point exception specified by - `fpeerr`. Tests all variants with 0-d array scalars as well. - - """ - self.assert_raises_fpe(fpeerr, flop, sc1, sc2); - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2); - self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]); - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]); - - @dec.knownfailureif(True, "See ticket 1755") - def test_floating_exceptions(self): - """Test basic arithmetic function errors""" - oldsettings = np.seterr(all='raise') - try: - # Test for all real and complex float types - for typecode in np.typecodes['AllFloat']: - ftype = np.obj2sctype(typecode) - if np.dtype(ftype).kind == 'f': - # Get some extreme values for the type - fi = np.finfo(ftype) - ft_tiny = fi.tiny - ft_max = fi.max - ft_eps = fi.eps - underflow = 'underflow' - divbyzero = 'divide by zero' - else: - # 'c', complex, corresponding real dtype - rtype = type(ftype(0).real) - fi = np.finfo(rtype) - ft_tiny = ftype(fi.tiny) - ft_max = ftype(fi.max) - ft_eps = ftype(fi.eps) - # The complex types raise different exceptions - underflow = '' - divbyzero = '' - overflow = 'overflow' - invalid = 'invalid' - - self.assert_raises_fpe(underflow, - lambda a,b:a/b, ft_tiny, ft_max) - self.assert_raises_fpe(underflow, - lambda a,b:a*b, ft_tiny, ft_tiny) - self.assert_raises_fpe(overflow, - lambda a,b:a*b, ft_max, ftype(2)) - self.assert_raises_fpe(overflow, - lambda a,b:a/b, ft_max, ftype(0.5)) - self.assert_raises_fpe(overflow, - lambda a,b:a+b, ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - lambda a,b:a-b, 
-ft_max, ft_max*ft_eps) - self.assert_raises_fpe(divbyzero, - lambda a,b:a/b, ftype(1), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a,b:a/b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a,b:a/b, ftype(0), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a,b:a-b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a,b:a+b, ftype(np.inf), ftype(-np.inf)) - self.assert_raises_fpe(invalid, - lambda a,b:a*b, ftype(0), ftype(np.inf)) - finally: - np.seterr(**oldsettings) - - @dec.knownfailureif(True, "See ticket 1755") - def test_floating_exceptions_power(self): - """Test basic arithmetic function errors""" - oldsettings = np.seterr(all='raise') - try: - # Test for all real and complex float types - for typecode in np.typecodes['AllFloat']: - ftype = np.obj2sctype(typecode) - if np.dtype(ftype).kind == 'f': - # Get some extreme values for the type - fi = np.finfo(ftype) - else: - # 'c', complex, corresponding real dtype - rtype = type(ftype(0).real) - fi = np.finfo(rtype) - overflow = 'overflow' - - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) - finally: - np.seterr(**oldsettings) - -class TestTypes(TestCase): - def check_promotion_cases(self, promote_func): - """Tests that the scalars get coerced correctly.""" - b = np.bool_(0) - i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0) - u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0) - f32, f64, fld = float32(0), float64(0), longdouble(0) - c64, c128, cld = complex64(0), complex128(0), clongdouble(0) - - # coercion within the same kind - assert_equal(promote_func(i8,i16), np.dtype(int16)) - assert_equal(promote_func(i32,i8), np.dtype(int32)) - assert_equal(promote_func(i16,i64), np.dtype(int64)) - assert_equal(promote_func(u8,u32), np.dtype(uint32)) - assert_equal(promote_func(f32,f64), np.dtype(float64)) - assert_equal(promote_func(fld,f32), np.dtype(longdouble)) - assert_equal(promote_func(f64,fld), 
np.dtype(longdouble)) - assert_equal(promote_func(c128,c64), np.dtype(complex128)) - assert_equal(promote_func(cld,c128), np.dtype(clongdouble)) - assert_equal(promote_func(c64,fld), np.dtype(clongdouble)) - - # coercion between kinds - assert_equal(promote_func(b,i32), np.dtype(int32)) - assert_equal(promote_func(b,u8), np.dtype(uint8)) - assert_equal(promote_func(i8,u8), np.dtype(int16)) - assert_equal(promote_func(u8,i32), np.dtype(int32)) - assert_equal(promote_func(i64,u32), np.dtype(int64)) - assert_equal(promote_func(u64,i32), np.dtype(float64)) - assert_equal(promote_func(i32,f32), np.dtype(float64)) - assert_equal(promote_func(i64,f32), np.dtype(float64)) - assert_equal(promote_func(f32,i16), np.dtype(float32)) - assert_equal(promote_func(f32,u32), np.dtype(float64)) - assert_equal(promote_func(f32,c64), np.dtype(complex64)) - assert_equal(promote_func(c128,f32), np.dtype(complex128)) - assert_equal(promote_func(cld,f64), np.dtype(clongdouble)) - - # coercion between scalars and 1-D arrays - assert_equal(promote_func(array([b]),i8), np.dtype(int8)) - assert_equal(promote_func(array([b]),u8), np.dtype(uint8)) - assert_equal(promote_func(array([b]),i32), np.dtype(int32)) - assert_equal(promote_func(array([b]),u32), np.dtype(uint32)) - assert_equal(promote_func(array([i8]),i64), np.dtype(int8)) - assert_equal(promote_func(u64,array([i32])), np.dtype(int32)) - assert_equal(promote_func(i64,array([u32])), np.dtype(uint32)) - assert_equal(promote_func(int32(-1),array([u64])), np.dtype(float64)) - assert_equal(promote_func(f64,array([f32])), np.dtype(float32)) - assert_equal(promote_func(fld,array([f32])), np.dtype(float32)) - assert_equal(promote_func(array([f64]),fld), np.dtype(float64)) - assert_equal(promote_func(fld,array([c64])), np.dtype(complex64)) - assert_equal(promote_func(c64,array([f64])), np.dtype(complex128)) - assert_equal(promote_func(complex64(3j),array([f64])), - np.dtype(complex128)) - - # coercion between scalars and 1-D arrays, where - # the 
scalar has greater kind than the array - assert_equal(promote_func(array([b]),f64), np.dtype(float64)) - assert_equal(promote_func(array([b]),i64), np.dtype(int64)) - assert_equal(promote_func(array([b]),u64), np.dtype(uint64)) - assert_equal(promote_func(array([i8]),f64), np.dtype(float64)) - assert_equal(promote_func(array([u16]),f64), np.dtype(float64)) - # uint and int are treated as the same "kind" for - # the purposes of array-scalar promotion. - assert_equal(promote_func(array([u16]), i32), np.dtype(uint16)) - # float and complex are treated as the same "kind" for - # the purposes of array-scalar promotion, so that you can do - # (0j + float32array) to get a complex64 array instead of - # a complex128 array. - assert_equal(promote_func(array([f32]),c128), np.dtype(complex64)) - - def test_coercion(self): - def res_type(a, b): - return np.add(a, b).dtype - self.check_promotion_cases(res_type) - - # Use-case: float/complex scalar * bool/int8 array - # shouldn't narrow the float/complex type - for a in [np.array([True,False]), np.array([-3,12], dtype=np.int8)]: - b = 1.234 * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.longdouble(1.234) * a - assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) - b = np.float64(1.234) * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.float32(1.234) * a - assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) - b = np.float16(1.234) * a - assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) - - b = 1.234j * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.clongdouble(1.234j) * a - assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) - b = np.complex128(1.234j) * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.complex64(1.234j) * a - assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) - - # The following use-case is 
problematic, and to resolve its - # tricky side-effects requires more changes. - # - ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is - ## a float32, shouldn't promote to float64 - #a = np.array([1.0, 1.5], dtype=np.float32) - #t = np.array([True, False]) - #b = t*a - #assert_equal(b, [1.0, 0.0]) - #assert_equal(b.dtype, np.dtype('f4')) - #b = (1-t)*a - #assert_equal(b, [0.0, 1.5]) - #assert_equal(b.dtype, np.dtype('f4')) - ## Probably ~t (bitwise negation) is more proper to use here, - ## but this is arguably less intuitive to understand at a glance, and - ## would fail if 't' is actually an integer array instead of boolean: - #b = (~t)*a - #assert_equal(b, [0.0, 1.5]) - #assert_equal(b.dtype, np.dtype('f4')) - - def test_result_type(self): - self.check_promotion_cases(np.result_type) - - def test_can_cast(self): - assert_(np.can_cast(np.int32, np.int64)) - assert_(np.can_cast(np.float64, np.complex)) - assert_(not np.can_cast(np.complex, np.float)) - - assert_(np.can_cast('i8', 'f8')) - assert_(not np.can_cast('i8', 'f4')) - assert_(np.can_cast('i4', 'S4')) - - assert_(np.can_cast('i8', 'i8', 'no')) - assert_(not np.can_cast('i8', 'no')) - - assert_(np.can_cast('i8', 'equiv')) - assert_(not np.can_cast('i8', 'equiv')) - - assert_(np.can_cast('i8', 'safe')) - assert_(not np.can_cast('i4', 'safe')) - - assert_(np.can_cast('i4', 'same_kind')) - assert_(not np.can_cast('u4', 'same_kind')) - - assert_(np.can_cast('u4', 'unsafe')) - - assert_raises(TypeError, np.can_cast, 'i4', None) - assert_raises(TypeError, np.can_cast, None, 'i4') - -class TestFromiter(TestCase): - def makegen(self): - for x in xrange(24): - yield x**2 - - def test_types(self): - ai32 = fromiter(self.makegen(), int32) - ai64 = fromiter(self.makegen(), int64) - af = fromiter(self.makegen(), float) - self.assertTrue(ai32.dtype == dtype(int32)) - self.assertTrue(ai64.dtype == dtype(int64)) - self.assertTrue(af.dtype == dtype(float)) - - def test_lengths(self): - expected = 
array(list(self.makegen())) - a = fromiter(self.makegen(), int) - a20 = fromiter(self.makegen(), int, 20) - self.assertTrue(len(a) == len(expected)) - self.assertTrue(len(a20) == 20) - try: - fromiter(self.makegen(), int, len(expected) + 10) - except ValueError: - pass - else: - self.fail() - - def test_values(self): - expected = array(list(self.makegen())) - a = fromiter(self.makegen(), int) - a20 = fromiter(self.makegen(), int, 20) - self.assertTrue(alltrue(a == expected,axis=0)) - self.assertTrue(alltrue(a20 == expected[:20],axis=0)) - -class TestNonzero(TestCase): - def test_nonzero_trivial(self): - assert_equal(np.count_nonzero(array([])), 0) - assert_equal(np.nonzero(array([])), ([],)) - - assert_equal(np.count_nonzero(array(0)), 0) - assert_equal(np.nonzero(array(0)), ([],)) - assert_equal(np.count_nonzero(array(1)), 1) - assert_equal(np.nonzero(array(1)), ([0],)) - - def test_nonzero_onedim(self): - x = array([1,0,2,-1,0,0,8]) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) - - x = array([(1,2),(0,0),(1,1),(-1,3),(0,7)], - dtype=[('a','i4'),('b','i2')]) - assert_equal(np.count_nonzero(x['a']), 3) - assert_equal(np.count_nonzero(x['b']), 4) - assert_equal(np.nonzero(x['a']), ([0,2,3],)) - assert_equal(np.nonzero(x['b']), ([0,2,3,4],)) - - def test_nonzero_twodim(self): - x = array([[0,1,0],[2,0,3]]) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0,1,1],[1,0,2])) - - x = np.eye(3) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0,1,2],[0,1,2])) - - x = array([[(0,1),(0,0),(1,11)], - [(1,1),(1,0),(0,0)], - [(0,0),(1,5),(0,1)]], dtype=[('a','f4'),('b','u1')]) - assert_equal(np.count_nonzero(x['a']), 4) - assert_equal(np.count_nonzero(x['b']), 5) - assert_equal(np.nonzero(x['a']), ([0,1,1,2],[2,0,1,1])) - assert_equal(np.nonzero(x['b']), ([0,0,1,2,2],[0,2,0,1,2])) - - assert_equal(np.count_nonzero(x['a'].T), 4) - assert_equal(np.count_nonzero(x['b'].T), 5) - 
assert_equal(np.nonzero(x['a'].T), ([0,1,1,2],[1,1,2,0])) - assert_equal(np.nonzero(x['b'].T), ([0,0,1,2,2],[0,1,2,0,2])) - -class TestIndex(TestCase): - def test_boolean(self): - a = rand(3,5,8) - V = rand(5,8) - g1 = randint(0,5,size=15) - g2 = randint(0,8,size=15) - V[g1,g2] = -V[g1,g2] - assert_((array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all()) - - -class TestBinaryRepr(TestCase): - def test_zero(self): - assert_equal(binary_repr(0),'0') - - def test_large(self): - assert_equal(binary_repr(10736848),'101000111101010011010000') - - def test_negative(self): - assert_equal(binary_repr(-1), '-1') - assert_equal(binary_repr(-1, width=8), '11111111') - -class TestBaseRepr(TestCase): - def test_base3(self): - assert_equal(base_repr(3**5, 3), '100000') - - def test_positive(self): - assert_equal(base_repr(12, 10), '12') - assert_equal(base_repr(12, 10, 4), '000012') - assert_equal(base_repr(12, 4), '30') - assert_equal(base_repr(3731624803700888, 36), '10QR0ROFCEW') - - def test_negative(self): - assert_equal(base_repr(-12, 10), '-12') - assert_equal(base_repr(-12, 10, 4), '-000012') - assert_equal(base_repr(-12, 4), '-30') - -class TestArrayComparisons(TestCase): - def test_array_equal(self): - res = array_equal(array([1,2]), array([1,2])) - assert_(res) - assert_(type(res) is bool) - res = array_equal(array([1,2]), array([1,2,3])) - assert_(not res) - assert_(type(res) is bool) - res = array_equal(array([1,2]), array([3,4])) - assert_(not res) - assert_(type(res) is bool) - res = array_equal(array([1,2]), array([1,3])) - assert_(not res) - assert_(type(res) is bool) - - def test_array_equiv(self): - res = array_equiv(array([1,2]), array([1,2])) - assert_(res) - assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([1,2,3])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([3,4])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([1,3])) - assert_(not res) - 
assert_(type(res) is bool) - - res = array_equiv(array([1,1]), array([1])) - assert_(res) - assert_(type(res) is bool) - res = array_equiv(array([1,1]), array([[1],[1]])) - assert_(res) - assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([2])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([[1],[2]])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([[1,2,3],[4,5,6],[7,8,9]])) - assert_(not res) - assert_(type(res) is bool) - - -def assert_array_strict_equal(x, y): - assert_array_equal(x, y) - # Check flags - assert_(x.flags == y.flags) - # check endianness - assert_(x.dtype.isnative == y.dtype.isnative) - - -class TestClip(TestCase): - def setUp(self): - self.nr = 5 - self.nc = 3 - - def fastclip(self, a, m, M, out=None): - if out is None: - return a.clip(m,M) - else: - return a.clip(m,M,out) - - def clip(self, a, m, M, out=None): - # use slow-clip - selector = less(a, m)+2*greater(a, M) - return selector.choose((a, m, M), out=out) - - # Handy functions - def _generate_data(self, n, m): - return randn(n, m) - - def _generate_data_complex(self, n, m): - return randn(n, m) + 1.j *rand(n, m) - - def _generate_flt_data(self, n, m): - return (randn(n, m)).astype(float32) - - def _neg_byteorder(self, a): - a = asarray(a) - if sys.byteorder == 'little': - a = a.astype(a.dtype.newbyteorder('>')) - else: - a = a.astype(a.dtype.newbyteorder('<')) - return a - - def _generate_non_native_data(self, n, m): - data = randn(n, m) - data = self._neg_byteorder(data) - assert_(not data.dtype.isnative) - return data - - def _generate_int_data(self, n, m): - return (10 * rand(n, m)).astype(int64) - - def _generate_int32_data(self, n, m): - return (10 * rand(n, m)).astype(int32) - - # Now the real test cases - def test_simple_double(self): - """Test native double input with scalar min/max.""" - a = self._generate_data(self.nr, self.nc) - m = 0.1 - M = 0.6 - ac = self.fastclip(a, m, M) - act 
= self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_int(self): - """Test native int input with scalar min/max.""" - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_array_double(self): - """Test native double input with array min/max.""" - a = self._generate_data(self.nr, self.nc) - m = zeros(a.shape) - M = m + 0.5 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_nonnative(self): - """Test non native double input with scalar min/max. - Test native double input with non native double scalar min/max.""" - a = self._generate_non_native_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - "Test native double input with non native double scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = self._neg_byteorder(0.6) - assert_(not M.dtype.isnative) - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - def test_simple_complex(self): - """Test native complex input with native double scalar min/max. - Test native input with complex double scalar min/max. - """ - a = 3 * self._generate_data_complex(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - "Test native input with complex double scalar min/max." - a = 3 * self._generate_data(self.nr, self.nc) - m = -0.5 + 1.j - M = 1. 
+ 2.j - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_clip_non_contig(self): - """Test clip for non contiguous native input and native scalar min/max.""" - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = self.fastclip(a, -1.6, 1.7) - act = self.clip(a, -1.6, 1.7) - assert_array_strict_equal(ac, act) - - def test_simple_out(self): - """Test native double input with scalar min/max.""" - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = zeros(a.shape) - act = zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_inout(self): - """Test native int32 input with double min/max and int32 out.""" - a = self._generate_int32_data(self.nr, self.nc) - m = float64(0) - M = float64(2) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_out(self): - """Test native int32 input with int32 scalar min/max and int64 out.""" - a = self._generate_int32_data(self.nr, self.nc) - m = int32(-1) - M = int32(1) - ac = zeros(a.shape, dtype = int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_inout(self): - """Test native int32 input with double array min/max and int32 out.""" - a = self._generate_int32_data(self.nr, self.nc) - m = zeros(a.shape, float64) - M = float64(1) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_out(self): - """Test native double input with scalar min/max and int out.""" - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = zeros(a.shape, dtype = int32) - act = 
ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_inplace_01(self): - """Test native double input with array min/max in-place.""" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_simple_inplace_02(self): - """Test native double input with scalar min/max in-place.""" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_noncontig_inplace(self): - """Test non contiguous double input with double scalar min/max in-place.""" - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_equal(a, ac) - - def test_type_cast_01(self): - "Test native double input with scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_02(self): - "Test native int32 input with int32 scalar min/max." - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int32) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_03(self): - "Test native int32 input with float64 scalar min/max." - a = self._generate_int32_data(self.nr, self.nc) - m = -2 - M = 4 - ac = self.fastclip(a, float64(m), float64(M)) - act = self.clip(a, float64(m), float64(M)) - assert_array_strict_equal(ac, act) - - def test_type_cast_04(self): - "Test native int32 input with float32 scalar min/max." 
- a = self._generate_int32_data(self.nr, self.nc) - m = float32(-2) - M = float32(4) - act = self.fastclip(a,m,M) - ac = self.clip(a,m,M) - assert_array_strict_equal(ac, act) - - def test_type_cast_05(self): - "Test native int32 with double arrays min/max." - a = self._generate_int_data(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m * zeros(a.shape), M) - act = self.clip(a, m * zeros(a.shape), M) - assert_array_strict_equal(ac, act) - - def test_type_cast_06(self): - "Test native with NON native scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = 0.5 - m_s = self._neg_byteorder(m) - M = 1. - act = self.clip(a, m_s, M) - ac = self.fastclip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_07(self): - "Test NON native with native array min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 * ones(a.shape) - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - act = a_s.clip(m, M) - ac = self.fastclip(a_s, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_08(self): - "Test NON native with native scalar min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - ac = self.fastclip(a_s, m , M) - act = a_s.clip(m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_09(self): - "Test native with NON native array min/max." - a = self._generate_data(self.nr, self.nc) - m = -0.5 * ones(a.shape) - M = 1. 
- m_s = self._neg_byteorder(m) - assert_(not m_s.dtype.isnative) - ac = self.fastclip(a, m_s , M) - act = self.clip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_10(self): - """Test native int32 with float min/max and float out for output argument.""" - a = self._generate_int_data(self.nr, self.nc) - b = zeros(a.shape, dtype = float32) - m = float32(-0.5) - M = float32(1) - act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m , M, out = b) - assert_array_strict_equal(ac, act) - - def test_type_cast_11(self): - "Test non native with native scalar, min/max, out non native" - a = self._generate_non_native_data(self.nr, self.nc) - b = a.copy() - b = b.astype(b.dtype.newbyteorder('>')) - bt = b.copy() - m = -0.5 - M = 1. - self.fastclip(a, m , M, out = b) - self.clip(a, m, M, out = bt) - assert_array_strict_equal(b, bt) - - def test_type_cast_12(self): - "Test native int32 input and min/max and float out" - a = self._generate_int_data(self.nr, self.nc) - b = zeros(a.shape, dtype = float32) - m = int32(0) - M = int32(1) - act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m , M, out = b) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple(self): - "Test native double input with scalar min/max" - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = zeros(a.shape) - act = zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple2(self): - "Test native int32 input with double min/max and int32 out" - a = self._generate_int32_data(self.nr, self.nc) - m = float64(0) - M = float64(2) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple_int32(self): - "Test native int32 input with int32 scalar min/max and int64 out" - a = self._generate_int32_data(self.nr, self.nc) - m = int32(-1) - M = int32(1) - 
ac = zeros(a.shape, dtype = int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_int32(self): - "Test native int32 input with double array min/max and int32 out" - a = self._generate_int32_data(self.nr, self.nc) - m = zeros(a.shape, float64) - M = float64(1) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_outint32(self): - "Test native double input with scalar min/max and int out" - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_inplace_array(self): - "Test native double input with array min/max" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_inplace_simple(self): - "Test native double input with scalar min/max" - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_func_takes_out(self): - """ Ensure that the clip() function takes an out= argument. 
- """ - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - a2 = clip(a, m, M, out=a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a2, ac) - self.assertTrue(a2 is a) - - -class TestAllclose(object): - rtol = 1e-5 - atol = 1e-8 - - def tst_allclose(self,x,y): - assert_(allclose(x,y), "%s and %s not close" % (x,y)) - - def tst_not_allclose(self,x,y): - assert_(not allclose(x,y), "%s and %s shouldn't be close" % (x,y)) - - def test_ip_allclose(self): - """Parametric test factory.""" - arr = array([100,1000]) - aran = arange(125).reshape((5,5,5)) - - atol = self.atol - rtol = self.rtol - - data = [([1,0], [1,0]), - ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol), - (inf, inf), - (inf, [inf])] - - for (x,y) in data: - yield (self.tst_allclose,x,y) - - def test_ip_not_allclose(self): - """Parametric test factory.""" - aran = arange(125).reshape((5,5,5)) - - atol = self.atol - rtol = self.rtol - - data = [([inf,0], [1,inf]), - ([inf,0], [1,0]), - ([inf,inf], [1,inf]), - ([inf,inf], [1,0]), - ([-inf, 0], [inf, 0]), - ([nan,0], [nan,0]), - ([atol*2], [0]), - ([1], [1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), - (array([inf,1]), array([0,inf]))] - - for (x,y) in data: - yield (self.tst_not_allclose,x,y) - - def test_no_parameter_modification(self): - x = array([inf,1]) - y = array([0,inf]) - allclose(x,y) - assert_array_equal(x,array([inf,1])) - assert_array_equal(y,array([0,inf])) - - -class TestStdVar(TestCase): - def setUp(self): - self.A = array([1,-1,1,-1]) - self.real_var = 1 - - def test_basic(self): - assert_almost_equal(var(self.A),self.real_var) - assert_almost_equal(std(self.A)**2,self.real_var) - - def test_ddof1(self): - assert_almost_equal(var(self.A,ddof=1), - self.real_var*len(self.A)/float(len(self.A)-1)) - assert_almost_equal(std(self.A,ddof=1)**2, - self.real_var*len(self.A)/float(len(self.A)-1)) - - def test_ddof2(self): - 
assert_almost_equal(var(self.A,ddof=2), - self.real_var*len(self.A)/float(len(self.A)-2)) - assert_almost_equal(std(self.A,ddof=2)**2, - self.real_var*len(self.A)/float(len(self.A)-2)) - - -class TestStdVarComplex(TestCase): - def test_basic(self): - A = array([1,1.j,-1,-1.j]) - real_var = 1 - assert_almost_equal(var(A),real_var) - assert_almost_equal(std(A)**2,real_var) - - -class TestLikeFuncs(TestCase): - '''Test ones_like, zeros_like, and empty_like''' - - def setUp(self): - self.data = [ - # Array scalars - (array(3.), None), - (array(3), 'f8'), - # 1D arrays - (arange(6, dtype='f4'), None), - (arange(6), 'c16'), - # 2D C-layout arrays - (arange(6).reshape(2,3), None), - (arange(6).reshape(3,2), 'i1'), - # 2D F-layout arrays - (arange(6).reshape((2,3), order='F'), None), - (arange(6).reshape((3,2), order='F'), 'i1'), - # 3D C-layout arrays - (arange(24).reshape(2,3,4), None), - (arange(24).reshape(4,3,2), 'f4'), - # 3D F-layout arrays - (arange(24).reshape((2,3,4), order='F'), None), - (arange(24).reshape((4,3,2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (arange(24).reshape(2,3,4).swapaxes(0,1), None), - (arange(24).reshape(4,3,2).swapaxes(0,1), '?'), - ] - - def check_like_function(self, like_function, value): - for d, dtype in self.data: - # default (K) order, dtype - dz = like_function(d, dtype=dtype) - assert_equal(dz.shape, d.shape) - assert_equal(array(dz.strides)*d.dtype.itemsize, - array(d.strides)*dz.dtype.itemsize) - assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) - assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - if not value is None: - assert_(all(dz == value)) - - # C order, default dtype - dz = like_function(d, order='C', dtype=dtype) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - if not 
value is None: - assert_(all(dz == value)) - - # F order, default dtype - dz = like_function(d, order='F', dtype=dtype) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - if not value is None: - assert_(all(dz == value)) - - # A order - dz = like_function(d, order='A', dtype=dtype) - assert_equal(dz.shape, d.shape) - if d.flags.f_contiguous: - assert_(dz.flags.f_contiguous) - else: - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - if not value is None: - assert_(all(dz == value)) - - # Test the 'subok' parameter' - a = np.matrix([[1,2],[3,4]]) - - b = like_function(a) - assert_(type(b) is np.matrix) - - b = like_function(a, subok=False) - assert_(not (type(b) is np.matrix)) - - def test_ones_like(self): - self.check_like_function(np.ones_like, 1) - - def test_zeros_like(self): - self.check_like_function(np.zeros_like, 0) - - def test_empty_like(self): - self.check_like_function(np.empty_like, None) - -class _TestCorrelate(TestCase): - def _setup(self, dt): - self.x = np.array([1, 2, 3, 4, 5], dtype=dt) - self.y = np.array([-1, -2, -3], dtype=dt) - self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) - self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt) - - def test_float(self): - self._setup(np.float) - z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z2) - - def test_object(self): - self._setup(Decimal) - z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z2) - -class 
TestCorrelate(_TestCorrelate): - old_behavior = True - def _setup(self, dt): - # correlate uses an unconventional definition so that correlate(a, b) - # == correlate(b, a), so force the corresponding outputs to be the same - # as well - _TestCorrelate._setup(self, dt) - self.z2 = self.z1 - - @dec.deprecated() - def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=np.complex) - y = np.array([-1, -2j, 3+1j], dtype=np.complex) - r_z = np.array([3+1j, 6, 8-1j, 9+1j, -1-8j, -4-1j], dtype=np.complex) - z = np.correlate(x, y, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, r_z) - - @dec.deprecated() - def test_float(self): - _TestCorrelate.test_float(self) - - @dec.deprecated() - def test_object(self): - _TestCorrelate.test_object(self) - -class TestCorrelateNew(_TestCorrelate): - old_behavior = False - def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=np.complex) - y = np.array([-1, -2j, 3+1j], dtype=np.complex) - r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex) - #z = np.acorrelate(x, y, 'full') - #assert_array_almost_equal(z, r_z) - - r_z = r_z[::-1].conjugate() - z = np.correlate(y, x, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, r_z) - -class TestArgwhere: - def test_2D(self): - x = np.arange(6).reshape((2, 3)) - assert_array_equal(np.argwhere(x > 1), - [[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - def test_list(self): - assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) - -class TestStringFunction: - def test_set_string_function(self): - a = np.array([1]) - np.set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - np.set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - np.set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - np.set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - -if __name__ == "__main__": - run_module_suite() diff --git 
a/numpy-1.6.2/numpy/core/tests/test_numerictypes.py b/numpy-1.6.2/numpy/core/tests/test_numerictypes.py deleted file mode 100644 index bb71ce0081..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_numerictypes.py +++ /dev/null @@ -1,372 +0,0 @@ -import sys -from numpy.testing import * -from numpy.compat import asbytes, asunicode -import numpy as np - -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3,2], [[6.,4.],[6.,4.]], 8), - ([4,3], [[7.,5.],[7.,5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3,2], (6j, 6., (asbytes('nn'), [6j,4j], [6.,4.], [1,2]), asbytes('NN'), True), asbytes('cc'), (asunicode('NN'), 6j), [[6.,4.],[6.,4.]], 8), - ([4,3], (7j, 7., (asbytes('oo'), [7j,5j], [7.,5.], [2,1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7.,5.],[7.,5.]], 9), - ] - - -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] - -def 
normalize_descr(descr): - "Normalize a description adding the platform byteorder." - - out = [] - for item in descr: - dtype = item[1] - if isinstance(dtype, str): - if dtype[0] not in ['|','<','>']: - onebyte = dtype[1:] == "1" - if onebyte or dtype[0] in ['S', 'V', 'b']: - dtype = "|" + dtype - else: - dtype = byteorder + dtype - if len(item) > 2 and np.prod(item[2]) > 1: - nitem = (item[0], dtype, item[2]) - else: - nitem = (item[0], dtype) - out.append(nitem) - elif isinstance(item[1], list): - l = [] - for j in normalize_descr(item[1]): - l.append(j) - out.append((item[0], l)) - else: - raise ValueError("Expected a str or list and got %s" % \ - (type(item))) - return out - - -############################################################ -# Creation tests -############################################################ - -class create_zeros(object): - """Check the creation of heterogeneous arrays zero-valued""" - - def test_zeros0D(self): - """Check creation of 0-dimensional objects""" - h = np.zeros((), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void') - self.assertTrue(h.dtype.fields['x'][0].char == 'V') - self.assertTrue(h.dtype.fields['x'][0].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((), dtype='u1')) - - def test_zerosSD(self): - """Check creation of single-dimensional objects""" - h = np.zeros((2,), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype['y'].name[:4] == 'void') - self.assertTrue(h.dtype['y'].char == 'V') - self.assertTrue(h.dtype['y'].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2,), dtype='u1')) - - def test_zerosMD(self): - """Check creation of multi-dimensional objects""" - h = np.zeros((2,3), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype['z'].name == 
'uint8') - self.assertTrue(h.dtype['z'].char == 'B') - self.assertTrue(h.dtype['z'].type == np.uint8) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2,3), dtype='u1')) - - -class test_create_zeros_plain(create_zeros, TestCase): - """Check the creation of heterogeneous arrays zero-valued (plain)""" - _descr = Pdescr - -class test_create_zeros_nested(create_zeros, TestCase): - """Check the creation of heterogeneous arrays zero-valued (nested)""" - _descr = Ndescr - - -class create_values(object): - """Check the creation of heterogeneous arrays with values""" - - def test_tuple(self): - """Check creation from tuples""" - h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assertTrue(h.shape == (2,)) - else: - self.assertTrue(h.shape == ()) - - def test_list_of_tuple(self): - """Check creation from list of tuples""" - h = np.array([self._buffer], dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assertTrue(h.shape == (1,2)) - else: - self.assertTrue(h.shape == (1,)) - - def test_list_of_list_of_tuple(self): - """Check creation from list of list of tuples""" - h = np.array([[self._buffer]], dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assertTrue(h.shape == (1,1,2)) - else: - self.assertTrue(h.shape == (1,1)) - - -class test_create_values_plain_single(create_values, TestCase): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class test_create_values_plain_multiple(create_values, TestCase): - """Check the creation of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class test_create_values_nested_single(create_values, TestCase): - """Check the creation of heterogeneous arrays (nested, 
single row)""" - _descr = Ndescr - multiple_rows = 0 - _buffer = NbufferT[0] - -class test_create_values_nested_multiple(create_values, TestCase): - """Check the creation of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = 1 - _buffer = NbufferT - - -############################################################ -# Reading tests -############################################################ - -class read_values_plain(object): - """Check the reading of values in heterogeneous arrays (plain)""" - - def test_access_fields(self): - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - self.assertTrue(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) - else: - self.assertTrue(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][1], - self._buffer[1][1]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][2], - self._buffer[1][2]], dtype='u1')) - - -class test_read_values_plain_single(read_values_plain, TestCase): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class test_read_values_plain_multiple(read_values_plain, TestCase): - """Check the values of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class read_values_nested(object): - """Check the reading of values in heterogeneous arrays (nested)""" - - - def test_access_top_fields(self): - """Check reading the top fields of a nested array""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - self.assertTrue(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[4], 
dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) - else: - self.assertTrue(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][4], - self._buffer[1][4]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][5], - self._buffer[1][5]], dtype='u1')) - - - def test_nested1_acessors(self): - """Check reading the nested fields of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['value'], - np.array(self._buffer[1][0], dtype='c16')) - assert_equal(h['Info']['y2'], - np.array(self._buffer[1][1], dtype='f8')) - assert_equal(h['info']['Name'], - np.array(self._buffer[3][0], dtype='U2')) - assert_equal(h['info']['Value'], - np.array(self._buffer[3][1], dtype='c16')) - else: - assert_equal(h['Info']['value'], - np.array([self._buffer[0][1][0], - self._buffer[1][1][0]], - dtype='c16')) - assert_equal(h['Info']['y2'], - np.array([self._buffer[0][1][1], - self._buffer[1][1][1]], - dtype='f8')) - assert_equal(h['info']['Name'], - np.array([self._buffer[0][3][0], - self._buffer[1][3][0]], - dtype='U2')) - assert_equal(h['info']['Value'], - np.array([self._buffer[0][3][1], - self._buffer[1][3][1]], - dtype='c16')) - - def test_nested2_acessors(self): - """Check reading the nested fields of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['Info2']['value'], - np.array(self._buffer[1][2][1], dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array(self._buffer[1][2][3], dtype='u4')) - else: - assert_equal(h['Info']['Info2']['value'], - np.array([self._buffer[0][1][2][1], - self._buffer[1][1][2][1]], - dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array([self._buffer[0][1][2][3], - self._buffer[1][1][2][3]], - dtype='u4')) - - def test_nested1_descriptor(self): - 
"""Check access nested descriptors of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(h.dtype['Info']['value'].name == 'complex128') - self.assertTrue(h.dtype['Info']['y2'].name == 'float64') - if sys.version_info[0] >= 3: - self.assertTrue(h.dtype['info']['Name'].name == 'str256') - else: - self.assertTrue(h.dtype['info']['Name'].name == 'unicode256') - self.assertTrue(h.dtype['info']['Value'].name == 'complex128') - - def test_nested2_descriptor(self): - """Check access nested descriptors of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256') - self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64') - - -class test_read_values_nested_single(read_values_nested, TestCase): - """Check the values of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = False - _buffer = NbufferT[0] - -class test_read_values_nested_multiple(read_values_nested, TestCase): - """Check the values of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = True - _buffer = NbufferT - -class TestEmptyField(TestCase): - def test_assign(self): - a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"),("float", "<2f4")] - assert(a['int'].shape == (5,0)) - assert(a['float'].shape == (5,2)) - -class TestCommonType(TestCase): - def test_scalar_loses1(self): - res = np.find_common_type(['f4','f4','i2'],['f8']) - assert(res == 'f4') - def test_scalar_loses2(self): - res = np.find_common_type(['f4','f4'],['i8']) - assert(res == 'f4') - def test_scalar_wins(self): - res = np.find_common_type(['f4','f4','i2'],['c8']) - assert(res == 'c8') - def test_scalar_wins2(self): - res = np.find_common_type(['u4','i4','i4'],['f4']) - assert(res == 'f8') - def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose - res = np.find_common_type(['u8','i8','i8'],['f8']) - assert(res == 'f8') - 
-class TestMultipleFields(TestCase): - def setUp(self): - self.ary = np.array([(1,2,3,4),(5,6,7,8)], dtype='i4,f4,i2,c8') - def _bad_call(self): - return self.ary['f0','f1'] - def test_no_tuple(self): - self.assertRaises(ValueError, self._bad_call) - def test_return(self): - res = self.ary[['f0','f2']].tolist() - assert(res == [(1,3), (5,7)]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_print.py b/numpy-1.6.2/numpy/core/tests/test_print.py deleted file mode 100644 index 349c0b240a..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_print.py +++ /dev/null @@ -1,266 +0,0 @@ -import numpy as np -from numpy.testing import * -import nose - -import locale -import sys -from StringIO import StringIO - -_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} - - -def check_float_type(tp): - for x in [0, 1,-1, 1e20] : - assert_equal(str(tp(x)), str(float(x)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e10).itemsize > 4: - assert_equal(str(tp(1e10)), str(float('1e10')), - err_msg='Failed str formatting for type %s' % tp) - else: - if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ - sys.version_info[1] <= 5: - ref = '1e+010' - else: - ref = '1e+10' - assert_equal(str(tp(1e10)), ref, - err_msg='Failed str formatting for type %s' % tp) - -def test_float_types(): - """ Check formatting. - - This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the - python float precision. - - """ - for t in [np.float32, np.double, np.longdouble] : - yield check_float_type, t - -def check_nan_inf_float(tp): - for x in [np.inf, -np.inf, np.nan]: - assert_equal(str(tp(x)), _REF[x], - err_msg='Failed str formatting for type %s' % tp) - -def test_nan_inf_float(): - """ Check formatting of nan & inf. - - This is only for the str function, and only for simple types. 
- The precision of np.float and np.longdouble aren't the same as the - python float precision. - - """ - for t in [np.float32, np.double, np.longdouble] : - yield check_nan_inf_float, t - -def check_complex_type(tp): - for x in [0, 1,-1, 1e20] : - assert_equal(str(tp(x)), str(complex(x)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x*1j)), str(complex(x*1j)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e10).itemsize > 8: - assert_equal(str(tp(1e10)), str(complex(1e10)), - err_msg='Failed str formatting for type %s' % tp) - else: - if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ - sys.version_info[1] <= 5: - ref = '(1e+010+0j)' - else: - ref = '(1e+10+0j)' - assert_equal(str(tp(1e10)), ref, - err_msg='Failed str formatting for type %s' % tp) - -def test_complex_types(): - """Check formatting of complex types. - - This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the - python float precision. 
- - """ - for t in [np.complex64, np.cdouble, np.clongdouble] : - yield check_complex_type, t - -def test_complex_inf_nan(): - """Check inf/nan formatting of complex types.""" - if sys.version_info >= (2, 6): - TESTS = { - complex(np.inf, 0): "(inf+0j)", - complex(0, np.inf): "inf*j", - complex(-np.inf, 0): "(-inf+0j)", - complex(0, -np.inf): "-inf*j", - complex(np.inf, 1): "(inf+1j)", - complex(1, np.inf): "(1+inf*j)", - complex(-np.inf, 1): "(-inf+1j)", - complex(1, -np.inf): "(1-inf*j)", - complex(np.nan, 0): "(nan+0j)", - complex(0, np.nan): "nan*j", - complex(-np.nan, 0): "(nan+0j)", - complex(0, -np.nan): "nan*j", - complex(np.nan, 1): "(nan+1j)", - complex(1, np.nan): "(1+nan*j)", - complex(-np.nan, 1): "(nan+1j)", - complex(1, -np.nan): "(1+nan*j)", - } - else: - TESTS = { - complex(np.inf, 0): "(inf+0j)", - complex(0, np.inf): "infj", - complex(-np.inf, 0): "(-inf+0j)", - complex(0, -np.inf): "-infj", - complex(np.inf, 1): "(inf+1j)", - complex(1, np.inf): "(1+infj)", - complex(-np.inf, 1): "(-inf+1j)", - complex(1, -np.inf): "(1-infj)", - complex(np.nan, 0): "(nan+0j)", - complex(0, np.nan): "nanj", - complex(-np.nan, 0): "(nan+0j)", - complex(0, -np.nan): "nanj", - complex(np.nan, 1): "(nan+1j)", - complex(1, np.nan): "(1+nanj)", - complex(-np.nan, 1): "(nan+1j)", - complex(1, -np.nan): "(1+nanj)", - } - for tp in [np.complex64, np.cdouble, np.clongdouble]: - for c, s in TESTS.items(): - yield _check_complex_inf_nan, c, s, tp - -def _check_complex_inf_nan(c, s, dtype): - assert_equal(str(dtype(c)), s) - -# print tests -def _test_redirected_print(x, tp, ref=None): - file = StringIO() - file_tp = StringIO() - stdout = sys.stdout - try: - sys.stdout = file_tp - print tp(x) - sys.stdout = file - if ref: - print ref - else: - print x - finally: - sys.stdout = stdout - - assert_equal(file.getvalue(), file_tp.getvalue(), - err_msg='print failed for type%s' % tp) - -def check_float_type_print(tp): - for x in [0, 1,-1, 1e20]: - _test_redirected_print(float(x), 
tp) - - for x in [np.inf, -np.inf, np.nan]: - _test_redirected_print(float(x), tp, _REF[x]) - - if tp(1e10).itemsize > 4: - _test_redirected_print(float(1e10), tp) - else: - if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ - sys.version_info[1] <= 5: - ref = '1e+010' - else: - ref = '1e+10' - _test_redirected_print(float(1e10), tp, ref) - -def check_complex_type_print(tp): - # We do not create complex with inf/nan directly because the feature is - # missing in python < 2.6 - for x in [0, 1, -1, 1e20]: - _test_redirected_print(complex(x), tp) - - if tp(1e10).itemsize > 8: - _test_redirected_print(complex(1e10), tp) - else: - if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ - sys.version_info[1] <= 5: - ref = '(1e+010+0j)' - else: - ref = '(1e+10+0j)' - _test_redirected_print(complex(1e10), tp, ref) - - _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') - _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') - _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') - -def test_float_type_print(): - """Check formatting when using print """ - for t in [np.float32, np.double, np.longdouble] : - yield check_float_type_print, t - -def test_complex_type_print(): - """Check formatting when using print """ - for t in [np.complex64, np.cdouble, np.clongdouble] : - yield check_complex_type_print, t - -@dec.skipif(sys.version_info < (2,6)) -def test_scalar_format(): - """Test the str.format method with NumPy scalar types""" - tests = [('{0}', True, np.bool_), - ('{0}', False, np.bool_), - ('{0:d}', 130, np.uint8), - ('{0:d}', 50000, np.uint16), - ('{0:d}', 3000000000, np.uint32), - ('{0:d}', 15000000000000000000, np.uint64), - ('{0:d}', -120, np.int8), - ('{0:d}', -30000, np.int16), - ('{0:d}', -2000000000, np.int32), - ('{0:d}', -7000000000000000000, np.int64), - ('{0:g}', 1.5, np.float16), - ('{0:g}', 1.5, np.float32), - ('{0:g}', 1.5, np.float64), - ('{0:g}', 1.5, np.longdouble)] - - for (fmat, val, valtype) in tests: - 
assert_equal(fmat.format(val), fmat.format(valtype(val)), - "failed with val %s, type %s" % (val, valtype)) - - -# Locale tests: scalar types formatting should be independent of the locale -def in_foreign_locale(func): - """ - Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.' - If not possible, raise nose.SkipTest - - """ - if sys.platform == 'win32': - locales = ['FRENCH'] - else: - locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] - - def wrapper(*args, **kwargs): - curloc = locale.getlocale(locale.LC_NUMERIC) - try: - for loc in locales: - try: - locale.setlocale(locale.LC_NUMERIC, loc) - break - except locale.Error: - pass - else: - raise nose.SkipTest("Skipping locale test, because " - "French locale not found") - return func(*args, **kwargs) - finally: - locale.setlocale(locale.LC_NUMERIC, locale=curloc) - return nose.tools.make_decorator(func)(wrapper) - -@in_foreign_locale -def test_locale_single(): - assert_equal(str(np.float32(1.2)), str(float(1.2))) - -@in_foreign_locale -def test_locale_double(): - assert_equal(str(np.double(1.2)), str(float(1.2))) - -@in_foreign_locale -def test_locale_longdouble(): - assert_equal(str(np.longdouble(1.2)), str(float(1.2))) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_records.py b/numpy-1.6.2/numpy/core/tests/test_records.py deleted file mode 100644 index f96a5452dc..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_records.py +++ /dev/null @@ -1,160 +0,0 @@ -from os import path -import numpy as np -from numpy.testing import * -from numpy.compat import asbytes, asunicode - -import warnings - - -class TestFromrecords(TestCase): - def test_fromrecords(self): - r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], - names='col1,col2,col3') - assert_equal(r[0].item(), (456, 'dbe', 1.2)) - - def test_method_array(self): - r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big') - 
assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924)) - - def test_method_array2(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1].item(), (2, 22.0, asbytes('b'))) - - def test_recarray_slices(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d'))) - - def test_recarray_fromarrays(self): - x1 = np.array([1, 2, 3, 4]) - x2 = np.array(['a', 'dd', 'xyz', '12']) - x3 = np.array([1.1, 2, 3, 4]) - r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') - assert_equal(r[1].item(), (2, 'dd', 2.0)) - x1[1] = 34 - assert_equal(r.a, np.array([1, 2, 3, 4])) - - def test_recarray_fromfile(self): - data_dir = path.join(path.dirname(__file__), 'data') - filename = path.join(data_dir, 'recarray_from_file.fits') - fd = open(filename, 'rb') - fd.seek(2880 * 2) - r = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.seek(2880 * 2) - r = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big') - - def test_recarray_from_obj(self): - count = 10 - a = np.zeros(count, dtype='O') - b = np.zeros(count, dtype='f8') - c = np.zeros(count, dtype='f8') - for i in range(len(a)): - a[i] = range(1, 10) - - mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') - for i in range(len(a)): - assert (mine.date[i] == range(1, 10)) - assert (mine.data1[i] == 0.0) - assert (mine.data2[i] == 0.0) - - def test_recarray_from_repr(self): - x = np.rec.array([ (1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - y = eval("np." 
+ repr(x)) - assert isinstance(y, np.recarray) - assert_equal(y, x) - - def test_recarray_from_names(self): - ra = np.rec.array([ - (1, 'abc', 3.7000002861022949, 0), - (2, 'xy', 6.6999998092651367, 1), - (0, ' ', 0.40000000596046448, 0)], - names='c1, c2, c3, c4') - pa = np.rec.fromrecords([ - (1, 'abc', 3.7000002861022949, 0), - (2, 'xy', 6.6999998092651367, 1), - (0, ' ', 0.40000000596046448, 0)], - names='c1, c2, c3, c4') - assert ra.dtype == pa.dtype - assert ra.shape == pa.shape - for k in xrange(len(ra)): - assert ra[k].item() == pa[k].item() - - def test_recarray_conflict_fields(self): - ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2), - (3, 'wrs', 1.3)], - names='field, shape, mean') - ra.mean = [1.1, 2.2, 3.3] - assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) - assert type(ra.mean) is type(ra.var) - ra.shape = (1, 3) - assert ra.shape == (1, 3) - ra.shape = ['A', 'B', 'C'] - assert_array_equal(ra['shape'], [['A', 'B', 'C']]) - ra.field = 5 - assert_array_equal(ra['field'], [[5, 5, 5]]) - assert callable(ra.field) - - def test_fromrecords_with_explicit_dtype(self): - a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], - dtype=[('a', int), ('b', np.object)]) - assert_equal(a.a, [1, 2]) - assert_equal(a[0].a, 1) - assert_equal(a.b, ['a', 'bbb']) - assert_equal(a[-1].b, 'bbb') - # - ndtype = np.dtype([('a', int), ('b', np.object)]) - a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype) - assert_equal(a.a, [1, 2]) - assert_equal(a[0].a, 1) - assert_equal(a.b, ['a', 'bbb']) - assert_equal(a[-1].b, 'bbb') - - -class TestRecord(TestCase): - def setUp(self): - self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], - dtype=[("col1", "= 3: - import io - StringIO = io.BytesIO - -rlevel = 1 - -class TestRegression(TestCase): - def test_invalid_round(self,level=rlevel): - """Ticket #3""" - v = 4.7599999999999998 - assert_array_equal(np.array([v]),np.array(v)) - - def test_mem_empty(self,level=rlevel): - """Ticket #7""" - 
np.empty((1,),dtype=[('x',np.int64)]) - - def test_pickle_transposed(self,level=rlevel): - """Ticket #16""" - a = np.transpose(np.array([[2,9],[7,0],[3,8]])) - f = StringIO() - pickle.dump(a,f) - f.seek(0) - b = pickle.load(f) - f.close() - assert_array_equal(a,b) - - def test_typeNA(self,level=rlevel): - """Ticket #31""" - assert_equal(np.typeNA[np.int64],'Int64') - assert_equal(np.typeNA[np.uint64],'UInt64') - - def test_dtype_names(self,level=rlevel): - """Ticket #35""" - dt = np.dtype([(('name','label'),np.int32,3)]) - - def test_reduce(self,level=rlevel): - """Ticket #40""" - assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5) - - def test_zeros_order(self,level=rlevel): - """Ticket #43""" - np.zeros([3], int, 'C') - np.zeros([3], order='C') - np.zeros([3], int, order='C') - - def test_sort_bigendian(self,level=rlevel): - """Ticket #47""" - a = np.linspace(0, 10, 11) - c = a.astype(np.dtype('= 3) or - (sys.platform == "win32" and platform.architecture()[0] == "64bit"), - "numpy.intp('0xff', 16) not supported on Py3, " - "as it does not inherit from Python int") - def test_intp(self,level=rlevel): - """Ticket #99""" - i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width,16) - self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16) - self.assertRaises(ValueError,np.intp,'0x1',32) - assert_equal(255,np.intp('0xFF',16)) - assert_equal(1024,np.intp(1024)) - - def test_endian_bool_indexing(self,level=rlevel): - """Ticket #105""" - a = np.arange(10.,dtype='>f8') - b = np.arange(10.,dtype='2) & (a<6)) - xb = np.where((b>2) & (b<6)) - ya = ((a>2) & (a<6)) - yb = ((b>2) & (b<6)) - assert_array_almost_equal(xa,ya.nonzero()) - assert_array_almost_equal(xb,yb.nonzero()) - assert_(np.all(a[ya] > 0.5)) - assert_(np.all(b[yb] > 0.5)) - - def test_mem_dot(self,level=rlevel): - """Ticket #106""" - x = np.random.randn(0,1) - y = np.random.randn(10,1) - z = np.dot(x, np.transpose(y)) - - def test_arange_endian(self,level=rlevel): - """Ticket #111""" - 
ref = np.arange(10) - x = np.arange(10,dtype='f8') - assert_array_equal(ref,x) - -# Longfloat support is not consistent enough across -# platforms for this test to be meaningful. -# def test_longfloat_repr(self,level=rlevel): -# """Ticket #112""" -# if np.longfloat(0).itemsize > 8: -# a = np.exp(np.array([1000],dtype=np.longfloat)) -# assert_(str(a)[1:9] == str(a[0])[:8]) - - def test_argmax(self,level=rlevel): - """Ticket #119""" - a = np.random.normal(0,1,(4,5,6,7,8)) - for i in xrange(a.ndim): - aargmax = a.argmax(i) - - def test_mem_divmod(self,level=rlevel): - """Ticket #126""" - for i in range(10): - divmod(np.array([i])[0],10) - - - def test_hstack_invalid_dims(self,level=rlevel): - """Ticket #128""" - x = np.arange(9).reshape((3,3)) - y = np.array([0,0,0]) - self.assertRaises(ValueError,np.hstack,(x,y)) - - def test_squeeze_type(self,level=rlevel): - """Ticket #133""" - a = np.array([3]) - b = np.array(3) - assert_(type(a.squeeze()) is np.ndarray) - assert_(type(b.squeeze()) is np.ndarray) - - def test_add_identity(self,level=rlevel): - """Ticket #143""" - assert_equal(0,np.add.identity) - - def test_binary_repr_0(self,level=rlevel): - """Ticket #151""" - assert_equal('0',np.binary_repr(0)) - - def test_rec_iterate(self,level=rlevel): - """Ticket #160""" - descr = np.dtype([('i',int),('f',float),('s','|S3')]) - x = np.rec.array([(1,1.1,'1.0'), - (2,2.2,'2.0')],dtype=descr) - x[0].tolist() - [i for i in x[0]] - - def test_unicode_string_comparison(self,level=rlevel): - """Ticket #190""" - a = np.array('hello',np.unicode_) - b = np.array('world') - a == b - - def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel): - """Fix in r2836""" - # Create discontiguous Fortran-ordered array - x = np.array(np.random.rand(3,3),order='F')[:,:2] - assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring())) - - def test_flat_assignment(self,level=rlevel): - """Correct behaviour of ticket #194""" - x = np.empty((3,1)) - x.flat = np.arange(3) - 
assert_array_almost_equal(x,[[0],[1],[2]]) - x.flat = np.arange(3,dtype=float) - assert_array_almost_equal(x,[[0],[1],[2]]) - - def test_broadcast_flat_assignment(self,level=rlevel): - """Ticket #194""" - x = np.empty((3,1)) - def bfa(): x[:] = np.arange(3) - def bfb(): x[:] = np.arange(3,dtype=float) - self.assertRaises(ValueError, bfa) - self.assertRaises(ValueError, bfb) - - def test_unpickle_dtype_with_object(self,level=rlevel): - """Implemented in r2840""" - dt = np.dtype([('x',int),('y',np.object_),('z','O')]) - f = StringIO() - pickle.dump(dt,f) - f.seek(0) - dt_ = pickle.load(f) - f.close() - assert_equal(dt,dt_) - - def test_mem_array_creation_invalid_specification(self,level=rlevel): - """Ticket #196""" - dt = np.dtype([('x',int),('y',np.object_)]) - # Wrong way - self.assertRaises(ValueError, np.array, [1,'object'], dt) - # Correct way - np.array([(1,'object')],dt) - - def test_recarray_single_element(self,level=rlevel): - """Ticket #202""" - a = np.array([1,2,3],dtype=np.int32) - b = a.copy() - r = np.rec.array(a,shape=1,formats=['3i4'],names=['d']) - assert_array_equal(a,b) - assert_equal(a,r[0][0]) - - def test_zero_sized_array_indexing(self,level=rlevel): - """Ticket #205""" - tmp = np.array([]) - def index_tmp(): tmp[np.array(10)] - self.assertRaises(IndexError, index_tmp) - - def test_chararray_rstrip(self,level=rlevel): - """Ticket #222""" - x = np.chararray((1,),5) - x[0] = asbytes('a ') - x = x.rstrip() - assert_equal(x[0], asbytes('a')) - - def test_object_array_shape(self,level=rlevel): - """Ticket #239""" - assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,)) - assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2)) - assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2)) - assert_equal(np.array([],dtype=object).shape, (0,)) - assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0)) - assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,)) - - def test_mem_around(self,level=rlevel): - """Ticket #243""" 
- x = np.zeros((1,)) - y = [0] - decimal = 6 - np.around(abs(x-y),decimal) <= 10.0**(-decimal) - - def test_character_array_strip(self,level=rlevel): - """Ticket #246""" - x = np.char.array(("x","x ","x ")) - for c in x: assert_equal(c,"x") - - def test_lexsort(self,level=rlevel): - """Lexsort memory error""" - v = np.array([1,2,3,4,5,6,7,8,9,10]) - assert_equal(np.lexsort(v),0) - - def test_pickle_dtype(self,level=rlevel): - """Ticket #251""" - import pickle - pickle.dumps(np.float) - - def test_swap_real(self, level=rlevel): - """Ticket #265""" - assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0) - assert_equal(np.arange(4,dtype=' 1 and x['two'] > 2) - - def test_method_args(self, level=rlevel): - # Make sure methods and functions have same default axis - # keyword and arguments - funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'), - ('sometrue', 'any'), - ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), - 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', - 'round', 'min', 'max', 'argsort', 'sort'] - funcs2 = ['compress', 'take', 'repeat'] - - for func in funcs1: - arr = np.random.rand(8,7) - arr2 = arr.copy() - if isinstance(func, tuple): - func_meth = func[1] - func = func[0] - else: - func_meth = func - res1 = getattr(arr, func_meth)() - res2 = getattr(np, func)(arr2) - if res1 is None: - assert abs(arr-res2).max() < 1e-8, func - else: - assert abs(res1-res2).max() < 1e-8, func - - for func in funcs2: - arr1 = np.random.rand(8,7) - arr2 = np.random.rand(8,7) - res1 = None - if func == 'compress': - arr1 = arr1.ravel() - res1 = getattr(arr2, func)(arr1) - else: - arr2 = (15*arr2).astype(int).ravel() - if res1 is None: - res1 = getattr(arr1, func)(arr2) - res2 = getattr(np, func)(arr1, arr2) - assert abs(res1-res2).max() < 1e-8, func - - def test_mem_lexsort_strings(self, level=rlevel): - """Ticket #298""" - lst = ['abc','cde','fgh'] - np.lexsort((lst,)) - - def test_fancy_index(self, level=rlevel): - """Ticket #302""" - x = 
np.array([1,2])[np.array([0])] - assert_equal(x.shape,(1,)) - - def test_recarray_copy(self, level=rlevel): - """Ticket #312""" - dt = [('x',np.int16),('y',np.float64)] - ra = np.array([(1,2.3)], dtype=dt) - rb = np.rec.array(ra, dtype=dt) - rb['x'] = 2. - assert ra['x'] != rb['x'] - - def test_rec_fromarray(self, level=rlevel): - """Ticket #322""" - x1 = np.array([[1,2],[3,4],[5,6]]) - x2 = np.array(['a','dd','xyz']) - x3 = np.array([1.1,2,3]) - np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8") - - def test_object_array_assign(self, level=rlevel): - x = np.empty((2,2),object) - x.flat[2] = (1,2,3) - assert_equal(x.flat[2],(1,2,3)) - - def test_ndmin_float64(self, level=rlevel): - """Ticket #324""" - x = np.array([1,2,3],dtype=np.float64) - assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2) - assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2) - - def test_mem_axis_minimization(self, level=rlevel): - """Ticket #327""" - data = np.arange(5) - data = np.add.outer(data,data) - - def test_mem_float_imag(self, level=rlevel): - """Ticket #330""" - np.float64(1.0).imag - - def test_dtype_tuple(self, level=rlevel): - """Ticket #334""" - assert np.dtype('i4') == np.dtype(('i4',())) - - def test_dtype_posttuple(self, level=rlevel): - """Ticket #335""" - np.dtype([('col1', '()i4')]) - - def test_numeric_carray_compare(self, level=rlevel): - """Ticket #341""" - assert_equal(np.array(['X'], 'c'), asbytes('X')) - - def test_string_array_size(self, level=rlevel): - """Ticket #342""" - self.assertRaises(ValueError, - np.array,[['X'],['X','X','X']],'|S1') - - def test_dtype_repr(self, level=rlevel): - """Ticket #344""" - dt1=np.dtype(('uint32', 2)) - dt2=np.dtype(('uint32', (2,))) - assert_equal(dt1.__repr__(), dt2.__repr__()) - - def test_reshape_order(self, level=rlevel): - """Make sure reshape order works.""" - a = np.arange(6).reshape(2,3,order='F') - assert_equal(a,[[0,2,4],[1,3,5]]) - a = np.array([[1,2],[3,4],[5,6],[7,8]]) - b = a[:,1] - 
assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]]) - - def test_repeat_discont(self, level=rlevel): - """Ticket #352""" - a = np.arange(12).reshape(4,3)[:,2] - assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11]) - - def test_array_index(self, level=rlevel): - """Make sure optimization is not called in this case.""" - a = np.array([1,2,3]) - a2 = np.array([[1,2,3]]) - assert_equal(a[np.where(a==3)], a2[np.where(a2==3)]) - - def test_object_argmax(self, level=rlevel): - a = np.array([1,2,3],dtype=object) - assert a.argmax() == 2 - - def test_recarray_fields(self, level=rlevel): - """Ticket #372""" - dt0 = np.dtype([('f0','i4'),('f1','i4')]) - dt1 = np.dtype([('f0','i8'),('f1','i8')]) - for a in [np.array([(1,2),(3,4)],"i4,i4"), - np.rec.array([(1,2),(3,4)],"i4,i4"), - np.rec.array([(1,2),(3,4)]), - np.rec.fromarrays([(1,2),(3,4)],"i4,i4"), - np.rec.fromarrays([(1,2),(3,4)])]: - assert_(a.dtype in [dt0,dt1]) - - def test_random_shuffle(self, level=rlevel): - """Ticket #374""" - a = np.arange(5).reshape((5,1)) - b = a.copy() - np.random.shuffle(b) - assert_equal(np.sort(b, axis=0),a) - - def test_refcount_vdot(self, level=rlevel): - """Changeset #3443""" - _assert_valid_refcount(np.vdot) - - def test_startswith(self, level=rlevel): - ca = np.char.array(['Hi','There']) - assert_equal(ca.startswith('H'),[True,False]) - - def test_noncommutative_reduce_accumulate(self, level=rlevel): - """Ticket #413""" - tosubtract = np.arange(5) - todivide = np.array([2.0, 0.5, 0.25]) - assert_equal(np.subtract.reduce(tosubtract), -10) - assert_equal(np.divide.reduce(todivide), 16.0) - assert_array_equal(np.subtract.accumulate(tosubtract), - np.array([0, -1, -3, -6, -10])) - assert_array_equal(np.divide.accumulate(todivide), - np.array([2., 4., 16.])) - - def test_convolve_empty(self, level=rlevel): - """Convolve should raise an error for empty input array.""" - self.assertRaises(ValueError,np.convolve,[],[1]) - self.assertRaises(ValueError,np.convolve,[1],[]) - - def 
test_multidim_byteswap(self, level=rlevel): - """Ticket #449""" - r=np.array([(1,(0,1,2))], dtype="i2,3i2") - assert_array_equal(r.byteswap(), - np.array([(256,(0,256,512))],r.dtype)) - - def test_string_NULL(self, level=rlevel): - """Changeset 3557""" - assert_equal(np.array("a\x00\x0b\x0c\x00").item(), - 'a\x00\x0b\x0c') - - def test_junk_in_string_fields_of_recarray(self, level=rlevel): - """Ticket #483""" - r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')]) - assert asbytes(r['var1'][0][0]) == asbytes('abc') - - def test_take_output(self, level=rlevel): - """Ensure that 'take' honours output parameter.""" - x = np.arange(12).reshape((3,4)) - a = np.take(x,[0,2],axis=1) - b = np.zeros_like(a) - np.take(x,[0,2],axis=1,out=b) - assert_array_equal(a,b) - - def test_array_str_64bit(self, level=rlevel): - """Ticket #501""" - s = np.array([1, np.nan],dtype=np.float64) - errstate = np.seterr(all='raise') - try: - sstr = np.array_str(s) - finally: - np.seterr(**errstate) - - def test_frompyfunc_endian(self, level=rlevel): - """Ticket #503""" - from math import radians - uradians = np.frompyfunc(radians, 1, 1) - big_endian = np.array([83.4, 83.5], dtype='>f8') - little_endian = np.array([83.4, 83.5], dtype=' object - # casting succeeds - def rs(): - x = np.ones([484,286]) - y = np.zeros([484,286]) - x |= y - self.assertRaises(TypeError,rs) - - def test_unicode_scalar(self, level=rlevel): - """Ticket #600""" - import cPickle - x = np.array(["DROND", "DROND1"], dtype="U6") - el = x[1] - new = cPickle.loads(cPickle.dumps(el)) - assert_equal(new, el) - - def test_arange_non_native_dtype(self, level=rlevel): - """Ticket #616""" - for T in ('>f4','0)]=1.0 - self.assertRaises(ValueError,ia,x,s) - - def test_mem_scalar_indexing(self, level=rlevel): - """Ticket #603""" - x = np.array([0],dtype=float) - index = np.array(0,dtype=np.int32) - x[index] - - def test_binary_repr_0_width(self, level=rlevel): - assert_equal(np.binary_repr(0,width=3),'000') - - def 
test_fromstring(self, level=rlevel): - assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), - [12,9,9]) - - def test_searchsorted_variable_length(self, level=rlevel): - x = np.array(['a','aa','b']) - y = np.array(['d','e']) - assert_equal(x.searchsorted(y), [3,3]) - - def test_string_argsort_with_zeros(self, level=rlevel): - """Check argsort for strings containing zeros.""" - x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") - assert_array_equal(x.argsort(kind='m'), np.array([1,0])) - assert_array_equal(x.argsort(kind='q'), np.array([1,0])) - - def test_string_sort_with_zeros(self, level=rlevel): - """Check sort for strings containing zeros.""" - x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") - y = np.fromstring("\x00\x01\x00\x02", dtype="|S2") - assert_array_equal(np.sort(x, kind="q"), y) - - def test_copy_detection_zero_dim(self, level=rlevel): - """Ticket #658""" - np.indices((0,3,4)).T.reshape(-1,3) - - def test_flat_byteorder(self, level=rlevel): - """Ticket #657""" - x = np.arange(10) - assert_array_equal(x.astype('>i4'),x.astype('i4').flat[:],x.astype('i4')): - x = np.array([-1,0,1],dtype=dt) - assert_equal(x.flat[0].dtype, x[0].dtype) - - def test_copy_detection_corner_case(self, level=rlevel): - """Ticket #658""" - np.indices((0,3,4)).T.reshape(-1,3) - - def test_copy_detection_corner_case2(self, level=rlevel): - """Ticket #771: strides are not set correctly when reshaping 0-sized - arrays""" - b = np.indices((0,3,4)).T.reshape(-1,3) - assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) - - def test_object_array_refcounting(self, level=rlevel): - """Ticket #633""" - if not hasattr(sys, 'getrefcount'): - return - - # NB. 
this is probably CPython-specific - - cnt = sys.getrefcount - - a = object() - b = object() - c = object() - - cnt0_a = cnt(a) - cnt0_b = cnt(b) - cnt0_c = cnt(c) - - # -- 0d -> 1d broadcasted slice assignment - - arr = np.zeros(5, dtype=np.object_) - - arr[:] = a - assert_equal(cnt(a), cnt0_a + 5) - - arr[:] = b - assert_equal(cnt(a), cnt0_a) - assert_equal(cnt(b), cnt0_b + 5) - - arr[:2] = c - assert_equal(cnt(b), cnt0_b + 3) - assert_equal(cnt(c), cnt0_c + 2) - - del arr - - # -- 1d -> 2d broadcasted slice assignment - - arr = np.zeros((5, 2), dtype=np.object_) - arr0 = np.zeros(2, dtype=np.object_) - - arr0[0] = a - assert cnt(a) == cnt0_a + 1 - arr0[1] = b - assert cnt(b) == cnt0_b + 1 - - arr[:,:] = arr0 - assert cnt(a) == cnt0_a + 6 - assert cnt(b) == cnt0_b + 6 - - arr[:,0] = None - assert cnt(a) == cnt0_a + 1 - - del arr, arr0 - - # -- 2d copying + flattening - - arr = np.zeros((5, 2), dtype=np.object_) - - arr[:,0] = a - arr[:,1] = b - assert cnt(a) == cnt0_a + 5 - assert cnt(b) == cnt0_b + 5 - - arr2 = arr.copy() - assert cnt(a) == cnt0_a + 10 - assert cnt(b) == cnt0_b + 10 - - arr2 = arr[:,0].copy() - assert cnt(a) == cnt0_a + 10 - assert cnt(b) == cnt0_b + 5 - - arr2 = arr.flatten() - assert cnt(a) == cnt0_a + 10 - assert cnt(b) == cnt0_b + 10 - - del arr, arr2 - - # -- concatenate, repeat, take, choose - - arr1 = np.zeros((5, 1), dtype=np.object_) - arr2 = np.zeros((5, 1), dtype=np.object_) - - arr1[...] = a - arr2[...] 
= b - assert cnt(a) == cnt0_a + 5 - assert cnt(b) == cnt0_b + 5 - - arr3 = np.concatenate((arr1, arr2)) - assert cnt(a) == cnt0_a + 5 + 5 - assert cnt(b) == cnt0_b + 5 + 5 - - arr3 = arr1.repeat(3, axis=0) - assert cnt(a) == cnt0_a + 5 + 3*5 - - arr3 = arr1.take([1,2,3], axis=0) - assert cnt(a) == cnt0_a + 5 + 3 - - x = np.array([[0],[1],[0],[1],[1]], int) - arr3 = x.choose(arr1, arr2) - assert cnt(a) == cnt0_a + 5 + 2 - assert cnt(b) == cnt0_b + 5 + 3 - - def test_mem_custom_float_to_array(self, level=rlevel): - """Ticket 702""" - class MyFloat: - def __float__(self): - return 1.0 - - tmp = np.atleast_1d([MyFloat()]) - tmp2 = tmp.astype(float) - - def test_object_array_refcount_self_assign(self, level=rlevel): - """Ticket #711""" - class VictimObject(object): - deleted = False - def __del__(self): - self.deleted = True - d = VictimObject() - arr = np.zeros(5, dtype=np.object_) - arr[:] = d - del d - arr[:] = arr # refcount of 'd' might hit zero here - assert not arr[0].deleted - arr[:] = arr # trying to induce a segfault by doing it again... 
- assert not arr[0].deleted - - def test_mem_fromiter_invalid_dtype_string(self, level=rlevel): - x = [1,2,3] - self.assertRaises(ValueError, - np.fromiter, [xi for xi in x], dtype='S') - - def test_reduce_big_object_array(self, level=rlevel): - """Ticket #713""" - oldsize = np.setbufsize(10*16) - a = np.array([None]*161, object) - assert not np.any(a) - np.setbufsize(oldsize) - - def test_mem_0d_array_index(self, level=rlevel): - """Ticket #714""" - np.zeros(10)[np.array(0)] - - def test_floats_from_string(self, level=rlevel): - """Ticket #640, floats from string""" - fsingle = np.single('1.234') - fdouble = np.double('1.234') - flongdouble = np.longdouble('1.234') - assert_almost_equal(fsingle, 1.234) - assert_almost_equal(fdouble, 1.234) - assert_almost_equal(flongdouble, 1.234) - - def test_complex_dtype_printing(self, level=rlevel): - dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(str(dt), - "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))])]") - - def test_nonnative_endian_fill(self, level=rlevel): - """ Non-native endian arrays were incorrectly filled with scalars before - r5034. - """ - if sys.byteorder == 'little': - dtype = np.dtype('>i4') - else: - dtype = np.dtype('= 3: - xp = pickle.load(open(filename, 'rb'), encoding='latin1') - else: - xp = pickle.load(open(filename)) - xpd = xp.astype(np.float64) - assert (xp.__array_interface__['data'][0] != - xpd.__array_interface__['data'][0]) - - def test_compress_small_type(self, level=rlevel): - """Ticket #789, changeset 5217. 
- """ - # compress with out argument segfaulted if cannot cast safely - import numpy as np - a = np.array([[1, 2], [3, 4]]) - b = np.zeros((2, 1), dtype = np.single) - try: - a.compress([True, False], axis = 1, out = b) - raise AssertionError("compress with an out which cannot be " \ - "safely casted should not return "\ - "successfully") - except TypeError: - pass - - def test_attributes(self, level=rlevel): - """Ticket #791 - """ - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - def __array_finalize__(self, obj): - self.info = getattr(obj, 'info', '') - dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba') - assert_(dat.info == 'jubba') - dat.resize((4,2)) - assert_(dat.info == 'jubba') - dat.sort() - assert_(dat.info == 'jubba') - dat.fill(2) - assert_(dat.info == 'jubba') - dat.put([2,3,4],[6,3,4]) - assert_(dat.info == 'jubba') - dat.setfield(4, np.int32,0) - assert_(dat.info == 'jubba') - dat.setflags() - assert_(dat.info == 'jubba') - assert_(dat.all(1).info == 'jubba') - assert_(dat.any(1).info == 'jubba') - assert_(dat.argmax(1).info == 'jubba') - assert_(dat.argmin(1).info == 'jubba') - assert_(dat.argsort(1).info == 'jubba') - assert_(dat.astype(TestArray).info == 'jubba') - assert_(dat.byteswap().info == 'jubba') - assert_(dat.clip(2,7).info == 'jubba') - assert_(dat.compress([0,1,1]).info == 'jubba') - assert_(dat.conj().info == 'jubba') - assert_(dat.conjugate().info == 'jubba') - assert_(dat.copy().info == 'jubba') - dat2 = TestArray([2, 3, 1, 0],'jubba') - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - assert_(dat2.choose(choices).info == 'jubba') - assert_(dat.cumprod(1).info == 'jubba') - assert_(dat.cumsum(1).info == 'jubba') - assert_(dat.diagonal().info == 'jubba') - assert_(dat.flatten().info == 'jubba') - assert_(dat.getfield(np.int32,0).info == 'jubba') - assert_(dat.imag.info == 'jubba') - 
assert_(dat.max(1).info == 'jubba') - assert_(dat.mean(1).info == 'jubba') - assert_(dat.min(1).info == 'jubba') - assert_(dat.newbyteorder().info == 'jubba') - assert_(dat.nonzero()[0].info == 'jubba') - assert_(dat.nonzero()[1].info == 'jubba') - assert_(dat.prod(1).info == 'jubba') - assert_(dat.ptp(1).info == 'jubba') - assert_(dat.ravel().info == 'jubba') - assert_(dat.real.info == 'jubba') - assert_(dat.repeat(2).info == 'jubba') - assert_(dat.reshape((2,4)).info == 'jubba') - assert_(dat.round().info == 'jubba') - assert_(dat.squeeze().info == 'jubba') - assert_(dat.std(1).info == 'jubba') - assert_(dat.sum(1).info == 'jubba') - assert_(dat.swapaxes(0,1).info == 'jubba') - assert_(dat.take([2,3,5]).info == 'jubba') - assert_(dat.transpose().info == 'jubba') - assert_(dat.T.info == 'jubba') - assert_(dat.var(1).info == 'jubba') - assert_(dat.view(TestArray).info == 'jubba') - - def test_recarray_tolist(self, level=rlevel): - """Ticket #793, changeset r5215 - """ - # Comparisons fail for NaN, so we can't use random memory - # for the test. 
- buf = np.zeros(40, dtype=np.int8) - a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) - b = a.tolist() - assert_( a[0].tolist() == b[0]) - assert_( a[1].tolist() == b[1]) - - def test_char_array_creation(self, level=rlevel): - a = np.array('123', dtype='c') - b = np.array(asbytes_nested(['1','2','3'])) - assert_equal(a,b) - - def test_unaligned_unicode_access(self, level=rlevel) : - """Ticket #825""" - for i in range(1,9) : - msg = 'unicode offset: %d chars'%i - t = np.dtype([('a','S%d'%i),('b','U2')]) - x = np.array([(asbytes('a'),u'b')], dtype=t) - if sys.version_info[0] >= 3: - assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) - else: - assert_equal(str(x), "[('a', u'b')]", err_msg=msg) - - def test_sign_for_complex_nan(self, level=rlevel): - """Ticket 794.""" - C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) - have = np.sign(C) - want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) - assert_equal(have, want) - - def test_for_equal_names(self, level=rlevel): - """Ticket #674""" - dt = np.dtype([('foo', float), ('bar', float)]) - a = np.zeros(10, dt) - b = list(a.dtype.names) - b[0] = "notfoo" - a.dtype.names = b - assert a.dtype.names[0] == "notfoo" - assert a.dtype.names[1] == "bar" - - def test_for_object_scalar_creation(self, level=rlevel): - """Ticket #816""" - a = np.object_() - b = np.object_(3) - b2 = np.object_(3.0) - c = np.object_([4,5]) - d = np.object_([None, {}, []]) - assert a is None - assert type(b) is int - assert type(b2) is float - assert type(c) is np.ndarray - assert c.dtype == object - assert d.dtype == object - - def test_array_resize_method_system_error(self): - """Ticket #840 - order should be an invalid keyword.""" - x = np.array([[0,1],[2,3]]) - self.assertRaises(TypeError, x.resize, (2,2), order='C') - - def test_for_zero_length_in_choose(self, level=rlevel): - "Ticket #882" - a = np.array(1) - self.assertRaises(ValueError, lambda x: x.choose([]), a) - - def test_array_ndmin_overflow(self): - "Ticket 
#947." - self.assertRaises(ValueError, lambda: np.array([1], ndmin=33)) - - def test_errobj_reference_leak(self, level=rlevel): - """Ticket #955""" - old_err = np.seterr(all="ignore") - try: - z = int(0) - p = np.int32(-1) - - gc.collect() - n_before = len(gc.get_objects()) - z**p # this shouldn't leak a reference to errobj - gc.collect() - n_after = len(gc.get_objects()) - assert n_before >= n_after, (n_before, n_after) - finally: - np.seterr(**old_err) - - def test_void_scalar_with_titles(self, level=rlevel): - """No ticket""" - data = [('john', 4), ('mary', 5)] - dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] - arr = np.array(data, dtype=dtype1) - assert arr[0][0] == 'john' - assert arr[0][1] == 4 - - def test_blasdot_uninitialized_memory(self): - """Ticket #950""" - for m in [0, 1, 2]: - for n in [0, 1, 2]: - for k in xrange(3): - # Try to ensure that x->data contains non-zero floats - x = np.array([123456789e199], dtype=np.float64) - x.resize((m, 0)) - y = np.array([123456789e199], dtype=np.float64) - y.resize((0, n)) - - # `dot` should just return zero (m,n) matrix - z = np.dot(x, y) - assert np.all(z == 0) - assert z.shape == (m, n) - - def test_zeros(self): - """Regression test for #1061.""" - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - good = 'Maximum allowed dimension exceeded' - try: - np.empty(sz) - except ValueError, e: - if not str(e) == good: - self.fail("Got msg '%s', expected '%s'" % (e, good)) - except Exception, e: - self.fail("Got exception of type %s instead of ValueError" % type(e)) - - def test_huge_arange(self): - """Regression test for #1062.""" - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - good = 'Maximum allowed size exceeded' - try: - a = np.arange(sz) - self.assertTrue(np.size == sz) - except ValueError, e: - if not str(e) == good: - self.fail("Got msg '%s', expected '%s'" % (e, good)) - except Exception, e: - self.fail("Got exception of type %s 
instead of ValueError" % type(e)) - - def test_fromiter_bytes(self): - """Ticket #1058""" - a = np.fromiter(range(10), dtype='b') - b = np.fromiter(range(10), dtype='B') - assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])) - assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])) - - def test_array_from_sequence_scalar_array(self): - """Ticket #1078: segfaults when creating an array with a sequence of 0d - arrays.""" - a = np.array((np.ones(2), np.array(2))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], np.ones(2)) - assert_equal(a[1], np.array(2)) - - a = np.array(((1,), np.array(1))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], (1,)) - assert_equal(a[1], np.array(1)) - - def test_array_from_sequence_scalar_array2(self): - """Ticket #1081: weird array with strange input...""" - t = np.array([np.array([]), np.array(0, object)]) - assert_equal(t.shape, (2,)) - assert_equal(t.dtype, np.dtype(object)) - - def test_array_too_big(self): - """Ticket #1080.""" - assert_raises(ValueError, np.zeros, [975]*7, np.int8) - assert_raises(ValueError, np.zeros, [26244]*5, np.int8) - - def test_dtype_keyerrors_(self): - """Ticket #1106.""" - dt = np.dtype([('f1', np.uint)]) - assert_raises(KeyError, dt.__getitem__, "f2") - assert_raises(IndexError, dt.__getitem__, 1) - assert_raises(ValueError, dt.__getitem__, 0.0) - - def test_lexsort_buffer_length(self): - """Ticket #1217, don't segfault.""" - a = np.ones(100, dtype=np.int8) - b = np.ones(100, dtype=np.int32) - i = np.lexsort((a[::-1], b)) - assert_equal(i, np.arange(100, dtype=np.int)) - - def test_object_array_to_fixed_string(self): - """Ticket #1235.""" - a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) - b = np.array(a, dtype=(np.str_, 8)) - assert_equal(a, b) - c = np.array(a, dtype=(np.str_, 5)) - assert_equal(c, np.array(['abcde', 'ijklm'])) - d = np.array(a, dtype=(np.str_, 12)) - assert_equal(a, d) - e = 
np.empty((2, ), dtype=(np.str_, 8)) - e[:] = a[:] - assert_equal(a, e) - - def test_unicode_to_string_cast(self): - """Ticket #1240.""" - a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U') - def fail(): - b = np.array(a, 'S4') - self.assertRaises(UnicodeEncodeError, fail) - - def test_mixed_string_unicode_array_creation(self): - a = np.array(['1234', u'123']) - assert a.itemsize == 16 - a = np.array([u'123', '1234']) - assert a.itemsize == 16 - a = np.array(['1234', u'123', '12345']) - assert a.itemsize == 20 - a = np.array([u'123', '1234', u'12345']) - assert a.itemsize == 20 - a = np.array([u'123', '1234', u'1234']) - assert a.itemsize == 16 - - def test_misaligned_objects_segfault(self): - """Ticket #1198 and #1267""" - a1 = np.zeros((10,), dtype='O,c') - a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') - a1['f0'] = a2 - r = repr(a1) - np.argmax(a1['f0']) - a1['f0'][1] = "FOO" - a1['f0'] = "FOO" - a3 = np.array(a1['f0'], dtype='S') - np.nonzero(a1['f0']) - a1.sort() - a4 = copy.deepcopy(a1) - - def test_misaligned_scalars_segfault(self): - """Ticket #1267""" - s1 = np.array(('a', 'Foo'), dtype='c,O') - s2 = np.array(('b', 'Bar'), dtype='c,O') - s1['f1'] = s2['f1'] - s1['f1'] = 'Baz' - - def test_misaligned_dot_product_objects(self): - """Ticket #1267""" - # This didn't require a fix, but it's worth testing anyway, because - # it may fail if .dot stops enforcing the arrays to be BEHAVED - a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') - b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') - np.dot(a['f0'], b['f0']) - - def test_byteswap_complex_scalar(self): - """Ticket #1259""" - z = np.array([-1j], 'c') - - def test_log1p_compiler_shenanigans(self): - # Check if log1p is behaving on 32 bit intel systems. 
- assert_(np.isfinite(np.log1p(np.exp2(-53)))) - - def test_fromiter_comparison(self, level=rlevel): - a = np.fromiter(range(10), dtype='b') - b = np.fromiter(range(10), dtype='B') - assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])) - assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])) - - def test_fromstring_crash(self): - # Ticket #1345: the following should not cause a crash - np.fromstring(asbytes('aa, aa, 1.0'), sep=',') - - def test_ticket_1539(self): - dtypes = [x for x in np.typeDict.values() - if (issubclass(x, np.number) - and not issubclass(x, np.timeinteger))] - a = np.array([], dtypes[0]) - failures = [] - for x in dtypes: - b = a.astype(x) - for y in dtypes: - c = a.astype(y) - try: - np.dot(b, c) - except TypeError, e: - failures.append((x, y)) - if failures: - raise AssertionError("Failures: %r" % failures) - - def test_ticket_1538(self): - x = np.finfo(np.float32) - for name in 'eps epsneg max min resolution tiny'.split(): - assert_equal(type(getattr(x, name)), np.float32, - err_msg=name) - - def test_ticket_1434(self): - # Check that the out= argument in var and std has an effect - data = np.array(((1,2,3),(4,5,6),(7,8,9))) - out = np.zeros((3,)) - - ret = data.var(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.var(axis=1)) - - ret = data.std(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.std(axis=1)) - - def test_complex_nan_maximum(self): - cnan = complex(0, np.nan) - assert_equal(np.maximum(1, cnan), cnan) - - def test_subclass_int_tuple_assignment(self): - # ticket #1563 - class Subclass(np.ndarray): - def __new__(cls,i): - return np.ones((i,)).view(cls) - x = Subclass(5) - x[(0,)] = 2 # shouldn't raise an exception - assert_equal(x[0], 2) - - def test_ufunc_no_unnecessary_views(self): - # ticket #1548 - class Subclass(np.ndarray): - pass - x = np.array([1,2,3]).view(Subclass) - y = np.add(x, x, x) - assert_equal(id(x), id(y)) - - def test_take_refcount(self): - # ticket #939 - a = 
np.arange(16, dtype=np.float) - a.shape = (4,4) - lut = np.ones((5 + 3, 4), np.float) - rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) - c1 = sys.getrefcount(rgba) - try: - lut.take(a, axis=0, mode='clip', out=rgba) - except TypeError: - pass - c2 = sys.getrefcount(rgba) - assert_equal(c1, c2) - - def test_fromfile_tofile_seeks(self): - # On Python 3, tofile/fromfile used to get (#1610) the Python - # file handle out of sync - f0 = tempfile.NamedTemporaryFile() - f = f0.file - f.write(np.arange(255, dtype='u1').tostring()) - - f.seek(20) - ret = np.fromfile(f, count=4, dtype='u1') - assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) - assert_equal(f.tell(), 24) - - f.seek(40) - np.array([1, 2, 3], dtype='u1').tofile(f) - assert_equal(f.tell(), 43) - - f.seek(40) - data = f.read(3) - assert_equal(data, asbytes("\x01\x02\x03")) - - f.seek(80) - f.read(4) - data = np.fromfile(f, dtype='u1', count=4) - assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) - - f.close() - - def test_complex_scalar_warning(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_warns(np.ComplexWarning, float, x) - ctx = WarningManager() - ctx.__enter__() - warnings.simplefilter('ignore') - assert_equal(float(x), float(x.real)) - ctx.__exit__() - - def test_complex_scalar_complex_cast(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_equal(complex(x), 1+2j) - - def test_uint_int_conversion(self): - x = 2**64 - 1 - assert_equal(int(np.uint64(x)), x) - - def test_duplicate_field_names_assign(self): - ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8') - ra.dtype.names = ('f1', 'f2') - rep = repr(ra) # should not cause a segmentation fault - assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) - - def test_eq_string_and_object_array(self): - # From e-mail thread "__eq__ with str and object" (Keith Goodman) - a1 = np.array(['a', 'b'], dtype=object) - a2 = np.array(['a', 'c']) - 
assert_array_equal(a1 == a2, [True, False]) - assert_array_equal(a2 == a1, [True, False]) - - def test_nonzero_byteswap(self): - a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 - assert_equal(a.nonzero()[0], [1]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap - - def test_find_common_type_boolean(self): - # Ticket #1695 - assert_(np.find_common_type([],['?','?']) == '?') - - def test_empty_mul(self): - a = np.array([1.]) - a[1:1] *= 2 - assert_equal(a, [1.]) - - def test_array_side_effect(self): - assert_equal(np.dtype('S10').itemsize, 10) - - A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) - - # This was throwing an exception because in ctors.c, - # discover_itemsize was calling PyObject_Length without checking - # the return code. This failed to get the length of the number 2, - # and the exception hung around until something checked - # PyErr_Occurred() and returned an error. 
- assert_equal(np.dtype('S10').itemsize, 10) - - def test_any_float(self): - # all and any for floats - a = np.array([0.1, 0.9]) - assert_(np.any(a)) - assert_(np.all(a)) - - def test_large_float_sum(self): - a = np.arange(10000, dtype='f') - assert_equal(a.sum(dtype='d'), a.astype('d').sum()) - - def test_ufunc_casting_out(self): - a = np.array(1.0, dtype=np.float32) - b = np.array(1.0, dtype=np.float64) - c = np.array(1.0, dtype=np.float32) - np.add(a, b, out=c) - assert_equal(c, 2.0) - - def test_array_scalar_contiguous(self): - # Array scalars are both C and Fortran contiguous - assert_(np.array(1.0).flags.c_contiguous) - assert_(np.array(1.0).flags.f_contiguous) - assert_(np.array(np.float32(1.0)).flags.c_contiguous) - assert_(np.array(np.float32(1.0)).flags.f_contiguous) - - def test_object_array_self_reference(self): - # Object arrays with references to themselves can cause problems - a = np.array(0, dtype=object) - a[()] = a - assert_raises(TypeError, int, a) - assert_raises(TypeError, long, a) - assert_raises(TypeError, float, a) - assert_raises(TypeError, oct, a) - assert_raises(TypeError, hex, a) - - # This was causing a to become like the above - a = np.array(0, dtype=object) - a[...] += 1 - assert_equal(a, 1) - - def test_zerosize_accumulate(self): - "Ticket #1733" - x = np.array([[42, 0]], dtype=np.uint32) - assert_equal(np.add.accumulate(x[:-1,0]), []) - - def test_objectarray_setfield(self): - # Setfield directly manipulates the raw array data, - # so is invalid for object arrays. 
- x = np.array([1,2,3], dtype=object) - assert_raises(RuntimeError, x.setfield, 4, np.int32, 0) - - def test_setting_rank0_string(self): - "Ticket #1736" - s1 = asbytes("hello1") - s2 = asbytes("hello2") - a = np.zeros((), dtype="S10") - a[()] = s1 - assert_equal(a, np.array(s1)) - a[()] = np.array(s2) - assert_equal(a, np.array(s2)) - - a = np.zeros((), dtype='f4') - a[()] = 3 - assert_equal(a, np.array(3)) - a[()] = np.array(4) - assert_equal(a, np.array(4)) - - @dec.knownfailureif(sys.version_info[0] >= 3, - "a.dtype is U5 for Py 3.x. Knownfail for 1.6.x") - def test_string_astype(self): - "Ticket #1748" - s1 = asbytes('black') - s2 = asbytes('white') - s3 = asbytes('other') - a = np.array([[s1],[s2],[s3]]) - assert_equal(a.dtype, np.dtype('S5')) - b = a.astype('str') - assert_equal(b.dtype, np.dtype('S5')) - - def test_ticket_1756(self): - """Ticket #1756 """ - s = asbytes('0123456789abcdef') - a = np.array([s]*5) - for i in range(1,17): - a1 = np.array(a, "|S%d"%i) - a2 = np.array([s[:i]]*5) - assert_equal(a1, a2) - - def test_fields_strides(self): - "Ticket #1760" - r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') - assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) - assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) - assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) - assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) - - def test_ticket_1770(self): - "Should not segfault on python 3k" - import numpy as np - try: - a = np.zeros((1,), dtype=[('f1', 'f')]) - a['f1'] = 1 - a['f2'] = 1 - except ValueError: - pass - except: - raise AssertionError - - def test_structured_type_to_object(self): - a_rec = np.array([(0,1), (3,2)], dtype='i4,i8') - a_obj = np.empty((2,), dtype=object) - a_obj[0] = (0,1) - a_obj[1] = (3,2) - # astype records -> object - assert_equal(a_rec.astype(object), a_obj) - # '=' records -> object - b = np.empty_like(a_obj) - b[...] 
= a_rec - assert_equal(b, a_obj) - # '=' object -> records - b = np.empty_like(a_rec) - b[...] = a_obj - assert_equal(b, a_rec) - - def test_assign_obj_listoflists(self): - # Ticket # 1870 - # The inner list should get assigned to the object elements - a = np.zeros(4, dtype=object) - b = a.copy() - a[0] = [1] - a[1] = [2] - a[2] = [3] - a[3] = [4] - b[...] = [[1], [2], [3], [4]] - assert_equal(a, b) - # The first dimension should get broadcast - a = np.zeros((2,2), dtype=object) - a[...] = [[1,2]] - assert_equal(a, [[1,2], [1,2]]) - - def test_unique_stable(self): - # Ticket #2063 must always choose stable sort for argsort to - # get consistent results - v=np.array([0,0,0,0,0,1,1,1,1,1,1,2,2,2,2,2,2]*4) - w=np.array([0,0,0,0,0,1,1,1,1,1,1,2,2,2,2]) - resv = np.unique(v,return_index=True) - resw = np.unique(w,return_index=True) - assert_equal(resv, resw) - - def test_search_sorted_invalid_arguments(self): - # Ticket #2021, should not segfault. - x = np.arange(0, 4, dtype='datetime64[D]') - assert_raises(TypeError, x.searchsorted, 1) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_scalarmath.py b/numpy-1.6.2/numpy/core/tests/test_scalarmath.py deleted file mode 100644 index 1a5a1e8c90..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_scalarmath.py +++ /dev/null @@ -1,139 +0,0 @@ -import sys -from numpy.testing import * -import numpy as np - -types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, - np.int_, np.uint, np.longlong, np.ulonglong, - np.single, np.double, np.longdouble, np.csingle, - np.cdouble, np.clongdouble] - -# This compares scalarmath against ufuncs. 
- -class TestTypes(TestCase): - def test_types(self, level=1): - for atype in types: - a = atype(1) - assert a == 1, "error with %r: got %r" % (atype,a) - - def test_type_add(self, level=1): - # list of types - for k, atype in enumerate(types): - a_scalar = atype(3) - a_array = np.array([3],dtype=atype) - for l, btype in enumerate(types): - b_scalar = btype(1) - b_array = np.array([1],dtype=btype) - c_scalar = a_scalar + b_scalar - c_array = a_array + b_array - # It was comparing the type numbers, but the new ufunc - # function-finding mechanism finds the lowest function - # to which both inputs can be cast - which produces 'l' - # when you do 'q' + 'b'. The old function finding mechanism - # skipped ahead based on the first argument, but that - # does not produce properly symmetric results... - assert_equal(c_scalar.dtype, c_array.dtype, - "error with types (%d/'%c' + %d/'%c')" % - (k,np.dtype(atype).char,l,np.dtype(btype).char)) - - def test_type_create(self, level=1): - for k, atype in enumerate(types): - a = np.array([1,2,3],atype) - b = atype([1,2,3]) - assert_equal(a,b) - - -class TestPower(TestCase): - def test_small_types(self): - for t in [np.int8, np.int16]: - a = t(3) - b = a ** 4 - assert b == 81, "error with %r: got %r" % (t,b) - - def test_large_types(self): - for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: - a = t(51) - b = a ** 4 - msg = "error with %r: got %r" % (t,b) - if np.issubdtype(t, np.integer): - assert b == 6765201, msg - else: - assert_almost_equal(b, 6765201, err_msg=msg) - - -class TestComplexDivision(TestCase): - def test_zero_division(self): - err = np.seterr(over="ignore", invalid="ignore", divide='ignore') - try: - for t in [np.complex64, np.complex128]: - a = t(0.0) - b = t(1.0) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.nan)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.nan)) - 
assert_(np.isnan(b/a)) - b = t(0.) - assert_(np.isnan(b/a)) - finally: - np.seterr(**err) - - -class TestConversion(TestCase): - def test_int_from_long(self): - l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] - li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] - for T in [None, np.float64, np.int64]: - a = np.array(l,dtype=T) - assert_equal(map(int,a), li) - - a = np.array(l[:3], dtype=np.uint64) - assert_equal(map(int,a), li[:3]) - - -#class TestRepr(TestCase): -# def test_repr(self): -# for t in types: -# val = t(1197346475.0137341) -# val_repr = repr(val) -# val2 = eval(val_repr) -# assert_equal( val, val2 ) - - -class TestRepr: - def _test_type_repr(self, t): - finfo=np.finfo(t) - last_fraction_bit_idx = finfo.nexp + finfo.nmant - last_exponent_bit_idx = finfo.nexp - storage_bytes = np.dtype(t).itemsize*8 - # could add some more types to the list below - for which in ['small denorm','small norm']: - # Values from http://en.wikipedia.org/wiki/IEEE_754 - constr = np.array([0x00]*storage_bytes,dtype=np.uint8) - if which == 'small denorm': - byte = last_fraction_bit_idx // 8 - bytebit = 7-(last_fraction_bit_idx % 8) - constr[byte] = 1< real - n 1 negative nums + O - n 1 sign nums + O -> int - n 1 invert bool + ints + O flts raise an error - n 1 degrees real + M cmplx raise an error - n 1 radians real + M cmplx raise an error - n 1 arccos flts + M - n 1 arccosh flts + M - n 1 arcsin flts + M - n 1 arcsinh flts + M - n 1 arctan flts + M - n 1 arctanh flts + M - n 1 cos flts + M - n 1 sin flts + M - n 1 tan flts + M - n 1 cosh flts + M - n 1 sinh flts + M - n 1 tanh flts + M - n 1 exp flts + M - n 1 expm1 flts + M - n 1 log flts + M - n 1 log10 flts + M - n 1 log1p flts + M - n 1 sqrt flts + M real x < 0 raises error - n 1 ceil real + M - n 1 trunc real + M - n 1 floor real + M - n 1 fabs real + M - n 1 rint flts + M - n 1 isnan flts -> bool - n 1 isinf flts -> bool - n 1 isfinite flts -> bool - n 1 signbit real -> bool - n 1 modf real -> (frac, int) - n 1 logical_not 
bool + nums + M -> bool - n 2 left_shift ints + O flts raise an error - n 2 right_shift ints + O flts raise an error - n 2 add bool + nums + O boolean + is || - n 2 subtract bool + nums + O boolean - is ^ - n 2 multiply bool + nums + O boolean * is & - n 2 divide nums + O - n 2 floor_divide nums + O - n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d - n 2 fmod nums + M - n 2 power nums + O - n 2 greater bool + nums + O -> bool - n 2 greater_equal bool + nums + O -> bool - n 2 less bool + nums + O -> bool - n 2 less_equal bool + nums + O -> bool - n 2 equal bool + nums + O -> bool - n 2 not_equal bool + nums + O -> bool - n 2 logical_and bool + nums + M -> bool - n 2 logical_or bool + nums + M -> bool - n 2 logical_xor bool + nums + M -> bool - n 2 maximum bool + nums + O - n 2 minimum bool + nums + O - n 2 bitwise_and bool + ints + O flts raise an error - n 2 bitwise_or bool + ints + O flts raise an error - n 2 bitwise_xor bool + ints + O flts raise an error - n 2 arctan2 real + M - n 2 remainder ints + real + O - n 2 hypot real + M - ===== ==== ============= =============== ======================== - - Types other than those listed will be accepted, but they are cast to - the smallest compatible type for which the function is defined. The - casting rules are: - - bool -> int8 -> float32 - ints -> double - - """ - pass - - - def test_signature(self): - # the arguments to test_signature are: nin, nout, core_signature - # pass - assert_equal(umt.test_signature(2,1,"(i),(i)->()"), 1) - - # pass. 
empty core signature; treat as plain ufunc (with trivial core) - assert_equal(umt.test_signature(2,1,"(),()->()"), 0) - - # in the following calls, a ValueError should be raised because - # of error in core signature - # error: extra parenthesis - msg = "core_sig: extra parenthesis" - try: - ret = umt.test_signature(2,1,"((i)),(i)->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: parenthesis matching - msg = "core_sig: parenthesis matching" - try: - ret = umt.test_signature(2,1,"(i),)i(->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: incomplete signature. letters outside of parenthesis are ignored - msg = "core_sig: incomplete signature" - try: - ret = umt.test_signature(2,1,"(i),->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: incomplete signature. 2 output arguments are specified - msg = "core_sig: incomplete signature" - try: - ret = umt.test_signature(2,2,"(i),(i)->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - - # more complicated names for variables - assert_equal(umt.test_signature(2,1,"(i1,i2),(J_1)->(_kAB)"),1) - - def test_get_signature(self): - assert_equal(umt.inner1d.signature, "(i),(i)->()") - - def test_forced_sig(self): - a = 0.5*np.arange(3,dtype='f8') - assert_equal(np.add(a,0.5), [0.5, 1, 1.5]) - assert_equal(np.add(a,0.5,sig='i',casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a,0.5,sig='ii->i',casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a,0.5,sig=('i4',),casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a,0.5,sig=('i4','i4','i4'), - casting='unsafe'), [0, 0, 1]) - - b = np.zeros((3,),dtype='f8') - np.add(a,0.5,out=b) - assert_equal(b, [0.5, 1, 1.5]) - b[:] = 0 - np.add(a,0.5,sig='i',out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a,0.5,sig='ii->i',out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a,0.5,sig=('i4',),out=b, casting='unsafe') - assert_equal(b, [0, 
0, 1]) - b[:] = 0 - np.add(a,0.5,sig=('i4','i4','i4'),out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - - - def test_inner1d(self): - a = np.arange(6).reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1)) - - def test_broadcast(self): - msg = "broadcast" - a = np.arange(4).reshape((2,1,2)) - b = np.arange(4).reshape((1,2,2)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "extend & broadcast loop dimensions" - b = np.arange(4).reshape((2,2)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "broadcast in core dimensions" - a = np.arange(8).reshape((4,2)) - b = np.arange(4).reshape((4,1)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "extend & broadcast core and loop dimensions" - a = np.arange(8).reshape((4,2)) - b = np.array(7) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "broadcast should fail" - a = np.arange(2).reshape((2,1,1)) - b = np.arange(3).reshape((3,1,1)) - try: - ret = umt.inner1d(a,b) - assert_equal(ret, None, err_msg=msg) - except ValueError: None - - def test_type_cast(self): - msg = "type cast" - a = np.arange(6, dtype='short').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) - msg = "type cast on one argument" - a = np.arange(6).reshape((2,3)) - b = a+0.1 - assert_array_almost_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), - err_msg=msg) - - def test_endian(self): - msg = "big endian" - a = np.arange(6, dtype='>i4').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) - msg = "little endian" - a = np.arange(6, dtype=' 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)) - - -def assert_arctan2_isninf(x, y): - assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)) - - -def assert_arctan2_ispzero(x, y): - assert (ncu.arctan2(x, y) == 0 and 
not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)) - - -def assert_arctan2_isnzero(x, y): - assert (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)) - - -class TestArctan2SpecialValues(TestCase): - def test_one_one(self): - # atan2(1, 1) returns pi/4. - assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) - assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) - - def test_zero_nzero(self): - # atan2(+-0, -0) returns +-pi. - assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) - - def test_zero_pzero(self): - # atan2(+-0, +0) returns +-0. - assert_arctan2_ispzero(np.PZERO, np.PZERO) - assert_arctan2_isnzero(np.NZERO, np.PZERO) - - def test_zero_negative(self): - # atan2(+-0, x) returns +-pi for x < 0. - assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) - - def test_zero_positive(self): - # atan2(+-0, x) returns +-0 for x > 0. - assert_arctan2_ispzero(np.PZERO, 1) - assert_arctan2_isnzero(np.NZERO, 1) - - def test_positive_zero(self): - # atan2(y, +-0) returns +pi/2 for y > 0. - assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) - - def test_negative_zero(self): - # atan2(y, +-0) returns -pi/2 for y < 0. - assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) - - def test_any_ninf(self): - # atan2(+-y, -infinity) returns +-pi for finite y > 0. - assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) - - def test_any_pinf(self): - # atan2(+-y, +infinity) returns +-0 for finite y > 0. 
- assert_arctan2_ispzero(1, np.inf) - assert_arctan2_isnzero(-1, np.inf) - - def test_inf_any(self): - # atan2(+-infinity, x) returns +-pi/2 for finite x. - assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) - - def test_inf_ninf(self): - # atan2(+-infinity, -infinity) returns +-3*pi/4. - assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) - - def test_inf_pinf(self): - # atan2(+-infinity, +infinity) returns +-pi/4. - assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) - - def test_nan_any(self): - # atan2(nan, x) returns nan for any x, including inf - assert_arctan2_isnan(np.nan, np.inf) - assert_arctan2_isnan(np.inf, np.nan) - assert_arctan2_isnan(np.nan, np.nan) - - -class TestLdexp(TestCase): - def _check_ldexp(self, tp): - assert_almost_equal(ncu.ldexp(np.array(2., np.float32), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.float64), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), - np.array(3, tp)), 16.) - - def test_ldexp(self): - # The default Python int type should work - assert_almost_equal(ncu.ldexp(2., 3), 16.) 
- # The following int types should all be accepted - self._check_ldexp(np.int8) - self._check_ldexp(np.int16) - self._check_ldexp(np.int32) - self._check_ldexp('i') - self._check_ldexp('l') - - @dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6), - "python.org < 2.6 binaries have broken ldexp in the " - "C runtime") - def test_ldexp_overflow(self): - # silence warning emitted on overflow - err = np.seterr(over="ignore") - try: - imax = np.iinfo(np.dtype('l')).max - imin = np.iinfo(np.dtype('l')).min - assert_equal(ncu.ldexp(2., imax), np.inf) - assert_equal(ncu.ldexp(2., imin), 0) - finally: - np.seterr(**err) - - -class TestMaximum(TestCase): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.maximum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.maximum.reduce([1,2j]),1) - assert_equal(np.maximum.reduce([1+3j,2j]),1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.maximum(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([nan, nan, nan], dtype=np.complex) - assert_equal(np.maximum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=np.object) - arg2 = arg1 + 1 - assert_equal(np.maximum(arg1, arg2), arg2) - - -class 
TestMinimum(TestCase): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.minimum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.minimum.reduce([1,2j]),2j) - assert_equal(np.minimum.reduce([1+3j,2j]),2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.minimum(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([nan, nan, nan], dtype=np.complex) - assert_equal(np.minimum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=np.object) - arg2 = arg1 + 1 - assert_equal(np.minimum(arg1, arg2), arg1) - - -class TestFmax(TestCase): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmax.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 9) - assert_equal(func(tmp2), 9) - - def test_reduce_complex(self): - assert_equal(np.fmax.reduce([1,2j]),1) - 
assert_equal(np.fmax.reduce([1+3j,2j]),1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmax(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([0, 0, nan], dtype=np.complex) - assert_equal(np.fmax(arg1, arg2), out) - - -class TestFmin(TestCase): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmin.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 1) - assert_equal(func(tmp2), 1) - - def test_reduce_complex(self): - assert_equal(np.fmin.reduce([1,2j]),2j) - assert_equal(np.fmin.reduce([1+3j,2j]),2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmin(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([0, 0, nan], dtype=np.complex) - assert_equal(np.fmin(arg1, arg2), out) - - -class TestFloatingPoint(TestCase): - def test_floating_point(self): - assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) - - -class TestDegrees(TestCase): - def test_degrees(self): - assert_almost_equal(ncu.degrees(np.pi), 180.0) - assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) - - 
-class TestRadians(TestCase): - def test_radians(self): - assert_almost_equal(ncu.radians(180.0), np.pi) - assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) - - -class TestSign(TestCase): - def test_sign(self): - a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) - out = np.zeros(a.shape) - tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) - - olderr = np.seterr(invalid='ignore') - try: - res = ncu.sign(a) - assert_equal(res, tgt) - res = ncu.sign(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - finally: - np.seterr(**olderr) - - -class TestSpecialMethods(TestCase): - def test_wrap(self): - class with_wrap(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - r = with_wrap() - r.arr = arr - r.context = context - return r - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - func, args, i = x.context - self.assertTrue(func is ncu.minimum) - self.assertEqual(len(args), 2) - assert_equal(args[0], a) - assert_equal(args[1], a) - self.assertEqual(i, 0) - - def test_wrap_with_iterable(self): - # test fix for bug #1026: - class with_wrap(np.ndarray): - __array_priority__ = 10 - def __new__(cls): - return np.asarray(1).view(cls).copy() - def __array_wrap__(self, arr, context): - return arr.view(type(self)) - a = with_wrap() - x = ncu.multiply(a, (1, 2, 3)) - self.assertTrue(isinstance(x, with_wrap)) - assert_array_equal(x, np.array((1, 2, 3))) - - def test_priority_with_scalar(self): - # test fix for bug #826: - class A(np.ndarray): - __array_priority__ = 10 - def __new__(cls): - return np.asarray(1.0, 'float64').view(cls).copy() - a = A() - x = np.float64(1)*a - self.assertTrue(isinstance(x, A)) - assert_array_equal(x, np.array(1)) - - def test_old_wrap(self): - class with_wrap(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr): - r = with_wrap() - r.arr = arr - return r - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - - def 
test_priority(self): - class A(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - r = type(self)() - r.arr = arr - r.context = context - return r - class B(A): - __array_priority__ = 20. - class C(A): - __array_priority__ = 40. - x = np.zeros(1) - a = A() - b = B() - c = C() - f = ncu.minimum - self.assertTrue(type(f(x,x)) is np.ndarray) - self.assertTrue(type(f(x,a)) is A) - self.assertTrue(type(f(x,b)) is B) - self.assertTrue(type(f(x,c)) is C) - self.assertTrue(type(f(a,x)) is A) - self.assertTrue(type(f(b,x)) is B) - self.assertTrue(type(f(c,x)) is C) - - self.assertTrue(type(f(a,a)) is A) - self.assertTrue(type(f(a,b)) is B) - self.assertTrue(type(f(b,a)) is B) - self.assertTrue(type(f(b,b)) is B) - self.assertTrue(type(f(b,c)) is C) - self.assertTrue(type(f(c,b)) is C) - self.assertTrue(type(f(c,c)) is C) - - self.assertTrue(type(ncu.exp(a) is A)) - self.assertTrue(type(ncu.exp(b) is B)) - self.assertTrue(type(ncu.exp(c) is C)) - - def test_failing_wrap(self): - class A(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - raise RuntimeError - a = A() - self.assertRaises(RuntimeError, ncu.maximum, a, a) - - def test_default_prepare(self): - class with_wrap(object): - __array_priority__ = 10 - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - return arr - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x, np.zeros(1)) - assert_equal(type(x), np.ndarray) - - def test_prepare(self): - class with_prepare(np.ndarray): - __array_priority__ = 10 - def __array_prepare__(self, arr, context): - # make sure we can return a new - return np.array(arr).view(type=with_prepare) - a = np.array(1).view(type=with_prepare) - x = np.add(a, a) - assert_equal(x, np.array(2)) - assert_equal(type(x), with_prepare) - - def test_failing_prepare(self): - class A(object): - def __array__(self): - return np.zeros(1) - def __array_prepare__(self, arr, 
context=None): - raise RuntimeError - a = A() - self.assertRaises(RuntimeError, ncu.maximum, a, a) - - def test_array_with_context(self): - class A(object): - def __array__(self, dtype=None, context=None): - func, args, i = context - self.func = func - self.args = args - self.i = i - return np.zeros(1) - class B(object): - def __array__(self, dtype=None): - return np.zeros(1, dtype) - class C(object): - def __array__(self): - return np.zeros(1) - a = A() - ncu.maximum(np.zeros(1), a) - self.assertTrue(a.func is ncu.maximum) - assert_equal(a.args[0], 0) - self.assertTrue(a.args[1] is a) - self.assertTrue(a.i == 1) - assert_equal(ncu.maximum(a, B()), 0) - assert_equal(ncu.maximum(a, C()), 0) - - -class TestChoose(TestCase): - def test_mixed(self): - c = np.array([True,True]) - a = np.array([True,True]) - assert_equal(np.choose(c, (a, 1)), np.array([1,1])) - - -def is_longdouble_finfo_bogus(): - info = np.finfo(np.longcomplex) - return not np.isfinite(np.log10(info.tiny/info.eps)) - - -class TestComplexFunctions(object): - funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, - np.arctanh, np.sin, np.cos, np.tan, np.exp, - np.exp2, np.log, np.sqrt, np.log10, np.log2, - np.log1p] - - def test_it(self): - for f in self.funcs: - if f is np.arccosh : - x = 1.5 - else : - x = .5 - fr = f(x) - fz = f(np.complex(x)) - assert_almost_equal(fz.real, fr, err_msg='real part %s'%f) - assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f) - - def test_precisions_consistent(self) : - z = 1 + 1j - for f in self.funcs : - fcf = f(np.csingle(z)) - fcd = f(np.cdouble(z)) - fcl = f(np.clongdouble(z)) - assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f) - assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f) - - def test_branch_cuts(self): - # check branch cuts and continuity on them - yield _check_branch_cut, np.log, -0.5, 1j, 1, -1 - yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1 - yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1 - yield 
_check_branch_cut, np.log1p, -1.5, 1j, 1, -1 - yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1 - - yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1 - yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1 - yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1 - - yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1 - yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1 - yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1 - - # check against bogus branch cuts: assert continuity between quadrants - yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1 - yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1 - yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1 - - yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1 - yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1 - yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1 - - @dec.knownfailureif(True, "These branch cuts are known to fail") - def test_branch_cuts_failing(self): - # XXX: signed zero not OK with ICC on 64-bit platform for log, see - # http://permalink.gmane.org/gmane.comp.python.numeric.general/25335 - yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True - # XXX: signed zeros are not OK for sqrt or for the arc* functions - yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True - yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True - yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True - yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True - yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True - yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, 
-1, True - - def test_against_cmath(self): - import cmath, sys - - # cmath.asinh is broken in some versions of Python, see - # http://bugs.python.org/issue1381 - broken_cmath_asinh = False - if sys.version_info < (2,6): - broken_cmath_asinh = True - - points = [-1-1j, -1+1j, +1-1j, +1+1j] - name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', - 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(np.complex).eps - for func in self.funcs: - fname = func.__name__.split('.')[-1] - cname = name_map.get(fname, fname) - try: - cfunc = getattr(cmath, cname) - except AttributeError: - continue - for p in points: - a = complex(func(np.complex_(p))) - b = cfunc(p) - - if cname == 'asinh' and broken_cmath_asinh: - continue - - assert abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b) - - def check_loss_of_precision(self, dtype): - """Check loss of precision in complex arc* functions""" - - # Check against known-good functions - - info = np.finfo(dtype) - real_dtype = dtype(0.).real.dtype - eps = info.eps - - def check(x, rtol): - x = x.astype(real_dtype) - - z = x.astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) - assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsinh') - - z = (1j*x).astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) - assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsin') - - z = x.astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) - assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arctanh') - - z = (1j*x).astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) - assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arctan') - - # The switchover was chosen as 1e-3; hence there can be up to - # ~eps/1e-3 of relative cancellation error before it - - x_series = np.logspace(-20, -3.001, 200) - x_basic = np.logspace(-2.999, 0, 10, endpoint=False) - - if dtype is 
np.longcomplex: - # It's not guaranteed that the system-provided arc functions - # are accurate down to a few epsilons. (Eg. on Linux 64-bit) - # So, give more leeway for long complex tests here: - check(x_series, 50*eps) - else: - check(x_series, 2*eps) - check(x_basic, 2*eps/1e-3) - - # Check a few points - - z = np.array([1e-5*(1+1j)], dtype=dtype) - p = 9.999999999333333333e-6 + 1.000000000066666666e-5j - d = np.absolute(1-np.arctanh(z)/p) - assert np.all(d < 1e-15) - - p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j - d = np.absolute(1-np.arcsinh(z)/p) - assert np.all(d < 1e-15) - - p = 9.999999999333333333e-6j + 1.000000000066666666e-5 - d = np.absolute(1-np.arctan(z)/p) - assert np.all(d < 1e-15) - - p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 - d = np.absolute(1-np.arcsin(z)/p) - assert np.all(d < 1e-15) - - # Check continuity across switchover points - - def check(func, z0, d=1): - z0 = np.asarray(z0, dtype=dtype) - zp = z0 + abs(z0) * d * eps * 2 - zm = z0 - abs(z0) * d * eps * 2 - assert np.all(zp != zm), (zp, zm) - - # NB: the cancellation error at the switchover is at least eps - good = (abs(func(zp) - func(zm)) < 2*eps) - assert np.all(good), (func, z0[~good]) - - for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan): - pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3) - if rp != 0 or ip != 0] - check(func, pts, 1) - check(func, pts, 1j) - check(func, pts, 1+1j) - - def test_loss_of_precision(self): - for dtype in [np.complex64, np.complex_]: - yield self.check_loss_of_precision, dtype - - @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo") - def test_loss_of_precision_longcomplex(self): - self.check_loss_of_precision(np.longcomplex) - - -class TestAttributes(TestCase): - def test_attributes(self): - add = ncu.add - assert_equal(add.__name__, 'add') - assert add.__doc__.startswith('add(x1, x2[, out])\n\n') - self.assertTrue(add.ntypes >= 18) # don't fail if types added - 
self.assertTrue('ii->i' in add.types) - assert_equal(add.nin, 2) - assert_equal(add.nout, 1) - assert_equal(add.identity, 0) - - -class TestSubclass(TestCase): - def test_subclass_op(self): - class simple(np.ndarray): - def __new__(subtype, shape): - self = np.ndarray.__new__(subtype, shape, dtype=object) - self.fill(0) - return self - a = simple((3,4)) - assert_equal(a+a, a) - -def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, - dtype=np.complex): - """ - Check for a branch cut in a function. - - Assert that `x0` lies on a branch cut of function `f` and `f` is - continuous from the direction `dx`. - - Parameters - ---------- - f : func - Function to check - x0 : array-like - Point on branch cut - dx : array-like - Direction to check continuity in - re_sign, im_sign : {1, -1} - Change of sign of the real or imaginary part expected - sig_zero_ok : bool - Whether to check if the branch cut respects signed zero (if applicable) - dtype : dtype - Dtype to check (should be complex) - - """ - x0 = np.atleast_1d(x0).astype(dtype) - dx = np.atleast_1d(dx).astype(dtype) - - scale = np.finfo(dtype).eps * 1e3 - atol = 1e-4 - - y0 = f(x0) - yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) - ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) - - assert np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp) - assert np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp) - assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym) - assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym) - - if sig_zero_ok: - # check that signed zeros also work as a displacement - jr = (x0.real == 0) & (dx.real != 0) - ji = (x0.imag == 0) & (dx.imag != 0) - - x = -x0 - x.real[jr] = 0.*dx.real - x.imag[ji] = 0.*dx.imag - x = -x - ym = f(x) - ym = ym[jr | ji] - y0 = y0[jr | ji] - assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym) - assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym) - -def 
test_copysign(): - assert np.copysign(1, -1) == -1 - old_err = np.seterr(divide="ignore") - try: - assert 1 / np.copysign(0, -1) < 0 - assert 1 / np.copysign(0, 1) > 0 - finally: - np.seterr(**old_err) - assert np.signbit(np.copysign(np.nan, -1)) - assert not np.signbit(np.copysign(np.nan, 1)) - -def _test_nextafter(t): - one = t(1) - two = t(2) - zero = t(0) - eps = np.finfo(t).eps - assert np.nextafter(one, two) - one == eps - assert np.nextafter(one, zero) - one < 0 - assert np.isnan(np.nextafter(np.nan, one)) - assert np.isnan(np.nextafter(one, np.nan)) - assert np.nextafter(one, one) == one - -def test_nextafter(): - return _test_nextafter(np.float64) - -def test_nextafterf(): - return _test_nextafter(np.float32) - -@dec.knownfailureif(sys.platform == 'win32' or - ("powerpc" in platform.processor()), - "Long double support buggy on win32 and PPC.") -def test_nextafterl(): - return _test_nextafter(np.longdouble) - -def _test_spacing(t): - err = np.seterr(invalid='ignore') - one = t(1) - eps = np.finfo(t).eps - nan = t(np.nan) - inf = t(np.inf) - try: - assert np.spacing(one) == eps - assert np.isnan(np.spacing(nan)) - assert np.isnan(np.spacing(inf)) - assert np.isnan(np.spacing(-inf)) - assert np.spacing(t(1e30)) != 0 - finally: - np.seterr(**err) - -def test_spacing(): - return _test_spacing(np.float64) - -def test_spacingf(): - return _test_spacing(np.float32) - -@dec.knownfailureif(sys.platform == 'win32' or - ("powerpc" in platform.processor()), - "Long double support buggy on win32 and PPC.") -def test_spacingl(): - return _test_spacing(np.longdouble) - -def test_spacing_gfortran(): - # Reference from this fortran file, built with gfortran 4.3.3 on linux - # 32bits: - # PROGRAM test_spacing - # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) - # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) - # - # WRITE(*,*) spacing(0.00001_DBL) - # WRITE(*,*) spacing(1.0_DBL) - # WRITE(*,*) spacing(1000._DBL) - # WRITE(*,*) spacing(10500._DBL) - 
# - # WRITE(*,*) spacing(0.00001_SGL) - # WRITE(*,*) spacing(1.0_SGL) - # WRITE(*,*) spacing(1000._SGL) - # WRITE(*,*) spacing(10500._SGL) - # END PROGRAM - ref = {} - ref[np.float64] = [1.69406589450860068E-021, - 2.22044604925031308E-016, - 1.13686837721616030E-013, - 1.81898940354585648E-012] - ref[np.float32] = [ - 9.09494702E-13, - 1.19209290E-07, - 6.10351563E-05, - 9.76562500E-04] - - for dt, dec in zip([np.float32, np.float64], (10, 20)): - x = np.array([1e-5, 1, 1000, 10500], dtype=dt) - assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec) - -def test_nextafter_vs_spacing(): - # XXX: spacing does not handle long double yet - for t in [np.float32, np.float64]: - for _f in [1, 1e-5, 1000]: - f = t(_f) - f1 = t(_f + 1) - assert np.nextafter(f, f1) - f == np.spacing(f) - -def test_pos_nan(): - """Check np.nan is a positive nan.""" - assert np.signbit(np.nan) == 0 - -def test_reduceat(): - """Test bug in reduceat when structured arrays are not copied.""" - db = np.dtype([('name', 'S11'),('time', np.int64), ('value', np.float32)]) - a = np.empty([100], dtype=db) - a['name'] = 'Simple' - a['time'] = 10 - a['value'] = 100 - indx = [0,7,15,25] - - h2 = [] - val1 = indx[0] - for val2 in indx[1:]: - h2.append(np.add.reduce(a['value'][val1:val2])) - val1 = val2 - h2.append(np.add.reduce(a['value'][val1:])) - h2 = np.array(h2) - - # test buffered -- this should work - h1 = np.add.reduceat(a['value'], indx) - assert_array_almost_equal(h1, h2) - - # This is when the error occurs. 
- # test no buffer - res = np.setbufsize(32) - h1 = np.add.reduceat(a['value'], indx) - np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) - assert_array_almost_equal(h1, h2) - - -def test_complex_nan_comparisons(): - nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] - fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), - complex(1, 1), complex(-1, -1), complex(0, 0)] - - for x in nans + fins: - x = np.array([x]) - for y in nans + fins: - y = np.array([y]) - - if np.isfinite(x) and np.isfinite(y): - continue - - assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) - assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) - assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) - assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) - assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_umath_complex.py b/numpy-1.6.2/numpy/core/tests/test_umath_complex.py deleted file mode 100644 index 5cdb6b8847..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_umath_complex.py +++ /dev/null @@ -1,581 +0,0 @@ -import sys -import platform - -from numpy.testing import * -import numpy.core.umath as ncu -import numpy as np - -# TODO: branch cuts (use Pauli code) -# TODO: conj 'symmetry' -# TODO: FPU exceptions - -# At least on Windows the results of many complex functions are not conforming -# to the C99 standard. See ticket 1574. -# Ditto for Solaris (ticket 1642) and OS X on PowerPC. 
-olderr = np.seterr(invalid='ignore', divide='ignore') -try: - functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) - or (np.log(complex(np.NZERO, 0)).imag != np.pi)) -finally: - np.seterr(**olderr) -# TODO: replace with a check on whether platform-provided C99 funcs are used -skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) - -def platform_skip(func): - return dec.skipif(skip_complex_tests, - "Numpy is using complex functions (e.g. sqrt) provided by your" - "platform's C library. However, they do not seem to behave according" - "to C99 -- so C99 tests are skipped.")(func) - - -class TestCexp(object): - def test_simple(self): - check = check_complex_value - f = np.exp - - yield check, f, 1, 0, np.exp(1), 0, False - yield check, f, 0, 1, np.cos(1), np.sin(1), False - - ref = np.exp(1) * np.complex(np.cos(1), np.sin(1)) - yield check, f, 1, 1, ref.real, ref.imag, False - - @platform_skip - def test_special_values(self): - # C99: Section G 6.3.1 - - check = check_complex_value - f = np.exp - - # cexp(+-0 + 0i) is 1 + 0i - yield check, f, np.PZERO, 0, 1, 0, False - yield check, f, np.NZERO, 0, 1, 0, False - - # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU - # exception - yield check, f, 1, np.inf, np.nan, np.nan - yield check, f, -1, np.inf, np.nan, np.nan - yield check, f, 0, np.inf, np.nan, np.nan - - # cexp(inf + 0i) is inf + 0i - yield check, f, np.inf, 0, np.inf, 0 - - # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y - ref = np.complex(np.cos(1.), np.sin(1.)) - yield check, f, -np.inf, 1, np.PZERO, np.PZERO - - ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75)) - yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO - - # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y - ref = np.complex(np.cos(1.), np.sin(1.)) - yield check, f, np.inf, 1, np.inf, np.inf - - ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75)) - yield check, f, np.inf, 0.75 * np.pi, 
-np.inf, np.inf - - # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) - def _check_ninf_inf(dummy): - msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" - err = np.seterr(invalid='ignore') - try: - z = f(np.array(np.complex(-np.inf, np.inf))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform %(z.real, z.imag)) - finally: - np.seterr(**err) - - yield _check_ninf_inf, None - - # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. - def _check_inf_inf(dummy): - msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" - err = np.seterr(invalid='ignore') - try: - z = f(np.array(np.complex(np.inf, np.inf))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - finally: - np.seterr(**err) - - yield _check_inf_inf, None - - # cexp(-inf + nan i) is +-0 +- 0i - def _check_ninf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" - err = np.seterr(invalid='ignore') - try: - z = f(np.array(np.complex(-np.inf, np.nan))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - finally: - np.seterr(**err) - - yield _check_ninf_nan, None - - # cexp(inf + nan i) is +-inf + nan - def _check_inf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" - err = np.seterr(invalid='ignore') - try: - z = f(np.array(np.complex(np.inf, np.nan))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - finally: - np.seterr(**err) - - yield _check_inf_nan, None - - # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU - # ex) - yield check, f, np.nan, 1, np.nan, np.nan - yield check, f, np.nan, -1, np.nan, np.nan - - yield check, f, np.nan, np.inf, np.nan, np.nan - yield check, f, np.nan, -np.inf, np.nan, np.nan - - # cexp(nan + nani) is nan + nani - yield check, f, np.nan, np.nan, np.nan, np.nan - - @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most 
implementations") - def test_special_values2(self): - # XXX: most implementations get it wrong here (including glibc <= 2.10) - # cexp(nan + 0i) is nan + 0i - yield check, f, np.nan, 0, np.nan, 0 - -class TestClog(TestCase): - def test_simple(self): - x = np.array([1+0j, 1+2j]) - y_r = np.log(np.abs(x)) + 1j * np.angle(x) - y = np.log(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - @platform_skip - def test_special_values(self): - xl = [] - yl = [] - - # From C99 std (Sec 6.3.2) - # XXX: check exceptions raised - # --- raise for invalid fails. - - # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' - # floating-point exception. - err = np.seterr(divide='raise') - try: - x = np.array([np.NZERO], dtype=np.complex) - y = np.complex(-np.inf, np.pi) - self.assertRaises(FloatingPointError, np.log, x) - np.seterr(divide='ignore') - assert_almost_equal(np.log(x), y) - finally: - np.seterr(**err) - - xl.append(x) - yl.append(y) - - # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' - # floating-point exception. - err = np.seterr(divide='raise') - try: - x = np.array([0], dtype=np.complex) - y = np.complex(-np.inf, 0) - self.assertRaises(FloatingPointError, np.log, x) - np.seterr(divide='ignore') - assert_almost_equal(np.log(x), y) - finally: - np.seterr(**err) - - xl.append(x) - yl.append(y) - - # clog(x + i inf returns +inf + i pi /2, for finite x. - x = np.array([complex(1, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.5 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-1, np.inf)], dtype=np.complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(x + iNaN) returns NaN + iNaN and optionally raises the - # 'invalid' floating- point exception, for finite x. 
- err = np.seterr(invalid='raise') - try: - x = np.array([complex(1., np.nan)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - #self.assertRaises(FloatingPointError, np.log, x) - np.seterr(invalid='ignore') - assert_almost_equal(np.log(x), y) - finally: - np.seterr(**err) - - xl.append(x) - yl.append(y) - - err = np.seterr(invalid='raise') - try: - x = np.array([np.inf + 1j * np.nan], dtype=np.complex) - #self.assertRaises(FloatingPointError, np.log, x) - np.seterr(invalid='ignore') - assert_almost_equal(np.log(x), y) - finally: - np.seterr(**err) - - xl.append(x) - yl.append(y) - - # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. - x = np.array([-np.inf + 1j], dtype=np.complex) - y = np.complex(np.inf, np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. - x = np.array([np.inf + 1j], dtype=np.complex) - y = np.complex(np.inf, 0) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(- inf + i inf) returns +inf + i3pi /4. - x = np.array([complex(-np.inf, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.75 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + i inf) returns +inf + ipi /4. - x = np.array([complex(np.inf, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.25 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+/- inf + iNaN) returns +inf + iNaN. - x = np.array([complex(np.inf, np.nan)], dtype=np.complex) - y = np.complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-np.inf, np.nan)], dtype=np.complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iy) returns NaN + iNaN and optionally raises the - # 'invalid' floating-point exception, for finite y. 
- x = np.array([complex(np.nan, 1)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + i inf) returns +inf + iNaN. - x = np.array([complex(np.nan, np.inf)], dtype=np.complex) - y = np.complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iNaN) returns NaN + iNaN. - x = np.array([complex(np.nan, np.nan)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(conj(z)) = conj(clog(z)). - xa = np.array(xl, dtype=np.complex) - ya = np.array(yl, dtype=np.complex) - err = np.seterr(divide='ignore') - try: - for i in range(len(xa)): - assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i]))) - finally: - np.seterr(**err) - -class TestCsqrt(object): - - def test_simple(self): - # sqrt(1) - yield check_complex_value, np.sqrt, 1, 0, 1, 0 - - # sqrt(1i) - yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False - - # sqrt(-1) - yield check_complex_value, np.sqrt, -1, 0, 0, 1 - - def test_simple_conjugate(self): - ref = np.conj(np.sqrt(np.complex(1, 1))) - def f(z): - return np.sqrt(np.conj(z)) - yield check_complex_value, f, 1, 1, ref.real, ref.imag, False - - #def test_branch_cut(self): - # _check_branch_cut(f, -1, 0, 1, -1) - - @platform_skip - def test_special_values(self): - check = check_complex_value - f = np.sqrt - - # C99: Sec G 6.4.2 - x, y = [], [] - - # csqrt(+-0 + 0i) is 0 + 0i - yield check, f, np.PZERO, 0, 0, 0 - yield check, f, np.NZERO, 0, 0, 0 - - # csqrt(x + infi) is inf + infi for any x (including NaN) - yield check, f, 1, np.inf, np.inf, np.inf - yield check, f, -1, np.inf, np.inf, np.inf - - yield check, f, np.PZERO, np.inf, np.inf, np.inf - yield check, f, np.NZERO, np.inf, np.inf, np.inf - yield check, f, np.inf, np.inf, np.inf, np.inf - yield check, f, -np.inf, np.inf, np.inf, np.inf - yield check, f, -np.nan, 
np.inf, np.inf, np.inf - - # csqrt(x + nani) is nan + nani for any finite x - yield check, f, 1, np.nan, np.nan, np.nan - yield check, f, -1, np.nan, np.nan, np.nan - yield check, f, 0, np.nan, np.nan, np.nan - - # csqrt(-inf + yi) is +0 + infi for any finite y > 0 - yield check, f, -np.inf, 1, np.PZERO, np.inf - - # csqrt(inf + yi) is +inf + 0i for any finite y > 0 - yield check, f, np.inf, 1, np.inf, np.PZERO - - # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) - def _check_ninf_nan(dummy): - msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" - z = np.sqrt(np.array(np.complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. - err = np.seterr(invalid='ignore') - try: - if not (np.isnan(z.real) and np.isinf(z.imag)): - raise AssertionError(msgform % (z.real, z.imag)) - finally: - np.seterr(**err) - - yield _check_ninf_nan, None - - # csqrt(+inf + nani) is inf + nani - yield check, f, np.inf, np.nan, np.inf, np.nan - - # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x - # + nani) - yield check, f, np.nan, 0, np.nan, np.nan - yield check, f, np.nan, 1, np.nan, np.nan - yield check, f, np.nan, np.nan, np.nan, np.nan - - # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch - # cuts first) - -class TestCpow(TestCase): - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - err = np.seterr(invalid='ignore') - try: - y_r = x ** 2 - y = np.power(x, 2) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - finally: - np.seterr(**err) - - def test_scalar(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = range(len(x)) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy complex scalars - err = np.seterr(invalid='ignore') - try: - n_r = [x[i] ** 
y[i] for i in lx] - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - finally: - np.seterr(**err) - - def test_array(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = range(len(x)) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy arrays - err = np.seterr(invalid='ignore') - try: - n_r = x ** y - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - finally: - np.seterr(**err) - -class TestCabs(object): - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) - - olderr = np.seterr(invalid='ignore') - try: - y = np.abs(x) - finally: - np.seterr(**olderr) - - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_fabs(self): - # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(1, np.NZERO)], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - olderr = np.seterr(invalid='ignore') - try: - x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - finally: - np.seterr(**olderr) - - x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - def test_cabs_inf_nan(self): - x, y = [], [] - - # cabs(+-nan + nani) returns nan - x.append(np.nan) - y.append(np.nan) - yield check_real_value, np.abs, np.nan, np.nan, np.nan - - x.append(np.nan) - y.append(-np.nan) - yield check_real_value, np.abs, -np.nan, np.nan, np.nan - - # According to C99 standard, if exactly one of the real/part is inf and - # the other nan, then cabs should return inf - x.append(np.inf) - 
y.append(np.nan) - yield check_real_value, np.abs, np.inf, np.nan, np.inf - - x.append(-np.inf) - y.append(np.nan) - yield check_real_value, np.abs, -np.inf, np.nan, np.inf - - # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) - def f(a): - return np.abs(np.conj(a)) - def g(a, b): - return np.abs(np.complex(a, b)) - - xa = np.array(x, dtype=np.complex) - ya = np.array(x, dtype=np.complex) - for i in range(len(xa)): - ref = g(x[i], y[i]) - yield check_real_value, f, x[i], y[i], ref - -class TestCarg(object): - def test_simple(self): - check_real_value(ncu._arg, 1, 0, 0, False) - check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) - - check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) - check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) - - @dec.knownfailureif(True, - "Complex arithmetic with signed zero is buggy on most implementation") - def test_zero(self): - # carg(-0 +- 0i) returns +- pi - yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False - yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False - - # carg(+0 +- 0i) returns +- 0 - yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO - yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO - - # carg(x +- 0i) returns +- 0 for x > 0 - yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False - yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False - - # carg(x +- 0i) returns +- pi for x < 0 - yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False - yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False - - # carg(+- 0 + yi) returns pi/2 for y > 0 - yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False - yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False - - # carg(+- 0 + yi) returns -pi/2 for y < 0 - yield check_real_value, ncu._arg, np.PZERO, -1, 0.5 * np.pi, False - yield check_real_value, ncu._arg, np.NZERO, -1,-0.5 * np.pi, False - - #def test_branch_cuts(self): - # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) - 
- def test_special_values(self): - # carg(-np.inf +- yi) returns +-pi for finite y > 0 - yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False - yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False - - # carg(np.inf +- yi) returns +-0 for finite y > 0 - yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False - yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False - - # carg(x +- np.infi) returns +-pi/2 for finite x - yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False - yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False - - # carg(-np.inf +- np.infi) returns +-3pi/4 - yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False - yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False - - # carg(np.inf +- np.infi) returns +-pi/4 - yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False - yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False - - # carg(x + yi) returns np.nan if x or y is nan - yield check_real_value, ncu._arg, np.nan, 0, np.nan, False - yield check_real_value, ncu._arg, 0, np.nan, np.nan, False - - yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False - yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False - -def check_real_value(f, x1, y1, x, exact=True): - z1 = np.array([complex(x1, y1)]) - if exact: - assert_equal(f(z1), x) - else: - assert_almost_equal(f(z1), x) - -def check_complex_value(f, x1, y1, x2, y2, exact=True): - err = np.seterr(invalid='ignore') - z1 = np.array([complex(x1, y1)]) - z2 = np.complex(x2, y2) - try: - if exact: - assert_equal(f(z1), z2) - else: - assert_almost_equal(f(z1), z2) - finally: - np.seterr(**err) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/core/tests/test_unicode.py b/numpy-1.6.2/numpy/core/tests/test_unicode.py deleted file mode 100644 index d47ac548bc..0000000000 --- a/numpy-1.6.2/numpy/core/tests/test_unicode.py +++ /dev/null @@ -1,341 +0,0 @@ 
-import sys - -from numpy.testing import * -from numpy.core import * -from numpy.compat import asbytes - -# Guess the UCS length for this python interpreter -if sys.version_info[0] >= 3: - import array as _array - ucs4 = (_array.array('u').itemsize == 4) - def buffer_length(arr): - if isinstance(arr, unicode): - return _array.array('u').itemsize * len(arr) - v = memoryview(arr) - if v.shape is None: - return len(v) * v.itemsize - else: - return prod(v.shape) * v.itemsize -else: - if len(buffer(u'u')) == 4: - ucs4 = True - else: - ucs4 = False - def buffer_length(arr): - if isinstance(arr, ndarray): - return len(arr.data) - return len(buffer(arr)) - -# Value that can be represented in UCS2 interpreters -ucs2_value = u'\uFFFF' -# Value that cannot be represented in UCS2 interpreters (but can in UCS4) -ucs4_value = u'\U0010FFFF' - - -############################################################ -# Creation tests -############################################################ - -class create_zeros(object): - """Check the creation of zero-valued arrays""" - - def content_check(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) - # Small check that data in array element is ok - self.assertTrue(ua_scalar == u'') - # Encode to ascii and double check - self.assertTrue(ua_scalar.encode('ascii') == asbytes('')) - # Check buffer lengths for scalars - if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 0) - else: - self.assertTrue(buffer_length(ua_scalar) == 0) - - def test_zeros0D(self): - """Check creation of 0-dimensional objects""" - ua = zeros((), dtype='U%s' % self.ulen) - self.content_check(ua, ua[()], 4*self.ulen) - - def test_zerosSD(self): - """Check creation of single-dimensional objects""" - ua = zeros((2,), dtype='U%s' % self.ulen) - self.content_check(ua, ua[0], 4*self.ulen*2) - 
self.content_check(ua, ua[1], 4*self.ulen*2) - - def test_zerosMD(self): - """Check creation of multi-dimensional objects""" - ua = zeros((2,3,4), dtype='U%s' % self.ulen) - self.content_check(ua, ua[0,0,0], 4*self.ulen*2*3*4) - self.content_check(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) - - -class test_create_zeros_1(create_zeros, TestCase): - """Check the creation of zero-valued arrays (size 1)""" - ulen = 1 - - -class test_create_zeros_2(create_zeros, TestCase): - """Check the creation of zero-valued arrays (size 2)""" - ulen = 2 - - -class test_create_zeros_1009(create_zeros, TestCase): - """Check the creation of zero-valued arrays (size 1009)""" - ulen = 1009 - - -class create_values(object): - """Check the creation of unicode arrays with values""" - - def content_check(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) - # Small check that data in array element is ok - self.assertTrue(ua_scalar == self.ucs_value*self.ulen) - # Encode to UTF-8 and double check - self.assertTrue(ua_scalar.encode('utf-8') == \ - (self.ucs_value*self.ulen).encode('utf-8')) - # Check buffer lengths for scalars - if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) - else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) - - def test_values0D(self): - """Check creation of 0-dimensional objects with values""" - ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) - self.content_check(ua, ua[()], 4*self.ulen) - - def test_valuesSD(self): - """Check creation of single-dimensional objects with values""" - ua = 
array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) - self.content_check(ua, ua[0], 4*self.ulen*2) - self.content_check(ua, ua[1], 4*self.ulen*2) - - def test_valuesMD(self): - """Check creation of multi-dimensional objects with values""" - ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) - self.content_check(ua, ua[0,0,0], 4*self.ulen*2*3*4) - self.content_check(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) - - -class test_create_values_1_ucs2(create_values, TestCase): - """Check the creation of valued arrays (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - - -class test_create_values_1_ucs4(create_values, TestCase): - """Check the creation of valued arrays (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - - -class test_create_values_2_ucs2(create_values, TestCase): - """Check the creation of valued arrays (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - - -class test_create_values_2_ucs4(create_values, TestCase): - """Check the creation of valued arrays (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - - -class test_create_values_1009_ucs2(create_values, TestCase): - """Check the creation of valued arrays (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - - -class test_create_values_1009_ucs4(create_values, TestCase): - """Check the creation of valued arrays (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -############################################################ -# Assignment tests -############################################################ - -class assign_values(object): - """Check the assignment of unicode arrays with values""" - - def content_check(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) - # Small check that data in array element is ok - 
self.assertTrue(ua_scalar == self.ucs_value*self.ulen) - # Encode to UTF-8 and double check - self.assertTrue(ua_scalar.encode('utf-8') == \ - (self.ucs_value*self.ulen).encode('utf-8')) - # Check buffer lengths for scalars - if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) - else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) - - def test_values0D(self): - """Check assignment of 0-dimensional objects with values""" - ua = zeros((), dtype='U%s' % self.ulen) - ua[()] = self.ucs_value*self.ulen - self.content_check(ua, ua[()], 4*self.ulen) - - def test_valuesSD(self): - """Check assignment of single-dimensional objects with values""" - ua = zeros((2,), dtype='U%s' % self.ulen) - ua[0] = self.ucs_value*self.ulen - self.content_check(ua, ua[0], 4*self.ulen*2) - ua[1] = self.ucs_value*self.ulen - self.content_check(ua, ua[1], 4*self.ulen*2) - - def test_valuesMD(self): - """Check assignment of multi-dimensional objects with values""" - ua = zeros((2,3,4), dtype='U%s' % self.ulen) - ua[0,0,0] = self.ucs_value*self.ulen - self.content_check(ua, ua[0,0,0], 4*self.ulen*2*3*4) - ua[-1,-1,-1] = self.ucs_value*self.ulen - self.content_check(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) - - -class test_assign_values_1_ucs2(assign_values, TestCase): - """Check the assignment of valued arrays (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - - -class test_assign_values_1_ucs4(assign_values, TestCase): - """Check the assignment of valued arrays (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - - -class test_assign_values_2_ucs2(assign_values, TestCase): - """Check the assignment of valued arrays (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - - -class 
test_assign_values_2_ucs4(assign_values, TestCase): - """Check the assignment of valued arrays (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - - -class test_assign_values_1009_ucs2(assign_values, TestCase): - """Check the assignment of valued arrays (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - - -class test_assign_values_1009_ucs4(assign_values, TestCase): - """Check the assignment of valued arrays (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - - -############################################################ -# Byteorder tests -############################################################ - -class byteorder_values: - """Check the byteorder of unicode arrays in round-trip conversions""" - - def test_values0D(self): - """Check byteorder of 0-dimensional objects""" - ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - # This changes the interpretation of the data region (but not the - # actual data), therefore the returned scalars are not - # the same (they are byte-swapped versions of each other). 
- self.assertTrue(ua[()] != ua2[()]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - def test_valuesSD(self): - """Check byteorder of single-dimensional objects""" - ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - self.assertTrue(ua[0] != ua2[0]) - self.assertTrue(ua[-1] != ua2[-1]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - def test_valuesMD(self): - """Check byteorder of multi-dimensional objects""" - ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, - dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - self.assertTrue(ua[0,0,0] != ua2[0,0,0]) - self.assertTrue(ua[-1,-1,-1] != ua2[-1,-1,-1]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - -class test_byteorder_1_ucs2(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - - -class test_byteorder_1_ucs4(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - - -class test_byteorder_2_ucs2(byteorder_values, TestCase): - """Check the byteorder in unicode (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - - -class test_byteorder_2_ucs4(byteorder_values, TestCase): - """Check the byteorder in unicode (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - - -class test_byteorder_1009_ucs2(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - - -class test_byteorder_1009_ucs4(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/ctypeslib.py b/numpy-1.6.2/numpy/ctypeslib.py deleted file mode 100644 index 
8dcfa635f4..0000000000 --- a/numpy-1.6.2/numpy/ctypeslib.py +++ /dev/null @@ -1,429 +0,0 @@ -""" -============================ -``ctypes`` Utility Functions -============================ - -See Also ---------- -load_library : Load a C library. -ndpointer : Array restype/argtype with verification. -as_ctypes : Create a ctypes array from an ndarray. -as_array : Create an ndarray from a ctypes array. - -References ----------- -.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes - -Examples --------- -Load the C library: - ->>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP - -Our result type, an ndarray that must be of type double, be 1-dimensional -and is C-contiguous in memory: - ->>> array_1d_double = np.ctypeslib.ndpointer( -... dtype=np.double, -... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP - -Our C-function typically takes an array and updates its values -in-place. For example:: - - void foo_func(double* x, int length) - { - int i; - for (i = 0; i < length; i++) { - x[i] = i*i; - } - } - -We wrap it using: - ->>> lib.foo_func.restype = None #doctest: +SKIP ->>> lib.foo.argtypes = [array_1d_double, c_int] #doctest: +SKIP - -Then, we're ready to call ``foo_func``: - ->>> out = np.empty(15, dtype=np.double) ->>> _lib.foo_func(out, len(out)) #doctest: +SKIP - -""" -__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library', - 'c_intp', 'as_ctypes', 'as_array'] - -import sys, os -from numpy import integer, ndarray, dtype as _dtype, deprecate, array -from numpy.core.multiarray import _flagdict, flagsobj - -try: - import ctypes -except ImportError: - ctypes = None - -if ctypes is None: - def _dummy(*args, **kwds): - """ - Dummy object that raises an ImportError if ctypes is not available. - - Raises - ------ - ImportError - If ctypes is not available. - - """ - raise ImportError, "ctypes is not available." 
- ctypes_load_library = _dummy - load_library = _dummy - as_ctypes = _dummy - as_array = _dummy - from numpy import intp as c_intp - _ndptr_base = object -else: - import numpy.core._internal as nic - c_intp = nic._getintp_ctype() - del nic - _ndptr_base = ctypes.c_void_p - - # Adapted from Albert Strasheim - def load_library(libname, loader_path): - if ctypes.__version__ < '1.0.1': - import warnings - warnings.warn("All features of ctypes interface may not work " \ - "with ctypes < 1.0.1") - - ext = os.path.splitext(libname)[1] - if not ext: - # Try to load library with platform-specific name, otherwise - # default to libname.[so|pyd]. Sometimes, these files are built - # erroneously on non-linux platforms. - from numpy.distutils.misc_util import get_shared_lib_extension - so_ext = get_shared_lib_extension() - libname_ext = [libname + so_ext] - if sys.version[:3] >= '3.2': - # For Python >= 3.2 a tag may be added to lib extension - # (platform dependent). If we find such a tag, try both with - # and without it. - so_ext2 = get_shared_lib_extension(is_python_ext=True) - if not so_ext2 == so_ext: - libname_ext.insert(0, libname + so_ext2) - if sys.platform == 'win32': - libname_ext.insert(0, '%s.dll' % libname) - elif sys.platform == 'darwin': - libname_ext.insert(0, '%s.dylib' % libname) - else: - libname_ext = [libname] - - loader_path = os.path.abspath(loader_path) - if not os.path.isdir(loader_path): - libdir = os.path.dirname(loader_path) - else: - libdir = loader_path - - # Need to save exception when using Python 3k, see PEP 3110. 
- exc = None - for ln in libname_ext: - try: - libpath = os.path.join(libdir, ln) - return ctypes.cdll[libpath] - except OSError, e: - exc = e - raise exc - - ctypes_load_library = deprecate(load_library, 'ctypes_load_library', - 'load_library') - -def _num_fromflags(flaglist): - num = 0 - for val in flaglist: - num += _flagdict[val] - return num - -_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY'] -def _flags_fromnum(num): - res = [] - for key in _flagnames: - value = _flagdict[key] - if (num & value): - res.append(key) - return res - - -class _ndptr(_ndptr_base): - - def _check_retval_(self): - """This method is called when this class is used as the .restype - asttribute for a shared-library function. It constructs a numpy - array from a void pointer.""" - return array(self) - - @property - def __array_interface__(self): - return {'descr': self._dtype_.descr, - '__ref': self, - 'strides': None, - 'shape': self._shape_, - 'version': 3, - 'typestr': self._dtype_.descr[0][1], - 'data': (self.value, False), - } - - @classmethod - def from_param(cls, obj): - if not isinstance(obj, ndarray): - raise TypeError, "argument must be an ndarray" - if cls._dtype_ is not None \ - and obj.dtype != cls._dtype_: - raise TypeError, "array must have data type %s" % cls._dtype_ - if cls._ndim_ is not None \ - and obj.ndim != cls._ndim_: - raise TypeError, "array must have %d dimension(s)" % cls._ndim_ - if cls._shape_ is not None \ - and obj.shape != cls._shape_: - raise TypeError, "array must have shape %s" % str(cls._shape_) - if cls._flags_ is not None \ - and ((obj.flags.num & cls._flags_) != cls._flags_): - raise TypeError, "array must have flags %s" % \ - _flags_fromnum(cls._flags_) - return obj.ctypes - - -# Factory for an array-checking class with from_param defined for -# use with ctypes argtypes mechanism -_pointer_type_cache = {} -def ndpointer(dtype=None, ndim=None, shape=None, flags=None): - """ - Array-checking 
restype/argtypes. - - An ndpointer instance is used to describe an ndarray in restypes - and argtypes specifications. This approach is more flexible than - using, for example, ``POINTER(c_double)``, since several restrictions - can be specified, which are verified upon calling the ctypes function. - These include data type, number of dimensions, shape and flags. If a - given array does not satisfy the specified restrictions, - a ``TypeError`` is raised. - - Parameters - ---------- - dtype : data-type, optional - Array data-type. - ndim : int, optional - Number of array dimensions. - shape : tuple of ints, optional - Array shape. - flags : str or tuple of str - Array flags; may be one or more of: - - - C_CONTIGUOUS / C / CONTIGUOUS - - F_CONTIGUOUS / F / FORTRAN - - OWNDATA / O - - WRITEABLE / W - - ALIGNED / A - - UPDATEIFCOPY / U - - Returns - ------- - klass : ndpointer type object - A type object, which is an ``_ndtpr`` instance containing - dtype, ndim, shape and flags information. - - Raises - ------ - TypeError - If a given array does not satisfy the specified restrictions. - - Examples - -------- - >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, - ... ndim=1, - ... flags='C_CONTIGUOUS')] - ... #doctest: +SKIP - >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) - ... 
#doctest: +SKIP - - """ - - if dtype is not None: - dtype = _dtype(dtype) - num = None - if flags is not None: - if isinstance(flags, str): - flags = flags.split(',') - elif isinstance(flags, (int, integer)): - num = flags - flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): - num = flags.num - flags = _flags_fromnum(num) - if num is None: - try: - flags = [x.strip().upper() for x in flags] - except: - raise TypeError, "invalid flags specification" - num = _num_fromflags(flags) - try: - return _pointer_type_cache[(dtype, ndim, shape, num)] - except KeyError: - pass - if dtype is None: - name = 'any' - elif dtype.names: - name = str(id(dtype)) - else: - name = dtype.str - if ndim is not None: - name += "_%dd" % ndim - if shape is not None: - try: - strshape = [str(x) for x in shape] - except TypeError: - strshape = [str(shape)] - shape = (shape,) - shape = tuple(shape) - name += "_"+"x".join(strshape) - if flags is not None: - name += "_"+"_".join(flags) - else: - flags = [] - klass = type("ndpointer_%s"%name, (_ndptr,), - {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) - _pointer_type_cache[dtype] = klass - return klass - -if ctypes is not None: - ct = ctypes - ################################################################ - # simple types - - # maps the numpy typecodes like '=2.3 distutils. - # Any changes here should be applied also to fcompiler.compile - # method to support pre Python 2.3 distutils. 
- if not sources: - return [] - # FIXME:RELATIVE_IMPORT - if sys.version_info[0] < 3: - from fcompiler import FCompiler - else: - from numpy.distutils.fcompiler import FCompiler - if isinstance(self, FCompiler): - display = [] - for fc in ['f77','f90','fix']: - fcomp = getattr(self,'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - # build any sources in same order as they were originally specified - # especially important for fortran .f90 files using modules - if isinstance(self, FCompiler): - objects_to_build = build.keys() - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - else: - for obj, (src, ext) in build.items(): - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from `distutils.cmd.Command`. - ignore : sequence of str, optional - List of `CCompiler` commands (without ``'set_'``) that should not be - altered. 
Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name,value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = compiler.executables.keys() - for key in ['version','libraries','library_dirs', - 'object_switch','compile_switch', - 'include_dirs','define','undef','rpath','link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler,key): - v = getattr(compiler, key) - mx = max(mx,len(key)) - props.append((key,repr(v))) - lines = [] - format = '%-' + repr(mx+1) + 's = %s' - for prop in props: - lines.append(format % prop) - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. 
- - """ - if 0: - for attrname in ['include_dirs','define','undef', - 'libraries','library_dirs', - 'rpath','link_objects']: - attr = getattr(self,attrname,None) - if not attr: - continue - log.info("compiler '%s' is set to %s" % (attrname,attr)) - try: - self.get_version() - except: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls `distutils.sysconfig.customize_compiler` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. 
- try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self,'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a,b)]\ - + self.compiler[1:] - else: - if hasattr(self,'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - log.warn('Missing compiler_cxx fix for '+self.__class__.__name__) - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. - - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a `CCompiler` class. `matcher` takes a single parameter, - a version string. 
- - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n',' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while 1: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of `distutils.version.LooseVersion`. 
- - """ - if not force and hasattr(self,'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - status, output = exec_command(version_cmd,use_tee=0) - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a `CCompiler` instance. - - """ - if self.compiler_type=='msvc': return self - cxx = copy(self) - cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] - if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler','IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler','IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler','IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['pathcc'] = ('pathccompiler','PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") 
-ccompiler._default_compilers += (('linux.*','intel'), - ('linux.*','intele'), - ('linux.*','intelem'), - ('linux.*','pathcc')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError: - msg = str(get_exception()) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError: - msg = str(get_exception()) - raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - library_dirs = quote_args(library_dirs) - runtime_library_dirs = quote_args(runtime_library_dirs) - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. 
-for _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.'+_cc+'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - -_distutils_gen_preprocess_options = gen_preprocess_options -def gen_preprocess_options (macros, include_dirs): - include_dirs = quote_args(include_dirs) - return _distutils_gen_preprocess_options(macros, include_dirs) -ccompiler.gen_preprocess_options = gen_preprocess_options - -##Fix distutils.util.split_quoted: -# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears -# that removing this fix causes f2py problems on Windows XP (see ticket #723). -# Specifically, on WinXP when gfortran is installed in a directory path, which -# contains spaces, then f2py is unable to find it. -import re -import string -_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) -_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") -_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') -_has_white_re = re.compile(r'\s') -def split_quoted(s): - s = s.strip() - words = [] - pos = 0 - - while s: - m = _wordchars_re.match(s, pos) - end = m.end() - if end == len(s): - words.append(s[:end]) - break - - if s[end] in string.whitespace: # unescaped, unquoted whitespace: now - words.append(s[:end]) # we definitely have a word delimiter - s = s[end:].lstrip() - pos = 0 - - elif s[end] == '\\': # preserve whatever is being escaped; - # will become part of the current word - s = s[:end] + s[end+1:] - pos = end+1 - - else: - if s[end] == "'": # slurp singly-quoted string - m = _squote_re.match(s, end) - elif s[end] == '"': # slurp doubly-quoted string - m = _dquote_re.match(s, end) - else: - raise RuntimeError("this can't happen (bad char '%c')" % s[end]) - - if m is None: - raise ValueError("bad string (mismatched %s quotes?)" % s[end]) - - (beg, end) = m.span() - if _has_white_re.search(s[beg+1:end-1]): - s = s[:beg] + s[beg+1:end-1] + s[end:] - pos = m.end() - 2 - else: - # Keeping quotes when a 
quoted word does not contain - # white-space. XXX: send a patch to distutils - pos = m.end() - - if pos >= len(s): - words.append(s) - break - - return words -ccompiler.split_quoted = split_quoted -##Fix distutils.util.split_quoted: - -# define DISTUTILS_USE_SDK when necessary to workaround distutils/msvccompiler.py bug -msvc_on_amd64() diff --git a/numpy-1.6.2/numpy/distutils/command/__init__.py b/numpy-1.6.2/numpy/distutils/command/__init__.py deleted file mode 100644 index f8f0884da9..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands.""" - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command',globals(),locals(),distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/numpy-1.6.2/numpy/distutils/command/autodist.py b/numpy-1.6.2/numpy/distutils/command/autodist.py deleted file mode 100644 index fe40119efb..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,39 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful.""" - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = """ -#ifndef __cplusplus -static %(inline)s int static_func (void) -{ - return 0; -} -%(inline)s int nostatic_func (void) -{ - return 0; -} -#endif""" - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, 
None) - if st: - return kw - - return '' - -def check_compiler_gcc4(cmd): - """Return True if the C compiler is GCC 4.x.""" - cmd._check_compiler() - body = """ -int -main() -{ -#ifndef __GNUC__ && (__GNUC__ >= 4) -die in an horrible death -#endif -} -""" - return cmd.try_compile(body, None, None) diff --git a/numpy-1.6.2/numpy/distutils/command/bdist_rpm.py b/numpy-1.6.2/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 60e9b57527..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py',setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy-1.6.2/numpy/distutils/command/build.py b/numpy-1.6.2/numpy/distutils/command/build.py deleted file mode 100644 index 5d986570c9..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/build.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler',None, 
"list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy-1.6.2/numpy/distutils/command/build_clib.py b/numpy-1.6.2/numpy/distutils/command/build_clib.py deleted file mode 100644 index d9cfca73e5..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,283 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. -""" - -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ - get_numpy_include_dirs - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:] -# - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ] - - boolean_options = old_build_clib.boolean_options + ['inplace'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - return - - def have_f_sources(self): - for (lib_name, build_info) in 
self.libraries: - if has_f_sources(build_info.get('sources',[])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources',[])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. - self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language',None) - if l and l not in languages: languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - 
filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language','c')=='f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: source_languages.append('c') - if cxx_sources: source_languages.append('c++') - if requiref90: source_languages.append('f90') - elif f_sources: source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends',[]) - if not (self.force or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc',{}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script '\ - 'for fortran compiler: %s' \ - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - 
base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources"\ - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or [] - - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - extra_postargs = build_info.get('extra_compiler_args') or [] - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: self.mkpath(module_build_dir) - - if compiler.compiler_type=='msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - objects = [] - if c_sources: - log.info("compiling C sources") - objects = compiler.compile(c_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile(cxx_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options(\ - module_dirs,module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += 
fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' \ - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - objects.extend(f_objects) - - # assume that default linker is suitable for - # linking Fortran object files - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries',[]) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo[1].get('libraries',[])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy-1.6.2/numpy/distutils/command/build_ext.py b/numpy-1.6.2/numpy/distutils/command/build_ext.py deleted file mode 100644 index f63d5249c6..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,506 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
-""" - -import os -import sys -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import exec_command -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import filter_sources, has_f_sources, \ - has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence, get_build_architecture, \ - msvc_version -from numpy.distutils.command.config_compiler import show_fortran_compilers - -try: - set -except NameError: - from sets import Set as set - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler',None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - - def finalize_options(self): - incl_dirs = self.include_dirs - old_build_ext.finalize_options(self) - if incl_dirs is not None: - self.include_dirs.extend(self.distribution.include_dirs or []) - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' \ - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj('build_clib') - else: - build_clib = self.distribution.get_command_obj('build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - self.compiler.show_customization() - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname,build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,'\ - ' overwriting build_info\n%s... \nwith\n%s...' \ - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname,build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. 
- # Update extension libraries, library_dirs, and macros. - all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries',[]) - c_lib_dirs += binfo.get('library_dirs',[]) - for m in binfo.get('macros',[]): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname,{}).get('source_languages',[]): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - # reset language attribute for choosing proper linker - if 'c++' in ext_languages: - ext_language = 'c++' - elif 'f90' in ext_languages: - ext_language = 'f90' - elif 'f77' in ext_languages: - ext_language = 'f77' - else: - ext_language = 'c' # default - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name,l,ext_language)) - ext.language = ext_language - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution,need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler = self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' 
% - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Make sure that scons based extensions are complete. - if self.inplace: - cmd = self.reinitialize_command('scons') - cmd.inplace = 1 - self.run_command('scons') - - def swig_sources(self, sources): - # Do nothing. Swig sources have beed handled in build_src command. - return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - if not (self.force or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - - - if self.compiler.compiler_type=='msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. - extra_args.append('/Zm1000') - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. 
- if ext.language=='f90': - fcompiler = self._f90_compiler - elif ext.language=='f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = ext.extra_f77_compile_args or [] - fcompiler.extra_f90_compile_args = ext.extra_f90_compile_args or [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" \ - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " \ - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77','f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " \ - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language=='c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " \ - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends':ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - c_objects = [] - if c_sources: - log.info("compiling C sources") - c_objects = self.compiler.compile(c_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile(cxx_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp,os.path.dirname( - self.get_ext_filename(fullname))) 
- - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs,module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. 
- if self.compiler.compiler_type=='msvc': - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs) - - elif ext.language in ['f77','f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language=='c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if sys.version[:3]>='2.3': - kws = {'target_lang':ext.language} - else: - kws = {} - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp,**kws) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: return - - for libname in c_libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir,'%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir,'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - 
fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - s,o = exec_command(['cygpath', '-w', dir], use_tee=False) - if not s: - dir = o - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files (self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs (self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy-1.6.2/numpy/distutils/command/build_py.py b/numpy-1.6.2/numpy/distutils/command/build_py.py deleted file mode 100644 index 4c02e4136b..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,32 +0,0 @@ - -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = build_src.py_modules_dict.keys () - 
old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package,[]) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = filter(is_string, self.py_modules) - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy-1.6.2/numpy/distutils/command/build_scripts.py b/numpy-1.6.2/numpy/distutils/command/build_scripts.py deleted file mode 100644 index 99134f2026..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
-""" - -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy-1.6.2/numpy/distutils/command/build_src.py b/numpy-1.6.2/numpy/distutils/command/build_src.py deleted file mode 100644 index ae29aec0ea..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,810 +0,0 @@ -""" Build swig, f2py, pyrex sources. 
-""" - -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - -def have_pyrex(): - try: - import Pyrex.Compiler.Main - return True - except ImportError: - return False - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import fortran_ext_match, \ - appendpath, is_string, is_sequence, get_cmd -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - fs = open(source, 'r') - try: - ft = open(target, 'w') - try: - for l in fs.readlines(): - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - finally: - ft.close() - finally: - fs.close() - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory 
alongside your pure Python modules"), - ] - - boolean_options = ['force','inplace'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - 
self.swig_cpp = build_ext.swig_cpp - for c in ['swig','swig_opt']: - o = '--'+c.replace('_','-') - v = getattr(build_ext,c,None) - if v: - if getattr(self,c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o,v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data,str): - new_data_files.append(data) - elif isinstance(data,tuple): - d,files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src,d) - funcs = filter(lambda f:hasattr(f, '__call__'), files) - files = filter(lambda f:not hasattr(f, '__call__'), files) - for f in funcs: - if f.func_code.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s,list): - files.extend(s) - elif isinstance(s,str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d,files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - import shutil - template, install_dir, subst_dict = info - template_dir = 
os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - gd = {} - if self.inplace == 1: - top_prefix = '.' 
- build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources',[])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - log.info('building 
extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - - sources = self.template_sources(sources, ext) - - sources = self.swig_sources(sources, ext) - - sources = self.f2py_sources(sources, ext) - - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src]\ - +name.split('.')[:-1])) - self.mkpath(build_dir) - for func in func_sources: - source = func(extension, build_dir) - if not source: - continue - if is_sequence(source): - [log.info(" adding '%s' to sources." 
% (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources,['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources,['.h','.hpp','.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir,os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - fid = open(target_file,'w') - fid.write(outstr) - fid.close() - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - if self.inplace or not have_pyrex(): - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - target_file = os.path.join(target_dir, ext_name + '.c') - depends = [source] + extension.depends - if self.force or newer_group(depends, target_file, 'newer'): - if have_pyrex(): - import Pyrex.Compiler.Main - log.info("pyrexc:> %s" % (target_file)) - self.mkpath(target_dir) - options = Pyrex.Compiler.Main.CompilationOptions( - defaults=Pyrex.Compiler.Main.default_options, - include_path=extension.include_dirs, - output_file=target_file) - pyrex_result = Pyrex.Compiler.Main.compile(source, - options=options) - if pyrex_result.num_errors != 0: - raise DistutilsError("%d errors while compiling %r with Pyrex" \ - % (pyrex_result.num_errors, source)) - elif os.path.isfile(target_file): - log.warn("Pyrex required for compiling %r but not available,"\ - " using old target %r"\ - % (source, target_file)) - else: - raise DistutilsError("Pyrex required for compiling %r"\ - " but notavailable" % (source,)) - return target_file - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = 
os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir,name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir,name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir,name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name,build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options',[])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' 
- depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file,'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - import numpy.f2py - numpy.f2py.run_main(f2py_options - + ['--build-dir',target_dir,source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src]\ - +name.split('.')[:-1])) - target_file = os.path.join(target_dir,ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - import numpy.f2py - numpy.f2py.run_main(f2py_options + ['--lower', - '--build-dir',target_dir]+\ - ['-m',ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - target_c = os.path.join(self.build_src,'fortranobject.c') - target_h = os.path.join(self.build_src,'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if self.build_src not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
\ - % (self.build_src)) - extension.include_dirs.append(self.build_src) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d,'src','fortranobject.c') - source_h = os.path.join(d,'src','fortranobject.h') - if newer(source_c,target_c) or newer(source_h,target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c,target_c) - self.copy_file(source_h,target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']: - filename = os.path.join(target_dir,ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - if is_cpp: - target_ext = '.cpp' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - if is_cpp: - target_ext = '.cpp' - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - target_ext = '.cpp' - else: - log.warn('assuming that %r has c++ swig target' % (source)) - target_file = os.path.join(target_dir,'%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' 
\ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z',re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search -_has_cpp_header = 
re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search - -def get_swig_target(source): - f = open(source,'r') - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - f.close() - return result - -def get_swig_modulename(source): - f = open(source,'r') - f_readlines = getattr(f,'xreadlines',f.readlines) - name = None - for line in f_readlines(): - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - f.close() - return name - -def _find_swig_target(target_dir,name): - for ext in ['.cpp','.c']: - target = os.path.join(target_dir,'%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?'\ - '__user__[\w_]*)',re.I).match - -def get_f2py_modulename(source): - name = None - f = open(source) - f_readlines = getattr(f,'xreadlines',f.readlines) - for line in f_readlines(): - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - f.close() - return name - -########################################## diff --git a/numpy-1.6.2/numpy/distutils/command/config.py b/numpy-1.6.2/numpy/distutils/command/config.py deleted file mode 100644 index 85a86990f7..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/config.py +++ /dev/null @@ -1,460 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson - -import os, signal -import warnings -import sys - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import exec_command -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4 -from numpy.distutils.compat import get_exception - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def try_run(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, lang="c"): - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ - "Usage of try_run is deprecated: please do not \n" \ - "use it anymore, and avoid configuration checks \n" \ - "involving running executable on the target machine.\n" \ - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning) - return old_config.try_run(self, body, headers, include_dirs, libraries, - library_dirs, lang) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc': - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an IOError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print an helpful - # message instead of Error: None. 
- if not self.compiler.initialized: - try: - self.compiler.initialize() - except IOError: - e = get_exception() - msg = """\ -Could not initialize compiler instance: do you have Visual Studio -installed ? If you are trying to build with mingw, please use python setup.py -build -c mingw32 instead ). If you have Visual Studio installed, check it is -correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for -2.5, etc...). Original exception was: %s, and the Compiler -class was %s -============================================================================""" \ - % (e, self.compiler.__class__.__name__) - print ("""\ -============================================================================""") - raise distutils.errors.DistutilsPlatformError(msg) - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self,mth,lang,args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77','f90']: - self.compiler = self.fcompiler - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError,CompileError): - msg = str(get_exception()) - self.compiler = save_compiler - raise CompileError - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - return self._wrap_method(old_config._compile,lang, - (body, headers, include_dirs, lang)) - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77','f90']: - lang = 'c' # always use system linker 
when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - s,o = exec_command(['cygpath', '-w', d], - use_tee=False) - if not s: d = o - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir,'%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir,'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir,'%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link,lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", - [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = """ -int main() -{ -#ifndef %s - (void) %s; -#endif - ; - return 0; -}""" % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = """ -int main() -{ -#if %s -#else 
-#error false or undefined macro -#endif - ; - return 0; -}""" % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = r""" -int main() { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; -} -""" % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = r""" -typedef %(type)s npy_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; -} -""" - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = r""" -typedef %(type)s npy_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; -} -""" - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = r""" -typedef %(type)s npy_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; -} -""" - - # The principle is simple: we first find low and high bounds of size - # for the type, where 
low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. - # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. - body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. 
- - Arguments - --------- - funcs: seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - libraru_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionay, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. - body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_compiler_gcc4(self): - """Return True if the C compiler is gcc >= 4.""" - return check_compiler_gcc4(self) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c"): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ - "Usage of get_output is deprecated: please do not \n" \ - "use it anymore, and avoid configuration checks \n" \ - "involving running executable on the target machine.\n" \ - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning) - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - exitstatus, output = exec_command(exe, execute_in='.') - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout(object): - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout diff --git a/numpy-1.6.2/numpy/distutils/command/config_compiler.py b/numpy-1.6.2/numpy/distutils/command/config_compiler.py deleted file mode 100644 index e7fee94dfb..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,123 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=[]): - # Using cache to prevent infinite recursion - if _cache: return - _cache.append(1) - from numpy.distutils.fcompiler import 
show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. - """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=',None,"specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=',None,"specify F77 compiler flags"), - ('f90flags=',None,"specify F90 compiler flags"), - ('opt=',None,"specify optimization flags"), - ('arch=',None,"specify architecture specific optimization flags"), - ('debug','g',"compile with debugging information"), - ('noopt',None,"compile without optimization"), - ('noarch',None,"compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler',None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug','noopt','noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c,a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different 
--%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c,a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=',None,"specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c,a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c,a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. - return diff --git a/numpy-1.6.2/numpy/distutils/command/develop.py b/numpy-1.6.2/numpy/distutils/command/develop.py deleted file mode 100644 index 1677066719..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. -""" - -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. 
- self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy-1.6.2/numpy/distutils/command/egg_info.py b/numpy-1.6.2/numpy/distutils/command/egg_info.py deleted file mode 100644 index 687faf080a..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,9 +0,0 @@ -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. - self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy-1.6.2/numpy/distutils/command/install.py b/numpy-1.6.2/numpy/distutils/command/install.py deleted file mode 100644 index ad3cc507db..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/install.py +++ /dev/null @@ -1,77 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -old_install = old_install_mod.install -from distutils.file_util import write_file - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - # Explicit request for old-style install? 
Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return old_install_mod._install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. - # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__','') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - old_install_mod._install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. 
- f = open(self.record,'r') - lines = [] - need_rewrite = False - for l in f.readlines(): - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - f.close() - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy-1.6.2/numpy/distutils/command/install_clib.py b/numpy-1.6.2/numpy/distutils/command/install_clib.py deleted file mode 100644 index 638d4beacb..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy-1.6.2/numpy/distutils/command/install_data.py b/numpy-1.6.2/numpy/distutils/command/install_data.py deleted file mode 100644 index 0a2e68ae19..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys 
-have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy-1.6.2/numpy/distutils/command/install_headers.py b/numpy-1.6.2/numpy/distutils/command/install_headers.py deleted file mode 100644 index 58ace10644..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header,tuple): - # Kind of a hack, but I don't know where else to change this... 
- if header[0] == 'numpy.core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy-1.6.2/numpy/distutils/command/scons.py b/numpy-1.6.2/numpy/distutils/command/scons.py deleted file mode 100644 index d7bbec35e5..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/scons.py +++ /dev/null @@ -1,589 +0,0 @@ -import os -import sys -import os.path -from os.path import join as pjoin, dirname as pdirname - -from distutils.errors import DistutilsPlatformError -from distutils.errors import DistutilsExecError, DistutilsSetupError - -from numpy.distutils.command.build_ext import build_ext as old_build_ext -from numpy.distutils.ccompiler import CCompiler, new_compiler -from numpy.distutils.fcompiler import FCompiler, new_fcompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils import log -from numpy.distutils.misc_util import is_bootstrapping, get_cmd -from numpy.distutils.misc_util import get_numpy_include_dirs as _incdir -from numpy.distutils.compat import get_exception - -# A few notes: -# - numscons is not mandatory to build numpy, so we cannot import it here. -# Any numscons import has to happen once we check numscons is available and -# is required for the build (call through setupscons.py or native numscons -# build). -def get_scons_build_dir(): - """Return the top path where everything produced by scons will be put. - - The path is relative to the top setup.py""" - from numscons import get_scons_build_dir - return get_scons_build_dir() - -def get_scons_pkg_build_dir(pkg): - """Return the build directory for the given package (foo.bar). 
- - The path is relative to the top setup.py""" - from numscons.core.utils import pkg_to_path - return pjoin(get_scons_build_dir(), pkg_to_path(pkg)) - -def get_scons_configres_dir(): - """Return the top path where everything produced by scons will be put. - - The path is relative to the top setup.py""" - from numscons import get_scons_configres_dir - return get_scons_configres_dir() - -def get_scons_configres_filename(): - """Return the top path where everything produced by scons will be put. - - The path is relative to the top setup.py""" - from numscons import get_scons_configres_filename - return get_scons_configres_filename() - -def get_scons_local_path(): - """This returns the full path where scons.py for scons-local is located.""" - from numscons import get_scons_path - return get_scons_path() - -def _get_top_dir(pkg): - # XXX: this mess is necessary because scons is launched per package, and - # has no knowledge outside its build dir, which is package dependent. If - # one day numscons does not launch one process/package, this will be - # unnecessary. 
- from numscons import get_scons_build_dir - from numscons.core.utils import pkg_to_path - scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg)) - n = scdir.count(os.sep) - return os.sep.join([os.pardir for i in range(n+1)]) - -def get_distutils_libdir(cmd, pkg): - """Returns the path where distutils install libraries, relatively to the - scons build directory.""" - return pjoin(_get_top_dir(pkg), cmd.build_lib) - -def get_distutils_clibdir(cmd, pkg): - """Returns the path where distutils put pure C libraries.""" - return pjoin(_get_top_dir(pkg), cmd.build_clib) - -def get_distutils_install_prefix(pkg, inplace): - """Returns the installation path for the current package.""" - from numscons.core.utils import pkg_to_path - if inplace == 1: - return pkg_to_path(pkg) - else: - install_cmd = get_cmd('install').get_finalized_command('install') - return pjoin(install_cmd.install_libbase, pkg_to_path(pkg)) - -def get_python_exec_invoc(): - """This returns the python executable from which this file is invocated.""" - # Do we need to take into account the PYTHONPATH, in a cross platform way, - # that is the string returned can be executed directly on supported - # platforms, and the sys.path of the executed python should be the same - # than the caller ? This may not be necessary, since os.system is said to - # take into accound os.environ. This actually also works for my way of - # using "local python", using the alias facility of bash. - return sys.executable - -def get_numpy_include_dirs(sconscript_path): - """Return include dirs for numpy. 
- - The paths are relatively to the setup.py script path.""" - from numscons import get_scons_build_dir - scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path)) - n = scdir.count(os.sep) - - dirs = _incdir() - rdirs = [] - for d in dirs: - rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d)) - return rdirs - -def dirl_to_str(dirlist): - """Given a list of directories, returns a string where the paths are - concatenated by the path separator. - - example: ['foo/bar', 'bar/foo'] will return 'foo/bar:bar/foo'.""" - return os.pathsep.join(dirlist) - -def dist2sconscc(compiler): - """This converts the name passed to distutils to scons name convention (C - compiler). compiler should be a CCompiler instance. - - Example: - --compiler=intel -> intelc""" - compiler_type = compiler.compiler_type - if compiler_type == 'msvc': - return 'msvc' - elif compiler_type == 'intel': - return 'intelc' - else: - return compiler.compiler[0] - -def dist2sconsfc(compiler): - """This converts the name passed to distutils to scons name convention - (Fortran compiler). The argument should be a FCompiler instance. - - Example: - --fcompiler=intel -> ifort on linux, ifl on windows""" - if compiler.compiler_type == 'intel': - #raise NotImplementedError('FIXME: intel fortran compiler name ?') - return 'ifort' - elif compiler.compiler_type == 'gnu': - return 'g77' - elif compiler.compiler_type == 'gnu95': - return 'gfortran' - elif compiler.compiler_type == 'sun': - return 'sunf77' - else: - # XXX: Just give up for now, and use generic fortran compiler - return 'fortran' - -def dist2sconscxx(compiler): - """This converts the name passed to distutils to scons name convention - (C++ compiler). 
The argument should be a Compiler instance.""" - if compiler.compiler_type == 'msvc': - return compiler.compiler_type - - return compiler.compiler_cxx[0] - -def get_compiler_executable(compiler): - """For any give CCompiler instance, this gives us the name of C compiler - (the actual executable). - - NOTE: does NOT work with FCompiler instances.""" - # Geez, why does distutils has no common way to get the compiler name... - if compiler.compiler_type == 'msvc': - # this is harcoded in distutils... A bit cleaner way would be to - # initialize the compiler instance and then get compiler.cc, but this - # may be costly: we really just want a string. - # XXX: we need to initialize the compiler anyway, so do not use - # hardcoded string - #compiler.initialize() - #print compiler.cc - return 'cl.exe' - else: - return compiler.compiler[0] - -def get_f77_compiler_executable(compiler): - """For any give FCompiler instance, this gives us the name of F77 compiler - (the actual executable).""" - return compiler.compiler_f77[0] - -def get_cxxcompiler_executable(compiler): - """For any give CCompiler instance, this gives us the name of CXX compiler - (the actual executable). - - NOTE: does NOT work with FCompiler instances.""" - # Geez, why does distutils has no common way to get the compiler name... - if compiler.compiler_type == 'msvc': - # this is harcoded in distutils... A bit cleaner way would be to - # initialize the compiler instance and then get compiler.cc, but this - # may be costly: we really just want a string. 
- # XXX: we need to initialize the compiler anyway, so do not use - # hardcoded string - #compiler.initialize() - #print compiler.cc - return 'cl.exe' - else: - return compiler.compiler_cxx[0] - -def get_tool_path(compiler): - """Given a distutils.ccompiler.CCompiler class, returns the path of the - toolset related to C compilation.""" - fullpath_exec = find_executable(get_compiler_executable(compiler)) - if fullpath_exec: - fullpath = pdirname(fullpath_exec) - else: - raise DistutilsSetupError("Could not find compiler executable info for scons") - return fullpath - -def get_f77_tool_path(compiler): - """Given a distutils.ccompiler.FCompiler class, returns the path of the - toolset related to F77 compilation.""" - fullpath_exec = find_executable(get_f77_compiler_executable(compiler)) - if fullpath_exec: - fullpath = pdirname(fullpath_exec) - else: - raise DistutilsSetupError("Could not find F77 compiler executable "\ - "info for scons") - return fullpath - -def get_cxx_tool_path(compiler): - """Given a distutils.ccompiler.CCompiler class, returns the path of the - toolset related to C compilation.""" - fullpath_exec = find_executable(get_cxxcompiler_executable(compiler)) - if fullpath_exec: - fullpath = pdirname(fullpath_exec) - else: - raise DistutilsSetupError("Could not find compiler executable info for scons") - return fullpath - -def protect_path(path): - """Convert path (given as a string) to something the shell will have no - problem to understand (space, etc... problems).""" - if path: - # XXX: to this correctly, this is totally bogus for now (does not check for - # already quoted path, for example). - return '"' + path + '"' - else: - return '""' - -def parse_package_list(pkglist): - return pkglist.split(",") - -def find_common(seq1, seq2): - """Given two list, return the index of the common items. - - The index are relative to seq1. 
- - Note: do not handle duplicate items.""" - dict2 = dict([(i, None) for i in seq2]) - - return [i for i in range(len(seq1)) if dict2.has_key(seq1[i])] - -def select_packages(sconspkg, pkglist): - """Given a list of packages in pkglist, return the list of packages which - match this list.""" - common = find_common(sconspkg, pkglist) - if not len(common) == len(pkglist): - msg = "the package list contains a package not found in "\ - "the current list. The current list is %s" % sconspkg - raise ValueError(msg) - return common - -def check_numscons(minver): - """Check that we can use numscons. - - minver is a 3 integers tuple which defines the min version.""" - try: - import numscons - except ImportError: - e = get_exception() - raise RuntimeError("importing numscons failed (error was %s), using " \ - "scons within distutils is not possible without " - "this package " % str(e)) - - try: - # version_info was added in 0.10.0 - from numscons import version_info - # Stupid me used string instead of numbers in version_info in - # dev versions of 0.10.0 - if isinstance(version_info[0], str): - raise ValueError("Numscons %s or above expected " \ - "(detected 0.10.0)" % str(minver)) - # Stupid me used list instead of tuple in numscons - version_info = tuple(version_info) - if version_info[:3] < minver: - raise ValueError("Numscons %s or above expected (got %s) " - % (str(minver), str(version_info[:3]))) - except ImportError: - raise RuntimeError("You need numscons >= %s to build numpy "\ - "with numscons (imported numscons path " \ - "is %s)." % (minver, numscons.__file__)) - -# XXX: this is a giantic mess. Refactor this at some point. -class scons(old_build_ext): - # XXX: add an option to the scons command for configuration (auto/force/cache). - description = "Scons builder" - - library_options = [ - ('with-perflib=', None, - 'Specify which performance library to use for BLAS/LAPACK/etc...' 
\ - 'Examples: mkl/atlas/sunper/accelerate'), - ('with-mkl-lib=', None, 'TODO'), - ('with-mkl-include=', None, 'TODO'), - ('with-mkl-libraries=', None, 'TODO'), - ('with-atlas-lib=', None, 'TODO'), - ('with-atlas-include=', None, 'TODO'), - ('with-atlas-libraries=', None, 'TODO') - ] - user_options = [ - ('jobs=', 'j', "specify number of worker threads when executing" \ - "scons"), - ('inplace', 'i', 'If specified, build in place.'), - ('import-env', 'e', 'If specified, import user environment into scons env["ENV"].'), - ('bypass', 'b', 'Bypass distutils compiler detection (experimental).'), - ('scons-tool-path=', None, 'specify additional path '\ - '(absolute) to look for scons tools'), - ('silent=', None, 'specify whether scons output should less verbose'\ - '(1), silent (2), super silent (3) or not (0, default)'), - ('log-level=', None, 'specify log level for numscons. Any value ' \ - 'valid for the logging python module is valid'), - ('package-list=', None, - 'If specified, only run scons on the given '\ - 'packages (example: --package-list=scipy.cluster). If empty, '\ - 'no package is built'), - ('fcompiler=', None, "specify the Fortran compiler type"), - ('compiler=', None, "specify the C compiler type"), - ('cxxcompiler=', None, - "specify the C++ compiler type (same as C by default)"), - ('debug', 'g', - "compile/link with debugging information"), - ] + library_options - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.build_clib = None - - self.debug = 0 - - self.compiler = None - self.cxxcompiler = None - self.fcompiler = None - - self.jobs = None - self.silent = 0 - self.import_env = 0 - self.scons_tool_path = '' - # If true, we bypass distutils to find the c compiler altogether. This - # is to be used in desperate cases (like incompatible visual studio - # version). 
- self._bypass_distutils_cc = False - - # scons compilers - self.scons_compiler = None - self.scons_compiler_path = None - self.scons_fcompiler = None - self.scons_fcompiler_path = None - self.scons_cxxcompiler = None - self.scons_cxxcompiler_path = None - - self.package_list = None - self.inplace = 0 - self.bypass = 0 - - # Only critical things - self.log_level = 50 - - # library options - self.with_perflib = [] - self.with_mkl_lib = [] - self.with_mkl_include = [] - self.with_mkl_libraries = [] - self.with_atlas_lib = [] - self.with_atlas_include = [] - self.with_atlas_libraries = [] - - def _init_ccompiler(self, compiler_type): - # XXX: The logic to bypass distutils is ... not so logic. - if compiler_type == 'msvc': - self._bypass_distutils_cc = True - try: - distutils_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - distutils_compiler.customize(self.distribution) - # This initialization seems necessary, sometimes, for find_executable to work... 
- if hasattr(distutils_compiler, 'initialize'): - distutils_compiler.initialize() - self.scons_compiler = dist2sconscc(distutils_compiler) - self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler)) - except DistutilsPlatformError: - e = get_exception() - if not self._bypass_distutils_cc: - raise e - else: - self.scons_compiler = compiler_type - - def _init_fcompiler(self, compiler_type): - self.fcompiler = new_fcompiler(compiler = compiler_type, - verbose = self.verbose, - dry_run = self.dry_run, - force = self.force) - - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - self.scons_fcompiler = dist2sconsfc(self.fcompiler) - self.scons_fcompiler_path = protect_path(get_f77_tool_path(self.fcompiler)) - - def _init_cxxcompiler(self, compiler_type): - cxxcompiler = new_compiler(compiler = compiler_type, - verbose = self.verbose, - dry_run = self.dry_run, - force = self.force) - if cxxcompiler is not None: - cxxcompiler.customize(self.distribution, need_cxx = 1) - cxxcompiler.customize_cmd(self) - self.cxxcompiler = cxxcompiler.cxx_compiler() - try: - get_cxx_tool_path(self.cxxcompiler) - except DistutilsSetupError: - self.cxxcompiler = None - - if self.cxxcompiler: - self.scons_cxxcompiler = dist2sconscxx(self.cxxcompiler) - self.scons_cxxcompiler_path = protect_path(get_cxx_tool_path(self.cxxcompiler)) - - def finalize_options(self): - old_build_ext.finalize_options(self) - - self.sconscripts = [] - self.pre_hooks = [] - self.post_hooks = [] - self.pkg_names = [] - self.pkg_paths = [] - - if self.distribution.has_scons_scripts(): - for i in self.distribution.scons_data: - self.sconscripts.append(i.scons_path) - self.pre_hooks.append(i.pre_hook) - self.post_hooks.append(i.post_hook) - self.pkg_names.append(i.parent_name) - self.pkg_paths.append(i.pkg_path) - # This crap is needed to get the build_clib - # directory - build_clib_cmd = get_cmd("build_clib").get_finalized_command("build_clib") - self.build_clib = 
build_clib_cmd.build_clib - - if not self.cxxcompiler: - self.cxxcompiler = self.compiler - - # To avoid trouble, just don't do anything if no sconscripts are used. - # This is useful when for example f2py uses numpy.distutils, because - # f2py does not pass compiler information to scons command, and the - # compilation setup below can crash in some situation. - if len(self.sconscripts) > 0: - if self.bypass: - self.scons_compiler = self.compiler - self.scons_fcompiler = self.fcompiler - self.scons_cxxcompiler = self.cxxcompiler - else: - # Try to get the same compiler than the ones used by distutils: this is - # non trivial because distutils and scons have totally different - # conventions on this one (distutils uses PATH from user's environment, - # whereas scons uses standard locations). The way we do it is once we - # got the c compiler used, we use numpy.distutils function to get the - # full path, and add the path to the env['PATH'] variable in env - # instance (this is done in numpy.distutils.scons module). - - self._init_ccompiler(self.compiler) - self._init_fcompiler(self.fcompiler) - self._init_cxxcompiler(self.cxxcompiler) - - if self.package_list: - self.package_list = parse_package_list(self.package_list) - - def _call_scons(self, scons_exec, sconscript, pkg_name, pkg_path, bootstrapping): - # XXX: when a scons script is missing, scons only prints warnings, and - # does not return a failure (status is 0). We have to detect this from - # distutils (this cannot work for recursive scons builds...) - - # XXX: passing everything at command line may cause some trouble where - # there is a size limitation ? What is the standard solution in thise - # case ? 
- - cmd = [scons_exec, "-f", sconscript, '-I.'] - if self.jobs: - cmd.append(" --jobs=%d" % int(self.jobs)) - if self.inplace: - cmd.append("inplace=1") - cmd.append('scons_tool_path="%s"' % self.scons_tool_path) - cmd.append('src_dir="%s"' % pdirname(sconscript)) - cmd.append('pkg_path="%s"' % pkg_path) - cmd.append('pkg_name="%s"' % pkg_name) - cmd.append('log_level=%s' % self.log_level) - #cmd.append('distutils_libdir=%s' % protect_path(pjoin(self.build_lib, - # pdirname(sconscript)))) - cmd.append('distutils_libdir=%s' % - protect_path(get_distutils_libdir(self, pkg_name))) - cmd.append('distutils_clibdir=%s' % - protect_path(get_distutils_clibdir(self, pkg_name))) - prefix = get_distutils_install_prefix(pkg_name, self.inplace) - cmd.append('distutils_install_prefix=%s' % protect_path(prefix)) - - if not self._bypass_distutils_cc: - cmd.append('cc_opt=%s' % self.scons_compiler) - if self.scons_compiler_path: - cmd.append('cc_opt_path=%s' % self.scons_compiler_path) - else: - cmd.append('cc_opt=%s' % self.scons_compiler) - - cmd.append('debug=%s' % self.debug) - - if self.scons_fcompiler: - cmd.append('f77_opt=%s' % self.scons_fcompiler) - if self.scons_fcompiler_path: - cmd.append('f77_opt_path=%s' % self.scons_fcompiler_path) - - if self.scons_cxxcompiler: - cmd.append('cxx_opt=%s' % self.scons_cxxcompiler) - if self.scons_cxxcompiler_path: - cmd.append('cxx_opt_path=%s' % self.scons_cxxcompiler_path) - - cmd.append('include_bootstrap=%s' % dirl_to_str(get_numpy_include_dirs(sconscript))) - cmd.append('bypass=%s' % self.bypass) - cmd.append('import_env=%s' % self.import_env) - if self.silent: - if int(self.silent) == 2: - cmd.append('-Q') - elif int(self.silent) == 3: - cmd.append('-s') - cmd.append('silent=%d' % int(self.silent)) - cmd.append('bootstrapping=%d' % bootstrapping) - cmdstr = ' '.join(cmd) - if int(self.silent) < 1: - log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr) - else: - log.info("======== Executing scons command for 
pkg %s =========", pkg_name) - st = os.system(cmdstr) - if st: - #print "status is %d" % st - msg = "Error while executing scons command." - msg += " See above for more information.\n" - msg += """\ -If you think it is a problem in numscons, you can also try executing the scons -command with --log-level option for more detailed output of what numscons is -doing, for example --log-level=0; the lowest the level is, the more detailed -the output it.""" - raise DistutilsExecError(msg) - - def run(self): - if len(self.sconscripts) < 1: - # nothing to do, just leave it here. - return - - check_numscons(minver=(0, 11, 0)) - - if self.package_list is not None: - id = select_packages(self.pkg_names, self.package_list) - sconscripts = [self.sconscripts[i] for i in id] - pre_hooks = [self.pre_hooks[i] for i in id] - post_hooks = [self.post_hooks[i] for i in id] - pkg_names = [self.pkg_names[i] for i in id] - pkg_paths = [self.pkg_paths[i] for i in id] - else: - sconscripts = self.sconscripts - pre_hooks = self.pre_hooks - post_hooks = self.post_hooks - pkg_names = self.pkg_names - pkg_paths = self.pkg_paths - - if is_bootstrapping(): - bootstrapping = 1 - else: - bootstrapping = 0 - - scons_exec = get_python_exec_invoc() - scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py')) - - for sconscript, pre_hook, post_hook, pkg_name, pkg_path in zip(sconscripts, - pre_hooks, post_hooks, - pkg_names, pkg_paths): - if pre_hook: - pre_hook() - - if sconscript: - self._call_scons(scons_exec, sconscript, pkg_name, pkg_path, bootstrapping) - - if post_hook: - post_hook(**{'pkg_name': pkg_name, 'scons_cmd' : self}) - diff --git a/numpy-1.6.2/numpy/distutils/command/sdist.py b/numpy-1.6.2/numpy/distutils/command/sdist.py deleted file mode 100644 index 62fce95744..0000000000 --- a/numpy-1.6.2/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from 
distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h,str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy-1.6.2/numpy/distutils/compat.py b/numpy-1.6.2/numpy/distutils/compat.py deleted file mode 100644 index 1c37dc2b9a..0000000000 --- a/numpy-1.6.2/numpy/distutils/compat.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Small modules to cope with python 2 vs 3 incompatibilities inside -numpy.distutils -""" -import sys - -def get_exception(): - return sys.exc_info()[1] diff --git a/numpy-1.6.2/numpy/distutils/conv_template.py b/numpy-1.6.2/numpy/distutils/conv_template.py deleted file mode 100644 index 368cdd4570..0000000000 --- a/numpy-1.6.2/numpy/distutils/conv_template.py +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -""" -takes templated file .xxx.src and produces .xxx file where .xxx is -.i or .c or .h, using the following template rules - -/**begin repeat -- on a line by itself marks the start of a repeated code - segment -/**end repeat**/ -- on a line by itself marks it's end - -After the /**begin repeat and before the */, all the named templates are placed -these should all have the same number of replacements - -Repeat blocks can be nested, with each nested block labeled with its depth, -i.e. -/**begin repeat1 - *.... - */ -/**end repeat1**/ - -When using nested loops, you can optionally exlude particular -combinations of the variables using (inside the comment portion of the inner loop): - - :exclude: var1=value1, var2=value2, ... - -This will exlude the pattern where var1 is value1 and var2 is value2 when -the result is being generated. 
- - -In the main body each replace will use one entry from the list of named replacements - - Note that all #..# forms in a block must have the same number of - comma-separated entries. - -Example: - - An input file containing - - /**begin repeat - * #a = 1,2,3# - * #b = 1,2,3# - */ - - /**begin repeat1 - * #c = ted, jim# - */ - @a@, @b@, @c@ - /**end repeat1**/ - - /**end repeat**/ - - produces - - line 1 "template.c.src" - - /* - ********************************************************************* - ** This file was autogenerated from a template DO NOT EDIT!!** - ** Changes should be made to the original source (.src) file ** - ********************************************************************* - */ - - #line 9 - 1, 1, ted - - #line 9 - 1, 1, jim - - #line 9 - 2, 2, ted - - #line 9 - 2, 2, jim - - #line 9 - 3, 3, ted - - #line 9 - 3, 3, jim - -""" - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -from numpy.distutils.compat import get_exception - -# names for replacement that are already global. -global_names = {} - -# header placed at the front of head processed file -header =\ -""" -/* - ***************************************************************************** - ** This file was autogenerated from a template DO NOT EDIT!!!! ** - ** Changes should be made to the original source (.src) file ** - ***************************************************************************** - */ - -""" -# Parse string for repeat loops -def parse_structure(astr, level): - """ - The returned line number is from the beginning of the string, starting - at zero. Returns an empty list if no loops found. 
- - """ - if level == 0 : - loopbeg = "/**begin repeat" - loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level - - ind = 0 - line = 0 - spanlist = [] - while 1: - start = astr.find(loopbeg, ind) - if start == -1: - break - start2 = astr.find("*/",start) - start2 = astr.find("\n",start2) - fini1 = astr.find(loopend,start2) - fini2 = astr.find("\n",fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) - ind = fini2 - spanlist.sort() - return spanlist - - -def paren_repl(obj): - torep = obj.group(1) - numrep = obj.group(2) - return ','.join([torep]*int(numrep)) - -parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") -plainrep = re.compile(r"([^*]+)\*(\d+)") -def parse_values(astr): - # replaces all occurrences of '(a,b,c)*4' in astr - # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate - # empty values, i.e., ()*4 yields ',,,'. The result is - # split at ',' and a list of values returned. - astr = parenrep.sub(paren_repl, astr) - # replaces occurences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl,x.strip()) - for x in astr.split(',')]) - return astr.split(',') - - -stripast = re.compile(r"\n\s*\*?") -named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") -exclude_vars_re = re.compile(r"(\w*)=(\w*)") -exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : - """Find all named replacements in the header - - Returns a list of dictionaries, one for each loop iteration, - where each key is a name to be substituted and the corresponding - value is the replacement string. - - Also return a list of exclusions. The exclusions are dictionaries - of key value pairs. There can be more than one exclusion. - [{'var1':'value1', 'var2', 'value2'[,...]}, ...] - - """ - # Strip out '\n' and leading '*', if any, in continuation lines. 
- # This should not effect code previous to this change as - # continuation lines were not allowed. - loophead = stripast.sub("", loophead) - # parse out the names and lists of values - names = [] - reps = named_re.findall(loophead) - nsub = None - for rep in reps: - name = rep[0] - vals = parse_values(rep[1]) - size = len(vals) - if nsub is None : - nsub = size - elif nsub != size : - msg = "Mismatch in number of values:\n%s = %s" % (name, vals) - raise ValueError(msg) - names.append((name,vals)) - - - # Find any exclude variables - excludes = [] - - for obj in exclude_re.finditer(loophead): - span = obj.span() - # find next newline - endline = loophead.find('\n', span[1]) - substr = loophead[span[1]:endline] - ex_names = exclude_vars_re.findall(substr) - excludes.append(dict(ex_names)) - - # generate list of dictionaries, one for each template iteration - dlist = [] - if nsub is None : - raise ValueError("No substitution variables found") - for i in range(nsub) : - tmp = {} - for name,vals in names : - tmp[name] = vals[i] - dlist.append(tmp) - return dlist - -replace_re = re.compile(r"@([\w]+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line - - # local function for string replacement, uses env - def replace(match): - name = match.group(1) - try : - val = env[name] - except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) - raise ValueError(msg) - return val - - code = [lineno] - struct = parse_structure(astr, level) - if struct : - # recurse over inner loops - oldend = 0 - newlevel = level + 1 - for sub in struct: - pref = astr[oldend:sub[0]] - head = astr[sub[0]:sub[1]] - text = astr[sub[1]:sub[2]] - oldend = sub[3] - newline = line + sub[4] - code.append(replace_re.sub(replace, pref)) - try : - envlist = parse_loop_header(head) - except ValueError: - e = get_exception() - msg = "line %d: %s" % (newline, e) - raise ValueError(msg) - for newenv in envlist : - newenv.update(env) - newcode = parse_string(text, 
newenv, newlevel, newline) - code.extend(newcode) - suff = astr[oldend:] - code.append(replace_re.sub(replace, suff)) - else : - # replace keys - code.append(replace_re.sub(replace, astr)) - code.append('\n') - return ''.join(code) - -def process_str(astr): - code = [header] - code.extend(parse_string(astr, global_names, 0, 1)) - return ''.join(code) - - -include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" - r"(?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - fid = open(source) - lines = [] - for line in fid.readlines(): - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d,fn) - if os.path.isfile(fn): - print ('Including file',fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - fid.close() - return lines - -def process_file(source): - lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\","\\\\") - try: - code = process_str(''.join(lines)) - except ValueError: - e = get_exception() - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) - return '#line 1 "%s"\n%s' % (sourcefile, code) - - -def unique_key(adict): - # this obtains a unique key given a dictionary - # currently it works by appending together n of the letters of the - # current keys and increasing n until a unique key is found - # -- not particularly quick - allkeys = adict.keys() - done = False - n = 1 - while not done: - newkey = "".join([x[:n] for x in allkeys]) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -if __name__ == "__main__": - - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file,'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname,'w') - - allstr = fid.read() - try: - writestr = process_str(allstr) - except ValueError: - e = get_exception() - raise ValueError("In %s loop at 
%s" % (file, e)) - outfile.write(writestr) diff --git a/numpy-1.6.2/numpy/distutils/core.py b/numpy-1.6.2/numpy/distutils/core.py deleted file mode 100644 index e617589a24..0000000000 --- a/numpy-1.6.2/numpy/distutils/core.py +++ /dev/null @@ -1,227 +0,0 @@ - -import sys -from distutils.core import * - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension -from numpy.distutils.numpy_distribution import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, scons, \ - install_clib -from numpy.distutils.misc_util import get_data_files, is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'scons': scons.scons, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. 
- from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k,v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=[]): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. 
- # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - #raise NotImplementedError("setuptools not supported yet for numpy.scons branch") - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def _exit_interactive_session(_cache=[]): - if _cache: - return # been here - _cache.append(1) - print('-'*72) - raw_input('Press ENTER to close the interactive session..') - print('='*72) - -def setup(**attr): - - if len(sys.argv)<=1 and not attr.get('script_args',[]): - from interactive import interactive_sys_argv - import atexit - atexit.register(_exit_interactive_session) - sys.argv[:] = interactive_sys_argv(sys.argv) - if len(sys.argv)>1: - return setup(**attr) - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config,'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules',[]): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],)) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries 
list contains %r with" - " no build_info" % (item[0],)) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],)) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,)) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,)) - break - libraries.append((lib_name,build_info)) diff --git a/numpy-1.6.2/numpy/distutils/cpuinfo.py b/numpy-1.6.2/numpy/distutils/cpuinfo.py deleted file mode 100644 index a9b2af1080..0000000000 --- a/numpy-1.6.2/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,685 +0,0 @@ -#!/usr/bin/env python -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson -""" - -__all__ = ['cpu'] - -import sys, re, types -import os -if sys.version_info[0] < 3: - from commands import getstatusoutput -else: - from subprocess import getstatusoutput -import warnings -import platform - -from numpy.distutils.compat import get_exception - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, output - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase(object): - """Holds CPU information and provides methods for requiring - the availability of various CPU features. 
- """ - - def _try_call(self,func): - try: - return func() - except: - pass - - def __getattr__(self,name): - if not name.startswith('_'): - if hasattr(self,'_'+name): - attr = getattr(self,'_'+name) - if type(attr) is types.MethodType: - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile('(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return 
re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return 
re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return self.is_Intel() \ - and (self.info[0]['cpu family'] == '6' \ - or self.info[0]['cpu family'] == '15' ) \ - and (self.has_sse3() and not self.has_ssse3())\ - and re.match(r'.*?\blm\b',self.info[0]['flags']) is not None - - def _is_Core2(self): - return self.is_64bit() and self.is_Intel() and \ - re.match(r'.*?Core\(TM\)2\b', \ - self.info[0]['model name']) is not None - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'],re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0,1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return 
self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self,n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except: pass - def __machine(self,n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class DarwinCPUInfo(CPUInfoBase): 
- info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self,n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P

    [\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW',self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5',self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1',self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250',self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2',self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30',self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4',self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10',self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5',self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60',self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80',self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise',self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000',self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire',self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra',self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def _is_cpusparcv8(self): - return 
self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! - import _winreg - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)"\ - "\s+stepping\s+(?P\d+)",re.IGNORECASE) - chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while 1: - try: - proc=_winreg.EnumKey(chnd,pnum) - except _winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=_winreg.OpenKey(chnd,proc) - pidx=0 - while True: - try: - name,value,vtpe=_winreg.EnumValue(phnd,pidx) - except _winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except: - print(sys.exc_value,'(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0,1,2,3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6,7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - 
def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3,5,6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7,8,9,10,11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6,15]) - elif self.is_AMD(): - return self.info[0]['Family'] in [5,6,15] - else: - return False - 
- def _has_sse(self): - if self.is_Intel(): - return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [7,8,9,10,11]) \ - or self.info[0]['Family']==15 - elif self.is_AMD(): - return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [6,7,8,10]) \ - or self.info[0]['Family']==15 - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5,6,15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6,15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print 'CPU information:', -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print '%s=%s' %(name[1:],r), -# else: -# print name[1:], -# print diff --git a/numpy-1.6.2/numpy/distutils/environment.py b/numpy-1.6.2/numpy/distutils/environment.py deleted file mode 100644 index c701bce472..0000000000 --- a/numpy-1.6.2/numpy/distutils/environment.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError(name) - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert = conf_desc - var = self._hook_handler(name, hook) - if envvar is not None: - var = os.environ.get(envvar, var) - 
if confvar is not None and self._conf: - var = self._conf.get(confvar, (None, var))[1] - if convert is not None: - var = convert(var) - return var - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy-1.6.2/numpy/distutils/exec_command.py b/numpy-1.6.2/numpy/distutils/exec_command.py deleted file mode 100644 index e0c3e1c97a..0000000000 --- a/numpy-1.6.2/numpy/distutils/exec_command.py +++ /dev/null @@ -1,596 +0,0 @@ -#!/usr/bin/env python -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Succesfully tested on: - os.name | sys.platform | comments - --------+--------------+---------- - posix | linux2 | Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 - posix | linux2 | Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 - posix | sunos5 | SunOS 5.9, Python 2.2, 2.3.2 - posix | darwin | Darwin 7.2.0, Python 2.3 - nt | win32 | Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 - nt | win32 | Windows 98, Python 2.1.1. Idle 0.8 - nt | win32 | Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. 
redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. - posix | cygwin | Cygwin 98-4.10, Python 2.3.3(cygming special) - nt | win32 | Windows XP, Python 2.3.3 - -Known bugs: -- Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. -""" - -__all__ = ['exec_command','find_executable'] - -import os -import sys -import shlex - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log -from numpy.distutils.compat import get_exception - -from numpy.compat import open_latin1 - -def temp_file_name(): - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt','dos']: - fdir,fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW','PYTHON') - pythonexe = os.path.join(fdir,fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def splitcmdline(line): - import warnings - warnings.warn('splitcmdline is deprecated; use shlex.split', - DeprecationWarning) - return shlex.split(line) - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. 
- """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH',os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt','dos','os2']: - fn,ext = os.path.splitext(exe) - extra_suffixes = ['.exe','.com','.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.good('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {} - for name in names: - env[name] = os.environ.get(name) - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name,value in env.items(): - os.environ[name] = value or '' - -def exec_command( command, - execute_in='', use_shell=None, use_tee = None, - _with_python = 1, - **env ): - """ Return (status,output) of executed command. - - command is a concatenated string of executable and arguments. - The output contains both stdout and stderr messages. - The following special keyword arguments can be used: - use_shell - execute `sh -c command` - use_tee - pipe the output of command through tee - execute_in - before run command `cd execute_in` and after `cd -`. - - On NT, DOS systems the returned status is correct for external commands. 
- Wild cards will not work for non-posix systems or when use_shell=0. - """ - log.debug('exec_command(%r,%s)' % (command,\ - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( env.keys() ) - _update_environment( **env ) - - try: - # _exec_command is robust but slow, it relies on - # usable sys.std*.fileno() descriptors. If they - # are bad (like in win32 Idle, PyCrust environments) - # then _exec_command_python (even slower) - # will be used as a last resort. - # - # _exec_command_posix uses os.system and is faster - # but not on all platforms os.system will return - # a correct status. 
- if _with_python and (0 or sys.__stdout__.fileno()==-1): - st = _exec_command_python(command, - exec_command_dir = exec_dir, - **env) - elif os.name=='posix': - st = _exec_command_posix(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - else: - st = _exec_command(command, use_shell=use_shell, - use_tee=use_tee,**env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - -def _exec_command_posix( command, - use_shell = None, - use_tee = None, - **env ): - log.debug('_exec_command_posix(...)') - - if is_sequence(command): - command_str = ' '.join(list(command)) - else: - command_str = command - - tmpfile = temp_file_name() - stsfile = None - if use_tee: - stsfile = temp_file_name() - filter = '' - if use_tee == 2: - filter = r'| tr -cd "\n" | tr "\n" "."; echo' - command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\ - % (command_str,stsfile,tmpfile,filter) - else: - stsfile = temp_file_name() - command_posix = '( %s ; echo $? 
> %s ) > %s 2>&1'\ - % (command_str,stsfile,tmpfile) - #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile) - - log.debug('Running os.system(%r)' % (command_posix)) - status = os.system(command_posix) - - if use_tee: - if status: - # if command_tee fails then fall back to robust exec_command - log.warn('_exec_command_posix failed (status=%s)' % status) - return _exec_command(command, use_shell=use_shell, **env) - - if stsfile is not None: - f = open_latin1(stsfile,'r') - status_text = f.read() - status = int(status_text) - f.close() - os.remove(stsfile) - - f = open_latin1(tmpfile,'r') - text = f.read() - f.close() - os.remove(tmpfile) - - if text[-1:]=='\n': - text = text[:-1] - - return status, text - - -def _exec_command_python(command, - exec_command_dir='', **env): - log.debug('_exec_command_python(...)') - - python_exe = get_pythonexe() - cmdfile = temp_file_name() - stsfile = temp_file_name() - outfile = temp_file_name() - - f = open(cmdfile,'w') - f.write('import os\n') - f.write('import sys\n') - f.write('sys.path.insert(0,%r)\n' % (exec_command_dir)) - f.write('from exec_command import exec_command\n') - f.write('del sys.path[0]\n') - f.write('cmd = %r\n' % command) - f.write('os.environ = %r\n' % (os.environ)) - f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env)) - f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile)) - f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile)) - f.close() - - cmd = '%s %s' % (python_exe, cmdfile) - status = os.system(cmd) - if status: - raise RuntimeError("%r failed" % (cmd,)) - os.remove(cmdfile) - - f = open_latin1(stsfile,'r') - status = int(f.read()) - f.close() - os.remove(stsfile) - - f = open_latin1(outfile,'r') - text = f.read() - f.close() - os.remove(outfile) - - return status, text - -def quote_arg(arg): - if arg[0]!='"' and ' ' in arg: - return '"%s"' % arg - return arg - -def _exec_command( command, use_shell=None, use_tee = None, **env ): - 
log.debug('_exec_command(...)') - - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - using_command = 0 - if use_shell: - # We use shell (unless use_shell==0) so that wildcards can be - # used. - sh = os.environ.get('SHELL','/bin/sh') - if is_sequence(command): - argv = [sh,'-c',' '.join(list(command))] - else: - argv = [sh,'-c',command] - else: - # On NT, DOS we avoid using command.com as it's exit status is - # not related to the exit status of a command. - if is_sequence(command): - argv = command[:] - else: - argv = shlex.split(command) - - if hasattr(os,'spawnvpe'): - spawn_command = os.spawnvpe - else: - spawn_command = os.spawnve - argv[0] = find_executable(argv[0]) or argv[0] - if not os.path.isfile(argv[0]): - log.warn('Executable %s does not exist' % (argv[0])) - if os.name in ['nt','dos']: - # argv[0] might be internal command - argv = [os.environ['COMSPEC'],'/C'] + argv - using_command = 1 - - # sys.__std*__ is used instead of sys.std* because environments - # like IDLE, PyCrust, etc overwrite sys.std* commands. - so_fileno = sys.__stdout__.fileno() - se_fileno = sys.__stderr__.fileno() - so_flush = sys.__stdout__.flush - se_flush = sys.__stderr__.flush - so_dup = os.dup(so_fileno) - se_dup = os.dup(se_fileno) - - outfile = temp_file_name() - fout = open(outfile,'w') - if using_command: - errfile = temp_file_name() - ferr = open(errfile,'w') - - log.debug('Running %s(%s,%r,%r,os.environ)' \ - % (spawn_command.__name__,os.P_WAIT,argv[0],argv)) - - argv0 = argv[0] - if not using_command: - argv[0] = quote_arg(argv0) - - so_flush() - se_flush() - os.dup2(fout.fileno(),so_fileno) - if using_command: - #XXX: disabled for now as it does not work from cmd under win32. 
- # Tests fail on msys - os.dup2(ferr.fileno(),se_fileno) - else: - os.dup2(fout.fileno(),se_fileno) - try: - status = spawn_command(os.P_WAIT,argv0,argv,os.environ) - except OSError: - errmess = str(get_exception()) - status = 999 - sys.stderr.write('%s: %s'%(errmess,argv[0])) - - so_flush() - se_flush() - os.dup2(so_dup,so_fileno) - os.dup2(se_dup,se_fileno) - - fout.close() - fout = open_latin1(outfile,'r') - text = fout.read() - fout.close() - os.remove(outfile) - - if using_command: - ferr.close() - ferr = open_latin1(errfile,'r') - errmess = ferr.read() - ferr.close() - os.remove(errfile) - if errmess and not status: - # Not sure how to handle the case where errmess - # contains only warning messages and that should - # not be treated as errors. - #status = 998 - if text: - text = text + '\n' - #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess) - text = text + errmess - print (errmess) - if text[-1:]=='\n': - text = text[:-1] - if status is None: - status = 0 - - if use_tee: - print (text) - - return status, text - - -def test_nt(**kws): - pythonexe = get_pythonexe() - echo = find_executable('echo') - using_cygwin_echo = echo != 'echo' - if using_cygwin_echo: - log.warn('Using cygwin echo in win32 environment is not supported') - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'AAA\',\'\')"') - assert s==0 and o=='',(s,o) - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'AAA\')"', - AAA='Tere') - assert s==0 and o=='Tere',(s,o) - - os.environ['BBB'] = 'Hi' - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi',(s,o) - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"', - BBB='Hey') - assert s==0 and o=='Hey',(s,o) - - s,o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi',(s,o) - elif 0: - s,o=exec_command('echo Hello') - assert s==0 and o=='Hello',(s,o) - - 
s,o=exec_command('echo a%AAA%') - assert s==0 and o=='a',(s,o) - - s,o=exec_command('echo a%AAA%',AAA='Tere') - assert s==0 and o=='aTere',(s,o) - - os.environ['BBB'] = 'Hi' - s,o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi',(s,o) - - s,o=exec_command('echo a%BBB%',BBB='Hey') - assert s==0 and o=='aHey', (s,o) - s,o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi',(s,o) - - s,o=exec_command('this_is_not_a_command') - assert s and o!='',(s,o) - - s,o=exec_command('type not_existing_file') - assert s and o!='',(s,o) - - s,o=exec_command('echo path=%path%') - assert s==0 and o!='',(s,o) - - s,o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \ - % pythonexe) - assert s==0 and o=='win32',(s,o) - - s,o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe) - assert s==1 and o,(s,o) - - s,o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\ - % pythonexe) - assert s==0 and o=='012',(s,o) - - s,o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe) - assert s==15 and o=='',(s,o) - - s,o=exec_command('%s -c "print \'Heipa\'"' % pythonexe) - assert s==0 and o=='Heipa',(s,o) - - print ('ok') - -def test_posix(**kws): - s,o=exec_command("echo Hello",**kws) - assert s==0 and o=='Hello',(s,o) - - s,o=exec_command('echo $AAA',**kws) - assert s==0 and o=='',(s,o) - - s,o=exec_command('echo "$AAA"',AAA='Tere',**kws) - assert s==0 and o=='Tere',(s,o) - - - s,o=exec_command('echo "$AAA"',**kws) - assert s==0 and o=='',(s,o) - - os.environ['BBB'] = 'Hi' - s,o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi',(s,o) - - s,o=exec_command('echo "$BBB"',BBB='Hey',**kws) - assert s==0 and o=='Hey',(s,o) - - s,o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi',(s,o) - - - s,o=exec_command('this_is_not_a_command',**kws) - assert s!=0 and o!='',(s,o) - - s,o=exec_command('echo path=$PATH',**kws) - assert s==0 and o!='',(s,o) - - s,o=exec_command('python -c "import 
sys,os;sys.stderr.write(os.name)"',**kws) - assert s==0 and o=='posix',(s,o) - - s,o=exec_command('python -c "raise \'Ignore me.\'"',**kws) - assert s==1 and o,(s,o) - - s,o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws) - assert s==0 and o=='012',(s,o) - - s,o=exec_command('python -c "import sys;sys.exit(15)"',**kws) - assert s==15 and o=='',(s,o) - - s,o=exec_command('python -c "print \'Heipa\'"',**kws) - assert s==0 and o=='Heipa',(s,o) - - print ('ok') - -def test_execute_in(**kws): - pythonexe = get_pythonexe() - tmpfile = temp_file_name() - fn = os.path.basename(tmpfile) - tmpdir = os.path.dirname(tmpfile) - f = open(tmpfile,'w') - f.write('Hello') - f.close() - - s,o = exec_command('%s -c "print \'Ignore the following IOError:\','\ - 'open(%r,\'r\')"' % (pythonexe,fn),**kws) - assert s and o!='',(s,o) - s,o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe,fn), - execute_in = tmpdir,**kws) - assert s==0 and o=='Hello',(s,o) - os.remove(tmpfile) - print ('ok') - -def test_svn(**kws): - s,o = exec_command(['svn','status'],**kws) - assert s,(s,o) - print ('svn ok') - -def test_cl(**kws): - if os.name=='nt': - s,o = exec_command(['cl','/V'],**kws) - assert s,(s,o) - print ('cl ok') - -if os.name=='posix': - test = test_posix -elif os.name in ['nt','dos']: - test = test_nt -else: - raise NotImplementedError('exec_command tests for ', os.name) - -############################################################ - -if __name__ == "__main__": - - test(use_tee=0) - test(use_tee=1) - test_execute_in(use_tee=0) - test_execute_in(use_tee=1) - test_svn(use_tee=1) - test_cl(use_tee=1) diff --git a/numpy-1.6.2/numpy/distutils/extension.py b/numpy-1.6.2/numpy/distutils/extension.py deleted file mode 100644 index 2fc29f6d5b..0000000000 --- a/numpy-1.6.2/numpy/distutils/extension.py +++ /dev/null @@ -1,85 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ 
extension -modules in setup scripts. - -Overridden to support f2py. -""" - -__revision__ = "$Id: extension.py,v 1.1 2005/04/09 19:29:34 pearu Exp $" - -from distutils.extension import Extension as old_Extension - -import re -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match - -class Extension(old_Extension): - def __init__ (self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None, - ): - old_Extension.__init__(self,name, [], - include_dirs, - define_macros, - undef_macros, - library_dirs, - libraries, - runtime_library_dirs, - extra_objects, - extra_compile_args, - extra_link_args, - export_symbols) - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, basestring): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False - - def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False - -# class Extension diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/__init__.py b/numpy-1.6.2/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 550ae208ad..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,978 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. 
-""" - -__all__ = ['FCompiler','new_fcompiler','show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -import types -try: - set -except NameError: - from sets import Set as set - -from numpy.compat import open_latin1 - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.environment import EnvironmentConfig -from numpy.distutils.exec_command import find_executable -from numpy.distutils.compat import get_exception - -__metaclass__ = type - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. 
- - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration descripition is - # (, , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropiate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool), - noarch = (None, None, 'noarch', str2bool), - debug = (None, None, 'debug', str2bool), - verbose = (None, None, 'verbose', str2bool), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None), - version_cmd = ('exe.version_cmd', None, None, None), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None), - archiver = (None, 'AR', 'ar', None), - ranlib = (None, 'RANLIB', 'ranlib', None), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist), - fix = ('flags.fix', None, None, flaglist), - opt = ('flags.opt', 'FOPT', 'opt', flaglist), - opt_f77 = ('flags.opt_f77', None, None, flaglist), - opt_f90 = ('flags.opt_f90', None, None, flaglist), - arch = ('flags.arch', 
'FARCH', 'arch', flaglist), - arch_f77 = ('flags.arch_f77', None, None, flaglist), - arch_f90 = ('flags.arch_f90', None, None, flaglist), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist), - debug_f77 = ('flags.debug_f77', None, None, flaglist), - debug_f90 = ('flags.debug_f90', None, None, flaglist), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist), - ) - - language_map = {'.f':'f77', - '.for':'f77', - '.F':'f77', # XXX: needs preprocessor - '.ftn':'f77', - '.f77':'f77', - '.f90':'f90', - '.F90':'f90', # XXX: needs preprocessor - '.f95':'f90', - } - language_order = ['f90','f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd' : ["f77", "-v"], - 'compiler_f77' : ["f77"], - 'compiler_f90' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'linker_so' : ["f90", "-shared"], - 'linker_exe' : ["f90"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. 
- module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for','.ftn','.f77','.f','.f90','.f95','.F','.F90'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. - self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. 
- def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. - def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. - - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropiate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overriden. 
- """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(elf): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. 
- - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. - """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. 
""" - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). 
- """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77flags = self.flag_vars.f77 - if f90: - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. - fix = self.command_vars.compiler_fix - if fix: - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. 
calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=[f77]+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=[fix]+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - for key in self.executables.keys() + \ - ['version','libraries','library_dirs', - 'object_switch','compile_switch']: - if hasattr(self,key): - v = getattr(self,key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in 
pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if is_f_file(src) and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__,src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__,src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(),obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type,[]) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command,display=display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(),module_build_dir]) - else: - 
options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ',self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ',self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(),output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = 
linker + ld_args - try: - self.spawn(command) - except DistutilsExecError: - msg = str(get_exception()) - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95', - 'intelvem', 'intelem')), - ('cygwin.*', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')), - ('linux.*', ('gnu','intel','lahey','pg','absoft','nag','vast','compaq', - 'intele','intelem','gnu95','g95','pathf95')), - ('darwin.*', ('nag', 'absoft', 'ibm', 'intel', 'gnu', 'gnu95', 'g95', 'pg')), - ('sunos.*', ('sun','gnu','gnu95','g95')), - ('irix.*', ('mips','gnu','gnu95',)), - ('aix.*', ('ibm','gnu','gnu95',)), - # os.name mappings - ('posix', ('gnu','gnu95',)), - ('nt', ('gnu','gnu95',)), - ('mac', ('gnu','gnu95','pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). - """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound): - e = get_exception() - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - 
compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]',re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. 
- result = 0 - f = open_latin1(file,'r') - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - -def has_f90_header(src): - f = open_latin1(src,'r') - line = f.readline() - f.close() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)',re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. - """ - flags = {} - f = open_latin1(src,'r') - i = 0 - for line in f.readlines(): - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - f.close() - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/absoft.py b/numpy-1.6.2/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index e36f0ff78b..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,159 +0,0 @@ - -# http://www.absoft.com/literature/osxuserguide.pdf -# http://www.absoft.com/documentation.html - -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) - -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 
'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K","shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link','/PATH:"%s"' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math','fio','f77math','U77']) - else: - opt.extend(['fio','f90math','fmath','U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22','-N90','-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f','-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX", - "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - 
opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX", - "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"]) - opt.extend(["-f","fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='absoft') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/compaq.py b/numpy-1.6.2/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index a00d8bdb8d..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,127 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ - -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.compat import get_exception -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl","-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore','-nomixed_str_len_arg'] - def get_flags_debug(self): - 
return ['-g','-check bounds'] - def get_flags_opt(self): - return ['-O4','-align dcommons','-assume bigarrays', - '-assume nozsize','-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared','-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\ - ' Version (?P[^\s]*).*' - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError: - msg = get_exception() - if '_MSVCCompiler__root' in str(msg): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)) - else: - raise - except IOError: - e = get_exception() - if not "vcvarsall.bat" in str(e): - print("Unexpected IOError in", __file__) - raise e - except ValueError: - e = get_exception() - if not "path']" in str(e): - print("Unexpected ValueError in", __file__) - raise e - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl","/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo','/MD','/WX','/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase','/assume:underscore'] - def get_flags_opt(self): - return ['/Ox','/fast','/optimize:5','/unroll:0','/math_library:fast'] - def get_flags_arch(self): - 
return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='compaq') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/g95.py b/numpy-1.6.2/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index 9352a0b7b5..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,44 +0,0 @@ -# http://g95.sourceforge.net/ - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["","-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = G95FCompiler() - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/gnu.py b/numpy-1.6.2/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 49df885b66..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,384 +0,0 @@ -import re -import os -import sys -import warnings -import platform -import tempfile -from subprocess import Popen, PIPE, STDOUT - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import msvc_runtime_library -from numpy.distutils.compat import get_exception - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - -if is_win64(): - #_EXTRAFLAGS = ["-fno-leading-underscore"] - _EXTRAFLAGS = [] -else: - _EXTRAFLAGS = [] - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77',) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - m = re.match(r'GNU Fortran', version_string) - if not m: - return None - m = re.match(r'GNU 
Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.match(r'GNU Fortran.*?([0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith('0') or v.startswith('2') or v.startswith('3'): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - # 'g77 --version' results - # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release) - # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian) - # GNU Fortran (GCC) 3.3.3 (Debian 20040401) - # GNU Fortran 0.5.25 20010319 (prerelease) - # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5) - # GNU Fortran (GCC) 3.4.2 (mingw-special) - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "--version"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - - suggested_f90_compiler = 'gnu95' - - #def get_linker_so(self): - # # win32 linking should be handled by standard linker - # # Darwin g77 cannot be used as a linker. 
- # #if re.match(r'(darwin)', sys.platform): - # # return - # return FCompiler.get_linker_so(self) - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform=='darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let disutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from the Python Makefile and then we - # fall back to setting it to 10.3 to maximize the set of - # versions we can work with. This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import distutils.sysconfig as sc - g = {} - filename = sc.get_makefile_filename() - sc.parse_makefile(filename, g) - target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') - os.environ['MACOSX_DEPLOYMENT_TARGET'] = target - if target == '10.3': - s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' - warnings.warn(s) - - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." 
- opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - status, output = exec_command(self.compiler_f77 + - ['-print-libgcc-file-name'], - use_tee=0) - if not status: - return os.path.dirname(output) - return None - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - if not os.path.exists(os.path.join(d, "lib%s.a" % self.g2c)): - d2 = os.path.abspath(os.path.join(d, - '../../../../lib')) - if os.path.exists(os.path.join(d2, "lib%s.a" % self.g2c)): - opt.append(d2) - opt.append(d) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d,f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type=='msvc': - # the following code is not needed (read: breaks) when using MinGW - # in case want to link F77 compiled code with MSVC - opt.append('gcc') - runtime_lib = msvc_runtime_library() - if runtime_lib: - opt.append(runtime_lib) - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v<='3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - from distutils import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran',) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if v>='4.': - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe']: - self.executables[key].append('-mno-cygwin') - return v - - # 'gfortran --version' results: - # XXX is the below right? 
- # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3)) - # GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21) - # OS X: GNU Fortran 95 (GCC) 4.1.0 - # GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental) - # GNU Fortran (GCC) 4.3.0 20070316 (experimental) - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : [None, "-Wall", "-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_fix' : [None, "-Wall", "-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'linker_so' : ["", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. 
- c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) - mingwdir = os.path.normpath(os.path.join(root, target, "lib")) - full = os.path.join(mingwdir, "libmingwex.a") - if os.path.exists(full): - opt.append(mingwdir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i+1, "mingwex") - opt.insert(i+1, "mingw32") - # XXX: fix this mess, does not work for mingw - if is_win64(): - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - raise NotImplementedError("Only MS compiler supported with gfortran on win64") - return opt - - def get_target(self): - status, output = exec_command(self.compiler_f77 + - ['-v'], - use_tee=0) - if not status: - m = 
TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def get_flags_opt(self): - if is_win64(): - return ['-O0'] - else: - return GnuFCompiler.get_flags_opt(self) - -def _can_target(cmd, arch): - """Return true is the command supports the -arch flag for the given - architecture.""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - return False - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = GnuFCompiler() - compiler.customize() - print(compiler.get_version()) - raw_input('Press ENTER to continue...') - try: - compiler = Gnu95FCompiler() - compiler.customize() - print(compiler.get_version()) - except Exception: - msg = get_exception() - print(msg) - raw_input('Press ENTER to continue...') diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/hpux.py b/numpy-1.6.2/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 866920ee5b..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def 
get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256,0,1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. - return FCompiler.get_version(self,force,ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='hpux') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/ibm.py b/numpy-1.6.2/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index 113134bbdf..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import re -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import exec_command, find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - s,o = exec_command(lslpp + 
' -Lc xlfcmp') - m = re.search('xlfcmp:(?P\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = os.listdir(xlf_dir) - l.sort() - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir,d,'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0,40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - fi = open(xlf_cfg,'r') - crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match - for line in fi.readlines(): - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fi.close() - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - log.set_verbosity(2) - compiler = IBMFCompiler() - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/intel.py b/numpy-1.6.2/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 1905848292..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,252 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ - -import sys - -from numpy.distutils.cpuinfo import cpu -from 
numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! - module_include_switch = '-I' - - def get_flags(self): - v = self.get_version() - if v >= '10.0': - # Use -fPIC instead of -KPIC. 
- pic_flags = ['-fPIC'] - else: - pic_flags = ['-KPIC'] - opt = pic_flags + ["-cm"] - return opt - - def get_flags_free(self): - return ["-FR"] - - def get_flags_opt(self): - return ['-O1'] - - def get_flags_arch(self): - v = self.get_version() - opt = [] - if cpu.has_fdiv_bug(): - opt.append('-fdiv_check') - if cpu.has_f00f_bug(): - opt.append('-0f_check') - if cpu.is_PentiumPro() or cpu.is_PentiumII() or cpu.is_PentiumIII(): - opt.extend(['-tpp6']) - elif cpu.is_PentiumM(): - opt.extend(['-tpp7','-xB']) - elif cpu.is_Pentium(): - opt.append('-tpp5') - elif cpu.is_PentiumIV() or cpu.is_Xeon(): - opt.extend(['-tpp7','-xW']) - if v and v <= '7.1': - if cpu.has_mmx() and (cpu.is_PentiumII() or cpu.is_PentiumIII()): - opt.append('-xM') - elif v and v >= '8.0': - if cpu.is_PentiumIII(): - opt.append('-xK') - if cpu.has_sse3(): - opt.extend(['-xP']) - elif cpu.is_PentiumIV(): - opt.append('-xW') - if cpu.has_sse2(): - opt.append('-xN') - elif cpu.is_PentiumM(): - opt.extend(['-xB']) - if (cpu.is_Xeon() or cpu.is_Core2() or cpu.is_Core2Extreme()) and cpu.getNCPUs()==2: - opt.extend(['-xT']) - if cpu.has_sse3() and (cpu.is_PentiumIV() or cpu.is_CoreDuo() or cpu.is_CoreSolo()): - opt.extend(['-xP']) - - if cpu.has_sse2(): - opt.append('-arch SSE2') - elif cpu.has_sse(): - opt.append('-arch SSE') - return opt - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup', '-Wl,-framework,Python'] - return opt - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - 
- executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_arch(self): - opt = [] - if cpu.is_PentiumIV() or cpu.is_Xeon(): - opt.extend(['-tpp7', '-xW']) - return opt - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None,"-FI","-w90","-w95"], - 'compiler_fix' : [None,"-FI","-4L72","-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' #No space after /Fo! - library_switch = '/OUT:' #No space after /OUT:! 
- module_dir_switch = '/module:' #No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo','/MD','/nbs','/Qlowercase','/us'] - return opt - - def get_flags_free(self): - return ["-FR"] - - def get_flags_debug(self): - return ['/4Yb','/d2'] - - def get_flags_opt(self): - return ['/O1'] - - def get_flags_arch(self): - opt = [] - if cpu.is_PentiumPro() or cpu.is_PentiumII(): - opt.extend(['/G6','/Qaxi']) - elif cpu.is_PentiumIII(): - opt.extend(['/G6','/QaxK']) - elif cpu.is_Pentium(): - opt.append('/G5') - elif cpu.is_PentiumIV(): - opt.extend(['/G7','/QaxW']) - if cpu.has_mmx(): - opt.append('/QaxM') - return opt - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None,"-FI","-w90","-w95"], - 'compiler_fix' : [None,"-FI","-4L72","-w"], - 'compiler_f90' : [None], - 'linker_so' : ['',"-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran Compiler for 64-bit apps' - - version_match = simple_version_match(start='Intel\(R\).*?64,') - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='intel') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/lahey.py b/numpy-1.6.2/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index cf29506241..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -from numpy.distutils.fcompiler import FCompiler - 
-compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95","-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g','--chk','--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d,'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='lahey') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/mips.py b/numpy-1.6.2/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index 3c2e9ac84b..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,56 +0,0 @@ -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90","-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None 
#XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu,'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='mips') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/nag.py b/numpy-1.6.2/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 4aca48450f..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,43 +0,0 @@ -import sys -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler'] - -class NAGFCompiler(FCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - version_pattern = r'NAGWare Fortran 95 compiler Release (?P[^\s]*)' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform=='darwin': - return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return [''] - def 
get_flags_debug(self): - return ['-g','-gline','-g90','-nan','-C'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='nag') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/none.py b/numpy-1.6.2/numpy/distutils/fcompiler/none.py deleted file mode 100644 index 526b42d497..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,30 +0,0 @@ - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77' : None, - 'compiler_f90' : None, - 'compiler_fix' : None, - 'linker_so' : None, - 'linker_exe' : None, - 'archiver' : None, - 'ranlib' : None, - 'version_cmd' : None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = NoneFCompiler() - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/pathf95.py b/numpy-1.6.2/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index c92653ba73..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,36 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch 
= '-module ' # Don't remove ending space! - module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - #compiler = PathScaleFCompiler() - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pathf95') - compiler.customize() - print compiler.get_version() diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/pg.py b/numpy-1.6.2/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 6ea3c03d6d..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,60 +0,0 @@ - -# http://www.pgroup.com - -from numpy.distutils.fcompiler import FCompiler -from sys import platform - -compilers = ['PGroupFCompiler'] - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["pgfortran", "-dynamiclib"], - 'compiler_fix' : ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90' : ["pgfortran", "-dynamiclib"], - 'linker_so' : ["libtool"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["pgfortran"], - 'compiler_fix' : ["pgfortran", "-Mfixed"], - 'compiler_f90' : ["pgfortran"], - 'linker_so' : ["pgfortran","-shared","-fpic"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform','-Mnosecond_underscore'] - return self.pic_flags + opt - def get_flags_opt(self): - return ['-fast'] - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 
'dynamic_lookup'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pg') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/sun.py b/numpy-1.6.2/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index 85e2c33772..0000000000 --- a/numpy-1.6.2/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,50 +0,0 @@ -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = ['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["","-Bdynamic","-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast','-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu','sunmath','mvec']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='sun') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/fcompiler/vast.py b/numpy-1.6.2/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index a7b99ce73e..0000000000 
--- a/numpy-1.6.2/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = r'\s*Pacific-Sierra Research vf90 '\ - '(Personal|Professional)\s+(?P[^\s]*)' - - # VAST f90 does not support -o with -c. So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='vast') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.6.2/numpy/distutils/from_template.py b/numpy-1.6.2/numpy/distutils/from_template.py deleted file mode 100644 index 413f0721df..0000000000 --- a/numpy-1.6.2/numpy/distutils/from_template.py +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/python -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following 
template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separeted words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '

    ' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - , a short form of the named, useful when no

    appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" - -__all__ = ['process_str','process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b',re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while 1: - m = routine_start_re.search(astr,ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr,start,m.end()): - while 1: - i = astr.rfind('\n',ind,start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr,m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start,end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace('\,','@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = 
l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = adict.keys() - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr,names): - substr = substr.replace('\>','@rightarrow@') - substr = substr.replace('\<','@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>",substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace('\,','@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r,names.get(r,None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@',',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)"\ - " for <%s=%s>. Ignoring." 
% (base_rule, - ','.join(rules[base_rule]), - r,thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name,(k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@','>') - newstr = newstr.replace('@leftarrow@','<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' #_head # using _head will break free-format files - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - writestr += newstr[oldend:sub[0]] - names.update(find_repl_patterns(newstr[oldend:sub[0]])) - writestr += expand_sub(newstr[sub[0]:sub[1]],names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+[.]src)['\"]",re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - fid = open(source) - lines = [] - for line in fid.readlines(): - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d,fn) - if os.path.isfile(fn): - print ('Including file',fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - fid.close() - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -if __name__ == "__main__": - - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file,'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname,'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) diff --git a/numpy-1.6.2/numpy/distutils/info.py b/numpy-1.6.2/numpy/distutils/info.py deleted file mode 
100644 index 3d27a8092b..0000000000 --- a/numpy-1.6.2/numpy/distutils/info.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -Enhanced distutils with Fortran compilers support and more. -""" - -postpone_import = True diff --git a/numpy-1.6.2/numpy/distutils/intelccompiler.py b/numpy-1.6.2/numpy/distutils/intelccompiler.py deleted file mode 100644 index 9cff858cef..0000000000 --- a/numpy-1.6.2/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,43 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable - -class IntelCCompiler(UnixCCompiler): - """ A modified Intel compiler compatible with an gcc built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose,dry_run, force) - self.cc_exe = 'icc -fPIC' - compiler = self.cc_exe - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - linker_exe=compiler, - linker_so=compiler + ' -shared') - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). - for cc_exe in map(find_executable,['icc','ecc']): - if cc_exe: - break - -class IntelEM64TCCompiler(UnixCCompiler): - """ A modified Intel x86_64 compiler compatible with a 64bit gcc built Python. 
- """ - compiler_type = 'intelem' - cc_exe = 'icc -m64 -fPIC' - cc_args = "-fPIC" - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose,dry_run, force) - self.cc_exe = 'icc -m64 -fPIC' - compiler = self.cc_exe - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - linker_exe=compiler, - linker_so=compiler + ' -shared') diff --git a/numpy-1.6.2/numpy/distutils/interactive.py b/numpy-1.6.2/numpy/distutils/interactive.py deleted file mode 100644 index e3dba04eb4..0000000000 --- a/numpy-1.6.2/numpy/distutils/interactive.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import sys -from pprint import pformat - -__all__ = ['interactive_sys_argv'] - -def show_information(*args): - print 'Python',sys.version - for a in ['platform','prefix','byteorder','path']: - print 'sys.%s = %s' % (a,pformat(getattr(sys,a))) - for a in ['name']: - print 'os.%s = %s' % (a,pformat(getattr(os,a))) - if hasattr(os,'uname'): - print 'system,node,release,version,machine = ',os.uname() - -def show_environ(*args): - for k,i in os.environ.items(): - print ' %s = %s' % (k, i) - -def show_fortran_compilers(*args): - from fcompiler import show_fcompilers - show_fcompilers() - -def show_compilers(*args): - from distutils.ccompiler import show_compilers - show_compilers() - -def show_tasks(argv,ccompiler,fcompiler): - print """\ - -Tasks: - i - Show python/platform/machine information - ie - Show environment information - c - Show C compilers information - c - Set C compiler (current:%s) - f - Show Fortran compilers information - f - Set Fortran compiler (current:%s) - e - Edit proposed sys.argv[1:]. - -Task aliases: - 0 - Configure - 1 - Build - 2 - Install - 2 - Install with prefix. 
- 3 - Inplace build - 4 - Source distribution - 5 - Binary distribution - -Proposed sys.argv = %s - """ % (ccompiler, fcompiler, argv) - - -import shlex - -def edit_argv(*args): - argv = args[0] - readline = args[1] - if readline is not None: - readline.add_history(' '.join(argv[1:])) - try: - s = raw_input('Edit argv [UpArrow to retrive %r]: ' % (' '.join(argv[1:]))) - except EOFError: - return - if s: - argv[1:] = shlex.split(s) - return - -def interactive_sys_argv(argv): - print '='*72 - print 'Starting interactive session' - print '-'*72 - - readline = None - try: - try: - import readline - except ImportError: - pass - else: - import tempfile - tdir = tempfile.gettempdir() - username = os.environ.get('USER',os.environ.get('USERNAME','UNKNOWN')) - histfile = os.path.join(tdir,".pyhist_interactive_setup-" + username) - try: - try: readline.read_history_file(histfile) - except IOError: pass - import atexit - atexit.register(readline.write_history_file, histfile) - except AttributeError: pass - except Exception, msg: - print msg - - task_dict = {'i':show_information, - 'ie':show_environ, - 'f':show_fortran_compilers, - 'c':show_compilers, - 'e':edit_argv, - } - c_compiler_name = None - f_compiler_name = None - - while 1: - show_tasks(argv,c_compiler_name, f_compiler_name) - try: - task = raw_input('Choose a task (^D to quit, Enter to continue with setup): ') - except EOFError: - print - task = 'quit' - ltask = task.lower() - if task=='': break - if ltask=='quit': sys.exit() - task_func = task_dict.get(ltask,None) - if task_func is None: - if ltask[0]=='c': - c_compiler_name = task[1:] - if c_compiler_name=='none': - c_compiler_name = None - continue - if ltask[0]=='f': - f_compiler_name = task[1:] - if f_compiler_name=='none': - f_compiler_name = None - continue - if task[0]=='2' and len(task)>1: - prefix = task[1:] - task = task[0] - else: - prefix = None - if task == '4': - argv[1:] = ['sdist','-f'] - continue - elif task in '01235': - cmd_opts = 
{'config':[],'config_fc':[], - 'build_ext':[],'build_src':[], - 'build_clib':[]} - if c_compiler_name is not None: - c = '--compiler=%s' % (c_compiler_name) - cmd_opts['config'].append(c) - if task != '0': - cmd_opts['build_ext'].append(c) - cmd_opts['build_clib'].append(c) - if f_compiler_name is not None: - c = '--fcompiler=%s' % (f_compiler_name) - cmd_opts['config_fc'].append(c) - if task != '0': - cmd_opts['build_ext'].append(c) - cmd_opts['build_clib'].append(c) - if task=='3': - cmd_opts['build_ext'].append('--inplace') - cmd_opts['build_src'].append('--inplace') - conf = [] - sorted_keys = ['config','config_fc','build_src', - 'build_clib','build_ext'] - for k in sorted_keys: - opts = cmd_opts[k] - if opts: conf.extend([k]+opts) - if task=='0': - if 'config' not in conf: - conf.append('config') - argv[1:] = conf - elif task=='1': - argv[1:] = conf+['build'] - elif task=='2': - if prefix is not None: - argv[1:] = conf+['install','--prefix=%s' % (prefix)] - else: - argv[1:] = conf+['install'] - elif task=='3': - argv[1:] = conf+['build'] - elif task=='5': - if sys.platform=='win32': - argv[1:] = conf+['bdist_wininst'] - else: - argv[1:] = conf+['bdist'] - else: - print 'Skipping unknown task:',`task` - else: - print '-'*68 - try: - task_func(argv,readline) - except Exception,msg: - print 'Failed running task %s: %s' % (task,msg) - break - print '-'*68 - print - - print '-'*72 - return argv diff --git a/numpy-1.6.2/numpy/distutils/lib2def.py b/numpy-1.6.2/numpy/distutils/lib2def.py deleted file mode 100644 index a486b13bde..0000000000 --- a/numpy-1.6.2/numpy/distutils/lib2def.py +++ /dev/null @@ -1,114 +0,0 @@ -import re -import sys -import os -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). 
- -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = 'nm -Cs' - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print "I'm assuming that your first argument is the library" - print "and the second is the DEF file." - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" - f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE) - nm_output = f.stdout.read() - f.stdout.close() - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. 
- -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = [str(DEFAULT_NM), str(libfile)] - nm_output = getnm(nm_cmd) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy-1.6.2/numpy/distutils/line_endings.py b/numpy-1.6.2/numpy/distutils/line_endings.py deleted file mode 100644 index 4e6c1f38ec..0000000000 --- a/numpy-1.6.2/numpy/distutils/line_endings.py +++ /dev/null @@ -1,74 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings -""" - -import sys, re, os - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print file, "Directory!" - return - - data = open(file, "rb").read() - if '\0' in data: - print file, "Binary!" 
- return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print 'dos2unix:', file - f = open(file, "wb") - f.write(newdata) - f.close() - return file - else: - print file, 'ok' - -def dos2unix_one_dir(modified_files,dir_name,file_names): - for file in file_names: - full_path = os.path.join(dir_name,file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name,dos2unix_one_dir,modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." - if os.path.isdir(file): - print file, "Directory!" - return - - data = open(file, "rb").read() - if '\0' in data: - print file, "Binary!" - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print 'unix2dos:', file - f = open(file, "wb") - f.write(newdata) - f.close() - return file - else: - print file, 'ok' - -def unix2dos_one_dir(modified_files,dir_name,file_names): - for file in file_names: - full_path = os.path.join(dir_name,file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name,unix2dos_one_dir,modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy-1.6.2/numpy/distutils/log.py b/numpy-1.6.2/numpy/distutils/log.py deleted file mode 100644 index fe44bb4433..0000000000 --- a/numpy-1.6.2/numpy/distutils/log.py +++ /dev/null @@ -1,81 +0,0 @@ -# Colored log, requires Python 2.3 or up. 
- -import sys -from distutils.log import * -from distutils.log import Log as old_Log -from distutils.log import _global_log - -if sys.version_info[0] < 3: - from misc_util import red_text, default_text, cyan_text, green_text, is_sequence, is_string -else: - from numpy.distutils.misc_util import red_text, default_text, cyan_text, green_text, is_sequence, is_string - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%','%%') - if flag and is_sequence(args): - return tuple([_fix_args(a,flag=0) for a in args]) - return args - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """If we'd log WARN messages, log this message as a 'nice' anti-warn - message. - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. 
- _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting thershold to DEBUG level, it can be changed only with force argument') - else: - info('set_threshold: not changing thershold from DEBUG level %s to %s' % (prev_level,level)) - return prev_level - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1) - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. -set_verbosity(0, force=True) diff --git a/numpy-1.6.2/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy-1.6.2/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 15ed7e6863..0000000000 --- a/numpy-1.6.2/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/numpy-1.6.2/numpy/distutils/mingw32ccompiler.py b/numpy-1.6.2/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index f5d9964904..0000000000 --- a/numpy-1.6.2/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,494 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" - -import os -import subprocess -import sys -import subprocess -import re - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler - -if sys.version_info[0] < 3: - import log -else: - from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. 
If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.version import StrictVersion -from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options -from distutils.errors import DistutilsExecError, CompileError, UnknownFileError - -from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version -from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, - verbose,dry_run, force) - - # we need to support 3.2 which doesn't match the standard - # get_versions methods regex - if self.gcc_version is None: - import re - p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, - stdout=subprocess.PIPE) - out_string = p.stdout.read() - p.stdout.close() - result = re.search('(\d+\.\d+)',out_string) - if result: - self.gcc_version = StrictVersion(result.group(1)) - - # A real mingw32 doesn't need to specify a different entry point, - # but cygwin 2.91.57 in no-cygwin-mode needs it. - if self.gcc_version <= "2.91.57": - entry_point = '--entry _DllMain@12' - else: - entry_point = '' - - if self.linker_dll == 'dllwrap': - # Commented out '--driver-name g++' part that fixes weird - # g++.exe: g++: No such file or directory - # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). 
- # If the --driver-name part is required for some environment - # then make the inclusion of this part specific to that environment. - self.linker = 'dllwrap' # --driver-name g++' - elif self.linker_dll == 'gcc': - self.linker = 'g++' - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # **changes: eric jones 4/11/01 - # 2. increased optimization and turned off all warnings - # 3. also added --driver-name g++ - #self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - # compiler_so='gcc -mno-cygwin -mdll -O2 -w', - # linker_exe='gcc -mno-cygwin', - # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' - # % (self.linker, entry_point)) - - # MS_WIN64 should be defined when building for amd64 on windows, but - # python headers define it only for MS compilers, which has all kind of - # bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - if self.gcc_version < "4.": - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes', - linker_exe='gcc -g -mno-cygwin', - linker_so='gcc -g -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - if self.gcc_version <= "3.0.0": - self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' - % (self.linker, entry_point)) - elif self.gcc_version < "4.": - self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall 
-Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables(compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished - # dlls need another dll (mingwm10.dll see Mingw32 docs) - # (-mthreads: Support thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropiate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - if self.gcc_version < "3.0.0": - func = distutils.cygwinccompiler.CygwinCCompiler.link - else: - func = UnixCCompiler.link - if sys.version_info[0] >= 3: - func(*args[:func.__code__.co_argcount]) - else: - func(*args[:func.im_func.func_code.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - 
(base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv,base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc','.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - maj, min, micro = [int(i) for i in sys.version_info[:3]] - dllname = 'python%d%d.dll' % (maj, min) - print ("Looking for %s" % dllname) - - # We can't do much here: - # - find it in python main dir - # - in system32, - # - ortherwise (Sxs), I don't know how to get it. - lib_dirs = [] - lib_dirs.append(os.path.join(sys.prefix, 'lib')) - try: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32')) - except KeyError: - pass - - for d in lib_dirs: - dll = os.path.join(d, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) - return st.stdout.readlines() - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. 
- - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i]): - break - - if i == len(dump): - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j]) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - d = open(dfile, 'w') - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - d.close() - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _build_import_library_amd64(): - dll_file = find_python_dll() - - out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building import library: "%s" exists' % (out_file)) - return - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix,'libs',def_name) - - log.info('Building import library (arch=AMD64): "%s" (from %s)' \ - % (out_file, dll_file)) - - generate_def(dll_file, def_file) - - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.Popen(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix,'libs',lib_name) - out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) - out_file = os.path.join(sys.prefix,'libs',out_name) - if not os.path.isfile(lib_file): - log.warn('Cannot build import 
library: "%s" not found' % (lib_file)) - return - if os.path.isfile(out_file): - log.debug('Skip building import library: "%s" exists' % (out_file)) - return - log.info('Building import library (ARCH=x86): "%s"' % (out_file)) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix,'libs',def_name) - nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) - nm_output = lib2def.getnm(nm_cmd) - dlist, flist = lib2def.parse_nm(nm_output) - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) - - dll_name = "python%d%d.dll" % tuple(sys.version_info[:2]) - args = (dll_name,def_file,out_file) - cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args - status = os.system(cmd) - # for now, fail silently - if status: - log.warn('Failed to build import library for gcc. Linking will fail.') - #if not success: - # msg = "Couldn't find import library, and failed to build it." - # raise DistutilsPlatformError, msg - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. 
So for versions below, we use a "best guess". -_MSVCRVER_TO_FULLVER = {} -if sys.platform == 'win32': - try: - import msvcrt - if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): - _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION - else: - _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" - except ImportError: - # If we are here, means python was not built with MSVC. Not sure what to do - # in that case: manifest building will fail, but it should not be used in - # that case anyway - log.warn('Cannot import msvcrt: using manifest will not be possible') - -def msvc_manifest_xml(maj, min): - """Given a major and minor version of the MSVCR, returns the - corresponding XML file.""" - try: - fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] - except KeyError: - raise ValueError("Version %d,%d of MSVCRT not supported yet" \ - % (maj, min)) - # Don't be fooled, it looks like an XML, but it is not. In particular, it - # should not have any space before starting, and its size should be - # divisible by 4, most likely for alignement constraints when the xml is - # embedded in the binary... - # This template was copied directly from the python 2.6 binary (using - # strings.exe from mingw on python.exe). - template = """\ - - - - - - - - - - - - - -""" - - return template % {'fullver': fullver, 'maj': maj, 'min': min} - -def manifest_rc(name, type='dll'): - """Return the rc file used to generate the res file which will be embedded - as manifest for given manifest file name, of given type ('dll' or - 'exe'). 
- - Parameters - ---------- name: str - name of the manifest file to embed - type: str ('dll', 'exe') - type of the binary which will embed the manifest""" - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - msvcv = msvc_runtime_library() - if msvcv: - maj = int(msvcv[5:6]) - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma = int(msver) - mi = int((msver - ma) * 10) - # Write the manifest file - manxml = msvc_manifest_xml(ma, mi) - man = open(manifest_name(config), "w") - config.temp_files.append(manifest_name(config)) - man.write(manxml) - man.close() - # # Write the rc file - # manrc = manifest_rc(manifest_name(self), "exe") - # rc = open(rc_name(self), "w") - # self.temp_files.append(manrc) - # rc.write(manrc) - # rc.close() diff --git a/numpy-1.6.2/numpy/distutils/misc_util.py b/numpy-1.6.2/numpy/distutils/misc_util.py deleted file mode 100644 index a3efad38fd..0000000000 --- a/numpy-1.6.2/numpy/distutils/misc_util.py +++ /dev/null 
@@ -1,2396 +0,0 @@ -import os -import re -import sys -import imp -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil - -import distutils -from distutils.errors import DistutilsError - -try: - set -except NameError: - from sets import Set as set - -from numpy.distutils.compat import get_exception - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32','mingw32','all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath','njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info'] - -class InstallableLib: - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. - - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - -def quote_args(args): - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." 
- splitted = name.split('/') - return os.path.join(*splitted) - -def rel_path(path, parent_path): - """Return path relative to parent_path. - """ - pd = os.path.abspath(parent_path) - apath = os.path.abspath(path) - if len(apath)= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(fg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path): - if sys.platform=='cygwin' and path.startswith('/cygdrive'): - path = path[10] + ':' + os.path.normcase(path[11:]) - return path - -def mingw32(): - """Return true when using mingw32 environment. 
- """ - if sys.platform=='win32': - if os.environ.get('OSTYPE','')=='msys': - return True - if os.environ.get('MSYSTEM','')=='MINGW32': - return True - return False - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = sys.version[msc_pos+6:msc_pos+10] - lib = {'1300' : 'msvcr70', # MSVC 7.0 - '1310' : 'msvcr71', # MSVC 7.1 - '1400' : 'msvcr80', # MSVC 8 - '1500' : 'msvcr90', # MSVC 9 (VS 2008) - }.get(msc_ver, None) - else: - lib = None - return lib - -def msvc_on_amd64(): - if not (sys.platform=='win32' or os.name=='nt'): - return - if get_build_architecture() != 'AMD64': - return - if 'DISTUTILS_USE_SDK' in os.environ: - return - # try to avoid _MSVCCompiler__root attribute error - print('Forcing DISTUTILS_USE_SDK=1') - os.environ['DISTUTILS_USE_SDK']='1' - return - -######################### - -#XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)',re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - f = open(source,'r') - f_readlines = getattr(f,'xreadlines',f.readlines) - for line in f_readlines(): - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - f.close() - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. 
""" - for item in lst: - if not is_string(item): - return False - return True - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' is s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. - """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = glob.glob(os.path.join(d,"*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. 
- direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. - """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(),abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. 
- """ - pruned_directories = ['CVS','.svn','build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath,f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath,f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. - filenames = [] - sources = filter(is_string, ext.sources) - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = filter(is_string, scripts) - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources',[]) - sources = filter(is_string, sources) - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends',[]) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. 
- - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. For Python 3.2 this is implemented on - Linux, but not on OS X. - - """ - so_ext = distutils.sysconfig.get_config_var('SO') or '' - # fix long extension for Python >=3.2, see PEP 3149. - if (not is_python_ext) and 'SOABI' in distutils.sysconfig.get_config_vars(): - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + distutils.sysconfig.get_config_var('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:',s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - -class SconsInfo(object): - """ - Container object holding build info for building a package with scons. - - Parameters - ---------- - scons_path : str or None - Path to scons script, relative to the directory of setup.py. - If None, no scons script is specified. This can be useful to add only - pre- and post-hooks to a configuration. - parent_name : str or None - Name of the parent package (for example "numpy"). - pre_hook : sequence of callables or None - Callables that are executed before scons is invoked. - Each callable should be defined as ``callable(*args, **kw)``. 
- post_hook : sequence of callables or None - Callables that are executed after scons is invoked. - Each callable should be defined as ``callable(*args, **kw)``. - source_files : list of str or None - List of paths to source files, relative to the directory of setup.py. - pkg_path : str or None - Path to the package for which the `SconsInfo` instance holds the - build info, relative to the directory of setup.py. - - Notes - ----- - All parameters are available as attributes of a `SconsInfo` instance. - - """ - def __init__(self, scons_path, parent_name, pre_hook, - post_hook, source_files, pkg_path): - self.scons_path = scons_path - self.parent_name = parent_name - self.pre_hook = pre_hook - self.post_hook = post_hook - self.source_files = source_files - if pkg_path: - self.pkg_path = pkg_path - else: - if scons_path: - self.pkg_path = os.path.dirname(scons_path) - else: - self.pkg_path = '' - -###################### - -class Configuration(object): - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', 'scons_data', - 'installed_libraries'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. 
- """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path,package_path)): - package_path = njoin(self.local_path,package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self,n,a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path,'__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1,3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self',f.f_globals,f.f_locals) - break - except NameError: - 
pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. - - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self,n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. - - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = filter(os.path.isdir,glob.glob(subpackage_path)) - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d,'__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - 
sys.path.insert(0,os.path.dirname(setup_py)) - try: - fo_setup_py = open(setup_py, 'U') - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name,subpackage_name,setup_name) - setup_module = imp.load_module('_'.join(n.split('.')), - fo_setup_py, - setup_py, - ('.py', 'U', 1)) - fo_setup_py.close() - if not hasattr(setup_module,'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - def fix_args_py2(args): - if setup_module.configuration.func_code.co_argcount > 1: - args = args + (self.top_path,) - return args - def fix_args_py3(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - if sys.version_info[0] < 3: - args = fix_args_py2(args) - else: - args = fix_args_py3(args) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name,subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name,subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name: str,None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path: str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name: str - Parent name. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name,repr((subpackage_name, subpackage_path,parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name: str - name of the subpackage - subpackage_path: str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. 
- standalone: bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name,subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d,dict),repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self,data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path: seq,str - Argument can be either - - * 2-sequence (,) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths: - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat:: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d,p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = range(len(pattern_list)-1); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping',path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - 
target_list.append(path_list[i]) - else: - assert s==path_list[i],repr((s,path_list[i],data_path,d,path,rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list,path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list),path)) - else: - for path in paths: - self.add_data_dir((d,path)) - return - assert not is_glob_pattern(d),repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1,f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package,d,d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p,files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p,list(files)) for p,files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files: sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. 
- Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. *.txt -> parent/a.txt, parent/b.txt - #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt - #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. - - Examples - -------- - Add files to the list of data_files to be included with the package. 
- - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). - """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d,files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d,f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d,files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d,paths)) - return - assert not is_glob_pattern(d),repr((d,filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package,d),paths)) 
- - ### XXX Implement add_py_modules - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_numarray_include_dirs(self): - import numpy.numarray.util as nnu - self.add_include_dirs(*nnu.get_numarray_include_dirs()) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files: str, seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. - """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name,p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0],p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) to each path in the sequence (if needed) and - pre-pends the local_path if needed. 
Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing',True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self,kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources','depends','include_dirs','library_dirs', - 'module_dirs','extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name: str - name of the extension - sources: seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs: - define_macros: - undef_macros: - library_dirs: - libraries: - runtime_library_dirs: - extra_objects: - extra_compile_args: - extra_link_args: - extra_f77_compile_args: - extra_f90_compile_args: - export_symbols: - swig_opts: - depends: - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language: - f2py_options: - module_dirs: - extra_info: dict,list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) 
method is applied to all lists that may contain - paths. - """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name,name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries',[]) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname,tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname,lpath = libname.split('@',1) - lpath = os.path.abspath(njoin(self.local_path,lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None,lpath, - caller_level = 2) - if isinstance(c,Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries',[])]: - llname = l.split('__OF__',1)[0] - if llname == lname: - c.pop('name',None) - dict_append(ext_args,**c) - break - continue - libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. 
If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compiler_args - * extra_f90_compiler_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. Do - not use directly""" - build_info = copy.copy(build_info) - name = name #+ '__OF__' + self.name - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. - - Most C libraries used with `distutils` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. 
- build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compiler_args - * extra_f90_compiler_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. - - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. 
- - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - """ - if subst_dict is None: - subst_dict = {} - basename = os.path.splitext(template)[0] - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - def add_scons_installed_library(self, name, install_dir): - """ - Add a scons-built installable library to distutils. - - Parameters - ---------- - name : str - The name of the library. - install_dir : str - Path to install the library, relative to the current sub-package. - - """ - install_dir = os.path.join(self.package_path, install_dir) - self.installed_libraries.append(InstallableLib(name, {}, install_dir)) - - def add_sconscript(self, sconscript, subpackage_path=None, - standalone = False, pre_hook = None, - post_hook = None, source_files = None, package_path=None): - """Add a sconscript to configuration. - - pre_hook and post hook should be sequences of callable, which will be - use before and after executing scons. The callable should be defined as - callable(*args, **kw). It is ugly, but well, hooks are ugly anyway... 
- - sconscript can be None, which can be useful to add only post/pre - hooks.""" - if standalone: - parent_name = None - else: - parent_name = self.name - - dist = self.get_distribution() - # Convert the sconscript name to a relative filename (relative from top - # setup.py's directory) - fullsconsname = self.paths(sconscript)[0] - - # XXX: Think about a way to automatically register source files from - # scons... - full_source_files = [] - if source_files: - full_source_files.extend([self.paths(i)[0] for i in source_files]) - - scons_info = SconsInfo(fullsconsname, parent_name, - pre_hook, post_hook, - full_source_files, package_path) - if dist is not None: - if dist.scons_data is None: - dist.scons_data = [] - dist.scons_data.append(scons_info) - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - # XXX: we add a fake extension, to correctly initialize some - # options in distutils command. - dist.add_extension('', sources = []) - else: - self.scons_data.append(scons_info) - # XXX: we add a fake extension, to correctly initialize some - # options in distutils command. - self.add_extension('', sources = []) - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. 
- - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self,key) - a.extend(dict.get(key,[])) - for key in self.dict_keys: - a = getattr(self,key) - a.update(dict.get(key,{})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key,dict[key],dict.get('name','?'))) - setattr(self,key,dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self,key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self,k,None) - if a: - s += '%s = %s\n' % (k,pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.',old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. 
- - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib,Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self,path): - """Return path's SVN revision number. 
- """ - revision = None - m = None - cwd = os.getcwd() - try: - os.chdir(path or '.') - p = subprocess.Popen(['svnversion'], shell=True, - stdout=subprocess.PIPE, stderr=None, - close_fds=True) - sout = p.stdout - m = re.match(r'(?P\d+)', sout.read()) - except: - pass - os.chdir(cwd) - if m: - revision = int(m.group('revision')) - return revision - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None): - entries = njoin(path,'_svn','entries') - else: - entries = njoin(path,'.svn','entries') - if os.path.isfile(entries): - f = open(entries) - fstr = f.read() - f.close() - if fstr[:5] == '\d+)"',fstr) - if m: - revision = int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - revision = int(m.group('revision')) - return revision - - def _get_hg_revision(self,path): - """Return path's Mercurial revision number. - """ - revision = None - m = None - cwd = os.getcwd() - try: - os.chdir(path or '.') - p = subprocess.Popen(['hg identify --num'], shell=True, - stdout=subprocess.PIPE, stderr=None, - close_fds=True) - sout = p.stdout - m = re.match(r'(?P\d+)', sout.read()) - except: - pass - os.chdir(cwd) - if m: - revision = int(m.group('revision')) - return revision - branch_fn = njoin(path,'.hg','branch') - branch_cache_fn = njoin(path,'.hg','branch.cache') - - if os.path.isfile(branch_fn): - branch0 = None - f = open(branch_fn) - revision0 = f.read().strip() - f.close() - - branch_map = {} - for line in file(branch_cache_fn, 'r'): - branch1, revision1 = line.split()[:2] - if revision1==revision0: - branch0 = branch1 - try: - revision1 = int(revision1) - except ValueError: - continue - branch_map[branch1] = revision1 - - revision = branch_map.get(branch0) - return revision - - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. 
- - Return a version string of the current package or None if the version - information could not be detected. - - Notes - ----- - This method scans files named - __version__.py, _version.py, version.py, and - __svn_version__.py for string variables version, __version\__, and - _version, until a version number is found. - """ - version = getattr(self,'version',None) - if version is not None: - return version - - # Get version from version file. - if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path,f) - if os.path.isfile(fn): - info = (open(fn),fn,('.py','U',1)) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name,name) - try: - version_module = imp.load_module('_'.join(n.split('.')),*info) - except ImportError: - msg = get_exception() - self.warn(str(msg)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module,a,None) - if version is not None: - break - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. 
- - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. - """ - target = njoin(self.local_path,'__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target,version)) - f = open(target,'w') - f.write('version = %r\n' % (version)) - f.close() - - import atexit - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in an Mercurial repository. 
- """ - target = njoin(self.local_path,'__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target,version)) - f = open(target,'w') - f.write('version = %r\n' % (version)) - f.close() - - import atexit - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name,name,generate_config_py)) - - def scons_make_config_py(self, name = '__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - """ - self.py_modules.append((self.name, name, scons_generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. 
- """ - from system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory.""" - # XXX: import here for bootstrapping reasons - import numpy - d = os.path.join(os.path.dirname(numpy.__file__), - 'core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): 
- import __builtin__ - try: - __builtin__.__NUMPY_SETUP__ - return True - except AttributeError: - return False - __NUMPY_SETUP__ = False - -def scons_generate_config_py(target): - """generate config.py file containing system_info information - used during building the package. - - usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from distutils.dir_util import mkpath - from numscons import get_scons_configres_dir, get_scons_configres_filename - d = {} - mkpath(os.path.dirname(target)) - f = open(target, 'w') - f.write('# this file is generated by %s\n' % (os.path.abspath(sys.argv[0]))) - f.write('# it contains system_info results at the time of building this package.\n') - f.write('__all__ = ["show"]\n\n') - confdir = get_scons_configres_dir() - confilename = get_scons_configres_filename() - for root, dirs, files in os.walk(confdir): - if files: - file = os.path.join(root, confilename) - assert root.startswith(confdir) - pkg_name = '.'.join(root[len(confdir)+1:].split(os.sep)) - fid = open(file, 'r') - try: - cnt = fid.read() - d[pkg_name] = eval(cnt) - finally: - fid.close() - # d is a dictionary whose keys are package names, and values the - # corresponding configuration. Each configuration is itself a dictionary - # (lib : libinfo) - f.write('_config = %s\n' % d) - f.write(r''' -def show(): - for pkg, config in _config.items(): - print("package %s configuration:" % pkg) - for lib, libc in config.items(): - print(' %s' % lib) - for line in libc.split('\n'): - print('\t%s' % line) - ''') - f.close() - return target - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - )) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov,str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. 
- - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - f = open(target, 'w') - f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(r''' -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - ''') - - f.close() - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -if sys.version[:3] >= '2.5': - def get_build_architecture(): - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() -else: - #copied from python 2.5.1 distutils/msvccompiler.py - def get_build_architecture(): - """Return the processor architecture. - - Possible results are "Intel", "Itanium", or "AMD64". 
- """ - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return "Intel" - j = sys.version.find(")", i) - return sys.version[i+len(prefix):j] diff --git a/numpy-1.6.2/numpy/distutils/npy_pkg_config.py b/numpy-1.6.2/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 4f64623eda..0000000000 --- a/numpy-1.6.2/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,461 +0,0 @@ -import sys -if sys.version_info[0] < 3: - from ConfigParser import SafeConfigParser, NoOptionError -else: - from configparser import ConfigParser, SafeConfigParser, NoOptionError -import re -import os -import shlex - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(IOError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(IOError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. 
- These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - lexer = shlex.shlex(line) - lexer.whitespace_split = True - - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - def next_token(t): - if t.startswith('-I'): - if len(t) > 2: - d['include_dirs'].append(t[2:]) - else: - t = lexer.get_token() - d['include_dirs'].append(t) - elif t.startswith('-L'): - if len(t) > 2: - d['library_dirs'].append(t[2:]) - else: - t = lexer.get_token() - d['library_dirs'].append(t) - elif t.startswith('-l'): - d['libraries'].append(t[2:]) - elif t.startswith('-D'): - d['macros'].append(t[2:]) - else: - d['ignored'].append(t) - return lexer.get_token() - - t = lexer.get_token() - while t: - t = next_token(t) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo(object): - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. 
- - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. - - """ - return self._sections.keys() - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name] - m.append('Description: %s' % self.description) - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet(object): - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. 
- - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. - - """ - return self._raw_data.keys() - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = {} - for name, value in config.items('meta'): - d[name] = value - - for k in ['name', 'description', 'version']: - if not d.has_key(k): - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not d.has_key('requires'): - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def 
parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - if sys.version[:3] > '3.1': - # SafeConfigParser is deprecated in py-3.2 and renamed to ConfigParser - config = ConfigParser() - else: - config = SafeConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not vars.has_key(k): - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. 
This requires the package to be imported to work - if not vars.has_key("pkgdir") and vars.has_key("pkgname"): - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print npymath_info - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - import sys - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print ("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - import os - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d]) - else: - info = read_config(pkg_name, 
['numpy/core/lib/npy-pkg-config', '.']) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search('([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " \ - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print (info.cflags(section)) - if options.libs: - print (info.libs(section)) - if options.version: - print (info.version) - if options.min_version: - print (info.version >= options.min_version) diff --git a/numpy-1.6.2/numpy/distutils/numpy_distribution.py b/numpy-1.6.2/numpy/distutils/numpy_distribution.py deleted file mode 100644 index ea8182659c..0000000000 --- a/numpy-1.6.2/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,17 +0,0 @@ -# XXX: Handle setuptools ? -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/numpy-1.6.2/numpy/distutils/pathccompiler.py b/numpy-1.6.2/numpy/distutils/pathccompiler.py deleted file mode 100644 index 48051810ee..0000000000 --- a/numpy-1.6.2/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,21 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with an gcc built Python. 
- """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/numpy-1.6.2/numpy/distutils/setup.py b/numpy-1.6.2/numpy/distutils/setup.py deleted file mode 100644 index afc1fadd23..0000000000 --- a/numpy-1.6.2/numpy/distutils/setup.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('distutils',parent_package,top_path) - config.add_subpackage('command') - config.add_subpackage('fcompiler') - config.add_data_dir('tests') - config.add_data_files('site.cfg') - config.add_data_files('mingw/gfortran_vs2003_hack.c') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/setupscons.py b/numpy-1.6.2/numpy/distutils/setupscons.py deleted file mode 100644 index 938f07ead0..0000000000 --- a/numpy-1.6.2/numpy/distutils/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -import os.path - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('distutils',parent_package,top_path) - config.add_subpackage('command') - config.add_subpackage('fcompiler') - config.add_data_dir('tests') - if os.path.exists("site.cfg"): - config.add_data_files('site.cfg') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/system_info.py 
b/numpy-1.6.2/numpy/distutils/system_info.py deleted file mode 100644 index 8174eaee9b..0000000000 --- a/numpy-1.6.2/numpy/distutils/system_info.py +++ /dev/null @@ -1,2042 +0,0 @@ -#!/bin/env python -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. Currently, the following -classes are available: - - atlas_info - atlas_threads_info - atlas_blas_info - atlas_blas_threads_info - lapack_atlas_info - blas_info - lapack_info - blas_opt_info # usage recommended - lapack_opt_info # usage recommended - fftw_info,dfftw_info,sfftw_info - fftw_threads_info,dfftw_threads_info,sfftw_threads_info - djbfft_info - x11_info - lapack_src_info - blas_src_info - numpy_info - numarray_info - numpy_info - boost_python_info - agg2_info - wx_info - gdk_pixbuf_xlib_2_info - gdk_pixbuf_2_info - gdk_x11_2_info - gtkp_x11_2_info - gtkp_2_info - xft_info - freetype2_info - umfpack_info - -Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. 
-2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) - -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL has options that are the default for each section. The -available sections are fftw, atlas, and x11. Appropiate defaults are -used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. ALL section in site.cfg -Only the first complete match is returned. - -Example: ----------- -[ALL] -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -fftw_libs = rfftw, fftw -fftw_opt_libs = rfftw_threaded, fftw_threaded -# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -atlas_libs = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-""" - -import sys -import os -import re -import copy -import warnings -from glob import glob -if sys.version_info[0] < 3: - from ConfigParser import NoOptionError, ConfigParser -else: - from configparser import NoOptionError, ConfigParser - -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import distutils.sysconfig -from distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import \ - find_executable, exec_command, get_pythonexe -from numpy.distutils.misc_util import is_sequence, is_string, \ - get_shared_lib_extension -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils.compat import get_exception - -if sys.version_info[0] >= 3: - from functools import reduce - -# Determine number of bits -import platform -_bits = {'32bit':32,'64bit':64} -platform_bits = _bits[platform.architecture()[0]] - -def libpaths(paths,bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. 
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits==32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p+'64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(distutils.sysconfig.EXEC_PREFIX, - 'libs')] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] -else: - default_lib_dirs = libpaths(['/usr/local/lib','/opt/lib','/usr/lib', - '/opt/local/lib','/sw/lib'], platform_bits) - default_include_dirs = ['/usr/local/include', - '/opt/include', '/usr/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.','/usr/local/src', '/opt/src','/sw/src'] - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib','/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include','/usr/X11/include', - '/usr/include'] - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0,os.path.join(sys.prefix, 'lib')) - 
default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = filter(os.path.isdir, default_lib_dirs) -default_include_dirs = filter(os.path.isdir, default_include_dirs) -default_src_dirs = filter(os.path.isdir, default_src_dirs) - -so_ext = get_shared_lib_extension() - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - else: - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.environ['HOME'] - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - -def get_info(name,notfound_action=0): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'atlas':atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads':atlas_threads_info, # ditto - 'atlas_blas':atlas_blas_info, - 'atlas_blas_threads':atlas_blas_threads_info, - 'lapack_atlas':lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads':lapack_atlas_threads_info, # ditto - 'mkl':mkl_info, - 'lapack_mkl':lapack_mkl_info, # use lapack_opt instead - 'blas_mkl':blas_mkl_info, # use blas_opt instead - 'x11':x11_info, - 'fft_opt':fft_opt_info, - 'fftw':fftw_info, - 'fftw2':fftw2_info, - 'fftw3':fftw3_info, - 'dfftw':dfftw_info, - 'sfftw':sfftw_info, - 'fftw_threads':fftw_threads_info, - 'dfftw_threads':dfftw_threads_info, - 'sfftw_threads':sfftw_threads_info, - 
'djbfft':djbfft_info, - 'blas':blas_info, # use blas_opt instead - 'lapack':lapack_info, # use lapack_opt instead - 'lapack_src':lapack_src_info, - 'blas_src':blas_src_info, - 'numpy':numpy_info, - 'f2py':f2py_info, - 'Numeric':Numeric_info, - 'numeric':Numeric_info, - 'numarray':numarray_info, - 'numerix':numerix_info, - 'lapack_opt':lapack_opt_info, - 'blas_opt':blas_opt_info, - 'boost_python':boost_python_info, - 'agg2':agg2_info, - 'wx':wx_info, - 'gdk_pixbuf_xlib_2':gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0':gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2':gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0':gdk_pixbuf_2_info, - 'gdk':gdk_info, - 'gdk_2':gdk_2_info, - 'gdk-2.0':gdk_2_info, - 'gdk_x11_2':gdk_x11_2_info, - 'gdk-x11-2.0':gdk_x11_2_info, - 'gtkp_x11_2':gtkp_x11_2_info, - 'gtk+-x11-2.0':gtkp_x11_2_info, - 'gtkp_2':gtkp_2_info, - 'gtk+-2.0':gtkp_2_info, - 'xft':xft_info, - 'freetype2':freetype2_info, - 'umfpack':umfpack_info, - 'amd':amd_info, - }.get(name.lower(),system_info) - return cl().get_info(notfound_action) - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://math-atlas.sourceforge.net/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. 
- Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (http://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - -class NumericNotFoundError(NotFoundError): - """ - Numeric (http://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - -class system_info: - - """ get_info() is the only public method. Don't use others. 
- """ - section = 'ALL' - dir_env_var = None - search_static_first = 0 # XXX: disabled by default, may disappear in - # future unless it is proved to be useful. - verbosity = 1 - saved_results = {} - - notfounderror = NotFoundError - - def __init__ (self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity = 1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {} - defaults['libraries'] = '' - defaults['library_dirs'] = os.pathsep.join(default_lib_dirs) - defaults['include_dirs'] = os.pathsep.join(default_include_dirs) - defaults['src_dirs'] = os.pathsep.join(default_src_dirs) - defaults['search_static_first'] = str(self.search_static_first) - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - if self.section is not None: - self.search_static_first = self.cp.getboolean(self.section, - 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - info = {} - for lib in libs: - i = None - for d in dirs: - i = self.check_libs(d,[lib]) - if i is not None: - break - if i is not None: - dict_append(info,**i) - else: - log.info('Library %s was not found. Ignoring' % (lib)) - return info - - def set_info(self,**info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info,**lib_info) - self.saved_results[self.__class__.__name__] = info - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def get_info(self,notfound_action=0): - """ Return a dictonary with items that are compatible - with numpy.distutils.setup keyword arguments. 
- """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action==1: - warnings.warn(self.notfounderror.__doc__) - elif notfound_action==2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if self.verbosity>0 and flag: - for k,v in res.items(): - v = str(v) - if k in ['sources','libraries'] and len(v)>270: - v = v[:120]+'...\n...\n...'+v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0]==e0: - log.info('Setting %s=%s' % (env_var[0],e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d=='None': - log.info('Disabled %s: %s',self.__class__.__name__,'(%s is None)' \ - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self,'_lib_names',[]) - if len(l)==1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3]=='lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include','lib']: - d1 = os.path.join(d,dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if not os.path.isdir(d): - warnings.warn('Specified path 
%s is invalid.' % d) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - return self.get_libs(key,'') - - def library_extensions(self): - static_exts = ['.a'] - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - # Debian and Ubuntu added a g3f suffix to shared library to deal with - # g77 -> gfortran ABI transition - # XXX: disabled, it hides more problem than it solves. - #if sys.platform[:5] == 'linux': - # exts.append('.so.3gf') - return exts - - def check_libs(self,lib_dir,libs,opt_libs =[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). - """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dir,libs,opt_libs,[ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), lib_dir) - return info - - def check_libs2(self, lib_dir, libs, opt_libs =[]): - """If static or shared libraries are available then return - their info dictionary. 
- - Checks each library for shared or static. - """ - exts = self.library_extensions() - info = self._check_libs(lib_dir,libs,opt_libs,exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), lib_dir) - return info - - def _lib_list(self, lib_dir, libs, exts): - assert is_string(lib_dir) - liblist = [] - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for l in libs: - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix+l+ext) - if p: - break - if p: - assert len(p)==1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - l += '.dll' - liblist.append(l) - break - return liblist - - def _check_libs(self, lib_dir, libs, opt_libs, exts): - found_libs = self._lib_list(lib_dir, libs, exts) - if len(found_libs) == len(libs): - info = {'libraries' : found_libs, 'library_dirs' : [lib_dir]} - opt_found_libs = self._lib_list(lib_dir, opt_libs, exts) - if len(opt_found_libs) == len(opt_libs): - info['libraries'].extend(opt_found_libs) - return info - else: - return None - - def combine_paths(self,*args): - """Return a list of existing paths composed by all combinations - of items from the arguments. 
- """ - return combine_paths(*args,**{'verbosity':self.verbosity}) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info,**fftw_info) - if djbfft_info: - dict_append(info,**djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [ { 'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H',None)]}, - { 'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h','rfftw.h'], - 'macros':[('SCIPY_FFTW_H',None)]}] - - def __init__(self): - system_info.__init__(self) - - def calc_ver_info(self,ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - incl_dir = None - libs = self.get_libs(self.section+'_libs', ver_param['libs']) - info = None - for d in lib_dirs: - r = self.check_libs(d,libs) - if r is not None: - info = r - break - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d,ver_param['includes']))==len(ver_param['includes']): - dict_append(info,include_dirs=[d]) - flag = 1 - incl_dirs = [d] - incl_dir = d - break - if flag: - dict_append(info,define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [ { 'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h','rfftw.h'], - 'macros':[('SCIPY_FFTW_H',None)]} - ] - -class 
fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [ { 'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H',None)]}, - ] - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'dfftw', - 'libs':['drfftw','dfftw'], - 'includes':['dfftw.h','drfftw.h'], - 'macros':[('SCIPY_DFFTW_H',None)]} ] - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'sfftw', - 'libs':['srfftw','sfftw'], - 'includes':['sfftw.h','srfftw.h'], - 'macros':[('SCIPY_SFFTW_H',None)]} ] - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'fftw threads', - 'libs':['rfftw_threads','fftw_threads'], - 'includes':['fftw_threads.h','rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H',None)]} ] - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'dfftw threads', - 'libs':['drfftw_threads','dfftw_threads'], - 'includes':['dfftw_threads.h','drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H',None)]} ] - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [ { 'name':'sfftw threads', - 'libs':['srfftw_threads','sfftw_threads'], - 'includes':['sfftw_threads.h','srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H',None)]} ] - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d,['djbfft'])+[d]) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths (d,['djbfft.a']) - if p: - 
info = {'extra_objects':p} - break - p = self.combine_paths (d,['libdjbfft.a','libdjbfft'+so_ext]) - if p: - info = {'libraries':['djbfft'],'library_dirs':[d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d,['fftc8.h','fftfreq.h']))==2: - dict_append(info,include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H',None)]) - self.set_info(**info) - return - return - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKL' - _lib_mkl = ['mkl','vml','guide'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT',None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH','').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - for d in open(ld_so_conf,'r').readlines(): - d = d.strip() - if d: paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m)+2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d,'mkl','*')) + glob(os.path.join(d,'mkl*')) - for d in dirs: - if os.path.isdir(os.path.join(d,'lib')): - return d - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from cpuinfo import cpu - l = 'mkl' # use shared library - if cpu.is_Itanium(): - plt = '64' - #l = 'mkl_ipf' - elif cpu.is_Xeon(): - plt = 'em64t' - #l = 'mkl_em64t' - else: - plt = '32' - #l = 'mkl_ia32' - if l not in self._lib_mkl: - self._lib_mkl.insert(0,l) - system_info.__init__(self, - default_lib_dirs=[os.path.join(mklroot,'lib',plt)], - default_include_dirs=[os.path.join(mklroot,'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - mkl_libs = self.get_libs('mkl_libs',self._lib_mkl) - mkl = None - for d in lib_dirs: - mkl = self.check_libs2(d,mkl_libs) - if mkl is not None: - 
break - if mkl is None: - return - info = {} - dict_append(info,**mkl) - dict_append(info, - define_macros=[('SCIPY_MKL_H',None)], - include_dirs = incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - -class lapack_mkl_info(mkl_info): - - def calc_info(self): - mkl = get_info('mkl') - if not mkl: - return - if sys.platform == 'win32': - lapack_libs = self.get_libs('lapack_libs',['mkl_lapack']) - else: - lapack_libs = self.get_libs('lapack_libs',['mkl_lapack32','mkl_lapack64']) - - info = {'libraries': lapack_libs} - dict_append(info,**mkl) - self.set_info(**info) - -class blas_mkl_info(mkl_info): - pass - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas','cblas'] - if sys.platform[:7]=='freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d,['atlas*','ATLAS*', - 'sse','3dnow','sse2'])+[d]) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - atlas_libs = self.get_libs('atlas_libs', - self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs',self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d,atlas_libs,[]) - lapack_atlas = self.check_libs2(d,['lapack_atlas'],[]) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d,['atlas*','ATLAS*']) - for d2 in lib_dirs2: - lapack = self.check_libs2(d2,lapack_libs,[]) - if lapack is not None: - break - else: - lapack = None - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = 
atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs+include_dirs,'cblas.h') or [None])[0] - if h: - h = os.path.dirname(h) - dict_append(info,include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info,**lapack) - dict_append(info,**atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info,**atlas) - dict_append(info,define_macros=[('ATLAS_WITH_LAPACK_ATLAS',None)]) - self.set_info(**info) - return - else: - dict_append(info,**atlas) - dict_append(info,define_macros=[('ATLAS_WITHOUT_LAPACK',None)]) - message = """ -********************************************************************* - Could not find lapack library within the ATLAS installation. -********************************************************************* -""" - warnings.warn(message) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir,prefix+lapack_name+e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000*1024: - message = """ -********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. 
-********************************************************************* -""" % (lapack_lib,sz/1024) - warnings.warn(message) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas','cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - atlas_libs = self.get_libs('atlas_libs', - self._lib_names + self._lib_atlas) - atlas = None - for d in lib_dirs: - atlas = self.check_libs2(d,atlas_libs,[]) - if atlas is not None: - break - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs+include_dirs,'cblas.h') or [None])[0] - if h: - h = os.path.dirname(h) - dict_append(info,include_dirs=[h]) - info['language'] = 'c' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info,**atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS','ATLAS'] - _lib_names = ['ptf77blas','ptcblas'] - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS','ATLAS'] - _lib_names = ['ptf77blas','ptcblas'] - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - lapack_libs = self.get_libs('lapack_libs', self._lib_names) - for d in lib_dirs: - lapack = self.check_libs(d,lapack_libs,[]) - if lapack is not None: - info = lapack - break - else: - return - info['language'] = 'f77' - self.set_info(**info) - -class lapack_src_info(system_info): - section = 
'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['LAPACK*/SRC','SRC'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. - allaux=''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft 
larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f 
- oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f'%f for f in (sclaux+slasrc).split()] \ - + ['d%s.f'%f for f in (dzlaux+dlasrc).split()] \ - + ['c%s.f'%f for f in (clasrc).split()] \ - + ['z%s.f'%f for f in (zlasrc).split()] \ - + ['%s.f'%f for f in (allaux+oclasrc+ozlasrc).split()] - sources = [os.path.join(src_dir,f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir,'..','INSTALL') - sources += [os.path.join(src_dir2,p+'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir,p+'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir,'ila'+p+'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir,'ila'+p+'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources':sources,'language':'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs) - if s and re.search (r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], library_dirs=library_dirs) - if not s: - warnings.warn(""" -***************************************************** -Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... 
- -when building extension libraries that use ATLAS. -Make sure that -lgfortran is used for C++ extensions. -***************************************************** -""") - dict_append(info, language='f90', - define_macros = [('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception, msg: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_',o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - - break - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION',None) - if atlas_version: - dict_append(info, define_macros = [('ATLAS_INFO','"\\"%s\\""' % atlas_version)]) - else: - dict_append(info, define_macros = [('NO_ATLAS_INFO',-1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)',o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo',o,re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - if atlas_version=='3.2.1_pre3.3.6': - dict_append(info, define_macros = [('NO_ATLAS_INFO',-2)]) - else: - dict_append(info, define_macros = [('ATLAS_INFO','"\\"%s\\""' % atlas_version)]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - -class lapack_opt_info(system_info): - - notfounderror = LapackNotFoundError - - def calc_info(self): - - if sys.platform=='darwin' and not os.environ.get('ATLAS',None): - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework','-Wl,Accelerate']) - 
elif os.path.exists('/System/Library/Frameworks/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework','-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO',3)]) - return - - lapack_mkl_info = get_info('lapack_mkl') - if lapack_mkl_info: - self.set_info(**lapack_mkl_info) - return - - atlas_info = get_info('atlas_threads') - if not atlas_info: - atlas_info = get_info('atlas') - #atlas_info = {} ## uncomment for testing - atlas_version = None - need_lapack = 0 - need_blas = 0 - info = {} - if atlas_info: - l = atlas_info.get ('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS',None) in l \ - or ('ATLAS_WITHOUT_LAPACK',None) in l: - need_lapack = 1 - info = atlas_info - - else: - warnings.warn(AtlasNotFoundError.__doc__) - need_blas = 1 - need_lapack = 1 - dict_append(info,define_macros=[('NO_ATLAS_INFO',1)]) - - if need_lapack: - lapack_info = get_info('lapack') - #lapack_info = {} ## uncomment for testing - if lapack_info: - dict_append(info,**lapack_info) - else: - warnings.warn(LapackNotFoundError.__doc__) - lapack_src_info = get_info('lapack_src') - if not lapack_src_info: - warnings.warn(LapackSrcNotFoundError.__doc__) - return - dict_append(info,libraries=[('flapack_src',lapack_src_info)]) - - if need_blas: - blas_info = get_info('blas') - #blas_info = {} ## uncomment for testing - if blas_info: - dict_append(info,**blas_info) - else: - warnings.warn(BlasNotFoundError.__doc__) - blas_src_info = get_info('blas_src') - if not blas_src_info: - warnings.warn(BlasSrcNotFoundError.__doc__) - return - dict_append(info,libraries=[('fblas_src',blas_src_info)]) - - self.set_info(**info) - return - - -class blas_opt_info(system_info): - - notfounderror = BlasNotFoundError - - def calc_info(self): - - if sys.platform=='darwin' and not os.environ.get('ATLAS',None): - args = [] - link_args = [] - if get_platform()[-4:] == 
'i386' or 'intel' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework','-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework','-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO',3)]) - return - - blas_mkl_info = get_info('blas_mkl') - if blas_mkl_info: - self.set_info(**blas_mkl_info) - return - - atlas_info = get_info('atlas_blas_threads') - if not atlas_info: - atlas_info = get_info('atlas_blas') - atlas_version = None - need_blas = 0 - info = {} - if atlas_info: - info = atlas_info - else: - warnings.warn(AtlasNotFoundError.__doc__) - need_blas = 1 - dict_append(info,define_macros=[('NO_ATLAS_INFO',1)]) - - if need_blas: - blas_info = get_info('blas') - if blas_info: - dict_append(info,**blas_info) - else: - warnings.warn(BlasNotFoundError.__doc__) - blas_src_info = get_info('blas_src') - if not blas_src_info: - warnings.warn(BlasSrcNotFoundError.__doc__) - return - dict_append(info,libraries=[('fblas_src',blas_src_info)]) - - self.set_info(**info) - return - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - blas_libs = self.get_libs('blas_libs', self._lib_names) - for d in lib_dirs: - blas = self.check_libs(d,blas_libs,[]) - if blas is not None: - info = blas - break - else: - return - info['language'] = 'f77' # XXX: is it 
generally true? - self.set_info(**info) - - -class blas_src_info(system_info): - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['blas'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir,f+'.f') \ - for f in (blas1+blas2+blas3).split()] - #XXX: should we check here actual existence of source files? 
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources':sources,'language':'f77'} - self.set_info(**info) - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - x11_libs = self.get_libs('x11_libs', ['X11']) - for lib_dir in lib_dirs: - info = self.check_libs(lib_dir, x11_libs, []) - if info is not None: - break - else: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name=='lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(distutils.sysconfig.get_python_inc( - prefix=os.sep.join(prefix))) - except ImportError: - pass - py_incl_dir = distutils.sysconfig.get_python_inc() - include_dirs.append(py_incl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__','version']: - vrs = getattr(module,v,None) - if vrs is None: - 
continue - macros = [(self.modulename.upper()+'_VERSION', - '"\\"%s\\""' % (vrs)), - (self.modulename.upper(),None)] - break -## try: -## macros.append( -## (self.modulename.upper()+'_VERSION_HEX', -## hex(vstr2hex(module.__version__))), -## ) -## except Exception,msg: -## print msg - dict_append(info, define_macros = macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - -class numerix_info(system_info): - section = 'numerix' - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy - which = "numpy", "defaulted" - except ImportError: - msg1 = str(get_exception()) - try: - import Numeric - which = "numeric", "defaulted" - except ImportError: - msg2 = str(get_exception()) - try: - import numarray - which = "numarray", "defaulted" - except ImportError: - msg3 = str(get_exception()) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." 
% (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__),'src') - self.set_info(sources = [os.path.join(f2py_dir,'fortranobject.c')], - include_dirs = [f2py_dir]) - return - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['boost*'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'libs','python','src','module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dir = distutils.sysconfig.get_python_inc() - srcs_dir = os.path.join(src_dir,'libs','python','src') - bpl_srcs = glob(os.path.join(srcs_dir,'*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir,'*','*.cpp')) - info = {'libraries':[('boost_python_src',{'include_dirs':[src_dir,py_incl_dir], - 'sources':bpl_srcs})], - 'include_dirs':[src_dir], - } - if info: - self.set_info(**info) - return - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d,['agg2*'])) - return [ d for d in dirs if os.path.isdir(d) ] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d,'src','agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if sys.platform=='win32': - agg2_srcs = glob(os.path.join(src_dir,'src','platform','win32','agg_win32_bmp.cpp')) - else: - agg2_srcs = 
glob(os.path.join(src_dir,'src','*.cpp')) - agg2_srcs += [os.path.join(src_dir,'src','platform','X11','agg_platform_support.cpp')] - - info = {'libraries':[('agg2_src',{'sources':agg2_srcs, - 'include_dirs':[os.path.join(src_dir,'include')], - })], - 'include_dirs':[os.path.join(src_dir,'include')], - } - if info: - self.set_info(**info) - return - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - def get_config_output(self, config_exe, option): - s,o = exec_command(config_exe+' '+self.append_config_exe+' '+option,use_tee=0) - if not s: - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' 
\ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe,self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - '"\\"%s\\""' % (version))) - if self.version_macro_name: - macros.append((self.version_macro_name+'_%s' % (version.replace('.','_')),None)) - if self.release_macro_name: - release = self.get_config_output(config_exe,'--release') - if release: - macros.append((self.release_macro_name+'_%s' % (release.replace('.','_')),None)) - opts = self.get_config_output(config_exe,'--libs') - if opts: - for opt in opts.split(): - if opt[:2]=='-l': - libraries.append(opt[2:]) - elif opt[:2]=='-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe,self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2]=='-I': - include_dirs.append(opt[2:]) - elif opt[:2]=='-D': - if '=' in opt: - n,v = opt[2:].split('=') - macros.append((n,v)) - else: - macros.append((opt[2:],None)) - else: - extra_compile_args.append(opt) - if macros: dict_append(info, define_macros = macros) - if libraries: dict_append(info, libraries = libraries) - if library_dirs: dict_append(info, library_dirs = library_dirs) - if include_dirs: dict_append(info, include_dirs = include_dirs) - if extra_link_args: dict_append(info, extra_link_args = extra_link_args) - if extra_compile_args: dict_append(info, extra_compile_args = extra_compile_args) - if info: - self.set_info(**info) - return - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - -class gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 
'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - amd_libs = self.get_libs('amd_libs', self._lib_names) - for d in lib_dirs: - amd = self.check_libs(d,amd_libs,[]) - if amd is not None: - info = amd - break - else: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d,'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H',None)], - swig_opts = ['-I' + inc_dir]) - - self.set_info(**info) - return - -class umfpack_info(system_info): - section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = 
UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - umfpack_libs = self.get_libs('umfpack_libs', self._lib_names) - for d in lib_dirs: - umf = self.check_libs(d,umfpack_libs,[]) - if umf is not None: - info = umf - break - else: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d,['','umfpack'],'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H',None)], - swig_opts = ['-I' + inc_dir]) - - amd = get_info('amd') - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - -## def vstr2hex(version): -## bits = [] -## n = [24,16,8,4,0] -## r = 0 -## for s in version.split('.'): -## r |= int(s) << n[0] -## del n[0] -## return r - -#-------------------------------------------------------------------- - -def combine_paths(*args,**kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. 
- """ - r = [] - for a in args: - if not a: continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: return [] - if len(args)==1: - result = reduce(lambda a,b:a+b,map(glob,args[0]),[]) - elif len (args)==2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0,a1))) - else: - result = combine_paths(*(combine_paths(args[0],args[1])+args[2:])) - verbosity = kws.get('verbosity',1) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c':0,'c++':1,'f77':2,'f90':3} -inv_language_map = {0:'c',1:'c++',2:'f77',3:'f90'} -def dict_append(d,**kws): - languages = [] - for k,v in kws.items(): - if k=='language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs','include_dirs','define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l,0) for l in languages])] - d['language'] = l - return - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.iteritems(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - r = conf.get_info() - if 
show_only: - log.info('Info classes not defined: %s',','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/__init__.py b/numpy-1.6.2/numpy/distutils/tests/f2py_ext/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/setup.py b/numpy-1.6.2/numpy/distutils/tests/f2py_ext/setup.py deleted file mode 100644 index e3dfddb747..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_ext',parent_package,top_path) - config.add_extension('fib2', ['src/fib2.pyf','src/fib1.f']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy-1.6.2/numpy/distutils/tests/f2py_ext/src/fib1.f deleted file mode 100644 index cfbb1eea0d..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/src/fib1.f +++ /dev/null @@ -1,18 +0,0 @@ -C FILE: FIB1.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB1.F diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy-1.6.2/numpy/distutils/tests/f2py_ext/src/fib2.pyf deleted file mode 100644 index 90a8cf00cb..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/src/fib2.pyf +++ /dev/null @@ -1,9 +0,0 @@ -! 
-*- f90 -*- -python module fib2 - interface - subroutine fib(a,n) - real*8 dimension(n),intent(out),depend(n) :: a - integer intent(in) :: n - end subroutine fib - end interface -end python module fib2 diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy-1.6.2/numpy/distutils/tests/f2py_ext/tests/test_fib2.py deleted file mode 100644 index 027f455dec..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_ext/tests/test_fib2.py +++ /dev/null @@ -1,11 +0,0 @@ -import sys -from numpy.testing import * -from f2py_ext import fib2 - -class TestFib2(TestCase): - - def test_fib(self): - assert_array_equal(fib2.fib(6),[0,1,1,2,3,5]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/include/body.f90 deleted file mode 100644 index 90b44e29dc..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/include/body.f90 +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/setup.py deleted file mode 100644 index ee56cc3a61..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/setup.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_f90_ext',parent_package,top_path) - config.add_extension('foo', - ['src/foo_free.f90'], - include_dirs=['include'], - f2py_options=['--include_paths', - config.paths('include')[0]] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from 
numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 b/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 deleted file mode 100644 index c7713be59e..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 +++ /dev/null @@ -1,6 +0,0 @@ -module foo_free -contains - -include "body.f90" - -end module foo_free diff --git a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py deleted file mode 100644 index 1543051dc3..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py +++ /dev/null @@ -1,10 +0,0 @@ -import sys -from numpy.testing import * -from f2py_f90_ext import foo - -class TestFoo(TestCase): - def test_foo_free(self): - assert_equal(foo.foo_free.bar13(),13) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/gen_ext/__init__.py b/numpy-1.6.2/numpy/distutils/tests/gen_ext/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/distutils/tests/gen_ext/setup.py b/numpy-1.6.2/numpy/distutils/tests/gen_ext/setup.py deleted file mode 100644 index bf029062c6..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/gen_ext/setup.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python - -fib3_f = ''' -C FILE: FIB3.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) -Cf2py intent(in) n -Cf2py intent(out) a -Cf2py depend(n) a - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB3.F -''' - -def source_func(ext, build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'fib3.f') - if newer(__file__, target): - f = open(target,'w') - f.write(fib3_f) - f.close() - return [target] - 
-def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('gen_ext',parent_package,top_path) - config.add_extension('fib3', - [source_func] - ) - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy-1.6.2/numpy/distutils/tests/gen_ext/tests/test_fib3.py deleted file mode 100644 index 8a9a443a59..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/gen_ext/tests/test_fib3.py +++ /dev/null @@ -1,10 +0,0 @@ -import sys -from numpy.testing import * -from gen_ext import fib3 - -class TestFib3(TestCase): - def test_fib(self): - assert_array_equal(fib3.fib(6),[0,1,1,2,3,5]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/__init__.py b/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/primes.pyx b/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/primes.pyx deleted file mode 100644 index 2ada0c5a08..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/primes.pyx +++ /dev/null @@ -1,22 +0,0 @@ -# -# Calculate prime numbers -# - -def primes(int kmax): - cdef int n, k, i - cdef int p[1000] - result = [] - if kmax > 1000: - kmax = 1000 - k = 0 - n = 2 - while k < kmax: - i = 0 - while i < k and n % p[i] <> 0: - i = i + 1 - if i == k: - p[k] = n - k = k + 1 - result.append(n) - n = n + 1 - return result diff --git a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/setup.py b/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/setup.py deleted file mode 100644 index 5b348b916b..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import 
Configuration - config = Configuration('pyrex_ext',parent_package,top_path) - config.add_extension('primes', - ['primes.pyx']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/tests/test_primes.py deleted file mode 100644 index eb2c91da77..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/pyrex_ext/tests/test_primes.py +++ /dev/null @@ -1,12 +0,0 @@ -import sys -from numpy.testing import * -from pyrex_ext.primes import primes - -class TestPrimes(TestCase): - def test_simple(self, level=1): - l = primes(10) - assert_equal(l, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/setup.py b/numpy-1.6.2/numpy/distutils/tests/setup.py deleted file mode 100644 index 89d73800ed..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testnumpydistutils',parent_package,top_path) - config.add_subpackage('pyrex_ext') - config.add_subpackage('f2py_ext') - #config.add_subpackage('f2py_f90_ext') - config.add_subpackage('swig_ext') - config.add_subpackage('gen_ext') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/__init__.py b/numpy-1.6.2/numpy/distutils/tests/swig_ext/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/setup.py b/numpy-1.6.2/numpy/distutils/tests/swig_ext/setup.py deleted file mode 100644 index 7f0dbe6271..0000000000 --- 
a/numpy-1.6.2/numpy/distutils/tests/swig_ext/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('swig_ext',parent_package,top_path) - config.add_extension('_example', - ['src/example.i','src/example.c'] - ) - config.add_extension('_example2', - ['src/zoo.i','src/zoo.cc'], - depends=['src/zoo.h'], - include_dirs=['src'] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/example.c b/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/example.c deleted file mode 100644 index be151725ce..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/example.c +++ /dev/null @@ -1,14 +0,0 @@ -/* File : example.c */ - -double My_variable = 3.0; - -/* Compute factorial of n */ -int fact(int n) { - if (n <= 1) return 1; - else return n*fact(n-1); -} - -/* Compute n mod m */ -int my_mod(int n, int m) { - return(n % m); -} diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/example.i b/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/example.i deleted file mode 100644 index f4fc11e663..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/example.i +++ /dev/null @@ -1,14 +0,0 @@ -/* -*- c -*- */ - -/* File : example.i */ -%module example -%{ -/* Put headers and other declarations here */ -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); -%} - -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.cc deleted file mode 100644 index 0a643d1e5d..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.cc +++ /dev/null @@ -1,23 +0,0 @@ -#include "zoo.h" -#include 
-#include - -Zoo::Zoo() -{ - n = 0; -} - -void Zoo::shut_up(char *animal) -{ - if (n < 10) { - strcpy(animals[n], animal); - n++; - } -} - -void Zoo::display() -{ - int i; - for(i = 0; i < n; i++) - printf("%s\n", animals[i]); -} diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.h deleted file mode 100644 index cb26e6ceff..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.h +++ /dev/null @@ -1,9 +0,0 @@ - -class Zoo{ - int n; - char animals[10][50]; -public: - Zoo(); - void shut_up(char *animal); - void display(); -}; diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.i deleted file mode 100644 index a029c03e84..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/src/zoo.i +++ /dev/null @@ -1,10 +0,0 @@ -// -*- c++ -*- -// Example copied from http://linuxgazette.net/issue49/pramode.html - -%module example2 - -%{ -#include "zoo.h" -%} - -%include "zoo.h" diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy-1.6.2/numpy/distutils/tests/swig_ext/tests/test_example.py deleted file mode 100644 index 9afc01cb22..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/tests/test_example.py +++ /dev/null @@ -1,16 +0,0 @@ -import sys -from numpy.testing import * -from swig_ext import example - -class TestExample(TestCase): - def test_fact(self): - assert_equal(example.fact(10),3628800) - - def test_cvar(self): - assert_equal(example.cvar.My_variable,3.0) - example.cvar.My_variable = 5 - assert_equal(example.cvar.My_variable,5.0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy-1.6.2/numpy/distutils/tests/swig_ext/tests/test_example2.py deleted file mode 100644 index 42d1fcbcd6..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/swig_ext/tests/test_example2.py +++ /dev/null @@ -1,14 +0,0 @@ 
-import sys -from numpy.testing import * -from swig_ext import example2 - -class TestExample2(TestCase): - def test_zoo(self): - z = example2.Zoo() - z.shut_up('Tiger') - z.shut_up('Lion') - z.display() - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy-1.6.2/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 3d727cd94e..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,50 +0,0 @@ -from numpy.testing import * - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), -] - -class TestG77Versions(TestCase): - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert v == version, (vs, v) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert v is None, (vs, v) - -class TestGortranVersions(TestCase): - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert v == version, (vs, v) - - def test_not_gfortran(self): - fc = 
numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert v is None, (vs, v) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/test_fcompiler_intel.py b/numpy-1.6.2/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index ad03daeeae..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,34 +0,0 @@ -from numpy.testing import * - -import numpy.distutils.fcompiler - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"\ - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications"\ - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"\ - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions(TestCase): - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions(TestCase): - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/test_misc_util.py b/numpy-1.6.2/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 6a671a9315..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python - -from numpy.testing import * -from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path -from os.path import join, sep, dirname - -ajoin = lambda 
*paths: join(*((sep,)+paths)) - -class TestAppendpath(TestCase): - - def test_1(self): - assert_equal(appendpath('prefix','name'),join('prefix','name')) - assert_equal(appendpath('/prefix','name'),ajoin('prefix','name')) - assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name')) - assert_equal(appendpath('prefix','/name'),join('prefix','name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub','name'), - join('prefix','sub','name')) - assert_equal(appendpath('prefix/sub','sup/name'), - join('prefix','sub','sup','name')) - assert_equal(appendpath('/prefix/sub','/prefix/name'), - ajoin('prefix','sub','name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub','/prefix/sup/name'), - ajoin('prefix','sub','sup','name')) - assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'), - ajoin('prefix','sub','sub2','sup','sup2','name')) - assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'), - ajoin('prefix','sub','sub2','sup','name')) - -class TestMinrelpath(TestCase): - - def test_1(self): - n = lambda path: path.replace('/',sep) - assert_equal(minrelpath(n('aa/bb')),n('aa/bb')) - assert_equal(minrelpath('..'),'..') - assert_equal(minrelpath(n('aa/..')),'') - assert_equal(minrelpath(n('aa/../bb')),'bb') - assert_equal(minrelpath(n('aa/bb/..')),'aa') - assert_equal(minrelpath(n('aa/bb/../..')),'') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')),n('aa/dd')) - assert_equal(minrelpath(n('.././..')),n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')),n('dd')) - -class TestGpaths(TestCase): - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__),'..')) - ls = gpaths('command/*.py', local_path) - assert join(local_path,'command','build_src.py') in ls,`ls` - f = gpaths('system_info.py', local_path) - assert join(local_path,'system_info.py')==f[0],`f` - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/distutils/tests/test_npy_pkg_config.py 
b/numpy-1.6.2/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index 6122e303bf..0000000000 --- a/numpy-1.6.2/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from tempfile import mkstemp - -from numpy.testing import * -from numpy.distutils.npy_pkg_config import read_config, parse_flags - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo(TestCase): - def test_simple(self): - fd, filename = mkstemp('foo.ini') - try: - pkg = os.path.splitext(filename)[0] - try: - os.write(fd, simple.encode('ascii')) - finally: - os.close(fd) - - out = read_config(pkg) - self.assertTrue(out.cflags() == simple_d['cflags']) - self.assertTrue(out.libs() == simple_d['libflags']) - self.assertTrue(out.name == simple_d['name']) - self.assertTrue(out.version == simple_d['version']) - finally: - os.remove(filename) - - def test_simple_variable(self): - fd, filename = mkstemp('foo.ini') - try: - pkg = os.path.splitext(filename)[0] - try: - os.write(fd, simple_variable.encode('ascii')) - finally: - os.close(fd) - - out = read_config(pkg) - self.assertTrue(out.cflags() == simple_variable_d['cflags']) - self.assertTrue(out.libs() == simple_variable_d['libflags']) - self.assertTrue(out.name == simple_variable_d['name']) - self.assertTrue(out.version == simple_variable_d['version']) - - out.vars['prefix'] = '/Users/david' - self.assertTrue(out.cflags() == '-I/Users/david/include') 
- finally: - os.remove(filename) - -class TestParseFlags(TestCase): - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - self.assertTrue(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - self.assertTrue(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - self.assertTrue(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - self.assertTrue(d['libraries'] == ['foo', 'bar']) diff --git a/numpy-1.6.2/numpy/distutils/unixccompiler.py b/numpy-1.6.2/numpy/distutils/unixccompiler.py deleted file mode 100644 index dfc5a676f5..0000000000 --- a/numpy-1.6.2/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. 
-""" - -import os - -from distutils.errors import DistutilsExecError, CompileError -from distutils.unixccompiler import * -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.compat import get_exception - -if sys.version_info[0] < 3: - import log -else: - from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]),src) - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + - extra_postargs, display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. 
- - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. - # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except (IOError, OSError): - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy-1.6.2/numpy/doc/__init__.py b/numpy-1.6.2/numpy/doc/__init__.py deleted file mode 100644 index 6589b54929..0000000000 --- a/numpy-1.6.2/numpy/doc/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -import os - -ref_dir = os.path.join(os.path.dirname(__file__)) - -__all__ = [f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and - not f.startswith('__')] -__all__.sort() - -for f in __all__: - __import__(__name__ + '.' 
+ f) - -del f, ref_dir - -__doc__ = """\ -Topical documentation -===================== - -The following topics are available: -%s - -You can view them by - ->>> help(np.doc.TOPIC) #doctest: +SKIP - -""" % '\n- '.join([''] + __all__) - -__all__.extend(['__doc__']) diff --git a/numpy-1.6.2/numpy/doc/basics.py b/numpy-1.6.2/numpy/doc/basics.py deleted file mode 100644 index 97e9822043..0000000000 --- a/numpy-1.6.2/numpy/doc/basics.py +++ /dev/null @@ -1,138 +0,0 @@ -""" -============ -Array basics -============ - -Array types and conversions between types -========================================= - -Numpy supports a much greater variety of numerical types than Python does. -This section shows which are available, and how to modify an array's data-type. - -========== ========================================================= -Data type Description -========== ========================================================= -bool Boolean (True or False) stored as a byte -int Platform integer (normally either ``int32`` or ``int64``) -int8 Byte (-128 to 127) -int16 Integer (-32768 to 32767) -int32 Integer (-2147483648 to 2147483647) -int64 Integer (9223372036854775808 to 9223372036854775807) -uint8 Unsigned integer (0 to 255) -uint16 Unsigned integer (0 to 65535) -uint32 Unsigned integer (0 to 4294967295) -uint64 Unsigned integer (0 to 18446744073709551615) -float Shorthand for ``float64``. -float16 Half precision float: sign bit, 5 bits exponent, - 10 bits mantissa -float32 Single precision float: sign bit, 8 bits exponent, - 23 bits mantissa -float64 Double precision float: sign bit, 11 bits exponent, - 52 bits mantissa -complex Shorthand for ``complex128``. 
-complex64 Complex number, represented by two 32-bit floats (real - and imaginary components) -complex128 Complex number, represented by two 64-bit floats (real - and imaginary components) -========== ========================================================= - -Numpy numerical types are instances of ``dtype`` (data-type) objects, each -having unique characteristics. Once you have imported NumPy using - - :: - - >>> import numpy as np - -the dtypes are available as ``np.bool``, ``np.float32``, etc. - -Advanced types, not listed in the table above, are explored in -section :ref:`structured_arrays`. - -There are 5 basic numerical types representing booleans (bool), integers (int), -unsigned integers (uint) floating point (float) and complex. Those with numbers -in their name indicate the bitsize of the type (i.e. how many bits are needed -to represent a single value in memory). Some types, such as ``int`` and -``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit -vs. 64-bit machines). This should be taken into account when interfacing -with low-level code (such as C or Fortran) where the raw memory is addressed. - -Data-types can be used as functions to convert python numbers to array scalars -(see the array scalar section for an explanation), python sequences of numbers -to arrays of that type, or as arguments to the dtype keyword that many numpy -functions or methods accept. Some examples:: - - >>> import numpy as np - >>> x = np.float32(1.0) - >>> x - 1.0 - >>> y = np.int_([1,2,4]) - >>> y - array([1, 2, 4]) - >>> z = np.arange(3, dtype=np.uint8) - >>> z - array([0, 1, 2], dtype=uint8) - -Array types can also be referred to by character codes, mostly to retain -backward compatibility with older packages such as Numeric. Some -documentation may still refer to these, for example:: - - >>> np.array([1, 2, 3], dtype='f') - array([ 1., 2., 3.], dtype=float32) - -We recommend using dtype objects instead. 
- -To convert the type of an array, use the .astype() method (preferred) or -the type itself as a function. For example: :: - - >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE - array([ 0., 1., 2.]) - >>> np.int8(z) - array([0, 1, 2], dtype=int8) - -Note that, above, we use the *Python* float object as a dtype. NumPy knows -that ``int`` refers to ``np.int``, ``bool`` means ``np.bool`` and -that ``float`` is ``np.float``. The other data-types do not have Python -equivalents. - -To determine the type of an array, look at the dtype attribute:: - - >>> z.dtype - dtype('uint8') - -dtype objects also contain information about the type, such as its bit-width -and its byte-order. The data type can also be used indirectly to query -properties of the type, such as whether it is an integer:: - - >>> d = np.dtype(int) - >>> d - dtype('int32') - - >>> np.issubdtype(d, int) - True - - >>> np.issubdtype(d, float) - False - - -Array Scalars -============= - -Numpy generally returns elements of arrays as array scalars (a scalar -with an associated dtype). Array scalars differ from Python scalars, but -for the most part they can be used interchangeably (the primary -exception is for versions of Python older than v2.x, where integer array -scalars cannot act as indices for lists and tuples). There are some -exceptions, such as when code requires very specific attributes of a scalar -or when it checks specifically whether a value is a Python scalar. Generally, -problems are easily fixed by explicitly converting array scalars -to Python scalars, using the corresponding Python type function -(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). - -The primary advantage of using array scalars is that -they preserve the array type (Python may not have a matching scalar type -available, e.g. ``int16``). Therefore, the use of array scalars ensures -identical behaviour between arrays and scalars, irrespective of whether the -value is inside an array or not. 
NumPy scalars also have many of the same -methods arrays do. - -""" diff --git a/numpy-1.6.2/numpy/doc/broadcasting.py b/numpy-1.6.2/numpy/doc/broadcasting.py deleted file mode 100644 index 7b61796636..0000000000 --- a/numpy-1.6.2/numpy/doc/broadcasting.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -======================== -Broadcasting over arrays -======================== - -The term broadcasting describes how numpy treats arrays with different -shapes during arithmetic operations. Subject to certain constraints, -the smaller array is "broadcast" across the larger array so that they -have compatible shapes. Broadcasting provides a means of vectorizing -array operations so that looping occurs in C instead of Python. It does -this without making needless copies of data and usually leads to -efficient algorithm implementations. There are, however, cases where -broadcasting is a bad idea because it leads to inefficient use of memory -that slows computation. - -NumPy operations are usually done on pairs of arrays on an -element-by-element basis. In the simplest case, the two arrays must -have exactly the same shape, as in the following example: - - >>> a = np.array([1.0, 2.0, 3.0]) - >>> b = np.array([2.0, 2.0, 2.0]) - >>> a * b - array([ 2., 4., 6.]) - -NumPy's broadcasting rule relaxes this constraint when the arrays' -shapes meet certain constraints. The simplest broadcasting example occurs -when an array and a scalar value are combined in an operation: - ->>> a = np.array([1.0, 2.0, 3.0]) ->>> b = 2.0 ->>> a * b -array([ 2., 4., 6.]) - -The result is equivalent to the previous example where ``b`` was an array. -We can think of the scalar ``b`` being *stretched* during the arithmetic -operation into an array with the same shape as ``a``. The new elements in -``b`` are simply copies of the original scalar. The stretching analogy is -only conceptual. 
NumPy is smart enough to use the original scalar value -without actually making copies, so that broadcasting operations are as -memory and computationally efficient as possible. - -The code in the second example is more efficient than that in the first -because broadcasting moves less memory around during the multiplication -(``b`` is a scalar rather than an array). - -General Broadcasting Rules -========================== -When operating on two arrays, NumPy compares their shapes element-wise. -It starts with the trailing dimensions, and works its way forward. Two -dimensions are compatible when - -1) they are equal, or -2) one of them is 1 - -If these conditions are not met, a -``ValueError: frames are not aligned`` exception is thrown, indicating that -the arrays have incompatible shapes. The size of the resulting array -is the maximum size along each dimension of the input arrays. - -Arrays do not need to have the same *number* of dimensions. For example, -if you have a ``256x256x3`` array of RGB values, and you want to scale -each color in the image by a different value, you can multiply the image -by a one-dimensional array with 3 values. Lining up the sizes of the -trailing axes of these arrays according to the broadcast rules, shows that -they are compatible:: - - Image (3d array): 256 x 256 x 3 - Scale (1d array): 3 - Result (3d array): 256 x 256 x 3 - -When either of the dimensions compared is one, the larger of the two is -used. In other words, the smaller of two axes is stretched or "copied" -to match the other. 
- -In the following example, both the ``A`` and ``B`` arrays have axes with -length one that are expanded to a larger size during the broadcast -operation:: - - A (4d array): 8 x 1 x 6 x 1 - B (3d array): 7 x 1 x 5 - Result (4d array): 8 x 7 x 6 x 5 - -Here are some more examples:: - - A (2d array): 5 x 4 - B (1d array): 1 - Result (2d array): 5 x 4 - - A (2d array): 5 x 4 - B (1d array): 4 - Result (2d array): 5 x 4 - - A (3d array): 15 x 3 x 5 - B (3d array): 15 x 1 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 1 - Result (3d array): 15 x 3 x 5 - -Here are examples of shapes that do not broadcast:: - - A (1d array): 3 - B (1d array): 4 # trailing dimensions do not match - - A (2d array): 2 x 1 - B (3d array): 8 x 4 x 3 # second from last dimensions mismatched - -An example of broadcasting in practice:: - - >>> x = np.arange(4) - >>> xx = x.reshape(4,1) - >>> y = np.ones(5) - >>> z = np.ones((3,4)) - - >>> x.shape - (4,) - - >>> y.shape - (5,) - - >>> x + y - : shape mismatch: objects cannot be broadcast to a single shape - - >>> xx.shape - (4, 1) - - >>> y.shape - (5,) - - >>> (xx + y).shape - (4, 5) - - >>> xx + y - array([[ 1., 1., 1., 1., 1.], - [ 2., 2., 2., 2., 2.], - [ 3., 3., 3., 3., 3.], - [ 4., 4., 4., 4., 4.]]) - - >>> x.shape - (4,) - - >>> z.shape - (3, 4) - - >>> (x + z).shape - (3, 4) - - >>> x + z - array([[ 1., 2., 3., 4.], - [ 1., 2., 3., 4.], - [ 1., 2., 3., 4.]]) - -Broadcasting provides a convenient way of taking the outer product (or -any other outer operation) of two arrays. 
The following example shows an -outer addition operation of two 1-d arrays:: - - >>> a = np.array([0.0, 10.0, 20.0, 30.0]) - >>> b = np.array([1.0, 2.0, 3.0]) - >>> a[:, np.newaxis] + b - array([[ 1., 2., 3.], - [ 11., 12., 13.], - [ 21., 22., 23.], - [ 31., 32., 33.]]) - -Here the ``newaxis`` index operator inserts a new axis into ``a``, -making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array -with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. - -See `this article `_ -for illustrations of broadcasting concepts. - -""" diff --git a/numpy-1.6.2/numpy/doc/byteswapping.py b/numpy-1.6.2/numpy/doc/byteswapping.py deleted file mode 100644 index 23e7d7f6ee..0000000000 --- a/numpy-1.6.2/numpy/doc/byteswapping.py +++ /dev/null @@ -1,137 +0,0 @@ -''' - -============================= - Byteswapping and byte order -============================= - -Introduction to byte ordering and ndarrays -========================================== - -The ``ndarray`` is an object that provide a python array interface to data -in memory. - -It often happens that the memory that you want to view with an array is -not of the same byte ordering as the computer on which you are running -Python. - -For example, I might be working on a computer with a little-endian CPU - -such as an Intel Pentium, but I have loaded some data from a file -written by a computer that is big-endian. Let's say I have loaded 4 -bytes from a file written by a Sun (big-endian) computer. I know that -these 4 bytes represent two 16-bit integers. On a big-endian machine, a -two-byte integer is stored with the Most Significant Byte (MSB) first, -and then the Least Significant Byte (LSB). Thus the bytes are, in memory order: - -#. MSB integer 1 -#. LSB integer 1 -#. MSB integer 2 -#. LSB integer 2 - -Let's say the two integers were in fact 1 and 770. Because 770 = 256 * -3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2. 
-The bytes I have loaded from the file would have these contents: - ->>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2) ->>> big_end_str -'\\x00\\x01\\x03\\x02' - -We might want to use an ``ndarray`` to access these integers. In that -case, we can create an array around this memory, and tell numpy that -there are two integers, and that they are 16 bit and big-endian: - ->>> import numpy as np ->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str) ->>> big_end_arr[0] -1 ->>> big_end_arr[1] -770 - -Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian' -(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For -example, if our data represented a single unsigned 4-byte little-endian -integer, the dtype string would be ``>> little_end_u4 = np.ndarray(shape=(1,),dtype='>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3 -True - -Returning to our ``big_end_arr`` - in this case our underlying data is -big-endian (data endianness) and we've set the dtype to match (the dtype -is also big-endian). However, sometimes you need to flip these around. - -Changing byte ordering -====================== - -As you can imagine from the introduction, there are two ways you can -affect the relationship between the byte ordering of the array and the -underlying memory it is looking at: - -* Change the byte-ordering information in the array dtype so that it - interprets the undelying data as being in a different byte order. - This is the role of ``arr.newbyteorder()`` -* Change the byte-ordering of the underlying data, leaving the dtype - interpretation as it was. This is what ``arr.byteswap()`` does. - -The common situations in which you need to change byte ordering are: - -#. Your data and dtype endianess don't match, and you want to change - the dtype so that it matches the data. -#. Your data and dtype endianess don't match, and you want to swap the - data so that they match the dtype -#. 
Your data and dtype endianess match, but you want the data swapped - and the dtype to reflect this - -Data and dtype endianness don't match, change dtype to match data ------------------------------------------------------------------ - -We make something where they don't match: - ->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='>> wrong_end_dtype_arr[0] -256 - -The obvious fix for this situation is to change the dtype so it gives -the correct endianness: - ->>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder() ->>> fixed_end_dtype_arr[0] -1 - -Note the the array has not changed in memory: - ->>> fixed_end_dtype_arr.tostring() == big_end_str -True - -Data and type endianness don't match, change data to match dtype ----------------------------------------------------------------- - -You might want to do this if you need the data in memory to be a certain -ordering. For example you might be writing the memory out to a file -that needs a certain byte ordering. - ->>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap() ->>> fixed_end_mem_arr[0] -1 - -Now the array *has* changed in memory: - ->>> fixed_end_mem_arr.tostring() == big_end_str -False - -Data and dtype endianness match, swap data and dtype ----------------------------------------------------- - -You may have a correctly specified array dtype, but you need the array -to have the opposite byte order in memory, and you want the dtype to -match so the array values make sense. 
In this case you just do both of -the previous operations: - ->>> swapped_end_arr = big_end_arr.byteswap().newbyteorder() ->>> swapped_end_arr[0] -1 ->>> swapped_end_arr.tostring() == big_end_str -False - -''' diff --git a/numpy-1.6.2/numpy/doc/constants.py b/numpy-1.6.2/numpy/doc/constants.py deleted file mode 100644 index 722147dd8a..0000000000 --- a/numpy-1.6.2/numpy/doc/constants.py +++ /dev/null @@ -1,391 +0,0 @@ -""" -========= -Constants -========= - -Numpy includes several constants: - -%(constant_list)s -""" -# -# Note: the docstring is autogenerated. -# -import textwrap, re - -# Maintain same format as in numpy.add_newdocs -constants = [] -def add_newdoc(module, name, doc): - constants.append((name, doc)) - -add_newdoc('numpy', 'Inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'Infinity', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'NAN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NAN`. - - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'NINF', - """ - IEEE 754 floating point representation of negative infinity. - - Returns - ------- - y : float - A floating point representation of negative infinity. 
- - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - Examples - -------- - >>> np.NINF - -inf - >>> np.log(0) - -inf - - """) - -add_newdoc('numpy', 'NZERO', - """ - IEEE 754 floating point representation of negative zero. - - Returns - ------- - y : float - A floating point representation of negative zero. - - See Also - -------- - PZERO : Defines positive zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Negative zero is considered to be a finite number. - - Examples - -------- - >>> np.NZERO - -0.0 - >>> np.PZERO - 0.0 - - >>> np.isfinite([np.NZERO]) - array([ True], dtype=bool) - >>> np.isnan([np.NZERO]) - array([False], dtype=bool) - >>> np.isinf([np.NZERO]) - array([False], dtype=bool) - - """) - -add_newdoc('numpy', 'NaN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NaN`. 
- - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'PINF', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'PZERO', - """ - IEEE 754 floating point representation of positive zero. - - Returns - ------- - y : float - A floating point representation of positive zero. - - See Also - -------- - NZERO : Defines negative zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Positive zero is considered to be a finite number. - - Examples - -------- - >>> np.PZERO - 0.0 - >>> np.NZERO - -0.0 - - >>> np.isfinite([np.PZERO]) - array([ True], dtype=bool) - >>> np.isnan([np.PZERO]) - array([False], dtype=bool) - >>> np.isinf([np.PZERO]) - array([False], dtype=bool) - - """) - -add_newdoc('numpy', 'e', - """ - Euler's constant, base of natural logarithms, Napier's constant. - - ``e = 2.71828182845904523536028747135266249775724709369995...`` - - See Also - -------- - exp : Exponential function - log : Natural logarithm - - References - ---------- - .. [1] http://en.wikipedia.org/wiki/Napier_constant - - """) - -add_newdoc('numpy', 'inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Returns - ------- - y : float - A floating point representation of positive infinity. 
- - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. - - Examples - -------- - >>> np.inf - inf - >>> np.array([1]) / 0. - array([ Inf]) - - """) - -add_newdoc('numpy', 'infty', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'nan', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - Returns - ------- - y : A floating point representation of Not a Number. - - See Also - -------- - isnan : Shows which elements are Not a Number. - isfinite : Shows which elements are finite (not one of - Not a Number, positive infinity and negative infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - `NaN` and `NAN` are aliases of `nan`. - - Examples - -------- - >>> np.nan - nan - >>> np.log(-1) - nan - >>> np.log([-1, 1, 2]) - array([ NaN, 0. , 0.69314718]) - - """) - -add_newdoc('numpy', 'newaxis', - """ - A convenient alias for None, useful for indexing arrays. 
- - See Also - -------- - `numpy.doc.indexing` - - Examples - -------- - >>> newaxis is None - True - >>> x = np.arange(3) - >>> x - array([0, 1, 2]) - >>> x[:, newaxis] - array([[0], - [1], - [2]]) - >>> x[:, newaxis, newaxis] - array([[[0]], - [[1]], - [[2]]]) - >>> x[:, newaxis] * x - array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) - - Outer product, same as ``outer(x, y)``: - - >>> y = np.arange(3, 6) - >>> x[:, newaxis] * y - array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) - - ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: - - >>> x[newaxis, :].shape - (1, 3) - >>> x[newaxis].shape - (1, 3) - >>> x[None].shape - (1, 3) - >>> x[:, newaxis].shape - (3, 1) - - """) - -if __doc__: - constants_str = [] - constants.sort() - for name, doc in constants: - s = textwrap.dedent(doc).replace("\n", "\n ") - - # Replace sections by rubrics - lines = s.split("\n") - new_lines = [] - for line in lines: - m = re.match(r'^(\s+)[-=]+\s*$', line) - if m and new_lines: - prev = textwrap.dedent(new_lines.pop()) - new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) - new_lines.append('') - else: - new_lines.append(line) - s = "\n".join(new_lines) - - # Done. - constants_str.append(""".. const:: %s\n %s""" % (name, s)) - constants_str = "\n".join(constants_str) - - __doc__ = __doc__ % dict(constant_list=constants_str) - del constants_str, name, doc - del line, lines, new_lines, m, s, prev - -del constants, add_newdoc diff --git a/numpy-1.6.2/numpy/doc/creation.py b/numpy-1.6.2/numpy/doc/creation.py deleted file mode 100644 index 9a204e252a..0000000000 --- a/numpy-1.6.2/numpy/doc/creation.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -============== -Array Creation -============== - -Introduction -============ - -There are 5 general mechanisms for creating arrays: - -1) Conversion from other Python structures (e.g., lists, tuples) -2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, - etc.) 
-3) Reading arrays from disk, either from standard or custom formats -4) Creating arrays from raw bytes through the use of strings or buffers -5) Use of special library functions (e.g., random) - -This section will not cover means of replicating, joining, or otherwise -expanding or mutating existing arrays. Nor will it cover creating object -arrays or record arrays. Both of those are covered in their own sections. - -Converting Python array_like Objects to Numpy Arrays -==================================================== - -In general, numerical data arranged in an array-like structure in Python can -be converted to arrays through the use of the array() function. The most -obvious examples are lists and tuples. See the documentation for array() for -details for its use. Some objects may support the array-protocol and allow -conversion to arrays this way. A simple way to find out if the object can be -converted to a numpy array using array() is simply to try it interactively and -see if it works! (The Python Way). - -Examples: :: - - >>> x = np.array([2,3,1,0]) - >>> x = np.array([2, 3, 1, 0]) - >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, - and types - >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) - -Intrinsic Numpy Array Creation -============================== - -Numpy has built-in functions for creating arrays from scratch: - -zeros(shape) will create an array filled with 0 values with the specified -shape. The default dtype is float64. - -``>>> np.zeros((2, 3)) -array([[ 0., 0., 0.], [ 0., 0., 0.]])`` - -ones(shape) will create an array filled with 1 values. It is identical to -zeros in all other respects. - -arange() will create arrays with regularly incrementing values. Check the -docstring for complete information on the various ways it can be used. 
A few -examples will be given here: :: - - >>> np.arange(10) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=np.float) - array([ 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> np.arange(2, 3, 0.1) - array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) - -Note that there are some subtleties regarding the last usage that the user -should be aware of that are described in the arange docstring. - -linspace() will create arrays with a specified number of elements, and -spaced equally between the specified beginning and end values. For -example: :: - - >>> np.linspace(1., 4., 6) - array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ]) - -The advantage of this creation function is that one can guarantee the -number of elements and the starting and end point, which arange() -generally will not do for arbitrary start, stop, and step values. - -indices() will create a set of arrays (stacked as a one-higher dimensioned -array), one per dimension with each representing variation in that dimension. -An example illustrates much better than a verbal description: :: - - >>> np.indices((3,3)) - array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) - -This is particularly useful for evaluating functions of multiple dimensions on -a regular grid. - -Reading Arrays From Disk -======================== - -This is presumably the most common case of large array creation. The details, -of course, depend greatly on the format of data on disk and so this section -can only give general pointers on how to handle various formats. - -Standard Binary Formats ------------------------ - -Various fields have standard formats for array data. 
The following lists the -ones with known python libraries to read them and return numpy arrays (there -may be others for which it is possible to read and convert to numpy arrays so -check the last section as well) -:: - - HDF5: PyTables - FITS: PyFITS - -Examples of formats that cannot be read directly but for which it is not hard -to convert are libraries like PIL (able to read and write many image formats -such as jpg, png, etc). - -Common ASCII Formats ------------------------- - -Comma Separated Value files (CSV) are widely used (and an export and import -option for programs like Excel). There are a number of ways of reading these -files in Python. There are CSV functions in Python and functions in pylab -(part of matplotlib). - -More generic ascii files can be read using the io package in scipy. - -Custom Binary Formats ---------------------- - -There are a variety of approaches one can use. If the file has a relatively -simple format then one can write a simple I/O library and use the numpy -fromfile() function and .tofile() method to read and write numpy arrays -directly (mind your byteorder though!) If a good C or C++ library exists that -read the data, one can wrap that library with a variety of techniques though -that certainly is much more work and requires significantly more advanced -knowledge to interface with C or C++. - -Use of Special Libraries ------------------------- - -There are libraries that can be used to generate arrays for special purposes -and it isn't possible to enumerate all of them. The most common uses are use -of the many array generation functions in random that can generate arrays of -random values, and some utility functions to generate special matrices (e.g. -diagonal). 
- -""" diff --git a/numpy-1.6.2/numpy/doc/glossary.py b/numpy-1.6.2/numpy/doc/glossary.py deleted file mode 100644 index dc7c75a0a3..0000000000 --- a/numpy-1.6.2/numpy/doc/glossary.py +++ /dev/null @@ -1,415 +0,0 @@ -""" -======== -Glossary -======== - -along an axis - Axes are defined for arrays with more than one dimension. A - 2-dimensional array has two corresponding axes: the first running - vertically downwards across rows (axis 0), and the second running - horizontally across columns (axis 1). - - Many operation can take place along one of these axes. For example, - we can sum each row of an array, in which case we operate along - columns, or axis 1:: - - >>> x = np.arange(12).reshape((3,4)) - - >>> x - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - >>> x.sum(axis=1) - array([ 6, 22, 38]) - -array - A homogeneous container of numerical elements. Each element in the - array occupies a fixed amount of memory (hence homogeneous), and - can be a numerical element of a single type (such as float, int - or complex) or a combination (such as ``(float, int, float)``). Each - array has an associated data-type (or ``dtype``), which describes - the numerical type of its elements:: - - >>> x = np.array([1, 2, 3], float) - - >>> x - array([ 1., 2., 3.]) - - >>> x.dtype # floating point number, 64 bits of memory per element - dtype('float64') - - - # More complicated data type: each array element is a combination of - # and integer and a floating point number - >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) - array([(1, 2.0), (3, 4.0)], - dtype=[('x', '>> x = np.array([1, 2, 3]) - >>> x.shape - (3,) - -BLAS - `Basic Linear Algebra Subprograms `_ - -broadcast - NumPy can do operations on arrays whose shapes are mismatched:: - - >>> x = np.array([1, 2]) - >>> y = np.array([[3], [4]]) - - >>> x - array([1, 2]) - - >>> y - array([[3], - [4]]) - - >>> x + y - array([[4, 5], - [5, 6]]) - - See `doc.broadcasting`_ for more information. 
- -C order - See `row-major` - -column-major - A way to represent items in a N-dimensional array in the 1-dimensional - computer memory. In column-major order, the leftmost index "varies the - fastest": for example the array:: - - [[1, 2, 3], - [4, 5, 6]] - - is represented in the column-major order as:: - - [1, 4, 2, 5, 3, 6] - - Column-major order is also known as the Fortran order, as the Fortran - programming language uses it. - -decorator - An operator that transforms a function. For example, a ``log`` - decorator may be defined to print debugging information upon - function execution:: - - >>> def log(f): - ... def new_logging_func(*args, **kwargs): - ... print "Logging call with parameters:", args, kwargs - ... return f(*args, **kwargs) - ... - ... return new_logging_func - - Now, when we define a function, we can "decorate" it using ``log``:: - - >>> @log - ... def add(a, b): - ... return a + b - - Calling ``add`` then yields: - - >>> add(1, 2) - Logging call with parameters: (1, 2) {} - 3 - -dictionary - Resembling a language dictionary, which provides a mapping between - words and descriptions thereof, a Python dictionary is a mapping - between two objects:: - - >>> x = {1: 'one', 'two': [1, 2]} - - Here, `x` is a dictionary mapping keys to values, in this case - the integer 1 to the string "one", and the string "two" to - the list ``[1, 2]``. The values may be accessed using their - corresponding keys:: - - >>> x[1] - 'one' - - >>> x['two'] - [1, 2] - - Note that dictionaries are not stored in any specific order. Also, - most mutable (see *immutable* below) objects, such as lists, may not - be used as keys. - - For more information on dictionaries, read the - `Python tutorial `_. - -Fortran order - See `column-major` - -flattened - Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details. - -immutable - An object that cannot be modified after execution is called - immutable. Two common examples are strings and tuples. 
- -instance - A class definition gives the blueprint for constructing an object:: - - >>> class House(object): - ... wall_colour = 'white' - - Yet, we have to *build* a house before it exists:: - - >>> h = House() # build a house - - Now, ``h`` is called a ``House`` instance. An instance is therefore - a specific realisation of a class. - -iterable - A sequence that allows "walking" (iterating) over items, typically - using a loop such as:: - - >>> x = [1, 2, 3] - >>> [item**2 for item in x] - [1, 4, 9] - - It is often used in combintion with ``enumerate``:: - >>> keys = ['a','b','c'] - >>> for n, k in enumerate(keys): - ... print "Key %d: %s" % (n, k) - ... - Key 0: a - Key 1: b - Key 2: c - -list - A Python container that can hold any number of objects or items. - The items do not have to be of the same type, and can even be - lists themselves:: - - >>> x = [2, 2.0, "two", [2, 2.0]] - - The list `x` contains 4 items, each which can be accessed individually:: - - >>> x[2] # the string 'two' - 'two' - - >>> x[3] # a list, containing an integer 2 and a float 2.0 - [2, 2.0] - - It is also possible to select more than one item at a time, - using *slicing*:: - - >>> x[0:2] # or, equivalently, x[:2] - [2, 2.0] - - In code, arrays are often conveniently expressed as nested lists:: - - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - For more information, read the section on lists in the `Python - tutorial `_. For a mapping - type (key-value), see *dictionary*. 
- -mask - A boolean array, used to select only certain elements for an operation:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> mask = (x > 2) - >>> mask - array([False, False, False, True, True], dtype=bool) - - >>> x[mask] = -1 - >>> x - array([ 0, 1, 2, -1, -1]) - -masked array - Array that suppressed values indicated by a mask:: - - >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) - >>> x - masked_array(data = [-- 2.0 --], - mask = [ True False True], - fill_value = 1e+20) - - - >>> x + [1, 2, 3] - masked_array(data = [-- 4.0 --], - mask = [ True False True], - fill_value = 1e+20) - - - - Masked arrays are often used when operating on arrays containing - missing or invalid entries. - -matrix - A 2-dimensional ndarray that preserves its two-dimensional nature - throughout operations. It has certain special operations, such as ``*`` - (matrix multiplication) and ``**`` (matrix power), defined:: - - >>> x = np.mat([[1, 2], [3, 4]]) - - >>> x - matrix([[1, 2], - [3, 4]]) - - >>> x**2 - matrix([[ 7, 10], - [15, 22]]) - -method - A function associated with an object. For example, each ndarray has a - method called ``repeat``:: - - >>> x = np.array([1, 2, 3]) - - >>> x.repeat(2) - array([1, 1, 2, 2, 3, 3]) - -ndarray - See *array*. - -reference - If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, - ``a`` and ``b`` are different names for the same Python object. - -row-major - A way to represent items in a N-dimensional array in the 1-dimensional - computer memory. In row-major order, the rightmost index "varies - the fastest": for example the array:: - - [[1, 2, 3], - [4, 5, 6]] - - is represented in the row-major order as:: - - [1, 2, 3, 4, 5, 6] - - Row-major order is also known as the C order, as the C programming - language uses it. New Numpy arrays are by default in row-major order. - -self - Often seen in method signatures, ``self`` refers to the instance - of the associated class. 
For example: - - >>> class Paintbrush(object): - ... color = 'blue' - ... - ... def paint(self): - ... print "Painting the city %s!" % self.color - ... - >>> p = Paintbrush() - >>> p.color = 'red' - >>> p.paint() # self refers to 'p' - Painting the city red! - -slice - Used to select only certain elements from a sequence:: - - >>> x = range(5) - >>> x - [0, 1, 2, 3, 4] - - >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) - [1, 2] - - >>> x[1:5:2] # slice from 1 to 5, but skipping every second element - [1, 3] - - >>> x[::-1] # slice a sequence in reverse - [4, 3, 2, 1, 0] - - Arrays may have more than one dimension, each which can be sliced - individually:: - - >>> x = np.array([[1, 2], [3, 4]]) - >>> x - array([[1, 2], - [3, 4]]) - - >>> x[:, 1] - array([2, 4]) - -tuple - A sequence that may contain a variable number of types of any - kind. A tuple is immutable, i.e., once constructed it cannot be - changed. Similar to a list, it can be indexed and sliced:: - - >>> x = (1, 'one', [1, 2]) - - >>> x - (1, 'one', [1, 2]) - - >>> x[0] - 1 - - >>> x[:2] - (1, 'one') - - A useful concept is "tuple unpacking", which allows variables to - be assigned to the contents of a tuple:: - - >>> x, y = (1, 2) - >>> x, y = 1, 2 - - This is often used when a function returns multiple values: - - >>> def return_many(): - ... return 1, 'alpha', None - - >>> a, b, c = return_many() - >>> a, b, c - (1, 'alpha', None) - - >>> a - 1 - >>> b - 'alpha' - -ufunc - Universal function. A fast element-wise array operation. Examples include - ``add``, ``sin`` and ``logical_or``. - -view - An array that does not own its data, but refers to another array's - data instead. 
For example, we may create a view that only shows - every second element of another array:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> y = x[::2] - >>> y - array([0, 2, 4]) - - >>> x[0] = 3 # changing x changes y as well, since y is a view on x - >>> y - array([3, 2, 4]) - -wrapper - Python is a high-level (highly abstracted, or English-like) language. - This abstraction comes at a price in execution speed, and sometimes - it becomes necessary to use lower level languages to do fast - computations. A wrapper is code that provides a bridge between - high and the low level languages, allowing, e.g., Python to execute - code written in C or Fortran. - - Examples include ctypes, SWIG and Cython (which wraps C and C++) - and f2py (which wraps Fortran). - -""" diff --git a/numpy-1.6.2/numpy/doc/howtofind.py b/numpy-1.6.2/numpy/doc/howtofind.py deleted file mode 100644 index 29ad05318e..0000000000 --- a/numpy-1.6.2/numpy/doc/howtofind.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -================= -How to Find Stuff -================= - -How to find things in NumPy. - -""" diff --git a/numpy-1.6.2/numpy/doc/indexing.py b/numpy-1.6.2/numpy/doc/indexing.py deleted file mode 100644 index 99def88892..0000000000 --- a/numpy-1.6.2/numpy/doc/indexing.py +++ /dev/null @@ -1,407 +0,0 @@ -""" -============== -Array indexing -============== - -Array indexing refers to any use of the square brackets ([]) to index -array values. There are many options to indexing, which give numpy -indexing great power, but with power comes some complexity and the -potential for confusion. This section is just an overview of the -various options and issues related to indexing. Aside from single -element indexing, the details on most of these options are to be -found in related sections. - -Assignment vs referencing -========================= - -Most of the following examples show the use of indexing when -referencing data in an array. 
The examples work just as well -when assigning to an array. See the section at the end for -specific examples and explanations on how assignments work. - -Single element indexing -======================= - -Single element indexing for a 1-D array is what one expects. It works -exactly like that for other standard Python sequences. It is 0-based, -and accepts negative indices for indexing from the end of the array. :: - - >>> x = np.arange(10) - >>> x[2] - 2 - >>> x[-2] - 8 - -Unlike lists and tuples, numpy arrays support multidimensional indexing -for multidimensional arrays. That means that it is not necessary to -separate each dimension's index into its own set of square brackets. :: - - >>> x.shape = (2,5) # now x is 2-dimensional - >>> x[1,3] - 8 - >>> x[1,-1] - 9 - -Note that if one indexes a multidimensional array with fewer indices -than dimensions, one gets a subdimensional array. For example: :: - - >>> x[0] - array([0, 1, 2, 3, 4]) - -That is, each index specified selects the array corresponding to the -rest of the dimensions selected. In the above example, choosing 0 -means that the remaining dimension of length 5 is being left unspecified, -and that what is returned is an array of that dimensionality and size. -It must be noted that the returned array is not a copy of the original, -but points to the same values in memory as does the original array. -In this case, the 1-D array at the first position (0) is returned. -So using a single index on the returned array, results in a single -element being returned. That is: :: - - >>> x[0][2] - 2 - -So note that ``x[0,2] == x[0][2]`` though the second case is more -inefficient, as a new temporary array is created after the first index -that is subsequently indexed by 2. - -Note to those used to IDL or Fortran memory order as it relates to -indexing. Numpy uses C-order indexing.
That means that the last -index usually represents the most rapidly changing memory location, -unlike Fortran or IDL, where the first index represents the most -rapidly changing location in memory. This difference represents a -great potential for confusion. - -Other indexing options -====================== - -It is possible to slice and stride arrays to extract arrays of the -same number of dimensions, but of different sizes than the original. -The slicing and striding works exactly the same way it does for lists -and tuples except that they can be applied to multiple dimensions as -well. A few examples illustrates best: :: - - >>> x = np.arange(10) - >>> x[2:5] - array([2, 3, 4]) - >>> x[:-7] - array([0, 1, 2]) - >>> x[1:7:2] - array([1, 3, 5]) - >>> y = np.arange(35).reshape(5,7) - >>> y[1:5:2,::3] - array([[ 7, 10, 13], - [21, 24, 27]]) - -Note that slices of arrays do not copy the internal array data but -also produce new views of the original data. - -It is possible to index arrays with other arrays for the purposes of -selecting lists of values out of arrays into new arrays. There are -two different ways of accomplishing this. One uses one or more arrays -of index values. The other involves giving a boolean array of the proper -shape to indicate the values to be selected. Index arrays are a very -powerful tool that allow one to avoid looping over individual elements in -arrays and thus greatly improve performance. - -It is possible to use special features to effectively increase the -number of dimensions in an array through indexing so the resulting -array aquires the shape needed for use in an expression or with a -specific function. - -Index arrays -============ - -Numpy arrays may be indexed with other arrays (or any other sequence- -like object that can be converted to an array, such as lists, with the -exception of tuples; see the end of this document for why this is). 
The -use of index arrays ranges from simple, straightforward cases to -complex, hard-to-understand cases. For all cases of index arrays, what -is returned is a copy of the original data, not a view as one gets for -slices. - -Index arrays must be of integer type. Each value in the array indicates -which value in the array to use in place of the index. To illustrate: :: - - >>> x = np.arange(10,1,-1) - >>> x - array([10, 9, 8, 7, 6, 5, 4, 3, 2]) - >>> x[np.array([3, 3, 1, 8])] - array([7, 7, 9, 2]) - - -The index array consisting of the values 3, 3, 1 and 8 correspondingly -creates an array of length 4 (same as the index array) where each index -is replaced by the value the index array has in the array being indexed. - -Negative values are permitted and work as they do with single indices -or slices: :: - - >>> x[np.array([3,3,-3,8])] - array([7, 7, 4, 2]) - -It is an error to have index values out of bounds: :: - - >>> x[np.array([3, 3, 20, 8])] - : index 20 out of bounds 0<=index<9 - -Generally speaking, what is returned when index arrays are used is -an array with the same shape as the index array, but with the type -and values of the array being indexed. As an example, we can use a -multidimensional index array instead: :: - - >>> x[np.array([[1,1],[2,3]])] - array([[9, 9], - [8, 7]]) - -Indexing Multi-dimensional arrays -================================= - -Things become more complex when multidimensional arrays are indexed, -particularly with multidimensional index arrays. These tend to be -more unusual uses, but they are permitted, and they are useful for some -problems.
We'll start with the simplest multidimensional case (using -the array y from the previous examples): :: - - >>> y[np.array([0,2,4]), np.array([0,1,2])] - array([ 0, 15, 30]) - -In this case, if the index arrays have a matching shape, and there is -an index array for each dimension of the array being indexed, the -resultant array has the same shape as the index arrays, and the values -correspond to the index set for each position in the index arrays. In -this example, the first index value is 0 for both index arrays, and -thus the first value of the resultant array is y[0,0]. The next value -is y[2,1], and the last is y[4,2]. - -If the index arrays do not have the same shape, there is an attempt to -broadcast them to the same shape. If they cannot be broadcast to the -same shape, an exception is raised: :: - - >>> y[np.array([0,2,4]), np.array([0,1])] - : shape mismatch: objects cannot be - broadcast to a single shape - -The broadcasting mechanism permits index arrays to be combined with -scalars for other indices. The effect is that the scalar value is used -for all the corresponding values of the index arrays: :: - - >>> y[np.array([0,2,4]), 1] - array([ 1, 15, 29]) - -Jumping to the next level of complexity, it is possible to only -partially index an array with index arrays. It takes a bit of thought -to understand what happens in such cases. For example if we just use -one index array with y: :: - - >>> y[np.array([0,2,4])] - array([[ 0, 1, 2, 3, 4, 5, 6], - [14, 15, 16, 17, 18, 19, 20], - [28, 29, 30, 31, 32, 33, 34]]) - -What results is the construction of a new array where each value of -the index array selects one row from the array being indexed and the -resultant array has the resulting shape (size of row, number of index -elements). - -An example of where this may be useful is for a color lookup table -where we want to map the values of an image into RGB triples for -display. The lookup table could have a shape (nlookup, 3).
Indexing -such an array with an image with shape (ny, nx) with dtype=np.uint8 -(or any integer type so long as values are within the bounds of the -lookup table) will result in an array of shape (ny, nx, 3) where a -triple of RGB values is associated with each pixel location. - -In general, the shape of the resultant array will be the concatenation -of the shape of the index array (or the shape that all the index arrays -were broadcast to) with the shape of any unused dimensions (those not -indexed) in the array being indexed. - -Boolean or "mask" index arrays -============================== - -Boolean arrays used as indices are treated in a different manner -entirely than index arrays. Boolean arrays must be of the same shape -as the array being indexed, or broadcastable to the same shape. In the -most straightforward case, the boolean array has the same shape: :: - - >>> b = y>20 - >>> y[b] - array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) - -The result is a 1-D array containing all the elements in the indexed -array corresponding to all the true elements in the boolean array. As -with index arrays, what is returned is a copy of the data, not a view -as one gets with slices. - -With broadcasting, multidimensional arrays may be the result. For -example: :: - - >>> b[:,5] # use a 1-D boolean that broadcasts with y - array([False, False, False, True, True], dtype=bool) - >>> y[b[:,5]] - array([[21, 22, 23, 24, 25, 26, 27], - [28, 29, 30, 31, 32, 33, 34]]) - -Here the 4th and 5th rows are selected from the indexed array and -combined to make a 2-D array. - -Combining index arrays with slices -================================== - -Index arrays may be combined with slices. For example: :: - - >>> y[np.array([0,2,4]),1:3] - array([[ 1, 2], - [15, 16], - [29, 30]]) - -In effect, the slice is converted to an index array -np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array -to produce a resultant array of shape (3,2).
- -Likewise, slicing can be combined with broadcasted boolean indices: :: - - >>> y[b[:,5],1:3] - array([[22, 23], - [29, 30]]) - -Structural indexing tools -========================= - -To facilitate easy matching of array shapes with expressions and in -assignments, the np.newaxis object can be used within array indices -to add new dimensions with a size of 1. For example: :: - - >>> y.shape - (5, 7) - >>> y[:,np.newaxis,:].shape - (5, 1, 7) - -Note that there are no new elements in the array, just that the -dimensionality is increased. This can be handy to combine two -arrays in a way that otherwise would require explicitly reshaping -operations. For example: :: - - >>> x = np.arange(5) - >>> x[:,np.newaxis] + x[np.newaxis,:] - array([[0, 1, 2, 3, 4], - [1, 2, 3, 4, 5], - [2, 3, 4, 5, 6], - [3, 4, 5, 6, 7], - [4, 5, 6, 7, 8]]) - -The ellipsis syntax maybe used to indicate selecting in full any -remaining unspecified dimensions. For example: :: - - >>> z = np.arange(81).reshape(3,3,3,3) - >>> z[1,...,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -This is equivalent to: :: - - >>> z[1,:,:,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -Assigning values to indexed arrays -================================== - -As mentioned, one can select a subset of an array to assign to using -a single index, slices, and index and mask arrays. The value being -assigned to the indexed array must be shape consistent (the same shape -or broadcastable to the shape the index produces). 
For example, it is -permitted to assign a constant to a slice: :: - - >>> x = np.arange(10) - >>> x[2:7] = 1 - -or an array of the right size: :: - - >>> x[2:7] = np.arange(5) - -Note that assignments may result in changes if assigning -higher types to lower types (like floats to ints) or even -exceptions (assigning complex to floats or ints): :: - - >>> x[1] = 1.2 - >>> x[1] - 1 - >>> x[1] = 1.2j - : can't convert complex to long; use - long(abs(z)) - - -Unlike some of the references (such as array and mask indices) -assignments are always made to the original data in the array -(indeed, nothing else would make sense!). Note though, that some -actions may not work as one may naively expect. This particular -example is often surprising to people: :: - - >>> x = np.arange(0, 50, 10) - >>> x - array([ 0, 10, 20, 30, 40]) - >>> x[np.array([1, 1, 3, 1])] += 1 - >>> x - array([ 0, 11, 20, 31, 40]) - -Where people expect that the 1st location will be incremented by 3. -In fact, it will only be incremented by 1. The reason is because -a new array is extracted from the original (as a temporary) containing -the values at 1, 1, 3, 1, then the value 1 is added to the temporary, -and then the temporary is assigned back to the original array. Thus -the value of the array at x[1]+1 is assigned to x[1] three times, -rather than being incremented 3 times. - -Dealing with variable numbers of indices within programs -======================================================== - -The index syntax is very powerful but limiting when dealing with -a variable number of indices. For example, if you want to write -a function that can handle arguments with various numbers of -dimensions without having to write special case code for each -number of possible dimensions, how can that be done? If one -supplies to the index a tuple, the tuple will be interpreted -as a list of indices. 
For example (using the previous definition -for the array z): :: - - >>> indices = (1,1,1,1) - >>> z[indices] - 40 - -So one can use code to construct tuples of any number of indices -and then use these within an index. - -Slices can be specified within programs by using the slice() function -in Python. For example: :: - - >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2] - >>> z[indices] - array([39, 40]) - -Likewise, ellipsis can be specified by code by using the Ellipsis -object: :: - - >>> indices = (1, Ellipsis, 1) # same as [1,...,1] - >>> z[indices] - array([[28, 31, 34], - [37, 40, 43], - [46, 49, 52]]) - -For this reason it is possible to use the output from the np.where() -function directly as an index since it always returns a tuple of index -arrays. - -Because the special treatment of tuples, they are not automatically -converted to an array as a list would be. As an example: :: - - >>> z[[1,1,1,1]] # produces a large array - array([[[[27, 28, 29], - [30, 31, 32], ... - >>> z[(1,1,1,1)] # returns a single value - 40 - -""" diff --git a/numpy-1.6.2/numpy/doc/internals.py b/numpy-1.6.2/numpy/doc/internals.py deleted file mode 100644 index a744293683..0000000000 --- a/numpy-1.6.2/numpy/doc/internals.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -=============== -Array Internals -=============== - -Internal organization of numpy arrays -===================================== - -It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to Numpy". - -Numpy arrays consist of two major components, the raw array data (from now on, -referred to as the data buffer), and the information about the raw array data. -The data buffer is typically what people think of as arrays in C or Fortran, -a contiguous (and fixed) block of memory containing fixed sized data items. 
-Numpy also contains a significant set of data that describes how to interpret -the data in the data buffer. This extra information contains (among other things): - - 1) The basic data element's size in bytes - 2) The start of the data within the data buffer (an offset relative to the - beginning of the data buffer). - 3) The number of dimensions and the size of each dimension - 4) The separation between elements for each dimension (the 'stride'). This - does not have to be a multiple of the element size - 5) The byte order of the data (which may not be the native byte order) - 6) Whether the buffer is read-only - 7) Information (via the dtype object) about the interpretation of the basic - data element. The basic data element may be as simple as a int or a float, - or it may be a compound object (e.g., struct-like), a fixed character field, - or Python object pointers. - 8) Whether the array is to interpreted as C-order or Fortran-order. - -This arrangement allow for very flexible use of arrays. One thing that it allows -is simple changes of the metadata to change the interpretation of the array buffer. -Changing the byteorder of the array is a simple change involving no rearrangement -of the data. The shape of the array can be changed very easily without changing -anything in the data buffer or any data copying at all - -Among other things that are made possible is one can create a new array metadata -object that uses the same data buffer -to create a new view of that data buffer that has a different interpretation -of the buffer (e.g., different shape, offset, byte order, strides, etc) but -shares the same data bytes. Many operations in numpy do just this such as -slices. Other operations, such as transpose, don't move data elements -around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the doesn't move. 
- -Typically these new versions of the array metadata but the same data buffer are -new 'views' into the data buffer. There is a different ndarray object, but it -uses the same data buffer. This is why it is necessary to force copies through -use of the .copy() method if one really wants to make a new and independent -copy of the data buffer. - -New views into arrays mean the the object reference counts for the data buffer -increase. Simply doing away with the original array object will not remove the -data buffer if other views of it still exist. - -Multidimensional Array Indexing Order Issues -============================================ - -What is the right way to index -multi-dimensional arrays? Before you jump to conclusions about the one and -true way to index multi-dimensional arrays, it pays to understand why this is -a confusing issue. This section will try to explain in detail how numpy -indexing works and why we adopt the convention we do for images, and when it -may be appropriate to adopt other conventions. - -The first thing to understand is -that there are two conflicting conventions for indexing 2-dimensional arrays. -Matrix notation uses the first index to indicate which row is being selected and -the second index to indicate which column is selected. This is opposite the -geometrically oriented-convention for images where people generally think the -first index represents x position (i.e., column) and the second represents y -position (i.e., row). This alone is the source of much confusion; -matrix-oriented users and image-oriented users expect two different things with -regard to indexing. - -The second issue to understand is how indices correspond -to the order the array is stored in memory. In Fortran the first index is the -most rapidly varying index when moving through the elements of a two -dimensional array as it is stored in memory. 
If you adopt the matrix -convention for indexing, then this means the matrix is stored one column at a -time (since the first index moves to the next row as it changes). Thus Fortran -is considered a Column-major language. C has just the opposite convention. In -C, the last index changes most rapidly as one moves through the array as -stored in memory. Thus C is a Row-major language. The matrix is stored by -rows. Note that in both cases it presumes that the matrix convention for -indexing is being used, i.e., for both Fortran and C, the first index is the -row. Note this convention implies that the indexing convention is invariant -and that the data order changes to keep that so. - -But that's not the only way -to look at it. Suppose one has large two-dimensional arrays (images or -matrices) stored in data files. Suppose the data are stored by rows rather than -by columns. If we are to preserve our index convention (whether matrix or -image) that means that depending on the language we use, we may be forced to -reorder the data if it is read into memory to preserve our indexing -convention. For example if we read row-ordered data into memory without -reordering, it will match the matrix indexing convention for C, but not for -Fortran. Conversely, it will match the image indexing convention for Fortran, -but not for C. For C, if one is using data stored in row order, and one wants -to preserve the image index convention, the data must be reordered when -reading into memory. - -In the end, which you do for Fortran or C depends on -which is more important, not reordering data or preserving the indexing -convention. For large images, reordering data is potentially expensive, and -often the indexing convention is inverted to avoid that. - -The situation with -numpy makes this issue yet more complicated. The internal machinery of numpy -arrays is flexible enough to accept any ordering of indices. 
One can simply -reorder indices by manipulating the internal stride information for arrays -without reordering the data at all. Numpy will know how to map the new index -order to the data without moving the data. - -So if this is true, why not choose -the index order that matches what you most expect? In particular, why not define -row-ordered images to use the image convention? (This is sometimes referred -to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' -order options for array ordering in numpy.) The drawback of doing this is -potential performance penalties. It's common to access the data sequentially, -either implicitly in array operations or explicitly by looping over rows of an -image. When that is done, then the data will be accessed in non-optimal order. -As the first index is incremented, what is actually happening is that elements -spaced far apart in memory are being sequentially accessed, with usually poor -memory access speeds. For example, for a two dimensional image 'im' defined so -that im[0, 10] represents the value at x=0, y=10. To be consistent with usual -Python behavior then im[0] would represent a column at x=0. Yet that data -would be spread over the whole array since the data are stored in row order. -Despite the flexibility of numpy's indexing, it can't really paper over the fact -basic operations are rendered inefficient because of data order or that getting -contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs -im[0]), thus one can't use an idiom such as for row in im; for col in im does -work, but doesn't yield contiguous column data. - -As it turns out, numpy is -smart enough when dealing with ufuncs to determine which index is the most -rapidly varying one in memory and uses that for the innermost loop. Thus for -ufuncs there is no large intrinsic advantage to either approach in most cases. 
-On the other hand, use of .flat with an FORTRAN ordered array will lead to -non-optimal memory access as adjacent elements in the flattened array (iterator, -actually) are not contiguous in memory. - -Indeed, the fact is that Python -indexing on lists and other sequences naturally leads to an outside-to inside -ordering (the first index gets the largest grouping, the next the next largest, -and the last gets the smallest element). Since image data are normally stored -by rows, this corresponds to position within rows being the last item indexed. - -If you do want to use Fortran ordering realize that -there are two approaches to consider: 1) accept that the first index is just not -the most rapidly changing in memory and have all your I/O routines reorder -your data when going from memory to disk or visa versa, or use numpy's -mechanism for mapping the first index to the most rapidly varying data. We -recommend the former if possible. The disadvantage of the latter is that many -of numpy's functions will yield arrays without Fortran ordering unless you are -careful to use the 'order' keyword. Doing this would be highly inconvenient. - -Otherwise we recommend simply learning to reverse the usual order of indices -when accessing elements of an array. Granted, it goes against the grain, but -it is more in line with Python semantics and the natural order of the data. - -""" diff --git a/numpy-1.6.2/numpy/doc/io.py b/numpy-1.6.2/numpy/doc/io.py deleted file mode 100644 index 3cde40bd0b..0000000000 --- a/numpy-1.6.2/numpy/doc/io.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -========= -Array I/O -========= - -Placeholder for array I/O documentation. - -""" diff --git a/numpy-1.6.2/numpy/doc/jargon.py b/numpy-1.6.2/numpy/doc/jargon.py deleted file mode 100644 index e13ff5686a..0000000000 --- a/numpy-1.6.2/numpy/doc/jargon.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -====== -Jargon -====== - -Placeholder for computer science, engineering and other jargon. 
- -""" diff --git a/numpy-1.6.2/numpy/doc/methods_vs_functions.py b/numpy-1.6.2/numpy/doc/methods_vs_functions.py deleted file mode 100644 index 22eadccf72..0000000000 --- a/numpy-1.6.2/numpy/doc/methods_vs_functions.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -===================== -Methods vs. Functions -===================== - -Placeholder for Methods vs. Functions documentation. - -""" diff --git a/numpy-1.6.2/numpy/doc/misc.py b/numpy-1.6.2/numpy/doc/misc.py deleted file mode 100644 index 8fa3f8a31a..0000000000 --- a/numpy-1.6.2/numpy/doc/misc.py +++ /dev/null @@ -1,228 +0,0 @@ -""" -============= -Miscellaneous -============= - -IEEE 754 Floating Point Special Values: ------------------------------------------------ - -Special values defined in numpy: nan, inf, - -NaNs can be used as a poor-man's mask (if you don't care what the -original value was) - -Note: cannot use equality to test NaNs. E.g.: :: - - >>> myarr = np.array([1., 0., np.nan, 3.]) - >>> np.where(myarr == np.nan) - >>> np.nan == np.nan # is always False! Use special numpy functions instead. - False - >>> myarr[myarr == np.nan] = 0. # doesn't work - >>> myarr - array([ 1., 0., NaN, 3.]) - >>> myarr[np.isnan(myarr)] = 0. # use this instead find - >>> myarr - array([ 1., 0., 0., 3.]) - -Other related special value functions: :: - - isinf(): True if value is inf - isfinite(): True if not nan or inf - nan_to_num(): Map nan to 0, inf to max float, -inf to min float - -The following corresponds to the usual functions except that nans are excluded -from the results: :: - - nansum() - nanmax() - nanmin() - nanargmax() - nanargmin() - - >>> x = np.arange(10.) - >>> x[3] = np.nan - >>> x.sum() - nan - >>> np.nansum(x) - 42.0 - -How numpy handles numerical exceptions: ------------------------------------------- - -The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` -and ``'ignore'`` for ``underflow``. 
But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) - - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. -These behaviors are set on a per-thread basis. - -Examples: ------------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... print "saw stupid error!" - >>> np.seterrcall(errorhandler) - - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - FloatingPointError: invalid value encountered in divide - saw stupid error! - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C: ------------------ -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. - - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. 
- - - Reference counting often difficult to get right. - - - getting it wrong leads to memory leaks, and worse, segfaults - - - API will change for Python 3.0! - -2) pyrex - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in psuedo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - become pretty popular within Python community - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - - Maintainers not easily adaptable to new features - -Thus: - -3) cython - fork of pyrex to allow needed features for SAGE - - - being considered as the standard scipy/numpy wrapping tool - - fast indexing support for arrays - -4) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing sharable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data a.ctypes.get_strides - a.ctypes.data_as a.ctypes.shape - a.ctypes.get_as_parameter a.ctypes.shape_as - a.ctypes.get_data a.ctypes.strides - a.ctypes.get_shape a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. 
- -5) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -7) Weave - - - Plusses: - - - Phenomenal tool - - can turn many numpy expressions into C code - - dynamic compiling and loading of generated C code - - can embed pure C code in Python module and have weave extract, generate - interfaces and compile, etc. - - - Minuses: - - - Future uncertain--lacks a champion - -8) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -Fortran: Clear choice is f2py. (Pyfort is an older alternative, but not -supported any longer) - -Interfacing to C++: -------------------- - 1) CXX - 2) Boost.python - 3) SWIG - 4) Sage has used cython to wrap C++ (not pretty, but it can be done) - 5) SIP (used mainly in PyQT) - -""" diff --git a/numpy-1.6.2/numpy/doc/performance.py b/numpy-1.6.2/numpy/doc/performance.py deleted file mode 100644 index 1429e232ff..0000000000 --- a/numpy-1.6.2/numpy/doc/performance.py +++ /dev/null @@ -1,9 +0,0 @@ -""" - -=========== -Performance -=========== - -Placeholder for Improving Performance documentation. 
- -""" diff --git a/numpy-1.6.2/numpy/doc/structured_arrays.py b/numpy-1.6.2/numpy/doc/structured_arrays.py deleted file mode 100644 index af777efa49..0000000000 --- a/numpy-1.6.2/numpy/doc/structured_arrays.py +++ /dev/null @@ -1,223 +0,0 @@ -""" -===================================== -Structured Arrays (and Record Arrays) -===================================== - -Introduction -============ - -Numpy provides powerful capabilities to create arrays of structs or records. -These arrays permit one to manipulate the data by the structs or by fields of -the struct. A simple example will show what is meant.: :: - - >>> x = np.zeros((2,),dtype=('i4,f4,a10')) - >>> x[:] = [(1,2.,'Hello'),(2,3.,"World")] - >>> x - array([(1, 2.0, 'Hello'), (2, 3.0, 'World')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - -Here we have created a one-dimensional array of length 2. Each element of -this array is a record that contains three items, a 32-bit integer, a 32-bit -float, and a string of length 10 or less. If we index this array at the second -position we get the second record: :: - - >>> x[1] - (2,3.,"World") - -Conveniently, one can access any field of the array by indexing using the -string that names that field. In this case the fields have received the -default names 'f0', 'f1' and 'f2'. :: - - >>> y = x['f1'] - >>> y - array([ 2., 3.], dtype=float32) - >>> y[:] = 2*y - >>> y - array([ 4., 6.], dtype=float32) - >>> x - array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - -In these examples, y is a simple float array consisting of the 2nd field -in the record. But, rather than being a copy of the data in the structured -array, it is a view, i.e., it shares exactly the same memory locations. -Thus, when we updated this array by doubling its values, the structured -array shows the corresponding values as doubled as well. 
Likewise, if one -changes the record, the field view also changes: :: - - >>> x[1] = (-1,-1.,"Master") - >>> x - array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - >>> y - array([ 4., -1.], dtype=float32) - -Defining Structured Arrays -========================== - -One defines a structured array through the dtype object. There are -**several** alternative ways to define the fields of a record. Some of -these variants provide backward compatibility with Numeric, numarray, or -another module, and should not be used except for such purposes. These -will be so noted. One specifies record structure in -one of four alternative ways, using an argument (as supplied to a dtype -function keyword or a dtype object constructor itself). This -argument must be one of the following: 1) string, 2) tuple, 3) list, or -4) dictionary. Each of these is briefly described below. - -1) String argument (as used in the above examples). -In this case, the constructor expects a comma-separated list of type -specifiers, optionally with extra shape information. -The type specifiers can take 4 different forms: :: - - a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a - (representing bytes, ints, unsigned ints, floats, complex and - fixed length strings of specified byte lengths) - b) int8,...,uint8,...,float16, float32, float64, complex64, complex128 - (this time with bit sizes) - c) older Numeric/numarray type specifications (e.g. Float32). - Don't use these in new code! - d) Single character type specifiers (e.g H for unsigned short ints). - Avoid using these unless you must. Details can be found in the - Numpy book - -These different styles can be mixed within the same string (but why would you -want to do that?). Furthermore, each type specifier can be prefixed -with a repetition number, or a shape. In these cases an array -element is created, i.e., an array within a record. 
That array -is still referred to as a single field. An example: :: - - >>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64') - >>> x - array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])], - dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))]) - -By using strings to define the record structure, it precludes being -able to name the fields in the original definition. The names can -be changed as shown later, however. - -2) Tuple argument: The only relevant tuple case that applies to record -structures is when a structure is mapped to an existing data type. This -is done by pairing in a tuple, the existing data type with a matching -dtype definition (using any of the variants being described here). As -an example (using a definition using a list, so see 3) for further -details): :: - - >>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')])) - >>> x - array([0, 0, 0]) - >>> x['r'] - array([0, 0, 0], dtype=uint8) - -In this case, an array is produced that looks and acts like a simple int32 array, -but also has definitions for fields that use only one byte of the int32 (a bit -like Fortran equivalencing). - -3) List argument: In this case the record structure is defined with a list of -tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field -('' is permitted), 2) the type of the field, and 3) the shape (optional). -For example:: - - >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) - >>> x - array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])], - dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))]) - -4) Dictionary argument: two different forms are permitted. The first consists -of a dictionary with two required keys ('names' and 'formats'), each having an -equal sized list of values. 
The format list contains any type/shape specifier -allowed in other contexts. The names must be strings. There are two optional -keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to -the required two where offsets contain integer offsets for each field, and -titles are objects containing metadata for each field (these do not have -to be strings), where the value of None is permitted. As an example: :: - - >>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']}) - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[('col1', '>i4'), ('col2', '>f4')]) - -The other dictionary form permitted is a dictionary of name keys with tuple -values specifying type, offset, and an optional title. :: - - >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')}) - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')]) - -Accessing and modifying field names -=================================== - -The field names are an attribute of the dtype object defining the record structure. -For the last example: :: - - >>> x.dtype.names - ('col1', 'col2') - >>> x.dtype.names = ('x', 'y') - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')]) - >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names - : must replace all names at once with a sequence of length 2 - -Accessing field titles -==================================== - -The field titles provide a standard place to put associated info for fields. -They do not have to be strings. 
:: - - >>> x.dtype.fields['x'][2] - 'title 1' - -Accessing multiple fields at once -==================================== - -You can access multiple fields at once using a list of field names: :: - - >>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))], - dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) - -Notice that `x` is created with a list of tuples. :: - - >>> x[['x','y']] - array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)], - dtype=[('x', '>> x[['x','value']] - array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]), - (1.0, [[2.0, 6.0], [2.0, 6.0]])], - dtype=[('x', '>> x[['y','x']] - array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)], - dtype=[('x', '>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')]) - >>> arr['var1'] = np.arange(5) - -If you fill it in row by row, it takes a take a tuple -(but not a list or array!):: - - >>> arr[0] = (10,20) - >>> arr - array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)], - dtype=[('var1', '`_. - -""" diff --git a/numpy-1.6.2/numpy/doc/subclassing.py b/numpy-1.6.2/numpy/doc/subclassing.py deleted file mode 100644 index de0338060c..0000000000 --- a/numpy-1.6.2/numpy/doc/subclassing.py +++ /dev/null @@ -1,559 +0,0 @@ -""" -============================= -Subclassing ndarray in python -============================= - -Credits -------- - -This page is based with thanks on the wiki page on subclassing by Pierre -Gerard-Marchant - http://www.scipy.org/Subclasses. - -Introduction ------------- - -Subclassing ndarray is relatively simple, but it has some complications -compared to other Python objects. On this page we explain the machinery -that allows you to subclass ndarray, and the implications for -implementing a subclass. - -ndarrays and object creation -============================ - -Subclassing ndarray is complicated by the fact that new instances of -ndarray classes can come about in three different ways. These are: - -#. 
Explicit constructor call - as in ``MySubClass(params)``. This is - the usual route to Python instance creation. -#. View casting - casting an existing ndarray as a given subclass -#. New from template - creating a new instance from a template - instance. Examples include returning slices from a subclassed array, - creating return types from ufuncs, and copying arrays. See - :ref:`new-from-template` for more details - -The last two are characteristics of ndarrays - in order to support -things like array slicing. The complications of subclassing ndarray are -due to the mechanisms numpy has to support these latter two routes of -instance creation. - -.. _view-casting: - -View casting ------------- - -*View casting* is the standard ndarray mechanism by which you take an -ndarray of any subclass, and return a view of the array as another -(specified) subclass: - ->>> import numpy as np ->>> # create a completely useless ndarray subclass ->>> class C(np.ndarray): pass ->>> # create a standard ndarray ->>> arr = np.zeros((3,)) ->>> # take a view of it, as our useless subclass ->>> c_arr = arr.view(C) ->>> type(c_arr) - - -.. _new-from-template: - -Creating new from template --------------------------- - -New instances of an ndarray subclass can also come about by a very -similar mechanism to :ref:`view-casting`, when numpy finds it needs to -create a new instance from a template instance. The most obvious place -this has to happen is when you are taking slices of subclassed arrays. -For example: - ->>> v = c_arr[1:] ->>> type(v) # the view is of type 'C' - ->>> v is c_arr # but it's a new instance -False - -The slice is a *view* onto the original ``c_arr`` data. So, when we -take a view from the ndarray, we return a new ndarray, of the same -class, that points to the data in the original. 
- -There are other points in the use of ndarrays where we need such views, -such as copying arrays (``c_arr.copy()``), creating ufunc output arrays -(see also :ref:`array-wrap`), and reducing methods (like -``c_arr.mean()``. - -Relationship of view casting and new-from-template --------------------------------------------------- - -These paths both use the same machinery. We make the distinction here, -because they result in different input to your methods. Specifically, -:ref:`view-casting` means you have created a new instance of your array -type from any potential subclass of ndarray. :ref:`new-from-template` -means you have created a new instance of your class from a pre-existing -instance, allowing you - for example - to copy across attributes that -are particular to your subclass. - -Implications for subclassing ----------------------------- - -If we subclass ndarray, we need to deal not only with explicit -construction of our array type, but also :ref:`view-casting` or -:ref:`new-from-template`. Numpy has the machinery to do this, and this -machinery that makes subclassing slightly non-standard. - -There are two aspects to the machinery that ndarray uses to support -views and new-from-template in subclasses. - -The first is the use of the ``ndarray.__new__`` method for the main work -of object initialization, rather then the more usual ``__init__`` -method. The second is the use of the ``__array_finalize__`` method to -allow subclasses to clean up after the creation of views and new -instances from templates. - -A brief Python primer on ``__new__`` and ``__init__`` -===================================================== - -``__new__`` is a standard Python method, and, if present, is called -before ``__init__`` when we create a class instance. See the `python -__new__ documentation -`_ for more detail. - -For example, consider the following Python code: - -.. 
testcode:: - - class C(object): - def __new__(cls, *args): - print 'Cls in __new__:', cls - print 'Args in __new__:', args - return object.__new__(cls, *args) - - def __init__(self, *args): - print 'type(self) in __init__:', type(self) - print 'Args in __init__:', args - -meaning that we get: - ->>> c = C('hello') -Cls in __new__: -Args in __new__: ('hello',) -type(self) in __init__: -Args in __init__: ('hello',) - -When we call ``C('hello')``, the ``__new__`` method gets its own class -as first argument, and the passed argument, which is the string -``'hello'``. After python calls ``__new__``, it usually (see below) -calls our ``__init__`` method, with the output of ``__new__`` as the -first argument (now a class instance), and the passed arguments -following. - -As you can see, the object can be initialized in the ``__new__`` -method or the ``__init__`` method, or both, and in fact ndarray does -not have an ``__init__`` method, because all the initialization is -done in the ``__new__`` method. - -Why use ``__new__`` rather than just the usual ``__init__``? Because -in some cases, as for ndarray, we want to be able to return an object -of some other class. Consider the following: - -.. testcode:: - - class D(C): - def __new__(cls, *args): - print 'D cls is:', cls - print 'D args in __new__:', args - return C.__new__(C, *args) - - def __init__(self, *args): - # we never get here - print 'In D __init__' - -meaning that: - ->>> obj = D('hello') -D cls is: -D args in __new__: ('hello',) -Cls in __new__: -Args in __new__: ('hello',) ->>> type(obj) - - -The definition of ``C`` is the same as before, but for ``D``, the -``__new__`` method returns an instance of class ``C`` rather than -``D``. Note that the ``__init__`` method of ``D`` does not get -called. In general, when the ``__new__`` method returns an object of -class other than the class in which it is defined, the ``__init__`` -method of that class is not called. 
- -This is how subclasses of the ndarray class are able to return views -that preserve the class type. When taking a view, the standard -ndarray machinery creates the new ndarray object with something -like:: - - obj = ndarray.__new__(subtype, shape, ... - -where ``subdtype`` is the subclass. Thus the returned view is of the -same class as the subclass, rather than being of class ``ndarray``. - -That solves the problem of returning views of the same type, but now -we have a new problem. The machinery of ndarray can set the class -this way, in its standard methods for taking views, but the ndarray -``__new__`` method knows nothing of what we have done in our own -``__new__`` method in order to set attributes, and so on. (Aside - -why not call ``obj = subdtype.__new__(...`` then? Because we may not -have a ``__new__`` method with the same call signature). - -The role of ``__array_finalize__`` -================================== - -``__array_finalize__`` is the mechanism that numpy provides to allow -subclasses to handle the various ways that new instances get created. - -Remember that subclass instances can come about in these three ways: - -#. explicit constructor call (``obj = MySubClass(params)``). This will - call the usual sequence of ``MySubClass.__new__`` then (if it exists) - ``MySubClass.__init__``. -#. :ref:`view-casting` -#. :ref:`new-from-template` - -Our ``MySubClass.__new__`` method only gets called in the case of the -explicit constructor call, so we can't rely on ``MySubClass.__new__`` or -``MySubClass.__init__`` to deal with the view casting and -new-from-template. It turns out that ``MySubClass.__array_finalize__`` -*does* get called for all three methods of object creation, so this is -where our object creation housekeeping usually goes. - -* For the explicit constructor call, our subclass will need to create a - new ndarray instance of its own class. 
In practice this means that - we, the authors of the code, will need to make a call to - ``ndarray.__new__(MySubClass,...)``, or do view casting of an existing - array (see below) -* For view casting and new-from-template, the equivalent of - ``ndarray.__new__(MySubClass,...`` is called, at the C level. - -The arguments that ``__array_finalize__`` recieves differ for the three -methods of instance creation above. - -The following code allows us to look at the call sequences and arguments: - -.. testcode:: - - import numpy as np - - class C(np.ndarray): - def __new__(cls, *args, **kwargs): - print 'In __new__ with class %s' % cls - return np.ndarray.__new__(cls, *args, **kwargs) - - def __init__(self, *args, **kwargs): - # in practice you probably will not need or want an __init__ - # method for your subclass - print 'In __init__ with class %s' % self.__class__ - - def __array_finalize__(self, obj): - print 'In array_finalize:' - print ' self type is %s' % type(self) - print ' obj type is %s' % type(obj) - - -Now: - ->>> # Explicit constructor ->>> c = C((10,)) -In __new__ with class -In array_finalize: - self type is - obj type is -In __init__ with class ->>> # View casting ->>> a = np.arange(10) ->>> cast_a = a.view(C) -In array_finalize: - self type is - obj type is ->>> # Slicing (example of new-from-template) ->>> cv = c[:1] -In array_finalize: - self type is - obj type is - -The signature of ``__array_finalize__`` is:: - - def __array_finalize__(self, obj): - -``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our -own class (``self``) as well as the object from which the view has been -taken (``obj``). 
As you can see from the output above, the ``self`` is -always a newly created instance of our subclass, and the type of ``obj`` -differs for the three instance creation methods: - -* When called from the explicit constructor, ``obj`` is ``None`` -* When called from view casting, ``obj`` can be an instance of any - subclass of ndarray, including our own. -* When called in new-from-template, ``obj`` is another instance of our - own subclass, that we might use to update the new ``self`` instance. - -Because ``__array_finalize__`` is the only method that always sees new -instances being created, it is the sensible place to fill in instance -defaults for new object attributes, among other tasks. - -This may be clearer with an example. - -Simple example - adding an extra attribute to ndarray ------------------------------------------------------ - -.. testcode:: - - import numpy as np - - class InfoArray(np.ndarray): - - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, - strides=None, order=None, info=None): - # Create the ndarray instance of our type, given the usual - # ndarray input arguments. This will call the standard - # ndarray constructor, but return an object of our type. - # It also triggers a call to InfoArray.__array_finalize__ - obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides, - order) - # set the new 'info' attribute to the value passed - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # ``self`` is a new object resulting from - # ndarray.__new__(InfoArray, ...), therefore it only has - # attributes that the ndarray.__new__ constructor gave it - - # i.e. those of a standard ndarray. - # - # We could have got to the ndarray.__new__ call in 3 ways: - # From an explicit constructor - e.g. 
InfoArray(): - # obj is None - # (we're in the middle of the InfoArray.__new__ - # constructor, and self.info will be set when we return to - # InfoArray.__new__) - if obj is None: return - # From view casting - e.g arr.view(InfoArray): - # obj is arr - # (type(obj) can be InfoArray) - # From new-from-template - e.g infoarr[:3] - # type(obj) is InfoArray - # - # Note that it is here, rather than in the __new__ method, - # that we set the default value for 'info', because this - # method sees all creation of default objects - with the - # InfoArray.__new__ constructor, but also with - # arr.view(InfoArray). - self.info = getattr(obj, 'info', None) - # We do not need to return anything - - -Using the object looks like this: - - >>> obj = InfoArray(shape=(3,)) # explicit constructor - >>> type(obj) - - >>> obj.info is None - True - >>> obj = InfoArray(shape=(3,), info='information') - >>> obj.info - 'information' - >>> v = obj[1:] # new-from-template - here - slicing - >>> type(v) - - >>> v.info - 'information' - >>> arr = np.arange(10) - >>> cast_arr = arr.view(InfoArray) # view casting - >>> type(cast_arr) - - >>> cast_arr.info is None - True - -This class isn't very useful, because it has the same constructor as the -bare ndarray object, including passing in buffers and shapes and so on. -We would probably prefer the constructor to be able to take an already -formed ndarray from the usual numpy calls to ``np.array`` and return an -object. - -Slightly more realistic example - attribute added to existing array -------------------------------------------------------------------- - -Here is a class that takes a standard ndarray that already exists, casts -as our type, and adds an extra attribute. - -.. 
testcode:: - - import numpy as np - - class RealisticInfoArray(np.ndarray): - - def __new__(cls, input_array, info=None): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = np.asarray(input_array).view(cls) - # add the new attribute to the created instance - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # see InfoArray.__array_finalize__ for comments - if obj is None: return - self.info = getattr(obj, 'info', None) - - -So: - - >>> arr = np.arange(5) - >>> obj = RealisticInfoArray(arr, info='information') - >>> type(obj) - - >>> obj.info - 'information' - >>> v = obj[1:] - >>> type(v) - - >>> v.info - 'information' - -.. _array-wrap: - -``__array_wrap__`` for ufuncs -------------------------------------------------------- - -``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy -functions, to allow a subclass to set the type of the return value -and update attributes and metadata. Let's show how this works with an example. -First we make the same subclass as above, but with a different name and -some print statements: - -.. 
testcode:: - - import numpy as np - - class MySubClass(np.ndarray): - - def __new__(cls, input_array, info=None): - obj = np.asarray(input_array).view(cls) - obj.info = info - return obj - - def __array_finalize__(self, obj): - print 'In __array_finalize__:' - print ' self is %s' % repr(self) - print ' obj is %s' % repr(obj) - if obj is None: return - self.info = getattr(obj, 'info', None) - - def __array_wrap__(self, out_arr, context=None): - print 'In __array_wrap__:' - print ' self is %s' % repr(self) - print ' arr is %s' % repr(out_arr) - # then just call the parent - return np.ndarray.__array_wrap__(self, out_arr, context) - -We run a ufunc on an instance of our new array: - ->>> obj = MySubClass(np.arange(5), info='spam') -In __array_finalize__: - self is MySubClass([0, 1, 2, 3, 4]) - obj is array([0, 1, 2, 3, 4]) ->>> arr2 = np.arange(5)+1 ->>> ret = np.add(arr2, obj) -In __array_wrap__: - self is MySubClass([0, 1, 2, 3, 4]) - arr is array([1, 3, 5, 7, 9]) -In __array_finalize__: - self is MySubClass([1, 3, 5, 7, 9]) - obj is MySubClass([0, 1, 2, 3, 4]) ->>> ret -MySubClass([1, 3, 5, 7, 9]) ->>> ret.info -'spam' - -Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the -input with the highest ``__array_priority__`` value, in this case -``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and -``out_arr`` as the (ndarray) result of the addition. In turn, the -default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the -result to class ``MySubClass``, and called ``__array_finalize__`` - -hence the copying of the ``info`` attribute. This has all happened at the C level. - -But, we could do anything we wanted: - -.. 
testcode:: - - class SillySubClass(np.ndarray): - - def __array_wrap__(self, arr, context=None): - return 'I lost your data' - ->>> arr1 = np.arange(5) ->>> obj = arr1.view(SillySubClass) ->>> arr2 = np.arange(5) ->>> ret = np.multiply(obj, arr2) ->>> ret -'I lost your data' - -So, by defining a specific ``__array_wrap__`` method for our subclass, -we can tweak the output from ufuncs. The ``__array_wrap__`` method -requires ``self``, then an argument - which is the result of the ufunc - -and an optional parameter *context*. This parameter is returned by some -ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc, -domain of the ufunc). ``__array_wrap__`` should return an instance of -its containing class. See the masked array subclass for an -implementation. - -In addition to ``__array_wrap__``, which is called on the way out of the -ufunc, there is also an ``__array_prepare__`` method which is called on -the way into the ufunc, after the output arrays are created but before any -computation has been performed. The default implementation does nothing -but pass through the array. ``__array_prepare__`` should not attempt to -access the array data or resize the array, it is intended for setting the -output array type, updating attributes and metadata, and performing any -checks based on the input that may be desired before computation begins. -Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or -subclass thereof or raise an error. - -Extra gotchas - custom ``__del__`` methods and ndarray.base ------------------------------------------------------------ - -One of the problems that ndarray solves is keeping track of memory -ownership of ndarrays and their views. Consider the case where we have -created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``. -The two objects are looking at the same memory. 
Numpy keeps track of -where the data came from for a particular array or view, with the -``base`` attribute: - ->>> # A normal ndarray, that owns its own data ->>> arr = np.zeros((4,)) ->>> # In this case, base is None ->>> arr.base is None -True ->>> # We take a view ->>> v1 = arr[1:] ->>> # base now points to the array that it derived from ->>> v1.base is arr -True ->>> # Take a view of a view ->>> v2 = v1[1:] ->>> # base points to the view it derived from ->>> v2.base is v1 -True - -In general, if the array owns its own memory, as for ``arr`` in this -case, then ``arr.base`` will be None - there are some exceptions to this -- see the numpy book for more details. - -The ``base`` attribute is useful in being able to tell whether we have -a view or the original array. This in turn can be useful if we need -to know whether or not to do some specific cleanup when the subclassed -array is deleted. For example, we may only want to do the cleanup if -the original array is deleted, but not the views. For an example of -how this can work, have a look at the ``memmap`` class in -``numpy.core``. - - -""" diff --git a/numpy-1.6.2/numpy/doc/ufuncs.py b/numpy-1.6.2/numpy/doc/ufuncs.py deleted file mode 100644 index e85b477635..0000000000 --- a/numpy-1.6.2/numpy/doc/ufuncs.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -=================== -Universal Functions -=================== - -Ufuncs are, generally speaking, mathematical functions or operations that are -applied element-by-element to the contents of an array. That is, the result -in each output array element only depends on the value in the corresponding -input array (or arrays) and on no other array elements. Numpy comes with a -large suite of ufuncs, and scipy extends that suite substantially. The simplest -example is the addition operator: :: - - >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) - array([1, 3, 2, 6]) - -The unfunc module lists all the available ufuncs in numpy. 
Documentation on -the specific ufuncs may be found in those modules. This documentation is -intended to address the more general aspects of unfuncs common to most of -them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) -have equivalent functions defined (e.g. add() for +) - -Type coercion -============= - -What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of -two different types? What is the type of the result? Typically, the result is -the higher of the two types. For example: :: - - float32 + float64 -> float64 - int8 + int32 -> int32 - int16 + float32 -> float32 - float32 + complex64 -> complex64 - -There are some less obvious cases generally involving mixes of types -(e.g. uints, ints and floats) where equal bit sizes for each are not -capable of saving all the information in a different type of equivalent -bit size. Some examples are int32 vs float32 or uint32 vs int32. -Generally, the result is the higher type of larger size than both -(if available). So: :: - - int32 + float32 -> float64 - uint32 + int32 -> int64 - -Finally, the type coercion behavior when expressions involve Python -scalars is different than that seen for arrays. Since Python has a -limited number of types, combining a Python int with a dtype=np.int8 -array does not coerce to the higher type but instead, the type of the -array prevails. So the rules for Python scalars combined with arrays is -that the result will be that of the array equivalent the Python scalar -if the Python scalar is of a higher 'kind' than the array (e.g., float -vs. int), otherwise the resultant type will be that of the array. -For example: :: - - Python int + int8 -> int8 - Python float + int8 -> float64 - -ufunc methods -============= - -Binary ufuncs support 4 methods. - -**.reduce(arr)** applies the binary operator to elements of the array in - sequence. 
For example: :: - - >>> np.add.reduce(np.arange(10)) # adds all elements of array - 45 - -For multidimensional arrays, the first dimension is reduced by default: :: - - >>> np.add.reduce(np.arange(10).reshape(2,5)) - array([ 5, 7, 9, 11, 13]) - -The axis keyword can be used to specify different axes to reduce: :: - - >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) - array([10, 35]) - -**.accumulate(arr)** applies the binary operator and generates an an -equivalently shaped array that includes the accumulated amount for each -element of the array. A couple examples: :: - - >>> np.add.accumulate(np.arange(10)) - array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) - >>> np.multiply.accumulate(np.arange(1,9)) - array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) - -The behavior for multidimensional arrays is the same as for .reduce(), -as is the use of the axis keyword). - -**.reduceat(arr,indices)** allows one to apply reduce to selected parts - of an array. It is a difficult method to understand. See the documentation - at: - -**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and - arr2. It will work on multidimensional arrays (the shape of the result is - the concatenation of the two input shapes.: :: - - >>> np.multiply.outer(np.arange(3),np.arange(4)) - array([[0, 0, 0, 0], - [0, 1, 2, 3], - [0, 2, 4, 6]]) - -Output arguments -================ - -All ufuncs accept an optional output array. The array must be of the expected -output shape. Beware that if the type of the output array is of a different -(and lower) type than the output result, the results may be silently truncated -or otherwise corrupted in the downcast to the lower type. This usage is useful -when one wants to avoid creating large temporary arrays and instead allows one -to reuse the same array memory repeatedly (at the expense of not being able to -use more convenient operator notation in expressions). 
Note that when the -output argument is used, the ufunc still returns a reference to the result. - - >>> x = np.arange(2) - >>> np.add(np.arange(2),np.arange(2.),x) - array([0, 2]) - >>> x - array([0, 2]) - -and & or as ufuncs -================== - -Invariably people try to use the python 'and' and 'or' as logical operators -(and quite understandably). But these operators do not behave as normal -operators since Python treats these quite differently. They cannot be -overloaded with array equivalents. Thus using 'and' or 'or' with an array -results in an error. There are two alternatives: - - 1) use the ufunc functions logical_and() and logical_or(). - 2) use the bitwise operators & and \\|. The drawback of these is that if - the arguments to these operators are not boolean arrays, the result is - likely incorrect. On the other hand, most usages of logical_and and - logical_or are with boolean arrays. As long as one is careful, this is - a convenient way to apply these operators. - -""" diff --git a/numpy-1.6.2/numpy/dual.py b/numpy-1.6.2/numpy/dual.py deleted file mode 100644 index 3c863bf6f5..0000000000 --- a/numpy-1.6.2/numpy/dual.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Aliases for functions which may be accelerated by Scipy. - -Scipy_ can be built to use accelerated or otherwise improved libraries -for FFTs, linear algebra, and special functions. This module allows -developers to transparently support these accelerated functions when -scipy is available but still support users who have only installed -Numpy. - -.. _Scipy : http://www.scipy.org - -""" -# This module should be used for functions both in numpy and scipy if -# you want to use the numpy version if available but the scipy version -# otherwise. 
-# Usage --- from numpy.dual import fft, inv - -__all__ = ['fft','ifft','fftn','ifftn','fft2','ifft2', - 'norm','inv','svd','solve','det','eig','eigvals', - 'eigh','eigvalsh','lstsq', 'pinv','cholesky','i0'] - -import numpy.linalg as linpkg -import numpy.fft as fftpkg -from numpy.lib import i0 -import sys - - -fft = fftpkg.fft -ifft = fftpkg.ifft -fftn = fftpkg.fftn -ifftn = fftpkg.ifftn -fft2 = fftpkg.fft2 -ifft2 = fftpkg.ifft2 - -norm = linpkg.norm -inv = linpkg.inv -svd = linpkg.svd -solve = linpkg.solve -det = linpkg.det -eig = linpkg.eig -eigvals = linpkg.eigvals -eigh = linpkg.eigh -eigvalsh = linpkg.eigvalsh -lstsq = linpkg.lstsq -pinv = linpkg.pinv -cholesky = linpkg.cholesky - -_restore_dict = {} - -def register_func(name, func): - if name not in __all__: - raise ValueError, "%s not a dual function." % name - f = sys._getframe(0).f_globals - _restore_dict[name] = f[name] - f[name] = func - -def restore_func(name): - if name not in __all__: - raise ValueError, "%s not a dual function." % name - try: - val = _restore_dict[name] - except KeyError: - return - else: - sys._getframe(0).f_globals[name] = val - -def restore_all(): - for name in _restore_dict.keys(): - restore_func(name) diff --git a/numpy-1.6.2/numpy/f2py/__init__.py b/numpy-1.6.2/numpy/f2py/__init__.py deleted file mode 100644 index 220cb3d879..0000000000 --- a/numpy-1.6.2/numpy/f2py/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python - -__all__ = ['run_main','compile','f2py_testing'] - -import os -import sys -import commands - -import f2py2e -import f2py_testing -import diagnose - -from info import __doc__ - -run_main = f2py2e.run_main -main = f2py2e.main - -def compile(source, - modulename = 'untitled', - extra_args = '', - verbose = 1, - source_fn = None - ): - ''' Build extension module from processing source with f2py. - Read the source of this function for more information. 
- ''' - from numpy.distutils.exec_command import exec_command - import tempfile - if source_fn is None: - fname = os.path.join(tempfile.mktemp()+'.f') - else: - fname = source_fn - - f = open(fname,'w') - f.write(source) - f.close() - - args = ' -c -m %s %s %s'%(modulename,fname,extra_args) - c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable,args) - s,o = exec_command(c) - if source_fn is None: - try: os.remove(fname) - except OSError: pass - return s - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/f2py/__version__.py b/numpy-1.6.2/numpy/f2py/__version__.py deleted file mode 100644 index 104c2e1a89..0000000000 --- a/numpy-1.6.2/numpy/f2py/__version__.py +++ /dev/null @@ -1,8 +0,0 @@ -major = 2 - -try: - from __svn_version__ import version - version_info = (major, version) - version = '%s_%s' % version_info -except (ImportError, ValueError): - version = str(major) diff --git a/numpy-1.6.2/numpy/f2py/auxfuncs.py b/numpy-1.6.2/numpy/f2py/auxfuncs.py deleted file mode 100644 index a12d92b7ea..0000000000 --- a/numpy-1.6.2/numpy/f2py/auxfuncs.py +++ /dev/null @@ -1,710 +0,0 @@ -#!/usr/bin/env python -""" - -Auxiliary functions for f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) LICENSE. - - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/07/24 19:01:55 $ -Pearu Peterson -""" -__version__ = "$Revision: 1.65 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import pprint -import sys -import types -import cfuncs - - -errmess=sys.stderr.write -#outmess=sys.stdout.write -show=pprint.pprint - -options={} -debugoptions=[] -wrapfuncs = 1 - -if sys.version_info[0] >= 3: - from functools import reduce - -def outmess(t): - if options.get('verbose',1): - sys.stdout.write(t) - -def debugcapi(var): - return 'capi' in debugoptions - -def _isstring(var): - return 'typespec' in var and var['typespec']=='character' and (not isexternal(var)) - -def isstring(var): - return _isstring(var) and not isarray(var) - -def ischaracter(var): - return isstring(var) and 'charselector' not in var - -def isstringarray(var): - return isarray(var) and _isstring(var) - -def isarrayofstrings(var): - # leaving out '*' for now so that - # `character*(*) a(m)` and `character a(m,*)` - # are treated differently. Luckily `character**` is illegal. 
- return isstringarray(var) and var['dimension'][-1]=='(*)' - -def isarray(var): - return 'dimension' in var and (not isexternal(var)) - -def isscalar(var): - return not (isarray(var) or isstring(var) or isexternal(var)) - -def iscomplex(var): - return isscalar(var) and var.get('typespec') in ['complex','double complex'] - -def islogical(var): - return isscalar(var) and var.get('typespec')=='logical' - -def isinteger(var): - return isscalar(var) and var.get('typespec')=='integer' - -def isreal(var): - return isscalar(var) and var.get('typespec')=='real' - -def get_kind(var): - try: - return var['kindselector']['*'] - except KeyError: - try: - return var['kindselector']['kind'] - except KeyError: - pass - -def islong_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') not in ['integer','logical']: - return 0 - return get_kind(var)=='8' - -def isunsigned_char(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-1' - -def isunsigned_short(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-2' - -def isunsigned(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-4' - -def isunsigned_long_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-8' - -def isdouble(var): - if not isscalar(var): - return 0 - if not var.get('typespec')=='real': - return 0 - return get_kind(var)=='8' - -def islong_double(var): - if not isscalar(var): - return 0 - if not var.get('typespec')=='real': - return 0 - return get_kind(var)=='16' - -def islong_complex(var): - if not iscomplex(var): - return 0 - return get_kind(var)=='32' - -def iscomplexarray(var): - return isarray(var) and var.get('typespec') in ['complex','double complex'] - -def isint1array(var): - return isarray(var) and var.get('typespec')=='integer' \ - 
and get_kind(var)=='1' - -def isunsigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-1' - -def isunsigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-2' - -def isunsignedarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-4' - -def isunsigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-8' - -def issigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='1' - -def issigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='2' - -def issigned_array(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='4' - -def issigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='8' - -def isallocatable(var): - return 'attrspec' in var and 'allocatable' in var['attrspec'] - -def ismutable(var): - return not (not 'dimension' in var or isstring(var)) - -def ismoduleroutine(rout): - return 'modulename' in rout - -def ismodule(rout): - return ('block' in rout and 'module'==rout['block']) - -def isfunction(rout): - return ('block' in rout and 'function'==rout['block']) - -#def isfunction_wrap(rout): -# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout)) - -def isfunction_wrap(rout): - if isintent_c(rout): - return 0 - return wrapfuncs and isfunction(rout) and (not isexternal(rout)) - -def issubroutine(rout): - return ('block' in rout and 'subroutine'==rout['block']) - -def issubroutine_wrap(rout): - if isintent_c(rout): - return 0 - return issubroutine(rout) and hasassumedshape(rout) - -def hasassumedshape(rout): - if 
rout.get('hasassumedshape'): - return True - for a in rout['args']: - for d in rout['vars'].get(a,{}).get('dimension',[]): - if d==':': - rout['hasassumedshape'] = True - return True - return False - -def isroutine(rout): - return isfunction(rout) or issubroutine(rout) - -def islogicalfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islogical(rout['vars'][a]) - return 0 - -def islong_longfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islong_long(rout['vars'][a]) - return 0 - -def islong_doublefunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islong_double(rout['vars'][a]) - return 0 - -def iscomplexfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return iscomplex(rout['vars'][a]) - return 0 - -def iscomplexfunction_warn(rout): - if iscomplexfunction(rout): - outmess("""\ - ************************************************************** - Warning: code with a function returning complex value - may not work correctly with your Fortran compiler. - Run the following test before using it in your applications: - $(f2py install dir)/test-site/{b/runme_scalar,e/runme} - When using GNU gcc/g77 compilers, codes should work correctly. 
- **************************************************************\n""") - return 1 - return 0 - -def isstringfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return isstring(rout['vars'][a]) - return 0 - -def hasexternals(rout): - return 'externals' in rout and rout['externals'] - -def isthreadsafe(rout): - return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements'] - -def hasvariables(rout): - return 'vars' in rout and rout['vars'] - -def isoptional(var): - return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var) - -def isexternal(var): - return ('attrspec' in var and 'external' in var['attrspec']) - -def isrequired(var): - return not isoptional(var) and isintent_nothide(var) - -def isintent_in(var): - if 'intent' not in var: - return 1 - if 'hide' in var['intent']: - return 0 - if 'inplace' in var['intent']: - return 0 - if 'in' in var['intent']: - return 1 - if 'out' in var['intent']: - return 0 - if 'inout' in var['intent']: - return 0 - if 'outin' in var['intent']: - return 0 - return 1 - -def isintent_inout(var): - return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent'] - -def isintent_out(var): - return 'out' in var.get('intent',[]) - -def isintent_hide(var): - return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var))))) - -def isintent_nothide(var): - return not isintent_hide(var) - -def isintent_c(var): - return 'c' in var.get('intent',[]) - -# def isintent_f(var): -# return not isintent_c(var) - -def isintent_cache(var): - return 'cache' in var.get('intent',[]) - -def isintent_copy(var): - return 'copy' in var.get('intent',[]) - -def 
isintent_overwrite(var): - return 'overwrite' in var.get('intent',[]) - -def isintent_callback(var): - return 'callback' in var.get('intent',[]) - -def isintent_inplace(var): - return 'inplace' in var.get('intent',[]) - -def isintent_aux(var): - return 'aux' in var.get('intent',[]) - -def isintent_aligned4(var): - return 'aligned4' in var.get('intent',[]) -def isintent_aligned8(var): - return 'aligned8' in var.get('intent',[]) -def isintent_aligned16(var): - return 'aligned16' in var.get('intent',[]) - -isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT', - isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE', - isintent_cache:'INTENT_CACHE', - isintent_c:'INTENT_C',isoptional:'OPTIONAL', - isintent_inplace:'INTENT_INPLACE', - isintent_aligned4:'INTENT_ALIGNED4', - isintent_aligned8:'INTENT_ALIGNED8', - isintent_aligned16:'INTENT_ALIGNED16', - } - -def isprivate(var): - return 'attrspec' in var and 'private' in var['attrspec'] - -def hasinitvalue(var): - return '=' in var - -def hasinitvalueasstring(var): - if not hasinitvalue(var): - return 0 - return var['='][0] in ['"',"'"] - -def hasnote(var): - return 'note' in var - -def hasresultnote(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return hasnote(rout['vars'][a]) - return 0 - -def hascommon(rout): - return 'common' in rout - -def containscommon(rout): - if hascommon(rout): - return 1 - if hasbody(rout): - for b in rout['body']: - if containscommon(b): - return 1 - return 0 - -def containsmodule(block): - if ismodule(block): - return 1 - if not hasbody(block): - return 0 - for b in block['body']: - if containsmodule(b): - return 1 - return 0 - -def hasbody(rout): - return 'body' in rout - -def hascallstatement(rout): - return getcallstatement(rout) is not None - -def istrue(var): - return 1 - -def isfalse(var): - return 0 - -class F2PYError(Exception): - pass - -class throw_error: - def 
__init__(self,mess): - self.mess = mess - def __call__(self,var): - mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess) - raise F2PYError,mess - -def l_and(*f): - l,l2='lambda v',[] - for i in range(len(f)): - l='%s,f%d=f[%d]'%(l,i,i) - l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l,' and '.join(l2))) - -def l_or(*f): - l,l2='lambda v',[] - for i in range(len(f)): - l='%s,f%d=f[%d]'%(l,i,i) - l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l,' or '.join(l2))) - -def l_not(f): - return eval('lambda v,f=f:not f(v)') - -def isdummyroutine(rout): - try: - return rout['f2pyenhancements']['fortranname']=='' - except KeyError: - return 0 - -def getfortranname(rout): - try: - name = rout['f2pyenhancements']['fortranname'] - if name=='': - raise KeyError - if not name: - errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements'])) - raise KeyError - except KeyError: - name = rout['name'] - return name - -def getmultilineblock(rout,blockname,comment=1,counter=0): - try: - r = rout['f2pyenhancements'].get(blockname) - except KeyError: - return - if not r: return - if counter>0 and type(r) is type(''): - return - if type(r) is type([]): - if counter>=len(r): return - r = r[counter] - if r[:3]=="'''": - if comment: - r = '\t/* start ' + blockname + ' multiline ('+`counter`+') */\n' + r[3:] - else: - r = r[3:] - if r[-3:]=="'''": - if comment: - r = r[:-3] + '\n\t/* end multiline ('+`counter`+')*/' - else: - r = r[:-3] - else: - errmess("%s multiline block should end with `'''`: %s\n" \ - % (blockname,repr(r))) - return r - -def getcallstatement(rout): - return getmultilineblock(rout,'callstatement') - -def getcallprotoargument(rout,cb_map={}): - r = getmultilineblock(rout,'callprotoargument',comment=0) - if r: return r - if hascallstatement(rout): - outmess('warning: callstatement is defined without callprotoargument\n') - return - from capi_maps import getctype - arg_types,arg_types2 = [],[] - if l_and(isstringfunction,l_not(isfunction_wrap))(rout): - 
arg_types.extend(['char*','size_t']) - for n in rout['args']: - var = rout['vars'][n] - if isintent_callback(var): - continue - if n in cb_map: - ctype = cb_map[n]+'_typedef' - else: - ctype = getctype(var) - if l_and(isintent_c,l_or(isscalar,iscomplex))(var): - pass - elif isstring(var): - pass - #ctype = 'void*' - else: - ctype = ctype+'*' - if isstring(var) or isarrayofstrings(var): - arg_types2.append('size_t') - arg_types.append(ctype) - - proto_args = ','.join(arg_types+arg_types2) - if not proto_args: - proto_args = 'void' - #print proto_args - return proto_args - -def getusercode(rout): - return getmultilineblock(rout,'usercode') - -def getusercode1(rout): - return getmultilineblock(rout,'usercode',counter=1) - -def getpymethoddef(rout): - return getmultilineblock(rout,'pymethoddef') - -def getargs(rout): - sortargs,args=[],[] - if 'args' in rout: - args=rout['args'] - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: sortargs=rout['args'] - return args,sortargs - -def getargs2(rout): - sortargs,args=[],rout.get('args',[]) - auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\ - and a not in args] - args = auxvars + args - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: sortargs=auxvars + rout['args'] - return args,sortargs - -def getrestdoc(rout): - if 'f2pymultilines' not in rout: - return None - k = None - if rout['block']=='python module': - k = rout['block'],rout['name'] - return rout['f2pymultilines'].get(k,None) - -def gentitle(name): - l=(80-len(name)-6)//2 - return '/*%s %s %s*/'%(l*'*',name,l*'*') - -def flatlist(l): - if type(l)==types.ListType: - return reduce(lambda x,y,f=flatlist:x+f(y),l,[]) - return [l] - -def stripcomma(s): - if s and s[-1]==',': return s[:-1] - return s - -def 
replace(str,d,defaultsep=''): - if type(d)==types.ListType: - return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),d) - if type(str)==types.ListType: - return map(lambda s,f=replace,sep=defaultsep,d=d:f(s,d,sep),str) - for k in 2*d.keys(): - if k=='separatorsfor': - continue - if 'separatorsfor' in d and k in d['separatorsfor']: - sep=d['separatorsfor'][k] - else: - sep=defaultsep - if type(d[k])==types.ListType: - str=str.replace('#%s#'%(k),sep.join(flatlist(d[k]))) - else: - str=str.replace('#%s#'%(k),d[k]) - return str - -def dictappend(rd,ar): - if type(ar)==types.ListType: - for a in ar: - rd=dictappend(rd,a) - return rd - for k in ar.keys(): - if k[0]=='_': - continue - if k in rd: - if type(rd[k])==str: - rd[k]=[rd[k]] - if type(rd[k])==types.ListType: - if type(ar[k])==types.ListType: - rd[k]=rd[k]+ar[k] - else: - rd[k].append(ar[k]) - elif type(rd[k])==types.DictType: - if type(ar[k])==types.DictType: - if k=='separatorsfor': - for k1 in ar[k].keys(): - if k1 not in rd[k]: - rd[k][k1]=ar[k][k1] - else: - rd[k]=dictappend(rd[k],ar[k]) - else: - rd[k]=ar[k] - return rd - -def applyrules(rules,d,var={}): - ret={} - if type(rules)==types.ListType: - for r in rules: - rr=applyrules(r,d,var) - ret=dictappend(ret,rr) - if '_break' in rr: - break - return ret - if '_check' in rules and (not rules['_check'](var)): - return ret - if 'need' in rules: - res = applyrules({'needs':rules['need']},d,var) - if 'needs' in res: - cfuncs.append_needs(res['needs']) - - for k in rules.keys(): - if k=='separatorsfor': - ret[k]=rules[k]; continue - if type(rules[k])==str: - ret[k]=replace(rules[k],d) - elif type(rules[k])==types.ListType: - ret[k]=[] - for i in rules[k]: - ar=applyrules({k:i},d,var) - if k in ar: - ret[k].append(ar[k]) - elif k[0]=='_': - continue - elif type(rules[k])==types.DictType: - ret[k]=[] - for k1 in rules[k].keys(): - if type(k1)==types.FunctionType and k1(var): - if type(rules[k][k1])==types.ListType: - for i in rules[k][k1]: - if 
type(i)==types.DictType: - res=applyrules({'supertext':i},d,var) - if 'supertext' in res: - i=res['supertext'] - else: i='' - ret[k].append(replace(i,d)) - else: - i=rules[k][k1] - if type(i)==types.DictType: - res=applyrules({'supertext':i},d) - if 'supertext' in res: - i=res['supertext'] - else: i='' - ret[k].append(replace(i,d)) - else: - errmess('applyrules: ignoring rule %s.\n'%`rules[k]`) - if type(ret[k])==types.ListType: - if len(ret[k])==1: - ret[k]=ret[k][0] - if ret[k]==[]: - del ret[k] - return ret diff --git a/numpy-1.6.2/numpy/f2py/capi_maps.py b/numpy-1.6.2/numpy/f2py/capi_maps.py deleted file mode 100644 index beff1e2125..0000000000 --- a/numpy-1.6.2/numpy/f2py/capi_maps.py +++ /dev/null @@ -1,771 +0,0 @@ -#!/usr/bin/env python -""" - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.60 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import copy -import re -import os -import sys -from auxfuncs import * -from crackfortran import markoutercomma -import cb_rules - -# Numarray and Numeric users should set this False -using_newcore = True - -depargs=[] -lcb_map={} -lcb2_map={} -# forced casting: mainly caused by the fact that Python or Numeric -# C/APIs do not support the corresponding C types. 
-c2py_map={'double':'float', - 'float':'float', # forced casting - 'long_double':'float', # forced casting - 'char':'int', # forced casting - 'signed_char':'int', # forced casting - 'unsigned_char':'int', # forced casting - 'short':'int', # forced casting - 'unsigned_short':'int', # forced casting - 'int':'int', # (forced casting) - 'long':'int', - 'long_long':'long', - 'unsigned':'int', # forced casting - 'complex_float':'complex', # forced casting - 'complex_double':'complex', - 'complex_long_double':'complex', # forced casting - 'string':'string', - } -c2capi_map={'double':'PyArray_DOUBLE', - 'float':'PyArray_FLOAT', - 'long_double':'PyArray_DOUBLE', # forced casting - 'char':'PyArray_CHAR', - 'unsigned_char':'PyArray_UBYTE', - 'signed_char':'PyArray_SBYTE', - 'short':'PyArray_SHORT', - 'unsigned_short':'PyArray_USHORT', - 'int':'PyArray_INT', - 'unsigned':'PyArray_UINT', - 'long':'PyArray_LONG', - 'long_long':'PyArray_LONG', # forced casting - 'complex_float':'PyArray_CFLOAT', - 'complex_double':'PyArray_CDOUBLE', - 'complex_long_double':'PyArray_CDOUBLE', # forced casting - 'string':'PyArray_CHAR'} - -#These new maps aren't used anyhere yet, but should be by default -# unless building numeric or numarray extensions. 
-if using_newcore: - c2capi_map={'double':'PyArray_DOUBLE', - 'float':'PyArray_FLOAT', - 'long_double':'PyArray_LONGDOUBLE', - 'char':'PyArray_BYTE', - 'unsigned_char':'PyArray_UBYTE', - 'signed_char':'PyArray_BYTE', - 'short':'PyArray_SHORT', - 'unsigned_short':'PyArray_USHORT', - 'int':'PyArray_INT', - 'unsigned':'PyArray_UINT', - 'long':'PyArray_LONG', - 'unsigned_long':'PyArray_ULONG', - 'long_long':'PyArray_LONGLONG', - 'unsigned_long_long':'Pyarray_ULONGLONG', - 'complex_float':'PyArray_CFLOAT', - 'complex_double':'PyArray_CDOUBLE', - 'complex_long_double':'PyArray_CDOUBLE', - 'string':'PyArray_CHAR', # f2py 2e is not ready for PyArray_STRING (must set itemisize etc) - #'string':'PyArray_STRING' - - } -c2pycode_map={'double':'d', - 'float':'f', - 'long_double':'d', # forced casting - 'char':'1', - 'signed_char':'1', - 'unsigned_char':'b', - 'short':'s', - 'unsigned_short':'w', - 'int':'i', - 'unsigned':'u', - 'long':'l', - 'long_long':'L', - 'complex_float':'F', - 'complex_double':'D', - 'complex_long_double':'D', # forced casting - 'string':'c' - } -if using_newcore: - c2pycode_map={'double':'d', - 'float':'f', - 'long_double':'g', - 'char':'b', - 'unsigned_char':'B', - 'signed_char':'b', - 'short':'h', - 'unsigned_short':'H', - 'int':'i', - 'unsigned':'I', - 'long':'l', - 'unsigned_long':'L', - 'long_long':'q', - 'unsigned_long_long':'Q', - 'complex_float':'F', - 'complex_double':'D', - 'complex_long_double':'G', - 'string':'S'} -c2buildvalue_map={'double':'d', - 'float':'f', - 'char':'b', - 'signed_char':'b', - 'short':'h', - 'int':'i', - 'long':'l', - 'long_long':'L', - 'complex_float':'N', - 'complex_double':'N', - 'complex_long_double':'N', - 'string':'z'} - -if sys.version_info[0] >= 3: - # Bytes, not Unicode strings - c2buildvalue_map['string'] = 'y' - -if using_newcore: - #c2buildvalue_map=??? 
- pass - -f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'}, - 'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long', - '-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned', - '-8':'unsigned_long_long'}, - 'complex':{'':'complex_float','8':'complex_float', - '16':'complex_double','24':'complex_long_double', - '32':'complex_long_double'}, - 'complexkind':{'':'complex_float','4':'complex_float', - '8':'complex_double','12':'complex_long_double', - '16':'complex_long_double'}, - 'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'}, - 'double complex':{'':'complex_double'}, - 'double precision':{'':'double'}, - 'byte':{'':'char'}, - 'character':{'':'string'} - } - -if os.path.isfile('.f2py_f2cmap'): - # User defined additions to f2cmap_all. - # .f2py_f2cmap must contain a dictionary of dictionaries, only. - # For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. - # This feature is useful for F90/95 users if they use PARAMETERSs - # in type specifications. - try: - outmess('Reading .f2py_f2cmap ...\n') - f = open('.f2py_f2cmap','r') - d = eval(f.read(),{},{}) - f.close() - for k,d1 in d.items(): - for k1 in d1.keys(): - d1[k1.lower()] = d1[k1] - d[k.lower()] = d[k] - for k in d.keys(): - if k not in f2cmap_all: - f2cmap_all[k]={} - for k1 in d[k].keys(): - if d[k][k1] in c2py_map: - if k1 in f2cmap_all[k]: - outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k,k1,f2cmap_all[k][k1],d[k][k1])) - f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k,k1,d[k][k1])) - else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k,k1,d[k][k1],d[k][k1],c2py_map.keys())) - outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') - except Exception, msg: - errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. 
Skipping.\n' % (msg)) -cformat_map={'double':'%g', - 'float':'%g', - 'long_double':'%Lg', - 'char':'%d', - 'signed_char':'%d', - 'unsigned_char':'%hhu', - 'short':'%hd', - 'unsigned_short':'%hu', - 'int':'%d', - 'unsigned':'%u', - 'long':'%ld', - 'unsigned_long':'%lu', - 'long_long':'%ld', - 'complex_float':'(%g,%g)', - 'complex_double':'(%g,%g)', - 'complex_long_double':'(%Lg,%Lg)', - 'string':'%s', - } - -############### Auxiliary functions -def getctype(var): - """ - Determines C type - """ - ctype='void' - if isfunction(var): - if 'result' in var: - a=var['result'] - else: - a=var['name'] - if a in var['vars']: - return getctype(var['vars'][a]) - else: - errmess('getctype: function %s has no return value?!\n'%a) - elif issubroutine(var): - return ctype - elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: - typespec = var['typespec'].lower() - f2cmap=f2cmap_all[typespec] - ctype=f2cmap[''] # default type - if 'kindselector' in var: - if '*' in var['kindselector']: - try: - ctype=f2cmap[var['kindselector']['*']] - except KeyError: - errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'],'*',var['kindselector']['*'])) - elif 'kind' in var['kindselector']: - if typespec+'kind' in f2cmap_all: - f2cmap=f2cmap_all[typespec+'kind'] - try: - ctype=f2cmap[var['kindselector']['kind']] - except KeyError: - if typespec in f2cmap_all: - f2cmap=f2cmap_all[typespec] - try: - ctype=f2cmap[str(var['kindselector']['kind'])] - except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n'\ - %(typespec,var['kindselector']['kind'], ctype, - typespec,var['kindselector']['kind'], os.getcwd())) - - else: - if not isexternal(var): - errmess('getctype: No C-type found in "%s", assuming void.\n'%var) - return ctype - -def getstrlength(var): - if isstringfunction(var): - if 'result' in var: - a=var['result'] - else: - a=var['name'] - if a in var['vars']: - return 
getstrlength(var['vars'][a]) - else: - errmess('getstrlength: function %s has no return value?!\n'%a) - if not isstring(var): - errmess('getstrlength: expected a signature of a string but got: %s\n'%(`var`)) - len='1' - if 'charselector' in var: - a=var['charselector'] - if '*' in a: - len=a['*'] - elif 'len' in a: - len=a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)',len) or re.match(r'([*]|[:])',len): - #if len in ['(*)','*','(:)',':']: - if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(`var`)) - len='-1' - return len - -def getarrdims(a,var,verbose=0): - global depargs - ret={} - if isstring(var) and not isarray(var): - ret['dims']=getstrlength(var) - ret['size']=ret['dims'] - ret['rank']='1' - elif isscalar(var): - ret['size']='1' - ret['rank']='0' - ret['dims']='' - elif isarray(var): -# if not isintent_c(var): -# var['dimension'].reverse() - dim=copy.copy(var['dimension']) - ret['size']='*'.join(dim) - try: ret['size']=`eval(ret['size'])` - except: pass - ret['dims']=','.join(dim) - ret['rank']=`len(dim)` - ret['rank*[-1]']=`len(dim)*[-1]`[1:-1] - for i in range(len(dim)): # solve dim for dependecies - v=[] - if dim[i] in depargs: v=[dim[i]] - else: - for va in depargs: - if re.match(r'.*?\b%s\b.*'%va,dim[i]): - v.append(va) - for va in v: - if depargs.index(va)>depargs.index(a): - dim[i]='*' - break - ret['setdims'],i='',-1 - for d in dim: - i=i+1 - if d not in ['*',':','(*)','(:)']: - ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'],i,d) - if ret['setdims']: ret['setdims']=ret['setdims'][:-1] - ret['cbsetdims'],i='',-1 - for d in var['dimension']: - i=i+1 - if d not in ['*',':','(*)','(:)']: - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,d) - elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \ - % (d)) - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,0) - elif verbose : - errmess('getarrdims: If in 
call-back function: array argument %s must have bounded dimensions: got %s\n'%(`a`,`d`)) - if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1] -# if not isintent_c(var): -# var['dimension'].reverse() - return ret - -def getpydocsign(a,var): - global lcb_map - if isfunction(var): - if 'result' in var: - af=var['result'] - else: - af=var['name'] - if af in var['vars']: - return getpydocsign(af,var['vars'][af]) - else: - errmess('getctype: function %s has no return value?!\n'%af) - return '','' - sig,sigout=a,a - opt='' - if isintent_in(var): opt='input' - elif isintent_inout(var): opt='in/output' - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4]=='out=': - out_a = k[4:] - break - init='' - ctype=getctype(var) - - if hasinitvalue(var): - init,showinit=getinit(a,var) - init='= %s'%(showinit) - if isscalar(var): - if isintent_inout(var): - sig='%s :%s %s rank-0 array(%s,\'%s\')'%(a,init,opt,c2py_map[ctype], - c2pycode_map[ctype],) - else: - sig='%s :%s %s %s'%(a,init,opt,c2py_map[ctype]) - sigout='%s : %s'%(out_a,c2py_map[ctype]) - elif isstring(var): - if isintent_inout(var): - sig='%s :%s %s rank-0 array(string(len=%s),\'c\')'%(a,init,opt,getstrlength(var)) - else: - sig='%s :%s %s string(len=%s)'%(a,init,opt,getstrlength(var)) - sigout='%s : string(len=%s)'%(out_a,getstrlength(var)) - elif isarray(var): - dim=var['dimension'] - rank=`len(dim)` - sig='%s :%s %s rank-%s array(\'%s\') with bounds (%s)'%(a,init,opt,rank, - c2pycode_map[ctype], - ','.join(dim)) - if a==out_a: - sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\ - %(a,rank,c2pycode_map[ctype],','.join(dim)) - else: - sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - %(out_a,rank,c2pycode_map[ctype],','.join(dim),a) - elif isexternal(var): - ua='' - if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: - ua=lcb2_map[lcb_map[a]]['argname'] - if not ua==a: ua=' => %s'%ua - else: ua='' - sig='%s : call-back function%s'%(a,ua) - 
sigout=sig - else: - errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a) - return sig,sigout - -def getarrdocsign(a,var): - ctype=getctype(var) - if isstring(var) and (not isarray(var)): - sig='%s : rank-0 array(string(len=%s),\'c\')'%(a,getstrlength(var)) - elif isscalar(var): - sig='%s : rank-0 array(%s,\'%s\')'%(a,c2py_map[ctype], - c2pycode_map[ctype],) - elif isarray(var): - dim=var['dimension'] - rank=`len(dim)` - sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a,rank, - c2pycode_map[ctype], - ','.join(dim)) - return sig - -def getinit(a,var): - if isstring(var): init,showinit='""',"''" - else: init,showinit='','' - if hasinitvalue(var): - init=var['='] - showinit=init - if iscomplex(var) or iscomplexarray(var): - ret={} - - try: - v = var["="] - if ',' in v: - ret['init.r'],ret['init.i']=markoutercomma(v[1:-1]).split('@,@') - else: - v = eval(v,{},{}) - ret['init.r'],ret['init.i']=str(v.real),str(v.imag) - except: - raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) - if isarray(var): - init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'],ret['init.i']) - elif isstring(var): - if not init: init,showinit='""',"''" - if init[0]=="'": - init='"%s"'%(init[1:-1].replace('"','\\"')) - if init[0]=='"': showinit="'%s'"%(init[1:-1]) - return init,showinit - -def sign2map(a,var): - """ - varname,ctype,atype - init,init.r,init.i,pytype - vardebuginfo,vardebugshowvalue,varshowvalue - varrfromat - intent - """ - global lcb_map,cb_map - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4]=='out=': - out_a = k[4:] - break - ret={'varname':a,'outvarname':out_a} - ret['ctype']=getctype(var) - intent_flags = [] - for f,s in isintent_dict.items(): - if f(var): intent_flags.append('F2PY_%s'%s) - if intent_flags: - #XXX: Evaluate intent_flags here. 
- ret['intent'] = '|'.join(intent_flags) - else: - ret['intent'] = 'F2PY_INTENT_IN' - if isarray(var): ret['varrformat']='N' - elif ret['ctype'] in c2buildvalue_map: - ret['varrformat']=c2buildvalue_map[ret['ctype']] - else: ret['varrformat']='O' - ret['init'],ret['showinit']=getinit(a,var) - if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'],ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') - if isexternal(var): - ret['cbnamekey']=a - if a in lcb_map: - ret['cbname']=lcb_map[a] - ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs'] - ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs'] - ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr'] - ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr'] - else: - ret['cbname']=a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a,lcb_map.keys())) - if isstring(var): - ret['length']=getstrlength(var) - if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - dim=copy.copy(var['dimension']) - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - # Debug info - if debugcapi(var): - il=[isintent_in,'input',isintent_out,'output', - isintent_inout,'inoutput',isrequired,'required', - isoptional,'optional',isintent_hide,'hidden', - iscomplex,'complex scalar', - l_and(isscalar,l_not(iscomplex)),'scalar', - isstring,'string',isarray,'array', - iscomplexarray,'complex array',isstringarray,'string array', - iscomplexfunction,'complex function', - l_and(isfunction,l_not(iscomplexfunction)),'function', - isexternal,'callback', - isintent_callback,'callback', - isintent_aux,'auxiliary', - #ismutable,'mutable',l_not(ismutable),'immutable', - ] - rl=[] - for i in range(0,len(il),2): - if il[i](var): rl.append(il[i+1]) - if isstring(var): - rl.append('slen(%s)=%s'%(a,ret['length'])) - if isarray(var): -# if not isintent_c(var): -# var['dimension'].reverse() - ddim=','.join(map(lambda x,y:'%s|%s'%(x,y),var['dimension'],dim)) - rl.append('dims(%s)'%ddim) -# if not 
isintent_c(var): -# var['dimension'].reverse() - if isexternal(var): - ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a,ret['cbname'],','.join(rl)) - else: - ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'],a,ret['showinit'],','.join(rl)) - if isscalar(var): - if ret['ctype'] in cformat_map: - ret['vardebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']]) - if isstring(var): - ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - if isexternal(var): - ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a) - if ret['ctype'] in cformat_map: - ret['varshowvalue']='#name#:%s=%s'%(a,cformat_map[ret['ctype']]) - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isstring(var): - ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) - if hasnote(var): - ret['note']=var['note'] - return ret - -def routsign2map(rout): - """ - name,NAME,begintitle,endtitle - rname,ctype,rformat - routdebugshowvalue - """ - global lcb_map - name = rout['name'] - fname = getfortranname(rout) - ret={'name':name, - 'texname':name.replace('_','\\_'), - 'name_lower':name.lower(), - 'NAME':name.upper(), - 'begintitle':gentitle(name), - 'endtitle':gentitle('end of %s'%name), - 'fortranname':fname, - 'FORTRANNAME':fname.upper(), - 'callstatement':getcallstatement(rout) or '', - 'usercode':getusercode(rout) or '', - 'usercode1':getusercode1(rout) or '', - } - if '_' in fname: - ret['F_FUNC'] = 'F_FUNC_US' - else: - ret['F_FUNC'] = 'F_FUNC' - if '_' in name: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' - else: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' - lcb_map={} - if 'use' in rout: - for u in rout['use'].keys(): - if u in cb_rules.cb_map: - for un in cb_rules.cb_map[u]: - ln=un[0] - if 'map' in rout['use'][u]: - for k in rout['use'][u]['map'].keys(): - if rout['use'][u]['map'][k]==un[0]: ln=k;break - lcb_map[ln]=un[1] - #else: - # errmess('routsign2map: cb_map does not contain module "%s" used in 
"use" statement.\n'%(u)) - elif 'externals' in rout and rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'],`rout['externals']`)) - ret['callprotoargument'] = getcallprotoargument(rout,lcb_map) or '' - if isfunction(rout): - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - ret['rname']=a - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout) - ret['ctype']=getctype(rout['vars'][a]) - if hasresultnote(rout): - ret['resultnote']=rout['vars'][a]['note'] - rout['vars'][a]['note']=['See elsewhere.'] - if ret['ctype'] in c2buildvalue_map: - ret['rformat']=c2buildvalue_map[ret['ctype']] - else: - ret['rformat']='O' - errmess('routsign2map: no c2buildvalue key for type %s\n'%(`ret['ctype']`)) - if debugcapi(rout): - if ret['ctype'] in cformat_map: - ret['routdebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - if isstringfunction(rout): - ret['rlength']=getstrlength(rout['vars'][a]) - if ret['rlength']=='-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(`rout['name']`)) - ret['rlength']='10' - if hasnote(rout): - ret['note']=rout['note'] - rout['note']=['See elsewhere.'] - return ret - -def modsign2map(m): - """ - modulename - """ - if ismodule(m): - ret={'f90modulename':m['name'], - 'F90MODULENAME':m['name'].upper(), - 'texf90modulename':m['name'].replace('_','\\_')} - else: - ret={'modulename':m['name'], - 'MODULENAME':m['name'].upper(), - 'texmodulename':m['name'].replace('_','\\_')} - ret['restdoc'] = getrestdoc(m) or [] - if hasnote(m): - ret['note']=m['note'] - #m['note']=['See elsewhere.'] - ret['usercode'] = getusercode(m) or '' - ret['usercode1'] = getusercode1(m) or '' - if m['body']: - ret['interface_usercode'] = getusercode(m['body'][0]) or '' - else: - 
ret['interface_usercode'] = '' - ret['pymethoddef'] = getpymethoddef(m) or '' - if 'coutput' in m: - ret['coutput'] = m['coutput'] - if 'f2py_wrapper_output' in m: - ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] - return ret - -def cb_sign2map(a,var,index=None): - ret={'varname':a} - if index is None or 1: # disable 7712 patch - ret['varname_i'] = ret['varname'] - else: - ret['varname_i'] = ret['varname'] + '_' + str(index) - ret['ctype']=getctype(var) - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) - if hasnote(var): - ret['note']=var['note'] - var['note']=['See elsewhere.'] - return ret - -def cb_routsign2map(rout,um): - """ - name,begintitle,endtitle,argname - ctype,rctype,maxnofargs,nofoptargs,returncptr - """ - ret={'name':'cb_%s_in_%s'%(rout['name'],um), - 'returncptr':''} - if isintent_callback(rout): - if '_' in rout['name']: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) - ret['static'] = 'extern' - else: - ret['callbackname'] = ret['name'] - ret['static'] = 'static' - ret['argname']=rout['name'] - ret['begintitle']=gentitle(ret['name']) - ret['endtitle']=gentitle('end of %s'%ret['name']) - ret['ctype']=getctype(rout) - ret['rctype']='void' - if ret['ctype']=='string': ret['rctype']='void' - else: - ret['rctype']=ret['ctype'] - if ret['rctype']!='void': - if iscomplexfunction(rout): - ret['returncptr'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -return_value= -#endif -""" - else: - ret['returncptr'] = 'return_value=' - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['strlength']=getstrlength(rout) - if isfunction(rout): - if 'result' in rout: - 
a=rout['result'] - else: - a=rout['name'] - if hasnote(rout['vars'][a]): - ret['note']=rout['vars'][a]['note'] - rout['vars'][a]['note']=['See elsewhere.'] - ret['rname']=a - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout) - if iscomplexfunction(rout): - ret['rctype']=""" -#ifdef F2PY_CB_RETURNCOMPLEX -#ctype# -#else -void -#endif -""" - else: - if hasnote(rout): - ret['note']=rout['note'] - rout['note']=['See elsewhere.'] - nofargs=0 - nofoptargs=0 - if 'args' in rout and 'vars' in rout: - for a in rout['args']: - var=rout['vars'][a] - if l_or(isintent_in,isintent_inout)(var): - nofargs=nofargs+1 - if isoptional(var): - nofoptargs=nofoptargs+1 - ret['maxnofargs']=`nofargs` - ret['nofoptargs']=`nofoptargs` - if hasnote(rout) and isfunction(rout) and 'result' in rout: - ret['routnote']=rout['note'] - rout['note']=['See elsewhere.'] - return ret - -def common_sign2map(a,var): # obsolute - ret={'varname':a} - ret['ctype']=getctype(var) - if isstringarray(var): - ret['ctype']='char' - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - elif isstring(var): - ret['size']=getstrlength(var) - ret['rank']='1' - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) - if hasnote(var): - ret['note']=var['note'] - var['note']=['See elsewhere.'] - ret['arrdocstr']=getarrdocsign(a,var) # for strings this returns 0-rank but actually is 1-rank - return ret diff --git a/numpy-1.6.2/numpy/f2py/cb_rules.py b/numpy-1.6.2/numpy/f2py/cb_rules.py deleted file mode 100644 index 8e8320bfd3..0000000000 --- a/numpy-1.6.2/numpy/f2py/cb_rules.py +++ /dev/null @@ -1,539 +0,0 @@ -#!/usr/bin/env python -""" - -Build call-back mechanism for f2py2e. 
- -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/20 11:27:58 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.53 $"[10:-1] - -import __version__ -f2py_version = __version__.version - - -import pprint -import sys -import types -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import cfuncs - -################## Rules for callback function ############## - -cb_routine_rules={ - 'cbtypedefs':'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', - 'body':""" -#begintitle# -PyObject *#name#_capi = NULL;/*was Py_None*/ -PyTupleObject *#name#_args_capi = NULL; -int #name#_nofargs = 0; -jmp_buf #name#_jmpbuf; -/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ -#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { -\tPyTupleObject *capi_arglist = #name#_args_capi; -\tPyObject *capi_return = NULL; -\tPyObject *capi_tmp = NULL; -\tint capi_j,capi_i = 0; -\tint capi_longjmp_ok = 1; -#decl# -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_clock(); -#endif -\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); -\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); -\tif (#name#_capi==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); -\t} -\tif (#name#_capi==NULL) { -\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); -\t\tgoto capi_fail; -\t} -\tif (F2PyCapsule_Check(#name#_capi)) { -\t#name#_typedef #name#_cptr; -\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi); -\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); -\t#return# -\t} -\tif (capi_arglist==NULL) 
{ -\t\tcapi_longjmp_ok = 0; -\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); -\t\tif (capi_tmp) { -\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); -\t\t\tif (capi_arglist==NULL) { -\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t} else { -\t\t\tPyErr_Clear(); -\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); -\t\t} -\t} -\tif (capi_arglist == NULL) { -\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); -\t\tgoto capi_fail; -\t} -#setdims# -#pyobjfrom# -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_call_clock(); -#endif -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_call_clock(); -#endif -\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); -\tif (capi_return == NULL) { -\t\tfprintf(stderr,\"capi_return is NULL\\n\"); -\t\tgoto capi_fail; -\t} -\tif (capi_return == Py_None) { -\t\tPy_DECREF(capi_return); -\t\tcapi_return = Py_BuildValue(\"()\"); -\t} -\telse if (!PyTuple_Check(capi_return)) { -\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); -\t} -\tcapi_j = PyTuple_Size(capi_return); -\tcapi_i = 0; -#frompyobj# -\tCFUNCSMESS(\"cb:#name#:successful\\n\"); -\tPy_DECREF(capi_return); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_clock(); -#endif -\tgoto capi_return_pt; -capi_fail: -\tfprintf(stderr,\"Call-back #name# failed.\\n\"); -\tPy_XDECREF(capi_return); -\tif (capi_longjmp_ok) -\t\tlongjmp(#name#_jmpbuf,-1); -capi_return_pt: -\t; -#return# -} -#endtitle# -""", - 'need':['setjmp.h','CFUNCSMESS'], - 'maxnofargs':'#maxnofargs#', - 'nofoptargs':'#nofoptargs#', - 'docstr':"""\ -\tdef #argname#(#docsignature#): return #docreturn#\\n\\ -#docstrsigns#""", - 'latexdocstr':""" 
-{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} -#routnote# - -#latexdocstrsigns#""", - 'docstrshort':'def #argname#(#docsignature#): return #docreturn#' - } -cb_rout_rules=[ - {# Init - 'separatorsfor':{'decl':'\n', - 'args':',','optargs':'','pyobjfrom':'\n','freemem':'\n', - 'args_td':',','optargs_td':'', - 'args_nm':',','optargs_nm':'', - 'frompyobj':'\n','setdims':'\n', - 'docstrsigns':'\\n"\n"', - 'latexdocstrsigns':'\n', - 'latexdocstrreq':'\n','latexdocstropt':'\n', - 'latexdocstrout':'\n','latexdocstrcbs':'\n', - }, - 'decl':'/*decl*/','pyobjfrom':'/*pyobjfrom*/','frompyobj':'/*frompyobj*/', - 'args':[],'optargs':'','return':'','strarglens':'','freemem':'/*freemem*/', - 'args_td':[],'optargs_td':'','strarglens_td':'', - 'args_nm':[],'optargs_nm':'','strarglens_nm':'', - 'noargs':'', - 'setdims':'/*setdims*/', - 'docstrsigns':'','latexdocstrsigns':'', - 'docstrreq':'\tRequired arguments:', - 'docstropt':'\tOptional arguments:', - 'docstrout':'\tReturn objects:', - 'docstrcbs':'\tCall-back functions:', - 'docreturn':'','docsign':'','docsignopt':'', - 'latexdocstrreq':'\\noindent Required arguments:', - 'latexdocstropt':'\\noindent Optional arguments:', - 'latexdocstrout':'\\noindent Return objects:', - 'latexdocstrcbs':'\\noindent Call-back functions:', - 'routnote':{hasnote:'--- #note#',l_not(hasnote):''}, - },{ # Function - 'decl':'\t#ctype# return_value;', - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', - {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} - ], - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},'GETSCALARFROMPYTUPLE'], - 'return':'\treturn return_value;', - '_check':l_and(isfunction,l_not(isstringfunction),l_not(iscomplexfunction)) - }, - {# String function - 
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, - 'args':'#ctype# return_value,int return_value_len', - 'args_nm':'return_value,&return_value_len', - 'args_td':'#ctype# ,int', - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", - {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} - ], - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'}, - 'string.h','GETSTRFROMPYTUPLE'], - 'return':'return;', - '_check':isstringfunction - }, - {# Complex function - 'optargs':""" -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# *return_value -#endif -""", - 'optargs_nm':""" -#ifndef F2PY_CB_RETURNCOMPLEX -return_value -#endif -""", - 'optargs_td':""" -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# * -#endif -""", - 'decl':""" -#ifdef F2PY_CB_RETURNCOMPLEX -\t#ctype# return_value; -#endif -""", - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, - """\ -\tif (capi_j>capi_i) -#ifdef F2PY_CB_RETURNCOMPLEX -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#else -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#endif -""", - {debugcapi:""" -#ifdef F2PY_CB_RETURNCOMPLEX -\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); -#else -\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); -#endif - -"""} - ], - 'return':""" -#ifdef F2PY_CB_RETURNCOMPLEX -\treturn return_value; -#else -\treturn; -#endif -""", - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'}, - 'string.h','GETSCALARFROMPYTUPLE','#ctype#'], - '_check':iscomplexfunction - }, - {'docstrout':'\t\t#pydocsignout#', - 
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasnote:'--- #note#'}], - 'docreturn':'#rname#,', - '_check':isfunction}, - {'_check':issubroutine,'return':'return;'} - ] - -cb_arg_rules=[ - { # Doc - 'docstropt':{l_and(isoptional,isintent_nothide):'\t\t#pydocsign#'}, - 'docstrreq':{l_and(isrequired,isintent_nothide):'\t\t#pydocsign#'}, - 'docstrout':{isintent_out:'\t\t#pydocsignout#'}, - 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote,isintent_hide):'--- #note#', - l_and(hasnote,isintent_nothide):'--- See above.'}]}, - 'docsign':{l_and(isrequired,isintent_nothide):'#varname#,'}, - 'docsignopt':{l_and(isoptional,isintent_nothide):'#varname#,'}, - 'depend':'' - }, - { - 'args':{ - l_and (isscalar,isintent_c):'#ctype# #varname_i#', - l_and (isscalar,l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi', - isarray:'#ctype# *#varname_i#', - isstring:'#ctype# #varname_i#' - }, - 'args_nm':{ - l_and (isscalar,isintent_c):'#varname_i#', - l_and (isscalar,l_not(isintent_c)):'#varname_i#_cb_capi', - isarray:'#varname_i#', - isstring:'#varname_i#' - }, - 'args_td':{ - l_and (isscalar,isintent_c):'#ctype#', - l_and (isscalar,l_not(isintent_c)):'#ctype# *', - isarray:'#ctype# *', - isstring:'#ctype#' - }, - 'strarglens':{isstring:',int #varname_i#_cb_len'}, # untested with multiple args - 'strarglens_td':{isstring:',int'}, # untested with multiple args - 'strarglens_nm':{isstring:',#varname_i#_cb_len'}, # untested with multiple args - }, - { # Scalars - 'decl':{l_not(isintent_c):'\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'}, - 'error': {l_and(isintent_c,isintent_out, - throw_error('intent(c,out) is forbidden for callback scalar arguments')):\ - ''}, - 
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, - {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, - {l_and(debugcapi,l_and(l_not(iscomplex),isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi,l_and(l_not(iscomplex),l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, - {l_and(debugcapi,l_and(iscomplex,isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi,l_and(iscomplex,l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, - ], - 'need':[{isintent_out:['#ctype#_from_pyobj','GETSCALARFROMPYTUPLE']}, - {debugcapi:'CFUNCSMESS'}], - '_check':isscalar - },{ - 'pyobjfrom':[{isintent_in:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) -\t\t\tgoto capi_fail;"""}], - 'need':[{isintent_in:'pyobj_from_#ctype#1'}, - {isintent_inout:'pyarr_from_p_#ctype#1'}, - {iscomplex:'#ctype#'}], - '_check':l_and(isscalar,isintent_nothide), - '_optional':'' - },{# String - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", - {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, - ], - 'need':['#ctype#','GETSTRFROMPYTUPLE', - {debugcapi:'CFUNCSMESS'},'string.h'], - '_check':l_and(isstring,isintent_out) - },{ - 
'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, - {isintent_in:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout:"""\ -\tif (#name#_nofargs>capi_i) { -\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) -\t\t\tgoto capi_fail; -\t}"""}], - 'need':[{isintent_in:'pyobj_from_#ctype#1size'}, - {isintent_inout:'pyarr_from_p_#ctype#1'}], - '_check':l_and(isstring,isintent_nothide), - '_optional':'' - }, -# Array ... - { - 'decl':'\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', - 'setdims':'\t#cbsetdims#;', - '_check':isarray, - '_depend':'' - }, - { - 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, - {isintent_c:"""\ -\tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ -""", - l_not(isintent_c):"""\ -\tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? 
*/ -""", - }, - """ -\t\tif (tmp_arr==NULL) -\t\t\tgoto capi_fail; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) -\t\t\tgoto capi_fail; -}"""], - '_check':l_and(isarray,isintent_nothide,l_or(isintent_in,isintent_inout)), - '_optional':'', - },{ - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, - """\tif (capi_j>capi_i) { -\t\tPyArrayObject *rv_cb_arr = NULL; -\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; -\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", - {isintent_c:'|F2PY_INTENT_C'}, - """,capi_tmp); -\t\tif (rv_cb_arr == NULL) { -\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tMEMCOPY(#varname_i#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr)); -\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { -\t\t\tPy_DECREF(rv_cb_arr); -\t\t} -\t}""", - {debugcapi:'\tfprintf(stderr,"<-.\\n");'}, - ], - 'need':['MEMCOPY',{iscomplexarray:'#ctype#'}], - '_check':l_and(isarray,isintent_out) - },{ - 'docreturn':'#varname#,', - '_check':isintent_out - } - ] - -################## Build call-back module ############# -cb_map={} -def buildcallbacks(m): - global cb_map - cb_map[m['name']]=[] - for bi in m['body']: - if bi['block']=='interface': - for b in bi['body']: - if b: - buildcallback(b,m['name']) - else: - errmess('warning: empty body for %s\n' % (m['name'])) - -def buildcallback(rout,um): - global cb_map - import capi_maps - - outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'],um)) - args,depargs=getargs(rout) - capi_maps.depargs=depargs - var=rout['vars'] - vrd=capi_maps.cb_routsign2map(rout,um) - rd=dictappend({},vrd) - cb_map[um].append([rout['name'],rd['name']]) - for r in cb_rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r,vrd,rout) - rd=dictappend(rd,ar) - savevrd={} - for i,a in enumerate(args): - vrd=capi_maps.cb_sign2map(a,var[a], index=i) - 
savevrd[a]=vrd - for r in cb_arg_rules: - if '_depend' in r: - continue - if '_optional' in r and isoptional(var[a]): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - for a in args: - vrd=savevrd[a] - for r in cb_arg_rules: - if '_depend' in r: - continue - if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - for a in depargs: - vrd=savevrd[a] - for r in cb_arg_rules: - if '_depend' not in r: - continue - if '_optional' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - if 'args' in rd and 'optargs' in rd: - if type(rd['optargs'])==type([]): - rd['optargs']=rd['optargs']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_nm']=rd['optargs_nm']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_td']=rd['optargs_td']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - if type(rd['docreturn'])==types.ListType: - rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']})) - optargs=stripcomma(replace('#docsignopt#', - {'docsignopt':rd['docsignopt']} - )) - if optargs=='': - rd['docsignature']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']})) - else: - rd['docsignature']=replace('#docsign#[#docsignopt#]', - {'docsign':rd['docsign'], - 'docsignopt':optargs, - }) - rd['latexdocsignature']=rd['docsignature'].replace('_','\\_') - rd['latexdocsignature']=rd['latexdocsignature'].replace(',',', ') - rd['docstrsigns']=[] - rd['latexdocstrsigns']=[] - for k in ['docstrreq','docstropt','docstrout','docstrcbs']: - if k in rd and type(rd[k])==types.ListType: - rd['docstrsigns']=rd['docstrsigns']+rd[k] - 
k='latex'+k - if k in rd and type(rd[k])==types.ListType: - rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ - ['\\begin{description}']+rd[k][1:]+\ - ['\\end{description}'] - if 'args' not in rd: - rd['args']='' - rd['args_td']='' - rd['args_nm']='' - if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): - rd['noargs'] = 'void' - - ar=applyrules(cb_routine_rules,rd) - cfuncs.callbacks[rd['name']]=ar['body'] - if type(ar['need'])==str: - ar['need']=[ar['need']] - - if 'need' in rd: - for t in cfuncs.typedefs.keys(): - if t in rd['need']: - ar['need'].append(t) - - cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs'] - ar['need'].append(rd['name']+'_typedef') - cfuncs.needs[rd['name']]=ar['need'] - - capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'], - 'nofoptargs':ar['nofoptargs'], - 'docstr':ar['docstr'], - 'latexdocstr':ar['latexdocstr'], - 'argname':rd['argname'] - } - outmess('\t %s\n'%(ar['docstrshort'])) - #print ar['body'] - return -################## Build call-back function ############# diff --git a/numpy-1.6.2/numpy/f2py/cfuncs.py b/numpy-1.6.2/numpy/f2py/cfuncs.py deleted file mode 100644 index 9410a9f276..0000000000 --- a/numpy-1.6.2/numpy/f2py/cfuncs.py +++ /dev/null @@ -1,1223 +0,0 @@ -#!/usr/bin/env python -""" - -C declarations, CPP macros, and C functions for f2py2e. -Only required declarations/macros/functions will be used. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/05/06 11:42:34 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.75 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import types -import sys -import copy -errmess=sys.stderr.write - -##################### Definitions ################## - -outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[], - 'userincludes':[], - 'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[], - 'commonhooks':[]} -needs={} -includes0={'includes0':'/*need_includes0*/'} -includes={'includes':'/*need_includes*/'} -userincludes={'userincludes':'/*need_userincludes*/'} -typedefs={'typedefs':'/*need_typedefs*/'} -typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'} -cppmacros={'cppmacros':'/*need_cppmacros*/'} -cfuncs={'cfuncs':'/*need_cfuncs*/'} -callbacks={'callbacks':'/*need_callbacks*/'} -f90modhooks={'f90modhooks':'/*need_f90modhooks*/', - 'initf90modhooksstatic':'/*initf90modhooksstatic*/', - 'initf90modhooksdynamic':'/*initf90modhooksdynamic*/', - } -commonhooks={'commonhooks':'/*need_commonhooks*/', - 'initcommonhooks':'/*need_initcommonhooks*/', - } - -############ Includes ################### - -includes0['math.h']='#include ' -includes0['string.h']='#include ' -includes0['setjmp.h']='#include ' - -includes['Python.h']='#include "Python.h"' -needs['arrayobject.h']=['Python.h'] -includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "arrayobject.h"''' - -includes['arrayobject.h']='#include "fortranobject.h"' -includes['stdarg.h']='#include ' - -############# Type definitions ############### - -typedefs['unsigned_char']='typedef unsigned char unsigned_char;' -typedefs['unsigned_short']='typedef unsigned short unsigned_short;' -typedefs['unsigned_long']='typedef unsigned long unsigned_long;' -typedefs['signed_char']='typedef signed char signed_char;' -typedefs['long_long']="""\ -#ifdef _WIN32 -typedef __int64 long_long; -#else -typedef long long long_long; -typedef unsigned 
long long unsigned_long_long; -#endif -""" -typedefs['insinged_long_long']="""\ -#ifdef _WIN32 -typedef __uint64 long_long; -#else -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['long_double']="""\ -#ifndef _LONG_DOUBLE -typedef long double long_double; -#endif -""" -typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float']='typedef struct {float r,i;} complex_float;' -typedefs['complex_double']='typedef struct {double r,i;} complex_double;' -typedefs['string']="""typedef char * string;""" - - -############### CPP macros #################### -cppmacros['CFUNCSMESS']="""\ -#ifdef DEBUGCFUNCS -#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); -#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); -#else -#define CFUNCSMESS(mess) -#define CFUNCSMESSPY(mess,obj) -#endif -""" -cppmacros['F_FUNC']="""\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F -#else -#define F_FUNC(f,F) _##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F##_ -#else -#define F_FUNC(f,F) _##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) -#else -#define F_FUNC_US(f,F) F_FUNC(f,F) -#endif -""" -cppmacros['F_WRAPPEDFUNC']="""\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) 
_f2pywrap##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) -#else -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) -#endif -""" -cppmacros['F_MODFUNC']="""\ -#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f -#else -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f -#else -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) f ## .in. ## m -#else -#define F_MODFUNCNAME(m,f) f ## .in. 
## m ## _ -#endif -#endif -/* -#if defined(UPPERCASE_FORTRAN) -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) -#else -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) -#endif -*/ - -#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) -""" -cppmacros['SWAPUNSAFE']="""\ -#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) -""" -cppmacros['SWAP']="""\ -#define SWAP(a,b,t) {\\ -\tt *c;\\ -\tc = a;\\ -\ta = b;\\ -\tb = c;} -""" -#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)' -cppmacros['PRINTPYOBJERR']="""\ -#define PRINTPYOBJERR(obj)\\ -\tfprintf(stderr,\"#modulename#.error is related to \");\\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); -""" -cppmacros['MINMAX']="""\ -#ifndef max -#define max(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef min -#define min(a,b) ((a < b) ? (a) : (b)) -#endif -#ifndef MAX -#define MAX(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef MIN -#define MIN(a,b) ((a < b) ? (a) : (b)) -#endif -""" -needs['len..']=['f2py_size'] -cppmacros['len..']="""\ -#define rank(var) var ## _Rank -#define shape(var,dim) var ## _Dims[dim] -#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd) -#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim]) -#define fshape(var,dim) shape(var,rank(var)-dim-1) -#define len(var) shape(var,0) -#define flen(var) fshape(var,0) -#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) -/* #define index(i) capi_i ## i */ -#define slen(var) capi_ ## var ## _len -#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) -""" -needs['f2py_size']=['stdarg.h'] -cfuncs['f2py_size']="""\ -int f2py_size(PyArrayObject* var, ...) 
-{ - npy_int sz = 0; - npy_int dim; - npy_int rank; - va_list argp; - va_start(argp, var); - dim = va_arg(argp, npy_int); - if (dim==-1) - { - sz = PyArray_SIZE(var); - } - else - { - rank = PyArray_NDIM(var); - if (dim>=1 && dim<=rank) - sz = PyArray_DIM(var, dim-1); - else - fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); - } - va_end(argp); - return sz; -} -""" - -cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))' -cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))' -needs['pyobj_from_int1']=['signed_char'] -cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))' -cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))' -needs['pyobj_from_long_long1']=['long_long'] -cppmacros['pyobj_from_long_long1']="""\ -#ifdef HAVE_LONG_LONG -#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) -#else -#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
-#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) -#endif -""" -needs['pyobj_from_long_double1']=['long_double'] -cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' -cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' -cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' -needs['pyobj_from_complex_long_double1']=['complex_long_double'] -cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_double1']=['complex_double'] -cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_float1']=['complex_float'] -cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_string1']=['string'] -cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))' -needs['pyobj_from_string1size']=['string'] -cppmacros['pyobj_from_string1size']='#define pyobj_from_string1size(v,len) (PyString_FromStringAndSize((char *)v, len))' -needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR'] -cppmacros['TRYPYARRAYTEMPLATE']="""\ -/* New SciPy */ -#define TRYPYARRAYTEMPLATECHAR case PyArray_STRING: *(char *)(arr->data)=*v; break; -#define TRYPYARRAYTEMPLATELONG case PyArray_LONG: *(long *)(arr->data)=*v; break; -#define TRYPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break; - -#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\ - switch (arr->descr->type_num) {\\ - case 
PyArray_DOUBLE: *(double *)(arr->data)=*v; break;\\ - case PyArray_INT: *(int *)(arr->data)=*v; break;\\ - case PyArray_LONG: *(long *)(arr->data)=*v; break;\\ - case PyArray_FLOAT: *(float *)(arr->data)=*v; break;\\ - case PyArray_CDOUBLE: *(double *)(arr->data)=*v; break;\\ - case PyArray_CFLOAT: *(float *)(arr->data)=*v; break;\\ - case PyArray_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\ - case PyArray_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\ - case PyArray_BYTE: *(signed char *)(arr->data)=*v; break;\\ - case PyArray_SHORT: *(short *)(arr->data)=*v; break;\\ - case PyArray_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\ - case PyArray_UINT: *(npy_uint *)(arr->data)=*v; break;\\ - case PyArray_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\ - case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\ - case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\ - case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ - case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ - case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\ - default: return -2;\\ - };\\ - return 1 -""" - -needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR'] -cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\ -#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (arr->descr->type==typecode) {\\ - *(ctype *)(arr->data)=(*v).r;\\ - *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\ - return 1;\\ - }\\ - switch (arr->descr->type_num) {\\ - case PyArray_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\ - 
case PyArray_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\ - case PyArray_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\ - case PyArray_LONG: *(long *)(arr->data)=(*v).r; break;\\ - case PyArray_FLOAT: *(float *)(arr->data)=(*v).r; break;\\ - case PyArray_INT: *(int *)(arr->data)=(*v).r; break;\\ - case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\ - case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\ - case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\ - case PyArray_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\ - case PyArray_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\ - case PyArray_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\ - case PyArray_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\ - case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\ - case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\ - case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\ - case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\ - case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\ - default: return -2;\\ - };\\ - return -1; -""" -## cppmacros['NUMFROMARROBJ']="""\ -## #define NUMFROMARROBJ(typenum,ctype) \\ -## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -## \tif (arr) {\\ -## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\ -## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} else {\\ -## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\ -## \t\t}\\ -## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -## \t\treturn 1;\\ -## \t} -## """ -## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ -## 
cppmacros['CNUMFROMARROBJ']="""\ -## #define CNUMFROMARROBJ(typenum,ctype) \\ -## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -## \tif (arr) {\\ -## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\ -## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} else {\\ -## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\ -## \t\t}\\ -## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -## \t\treturn 1;\\ -## \t} -## """ - - -needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR'] -cppmacros['GETSTRFROMPYTUPLE']="""\ -#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ -\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ -\t\tif (rv_cb_str == NULL)\\ -\t\t\tgoto capi_fail;\\ -\t\tif (PyString_Check(rv_cb_str)) {\\ -\t\t\tstr[len-1]='\\0';\\ -\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ -\t\t} else {\\ -\t\t\tPRINTPYOBJERR(rv_cb_str);\\ -\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\ -\t\t\tgoto capi_fail;\\ -\t\t}\\ -\t} -""" -cppmacros['GETSCALARFROMPYTUPLE']="""\ -#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ -\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ -\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ -\t\t\tgoto capi_fail;\\ -\t} -""" - -cppmacros['FAILNULL']="""\\ -#define FAILNULL(p) do { \\ - if ((p) == NULL) { \\ - PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ - goto capi_fail; \\ - } \\ -} while (0) -""" -needs['MEMCOPY']=['string.h', 'FAILNULL'] -cppmacros['MEMCOPY']="""\ -#define MEMCOPY(to,from,n)\\ - do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) -""" -cppmacros['STRINGMALLOC']="""\ -#define STRINGMALLOC(str,len)\\ -\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ 
-\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ -\t\tgoto capi_fail;\\ -\t} else {\\ -\t\t(str)[len] = '\\0';\\ -\t} -""" -cppmacros['STRINGFREE']="""\ -#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) -""" -needs['STRINGCOPYN']=['string.h', 'FAILNULL'] -cppmacros['STRINGCOPYN']="""\ -#define STRINGCOPYN(to,from,buf_size) \\ - do { \\ - int _m = (buf_size); \\ - char *_to = (to); \\ - char *_from = (from); \\ - FAILNULL(_to); FAILNULL(_from); \\ - (void)strncpy(_to, _from, sizeof(char)*_m); \\ - _to[_m-1] = '\\0'; \\ - /* Padding with spaces instead of nulls */ \\ - for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ - _to[_m] = ' '; \\ - } \\ - } while (0) -""" -needs['STRINGCOPY']=['string.h', 'FAILNULL'] -cppmacros['STRINGCOPY']="""\ -#define STRINGCOPY(to,from)\\ - do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) -""" -cppmacros['CHECKGENERIC']="""\ -#define CHECKGENERIC(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKARRAY']="""\ -#define CHECKARRAY(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKSTRING']="""\ -#define CHECKSTRING(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tchar errstring[256];\\ -\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ -\t\tPyErr_SetString(#modulename#_error, errstring);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKSCALAR']="""\ -#define CHECKSCALAR(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tchar errstring[256];\\ -\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ -\t\tPyErr_SetString(#modulename#_error,errstring);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -## cppmacros['CHECKDIMS']="""\ -## #define 
CHECKDIMS(dims,rank) \\ -## \tfor (int i=0;i<(rank);i++)\\ -## \t\tif (dims[i]<0) {\\ -## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} -## """ -cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' -cppmacros['OLDPYNUM']="""\ -#ifdef OLDPYNUM -#error You need to intall Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369 -#endif -""" -################# C functions ############### - -cfuncs['calcarrindex']="""\ -static int calcarrindex(int *i,PyArrayObject *arr) { -\tint k,ii = i[0]; -\tfor (k=1; k < arr->nd; k++) -\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */ -\treturn ii; -}""" -cfuncs['calcarrindextr']="""\ -static int calcarrindextr(int *i,PyArrayObject *arr) { -\tint k,ii = i[arr->nd-1]; -\tfor (k=1; k < arr->nd; k++) -\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */ -\treturn ii; -}""" -cfuncs['forcomb']="""\ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { - int k; - if (dims==NULL) return 0; - if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - for (k=1;kdata,str,PyArray_NBYTES(arr)); } -\treturn 1; -capi_fail: -\tPRINTPYOBJERR(obj); -\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); -\treturn 0; -} -""" -needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN'] -cfuncs['string_from_pyobj']="""\ -static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { -\tPyArrayObject *arr = NULL; -\tPyObject *tmp = NULL; -#ifdef DEBUGCFUNCS 
-fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj); -#endif -\tif (obj == Py_None) { -\t\tif (*len == -1) -\t\t\t*len = strlen(inistr); /* Will this cause problems? */ -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,inistr,*len+1); -\t\treturn 1; -\t} -\tif (PyArray_Check(obj)) { -\t\tif ((arr = (PyArrayObject *)obj) == NULL) -\t\t\tgoto capi_fail; -\t\tif (!ISCONTIGUOUS(arr)) { -\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tif (*len == -1) -\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr); -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,arr->data,*len+1); -\t\treturn 1; -\t} -\tif (PyString_Check(obj)) { -\t\ttmp = obj; -\t\tPy_INCREF(tmp); -\t} -#if PY_VERSION_HEX >= 0x03000000 -\telse if (PyUnicode_Check(obj)) { -\t\ttmp = PyUnicode_AsASCIIString(obj); -\t} -\telse { -\t\tPyObject *tmp2; -\t\ttmp2 = PyObject_Str(obj); -\t\tif (tmp2) { -\t\t\ttmp = PyUnicode_AsASCIIString(tmp2); -\t\t\tPy_DECREF(tmp2); -\t\t} -\t\telse { -\t\t\ttmp = NULL; -\t\t} -\t} -#else -\telse { -\t\ttmp = PyObject_Str(obj); -\t} -#endif -\tif (tmp == NULL) goto capi_fail; -\tif (*len == -1) -\t\t*len = PyString_GET_SIZE(tmp); -\tSTRINGMALLOC(*str,*len); -\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); -\tPy_DECREF(tmp); -\treturn 1; -capi_fail: -\tPy_XDECREF(tmp); -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['char_from_pyobj']=['int_from_pyobj'] -cfuncs['char_from_pyobj']="""\ -static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (char)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char'] -cfuncs['signed_char_from_pyobj']="""\ -static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char 
*errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (signed_char)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['short_from_pyobj']=['int_from_pyobj'] -cfuncs['short_from_pyobj']="""\ -static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (short)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -cfuncs['int_from_pyobj']="""\ -static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = (int)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -cfuncs['long_from_pyobj']="""\ -static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; 
-\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['long_long_from_pyobj']=['long_long'] -cfuncs['long_long_from_pyobj']="""\ -static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyLong_Check(obj)) { -\t\t*v = PyLong_AsLongLong(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyInt_Check(obj)) { -\t\t*v = (long_long)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Long(obj); -\tif (tmp) { -\t\t*v = PyLong_AsLongLong(tmp); -\t\tPy_DECREF(tmp); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['long_double_from_pyobj']=['double_from_pyobj','long_double'] -cfuncs['long_double_from_pyobj']="""\ -static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { -\tdouble d=0; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, LongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_LONGDOUBLE) { -\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj)); -\t\t\treturn 1; -\t\t} -\t} -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (long_double)d; -\t\treturn 1; -\t} -\treturn 0; -} -""" -cfuncs['double_from_pyobj']="""\ -static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyFloat_Check(obj)) { -#ifdef __sgi -\t\t*v = PyFloat_AsDouble(obj); -#else -\t\t*v = PyFloat_AS_DOUBLE(obj); -#endif -\t\treturn 1; -\t} -\ttmp = 
PyNumber_Float(obj); -\tif (tmp) { -#ifdef __sgi -\t\t*v = PyFloat_AsDouble(tmp); -#else -\t\t*v = PyFloat_AS_DOUBLE(tmp); -#endif -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['float_from_pyobj']=['double_from_pyobj'] -cfuncs['float_from_pyobj']="""\ -static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { -\tdouble d=0.0; -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (float)d; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['complex_long_double_from_pyobj']=['complex_long_double','long_double', - 'complex_double_from_pyobj'] -cfuncs['complex_long_double_from_pyobj']="""\ -static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, CLongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_CLONGDOUBLE) { -\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; -\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; -\t\t\treturn 1; -\t\t} -\t} -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (long_double)cd.r; -\t\t(*v).i = (long_double)cd.i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['complex_double_from_pyobj']=['complex_double'] -cfuncs['complex_double_from_pyobj']="""\ -static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { -\tPy_complex c; -\tif 
(PyComplex_Check(obj)) { -\t\tc=PyComplex_AsCComplex(obj); -\t\t(*v).r=c.real, (*v).i=c.imag; -\t\treturn 1; -\t} -\tif (PyArray_IsScalar(obj, ComplexFloating)) { -\t\tif (PyArray_IsScalar(obj, CFloat)) { -\t\t\tnpy_cfloat new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse if (PyArray_IsScalar(obj, CLongDouble)) { -\t\t\tnpy_clongdouble new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */ -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t} -\t\treturn 1; -\t} -\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ -\t\tPyObject *arr; -\t\tif (PyArray_Check(obj)) { -\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, PyArray_CDOUBLE); -\t\t} -\t\telse { -\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(PyArray_CDOUBLE)); -\t\t} -\t\tif (arr==NULL) return 0; -\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; -\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; -\t\treturn 1; -\t} -\t/* Python does not provide PyNumber_Complex function :-( */ -\t(*v).i=0.0; -\tif (PyFloat_Check(obj)) { -#ifdef __sgi -\t\t(*v).r = PyFloat_AsDouble(obj); -#else -\t\t(*v).r = PyFloat_AS_DOUBLE(obj); -#endif -\t\treturn 1; -\t} -\tif (PyInt_Check(obj)) { -\t\t(*v).r = (double)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\tif (PyLong_Check(obj)) { -\t\t(*v).r = PyLong_AsDouble(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { -\t\tPyObject *tmp = PySequence_GetItem(obj,0); -\t\tif (tmp) { -\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) { -\t\t\t\tPy_DECREF(tmp); -\t\t\t\treturn 1; -\t\t\t} -\t\t\tPy_DECREF(tmp); -\t\t} -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) -\t\t\terr = PyExc_TypeError; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" 
-needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj'] -cfuncs['complex_float_from_pyobj']="""\ -static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (float)cd.r; -\t\t(*v).i = (float)cd.i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','unsigned_char'] -cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char'] -cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' -needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n' -needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n' -needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n' -needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long'] -cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' -needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE'] 
-cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n' -needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n' -needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float'] -cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' -needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double'] -cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' - -needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX'] -cfuncs['create_cb_arglist']="""\ -static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { -\tPyObject *tmp = NULL; -\tPyObject *tmp_fun = NULL; -\tint tot,opt,ext,siz,i,di=0; -\tCFUNCSMESS(\"create_cb_arglist\\n\"); -\ttot=opt=ext=siz=0; -\t/* Get the total number of arguments */ -\tif (PyFunction_Check(fun)) -\t\ttmp_fun = fun; -\telse { -\t\tdi = 1; -\t\tif (PyObject_HasAttrString(fun,\"im_func\")) { -\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\"); -\t\t} -\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) { -\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\"); -\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\")) -\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); -\t\t\telse { -\t\t\t\ttmp_fun = fun; /* built-in function */ -\t\t\t\ttot = maxnofargs; -\t\t\t\tif (xa != NULL) -\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\t} -\t\t\tPy_XDECREF(tmp); -\t\t} -\t\telse if 
(PyFortran_Check(fun) || PyFortran_Check1(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\ttmp_fun = fun; -\t\t} -\t\telse if (F2PyCapsule_Check(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\text = PyTuple_Size((PyObject *)xa); -\t\t\tif(ext>0) { -\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t\ttmp_fun = fun; -\t\t} -\t} -if (tmp_fun==NULL) { -fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); -goto capi_fail; -} -#if PY_VERSION_HEX >= 0x03000000 -\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) -#else -\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) -#endif -\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; -\t\tPy_XDECREF(tmp); -\t} -\t/* Get the number of optional arguments */ -#if PY_VERSION_HEX >= 0x03000000 -\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) -#else -\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) -#endif -\t\t\topt = PyTuple_Size(tmp); -\t\tPy_XDECREF(tmp); -\t/* Get the number of extra arguments */ -\tif (xa != NULL) -\t\text = PyTuple_Size((PyObject *)xa); -\t/* Calculate the size of call-backs argument list */ -\tsiz = MIN(maxnofargs+ext,tot); -\t*nofargs = MAX(0,siz-ext); -#ifdef DEBUGCFUNCS -\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); 
-#endif -\tif (siz0: - if outneeds[n][0] not in needs: - out.append(outneeds[n][0]) - del outneeds[n][0] - else: - flag=0 - for k in outneeds[n][1:]: - if k in needs[outneeds[n][0]]: - flag=1 - break - if flag: - outneeds[n]=outneeds[n][1:]+[outneeds[n][0]] - else: - out.append(outneeds[n][0]) - del outneeds[n][0] - if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])) \ - and outneeds[n] != []: - print n,saveout - errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') - out=out+saveout - break - saveout=copy.copy(outneeds[n]) - if out==[]: - out=[n] - res[n]=out - return res diff --git a/numpy-1.6.2/numpy/f2py/common_rules.py b/numpy-1.6.2/numpy/f2py/common_rules.py deleted file mode 100644 index 3295676ef2..0000000000 --- a/numpy-1.6.2/numpy/f2py/common_rules.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python -""" - -Build common block mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/05/06 10:57:33 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.19 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import pprint -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import capi_maps -import func2subr -from crackfortran import rmbadname -############## - -def findcommonblocks(block,top=1): - ret = [] - if hascommon(block): - for n in block['common'].keys(): - vars={} - for v in block['common'][n]: - vars[v]=block['vars'][v] - ret.append((n,block['common'][n],vars)) - elif hasbody(block): - for b in block['body']: - ret=ret+findcommonblocks(b,0) - if top: - tret=[] - names=[] - for t in ret: - if t[0] not in names: - names.append(t[0]) - tret.append(t) - return tret - return ret - -def buildhooks(m): - ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']} - fwrap = [''] - def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0],line) - chooks = [''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line) - ihooks = [''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line) - doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line) - for (name,vnames,vars) in findcommonblocks(m): - lower_name = name.lower() - hnames,inames = [],[] - for n in vnames: - if isintent_hide(vars[n]): hnames.append(n) - else: inames.append(n) - if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name,','.join(inames),','.join(hnames))) - else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name,','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)'%name) - fadd('external setupfunc') - for n in vnames: - fadd(func2subr.var2fixfortran(vars,n)) - if name=='_BLNK_': - fadd('common %s'%(','.join(vnames))) - else: - fadd('common /%s/ %s'%(name,','.join(vnames))) - fadd('call setupfunc(%s)'%(','.join(inames))) - fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {'%(name)) - 
idims=[] - for n in inames: - ct = capi_maps.getctype(vars[n]) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n,vars[n]) - if dm['dims']: idims.append('(%s)'%(dm['dims'])) - else: idims.append('') - dms=dm['dims'].strip() - if not dms: dms='-1' - cadd('\t{\"%s\",%s,{{%s}},%s},'%(n,dm['rank'],dms,at)) - cadd('\t{NULL}\n};') - inames1 = rmbadname(inames) - inames1_tps = ','.join(map(lambda s:'char *'+s,inames1)) - cadd('static void f2py_setup_%s(%s) {'%(name,inames1_tps)) - cadd('\tint i_f2py=0;') - for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name,n)) - cadd('}') - if '_' in lower_name: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\ - %(F_FUNC,lower_name,name.upper(), - ','.join(['char*']*len(inames1)))) - cadd('static void f2py_init_%s(void) {'%name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC,lower_name,name.upper(),name)) - cadd('}\n') - iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name,name,name)) - tname = name.replace('_','\\_') - dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname)) - dadd('\\begin{description}') - for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,vars[n]))) - if hasnote(vars[n]): - note = vars[n]['note'] - if type(note) is type([]): note='\n'.join(note) - dadd('--- %s'%(note)) - dadd('\\end{description}') - ret['docs'].append('"\t/%s/ %s\\n"'%(name,','.join(map(lambda v,d:v+d,inames,idims)))) - ret['commonhooks']=chooks - ret['initcommonhooks']=ihooks - ret['latexdoc']=doc[0] - if len(ret['docs'])<=1: ret['docs']='' - return ret,fwrap[0] diff --git a/numpy-1.6.2/numpy/f2py/crackfortran.py b/numpy-1.6.2/numpy/f2py/crackfortran.py deleted file mode 100755 index 2b92b1b0ff..0000000000 --- a/numpy-1.6.2/numpy/f2py/crackfortran.py +++ /dev/null @@ -1,2828 +0,0 @@ -#!/usr/bin/env python -""" -crackfortran --- read fortran (77,90) code and extract declaration 
information. - Usage is explained in the comment block below. - -Copyright 1999-2004 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/09/27 07:13:49 $ -Pearu Peterson -""" -__version__ = "$Revision: 1.177 $"[10:-1] -import platform -import __version__ -f2py_version = __version__.version - -""" - Usage of crackfortran: - ====================== - Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h - -m ,--ignore-contains - Functions: crackfortran, crack2fortran - The following Fortran statements/constructions are supported - (or will be if needed): - block data,byte,call,character,common,complex,contains,data, - dimension,double complex,double precision,end,external,function, - implicit,integer,intent,interface,intrinsic, - logical,module,optional,parameter,private,public, - program,real,(sequence?),subroutine,type,use,virtual, - include,pythonmodule - Note: 'virtual' is mapped to 'dimension'. - Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). - Note: code after 'contains' will be ignored until its scope ends. - Note: 'common' statement is extended: dimensions are moved to variable definitions - Note: f2py directive: f2py is read as - Note: pythonmodule is introduced to represent Python module - - Usage: - `postlist=crackfortran(files,funcs)` - `postlist` contains declaration information read from the list of files `files`. 
- `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file - - `postlist` has the following structure: - *** it is a list of dictionaries containing `blocks': - B = {'block','body','vars','parent_block'[,'name','prefix','args','result', - 'implicit','externals','interfaced','common','sortvars', - 'commonvars','note']} - B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | - 'program' | 'block data' | 'type' | 'pythonmodule' - B['body'] --- list containing `subblocks' with the same structure as `blocks' - B['parent_block'] --- dictionary of a parent block: - C['body'][]['parent_block'] is C - B['vars'] --- dictionary of variable definitions - B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) - B['name'] --- name of the block (not if B['block']=='interface') - B['prefix'] --- prefix string (only if B['block']=='function') - B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' - B['result'] --- name of the return value (only if B['block']=='function') - B['implicit'] --- dictionary {'a':,'b':...} | None - B['externals'] --- list of variables being external - B['interfaced'] --- list of variables being external and defined - B['common'] --- dictionary of common blocks (list of objects) - B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) - B['from'] --- string showing the 'parents' of the current block - B['use'] --- dictionary of modules used in current block: - {:{['only':<0|1>],['map':{:,...}]}} - B['note'] --- list of LaTeX comments on the block - B['f2pyenhancements'] --- optional dictionary - {'threadsafe':'','fortranname':, - 'callstatement':|, - 'callprotoargument':, - 'usercode':|, - 'pymethoddef:' - } - B['entry'] --- dictionary {entryname:argslist,..} - B['varnames'] --- list of variable names given in the order of reading the - Fortran code, useful for derived types. 
- B['saved_interface'] --- a string of scanned routine signature, defines explicit interface - *** Variable definition is a dictionary - D = B['vars'][] = - {'typespec'[,'attrspec','kindselector','charselector','=','typename']} - D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | - 'double precision' | 'integer' | 'logical' | 'real' | 'type' - D['attrspec'] --- list of attributes (e.g. 'dimension()', - 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', - 'optional','required', etc) - K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = - 'complex' | 'integer' | 'logical' | 'real' ) - C = D['charselector'] = {['*','len','kind']} - (only if D['typespec']=='character') - D['='] --- initialization expression string - D['typename'] --- name of the type if D['typespec']=='type' - D['dimension'] --- list of dimension bounds - D['intent'] --- list of intent specifications - D['depend'] --- list of variable names on which current variable depends on - D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised - D['note'] --- list of LaTeX comments on the variable - *** Meaning of kind/char selectors (few examples): - D['typespec>']*K['*'] - D['typespec'](kind=K['kind']) - character*C['*'] - character(len=C['len'],kind=C['kind']) - (see also fortran type declaration statement formats below) - - Fortran 90 type declaration statement format (F77 is subset of F90) -==================================================================== - (Main source: IBM XL Fortran 5.1 Language Reference Manual) - type declaration = [[]::] - = byte | - character[] | - complex[] | - double complex | - double precision | - integer[] | - logical[] | - real[] | - type() - = * | - ([len=][,[kind=]]) | - (kind=[,len=]) - = * | - ([kind=]) - = comma separated list of attributes. 
- Only the following attributes are used in - building up the interface: - external - (parameter --- affects '=' key) - optional - intent - Other attributes are ignored. - = in | out | inout - = comma separated list of dimension bounds. - = [[*][()] | [()]*] - [// | =] [,] - - In addition, the following attributes are used: check,depend,note - - TODO: - * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' - -> 'real x(2)') - The above may be solved by creating appropriate preprocessor program, for example. -""" -# -import sys -import string -import fileinput -import re -import pprint -import os -import copy -from auxfuncs import * - -# Global flags: -strictf77=1 # Ignore `!' comments unless line[0]=='!' -sourcecodeform='fix' # 'fix','free' -quiet=0 # Be verbose if 0 (Obsolete: not used any more) -verbose=1 # Be quiet if 0, extra verbose if > 1. -tabchar=4*' ' -pyffilename='' -f77modulename='' -skipemptyends=0 # for old F77 programs without 'program' statement -ignorecontains=1 -dolowercase=1 -debug=[] -## do_analyze = 1 - -###### global variables - -## use reload(crackfortran) to reset these variables - -groupcounter=0 -grouplist={groupcounter:[]} -neededmodule=-1 -expectbegin=1 -skipblocksuntil=-1 -usermodules=[] -f90modulevars={} -gotnextfile=1 -filepositiontext='' -currentfilename='' -skipfunctions=[] -skipfuncs=[] -onlyfuncs=[] -include_paths=[] -previous_context = None - -###### Some helper functions -def show(o,f=0):pprint.pprint(o) -errmess=sys.stderr.write -def outmess(line,flag=1): - global filepositiontext - if not verbose: return - if not quiet: - if flag:sys.stdout.write(filepositiontext) - sys.stdout.write(line) -re._MAXCACHE=50 -defaultimplicitrules={} -for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'} -for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'} -del c -badnames={} -invbadnames={} -for n in ['int','double','float','char','short','long','void','case','while', - 
'return','signed','unsigned','if','for','typedef','sizeof','union', - 'struct','static','register','new','break','do','goto','switch', - 'continue','else','inline','extern','delete','const','auto', - 'len','rank','shape','index','slen','size','_i', - 'max', 'min', - 'flen','fshape', - 'string','complex_double','float_double','stdin','stderr','stdout', - 'type','default']: - badnames[n]=n+'_bn' - invbadnames[n+'_bn']=n -def rmbadname1(name): - if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name,badnames[name])) - return badnames[name] - return name -def rmbadname(names): return map(rmbadname1,names) - -def undo_rmbadname1(name): - if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\ - %(name,invbadnames[name])) - return invbadnames[name] - return name -def undo_rmbadname(names): return map(undo_rmbadname1,names) - -def getextension(name): - i=name.rfind('.') - if i==-1: return '' - if '\\' in name[i:]: return '' - if '/' in name[i:]: return '' - return name[i+1:] - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search -_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - f = open(file,'r') - line = f.readline() - n = 15 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - if line[0]!='!' 
and line.strip(): - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - - -####### Read fortran (77,90) code -def readfortrancode(ffile,dowithline=show,istop=1): - """ - Read fortran codes from files and - 1) Get rid of comments, line continuations, and empty lines; lower cases. - 2) Call dowithline(line) on every line. - 3) Recursively call itself when statement \"include ''\" is met. - """ - global gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase,include_paths - if not istop: - saveglobals=gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase - if ffile==[]: return - localdolowercase = dolowercase - cont=0 - finalline='' - ll='' - commentline=re.compile(r'(?P([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P.*)') - includeline=re.compile(r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")',re.I) - cont1=re.compile(r'(?P.*)&\s*\Z') - cont2=re.compile(r'(\s*&|)(?P.*)') - mline_mark = re.compile(r".*?'''") - if istop: dowithline('',-1) - ll,l1='','' - spacedigits=[' ']+map(str,range(10)) - filepositiontext='' - fin=fileinput.FileInput(ffile) - while 1: - l=fin.readline() - if not l: break - if fin.isfirstline(): - filepositiontext='' - currentfilename=fin.filename() - gotnextfile=1 - l1=l - strictf77=0 - sourcecodeform='fix' - ext = os.path.splitext(currentfilename)[1] - if is_f_file(currentfilename) and \ - not (_has_f90_header(l) or _has_fix_header(l)): - strictf77=1 - elif is_free_format(currentfilename) and not _has_fix_header(l): - sourcecodeform='free' - if strictf77: beginpattern=beginpattern77 - else: beginpattern=beginpattern90 - outmess('\tReading file %s (format:%s%s)\n'\ - %(`currentfilename`,sourcecodeform, - strictf77 and ',strict' or '')) - - l=l.expandtabs().replace('\xa0',' ') - while not l=='': # Get rid of newline characters - 
if l[-1] not in "\n\r\f": break - l=l[:-1] - if not strictf77: - r=commentline.match(l) - if r: - l=r.group('line')+' ' # Strip comments starting with `!' - rl=r.group('rest') - if rl[:4].lower()=='f2py': # f2py directive - l = l + 4*' ' - r=commentline.match(rl[4:]) - if r: l=l+r.group('line') - else: l = l + rl[4:] - if l.strip()=='': # Skip empty line - cont=0 - continue - if sourcecodeform=='fix': - if l[0] in ['*','c','!','C','#']: - if l[1:5].lower()=='f2py': # f2py directive - l=' '+l[5:] - else: # Skip comment line - cont=0 - continue - elif strictf77: - if len(l)>72: l=l[:72] - if not (l[0] in spacedigits): - raise Exception('readfortrancode: Found non-(space,digit) char ' - 'in the first column.\n\tAre you sure that ' - 'this code is in fix form?\n\tline=%s' % `l`) - - if (not cont or strictf77) and (len(l)>5 and not l[5]==' '): - # Continuation of a previous line - ll=ll+l[6:] - finalline='' - origfinalline='' - else: - if not strictf77: - # F90 continuation - r=cont1.match(l) - if r: l=r.group('line') # Continuation follows .. - if cont: - ll=ll+cont2.match(l).group('line') - finalline='' - origfinalline='' - else: - l=' '+l[5:] # clean up line beginning from possible digits. - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline=ll - ll=l - cont=(r is not None) - else: - l=' '+l[5:] # clean up line beginning from possible digits. - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline =ll - ll=l - - elif sourcecodeform=='free': - if not cont and ext=='.pyf' and mline_mark.match(l): - l = l + '\n' - while 1: - lc = fin.readline() - if not lc: - errmess('Unexpected end of file when reading multiline\n') - break - l = l + lc - if mline_mark.match(lc): - break - l = l.rstrip() - r=cont1.match(l) - if r: l=r.group('line') # Continuation follows .. 
- if cont: - ll=ll+cont2.match(l).group('line') - finalline='' - origfinalline='' - else: - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline =ll - ll=l - cont=(r is not None) - else: - raise ValueError,"Flag sourcecodeform must be either 'fix' or 'free': %s"%`sourcecodeform` - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1) - m=includeline.match(origfinalline) - if m: - fn=m.group('name') - if os.path.isfile(fn): - readfortrancode(fn,dowithline=dowithline,istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir,fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1,dowithline=dowithline,istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(`fn`, os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - l1=ll - if localdolowercase: - finalline=ll.lower() - else: finalline=ll - origfinalline = ll - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1) - m=includeline.match(origfinalline) - if m: - fn=m.group('name') - if os.path.isfile(fn): - readfortrancode(fn,dowithline=dowithline,istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir,fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1,dowithline=dowithline,istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n'%(`fn`, os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - filepositiontext='' - fin.close() - if istop: dowithline('',1) - else: - gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase=saveglobals - -########### Crack line -beforethisafter=r'\s*(?P%s(?=\s*(\b(%s)\b)))'+ \ - r'\s*(?P(\b(%s)\b))'+ \ - r'\s*(?P%s)\s*\Z' -## -fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' -typespattern=re.compile(beforethisafter%('',fortrantypes,fortrantypes,'.*'),re.I),'type' -typespattern4implicit=re.compile(beforethisafter%('',fortrantypes+'|static|automatic|undefined',fortrantypes+'|static|automatic|undefined','.*'),re.I) -# -functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)','function','function','.*'),re.I),'begin' -subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?','subroutine','subroutine','.*'),re.I),'begin' -#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' -# -groupbegins77=r'program|block\s*data' -beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin' -groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' -beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin' -groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' -endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end' -#endifs='end\s*(if|do|where|select|while|forall)' -endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' -endifpattern=re.compile(beforethisafter%('[\w]*?',endifs,endifs,'[\w\s]*'),re.I),'endif' -# -implicitpattern=re.compile(beforethisafter%('','implicit','implicit','.*'),re.I),'implicit' 
-dimensionpattern=re.compile(beforethisafter%('','dimension|virtual','dimension|virtual','.*'),re.I),'dimension' -externalpattern=re.compile(beforethisafter%('','external','external','.*'),re.I),'external' -optionalpattern=re.compile(beforethisafter%('','optional','optional','.*'),re.I),'optional' -requiredpattern=re.compile(beforethisafter%('','required','required','.*'),re.I),'required' -publicpattern=re.compile(beforethisafter%('','public','public','.*'),re.I),'public' -privatepattern=re.compile(beforethisafter%('','private','private','.*'),re.I),'private' -intrisicpattern=re.compile(beforethisafter%('','intrisic','intrisic','.*'),re.I),'intrisic' -intentpattern=re.compile(beforethisafter%('','intent|depend|note|check','intent|depend|note|check','\s*\(.*?\).*'),re.I),'intent' -parameterpattern=re.compile(beforethisafter%('','parameter','parameter','\s*\(.*'),re.I),'parameter' -datapattern=re.compile(beforethisafter%('','data','data','.*'),re.I),'data' -callpattern=re.compile(beforethisafter%('','call','call','.*'),re.I),'call' -entrypattern=re.compile(beforethisafter%('','entry','entry','.*'),re.I),'entry' -callfunpattern=re.compile(beforethisafter%('','callfun','callfun','.*'),re.I),'callfun' -commonpattern=re.compile(beforethisafter%('','common','common','.*'),re.I),'common' -usepattern=re.compile(beforethisafter%('','use','use','.*'),re.I),'use' -containspattern=re.compile(beforethisafter%('','contains','contains',''),re.I),'contains' -formatpattern=re.compile(beforethisafter%('','format','format','.*'),re.I),'format' -## Non-fortran and f2py-specific statements -f2pyenhancementspattern=re.compile(beforethisafter%('','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','.*'),re.I|re.S),'f2pyenhancements' -multilinepattern = re.compile(r"\s*(?P''')(?P.*?)(?P''')\s*\Z",re.S),'multiline' -## - -def _simplifyargs(argsline): - a = [] - for n in 
markoutercomma(argsline).split('@,@'): - for r in '(),': - n = n.replace(r,'_') - a.append(n) - return ','.join(a) - -crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+[\w]*\b)\s*[=].*',re.I) -def crackline(line,reset=0): - """ - reset=-1 --- initialize - reset=0 --- crack the line - reset=1 --- final check if mismatch of blocks occured - - Cracked data is saved in grouplist[0]. - """ - global beginpattern,groupcounter,groupname,groupcache,grouplist,gotnextfile,\ - filepositiontext,currentfilename,neededmodule,expectbegin,skipblocksuntil,\ - skipemptyends,previous_context - if ';' in line and not (f2pyenhancementspattern[0].match(line) or - multilinepattern[0].match(line)): - for l in line.split(';'): - assert reset==0,`reset` # XXX: non-zero reset values need testing - crackline(l,reset) - return - if reset<0: - groupcounter=0 - groupname={groupcounter:''} - groupcache={groupcounter:{}} - grouplist={groupcounter:[]} - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['block']='' - groupcache[groupcounter]['name']='' - neededmodule=-1 - skipblocksuntil=-1 - return - if reset>0: - fl=0 - if f77modulename and neededmodule==groupcounter: fl=2 - while groupcounter>fl: - outmess('crackline: groupcounter=%s groupname=%s\n'%(`groupcounter`,`groupname`)) - outmess('crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 - if f77modulename and neededmodule==groupcounter: - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end interface - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end module - neededmodule=-1 - return - if line=='': return - flag=0 - for pat in [dimensionpattern,externalpattern,intentpattern,optionalpattern, - requiredpattern, - parameterpattern,datapattern,publicpattern,privatepattern, - intrisicpattern, - endifpattern,endpattern, - formatpattern, - beginpattern,functionpattern,subroutinepattern, - implicitpattern,typespattern,commonpattern, - callpattern,usepattern,containspattern, - entrypattern, - f2pyenhancementspattern, - multilinepattern - ]: - m = pat[0].match(line) - if m: - break - flag=flag+1 - if not m: - re_1 = crackline_re_1 - if 0<=skipblocksuntil<=groupcounter:return - if 'externals' in groupcache[groupcounter]: - for name in groupcache[groupcounter]['externals']: - if name in invbadnames: - name=invbadnames[name] - if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: - continue - m1=re.match(r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z'%name,markouterparen(line),re.I) - if m1: - m2 = re_1.match(m1.group('before')) - a = _simplifyargs(m1.group('args')) - if m2: - line='callfun %s(%s) result (%s)'%(name,a,m2.group('result')) - else: line='callfun %s(%s)'%(name,a) - m = callfunpattern[0].match(line) - if not m: - outmess('crackline: could not resolve function call for line=%s.\n'%`line`) - return - 
analyzeline(m,'callfun',line) - return - if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')): - previous_context = None - outmess('crackline:%d: No pattern for line\n'%(groupcounter)) - return - elif pat[1]=='end': - if 0<=skipblocksuntil(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z',re.I) -nameargspattern=re.compile(r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z',re.I) -callnameargspattern=re.compile(r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z',re.I) -real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') -real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') - -_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b',re.I) -def _is_intent_callback(vdecl): - for a in vdecl.get('attrspec',[]): - if _intentcallbackpattern.match(a): - return 1 - return 0 - -def _resolvenameargspattern(line): - line = markouterparen(line) - m1=nameargspattern.match(line) - if m1: - return m1.group('name'),m1.group('args'),m1.group('result'), m1.group('bind') - m1=callnameargspattern.match(line) - if m1: - return m1.group('name'),m1.group('args'),None, None - return None,[],None, None - -def analyzeline(m,case,line): - global groupcounter,groupname,groupcache,grouplist,filepositiontext,\ - currentfilename,f77modulename,neededinterface,neededmodule,expectbegin,\ - gotnextfile,previous_context - block=m.group('this') - if case != 'multiline': - previous_context = None - if expectbegin and case not in ['begin','call','callfun','type'] \ - and not skipemptyends and groupcounter<1: - newname=os.path.basename(currentfilename).split('.')[0] - outmess('analyzeline: no group yet. 
Creating program group with name "%s".\n'%newname) - gotnextfile=0 - groupcounter=groupcounter+1 - groupname[groupcounter]='program' - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['block']='program' - groupcache[groupcounter]['name']=newname - groupcache[groupcounter]['from']='fromsky' - expectbegin=0 - if case in ['begin','call','callfun']: - # Crack line => block,name,args,result - block = block.lower() - if re.match(r'block\s*data',block,re.I): block='block data' - if re.match(r'python\s*module',block,re.I): block='python module' - name,args,result,bind = _resolvenameargspattern(m.group('after')) - if name is None: - if block=='block data': - name = '_BLOCK_DATA_' - else: - name = '' - if block not in ['interface','block data']: - outmess('analyzeline: No name/args pattern found for line.\n') - - previous_context = (block,name,groupcounter) - if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) - else: args=[] - if '' in args: - while '' in args: - args.remove('') - outmess('analyzeline: argument list is malformed (missing argument).\n') - - # end of crack line => block,name,args,result - needmodule=0 - needinterface=0 - - if case in ['call','callfun']: - needinterface=1 - if 'args' not in groupcache[groupcounter]: - return - if name not in groupcache[groupcounter]['args']: - return - for it in grouplist[groupcounter]: - if it['name']==name: - return - if name in groupcache[groupcounter]['interfaced']: - return - block={'call':'subroutine','callfun':'function'}[case] - if f77modulename and neededmodule==-1 and groupcounter<=1: - neededmodule=groupcounter+2 - needmodule=1 - if block != 'interface': - needinterface=1 - # Create new block(s) - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - if needmodule: - if verbose>1: - outmess('analyzeline: Creating module block 
%s\n'%`f77modulename`,0) - groupname[groupcounter]='module' - groupcache[groupcounter]['block']='python module' - groupcache[groupcounter]['name']=f77modulename - groupcache[groupcounter]['from']='' - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - if needinterface: - if verbose>1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter),0) - groupname[groupcounter]='interface' - groupcache[groupcounter]['block']='interface' - groupcache[groupcounter]['name']='unknown_interface' - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name']) - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - groupname[groupcounter]=block - groupcache[groupcounter]['block']=block - if not name: name='unknown_'+block - groupcache[groupcounter]['prefix']=m.group('before') - groupcache[groupcounter]['name']=rmbadname1(name) - groupcache[groupcounter]['result']=result - if groupcounter==1: - groupcache[groupcounter]['from']=currentfilename - else: - if f77modulename and groupcounter==3: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename) - else: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name']) - for k in groupcache[groupcounter].keys(): - if not groupcache[groupcounter][k]: - del groupcache[groupcounter][k] - - groupcache[groupcounter]['args']=args - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - 
groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['entry']={} - # end of creation - if block=='type': - groupcache[groupcounter]['varnames'] = [] - - if case in ['call','callfun']: # set parents variables - if name not in groupcache[groupcounter-2]['externals']: - groupcache[groupcounter-2]['externals'].append(name) - groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars']) - #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']] - #except: pass - try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] - except: pass - if block in ['function','subroutine']: # set global attributes - try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter-2]['vars']['']) - except: pass - if case=='callfun': # return type - if result and result in groupcache[groupcounter]['vars']: - if not name==result: - groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter]['vars'][result]) - #if groupcounter>1: # name is interfaced - try: groupcache[groupcounter-2]['interfaced'].append(name) - except: pass - if block=='function': - t=typespattern[0].match(m.group('before')+' '+name) - if t: - typespec,selector,attr,edecl=cracktypespec0(t.group('this'),t.group('after')) - updatevars(typespec,selector,attr,edecl) - - if case in ['call','callfun']: - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end routine - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end interface - - elif case=='entry': - name,args,result,bind=_resolvenameargspattern(m.group('after')) - if name is not None: - if 
args: - args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) - else: args=[] - assert result is None,`result` - groupcache[groupcounter]['entry'][name] = args - previous_context = ('entry',name,groupcounter) - elif case=='type': - typespec,selector,attr,edecl=cracktypespec0(block,m.group('after')) - last_name = updatevars(typespec,selector,attr,edecl) - if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case in ['dimension','intent','optional','required','external','public','private','intrisic']: - edecl=groupcache[groupcounter]['vars'] - ll=m.group('after').strip() - i=ll.find('::') - if i<0 and case=='intent': - i=markouterparen(ll).find('@)@')-2 - ll=ll[:i+1]+'::'+ll[i+1:] - i=ll.find('::') - if ll[i:]=='::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n'%(m.group('this'),ll[:i])) - ll = ll + ','.join(groupcache[groupcounter]['args']) - if i<0:i=0;pl='' - else: pl=ll[:i].strip();ll=ll[i+2:] - ch = markoutercomma(pl).split('@,@') - if len(ch)>1: - pl = ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:]))) - last_name = None - - for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: - m1=namepattern.match(e) - if not m1: - if case in ['public','private']: k='' - else: - print m.groupdict() - outmess('analyzeline: no name pattern found in %s statement for %s. 
Skipping.\n'%(case,`e`)) - continue - else: - k=rmbadname1(m1.group('name')) - if k not in edecl: - edecl[k]={} - if case=='dimension': - ap=case+m1.group('after') - if case=='intent': - ap=m.group('this')+pl - if _intentcallbackpattern.match(ap): - if k not in groupcache[groupcounter]['args']: - if groupcounter>1: - if '__user__' not in groupcache[groupcounter-2]['name']: - outmess('analyzeline: missing __user__ module (could be nothing)\n') - if k!=groupcache[groupcounter]['name']: # fixes ticket 1693 - outmess('analyzeline: appending intent(callback) %s'\ - ' to %s arguments\n' % (k,groupcache[groupcounter]['name'])) - groupcache[groupcounter]['args'].append(k) - else: - errmess('analyzeline: intent(callback) %s is ignored' % (k)) - else: - errmess('analyzeline: intent(callback) %s is already'\ - ' in argument list' % (k)) - if case in ['optional','required','public','external','private','intrisic']: - ap=case - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append(ap) - else: - edecl[k]['attrspec']=[ap] - if case=='external': - if groupcache[groupcounter]['block']=='program': - outmess('analyzeline: ignoring program arguments\n') - continue - if k not in groupcache[groupcounter]['args']: - #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`)) - continue - if 'externals' not in groupcache[groupcounter]: - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['externals'].append(k) - last_name = k - groupcache[groupcounter]['vars']=edecl - if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case=='parameter': - edecl=groupcache[groupcounter]['vars'] - ll=m.group('after').strip()[1:-1] - last_name = None - for e in markoutercomma(ll).split('@,@'): - try: - k,initexpr=[x.strip() for x in e.split('=')] - except: - outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e,ll));continue - params = get_parameters(edecl) - k=rmbadname1(k) - if k not in edecl: 
- edecl[k]={} - if '=' in edecl[k] and (not edecl[k]['=']==initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k,edecl[k]['='],initexpr)) - t = determineexprtype(initexpr,params) - if t: - if t.get('typespec')=='real': - tt = list(initexpr) - for m in real16pattern.finditer(initexpr): - tt[m.start():m.end()] = list(\ - initexpr[m.start():m.end()].lower().replace('d', 'e')) - initexpr = ''.join(tt) - elif t.get('typespec')=='complex': - initexpr = initexpr[1:].lower().replace('d','e').\ - replace(',','+1j*(') - try: - v = eval(initexpr,{},params) - except (SyntaxError,NameError,TypeError),msg: - errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\ - % (initexpr, msg)) - continue - edecl[k]['='] = repr(v) - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append('parameter') - else: edecl[k]['attrspec']=['parameter'] - last_name = k - groupcache[groupcounter]['vars']=edecl - if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case=='implicit': - if m.group('after').strip().lower()=='none': - groupcache[groupcounter]['implicit']=None - elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl=groupcache[groupcounter]['implicit'] - else: impl={} - if impl is None: - outmess('analyzeline: Overwriting earlier "implicit none" statement.\n') - impl={} - for e in markoutercomma(m.group('after')).split('@,@'): - decl={} - m1=re.match(r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z',e,re.I) - if not m1: - outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue - m2=typespattern4implicit.match(m1.group('this')) - if not m2: - outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue - typespec,selector,attr,edecl=cracktypespec0(m2.group('this'),m2.group('after')) - kindselect,charselect,typename=cracktypespec(typespec,selector) - decl['typespec']=typespec - decl['kindselector']=kindselect 
- decl['charselector']=charselect - decl['typename']=typename - for k in decl.keys(): - if not decl[k]: del decl[k] - for r in markoutercomma(m1.group('after')).split('@,@'): - if '-' in r: - try: begc,endc=[x.strip() for x in r.split('-')] - except: - outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement\n'%r);continue - else: begc=endc=r.strip() - if not len(begc)==len(endc)==1: - outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n'%r);continue - for o in range(ord(begc),ord(endc)+1): - impl[chr(o)]=decl - groupcache[groupcounter]['implicit']=impl - elif case=='data': - ll=[] - dl='';il='';f=0;fc=1;inp=0 - for c in m.group('after'): - if not inp: - if c=="'": fc=not fc - if c=='/' and fc: f=f+1;continue - if c=='(': inp = inp + 1 - elif c==')': inp = inp - 1 - if f==0: dl=dl+c - elif f==1: il=il+c - elif f==2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl,il]) - dl=c;il='';f=0 - if f==2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl,il]) - vars={} - if 'vars' in groupcache[groupcounter]: - vars=groupcache[groupcounter]['vars'] - last_name = None - for l in ll: - l=[x.strip() for x in l] - if l[0][0]==',':l[0]=l[0][1:] - if l[0][0]=='(': - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0]) - continue - #if '(' in l[0]: - # #outmess('analyzeline: ignoring this data statement.\n') - # continue - i=0;j=0;llen=len(l[1]) - for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): - if v[0]=='(': - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v) - # XXX: subsequent init expressions may get wrong values. - # Ignoring since data statements are irrelevant for wrapping. 
- continue - fc=0 - while (i=3: - bn = bn.strip() - if not bn: bn='_BLNK_' - cl.append([bn,ol]) - f=f-2;bn='';ol='' - if f%2: bn=bn+c - else: ol=ol+c - bn = bn.strip() - if not bn: bn='_BLNK_' - cl.append([bn,ol]) - commonkey={} - if 'common' in groupcache[groupcounter]: - commonkey=groupcache[groupcounter]['common'] - for c in cl: - if c[0] in commonkey: - outmess('analyzeline: previously defined common block encountered. Skipping.\n') - continue - commonkey[c[0]]=[] - for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: - if i: commonkey[c[0]].append(i) - groupcache[groupcounter]['common']=commonkey - previous_context = ('common',bn,groupcounter) - elif case=='use': - m1=re.match(r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z',m.group('after'),re.I) - if m1: - mm=m1.groupdict() - if 'use' not in groupcache[groupcounter]: - groupcache[groupcounter]['use']={} - name=m1.group('name') - groupcache[groupcounter]['use'][name]={} - isonly=0 - if 'list' in mm and mm['list'] is not None: - if 'notonly' in mm and mm['notonly'] is None: - isonly=1 - groupcache[groupcounter]['use'][name]['only']=isonly - ll=[x.strip() for x in mm['list'].split(',')] - rl={} - for l in ll: - if '=' in l: - m2=re.match(r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z',l,re.I) - if m2: rl[m2.group('local').strip()]=m2.group('use').strip() - else: - outmess('analyzeline: Not local=>use pattern found in %s\n'%`l`) - else: - rl[l]=l - groupcache[groupcounter]['use'][name]['map']=rl - else: - pass - else: - print m.groupdict() - outmess('analyzeline: Could not crack the use statement.\n') - elif case in ['f2pyenhancements']: - if 'f2pyenhancements' not in groupcache[groupcounter]: - groupcache[groupcounter]['f2pyenhancements'] = {} - d = groupcache[groupcounter]['f2pyenhancements'] - if m.group('this')=='usercode' and 'usercode' in d: - if type(d['usercode']) is type(''): - d['usercode'] = [d['usercode']] - d['usercode'].append(m.group('after')) - else: - 
d[m.group('this')] = m.group('after') - elif case=='multiline': - if previous_context is None: - if verbose: - outmess('analyzeline: No context for multiline block.\n') - return - gc = groupcounter - #gc = previous_context[2] - appendmultiline(groupcache[gc], - previous_context[:2], - m.group('this')) - else: - if verbose>1: - print m.groupdict() - outmess('analyzeline: No code implemented for line.\n') - -def appendmultiline(group, context_name,ml): - if 'f2pymultilines' not in group: - group['f2pymultilines'] = {} - d = group['f2pymultilines'] - if context_name not in d: - d[context_name] = [] - d[context_name].append(ml) - return - -def cracktypespec0(typespec,ll): - selector=None - attr=None - if re.match(r'double\s*complex',typespec,re.I): typespec='double complex' - elif re.match(r'double\s*precision',typespec,re.I): typespec='double precision' - else: typespec=typespec.strip().lower() - m1=selectpattern.match(markouterparen(ll)) - if not m1: - outmess('cracktypespec0: no kind/char_selector pattern found for line.\n') - return - d=m1.groupdict() - for k in d.keys(): d[k]=unmarkouterparen(d[k]) - if typespec in ['complex','integer','logical','real','character','type']: - selector=d['this'] - ll=d['after'] - i=ll.find('::') - if i>=0: - attr=ll[:i].strip() - ll=ll[i+2:] - return typespec,selector,attr,ll -##### -namepattern=re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z',re.I) -kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z',re.I) -charselector=re.compile(r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z',re.I) -lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z',re.I) -lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z',re.I) -def removespaces(expr): - expr=expr.strip() - if len(expr)<=1: return expr - expr2=expr[0] - for i in 
range(1,len(expr)-1): - if expr[i]==' ' and \ - ((expr[i+1] in "()[]{}=+-/* ") or (expr[i-1] in "()[]{}=+-/* ")): continue - expr2=expr2+expr[i] - expr2=expr2+expr[-1] - return expr2 -def markinnerspaces(line): - l='';f=0 - cc='\'' - cc1='"' - cb='' - for c in line: - if cb=='\\' and c in ['\\','\'','"']: - l=l+c; - cb=c - continue - if f==0 and c in ['\'','"']: cc=c; cc1={'\'':'"','"':'\''}[c] - if c==cc:f=f+1 - elif c==cc:f=f-1 - elif c==' ' and f==1: l=l+'@_@'; continue - l=l+c;cb=c - return l -def updatevars(typespec,selector,attrspec,entitydecl): - global groupcache,groupcounter - last_name = None - kindselect,charselect,typename=cracktypespec(typespec,selector) - if attrspec: - attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')] - l = [] - c = re.compile(r'(?P[a-zA-Z]+)') - for a in attrspec: - if not a: - continue - m = c.match(a) - if m: - s = m.group('start').lower() - a = s + a[len(s):] - l.append(a) - attrspec = l - el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')] - el1=[] - for e in el: - for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)),comma=' ').split('@ @')]: - if e1: el1.append(e1.replace('@_@',' ')) - for e in el1: - m=namepattern.match(e) - if not m: - outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(`e`)) - continue - ename=rmbadname1(m.group('name')) - edecl={} - if ename in groupcache[groupcounter]['vars']: - edecl=groupcache[groupcounter]['vars'][ename].copy() - not_has_typespec = 'typespec' not in edecl - if not_has_typespec: - edecl['typespec']=typespec - elif typespec and (not typespec==edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename,edecl['typespec'],typespec)) - if 'kindselector' not in edecl: - edecl['kindselector']=copy.copy(kindselect) - elif kindselect: - for k in kindselect.keys(): - if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k])) - else: edecl['kindselector'][k]=copy.copy(kindselect[k]) - if 'charselector' not in edecl and charselect: - if not_has_typespec: - edecl['charselector']=charselect - else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \ - %(ename,charselect)) - elif charselect: - for k in charselect.keys(): - if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k])) - else: edecl['charselector'][k]=copy.copy(charselect[k]) - if 'typename' not in edecl: - edecl['typename']=typename - elif typename and (not edecl['typename']==typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename,edecl['typename'],typename)) - if 'attrspec' not in edecl: - edecl['attrspec']=copy.copy(attrspec) - elif attrspec: - for a in attrspec: - if a not in edecl['attrspec']: - edecl['attrspec'].append(a) - else: - edecl['typespec']=copy.copy(typespec) - edecl['kindselector']=copy.copy(kindselect) - edecl['charselector']=copy.copy(charselect) - edecl['typename']=typename - edecl['attrspec']=copy.copy(attrspec) - if m.group('after'): - m1=lenarraypattern.match(markouterparen(m.group('after'))) - if m1: - d1=m1.groupdict() - for lk in ['len','array','init']: - if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2'] - for k in d1.keys(): - if d1[k] is not None: d1[k]=unmarkouterparen(d1[k]) - else: del d1[k] - if 'len' in d1 and 'array' in d1: - if d1['len']=='': - d1['len']=d1['array'] - del d1['array'] - else: - d1['array']=d1['array']+','+d1['len'] - del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec,e,typespec,ename,d1['array'])) - if 'array' in d1: - dm = 'dimension(%s)'%d1['array'] - if 'attrspec' not in edecl or (not edecl['attrspec']): - edecl['attrspec']=[dm] - else: - edecl['attrspec'].append(dm) - for dm1 in edecl['attrspec']: - if dm1[:9]=='dimension' and dm1!=dm: - del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \ - % (ename,dm1,dm)) - break - - if 'len' in d1: - if typespec in ['complex','integer','logical','real']: - if ('kindselector' not in edecl) or (not edecl['kindselector']): - edecl['kindselector']={} - edecl['kindselector']['*']=d1['len'] - elif typespec == 'character': - if ('charselector' not in edecl) or (not edecl['charselector']): - edecl['charselector']={} - if 'len' in edecl['charselector']: - del edecl['charselector']['len'] - edecl['charselector']['*']=d1['len'] - if 'init' in d1: - if '=' in edecl and (not edecl['=']==d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename,edecl['='],d1['init'])) - else: - edecl['=']=d1['init'] - else: - outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after'))) - for k in edecl.keys(): - if not edecl[k]: - del edecl[k] - groupcache[groupcounter]['vars'][ename]=edecl - if 'varnames' in groupcache[groupcounter]: - groupcache[groupcounter]['varnames'].append(ename) - last_name = ename - return last_name - -def cracktypespec(typespec,selector): - kindselect=None - charselect=None - typename=None - if selector: - if typespec in ['complex','integer','logical','real']: - kindselect=kindselector.match(selector) - if not kindselect: - outmess('cracktypespec: no kindselector pattern found for %s\n'%(`selector`)) - return - kindselect=kindselect.groupdict() - kindselect['*']=kindselect['kind2'] - del kindselect['kind2'] - for k in kindselect.keys(): - if not kindselect[k]: del kindselect[k] - for k,i in kindselect.items(): - kindselect[k] = rmbadname1(i) - elif typespec=='character': - charselect=charselector.match(selector) - if not charselect: - outmess('cracktypespec: no charselector pattern found for %s\n'%(`selector`)) - return - charselect=charselect.groupdict() - charselect['*']=charselect['charlen'] - del charselect['charlen'] - if charselect['lenkind']: - lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind'])) - lenkind=lenkind.groupdict() - for lk in ['len','kind']: - if lenkind[lk+'2']: - lenkind[lk]=lenkind[lk+'2'] - charselect[lk]=lenkind[lk] - del lenkind[lk+'2'] - del charselect['lenkind'] - for k in charselect.keys(): - if not charselect[k]: del charselect[k] - for k,i in charselect.items(): - charselect[k] = rmbadname1(i) - elif typespec=='type': - typename=re.match(r'\s*\(\s*(?P\w+)\s*\)',selector,re.I) - if typename: typename=typename.group('name') - else: outmess('cracktypespec: no typename found in %s\n'%(`typespec+selector`)) - else: - outmess('cracktypespec: no selector used for %s\n'%(`selector`)) - return 
kindselect,charselect,typename -###### -def setattrspec(decl,attr,force=0): - if not decl: - decl={} - if not attr: - return decl - if 'attrspec' not in decl: - decl['attrspec']=[attr] - return decl - if force: decl['attrspec'].append(attr) - if attr in decl['attrspec']: return decl - if attr=='static' and 'automatic' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr=='automatic' and 'static' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr=='public' and 'private' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr=='private' and 'public' not in decl['attrspec']: - decl['attrspec'].append(attr) - else: - decl['attrspec'].append(attr) - return decl - -def setkindselector(decl,sel,force=0): - if not decl: - decl={} - if not sel: - return decl - if 'kindselector' not in decl: - decl['kindselector']=sel - return decl - for k in sel.keys(): - if force or k not in decl['kindselector']: - decl['kindselector'][k]=sel[k] - return decl - -def setcharselector(decl,sel,force=0): - if not decl: - decl={} - if not sel: - return decl - if 'charselector' not in decl: - decl['charselector']=sel - return decl - for k in sel.keys(): - if force or k not in decl['charselector']: - decl['charselector'][k]=sel[k] - return decl - -def getblockname(block,unknown='unknown'): - if 'name' in block: - return block['name'] - return unknown - -###### post processing - -def setmesstext(block): - global filepositiontext - try: - filepositiontext='In: %s:%s\n'%(block['from'],block['name']) - except: - pass - -def get_usedict(block): - usedict = {} - if 'parent_block' in block: - usedict = get_usedict(block['parent_block']) - if 'use' in block: - usedict.update(block['use']) - return usedict - -def get_useparameters(block, param_map=None): - global f90modulevars - if param_map is None: - param_map = {} - usedict = get_usedict(block) - if not usedict: - return param_map - for usename,mapping in usedict.items(): - usename = usename.lower() - if 
usename not in f90modulevars: - outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name'))) - continue - mvars = f90modulevars[usename] - params = get_parameters(mvars) - if not params: - continue - # XXX: apply mapping - if mapping: - errmess('get_useparameters: mapping for %s not impl.' % (mapping)) - for k,v in params.items(): - if k in param_map: - outmess('get_useparameters: overriding parameter %s with'\ - ' value from module %s' % (`k`,`usename`)) - param_map[k] = v - - return param_map - -def postcrack2(block,tab='',param_map=None): - global f90modulevars - if not f90modulevars: - return block - if type(block)==types.ListType: - ret = [] - for g in block: - g = postcrack2(g,tab=tab+'\t',param_map=param_map) - ret.append(g) - return ret - setmesstext(block) - outmess('%sBlock: %s\n'%(tab,block['name']),0) - - if param_map is None: - param_map = get_useparameters(block) - - if param_map is not None and 'vars' in block: - vars = block['vars'] - for n in vars.keys(): - var = vars[n] - if 'kindselector' in var: - kind = var['kindselector'] - if 'kind' in kind: - val = kind['kind'] - if val in param_map: - kind['kind'] = param_map[val] - new_body = [] - for b in block['body']: - b = postcrack2(b,tab=tab+'\t',param_map=param_map) - new_body.append(b) - block['body'] = new_body - - return block - -def postcrack(block,args=None,tab=''): - """ - TODO: - function return values - determine expression types if in argument list - """ - global usermodules,onlyfunctions - if type(block)==types.ListType: - gret=[] - uret=[] - for g in block: - setmesstext(g) - g=postcrack(g,tab=tab+'\t') - if 'name' in g and '__user__' in g['name']: # sort user routines to appear first - uret.append(g) - else: - gret.append(g) - return uret+gret - setmesstext(block) - if (not type(block)==types.DictType) and 'block' not in block: - raise Exception('postcrack: Expected block dictionary instead of ' + \ - str(block)) - if 'name' in block and not 
block['name']=='unknown_interface': - outmess('%sBlock: %s\n'%(tab,block['name']),0) - blocktype=block['block'] - block=analyzeargs(block) - block=analyzecommon(block) - block['vars']=analyzevars(block) - block['sortvars']=sortvarnames(block['vars']) - if 'args' in block and block['args']: - args=block['args'] - block['body']=analyzebody(block,args,tab=tab) - - userisdefined=[] -## fromuser = [] - if 'use' in block: - useblock=block['use'] - for k in useblock.keys(): - if '__user__' in k: - userisdefined.append(k) -## if 'map' in useblock[k]: -## for n in useblock[k]['map'].values(): -## if n not in fromuser: fromuser.append(n) - else: useblock={} - name='' - if 'name' in block: - name=block['name'] - if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module - interfaced=[] - if 'interfaced' in block: - interfaced=block['interfaced'] - mvars=copy.copy(block['vars']) - if name: - mname=name+'__user__routines' - else: - mname='unknown__user__routines' - if mname in userisdefined: - i=1 - while '%s_%i'%(mname,i) in userisdefined: i=i+1 - mname='%s_%i'%(mname,i) - interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'} - for e in block['externals']: -## if e in fromuser: -## outmess(' Skipping %s that is defined explicitly in another use statement\n'%(`e`)) -## continue - if e in interfaced: - edef=[] - j=-1 - for b in block['body']: - j=j+1 - if b['block']=='interface': - i=-1 - for bb in b['body']: - i=i+1 - if 'name' in bb and bb['name']==e: - edef=copy.copy(bb) - del b['body'][i] - break - if edef: - if not b['body']: del block['body'][j] - del interfaced[interfaced.index(e)] - break - interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e]=mvars[e] - if interface['vars'] or interface['body']: - block['interfaced']=interfaced - mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']} - 
useblock[mname]={} - usermodules.append(mblock) - if useblock: - block['use']=useblock - return block - -def sortvarnames(vars): - indep = [] - dep = [] - for v in vars.keys(): - if 'depend' in vars[v] and vars[v]['depend']: - dep.append(v) - #print '%s depends on %s'%(v,vars[v]['depend']) - else: indep.append(v) - n = len(dep) - i = 0 - while dep: #XXX: How to catch dependence cycles correctly? - v = dep[0] - fl = 0 - for w in dep[1:]: - if w in vars[v]['depend']: - fl = 1 - break - if fl: - dep = dep[1:]+[v] - i = i + 1 - if i>n: - errmess('sortvarnames: failed to compute dependencies because' - ' of cyclic dependencies between ' - +', '.join(dep)+'\n') - indep = indep + dep - break - else: - indep.append(v) - dep = dep[1:] - n = len(dep) - i = 0 - #print indep - return indep - -def analyzecommon(block): - if not hascommon(block): return block - commonvars=[] - for k in block['common'].keys(): - comvars=[] - for e in block['common'][k]: - m=re.match(r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z',e,re.I) - if m: - dims=[] - if m.group('dims'): - dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')] - n=m.group('name').strip() - if n in block['vars']: - if 'attrspec' in block['vars'][n]: - block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims))) - else: - block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))] - else: - if dims: - block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]} - else: block['vars'][n]={} - if n not in commonvars: commonvars.append(n) - else: - n=e - errmess('analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n'%(e,k)) - comvars.append(n) - block['common'][k]=comvars - if 'commonvars' not in block: - block['commonvars']=commonvars - else: - block['commonvars']=block['commonvars']+commonvars - return block - -def analyzebody(block,args,tab=''): - global usermodules,skipfuncs,onlyfuncs,f90modulevars - setmesstext(block) - body=[] - for b in block['body']: - b['parent_block'] = block - 
if b['block'] in ['function','subroutine']: - if args is not None and b['name'] not in args: - continue - else: - as_=b['args'] - if b['name'] in skipfuncs: - continue - if onlyfuncs and b['name'] not in onlyfuncs: - continue - b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True) - - else: as_=args - b=postcrack(b,as_,tab=tab+'\t') - if b['block']=='interface' and not b['body']: - if 'f2pyenhancements' not in b: - continue - if b['block'].replace(' ','')=='pythonmodule': - usermodules.append(b) - else: - if b['block']=='module': - f90modulevars[b['name']] = b['vars'] - body.append(b) - return body - -def buildimplicitrules(block): - setmesstext(block) - implicitrules=defaultimplicitrules - attrrules={} - if 'implicit' in block: - if block['implicit'] is None: - implicitrules=None - if verbose>1: - outmess('buildimplicitrules: no implicit rules for routine %s.\n'%`block['name']`) - else: - for k in block['implicit'].keys(): - if block['implicit'][k].get('typespec') not in ['static','automatic']: - implicitrules[k]=block['implicit'][k] - else: - attrrules[k]=block['implicit'][k]['typespec'] - return implicitrules,attrrules - -def myeval(e,g=None,l=None): - r = eval(e,g,l) - if type(r) in [type(0),type(0.0)]: - return r - raise ValueError('r=%r' % (r)) - -getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I) -def getlincoef(e,xset): # e = a*x+b ; x in xset - try: - c = int(myeval(e,{},{})) - return 0,c,None - except: pass - if getlincoef_re_1.match(e): - return 1,0,e - len_e = len(e) - for x in xset: - if len(x)>len_e: continue - if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e): - # skip function calls having x as an argument, e.g max(1, x) - continue - re_1 = re.compile(r'(?P.*?)\b'+x+r'\b(?P.*)',re.I) - m = re_1.match(e) - if m: - try: - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s'%(m1.group('before'),0,m1.group('after')) - m1 = re_1.match(ee) - b = myeval(ee,{},{}) - m1 = re_1.match(e) - while m1: - ee = 
'%s(%s)%s'%(m1.group('before'),1,m1.group('after')) - m1 = re_1.match(ee) - a = myeval(ee,{},{}) - b - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s'%(m1.group('before'),0.5,m1.group('after')) - m1 = re_1.match(ee) - c = myeval(ee,{},{}) - # computing another point to be sure that expression is linear - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s'%(m1.group('before'),1.5,m1.group('after')) - m1 = re_1.match(ee) - c2 = myeval(ee,{},{}) - if (a*0.5+b==c and a*1.5+b==c2): - return a,b,x - except: pass - break - return None,None,None - -_varname_match = re.compile(r'\A[a-z]\w*\Z').match -def getarrlen(dl,args,star='*'): - edl = [] - try: edl.append(myeval(dl[0],{},{})) - except: edl.append(dl[0]) - try: edl.append(myeval(dl[1],{},{})) - except: edl.append(dl[1]) - if type(edl[0]) is type(0): - p1 = 1-edl[0] - if p1==0: d = str(dl[1]) - elif p1<0: d = '%s-%s'%(dl[1],-p1) - else: d = '%s+%s'%(dl[1],p1) - elif type(edl[1]) is type(0): - p1 = 1+edl[1] - if p1==0: d='-(%s)' % (dl[0]) - else: d='%s-(%s)' % (p1,dl[0]) - else: d = '%s-(%s)+1'%(dl[1],dl[0]) - try: return `myeval(d,{},{})`,None,None - except: pass - d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args) - if None not in [d1[0],d2[0]]: - if (d1[0],d2[0])==(0,0): - return `d2[1]-d1[1]+1`,None,None - b = d2[1] - d1[1] + 1 - d1 = (d1[0],0,d1[2]) - d2 = (d2[0],b,d2[2]) - if d1[0]==0 and d2[2] in args: - if b<0: return '%s * %s - %s'%(d2[0],d2[2],-b),d2[2],'+%s)/(%s)'%(-b,d2[0]) - elif b: return '%s * %s + %s'%(d2[0],d2[2],b),d2[2],'-%s)/(%s)'%(b,d2[0]) - else: return '%s * %s'%(d2[0],d2[2]),d2[2],')/(%s)'%(d2[0]) - if d2[0]==0 and d1[2] in args: - - if b<0: return '%s * %s - %s'%(-d1[0],d1[2],-b),d1[2],'+%s)/(%s)'%(-b,-d1[0]) - elif b: return '%s * %s + %s'%(-d1[0],d1[2],b),d1[2],'-%s)/(%s)'%(b,-d1[0]) - else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0]) - if d1[2]==d2[2] and d1[2] in args: - a = d2[0] - d1[0] - if not a: return `b`,None,None - if b<0: return '%s * %s - 
%s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a) - elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a) - else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a) - if d1[0]==d2[0]==1: - c = str(d1[2]) - if c not in args: - if _varname_match(c): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) - c = '(%s)'%c - if b==0: d='%s-%s' % (d2[2],c) - elif b<0: d='%s-%s-%s' % (d2[2],c,-b) - else: d='%s-%s+%s' % (d2[2],c,b) - elif d1[0]==0: - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)'%c2 - if d2[0]==1: pass - elif d2[0]==-1: c2='-%s' %c2 - else: c2='%s*%s'%(d2[0],c2) - - if b==0: d=c2 - elif b<0: d='%s-%s' % (c2,-b) - else: d='%s+%s' % (c2,b) - elif d2[0]==0: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)'%c1 - if d1[0]==1: c1='-%s'%c1 - elif d1[0]==-1: c1='+%s'%c1 - elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1) - else: c1 = '-%s*%s' % (d1[0],c1) - - if b==0: d=c1 - elif b<0: d='%s-%s' % (c1,-b) - else: d='%s+%s' % (c1,b) - else: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)'%c1 - if d1[0]==1: c1='-%s'%c1 - elif d1[0]==-1: c1='+%s'%c1 - elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1) - else: c1 = '-%s*%s' % (d1[0],c1) - - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)'%c2 - if d2[0]==1: pass - elif d2[0]==-1: c2='-%s' %c2 - else: c2='%s*%s'%(d2[0],c2) - - if b==0: d='%s%s' % (c2,c1) - elif b<0: d='%s%s-%s' % (c2,c1,-b) - else: d='%s%s+%s' % (c2,c1,b) - return d,None,None - -word_pattern = re.compile(r'\b[a-z][\w$]*\b',re.I) - -def _get_depend_dict(name, vars, deps): - if name in vars: - words = vars[name].get('depend',[]) - - if '=' in vars[name] and not isstring(vars[name]): - for word in word_pattern.findall(vars[name]['=']): - if 
word not in words and word in vars: - words.append(word) - for word in words[:]: - for w in deps.get(word,[]) \ - or _get_depend_dict(word, vars, deps): - if w not in words: - words.append(w) - else: - outmess('_get_depend_dict: no dependence info for %s\n' % (`name`)) - words = [] - deps[name] = words - return words - -def _calc_depend_dict(vars): - names = vars.keys() - depend_dict = {} - for n in names: - _get_depend_dict(n, vars, depend_dict) - return depend_dict - -def get_sorted_names(vars): - """ - """ - depend_dict = _calc_depend_dict(vars) - names = [] - for name in depend_dict.keys(): - if not depend_dict[name]: - names.append(name) - del depend_dict[name] - while depend_dict: - for name, lst in depend_dict.items(): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - return [name for name in names if name in vars] - -def _kind_func(string): - #XXX: return something sensible. - if string[0] in "'\"": - string = string[1:-1] - if real16pattern.match(string): - return 8 - elif real8pattern.match(string): - return 4 - return 'kind('+string+')' - -def _selected_int_kind_func(r): - #XXX: This should be processor dependent - m = 10**r - if m<=2**8: return 1 - if m<=2**16: return 2 - if m<=2**32: return 4 - if m<=2**63: return 8 - if m<=2**128: return 16 - return -1 - -def _selected_real_kind_func(p, r=0, radix=0): - #XXX: This should be processor dependent - # This is only good for 0 <= p <= 20 - if p < 7: return 4 - if p < 16: return 8 - if platform.machine().lower().startswith('power'): - if p <= 20: - return 16 - else: - if p < 19: - return 10 - elif p <= 20: - return 16 - return -1 - -def get_parameters(vars, global_params={}): - params = copy.copy(global_params) - g_params = copy.copy(global_params) - for name,func in [('kind',_kind_func), - ('selected_int_kind',_selected_int_kind_func), - ('selected_real_kind',_selected_real_kind_func), - ]: - if name not in 
g_params: - g_params[name] = func - param_names = [] - for n in get_sorted_names(vars): - if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: - param_names.append(n) - kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)',re.I) - selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)',re.I) - selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P.*)\s*\)',re.I) - for n in param_names: - if '=' in vars[n]: - v = vars[n]['='] - if islogical(vars[n]): - v = v.lower() - for repl in [ - ('.false.','False'), - ('.true.','True'), - #TODO: test .eq., .neq., etc replacements. - ]: - v = v.replace(*repl) - v = kind_re.sub(r'kind("\1")',v) - v = selected_int_kind_re.sub(r'selected_int_kind(\1)',v) - if isinteger(vars[n]) and not selected_kind_re.match(v): - v = v.split('_')[0] - if isdouble(vars[n]): - tt = list(v) - for m in real16pattern.finditer(v): - tt[m.start():m.end()] = list(\ - v[m.start():m.end()].lower().replace('d', 'e')) - v = ''.join(tt) - if iscomplex(vars[n]): - if v[0]=='(' and v[-1]==')': - l = markoutercomma(v[1:-1]).split('@,@') - try: - params[n] = eval(v,g_params,params) - except Exception,msg: - params[n] = v - #print params - outmess('get_parameters: got "%s" on %s\n' % (msg,`v`)) - if isstring(vars[n]) and type(params[n]) is type(0): - params[n] = chr(params[n]) - nl = n.lower() - if nl!=n: - params[nl] = params[n] - else: - print vars[n] - outmess('get_parameters:parameter %s does not have value?!\n'%(`n`)) - return params - -def _eval_length(length,params): - if length in ['(:)','(*)','*']: - return '(*)' - return _eval_scalar(length,params) - -_is_kind_number = re.compile(r'\d+_').match - -def _eval_scalar(value,params): - if _is_kind_number(value): - value = value.split('_')[0] - try: - value = str(eval(value,{},params)) - except (NameError, SyntaxError): - return value - except Exception,msg: - errmess('"%s" in evaluating %r '\ - '(available names: %s)\n' \ - % (msg,value,params.keys())) - return value 
- -def analyzevars(block): - global f90modulevars - setmesstext(block) - implicitrules,attrrules=buildimplicitrules(block) - vars=copy.copy(block['vars']) - if block['block']=='function' and block['name'] not in vars: - vars[block['name']]={} - if '' in block['vars']: - del vars[''] - if 'attrspec' in block['vars']['']: - gen=block['vars']['']['attrspec'] - for n in vars.keys(): - for k in ['public','private']: - if k in gen: - vars[n]=setattrspec(vars[n],k) - svars=[] - args = block['args'] - for a in args: - try: - vars[a] - svars.append(a) - except KeyError: - pass - for n in vars.keys(): - if n not in args: svars.append(n) - - params = get_parameters(vars, get_useparameters(block)) - - dep_matches = {} - name_match = re.compile(r'\w[\w\d_$]*').match - for v in vars.keys(): - m = name_match(v) - if m: - n = v[m.start():m.end()] - try: - dep_matches[n] - except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match - for n in svars: - if n[0] in attrrules.keys(): - vars[n]=setattrspec(vars[n],attrrules[n[0]]) - if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): - if implicitrules: - ln0 = n[0].lower() - for k in implicitrules[ln0].keys(): - if k=='typespec' and implicitrules[ln0][k]=='undefined': - continue - if k not in vars[n]: - vars[n][k]=implicitrules[ln0][k] - elif k=='attrspec': - for l in implicitrules[ln0][k]: - vars[n]=setattrspec(vars[n],l) - elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(`n`,block['name'])) - - if 'charselector' in vars[n]: - if 'len' in vars[n]['charselector']: - l = vars[n]['charselector']['len'] - try: - l = str(eval(l,{},params)) - except: - pass - vars[n]['charselector']['len'] = l - - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - l = vars[n]['kindselector']['kind'] - try: - l = str(eval(l,{},params)) - except: - pass - vars[n]['kindselector']['kind'] = l - - savelindims = {} - if 
'attrspec' in vars[n]: - attr=vars[n]['attrspec'] - attr.reverse() - vars[n]['attrspec']=[] - dim,intent,depend,check,note=None,None,None,None,None - for a in attr: - if a[:9]=='dimension': dim=(a[9:].strip())[1:-1] - elif a[:6]=='intent': intent=(a[6:].strip())[1:-1] - elif a[:6]=='depend': depend=(a[6:].strip())[1:-1] - elif a[:5]=='check': check=(a[5:].strip())[1:-1] - elif a[:4]=='note': note=(a[4:].strip())[1:-1] - else: vars[n]=setattrspec(vars[n],a) - if intent: - if 'intent' not in vars[n]: - vars[n]['intent']=[] - for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: - if not c in vars[n]['intent']: - vars[n]['intent'].append(c) - intent=None - if note: - note=note.replace('\\n\\n','\n\n') - note=note.replace('\\n ','\n') - if 'note' not in vars[n]: - vars[n]['note']=[note] - else: - vars[n]['note'].append(note) - note=None - if depend is not None: - if 'depend' not in vars[n]: - vars[n]['depend']=[] - for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): - if c not in vars[n]['depend']: - vars[n]['depend'].append(c) - depend=None - if check is not None: - if 'check' not in vars[n]: - vars[n]['check']=[] - for c in [x.strip() for x in markoutercomma(check).split('@,@')]: - if not c in vars[n]['check']: - vars[n]['check'].append(c) - check=None - if dim and 'dimension' not in vars[n]: - vars[n]['dimension']=[] - for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): - star = '*' - if d==':': - star=':' - if d in params: - d = str(params[d]) - for p in params.keys(): - m = re.match(r'(?P.*?)\b'+p+r'\b(?P.*)',d,re.I) - if m: - #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`)) - d = m.group('before')+str(params[p])+m.group('after') - if d==star: - dl = [star] - else: - dl=markoutercomma(d,':').split('@:@') - if len(dl)==2 and '*' in dl: # e.g. 
dimension(5:*) - dl = ['*'] - d = '*' - if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]] - if len(dl)==2: - d,v,di = getarrlen(dl,block['vars'].keys()) - if d[:4] == '1 * ': d = d[4:] - if di and di[-4:] == '/(1)': di = di[:-4] - if v: savelindims[d] = v,di - vars[n]['dimension'].append(d) - if 'dimension' in vars[n]: - if isintent_c(vars[n]): - shape_macro = 'shape' - else: - shape_macro = 'shape'#'fshape' - if isstringarray(vars[n]): - if 'charselector' in vars[n]: - d = vars[n]['charselector'] - if '*' in d: - d = d['*'] - errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\ - %(d,n, - ','.join(vars[n]['dimension']), - n,','.join(vars[n]['dimension']+[d]))) - vars[n]['dimension'].append(d) - del vars[n]['charselector'] - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - if 'c' not in vars[n]['intent']: - vars[n]['intent'].append('c') - else: - errmess("analyzevars: charselector=%r unhandled." % (d)) - if 'check' not in vars[n] and 'args' in block and n in block['args']: - flag = 'depend' not in vars[n] - if flag: - vars[n]['depend']=[] - vars[n]['check']=[] - if 'dimension' in vars[n]: - #/----< no check - #vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension']))) - i=-1; ni=len(vars[n]['dimension']) - for d in vars[n]['dimension']: - ddeps=[] # dependecies of 'd' - ad='' - pd='' - #origd = d - if d not in vars: - if d in savelindims: - pd,ad='(',savelindims[d][1] - d = savelindims[d][0] - else: - for r in block['args']: - #for r in block['vars'].keys(): - if r not in vars: - continue - if re.match(r'.*?\b'+r+r'\b',d,re.I): - ddeps.append(r) - if d in vars: - if 'attrspec' in vars[d]: - for aa in vars[d]['attrspec']: - if aa[:6]=='depend': - ddeps += aa[6:].strip()[1:-1].split(',') - if 'depend' in vars[d]: - ddeps=ddeps+vars[d]['depend'] - i=i+1 - if d in vars and ('depend' not in vars[d]) \ - and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ - and 
l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): - vars[d]['depend']=[n] - if ni>1: - vars[d]['=']='%s%s(%s,%s)%s'% (pd,shape_macro,n,i,ad) - else: - vars[d]['=']='%slen(%s)%s'% (pd,n,ad) - # /---< no check - if 1 and 'check' not in vars[d]: - if ni>1: - vars[d]['check']=['%s%s(%s,%i)%s==%s'\ - %(pd,shape_macro,n,i,ad,d)] - else: - vars[d]['check']=['%slen(%s)%s>=%s'%(pd,n,ad,d)] - if 'attrspec' not in vars[d]: - vars[d]['attrspec']=['optional'] - if ('optional' not in vars[d]['attrspec']) and\ - ('required' not in vars[d]['attrspec']): - vars[d]['attrspec'].append('optional') - elif d not in ['*',':']: - #/----< no check - #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d)) - #else: vars[n]['check'].append('len(%s)>=%s'%(n,d)) - if flag: - if d in vars: - if n not in ddeps: - vars[n]['depend'].append(d) - else: - vars[n]['depend'] = vars[n]['depend'] + ddeps - elif isstring(vars[n]): - length='1' - if 'charselector' in vars[n]: - if '*' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['*'], - params) - vars[n]['charselector']['*']=length - elif 'len' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['len'], - params) - del vars[n]['charselector']['len'] - vars[n]['charselector']['*']=length - - if not vars[n]['check']: - del vars[n]['check'] - if flag and not vars[n]['depend']: - del vars[n]['depend'] - if '=' in vars[n]: - if 'attrspec' not in vars[n]: - vars[n]['attrspec']=[] - if ('optional' not in vars[n]['attrspec']) and \ - ('required' not in vars[n]['attrspec']): - vars[n]['attrspec'].append('optional') - if 'depend' not in vars[n]: - vars[n]['depend']=[] - for v,m in dep_matches.items(): - if m(vars[n]['=']): vars[n]['depend'].append(v) - if not vars[n]['depend']: del vars[n]['depend'] - if isscalar(vars[n]): - vars[n]['='] = _eval_scalar(vars[n]['='],params) - - for n in vars.keys(): - if n==block['name']: # n is block name - if 'note' in vars[n]: - block['note']=vars[n]['note'] - 
if block['block']=='function': - if 'result' in block and block['result'] in vars: - vars[n]=appenddecl(vars[n],vars[block['result']]) - if 'prefix' in block: - pr=block['prefix']; ispure=0; isrec=1 - pr1=pr.replace('pure','') - ispure=(not pr==pr1) - pr=pr1.replace('recursive','') - isrec=(not pr==pr1) - m=typespattern[0].match(pr) - if m: - typespec,selector,attr,edecl=cracktypespec0(m.group('this'),m.group('after')) - kindselect,charselect,typename=cracktypespec(typespec,selector) - vars[n]['typespec']=typespec - if kindselect: - if 'kind' in kindselect: - try: - kindselect['kind'] = eval(kindselect['kind'],{},params) - except: - pass - vars[n]['kindselector']=kindselect - if charselect: vars[n]['charselector']=charselect - if typename: vars[n]['typename']=typename - if ispure: vars[n]=setattrspec(vars[n],'pure') - if isrec: vars[n]=setattrspec(vars[n],'recursive') - else: - outmess('analyzevars: prefix (%s) were not used\n'%`block['prefix']`) - if not block['block'] in ['module','pythonmodule','python module','block data']: - if 'commonvars' in block: - neededvars=copy.copy(block['args']+block['commonvars']) - else: - neededvars=copy.copy(block['args']) - for n in vars.keys(): - if l_or(isintent_callback,isintent_aux)(vars[n]): - neededvars.append(n) - if 'entry' in block: - neededvars.extend(block['entry'].keys()) - for k in block['entry'].keys(): - for n in block['entry'][k]: - if n not in neededvars: - neededvars.append(n) - if block['block']=='function': - if 'result' in block: - neededvars.append(block['result']) - else: - neededvars.append(block['name']) - if block['block'] in ['subroutine','function']: - name = block['name'] - if name in vars and 'intent' in vars[name]: - block['intent'] = vars[name]['intent'] - if block['block'] == 'type': - neededvars.extend(vars.keys()) - for n in vars.keys(): - if n not in neededvars: - del vars[n] - return vars - -analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z',re.I) -def expr2name(a, block, args=[]): - orig_a = a 
- a_is_expr = not analyzeargs_re_1.match(a) - if a_is_expr: # `a` is an expression - implicitrules,attrrules=buildimplicitrules(block) - at=determineexprtype(a,block['vars'],implicitrules) - na='e_' - for c in a: - c = c.lower() - if c not in string.lowercase+string.digits: c='_' - na=na+c - if na[-1]=='_': na=na+'e' - else: na=na+'_e' - a=na - while a in block['vars'] or a in block['args']: - a=a+'r' - if a in args: - k = 1 - while a + str(k) in args: - k = k + 1 - a = a + str(k) - if a_is_expr: - block['vars'][a]=at - else: - if a not in block['vars']: - if orig_a in block['vars']: - block['vars'][a] = block['vars'][orig_a] - else: - block['vars'][a]={} - if 'externals' in block and orig_a in block['externals']+block['interfaced']: - block['vars'][a]=setattrspec(block['vars'][a],'external') - return a - -def analyzeargs(block): - setmesstext(block) - implicitrules,attrrules=buildimplicitrules(block) - if 'args' not in block: - block['args']=[] - args=[] - for a in block['args']: - a = expr2name(a, block, args) - args.append(a) - block['args']=args - if 'entry' in block: - for k,args1 in block['entry'].items(): - for a in args1: - if a not in block['vars']: - block['vars'][a]={} - - for b in block['body']: - if b['name'] in args: - if 'externals' not in block: - block['externals']=[] - if b['name'] not in block['externals']: - block['externals'].append(b['name']) - if 'result' in block and block['result'] not in block['vars']: - block['vars'][block['result']]={} - return block - -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z',re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P[\w]+)|)\Z',re.I) -determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P[\w]+)|)\Z',re.I) -determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I) -determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z',re.I) -def _ensure_exprdict(r): - if type(r) is type(0): - return {'typespec':'integer'} - if type(r) is type(0.0): - return {'typespec':'real'} - if 
type(r) is type(0j): - return {'typespec':'complex'} - assert type(r) is type({}),`r` - return r - -def determineexprtype(expr,vars,rules={}): - if expr in vars: - return _ensure_exprdict(vars[expr]) - expr=expr.strip() - if determineexprtype_re_1.match(expr): - return {'typespec':'complex'} - m=determineexprtype_re_2.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`) - return {'typespec':'integer'} - m = determineexprtype_re_3.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`) - return {'typespec':'real'} - for op in ['+','-','*','/']: - for e in [x.strip() for x in markoutercomma(expr,comma=op).split('@'+op+'@')]: - if e in vars: - return _ensure_exprdict(vars[e]) - t={} - if determineexprtype_re_4.match(expr): # in parenthesis - t=determineexprtype(expr[1:-1],vars,rules) - else: - m = determineexprtype_re_5.match(expr) - if m: - rn=m.group('name') - t=determineexprtype(m.group('name'),vars,rules) - if t and 'attrspec' in t: - del t['attrspec'] - if not t: - if rn[0] in rules: - return _ensure_exprdict(rules[rn[0]]) - if expr[0] in '\'"': - return {'typespec':'character','charselector':{'*':'*'}} - if not t: - outmess('determineexprtype: could not determine expressions (%s) type.\n'%(`expr`)) - return t - -###### -def crack2fortrangen(block,tab='\n', as_interface=False): - global skipfuncs, onlyfuncs - setmesstext(block) - ret='' - if isinstance(block, list): - for g in block: - if g and g['block'] in ['function','subroutine']: - if g['name'] in skipfuncs: - continue - if onlyfuncs and g['name'] not in onlyfuncs: - continue - ret=ret+crack2fortrangen(g,tab,as_interface=as_interface) - return ret - prefix='' - name='' - args='' - blocktype=block['block'] - if blocktype=='program': return '' - argsl = [] - if 'name' in block: - name=block['name'] - if 'args' in block: - 
vars = block['vars'] - for a in block['args']: - a = expr2name(a, block, argsl) - if not isintent_callback(vars[a]): - argsl.append(a) - if block['block']=='function' or argsl: - args='(%s)'%','.join(argsl) - f2pyenhancements = '' - if 'f2pyenhancements' in block: - for k in block['f2pyenhancements'].keys(): - f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k]) - intent_lst = block.get('intent',[])[:] - if blocktype=='function' and 'callback' in intent_lst: - intent_lst.remove('callback') - if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s'%\ - (f2pyenhancements,tab+tabchar, - ','.join(intent_lst),name) - use='' - if 'use' in block: - use=use2fortran(block['use'],tab+tabchar) - common='' - if 'common' in block: - common=common2fortran(block['common'],tab+tabchar) - if name=='unknown_interface': name='' - result='' - if 'result' in block: - result=' result (%s)'%block['result'] - if block['result'] not in argsl: - argsl.append(block['result']) - #if 'prefix' in block: - # prefix=block['prefix']+' ' - body=crack2fortrangen(block['body'],tab+tabchar) - vars=vars2fortran(block,block['vars'],argsl,tab+tabchar, as_interface=as_interface) - mess='' - if 'from' in block and not as_interface: - mess='! 
in %s'%block['from'] - if 'entry' in block: - entry_stmts = '' - for k,i in block['entry'].items(): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts,tab+tabchar,k,','.join(i)) - body = body + entry_stmts - if blocktype=='block data' and name=='_BLOCK_DATA_': - name = '' - ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab,prefix,blocktype,name,args,result,mess,f2pyenhancements,use,vars,common,body,tab,blocktype,name) - return ret - -def common2fortran(common,tab=''): - ret='' - for k in common.keys(): - if k=='_BLNK_': - ret='%s%scommon %s'%(ret,tab,','.join(common[k])) - else: - ret='%s%scommon /%s/ %s'%(ret,tab,k,','.join(common[k])) - return ret - -def use2fortran(use,tab=''): - ret='' - for m in use.keys(): - ret='%s%suse %s,'%(ret,tab,m) - if use[m]=={}: - if ret and ret[-1]==',': ret=ret[:-1] - continue - if 'only' in use[m] and use[m]['only']: - ret='%s only:'%(ret) - if 'map' in use[m] and use[m]['map']: - c=' ' - for k in use[m]['map'].keys(): - if k==use[m]['map'][k]: - ret='%s%s%s'%(ret,c,k); c=',' - else: - ret='%s%s%s=>%s'%(ret,c,k,use[m]['map'][k]); c=',' - if ret and ret[-1]==',': ret=ret[:-1] - return ret - -def true_intent_list(var): - lst = var['intent'] - ret = [] - for intent in lst: - try: - exec('c = isintent_%s(var)' % intent) - except NameError: - c = 0 - if c: - ret.append(intent) - return ret - -def vars2fortran(block,vars,args,tab='', as_interface=False): - """ - TODO: - public sub - ... 
- """ - setmesstext(block) - ret='' - nout=[] - for a in args: - if a in block['vars']: - nout.append(a) - if 'commonvars' in block: - for a in block['commonvars']: - if a in vars: - if a not in nout: - nout.append(a) - else: - errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a) - if 'varnames' in block: - nout.extend(block['varnames']) - if not as_interface: - for a in vars.keys(): - if a not in nout: - nout.append(a) - for a in nout: - if 'depend' in vars[a]: - for d in vars[a]['depend']: - if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: - errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a,d)) - if 'externals' in block and a in block['externals']: - if isintent_callback(vars[a]): - ret='%s%sintent(callback) %s'%(ret,tab,a) - ret='%s%sexternal %s'%(ret,tab,a) - if isoptional(vars[a]): - ret='%s%soptional %s'%(ret,tab,a) - if a in vars and 'typespec' not in vars[a]: - continue - cont=1 - for b in block['body']: - if a==b['name'] and b['block']=='function': - cont=0;break - if cont: - continue - if a not in vars: - show(vars) - outmess('vars2fortran: No definition for argument "%s".\n'%a) - continue - if a==block['name'] and not block['block']=='function': - continue - if 'typespec' not in vars[a]: - if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: - if a in args: - ret='%s%sexternal %s'%(ret,tab,a) - continue - show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n'%a) - continue - vardef=vars[a]['typespec'] - if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef,vars[a]['typename']) - selector={} - if 'kindselector' in vars[a]: - selector=vars[a]['kindselector'] - elif 'charselector' in vars[a]: - selector=vars[a]['charselector'] - if '*' in selector: - if selector['*'] in ['*',':']: - vardef='%s*(%s)'%(vardef,selector['*']) - else: - vardef='%s*%s'%(vardef,selector['*']) - else: - if 'len' in selector: - 
vardef='%s(len=%s'%(vardef,selector['len']) - if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef,selector['kind']) - else: - vardef='%s)'%(vardef) - elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef,selector['kind']) - c=' ' - if 'attrspec' in vars[a]: - attr=[] - for l in vars[a]['attrspec']: - if l not in ['external']: - attr.append(l) - if attr: - vardef='%s, %s'%(vardef,','.join(attr)) - c=',' - if 'dimension' in vars[a]: -# if not isintent_c(vars[a]): -# vars[a]['dimension'].reverse() - vardef='%s%sdimension(%s)'%(vardef,c,','.join(vars[a]['dimension'])) - c=',' - if 'intent' in vars[a]: - lst = true_intent_list(vars[a]) - if lst: - vardef='%s%sintent(%s)'%(vardef,c,','.join(lst)) - c=',' - if 'check' in vars[a]: - vardef='%s%scheck(%s)'%(vardef,c,','.join(vars[a]['check'])) - c=',' - if 'depend' in vars[a]: - vardef='%s%sdepend(%s)'%(vardef,c,','.join(vars[a]['depend'])) - c=',' - if '=' in vars[a]: - v = vars[a]['='] - if vars[a]['typespec'] in ['complex','double complex']: - try: - v = eval(v) - v = '(%s,%s)' % (v.real,v.imag) - except: - pass - vardef='%s :: %s=%s'%(vardef,a,v) - else: - vardef='%s :: %s'%(vardef,a) - ret='%s%s%s'%(ret,tab,vardef) - return ret -###### - -def crackfortran(files): - global usermodules - outmess('Reading fortran codes...\n',0) - readfortrancode(files,crackline) - outmess('Post-processing...\n',0) - usermodules=[] - postlist=postcrack(grouplist[0]) - outmess('Post-processing (stage 2)...\n',0) - postlist=postcrack2(postlist) - return usermodules+postlist - -def crack2fortran(block): - global f2py_version - pyf=crack2fortrangen(block)+'\n' - header="""! -*- f90 -*- -! Note: the context of this file is case sensitive. -""" - footer=""" -! This file was auto-generated with f2py (version:%s). -! 
See http://cens.ioc.ee/projects/f2py2e/ -"""%(f2py_version) - return header+pyf+footer - -if __name__ == "__main__": - files=[] - funcs=[] - f=1;f2=0;f3=0 - showblocklist=0 - for l in sys.argv[1:]: - if l=='': pass - elif l[0]==':': - f=0 - elif l=='-quiet': - quiet=1 - verbose=0 - elif l=='-verbose': - verbose=2 - quiet=0 - elif l=='-fix': - if strictf77: - outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n',0) - skipemptyends=1 - sourcecodeform='fix' - elif l=='-skipemptyends': - skipemptyends=1 - elif l=='--ignore-contains': - ignorecontains=1 - elif l=='-f77': - strictf77=1 - sourcecodeform='fix' - elif l=='-f90': - strictf77=0 - sourcecodeform='free' - skipemptyends=1 - elif l=='-h': - f2=1 - elif l=='-show': - showblocklist=1 - elif l=='-m': - f3=1 - elif l[0]=='-': - errmess('Unknown option %s\n'%`l`) - elif f2: - f2=0 - pyffilename=l - elif f3: - f3=0 - f77modulename=l - elif f: - try: - open(l).close() - files.append(l) - except IOError,detail: - errmess('IOError: %s\n'%str(detail)) - else: - funcs.append(l) - if not strictf77 and f77modulename and not skipemptyends: - outmess("""\ - Warning: You have specifyied module name for non Fortran 77 code - that should not need one (expect if you are scanning F90 code - for non module blocks but then you should use flag -skipemptyends - and also be sure that the files do not contain programs without program statement). 
-""",0) - - postlist=crackfortran(files,funcs) - if pyffilename: - outmess('Writing fortran code to file %s\n'%`pyffilename`,0) - pyf=crack2fortran(postlist) - f=open(pyffilename,'w') - f.write(pyf) - f.close() - if showblocklist: - show(postlist) diff --git a/numpy-1.6.2/numpy/f2py/diagnose.py b/numpy-1.6.2/numpy/f2py/diagnose.py deleted file mode 100644 index 3b517a5c9b..0000000000 --- a/numpy-1.6.2/numpy/f2py/diagnose.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -import tempfile - -def run_command(cmd): - print 'Running %r:' % (cmd) - s = os.system(cmd) - print '------' -def run(): - _path = os.getcwd() - os.chdir(tempfile.gettempdir()) - print '------' - print 'os.name=%r' % (os.name) - print '------' - print 'sys.platform=%r' % (sys.platform) - print '------' - print 'sys.version:' - print sys.version - print '------' - print 'sys.prefix:' - print sys.prefix - print '------' - print 'sys.path=%r' % (':'.join(sys.path)) - print '------' - - try: - import numpy - has_newnumpy = 1 - except ImportError: - print 'Failed to import new numpy:', sys.exc_value - has_newnumpy = 0 - - try: - from numpy.f2py import f2py2e - has_f2py2e = 1 - except ImportError: - print 'Failed to import f2py2e:',sys.exc_value - has_f2py2e = 0 - - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError: - print 'Failed to import numpy_distutils:',sys.exc_value - has_numpy_distutils = 0 - - if has_newnumpy: - try: - print 'Found new numpy version %r in %s' % \ - (numpy.__version__, numpy.__file__) - except Exception,msg: - print 'error:', msg - print '------' - - if has_f2py2e: - try: - print 'Found f2py2e version %r in %s' % \ - (f2py2e.__version__.version,f2py2e.__file__) - except Exception,msg: - print 'error:',msg - print '------' - - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print 'Found numpy.distutils version %r in %r' % (\ - 
numpy.distutils.__version__, - numpy.distutils.__file__) - else: - print 'Found numpy_distutils version %r in %r' % (\ - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__) - print '------' - except Exception,msg: - print 'error:',msg - print '------' - try: - if has_numpy_distutils == 1: - print 'Importing numpy_distutils.command.build_flib ...', - import numpy_distutils.command.build_flib as build_flib - print 'ok' - print '------' - try: - print 'Checking availability of supported Fortran compilers:' - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print '------' - except Exception,msg: - print 'error:',msg - print '------' - except Exception,msg: - print 'error:',msg,'(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)' - print '------' - try: - if has_numpy_distutils == 2: - print 'Importing numpy.distutils.fcompiler ...', - import numpy.distutils.fcompiler as fcompiler - else: - print 'Importing numpy_distutils.fcompiler ...', - import numpy_distutils.fcompiler as fcompiler - print 'ok' - print '------' - try: - print 'Checking availability of supported Fortran compilers:' - fcompiler.show_fcompilers() - print '------' - except Exception,msg: - print 'error:',msg - print '------' - except Exception,msg: - print 'error:',msg - print '------' - try: - if has_numpy_distutils == 2: - print 'Importing numpy.distutils.cpuinfo ...', - from numpy.distutils.cpuinfo import cpuinfo - print 'ok' - print '------' - else: - try: - print 'Importing numpy_distutils.command.cpuinfo ...', - from numpy_distutils.command.cpuinfo import cpuinfo - print 'ok' - print '------' - except Exception,msg: - print 'error:',msg,'(ignore it)' - print 'Importing numpy_distutils.cpuinfo ...', - from numpy_distutils.cpuinfo import cpuinfo - print 'ok' - print '------' - cpu = cpuinfo() - print 'CPU information:', - for name in dir(cpuinfo): - if name[0]=='_' and name[1]!='_' and 
getattr(cpu,name[1:])(): - print name[1:], - print '------' - except Exception,msg: - print 'error:',msg - print '------' - os.chdir(_path) -if __name__ == "__main__": - run() diff --git a/numpy-1.6.2/numpy/f2py/docs/FAQ.txt b/numpy-1.6.2/numpy/f2py/docs/FAQ.txt deleted file mode 100644 index 416560e920..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/FAQ.txt +++ /dev/null @@ -1,615 +0,0 @@ - -====================================================================== - F2PY Frequently Asked Questions -====================================================================== - -.. contents:: - -General information -=================== - -Q: How to get started? ----------------------- - -First, install__ F2PY. Then check that F2PY installation works -properly (see below__). Try out a `simple example`__. - -Read `F2PY Users Guide and Reference Manual`__. It contains lots -of complete examples. - -If you have any questions/problems when using F2PY, don't hesitate to -turn to `F2PY users mailing list`__ or directly to me. - -__ index.html#installation -__ #testing -__ index.html#usage -__ usersguide/index.html -__ index.html#mailing-list - -Q: When to report bugs? ------------------------ - -* If F2PY scanning fails on Fortran sources that otherwise compile - fine. - -* After checking that you have the latest version of F2PY from its - CVS. It is possible that a bug has been fixed already. See also the - log entries in the file `HISTORY.txt`_ (`HISTORY.txt in CVS`_). - -* After checking that your Python and Numerical Python installations - work correctly. - -* After checking that your C and Fortran compilers work correctly. - - -Q: How to report bugs? ----------------------- - -You can send bug reports directly to me. Please, include information -about your platform (operating system, version) and -compilers/linkers, e.g. the output (both stdout/stderr) of -:: - - python -c 'import f2py2e.diagnose;f2py2e.diagnose.run()' - -Feel free to add any other relevant information. 
However, avoid -sending the output of F2PY generated ``.pyf`` files (unless they are -manually modified) or any binary files like shared libraries or object -codes. - -While reporting bugs, you may find the following notes useful: - -* `How To Ask Questions The Smart Way`__ by E. S. Raymond and R. Moen. - -* `How to Report Bugs Effectively`__ by S. Tatham. - -__ http://www.catb.org/~esr/faqs/smart-questions.html -__ http://www.chiark.greenend.org.uk/~sgtatham/bugs.html - -Installation -============ - -Q: How to use F2PY with different Python versions? --------------------------------------------------- - -Run the installation command using the corresponding Python -executable. For example, -:: - - python2.1 setup.py install - -installs the ``f2py`` script as ``f2py2.1``. - -See `Distutils User Documentation`__ for more information how to -install Python modules to non-standard locations. - -__ http://www.python.org/sigs/distutils-sig/doc/inst/inst.html - - -Q: Why F2PY is not working after upgrading? -------------------------------------------- - -If upgrading from F2PY version 2.3.321 or earlier then remove all f2py -specific files from ``/path/to/python/bin`` directory before -running installation command. - -Q: How to get/upgrade numpy_distutils when using F2PY from CVS? ---------------------------------------------------------------- - -To get numpy_distutils from SciPy CVS repository, run -:: - - cd cvs/f2py2e/ - make numpy_distutils - -This will checkout numpy_distutils to the current directory. - -You can upgrade numpy_distutils by executing -:: - - cd cvs/f2py2e/numpy_distutils - cvs update -Pd - -and install it by executing -:: - - cd cvs/f2py2e/numpy_distutils - python setup_numpy_distutils.py install - -In most of the time, f2py2e and numpy_distutils can be upgraded -independently. - -Testing -======= - -Q: How to test if F2PY is installed correctly? ----------------------------------------------- - -Run -:: - - f2py - -without arguments. 
If F2PY is installed correctly then it should print -the usage information for f2py. - -Q: How to test if F2PY is working correctly? --------------------------------------------- - -For a quick test, try out an example problem from Usage__ -section in `README.txt`_. - -__ index.html#usage - -For running F2PY unit tests, see `TESTING.txt`_. - - -Q: How to run tests and examples in f2py2e/test-suite/ directory? ---------------------------------------------------------------------- - -You shouldn't. These tests are obsolete and I have no intention to -make them work. They will be removed in future. - - -Compiler/Platform-specific issues -================================= - -Q: What are supported platforms and compilers? ----------------------------------------------- - -F2PY is developed on Linux system with a GCC compiler (versions -2.95.x, 3.x). Fortran 90 related hooks are tested against Intel -Fortran Compiler. F2PY should work under any platform where Python and -Numeric are installed and has supported Fortran compiler installed. 
- -To see a list of supported compilers, execute:: - - f2py -c --help-fcompiler - -Example output:: - - List of available Fortran compilers: - --fcompiler=gnu GNU Fortran Compiler (3.3.4) - --fcompiler=intel Intel Fortran Compiler for 32-bit apps (8.0) - List of unavailable Fortran compilers: - --fcompiler=absoft Absoft Corp Fortran Compiler - --fcompiler=compaq Compaq Fortran Compiler - --fcompiler=compaqv DIGITAL|Compaq Visual Fortran Compiler - --fcompiler=hpux HP Fortran 90 Compiler - --fcompiler=ibm IBM XL Fortran Compiler - --fcompiler=intele Intel Fortran Compiler for Itanium apps - --fcompiler=intelev Intel Visual Fortran Compiler for Itanium apps - --fcompiler=intelv Intel Visual Fortran Compiler for 32-bit apps - --fcompiler=lahey Lahey/Fujitsu Fortran 95 Compiler - --fcompiler=mips MIPSpro Fortran Compiler - --fcompiler=nag NAGWare Fortran 95 Compiler - --fcompiler=pg Portland Group Fortran Compiler - --fcompiler=sun Sun|Forte Fortran 95 Compiler - --fcompiler=vast Pacific-Sierra Research Fortran 90 Compiler - List of unimplemented Fortran compilers: - --fcompiler=f Fortran Company/NAG F Compiler - For compiler details, run 'config_fc --verbose' setup command. - - -Q: How to use the F compiler in F2PY? -------------------------------------- - -Read `f2py2e/doc/using_F_compiler.txt`__. It describes why the F -compiler cannot be used in a normal way (i.e. using ``-c`` switch) to -build F2PY generated modules. It also gives a workaround to this -problem. - -__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/doc/using_F_compiler.txt?rev=HEAD&content-type=text/vnd.viewcvs-markup - -Q: How to use F2PY under Windows? ---------------------------------- - -F2PY can be used both within Cygwin__ and MinGW__ environments under -Windows, F2PY can be used also in Windows native terminal. -See the section `Setting up environment`__ for Cygwin and MinGW. 
- -__ http://cygwin.com/ -__ http://www.mingw.org/ -__ http://cens.ioc.ee/~pearu/numpy/BUILD_WIN32.html#setting-up-environment - -Install numpy_distutils and F2PY. Win32 installers of these packages -are provided in `F2PY Download`__ section. - -__ http://cens.ioc.ee/projects/f2py2e/#download - -Use ``--compiler=`` and ``--fcompiler`` F2PY command line switches to -to specify which C and Fortran compilers F2PY should use, respectively. - -Under MinGW environment, ``mingw32`` is default for a C compiler. - -Supported and Unsupported Features -================================== - -Q: Does F2PY support ``ENTRY`` statements? ------------------------------------------- - -Yes, starting at F2PY version higher than 2.39.235_1706. - -Q: Does F2PY support derived types in F90 code? ------------------------------------------------ - -Not yet. However I do have plans to implement support for F90 TYPE -constructs in future. But note that the task in non-trivial and may -require the next edition of F2PY for which I don't have resources to -work with at the moment. - -Jeffrey Hagelberg from LLNL has made progress on adding -support for derived types to f2py. He writes: - - At this point, I have a version of f2py that supports derived types - for most simple cases. I have multidimensional arrays of derived - types and allocatable arrays of derived types working. I'm just now - starting to work on getting nested derived types to work. I also - haven't tried putting complex number in derived types yet. - -Hopefully he can contribute his changes to f2py soon. - -Q: Does F2PY support pointer data in F90 code? ------------------------------------------------ - -No. I have never needed it and I haven't studied if there are any -obstacles to add pointer data support to F2PY. - -Q: What if Fortran 90 code uses ``(kind=KIND(..))``? 
---------------------------------------------------------------- - -Currently, F2PY can handle only ``(kind=)`` -declarations where ```` is a numeric integer (e.g. 1, 2, -4,...) but not a function call ``KIND(..)`` or any other -expression. F2PY needs to know what would be the corresponding C type -and a general solution for that would be too complicated to implement. - -However, F2PY provides a hook to overcome this difficulty, namely, -users can define their own to maps. For -example, if Fortran 90 code contains:: - - REAL(kind=KIND(0.0D0)) ... - -then create a file ``.f2py_f2cmap`` (into the working directory) -containing a Python dictionary:: - - {'real':{'KIND(0.0D0)':'double'}} - -for instance. - -Or more generally, the file ``.f2py_f2cmap`` must contain a dictionary -with items:: - - : {:} - -that defines mapping between Fortran type:: - - ([kind=]) - -and the corresponding ````. ```` can be one of the -following:: - - char - signed_char - short - int - long_long - float - double - long_double - complex_float - complex_double - complex_long_double - string - -For more information, see ``f2py2e/capi_maps.py``. - -Related software -================ - -Q: How F2PY distinguishes from Pyfort? --------------------------------------- - -F2PY and Pyfort have very similar aims and ideology of how they are -targeted. Both projects started to evolve in the same year 1999 -independently. When we discovered each others projects, a discussion -started to join the projects but that unfortunately failed for -various reasons, e.g. both projects had evolved too far that merging -the tools would have been impractical and giving up the efforts that -the developers of both projects have made was unacceptable to both -parties. And so, nowadays we have two tools for connecting Fortran -with Python and this fact will hardly change in near future. To decide -which one to choose is a matter of taste, I can only recommend to try -out both to make up your choice. 
- -At the moment F2PY can handle more wrapping tasks than Pyfort, -e.g. with F2PY one can wrap Fortran 77 common blocks, Fortran 90 -module routines, Fortran 90 module data (including allocatable -arrays), one can call Python from Fortran, etc etc. F2PY scans Fortran -codes to create signature (.pyf) files. F2PY is free from most of the -limitations listed in in `the corresponding section of Pyfort -Reference Manual`__. - -__ http://pyfortran.sourceforge.net/pyfort/pyfort_reference.htm#pgfId-296925 - -There is a conceptual difference on how F2PY and Pyfort handle the -issue of different data ordering in Fortran and C multi-dimensional -arrays. Pyfort generated wrapper functions have optional arguments -TRANSPOSE and MIRROR that can be used to control explicitly how the array -arguments and their dimensions are passed to Fortran routine in order -to deal with the C/Fortran data ordering issue. F2PY generated wrapper -functions hide the whole issue from an end-user so that translation -between Fortran and C/Python loops and array element access codes is -one-to-one. How the F2PY generated wrappers deal with the issue is -determined by a person who creates a signature file via using -attributes like ``intent(c)``, ``intent(copy|overwrite)``, -``intent(inout|in,out|inplace)`` etc. - -For example, let's consider a typical usage of both F2PY and Pyfort -when wrapping the following simple Fortran code: - -.. include:: simple.f - :literal: - -The comment lines starting with ``cf2py`` are read by F2PY (so that we -don't need to generate/handwrite an intermediate signature file in -this simple case) while for a Fortran compiler they are just comment -lines. - -And here is a Python version of the Fortran code: - -.. include:: pytest.py - :literal: - -To generate a wrapper for subroutine ``foo`` using F2PY, execute:: - - $ f2py -m f2pytest simple.f -c - -that will generate an extension module ``f2pytest`` into the current -directory. 
- -To generate a wrapper using Pyfort, create the following file - -.. include:: pyforttest.pyf - :literal: - -and execute:: - - $ pyfort pyforttest - -In Pyfort GUI add ``simple.f`` to the list of Fortran sources and -check that the signature file is in free format. And then copy -``pyforttest.so`` from the build directory to the current directory. - -Now, in Python - -.. include:: simple_session.dat - :literal: - -Q: Can Pyfort .pyf files used with F2PY and vice versa? -------------------------------------------------------- - -After some simple modifications, yes. You should take into account the -following differences in Pyfort and F2PY .pyf files. - -+ F2PY signature file contains ``python module`` and ``interface`` - blocks that are equivalent to Pyfort ``module`` block usage. - -+ F2PY attribute ``intent(inplace)`` is equivalent to Pyfort - ``intent(inout)``. F2PY ``intent(inout)`` is a strict (but safe) - version of ``intent(inplace)``, any mismatch in arguments with - expected type, size, or contiguouness will trigger an exception - while ``intent(inplace)`` (dangerously) modifies arguments - attributes in-place. - -Misc -==== - -Q: How to establish which Fortran compiler F2PY will use? ---------------------------------------------------------- - -This question may be releavant when using F2PY in Makefiles. 
Here -follows a script demonstrating how to determine which Fortran compiler -and flags F2PY will use:: - - # Using post-0.2.2 numpy_distutils - from numpy_distutils.fcompiler import new_fcompiler - compiler = new_fcompiler() # or new_fcompiler(compiler='intel') - compiler.dump_properties() - - # Using pre-0.2.2 numpy_distutils - import os - from numpy_distutils.command.build_flib import find_fortran_compiler - def main(): - fcompiler = os.environ.get('FC_VENDOR') - fcompiler_exec = os.environ.get('F77') - f90compiler_exec = os.environ.get('F90') - fc = find_fortran_compiler(fcompiler, - fcompiler_exec, - f90compiler_exec, - verbose = 0) - print 'FC=',fc.f77_compiler - print 'FFLAGS=',fc.f77_switches - print 'FOPT=',fc.f77_opt - if __name__ == "__main__": - main() - -Users feedback -============== - -Q: Where to find additional information on using F2PY? ------------------------------------------------------- - -There are several F2PY related tutorials, slides, papers, etc -available: - -+ `Fortran to Python Interface Generator with an Application to - Aerospace Engineering`__ by P. Peterson, J. R. R. A. Martins, and - J. J. Alonso in `In Proceedings of the 9th International Python - Conference`__, Long Beach, California, 2001. - -__ http://www.python9.org/p9-cdrom/07/index.htm -__ http://www.python9.org/ - -+ Section `Adding Fortran90 code`__ in the UG of `The Bolometer Data - Analysis Project`__. - -__ http://www.astro.rub.de/laboca/download/boa_master_doc/7_4Adding_Fortran90_code.html -__ http://www.openboa.de/ - -+ Powerpoint presentation `Python for Scientific Computing`__ by Eric - Jones in `The Ninth International Python Conference`__. - -__ http://www.python9.org/p9-jones.ppt -__ http://www.python9.org/ - -+ Paper `Scripting a Large Fortran Code with Python`__ by Alvaro Caceres - Calleja in `International Workshop on Software Engineering for High - Performance Computing System Applications`__. 
- -__ http://csdl.ics.hawaii.edu/se-hpcs/pdf/calleja.pdf -__ http://csdl.ics.hawaii.edu/se-hpcs/ - -+ Section `Automatic building of C/Fortran extension for Python`__ by - Simon Lacoste-Julien in `Summer 2002 Report about Hybrid Systems - Modelling`__. - -__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html#tth_sEc3.4 -__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html - -+ `Scripting for Computational Science`__ by Hans Petter Langtangen - (see the `Mixed language programming`__ and `NumPy array programming`__ - sections for examples on using F2PY). - -__ http://www.ifi.uio.no/~inf3330/lecsplit/ -__ http://www.ifi.uio.no/~inf3330/lecsplit/slide662.html -__ http://www.ifi.uio.no/~inf3330/lecsplit/slide718.html - -+ Chapters 5 and 9 of `Python Scripting for Computational Science`__ - by H. P. Langtangen for case studies on using F2PY. - -__ http://www.springeronline.com/3-540-43508-5 - -+ Section `Fortran Wrapping`__ in `Continuity`__, a computational tool - for continuum problems in bioengineering and physiology. - -__ http://www.continuity.ucsd.edu/cont6_html/docs_fram.html -__ http://www.continuity.ucsd.edu/ - -+ Presentation `PYFORT and F2PY: 2 ways to bind C and Fortran with Python`__ - by Reiner Vogelsang. - -__ http://www.prism.enes.org/WPs/WP4a/Slides/pyfort/pyfort.html - -+ Lecture slides of `Extending Python: speed it up`__. - -__ http://www.astro.uni-bonn.de/~heith/lecture_pdf/friedrich5.pdf - -+ Wiki topics on `Wrapping Tools`__ and `Wrapping Bemchmarks`__ for Climate - System Center at the University of Chicago. - -__ https://geodoc.uchicago.edu/climatewiki/DiscussWrappingTools -__ https://geodoc.uchicago.edu/climatewiki/WrappingBenchmarks - -+ `Performance Python with Weave`__ by Prabhu Ramachandran. 
- -__ http://www.numpy.org/documentation/weave/weaveperformance.html - -+ `How To Install py-f2py on Mac OSX`__ - -__ http://py-f2py.darwinports.com/ - -Please, let me know if there are any other sites that document F2PY -usage in one or another way. - -Q: What projects use F2PY? --------------------------- - -+ `SciPy: Scientific tools for Python`__ - -__ http://www.numpy.org/ - -+ `The Bolometer Data Analysis Project`__ - -__ http://www.openboa.de/ - -+ `pywavelet`__ - -__ http://www.met.wau.nl/index.html?http://www.met.wau.nl/medewerkers/moenea/python/pywavelet.html - -+ `PyARTS: an ARTS related Python package`__. - -__ http://www.met.ed.ac.uk/~cory/PyARTS/ - -+ `Python interface to PSPLINE`__, a collection of Spline and - Hermite interpolation tools for 1D, 2D, and 3D datasets on - rectilinear grids. - -__ http://pypspline.sourceforge.net - -+ `Markovian Analysis Package for Python`__. - -__ http://pymc.sourceforge.net - -+ `Modular toolkit for Data Processing (MDP)`__ - -__ http://mdp-toolkit.sourceforge.net/ - - -Please, send me a note if you are using F2PY in your project. - -Q: What people think about F2PY? --------------------------------- - -*F2PY is GOOD*: - -Here are some comments people have posted to f2py mailing list and c.l.py: - -+ Ryan Krauss: I really appreciate f2py. It seems weird to say, but I - am excited about relearning FORTRAN to compliment my python stuff. - -+ Fabien Wahl: f2py is great, and is used extensively over here... - -+ Fernando Perez: Anyway, many many thanks for this amazing tool. - - I haven't used pyfort, but I can definitely vouch for the amazing quality of - f2py. And since f2py is actively used by numpy, it won't go unmaintained. - It's quite impressive, and very easy to use. - -+ Kevin Mueller: First off, thanks to those responsible for F2PY; - its been an integral tool of my research for years now. - -+ David Linke: Best regards and thanks for the great tool! - -+ Perrin Meyer: F2Py is really useful! 
- -+ Hans Petter Langtangen: First of all, thank you for developing - F2py. This is a very important contribution to the scientific - computing community. We are using F2py a lot and are very happy with - it. - -+ Berthold Höllmann: Thank's alot. It seems it is also working in my - 'real' application :-) - -+ John Hunter: At first I wrapped them with f2py (unbelievably easy!)... - -+ Cameron Laird: Among many other features, Python boasts a mature - f2py, which makes it particularly rewarding to yoke Fortran- and - Python-coded modules into finished applications. - -+ Ryan Gutenkunst: f2py is sweet magic. - -*F2PY is BAD*: - -+ `Is it worth using on a large scale python drivers for Fortran - subroutines, interfaced with f2py?`__ - -__ http://sepwww.stanford.edu/internal/computing/python.html - -Additional comments on F2PY, good or bad, are welcome! - -.. References: -.. _README.txt: index.html -.. _HISTORY.txt: HISTORY.html -.. _HISTORY.txt in CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup -.. _TESTING.txt: TESTING.html diff --git a/numpy-1.6.2/numpy/f2py/docs/HISTORY.txt b/numpy-1.6.2/numpy/f2py/docs/HISTORY.txt deleted file mode 100644 index 72b683eb01..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/HISTORY.txt +++ /dev/null @@ -1,1044 +0,0 @@ -.. -*- rest -*- - -========================= - F2PY History -========================= - -:Author: Pearu Peterson -:Web-site: http://cens.ioc.ee/projects/f2py2e/ -:Date: $Date: 2005/09/16 08:36:45 $ -:Revision: $Revision: 1.191 $ - -.. Contents:: - -Release 2.46.243 -===================== - -* common_rules.py - - - Fixed compiler warnings. - -* fortranobject.c - - - Fixed another dims calculation bug. - - Fixed dims calculation bug and added the corresponding check. - - Accept higher dimensional arrays if their effective rank matches. - Effective rank is multiplication of non-unit dimensions. 
- -* f2py2e.py - - - Added support for numpy.distutils version 0.4.0. - -* Documentation - - - Added example about ``intent(callback,hide)`` usage. Updates. - - Updated FAQ. - -* cb_rules.py - - - Fixed missing need kw error. - - Fixed getting callback non-existing extra arguments. - - External callback functions and extra_args can be set via - ext.module namespace. - - Avoid crash when external callback function is not set. - -* rules.py - - - Enabled ``intent(out)`` for ``intent(aux)`` non-complex scalars. - - Fixed splitting lines in F90 fixed form mode. - - Fixed FORTRANAME typo, relevant when wrapping scalar functions with - ``--no-wrap-functions``. - - Improved failure handling for callback functions. - - Fixed bug in writting F90 wrapper functions when a line length - is exactly 66. - -* cfuncs.py - - - Fixed dependency issue with typedefs. - - Introduced ``-DUNDERSCORE_G77`` that cause extra underscore to be - used for external names that contain an underscore. - -* capi_maps.py - - - Fixed typos. - - Fixed using complex cb functions. - -* crackfortran.py - - - Introduced parent_block key. Get ``use`` statements recursively - from parent blocks. - - Apply parameter values to kindselectors. - - Fixed bug evaluating ``selected_int_kind`` function. - - Ignore Name and Syntax errors when evaluating scalars. - - Treat ``_intType`` as ```` in get_parameters. - - Added support for F90 line continuation in fix format mode. - - Include optional attribute of external to signature file. - - Add ``entry`` arguments to variable lists. - - Treat \xa0 character as space. - - Fixed bug where __user__ callback subroutine was added to its - argument list. - - In strict 77 mode read only the first 72 columns. - - Fixed parsing ``v(i) = func(r)``. - - Fixed parsing ``integer*4::``. - - Fixed parsing ``1.d-8`` when used as a parameter value. - -Release 2.45.241_1926 -===================== - -* diagnose.py - - - Clean up output. 
- -* cb_rules.py - - - Fixed ``_cpointer`` usage for subroutines. - - Fortran function ``_cpointer`` can be used for callbacks. - -* func2subr.py - - - Use result name when wrapping functions with subroutines. - -* f2py2e.py - - - Fixed ``--help-link`` switch. - - Fixed ``--[no-]lower`` usage with ``-c`` option. - - Added support for ``.pyf.src`` template files. - -* __init__.py - - - Using ``exec_command`` in ``compile()``. - -* setup.py - - - Clean up. - - Disabled ``need_numpy_distutils`` function. From now on it is assumed - that proper version of ``numpy_distutils`` is already installed. - -* capi_maps.py - - - Added support for wrapping unsigned integers. In a .pyf file - ``integer(-1)``, ``integer(-2)``, ``integer(-4)`` correspond to - ``unsigned char``, ``unsigned short``, ``unsigned`` C types, - respectively. - -* tests/c/return_real.py - - - Added tests to wrap C functions returning float/double. - -* fortranobject.c - - - Added ``_cpointer`` attribute to wrapped objects. - -* rules.py - - - ``_cpointer`` feature for wrapped module functions is not - functional at the moment. - - Introduced ``intent(aux)`` attribute. Useful to save a value - of a parameter to auxiliary C variable. Note that ``intent(aux)`` - implies ``intent(c)``. - - Added ``usercode`` section. When ``usercode`` is used in ``python - module`` block twise then the contents of the second multi-line - block is inserted after the definition of external routines. - - Call-back function arguments can be CObjects. - -* cfuncs.py - - - Allow call-back function arguments to be fortran objects. - - Allow call-back function arguments to be built-in functions. - -* crackfortran.py - - - Fixed detection of a function signature from usage example. - - Cleaned up -h output for intent(callback) variables. - - Repair malformed argument list (missing argument name). - - Warn on the usage of multiple attributes without type specification. - - Evaluate only scalars ```` (e.g. not of strings). 
- - Evaluate ```` using parameters name space. - - Fixed resolving `()[result()]` pattern. - - ``usercode`` can be used more than once in the same context. - -Release 2.43.239_1831 -===================== - -* auxfuncs.py - - - Made ``intent(in,inplace)`` to mean ``intent(inplace)``. - -* f2py2e.py - - - Intoduced ``--help-link`` and ``--link-`` - switches to link generated extension module with system - ```` as defined by numpy_distutils/system_info.py. - -* fortranobject.c - - - Patch to make PyArray_CanCastSafely safe on 64-bit machines. - Fixes incorrect results when passing ``array('l')`` to - ``real*8 intent(in,out,overwrite)`` arguments. - -* rules.py - - - Avoid empty continuation lines in Fortran wrappers. - -* cfuncs.py - - - Adding ``\0`` at the end of a space-padded string, fixes tests - on 64-bit Gentoo. - -* crackfortran.py - - - Fixed splitting lines with string parameters. - -Release 2.43.239_1806 -===================== - -* Tests - - - Fixed test site that failed after padding strings with spaces - instead of zeros. - -* Documentation - - - Documented ``intent(inplace)`` attribute. - - Documented ``intent(callback)`` attribute. - - Updated FAQ, added Users Feedback section. - -* cfuncs.py - - - Padding longer (than provided from Python side) strings with spaces - (that is Fortran behavior) instead of nulls (that is C strncpy behavior). - -* f90mod_rules.py - - - Undoing rmbadnames in Python and Fortran layers. - -* common_rules.py - - - Renaming common block items that have names identical to C keywords. - - Fixed wrapping blank common blocks. - -* fortranobject.h - - - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller). - -* fortranobject.c - - - Introduced ``intent(inplace)`` feature. - - Fix numarray reference counts (patch by Todd). - - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller). - - Enabled F2PY_REPORT_ON_ARRAY_COPY for Numarray. - -* capi_maps.py - - - Always normalize .f2py_f2cmap keys to lower case. 
- -* rules.py - - - Disabled ``index`` macro as it conflicts with the one defined - in string.h. - - Moved ``externroutines`` up to make it visible to ``usercode``. - - Fixed bug in f90 code generation: no empty line continuation is - allowed. - - Fixed undefined symbols failure when ``fortranname`` is used - to rename a wrapped function. - - Support for ``entry`` statement. - -* auxfuncs.py - - - Made is* functions more robust with respect to parameters that - have no typespec specified. - - Using ``size_t`` instead of ``int`` as the type of string - length. Fixes issues on 64-bit platforms. - -* setup.py - - - Fixed bug of installing ``f2py`` script as ``.exe`` file. - -* f2py2e.py - - - ``--compiler=`` and ``--fcompiler=`` can be specified at the same time. - -* crackfortran.py - - - Fixed dependency detection for non-intent(in|inout|inplace) arguments. - They must depend on their dimensions, not vice-versa. - - Don't match ``!!f2py`` as a start of f2py directive. - - Only effective intent attributes will be output to ``-h`` target. - - Introduced ``intent(callback)`` to build interface between Python - functions and Fortran external routines. - - Avoid including external arguments to __user__ modules. - - Initial hooks to evaluate ``kind`` and ``selected_int_kind``. - - Evaluating parameters in {char,kind}selectors and applying rmbadname. - - Evaluating parameters using also module parameters. Fixed the order - of parameter evaluation. - - Fixed silly bug: when block name was not lower cased, it was not - recognized correctly. - - Applying mapping '.false.'->'False', '.true.'->'True' to logical - parameters. TODO: Support for logical expressions is needed. - - Added support for multiple statements in one line (separated with semicolon). - - Impl. get_useparameters function for using parameter values from - other f90 modules. - - Applied Bertholds patch to fix bug in evaluating expressions - like ``1.d0/dvar``. - - Fixed bug in reading string parameters. 
- - Evaluating parameters in charselector. Code cleanup. - - Using F90 module parameters to resolve kindselectors. - - Made the evaluation of module data init-expression more robust. - - Support for ``entry`` statement. - - Fixed ``determineexprtype`` that in the case of parameters - returned non-dictionary objects. - - Use ``-*- fix -*-`` to specify that a file is in fixed format. - -Release 2.39.235_1693 -===================== - -* fortranobject.{h,c} - - - Support for allocatable string arrays. - -* cfuncs.py - - - Call-back arguments can now be also instances that have ``__call__`` method - as well as instance methods. - -* f2py2e.py - - - Introduced ``--include_paths ::..`` command line - option. - - Added ``--compiler=`` support to change the C/C++ compiler from - f2py command line. - -* capi_maps.py - - - Handle ``XDY`` parameter constants. - -* crackfortran.py - - - Handle ``XDY`` parameter constants. - - - Introduced formatpattern to workaround a corner case where reserved - keywords are used in format statement. Other than that, format pattern - has no use. - - - Parameters are now fully evaluated. - -* More splitting of documentation strings. - -* func2subr.py - fixed bug for function names that f77 compiler - would set ``integer`` type. - -Release 2.39.235_1660 -===================== - -* f2py2e.py - - - Fixed bug in using --f90flags=.. - -* f90mod_rules.py - - - Splitted generated documentation strings (to avoid MSVC issue when - string length>2k) - - - Ignore ``private`` module data. - -Release 2.39.235_1644 -===================== - -:Date:24 February 2004 - -* Character arrays: - - - Finished complete support for character arrays and arrays of strings. - - ``character*n a(m)`` is treated like ``character a(m,n)`` with ``intent(c)``. - - Character arrays are now considered as ordinary arrays (not as arrays - of strings which actually didn't work). - -* docs - - - Initial f2py manpage file f2py.1. 
- - Updated usersguide and other docs when using numpy_distutils 0.2.2 - and up. - -* capi_maps.py - - - Try harder to use .f2py_f2cmap mappings when kind is used. - -* crackfortran.py - - - Included files are first search in the current directory and - then from the source file directory. - - Ignoring dimension and character selector changes. - - Fixed bug in Fortran 90 comments of fixed format. - - Warn when .pyf signatures contain undefined symbols. - - Better detection of source code formats. Using ``-*- fortran -*-`` - or ``-*- f90 -*-`` in the first line of a Fortran source file is - recommended to help f2py detect the format, fixed or free, - respectively, correctly. - -* cfuncs.py - - - Fixed intent(inout) scalars when typecode=='l'. - - Fixed intent(inout) scalars when not using numarray. - - Fixed intent(inout) scalars when using numarray. - -* diagnose.py - - - Updated for numpy_distutils 0.2.2 and up. - - Added numarray support to diagnose. - -* fortranobject.c - - - Fixed nasty bug with intent(in,copy) complex slice arrays. - - Applied Todd's patch to support numarray's byteswapped or - misaligned arrays, requires numarray-0.8 or higher. - -* f2py2e.py - - - Applying new hooks for numpy_distutils 0.2.2 and up, keeping - backward compatibility with depreciation messages. - - Using always os.system on non-posix platforms in f2py2e.compile - function. - -* rules.py - - - Changed the order of buildcallback and usercode junks. - -* setup.cfg - - - Added so that docs/ and tests/ directories are included to RPMs. - -* setup.py - - - Installing f2py.py instead of f2py.bat under NT. - - Introduced ``--with-numpy_distutils`` that is useful when making - f2py tar-ball with numpy_distutils included. - -Release 2.37.233-1545 -===================== - -:Date: 11 September 2003 - -* rules.py - - - Introduced ``interface_usercode`` replacement. 
When ``usercode`` - statement is used inside the first interface block, its contents - will be inserted at the end of initialization function of a F2PY - generated extension module (feature request: Berthold Höllmann). - - Introduced auxiliary function ``as_column_major_storage`` that - converts input array to an array with column major storage order - (feature request: Hans Petter Langtangen). - -* crackfortran.py - - - Introduced ``pymethoddef`` statement. - -* cfuncs.py - - - Fixed "#ifdef in #define TRYPYARRAYTEMPLATE" bug (patch thanks - to Bernhard Gschaider) - -* auxfuncs.py - - - Introduced ``getpymethod`` function. - - Enabled multi-line blocks in ``callprotoargument`` statement. - -* f90mod_rules.py - - - Undone "Fixed Warning 43 emitted by Intel Fortran compiler" that - causes (curios) segfaults. - -* fortranobject.c - - - Fixed segfaults (that were introduced with recent memory leak - fixes) when using allocatable arrays. - - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. If defined - then a message is printed to stderr whenever a copy of an array is - made and arrays size is larger than F2PY_REPORT_ON_ARRAY_COPY. - -Release 2.35.229-1505 -===================== - -:Date: 5 August 2003 - -* General - - - Introduced ``usercode`` statement (dropped ``c_code`` hooks). - -* setup.py - - - Updated the CVS location of numpy_distutils. - -* auxfuncs.py - - - Introduced ``isint1array(var)`` for fixing ``integer*1 intent(out)`` - support. - -* tests/f77/callback.py - - Introduced some basic tests. - -* src/fortranobject.{c,h} - - - Fixed memory leaks when getting/setting allocatable arrays. - (Bug report by Bernhard Gschaider) - - - Initial support for numarray (Todd Miller's patch). Use -DNUMARRAY - on the f2py command line to enable numarray support. Note that - there is no character arrays support and these hooks are not - tested with F90 compilers yet. 
- -* cfuncs.py - - - Fixed reference counting bug that appeared when constructing extra - argument list to callback functions. - - Added ``PyArray_LONG != PyArray_INT`` test. - -* f2py2e.py - - Undocumented ``--f90compiler``. - -* crackfortran.py - - - Introduced ``usercode`` statement. - - Fixed newlines when outputting multi-line blocks. - - Optimized ``getlincoef`` loop and ``analyzevars`` for cases where - len(vars) is large. - - Fixed callback string argument detection. - - Fixed evaluating expressions: only int|float expressions are - evaluated succesfully. - -* docs - - Documented -DF2PY_REPORT_ATEXIT feature. - -* diagnose.py - - Added CPU information and sys.prefix printout. - -* tests/run_all.py - - Added cwd to PYTHONPATH. - -* tests/f??/return_{real,complex}.py - - Pass "infinity" check in SunOS. - -* rules.py - - - Fixed ``integer*1 intent(out)`` support - - Fixed free format continuation of f2py generated F90 files. - -* tests/mixed/ - - Introduced tests for mixing Fortran 77, Fortran 90 fixed and free - format codes in one module. - -* f90mod_rules.py - - - Fixed non-prototype warnings. - - Fixed Warning 43 emitted by Intel Fortran compiler. - - Avoid long lines in Fortran codes to reduce possible problems with - continuations of lines. - -Public Release 2.32.225-1419 -============================ - -:Date: 8 December 2002 - -* docs/usersguide/ - - Complete revision of F2PY Users Guide - -* tests/run_all.py - - - New file. A Python script to run all f2py unit tests. - -* Removed files: buildmakefile.py, buildsetup.py. - -* tests/f77/ - - - Added intent(out) scalar tests. - -* f2py_testing.py - - - Introduced. It contains jiffies, memusage, run, cmdline functions - useful for f2py unit tests site. - -* setup.py - - - Install numpy_distutils only if it is missing or is too old - for f2py. - -* f90modrules.py - - - Fixed wrapping f90 module data. - - Fixed wrapping f90 module subroutines. 
- - Fixed f90 compiler warnings for wrapped functions by using interface - instead of external stmt for functions. - -* tests/f90/ - - - Introduced return_*.py tests. - -* func2subr.py - - - Added optional signature argument to createfuncwrapper. - - In f2pywrappers routines, declare external, scalar, remaining - arguments in that order. Fixes compiler error 'Invalid declaration' - for:: - - real function foo(a,b) - integer b - real a(b) - end - -* crackfortran.py - - - Removed first-line comment information support. - - Introduced multiline block. Currently usable only for - ``callstatement`` statement. - - Improved array length calculation in getarrlen(..). - - "From sky" program group is created only if ``groupcounter<1``. - See TODO.txt. - - Added support for ``dimension(n:*)``, ``dimension(*:n)``. They are - treated as ``dimesnion(*)`` by f2py. - - Fixed parameter substitution (this fixes TODO item by Patrick - LeGresley, 22 Aug 2001). - -* f2py2e.py - - - Disabled all makefile, setup, manifest file generation hooks. - - Disabled --[no]-external-modroutines option. All F90 module - subroutines will have Fortran/C interface hooks. - - --build-dir can be used with -c option. - - only/skip modes can be used with -c option. - - Fixed and documented `-h stdout` feature. - - Documented extra options. - - Introduced --quiet and --verbose flags. - -* cb_rules.py - - - Fixed debugcapi hooks for intent(c) scalar call-back arguments - (bug report: Pierre Schnizer). - - Fixed intent(c) for scalar call-back arguments. - - Improved failure reports. - -* capi_maps.py - - - Fixed complex(kind=..) to C type mapping bug. The following hold - complex==complex(kind=4)==complex*8, complex(kind=8)==complex*16 - - Using signed_char for integer*1 (bug report: Steve M. Robbins). - - Fixed logical*8 function bug: changed its C correspondence to - long_long. - - Fixed memory leak when returning complex scalar. 
- -* __init__.py - - - Introduced a new function (for f2py test site, but could be useful - in general) ``compile(source[,modulename,extra_args])`` for - compiling fortran source codes directly from Python. - -* src/fortranobject.c - - - Multi-dimensional common block members and allocatable arrays - are returned as Fortran-contiguous arrays. - - Fixed NULL return to Python without exception. - - Fixed memory leak in getattr(,'__doc__'). - - .__doc__ is saved to .__dict__ (previously - it was generated each time when requested). - - Fixed a nasty typo from the previous item that caused data - corruption and occasional SEGFAULTs. - - array_from_pyobj accepts arbitrary rank arrays if the last dimension - is undefined. E.g. dimension(3,*) accepts a(3,4,5) and the result is - array with dimension(3,20). - - Fixed (void*) casts to make g++ happy (bug report: eric). - - Changed the interface of ARR_IS_NULL macro to avoid "``NULL used in - arithmetics``" warnings from g++. - -* src/fortranobject.h - - - Undone previous item. Defining NO_IMPORT_ARRAY for - src/fortranobject.c (bug report: travis) - - Ensured that PY_ARRAY_UNIQUE_SYMBOL is defined only for - src/fortranobject.c (bug report: eric). - -* rules.py - - - Introduced dummy routine feature. - - F77 and F90 wrapper subroutines (if any) as saved to different - files, -f2pywrappers.f and -f2pywrappers2.f90, - respectively. Therefore, wrapping F90 requires numpy_distutils >= - 0.2.0_alpha_2.229. - - Fixed compiler warnings about meaningless ``const void (*f2py_func)(..)``. - - Improved error messages for ``*_from_pyobj``. - - Changed __CPLUSPLUS__ macros to __cplusplus (bug report: eric). - - Changed (void*) casts to (f2py_init_func) (bug report: eric). - - Removed unnecessary (void*) cast for f2py_has_column_major_storage - in f2py_module_methods definition (bug report: eric). - - Changed the interface of f2py_has_column_major_storage function: - removed const from the 1st argument. 
- -* cfuncs.py - - - Introduced -DPREPEND_FORTRAN. - - Fixed bus error on SGI by using PyFloat_AsDouble when ``__sgi`` is defined. - This seems to be `know bug`__ with Python 2.1 and SGI. - - string_from_pyobj accepts only arrays whos elements size==sizeof(char). - - logical scalars (intent(in),function) are normalized to 0 or 1. - - Removed NUMFROMARROBJ macro. - - (char|short)_from_pyobj now use int_from_pyobj. - - (float|long_double)_from_pyobj now use double_from_pyobj. - - complex_(float|long_double)_from_pyobj now use complex_double_from_pyobj. - - Rewrote ``*_from_pyobj`` to be more robust. This fixes segfaults if - getting * from a string. Note that int_from_pyobj differs - from PyNumber_Int in that it accepts also complex arguments - (takes the real part) and sequences (takes the 1st element). - - Removed unnecessary void* casts in NUMFROMARROBJ. - - Fixed casts in ``*_from_pyobj`` functions. - - Replaced CNUMFROMARROBJ with NUMFROMARROBJ. - -.. __: http://sourceforge.net/tracker/index.php?func=detail&aid=435026&group_id=5470&atid=105470 - -* auxfuncs.py - - - Introduced isdummyroutine(). - - Fixed islong_* functions. - - Fixed isintent_in for intent(c) arguments (bug report: Pierre Schnizer). - - Introduced F2PYError and throw_error. Using throw_error, f2py - rejects illegal .pyf file constructs that otherwise would cause - compilation failures or python crashes. - - Fixed islong_long(logical*8)->True. - - Introduced islogical() and islogicalfunction(). - - Fixed prototype string argument (bug report: eric). - -* Updated README.txt and doc strings. Starting to use docutils. - -* Speed up for ``*_from_pyobj`` functions if obj is a sequence. - -* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF`` - in ``GETSCALARFROMPYTUPLE``. - -Older Releases -============== - -:: - - *** Fixed missing includes when wrapping F90 module data. - *** Fixed typos in docs of build_flib options. 
- *** Implemented prototype calculator if no callstatement or - callprotoargument statements are used. A warning is issued if - callstatement is used without callprotoargument. - *** Fixed transposing issue with array arguments in callback functions. - *** Removed -pyinc command line option. - *** Complete tests for Fortran 77 functions returning scalars. - *** Fixed returning character bug if --no-wrap-functions. - *** Described how to wrap F compiled Fortran F90 module procedures - with F2PY. See doc/using_F_compiler.txt. - *** Fixed the order of build_flib options when using --fcompiler=... - *** Recognize .f95 and .F95 files as Fortran sources with free format. - *** Cleaned up the output of 'f2py -h': removed obsolete items, - added build_flib options section. - *** Added --help-compiler option: it lists available Fortran compilers - as detected by numpy_distutils/command/build_flib.py. This option - is available only with -c option. - - -:Release: 2.13.175-1250 -:Date: 4 April 2002 - -:: - - *** Fixed copying of non-contigious 1-dimensional arrays bug. - (Thanks to Travis O.). - - -:Release: 2.13.175-1242 -:Date: 26 March 2002 - -:: - - *** Fixed ignoring type declarations. - *** Turned F2PY_REPORT_ATEXIT off by default. - *** Made MAX,MIN macros available by default so that they can be - always used in signature files. - *** Disabled F2PY_REPORT_ATEXIT for FreeBSD. - - -:Release: 2.13.175-1233 -:Date: 13 March 2002 - -:: - - *** Fixed Win32 port when using f2py.bat. (Thanks to Erik Wilsher). - *** F2PY_REPORT_ATEXIT is disabled for MACs. - *** Fixed incomplete dependency calculator. - - -:Release: 2.13.175-1222 -:Date: 3 March 2002 - -:: - - *** Plugged a memory leak for intent(out) arrays with overwrite=0. - *** Introduced CDOUBLE_to_CDOUBLE,.. functions for copy_ND_array. - These cast functions probably work incorrectly in Numeric. - - -:Release: 2.13.175-1212 -:Date: 23 February 2002 - -:: - - *** Updated f2py for the latest numpy_distutils. 
- *** A nasty bug with multi-dimensional Fortran arrays is fixed - (intent(out) arrays had wrong shapes). (Thanks to Eric for - pointing out this bug). - *** F2PY_REPORT_ATEXIT is disabled by default for __WIN32__. - - -:Release: 2.11.174-1161 -:Date: 14 February 2002 - -:: - - *** Updated f2py for the latest numpy_distutils. - *** Fixed raise error when f2py missed -m flag. - *** Script name `f2py' now depends on the name of python executable. - For example, `python2.2 setup.py install' will create a f2py - script with a name `f2py2.2'. - *** Introduced 'callprotoargument' statement so that proper prototypes - can be declared. This is crucial when wrapping C functions as it - will fix segmentation faults when these wrappers use non-pointer - arguments (thanks to R. Clint Whaley for explaining this to me). - Note that in f2py generated wrapper, the prototypes have - the following forms: - extern #rtype# #fortranname#(#callprotoargument#); - or - extern #rtype# F_FUNC(#fortranname#,#FORTRANNAME#)(#callprotoargument#); - *** Cosmetic fixes to F2PY_REPORT_ATEXIT feature. - - -:Release: 2.11.174-1146 -:Date: 3 February 2002 - -:: - - *** Reviewed reference counting in call-back mechanism. Fixed few bugs. - *** Enabled callstatement for complex functions. - *** Fixed bug with initializing capi_overwrite_ - *** Introduced intent(overwrite) that is similar to intent(copy) but - has opposite effect. Renamed copy_=1 to overwrite_=0. - intent(overwrite) will make default overwrite_=1. - *** Introduced intent(in|inout,out,out=) attribute that renames - arguments name when returned. This renaming has effect only in - documentation strings. - *** Introduced 'callstatement' statement to pyf file syntax. With this - one can specify explicitly how wrapped function should be called - from the f2py generated module. WARNING: this is a dangerous feature - and should be used with care. 
It is introduced to provide a hack - to construct wrappers that may have very different signature - pattern from the wrapped function. Currently 'callstatement' can - be used only inside a subroutine or function block (it should be enough - though) and must be only in one continuous line. The syntax of the - statement is: callstatement ; - - -:Release: 2.11.174 -:Date: 18 January 2002 - -:: - - *** Fixed memory-leak for PyFortranObject. - *** Introduced extra keyword argument copy_ for intent(copy) - variables. It defaults to 1 and forces to make a copy for - intent(in) variables when passing on to wrapped functions (in case - they undesirably change the variable in-situ). - *** Introduced has_column_major_storage member function for all f2py - generated extension modules. It is equivalent to Python call - 'transpose(obj).iscontiguous()' but very efficient. - *** Introduced -DF2PY_REPORT_ATEXIT. If this is used when compiling, - a report is printed to stderr as python exits. The report includes - the following timings: - 1) time spent in all wrapped function calls; - 2) time spent in f2py generated interface around the wrapped - functions. This gives a hint whether one should worry - about storing data in proper order (C or Fortran). - 3) time spent in Python functions called by wrapped functions - through call-back interface. - 4) time spent in f2py generated call-back interface. - For now, -DF2PY_REPORT_ATEXIT is enabled by default. Use - -DF2PY_REPORT_ATEXIT_DISABLE to disable it (I am not sure if - Windows has needed tools, let me know). - Also, I appreciate if you could send me the output of 'F2PY - performance report' (with CPU and platform information) so that I - could optimize f2py generated interfaces for future releases. - *** Extension modules can be linked with dmalloc library. Use - -DDMALLOC when compiling. - *** Moved array_from_pyobj to fortranobject.c. 
- *** Usage of intent(inout) arguments is made more strict -- only - with proper type contiguous arrays are accepted. In general, - you should avoid using intent(inout) attribute as it makes - wrappers of C and Fortran functions asymmetric. I recommend using - intent(in,out) instead. - *** intent(..) has new keywords: copy,cache. - intent(copy,in) - forces a copy of an input argument; this - may be useful for cases where the wrapped function changes - the argument in situ and this may not be desired side effect. - Otherwise, it is safe to not use intent(copy) for the sake - of a better performance. - intent(cache,hide|optional) - just creates a junk of memory. - It does not care about proper storage order. Can be also - intent(in) but then the corresponding argument must be a - contiguous array with a proper elsize. - *** intent(c) can be used also for subroutine names so that - -DNO_APPEND_FORTRAN can be avoided for C functions. - - *** IMPORTANT BREAKING GOOD ... NEWS!!!: - - From now on you don't have to worry about the proper storage order - in multi-dimensional arrays that was earlier a real headache when - wrapping Fortran functions. Now f2py generated modules take care - of the proper conversations when needed. I have carefully designed - and optimized this interface to avoid any unnecessary memory usage - or copying of data. However, it is wise to use input arrays that - has proper storage order: for C arguments it is row-major and for - Fortran arguments it is column-major. But you don't need to worry - about that when developing your programs. The optimization of - initializing the program with proper data for possibly better - memory usage can be safely postponed until the program is working. - - This change also affects the signatures in .pyf files. If you have - created wrappers that take multi-dimensional arrays in arguments, - it is better to let f2py re-generate these files. 
Or you have to - manually do the following changes: reverse the axes indices in all - 'shape' macros. For example, if you have defined an array A(n,m) - and n=shape(A,1), m=shape(A,0) then you must change the last - statements to n=shape(A,0), m=shape(A,1). - - -:Release: 2.8.172 -:Date: 13 January 2002 - -:: - - *** Fixed -c process. Removed pyf_extensions function and pyf_file class. - *** Reorganized setup.py. It generates f2py or f2py.bat scripts - depending on the OS and the location of the python executable. - *** Started to use update_version from numpy_distutils that makes - f2py startup faster. As a side effect, the version number system - changed. - *** Introduced test-site/test_f2py2e.py script that runs all - tests. - *** Fixed global variables initialization problem in crackfortran - when run_main is called several times. - *** Added 'import Numeric' to C/API init function. - *** Fixed f2py.bat in setup.py. - *** Switched over to numpy_distutils and dropped fortran_support. - *** On Windows create f2py.bat file. - *** Introduced -c option: read fortran or pyf files, construct extension - modules, build, and save them to current directory. - In one word: do-it-all-in-one-call. - *** Introduced pyf_extensions(sources,f2py_opts) function. It simplifies - the extension building process considerably. Only for internal use. - *** Converted tests to use numpy_distutils in order to improve portability: - a,b,c - *** f2py2e.run_main() returns a pyf_file class instance containing - information about f2py generated files. - *** Introduced `--build-dir ' command line option. - *** Fixed setup.py for bdist_rpm command. - *** Added --numpy-setup command line option. - *** Fixed crackfortran that did not recognized capitalized type - specification with --no-lower flag. - *** `-h stdout' writes signature to stdout. - *** Fixed incorrect message for check() with empty name list. 
- - -:Release: 2.4.366 -:Date: 17 December 2001 - -:: - - *** Added command line option --[no-]manifest. - *** `make test' should run on Windows, but the results are not truthful. - *** Reorganized f2py2e.py a bit. Introduced run_main(comline_list) function - that can be useful when running f2py from another Python module. - *** Removed command line options -f77,-fix,-f90 as the file format - is determined from the extension of the fortran file - or from its header (first line starting with `!%' and containing keywords - free, fix, or f77). The later overrides the former one. - *** Introduced command line options --[no-]makefile,--[no-]latex-doc. - Users must explicitly use --makefile,--latex-doc if Makefile-, - module.tex is desired. --setup is default. Use --no-setup - to disable setup_.py generation. --overwrite-makefile - will set --makefile. - *** Added `f2py_rout_' to #capiname# in rules.py. - *** intent(...) statement with empty namelist forces intent(...) attribute for - all arguments. - *** Dropped DL_IMPORT and DL_EXPORT in fortranobject.h. - *** Added missing PyFortran_Type.ob_type initialization. - *** Added gcc-3.0 support. - *** Raising non-existing/broken Numeric as a FatalError exception. - *** Fixed Python 2.x specific += construct in fortran_support.py. - *** Fixed copy_ND_array for 1-rank arrays that used to call calloc(0,..) - and caused core dump with a non-gcc compiler (Thanks to Pierre Schnizer - for reporting this bug). - *** Fixed "warning: variable `..' might be clobbered by `longjmp' or `vfork'": - - Reorganized the structure of wrapper functions to get rid of - `goto capi_fail' statements that caused the above warning. - - -:Release: 2.3.343 -:Date: 12 December 2001 - -:: - - *** Issues with the Win32 support (thanks to Eric Jones and Tiffany Kamm): - - Using DL_EXPORT macro for init#modulename#. - - Changed PyObject_HEAD_INIT(&PyType_Type) to PyObject_HEAD_INIT(0). - - Initializing #name#_capi=NULL instead of Py_None in cb hooks. 
- *** Fixed some 'warning: function declaration isn't a prototype', mainly - in fortranobject.{c,h}. - *** Fixed 'warning: missing braces around initializer'. - *** Fixed reading a line containing only a label. - *** Fixed nonportable 'cp -fv' to shutil.copy in f2py2e.py. - *** Replaced PyEval_CallObject with PyObject_CallObject in cb_rules. - *** Replaced Py_DECREF with Py_XDECREF when freeing hidden arguments. - (Reason: Py_DECREF caused segfault when an error was raised) - *** Impl. support for `include "file"' (in addition to `include 'file'') - *** Fixed bugs (buildsetup.py missing in Makefile, in generated MANIFEST.in) - - -:Release: 2.3.327 -:Date: 4 December 2001 - -:: - - *** Sending out the third public release of f2py. - *** Support for Intel(R) Fortran Compiler (thanks to Patrick LeGresley). - *** Introduced `threadsafe' statement to pyf-files (or to be used with - the 'f2py' directive in fortran codes) to force - Py_BEGIN|END_ALLOW_THREADS block around the Fortran subroutine - calling statement in Python C/API. `threadsafe' statement has - an effect only inside a subroutine block. - *** Introduced `fortranname ' statement to be used only within - pyf-files. This is useful when the wrapper (Python C/API) function - has different name from the wrapped (Fortran) function. - *** Introduced `intent(c)' directive and statement. It is useful when - wrapping C functions. Use intent(c) for arguments that are - scalars (not pointers) or arrays (with row-ordering of elements). - - -:Release: 2.3.321 -:Date: 3 December 2001 - -:: - - *** f2py2e can be installed using distutils (run `python setup.py install'). - *** f2py builds setup_.py. Use --[no-]setup to control this - feature. setup_.py uses fortran_support module (from SciPy), - but for your convenience it is included also with f2py as an additional - package. 
Note that it has not as many compilers supported as with - using Makefile-, but new compilers should be added to - fortran_support module, not to f2py2e package. - *** Fixed some compiler warnings about else statements. - diff --git a/numpy-1.6.2/numpy/f2py/docs/OLDNEWS.txt b/numpy-1.6.2/numpy/f2py/docs/OLDNEWS.txt deleted file mode 100644 index 401d2dcee4..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/OLDNEWS.txt +++ /dev/null @@ -1,63 +0,0 @@ - -.. topic:: Old F2PY NEWS - - March 30, 2004 - F2PY bug fix release (version 2.39.235-1693). Two new command line switches: - ``--compiler`` and ``--include_paths``. Support for allocatable string arrays. - Callback arguments may now be arbitrary callable objects. Win32 installers - for F2PY and Scipy_core are provided. - `Differences with the previous release (version 2.37.235-1660)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.98&r2=1.87&f=h - - - March 9, 2004 - F2PY bug fix release (version 2.39.235-1660). - `Differences with the previous release (version 2.37.235-1644)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.87&r2=1.83&f=h - - February 24, 2004 - Latest F2PY release (version 2.39.235-1644). - Support for numpy_distutils 0.2.2 and up (e.g. compiler flags can be - changed via f2py command line options). Implemented support for - character arrays and arrays of strings (e.g. ``character*(*) a(m,..)``). - *Important bug fixes regarding complex arguments, upgrading is - highly recommended*. Documentation updates. - `Differences with the previous release (version 2.37.233-1545)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.83&r2=1.58&f=h - - September 11, 2003 - Latest F2PY release (version 2.37.233-1545). - New statements: ``pymethoddef`` and ``usercode`` in interface blocks. - New function: ``as_column_major_storage``. - New CPP macro: ``F2PY_REPORT_ON_ARRAY_COPY``. - Bug fixes. 
- `Differences with the previous release (version 2.35.229-1505)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.58&r2=1.49&f=h - - August 2, 2003 - Latest F2PY release (version 2.35.229-1505). - `Differences with the previous release (version 2.32.225-1419)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.49&r2=1.28&f=h - - April 2, 2003 - Initial support for Numarray_ (thanks to Todd Miller). - - December 8, 2002 - Sixth public release of F2PY (version 2.32.225-1419). Comes with - revised `F2PY Users Guide`__, `new testing site`__, lots of fixes - and other improvements, see `HISTORY.txt`_ for details. - - __ usersguide/index.html - __ TESTING.txt_ - -.. References - ========== - -.. _HISTORY.txt: HISTORY.html -.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _TESTING.txt: TESTING.html \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/docs/README.txt b/numpy-1.6.2/numpy/f2py/docs/README.txt deleted file mode 100644 index cec8a6ec09..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/README.txt +++ /dev/null @@ -1,461 +0,0 @@ -.. -*- rest -*- - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - F2PY: Fortran to Python interface generator -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Author: Pearu Peterson -:License: NumPy License -:Web-site: http://cens.ioc.ee/projects/f2py2e/ -:Discussions to: `f2py-users mailing list`_ -:Documentation: `User's Guide`__, FAQ__ -:Platforms: All -:Date: $Date: 2005/01/30 18:54:53 $ - -.. _f2py-users mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/ -__ usersguide/index.html -__ FAQ.html - -------------------------------- - -.. topic:: NEWS!!! - - January 5, 2006 - - WARNING -- these notes are out of date! The package structure for NumPy and - SciPy has changed considerably. Much of this information is now incorrect. - - January 30, 2005 - - Latest F2PY release (version 2.45.241_1926). 
- New features: wrapping unsigned integers, support for ``.pyf.src`` template files, - callback arguments can now be CObjects, fortran objects, built-in functions. - Introduced ``intent(aux)`` attribute. Wrapped objects have ``_cpointer`` - attribute holding C pointer to wrapped functions or variables. - Many bug fixes and improvements, updated documentation. - `Differences with the previous release (version 2.43.239_1831)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.163&r2=1.137&f=h - - October 4, 2004 - F2PY bug fix release (version 2.43.239_1831). - Better support for 64-bit platforms. - Introduced ``--help-link`` and ``--link-`` options. - Bug fixes. - `Differences with the previous release (version 2.43.239_1806)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.137&r2=1.131&f=h - - September 25, 2004 - Latest F2PY release (version 2.43.239_1806). - Support for ``ENTRY`` statement. New attributes: - ``intent(inplace)``, ``intent(callback)``. Supports Numarray 1.1. - Introduced ``-*- fix -*-`` header content. Improved ``PARAMETER`` support. - Documentation updates. `Differences with the previous release - (version 2.39.235-1693)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.131&r2=1.98&f=h - - `History of NEWS`__ - - __ OLDNEWS.html - -------------------------------- - -.. Contents:: - -============== - Introduction -============== - -The purpose of the F2PY --*Fortran to Python interface generator*-- -project is to provide connection between Python_ and Fortran -languages. F2PY is a Python extension tool for creating Python C/API -modules from (handwritten or F2PY generated) signature files (or -directly from Fortran sources). The generated extension modules -facilitate: - -* Calling Fortran 77/90/95, Fortran 90/95 module, and C functions from - Python. 
- -* Accessing Fortran 77 ``COMMON`` blocks and Fortran 90/95 module - data (including allocatable arrays) from Python. - -* Calling Python functions from Fortran or C (call-backs). - -* Automatically handling the difference in the data storage order of - multi-dimensional Fortran and Numerical Python (i.e. C) arrays. - -In addition, F2PY can build the generated extension modules to shared -libraries with one command. F2PY uses the ``numpy_distutils`` module -from SciPy_ that supports number of major Fortran compilers. - -.. - (see `COMPILERS.txt`_ for more information). - -F2PY generated extension modules depend on NumPy_ package that -provides fast multi-dimensional array language facility to Python. - - ---------------- - Main features ---------------- - -Here follows a more detailed list of F2PY features: - -* F2PY scans real Fortran codes to produce the so-called signature - files (.pyf files). The signature files contain all the information - (function names, arguments and their types, etc.) that is needed to - construct Python bindings to Fortran (or C) functions. - - The syntax of signature files is borrowed from the - Fortran 90/95 language specification and has some F2PY specific - extensions. The signature files can be modified to dictate how - Fortran (or C) programs are called from Python: - - + F2PY solves dependencies between arguments (this is relevant for - the order of initializing variables in extension modules). - - + Arguments can be specified to be optional or hidden that - simplifies calling Fortran programs from Python considerably. - - + In principle, one can design any Python signature for a given - Fortran function, e.g. change the order arguments, introduce - auxiliary arguments, hide the arguments, process the arguments - before passing to Fortran, return arguments as output of F2PY - generated functions, etc. - -* F2PY automatically generates __doc__ strings (and optionally LaTeX - documentation) for extension modules. 
- -* F2PY generated functions accept arbitrary (but sensible) Python - objects as arguments. The F2PY interface automatically takes care of - type-casting and handling of non-contiguous arrays. - -* The following Fortran constructs are recognized by F2PY: - - + All basic Fortran types:: - - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ] - integer*([ -1 | -2 | -4 | -8 ]) - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision - complex[ | *8 | *16 | *32 ] - - Negative ``integer`` kinds are used to wrap unsigned integers. - - + Multi-dimensional arrays of all basic types with the following - dimension specifications:: - - | : | * | : - - + Attributes and statements:: - - intent([ in | inout | out | hide | in,out | inout,out | c | - copy | cache | callback | inplace | aux ]) - dimension() - common, parameter - allocatable - optional, required, external - depend([]) - check([]) - note() - usercode, callstatement, callprotoargument, threadsafe, fortranname - pymethoddef - entry - -* Because there are only little (and easily handleable) differences - between calling C and Fortran functions from F2PY generated - extension modules, then F2PY is also well suited for wrapping C - libraries to Python. - -* Practice has shown that F2PY generated interfaces (to C or Fortran - functions) are less error prone and even more efficient than - handwritten extension modules. The F2PY generated interfaces are - easy to maintain and any future optimization of F2PY generated - interfaces transparently apply to extension modules by just - regenerating them with the latest version of F2PY. - -* `F2PY Users Guide and Reference Manual`_ - - -=============== - Prerequisites -=============== - -F2PY requires the following software installed: - -* Python_ (versions 1.5.2 or later; 2.1 and up are recommended). - You must have python-dev package installed. 
-* NumPy_ (versions 13 or later; 20.x, 21.x, 22.x, 23.x are recommended) -* Numarray_ (version 0.9 and up), optional, partial support. -* Scipy_distutils (version 0.2.2 and up are recommended) from SciPy_ - project. Get it from Scipy CVS or download it below. - -Python 1.x users also need distutils_. - -Of course, to build extension modules, you'll need also working C -and/or Fortran compilers installed. - -========== - Download -========== - -You can download the sources for the latest F2PY and numpy_distutils -releases as: - -* `2.x`__/`F2PY-2-latest.tar.gz`__ -* `2.x`__/`numpy_distutils-latest.tar.gz`__ - -Windows users might be interested in Win32 installer for F2PY and -Scipy_distutils (these installers are built using Python 2.3): - -* `2.x`__/`F2PY-2-latest.win32.exe`__ -* `2.x`__/`numpy_distutils-latest.win32.exe`__ - -Older releases are also available in the directories -`rel-0.x`__, `rel-1.x`__, `rel-2.x`__, `rel-3.x`__, `rel-4.x`__, `rel-5.x`__, -if you need them. - -.. __: 2.x/ -.. __: 2.x/F2PY-2-latest.tar.gz -.. __: 2.x/ -.. __: 2.x/numpy_distutils-latest.tar.gz -.. __: 2.x/ -.. __: 2.x/F2PY-2-latest.win32.exe -.. __: 2.x/ -.. __: 2.x/numpy_distutils-latest.win32.exe -.. __: rel-0.x -.. __: rel-1.x -.. __: rel-2.x -.. __: rel-3.x -.. __: rel-4.x -.. __: rel-5.x - -Development version of F2PY from CVS is available as `f2py2e.tar.gz`__. - -__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/f2py2e.tar.gz?tarball=1 - -Debian Sid users can simply install ``python-f2py`` package. - -============== - Installation -============== - -Unpack the source file, change to directrory ``F2PY-?-???/`` and run -(you may need to become a root):: - - python setup.py install - -The F2PY installation installs a Python package ``f2py2e`` to your -Python ``site-packages`` directory and a script ``f2py`` to your -Python executable path. - -See also Installation__ section in `F2PY FAQ`_. - -.. 
__: FAQ.html#installation - -Similarly, to install ``numpy_distutils``, unpack its tar-ball and run:: - - python setup.py install - -======= - Usage -======= - -To check if F2PY is installed correctly, run -:: - - f2py - -without any arguments. This should print out the usage information of -the ``f2py`` program. - -Next, try out the following three steps: - -1) Create a Fortran file `hello.f`__ that contains:: - - C File hello.f - subroutine foo (a) - integer a - print*, "Hello from Fortran!" - print*, "a=",a - end - -__ hello.f - -2) Run - - :: - - f2py -c -m hello hello.f - - This will build an extension module ``hello.so`` (or ``hello.sl``, - or ``hello.pyd``, etc. depending on your platform) into the current - directory. - -3) Now in Python try:: - - >>> import hello - >>> print hello.__doc__ - >>> print hello.foo.__doc__ - >>> hello.foo(4) - Hello from Fortran! - a= 4 - >>> - -If the above works, then you can try out more thorough -`F2PY unit tests`__ and read the `F2PY Users Guide and Reference Manual`_. - -__ FAQ.html#q-how-to-test-if-f2py-is-working-correctly - -=============== - Documentation -=============== - -The documentation of the F2PY project is collected in ``f2py2e/docs/`` -directory. It contains the following documents: - -`README.txt`_ (in CVS__) - The first thing to read about F2PY -- this document. - -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/README.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`usersguide/index.txt`_, `usersguide/f2py_usersguide.pdf`_ - F2PY Users Guide and Reference Manual. Contains lots of examples. - -`FAQ.txt`_ (in CVS__) - F2PY Frequently Asked Questions. - -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/FAQ.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`TESTING.txt`_ (in CVS__) - About F2PY testing site. What tests are available and how to run them. 
- -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/TESTING.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`HISTORY.txt`_ (in CVS__) - A list of latest changes in F2PY. This is the most up-to-date - document on F2PY. - -__ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup - -`THANKS.txt`_ - Acknowledgments. - -.. - `COMPILERS.txt`_ - Compiler and platform specific notes. - -=============== - Mailing list -=============== - -A mailing list f2py-users@cens.ioc.ee is open for F2PY releated -discussion/questions/etc. - -* `Subscribe..`__ -* `Archives..`__ - -__ http://cens.ioc.ee/mailman/listinfo/f2py-users -__ http://cens.ioc.ee/pipermail/f2py-users - - -===== - CVS -===== - -F2PY is being developed under CVS_. The CVS version of F2PY can be -obtained as follows: - -1) First you need to login (the password is ``guest``):: - - cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login - -2) and then do the checkout:: - - cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e - -3) You can update your local F2PY tree ``f2py2e/`` by executing:: - - cvs -z6 update -P -d - -You can browse the `F2PY CVS`_ repository. - -=============== - Contributions -=============== - -* `A short introduction to F2PY`__ by Pierre Schnizer. - -* `F2PY notes`__ by Fernando Perez. - -* `Debian packages of F2PY`__ by José Fonseca. [OBSOLETE, Debian Sid - ships python-f2py package] - -__ http://fubphpc.tu-graz.ac.at/~pierre/f2py_tutorial.tar.gz -__ http://cens.ioc.ee/pipermail/f2py-users/2003-April/000472.html -__ http://jrfonseca.dyndns.org/debian/ - - -=============== - Related sites -=============== - -* `Numerical Python`_ -- adds a fast array facility to the Python language. -* Pyfort_ -- A Python-Fortran connection tool. -* SciPy_ -- An open source library of scientific tools for Python. -* `Scientific Python`_ -- A collection of Python modules that are - useful for scientific computing. 
-* `The Fortran Company`_ -- A place to find products, services, and general - information related to the Fortran programming language. -* `American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978`__ -* `J3`_ -- The US Fortran standards committee. -* SWIG_ -- A software development tool that connects programs written - in C and C++ with a variety of high-level programming languages. -* `Mathtools.net`_ -- A technical computing portal for all scientific - and engineering needs. - -.. __: http://www.fortran.com/fortran/F77_std/rjcnf.html - -.. References - ========== - - -.. _F2PY Users Guide and Reference Manual: usersguide/index.html -.. _usersguide/index.txt: usersguide/index.html -.. _usersguide/f2py_usersguide.pdf: usersguide/f2py_usersguide.pdf -.. _README.txt: README.html -.. _COMPILERS.txt: COMPILERS.html -.. _F2PY FAQ: -.. _FAQ.txt: FAQ.html -.. _HISTORY.txt: HISTORY.html -.. _HISTORY.txt from CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup -.. _THANKS.txt: THANKS.html -.. _TESTING.txt: TESTING.html -.. _F2PY CVS2: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/ -.. _F2PY CVS: http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/ - -.. _CVS: http://www.cvshome.org/ -.. _Python: http://www.python.org/ -.. _SciPy: http://www.numpy.org/ -.. _NumPy: http://www.numpy.org/ -.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _docutils: http://docutils.sourceforge.net/ -.. _distutils: http://www.python.org/sigs/distutils-sig/ -.. _Numerical Python: http://www.numpy.org/ -.. _Pyfort: http://pyfortran.sourceforge.net/ -.. _Scientific Python: - http://starship.python.net/crew/hinsen/scientific.html -.. _The Fortran Company: http://www.fortran.com/fortran/ -.. _J3: http://www.j3-fortran.org/ -.. _Mathtools.net: http://www.mathtools.net/ -.. _SWIG: http://www.swig.org/ - -.. 
- Local Variables: - mode: indented-text - indent-tabs-mode: nil - sentence-end-double-space: t - fill-column: 70 - End: diff --git a/numpy-1.6.2/numpy/f2py/docs/TESTING.txt b/numpy-1.6.2/numpy/f2py/docs/TESTING.txt deleted file mode 100644 index d905211754..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/TESTING.txt +++ /dev/null @@ -1,108 +0,0 @@ - -======================================================= - F2PY unit testing site -======================================================= - -.. Contents:: - -Tests ------ - -* To run all F2PY unit tests in one command:: - - cd tests - python run_all.py [] - - For example:: - - localhost:~/src_cvs/f2py2e/tests$ python2.2 run_all.py 100 --quiet - ********************************************** - Running '/usr/bin/python2.2 f77/return_integer.py 100 --quiet' - run 1000 tests in 1.87 seconds - initial virtual memory size: 3952640 bytes - current virtual memory size: 3952640 bytes - ok - ********************************************** - Running '/usr/bin/python2.2 f77/return_logical.py 100 --quiet' - run 1000 tests in 1.47 seconds - initial virtual memory size: 3952640 bytes - current virtual memory size: 3952640 bytes - ok - ... - - If some tests fail, try to run the failing tests separately (without - the ``--quiet`` option) as described below to get more information - about the failure. - -* Test intent(in), intent(out) scalar arguments, - scalars returned by F77 functions - and F90 module functions:: - - tests/f77/return_integer.py - tests/f77/return_real.py - tests/f77/return_logical.py - tests/f77/return_complex.py - tests/f77/return_character.py - tests/f90/return_integer.py - tests/f90/return_real.py - tests/f90/return_logical.py - tests/f90/return_complex.py - tests/f90/return_character.py - - Change to tests/ directory and run:: - - python f77/return_.py [] - python f90/return_.py [] - - where ```` is integer, real, logical, complex, or character. - Test scripts options are described below. 
- - A test is considered succesful if the last printed line is "ok". - - If you get import errors like:: - - ImportError: No module named f77_ext_return_integer - - but ``f77_ext_return_integer.so`` exists in the current directory then - it means that the current directory is not included in to `sys.path` - in your Python installation. As a fix, prepend ``.`` to ``PYTHONPATH`` - environment variable and rerun the tests. For example:: - - PYTHONPATH=. python f77/return_integer.py - -* Test mixing Fortran 77, Fortran 90 fixed and free format codes:: - - tests/mixed/run.py - -* Test basic callback hooks:: - - tests/f77/callback.py - -Options -------- - -You may want to use the following options when running the test -scripts: - -```` - Run tests ```` times. Useful for detecting memory leaks. Under - Linux tests scripts output virtual memory size state of the process - before and after calling the wrapped functions. - -``--quiet`` - Suppress all messages. On success only "ok" should be displayed. - -``--fcompiler=`` - Use:: - - f2py -c --help-fcompiler - - to find out what compilers are available (or more precisely, which - ones are recognized by ``numpy_distutils``). - -Reporting failures ------------------- - -XXX: (1) make sure that failures are due to f2py and (2) send full -stdout/stderr messages to me. Also add compiler,python,platform -information. diff --git a/numpy-1.6.2/numpy/f2py/docs/THANKS.txt b/numpy-1.6.2/numpy/f2py/docs/THANKS.txt deleted file mode 100644 index 0a3f0b9d66..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/THANKS.txt +++ /dev/null @@ -1,63 +0,0 @@ - -================= - Acknowledgments -================= - -F2PY__ is an open source Python package and command line tool developed and -maintained by Pearu Peterson (me__). - -.. __: http://cens.ioc.ee/projects/f2py2e/ -.. 
__: http://cens.ioc.ee/~pearu/ - -Many people have contributed to the F2PY project in terms of interest, -encouragement, suggestions, criticism, bug reports, code -contributions, and keeping me busy with developing F2PY. For all that -I thank - - James Amundson, John Barnard, David Beazley, Frank Bertoldi, Roman - Bertle, James Boyle, Moritz Braun, Rolv Erlend Bredesen, John - Chaffer, Fred Clare, Adam Collard, Ben Cornett, Jose L Gomez Dans, - Jaime D. Perea Duarte, Paul F Dubois, Thilo Ernst, Bonilla Fabian, - Martin Gelfand, Eduardo A. Gonzalez, Siegfried Gonzi, Bernhard - Gschaider, Charles Doutriaux, Jeff Hagelberg, Janko Hauser, Thomas - Hauser, Heiko Henkelmann, William Henney, Yueqiang Huang, Asim - Hussain, Berthold Höllmann, Vladimir Janku, Henk Jansen, Curtis - Jensen, Eric Jones, Tiffany Kamm, Andrey Khavryuchenko, Greg - Kochanski, Jochen Küpper, Simon Lacoste-Julien, Tim Lahey, Hans - Petter Langtangen, Jeff Layton, Matthew Lewis, Patrick LeGresley, - Joaquim R R A Martins, Paul Magwene Lionel Maziere, Craig McNeile, - Todd Miller, David C. Morrill, Dirk Muders, Kevin Mueller, Andrew - Mullhaupt, Vijayendra Munikoti, Travis Oliphant, Kevin O'Mara, Arno - Paehler, Fernando Perez, Didrik Pinte, Todd Alan Pitts, Prabhu - Ramachandran, Brad Reisfeld, Steve M. Robbins, Theresa Robinson, - Pedro Rodrigues, Les Schaffer, Christoph Scheurer, Herb Schilling, - Pierre Schnizer, Kevin Smith, Paulo Teotonio Sobrinho, José Rui - Faustino de Sousa, Andrew Swan, Dustin Tang, Charlie Taylor, Paul le - Texier, Michael Tiller, Semen Trygubenko, Ravi C Venkatesan, Peter - Verveer, Nils Wagner, R. Clint Whaley, Erik Wilsher, Martin - Wiechert, Gilles Zerah, SungPil Yoon. - -(This list may not be complete. Please forgive me if I have left you -out and let me know, I'll add your name.) - -Special thanks are due to ... - -Eric Jones - he and Travis O. 
are responsible for starting the -numpy_distutils project that allowed to move most of the platform and -compiler specific codes out from F2PY. This simplified maintaining the -F2PY project considerably. - -Joaquim R R A Martins - he made possible for me to test F2PY on IRIX64 -platform. He also presented our paper about F2PY in the 9th Python -Conference that I planned to attend but had to cancel in very last -minutes. - -Travis Oliphant - his knowledge and experience on Numerical Python -C/API has been invaluable in early development of the F2PY program. -His major contributions are call-back mechanism and copying N-D arrays -of arbitrary types. - -Todd Miller - he is responsible for Numarray support in F2PY. - -Thanks! - Pearu diff --git a/numpy-1.6.2/numpy/f2py/docs/default.css b/numpy-1.6.2/numpy/f2py/docs/default.css deleted file mode 100644 index 9289e28260..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/default.css +++ /dev/null @@ -1,180 +0,0 @@ -/* -:Author: David Goodger -:Contact: goodger@users.sourceforge.net -:date: $Date: 2002/08/01 20:52:44 $ -:version: $Revision: 1.1 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. 
-*/ - -body { - background: #FFFFFF ; - color: #000000 -} - -a.footnote-reference { - font-size: smaller ; - vertical-align: super } - -a.target { - color: blue } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.first { - margin-top: 0 } - -p.label { - 
white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.field-argument { - font-style: italic } - -span.interpreted { - font-family: sans-serif } - -span.option-argument { - font-style: italic } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: baseline } - -td.docinfo-name { - font-weight: bold ; - text-align: right } - -td.field-name { - font-weight: bold } diff --git a/numpy-1.6.2/numpy/f2py/docs/docutils.conf b/numpy-1.6.2/numpy/f2py/docs/docutils.conf deleted file mode 100644 index 4e5a8425bb..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/docutils.conf +++ /dev/null @@ -1,16 +0,0 @@ -[general] - -# These entries affect all processing: -#source-link: 1 -datestamp: %Y-%m-%d %H:%M UTC -generator: 1 - -# These entries affect HTML output: -#stylesheet-path: pearu_style.css -output-encoding: latin-1 - -# These entries affect reStructuredText-style PEPs: -#pep-template: pep-html-template -#pep-stylesheet-path: stylesheets/pep.css -#python-home: http://www.python.org -#no-random: 1 diff --git a/numpy-1.6.2/numpy/f2py/docs/hello.f b/numpy-1.6.2/numpy/f2py/docs/hello.f deleted file mode 100644 index 3e0dc6d212..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/hello.f +++ /dev/null @@ -1,7 +0,0 @@ -C File hello.f - subroutine foo (a) - integer a - print*, "Hello from Fortran!" 
- print*, "a=",a - end - diff --git a/numpy-1.6.2/numpy/f2py/docs/pyforttest.pyf b/numpy-1.6.2/numpy/f2py/docs/pyforttest.pyf deleted file mode 100644 index 79a9ae205f..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/pyforttest.pyf +++ /dev/null @@ -1,5 +0,0 @@ -subroutine foo(a,m,n) -integer m = size(a,1) -integer n = size(a,2) -real, intent(inout) :: a(m,n) -end subroutine foo diff --git a/numpy-1.6.2/numpy/f2py/docs/pytest.py b/numpy-1.6.2/numpy/f2py/docs/pytest.py deleted file mode 100644 index abd3487dfb..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/pytest.py +++ /dev/null @@ -1,10 +0,0 @@ -#File: pytest.py -import Numeric -def foo(a): - a = Numeric.array(a) - m,n = a.shape - for i in range(m): - for j in range(n): - a[i,j] = a[i,j] + 10*(i+1) + (j+1) - return a -#eof diff --git a/numpy-1.6.2/numpy/f2py/docs/simple.f b/numpy-1.6.2/numpy/f2py/docs/simple.f deleted file mode 100644 index ba468a509c..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/simple.f +++ /dev/null @@ -1,13 +0,0 @@ -cFile: simple.f - subroutine foo(a,m,n) - integer m,n,i,j - real a(m,n) -cf2py intent(in,out) a -cf2py intent(hide) m,n - do i=1,m - do j=1,n - a(i,j) = a(i,j) + 10*i+j - enddo - enddo - end -cEOF diff --git a/numpy-1.6.2/numpy/f2py/docs/simple_session.dat b/numpy-1.6.2/numpy/f2py/docs/simple_session.dat deleted file mode 100644 index 10d9dc9627..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/simple_session.dat +++ /dev/null @@ -1,51 +0,0 @@ ->>> import pytest ->>> import f2pytest ->>> import pyforttest ->>> print f2pytest.foo.__doc__ -foo - Function signature: - a = foo(a) -Required arguments: - a : input rank-2 array('f') with bounds (m,n) -Return objects: - a : rank-2 array('f') with bounds (m,n) - ->>> print pyforttest.foo.__doc__ -foo(a) - ->>> pytest.foo([[1,2],[3,4]]) -array([[12, 14], - [24, 26]]) ->>> f2pytest.foo([[1,2],[3,4]]) # F2PY can handle arbitrary input sequences -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> pyforttest.foo([[1,2],[3,4]]) -Traceback (most recent call 
last): - File "", line 1, in ? -pyforttest.error: foo, argument A: Argument intent(inout) must be an array. - ->>> import Numeric ->>> a=Numeric.array([[1,2],[3,4]],'f') ->>> f2pytest.foo(a) -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> a # F2PY makes a copy when input array is not Fortran contiguous -array([[ 1., 2.], - [ 3., 4.]],'f') ->>> a=Numeric.transpose(Numeric.array([[1,3],[2,4]],'f')) ->>> a -array([[ 1., 2.], - [ 3., 4.]],'f') ->>> f2pytest.foo(a) -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> a # F2PY passes Fortran contiguous input array directly to Fortran -array([[ 12., 14.], - [ 24., 26.]],'f') -# See intent(copy), intent(overwrite), intent(inplace), intent(inout) -# attributes documentation to enhance the above behavior. - ->>> a=Numeric.array([[1,2],[3,4]],'f') ->>> pyforttest.foo(a) ->>> a # Huh? Pyfort 8.5 gives wrong results.. -array([[ 12., 23.], - [ 15., 26.]],'f') diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/allocarr.f90 b/numpy-1.6.2/numpy/f2py/docs/usersguide/allocarr.f90 deleted file mode 100644 index e0d6c2ec85..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/allocarr.f90 +++ /dev/null @@ -1,16 +0,0 @@ -module mod - real, allocatable, dimension(:,:) :: b -contains - subroutine foo - integer k - if (allocated(b)) then - print*, "b=[" - do k = 1,size(b,1) - print*, b(k,1:size(b,2)) - enddo - print*, "]" - else - print*, "b is not allocated" - endif - end subroutine foo -end module mod diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/allocarr_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/allocarr_session.dat deleted file mode 100644 index fc91959b73..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/allocarr_session.dat +++ /dev/null @@ -1,27 +0,0 @@ ->>> import allocarr ->>> print allocarr.mod.__doc__ -b - 'f'-array(-1,-1), not allocated -foo - Function signature: - foo() - ->>> allocarr.mod.foo() - b is not allocated ->>> allocarr.mod.b = [[1,2,3],[4,5,6]] # allocate/initialize b ->>> allocarr.mod.foo() - b=[ 
- 1.000000 2.000000 3.000000 - 4.000000 5.000000 6.000000 - ] ->>> allocarr.mod.b # b is Fortran-contiguous -array([[ 1., 2., 3.], - [ 4., 5., 6.]],'f') ->>> allocarr.mod.b = [[1,2,3],[4,5,6],[7,8,9]] # reallocate/initialize b ->>> allocarr.mod.foo() - b=[ - 1.000000 2.000000 3.000000 - 4.000000 5.000000 6.000000 - 7.000000 8.000000 9.000000 - ] ->>> allocarr.mod.b = None # deallocate array ->>> allocarr.mod.foo() - b is not allocated diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/array.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/array.f deleted file mode 100644 index ef20c9c206..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/array.f +++ /dev/null @@ -1,17 +0,0 @@ -C FILE: ARRAY.F - SUBROUTINE FOO(A,N,M) -C -C INCREMENT THE FIRST ROW AND DECREMENT THE FIRST COLUMN OF A -C - INTEGER N,M,I,J - REAL*8 A(N,M) -Cf2py intent(in,out,copy) a -Cf2py integer intent(hide),depend(a) :: n=shape(a,0), m=shape(a,1) - DO J=1,M - A(1,J) = A(1,J) + 1D0 - ENDDO - DO I=1,N - A(I,1) = A(I,1) - 1D0 - ENDDO - END -C END OF FILE ARRAY.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/array_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/array_session.dat deleted file mode 100644 index f649334821..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/array_session.dat +++ /dev/null @@ -1,65 +0,0 @@ ->>> import arr ->>> from Numeric import array ->>> print arr.foo.__doc__ -foo - Function signature: - a = foo(a,[overwrite_a]) -Required arguments: - a : input rank-2 array('d') with bounds (n,m) -Optional arguments: - overwrite_a := 0 input int -Return objects: - a : rank-2 array('d') with bounds (n,m) - ->>> a=arr.foo([[1,2,3], -... [4,5,6]]) -copied an array using PyArray_CopyFromObject: size=6, elsize=8 ->>> print a -[[ 1. 3. 4.] - [ 3. 5. 6.]] ->>> a.iscontiguous(), arr.has_column_major_storage(a) -(0, 1) ->>> b=arr.foo(a) # even if a is proper-contiguous -... # and has proper type, a copy is made -... # forced by intent(copy) attribute -... 
# to preserve its original contents -... -copied an array using copy_ND_array: size=6, elsize=8 ->>> print a -[[ 1. 3. 4.] - [ 3. 5. 6.]] ->>> print b -[[ 1. 4. 5.] - [ 2. 5. 6.]] ->>> b=arr.foo(a,overwrite_a=1) # a is passed directly to Fortran -... # routine and its contents is discarded -... ->>> print a -[[ 1. 4. 5.] - [ 2. 5. 6.]] ->>> print b -[[ 1. 4. 5.] - [ 2. 5. 6.]] ->>> a is b # a and b are acctually the same objects -1 ->>> print arr.foo([1,2,3]) # different rank arrays are allowed -copied an array using PyArray_CopyFromObject: size=3, elsize=8 -[ 1. 1. 2.] ->>> print arr.foo([[[1],[2],[3]]]) -copied an array using PyArray_CopyFromObject: size=3, elsize=8 -[ [[ 1.] - [ 3.] - [ 4.]]] ->>> ->>> # Creating arrays with column major data storage order: -... ->>> s = arr.as_column_major_storage(array([[1,2,3],[4,5,6]])) -copied an array using copy_ND_array: size=6, elsize=4 ->>> arr.has_column_major_storage(s) -1 ->>> print s -[[1 2 3] - [4 5 6]] ->>> s2 = arr.as_column_major_storage(s) ->>> s2 is s # an array with column major storage order - # is returned immediately -1 \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/calculate.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/calculate.f deleted file mode 100644 index 1cda1c8ddd..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/calculate.f +++ /dev/null @@ -1,14 +0,0 @@ - subroutine calculate(x,n) -cf2py intent(callback) func - external func -c The following lines define the signature of func for F2PY: -cf2py real*8 y -cf2py y = func(y) -c -cf2py intent(in,out,copy) x - integer n,i - real*8 x(n) - do i=1,n - x(i) = func(x(i)) - end do - end diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/calculate_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/calculate_session.dat deleted file mode 100644 index 2fe64f5224..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/calculate_session.dat +++ /dev/null @@ -1,6 +0,0 @@ ->>> import foo ->>> foo.calculate(range(5), lambda 
x: x*x) -array([ 0., 1., 4., 9., 16.]) ->>> import math ->>> foo.calculate(range(5), math.exp) -array([ 1. , 2.71828175, 7.38905621, 20.08553696, 54.59814835]) diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/callback.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/callback.f deleted file mode 100644 index 6e9bfb920c..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/callback.f +++ /dev/null @@ -1,12 +0,0 @@ -C FILE: CALLBACK.F - SUBROUTINE FOO(FUN,R) - EXTERNAL FUN - INTEGER I - REAL*8 R -Cf2py intent(out) r - R = 0D0 - DO I=-5,5 - R = R + FUN(I) - ENDDO - END -C END OF FILE CALLBACK.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/callback2.pyf b/numpy-1.6.2/numpy/f2py/docs/usersguide/callback2.pyf deleted file mode 100644 index 3d77eed24f..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/callback2.pyf +++ /dev/null @@ -1,19 +0,0 @@ -! -*- f90 -*- -python module __user__routines - interface - function fun(i) result (r) - integer :: i - real*8 :: r - end function fun - end interface -end python module __user__routines - -python module callback2 - interface - subroutine foo(f,r) - use __user__routines, f=>fun - external f - real*8 intent(out) :: r - end subroutine foo - end interface -end python module callback2 diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/callback_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/callback_session.dat deleted file mode 100644 index cd2f260849..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/callback_session.dat +++ /dev/null @@ -1,23 +0,0 @@ ->>> import callback ->>> print callback.foo.__doc__ -foo - Function signature: - r = foo(fun,[fun_extra_args]) -Required arguments: - fun : call-back function -Optional arguments: - fun_extra_args := () input tuple -Return objects: - r : float -Call-back functions: - def fun(i): return r - Required arguments: - i : input int - Return objects: - r : float - ->>> def f(i): return i*i -... 
->>> print callback.foo(f) -110.0 ->>> print callback.foo(lambda i:1) -11.0 diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/common.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/common.f deleted file mode 100644 index b098ab20cf..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/common.f +++ /dev/null @@ -1,13 +0,0 @@ -C FILE: COMMON.F - SUBROUTINE FOO - INTEGER I,X - REAL A - COMMON /DATA/ I,X(4),A(2,3) - PRINT*, "I=",I - PRINT*, "X=[",X,"]" - PRINT*, "A=[" - PRINT*, "[",A(1,1),",",A(1,2),",",A(1,3),"]" - PRINT*, "[",A(2,1),",",A(2,2),",",A(2,3),"]" - PRINT*, "]" - END -C END OF COMMON.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/common_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/common_session.dat deleted file mode 100644 index 846fdaa076..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/common_session.dat +++ /dev/null @@ -1,27 +0,0 @@ ->>> import common ->>> print common.data.__doc__ -i - 'i'-scalar -x - 'i'-array(4) -a - 'f'-array(2,3) - ->>> common.data.i = 5 ->>> common.data.x[1] = 2 ->>> common.data.a = [[1,2,3],[4,5,6]] ->>> common.foo() - I= 5 - X=[ 0 2 0 0] - A=[ - [ 1., 2., 3.] - [ 4., 5., 6.] - ] ->>> common.data.a[1] = 45 ->>> common.foo() - I= 5 - X=[ 0 2 0 0] - A=[ - [ 1., 2., 3.] - [ 45., 45., 45.] - ] ->>> common.data.a # a is Fortran-contiguous -array([[ 1., 2., 3.], - [ 45., 45., 45.]],'f') diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/compile_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/compile_session.dat deleted file mode 100644 index 0d84081988..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/compile_session.dat +++ /dev/null @@ -1,11 +0,0 @@ ->>> import f2py2e ->>> fsource = ''' -... subroutine foo -... print*, "Hello world!" -... end -... ''' ->>> f2py2e.compile(fsource,modulename='hello',verbose=0) -0 ->>> import hello ->>> hello.foo() - Hello world! 
diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/default.css b/numpy-1.6.2/numpy/f2py/docs/usersguide/default.css deleted file mode 100644 index bb7226161d..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/default.css +++ /dev/null @@ -1,180 +0,0 @@ -/* -:Author: David Goodger -:Contact: goodger@users.sourceforge.net -:date: $Date: 2002/12/07 23:59:33 $ -:version: $Revision: 1.2 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ - -body { - background: #FFFFFF ; - color: #000000 -} - -a.footnote-reference { - font-size: smaller ; - vertical-align: super } - -a.target { - color: blue } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - 
-h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.first { - margin-top: 0 } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #ee9e9e } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.field-argument { - font-style: italic } - -span.interpreted { - font-family: sans-serif } - -span.option-argument { - font-style: italic } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: baseline } - -td.docinfo-name { - font-weight: bold ; - text-align: right } - -td.field-name { - font-weight: bold } diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/docutils.conf b/numpy-1.6.2/numpy/f2py/docs/usersguide/docutils.conf deleted file mode 100644 index b772fd1376..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/docutils.conf +++ /dev/null @@ -1,16 +0,0 @@ -[general] - -# These entries affect all processing: -#source-link: 1 -datestamp: %Y-%m-%d %H:%M UTC -generator: 1 - -# These entries affect HTML output: -#stylesheet-path: f2py_style.css -output-encoding: latin-1 - -# These entries affect 
reStructuredText-style PEPs: -#pep-template: pep-html-template -#pep-stylesheet-path: stylesheets/pep.css -#python-home: http://www.python.org -#no-random: 1 diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/extcallback.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/extcallback.f deleted file mode 100644 index 9a800628e0..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/extcallback.f +++ /dev/null @@ -1,14 +0,0 @@ - subroutine f1() - print *, "in f1, calling f2 twice.." - call f2() - call f2() - return - end - - subroutine f2() -cf2py intent(callback, hide) fpy - external fpy - print *, "in f2, calling f2py.." - call fpy() - return - end diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/extcallback_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/extcallback_session.dat deleted file mode 100644 index c22935ea0f..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/extcallback_session.dat +++ /dev/null @@ -1,19 +0,0 @@ ->>> import pfromf ->>> pfromf.f2() -Traceback (most recent call last): - File "", line 1, in ? -pfromf.error: Callback fpy not defined (as an argument or module pfromf attribute). - ->>> def f(): print "python f" -... ->>> pfromf.fpy = f ->>> pfromf.f2() - in f2, calling f2py.. -python f ->>> pfromf.f1() - in f1, calling f2 twice.. - in f2, calling f2py.. -python f - in f2, calling f2py.. 
-python f ->>> \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib1.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/fib1.f deleted file mode 100644 index cfbb1eea0d..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib1.f +++ /dev/null @@ -1,18 +0,0 @@ -C FILE: FIB1.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB1.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib1.pyf b/numpy-1.6.2/numpy/f2py/docs/usersguide/fib1.pyf deleted file mode 100644 index 3d6cc0a548..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib1.pyf +++ /dev/null @@ -1,12 +0,0 @@ -! -*- f90 -*- -python module fib2 ! in - interface ! in :fib2 - subroutine fib(a,n) ! in :fib2:fib1.f - real*8 dimension(n) :: a - integer optional,check(len(a)>=n),depend(a) :: n=len(a) - end subroutine fib - end interface -end python module fib2 - -! This file was auto-generated with f2py (version:2.28.198-1366). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib2.pyf b/numpy-1.6.2/numpy/f2py/docs/usersguide/fib2.pyf deleted file mode 100644 index 4a5ae29f1e..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib2.pyf +++ /dev/null @@ -1,9 +0,0 @@ -! 
-*- f90 -*- -python module fib2 - interface - subroutine fib(a,n) - real*8 dimension(n),intent(out),depend(n) :: a - integer intent(in) :: n - end subroutine fib - end interface -end python module fib2 diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib3.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/fib3.f deleted file mode 100644 index 08b050cd26..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/fib3.f +++ /dev/null @@ -1,21 +0,0 @@ -C FILE: FIB3.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) -Cf2py intent(in) n -Cf2py intent(out) a -Cf2py depend(n) a - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB3.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/ftype.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/ftype.f deleted file mode 100644 index cabbb9e2d5..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/ftype.f +++ /dev/null @@ -1,9 +0,0 @@ -C FILE: FTYPE.F - SUBROUTINE FOO(N) - INTEGER N -Cf2py integer optional,intent(in) :: n = 13 - REAL A,X - COMMON /DATA/ A,X(3) - PRINT*, "IN FOO: N=",N," A=",A," X=[",X(1),X(2),X(3),"]" - END -C END OF FTYPE.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/ftype_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/ftype_session.dat deleted file mode 100644 index 01f9febaf4..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/ftype_session.dat +++ /dev/null @@ -1,21 +0,0 @@ ->>> import ftype ->>> print ftype.__doc__ -This module 'ftype' is auto-generated with f2py (version:2.28.198-1366). -Functions: - foo(n=13) -COMMON blocks: - /data/ a,x(3) -. ->>> type(ftype.foo),type(ftype.data) -(, ) ->>> ftype.foo() - IN FOO: N= 13 A= 0. X=[ 0. 0. 0.] ->>> ftype.data.a = 3 ->>> ftype.data.x = [1,2,3] ->>> ftype.foo() - IN FOO: N= 13 A= 3. X=[ 1. 2. 3.] ->>> ftype.data.x[1] = 45 ->>> ftype.foo(24) - IN FOO: N= 24 A= 3. X=[ 1. 45. 3.] 
->>> ftype.data.x -array([ 1., 45., 3.],'f') diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/index.txt b/numpy-1.6.2/numpy/f2py/docs/usersguide/index.txt deleted file mode 100644 index 5a8d12c68e..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/index.txt +++ /dev/null @@ -1,1772 +0,0 @@ -.. -*- rest -*- - -////////////////////////////////////////////////////////////////////// - F2PY Users Guide and Reference Manual -////////////////////////////////////////////////////////////////////// - -:Author: Pearu Peterson -:Contact: pearu@cens.ioc.ee -:Web site: http://cens.ioc.ee/projects/f2py2e/ -:Date: $Date: 2005/04/02 10:03:26 $ -:Revision: $Revision: 1.27 $ - - -.. section-numbering:: - -.. Contents:: - - -================ - Introduction -================ - -The purpose of the F2PY_ --*Fortran to Python interface generator*-- -project is to provide a connection between Python and Fortran -languages. F2PY is a Python_ package (with a command line tool -``f2py`` and a module ``f2py2e``) that facilitates creating/building -Python C/API extension modules that make it possible - -* to call Fortran 77/90/95 external subroutines and Fortran 90/95 - module subroutines as well as C functions; -* to access Fortran 77 ``COMMON`` blocks and Fortran 90/95 module data, - including allocatable arrays - -from Python. See F2PY_ web site for more information and installation -instructions. - -====================================== - Three ways to wrap - getting started -====================================== - -Wrapping Fortran or C functions to Python using F2PY consists of the -following steps: - -* Creating the so-called signature file that contains descriptions of - wrappers to Fortran or C functions, also called as signatures of the - functions. In the case of Fortran routines, F2PY can create initial - signature file by scanning Fortran source codes and - catching all relevant information needed to create wrapper - functions. 
- -* Optionally, F2PY created signature files can be edited to optimize - wrappers functions, make them "smarter" and more "Pythonic". - -* F2PY reads a signature file and writes a Python C/API module containing - Fortran/C/Python bindings. - -* F2PY compiles all sources and builds an extension module containing - the wrappers. In building extension modules, F2PY uses - ``numpy_distutils`` that supports a number of Fortran 77/90/95 - compilers, including Gnu, Intel, - Sun Fortre, SGI MIPSpro, Absoft, NAG, Compaq etc. compilers. - -Depending on a particular situation, these steps can be carried out -either by just in one command or step-by-step, some steps can be -ommited or combined with others. - -Below I'll describe three typical approaches of using F2PY. -The following `example Fortran 77 code`__ will be used for -illustration: - -.. include:: fib1.f - :literal: - -__ fib1.f - -The quick way -============== - -The quickest way to wrap the Fortran subroutine ``FIB`` to Python is -to run - -:: - - f2py -c fib1.f -m fib1 - -This command builds (see ``-c`` flag, execute ``f2py`` without -arguments to see the explanation of command line options) an extension -module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in -Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: - - >>> import Numeric - >>> import fib1 - >>> print fib1.fib.__doc__ - fib - Function signature: - fib(a,[n]) - Required arguments: - a : input rank-1 array('d') with bounds (n) - Optional arguments: - n := len(a) input int - - >>> a=Numeric.zeros(8,'d') - >>> fib1.fib(a) - >>> print a - [ 0. 1. 1. 2. 3. 5. 8. 13.] - -.. topic:: Comments - - * Note that F2PY found that the second argument ``n`` is the - dimension of the first array argument ``a``. Since by default all - arguments are input-only arguments, F2PY concludes that ``n`` can - be optional with the default value ``len(a)``. 
- - * One can use different values for optional ``n``:: - - >>> a1=Numeric.zeros(8,'d') - >>> fib1.fib(a1,6) - >>> print a1 - [ 0. 1. 1. 2. 3. 5. 0. 0.] - - but an exception is raised when it is incompatible with the input - array ``a``:: - - >>> fib1.fib(a,10) - fib:n=10 - Traceback (most recent call last): - File "", line 1, in ? - fib.error: (len(a)>=n) failed for 1st keyword n - >>> - - This demonstrates one of the useful features in F2PY, that it, - F2PY implements basic compatibility checks between related - arguments in order to avoid any unexpected crashes. - - * When a Numeric array, that is Fortran contiguous and has a typecode - corresponding to presumed Fortran type, is used as an input array - argument, then its C pointer is directly passed to Fortran. - - Otherwise F2PY makes a contiguous copy (with a proper typecode) of - the input array and passes C pointer of the copy to Fortran - subroutine. As a result, any possible changes to the (copy of) - input array have no effect to the original argument, as - demonstrated below:: - - >>> a=Numeric.ones(8,'i') - >>> fib1.fib(a) - >>> print a - [1 1 1 1 1 1 1 1] - - Clearly, this is not an expected behaviour. The fact that the - above example worked with ``typecode='d'`` is considered - accidental. - - F2PY provides ``intent(inplace)`` attribute that would modify - the attributes of an input array so that any changes made by - Fortran routine will be effective also in input argument. For example, - if one specifies ``intent(inplace) a`` (see below, how), then - the example above would read: - - >>> a=Numeric.ones(8,'i') - >>> fib1.fib(a) - >>> print a - [ 0. 1. 1. 2. 3. 5. 8. 13.] - - However, the recommended way to get changes made by Fortran - subroutine back to python is to use ``intent(out)`` attribute. It - is more efficient and a cleaner solution. - - * The usage of ``fib1.fib`` in Python is very similar to using - ``FIB`` in Fortran. 
However, using *in situ* output arguments in - Python indicates a poor style as there is no safety mechanism - in Python with respect to wrong argument types. When using Fortran - or C, compilers naturally discover any type mismatches during - compile time but in Python the types must be checked in - runtime. So, using *in situ* output arguments in Python may cause - difficult to find bugs, not to mention that the codes will be less - readable when all required type checks are implemented. - - Though the demonstrated way of wrapping Fortran routines to Python - is very straightforward, it has several drawbacks (see the comments - above). These drawbacks are due to the fact that there is no way - that F2PY can determine what is the acctual intention of one or the - other argument, is it input or output argument, or both, or - something else. So, F2PY conservatively assumes that all arguments - are input arguments by default. - - However, there are ways (see below) how to "teach" F2PY about the - true intentions (among other things) of function arguments; and then - F2PY is able to generate more Pythonic (more explicit, easier to - use, and less error prone) wrappers to Fortran functions. - -The smart way -============== - -Let's apply the steps of wrapping Fortran functions to Python one by -one. - -* First, we create a signature file from ``fib1.f`` by running - - :: - - f2py fib1.f -m fib2 -h fib1.pyf - - The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and - its contents is shown below. - - .. include:: fib1.pyf - :literal: - -* Next, we'll teach F2PY that the argument ``n`` is a input argument - (use ``intent(in)`` attribute) and that the result, i.e. the - contents of ``a`` after calling Fortran function ``FIB``, should be - returned to Python (use ``intent(out)`` attribute). In addition, an - array ``a`` should be created dynamically using the size given by - the input argument ``n`` (use ``depend(n)`` attribute to indicate - dependence relation). 
- - The content of a modified version of ``fib1.pyf`` (saved as - ``fib2.pyf``) is as follows: - - .. include:: fib2.pyf - :literal: - -* And finally, we build the extension module by running - - :: - - f2py -c fib2.pyf fib1.f - -In Python:: - - >>> import fib2 - >>> print fib2.fib.__doc__ - fib - Function signature: - a = fib(n) - Required arguments: - n : input int - Return objects: - a : rank-1 array('d') with bounds (n) - - >>> print fib2.fib(8) - [ 0. 1. 1. 2. 3. 5. 8. 13.] - -.. topic:: Comments - - * Clearly, the signature of ``fib2.fib`` now corresponds to the - intention of Fortran subroutine ``FIB`` more closely: given the - number ``n``, ``fib2.fib`` returns the first ``n`` Fibonacci numbers - as a Numeric array. Also, the new Python signature ``fib2.fib`` - rules out any surprises that we experienced with ``fib1.fib``. - - * Note that by default using single ``intent(out)`` also implies - ``intent(hide)``. Argument that has ``intent(hide)`` attribute - specified, will not be listed in the argument list of a wrapper - function. - -The quick and smart way -======================== - -The "smart way" of wrapping Fortran functions, as explained above, is -suitable for wrapping (e.g. third party) Fortran codes for which -modifications to their source codes are not desirable nor even -possible. - -However, if editing Fortran codes is acceptable, then the generation -of an intermediate signature file can be skipped in most -cases. Namely, F2PY specific attributes can be inserted directly to -Fortran source codes using the so-called F2PY directive. A F2PY -directive defines special comment lines (starting with ``Cf2py``, for -example) which are ignored by Fortran compilers but F2PY interprets -them as normal lines. - -Here is shown a `modified version of the example Fortran code`__, saved -as ``fib3.f``: - -.. 
include:: fib3.f - :literal: - -__ fib3.f - -Building the extension module can be now carried out in one command:: - - f2py -c -m fib3 fib3.f - -Notice that the resulting wrapper to ``FIB`` is as "smart" as in -previous case:: - - >>> import fib3 - >>> print fib3.fib.__doc__ - fib - Function signature: - a = fib(n) - Required arguments: - n : input int - Return objects: - a : rank-1 array('d') with bounds (n) - - >>> print fib3.fib(8) - [ 0. 1. 1. 2. 3. 5. 8. 13.] - - -================== - Signature file -================== - -The syntax specification for signature files (.pyf files) is borrowed -from the Fortran 90/95 language specification. Almost all Fortran -90/95 standard constructs are understood, both in free and fixed -format (recall that Fortran 77 is a subset of Fortran 90/95). F2PY -introduces also some extensions to Fortran 90/95 language -specification that help designing Fortran to Python interface, make it -more "Pythonic". - -Signature files may contain arbitrary Fortran code (so that Fortran -codes can be considered as signature files). F2PY silently ignores -Fortran constructs that are irrelevant for creating the interface. -However, this includes also syntax errors. So, be careful not making -ones;-). - -In general, the contents of signature files is case-sensitive. When -scanning Fortran codes and writing a signature file, F2PY lowers all -cases automatically except in multi-line blocks or when ``--no-lower`` -option is used. - -The syntax of signature files is overvied below. - -Python module block -===================== - -A signature file may contain one (recommended) or more ``python -module`` blocks. ``python module`` block describes the contents of -a Python/C extension module ``module.c`` that F2PY -generates. - -Exception: if ```` contains a substring ``__user__``, then -the corresponding ``python module`` block describes the signatures of -so-called call-back functions (see `Call-back arguments`_). 
- -A ``python module`` block has the following structure:: - - python module - []... - [ - interface - - - - end [interface] - ]... - [ - interface - module - [] - [] - end [module []] - end [interface] - ]... - end [python module []] - -Here brackets ``[]`` indicate a optional part, dots ``...`` indicate -one or more of a previous part. So, ``[]...`` reads zero or more of a -previous part. - - -Fortran/C routine signatures -============================= - -The signature of a Fortran routine has the following structure:: - - [] function | subroutine \ - [ ( [] ) ] [ result ( ) ] - [] - [] - [] - [] - [] - end [ function | subroutine [] ] - -From a Fortran routine signature F2PY generates a Python/C extension -function that has the following signature:: - - def ([,]): - ... - return - -The signature of a Fortran block data has the following structure:: - - block data [ ] - [] - [] - [] - [] - [] - end [ block data [] ] - -Type declarations -------------------- - - The definition of the ```` part - is - - :: - - [ [] :: ] - - where - - :: - - := byte | character [] - | complex [] | real [] - | double complex | double precision - | integer [] | logical [] - - := * - | ( [len=] [ , [kind=] ] ) - | ( kind= [ , len= ] ) - := * | ( [kind=] ) - - := [ [ * ] [ ( ) ] - | [ ( ) ] * ] - | [ / / | = ] \ - [ , ] - - and - - + ```` is a comma separated list of attributes_; - - + ```` is a comma separated list of dimension bounds; - - + ```` is a `C expression`__. - - + ```` may be negative integer for ``integer`` type - specifications. In such cases ``integer*`` represents - unsigned C integers. - -__ `C expressions`_ - - If an argument has no ````, its type is - determined by applying ``implicit`` rules to its name. - - -Statements ------------- - -Attribute statements: - - The ```` is - ```` without ````. - In addition, in an attribute statement one cannot use other - attributes, also ```` can be only a list of names. 
- -Use statements: - - The definition of the ```` part is - - :: - - use [ , | , ONLY : ] - - where - - :: - - := => [ , ] - - Currently F2PY uses ``use`` statement only for linking call-back - modules and ``external`` arguments (call-back functions), see - `Call-back arguments`_. - -Common block statements: - - The definition of the ```` part is - - :: - - common / / - - where - - :: - - := [ ( ) ] [ , ] - - One ``python module`` block should not contain two or more - ``common`` blocks with the same name. Otherwise, the latter ones are - ignored. The types of variables in ```` are defined - using ````. Note that the corresponding - ```` may contain array specifications; - then you don't need to specify these in ````. - -Other statements: - - The ```` part refers to any other Fortran language - constructs that are not described above. F2PY ignores most of them - except - - + ``call`` statements and function calls of ``external`` arguments - (`more details`__?); - -__ external_ - - + ``include`` statements - - :: - - include '' - include "" - - If a file ```` does not exist, the ``include`` - statement is ignored. Otherwise, the file ```` is - included to a signature file. ``include`` statements can be used - in any part of a signature file, also outside the Fortran/C - routine signature blocks. - - + ``implicit`` statements - - :: - - implicit none - implicit - - where - - :: - - := ( ) - - Implicit rules are used to deterimine the type specification of - a variable (from the first-letter of its name) if the variable - is not defined using ````. Default - implicit rule is given by - - :: - - implicit real (a-h,o-z,$_), integer (i-m) - - + ``entry`` statements - - :: - - entry [([])] - - F2PY generates wrappers to all entry names using the signature - of the routine block. - - Tip: ``entry`` statement can be used to describe the signature - of an arbitrary routine allowing F2PY to generate a number of - wrappers from only one routine block signature. 
There are few - restrictions while doing this: ``fortranname`` cannot be used, - ``callstatement`` and ``callprotoargument`` can be used only if - they are valid for all entry routines, etc. - - In addition, F2PY introduces the following statements: - - + ``threadsafe`` - Use ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block - around the call to Fortran/C function. - - + ``callstatement `` - Replace F2PY generated call statement to Fortran/C function with - ````. The wrapped Fortran/C function - is available as ``(*f2py_func)``. To raise an exception, set - ``f2py_success = 0`` in ````. - - + ``callprotoargument `` - When ``callstatement`` statement is used then F2PY may not - generate proper prototypes for Fortran/C functions (because - ```` may contain any function calls and F2PY has no way - to determine what should be the proper prototype). With this - statement you can explicitely specify the arguments of the - corresponding prototype:: - - extern FUNC_F(,)(); - - + ``fortranname []`` - You can use arbitrary ```` for a given Fortran/C - function. Then you have to specify - ```` with this statement. - - If ``fortranname`` statement is used without - ```` then a dummy wrapper is - generated. - - + ``usercode `` - When used inside ``python module`` block, then given C code - will be inserted to generated C/API source just before - wrapper function definitions. Here you can define arbitrary - C functions to be used in initialization of optional arguments, - for example. If ``usercode`` is used twise inside ``python - module`` block then the second multi-line block is inserted - after the definition of external routines. - - When used inside ````, then given C code will - be inserted to the corresponding wrapper function just after - declaring variables but before any C statements. So, ``usercode`` - follow-up can contain both declarations and C statements. 
- - When used inside the first ``interface`` block, then given C - code will be inserted at the end of the initialization - function of the extension module. Here you can modify extension - modules dictionary. For example, for defining additional - variables etc. - - + ``pymethoddef `` - Multiline block will be inserted to the definition of - module methods ``PyMethodDef``-array. It must be a - comma-separated list of C arrays (see `Extending and Embedding`__ - Python documentation for details). - ``pymethoddef`` statement can be used only inside - ``python module`` block. - - __ http://www.python.org/doc/current/ext/ext.html - -Attributes ------------- - -The following attributes are used by F2PY: - -``optional`` - The corresponding argument is moved to the end of ```` list. A default value for an optional argument can be - specified ````, see ``entitydecl`` definition. Note that - the default value must be given as a valid C expression. - - Note that whenever ```` is used, ``optional`` attribute - is set automatically by F2PY. - - For an optional array argument, all its dimensions must be bounded. - -``required`` - The corresponding argument is considered as a required one. This is - default. You need to specify ``required`` only if there is a need to - disable automatic ``optional`` setting when ```` is used. - - If Python ``None`` object is used as an required argument, the - argument is treated as optional. That is, in the case of array - argument, the memory is allocated. And if ```` is given, - the corresponding initialization is carried out. - -``dimension()`` - The corresponding variable is considered as an array with given - dimensions in ````. - -``intent()`` - This specifies the "intention" of the corresponding - argument. ```` is a comma separated list of the - following keys: - - + ``in`` - The argument is considered as an input-only argument. 
It means - that the value of the argument is passed to Fortran/C function and - that function is expected not to change the value of an argument. - - + ``inout`` - The argument is considered as an input/output or *in situ* - output argument. ``intent(inout)`` arguments can be only - "contiguous" Numeric arrays with proper type and size. Here - "contiguous" can be either in Fortran or C sense. The latter one - coincides with the contiguous concept used in Numeric and is - effective only if ``intent(c)`` is used. Fortran-contiguousness - is assumed by default. - - Using ``intent(inout)`` is generally not recommended, use - ``intent(in,out)`` instead. See also ``intent(inplace)`` attribute. - - + ``inplace`` - The argument is considered as an input/output or *in situ* - output argument. ``intent(inplace)`` arguments must be - Numeric arrays with proper size. If the type of an array is - not "proper" or the array is non-contiguous then the array - will be changed in-place to fix the type and make it contiguous. - - Using ``intent(inplace)`` is generally not recommended either. - For example, when slices have been taken from an - ``intent(inplace)`` argument then after in-place changes, - slices data pointers may point to unallocated memory area. - - + ``out`` - The argument is considered as an return variable. It is appended - to the ```` list. Using ``intent(out)`` - sets ``intent(hide)`` automatically, unless also - ``intent(in)`` or ``intent(inout)`` were used. - - By default, returned multidimensional arrays are - Fortran-contiguous. If ``intent(c)`` is used, then returned - multi-dimensional arrays are C-contiguous. - - + ``hide`` - The argument is removed from the list of required or optional - arguments. 
Typically ``intent(hide)`` is used with ``intent(out)`` - or when ```` completely determines the value of the - argument like in the following example:: - - integer intent(hide),depend(a) :: n = len(a) - real intent(in),dimension(n) :: a - - + ``c`` - The argument is treated as a C scalar or C array argument. In - the case of a scalar argument, its value is passed to C function - as a C scalar argument (recall that Fortran scalar arguments are - actually C pointer arguments). In the case of an array - argument, the wrapper function is assumed to treat - multi-dimensional arrays as C-contiguous arrays. - - There is no need to use ``intent(c)`` for one-dimensional - arrays, no matter if the wrapped function is either a Fortran or - a C function. This is because the concepts of Fortran- and - C-contiguousness overlap in one-dimensional cases. - - If ``intent(c)`` is used as a statement but without entity - declaration list, then F2PY adds ``intent(c)`` attribute to all - arguments. - - Also, when wrapping C functions, one must use ``intent(c)`` - attribute for ```` in order to disable Fortran - specific ``F_FUNC(..,..)`` macros. - - + ``cache`` - The argument is treated as a chunk of memory. No Fortran nor C - contiguousness checks are carried out. Using ``intent(cache)`` - makes sense only for array arguments, also in connection with - ``intent(hide)`` or ``optional`` attributes. - - + ``copy`` - Ensure that the original contents of ``intent(in)`` argument are - preserved. Typically used in connection with ``intent(in,out)`` - attribute. F2PY creates an optional argument - ``overwrite_`` with the default value ``0``. - - + ``overwrite`` - The original contents of the ``intent(in)`` argument may be - altered by the Fortran/C function. F2PY creates an optional - argument ``overwrite_`` with the default value - ``1``. - - + ``out=`` - Replace the return name with ```` in the ``__doc__`` - string of a wrapper function.
- - + ``callback`` - Construct an external function suitable for calling Python function - from Fortran. ``intent(callback)`` must be specified before the - corresponding ``external`` statement. If 'argument' is not in - argument list then it will be added to Python wrapper but only - initializing external function. - - Use ``intent(callback)`` in situations where a Fortran/C code - assumes that a user implements a function with given prototype - and links it to an executable. Don't use ``intent(callback)`` - if function appears in the argument list of a Fortran routine. - - With ``intent(hide)`` or ``optional`` attributes specified and - using a wrapper function without specifying the callback argument - in argument list then call-back function is looked in the - namespace of F2PY generated extension module where it can be - set as a module attribute by a user. - - + ``aux`` - Define auxiliary C variable in F2PY generated wrapper function. - Useful to save parameter values so that they can be accessed - in initialization expression of other variables. Note that - ``intent(aux)`` silently implies ``intent(c)``. - - The following rules apply: - - + If no ``intent(in | inout | out | hide)`` is specified, - ``intent(in)`` is assumed. - + ``intent(in,inout)`` is ``intent(in)``. - + ``intent(in,hide)`` or ``intent(inout,hide)`` is - ``intent(hide)``. - + ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or - ``intent(inout)`` is specified. - + If ``intent(copy)`` or ``intent(overwrite)`` is used, then an - additional optional argument is introduced with a name - ``overwrite_`` and a default value 0 or 1, respectively. - + ``intent(inout,inplace)`` is ``intent(inplace)``. - + ``intent(in,inplace)`` is ``intent(inplace)``. - + ``intent(hide)`` disables ``optional`` and ``required``. - -``check([])`` - Perform consistency check of arguments by evaluating - ````; if ```` returns 0, an exception - is raised. 
- - If ``check(..)`` is not used then F2PY generates few standard checks - (e.g. in a case of an array argument, check for the proper shape - and size) automatically. Use ``check()`` to disable checks generated - by F2PY. - -``depend([])`` - This declares that the corresponding argument depends on the values - of variables in the list ````. For example, ```` - may use the values of other arguments. Using information given by - ``depend(..)`` attributes, F2PY ensures that arguments are - initialized in a proper order. If ``depend(..)`` attribute is not - used then F2PY determines dependence relations automatically. Use - ``depend()`` to disable dependence relations generated by F2PY. - - When you edit dependence relations that were initially generated by - F2PY, be careful not to break the dependence relations of other - relevant variables. Another thing to watch out is cyclic - dependencies. F2PY is able to detect cyclic dependencies - when constructing wrappers and it complains if any are found. - -``allocatable`` - The corresponding variable is Fortran 90 allocatable array defined - as Fortran 90 module data. - -.. _external: - -``external`` - The corresponding argument is a function provided by user. The - signature of this so-called call-back function can be defined - - - in ``__user__`` module block, - - or by demonstrative (or real, if the signature file is a real Fortran - code) call in the ```` block. - - For example, F2PY generates from - - :: - - external cb_sub, cb_fun - integer n - real a(n),r - call cb_sub(a,n) - r = cb_fun(4) - - the following call-back signatures:: - - subroutine cb_sub(a,n) - real dimension(n) :: a - integer optional,check(len(a)>=n),depend(a) :: n=len(a) - end subroutine cb_sub - function cb_fun(e_4_e) result (r) - integer :: e_4_e - real :: r - end function cb_fun - - The corresponding user-provided Python function are then:: - - def cb_sub(a,[n]): - ... - return - def cb_fun(e_4_e): - ... 
- - return r - - See also ``intent(callback)`` attribute. - - ``parameter`` - The corresponding variable is a parameter and it must have a fixed - value. F2PY replaces all parameter occurrences by their - corresponding values. - - Extensions - ============ - - F2PY directives - ----------------- - - The so-called F2PY directives allow using F2PY signature file - constructs also in Fortran 77/90 source codes. With this feature you - can skip (almost) completely intermediate signature file generations - and apply F2PY directly to Fortran source codes. - - F2PY directive has the following form:: - - f2py ... - - where allowed comment characters for fixed and free format Fortran - codes are ``cC*!#`` and ``!``, respectively. Everything that follows - ``f2py`` is ignored by a compiler but read by F2PY as a - normal Fortran (non-comment) line: - - When F2PY finds a line with F2PY directive, the directive is first - replaced by 5 spaces and then the line is reread. - - For fixed format Fortran codes, ```` must be at the - first column of a file, of course. For free format Fortran codes, - F2PY directives can appear anywhere in a file. - - C expressions - -------------- - - C expressions are used in the following parts of signature files: - - * ```` of variable initialization; - * ```` of the ``check`` attribute; - * `` of the ``dimension`` attribute; - * ``callstatement`` statement, here also a C multi-line block can be used. - - A C expression may contain: - - * standard C constructs; - * functions from ``math.h`` and ``Python.h``; - * variables from the argument list, presumably initialized before - according to given dependence relations; - * the following CPP macros: - - ``rank()`` - Returns the rank of an array ````. - ``shape(,)`` - Returns the ````-th dimension of an array ````. - ``len()`` - Returns the length of an array ````. - ``size()`` - Returns the size of an array ````. - ``slen()`` - Returns the length of a string ````.
- -For initializing an array ````, F2PY generates a loop over -all indices and dimensions that executes the following -pseudo-statement:: - - (_i[0],_i[1],...) = ; - -where ``_i[]`` refers to the ````-th index value and that runs -from ``0`` to ``shape(,)-1``. - -For example, a function ``myrange(n)`` generated from the following -signature - -:: - - subroutine myrange(a,n) - fortranname ! myrange is a dummy wrapper - integer intent(in) :: n - real*8 intent(c,out),dimension(n),depend(n) :: a = _i[0] - end subroutine myrange - -is equivalent to ``Numeric.arange(n,typecode='d')``. - -.. topic:: Warning! - - F2PY may lower cases also in C expressions when scanning Fortran codes - (see ``--[no]-lower`` option). - -Multi-line blocks ------------------- - -A multi-line block starts with ``'''`` (triple single-quotes) and ends -with ``'''`` in some *strictly* subsequent line. Multi-line blocks can -be used only within .pyf files. The contents of a multi-line block can -be arbitrary (except that it cannot contain ``'''``) and no -transformations (e.g. lowering cases) are applied to it. - -Currently, multi-line blocks can be used in the following constructs: - -+ as a C expression of the ``callstatement`` statement; - -+ as a C type specification of the ``callprotoargument`` statement; - -+ as a C code block of the ``usercode`` statement; - -+ as a list of C arrays of the ``pymethoddef`` statement; - -+ as documentation string. - -================================== -Using F2PY bindings in Python -================================== - -All wrappers (to Fortran/C routines or to common blocks or to Fortran -90 module data) generated by F2PY are exposed to Python as ``fortran`` -type objects. Routine wrappers are callable ``fortran`` type objects -while wrappers to Fortran data have attributes referring to data -objects. 
- -All ``fortran`` type object have attribute ``_cpointer`` that contains -CObject referring to the C pointer of the corresponding Fortran/C -function or variable in C level. Such CObjects can be used as an -callback argument of F2PY generated functions to bypass Python C/API -layer of calling Python functions from Fortran or C when the -computational part of such functions is implemented in C or Fortran -and wrapped with F2PY (or any other tool capable of providing CObject -of a function). - -.. topic:: Example - - Consider a `Fortran 77 file`__ ``ftype.f``: - - .. include:: ftype.f - :literal: - - and build a wrapper using:: - - f2py -c ftype.f -m ftype - - __ ftype.f - - In Python: - - .. include:: ftype_session.dat - :literal: - - -Scalar arguments -================= - -In general, a scalar argument of a F2PY generated wrapper function can -be ordinary Python scalar (integer, float, complex number) as well as -an arbitrary sequence object (list, tuple, array, string) of -scalars. In the latter case, the first element of the sequence object -is passed to Fortran routine as a scalar argument. - -Note that when type-casting is required and there is possible loss of -information (e.g. when type-casting float to integer or complex to -float), F2PY does not raise any exception. In complex to real -type-casting only the real part of a complex number is used. - -``intent(inout)`` scalar arguments are assumed to be array objects in -order to *in situ* changes to be effective. It is recommended to use -arrays with proper type but also other types work. - -.. topic:: Example - - Consider the following `Fortran 77 code`__: - - .. include:: scalar.f - :literal: - - and wrap it using ``f2py -c -m scalar scalar.f``. - - __ scalar.f - - In Python: - - .. include:: scalar_session.dat - :literal: - - -String arguments -================= - -F2PY generated wrapper functions accept (almost) any Python object as -a string argument, ``str`` is applied for non-string objects. 
- - Exceptions are Numeric arrays that must have type code ``'c'`` or - ``'1'`` when used as string arguments. - - A string can have arbitrary length when using it as a string argument - to F2PY generated wrapper function. If the length is greater than - expected, the string is truncated. If the length is smaller than - expected, additional memory is allocated and filled with ``\0``. - - Because Python strings are immutable, an ``intent(inout)`` argument - expects an array version of a string in order to *in situ* changes to - be effective. - - .. topic:: Example - - Consider the following `Fortran 77 code`__: - - .. include:: string.f - :literal: - - and wrap it using ``f2py -c -m mystring string.f``. - - __ string.f - - Python session: - - .. include:: string_session.dat - :literal: - - Array arguments - ================ - - In general, array arguments of F2PY generated wrapper functions accept - arbitrary sequences that can be transformed to Numeric array objects. - An exception is ``intent(inout)`` array arguments that always must be - proper-contiguous and have proper type, otherwise an exception is - raised. Another exception is ``intent(inplace)`` array arguments whose - attributes will be changed in-situ if the argument has different type - than expected (see ``intent(inplace)`` attribute for more - information). - - In general, if a Numeric array is proper-contiguous and has a proper - type then it is directly passed to wrapped Fortran/C function. - Otherwise, an element-wise copy of an input array is made and the - copy, being proper-contiguous and with proper type, is used as an - array argument. - - There are two types of proper-contiguous Numeric arrays: - - * Fortran-contiguous arrays when data is stored column-wise, - i.e. indexing of data as stored in memory starts from the lowest - dimension; - * C-contiguous or simply contiguous arrays when data is stored - row-wise, i.e. indexing of data as stored in memory starts from the - highest dimension.
- -For one-dimensional arrays these notions coincide. - -For example, an 2x2 array ``A`` is Fortran-contiguous if its elements -are stored in memory in the following order:: - - A[0,0] A[1,0] A[0,1] A[1,1] - -and C-contiguous if the order is as follows:: - - A[0,0] A[0,1] A[1,0] A[1,1] - -To test whether an array is C-contiguous, use ``.iscontiguous()`` -method of Numeric arrays. To test for Fortran-contiguousness, all -F2PY generated extension modules provide a function -``has_column_major_storage()``. This function is equivalent to -``Numeric.transpose().iscontiguous()`` but more efficient. - -Usually there is no need to worry about how the arrays are stored in -memory and whether the wrapped functions, being either Fortran or C -functions, assume one or another storage order. F2PY automatically -ensures that wrapped functions get arguments with proper storage -order; the corresponding algorithm is designed to make copies of -arrays only when absolutely necessary. However, when dealing with very -large multi-dimensional input arrays with sizes close to the size of -the physical memory in your computer, then a care must be taken to use -always proper-contiguous and proper type arguments. - -To transform input arrays to column major storage order before passing -them to Fortran routines, use a function -``as_column_major_storage()`` that is provided by all F2PY -generated extension modules. - -.. topic:: Example - - Consider `Fortran 77 code`__: - - .. include:: array.f - :literal: - - and wrap it using ``f2py -c -m arr array.f -DF2PY_REPORT_ON_ARRAY_COPY=1``. - - __ array.f - - In Python: - - .. include:: array_session.dat - :literal: - -Call-back arguments -==================== - -F2PY supports calling Python functions from Fortran or C codes. - - -.. topic:: Example - - Consider the following `Fortran 77 code`__ - - .. include:: callback.f - :literal: - - and wrap it using ``f2py -c -m callback callback.f``. - - __ callback.f - - In Python: - - .. 
include:: callback_session.dat - :literal: - -In the above example F2PY was able to guess accurately the signature -of a call-back function. However, sometimes F2PY cannot establish the -signature as one would wish and then the signature of a call-back -function must be modified in the signature file manually. Namely, -signature files may contain special modules (the names of such modules -contain a substring ``__user__``) that collect various signatures of -call-back functions. Callback arguments in routine signatures have -attribute ``external`` (see also ``intent(callback)`` attribute). To -relate a callback argument and its signature in ``__user__`` module -block, use ``use`` statement as illustrated below. The same signature -of a callback argument can be referred in different routine -signatures. - -.. topic:: Example - - We use the same `Fortran 77 code`__ as in previous example but now - we'll pretend that F2PY was not able to guess the signatures of - call-back arguments correctly. First, we create an initial signature - file ``callback2.pyf`` using F2PY:: - - f2py -m callback2 -h callback2.pyf callback.f - - Then modify it as follows - - .. include:: callback2.pyf - :literal: - - Finally, build the extension module using:: - - f2py -c callback2.pyf callback.f - - An example Python session would be identical to the previous example - except that argument names would differ. - - __ callback.f - -Sometimes a Fortran package may require that users provide routines -that the package will use. F2PY can construct an interface to such -routines so that Python functions could be called from Fortran. - -.. topic:: Example - - Consider the following `Fortran 77 subroutine`__ that takes an array - and applies a function ``func`` to its elements. - - .. include:: calculate.f - :literal: - - __ calculate.f - - It is expected that function ``func`` has been defined - externally. 
In order to use a Python function as ``func``, it must - have an attribute ``intent(callback)`` (it must be specified before - the ``external`` statement). - - Finally, build an extension module using:: - - f2py -c -m foo calculate.f - - In Python: - - .. include:: calculate_session.dat - :literal: - -The function is included as an argument to the Python function call to -the FORTRAN subroutine even though it was NOT in the FORTRAN subroutine argument -list. The "external" refers to the C function generated by f2py, not the Python -function itself. The Python function must be supplied to the C function. - -The callback function may also be explicitly set in the module. -Then it is not necessary to pass the function in the argument list to -the FORTRAN function. This may be desired if the FORTRAN function calling -the Python callback function is itself called by another FORTRAN function. - -.. topic:: Example - - Consider the following `Fortran 77 subroutine`__. - - .. include:: extcallback.f - :literal: - - __ extcallback.f - - and wrap it using ``f2py -c -m pfromf extcallback.f``. - - In Python: - - .. include:: extcallback_session.dat - :literal: - -Resolving arguments to call-back functions ------------------------------------------- - -F2PY generated interface is very flexible with respect to call-back -arguments. For each call-back argument an additional optional -argument ``_extra_args`` is introduced by F2PY. This argument -can be used to pass extra arguments to user provided call-back -arguments. - -If a F2PY generated wrapper function expects the following call-back -argument:: - - def fun(a_1,...,a_n): - ... - return x_1,...,x_k - -but the following Python function - -:: - - def gun(b_1,...,b_m): - ...
- - return y_1,...,y_l - - is provided by a user, and in addition, - - :: - - fun_extra_args = (e_1,...,e_p) - - is used, then the following rules are applied when a Fortran or C - function calls the call-back argument ``gun``: - - * If ``p==0`` then ``gun(a_1,...,a_q)`` is called, here - ``q=min(m,n)``. - * If ``n+p<=m`` then ``gun(a_1,...,a_n,e_1,...,e_p)`` is called. - * If ``p<=m<n+p`` then ``gun(a_1,...,a_{m-p},e_1,...,e_p)`` is called. - * If ``p>m`` then ``gun(e_1,...,e_m)`` is called. - * If ``n+p`` is less than the number of required arguments to ``gun`` - then an exception is raised. - - The function ``gun`` may return any number of objects as a tuple. Then - the following rules are applied: - - * If ``k<l``, then ``y_{k+1},...,y_l`` are ignored. - * If ``k>l``, then only ``x_1,...,x_l`` are set. - - - - Common blocks - ============== - - F2PY generates wrappers to ``common`` blocks defined in a routine - signature block. Common blocks are visible by all Fortran codes linked - with the current extension module, but not to other extension modules - (this restriction is due to how Python imports shared libraries). In - Python, the F2PY wrappers to ``common`` blocks are ``fortran`` type - objects that have (dynamic) attributes related to data members of - common blocks. When accessed, these attributes return as Numeric array - objects (multi-dimensional arrays are Fortran-contiguous) that - directly link to data members in common blocks. Data members can be - changed by direct assignment or by in-place changes to the - corresponding array objects. - - .. topic:: Example - - Consider the following `Fortran 77 code`__ - - .. include:: common.f - :literal: - - and wrap it using ``f2py -c -m common common.f``. - - __ common.f - - In Python: - - .. include:: common_session.dat - :literal: - - Fortran 90 module data - ======================= - - The F2PY interface to Fortran 90 module data is similar to Fortran 77 - common blocks. - - .. topic:: Example - - Consider the following `Fortran 90 code`__ - - .. include:: moddata.f90 - :literal: - - and wrap it using ``f2py -c -m moddata moddata.f90``.
- - __ moddata.f90 - - In Python: - - .. include:: moddata_session.dat - :literal: - -Allocatable arrays -------------------- - -F2PY has basic support for Fortran 90 module allocatable arrays. - -.. topic:: Example - - Consider the following `Fortran 90 code`__ - - .. include:: allocarr.f90 - :literal: - - and wrap it using ``f2py -c -m allocarr allocarr.f90``. - - __ allocarr.f90 - - In Python: - - .. include:: allocarr_session.dat - :literal: - - -=========== -Using F2PY -=========== - -F2PY can be used either as a command line tool ``f2py`` or as a Python -module ``f2py2e``. - -Command ``f2py`` -================= - -When used as a command line tool, ``f2py`` has three major modes, -distinguished by the usage of ``-c`` and ``-h`` switches: - -1. To scan Fortran sources and generate a signature file, use - - :: - - f2py -h \ - [[ only: : ] \ - [ skip: : ]]... \ - [ ...] - - Note that a Fortran source file can contain many routines, and not - necessarily all routines are needed to be used from Python. So, you - can either specify which routines should be wrapped (in ``only: .. :`` - part) or which routines F2PY should ignored (in ``skip: .. :`` part). - - If ```` is specified as ``stdout`` then signatures - are send to standard output instead of a file. - - Among other options (see below), the following options can be used - in this mode: - - ``--overwrite-signature`` - Overwrite existing signature file. - -2. To construct an extension module, use - - :: - - f2py \ - [[ only: : ] \ - [ skip: : ]]... \ - [ ...] - - The constructed extension module is saved as - ``module.c`` to the current directory. - - Here ```` may also contain signature files. - Among other options (see below), the following options can be used - in this mode: - - ``--debug-capi`` - Add debugging hooks to the extension module. When using this - extension module, various information about the wrapper is printed - to standard output, for example, the values of variables, the - steps taken, etc. 
- - ``-include''`` - Add a CPP ``#include`` statement to the extension module source. - ```` should be given in one of the following forms:: - - "filename.ext" - - - The include statement is inserted just before the wrapper - functions. This feature enables using arbitrary C functions - (defined in ````) in F2PY generated wrappers. - - This option is deprecated. Use ``usercode`` statement to specify - C codelets directly in signature files. - - - ``--[no-]wrap-functions`` - - Create Fortran subroutine wrappers to Fortran functions. - ``--wrap-functions`` is default because it ensures maximum - portability and compiler independence. - - ``--include-paths ::..`` - Search include files from given directories. - - ``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. - For example, try ``f2py --help-link lapack_opt``. - -3. To build an extension module, use - - :: - - f2py -c \ - [[ only: : ] \ - [ skip: : ]]... \ - [ ] [ <.o, .a, .so files> ] - - If ```` contains a signature file, then a source for - an extension module is constructed, all Fortran and C sources are - compiled, and finally all object and library files are linked to the - extension module ``.so`` which is saved into the current - directory. - - If ```` does not contain a signature file, then an - extension module is constructed by scanning all Fortran source codes - for routine signatures. - - Among other options (see below) and options described in previous - mode, the following options can be used in this mode: - - ``--help-fcompiler`` - List available Fortran compilers. - ``--help-compiler`` [deprecated] - List available Fortran compilers. - ``--fcompiler=`` - Specify Fortran compiler type by vendor.
- - ``--f77exec=`` - Specify the path to F77 compiler - ``--fcompiler-exec=`` [deprecated] - Specify the path to F77 compiler - ``--f90exec=`` - Specify the path to F90 compiler - ``--f90compiler-exec=`` [deprecated] - Specify the path to F90 compiler - - ``--f77flags=`` - Specify F77 compiler flags - ``--f90flags=`` - Specify F90 compiler flags - ``--opt=`` - Specify optimization flags - ``--arch=`` - Specify architecture specific optimization flags - ``--noopt`` - Compile without optimization - ``--noarch`` - Compile without arch-dependent optimization - ``--debug`` - Compile with debugging information - - ``-l`` - Use the library ```` when linking. - ``-D[=]`` - Define macro ```` as ````. - ``-U`` - Define macro ```` - ``-I`` - Append directory ```` to the list of directories searched for - include files. - ``-L`` - Add directory ```` to the list of directories to be searched - for ``-l``. - - ``--link-`` - - Link extension module with the specified resource as defined by - ``numpy_distutils/system_info.py``. E.g. to link with optimized - LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use - ``--link-lapack_opt``. See also ``--help-link`` switch. - - When building an extension module, a combination of the following - macros may be required for non-gcc Fortran compilers:: - - -DPREPEND_FORTRAN - -DNO_APPEND_FORTRAN - -DUPPERCASE_FORTRAN - - To test the performance of F2PY generated interfaces, use - ``-DF2PY_REPORT_ATEXIT``. Then a report of various timings is - printed out at the exit of Python. This feature may not work on - all platforms, currently only Linux platform is supported. - - To see whether F2PY generated interface performs copies of array - arguments, use ``-DF2PY_REPORT_ON_ARRAY_COPY=``. When the size - of an array argument is larger than ````, a message about - the copying is sent to ``stderr``. - -Other options: - -``-m `` - Name of an extension module. Default is ``untitled``. Don't use this option - if a signature file (*.pyf) is used. -``--[no-]lower`` - Do [not] lower the cases in ````. By default, - ``--lower`` is assumed with ``-h`` switch, and ``--no-lower`` - without the ``-h`` switch. -``--build-dir `` - All F2PY generated files are created in ````. Default is - ``tempfile.mktemp()``. -``--quiet`` - Run quietly. -``--verbose`` - Run with extra verbosity. -``-v`` - Print f2py version ID and exit. - -Execute ``f2py`` without any options to get an up-to-date list of -available options. - -Python module ``f2py2e`` -========================= - -.. topic:: Warning - - The current Python interface to ``f2py2e`` module is not mature and - may change in the future depending on users' needs.
- -The following functions are provided by the ``f2py2e`` module: - -``run_main()`` - Equivalent to running:: - - f2py - - where ``=string.join(,' ')``, but in Python. Unless - ``-h`` is used, this function returns a dictionary containing - information on generated modules and their dependencies on source - files. For example, the command ``f2py -m scalar scalar.f`` can be - executed from Python as follows - - .. include:: run_main_session.dat - :literal: - - You cannot build extension modules with this function, that is, - using ``-c`` is not allowed. Use ``compile`` command instead, see - below. - -``compile(source, modulename='untitled', extra_args='', verbose=1, source_fn=None)`` - - Build extension module from Fortran 77 source string ``source``. - Return 0 if successful. - Note that this function actually calls ``f2py -c ..`` from shell to - ensure safety of the current Python process. - For example, - - .. include:: compile_session.dat - :literal: - -========================== -Using ``numpy_distutils`` -========================== - -``numpy_distutils`` is part of the SciPy_ project and aims to extend -standard Python ``distutils`` to deal with Fortran sources and F2PY -signature files, e.g. compile Fortran sources, call F2PY to construct -extension modules, etc. - -.. topic:: Example - - Consider the following `setup file`__: - - .. include:: setup_example.py - :literal: - - Running - - :: - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. - - __ setup_example.py - -``numpy_distutils`` extends ``distutils`` with the following features: - -* ``Extension`` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and then the name of an Extension module must - match with the ```` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. 
- - If ``sources`` does not contain a signature files, then F2PY is used - to scan Fortran source files for routine signatures to construct the - wrappers to Fortran codes. - - Additional options to F2PY process can be given using ``Extension`` - class argument ``f2py_options``. - -``numpy_distutils`` 0.2.2 and up -================================ - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options - - as well as ``build_ext`` and ``build_clib`` commands are enhanced - to support Fortran sources. - - Run - - :: - - python config_fc build_src build_ext --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, then one - can choose different Fortran compilers by using ``build_ext`` - command option ``--fcompiler=``. Here ```` can be one of the - following names:: - - absoft sun mips intel intelv intele intelev nag compaq compaqv gnu vast pg hpux - - See ``numpy_distutils/fcompiler.py`` for up-to-date list of - supported compilers or run - - :: - - f2py -c --help-fcompiler - -``numpy_distutils`` pre 0.2.2 -============================= - -* The following new ``distutils`` commands are defined: - - ``build_flib`` - to build f77/f90 libraries used by Python extensions; - ``run_f2py`` - to construct Fortran wrapper extension modules. - - Run - - :: - - python build_flib run_f2py --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, then one - can choose different Fortran compilers either by using ``build_flib`` - command option ``--fcompiler=`` or by defining environment - variable ``FC_VENDOR=``. 
Here ```` can be one of the - following names:: - - Absoft Sun SGI Intel Itanium NAG Compaq Digital Gnu VAST PG - - See ``numpy_distutils/command/build_flib.py`` for up-to-date list of - supported compilers. - -====================== - Extended F2PY usages -====================== - -Adding self-written functions to F2PY generated modules -======================================================= - -Self-written Python C/API functions can be defined inside -signature files using ``usercode`` and ``pymethoddef`` statements -(they must be used inside the ``python module`` block). For -example, the following signature file ``spam.pyf`` - -.. include:: spam.pyf - :literal: - -wraps the C library function ``system()``:: - - f2py -c spam.pyf - -In Python: - -.. include:: spam_session.dat - :literal: - -Modifying the dictionary of a F2PY generated module -=================================================== - -The following example illustrates how to add an user-defined -variables to a F2PY generated extension module. Given the following -signature file - -.. include:: var.pyf - :literal: - -compile it as ``f2py -c var.pyf``. - -Notice that the second ``usercode`` statement must be defined inside -an ``interface`` block and where the module dictionary is available through -the variable ``d`` (see ``f2py var.pyf``-generated ``varmodule.c`` for -additional details). - -In Python: - -.. include:: var_session.dat - :literal: - -.. References - ========== -.. _F2PY: http://cens.ioc.ee/projects/f2py2e/ -.. _Python: http://www.python.org/ -.. _NumPy: http://www.numpy.org/ -.. 
_SciPy: http://www.numpy.org/ diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/moddata.f90 b/numpy-1.6.2/numpy/f2py/docs/usersguide/moddata.f90 deleted file mode 100644 index 0e98f04674..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/moddata.f90 +++ /dev/null @@ -1,18 +0,0 @@ -module mod - integer i - integer :: x(4) - real, dimension(2,3) :: a - real, allocatable, dimension(:,:) :: b -contains - subroutine foo - integer k - print*, "i=",i - print*, "x=[",x,"]" - print*, "a=[" - print*, "[",a(1,1),",",a(1,2),",",a(1,3),"]" - print*, "[",a(2,1),",",a(2,2),",",a(2,3),"]" - print*, "]" - print*, "Setting a(1,2)=a(1,2)+3" - a(1,2) = a(1,2)+3 - end subroutine foo -end module mod diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/moddata_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/moddata_session.dat deleted file mode 100644 index 1ec212f8bd..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/moddata_session.dat +++ /dev/null @@ -1,23 +0,0 @@ ->>> import moddata ->>> print moddata.mod.__doc__ -i - 'i'-scalar -x - 'i'-array(4) -a - 'f'-array(2,3) -foo - Function signature: - foo() - - ->>> moddata.mod.i = 5 ->>> moddata.mod.x[:2] = [1,2] ->>> moddata.mod.a = [[1,2,3],[4,5,6]] ->>> moddata.mod.foo() - i= 5 - x=[ 1 2 0 0 ] - a=[ - [ 1.000000 , 2.000000 , 3.000000 ] - [ 4.000000 , 5.000000 , 6.000000 ] - ] - Setting a(1,2)=a(1,2)+3 ->>> moddata.mod.a # a is Fortran-contiguous -array([[ 1., 5., 3.], - [ 4., 5., 6.]],'f') diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/run_main_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/run_main_session.dat deleted file mode 100644 index 29ecc3dfe4..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/run_main_session.dat +++ /dev/null @@ -1,14 +0,0 @@ ->>> import f2py2e ->>> r=f2py2e.run_main(['-m','scalar','docs/usersguide/scalar.f']) -Reading fortran codes... - Reading file 'docs/usersguide/scalar.f' -Post-processing... - Block: scalar - Block: FOO -Building modules... 
- Building module "scalar"... - Wrote C/API module "scalar" to file "./scalarmodule.c" ->>> print r -{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py2e/src/fortranobject.h'], - 'csrc': ['./scalarmodule.c', - '/home/users/pearu/src_cvs/f2py2e/src/fortranobject.c']}} diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/scalar.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/scalar.f deleted file mode 100644 index c22f639edb..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/scalar.f +++ /dev/null @@ -1,12 +0,0 @@ -C FILE: SCALAR.F - SUBROUTINE FOO(A,B) - REAL*8 A, B -Cf2py intent(in) a -Cf2py intent(inout) b - PRINT*, " A=",A," B=",B - PRINT*, "INCREMENT A AND B" - A = A + 1D0 - B = B + 1D0 - PRINT*, "NEW A=",A," B=",B - END -C END OF FILE SCALAR.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/scalar_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/scalar_session.dat deleted file mode 100644 index 4fe8c03b1d..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/scalar_session.dat +++ /dev/null @@ -1,21 +0,0 @@ ->>> import scalar ->>> print scalar.foo.__doc__ -foo - Function signature: - foo(a,b) -Required arguments: - a : input float - b : in/output rank-0 array(float,'d') - ->>> scalar.foo(2,3) - A= 2. B= 3. - INCREMENT A AND B - NEW A= 3. B= 4. ->>> import Numeric ->>> a=Numeric.array(2) # these are integer rank-0 arrays ->>> b=Numeric.array(3) ->>> scalar.foo(a,b) - A= 2. B= 3. - INCREMENT A AND B - NEW A= 3. B= 4. 
->>> print a,b # note that only b is changed in situ -2 4 \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/setup_example.py b/numpy-1.6.2/numpy/f2py/docs/usersguide/setup_example.py deleted file mode 100644 index e5f5e84413..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/setup_example.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# File: setup_example.py - -from numpy_distutils.core import Extension - -ext1 = Extension(name = 'scalar', - sources = ['scalar.f']) -ext2 = Extension(name = 'fib2', - sources = ['fib2.pyf','fib1.f']) - -if __name__ == "__main__": - from numpy_distutils.core import setup - setup(name = 'f2py_example', - description = "F2PY Users Guide examples", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - ext_modules = [ext1,ext2] - ) -# End of setup_example.py diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/spam.pyf b/numpy-1.6.2/numpy/f2py/docs/usersguide/spam.pyf deleted file mode 100644 index 21ea18b77f..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/spam.pyf +++ /dev/null @@ -1,19 +0,0 @@ -! 
-*- f90 -*- -python module spam - usercode ''' - static char doc_spam_system[] = "Execute a shell command."; - static PyObject *spam_system(PyObject *self, PyObject *args) - { - char *command; - int sts; - - if (!PyArg_ParseTuple(args, "s", &command)) - return NULL; - sts = system(command); - return Py_BuildValue("i", sts); - } - ''' - pymethoddef ''' - {"system", spam_system, METH_VARARGS, doc_spam_system}, - ''' -end python module spam diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/spam_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/spam_session.dat deleted file mode 100644 index 7f99d13f9a..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/spam_session.dat +++ /dev/null @@ -1,5 +0,0 @@ ->>> import spam ->>> status = spam.system('whoami') -pearu ->> status = spam.system('blah') -sh: line 1: blah: command not found \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/string.f b/numpy-1.6.2/numpy/f2py/docs/usersguide/string.f deleted file mode 100644 index 9246f02e78..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/string.f +++ /dev/null @@ -1,21 +0,0 @@ -C FILE: STRING.F - SUBROUTINE FOO(A,B,C,D) - CHARACTER*5 A, B - CHARACTER*(*) C,D -Cf2py intent(in) a,c -Cf2py intent(inout) b,d - PRINT*, "A=",A - PRINT*, "B=",B - PRINT*, "C=",C - PRINT*, "D=",D - PRINT*, "CHANGE A,B,C,D" - A(1:1) = 'A' - B(1:1) = 'B' - C(1:1) = 'C' - D(1:1) = 'D' - PRINT*, "A=",A - PRINT*, "B=",B - PRINT*, "C=",C - PRINT*, "D=",D - END -C END OF FILE STRING.F diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/string_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/string_session.dat deleted file mode 100644 index 64ebcb3f4a..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/string_session.dat +++ /dev/null @@ -1,27 +0,0 @@ ->>> import mystring ->>> print mystring.foo.__doc__ -foo - Function signature: - foo(a,b,c,d) -Required arguments: - a : input string(len=5) - b : in/output rank-0 array(string(len=5),'c') - c : input 
string(len=-1) - d : in/output rank-0 array(string(len=-1),'c') - ->>> import Numeric ->>> a=Numeric.array('123') ->>> b=Numeric.array('123') ->>> c=Numeric.array('123') ->>> d=Numeric.array('123') ->>> mystring.foo(a,b,c,d) - A=123 - B=123 - C=123 - D=123 - CHANGE A,B,C,D - A=A23 - B=B23 - C=C23 - D=D23 ->>> a.tostring(),b.tostring(),c.tostring(),d.tostring() -('123', 'B23', '123', 'D23') \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/var.pyf b/numpy-1.6.2/numpy/f2py/docs/usersguide/var.pyf deleted file mode 100644 index 8275ff3afe..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/var.pyf +++ /dev/null @@ -1,11 +0,0 @@ -! -*- f90 -*- -python module var - usercode ''' - int BAR = 5; - ''' - interface - usercode ''' - PyDict_SetItemString(d,"BAR",PyInt_FromLong(BAR)); - ''' - end interface -end python module diff --git a/numpy-1.6.2/numpy/f2py/docs/usersguide/var_session.dat b/numpy-1.6.2/numpy/f2py/docs/usersguide/var_session.dat deleted file mode 100644 index fb0f798bf8..0000000000 --- a/numpy-1.6.2/numpy/f2py/docs/usersguide/var_session.dat +++ /dev/null @@ -1,3 +0,0 @@ ->>> import var ->>> var.BAR -5 \ No newline at end of file diff --git a/numpy-1.6.2/numpy/f2py/f2py.1 b/numpy-1.6.2/numpy/f2py/f2py.1 deleted file mode 100644 index b9391e5920..0000000000 --- a/numpy-1.6.2/numpy/f2py/f2py.1 +++ /dev/null @@ -1,209 +0,0 @@ -.TH "F2PY" 1 -.SH NAME -f2py \- Fortran to Python interface generator -.SH SYNOPSIS -(1) To construct extension module sources: - -.B f2py -[] [[[only:]||[skip:]] ] [: ...] - -(2) To compile fortran files and build extension modules: - -.B f2py --c [, , ] - -(3) To generate signature files: - -.B f2py --h ...< same options as in (1) > -.SH DESCRIPTION -This program generates a Python C/API file (module.c) -that contains wrappers for given Fortran or C functions so that they -can be called from Python. -With the \-c option the corresponding -extension modules are built. 
-.SH OPTIONS -.TP -.B \-h -Write signatures of the fortran routines to file and -exit. You can then edit and use it instead of . If ==stdout then the signatures are printed to -stdout. -.TP -.B -Names of fortran routines for which Python C/API functions will be -generated. Default is all that are found in . -.TP -.B skip: -Ignore fortran functions that follow until `:'. -.TP -.B only: -Use only fortran functions that follow until `:'. -.TP -.B : -Get back to mode. -.TP -.B \-m -Name of the module; f2py generates a Python/C API file -module.c or extension module . Default is -\'untitled\'. -.TP -.B \-\-[no\-]lower -Do [not] lower the cases in . By default, \-\-lower is -assumed with \-h key, and \-\-no\-lower without \-h key. -.TP -.B \-\-build\-dir -All f2py generated files are created in . Default is tempfile.mktemp(). -.TP -.B \-\-overwrite\-signature -Overwrite existing signature file. -.TP -.B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is \-\-no\-latex\-doc. -.TP -.B \-\-short\-latex -Create 'incomplete' LaTeX document (without commands \\documentclass, -\\tableofcontents, and \\begin{document}, \\end{document}). -.TP -.B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is \-\-no\-rest\-doc. -.TP -.B \-\-debug\-capi -Create C/API code that reports the state of the wrappers during -runtime. Useful for debugging. -.TP -.B \-include\'\' -Add CPP #include statement to the C/API code. should be -in the format of either `"filename.ext"' or `'. As a -result will be included just before wrapper functions -part in the C/API code. The option is depreciated, use `usercode` -statement in signature files instead. -.TP -.B \-\-[no\-]wrap\-functions -Create Fortran subroutine wrappers to Fortran 77 -functions. \-\-wrap\-functions is default because it ensures maximum -portability/compiler independence. -.TP -.B \-\-help\-link [..] -List system resources found by system_info.py. [..] may contain -a list of resources names. 
See also \-\-link\- switch below. -.TP -.B \-\-quiet -Run quietly. -.TP -.B \-\-verbose -Run with extra verbosity. -.TP -.B \-v -Print f2py version ID and exit. -.TP -.B \-\-include_paths path1:path2:... -Search include files (that f2py will scan) from the given directories. -.SH "CONFIG_FC OPTIONS" -The following options are effective only when \-c switch is used. -.TP -.B \-\-help-compiler -List available Fortran compilers [DEPRECIATED]. -.TP -.B \-\-fcompiler= -Specify Fortran compiler type by vendor. -.TP -.B \-\-compiler= -Specify C compiler type (as defined by distutils) -.TP -.B \-\-fcompiler-exec= -Specify the path to F77 compiler [DEPRECIATED]. -.TP -.B \-\-f90compiler\-exec= -Specify the path to F90 compiler [DEPRECIATED]. -.TP -.B \-\-help\-fcompiler -List available Fortran compilers and exit. -.TP -.B \-\-f77exec= -Specify the path to F77 compiler. -.TP -.B \-\-f90exec= -Specify the path to F90 compiler. -.TP -.B \-\-f77flags="..." -Specify F77 compiler flags. -.TP -.B \-\-f90flags="..." -Specify F90 compiler flags. -.TP -.B \-\-opt="..." -Specify optimization flags. -.TP -.B \-\-arch="..." -Specify architecture specific optimization flags. -.TP -.B \-\-noopt -Compile without optimization. -.TP -.B \-\-noarch -Compile without arch-dependent optimization. -.TP -.B \-\-debug -Compile with debugging information. -.SH "EXTRA OPTIONS" -The following options are effective only when \-c switch is used. -.TP -.B \-\-link- -Link extension module with as defined by -numpy_distutils/system_info.py. E.g. to link with optimized LAPACK -libraries (vecLib on MacOSX, ATLAS elsewhere), use -\-\-link\-lapack_opt. See also \-\-help\-link switch. - -.TP -.B -L/path/to/lib/ -l -.TP -.B -D -U -I/path/to/include/ -.TP -.B .o .so .a - -.TP -.B -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN -DUNDERSCORE_G77 -Macros that might be required with non-gcc Fortran compilers. 
- -.TP -.B -DF2PY_REPORT_ATEXIT -To print out a performance report of F2PY interface when python -exits. Available for Linux. - -.TP -.B -DF2PY_REPORT_ON_ARRAY_COPY= -To send a message to stderr whenever F2PY interface makes a copy of an -array. Integer sets the threshold for array sizes when a message -should be shown. - -.SH REQUIREMENTS -Python 1.5.2 or higher (2.x is supported). - -Numerical Python 13 or higher (20.x,21.x,22.x,23.x are supported). - -Optional Numarray 0.9 or higher partially supported. - -numpy_distutils from Scipy (can be downloaded from F2PY homepage) -.SH "SEE ALSO" -python(1) -.SH BUGS -For instructions on reporting bugs, see - - http://cens.ioc.ee/projects/f2py2e/FAQ.html -.SH AUTHOR -Pearu Peterson -.SH "INTERNET RESOURCES" -Main website: http://cens.ioc.ee/projects/f2py2e/ - -User's Guide: http://cens.ioc.ee/projects/f2py2e/usersguide/ - -Mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/ - -Scipy website: http://www.numpy.org -.SH COPYRIGHT -Copyright (c) 1999, 2000, 2001, 2002, 2003, 2004, 2005 Pearu Peterson -.SH LICENSE -NumPy License -.SH VERSION -2.45.241 diff --git a/numpy-1.6.2/numpy/f2py/f2py2e.py b/numpy-1.6.2/numpy/f2py/f2py2e.py deleted file mode 100755 index 1d0631e8d0..0000000000 --- a/numpy-1.6.2/numpy/f2py/f2py2e.py +++ /dev/null @@ -1,596 +0,0 @@ -#!/usr/bin/env python -""" - -f2py2e - Fortran to Python C/API generator. 2nd Edition. - See __usage__ below. - -Copyright 1999--2011 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/05/06 08:31:19 $ -Pearu Peterson -""" - -import __version__ -f2py_version = __version__.version - -import sys -import os -import pprint -import types -import re -errmess=sys.stderr.write -#outmess=sys.stdout.write -show=pprint.pprint - -import crackfortran -import rules -import cb_rules -import auxfuncs -import cfuncs -import f90mod_rules - -outmess = auxfuncs.outmess - -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: - -1) To construct extension module sources: - - f2py [] [[[only:]||[skip:]] \\ - ] \\ - [: ...] - -2) To compile fortran files and build extension modules: - - f2py -c [, , ] - -3) To generate signature files: - - f2py -h ...< same options as in (1) > - -Description: This program generates a Python C/API file (module.c) - that contains wrappers for given fortran functions so that they - can be called from Python. With the -c option the corresponding - extension modules are built. - -Options: - - --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] - --2d-numeric Use f2py2e tool with Numeric support. - --2d-numarray Use f2py2e tool with Numarray support. - --g3-numpy Use 3rd generation f2py from the separate f2py package. - [NOT AVAILABLE YET] - - -h Write signatures of the fortran routines to file - and exit. You can then edit and use it instead - of . If ==stdout then the - signatures are printed to stdout. - Names of fortran routines for which Python C/API - functions will be generated. Default is all that are found - in . - Paths to fortran/signature files that will be scanned for - in order to determine their signatures. - skip: Ignore fortran functions that follow until `:'. - only: Use only fortran functions that follow until `:'. - : Get back to mode. - - -m Name of the module; f2py generates a Python/C API - file module.c or extension module . - Default is 'untitled'. - - --[no-]lower Do [not] lower the cases in . 
By default, - --lower is assumed with -h key, and --no-lower without -h key. - - --build-dir All f2py generated files are created in . - Default is tempfile.mktemp(). - - --overwrite-signature Overwrite existing signature file. - - --[no-]latex-doc Create (or not) module.tex. - Default is --no-latex-doc. - --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). - - --[no-]rest-doc Create (or not) module.rst. - Default is --no-rest-doc. - - --debug-capi Create C/API code that reports the state of the wrappers - during runtime. Useful for debugging. - - --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 - functions. --wrap-functions is default because it ensures - maximum portability/compiler independence. - - --include-paths ::... Search include files from the given - directories. - - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - - --quiet Run quietly. - --verbose Run with extra verbosity. - -v Print f2py version ID and exit. - - -numpy.distutils options (only effective with -c): - - --fcompiler= Specify Fortran compiler type by vendor - --compiler= Specify C compiler type (as defined by distutils) - - --help-fcompiler List available Fortran compilers and exit - --f77exec= Specify the path to F77 compiler - --f90exec= Specify the path to F90 compiler - --f77flags= Specify F77 compiler flags - --f90flags= Specify F90 compiler flags - --opt= Specify optimization flags - --arch= Specify architecture specific optimization flags - --noopt Compile without optimization - --noarch Compile without arch-dependent optimization - --debug Compile with debugging information - -Extra options (only effective with -c): - - --link- Link extension module with as defined - by numpy.distutils/system_info.py. E.g. 
to link - with optimized LAPACK libraries (vecLib on MacOSX, - ATLAS elsewhere), use --link-lapack_opt. - See also --help-link switch. - - -L/path/to/lib/ -l - -D -U - -I/path/to/include/ - .o .so .a - - Using the following macros may be required with non-gcc Fortran - compilers: - -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN - -DUNDERSCORE_G77 - - When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY - interface is printed out at exit (platforms: Linux). - - When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is - sent to stderr whenever F2PY interface makes a copy of an - array. Integer sets the threshold for array sizes when - a message should be shown. - -Version: %s -numpy Version: %s -Requires: Python 2.3 or higher. -License: NumPy license (see LICENSE.txt in the NumPy source code) -Copyright 1999 - 2011 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version) - -def scaninputline(inputline): - files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[] - f,f2,f3,f4,f5,f6,f7,f8,f9=1,0,0,0,0,0,0,0,0 - verbose = 1 - dolc=-1 - dolatexdoc = 0 - dorestdoc = 0 - wrapfuncs = 1 - buildpath = '.' 
- include_paths = [] - signsfile,modulename=None,None - options = {'buildpath':buildpath, - 'coutput': None, - 'f2py_wrapper_output': None} - for l in inputline: - if l=='': pass - elif l=='only:': f=0 - elif l=='skip:': f=-1 - elif l==':': f=1;f4=0 - elif l[:8]=='--debug-': debug.append(l[8:]) - elif l=='--lower': dolc=1 - elif l=='--build-dir': f6=1 - elif l=='--no-lower': dolc=0 - elif l=='--quiet': verbose = 0 - elif l=='--verbose': verbose += 1 - elif l=='--latex-doc': dolatexdoc=1 - elif l=='--no-latex-doc': dolatexdoc=0 - elif l=='--rest-doc': dorestdoc=1 - elif l=='--no-rest-doc': dorestdoc=0 - elif l=='--wrap-functions': wrapfuncs=1 - elif l=='--no-wrap-functions': wrapfuncs=0 - elif l=='--short-latex': options['shortlatex']=1 - elif l=='--coutput': f8=1 - elif l=='--f2py-wrapper-output': f9=1 - elif l=='--overwrite-signature': options['h-overwrite']=1 - elif l=='-h': f2=1 - elif l=='-m': f3=1 - elif l[:2]=='-v': - print f2py_version - sys.exit() - elif l=='--show-compilers': - f5=1 - elif l[:8]=='-include': - cfuncs.outneeds['userincludes'].append(l[9:-1]) - cfuncs.userincludes[l[9:-1]]='#include '+l[8:] - elif l[:15] in '--include_paths': - outmess('f2py option --include_paths is deprecated, use --include-paths instead.\n') - f7=1 - elif l[:15] in '--include-paths': - f7=1 - elif l[0]=='-': - errmess('Unknown option %s\n'%`l`) - sys.exit() - elif f2: f2=0;signsfile=l - elif f3: f3=0;modulename=l - elif f6: f6=0;buildpath=l - elif f7: f7=0;include_paths.extend(l.split(os.pathsep)) - elif f8: f8=0;options["coutput"]=l - elif f9: f9=0;options["f2py_wrapper_output"]=l - elif f==1: - try: - open(l).close() - files.append(l) - except IOError,detail: - errmess('IOError: %s. 
Skipping file "%s".\n'%(str(detail),l)) - elif f==-1: skipfuncs.append(l) - elif f==0: onlyfuncs.append(l) - if not f5 and not files and not modulename: - print __usage__ - sys.exit() - if not os.path.isdir(buildpath): - if not verbose: - outmess('Creating build directory %s'%(buildpath)) - os.mkdir(buildpath) - if signsfile: - signsfile = os.path.join(buildpath,signsfile) - if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: - errmess('Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n'%(signsfile)) - sys.exit() - - options['debug']=debug - options['verbose']=verbose - if dolc==-1 and not signsfile: options['do-lower']=0 - else: options['do-lower']=dolc - if modulename: options['module']=modulename - if signsfile: options['signsfile']=signsfile - if onlyfuncs: options['onlyfuncs']=onlyfuncs - if skipfuncs: options['skipfuncs']=skipfuncs - options['dolatexdoc'] = dolatexdoc - options['dorestdoc'] = dorestdoc - options['wrapfuncs'] = wrapfuncs - options['buildpath']=buildpath - options['include_paths']=include_paths - return files,options - -def callcrackfortran(files,options): - rules.options=options - funcs=[] - crackfortran.debug=options['debug'] - crackfortran.verbose=options['verbose'] - if 'module' in options: - crackfortran.f77modulename=options['module'] - if 'skipfuncs' in options: - crackfortran.skipfuncs=options['skipfuncs'] - if 'onlyfuncs' in options: - crackfortran.onlyfuncs=options['onlyfuncs'] - crackfortran.include_paths[:]=options['include_paths'] - crackfortran.dolowercase=options['do-lower'] - postlist=crackfortran.crackfortran(files) - if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n'%(options['signsfile'])) - pyf=crackfortran.crack2fortran(postlist) - if options['signsfile'][-6:]=='stdout': - sys.stdout.write(pyf) - else: - f=open(options['signsfile'],'w') - f.write(pyf) - f.close() - if options["coutput"] is None: - for mod in postlist: - mod["coutput"] = "%smodule.c" % 
mod["name"] - else: - for mod in postlist: - mod["coutput"] = options["coutput"] - if options["f2py_wrapper_output"] is None: - for mod in postlist: - mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] - else: - for mod in postlist: - mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] - return postlist - -def buildmodules(lst): - cfuncs.buildcfuncs() - outmess('Building modules...\n') - modules,mnames,isusedby=[],[],{} - for i in range(len(lst)): - if '__user__' in lst[i]['name']: - cb_rules.buildcallbacks(lst[i]) - else: - if 'use' in lst[i]: - for u in lst[i]['use'].keys(): - if u not in isusedby: - isusedby[u]=[] - isusedby[u].append(lst[i]['name']) - modules.append(lst[i]) - mnames.append(lst[i]['name']) - ret = {} - for i in range(len(mnames)): - if mnames[i] in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]])))) - else: - um=[] - if 'use' in modules[i]: - for u in modules[i]['use'].keys(): - if u in isusedby and u in mnames: - um.append(modules[mnames.index(u)]) - else: - outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i],u)) - ret[mnames[i]] = {} - dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um)) - return ret - -def dict_append(d_out,d_in): - for (k,v) in d_in.items(): - if k not in d_out: - d_out[k] = [] - if type(v) is types.ListType: - d_out[k] = d_out[k] + v - else: - d_out[k].append(v) - -def run_main(comline_list): - """Run f2py as if string.join(comline_list,' ') is used as a command line. - In case of using -h flag, return None. 
- """ - if sys.version_info[0] >= 3: - import imp - imp.reload(crackfortran) - else: - reload(crackfortran) - f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__)) - fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h') - fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c') - files,options=scaninputline(comline_list) - auxfuncs.options=options - postlist=callcrackfortran(files,options) - isusedby={} - for i in range(len(postlist)): - if 'use' in postlist[i]: - for u in postlist[i]['use'].keys(): - if u not in isusedby: - isusedby[u]=[] - isusedby[u].append(postlist[i]['name']) - for i in range(len(postlist)): - if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']: - if postlist[i]['name'] in isusedby: - #if not quiet: - outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']])))) - if 'signsfile' in options: - if options['verbose']>1: - outmess('Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile'])) - return - for i in range(len(postlist)): - if postlist[i]['block']!='python module': - if 'python module' not in options: - errmess('Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError,'All blocks must be python module blocks but got %s'%(`postlist[i]['block']`) - auxfuncs.debugoptions=options['debug'] - f90mod_rules.options=options - auxfuncs.wrapfuncs=options['wrapfuncs'] - - ret=buildmodules(postlist) - - for mn in ret.keys(): - dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc}) - return ret - -def filter_files(prefix,suffix,files,remove_prefix=None): - """ - Filter files by prefix and suffix. 
- """ - filtered,rest = [],[] - match = re.compile(prefix+r'.*'+suffix+r'\Z').match - if remove_prefix: - ind = len(prefix) - else: - ind = 0 - for file in [x.strip() for x in files]: - if match(file): filtered.append(file[ind:]) - else: rest.append(file) - return filtered,rest - -def get_prefix(module): - p = os.path.dirname(os.path.dirname(module.__file__)) - return p - -def run_compile(): - """ - Do it all in one call! - """ - import tempfile - - i = sys.argv.index('-c') - del sys.argv[i] - - remove_build_dir = 0 - try: i = sys.argv.index('--build-dir') - except ValueError: i=None - if i is not None: - build_dir = sys.argv[i+1] - del sys.argv[i+1] - del sys.argv[i] - else: - remove_build_dir = 1 - build_dir = os.path.join(tempfile.mktemp()) - - sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv) - if sysinfo_flags: - sysinfo_flags = [f[7:] for f in sysinfo_flags] - - f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv) - f2py_flags2 = [] - fl = 0 - for a in sys.argv[1:]: - if a in ['only:','skip:']: - fl = 1 - elif a==':': - fl = 0 - if fl or a==':': - f2py_flags2.append(a) - if f2py_flags2 and f2py_flags2[-1]!=':': - f2py_flags2.append(':') - f2py_flags.extend(f2py_flags2) - - sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv) - - flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv) - fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv) - - if 1: - del_list = [] - for s in flib_flags: - v = '--fcompiler=' - if s[:len(v)]==v: - from 
numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = fcompiler.fcompiler_class.keys() - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print 'Unknown vendor: "%s"' % (s[len(v):]) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue - for s in del_list: - i = flib_flags.index(s) - del flib_flags[i] - assert len(flib_flags)<=2,`flib_flags` - setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:]) - sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv) - if '--quiet' in f2py_flags: - setup_flags.append('--quiet') - - modulename = 'untitled' - sources = sys.argv[1:] - - for optname in ['--include_paths', '--include-paths']: - if optname in sys.argv: - i = sys.argv.index (optname) - f2py_flags.extend (sys.argv[i:i+2]) - del sys.argv[i+1],sys.argv[i] - sources = sys.argv[1:] - - if '-m' in sys.argv: - i = sys.argv.index('-m') - modulename = sys.argv[i+1] - del sys.argv[i+1],sys.argv[i] - sources = sys.argv[1:] - else: - from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources) - sources = pyf_files + sources - for f in pyf_files: - modulename = get_f2py_modulename(f) - if modulename: - break - - extra_objects, sources = filter_files('','[.](o|a|so)',sources) - include_dirs, sources = filter_files('-I','',sources,remove_prefix=1) - library_dirs, sources = filter_files('-L','',sources,remove_prefix=1) - libraries, sources = filter_files('-l','',sources,remove_prefix=1) - undef_macros, sources = filter_files('-U','',sources,remove_prefix=1) - define_macros, sources = filter_files('-D','',sources,remove_prefix=1) - using_numarray = 0 - using_numeric = 0 - for i in range(len(define_macros)): - name_value = define_macros[i].split('=',1) - if len(name_value)==1: - name_value.append(None) - if 
len(name_value)==2: - define_macros[i] = tuple(name_value) - else: - print 'Invalid use of -D:',name_value - - from numpy.distutils.system_info import get_info - - num_include_dir = None - num_info = {} - #import numpy - #n = 'numpy' - #p = get_prefix(numpy) - #from numpy.distutils.misc_util import get_numpy_include_dirs - #num_info = {'include_dirs': get_numpy_include_dirs()} - - if num_info: - include_dirs.extend(num_info.get('include_dirs',[])) - - from numpy.distutils.core import setup,Extension - ext_args = {'name':modulename,'sources':sources, - 'include_dirs': include_dirs, - 'library_dirs': library_dirs, - 'libraries': libraries, - 'define_macros': define_macros, - 'undef_macros': undef_macros, - 'extra_objects': extra_objects, - 'f2py_options': f2py_flags, - } - - if sysinfo_flags: - from numpy.distutils.misc_util import dict_append - for n in sysinfo_flags: - i = get_info(n) - if not i: - outmess('No %s resources found in system'\ - ' (try `f2py --help-link`)\n' % (`n`)) - dict_append(ext_args,**i) - - ext = Extension(**ext_args) - sys.argv = [sys.argv[0]] + setup_flags - sys.argv.extend(['build', - '--build-temp',build_dir, - '--build-base',build_dir, - '--build-platlib','.']) - if fc_flags: - sys.argv.extend(['config_fc']+fc_flags) - if flib_flags: - sys.argv.extend(['build_ext']+flib_flags) - - setup(ext_modules = [ext]) - - if remove_build_dir and os.path.exists(build_dir): - import shutil - outmess('Removing build directory %s\n'%(build_dir)) - shutil.rmtree(build_dir) - -def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - from numpy.distutils.system_info import show_all - show_all() - return - if '-c' in sys.argv[1:]: - run_compile() - else: - run_main(sys.argv[1:]) - -#if __name__ == "__main__": -# main() - - -# EOF diff --git a/numpy-1.6.2/numpy/f2py/f2py_testing.py b/numpy-1.6.2/numpy/f2py/f2py_testing.py deleted file mode 100644 index 0c78f35946..0000000000 --- a/numpy-1.6.2/numpy/f2py/f2py_testing.py +++ /dev/null 
@@ -1,44 +0,0 @@ -import sys -import re - -from numpy.testing.utils import jiffies, memusage - -def cmdline(): - m=re.compile(r'\A\d+\Z') - args = [] - repeat = 1 - for a in sys.argv[1:]: - if m.match(a): - repeat = eval(a) - else: - args.append(a) - f2py_opts = ' '.join(args) - return repeat,f2py_opts - -def run(runtest,test_functions,repeat=1): - l = [(t,repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] - #l = [(t,'') for t in test_functions] - start_memusage = memusage() - diff_memusage = None - start_jiffies = jiffies() - i = 0 - while i -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/02/03 19:30:23 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.27 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import numpy as np -import capi_maps -import func2subr -from crackfortran import undo_rmbadname, undo_rmbadname1 - -options={} - -def findf90modules(m): - if ismodule(m): return [m] - if not hasbody(m): return [] - ret = [] - for b in m['body']: - if ismodule(b): ret.append(b) - else: ret=ret+findf90modules(b) - return ret - -fgetdims1 = """\ - external f2pysetdata - logical ns - integer r,i,j - integer(%d) s(*) - ns = .FALSE. - if (allocated(d)) then - do i=1,r - if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. 
- end if - end do - if (ns) then - deallocate(d) - end if - end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize - -fgetdims2="""\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - end if - flag = 1 - call f2pysetdata(d,allocated(d))""" - -fgetdims2_sa="""\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - !s(r) must be equal to len(d(1)) - end if - flag = 2 - call f2pysetdata(d,allocated(d))""" - - -def buildhooks(pymod): - global fgetdims1,fgetdims2 - import rules - ret = {'f90modhooks':[],'initf90modhooks':[],'body':[], - 'need':['F_FUNC','arrayobject.h'], - 'separatorsfor':{'includes0':'\n','includes':'\n'}, - 'docs':['"Fortran 90/95 modules:\\n"'], - 'latexdoc':[]} - fhooks=[''] - def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0],line) - doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line) - for m in findf90modules(pymod): - sargs,fargs,efargs,modobjs,notvars,onlyvars=[],[],[],[],[m['name']],[] - sargsp = [] - ifargs = [] - mfargs = [] - if hasbody(m): - for b in m['body']: notvars.append(b['name']) - for n in m['vars'].keys(): - var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide,isprivate)(var)): - onlyvars.append(n) - mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n'%(m['name'])) - if onlyvars: - outmess('\t\t Variables: %s\n'%(' '.join(onlyvars))) - chooks=[''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line) - ihooks=[''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line) - - vrd=capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {'%(m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n'%(m['name'])) - if hasnote(m): - note = m['note'] - if type(note) is type([]): note='\n'.join(note) - dadd(note) - if onlyvars: - dadd('\\begin{description}') - for n in onlyvars: - var = m['vars'][n] - modobjs.append(n) - ct = capi_maps.getctype(var) - at = capi_maps.c2capi_map[ct] - dm = 
capi_maps.getarrdims(n,var) - dms = dm['dims'].replace('*','-1').strip() - dms = dms.replace(':','-1').strip() - if not dms: dms='-1' - use_fgetdims2 = fgetdims2 - if isstringarray(var): - if 'charselector' in var and 'len' in var['charselector']: - cadd('\t{"%s",%s,{{%s,%s}},%s},'\ - %(undo_rmbadname1(n),dm['rank'],dms,var['charselector']['len'],at)) - use_fgetdims2 = fgetdims2_sa - else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at)) - else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at)) - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,var))) - if hasnote(var): - note = var['note'] - if type(note) is type([]): note='\n'.join(note) - dadd('--- %s'%(note)) - if isallocatable(var): - fargs.append('f2py_%s_getdims_%s'%(m['name'],n)) - efargs.append(fargs[-1]) - sargs.append('void (*%s)(int*,int*,void(*)(char*,int*),int*)'%(n)) - sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'],n)) - fadd('subroutine %s(r,s,f2pysetdata,flag)'%(fargs[-1])) - fadd('use %s, only: d => %s\n'%(m['name'],undo_rmbadname1(n))) - fadd('integer flag\n') - fhooks[0]=fhooks[0]+fgetdims1 - dms = eval('range(1,%s+1)'%(dm['rank'])) - fadd(' allocate(d(%s))\n'%(','.join(map(lambda i:'s(%s)'%i,dms)))) - fhooks[0]=fhooks[0]+use_fgetdims2 - fadd('end subroutine %s'%(fargs[-1])) - else: - fargs.append(n) - sargs.append('char *%s'%(n)) - sargsp.append('char*') - iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],n)) - if onlyvars: - dadd('\\end{description}') - if hasbody(m): - for b in m['body']: - if not isroutine(b): - print 'Skipping',b['block'],b['name'] - continue - modobjs.append('%s()'%(b['name'])) - b['modulename'] = m['name'] - api,wrap=rules.buildapi(b) - if isfunction(b): - fhooks[0]=fhooks[0]+wrap - fargs.append('f2pywrap_%s_%s'%(m['name'],b['name'])) - #efargs.append(fargs[-1]) - ifargs.append(func2subr.createfuncwrapper(b,signature=1)) - else: - if wrap: - 
fhooks[0]=fhooks[0]+wrap - fargs.append('f2pywrap_%s_%s'%(m['name'],b['name'])) - ifargs.append(func2subr.createsubrwrapper(b,signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) - #if '--external-modroutines' in options and options['--external-modroutines']: - # outmess('\t\t\tapplying --external-modroutines for %s\n'%(b['name'])) - # efargs.append(fargs[-1]) - api['externroutines']=[] - ar=applyrules(api,vrd) - ar['docs']=[] - ar['docshort']=[] - ret=dictappend(ret,ar) - cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'],m['name'],b['name'],m['name'],b['name'])) - sargs.append('char *%s'%(b['name'])) - sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],b['name'])) - cadd('\t{NULL}\n};\n') - iadd('}') - ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'],','.join(sargs),ihooks[0]) - if '_' in m['name']: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'\ - %(F_FUNC,m['name'],m['name'].upper(),','.join(sargsp))) - iadd('static void f2py_init_%s(void) {'%(m['name'])) - iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC,m['name'],m['name'].upper(),m['name'])) - iadd('}\n') - ret['f90modhooks']=ret['f90modhooks']+chooks+ihooks - ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'],m['name'],m['name'])]+ret['initf90modhooks'] - fadd('') - fadd('subroutine f2pyinit%s(f2pysetupfunc)'%(m['name'])) - #fadd('use %s'%(m['name'])) - if mfargs: - for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s'%(m['name'],a)) - if ifargs: - fadd(' '.join(['interface']+ifargs)) - fadd('end interface') - fadd('external f2pysetupfunc') - if efargs: - for a in undo_rmbadname(efargs): - fadd('external %s'%(a)) - fadd('call f2pysetupfunc(%s)'%(','.join(undo_rmbadname(fargs)))) - fadd('end subroutine 
f2pyinit%s\n'%(m['name'])) - - dadd('\n'.join(ret['latexdoc']).replace(r'\subsection{',r'\subsubsection{')) - - ret['latexdoc']=[] - ret['docs'].append('"\t%s --- %s"'%(m['name'], - ','.join(undo_rmbadname(modobjs)))) - - ret['routine_defs']='' - ret['doc']=[] - ret['docshort']=[] - ret['latexdoc']=doc[0] - if len(ret['docs'])<=1: ret['docs']='' - return ret,fhooks[0] diff --git a/numpy-1.6.2/numpy/f2py/func2subr.py b/numpy-1.6.2/numpy/f2py/func2subr.py deleted file mode 100644 index f746108ad1..0000000000 --- a/numpy-1.6.2/numpy/f2py/func2subr.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2004/11/26 11:13:06 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.16 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import copy -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -def var2fixfortran(vars,a,fa=None,f90mode=None): - if fa is None: - fa = a - if a not in vars: - show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n'%a) - return '' - if 'typespec' not in vars[a]: - show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n'%a) - return '' - vardef=vars[a]['typespec'] - if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef,vars[a]['typename']) - selector={} - lk = '' - if 'kindselector' in vars[a]: - selector=vars[a]['kindselector'] - lk = 'kind' - elif 'charselector' in vars[a]: - selector=vars[a]['charselector'] - lk = 'len' - if '*' in selector: - if f90mode: - if selector['*'] in ['*',':','(*)']: - vardef='%s(len=*)'%(vardef) - else: - vardef='%s(%s=%s)'%(vardef,lk,selector['*']) - else: - if selector['*'] in ['*',':']: - 
vardef='%s*(%s)'%(vardef,selector['*']) - else: - vardef='%s*%s'%(vardef,selector['*']) - else: - if 'len' in selector: - vardef='%s(len=%s'%(vardef,selector['len']) - if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef,selector['kind']) - else: - vardef='%s)'%(vardef) - elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef,selector['kind']) - - vardef='%s %s'%(vardef,fa) - if 'dimension' in vars[a]: - vardef='%s(%s)'%(vardef,','.join(vars[a]['dimension'])) - return vardef - -def createfuncwrapper(rout,signature=0): - assert isfunction(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i,d in enumerate(v.get('dimension',[])): - if d==':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - def add(line,ret=ret): - ret[0] = '%s\n %s'%(ret[0],line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap'%(name) - - if newname not in vars: - vars[newname] = vars[name] - args = [newname]+rout['args'][1:] - else: - args = [newname]+rout['args'] - - l = var2fixfortran(vars,name,newname,f90mode) - return_char_star = 0 - if l[:13]=='character*(*)': - return_char_star = 1 - if f90mode: l = 'character(len=10)'+l[13:] - else: l = 'character*10'+l[13:] - charselect = vars[name]['charselector'] - if charselect.get('*','')=='(*)': - charselect['*'] = '10' - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs)) - if not signature: - add('use %s, only : %s'%(rout['modulename'],fortranname)) - else: - add('subroutine f2pywrap%s (%s)'%(name,sargs)) - if not need_interface: - add('external %s'%(fortranname)) - l = l + ', '+fortranname - if need_interface: - for line in 
rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): - add(line) - - args = args[1:] - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s'%(a)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - if isscalar(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - if isintent_in(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - add(var2fixfortran(vars,a,f90mode=f90mode)) - - add(l) - - if need_interface: - if f90mode: - # f90 module already defines needed interface - pass - else: - add('interface') - add(rout['saved_interface'].lstrip()) - add('end interface') - - sargs = ', '.join([a for a in args if a not in extra_args]) - - if not signature: - if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))'%(newname,fortranname,sargs)) - else: - add('%s = %s(%s)'%(newname,fortranname,sargs)) - if f90mode: - add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name)) - else: - add('end') - #print '**'*10 - #print ret[0] - #print '**'*10 - return ret[0] - -def createsubrwrapper(rout,signature=0): - assert issubroutine(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i,d in enumerate(v.get('dimension',[])): - if d==':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - def add(line,ret=ret): - ret[0] = '%s\n %s'%(ret[0],line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - - args = rout['args'] - - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs)) - if not signature: - 
add('use %s, only : %s'%(rout['modulename'],fortranname)) - else: - add('subroutine f2pywrap%s (%s)'%(name,sargs)) - if not need_interface: - add('external %s'%(fortranname)) - - if need_interface: - for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): - add(line) - - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s'%(a)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - if isscalar(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: continue - add(var2fixfortran(vars,a,f90mode=f90mode)) - - if need_interface: - if f90mode: - # f90 module already defines needed interface - pass - else: - add('interface') - add(rout['saved_interface'].lstrip()) - add('end interface') - - sargs = ', '.join([a for a in args if a not in extra_args]) - - if not signature: - add('call %s(%s)'%(fortranname,sargs)) - if f90mode: - add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name)) - else: - add('end') - #print '**'*10 - #print ret[0] - #print '**'*10 - return ret[0] - - -def assubr(rout): - if isfunction_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name,fortranname)) - rout = copy.copy(rout) - fname = name - rname = fname - if 'result' in rout: - rname = rout['result'] - rout['vars'][fname]=rout['vars'][rname] - fvar = rout['vars'][fname] - if not isintent_out(fvar): - if 'intent' not in fvar: - fvar['intent']=[] - fvar['intent'].append('out') - flag=1 - for i in fvar['intent']: - if i.startswith('out='): - flag = 0 - break - if flag: - fvar['intent'].append('out=%s' % (rname)) - rout['args'][:] = [fname] + rout['args'] - return rout,createfuncwrapper(rout) - if issubroutine_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine 
"%s"("%s")...\n'%(name,fortranname)) - rout = copy.copy(rout) - return rout,createsubrwrapper(rout) - return rout,'' - diff --git a/numpy-1.6.2/numpy/f2py/info.py b/numpy-1.6.2/numpy/f2py/info.py deleted file mode 100644 index 8beaba2280..0000000000 --- a/numpy-1.6.2/numpy/f2py/info.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Fortran to Python Interface Generator. - -""" - -postpone_import = True diff --git a/numpy-1.6.2/numpy/f2py/rules.py b/numpy-1.6.2/numpy/f2py/rules.py deleted file mode 100644 index 83f5811e59..0000000000 --- a/numpy-1.6.2/numpy/f2py/rules.py +++ /dev/null @@ -1,1446 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Here is a skeleton of a new wrapper function (13Dec2001): - -wrapper_function(args) - declarations - get_python_arguments, say, `a' and `b' - - get_a_from_python - if (successful) { - - get_b_from_python - if (successful) { - - callfortran - if (succesful) { - - put_a_to_python - if (succesful) { - - put_b_to_python - if (succesful) { - - buildvalue = ... - - } - - } - - } - - } - cleanup_b - - } - cleanup_a - - return buildvalue -""" -""" -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2005/08/30 08:58:42 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.129 $"[10:-1] - -import __version__ -f2py_version = __version__.version - -import pprint -import sys -import time -import types -import copy -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -import capi_maps -from capi_maps import * -import cfuncs -import common_rules -import use_rules -import f90mod_rules -import func2subr -options={} - -sepdict={} -#for k in ['need_cfuncs']: sepdict[k]=',' -for k in ['decl', - 'frompyobj', - 'cleanupfrompyobj', - 'topyarr','method', - 'pyobjfrom','closepyobjfrom', - 'freemem', - 'userincludes', - 'includes0','includes','typedefs','typedefs_generated', - 'cppmacros','cfuncs','callbacks', - 'latexdoc', - 'restdoc', - 'routine_defs','externroutines', - 'initf2pywraphooks', - 'commonhooks','initcommonhooks', - 'f90modhooks','initf90modhooks']: - sepdict[k]='\n' - -#################### Rules for C/API module ################# - -module_rules={ - 'modulebody':"""\ -/* File: #modulename#module.c - * This file is auto-generated with f2py (version:#f2py_version#). - * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . - * See http://cens.ioc.ee/projects/f2py2e/ - * Generation date: """+time.asctime(time.localtime(time.time()))+""" - * $R"""+"""evision:$ - * $D"""+"""ate:$ - * Do not edit this file directly unless you know what you are doing!!! 
- */ -#ifdef __cplusplus -extern \"C\" { -#endif - -"""+gentitle("See f2py2e/cfuncs.py: includes")+""" -#includes# -#includes0# - -"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+""" -static PyObject *#modulename#_error; -static PyObject *#modulename#_module; - -"""+gentitle("See f2py2e/cfuncs.py: typedefs")+""" -#typedefs# - -"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+""" -#typedefs_generated# - -"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+""" -#cppmacros# - -"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+""" -#cfuncs# - -"""+gentitle("See f2py2e/cfuncs.py: userincludes")+""" -#userincludes# - -"""+gentitle("See f2py2e/capi_rules.py: usercode")+""" -#usercode# - -/* See f2py2e/rules.py */ -#externroutines# - -"""+gentitle("See f2py2e/capi_rules.py: usercode1")+""" -#usercode1# - -"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+""" -#callbacks# - -"""+gentitle("See f2py2e/rules.py: buildapi")+""" -#body# - -"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+""" -#f90modhooks# - -"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+""" - -"""+gentitle("See f2py2e/common_rules.py: buildhooks")+""" -#commonhooks# - -"""+gentitle("See f2py2e/rules.py")+""" - -static FortranDataDef f2py_routine_defs[] = { -#routine_defs# -\t{NULL} -}; - -static PyMethodDef f2py_module_methods[] = { -#pymethoddef# -\t{NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { -\tPyModuleDef_HEAD_INIT, -\t"#modulename#", -\tNULL, -\t-1, -\tf2py_module_methods, -\tNULL, -\tNULL, -\tNULL, -\tNULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m -PyObject *PyInit_#modulename#(void) { -#else -#define RETVAL -PyMODINIT_FUNC init#modulename#(void) { -#endif -\tint i; -\tPyObject *m,*d, *s; -#if PY_VERSION_HEX >= 0x03000000 -\tm = #modulename#_module = PyModule_Create(&moduledef); -#else -\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); -#endif 
-\tPy_TYPE(&PyFortran_Type) = &PyType_Type; -\timport_array(); -\tif (PyErr_Occurred()) -\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} -\td = PyModule_GetDict(m); -\ts = PyString_FromString(\"$R"""+"""evision: $\"); -\tPyDict_SetItemString(d, \"__version__\", s); -#if PY_VERSION_HEX >= 0x03000000 -\ts = PyUnicode_FromString( -#else -\ts = PyString_FromString( -#endif -\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); -\tPyDict_SetItemString(d, \"__doc__\", s); -\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); -\tPy_DECREF(s); -\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) -\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i])); -#initf2pywraphooks# -#initf90modhooks# -#initcommonhooks# -#interface_usercode# - -#ifdef F2PY_REPORT_ATEXIT -\tif (! PyErr_Occurred()) -\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); -#endif - -\treturn RETVAL; -} -#ifdef __cplusplus -} -#endif -""", - 'separatorsfor':{'latexdoc':'\n\n', - 'restdoc':'\n\n'}, - 'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n', - '#modnote#\n', - '#latexdoc#'], - 'restdoc':['Module #modulename#\n'+'='*80, - '\n#restdoc#'] - } - -defmod_rules=[ - {'body':'/*eof body*/', - 'method':'/*eof method*/', - 'externroutines':'/*eof externroutines*/', - 'routine_defs':'/*eof routine_defs*/', - 'initf90modhooks':'/*eof initf90modhooks*/', - 'initf2pywraphooks':'/*eof initf2pywraphooks*/', - 'initcommonhooks':'/*eof initcommonhooks*/', - 'latexdoc':'', - 'restdoc':'', - 'modnote':{hasnote:'#note#',l_not(hasnote):''}, - } - ] - -routine_rules={ - 'separatorsfor':sepdict, - 'body':""" -#begintitle# -static char doc_#apiname#[] = \"\\\nFunction signature:\\n\\\n\t#docreturn##name#(#docsignatureshort#)\\n\\\n#docstrsigns#\"; -/* #declfortranroutine# */ -static PyObject 
*#apiname#(const PyObject *capi_self, - PyObject *capi_args, - PyObject *capi_keywds, - #functype# (*f2py_func)(#callprotoargument#)) { -\tPyObject * volatile capi_buildvalue = NULL; -\tvolatile int f2py_success = 1; -#decl# -\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; -#usercode# -#routdebugenter# -#ifdef F2PY_REPORT_ATEXIT -f2py_start_clock(); -#endif -\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ -\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ -\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; -#frompyobj# -/*end of frompyobj*/ -#ifdef F2PY_REPORT_ATEXIT -f2py_start_call_clock(); -#endif -#callfortranroutine# -if (PyErr_Occurred()) - f2py_success = 0; -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_call_clock(); -#endif -/*end of callfortranroutine*/ -\t\tif (f2py_success) { -#pyobjfrom# -/*end of pyobjfrom*/ -\t\tCFUNCSMESS(\"Building return value.\\n\"); -\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); -/*closepyobjfrom*/ -#closepyobjfrom# -\t\t} /*if (f2py_success) after callfortranroutine*/ -/*cleanupfrompyobj*/ -#cleanupfrompyobj# -\tif (capi_buildvalue == NULL) { -#routdebugfailure# -\t} else { -#routdebugleave# -\t} -\tCFUNCSMESS(\"Freeing memory.\\n\"); -#freemem# -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_clock(); -#endif -\treturn capi_buildvalue; -} -#endtitle# -""", - 'routine_defs':'#routine_def#', - 'initf2pywraphooks':'#initf2pywraphook#', - 'externroutines':'#declfortranroutine#', - 'doc':'#docreturn##name#(#docsignature#)', - 'docshort':'#docreturn##name#(#docsignatureshort#)', - 'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n', - 'need':['arrayobject.h','CFUNCSMESS','MINMAX'], - 'cppmacros':{debugcapi:'#define DEBUGCFUNCS'}, - 'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n', - """ -\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} -#routnote# - -#latexdocstrsigns# -"""], - 'restdoc':['Wrapped function ``#name#``\n'+'-'*80, - - ] - } 
- -################## Rules for C/API function ############## - -rout_rules=[ - { # Init - 'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n', - 'routdebugleave':'\n','routdebugfailure':'\n', - 'setjmpbuf':' || ', - 'docstrreq':'\n','docstropt':'\n','docstrout':'\n', - 'docstrcbs':'\n','docstrsigns':'\\n"\n"', - 'latexdocstrsigns':'\n', - 'latexdocstrreq':'\n','latexdocstropt':'\n', - 'latexdocstrout':'\n','latexdocstrcbs':'\n', - }, - 'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'', - 'docsign':'','docsignopt':'','decl':'/*decl*/', - 'freemem':'/*freemem*/', - 'docsignshort':'','docsignoptshort':'', - 'docstrsigns':'','latexdocstrsigns':'', - 'docstrreq':'Required arguments:', - 'docstropt':'Optional arguments:', - 'docstrout':'Return objects:', - 'docstrcbs':'Call-back functions:', - 'latexdocstrreq':'\\noindent Required arguments:', - 'latexdocstropt':'\\noindent Optional arguments:', - 'latexdocstrout':'\\noindent Return objects:', - 'latexdocstrcbs':'\\noindent Call-back functions:', - 'args_capi':'','keys_capi':'','functype':'', - 'frompyobj':'/*frompyobj*/', - 'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed - 'pyobjfrom':'/*pyobjfrom*/', - 'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed - 'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/', - 'routdebugenter':'/*routdebugenter*/', - 'routdebugfailure':'/*routdebugfailure*/', - 'callfortranroutine':'/*callfortranroutine*/', - 'argformat':'','keyformat':'','need_cfuncs':'', - 'docreturn':'','return':'','returnformat':'','rformat':'', - 'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'', - 'initf2pywraphook':'', - 'routnote':{hasnote:'--- #note#',l_not(hasnote):''}, - },{ - 'apiname':'f2py_rout_#modulename#_#name#', - 'pyname':'#modulename#.#name#', - 'decl':'', - '_check':l_not(ismoduleroutine) - },{ - 'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#', - 
'pyname':'#modulename#.#f90modulename#.#name#', - 'decl':'', - '_check':ismoduleroutine - },{ # Subroutine - 'functype':'void', - 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);', - ismoduleroutine:'', - isdummyroutine:'' - }, - 'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'}, - 'callfortranroutine':[ - {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, - {hasexternals:"""\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement:'''\t\t\t\t#callstatement#; -\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'}, - {isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'}, - {hasexternals:"""\t\t}"""} - ], - '_check':l_and(issubroutine,l_not(issubroutine_wrap)), - },{ # Wrapped function - 'functype':'void', - 'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine:'', - }, - - 'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char 
*)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):''' - { - extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); -#if PY_VERSION_HEX >= 0x03000000 - PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); -#else - PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); -#endif - } - '''}, - 'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']}, - 'callfortranroutine':[ - {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals:"""\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, - {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t}'} - ], - '_check':isfunction_wrap, - },{ # Wrapped subroutine - 'functype':'void', - 'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine:'', - }, - - 'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):''' - { - extern void #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - PyObject_SetAttrString(o,"_cpointer", 
F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); -#if PY_VERSION_HEX >= 0x03000000 - PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); -#else - PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); -#endif - } - '''}, - 'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']}, - 'callfortranroutine':[ - {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals:"""\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, - {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t}'} - ], - '_check':issubroutine_wrap, - },{ # Function - 'functype':'#ctype#', - 'docreturn':{l_not(isintent_hide):'#rname#,'}, - 'docstrout':'\t#pydocsignout#', - 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasresultnote:'--- #resultnote#'}], - 'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\ -#ifdef USESCOMPAQFORTRAN -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); -#else -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -#endif -"""}, - {l_and(debugcapi,l_not(isstringfunction)):"""\ -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -"""} - ], - '_check':l_and(isfunction,l_not(isfunction_wrap)) - },{ # Scalar function - 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);', - isdummyroutine:'' - }, - 
'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};', - l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'}, - {iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'} - ], - 'callfortranroutine':[ - {hasexternals:"""\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement:'''\t#callstatement#; -/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ -'''}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'}, - {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t}'}, - {l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, - {l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], - 'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, - 'need':[{l_not(isdummyroutine):'F_FUNC'}, - {iscomplexfunction:'pyobj_from_#ctype#1'}, - {islong_longfunction:'long_long'}, - {islong_doublefunction:'long_double'}], - 'returnformat':{l_not(isintent_hide):'#rformat#'}, - 'return':{iscomplexfunction:',#name#_return_value_capi', - l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'}, - '_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap)) - },{ # String function # in use for --no-wrap - 'declfortranroutine':'extern void 
#F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - 'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)): -# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},', - '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isintent_c): -# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},' - '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' - }, - 'decl':['\t#ctype# #name#_return_value = NULL;', - '\tint #name#_return_value_len = 0;'], - 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', - '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', - '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', - '\t\tf2py_success = 0;', - '\t} else {', - "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", - '\t}', - '\tif (f2py_success) {', - {hasexternals:"""\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'}, - """\ -#ifdef USESCOMPAQFORTRAN -\t\t(*f2py_func)(#callcompaqfortran#); -#else -\t\t(*f2py_func)(#callfortran#); -#endif -""", - {isthreadsafe:'\t\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t\t}'}, - {debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - '\t} /* if (f2py_success) after (string)malloc */', - ], - 'returnformat':'#rformat#', - 'return':',#name#_return_value', - 'freemem':'\tSTRINGFREE(#name#_return_value);', - 'need':['F_FUNC','#ctype#','STRINGFREE'], - '_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete - }, - { # Debugging - 'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', - 
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', - 'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', - '_check':debugcapi - } - ] - -################ Rules for arguments ################## - -typedef_need_dict = {islong_long:'long_long', - islong_double:'long_double', - islong_complex:'complex_long_double', - isunsigned_char:'unsigned_char', - isunsigned_short:'unsigned_short', - isunsigned:'unsigned', - isunsigned_long_long:'unsigned_long_long', - isunsigned_chararray:'unsigned_char', - isunsigned_shortarray:'unsigned_short', - isunsigned_long_longarray:'unsigned_long_long', - issigned_long_longarray:'long_long', - } - -aux_rules=[ - { - 'separatorsfor':sepdict - }, - { # Common - 'frompyobj':['\t/* Processing auxiliary variable #varname# */', - {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], - 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */', - 'need':typedef_need_dict, - }, -# Scalars (not complex) - { # Common - 'decl':'\t#ctype# #varname# = 0;', - 'need':{hasinitvalue:'math.h'}, - 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, - '_check':l_and(isscalar,l_not(iscomplex)), - }, - { - 'return':',#varname#', - 'docstrout':'\t#pydocsignout#', - 'docreturn':'#outvarname#,', - 'returnformat':'#varrformat#', - '_check':l_and(isscalar,l_not(iscomplex),isintent_out), - }, -# Complex scalars - { # Common - 'decl':'\t#ctype# #varname#;', - 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check':iscomplex - }, -# String - { # Common - 'decl':['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - ], - 'need':['len..'], - '_check':isstring - }, -# Array - { # Common - 'decl':['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - ], - 'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}], - 
'_check':isarray - }, -# Scalararray - { # Common - '_check':l_and(isarray,l_not(iscomplexarray)) - },{ # Not hidden - '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide) - }, -# Integer*1 array - {'need':'#ctype#', - '_check':isint1array, - '_depend':'' - }, -# Integer*-1 array - {'need':'#ctype#', - '_check':isunsigned_chararray, - '_depend':'' - }, -# Integer*-2 array - {'need':'#ctype#', - '_check':isunsigned_shortarray, - '_depend':'' - }, -# Integer*-8 array - {'need':'#ctype#', - '_check':isunsigned_long_longarray, - '_depend':'' - }, -# Complexarray - {'need':'#ctype#', - '_check':iscomplexarray, - '_depend':'' - }, -# Stringarray - { - 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, - 'need':'string', - '_check':isstringarray - } - ] - -arg_rules=[ - { - 'separatorsfor':sepdict - }, - { # Common - 'frompyobj':['\t/* Processing variable #varname# */', - {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], - 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */', - '_depend':'', - 'need':typedef_need_dict, - }, -# Doc signatures - { - 'docstropt':{l_and(isoptional,isintent_nothide):'\t#pydocsign#'}, - 'docstrreq':{l_and(isrequired,isintent_nothide):'\t#pydocsign#'}, - 'docstrout':{isintent_out:'\t#pydocsignout#'}, - 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote,isintent_hide):'--- #note#', - l_and(hasnote,isintent_nothide):'--- See above.'}]}, - 'depend':'' - }, -# Required/Optional arguments - { - 'kwlist':'"#varname#",', - 'docsign':'#varname#,', - '_check':l_and(isintent_nothide,l_not(isoptional)) - }, - { - 'kwlistopt':'"#varname#",', - 'docsignopt':'#varname#=#showinit#,', - 'docsignoptshort':'#varname#,', - 
'_check':l_and(isintent_nothide,isoptional) - }, -# Docstring/BuildValue - { - 'docreturn':'#outvarname#,', - 'returnformat':'#varrformat#', - '_check':isintent_out - }, -# Externals (call-back functions) - { # Common - 'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'}, - 'docsignxashort':{isintent_nothide:'#varname#_extra_args,'}, - 'docstropt':{isintent_nothide:'\t#varname#_extra_args := () input tuple'}, - 'docstrcbs':'#cbdocstr#', - 'latexdocstrcbs':'\\item[] #cblatexdocstr#', - 'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, - 'decl':['\tPyObject *#varname#_capi = Py_None;', - '\tPyTupleObject *#varname#_xa_capi = NULL;', - '\tPyTupleObject *#varname#_args_capi = NULL;', - '\tint #varname#_nofargs_capi = 0;', - {l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'} - ], - 'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'}, - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'xaformat':{isintent_nothide:'O!'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, - 'keys_xa':',&PyTuple_Type,&#varname#_xa_capi', - 'setjmpbuf':'(setjmp(#cbname#_jmpbuf))', - 'callfortran':{l_not(isintent_callback):'#varname#_cptr,'}, - 'need':['#cbname#','setjmp.h'], - '_check':isexternal - }, - { - 'frompyobj':[{l_not(isintent_callback):"""\ -if(F2PyCapsule_Check(#varname#_capi)) { - #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi); -} else { - #varname#_cptr = #cbname#; -} -"""},{isintent_callback:"""\ -if (#varname#_capi==Py_None) { - #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); - if (#varname#_capi) { - if (#varname#_xa_capi==NULL) { - if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { - PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); - if (capi_tmp) - #varname#_xa_capi = 
(PyTupleObject *)PySequence_Tuple(capi_tmp); - else - #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); - if (#varname#_xa_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); - return NULL; - } - } - } - } - if (#varname#_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); - return NULL; - } -} -"""}, -## {l_not(isintent_callback):"""\ -## if (#varname#_capi==Py_None) { -## printf(\"hoi\\n\"); -## } -## """}, -"""\ -\t#varname#_nofargs_capi = #cbname#_nofargs; -\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) { -\t\tjmp_buf #varname#_jmpbuf;""", -{debugcapi:["""\ -\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); -\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", -{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, - """\ -\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); -\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); -\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); -\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", - ], -'cleanupfrompyobj': -"""\ -\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); -\t\t#cbname#_capi = #varname#_capi; -\t\tPy_DECREF(#cbname#_args_capi); -\t\t#cbname#_args_capi = #varname#_args_capi; -\t\t#cbname#_nofargs = #varname#_nofargs_capi; -\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); -\t}""", - 'need':['SWAP','create_cb_arglist'], - '_check':isexternal, - '_depend':'' - }, -# Scalars (not complex) - { # Common - 'decl':'\t#ctype# #varname# = 0;', - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - 
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, - 'return':{isintent_out:',#varname#'}, - '_check':l_and(isscalar,l_not(iscomplex)) - },{ - 'need':{hasinitvalue:'math.h'}, - '_check':l_and(isscalar,l_not(iscomplex)), - #'_depend':'' - },{ # Not hidden - 'decl':'\tPyObject *#varname#_capi = Py_None;', - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, - 'pyobjfrom':{isintent_inout:"""\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\tif (f2py_success) {"""}, - 'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide) - },{ - 'frompyobj':[ -# hasinitvalue... -# if pyobj is None: -# varname = init -# else -# from_pyobj(varname) -# -# isoptional and noinitvalue... -# if pyobj is not None: -# from_pyobj(varname) -# else: -# varname is uninitialized -# -# ... 
-# from_pyobj(varname) -# - {hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else', - '_depend':''}, - {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)', - '_depend':''}, - {l_not(islogical):'''\ -\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); -\tif (f2py_success) {'''}, - {islogical:'''\ -\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); -\t\tf2py_success = 1; -\tif (f2py_success) {'''}, - ], - 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/', - 'need':{l_not(islogical):'#ctype#_from_pyobj'}, - '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide), - '_depend':'' -# },{ # Hidden -# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide) - },{ # Hidden - 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, - 'need':typedef_need_dict, - '_check':l_and(isscalar,l_not(iscomplex),isintent_hide), - '_depend':'' - },{ # Common - 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - '_check':l_and(isscalar,l_not(iscomplex)), - '_depend':'' - }, -# Complex scalars - { # Common - 'decl':'\t#ctype# #varname#;', - 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - 'return':{isintent_out:',#varname#_capi'}, - '_check':iscomplex - },{ # Not hidden - 'decl':'\tPyObject *#varname#_capi = Py_None;', - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, - 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - 'pyobjfrom':{isintent_inout:"""\ -\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\t\tif (f2py_success) {"""}, - 'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - '_check':l_and(iscomplex,isintent_nothide) - },{ - 
'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, - {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'}, -# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");' - '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n\tif (f2py_success) {'], - 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/', - 'need':['#ctype#_from_pyobj'], - '_check':l_and(iscomplex,isintent_nothide), - '_depend':'' - },{ # Hidden - 'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'}, - '_check':l_and(iscomplex,isintent_hide) - },{ - 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check':l_and(iscomplex,isintent_hide), - '_depend':'' - },{ # Common - 'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, - 'need':['pyobj_from_#ctype#1'], - '_check':iscomplex - },{ - 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - '_check':iscomplex, - '_depend':'' - }, -# String - { # Common - 'decl':['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - '\tPyObject *#varname#_capi = Py_None;'], - 'callfortran':'#varname#,', - 'callfortranappend':'slen(#varname#),', - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, -# 'freemem':'\tSTRINGFREE(#varname#);', - 'return':{isintent_out:',#varname#'}, - 'need':['len..'],#'STRINGFREE'], - '_check':isstring - },{ # Common - 'frompyobj':"""\ -\tslen(#varname#) = #length#; -\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); -\tif (f2py_success) {""", - 'cleanupfrompyobj':"""\ 
-\t\tSTRINGFREE(#varname#); -\t} /*if (f2py_success) of #varname#*/""", - 'need':['#ctype#_from_pyobj','len..','STRINGFREE'], - '_check':isstring, - '_depend':'' - },{ # Not hidden - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, - 'pyobjfrom':{isintent_inout:'''\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); -\tif (f2py_success) {'''}, - 'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, - 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - '_check':l_and(isstring,isintent_nothide) - },{ # Hidden - '_check':l_and(isstring,isintent_hide) - },{ - 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, - '_check':isstring, - '_depend':'' - }, -# Array - { # Common - 'decl':['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - '\tPyArrayObject *capi_#varname#_tmp = NULL;', - '\tint capi_#varname#_intent = 0;', - ], - 'callfortran':'#varname#,', - 'return':{isintent_out:',capi_#varname#_tmp'}, - 'need':'len..', - '_check':isarray - },{ # intent(overwrite) array - 'decl':'\tint capi_overwrite_#varname# = 1;', - 'kwlistxa':'"overwrite_#varname#",', - 'xaformat':'i', - 'keys_xa':',&capi_overwrite_#varname#', - 'docsignxa':'overwrite_#varname#=1,', - 'docsignxashort':'overwrite_#varname#,', - 'docstropt':'\toverwrite_#varname# := 1 input int', - '_check':l_and(isarray,isintent_overwrite), - },{ - 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check':l_and(isarray,isintent_overwrite), - '_depend':'', - }, - { # intent(copy) array - 'decl':'\tint capi_overwrite_#varname# = 0;', - 'kwlistxa':'"overwrite_#varname#",', - 'xaformat':'i', - 'keys_xa':',&capi_overwrite_#varname#', - 'docsignxa':'overwrite_#varname#=0,', - 
'docsignxashort':'overwrite_#varname#,', - 'docstropt':'\toverwrite_#varname# := 0 input int', - '_check':l_and(isarray,isintent_copy), - },{ - 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check':l_and(isarray,isintent_copy), - '_depend':'', - },{ - 'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}], - '_check':isarray, - '_depend':'' - },{ # Not hidden - 'decl':'\tPyObject *#varname#_capi = Py_None;', - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, -# 'pyobjfrom':{isintent_inout:"""\ -# /* Partly because of the following hack, intent(inout) is depreciated, -# Use intent(in,out) instead. - -# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\ -# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) { -# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) { -# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base) -# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi); -# \t\t} else -# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi); -# \t} -# */ -# """}, -# 'need':{isintent_inout:'copy_ND_array'}, - '_check':l_and(isarray,isintent_nothide) - },{ - 'frompyobj':['\t#setdims#;', - '\tcapi_#varname#_intent |= #intent#;', - {isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, - {isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, - """\ -\tif (capi_#varname#_tmp == NULL) { -\t\tif (!PyErr_Occurred()) -\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); -\t} else { -\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data); -""", -{hasinitvalue:[ - {isintent_nothide:'\tif 
(#varname#_capi == Py_None) {'}, - {isintent_hide:'\t{'}, - {iscomplexarray:'\t\t#ctype# capi_c;'}, - """\ -\t\tint *_i,capi_i=0; -\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); -\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) { -\t\t\twhile ((_i = nextforcomb())) -\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ -\t\t} else { -\t\t\tif (!PyErr_Occurred()) -\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); -\t\t\tf2py_success = 0; -\t\t} -\t} -\tif (f2py_success) {"""]}, - ], - 'cleanupfrompyobj':[ # note that this list will be reversed - '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', - {l_not(l_or(isintent_out,isintent_hide)):"""\ -\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { -\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, - {l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""}, - {hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'}, - ], - '_check':isarray, - '_depend':'' - }, -# { # Hidden -# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'}, -# '_check':l_and(isarray,isintent_hide) -# }, -# Scalararray - { # Common - '_check':l_and(isarray,l_not(iscomplexarray)) - },{ # Not hidden - '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide) - }, -# Integer*1 array - {'need':'#ctype#', - '_check':isint1array, - '_depend':'' - }, -# Integer*-1 array - {'need':'#ctype#', - '_check':isunsigned_chararray, - '_depend':'' - }, -# Integer*-2 array - {'need':'#ctype#', - '_check':isunsigned_shortarray, - '_depend':'' - }, -# Integer*-8 array - {'need':'#ctype#', - '_check':isunsigned_long_longarray, - '_depend':'' - }, -# Complexarray - {'need':'#ctype#', - '_check':iscomplexarray, - '_depend':'' - }, -# Stringarray - { - 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, - 'need':'string', - '_check':isstringarray - } - ] - -################# Rules for checking 
############### - -check_rules=[ - { - 'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, - 'need':'len..' - },{ - 'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/', - 'need':'CHECKSCALAR', - '_check':l_and(isscalar,l_not(iscomplex)), - '_break':'' - },{ - 'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/', - 'need':'CHECKSTRING', - '_check':isstring, - '_break':'' - },{ - 'need':'CHECKARRAY', - 'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/', - '_check':isarray, - '_break':'' - },{ - 'need':'CHECKGENERIC', - 'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/', - } -] - -########## Applying the rules. No need to modify what follows ############# - -#################### Build C/API module ####################### - -def buildmodule(m,um): - """ - Return - """ - global f2py_version,options - outmess('\tBuilding module "%s"...\n'%(m['name'])) - ret = {} - mod_rules=defmod_rules[:] - vrd=modsign2map(m) - rd=dictappend({'f2py_version':f2py_version},vrd) - funcwrappers = [] - funcwrappers2 = [] # F90 codes - for n in m['interfaced']: - nb=None - for bi in m['body']: - if not bi['block']=='interface': - errmess('buildmodule: Expected interface block. Skipping.\n') - continue - for b in bi['body']: - if b['name']==n: nb=b;break - - if not nb: - errmess('buildmodule: Could not found the body of interfaced routine "%s". 
Skipping.\n'%(n)) - continue - nb_list = [nb] - if 'entry' in nb: - for k,a in nb['entry'].items(): - nb1 = copy.deepcopy(nb) - del nb1['entry'] - nb1['name'] = k - nb1['args'] = a - nb_list.append(nb1) - for nb in nb_list: - api,wrap=buildapi(nb) - if wrap: - if ismoduleroutine(nb): - funcwrappers2.append(wrap) - else: - funcwrappers.append(wrap) - ar=applyrules(api,vrd) - rd=dictappend(rd,ar) - - # Construct COMMON block support - cr,wrap = common_rules.buildhooks(m) - if wrap: - funcwrappers.append(wrap) - ar=applyrules(cr,vrd) - rd=dictappend(rd,ar) - - # Construct F90 module support - mr,wrap = f90mod_rules.buildhooks(m) - if wrap: - funcwrappers2.append(wrap) - ar=applyrules(mr,vrd) - rd=dictappend(rd,ar) - - for u in um: - ar=use_rules.buildusevars(u,m['use'][u['name']]) - rd=dictappend(rd,ar) - - needs=cfuncs.get_needs() - code={} - for n in needs.keys(): - code[n]=[] - for k in needs[n]: - c='' - if k in cfuncs.includes0: - c=cfuncs.includes0[k] - elif k in cfuncs.includes: - c=cfuncs.includes[k] - elif k in cfuncs.userincludes: - c=cfuncs.userincludes[k] - elif k in cfuncs.typedefs: - c=cfuncs.typedefs[k] - elif k in cfuncs.typedefs_generated: - c=cfuncs.typedefs_generated[k] - elif k in cfuncs.cppmacros: - c=cfuncs.cppmacros[k] - elif k in cfuncs.cfuncs: - c=cfuncs.cfuncs[k] - elif k in cfuncs.callbacks: - c=cfuncs.callbacks[k] - elif k in cfuncs.f90modhooks: - c=cfuncs.f90modhooks[k] - elif k in cfuncs.commonhooks: - c=cfuncs.commonhooks[k] - else: - errmess('buildmodule: unknown need %s.\n'%(`k`));continue - code[n].append(c) - mod_rules.append(code) - for r in mod_rules: - if ('_check' in r and r['_check'](m)) or ('_check' not in r): - ar=applyrules(r,vrd,m) - rd=dictappend(rd,ar) - ar=applyrules(module_rules,rd) - - fn = os.path.join(options['buildpath'],vrd['coutput']) - ret['csrc'] = fn - f=open(fn,'w') - f.write(ar['modulebody'].replace('\t',2*' ')) - f.close() - outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'],fn)) - - if 
options['dorestdoc']: - fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest') - f=open(fn,'w') - f.write('.. -*- rest -*-\n') - f.write('\n'.join(ar['restdoc'])) - f.close() - outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename'])) - if options['dolatexdoc']: - fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex') - ret['ltx'] = fn - f=open(fn,'w') - f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version)) - if 'shortlatex' not in options: - f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') - f.write('\n'.join(ar['latexdoc'])) - if 'shortlatex' not in options: - f.write('\\end{document}') - f.close() - outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename'])) - if funcwrappers: - wn = os.path.join(options['buildpath'],vrd['f2py_wrapper_output']) - ret['fsrc'] = wn - f=open(wn,'w') - f.write('C -*- fortran -*-\n') - f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) - f.write('C It contains Fortran 77 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'): - if l and l[0]==' ': - while len(l)>=66: - lines.append(l[:66]+'\n &') - l = l[66:] - lines.append(l+'\n') - else: lines.append(l+'\n') - lines = ''.join(lines).replace('\n &\n','\n') - f.write(lines) - f.close() - outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn)) - if funcwrappers2: - wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename'])) - ret['fsrc'] = wn - f=open(wn,'w') - f.write('! -*- f90 -*-\n') - f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) - f.write('! 
It contains Fortran 90 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'): - if len(l)>72 and l[0]==' ': - lines.append(l[:72]+'&\n &') - l = l[72:] - while len(l)>66: - lines.append(l[:66]+'&\n &') - l = l[66:] - lines.append(l+'\n') - else: lines.append(l+'\n') - lines = ''.join(lines).replace('\n &\n','\n') - f.write(lines) - f.close() - outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn)) - return ret - -################## Build C/API function ############# - -stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'} -def buildapi(rout): - rout,wrap = func2subr.assubr(rout) - args,depargs=getargs2(rout) - capi_maps.depargs=depargs - var=rout['vars'] - auxvars = [a for a in var.keys() if isintent_aux(var[a])] - - if ismoduleroutine(rout): - outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name'])) - else: - outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name'])) - # Routine - vrd=routsign2map(rout) - rd=dictappend({},vrd) - for r in rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r,vrd,rout) - rd=dictappend(rd,ar) - - # Args - nth,nthk=0,0 - savevrd={} - for a in args: - vrd=sign2map(a,var[a]) - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - if not isintent_hide(var[a]): - if not isoptional(var[a]): - nth=nth+1 - vrd['nth']=`nth`+stnd[nth%10]+' argument' - else: - nthk=nthk+1 - vrd['nth']=`nthk`+stnd[nthk%10]+' keyword' - else: vrd['nth']='hidden' - savevrd[a]=vrd - for r in _rules: - if '_depend' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - for a in depargs: - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - vrd=savevrd[a] - for r in _rules: - if '_depend' not in r: - continue - if ('_check' in r and 
r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) - if '_break' in r: - break - if 'check' in var[a]: - for c in var[a]['check']: - vrd['check']=c - ar=applyrules(check_rules,vrd,var[a]) - rd=dictappend(rd,ar) - if type(rd['cleanupfrompyobj']) is types.ListType: - rd['cleanupfrompyobj'].reverse() - if type(rd['closepyobjfrom']) is types.ListType: - rd['closepyobjfrom'].reverse() - rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#', - {'docsign':rd['docsign'], - 'docsignopt':rd['docsignopt'], - 'docsignxa':rd['docsignxa']})) - optargs=stripcomma(replace('#docsignopt##docsignxa#', - {'docsignxa':rd['docsignxashort'], - 'docsignopt':rd['docsignoptshort']} - )) - if optargs=='': - rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']})) - else: - rd['docsignatureshort']=replace('#docsign#[#docsignopt#]', - {'docsign':rd['docsign'], - 'docsignopt':optargs, - }) - rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_') - rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ') - cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) - if len(rd['callfortranappend'])>1: - rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) - else: - rd['callcompaqfortran']=cfs - rd['callfortran']=cfs - if type(rd['docreturn'])==types.ListType: - rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = ' - rd['docstrsigns']=[] - rd['latexdocstrsigns']=[] - for k in ['docstrreq','docstropt','docstrout','docstrcbs']: - if k in rd and type(rd[k])==types.ListType: - rd['docstrsigns']=rd['docstrsigns']+rd[k] - k='latex'+k - if k in rd and type(rd[k])==types.ListType: - rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ - 
['\\begin{description}']+rd[k][1:]+\ - ['\\end{description}'] - - # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720 - if rd['keyformat'] or rd['xaformat']: - argformat = rd['argformat'] - if isinstance(argformat, list): - argformat.append('|') - else: - assert isinstance(argformat, str),repr((argformat, type(argformat))) - rd['argformat'] += '|' - - ar=applyrules(routine_rules,rd) - if ismoduleroutine(rout): - outmess('\t\t\t %s\n'%(ar['docshort'])) - else: - outmess('\t\t %s\n'%(ar['docshort'])) - return ar,wrap - - -#################### EOF rules.py ####################### diff --git a/numpy-1.6.2/numpy/f2py/setup.py b/numpy-1.6.2/numpy/f2py/setup.py deleted file mode 100644 index 37aab191c4..0000000000 --- a/numpy-1.6.2/numpy/f2py/setup.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -""" -setup.py for installing F2PY - -Usage: - python setup.py install - -Copyright 2001-2005 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Revision: 1.32 $ -$Date: 2005/01/30 17:22:14 $ -Pearu Peterson -""" - -__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $" - -import os -import sys -from distutils.dep_util import newer -from numpy.distutils import log -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration - -from __version__ import version - -def configuration(parent_package='',top_path=None): - config = Configuration('f2py', parent_package, top_path) - - config.add_data_dir('docs') - config.add_data_dir('tests') - - config.add_data_files('src/fortranobject.c', - 'src/fortranobject.h', - 'f2py.1' - ) - - config.make_svn_version_py() - - def generate_f2py_py(build_dir): - f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:] - if f2py_exe[-4:]=='.exe': - f2py_exe = f2py_exe[:-4] + '.py' - if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py': - f2py_exe = f2py_exe + '.py' - target = os.path.join(build_dir,f2py_exe) - if newer(__file__,target): - log.info('Creating %s', target) - f = open(target,'w') - f.write('''\ -#!/usr/bin/env %s -# See http://cens.ioc.ee/projects/f2py2e/ -import os, sys -for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: - try: - i=sys.argv.index("--"+mode) - del sys.argv[i] - break - except ValueError: pass -os.environ["NO_SCIPY_IMPORT"]="f2py" -if mode=="g3-numpy": - sys.stderr.write("G3 f2py support is not implemented, yet.\\n") - sys.exit(1) -elif mode=="2e-numeric": - from f2py2e import main -elif mode=="2e-numarray": - sys.argv.append("-DNUMARRAY") - from f2py2e import main -elif mode=="2e-numpy": - from numpy.f2py import main -else: - sys.stderr.write("Unknown mode: " + repr(mode) + "\\n") - sys.exit(1) -main() -'''%(os.path.basename(sys.executable))) - f.close() - return target - - config.add_scripts(generate_f2py_py) - - log.info('F2PY Version %s', config.get_version()) - - return config - -if __name__ == "__main__": - - config = configuration(top_path='') - version = config.get_version() - 
print('F2PY Version',version) - config = config.todict() - - if sys.version[:3]>='2.3': - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" - config['classifiers'] = [ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: NumPy License', - 'Natural Language :: English', - 'Operating System :: OS Independent', - 'Programming Language :: C', - 'Programming Language :: Fortran', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Code Generators', - ] - setup(version=version, - description = "F2PY - Fortran to Python Interface Generaton", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - maintainer = "Pearu Peterson", - maintainer_email = "pearu@cens.ioc.ee", - license = "BSD", - platforms = "Unix, Windows (mingw|cygwin), Mac OSX", - long_description = """\ -The Fortran to Python Interface Generator, or F2PY for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77/90/95 subroutines, accessing common blocks from -Python, and calling Python functions from Fortran (call-backs). -Interfacing subroutines/data from Fortran 90/95 modules is supported.""", - url = "http://cens.ioc.ee/projects/f2py2e/", - keywords = ['Fortran','f2py'], - **config) diff --git a/numpy-1.6.2/numpy/f2py/setupscons.py b/numpy-1.6.2/numpy/f2py/setupscons.py deleted file mode 100755 index e30fd87433..0000000000 --- a/numpy-1.6.2/numpy/f2py/setupscons.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python -""" -setup.py for installing F2PY - -Usage: - python setup.py install - -Copyright 2001-2005 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Revision: 1.32 $ -$Date: 2005/01/30 17:22:14 $ -Pearu Peterson -""" - -__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $" - -import os -import sys -from distutils.dep_util import newer -from numpy.distutils import log -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration - -from __version__ import version - -def configuration(parent_package='',top_path=None): - config = Configuration('f2py', parent_package, top_path) - - config.add_data_dir('docs') - - config.add_data_files('src/fortranobject.c', - 'src/fortranobject.h', - 'f2py.1' - ) - - config.make_svn_version_py() - - def generate_f2py_py(build_dir): - f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:] - if f2py_exe[-4:]=='.exe': - f2py_exe = f2py_exe[:-4] + '.py' - if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py': - f2py_exe = f2py_exe + '.py' - target = os.path.join(build_dir,f2py_exe) - if newer(__file__,target): - log.info('Creating %s', target) - f = open(target,'w') - f.write('''\ -#!/usr/bin/env %s -# See http://cens.ioc.ee/projects/f2py2e/ -import os, sys -for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: - try: - i=sys.argv.index("--"+mode) - del sys.argv[i] - break - except ValueError: pass -os.environ["NO_SCIPY_IMPORT"]="f2py" -if mode=="g3-numpy": - print >> sys.stderr, "G3 f2py support is not implemented, yet." 
- sys.exit(1) -elif mode=="2e-numeric": - from f2py2e import main -elif mode=="2e-numarray": - sys.argv.append("-DNUMARRAY") - from f2py2e import main -elif mode=="2e-numpy": - from numpy.f2py import main -else: - print >> sys.stderr, "Unknown mode:",`mode` - sys.exit(1) -main() -'''%(os.path.basename(sys.executable))) - f.close() - return target - - config.add_scripts(generate_f2py_py) - - return config - -if __name__ == "__main__": - - config = configuration(top_path='') - version = config.get_version() - print 'F2PY Version',version - config = config.todict() - - if sys.version[:3]>='2.3': - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" - config['classifiers'] = [ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: NumPy License', - 'Natural Language :: English', - 'Operating System :: OS Independent', - 'Programming Language :: C', - 'Programming Language :: Fortran', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Code Generators', - ] - setup(version=version, - description = "F2PY - Fortran to Python Interface Generaton", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - maintainer = "Pearu Peterson", - maintainer_email = "pearu@cens.ioc.ee", - license = "BSD", - platforms = "Unix, Windows (mingw|cygwin), Mac OSX", - long_description = """\ -The Fortran to Python Interface Generator, or F2PY for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77/90/95 subroutines, accessing common blocks from -Python, and calling Python functions from Fortran (call-backs). 
-Interfacing subroutines/data from Fortran 90/95 modules is supported.""", - url = "http://cens.ioc.ee/projects/f2py2e/", - keywords = ['Fortran','f2py'], - **config) diff --git a/numpy-1.6.2/numpy/f2py/src/fortranobject.c b/numpy-1.6.2/numpy/f2py/src/fortranobject.c deleted file mode 100644 index ff80fa7e58..0000000000 --- a/numpy-1.6.2/numpy/f2py/src/fortranobject.c +++ /dev/null @@ -1,974 +0,0 @@ -#define FORTRANOBJECT_C -#include "fortranobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* - This file implements: FortranObject, array_from_pyobj, copy_ND_array - - Author: Pearu Peterson - $Revision: 1.52 $ - $Date: 2005/07/11 07:44:20 $ -*/ - -int -F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) -{ - if (obj==NULL) { - fprintf(stderr, "Error loading %s\n", name); - if (PyErr_Occurred()) { - PyErr_Print(); - PyErr_Clear(); - } - return -1; - } - return PyDict_SetItemString(dict, name, obj); -} - -/************************* FortranObject *******************************/ - -typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *); - -PyObject * -PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { - int i; - PyFortranObject *fp = NULL; - PyObject *v = NULL; - if (init!=NULL) /* Initialize F90 module objects */ - (*(init))(); - if ((fp = PyObject_New(PyFortranObject, &PyFortran_Type))==NULL) return NULL; - if ((fp->dict = PyDict_New())==NULL) return NULL; - fp->len = 0; - while (defs[fp->len].name != NULL) fp->len++; - if (fp->len == 0) goto fail; - fp->defs = defs; - for (i=0;ilen;i++) - if (fp->defs[i].rank == -1) { /* Is Fortran routine */ - v = PyFortranObject_NewAsAttr(&(fp->defs[i])); - if (v==NULL) return NULL; - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - } else - if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */ - if (fp->defs[i].type == PyArray_STRING) { - int n = fp->defs[i].rank-1; - v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d, - PyArray_STRING, 
NULL, fp->defs[i].data, fp->defs[i].dims.d[n], - NPY_FARRAY, NULL); - } - else { - v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_FARRAY, - NULL); - } - if (v==NULL) return NULL; - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - } - Py_XDECREF(v); - return (PyObject *)fp; - fail: - Py_XDECREF(v); - return NULL; -} - -PyObject * -PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */ - PyFortranObject *fp = NULL; - fp = PyObject_New(PyFortranObject, &PyFortran_Type); - if (fp == NULL) return NULL; - if ((fp->dict = PyDict_New())==NULL) return NULL; - fp->len = 1; - fp->defs = defs; - return (PyObject *)fp; -} - -/* Fortran methods */ - -static void -fortran_dealloc(PyFortranObject *fp) { - Py_XDECREF(fp->dict); - PyMem_Del(fp); -} - - -#if PY_VERSION_HEX >= 0x03000000 -#else -static PyMethodDef fortran_methods[] = { - {NULL, NULL} /* sentinel */ -}; -#endif - - -static PyObject * -fortran_doc (FortranDataDef def) { - char *p; - /* - p is used as a buffer to hold generated documentation strings. - A common operation in generating the documentation strings, is - appending a string to the buffer p. Earlier, the following - idiom was: - - sprintf(p, "%s", p); - - but this does not work when _FORTIFY_SOURCE=2 is enabled: instead - of appending the string, the string is inserted. 
- - As a fix, the following idiom should be used for appending - strings to a buffer p: - - sprintf(p + strlen(p), ""); - */ - PyObject *s = NULL; - int i; - unsigned size=100; - if (def.doc!=NULL) - size += strlen(def.doc); - p = (char*)malloc (size); - p[0] = '\0'; /* make sure that the buffer has zero length */ - if (sprintf(p,"%s - ",def.name)==0) goto fail; - if (def.rank==-1) { - if (def.doc==NULL) { - if (sprintf(p+strlen(p),"no docs available")==0) - goto fail; - } else { - if (sprintf(p+strlen(p),"%s",def.doc)==0) - goto fail; - } - } else { - PyArray_Descr *d = PyArray_DescrFromType(def.type); - if (sprintf(p+strlen(p),"'%c'-",d->type)==0) { - Py_DECREF(d); - goto fail; - } - Py_DECREF(d); - if (def.data==NULL) { - if (sprintf(p+strlen(p),"array(%" NPY_INTP_FMT,def.dims.d[0])==0) - goto fail; - for(i=1;i0) { - if (sprintf(p+strlen(p),"array(%"NPY_INTP_FMT,def.dims.d[0])==0) - goto fail; - for(i=1;isize) { - fprintf(stderr,"fortranobject.c:fortran_doc:len(p)=%zd>%d(size):"\ - " too long doc string required, increase size\n",\ - strlen(p),size); - goto fail; - } -#if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromString(p); -#else - s = PyString_FromString(p); -#endif - fail: - free(p); - return s; -} - -static FortranDataDef *save_def; /* save pointer of an allocatable array */ -static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ - if (*f) /* In fortran f=allocated(d) */ - save_def->data = d; - else - save_def->data = NULL; - /* printf("set_data: d=%p,f=%d\n",d,*f); */ -} - -static PyObject * -fortran_getattr(PyFortranObject *fp, char *name) { - int i,j,k,flag; - if (fp->dict != NULL) { - PyObject *v = PyDict_GetItemString(fp->dict, name); - if (v != NULL) { - Py_INCREF(v); - return v; - } - } - for (i=0,j=1;ilen && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) - if (fp->defs[i].rank!=-1) { /* F90 allocatable array */ - if (fp->defs[i].func==NULL) return NULL; - for(k=0;kdefs[i].rank;++k) - fp->defs[i].dims.d[k]=-1; - save_def = 
&fp->defs[i]; - (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag); - if (flag==2) - k = fp->defs[i].rank + 1; - else - k = fp->defs[i].rank; - if (fp->defs[i].data !=NULL) { /* array is allocated */ - PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_FARRAY, - NULL); - if (v==NULL) return NULL; - /* Py_INCREF(v); */ - return v; - } else { /* array is not allocated */ - Py_INCREF(Py_None); - return Py_None; - } - } - if (strcmp(name,"__dict__")==0) { - Py_INCREF(fp->dict); - return fp->dict; - } - if (strcmp(name,"__doc__")==0) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *s = PyUnicode_FromString(""), *s2, *s3; - for (i=0;ilen;i++) { - s2 = fortran_doc(fp->defs[i]); - s3 = PyUnicode_Concat(s, s2); - Py_DECREF(s2); - Py_DECREF(s); - s = s3; - } -#else - PyObject *s = PyString_FromString(""); - for (i=0;ilen;i++) - PyString_ConcatAndDel(&s,fortran_doc(fp->defs[i])); -#endif - if (PyDict_SetItemString(fp->dict, name, s)) - return NULL; - return s; - } - if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) { - PyObject *cobj = F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data),NULL); - if (PyDict_SetItemString(fp->dict, name, cobj)) - return NULL; - return cobj; - } -#if PY_VERSION_HEX >= 0x03000000 - if (1) { - PyObject *str, *ret; - str = PyUnicode_FromString(name); - ret = PyObject_GenericGetAttr((PyObject *)fp, str); - Py_DECREF(str); - return ret; - } -#else - return Py_FindMethod(fortran_methods, (PyObject *)fp, name); -#endif -} - -static int -fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) { - int i,j,flag; - PyArrayObject *arr = NULL; - for (i=0,j=1;ilen && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) { - if (fp->defs[i].rank==-1) { - PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine"); - return -1; - } - if (fp->defs[i].func!=NULL) { /* is allocatable array */ - npy_intp dims[F2PY_MAX_DIMS]; - int k; - save_def = &fp->defs[i]; - if 
(v!=Py_None) { /* set new value (reallocate if needed -- - see f2py generated code for more - details ) */ - for(k=0;kdefs[i].rank;k++) dims[k]=-1; - if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) - return -1; - (*(fp->defs[i].func))(&fp->defs[i].rank,arr->dimensions,set_data,&flag); - } else { /* deallocate */ - for(k=0;kdefs[i].rank;k++) dims[k]=0; - (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag); - for(k=0;kdefs[i].rank;k++) dims[k]=-1; - } - memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(npy_intp)); - } else { /* not allocatable array */ - if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) - return -1; - } - if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */ - npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,arr->nd); - if (s==-1) - s = PyArray_MultiplyList(arr->dimensions,arr->nd); - if (s<0 || - (memcpy(fp->defs[i].data,arr->data,s*PyArray_ITEMSIZE(arr)))==NULL) { - if ((PyObject*)arr!=v) { - Py_DECREF(arr); - } - return -1; - } - if ((PyObject*)arr!=v) { - Py_DECREF(arr); - } - } else return (fp->defs[i].func==NULL?-1:0); - return 0; /* succesful */ - } - if (fp->dict == NULL) { - fp->dict = PyDict_New(); - if (fp->dict == NULL) - return -1; - } - if (v == NULL) { - int rv = PyDict_DelItemString(fp->dict, name); - if (rv < 0) - PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute"); - return rv; - } - else - return PyDict_SetItemString(fp->dict, name, v); -} - -static PyObject* -fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) { - int i = 0; - /* printf("fortran call - name=%s,func=%p,data=%p,%p\n",fp->defs[i].name, - fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */ - if (fp->defs[i].rank==-1) {/* is Fortran routine */ - if ((fp->defs[i].func==NULL)) { - PyErr_Format(PyExc_RuntimeError, "no function to call"); - return NULL; - } - else if (fp->defs[i].data==NULL) - /* 
dummy routine */ - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL); - else - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw, - (void *)fp->defs[i].data); - } - PyErr_Format(PyExc_TypeError, "this fortran object is not callable"); - return NULL; -} - -static PyObject * -fortran_repr(PyFortranObject *fp) -{ - PyObject *name = NULL, *repr = NULL; - name = PyObject_GetAttrString((PyObject *)fp, "__name__"); - PyErr_Clear(); -#if PY_VERSION_HEX >= 0x03000000 - if (name != NULL && PyUnicode_Check(name)) { - repr = PyUnicode_FromFormat("", name); - } - else { - repr = PyUnicode_FromString(""); - } -#else - if (name != NULL && PyString_Check(name)) { - repr = PyString_FromFormat("", PyString_AsString(name)); - } - else { - repr = PyString_FromString(""); - } -#endif - Py_XDECREF(name); - return repr; -} - - -PyTypeObject PyFortran_Type = { -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(0) - 0, /*ob_size*/ -#endif - "fortran", /*tp_name*/ - sizeof(PyFortranObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - /* methods */ - (destructor)fortran_dealloc, /*tp_dealloc*/ - 0, /*tp_print*/ - (getattrfunc)fortran_getattr, /*tp_getattr*/ - (setattrfunc)fortran_setattr, /*tp_setattr*/ - 0, /*tp_compare/tp_reserved*/ - (reprfunc)fortran_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - (ternaryfunc)fortran_call, /*tp_call*/ -}; - -/************************* f2py_report_atexit *******************************/ - -#ifdef F2PY_REPORT_ATEXIT -static int passed_time = 0; -static int passed_counter = 0; -static int passed_call_time = 0; -static struct timeb start_time; -static struct timeb stop_time; -static struct timeb start_call_time; -static struct timeb stop_call_time; -static int cb_passed_time = 0; -static int cb_passed_counter = 0; -static int cb_passed_call_time = 0; -static struct timeb cb_start_time; -static struct timeb cb_stop_time; 
-static struct timeb cb_start_call_time; -static struct timeb cb_stop_call_time; - -extern void f2py_start_clock(void) { ftime(&start_time); } -extern -void f2py_start_call_clock(void) { - f2py_stop_clock(); - ftime(&start_call_time); -} -extern -void f2py_stop_clock(void) { - ftime(&stop_time); - passed_time += 1000*(stop_time.time - start_time.time); - passed_time += stop_time.millitm - start_time.millitm; -} -extern -void f2py_stop_call_clock(void) { - ftime(&stop_call_time); - passed_call_time += 1000*(stop_call_time.time - start_call_time.time); - passed_call_time += stop_call_time.millitm - start_call_time.millitm; - passed_counter += 1; - f2py_start_clock(); -} - -extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); } -extern -void f2py_cb_start_call_clock(void) { - f2py_cb_stop_clock(); - ftime(&cb_start_call_time); -} -extern -void f2py_cb_stop_clock(void) { - ftime(&cb_stop_time); - cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time); - cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm; -} -extern -void f2py_cb_stop_call_clock(void) { - ftime(&cb_stop_call_time); - cb_passed_call_time += 1000*(cb_stop_call_time.time - cb_start_call_time.time); - cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm; - cb_passed_counter += 1; - f2py_cb_start_clock(); -} - -static int f2py_report_on_exit_been_here = 0; -extern -void f2py_report_on_exit(int exit_flag,void *name) { - if (f2py_report_on_exit_been_here) { - fprintf(stderr," %s\n",(char*)name); - return; - } - f2py_report_on_exit_been_here = 1; - fprintf(stderr," /-----------------------\\\n"); - fprintf(stderr," < F2PY performance report >\n"); - fprintf(stderr," \\-----------------------/\n"); - fprintf(stderr,"Overall time spent in ...\n"); - fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n", - passed_call_time); - fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n", - passed_counter,passed_time); - fprintf(stderr,"(c) call-back 
(Python) functions : %8d msec\n", - cb_passed_call_time); - fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", - cb_passed_counter,cb_passed_time); - - fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n", - passed_call_time-cb_passed_call_time-cb_passed_time); - fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); - fprintf(stderr,"Exit status: %d\n",exit_flag); - fprintf(stderr,"Modules : %s\n",(char*)name); -} -#endif - -/********************** report on array copy ****************************/ - -#ifdef F2PY_REPORT_ON_ARRAY_COPY -static void f2py_report_on_array_copy(PyArrayObject* arr) { - const long arr_size = PyArray_Size((PyObject *)arr); - if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) { - fprintf(stderr,"copied an array: size=%ld, elsize=%d\n", - arr_size, PyArray_ITEMSIZE(arr)); - } -} -static void f2py_report_on_array_copy_fromany(void) { - fprintf(stderr,"created an array from object\n"); -} - -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr) -#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() -#else -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR -#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY -#endif - - -/************************* array_from_obj *******************************/ - -/* - * File: array_from_pyobj.c - * - * Description: - * ------------ - * Provides array_from_pyobj function that returns a contigious array - * object with the given dimensions and required storage order, either - * in row-major (C) or column-major (Fortran) order. The function - * array_from_pyobj is very flexible about its Python object argument - * that can be any number, list, tuple, or array. - * - * array_from_pyobj is used in f2py generated Python extension - * modules. 
- * - * Author: Pearu Peterson - * Created: 13-16 January 2002 - * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ - */ - -static int -count_nonpos(const int rank, - const npy_intp *dims) { - int i=0,r=0; - while (ind; - npy_intp size = PyArray_Size((PyObject *)arr); - printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", - rank,arr->flags,size); - printf("\tstrides = "); - dump_dims(rank,arr->strides); - printf("\tdimensions = "); - dump_dims(rank,arr->dimensions); -} -#endif - -#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; } - -static int swap_arrays(PyArrayObject* arr1, PyArrayObject* arr2) { - SWAPTYPE(arr1->data,arr2->data,char*); - SWAPTYPE(arr1->nd,arr2->nd,int); - SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*); - SWAPTYPE(arr1->strides,arr2->strides,npy_intp*); - SWAPTYPE(arr1->base,arr2->base,PyObject*); - SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*); - SWAPTYPE(arr1->flags,arr2->flags,int); - /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ - return 0; -} - -#define ARRAY_ISCOMPATIBLE(arr,type_num) \ - ( (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) \ - ||(PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) \ - ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) \ - ||(PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) \ - ) - -extern -PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj) { - /* Note about reference counting - ----------------------------- - If the caller returns the array to Python, it must be done with - Py_BuildValue("N",arr). - Otherwise, if obj!=arr then the caller must call Py_DECREF(arr). - - Note on intent(cache,out,..) - --------------------- - Don't expect correct data when returning intent(cache) array. 
- - */ - char mess[200]; - PyArrayObject *arr = NULL; - PyArray_Descr *descr; - char typechar; - int elsize; - - if ((intent & F2PY_INTENT_HIDE) - || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None)) - || ((intent & F2PY_OPTIONAL) && (obj==Py_None)) - ) { - /* intent(cache), optional, intent(hide) */ - if (count_nonpos(rank,dims)) { - int i; - sprintf(mess,"failed to create intent(cache|hide)|optional array" - "-- must have defined dimensions but got ("); - for(i=0;ielsize; - typechar = descr->type; - Py_DECREF(descr); - if (PyArray_Check(obj)) { - arr = (PyArrayObject *)obj; - - if (intent & F2PY_INTENT_CACHE) { - /* intent(cache) */ - if (PyArray_ISONESEGMENT(obj) - && PyArray_ITEMSIZE((PyArrayObject *)obj)>=elsize) { - if (check_and_fix_dimensions((PyArrayObject *)obj,rank,dims)) { - return NULL; /*XXX: set exception */ - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(obj); - return (PyArrayObject *)obj; - } - sprintf(mess,"failed to initialize intent(cache) array"); - if (!PyArray_ISONESEGMENT(obj)) - sprintf(mess+strlen(mess)," -- input must be in one segment"); - if (PyArray_ITEMSIZE(arr)descr->type,typechar); - if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) - sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); - PyErr_SetString(PyExc_ValueError,mess); - return NULL; - } - - /* here we have always intent(in) or intent(inplace) */ - - { - PyArrayObject *retarr = (PyArrayObject *) \ - PyArray_New(&PyArray_Type, arr->nd, arr->dimensions, type_num, - NULL,NULL,0, - !(intent&F2PY_INTENT_C), - NULL); - if (retarr==NULL) - return NULL; - F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - if (PyArray_CopyInto(retarr, arr)) { - Py_DECREF(retarr); - return NULL; - } - if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) - return NULL; /* XXX: set exception */ - Py_XDECREF(retarr); - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - } else { - arr = retarr; - } - } - return arr; - } - - if ((intent & F2PY_INTENT_INOUT) - || (intent & 
F2PY_INTENT_INPLACE) - || (intent & F2PY_INTENT_CACHE)) { - sprintf(mess,"failed to initialize intent(inout|inplace|cache) array" - " -- input must be array but got %s", - PyString_AsString(PyObject_Str(PyObject_Type(obj))) - ); - PyErr_SetString(PyExc_TypeError,mess); - return NULL; - } - - { - F2PY_REPORT_ON_ARRAY_COPY_FROMANY; - arr = (PyArrayObject *) \ - PyArray_FromAny(obj,PyArray_DescrFromType(type_num), 0,0, - ((intent & F2PY_INTENT_C)?NPY_CARRAY:NPY_FARRAY) \ - | NPY_FORCECAST, NULL); - if (arr==NULL) - return NULL; - if (check_and_fix_dimensions(arr,rank,dims)) - return NULL; /*XXX: set exception */ - return arr; - } - -} - -/*****************************************/ -/* Helper functions for array_from_pyobj */ -/*****************************************/ - -static -int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *dims) { - /* - This function fills in blanks (that are -1\'s) in dims list using - the dimensions from arr. It also checks that non-blank dims will - match with the corresponding values in arr dimensions. - */ - const npy_intp arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1; -#ifdef DEBUG_COPY_ND_ARRAY - dump_attrs(arr); - printf("check_and_fix_dimensions:init: dims="); - dump_dims(rank,dims); -#endif - if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ - npy_intp new_size = 1; - int free_axe = -1; - int i; - npy_intp d; - /* Fill dims where -1 or 0; check dimensions; calc new_size; */ - for(i=0;ind;++i) { - d = arr->dimensions[i]; - if (dims[i] >= 0) { - if (d>1 && dims[i]!=d) { - fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT - " but got %" NPY_INTP_FMT "\n", - i,dims[i], d); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else { - dims[i] = d ? 
d : 1; - } - new_size *= dims[i]; - } - for(i=arr->nd;i1) { - fprintf(stderr,"%d-th dimension must be %" NPY_INTP_FMT - " but got 0 (not defined).\n", - i,dims[i]); - return 1; - } else if (free_axe<0) - free_axe = i; - else - dims[i] = 1; - if (free_axe>=0) { - dims[free_axe] = arr_size/new_size; - new_size *= dims[free_axe]; - } - if (new_size != arr_size) { - fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT " (maybe too many free" - " indices)\n", new_size,arr_size); - return 1; - } - } else if (rank==arr->nd) { - npy_intp new_size = 1; - int i; - npy_intp d; - for (i=0; idimensions[i]; - if (dims[i]>=0) { - if (d > 1 && d!=dims[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT - " but got %" NPY_INTP_FMT "\n", - i,dims[i],d); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else dims[i] = d; - new_size *= dims[i]; - } - if (new_size != arr_size) { - fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT "\n", new_size,arr_size); - return 1; - } - } else { /* [[1,2]] -> [[1],[2]] */ - int i,j; - npy_intp d; - int effrank; - npy_intp size; - for (i=0,effrank=0;ind;++i) - if (arr->dimensions[i]>1) ++effrank; - if (dims[rank-1]>=0) - if (effrank>rank) { - fprintf(stderr,"too many axes: %d (effrank=%d), expected rank=%d\n", - arr->nd,effrank,rank); - return 1; - } - - for (i=0,j=0;ind && arr->dimensions[j]<2) ++j; - if (j>=arr->nd) d = 1; - else d = arr->dimensions[j++]; - if (dims[i]>=0) { - if (d>1 && d!=dims[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT - " but got %" NPY_INTP_FMT " (real index=%d)\n", - i,dims[i],d,j-1); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else - dims[i] = d; - } - - for (i=rank;ind;++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ - while (jnd && arr->dimensions[j]<2) ++j; - if (j>=arr->nd) d = 1; - else d = arr->dimensions[j++]; - dims[rank-1] *= d; - } - for (i=0,size=1;ind); - 
for (i=0;ind;++i) fprintf(stderr," %" NPY_INTP_FMT,arr->dimensions[i]); - fprintf(stderr," ]\n"); - return 1; - } - } -#ifdef DEBUG_COPY_ND_ARRAY - printf("check_and_fix_dimensions:end: dims="); - dump_dims(rank,dims); -#endif - return 0; -} - -/* End of file: array_from_pyobj.c */ - -/************************* copy_ND_array *******************************/ - -extern -int copy_ND_array(const PyArrayObject *arr, PyArrayObject *out) -{ - F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - return PyArray_CopyInto(out, (PyArrayObject *)arr); -} - -/*********************************************/ -/* Compatibility functions for Python >= 3.0 */ -/*********************************************/ - -#if PY_VERSION_HEX >= 0x03000000 - -PyObject * -F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) -{ - PyObject *ret = PyCapsule_New(ptr, NULL, dtor); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -void * -F2PyCapsule_AsVoidPtr(PyObject *obj) -{ - void *ret = PyCapsule_GetPointer(obj, NULL); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -int -F2PyCapsule_Check(PyObject *ptr) -{ - return PyCapsule_CheckExact(ptr); -} - -#else - -PyObject * -F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) -{ - return PyCObject_FromVoidPtr(ptr, dtor); -} - -void * -F2PyCapsule_AsVoidPtr(PyObject *ptr) -{ - return PyCObject_AsVoidPtr(ptr); -} - -int -F2PyCapsule_Check(PyObject *ptr) -{ - return PyCObject_Check(ptr); -} - -#endif - - -#ifdef __cplusplus -} -#endif -/************************* EOF fortranobject.c *******************************/ diff --git a/numpy-1.6.2/numpy/f2py/src/fortranobject.h b/numpy-1.6.2/numpy/f2py/src/fortranobject.h deleted file mode 100644 index 283021aa12..0000000000 --- a/numpy-1.6.2/numpy/f2py/src/fortranobject.h +++ /dev/null @@ -1,178 +0,0 @@ -#ifndef Py_FORTRANOBJECT_H -#define Py_FORTRANOBJECT_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "Python.h" - -#ifdef FORTRANOBJECT_C -#define NO_IMPORT_ARRAY -#endif -#define 
PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "numpy/arrayobject.h" - -/* - * Python 3 support macros - */ -#if PY_VERSION_HEX >= 0x03000000 -#define PyString_Check PyBytes_Check -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_FromString PyBytes_FromString -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString - -#define PyInt_Check PyLong_Check -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsLong PyLong_AsLong - -#define PyNumber_Int PyNumber_Long -#endif - -#if (PY_VERSION_HEX < 0x02060000) -#define Py_TYPE(o) (((PyObject*)(o))->ob_type) -#define Py_REFCNT(o) (((PyObject*)(o))->ob_refcnt) -#define Py_SIZE(o) (((PyVarObject*)(o))->ob_size) -#endif - - /* -#ifdef F2PY_REPORT_ATEXIT_DISABLE -#undef F2PY_REPORT_ATEXIT -#else - -#ifndef __FreeBSD__ -#ifndef __WIN32__ -#ifndef __APPLE__ -#define F2PY_REPORT_ATEXIT -#endif -#endif -#endif - -#endif - */ - -#ifdef F2PY_REPORT_ATEXIT -#include - extern void f2py_start_clock(void); - extern void f2py_stop_clock(void); - extern void f2py_start_call_clock(void); - extern void f2py_stop_call_clock(void); - extern void f2py_cb_start_clock(void); - extern void f2py_cb_stop_clock(void); - extern void f2py_cb_start_call_clock(void); - extern void f2py_cb_stop_call_clock(void); - extern void f2py_report_on_exit(int,void*); -#endif - -#ifdef DMALLOC -#include "dmalloc.h" -#endif - -/* Fortran object interface */ - -/* -123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 - -PyFortranObject represents various Fortran objects: -Fortran (module) routines, COMMON blocks, module data. 
- -Author: Pearu Peterson -*/ - -#define F2PY_MAX_DIMS 40 - -typedef void (*f2py_set_data_func)(char*,npy_intp*); -typedef void (*f2py_void_func)(void); -typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); - - /*typedef void* (*f2py_c_func)(void*,...);*/ - -typedef void *(*f2pycfunc)(void); - -typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ - int type; /* PyArray_ || not used */ - char *data; /* pointer to array || Fortran routine */ - f2py_init_func func; /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ -} FortranDataDef; - -typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ -} PyFortranObject; - -#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) -#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) - - extern PyTypeObject PyFortran_Type; - extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); - extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); - extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); - -#if PY_VERSION_HEX >= 0x03000000 - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); -void * F2PyCapsule_AsVoidPtr(PyObject *obj); -int F2PyCapsule_Check(PyObject *ptr); - -#else - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)); -void * F2PyCapsule_AsVoidPtr(PyObject *ptr); -int F2PyCapsule_Check(PyObject *ptr); - -#endif - -#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS) -#define 
F2PY_INTENT_IN 1 -#define F2PY_INTENT_INOUT 2 -#define F2PY_INTENT_OUT 4 -#define F2PY_INTENT_HIDE 8 -#define F2PY_INTENT_CACHE 16 -#define F2PY_INTENT_COPY 32 -#define F2PY_INTENT_C 64 -#define F2PY_OPTIONAL 128 -#define F2PY_INTENT_INPLACE 256 -#define F2PY_INTENT_ALIGNED4 512 -#define F2PY_INTENT_ALIGNED8 1024 -#define F2PY_INTENT_ALIGNED16 2048 - -#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) -#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) -#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) -#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) - -#define F2PY_GET_ALIGNMENT(intent) \ - (F2PY_ALIGN4(intent) ? 4 : \ - (F2PY_ALIGN8(intent) ? 8 : \ - (F2PY_ALIGN16(intent) ? 16 : 1) )) -#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) - - extern PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj); - extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); - -#ifdef DEBUG_COPY_ND_ARRAY - extern void dump_attrs(const PyArrayObject* arr); -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_FORTRANOBJECT_H */ diff --git a/numpy-1.6.2/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy-1.6.2/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c deleted file mode 100644 index 73aa408629..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ /dev/null @@ -1,223 +0,0 @@ -/* File: wrapmodule.c - * This file is auto-generated with f2py (version:2_1330). - * Hand edited by Pearu. - * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . - * See http://cens.ioc.ee/projects/f2py2e/ - * Generation date: Fri Oct 21 22:41:12 2005 - * $Revision:$ - * $Date:$ - * Do not edit this file directly unless you know what you are doing!!! 
- */ -#ifdef __cplusplus -extern "C" { -#endif - -/*********************** See f2py2e/cfuncs.py: includes ***********************/ -#include "Python.h" -#include "fortranobject.h" -#include - -static PyObject *wrap_error; -static PyObject *wrap_module; - -/************************************ call ************************************/ -static char doc_f2py_rout_wrap_call[] = "\ -Function signature:\n\ - arr = call(type_num,dims,intent,obj)\n\ -Required arguments:\n" -" type_num : input int\n" -" dims : input int-sequence\n" -" intent : input int\n" -" obj : input python object\n" -"Return objects:\n" -" arr : array"; -static PyObject *f2py_rout_wrap_call(PyObject *capi_self, - PyObject *capi_args) { - PyObject * volatile capi_buildvalue = NULL; - int type_num = 0; - npy_intp *dims = NULL; - PyObject *dims_capi = Py_None; - int rank = 0; - int intent = 0; - PyArrayObject *capi_arr_tmp = NULL; - PyObject *arr_capi = Py_None; - int i; - - if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ - &type_num,&dims_capi,&intent,&arr_capi)) - return NULL; - rank = PySequence_Length(dims_capi); - dims = malloc(rank*sizeof(npy_intp)); - for (i=0;idata); - dimensions = PyTuple_New(arr->nd); - strides = PyTuple_New(arr->nd); - for (i=0;ind;++i) { - PyTuple_SetItem(dimensions,i,PyInt_FromLong(arr->dimensions[i])); - PyTuple_SetItem(strides,i,PyInt_FromLong(arr->strides[i])); - } - return Py_BuildValue("siOOO(cciii)ii",s,arr->nd, - dimensions,strides, - (arr->base==NULL?Py_None:arr->base), - arr->descr->kind, - arr->descr->type, - arr->descr->type_num, - arr->descr->elsize, - arr->descr->alignment, - arr->flags, - PyArray_ITEMSIZE(arr)); -} - -static PyMethodDef f2py_module_methods[] = { - - {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, - {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, - {NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "test_array_from_pyobj_ext", 
- NULL, - -1, - f2py_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m -PyObject *PyInit_test_array_from_pyobj_ext(void) { -#else -#define RETVAL -PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) { -#endif - PyObject *m,*d, *s; -#if PY_VERSION_HEX >= 0x03000000 - m = wrap_module = PyModule_Create(&moduledef); -#else - m = wrap_module = Py_InitModule("test_array_from_pyobj_ext", f2py_module_methods); -#endif - Py_TYPE(&PyFortran_Type) = &PyType_Type; - import_array(); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module wrap (failed to import numpy)"); - d = PyModule_GetDict(m); - s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" -" arr = call(type_num,dims,intent,obj)\n" -"."); - PyDict_SetItemString(d, "__doc__", s); - wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); - Py_DECREF(s); - PyDict_SetItemString(d, "F2PY_INTENT_IN", PyInt_FromLong(F2PY_INTENT_IN)); - PyDict_SetItemString(d, "F2PY_INTENT_INOUT", PyInt_FromLong(F2PY_INTENT_INOUT)); - PyDict_SetItemString(d, "F2PY_INTENT_OUT", PyInt_FromLong(F2PY_INTENT_OUT)); - PyDict_SetItemString(d, "F2PY_INTENT_HIDE", PyInt_FromLong(F2PY_INTENT_HIDE)); - PyDict_SetItemString(d, "F2PY_INTENT_CACHE", PyInt_FromLong(F2PY_INTENT_CACHE)); - PyDict_SetItemString(d, "F2PY_INTENT_COPY", PyInt_FromLong(F2PY_INTENT_COPY)); - PyDict_SetItemString(d, "F2PY_INTENT_C", PyInt_FromLong(F2PY_INTENT_C)); - PyDict_SetItemString(d, "F2PY_OPTIONAL", PyInt_FromLong(F2PY_OPTIONAL)); - PyDict_SetItemString(d, "F2PY_INTENT_INPLACE", PyInt_FromLong(F2PY_INTENT_INPLACE)); - PyDict_SetItemString(d, "PyArray_BOOL", PyInt_FromLong(PyArray_BOOL)); - PyDict_SetItemString(d, "PyArray_BYTE", PyInt_FromLong(PyArray_BYTE)); - PyDict_SetItemString(d, "PyArray_UBYTE", PyInt_FromLong(PyArray_UBYTE)); - PyDict_SetItemString(d, "PyArray_SHORT", PyInt_FromLong(PyArray_SHORT)); - PyDict_SetItemString(d, "PyArray_USHORT", 
PyInt_FromLong(PyArray_USHORT)); - PyDict_SetItemString(d, "PyArray_INT", PyInt_FromLong(PyArray_INT)); - PyDict_SetItemString(d, "PyArray_UINT", PyInt_FromLong(PyArray_UINT)); - PyDict_SetItemString(d, "PyArray_INTP", PyInt_FromLong(PyArray_INTP)); - PyDict_SetItemString(d, "PyArray_UINTP", PyInt_FromLong(PyArray_UINTP)); - PyDict_SetItemString(d, "PyArray_LONG", PyInt_FromLong(PyArray_LONG)); - PyDict_SetItemString(d, "PyArray_ULONG", PyInt_FromLong(PyArray_ULONG)); - PyDict_SetItemString(d, "PyArray_LONGLONG", PyInt_FromLong(PyArray_LONGLONG)); - PyDict_SetItemString(d, "PyArray_ULONGLONG", PyInt_FromLong(PyArray_ULONGLONG)); - PyDict_SetItemString(d, "PyArray_FLOAT", PyInt_FromLong(PyArray_FLOAT)); - PyDict_SetItemString(d, "PyArray_DOUBLE", PyInt_FromLong(PyArray_DOUBLE)); - PyDict_SetItemString(d, "PyArray_LONGDOUBLE", PyInt_FromLong(PyArray_LONGDOUBLE)); - PyDict_SetItemString(d, "PyArray_CFLOAT", PyInt_FromLong(PyArray_CFLOAT)); - PyDict_SetItemString(d, "PyArray_CDOUBLE", PyInt_FromLong(PyArray_CDOUBLE)); - PyDict_SetItemString(d, "PyArray_CLONGDOUBLE", PyInt_FromLong(PyArray_CLONGDOUBLE)); - PyDict_SetItemString(d, "PyArray_OBJECT", PyInt_FromLong(PyArray_OBJECT)); - PyDict_SetItemString(d, "PyArray_STRING", PyInt_FromLong(PyArray_STRING)); - PyDict_SetItemString(d, "PyArray_UNICODE", PyInt_FromLong(PyArray_UNICODE)); - PyDict_SetItemString(d, "PyArray_VOID", PyInt_FromLong(PyArray_VOID)); - PyDict_SetItemString(d, "PyArray_NTYPES", PyInt_FromLong(PyArray_NTYPES)); - PyDict_SetItemString(d, "PyArray_NOTYPE", PyInt_FromLong(PyArray_NOTYPE)); - PyDict_SetItemString(d, "PyArray_UDERDEF", PyInt_FromLong(PyArray_USERDEF)); - - PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_CONTIGUOUS)); - PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_FORTRAN)); - PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_OWNDATA)); - PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_FORCECAST)); - PyDict_SetItemString(d, "ENSURECOPY", 
PyInt_FromLong(NPY_ENSURECOPY)); - PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ENSUREARRAY)); - PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ALIGNED)); - PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_WRITEABLE)); - PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_UPDATEIFCOPY)); - - PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_BEHAVED)); - PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_BEHAVED_NS)); - PyDict_SetItemString(d, "CARRAY", PyInt_FromLong(NPY_CARRAY)); - PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_FARRAY)); - PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_CARRAY_RO)); - PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_FARRAY_RO)); - PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_DEFAULT)); - PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_UPDATE_ALL)); - - if (PyErr_Occurred()) - Py_FatalError("can't initialize module wrap"); - -#ifdef F2PY_REPORT_ATEXIT - on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); -#endif - - return RETVAL; -} -#ifdef __cplusplus -} -#endif diff --git a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap deleted file mode 100644 index 2665f89b52..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap +++ /dev/null @@ -1 +0,0 @@ -dict(real=dict(rk="double")) diff --git a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_free.f90 deleted file mode 100644 index b301710f5d..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_free.f90 +++ /dev/null @@ -1,34 +0,0 @@ - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - 
real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum diff --git a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 deleted file mode 100644 index cbe6317ed8..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 +++ /dev/null @@ -1,41 +0,0 @@ - -module mod - -contains - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum - - -end module mod diff --git a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_use.f90 deleted file mode 100644 index 337465ac54..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/foo_use.f90 +++ /dev/null @@ -1,19 +0,0 @@ -subroutine sum_with_use(x, res) - use precision - - implicit none - - real(kind=rk), intent(in) :: x(:) - real(kind=rk), intent(out) :: res - - integer :: i - - !print *, "size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - - end subroutine diff --git a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/precision.f90 b/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/precision.f90 deleted file mode 100644 index ed6c70cbbe..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/assumed_shape/precision.f90 +++ /dev/null @@ -1,4 +0,0 @@ -module precision - integer, parameter :: rk = selected_real_kind(8) - integer, parameter :: ik = selected_real_kind(4) -end module diff --git 
a/numpy-1.6.2/numpy/f2py/tests/src/kind/foo.f90 b/numpy-1.6.2/numpy/f2py/tests/src/kind/foo.f90 deleted file mode 100644 index d3d15cfb20..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/kind/foo.f90 +++ /dev/null @@ -1,20 +0,0 @@ - - -subroutine selectedrealkind(p, r, res) - implicit none - - integer, intent(in) :: p, r - !f2py integer :: r=0 - integer, intent(out) :: res - res = selected_real_kind(p, r) - -end subroutine - -subroutine selectedintkind(p, res) - implicit none - - integer, intent(in) :: p - integer, intent(out) :: res - res = selected_int_kind(p) - -end subroutine diff --git a/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo.f b/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo.f deleted file mode 100644 index c34742578f..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo.f +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar11(a) -cf2py intent(out) a - integer a - a = 11 - end diff --git a/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo_fixed.f90 deleted file mode 100644 index 7543a6acb7..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo_fixed.f90 +++ /dev/null @@ -1,8 +0,0 @@ - module foo_fixed - contains - subroutine bar12(a) -!f2py intent(out) a - integer a - a = 12 - end subroutine bar12 - end module foo_fixed diff --git a/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo_free.f90 b/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo_free.f90 deleted file mode 100644 index c1b641f13e..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/mixed/foo_free.f90 +++ /dev/null @@ -1,8 +0,0 @@ -module foo_free -contains - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 -end module foo_free diff --git a/numpy-1.6.2/numpy/f2py/tests/src/size/foo.f90 b/numpy-1.6.2/numpy/f2py/tests/src/size/foo.f90 deleted file mode 100644 index 5b66f8c430..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/src/size/foo.f90 +++ /dev/null @@ -1,44 +0,0 @@ - -subroutine foo(a, n, m, b) - implicit none - - real, 
intent(in) :: a(n, m) - integer, intent(in) :: n, m - real, intent(out) :: b(size(a, 1)) - - integer :: i - - do i = 1, size(b) - b(i) = sum(a(i,:)) - enddo -end subroutine - -subroutine trans(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x,2), size(x,1) ) :: y - integer :: N, M, i, j - N = size(x,1) - M = size(x,2) - DO i=1,N - do j=1,M - y(j,i) = x(i,j) - END DO - END DO -end subroutine trans - -subroutine flatten(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x) ) :: y - integer :: N, M, i, j, k - N = size(x,1) - M = size(x,2) - k = 1 - DO i=1,N - do j=1,M - y(k) = x(i,j) - k = k + 1 - END DO - END DO -end subroutine flatten diff --git a/numpy-1.6.2/numpy/f2py/tests/test_array_from_pyobj.py b/numpy-1.6.2/numpy/f2py/tests/test_array_from_pyobj.py deleted file mode 100644 index 488fd4db54..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_array_from_pyobj.py +++ /dev/null @@ -1,545 +0,0 @@ -import unittest -import os -import sys -import copy - -import nose - -from numpy.testing import * -from numpy import array, alltrue, ndarray, asarray, can_cast,zeros, dtype -from numpy.core.multiarray import typeinfo - -import util - -wrap = None -def setup(): - """ - Build the required testing extension module - - """ - global wrap - - # Check compiler availability first - if not util.has_c_compiler(): - raise nose.SkipTest("No C compiler available") - - if wrap is None: - config_code = """ - config.add_extension('test_array_from_pyobj_ext', - sources=['wrapmodule.c', 'fortranobject.c'], - define_macros=[]) - """ - d = os.path.dirname(__file__) - src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), - os.path.join(d, '..', 'src', 'fortranobject.c'), - os.path.join(d, '..', 'src', 'fortranobject.h')] - wrap = util.build_module_distutils(src, config_code, - 'test_array_from_pyobj_ext') - -def flags_info(arr): - flags = wrap.array_attrs(arr)[6] - return 
flags2names(flags) - -def flags2names(flags): - info = [] - for flagname in ['CONTIGUOUS','FORTRAN','OWNDATA','ENSURECOPY', - 'ENSUREARRAY','ALIGNED','NOTSWAPPED','WRITEABLE', - 'UPDATEIFCOPY','BEHAVED','BEHAVED_RO', - 'CARRAY','FARRAY' - ]: - if abs(flags) & getattr(wrap,flagname, 0): - info.append(flagname) - return info - -class Intent: - def __init__(self,intent_list=[]): - self.intent_list = intent_list[:] - flags = 0 - for i in intent_list: - if i=='optional': - flags |= wrap.F2PY_OPTIONAL - else: - flags |= getattr(wrap,'F2PY_INTENT_'+i.upper()) - self.flags = flags - def __getattr__(self,name): - name = name.lower() - if name=='in_': name='in' - return self.__class__(self.intent_list+[name]) - def __str__(self): - return 'intent(%s)' % (','.join(self.intent_list)) - def __repr__(self): - return 'Intent(%r)' % (self.intent_list) - def is_intent(self,*names): - for name in names: - if name not in self.intent_list: - return False - return True - def is_intent_exact(self,*names): - return len(self.intent_list)==len(names) and self.is_intent(*names) - -intent = Intent() - -class Type(object): - _type_names = ['BOOL','BYTE','UBYTE','SHORT','USHORT','INT','UINT', - 'LONG','ULONG','LONGLONG','ULONGLONG', - 'FLOAT','DOUBLE','LONGDOUBLE','CFLOAT','CDOUBLE', - 'CLONGDOUBLE'] - _type_cache = {} - - _cast_dict = {'BOOL':['BOOL']} - _cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] - _cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] - _cast_dict['BYTE'] = ['BYTE'] - _cast_dict['UBYTE'] = ['UBYTE'] - _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE','SHORT'] - _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE','USHORT'] - _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT','INT'] - _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT','UINT'] - - _cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] - _cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] - - _cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] - _cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + 
['ULONGLONG'] - - _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT','FLOAT'] - _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT','FLOAT','DOUBLE'] - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + ['ULONG','FLOAT','DOUBLE','LONGDOUBLE'] - - _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] - _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT','CDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + ['CFLOAT','CDOUBLE','CLONGDOUBLE'] - - - def __new__(cls,name): - if isinstance(name,dtype): - dtype0 = name - name = None - for n,i in typeinfo.items(): - if isinstance(i,tuple) and dtype0.type is i[-1]: - name = n - break - obj = cls._type_cache.get(name.upper(),None) - if obj is not None: - return obj - obj = object.__new__(cls) - obj._init(name) - cls._type_cache[name.upper()] = obj - return obj - - def _init(self,name): - self.NAME = name.upper() - self.type_num = getattr(wrap,'PyArray_'+self.NAME) - assert_equal(self.type_num,typeinfo[self.NAME][1]) - self.dtype = typeinfo[self.NAME][-1] - self.elsize = typeinfo[self.NAME][2] / 8 - self.dtypechar = typeinfo[self.NAME][0] - - def cast_types(self): - return map(self.__class__,self._cast_dict[self.NAME]) - - def all_types(self): - return map(self.__class__,self._type_names) - - def smaller_types(self): - bits = typeinfo[self.NAME][3] - types = [] - for name in self._type_names: - if typeinfo[name][3]bits: - types.append(Type(name)) - return types - -class Array: - def __init__(self,typ,dims,intent,obj): - self.type = typ - self.dims = dims - self.intent = intent - self.obj_copy = copy.deepcopy(obj) - self.obj = obj - - # arr.dtypechar may be different from typ.dtypechar - self.arr = wrap.call(typ.type_num,dims,intent.flags,obj) - - assert_(isinstance(self.arr, ndarray),`type(self.arr)`) - - self.arr_attr = wrap.array_attrs(self.arr) - - if len(dims)>1: - if self.intent.is_intent('c'): - assert_(intent.flags & wrap.F2PY_INTENT_C) - assert_(not 
self.arr.flags['FORTRAN'],`self.arr.flags,getattr(obj,'flags',None)`) - assert_(self.arr.flags['CONTIGUOUS']) - assert_(not self.arr_attr[6] & wrap.FORTRAN) - else: - assert_(not intent.flags & wrap.F2PY_INTENT_C) - assert_(self.arr.flags['FORTRAN']) - assert_(not self.arr.flags['CONTIGUOUS']) - assert_(self.arr_attr[6] & wrap.FORTRAN) - - if obj is None: - self.pyarr = None - self.pyarr_attr = None - return - - if intent.is_intent('cache'): - assert_(isinstance(obj,ndarray),`type(obj)`) - self.pyarr = array(obj).reshape(*dims).copy() - else: - self.pyarr = array(array(obj, - dtype = typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') - assert_(self.pyarr.dtype == typ, \ - `self.pyarr.dtype,typ`) - assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) - self.pyarr_attr = wrap.array_attrs(self.pyarr) - - if len(dims)>1: - if self.intent.is_intent('c'): - assert_(not self.pyarr.flags['FORTRAN']) - assert_(self.pyarr.flags['CONTIGUOUS']) - assert_(not self.pyarr_attr[6] & wrap.FORTRAN) - else: - assert_(self.pyarr.flags['FORTRAN']) - assert_(not self.pyarr.flags['CONTIGUOUS']) - assert_(self.pyarr_attr[6] & wrap.FORTRAN) - - - assert_(self.arr_attr[1]==self.pyarr_attr[1]) # nd - assert_(self.arr_attr[2]==self.pyarr_attr[2]) # dimensions - if self.arr_attr[1]<=1: - assert_(self.arr_attr[3]==self.pyarr_attr[3],\ - `self.arr_attr[3],self.pyarr_attr[3],self.arr.tostring(),self.pyarr.tostring()`) # strides - assert_(self.arr_attr[5][-2:]==self.pyarr_attr[5][-2:],\ - `self.arr_attr[5],self.pyarr_attr[5]`) # descr - assert_(self.arr_attr[6]==self.pyarr_attr[6],\ - `self.arr_attr[6],self.pyarr_attr[6],flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]),flags2names(self.arr_attr[6]),intent`) # flags - - if intent.is_intent('cache'): - assert_(self.arr_attr[5][3]>=self.type.elsize,\ - `self.arr_attr[5][3],self.type.elsize`) - else: - assert_(self.arr_attr[5][3]==self.type.elsize,\ - `self.arr_attr[5][3],self.type.elsize`) - 
assert_(self.arr_equal(self.pyarr,self.arr)) - - if isinstance(self.obj,ndarray): - if typ.elsize==Type(obj.dtype).elsize: - if not intent.is_intent('copy') and self.arr_attr[1]<=1: - assert_(self.has_shared_memory()) - - def arr_equal(self,arr1,arr2): - if arr1.shape != arr2.shape: - return False - s = arr1==arr2 - return alltrue(s.flatten()) - - def __str__(self): - return str(self.arr) - - def has_shared_memory(self): - """Check that created array shares data with input array. - """ - if self.obj is self.arr: - return True - if not isinstance(self.obj,ndarray): - return False - obj_attr = wrap.array_attrs(self.obj) - return obj_attr[0]==self.arr_attr[0] - -################################################## - -class test_intent(unittest.TestCase): - def test_in_out(self): - assert_equal(str(intent.in_.out),'intent(in,out)') - assert_(intent.in_.c.is_intent('c')) - assert_(not intent.in_.c.is_intent_exact('c')) - assert_(intent.in_.c.is_intent_exact('c','in')) - assert_(intent.in_.c.is_intent_exact('in','c')) - assert_(not intent.in_.is_intent('c')) - -class _test_shared_memory: - num2seq = [1,2] - num23seq = [[1,2,3],[4,5,6]] - def test_in_from_2seq(self): - a = self.array([2],intent.in_,self.num2seq) - assert_(not a.has_shared_memory()) - - def test_in_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq,dtype=t.dtype) - a = self.array([len(self.num2seq)],intent.in_,obj) - if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(),`self.type.dtype,t.dtype`) - else: - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_inout_2seq(self): - obj = array(self.num2seq,dtype=self.type.dtype) - a = self.array([len(self.num2seq)],intent.inout,obj) - assert_(a.has_shared_memory()) - - try: - a = self.array([2],intent.in_.inout,self.num2seq) - except TypeError,msg: - if not str(msg).startswith('failed to initialize intent(inout|inplace|cache) array'): - raise - else: - raise SystemError,'intent(inout) should have failed on sequence' 
- - def test_f_inout_23seq(self): - obj = array(self.num23seq,dtype=self.type.dtype,order='F') - shape = (len(self.num23seq),len(self.num23seq[0])) - a = self.array(shape,intent.in_.inout,obj) - assert_(a.has_shared_memory()) - - obj = array(self.num23seq,dtype=self.type.dtype,order='C') - shape = (len(self.num23seq),len(self.num23seq[0])) - try: - a = self.array(shape,intent.in_.inout,obj) - except ValueError,msg: - if not str(msg).startswith('failed to initialize intent(inout) array'): - raise - else: - raise SystemError,'intent(inout) should have failed on improper array' - - def test_c_inout_23seq(self): - obj = array(self.num23seq,dtype=self.type.dtype) - shape = (len(self.num23seq),len(self.num23seq[0])) - a = self.array(shape,intent.in_.c.inout,obj) - assert_(a.has_shared_memory()) - - def test_in_copy_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq,dtype=t.dtype) - a = self.array([len(self.num2seq)],intent.in_.copy,obj) - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_c_in_from_23seq(self): - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_,self.num23seq) - assert_(not a.has_shared_memory()) - - def test_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype) - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_,obj) - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_f_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype,order='F') - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_,obj) - if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(),`t.dtype`) - else: - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_c_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype) - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_.c,obj) - if 
t.elsize==self.type.elsize: - assert_(a.has_shared_memory(),`t.dtype`) - else: - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_f_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype,order='F') - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_.copy,obj) - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_c_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype) - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_.c.copy,obj) - assert_(not a.has_shared_memory(),`t.dtype`) - - def test_in_cache_from_2casttype(self): - for t in self.type.all_types(): - if t.elsize != self.type.elsize: - continue - obj = array(self.num2seq,dtype=t.dtype) - shape = (len(self.num2seq),) - a = self.array(shape,intent.in_.c.cache,obj) - assert_(a.has_shared_memory(),`t.dtype`) - - a = self.array(shape,intent.in_.cache,obj) - assert_(a.has_shared_memory(),`t.dtype`) - - obj = array(self.num2seq,dtype=t.dtype,order='F') - a = self.array(shape,intent.in_.c.cache,obj) - assert_(a.has_shared_memory(),`t.dtype`) - - a = self.array(shape,intent.in_.cache,obj) - assert_(a.has_shared_memory(),`t.dtype`) - - try: - a = self.array(shape,intent.in_.cache,obj[::-1]) - except ValueError,msg: - if not str(msg).startswith('failed to initialize intent(cache) array'): - raise - else: - raise SystemError,'intent(cache) should have failed on multisegmented array' - def test_in_cache_from_2casttype_failure(self): - for t in self.type.all_types(): - if t.elsize >= self.type.elsize: - continue - obj = array(self.num2seq,dtype=t.dtype) - shape = (len(self.num2seq),) - try: - a = self.array(shape,intent.in_.cache,obj) - except ValueError,msg: - if not str(msg).startswith('failed to initialize intent(cache) array'): - raise - else: - raise SystemError,'intent(cache) should have failed on smaller array' - - def test_cache_hidden(self): - shape = 
(2,) - a = self.array(shape,intent.cache.hide,None) - assert_(a.arr.shape==shape) - - shape = (2,3) - a = self.array(shape,intent.cache.hide,None) - assert_(a.arr.shape==shape) - - shape = (-1,3) - try: - a = self.array(shape,intent.cache.hide,None) - except ValueError,msg: - if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): - raise - else: - raise SystemError,'intent(cache) should have failed on undefined dimensions' - - def test_hidden(self): - shape = (2,) - a = self.array(shape,intent.hide,None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) - - shape = (2,3) - a = self.array(shape,intent.hide,None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2,3) - a = self.array(shape,intent.c.hide,None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - shape = (-1,3) - try: - a = self.array(shape,intent.hide,None) - except ValueError,msg: - if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): - raise - else: - raise SystemError,'intent(hide) should have failed on undefined dimensions' - - def test_optional_none(self): - shape = (2,) - a = self.array(shape,intent.optional,None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) - - shape = (2,3) - a = self.array(shape,intent.optional,None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2,3) - a = self.array(shape,intent.c.optional,None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - def 
test_optional_from_2seq(self): - obj = self.num2seq - shape = (len(obj),) - a = self.array(shape,intent.optional,obj) - assert_(a.arr.shape==shape) - assert_(not a.has_shared_memory()) - - def test_optional_from_23seq(self): - obj = self.num23seq - shape = (len(obj),len(obj[0])) - a = self.array(shape,intent.optional,obj) - assert_(a.arr.shape==shape) - assert_(not a.has_shared_memory()) - - a = self.array(shape,intent.optional.c,obj) - assert_(a.arr.shape==shape) - assert_(not a.has_shared_memory()) - - def test_inplace(self): - obj = array(self.num23seq,dtype=self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape,intent.inplace,obj) - assert_(obj[1][2]==a.arr[1][2],`obj,a.arr`) - a.arr[1][2]=54 - assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - - def test_inplace_from_casttype(self): - for t in self.type.cast_types(): - if t is self.type: - continue - obj = array(self.num23seq,dtype=t.dtype) - assert_(obj.dtype.type==t.dtype) - assert_(obj.dtype.type is not self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape,intent.inplace,obj) - assert_(obj[1][2]==a.arr[1][2],`obj,a.arr`) - a.arr[1][2]=54 - assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.dtype) # obj type is changed inplace! 
- - -for t in Type._type_names: - exec '''\ -class test_%s_gen(unittest.TestCase, - _test_shared_memory - ): - def setUp(self): - self.type = Type(%r) - array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj) -''' % (t,t,t) - -if __name__ == "__main__": - setup() - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_assumed_shape.py b/numpy-1.6.2/numpy/f2py/tests/test_assumed_shape.py deleted file mode 100644 index e501b13c3d..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_assumed_shape.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestAssumedShapeSumExample(util.F2PyTest): - sources = [_path('src', 'assumed_shape', 'foo_free.f90'), - _path('src', 'assumed_shape', 'foo_use.f90'), - _path('src', 'assumed_shape', 'precision.f90'), - _path('src', 'assumed_shape', 'foo_mod.f90'), - ] - - @dec.slow - def test_all(self): - r = self.module.fsum([1,2]) - assert_(r==3,`r`) - r = self.module.sum([1,2]) - assert_(r==3,`r`) - r = self.module.sum_with_use([1,2]) - assert_(r==3,`r`) - - r = self.module.mod.sum([1,2]) - assert_(r==3,`r`) - r = self.module.mod.fsum([1,2]) - assert_(r==3,`r`) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_callback.py b/numpy-1.6.2/numpy/f2py/tests/test_callback.py deleted file mode 100644 index 7f0107fd59..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_callback.py +++ /dev/null @@ -1,75 +0,0 @@ -from numpy.testing import * -from numpy import array -import math -import util - -class TestF77Callback(util.F2PyTest): - code = """ - subroutine t(fun,a) - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine func(a) -cf2py intent(in,out) a - integer a - a = a + 11 - end - - subroutine func0(a) -cf2py intent(out) a - integer a - a = 11 - end - - 
subroutine t2(a) -cf2py intent(callback) fun - integer a -cf2py intent(out) a - external fun - call fun(a) - end - """ - - @dec.slow - def test_all(self): - for name in "t,t2".split(","): - self.check_function(name) - - def check_function(self, name): - t = getattr(self.module, name) - r = t(lambda : 4) - assert_( r==4,`r`) - r = t(lambda a:5,fun_extra_args=(6,)) - assert_( r==5,`r`) - r = t(lambda a:a,fun_extra_args=(6,)) - assert_( r==6,`r`) - r = t(lambda a:5+a,fun_extra_args=(7,)) - assert_( r==12,`r`) - r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,)) - assert_( r==180,`r`) - r = t(math.degrees,fun_extra_args=(math.pi,)) - assert_( r==180,`r`) - - r = t(self.module.func, fun_extra_args=(6,)) - assert_( r==17,`r`) - r = t(self.module.func0) - assert_( r==11,`r`) - r = t(self.module.func0._cpointer) - assert_( r==11,`r`) - class A: - def __call__(self): - return 7 - def mth(self): - return 9 - a = A() - r = t(a) - assert_( r==7,`r`) - r = t(a.mth) - assert_( r==9,`r`) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_kind.py b/numpy-1.6.2/numpy/f2py/tests/test_kind.py deleted file mode 100644 index a6d485a881..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_kind.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -from numpy.f2py.crackfortran import _selected_int_kind_func as selected_int_kind -from numpy.f2py.crackfortran import _selected_real_kind_func as selected_real_kind - -class TestKind(util.F2PyTest): - sources = [_path('src', 'kind', 'foo.f90'), - ] - - @dec.slow - def test_all(self): - selectedrealkind = self.module.selectedrealkind - selectedintkind = self.module.selectedintkind - - for i in range(40): - assert_(selectedintkind(i) in [selected_int_kind(i),-1],\ - 'selectedintkind(%s): expected %r but got %r' % (i, 
selected_int_kind(i), selectedintkind(i))) - - for i in range(20): - assert_(selectedrealkind(i) in [selected_real_kind(i),-1],\ - 'selectedrealkind(%s): expected %r but got %r' % (i, selected_real_kind(i), selectedrealkind(i))) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_mixed.py b/numpy-1.6.2/numpy/f2py/tests/test_mixed.py deleted file mode 100644 index a8a14ca4b9..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_mixed.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestMixed(util.F2PyTest): - sources = [_path('src', 'mixed', 'foo.f'), - _path('src', 'mixed', 'foo_fixed.f90'), - _path('src', 'mixed', 'foo_free.f90')] - - @dec.slow - def test_all(self): - assert_( self.module.bar11() == 11) - assert_( self.module.foo_fixed.bar12() == 12) - assert_( self.module.foo_free.bar13() == 13) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_return_character.py b/numpy-1.6.2/numpy/f2py/tests/test_return_character.py deleted file mode 100644 index 67c542688b..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_return_character.py +++ /dev/null @@ -1,140 +0,0 @@ -from numpy.testing import * -from numpy import array -from numpy.compat import asbytes -import util - -class TestReturnCharacter(util.F2PyTest): - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0','t1','s0','s1']: - assert_( t(23)==asbytes('2')) - r = t('ab');assert_( r==asbytes('a'),`r`) - r = t(array('ab'));assert_( r==asbytes('a'),`r`) - r = t(array(77,'u1'));assert_( r==asbytes('M'),`r`) - #assert_(_raises(ValueError, t, array([77,87]))) - #assert_(_raises(ValueError, t, array(77))) - elif tname in ['ts','ss']: - assert_( t(23)==asbytes('23 '),`t(23)`) - assert_( 
t('123456789abcdef')==asbytes('123456789a')) - elif tname in ['t5','s5']: - assert_( t(23)==asbytes('23 '),`t(23)`) - assert_( t('ab')==asbytes('ab '),`t('ab')`) - assert_( t('123456789abcdef')==asbytes('12345')) - else: - raise NotImplementedError - -class TestF77ReturnCharacter(TestReturnCharacter): - code = """ - function t0(value) - character value - character t0 - t0 = value - end - function t1(value) - character*1 value - character*1 t1 - t1 = value - end - function t5(value) - character*5 value - character*5 t5 - t5 = value - end - function ts(value) - character*(*) value - character*(*) ts - ts = value - end - - subroutine s0(t0,value) - character value - character t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - character*1 value - character*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s5(t5,value) - character*5 value - character*5 t5 -cf2py intent(out) t5 - t5 = value - end - subroutine ss(ts,value) - character*(*) value - character*10 ts -cf2py intent(out) ts - ts = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t5,s0,s1,s5,ss".split(","): - self.check_function(getattr(self.module, name)) - -class TestF90ReturnCharacter(TestReturnCharacter): - suffix = ".f90" - code = """ -module f90_return_char - contains - function t0(value) - character :: value - character :: t0 - t0 = value - end function t0 - function t1(value) - character(len=1) :: value - character(len=1) :: t1 - t1 = value - end function t1 - function t5(value) - character(len=5) :: value - character(len=5) :: t5 - t5 = value - end function t5 - function ts(value) - character(len=*) :: value - character(len=10) :: ts - ts = value - end function ts - - subroutine s0(t0,value) - character :: value - character :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - character(len=1) :: value - character(len=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s5(t5,value) - 
character(len=5) :: value - character(len=5) :: t5 -!f2py intent(out) t5 - t5 = value - end subroutine s5 - subroutine ss(ts,value) - character(len=*) :: value - character(len=10) :: ts -!f2py intent(out) ts - ts = value - end subroutine ss -end module f90_return_char - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","): - self.check_function(getattr(self.module.f90_return_char, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_return_complex.py b/numpy-1.6.2/numpy/f2py/tests/test_return_complex.py deleted file mode 100644 index f8c6d226af..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_return_complex.py +++ /dev/null @@ -1,166 +0,0 @@ -from numpy.testing import * -from numpy import array -import util - -class TestReturnComplex(util.F2PyTest): - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0','t8','s0','s8']: - err = 1e-5 - else: - err = 0.0 - assert_( abs(t(234j)-234.0j)<=err) - assert_( abs(t(234.6)-234.6)<=err) - assert_( abs(t(234l)-234.0)<=err) - assert_( abs(t(234.6+3j)-(234.6+3j))<=err) - #assert_( abs(t('234')-234.)<=err) - #assert_( abs(t('234.6')-234.6)<=err) - assert_( abs(t(-234)+234.)<=err) - assert_( abs(t([234])-234.)<=err) - assert_( abs(t((234,))-234.)<=err) - assert_( abs(t(array(234))-234.)<=err) - assert_( abs(t(array(23+4j,'F'))-(23+4j))<=err) - assert_( abs(t(array([234]))-234.)<=err) - assert_( abs(t(array([[234]]))-234.)<=err) - assert_( abs(t(array([234],'b'))+22.)<=err) - assert_( abs(t(array([234],'h'))-234.)<=err) - assert_( abs(t(array([234],'i'))-234.)<=err) - assert_( abs(t(array([234],'l'))-234.)<=err) - assert_( abs(t(array([234],'q'))-234.)<=err) - assert_( abs(t(array([234],'f'))-234.)<=err) - assert_( abs(t(array([234],'d'))-234.)<=err) - assert_( abs(t(array([234+3j],'F'))-(234+3j))<=err) - assert_( abs(t(array([234],'D'))-234.)<=err) - - #assert_raises(TypeError, t, array([234], 'a1')) - 
assert_raises(TypeError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(TypeError, t, t) - assert_raises(TypeError, t, {}) - - try: - r = t(10l**400) - assert_( `r` in ['(inf+0j)','(Infinity+0j)'],`r`) - except OverflowError: - pass - - -class TestF77ReturnComplex(TestReturnComplex): - code = """ - function t0(value) - complex value - complex t0 - t0 = value - end - function t8(value) - complex*8 value - complex*8 t8 - t8 = value - end - function t16(value) - complex*16 value - complex*16 t16 - t16 = value - end - function td(value) - double complex value - double complex td - td = value - end - - subroutine s0(t0,value) - complex value - complex t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s8(t8,value) - complex*8 value - complex*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine s16(t16,value) - complex*16 value - complex*16 t16 -cf2py intent(out) t16 - t16 = value - end - subroutine sd(td,value) - double complex value - double complex td -cf2py intent(out) td - td = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnComplex(TestReturnComplex): - suffix = ".f90" - code = """ -module f90_return_complex - contains - function t0(value) - complex :: value - complex :: t0 - t0 = value - end function t0 - function t8(value) - complex(kind=4) :: value - complex(kind=4) :: t8 - t8 = value - end function t8 - function t16(value) - complex(kind=8) :: value - complex(kind=8) :: t16 - t16 = value - end function t16 - function td(value) - double complex :: value - double complex :: td - td = value - end function td - - subroutine s0(t0,value) - complex :: value - complex :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s8(t8,value) - complex(kind=4) :: value - complex(kind=4) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine 
s16(t16,value) - complex(kind=8) :: value - complex(kind=8) :: t16 -!f2py intent(out) t16 - t16 = value - end subroutine s16 - subroutine sd(td,value) - double complex :: value - double complex :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_complex - """ - - @dec.slow - def test_all(self): - for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","): - self.check_function(getattr(self.module.f90_return_complex, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_return_integer.py b/numpy-1.6.2/numpy/f2py/tests/test_return_integer.py deleted file mode 100644 index e1b3a37aa2..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_return_integer.py +++ /dev/null @@ -1,175 +0,0 @@ -from numpy.testing import * -from numpy import array -import util - -class TestReturnInteger(util.F2PyTest): - def check_function(self, t): - assert_( t(123)==123,`t(123)`) - assert_( t(123.6)==123) - assert_( t(123l)==123) - assert_( t('123')==123) - assert_( t(-123)==-123) - assert_( t([123])==123) - assert_( t((123,))==123) - assert_( t(array(123))==123) - assert_( t(array([123]))==123) - assert_( t(array([[123]]))==123) - assert_( t(array([123],'b'))==123) - assert_( t(array([123],'h'))==123) - assert_( t(array([123],'i'))==123) - assert_( t(array([123],'l'))==123) - assert_( t(array([123],'B'))==123) - assert_( t(array([123],'f'))==123) - assert_( t(array([123],'d'))==123) - - #assert_raises(ValueError, t, array([123],'S3')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - if t.__doc__.split()[0] in ['t8','s8']: - assert_raises(OverflowError, t, 100000000000000000000000l) - assert_raises(OverflowError, t, 10000000011111111111111.23) - -class TestF77ReturnInteger(TestReturnInteger): - code = """ - function t0(value) - integer value - integer t0 - t0 = value - end - 
function t1(value) - integer*1 value - integer*1 t1 - t1 = value - end - function t2(value) - integer*2 value - integer*2 t2 - t2 = value - end - function t4(value) - integer*4 value - integer*4 t4 - t4 = value - end - function t8(value) - integer*8 value - integer*8 t8 - t8 = value - end - - subroutine s0(t0,value) - integer value - integer t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - integer*1 value - integer*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - integer*2 value - integer*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - integer*4 value - integer*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - integer*8 value - integer*8 t8 -cf2py intent(out) t8 - t8 = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnInteger(TestReturnInteger): - suffix = ".f90" - code = """ -module f90_return_integer - contains - function t0(value) - integer :: value - integer :: t0 - t0 = value - end function t0 - function t1(value) - integer(kind=1) :: value - integer(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - integer(kind=2) :: value - integer(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - integer(kind=4) :: value - integer(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - integer(kind=8) :: value - integer(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - integer :: value - integer :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - integer(kind=1) :: value - integer(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - integer(kind=2) :: value - integer(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - integer(kind=4) :: value - 
integer(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - integer(kind=8) :: value - integer(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_integer - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): - self.check_function(getattr(self.module.f90_return_integer, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_return_logical.py b/numpy-1.6.2/numpy/f2py/tests/test_return_logical.py deleted file mode 100644 index 059b843dc0..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_return_logical.py +++ /dev/null @@ -1,184 +0,0 @@ -from numpy.testing import * -from numpy import array -import util - -class TestReturnLogical(util.F2PyTest): - def check_function(self, t): - assert_( t(True)==1,`t(True)`) - assert_( t(False)==0,`t(False)`) - assert_( t(0)==0) - assert_( t(None)==0) - assert_( t(0.0)==0) - assert_( t(0j)==0) - assert_( t(1j)==1) - assert_( t(234)==1) - assert_( t(234.6)==1) - assert_( t(234l)==1) - assert_( t(234.6+3j)==1) - assert_( t('234')==1) - assert_( t('aaa')==1) - assert_( t('')==0) - assert_( t([])==0) - assert_( t(())==0) - assert_( t({})==0) - assert_( t(t)==1) - assert_( t(-234)==1) - assert_( t(10l**100)==1) - assert_( t([234])==1) - assert_( t((234,))==1) - assert_( t(array(234))==1) - assert_( t(array([234]))==1) - assert_( t(array([[234]]))==1) - assert_( t(array([234],'b'))==1) - assert_( t(array([234],'h'))==1) - assert_( t(array([234],'i'))==1) - assert_( t(array([234],'l'))==1) - assert_( t(array([234],'f'))==1) - assert_( t(array([234],'d'))==1) - assert_( t(array([234+3j],'F'))==1) - assert_( t(array([234],'D'))==1) - assert_( t(array(0))==0) - assert_( t(array([0]))==0) - assert_( t(array([[0]]))==0) - assert_( t(array([0j]))==0) - assert_( t(array([1]))==1) - assert_raises(ValueError, t, array([0,0])) - - -class 
TestF77ReturnLogical(TestReturnLogical): - code = """ - function t0(value) - logical value - logical t0 - t0 = value - end - function t1(value) - logical*1 value - logical*1 t1 - t1 = value - end - function t2(value) - logical*2 value - logical*2 t2 - t2 = value - end - function t4(value) - logical*4 value - logical*4 t4 - t4 = value - end -c function t8(value) -c logical*8 value -c logical*8 t8 -c t8 = value -c end - - subroutine s0(t0,value) - logical value - logical t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - logical*1 value - logical*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - logical*2 value - logical*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - logical*4 value - logical*4 t4 -cf2py intent(out) t4 - t4 = value - end -c subroutine s8(t8,value) -c logical*8 value -c logical*8 t8 -cf2py intent(out) t8 -c t8 = value -c end - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,s0,s1,s2,s4".split(","): - self.check_function(getattr(self.module, name)) - -class TestF90ReturnLogical(TestReturnLogical): - suffix = ".f90" - code = """ -module f90_return_logical - contains - function t0(value) - logical :: value - logical :: t0 - t0 = value - end function t0 - function t1(value) - logical(kind=1) :: value - logical(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - logical(kind=2) :: value - logical(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - logical(kind=4) :: value - logical(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - logical(kind=8) :: value - logical(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - logical :: value - logical :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - logical(kind=1) :: value - logical(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - logical(kind=2) :: value - 
logical(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - logical(kind=4) :: value - logical(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - logical(kind=8) :: value - logical(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_logical - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): - self.check_function(getattr(self.module.f90_return_logical, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_return_real.py b/numpy-1.6.2/numpy/f2py/tests/test_return_real.py deleted file mode 100644 index 5dc12708e4..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_return_real.py +++ /dev/null @@ -1,200 +0,0 @@ -from numpy.testing import * -from numpy import array -import math -import util - -class TestReturnReal(util.F2PyTest): - def check_function(self, t): - if t.__doc__.split()[0] in ['t0','t4','s0','s4']: - err = 1e-5 - else: - err = 0.0 - assert_( abs(t(234)-234.0)<=err) - assert_( abs(t(234.6)-234.6)<=err) - assert_( abs(t(234l)-234.0)<=err) - assert_( abs(t('234')-234)<=err) - assert_( abs(t('234.6')-234.6)<=err) - assert_( abs(t(-234)+234)<=err) - assert_( abs(t([234])-234)<=err) - assert_( abs(t((234,))-234.)<=err) - assert_( abs(t(array(234))-234.)<=err) - assert_( abs(t(array([234]))-234.)<=err) - assert_( abs(t(array([[234]]))-234.)<=err) - assert_( abs(t(array([234],'b'))+22)<=err) - assert_( abs(t(array([234],'h'))-234.)<=err) - assert_( abs(t(array([234],'i'))-234.)<=err) - assert_( abs(t(array([234],'l'))-234.)<=err) - assert_( abs(t(array([234],'B'))-234.)<=err) - assert_( abs(t(array([234],'f'))-234.)<=err) - assert_( abs(t(array([234],'d'))-234.)<=err) - if t.__doc__.split()[0] in ['t0','t4','s0','s4']: - assert_( t(1e200)==t(1e300)) # inf - - #assert_raises(ValueError, t, array([234], 'S1')) - 
assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - try: - r = t(10l**400) - assert_( `r` in ['inf','Infinity'],`r`) - except OverflowError: - pass - -class TestCReturnReal(TestReturnReal): - suffix = ".pyf" - module_name = "c_ext_return_real" - code = """ -python module c_ext_return_real -usercode \'\'\' -float t4(float value) { return value; } -void s4(float *t4, float value) { *t4 = value; } -double t8(double value) { return value; } -void s8(double *t8, double value) { *t8 = value; } -\'\'\' -interface - function t4(value) - real*4 intent(c) :: t4,value - end - function t8(value) - real*8 intent(c) :: t8,value - end - subroutine s4(t4,value) - intent(c) s4 - real*4 intent(out) :: t4 - real*4 intent(c) :: value - end - subroutine s8(t8,value) - intent(c) s8 - real*8 intent(out) :: t8 - real*8 intent(c) :: value - end -end interface -end python module c_ext_return_real - """ - - @dec.slow - def test_all(self): - for name in "t4,t8,s4,s8".split(","): - self.check_function(getattr(self.module, name)) - -class TestF77ReturnReal(TestReturnReal): - code = """ - function t0(value) - real value - real t0 - t0 = value - end - function t4(value) - real*4 value - real*4 t4 - t4 = value - end - function t8(value) - real*8 value - real*8 t8 - t8 = value - end - function td(value) - double precision value - double precision td - td = value - end - - subroutine s0(t0,value) - real value - real t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s4(t4,value) - real*4 value - real*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - real*8 value - real*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine sd(td,value) - double precision value - double precision td -cf2py intent(out) td - td = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","): - 
self.check_function(getattr(self.module, name)) - -class TestF90ReturnReal(TestReturnReal): - suffix = ".f90" - code = """ -module f90_return_real - contains - function t0(value) - real :: value - real :: t0 - t0 = value - end function t0 - function t4(value) - real(kind=4) :: value - real(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - real(kind=8) :: value - real(kind=8) :: t8 - t8 = value - end function t8 - function td(value) - double precision :: value - double precision :: td - td = value - end function td - - subroutine s0(t0,value) - real :: value - real :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s4(t4,value) - real(kind=4) :: value - real(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - real(kind=8) :: value - real(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine sd(td,value) - double precision :: value - double precision :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_real - """ - - @dec.slow - def test_all(self): - for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","): - self.check_function(getattr(self.module.f90_return_real, name)) - - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/test_size.py b/numpy-1.6.2/numpy/f2py/tests/test_size.py deleted file mode 100644 index a548e9885e..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/test_size.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestSizeSumExample(util.F2PyTest): - sources = [_path('src', 'size', 'foo.f90'), - ] - - @dec.slow - def test_all(self): - r = self.module.foo([[1,2]]) - assert_equal(r, [3],`r`) - - r = self.module.foo([[1,2],[3,4]]) - assert_equal(r, [3,7],`r`) - - r = self.module.foo([[1,2],[3,4],[5,6]]) - 
assert_equal(r, [3,7,11],`r`) - - @dec.slow - def test_transpose(self): - r = self.module.trans([[1,2]]) - assert_equal(r, [[1],[2]],`r`) - - r = self.module.trans([[1,2,3],[4,5,6]]) - assert_equal(r, [[1,4],[2,5],[3,6]],`r`) - - @dec.slow - def test_flatten(self): - r = self.module.flatten([[1,2]]) - assert_equal(r, [1,2],`r`) - - r = self.module.flatten([[1,2,3],[4,5,6]]) - assert_equal(r, [1,2,3,4,5,6],`r`) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.6.2/numpy/f2py/tests/util.py b/numpy-1.6.2/numpy/f2py/tests/util.py deleted file mode 100644 index a5816b96fe..0000000000 --- a/numpy-1.6.2/numpy/f2py/tests/util.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -Utility functions for - -- building and importing modules on test time, using a temporary location -- detecting if compilers are present - -""" - -import os -import sys -import subprocess -import tempfile -import shutil -import atexit -import textwrap -import re -import random - -import nose - -from numpy.compat import asbytes, asstr -import numpy.f2py - -try: - from hashlib import md5 -except ImportError: - from md5 import new as md5 - -# -# Maintaining a temporary module directory -# - -_module_dir = None - -def _cleanup(): - global _module_dir - if _module_dir is not None: - try: - sys.path.remove(_module_dir) - except ValueError: - pass - try: - shutil.rmtree(_module_dir) - except (IOError, OSError): - pass - _module_dir = None - -def get_module_dir(): - global _module_dir - if _module_dir is None: - _module_dir = tempfile.mkdtemp() - atexit.register(_cleanup) - if _module_dir not in sys.path: - sys.path.insert(0, _module_dir) - return _module_dir - -def get_temp_module_name(): - # Assume single-threaded, and the module dir usable only by this thread - d = get_module_dir() - for j in xrange(5403, 9999999): - name = "_test_ext_module_%d" % j - fn = os.path.join(d, name) - if name not in sys.modules and not os.path.isfile(fn+'.py'): - return name - raise RuntimeError("Failed 
to create a temporary module name") - -def _memoize(func): - memo = {} - def wrapper(*a, **kw): - key = repr((a, kw)) - if key not in memo: - try: - memo[key] = func(*a, **kw) - except Exception, e: - memo[key] = e - raise - ret = memo[key] - if isinstance(ret, Exception): - raise ret - return ret - wrapper.__name__ = func.__name__ - return wrapper - -# -# Building modules -# - -@_memoize -def build_module(source_files, options=[], skip=[], only=[], module_name=None): - """ - Compile and import a f2py module, built from the given files. - - """ - - code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " - "f2py2e.main()" % repr(sys.path)) - - d = get_module_dir() - - # Copy files - dst_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap') - if os.path.isfile(fn): - dst = os.path.join(d, os.path.basename(fn)) - if not os.path.isfile(dst): - shutil.copyfile(fn, dst) - - # Prepare options - if module_name is None: - module_name = get_temp_module_name() - f2py_opts = ['-c', '-m', module_name] + options + dst_sources - if skip: - f2py_opts += ['skip:'] + skip - if only: - f2py_opts += ['only:'] + only - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, '-c', code] + f2py_opts - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - __import__(module_name) - return sys.modules[module_name] - -@_memoize -def build_code(source_code, options=[], skip=[], only=[], suffix=None, - module_name=None): - """ - Compile and import Fortran code using f2py. 
- - """ - if suffix is None: - suffix = '.f' - - fd, tmp_fn = tempfile.mkstemp(suffix=suffix) - os.write(fd, asbytes(source_code)) - os.close(fd) - - try: - return build_module([tmp_fn], options=options, skip=skip, only=only, - module_name=module_name) - finally: - os.unlink(tmp_fn) - -# -# Check if compilers are available at all... -# - -_compiler_status = None -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... - code = """ -import os -import sys -sys.path = %(syspath)s - -def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - -from numpy.distutils.core import setup -setup(configuration=configuration) - -config_cmd = config.get_config_cmd() -have_c = config_cmd.try_compile('void foo() {}') -print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) -sys.exit(99) -""" - code = code % dict(syspath=repr(sys.path)) - - fd, script = tempfile.mkstemp(suffix='.py') - os.write(fd, asbytes(code)) - os.close(fd) - - try: - cmd = [sys.executable, script, 'config'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out) - if m: - _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), - bool(int(m.group(3)))) - finally: - os.unlink(script) - - # Finished - return _compiler_status - -def has_c_compiler(): - return _get_compiler_status()[0] - -def has_f77_compiler(): - return _get_compiler_status()[1] - -def has_f90_compiler(): - return _get_compiler_status()[2] - -# -# Building with distutils -# - -@_memoize -def build_module_distutils(source_files, config_code, module_name, **kw): - """ - 
Build a module via distutils and import it. - - """ - from numpy.distutils.misc_util import Configuration - from numpy.distutils.core import setup - - d = get_module_dir() - - # Copy files - dst_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - # Build script - config_code = textwrap.dedent(config_code).replace("\n", "\n ") - - code = """\ -import os -import sys -sys.path = %(syspath)s - -def configuration(parent_name='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - %(config_code)s - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) -""" % dict(config_code=config_code, syspath = repr(sys.path)) - - script = os.path.join(d, get_temp_module_name() + '.py') - dst_sources.append(script) - f = open(script, 'wb') - f.write(asbytes(code)) - f.close() - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, script, 'build_ext', '-i'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running distutils build failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - __import__(module_name) - return sys.modules[module_name] - -# -# Unittest convenience -# - -class F2PyTest(object): - code = None - sources = None - options = [] - skip = [] - only = [] - suffix = '.f' - module = None - module_name = None - - def setUp(self): - if self.module is not None: - return - - # Check compiler availability first - if not has_c_compiler(): - raise nose.SkipTest("No C compiler available") - - codes = [] - if self.sources: - codes.extend(self.sources) - if self.code is 
not None: - codes.append(self.suffix) - - needs_f77 = False - needs_f90 = False - for fn in codes: - if fn.endswith('.f'): - needs_f77 = True - elif fn.endswith('.f90'): - needs_f90 = True - if needs_f77 and not has_f77_compiler(): - raise nose.SkipTest("No Fortran 77 compiler available") - if needs_f90 and not has_f90_compiler(): - raise nose.SkipTest("No Fortran 90 compiler available") - - # Build the module - if self.code is not None: - self.module = build_code(self.code, options=self.options, - skip=self.skip, only=self.only, - suffix=self.suffix, - module_name=self.module_name) - - if self.sources is not None: - self.module = build_module(self.sources, options=self.options, - skip=self.skip, only=self.only, - module_name=self.module_name) diff --git a/numpy-1.6.2/numpy/f2py/use_rules.py b/numpy-1.6.2/numpy/f2py/use_rules.py deleted file mode 100644 index 021d08601d..0000000000 --- a/numpy-1.6.2/numpy/f2py/use_rules.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python -""" - -Build 'use others module data' mechanism for f2py2e. - -Unfinished. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2000/09/10 12:35:43 $ -Pearu Peterson -""" - -__version__ = "$Revision: 1.3 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from auxfuncs import * -############## - -usemodule_rules={ - 'body':""" -#begintitle# -static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ -\t #name# = get_#name#()\\n\\ -Arguments:\\n\\ -#docstr#\"; -extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); -static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { -/*#decl#*/ -\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; -printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); -\treturn Py_BuildValue(\"\"); -capi_fail: -\treturn NULL; -} -""", - 'method':'\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', - 'need':['F_MODFUNC'] - } - -################ - -def buildusevars(m,r): - ret={} - outmess('\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n'%(m['name'])) - varsmap={} - revmap={} - if 'map' in r: - for k in r['map'].keys(): - if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n'%(r['map'][k],k,revmap[r['map'][k]])) - else: - revmap[r['map'][k]]=k - if 'only' in r and r['only']: - for v in r['map'].keys(): - if r['map'][v] in m['vars']: - - if revmap[r['map'][v]]==v: - varsmap[v]=r['map'][v] - else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n'%(v,r['map'][v])) - else: - outmess('\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n'%(v,r['map'][v])) - else: - for v in m['vars'].keys(): - if v in revmap: - varsmap[v]=revmap[v] - else: - varsmap[v]=v - for v in varsmap.keys(): - ret=dictappend(ret,buildusevar(v,varsmap[v],m['vars'],m['name'])) - return ret -def buildusevar(name,realname,vars,usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n'%(name,realname)) - ret={} - vrd={'name':name, - 'realname':realname, - 'REALNAME':realname.upper(), - 'usemodulename':usemodulename, - 'USEMODULENAME':usemodulename.upper(), - 'texname':name.replace('_','\\_'), - 'begintitle':gentitle('%s=>%s'%(name,realname)), - 'endtitle':gentitle('end of %s=>%s'%(name,realname)), - 'apiname':'#modulename#_use_%s_from_%s'%(realname,usemodulename) - } - nummap={0:'Ro',1:'Ri',2:'Rii',3:'Riii',4:'Riv',5:'Rv',6:'Rvi',7:'Rvii',8:'Rviii',9:'Rix'} - vrd['texnamename']=name - for i in nummap.keys(): - vrd['texnamename']=vrd['texnamename'].replace(`i`,nummap[i]) - if hasnote(vars[realname]): vrd['note']=vars[realname]['note'] - rd=dictappend({},vrd) - var=vars[realname] - - print name,realname,vars[realname] - ret=applyrules(usemodule_rules,rd) - return ret diff --git a/numpy-1.6.2/numpy/fft/SConscript b/numpy-1.6.2/numpy/fft/SConscript deleted file mode 100644 index adceea0112..0000000000 --- a/numpy-1.6.2/numpy/fft/SConscript +++ /dev/null @@ -1,8 +0,0 @@ -# Last Change: Thu Jun 12 06:00 PM 2008 J -# vim:syntax=python -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.NumpyPythonExtension('fftpack_lite', - source = ['fftpack_litemodule.c', 'fftpack.c']) diff --git a/numpy-1.6.2/numpy/fft/SConstruct b/numpy-1.6.2/numpy/fft/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/numpy-1.6.2/numpy/fft/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/numpy-1.6.2/numpy/fft/__init__.py b/numpy-1.6.2/numpy/fft/__init__.py 
deleted file mode 100644 index 324e39f4d0..0000000000 --- a/numpy-1.6.2/numpy/fft/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# To get sub-modules -from info import __doc__ - -from fftpack import * -from helper import * - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/fft/fftpack.c b/numpy-1.6.2/numpy/fft/fftpack.c deleted file mode 100644 index 9c8fd118a2..0000000000 --- a/numpy-1.6.2/numpy/fft/fftpack.c +++ /dev/null @@ -1,1501 +0,0 @@ -/* -fftpack.c : A set of FFT routines in C. -Algorithmically based on Fortran-77 FFTPACK by Paul N. Swarztrauber (Version 4, 1985). - -*/ - -/* isign is +1 for backward and -1 for forward transforms */ - -#include -#include -#define DOUBLE - -#ifdef DOUBLE -#define Treal double -#else -#define Treal float -#endif - - -#define ref(u,a) u[a] - -#define MAXFAC 13 /* maximum number of factors in factorization of n */ -#define NSPECIAL 4 /* number of factors for which we have special-case routines */ - -#ifdef __cplusplus -extern "C" { -#endif - - -/* ---------------------------------------------------------------------- - passf2, passf3, passf4, passf5, passf. Complex FFT passes fwd and bwd. 
----------------------------------------------------------------------- */ - -static void passf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[], int isign) - /* isign==+1 for backward transform */ - { - int i, k, ah, ac; - Treal ti2, tr2; - if (ido <= 2) { - for (k=0; k= l1) { - for (j=1; j idp) idlj -= idp; - war = wa[idlj - 2]; - wai = wa[idlj-1]; - for (ik=0; ik= l1) { - for (j=1; j= l1) { - for (k=0; k= l1) { - for (j=1; j= l1) { - for (k=0; k= l1) { - for (j=1; j= l1) { - for (j=1; j 5) { - wa[i1-1] = wa[i-1]; - wa[i1] = wa[i]; - } - } - l1 = l2; - } - } /* cffti1 */ - - -void cffti(int n, Treal wsave[]) - { - int iw1, iw2; - if (n == 1) return; - iw1 = 2*n; - iw2 = iw1 + 2*n; - cffti1(n, wsave+iw1, (int*)(wsave+iw2)); - } /* cffti */ - - /* ---------------------------------------------------------------------- -rfftf1, rfftb1, rfftf, rfftb, rffti1, rffti. Treal FFTs. ----------------------------------------------------------------------- */ - -static void rfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2]) - { - int i; - int k1, l1, l2, na, kh, nf, ip, iw, ix2, ix3, ix4, ido, idl1; - Treal *cinput, *coutput; - nf = ifac[1]; - na = 1; - l2 = n; - iw = n-1; - for (k1 = 1; k1 <= nf; ++k1) { - kh = nf - k1; - ip = ifac[kh + 2]; - l1 = l2 / ip; - ido = n / l2; - idl1 = ido*l1; - iw -= (ip - 1)*ido; - na = !na; - if (na) { - cinput = ch; - coutput = c; - } else { - cinput = c; - coutput = ch; - } - switch (ip) { - case 4: - ix2 = iw + ido; - ix3 = ix2 + ido; - radf4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]); - break; - case 2: - radf2(ido, l1, cinput, coutput, &wa[iw]); - break; - case 3: - ix2 = iw + ido; - radf3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]); - break; - case 5: - ix2 = iw + ido; - ix3 = ix2 + ido; - ix4 = ix3 + ido; - radf5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]); - break; - default: - if (ido == 1) - na = !na; - if (na == 0) { - radfg(ido, ip, l1, idl1, c, ch, 
&wa[iw]); - na = 1; - } else { - radfg(ido, ip, l1, idl1, ch, c, &wa[iw]); - na = 0; - } - } - l2 = l1; - } - if (na == 1) return; - for (i = 0; i < n; i++) c[i] = ch[i]; - } /* rfftf1 */ - - -void rfftb1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2]) - { - int i; - int k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, ido, idl1; - Treal *cinput, *coutput; - nf = ifac[1]; - na = 0; - l1 = 1; - iw = 0; - for (k1=1; k1<=nf; k1++) { - ip = ifac[k1 + 1]; - l2 = ip*l1; - ido = n / l2; - idl1 = ido*l1; - if (na) { - cinput = ch; - coutput = c; - } else { - cinput = c; - coutput = ch; - } - switch (ip) { - case 4: - ix2 = iw + ido; - ix3 = ix2 + ido; - radb4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]); - na = !na; - break; - case 2: - radb2(ido, l1, cinput, coutput, &wa[iw]); - na = !na; - break; - case 3: - ix2 = iw + ido; - radb3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]); - na = !na; - break; - case 5: - ix2 = iw + ido; - ix3 = ix2 + ido; - ix4 = ix3 + ido; - radb5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]); - na = !na; - break; - default: - radbg(ido, ip, l1, idl1, cinput, coutput, &wa[iw]); - if (ido == 1) na = !na; - } - l1 = l2; - iw += (ip - 1)*ido; - } - if (na == 0) return; - for (i=0; i n: - index = [slice(None)]*len(s) - index[axis] = slice(0,n) - a = a[index] - else: - index = [slice(None)]*len(s) - index[axis] = slice(0,s[axis]) - s[axis] = n - z = zeros(s, a.dtype.char) - z[index] = a - a = z - - if axis != -1: - a = swapaxes(a, axis, -1) - r = work_function(a, wsave) - if axis != -1: - r = swapaxes(r, axis, -1) - return r - - -def fft(a, n=None, axis=-1): - """ - Compute the one-dimensional discrete Fourier Transform. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. - - Parameters - ---------- - a : array_like - Input array, can be complex. 
- n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input (along the axis specified by `axis`) is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - if `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : for definition of the DFT and conventions used. - ifft : The inverse of `fft`. - fft2 : The two-dimensional FFT. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - fftfreq : Frequency bins for given FFT parameters. - - Notes - ----- - FFT (Fast Fourier Transform) refers to a way the discrete Fourier - Transform (DFT) can be calculated efficiently, by using symmetries in the - calculated terms. The symmetry is highest when `n` is a power of 2, and - the transform is therefore most efficient for these sizes. - - The DFT is defined, with the conventions used in this implementation, in - the documentation for the `numpy.fft` module. - - References - ---------- - .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. 
- - Examples - -------- - >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) - array([ -3.44505240e-16 +1.14383329e-17j, - 8.00000000e+00 -5.71092652e-15j, - 2.33482938e-16 +1.22460635e-16j, - 1.64863782e-15 +1.77635684e-15j, - 9.95839695e-17 +2.33482938e-16j, - 0.00000000e+00 +1.66837030e-15j, - 1.14383329e-17 +1.22460635e-16j, - -1.64863782e-15 +1.77635684e-15j]) - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(256) - >>> sp = np.fft.fft(np.sin(t)) - >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] - >>> plt.show() - - In this example, real input has an FFT which is Hermitian, i.e., symmetric - in the real part and anti-symmetric in the imaginary part, as described in - the `numpy.fft` documentation. - - """ - - return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache) - - -def ifft(a, n=None, axis=-1): - """ - Compute the one-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier transform computed by `fft`. In other words, - ``ifft(fft(a)) == a`` to within numerical accuracy. - For a general description of the algorithm and definitions, - see `numpy.fft`. - - The input should be ordered in the same way as is returned by `fft`, - i.e., ``a[0]`` should contain the zero frequency term, - ``a[1:n/2+1]`` should contain the positive-frequency terms, and - ``a[n/2+1:]`` should contain the negative-frequency terms, in order of - decreasingly negative frequency. See `numpy.fft` for details. - - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input (along the axis specified by `axis`) is used. - See notes about padding issues. 
- axis : int, optional - Axis over which to compute the inverse DFT. If not given, the last - axis is used. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - If `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : An introduction, with definitions and general explanations. - fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse - ifft2 : The two-dimensional inverse FFT. - ifftn : The n-dimensional inverse FFT. - - Notes - ----- - If the input parameter `n` is larger than the size of the input, the input - is padded by appending zeros at the end. Even though this is the common - approach, it might lead to surprising results. If a different padding is - desired, it must be performed before calling `ifft`. - - Examples - -------- - >>> np.fft.ifft([0, 4, 0, 0]) - array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) - - Create and plot a band-limited signal with random phases: - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) - >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) - >>> s = np.fft.ifft(n) - >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - [, ] - >>> plt.legend(('real', 'imaginary')) - - >>> plt.show() - - """ - - a = asarray(a).astype(complex) - if n is None: - n = shape(a)[axis] - return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n - - -def rfft(a, n=None, axis=-1): - """ - Compute the one-dimensional discrete Fourier Transform for real input. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) of a real-valued array by means of an efficient algorithm - called the Fast Fourier Transform (FFT). 
- - Parameters - ---------- - a : array_like - Input array - n : int, optional - Number of points along transformation axis in the input to use. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input (along the axis specified by `axis`) is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is ``n/2+1``. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - irfft : The inverse of `rfft`. - fft : The one-dimensional FFT of general (complex) input. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - - Notes - ----- - When the DFT is computed for purely real input, the output is - Hermite-symmetric, i.e. the negative frequency terms are just the complex - conjugates of the corresponding positive-frequency terms, and the - negative-frequency terms are therefore redundant. This function does not - compute the negative frequency terms, and the length of the transformed - axis of the output is therefore ``n/2+1``. - - When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which - must be purely real due to the Hermite symmetry. - - If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and - ``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]`` - contains the term for frequency ``A[(n-1)/2]``, and is complex in the - general case. - - If the input `a` contains an imaginary part, it is silently discarded. 
- - Examples - -------- - >>> np.fft.fft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) - >>> np.fft.rfft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j]) - - Notice how the final element of the `fft` output is the complex conjugate - of the second element, for real input. For `rfft`, this symmetry is - exploited to compute only the non-negative frequency terms. - - """ - - a = asarray(a).astype(float) - return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache) - - -def irfft(a, n=None, axis=-1): - """ - Compute the inverse of the n-point DFT for real input. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier Transform of real input computed by `rfft`. - In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical - accuracy. (See Notes below for why ``len(a)`` is necessary here.) - - The input is expected to be in the form returned by `rfft`, i.e. the - real zero-frequency term followed by the complex positive frequency terms - in order of increasing frequency. Since the discrete Fourier Transform of - real input is Hermite-symmetric, the negative frequency terms are taken - to be the complex conjugates of the corresponding positive frequency terms. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. - For `n` output points, ``n/2+1`` input points are necessary. If the - input is longer than this, it is cropped. If it is shorter than this, - it is padded with zeros. If `n` is not given, it is determined from - the length of the input (along the axis specified by `axis`). - axis : int, optional - Axis over which to compute the inverse FFT. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. 
- The length of the transformed axis is `n`, or, if `n` is not given, - ``2*(m-1)`` where `m` is the length of the transformed axis of the - input. To get an odd number of output points, `n` must be specified. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. - fft : The one-dimensional FFT. - irfft2 : The inverse of the two-dimensional FFT of real input. - irfftn : The inverse of the *n*-dimensional FFT of real input. - - Notes - ----- - Returns the real valued `n`-point inverse discrete Fourier transform - of `a`, where `a` contains the non-negative frequency terms of a - Hermite-symmetric sequence. `n` is the length of the result, not the - input. - - If you specify an `n` such that `a` must be zero-padded or truncated, the - extra/removed values will be added/removed at high frequencies. One can - thus resample a series to `m` points via Fourier interpolation by: - ``a_resamp = irfft(rfft(a), m)``. - - - Examples - -------- - >>> np.fft.ifft([1, -1j, -1, 1j]) - array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) - >>> np.fft.irfft([1, -1j, -1]) - array([ 0., 1., 0., 0.]) - - Notice how the last term in the input to the ordinary `ifft` is the - complex conjugate of the second term, and the output has zero imaginary - part everywhere. When calling `irfft`, the negative frequencies are not - specified, and the output array is purely real. - - """ - - a = asarray(a).astype(complex) - if n is None: - n = (shape(a)[axis] - 1) * 2 - return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb, - _real_fft_cache) / n - - -def hfft(a, n=None, axis=-1): - """ - Compute the FFT of a signal whose spectrum has Hermitian symmetry. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - The length of the FFT. 
- axis : int, optional - The axis over which to compute the FFT, assuming Hermitian symmetry - of the spectrum. Default is the last axis. - - Returns - ------- - out : ndarray - The transformed input. - - See also - -------- - rfft : Compute the one-dimensional FFT for real input. - ihfft : The inverse of `hfft`. - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal is real in the frequency domain and has - Hermite symmetry in the time domain. So here it's `hfft` for which - you must supply the length of the result if it is to be odd: - ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. - - Examples - -------- - >>> signal = np.array([[1, 1.j], [-1.j, 2]]) - >>> np.conj(signal.T) - signal # check Hermitian symmetry - array([[ 0.-0.j, 0.+0.j], - [ 0.+0.j, 0.-0.j]]) - >>> freq_spectrum = np.fft.hfft(signal) - >>> freq_spectrum - array([[ 1., 1.], - [ 2., -2.]]) - - """ - - a = asarray(a).astype(complex) - if n is None: - n = (shape(a)[axis] - 1) * 2 - return irfft(conjugate(a), n, axis) * n - - -def ihfft(a, n=None, axis=-1): - """ - Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry. - - Parameters - ---------- - a : array_like - Input array. - n : int, optional - Length of the inverse FFT. - axis : int, optional - Axis over which to compute the inverse FFT, assuming Hermitian - symmetry of the spectrum. Default is the last axis. - - Returns - ------- - out : ndarray - The transformed input. - - See also - -------- - hfft, irfft - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal is real in the frequency domain and has - Hermite symmetry in the time domain. So here it's `hfft` for which - you must supply the length of the result if it is to be odd: - ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. 
- - """ - - a = asarray(a).astype(float) - if n is None: - n = shape(a)[axis] - return conjugate(rfft(a, n, axis))/n - - -def _cook_nd_args(a, s=None, axes=None, invreal=0): - if s is None: - shapeless = 1 - if axes is None: - s = list(a.shape) - else: - s = take(a.shape, axes) - else: - shapeless = 0 - s = list(s) - if axes is None: - axes = range(-len(s), 0) - if len(s) != len(axes): - raise ValueError, "Shape and axes have different lengths." - if invreal and shapeless: - s[axes[-1]] = (s[axes[-1]] - 1) * 2 - return s, axes - - -def _raw_fftnd(a, s=None, axes=None, function=fft): - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes) - itl = range(len(axes)) - itl.reverse() - for ii in itl: - a = function(a, n=s[ii], axis=axes[ii]) - return a - - -def fftn(a, s=None, axes=None): - """ - Compute the N-dimensional discrete Fourier Transform. - - This function computes the *N*-dimensional discrete Fourier Transform over - any number of axes in an *M*-dimensional array by means of the Fast Fourier - Transform (FFT). - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). - This corresponds to `n` for `fft(x, n)`. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input (along the axes specified - by `axes`) is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the transform over that axis is - performed multiple times. 
- - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. - fft : The one-dimensional FFT, with definitions and conventions used. - rfftn : The *n*-dimensional FFT of real input. - fft2 : The two-dimensional FFT. - fftshift : Shifts zero-frequency terms to centre of array - - Notes - ----- - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of all axes, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - See `numpy.fft` for details, definitions and conventions used. - - Examples - -------- - >>> a = np.mgrid[:3, :3, :3][0] - >>> np.fft.fftn(a, axes=(1, 2)) - array([[[ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 9.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 18.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) - array([[[ 2.+0.j, 2.+0.j, 2.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[-2.+0.j, -2.+0.j, -2.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - - >>> import matplotlib.pyplot as plt - >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, - ... 
2 * np.pi * np.arange(200) / 34) - >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) - >>> FS = np.fft.fftn(S) - >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) - - >>> plt.show() - - """ - - return _raw_fftnd(a,s,axes,fft) - -def ifftn(a, s=None, axes=None): - """ - Compute the N-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform over any number of axes in an M-dimensional array by - means of the Fast Fourier Transform (FFT). In other words, - ``ifftn(fftn(a)) == a`` to within numerical accuracy. - For a description of the definitions and conventions used, see `numpy.fft`. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fftn`, i.e. it should have the term for zero frequency - in all axes in the low-order corner, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``ifft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input (along the axes specified - by `axes`) is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the IFFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. 
- - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. - ifft : The one-dimensional inverse FFT. - ifft2 : The two-dimensional inverse FFT. - ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning - of array. - - Notes - ----- - See `numpy.fft` for definitions and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifftn` is called. - - Examples - -------- - >>> a = np.eye(4) - >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) - array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) - - - Create and plot an image with band-limited frequency content: - - >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) - >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) - >>> im = np.fft.ifftn(n).real - >>> plt.imshow(im) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, ifft) - - -def fft2(a, s=None, axes=(-2,-1)): - """ - Compute the 2-dimensional discrete Fourier Transform - - This function computes the *n*-dimensional discrete Fourier Transform - over any axes in an *M*-dimensional array by means of the - Fast Fourier Transform (FFT). 
By default, the transform is computed over - the last two axes of the input array, i.e., a 2-dimensional FFT. - - Parameters - ---------- - a : array_like - Input array, can be complex - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). - This corresponds to `n` for `fft(x, n)`. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input (along the axes specified - by `axes`) is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifft2 : The inverse two-dimensional FFT. - fft : The one-dimensional FFT. - fftn : The *n*-dimensional FFT. - fftshift : Shifts zero-frequency terms to the center of the array. - For two-dimensional input, swaps first and third quadrants, and second - and fourth quadrants. - - Notes - ----- - `fft2` is just `fftn` with a different default for `axes`. 
- - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of the transformed axes, the positive frequency terms - in the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - the axes, in order of decreasingly negative frequency. - - See `fftn` for details and a plotting example, and `numpy.fft` for - definitions and conventions used. - - - Examples - -------- - >>> a = np.mgrid[:5, :5][0] - >>> np.fft.fft2(a) - array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]]) - - """ - - return _raw_fftnd(a,s,axes,fft) - - -def ifft2(a, s=None, axes=(-2,-1)): - """ - Compute the 2-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the 2-dimensional discrete Fourier - Transform over any number of axes in an M-dimensional array by means of - the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` - to within numerical accuracy. By default, the inverse transform is - computed over the last two axes of the input array. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fft2`, i.e. it should have the term for zero frequency - in the low-order corner of the two axes, the positive frequency terms in - the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - both axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each axis) of the output (``s[0]`` refers to axis 0, - ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. 
- Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input (along the axes specified - by `axes`) is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. - ifftn : The inverse of the *n*-dimensional FFT. - fft : The one-dimensional FFT. - ifft : The one-dimensional inverse FFT. - - Notes - ----- - `ifft2` is just `ifftn` with a different default for `axes`. - - See `ifftn` for details and a plotting example, and `numpy.fft` for - definition and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifft2` is called. 
- - Examples - -------- - >>> a = 4 * np.eye(4) - >>> np.fft.ifft2(a) - array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], - [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) - - """ - - return _raw_fftnd(a, s, axes, ifft) - - -def rfftn(a, s=None, axes=None): - """ - Compute the N-dimensional discrete Fourier Transform for real input. - - This function computes the N-dimensional discrete Fourier Transform over - any number of axes in an M-dimensional real array by means of the Fast - Fourier Transform (FFT). By default, all axes are transformed, with the - real transform performed over the last axis, while the remaining - transforms are complex. - - Parameters - ---------- - a : array_like - Input array, taken to be real. - s : sequence of ints, optional - Shape (length along each transformed axis) to use from the input. - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - The final element of `s` corresponds to `n` for ``rfft(x, n)``, while - for the remaining axes, it corresponds to `n` for ``fft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input (along the axes specified - by `axes`) is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - The length of the last axis transformed will be ``s[-1]//2+1``, - while the remaining transformed axes will have lengths according to - `s`, or unchanged from the input. - - Raises - ------ - ValueError - If `s` and `axes` have different length. 
- IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT - of real input. - fft : The one-dimensional FFT, with definitions and conventions used. - rfft : The one-dimensional FFT of real input. - fftn : The n-dimensional FFT. - rfft2 : The two-dimensional FFT of real input. - - Notes - ----- - The transform for real input is performed over the last transformation - axis, as by `rfft`, then the transform over the remaining axes is - performed as by `fftn`. The order of the output is as for `rfft` for the - final transformation axis, and as for `fftn` for the remaining - transformation axes. - - See `fft` for details, definitions and conventions used. - - Examples - -------- - >>> a = np.ones((2, 2, 2)) - >>> np.fft.rfftn(a) - array([[[ 8.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]], - [[ 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]]]) - - >>> np.fft.rfftn(a, axes=(2, 0)) - array([[[ 4.+0.j, 0.+0.j], - [ 4.+0.j, 0.+0.j]], - [[ 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]]]) - - """ - - a = asarray(a).astype(float) - s, axes = _cook_nd_args(a, s, axes) - a = rfft(a, s[-1], axes[-1]) - for ii in range(len(axes)-1): - a = fft(a, s[ii], axes[ii]) - return a - -def rfft2(a, s=None, axes=(-2,-1)): - """ - Compute the 2-dimensional FFT of a real array. - - Parameters - ---------- - a : array - Input array, taken to be real. - s : sequence of ints, optional - Shape of the FFT. - axes : sequence of ints, optional - Axes over which to compute the FFT. - - Returns - ------- - out : ndarray - The result of the real 2-D FFT. - - See Also - -------- - rfftn : Compute the N-dimensional discrete Fourier Transform for real - input. - - Notes - ----- - This is really just `rfftn` with different default behavior. - For more details see `rfftn`. 
- - """ - - return rfftn(a, s, axes) - -def irfftn(a, s=None, axes=None): - """ - Compute the inverse of the N-dimensional FFT of real input. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform for real input over any number of axes in an - M-dimensional array by means of the Fast Fourier Transform (FFT). In - other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical - accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, - and for the same reason.) - - The input should be ordered in the same way as is returned by `rfftn`, - i.e. as for `irfft` for the final transformation axis, and as for `ifftn` - along all the other axes. - - Parameters - ---------- - a : array_like - Input array. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the - number of input points used along this axis, except for the last axis, - where ``s[-1]//2+1`` points of the input are used. - Along any axis, if the shape indicated by `s` is smaller than that of - the input, the input is cropped. If it is larger, the input is padded - with zeros. If `s` is not given, the shape of the input (along the - axes specified by `axes`) is used. - axes : sequence of ints, optional - Axes over which to compute the inverse FFT. If not given, the last - `len(s)` axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. - The length of each transformed axis is as given by the corresponding - element of `s`, or the length of the input in every axis except for the - last one if `s` is not given. 
In the final transformed axis the length - of the output when `s` is not given is ``2*(m-1)`` where `m` is the - length of the final transformed axis of the input. To get an odd - number of output points in the final axis, `s` must be specified. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - rfftn : The forward n-dimensional FFT of real input, - of which `ifftn` is the inverse. - fft : The one-dimensional FFT, with definitions and conventions used. - irfft : The inverse of the one-dimensional FFT of real input. - irfft2 : The inverse of the two-dimensional FFT of real input. - - Notes - ----- - See `fft` for definitions and conventions used. - - See `rfft` for definitions and conventions used for real input. - - Examples - -------- - >>> a = np.zeros((3, 2, 2)) - >>> a[0, 0, 0] = 3 * 2 * 2 - >>> np.fft.irfftn(a) - array([[[ 1., 1.], - [ 1., 1.]], - [[ 1., 1.], - [ 1., 1.]], - [[ 1., 1.], - [ 1., 1.]]]) - - """ - - a = asarray(a).astype(complex) - s, axes = _cook_nd_args(a, s, axes, invreal=1) - for ii in range(len(axes)-1): - a = ifft(a, s[ii], axes[ii]) - a = irfft(a, s[-1], axes[-1]) - return a - -def irfft2(a, s=None, axes=(-2,-1)): - """ - Compute the 2-dimensional inverse FFT of a real array. - - Parameters - ---------- - a : array_like - The input array - s : sequence of ints, optional - Shape of the inverse FFT. - axes : sequence of ints, optional - The axes over which to compute the inverse fft. - Default is the last two axes. - - Returns - ------- - out : ndarray - The result of the inverse real 2-D FFT. - - See Also - -------- - irfftn : Compute the inverse of the N-dimensional FFT of real input. - - Notes - ----- - This is really `irfftn` with different defaults. - For more details see `irfftn`. 
- - """ - - return irfftn(a, s, axes) diff --git a/numpy-1.6.2/numpy/fft/fftpack_litemodule.c b/numpy-1.6.2/numpy/fft/fftpack_litemodule.c deleted file mode 100644 index 21343574d8..0000000000 --- a/numpy-1.6.2/numpy/fft/fftpack_litemodule.c +++ /dev/null @@ -1,353 +0,0 @@ -#include "fftpack.h" -#include "Python.h" -#include "numpy/arrayobject.h" - -static PyObject *ErrorObject; - -/* ----------------------------------------------------- */ - -static char fftpack_cfftf__doc__[] = ""; - -PyObject * -fftpack_cfftf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data; - PyArray_Descr *descr; - double *wsave, *dptr; - npy_intp nsave; - int npts, nrepeats, i; - - if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_CopyFromObject(op1, - PyArray_CDOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - descr = PyArray_DescrFromType(PyArray_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL) { - goto fail; - } - - npts = data->dimensions[data->nd - 1]; - if (nsave != npts*4 + 15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(data)/npts; - dptr = (double *)data->data; - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - cfftf(npts, dptr, wsave); - dptr += npts*2; - } - NPY_SIGINT_OFF; - PyArray_Free(op2, (char *)wsave); - return (PyObject *)data; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return NULL; -} - -static char fftpack_cfftb__doc__[] = ""; - -PyObject * -fftpack_cfftb(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data; - PyArray_Descr *descr; - double *wsave, *dptr; - npy_intp nsave; - int npts, nrepeats, i; - - if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_CopyFromObject(op1, - PyArray_CDOUBLE, 1, 0); - if (data == NULL) { - 
return NULL; - } - descr = PyArray_DescrFromType(PyArray_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL) { - goto fail; - } - - npts = data->dimensions[data->nd - 1]; - if (nsave != npts*4 + 15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(data)/npts; - dptr = (double *)data->data; - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - cfftb(npts, dptr, wsave); - dptr += npts*2; - } - NPY_SIGINT_OFF; - PyArray_Free(op2, (char *)wsave); - return (PyObject *)data; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return NULL; -} - -static char fftpack_cffti__doc__[] =""; - -static PyObject * -fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyArrayObject *op; - npy_intp dim; - long n; - - if (!PyArg_ParseTuple(args, "l", &n)) { - return NULL; - } - /*Magic size needed by cffti*/ - dim = 4*n + 15; - /*Create a 1 dimensional array of dimensions of type double*/ - op = (PyArrayObject *)PyArray_SimpleNew(1, &dim, PyArray_DOUBLE); - if (op == NULL) { - return NULL; - } - - NPY_SIGINT_ON; - cffti(n, (double *)((PyArrayObject*)op)->data); - NPY_SIGINT_OFF; - - return (PyObject *)op; -} - -static char fftpack_rfftf__doc__[] =""; - -PyObject * -fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data, *ret; - PyArray_Descr *descr; - double *wsave, *dptr, *rptr; - npy_intp nsave; - int npts, nrepeats, i, rstep; - - if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_ContiguousFromObject(op1, - PyArray_DOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - npts = data->dimensions[data->nd-1]; - data->dimensions[data->nd - 1] = npts/2 + 1; - ret = (PyArrayObject *)PyArray_Zeros(data->nd, data->dimensions, - PyArray_DescrFromType(PyArray_CDOUBLE), 0); - data->dimensions[data->nd - 1] = npts; - rstep = 
(ret->dimensions[ret->nd - 1])*2; - - descr = PyArray_DescrFromType(PyArray_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL || ret == NULL) { - goto fail; - } - if (nsave != npts*2+15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(data)/npts; - rptr = (double *)ret->data; - dptr = (double *)data->data; - - - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - memcpy((char *)(rptr+1), dptr, npts*sizeof(double)); - rfftf(npts, rptr+1, wsave); - rptr[0] = rptr[1]; - rptr[1] = 0.0; - rptr += rstep; - dptr += npts; - } - NPY_SIGINT_OFF; - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return (PyObject *)ret; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_XDECREF(data); - Py_XDECREF(ret); - return NULL; -} - -static char fftpack_rfftb__doc__[] =""; - - -PyObject * -fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data, *ret; - PyArray_Descr *descr; - double *wsave, *dptr, *rptr; - npy_intp nsave; - int npts, nrepeats, i; - - if(!PyArg_ParseTuple(args, "OO", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_ContiguousFromObject(op1, - PyArray_CDOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - npts = data->dimensions[data->nd - 1]; - ret = (PyArrayObject *)PyArray_Zeros(data->nd, data->dimensions, - PyArray_DescrFromType(PyArray_DOUBLE), 0); - - descr = PyArray_DescrFromType(PyArray_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL || ret == NULL) { - goto fail; - } - if (nsave != npts*2 + 15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(ret)/npts; - rptr = (double *)ret->data; - dptr = (double *)data->data; - - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - memcpy((char *)(rptr + 1), (dptr + 2), (npts - 
1)*sizeof(double)); - rptr[0] = dptr[0]; - rfftb(npts, rptr, wsave); - rptr += npts; - dptr += npts*2; - } - NPY_SIGINT_OFF; - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return (PyObject *)ret; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_XDECREF(data); - Py_XDECREF(ret); - return NULL; -} - - -static char fftpack_rffti__doc__[] =""; - -static PyObject * -fftpack_rffti(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyArrayObject *op; - npy_intp dim; - long n; - - if (!PyArg_ParseTuple(args, "l", &n)) { - return NULL; - } - /*Magic size needed by rffti*/ - dim = 2*n + 15; - /*Create a 1 dimensional array of dimensions of type double*/ - op = (PyArrayObject *)PyArray_SimpleNew(1, &dim, PyArray_DOUBLE); - if (op == NULL) { - return NULL; - } - NPY_SIGINT_ON; - rffti(n, (double *)((PyArrayObject*)op)->data); - NPY_SIGINT_OFF; - - return (PyObject *)op; -} - - -/* List of methods defined in the module */ - -static struct PyMethodDef fftpack_methods[] = { - {"cfftf", fftpack_cfftf, 1, fftpack_cfftf__doc__}, - {"cfftb", fftpack_cfftb, 1, fftpack_cfftb__doc__}, - {"cffti", fftpack_cffti, 1, fftpack_cffti__doc__}, - {"rfftf", fftpack_rfftf, 1, fftpack_rfftf__doc__}, - {"rfftb", fftpack_rfftb, 1, fftpack_rfftb__doc__}, - {"rffti", fftpack_rffti, 1, fftpack_rffti__doc__}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - -/* Initialization function for the module (*must* be called initfftpack) */ - -static char fftpack_module_documentation[] = "" ; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "fftpack_lite", - NULL, - -1, - fftpack_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if PY_MAJOR_VERSION >= 3 -#define RETVAL m -PyObject *PyInit_fftpack_lite(void) -#else -#define RETVAL -PyMODINIT_FUNC -initfftpack_lite(void) -#endif -{ - PyObject *m,*d; -#if PY_MAJOR_VERSION >= 3 - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule4("fftpack_lite", 
fftpack_methods, - fftpack_module_documentation, - (PyObject*)NULL,PYTHON_API_VERSION); -#endif - - /* Import the array object */ - import_array(); - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - ErrorObject = PyErr_NewException("fftpack.error", NULL, NULL); - PyDict_SetItemString(d, "error", ErrorObject); - - /* XXXX Add constants here */ - - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/fft/helper.py b/numpy-1.6.2/numpy/fft/helper.py deleted file mode 100644 index f6c5704450..0000000000 --- a/numpy-1.6.2/numpy/fft/helper.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -Discrete Fourier Transforms - helper.py -""" -# Created by Pearu Peterson, September 2002 - -__all__ = ['fftshift','ifftshift','fftfreq'] - -from numpy.core import asarray, concatenate, arange, take, \ - integer, empty -import numpy.core.numerictypes as nt -import types - -def fftshift(x,axes=None): - """ - Shift the zero-frequency component to the center of the spectrum. - - This function swaps half-spaces for all axes listed (defaults to all). - Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to shift. Default is None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - ifftshift : The inverse of `fftshift`. 
- - Examples - -------- - >>> freqs = np.fft.fftfreq(10, 0.1) - >>> freqs - array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) - >>> np.fft.fftshift(freqs) - array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) - - Shift the zero-frequency component only along the second axis: - - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.fftshift(freqs, axes=(1,)) - array([[ 2., 0., 1.], - [-4., 3., 4.], - [-1., -3., -2.]]) - - """ - tmp = asarray(x) - ndim = len(tmp.shape) - if axes is None: - axes = range(ndim) - elif isinstance(axes, (int, nt.integer)): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = (n+1)//2 - mylist = concatenate((arange(p2,n),arange(p2))) - y = take(y,mylist,k) - return y - - -def ifftshift(x,axes=None): - """ - The inverse of fftshift. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to calculate. Defaults to None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - fftshift : Shift zero-frequency component to the center of the spectrum. - - Examples - -------- - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.ifftshift(np.fft.fftshift(freqs)) - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - - """ - tmp = asarray(x) - ndim = len(tmp.shape) - if axes is None: - axes = range(ndim) - elif isinstance(axes, (int, nt.integer)): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = n-(n+1)//2 - mylist = concatenate((arange(p2,n),arange(p2))) - y = take(y,mylist,k) - return y - -def fftfreq(n,d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies. 
- - The returned float array contains the frequency bins in - cycles/unit (with zero at the start) given a window length `n` and a - sample spacing `d`:: - - f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd - - Parameters - ---------- - n : int - Window length. - d : scalar - Sample spacing. - - Returns - ------- - out : ndarray - The array of length `n`, containing the sample frequencies. - - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) - >>> fourier = np.fft.fft(signal) - >>> n = signal.size - >>> timestep = 0.1 - >>> freq = np.fft.fftfreq(n, d=timestep) - >>> freq - array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25]) - - """ - assert isinstance(n,types.IntType) or isinstance(n, integer) - val = 1.0/(n*d) - results = empty(n, int) - N = (n-1)//2 + 1 - p1 = arange(0,N,dtype=int) - results[:N] = p1 - p2 = arange(-(n//2),0,dtype=int) - results[N:] = p2 - return results * val - #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d) diff --git a/numpy-1.6.2/numpy/fft/info.py b/numpy-1.6.2/numpy/fft/info.py deleted file mode 100644 index 890b2add22..0000000000 --- a/numpy-1.6.2/numpy/fft/info.py +++ /dev/null @@ -1,173 +0,0 @@ -""" -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= - -.. currentmodule:: numpy.fft - - -Standard FFTs -------------- - -.. autosummary:: - :toctree: generated/ - - fft Discrete Fourier transform. - ifft Inverse discrete Fourier transform. - fft2 Discrete Fourier transform in two dimensions. - ifft2 Inverse discrete Fourier transform in two dimensions. - fftn Discrete Fourier transform in N-dimensions. - ifftn Inverse discrete Fourier transform in N dimensions. - -Real FFTs ---------- - -.. autosummary:: - :toctree: generated/ - - rfft Real discrete Fourier transform. - irfft Inverse real discrete Fourier transform. 
- rfft2 Real discrete Fourier transform in two dimensions. - irfft2 Inverse real discrete Fourier transform in two dimensions. - rfftn Real discrete Fourier transform in N dimensions. - irfftn Inverse real discrete Fourier transform in N dimensions. - - -Hermitian FFTs --------------- - -.. autosummary:: - :toctree: generated/ - - hfft Hermitian discrete Fourier transform. - ihfft Inverse Hermitian discrete Fourier transform. - - -Helper routines ---------------- - -.. autosummary:: - :toctree: generated/ - - fftfreq Discrete Fourier Transform sample frequencies. - fftshift Shift zero-frequency component to center of spectrum. - ifftshift Inverse of fftshift. - -Background information ----------------------- - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the signal from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ -provide an accessible introduction to Fourier analysis and its -applications. - -Because the discrete Fourier transform separates its input into -components that contribute at discrete frequencies, it has a great number -of applications in digital signal processing, e.g., for filtering, and in -this context the discretized input to the transform is customarily -referred to as a *signal*, which exists in the *time domain*. The output -is called a *spectrum* or *transform* and exists in the *frequency -domain*. - -There are many ways to define the DFT, varying in the sign of the -exponent, normalization, etc. In this implementation, the DFT is defined -as - -.. 
math:: - A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} - \\qquad k = 0,\\ldots,n-1. - -The DFT is in general defined for complex inputs and outputs, and a -single-frequency component at linear frequency :math:`f` is -represented by a complex exponential -:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` -is the sampling interval. - -The values in the result follow so-called "standard" order: If ``A = -fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of -the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` -contains the positive-frequency terms, and ``A[n/2+1:]`` contains the -negative-frequency terms, in order of decreasingly negative frequency. -For an even number of input points, ``A[n/2]`` represents both positive and -negative Nyquist frequency, and is also purely real for real input. For -an odd number of input points, ``A[(n-1)/2]`` contains the largest positive -frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. -The routine ``np.fft.fftfreq(A)`` returns an array giving the frequencies -of corresponding elements in the output. The routine -``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the -zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes -that shift. - -When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` -is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. -The phase spectrum is obtained by ``np.angle(A)``. - -The inverse DFT is defined as - -.. math:: - a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} - \\qquad n = 0,\\ldots,n-1. - -It differs from the forward transform by the sign of the exponential -argument and the normalization by :math:`1/n`. 
- -Real and Hermitian transforms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When the input is purely real, its transform is Hermitian, i.e., the -component at frequency :math:`f_k` is the complex conjugate of the -component at frequency :math:`-f_k`, which means that for real -inputs there is no information in the negative frequency components that -is not already available from the positive frequency components. -The family of `rfft` functions is -designed to operate on real inputs, and exploits this symmetry by -computing only the positive frequency components, up to and including the -Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex -output points. The inverses of this family assumes the same symmetry of -its input, and for an output of ``n`` points uses ``n/2+1`` input points. - -Correspondingly, when the spectrum is purely real, the signal is -Hermitian. The `hfft` family of functions exploits this symmetry by -using ``n/2+1`` complex points in the input (time) domain for ``n`` real -points in the frequency domain. - -In higher dimensions, FFTs are used, e.g., for image analysis and -filtering. The computational efficiency of the FFT means that it can -also be a faster way to compute large convolutions, using the property -that a convolution in the time domain is equivalent to a point-by-point -multiplication in the frequency domain. - -In two dimensions, the DFT is defined as - -.. math:: - A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} - a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} - \\qquad k = 0, \\ldots, N-1;\\quad l = 0, \\ldots, M-1, - -which extends in the obvious way to higher dimensions, and the inverses -in higher dimensions also extend in the same way. - -References -^^^^^^^^^^ - -.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - -.. 
[NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - -Examples -^^^^^^^^ - -For examples, see the various functions. - -""" - -depends = ['core'] diff --git a/numpy-1.6.2/numpy/fft/setup.py b/numpy-1.6.2/numpy/fft/setup.py deleted file mode 100644 index 6acad7c9a6..0000000000 --- a/numpy-1.6.2/numpy/fft/setup.py +++ /dev/null @@ -1,19 +0,0 @@ - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('fft',parent_package,top_path) - - config.add_data_dir('tests') - - # Configure fftpack_lite - config.add_extension('fftpack_lite', - sources=['fftpack_litemodule.c', 'fftpack.c'] - ) - - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/fft/setupscons.py b/numpy-1.6.2/numpy/fft/setupscons.py deleted file mode 100644 index 54551b0a33..0000000000 --- a/numpy-1.6.2/numpy/fft/setupscons.py +++ /dev/null @@ -1,15 +0,0 @@ -def configuration(parent_package = '', top_path = None): - from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs - config = Configuration('fft', parent_package, top_path) - - config.add_data_dir('tests') - - config.add_sconscript('SConstruct', - source_files = ['fftpack_litemodule.c', 'fftpack.c', - 'fftpack.h']) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/fft/tests/test_fftpack.py b/numpy-1.6.2/numpy/fft/tests/test_fftpack.py deleted file mode 100644 index 4f70d3bc57..0000000000 --- a/numpy-1.6.2/numpy/fft/tests/test_fftpack.py +++ /dev/null @@ -1,23 +0,0 @@ -from numpy.testing import * -import numpy as np - -def fft1(x): - L = len(x) - phase = -2j*np.pi*(np.arange(L)/float(L)) - phase = np.arange(L).reshape(-1,1) 
* phase - return np.sum(x*np.exp(phase),axis=1) - -class TestFFTShift(TestCase): - def test_fft_n(self): - self.assertRaises(ValueError,np.fft.fft,[1,2,3],0) - - -class TestFFT1D(TestCase): - def test_basic(self): - rand = np.random.random - x = rand(30) + 1j*rand(30) - assert_array_almost_equal(fft1(x), np.fft.fft(x)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/fft/tests/test_helper.py b/numpy-1.6.2/numpy/fft/tests/test_helper.py deleted file mode 100644 index 8ddac931fa..0000000000 --- a/numpy-1.6.2/numpy/fft/tests/test_helper.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# Copied from fftpack.helper by Pearu Peterson, October 2005 -""" Test functions for fftpack.helper module -""" - -from numpy.testing import * -from numpy.fft import fftshift,ifftshift,fftfreq - -from numpy import pi - -def random(size): - return rand(*size) - -class TestFFTShift(TestCase): - def test_definition(self): - x = [0,1,2,3,4,-4,-3,-2,-1] - y = [-4,-3,-2,-1,0,1,2,3,4] - assert_array_almost_equal(fftshift(x),y) - assert_array_almost_equal(ifftshift(y),x) - x = [0,1,2,3,4,-5,-4,-3,-2,-1] - y = [-5,-4,-3,-2,-1,0,1,2,3,4] - assert_array_almost_equal(fftshift(x),y) - assert_array_almost_equal(ifftshift(y),x) - - def test_inverse(self): - for n in [1,4,9,100,211]: - x = random((n,)) - assert_array_almost_equal(ifftshift(fftshift(x)),x) - - def test_axes_keyword(self): - freqs = [[ 0, 1, 2], [ 3, 4, -4], [-3, -2, -1]] - shifted = [[-1, -3, -2], [ 2, 0, 1], [-4, 3, 4]] - assert_array_almost_equal(fftshift(freqs, axes=(0, 1)), shifted) - assert_array_almost_equal(fftshift(freqs, axes=0), fftshift(freqs, axes=(0,))) - assert_array_almost_equal(ifftshift(shifted, axes=(0, 1)), freqs) - assert_array_almost_equal(ifftshift(shifted, axes=0), ifftshift(shifted, axes=(0,))) - - -class TestFFTFreq(TestCase): - def test_definition(self): - x = [0,1,2,3,4,-4,-3,-2,-1] - assert_array_almost_equal(9*fftfreq(9),x) - 
assert_array_almost_equal(9*pi*fftfreq(9,pi),x) - x = [0,1,2,3,4,-5,-4,-3,-2,-1] - assert_array_almost_equal(10*fftfreq(10),x) - assert_array_almost_equal(10*pi*fftfreq(10,pi),x) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/SConscript b/numpy-1.6.2/numpy/lib/SConscript deleted file mode 100644 index 2d1ed55766..0000000000 --- a/numpy-1.6.2/numpy/lib/SConscript +++ /dev/null @@ -1,7 +0,0 @@ -# Last Change: Thu Jun 12 06:00 PM 2008 J -# vim:syntax=python -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) -env.Prepend(CPPPATH=["#$build_prefix/numpy/core/src/private"]) -env.NumpyPythonExtension('_compiled_base', source = ['src/_compiled_base.c']) diff --git a/numpy-1.6.2/numpy/lib/SConstruct b/numpy-1.6.2/numpy/lib/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/numpy-1.6.2/numpy/lib/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/numpy-1.6.2/numpy/lib/__init__.py b/numpy-1.6.2/numpy/lib/__init__.py deleted file mode 100644 index 1fd94a0135..0000000000 --- a/numpy-1.6.2/numpy/lib/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from info import __doc__ -from numpy.version import version as __version__ - -from type_check import * -from index_tricks import * -from function_base import * -from shape_base import * -from stride_tricks import * -from twodim_base import * -from ufunclike import * - -import scimath as emath -from polynomial import * -#import convertcode -from utils import * -from arraysetops import * -from npyio import * -from financial import * -import math -from arrayterator import * - -__all__ = ['emath','math'] -__all__ += type_check.__all__ -__all__ += index_tricks.__all__ -__all__ += function_base.__all__ -__all__ += shape_base.__all__ -__all__ += stride_tricks.__all__ -__all__ += twodim_base.__all__ -__all__ += ufunclike.__all__ -__all__ += 
polynomial.__all__ -__all__ += utils.__all__ -__all__ += arraysetops.__all__ -__all__ += npyio.__all__ -__all__ += financial.__all__ - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/lib/_datasource.py b/numpy-1.6.2/numpy/lib/_datasource.py deleted file mode 100644 index ce6d2391b0..0000000000 --- a/numpy-1.6.2/numpy/lib/_datasource.py +++ /dev/null @@ -1,639 +0,0 @@ -"""A file interface for handling local and remote data files. -The goal of datasource is to abstract some of the file system operations when -dealing with data files so the researcher doesn't have to know all the -low-level details. Through datasource, a researcher can obtain and use a -file with one function call, regardless of location of the file. - -DataSource is meant to augment standard python libraries, not replace them. -It should work seemlessly with standard file IO operations and the os module. - -DataSource files can originate locally or remotely: - -- local files : '/home/guido/src/local/data.txt' -- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' - -DataSource files can also be compressed or uncompressed. Currently only gzip -and bz2 are supported. - -Example:: - - >>> # Create a DataSource, use os.curdir (default) for local storage. - >>> ds = datasource.DataSource() - >>> - >>> # Open a remote file. - >>> # DataSource downloads the file, stores it locally in: - >>> # './www.google.com/index.html' - >>> # opens the file and returns a file object. - >>> fp = ds.open('http://www.google.com/index.html') - >>> - >>> # Use the file as you normally would - >>> fp.read() - >>> fp.close() - -""" - -__docformat__ = "restructuredtext en" - -import os -from shutil import rmtree, copyfile, copyfileobj - -_open = open - -# Using a class instead of a module-level dictionary -# to reduce the inital 'import numpy' overhead by -# deferring the import of bz2 and gzip until needed - -# TODO: .zip support, .tar support? 
-class _FileOpeners(object): - """ - Container for different methods to open (un-)compressed files. - - `_FileOpeners` contains a dictionary that holds one method for each - supported file format. Attribute lookup is implemented in such a way that - an instance of `_FileOpeners` itself can be indexed with the keys of that - dictionary. Currently uncompressed files as well as files - compressed with ``gzip`` or ``bz2`` compression are supported. - - Notes - ----- - `_file_openers`, an instance of `_FileOpeners`, is made available for - use in the `_datasource` module. - - Examples - -------- - >>> np.lib._datasource._file_openers.keys() - [None, '.bz2', '.gz'] - >>> np.lib._datasource._file_openers['.gz'] is gzip.open - True - - """ - def __init__(self): - self._loaded = False - self._file_openers = {None: open} - def _load(self): - if self._loaded: - return - try: - import bz2 - self._file_openers[".bz2"] = bz2.BZ2File - except ImportError: - pass - try: - import gzip - self._file_openers[".gz"] = gzip.open - except ImportError: - pass - self._loaded = True - - def keys(self): - """ - Return the keys of currently supported file openers. - - Parameters - ---------- - None - - Returns - ------- - keys : list - The keys are None for uncompressed files and the file extension - strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression - methods. - - """ - self._load() - return self._file_openers.keys() - def __getitem__(self, key): - self._load() - return self._file_openers[key] - -_file_openers = _FileOpeners() - -def open(path, mode='r', destpath=os.curdir): - """ - Open `path` with `mode` and return the file object. - - If ``path`` is an URL, it will be downloaded, stored in the `DataSource` - `destpath` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : str, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to - append. 
Available modes depend on the type of object specified by path. - Default is 'r'. - destpath : str, optional - Path to the directory where the source file gets downloaded to for use. - If `destpath` is None, a temporary directory will be created. The - default path is the current directory. - - Returns - ------- - out : file object - The opened file. - - Notes - ----- - This is a convenience function that instantiates a `DataSource` and - returns the file object from ``DataSource.open(path)``. - - """ - - ds = DataSource(destpath) - return ds.open(path, mode) - - -class DataSource (object): - """ - DataSource(destpath='.') - - A generic data source file (file, http, ftp, ...). - - DataSources can be local files or remote files/URLs. The files may - also be compressed or uncompressed. DataSource hides some of the low-level - details of downloading the file, allowing you to simply pass in a valid - file path (or URL) and obtain a file object. - - Parameters - ---------- - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for use. - If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Notes - ----- - URLs require a scheme string (``http://``) to be used, without it they - will fail:: - - >>> repos = DataSource() - >>> repos.exists('www.google.com/index.html') - False - >>> repos.exists('http://www.google.com/index.html') - True - - Temporary directories are deleted when the DataSource is deleted. 
- - Examples - -------- - :: - - >>> ds = DataSource('/home/guido') - >>> urlname = 'http://www.google.com/index.html' - >>> gfile = ds.open('http://www.google.com/index.html') # remote file - >>> ds.abspath(urlname) - '/home/guido/www.google.com/site/index.html' - - >>> ds = DataSource(None) # use with temporary file - >>> ds.open('/home/guido/foobar.txt') - - >>> ds.abspath('/home/guido/foobar.txt') - '/tmp/tmpy4pgsP/home/guido/foobar.txt' - - """ - - def __init__(self, destpath=os.curdir): - """Create a DataSource with a local path at destpath.""" - if destpath: - self._destpath = os.path.abspath(destpath) - self._istmpdest = False - else: - import tempfile # deferring import to improve startup time - self._destpath = tempfile.mkdtemp() - self._istmpdest = True - - def __del__(self): - # Remove temp directories - if self._istmpdest: - rmtree(self._destpath) - - def _iszip(self, filename): - """Test if the filename is a zip file by looking at the file extension. - """ - fname, ext = os.path.splitext(filename) - return ext in _file_openers.keys() - - def _iswritemode(self, mode): - """Test if the given mode will open a file for writing.""" - - # Currently only used to test the bz2 files. - _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False - - def _splitzipext(self, filename): - """Split zip extension from filename and return filename. - - *Returns*: - base, zip_ext : {tuple} - - """ - - if self._iszip(filename): - return os.path.splitext(filename) - else: - return filename, None - - def _possible_names(self, filename): - """Return a tuple containing compressed filename variations.""" - names = [filename] - if not self._iszip(filename): - for zipext in _file_openers.keys(): - if zipext: - names.append(filename+zipext) - return names - - def _isurl(self, path): - """Test if path is a net location. Tests the scheme and netloc.""" - - # We do this here to reduce the 'import numpy' initial import time. 
- from urlparse import urlparse - - # BUG : URLs require a scheme string ('http://') to be used. - # www.google.com will fail. - # Should we prepend the scheme for those that don't have it and - # test that also? Similar to the way we append .gz and test for - # for compressed versions of files. - - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - return bool(scheme and netloc) - - def _cache(self, path): - """Cache the file specified by path. - - Creates a copy of the file in the datasource cache. - - """ - # We import these here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. - from urllib2 import urlopen - from urllib2 import URLError - - upath = self.abspath(path) - - # ensure directory exists - if not os.path.exists(os.path.dirname(upath)): - os.makedirs(os.path.dirname(upath)) - - # TODO: Doesn't handle compressed files! - if self._isurl(path): - try: - openedurl = urlopen(path) - f = _open(upath, 'wb') - try: - copyfileobj(openedurl, f) - finally: - f.close() - except URLError: - raise URLError("URL not found: %s" % path) - else: - shutil.copyfile(path, upath) - return upath - - def _findfile(self, path): - """Searches for ``path`` and returns full path if found. - - If path is an URL, _findfile will cache a local copy and return - the path to the cached file. - If path is a local file, _findfile will return a path to that local - file. - - The search will include possible compressed versions of the file and - return the first occurence found. 
- - """ - - # Build list of possible local file paths - if not self._isurl(path): - # Valid local paths - filelist = self._possible_names(path) - # Paths in self._destpath - filelist += self._possible_names(self.abspath(path)) - else: - # Cached URLs in self._destpath - filelist = self._possible_names(self.abspath(path)) - # Remote URLs - filelist = filelist + self._possible_names(path) - - for name in filelist: - if self.exists(name): - if self._isurl(name): - name = self._cache(name) - return name - return None - - def abspath(self, path): - """ - Return absolute path of file in the DataSource directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - Notes - ----- - The functionality is based on `os.path.abspath`. - - """ - # We do this here to reduce the 'import numpy' initial import time. - from urlparse import urlparse - - - # TODO: This should be more robust. Handles case where path includes - # the destpath, but not other sub-paths. 
Failing case: - # path = /home/guido/datafile.txt - # destpath = /home/alex/ - # upath = self.abspath(path) - # upath == '/home/alex/home/guido/datafile.txt' - - # handle case where path includes self._destpath - splitpath = path.split(self._destpath, 2) - if len(splitpath) > 1: - path = splitpath[1] - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - netloc = self._sanitize_relative_path(netloc) - upath = self._sanitize_relative_path(upath) - return os.path.join(self._destpath, netloc, upath) - - def _sanitize_relative_path(self, path): - """Return a sanitised relative path for which - os.path.abspath(os.path.join(base, path)).startswith(base) - """ - last = None - path = os.path.normpath(path) - while path != last: - last = path - # Note: os.path.join treats '/' as os.sep on Windows - path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') - drive, path = os.path.splitdrive(path) # for Windows - return path - - def exists(self, path): - """ - Test if path exists. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either stored - locally in the `DataSource` directory, or is a valid remote URL. - `DataSource` does not discriminate between the two, the file is accessible - if it exists in either location. - - """ - # We import this here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. 
- from urllib2 import urlopen - from urllib2 import URLError - - # Test local path - if os.path.exists(path): - return True - - # Test cached url - upath = self.abspath(path) - if os.path.exists(upath): - return True - - # Test remote url - if self._isurl(path): - try: - netfile = urlopen(path) - del(netfile) - return True - except URLError: - return False - return False - - def open(self, path, mode='r'): - """ - Open and return file-like object. - - If `path` is an URL, it will be downloaded, stored in the `DataSource` - directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to - append. Available modes depend on the type of object specified by - `path`. Default is 'r'. - - Returns - ------- - out : file object - File object. - - """ - - # TODO: There is no support for opening a file for writing which - # doesn't exist yet (creating a file). Should there be? - - # TODO: Add a ``subdir`` parameter for specifying the subdirectory - # used to store URLs in self._destpath. - - if self._isurl(path) and self._iswritemode(mode): - raise ValueError("URLs are not writeable") - - # NOTE: _findfile will fail on a new file opened for writing. - found = self._findfile(path) - if found: - _fname, ext = self._splitzipext(found) - if ext == 'bz2': - mode.replace("+", "") - return _file_openers[ext](found, mode=mode) - else: - raise IOError("%s not found." % path) - - -class Repository (DataSource): - """ - Repository(baseurl, destpath='.') - - A data repository where multiple DataSource's share a base URL/directory. - - `Repository` extends `DataSource` by prepending a base URL (or directory) - to all the files it handles. Use `Repository` when you will be working - with multiple files from one base URL. Initialize `Repository` with the - base URL, then refer to each file by its filename only. 
- - Parameters - ---------- - baseurl : str - Path to the local directory or remote location that contains the - data files. - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for use. - If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Examples - -------- - To analyze all files in the repository, do something like this - (note: this is not self-contained code):: - - >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') - >>> for filename in filelist: - ... fp = repos.open(filename) - ... fp.analyze() - ... fp.close() - - Similarly you could use a URL for a repository:: - - >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') - - """ - - def __init__(self, baseurl, destpath=os.curdir): - """Create a Repository with a shared url or directory of baseurl.""" - DataSource.__init__(self, destpath=destpath) - self._baseurl = baseurl - - def __del__(self): - DataSource.__del__(self) - - def _fullpath(self, path): - """Return complete path for path. Prepends baseurl if necessary.""" - splitpath = path.split(self._baseurl, 2) - if len(splitpath) == 1: - result = os.path.join(self._baseurl, path) - else: - result = path # path contains baseurl already - return result - - def _findfile(self, path): - """Extend DataSource method to prepend baseurl to ``path``.""" - return DataSource._findfile(self, self._fullpath(path)) - - def abspath(self, path): - """ - Return absolute path of file in the Repository directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not have - to, include the `baseurl` with which the `Repository` was initialized. 
- - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - """ - return DataSource.abspath(self, self._fullpath(path)) - - def exists(self, path): - """ - Test if path exists prepending Repository base URL to path. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not have - to, include the `baseurl` with which the `Repository` was initialized. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either stored - locally in the `DataSource` directory, or is a valid remote URL. - `DataSource` does not discriminate between the two, the file is accessible - if it exists in either location. - - """ - return DataSource.exists(self, self._fullpath(path)) - - def open(self, path, mode='r'): - """ - Open and return file-like object prepending Repository base URL. - - If `path` is an URL, it will be downloaded, stored in the DataSource - directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. This may, but does not have to, - include the `baseurl` with which the `Repository` was initialized. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to - append. Available modes depend on the type of object specified by - `path`. Default is 'r'. - - Returns - ------- - out : file object - File object. - - """ - return DataSource.open(self, self._fullpath(path), mode) - - def listdir(self): - """ - List files in the source Repository. - - Returns - ------- - files : list of str - List of file names (not containing a directory part). 
- - Notes - ----- - Does not currently work for remote repositories. - - """ - if self._isurl(self._baseurl): - raise NotImplementedError, \ - "Directory listing of URLs, not supported yet." - else: - return os.listdir(self._baseurl) diff --git a/numpy-1.6.2/numpy/lib/_iotools.py b/numpy-1.6.2/numpy/lib/_iotools.py deleted file mode 100644 index a5c3c4b28c..0000000000 --- a/numpy-1.6.2/numpy/lib/_iotools.py +++ /dev/null @@ -1,844 +0,0 @@ -"""A collection of functions designed to help I/O with ascii files.""" -__docformat__ = "restructuredtext en" - -import sys -import numpy as np -import numpy.core.numeric as nx -from __builtin__ import bool, int, long, float, complex, object, unicode, str - -from numpy.compat import asbytes, bytes, asbytes_nested - -if sys.version_info[0] >= 3: - def _bytes_to_complex(s): - return complex(s.decode('ascii')) - def _bytes_to_name(s): - return s.decode('ascii') -else: - _bytes_to_complex = complex - _bytes_to_name = str - -def _is_string_like(obj): - """ - Check whether obj behaves like a string. - """ - try: - obj + '' - except (TypeError, ValueError): - return False - return True - -def _is_bytes_like(obj): - """ - Check whether obj behaves like a bytes object. - """ - try: - obj + asbytes('') - except (TypeError, ValueError): - return False - return True - - -def _to_filehandle(fname, flag='r', return_opened=False): - """ - Returns the filehandle corresponding to a string or a file. - If the string ends in '.gz', the file is automatically unzipped. - - Parameters - ---------- - fname : string, filehandle - Name of the file whose filehandle must be returned. - flag : string, optional - Flag indicating the status of the file ('r' for read, 'w' for write). - return_opened : boolean, optional - Whether to return the opening status of the file. 
- """ - if _is_string_like(fname): - if fname.endswith('.gz'): - import gzip - fhd = gzip.open(fname, flag) - elif fname.endswith('.bz2'): - import bz2 - fhd = bz2.BZ2File(fname) - else: - fhd = file(fname, flag) - opened = True - elif hasattr(fname, 'seek'): - fhd = fname - opened = False - else: - raise ValueError('fname must be a string or file handle') - if return_opened: - return fhd, opened - return fhd - - -def has_nested_fields(ndtype): - """ - Returns whether one or several fields of a dtype are nested. - - Parameters - ---------- - ndtype : dtype - Data-type of a structured array. - - Raises - ------ - AttributeError : If `ndtype` does not have a `names` attribute. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) - >>> np.lib._iotools.has_nested_fields(dt) - False - - """ - for name in ndtype.names or (): - if ndtype[name].names: - return True - return False - - -def flatten_dtype(ndtype, flatten_base=False): - """ - Unpack a structured data-type by collapsing nested fields and/or fields - with a shape. - - Note that the field names are lost. - - Parameters - ---------- - ndtype : dtype - The datatype to collapse - flatten_base : {False, True}, optional - Whether to transform a field with a shape into several fields or not. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ... 
('block', int, (2, 3))]) - >>> np.lib._iotools.flatten_dtype(dt) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] - >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), - dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), - dtype('int32')] - - """ - names = ndtype.names - if names is None: - if flatten_base: - return [ndtype.base] * int(np.prod(ndtype.shape)) - return [ndtype.base] - else: - types = [] - for field in names: - info = ndtype.fields[field] - flat_dt = flatten_dtype(info[0], flatten_base) - types.extend(flat_dt) - return types - - - - - - -class LineSplitter: - """ - Object to split a string at a given delimiter or at given places. - - Parameters - ---------- - delimiter : str, int, or sequence of ints, optional - If a string, character used to delimit consecutive fields. - If an integer or a sequence of integers, width(s) of each field. - comment : str, optional - Character used to mark the beginning of a comment. Default is '#'. - autostrip : bool, optional - Whether to strip each individual field. Default is True. - - """ - - def autostrip(self, method): - """ - Wrapper to strip each member of the output of `method`. - - Parameters - ---------- - method : function - Function that takes a single argument and returns a sequence of - strings. - - Returns - ------- - wrapped : function - The result of wrapping `method`. `wrapped` takes a single input - argument and returns a list of strings that are stripped of - white-space. 
- - """ - return lambda input: [_.strip() for _ in method(input)] - # - def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True): - self.comments = comments - # Delimiter is a character - if isinstance(delimiter, unicode): - delimiter = delimiter.encode('ascii') - if (delimiter is None) or _is_bytes_like(delimiter): - delimiter = delimiter or None - _handyman = self._delimited_splitter - # Delimiter is a list of field widths - elif hasattr(delimiter, '__iter__'): - _handyman = self._variablewidth_splitter - idx = np.cumsum([0] + list(delimiter)) - delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] - # Delimiter is a single integer - elif int(delimiter): - (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter)) - else: - (_handyman, delimiter) = (self._delimited_splitter, None) - self.delimiter = delimiter - if autostrip: - self._handyman = self.autostrip(_handyman) - else: - self._handyman = _handyman - # - def _delimited_splitter(self, line): - line = line.split(self.comments)[0].strip(asbytes(" \r\n")) - if not line: - return [] - return line.split(self.delimiter) - # - def _fixedwidth_splitter(self, line): - line = line.split(self.comments)[0].strip(asbytes("\r\n")) - if not line: - return [] - fixed = self.delimiter - slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] - return [line[s] for s in slices] - # - def _variablewidth_splitter(self, line): - line = line.split(self.comments)[0] - if not line: - return [] - slices = self.delimiter - return [line[s] for s in slices] - # - def __call__(self, line): - return self._handyman(line) - - - -class NameValidator: - """ - Object to validate a list of strings to use as field names. - - The strings are stripped of any non alphanumeric character, and spaces - are replaced by '_'. During instantiation, the user can define a list of - names to exclude, as well as a list of invalid characters. Names in the - exclusion list are appended a '_' character. 
- - Once an instance has been created, it can be called with a list of names, - and a list of valid names will be created. - The `__call__` method accepts an optional keyword "default" that sets - the default name in case of ambiguity. By default this is 'f', so - that names will default to `f0`, `f1`, etc. - - Parameters - ---------- - excludelist : sequence, optional - A list of names to exclude. This list is appended to the default list - ['return', 'file', 'print']. Excluded names are appended an underscore: - for example, `file` becomes `file_` if supplied. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - casesensitive : {True, False, 'upper', 'lower'}, optional - * If True, field names are case-sensitive. - * If False or 'upper', field names are converted to upper case. - * If 'lower', field names are converted to lower case. - - The default value is True. - replace_space: '_', optional - Character(s) used in replacement of white spaces. - - Notes - ----- - Calling an instance of `NameValidator` is the same as calling its method - `validate`. - - Examples - -------- - >>> validator = np.lib._iotools.NameValidator() - >>> validator(['file', 'field2', 'with space', 'CaSe']) - ['file_', 'field2', 'with_space', 'CaSe'] - - >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], - deletechars='q', - case_sensitive='False') - >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) - ['excl_', 'field2', 'no_', 'with_space', 'case'] - - """ - # - defaultexcludelist = ['return', 'file', 'print'] - defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") - # - def __init__(self, excludelist=None, deletechars=None, - case_sensitive=None, replace_space='_'): - # Process the exclusion list .. 
- if excludelist is None: - excludelist = [] - excludelist.extend(self.defaultexcludelist) - self.excludelist = excludelist - # Process the list of characters to delete - if deletechars is None: - delete = self.defaultdeletechars - else: - delete = set(deletechars) - delete.add('"') - self.deletechars = delete - # Process the case option ..... - if (case_sensitive is None) or (case_sensitive is True): - self.case_converter = lambda x: x - elif (case_sensitive is False) or ('u' in case_sensitive): - self.case_converter = lambda x: x.upper() - elif 'l' in case_sensitive: - self.case_converter = lambda x: x.lower() - else: - self.case_converter = lambda x: x - # - self.replace_space = replace_space - - def validate(self, names, defaultfmt="f%i", nbfields=None): - """ - Validate a list of strings to use as field names for a structured array. - - Parameters - ---------- - names : sequence of str - Strings to be validated. - defaultfmt : str, optional - Default format string, used if validating a given string reduces its - length to zero. - nboutput : integer, optional - Final number of validated names, used to expand or shrink the initial - list of names. - - Returns - ------- - validatednames : list of str - The list of validated field names. - - Notes - ----- - A `NameValidator` instance can be called directly, which is the same as - calling `validate`. For examples, see `NameValidator`. - - """ - # Initial checks .............. - if (names is None): - if (nbfields is None): - return None - names = [] - if isinstance(names, basestring): - names = [names, ] - if nbfields is not None: - nbnames = len(names) - if (nbnames < nbfields): - names = list(names) + [''] * (nbfields - nbnames) - elif (nbnames > nbfields): - names = names[:nbfields] - # Set some shortcuts ........... - deletechars = self.deletechars - excludelist = self.excludelist - case_converter = self.case_converter - replace_space = self.replace_space - # Initializes some variables ... 
- validatednames = [] - seen = dict() - nbempty = 0 - # - for item in names: - item = case_converter(item).strip() - if replace_space: - item = item.replace(' ', replace_space) - item = ''.join([c for c in item if c not in deletechars]) - if item == '': - item = defaultfmt % nbempty - while item in names: - nbempty += 1 - item = defaultfmt % nbempty - nbempty += 1 - elif item in excludelist: - item += '_' - cnt = seen.get(item, 0) - if cnt > 0: - validatednames.append(item + '_%d' % cnt) - else: - validatednames.append(item) - seen[item] = cnt + 1 - return tuple(validatednames) - # - def __call__(self, names, defaultfmt="f%i", nbfields=None): - return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) - - - -def str2bool(value): - """ - Tries to transform a string supposed to represent a boolean to a boolean. - - Parameters - ---------- - value : str - The string that is transformed to a boolean. - - Returns - ------- - boolval : bool - The boolean representation of `value`. - - Raises - ------ - ValueError - If the string is not 'True' or 'False' (case independent) - - Examples - -------- - >>> np.lib._iotools.str2bool('TRUE') - True - >>> np.lib._iotools.str2bool('false') - False - - """ - value = value.upper() - if value == asbytes('TRUE'): - return True - elif value == asbytes('FALSE'): - return False - else: - raise ValueError("Invalid boolean") - - -class ConverterError(Exception): - """ - Exception raised when an error occurs in a converter for string values. - - """ - pass - -class ConverterLockError(ConverterError): - """ - Exception raised when an attempt is made to upgrade a locked converter. - - """ - pass - -class ConversionWarning(UserWarning): - """ - Warning issued when a string converter has a problem. - - Notes - ----- - In `genfromtxt` a `ConversionWarning` is issued if raising exceptions - is explicitly suppressed with the "invalid_raise" keyword. 
- - """ - pass - - - -class StringConverter: - """ - Factory class for function transforming a string into another object (int, - float). - - After initialization, an instance can be called to transform a string - into another object. If the string is recognized as representing a missing - value, a default value is returned. - - Attributes - ---------- - func : function - Function used for the conversion. - default : any - Default value to return when the input corresponds to a missing value. - type : type - Type of the output. - _status : int - Integer representing the order of the conversion. - _mapper : sequence of tuples - Sequence of tuples (dtype, function, default value) to evaluate in - order. - _locked : bool - Holds `locked` parameter. - - Parameters - ---------- - dtype_or_func : {None, dtype, function}, optional - If a `dtype`, specifies the input data type, used to define a basic - function and a default value for missing data. For example, when - `dtype` is float, the `func` attribute is set to `float` and the - default value to `np.nan`. - If a function, this function is used to convert a string to another - object. In this case, it is recommended to give an associated default - value as input. - default : any, optional - Value to return by default, that is, when the string to be converted - is flagged as missing. If not given, `StringConverter` tries to supply - a reasonable default value. - missing_values : sequence of str, optional - Sequence of strings indicating a missing value. - locked : bool, optional - Whether the StringConverter should be locked to prevent automatic - upgrade or not. Default is False. 
- - """ - # - _mapper = [(nx.bool_, str2bool, False), - (nx.integer, int, -1), - (nx.floating, float, nx.nan), - (complex, _bytes_to_complex, nx.nan + 0j), - (nx.string_, bytes, asbytes('???'))] - (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) - # - @classmethod - def _getsubdtype(cls, val): - """Returns the type of the dtype of the input variable.""" - return np.array(val).dtype.type - # - @classmethod - def upgrade_mapper(cls, func, default=None): - """ - Upgrade the mapper of a StringConverter by adding a new function and its - corresponding default. - - The input function (or sequence of functions) and its associated default - value (if any) is inserted in penultimate position of the mapper. - The corresponding type is estimated from the dtype of the default value. - - Parameters - ---------- - func : var - Function, or sequence of functions - - Examples - -------- - >>> import dateutil.parser - >>> import datetime - >>> dateparser = datetustil.parser.parse - >>> defaultdate = datetime.date(2000, 1, 1) - >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) - """ - # Func is a single functions - if hasattr(func, '__call__'): - cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) - return - elif hasattr(func, '__iter__'): - if isinstance(func[0], (tuple, list)): - for _ in func: - cls._mapper.insert(-1, _) - return - if default is None: - default = [None] * len(func) - else: - default = list(default) - default.append([None] * (len(func) - len(default))) - for (fct, dft) in zip(func, default): - cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) - # - def __init__(self, dtype_or_func=None, default=None, missing_values=None, - locked=False): - # Convert unicode (for Py3) - if isinstance(missing_values, unicode): - missing_values = asbytes(missing_values) - elif isinstance(missing_values, (list, tuple)): - missing_values = asbytes_nested(missing_values) - # Defines a lock for upgrade - self._locked = bool(locked) - # 
No input dtype: minimal initialization - if dtype_or_func is None: - self.func = str2bool - self._status = 0 - self.default = default or False - ttype = np.bool - else: - # Is the input a np.dtype ? - try: - self.func = None - ttype = np.dtype(dtype_or_func).type - except TypeError: - # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): - errmsg = "The input argument `dtype` is neither a function"\ - " or a dtype (got '%s' instead)" - raise TypeError(errmsg % type(dtype_or_func)) - # Set the function - self.func = dtype_or_func - # If we don't have a default, try to guess it or set it to None - if default is None: - try: - default = self.func(asbytes('0')) - except ValueError: - default = None - ttype = self._getsubdtype(default) - # Set the status according to the dtype - _status = -1 - for (i, (deftype, func, default_def)) in enumerate(self._mapper): - if np.issubdtype(ttype, deftype): - _status = i - if default is None: - self.default = default_def - else: - self.default = default - break - if _status == -1: - # We never found a match in the _mapper... - _status = 0 - self.default = default - self._status = _status - # If the input was a dtype, set the function to the last we saw - if self.func is None: - self.func = func - # If the status is 1 (int), change the function to - # something more robust. - if self.func == self._mapper[1][1]: - if issubclass(ttype, np.uint64): - self.func = np.uint64 - elif issubclass(ttype, np.int64): - self.func = np.int64 - else: - self.func = lambda x : int(float(x)) - # Store the list of strings corresponding to missing values. 
- if missing_values is None: - self.missing_values = set([asbytes('')]) - else: - if isinstance(missing_values, bytes): - missing_values = missing_values.split(asbytes(",")) - self.missing_values = set(list(missing_values) + [asbytes('')]) - # - self._callingfunction = self._strict_call - self.type = ttype - self._checked = False - self._initial_default = default - # - def _loose_call(self, value): - try: - return self.func(value) - except ValueError: - return self.default - # - def _strict_call(self, value): - try: - return self.func(value) - except ValueError: - if value.strip() in self.missing_values: - if not self._status: - self._checked = False - return self.default - raise ValueError("Cannot convert string '%s'" % value) - # - def __call__(self, value): - return self._callingfunction(value) - # - def upgrade(self, value): - """ - Try to find the best converter for a given string, and return the result. - - The supplied string `value` is converted by testing different - converters in order. First the `func` method of the `StringConverter` - instance is tried, if this fails other available converters are tried. - The order in which these other converters are tried is determined by the - `_status` attribute of the instance. - - Parameters - ---------- - value : str - The string to convert. - - Returns - ------- - out : any - The result of converting `value` with the appropriate converter. - - """ - self._checked = True - try: - self._strict_call(value) - except ValueError: - # Raise an exception if we locked the converter... 
- if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - errmsg = "Could not find a valid conversion function" - raise ConverterError(errmsg) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - self._status = _status - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - self.upgrade(value) - - def iterupgrade(self, value): - self._checked = True - if not hasattr(value, '__iter__'): - value = (value,) - _strict_call = self._strict_call - try: - map(_strict_call, value) - except ValueError: - # Raise an exception if we locked the converter... - if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - raise ConverterError("Could not find a valid conversion function") - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - self._status = _status - self.iterupgrade(value) - - def update(self, func, default=None, testing_value=None, - missing_values=asbytes(''), locked=False): - """ - Set StringConverter attributes directly. - - Parameters - ---------- - func : function - Conversion function. - default : any, optional - Value to return by default, that is, when the string to be converted - is flagged as missing. If not given, `StringConverter` tries to supply - a reasonable default value. - testing_value : str, optional - A string representing a standard input value of the converter. 
- This string is used to help defining a reasonable default value. - missing_values : sequence of str, optional - Sequence of strings indicating a missing value. - locked : bool, optional - Whether the StringConverter should be locked to prevent automatic - upgrade or not. Default is False. - - Notes - ----- - `update` takes the same parameters as the constructor of `StringConverter`, - except that `func` does not accept a `dtype` whereas `dtype_or_func` in - the constructor does. - - """ - self.func = func - self._locked = locked - # Don't reset the default to None if we can avoid it - if default is not None: - self.default = default - self.type = self._getsubdtype(default) - else: - try: - tester = func(testing_value or asbytes('1')) - except (TypeError, ValueError): - tester = None - self.type = self._getsubdtype(tester) - # Add the missing values to the existing set - if missing_values is not None: - if _is_bytes_like(missing_values): - self.missing_values.add(missing_values) - elif hasattr(missing_values, '__iter__'): - for val in missing_values: - self.missing_values.add(val) - else: - self.missing_values = [] - - - -def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): - """ - Convenience function to create a `np.dtype` object. - - The function processes the input `dtype` and matches it with the given - names. - - Parameters - ---------- - ndtype : var - Definition of the dtype. Can be any string or dictionary - recognized by the `np.dtype` function, or a sequence of types. - names : str or sequence, optional - Sequence of strings to use as field names for a structured dtype. - For convenience, `names` can be a string of a comma-separated list of - names. - defaultfmt : str, optional - Format string used to define missing names, such as ``"f%i"`` - (default) or ``"fields_%02i"``. - validationargs : optional - A series of optional arguments used to initialize a `NameValidator`. 
- - Examples - -------- - >>> np.lib._iotools.easy_dtype(float) - dtype('float64') - >>> np.lib._iotools.easy_dtype("i4, f8") - dtype([('f0', '>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") - dtype([('field_000', '>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") - dtype([('a', '>> np.lib._iotools.easy_dtype(float, names="a,b,c") - dtype([('a', ' 0): - validate = NameValidator(**validationargs) - # Default initial names : should we change the format ? - if (ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and \ - (defaultfmt != "f%i"): - ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt) - # Explicit initial names : just validate - else: - ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt) - return ndtype - diff --git a/numpy-1.6.2/numpy/lib/arraysetops.py b/numpy-1.6.2/numpy/lib/arraysetops.py deleted file mode 100644 index 47e94bc4d8..0000000000 --- a/numpy-1.6.2/numpy/lib/arraysetops.py +++ /dev/null @@ -1,417 +0,0 @@ -""" -Set operations for 1D numeric arrays based on sorting. - -:Contains: - ediff1d, - unique, - intersect1d, - setxor1d, - in1d, - union1d, - setdiff1d - -:Notes: - -For floating point arrays, inaccurate results may appear due to usual round-off -and floating point comparison issues. - -Speed could be gained in some operations by an implementation of -sort(), that can provide directly the permutation vectors, avoiding -thus calls to argsort(). - -To do: Optionally return indices analogously to unique for all functions. - -:Author: Robert Cimrman -""" -__all__ = ['ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', - 'unique', 'in1d'] - -import numpy as np -from numpy.lib.utils import deprecate - -def ediff1d(ary, to_end=None, to_begin=None): - """ - The differences between consecutive elements of an array. - - Parameters - ---------- - ary : array_like - If necessary, will be flattened before the differences are taken. 
- to_end : array_like, optional - Number(s) to append at the end of the returned differences. - to_begin : array_like, optional - Number(s) to prepend at the beginning of the returned differences. - - Returns - ------- - ed : ndarray - The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. - - See Also - -------- - diff, gradient - - Notes - ----- - When applied to masked arrays, this function drops the mask information - if the `to_begin` and/or `to_end` parameters are used. - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.ediff1d(x) - array([ 1, 2, 3, -7]) - - >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - array([-99, 1, 2, 3, -7, 88, 99]) - - The returned array is always 1D. - - >>> y = [[1, 2, 4], [1, 6, 24]] - >>> np.ediff1d(y) - array([ 1, 2, -3, 5, 18]) - - """ - ary = np.asanyarray(ary).flat - ed = ary[1:] - ary[:-1] - arrays = [ed] - if to_begin is not None: - arrays.insert(0, to_begin) - if to_end is not None: - arrays.append(to_end) - - if len(arrays) != 1: - # We'll save ourselves a copy of a potentially large array in - # the common case where neither to_begin or to_end was given. - ed = np.hstack(arrays) - - return ed - -def unique(ar, return_index=False, return_inverse=False): - """ - Find the unique elements of an array. - - Returns the sorted unique elements of an array. There are two optional - outputs in addition to the unique elements: the indices of the input array - that give the unique values, and the indices of the unique array that - reconstruct the input array. - - Parameters - ---------- - ar : array_like - Input array. This will be flattened if it is not already 1-D. - return_index : bool, optional - If True, also return the indices of `ar` that result in the unique - array. - return_inverse : bool, optional - If True, also return the indices of the unique array that can be used - to reconstruct `ar`. - - Returns - ------- - unique : ndarray - The sorted unique values. 
- unique_indices : ndarray, optional - The indices of the first occurrences of the unique values in the - (flattened) original array. Only provided if `return_index` is True. - unique_inverse : ndarray, optional - The indices to reconstruct the (flattened) original array from the - unique array. Only provided if `return_inverse` is True. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.unique([1, 1, 2, 2, 3, 3]) - array([1, 2, 3]) - >>> a = np.array([[1, 1], [2, 3]]) - >>> np.unique(a) - array([1, 2, 3]) - - Return the indices of the original array that give the unique values: - - >>> a = np.array(['a', 'b', 'b', 'c', 'a']) - >>> u, indices = np.unique(a, return_index=True) - >>> u - array(['a', 'b', 'c'], - dtype='|S1') - >>> indices - array([0, 1, 3]) - >>> a[indices] - array(['a', 'b', 'c'], - dtype='|S1') - - Reconstruct the input array from the unique values: - - >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) - >>> u, indices = np.unique(a, return_inverse=True) - >>> u - array([1, 2, 3, 4, 6]) - >>> indices - array([0, 1, 4, 3, 1, 2, 1]) - >>> u[indices] - array([1, 2, 6, 4, 2, 3, 2]) - - """ - try: - ar = ar.flatten() - except AttributeError: - if not return_inverse and not return_index: - items = sorted(set(ar)) - return np.asarray(items) - else: - ar = np.asanyarray(ar).flatten() - - if ar.size == 0: - if return_inverse and return_index: - return ar, np.empty(0, np.bool), np.empty(0, np.bool) - elif return_inverse or return_index: - return ar, np.empty(0, np.bool) - else: - return ar - - if return_inverse or return_index: - if return_index: - perm = ar.argsort(kind='mergesort') - else: - perm = ar.argsort() - aux = ar[perm] - flag = np.concatenate(([True], aux[1:] != aux[:-1])) - if return_inverse: - iflag = np.cumsum(flag) - 1 - iperm = perm.argsort() - if return_index: - return aux[flag], perm[flag], iflag[iperm] - else: - return aux[flag], 
iflag[iperm] - else: - return aux[flag], perm[flag] - - else: - ar.sort() - flag = np.concatenate(([True], ar[1:] != ar[:-1])) - return ar[flag] - - -def intersect1d(ar1, ar2, assume_unique=False): - """ - Find the intersection of two arrays. - - Return the sorted, unique values that are in both of the input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - out : ndarray - Sorted 1D array of common and unique elements. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) - array([1, 3]) - - """ - if not assume_unique: - # Might be faster than unique( intersect1d( ar1, ar2 ) )? - ar1 = unique(ar1) - ar2 = unique(ar2) - aux = np.concatenate( (ar1, ar2) ) - aux.sort() - return aux[aux[1:] == aux[:-1]] - -def setxor1d(ar1, ar2, assume_unique=False): - """ - Find the set exclusive-or of two arrays. - - Return the sorted, unique values that are in only one (not both) of the - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - xor : ndarray - Sorted 1D array of unique values that are in only one of the input - arrays. 
- - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4]) - >>> b = np.array([2, 3, 5, 7, 5]) - >>> np.setxor1d(a,b) - array([1, 4, 5, 7]) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = np.concatenate( (ar1, ar2) ) - if aux.size == 0: - return aux - - aux.sort() -# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 - flag = np.concatenate( ([True], aux[1:] != aux[:-1], [True] ) ) -# flag2 = ediff1d( flag ) == 0 - flag2 = flag[1:] == flag[:-1] - return aux[flag2] - -def in1d(ar1, ar2, assume_unique=False): - """ - Test whether each element of a 1D array is also present in a second array. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. - - Parameters - ---------- - ar1 : array_like, shape (M,) - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - mask : ndarray of bools, shape(M,) - The values `ar1[mask]` are in `ar2`. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - - .. versionadded:: 1.4.0 - - Examples - -------- - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True], dtype=bool) - >>> test[mask] - array([0, 2, 0]) - - """ - if not assume_unique: - ar1, rev_idx = np.unique(ar1, return_inverse=True) - ar2 = np.unique(ar2) - - ar = np.concatenate( (ar1, ar2) ) - # We need this to be a stable sort, so always use 'mergesort' - # here. 
The values from the first array should always come before - # the values from the second array. - order = ar.argsort(kind='mergesort') - sar = ar[order] - equal_adj = (sar[1:] == sar[:-1]) - flag = np.concatenate( (equal_adj, [False] ) ) - indx = order.argsort(kind='mergesort')[:len( ar1 )] - - if assume_unique: - return flag[indx] - else: - return flag[indx][rev_idx] - -def union1d(ar1, ar2): - """ - Find the union of two arrays. - - Return the unique, sorted array of values that are in either of the two - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. They are flattened if they are not already 1D. - - Returns - ------- - union : ndarray - Unique, sorted union of the input arrays. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.union1d([-1, 0, 1], [-2, 0, 2]) - array([-2, -1, 0, 1, 2]) - - """ - return unique( np.concatenate( (ar1, ar2) ) ) - -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Find the set difference of two arrays. - - Return the sorted, unique values in `ar1` that are not in `ar2`. - - Parameters - ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input comparison array. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - difference : ndarray - Sorted 1D array of values in `ar1` that are not in `ar2`. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. 
- - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4, 1]) - >>> b = np.array([3, 4, 5, 6]) - >>> np.setdiff1d(a, b) - array([1, 2]) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - aux = in1d(ar1, ar2, assume_unique=True) - if aux.size == 0: - return aux - else: - return np.asarray(ar1)[aux == 0] diff --git a/numpy-1.6.2/numpy/lib/arrayterator.py b/numpy-1.6.2/numpy/lib/arrayterator.py deleted file mode 100644 index 2df05e5147..0000000000 --- a/numpy-1.6.2/numpy/lib/arrayterator.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -A buffered iterator for big arrays. - -This module solves the problem of iterating over a big file-based array -without having to read it into memory. The `Arrayterator` class wraps -an array object, and when iterated it will return sub-arrays with at most -a user-specified number of elements. - -""" - -from __future__ import division - -from operator import mul - -__all__ = ['Arrayterator'] - -import sys -if sys.version_info[0] >= 3: - from functools import reduce - -class Arrayterator(object): - """ - Buffered iterator for big arrays. - - `Arrayterator` creates a buffered iterator for reading big arrays in small - contiguous blocks. The class is useful for objects stored in the - file system. It allows iteration over the object *without* reading - everything in memory; instead, small blocks are read and iterated over. - - `Arrayterator` can be used with any object that supports multidimensional - slices. This includes NumPy arrays, but also variables from - Scientific.IO.NetCDF or pynetcdf for example. - - Parameters - ---------- - var : array_like - The object to iterate over. - buf_size : int, optional - The buffer size. If `buf_size` is supplied, the maximum amount of - data that will be read into memory is `buf_size` elements. - Default is None, which will read as many element as possible - into memory. 
- - Attributes - ---------- - var - buf_size - start - stop - step - shape - flat - - See Also - -------- - ndenumerate : Multidimensional array iterator. - flatiter : Flat array iterator. - memmap : Create a memory-map to an array stored in a binary file on disk. - - Notes - ----- - The algorithm works by first finding a "running dimension", along which - the blocks will be extracted. Given an array of dimensions - ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the - first dimension will be used. If, on the other hand, - ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. - Blocks are extracted along this dimension, and when the last block is - returned the process continues from the next dimension, until all - elements have been read. - - Examples - -------- - >>> import numpy as np - >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2) - >>> a_itor.shape - (3, 4, 5, 6) - - Now we can iterate over ``a_itor``, and it will return arrays of size - two. Since `buf_size` was smaller than any dimension, the first - dimension will be iterated over first: - - >>> for subarr in a_itor: - ... if not subarr.all(): - ... print subarr, subarr.shape - ... - [[[[0 1]]]] (1, 1, 1, 2) - - """ - - def __init__(self, var, buf_size=None): - self.var = var - self.buf_size = buf_size - - self.start = [0 for dim in var.shape] - self.stop = [dim for dim in var.shape] - self.step = [1 for dim in var.shape] - - def __getattr__(self, attr): - return getattr(self.var, attr) - - def __getitem__(self, index): - """ - Return a new arrayterator. - - """ - # Fix index, handling ellipsis and incomplete slices. 
- if not isinstance(index, tuple): index = (index,) - fixed = [] - length, dims = len(index), len(self.shape) - for slice_ in index: - if slice_ is Ellipsis: - fixed.extend([slice(None)] * (dims-length+1)) - length = len(fixed) - elif isinstance(slice_, (int, long)): - fixed.append(slice(slice_, slice_+1, 1)) - else: - fixed.append(slice_) - index = tuple(fixed) - if len(index) < dims: - index += (slice(None),) * (dims-len(index)) - - # Return a new arrayterator object. - out = self.__class__(self.var, self.buf_size) - for i, (start, stop, step, slice_) in enumerate( - zip(self.start, self.stop, self.step, index)): - out.start[i] = start + (slice_.start or 0) - out.step[i] = step * (slice_.step or 1) - out.stop[i] = start + (slice_.stop or stop-start) - out.stop[i] = min(stop, out.stop[i]) - return out - - def __array__(self): - """ - Return corresponding data. - - """ - slice_ = tuple(slice(*t) for t in zip( - self.start, self.stop, self.step)) - return self.var[slice_] - - @property - def flat(self): - """ - A 1-D flat iterator for Arrayterator objects. - - This iterator returns elements of the array to be iterated over in - `Arrayterator` one by one. It is similar to `flatiter`. - - See Also - -------- - `Arrayterator` - flatiter - - Examples - -------- - >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2) - - >>> for subarr in a_itor.flat: - ... if not subarr: - ... print subarr, type(subarr) - ... - 0 - - """ - for block in self: - for value in block.flat: - yield value - - @property - def shape(self): - """ - The shape of the array to be iterated over. - - For an example, see `Arrayterator`. 
- - """ - return tuple(((stop-start-1)//step+1) for start, stop, step in - zip(self.start, self.stop, self.step)) - - def __iter__(self): - # Skip arrays with degenerate dimensions - if [dim for dim in self.shape if dim <= 0]: raise StopIteration - - start = self.start[:] - stop = self.stop[:] - step = self.step[:] - ndims = len(self.var.shape) - - while 1: - count = self.buf_size or reduce(mul, self.shape) - - # iterate over each dimension, looking for the - # running dimension (ie, the dimension along which - # the blocks will be built from) - rundim = 0 - for i in range(ndims-1, -1, -1): - # if count is zero we ran out of elements to read - # along higher dimensions, so we read only a single position - if count == 0: - stop[i] = start[i]+1 - elif count <= self.shape[i]: # limit along this dimension - stop[i] = start[i] + count*step[i] - rundim = i - else: - stop[i] = self.stop[i] # read everything along this - # dimension - stop[i] = min(self.stop[i], stop[i]) - count = count//self.shape[i] - - # yield a block - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - yield self.var[slice_] - - # Update start position, taking care of overflow to - # other dimensions - start[rundim] = stop[rundim] # start where we stopped - for i in range(ndims-1, 0, -1): - if start[i] >= self.stop[i]: - start[i] = self.start[i] - start[i-1] += self.step[i-1] - if start[0] >= self.stop[0]: - raise StopIteration diff --git a/numpy-1.6.2/numpy/lib/financial.py b/numpy-1.6.2/numpy/lib/financial.py deleted file mode 100644 index 55ed2839ed..0000000000 --- a/numpy-1.6.2/numpy/lib/financial.py +++ /dev/null @@ -1,658 +0,0 @@ -# Some simple financial calculations -# patterned after spreadsheet computations. - -# There is some complexity in each function -# so that the functions behave like ufuncs with -# broadcasting and being able to be called with scalars -# or arrays (or other sequences). 
-import numpy as np - -__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', - 'irr', 'npv', 'mirr'] - -_when_to_num = {'end':0, 'begin':1, - 'e':0, 'b':1, - 0:0, 1:1, - 'beginning':1, - 'start':1, - 'finish':0} - -def _convert_when(when): - try: - return _when_to_num[when] - except KeyError: - return [_when_to_num[x] for x in when] - - -def fv(rate, nper, pmt, pv, when='end'): - """ - Compute the future value. - - Given: - * a present value, `pv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value at the end of the `nper` periods - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - nper : scalar or array_like of shape(M, ) - Number of compounding periods - pmt : scalar or array_like of shape(M, ) - Payment - pv : scalar or array_like of shape(M, ) - Present value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Future values. If all input is scalar, returns a scalar float. If - any input is array_like, returns future values for each input element. - If multiple inputs are array_like, they all must have the same shape. - - Notes - ----- - The future value is computed by solving the equation:: - - fv + - pv*(1+rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - References - ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. 
Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - Examples - -------- - What is the future value after 10 years of saving $100 now, with - an additional monthly savings of $100. Assume the interest rate is - 5% (annually) compounded monthly? - - >>> np.fv(0.05/12, 10*12, -100, -100) - 15692.928894335748 - - By convention, the negative sign represents cash flow out (i.e. money not - available today). Thus, saving $100 a month at 5% annual interest leads - to $15,692.93 available to spend in 10 years. - - If any input is array_like, returns an array of equal shape. Let's - compare different interest rates from the example above. - - >>> a = np.array((0.05, 0.06, 0.07))/12 - >>> np.fv(a, 10*12, -100, -100) - array([ 15692.92889434, 16569.87435405, 17509.44688102]) - - """ - when = _convert_when(when) - rate, nper, pmt, pv, when = map(np.asarray, [rate, nper, pmt, pv, when]) - temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pmt, pv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) - return -(pv*temp + pmt*fact) - -def pmt(rate, nper, pv, fv=0, when='end'): - """ - Compute the payment against loan principal plus interest. - - Given: - * a present value, `pv` (e.g., an amount borrowed) - * a future value, `fv` (e.g., 0) - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * and (optional) specification of whether payment is made - at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the (fixed) periodic payment. 
- - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like (optional) - Future value (default = 0) - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray - Payment against loan plus interest. If all input is scalar, returns a - scalar float. If any input is array_like, returns payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - Notes - ----- - The payment is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - for ``pmt``. - - Note that computing a monthly mortgage payment is only - one use for this function. For example, pmt returns the - periodic deposit one must make to achieve a specified - future balance given an initial deposit, a fixed, - periodically compounded interest rate, and the total - number of periods. - - References - ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php - ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt - - Examples - -------- - What is the monthly payment needed to pay off a $200,000 loan in 15 - years at an annual interest rate of 7.5%? - - >>> np.pmt(0.075/12, 12*15, 200000) - -1854.0247200054619 - - In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained - today, a monthly payment of $1,854.02 would be required. 
Note that this - example illustrates usage of `fv` having a default value of 0. - - """ - when = _convert_when(when) - rate, nper, pv, fv, when = map(np.asarray, [rate, nper, pv, fv, when]) - temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pv, fv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) - return -(fv + pv*temp) / fact - -def nper(rate, pmt, pv, fv=0, when='end'): - """ - Compute the number of periodic payments. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Notes - ----- - The number of periods ``nper`` is computed by solving the equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0 - - but if ``rate = 0`` then:: - - fv + pv + pmt*nper = 0 - - Examples - -------- - If you only had $150/month to pay towards the loan, how long would it take - to pay-off a loan of $8,000 at 7% annual interest? - - >>> np.nper(0.07/12, -150, 8000) - 64.073348770661852 - - So, over 64 months would be required to pay off the loan. - - The same analysis could be done with several different interest rates - and/or payments and/or total amounts to produce an entire table. - - >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, - ... -150 : -99 : 50 , - ... 
8000 : 9001 : 1000])) - array([[[ 64.07334877, 74.06368256], - [ 108.07548412, 127.99022654]], - [[ 66.12443902, 76.87897353], - [ 114.70165583, 137.90124779]]]) - - """ - when = _convert_when(when) - rate, pmt, pv, fv, when = map(np.asarray, [rate, pmt, pv, fv, when]) - - use_zero_rate = False - old_err = np.seterr(divide="raise") - try: - try: - z = pmt*(1.0+rate*when)/rate - except FloatingPointError: - use_zero_rate = True - finally: - np.seterr(**old_err) - - if use_zero_rate: - return (-fv + pv) / (pmt + 0.0) - else: - A = -(fv + pv)/(pmt+0.0) - B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate) - miter = np.broadcast(rate, pmt, pv, fv, when) - zer = np.zeros(miter.shape) - return np.where(rate==zer, A+zer, B+zer) + 0.0 - -def ipmt(rate, per, nper, pv, fv=0.0, when='end'): - """ - Not implemented. Compute the payment portion for loan interest. - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - per : scalar or array_like of shape(M, ) - Interest paid against the loan changes during the life or the loan. - The `per` is the payment period to calculate the interest amount. - nper : scalar or array_like of shape(M, ) - Number of compounding periods - pv : scalar or array_like of shape(M, ) - Present value - fv : scalar or array_like of shape(M, ), optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Interest portion of payment. If all input is scalar, returns a scalar - float. If any input is array_like, returns interest payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - See Also - -------- - ppmt, pmt, pv - - Notes - ----- - The total payment is made up of payment against principal plus interest. 
- - ``pmt = ppmt + ipmt`` - - """ - total = pmt(rate, nper, pv, fv, when) - # Now, compute the nth step in the amortization - raise NotImplementedError - -def ppmt(rate, per, nper, pv, fv=0.0, when='end'): - """ - Not implemented. Compute the payment against loan principal. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - per : array_like, int - Amount paid against the loan changes. The `per` is the period of - interest. - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - See Also - -------- - pmt, pv, ipmt - - """ - total = pmt(rate, nper, pv, fv, when) - return total - ipmt(rate, per, nper, pv, fv, when) - -def pv(rate, nper, pmt, fv=0.0, when='end'): - """ - Compute the present value. - - Given: - * a future value, `fv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value now - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pmt : array_like - Payment - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray, float - Present value of a series of payments or investments. - - Notes - ----- - The present value is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0 - - or, when ``rate = 0``:: - - fv + pv + pmt * nper = 0 - - for `pv`, which is then returned. - - References - ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). 
- Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - Examples - -------- - What is the present value (e.g., the initial investment) - of an investment that needs to total $15692.93 - after 10 years of saving $100 every month? Assume the - interest rate is 5% (annually) compounded monthly. - - >>> np.pv(0.05/12, 10*12, -100, 15692.93) - -100.00067131625819 - - By convention, the negative sign represents cash flow out - (i.e., money not available today). Thus, to end up with - $15,692.93 in 10 years saving $100 a month at 5% annual - interest, one's initial deposit should also be $100. - - If any input is array_like, ``pv`` returns an array of equal shape. - Let's compare different interest rates in the example above: - - >>> a = np.array((0.05, 0.04, 0.03))/12 - >>> np.pv(a, 10*12, -100, 15692.93) - array([ -100.00067132, -649.26771385, -1273.78633713]) - - So, to end up with the same $15692.93 under the same $100 per month - "savings plan," for annual interest rates of 4% and 3%, one would - need initial investments of $649.27 and $1273.79, respectively. 
- - """ - when = _convert_when(when) - rate, nper, pmt, fv, when = map(np.asarray, [rate, nper, pmt, fv, when]) - temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pmt, fv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate == zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) - return -(fv + pmt*fact)/temp - -# Computed with Sage -# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r) - -def _g_div_gp(r, n, p, x, y, w): - t1 = (r+1)**n - t2 = (r+1)**(n-1) - return (y + t1*x + p*(t1 - 1)*(r*w + 1)/r)/(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r) - -# Use Newton's iteration until the change is less than 1e-6 -# for all values or a maximum of 100 iterations is reached. -# Newton's rule is -# r_{n+1} = r_{n} - g(r_n)/g'(r_n) -# where -# g(r) is the formula -# g'(r) is the derivative with respect to r. -def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100): - """ - Compute the rate of interest per period. - - Parameters - ---------- - nper : array_like - Number of compounding periods - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - guess : float, optional - Starting guess for solving the rate of interest - tol : float, optional - Required tolerance for the solution - maxiter : int, optional - Maximum iterations in finding the solution - - Notes - ----- - The rate of interest is computed by iteratively solving the - (non-linear) equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 - - for ``rate``. - - References - ---------- - Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). 
Open Document - Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated - Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. - Organization for the Advancement of Structured Information Standards - (OASIS). Billerica, MA, USA. [ODT Document]. Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - """ - when = _convert_when(when) - nper, pmt, pv, fv, when = map(np.asarray, [nper, pmt, pv, fv, when]) - rn = guess - iter = 0 - close = False - while (iter < maxiter) and not close: - rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) - diff = abs(rnp1-rn) - close = np.all(diff>> np.irr([-100, 39, 59, 55, 20]) - 0.2809484211599611 - - (Compare with the Example given for numpy.lib.financial.npv) - - """ - res = np.roots(values[::-1]) - # Find the root(s) between 0 and 1 - mask = (res.imag == 0) & (res.real > 0) & (res.real <= 1) - res = res[mask].real - if res.size == 0: - return np.nan - rate = 1.0/res - 1 - if rate.size == 1: - rate = rate.item() - return rate - -def npv(rate, values): - """ - Returns the NPV (Net Present Value) of a cash flow series. - - Parameters - ---------- - rate : scalar - The discount rate. - values : array_like, shape(M, ) - The values of the time series of cash flows. The (fixed) time - interval between cash flow "events" must be the same as that - for which `rate` is given (i.e., if `rate` is per year, then - precisely a year is understood to elapse between each cash flow - event). By convention, investments or "deposits" are negative, - income or "withdrawals" are positive; `values` must begin with - the initial investment, thus `values[0]` will typically be - negative. - - Returns - ------- - out : float - The NPV of the input cash flow series `values` at the discount `rate`. - - Notes - ----- - Returns the result of: [G]_ - - .. math :: \\sum_{t=0}^M{\\frac{values_t}{(1+rate)^{t}}} - - References - ---------- - .. [G] L. J. 
Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 346. - - Examples - -------- - >>> np.npv(0.281,[-100, 39, 59, 55, 20]) - -0.0066187288356340801 - - (Compare with the Example given for numpy.lib.financial.irr) - - """ - values = np.asarray(values) - return (values / (1+rate)**np.arange(1,len(values)+1)).sum(axis=0) - -def mirr(values, finance_rate, reinvest_rate): - """ - Modified internal rate of return. - - Parameters - ---------- - values : array_like - Cash flows (must contain at least one positive and one negative value) - or nan is returned. The first value is considered a sunk cost at time zero. - finance_rate : scalar - Interest rate paid on the cash flows - reinvest_rate : scalar - Interest rate received on the cash flows upon reinvestment - - Returns - ------- - out : float - Modified internal rate of return - - """ - - values = np.asarray(values, dtype=np.double) - n = values.size - pos = values > 0 - neg = values < 0 - if not (pos.any() and neg.any()): - return np.nan - numer = np.abs(npv(reinvest_rate, values*pos))*(1 + reinvest_rate) - denom = np.abs(npv(finance_rate, values*neg))*(1 + finance_rate) - return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1 - diff --git a/numpy-1.6.2/numpy/lib/format.py b/numpy-1.6.2/numpy/lib/format.py deleted file mode 100644 index 1e508f3e5d..0000000000 --- a/numpy-1.6.2/numpy/lib/format.py +++ /dev/null @@ -1,577 +0,0 @@ -""" -Define a simple format for saving numpy arrays to disk with the full -information about them. - -The ``.npy`` format is the standard binary file format in NumPy for -persisting a *single* arbitrary NumPy array on disk. The format stores all -of the shape and dtype information necessary to reconstruct the array -correctly even on another machine with a different architecture. -The format is designed to be as simple as possible while achieving -its limited goals. 
- -The ``.npz`` format is the standard format for persisting *multiple* NumPy -arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` -files, one for each array. - -Capabilities ------------- - -- Can represent all NumPy arrays including nested record arrays and - object arrays. - -- Represents the data in its native binary form. - -- Supports Fortran-contiguous arrays directly. - -- Stores all of the necessary information to reconstruct the array - including shape and dtype on a machine of a different - architecture. Both little-endian and big-endian arrays are - supported, and a file with little-endian numbers will yield - a little-endian array on any machine reading the file. The - types are described in terms of their actual sizes. For example, - if a machine with a 64-bit C "long int" writes out an array with - "long ints", a reading machine with 32-bit C "long ints" will yield - an array with 64-bit integers. - -- Is straightforward to reverse engineer. Datasets often live longer than - the programs that created them. A competent developer should be - able create a solution in his preferred programming language to - read most ``.npy`` files that he has been given without much - documentation. - -- Allows memory-mapping of the data. See `open_memmep`. - -- Can be read from a filelike stream object instead of an actual file. - -- Stores object arrays, i.e. arrays containing elements that are arbitrary - Python objects. Files with object arrays are not to be mmapable, but - can be read and written to disk. - -Limitations ------------ - -- Arbitrary subclasses of numpy.ndarray are not completely preserved. - Subclasses will be accepted for writing, but only the array data will - be written out. A regular numpy.ndarray object will be created - upon reading the file. - -.. warning:: - - Due to limitations in the interpretation of structured dtypes, dtypes - with fields with empty names will have the names replaced by 'f0', 'f1', - etc. 
Such arrays will not round-trip through the format entirely - accurately. The data is intact; only the field names will differ. We are - working on a fix for this. This fix will not require a change in the - file format. The arrays with such structures can still be saved and - restored, and the correct dtype may be restored by using the - ``loadedarray.view(correct_dtype)`` method. - -File extensions ---------------- - -We recommend using the ``.npy`` and ``.npz`` extensions for files saved -in this format. This is by no means a requirement; applications may wish -to use these file formats but use an extension specific to the -application. In the absence of an obvious alternative, however, -we suggest using ``.npy`` and ``.npz``. - -Version numbering ------------------ - -The version numbering of these formats is independent of NumPy version -numbering. If the format is upgraded, the code in `numpy.io` will still -be able to read and write Version 1.0 files. - -Format Version 1.0 ------------------- - -The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. - -The next 1 byte is an unsigned byte: the major version number of the file -format, e.g. ``\\x01``. - -The next 1 byte is an unsigned byte: the minor version number of the file -format, e.g. ``\\x00``. Note: the version of the file format is not tied -to the version of the numpy package. - -The next 2 bytes form a little-endian unsigned short int: the length of -the header data HEADER_LEN. - -The next HEADER_LEN bytes form the header data describing the array's -format. It is an ASCII string which contains a Python literal expression -of a dictionary. It is terminated by a newline (``\\n``) and padded with -spaces (``\\x20``) to make the total length of -``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment -purposes. 
- -The dictionary contains three keys: - - "descr" : dtype.descr - An object that can be passed as an argument to the `numpy.dtype` - constructor to create the array's dtype. - "fortran_order" : bool - Whether the array data is Fortran-contiguous or not. Since - Fortran-contiguous arrays are a common form of non-C-contiguity, - we allow them to be written directly to disk for efficiency. - "shape" : tuple of int - The shape of the array. - -For repeatability and readability, the dictionary keys are sorted in -alphabetic order. This is for convenience only. A writer SHOULD implement -this if possible. A reader MUST NOT depend on this. - -Following the header comes the array data. If the dtype contains Python -objects (i.e. ``dtype.hasobject is True``), then the data is a Python -pickle of the array. Otherwise the data is the contiguous (either C- -or Fortran-, depending on ``fortran_order``) bytes of the array. -Consumers can figure out the number of bytes by multiplying the number -of elements given by the shape (noting that ``shape=()`` means there is -1 element) by ``dtype.itemsize``. - -Notes ------ -The ``.npy`` format, including reasons for creating it and a comparison of -alternatives, is described fully in the "npy-format" NEP. - -""" - -import cPickle - -import numpy -import sys -from numpy.lib.utils import safe_eval -from numpy.compat import asbytes, isfileobj - -MAGIC_PREFIX = asbytes('\x93NUMPY') -MAGIC_LEN = len(MAGIC_PREFIX) + 2 - -def magic(major, minor): - """ Return the magic string for the given file format version. - - Parameters - ---------- - major : int in [0, 255] - minor : int in [0, 255] - - Returns - ------- - magic : str - - Raises - ------ - ValueError if the version cannot be formatted. 
- """ - if major < 0 or major > 255: - raise ValueError("major version must be 0 <= major < 256") - if minor < 0 or minor > 255: - raise ValueError("minor version must be 0 <= minor < 256") - if sys.version_info[0] < 3: - return MAGIC_PREFIX + chr(major) + chr(minor) - else: - return MAGIC_PREFIX + bytes([major, minor]) - -def read_magic(fp): - """ Read the magic string to get the version of the file format. - - Parameters - ---------- - fp : filelike object - - Returns - ------- - major : int - minor : int - """ - magic_str = fp.read(MAGIC_LEN) - if len(magic_str) != MAGIC_LEN: - msg = "could not read %d characters for the magic string; got %r" - raise ValueError(msg % (MAGIC_LEN, magic_str)) - if magic_str[:-2] != MAGIC_PREFIX: - msg = "the magic string is not correct; expected %r, got %r" - raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) - if sys.version_info[0] < 3: - major, minor = map(ord, magic_str[-2:]) - else: - major, minor = magic_str[-2:] - return major, minor - -def dtype_to_descr(dtype): - """ - Get a serializable descriptor from the dtype. - - The .descr attribute of a dtype object cannot be round-tripped through - the dtype() constructor. Simple types, like dtype('float32'), have - a descr which looks like a record array with one field with '' as - a name. The dtype() constructor interprets this as a request to give - a default name. Instead, we construct descriptor that can be passed to - dtype(). - - Parameters - ---------- - dtype : dtype - The dtype of the array that will be written to disk. - - Returns - ------- - descr : object - An object that can be passed to `numpy.dtype()` in order to - replicate the input dtype. - - """ - if dtype.names is not None: - # This is a record array. The .descr is fine. - # XXX: parts of the record array with an empty name, like padding bytes, - # still get fiddled with. This needs to be fixed in the C implementation - # of dtype(). 
- return dtype.descr - else: - return dtype.str - -def header_data_from_array_1_0(array): - """ Get the dictionary of header metadata from a numpy.ndarray. - - Parameters - ---------- - array : numpy.ndarray - - Returns - ------- - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - """ - d = {} - d['shape'] = array.shape - if array.flags.c_contiguous: - d['fortran_order'] = False - elif array.flags.f_contiguous: - d['fortran_order'] = True - else: - # Totally non-contiguous data. We will have to make it C-contiguous - # before writing. Note that we need to test for C_CONTIGUOUS first - # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. - d['fortran_order'] = False - - d['descr'] = dtype_to_descr(array.dtype) - return d - -def write_array_header_1_0(fp, d): - """ Write the header for an array using the 1.0 format. - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - """ - import struct - header = ["{"] - for key, value in sorted(d.items()): - # Need to use repr here, since we eval these when reading - header.append("'%s': %s, " % (key, repr(value))) - header.append("}") - header = "".join(header) - # Pad the header with spaces and a final newline such that the magic - # string, the header-length short and the header are aligned on a 16-byte - # boundary. Hopefully, some system, possibly memory-mapping, can take - # advantage of our premature optimization. 
- current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline - topad = 16 - (current_header_len % 16) - header = asbytes(header + ' '*topad + '\n') - if len(header) >= (256*256): - raise ValueError("header does not fit inside %s bytes" % (256*256)) - header_len_str = struct.pack('>> np.iterable([1, 2, 3]) - 1 - >>> np.iterable(2) - 0 - - """ - try: iter(y) - except: return 0 - return 1 - -def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): - """ - Compute the histogram of a set of data. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. - bins : int or sequence of scalars, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a sequence, - it defines the bin edges, including the rightmost edge, allowing - for non-uniform bin widths. - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. Values outside the range are - ignored. - normed : bool, optional - This keyword is deprecated in Numpy 1.6 due to confusing/buggy - behavior. It will be removed in Numpy 2.0. Use the density keyword - instead. - If False, the result will contain the number of samples - in each bin. If True, the result is the value of the - probability *density* function at the bin, normalized such that - the *integral* over the range is 1. Note that this latter behavior is - known to be buggy with unequal bin widths; use `density` instead. - weights : array_like, optional - An array of weights, of the same shape as `a`. Each value in `a` - only contributes its associated weight towards the bin count - (instead of 1). If `normed` is True, the weights are normalized, - so that the integral of the density over the range remains 1 - density : bool, optional - If False, the result will contain the number of samples - in each bin. 
If True, the result is the value of the - probability *density* function at the bin, normalized such that - the *integral* over the range is 1. Note that the sum of the - histogram values will not be equal to 1 unless bins of unity - width are chosen; it is not a probability *mass* function. - Overrides the `normed` keyword if given. - - Returns - ------- - hist : array - The values of the histogram. See `normed` and `weights` for a - description of the possible semantics. - bin_edges : array of dtype float - Return the bin edges ``(length(hist)+1)``. - - - See Also - -------- - histogramdd, bincount, searchsorted, digitize - - Notes - ----- - All but the last (righthand-most) bin is half-open. In other words, if - `bins` is:: - - [1, 2, 3, 4] - - then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the - second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* - 4. - - Examples - -------- - >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) - (array([0, 2, 1]), array([0, 1, 2, 3])) - >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) - (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) - >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) - (array([1, 4, 1]), array([0, 1, 2, 3])) - - >>> a = np.arange(5) - >>> hist, bin_edges = np.histogram(a, density=True) - >>> hist - array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. 
, 0.5]) - >>> hist.sum() - 2.4999999999999996 - >>> np.sum(hist*np.diff(bin_edges)) - 1.0 - - """ - - a = asarray(a) - if weights is not None: - weights = asarray(weights) - if np.any(weights.shape != a.shape): - raise ValueError( - 'weights should have the same shape as a.') - weights = weights.ravel() - a = a.ravel() - - if (range is not None): - mn, mx = range - if (mn > mx): - raise AttributeError( - 'max must be larger than min in range parameter.') - - if not iterable(bins): - if np.isscalar(bins) and bins < 1: - raise ValueError("`bins` should be a positive integer.") - if range is None: - if a.size == 0: - # handle empty arrays. Can't determine range, so use 0-1. - range = (0, 1) - else: - range = (a.min(), a.max()) - mn, mx = [mi+0.0 for mi in range] - if mn == mx: - mn -= 0.5 - mx += 0.5 - bins = linspace(mn, mx, bins+1, endpoint=True) - else: - bins = asarray(bins) - if (np.diff(bins) < 0).any(): - raise AttributeError( - 'bins must increase monotonically.') - - # Histogram is an integer or a float array depending on the weights. - if weights is None: - ntype = int - else: - ntype = weights.dtype - n = np.zeros(bins.shape, ntype) - - block = 65536 - if weights is None: - for i in arange(0, len(a), block): - sa = sort(a[i:i+block]) - n += np.r_[sa.searchsorted(bins[:-1], 'left'), \ - sa.searchsorted(bins[-1], 'right')] - else: - zero = array(0, dtype=ntype) - for i in arange(0, len(a), block): - tmp_a = a[i:i+block] - tmp_w = weights[i:i+block] - sorting_index = np.argsort(tmp_a) - sa = tmp_a[sorting_index] - sw = tmp_w[sorting_index] - cw = np.concatenate(([zero,], sw.cumsum())) - bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \ - sa.searchsorted(bins[-1], 'right')] - n += cw[bin_index] - - n = np.diff(n) - - if density is not None: - if density: - db = array(np.diff(bins), float) - return n/db/n.sum(), bins - else: - return n, bins - else: - # deprecated, buggy behavior. 
Remove for Numpy 2.0 - if normed: - db = array(np.diff(bins), float) - return n/(n*db).sum(), bins - else: - return n, bins - - -def histogramdd(sample, bins=10, range=None, normed=False, weights=None): - """ - Compute the multidimensional histogram of some data. - - Parameters - ---------- - sample : array_like - The data to be histogrammed. It must be an (N,D) array or data - that can be converted to such. The rows of the resulting array - are the coordinates of points in a D dimensional polytope. - bins : sequence or int, optional - The bin specification: - - * A sequence of arrays describing the bin edges along each dimension. - * The number of bins for each dimension (nx, ny, ... =bins) - * The number of bins for all dimensions (nx=ny=...=bins). - - range : sequence, optional - A sequence of lower and upper bin edges to be used if the edges are - not given explicitely in `bins`. Defaults to the minimum and maximum - values along each dimension. - normed : bool, optional - If False, returns the number of samples in each bin. If True, returns - the bin density, ie, the bin count divided by the bin hypervolume. - weights : array_like (N,), optional - An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. - Weights are normalized to 1 if normed is True. If normed is False, the - values of the returned histogram are equal to the sum of the weights - belonging to the samples falling into each bin. - - Returns - ------- - H : ndarray - The multidimensional histogram of sample x. See normed and weights for - the different possible semantics. - edges : list - A list of D arrays describing the bin edges for each dimension. - - See Also - -------- - histogram: 1-D histogram - histogram2d: 2-D histogram - - Examples - -------- - >>> r = np.random.randn(100,3) - >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) - >>> H.shape, edges[0].size, edges[1].size, edges[2].size - ((5, 8, 4), 6, 9, 5) - - """ - - try: - # Sample is an ND-array. 
- N, D = sample.shape - except (AttributeError, ValueError): - # Sample is a sequence of 1D arrays. - sample = atleast_2d(sample).T - N, D = sample.shape - - nbin = empty(D, int) - edges = D*[None] - dedges = D*[None] - if weights is not None: - weights = asarray(weights) - - try: - M = len(bins) - if M != D: - raise AttributeError( - 'The dimension of bins must be equal'\ - ' to the dimension of the sample x.') - except TypeError: - # bins is an integer - bins = D*[bins] - - # Select range for each dimension - # Used only if number of bins is given. - if range is None: - # Handle empty input. Range can't be determined in that case, use 0-1. - if N == 0: - smin = zeros(D) - smax = ones(D) - else: - smin = atleast_1d(array(sample.min(0), float)) - smax = atleast_1d(array(sample.max(0), float)) - else: - smin = zeros(D) - smax = zeros(D) - for i in arange(D): - smin[i], smax[i] = range[i] - - # Make sure the bins have a finite width. - for i in arange(len(smin)): - if smin[i] == smax[i]: - smin[i] = smin[i] - .5 - smax[i] = smax[i] + .5 - - # Create edge arrays - for i in arange(D): - if isscalar(bins[i]): - if bins[i] < 1: - raise ValueError("Element at index %s in `bins` should be " - "a positive integer." % i) - nbin[i] = bins[i] + 2 # +2 for outlier bins - edges[i] = linspace(smin[i], smax[i], nbin[i]-1) - else: - edges[i] = asarray(bins[i], float) - nbin[i] = len(edges[i])+1 # +1 for outlier bins - dedges[i] = diff(edges[i]) - if np.any(np.asarray(dedges[i]) <= 0): - raise ValueError(""" - Found bin edge of size <= 0. Did you specify `bins` with - non-monotonic sequence?""") - - nbin = asarray(nbin) - - # Handle empty input. - if N == 0: - return np.zeros(nbin-2), edges - - # Compute the bin number each sample falls into. - Ncount = {} - for i in arange(D): - Ncount[i] = digitize(sample[:,i], edges[i]) - - # Using digitize, values that fall on an edge are put in the right bin. 
- # For the rightmost bin, we want values equal to the right - # edge to be counted in the last bin, and not as an outlier. - outliers = zeros(N, int) - for i in arange(D): - # Rounding precision - mindiff = dedges[i].min() - if not np.isinf(mindiff): - decimal = int(-log10(mindiff)) + 6 - # Find which points are on the rightmost edge. - on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1], - decimal))[0] - # Shift these points one bin to the left. - Ncount[i][on_edge] -= 1 - - # Flattened histogram matrix (1D) - # Reshape is used so that overlarge arrays - # will raise an error. - hist = zeros(nbin, float).reshape(-1) - - # Compute the sample indices in the flattened histogram matrix. - ni = nbin.argsort() - shape = [] - xy = zeros(N, int) - for i in arange(0, D-1): - xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() - xy += Ncount[ni[-1]] - - # Compute the number of repetitions in xy and assign it to the - # flattened histmat. - if len(xy) == 0: - return zeros(nbin-2, int), edges - - flatcount = bincount(xy, weights) - a = arange(len(flatcount)) - hist[a] = flatcount - - # Shape into a proper matrix - hist = hist.reshape(sort(nbin)) - for i in arange(nbin.size): - j = ni.argsort()[i] - hist = hist.swapaxes(i,j) - ni[i],ni[j] = ni[j],ni[i] - - # Remove outliers (indices 0 and -1 for each dimension). - core = D*[slice(1,-1)] - hist = hist[core] - - # Normalize if normed is True - if normed: - s = hist.sum() - for i in arange(D): - shape = ones(D, int) - shape[i] = nbin[i] - 2 - hist = hist / dedges[i].reshape(shape) - hist /= s - - if (hist.shape != nbin - 2).any(): - raise RuntimeError( - "Internal Shape Error") - return hist, edges - - -def average(a, axis=None, weights=None, returned=False): - """ - Compute the weighted average along the specified axis. - - Parameters - ---------- - a : array_like - Array containing data to be averaged. If `a` is not an array, a - conversion is attempted. - axis : int, optional - Axis along which to average `a`. 
If `None`, averaging is done over - the flattened array. - weights : array_like, optional - An array of weights associated with the values in `a`. Each value in - `a` contributes to the average according to its associated weight. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If `weights=None`, then all data in `a` are assumed to have a - weight equal to one. - returned : bool, optional - Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) - is returned, otherwise only the average is returned. - If `weights=None`, `sum_of_weights` is equivalent to the number of - elements over which the average is taken. - - - Returns - ------- - average, [sum_of_weights] : {array_type, double} - Return the average along the specified axis. When returned is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is `Float` - if `a` is of integer type, otherwise it is of the same type as `a`. - `sum_of_weights` is of the same type as `average`. - - Raises - ------ - ZeroDivisionError - When all weights along axis are zero. See `numpy.ma.average` for a - version robust to this type of error. - TypeError - When the length of 1D `weights` is not the same as the shape of `a` - along axis. - - See Also - -------- - mean - - ma.average : average for masked arrays - - Examples - -------- - >>> data = range(1,5) - >>> data - [1, 2, 3, 4] - >>> np.average(data) - 2.5 - >>> np.average(range(1,11), weights=range(10,0,-1)) - 4.0 - - >>> data = np.arange(6).reshape((3,2)) - >>> data - array([[0, 1], - [2, 3], - [4, 5]]) - >>> np.average(data, axis=1, weights=[1./4, 3./4]) - array([ 0.75, 2.75, 4.75]) - >>> np.average(data, weights=[1./4, 3./4]) - Traceback (most recent call last): - ... - TypeError: Axis must be specified when shapes of a and weights differ. 
- - """ - if not isinstance(a, np.matrix) : - a = np.asarray(a) - - if weights is None : - avg = a.mean(axis) - scl = avg.dtype.type(a.size/avg.size) - else : - a = a + 0.0 - wgt = np.array(weights, dtype=a.dtype, copy=0) - - # Sanity checks - if a.shape != wgt.shape : - if axis is None : - raise TypeError( - "Axis must be specified when shapes of a "\ - "and weights differ.") - if wgt.ndim != 1 : - raise TypeError( - "1D weights expected when shapes of a and "\ - "weights differ.") - if wgt.shape[0] != a.shape[axis] : - raise ValueError( - "Length of weights not compatible with "\ - "specified axis.") - - # setup wgt to broadcast along axis - wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) - - scl = wgt.sum(axis=axis) - if (scl == 0.0).any(): - raise ZeroDivisionError( - "Weights sum to zero, can't be normalized") - - avg = np.multiply(a, wgt).sum(axis)/scl - - if returned: - scl = np.multiply(avg, 0) + scl - return avg, scl - else: - return avg - -def asarray_chkfinite(a): - """ - Convert the input to an array, checking for NaNs or Infs. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. Success requires no NaNs or Infs. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - - Raises - ------ - ValueError - Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). - - See Also - -------- - asarray : Create and array. - asanyarray : Similar function which passes through subclasses. 
- ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array. If all elements are finite - ``asarray_chkfinite`` is identical to ``asarray``. - - >>> a = [1, 2] - >>> np.asarray_chkfinite(a) - array([1, 2]) - - Raises ValueError if array_like contains Nans or Infs. - - >>> a = [1, 2, np.inf] - >>> try: - ... np.asarray_chkfinite(a) - ... except ValueError: - ... print 'ValueError' - ... - ValueError - - """ - a = asarray(a) - if (a.dtype.char in typecodes['AllFloat']) \ - and (_nx.isnan(a).any() or _nx.isinf(a).any()): - raise ValueError( - "array must not contain infs or NaNs") - return a - -def piecewise(x, condlist, funclist, *args, **kw): - """ - Evaluate a piecewise-defined function. - - Given a set of conditions and corresponding functions, evaluate each - function on the input data wherever its condition is true. - - Parameters - ---------- - x : ndarray - The input domain. - condlist : list of bool arrays - Each boolean array corresponds to a function in `funclist`. Wherever - `condlist[i]` is True, `funclist[i](x)` is used as the output value. - - Each boolean array in `condlist` selects a piece of `x`, - and should therefore be of the same shape as `x`. - - The length of `condlist` must correspond to that of `funclist`. - If one extra function is given, i.e. if - ``len(funclist) - len(condlist) == 1``, then that extra function - is the default value, used wherever all conditions are false. - funclist : list of callables, f(x,*args,**kw), or scalars - Each function is evaluated over `x` wherever its corresponding - condition is True. It should take an array as input and give an array - or a scalar value as output. 
If, instead of a callable, - a scalar is provided then a constant function (``lambda x: scalar``) is - assumed. - args : tuple, optional - Any further arguments given to `piecewise` are passed to the functions - upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then - each function is called as ``f(x, 1, 'a')``. - kw : dict, optional - Keyword arguments used in calling `piecewise` are passed to the - functions upon execution, i.e., if called - ``piecewise(..., ..., lambda=1)``, then each function is called as - ``f(x, lambda=1)``. - - Returns - ------- - out : ndarray - The output is the same shape and type as x and is found by - calling the functions in `funclist` on the appropriate portions of `x`, - as defined by the boolean arrays in `condlist`. Portions not covered - by any condition have undefined values. - - - See Also - -------- - choose, select, where - - Notes - ----- - This is similar to choose or select, except that functions are - evaluated on elements of `x` that satisfy the corresponding condition from - `condlist`. - - The result is:: - - |-- - |funclist[0](x[condlist[0]]) - out = |funclist[1](x[condlist[1]]) - |... - |funclist[n2](x[condlist[n2]]) - |-- - - Examples - -------- - Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. - - >>> x = np.arange(6) - 2.5 - >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) - array([-1., -1., -1., 1., 1., 1.]) - - Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for - ``x >= 0``. - - >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) - array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) - - """ - x = asanyarray(x) - n2 = len(funclist) - if isscalar(condlist) or \ - not (isinstance(condlist[0], list) or - isinstance(condlist[0], ndarray)): - condlist = [condlist] - condlist = [asarray(c, dtype=bool) for c in condlist] - n = len(condlist) - if n == n2-1: # compute the "otherwise" condition. 
- totlist = condlist[0] - for k in range(1, n): - totlist |= condlist[k] - condlist.append(~totlist) - n += 1 - if (n != n2): - raise ValueError( - "function list and condition list must be the same") - zerod = False - # This is a hack to work around problems with NumPy's - # handling of 0-d arrays and boolean indexing with - # numpy.bool_ scalars - if x.ndim == 0: - x = x[None] - zerod = True - newcondlist = [] - for k in range(n): - if condlist[k].ndim == 0: - condition = condlist[k][None] - else: - condition = condlist[k] - newcondlist.append(condition) - condlist = newcondlist - - y = zeros(x.shape, x.dtype) - for k in range(n): - item = funclist[k] - if not callable(item): - y[condlist[k]] = item - else: - vals = x[condlist[k]] - if vals.size > 0: - y[condlist[k]] = item(vals, *args, **kw) - if zerod: - y = y.squeeze() - return y - -def select(condlist, choicelist, default=0): - """ - Return an array drawn from elements in choicelist, depending on conditions. - - Parameters - ---------- - condlist : list of bool ndarrays - The list of conditions which determine from which array in `choicelist` - the output elements are taken. When multiple conditions are satisfied, - the first one encountered in `condlist` is used. - choicelist : list of ndarrays - The list of arrays from which the output elements are taken. It has - to be of the same length as `condlist`. - default : scalar, optional - The element inserted in `output` when all conditions evaluate to False. - - Returns - ------- - output : ndarray - The output at position m is the m-th element of the array in - `choicelist` where the m-th element of the corresponding array in - `condlist` is True. - - See Also - -------- - where : Return elements from one of two arrays depending on condition. 
- take, choose, compress, diag, diagonal - - Examples - -------- - >>> x = np.arange(10) - >>> condlist = [x<3, x>5] - >>> choicelist = [x, x**2] - >>> np.select(condlist, choicelist) - array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) - - """ - n = len(condlist) - n2 = len(choicelist) - if n2 != n: - raise ValueError( - "list of cases must be same length as list of conditions") - choicelist = [default] + choicelist - S = 0 - pfac = 1 - for k in range(1, n+1): - S += k * pfac * asarray(condlist[k-1]) - if k < n: - pfac *= (1-asarray(condlist[k-1])) - # handle special case of a 1-element condition but - # a multi-element choice - if type(S) in ScalarType or max(asarray(S).shape)==1: - pfac = asarray(1) - for k in range(n2+1): - pfac = pfac + asarray(choicelist[k]) - if type(S) in ScalarType: - S = S*ones(asarray(pfac).shape, type(S)) - else: - S = S*ones(asarray(pfac).shape, S.dtype) - return choose(S, tuple(choicelist)) - -def copy(a): - """ - Return an array copy of the given object. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - arr : ndarray - Array interpretation of `a`. - - Notes - ----- - This is equivalent to - - >>> np.array(a, copy=True) #doctest: +SKIP - - Examples - -------- - Create an array x, with a reference y and a copy z: - - >>> x = np.array([1, 2, 3]) - >>> y = x - >>> z = np.copy(x) - - Note that, when we modify x, y changes, but not z: - - >>> x[0] = 10 - >>> x[0] == y[0] - True - >>> x[0] == z[0] - False - - """ - return array(a, copy=True) - -# Basic operations - -def gradient(f, *varargs): - """ - Return the gradient of an N-dimensional array. - - The gradient is computed using central differences in the interior - and first differences at the boundaries. The returned gradient hence has - the same shape as the input array. - - Parameters - ---------- - f : array_like - An N-dimensional array containing samples of a scalar function. 
- `*varargs` : scalars - 0, 1, or N scalars specifying the sample distances in each direction, - that is: `dx`, `dy`, `dz`, ... The default distance is 1. - - - Returns - ------- - g : ndarray - N arrays of the same shape as `f` giving the derivative of `f` with - respect to each dimension. - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) - >>> np.gradient(x) - array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - >>> np.gradient(x, 2) - array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) - - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) - [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), - array([[ 1. , 2.5, 4. ], - [ 1. , 1. , 1. ]])] - - """ - N = len(f.shape) # number of dimensions - n = len(varargs) - if n == 0: - dx = [1.0]*N - elif n == 1: - dx = [varargs[0]]*N - elif n == N: - dx = list(varargs) - else: - raise SyntaxError( - "invalid number of arguments") - - # use central differences on interior and first differences on endpoints - - outvals = [] - - # create slice objects --- initially all are [:, :, ..., :] - slice1 = [slice(None)]*N - slice2 = [slice(None)]*N - slice3 = [slice(None)]*N - - otype = f.dtype.char - if otype not in ['f', 'd', 'F', 'D']: - otype = 'd' - - for axis in range(N): - # select out appropriate parts for this dimension - out = np.zeros_like(f).astype(otype) - slice1[axis] = slice(1, -1) - slice2[axis] = slice(2, None) - slice3[axis] = slice(None, -2) - # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0 - out[slice1] = (f[slice2] - f[slice3])/2.0 - slice1[axis] = 0 - slice2[axis] = 1 - slice3[axis] = 0 - # 1D equivalent -- out[0] = (f[1] - f[0]) - out[slice1] = (f[slice2] - f[slice3]) - slice1[axis] = -1 - slice2[axis] = -1 - slice3[axis] = -2 - # 1D equivalent -- out[-1] = (f[-1] - f[-2]) - out[slice1] = (f[slice2] - f[slice3]) - - # divide by step size - outvals.append(out / dx[axis]) - - # reset the slice object in this dimension to ":" - slice1[axis] = slice(None) - slice2[axis] = slice(None) - 
slice3[axis] = slice(None) - - if N == 1: - return outvals[0] - else: - return outvals - - -def diff(a, n=1, axis=-1): - """ - Calculate the n-th order discrete difference along given axis. - - The first order difference is given by ``out[n] = a[n+1] - a[n]`` along - the given axis, higher order differences are calculated by using `diff` - recursively. - - Parameters - ---------- - a : array_like - Input array - n : int, optional - The number of times values are differenced. - axis : int, optional - The axis along which the difference is taken, default is the last axis. - - Returns - ------- - out : ndarray - The `n` order differences. The shape of the output is the same as `a` - except along `axis` where the dimension is smaller by `n`. - - See Also - -------- - gradient, ediff1d - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.diff(x) - array([ 1, 2, 3, -7]) - >>> np.diff(x, n=2) - array([ 1, 1, -10]) - - >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) - >>> np.diff(x) - array([[2, 3, 4], - [5, 1, 2]]) - >>> np.diff(x, axis=0) - array([[-1, 2, 0, -2]]) - - """ - if n == 0: - return a - if n < 0: - raise ValueError( - "order must be non-negative but got " + repr(n)) - a = asanyarray(a) - nd = len(a.shape) - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - slice1 = tuple(slice1) - slice2 = tuple(slice2) - if n > 1: - return diff(a[slice1]-a[slice2], n-1, axis=axis) - else: - return a[slice1]-a[slice2] - -def interp(x, xp, fp, left=None, right=None): - """ - One-dimensional linear interpolation. - - Returns the one-dimensional piecewise linear interpolant to a function - with given values at discrete data-points. - - Parameters - ---------- - x : array_like - The x-coordinates of the interpolated values. - - xp : 1-D sequence of floats - The x-coordinates of the data points, must be increasing. 
- - fp : 1-D sequence of floats - The y-coordinates of the data points, same length as `xp`. - - left : float, optional - Value to return for `x < xp[0]`, default is `fp[0]`. - - right : float, optional - Value to return for `x > xp[-1]`, defaults is `fp[-1]`. - - Returns - ------- - y : {float, ndarray} - The interpolated values, same shape as `x`. - - Raises - ------ - ValueError - If `xp` and `fp` have different length - - Notes - ----- - Does not check that the x-coordinate sequence `xp` is increasing. - If `xp` is not increasing, the results are nonsense. - A simple check for increasingness is:: - - np.all(np.diff(xp) > 0) - - - Examples - -------- - >>> xp = [1, 2, 3] - >>> fp = [3, 2, 0] - >>> np.interp(2.5, xp, fp) - 1.0 - >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) - array([ 3. , 3. , 2.5 , 0.56, 0. ]) - >>> UNDEF = -99.0 - >>> np.interp(3.14, xp, fp, right=UNDEF) - -99.0 - - Plot an interpolant to the sine function: - - >>> x = np.linspace(0, 2*np.pi, 10) - >>> y = np.sin(x) - >>> xvals = np.linspace(0, 2*np.pi, 50) - >>> yinterp = np.interp(xvals, x, y) - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o') - [] - >>> plt.plot(xvals, yinterp, '-x') - [] - >>> plt.show() - - """ - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - elif isinstance(x, np.ndarray) and x.ndim == 0: - return compiled_interp([x], xp, fp, left, right).item() - else: - return compiled_interp(x, xp, fp, left, right) - - -def angle(z, deg=0): - """ - Return the angle of the complex argument. - - Parameters - ---------- - z : array_like - A complex number or sequence of complex numbers. - deg : bool, optional - Return angle in degrees if True, radians if False (default). - - Returns - ------- - angle : {ndarray, scalar} - The counterclockwise angle from the positive real axis on - the complex plane, with dtype as numpy.float64. 
- - See Also - -------- - arctan2 - absolute - - - - Examples - -------- - >>> np.angle([1.0, 1.0j, 1+1j]) # in radians - array([ 0. , 1.57079633, 0.78539816]) - >>> np.angle(1+1j, deg=True) # in degrees - 45.0 - - """ - if deg: - fact = 180/pi - else: - fact = 1.0 - z = asarray(z) - if (issubclass(z.dtype.type, _nx.complexfloating)): - zimag = z.imag - zreal = z.real - else: - zimag = 0 - zreal = z - return arctan2(zimag, zreal) * fact - -def unwrap(p, discont=pi, axis=-1): - """ - Unwrap by changing deltas between values to 2*pi complement. - - Unwrap radian phase `p` by changing absolute jumps greater than - `discont` to their 2*pi complement along the given axis. - - Parameters - ---------- - p : array_like - Input array. - discont : float, optional - Maximum discontinuity between values, default is ``pi``. - axis : int, optional - Axis along which unwrap will operate, default is the last axis. - - Returns - ------- - out : ndarray - Output array. - - See Also - -------- - rad2deg, deg2rad - - Notes - ----- - If the discontinuity in `p` is smaller than ``pi``, but larger than - `discont`, no unwrapping is done because taking the 2*pi complement - would only make the discontinuity larger. - - Examples - -------- - >>> phase = np.linspace(0, np.pi, num=5) - >>> phase[3:] += np.pi - >>> phase - array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) - >>> np.unwrap(phase) - array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. 
]) - - """ - p = asarray(p) - nd = len(p.shape) - dd = diff(p, axis=axis) - slice1 = [slice(None, None)]*nd # full slices - slice1[axis] = slice(1, None) - ddmod = mod(dd+pi, 2*pi)-pi - _nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi) - ph_correct = ddmod - dd; - _nx.putmask(ph_correct, abs(dd)>> np.sort_complex([5, 3, 6, 2, 1]) - array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) - - >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) - array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) - - """ - b = array(a,copy=True) - b.sort() - if not issubclass(b.dtype.type, _nx.complexfloating): - if b.dtype.char in 'bhBH': - return b.astype('F') - elif b.dtype.char == 'g': - return b.astype('G') - else: - return b.astype('D') - else: - return b - -def trim_zeros(filt, trim='fb'): - """ - Trim the leading and/or trailing zeros from a 1-D array or sequence. - - Parameters - ---------- - filt : 1-D array or sequence - Input array. - trim : str, optional - A string with 'f' representing trim from front and 'b' to trim from - back. Default is 'fb', trim zeros from both front and back of the - array. - - Returns - ------- - trimmed : 1-D array or sequence - The result of trimming the input. The input data type is preserved. - - Examples - -------- - >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) - >>> np.trim_zeros(a) - array([1, 2, 3, 0, 2, 1]) - - >>> np.trim_zeros(a, 'b') - array([0, 0, 0, 1, 2, 3, 0, 2, 1]) - - The input data type is preserved, list/tuple in means list/tuple out. - - >>> np.trim_zeros([0, 1, 2, 0]) - [1, 2] - - """ - first = 0 - trim = trim.upper() - if 'F' in trim: - for i in filt: - if i != 0.: break - else: first = first + 1 - last = len(filt) - if 'B' in trim: - for i in filt[::-1]: - if i != 0.: break - else: last = last - 1 - return filt[first:last] - -import sys -if sys.hexversion < 0x2040000: - from sets import Set as set - -@deprecate -def unique(x): - """ - This function is deprecated. Use numpy.lib.arraysetops.unique() - instead. 
- """ - try: - tmp = x.flatten() - if tmp.size == 0: - return tmp - tmp.sort() - idx = concatenate(([True],tmp[1:]!=tmp[:-1])) - return tmp[idx] - except AttributeError: - items = list(set(x)) - items.sort() - return asarray(items) - -def extract(condition, arr): - """ - Return the elements of an array that satisfy some condition. - - This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If - `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. - - Parameters - ---------- - condition : array_like - An array whose nonzero or True entries indicate the elements of `arr` - to extract. - arr : array_like - Input array of the same size as `condition`. - - See Also - -------- - take, put, putmask, compress - - Examples - -------- - >>> arr = np.arange(12).reshape((3, 4)) - >>> arr - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> condition = np.mod(arr, 3)==0 - >>> condition - array([[ True, False, False, True], - [False, False, True, False], - [False, True, False, False]], dtype=bool) - >>> np.extract(condition, arr) - array([0, 3, 6, 9]) - - - If `condition` is boolean: - - >>> arr[condition] - array([0, 3, 6, 9]) - - """ - return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) - -def place(arr, mask, vals): - """ - Change elements of an array based on conditional and input values. - - Similar to ``np.putmask(arr, mask, vals)``, the difference is that `place` - uses the first N elements of `vals`, where N is the number of True values - in `mask`, while `putmask` uses the elements where `mask` is True. - - Note that `extract` does the exact opposite of `place`. - - Parameters - ---------- - arr : array_like - Array to put data into. - mask : array_like - Boolean mask array. Must have the same size as `a`. - vals : 1-D sequence - Values to put into `a`. Only the first N elements are used, where - N is the number of True values in `mask`. If `vals` is smaller - than N it will be repeated. 
- - See Also - -------- - putmask, put, take, extract - - Examples - -------- - >>> arr = np.arange(6).reshape(2, 3) - >>> np.place(arr, arr>2, [44, 55]) - >>> arr - array([[ 0, 1, 2], - [44, 55, 44]]) - - """ - return _insert(arr, mask, vals) - -def _nanop(op, fill, a, axis=None): - """ - General operation on arrays with not-a-number values. - - Parameters - ---------- - op : callable - Operation to perform. - fill : float - NaN values are set to fill before doing the operation. - a : array-like - Input array. - axis : {int, None}, optional - Axis along which the operation is computed. - By default the input is flattened. - - Returns - ------- - y : {ndarray, scalar} - Processed data. - - """ - y = array(a, subok=True) - - # We only need to take care of NaN's in floating point arrays - if np.issubdtype(y.dtype, np.integer): - return op(y, axis=axis) - mask = isnan(a) - # y[mask] = fill - # We can't use fancy indexing here as it'll mess w/ MaskedArrays - # Instead, let's fill the array directly... - np.putmask(y, mask, fill) - res = op(y, axis=axis) - mask_all_along_axis = mask.all(axis=axis) - - # Along some axes, only nan's were encountered. As such, any values - # calculated along that axis should be set to nan. - if mask_all_along_axis.any(): - if np.isscalar(res): - res = np.nan - else: - res[mask_all_along_axis] = np.nan - - return res - -def nansum(a, axis=None): - """ - Return the sum of array elements over a given axis treating - Not a Numbers (NaNs) as zero. - - Parameters - ---------- - a : array_like - Array containing numbers whose sum is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the sum is computed. The default is to compute - the sum of the flattened array. - - Returns - ------- - y : ndarray - An array with the same shape as a, with the specified axis removed. - If a is a 0-d array, or if axis is None, a scalar is returned with - the same dtype as `a`. 
- - See Also - -------- - numpy.sum : Sum across array including Not a Numbers. - isnan : Shows which elements are Not a Number (NaN). - isfinite: Shows which elements are not: Not a Number, positive and - negative infinity - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - If positive or negative infinity are present the result is positive or - negative infinity. But if both positive and negative infinity are present, - the result is Not A Number (NaN). - - Arithmetic is modular when using integer types (all elements of `a` must - be finite i.e. no elements that are NaNs, positive infinity and negative - infinity because NaNs are floating point types), and no error is raised - on overflow. - - - Examples - -------- - >>> np.nansum(1) - 1 - >>> np.nansum([1]) - 1 - >>> np.nansum([1, np.nan]) - 1.0 - >>> a = np.array([[1, 1], [1, np.nan]]) - >>> np.nansum(a) - 3.0 - >>> np.nansum(a, axis=0) - array([ 2., 1.]) - - When positive infinity and negative infinity are present - - >>> np.nansum([1, np.nan, np.inf]) - inf - >>> np.nansum([1, np.nan, np.NINF]) - -inf - >>> np.nansum([1, np.nan, np.inf, np.NINF]) - nan - - """ - return _nanop(np.sum, 0, a, axis) - -def nanmin(a, axis=None): - """ - Return the minimum of an array or minimum along an axis ignoring any NaNs. - - Parameters - ---------- - a : array_like - Array containing numbers whose minimum is desired. - axis : int, optional - Axis along which the minimum is computed.The default is to compute - the minimum of the flattened array. - - Returns - ------- - nanmin : ndarray - A new array or a scalar array with the result. - - See Also - -------- - numpy.amin : Minimum across array including any Not a Numbers. - numpy.nanmax : Maximum across array ignoring any Not a Numbers. - isnan : Shows which elements are Not a Number (NaN). 
- isfinite: Shows which elements are not: Not a Number, positive and - negative infinity - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative infinity - is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.min. - - - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmin(a) - 1.0 - >>> np.nanmin(a, axis=0) - array([ 1., 2.]) - >>> np.nanmin(a, axis=1) - array([ 1., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmin([1, 2, np.nan, np.inf]) - 1.0 - >>> np.nanmin([1, 2, np.nan, np.NINF]) - -inf - - """ - a = np.asanyarray(a) - if axis is not None: - return np.fmin.reduce(a, axis) - else: - return np.fmin.reduce(a.flat) - -def nanargmin(a, axis=None): - """ - Return indices of the minimum values over an axis, ignoring NaNs. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. - - See Also - -------- - argmin, nanargmax - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmin(a) - 0 - >>> np.nanargmin(a) - 2 - >>> np.nanargmin(a, axis=0) - array([1, 1]) - >>> np.nanargmin(a, axis=1) - array([1, 0]) - - """ - return _nanop(np.argmin, np.inf, a, axis) - -def nanmax(a, axis=None): - """ - Return the maximum of an array or maximum along an axis ignoring any NaNs. - - Parameters - ---------- - a : array_like - Array containing numbers whose maximum is desired. If `a` is not - an array, a conversion is attempted. - axis : int, optional - Axis along which the maximum is computed. The default is to compute - the maximum of the flattened array. 
- - Returns - ------- - nanmax : ndarray - An array with the same shape as `a`, with the specified axis removed. - If `a` is a 0-d array, or if axis is None, a ndarray scalar is - returned. The the same dtype as `a` is returned. - - See Also - -------- - numpy.amax : Maximum across array including any Not a Numbers. - numpy.nanmin : Minimum across array ignoring any Not a Numbers. - isnan : Shows which elements are Not a Number (NaN). - isfinite: Shows which elements are not: Not a Number, positive and - negative infinity - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative infinity - is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.max. - - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmax(a) - 3.0 - >>> np.nanmax(a, axis=0) - array([ 3., 2.]) - >>> np.nanmax(a, axis=1) - array([ 2., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmax([1, 2, np.nan, np.NINF]) - 2.0 - >>> np.nanmax([1, 2, np.nan, np.inf]) - inf - - """ - a = np.asanyarray(a) - if axis is not None: - return np.fmax.reduce(a, axis) - else: - return np.fmax.reduce(a.flat) - -def nanargmax(a, axis=None): - """ - Return indices of the maximum values over an axis, ignoring NaNs. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. 
- - See Also - -------- - argmax, nanargmin - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmax(a) - 0 - >>> np.nanargmax(a) - 1 - >>> np.nanargmax(a, axis=0) - array([1, 0]) - >>> np.nanargmax(a, axis=1) - array([1, 1]) - - """ - return _nanop(np.argmax, -np.inf, a, axis) - -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. - - Examples - -------- - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from StringIO import StringIO - >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - if device is None: - import sys - device = sys.stdout - if linefeed: - device.write('%s\n' % mesg) - else: - device.write('%s' % mesg) - device.flush() - return - -# return number of input arguments and -# number of default arguments - -def _get_nargs(obj): - import re - - terr = re.compile(r'.*? takes (exactly|at least) (?P(\d+)|(\w+))' + - r' argument(s|) \((?P(\d+)|(\w+)) given\)') - def _convert_to_int(strval): - try: - result = int(strval) - except ValueError: - if strval=='zero': - result = 0 - elif strval=='one': - result = 1 - elif strval=='two': - result = 2 - # How high to go? English only? - else: - raise - return result - - if not callable(obj): - raise TypeError( - "Object is not callable.") - if sys.version_info[0] >= 3: - # inspect currently fails for binary extensions - # like math.cos. So fall back to other methods if - # it fails. 
- import inspect - try: - spec = inspect.getargspec(obj) - nargs = len(spec.args) - if spec.defaults: - ndefaults = len(spec.defaults) - else: - ndefaults = 0 - if inspect.ismethod(obj): - nargs -= 1 - return nargs, ndefaults - except: - pass - - if hasattr(obj,'func_code'): - fcode = obj.func_code - nargs = fcode.co_argcount - if obj.func_defaults is not None: - ndefaults = len(obj.func_defaults) - else: - ndefaults = 0 - if isinstance(obj, types.MethodType): - nargs -= 1 - return nargs, ndefaults - - try: - obj() - return 0, 0 - except TypeError, msg: - m = terr.match(str(msg)) - if m: - nargs = _convert_to_int(m.group('exargs')) - ndefaults = _convert_to_int(m.group('gargs')) - if isinstance(obj, types.MethodType): - nargs -= 1 - return nargs, ndefaults - - raise ValueError( - "failed to determine the number of arguments for %s" % (obj)) - - -class vectorize(object): - """ - vectorize(pyfunc, otypes='', doc=None) - - Generalized function class. - - Define a vectorized function which takes a nested sequence - of objects or numpy arrays as inputs and returns a - numpy array as output. The vectorized function evaluates `pyfunc` over - successive tuples of the input arrays like the python map function, - except it uses the broadcasting rules of numpy. - - The data type of the output of `vectorized` is determined by calling - the function with the first element of the input. This can be avoided - by specifying the `otypes` argument. - - Parameters - ---------- - pyfunc : callable - A python function or method. - otypes : str or list of dtypes, optional - The output data type. It must be specified as either a string of - typecode characters or a list of data type specifiers. There should - be one data type specifier for each output. - doc : str, optional - The docstring for the function. If None, the docstring will be the - `pyfunc` one. - - Examples - -------- - >>> def myfunc(a, b): - ... \"\"\"Return a-b if a>b, otherwise return a+b\"\"\" - ... if a > b: - ... 
return a - b - ... else: - ... return a + b - - >>> vfunc = np.vectorize(myfunc) - >>> vfunc([1, 2, 3, 4], 2) - array([3, 4, 1, 2]) - - The docstring is taken from the input function to `vectorize` unless it - is specified - - >>> vfunc.__doc__ - 'Return a-b if a>b, otherwise return a+b' - >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') - >>> vfunc.__doc__ - 'Vectorized `myfunc`' - - The output type is determined by evaluating the first element of the input, - unless it is specified - - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - - """ - def __init__(self, pyfunc, otypes='', doc=None): - self.thefunc = pyfunc - self.ufunc = None - nin, ndefault = _get_nargs(pyfunc) - if nin == 0 and ndefault == 0: - self.nin = None - self.nin_wo_defaults = None - else: - self.nin = nin - self.nin_wo_defaults = nin - ndefault - self.nout = None - if doc is None: - self.__doc__ = pyfunc.__doc__ - else: - self.__doc__ = doc - if isinstance(otypes, str): - self.otypes = otypes - for char in self.otypes: - if char not in typecodes['All']: - raise ValueError( - "invalid otype specified") - elif iterable(otypes): - self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) - else: - raise ValueError( - "Invalid otype specification") - self.lastcallargs = 0 - - def __call__(self, *args): - # get number of outputs and output types by calling - # the function on the first entries of args - nargs = len(args) - if self.nin: - if (nargs > self.nin) or (nargs < self.nin_wo_defaults): - raise ValueError( - "Invalid number of arguments") - - # we need a new ufunc if this is being called with more arguments. 
- if (self.lastcallargs != nargs): - self.lastcallargs = nargs - self.ufunc = None - self.nout = None - - if self.nout is None or self.otypes == '': - newargs = [] - for arg in args: - newargs.append(asarray(arg).flat[0]) - theout = self.thefunc(*newargs) - if isinstance(theout, tuple): - self.nout = len(theout) - else: - self.nout = 1 - theout = (theout,) - if self.otypes == '': - otypes = [] - for k in range(self.nout): - otypes.append(asarray(theout[k]).dtype.char) - self.otypes = ''.join(otypes) - - # Create ufunc if not already created - if (self.ufunc is None): - self.ufunc = frompyfunc(self.thefunc, nargs, self.nout) - - # Convert to object arrays first - newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args] - if self.nout == 1: - _res = array(self.ufunc(*newargs),copy=False, - subok=True,dtype=self.otypes[0]) - else: - _res = tuple([array(x,copy=False,subok=True,dtype=c) \ - for x, c in zip(self.ufunc(*newargs), self.otypes)]) - return _res - -def cov(m, y=None, rowvar=1, bias=0, ddof=None): - """ - Estimate a covariance matrix, given data. - - Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, - then the covariance matrix element :math:`C_{ij}` is the covariance of - :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance - of :math:`x_i`. - - Parameters - ---------- - m : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `m` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - form as that of `m`. - rowvar : int, optional - If `rowvar` is non-zero (default), then each row represents a - variable, with observations in the columns. 
Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : int, optional - Default normalization is by ``(N - 1)``, where ``N`` is the number of - observations given (unbiased estimate). If `bias` is 1, then - normalization is by ``N``. These values can be overridden by using - the keyword ``ddof`` in numpy versions >= 1.5. - ddof : int, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - Returns - ------- - out : ndarray - The covariance matrix of the variables. - - See Also - -------- - corrcoef : Normalized covariance matrix - - Examples - -------- - Consider two variables, :math:`x_0` and :math:`x_1`, which - correlate perfectly, but in opposite directions: - - >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T - >>> x - array([[0, 1, 2], - [2, 1, 0]]) - - Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance - matrix shows this clearly: - - >>> np.cov(x) - array([[ 1., -1.], - [-1., 1.]]) - - Note that element :math:`C_{0,1}`, which shows the correlation between - :math:`x_0` and :math:`x_1`, is negative. 
- - Further, note how `x` and `y` are combined: - - >>> x = [-2.1, -1, 4.3] - >>> y = [3, 1.1, 0.12] - >>> X = np.vstack((x,y)) - >>> print np.cov(X) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print np.cov(x, y) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print np.cov(x) - 11.71 - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError("ddof must be integer") - - X = array(m, ndmin=2, dtype=float) - if X.size == 0: - # handle empty arrays - return np.array(m) - if X.shape[0] == 1: - rowvar = 1 - if rowvar: - axis = 0 - tup = (slice(None),newaxis) - else: - axis = 1 - tup = (newaxis, slice(None)) - - - if y is not None: - y = array(y, copy=False, ndmin=2, dtype=float) - X = concatenate((X,y), axis) - - X -= X.mean(axis=1-axis)[tup] - if rowvar: - N = X.shape[1] - else: - N = X.shape[0] - - if ddof is None: - if bias == 0: - ddof = 1 - else: - ddof = 0 - fact = float(N - ddof) - - if not rowvar: - return (dot(X.T, X.conj()) / fact).squeeze() - else: - return (dot(X, X.T.conj()) / fact).squeeze() - - -def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None): - """ - Return correlation coefficients. - - Please refer to the documentation for `cov` for more detail. The - relationship between the correlation coefficient matrix, `P`, and the - covariance matrix, `C`, is - - .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } - - The values of `P` are between -1 and 1, inclusive. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `m` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `m`. - rowvar : int, optional - If `rowvar` is non-zero (default), then each row represents a - variable, with observations in the columns. 
Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : int, optional - Default normalization is by ``(N - 1)``, where ``N`` is the number of - observations (unbiased estimate). If `bias` is 1, then - normalization is by ``N``. These values can be overridden by using - the keyword ``ddof`` in numpy versions >= 1.5. - ddof : {None, int}, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - Returns - ------- - out : ndarray - The correlation coefficient matrix of the variables. - - See Also - -------- - cov : Covariance matrix - - """ - c = cov(x, y, rowvar, bias, ddof) - if c.size == 0: - # handle empty arrays - return c - try: - d = diag(c) - except ValueError: # scalar covariance - return 1 - return c/sqrt(multiply.outer(d,d)) - -def blackman(M): - """ - Return the Blackman window. - - The Blackman window is a taper formed by using the the first three - terms of a summation of cosines. It was designed to have close to the - minimal leakage possible. It is close to optimal, only slightly worse - than a Kaiser window. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an empty - array is returned. - - Returns - ------- - out : ndarray - The window, normalized to one (the value one appears only if the - number of samples is odd). - - See Also - -------- - bartlett, hamming, hanning, kaiser - - Notes - ----- - The Blackman window is defined as - - .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) - - Most references to the Blackman window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. 
smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. It is known as a - "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. - - References - ---------- - Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. - - Examples - -------- - >>> from numpy import blackman - >>> blackman(12) - array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, - 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, - 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, - 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) - - - Plot the window and the frequency response: - - >>> from numpy import clip, log10, array, blackman, linspace - >>> from numpy.fft import fft, fftshift - >>> import matplotlib.pyplot as plt - - >>> window = blackman(51) - >>> plt.plot(window) - [] - >>> plt.title("Blackman window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = abs(fftshift(A)) - >>> freq = linspace(-0.5,0.5,len(A)) - >>> response = 20*log10(mag) - >>> response = clip(response,-100,100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Blackman window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0,M) - return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) - -def bartlett(M): - """ - Return the Bartlett window. - - The Bartlett window is very similar to a triangular window, except - that the end points are at zero. 
It is often used in signal - processing for tapering a signal, without generating too much - ripple in the frequency domain. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : array - The triangular window, normalized to one (the value one - appears only if the number of samples is odd), with the first - and last samples equal to zero. - - See Also - -------- - blackman, hamming, hanning, kaiser - - Notes - ----- - The Bartlett window is defined as - - .. math:: w(n) = \\frac{2}{M-1} \\left( - \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| - \\right) - - Most references to the Bartlett window come from the signal - processing literature, where it is used as one of many windowing - functions for smoothing values. Note that convolution with this - window produces linear interpolation. It is also known as an - apodization (which means"removing the foot", i.e. smoothing - discontinuities at the beginning and end of the sampled signal) or - tapering function. The fourier transform of the Bartlett is the product - of two sinc functions. - Note the excellent discussion in Kanasewich. - - References - ---------- - .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", - Biometrika 37, 1-16, 1950. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", - The University of Alberta Press, 1975, pp. 109-110. - .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal - Processing", Prentice-Hall, 1999, pp. 468-471. - .. [4] Wikipedia, "Window function", - http://en.wikipedia.org/wiki/Window_function - .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 429. - - - Examples - -------- - >>> np.bartlett(12) - array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, - 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, - 0.18181818, 0. 
]) - - Plot the window and its frequency response (requires SciPy and matplotlib): - - >>> from numpy import clip, log10, array, bartlett, linspace - >>> from numpy.fft import fft, fftshift - >>> import matplotlib.pyplot as plt - - >>> window = bartlett(51) - >>> plt.plot(window) - [] - >>> plt.title("Bartlett window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = abs(fftshift(A)) - >>> freq = linspace(-0.5,0.5,len(A)) - >>> response = 20*log10(mag) - >>> response = clip(response,-100,100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Bartlett window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0,M) - return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1)) - -def hanning(M): - """ - Return the Hanning window. - - The Hanning window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray, shape(M,) - The window, normalized to one (the value one - appears only if `M` is odd). - - See Also - -------- - bartlett, blackman, hamming, kaiser - - Notes - ----- - The Hanning window is defined as - - .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hanning was named for Julius van Hann, an Austrian meterologist. It is - also known as the Cosine Bell. Some authors prefer that it be called a - Hann window, to help avoid confusion with the very similar Hamming window. 
- - Most references to the Hanning window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", - The University of Alberta Press, 1975, pp. 106-108. - .. [3] Wikipedia, "Window function", - http://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. - - Examples - -------- - >>> from numpy import hanning - >>> hanning(12) - array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, - 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, - 0.07937323, 0. ]) - - Plot the window and its frequency response: - - >>> from numpy.fft import fft, fftshift - >>> import matplotlib.pyplot as plt - - >>> window = np.hanning(51) - >>> plt.plot(window) - [] - >>> plt.title("Hann window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = abs(fftshift(A)) - >>> freq = np.linspace(-0.5,0.5,len(A)) - >>> response = 20*np.log10(mag) - >>> response = np.clip(response,-100,100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of the Hann window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - # XXX: this docstring is inconsistent with other filter windows, e.g. - # Blackman and Bartlett - they should all follow the same convention for - # clarity. Either use np. 
for all numpy members (as above), or import all - # numpy members (as in Blackman and Bartlett examples) - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0,M) - return 0.5-0.5*cos(2.0*pi*n/(M-1)) - -def hamming(M): - """ - Return the Hamming window. - - The Hamming window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray - The window, normalized to one (the value one - appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hanning, kaiser - - Notes - ----- - The Hamming window is defined as - - .. math:: w(n) = 0.54 + 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and - is described in Blackman and Tukey. It was recommended for smoothing the - truncated autocovariance function in the time domain. - Most references to the Hamming window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The - University of Alberta Press, 1975, pp. 109-110. - .. [3] Wikipedia, "Window function", - http://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. 
- - Examples - -------- - >>> np.hamming(12) - array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, - 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, - 0.15302337, 0.08 ]) - - Plot the window and the frequency response: - - >>> from numpy.fft import fft, fftshift - >>> import matplotlib.pyplot as plt - - >>> window = np.hamming(51) - >>> plt.plot(window) - [] - >>> plt.title("Hamming window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Hamming window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1,float) - n = arange(0,M) - return 0.54-0.46*cos(2.0*pi*n/(M-1)) - -## Code from cephes for i0 - -_i0A = [ --4.41534164647933937950E-18, - 3.33079451882223809783E-17, --2.43127984654795469359E-16, - 1.71539128555513303061E-15, --1.16853328779934516808E-14, - 7.67618549860493561688E-14, --4.85644678311192946090E-13, - 2.95505266312963983461E-12, --1.72682629144155570723E-11, - 9.67580903537323691224E-11, --5.18979560163526290666E-10, - 2.65982372468238665035E-9, --1.30002500998624804212E-8, - 6.04699502254191894932E-8, --2.67079385394061173391E-7, - 1.11738753912010371815E-6, --4.41673835845875056359E-6, - 1.64484480707288970893E-5, --5.75419501008210370398E-5, - 1.88502885095841655729E-4, --5.76375574538582365885E-4, - 1.63947561694133579842E-3, --4.32430999505057594430E-3, - 1.05464603945949983183E-2, --2.37374148058994688156E-2, - 4.93052842396707084878E-2, --9.49010970480476444210E-2, - 1.71620901522208775349E-1, 
--3.04682672343198398683E-1, - 6.76795274409476084995E-1] - -_i0B = [ --7.23318048787475395456E-18, --4.83050448594418207126E-18, - 4.46562142029675999901E-17, - 3.46122286769746109310E-17, --2.82762398051658348494E-16, --3.42548561967721913462E-16, - 1.77256013305652638360E-15, - 3.81168066935262242075E-15, --9.55484669882830764870E-15, --4.15056934728722208663E-14, - 1.54008621752140982691E-14, - 3.85277838274214270114E-13, - 7.18012445138366623367E-13, --1.79417853150680611778E-12, --1.32158118404477131188E-11, --3.14991652796324136454E-11, - 1.18891471078464383424E-11, - 4.94060238822496958910E-10, - 3.39623202570838634515E-9, - 2.26666899049817806459E-8, - 2.04891858946906374183E-7, - 2.89137052083475648297E-6, - 6.88975834691682398426E-5, - 3.36911647825569408990E-3, - 8.04490411014108831608E-1] - -def _chbevl(x, vals): - b0 = vals[0] - b1 = 0.0 - - for i in xrange(1,len(vals)): - b2 = b1 - b1 = b0 - b0 = x*b1 - b2 + vals[i] - - return 0.5*(b0 - b2) - -def _i0_1(x): - return exp(x) * _chbevl(x/2.0-2, _i0A) - -def _i0_2(x): - return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) - -def i0(x): - """ - Modified Bessel function of the first kind, order 0. - - Usually denoted :math:`I_0`. This function does broadcast, but will *not* - "up-cast" int dtype arguments unless accompanied by at least one float or - complex dtype argument (see Raises below). - - Parameters - ---------- - x : array_like, dtype float or complex - Argument of the Bessel function. - - Returns - ------- - out : ndarray, shape = x.shape, dtype = x.dtype - The modified Bessel function evaluated at each of the elements of `x`. - - Raises - ------ - TypeError: array cannot be safely cast to required type - If argument consists exclusively of int dtypes. 
- - See Also - -------- - scipy.special.iv, scipy.special.ive - - Notes - ----- - We use the algorithm published by Clenshaw [1]_ and referenced by - Abramowitz and Stegun [2]_, for which the function domain is partitioned - into the two intervals [0,8] and (8,inf), and Chebyshev polynomial - expansions are employed in each interval. Relative error on the domain - [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 - with an rms of 1.4e-16 (n = 30000). - - References - ---------- - .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions," in - *National Physical Laboratory Mathematical Tables*, vol. 5, London: - Her Majesty's Stationery Office, 1962. - .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical - Functions*, 10th printing, New York: Dover, 1964, pp. 379. - http://www.math.sfu.ca/~cbm/aands/page_379.htm - .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html - - Examples - -------- - >>> np.i0([0.]) - array(1.0) - >>> np.i0([0., 1. + 2j]) - array([ 1.00000000+0.j , 0.18785373+0.64616944j]) - - """ - x = atleast_1d(x).copy() - y = empty_like(x) - ind = (x<0) - x[ind] = -x[ind] - ind = (x<=8.0) - y[ind] = _i0_1(x[ind]) - ind2 = ~ind - y[ind2] = _i0_2(x[ind2]) - return y.squeeze() - -## End of cephes code for i0 - -def kaiser(M,beta): - """ - Return the Kaiser window. - - The Kaiser window is a taper formed by using a Bessel function. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - beta : float - Shape parameter for window. - - Returns - ------- - out : array - The window, normalized to one (the value one - appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hamming, hanning - - Notes - ----- - The Kaiser window is defined as - - .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} - \\right)/I_0(\\beta) - - with - - .. 
math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, - - where :math:`I_0` is the modified zeroth-order Bessel function. - - The Kaiser was named for Jim Kaiser, who discovered a simple approximation - to the DPSS window based on Bessel functions. - The Kaiser window is a very good approximation to the Digital Prolate - Spheroidal Sequence, or Slepian window, which is the transform which - maximizes the energy in the main lobe of the window relative to total - energy. - - The Kaiser can approximate many other windows by varying the beta - parameter. - - ==== ======================= - beta Window shape - ==== ======================= - 0 Rectangular - 5 Similar to a Hamming - 6 Similar to a Hanning - 8.6 Similar to a Blackman - ==== ======================= - - A beta value of 14 is probably a good starting point. Note that as beta - gets large, the window narrows, and so the number of samples needs to be - large enough to sample the increasingly narrow spike, otherwise nans will - get returned. - - - Most references to the Kaiser window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by - digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. - John Wiley and Sons, New York, (1966). - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The - University of Alberta Press, 1975, pp. 177-178. - .. 
[3] Wikipedia, "Window function", - http://en.wikipedia.org/wiki/Window_function - - Examples - -------- - >>> from numpy import kaiser - >>> kaiser(12, 14) - array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, - 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, - 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, - 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) - - - Plot the window and the frequency response: - - >>> from numpy import clip, log10, array, kaiser, linspace - >>> from numpy.fft import fft, fftshift - >>> import matplotlib.pyplot as plt - - >>> window = kaiser(51, 14) - >>> plt.plot(window) - [] - >>> plt.title("Kaiser window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = abs(fftshift(A)) - >>> freq = linspace(-0.5,0.5,len(A)) - >>> response = 20*log10(mag) - >>> response = clip(response,-100,100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Kaiser window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - from numpy.dual import i0 - if M == 1: - return np.array([1.]) - n = arange(0,M) - alpha = (M-1)/2.0 - return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) - -def sinc(x): - """ - Return the sinc function. - - The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. - - Parameters - ---------- - x : ndarray - Array (possibly multi-dimensional) of values for which to to - calculate ``sinc(x)``. - - Returns - ------- - out : ndarray - ``sinc(x)``, which has the same shape as the input. - - Notes - ----- - ``sinc(0)`` is the limit value 1. - - The name sinc is short for "sine cardinal" or "sinus cardinalis". 
- - The sinc function is used in various signal processing applications, - including in anti-aliasing, in the construction of a - Lanczos resampling filter, and in interpolation. - - For bandlimited interpolation of discrete-time signals, the ideal - interpolation kernel is proportional to the sinc function. - - References - ---------- - .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web - Resource. http://mathworld.wolfram.com/SincFunction.html - .. [2] Wikipedia, "Sinc function", - http://en.wikipedia.org/wiki/Sinc_function - - Examples - -------- - >>> x = np.arange(-20., 21.)/5. - >>> np.sinc(x) - array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, - -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, - 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, - 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, - -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, - 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, - 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, - 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, - 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, - -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, - -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, - 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, - -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, - -4.92362781e-02, -3.89804309e-17]) - - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, np.sinc(x)) - [] - >>> plt.title("Sinc Function") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("X") - - >>> plt.show() - - It works in 2-D as well: - - >>> x = np.arange(-200., 201.)/50. - >>> xx = np.outer(x, x) - >>> plt.imshow(np.sinc(xx)) - - - """ - x = np.asanyarray(x) - y = pi* where(x == 0, 1.0e-20, x) - return sin(y)/y - -def msort(a): - """ - Return a copy of an array sorted along the first axis. - - Parameters - ---------- - a : array_like - Array to be sorted. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. 
- - See Also - -------- - sort - - Notes - ----- - ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. - - """ - b = array(a,subok=True,copy=True) - b.sort(0) - return b - -def median(a, axis=None, out=None, overwrite_input=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional - Axis along which the medians are computed. The default (axis=None) - is to compute the median along a flattened version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool optional - If True, then allow use of memory of input array (a) for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. Note that, if `overwrite_input` is True and the input - is not already an ndarray, an error will be raised. - - Returns - ------- - median : ndarray - A new array holding the result (unless `out` is specified, in - which case that array is returned instead). If the input contains - integers, or floats of smaller precision than 64, then the output - data-type is float64. Otherwise, the output data-type is the same - as that of the input. - - See Also - -------- - mean, percentile - - Notes - ----- - Given a vector V of length N, the median of V is the middle value of - a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is - odd. When N is even, it is the average of the two middle values of - ``V_sorted``. 
- - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.median(a) - 3.5 - >>> np.median(a, axis=0) - array([ 6.5, 4.5, 2.5]) - >>> np.median(a, axis=1) - array([ 7., 2.]) - >>> m = np.median(a, axis=0) - >>> out = np.zeros_like(m) - >>> np.median(a, axis=0, out=m) - array([ 6.5, 4.5, 2.5]) - >>> m - array([ 6.5, 4.5, 2.5]) - >>> b = a.copy() - >>> np.median(b, axis=1, overwrite_input=True) - array([ 7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.median(b, axis=None, overwrite_input=True) - 3.5 - >>> assert not np.all(a==b) - - """ - if overwrite_input: - if axis is None: - sorted = a.ravel() - sorted.sort() - else: - a.sort(axis=axis) - sorted = a - else: - sorted = sort(a, axis=axis) - if sorted.shape == (): - # make 0-D arrays work - return sorted.item() - if axis is None: - axis = 0 - indexer = [slice(None)] * sorted.ndim - index = int(sorted.shape[axis]/2) - if sorted.shape[axis] % 2 == 1: - # index with slice to allow mean (below) to work - indexer[axis] = slice(index, index+1) - else: - indexer[axis] = slice(index-1, index+1) - # Use mean in odd and even case to coerce data type - # and check, use out array. - return mean(sorted[indexer], axis=axis, out=out) - -def percentile(a, q, axis=None, out=None, overwrite_input=False): - """ - Compute the qth percentile of the data along the specified axis. - - Returns the qth percentile of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : float in range of [0,100] (or sequence of floats) - Percentile to compute which must be between 0 and 100 inclusive. - axis : int, optional - Axis along which the percentiles are computed. The default (None) - is to compute the median along a flattened version of the array. - out : ndarray, optional - Alternative output array in which to place the result. 
It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. - Default is False. Note that, if `overwrite_input` is True and the - input is not already an array, an error will be raised. - - Returns - ------- - pcntile : ndarray - A new array holding the result (unless `out` is specified, in - which case that array is returned instead). If the input contains - integers, or floats of smaller precision than 64, then the output - data-type is float64. Otherwise, the output data-type is the same - as that of the input. - - See Also - -------- - mean, median - - Notes - ----- - Given a vector V of length N, the qth percentile of V is the qth ranked - value in a sorted copy of V. A weighted average of the two nearest - neighbors is used if the normalized ranking does not match q exactly. - The same as the median if ``q=0.5``, the same as the minimum if ``q=0`` - and the same as the maximum if ``q=1``. 
- - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.percentile(a, 50) - 3.5 - >>> np.percentile(a, 0.5, axis=0) - array([ 6.5, 4.5, 2.5]) - >>> np.percentile(a, 50, axis=1) - array([ 7., 2.]) - - >>> m = np.percentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.percentile(a, 50, axis=0, out=m) - array([ 6.5, 4.5, 2.5]) - >>> m - array([ 6.5, 4.5, 2.5]) - - >>> b = a.copy() - >>> np.percentile(b, 50, axis=1, overwrite_input=True) - array([ 7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.percentile(b, 50, axis=None, overwrite_input=True) - 3.5 - - """ - a = np.asarray(a) - - if q == 0: - return a.min(axis=axis, out=out) - elif q == 100: - return a.max(axis=axis, out=out) - - if overwrite_input: - if axis is None: - sorted = a.ravel() - sorted.sort() - else: - a.sort(axis=axis) - sorted = a - else: - sorted = sort(a, axis=axis) - if axis is None: - axis = 0 - - return _compute_qth_percentile(sorted, q, axis, out) - -# handle sequence of q's without calling sort multiple times -def _compute_qth_percentile(sorted, q, axis, out): - if not isscalar(q): - p = [_compute_qth_percentile(sorted, qi, axis, None) - for qi in q] - - if out is not None: - out.flat = p - - return p - - q = q / 100.0 - if (q < 0) or (q > 1): - raise ValueError, "percentile must be either in the range [0,100]" - - indexer = [slice(None)] * sorted.ndim - Nx = sorted.shape[axis] - index = q*(Nx-1) - i = int(index) - if i == index: - indexer[axis] = slice(i, i+1) - weights = array(1) - sumval = 1.0 - else: - indexer[axis] = slice(i, i+2) - j = i + 1 - weights = array([(j - index), (index - i)],float) - wshape = [1]*sorted.ndim - wshape[axis] = 2 - weights.shape = wshape - sumval = weights.sum() - - # Use add.reduce in both cases to coerce data type as well as - # check and use out array. 
- return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval - -def trapz(y, x=None, dx=1.0, axis=-1): - """ - Integrate along the given axis using the composite trapezoidal rule. - - Integrate `y` (`x`) along given axis. - - Parameters - ---------- - y : array_like - Input array to integrate. - x : array_like, optional - If `x` is None, then spacing between all `y` elements is `dx`. - dx : scalar, optional - If `x` is None, spacing given by `dx` is assumed. Default is 1. - axis : int, optional - Specify the axis. - - Returns - ------- - out : float - Definite integral as approximated by trapezoidal rule. - - See Also - -------- - sum, cumsum - - Notes - ----- - Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will - be taken from `y` array, by default x-axis distances between points will be - 1.0, alternatively they can be provided with `x` array or with `dx` scalar. - Return value will be equal to combined area under the red lines. - - - References - ---------- - .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule - - .. 
[2] Illustration image: - http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png - - Examples - -------- - >>> np.trapz([1,2,3]) - 4.0 - >>> np.trapz([1,2,3], x=[4,6,8]) - 8.0 - >>> np.trapz([1,2,3], dx=2) - 8.0 - >>> a = np.arange(6).reshape(2, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.trapz(a, axis=0) - array([ 1.5, 2.5, 3.5]) - >>> np.trapz(a, axis=1) - array([ 2., 8.]) - - """ - y = asanyarray(y) - if x is None: - d = dx - else: - x = asanyarray(x) - if x.ndim == 1: - d = diff(x) - # reshape to correct shape - shape = [1]*y.ndim - shape[axis] = d.shape[0] - d = d.reshape(shape) - else: - d = diff(x, axis=axis) - nd = len(y.shape) - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd - slice1[axis] = slice(1,None) - slice2[axis] = slice(None,-1) - try: - ret = (d * (y[slice1] +y [slice2]) / 2.0).sum(axis) - except ValueError: # Operations didn't work, cast to ndarray - d = np.asarray(d) - y = np.asarray(y) - ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) - return ret - -#always succeed -def add_newdoc(place, obj, doc): - """Adds documentation to obj which is in module place. - - If doc is a string add it to obj as a docstring - - If doc is a tuple, then the first element is interpreted as - an attribute of obj and the second as the docstring - (method, docstring) - - If doc is a list, then each element of the list should be a - sequence of length two --> [(method1, docstring1), - (method2, docstring2), ...] - - This routine never raises an error. - """ - try: - new = {} - exec 'from %s import %s' % (place, obj) in new - if isinstance(doc, str): - add_docstring(new[obj], doc.strip()) - elif isinstance(doc, tuple): - add_docstring(getattr(new[obj], doc[0]), doc[1].strip()) - elif isinstance(doc, list): - for val in doc: - add_docstring(getattr(new[obj], val[0]), val[1].strip()) - except: - pass - - -# From matplotlib -def meshgrid(x,y): - """ - Return coordinate matrices from two coordinate vectors. 
- - Parameters - ---------- - x, y : ndarray - Two 1-D arrays representing the x and y coordinates of a grid. - - Returns - ------- - X, Y : ndarray - For vectors `x`, `y` with lengths ``Nx=len(x)`` and ``Ny=len(y)``, - return `X`, `Y` where `X` and `Y` are ``(Ny, Nx)`` shaped arrays - with the elements of `x` and y repeated to fill the matrix along - the first dimension for `x`, the second for `y`. - - See Also - -------- - index_tricks.mgrid : Construct a multi-dimensional "meshgrid" - using indexing notation. - index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" - using indexing notation. - - Examples - -------- - >>> X, Y = np.meshgrid([1,2,3], [4,5,6,7]) - >>> X - array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3], - [1, 2, 3]]) - >>> Y - array([[4, 4, 4], - [5, 5, 5], - [6, 6, 6], - [7, 7, 7]]) - - `meshgrid` is very useful to evaluate functions on a grid. - - >>> x = np.arange(-5, 5, 0.1) - >>> y = np.arange(-5, 5, 0.1) - >>> xx, yy = np.meshgrid(x, y) - >>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2) - - """ - x = asarray(x) - y = asarray(y) - numRows, numCols = len(y), len(x) # yes, reversed - x = x.reshape(1,numCols) - X = x.repeat(numRows, axis=0) - - y = y.reshape(numRows,1) - Y = y.repeat(numCols, axis=1) - return X, Y - -def delete(arr, obj, axis=None): - """ - Return a new array with sub-arrays along an axis deleted. - - Parameters - ---------- - arr : array_like - Input array. - obj : slice, int or array of ints - Indicate which sub-arrays to remove. - axis : int, optional - The axis along which to delete the subarray defined by `obj`. - If `axis` is None, `obj` is applied to the flattened array. - - Returns - ------- - out : ndarray - A copy of `arr` with the elements specified by `obj` removed. Note - that `delete` does not occur in-place. If `axis` is None, `out` is - a flattened array. - - See Also - -------- - insert : Insert elements into an array. - append : Append elements at the end of an array. 
- - Examples - -------- - >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) - >>> arr - array([[ 1, 2, 3, 4], - [ 5, 6, 7, 8], - [ 9, 10, 11, 12]]) - >>> np.delete(arr, 1, 0) - array([[ 1, 2, 3, 4], - [ 9, 10, 11, 12]]) - - >>> np.delete(arr, np.s_[::2], 1) - array([[ 2, 4], - [ 6, 8], - [10, 12]]) - >>> np.delete(arr, [1,3,5], None) - array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - - arr = asarray(arr) - ndim = arr.ndim - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim; - axis = ndim-1; - if ndim == 0: - if wrap: - return wrap(arr) - else: - return arr.copy() - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - if isinstance(obj, (int, long, integer)): - if (obj < 0): obj += N - if (obj < 0 or obj >=N): - raise ValueError( - "invalid entry") - newshape[axis]-=1; - new = empty(newshape, arr.dtype, arr.flags.fnc) - slobj[axis] = slice(None, obj) - new[slobj] = arr[slobj] - slobj[axis] = slice(obj,None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(obj+1,None) - new[slobj] = arr[slobj2] - elif isinstance(obj, slice): - start, stop, step = obj.indices(N) - numtodel = len(xrange(start, stop, step)) - if numtodel <= 0: - if wrap: - return wrap(new) - else: - return arr.copy() - newshape[axis] -= numtodel - new = empty(newshape, arr.dtype, arr.flags.fnc) - # copy initial chunk - if start == 0: - pass - else: - slobj[axis] = slice(None, start) - new[slobj] = arr[slobj] - # copy end chunck - if stop == N: - pass - else: - slobj[axis] = slice(stop-numtodel,None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(stop, None) - new[slobj] = arr[slobj2] - # copy middle pieces - if step == 1: - pass - else: # use array indexing. 
- obj = arange(start, stop, step, dtype=intp) - all = arange(start, stop, dtype=intp) - obj = setdiff1d(all, obj) - slobj[axis] = slice(start, stop-numtodel) - slobj2 = [slice(None)]*ndim - slobj2[axis] = obj - new[slobj] = arr[slobj2] - else: # default behavior - obj = array(obj, dtype=intp, copy=0, ndmin=1) - all = arange(N, dtype=intp) - obj = setdiff1d(all, obj) - slobj[axis] = obj - new = arr[slobj] - if wrap: - return wrap(new) - else: - return new - -def insert(arr, obj, values, axis=None): - """ - Insert values along the given axis before the given indices. - - Parameters - ---------- - arr : array_like - Input array. - obj : int, slice or sequence of ints - Object that defines the index or indices before which `values` is - inserted. - values : array_like - Values to insert into `arr`. If the type of `values` is different - from that of `arr`, `values` is converted to the type of `arr`. - axis : int, optional - Axis along which to insert `values`. If `axis` is None then `arr` - is flattened first. - - Returns - ------- - out : ndarray - A copy of `arr` with `values` inserted. Note that `insert` - does not occur in-place: a new array is returned. If - `axis` is None, `out` is a flattened array. - - See Also - -------- - append : Append elements at the end of an array. - delete : Delete elements from an array. 
- - Examples - -------- - >>> a = np.array([[1, 1], [2, 2], [3, 3]]) - >>> a - array([[1, 1], - [2, 2], - [3, 3]]) - >>> np.insert(a, 1, 5) - array([1, 5, 1, 2, 2, 3, 3]) - >>> np.insert(a, 1, 5, axis=1) - array([[1, 5, 1], - [2, 5, 2], - [3, 5, 3]]) - - >>> b = a.flatten() - >>> b - array([1, 1, 2, 2, 3, 3]) - >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, 6, 2, 2, 3, 3]) - - >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, 2, 6, 2, 3, 3]) - - >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, 0, 2, 2, 3, 3]) - - >>> x = np.arange(8).reshape(2, 4) - >>> idx = (1, 3) - >>> np.insert(x, idx, 999, axis=1) - array([[ 0, 999, 1, 2, 999, 3], - [ 4, 999, 5, 6, 999, 7]]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = ndim-1 - if (ndim == 0): - arr = arr.copy() - arr[...] 
= values - if wrap: - return wrap(arr) - else: - return arr - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - if isinstance(obj, (int, long, integer)): - if (obj < 0): obj += N - if obj < 0 or obj > N: - raise ValueError( - "index (%d) out of range (0<=index<=%d) "\ - "in dimension %d" % (obj, N, axis)) - newshape[axis] += 1; - new = empty(newshape, arr.dtype, arr.flags.fnc) - slobj[axis] = slice(None, obj) - new[slobj] = arr[slobj] - slobj[axis] = obj - new[slobj] = values - slobj[axis] = slice(obj+1,None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(obj,None) - new[slobj] = arr[slobj2] - if wrap: - return wrap(new) - return new - - elif isinstance(obj, slice): - # turn it into a range object - obj = arange(*obj.indices(N),**{'dtype':intp}) - - # get two sets of indices - # one is the indices which will hold the new stuff - # two is the indices where arr will be copied over - - obj = asarray(obj, dtype=intp) - numnew = len(obj) - index1 = obj + arange(numnew) - index2 = setdiff1d(arange(numnew+N),index1) - newshape[axis] += numnew - new = empty(newshape, arr.dtype, arr.flags.fnc) - slobj2 = [slice(None)]*ndim - slobj[axis] = index1 - slobj2[axis] = index2 - new[slobj] = values - new[slobj2] = arr - - if wrap: - return wrap(new) - return new - -def append(arr, values, axis=None): - """ - Append values to the end of an array. - - Parameters - ---------- - arr : array_like - Values are appended to a copy of this array. - values : array_like - These values are appended to a copy of `arr`. It must be of the - correct shape (the same shape as `arr`, excluding `axis`). If `axis` - is not specified, `values` can be any shape and will be flattened - before use. - axis : int, optional - The axis along which `values` are appended. If `axis` is not given, - both `arr` and `values` are flattened before use. - - Returns - ------- - out : ndarray - A copy of `arr` with `values` appended to `axis`. 
Note that `append` - does not occur in-place: a new array is allocated and filled. If - `axis` is None, `out` is a flattened array. - - See Also - -------- - insert : Insert elements into an array. - delete : Delete elements from an array. - - Examples - -------- - >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) - array([1, 2, 3, 4, 5, 6, 7, 8, 9]) - - When `axis` is specified, `values` must have the correct shape. - - >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) - array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) - Traceback (most recent call last): - ... - ValueError: arrays must have same number of dimensions - - """ - arr = asanyarray(arr) - if axis is None: - if arr.ndim != 1: - arr = arr.ravel() - values = ravel(values) - axis = arr.ndim-1 - return concatenate((arr, values), axis=axis) diff --git a/numpy-1.6.2/numpy/lib/index_tricks.py b/numpy-1.6.2/numpy/lib/index_tricks.py deleted file mode 100644 index 69539d4821..0000000000 --- a/numpy-1.6.2/numpy/lib/index_tricks.py +++ /dev/null @@ -1,832 +0,0 @@ -__all__ = ['ravel_multi_index', - 'unravel_index', - 'mgrid', - 'ogrid', - 'r_', 'c_', 's_', - 'index_exp', 'ix_', - 'ndenumerate','ndindex', - 'fill_diagonal','diag_indices','diag_indices_from'] - -import sys -import numpy.core.numeric as _nx -from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod, - arange ) -from numpy.core.numerictypes import find_common_type -import math - -import function_base -import numpy.matrixlib as matrix -from function_base import diff -from numpy.lib._compiled_base import ravel_multi_index, unravel_index -makemat = matrix.matrix - -def ix_(*args): - """ - Construct an open mesh from multiple sequences. - - This function takes N 1-D sequences and returns N outputs with N - dimensions each, such that the shape is 1 in all but one dimension - and the dimension with the non-unit shape value cycles through all - N dimensions. 
- - Using `ix_` one can quickly construct index arrays that will index - the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array - ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. - - Parameters - ---------- - args : 1-D sequences - - Returns - ------- - out : tuple of ndarrays - N arrays with N dimensions each, with N the number of input - sequences. Together these arrays form an open mesh. - - See Also - -------- - ogrid, mgrid, meshgrid - - Examples - -------- - >>> a = np.arange(10).reshape(2, 5) - >>> a - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> ixgrid = np.ix_([0,1], [2,4]) - >>> ixgrid - (array([[0], - [1]]), array([[2, 4]])) - >>> ixgrid[0].shape, ixgrid[1].shape - ((2, 1), (1, 2)) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - - """ - out = [] - nd = len(args) - baseshape = [1]*nd - for k in range(nd): - new = _nx.asarray(args[k]) - if (new.ndim != 1): - raise ValueError, "Cross index must be 1 dimensional" - if issubclass(new.dtype.type, _nx.bool_): - new = new.nonzero()[0] - baseshape[k] = len(new) - new = new.reshape(tuple(baseshape)) - out.append(new) - baseshape[k] = 1 - return tuple(out) - -class nd_grid(object): - """ - Construct a multi-dimensional "meshgrid". - - ``grid = nd_grid()`` creates an instance which will return a mesh-grid - when indexed. The dimension and number of the output arrays are equal - to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then the - integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - If instantiated with an argument of ``sparse=True``, the mesh-grid is - open (or not fleshed out) so that only one-dimension of each returned - argument is greater than 1. - - Parameters - ---------- - sparse : bool, optional - Whether the grid is sparse or not. Default is False. 
- - Notes - ----- - Two instances of `nd_grid` are made available in the NumPy namespace, - `mgrid` and `ogrid`:: - - mgrid = nd_grid(sparse=False) - ogrid = nd_grid(sparse=True) - - Users should use these pre-defined instances instead of using `nd_grid` - directly. - - Examples - -------- - >>> mgrid = np.lib.index_tricks.nd_grid() - >>> mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> mgrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - - >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True) - >>> ogrid[0:5,0:5] - [array([[0], - [1], - [2], - [3], - [4]]), array([[0, 1, 2, 3, 4]])] - - """ - def __init__(self, sparse=False): - self.sparse = sparse - def __getitem__(self,key): - try: - size = [] - typ = int - for k in range(len(key)): - step = key[k].step - start = key[k].start - if start is None: start=0 - if step is None: step=1 - if isinstance(step, complex): - size.append(int(abs(step))) - typ = float - else: - size.append(math.ceil((key[k].stop - start)/(step*1.0))) - if isinstance(step, float) or \ - isinstance(start, float) or \ - isinstance(key[k].stop, float): - typ = float - if self.sparse: - nn = map(lambda x,t: _nx.arange(x, dtype=t), size, \ - (typ,)*len(size)) - else: - nn = _nx.indices(size, typ) - for k in range(len(size)): - step = key[k].step - start = key[k].start - if start is None: start=0 - if step is None: step=1 - if isinstance(step, complex): - step = int(abs(step)) - if step != 1: - step = (key[k].stop - start)/float(step-1) - nn[k] = (nn[k]*step+start) - if self.sparse: - slobj = [_nx.newaxis]*len(size) - for k in range(len(size)): - slobj[k] = slice(None,None) - nn[k] = nn[k][slobj] - slobj[k] = _nx.newaxis - return nn - except (IndexError, TypeError): - step = key.step - stop = key.stop - start = key.start - if start is None: start = 0 - if isinstance(step, 
complex): - step = abs(step) - length = int(step) - if step != 1: - step = (key.stop-start)/float(step-1) - stop = key.stop+step - return _nx.arange(0, length,1, float)*step + start - else: - return _nx.arange(start, stop, step) - - def __getslice__(self,i,j): - return _nx.arange(i,j) - - def __len__(self): - return 0 - -mgrid = nd_grid(sparse=False) -ogrid = nd_grid(sparse=True) -mgrid.__doc__ = None # set in numpy.add_newdocs -ogrid.__doc__ = None # set in numpy.add_newdocs - -class AxisConcatenator(object): - """ - Translates slice objects to concatenation along an axis. - - For detailed documentation on usage, see `r_`. - - """ - def _retval(self, res): - if self.matrix: - oldndim = res.ndim - res = makemat(res) - if oldndim == 1 and self.col: - res = res.T - self.axis = self._axis - self.matrix = self._matrix - self.col = 0 - return res - - def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): - self._axis = axis - self._matrix = matrix - self.axis = axis - self.matrix = matrix - self.col = 0 - self.trans1d = trans1d - self.ndmin = ndmin - - def __getitem__(self,key): - trans1d = self.trans1d - ndmin = self.ndmin - if isinstance(key, str): - frame = sys._getframe().f_back - mymat = matrix.bmat(key,frame.f_globals,frame.f_locals) - return mymat - if type(key) is not tuple: - key = (key,) - objs = [] - scalars = [] - arraytypes = [] - scalartypes = [] - for k in range(len(key)): - scalar = False - if type(key[k]) is slice: - step = key[k].step - start = key[k].start - stop = key[k].stop - if start is None: start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size = int(abs(step)) - newobj = function_base.linspace(start, stop, num=size) - else: - newobj = _nx.arange(start, stop, step) - if ndmin > 1: - newobj = array(newobj,copy=False,ndmin=ndmin) - if trans1d != -1: - newobj = newobj.swapaxes(-1,trans1d) - elif isinstance(key[k],str): - if k != 0: - raise ValueError, "special directives must be the"\ - "first entry." 
- key0 = key[0] - if key0 in 'rc': - self.matrix = True - self.col = (key0 == 'c') - continue - if ',' in key0: - vec = key0.split(',') - try: - self.axis, ndmin = \ - [int(x) for x in vec[:2]] - if len(vec) == 3: - trans1d = int(vec[2]) - continue - except: - raise ValueError, "unknown special directive" - try: - self.axis = int(key[k]) - continue - except (ValueError, TypeError): - raise ValueError, "unknown special directive" - elif type(key[k]) in ScalarType: - newobj = array(key[k],ndmin=ndmin) - scalars.append(k) - scalar = True - scalartypes.append(newobj.dtype) - else: - newobj = key[k] - if ndmin > 1: - tempobj = array(newobj, copy=False, subok=True) - newobj = array(newobj, copy=False, subok=True, - ndmin=ndmin) - if trans1d != -1 and tempobj.ndim < ndmin: - k2 = ndmin-tempobj.ndim - if (trans1d < 0): - trans1d += k2 + 1 - defaxes = range(ndmin) - k1 = trans1d - axes = defaxes[:k1] + defaxes[k2:] + \ - defaxes[k1:k2] - newobj = newobj.transpose(axes) - del tempobj - objs.append(newobj) - if not scalar and isinstance(newobj, _nx.ndarray): - arraytypes.append(newobj.dtype) - - # Esure that scalars won't up-cast unless warranted - final_dtype = find_common_type(arraytypes, scalartypes) - if final_dtype is not None: - for k in scalars: - objs[k] = objs[k].astype(final_dtype) - - res = _nx.concatenate(tuple(objs),axis=self.axis) - return self._retval(res) - - def __getslice__(self,i,j): - res = _nx.arange(i,j) - return self._retval(res) - - def __len__(self): - return 0 - -# separate classes are used here instead of just making r_ = concatentor(0), -# etc. because otherwise we couldn't get the doc string to come out right -# in help(r_) - -class RClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the first axis. - - This is a simple way to build up arrays quickly. There are two use cases. - - 1. If the index expression contains comma separated arrays, then stack - them along their first axis. - 2. 
If the index expression contains slice notation or scalars then create - a 1-D array with a range indicated by the slice notation. - - If slice notation is used, the syntax ``start:stop:step`` is equivalent - to ``np.arange(start, stop, step)`` inside of the brackets. However, if - ``step`` is an imaginary number (i.e. 100j) then its integer portion is - interpreted as a number-of-points desired and the start and stop are - inclusive. In other words ``start:stop:stepj`` is interpreted as - ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. - After expansion of slice notation, all comma separated sequences are - concatenated together. - - Optional character strings placed as the first element of the index - expression can be used to change the output. The strings 'r' or 'c' result - in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. - - A string integer specifies which axis to stack multiple comma separated - arrays along. A string of two comma-separated integers allows indication - of the minimum number of dimensions to force each entry into as the - second integer (the axis to concatenate along is still the first integer). - - A string with three comma-separated integers allows specification of the - axis to concatenate along, the minimum number of dimensions to force the - entries to, and which axis should contain the start of the arrays which - are less than the specified number of dimensions. In other words the third - integer allows you to specify where the 1's should be placed in the shape - of the arrays that have their shapes upgraded. By default, they are placed - in the front of the shape tuple. The third argument allows you to specify - where the start of the array should be instead. 
Thus, a third argument of - '0' would place the 1's at the end of the array shape. Negative integers - specify where in the new shape tuple the last dimension of upgraded arrays - should be placed, so the default is '-1'. - - Parameters - ---------- - Not a function, so takes no parameters - - - Returns - ------- - A concatenated ndarray or matrix. - - See Also - -------- - concatenate : Join a sequence of arrays together. - c_ : Translates slice objects to concatenation along the second axis. - - Examples - -------- - >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) - >>> np.r_[-1:1:6j, [0]*3, 5, 6] - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) - - String integers specify the axis to concatenate along or the minimum - number of dimensions to force entries into. - - >>> a = np.array([[0, 1, 2], [3, 4, 5]]) - >>> np.r_['-1', a, a] # concatenate along last axis - array([[0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5]]) - >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 - array([[1, 2, 3], - [4, 5, 6]]) - - >>> np.r_['0,2,0', [1,2,3], [4,5,6]] - array([[1], - [2], - [3], - [4], - [5], - [6]]) - >>> np.r_['1,2,0', [1,2,3], [4,5,6]] - array([[1, 4], - [2, 5], - [3, 6]]) - - Using 'r' or 'c' as a first string argument creates a matrix. - - >>> np.r_['r',[1,2,3], [4,5,6]] - matrix([[1, 2, 3, 4, 5, 6]]) - - """ - def __init__(self): - AxisConcatenator.__init__(self, 0) - -r_ = RClass() - -class CClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the second axis. - - This is short-hand for ``np.r_['-1,2,0', index expression]``, which is - useful because of its common occurrence. In particular, arrays will be - stacked along their last axis after being upgraded to at least 2-D with - 1's post-pended to the shape (column vectors made out of 1-D arrays). - - For detailed documentation, see `r_`. 
- - Examples - -------- - >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([[1, 2, 3, 0, 0, 4, 5, 6]]) - - """ - def __init__(self): - AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) - -c_ = CClass() - -class ndenumerate(object): - """ - Multidimensional index iterator. - - Return an iterator yielding pairs of array coordinates and values. - - Parameters - ---------- - a : ndarray - Input array. - - See Also - -------- - ndindex, flatiter - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> for index, x in np.ndenumerate(a): - ... print index, x - (0, 0) 1 - (0, 1) 2 - (1, 0) 3 - (1, 1) 4 - - """ - def __init__(self, arr): - self.iter = asarray(arr).flat - - def next(self): - """ - Standard iterator method, returns the index tuple and array value. - - Returns - ------- - coords : tuple of ints - The indices of the current iteration. - val : scalar - The array element of the current iteration. - - """ - return self.iter.coords, self.iter.next() - - def __iter__(self): - return self - - -class ndindex(object): - """ - An N-dimensional iterator object to index arrays. - - Given the shape of an array, an `ndindex` instance iterates over - the N-dimensional index of the array. At each iteration a tuple - of indices is returned, the last dimension is iterated over first. - - Parameters - ---------- - `*args` : ints - The size of each dimension of the array. - - See Also - -------- - ndenumerate, flatiter - - Examples - -------- - >>> for index in np.ndindex(3, 2, 1): - ... 
print index - (0, 0, 0) - (0, 1, 0) - (1, 0, 0) - (1, 1, 0) - (2, 0, 0) - (2, 1, 0) - - """ - - def __init__(self, *args): - if len(args) == 1 and isinstance(args[0], tuple): - args = args[0] - self.nd = len(args) - self.ind = [0]*self.nd - self.index = 0 - self.maxvals = args - tot = 1 - for k in range(self.nd): - tot *= args[k] - self.total = tot - - def _incrementone(self, axis): - if (axis < 0): # base case - return - if (self.ind[axis] < self.maxvals[axis]-1): - self.ind[axis] += 1 - else: - self.ind[axis] = 0 - self._incrementone(axis-1) - - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - `ndincr` takes care of the "wrapping around" of the axes. - It is called by `ndindex.next` and not normally used directly. - - """ - self._incrementone(self.nd-1) - - def next(self): - """ - Standard iterator method, updates the index and returns the index tuple. - - Returns - ------- - val : tuple of ints - Returns a tuple containing the indices of the current iteration. - - """ - if (self.index >= self.total): - raise StopIteration - val = tuple(self.ind) - self.index += 1 - self.ndincr() - return val - - def __iter__(self): - return self - - - - -# You can do all this with slice() plus a few special objects, -# but there's a lot to remember. This version is simpler because -# it uses the standard array indexing syntax. -# -# Written by Konrad Hinsen -# last revision: 1999-7-23 -# -# Cosmetic changes by T. Oliphant 2001 -# -# - -class IndexExpression(object): - """ - A nicer way to build up index tuples for arrays. - - .. note:: - Use one of the two predefined instances `index_exp` or `s_` - rather than directly using `IndexExpression`. - - For any index combination, including slicing and axis insertion, - ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any - array `a`. 
However, ``np.index_exp[indices]`` can be used anywhere - in Python code and returns a tuple of slice objects that can be - used in the construction of complex index expressions. - - Parameters - ---------- - maketuple : bool - If True, always returns a tuple. - - See Also - -------- - index_exp : Predefined instance that always returns a tuple: - `index_exp = IndexExpression(maketuple=True)`. - s_ : Predefined instance without tuple conversion: - `s_ = IndexExpression(maketuple=False)`. - - Notes - ----- - You can do all this with `slice()` plus a few special objects, - but there's a lot to remember and this version is simpler because - it uses the standard array indexing syntax. - - Examples - -------- - >>> np.s_[2::2] - slice(2, None, 2) - >>> np.index_exp[2::2] - (slice(2, None, 2),) - - >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] - array([2, 4]) - - """ - def __init__(self, maketuple): - self.maketuple = maketuple - - def __getitem__(self, item): - if self.maketuple and type(item) != tuple: - return (item,) - else: - return item - -index_exp = IndexExpression(maketuple=True) -s_ = IndexExpression(maketuple=False) - -# End contribution from Konrad. - - -# The following functions complement those in twodim_base, but are -# applicable to N-dimensions. - -def fill_diagonal(a, val): - """ - Fill the main diagonal of the given array of any dimensionality. - - For an array `a` with ``a.ndim > 2``, the diagonal is the list of - locations with indices ``a[i, i, ..., i]`` all identical. This function - modifies the input array in-place, it does not return a value. - - Parameters - ---------- - a : array, at least 2-D. - Array whose diagonal is to be filled, it gets modified in-place. - - val : scalar - Value to be written on the diagonal, its type must be compatible with - that of the array a. - - See also - -------- - diag_indices, diag_indices_from - - Notes - ----- - .. 
versionadded:: 1.4.0 - - This functionality can be obtained via `diag_indices`, but internally - this version uses a much faster implementation that never constructs the - indices and uses simple slicing. - - Examples - -------- - >>> a = np.zeros((3, 3), int) - >>> np.fill_diagonal(a, 5) - >>> a - array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]]) - - The same function can operate on a 4-D array: - - >>> a = np.zeros((3, 3, 3, 3), int) - >>> np.fill_diagonal(a, 4) - - We only show a few blocks for clarity: - - >>> a[0, 0] - array([[4, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - >>> a[1, 1] - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 0]]) - >>> a[2, 2] - array([[0, 0, 0], - [0, 0, 0], - [0, 0, 4]]) - - """ - if a.ndim < 2: - raise ValueError("array must be at least 2-d") - if a.ndim == 2: - # Explicit, fast formula for the common case. For 2-d arrays, we - # accept rectangular ones. - step = a.shape[1] + 1 - else: - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - if not alltrue(diff(a.shape)==0): - raise ValueError("All dimensions of input must be of equal length") - step = 1 + (cumprod(a.shape[:-1])).sum() - - # Write the value out into the diagonal. - a.flat[::step] = val - - -def diag_indices(n, ndim=2): - """ - Return the indices to access the main diagonal of an array. - - This returns a tuple of indices that can be used to access the main - diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape - (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for - ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` - for ``i = [0..n-1]``. - - Parameters - ---------- - n : int - The size, along each dimension, of the arrays for which the returned - indices can be used. - - ndim : int, optional - The number of dimensions. - - See also - -------- - diag_indices_from - - Notes - ----- - .. 
versionadded:: 1.4.0 - - Examples - -------- - Create a set of indices to access the diagonal of a (4, 4) array: - - >>> di = np.diag_indices(4) - >>> di - (array([0, 1, 2, 3]), array([0, 1, 2, 3])) - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - >>> a[di] = 100 - >>> a - array([[100, 1, 2, 3], - [ 4, 100, 6, 7], - [ 8, 9, 100, 11], - [ 12, 13, 14, 100]]) - - Now, we create indices to manipulate a 3-D array: - - >>> d3 = np.diag_indices(2, 3) - >>> d3 - (array([0, 1]), array([0, 1]), array([0, 1])) - - And use it to set the diagonal of an array of zeros to 1: - - >>> a = np.zeros((2, 2, 2), dtype=np.int) - >>> a[d3] = 1 - >>> a - array([[[1, 0], - [0, 0]], - [[0, 0], - [0, 1]]]) - - """ - idx = arange(n) - return (idx,) * ndim - - -def diag_indices_from(arr): - """ - Return the indices to access the main diagonal of an n-dimensional array. - - See `diag_indices` for full details. - - Parameters - ---------- - arr : array, at least 2-D - - See Also - -------- - diag_indices - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - - if not arr.ndim >= 2: - raise ValueError("input array must be at least 2-d") - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - if not alltrue(diff(arr.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - - return diag_indices(arr.shape[0], arr.ndim) diff --git a/numpy-1.6.2/numpy/lib/info.py b/numpy-1.6.2/numpy/lib/info.py deleted file mode 100644 index 4a781a2ca4..0000000000 --- a/numpy-1.6.2/numpy/lib/info.py +++ /dev/null @@ -1,150 +0,0 @@ -""" -Basic functions used by several sub-packages and -useful to have in the main name-space. 
- -Type Handling -------------- -================ =================== -iscomplexobj Test for complex object, scalar result -isrealobj Test for real object, scalar result -iscomplex Test for complex elements, array result -isreal Test for real elements, array result -imag Imaginary part -real Real part -real_if_close Turns complex number with tiny imaginary part to real -isneginf Tests for negative infinity, array result -isposinf Tests for positive infinity, array result -isnan Tests for nans, array result -isinf Tests for infinity, array result -isfinite Tests for finite numbers, array result -isscalar True if argument is a scalar -nan_to_num Replaces NaN's with 0 and infinities with large numbers -cast Dictionary of functions to force cast to each type -common_type Determine the minimum common type code for a group - of arrays -mintypecode Return minimal allowed common typecode. -================ =================== - -Index Tricks ------------- -================ =================== -mgrid Method which allows easy construction of N-d - 'mesh-grids' -``r_`` Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends rows. -index_exp Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. 
-================ =================== - -Useful Functions ----------------- -================ =================== -select Extension of where to multiple conditions and choices -extract Extract 1d array from flattened array according to mask -insert Insert 1d array of values into Nd array according to mask -linspace Evenly spaced samples in linear space -logspace Evenly spaced samples in logarithmic space -fix Round x to nearest integer towards zero -mod Modulo mod(x,y) = x % y except keeps sign of y -amax Array maximum along axis -amin Array minimum along axis -ptp Array max-min along axis -cumsum Cumulative sum along axis -prod Product of elements along axis -cumprod Cumluative product along axis -diff Discrete differences along axis -angle Returns angle of complex argument -unwrap Unwrap phase along given axis (1-d algorithm) -sort_complex Sort a complex-array (based on real, then imaginary) -trim_zeros Trim the leading and trailing zeros from 1D array. -vectorize A class that wraps a Python function taking scalar - arguments into a generalized function which can handle - arrays of arguments using the broadcast rules of - numerix Python. -================ =================== - -Shape Manipulation ------------------- -================ =================== -squeeze Return a with length-one dimensions removed. 
-atleast_1d Force arrays to be > 1D -atleast_2d Force arrays to be > 2D -atleast_3d Force arrays to be > 3D -vstack Stack arrays vertically (row on row) -hstack Stack arrays horizontally (column on column) -column_stack Stack 1D arrays as columns into 2D array -dstack Stack arrays depthwise (along third dimension) -split Divide array into a list of sub-arrays -hsplit Split into columns -vsplit Split into rows -dsplit Split along third dimension -================ =================== - -Matrix (2D Array) Manipulations -------------------------------- -================ =================== -fliplr 2D array with columns flipped -flipud 2D array with rows flipped -rot90 Rotate a 2D array a multiple of 90 degrees -eye Return a 2D array with ones down a given diagonal -diag Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat Construct a Matrix -bmat Build a Matrix from blocks -================ =================== - -Polynomials ------------ -================ =================== -poly1d A one-dimensional polynomial class -poly Return polynomial coefficients from roots -roots Find roots of polynomial given coefficients -polyint Integrate polynomial -polyder Differentiate polynomial -polyadd Add polynomials -polysub Substract polynomials -polymul Multiply polynomials -polydiv Divide polynomials -polyval Evaluate polynomial at given argument -================ =================== - -Import Tricks -------------- -================ =================== -ppimport Postpone module import until trying to use it -ppimport_attr Postpone module import until trying to use its attribute -ppresolve Import postponed module and return it. 
-================ =================== - -Machine Arithmetics -------------------- -================ =================== -machar_single Single precision floating point arithmetic parameters -machar_double Double precision floating point arithmetic parameters -================ =================== - -Threading Tricks ----------------- -================ =================== -ParallelExec Execute commands in parallel thread. -================ =================== - -1D Array Set Operations ------------------------ -Set operations for 1D numeric arrays based on sort() function. - -================ =================== -ediff1d Array difference (auxiliary function). -unique Unique elements of an array. -intersect1d Intersection of 1D arrays with unique elements. -setxor1d Set exclusive-or of 1D arrays with unique elements. -in1d Test whether elements in a 1D array are also present in - another array. -union1d Union of 1D arrays with unique elements. -setdiff1d Set difference of 1D arrays with unique elements. 
-================ =================== - -""" - -depends = ['core','testing'] -global_symbols = ['*'] diff --git a/numpy-1.6.2/numpy/lib/npyio.py b/numpy-1.6.2/numpy/lib/npyio.py deleted file mode 100644 index 9177d5f2af..0000000000 --- a/numpy-1.6.2/numpy/lib/npyio.py +++ /dev/null @@ -1,1818 +0,0 @@ -__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', - 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', - 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'] - -import numpy as np -import format -import sys -import os -import sys -import itertools -import warnings -import weakref -from operator import itemgetter - -from cPickle import load as _cload, loads -from _datasource import DataSource -from _compiled_base import packbits, unpackbits - -from _iotools import LineSplitter, NameValidator, StringConverter, \ - ConverterError, ConverterLockError, ConversionWarning, \ - _is_string_like, has_nested_fields, flatten_dtype, \ - easy_dtype, _bytes_to_name - -from numpy.compat import asbytes, asstr, asbytes_nested, bytes - -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -_string_like = _is_string_like - -def seek_gzip_factory(f): - """Use this factory to produce the class so that we can do a lazy - import on gzip. 
- - """ - import gzip - - class GzipFile(gzip.GzipFile): - - def seek(self, offset, whence=0): - # figure out new position (we can only seek forwards) - if whence == 1: - offset = self.offset + offset - - if whence not in [0, 1]: - raise IOError, "Illegal argument" - - if offset < self.offset: - # for negative seek, rewind and do positive seek - self.rewind() - count = offset - self.offset - for i in range(count // 1024): - self.read(1024) - self.read(count % 1024) - - def tell(self): - return self.offset - - - if isinstance(f, str): - f = GzipFile(f) - elif isinstance(f, gzip.GzipFile): - # cast to our GzipFile if its already a gzip.GzipFile - - try: - name = f.name - except AttributeError: - # Backward compatibility for <= 2.5 - name = f.filename - mode = f.mode - - f = GzipFile(fileobj=f.fileobj, filename=name) - f.mode = mode - - return f - - - -class BagObj(object): - """ - BagObj(obj) - - Convert attribute look-ups to getitems on the object passed in. - - Parameters - ---------- - obj : class instance - Object on which attribute look-up is performed. - - Examples - -------- - >>> from numpy.lib.npyio import BagObj as BO - >>> class BagDemo(object): - ... def __getitem__(self, key): # An instance of BagObj(BagDemo) - ... # will call this method when any - ... # attribute look-up is required - ... result = "Doesn't matter what you want, " - ... return result + "you're gonna get this" - ... 
- >>> demo_obj = BagDemo() - >>> bagobj = BO(demo_obj) - >>> bagobj.hello_there - "Doesn't matter what you want, you're gonna get this" - >>> bagobj.I_can_be_anything - "Doesn't matter what you want, you're gonna get this" - - """ - def __init__(self, obj): - # Use weakref to make NpzFile objects collectable by refcount - self._obj = weakref.proxy(obj) - def __getattribute__(self, key): - try: - return object.__getattribute__(self, '_obj')[key] - except KeyError: - raise AttributeError, key - -def zipfile_factory(*args, **kwargs): - import zipfile - if sys.version_info >= (2, 5): - kwargs['allowZip64'] = True - return zipfile.ZipFile(*args, **kwargs) - -class NpzFile(object): - """ - NpzFile(fid) - - A dictionary-like object with lazy-loading of files in the zipped - archive provided on construction. - - `NpzFile` is used to load files in the NumPy ``.npz`` data archive - format. It assumes that files in the archive have a ".npy" extension, - other files are ignored. - - The arrays and file strings are lazily loaded on either - getitem access using ``obj['key']`` or attribute lookup using - ``obj.f.key``. A list of all files (without ".npy" extensions) can - be obtained with ``obj.files`` and the ZipFile object itself using - ``obj.zip``. - - Attributes - ---------- - files : list of str - List of all files in the archive with a ".npy" extension. - zip : ZipFile instance - The ZipFile object initialized with the zipped archive. - f : BagObj instance - An object on which attribute can be performed as an alternative - to getitem access on the `NpzFile` instance itself. - - Parameters - ---------- - fid : file or str - The zipped archive to open. This is either a file-like object - or a string containing the path to the archive. - own_fid : bool, optional - Whether NpzFile should close the file handle. - Requires that `fid` is a file-like object. 
- - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) - - >>> npz = np.load(outfile) - >>> isinstance(npz, np.lib.io.NpzFile) - True - >>> npz.files - ['y', 'x'] - >>> npz['x'] # getitem access - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> npz.f.x # attribute lookup - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - """ - def __init__(self, fid, own_fid=False): - # Import is postponed to here since zipfile depends on gzip, an optional - # component of the so-called standard library. - _zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) - self.zip = _zip - self.f = BagObj(self) - if own_fid: - self.fid = fid - else: - self.fid = None - - def close(self): - """ - Close the file. - - """ - if self.zip is not None: - self.zip.close() - self.zip = None - if self.fid is not None: - self.fid.close() - self.fid = None - self.f = None # break reference cycle - - def __del__(self): - self.close() - - def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. 
- member = 0 - if key in self._files: - member = 1 - elif key in self.files: - member = 1 - key += '.npy' - if member: - bytes = self.zip.read(key) - if bytes.startswith(format.MAGIC_PREFIX): - value = BytesIO(bytes) - return format.read_array(value) - else: - return bytes - else: - raise KeyError, "%s is not a file in the archive" % key - - - def __iter__(self): - return iter(self.files) - - def items(self): - """ - Return a list of tuples, with each tuple (filename, array in file). - - """ - return [(f, self[f]) for f in self.files] - - def iteritems(self): - """Generator that returns tuples (filename, array in file).""" - for f in self.files: - yield (f, self[f]) - - def keys(self): - """Return files in the archive with a ".npy" extension.""" - return self.files - - def iterkeys(self): - """Return an iterator over the files in the archive.""" - return self.__iter__() - - def __contains__(self, key): - return self.files.__contains__(key) - - -def load(file, mmap_mode=None): - """ - Load a pickled, ``.npy``, or ``.npz`` binary file. - - Parameters - ---------- - file : file-like object or string - The file to read. It must support ``seek()`` and ``read()`` methods. - If the filename extension is ``.gz``, the file is first decompressed. - mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional - If not None, then memory-map the file, using the given mode - (see `numpy.memmap`). The mode has no effect for pickled or - zipped files. - A memory-mapped array is stored on disk, and not directly loaded - into memory. However, it can be accessed and sliced like any - ndarray. Memory mapping is especially useful for accessing - small fragments of large files without reading the entire file - into memory. - - Returns - ------- - result : array, tuple, dict, etc. - Data stored in the file. - - Raises - ------ - IOError - If the input file does not exist or cannot be read. 
- - See Also - -------- - save, savez, loadtxt - memmap : Create a memory-map to an array stored in a file on disk. - - Notes - ----- - - If the file contains pickle data, then whatever is stored in the - pickle is returned. - - If the file is a ``.npy`` file, then an array is returned. - - If the file is a ``.npz`` file, then a dictionary-like object is - returned, containing ``{filename: array}`` key-value pairs, one for - each file in the archive. - - Examples - -------- - Store data to disk, and load it again: - - >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) - >>> np.load('/tmp/123.npy') - array([[1, 2, 3], - [4, 5, 6]]) - - Mem-map the stored array, and then access the second row - directly from disk: - - >>> X = np.load('/tmp/123.npy', mmap_mode='r') - >>> X[1, :] - memmap([4, 5, 6]) - - """ - import gzip - - own_fid = False - if isinstance(file, basestring): - fid = open(file, "rb") - own_fid = True - elif isinstance(file, gzip.GzipFile): - fid = seek_gzip_factory(file) - own_fid = True - else: - fid = file - - try: - # Code to distinguish from NumPy binary files and pickles. - _ZIP_PREFIX = asbytes('PK\x03\x04') - N = len(format.MAGIC_PREFIX) - magic = fid.read(N) - fid.seek(-N, 1) # back-up - if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz) - own_fid = False - return NpzFile(fid, own_fid=True) - elif magic == format.MAGIC_PREFIX: # .npy file - if mmap_mode: - return format.open_memmap(file, mode=mmap_mode) - else: - return format.read_array(fid) - else: # Try a pickle - try: - return _cload(fid) - except: - raise IOError, \ - "Failed to interpret file %s as a pickle" % repr(file) - finally: - if own_fid: - fid.close() - -def save(file, arr): - """ - Save an array to a binary file in NumPy ``.npy`` format. - - Parameters - ---------- - file : file or str - File or filename to which the data is saved. If file is a file-object, - then the filename is unchanged. 
If file is a string, a ``.npy`` - extension will be appended to the file name if it does not already - have one. - arr : array_like - Array data to be saved. - - See Also - -------- - savez : Save several arrays into a ``.npz`` archive - savetxt, load - - Notes - ----- - For a description of the ``.npy`` format, see `format`. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - - >>> x = np.arange(10) - >>> np.save(outfile, x) - - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> np.load(outfile) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - """ - own_fid = False - if isinstance(file, basestring): - if not file.endswith('.npy'): - file = file + '.npy' - fid = open(file, "wb") - own_fid = True - else: - fid = file - - try: - arr = np.asanyarray(arr) - format.write_array(fid, arr) - finally: - if own_fid: - fid.close() - -def savez(file, *args, **kwds): - """ - Save several arrays into a single file in uncompressed ``.npz`` format. - - If arguments are passed in with no keywords, the corresponding variable - names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments - are given, the corresponding variable names, in the ``.npz`` file will - match the keyword names. - - Parameters - ---------- - file : str or file - Either the file name (string) or an open file (file-like object) - where the data will be saved. If file is a string, the ``.npz`` - extension will be appended to the file name if it is not already there. - *args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. - **kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. 
- - Returns - ------- - None - - See Also - -------- - save : Save a single array to a binary file in NumPy format. - savetxt : Save an array to a file as plain text. - - Notes - ----- - The ``.npz`` file format is a zipped archive of files named after the - variables they contain. The archive is not compressed and each file - in the archive contains one variable in ``.npy`` format. For a - description of the ``.npy`` format, see `format`. - - When opening the saved ``.npz`` file with `load` a `NpzFile` object is - returned. This is a dictionary-like object which can be queried for - its list of arrays (with the ``.files`` attribute), and for the arrays - themselves. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - - Using `savez` with *args, the arrays are saved with default names. - - >>> np.savez(outfile, x, y) - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> npzfile = np.load(outfile) - >>> npzfile.files - ['arr_1', 'arr_0'] - >>> npzfile['arr_0'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - Using `savez` with **kwds, the arrays are saved with the keyword names. - - >>> outfile = TemporaryFile() - >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) - >>> npzfile = np.load(outfile) - >>> npzfile.files - ['y', 'x'] - >>> npzfile['x'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - See Also - -------- - numpy.savez_compressed : Save several arrays into a compressed .npz file format - - """ - _savez(file, args, kwds, False) - -def savez_compressed(file, *args, **kwds): - """ - Save several arrays into a single file in compressed ``.npz`` format. - - If keyword arguments are given, then filenames are taken from the keywords. - If arguments are passed in with no keywords, then stored file names are - arr_0, arr_1, etc. - - Parameters - ---------- - file : str - File name of .npz file. - args : Arguments - Function arguments. 
- kwds : Keyword arguments - Keywords. - - See Also - -------- - numpy.savez : Save several arrays into an uncompressed .npz file format - - """ - _savez(file, args, kwds, True) - -def _savez(file, args, kwds, compress): - # Import is postponed to here since zipfile depends on gzip, an optional - # component of the so-called standard library. - import zipfile - # Import deferred for startup time improvement - import tempfile - - if isinstance(file, basestring): - if not file.endswith('.npz'): - file = file + '.npz' - - namedict = kwds - for i, val in enumerate(args): - key = 'arr_%d' % i - if key in namedict.keys(): - raise ValueError, "Cannot use un-named variables and keyword %s" % key - namedict[key] = val - - if compress: - compression = zipfile.ZIP_DEFLATED - else: - compression = zipfile.ZIP_STORED - - zip = zipfile_factory(file, mode="w", compression=compression) - - # Stage arrays in a temporary file on disk, before writing to zip. - fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') - os.close(fd) - try: - for key, val in namedict.iteritems(): - fname = key + '.npy' - fid = open(tmpfile, 'wb') - try: - format.write_array(fid, np.asanyarray(val)) - fid.close() - fid = None - zip.write(tmpfile, arcname=fname) - finally: - if fid: - fid.close() - finally: - os.remove(tmpfile) - - zip.close() - -# Adapted from matplotlib - -def _getconv(dtype): - typ = dtype.type - if issubclass(typ, np.bool_): - return lambda x: bool(int(x)) - if issubclass(typ, np.uint64): - return np.uint64 - if issubclass(typ, np.int64): - return np.int64 - if issubclass(typ, np.integer): - return lambda x: int(float(x)) - elif issubclass(typ, np.floating): - return float - elif issubclass(typ, np.complex): - return complex - elif issubclass(typ, np.bytes_): - return bytes - else: - return str - - - -def loadtxt(fname, dtype=float, comments='#', delimiter=None, - converters=None, skiprows=0, usecols=None, unpack=False, - ndmin=0): - """ - Load data from a text file. 
- - Each row in the text file must have the same number of values. - - Parameters - ---------- - fname : file or str - File, filename, or generator to read. If the filename extension is - ``.gz`` or ``.bz2``, the file is first decompressed. Note that - generators should return byte strings for Python 3k. - dtype : data-type, optional - Data-type of the resulting array; default: float. If this is a - record data-type, the resulting array will be 1-dimensional, and - each row will be interpreted as an element of the array. In this - case, the number of columns used must match the number of fields in - the data-type. - comments : str, optional - The character used to indicate the start of a comment; - default: '#'. - delimiter : str, optional - The string used to separate values. By default, this is any - whitespace. - converters : dict, optional - A dictionary mapping column number to a function that will convert - that column to a float. E.g., if column 0 is a date string: - ``converters = {0: datestr2num}``. Converters can also be used to - provide a default value for missing data (but see also `genfromtxt`): - ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. - skiprows : int, optional - Skip the first `skiprows` lines; default: 0. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. - The default, None, results in all columns being read. - unpack : bool, optional - If True, the returned array is transposed, so that arguments may be - unpacked using ``x, y, z = loadtxt(...)``. When used with a record - data-type, arrays are returned for each field. Default is False. - ndmin : int, optional - The returned array will have at least `ndmin` dimensions. - Otherwise mono-dimensional axes will be squeezed. - Legal values: 0 (default), 1 or 2. - .. versionadded:: 1.6.0 - - Returns - ------- - out : ndarray - Data read from the text file. 
- - See Also - -------- - load, fromstring, fromregex - genfromtxt : Load data with missing values handled as specified. - scipy.io.loadmat : reads MATLAB data files - - Notes - ----- - This function aims to be a fast reader for simply formatted files. The - `genfromtxt` function provides more sophisticated handling of, e.g., - lines with missing values. - - Examples - -------- - >>> from StringIO import StringIO # StringIO behaves like a file object - >>> c = StringIO("0 1\\n2 3") - >>> np.loadtxt(c) - array([[ 0., 1.], - [ 2., 3.]]) - - >>> d = StringIO("M 21 72\\nF 35 58") - >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), - ... 'formats': ('S1', 'i4', 'f4')}) - array([('M', 21, 72.0), ('F', 35, 58.0)], - dtype=[('gender', '|S1'), ('age', '>> c = StringIO("1,0,2\\n3,0,4") - >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) - >>> x - array([ 1., 3.]) - >>> y - array([ 2., 4.]) - - """ - # Type conversions for Py3 convenience - comments = asbytes(comments) - user_converters = converters - if delimiter is not None: - delimiter = asbytes(delimiter) - if usecols is not None: - usecols = list(usecols) - - fown = False - try: - if _is_string_like(fname): - fown = True - if fname.endswith('.gz'): - fh = iter(seek_gzip_factory(fname)) - elif fname.endswith('.bz2'): - import bz2 - fh = iter(bz2.BZ2File(fname)) - else: - fh = iter(open(fname, 'U')) - else: - fh = iter(fname) - except TypeError: - raise ValueError('fname must be a string, file handle, or generator') - X = [] - - def flatten_dtype(dt): - """Unpack a structured data-type, and produce re-packing info.""" - if dt.names is None: - # If the dtype is flattened, return. - # If the dtype has a shape, the dtype occurs - # in the list more than once. 
- shape = dt.shape - if len(shape) == 0: - return ([dt.base], None) - else: - packing = [(shape[-1], list)] - if len(shape) > 1: - for dim in dt.shape[-2::-1]: - packing = [(dim*packing[0][0], packing*dim)] - return ([dt.base] * int(np.prod(dt.shape)), packing) - else: - types = [] - packing = [] - for field in dt.names: - tp, bytes = dt.fields[field] - flat_dt, flat_packing = flatten_dtype(tp) - types.extend(flat_dt) - # Avoid extra nesting for subarrays - if len(tp.shape) > 0: - packing.extend(flat_packing) - else: - packing.append((len(flat_dt), flat_packing)) - return (types, packing) - - def pack_items(items, packing): - """Pack items into nested lists based on re-packing info.""" - if packing == None: - return items[0] - elif packing is tuple: - return tuple(items) - elif packing is list: - return list(items) - else: - start = 0 - ret = [] - for length, subpacking in packing: - ret.append(pack_items(items[start:start+length], subpacking)) - start += length - return tuple(ret) - - def split_line(line): - """Chop off comments, strip, and split at delimiter.""" - line = asbytes(line).split(comments)[0].strip(asbytes('\r\n')) - if line: - return line.split(delimiter) - else: - return [] - - try: - # Make sure we're dealing with a proper dtype - dtype = np.dtype(dtype) - defconv = _getconv(dtype) - - # Skip the first `skiprows` lines - for i in xrange(skiprows): - fh.next() - - # Read until we find a line with some values, and use - # it to estimate the number of columns, N. 
- first_vals = None - try: - while not first_vals: - first_line = fh.next() - first_vals = split_line(first_line) - except StopIteration: - # End of lines reached - first_line = '' - first_vals = [] - warnings.warn('loadtxt: Empty input file: "%s"' % fname) - N = len(usecols or first_vals) - - dtype_types, packing = flatten_dtype(dtype) - if len(dtype_types) > 1: - # We're dealing with a structured array, each field of - # the dtype matches a column - converters = [_getconv(dt) for dt in dtype_types] - else: - # All fields have the same dtype - converters = [defconv for i in xrange(N)] - if N > 1: - packing = [(N, tuple)] - - # By preference, use the converters specified by the user - for i, conv in (user_converters or {}).iteritems(): - if usecols: - try: - i = usecols.index(i) - except ValueError: - # Unused converter specified - continue - converters[i] = conv - - # Parse each line, including the first - for i, line in enumerate(itertools.chain([first_line], fh)): - vals = split_line(line) - if len(vals) == 0: - continue - if usecols: - vals = [vals[i] for i in usecols] - # Convert each value according to its column and store - items = [conv(val) for (conv, val) in zip(converters, vals)] - # Then pack it according to the dtype's nesting - items = pack_items(items, packing) - X.append(items) - finally: - if fown: - fh.close() - - X = np.array(X, dtype) - # Multicolumn data are returned with shape (1, N, M), i.e. - # (1, 1, M) for a single row - remove the singleton dimension there - if X.ndim == 3 and X.shape[:2] == (1, 1): - X.shape = (1, -1) - - # Verify that the array has at least dimensions `ndmin`. 
- # Check correctness of the values of `ndmin` - if not ndmin in [0, 1, 2]: - raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) - # Tweak the size and shape of the arrays - remove extraneous dimensions - if X.ndim > ndmin: - X = np.squeeze(X) - # and ensure we have the minimum number of dimensions asked for - # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 - if X.ndim < ndmin: - if ndmin == 1: - X = np.atleast_1d(X) - elif ndmin == 2: - X = np.atleast_2d(X).T - - if unpack: - if len(dtype_types) > 1: - # For structured arrays, return an array for each field. - return [X[field] for field in dtype.names] - else: - return X.T - else: - return X - - -def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'): - """ - Save an array to a text file. - - Parameters - ---------- - fname : filename or file handle - If the filename ends in ``.gz``, the file is automatically saved in - compressed gzip format. `loadtxt` understands gzipped files - transparently. - X : array_like - Data to be saved to a text file. - fmt : str or sequence of strs - A single format (%10.5f), a sequence of formats, or a - multi-format string, e.g. 'Iteration %d -- %10.5f', in which - case `delimiter` is ignored. For complex `X`, the legal options - for `fmt` are: - a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted - like `' (%s+%sj)' % (fmt, fmt)` - b) a full string specifying every real and imaginary part, e.g. - `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns - c) a list of specifiers, one per column - in this case, the real - and imaginary part must have separate specifiers, - e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns - delimiter : str, optional - Character separating columns. - newline : str - .. versionadded:: 1.5.0 - - Character separating lines. 
- - See Also - -------- - save : Save an array to a binary file in NumPy ``.npy`` format - savez : Save several arrays into a ``.npz`` compressed archive - - Notes - ----- - Further explanation of the `fmt` parameter - (``%[flag]width[.precision]specifier``): - - flags: - ``-`` : left justify - - ``+`` : Forces to preceed result with + or -. - - ``0`` : Left pad the number with zeros instead of space (see width). - - width: - Minimum number of characters to be printed. The value is not truncated - if it has more characters. - - precision: - - For integer specifiers (eg. ``d,i,o,x``), the minimum number of - digits. - - For ``e, E`` and ``f`` specifiers, the number of digits to print - after the decimal point. - - For ``g`` and ``G``, the maximum number of significant digits. - - For ``s``, the maximum number of characters. - - specifiers: - ``c`` : character - - ``d`` or ``i`` : signed decimal integer - - ``e`` or ``E`` : scientific notation with ``e`` or ``E``. - - ``f`` : decimal floating point - - ``g,G`` : use the shorter of ``e,E`` or ``f`` - - ``o`` : signed octal - - ``s`` : string of characters - - ``u`` : unsigned decimal integer - - ``x,X`` : unsigned hexadecimal integer - - This explanation of ``fmt`` is not complete, for an exhaustive - specification see [1]_. - - References - ---------- - .. [1] `Format Specification Mini-Language - `_, Python Documentation. 
- - Examples - -------- - >>> x = y = z = np.arange(0.0,5.0,1.0) - >>> np.savetxt('test.out', x, delimiter=',') # X is an array - >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays - >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation - - """ - - # Py3 conversions first - if isinstance(fmt, bytes): - fmt = asstr(fmt) - delimiter = asstr(delimiter) - - own_fh = False - if _is_string_like(fname): - own_fh = True - if fname.endswith('.gz'): - import gzip - fh = gzip.open(fname, 'wb') - else: - if sys.version_info[0] >= 3: - fh = open(fname, 'wb') - else: - fh = open(fname, 'w') - elif hasattr(fname, 'seek'): - fh = fname - else: - raise ValueError('fname must be a string or file handle') - - try: - X = np.asarray(X) - - # Handle 1-dimensional arrays - if X.ndim == 1: - # Common case -- 1d array of numbers - if X.dtype.names is None: - X = np.atleast_2d(X).T - ncol = 1 - - # Complex dtype -- each field indicates a separate column - else: - ncol = len(X.dtype.descr) - else: - ncol = X.shape[1] - - iscomplex_X = np.iscomplexobj(X) - # `fmt` can be a string with multiple insertion points or a - # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') - if type(fmt) in (list, tuple): - if len(fmt) != ncol: - raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) - format = asstr(delimiter).join(map(asstr, fmt)) - elif type(fmt) is str: - n_fmt_chars = fmt.count('%') - error = ValueError('fmt has wrong number of %% formats: %s' % fmt) - if n_fmt_chars == 1: - if iscomplex_X: - fmt = [' (%s+%sj)' % (fmt, fmt),] * ncol - else: - fmt = [fmt, ] * ncol - format = delimiter.join(fmt) - elif iscomplex_X and n_fmt_chars != (2 * ncol): - raise error - elif ((not iscomplex_X) and n_fmt_chars != ncol): - raise error - else: - format = fmt - - if iscomplex_X: - for row in X: - row2 = [] - for number in row: - row2.append(number.real) - row2.append(number.imag) - fh.write(asbytes(format % tuple(row2) + newline)) - else: - for row in X: - fh.write(asbytes(format % tuple(row) + newline)) - finally: - if own_fh: - fh.close() - -import re -def fromregex(file, regexp, dtype): - """ - Construct an array from a text file, using regular expression parsing. - - The returned array is always a structured array, and is constructed from - all matches of the regular expression in the file. Groups in the regular - expression are converted to fields of the structured array. - - Parameters - ---------- - file : str or file - File name or file object to read. - regexp : str or regexp - Regular expression used to parse the file. - Groups in the regular expression correspond to fields in the dtype. - dtype : dtype or list of dtypes - Dtype for the structured array. - - Returns - ------- - output : ndarray - The output array, containing the part of the content of `file` that - was matched by `regexp`. `output` is always a structured array. - - Raises - ------ - TypeError - When `dtype` is not a valid dtype for a structured array. - - See Also - -------- - fromstring, loadtxt - - Notes - ----- - Dtypes for structured arrays can be specified in several forms, but all - forms specify at least the data type and field name. For details see - `doc.structured_arrays`. 
- - Examples - -------- - >>> f = open('test.dat', 'w') - >>> f.write("1312 foo\\n1534 bar\\n444 qux") - >>> f.close() - - >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] - >>> output = np.fromregex('test.dat', regexp, - ... [('num', np.int64), ('key', 'S3')]) - >>> output - array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], - dtype=[('num', '>> output['num'] - array([1312, 1534, 444], dtype=int64) - - """ - own_fh = False - if not hasattr(file, "read"): - file = open(file, 'rb') - own_fh = True - - try: - if not hasattr(regexp, 'match'): - regexp = re.compile(asbytes(regexp)) - if not isinstance(dtype, np.dtype): - dtype = np.dtype(dtype) - - seq = regexp.findall(file.read()) - if seq and not isinstance(seq[0], tuple): - # Only one group is in the regexp. - # Create the new array as a single data-type and then - # re-interpret as a single-field structured array. - newdtype = np.dtype(dtype[dtype.names[0]]) - output = np.array(seq, dtype=newdtype) - output.dtype = dtype - else: - output = np.array(seq, dtype=dtype) - - return output - finally: - if own_fh: - file.close() - - - - -#####-------------------------------------------------------------------------- -#---- --- ASCII functions --- -#####-------------------------------------------------------------------------- - - - -def genfromtxt(fname, dtype=float, comments='#', delimiter=None, - skiprows=0, skip_header=0, skip_footer=0, converters=None, - missing='', missing_values=None, filling_values=None, - usecols=None, names=None, - excludelist=None, deletechars=None, replace_space='_', - autostrip=False, case_sensitive=True, defaultfmt="f%i", - unpack=None, usemask=False, loose=True, invalid_raise=True): - """ - Load data from a text file, with missing values handled as specified. - - Each line past the first `skip_header` lines is split at the `delimiter` - character, and characters following the `comments` character are discarded. 
- - Parameters - ---------- - fname : file or str - File, filename, or generator to read. If the filename extension is - `.gz` or `.bz2`, the file is first decompressed. Note that - generators must return byte strings in Python 3k. - dtype : dtype, optional - Data type of the resulting array. - If None, the dtypes will be determined by the contents of each - column, individually. - comments : str, optional - The character used to indicate the start of a comment. - All the characters occurring on a line after a comment are discarded - delimiter : str, int, or sequence, optional - The string used to separate values. By default, any consecutive - whitespaces act as delimiter. An integer or sequence of integers - can also be provided as width(s) of each field. - skip_header : int, optional - The numbers of lines to skip at the beginning of the file. - skip_footer : int, optional - The numbers of lines to skip at the end of the file - converters : variable, optional - The set of functions that convert the data of a column to a value. - The converters can also be used to provide a default value - for missing data: ``converters = {3: lambda s: float(s or 0)}``. - missing_values : variable, optional - The set of strings corresponding to missing data. - filling_values : variable, optional - The set of values to be used as default when the data are missing. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. - names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first valid line - after the first `skip_header` lines. - If `names` is a sequence or a single-string of comma-separated names, - the names will be used to define the field names in a structured dtype. - If `names` is None, the names of the dtype fields will be used, if any. - excludelist : sequence, optional - A list of names to exclude. 
This list is appended to the default list - ['return','file','print']. Excluded names are appended an underscore: - for example, `file` would become `file_`. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - defaultfmt : str, optional - A format used to define default field names, such as "f%i" or "f_%02i". - autostrip : bool, optional - Whether to automatically strip white spaces from the variables. - replace_space : char, optional - Character(s) used in replacement of white spaces in the variables names. - By default, use a '_'. - case_sensitive : {True, False, 'upper', 'lower'}, optional - If True, field names are case sensitive. - If False or 'upper', field names are converted to upper case. - If 'lower', field names are converted to lower case. - unpack : bool, optional - If True, the returned array is transposed, so that arguments may be - unpacked using ``x, y, z = loadtxt(...)`` - usemask : bool, optional - If True, return a masked array. - If False, return a regular array. - invalid_raise : bool, optional - If True, an exception is raised if an inconsistency is detected in the - number of columns. - If False, a warning is emitted and the offending lines are skipped. - - Returns - ------- - out : ndarray - Data read from the text file. If `usemask` is True, this is a - masked array. - - See Also - -------- - numpy.loadtxt : equivalent function when no data is missing. - - Notes - ----- - * When spaces are used as delimiters, or when no delimiter has been given - as input, there should not be any missing data between two fields. - * When the variables are named (either by a flexible dtype or with `names`, - there must not be any header in the file (else a ValueError - exception is raised). - * Individual values are not stripped of spaces by default. - When using a custom converter, make sure the function does remove spaces. 
- - Examples - --------- - >>> from StringIO import StringIO - >>> import numpy as np - - Comma delimited file with mixed dtype - - >>> s = StringIO("1,1.3,abcde") - >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), - ... ('mystring','S5')], delimiter=",") - >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '>> s.seek(0) # needed for StringIO example only - >>> data = np.genfromtxt(s, dtype=None, - ... names = ['myint','myfloat','mystring'], delimiter=",") - >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '>> s.seek(0) - >>> data = np.genfromtxt(s, dtype="i8,f8,S5", - ... names=['myint','myfloat','mystring'], delimiter=",") - >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '>> s = StringIO("11.3abcde") - >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], - ... delimiter=[1,3,5]) - >>> data - array((1, 1.3, 'abcde'), - dtype=[('intvar', ' nbcols): - descr = dtype.descr - dtype = np.dtype([descr[_] for _ in usecols]) - names = list(dtype.names) - # If `names` is not None, update the names - elif (names is not None) and (len(names) > nbcols): - names = [names[_] for _ in usecols] - elif (names is not None) and (dtype is not None): - names = list(dtype.names) - - - # Process the missing values ............................... - # Rename missing_values for convenience - user_missing_values = missing_values or () - - # Define the list of missing_values (one column: one list) - missing_values = [list([asbytes('')]) for _ in range(nbcols)] - - # We have a dictionary: process it field by field - if isinstance(user_missing_values, dict): - # Loop on the items - for (key, val) in user_missing_values.items(): - # Is the key a string ? 
- if _is_string_like(key): - try: - # Transform it into an integer - key = names.index(key) - except ValueError: - # We couldn't find it: the name must have been dropped, then - continue - # Redefine the key as needed if it's a column number - if usecols: - try: - key = usecols.index(key) - except ValueError: - pass - # Transform the value as a list of string - if isinstance(val, (list, tuple)): - val = [str(_) for _ in val] - else: - val = [str(val), ] - # Add the value(s) to the current list of missing - if key is None: - # None acts as default - for miss in missing_values: - miss.extend(val) - else: - missing_values[key].extend(val) - # We have a sequence : each item matches a column - elif isinstance(user_missing_values, (list, tuple)): - for (value, entry) in zip(user_missing_values, missing_values): - value = str(value) - if value not in entry: - entry.append(value) - # We have a string : apply it to all entries - elif isinstance(user_missing_values, bytes): - user_value = user_missing_values.split(asbytes(",")) - for entry in missing_values: - entry.extend(user_value) - # We have something else: apply it to all entries - else: - for entry in missing_values: - entry.extend([str(user_missing_values)]) - - # Process the deprecated `missing` - if missing != asbytes(''): - warnings.warn(\ - "The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \ - "Please use `missing_values` instead.", - DeprecationWarning) - values = [str(_) for _ in missing.split(asbytes(","))] - for entry in missing_values: - entry.extend(values) - - # Process the filling_values ............................... 
- # Rename the input for convenience - user_filling_values = filling_values or [] - # Define the default - filling_values = [None] * nbcols - # We have a dictionary : update each entry individually - if isinstance(user_filling_values, dict): - for (key, val) in user_filling_values.items(): - if _is_string_like(key): - try: - # Transform it into an integer - key = names.index(key) - except ValueError: - # We couldn't find it: the name must have been dropped, then - continue - # Redefine the key if it's a column number and usecols is defined - if usecols: - try: - key = usecols.index(key) - except ValueError: - pass - # Add the value to the list - filling_values[key] = val - # We have a sequence : update on a one-to-one basis - elif isinstance(user_filling_values, (list, tuple)): - n = len(user_filling_values) - if (n <= nbcols): - filling_values[:n] = user_filling_values - else: - filling_values = user_filling_values[:nbcols] - # We have something else : use it for all entries - else: - filling_values = [user_filling_values] * nbcols - - # Initialize the converters ................................ - if dtype is None: - # Note: we can't use a [...]*nbcols, as we would have 3 times the same - # ... converter, instead of 3 different converters. 
- converters = [StringConverter(None, missing_values=miss, default=fill) - for (miss, fill) in zip(missing_values, filling_values)] - else: - dtype_flat = flatten_dtype(dtype, flatten_base=True) - # Initialize the converters - if len(dtype_flat) > 1: - # Flexible type : get a converter from each dtype - zipit = zip(dtype_flat, missing_values, filling_values) - converters = [StringConverter(dt, locked=True, - missing_values=miss, default=fill) - for (dt, miss, fill) in zipit] - else: - # Set to a default converter (but w/ different missing values) - zipit = zip(missing_values, filling_values) - converters = [StringConverter(dtype, locked=True, - missing_values=miss, default=fill) - for (miss, fill) in zipit] - # Update the converters to use the user-defined ones - uc_update = [] - for (i, conv) in user_converters.items(): - # If the converter is specified by column names, use the index instead - if _is_string_like(i): - try: - i = names.index(i) - except ValueError: - continue - elif usecols: - try: - i = usecols.index(i) - except ValueError: - # Unused converter specified - continue - # Find the value to test: - if len(first_line): - testing_value = first_values[i] - else: - testing_value = None - converters[i].update(conv, locked=True, - testing_value=testing_value, - default=filling_values[i], - missing_values=missing_values[i],) - uc_update.append((i, conv)) - # Make sure we have the corrected keys in user_converters... - user_converters.update(uc_update) - - miss_chars = [_.missing_values for _ in converters] - - - # Initialize the output lists ... - # ... rows - rows = [] - append_to_rows = rows.append - # ... masks - if usemask: - masks = [] - append_to_masks = masks.append - # ... 
invalid - invalid = [] - append_to_invalid = invalid.append - - # Parse each line - for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): - values = split_line(line) - nbvalues = len(values) - # Skip an empty line - if nbvalues == 0: - continue - # Select only the columns we need - if usecols: - try: - values = [values[_] for _ in usecols] - except IndexError: - append_to_invalid((i + skip_header + 1, nbvalues)) - continue - elif nbvalues != nbcols: - append_to_invalid((i + skip_header + 1, nbvalues)) - continue - # Store the values - append_to_rows(tuple(values)) - if usemask: - append_to_masks(tuple([v.strip() in m - for (v, m) in zip(values, missing_values)])) - - if own_fhd: - fhd.close() - - # Upgrade the converters (if needed) - if dtype is None: - for (i, converter) in enumerate(converters): - current_column = map(itemgetter(i), rows) - try: - converter.iterupgrade(current_column) - except ConverterLockError: - errmsg = "Converter #%i is locked and cannot be upgraded: " % i - current_column = itertools.imap(itemgetter(i), rows) - for (j, value) in enumerate(current_column): - try: - converter.upgrade(value) - except (ConverterError, ValueError): - errmsg += "(occurred line #%i for value '%s')" - errmsg %= (j + 1 + skip_header, value) - raise ConverterError(errmsg) - - # Check that we don't have invalid values - nbinvalid = len(invalid) - if nbinvalid > 0: - nbrows = len(rows) + nbinvalid - skip_footer - # Construct the error message - template = " Line #%%i (got %%i columns instead of %i)" % nbcols - if skip_footer > 0: - nbinvalid_skipped = len([_ for _ in invalid - if _[0] > nbrows + skip_header]) - invalid = invalid[:nbinvalid - nbinvalid_skipped] - skip_footer -= nbinvalid_skipped -# -# nbrows -= skip_footer -# errmsg = [template % (i, nb) -# for (i, nb) in invalid if i < nbrows] -# else: - errmsg = [template % (i, nb) - for (i, nb) in invalid] - if len(errmsg): - errmsg.insert(0, "Some errors were detected !") - errmsg = "\n".join(errmsg) - 
# Raise an exception ? - if invalid_raise: - raise ValueError(errmsg) - # Issue a warning ? - else: - warnings.warn(errmsg, ConversionWarning) - - # Strip the last skip_footer data - if skip_footer > 0: - rows = rows[:-skip_footer] - if usemask: - masks = masks[:-skip_footer] - - - # Convert each value according to the converter: - # We want to modify the list in place to avoid creating a new one... -# if loose: -# conversionfuncs = [conv._loose_call for conv in converters] -# else: -# conversionfuncs = [conv._strict_call for conv in converters] -# for (i, vals) in enumerate(rows): -# rows[i] = tuple([convert(val) -# for (convert, val) in zip(conversionfuncs, vals)]) - if loose: - rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows)) - for (i, converter) in enumerate(converters)]) - else: - rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows)) - for (i, converter) in enumerate(converters)]) - # Reset the dtype - data = rows - if dtype is None: - # Get the dtypes from the types of the converters - column_types = [conv.type for conv in converters] - # Find the columns with strings... - strcolidx = [i for (i, v) in enumerate(column_types) - if v in (type('S'), np.string_)] - # ... and take the largest number of chars. 
- for i in strcolidx: - column_types[i] = "|S%i" % max(len(row[i]) for row in data) - # - if names is None: - # If the dtype is uniform, don't define names, else use '' - base = set([c.type for c in converters if c._checked]) - if len(base) == 1: - (ddtype, mdtype) = (list(base)[0], np.bool) - else: - ddtype = [(defaultfmt % i, dt) - for (i, dt) in enumerate(column_types)] - if usemask: - mdtype = [(defaultfmt % i, np.bool) - for (i, dt) in enumerate(column_types)] - else: - ddtype = zip(names, column_types) - mdtype = zip(names, [np.bool] * len(column_types)) - output = np.array(data, dtype=ddtype) - if usemask: - outputmask = np.array(masks, dtype=mdtype) - else: - # Overwrite the initial dtype names if needed - if names and dtype.names: - dtype.names = names - # Case 1. We have a structured type - if len(dtype_flat) > 1: - # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] - # First, create the array using a flattened dtype: - # [('a', int), ('b1', int), ('b2', float)] - # Then, view the array using the specified dtype. - if 'O' in (_.char for _ in dtype_flat): - if has_nested_fields(dtype): - errmsg = "Nested fields involving objects "\ - "are not supported..." - raise NotImplementedError(errmsg) - else: - output = np.array(data, dtype=dtype) - else: - rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) - output = rows.view(dtype) - # Now, process the rowmasks the same way - if usemask: - rowmasks = np.array(masks, - dtype=np.dtype([('', np.bool) - for t in dtype_flat])) - # Construct the new dtype - mdtype = make_mask_descr(dtype) - outputmask = rowmasks.view(mdtype) - # Case #2. 
We have a basic dtype - else: - # We used some user-defined converters - if user_converters: - ishomogeneous = True - descr = [] - for (i, ttype) in enumerate([conv.type for conv in converters]): - # Keep the dtype of the current converter - if i in user_converters: - ishomogeneous &= (ttype == dtype.type) - if ttype == np.string_: - ttype = "|S%i" % max(len(row[i]) for row in data) - descr.append(('', ttype)) - else: - descr.append(('', dtype)) - # So we changed the dtype ? - if not ishomogeneous: - # We have more than one field - if len(descr) > 1: - dtype = np.dtype(descr) - # We have only one field: drop the name if not needed. - else: - dtype = np.dtype(ttype) - # - output = np.array(data, dtype) - if usemask: - if dtype.names: - mdtype = [(_, np.bool) for _ in dtype.names] - else: - mdtype = np.bool - outputmask = np.array(masks, dtype=mdtype) - # Try to take care of the missing data we missed - names = output.dtype.names - if usemask and names: - for (name, conv) in zip(names or (), converters): - missing_values = [conv(_) for _ in conv.missing_values - if _ != asbytes('')] - for mval in missing_values: - outputmask[name] |= (output[name] == mval) - # Construct the final array - if usemask: - output = output.view(MaskedArray) - output._mask = outputmask - if unpack: - return output.squeeze().T - return output.squeeze() - - - -def ndfromtxt(fname, **kwargs): - """ - Load ASCII data stored in a file and return it as a single array. - - Complete description of all the optional input parameters is available in - the docstring of the `genfromtxt` function. - - See Also - -------- - numpy.genfromtxt : generic function. - - """ - kwargs['usemask'] = False - return genfromtxt(fname, **kwargs) - - -def mafromtxt(fname, **kwargs): - """ - Load ASCII data stored in a text file and return a masked array. - - For a complete description of all the input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. 
- - """ - kwargs['usemask'] = True - return genfromtxt(fname, **kwargs) - - -def recfromtxt(fname, **kwargs): - """ - Load ASCII data from a file and return it in a record array. - - If ``usemask=False`` a standard `recarray` is returned, - if ``usemask=True`` a MaskedRecords array is returned. - - Complete description of all the optional input parameters is available in - the docstring of the `genfromtxt` function. - - See Also - -------- - numpy.genfromtxt : generic function - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - kwargs.update(dtype=kwargs.get('dtype', None)) - usemask = kwargs.get('usemask', False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output - - -def recfromcsv(fname, **kwargs): - """ - Load ASCII data stored in a comma-separated file. - - The returned array is a record array (if ``usemask=False``, see - `recarray`) or a masked record array (if ``usemask=True``, - see `ma.mrecords.MaskedRecords`). - - For a complete description of all the input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. 
- - """ - case_sensitive = kwargs.get('case_sensitive', "lower") or "lower" - names = kwargs.get('names', True) - if names is None: - names = True - kwargs.update(dtype=kwargs.get('update', None), - delimiter=kwargs.get('delimiter', ",") or ",", - names=names, - case_sensitive=case_sensitive) - usemask = kwargs.get("usemask", False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output diff --git a/numpy-1.6.2/numpy/lib/polynomial.py b/numpy-1.6.2/numpy/lib/polynomial.py deleted file mode 100644 index f3146d6916..0000000000 --- a/numpy-1.6.2/numpy/lib/polynomial.py +++ /dev/null @@ -1,1233 +0,0 @@ -""" -Functions to operate on polynomials. -""" - -__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', - 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', - 'polyfit', 'RankWarning'] - -import re -import warnings -import numpy.core.numeric as NX - -from numpy.core import isscalar, abs, finfo, atleast_1d, hstack -from numpy.lib.twodim_base import diag, vander -from numpy.lib.function_base import trim_zeros, sort_complex -from numpy.lib.type_check import iscomplex, real, imag -from numpy.linalg import eigvals, lstsq - -class RankWarning(UserWarning): - """ - Issued by `polyfit` when the Vandermonde matrix is rank deficient. - - For more information, a way to suppress the warning, and an example of - `RankWarning` being issued, see `polyfit`. - - """ - pass - -def poly(seq_of_zeros): - """ - Find the coefficients of a polynomial with the given sequence of roots. - - Returns the coefficients of the polynomial whose leading coefficient - is one for the given sequence of zeros (multiple roots must be included - in the sequence as many times as their multiplicity; see Examples). 
- A square matrix (or array, which will be treated as a matrix) can also - be given, in which case the coefficients of the characteristic polynomial - of the matrix are returned. - - Parameters - ---------- - seq_of_zeros : array_like, shape (N,) or (N, N) - A sequence of polynomial roots, or a square array or matrix object. - - Returns - ------- - c : ndarray - 1D array of polynomial coefficients from highest to lowest degree: - - ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` - where c[0] always equals 1. - - Raises - ------ - ValueError - If input is the wrong shape (the input must be a 1-D or square - 2-D array). - - See Also - -------- - polyval : Evaluate a polynomial at a point. - roots : Return the roots of a polynomial. - polyfit : Least squares polynomial fit. - poly1d : A one-dimensional polynomial class. - - Notes - ----- - Specifying the roots of a polynomial still leaves one degree of - freedom, typically represented by an undetermined leading - coefficient. [1]_ In the case of this function, that coefficient - - the first one in the returned array - is always taken as one. (If - for some reason you have one other point, the only automatic way - presently to leverage that information is to use ``polyfit``.) - - The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` - matrix **A** is given by - - :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, - - where **I** is the `n`-by-`n` identity matrix. [2]_ - - References - ---------- - .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, - Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. - - .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," - Academic Press, pg. 182, 1980. - - Examples - -------- - Given a sequence of a polynomial's zeros: - - >>> np.poly((0, 0, 0)) # Multiple root example - array([1, 0, 0, 0]) - - The line above represents z**3 + 0*z**2 + 0*z + 0. - - >>> np.poly((-1./2, 0, 1./2)) - array([ 1. , 0. 
, -0.25, 0. ]) - - The line above represents z**3 - z/4 - - >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) - array([ 1. , -0.77086955, 0.08618131, 0. ]) #random - - Given a square array object: - - >>> P = np.array([[0, 1./3], [-1./2, 0]]) - >>> np.poly(P) - array([ 1. , 0. , 0.16666667]) - - Or a square matrix object: - - >>> np.poly(np.matrix(P)) - array([ 1. , 0. , 0.16666667]) - - Note how in all cases the leading coefficient is always 1. - - """ - seq_of_zeros = atleast_1d(seq_of_zeros) - sh = seq_of_zeros.shape - if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: - seq_of_zeros = eigvals(seq_of_zeros) - elif len(sh) == 1: - pass - else: - raise ValueError, "input must be 1d or square 2d array." - - if len(seq_of_zeros) == 0: - return 1.0 - - a = [1] - for k in range(len(seq_of_zeros)): - a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full') - - if issubclass(a.dtype.type, NX.complexfloating): - # if complex roots are all complex conjugates, the roots are real. - roots = NX.asarray(seq_of_zeros, complex) - pos_roots = sort_complex(NX.compress(roots.imag > 0, roots)) - neg_roots = NX.conjugate(sort_complex( - NX.compress(roots.imag < 0,roots))) - if (len(pos_roots) == len(neg_roots) and - NX.alltrue(neg_roots == pos_roots)): - a = a.real.copy() - - return a - -def roots(p): - """ - Return the roots of a polynomial with coefficients given in p. - - The values in the rank-1 array `p` are coefficients of a polynomial. - If the length of `p` is n+1 then the polynomial is described by:: - - p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] - - Parameters - ---------- - p : array_like - Rank-1 array of polynomial coefficients. - - Returns - ------- - out : ndarray - An array containing the complex roots of the polynomial. - - Raises - ------ - ValueError : - When `p` cannot be converted to a rank-1 array. - - See also - -------- - poly : Find the coefficients of a polynomial with a given sequence - of roots. 
- polyval : Evaluate a polynomial at a point. - polyfit : Least squares polynomial fit. - poly1d : A one-dimensional polynomial class. - - Notes - ----- - The algorithm relies on computing the eigenvalues of the - companion matrix [1]_. - - References - ---------- - .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: - Cambridge University Press, 1999, pp. 146-7. - - Examples - -------- - >>> coeff = [3.2, 2, 1] - >>> np.roots(coeff) - array([-0.3125+0.46351241j, -0.3125-0.46351241j]) - - """ - # If input is scalar, this makes it an array - p = atleast_1d(p) - if len(p.shape) != 1: - raise ValueError,"Input must be a rank-1 array." - - # find non-zero array entries - non_zero = NX.nonzero(NX.ravel(p))[0] - - # Return an empty array if polynomial is all zeros - if len(non_zero) == 0: - return NX.array([]) - - # find the number of trailing zeros -- this is the number of roots at 0. - trailing_zeros = len(p) - non_zero[-1] - 1 - - # strip leading and trailing zeros - p = p[int(non_zero[0]):int(non_zero[-1])+1] - - # casting: if incoming array isn't floating point, make it floating point. - if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): - p = p.astype(float) - - N = len(p) - if N > 1: - # build companion matrix and find its eigenvalues (the roots) - A = diag(NX.ones((N-2,), p.dtype), -1) - A[0, :] = -p[1:] / p[0] - roots = eigvals(A) - else: - roots = NX.array([]) - - # tack any zeros onto the back of the array - roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) - return roots - -def polyint(p, m=1, k=None): - """ - Return an antiderivative (indefinite integral) of a polynomial. - - The returned order `m` antiderivative `P` of polynomial `p` satisfies - :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` - integration constants `k`. The constants determine the low-order - polynomial part - - .. 
math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} - - of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. - - Parameters - ---------- - p : {array_like, poly1d} - Polynomial to differentiate. - A sequence is interpreted as polynomial coefficients, see `poly1d`. - m : int, optional - Order of the antiderivative. (Default: 1) - k : {None, list of `m` scalars, scalar}, optional - Integration constants. They are given in the order of integration: - those corresponding to highest-order terms come first. - - If ``None`` (default), all constants are assumed to be zero. - If `m = 1`, a single scalar can be given instead of a list. - - See Also - -------- - polyder : derivative of a polynomial - poly1d.integ : equivalent method - - Examples - -------- - The defining property of the antiderivative: - - >>> p = np.poly1d([1,1,1]) - >>> P = np.polyint(p) - >>> P - poly1d([ 0.33333333, 0.5 , 1. , 0. ]) - >>> np.polyder(P) == p - True - - The integration constants default to zero, but can be specified: - - >>> P = np.polyint(p, 3) - >>> P(0) - 0.0 - >>> np.polyder(P)(0) - 0.0 - >>> np.polyder(P, 2)(0) - 0.0 - >>> P = np.polyint(p, 3, k=[6,5,3]) - >>> P - poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) - - Note that 3 = 6 / 2!, and that the constants are given in the order of - integrations. Constant of the highest-order polynomial term comes first: - - >>> np.polyder(P, 2)(0) - 6.0 - >>> np.polyder(P, 1)(0) - 5.0 - >>> P(0) - 3.0 - - """ - m = int(m) - if m < 0: - raise ValueError, "Order of integral must be positive (see polyder)" - if k is None: - k = NX.zeros(m, float) - k = atleast_1d(k) - if len(k) == 1 and m > 1: - k = k[0]*NX.ones(m, float) - if len(k) < m: - raise ValueError, \ - "k must be a scalar or a rank-1 array of length 1 or >m." 
- - truepoly = isinstance(p, poly1d) - p = NX.asarray(p) - if m == 0: - if truepoly: - return poly1d(p) - return p - else: - # Note: this must work also with object and integer arrays - y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) - val = polyint(y, m - 1, k=k[1:]) - if truepoly: - return poly1d(val) - return val - -def polyder(p, m=1): - """ - Return the derivative of the specified order of a polynomial. - - Parameters - ---------- - p : poly1d or sequence - Polynomial to differentiate. - A sequence is interpreted as polynomial coefficients, see `poly1d`. - m : int, optional - Order of differentiation (default: 1) - - Returns - ------- - der : poly1d - A new polynomial representing the derivative. - - See Also - -------- - polyint : Anti-derivative of a polynomial. - poly1d : Class for one-dimensional polynomials. - - Examples - -------- - The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: - - >>> p = np.poly1d([1,1,1,1]) - >>> p2 = np.polyder(p) - >>> p2 - poly1d([3, 2, 1]) - - which evaluates to: - - >>> p2(2.) - 17.0 - - We can verify this, approximating the derivative with - ``(f(x + h) - f(x))/h``: - - >>> (p(2. + 0.001) - p(2.)) / 0.001 - 17.007000999997857 - - The fourth-order derivative of a 3rd-order polynomial is zero: - - >>> np.polyder(p, 2) - poly1d([6, 2]) - >>> np.polyder(p, 3) - poly1d([6]) - >>> np.polyder(p, 4) - poly1d([ 0.]) - - """ - m = int(m) - if m < 0: - raise ValueError, "Order of derivative must be positive (see polyint)" - - truepoly = isinstance(p, poly1d) - p = NX.asarray(p) - n = len(p) - 1 - y = p[:-1] * NX.arange(n, 0, -1) - if m == 0: - val = p - else: - val = polyder(y, m - 1) - if truepoly: - val = poly1d(val) - return val - -def polyfit(x, y, deg, rcond=None, full=False): - """ - Least squares polynomial fit. - - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` - to points `(x, y)`. Returns a vector of coefficients `p` that minimises - the squared error. 
- - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than this - relative to the largest singular value will be ignored. The default - value is len(x)*eps, where eps is the relative precision of the float - type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is - False (the default) just the coefficients are returned, when True - diagnostic information from the singular value decomposition is also - returned. - - Returns - ------- - p : ndarray, shape (M,) or (M, K) - Polynomial coefficients, highest power first. - If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``. - - residuals, rank, singular_values, rcond : present only if `full` = True - Residuals of the least-squares fit, the effective rank of the scaled - Vandermonde coefficient matrix, its singular values, and the specified - value of `rcond`. For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. - - The warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - polyval : Computes polynomial values. - linalg.lstsq : Computes a least-squares fit. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution minimizes the squared error - - .. math :: - E = \\sum_{j=0}^k |p(x_j) - y_j|^2 - - in the equations:: - - x[0]**n * p[n] + ... 
+ x[0] * p[1] + p[0] = y[0] - x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1] - ... - x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k] - - The coefficient matrix of the coefficients `p` is a Vandermonde matrix. - - `polyfit` issues a `RankWarning` when the least-squares fit is badly - conditioned. This implies that the best fit is not well-defined due - to numerical error. The results may be improved by lowering the polynomial - degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter - can also be set to a value smaller than its default, but the resulting - fit may be spurious: including contributions from the small singular - values can add numerical noise to the result. - - Note that fitting polynomial coefficients is inherently badly conditioned - when the degree of the polynomial is large or the interval of sample points - is badly centered. The quality of the fit should always be checked in these - cases. When polynomial fits are not satisfactory, splines may be a good - alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - .. [2] Wikipedia, "Polynomial interpolation", - http://en.wikipedia.org/wiki/Polynomial_interpolation - - Examples - -------- - >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) - >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) - >>> z = np.polyfit(x, y, 3) - >>> z - array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) - - It is convenient to use `poly1d` objects for dealing with polynomials: - - >>> p = np.poly1d(z) - >>> p(0.5) - 0.6143849206349179 - >>> p(3.5) - -0.34732142857143039 - >>> p(10) - 22.579365079365115 - - High-order polynomials may oscillate wildly: - - >>> p30 = np.poly1d(np.polyfit(x, y, 30)) - /... RankWarning: Polyfit may be poorly conditioned... 
- >>> p30(4) - -0.80000000000000204 - >>> p30(5) - -0.99999999999999445 - >>> p30(4.5) - -0.10547061179440398 - - Illustration: - - >>> import matplotlib.pyplot as plt - >>> xp = np.linspace(-2, 6, 100) - >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') - [, , ] - >>> plt.ylim(-2,2) - (-2, 2) - >>> plt.show() - - """ - order = int(deg) + 1 - x = NX.asarray(x) + 0.0 - y = NX.asarray(y) + 0.0 - - # check arguments. - if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if x.shape[0] != y.shape[0] : - raise TypeError, "expected x and y to have same length" - - # set rcond - if rcond is None : - rcond = len(x)*finfo(x.dtype).eps - - # scale x to improve condition number - scale = abs(x).max() - if scale != 0 : - x /= scale - - # solve least squares equation for powers of x - v = vander(x, order) - c, resids, rank, s = lstsq(v, y, rcond) - - # warn on rank reduction, which indicates an ill conditioned matrix - if rank != order and not full: - msg = "Polyfit may be poorly conditioned" - warnings.warn(msg, RankWarning) - - # scale returned coefficients - if scale != 0 : - if c.ndim == 1 : - c /= vander([scale], order)[0] - else : - c /= vander([scale], order).T - - if full : - return c, resids, rank, s, rcond - else : - return c - - - -def polyval(p, x): - """ - Evaluate a polynomial at specific values. - - If `p` is of length N, this function returns the value: - - ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` - - If `x` is a sequence, then `p(x)` is returned for each element of `x`. - If `x` is another polynomial then the composite polynomial `p(x(t))` - is returned. 
- - Parameters - ---------- - p : array_like or poly1d object - 1D array of polynomial coefficients (including coefficients equal - to zero) from highest degree to the constant term, or an - instance of poly1d. - x : array_like or poly1d object - A number, a 1D array of numbers, or an instance of poly1d, "at" - which to evaluate `p`. - - Returns - ------- - values : ndarray or poly1d - If `x` is a poly1d instance, the result is the composition of the two - polynomials, i.e., `x` is "substituted" in `p` and the simplified - result is returned. In addition, the type of `x` - array_like or - poly1d - governs the type of the output: `x` array_like => `values` - array_like, `x` a poly1d object => `values` is also. - - See Also - -------- - poly1d: A polynomial class. - - Notes - ----- - Horner's scheme [1]_ is used to evaluate the polynomial. Even so, - for polynomials of high degree the values may be inaccurate due to - rounding errors. Use carefully. - - References - ---------- - .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. - trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand - Reinhold Co., 1985, pg. 720. - - Examples - -------- - >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 - 76 - >>> np.polyval([3,0,1], np.poly1d(5)) - poly1d([ 76.]) - >>> np.polyval(np.poly1d([3,0,1]), 5) - 76 - >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) - poly1d([ 76.]) - - """ - p = NX.asarray(p) - if isinstance(x, poly1d): - y = 0 - else: - x = NX.asarray(x) - y = NX.zeros_like(x) - for i in range(len(p)): - y = x * y + p[i] - return y - -def polyadd(a1, a2): - """ - Find the sum of two polynomials. - - Returns the polynomial resulting from the sum of two input polynomials. - Each input must be either a poly1d object or a 1D sequence of polynomial - coefficients, from highest to lowest degree. - - Parameters - ---------- - a1, a2 : array_like or poly1d object - Input polynomials. 
- - Returns - ------- - out : ndarray or poly1d object - The sum of the inputs. If either input is a poly1d object, then the - output is also a poly1d object. Otherwise, it is a 1D array of - polynomial coefficients from highest to lowest degree. - - See Also - -------- - poly1d : A one-dimensional polynomial class. - poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval - - Examples - -------- - >>> np.polyadd([1, 2], [9, 5, 4]) - array([9, 6, 6]) - - Using poly1d objects: - - >>> p1 = np.poly1d([1, 2]) - >>> p2 = np.poly1d([9, 5, 4]) - >>> print p1 - 1 x + 2 - >>> print p2 - 2 - 9 x + 5 x + 4 - >>> print np.polyadd(p1, p2) - 2 - 9 x + 6 x + 6 - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1 = atleast_1d(a1) - a2 = atleast_1d(a2) - diff = len(a2) - len(a1) - if diff == 0: - val = a1 + a2 - elif diff > 0: - zr = NX.zeros(diff, a1.dtype) - val = NX.concatenate((zr, a1)) + a2 - else: - zr = NX.zeros(abs(diff), a2.dtype) - val = a1 + NX.concatenate((zr, a2)) - if truepoly: - val = poly1d(val) - return val - -def polysub(a1, a2): - """ - Difference (subtraction) of two polynomials. - - Given two polynomials `a1` and `a2`, returns ``a1 - a2``. - `a1` and `a2` can be either array_like sequences of the polynomials' - coefficients (including coefficients equal to zero), or `poly1d` objects. - - Parameters - ---------- - a1, a2 : array_like or poly1d - Minuend and subtrahend polynomials, respectively. - - Returns - ------- - out : ndarray or poly1d - Array or `poly1d` object of the difference polynomial's coefficients. - - See Also - -------- - polyval, polydiv, polymul, polyadd - - Examples - -------- - .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) - - >>> np.polysub([2, 10, -2], [3, 10, -4]) - array([-1, 0, 2]) - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1 = atleast_1d(a1) - a2 = atleast_1d(a2) - diff = len(a2) - len(a1) - if diff == 0: - val = a1 - a2 - elif diff > 0: - zr = NX.zeros(diff, a1.dtype) - val = NX.concatenate((zr, a1)) - a2 - else: - zr = NX.zeros(abs(diff), a2.dtype) - val = a1 - NX.concatenate((zr, a2)) - if truepoly: - val = poly1d(val) - return val - - -def polymul(a1, a2): - """ - Find the product of two polynomials. - - Finds the polynomial resulting from the multiplication of the two input - polynomials. Each input must be either a poly1d object or a 1D sequence - of polynomial coefficients, from highest to lowest degree. - - Parameters - ---------- - a1, a2 : array_like or poly1d object - Input polynomials. - - Returns - ------- - out : ndarray or poly1d object - The polynomial resulting from the multiplication of the inputs. If - either inputs is a poly1d object, then the output is also a poly1d - object. Otherwise, it is a 1D array of polynomial coefficients from - highest to lowest degree. - - See Also - -------- - poly1d : A one-dimensional polynomial class. - poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, - polyval - - Examples - -------- - >>> np.polymul([1, 2, 3], [9, 5, 1]) - array([ 9, 23, 38, 17, 3]) - - Using poly1d objects: - - >>> p1 = np.poly1d([1, 2, 3]) - >>> p2 = np.poly1d([9, 5, 1]) - >>> print p1 - 2 - 1 x + 2 x + 3 - >>> print p2 - 2 - 9 x + 5 x + 1 - >>> print np.polymul(p1, p2) - 4 3 2 - 9 x + 23 x + 38 x + 17 x + 3 - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1,a2 = poly1d(a1),poly1d(a2) - val = NX.convolve(a1, a2) - if truepoly: - val = poly1d(val) - return val - -def polydiv(u, v): - """ - Returns the quotient and remainder of polynomial division. 
- - The input arrays are the coefficients (including any coefficients - equal to zero) of the "numerator" (dividend) and "denominator" - (divisor) polynomials, respectively. - - Parameters - ---------- - u : array_like or poly1d - Dividend polynomial's coefficients. - - v : array_like or poly1d - Divisor polynomial's coefficients. - - Returns - ------- - q : ndarray - Coefficients, including those equal to zero, of the quotient. - r : ndarray - Coefficients, including those equal to zero, of the remainder. - - See Also - -------- - poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub, - polyval - - Notes - ----- - Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need - not equal `v.ndim`. In other words, all four possible combinations - - ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, - ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. - - Examples - -------- - .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 - - >>> x = np.array([3.0, 5.0, 2.0]) - >>> y = np.array([2.0, 1.0]) - >>> np.polydiv(x, y) - (array([ 1.5 , 1.75]), array([ 0.25])) - - """ - truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d)) - u = atleast_1d(u) + 0.0 - v = atleast_1d(v) + 0.0 - # w has the common type - w = u[0] + v[0] - m = len(u) - 1 - n = len(v) - 1 - scale = 1. 
/ v[0] - q = NX.zeros((max(m - n + 1, 1),), w.dtype) - r = u.copy() - for k in range(0, m-n+1): - d = scale * r[k] - q[k] = d - r[k:k+n+1] -= d*v - while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): - r = r[1:] - if truepoly: - return poly1d(q), poly1d(r) - return q, r - -_poly_mat = re.compile(r"[*][*]([0-9]*)") -def _raise_power(astr, wrap=70): - n = 0 - line1 = '' - line2 = '' - output = ' ' - while 1: - mat = _poly_mat.search(astr, n) - if mat is None: - break - span = mat.span() - power = mat.groups()[0] - partstr = astr[n:span[0]] - n = span[1] - toadd2 = partstr + ' '*(len(power)-1) - toadd1 = ' '*(len(partstr)-1) + power - if ((len(line2)+len(toadd2) > wrap) or \ - (len(line1)+len(toadd1) > wrap)): - output += line1 + "\n" + line2 + "\n " - line1 = toadd1 - line2 = toadd2 - else: - line2 += partstr + ' '*(len(power)-1) - line1 += ' '*(len(partstr)-1) + power - output += line1 + "\n" + line2 - return output + astr[n:] - - -class poly1d(object): - """ - A one-dimensional polynomial class. - - A convenience class, used to encapsulate "natural" operations on - polynomials so that said operations may take on their customary - form in code (see Examples). - - Parameters - ---------- - c_or_r : array_like - The polynomial's coefficients, in decreasing powers, or if - the value of the second parameter is True, the polynomial's - roots (values where the polynomial evaluates to 0). For example, - ``poly1d([1, 2, 3])`` returns an object that represents - :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns - one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. - r : bool, optional - If True, `c_or_r` specifies the polynomial's roots; the default - is False. - variable : str, optional - Changes the variable used when printing `p` from `x` to `variable` - (see Examples). 
- - Examples - -------- - Construct the polynomial :math:`x^2 + 2x + 3`: - - >>> p = np.poly1d([1, 2, 3]) - >>> print np.poly1d(p) - 2 - 1 x + 2 x + 3 - - Evaluate the polynomial at :math:`x = 0.5`: - - >>> p(0.5) - 4.25 - - Find the roots: - - >>> p.r - array([-1.+1.41421356j, -1.-1.41421356j]) - >>> p(p.r) - array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) - - These numbers in the previous line represent (0, 0) to machine precision - - Show the coefficients: - - >>> p.c - array([1, 2, 3]) - - Display the order (the leading zero-coefficients are removed): - - >>> p.order - 2 - - Show the coefficient of the k-th power in the polynomial - (which is equivalent to ``p.c[-(i+1)]``): - - >>> p[1] - 2 - - Polynomials can be added, subtracted, multiplied, and divided - (returns quotient and remainder): - - >>> p * p - poly1d([ 1, 4, 10, 12, 9]) - - >>> (p**3 + 4) / p - (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) - - ``asarray(p)`` gives the coefficient array, so polynomials can be - used in all functions that accept arrays: - - >>> p**2 # square of polynomial - poly1d([ 1, 4, 10, 12, 9]) - - >>> np.square(p) # square of individual coefficients - array([1, 4, 9]) - - The variable used in the string representation of `p` can be modified, - using the `variable` parameter: - - >>> p = np.poly1d([1,2,3], variable='z') - >>> print p - 2 - 1 z + 2 z + 3 - - Construct a polynomial from its roots: - - >>> np.poly1d([1, 2], True) - poly1d([ 1, -3, 2]) - - This is the same polynomial as obtained by: - - >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) - poly1d([ 1, -3, 2]) - - """ - coeffs = None - order = None - variable = None - def __init__(self, c_or_r, r=0, variable=None): - if isinstance(c_or_r, poly1d): - for key in c_or_r.__dict__.keys(): - self.__dict__[key] = c_or_r.__dict__[key] - if variable is not None: - self.__dict__['variable'] = variable - return - if r: - c_or_r = poly(c_or_r) - c_or_r = atleast_1d(c_or_r) - if len(c_or_r.shape) > 1: - raise ValueError, 
"Polynomial must be 1d only." - c_or_r = trim_zeros(c_or_r, trim='f') - if len(c_or_r) == 0: - c_or_r = NX.array([0.]) - self.__dict__['coeffs'] = c_or_r - self.__dict__['order'] = len(c_or_r) - 1 - if variable is None: - variable = 'x' - self.__dict__['variable'] = variable - - def __array__(self, t=None): - if t: - return NX.asarray(self.coeffs, t) - else: - return NX.asarray(self.coeffs) - - def __repr__(self): - vals = repr(self.coeffs) - vals = vals[6:-1] - return "poly1d(%s)" % vals - - def __len__(self): - return self.order - - def __str__(self): - thestr = "0" - var = self.variable - - # Remove leading zeros - coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] - N = len(coeffs)-1 - - def fmt_float(q): - s = '%.4g' % q - if s.endswith('.0000'): - s = s[:-5] - return s - - for k in range(len(coeffs)): - if not iscomplex(coeffs[k]): - coefstr = fmt_float(real(coeffs[k])) - elif real(coeffs[k]) == 0: - coefstr = '%sj' % fmt_float(imag(coeffs[k])) - else: - coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), - fmt_float(imag(coeffs[k]))) - - power = (N-k) - if power == 0: - if coefstr != '0': - newstr = '%s' % (coefstr,) - else: - if k == 0: - newstr = '0' - else: - newstr = '' - elif power == 1: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = var - else: - newstr = '%s %s' % (coefstr, var) - else: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = '%s**%d' % (var, power,) - else: - newstr = '%s %s**%d' % (coefstr, var, power) - - if k > 0: - if newstr != '': - if newstr.startswith('-'): - thestr = "%s - %s" % (thestr, newstr[1:]) - else: - thestr = "%s + %s" % (thestr, newstr) - else: - thestr = newstr - return _raise_power(thestr) - - - def __call__(self, val): - return polyval(self.coeffs, val) - - def __neg__(self): - return poly1d(-self.coeffs) - - def __pos__(self): - return self - - def __mul__(self, other): - if isscalar(other): - return poly1d(self.coeffs * other) - else: - other = poly1d(other) - 
return poly1d(polymul(self.coeffs, other.coeffs)) - - def __rmul__(self, other): - if isscalar(other): - return poly1d(other * self.coeffs) - else: - other = poly1d(other) - return poly1d(polymul(self.coeffs, other.coeffs)) - - def __add__(self, other): - other = poly1d(other) - return poly1d(polyadd(self.coeffs, other.coeffs)) - - def __radd__(self, other): - other = poly1d(other) - return poly1d(polyadd(self.coeffs, other.coeffs)) - - def __pow__(self, val): - if not isscalar(val) or int(val) != val or val < 0: - raise ValueError, "Power to non-negative integers only." - res = [1] - for _ in range(val): - res = polymul(self.coeffs, res) - return poly1d(res) - - def __sub__(self, other): - other = poly1d(other) - return poly1d(polysub(self.coeffs, other.coeffs)) - - def __rsub__(self, other): - other = poly1d(other) - return poly1d(polysub(other.coeffs, self.coeffs)) - - def __div__(self, other): - if isscalar(other): - return poly1d(self.coeffs/other) - else: - other = poly1d(other) - return polydiv(self, other) - - __truediv__ = __div__ - - def __rdiv__(self, other): - if isscalar(other): - return poly1d(other/self.coeffs) - else: - other = poly1d(other) - return polydiv(other, self) - - __rtruediv__ = __rdiv__ - - def __eq__(self, other): - return NX.alltrue(self.coeffs == other.coeffs) - - def __ne__(self, other): - return NX.any(self.coeffs != other.coeffs) - - def __setattr__(self, key, val): - raise ValueError, "Attributes cannot be changed this way." 
- - def __getattr__(self, key): - if key in ['r', 'roots']: - return roots(self.coeffs) - elif key in ['c','coef','coefficients']: - return self.coeffs - elif key in ['o']: - return self.order - else: - try: - return self.__dict__[key] - except KeyError: - raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key)) - - def __getitem__(self, val): - ind = self.order - val - if val > self.order: - return 0 - if val < 0: - return 0 - return self.coeffs[ind] - - def __setitem__(self, key, val): - ind = self.order - key - if key < 0: - raise ValueError, "Does not support negative powers." - if key > self.order: - zr = NX.zeros(key-self.order, self.coeffs.dtype) - self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs)) - self.__dict__['order'] = key - ind = 0 - self.__dict__['coeffs'][ind] = val - return - - def __iter__(self): - return iter(self.coeffs) - - def integ(self, m=1, k=0): - """ - Return an antiderivative (indefinite integral) of this polynomial. - - Refer to `polyint` for full documentation. - - See Also - -------- - polyint : equivalent function - - """ - return poly1d(polyint(self.coeffs, m=m, k=k)) - - def deriv(self, m=1): - """ - Return a derivative of this polynomial. - - Refer to `polyder` for full documentation. - - See Also - -------- - polyder : equivalent function - - """ - return poly1d(polyder(self.coeffs, m=m)) - -# Stuff to do on module import - -warnings.simplefilter('always',RankWarning) diff --git a/numpy-1.6.2/numpy/lib/recfunctions.py b/numpy-1.6.2/numpy/lib/recfunctions.py deleted file mode 100644 index b3c210fff7..0000000000 --- a/numpy-1.6.2/numpy/lib/recfunctions.py +++ /dev/null @@ -1,995 +0,0 @@ -""" -Collection of utilities to manipulate structured arrays. - -Most of these functions were initially implemented by John Hunter for matplotlib. -They have been rewritten and extended for convenience. 
- - -""" - -import sys -import itertools -import numpy as np -import numpy.ma as ma -from numpy import ndarray, recarray -from numpy.ma import MaskedArray -from numpy.ma.mrecords import MaskedRecords -from numpy.lib._iotools import _is_string_like - -_check_fill_value = np.ma.core._check_fill_value - -__all__ = ['append_fields', - 'drop_fields', - 'find_duplicates', - 'get_fieldstructure', - 'join_by', - 'merge_arrays', - 'rec_append_fields', 'rec_drop_fields', 'rec_join', - 'recursive_fill_fields', 'rename_fields', - 'stack_arrays', - ] - - -def recursive_fill_fields(input, output): - """ - Fills fields from output with fields from input, - with support for nested structures. - - Parameters - ---------- - input : ndarray - Input array. - output : ndarray - Output array. - - Notes - ----- - * `output` should be at least the same size as `input` - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) - >>> b = np.zeros((3,), dtype=a.dtype) - >>> rfn.recursive_fill_fields(a, b) - array([(1, 10.0), (2, 20.0), (0, 0.0)], - dtype=[('A', '>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names(np.empty((1,), dtype=int)) is None - True - >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') - >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) - >>> rfn.get_names(adtype) - ('a', ('b', ('ba', 'bb'))) - """ - listnames = [] - names = adtype.names - for name in names: - current = adtype[name] - if current.names: - listnames.append((name, tuple(get_names(current)))) - else: - listnames.append(name) - return tuple(listnames) or None - - -def get_names_flat(adtype): - """ - Returns the field names of the input datatype as a tuple. Nested structure - are flattend beforehand. 
- - Parameters - ---------- - adtype : dtype - Input datatype - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None - True - >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') - >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) - >>> rfn.get_names_flat(adtype) - ('a', 'b', 'ba', 'bb') - """ - listnames = [] - names = adtype.names - for name in names: - listnames.append(name) - current = adtype[name] - if current.names: - listnames.extend(get_names_flat(current)) - return tuple(listnames) or None - - -def flatten_descr(ndtype): - """ - Flatten a structured data-type description. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) - (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) - - """ - names = ndtype.names - if names is None: - return ndtype.descr - else: - descr = [] - for field in names: - (typ, _) = ndtype.fields[field] - if typ.names: - descr.extend(flatten_descr(typ)) - else: - descr.append((field, typ)) - return tuple(descr) - - -def zip_descr(seqarrays, flatten=False): - """ - Combine the dtype description of a series of arrays. - - Parameters - ---------- - seqarrays : sequence of arrays - Sequence of arrays - flatten : {boolean}, optional - Whether to collapse nested descriptions. - """ - newdtype = [] - if flatten: - for a in seqarrays: - newdtype.extend(flatten_descr(a.dtype)) - else: - for a in seqarrays: - current = a.dtype - names = current.names or () - if len(names) > 1: - newdtype.append(('', current.descr)) - else: - newdtype.extend(current.descr) - return np.dtype(newdtype).descr - - -def get_fieldstructure(adtype, lastname=None, parents=None,): - """ - Returns a dictionary with fields as keys and a list of parent fields as values. - - This function is used to simplify access to fields nested in other fields. 
- - Parameters - ---------- - adtype : np.dtype - Input datatype - lastname : optional - Last processed field name (used internally during recursion). - parents : dictionary - Dictionary of parent fields (used interbally during recursion). - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = np.dtype([('A', int), - ... ('B', [('BA', int), - ... ('BB', [('BBA', int), ('BBB', int)])])]) - >>> rfn.get_fieldstructure(ndtype) - ... # XXX: possible regression, order of BBA and BBB is swapped - {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} - - """ - if parents is None: - parents = {} - names = adtype.names - for name in names: - current = adtype[name] - if current.names: - if lastname: - parents[name] = [lastname, ] - else: - parents[name] = [] - parents.update(get_fieldstructure(current, name, parents)) - else: - lastparent = [_ for _ in (parents.get(lastname, []) or [])] - if lastparent: -# if (lastparent[-1] != lastname): - lastparent.append(lastname) - elif lastname: - lastparent = [lastname, ] - parents[name] = lastparent or [] - return parents or None - - -def _izip_fields_flat(iterable): - """ - Returns an iterator of concatenated fields from a sequence of arrays, - collapsing any nested structure. - """ - for element in iterable: - if isinstance(element, np.void): - for f in _izip_fields_flat(tuple(element)): - yield f - else: - yield element - - -def _izip_fields(iterable): - """ - Returns an iterator of concatenated fields from a sequence of arrays. - """ - for element in iterable: - if hasattr(element, '__iter__') and not isinstance(element, basestring): - for f in _izip_fields(element): - yield f - elif isinstance(element, np.void) and len(tuple(element)) == 1: - for f in _izip_fields(element): - yield f - else: - yield element - - -def izip_records(seqarrays, fill_value=None, flatten=True): - """ - Returns an iterator of concatenated items from a sequence of arrays. 
- - Parameters - ---------- - seqarray : sequence of arrays - Sequence of arrays. - fill_value : {None, integer} - Value used to pad shorter iterables. - flatten : {True, False}, - Whether to - """ - # OK, that's a complete ripoff from Python2.6 itertools.izip_longest - def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop): - "Yields the fill_value or raises IndexError" - yield counter() - # - fillers = itertools.repeat(fill_value) - iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays] - # Should we flatten the items, or just use a nested approach - if flatten: - zipfunc = _izip_fields_flat - else: - zipfunc = _izip_fields - # - try: - for tup in itertools.izip(*iters): - yield tuple(zipfunc(tup)) - except IndexError: - pass - - -def _fix_output(output, usemask=True, asrecarray=False): - """ - Private function: return a recarray, a ndarray, a MaskedArray - or a MaskedRecords depending on the input parameters - """ - if not isinstance(output, MaskedArray): - usemask = False - if usemask: - if asrecarray: - output = output.view(MaskedRecords) - else: - output = ma.filled(output) - if asrecarray: - output = output.view(recarray) - return output - - -def _fix_defaults(output, defaults=None): - """ - Update the fill_value and masked data of `output` - from the default given in a dictionary defaults. - """ - names = output.dtype.names - (data, mask, fill_value) = (output.data, output.mask, output.fill_value) - for (k, v) in (defaults or {}).iteritems(): - if k in names: - fill_value[k] = v - data[k][mask[k]] = v - return output - - - -def merge_arrays(seqarrays, - fill_value= -1, flatten=False, usemask=False, asrecarray=False): - """ - Merge arrays field by field. - - Parameters - ---------- - seqarrays : sequence of ndarrays - Sequence of arrays - fill_value : {float}, optional - Filling value used to pad missing data on the shorter arrays. - flatten : {False, True}, optional - Whether to collapse nested fields. 
- usemask : {False, True}, optional - Whether to return a masked array or not. - asrecarray : {False, True}, optional - Whether to return a recarray (MaskedRecords) or not. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) - masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], - mask = [(False, False) (False, False) (True, False)], - fill_value = (999999, 1e+20), - dtype = [('f0', '>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), - ... usemask=False) - array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), - ... np.array([10., 20., 30.])), - ... usemask=False, asrecarray=True) - rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - >>> rfn.drop_fields(a, 'a') - array([((2.0, 3),), ((5.0, 6),)], - dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') - array([(1, (3,)), (4, (6,))], - dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) - array([(1,), (4,)], - dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], - ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) - >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) - array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], - dtype=[('A', ' 1: - data = merge_arrays(data, flatten=True, usemask=usemask, - fill_value=fill_value) - else: - data = data.pop() - # - output = ma.masked_all(max(len(base), len(data)), - dtype=base.dtype.descr + data.dtype.descr) - output = recursive_fill_fields(base, output) - output = recursive_fill_fields(data, output) - # - return _fix_output(output, usemask=usemask, asrecarray=asrecarray) - - - -def rec_append_fields(base, names, data, dtypes=None): - """ - Add new fields to an existing array. - - The names of the fields are given with the `names` arguments, - the corresponding values with the `data` arguments. - If a single field is appended, `names`, `data` and `dtypes` do not have - to be lists but just values. - - Parameters - ---------- - base : array - Input array to extend. - names : string, sequence - String or sequence of strings corresponding to the names - of the new fields. - data : array or sequence of arrays - Array or sequence of arrays storing the fields to add to the base. - dtypes : sequence of datatypes, optional - Datatype or sequence of datatypes. - If None, the datatypes are estimated from the `data`. - - See Also - -------- - append_fields - - Returns - ------- - appended_array : np.recarray - """ - return append_fields(base, names, data=data, dtypes=dtypes, - asrecarray=True, usemask=False) - - - -def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, - autoconvert=False): - """ - Superposes arrays fields by fields - - Parameters - ---------- - seqarrays : array or sequence - Sequence of input arrays. - defaults : dictionary, optional - Dictionary mapping field names to the corresponding default values. - usemask : {True, False}, optional - Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`) - or a ndarray. 
- asrecarray : {False, True}, optional - Whether to return a recarray (or MaskedRecords if `usemask==True`) or - just a flexible-type ndarray. - autoconvert : {False, True}, optional - Whether automatically cast the type of the field to the maximum. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> x = np.array([1, 2,]) - >>> rfn.stack_arrays(x) is x - True - >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) - >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - ... dtype=[('A', '|S3'), ('B', float), ('C', float)]) - >>> test = rfn.stack_arrays((z,zz)) - >>> test - masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) - ('c', 30.0, 300.0)], - mask = [(False, False, True) (False, False, True) (False, False, False) - (False, False, False) (False, False, False)], - fill_value = ('N/A', 1e+20, 1e+20), - dtype = [('A', '|S3'), ('B', ' np.dtype(current_descr[-1]): - current_descr = list(current_descr) - current_descr[-1] = descr[1] - newdescr[nameidx] = tuple(current_descr) - elif descr[1] != current_descr[-1]: - raise TypeError("Incompatible type '%s' <> '%s'" % \ - (dict(newdescr)[name], descr[1])) - # Only one field: use concatenate - if len(newdescr) == 1: - output = ma.concatenate(seqarrays) - else: - # - output = ma.masked_all((np.sum(nrecords),), newdescr) - offset = np.cumsum(np.r_[0, nrecords]) - seen = [] - for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): - names = a.dtype.names - if names is None: - output['f%i' % len(seen)][i:j] = a - else: - for name in n: - output[name][i:j] = a[name] - if name not in seen: - seen.append(name) - # - return _fix_output(_fix_defaults(output, defaults), - usemask=usemask, asrecarray=asrecarray) - - - -def find_duplicates(a, key=None, ignoremask=True, return_index=False): - """ - Find the duplicates in a structured array along a given key - - Parameters - ---------- - a : array-like - Input array 
- key : {string, None}, optional - Name of the fields along which to check the duplicates. - If None, the search is performed by records - ignoremask : {True, False}, optional - Whether masked data should be discarded or considered as duplicates. - return_index : {False, True}, optional - Whether to return the indices of the duplicated values. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = [('a', int)] - >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], - ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) - ... # XXX: judging by the output, the ignoremask flag has no effect - """ - a = np.asanyarray(a).ravel() - # Get a dictionary of fields - fields = get_fieldstructure(a.dtype) - # Get the sorting data (by selecting the corresponding field) - base = a - if key: - for f in fields[key]: - base = base[f] - base = base[key] - # Get the sorting indices and the sorted data - sortidx = base.argsort() - sortedbase = base[sortidx] - sorteddata = sortedbase.filled() - # Compare the sorting data - flag = (sorteddata[:-1] == sorteddata[1:]) - # If masked data must be ignored, set the flag to false where needed - if ignoremask: - sortedmask = sortedbase.recordmask - flag[sortedmask[1:]] = False - flag = np.concatenate(([False], flag)) - # We need to take the point on the left as well (else we're missing it) - flag[:-1] = flag[:-1] + flag[1:] - duplicates = a[sortidx][flag] - if return_index: - return (duplicates, sortidx[flag]) - else: - return duplicates - - - -def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', - defaults=None, usemask=True, asrecarray=False): - """ - Join arrays `r1` and `r2` on key `key`. - - The key should be either a string or a sequence of string corresponding - to the fields used to join the array. - An exception is raised if the `key` field cannot be found in the two input - arrays. 
- Neither `r1` nor `r2` should have any duplicates along `key`: the presence - of duplicates will make the output quite unreliable. Note that duplicates - are not looked for by the algorithm. - - Parameters - ---------- - key : {string, sequence} - A string or a sequence of strings corresponding to the fields used - for comparison. - r1, r2 : arrays - Structured arrays. - jointype : {'inner', 'outer', 'leftouter'}, optional - If 'inner', returns the elements common to both r1 and r2. - If 'outer', returns the common elements as well as the elements of r1 - not in r2 and the elements of not in r2. - If 'leftouter', returns the common elements and the elements of r1 not - in r2. - r1postfix : string, optional - String appended to the names of the fields of r1 that are present in r2 - but absent of the key. - r2postfix : string, optional - String appended to the names of the fields of r2 that are present in r1 - but absent of the key. - defaults : {dictionary}, optional - Dictionary mapping field names to the corresponding default values. - usemask : {True, False}, optional - Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`) - or a ndarray. - asrecarray : {False, True}, optional - Whether to return a recarray (or MaskedRecords if `usemask==True`) or - just a flexible-type ndarray. - - Notes - ----- - * The output is sorted along the key. - * A temporary array is formed by dropping the fields not in the key for the - two arrays and concatenating the result. This array is then sorted, and - the common entries selected. The output is constructed by filling the fields - with the selected entries. Matching is not preserved if there are some - duplicates... 
- - """ - # Check jointype - if jointype not in ('inner', 'outer', 'leftouter'): - raise ValueError("The 'jointype' argument should be in 'inner', "\ - "'outer' or 'leftouter' (got '%s' instead)" % jointype) - # If we have a single key, put it in a tuple - if isinstance(key, basestring): - key = (key,) - - # Check the keys - for name in key: - if name not in r1.dtype.names: - raise ValueError('r1 does not have key field %s' % name) - if name not in r2.dtype.names: - raise ValueError('r2 does not have key field %s' % name) - - # Make sure we work with ravelled arrays - r1 = r1.ravel() - r2 = r2.ravel() - (nb1, nb2) = (len(r1), len(r2)) - (r1names, r2names) = (r1.dtype.names, r2.dtype.names) - - # Make temporary arrays of just the keys - r1k = drop_fields(r1, [n for n in r1names if n not in key]) - r2k = drop_fields(r2, [n for n in r2names if n not in key]) - - # Concatenate the two arrays for comparison - aux = ma.concatenate((r1k, r2k)) - idx_sort = aux.argsort(order=key) - aux = aux[idx_sort] - # - # Get the common keys - flag_in = ma.concatenate(([False], aux[1:] == aux[:-1])) - flag_in[:-1] = flag_in[1:] + flag_in[:-1] - idx_in = idx_sort[flag_in] - idx_1 = idx_in[(idx_in < nb1)] - idx_2 = idx_in[(idx_in >= nb1)] - nb1 - (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) - if jointype == 'inner': - (r1spc, r2spc) = (0, 0) - elif jointype == 'outer': - idx_out = idx_sort[~flag_in] - idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) - idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) - (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) - elif jointype == 'leftouter': - idx_out = idx_sort[~flag_in] - idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) - (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) - # Select the entries from each input - (s1, s2) = (r1[idx_1], r2[idx_2]) - # - # Build the new description of the output array ....... 
- # Start with the key fields - ndtype = [list(_) for _ in r1k.dtype.descr] - # Add the other fields - ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key) - # Find the new list of names (it may be different from r1names) - names = list(_[0] for _ in ndtype) - for desc in r2.dtype.descr: - desc = list(desc) - name = desc[0] - # Have we seen the current name already ? - if name in names: - nameidx = names.index(name) - current = ndtype[nameidx] - # The current field is part of the key: take the largest dtype - if name in key: - current[-1] = max(desc[1], current[-1]) - # The current field is not part of the key: add the suffixes - else: - current[0] += r1postfix - desc[0] += r2postfix - ndtype.insert(nameidx + 1, desc) - #... we haven't: just add the description to the current list - else: - names.extend(desc[0]) - ndtype.append(desc) - # Revert the elements to tuples - ndtype = [tuple(_) for _ in ndtype] - # Find the largest nb of common fields : r1cmn and r2cmn should be equal, but... - cmn = max(r1cmn, r2cmn) - # Construct an empty array - output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) - names = output.dtype.names - for f in r1names: - selected = s1[f] - if f not in names: - f += r1postfix - current = output[f] - current[:r1cmn] = selected[:r1cmn] - if jointype in ('outer', 'leftouter'): - current[cmn:cmn + r1spc] = selected[r1cmn:] - for f in r2names: - selected = s2[f] - if f not in names: - f += r2postfix - current = output[f] - current[:r2cmn] = selected[:r2cmn] - if (jointype == 'outer') and r2spc: - current[-r2spc:] = selected[r2cmn:] - # Sort and finalize the output - output.sort(order=key) - kwargs = dict(usemask=usemask, asrecarray=asrecarray) - return _fix_output(_fix_defaults(output, defaults), **kwargs) - - -def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', - defaults=None): - """ - Join arrays `r1` and `r2` on keys. - Alternative to join_by, that always returns a np.recarray. 
- - See Also - -------- - join_by : equivalent function - """ - kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, - defaults=defaults, usemask=False, asrecarray=True) - return join_by(key, r1, r2, **kwargs) diff --git a/numpy-1.6.2/numpy/lib/scimath.py b/numpy-1.6.2/numpy/lib/scimath.py deleted file mode 100644 index 48ed1dc25c..0000000000 --- a/numpy-1.6.2/numpy/lib/scimath.py +++ /dev/null @@ -1,559 +0,0 @@ -""" -Wrapper functions to more user-friendly calling of certain math functions -whose output data-type is different than the input data-type in certain -domains of the input. - -For example, for functions like `log` with branch cuts, the versions in this -module provide the mathematically valid answers in the complex plane:: - - >>> import math - >>> from numpy.lib import scimath - >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) - True - -Similarly, `sqrt`, other base logarithms, `power` and trig functions are -correctly handled. See their respective docstrings for specific examples. - -""" - -__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos', - 'arcsin', 'arctanh'] - -import numpy.core.numeric as nx -import numpy.core.numerictypes as nt -from numpy.core.numeric import asarray, any -from numpy.lib.type_check import isreal - -_ln2 = nx.log(2.0) - -def _tocomplex(arr): - """Convert its input `arr` to a complex array. - - The input is returned as a complex array of the smallest type that will fit - the original data: types like single, byte, short, etc. become csingle, - while others become cdouble. - - A copy of the input is always made. - - Parameters - ---------- - arr : array - - Returns - ------- - array - An array with the same input data as the input but in complex form. 
- - Examples - -------- - - First, consider an input of type short: - - >>> a = np.array([1,2,3],np.short) - - >>> ac = np.lib.scimath._tocomplex(a); ac - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - - >>> ac.dtype - dtype('complex64') - - If the input is of type double, the output is correspondingly of the - complex double type as well: - - >>> b = np.array([1,2,3],np.double) - - >>> bc = np.lib.scimath._tocomplex(b); bc - array([ 1.+0.j, 2.+0.j, 3.+0.j]) - - >>> bc.dtype - dtype('complex128') - - Note that even if the input was complex to begin with, a copy is still - made, since the astype() method always copies: - - >>> c = np.array([1,2,3],np.csingle) - - >>> cc = np.lib.scimath._tocomplex(c); cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - - >>> c *= 2; c - array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) - - >>> cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - """ - if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, - nt.ushort,nt.csingle)): - return arr.astype(nt.csingle) - else: - return arr.astype(nt.cdouble) - -def _fix_real_lt_zero(x): - """Convert `x` to complex if it has real, negative components. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_real_lt_zero([1,2]) - array([1, 2]) - - >>> np.lib.scimath._fix_real_lt_zero([-1,2]) - array([-1.+0.j, 2.+0.j]) - """ - x = asarray(x) - if any(isreal(x) & (x<0)): - x = _tocomplex(x) - return x - -def _fix_int_lt_zero(x): - """Convert `x` to double if it has real, negative components. - - Otherwise, output is just the array version of the input (via asarray). 
- - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_int_lt_zero([1,2]) - array([1, 2]) - - >>> np.lib.scimath._fix_int_lt_zero([-1,2]) - array([-1., 2.]) - """ - x = asarray(x) - if any(isreal(x) & (x < 0)): - x = x * 1.0 - return x - -def _fix_real_abs_gt_1(x): - """Convert `x` to complex if it has real components x_i with abs(x_i)>1. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) - array([0, 1]) - - >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) - array([ 0.+0.j, 2.+0.j]) - """ - x = asarray(x) - if any(isreal(x) & (abs(x)>1)): - x = _tocomplex(x) - return x - -def sqrt(x): - """ - Compute the square root of x. - - For negative input elements, a complex value is returned - (unlike `numpy.sqrt` which returns NaN). - - Parameters - ---------- - x : array_like - The input value(s). - - Returns - ------- - out : ndarray or scalar - The square root of `x`. If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.sqrt - - Examples - -------- - For real, non-negative inputs this works just like `numpy.sqrt`: - - >>> np.lib.scimath.sqrt(1) - 1.0 - >>> np.lib.scimath.sqrt([1, 4]) - array([ 1., 2.]) - - But it automatically handles negative inputs: - - >>> np.lib.scimath.sqrt(-1) - (0.0+1.0j) - >>> np.lib.scimath.sqrt([-1,4]) - array([ 0.+1.j, 2.+0.j]) - - """ - x = _fix_real_lt_zero(x) - return nx.sqrt(x) - -def log(x): - """ - Compute the natural logarithm of `x`. - - Return the "principal value" (for a description of this, see `numpy.log`) - of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)`` - returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the - complex principle value is returned. 
- - Parameters - ---------- - x : array_like - The value(s) whose log is (are) required. - - Returns - ------- - out : ndarray or scalar - The log of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.log - - Notes - ----- - For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` - (note, however, that otherwise `numpy.log` and this `log` are identical, - i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, - notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - >>> np.emath.log(np.exp(1)) - 1.0 - - Negative arguments are handled "correctly" (recall that - ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): - - >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) - True - - """ - x = _fix_real_lt_zero(x) - return nx.log(x) - -def log10(x): - """ - Compute the logarithm base 10 of `x`. - - Return the "principal value" (for a description of this, see - `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this - is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` - returns ``inf``). Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose log base 10 is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array object is returned. - - See Also - -------- - numpy.log10 - - Notes - ----- - For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` - (note, however, that otherwise `numpy.log10` and this `log10` are - identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, - and, notably, the complex principle value if ``x.imag != 0``). 
- - Examples - -------- - - (We set the printing precision so the example can be auto-tested) - - >>> np.set_printoptions(precision=4) - - >>> np.emath.log10(10**1) - 1.0 - - >>> np.emath.log10([-10**1, -10**2, 10**2]) - array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - return nx.log10(x) - -def logn(n, x): - """ - Take log base n of x. - - If `x` contains negative inputs, the answer is computed and returned in the - complex domain. - - Parameters - ---------- - n : int - The base in which the log is taken. - x : array_like - The value(s) whose log base `n` is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base `n` of the `x` value(s). If `x` was a scalar, so is - `out`, otherwise an array is returned. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.lib.scimath.logn(2, [4, 8]) - array([ 2., 3.]) - >>> np.lib.scimath.logn(2, [-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - n = _fix_real_lt_zero(n) - return nx.log(x)/nx.log(n) - -def log2(x): - """ - Compute the logarithm base 2 of `x`. - - Return the "principal value" (for a description of this, see - `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is - a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns - ``inf``). Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like - The value(s) whose log base 2 is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.log2 - - Notes - ----- - For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` - (note, however, that otherwise `numpy.log2` and this `log2` are - identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, - and, notably, the complex principle value if ``x.imag != 0``). 
- - Examples - -------- - We set the printing precision so the example can be auto-tested: - - >>> np.set_printoptions(precision=4) - - >>> np.emath.log2(8) - 3.0 - >>> np.emath.log2([-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - return nx.log2(x) - -def power(x, p): - """ - Return x to the power p, (x**p). - - If `x` contains negative values, the output is converted to the - complex domain. - - Parameters - ---------- - x : array_like - The input value(s). - p : array_like of ints - The power(s) to which `x` is raised. If `x` contains multiple values, - `p` has to either be a scalar, or contain the same number of values - as `x`. In the latter case, the result is - ``x[0]**p[0], x[1]**p[1], ...``. - - Returns - ------- - out : ndarray or scalar - The result of ``x**p``. If `x` and `p` are scalars, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.power - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.lib.scimath.power([2, 4], 2) - array([ 4, 16]) - >>> np.lib.scimath.power([2, 4], -2) - array([ 0.25 , 0.0625]) - >>> np.lib.scimath.power([-2, 4], 2) - array([ 4.+0.j, 16.+0.j]) - - """ - x = _fix_real_lt_zero(x) - p = _fix_int_lt_zero(p) - return nx.power(x, p) - -def arccos(x): - """ - Compute the inverse cosine of x. - - Return the "principal value" (for a description of this, see - `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that - `abs(x) <= 1`, this is a real number in the closed interval - :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose arccos is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so - is `out`, otherwise an array object is returned. 
- - See Also - -------- - numpy.arccos - - Notes - ----- - For an arccos() that returns ``NAN`` when real `x` is not in the - interval ``[-1,1]``, use `numpy.arccos`. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arccos(1) # a scalar is returned - 0.0 - - >>> np.emath.arccos([1,2]) - array([ 0.-0.j , 0.+1.317j]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arccos(x) - -def arcsin(x): - """ - Compute the inverse sine of x. - - Return the "principal value" (for a description of this, see - `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that - `abs(x) <= 1`, this is a real number in the closed interval - :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is - returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose arcsin is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse sine(s) of the `x` value(s). If `x` was a scalar, so - is `out`, otherwise an array object is returned. - - See Also - -------- - numpy.arcsin - - Notes - ----- - For an arcsin() that returns ``NAN`` when real `x` is not in the - interval ``[-1,1]``, use `numpy.arcsin`. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arcsin(0) - 0.0 - - >>> np.emath.arcsin([0,1]) - array([ 0. , 1.5708]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arcsin(x) - -def arctanh(x): - """ - Compute the inverse hyperbolic tangent of `x`. - - Return the "principal value" (for a description of this, see - `numpy.arctanh`) of `arctanh(x)`. For real `x` such that - `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is - complex, the result is complex. Finally, `x = 1` returns``inf`` and - `x=-1` returns ``-inf``. - - Parameters - ---------- - x : array_like - The value(s) whose arctanh is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse hyperbolic tangent(s) of the `x` value(s). 
If `x` was - a scalar so is `out`, otherwise an array is returned. - - - See Also - -------- - numpy.arctanh - - Notes - ----- - For an arctanh() that returns ``NAN`` when real `x` is not in the - interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does - return +/-inf for `x = +/-1`). - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arctanh(np.matrix(np.eye(2))) - array([[ Inf, 0.], - [ 0., Inf]]) - >>> np.emath.arctanh([1j]) - array([ 0.+0.7854j]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arctanh(x) diff --git a/numpy-1.6.2/numpy/lib/setup.py b/numpy-1.6.2/numpy/lib/setup.py deleted file mode 100644 index e85fdb517d..0000000000 --- a/numpy-1.6.2/numpy/lib/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('lib',parent_package,top_path) - - config.add_include_dirs(join('..','core','include')) - - - config.add_extension('_compiled_base', - sources=[join('src','_compiled_base.c')] - ) - - config.add_data_dir('benchmarks') - config.add_data_dir('tests') - - return config - -if __name__=='__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/lib/setupscons.py b/numpy-1.6.2/numpy/lib/setupscons.py deleted file mode 100644 index 4f31f6e8ab..0000000000 --- a/numpy-1.6.2/numpy/lib/setupscons.py +++ /dev/null @@ -1,16 +0,0 @@ -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('lib',parent_package,top_path) - - config.add_sconscript('SConstruct', - source_files = [join('src', '_compiled_base.c')]) - config.add_data_dir('tests') - - return config - -if __name__=='__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/lib/shape_base.py 
b/numpy-1.6.2/numpy/lib/shape_base.py deleted file mode 100644 index 5ea2648cbe..0000000000 --- a/numpy-1.6.2/numpy/lib/shape_base.py +++ /dev/null @@ -1,839 +0,0 @@ -__all__ = ['column_stack','row_stack', 'dstack','array_split','split','hsplit', - 'vsplit','dsplit','apply_over_axes','expand_dims', - 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'] - -import numpy.core.numeric as _nx -from numpy.core.numeric import asarray, zeros, newaxis, outer, \ - concatenate, isscalar, array, asanyarray -from numpy.core.fromnumeric import product, reshape -from numpy.core import hstack, vstack, atleast_3d - -def apply_along_axis(func1d,axis,arr,*args): - """ - Apply a function to 1-D slices along the given axis. - - Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` - is a 1-D slice of `arr` along `axis`. - - Parameters - ---------- - func1d : function - This function should accept 1-D arrays. It is applied to 1-D - slices of `arr` along the specified axis. - axis : integer - Axis along which `arr` is sliced. - arr : ndarray - Input array. - args : any - Additional arguments to `func1d`. - - Returns - ------- - outarr : ndarray - The output array. The shape of `outarr` is identical to the shape of - `arr`, except along the `axis` dimension, where the length of `outarr` - is equal to the size of the return value of `func1d`. If `func1d` - returns a scalar `outarr` will have one fewer dimensions than `arr`. - - See Also - -------- - apply_over_axes : Apply a function repeatedly over multiple axes. - - Examples - -------- - >>> def my_func(a): - ... \"\"\"Average first and last element of a 1-D array\"\"\" - ... return (a[0] + a[-1]) * 0.5 - >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) - >>> np.apply_along_axis(my_func, 0, b) - array([ 4., 5., 6.]) - >>> np.apply_along_axis(my_func, 1, b) - array([ 2., 5., 8.]) - - For a function that doesn't return a scalar, the number of dimensions in - `outarr` is the same as `arr`. - - >>> def new_func(a): - ... 
\"\"\"Divide elements of a by 2.\"\"\" - ... return a * 0.5 - >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) - >>> np.apply_along_axis(new_func, 0, b) - array([[ 0.5, 1. , 1.5], - [ 2. , 2.5, 3. ], - [ 3.5, 4. , 4.5]]) - - """ - arr = asarray(arr) - nd = arr.ndim - if axis < 0: - axis += nd - if (axis >= nd): - raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." - % (axis,nd)) - ind = [0]*(nd-1) - i = zeros(nd,'O') - indlist = range(nd) - indlist.remove(axis) - i[axis] = slice(None,None) - outshape = asarray(arr.shape).take(indlist) - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())],*args) - # if res is a number, then we have a smaller output array - if isscalar(res): - outarr = zeros(outshape,asarray(res).dtype) - outarr[tuple(ind)] = res - Ntot = product(outshape) - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= outshape[n]) and (n > (1-nd)): - ind[n-1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist,ind) - res = func1d(arr[tuple(i.tolist())],*args) - outarr[tuple(ind)] = res - k += 1 - return outarr - else: - Ntot = product(outshape) - holdshape = outshape - outshape = list(arr.shape) - outshape[axis] = len(res) - outarr = zeros(outshape,asarray(res).dtype) - outarr[tuple(i.tolist())] = res - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= holdshape[n]) and (n > (1-nd)): - ind[n-1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())],*args) - outarr[tuple(i.tolist())] = res - k += 1 - return outarr - - -def apply_over_axes(func, a, axes): - """ - Apply a function repeatedly over multiple axes. - - `func` is called as `res = func(a, axis)`, where `axis` is the first - element of `axes`. The result `res` of the function call must have - either the same dimensions as `a` or one less dimension. If `res` - has one less dimension than `a`, a dimension is inserted before - `axis`. 
The call to `func` is then repeated for each axis in `axes`, - with `res` as the first argument. - - Parameters - ---------- - func : function - This function must take two arguments, `func(a, axis)`. - a : array_like - Input array. - axes : array_like - Axes over which `func` is applied; the elements must be integers. - - Returns - ------- - val : ndarray - The output array. The number of dimensions is the same as `a`, - but the shape can be different. This depends on whether `func` - changes the shape of its output with respect to its input. - - See Also - -------- - apply_along_axis : - Apply a function to 1-D slices of an array along the given axis. - - Examples - -------- - >>> a = np.arange(24).reshape(2,3,4) - >>> a - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - - Sum over axes 0 and 2. The result has same number of dimensions - as the original array: - - >>> np.apply_over_axes(np.sum, a, [0,2]) - array([[[ 60], - [ 92], - [124]]]) - - """ - val = asarray(a) - N = a.ndim - if array(axes).ndim == 0: - axes = (axes,) - for axis in axes: - if axis < 0: axis = N + axis - args = (val, axis) - res = func(*args) - if res.ndim == val.ndim: - val = res - else: - res = expand_dims(res,axis) - if res.ndim == val.ndim: - val = res - else: - raise ValueError, "function is not returning"\ - " an array of correct shape" - return val - -def expand_dims(a, axis): - """ - Expand the shape of an array. - - Insert a new axis, corresponding to a given position in the array shape. - - Parameters - ---------- - a : array_like - Input array. - axis : int - Position (amongst axes) where new axis is to be inserted. - - Returns - ------- - res : ndarray - Output array. The number of dimensions is one greater than that of - the input array. 
- - See Also - -------- - doc.indexing, atleast_1d, atleast_2d, atleast_3d - - Examples - -------- - >>> x = np.array([1,2]) - >>> x.shape - (2,) - - The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``: - - >>> y = np.expand_dims(x, axis=0) - >>> y - array([[1, 2]]) - >>> y.shape - (1, 2) - - >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis] - >>> y - array([[1], - [2]]) - >>> y.shape - (2, 1) - - Note that some examples may use ``None`` instead of ``np.newaxis``. These - are the same objects: - - >>> np.newaxis is None - True - - """ - a = asarray(a) - shape = a.shape - if axis < 0: - axis = axis + len(shape) + 1 - return a.reshape(shape[:axis] + (1,) + shape[axis:]) - -row_stack = vstack - -def column_stack(tup): - """ - Stack 1-D arrays as columns into a 2-D array. - - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. - - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. - - See Also - -------- - hstack, vstack, concatenate - - Notes - ----- - This function is equivalent to ``np.vstack(tup).T``. - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - arrays = [] - for v in tup: - arr = array(v,copy=False,subok=True) - if arr.ndim < 2: - arr = array(arr,copy=False,subok=True,ndmin=2).T - arrays.append(arr) - return _nx.concatenate(arrays,1) - -def dstack(tup): - """ - Stack arrays in sequence depth wise (along third axis). - - Takes a sequence of arrays and stack them along the third axis - to make a single array. Rebuilds arrays divided by `dsplit`. 
- This is a simple way to stack 2D arrays (images) into a single - 3D array for processing. - - Parameters - ---------- - tup : sequence of arrays - Arrays to stack. All of them must have the same shape along all - but the third axis. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - vstack : Stack along first axis. - hstack : Stack along second axis. - concatenate : Join arrays. - dsplit : Split array along third axis. - - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=2)``. - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) - - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) - - """ - return _nx.concatenate(map(atleast_3d,tup),2) - -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if len(_nx.shape(sub_arys[i])) == 0: - sub_arys[i] = _nx.array([]) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)): - sub_arys[i] = _nx.array([]) - return sub_arys - -def array_split(ary,indices_or_sections,axis = 0): - """ - Split an array into multiple sub-arrays of equal or near-equal size. - - Please refer to the ``split`` documentation. The only difference - between these functions is that ``array_split`` allows - `indices_or_sections` to be an integer that does *not* equally - divide the axis. - - See Also - -------- - split : Split array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(8.0) - >>> np.array_split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] - - """ - try: - Ntotal = ary.shape[axis] - except AttributeError: - Ntotal = len(ary) - try: # handle scalar case. - Nsections = len(indices_or_sections) + 1 - div_points = [0] + list(indices_or_sections) + [Ntotal] - except TypeError: #indices_or_sections is a scalar, not an array. 
- Nsections = int(indices_or_sections) - if Nsections <= 0: - raise ValueError, 'number sections must be larger than 0.' - Neach_section,extras = divmod(Ntotal,Nsections) - section_sizes = [0] + \ - extras * [Neach_section+1] + \ - (Nsections-extras) * [Neach_section] - div_points = _nx.array(section_sizes).cumsum() - - sub_arys = [] - sary = _nx.swapaxes(ary,axis,0) - for i in range(Nsections): - st = div_points[i]; end = div_points[i+1] - sub_arys.append(_nx.swapaxes(sary[st:end],axis,0)) - - # there is a wierd issue with array slicing that allows - # 0x10 arrays and other such things. The following cluge is needed - # to get around this issue. - sub_arys = _replace_zero_by_x_arrays(sub_arys) - # end cluge. - - return sub_arys - -def split(ary,indices_or_sections,axis=0): - """ - Split an array into multiple sub-arrays of equal size. - - Parameters - ---------- - ary : ndarray - Array to be divided into sub-arrays. - indices_or_sections : int or 1-D array - If `indices_or_sections` is an integer, N, the array will be divided - into N equal arrays along `axis`. If such a split is not possible, - an error is raised. - - If `indices_or_sections` is a 1-D array of sorted integers, the entries - indicate where along `axis` the array is split. For example, - ``[2, 3]`` would, for ``axis=0``, result in - - - ary[:2] - - ary[2:3] - - ary[3:] - - If an index exceeds the dimension of the array along `axis`, - an empty sub-array is returned correspondingly. - axis : int, optional - The axis along which to split, default is 0. - - Returns - ------- - sub-arrays : list of ndarrays - A list of sub-arrays. - - Raises - ------ - ValueError - If `indices_or_sections` is given as an integer, but - a split does not result in equal division. - - See Also - -------- - array_split : Split an array into multiple sub-arrays of equal or - near-equal size. Does not raise an exception if - an equal division cannot be made. 
- hsplit : Split array into multiple sub-arrays horizontally (column-wise). - vsplit : Split array into multiple sub-arrays vertically (row wise). - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). - concatenate : Join arrays together. - hstack : Stack arrays in sequence horizontally (column wise). - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - - Examples - -------- - >>> x = np.arange(9.0) - >>> np.split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] - - >>> x = np.arange(8.0) - >>> np.split(x, [3, 5, 6, 10]) - [array([ 0., 1., 2.]), - array([ 3., 4.]), - array([ 5.]), - array([ 6., 7.]), - array([], dtype=float64)] - - """ - try: len(indices_or_sections) - except TypeError: - sections = indices_or_sections - N = ary.shape[axis] - if N % sections: - raise ValueError, 'array split does not result in an equal division' - res = array_split(ary,indices_or_sections,axis) - return res - -def hsplit(ary,indices_or_sections): - """ - Split an array into multiple sub-arrays horizontally (column-wise). - - Please refer to the `split` documentation. `hsplit` is equivalent - to `split` with ``axis=1``, the array is always split along the second - axis regardless of the array dimension. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. 
- - Examples - -------- - >>> x = np.arange(16.0).reshape(4, 4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - >>> np.hsplit(x, 2) - [array([[ 0., 1.], - [ 4., 5.], - [ 8., 9.], - [ 12., 13.]]), - array([[ 2., 3.], - [ 6., 7.], - [ 10., 11.], - [ 14., 15.]])] - >>> np.hsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2.], - [ 4., 5., 6.], - [ 8., 9., 10.], - [ 12., 13., 14.]]), - array([[ 3.], - [ 7.], - [ 11.], - [ 15.]]), - array([], dtype=float64)] - - With a higher dimensional array the split is still along the second axis. - - >>> x = np.arange(8.0).reshape(2, 2, 2) - >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) - >>> np.hsplit(x, 2) - [array([[[ 0., 1.]], - [[ 4., 5.]]]), - array([[[ 2., 3.]], - [[ 6., 7.]]])] - - """ - if len(_nx.shape(ary)) == 0: - raise ValueError, 'hsplit only works on arrays of 1 or more dimensions' - if len(ary.shape) > 1: - return split(ary,indices_or_sections,1) - else: - return split(ary,indices_or_sections,0) - -def vsplit(ary,indices_or_sections): - """ - Split an array into multiple sub-arrays vertically (row-wise). - - Please refer to the ``split`` documentation. ``vsplit`` is equivalent - to ``split`` with `axis=0` (default), the array is always split along the - first axis regardless of the array dimension. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(16.0).reshape(4, 4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - >>> np.vsplit(x, 2) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]]), - array([[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]])] - >>> np.vsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]]), - array([[ 12., 13., 14., 15.]]), - array([], dtype=float64)] - - With a higher dimensional array the split is still along the first axis. 
- - >>> x = np.arange(8.0).reshape(2, 2, 2) - >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) - >>> np.vsplit(x, 2) - [array([[[ 0., 1.], - [ 2., 3.]]]), - array([[[ 4., 5.], - [ 6., 7.]]])] - - """ - if len(_nx.shape(ary)) < 2: - raise ValueError, 'vsplit only works on arrays of 2 or more dimensions' - return split(ary,indices_or_sections,0) - -def dsplit(ary,indices_or_sections): - """ - Split array into multiple sub-arrays along the 3rd axis (depth). - - Please refer to the `split` documentation. `dsplit` is equivalent - to `split` with ``axis=2``, the array is always split along the third - axis provided the array dimension is greater than or equal to 3. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(16.0).reshape(2, 2, 4) - >>> x - array([[[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]], - [[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]]) - >>> np.dsplit(x, 2) - [array([[[ 0., 1.], - [ 4., 5.]], - [[ 8., 9.], - [ 12., 13.]]]), - array([[[ 2., 3.], - [ 6., 7.]], - [[ 10., 11.], - [ 14., 15.]]])] - >>> np.dsplit(x, np.array([3, 6])) - [array([[[ 0., 1., 2.], - [ 4., 5., 6.]], - [[ 8., 9., 10.], - [ 12., 13., 14.]]]), - array([[[ 3.], - [ 7.]], - [[ 11.], - [ 15.]]]), - array([], dtype=float64)] - - """ - if len(_nx.shape(ary)) < 3: - raise ValueError, 'vsplit only works on arrays of 3 or more dimensions' - return split(ary,indices_or_sections,2) - -def get_array_prepare(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None - """ - wrappers = [(getattr(x, '__array_priority__', 0), -i, - x.__array_prepare__) for i, x in enumerate(args) - if hasattr(x, '__array_prepare__')] - wrappers.sort() - if wrappers: - return wrappers[-1][-1] - return None - -def get_array_wrap(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. 
If no wrapper is found, return None - """ - wrappers = [(getattr(x, '__array_priority__', 0), -i, - x.__array_wrap__) for i, x in enumerate(args) - if hasattr(x, '__array_wrap__')] - wrappers.sort() - if wrappers: - return wrappers[-1][-1] - return None - -def kron(a,b): - """ - Kronecker product of two arrays. - - Computes the Kronecker product, a composite array made of blocks of the - second array scaled by the first. - - Parameters - ---------- - a, b : array_like - - Returns - ------- - out : ndarray - - See Also - -------- - - outer : The outer product - - Notes - ----- - - The function assumes that the number of dimenensions of `a` and `b` - are the same, if necessary prepending the smallest with ones. - If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, - the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. - The elements are products of elements from `a` and `b`, organized - explicitly by:: - - kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] - - where:: - - kt = it * st + jt, t = 0,...,N - - In the common 2-D case (N=1), the block structure can be visualized:: - - [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], - [ ... ... ], - [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]] - - - Examples - -------- - >>> np.kron([1,10,100], [5,6,7]) - array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) - >>> np.kron([5,6,7], [1,10,100]) - array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) - - >>> np.kron(np.eye(2), np.ones((2,2))) - array([[ 1., 1., 0., 0.], - [ 1., 1., 0., 0.], - [ 0., 0., 1., 1.], - [ 0., 0., 1., 1.]]) - - >>> a = np.arange(100).reshape((2,5,2,5)) - >>> b = np.arange(24).reshape((2,3,4)) - >>> c = np.kron(a,b) - >>> c.shape - (2, 10, 6, 20) - >>> I = (1,3,0,2) - >>> J = (0,2,1) - >>> J1 = (0,) + J # extend to ndim=4 - >>> S1 = (1,) + b.shape - >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) - >>> c[K] == a[I]*b[J] - True - - """ - b = asanyarray(b) - a = array(a,copy=False,subok=True,ndmin=b.ndim) - ndb, nda = b.ndim, a.ndim - if (nda == 0 or ndb == 0): - return _nx.multiply(a,b) - as_ = a.shape - bs = b.shape - if not a.flags.contiguous: - a = reshape(a, as_) - if not b.flags.contiguous: - b = reshape(b, bs) - nd = ndb - if (ndb != nda): - if (ndb > nda): - as_ = (1,)*(ndb-nda) + as_ - else: - bs = (1,)*(nda-ndb) + bs - nd = nda - result = outer(a,b).reshape(as_+bs) - axis = nd-1 - for _ in xrange(nd): - result = concatenate(result, axis=axis) - wrapper = get_array_prepare(a, b) - if wrapper is not None: - result = wrapper(result) - wrapper = get_array_wrap(a, b) - if wrapper is not None: - result = wrapper(result) - return result - - -def tile(A, reps): - """ - Construct an array by repeating A the number of times given by reps. - - If `reps` has length ``d``, the result will have dimension of - ``max(d, A.ndim)``. - - If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new - axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, - or shape (1, 1, 3) for 3-D replication. If this is not the desired - behavior, promote `A` to d-dimensions manually before calling this - function. - - If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. 
- Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as - (1, 1, 2, 2). - - Parameters - ---------- - A : array_like - The input array. - reps : array_like - The number of repetitions of `A` along each axis. - - Returns - ------- - c : ndarray - The tiled output array. - - See Also - -------- - repeat : Repeat elements of an array. - - Examples - -------- - >>> a = np.array([0, 1, 2]) - >>> np.tile(a, 2) - array([0, 1, 2, 0, 1, 2]) - >>> np.tile(a, (2, 2)) - array([[0, 1, 2, 0, 1, 2], - [0, 1, 2, 0, 1, 2]]) - >>> np.tile(a, (2, 1, 2)) - array([[[0, 1, 2, 0, 1, 2]], - [[0, 1, 2, 0, 1, 2]]]) - - >>> b = np.array([[1, 2], [3, 4]]) - >>> np.tile(b, 2) - array([[1, 2, 1, 2], - [3, 4, 3, 4]]) - >>> np.tile(b, (2, 1)) - array([[1, 2], - [3, 4], - [1, 2], - [3, 4]]) - - """ - try: - tup = tuple(reps) - except TypeError: - tup = (reps,) - d = len(tup) - c = _nx.array(A,copy=False,subok=True,ndmin=d) - shape = list(c.shape) - n = max(c.size,1) - if (d < c.ndim): - tup = (1,)*(c.ndim-d) + tup - for i, nrep in enumerate(tup): - if nrep!=1: - c = c.reshape(-1,n).repeat(nrep,0) - dim_in = shape[i] - dim_out = dim_in*nrep - shape[i] = dim_out - n /= max(dim_in,1) - return c.reshape(shape) diff --git a/numpy-1.6.2/numpy/lib/src/_compiled_base.c b/numpy-1.6.2/numpy/lib/src/_compiled_base.c deleted file mode 100644 index de6d8b2a8d..0000000000 --- a/numpy-1.6.2/numpy/lib/src/_compiled_base.c +++ /dev/null @@ -1,1521 +0,0 @@ -#include "Python.h" -#include "structmember.h" -#include "numpy/noprefix.h" -#include "numpy/npy_3kcompat.h" -#include "npy_config.h" - -static intp -incr_slot_(double x, double *bins, intp lbins) -{ - intp i; - - for ( i = 0; i < lbins; i ++ ) { - if ( x < bins [i] ) { - return i; - } - } - return lbins; -} - -static intp -decr_slot_(double x, double * bins, intp lbins) -{ - intp i; - - for ( i = lbins - 1; i >= 0; i -- ) { - if (x < bins [i]) { - return i + 1; - } - } - return 0; -} - -static int -monotonic_(double * a, int lena) -{ - int i; 
- - if (a [0] <= a [1]) { - /* possibly monotonic increasing */ - for (i = 1; i < lena - 1; i ++) { - if (a [i] > a [i + 1]) { - return 0; - } - } - return 1; - } - else { - /* possibly monotonic decreasing */ - for (i = 1; i < lena - 1; i ++) { - if (a [i] < a [i + 1]) { - return 0; - } - } - return -1; - } -} - - - -/* find the index of the maximum element of an integer array */ -static intp -mxx (intp *i , intp len) -{ - intp mx = 0, max = i[0]; - intp j; - - for ( j = 1; j < len; j ++ ) { - if ( i [j] > max ) { - max = i [j]; - mx = j; - } - } - return mx; -} - -/* find the index of the minimum element of an integer array */ -static intp -mnx (intp *i , intp len) -{ - intp mn = 0, min = i [0]; - intp j; - - for ( j = 1; j < len; j ++ ) - if ( i [j] < min ) - {min = i [j]; - mn = j;} - return mn; -} - - -/* - * arr_bincount is registered as bincount. - * - * bincount accepts one, two or three arguments. The first is an array of - * non-negative integers The second, if present, is an array of weights, - * which must be promotable to double. Call these arguments list and - * weight. Both must be one-dimensional with len(weight) == len(list). If - * weight is not present then bincount(list)[i] is the number of occurrences - * of i in list. If weight is present then bincount(self,list, weight)[i] - * is the sum of all weight[j] where list [j] == i. Self is not used. - * The third argument, if present, is a minimum length desired for the - * output array. 
- */ -static PyObject * -arr_bincount(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) -{ - PyArray_Descr *type; - PyObject *list = NULL, *weight=Py_None, *mlength=Py_None; - PyObject *lst=NULL, *ans=NULL, *wts=NULL; - intp *numbers, *ians, len , mxi, mni, ans_size, minlength; - int i; - double *weights , *dans; - static char *kwlist[] = {"list", "weights", "minlength", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO", - kwlist, &list, &weight, &mlength)) { - goto fail; - } - - lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); - if (lst == NULL) { - goto fail; - } - len = PyArray_SIZE(lst); - type = PyArray_DescrFromType(NPY_INTP); - - /* handle empty list */ - if (len < 1) { - if (mlength == Py_None) { - minlength = 0; - } - else if (!(minlength = PyArray_PyIntAsIntp(mlength))) { - goto fail; - } - if (!(ans = PyArray_Zeros(1, &minlength, type, 0))){ - goto fail; - } - Py_DECREF(lst); - return ans; - } - - numbers = (npy_intp *) PyArray_DATA(lst); - mxi = mxx(numbers, len); - mni = mnx(numbers, len); - if (numbers[mni] < 0) { - PyErr_SetString(PyExc_ValueError, - "The first argument of bincount must be non-negative"); - goto fail; - } - ans_size = numbers [mxi] + 1; - if (mlength != Py_None) { - if (!(minlength = PyArray_PyIntAsIntp(mlength))) { - goto fail; - } - if (minlength <= 0) { - /* superfluous, but may catch incorrect usage */ - PyErr_SetString(PyExc_ValueError, - "minlength must be positive"); - goto fail; - } - if (ans_size < minlength) { - ans_size = minlength; - } - } - if (weight == Py_None) { - if (!(ans = PyArray_Zeros(1, &ans_size, type, 0))) { - goto fail; - } - ians = (intp *)(PyArray_DATA(ans)); - for (i = 0; i < len; i++) - ians [numbers [i]] += 1; - Py_DECREF(lst); - } - else { - if (!(wts = PyArray_ContiguousFromAny(weight, PyArray_DOUBLE, 1, 1))) { - goto fail; - } - weights = (double *)PyArray_DATA (wts); - if (PyArray_SIZE(wts) != len) { - PyErr_SetString(PyExc_ValueError, - "The weights and 
list don't have the same length."); - goto fail; - } - type = PyArray_DescrFromType(PyArray_DOUBLE); - if (!(ans = PyArray_Zeros(1, &ans_size, type, 0))) { - goto fail; - } - dans = (double *)PyArray_DATA (ans); - for (i = 0; i < len; i++) { - dans[numbers[i]] += weights[i]; - } - Py_DECREF(lst); - Py_DECREF(wts); - } - return ans; - -fail: - Py_XDECREF(lst); - Py_XDECREF(wts); - Py_XDECREF(ans); - return NULL; -} - - -/* - * digitize (x, bins) returns an array of python integers the same - * length of x. The values i returned are such that bins [i - 1] <= x < - * bins [i] if bins is monotonically increasing, or bins [i - 1] > x >= - * bins [i] if bins is monotonically decreasing. Beyond the bounds of - * bins, returns either i = 0 or i = len (bins) as appropriate. - */ -static PyObject * -arr_digitize(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) -{ - /* self is not used */ - PyObject *ox, *obins; - PyObject *ax = NULL, *abins = NULL, *aret = NULL; - double *dx, *dbins; - intp lbins, lx; /* lengths */ - intp *iret; - int m, i; - static char *kwlist[] = {"x", "bins", NULL}; - PyArray_Descr *type; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO", kwlist, &ox, &obins)) { - goto fail; - } - type = PyArray_DescrFromType(PyArray_DOUBLE); - if (!(ax = PyArray_FromAny(ox, type, 1, 1, CARRAY, NULL))) { - goto fail; - } - Py_INCREF(type); - if (!(abins = PyArray_FromAny(obins, type, 1, 1, CARRAY, NULL))) { - goto fail; - } - - lx = PyArray_SIZE(ax); - dx = (double *)PyArray_DATA(ax); - lbins = PyArray_SIZE(abins); - dbins = (double *)PyArray_DATA(abins); - if (!(aret = PyArray_SimpleNew(1, &lx, PyArray_INTP))) { - goto fail; - } - iret = (intp *)PyArray_DATA(aret); - - if (lx <= 0 || lbins < 0) { - PyErr_SetString(PyExc_ValueError, - "Both x and bins must have non-zero length"); - goto fail; - } - - if (lbins == 1) { - for (i = 0; i < lx; i++) { - if (dx [i] >= dbins[0]) { - iret[i] = 1; - } - else { - iret[i] = 0; - } - } - } - else { - m = monotonic_ 
(dbins, lbins); - if ( m == -1 ) { - for ( i = 0; i < lx; i ++ ) { - iret [i] = decr_slot_ ((double)dx[i], dbins, lbins); - } - } - else if ( m == 1 ) { - for ( i = 0; i < lx; i ++ ) { - iret [i] = incr_slot_ ((double)dx[i], dbins, lbins); - } - } - else { - PyErr_SetString(PyExc_ValueError, - "The bins must be montonically increasing or decreasing"); - goto fail; - } - } - - Py_DECREF(ax); - Py_DECREF(abins); - return aret; - -fail: - Py_XDECREF(ax); - Py_XDECREF(abins); - Py_XDECREF(aret); - return NULL; -} - - - -static char arr_insert__doc__[] = "Insert vals sequentially into equivalent 1-d positions indicated by mask."; - -/* - * Returns input array with values inserted sequentially into places - * indicated by the mask - */ -static PyObject * -arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) -{ - PyObject *mask = NULL, *vals = NULL; - PyArrayObject *ainput = NULL, *amask = NULL, *avals = NULL, *tmp = NULL; - int numvals, totmask, sameshape; - char *input_data, *mptr, *vptr, *zero = NULL; - int melsize, delsize, copied, nd; - intp *instrides, *inshape; - int mindx, rem_indx, indx, i, k, objarray; - - static char *kwlist[] = {"input", "mask", "vals", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwdict, "O&OO", kwlist, - PyArray_Converter, &ainput, - &mask, &vals)) { - goto fail; - } - - amask = (PyArrayObject *) PyArray_FROM_OF(mask, CARRAY); - if (amask == NULL) { - goto fail; - } - /* Cast an object array */ - if (amask->descr->type_num == PyArray_OBJECT) { - tmp = (PyArrayObject *)PyArray_Cast(amask, PyArray_INTP); - if (tmp == NULL) { - goto fail; - } - Py_DECREF(amask); - amask = tmp; - } - - sameshape = 1; - if (amask->nd == ainput->nd) { - for (k = 0; k < amask->nd; k++) { - if (amask->dimensions[k] != ainput->dimensions[k]) { - sameshape = 0; - } - } - } - else { - /* Test to see if amask is 1d */ - if (amask->nd != 1) { - sameshape = 0; - } - else if ((PyArray_SIZE(ainput)) != PyArray_SIZE(amask)) { - sameshape = 0; - } - } 
- if (!sameshape) { - PyErr_SetString(PyExc_TypeError, - "mask array must be 1-d or same shape as input array"); - goto fail; - } - - avals = (PyArrayObject *)PyArray_FromObject(vals, ainput->descr->type_num, 0, 1); - if (avals == NULL) { - goto fail; - } - numvals = PyArray_SIZE(avals); - nd = ainput->nd; - input_data = ainput->data; - mptr = amask->data; - melsize = amask->descr->elsize; - vptr = avals->data; - delsize = avals->descr->elsize; - zero = PyArray_Zero(amask); - if (zero == NULL) { - goto fail; - } - objarray = (ainput->descr->type_num == PyArray_OBJECT); - - /* Handle zero-dimensional case separately */ - if (nd == 0) { - if (memcmp(mptr,zero,melsize) != 0) { - /* Copy value element over to input array */ - memcpy(input_data,vptr,delsize); - if (objarray) { - Py_INCREF(*((PyObject **)vptr)); - } - } - Py_DECREF(amask); - Py_DECREF(avals); - PyDataMem_FREE(zero); - Py_DECREF(ainput); - Py_INCREF(Py_None); - return Py_None; - } - - /* - * Walk through mask array, when non-zero is encountered - * copy next value in the vals array to the input array. - * If we get through the value array, repeat it as necessary. - */ - totmask = (int) PyArray_SIZE(amask); - copied = 0; - instrides = ainput->strides; - inshape = ainput->dimensions; - for (mindx = 0; mindx < totmask; mindx++) { - if (memcmp(mptr,zero,melsize) != 0) { - /* compute indx into input array */ - rem_indx = mindx; - indx = 0; - for(i = nd - 1; i > 0; --i) { - indx += (rem_indx % inshape[i]) * instrides[i]; - rem_indx /= inshape[i]; - } - indx += rem_indx * instrides[0]; - /* fprintf(stderr, "mindx = %d, indx=%d\n", mindx, indx); */ - /* Copy value element over to input array */ - memcpy(input_data+indx,vptr,delsize); - if (objarray) { - Py_INCREF(*((PyObject **)vptr)); - } - vptr += delsize; - copied += 1; - /* If we move past value data. 
Reset */ - if (copied >= numvals) { - vptr = avals->data; - } - } - mptr += melsize; - } - - Py_DECREF(amask); - Py_DECREF(avals); - PyDataMem_FREE(zero); - Py_DECREF(ainput); - Py_INCREF(Py_None); - return Py_None; - -fail: - PyDataMem_FREE(zero); - Py_XDECREF(ainput); - Py_XDECREF(amask); - Py_XDECREF(avals); - return NULL; -} - -/** @brief Use bisection on a sorted array to find first entry > key. - * - * Use bisection to find an index i s.t. arr[i] <= key < arr[i + 1]. If there is - * no such i the error returns are: - * key < arr[0] -- -1 - * key == arr[len - 1] -- len - 1 - * key > arr[len - 1] -- len - * The array is assumed contiguous and sorted in ascending order. - * - * @param key key value. - * @param arr contiguous sorted array to be searched. - * @param len length of the array. - * @return index - */ -static npy_intp -binary_search(double key, double arr [], npy_intp len) -{ - npy_intp imin = 0; - npy_intp imax = len; - - if (key > arr[len - 1]) { - return len; - } - while (imin < imax) { - npy_intp imid = imin + ((imax - imin) >> 1); - if (key >= arr[imid]) { - imin = imid + 1; - } - else { - imax = imid; - } - } - return imin - 1; -} - -static PyObject * -arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) -{ - - PyObject *fp, *xp, *x; - PyObject *left = NULL, *right = NULL; - PyArrayObject *afp = NULL, *axp = NULL, *ax = NULL, *af = NULL; - npy_intp i, lenx, lenxp, indx; - double lval, rval; - double *dy, *dx, *dz, *dres, *slopes; - - static char *kwlist[] = {"x", "xp", "fp", "left", "right", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwdict, "OOO|OO", kwlist, - &x, &xp, &fp, &left, &right)) { - return NULL; - } - - afp = (NPY_AO*)PyArray_ContiguousFromAny(fp, NPY_DOUBLE, 1, 1); - if (afp == NULL) { - return NULL; - } - axp = (NPY_AO*)PyArray_ContiguousFromAny(xp, NPY_DOUBLE, 1, 1); - if (axp == NULL) { - goto fail; - } - ax = (NPY_AO*)PyArray_ContiguousFromAny(x, NPY_DOUBLE, 1, 0); - if (ax == NULL) { - goto fail; - } - 
lenxp = axp->dimensions[0]; - if (lenxp == 0) { - PyErr_SetString(PyExc_ValueError, - "array of sample points is empty"); - goto fail; - } - if (afp->dimensions[0] != lenxp) { - PyErr_SetString(PyExc_ValueError, - "fp and xp are not of the same length."); - goto fail; - } - - af = (NPY_AO*)PyArray_SimpleNew(ax->nd, ax->dimensions, NPY_DOUBLE); - if (af == NULL) { - goto fail; - } - lenx = PyArray_SIZE(ax); - - dy = (double *)PyArray_DATA(afp); - dx = (double *)PyArray_DATA(axp); - dz = (double *)PyArray_DATA(ax); - dres = (double *)PyArray_DATA(af); - - /* Get left and right fill values. */ - if ((left == NULL) || (left == Py_None)) { - lval = dy[0]; - } - else { - lval = PyFloat_AsDouble(left); - if ((lval == -1) && PyErr_Occurred()) { - goto fail; - } - } - if ((right == NULL) || (right == Py_None)) { - rval = dy[lenxp-1]; - } - else { - rval = PyFloat_AsDouble(right); - if ((rval == -1) && PyErr_Occurred()) { - goto fail; - } - } - - slopes = (double *) PyDataMem_NEW((lenxp - 1)*sizeof(double)); - for (i = 0; i < lenxp - 1; i++) { - slopes[i] = (dy[i + 1] - dy[i])/(dx[i + 1] - dx[i]); - } - for (i = 0; i < lenx; i++) { - indx = binary_search(dz[i], dx, lenxp); - if (indx == -1) { - dres[i] = lval; - } - else if (indx == lenxp - 1) { - dres[i] = dy[indx]; - } - else if (indx == lenxp) { - dres[i] = rval; - } - else { - dres[i] = slopes[indx]*(dz[i] - dx[indx]) + dy[indx]; - } - } - - PyDataMem_FREE(slopes); - Py_DECREF(afp); - Py_DECREF(axp); - Py_DECREF(ax); - return (PyObject *)af; - -fail: - Py_XDECREF(afp); - Py_XDECREF(axp); - Py_XDECREF(ax); - Py_XDECREF(af); - return NULL; -} - -/* - * Converts a Python sequence into 'count' PyArrayObjects - * - * seq - Input Python object, usually a tuple but any sequence works. - * op - Where the arrays are placed. - * count - How many arrays there should be (errors if it doesn't match). - * paramname - The name of the parameter that produced 'seq'. 
- */ -static int sequence_to_arrays(PyObject *seq, - PyArrayObject **op, int count, - char *paramname) -{ - int i; - - if (!PySequence_Check(seq) || PySequence_Size(seq) != count) { - PyErr_Format(PyExc_ValueError, - "parameter %s must be a sequence of length %d", - paramname, count); - return -1; - } - - for (i = 0; i < count; ++i) { - PyObject *item = PySequence_GetItem(seq, i); - if (item == NULL) { - while (--i >= 0) { - Py_DECREF(op[i]); - op[i] = NULL; - } - return -1; - } - - op[i] = (PyArrayObject *)PyArray_FromAny(item, NULL, 0, 0, 0, NULL); - if (op[i] == NULL) { - while (--i >= 0) { - Py_DECREF(op[i]); - op[i] = NULL; - } - Py_DECREF(item); - return -1; - } - - Py_DECREF(item); - } - - return 0; -} - -/* Inner loop for unravel_index */ -static int -ravel_multi_index_loop(int ravel_ndim, npy_intp *ravel_dims, - npy_intp *ravel_strides, - npy_intp count, - NPY_CLIPMODE *modes, - char **coords, npy_intp *coords_strides) -{ - int i; - npy_intp j, m; - - while (count--) { - npy_intp raveled = 0; - for (i = 0; i < ravel_ndim; ++i) { - m = ravel_dims[i]; - j = *(npy_intp *)coords[i]; - switch (modes[i]) { - case NPY_RAISE: - if(j < 0 || j>=m) { - PyErr_SetString(PyExc_ValueError, - "invalid entry in coordinates array"); - return NPY_FAIL; - } - break; - case NPY_WRAP: - if(j < 0) { - j += m; - if(j < 0) { - j = j%m; - if(j != 0) { - j += m; - } - } - } - else if(j >= m) { - j -= m; - if(j >= m) { - j = j%m; - } - } - break; - case NPY_CLIP: - if(j < 0) { - j = 0; - } - else if(j >= m) { - j = m-1; - } - break; - - } - raveled += j * ravel_strides[i]; - - coords[i] += coords_strides[i]; - } - *(npy_intp *)coords[ravel_ndim] = raveled; - coords[ravel_ndim] += coords_strides[ravel_ndim]; - } - - return NPY_SUCCEED; -} - -/* ravel_multi_index implementation - see add_newdocs.py */ -static PyObject * -arr_ravel_multi_index(PyObject *self, PyObject *args, PyObject *kwds) -{ - int i, s; - PyObject *mode0=NULL, *coords0=NULL; - PyArrayObject *ret = NULL; - PyArray_Dims 
dimensions={0,0}; - npy_intp ravel_strides[NPY_MAXDIMS]; - PyArray_ORDER order = NPY_CORDER; - NPY_CLIPMODE modes[NPY_MAXDIMS]; - - PyArrayObject *op[NPY_MAXARGS]; - PyArray_Descr *dtype[NPY_MAXARGS]; - npy_uint32 op_flags[NPY_MAXARGS]; - - NpyIter *iter = NULL; - - char *kwlist[] = {"multi_index", "dims", "mode", "order", NULL}; - - memset(op, 0, sizeof(op)); - dtype[0] = NULL; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, - "OO&|OO&:ravel_multi_index", kwlist, - &coords0, - PyArray_IntpConverter, &dimensions, - &mode0, - PyArray_OrderConverter, &order)) { - goto fail; - } - - if (dimensions.len+1 > NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, - "too many dimensions passed to ravel_multi_index"); - goto fail; - } - - if(!PyArray_ConvertClipmodeSequence(mode0, modes, dimensions.len)) { - goto fail; - } - - switch (order) { - case NPY_CORDER: - s = 1; - for (i = dimensions.len-1; i >= 0; --i) { - ravel_strides[i] = s; - s *= dimensions.ptr[i]; - } - break; - case NPY_FORTRANORDER: - s = 1; - for (i = 0; i < dimensions.len; ++i) { - ravel_strides[i] = s; - s *= dimensions.ptr[i]; - } - break; - default: - PyErr_SetString(PyExc_ValueError, - "only 'C' or 'F' order is permitted"); - goto fail; - } - - /* Get the multi_index into op */ - if (sequence_to_arrays(coords0, op, dimensions.len, "multi_index") < 0) { - goto fail; - } - - - for (i = 0; i < dimensions.len; ++i) { - op_flags[i] = NPY_ITER_READONLY| - NPY_ITER_ALIGNED; - } - op_flags[dimensions.len] = NPY_ITER_WRITEONLY| - NPY_ITER_ALIGNED| - NPY_ITER_ALLOCATE; - dtype[0] = PyArray_DescrFromType(NPY_INTP); - for (i = 1; i <= dimensions.len; ++i) { - dtype[i] = dtype[0]; - } - - iter = NpyIter_MultiNew(dimensions.len+1, op, NPY_ITER_BUFFERED| - NPY_ITER_EXTERNAL_LOOP| - NPY_ITER_ZEROSIZE_OK, - NPY_KEEPORDER, - NPY_SAME_KIND_CASTING, - op_flags, dtype); - if (iter == NULL) { - goto fail; - } - - if (NpyIter_GetIterSize(iter) != 0) { - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *strides; - 
npy_intp *countptr; - - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - strides = NpyIter_GetInnerStrideArray(iter); - countptr = NpyIter_GetInnerLoopSizePtr(iter); - - do { - if (ravel_multi_index_loop(dimensions.len, dimensions.ptr, - ravel_strides, *countptr, modes, - dataptr, strides) != NPY_SUCCEED) { - goto fail; - } - } while(iternext(iter)); - } - - ret = NpyIter_GetOperandArray(iter)[dimensions.len]; - Py_INCREF(ret); - - Py_DECREF(dtype[0]); - for (i = 0; i < dimensions.len; ++i) { - Py_XDECREF(op[i]); - } - PyDimMem_FREE(dimensions.ptr); - NpyIter_Deallocate(iter); - return PyArray_Return(ret); - -fail: - Py_XDECREF(dtype[0]); - for (i = 0; i < dimensions.len; ++i) { - Py_XDECREF(op[i]); - } - if (dimensions.ptr) { - PyDimMem_FREE(dimensions.ptr); - } - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - return NULL; -} - -/* C-order inner loop for unravel_index */ -static int -unravel_index_loop_corder(int unravel_ndim, npy_intp *unravel_dims, - npy_intp unravel_size, npy_intp count, - char *indices, npy_intp indices_stride, - npy_intp *coords) -{ - int i; - npy_intp val; - - while (count--) { - val = *(npy_intp *)indices; - if (val < 0 || val >= unravel_size) { - PyErr_SetString(PyExc_ValueError, - "invalid entry in index array"); - return NPY_FAIL; - } - for (i = unravel_ndim-1; i >= 0; --i) { - coords[i] = val % unravel_dims[i]; - val /= unravel_dims[i]; - } - coords += unravel_ndim; - indices += indices_stride; - } - - return NPY_SUCCEED; -} - -/* Fortran-order inner loop for unravel_index */ -static int -unravel_index_loop_forder(int unravel_ndim, npy_intp *unravel_dims, - npy_intp unravel_size, npy_intp count, - char *indices, npy_intp indices_stride, - npy_intp *coords) -{ - int i; - npy_intp val; - - while (count--) { - val = *(npy_intp *)indices; - if (val < 0 || val >= unravel_size) { - PyErr_SetString(PyExc_ValueError, - "invalid entry in index array"); - 
return NPY_FAIL; - } - for (i = 0; i < unravel_ndim; ++i) { - *coords++ = val % unravel_dims[i]; - val /= unravel_dims[i]; - } - indices += indices_stride; - } - - return NPY_SUCCEED; -} - -/* unravel_index implementation - see add_newdocs.py */ -static PyObject * -arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *indices0 = NULL, *ret_tuple = NULL; - PyArrayObject *ret_arr = NULL; - PyArrayObject *indices = NULL; - PyArray_Descr *dtype = NULL; - PyArray_Dims dimensions={0,0}; - PyArray_ORDER order = PyArray_CORDER; - npy_intp unravel_size; - - NpyIter *iter = NULL; - int i, ret_ndim; - npy_intp ret_dims[NPY_MAXDIMS], ret_strides[NPY_MAXDIMS]; - - char *kwlist[] = {"indices", "dims", "order", NULL}; - - if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:unravel_index", - kwlist, - &indices0, - PyArray_IntpConverter, &dimensions, - PyArray_OrderConverter, &order)) { - goto fail; - } - - if (dimensions.len == 0) { - PyErr_SetString(PyExc_ValueError, - "dims must have at least one value"); - goto fail; - } - - unravel_size = PyArray_MultiplyList(dimensions.ptr, dimensions.len); - - if(!PyArray_Check(indices0)) { - indices = (PyArrayObject*)PyArray_FromAny(indices0, - NULL, 0, 0, 0, NULL); - if(indices == NULL) { - goto fail; - } - } - else { - indices = (PyArrayObject *)indices0; - Py_INCREF(indices); - } - - dtype = PyArray_DescrFromType(NPY_INTP); - if (dtype == NULL) { - goto fail; - } - - iter = NpyIter_New(indices, NPY_ITER_READONLY| - NPY_ITER_ALIGNED| - NPY_ITER_BUFFERED| - NPY_ITER_ZEROSIZE_OK| - NPY_ITER_DONT_NEGATE_STRIDES| - NPY_ITER_MULTI_INDEX, - NPY_KEEPORDER, NPY_SAME_KIND_CASTING, - dtype); - if (iter == NULL) { - goto fail; - } - - /* - * Create the return array with a layout compatible with the indices - * and with a dimension added to the end for the multi-index - */ - ret_ndim = PyArray_NDIM(indices) + 1; - if (NpyIter_GetShape(iter, ret_dims) != NPY_SUCCEED) { - goto fail; - } - ret_dims[ret_ndim-1] = 
dimensions.len; - if (NpyIter_CreateCompatibleStrides(iter, - dimensions.len*sizeof(npy_intp), ret_strides) != NPY_SUCCEED) { - goto fail; - } - ret_strides[ret_ndim-1] = sizeof(npy_intp); - - /* Remove the multi-index and inner loop */ - if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) { - goto fail; - } - if (NpyIter_EnableExternalLoop(iter) != NPY_SUCCEED) { - goto fail; - } - - ret_arr = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, - ret_ndim, ret_dims, ret_strides, NULL, 0, NULL); - dtype = NULL; - if (ret_arr == NULL) { - goto fail; - } - - if (order == NPY_CORDER) { - if (NpyIter_GetIterSize(iter) != 0) { - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *strides; - npy_intp *countptr, count; - npy_intp *coordsptr = (npy_intp *)PyArray_DATA(ret_arr); - - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - strides = NpyIter_GetInnerStrideArray(iter); - countptr = NpyIter_GetInnerLoopSizePtr(iter); - - do { - count = *countptr; - if (unravel_index_loop_corder(dimensions.len, dimensions.ptr, - unravel_size, count, *dataptr, *strides, - coordsptr) != NPY_SUCCEED) { - goto fail; - } - coordsptr += count*dimensions.len; - } while(iternext(iter)); - } - } - else if (order == NPY_FORTRANORDER) { - if (NpyIter_GetIterSize(iter) != 0) { - NpyIter_IterNextFunc *iternext; - char **dataptr; - npy_intp *strides; - npy_intp *countptr, count; - npy_intp *coordsptr = (npy_intp *)PyArray_DATA(ret_arr); - - iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { - goto fail; - } - dataptr = NpyIter_GetDataPtrArray(iter); - strides = NpyIter_GetInnerStrideArray(iter); - countptr = NpyIter_GetInnerLoopSizePtr(iter); - - do { - count = *countptr; - if (unravel_index_loop_forder(dimensions.len, dimensions.ptr, - unravel_size, count, *dataptr, *strides, - coordsptr) != NPY_SUCCEED) { - goto fail; - } - coordsptr += count*dimensions.len; - } 
while(iternext(iter)); - } - } - else { - PyErr_SetString(PyExc_ValueError, - "only 'C' or 'F' order is permitted"); - goto fail; - } - - /* Now make a tuple of views, one per index */ - ret_tuple = PyTuple_New(dimensions.len); - if (ret_tuple == NULL) { - goto fail; - } - for (i = 0; i < dimensions.len; ++i) { - PyArrayObject *view; - - view = (PyArrayObject *)PyArray_New(&PyArray_Type, ret_ndim-1, - ret_dims, NPY_INTP, - ret_strides, - PyArray_BYTES(ret_arr) + i*sizeof(npy_intp), - 0, 0, NULL); - if (view == NULL) { - goto fail; - } - Py_INCREF(ret_arr); - view->base = (PyObject *)ret_arr; - PyTuple_SET_ITEM(ret_tuple, i, PyArray_Return(view)); - } - - Py_DECREF(ret_arr); - Py_XDECREF(indices); - PyDimMem_FREE(dimensions.ptr); - NpyIter_Deallocate(iter); - - return ret_tuple; - -fail: - Py_XDECREF(ret_tuple); - Py_XDECREF(ret_arr); - Py_XDECREF(dtype); - Py_XDECREF(indices); - if (dimensions.ptr) { - PyDimMem_FREE(dimensions.ptr); - } - if (iter != NULL) { - NpyIter_Deallocate(iter); - } - return NULL; -} - - -static PyTypeObject *PyMemberDescr_TypePtr = NULL; -static PyTypeObject *PyGetSetDescr_TypePtr = NULL; -static PyTypeObject *PyMethodDescr_TypePtr = NULL; - -/* Can only be called if doc is currently NULL */ -static PyObject * -arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *obj; - PyObject *str; - char *docstr; - static char *msg = "already has a docstring"; - - /* Don't add docstrings */ - if (Py_OptimizeFlag > 1) { - Py_INCREF(Py_None); - return Py_None; - } -#if defined(NPY_PY3K) - if (!PyArg_ParseTuple(args, "OO!", &obj, &PyUnicode_Type, &str)) { - return NULL; - } - - docstr = PyBytes_AS_STRING(PyUnicode_AsUTF8String(str)); -#else - if (!PyArg_ParseTuple(args, "OO!", &obj, &PyString_Type, &str)) { - return NULL; - } - - docstr = PyString_AS_STRING(str); -#endif - -#define _TESTDOC1(typebase) (Py_TYPE(obj) == &Py##typebase##_Type) -#define _TESTDOC2(typebase) (Py_TYPE(obj) == Py##typebase##_TypePtr) -#define 
_ADDDOC(typebase, doc, name) do { \ - Py##typebase##Object *new = (Py##typebase##Object *)obj; \ - if (!(doc)) { \ - doc = docstr; \ - } \ - else { \ - PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ - return NULL; \ - } \ - } while (0) - - if (_TESTDOC1(CFunction)) { - _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); - } - else if (_TESTDOC1(Type)) { - _ADDDOC(Type, new->tp_doc, new->tp_name); - } - else if (_TESTDOC2(MemberDescr)) { - _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); - } - else if (_TESTDOC2(GetSetDescr)) { - _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); - } - else if (_TESTDOC2(MethodDescr)) { - _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); - } - else { - PyObject *doc_attr; - - doc_attr = PyObject_GetAttrString(obj, "__doc__"); - if (doc_attr != NULL && doc_attr != Py_None) { - PyErr_Format(PyExc_RuntimeError, "object %s", msg); - return NULL; - } - Py_XDECREF(doc_attr); - - if (PyObject_SetAttrString(obj, "__doc__", str) < 0) { - PyErr_SetString(PyExc_TypeError, - "Cannot set a docstring for that object"); - return NULL; - } - Py_INCREF(Py_None); - return Py_None; - } - -#undef _TESTDOC1 -#undef _TESTDOC2 -#undef _ADDDOC - - Py_INCREF(str); - Py_INCREF(Py_None); - return Py_None; -} - - -/* PACKBITS - * - * This function packs binary (0 or 1) 1-bit per pixel arrays - * into contiguous bytes. - * - */ - -static void -_packbits( void *In, - int element_size, /* in bytes */ - npy_intp in_N, - npy_intp in_stride, - void *Out, - npy_intp out_N, - npy_intp out_stride -) -{ - char build; - int i, index; - npy_intp out_Nm1; - int maxi, remain, nonzero, j; - char *outptr,*inptr; - - outptr = Out; /* pointer to output buffer */ - inptr = In; /* pointer to input buffer */ - - /* - * Loop through the elements of In - * Determine whether or not it is nonzero. 
- * Yes: set correspdoning bit (and adjust build value) - * No: move on - * Every 8th value, set the value of build and increment the outptr - */ - - remain = in_N % 8; /* uneven bits */ - if (remain == 0) { - remain = 8; - } - out_Nm1 = out_N - 1; - for (index = 0; index < out_N; index++) { - build = 0; - maxi = (index != out_Nm1 ? 8 : remain); - for (i = 0; i < maxi; i++) { - build <<= 1; - nonzero = 0; - for (j = 0; j < element_size; j++) { - nonzero += (*(inptr++) != 0); - } - inptr += (in_stride - element_size); - build += (nonzero != 0); - } - if (index == out_Nm1) build <<= (8-remain); - /* printf("Here: %d %d %d %d\n",build,slice,index,maxi); */ - *outptr = build; - outptr += out_stride; - } - return; -} - - -static void -_unpackbits(void *In, - int NPY_UNUSED(el_size), /* unused */ - npy_intp in_N, - npy_intp in_stride, - void *Out, - npy_intp NPY_UNUSED(out_N), - npy_intp out_stride - ) -{ - unsigned char mask; - int i, index; - char *inptr, *outptr; - - outptr = Out; - inptr = In; - for (index = 0; index < in_N; index++) { - mask = 128; - for (i = 0; i < 8; i++) { - *outptr = ((mask & (unsigned char)(*inptr)) != 0); - outptr += out_stride; - mask >>= 1; - } - inptr += in_stride; - } - return; -} - -/* Fixme -- pack and unpack should be separate routines */ -static PyObject * -pack_or_unpack_bits(PyObject *input, int axis, int unpack) -{ - PyArrayObject *inp; - PyObject *new = NULL; - PyObject *out = NULL; - npy_intp outdims[MAX_DIMS]; - int i; - void (*thefunc)(void *, int, npy_intp, npy_intp, void *, npy_intp, npy_intp); - PyArrayIterObject *it, *ot; - - inp = (PyArrayObject *)PyArray_FROM_O(input); - - if (inp == NULL) { - return NULL; - } - if (unpack) { - if (PyArray_TYPE(inp) != NPY_UBYTE) { - PyErr_SetString(PyExc_TypeError, - "Expected an input array of unsigned byte data type"); - goto fail; - } - } - else if (!PyArray_ISINTEGER(inp)) { - PyErr_SetString(PyExc_TypeError, - "Expected an input array of integer data type"); - goto fail; - } - - new 
= PyArray_CheckAxis(inp, &axis, 0); - Py_DECREF(inp); - if (new == NULL) { - return NULL; - } - /* Handle zero-dim array separately */ - if (PyArray_SIZE(new) == 0) { - return PyArray_Copy((PyArrayObject *)new); - } - - if (PyArray_NDIM(new) == 0) { - if (unpack) { - /* Handle 0-d array by converting it to a 1-d array */ - PyObject *temp; - PyArray_Dims newdim = {NULL, 1}; - npy_intp shape = 1; - - newdim.ptr = &shape; - temp = PyArray_Newshape((PyArrayObject *)new, &newdim, NPY_CORDER); - if (temp == NULL) { - goto fail; - } - Py_DECREF(new); - new = temp; - } - else { - char *optr, *iptr; - out = (PyArrayObject *)PyArray_New(Py_TYPE(new), 0, NULL, NPY_UBYTE, - NULL, NULL, 0, 0, NULL); - if (out == NULL) { - goto fail; - } - optr = PyArray_DATA(out); - iptr = PyArray_DATA(new); - *optr = 0; - for (i = 0; i 1, 9 -> 2, 16 -> 2, 17 -> 3 etc.. - */ - outdims[axis] = ((outdims[axis] - 1) >> 3) + 1; - thefunc = _packbits; - } - - /* Create output array */ - out = (PyArrayObject *)PyArray_New(Py_TYPE(new), - PyArray_NDIM(new), outdims, NPY_UBYTE, - NULL, NULL, 0, PyArray_ISFORTRAN(new), NULL); - if (out == NULL) { - goto fail; - } - /* Setup iterators to iterate over all but given axis */ - it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)new, &axis); - ot = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)out, &axis); - if (it == NULL || ot == NULL) { - Py_XDECREF(it); - Py_XDECREF(ot); - goto fail; - } - - while(PyArray_ITER_NOTDONE(it)) { - thefunc(PyArray_ITER_DATA(it), PyArray_ITEMSIZE(new), - PyArray_DIM(new, axis), PyArray_STRIDE(new, axis), - PyArray_ITER_DATA(ot), PyArray_DIM(out, axis), - PyArray_STRIDE(out, axis)); - PyArray_ITER_NEXT(it); - PyArray_ITER_NEXT(ot); - } - Py_DECREF(it); - Py_DECREF(ot); - -finish: - Py_DECREF(new); - return out; - -fail: - Py_XDECREF(new); - Py_XDECREF(out); - return NULL; -} - - -static PyObject * -io_pack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) -{ - PyObject *obj; - int axis = 
NPY_MAXDIMS; - static char *kwlist[] = {"in", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&" , kwlist, - &obj, PyArray_AxisConverter, &axis)) { - return NULL; - } - return pack_or_unpack_bits(obj, axis, 0); -} - -static PyObject * -io_unpack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) -{ - PyObject *obj; - int axis = NPY_MAXDIMS; - static char *kwlist[] = {"in", "axis", NULL}; - - if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&" , kwlist, - &obj, PyArray_AxisConverter, &axis)) { - return NULL; - } - return pack_or_unpack_bits(obj, axis, 1); -} - -/* The docstrings for many of these methods are in add_newdocs.py. */ -static struct PyMethodDef methods[] = { - {"_insert", (PyCFunction)arr_insert, - METH_VARARGS | METH_KEYWORDS, arr_insert__doc__}, - {"bincount", (PyCFunction)arr_bincount, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"digitize", (PyCFunction)arr_digitize, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"interp", (PyCFunction)arr_interp, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"ravel_multi_index", (PyCFunction)arr_ravel_multi_index, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"unravel_index", (PyCFunction)arr_unravel_index, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"add_docstring", (PyCFunction)arr_add_docstring, - METH_VARARGS, NULL}, - {"packbits", (PyCFunction)io_pack, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"unpackbits", (PyCFunction)io_unpack, - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -static void -define_types(void) -{ - PyObject *tp_dict; - PyObject *myobj; - - tp_dict = PyArrayDescr_Type.tp_dict; - /* Get "subdescr" */ - myobj = PyDict_GetItemString(tp_dict, "fields"); - if (myobj == NULL) { - return; - } - PyGetSetDescr_TypePtr = Py_TYPE(myobj); - myobj = PyDict_GetItemString(tp_dict, "alignment"); - if (myobj == NULL) { - return; - } - PyMemberDescr_TypePtr = Py_TYPE(myobj); - myobj = PyDict_GetItemString(tp_dict, "newbyteorder"); - if (myobj == NULL) { - return; - 
} - PyMethodDescr_TypePtr = Py_TYPE(myobj); - return; -} - -#if defined(NPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_compiled_base", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if defined(NPY_PY3K) -#define RETVAL m -PyObject *PyInit__compiled_base(void) -#else -#define RETVAL -PyMODINIT_FUNC -init_compiled_base(void) -#endif -{ - PyObject *m, *d; - -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("_compiled_base", methods); -#endif - if (!m) { - return RETVAL; - } - - /* Import the array objects */ - import_array(); - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - - /* - * PyExc_Exception should catch all the standard errors that are - * now raised instead of the string exception "numpy.lib.error". - * This is for backward compatibility with existing code. - */ - PyDict_SetItemString(d, "error", PyExc_Exception); - - - /* define PyGetSetDescr_Type and PyMemberDescr_Type */ - define_types(); - - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/lib/stride_tricks.py b/numpy-1.6.2/numpy/lib/stride_tricks.py deleted file mode 100644 index 7358be2226..0000000000 --- a/numpy-1.6.2/numpy/lib/stride_tricks.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Utilities that manipulate strides to achieve desirable effects. - -An explanation of strides can be found in the "ndarray.rst" file in the -NumPy reference guide. - -""" -import numpy as np - -__all__ = ['broadcast_arrays'] - -class DummyArray(object): - """ Dummy object that just exists to hang __array_interface__ dictionaries - and possibly keep alive a reference to a base array. - """ - def __init__(self, interface, base=None): - self.__array_interface__ = interface - self.base = base - -def as_strided(x, shape=None, strides=None): - """ Make an ndarray from the given array with the given shape and strides. 
- """ - interface = dict(x.__array_interface__) - if shape is not None: - interface['shape'] = tuple(shape) - if strides is not None: - interface['strides'] = tuple(strides) - return np.asarray(DummyArray(interface, base=x)) - -def broadcast_arrays(*args): - """ - Broadcast any number of arrays against each other. - - Parameters - ---------- - `*args` : array_likes - The arrays to broadcast. - - Returns - ------- - broadcasted : list of arrays - These arrays are views on the original arrays. They are typically - not contiguous. Furthermore, more than one element of a - broadcasted array may refer to a single memory location. If you - need to write to the arrays, make copies first. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> y = np.array([[1],[2],[3]]) - >>> np.broadcast_arrays(x, y) - [array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3]]), array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]])] - - Here is a useful idiom for getting contiguous copies instead of - non-contiguous views. - - >>> map(np.array, np.broadcast_arrays(x, y)) - [array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3]]), array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]])] - - """ - args = map(np.asarray, args) - shapes = [x.shape for x in args] - if len(set(shapes)) == 1: - # Common case where nothing needs to be broadcasted. - return args - shapes = [list(s) for s in shapes] - strides = [list(x.strides) for x in args] - nds = [len(s) for s in shapes] - biggest = max(nds) - # Go through each array and prepend dimensions of length 1 to each of the - # shapes in order to make the number of dimensions equal. - for i in range(len(args)): - diff = biggest - nds[i] - if diff > 0: - shapes[i] = [1] * diff + shapes[i] - strides[i] = [0] * diff + strides[i] - # Chech each dimension for compatibility. A dimension length of 1 is - # accepted as compatible with any other length. 
- common_shape = [] - for axis in range(biggest): - lengths = [s[axis] for s in shapes] - unique = set(lengths + [1]) - if len(unique) > 2: - # There must be at least two non-1 lengths for this axis. - raise ValueError("shape mismatch: two or more arrays have " - "incompatible dimensions on axis %r." % (axis,)) - elif len(unique) == 2: - # There is exactly one non-1 length. The common shape will take this - # value. - unique.remove(1) - new_length = unique.pop() - common_shape.append(new_length) - # For each array, if this axis is being broadcasted from a length of - # 1, then set its stride to 0 so that it repeats its data. - for i in range(len(args)): - if shapes[i][axis] == 1: - shapes[i][axis] = new_length - strides[i][axis] = 0 - else: - # Every array has a length of 1 on this axis. Strides can be left - # alone as nothing is broadcasted. - common_shape.append(1) - - # Construct the new arrays. - broadcasted = [as_strided(x, shape=sh, strides=st) for (x,sh,st) in - zip(args, shapes, strides)] - return broadcasted diff --git a/numpy-1.6.2/numpy/lib/tests/test__datasource.py b/numpy-1.6.2/numpy/lib/tests/test__datasource.py deleted file mode 100644 index ed5af516ff..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test__datasource.py +++ /dev/null @@ -1,327 +0,0 @@ -import os -from tempfile import mkdtemp, mkstemp, NamedTemporaryFile -from shutil import rmtree -from urlparse import urlparse -from urllib2 import URLError -import urllib2 - -from numpy.testing import * - -from numpy.compat import asbytes - -import numpy.lib._datasource as datasource - -def urlopen_stub(url, data=None): - '''Stub to replace urlopen for testing.''' - if url == valid_httpurl(): - tmpfile = NamedTemporaryFile(prefix='urltmp_') - return tmpfile - else: - raise URLError('Name or service not known') - -old_urlopen = None -def setup(): - global old_urlopen - old_urlopen = urllib2.urlopen - urllib2.urlopen = urlopen_stub - -def teardown(): - urllib2.urlopen = old_urlopen - -# A valid website 
for more robust testing -http_path = 'http://www.google.com/' -http_file = 'index.html' - -http_fakepath = 'http://fake.abc.web/site/' -http_fakefile = 'fake.txt' - -malicious_files = ['/etc/shadow', '../../shadow', - '..\\system.dat', 'c:\\windows\\system.dat'] - -magic_line = asbytes('three is the magic number') - - -# Utility functions used by many TestCases -def valid_textfile(filedir): - # Generate and return a valid temporary file. - fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) - os.close(fd) - return path - -def invalid_textfile(filedir): - # Generate and return an invalid filename. - fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) - os.close(fd) - os.remove(path) - return path - -def valid_httpurl(): - return http_path+http_file - -def invalid_httpurl(): - return http_fakepath+http_fakefile - -def valid_baseurl(): - return http_path - -def invalid_baseurl(): - return http_fakepath - -def valid_httpfile(): - return http_file - -def invalid_httpfile(): - return http_fakefile - -class TestDataSourceOpen(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - fh = self.ds.open(valid_httpurl()) - assert_(fh) - fh.close() - - def test_InvalidHTTP(self): - url = invalid_httpurl() - self.assertRaises(IOError, self.ds.open, url) - try: - self.ds.open(url) - except IOError, e: - # Regression test for bug fixed in r4342. 
- assert_(e.errno is None) - - def test_InvalidHTTPCacheURLError(self): - self.assertRaises(URLError, self.ds._cache, invalid_httpurl()) - - def test_ValidFile(self): - local_file = valid_textfile(self.tmpdir) - fh = self.ds.open(local_file) - assert_(fh) - fh.close() - - def test_InvalidFile(self): - invalid_file = invalid_textfile(self.tmpdir) - self.assertRaises(IOError, self.ds.open, invalid_file) - - def test_ValidGzipFile(self): - try: - import gzip - except ImportError: - # We don't have the gzip capabilities to test. - import nose - raise nose.SkipTest - # Test datasource's internal file_opener for Gzip files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') - fp = gzip.open(filepath, 'w') - fp.write(magic_line) - fp.close() - fp = self.ds.open(filepath) - result = fp.readline() - fp.close() - self.assertEqual(magic_line, result) - - def test_ValidBz2File(self): - try: - import bz2 - except ImportError: - # We don't have the bz2 capabilities to test. - import nose - raise nose.SkipTest - # Test datasource's internal file_opener for BZip2 files. 
- filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') - fp = bz2.BZ2File(filepath, 'w') - fp.write(magic_line) - fp.close() - fp = self.ds.open(filepath) - result = fp.readline() - fp.close() - self.assertEqual(magic_line, result) - - -class TestDataSourceExists(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - assert_(self.ds.exists(valid_httpurl())) - - def test_InvalidHTTP(self): - self.assertEqual(self.ds.exists(invalid_httpurl()), False) - - def test_ValidFile(self): - # Test valid file in destpath - tmpfile = valid_textfile(self.tmpdir) - assert_(self.ds.exists(tmpfile)) - # Test valid local file not in destpath - localdir = mkdtemp() - tmpfile = valid_textfile(localdir) - assert_(self.ds.exists(tmpfile)) - rmtree(localdir) - - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - self.assertEqual(self.ds.exists(tmpfile), False) - - -class TestDataSourceAbspath(TestCase): - def setUp(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.ds = datasource.DataSource(self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.tmpdir, netloc, - upath.strip(os.sep).strip('/')) - self.assertEqual(local_path, self.ds.abspath(valid_httpurl())) - - def test_ValidFile(self): - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - # Test with filename only - self.assertEqual(tmpfile, self.ds.abspath(os.path.split(tmpfile)[-1])) - # Test filename with complete path - self.assertEqual(tmpfile, self.ds.abspath(tmpfile)) - - def test_InvalidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) - invalidhttp = os.path.join(self.tmpdir, netloc, - upath.strip(os.sep).strip('/')) - self.assertNotEqual(invalidhttp, 
self.ds.abspath(valid_httpurl())) - - def test_InvalidFile(self): - invalidfile = valid_textfile(self.tmpdir) - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - # Test with filename only - self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename)) - # Test filename with complete path - self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile)) - - def test_sandboxing(self): - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - - tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) - - assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(tmpfile).startswith(self.tmpdir)) - assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) - for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) - - def test_windows_os_sep(self): - orig_os_sep = os.sep - try: - os.sep = '\\' - self.test_ValidHTTP() - self.test_ValidFile() - self.test_InvalidHTTP() - self.test_InvalidFile() - self.test_sandboxing() - finally: - os.sep = orig_os_sep - - -class TestRepositoryAbspath(TestCase): - def setUp(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.repos._destpath, netloc, \ - upath.strip(os.sep).strip('/')) - filepath = self.repos.abspath(valid_httpfile()) - self.assertEqual(local_path, filepath) - - def test_sandboxing(self): - tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) - assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) - for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) - - def 
test_windows_os_sep(self): - orig_os_sep = os.sep - try: - os.sep = '\\' - self.test_ValidHTTP() - self.test_sandboxing() - finally: - os.sep = orig_os_sep - - -class TestRepositoryExists(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidFile(self): - # Create local temp file - tmpfile = valid_textfile(self.tmpdir) - assert_(self.repos.exists(tmpfile)) - - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - self.assertEqual(self.repos.exists(tmpfile), False) - - def test_RemoveHTTPFile(self): - assert_(self.repos.exists(valid_httpurl())) - - def test_CachedHTTPFile(self): - localfile = valid_httpurl() - # Create a locally cached temp file with an URL based - # directory structure. This is similar to what Repository.open - # would do. - scheme, netloc, upath, pms, qry, frg = urlparse(localfile) - local_path = os.path.join(self.repos._destpath, netloc) - os.mkdir(local_path, 0700) - tmpfile = valid_textfile(local_path) - assert_(self.repos.exists(tmpfile)) - -class TestOpenFunc(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - - def tearDown(self): - rmtree(self.tmpdir) - - def test_DataSourceOpen(self): - local_file = valid_textfile(self.tmpdir) - # Test case where destpath is passed in - fp = datasource.open(local_file, destpath=self.tmpdir) - assert_(fp) - fp.close() - # Test case where default destpath is used - fp = datasource.open(local_file) - assert_(fp) - fp.close() - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test__iotools.py b/numpy-1.6.2/numpy/lib/tests/test__iotools.py deleted file mode 100644 index 853d060877..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test__iotools.py +++ /dev/null @@ -1,327 +0,0 @@ -import sys -if sys.version_info[0] >= 3: - from io import BytesIO - def StringIO(s=""): - return BytesIO(asbytes(s)) -else: - 
from StringIO import StringIO - -from datetime import date -import time - -import numpy as np -from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter, \ - has_nested_fields, easy_dtype, flatten_dtype -from numpy.testing import * - -from numpy.compat import asbytes, asbytes_nested - -class TestLineSplitter(TestCase): - "Tests the LineSplitter class." - # - def test_no_delimiter(self): - "Test LineSplitter w/o delimiter" - strg = asbytes(" 1 2 3 4 5 # test") - test = LineSplitter()(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) - test = LineSplitter('')(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) - - def test_space_delimiter(self): - "Test space delimiter" - strg = asbytes(" 1 2 3 4 5 # test") - test = LineSplitter(asbytes(' '))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) - test = LineSplitter(asbytes(' '))(strg) - assert_equal(test, asbytes_nested(['1 2 3 4', '5'])) - - def test_tab_delimiter(self): - "Test tab delimiter" - strg = asbytes(" 1\t 2\t 3\t 4\t 5 6") - test = LineSplitter(asbytes('\t'))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6'])) - strg = asbytes(" 1 2\t 3 4\t 5 6") - test = LineSplitter(asbytes('\t'))(strg) - assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6'])) - - def test_other_delimiter(self): - "Test LineSplitter on delimiter" - strg = asbytes("1,2,3,4,,5") - test = LineSplitter(asbytes(','))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) - # - strg = asbytes(" 1,2,3,4,,5 # test") - test = LineSplitter(asbytes(','))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) - - def test_constant_fixed_width(self): - "Test LineSplitter w/ fixed-width fields" - strg = asbytes(" 1 2 3 4 5 # test") - test = LineSplitter(3)(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5', ''])) - # - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter(20)(strg) - 
assert_equal(test, asbytes_nested(['1 3 4 5 6'])) - # - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter(30)(strg) - assert_equal(test, asbytes_nested(['1 3 4 5 6'])) - - def test_variable_fixed_width(self): - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter((3, 6, 6, 3))(strg) - assert_equal(test, asbytes_nested(['1', '3', '4 5', '6'])) - # - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter((6, 6, 9))(strg) - assert_equal(test, asbytes_nested(['1', '3 4', '5 6'])) - - -#------------------------------------------------------------------------------- - -class TestNameValidator(TestCase): - # - def test_case_sensitivity(self): - "Test case sensitivity" - names = ['A', 'a', 'b', 'c'] - test = NameValidator().validate(names) - assert_equal(test, ['A', 'a', 'b', 'c']) - test = NameValidator(case_sensitive=False).validate(names) - assert_equal(test, ['A', 'A_1', 'B', 'C']) - test = NameValidator(case_sensitive='upper').validate(names) - assert_equal(test, ['A', 'A_1', 'B', 'C']) - test = NameValidator(case_sensitive='lower').validate(names) - assert_equal(test, ['a', 'a_1', 'b', 'c']) - # - def test_excludelist(self): - "Test excludelist" - names = ['dates', 'data', 'Other Data', 'mask'] - validator = NameValidator(excludelist=['dates', 'data', 'mask']) - test = validator.validate(names) - assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) - # - def test_missing_names(self): - "Test validate missing names" - namelist = ('a', 'b', 'c') - validator = NameValidator() - assert_equal(validator(namelist), ['a', 'b', 'c']) - namelist = ('', 'b', 'c') - assert_equal(validator(namelist), ['f0', 'b', 'c']) - namelist = ('a', 'b', '') - assert_equal(validator(namelist), ['a', 'b', 'f0']) - namelist = ('', 'f0', '') - assert_equal(validator(namelist), ['f1', 'f0', 'f2']) - # - def test_validate_nb_names(self): - "Test validate nb names" - namelist = ('a', 'b', 'c') - validator = NameValidator() - assert_equal(validator(namelist, nbfields=1), 
('a',)) - assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), - ['a', 'b', 'c', 'g0', 'g1']) - # - def test_validate_wo_names(self): - "Test validate no names" - namelist = None - validator = NameValidator() - assert(validator(namelist) is None) - assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) - - - - -#------------------------------------------------------------------------------- - -def _bytes_to_date(s): - if sys.version_info[0] >= 3: - return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3]) - else: - return date(*time.strptime(s, "%Y-%m-%d")[:3]) - -class TestStringConverter(TestCase): - "Test StringConverter" - # - def test_creation(self): - "Test creation of a StringConverter" - converter = StringConverter(int, -99999) - assert_equal(converter._status, 1) - assert_equal(converter.default, -99999) - # - def test_upgrade(self): - "Tests the upgrade method." - converter = StringConverter() - assert_equal(converter._status, 0) - converter.upgrade(asbytes('0')) - assert_equal(converter._status, 1) - converter.upgrade(asbytes('0.')) - assert_equal(converter._status, 2) - converter.upgrade(asbytes('0j')) - assert_equal(converter._status, 3) - converter.upgrade(asbytes('a')) - assert_equal(converter._status, len(converter._mapper) - 1) - # - def test_missing(self): - "Tests the use of missing values." 
- converter = StringConverter(missing_values=(asbytes('missing'), - asbytes('missed'))) - converter.upgrade(asbytes('0')) - assert_equal(converter(asbytes('0')), 0) - assert_equal(converter(asbytes('')), converter.default) - assert_equal(converter(asbytes('missing')), converter.default) - assert_equal(converter(asbytes('missed')), converter.default) - try: - converter('miss') - except ValueError: - pass - # - def test_upgrademapper(self): - "Tests updatemapper" - dateparser = _bytes_to_date - StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) - convert = StringConverter(dateparser, date(2000, 1, 1)) - test = convert(asbytes('2001-01-01')) - assert_equal(test, date(2001, 01, 01)) - test = convert(asbytes('2009-01-01')) - assert_equal(test, date(2009, 01, 01)) - test = convert(asbytes('')) - assert_equal(test, date(2000, 01, 01)) - # - def test_string_to_object(self): - "Make sure that string-to-object functions are properly recognized" - conv = StringConverter(_bytes_to_date) - assert_equal(conv._mapper[-2][0](0), 0j) - assert(hasattr(conv, 'default')) - # - def test_keep_default(self): - "Make sure we don't lose an explicit default" - converter = StringConverter(None, missing_values=asbytes(''), - default= -999) - converter.upgrade(asbytes('3.14159265')) - assert_equal(converter.default, -999) - assert_equal(converter.type, np.dtype(float)) - # - converter = StringConverter(None, missing_values=asbytes(''), default=0) - converter.upgrade(asbytes('3.14159265')) - assert_equal(converter.default, 0) - assert_equal(converter.type, np.dtype(float)) - # - def test_keep_default_zero(self): - "Check that we don't lose a default of 0" - converter = StringConverter(int, default=0, - missing_values=asbytes("N/A")) - assert_equal(converter.default, 0) - # - def test_keep_missing_values(self): - "Check that we're not losing missing values" - converter = StringConverter(int, default=0, - missing_values=asbytes("N/A")) - assert_equal(converter.missing_values, 
set(asbytes_nested(['', 'N/A']))) - - def test_int64_dtype(self): - "Check that int64 integer types can be specified" - converter = StringConverter(np.int64, default=0) - val = asbytes("-9223372036854775807") - assert_(converter(val) == -9223372036854775807) - val = asbytes("9223372036854775807") - assert_(converter(val) == 9223372036854775807) - - def test_uint64_dtype(self): - "Check that uint64 integer types can be specified" - converter = StringConverter(np.uint64, default=0) - val = asbytes("9223372043271415339") - assert_(converter(val) == 9223372043271415339) - -#------------------------------------------------------------------------------- - -class TestMiscFunctions(TestCase): - # - def test_has_nested_dtype(self): - "Test has_nested_dtype" - ndtype = np.dtype(np.float) - assert_equal(has_nested_fields(ndtype), False) - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - assert_equal(has_nested_fields(ndtype), False) - ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - assert_equal(has_nested_fields(ndtype), True) - - def test_easy_dtype(self): - "Test ndtype on dtypes" - # Simple case - ndtype = float - assert_equal(easy_dtype(ndtype), np.dtype(float)) - # As string w/o names - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype), - np.dtype([('f0', "i4"), ('f1', "f8")])) - # As string w/o names but different default format - assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), - np.dtype([('field_000', "i4"), ('field_001', "f8")])) - # As string w/ names - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names="a, b"), - np.dtype([('a', "i4"), ('b', "f8")])) - # As string w/ names (too many) - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([('a', "i4"), ('b', "f8")])) - # As string w/ names (not enough) - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names=", b"), - np.dtype([('f0', "i4"), ('b', "f8")])) - # ... 
(with different default format) - assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), - np.dtype([('a', "i4"), ('f00', "f8")])) - # As list of tuples w/o names - ndtype = [('A', int), ('B', float)] - assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) - # As list of tuples w/ names - assert_equal(easy_dtype(ndtype, names="a,b"), - np.dtype([('a', int), ('b', float)])) - # As list of tuples w/ not enough names - assert_equal(easy_dtype(ndtype, names="a"), - np.dtype([('a', int), ('f0', float)])) - # As list of tuples w/ too many names - assert_equal(easy_dtype(ndtype, names="a,b,c"), - np.dtype([('a', int), ('b', float)])) - # As list of types w/o names - ndtype = (int, float, float) - assert_equal(easy_dtype(ndtype), - np.dtype([('f0', int), ('f1', float), ('f2', float)])) - # As list of types w names - ndtype = (int, float, float) - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([('a', int), ('b', float), ('c', float)])) - # As simple dtype w/ names - ndtype = np.dtype(float) - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([(_, float) for _ in ('a', 'b', 'c')])) - # As simple dtype w/o names (but multiple fields) - ndtype = np.dtype(float) - assert_equal(easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), - np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) - - - def test_flatten_dtype(self): - "Testing flatten_dtype" - # Standard dtype - dt = np.dtype([("a", "f8"), ("b", "f8")]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, float]) - # Recursive dtype - dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) - # dtype with shaped fields - dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, int]) - dt_flat = flatten_dtype(dt, True) - assert_equal(dt_flat, [float] * 2 + [int] * 3) - # dtype w/ titles - dt = 
np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, float]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_arraysetops.py b/numpy-1.6.2/numpy/lib/tests/test_arraysetops.py deleted file mode 100644 index 907a27a8c6..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_arraysetops.py +++ /dev/null @@ -1,207 +0,0 @@ -""" Test functions for 1D array set operations. - -""" - -from numpy.testing import * -import numpy as np -from numpy.lib.arraysetops import * - -import warnings - -class TestAso(TestCase): - def test_unique( self ): - a = np.array( [5, 7, 1, 2, 1, 5, 7] ) - - ec = np.array( [1, 2, 5, 7] ) - c = unique( a ) - assert_array_equal( c, ec ) - - vals, indices = unique( a, return_index=True ) - - - ed = np.array( [2, 3, 0, 1] ) - assert_array_equal(vals, ec) - assert_array_equal(indices, ed) - - vals, ind0, ind1 = unique( a, return_index=True, - return_inverse=True ) - - - ee = np.array( [2, 3, 0, 1, 0, 2, 3] ) - assert_array_equal(vals, ec) - assert_array_equal(ind0, ed) - assert_array_equal(ind1, ee) - - assert_array_equal([], unique([])) - - def test_intersect1d( self ): - # unique inputs - a = np.array( [5, 7, 1, 2] ) - b = np.array( [2, 4, 3, 1, 5] ) - - ec = np.array( [1, 2, 5] ) - c = intersect1d( a, b, assume_unique=True ) - assert_array_equal( c, ec ) - - # non-unique inputs - a = np.array( [5, 5, 7, 1, 2] ) - b = np.array( [2, 1, 4, 3, 3, 1, 5] ) - - ed = np.array( [1, 2, 5] ) - c = intersect1d( a, b ) - assert_array_equal( c, ed ) - - assert_array_equal([], intersect1d([],[])) - - def test_setxor1d( self ): - a = np.array( [5, 7, 1, 2] ) - b = np.array( [2, 4, 3, 1, 5] ) - - ec = np.array( [3, 4, 7] ) - c = setxor1d( a, b ) - assert_array_equal( c, ec ) - - a = np.array( [1, 2, 3] ) - b = np.array( [6, 5, 4] ) - - ec = np.array( [1, 2, 3, 4, 5, 6] ) - c = setxor1d( a, b ) - assert_array_equal( c, ec ) - - a = np.array( [1, 8, 2, 3] ) - b = 
np.array( [6, 5, 4, 8] ) - - ec = np.array( [1, 2, 3, 4, 5, 6] ) - c = setxor1d( a, b ) - assert_array_equal( c, ec ) - - assert_array_equal([], setxor1d([],[])) - - def test_ediff1d(self): - zero_elem = np.array([]) - one_elem = np.array([1]) - two_elem = np.array([1,2]) - - assert_array_equal([],ediff1d(zero_elem)) - assert_array_equal([0],ediff1d(zero_elem,to_begin=0)) - assert_array_equal([0],ediff1d(zero_elem,to_end=0)) - assert_array_equal([-1,0],ediff1d(zero_elem,to_begin=-1,to_end=0)) - assert_array_equal([],ediff1d(one_elem)) - assert_array_equal([1],ediff1d(two_elem)) - - def test_in1d(self): - a = np.array( [5, 7, 1, 2] ) - b = np.array( [2, 4, 3, 1, 5] ) - - ec = np.array( [True, False, True, True] ) - c = in1d( a, b, assume_unique=True ) - assert_array_equal( c, ec ) - - a[0] = 8 - ec = np.array( [False, False, True, True] ) - c = in1d( a, b, assume_unique=True ) - assert_array_equal( c, ec ) - - a[0], a[3] = 4, 8 - ec = np.array( [True, False, True, False] ) - c = in1d( a, b, assume_unique=True ) - assert_array_equal( c, ec ) - - a = np.array([5,4,5,3,4,4,3,4,3,5,2,1,5,5]) - b = [2,3,4] - - ec = [False, True, False, True, True, True, True, True, True, False, - True, False, False, False] - c = in1d(a, b) - assert_array_equal(c, ec) - - b = b + [5, 5, 4] - - ec = [True, True, True, True, True, True, True, True, True, True, - True, False, True, True] - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5]) - - ec = np.array([True, False, True, True]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 7, 1, 1, 2]) - b = np.array([2, 4, 3, 3, 1, 5]) - - ec = np.array([True, False, True, True, True]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5]) - b = np.array([2]) - - ec = np.array([False]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 5]) - b = np.array([2, 2]) - - ec = np.array([False, False]) - c = in1d(a, b) - assert_array_equal(c, ec) - - 
assert_array_equal(in1d([], []), []) - - def test_in1d_char_array( self ): - a = np.array(['a', 'b', 'c','d','e','c','e','b']) - b = np.array(['a','c']) - - ec = np.array([True, False, True, False, False, True, False, False]) - c = in1d(a, b) - - assert_array_equal(c, ec) - - def test_union1d( self ): - a = np.array( [5, 4, 7, 1, 2] ) - b = np.array( [2, 4, 3, 3, 2, 1, 5] ) - - ec = np.array( [1, 2, 3, 4, 5, 7] ) - c = union1d( a, b ) - assert_array_equal( c, ec ) - - assert_array_equal([], union1d([],[])) - - def test_setdiff1d( self ): - a = np.array( [6, 5, 4, 7, 1, 2, 7, 4] ) - b = np.array( [2, 4, 3, 3, 2, 1, 5] ) - - ec = np.array( [6, 7] ) - c = setdiff1d( a, b ) - assert_array_equal( c, ec ) - - a = np.arange( 21 ) - b = np.arange( 19 ) - ec = np.array( [19, 20] ) - c = setdiff1d( a, b ) - assert_array_equal( c, ec ) - - assert_array_equal([], setdiff1d([],[])) - - def test_setdiff1d_char_array(self): - a = np.array(['a','b','c']) - b = np.array(['a','b','s']) - assert_array_equal(setdiff1d(a,b),np.array(['c'])) - - def test_manyways( self ): - a = np.array( [5, 7, 1, 2, 8] ) - b = np.array( [9, 8, 2, 4, 3, 1, 5] ) - - c1 = setxor1d( a, b ) - aux1 = intersect1d( a, b ) - aux2 = union1d( a, b ) - c2 = setdiff1d( aux2, aux1 ) - assert_array_equal( c1, c2 ) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_arrayterator.py b/numpy-1.6.2/numpy/lib/tests/test_arrayterator.py deleted file mode 100644 index 3dce009d31..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_arrayterator.py +++ /dev/null @@ -1,51 +0,0 @@ -from operator import mul - -import numpy as np -from numpy.random import randint -from numpy.lib import Arrayterator - -import sys -if sys.version_info[0] >= 3: - from functools import reduce - -def test(): - np.random.seed(np.arange(10)) - - # Create a random array - ndims = randint(5)+1 - shape = tuple(randint(10)+1 for dim in range(ndims)) - els = reduce(mul, shape) - a = np.arange(els) - a.shape = 
shape - - buf_size = randint(2*els) - b = Arrayterator(a, buf_size) - - # Check that each block has at most ``buf_size`` elements - for block in b: - assert len(block.flat) <= (buf_size or els) - - # Check that all elements are iterated correctly - assert list(b.flat) == list(a.flat) - - # Slice arrayterator - start = [randint(dim) for dim in shape] - stop = [randint(dim)+1 for dim in shape] - step = [randint(dim)+1 for dim in shape] - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - c = b[slice_] - d = a[slice_] - - # Check that each block has at most ``buf_size`` elements - for block in c: - assert len(block.flat) <= (buf_size or els) - - # Check that the arrayterator is sliced correctly - assert np.all(c.__array__() == d) - - # Check that all elements are iterated correctly - assert list(c.flat) == list(d.flat) - -if __name__ == '__main__': - from numpy.testing import run_module_suite - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_financial.py b/numpy-1.6.2/numpy/lib/tests/test_financial.py deleted file mode 100644 index f3143049fe..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_financial.py +++ /dev/null @@ -1,62 +0,0 @@ -from numpy.testing import * -import numpy as np - -class TestFinancial(TestCase): - def test_rate(self): - assert_almost_equal(np.rate(10,0,-3500,10000), - 0.1107, 4) - - def test_irr(self): - v = [-150000, 15000, 25000, 35000, 45000, 60000] - assert_almost_equal(np.irr(v), - 0.0524, 2) - - def test_pv(self): - assert_almost_equal(np.pv(0.07,20,12000,0), - -127128.17, 2) - - def test_fv(self): - assert_almost_equal(np.fv(0.075, 20, -2000,0,0), - 86609.36, 2) - - def test_pmt(self): - assert_almost_equal(np.pmt(0.08/12,5*12,15000), - -304.146, 3) - - def test_nper(self): - assert_almost_equal(np.nper(0.075,-2000,0,100000.), - 21.54, 2) - - def test_nper2(self): - assert_almost_equal(np.nper(0.0,-2000,0,100000.), - 50.0, 1) - - def test_npv(self): - 
assert_almost_equal(np.npv(0.05,[-15000,1500,2500,3500,4500,6000]), - 117.04, 2) - - def test_mirr(self): - val = [-4500,-800,800,800,600,600,800,800,700,3000] - assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) - - val = [-120000,39000,30000,21000,37000,46000] - assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) - - val = [100,200,-50,300,-200] - assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) - - val = [39000,30000,21000,37000,46000] - assert_(np.isnan(np.mirr(val, 0.10, 0.12))) - - - -def test_unimplemented(): - # np.round(np.ppmt(0.1/12,1,60,55000),2) == 710.25 - assert_raises(NotImplementedError, np.ppmt, 0.1/12, 1, 60, 55000) - - # np.round(np.ipmt(0.1/12,1,24,2000),2) == 16.67 - assert_raises(NotImplementedError, np.ipmt, 0.1/12, 1, 24, 2000) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_format.py b/numpy-1.6.2/numpy/lib/tests/test_format.py deleted file mode 100644 index ff8e937049..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_format.py +++ /dev/null @@ -1,564 +0,0 @@ -r''' Test the .npy file format. - -Set up: - - >>> import sys - >>> if sys.version_info[0] >= 3: - ... from io import BytesIO as StringIO - ... else: - ... from cStringIO import StringIO - >>> from numpy.lib import format - >>> - >>> scalars = [ - ... np.uint8, - ... np.int8, - ... np.uint16, - ... np.int16, - ... np.uint32, - ... np.int32, - ... np.uint64, - ... np.int64, - ... np.float32, - ... np.float64, - ... np.complex64, - ... np.complex128, - ... object, - ... ] - >>> - >>> basic_arrays = [] - >>> - >>> for scalar in scalars: - ... for endian in '<>': - ... dtype = np.dtype(scalar).newbyteorder(endian) - ... basic = np.arange(15).astype(dtype) - ... basic_arrays.extend([ - ... np.array([], dtype=dtype), - ... np.array(10, dtype=dtype), - ... basic, - ... basic.reshape((3,5)), - ... basic.reshape((3,5)).T, - ... basic.reshape((3,5))[::-1,::2], - ... ]) - ... - >>> - >>> Pdescr = [ - ... 
('x', 'i4', (2,)), - ... ('y', 'f8', (2, 2)), - ... ('z', 'u1')] - >>> - >>> - >>> PbufferT = [ - ... ([3,2], [[6.,4.],[6.,4.]], 8), - ... ([4,3], [[7.,5.],[7.,5.]], 9), - ... ] - >>> - >>> - >>> Ndescr = [ - ... ('x', 'i4', (2,)), - ... ('Info', [ - ... ('value', 'c16'), - ... ('y2', 'f8'), - ... ('Info2', [ - ... ('name', 'S2'), - ... ('value', 'c16', (2,)), - ... ('y3', 'f8', (2,)), - ... ('z3', 'u4', (2,))]), - ... ('name', 'S2'), - ... ('z2', 'b1')]), - ... ('color', 'S2'), - ... ('info', [ - ... ('Name', 'U8'), - ... ('Value', 'c16')]), - ... ('y', 'f8', (2, 2)), - ... ('z', 'u1')] - >>> - >>> - >>> NbufferT = [ - ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), - ... ] - >>> - >>> - >>> record_arrays = [ - ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), - ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), - ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), - ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), - ... ] - -Test the magic string writing. - - >>> format.magic(1, 0) - '\x93NUMPY\x01\x00' - >>> format.magic(0, 0) - '\x93NUMPY\x00\x00' - >>> format.magic(255, 255) - '\x93NUMPY\xff\xff' - >>> format.magic(2, 5) - '\x93NUMPY\x02\x05' - -Test the magic string reading. - - >>> format.read_magic(StringIO(format.magic(1, 0))) - (1, 0) - >>> format.read_magic(StringIO(format.magic(0, 0))) - (0, 0) - >>> format.read_magic(StringIO(format.magic(255, 255))) - (255, 255) - >>> format.read_magic(StringIO(format.magic(2, 5))) - (2, 5) - -Test the header writing. - - >>> for arr in basic_arrays + record_arrays: - ... f = StringIO() - ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it - ... print repr(f.getvalue()) - ... 
- "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" - 
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" - 
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (15,)} \n" - 
"F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|O4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|O4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|O4', 'fortran_order': False, 'shape': (3, 3)} \n" - "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" - "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" -''' - - -import sys -import os -import shutil -import tempfile - -if sys.version_info[0] >= 3: - from io import BytesIO as StringIO -else: - from cStringIO import StringIO - -import numpy as np -from numpy.testing import * - -from numpy.lib import format - -from numpy.compat import asbytes, asbytes_nested - - -tempdir = None - -# Module-level setup. -def setup_module(): - global tempdir - tempdir = tempfile.mkdtemp() - -def teardown_module(): - global tempdir - if tempdir is not None and os.path.isdir(tempdir): - shutil.rmtree(tempdir) - tempdir = None - - -# Generate some basic arrays to test with. 
-scalars = [ - np.uint8, - np.int8, - np.uint16, - np.int16, - np.uint32, - np.int32, - np.uint64, - np.int64, - np.float32, - np.float64, - np.complex64, - np.complex128, - object, -] -basic_arrays = [] -for scalar in scalars: - for endian in '<>': - dtype = np.dtype(scalar).newbyteorder(endian) - basic = np.arange(15).astype(dtype) - basic_arrays.extend([ - # Empty - np.array([], dtype=dtype), - # Rank-0 - np.array(10, dtype=dtype), - # 1-D - basic, - # 2-D C-contiguous - basic.reshape((3,5)), - # 2-D F-contiguous - basic.reshape((3,5)).T, - # 2-D non-contiguous - basic.reshape((3,5))[::-1,::2], - ]) - -# More complicated record arrays. -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3,2], [[6.,4.],[6.,4.]], 8), - ([4,3], [[7.,5.],[7.,5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 
6j), [[6.,4.],[6.,4.]], 8), - ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), - ] - -record_arrays = [ - np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), - np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), - np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), - np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), -] - -def roundtrip(arr): - f = StringIO() - format.write_array(f, arr) - f2 = StringIO(f.getvalue()) - arr2 = format.read_array(f2) - return arr2 - -def assert_equal(o1, o2): - assert o1 == o2 - - -def test_roundtrip(): - for arr in basic_arrays + record_arrays: - arr2 = roundtrip(arr) - yield assert_array_equal, arr, arr2 - -def test_memmap_roundtrip(): - # XXX: test crashes nose on windows. Fix this - if not (sys.platform == 'win32' or sys.platform == 'cygwin'): - for arr in basic_arrays + record_arrays: - if arr.dtype.hasobject: - # Skip these since they can't be mmap'ed. - continue - # Write it out normally and through mmap. - nfn = os.path.join(tempdir, 'normal.npy') - mfn = os.path.join(tempdir, 'memmap.npy') - fp = open(nfn, 'wb') - try: - format.write_array(fp, arr) - finally: - fp.close() - - fortran_order = (arr.flags.f_contiguous and not arr.flags.c_contiguous) - ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype, - shape=arr.shape, fortran_order=fortran_order) - ma[...] = arr - del ma - - # Check that both of these files' contents are the same. - fp = open(nfn, 'rb') - normal_bytes = fp.read() - fp.close() - fp = open(mfn, 'rb') - memmap_bytes = fp.read() - fp.close() - yield assert_equal, normal_bytes, memmap_bytes - - # Check that reading the file using memmap works. - ma = format.open_memmap(nfn, mode='r') - #yield assert_array_equal, ma, arr - del ma - - -def test_write_version_1_0(): - f = StringIO() - arr = np.arange(1) - # These should pass. 
- format.write_array(f, arr, version=(1, 0)) - format.write_array(f, arr) - - # These should all fail. - bad_versions = [ - (1, 1), - (0, 0), - (0, 1), - (2, 0), - (2, 2), - (255, 255), - ] - for version in bad_versions: - try: - format.write_array(f, arr, version=version) - except ValueError: - pass - else: - raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,)) - - -bad_version_magic = asbytes_nested([ - '\x93NUMPY\x01\x01', - '\x93NUMPY\x00\x00', - '\x93NUMPY\x00\x01', - '\x93NUMPY\x02\x00', - '\x93NUMPY\x02\x02', - '\x93NUMPY\xff\xff', -]) -malformed_magic = asbytes_nested([ - '\x92NUMPY\x01\x00', - '\x00NUMPY\x01\x00', - '\x93numpy\x01\x00', - '\x93MATLB\x01\x00', - '\x93NUMPY\x01', - '\x93NUMPY', - '', -]) - -def test_read_magic_bad_magic(): - for magic in malformed_magic: - f = StringIO(magic) - yield raises(ValueError)(format.read_magic), f - -def test_read_version_1_0_bad_magic(): - for magic in bad_version_magic + malformed_magic: - f = StringIO(magic) - yield raises(ValueError)(format.read_array), f - -def test_bad_magic_args(): - assert_raises(ValueError, format.magic, -1, 1) - assert_raises(ValueError, format.magic, 256, 1) - assert_raises(ValueError, format.magic, 1, -1) - assert_raises(ValueError, format.magic, 1, 256) - -def test_large_header(): - s = StringIO() - d = {'a':1,'b':2} - format.write_array_header_1_0(s,d) - - s = StringIO() - d = {'a':1,'b':2,'c':'x'*256*256} - assert_raises(ValueError, format.write_array_header_1_0, s, d) - -def test_bad_header(): - # header of length less than 2 should fail - s = StringIO() - assert_raises(ValueError, format.read_array_header_1_0, s) - s = StringIO(asbytes('1')) - assert_raises(ValueError, format.read_array_header_1_0, s) - - # header shorter than indicated size should fail - s = StringIO(asbytes('\x01\x00')) - assert_raises(ValueError, format.read_array_header_1_0, s) - - # headers without the exact keys required should fail - d = {"shape":(1,2), - 
"descr":"x"} - s = StringIO() - format.write_array_header_1_0(s,d) - assert_raises(ValueError, format.read_array_header_1_0, s) - - d = {"shape":(1,2), - "fortran_order":False, - "descr":"x", - "extrakey":-1} - s = StringIO() - format.write_array_header_1_0(s,d) - assert_raises(ValueError, format.read_array_header_1_0, s) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_function_base.py b/numpy-1.6.2/numpy/lib/tests/test_function_base.py deleted file mode 100644 index d7d5513a6d..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_function_base.py +++ /dev/null @@ -1,1194 +0,0 @@ -import warnings - -from numpy.testing import * -import numpy.lib -from numpy.lib import * -from numpy.core import * -from numpy import matrix, asmatrix - -import numpy as np - -class TestAny(TestCase): - def test_basic(self): - y1 = [0, 0, 1, 0] - y2 = [0, 0, 0, 0] - y3 = [1, 0, 1, 0] - assert_(any(y1)) - assert_(any(y3)) - assert_(not any(y2)) - - def test_nd(self): - y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]] - assert_(any(y1)) - assert_array_equal(sometrue(y1, axis=0), [1, 1, 0]) - assert_array_equal(sometrue(y1, axis=1), [0, 1, 1]) - - -class TestAll(TestCase): - def test_basic(self): - y1 = [0, 1, 1, 0] - y2 = [0, 0, 0, 0] - y3 = [1, 1, 1, 1] - assert_(not all(y1)) - assert_(all(y3)) - assert_(not all(y2)) - assert_(all(~array(y2))) - - def test_nd(self): - y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]] - assert_(not all(y1)) - assert_array_equal(alltrue(y1, axis=0), [0, 0, 1]) - assert_array_equal(alltrue(y1, axis=1), [0, 0, 1]) - - -class TestAverage(TestCase): - def test_basic(self): - y1 = array([1, 2, 3]) - assert_(average(y1, axis=0) == 2.) - y2 = array([1., 2., 3.]) - assert_(average(y2, axis=0) == 2.) - y3 = [0., 0., 0.] - assert_(average(y3, axis=0) == 0.) 
- - y4 = ones((4, 4)) - y4[0, 1] = 0 - y4[1, 0] = 2 - assert_almost_equal(y4.mean(0), average(y4, 0)) - assert_almost_equal(y4.mean(1), average(y4, 1)) - - y5 = rand(5, 5) - assert_almost_equal(y5.mean(0), average(y5, 0)) - assert_almost_equal(y5.mean(1), average(y5, 1)) - - y6 = matrix(rand(5, 5)) - assert_array_equal(y6.mean(0), average(y6, 0)) - - def test_weights(self): - y = arange(10) - w = arange(10) - actual = average(y, weights=w) - desired = (arange(10) ** 2).sum()*1. / arange(10).sum() - assert_almost_equal(actual, desired) - - y1 = array([[1, 2, 3], [4, 5, 6]]) - w0 = [1, 2] - actual = average(y1, weights=w0, axis=0) - desired = array([3., 4., 5.]) - assert_almost_equal(actual, desired) - - w1 = [0, 0, 1] - actual = average(y1, weights=w1, axis=1) - desired = array([3., 6.]) - assert_almost_equal(actual, desired) - - # This should raise an error. Can we test for that ? - # assert_equal(average(y1, weights=w1), 9./2.) - - # 2D Case - w2 = [[0, 0, 1], [0, 0, 2]] - desired = array([3., 6.]) - assert_array_equal(average(y1, weights=w2, axis=1), desired) - assert_equal(average(y1, weights=w2), 5.) - - def test_returned(self): - y = array([[1, 2, 3], [4, 5, 6]]) - - # No weights - avg, scl = average(y, returned=True) - assert_equal(scl, 6.) 
- - avg, scl = average(y, 0, returned=True) - assert_array_equal(scl, array([2., 2., 2.])) - - avg, scl = average(y, 1, returned=True) - assert_array_equal(scl, array([3., 3.])) - - # With weights - w0 = [1, 2] - avg, scl = average(y, weights=w0, axis=0, returned=True) - assert_array_equal(scl, array([3., 3., 3.])) - - w1 = [1, 2, 3] - avg, scl = average(y, weights=w1, axis=1, returned=True) - assert_array_equal(scl, array([6., 6.])) - - w2 = [[0, 0, 1], [1, 2, 3]] - avg, scl = average(y, weights=w2, axis=1, returned=True) - assert_array_equal(scl, array([1., 6.])) - - -class TestSelect(TestCase): - def _select(self, cond, values, default=0): - output = [] - for m in range(len(cond)): - output += [V[m] for V, C in zip(values, cond) if C[m]] or [default] - return output - - def test_basic(self): - choices = [array([1, 2, 3]), - array([4, 5, 6]), - array([7, 8, 9])] - conditions = [array([0, 0, 0]), - array([0, 1, 0]), - array([0, 0, 1])] - assert_array_equal(select(conditions, choices, default=15), - self._select(conditions, choices, default=15)) - - assert_equal(len(choices), 3) - assert_equal(len(conditions), 3) - - -class TestInsert(TestCase): - def test_basic(self): - a = [1, 2, 3] - assert_equal(insert(a, 0, 1), [1, 1, 2, 3]) - assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) - assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) - - -class TestAmax(TestCase): - def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(amax(a), 10.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(amax(b, axis=0), [8.0, 10.0, 9.0]) - assert_equal(amax(b, axis=1), [9.0, 10.0, 8.0]) - - -class TestAmin(TestCase): - def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(amin(a), -5.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(amin(b, axis=0), [3.0, 3.0, 2.0]) - assert_equal(amin(b, axis=1), [3.0, 4.0, 2.0]) - - -class TestPtp(TestCase): - def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] 
- assert_equal(ptp(a, axis=0), 15.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(ptp(b, axis=0), [5.0, 7.0, 7.0]) - assert_equal(ptp(b, axis= -1), [6.0, 6.0, 6.0]) - - -class TestCumsum(TestCase): - def test_basic(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - for ctype in [int8, uint8, int16, uint16, int32, uint32, - float32, float64, complex64, complex128]: - a = array(ba, ctype) - a2 = array(ba2, ctype) - assert_array_equal(cumsum(a, axis=0), array([1, 3, 13, 24, 30, 35, 39], ctype)) - assert_array_equal(cumsum(a2, axis=0), array([[1, 2, 3, 4], [6, 8, 10, 13], - [16, 11, 14, 18]], ctype)) - assert_array_equal(cumsum(a2, axis=1), - array([[1, 3, 6, 10], - [5, 11, 18, 27], - [10, 13, 17, 22]], ctype)) - - -class TestProd(TestCase): - def test_basic(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - for ctype in [int16, uint16, int32, uint32, - float32, float64, complex64, complex128]: - a = array(ba, ctype) - a2 = array(ba2, ctype) - if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, prod, a) - self.assertRaises(ArithmeticError, prod, a2, 1) - self.assertRaises(ArithmeticError, prod, a) - else: - assert_equal(prod(a, axis=0), 26400) - assert_array_equal(prod(a2, axis=0), - array([50, 36, 84, 180], ctype)) - assert_array_equal(prod(a2, axis= -1), array([24, 1890, 600], ctype)) - - -class TestCumprod(TestCase): - def test_basic(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - for ctype in [int16, uint16, int32, uint32, - float32, float64, complex64, complex128]: - a = array(ba, ctype) - a2 = array(ba2, ctype) - if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, cumprod, a) - self.assertRaises(ArithmeticError, cumprod, a2, 1) - self.assertRaises(ArithmeticError, cumprod, a) - else: - assert_array_equal(cumprod(a, axis= -1), - array([1, 2, 20, 220, - 1320, 6600, 26400], ctype)) - 
assert_array_equal(cumprod(a2, axis=0), - array([[ 1, 2, 3, 4], - [ 5, 12, 21, 36], - [50, 36, 84, 180]], ctype)) - assert_array_equal(cumprod(a2, axis= -1), - array([[ 1, 2, 6, 24], - [ 5, 30, 210, 1890], - [10, 30, 120, 600]], ctype)) - - -class TestDiff(TestCase): - def test_basic(self): - x = [1, 4, 6, 7, 12] - out = array([3, 2, 1, 5]) - out2 = array([-1, -1, 4]) - out3 = array([0, 5]) - assert_array_equal(diff(x), out) - assert_array_equal(diff(x, n=2), out2) - assert_array_equal(diff(x, n=3), out3) - - def test_nd(self): - x = 20 * rand(10, 20, 30) - out1 = x[:, :, 1:] - x[:, :, :-1] - out2 = out1[:, :, 1:] - out1[:, :, :-1] - out3 = x[1:, :, :] - x[:-1, :, :] - out4 = out3[1:, :, :] - out3[:-1, :, :] - assert_array_equal(diff(x), out1) - assert_array_equal(diff(x, n=2), out2) - assert_array_equal(diff(x, axis=0), out3) - assert_array_equal(diff(x, n=2, axis=0), out4) - - -class TestGradient(TestCase): - def test_basic(self): - x = array([[1, 1], [3, 4]]) - dx = [array([[2., 3.], [2., 3.]]), - array([[0., 0.], [1., 1.]])] - assert_array_equal(gradient(x), dx) - - def test_badargs(self): - # for 2D array, gradient can take 0,1, or 2 extra args - x = array([[1, 1], [3, 4]]) - assert_raises(SyntaxError, gradient, x, array([1., 1.]), - array([1., 1.]), array([1., 1.])) - - def test_masked(self): - # Make sure that gradient supports subclasses like masked arrays - x = np.ma.array([[1, 1], [3, 4]]) - assert_equal(type(gradient(x)[0]), type(x)) - - -class TestAngle(TestCase): - def test_basic(self): - x = [1 + 3j, sqrt(2) / 2.0 + 1j * sqrt(2) / 2, 1, 1j, -1, -1j, 1 - 3j, -1 + 3j] - y = angle(x) - yo = [arctan(3.0 / 1.0), arctan(1.0), 0, pi / 2, pi, -pi / 2.0, - - arctan(3.0 / 1.0), pi - arctan(3.0 / 1.0)] - z = angle(x, deg=1) - zo = array(yo) * 180 / pi - assert_array_almost_equal(y, yo, 11) - assert_array_almost_equal(z, zo, 11) - - -class TestTrimZeros(TestCase): - """ only testing for integer splits. 
- """ - def test_basic(self): - a = array([0, 0, 1, 2, 3, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, array([1, 2, 3, 4])) - - def test_leading_skip(self): - a = array([0, 0, 1, 0, 2, 3, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, array([1, 0, 2, 3, 4])) - - def test_trailing_skip(self): - a = array([0, 0, 1, 0, 2, 3, 0, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, array([1, 0, 2, 3, 0, 4])) - - -class TestExtins(TestCase): - def test_basic(self): - a = array([1, 3, 2, 1, 2, 3, 3]) - b = extract(a > 1, a) - assert_array_equal(b, [3, 2, 2, 3, 3]) - - def test_place(self): - a = array([1, 4, 3, 2, 5, 8, 7]) - place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) - assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) - - def test_both(self): - a = rand(10) - mask = a > 0.5 - ac = a.copy() - c = extract(mask, a) - place(a, mask, 0) - place(a, mask, c) - assert_array_equal(a, ac) - - -class TestVectorize(TestCase): - def test_simple(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b - f = vectorize(addsubtract) - r = f([0, 3, 6, 9], [1, 3, 5, 7]) - assert_array_equal(r, [1, 6, 1, 2]) - - def test_scalar(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b - f = vectorize(addsubtract) - r = f([0, 3, 6, 9], 5) - assert_array_equal(r, [5, 8, 1, 4]) - - def test_large(self): - x = linspace(-3, 2, 10000) - f = vectorize(lambda x: x) - y = f(x) - assert_array_equal(y, x) - - def test_ufunc(self): - import math - f = vectorize(math.cos) - args = array([0, 0.5*pi, pi, 1.5*pi, 2*pi]) - r1 = f(args) - r2 = cos(args) - assert_array_equal(r1, r2) - - def test_keywords(self): - import math - def foo(a, b=1): - return a + b - f = vectorize(foo) - args = array([1,2,3]) - r1 = f(args) - r2 = array([2,3,4]) - assert_array_equal(r1, r2) - r1 = f(args, 2) - r2 = array([3,4,5]) - assert_array_equal(r1, r2) - - def test_keywords_no_func_code(self): - # This needs to test a function that has keywords but - # no 
func_code attribute, since otherwise vectorize will - # inspect the func_code. - import random - try: - f = vectorize(random.randrange) - except: - raise AssertionError() - - -class TestDigitize(TestCase): - def test_forward(self): - x = arange(-6, 5) - bins = arange(-5, 5) - assert_array_equal(digitize(x, bins), arange(11)) - - def test_reverse(self): - x = arange(5, -6, -1) - bins = arange(5, -5, -1) - assert_array_equal(digitize(x, bins), arange(11)) - - def test_random(self): - x = rand(10) - bin = linspace(x.min(), x.max(), 10) - assert_(all(digitize(x, bin) != 0)) - - -class TestUnwrap(TestCase): - def test_simple(self): - #check that unwrap removes jumps greather that 2*pi - assert_array_equal(unwrap([1, 1 + 2 * pi]), [1, 1]) - #check that unwrap maintans continuity - assert_(all(diff(unwrap(rand(10) * 100)) < pi)) - - -class TestFilterwindows(TestCase): - def test_hanning(self): - #check symmetry - w = hanning(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(sum(w, axis=0), 4.500, 4) - - def test_hamming(self): - #check symmetry - w = hamming(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(sum(w, axis=0), 4.9400, 4) - - def test_bartlett(self): - #check symmetry - w = bartlett(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(sum(w, axis=0), 4.4444, 4) - - def test_blackman(self): - #check symmetry - w = blackman(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(sum(w, axis=0), 3.7800, 4) - - -class TestTrapz(TestCase): - def test_simple(self): - r = trapz(exp(-1.0 / 2 * (arange(-10, 10, .1)) ** 2) / sqrt(2 * pi), dx=0.1) - #check integral of normal equals 1 - assert_almost_equal(sum(r, axis=0), 1, 7) - - def test_ndim(self): - x = linspace(0, 1, 3) - y = linspace(0, 2, 8) - z = linspace(0, 3, 13) - - wx = ones_like(x) * (x[1] - x[0]) - wx[0] /= 2 - wx[-1] /= 2 - wy = ones_like(y) * (y[1] 
- y[0]) - wy[0] /= 2 - wy[-1] /= 2 - wz = ones_like(z) * (z[1] - z[0]) - wz[0] /= 2 - wz[-1] /= 2 - - q = x[:, None, None] + y[None, :, None] + z[None, None, :] - - qx = (q * wx[:, None, None]).sum(axis=0) - qy = (q * wy[None, :, None]).sum(axis=1) - qz = (q * wz[None, None, :]).sum(axis=2) - - # n-d `x` - r = trapz(q, x=x[:, None, None], axis=0) - assert_almost_equal(r, qx) - r = trapz(q, x=y[None, :, None], axis=1) - assert_almost_equal(r, qy) - r = trapz(q, x=z[None, None, :], axis=2) - assert_almost_equal(r, qz) - - # 1-d `x` - r = trapz(q, x=x, axis=0) - assert_almost_equal(r, qx) - r = trapz(q, x=y, axis=1) - assert_almost_equal(r, qy) - r = trapz(q, x=z, axis=2) - assert_almost_equal(r, qz) - - def test_masked(self): - #Testing that masked arrays behave as if the function is 0 where - #masked - x = arange(5) - y = x * x - mask = x == 2 - ym = np.ma.array(y, mask=mask) - r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) - assert_almost_equal(trapz(ym, x), r) - - xm = np.ma.array(x, mask=mask) - assert_almost_equal(trapz(ym, xm), r) - - xm = np.ma.array(x, mask=mask) - assert_almost_equal(trapz(y, xm), r) - - def test_matrix(self): - #Test to make sure matrices give the same answer as ndarrays - x = linspace(0, 5) - y = x * x - r = trapz(y, x) - mx = matrix(x) - my = matrix(y) - mr = trapz(my, mx) - assert_almost_equal(mr, r) - - -class TestSinc(TestCase): - def test_simple(self): - assert_(sinc(0) == 1) - w = sinc(linspace(-1, 1, 100)) - #check symmetry - assert_array_almost_equal(w, flipud(w), 7) - - def test_array_like(self): - x = [0, 0.5] - y1 = sinc(array(x)) - y2 = sinc(list(x)) - y3 = sinc(tuple(x)) - assert_array_equal(y1, y2) - assert_array_equal(y1, y3) - -class TestHistogram(TestCase): - def setUp(self): - pass - - def tearDown(self): - pass - - def test_simple(self): - n = 100 - v = rand(n) - (a, b) = histogram(v) - #check if the sum of the bins equals the number of samples - assert_equal(sum(a, axis=0), n) - #check that the bin counts are 
evenly spaced when the data is from a - # linear function - (a, b) = histogram(linspace(0, 10, 100)) - assert_array_equal(a, 10) - - def test_one_bin(self): - # Ticket 632 - hist, edges = histogram([1, 2, 3, 4], [1, 2]) - assert_array_equal(hist, [2, ]) - assert_array_equal(edges, [1, 2]) - assert_raises(ValueError, histogram, [1, 2], bins=0) - h, e = histogram([1,2], bins=1) - assert_equal(h, array([2])) - assert_allclose(e, array([1., 2.])) - - def test_normed(self): - # Check that the integral of the density equals 1. - n = 100 - v = rand(n) - a, b = histogram(v, normed=True) - area = sum(a * diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths (buggy but backwards compatible) - v = np.arange(10) - bins = [0, 1, 5, 9, 10] - a, b = histogram(v, bins, normed=True) - area = sum(a * diff(b)) - assert_almost_equal(area, 1) - - def test_density(self): - # Check that the integral of the density equals 1. - n = 100 - v = rand(n) - a, b = histogram(v, density=True) - area = sum(a * diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths - v = np.arange(10) - bins = [0,1,3,6,10] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, .1) - assert_equal(sum(a*diff(b)), 1) - - # Variale bin widths are especially useful to deal with - # infinities. - v = np.arange(10) - bins = [0,1,3,6,np.inf] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, [.1,.1,.1,0.]) - - # Taken from a bug report from N. Becker on the numpy-discussion - # mailing list Aug. 6, 2010. 
- counts, dmy = np.histogram([1,2,3,4], [0.5,1.5,np.inf], density=True) - assert_equal(counts, [.25, 0]) - - def test_outliers(self): - # Check that outliers are not tallied - a = arange(10) + .5 - - # Lower outliers - h, b = histogram(a, range=[0, 9]) - assert_equal(h.sum(), 9) - - # Upper outliers - h, b = histogram(a, range=[1, 10]) - assert_equal(h.sum(), 9) - - # Normalization - h, b = histogram(a, range=[1, 9], normed=True) - assert_equal((h * diff(b)).sum(), 1) - - # Weights - w = arange(10) + .5 - h, b = histogram(a, range=[1, 9], weights=w, normed=True) - assert_equal((h * diff(b)).sum(), 1) - - h, b = histogram(a, bins=8, range=[1, 9], weights=w) - assert_equal(h, w[1:-1]) - - def test_type(self): - # Check the type of the returned histogram - a = arange(10) + .5 - h, b = histogram(a) - assert_(issubdtype(h.dtype, int)) - - h, b = histogram(a, normed=True) - assert_(issubdtype(h.dtype, float)) - - h, b = histogram(a, weights=ones(10, int)) - assert_(issubdtype(h.dtype, int)) - - h, b = histogram(a, weights=ones(10, float)) - assert_(issubdtype(h.dtype, float)) - - def test_weights(self): - v = rand(100) - w = ones(100) * 5 - a, b = histogram(v) - na, nb = histogram(v, normed=True) - wa, wb = histogram(v, weights=w) - nwa, nwb = histogram(v, weights=w, normed=True) - assert_array_almost_equal(a * 5, wa) - assert_array_almost_equal(na, nwa) - - # Check weights are properly applied. - v = linspace(0, 10, 10) - w = concatenate((zeros(5), ones(5))) - wa, wb = histogram(v, bins=arange(11), weights=w) - assert_array_almost_equal(wa, w) - - # Check with integer weights - wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) - assert_array_equal(wa, [4, 5, 0, 1]) - wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True) - assert_array_almost_equal(wa, array([4, 5, 0, 1]) / 10. / 3. 
* 4) - - # Check weights with non-uniform bin widths - a,b = histogram(np.arange(9), [0,1,3,6,10], \ - weights=[2,1,1,1,1,1,1,1,1], density=True) - assert_almost_equal(a, [.2, .1, .1, .075]) - - def test_empty(self): - a, b = histogram([], bins=([0,1])) - assert_array_equal(a, array([0])) - assert_array_equal(b, array([0, 1])) - - -class TestHistogramdd(TestCase): - def test_simple(self): - x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \ - [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) - H, edges = histogramdd(x, (2, 3, 3), range=[[-1, 1], [0, 3], [0, 3]]) - answer = asarray([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], [[0, 1, 0], [0, 0, 1], - [0, 0, 1]]]) - assert_array_equal(H, answer) - # Check normalization - ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] - H, edges = histogramdd(x, bins=ed, normed=True) - assert_(all(H == answer / 12.)) - # Check that H has the correct shape. - H, edges = histogramdd(x, (2, 3, 4), range=[[-1, 1], [0, 3], [0, 4]], - normed=True) - answer = asarray([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], [[0, 1, 0, 0], - [0, 0, 1, 0], [0, 0, 1, 0]]]) - assert_array_almost_equal(H, answer / 6., 4) - # Check that a sequence of arrays is accepted and H has the correct - # shape. - z = [squeeze(y) for y in split(x, 3, axis=1)] - H, edges = histogramdd(z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) - answer = asarray([[[0, 0], [0, 0], [0, 0]], - [[0, 1], [0, 0], [1, 0]], - [[0, 1], [0, 0], [0, 0]], - [[0, 0], [0, 0], [0, 0]]]) - assert_array_equal(H, answer) - - Z = zeros((5, 5, 5)) - Z[range(5), range(5), range(5)] = 1. - H, edges = histogramdd([arange(5), arange(5), arange(5)], 5) - assert_array_equal(H, Z) - - def test_shape_3d(self): - # All possible permutations for bins of different lengths in 3D. 
- bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), - (4, 5, 6)) - r = rand(10, 3) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_shape_4d(self): - # All possible permutations for bins of different lengths in 4D. - bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), - (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), - (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), - (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), - (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), - (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) - - r = rand(10, 4) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_weights(self): - v = rand(100, 2) - hist, edges = histogramdd(v) - n_hist, edges = histogramdd(v, normed=True) - w_hist, edges = histogramdd(v, weights=ones(100)) - assert_array_equal(w_hist, hist) - w_hist, edges = histogramdd(v, weights=ones(100) * 2, normed=True) - assert_array_equal(w_hist, n_hist) - w_hist, edges = histogramdd(v, weights=ones(100, int) * 2) - assert_array_equal(w_hist, 2 * hist) - - def test_identical_samples(self): - x = zeros((10, 2), int) - hist, edges = histogramdd(x, bins=2) - assert_array_equal(edges[0], array([-0.5, 0. , 0.5])) - - def test_empty(self): - a, b = histogramdd([[], []], bins=([0,1], [0,1])) - assert_array_max_ulp(a, array([[ 0.]])) - a, b = np.histogramdd([[], [], []], bins=2) - assert_array_max_ulp(a, np.zeros((2, 2, 2))) - - - def test_bins_errors(self): - """There are two ways to specify bins. 
Check for the right errors when - mixing those.""" - x = np.arange(8).reshape(2, 4) - assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) - assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) - assert_raises(ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]]) - assert_raises(ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) - assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) - - def test_inf_edges(self): - """Test using +/-inf bin edges works. See #1788.""" - x = np.arange(6).reshape(3, 2) - expected = np.array([[1, 0], [0, 1], [0, 1]]) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) - assert_allclose(h, expected) - - -class TestUnique(TestCase): - def test_simple(self): - x = array([4, 3, 2, 1, 1, 2, 3, 4, 0]) - assert_(all(unique(x) == [0, 1, 2, 3, 4])) - assert_(unique(array([1, 1, 1, 1, 1])) == array([1])) - x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] - assert_(all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) - x = array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) - assert_(all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) - - -class TestCheckFinite(TestCase): - def test_simple(self): - a = [1, 2, 3] - b = [1, 2, inf] - c = [1, 2, nan] - numpy.lib.asarray_chkfinite(a) - assert_raises(ValueError, numpy.lib.asarray_chkfinite, b) - assert_raises(ValueError, numpy.lib.asarray_chkfinite, c) - - -class TestNaNFuncts(TestCase): - def setUp(self): - self.A = array([[[ nan, 0.01319214, 0.01620964], - [ 0.11704017, nan, 0.75157887], - [ 0.28333658, 0.1630199 , nan ]], - [[ 0.59541557, nan, 0.37910852], - [ nan, 0.87964135, nan ], - [ 0.70543747, nan, 0.34306596]], - [[ 0.72687499, 0.91084584, nan ], - [ 0.84386844, 0.38944762, 0.23913896], - [ nan, 0.37068164, 0.33850425]]]) - - def test_nansum(self): - 
assert_almost_equal(nansum(self.A), 8.0664079100000006) - assert_almost_equal(nansum(self.A, 0), - array([[ 1.32229056, 0.92403798, 0.39531816], - [ 0.96090861, 1.26908897, 0.99071783], - [ 0.98877405, 0.53370154, 0.68157021]])) - assert_almost_equal(nansum(self.A, 1), - array([[ 0.40037675, 0.17621204, 0.76778851], - [ 1.30085304, 0.87964135, 0.72217448], - [ 1.57074343, 1.6709751 , 0.57764321]])) - assert_almost_equal(nansum(self.A, 2), - array([[ 0.02940178, 0.86861904, 0.44635648], - [ 0.97452409, 0.87964135, 1.04850343], - [ 1.63772083, 1.47245502, 0.70918589]])) - - def test_nanmin(self): - assert_almost_equal(nanmin(self.A), 0.01319214) - assert_almost_equal(nanmin(self.A, 0), - array([[ 0.59541557, 0.01319214, 0.01620964], - [ 0.11704017, 0.38944762, 0.23913896], - [ 0.28333658, 0.1630199 , 0.33850425]])) - assert_almost_equal(nanmin(self.A, 1), - array([[ 0.11704017, 0.01319214, 0.01620964], - [ 0.59541557, 0.87964135, 0.34306596], - [ 0.72687499, 0.37068164, 0.23913896]])) - assert_almost_equal(nanmin(self.A, 2), - array([[ 0.01319214, 0.11704017, 0.1630199 ], - [ 0.37910852, 0.87964135, 0.34306596], - [ 0.72687499, 0.23913896, 0.33850425]])) - assert_(np.isnan(nanmin([nan, nan]))) - - def test_nanargmin(self): - assert_almost_equal(nanargmin(self.A), 1) - assert_almost_equal(nanargmin(self.A, 0), - array([[1, 0, 0], - [0, 2, 2], - [0, 0, 2]])) - assert_almost_equal(nanargmin(self.A, 1), - array([[1, 0, 0], - [0, 1, 2], - [0, 2, 1]])) - assert_almost_equal(nanargmin(self.A, 2), - array([[1, 0, 1], - [2, 1, 2], - [0, 2, 2]])) - - def test_nanmax(self): - assert_almost_equal(nanmax(self.A), 0.91084584000000002) - assert_almost_equal(nanmax(self.A, 0), - array([[ 0.72687499, 0.91084584, 0.37910852], - [ 0.84386844, 0.87964135, 0.75157887], - [ 0.70543747, 0.37068164, 0.34306596]])) - assert_almost_equal(nanmax(self.A, 1), - array([[ 0.28333658, 0.1630199 , 0.75157887], - [ 0.70543747, 0.87964135, 0.37910852], - [ 0.84386844, 0.91084584, 0.33850425]])) - 
assert_almost_equal(nanmax(self.A, 2), - array([[ 0.01620964, 0.75157887, 0.28333658], - [ 0.59541557, 0.87964135, 0.70543747], - [ 0.91084584, 0.84386844, 0.37068164]])) - assert_(np.isnan(nanmax([nan, nan]))) - - def test_nanmin_allnan_on_axis(self): - assert_array_equal(isnan(nanmin([[nan] * 2] * 3, axis=1)), - [True, True, True]) - - def test_nanmin_masked(self): - a = np.ma.fix_invalid([[2, 1, 3, nan], [5, 2, 3, nan]]) - ctrl_mask = a._mask.copy() - test = np.nanmin(a, axis=1) - assert_equal(test, [1, 2]) - assert_equal(a._mask, ctrl_mask) - assert_equal(np.isinf(a), np.zeros((2, 4), dtype=bool)) - - -class TestNanFunctsIntTypes(TestCase): - - int_types = (int8, int16, int32, int64, uint8, uint16, uint32, uint64) - - def setUp(self, *args, **kwargs): - self.A = array([127, 39, 93, 87, 46]) - - def integer_arrays(self): - for dtype in self.int_types: - yield self.A.astype(dtype) - - def test_nanmin(self): - min_value = min(self.A) - for A in self.integer_arrays(): - assert_equal(nanmin(A), min_value) - - def test_nanmax(self): - max_value = max(self.A) - for A in self.integer_arrays(): - assert_equal(nanmax(A), max_value) - - def test_nanargmin(self): - min_arg = argmin(self.A) - for A in self.integer_arrays(): - assert_equal(nanargmin(A), min_arg) - - def test_nanargmax(self): - max_arg = argmax(self.A) - for A in self.integer_arrays(): - assert_equal(nanargmax(A), max_arg) - - -class TestCorrCoef(TestCase): - A = array([[ 0.15391142, 0.18045767, 0.14197213], - [ 0.70461506, 0.96474128, 0.27906989], - [ 0.9297531 , 0.32296769, 0.19267156]]) - B = array([[ 0.10377691, 0.5417086 , 0.49807457], - [ 0.82872117, 0.77801674, 0.39226705], - [ 0.9314666 , 0.66800209, 0.03538394]]) - res1 = array([[ 1. , 0.9379533 , -0.04931983], - [ 0.9379533 , 1. , 0.30007991], - [-0.04931983, 0.30007991, 1. ]]) - res2 = array([[ 1. , 0.9379533 , -0.04931983, - 0.30151751, 0.66318558, 0.51532523], - [ 0.9379533 , 1. 
, 0.30007991, - - 0.04781421, 0.88157256, 0.78052386], - [-0.04931983, 0.30007991, 1. , - - 0.96717111, 0.71483595, 0.83053601], - [ 0.30151751, -0.04781421, -0.96717111, - 1. , -0.51366032, -0.66173113], - [ 0.66318558, 0.88157256, 0.71483595, - - 0.51366032, 1. , 0.98317823], - [ 0.51532523, 0.78052386, 0.83053601, - - 0.66173113, 0.98317823, 1. ]]) - - def test_simple(self): - assert_almost_equal(corrcoef(self.A), self.res1) - assert_almost_equal(corrcoef(self.A, self.B), self.res2) - - def test_ddof(self): - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - - def test_empty(self): - assert_equal(corrcoef(np.array([])).size, 0) - assert_equal(corrcoef(np.array([]).reshape(0, 2)).shape, (0, 2)) - - -class TestCov(TestCase): - def test_basic(self): - x = np.array([[0, 2], [1, 1], [2, 0]]).T - assert_allclose(np.cov(x), np.array([[ 1.,-1.], [-1.,1.]])) - - def test_empty(self): - assert_equal(cov(np.array([])).size, 0) - assert_equal(cov(np.array([]).reshape(0, 2)).shape, (0, 2)) - - -class Test_i0(TestCase): - def test_simple(self): - assert_almost_equal(i0(0.5), array(1.0634833707413234)) - A = array([ 0.49842636, 0.6969809 , 0.22011976, 0.0155549]) - assert_almost_equal(i0(A), - array([ 1.06307822, 1.12518299, 1.01214991, 1.00006049])) - B = array([[ 0.827002 , 0.99959078], - [ 0.89694769, 0.39298162], - [ 0.37954418, 0.05206293], - [ 0.36465447, 0.72446427], - [ 0.48164949, 0.50324519]]) - assert_almost_equal(i0(B), - array([[ 1.17843223, 1.26583466], - [ 1.21147086, 1.0389829 ], - [ 1.03633899, 1.00067775], - [ 1.03352052, 1.13557954], - [ 1.0588429 , 1.06432317]])) - - -class TestKaiser(TestCase): - def test_simple(self): - assert_almost_equal(kaiser(0, 1.0), array([])) - assert_(isfinite(kaiser(1, 1.0))) - assert_almost_equal(kaiser(2, 1.0), array([ 0.78984831, 0.78984831])) - assert_almost_equal(kaiser(5, 1.0), - array([ 0.78984831, 0.94503323, 1. 
, - 0.94503323, 0.78984831])) - assert_almost_equal(kaiser(5, 1.56789), - array([ 0.58285404, 0.88409679, 1. , - 0.88409679, 0.58285404])) - - def test_int_beta(self): - kaiser(3, 4) - - -class TestMsort(TestCase): - def test_simple(self): - A = array([[ 0.44567325, 0.79115165, 0.5490053 ], - [ 0.36844147, 0.37325583, 0.96098397], - [ 0.64864341, 0.52929049, 0.39172155]]) - assert_almost_equal(msort(A), - array([[ 0.36844147, 0.37325583, 0.39172155], - [ 0.44567325, 0.52929049, 0.5490053 ], - [ 0.64864341, 0.79115165, 0.96098397]])) - - -class TestMeshgrid(TestCase): - def test_simple(self): - [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) - assert_(all(X == array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3], - [1, 2, 3]]))) - assert_(all(Y == array([[4, 4, 4], - [5, 5, 5], - [6, 6, 6], - [7, 7, 7]]))) - - -class TestPiecewise(TestCase): - def test_simple(self): - # Condition is single bool list - x = piecewise([0, 0], [True, False], [1]) - assert_array_equal(x, [1, 0]) - - # List of conditions: single bool list - x = piecewise([0, 0], [[True, False]], [1]) - assert_array_equal(x, [1, 0]) - - # Conditions is single bool array - x = piecewise([0, 0], array([True, False]), [1]) - assert_array_equal(x, [1, 0]) - - # Condition is single int array - x = piecewise([0, 0], array([1, 0]), [1]) - assert_array_equal(x, [1, 0]) - - # List of conditions: int array - x = piecewise([0, 0], [array([1, 0])], [1]) - assert_array_equal(x, [1, 0]) - - - x = piecewise([0, 0], [[False, True]], [lambda x:-1]) - assert_array_equal(x, [0, -1]) - - x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) - assert_array_equal(x, [3, 4]) - - def test_default(self): - # No value specified for x[1], should be 0 - x = piecewise([1, 2], [True, False], [2]) - assert_array_equal(x, [2, 0]) - - # Should set x[1] to 3 - x = piecewise([1, 2], [True, False], [2, 3]) - assert_array_equal(x, [2, 3]) - - def test_0d(self): - x = array(3) - y = piecewise(x, x > 3, [4, 0]) - assert_(y.ndim == 0) - assert_(y == 0) 
- - -class TestBincount(TestCase): - def test_simple(self): - y = np.bincount(np.arange(4)) - assert_array_equal(y, np.ones(4)) - - def test_simple2(self): - y = np.bincount(np.array([1, 5, 2, 4, 1])) - assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) - - def test_simple_weight(self): - x = np.arange(4) - w = np.array([0.2, 0.3, 0.5, 0.1]) - y = np.bincount(x, w) - assert_array_equal(y, w) - - def test_simple_weight2(self): - x = np.array([1, 2, 4, 5, 2]) - w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) - y = np.bincount(x, w) - assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) - - def test_with_minlength(self): - x = np.array([0, 1, 0, 1, 1]) - y = np.bincount(x, minlength=3) - assert_array_equal(y, np.array([2, 3, 0])) - - def test_with_minlength_smaller_than_maxvalue(self): - x = np.array([0, 1, 1, 2, 2, 3, 3]) - y = np.bincount(x, minlength=2) - assert_array_equal(y, np.array([1, 2, 2, 2])) - - def test_with_minlength_and_weights(self): - x = np.array([1, 2, 4, 5, 2]) - w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) - y = np.bincount(x, w, 8) - assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) - - def test_empty(self): - x = np.array([], dtype=int) - y = np.bincount(x) - assert_array_equal(x,y) - - def test_empty_with_minlength(self): - x = np.array([], dtype=int) - y = np.bincount(x, minlength=5) - assert_array_equal(y, np.zeros(5, dtype=int)) - - -class TestInterp(TestCase): - def test_exceptions(self): - assert_raises(ValueError, interp, 0, [], []) - assert_raises(ValueError, interp, 0, [0], [1, 2]) - - def test_basic(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = np.linspace(0, 1, 50) - assert_almost_equal(np.interp(x0, x, y), x0) - - def test_right_left_behavior(self): - assert_equal(interp([-1, 0, 1], [0], [1]), [1,1,1]) - assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0,1,1]) - assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1,1,0]) - assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0,1,0]) - - 
def test_scalar_interpolation_point(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = 0 - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = .3 - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.float32(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.float64(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - - def test_zero_dimensional_interpolation_point(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = np.array(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.array(.3, dtype=object) - assert_almost_equal(np.interp(x0, x, y), .3) - - -def compare_results(res, desired): - for i in range(len(desired)): - assert_array_equal(res[i], desired[i]) - - -def test_percentile_list(): - assert_equal(np.percentile([1, 2, 3], 0), 1) - -def test_percentile_out(): - x = np.array([1, 2, 3]) - y = np.zeros((3,)) - p = (1, 2, 3) - np.percentile(x, p, out=y) - assert_equal(y, np.percentile(x, p)) - - x = np.array([[1, 2, 3], - [4, 5, 6]]) - - y = np.zeros((3, 3)) - np.percentile(x, p, axis=0, out=y) - assert_equal(y, np.percentile(x, p, axis=0)) - - y = np.zeros((3, 2)) - np.percentile(x, p, axis=1, out=y) - assert_equal(y, np.percentile(x, p, axis=1)) - - -def test_median(): - a0 = np.array(1) - a1 = np.arange(2) - a2 = np.arange(6).reshape(2, 3) - assert_allclose(np.median(a0), 1) - assert_allclose(np.median(a1), 0.5) - assert_allclose(np.median(a2), 2.5) - assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) - assert_allclose(np.median(a2, axis=1), [1, 4]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_index_tricks.py b/numpy-1.6.2/numpy/lib/tests/test_index_tricks.py deleted file mode 100644 index 8b42292a29..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_index_tricks.py +++ /dev/null @@ -1,199 +0,0 @@ -from numpy.testing import * -import numpy as np -from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where, - ndenumerate, fill_diagonal, 
diag_indices, - diag_indices_from, s_, index_exp ) - -class TestRavelUnravelIndex(TestCase): - def test_basic(self): - assert_equal(np.unravel_index(2,(2,2)), (1,0)) - assert_equal(np.ravel_multi_index((1,0),(2,2)), 2) - assert_equal(np.unravel_index(254,(17,94)), (2,66)) - assert_equal(np.ravel_multi_index((2,66),(17,94)), 254) - assert_raises(ValueError, np.unravel_index, -1, (2,2)) - assert_raises(TypeError, np.unravel_index, 0.5, (2,2)) - assert_raises(ValueError, np.unravel_index, 4, (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (-3,1), (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (2,1), (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (0,-3), (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (0,2), (2,2)) - assert_raises(TypeError, np.ravel_multi_index, (0.1,0.), (2,2)) - - assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4,3,6)), [2,1,4]) - assert_equal(np.ravel_multi_index([2,1,4], (4,3,6)), (2*3 + 1)*6 + 4) - - arr = np.array([[3,6,6],[4,5,1]]) - assert_equal(np.ravel_multi_index(arr, (7,6)), [22,41,37]) - assert_equal(np.ravel_multi_index(arr, (7,6), order='F'), [31,41,13]) - assert_equal(np.ravel_multi_index(arr, (4,6), mode='clip'), [22,23,19]) - assert_equal(np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')), - [12,13,13]) - assert_equal(np.ravel_multi_index((3,1,4,1), (6,7,8,9)), 1621) - - assert_equal(np.unravel_index(np.array([22, 41, 37]), (7,6)), - [[3, 6, 6],[4, 5, 1]]) - assert_equal(np.unravel_index(np.array([31, 41, 13]), (7,6), order='F'), - [[3, 6, 6], [4, 5, 1]]) - assert_equal(np.unravel_index(1621, (6,7,8,9)), [3,1,4,1]) - - def test_dtypes(self): - # Test with different data types - for dtype in [np.int16, np.uint16, np.int32, - np.uint32, np.int64, np.uint64]: - coords = np.array([[1,0,1,2,3,4],[1,6,1,3,2,0]], dtype=dtype) - shape = (5,8) - uncoords = 8*coords[0]+coords[1] - assert_equal(np.ravel_multi_index(coords, shape), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape)) - 
uncoords = coords[0]+5*coords[1] - assert_equal(np.ravel_multi_index(coords, shape, order='F'), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - - coords = np.array([[1,0,1,2,3,4],[1,6,1,3,2,0],[1,3,1,0,9,5]], - dtype=dtype) - shape = (5,8,10) - uncoords = 10*(8*coords[0]+coords[1])+coords[2] - assert_equal(np.ravel_multi_index(coords, shape), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*(coords[1]+8*coords[2]) - assert_equal(np.ravel_multi_index(coords, shape, order='F'), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - - def test_clipmodes(self): - # Test clipmodes - assert_equal(np.ravel_multi_index([5,1,-1,2], (4,3,7,12), mode='wrap'), - np.ravel_multi_index([1,1,6,2], (4,3,7,12))) - assert_equal(np.ravel_multi_index([5,1,-1,2], (4,3,7,12), - mode=('wrap','raise','clip','raise')), - np.ravel_multi_index([1,1,0,2], (4,3,7,12))) - assert_raises(ValueError, np.ravel_multi_index, [5,1,-1,2], (4,3,7,12)) - -class TestGrid(TestCase): - def test_basic(self): - a = mgrid[-1:1:10j] - b = mgrid[-1:1:0.1] - assert(a.shape == (10,)) - assert(b.shape == (20,)) - assert(a[0] == -1) - assert_almost_equal(a[-1],1) - assert(b[0] == -1) - assert_almost_equal(b[1]-b[0],0.1,11) - assert_almost_equal(b[-1],b[0]+19*0.1,11) - assert_almost_equal(a[1]-a[0],2.0/9.0,11) - - def test_linspace_equivalence(self): - y,st = np.linspace(2,10,retstep=1) - assert_almost_equal(st,8/49.0) - assert_array_almost_equal(y,mgrid[2:10:50j],13) - - def test_nd(self): - c = mgrid[-1:1:10j,-2:2:10j] - d = mgrid[-1:1:0.1,-2:2:0.2] - assert(c.shape == (2,10,10)) - assert(d.shape == (2,20,20)) - assert_array_equal(c[0][0,:],-ones(10,'d')) - assert_array_equal(c[1][:,0],-2*ones(10,'d')) - assert_array_almost_equal(c[0][-1,:],ones(10,'d'),11) - assert_array_almost_equal(c[1][:,-1],2*ones(10,'d'),11) - assert_array_almost_equal(d[0,1,:]-d[0,0,:], 0.1*ones(20,'d'),11) - 
assert_array_almost_equal(d[1,:,1]-d[1,:,0], 0.2*ones(20,'d'),11) - - -class TestConcatenator(TestCase): - def test_1d(self): - assert_array_equal(r_[1,2,3,4,5,6],array([1,2,3,4,5,6])) - b = ones(5) - c = r_[b,0,0,b] - assert_array_equal(c,[1,1,1,1,1,0,0,1,1,1,1,1]) - - def test_mixed_type(self): - g = r_[10.1, 1:10] - assert(g.dtype == 'f8') - - def test_more_mixed_type(self): - g = r_[-10.1, array([1]), array([2,3,4]), 10.0] - assert(g.dtype == 'f8') - - def test_2d(self): - b = rand(5,5) - c = rand(5,5) - d = r_['1',b,c] # append columns - assert(d.shape == (5,10)) - assert_array_equal(d[:,:5],b) - assert_array_equal(d[:,5:],c) - d = r_[b,c] - assert(d.shape == (10,5)) - assert_array_equal(d[:5,:],b) - assert_array_equal(d[5:,:],c) - - -class TestNdenumerate(TestCase): - def test_basic(self): - a = array([[1,2], [3,4]]) - assert_equal(list(ndenumerate(a)), - [((0,0), 1), ((0,1), 2), ((1,0), 3), ((1,1), 4)]) - - -class TestIndexExpression(TestCase): - def test_regression_1(self): - # ticket #1196 - a = np.arange(2) - assert_equal(a[:-1], a[s_[:-1]]) - assert_equal(a[:-1], a[index_exp[:-1]]) - - def test_simple_1(self): - a = np.random.rand(4,5,6) - - assert_equal(a[:,:3,[1,2]], a[index_exp[:,:3,[1,2]]]) - assert_equal(a[:,:3,[1,2]], a[s_[:,:3,[1,2]]]) - -def test_fill_diagonal(): - a = zeros((3, 3),int) - fill_diagonal(a, 5) - yield (assert_array_equal, a, - array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]])) - - # The same function can operate on a 4-d array: - a = zeros((3, 3, 3, 3), int) - fill_diagonal(a, 4) - i = array([0, 1, 2]) - yield (assert_equal, where(a != 0), (i, i, i, i)) - - -def test_diag_indices(): - di = diag_indices(4) - a = array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - a[di] = 100 - yield (assert_array_equal, a, - array([[100, 2, 3, 4], - [ 5, 100, 7, 8], - [ 9, 10, 100, 12], - [ 13, 14, 15, 100]])) - - # Now, we create indices to manipulate a 3-d array: - d3 = diag_indices(2, 3) - - # And use it to set the diagonal 
of a zeros array to 1: - a = zeros((2, 2, 2),int) - a[d3] = 1 - yield (assert_array_equal, a, - array([[[1, 0], - [0, 0]], - - [[0, 0], - [0, 1]]]) ) - -def test_diag_indices_from(): - x = np.random.random((4, 4)) - r, c = diag_indices_from(x) - assert_array_equal(r, np.arange(4)) - assert_array_equal(c, np.arange(4)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_io.py b/numpy-1.6.2/numpy/lib/tests/test_io.py deleted file mode 100644 index adb8db0ff0..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_io.py +++ /dev/null @@ -1,1510 +0,0 @@ -import numpy as np -import numpy.ma as ma -from numpy.ma.testutils import (TestCase, assert_equal, assert_array_equal, - assert_raises, run_module_suite) -from numpy.testing import assert_warns, assert_, build_err_msg - -import sys - -import gzip -import os -import threading - -from tempfile import mkstemp, NamedTemporaryFile -import time -from datetime import datetime -import warnings -import gc -from numpy.testing.utils import WarningManager - -from numpy.lib._iotools import ConverterError, ConverterLockError, \ - ConversionWarning -from numpy.compat import asbytes, asbytes_nested, bytes - -if sys.version_info[0] >= 3: - from io import BytesIO - def StringIO(s=""): - return BytesIO(asbytes(s)) -else: - from StringIO import StringIO - BytesIO = StringIO - -MAJVER, MINVER = sys.version_info[:2] - -def strptime(s, fmt=None): - """This function is available in the datetime module only - from Python >= 2.5. - - """ - if sys.version_info[0] >= 3: - return datetime(*time.strptime(s.decode('latin1'), fmt)[:3]) - else: - return datetime(*time.strptime(s, fmt)[:3]) - -class RoundtripTest(object): - def roundtrip(self, save_func, *args, **kwargs): - """ - save_func : callable - Function used to save arrays to file. - file_on_disk : bool - If true, store the file on disk, instead of in a - string buffer. - save_kwds : dict - Parameters passed to `save_func`. 
- load_kwds : dict - Parameters passed to `numpy.load`. - args : tuple of arrays - Arrays stored to file. - - """ - save_kwds = kwargs.get('save_kwds', {}) - load_kwds = kwargs.get('load_kwds', {}) - file_on_disk = kwargs.get('file_on_disk', False) - - if file_on_disk: - # Do not delete the file on windows, because we can't - # reopen an already opened file on that platform, so we - # need to close the file and reopen it, implying no - # automatic deletion. - if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6: - target_file = NamedTemporaryFile(delete=False) - else: - target_file = NamedTemporaryFile() - load_file = target_file.name - else: - target_file = StringIO() - load_file = target_file - - arr = args - - save_func(target_file, *arr, **save_kwds) - target_file.flush() - target_file.seek(0) - - if sys.platform == 'win32' and not isinstance(target_file, BytesIO): - target_file.close() - - arr_reloaded = np.load(load_file, **load_kwds) - - self.arr = arr - self.arr_reloaded = arr_reloaded - - def test_array(self): - a = np.array([[1, 2], [3, 4]], float) - self.roundtrip(a) - - a = np.array([[1, 2], [3, 4]], int) - self.roundtrip(a) - - a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) - self.roundtrip(a) - - a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) - self.roundtrip(a) - - def test_1D(self): - a = np.array([1, 2, 3, 4], int) - self.roundtrip(a) - - @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") - def test_mmap(self): - a = np.array([[1, 2.5], [4, 7.3]]) - self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) - - def test_record(self): - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - self.roundtrip(a) - -class TestSaveLoad(RoundtripTest, TestCase): - def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - -class TestSavezLoad(RoundtripTest, TestCase): - def 
roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) - for n, arr in enumerate(self.arr): - assert_equal(arr, self.arr_reloaded['arr_%d' % n]) - - def test_multiple_arrays(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - self.roundtrip(a, b) - - def test_named_arrays(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - c = StringIO() - np.savez(c, file_a=a, file_b=b) - c.seek(0) - l = np.load(c) - assert_equal(a, l['file_a']) - assert_equal(b, l['file_b']) - - def test_savez_filename_clashes(self): - # Test that issue #852 is fixed - # and savez functions in multithreaded environment - - def writer(error_list): - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - try: - arr = np.random.randn(500, 500) - try: - np.savez(tmp, arr=arr) - except OSError, err: - error_list.append(err) - finally: - os.remove(tmp) - - errors = [] - threads = [threading.Thread(target=writer, args=(errors,)) - for j in xrange(3)] - for t in threads: - t.start() - for t in threads: - t.join() - - if errors: - raise AssertionError(errors) - -class TestSaveTxt(TestCase): - def test_array(self): - a = np.array([[1, 2], [3, 4]], float) - fmt = "%.18e" - c = StringIO() - np.savetxt(c, a, fmt=fmt) - c.seek(0) - assert_equal(c.readlines(), - asbytes_nested( - [(fmt + ' ' + fmt + '\n') % (1, 2), - (fmt + ' ' + fmt + '\n') % (3, 4)])) - - a = np.array([[1, 2], [3, 4]], int) - c = StringIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n'])) - - def test_1D(self): - a = np.array([1, 2, 3, 4], int) - c = StringIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - lines = c.readlines() - assert_equal(lines, asbytes_nested(['1\n', '2\n', '3\n', '4\n'])) - - def test_record(self): - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - c = StringIO() - np.savetxt(c, a, fmt='%d') - 
c.seek(0) - assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n'])) - - def test_delimiter(self): - a = np.array([[1., 2.], [3., 4.]]) - c = StringIO() - np.savetxt(c, a, delimiter=asbytes(','), fmt='%d') - c.seek(0) - assert_equal(c.readlines(), asbytes_nested(['1,2\n', '3,4\n'])) - - def test_format(self): - a = np.array([(1, 2), (3, 4)]) - c = StringIO() - # Sequence of formats - np.savetxt(c, a, fmt=['%02d', '%3.1f']) - c.seek(0) - assert_equal(c.readlines(), asbytes_nested(['01 2.0\n', '03 4.0\n'])) - - # A single multiformat string - c = StringIO() - np.savetxt(c, a, fmt='%02d : %3.1f') - c.seek(0) - lines = c.readlines() - assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n'])) - - # Specify delimiter, should be overiden - c = StringIO() - np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') - c.seek(0) - lines = c.readlines() - assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n'])) - - def test_file_roundtrip(self): - f, name = mkstemp() - os.close(f) - try: - a = np.array([(1, 2), (3, 4)]) - np.savetxt(name, a) - b = np.loadtxt(name) - assert_array_equal(a, b) - finally: - os.unlink(name) - - def test_complex_arrays(self): - ncols = 2 - nrows = 2 - a = np.zeros((ncols, nrows), dtype=np.complex128) - re = np.pi - im = np.e - a[:] = re + 1.0j * im - # One format only - c = StringIO() - np.savetxt(c, a, fmt=' %+.3e') - c.seek(0) - lines = c.readlines() - _assert_floatstr_lines_equal(lines, asbytes_nested([ - ' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', - ' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])) - # One format for each real and imaginary part - c = StringIO() - np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) - c.seek(0) - lines = c.readlines() - _assert_floatstr_lines_equal(lines, asbytes_nested([ - ' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', - ' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])) - # One format for each complex number - c = StringIO() - np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * 
ncols) - c.seek(0) - lines = c.readlines() - _assert_floatstr_lines_equal(lines, asbytes_nested([ - '(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', - '(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])) - - -def _assert_floatstr_lines_equal(actual_lines, expected_lines): - """A string comparison function that also works on Windows + Python 2.5. - - This is necessary because Python 2.5 on Windows inserts an extra 0 in - the exponent of the string representation of floating point numbers. - - Only used in TestSaveTxt.test_complex_arrays, no attempt made to make this - more generic. - - Once Python 2.5 compatibility is dropped, simply use `assert_equal` instead - of this function. - """ - for actual, expected in zip(actual_lines, expected_lines): - if actual != expected: - expected_win25 = expected.replace("e+00", "e+000") - if actual != expected_win25: - msg = build_err_msg([actual, expected], '', verbose=True) - raise AssertionError(msg) - - -class TestLoadTxt(TestCase): - def test_record(self): - c = StringIO() - c.write(asbytes('1 2\n3 4')) - c.seek(0) - x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - assert_array_equal(x, a) - - d = StringIO() - d.write(asbytes('M 64.0 75.0\nF 25.0 60.0')) - d.seek(0) - mydescriptor = {'names': ('gender', 'age', 'weight'), - 'formats': ('S1', - 'i4', 'f4')} - b = np.array([('M', 64.0, 75.0), - ('F', 25.0, 60.0)], dtype=mydescriptor) - y = np.loadtxt(d, dtype=mydescriptor) - assert_array_equal(y, b) - - def test_array(self): - c = StringIO() - c.write(asbytes('1 2\n3 4')) - - c.seek(0) - x = np.loadtxt(c, dtype=int) - a = np.array([[1, 2], [3, 4]], int) - assert_array_equal(x, a) - - c.seek(0) - x = np.loadtxt(c, dtype=float) - a = np.array([[1, 2], [3, 4]], float) - assert_array_equal(x, a) - - def test_1D(self): - c = StringIO() - c.write(asbytes('1\n2\n3\n4\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int) - a = np.array([1, 2, 3, 4], int) - 
assert_array_equal(x, a) - - c = StringIO() - c.write(asbytes('1,2,3,4\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',') - a = np.array([1, 2, 3, 4], int) - assert_array_equal(x, a) - - def test_missing(self): - c = StringIO() - c.write(asbytes('1,2,3,,5\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', \ - converters={3:lambda s: int(s or - 999)}) - a = np.array([1, 2, 3, -999, 5], int) - assert_array_equal(x, a) - - def test_converters_with_usecols(self): - c = StringIO() - c.write(asbytes('1,2,3,,5\n6,7,8,9,10\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', \ - converters={3:lambda s: int(s or - 999)}, \ - usecols=(1, 3,)) - a = np.array([[2, -999], [7, 9]], int) - assert_array_equal(x, a) - - def test_comments(self): - c = StringIO() - c.write(asbytes('# comment\n1,2,3,5\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', \ - comments='#') - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_skiprows(self): - c = StringIO() - c.write(asbytes('comment\n1,2,3,5\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', \ - skiprows=1) - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - c = StringIO() - c.write(asbytes('# comment\n1,2,3,5\n')) - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', \ - skiprows=1) - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_usecols(self): - a = np.array([[1, 2], [3, 4]], float) - c = StringIO() - np.savetxt(c, a) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1,)) - assert_array_equal(x, a[:, 1]) - - a = np.array([[1, 2, 3], [3, 4, 5]], float) - c = StringIO() - np.savetxt(c, a) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1, 2)) - assert_array_equal(x, a[:, 1:]) - - # Testing with arrays instead of tuples. - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) - assert_array_equal(x, a[:, 1:]) - - # Checking with dtypes defined converters. 
- data = '''JOE 70.1 25.3 - BOB 60.5 27.9 - ''' - c = StringIO(data) - names = ['stid', 'temp'] - dtypes = ['S4', 'f8'] - arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes)) - assert_equal(arr['stid'], asbytes_nested(["JOE", "BOB"])) - assert_equal(arr['temp'], [25.3, 27.9]) - - def test_fancy_dtype(self): - c = StringIO() - c.write(asbytes('1,2,3.0\n4,5,6.0\n')) - c.seek(0) - dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - x = np.loadtxt(c, dtype=dt, delimiter=',') - a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) - assert_array_equal(x, a) - - def test_shaped_dtype(self): - c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 3))]) - x = np.loadtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], - dtype=dt) - assert_array_equal(x, a) - - def test_3d_shaped_dtype(self): - c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 2, 3))]) - x = np.loadtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])], - dtype=dt) - assert_array_equal(x, a) - - def test_empty_file(self): - warnings.filterwarnings("ignore", message="loadtxt: Empty input file:") - c = StringIO() - x = np.loadtxt(c) - assert_equal(x.shape, (0,)) - x = np.loadtxt(c, dtype=np.int64) - assert_equal(x.shape, (0,)) - assert_(x.dtype == np.int64) - - - def test_unused_converter(self): - c = StringIO() - c.writelines([asbytes('1 21\n'), asbytes('3 42\n')]) - c.seek(0) - data = np.loadtxt(c, usecols=(1,), - converters={0: lambda s: int(s, 16)}) - assert_array_equal(data, [21, 42]) - - c.seek(0) - data = np.loadtxt(c, usecols=(1,), - converters={1: lambda s: int(s, 16)}) - assert_array_equal(data, [33, 66]) - - def test_dtype_with_object(self): - "Test using an explicit dtype with an object" - from datetime import date - import time - data = asbytes(""" 1; 
2001-01-01 - 2; 2002-01-31 """) - ndtype = [('idx', int), ('code', np.object)] - func = lambda s: strptime(s.strip(), "%Y-%m-%d") - converters = {1: func} - test = np.loadtxt(StringIO(data), delimiter=";", dtype=ndtype, - converters=converters) - control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], - dtype=ndtype) - assert_equal(test, control) - - def test_uint64_type(self): - tgt = (9223372043271415339, 9223372043271415853) - c = StringIO() - c.write(asbytes("%s %s" % tgt)) - c.seek(0) - res = np.loadtxt(c, dtype=np.uint64) - assert_equal(res, tgt) - - def test_int64_type(self): - tgt = (-9223372036854775807, 9223372036854775807) - c = StringIO() - c.write(asbytes("%s %s" % tgt)) - c.seek(0) - res = np.loadtxt(c, dtype=np.int64) - assert_equal(res, tgt) - - def test_universal_newline(self): - f, name = mkstemp() - os.write(f, asbytes('1 21\r3 42\r')) - os.close(f) - - try: - data = np.loadtxt(name) - assert_array_equal(data, [[1, 21], [3, 42]]) - finally: - os.unlink(name) - - def test_empty_field_after_tab(self): - c = StringIO() - c.write(asbytes('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')) - c.seek(0) - dt = { 'names': ('x', 'y', 'z', 'comment'), - 'formats': ('>> p = np.poly1d([1.,2,3]) ->>> p -poly1d([ 1., 2., 3.]) ->>> print(p) - 2 -1 x + 2 x + 3 ->>> q = np.poly1d([3.,2,1]) ->>> q -poly1d([ 3., 2., 1.]) ->>> print(q) - 2 -3 x + 2 x + 1 ->>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j])) - 3 2 -(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j) ->>> print(np.poly1d([-3, -2, -1])) - 2 --3 x - 2 x - 1 - ->>> p(0) -3.0 ->>> p(5) -38.0 ->>> q(0) -1.0 ->>> q(5) -86.0 - ->>> p * q -poly1d([ 3., 8., 14., 8., 3.]) ->>> p / q -(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667])) ->>> p + q -poly1d([ 4., 4., 4.]) ->>> p - q -poly1d([-2., 0., 2.]) ->>> p ** 4 -poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.]) - ->>> p(q) -poly1d([ 9., 12., 16., 8., 6.]) ->>> q(p) -poly1d([ 3., 12., 32., 40., 34.]) - ->>> np.asarray(p) -array([ 1., 
2., 3.]) ->>> len(p) -2 - ->>> p[0], p[1], p[2], p[3] -(3.0, 2.0, 1.0, 0) - ->>> p.integ() -poly1d([ 0.33333333, 1. , 3. , 0. ]) ->>> p.integ(1) -poly1d([ 0.33333333, 1. , 3. , 0. ]) ->>> p.integ(5) -poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. , - 0. , 0. , 0. ]) ->>> p.deriv() -poly1d([ 2., 2.]) ->>> p.deriv(2) -poly1d([ 2.]) - ->>> q = np.poly1d([1.,2,3], variable='y') ->>> print(q) - 2 -1 y + 2 y + 3 ->>> q = np.poly1d([1.,2,3], variable='lambda') ->>> print(q) - 2 -1 lambda + 2 lambda + 3 - ->>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1])) -(poly1d([ 1., -1.]), poly1d([ 0.])) -""" - -from numpy.testing import * -import numpy as np - -class TestDocs(TestCase): - def test_doctests(self): - return rundocs() - - def test_roots(self): - assert_array_equal(np.roots([1,0,0]), [0,0]) - - def test_str_leading_zeros(self): - p = np.poly1d([4,3,2,1]) - p[3] = 0 - assert_equal(str(p), - " 2\n" - "3 x + 2 x + 1") - - p = np.poly1d([1,2]) - p[0] = 0 - p[1] = 0 - assert_equal(str(p), " \n0") - - def test_polyfit(self) : - c = np.array([3., 2., 1.]) - x = np.linspace(0,2,5) - y = np.polyval(c,x) - # check 1D case - assert_almost_equal(c, np.polyfit(x,y,2)) - # check 2D (n,1) case - y = y[:,np.newaxis] - c = c[:,np.newaxis] - assert_almost_equal(c, np.polyfit(x,y,2)) - # check 2D (n,2) case - yy = np.concatenate((y,y), axis=1) - cc = np.concatenate((c,c), axis=1) - assert_almost_equal(cc, np.polyfit(x,yy,2)) - - def test_objects(self): - from decimal import Decimal - p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) - p2 = p * Decimal('1.333333333333333') - assert p2[1] == Decimal("3.9999999999999990") - p2 = p.deriv() - assert p2[1] == Decimal('8.0') - p2 = p.integ() - assert p2[3] == Decimal("1.333333333333333333333333333") - assert p2[2] == Decimal('1.5') - assert np.issubdtype(p2.coeffs.dtype, np.object_) - - def test_complex(self): - p = np.poly1d([3j, 2j, 1j]) - p2 = p.integ() - assert (p2.coeffs == [1j,1j,1j,0]).all() - p2 = p.deriv() - assert 
(p2.coeffs == [6j,2j]).all() - - def test_integ_coeffs(self): - p = np.poly1d([3,2,1]) - p2 = p.integ(3, k=[9,7,6]) - assert (p2.coeffs == [1/4./5.,1/3./4.,1/2./3.,9/1./2.,7,6]).all() - - def test_zero_dims(self): - try: - np.poly(np.zeros((0, 0))) - except ValueError: - pass - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_recfunctions.py b/numpy-1.6.2/numpy/lib/tests/test_recfunctions.py deleted file mode 100644 index 14af43a59c..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_recfunctions.py +++ /dev/null @@ -1,621 +0,0 @@ -import sys - -import numpy as np -import numpy.ma as ma -from numpy.ma.testutils import * - -from numpy.ma.mrecords import MaskedRecords - -from numpy.lib.recfunctions import * -get_names = np.lib.recfunctions.get_names -get_names_flat = np.lib.recfunctions.get_names_flat -zip_descr = np.lib.recfunctions.zip_descr - -class TestRecFunctions(TestCase): - """ - Misc tests - """ - # - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array([('A', 1.), ('B', 2.)], - dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - - def test_zip_descr(self): - "Test zip_descr" - (w, x, y, z) = self.data - # Std array - test = zip_descr((x, x), flatten=True) - assert_equal(test, - np.dtype([('', int), ('', int)])) - test = zip_descr((x, x), flatten=False) - assert_equal(test, - np.dtype([('', int), ('', int)])) - # Std & flexible-dtype - test = zip_descr((x, z), flatten=True) - assert_equal(test, - np.dtype([('', int), ('A', '|S3'), ('B', float)])) - test = zip_descr((x, z), flatten=False) - assert_equal(test, - np.dtype([('', int), - ('', [('A', '|S3'), ('B', float)])])) - # Standard & nested dtype - test = zip_descr((x, w), flatten=True) - assert_equal(test, - np.dtype([('', int), - ('a', int), - ('ba', float), ('bb', int)])) - test = zip_descr((x, w), 
flatten=False) - assert_equal(test, - np.dtype([('', int), - ('', [('a', int), - ('b', [('ba', float), ('bb', int)])])])) - - - def test_drop_fields(self): - "Test drop_fields" - a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - # A basic field - test = drop_fields(a, 'a') - control = np.array([((2, 3.0),), ((5, 6.0),)], - dtype=[('b', [('ba', float), ('bb', int)])]) - assert_equal(test, control) - # Another basic field (but nesting two fields) - test = drop_fields(a, 'b') - control = np.array([(1,), (4,)], dtype=[('a', int)]) - assert_equal(test, control) - # A nested sub-field - test = drop_fields(a, ['ba', ]) - control = np.array([(1, (3.0,)), (4, (6.0,))], - dtype=[('a', int), ('b', [('bb', int)])]) - assert_equal(test, control) - # All the nested sub-field from a field: zap that field - test = drop_fields(a, ['ba', 'bb']) - control = np.array([(1,), (4,)], dtype=[('a', int)]) - assert_equal(test, control) - # - test = drop_fields(a, ['a', 'b']) - assert(test is None) - - - def test_rename_fields(self): - "Tests rename fields" - a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], - dtype=[('a', int), - ('b', [('ba', float), ('bb', (float, 2))])]) - test = rename_fields(a, {'a':'A', 'bb':'BB'}) - newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] - control = a.view(newdtype) - assert_equal(test.dtype, newdtype) - assert_equal(test, control) - - - def test_get_names(self): - "Tests get_names" - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_names(ndtype) - assert_equal(test, ('A', 'B')) - # - ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) - test = get_names(ndtype) - assert_equal(test, ('a', ('b', ('ba', 'bb')))) - - - def test_get_names_flat(self): - "Test get_names_flat" - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_names_flat(ndtype) - assert_equal(test, ('A', 'B')) - # - ndtype = np.dtype([('a', int), ('b', [('ba', float), 
('bb', int)])]) - test = get_names_flat(ndtype) - assert_equal(test, ('a', 'b', 'ba', 'bb')) - - - def test_get_fieldstructure(self): - "Test get_fieldstructure" - # No nested fields - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_fieldstructure(ndtype) - assert_equal(test, {'A':[], 'B':[]}) - # One 1-nested field - ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - test = get_fieldstructure(ndtype) - assert_equal(test, {'A': [], 'B': [], 'BA':['B', ], 'BB':['B']}) - # One 2-nested fields - ndtype = np.dtype([('A', int), - ('B', [('BA', int), - ('BB', [('BBA', int), ('BBB', int)])])]) - test = get_fieldstructure(ndtype) - control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], - 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} - assert_equal(test, control) - - def test_find_duplicates(self): - "Test find_duplicates" - a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), - (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], - mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), - (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], - dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - # - test = find_duplicates(a, ignoremask=False, return_index=True) - control = [0, 2] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - # - test = find_duplicates(a, key='A', return_index=True) - control = [0, 1, 2, 3, 5] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - # - test = find_duplicates(a, key='B', return_index=True) - control = [0, 1, 2, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - # - test = find_duplicates(a, key='BA', return_index=True) - control = [0, 1, 2, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - # - test = find_duplicates(a, key='BB', return_index=True) - control = [0, 1, 2, 3, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - def test_find_duplicates_ignoremask(self): - 
"Test the ignoremask option of find_duplicates" - ndtype = [('a', int)] - a = ma.array([1, 1, 1, 2, 2, 3, 3], - mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - test = find_duplicates(a, ignoremask=True, return_index=True) - control = [0, 1, 3, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - # - test = find_duplicates(a, ignoremask=False, return_index=True) - control = [0, 1, 2, 3, 4, 6] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - -class TestRecursiveFillFields(TestCase): - """ - Test recursive_fill_fields. - """ - def test_simple_flexible(self): - "Test recursive_fill_fields on flexible-array" - a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) - b = np.zeros((3,), dtype=a.dtype) - test = recursive_fill_fields(a, b) - control = np.array([(1, 10.), (2, 20.), (0, 0.)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - # - def test_masked_flexible(self): - "Test recursive_fill_fields on masked flexible-array" - a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], - dtype=[('A', int), ('B', float)]) - b = ma.zeros((3,), dtype=a.dtype) - test = recursive_fill_fields(a, b) - control = ma.array([(1, 10.), (2, 20.), (0, 0.)], - mask=[(0, 1), (1, 0), (0, 0)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - # - - - -class TestMergeArrays(TestCase): - """ - Test merge_arrays - """ - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - # - def test_solo(self): - "Test merge_arrays on a single array." 
- (_, x, _, z) = self.data - # - test = merge_arrays(x) - control = np.array([(1,), (2,)], dtype=[('f0', int)]) - assert_equal(test, control) - test = merge_arrays((x,)) - assert_equal(test, control) - # - test = merge_arrays(z, flatten=False) - assert_equal(test, z) - test = merge_arrays(z, flatten=True) - assert_equal(test, z) - # - def test_solo_w_flatten(self): - "Test merge_arrays on a single array w & w/o flattening" - w = self.data[0] - test = merge_arrays(w, flatten=False) - assert_equal(test, w) - # - test = merge_arrays(w, flatten=True) - control = np.array([(1, 2, 3.0), (4, 5, 6.0)], - dtype=[('a', int), ('ba', float), ('bb', int)]) - assert_equal(test, control) - # - def test_standard(self): - "Test standard & standard" - # Test merge arrays - (_, x, y, _) = self.data - test = merge_arrays((x, y), usemask=False) - control = np.array([(1, 10), (2, 20), (-1, 30)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - # - test = merge_arrays((x, y), usemask=True) - control = ma.array([(1, 10), (2, 20), (-1, 30)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - # - def test_flatten(self): - "Test standard & flexible" - (_, x, _, z) = self.data - test = merge_arrays((x, z), flatten=True) - control = np.array([(1, 'A', 1.), (2, 'B', 2.)], - dtype=[('f0', int), ('A', '|S3'), ('B', float)]) - assert_equal(test, control) - # - test = merge_arrays((x, z), flatten=False) - control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], - dtype=[('f0', int), - ('f1', [('A', '|S3'), ('B', float)])]) - assert_equal(test, control) - # - def test_flatten_wflexible(self): - "Test flatten standard & nested" - (w, x, _, _) = self.data - test = merge_arrays((x, w), flatten=True) - control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], - dtype=[('f0', int), - ('a', int), ('ba', float), ('bb', int)]) - assert_equal(test, control) - # - test = merge_arrays((x, w), flatten=False) - 
controldtype = dtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int)])])] - control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], - dtype=controldtype) - # - def test_wmasked_arrays(self): - "Test merge_arrays masked arrays" - (_, x, _, _) = self.data - mx = ma.array([1, 2, 3], mask=[1, 0, 0]) - test = merge_arrays((x, mx), usemask=True) - control = ma.array([(1, 1), (2, 2), (-1, 3)], - mask=[(0, 1), (0, 0), (1, 0)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - test = merge_arrays((x, mx), usemask=True, asrecarray=True) - assert_equal(test, control) - assert(isinstance(test, MaskedRecords)) - # - def test_w_singlefield(self): - "Test single field" - test = merge_arrays((np.array([1, 2]).view([('a', int)]), - np.array([10., 20., 30.])),) - control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('a', int), ('f1', float)]) - assert_equal(test, control) - # - def test_w_shorter_flex(self): - "Test merge_arrays w/ a shorter flexndarray." 
- z = self.data[-1] - test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) - control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], - dtype=[('A', '|S3'), ('B', float), ('C', int)]) - # - def test_singlerecord(self): - (_, x, y, z) = self.data - test = merge_arrays((x[0], y[0], z[0]), usemask=False) - control = np.array([(1, 10, ('A', 1))], - dtype=[('f0', int), - ('f1', int), - ('f2', [('A', '|S3'), ('B', float)])]) - assert_equal(test, control) - - - -class TestAppendFields(TestCase): - """ - Test append_fields - """ - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - # - def test_append_single(self): - "Test simple case" - (_, x, _, _) = self.data - test = append_fields(x, 'A', data=[10, 20, 30]) - control = ma.array([(1, 10), (2, 20), (-1, 30)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('f0', int), ('A', int)],) - assert_equal(test, control) - # - def test_append_double(self): - "Test simple case" - (_, x, _, _) = self.data - test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) - control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], - mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], - dtype=[('f0', int), ('A', int), ('B', int)],) - assert_equal(test, control) - # - def test_append_on_flex(self): - "Test append_fields on flexible type arrays" - z = self.data[-1] - test = append_fields(z, 'C', data=[10, 20, 30]) - control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], - mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('C', int)],) - assert_equal(test, control) - # - def test_append_on_nested(self): - "Test append_fields on nested fields" - w = self.data[0] - test = append_fields(w, 'C', data=[10, 20, 30]) - control = ma.array([(1, (2, 3.0), 10), - (4, 
(5, 6.0), 20), - (-1, (-1, -1.), 30)], - mask=[(0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], - dtype=[('a', int), - ('b', [('ba', float), ('bb', int)]), - ('C', int)],) - assert_equal(test, control) - - - -class TestStackArrays(TestCase): - """ - Test stack_arrays - """ - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - # - def test_solo(self): - "Test stack_arrays on single arrays" - (_, x, _, _) = self.data - test = stack_arrays((x,)) - assert_equal(test, x) - self.assertTrue(test is x) - # - test = stack_arrays(x) - assert_equal(test, x) - self.assertTrue(test is x) - # - def test_unnamed_fields(self): - "Tests combinations of arrays w/o named fields" - (_, x, y, _) = self.data - # - test = stack_arrays((x, x), usemask=False) - control = np.array([1, 2, 1, 2]) - assert_equal(test, control) - # - test = stack_arrays((x, y), usemask=False) - control = np.array([1, 2, 10, 20, 30]) - assert_equal(test, control) - # - test = stack_arrays((y, x), usemask=False) - control = np.array([10, 20, 30, 1, 2]) - assert_equal(test, control) - # - def test_unnamed_and_named_fields(self): - "Test combination of arrays w/ & w/o named fields" - (_, x, _, z) = self.data - # - test = stack_arrays((x, z)) - control = ma.array([(1, -1, -1), (2, -1, -1), - (-1, 'A', 1), (-1, 'B', 2)], - mask=[(0, 1, 1), (0, 1, 1), - (1, 0, 0), (1, 0, 0)], - dtype=[('f0', int), ('A', '|S3'), ('B', float)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - # - test = stack_arrays((z, x)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - (-1, -1, 1), (-1, -1, 2), ], - mask=[(0, 0, 1), (0, 0, 1), - (1, 1, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('f2', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - # - test = 
stack_arrays((z, z, x)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - ('A', 1, -1), ('B', 2, -1), - (-1, -1, 1), (-1, -1, 2), ], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 1), (0, 0, 1), - (1, 1, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('f2', int)]) - assert_equal(test, control) - # - def test_matching_named_fields(self): - "Test combination of arrays w/ matching field names" - (_, x, _, z) = self.data - zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)]) - test = stack_arrays((z, zz)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 0), (0, 0, 0), (0, 0, 0)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - # - test = stack_arrays((z, zz, x)) - ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] - control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), - ('a', 10., 100., -1), ('b', 20., 200., -1), - ('c', 30., 300., -1), - (-1, -1, -1, 1), (-1, -1, -1, 2)], - dtype=ndtype, - mask=[(0, 0, 1, 1), (0, 0, 1, 1), - (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), - (1, 1, 1, 0), (1, 1, 1, 0)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - - def test_defaults(self): - "Test defaults: no exception raised if keys of defaults are not fields." 
- (_, _, _, z) = self.data - zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)]) - defaults = {'A':'???', 'B':-999., 'C':-9999., 'D':-99999.} - test = stack_arrays((z, zz), defaults=defaults) - control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), - ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 0), (0, 0, 0), (0, 0, 0)]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - - def test_autoconversion(self): - "Tests autoconversion" - adtype = [('A', int), ('B', bool), ('C', float)] - a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) - bdtype = [('A', int), ('B', float), ('C', float)] - b = ma.array([(4, 5, 6)], dtype=bdtype) - control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], - dtype=bdtype) - test = stack_arrays((a, b), autoconvert=True) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - try: - test = stack_arrays((a, b), autoconvert=False) - except TypeError: - pass - else: - raise AssertionError - - - def test_checktitles(self): - "Test using titles in the field names" - adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] - a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) - bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] - b = ma.array([(4, 5, 6)], dtype=bdtype) - test = stack_arrays((a, b)) - control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], - dtype=bdtype) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - -class TestJoinBy(TestCase): - def setUp(self): - self.a = np.array(zip(np.arange(10), np.arange(50, 60), - np.arange(100, 110)), - dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(zip(np.arange(5, 15), np.arange(65, 75), - np.arange(100, 110)), - dtype=[('a', int), ('b', 
int), ('d', int)]) - # - def test_inner_join(self): - "Basic test of join_by" - a, b = self.a, self.b - # - test = join_by('a', a, b, jointype='inner') - control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), - (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), - (9, 59, 69, 109, 104)], - dtype=[('a', int), ('b1', int), ('b2', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_join(self): - a, b = self.a, self.b - # - test = join_by(('a', 'b'), a, b) - control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), - (7, 57, 107, 102), (8, 58, 108, 103), - (9, 59, 109, 104)], - dtype=[('a', int), ('b', int), - ('c', int), ('d', int)]) - - def test_outer_join(self): - a, b = self.a, self.b - # - test = join_by(('a', 'b'), a, b, 'outer') - control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), - (2, 52, 102, -1), (3, 53, 103, -1), - (4, 54, 104, -1), (5, 55, 105, -1), - (5, 65, -1, 100), (6, 56, 106, -1), - (6, 66, -1, 101), (7, 57, 107, -1), - (7, 67, -1, 102), (8, 58, 108, -1), - (8, 68, -1, 103), (9, 59, 109, -1), - (9, 69, -1, 104), (10, 70, -1, 105), - (11, 71, -1, 106), (12, 72, -1, 107), - (13, 73, -1, 108), (14, 74, -1, 109)], - mask=[(0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 1, 0), - (0, 0, 1, 0), (0, 0, 1, 0), - (0, 0, 1, 0), (0, 0, 1, 0)], - dtype=[('a', int), ('b', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_leftouter_join(self): - a, b = self.a, self.b - # - test = join_by(('a', 'b'), a, b, 'leftouter') - control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), - (2, 52, 102, -1), (3, 53, 103, -1), - (4, 54, 104, -1), (5, 55, 105, -1), - (6, 56, 106, -1), (7, 57, 107, -1), - (8, 58, 108, -1), (9, 59, 109, -1)], - mask=[(0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 
0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1)], - dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) - - - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_regression.py b/numpy-1.6.2/numpy/lib/tests/test_regression.py deleted file mode 100644 index 71400d112e..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_regression.py +++ /dev/null @@ -1,222 +0,0 @@ -from numpy.testing import * -from numpy.testing.utils import _assert_valid_refcount -import numpy as np - -rlevel = 1 - -class TestRegression(TestCase): - def test_poly1d(self,level=rlevel): - """Ticket #28""" - assert_equal(np.poly1d([1]) - np.poly1d([1,0]), - np.poly1d([-1,1])) - - def test_cov_parameters(self,level=rlevel): - """Ticket #91""" - x = np.random.random((3,3)) - y = x.copy() - np.cov(x, rowvar=1) - np.cov(y, rowvar=0) - assert_array_equal(x,y) - - def test_mem_digitize(self,level=rlevel): - """Ticket #95""" - for i in range(100): - np.digitize([1,2,3,4],[1,3]) - np.digitize([0,1,2,3,4],[1,3]) - - def test_unique_zero_sized(self,level=rlevel): - """Ticket #205""" - assert_array_equal([], np.unique(np.array([]))) - - def test_mem_vectorise(self, level=rlevel): - """Ticket #325""" - vt = np.vectorize(lambda *args: args) - vt(np.zeros((1,2,1)), np.zeros((2,1,1)), np.zeros((1,1,2))) - vt(np.zeros((1,2,1)), np.zeros((2,1,1)), np.zeros((1,1,2)), np.zeros((2,2))) - - def test_mgrid_single_element(self, level=rlevel): - """Ticket #339""" - assert_array_equal(np.mgrid[0:0:1j],[0]) - assert_array_equal(np.mgrid[0:0],[]) - - def test_refcount_vectorize(self, level=rlevel): - """Ticket #378""" - def p(x,y): return 123 - v = np.vectorize(p) - _assert_valid_refcount(v) - - def test_poly1d_nan_roots(self, level=rlevel): - """Ticket #396""" - p = np.poly1d([np.nan,np.nan,1], r=0) - self.assertRaises(np.linalg.LinAlgError,getattr,p,"r") - - def test_mem_polymul(self, level=rlevel): - """Ticket #448""" - np.polymul([],[1.]) - - def 
test_mem_string_concat(self, level=rlevel): - """Ticket #469""" - x = np.array([]) - np.append(x,'asdasd\tasdasd') - - def test_poly_div(self, level=rlevel): - """Ticket #553""" - u = np.poly1d([1,2,3]) - v = np.poly1d([1,2,3,4,5]) - q,r = np.polydiv(u,v) - assert_equal(q*v + r, u) - - def test_poly_eq(self, level=rlevel): - """Ticket #554""" - x = np.poly1d([1,2,3]) - y = np.poly1d([3,4]) - assert_(x != y) - assert_(x == x) - - def test_mem_insert(self, level=rlevel): - """Ticket #572""" - np.lib.place(1,1,1) - - def test_polyfit_build(self): - """Ticket #628""" - ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01, - 9.95368241e+00, -3.14526520e+02] - x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129, - 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176] - y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0, - 6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0, - 13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0, - 7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0, - 6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0, - 6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0, - 8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0] - tested = np.polyfit(x, y, 4) - assert_array_almost_equal(ref, tested) - - - def test_polydiv_type(self) : - """Make polydiv work for complex types""" - msg = "Wrong type, should be complex" - x = np.ones(3, dtype=np.complex) - q,r = np.polydiv(x,x) - assert_(q.dtype == np.complex, msg) - msg = "Wrong type, should be float" - x = np.ones(3, dtype=np.int) - q,r = np.polydiv(x,x) - assert_(q.dtype == np.float, msg) - - def test_histogramdd_too_many_bins(self) : - """Ticket 928.""" - 
assert_raises(ValueError, np.histogramdd, np.ones((1,10)), bins=2**10) - - def test_polyint_type(self) : - """Ticket #944""" - msg = "Wrong type, should be complex" - x = np.ones(3, dtype=np.complex) - assert_(np.polyint(x).dtype == np.complex, msg) - msg = "Wrong type, should be float" - x = np.ones(3, dtype=np.int) - assert_(np.polyint(x).dtype == np.float, msg) - - def test_ndenumerate_crash(self): - """Ticket 1140""" - # Shouldn't crash: - list(np.ndenumerate(np.array([[]]))) - - def test_asfarray_none(self, level=rlevel): - """Test for changeset r5065""" - assert_array_equal(np.array([np.nan]), np.asfarray([None])) - - def test_large_fancy_indexing(self, level=rlevel): - # Large enough to fail on 64-bit. - nbits = np.dtype(np.intp).itemsize * 8 - thesize = int((2**nbits)**(1.0/5.0)+1) - def dp(): - n = 3 - a = np.ones((n,)*5) - i = np.random.randint(0,n,size=thesize) - a[np.ix_(i,i,i,i,i)] = 0 - def dp2(): - n = 3 - a = np.ones((n,)*5) - i = np.random.randint(0,n,size=thesize) - g = a[np.ix_(i,i,i,i,i)] - self.assertRaises(ValueError, dp) - self.assertRaises(ValueError, dp2) - - def test_void_coercion(self, level=rlevel): - dt = np.dtype([('a','f4'),('b','i4')]) - x = np.zeros((1,),dt) - assert_(np.r_[x,x].dtype == dt) - - def test_who_with_0dim_array(self, level=rlevel) : - """ticket #1243""" - import os, sys - - oldstdout = sys.stdout - sys.stdout = open(os.devnull, 'w') - try: - try: - tmp = np.who({'foo' : np.array(1)}) - except: - raise AssertionError("ticket #1243") - finally: - sys.stdout.close() - sys.stdout = oldstdout - - def test_include_dirs(self): - """As a sanity check, just test that get_include and - get_numarray_include include something reasonable. 
Somewhat - related to ticket #1405.""" - include_dirs = [np.get_include(), np.get_numarray_include()] - for path in include_dirs: - assert_(isinstance(path, (str, unicode))) - assert_(path != '') - - def test_polyder_return_type(self): - """Ticket #1249""" - assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d)) - assert_(isinstance(np.polyder([1], 0), np.ndarray)) - assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d)) - assert_(isinstance(np.polyder([1], 1), np.ndarray)) - - def test_append_fields_dtype_list(self): - """Ticket #1676""" - from numpy.lib.recfunctions import append_fields - F = False - base = np.array([1,2,3], dtype=np.int32) - data = np.eye(3).astype(np.int32) - names = ['a','b','c'] - dlist = [np.float64, np.int32, np.int32] - try: - a = append_fields(base, names, data, dlist) - except: - raise AssertionError() - - def test_loadtxt_fields_subarrays(self): - # For ticket #1936 - from StringIO import StringIO - dt = [("a", 'u1', 2), ("b", 'u1', 2)] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) - - dt = [("a", [("a", 'u1', (1,3)), ("b", 'u1')])] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0,1,2), 3),)], dtype=dt)) - - dt = [("a", 'u1', (2,2))] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt)) - - dt = [("a", 'u1', (2,3,2))] - x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt) - data = [((((0,1), (2,3), (4,5)), ((6,7), (8,9), (10,11))),)] - assert_equal(x, np.array(data, dtype=dt)) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_shape_base.py b/numpy-1.6.2/numpy/lib/tests/test_shape_base.py deleted file mode 100644 index 403761e93b..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_shape_base.py +++ /dev/null @@ -1,316 +0,0 @@ -from numpy.testing import * -from numpy.lib import * -from numpy.core import * -from numpy import 
matrix, asmatrix - -class TestApplyAlongAxis(TestCase): - def test_simple(self): - a = ones((20,10),'d') - assert_array_equal(apply_along_axis(len,0,a),len(a)*ones(shape(a)[1])) - - def test_simple101(self,level=11): - a = ones((10,101),'d') - assert_array_equal(apply_along_axis(len,0,a),len(a)*ones(shape(a)[1])) - - def test_3d(self): - a = arange(27).reshape((3,3,3)) - assert_array_equal(apply_along_axis(sum,0,a), - [[27,30,33],[36,39,42],[45,48,51]]) - - -class TestApplyOverAxes(TestCase): - def test_simple(self): - a = arange(24).reshape(2,3,4) - aoa_a = apply_over_axes(sum, a, [0,2]) - assert_array_equal(aoa_a, array([[[60],[92],[124]]])) - - -class TestArraySplit(TestCase): - def test_integer_0_split(self): - a = arange(10) - try: - res = array_split(a,0) - assert(0) # it should have thrown a value error - except ValueError: - pass - - def test_integer_split(self): - a = arange(10) - res = array_split(a,1) - desired = [arange(10)] - compare_results(res,desired) - - res = array_split(a,2) - desired = [arange(5),arange(5,10)] - compare_results(res,desired) - - res = array_split(a,3) - desired = [arange(4),arange(4,7),arange(7,10)] - compare_results(res,desired) - - res = array_split(a,4) - desired = [arange(3),arange(3,6),arange(6,8),arange(8,10)] - compare_results(res,desired) - - res = array_split(a,5) - desired = [arange(2),arange(2,4),arange(4,6),arange(6,8),arange(8,10)] - compare_results(res,desired) - - res = array_split(a,6) - desired = [arange(2),arange(2,4),arange(4,6),arange(6,8),arange(8,9), - arange(9,10)] - compare_results(res,desired) - - res = array_split(a,7) - desired = [arange(2),arange(2,4),arange(4,6),arange(6,7),arange(7,8), - arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,8) - desired = [arange(2),arange(2,4),arange(4,5),arange(5,6),arange(6,7), - arange(7,8), arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,9) - desired = 
[arange(2),arange(2,3),arange(3,4),arange(4,5),arange(5,6), - arange(6,7), arange(7,8), arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,10) - desired = [arange(1),arange(1,2),arange(2,3),arange(3,4), - arange(4,5),arange(5,6), arange(6,7), arange(7,8), - arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,11) - desired = [arange(1),arange(1,2),arange(2,3),arange(3,4), - arange(4,5),arange(5,6), arange(6,7), arange(7,8), - arange(8,9), arange(9,10),array([])] - compare_results(res,desired) - - def test_integer_split_2D_rows(self): - a = array([arange(10),arange(10)]) - res = array_split(a,3,axis=0) - desired = [array([arange(10)]),array([arange(10)]),array([])] - compare_results(res,desired) - - def test_integer_split_2D_cols(self): - a = array([arange(10),arange(10)]) - res = array_split(a,3,axis=-1) - desired = [array([arange(4),arange(4)]), - array([arange(4,7),arange(4,7)]), - array([arange(7,10),arange(7,10)])] - compare_results(res,desired) - - def test_integer_split_2D_default(self): - """ This will fail if we change default axis - """ - a = array([arange(10),arange(10)]) - res = array_split(a,3) - desired = [array([arange(10)]),array([arange(10)]),array([])] - compare_results(res,desired) - #perhaps should check higher dimensions - - def test_index_split_simple(self): - a = arange(10) - indices = [1,5,7] - res = array_split(a,indices,axis=-1) - desired = [arange(0,1),arange(1,5),arange(5,7),arange(7,10)] - compare_results(res,desired) - - def test_index_split_low_bound(self): - a = arange(10) - indices = [0,5,7] - res = array_split(a,indices,axis=-1) - desired = [array([]),arange(0,5),arange(5,7),arange(7,10)] - compare_results(res,desired) - - def test_index_split_high_bound(self): - a = arange(10) - indices = [0,5,7,10,12] - res = array_split(a,indices,axis=-1) - desired = [array([]),arange(0,5),arange(5,7),arange(7,10), - array([]),array([])] - compare_results(res,desired) - - -class 
TestSplit(TestCase): - """* This function is essentially the same as array_split, - except that it test if splitting will result in an - equal split. Only test for this case. - *""" - def test_equal_split(self): - a = arange(10) - res = split(a,2) - desired = [arange(5),arange(5,10)] - compare_results(res,desired) - - def test_unequal_split(self): - a = arange(10) - try: - res = split(a,3) - assert(0) # should raise an error - except ValueError: - pass - - -class TestDstack(TestCase): - def test_0D_array(self): - a = array(1); b = array(2); - res=dstack([a,b]) - desired = array([[[1,2]]]) - assert_array_equal(res,desired) - - def test_1D_array(self): - a = array([1]); b = array([2]); - res=dstack([a,b]) - desired = array([[[1,2]]]) - assert_array_equal(res,desired) - - def test_2D_array(self): - a = array([[1],[2]]); b = array([[1],[2]]); - res=dstack([a,b]) - desired = array([[[1,1]],[[2,2,]]]) - assert_array_equal(res,desired) - - def test_2D_array2(self): - a = array([1,2]); b = array([1,2]); - res=dstack([a,b]) - desired = array([[[1,1],[2,2]]]) - assert_array_equal(res,desired) - -""" array_split has more comprehensive test of splitting. - only do simple test on hsplit, vsplit, and dsplit -""" -class TestHsplit(TestCase): - """ only testing for integer splits. - """ - def test_0D_array(self): - a= array(1) - try: - hsplit(a,2) - assert(0) - except ValueError: - pass - - def test_1D_array(self): - a= array([1,2,3,4]) - res = hsplit(a,2) - desired = [array([1,2]),array([3,4])] - compare_results(res,desired) - - def test_2D_array(self): - a= array([[1,2,3,4], - [1,2,3,4]]) - res = hsplit(a,2) - desired = [array([[1,2],[1,2]]),array([[3,4],[3,4]])] - compare_results(res,desired) - - -class TestVsplit(TestCase): - """ only testing for integer splits. 
- """ - def test_1D_array(self): - a= array([1,2,3,4]) - try: - vsplit(a,2) - assert(0) - except ValueError: - pass - - def test_2D_array(self): - a= array([[1,2,3,4], - [1,2,3,4]]) - res = vsplit(a,2) - desired = [array([[1,2,3,4]]),array([[1,2,3,4]])] - compare_results(res,desired) - - -class TestDsplit(TestCase): - """ only testing for integer splits. - """ - def test_2D_array(self): - a= array([[1,2,3,4], - [1,2,3,4]]) - try: - dsplit(a,2) - assert(0) - except ValueError: - pass - - def test_3D_array(self): - a= array([[[1,2,3,4], - [1,2,3,4]], - [[1,2,3,4], - [1,2,3,4]]]) - res = dsplit(a,2) - desired = [array([[[1,2],[1,2]],[[1,2],[1,2]]]), - array([[[3,4],[3,4]],[[3,4],[3,4]]])] - compare_results(res,desired) - - -class TestSqueeze(TestCase): - def test_basic(self): - a = rand(20,10,10,1,1) - b = rand(20,1,10,1,20) - c = rand(1,1,20,10) - assert_array_equal(squeeze(a),reshape(a,(20,10,10))) - assert_array_equal(squeeze(b),reshape(b,(20,10,20))) - assert_array_equal(squeeze(c),reshape(c,(20,10))) - - -class TestKron(TestCase): - def test_return_type(self): - a = ones([2,2]) - m = asmatrix(a) - assert_equal(type(kron(a,a)), ndarray) - assert_equal(type(kron(m,m)), matrix) - assert_equal(type(kron(a,m)), matrix) - assert_equal(type(kron(m,a)), matrix) - class myarray(ndarray): - __array_priority__ = 0.0 - ma = myarray(a.shape, a.dtype, a.data) - assert_equal(type(kron(a,a)), ndarray) - assert_equal(type(kron(ma,ma)), myarray) - assert_equal(type(kron(a,ma)), ndarray) - assert_equal(type(kron(ma,a)), myarray) - - -class TestTile(TestCase): - def test_basic(self): - a = array([0,1,2]) - b = [[1,2],[3,4]] - assert_equal(tile(a,2), [0,1,2,0,1,2]) - assert_equal(tile(a,(2,2)), [[0,1,2,0,1,2],[0,1,2,0,1,2]]) - assert_equal(tile(a,(1,2)), [[0,1,2,0,1,2]]) - assert_equal(tile(b, 2), [[1,2,1,2],[3,4,3,4]]) - assert_equal(tile(b,(2,1)),[[1,2],[3,4],[1,2],[3,4]]) - assert_equal(tile(b,(2,2)),[[1,2,1,2],[3,4,3,4], - [1,2,1,2],[3,4,3,4]]) - - def test_empty(self): - a = 
array([[[]]]) - d = tile(a,(3,2,5)).shape - assert_equal(d,(3,2,0)) - - def test_kroncompare(self): - import numpy.random as nr - reps=[(2,),(1,2),(2,1),(2,2),(2,3,2),(3,2)] - shape=[(3,),(2,3),(3,4,3),(3,2,3),(4,3,2,4),(2,2)] - for s in shape: - b = nr.randint(0,10,size=s) - for r in reps: - a = ones(r, b.dtype) - large = tile(b, r) - klarge = kron(a, b) - assert_equal(large, klarge) - - -# Utility -def compare_results(res,desired): - for i in range(len(desired)): - assert_array_equal(res[i],desired[i]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_stride_tricks.py b/numpy-1.6.2/numpy/lib/tests/test_stride_tricks.py deleted file mode 100644 index 8f0ac52b86..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_stride_tricks.py +++ /dev/null @@ -1,208 +0,0 @@ -import numpy as np -from numpy.testing import * -from numpy.lib.stride_tricks import broadcast_arrays - - -def assert_shapes_correct(input_shapes, expected_shape): - """ Broadcast a list of arrays with the given input shapes and check the - common output shape. - """ - inarrays = [np.zeros(s) for s in input_shapes] - outarrays = broadcast_arrays(*inarrays) - outshapes = [a.shape for a in outarrays] - expected = [expected_shape] * len(inarrays) - assert outshapes == expected - -def assert_incompatible_shapes_raise(input_shapes): - """ Broadcast a list of arrays with the given (incompatible) input shapes - and check that they raise a ValueError. - """ - inarrays = [np.zeros(s) for s in input_shapes] - assert_raises(ValueError, broadcast_arrays, *inarrays) - -def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): - """ Broadcast two shapes against each other and check that the data layout - is the same as if a ufunc did the broadcasting. - """ - x0 = np.zeros(shape0, dtype=int) - # Note that multiply.reduce's identity element is 1.0, so when shape1==(), - # this gives the desired n==1. 
- n = int(np.multiply.reduce(shape1)) - x1 = np.arange(n).reshape(shape1) - if transposed: - x0 = x0.T - x1 = x1.T - if flipped: - x0 = x0[::-1] - x1 = x1[::-1] - # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the - # result should be exactly the same as the broadcasted view of x1. - y = x0 + x1 - b0, b1 = broadcast_arrays(x0, x1) - assert_array_equal(y, b1) - - -def test_same(): - x = np.arange(10) - y = np.arange(10) - bx, by = broadcast_arrays(x, y) - assert_array_equal(x, bx) - assert_array_equal(y, by) - -def test_one_off(): - x = np.array([[1,2,3]]) - y = np.array([[1],[2],[3]]) - bx, by = broadcast_arrays(x, y) - bx0 = np.array([[1,2,3],[1,2,3],[1,2,3]]) - by0 = bx0.T - assert_array_equal(bx0, bx) - assert_array_equal(by0, by) - -def test_same_input_shapes(): - """ Check that the final shape is just the input shape. - """ - data = [ - (), - (1,), - (3,), - (0,1), - (0,3), - (1,0), - (3,0), - (1,3), - (3,1), - (3,3), - ] - for shape in data: - input_shapes = [shape] - # Single input. - yield assert_shapes_correct, input_shapes, shape - # Double input. - input_shapes2 = [shape, shape] - yield assert_shapes_correct, input_shapes2, shape - # Triple input. - input_shapes3 = [shape, shape, shape] - yield assert_shapes_correct, input_shapes3, shape - -def test_two_compatible_by_ones_input_shapes(): - """ Check that two different input shapes (of the same length but some have - 1s) broadcast to the correct shape. 
- """ - data = [ - [[(1,), (3,)], (3,)], - [[(1,3), (3,3)], (3,3)], - [[(3,1), (3,3)], (3,3)], - [[(1,3), (3,1)], (3,3)], - [[(1,1), (3,3)], (3,3)], - [[(1,1), (1,3)], (1,3)], - [[(1,1), (3,1)], (3,1)], - [[(1,0), (0,0)], (0,0)], - [[(0,1), (0,0)], (0,0)], - [[(1,0), (0,1)], (0,0)], - [[(1,1), (0,0)], (0,0)], - [[(1,1), (1,0)], (1,0)], - [[(1,1), (0,1)], (0,1)], - ] - for input_shapes, expected_shape in data: - yield assert_shapes_correct, input_shapes, expected_shape - # Reverse the input shapes since broadcasting should be symmetric. - yield assert_shapes_correct, input_shapes[::-1], expected_shape - -def test_two_compatible_by_prepending_ones_input_shapes(): - """ Check that two different input shapes (of different lengths) broadcast - to the correct shape. - """ - data = [ - [[(), (3,)], (3,)], - [[(3,), (3,3)], (3,3)], - [[(3,), (3,1)], (3,3)], - [[(1,), (3,3)], (3,3)], - [[(), (3,3)], (3,3)], - [[(1,1), (3,)], (1,3)], - [[(1,), (3,1)], (3,1)], - [[(1,), (1,3)], (1,3)], - [[(), (1,3)], (1,3)], - [[(), (3,1)], (3,1)], - [[(), (0,)], (0,)], - [[(0,), (0,0)], (0,0)], - [[(0,), (0,1)], (0,0)], - [[(1,), (0,0)], (0,0)], - [[(), (0,0)], (0,0)], - [[(1,1), (0,)], (1,0)], - [[(1,), (0,1)], (0,1)], - [[(1,), (1,0)], (1,0)], - [[(), (1,0)], (1,0)], - [[(), (0,1)], (0,1)], - ] - for input_shapes, expected_shape in data: - yield assert_shapes_correct, input_shapes, expected_shape - # Reverse the input shapes since broadcasting should be symmetric. - yield assert_shapes_correct, input_shapes[::-1], expected_shape - -def test_incompatible_shapes_raise_valueerror(): - """ Check that a ValueError is raised for incompatible shapes. - """ - data = [ - [(3,), (4,)], - [(2,3), (2,)], - [(3,), (3,), (4,)], - [(1,3,4), (2,3,3)], - ] - for input_shapes in data: - yield assert_incompatible_shapes_raise, input_shapes - # Reverse the input shapes since broadcasting should be symmetric. 
- yield assert_incompatible_shapes_raise, input_shapes[::-1] - -def test_same_as_ufunc(): - """ Check that the data layout is the same as if a ufunc did the operation. - """ - data = [ - [[(1,), (3,)], (3,)], - [[(1,3), (3,3)], (3,3)], - [[(3,1), (3,3)], (3,3)], - [[(1,3), (3,1)], (3,3)], - [[(1,1), (3,3)], (3,3)], - [[(1,1), (1,3)], (1,3)], - [[(1,1), (3,1)], (3,1)], - [[(1,0), (0,0)], (0,0)], - [[(0,1), (0,0)], (0,0)], - [[(1,0), (0,1)], (0,0)], - [[(1,1), (0,0)], (0,0)], - [[(1,1), (1,0)], (1,0)], - [[(1,1), (0,1)], (0,1)], - [[(), (3,)], (3,)], - [[(3,), (3,3)], (3,3)], - [[(3,), (3,1)], (3,3)], - [[(1,), (3,3)], (3,3)], - [[(), (3,3)], (3,3)], - [[(1,1), (3,)], (1,3)], - [[(1,), (3,1)], (3,1)], - [[(1,), (1,3)], (1,3)], - [[(), (1,3)], (1,3)], - [[(), (3,1)], (3,1)], - [[(), (0,)], (0,)], - [[(0,), (0,0)], (0,0)], - [[(0,), (0,1)], (0,0)], - [[(1,), (0,0)], (0,0)], - [[(), (0,0)], (0,0)], - [[(1,1), (0,)], (1,0)], - [[(1,), (0,1)], (0,1)], - [[(1,), (1,0)], (1,0)], - [[(), (1,0)], (1,0)], - [[(), (0,1)], (0,1)], - ] - for input_shapes, expected_shape in data: - yield assert_same_as_ufunc, input_shapes[0], input_shapes[1] - # Reverse the input shapes since broadcasting should be symmetric. - yield assert_same_as_ufunc, input_shapes[1], input_shapes[0] - # Try them transposed, too. - yield assert_same_as_ufunc, input_shapes[0], input_shapes[1], True - # ... and flipped for non-rank-0 inputs in order to test negative - # strides. 
- if () not in input_shapes: - yield assert_same_as_ufunc, input_shapes[0], input_shapes[1], False, True - yield assert_same_as_ufunc, input_shapes[0], input_shapes[1], True, True - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_twodim_base.py b/numpy-1.6.2/numpy/lib/tests/test_twodim_base.py deleted file mode 100644 index e5731ff889..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_twodim_base.py +++ /dev/null @@ -1,341 +0,0 @@ -""" Test functions for matrix module - -""" - -from numpy.testing import * - -from numpy import ( arange, rot90, add, fliplr, flipud, zeros, ones, eye, - array, diag, histogram2d, tri, mask_indices, triu_indices, - triu_indices_from, tril_indices, tril_indices_from ) - -import numpy as np -from numpy.compat import asbytes, asbytes_nested - -def get_mat(n): - data = arange(n) - data = add.outer(data,data) - return data - -class TestEye(TestCase): - def test_basic(self): - assert_equal(eye(4),array([[1,0,0,0], - [0,1,0,0], - [0,0,1,0], - [0,0,0,1]])) - assert_equal(eye(4,dtype='f'),array([[1,0,0,0], - [0,1,0,0], - [0,0,1,0], - [0,0,0,1]],'f')) - assert_equal(eye(3) == 1, eye(3,dtype=bool)) - - def test_diag(self): - assert_equal(eye(4,k=1),array([[0,1,0,0], - [0,0,1,0], - [0,0,0,1], - [0,0,0,0]])) - assert_equal(eye(4,k=-1),array([[0,0,0,0], - [1,0,0,0], - [0,1,0,0], - [0,0,1,0]])) - def test_2d(self): - assert_equal(eye(4,3),array([[1,0,0], - [0,1,0], - [0,0,1], - [0,0,0]])) - assert_equal(eye(3,4),array([[1,0,0,0], - [0,1,0,0], - [0,0,1,0]])) - def test_diag2d(self): - assert_equal(eye(3,4,k=2),array([[0,0,1,0], - [0,0,0,1], - [0,0,0,0]])) - assert_equal(eye(4,3,k=-2),array([[0,0,0], - [0,0,0], - [1,0,0], - [0,1,0]])) - - def test_eye_bounds(self): - assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) - assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) - assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) - assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) - assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) 
- assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) - assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) - assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) - assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) - - def test_strings(self): - assert_equal(eye(2, 2, dtype='S3'), - asbytes_nested([['1', ''], ['', '1']])) - - def test_bool(self): - assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) - -class TestDiag(TestCase): - def test_vector(self): - vals = (100 * arange(5)).astype('l') - b = zeros((5, 5)) - for k in range(5): - b[k, k] = vals[k] - assert_equal(diag(vals), b) - b = zeros((7, 7)) - c = b.copy() - for k in range(5): - b[k, k + 2] = vals[k] - c[k + 2, k] = vals[k] - assert_equal(diag(vals, k=2), b) - assert_equal(diag(vals, k=-2), c) - - def test_matrix(self, vals=None): - if vals is None: - vals = (100 * get_mat(5) + 1).astype('l') - b = zeros((5,)) - for k in range(5): - b[k] = vals[k,k] - assert_equal(diag(vals), b) - b = b * 0 - for k in range(3): - b[k] = vals[k, k + 2] - assert_equal(diag(vals, 2), b[:3]) - for k in range(3): - b[k] = vals[k + 2, k] - assert_equal(diag(vals, -2), b[:3]) - - def test_fortran_order(self): - vals = array((100 * get_mat(5) + 1), order='F', dtype='l') - self.test_matrix(vals) - - def test_diag_bounds(self): - A = [[1, 2], [3, 4], [5, 6]] - assert_equal(diag(A, k=2), []) - assert_equal(diag(A, k=1), [2]) - assert_equal(diag(A, k=0), [1, 4]) - assert_equal(diag(A, k=-1), [3, 6]) - assert_equal(diag(A, k=-2), [5]) - assert_equal(diag(A, k=-3), []) - - def test_failure(self): - self.assertRaises(ValueError, diag, [[[1]]]) - -class TestFliplr(TestCase): - def test_basic(self): - self.assertRaises(ValueError, fliplr, ones(4)) - a = get_mat(4) - b = a[:,::-1] - assert_equal(fliplr(a),b) - a = [[0,1,2], - [3,4,5]] - b = [[2,1,0], - [5,4,3]] - assert_equal(fliplr(a),b) - -class TestFlipud(TestCase): - def test_basic(self): - a = get_mat(4) - b = a[::-1,:] - assert_equal(flipud(a),b) - a = [[0,1,2], 
- [3,4,5]] - b = [[3,4,5], - [0,1,2]] - assert_equal(flipud(a),b) - -class TestRot90(TestCase): - def test_basic(self): - self.assertRaises(ValueError, rot90, ones(4)) - - a = [[0,1,2], - [3,4,5]] - b1 = [[2,5], - [1,4], - [0,3]] - b2 = [[5,4,3], - [2,1,0]] - b3 = [[3,0], - [4,1], - [5,2]] - b4 = [[0,1,2], - [3,4,5]] - - for k in range(-3,13,4): - assert_equal(rot90(a,k=k),b1) - for k in range(-2,13,4): - assert_equal(rot90(a,k=k),b2) - for k in range(-1,13,4): - assert_equal(rot90(a,k=k),b3) - for k in range(0,13,4): - assert_equal(rot90(a,k=k),b4) - - def test_axes(self): - a = ones((50,40,3)) - assert_equal(rot90(a).shape,(40,50,3)) - -class TestHistogram2d(TestCase): - def test_simple(self): - x = array([ 0.41702200, 0.72032449, 0.00011437481, 0.302332573, 0.146755891]) - y = array([ 0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) - xedges = np.linspace(0,1,10) - yedges = np.linspace(0,1,10) - H = histogram2d(x, y, (xedges, yedges))[0] - answer = array([[0, 0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]]) - assert_array_equal(H.T, answer) - H = histogram2d(x, y, xedges)[0] - assert_array_equal(H.T, answer) - H,xedges,yedges = histogram2d(range(10),range(10)) - assert_array_equal(H, eye(10,10)) - assert_array_equal(xedges, np.linspace(0,9,11)) - assert_array_equal(yedges, np.linspace(0,9,11)) - - def test_asym(self): - x = array([1, 1, 2, 3, 4, 4, 4, 5]) - y = array([1, 3, 2, 0, 1, 2, 3, 4]) - H, xed, yed = histogram2d(x,y, (6, 5), range = [[0,6],[0,5]], normed=True) - answer = array([[0.,0,0,0,0], - [0,1,0,1,0], - [0,0,1,0,0], - [1,0,0,0,0], - [0,1,1,1,0], - [0,0,0,0,1]]) - assert_array_almost_equal(H, answer/8., 3) - assert_array_equal(xed, np.linspace(0,6,7)) - assert_array_equal(yed, np.linspace(0,5,6)) - def 
test_norm(self): - x = array([1,2,3,1,2,3,1,2,3]) - y = array([1,1,1,2,2,2,3,3,3]) - H, xed, yed = histogram2d(x,y,[[1,2,3,5], [1,2,3,5]], normed=True) - answer=array([[1,1,.5], - [1,1,.5], - [.5,.5,.25]])/9. - assert_array_almost_equal(H, answer, 3) - - def test_all_outliers(self): - r = rand(100)+1. - H, xed, yed = histogram2d(r, r, (4, 5), range=([0,1], [0,1])) - assert_array_equal(H, 0) - - def test_empty(self): - a, edge1, edge2 = histogram2d([],[], bins=([0,1],[0,1])) - assert_array_max_ulp(a, array([[ 0.]])) - - a, edge1, edge2 = histogram2d([], [], bins=4) - assert_array_max_ulp(a, np.zeros((4, 4))) - - -class TestTri(TestCase): - def test_dtype(self): - out = array([[1,0,0], - [1,1,0], - [1,1,1]]) - assert_array_equal(tri(3),out) - assert_array_equal(tri(3,dtype=bool),out.astype(bool)) - - -def test_tril_triu(): - for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: - a = np.ones((2, 2), dtype=dtype) - b = np.tril(a) - c = np.triu(a) - assert_array_equal(b, [[1, 0], [1, 1]]) - assert_array_equal(c, b.T) - # should return the same dtype as the original array - assert_equal(b.dtype, a.dtype) - assert_equal(c.dtype, a.dtype) - - -def test_mask_indices(): - # simple test without offset - iu = mask_indices(3, np.triu) - a = np.arange(9).reshape(3, 3) - yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8])) - # Now with an offset - iu1 = mask_indices(3, np.triu, 1) - yield (assert_array_equal, a[iu1], array([1, 2, 5])) - - -def test_tril_indices(): - # indices without and with offset - il1 = tril_indices(4) - il2 = tril_indices(4, 2) - - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - - # indexing: - yield (assert_array_equal, a[il1], - array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16]) ) - - # And for assigning values: - a[il1] = -1 - yield (assert_array_equal, a, - array([[-1, 2, 3, 4], - [-1, -1, 7, 8], - [-1, -1, -1, 12], - [-1, -1, -1, -1]]) ) - - # These cover almost the whole array (two diagonals right 
of the main one): - a[il2] = -10 - yield (assert_array_equal, a, - array([[-10, -10, -10, 4], - [-10, -10, -10, -10], - [-10, -10, -10, -10], - [-10, -10, -10, -10]]) ) - - -class TestTriuIndices: - def test_triu_indices(self): - iu1 = triu_indices(4) - iu2 = triu_indices(4, 2) - - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - - # Both for indexing: - yield (assert_array_equal, a[iu1], - array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) - - # And for assigning values: - a[iu1] = -1 - yield (assert_array_equal, a, - array([[-1, -1, -1, -1], - [ 5, -1, -1, -1], - [ 9, 10, -1, -1], - [13, 14, 15, -1]]) ) - - # These cover almost the whole array (two diagonals right of the main one): - a[iu2] = -10 - yield ( assert_array_equal, a, - array([[ -1, -1, -10, -10], - [ 5, -1, -1, -10], - [ 9, 10, -1, -1], - [ 13, 14, 15, -1]]) ) - - -class TestTrilIndicesFrom: - def test_exceptions(self): - assert_raises(ValueError, tril_indices_from, np.ones((2,))) - assert_raises(ValueError, tril_indices_from, np.ones((2,2,2))) - assert_raises(ValueError, tril_indices_from, np.ones((2,3))) - - -class TestTriuIndicesFrom: - def test_exceptions(self): - assert_raises(ValueError, triu_indices_from, np.ones((2,))) - assert_raises(ValueError, triu_indices_from, np.ones((2,2,2))) - assert_raises(ValueError, triu_indices_from, np.ones((2,3))) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_type_check.py b/numpy-1.6.2/numpy/lib/tests/test_type_check.py deleted file mode 100644 index 941768aa5a..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_type_check.py +++ /dev/null @@ -1,394 +0,0 @@ -from numpy.testing import * -from numpy.lib import * -from numpy.core import * -from numpy.compat import asbytes - -try: - import ctypes - _HAS_CTYPE = True -except ImportError: - _HAS_CTYPE = False - - -def assert_all(x): - assert(all(x)), x - - -class TestCommonType(TestCase): - def test_basic(self): - ai32 = 
array([[1,2],[3,4]], dtype=int32) - af32 = array([[1,2],[3,4]], dtype=float32) - af64 = array([[1,2],[3,4]], dtype=float64) - acs = array([[1+5j,2+6j],[3+7j,4+8j]], dtype=csingle) - acd = array([[1+5j,2+6j],[3+7j,4+8j]], dtype=cdouble) - assert common_type(af32) == float32 - assert common_type(af64) == float64 - assert common_type(acs) == csingle - assert common_type(acd) == cdouble - - - -class TestMintypecode(TestCase): - - def test_default_1(self): - for itype in '1bcsuwil': - assert_equal(mintypecode(itype),'d') - assert_equal(mintypecode('f'),'f') - assert_equal(mintypecode('d'),'d') - assert_equal(mintypecode('F'),'F') - assert_equal(mintypecode('D'),'D') - - def test_default_2(self): - for itype in '1bcsuwil': - assert_equal(mintypecode(itype+'f'),'f') - assert_equal(mintypecode(itype+'d'),'d') - assert_equal(mintypecode(itype+'F'),'F') - assert_equal(mintypecode(itype+'D'),'D') - assert_equal(mintypecode('ff'),'f') - assert_equal(mintypecode('fd'),'d') - assert_equal(mintypecode('fF'),'F') - assert_equal(mintypecode('fD'),'D') - assert_equal(mintypecode('df'),'d') - assert_equal(mintypecode('dd'),'d') - #assert_equal(mintypecode('dF',savespace=1),'F') - assert_equal(mintypecode('dF'),'D') - assert_equal(mintypecode('dD'),'D') - assert_equal(mintypecode('Ff'),'F') - #assert_equal(mintypecode('Fd',savespace=1),'F') - assert_equal(mintypecode('Fd'),'D') - assert_equal(mintypecode('FF'),'F') - assert_equal(mintypecode('FD'),'D') - assert_equal(mintypecode('Df'),'D') - assert_equal(mintypecode('Dd'),'D') - assert_equal(mintypecode('DF'),'D') - assert_equal(mintypecode('DD'),'D') - - def test_default_3(self): - assert_equal(mintypecode('fdF'),'D') - #assert_equal(mintypecode('fdF',savespace=1),'F') - assert_equal(mintypecode('fdD'),'D') - assert_equal(mintypecode('fFD'),'D') - assert_equal(mintypecode('dFD'),'D') - - assert_equal(mintypecode('ifd'),'d') - assert_equal(mintypecode('ifF'),'F') - assert_equal(mintypecode('ifD'),'D') - 
assert_equal(mintypecode('idF'),'D') - #assert_equal(mintypecode('idF',savespace=1),'F') - assert_equal(mintypecode('idD'),'D') - - -class TestIsscalar(TestCase): - - def test_basic(self): - assert(isscalar(3)) - assert(not isscalar([3])) - assert(not isscalar((3,))) - assert(isscalar(3j)) - assert(isscalar(10L)) - assert(isscalar(4.0)) - - -class TestReal(TestCase): - - def test_real(self): - y = rand(10,) - assert_array_equal(y,real(y)) - - def test_cmplx(self): - y = rand(10,)+1j*rand(10,) - assert_array_equal(y.real,real(y)) - - -class TestImag(TestCase): - - def test_real(self): - y = rand(10,) - assert_array_equal(0,imag(y)) - - def test_cmplx(self): - y = rand(10,)+1j*rand(10,) - assert_array_equal(y.imag,imag(y)) - - -class TestIscomplex(TestCase): - - def test_fail(self): - z = array([-1,0,1]) - res = iscomplex(z) - assert(not sometrue(res,axis=0)) - def test_pass(self): - z = array([-1j,1,0]) - res = iscomplex(z) - assert_array_equal(res,[1,0,0]) - - -class TestIsreal(TestCase): - - def test_pass(self): - z = array([-1,0,1j]) - res = isreal(z) - assert_array_equal(res,[1,1,0]) - def test_fail(self): - z = array([-1j,1,0]) - res = isreal(z) - assert_array_equal(res,[0,1,1]) - - -class TestIscomplexobj(TestCase): - - def test_basic(self): - z = array([-1,0,1]) - assert(not iscomplexobj(z)) - z = array([-1j,0,-1]) - assert(iscomplexobj(z)) - - - -class TestIsrealobj(TestCase): - def test_basic(self): - z = array([-1,0,1]) - assert(isrealobj(z)) - z = array([-1j,0,-1]) - assert(not isrealobj(z)) - - -class TestIsnan(TestCase): - - def test_goodvalues(self): - z = array((-1.,0.,1.)) - res = isnan(z) == 0 - assert_all(alltrue(res,axis=0)) - - def test_posinf(self): - olderr = seterr(divide='ignore') - try: - assert_all(isnan(array((1.,))/0.) == 0) - finally: - seterr(**olderr) - - def test_neginf(self): - olderr = seterr(divide='ignore') - try: - assert_all(isnan(array((-1.,))/0.) 
== 0) - finally: - seterr(**olderr) - - def test_ind(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isnan(array((0.,))/0.) == 1) - finally: - seterr(**olderr) - - #def test_qnan(self): log(-1) return pi*j now - # assert_all(isnan(log(-1.)) == 1) - - def test_integer(self): - assert_all(isnan(1) == 0) - - def test_complex(self): - assert_all(isnan(1+1j) == 0) - - def test_complex1(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isnan(array(0+0j)/0.) == 1) - finally: - seterr(**olderr) - - -class TestIsfinite(TestCase): - - def test_goodvalues(self): - z = array((-1.,0.,1.)) - res = isfinite(z) == 1 - assert_all(alltrue(res,axis=0)) - - def test_posinf(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isfinite(array((1.,))/0.) == 0) - finally: - seterr(**olderr) - - def test_neginf(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isfinite(array((-1.,))/0.) == 0) - finally: - seterr(**olderr) - - def test_ind(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isfinite(array((0.,))/0.) == 0) - finally: - seterr(**olderr) - - #def test_qnan(self): - # assert_all(isfinite(log(-1.)) == 0) - - def test_integer(self): - assert_all(isfinite(1) == 1) - - def test_complex(self): - assert_all(isfinite(1+1j) == 1) - - def test_complex1(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isfinite(array(1+1j)/0.) == 0) - finally: - seterr(**olderr) - - -class TestIsinf(TestCase): - - def test_goodvalues(self): - z = array((-1.,0.,1.)) - res = isinf(z) == 0 - assert_all(alltrue(res,axis=0)) - - def test_posinf(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isinf(array((1.,))/0.) == 1) - finally: - seterr(**olderr) - - def test_posinf_scalar(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isinf(array(1.,)/0.) 
== 1) - finally: - seterr(**olderr) - - def test_neginf(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isinf(array((-1.,))/0.) == 1) - finally: - seterr(**olderr) - - def test_neginf_scalar(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isinf(array(-1.)/0.) == 1) - finally: - seterr(**olderr) - - def test_ind(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - assert_all(isinf(array((0.,))/0.) == 0) - finally: - seterr(**olderr) - - #def test_qnan(self): - # assert_all(isinf(log(-1.)) == 0) - # assert_all(isnan(log(-1.)) == 1) - - -class TestIsposinf(TestCase): - - def test_generic(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - vals = isposinf(array((-1.,0,1))/0.) - finally: - seterr(**olderr) - assert(vals[0] == 0) - assert(vals[1] == 0) - assert(vals[2] == 1) - - -class TestIsneginf(TestCase): - def test_generic(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - vals = isneginf(array((-1.,0,1))/0.) - finally: - seterr(**olderr) - assert(vals[0] == 1) - assert(vals[1] == 0) - assert(vals[2] == 0) - - -class TestNanToNum(TestCase): - - def test_generic(self): - olderr = seterr(divide='ignore', invalid='ignore') - try: - vals = nan_to_num(array((-1.,0,1))/0.) - finally: - seterr(**olderr) - assert_all(vals[0] < -1e10) and assert_all(isfinite(vals[0])) - assert(vals[1] == 0) - assert_all(vals[2] > 1e10) and assert_all(isfinite(vals[2])) - - def test_integer(self): - vals = nan_to_num(1) - assert_all(vals == 1) - - def test_complex_good(self): - vals = nan_to_num(1+1j) - assert_all(vals == 1+1j) - - def test_complex_bad(self): - v = 1+1j - olderr = seterr(divide='ignore', invalid='ignore') - try: - v += array(0+1.j)/0. - finally: - seterr(**olderr) - vals = nan_to_num(v) - # !! 
This is actually (unexpectedly) zero - assert_all(isfinite(vals)) - - def test_complex_bad2(self): - v = 1+1j - olderr = seterr(divide='ignore', invalid='ignore') - try: - v += array(-1+1.j)/0. - finally: - seterr(**olderr) - vals = nan_to_num(v) - assert_all(isfinite(vals)) - #assert_all(vals.imag > 1e10) and assert_all(isfinite(vals)) - # !! This is actually (unexpectedly) positive - # !! inf. Comment out for now, and see if it - # !! changes - #assert_all(vals.real < -1e10) and assert_all(isfinite(vals)) - - -class TestRealIfClose(TestCase): - - def test_basic(self): - a = rand(10) - b = real_if_close(a+1e-15j) - assert_all(isrealobj(b)) - assert_array_equal(a,b) - b = real_if_close(a+1e-7j) - assert_all(iscomplexobj(b)) - b = real_if_close(a+1e-7j,tol=1e-6) - assert_all(isrealobj(b)) - - -class TestArrayConversion(TestCase): - - def test_asfarray(self): - a = asfarray(array([1,2,3])) - assert_equal(a.__class__,ndarray) - assert issubdtype(a.dtype,float) - -class TestDateTimeData: - - @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") - def test_basic(self): - a = array(['1980-03-23'], dtype=datetime64) - assert_equal(datetime_data(a.dtype), (asbytes('us'), 1, 1, 1)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_ufunclike.py b/numpy-1.6.2/numpy/lib/tests/test_ufunclike.py deleted file mode 100644 index 29b47f2571..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_ufunclike.py +++ /dev/null @@ -1,60 +0,0 @@ -from numpy.testing import * -import numpy.core as nx -import numpy.lib.ufunclike as ufl -from numpy.testing.decorators import deprecated - -class TestUfunclike(TestCase): - - def test_isposinf(self): - a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) - out = nx.zeros(a.shape, bool) - tgt = nx.array([True, False, False, False, False, False]) - - res = ufl.isposinf(a) - assert_equal(res, tgt) - res = ufl.isposinf(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - 
def test_isneginf(self): - a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) - out = nx.zeros(a.shape, bool) - tgt = nx.array([False, True, False, False, False, False]) - - res = ufl.isneginf(a) - assert_equal(res, tgt) - res = ufl.isneginf(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - def test_fix(self): - a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) - out = nx.zeros(a.shape, float) - tgt = nx.array([[ 1., 1., 1., 1.], [-1., -1., -1., -1.]]) - - res = ufl.fix(a) - assert_equal(res, tgt) - res = ufl.fix(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - assert_equal(ufl.fix(3.14), 3) - - def test_fix_with_subclass(self): - class MyArray(nx.ndarray): - def __new__(cls, data, metadata=None): - res = nx.array(data, copy=True).view(cls) - res.metadata = metadata - return res - def __array_wrap__(self, obj, context=None): - obj.metadata = self.metadata - return obj - - a = nx.array([1.1, -1.1]) - m = MyArray(a, metadata='foo') - f = ufl.fix(m) - assert_array_equal(f, nx.array([1,-1])) - assert_(isinstance(f, MyArray)) - assert_equal(f.metadata, 'foo') - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/tests/test_utils.py b/numpy-1.6.2/numpy/lib/tests/test_utils.py deleted file mode 100644 index 6a09c6dbd9..0000000000 --- a/numpy-1.6.2/numpy/lib/tests/test_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -from numpy.testing import * -import numpy.lib.utils as utils -from numpy.lib import deprecate - -from StringIO import StringIO - -def test_lookfor(): - out = StringIO() - utils.lookfor('eigenvalue', module='numpy', output=out, - import_modules=False) - out = out.getvalue() - assert 'numpy.linalg.eig' in out - - -@deprecate -def old_func(self, x): - return x - -@deprecate(message="Rather use new_func2") -def old_func2(self, x): - return x - -def old_func3(self, x): - return x -new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") - -def test_deprecate_decorator(): - assert 
'deprecated' in old_func.__doc__ - -def test_deprecate_decorator_message(): - assert 'Rather use new_func2' in old_func2.__doc__ - -def test_deprecate_fn(): - assert 'old_func3' in new_func3.__doc__ - assert 'new_func3' in new_func3.__doc__ - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/lib/twodim_base.py b/numpy-1.6.2/numpy/lib/twodim_base.py deleted file mode 100644 index 27424c9962..0000000000 --- a/numpy-1.6.2/numpy/lib/twodim_base.py +++ /dev/null @@ -1,890 +0,0 @@ -""" Basic functions for manipulating 2d arrays - -""" - -__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu', - 'tril','vander','histogram2d','mask_indices', - 'tril_indices','tril_indices_from','triu_indices','triu_indices_from', - ] - -from numpy.core.numeric import asanyarray, equal, subtract, arange, \ - zeros, greater_equal, multiply, ones, asarray, alltrue, where, \ - empty - -def fliplr(m): - """ - Flip array in the left/right direction. - - Flip the entries in each row in the left/right direction. - Columns are preserved, but appear in a different order than before. - - Parameters - ---------- - m : array_like - Input array. - - Returns - ------- - f : ndarray - A view of `m` with the columns reversed. Since a view - is returned, this operation is :math:`\\mathcal O(1)`. - - See Also - -------- - flipud : Flip array in the up/down direction. - rot90 : Rotate array counterclockwise. - - Notes - ----- - Equivalent to A[:,::-1]. Does not require the array to be - two-dimensional. - - Examples - -------- - >>> A = np.diag([1.,2.,3.]) - >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) - >>> np.fliplr(A) - array([[ 0., 0., 1.], - [ 0., 2., 0.], - [ 3., 0., 0.]]) - - >>> A = np.random.randn(2,3,5) - >>> np.all(np.fliplr(A)==A[:,::-1,...]) - True - - """ - m = asanyarray(m) - if m.ndim < 2: - raise ValueError("Input must be >= 2-d.") - return m[:, ::-1] - -def flipud(m): - """ - Flip array in the up/down direction. 
- - Flip the entries in each column in the up/down direction. - Rows are preserved, but appear in a different order than before. - - Parameters - ---------- - m : array_like - Input array. - - Returns - ------- - out : array_like - A view of `m` with the rows reversed. Since a view is - returned, this operation is :math:`\\mathcal O(1)`. - - See Also - -------- - fliplr : Flip array in the left/right direction. - rot90 : Rotate array counterclockwise. - - Notes - ----- - Equivalent to ``A[::-1,...]``. - Does not require the array to be two-dimensional. - - Examples - -------- - >>> A = np.diag([1.0, 2, 3]) - >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) - >>> np.flipud(A) - array([[ 0., 0., 3.], - [ 0., 2., 0.], - [ 1., 0., 0.]]) - - >>> A = np.random.randn(2,3,5) - >>> np.all(np.flipud(A)==A[::-1,...]) - True - - >>> np.flipud([1,2]) - array([2, 1]) - - """ - m = asanyarray(m) - if m.ndim < 1: - raise ValueError("Input must be >= 1-d.") - return m[::-1,...] - -def rot90(m, k=1): - """ - Rotate an array by 90 degrees in the counter-clockwise direction. - - The first two dimensions are rotated; therefore, the array must be at - least 2-D. - - Parameters - ---------- - m : array_like - Array of two or more dimensions. - k : integer - Number of times the array is rotated by 90 degrees. - - Returns - ------- - y : ndarray - Rotated array. - - See Also - -------- - fliplr : Flip an array horizontally. - flipud : Flip an array vertically. 
- - Examples - -------- - >>> m = np.array([[1,2],[3,4]], int) - >>> m - array([[1, 2], - [3, 4]]) - >>> np.rot90(m) - array([[2, 4], - [1, 3]]) - >>> np.rot90(m, 2) - array([[4, 3], - [2, 1]]) - - """ - m = asanyarray(m) - if m.ndim < 2: - raise ValueError("Input must >= 2-d.") - k = k % 4 - if k == 0: - return m - elif k == 1: - return fliplr(m).swapaxes(0,1) - elif k == 2: - return fliplr(flipud(m)) - else: - # k == 3 - return fliplr(m.swapaxes(0,1)) - -def eye(N, M=None, k=0, dtype=float): - """ - Return a 2-D array with ones on the diagonal and zeros elsewhere. - - Parameters - ---------- - N : int - Number of rows in the output. - M : int, optional - Number of columns in the output. If None, defaults to `N`. - k : int, optional - Index of the diagonal: 0 (the default) refers to the main diagonal, - a positive value refers to an upper diagonal, and a negative value - to a lower diagonal. - dtype : data-type, optional - Data-type of the returned array. - - Returns - ------- - I : ndarray of shape (N,M) - An array where all elements are equal to zero, except for the `k`-th - diagonal, whose values are equal to one. - - See Also - -------- - identity : (almost) equivalent function - diag : diagonal 2-D array from a 1-D array specified by the user. - - Examples - -------- - >>> np.eye(2, dtype=int) - array([[1, 0], - [0, 1]]) - >>> np.eye(3, k=1) - array([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) - - """ - if M is None: - M = N - m = zeros((N, M), dtype=dtype) - if k >= M: - return m - if k >= 0: - i = k - else: - i = (-k) * M - m[:M-k].flat[i::M+1] = 1 - return m - -def diag(v, k=0): - """ - Extract a diagonal or construct a diagonal array. - - Parameters - ---------- - v : array_like - If `v` is a 2-D array, return a copy of its `k`-th diagonal. - If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th - diagonal. - k : int, optional - Diagonal in question. The default is 0. 
Use `k>0` for diagonals - above the main diagonal, and `k<0` for diagonals below the main - diagonal. - - Returns - ------- - out : ndarray - The extracted diagonal or constructed diagonal array. - - See Also - -------- - diagonal : Return specified diagonals. - diagflat : Create a 2-D array with the flattened input as a diagonal. - trace : Sum along diagonals. - triu : Upper triangle of an array. - tril : Lower triange of an array. - - Examples - -------- - >>> x = np.arange(9).reshape((3,3)) - >>> x - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - - >>> np.diag(x) - array([0, 4, 8]) - >>> np.diag(x, k=1) - array([1, 5]) - >>> np.diag(x, k=-1) - array([3, 7]) - - >>> np.diag(np.diag(x)) - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 8]]) - - """ - v = asarray(v) - s = v.shape - if len(s) == 1: - n = s[0]+abs(k) - res = zeros((n,n), v.dtype) - if k >= 0: - i = k - else: - i = (-k) * n - res[:n-k].flat[i::n+1] = v - return res - elif len(s) == 2: - if k >= s[1]: - return empty(0, dtype=v.dtype) - if v.flags.f_contiguous: - # faster slicing - v, k, s = v.T, -k, s[::-1] - if k >= 0: - i = k - else: - i = (-k) * s[1] - return v[:s[1]-k].flat[i::s[1]+1] - else: - raise ValueError("Input must be 1- or 2-d.") - -def diagflat(v, k=0): - """ - Create a two-dimensional array with the flattened input as a diagonal. - - Parameters - ---------- - v : array_like - Input data, which is flattened and set as the `k`-th - diagonal of the output. - k : int, optional - Diagonal to set; 0, the default, corresponds to the "main" diagonal, - a positive (negative) `k` giving the number of the diagonal above - (below) the main. - - Returns - ------- - out : ndarray - The 2-D output array. - - See Also - -------- - diag : MATLAB work-alike for 1-D and 2-D arrays. - diagonal : Return specified diagonals. - trace : Sum along diagonals. 
- - Examples - -------- - >>> np.diagflat([[1,2], [3,4]]) - array([[1, 0, 0, 0], - [0, 2, 0, 0], - [0, 0, 3, 0], - [0, 0, 0, 4]]) - - >>> np.diagflat([1,2], 1) - array([[0, 1, 0], - [0, 0, 2], - [0, 0, 0]]) - - """ - try: - wrap = v.__array_wrap__ - except AttributeError: - wrap = None - v = asarray(v).ravel() - s = len(v) - n = s + abs(k) - res = zeros((n,n), v.dtype) - if (k >= 0): - i = arange(0,n-k) - fi = i+k+i*n - else: - i = arange(0,n+k) - fi = i+(i-k)*n - res.flat[fi] = v - if not wrap: - return res - return wrap(res) - -def tri(N, M=None, k=0, dtype=float): - """ - An array with ones at and below the given diagonal and zeros elsewhere. - - Parameters - ---------- - N : int - Number of rows in the array. - M : int, optional - Number of columns in the array. - By default, `M` is taken equal to `N`. - k : int, optional - The sub-diagonal at and below which the array is filled. - `k` = 0 is the main diagonal, while `k` < 0 is below it, - and `k` > 0 is above. The default is 0. - dtype : dtype, optional - Data type of the returned array. The default is float. - - Returns - ------- - T : ndarray of shape (N, M) - Array with its lower triangle filled with ones and zero elsewhere; - in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise. - - Examples - -------- - >>> np.tri(3, 5, 2, dtype=int) - array([[1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1]]) - - >>> np.tri(3, 5, -1) - array([[ 0., 0., 0., 0., 0.], - [ 1., 0., 0., 0., 0.], - [ 1., 1., 0., 0., 0.]]) - - """ - if M is None: - M = N - m = greater_equal(subtract.outer(arange(N), arange(M)),-k) - return m.astype(dtype) - -def tril(m, k=0): - """ - Lower triangle of an array. - - Return a copy of an array with elements above the `k`-th diagonal zeroed. - - Parameters - ---------- - m : array_like, shape (M, N) - Input array. - k : int, optional - Diagonal above which to zero elements. `k = 0` (the default) is the - main diagonal, `k < 0` is below it and `k > 0` is above. 
- - Returns - ------- - L : ndarray, shape (M, N) - Lower triangle of `m`, of same shape and data-type as `m`. - - See Also - -------- - triu : same thing, only for the upper triangle - - Examples - -------- - >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 0, 0, 0], - [ 4, 0, 0], - [ 7, 8, 0], - [10, 11, 12]]) - - """ - m = asanyarray(m) - out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype),m) - return out - -def triu(m, k=0): - """ - Upper triangle of an array. - - Return a copy of a matrix with the elements below the `k`-th diagonal - zeroed. - - Please refer to the documentation for `tril` for further details. - - See Also - -------- - tril : lower triangle of an array - - Examples - -------- - >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 1, 2, 3], - [ 4, 5, 6], - [ 0, 8, 9], - [ 0, 0, 12]]) - - """ - m = asanyarray(m) - out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, dtype=m.dtype)), m) - return out - -# borrowed from John Hunter and matplotlib -def vander(x, N=None): - """ - Generate a Van der Monde matrix. - - The columns of the output matrix are decreasing powers of the input - vector. Specifically, the `i`-th output column is the input vector - raised element-wise to the power of ``N - i - 1``. Such a matrix with - a geometric progression in each row is named for Alexandre-Theophile - Vandermonde. - - Parameters - ---------- - x : array_like - 1-D input array. - N : int, optional - Order of (number of columns in) the output. If `N` is not specified, - a square array is returned (``N = len(x)``). - - Returns - ------- - out : ndarray - Van der Monde matrix of order `N`. The first column is ``x^(N-1)``, - the second ``x^(N-2)`` and so forth. 
- - Examples - -------- - >>> x = np.array([1, 2, 3, 5]) - >>> N = 3 - >>> np.vander(x, N) - array([[ 1, 1, 1], - [ 4, 2, 1], - [ 9, 3, 1], - [25, 5, 1]]) - - >>> np.column_stack([x**(N-1-i) for i in range(N)]) - array([[ 1, 1, 1], - [ 4, 2, 1], - [ 9, 3, 1], - [25, 5, 1]]) - - >>> x = np.array([1, 2, 3, 5]) - >>> np.vander(x) - array([[ 1, 1, 1, 1], - [ 8, 4, 2, 1], - [ 27, 9, 3, 1], - [125, 25, 5, 1]]) - - The determinant of a square Vandermonde matrix is the product - of the differences between the values of the input vector: - - >>> np.linalg.det(np.vander(x)) - 48.000000000000043 - >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) - 48 - - """ - x = asarray(x) - if N is None: - N=len(x) - X = ones( (len(x),N), x.dtype) - for i in range(N - 1): - X[:,i] = x**(N - i - 1) - return X - - -def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): - """ - Compute the bi-dimensional histogram of two data samples. - - Parameters - ---------- - x : array_like, shape(N,) - A sequence of values to be histogrammed along the first dimension. - y : array_like, shape(M,) - A sequence of values to be histogrammed along the second dimension. - bins : int or [int, int] or array_like or [array, array], optional - The bin specification: - - * If int, the number of bins for the two dimensions (nx=ny=bins). - * If [int, int], the number of bins in each dimension (nx, ny = bins). - * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). - * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). - - range : array_like, shape(2,2), optional - The leftmost and rightmost edges of the bins along each dimension - (if not specified explicitly in the `bins` parameters): - ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range - will be considered outliers and not tallied in the histogram. - normed : bool, optional - If False, returns the number of samples in each bin. If True, returns - the bin density, i.e. 
the bin count divided by the bin area. - weights : array_like, shape(N,), optional - An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights - are normalized to 1 if `normed` is True. If `normed` is False, the - values of the returned histogram are equal to the sum of the weights - belonging to the samples falling into each bin. - - Returns - ------- - H : ndarray, shape(nx, ny) - The bi-dimensional histogram of samples `x` and `y`. Values in `x` - are histogrammed along the first dimension and values in `y` are - histogrammed along the second dimension. - xedges : ndarray, shape(nx,) - The bin edges along the first dimension. - yedges : ndarray, shape(ny,) - The bin edges along the second dimension. - - See Also - -------- - histogram: 1D histogram - histogramdd: Multidimensional histogram - - Notes - ----- - When `normed` is True, then the returned histogram is the sample density, - defined such that: - - .. math:: - \\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1 - - where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i` - the area of bin `{i,j}`. - - Please note that the histogram does not follow the Cartesian convention - where `x` values are on the abcissa and `y` values on the ordinate axis. - Rather, `x` is histogrammed along the first dimension of the array - (vertical), and `y` along the second dimension of the array (horizontal). - This ensures compatibility with `histogramdd`. 
- - Examples - -------- - >>> x, y = np.random.randn(2, 100) - >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8)) - >>> H.shape, xedges.shape, yedges.shape - ((5, 8), (6,), (9,)) - - We can now use the Matplotlib to visualize this 2-dimensional histogram: - - >>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]] - >>> import matplotlib.pyplot as plt - >>> plt.imshow(H, extent=extent, interpolation='nearest') - - >>> plt.colorbar() - - >>> plt.show() - - """ - from numpy import histogramdd - - try: - N = len(bins) - except TypeError: - N = 1 - - if N != 1 and N != 2: - xedges = yedges = asarray(bins, float) - bins = [xedges, yedges] - hist, edges = histogramdd([x,y], bins, range, normed, weights) - return hist, edges[0], edges[1] - - -def mask_indices(n, mask_func, k=0): - """ - Return the indices to access (n, n) arrays, given a masking function. - - Assume `mask_func` is a function that, for a square array a of size - ``(n, n)`` with a possible offset argument `k`, when called as - ``mask_func(a, k)`` returns a new array with zeros in certain locations - (functions like `triu` or `tril` do precisely this). Then this function - returns the indices where the non-zero values would be located. - - Parameters - ---------- - n : int - The returned indices will be valid to access arrays of shape (n, n). - mask_func : callable - A function whose call signature is similar to that of `triu`, `tril`. - That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. - `k` is an optional argument to the function. - k : scalar - An optional argument which is passed through to `mask_func`. Functions - like `triu`, `tril` take a second argument that is interpreted as an - offset. - - Returns - ------- - indices : tuple of arrays. - The `n` arrays of indices corresponding to the locations where - ``mask_func(np.ones((n, n)), k)`` is True. - - See Also - -------- - triu, tril, triu_indices, tril_indices - - Notes - ----- - .. 
versionadded:: 1.4.0 - - Examples - -------- - These are the indices that would allow you to access the upper triangular - part of any 3x3 array: - - >>> iu = np.mask_indices(3, np.triu) - - For example, if `a` is a 3x3 array: - - >>> a = np.arange(9).reshape(3, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - >>> a[iu] - array([0, 1, 2, 4, 5, 8]) - - An offset can be passed also to the masking function. This gets us the - indices starting on the first diagonal right of the main one: - - >>> iu1 = np.mask_indices(3, np.triu, 1) - - with which we now extract only three elements: - - >>> a[iu1] - array([1, 2, 5]) - - """ - m = ones((n,n), int) - a = mask_func(m, k) - return where(a != 0) - - -def tril_indices(n, k=0): - """ - Return the indices for the lower-triangle of an (n, n) array. - - Parameters - ---------- - n : int - The row dimension of the square arrays for which the returned - indices will be valid. - k : int, optional - Diagonal offset (see `tril` for details). - - Returns - ------- - inds : tuple of arrays - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. - - See also - -------- - triu_indices : similar function, for upper-triangular. - mask_indices : generic function accepting an arbitrary mask function. - tril, triu - - Notes - ----- - .. 
versionadded:: 1.4.0 - - Examples - -------- - Compute two different sets of indices to access 4x4 arrays, one for the - lower triangular part starting at the main diagonal, and one starting two - diagonals further right: - - >>> il1 = np.tril_indices(4) - >>> il2 = np.tril_indices(4, 2) - - Here is how they can be used with a sample array: - - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - - Both for indexing: - - >>> a[il1] - array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) - - And for assigning values: - - >>> a[il1] = -1 - >>> a - array([[-1, 1, 2, 3], - [-1, -1, 6, 7], - [-1, -1, -1, 11], - [-1, -1, -1, -1]]) - - These cover almost the whole array (two diagonals right of the main one): - - >>> a[il2] = -10 - >>> a - array([[-10, -10, -10, 3], - [-10, -10, -10, -10], - [-10, -10, -10, -10], - [-10, -10, -10, -10]]) - - """ - return mask_indices(n, tril, k) - - -def tril_indices_from(arr, k=0): - """ - Return the indices for the lower-triangle of arr. - - See `tril_indices` for full details. - - Parameters - ---------- - arr : array_like - The indices will be valid for square arrays whose dimensions are - the same as arr. - k : int, optional - Diagonal offset (see `tril` for details). - - See Also - -------- - tril_indices, tril - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): - raise ValueError("input array must be 2-d and square") - return tril_indices(arr.shape[0], k) - - -def triu_indices(n, k=0): - """ - Return the indices for the upper-triangle of an (n, n) array. - - Parameters - ---------- - n : int - The size of the arrays for which the returned indices will - be valid. - k : int, optional - Diagonal offset (see `triu` for details). - - Returns - ------- - inds : tuple of arrays - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. 
- - See also - -------- - tril_indices : similar function, for lower-triangular. - mask_indices : generic function accepting an arbitrary mask function. - triu, tril - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - Compute two different sets of indices to access 4x4 arrays, one for the - upper triangular part starting at the main diagonal, and one starting two - diagonals further right: - - >>> iu1 = np.triu_indices(4) - >>> iu2 = np.triu_indices(4, 2) - - Here is how they can be used with a sample array: - - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - - Both for indexing: - - >>> a[iu1] - array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) - - And for assigning values: - - >>> a[iu1] = -1 - >>> a - array([[-1, -1, -1, -1], - [ 4, -1, -1, -1], - [ 8, 9, -1, -1], - [12, 13, 14, -1]]) - - These cover only a small part of the whole array (two diagonals right - of the main one): - - >>> a[iu2] = -10 - >>> a - array([[ -1, -1, -10, -10], - [ 4, -1, -1, -10], - [ 8, 9, -1, -1], - [ 12, 13, 14, -1]]) - - """ - return mask_indices(n, triu, k) - - -def triu_indices_from(arr, k=0): - """ - Return the indices for the upper-triangle of an (n, n) array. - - See `triu_indices` for full details. - - Parameters - ---------- - arr : array_like - The indices will be valid for square arrays whose dimensions are - the same as arr. - k : int, optional - Diagonal offset (see `triu` for details). - - See Also - -------- - triu_indices, triu - - Notes - ----- - .. 
versionadded:: 1.4.0 - - """ - if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): - raise ValueError("input array must be 2-d and square") - return triu_indices(arr.shape[0],k) - diff --git a/numpy-1.6.2/numpy/lib/type_check.py b/numpy-1.6.2/numpy/lib/type_check.py deleted file mode 100644 index 24701574ae..0000000000 --- a/numpy-1.6.2/numpy/lib/type_check.py +++ /dev/null @@ -1,648 +0,0 @@ -## Automatically adapted for numpy Sep 19, 2005 by convertcode.py - -__all__ = ['iscomplexobj','isrealobj','imag','iscomplex', - 'isreal','nan_to_num','real','real_if_close', - 'typename','asfarray','mintypecode','asscalar', - 'common_type', 'datetime_data'] - -import numpy.core.numeric as _nx -from numpy.core.numeric import asarray, asanyarray, array, isnan, \ - obj2sctype, zeros -from numpy.core.multiarray import METADATA_DTSTR -from ufunclike import isneginf, isposinf - -_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' - -def mintypecode(typechars,typeset='GDFgdf',default='d'): - """ - Return the character for the minimum-size type to which given types can - be safely cast. - - The returned type character must represent the smallest size dtype such - that an array of the returned type can handle the data from an array of - all types in `typechars` (or if `typechars` is an array, then its - dtype.char). - - Parameters - ---------- - typechars : list of str or array_like - If a list of strings, each string should represent a dtype. - If array_like, the character representation of the array dtype is used. - typeset : str or list of str, optional - The set of characters that the returned character is chosen from. - The default set is 'GDFgdf'. - default : str, optional - The default character, this is returned if none of the characters in - `typechars` matches a character in `typeset`. - - Returns - ------- - typechar : str - The character representing the minimum-size type that was found. 
- - See Also - -------- - dtype, sctype2char, maximum_sctype - - Examples - -------- - >>> np.mintypecode(['d', 'f', 'S']) - 'd' - >>> x = np.array([1.1, 2-3.j]) - >>> np.mintypecode(x) - 'D' - - >>> np.mintypecode('abceh', default='G') - 'G' - - """ - typecodes = [(type(t) is type('') and t) or asarray(t).dtype.char\ - for t in typechars] - intersection = [t for t in typecodes if t in typeset] - if not intersection: - return default - if 'F' in intersection and 'd' in intersection: - return 'D' - l = [] - for t in intersection: - i = _typecodes_by_elsize.index(t) - l.append((i,t)) - l.sort() - return l[0][1] - -def asfarray(a, dtype=_nx.float_): - """ - Return an array converted to a float type. - - Parameters - ---------- - a : array_like - The input array. - dtype : str or dtype object, optional - Float type code to coerce input array `a`. If `dtype` is one of the - 'int' dtypes, it is replaced with float64. - - Returns - ------- - out : ndarray - The input `a` as a float ndarray. - - Examples - -------- - >>> np.asfarray([2, 3]) - array([ 2., 3.]) - >>> np.asfarray([2, 3], dtype='float') - array([ 2., 3.]) - >>> np.asfarray([2, 3], dtype='int8') - array([ 2., 3.]) - - """ - dtype = _nx.obj2sctype(dtype) - if not issubclass(dtype, _nx.inexact): - dtype = _nx.float_ - return asarray(a,dtype=dtype) - -def real(val): - """ - Return the real part of the elements of the array. - - Parameters - ---------- - val : array_like - Input array. - - Returns - ------- - out : ndarray - Output array. If `val` is real, the type of `val` is used for the - output. If `val` has complex elements, the returned type is float. 
- - See Also - -------- - real_if_close, imag, angle - - Examples - -------- - >>> a = np.array([1+2j, 3+4j, 5+6j]) - >>> a.real - array([ 1., 3., 5.]) - >>> a.real = 9 - >>> a - array([ 9.+2.j, 9.+4.j, 9.+6.j]) - >>> a.real = np.array([9, 8, 7]) - >>> a - array([ 9.+2.j, 8.+4.j, 7.+6.j]) - - """ - return asanyarray(val).real - -def imag(val): - """ - Return the imaginary part of the elements of the array. - - Parameters - ---------- - val : array_like - Input array. - - Returns - ------- - out : ndarray - Output array. If `val` is real, the type of `val` is used for the - output. If `val` has complex elements, the returned type is float. - - See Also - -------- - real, angle, real_if_close - - Examples - -------- - >>> a = np.array([1+2j, 3+4j, 5+6j]) - >>> a.imag - array([ 2., 4., 6.]) - >>> a.imag = np.array([8, 10, 12]) - >>> a - array([ 1. +8.j, 3.+10.j, 5.+12.j]) - - """ - return asanyarray(val).imag - -def iscomplex(x): - """ - Returns a bool array, where True if input element is complex. - - What is tested is whether the input has a non-zero imaginary part, not if - the input type is complex. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray of bools - Output array. - - See Also - -------- - isreal - iscomplexobj : Return True if x is a complex type or an array of complex - numbers. - - Examples - -------- - >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([ True, False, False, False, False, True], dtype=bool) - - """ - ax = asanyarray(x) - if issubclass(ax.dtype.type, _nx.complexfloating): - return ax.imag != 0 - res = zeros(ax.shape, bool) - return +res # convet to array-scalar if needed - -def isreal(x): - """ - Returns a bool array, where True if input element is real. - - If element has complex type with zero complex part, the return value - for that element is True. - - Parameters - ---------- - x : array_like - Input array. 
- - Returns - ------- - out : ndarray, bool - Boolean array of same shape as `x`. - - See Also - -------- - iscomplex - isrealobj : Return True if x is not a complex type. - - Examples - -------- - >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([False, True, True, True, True, False], dtype=bool) - - """ - return imag(x) == 0 - -def iscomplexobj(x): - """ - Return True if x is a complex type or an array of complex numbers. - - The type of the input is checked, not the value. So even if the input - has an imaginary part equal to zero, `iscomplexobj` evaluates to True - if the data type is complex. - - Parameters - ---------- - x : any - The input can be of any type and shape. - - Returns - ------- - y : bool - The return value, True if `x` is of a complex type. - - See Also - -------- - isrealobj, iscomplex - - Examples - -------- - >>> np.iscomplexobj(1) - False - >>> np.iscomplexobj(1+0j) - True - >>> np.iscomplexobj([3, 1+0j, True]) - True - - """ - return issubclass( asarray(x).dtype.type, _nx.complexfloating) - -def isrealobj(x): - """ - Return True if x is a not complex type or an array of complex numbers. - - The type of the input is checked, not the value. So even if the input - has an imaginary part equal to zero, `isrealobj` evaluates to False - if the data type is complex. - - Parameters - ---------- - x : any - The input can be of any type and shape. - - Returns - ------- - y : bool - The return value, False if `x` is of a complex type. - - See Also - -------- - iscomplexobj, isreal - - Examples - -------- - >>> np.isrealobj(1) - True - >>> np.isrealobj(1+0j) - False - >>> np.isrealobj([3, 1+0j, True]) - False - - """ - return not issubclass( asarray(x).dtype.type, _nx.complexfloating) - -#----------------------------------------------------------------------------- - -def _getmaxmin(t): - from numpy.core import getlimits - f = getlimits.finfo(t) - return f.max, f.min - -def nan_to_num(x): - """ - Replace nan with zero and inf with finite numbers. 
- - Returns an array or scalar replacing Not a Number (NaN) with zero, - (positive) infinity with a very large number and negative infinity - with a very small (or negative) number. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - out : ndarray, float - Array with the same shape as `x` and dtype of the element in `x` with - the greatest precision. NaN is replaced by zero, and infinity - (-infinity) is replaced by the largest (smallest or most negative) - floating point value that fits in the output dtype. All finite numbers - are upcast to the output dtype (default float64). - - See Also - -------- - isinf : Shows which elements are negative or negative infinity. - isneginf : Shows which elements are negative infinity. - isposinf : Shows which elements are positive infinity. - isnan : Shows which elements are Not a Number (NaN). - isfinite : Shows which elements are finite (not NaN, not infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. 
- - - Examples - -------- - >>> np.set_printoptions(precision=8) - >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) - >>> np.nan_to_num(x) - array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, - -1.28000000e+002, 1.28000000e+002]) - - """ - try: - t = x.dtype.type - except AttributeError: - t = obj2sctype(type(x)) - if issubclass(t, _nx.complexfloating): - return nan_to_num(x.real) + 1j * nan_to_num(x.imag) - else: - try: - y = x.copy() - except AttributeError: - y = array(x) - if not issubclass(t, _nx.integer): - if not y.shape: - y = array([x]) - scalar = True - else: - scalar = False - are_inf = isposinf(y) - are_neg_inf = isneginf(y) - are_nan = isnan(y) - maxf, minf = _getmaxmin(y.dtype.type) - y[are_nan] = 0 - y[are_inf] = maxf - y[are_neg_inf] = minf - if scalar: - y = y[0] - return y - -#----------------------------------------------------------------------------- - -def real_if_close(a,tol=100): - """ - If complex input returns a real array if complex parts are close to zero. - - "Close to zero" is defined as `tol` * (machine epsilon of the type for - `a`). - - Parameters - ---------- - a : array_like - Input array. - tol : float - Tolerance in machine epsilons for the complex part of the elements - in the array. - - Returns - ------- - out : ndarray - If `a` is real, the type of `a` is used for the output. If `a` - has complex elements, the returned type is float. - - See Also - -------- - real, imag, angle - - Notes - ----- - Machine epsilon varies from machine to machine and between data types - but Python floats on most platforms have a machine epsilon equal to - 2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print - out the machine epsilon for floats. 
- - Examples - -------- - >>> np.finfo(np.float).eps - 2.2204460492503131e-16 - - >>> np.real_if_close([2.1 + 4e-14j], tol=1000) - array([ 2.1]) - >>> np.real_if_close([2.1 + 4e-13j], tol=1000) - array([ 2.1 +4.00000000e-13j]) - - """ - a = asanyarray(a) - if not issubclass(a.dtype.type, _nx.complexfloating): - return a - if tol > 1: - from numpy.core import getlimits - f = getlimits.finfo(a.dtype.type) - tol = f.eps * tol - if _nx.allclose(a.imag, 0, atol=tol): - a = a.real - return a - - -def asscalar(a): - """ - Convert an array of size 1 to its scalar equivalent. - - Parameters - ---------- - a : ndarray - Input array of size 1. - - Returns - ------- - out : scalar - Scalar representation of `a`. The input data type is preserved. - - Examples - -------- - >>> np.asscalar(np.array([24])) - 24 - - """ - return a.item() - -#----------------------------------------------------------------------------- - -_namefromtype = {'S1' : 'character', - '?' : 'bool', - 'b' : 'signed char', - 'B' : 'unsigned char', - 'h' : 'short', - 'H' : 'unsigned short', - 'i' : 'integer', - 'I' : 'unsigned integer', - 'l' : 'long integer', - 'L' : 'unsigned long integer', - 'q' : 'long long integer', - 'Q' : 'unsigned long long integer', - 'f' : 'single precision', - 'd' : 'double precision', - 'g' : 'long precision', - 'F' : 'complex single precision', - 'D' : 'complex double precision', - 'G' : 'complex long double precision', - 'S' : 'string', - 'U' : 'unicode', - 'V' : 'void', - 'O' : 'object' - } - -def typename(char): - """ - Return a description for the given data type code. - - Parameters - ---------- - char : str - Data type code. - - Returns - ------- - out : str - Description of the input data type code. - - See Also - -------- - dtype, typecodes - - Examples - -------- - >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', - ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] - >>> for typechar in typechars: - ... 
print typechar, ' : ', np.typename(typechar) - ... - S1 : character - ? : bool - B : unsigned char - D : complex double precision - G : complex long double precision - F : complex single precision - I : unsigned integer - H : unsigned short - L : unsigned long integer - O : object - Q : unsigned long long integer - S : string - U : unicode - V : void - b : signed char - d : double precision - g : long precision - f : single precision - i : integer - h : short - l : long integer - q : long long integer - - """ - return _namefromtype[char] - -#----------------------------------------------------------------------------- - -#determine the "minimum common type" for a group of arrays. -array_type = [[_nx.single, _nx.double, _nx.longdouble], - [_nx.csingle, _nx.cdouble, _nx.clongdouble]] -array_precision = {_nx.single : 0, - _nx.double : 1, - _nx.longdouble : 2, - _nx.csingle : 0, - _nx.cdouble : 1, - _nx.clongdouble : 2} -def common_type(*arrays): - """ - Return a scalar type which is common to the input arrays. - - The return type will always be an inexact (i.e. floating point) scalar - type, even if all the arrays are integer arrays. If one of the inputs is - an integer array, the minimum precision type that is returned is a - 64-bit floating point dtype. - - All input arrays can be safely cast to the returned dtype without loss - of information. - - Parameters - ---------- - array1, array2, ... : ndarrays - Input arrays. - - Returns - ------- - out : data type code - Data type code. 
- - See Also - -------- - dtype, mintypecode - - Examples - -------- - >>> np.common_type(np.arange(2, dtype=np.float32)) - - >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) - - >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) - - - """ - is_complex = False - precision = 0 - for a in arrays: - t = a.dtype.type - if iscomplexobj(a): - is_complex = True - if issubclass(t, _nx.integer): - p = 1 - else: - p = array_precision.get(t, None) - if p is None: - raise TypeError("can't get common type for non-numeric array") - precision = max(precision, p) - if is_complex: - return array_type[1][precision] - else: - return array_type[0][precision] - -def datetime_data(dtype): - """Return (unit, numerator, denominator, events) from a datetime dtype - """ - try: - import ctypes - except ImportError: - raise RuntimeError, "Cannot access date-time internals without ctypes installed" - - if dtype.kind not in ['m','M']: - raise ValueError, "Not a date-time dtype" - - obj = dtype.metadata[METADATA_DTSTR] - class DATETIMEMETA(ctypes.Structure): - _fields_ = [('base', ctypes.c_int), - ('num', ctypes.c_int), - ('den', ctypes.c_int), - ('events', ctypes.c_int)] - - import sys - if sys.version_info[:2] >= (3, 0): - func = ctypes.pythonapi.PyCapsule_GetPointer - func.argtypes = [ctypes.py_object, ctypes.c_char_p] - func.restype = ctypes.c_void_p - result = func(ctypes.py_object(obj), ctypes.c_char_p(None)) - else: - func = ctypes.pythonapi.PyCObject_AsVoidPtr - func.argtypes = [ctypes.py_object] - func.restype = ctypes.c_void_p - result = func(ctypes.py_object(obj)) - result = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(DATETIMEMETA)) - - struct = result[0] - base = struct.base - - # FIXME: This needs to be kept consistent with enum in ndarrayobject.h - from numpy.core.multiarray import DATETIMEUNITS - obj = ctypes.py_object(DATETIMEUNITS) - if sys.version_info[:2] >= (2,7): - result = func(obj, ctypes.c_char_p(None)) - else: - result = 
func(obj) - _unitnum2name = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(ctypes.c_char_p)) - - return (_unitnum2name[base], struct.num, struct.den, struct.events) - diff --git a/numpy-1.6.2/numpy/lib/ufunclike.py b/numpy-1.6.2/numpy/lib/ufunclike.py deleted file mode 100644 index b51365f41a..0000000000 --- a/numpy-1.6.2/numpy/lib/ufunclike.py +++ /dev/null @@ -1,174 +0,0 @@ -""" -Module of functions that are like ufuncs in acting on arrays and optionally -storing results in an output array. -""" -__all__ = ['fix', 'isneginf', 'isposinf'] - -import numpy.core.numeric as nx - -def fix(x, y=None): - """ - Round to nearest integer towards zero. - - Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. - - Parameters - ---------- - x : array_like - An array of floats to be rounded - y : ndarray, optional - Output array - - Returns - ------- - out : ndarray of floats - The array of rounded numbers - - See Also - -------- - trunc, floor, ceil - around : Round to given number of decimals - - Examples - -------- - >>> np.fix(3.14) - 3.0 - >>> np.fix(3) - 3.0 - >>> np.fix([2.1, 2.9, -2.1, -2.9]) - array([ 2., 2., -2., -2.]) - - """ - x = nx.asanyarray(x) - y1 = nx.floor(x) - y2 = nx.ceil(x) - if y is None: - y = nx.asanyarray(y1) - y[...] = nx.where(x >= 0, y1, y2) - return y - -def isposinf(x, y=None): - """ - Test element-wise for positive infinity, return result as bool array. - - Parameters - ---------- - x : array_like - The input array. - y : array_like, optional - A boolean array with the same shape as `x` to store the result. - - Returns - ------- - y : ndarray - A boolean array with the same dimensions as the input. - If second argument is not supplied then a boolean array is returned - with values True where the corresponding element of the input is - positive infinity and values False where the element of the input is - not positive infinity. 
- - If a second argument is supplied the result is stored there. If the - type of that array is a numeric type the result is represented as zeros - and ones, if the type is boolean then as False and True. - The return value `y` is then a reference to that array. - - See Also - -------- - isinf, isneginf, isfinite, isnan - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). - - Errors result if the second argument is also supplied when `x` is a - scalar input, or if first and second arguments have different shapes. - - Examples - -------- - >>> np.isposinf(np.PINF) - array(True, dtype=bool) - >>> np.isposinf(np.inf) - array(True, dtype=bool) - >>> np.isposinf(np.NINF) - array(False, dtype=bool) - >>> np.isposinf([-np.inf, 0., np.inf]) - array([False, False, True], dtype=bool) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isposinf(x, y) - array([0, 0, 1]) - >>> y - array([0, 0, 1]) - - """ - if y is None: - x = nx.asarray(x) - y = nx.empty(x.shape, dtype=nx.bool_) - nx.logical_and(nx.isinf(x), ~nx.signbit(x), y) - return y - -def isneginf(x, y=None): - """ - Test element-wise for negative infinity, return result as bool array. - - Parameters - ---------- - x : array_like - The input array. - y : array_like, optional - A boolean array with the same shape and type as `x` to store the - result. - - Returns - ------- - y : ndarray - A boolean array with the same dimensions as the input. - If second argument is not supplied then a numpy boolean array is - returned with values True where the corresponding element of the - input is negative infinity and values False where the element of - the input is not negative infinity. - - If a second argument is supplied the result is stored there. If the - type of that array is a numeric type the result is represented as - zeros and ones, if the type is boolean then as False and True. The - return value `y` is then a reference to that array. 
- - See Also - -------- - isinf, isposinf, isnan, isfinite - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). - - Errors result if the second argument is also supplied when x is a scalar - input, or if first and second arguments have different shapes. - - Examples - -------- - >>> np.isneginf(np.NINF) - array(True, dtype=bool) - >>> np.isneginf(np.inf) - array(False, dtype=bool) - >>> np.isneginf(np.PINF) - array(False, dtype=bool) - >>> np.isneginf([-np.inf, 0., np.inf]) - array([ True, False, False], dtype=bool) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isneginf(x, y) - array([1, 0, 0]) - >>> y - array([1, 0, 0]) - - """ - if y is None: - x = nx.asarray(x) - y = nx.empty(x.shape, dtype=nx.bool_) - nx.logical_and(nx.isinf(x), nx.signbit(x), y) - return y diff --git a/numpy-1.6.2/numpy/lib/user_array.py b/numpy-1.6.2/numpy/lib/user_array.py deleted file mode 100644 index 43e9da3f2e..0000000000 --- a/numpy-1.6.2/numpy/lib/user_array.py +++ /dev/null @@ -1,217 +0,0 @@ -""" -Standard container-class for easy multiple-inheritance. -Try to inherit from the ndarray instead of using this class as this is not -complete. 
-""" - -from numpy.core import array, asarray, absolute, add, subtract, multiply, \ - divide, remainder, power, left_shift, right_shift, bitwise_and, \ - bitwise_or, bitwise_xor, invert, less, less_equal, not_equal, equal, \ - greater, greater_equal, shape, reshape, arange, sin, sqrt, transpose - -class container(object): - def __init__(self, data, dtype=None, copy=True): - self.array = array(data, dtype, copy=copy) - - def __repr__(self): - if len(self.shape) > 0: - return self.__class__.__name__+repr(self.array)[len("array"):] - else: - return self.__class__.__name__+"("+repr(self.array)+")" - - def __array__(self,t=None): - if t: return self.array.astype(t) - return self.array - - # Array as sequence - def __len__(self): return len(self.array) - - def __getitem__(self, index): - return self._rc(self.array[index]) - - def __getslice__(self, i, j): - return self._rc(self.array[i:j]) - - - def __setitem__(self, index, value): - self.array[index] = asarray(value,self.dtype) - def __setslice__(self, i, j, value): - self.array[i:j] = asarray(value,self.dtype) - - def __abs__(self): - return self._rc(absolute(self.array)) - def __neg__(self): - return self._rc(-self.array) - - def __add__(self, other): - return self._rc(self.array+asarray(other)) - __radd__ = __add__ - - def __iadd__(self, other): - add(self.array, other, self.array) - return self - - def __sub__(self, other): - return self._rc(self.array-asarray(other)) - def __rsub__(self, other): - return self._rc(asarray(other)-self.array) - def __isub__(self, other): - subtract(self.array, other, self.array) - return self - - def __mul__(self, other): - return self._rc(multiply(self.array,asarray(other))) - __rmul__ = __mul__ - def __imul__(self, other): - multiply(self.array, other, self.array) - return self - - def __div__(self, other): - return self._rc(divide(self.array,asarray(other))) - def __rdiv__(self, other): - return self._rc(divide(asarray(other),self.array)) - def __idiv__(self, other): - 
divide(self.array, other, self.array) - return self - - def __mod__(self, other): - return self._rc(remainder(self.array, other)) - def __rmod__(self, other): - return self._rc(remainder(other, self.array)) - def __imod__(self, other): - remainder(self.array, other, self.array) - return self - - def __divmod__(self, other): - return (self._rc(divide(self.array,other)), - self._rc(remainder(self.array, other))) - def __rdivmod__(self, other): - return (self._rc(divide(other, self.array)), - self._rc(remainder(other, self.array))) - - def __pow__(self,other): - return self._rc(power(self.array,asarray(other))) - def __rpow__(self,other): - return self._rc(power(asarray(other),self.array)) - def __ipow__(self,other): - power(self.array, other, self.array) - return self - - def __lshift__(self,other): - return self._rc(left_shift(self.array, other)) - def __rshift__(self,other): - return self._rc(right_shift(self.array, other)) - def __rlshift__(self,other): - return self._rc(left_shift(other, self.array)) - def __rrshift__(self,other): - return self._rc(right_shift(other, self.array)) - def __ilshift__(self,other): - left_shift(self.array, other, self.array) - return self - def __irshift__(self,other): - right_shift(self.array, other, self.array) - return self - - def __and__(self, other): - return self._rc(bitwise_and(self.array, other)) - def __rand__(self, other): - return self._rc(bitwise_and(other, self.array)) - def __iand__(self, other): - bitwise_and(self.array, other, self.array) - return self - - def __xor__(self, other): - return self._rc(bitwise_xor(self.array, other)) - def __rxor__(self, other): - return self._rc(bitwise_xor(other, self.array)) - def __ixor__(self, other): - bitwise_xor(self.array, other, self.array) - return self - - def __or__(self, other): - return self._rc(bitwise_or(self.array, other)) - def __ror__(self, other): - return self._rc(bitwise_or(other, self.array)) - def __ior__(self, other): - bitwise_or(self.array, other, self.array) 
- return self - - def __neg__(self): - return self._rc(-self.array) - def __pos__(self): - return self._rc(self.array) - def __abs__(self): - return self._rc(abs(self.array)) - def __invert__(self): - return self._rc(invert(self.array)) - - def _scalarfunc(self, func): - if len(self.shape) == 0: - return func(self[0]) - else: - raise TypeError, "only rank-0 arrays can be converted to Python scalars." - - def __complex__(self): return self._scalarfunc(complex) - def __float__(self): return self._scalarfunc(float) - def __int__(self): return self._scalarfunc(int) - def __long__(self): return self._scalarfunc(long) - def __hex__(self): return self._scalarfunc(hex) - def __oct__(self): return self._scalarfunc(oct) - - def __lt__(self,other): return self._rc(less(self.array,other)) - def __le__(self,other): return self._rc(less_equal(self.array,other)) - def __eq__(self,other): return self._rc(equal(self.array,other)) - def __ne__(self,other): return self._rc(not_equal(self.array,other)) - def __gt__(self,other): return self._rc(greater(self.array,other)) - def __ge__(self,other): return self._rc(greater_equal(self.array,other)) - - def copy(self): return self._rc(self.array.copy()) - - def tostring(self): return self.array.tostring() - - def byteswap(self): return self._rc(self.array.byteswap()) - - def astype(self, typecode): return self._rc(self.array.astype(typecode)) - - def _rc(self, a): - if len(shape(a)) == 0: return a - else: return self.__class__(a) - - def __array_wrap__(self, *args): - return self.__class__(args[0]) - - def __setattr__(self,attr,value): - if attr == 'array': - object.__setattr__(self, attr, value) - return - try: - self.array.__setattr__(attr, value) - except AttributeError: - object.__setattr__(self, attr, value) - - # Only called after other approaches fail. 
- def __getattr__(self,attr): - if (attr == 'array'): - return object.__getattribute__(self, attr) - return self.array.__getattribute__(attr) - -############################################################# -# Test of class container -############################################################# -if __name__ == '__main__': - temp=reshape(arange(10000),(100,100)) - - ua=container(temp) - # new object created begin test - print dir(ua) - print shape(ua),ua.shape # I have changed Numeric.py - - ua_small=ua[:3,:5] - print ua_small - ua_small[0,0]=10 # this did not change ua[0,0], which is not normal behavior - print ua_small[0,0],ua[0,0] - print sin(ua_small)/3.*6.+sqrt(ua_small**2) - print less(ua_small,103),type(less(ua_small,103)) - print type(ua_small*reshape(arange(15),shape(ua_small))) - print reshape(ua_small,(5,3)) - print transpose(ua_small) diff --git a/numpy-1.6.2/numpy/lib/utils.py b/numpy-1.6.2/numpy/lib/utils.py deleted file mode 100644 index 1e7364adc2..0000000000 --- a/numpy-1.6.2/numpy/lib/utils.py +++ /dev/null @@ -1,1145 +0,0 @@ -import os -import sys -import types -import re - -from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype -from numpy.core import product, ndarray, ufunc - -__all__ = ['issubclass_', 'issubsctype', 'issubdtype', - 'deprecate', 'deprecate_with_doc', 'get_numarray_include', - 'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds', - 'may_share_memory', 'safe_eval'] - -def get_include(): - """ - Return the directory that contains the NumPy \\*.h header files. - - Extension modules that need to compile against NumPy should use this - function to locate the appropriate include directory. - - Notes - ----- - When using ``distutils``, for example in ``setup.py``. - :: - - import numpy as np - ... - Extension('extension_name', ... - include_dirs=[np.get_include()]) - ... 
- - """ - import numpy - if numpy.show_config is None: - # running from numpy source directory - d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') - else: - # using installed numpy core headers - import numpy.core as core - d = os.path.join(os.path.dirname(core.__file__), 'include') - return d - -def get_numarray_include(type=None): - """ - Return the directory that contains the numarray \\*.h header files. - - Extension modules that need to compile against numarray should use this - function to locate the appropriate include directory. - - Parameters - ---------- - type : any, optional - If `type` is not None, the location of the NumPy headers is returned - as well. - - Returns - ------- - dirs : str or list of str - If `type` is None, `dirs` is a string containing the path to the - numarray headers. - If `type` is not None, `dirs` is a list of strings with first the - path(s) to the numarray headers, followed by the path to the NumPy - headers. - - Notes - ----- - Useful when using ``distutils``, for example in ``setup.py``. - :: - - import numpy as np - ... - Extension('extension_name', ... - include_dirs=[np.get_numarray_include()]) - ... - - """ - from numpy.numarray import get_numarray_include_dirs - include_dirs = get_numarray_include_dirs() - if type is None: - return include_dirs[0] - else: - return include_dirs + [get_include()] - - -if sys.version_info < (2, 4): - # Can't set __name__ in 2.3 - import new - def _set_function_name(func, name): - func = new.function(func.func_code, func.func_globals, - name, func.func_defaults, func.func_closure) - return func -else: - def _set_function_name(func, name): - func.__name__ = name - return func - -class _Deprecate(object): - """ - Decorator class to deprecate old functions. - - Refer to `deprecate` for details. 
- - See Also - -------- - deprecate - - """ - def __init__(self, old_name=None, new_name=None, message=None): - self.old_name = old_name - self.new_name = new_name - self.message = message - - def __call__(self, func, *args, **kwargs): - """ - Decorator call. Refer to ``decorate``. - - """ - old_name = self.old_name - new_name = self.new_name - message = self.message - - import warnings - if old_name is None: - try: - old_name = func.func_name - except AttributeError: - old_name = func.__name__ - if new_name is None: - depdoc = "`%s` is deprecated!" % old_name - else: - depdoc = "`%s` is deprecated, use `%s` instead!" % \ - (old_name, new_name) - - if message is not None: - depdoc += "\n" + message - - def newfunc(*args,**kwds): - """`arrayrange` is deprecated, use `arange` instead!""" - warnings.warn(depdoc, DeprecationWarning) - return func(*args, **kwds) - - newfunc = _set_function_name(newfunc, old_name) - doc = func.__doc__ - if doc is None: - doc = depdoc - else: - doc = '\n\n'.join([depdoc, doc]) - newfunc.__doc__ = doc - try: - d = func.__dict__ - except AttributeError: - pass - else: - newfunc.__dict__.update(d) - return newfunc - -def deprecate(*args, **kwargs): - """ - Issues a DeprecationWarning, adds warning to `old_name`'s - docstring, rebinds ``old_name.__name__`` and returns the new - function object. - - This function may also be used as a decorator. - - Parameters - ---------- - func : function - The function to be deprecated. - old_name : str, optional - The name of the function to be deprecated. Default is None, in which - case the name of `func` is used. - new_name : str, optional - The new name for the function. Default is None, in which case - the deprecation message is that `old_name` is deprecated. If given, - the deprecation message is that `old_name` is deprecated and `new_name` - should be used instead. - message : str, optional - Additional explanation of the deprecation. Displayed in the docstring - after the warning. 
- - Returns - ------- - old_func : function - The deprecated function. - - Examples - -------- - Note that ``olduint`` returns a value after printing Deprecation Warning: - - >>> olduint = np.deprecate(np.uint) - >>> olduint(6) - /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: - DeprecationWarning: uint32 is deprecated - warnings.warn(str1, DeprecationWarning) - 6 - - """ - # Deprecate may be run as a function or as a decorator - # If run as a function, we initialise the decorator class - # and execute its __call__ method. - - if args: - fn = args[0] - args = args[1:] - - # backward compatibility -- can be removed - # after next release - if 'newname' in kwargs: - kwargs['new_name'] = kwargs.pop('newname') - if 'oldname' in kwargs: - kwargs['old_name'] = kwargs.pop('oldname') - - return _Deprecate(*args, **kwargs)(fn) - else: - return _Deprecate(*args, **kwargs) - -deprecate_with_doc = lambda msg: _Deprecate(message=msg) - - -#-------------------------------------------- -# Determine if two arrays can share memory -#-------------------------------------------- - -def byte_bounds(a): - """ - Returns pointers to the end-points of an array. - - Parameters - ---------- - a : ndarray - Input array. It must conform to the Python-side of the array interface. - - Returns - ------- - (low, high) : tuple of 2 integers - The first integer is the first byte of the array, the second integer is - just past the last byte of the array. If `a` is not contiguous it - will not use every byte between the (`low`, `high`) values. 
- - Examples - -------- - >>> I = np.eye(2, dtype='f'); I.dtype - dtype('float32') - >>> low, high = np.byte_bounds(I) - >>> high - low == I.size*I.itemsize - True - >>> I = np.eye(2, dtype='G'); I.dtype - dtype('complex192') - >>> low, high = np.byte_bounds(I) - >>> high - low == I.size*I.itemsize - True - - """ - ai = a.__array_interface__ - a_data = ai['data'][0] - astrides = ai['strides'] - ashape = ai['shape'] - nd_a = len(ashape) - bytes_a = int(ai['typestr'][2:]) - - a_low = a_high = a_data - if astrides is None: # contiguous case - a_high += product(ashape, dtype=int)*bytes_a - else: - for shape, stride in zip(ashape, astrides): - if stride < 0: - a_low += (shape-1)*stride - else: - a_high += (shape-1)*stride - a_high += bytes_a - return a_low, a_high - - -def may_share_memory(a, b): - """ - Determine if two arrays can share memory - - The memory-bounds of a and b are computed. If they overlap then - this function returns True. Otherwise, it returns False. - - A return of True does not necessarily mean that the two arrays - share any element. It just means that they *might*. - - Parameters - ---------- - a, b : ndarray - - Returns - ------- - out : bool - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - - """ - a_low, a_high = byte_bounds(a) - b_low, b_high = byte_bounds(b) - if b_low >= a_high or a_low >= b_high: - return False - return True - -#----------------------------------------------------------------------------- -# Function for output and information on the variables used. -#----------------------------------------------------------------------------- - - -def who(vardict=None): - """ - Print the Numpy arrays in the given dictionary. - - If there is no dictionary passed in or `vardict` is None then returns - Numpy arrays in the globals() dictionary (all Numpy arrays in the - namespace). - - Parameters - ---------- - vardict : dict, optional - A dictionary possibly containing ndarrays. 
Default is globals(). - - Returns - ------- - out : None - Returns 'None'. - - Notes - ----- - Prints out the name, shape, bytes and type of all of the ndarrays present - in `vardict`. - - Examples - -------- - >>> a = np.arange(10) - >>> b = np.ones(20) - >>> np.who() - Name Shape Bytes Type - =========================================================== - a 10 40 int32 - b 20 160 float64 - Upper bound on total bytes = 200 - - >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', - ... 'idx':5} - >>> np.who(d) - Name Shape Bytes Type - =========================================================== - y 3 24 float64 - x 2 16 float64 - Upper bound on total bytes = 40 - - """ - if vardict is None: - frame = sys._getframe().f_back - vardict = frame.f_globals - sta = [] - cache = {} - for name in vardict.keys(): - if isinstance(vardict[name],ndarray): - var = vardict[name] - idv = id(var) - if idv in cache.keys(): - namestr = name + " (%s)" % cache[idv] - original=0 - else: - cache[idv] = name - namestr = name - original=1 - shapestr = " x ".join(map(str, var.shape)) - bytestr = str(var.nbytes) - sta.append([namestr, shapestr, bytestr, var.dtype.name, - original]) - - maxname = 0 - maxshape = 0 - maxbyte = 0 - totalbytes = 0 - for k in range(len(sta)): - val = sta[k] - if maxname < len(val[0]): - maxname = len(val[0]) - if maxshape < len(val[1]): - maxshape = len(val[1]) - if maxbyte < len(val[2]): - maxbyte = len(val[2]) - if val[4]: - totalbytes += int(val[2]) - - if len(sta) > 0: - sp1 = max(10,maxname) - sp2 = max(10,maxshape) - sp3 = max(10,maxbyte) - prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') - print prval + "\n" + "="*(len(prval)+5) + "\n" - - for k in range(len(sta)): - val = sta[k] - print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), - val[1], ' '*(sp2-len(val[1])+5), - val[2], ' '*(sp3-len(val[2])+5), - val[3]) - print "\nUpper bound on total bytes = %d" % totalbytes - return - 
-#----------------------------------------------------------------------------- - - -# NOTE: pydoc defines a help function which works simliarly to this -# except it uses a pager to take over the screen. - -# combine name and arguments and split to multiple lines of -# width characters. End lines on a comma and begin argument list -# indented with the rest of the arguments. -def _split_line(name, arguments, width): - firstwidth = len(name) - k = firstwidth - newstr = name - sepstr = ", " - arglist = arguments.split(sepstr) - for argument in arglist: - if k == firstwidth: - addstr = "" - else: - addstr = sepstr - k = k + len(argument) + len(addstr) - if k > width: - k = firstwidth + 1 + len(argument) - newstr = newstr + ",\n" + " "*(firstwidth+2) + argument - else: - newstr = newstr + addstr + argument - return newstr - -_namedict = None -_dictlist = None - -# Traverse all module directories underneath globals -# to see if something is defined -def _makenamedict(module='numpy'): - module = __import__(module, globals(), locals(), []) - thedict = {module.__name__:module.__dict__} - dictlist = [module.__name__] - totraverse = [module.__dict__] - while 1: - if len(totraverse) == 0: - break - thisdict = totraverse.pop(0) - for x in thisdict.keys(): - if isinstance(thisdict[x],types.ModuleType): - modname = thisdict[x].__name__ - if modname not in dictlist: - moddict = thisdict[x].__dict__ - dictlist.append(modname) - totraverse.append(moddict) - thedict[modname] = moddict - return thedict, dictlist - -def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'): - """ - Get help information for a function, class, or module. - - Parameters - ---------- - object : object or str, optional - Input object or name to get information about. If `object` is a - numpy object, its docstring is given. If it is a string, available - modules are searched for matching objects. - If None, information about `info` itself is returned. - maxwidth : int, optional - Printing width. 
- output : file like object, optional - File like object that the output is written to, default is ``stdout``. - The object has to be opened in 'w' or 'a' mode. - toplevel : str, optional - Start search at this level. - - See Also - -------- - source, lookfor - - Notes - ----- - When used interactively with an object, ``np.info(obj)`` is equivalent to - ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt. - - Examples - -------- - >>> np.info(np.polyval) # doctest: +SKIP - polyval(p, x) - Evaluate the polynomial p at x. - ... - - When using a string for `object` it is possible to get multiple results. - - >>> np.info('fft') # doctest: +SKIP - *** Found in numpy *** - Core FFT routines - ... - *** Found in numpy.fft *** - fft(a, n=None, axis=-1) - ... - *** Repeat reference found in numpy.fft.fftpack *** - *** Total of 3 references found. *** - - """ - global _namedict, _dictlist - # Local import to speed up numpy's import time. - import pydoc, inspect - - if hasattr(object,'_ppimport_importer') or \ - hasattr(object, '_ppimport_module'): - object = object._ppimport_module - elif hasattr(object, '_ppimport_attr'): - object = object._ppimport_attr - - if object is None: - info(info) - elif isinstance(object, ndarray): - import numpy.numarray as nn - nn.info(object, output=output, numpy=1) - elif isinstance(object, str): - if _namedict is None: - _namedict, _dictlist = _makenamedict(toplevel) - numfound = 0 - objlist = [] - for namestr in _dictlist: - try: - obj = _namedict[namestr][object] - if id(obj) in objlist: - print >> output, "\n *** Repeat reference found in %s *** " % namestr - else: - objlist.append(id(obj)) - print >> output, " *** Found in %s ***" % namestr - info(obj) - print >> output, "-"*maxwidth - numfound += 1 - except KeyError: - pass - if numfound == 0: - print >> output, "Help for %s not found." % object - else: - print >> output, "\n *** Total of %d references found. 
***" % numfound - - elif inspect.isfunction(object): - name = object.func_name - arguments = inspect.formatargspec(*inspect.getargspec(object)) - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print >> output, " " + argstr + "\n" - print >> output, inspect.getdoc(object) - - elif inspect.isclass(object): - name = object.__name__ - arguments = "()" - try: - if hasattr(object, '__init__'): - arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func)) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - except: - pass - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print >> output, " " + argstr + "\n" - doc1 = inspect.getdoc(object) - if doc1 is None: - if hasattr(object,'__init__'): - print >> output, inspect.getdoc(object.__init__) - else: - print >> output, inspect.getdoc(object) - - methods = pydoc.allmethods(object) - if methods != []: - print >> output, "\n\nMethods:\n" - for meth in methods: - if meth[0] == '_': - continue - thisobj = getattr(object, meth, None) - if thisobj is not None: - methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None") - print >> output, " %s -- %s" % (meth, methstr) - - elif type(object) is types.InstanceType: ## check for __call__ method - print >> output, "Instance of class: ", object.__class__.__name__ - print >> output - if hasattr(object, '__call__'): - arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func)) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if hasattr(object,'name'): - name = "%s" % object.name - else: - name = "" - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + 
arguments - - print >> output, " " + argstr + "\n" - doc = inspect.getdoc(object.__call__) - if doc is not None: - print >> output, inspect.getdoc(object.__call__) - print >> output, inspect.getdoc(object) - - else: - print >> output, inspect.getdoc(object) - - elif inspect.ismethod(object): - name = object.__name__ - arguments = inspect.formatargspec(*inspect.getargspec(object.im_func)) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print >> output, " " + argstr + "\n" - print >> output, inspect.getdoc(object) - - elif hasattr(object, '__doc__'): - print >> output, inspect.getdoc(object) - - -def source(object, output=sys.stdout): - """ - Print or write to a file the source code for a Numpy object. - - The source code is only returned for objects written in Python. Many - functions and classes are defined in C and will therefore not return - useful information. - - Parameters - ---------- - object : numpy object - Input object. This can be any object (function, class, module, ...). - output : file object, optional - If `output` not supplied then source code is printed to screen - (sys.stdout). File object must be created with either write 'w' or - append 'a' modes. - - See Also - -------- - lookfor, info - - Examples - -------- - >>> np.source(np.interp) #doctest: +SKIP - In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py - def interp(x, xp, fp, left=None, right=None): - \"\"\".... (full docstring printed)\"\"\" - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - else: - return compiled_interp(x, xp, fp, left, right) - - The source code is only returned for objects written in Python. - - >>> np.source(np.array) #doctest: +SKIP - Not available for this object. 
- - """ - # Local import to speed up numpy's import time. - import inspect - try: - print >> output, "In file: %s\n" % inspect.getsourcefile(object) - print >> output, inspect.getsource(object) - except: - print >> output, "Not available for this object." - - -# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} -# where kind: "func", "class", "module", "object" -# and index: index in breadth-first namespace traversal -_lookfor_caches = {} - -# regexp whose match indicates that the string may contain a function signature -_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) - -def lookfor(what, module=None, import_modules=True, regenerate=False, - output=None): - """ - Do a keyword search on docstrings. - - A list of of objects that matched the search is displayed, - sorted by relevance. All given keywords need to be found in the - docstring for it to be returned as a result, but the order does - not matter. - - Parameters - ---------- - what : str - String containing words to look for. - module : str or list, optional - Name of module(s) whose docstrings to go through. - import_modules : bool, optional - Whether to import sub-modules in packages. Default is True. - regenerate : bool, optional - Whether to re-generate the docstring cache. Default is False. - output : file-like, optional - File-like object to write the output to. If omitted, use a pager. - - See Also - -------- - source, info - - Notes - ----- - Relevance is determined only roughly, by checking if the keywords occur - in the function name, at the start of a docstring, etc. - - Examples - -------- - >>> np.lookfor('binary representation') - Search results for 'binary representation' - ------------------------------------------ - numpy.binary_repr - Return the binary representation of the input number as a string. 
- numpy.core.setup_common.long_double_representation - Given a binary dump as given by GNU od -b, look for long double - numpy.base_repr - Return a string representation of a number in the given base system. - ... - - """ - import pydoc - - # Cache - cache = _lookfor_generate_cache(module, import_modules, regenerate) - - # Search - # XXX: maybe using a real stemming search engine would be better? - found = [] - whats = str(what).lower().split() - if not whats: return - - for name, (docstring, kind, index) in cache.iteritems(): - if kind in ('module', 'object'): - # don't show modules or objects - continue - ok = True - doc = docstring.lower() - for w in whats: - if w not in doc: - ok = False - break - if ok: - found.append(name) - - # Relevance sort - # XXX: this is full Harrison-Stetson heuristics now, - # XXX: it probably could be improved - - kind_relevance = {'func': 1000, 'class': 1000, - 'module': -1000, 'object': -1000} - - def relevance(name, docstr, kind, index): - r = 0 - # do the keywords occur within the start of the docstring? - first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) - r += sum([200 for w in whats if w in first_doc]) - # do the keywords occur in the function name? - r += sum([30 for w in whats if w in name]) - # is the full name long? - r += -len(name) * 5 - # is the object of bad type? - r += kind_relevance.get(kind, -1000) - # is the object deep in namespace hierarchy? 
- r += -name.count('.') * 10 - r += max(-index / 100, -100) - return r - - def relevance_value(a): - return relevance(a, *cache[a]) - found.sort(key=relevance_value) - - # Pretty-print - s = "Search results for '%s'" % (' '.join(whats)) - help_text = [s, "-"*len(s)] - for name in found[::-1]: - doc, kind, ix = cache[name] - - doclines = [line.strip() for line in doc.strip().split("\n") - if line.strip()] - - # find a suitable short description - try: - first_doc = doclines[0].strip() - if _function_signature_re.search(first_doc): - first_doc = doclines[1].strip() - except IndexError: - first_doc = "" - help_text.append("%s\n %s" % (name, first_doc)) - - if not found: - help_text.append("Nothing found.") - - # Output - if output is not None: - output.write("\n".join(help_text)) - elif len(help_text) > 10: - pager = pydoc.getpager() - pager("\n".join(help_text)) - else: - print "\n".join(help_text) - -def _lookfor_generate_cache(module, import_modules, regenerate): - """ - Generate docstring cache for given module. - - Parameters - ---------- - module : str, None, module - Module for which to generate docstring cache - import_modules : bool - Whether to import sub-modules in packages. - regenerate: bool - Re-generate the docstring cache - - Returns - ------- - cache : dict {obj_full_name: (docstring, kind, index), ...} - Docstring cache for the module, either cached one (regenerate=False) - or newly generated. - - """ - global _lookfor_caches - # Local import to speed up numpy's import time. 
- import inspect - from cStringIO import StringIO - - if module is None: - module = "numpy" - - if isinstance(module, str): - try: - __import__(module) - except ImportError: - return {} - module = sys.modules[module] - elif isinstance(module, list) or isinstance(module, tuple): - cache = {} - for mod in module: - cache.update(_lookfor_generate_cache(mod, import_modules, - regenerate)) - return cache - - if id(module) in _lookfor_caches and not regenerate: - return _lookfor_caches[id(module)] - - # walk items and collect docstrings - cache = {} - _lookfor_caches[id(module)] = cache - seen = {} - index = 0 - stack = [(module.__name__, module)] - while stack: - name, item = stack.pop(0) - if id(item) in seen: continue - seen[id(item)] = True - - index += 1 - kind = "object" - - if inspect.ismodule(item): - kind = "module" - try: - _all = item.__all__ - except AttributeError: - _all = None - - # import sub-packages - if import_modules and hasattr(item, '__path__'): - for pth in item.__path__: - for mod_path in os.listdir(pth): - this_py = os.path.join(pth, mod_path) - init_py = os.path.join(pth, mod_path, '__init__.py') - if os.path.isfile(this_py) and mod_path.endswith('.py'): - to_import = mod_path[:-3] - elif os.path.isfile(init_py): - to_import = mod_path - else: - continue - if to_import == '__init__': - continue - - try: - # Catch SystemExit, too - base_exc = BaseException - except NameError: - # Python 2.4 doesn't have BaseException - base_exc = Exception - - try: - old_stdout = sys.stdout - old_stderr = sys.stderr - try: - sys.stdout = StringIO() - sys.stderr = StringIO() - __import__("%s.%s" % (name, to_import)) - finally: - sys.stdout = old_stdout - sys.stderr = old_stderr - except base_exc: - continue - - for n, v in _getmembers(item): - item_name = getattr(v, '__name__', "%s.%s" % (name, n)) - mod_name = getattr(v, '__module__', None) - if '.' 
not in item_name and mod_name: - item_name = "%s.%s" % (mod_name, item_name) - - if not item_name.startswith(name + '.'): - # don't crawl "foreign" objects - if isinstance(v, ufunc): - # ... unless they are ufuncs - pass - else: - continue - elif not (inspect.ismodule(v) or _all is None or n in _all): - continue - stack.append(("%s.%s" % (name, n), v)) - elif inspect.isclass(item): - kind = "class" - for n, v in _getmembers(item): - stack.append(("%s.%s" % (name, n), v)) - elif hasattr(item, "__call__"): - kind = "func" - - doc = inspect.getdoc(item) - if doc is not None: - cache[name] = (doc, kind, index) - - return cache - -def _getmembers(item): - import inspect - try: - members = inspect.getmembers(item) - except AttributeError: - members = [(x, getattr(item, x)) for x in dir(item) - if hasattr(item, x)] - return members - -#----------------------------------------------------------------------------- - -# The following SafeEval class and company are adapted from Michael Spencer's -# ASPN Python Cookbook recipe: -# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469 -# Accordingly it is mostly Copyright 2006 by Michael Spencer. -# The recipe, like most of the other ASPN Python Cookbook recipes was made -# available under the Python license. -# http://www.python.org/license - -# It has been modified to: -# * handle unary -/+ -# * support True/False/None -# * raise SyntaxError instead of a custom exception. - -class SafeEval(object): - """ - Object to evaluate constant string expressions. - - This includes strings with lists, dicts and tuples using the abstract - syntax tree created by ``compiler.parse``. - - For an example of usage, see `safe_eval`. 
- - See Also - -------- - safe_eval - - """ - - if sys.version_info[0] < 3: - def visit(self, node, **kw): - cls = node.__class__ - meth = getattr(self,'visit'+cls.__name__,self.default) - return meth(node, **kw) - - def default(self, node, **kw): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) - - def visitExpression(self, node, **kw): - for child in node.getChildNodes(): - return self.visit(child, **kw) - - def visitConst(self, node, **kw): - return node.value - - def visitDict(self, node,**kw): - return dict([(self.visit(k),self.visit(v)) for k,v in node.items]) - - def visitTuple(self, node, **kw): - return tuple([self.visit(i) for i in node.nodes]) - - def visitList(self, node, **kw): - return [self.visit(i) for i in node.nodes] - - def visitUnaryAdd(self, node, **kw): - return +self.visit(node.getChildNodes()[0]) - - def visitUnarySub(self, node, **kw): - return -self.visit(node.getChildNodes()[0]) - - def visitName(self, node, **kw): - if node.name == 'False': - return False - elif node.name == 'True': - return True - elif node.name == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.name) - else: - - def visit(self, node): - cls = node.__class__ - meth = getattr(self, 'visit' + cls.__name__, self.default) - return meth(node) - - def default(self, node): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) - - def visitExpression(self, node): - return self.visit(node.body) - - def visitNum(self, node): - return node.n - - def visitStr(self, node): - return node.s - - def visitBytes(self, node): - return node.s - - def visitDict(self, node,**kw): - return dict([(self.visit(k), self.visit(v)) - for k, v in zip(node.keys, node.values)]) - - def visitTuple(self, node): - return tuple([self.visit(i) for i in node.elts]) - - def visitList(self, node): - return [self.visit(i) for i in node.elts] - - def visitUnaryOp(self, node): - import ast - if isinstance(node.op, ast.UAdd): - return 
+self.visit(node.operand) - elif isinstance(node.op, ast.USub): - return -self.visit(node.operand) - else: - raise SyntaxError("Unknown unary op: %r" % node.op) - - def visitName(self, node): - if node.id == 'False': - return False - elif node.id == 'True': - return True - elif node.id == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.id) - -def safe_eval(source): - """ - Protected string evaluation. - - Evaluate a string containing a Python literal expression without - allowing the execution of arbitrary non-literal code. - - Parameters - ---------- - source : str - The string to evaluate. - - Returns - ------- - obj : object - The result of evaluating `source`. - - Raises - ------ - SyntaxError - If the code has invalid Python syntax, or if it contains non-literal - code. - - Examples - -------- - >>> np.safe_eval('1') - 1 - >>> np.safe_eval('[1, 2, 3]') - [1, 2, 3] - >>> np.safe_eval('{"foo": ("bar", 10.0)}') - {'foo': ('bar', 10.0)} - - >>> np.safe_eval('import os') - Traceback (most recent call last): - ... - SyntaxError: invalid syntax - - >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') - Traceback (most recent call last): - ... - SyntaxError: Unsupported source construct: compiler.ast.CallFunc - - """ - # Local import to speed up numpy's import time. 
- try: - import compiler - except ImportError: - import ast as compiler - walker = SafeEval() - try: - ast = compiler.parse(source, mode="eval") - except SyntaxError, err: - raise - try: - return walker.visit(ast) - except SyntaxError, err: - raise - -#----------------------------------------------------------------------------- diff --git a/numpy-1.6.2/numpy/linalg/SConscript b/numpy-1.6.2/numpy/linalg/SConscript deleted file mode 100644 index 78c4d569b8..0000000000 --- a/numpy-1.6.2/numpy/linalg/SConscript +++ /dev/null @@ -1,23 +0,0 @@ -# Last Change: Thu Jun 12 06:00 PM 2008 J -# vim:syntax=python -from numscons import GetNumpyEnvironment, scons_get_mathlib -from numscons import CheckF77LAPACK -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) - -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK}) - -use_lapack = config.CheckLAPACK() - -mlib = scons_get_mathlib(env) -env.AppendUnique(LIBS = mlib) - -config.Finish() -write_info(env) - -sources = ['lapack_litemodule.c'] -if not use_lapack: - sources.extend(['python_xerbla.c', 'zlapack_lite.c', 'dlapack_lite.c', - 'blas_lite.c', 'dlamch.c', 'f2c_lite.c']) -env.NumpyPythonExtension('lapack_lite', source = sources) diff --git a/numpy-1.6.2/numpy/linalg/SConstruct b/numpy-1.6.2/numpy/linalg/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/numpy-1.6.2/numpy/linalg/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/numpy-1.6.2/numpy/linalg/__init__.py b/numpy-1.6.2/numpy/linalg/__init__.py deleted file mode 100644 index a74a31950f..0000000000 --- a/numpy-1.6.2/numpy/linalg/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Core Linear Algebra Tools -========================= - -=============== ========================================================== -Linear algebra basics 
-========================================================================== -norm Vector or matrix norm -inv Inverse of a square matrix -solve Solve a linear system of equations -det Determinant of a square matrix -slogdet Logarithm of the determinant of a square matrix -lstsq Solve linear least-squares problem -pinv Pseudo-inverse (Moore-Penrose) calculated using a singular - value decomposition -matrix_power Integer power of a square matrix -=============== ========================================================== - -=============== ========================================================== -Eigenvalues and decompositions -========================================================================== -eig Eigenvalues and vectors of a square matrix -eigh Eigenvalues and eigenvectors of a Hermitian matrix -eigvals Eigenvalues of a square matrix -eigvalsh Eigenvalues of a Hermitian matrix -qr QR decomposition of a matrix -svd Singular value decomposition of a matrix -cholesky Cholesky decomposition of a matrix -=============== ========================================================== - -=============== ========================================================== -Tensor operations -========================================================================== -tensorsolve Solve a linear tensor equation -tensorinv Calculate an inverse of a tensor -=============== ========================================================== - -=============== ========================================================== -Exceptions -========================================================================== -LinAlgError Indicates a failed linear algebra operation -=============== ========================================================== - -""" -# To get sub-modules -from info import __doc__ - -from linalg import * - -from numpy.testing import Tester -test = Tester().test -bench = Tester().test diff --git a/numpy-1.6.2/numpy/linalg/blas_lite.c b/numpy-1.6.2/numpy/linalg/blas_lite.c deleted file 
mode 100644 index d0de434789..0000000000 --- a/numpy-1.6.2/numpy/linalg/blas_lite.c +++ /dev/null @@ -1,10660 +0,0 @@ -/* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ -#include "f2c.h" - -#ifdef HAVE_CONFIG -#include "config.h" -#else -extern doublereal dlamch_(char *); -#define EPSILON dlamch_("Epsilon") -#define SAFEMINIMUM dlamch_("Safe minimum") -#define PRECISION dlamch_("Precision") -#define BASE dlamch_("Base") -#endif - -extern doublereal dlapy2_(doublereal *x, doublereal *y); - - - -/* Table of constant values */ - -static integer c__1 = 1; -static doublecomplex c_b359 = {1.,0.}; - -/* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx, - integer *incx, doublereal *dy, integer *incy) -{ - /* System generated locals */ - integer i__1; - - /* Local variables */ - static integer i__, m, ix, iy, mp1; - - -/* - constant times a vector plus a vector. - uses unrolled loops for increments equal to one. - jack dongarra, linpack, 3/11/78. - modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dy; - --dx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if (*da == 0.) 
{ - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dy[iy] += *da * dx[ix]; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* - code for both increments equal to 1 - - - clean-up loop -*/ - -L20: - m = *n % 4; - if (m == 0) { - goto L40; - } - i__1 = m; - for (i__ = 1; i__ <= i__1; ++i__) { - dy[i__] += *da * dx[i__]; -/* L30: */ - } - if (*n < 4) { - return 0; - } -L40: - mp1 = m + 1; - i__1 = *n; - for (i__ = mp1; i__ <= i__1; i__ += 4) { - dy[i__] += *da * dx[i__]; - dy[i__ + 1] += *da * dx[i__ + 1]; - dy[i__ + 2] += *da * dx[i__ + 2]; - dy[i__ + 3] += *da * dx[i__ + 3]; -/* L50: */ - } - return 0; -} /* daxpy_ */ - -doublereal dcabs1_(doublecomplex *z__) -{ - /* System generated locals */ - doublereal ret_val; - static doublecomplex equiv_0[1]; - - /* Local variables */ -#define t ((doublereal *)equiv_0) -#define zz (equiv_0) - - zz->r = z__->r, zz->i = z__->i; - ret_val = abs(t[0]) + abs(t[1]); - return ret_val; -} /* dcabs1_ */ - -#undef zz -#undef t - - -/* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx, - doublereal *dy, integer *incy) -{ - /* System generated locals */ - integer i__1; - - /* Local variables */ - static integer i__, m, ix, iy, mp1; - - -/* - copies a vector, x, to a vector, y. - uses unrolled loops for increments equal to one. - jack dongarra, linpack, 3/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dy; - --dx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dy[iy] = dx[ix]; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* - code for both increments equal to 1 - - - clean-up loop -*/ - -L20: - m = *n % 7; - if (m == 0) { - goto L40; - } - i__1 = m; - for (i__ = 1; i__ <= i__1; ++i__) { - dy[i__] = dx[i__]; -/* L30: */ - } - if (*n < 7) { - return 0; - } -L40: - mp1 = m + 1; - i__1 = *n; - for (i__ = mp1; i__ <= i__1; i__ += 7) { - dy[i__] = dx[i__]; - dy[i__ + 1] = dx[i__ + 1]; - dy[i__ + 2] = dx[i__ + 2]; - dy[i__ + 3] = dx[i__ + 3]; - dy[i__ + 4] = dx[i__ + 4]; - dy[i__ + 5] = dx[i__ + 5]; - dy[i__ + 6] = dx[i__ + 6]; -/* L50: */ - } - return 0; -} /* dcopy_ */ - -doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy, - integer *incy) -{ - /* System generated locals */ - integer i__1; - doublereal ret_val; - - /* Local variables */ - static integer i__, m, ix, iy, mp1; - static doublereal dtemp; - - -/* - forms the dot product of two vectors. - uses unrolled loops for increments equal to one. - jack dongarra, linpack, 3/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dy; - --dx; - - /* Function Body */ - ret_val = 0.; - dtemp = 0.; - if (*n <= 0) { - return ret_val; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dtemp += dx[ix] * dy[iy]; - ix += *incx; - iy += *incy; -/* L10: */ - } - ret_val = dtemp; - return ret_val; - -/* - code for both increments equal to 1 - - - clean-up loop -*/ - -L20: - m = *n % 5; - if (m == 0) { - goto L40; - } - i__1 = m; - for (i__ = 1; i__ <= i__1; ++i__) { - dtemp += dx[i__] * dy[i__]; -/* L30: */ - } - if (*n < 5) { - goto L60; - } -L40: - mp1 = m + 1; - i__1 = *n; - for (i__ = mp1; i__ <= i__1; i__ += 5) { - dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[ - i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ + - 4] * dy[i__ + 4]; -/* L50: */ - } -L60: - ret_val = dtemp; - return ret_val; -} /* ddot_ */ - -/* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer * - n, integer *k, doublereal *alpha, doublereal *a, integer *lda, - doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, - integer *ldc) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, - i__3; - - /* Local variables */ - static integer i__, j, l, info; - static logical nota, notb; - static doublereal temp; - static integer ncola; - extern logical lsame_(char *, char *); - static integer nrowa, nrowb; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DGEMM performs one of the matrix-matrix operations - - C := alpha*op( A )*op( B ) + beta*C, - - where op( X ) is one of - - op( X ) = X or op( X ) = X', - - alpha 
and beta are scalars, and A, B and C are matrices, with op( A ) - an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. - - Parameters - ========== - - TRANSA - CHARACTER*1. - On entry, TRANSA specifies the form of op( A ) to be used in - the matrix multiplication as follows: - - TRANSA = 'N' or 'n', op( A ) = A. - - TRANSA = 'T' or 't', op( A ) = A'. - - TRANSA = 'C' or 'c', op( A ) = A'. - - Unchanged on exit. - - TRANSB - CHARACTER*1. - On entry, TRANSB specifies the form of op( B ) to be used in - the matrix multiplication as follows: - - TRANSB = 'N' or 'n', op( B ) = B. - - TRANSB = 'T' or 't', op( B ) = B'. - - TRANSB = 'C' or 'c', op( B ) = B'. - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of the matrix - op( A ) and of the matrix C. M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix - op( B ) and the number of columns of the matrix C. N must be - at least zero. - Unchanged on exit. - - K - INTEGER. - On entry, K specifies the number of columns of the matrix - op( A ) and the number of rows of the matrix op( B ). K must - be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is - k when TRANSA = 'N' or 'n', and is m otherwise. - Before entry with TRANSA = 'N' or 'n', the leading m by k - part of the array A must contain the matrix A, otherwise - the leading k by m part of the array A must contain the - matrix A. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When TRANSA = 'N' or 'n' then - LDA must be at least max( 1, m ), otherwise LDA must be at - least max( 1, k ). - Unchanged on exit. - - B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is - n when TRANSB = 'N' or 'n', and is k otherwise. 
- Before entry with TRANSB = 'N' or 'n', the leading k by n - part of the array B must contain the matrix B, otherwise - the leading n by k part of the array B must contain the - matrix B. - Unchanged on exit. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. When TRANSB = 'N' or 'n' then - LDB must be at least max( 1, k ), otherwise LDB must be at - least max( 1, n ). - Unchanged on exit. - - BETA - DOUBLE PRECISION. - On entry, BETA specifies the scalar beta. When BETA is - supplied as zero then C need not be set on input. - Unchanged on exit. - - C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). - Before entry, the leading m by n part of the array C must - contain the matrix C, except when beta is zero, in which - case C need not be set on entry. - On exit, the array C is overwritten by the m by n matrix - ( alpha*op( A )*op( B ) + beta*C ). - - LDC - INTEGER. - On entry, LDC specifies the first dimension of C as declared - in the calling (sub) program. LDC must be at least - max( 1, m ). - Unchanged on exit. - - - Level 3 Blas routine. - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Set NOTA and NOTB as true if A and B respectively are not - transposed and set NROWA, NCOLA and NROWB as the number of rows - and columns of A and the number of rows of B respectively. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - - /* Function Body */ - nota = lsame_(transa, "N"); - notb = lsame_(transb, "N"); - if (nota) { - nrowa = *m; - ncola = *k; - } else { - nrowa = *k; - ncola = *m; - } - if (notb) { - nrowb = *k; - } else { - nrowb = *n; - } - -/* Test the input parameters. */ - - info = 0; - if (((! nota && ! lsame_(transa, "C")) && ! lsame_( - transa, "T"))) { - info = 1; - } else if (((! notb && ! lsame_(transb, "C")) && ! - lsame_(transb, "T"))) { - info = 2; - } else if (*m < 0) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*k < 0) { - info = 5; - } else if (*lda < max(1,nrowa)) { - info = 8; - } else if (*ldb < max(1,nrowb)) { - info = 10; - } else if (*ldc < max(1,*m)) { - info = 13; - } - if (info != 0) { - xerbla_("DGEMM ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) { - return 0; - } - -/* And if alpha.eq.zero. */ - - if (*alpha == 0.) { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L30: */ - } -/* L40: */ - } - } - return 0; - } - -/* Start the operations. */ - - if (notb) { - if (nota) { - -/* Form C := alpha*A*B + beta*C. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L50: */ - } - } else if (*beta != 1.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L60: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - if (b[l + j * b_dim1] != 0.) { - temp = *alpha * b[l + j * b_dim1]; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - c__[i__ + j * c_dim1] += temp * a[i__ + l * - a_dim1]; -/* L70: */ - } - } -/* L80: */ - } -/* L90: */ - } - } else { - -/* Form C := alpha*A'*B + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - temp += a[l + i__ * a_dim1] * b[l + j * b_dim1]; -/* L100: */ - } - if (*beta == 0.) { - c__[i__ + j * c_dim1] = *alpha * temp; - } else { - c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ - i__ + j * c_dim1]; - } -/* L110: */ - } -/* L120: */ - } - } - } else { - if (nota) { - -/* Form C := alpha*A*B' + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L130: */ - } - } else if (*beta != 1.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L140: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - if (b[j + l * b_dim1] != 0.) { - temp = *alpha * b[j + l * b_dim1]; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - c__[i__ + j * c_dim1] += temp * a[i__ + l * - a_dim1]; -/* L150: */ - } - } -/* L160: */ - } -/* L170: */ - } - } else { - -/* Form C := alpha*A'*B' + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - temp += a[l + i__ * a_dim1] * b[j + l * b_dim1]; -/* L180: */ - } - if (*beta == 0.) 
{ - c__[i__ + j * c_dim1] = *alpha * temp; - } else { - c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ - i__ + j * c_dim1]; - } -/* L190: */ - } -/* L200: */ - } - } - } - - return 0; - -/* End of DGEMM . */ - -} /* dgemm_ */ - -/* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal * - alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, - doublereal *beta, doublereal *y, integer *incy) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, ix, iy, jx, jy, kx, ky, info; - static doublereal temp; - static integer lenx, leny; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DGEMV performs one of the matrix-vector operations - - y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, - - where alpha and beta are scalars, x and y are vectors and A is an - m by n matrix. - - Parameters - ========== - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' y := alpha*A*x + beta*y. - - TRANS = 'T' or 't' y := alpha*A'*x + beta*y. - - TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of the matrix A. - M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). - Before entry, the leading m by n part of the array A must - contain the matrix of coefficients. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, m ). - Unchanged on exit. 
- - X - DOUBLE PRECISION array of DIMENSION at least - ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' - and at least - ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. - Before entry, the incremented array X must contain the - vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - BETA - DOUBLE PRECISION. - On entry, BETA specifies the scalar beta. When BETA is - supplied as zero then Y need not be set on input. - Unchanged on exit. - - Y - DOUBLE PRECISION array of DIMENSION at least - ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' - and at least - ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. - Before entry with BETA non-zero, the incremented array Y - must contain the vector y. On exit, Y is overwritten by the - updated vector y. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - --y; - - /* Function Body */ - info = 0; - if (((! lsame_(trans, "N") && ! lsame_(trans, "T")) && ! lsame_(trans, "C"))) { - info = 1; - } else if (*m < 0) { - info = 2; - } else if (*n < 0) { - info = 3; - } else if (*lda < max(1,*m)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } else if (*incy == 0) { - info = 11; - } - if (info != 0) { - xerbla_("DGEMV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || (*alpha == 0. && *beta == 1.)) { - return 0; - } - -/* - Set LENX and LENY, the lengths of the vectors x and y, and set - up the start points in X and Y. 
-*/ - - if (lsame_(trans, "N")) { - lenx = *n; - leny = *m; - } else { - lenx = *m; - leny = *n; - } - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (lenx - 1) * *incx; - } - if (*incy > 0) { - ky = 1; - } else { - ky = 1 - (leny - 1) * *incy; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. - - First form y := beta*y. -*/ - - if (*beta != 1.) { - if (*incy == 1) { - if (*beta == 0.) { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - y[i__] = 0.; -/* L10: */ - } - } else { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - y[i__] = *beta * y[i__]; -/* L20: */ - } - } - } else { - iy = ky; - if (*beta == 0.) { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - y[iy] = 0.; - iy += *incy; -/* L30: */ - } - } else { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - y[iy] = *beta * y[iy]; - iy += *incy; -/* L40: */ - } - } - } - } - if (*alpha == 0.) { - return 0; - } - if (lsame_(trans, "N")) { - -/* Form y := alpha*A*x + y. */ - - jx = kx; - if (*incy == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0.) { - temp = *alpha * x[jx]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - y[i__] += temp * a[i__ + j * a_dim1]; -/* L50: */ - } - } - jx += *incx; -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0.) { - temp = *alpha * x[jx]; - iy = ky; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - y[iy] += temp * a[i__ + j * a_dim1]; - iy += *incy; -/* L70: */ - } - } - jx += *incx; -/* L80: */ - } - } - } else { - -/* Form y := alpha*A'*x + y. 
*/ - - jy = ky; - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = 0.; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp += a[i__ + j * a_dim1] * x[i__]; -/* L90: */ - } - y[jy] += *alpha * temp; - jy += *incy; -/* L100: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = 0.; - ix = kx; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp += a[i__ + j * a_dim1] * x[ix]; - ix += *incx; -/* L110: */ - } - y[jy] += *alpha * temp; - jy += *incy; -/* L120: */ - } - } - } - - return 0; - -/* End of DGEMV . */ - -} /* dgemv_ */ - -/* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha, - doublereal *x, integer *incx, doublereal *y, integer *incy, - doublereal *a, integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, ix, jy, kx, info; - static doublereal temp; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DGER performs the rank 1 operation - - A := alpha*x*y' + A, - - where alpha is a scalar, x is an m element vector, y is an n element - vector and A is an m by n matrix. - - Parameters - ========== - - M - INTEGER. - On entry, M specifies the number of rows of the matrix A. - M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - X - DOUBLE PRECISION array of dimension at least - ( 1 + ( m - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the m - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - Y - DOUBLE PRECISION array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). 
- Before entry, the incremented array Y must contain the n - element vector y. - Unchanged on exit. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). - Before entry, the leading m by n part of the array A must - contain the matrix of coefficients. On exit, A is - overwritten by the updated matrix. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, m ). - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --x; - --y; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if (*m < 0) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*incx == 0) { - info = 5; - } else if (*incy == 0) { - info = 7; - } else if (*lda < max(1,*m)) { - info = 9; - } - if (info != 0) { - xerbla_("DGER ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || *alpha == 0.) { - return 0; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. -*/ - - if (*incy > 0) { - jy = 1; - } else { - jy = 1 - (*n - 1) * *incy; - } - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (y[jy] != 0.) { - temp = *alpha * y[jy]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] += x[i__] * temp; -/* L10: */ - } - } - jy += *incy; -/* L20: */ - } - } else { - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*m - 1) * *incx; - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (y[jy] != 0.) 
{ - temp = *alpha * y[jy]; - ix = kx; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] += x[ix] * temp; - ix += *incx; -/* L30: */ - } - } - jy += *incy; -/* L40: */ - } - } - - return 0; - -/* End of DGER . */ - -} /* dger_ */ - -doublereal dnrm2_(integer *n, doublereal *x, integer *incx) -{ - /* System generated locals */ - integer i__1, i__2; - doublereal ret_val, d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer ix; - static doublereal ssq, norm, scale, absxi; - - -/* - DNRM2 returns the euclidean norm of a vector via the function - name, so that - - DNRM2 := sqrt( x'*x ) - - - -- This version written on 25-October-1982. - Modified on 14-October-1993 to inline the call to DLASSQ. - Sven Hammarling, Nag Ltd. -*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*n < 1 || *incx < 1) { - norm = 0.; - } else if (*n == 1) { - norm = abs(x[1]); - } else { - scale = 0.; - ssq = 1.; -/* - The following loop is equivalent to this call to the LAPACK - auxiliary routine: - CALL DLASSQ( N, X, INCX, SCALE, SSQ ) -*/ - - i__1 = (*n - 1) * *incx + 1; - i__2 = *incx; - for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { - if (x[ix] != 0.) { - absxi = (d__1 = x[ix], abs(d__1)); - if (scale < absxi) { -/* Computing 2nd power */ - d__1 = scale / absxi; - ssq = ssq * (d__1 * d__1) + 1.; - scale = absxi; - } else { -/* Computing 2nd power */ - d__1 = absxi / scale; - ssq += d__1 * d__1; - } - } -/* L10: */ - } - norm = scale * sqrt(ssq); - } - - ret_val = norm; - return ret_val; - -/* End of DNRM2. */ - -} /* dnrm2_ */ - -/* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx, - doublereal *dy, integer *incy, doublereal *c__, doublereal *s) -{ - /* System generated locals */ - integer i__1; - - /* Local variables */ - static integer i__, ix, iy; - static doublereal dtemp; - - -/* - applies a plane rotation. - jack dongarra, linpack, 3/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dy; - --dx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments not equal - to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dtemp = *c__ * dx[ix] + *s * dy[iy]; - dy[iy] = *c__ * dy[iy] - *s * dx[ix]; - dx[ix] = dtemp; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* code for both increments equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dtemp = *c__ * dx[i__] + *s * dy[i__]; - dy[i__] = *c__ * dy[i__] - *s * dx[i__]; - dx[i__] = dtemp; -/* L30: */ - } - return 0; -} /* drot_ */ - -/* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx, - integer *incx) -{ - /* System generated locals */ - integer i__1, i__2; - - /* Local variables */ - static integer i__, m, mp1, nincx; - - -/* - scales a vector by a constant. - uses unrolled loops for increment equal to one. - jack dongarra, linpack, 3/11/78. - modified 3/93 to return if incx .le. 0. - modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dx; - - /* Function Body */ - if (*n <= 0 || *incx <= 0) { - return 0; - } - if (*incx == 1) { - goto L20; - } - -/* code for increment not equal to 1 */ - - nincx = *n * *incx; - i__1 = nincx; - i__2 = *incx; - for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - dx[i__] = *da * dx[i__]; -/* L10: */ - } - return 0; - -/* - code for increment equal to 1 - - - clean-up loop -*/ - -L20: - m = *n % 5; - if (m == 0) { - goto L40; - } - i__2 = m; - for (i__ = 1; i__ <= i__2; ++i__) { - dx[i__] = *da * dx[i__]; -/* L30: */ - } - if (*n < 5) { - return 0; - } -L40: - mp1 = m + 1; - i__2 = *n; - for (i__ = mp1; i__ <= i__2; i__ += 5) { - dx[i__] = *da * dx[i__]; - dx[i__ + 1] = *da * dx[i__ + 1]; - dx[i__ + 2] = *da * dx[i__ + 2]; - dx[i__ + 3] = *da * dx[i__ + 3]; - dx[i__ + 4] = *da * dx[i__ + 4]; -/* L50: */ - } - return 0; -} /* dscal_ */ - -/* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx, - doublereal *dy, integer *incy) -{ - /* System generated locals */ - integer i__1; - - /* Local variables */ - static integer i__, m, ix, iy, mp1; - static doublereal dtemp; - - -/* - interchanges two vectors. - uses unrolled loops for increments equal one. - jack dongarra, linpack, 3/11/78. - modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dy; - --dx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments not equal - to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dtemp = dx[ix]; - dx[ix] = dy[iy]; - dy[iy] = dtemp; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* - code for both increments equal to 1 - - - clean-up loop -*/ - -L20: - m = *n % 3; - if (m == 0) { - goto L40; - } - i__1 = m; - for (i__ = 1; i__ <= i__1; ++i__) { - dtemp = dx[i__]; - dx[i__] = dy[i__]; - dy[i__] = dtemp; -/* L30: */ - } - if (*n < 3) { - return 0; - } -L40: - mp1 = m + 1; - i__1 = *n; - for (i__ = mp1; i__ <= i__1; i__ += 3) { - dtemp = dx[i__]; - dx[i__] = dy[i__]; - dy[i__] = 
dtemp; - dtemp = dx[i__ + 1]; - dx[i__ + 1] = dy[i__ + 1]; - dy[i__ + 1] = dtemp; - dtemp = dx[i__ + 2]; - dx[i__ + 2] = dy[i__ + 2]; - dy[i__ + 2] = dtemp; -/* L50: */ - } - return 0; -} /* dswap_ */ - -/* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha, - doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal - *beta, doublereal *y, integer *incy) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, ix, iy, jx, jy, kx, ky, info; - static doublereal temp1, temp2; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DSYMV performs the matrix-vector operation - - y := alpha*A*x + beta*y, - - where alpha and beta are scalars, x and y are n element vectors and - A is an n by n symmetric matrix. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array A is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of A - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of A - is to be referenced. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular part of the symmetric matrix and the strictly - lower triangular part of A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular part of the symmetric matrix and the strictly - upper triangular part of A is not referenced. - Unchanged on exit. 
- - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - X - DOUBLE PRECISION array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - BETA - DOUBLE PRECISION. - On entry, BETA specifies the scalar beta. When BETA is - supplied as zero then Y need not be set on input. - Unchanged on exit. - - Y - DOUBLE PRECISION array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). - Before entry, the incremented array Y must contain the n - element vector y. On exit, Y is overwritten by the updated - vector y. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - --y; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*lda < max(1,*n)) { - info = 5; - } else if (*incx == 0) { - info = 7; - } else if (*incy == 0) { - info = 10; - } - if (info != 0) { - xerbla_("DSYMV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || (*alpha == 0. && *beta == 1.)) { - return 0; - } - -/* Set up the start points in X and Y. 
*/ - - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*n - 1) * *incx; - } - if (*incy > 0) { - ky = 1; - } else { - ky = 1 - (*n - 1) * *incy; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through the triangular part - of A. - - First form y := beta*y. -*/ - - if (*beta != 1.) { - if (*incy == 1) { - if (*beta == 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - y[i__] = 0.; -/* L10: */ - } - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - y[i__] = *beta * y[i__]; -/* L20: */ - } - } - } else { - iy = ky; - if (*beta == 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - y[iy] = 0.; - iy += *incy; -/* L30: */ - } - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - y[iy] = *beta * y[iy]; - iy += *incy; -/* L40: */ - } - } - } - } - if (*alpha == 0.) { - return 0; - } - if (lsame_(uplo, "U")) { - -/* Form y when A is stored in upper triangle. */ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp1 = *alpha * x[j]; - temp2 = 0.; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - y[i__] += temp1 * a[i__ + j * a_dim1]; - temp2 += a[i__ + j * a_dim1] * x[i__]; -/* L50: */ - } - y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2; -/* L60: */ - } - } else { - jx = kx; - jy = ky; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp1 = *alpha * x[jx]; - temp2 = 0.; - ix = kx; - iy = ky; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - y[iy] += temp1 * a[i__ + j * a_dim1]; - temp2 += a[i__ + j * a_dim1] * x[ix]; - ix += *incx; - iy += *incy; -/* L70: */ - } - y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2; - jx += *incx; - jy += *incy; -/* L80: */ - } - } - } else { - -/* Form y when A is stored in lower triangle. 
*/ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp1 = *alpha * x[j]; - temp2 = 0.; - y[j] += temp1 * a[j + j * a_dim1]; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - y[i__] += temp1 * a[i__ + j * a_dim1]; - temp2 += a[i__ + j * a_dim1] * x[i__]; -/* L90: */ - } - y[j] += *alpha * temp2; -/* L100: */ - } - } else { - jx = kx; - jy = ky; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp1 = *alpha * x[jx]; - temp2 = 0.; - y[jy] += temp1 * a[j + j * a_dim1]; - ix = jx; - iy = jy; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - iy += *incy; - y[iy] += temp1 * a[i__ + j * a_dim1]; - temp2 += a[i__ + j * a_dim1] * x[ix]; -/* L110: */ - } - y[jy] += *alpha * temp2; - jx += *incx; - jy += *incy; -/* L120: */ - } - } - } - - return 0; - -/* End of DSYMV . */ - -} /* dsymv_ */ - -/* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha, - doublereal *x, integer *incx, doublereal *y, integer *incy, - doublereal *a, integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, ix, iy, jx, jy, kx, ky, info; - static doublereal temp1, temp2; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DSYR2 performs the symmetric rank 2 operation - - A := alpha*x*y' + alpha*y*x' + A, - - where alpha is a scalar, x and y are n element vectors and A is an n - by n symmetric matrix. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array A is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of A - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of A - is to be referenced. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. 
- - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - X - DOUBLE PRECISION array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - Y - DOUBLE PRECISION array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). - Before entry, the incremented array Y must contain the n - element vector y. - Unchanged on exit. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular part of the symmetric matrix and the strictly - lower triangular part of A is not referenced. On exit, the - upper triangular part of the array A is overwritten by the - upper triangular part of the updated matrix. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular part of the symmetric matrix and the strictly - upper triangular part of A is not referenced. On exit, the - lower triangular part of the array A is overwritten by the - lower triangular part of the updated matrix. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --x; - --y; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*incx == 0) { - info = 5; - } else if (*incy == 0) { - info = 7; - } else if (*lda < max(1,*n)) { - info = 9; - } - if (info != 0) { - xerbla_("DSYR2 ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || *alpha == 0.) { - return 0; - } - -/* - Set up the start points in X and Y if the increments are not both - unity. -*/ - - if (*incx != 1 || *incy != 1) { - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*n - 1) * *incx; - } - if (*incy > 0) { - ky = 1; - } else { - ky = 1 - (*n - 1) * *incy; - } - jx = kx; - jy = ky; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through the triangular part - of A. -*/ - - if (lsame_(uplo, "U")) { - -/* Form A when A is stored in the upper triangle. */ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[j] != 0. || y[j] != 0.) { - temp1 = *alpha * y[j]; - temp2 = *alpha * x[j]; - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] * - temp1 + y[i__] * temp2; -/* L10: */ - } - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0. || y[jy] != 0.) { - temp1 = *alpha * y[jy]; - temp2 = *alpha * x[jx]; - ix = kx; - iy = ky; - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] * - temp1 + y[iy] * temp2; - ix += *incx; - iy += *incy; -/* L30: */ - } - } - jx += *incx; - jy += *incy; -/* L40: */ - } - } - } else { - -/* Form A when A is stored in the lower triangle. */ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[j] != 0. || y[j] != 0.) 
{ - temp1 = *alpha * y[j]; - temp2 = *alpha * x[j]; - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] * - temp1 + y[i__] * temp2; -/* L50: */ - } - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0. || y[jy] != 0.) { - temp1 = *alpha * y[jy]; - temp2 = *alpha * x[jx]; - ix = jx; - iy = jy; - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] * - temp1 + y[iy] * temp2; - ix += *incx; - iy += *incy; -/* L70: */ - } - } - jx += *incx; - jy += *incy; -/* L80: */ - } - } - } - - return 0; - -/* End of DSYR2 . */ - -} /* dsyr2_ */ - -/* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k, - doublereal *alpha, doublereal *a, integer *lda, doublereal *b, - integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, - i__3; - - /* Local variables */ - static integer i__, j, l, info; - static doublereal temp1, temp2; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DSYR2K performs one of the symmetric rank 2k operations - - C := alpha*A*B' + alpha*B*A' + beta*C, - - or - - C := alpha*A'*B + alpha*B'*A + beta*C, - - where alpha and beta are scalars, C is an n by n symmetric matrix - and A and B are n by k matrices in the first case and k by n - matrices in the second case. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array C is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of C - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of C - is to be referenced. - - Unchanged on exit. - - TRANS - CHARACTER*1. 
- On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' + - beta*C. - - TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A + - beta*C. - - TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A + - beta*C. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix C. N must be - at least zero. - Unchanged on exit. - - K - INTEGER. - On entry with TRANS = 'N' or 'n', K specifies the number - of columns of the matrices A and B, and on entry with - TRANS = 'T' or 't' or 'C' or 'c', K specifies the number - of rows of the matrices A and B. K must be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is - k when TRANS = 'N' or 'n', and is n otherwise. - Before entry with TRANS = 'N' or 'n', the leading n by k - part of the array A must contain the matrix A, otherwise - the leading k by n part of the array A must contain the - matrix A. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When TRANS = 'N' or 'n' - then LDA must be at least max( 1, n ), otherwise LDA must - be at least max( 1, k ). - Unchanged on exit. - - B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is - k when TRANS = 'N' or 'n', and is n otherwise. - Before entry with TRANS = 'N' or 'n', the leading n by k - part of the array B must contain the matrix B, otherwise - the leading k by n part of the array B must contain the - matrix B. - Unchanged on exit. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. When TRANS = 'N' or 'n' - then LDB must be at least max( 1, n ), otherwise LDB must - be at least max( 1, k ). - Unchanged on exit. - - BETA - DOUBLE PRECISION. - On entry, BETA specifies the scalar beta. 
- Unchanged on exit. - - C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array C must contain the upper - triangular part of the symmetric matrix and the strictly - lower triangular part of C is not referenced. On exit, the - upper triangular part of the array C is overwritten by the - upper triangular part of the updated matrix. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array C must contain the lower - triangular part of the symmetric matrix and the strictly - upper triangular part of C is not referenced. On exit, the - lower triangular part of the array C is overwritten by the - lower triangular part of the updated matrix. - - LDC - INTEGER. - On entry, LDC specifies the first dimension of C as declared - in the calling (sub) program. LDC must be at least - max( 1, n ). - Unchanged on exit. - - - Level 3 Blas routine. - - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - - /* Function Body */ - if (lsame_(trans, "N")) { - nrowa = *n; - } else { - nrowa = *k; - } - upper = lsame_(uplo, "U"); - - info = 0; - if ((! upper && ! lsame_(uplo, "L"))) { - info = 1; - } else if (((! lsame_(trans, "N") && ! lsame_(trans, - "T")) && ! 
lsame_(trans, "C"))) { - info = 2; - } else if (*n < 0) { - info = 3; - } else if (*k < 0) { - info = 4; - } else if (*lda < max(1,nrowa)) { - info = 7; - } else if (*ldb < max(1,nrowa)) { - info = 9; - } else if (*ldc < max(1,*n)) { - info = 12; - } - if (info != 0) { - xerbla_("DSYR2K", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if (*alpha == 0.) { - if (upper) { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L30: */ - } -/* L40: */ - } - } - } else { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L50: */ - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L70: */ - } -/* L80: */ - } - } - } - return 0; - } - -/* Start the operations. */ - - if (lsame_(trans, "N")) { - -/* Form C := alpha*A*B' + alpha*B*A' + C. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L90: */ - } - } else if (*beta != 1.) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L100: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) 
{ - temp1 = *alpha * b[j + l * b_dim1]; - temp2 = *alpha * a[j + l * a_dim1]; - i__3 = j; - for (i__ = 1; i__ <= i__3; ++i__) { - c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[ - i__ + l * a_dim1] * temp1 + b[i__ + l * - b_dim1] * temp2; -/* L110: */ - } - } -/* L120: */ - } -/* L130: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L140: */ - } - } else if (*beta != 1.) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L150: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) { - temp1 = *alpha * b[j + l * b_dim1]; - temp2 = *alpha * a[j + l * a_dim1]; - i__3 = *n; - for (i__ = j; i__ <= i__3; ++i__) { - c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[ - i__ + l * a_dim1] * temp1 + b[i__ + l * - b_dim1] * temp2; -/* L160: */ - } - } -/* L170: */ - } -/* L180: */ - } - } - } else { - -/* Form C := alpha*A'*B + alpha*B'*A + C. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - temp1 = 0.; - temp2 = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1]; - temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1]; -/* L190: */ - } - if (*beta == 0.) { - c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha * - temp2; - } else { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] - + *alpha * temp1 + *alpha * temp2; - } -/* L200: */ - } -/* L210: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - temp1 = 0.; - temp2 = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1]; - temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1]; -/* L220: */ - } - if (*beta == 0.) 
{ - c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha * - temp2; - } else { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] - + *alpha * temp1 + *alpha * temp2; - } -/* L230: */ - } -/* L240: */ - } - } - } - - return 0; - -/* End of DSYR2K. */ - -} /* dsyr2k_ */ - -/* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k, - doublereal *alpha, doublereal *a, integer *lda, doublereal *beta, - doublereal *c__, integer *ldc) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, l, info; - static doublereal temp; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - DSYRK performs one of the symmetric rank k operations - - C := alpha*A*A' + beta*C, - - or - - C := alpha*A'*A + beta*C, - - where alpha and beta are scalars, C is an n by n symmetric matrix - and A is an n by k matrix in the first case and a k by n matrix - in the second case. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array C is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of C - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of C - is to be referenced. - - Unchanged on exit. - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. - - TRANS = 'T' or 't' C := alpha*A'*A + beta*C. - - TRANS = 'C' or 'c' C := alpha*A'*A + beta*C. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix C. N must be - at least zero. - Unchanged on exit. - - K - INTEGER. 
- On entry with TRANS = 'N' or 'n', K specifies the number - of columns of the matrix A, and on entry with - TRANS = 'T' or 't' or 'C' or 'c', K specifies the number - of rows of the matrix A. K must be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is - k when TRANS = 'N' or 'n', and is n otherwise. - Before entry with TRANS = 'N' or 'n', the leading n by k - part of the array A must contain the matrix A, otherwise - the leading k by n part of the array A must contain the - matrix A. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When TRANS = 'N' or 'n' - then LDA must be at least max( 1, n ), otherwise LDA must - be at least max( 1, k ). - Unchanged on exit. - - BETA - DOUBLE PRECISION. - On entry, BETA specifies the scalar beta. - Unchanged on exit. - - C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array C must contain the upper - triangular part of the symmetric matrix and the strictly - lower triangular part of C is not referenced. On exit, the - upper triangular part of the array C is overwritten by the - upper triangular part of the updated matrix. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array C must contain the lower - triangular part of the symmetric matrix and the strictly - upper triangular part of C is not referenced. On exit, the - lower triangular part of the array C is overwritten by the - lower triangular part of the updated matrix. - - LDC - INTEGER. - On entry, LDC specifies the first dimension of C as declared - in the calling (sub) program. LDC must be at least - max( 1, n ). - Unchanged on exit. - - - Level 3 Blas routine. - - -- Written on 8-February-1989. 
- Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - - /* Function Body */ - if (lsame_(trans, "N")) { - nrowa = *n; - } else { - nrowa = *k; - } - upper = lsame_(uplo, "U"); - - info = 0; - if ((! upper && ! lsame_(uplo, "L"))) { - info = 1; - } else if (((! lsame_(trans, "N") && ! lsame_(trans, - "T")) && ! lsame_(trans, "C"))) { - info = 2; - } else if (*n < 0) { - info = 3; - } else if (*k < 0) { - info = 4; - } else if (*lda < max(1,nrowa)) { - info = 7; - } else if (*ldc < max(1,*n)) { - info = 10; - } - if (info != 0) { - xerbla_("DSYRK ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if (*alpha == 0.) { - if (upper) { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L30: */ - } -/* L40: */ - } - } - } else { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L50: */ - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L70: */ - } -/* L80: */ - } - } - } - return 0; - } - -/* Start the operations. */ - - if (lsame_(trans, "N")) { - -/* Form C := alpha*A*A' + beta*C. 
*/ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L90: */ - } - } else if (*beta != 1.) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L100: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - if (a[j + l * a_dim1] != 0.) { - temp = *alpha * a[j + l * a_dim1]; - i__3 = j; - for (i__ = 1; i__ <= i__3; ++i__) { - c__[i__ + j * c_dim1] += temp * a[i__ + l * - a_dim1]; -/* L110: */ - } - } -/* L120: */ - } -/* L130: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = 0.; -/* L140: */ - } - } else if (*beta != 1.) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; -/* L150: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - if (a[j + l * a_dim1] != 0.) { - temp = *alpha * a[j + l * a_dim1]; - i__3 = *n; - for (i__ = j; i__ <= i__3; ++i__) { - c__[i__ + j * c_dim1] += temp * a[i__ + l * - a_dim1]; -/* L160: */ - } - } -/* L170: */ - } -/* L180: */ - } - } - } else { - -/* Form C := alpha*A'*A + beta*C. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - temp += a[l + i__ * a_dim1] * a[l + j * a_dim1]; -/* L190: */ - } - if (*beta == 0.) { - c__[i__ + j * c_dim1] = *alpha * temp; - } else { - c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ - i__ + j * c_dim1]; - } -/* L200: */ - } -/* L210: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - temp = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - temp += a[l + i__ * a_dim1] * a[l + j * a_dim1]; -/* L220: */ - } - if (*beta == 0.) 
{ - c__[i__ + j * c_dim1] = *alpha * temp; - } else { - c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ - i__ + j * c_dim1]; - } -/* L230: */ - } -/* L240: */ - } - } - } - - return 0; - -/* End of DSYRK . */ - -} /* dsyrk_ */ - -/* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag, - integer *m, integer *n, doublereal *alpha, doublereal *a, integer * - lda, doublereal *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, k, info; - static doublereal temp; - static logical lside; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical nounit; - - -/* - Purpose - ======= - - DTRMM performs one of the matrix-matrix operations - - B := alpha*op( A )*B, or B := alpha*B*op( A ), - - where alpha is a scalar, B is an m by n matrix, A is a unit, or - non-unit, upper or lower triangular matrix and op( A ) is one of - - op( A ) = A or op( A ) = A'. - - Parameters - ========== - - SIDE - CHARACTER*1. - On entry, SIDE specifies whether op( A ) multiplies B from - the left or right as follows: - - SIDE = 'L' or 'l' B := alpha*op( A )*B. - - SIDE = 'R' or 'r' B := alpha*B*op( A ). - - Unchanged on exit. - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix A is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANSA - CHARACTER*1. - On entry, TRANSA specifies the form of op( A ) to be used in - the matrix multiplication as follows: - - TRANSA = 'N' or 'n' op( A ) = A. - - TRANSA = 'T' or 't' op( A ) = A'. - - TRANSA = 'C' or 'c' op( A ) = A'. - - Unchanged on exit. - - DIAG - CHARACTER*1. 
- On entry, DIAG specifies whether or not A is unit triangular - as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of B. M must be at - least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of B. N must be - at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. When alpha is - zero then A is not referenced and B need not be set before - entry. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m - when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. - Before entry with UPLO = 'U' or 'u', the leading k by k - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading k by k - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When SIDE = 'L' or 'l' then - LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' - then LDA must be at least max( 1, n ). - Unchanged on exit. - - B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). - Before entry, the leading m by n part of the array B must - contain the matrix B, and on exit is overwritten by the - transformed matrix. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. LDB must be at least - max( 1, m ). - Unchanged on exit. - - - Level 3 Blas routine. 
- - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - lside = lsame_(side, "L"); - if (lside) { - nrowa = *m; - } else { - nrowa = *n; - } - nounit = lsame_(diag, "N"); - upper = lsame_(uplo, "U"); - - info = 0; - if ((! lside && ! lsame_(side, "R"))) { - info = 1; - } else if ((! upper && ! lsame_(uplo, "L"))) { - info = 2; - } else if (((! lsame_(transa, "N") && ! lsame_( - transa, "T")) && ! lsame_(transa, "C"))) { - info = 3; - } else if ((! lsame_(diag, "U") && ! lsame_(diag, - "N"))) { - info = 4; - } else if (*m < 0) { - info = 5; - } else if (*n < 0) { - info = 6; - } else if (*lda < max(1,nrowa)) { - info = 9; - } else if (*ldb < max(1,*m)) { - info = 11; - } - if (info != 0) { - xerbla_("DTRMM ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if (*alpha == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - return 0; - } - -/* Start the operations. */ - - if (lside) { - if (lsame_(transa, "N")) { - -/* Form B := alpha*A*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (k = 1; k <= i__2; ++k) { - if (b[k + j * b_dim1] != 0.) 
{ - temp = *alpha * b[k + j * b_dim1]; - i__3 = k - 1; - for (i__ = 1; i__ <= i__3; ++i__) { - b[i__ + j * b_dim1] += temp * a[i__ + k * - a_dim1]; -/* L30: */ - } - if (nounit) { - temp *= a[k + k * a_dim1]; - } - b[k + j * b_dim1] = temp; - } -/* L40: */ - } -/* L50: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (k = *m; k >= 1; --k) { - if (b[k + j * b_dim1] != 0.) { - temp = *alpha * b[k + j * b_dim1]; - b[k + j * b_dim1] = temp; - if (nounit) { - b[k + j * b_dim1] *= a[k + k * a_dim1]; - } - i__2 = *m; - for (i__ = k + 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] += temp * a[i__ + k * - a_dim1]; -/* L60: */ - } - } -/* L70: */ - } -/* L80: */ - } - } - } else { - -/* Form B := alpha*A'*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (i__ = *m; i__ >= 1; --i__) { - temp = b[i__ + j * b_dim1]; - if (nounit) { - temp *= a[i__ + i__ * a_dim1]; - } - i__2 = i__ - 1; - for (k = 1; k <= i__2; ++k) { - temp += a[k + i__ * a_dim1] * b[k + j * b_dim1]; -/* L90: */ - } - b[i__ + j * b_dim1] = *alpha * temp; -/* L100: */ - } -/* L110: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = b[i__ + j * b_dim1]; - if (nounit) { - temp *= a[i__ + i__ * a_dim1]; - } - i__3 = *m; - for (k = i__ + 1; k <= i__3; ++k) { - temp += a[k + i__ * a_dim1] * b[k + j * b_dim1]; -/* L120: */ - } - b[i__ + j * b_dim1] = *alpha * temp; -/* L130: */ - } -/* L140: */ - } - } - } - } else { - if (lsame_(transa, "N")) { - -/* Form B := alpha*B*A. */ - - if (upper) { - for (j = *n; j >= 1; --j) { - temp = *alpha; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; -/* L150: */ - } - i__1 = j - 1; - for (k = 1; k <= i__1; ++k) { - if (a[k + j * a_dim1] != 0.) 
{ - temp = *alpha * a[k + j * a_dim1]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] += temp * b[i__ + k * - b_dim1]; -/* L160: */ - } - } -/* L170: */ - } -/* L180: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = *alpha; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; -/* L190: */ - } - i__2 = *n; - for (k = j + 1; k <= i__2; ++k) { - if (a[k + j * a_dim1] != 0.) { - temp = *alpha * a[k + j * a_dim1]; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - b[i__ + j * b_dim1] += temp * b[i__ + k * - b_dim1]; -/* L200: */ - } - } -/* L210: */ - } -/* L220: */ - } - } - } else { - -/* Form B := alpha*B*A'. */ - - if (upper) { - i__1 = *n; - for (k = 1; k <= i__1; ++k) { - i__2 = k - 1; - for (j = 1; j <= i__2; ++j) { - if (a[j + k * a_dim1] != 0.) { - temp = *alpha * a[j + k * a_dim1]; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - b[i__ + j * b_dim1] += temp * b[i__ + k * - b_dim1]; -/* L230: */ - } - } -/* L240: */ - } - temp = *alpha; - if (nounit) { - temp *= a[k + k * a_dim1]; - } - if (temp != 1.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; -/* L250: */ - } - } -/* L260: */ - } - } else { - for (k = *n; k >= 1; --k) { - i__1 = *n; - for (j = k + 1; j <= i__1; ++j) { - if (a[j + k * a_dim1] != 0.) { - temp = *alpha * a[j + k * a_dim1]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] += temp * b[i__ + k * - b_dim1]; -/* L270: */ - } - } -/* L280: */ - } - temp = *alpha; - if (nounit) { - temp *= a[k + k * a_dim1]; - } - if (temp != 1.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; -/* L290: */ - } - } -/* L300: */ - } - } - } - } - - return 0; - -/* End of DTRMM . 
*/ - -} /* dtrmm_ */ - -/* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n, - doublereal *a, integer *lda, doublereal *x, integer *incx) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, ix, jx, kx, info; - static doublereal temp; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical nounit; - - -/* - Purpose - ======= - - DTRMV performs one of the matrix-vector operations - - x := A*x, or x := A'*x, - - where x is an n element vector and A is an n by n unit, or non-unit, - upper or lower triangular matrix. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' x := A*x. - - TRANS = 'T' or 't' x := A'*x. - - TRANS = 'C' or 'c' x := A'*x. - - Unchanged on exit. - - DIAG - CHARACTER*1. - On entry, DIAG specifies whether or not A is unit - triangular as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. 
- Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - X - DOUBLE PRECISION array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element vector x. On exit, X is overwritten with the - tranformed vector x. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (((! lsame_(trans, "N") && ! lsame_(trans, - "T")) && ! lsame_(trans, "C"))) { - info = 2; - } else if ((! lsame_(diag, "U") && ! lsame_(diag, - "N"))) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*lda < max(1,*n)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } - if (info != 0) { - xerbla_("DTRMV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - - nounit = lsame_(diag, "N"); - -/* - Set up the start point in X if the increment is not unity. This - will be ( N - 1 )*INCX too small for descending loops. 
-*/ - - if (*incx <= 0) { - kx = 1 - (*n - 1) * *incx; - } else if (*incx != 1) { - kx = 1; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. -*/ - - if (lsame_(trans, "N")) { - -/* Form x := A*x. */ - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[j] != 0.) { - temp = x[j]; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - x[i__] += temp * a[i__ + j * a_dim1]; -/* L10: */ - } - if (nounit) { - x[j] *= a[j + j * a_dim1]; - } - } -/* L20: */ - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0.) { - temp = x[jx]; - ix = kx; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - x[ix] += temp * a[i__ + j * a_dim1]; - ix += *incx; -/* L30: */ - } - if (nounit) { - x[jx] *= a[j + j * a_dim1]; - } - } - jx += *incx; -/* L40: */ - } - } - } else { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - if (x[j] != 0.) { - temp = x[j]; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - x[i__] += temp * a[i__ + j * a_dim1]; -/* L50: */ - } - if (nounit) { - x[j] *= a[j + j * a_dim1]; - } - } -/* L60: */ - } - } else { - kx += (*n - 1) * *incx; - jx = kx; - for (j = *n; j >= 1; --j) { - if (x[jx] != 0.) { - temp = x[jx]; - ix = kx; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - x[ix] += temp * a[i__ + j * a_dim1]; - ix -= *incx; -/* L70: */ - } - if (nounit) { - x[jx] *= a[j + j * a_dim1]; - } - } - jx -= *incx; -/* L80: */ - } - } - } - } else { - -/* Form x := A'*x. 
*/ - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - temp = x[j]; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - for (i__ = j - 1; i__ >= 1; --i__) { - temp += a[i__ + j * a_dim1] * x[i__]; -/* L90: */ - } - x[j] = temp; -/* L100: */ - } - } else { - jx = kx + (*n - 1) * *incx; - for (j = *n; j >= 1; --j) { - temp = x[jx]; - ix = jx; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - temp += a[i__ + j * a_dim1] * x[ix]; -/* L110: */ - } - x[jx] = temp; - jx -= *incx; -/* L120: */ - } - } - } else { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = x[j]; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - temp += a[i__ + j * a_dim1] * x[i__]; -/* L130: */ - } - x[j] = temp; -/* L140: */ - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = x[jx]; - ix = jx; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - temp += a[i__ + j * a_dim1] * x[ix]; -/* L150: */ - } - x[jx] = temp; - jx += *incx; -/* L160: */ - } - } - } - } - - return 0; - -/* End of DTRMV . 
*/ - -} /* dtrmv_ */ - -/* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag, - integer *m, integer *n, doublereal *alpha, doublereal *a, integer * - lda, doublereal *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, k, info; - static doublereal temp; - static logical lside; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical nounit; - - -/* - Purpose - ======= - - DTRSM solves one of the matrix equations - - op( A )*X = alpha*B, or X*op( A ) = alpha*B, - - where alpha is a scalar, X and B are m by n matrices, A is a unit, or - non-unit, upper or lower triangular matrix and op( A ) is one of - - op( A ) = A or op( A ) = A'. - - The matrix X is overwritten on B. - - Parameters - ========== - - SIDE - CHARACTER*1. - On entry, SIDE specifies whether op( A ) appears on the left - or right of X as follows: - - SIDE = 'L' or 'l' op( A )*X = alpha*B. - - SIDE = 'R' or 'r' X*op( A ) = alpha*B. - - Unchanged on exit. - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix A is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANSA - CHARACTER*1. - On entry, TRANSA specifies the form of op( A ) to be used in - the matrix multiplication as follows: - - TRANSA = 'N' or 'n' op( A ) = A. - - TRANSA = 'T' or 't' op( A ) = A'. - - TRANSA = 'C' or 'c' op( A ) = A'. - - Unchanged on exit. - - DIAG - CHARACTER*1. - On entry, DIAG specifies whether or not A is unit triangular - as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - M - INTEGER. 
- On entry, M specifies the number of rows of B. M must be at - least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of B. N must be - at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION. - On entry, ALPHA specifies the scalar alpha. When alpha is - zero then A is not referenced and B need not be set before - entry. - Unchanged on exit. - - A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m - when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. - Before entry with UPLO = 'U' or 'u', the leading k by k - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading k by k - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When SIDE = 'L' or 'l' then - LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' - then LDA must be at least max( 1, n ). - Unchanged on exit. - - B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). - Before entry, the leading m by n part of the array B must - contain the right-hand side matrix B, and on exit is - overwritten by the solution matrix X. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. LDB must be at least - max( 1, m ). - Unchanged on exit. - - - Level 3 Blas routine. - - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - lside = lsame_(side, "L"); - if (lside) { - nrowa = *m; - } else { - nrowa = *n; - } - nounit = lsame_(diag, "N"); - upper = lsame_(uplo, "U"); - - info = 0; - if ((! lside && ! lsame_(side, "R"))) { - info = 1; - } else if ((! upper && ! lsame_(uplo, "L"))) { - info = 2; - } else if (((! lsame_(transa, "N") && ! lsame_( - transa, "T")) && ! lsame_(transa, "C"))) { - info = 3; - } else if ((! lsame_(diag, "U") && ! lsame_(diag, - "N"))) { - info = 4; - } else if (*m < 0) { - info = 5; - } else if (*n < 0) { - info = 6; - } else if (*lda < max(1,nrowa)) { - info = 9; - } else if (*ldb < max(1,*m)) { - info = 11; - } - if (info != 0) { - xerbla_("DTRSM ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if (*alpha == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - return 0; - } - -/* Start the operations. */ - - if (lside) { - if (lsame_(transa, "N")) { - -/* Form B := alpha*inv( A )*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*alpha != 1.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] - ; -/* L30: */ - } - } - for (k = *m; k >= 1; --k) { - if (b[k + j * b_dim1] != 0.) { - if (nounit) { - b[k + j * b_dim1] /= a[k + k * a_dim1]; - } - i__2 = k - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[ - i__ + k * a_dim1]; -/* L40: */ - } - } -/* L50: */ - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*alpha != 1.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] - ; -/* L70: */ - } - } - i__2 = *m; - for (k = 1; k <= i__2; ++k) { - if (b[k + j * b_dim1] != 0.) { - if (nounit) { - b[k + j * b_dim1] /= a[k + k * a_dim1]; - } - i__3 = *m; - for (i__ = k + 1; i__ <= i__3; ++i__) { - b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[ - i__ + k * a_dim1]; -/* L80: */ - } - } -/* L90: */ - } -/* L100: */ - } - } - } else { - -/* Form B := alpha*inv( A' )*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = *alpha * b[i__ + j * b_dim1]; - i__3 = i__ - 1; - for (k = 1; k <= i__3; ++k) { - temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1]; -/* L110: */ - } - if (nounit) { - temp /= a[i__ + i__ * a_dim1]; - } - b[i__ + j * b_dim1] = temp; -/* L120: */ - } -/* L130: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (i__ = *m; i__ >= 1; --i__) { - temp = *alpha * b[i__ + j * b_dim1]; - i__2 = *m; - for (k = i__ + 1; k <= i__2; ++k) { - temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1]; -/* L140: */ - } - if (nounit) { - temp /= a[i__ + i__ * a_dim1]; - } - b[i__ + j * b_dim1] = temp; -/* L150: */ - } -/* L160: */ - } - } - } - } else { - if (lsame_(transa, "N")) { - -/* Form B := alpha*B*inv( A ). */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*alpha != 1.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] - ; -/* L170: */ - } - } - i__2 = j - 1; - for (k = 1; k <= i__2; ++k) { - if (a[k + j * a_dim1] != 0.) { - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[ - i__ + k * b_dim1]; -/* L180: */ - } - } -/* L190: */ - } - if (nounit) { - temp = 1. 
/ a[j + j * a_dim1]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; -/* L200: */ - } - } -/* L210: */ - } - } else { - for (j = *n; j >= 1; --j) { - if (*alpha != 1.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] - ; -/* L220: */ - } - } - i__1 = *n; - for (k = j + 1; k <= i__1; ++k) { - if (a[k + j * a_dim1] != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[ - i__ + k * b_dim1]; -/* L230: */ - } - } -/* L240: */ - } - if (nounit) { - temp = 1. / a[j + j * a_dim1]; - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; -/* L250: */ - } - } -/* L260: */ - } - } - } else { - -/* Form B := alpha*B*inv( A' ). */ - - if (upper) { - for (k = *n; k >= 1; --k) { - if (nounit) { - temp = 1. / a[k + k * a_dim1]; - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; -/* L270: */ - } - } - i__1 = k - 1; - for (j = 1; j <= i__1; ++j) { - if (a[j + k * a_dim1] != 0.) { - temp = a[j + k * a_dim1]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] -= temp * b[i__ + k * - b_dim1]; -/* L280: */ - } - } -/* L290: */ - } - if (*alpha != 1.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1] - ; -/* L300: */ - } - } -/* L310: */ - } - } else { - i__1 = *n; - for (k = 1; k <= i__1; ++k) { - if (nounit) { - temp = 1. / a[k + k * a_dim1]; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; -/* L320: */ - } - } - i__2 = *n; - for (j = k + 1; j <= i__2; ++j) { - if (a[j + k * a_dim1] != 0.) { - temp = a[j + k * a_dim1]; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - b[i__ + j * b_dim1] -= temp * b[i__ + k * - b_dim1]; -/* L330: */ - } - } -/* L340: */ - } - if (*alpha != 1.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1] - ; -/* L350: */ - } - } -/* L360: */ - } - } - } - } - - return 0; - -/* End of DTRSM . */ - -} /* dtrsm_ */ - -doublereal dzasum_(integer *n, doublecomplex *zx, integer *incx) -{ - /* System generated locals */ - integer i__1; - doublereal ret_val; - - /* Local variables */ - static integer i__, ix; - static doublereal stemp; - extern doublereal dcabs1_(doublecomplex *); - - -/* - takes the sum of the absolute values. - jack dongarra, 3/11/78. - modified 3/93 to return if incx .le. 0. - modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --zx; - - /* Function Body */ - ret_val = 0.; - stemp = 0.; - if (*n <= 0 || *incx <= 0) { - return ret_val; - } - if (*incx == 1) { - goto L20; - } - -/* code for increment not equal to 1 */ - - ix = 1; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - stemp += dcabs1_(&zx[ix]); - ix += *incx; -/* L10: */ - } - ret_val = stemp; - return ret_val; - -/* code for increment equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - stemp += dcabs1_(&zx[i__]); -/* L30: */ - } - ret_val = stemp; - return ret_val; -} /* dzasum_ */ - -doublereal dznrm2_(integer *n, doublecomplex *x, integer *incx) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - doublereal ret_val, d__1; - - /* Builtin functions */ - double d_imag(doublecomplex *), sqrt(doublereal); - - /* Local variables */ - static integer ix; - static doublereal ssq, temp, norm, scale; - - -/* - DZNRM2 returns the euclidean norm of a vector via the function - name, so that - - DZNRM2 := sqrt( conjg( x' )*x ) - - - -- This version written on 25-October-1982. - Modified on 14-October-1993 to inline the call to ZLASSQ. - Sven Hammarling, Nag Ltd. 
-*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*n < 1 || *incx < 1) { - norm = 0.; - } else { - scale = 0.; - ssq = 1.; -/* - The following loop is equivalent to this call to the LAPACK - auxiliary routine: - CALL ZLASSQ( N, X, INCX, SCALE, SSQ ) -*/ - - i__1 = (*n - 1) * *incx + 1; - i__2 = *incx; - for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { - i__3 = ix; - if (x[i__3].r != 0.) { - i__3 = ix; - temp = (d__1 = x[i__3].r, abs(d__1)); - if (scale < temp) { -/* Computing 2nd power */ - d__1 = scale / temp; - ssq = ssq * (d__1 * d__1) + 1.; - scale = temp; - } else { -/* Computing 2nd power */ - d__1 = temp / scale; - ssq += d__1 * d__1; - } - } - if (d_imag(&x[ix]) != 0.) { - temp = (d__1 = d_imag(&x[ix]), abs(d__1)); - if (scale < temp) { -/* Computing 2nd power */ - d__1 = scale / temp; - ssq = ssq * (d__1 * d__1) + 1.; - scale = temp; - } else { -/* Computing 2nd power */ - d__1 = temp / scale; - ssq += d__1 * d__1; - } - } -/* L10: */ - } - norm = scale * sqrt(ssq); - } - - ret_val = norm; - return ret_val; - -/* End of DZNRM2. */ - -} /* dznrm2_ */ - -integer idamax_(integer *n, doublereal *dx, integer *incx) -{ - /* System generated locals */ - integer ret_val, i__1; - doublereal d__1; - - /* Local variables */ - static integer i__, ix; - static doublereal dmax__; - - -/* - finds the index of element having max. absolute value. - jack dongarra, linpack, 3/11/78. - modified 3/93 to return if incx .le. 0. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --dx; - - /* Function Body */ - ret_val = 0; - if (*n < 1 || *incx <= 0) { - return ret_val; - } - ret_val = 1; - if (*n == 1) { - return ret_val; - } - if (*incx == 1) { - goto L20; - } - -/* code for increment not equal to 1 */ - - ix = 1; - dmax__ = abs(dx[1]); - ix += *incx; - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - if ((d__1 = dx[ix], abs(d__1)) <= dmax__) { - goto L5; - } - ret_val = i__; - dmax__ = (d__1 = dx[ix], abs(d__1)); -L5: - ix += *incx; -/* L10: */ - } - return ret_val; - -/* code for increment equal to 1 */ - -L20: - dmax__ = abs(dx[1]); - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - if ((d__1 = dx[i__], abs(d__1)) <= dmax__) { - goto L30; - } - ret_val = i__; - dmax__ = (d__1 = dx[i__], abs(d__1)); -L30: - ; - } - return ret_val; -} /* idamax_ */ - -integer izamax_(integer *n, doublecomplex *zx, integer *incx) -{ - /* System generated locals */ - integer ret_val, i__1; - - /* Local variables */ - static integer i__, ix; - static doublereal smax; - extern doublereal dcabs1_(doublecomplex *); - - -/* - finds the index of element having max. absolute value. - jack dongarra, 1/15/85. - modified 3/93 to return if incx .le. 0. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --zx; - - /* Function Body */ - ret_val = 0; - if (*n < 1 || *incx <= 0) { - return ret_val; - } - ret_val = 1; - if (*n == 1) { - return ret_val; - } - if (*incx == 1) { - goto L20; - } - -/* code for increment not equal to 1 */ - - ix = 1; - smax = dcabs1_(&zx[1]); - ix += *incx; - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - if (dcabs1_(&zx[ix]) <= smax) { - goto L5; - } - ret_val = i__; - smax = dcabs1_(&zx[ix]); -L5: - ix += *incx; -/* L10: */ - } - return ret_val; - -/* code for increment equal to 1 */ - -L20: - smax = dcabs1_(&zx[1]); - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - if (dcabs1_(&zx[i__]) <= smax) { - goto L30; - } - ret_val = i__; - smax = dcabs1_(&zx[i__]); -L30: - ; - } - return ret_val; -} /* izamax_ */ - -logical lsame_(char *ca, char *cb) -{ - /* System generated locals */ - logical ret_val; - - /* Local variables */ - static integer inta, intb, zcode; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - LSAME returns .TRUE. if CA is the same letter as CB regardless of - case. - - Arguments - ========= - - CA (input) CHARACTER*1 - CB (input) CHARACTER*1 - CA and CB specify the single characters to be compared. - - ===================================================================== - - - Test if the characters are equal -*/ - - ret_val = *(unsigned char *)ca == *(unsigned char *)cb; - if (ret_val) { - return ret_val; - } - -/* Now test for equivalence if both characters are alphabetic. */ - - zcode = 'Z'; - -/* - Use 'Z' rather than 'A' so that ASCII can be detected on Prime - machines, on which ICHAR returns a value with bit 8 set. - ICHAR('A') on Prime machines returns 193 which is the same as - ICHAR('A') on an EBCDIC machine. 
-*/ - - inta = *(unsigned char *)ca; - intb = *(unsigned char *)cb; - - if (zcode == 90 || zcode == 122) { - -/* - ASCII is assumed - ZCODE is the ASCII code of either lower or - upper case 'Z'. -*/ - - if ((inta >= 97 && inta <= 122)) { - inta += -32; - } - if ((intb >= 97 && intb <= 122)) { - intb += -32; - } - - } else if (zcode == 233 || zcode == 169) { - -/* - EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or - upper case 'Z'. -*/ - - if ((inta >= 129 && inta <= 137) || (inta >= 145 && inta <= 153) || ( - inta >= 162 && inta <= 169)) { - inta += 64; - } - if ((intb >= 129 && intb <= 137) || (intb >= 145 && intb <= 153) || ( - intb >= 162 && intb <= 169)) { - intb += 64; - } - - } else if (zcode == 218 || zcode == 250) { - -/* - ASCII is assumed, on Prime machines - ZCODE is the ASCII code - plus 128 of either lower or upper case 'Z'. -*/ - - if ((inta >= 225 && inta <= 250)) { - inta += -32; - } - if ((intb >= 225 && intb <= 250)) { - intb += -32; - } - } - ret_val = inta == intb; - -/* - RETURN - - End of LSAME -*/ - - return ret_val; -} /* lsame_ */ - -/* Using xerbla_ from pythonxerbla.c */ -/* Subroutine */ int xerbla_DISABLE(char *srname, integer *info) -{ - /* Format strings */ - static char fmt_9999[] = "(\002 ** On entry to \002,a6,\002 parameter nu" - "mber \002,i2,\002 had \002,\002an illegal value\002)"; - - /* Builtin functions */ - integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); - /* Subroutine */ int s_stop(char *, ftnlen); - - /* Fortran I/O blocks */ - static cilist io___147 = { 0, 6, 0, fmt_9999, 0 }; - - -/* - -- LAPACK auxiliary routine (preliminary version) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - XERBLA is an error handler for the LAPACK routines. - It is called by an LAPACK routine if an input parameter has an - invalid value. 
A message is printed and execution stops. - - Installers may consider modifying the STOP statement in order to - call system-specific exception-handling facilities. - - Arguments - ========= - - SRNAME (input) CHARACTER*6 - The name of the routine which called XERBLA. - - INFO (input) INTEGER - The position of the invalid parameter in the parameter list - of the calling routine. -*/ - - - s_wsfe(&io___147); - do_fio(&c__1, srname, (ftnlen)6); - do_fio(&c__1, (char *)&(*info), (ftnlen)sizeof(integer)); - e_wsfe(); - - s_stop("", (ftnlen)0); - - -/* End of XERBLA */ - - return 0; -} /* xerbla_ */ - -/* Subroutine */ int zaxpy_(integer *n, doublecomplex *za, doublecomplex *zx, - integer *incx, doublecomplex *zy, integer *incy) -{ - /* System generated locals */ - integer i__1, i__2, i__3, i__4; - doublecomplex z__1, z__2; - - /* Local variables */ - static integer i__, ix, iy; - extern doublereal dcabs1_(doublecomplex *); - - -/* - constant times a vector plus a vector. - jack dongarra, 3/11/78. - modified 12/3/93, array(1) declarations changed to array(*) -*/ - - /* Parameter adjustments */ - --zy; - --zx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if (dcabs1_(za) == 0.) 
{ - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = iy; - i__3 = iy; - i__4 = ix; - z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[ - i__4].i + za->i * zx[i__4].r; - z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i; - zy[i__2].r = z__1.r, zy[i__2].i = z__1.i; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* code for both increments equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__; - i__4 = i__; - z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[ - i__4].i + za->i * zx[i__4].r; - z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i; - zy[i__2].r = z__1.r, zy[i__2].i = z__1.i; -/* L30: */ - } - return 0; -} /* zaxpy_ */ - -/* Subroutine */ int zcopy_(integer *n, doublecomplex *zx, integer *incx, - doublecomplex *zy, integer *incy) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - - /* Local variables */ - static integer i__, ix, iy; - - -/* - copies a vector, x, to a vector, y. - jack dongarra, linpack, 4/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --zy; - --zx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = iy; - i__3 = ix; - zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* code for both increments equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__; - zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i; -/* L30: */ - } - return 0; -} /* zcopy_ */ - -/* Double Complex */ VOID zdotc_(doublecomplex * ret_val, integer *n, - doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy) -{ - /* System generated locals */ - integer i__1, i__2; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, ix, iy; - static doublecomplex ztemp; - - -/* - forms the dot product of a vector. - jack dongarra, 3/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - /* Parameter adjustments */ - --zy; - --zx; - - /* Function Body */ - ztemp.r = 0., ztemp.i = 0.; - ret_val->r = 0., ret_val->i = 0.; - if (*n <= 0) { - return ; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - d_cnjg(&z__3, &zx[ix]); - i__2 = iy; - z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r * - zy[i__2].i + z__3.i * zy[i__2].r; - z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i; - ztemp.r = z__1.r, ztemp.i = z__1.i; - ix += *incx; - iy += *incy; -/* L10: */ - } - ret_val->r = ztemp.r, ret_val->i = ztemp.i; - return ; - -/* code for both increments equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - d_cnjg(&z__3, &zx[i__]); - i__2 = i__; - z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r * - zy[i__2].i + z__3.i * zy[i__2].r; - z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i; - ztemp.r = z__1.r, ztemp.i = z__1.i; -/* L30: */ - } - ret_val->r = ztemp.r, ret_val->i = ztemp.i; - return ; -} /* zdotc_ */ - -/* Double Complex */ VOID zdotu_(doublecomplex * ret_val, integer *n, - doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - doublecomplex z__1, z__2; - - /* Local variables */ - static integer i__, ix, iy; - static doublecomplex ztemp; - - -/* - forms the dot product of two vectors. - jack dongarra, 3/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - /* Parameter adjustments */ - --zy; - --zx; - - /* Function Body */ - ztemp.r = 0., ztemp.i = 0.; - ret_val->r = 0., ret_val->i = 0.; - if (*n <= 0) { - return ; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments - not equal to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ix; - i__3 = iy; - z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i = - zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r; - z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i; - ztemp.r = z__1.r, ztemp.i = z__1.i; - ix += *incx; - iy += *incy; -/* L10: */ - } - ret_val->r = ztemp.r, ret_val->i = ztemp.i; - return ; - -/* code for both increments equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__; - z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i = - zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r; - z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i; - ztemp.r = z__1.r, ztemp.i = z__1.i; -/* L30: */ - } - ret_val->r = ztemp.r, ret_val->i = ztemp.i; - return ; -} /* zdotu_ */ - -/* Subroutine */ int zdscal_(integer *n, doublereal *da, doublecomplex *zx, - integer *incx) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - doublecomplex z__1, z__2; - - /* Local variables */ - static integer i__, ix; - - -/* - scales a vector by a constant. - jack dongarra, 3/11/78. - modified 3/93 to return if incx .le. 0. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --zx; - - /* Function Body */ - if (*n <= 0 || *incx <= 0) { - return 0; - } - if (*incx == 1) { - goto L20; - } - -/* code for increment not equal to 1 */ - - ix = 1; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ix; - z__2.r = *da, z__2.i = 0.; - i__3 = ix; - z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r * - zx[i__3].i + z__2.i * zx[i__3].r; - zx[i__2].r = z__1.r, zx[i__2].i = z__1.i; - ix += *incx; -/* L10: */ - } - return 0; - -/* code for increment equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - z__2.r = *da, z__2.i = 0.; - i__3 = i__; - z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r * - zx[i__3].i + z__2.i * zx[i__3].r; - zx[i__2].r = z__1.r, zx[i__2].i = z__1.i; -/* L30: */ - } - return 0; -} /* zdscal_ */ - -/* Subroutine */ int zgemm_(char *transa, char *transb, integer *m, integer * - n, integer *k, doublecomplex *alpha, doublecomplex *a, integer *lda, - doublecomplex *b, integer *ldb, doublecomplex *beta, doublecomplex * - c__, integer *ldc) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, - i__3, i__4, i__5, i__6; - doublecomplex z__1, z__2, z__3, z__4; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, l, info; - static logical nota, notb; - static doublecomplex temp; - static logical conja, conjb; - static integer ncola; - extern logical lsame_(char *, char *); - static integer nrowa, nrowb; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZGEMM performs one of the matrix-matrix operations - - C := alpha*op( A )*op( B ) + beta*C, - - where op( X ) is one of - - op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ), - - alpha and beta are scalars, and A, B and C are matrices, with 
op( A ) - an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. - - Parameters - ========== - - TRANSA - CHARACTER*1. - On entry, TRANSA specifies the form of op( A ) to be used in - the matrix multiplication as follows: - - TRANSA = 'N' or 'n', op( A ) = A. - - TRANSA = 'T' or 't', op( A ) = A'. - - TRANSA = 'C' or 'c', op( A ) = conjg( A' ). - - Unchanged on exit. - - TRANSB - CHARACTER*1. - On entry, TRANSB specifies the form of op( B ) to be used in - the matrix multiplication as follows: - - TRANSB = 'N' or 'n', op( B ) = B. - - TRANSB = 'T' or 't', op( B ) = B'. - - TRANSB = 'C' or 'c', op( B ) = conjg( B' ). - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of the matrix - op( A ) and of the matrix C. M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix - op( B ) and the number of columns of the matrix C. N must be - at least zero. - Unchanged on exit. - - K - INTEGER. - On entry, K specifies the number of columns of the matrix - op( A ) and the number of rows of the matrix op( B ). K must - be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is - k when TRANSA = 'N' or 'n', and is m otherwise. - Before entry with TRANSA = 'N' or 'n', the leading m by k - part of the array A must contain the matrix A, otherwise - the leading k by m part of the array A must contain the - matrix A. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When TRANSA = 'N' or 'n' then - LDA must be at least max( 1, m ), otherwise LDA must be at - least max( 1, k ). - Unchanged on exit. - - B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is - n when TRANSB = 'N' or 'n', and is k otherwise. 
- Before entry with TRANSB = 'N' or 'n', the leading k by n - part of the array B must contain the matrix B, otherwise - the leading n by k part of the array B must contain the - matrix B. - Unchanged on exit. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. When TRANSB = 'N' or 'n' then - LDB must be at least max( 1, k ), otherwise LDB must be at - least max( 1, n ). - Unchanged on exit. - - BETA - COMPLEX*16 . - On entry, BETA specifies the scalar beta. When BETA is - supplied as zero then C need not be set on input. - Unchanged on exit. - - C - COMPLEX*16 array of DIMENSION ( LDC, n ). - Before entry, the leading m by n part of the array C must - contain the matrix C, except when beta is zero, in which - case C need not be set on entry. - On exit, the array C is overwritten by the m by n matrix - ( alpha*op( A )*op( B ) + beta*C ). - - LDC - INTEGER. - On entry, LDC specifies the first dimension of C as declared - in the calling (sub) program. LDC must be at least - max( 1, m ). - Unchanged on exit. - - - Level 3 Blas routine. - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Set NOTA and NOTB as true if A and B respectively are not - conjugated or transposed, set CONJA and CONJB as true if A and - B respectively are to be transposed but not conjugated and set - NROWA, NCOLA and NROWB as the number of rows and columns of A - and the number of rows of B respectively. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - - /* Function Body */ - nota = lsame_(transa, "N"); - notb = lsame_(transb, "N"); - conja = lsame_(transa, "C"); - conjb = lsame_(transb, "C"); - if (nota) { - nrowa = *m; - ncola = *k; - } else { - nrowa = *k; - ncola = *m; - } - if (notb) { - nrowb = *k; - } else { - nrowb = *n; - } - -/* Test the input parameters. */ - - info = 0; - if (((! nota && ! conja) && ! lsame_(transa, "T"))) - { - info = 1; - } else if (((! notb && ! conjb) && ! lsame_(transb, "T"))) { - info = 2; - } else if (*m < 0) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*k < 0) { - info = 5; - } else if (*lda < max(1,nrowa)) { - info = 8; - } else if (*ldb < max(1,nrowb)) { - info = 10; - } else if (*ldc < max(1,*m)) { - info = 13; - } - if (info != 0) { - xerbla_("ZGEMM ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || (((alpha->r == 0. && alpha->i == 0.) || *k == 0) - && ((beta->r == 1. && beta->i == 0.)))) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if ((alpha->r == 0. && alpha->i == 0.)) { - if ((beta->r == 0. && beta->i == 0.)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i, - z__1.i = beta->r * c__[i__4].i + beta->i * c__[ - i__4].r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L30: */ - } -/* L40: */ - } - } - return 0; - } - -/* Start the operations. */ - - if (notb) { - if (nota) { - -/* Form C := alpha*A*B + beta*C. 
*/ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if ((beta->r == 0. && beta->i == 0.)) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L50: */ - } - } else if (beta->r != 1. || beta->i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__1.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L60: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = l + j * b_dim1; - if (b[i__3].r != 0. || b[i__3].i != 0.) { - i__3 = l + j * b_dim1; - z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i, - z__1.i = alpha->r * b[i__3].i + alpha->i * b[ - i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i, - z__2.i = temp.r * a[i__6].i + temp.i * a[ - i__6].r; - z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5] - .i + z__2.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L70: */ - } - } -/* L80: */ - } -/* L90: */ - } - } else if (conja) { - -/* Form C := alpha*conjg( A' )*B + beta*C. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - i__4 = l + j * b_dim1; - z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i, - z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4] - .r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L100: */ - } - if ((beta->r == 0. 
&& beta->i == 0.)) { - i__3 = i__ + j * c_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, - z__2.i = alpha->r * temp.i + alpha->i * - temp.r; - i__4 = i__ + j * c_dim1; - z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__3.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L110: */ - } -/* L120: */ - } - } else { - -/* Form C := alpha*A'*B + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - i__4 = l + i__ * a_dim1; - i__5 = l + j * b_dim1; - z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5] - .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4] - .i * b[i__5].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L130: */ - } - if ((beta->r == 0. && beta->i == 0.)) { - i__3 = i__ + j * c_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, - z__2.i = alpha->r * temp.i + alpha->i * - temp.r; - i__4 = i__ + j * c_dim1; - z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__3.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L140: */ - } -/* L150: */ - } - } - } else if (nota) { - if (conjb) { - -/* Form C := alpha*A*conjg( B' ) + beta*C. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if ((beta->r == 0. 
&& beta->i == 0.)) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L160: */ - } - } else if (beta->r != 1. || beta->i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__1.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L170: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = j + l * b_dim1; - if (b[i__3].r != 0. || b[i__3].i != 0.) { - d_cnjg(&z__2, &b[j + l * b_dim1]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, - z__1.i = alpha->r * z__2.i + alpha->i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i, - z__2.i = temp.r * a[i__6].i + temp.i * a[ - i__6].r; - z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5] - .i + z__2.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L180: */ - } - } -/* L190: */ - } -/* L200: */ - } - } else { - -/* Form C := alpha*A*B' + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if ((beta->r == 0. && beta->i == 0.)) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L210: */ - } - } else if (beta->r != 1. || beta->i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__1.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L220: */ - } - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = j + l * b_dim1; - if (b[i__3].r != 0. || b[i__3].i != 0.) 
{ - i__3 = j + l * b_dim1; - z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i, - z__1.i = alpha->r * b[i__3].i + alpha->i * b[ - i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i, - z__2.i = temp.r * a[i__6].i + temp.i * a[ - i__6].r; - z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5] - .i + z__2.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L230: */ - } - } -/* L240: */ - } -/* L250: */ - } - } - } else if (conja) { - if (conjb) { - -/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - d_cnjg(&z__4, &b[j + l * b_dim1]); - z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = - z__3.r * z__4.i + z__3.i * z__4.r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L260: */ - } - if ((beta->r == 0. 
&& beta->i == 0.)) { - i__3 = i__ + j * c_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, - z__2.i = alpha->r * temp.i + alpha->i * - temp.r; - i__4 = i__ + j * c_dim1; - z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__3.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L270: */ - } -/* L280: */ - } - } else { - -/* Form C := alpha*conjg( A' )*B' + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - i__4 = j + l * b_dim1; - z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i, - z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4] - .r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L290: */ - } - if ((beta->r == 0. 
&& beta->i == 0.)) { - i__3 = i__ + j * c_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, - z__2.i = alpha->r * temp.i + alpha->i * - temp.r; - i__4 = i__ + j * c_dim1; - z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__3.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L300: */ - } -/* L310: */ - } - } - } else { - if (conjb) { - -/* Form C := alpha*A'*conjg( B' ) + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - i__4 = l + i__ * a_dim1; - d_cnjg(&z__3, &b[j + l * b_dim1]); - z__2.r = a[i__4].r * z__3.r - a[i__4].i * z__3.i, - z__2.i = a[i__4].r * z__3.i + a[i__4].i * - z__3.r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L320: */ - } - if ((beta->r == 0. 
&& beta->i == 0.)) { - i__3 = i__ + j * c_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, - z__2.i = alpha->r * temp.i + alpha->i * - temp.r; - i__4 = i__ + j * c_dim1; - z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__3.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L330: */ - } -/* L340: */ - } - } else { - -/* Form C := alpha*A'*B' + beta*C */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - i__4 = l + i__ * a_dim1; - i__5 = j + l * b_dim1; - z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5] - .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4] - .i * b[i__5].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L350: */ - } - if ((beta->r == 0. && beta->i == 0.)) { - i__3 = i__ + j * c_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, - z__2.i = alpha->r * temp.i + alpha->i * - temp.r; - i__4 = i__ + j * c_dim1; - z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4] - .i, z__3.i = beta->r * c__[i__4].i + beta->i * - c__[i__4].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L360: */ - } -/* L370: */ - } - } - } - - return 0; - -/* End of ZGEMM . 
*/ - -} /* zgemm_ */ - -/* Subroutine */ int zgemv_(char *trans, integer *m, integer *n, - doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex * - x, integer *incx, doublecomplex *beta, doublecomplex *y, integer * - incy) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, ix, iy, jx, jy, kx, ky, info; - static doublecomplex temp; - static integer lenx, leny; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical noconj; - - -/* - Purpose - ======= - - ZGEMV performs one of the matrix-vector operations - - y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or - - y := alpha*conjg( A' )*x + beta*y, - - where alpha and beta are scalars, x and y are vectors and A is an - m by n matrix. - - Parameters - ========== - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' y := alpha*A*x + beta*y. - - TRANS = 'T' or 't' y := alpha*A'*x + beta*y. - - TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y. - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of the matrix A. - M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). - Before entry, the leading m by n part of the array A must - contain the matrix of coefficients. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, m ). - Unchanged on exit. 
- - X - COMPLEX*16 array of DIMENSION at least - ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' - and at least - ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. - Before entry, the incremented array X must contain the - vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - BETA - COMPLEX*16 . - On entry, BETA specifies the scalar beta. When BETA is - supplied as zero then Y need not be set on input. - Unchanged on exit. - - Y - COMPLEX*16 array of DIMENSION at least - ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' - and at least - ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. - Before entry with BETA non-zero, the incremented array Y - must contain the vector y. On exit, Y is overwritten by the - updated vector y. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - --y; - - /* Function Body */ - info = 0; - if (((! lsame_(trans, "N") && ! lsame_(trans, "T")) && ! lsame_(trans, "C"))) { - info = 1; - } else if (*m < 0) { - info = 2; - } else if (*n < 0) { - info = 3; - } else if (*lda < max(1,*m)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } else if (*incy == 0) { - info = 11; - } - if (info != 0) { - xerbla_("ZGEMV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || ((alpha->r == 0. && alpha->i == 0.) && (( - beta->r == 1. 
&& beta->i == 0.)))) { - return 0; - } - - noconj = lsame_(trans, "T"); - -/* - Set LENX and LENY, the lengths of the vectors x and y, and set - up the start points in X and Y. -*/ - - if (lsame_(trans, "N")) { - lenx = *n; - leny = *m; - } else { - lenx = *m; - leny = *n; - } - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (lenx - 1) * *incx; - } - if (*incy > 0) { - ky = 1; - } else { - ky = 1 - (leny - 1) * *incy; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. - - First form y := beta*y. -*/ - - if (beta->r != 1. || beta->i != 0.) { - if (*incy == 1) { - if ((beta->r == 0. && beta->i == 0.)) { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - y[i__2].r = 0., y[i__2].i = 0.; -/* L10: */ - } - } else { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__; - z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, - z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] - .r; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; -/* L20: */ - } - } - } else { - iy = ky; - if ((beta->r == 0. && beta->i == 0.)) { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = iy; - y[i__2].r = 0., y[i__2].i = 0.; - iy += *incy; -/* L30: */ - } - } else { - i__1 = leny; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = iy; - i__3 = iy; - z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, - z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] - .r; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - iy += *incy; -/* L40: */ - } - } - } - } - if ((alpha->r == 0. && alpha->i == 0.)) { - return 0; - } - if (lsame_(trans, "N")) { - -/* Form y := alpha*A*x + y. */ - - jx = kx; - if (*incy == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - if (x[i__2].r != 0. || x[i__2].i != 0.) 
{ - i__2 = jx; - z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, - z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2] - .r; - temp.r = z__1.r, temp.i = z__1.i; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__; - i__4 = i__; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[i__5] - .r; - z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + - z__2.i; - y[i__3].r = z__1.r, y[i__3].i = z__1.i; -/* L50: */ - } - } - jx += *incx; -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - if (x[i__2].r != 0. || x[i__2].i != 0.) { - i__2 = jx; - z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, - z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2] - .r; - temp.r = z__1.r, temp.i = z__1.i; - iy = ky; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = iy; - i__4 = iy; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[i__5] - .r; - z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + - z__2.i; - y[i__3].r = z__1.r, y[i__3].i = z__1.i; - iy += *incy; -/* L70: */ - } - } - jx += *incx; -/* L80: */ - } - } - } else { - -/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. 
*/ - - jy = ky; - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp.r = 0., temp.i = 0.; - if (noconj) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4] - .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3] - .i * x[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L90: */ - } - } else { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = i__; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3] - .r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L100: */ - } - } - i__2 = jy; - i__3 = jy; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i = - alpha->r * temp.i + alpha->i * temp.r; - z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - jy += *incy; -/* L110: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp.r = 0., temp.i = 0.; - ix = kx; - if (noconj) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = ix; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4] - .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3] - .i * x[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - ix += *incx; -/* L120: */ - } - } else { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = ix; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3] - .r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - ix += *incx; -/* L130: */ - } - } - i__2 = jy; - i__3 = jy; - z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i = - alpha->r * temp.i + alpha->i * temp.r; - z__1.r = y[i__3].r + z__2.r, z__1.i = 
y[i__3].i + z__2.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - jy += *incy; -/* L140: */ - } - } - } - - return 0; - -/* End of ZGEMV . */ - -} /* zgemv_ */ - -/* Subroutine */ int zgerc_(integer *m, integer *n, doublecomplex *alpha, - doublecomplex *x, integer *incx, doublecomplex *y, integer *incy, - doublecomplex *a, integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, ix, jy, kx, info; - static doublecomplex temp; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZGERC performs the rank 1 operation - - A := alpha*x*conjg( y' ) + A, - - where alpha is a scalar, x is an m element vector, y is an n element - vector and A is an m by n matrix. - - Parameters - ========== - - M - INTEGER. - On entry, M specifies the number of rows of the matrix A. - M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - X - COMPLEX*16 array of dimension at least - ( 1 + ( m - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the m - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - Y - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). - Before entry, the incremented array Y must contain the n - element vector y. - Unchanged on exit. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). 
- Before entry, the leading m by n part of the array A must - contain the matrix of coefficients. On exit, A is - overwritten by the updated matrix. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, m ). - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --x; - --y; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if (*m < 0) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*incx == 0) { - info = 5; - } else if (*incy == 0) { - info = 7; - } else if (*lda < max(1,*m)) { - info = 9; - } - if (info != 0) { - xerbla_("ZGERC ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0.)) { - return 0; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. -*/ - - if (*incy > 0) { - jy = 1; - } else { - jy = 1 - (*n - 1) * *incy; - } - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jy; - if (y[i__2].r != 0. || y[i__2].i != 0.) 
{ - d_cnjg(&z__2, &y[jy]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = i__; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L10: */ - } - } - jy += *incy; -/* L20: */ - } - } else { - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*m - 1) * *incx; - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jy; - if (y[i__2].r != 0. || y[i__2].i != 0.) { - d_cnjg(&z__2, &y[jy]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - ix = kx; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = ix; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - ix += *incx; -/* L30: */ - } - } - jy += *incy; -/* L40: */ - } - } - - return 0; - -/* End of ZGERC . 
*/ - -} /* zgerc_ */ - -/* Subroutine */ int zgeru_(integer *m, integer *n, doublecomplex *alpha, - doublecomplex *x, integer *incx, doublecomplex *y, integer *incy, - doublecomplex *a, integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2; - - /* Local variables */ - static integer i__, j, ix, jy, kx, info; - static doublecomplex temp; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZGERU performs the rank 1 operation - - A := alpha*x*y' + A, - - where alpha is a scalar, x is an m element vector, y is an n element - vector and A is an m by n matrix. - - Parameters - ========== - - M - INTEGER. - On entry, M specifies the number of rows of the matrix A. - M must be at least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - X - COMPLEX*16 array of dimension at least - ( 1 + ( m - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the m - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - Y - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). - Before entry, the incremented array Y must contain the n - element vector y. - Unchanged on exit. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). - Before entry, the leading m by n part of the array A must - contain the matrix of coefficients. On exit, A is - overwritten by the updated matrix. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. 
LDA must be at least - max( 1, m ). - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --x; - --y; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if (*m < 0) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*incx == 0) { - info = 5; - } else if (*incy == 0) { - info = 7; - } else if (*lda < max(1,*m)) { - info = 9; - } - if (info != 0) { - xerbla_("ZGERU ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0.)) { - return 0; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. -*/ - - if (*incy > 0) { - jy = 1; - } else { - jy = 1 - (*n - 1) * *incy; - } - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jy; - if (y[i__2].r != 0. || y[i__2].i != 0.) { - i__2 = jy; - z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i = - alpha->r * y[i__2].i + alpha->i * y[i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = i__; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L10: */ - } - } - jy += *incy; -/* L20: */ - } - } else { - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*m - 1) * *incx; - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jy; - if (y[i__2].r != 0. || y[i__2].i != 0.) 
{ - i__2 = jy; - z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i = - alpha->r * y[i__2].i + alpha->i * y[i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - ix = kx; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = ix; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - ix += *incx; -/* L30: */ - } - } - jy += *incy; -/* L40: */ - } - } - - return 0; - -/* End of ZGERU . */ - -} /* zgeru_ */ - -/* Subroutine */ int zhemv_(char *uplo, integer *n, doublecomplex *alpha, - doublecomplex *a, integer *lda, doublecomplex *x, integer *incx, - doublecomplex *beta, doublecomplex *y, integer *incy) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublereal d__1; - doublecomplex z__1, z__2, z__3, z__4; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, ix, iy, jx, jy, kx, ky, info; - static doublecomplex temp1, temp2; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZHEMV performs the matrix-vector operation - - y := alpha*A*x + beta*y, - - where alpha and beta are scalars, x and y are n element vectors and - A is an n by n hermitian matrix. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array A is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of A - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of A - is to be referenced. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . 
- On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular part of the hermitian matrix and the strictly - lower triangular part of A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular part of the hermitian matrix and the strictly - upper triangular part of A is not referenced. - Note that the imaginary parts of the diagonal elements need - not be set and are assumed to be zero. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - X - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - BETA - COMPLEX*16 . - On entry, BETA specifies the scalar beta. When BETA is - supplied as zero then Y need not be set on input. - Unchanged on exit. - - Y - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). - Before entry, the incremented array Y must contain the n - element vector y. On exit, Y is overwritten by the updated - vector y. - - INCY - INTEGER. - On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - --y; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*lda < max(1,*n)) { - info = 5; - } else if (*incx == 0) { - info = 7; - } else if (*incy == 0) { - info = 10; - } - if (info != 0) { - xerbla_("ZHEMV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || ((alpha->r == 0. && alpha->i == 0.) && ((beta->r == 1. && - beta->i == 0.)))) { - return 0; - } - -/* Set up the start points in X and Y. */ - - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*n - 1) * *incx; - } - if (*incy > 0) { - ky = 1; - } else { - ky = 1 - (*n - 1) * *incy; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through the triangular part - of A. - - First form y := beta*y. -*/ - - if (beta->r != 1. || beta->i != 0.) { - if (*incy == 1) { - if ((beta->r == 0. && beta->i == 0.)) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - y[i__2].r = 0., y[i__2].i = 0.; -/* L10: */ - } - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__; - z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, - z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] - .r; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; -/* L20: */ - } - } - } else { - iy = ky; - if ((beta->r == 0. && beta->i == 0.)) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = iy; - y[i__2].r = 0., y[i__2].i = 0.; - iy += *incy; -/* L30: */ - } - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = iy; - i__3 = iy; - z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, - z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] - .r; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - iy += *incy; -/* L40: */ - } - } - } - } - if ((alpha->r == 0. 
&& alpha->i == 0.)) { - return 0; - } - if (lsame_(uplo, "U")) { - -/* Form y when A is stored in upper triangle. */ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = - alpha->r * x[i__2].i + alpha->i * x[i__2].r; - temp1.r = z__1.r, temp1.i = z__1.i; - temp2.r = 0., temp2.i = 0.; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__; - i__4 = i__; - i__5 = i__ + j * a_dim1; - z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, - z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] - .r; - z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; - y[i__3].r = z__1.r, y[i__3].i = z__1.i; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = i__; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = - z__3.r * x[i__3].i + z__3.i * x[i__3].r; - z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; - temp2.r = z__1.r, temp2.i = z__1.i; -/* L50: */ - } - i__2 = j; - i__3 = j; - i__4 = j + j * a_dim1; - d__1 = a[i__4].r; - z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i; - z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i; - z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i = - alpha->r * temp2.i + alpha->i * temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; -/* L60: */ - } - } else { - jx = kx; - jy = ky; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = - alpha->r * x[i__2].i + alpha->i * x[i__2].r; - temp1.r = z__1.r, temp1.i = z__1.i; - temp2.r = 0., temp2.i = 0.; - ix = kx; - iy = ky; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = iy; - i__4 = iy; - i__5 = i__ + j * a_dim1; - z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, - z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] - .r; - z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; - y[i__3].r = z__1.r, y[i__3].i = z__1.i; - 
d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = ix; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = - z__3.r * x[i__3].i + z__3.i * x[i__3].r; - z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; - temp2.r = z__1.r, temp2.i = z__1.i; - ix += *incx; - iy += *incy; -/* L70: */ - } - i__2 = jy; - i__3 = jy; - i__4 = j + j * a_dim1; - d__1 = a[i__4].r; - z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i; - z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i; - z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i = - alpha->r * temp2.i + alpha->i * temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - jx += *incx; - jy += *incy; -/* L80: */ - } - } - } else { - -/* Form y when A is stored in lower triangle. */ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = - alpha->r * x[i__2].i + alpha->i * x[i__2].r; - temp1.r = z__1.r, temp1.i = z__1.i; - temp2.r = 0., temp2.i = 0.; - i__2 = j; - i__3 = j; - i__4 = j + j * a_dim1; - d__1 = a[i__4].r; - z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i; - z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__; - i__4 = i__; - i__5 = i__ + j * a_dim1; - z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, - z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] - .r; - z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; - y[i__3].r = z__1.r, y[i__3].i = z__1.i; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = i__; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = - z__3.r * x[i__3].i + z__3.i * x[i__3].r; - z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; - temp2.r = z__1.r, temp2.i = z__1.i; -/* L90: */ - } - i__2 = j; - i__3 = j; - z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i = - alpha->r * temp2.i + alpha->i * 
temp2.r; - z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; -/* L100: */ - } - } else { - jx = kx; - jy = ky; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = - alpha->r * x[i__2].i + alpha->i * x[i__2].r; - temp1.r = z__1.r, temp1.i = z__1.i; - temp2.r = 0., temp2.i = 0.; - i__2 = jy; - i__3 = jy; - i__4 = j + j * a_dim1; - d__1 = a[i__4].r; - z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i; - z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - ix = jx; - iy = jy; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - iy += *incy; - i__3 = iy; - i__4 = iy; - i__5 = i__ + j * a_dim1; - z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, - z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] - .r; - z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; - y[i__3].r = z__1.r, y[i__3].i = z__1.i; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = ix; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = - z__3.r * x[i__3].i + z__3.i * x[i__3].r; - z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; - temp2.r = z__1.r, temp2.i = z__1.i; -/* L110: */ - } - i__2 = jy; - i__3 = jy; - z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i = - alpha->r * temp2.i + alpha->i * temp2.r; - z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; - y[i__2].r = z__1.r, y[i__2].i = z__1.i; - jx += *incx; - jy += *incy; -/* L120: */ - } - } - } - - return 0; - -/* End of ZHEMV . 
*/ - -} /* zhemv_ */ - -/* Subroutine */ int zher2_(char *uplo, integer *n, doublecomplex *alpha, - doublecomplex *x, integer *incx, doublecomplex *y, integer *incy, - doublecomplex *a, integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6; - doublereal d__1; - doublecomplex z__1, z__2, z__3, z__4; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, ix, iy, jx, jy, kx, ky, info; - static doublecomplex temp1, temp2; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZHER2 performs the hermitian rank 2 operation - - A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A, - - where alpha is a scalar, x and y are n element vectors and A is an n - by n hermitian matrix. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array A is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of A - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of A - is to be referenced. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - X - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element vector x. - Unchanged on exit. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - Y - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCY ) ). - Before entry, the incremented array Y must contain the n - element vector y. - Unchanged on exit. - - INCY - INTEGER. 
- On entry, INCY specifies the increment for the elements of - Y. INCY must not be zero. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular part of the hermitian matrix and the strictly - lower triangular part of A is not referenced. On exit, the - upper triangular part of the array A is overwritten by the - upper triangular part of the updated matrix. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular part of the hermitian matrix and the strictly - upper triangular part of A is not referenced. On exit, the - lower triangular part of the array A is overwritten by the - lower triangular part of the updated matrix. - Note that the imaginary parts of the diagonal elements need - not be set, they are assumed to be zero, and on exit they - are set to zero. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --x; - --y; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*incx == 0) { - info = 5; - } else if (*incy == 0) { - info = 7; - } else if (*lda < max(1,*n)) { - info = 9; - } - if (info != 0) { - xerbla_("ZHER2 ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || (alpha->r == 0. 
&& alpha->i == 0.)) { - return 0; - } - -/* - Set up the start points in X and Y if the increments are not both - unity. -*/ - - if (*incx != 1 || *incy != 1) { - if (*incx > 0) { - kx = 1; - } else { - kx = 1 - (*n - 1) * *incx; - } - if (*incy > 0) { - ky = 1; - } else { - ky = 1 - (*n - 1) * *incy; - } - jx = kx; - jy = ky; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through the triangular part - of A. -*/ - - if (lsame_(uplo, "U")) { - -/* Form A when A is stored in the upper triangle. */ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - i__3 = j; - if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. || - y[i__3].i != 0.)) { - d_cnjg(&z__2, &y[j]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp1.r = z__1.r, temp1.i = z__1.i; - i__2 = j; - z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, - z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2] - .r; - d_cnjg(&z__1, &z__2); - temp2.r = z__1.r, temp2.i = z__1.i; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = i__; - z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i, - z__3.i = x[i__5].r * temp1.i + x[i__5].i * - temp1.r; - z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i + - z__3.i; - i__6 = i__; - z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i, - z__4.i = y[i__6].r * temp2.i + y[i__6].i * - temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L10: */ - } - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - i__4 = j; - z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i, - z__2.i = x[i__4].r * temp1.i + x[i__4].i * - temp1.r; - i__5 = j; - z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i, - z__3.i = y[i__5].r * temp2.i + y[i__5].i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - d__1 = 
a[i__3].r + z__1.r; - a[i__2].r = d__1, a[i__2].i = 0.; - } else { - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - i__3 = jy; - if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. || - y[i__3].i != 0.)) { - d_cnjg(&z__2, &y[jy]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp1.r = z__1.r, temp1.i = z__1.i; - i__2 = jx; - z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, - z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2] - .r; - d_cnjg(&z__1, &z__2); - temp2.r = z__1.r, temp2.i = z__1.i; - ix = kx; - iy = ky; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = ix; - z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i, - z__3.i = x[i__5].r * temp1.i + x[i__5].i * - temp1.r; - z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i + - z__3.i; - i__6 = iy; - z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i, - z__4.i = y[i__6].r * temp2.i + y[i__6].i * - temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - ix += *incx; - iy += *incy; -/* L30: */ - } - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - i__4 = jx; - z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i, - z__2.i = x[i__4].r * temp1.i + x[i__4].i * - temp1.r; - i__5 = jy; - z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i, - z__3.i = y[i__5].r * temp2.i + y[i__5].i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - d__1 = a[i__3].r + z__1.r; - a[i__2].r = d__1, a[i__2].i = 0.; - } else { - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - } - jx += *incx; - jy += *incy; -/* L40: */ - } - } - } else { - -/* Form A when A is stored in the lower triangle. 
*/ - - if ((*incx == 1 && *incy == 1)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - i__3 = j; - if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. || - y[i__3].i != 0.)) { - d_cnjg(&z__2, &y[j]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp1.r = z__1.r, temp1.i = z__1.i; - i__2 = j; - z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, - z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2] - .r; - d_cnjg(&z__1, &z__2); - temp2.r = z__1.r, temp2.i = z__1.i; - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - i__4 = j; - z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i, - z__2.i = x[i__4].r * temp1.i + x[i__4].i * - temp1.r; - i__5 = j; - z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i, - z__3.i = y[i__5].r * temp2.i + y[i__5].i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - d__1 = a[i__3].r + z__1.r; - a[i__2].r = d__1, a[i__2].i = 0.; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = i__; - z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i, - z__3.i = x[i__5].r * temp1.i + x[i__5].i * - temp1.r; - z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i + - z__3.i; - i__6 = i__; - z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i, - z__4.i = y[i__6].r * temp2.i + y[i__6].i * - temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L50: */ - } - } else { - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - i__3 = jy; - if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. 
|| - y[i__3].i != 0.)) { - d_cnjg(&z__2, &y[jy]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp1.r = z__1.r, temp1.i = z__1.i; - i__2 = jx; - z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, - z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2] - .r; - d_cnjg(&z__1, &z__2); - temp2.r = z__1.r, temp2.i = z__1.i; - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - i__4 = jx; - z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i, - z__2.i = x[i__4].r * temp1.i + x[i__4].i * - temp1.r; - i__5 = jy; - z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i, - z__3.i = y[i__5].r * temp2.i + y[i__5].i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - d__1 = a[i__3].r + z__1.r; - a[i__2].r = d__1, a[i__2].i = 0.; - ix = jx; - iy = jy; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - iy += *incy; - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = ix; - z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i, - z__3.i = x[i__5].r * temp1.i + x[i__5].i * - temp1.r; - z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i + - z__3.i; - i__6 = iy; - z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i, - z__4.i = y[i__6].r * temp2.i + y[i__6].i * - temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L70: */ - } - } else { - i__2 = j + j * a_dim1; - i__3 = j + j * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - } - jx += *incx; - jy += *incy; -/* L80: */ - } - } - } - - return 0; - -/* End of ZHER2 . 
*/ - -} /* zher2_ */ - -/* Subroutine */ int zher2k_(char *uplo, char *trans, integer *n, integer *k, - doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex * - b, integer *ldb, doublereal *beta, doublecomplex *c__, integer *ldc) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, - i__3, i__4, i__5, i__6, i__7; - doublereal d__1; - doublecomplex z__1, z__2, z__3, z__4, z__5, z__6; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, l, info; - static doublecomplex temp1, temp2; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZHER2K performs one of the hermitian rank 2k operations - - C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C, - - or - - C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C, - - where alpha and beta are scalars with beta real, C is an n by n - hermitian matrix and A and B are n by k matrices in the first case - and k by n matrices in the second case. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array C is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of C - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of C - is to be referenced. - - Unchanged on exit. - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) + - conjg( alpha )*B*conjg( A' ) + - beta*C. - - TRANS = 'C' or 'c' C := alpha*conjg( A' )*B + - conjg( alpha )*conjg( B' )*A + - beta*C. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix C. N must be - at least zero. - Unchanged on exit. - - K - INTEGER. 
- On entry with TRANS = 'N' or 'n', K specifies the number - of columns of the matrices A and B, and on entry with - TRANS = 'C' or 'c', K specifies the number of rows of the - matrices A and B. K must be at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is - k when TRANS = 'N' or 'n', and is n otherwise. - Before entry with TRANS = 'N' or 'n', the leading n by k - part of the array A must contain the matrix A, otherwise - the leading k by n part of the array A must contain the - matrix A. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When TRANS = 'N' or 'n' - then LDA must be at least max( 1, n ), otherwise LDA must - be at least max( 1, k ). - Unchanged on exit. - - B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is - k when TRANS = 'N' or 'n', and is n otherwise. - Before entry with TRANS = 'N' or 'n', the leading n by k - part of the array B must contain the matrix B, otherwise - the leading k by n part of the array B must contain the - matrix B. - Unchanged on exit. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. When TRANS = 'N' or 'n' - then LDB must be at least max( 1, n ), otherwise LDB must - be at least max( 1, k ). - Unchanged on exit. - - BETA - DOUBLE PRECISION . - On entry, BETA specifies the scalar beta. - Unchanged on exit. - - C - COMPLEX*16 array of DIMENSION ( LDC, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array C must contain the upper - triangular part of the hermitian matrix and the strictly - lower triangular part of C is not referenced. On exit, the - upper triangular part of the array C is overwritten by the - upper triangular part of the updated matrix. 
- Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array C must contain the lower - triangular part of the hermitian matrix and the strictly - upper triangular part of C is not referenced. On exit, the - lower triangular part of the array C is overwritten by the - lower triangular part of the updated matrix. - Note that the imaginary parts of the diagonal elements need - not be set, they are assumed to be zero, and on exit they - are set to zero. - - LDC - INTEGER. - On entry, LDC specifies the first dimension of C as declared - in the calling (sub) program. LDC must be at least - max( 1, n ). - Unchanged on exit. - - - Level 3 Blas routine. - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1. - Ed Anderson, Cray Research Inc. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - - /* Function Body */ - if (lsame_(trans, "N")) { - nrowa = *n; - } else { - nrowa = *k; - } - upper = lsame_(uplo, "U"); - - info = 0; - if ((! upper && ! lsame_(uplo, "L"))) { - info = 1; - } else if ((! lsame_(trans, "N") && ! lsame_(trans, - "C"))) { - info = 2; - } else if (*n < 0) { - info = 3; - } else if (*k < 0) { - info = 4; - } else if (*lda < max(1,nrowa)) { - info = 7; - } else if (*ldb < max(1,nrowa)) { - info = 9; - } else if (*ldc < max(1,*n)) { - info = 12; - } - if (info != 0) { - xerbla_("ZHER2K", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || (((alpha->r == 0. && alpha->i == 0.) || *k == 0) && *beta - == 1.)) { - return 0; - } - -/* And when alpha.eq.zero. 
*/ - - if ((alpha->r == 0. && alpha->i == 0.)) { - if (upper) { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L30: */ - } - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; -/* L40: */ - } - } - } else { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L50: */ - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L70: */ - } -/* L80: */ - } - } - } - return 0; - } - -/* Start the operations. */ - - if (lsame_(trans, "N")) { - -/* - Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + - C. -*/ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L90: */ - } - } else if (*beta != 1.) 
{ - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L100: */ - } - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } else { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = j + l * a_dim1; - i__4 = j + l * b_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r != - 0. || b[i__4].i != 0.)) { - d_cnjg(&z__2, &b[j + l * b_dim1]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, - z__1.i = alpha->r * z__2.i + alpha->i * - z__2.r; - temp1.r = z__1.r, temp1.i = z__1.i; - i__3 = j + l * a_dim1; - z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i, - z__2.i = alpha->r * a[i__3].i + alpha->i * a[ - i__3].r; - d_cnjg(&z__1, &z__2); - temp2.r = z__1.r, temp2.i = z__1.i; - i__3 = j - 1; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__3.r = a[i__6].r * temp1.r - a[i__6].i * - temp1.i, z__3.i = a[i__6].r * temp1.i + a[ - i__6].i * temp1.r; - z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5] - .i + z__3.i; - i__7 = i__ + l * b_dim1; - z__4.r = b[i__7].r * temp2.r - b[i__7].i * - temp2.i, z__4.i = b[i__7].r * temp2.i + b[ - i__7].i * temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + - z__4.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L110: */ - } - i__3 = j + j * c_dim1; - i__4 = j + j * c_dim1; - i__5 = j + l * a_dim1; - z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i, - z__2.i = a[i__5].r * temp1.i + a[i__5].i * - temp1.r; - i__6 = j + l * b_dim1; - z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i, - z__3.i = b[i__6].r * temp2.i + b[i__6].i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - d__1 = 
c__[i__4].r + z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } -/* L120: */ - } -/* L130: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L140: */ - } - } else if (*beta != 1.) { - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L150: */ - } - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } else { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = j + l * a_dim1; - i__4 = j + l * b_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r != - 0. || b[i__4].i != 0.)) { - d_cnjg(&z__2, &b[j + l * b_dim1]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, - z__1.i = alpha->r * z__2.i + alpha->i * - z__2.r; - temp1.r = z__1.r, temp1.i = z__1.i; - i__3 = j + l * a_dim1; - z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i, - z__2.i = alpha->r * a[i__3].i + alpha->i * a[ - i__3].r; - d_cnjg(&z__1, &z__2); - temp2.r = z__1.r, temp2.i = z__1.i; - i__3 = *n; - for (i__ = j + 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__3.r = a[i__6].r * temp1.r - a[i__6].i * - temp1.i, z__3.i = a[i__6].r * temp1.i + a[ - i__6].i * temp1.r; - z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5] - .i + z__3.i; - i__7 = i__ + l * b_dim1; - z__4.r = b[i__7].r * temp2.r - b[i__7].i * - temp2.i, z__4.i = b[i__7].r * temp2.i + b[ - i__7].i * temp2.r; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + - z__4.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L160: */ - } - i__3 = j + j * c_dim1; - i__4 = j + j * 
c_dim1; - i__5 = j + l * a_dim1; - z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i, - z__2.i = a[i__5].r * temp1.i + a[i__5].i * - temp1.r; - i__6 = j + l * b_dim1; - z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i, - z__3.i = b[i__6].r * temp2.i + b[i__6].i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - d__1 = c__[i__4].r + z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } -/* L170: */ - } -/* L180: */ - } - } - } else { - -/* - Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + - C. -*/ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - temp1.r = 0., temp1.i = 0.; - temp2.r = 0., temp2.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - i__4 = l + j * b_dim1; - z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i, - z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4] - .r; - z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i; - temp1.r = z__1.r, temp1.i = z__1.i; - d_cnjg(&z__3, &b[l + i__ * b_dim1]); - i__4 = l + j * a_dim1; - z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i, - z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4] - .r; - z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; - temp2.r = z__1.r, temp2.i = z__1.i; -/* L190: */ - } - if (i__ == j) { - if (*beta == 0.) 
{ - i__3 = j + j * c_dim1; - z__2.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__2.i = alpha->r * temp1.i + alpha->i * - temp1.r; - d_cnjg(&z__4, alpha); - z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, - z__3.i = z__4.r * temp2.i + z__4.i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - d__1 = z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } else { - i__3 = j + j * c_dim1; - i__4 = j + j * c_dim1; - z__2.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__2.i = alpha->r * temp1.i + alpha->i * - temp1.r; - d_cnjg(&z__4, alpha); - z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, - z__3.i = z__4.r * temp2.i + z__4.i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - d__1 = *beta * c__[i__4].r + z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } - } else { - if (*beta == 0.) { - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__2.i = alpha->r * temp1.i + alpha->i * - temp1.r; - d_cnjg(&z__4, alpha); - z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, - z__3.i = z__4.r * temp2.i + z__4.i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__3.r = *beta * c__[i__4].r, z__3.i = *beta * - c__[i__4].i; - z__4.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__4.i = alpha->r * temp1.i + alpha->i * - temp1.r; - z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + - z__4.i; - d_cnjg(&z__6, alpha); - z__5.r = z__6.r * temp2.r - z__6.i * temp2.i, - z__5.i = z__6.r * temp2.i + z__6.i * - temp2.r; - z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + - z__5.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } - } -/* L200: */ - } -/* L210: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - temp1.r = 0., temp1.i = 0.; - temp2.r = 0., temp2.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - i__4 
= l + j * b_dim1; - z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i, - z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4] - .r; - z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i; - temp1.r = z__1.r, temp1.i = z__1.i; - d_cnjg(&z__3, &b[l + i__ * b_dim1]); - i__4 = l + j * a_dim1; - z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i, - z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4] - .r; - z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; - temp2.r = z__1.r, temp2.i = z__1.i; -/* L220: */ - } - if (i__ == j) { - if (*beta == 0.) { - i__3 = j + j * c_dim1; - z__2.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__2.i = alpha->r * temp1.i + alpha->i * - temp1.r; - d_cnjg(&z__4, alpha); - z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, - z__3.i = z__4.r * temp2.i + z__4.i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - d__1 = z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } else { - i__3 = j + j * c_dim1; - i__4 = j + j * c_dim1; - z__2.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__2.i = alpha->r * temp1.i + alpha->i * - temp1.r; - d_cnjg(&z__4, alpha); - z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, - z__3.i = z__4.r * temp2.i + z__4.i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - d__1 = *beta * c__[i__4].r + z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } - } else { - if (*beta == 0.) 
{ - i__3 = i__ + j * c_dim1; - z__2.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__2.i = alpha->r * temp1.i + alpha->i * - temp1.r; - d_cnjg(&z__4, alpha); - z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, - z__3.i = z__4.r * temp2.i + z__4.i * - temp2.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__3.r = *beta * c__[i__4].r, z__3.i = *beta * - c__[i__4].i; - z__4.r = alpha->r * temp1.r - alpha->i * temp1.i, - z__4.i = alpha->r * temp1.i + alpha->i * - temp1.r; - z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + - z__4.i; - d_cnjg(&z__6, alpha); - z__5.r = z__6.r * temp2.r - z__6.i * temp2.i, - z__5.i = z__6.r * temp2.i + z__6.i * - temp2.r; - z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + - z__5.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } - } -/* L230: */ - } -/* L240: */ - } - } - } - - return 0; - -/* End of ZHER2K. */ - -} /* zher2k_ */ - -/* Subroutine */ int zherk_(char *uplo, char *trans, integer *n, integer *k, - doublereal *alpha, doublecomplex *a, integer *lda, doublereal *beta, - doublecomplex *c__, integer *ldc) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5, - i__6; - doublereal d__1; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, l, info; - static doublecomplex temp; - extern logical lsame_(char *, char *); - static integer nrowa; - static doublereal rtemp; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - Purpose - ======= - - ZHERK performs one of the hermitian rank k operations - - C := alpha*A*conjg( A' ) + beta*C, - - or - - C := alpha*conjg( A' )*A + beta*C, - - where alpha and beta are real scalars, C is an n by n hermitian - matrix and A is an n by k matrix in the first case and a k by n - matrix in 
the second case. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the upper or lower - triangular part of the array C is to be referenced as - follows: - - UPLO = 'U' or 'u' Only the upper triangular part of C - is to be referenced. - - UPLO = 'L' or 'l' Only the lower triangular part of C - is to be referenced. - - Unchanged on exit. - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C. - - TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix C. N must be - at least zero. - Unchanged on exit. - - K - INTEGER. - On entry with TRANS = 'N' or 'n', K specifies the number - of columns of the matrix A, and on entry with - TRANS = 'C' or 'c', K specifies the number of rows of the - matrix A. K must be at least zero. - Unchanged on exit. - - ALPHA - DOUBLE PRECISION . - On entry, ALPHA specifies the scalar alpha. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is - k when TRANS = 'N' or 'n', and is n otherwise. - Before entry with TRANS = 'N' or 'n', the leading n by k - part of the array A must contain the matrix A, otherwise - the leading k by n part of the array A must contain the - matrix A. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When TRANS = 'N' or 'n' - then LDA must be at least max( 1, n ), otherwise LDA must - be at least max( 1, k ). - Unchanged on exit. - - BETA - DOUBLE PRECISION. - On entry, BETA specifies the scalar beta. - Unchanged on exit. - - C - COMPLEX*16 array of DIMENSION ( LDC, n ). 
- Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array C must contain the upper - triangular part of the hermitian matrix and the strictly - lower triangular part of C is not referenced. On exit, the - upper triangular part of the array C is overwritten by the - upper triangular part of the updated matrix. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array C must contain the lower - triangular part of the hermitian matrix and the strictly - upper triangular part of C is not referenced. On exit, the - lower triangular part of the array C is overwritten by the - lower triangular part of the updated matrix. - Note that the imaginary parts of the diagonal elements need - not be set, they are assumed to be zero, and on exit they - are set to zero. - - LDC - INTEGER. - On entry, LDC specifies the first dimension of C as declared - in the calling (sub) program. LDC must be at least - max( 1, n ). - Unchanged on exit. - - - Level 3 Blas routine. - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1. - Ed Anderson, Cray Research Inc. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - - /* Function Body */ - if (lsame_(trans, "N")) { - nrowa = *n; - } else { - nrowa = *k; - } - upper = lsame_(uplo, "U"); - - info = 0; - if ((! upper && ! lsame_(uplo, "L"))) { - info = 1; - } else if ((! lsame_(trans, "N") && ! 
lsame_(trans, - "C"))) { - info = 2; - } else if (*n < 0) { - info = 3; - } else if (*k < 0) { - info = 4; - } else if (*lda < max(1,nrowa)) { - info = 7; - } else if (*ldc < max(1,*n)) { - info = 10; - } - if (info != 0) { - xerbla_("ZHERK ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if (*alpha == 0.) { - if (upper) { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L30: */ - } - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; -/* L40: */ - } - } - } else { - if (*beta == 0.) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L50: */ - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L70: */ - } -/* L80: */ - } - } - } - return 0; - } - -/* Start the operations. */ - - if (lsame_(trans, "N")) { - -/* Form C := alpha*A*conjg( A' ) + beta*C. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) 
{ - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L90: */ - } - } else if (*beta != 1.) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L100: */ - } - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } else { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = j + l * a_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0.) { - d_cnjg(&z__2, &a[j + l * a_dim1]); - z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = j - 1; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i, - z__2.i = temp.r * a[i__6].i + temp.i * a[ - i__6].r; - z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5] - .i + z__2.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L110: */ - } - i__3 = j + j * c_dim1; - i__4 = j + j * c_dim1; - i__5 = i__ + l * a_dim1; - z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__1.i = temp.r * a[i__5].i + temp.i * a[i__5] - .r; - d__1 = c__[i__4].r + z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - } -/* L120: */ - } -/* L130: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*beta == 0.) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - c__[i__3].r = 0., c__[i__3].i = 0.; -/* L140: */ - } - } else if (*beta != 1.) 
{ - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[ - i__4].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L150: */ - } - } else { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - i__3 = j + l * a_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0.) { - d_cnjg(&z__2, &a[j + l * a_dim1]); - z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = j + j * c_dim1; - i__4 = j + j * c_dim1; - i__5 = j + l * a_dim1; - z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__1.i = temp.r * a[i__5].i + temp.i * a[i__5] - .r; - d__1 = c__[i__4].r + z__1.r; - c__[i__3].r = d__1, c__[i__3].i = 0.; - i__3 = *n; - for (i__ = j + 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * c_dim1; - i__6 = i__ + l * a_dim1; - z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i, - z__2.i = temp.r * a[i__6].i + temp.i * a[ - i__6].r; - z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5] - .i + z__2.i; - c__[i__4].r = z__1.r, c__[i__4].i = z__1.i; -/* L160: */ - } - } -/* L170: */ - } -/* L180: */ - } - } - } else { - -/* Form C := alpha*conjg( A' )*A + beta*C. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - i__4 = l + j * a_dim1; - z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i, - z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4] - .r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L190: */ - } - if (*beta == 0.) 
{ - i__3 = i__ + j * c_dim1; - z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i; - i__4 = i__ + j * c_dim1; - z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[ - i__4].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L200: */ - } - rtemp = 0.; - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - d_cnjg(&z__3, &a[l + j * a_dim1]); - i__3 = l + j * a_dim1; - z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i = - z__3.r * a[i__3].i + z__3.i * a[i__3].r; - z__1.r = rtemp + z__2.r, z__1.i = z__2.i; - rtemp = z__1.r; -/* L210: */ - } - if (*beta == 0.) { - i__2 = j + j * c_dim1; - d__1 = *alpha * rtemp; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } else { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *alpha * rtemp + *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } -/* L220: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - rtemp = 0.; - i__2 = *k; - for (l = 1; l <= i__2; ++l) { - d_cnjg(&z__3, &a[l + j * a_dim1]); - i__3 = l + j * a_dim1; - z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i = - z__3.r * a[i__3].i + z__3.i * a[i__3].r; - z__1.r = rtemp + z__2.r, z__1.i = z__2.i; - rtemp = z__1.r; -/* L230: */ - } - if (*beta == 0.) 
{ - i__2 = j + j * c_dim1; - d__1 = *alpha * rtemp; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } else { - i__2 = j + j * c_dim1; - i__3 = j + j * c_dim1; - d__1 = *alpha * rtemp + *beta * c__[i__3].r; - c__[i__2].r = d__1, c__[i__2].i = 0.; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - temp.r = 0., temp.i = 0.; - i__3 = *k; - for (l = 1; l <= i__3; ++l) { - d_cnjg(&z__3, &a[l + i__ * a_dim1]); - i__4 = l + j * a_dim1; - z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i, - z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4] - .r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L240: */ - } - if (*beta == 0.) { - i__3 = i__ + j * c_dim1; - z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } else { - i__3 = i__ + j * c_dim1; - z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i; - i__4 = i__ + j * c_dim1; - z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[ - i__4].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; - } -/* L250: */ - } -/* L260: */ - } - } - } - - return 0; - -/* End of ZHERK . */ - -} /* zherk_ */ - -/* Subroutine */ int zscal_(integer *n, doublecomplex *za, doublecomplex *zx, - integer *incx) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - doublecomplex z__1; - - /* Local variables */ - static integer i__, ix; - - -/* - scales a vector by a constant. - jack dongarra, 3/11/78. - modified 3/93 to return if incx .le. 0. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --zx; - - /* Function Body */ - if (*n <= 0 || *incx <= 0) { - return 0; - } - if (*incx == 1) { - goto L20; - } - -/* code for increment not equal to 1 */ - - ix = 1; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ix; - i__3 = ix; - z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[ - i__3].i + za->i * zx[i__3].r; - zx[i__2].r = z__1.r, zx[i__2].i = z__1.i; - ix += *incx; -/* L10: */ - } - return 0; - -/* code for increment equal to 1 */ - -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__; - z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[ - i__3].i + za->i * zx[i__3].r; - zx[i__2].r = z__1.r, zx[i__2].i = z__1.i; -/* L30: */ - } - return 0; -} /* zscal_ */ - -/* Subroutine */ int zswap_(integer *n, doublecomplex *zx, integer *incx, - doublecomplex *zy, integer *incy) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - - /* Local variables */ - static integer i__, ix, iy; - static doublecomplex ztemp; - - -/* - interchanges two vectors. - jack dongarra, 3/11/78. 
- modified 12/3/93, array(1) declarations changed to array(*) -*/ - - - /* Parameter adjustments */ - --zy; - --zx; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - if ((*incx == 1 && *incy == 1)) { - goto L20; - } - -/* - code for unequal increments or equal increments not equal - to 1 -*/ - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ix; - ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i; - i__2 = ix; - i__3 = iy; - zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i; - i__2 = iy; - zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i; - ix += *incx; - iy += *incy; -/* L10: */ - } - return 0; - -/* code for both increments equal to 1 */ -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i; - i__2 = i__; - i__3 = i__; - zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i; - i__2 = i__; - zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i; -/* L30: */ - } - return 0; -} /* zswap_ */ - -/* Subroutine */ int ztrmm_(char *side, char *uplo, char *transa, char *diag, - integer *m, integer *n, doublecomplex *alpha, doublecomplex *a, - integer *lda, doublecomplex *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, - i__6; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, k, info; - static doublecomplex temp; - static logical lside; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical noconj, nounit; - - -/* - Purpose - ======= - - ZTRMM performs one of the matrix-matrix operations - - B := alpha*op( A )*B, or B := alpha*B*op( A ) - - where alpha is a scalar, B is an m by n matrix, A is a 
unit, or - non-unit, upper or lower triangular matrix and op( A ) is one of - - op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ). - - Parameters - ========== - - SIDE - CHARACTER*1. - On entry, SIDE specifies whether op( A ) multiplies B from - the left or right as follows: - - SIDE = 'L' or 'l' B := alpha*op( A )*B. - - SIDE = 'R' or 'r' B := alpha*B*op( A ). - - Unchanged on exit. - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix A is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANSA - CHARACTER*1. - On entry, TRANSA specifies the form of op( A ) to be used in - the matrix multiplication as follows: - - TRANSA = 'N' or 'n' op( A ) = A. - - TRANSA = 'T' or 't' op( A ) = A'. - - TRANSA = 'C' or 'c' op( A ) = conjg( A' ). - - Unchanged on exit. - - DIAG - CHARACTER*1. - On entry, DIAG specifies whether or not A is unit triangular - as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of B. M must be at - least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of B. N must be - at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. When alpha is - zero then A is not referenced and B need not be set before - entry. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m - when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. - Before entry with UPLO = 'U' or 'u', the leading k by k - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. 
- Before entry with UPLO = 'L' or 'l', the leading k by k - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When SIDE = 'L' or 'l' then - LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' - then LDA must be at least max( 1, n ). - Unchanged on exit. - - B - COMPLEX*16 array of DIMENSION ( LDB, n ). - Before entry, the leading m by n part of the array B must - contain the matrix B, and on exit is overwritten by the - transformed matrix. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. LDB must be at least - max( 1, m ). - Unchanged on exit. - - - Level 3 Blas routine. - - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - lside = lsame_(side, "L"); - if (lside) { - nrowa = *m; - } else { - nrowa = *n; - } - noconj = lsame_(transa, "T"); - nounit = lsame_(diag, "N"); - upper = lsame_(uplo, "U"); - - info = 0; - if ((! lside && ! lsame_(side, "R"))) { - info = 1; - } else if ((! upper && ! lsame_(uplo, "L"))) { - info = 2; - } else if (((! lsame_(transa, "N") && ! lsame_( - transa, "T")) && ! lsame_(transa, "C"))) { - info = 3; - } else if ((! lsame_(diag, "U") && ! 
lsame_(diag, - "N"))) { - info = 4; - } else if (*m < 0) { - info = 5; - } else if (*n < 0) { - info = 6; - } else if (*lda < max(1,nrowa)) { - info = 9; - } else if (*ldb < max(1,*m)) { - info = 11; - } - if (info != 0) { - xerbla_("ZTRMM ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if ((alpha->r == 0. && alpha->i == 0.)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - b[i__3].r = 0., b[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - return 0; - } - -/* Start the operations. */ - - if (lside) { - if (lsame_(transa, "N")) { - -/* Form B := alpha*A*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (k = 1; k <= i__2; ++k) { - i__3 = k + j * b_dim1; - if (b[i__3].r != 0. || b[i__3].i != 0.) { - i__3 = k + j * b_dim1; - z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3] - .i, z__1.i = alpha->r * b[i__3].i + - alpha->i * b[i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = k - 1; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * b_dim1; - i__5 = i__ + j * b_dim1; - i__6 = i__ + k * a_dim1; - z__2.r = temp.r * a[i__6].r - temp.i * a[i__6] - .i, z__2.i = temp.r * a[i__6].i + - temp.i * a[i__6].r; - z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5] - .i + z__2.i; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L30: */ - } - if (nounit) { - i__3 = k + k * a_dim1; - z__1.r = temp.r * a[i__3].r - temp.i * a[i__3] - .i, z__1.i = temp.r * a[i__3].i + - temp.i * a[i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__3 = k + j * b_dim1; - b[i__3].r = temp.r, b[i__3].i = temp.i; - } -/* L40: */ - } -/* L50: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (k = *m; k >= 1; --k) { - i__2 = k + j * b_dim1; - if (b[i__2].r != 0. || b[i__2].i != 0.) 
{ - i__2 = k + j * b_dim1; - z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2] - .i, z__1.i = alpha->r * b[i__2].i + - alpha->i * b[i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - i__2 = k + j * b_dim1; - b[i__2].r = temp.r, b[i__2].i = temp.i; - if (nounit) { - i__2 = k + j * b_dim1; - i__3 = k + j * b_dim1; - i__4 = k + k * a_dim1; - z__1.r = b[i__3].r * a[i__4].r - b[i__3].i * - a[i__4].i, z__1.i = b[i__3].r * a[ - i__4].i + b[i__3].i * a[i__4].r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; - } - i__2 = *m; - for (i__ = k + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - i__5 = i__ + k * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5] - .i, z__2.i = temp.r * a[i__5].i + - temp.i * a[i__5].r; - z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4] - .i + z__2.i; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L60: */ - } - } -/* L70: */ - } -/* L80: */ - } - } - } else { - -/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (i__ = *m; i__ >= 1; --i__) { - i__2 = i__ + j * b_dim1; - temp.r = b[i__2].r, temp.i = b[i__2].i; - if (noconj) { - if (nounit) { - i__2 = i__ + i__ * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2] - .i, z__1.i = temp.r * a[i__2].i + - temp.i * a[i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = i__ - 1; - for (k = 1; k <= i__2; ++k) { - i__3 = k + i__ * a_dim1; - i__4 = k + j * b_dim1; - z__2.r = a[i__3].r * b[i__4].r - a[i__3].i * - b[i__4].i, z__2.i = a[i__3].r * b[ - i__4].i + a[i__3].i * b[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L90: */ - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[i__ + i__ * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = i__ - 1; - for (k = 1; k <= i__2; ++k) { - d_cnjg(&z__3, &a[k + i__ * a_dim1]); - i__3 = k + j * b_dim1; - 
z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3] - .i, z__2.i = z__3.r * b[i__3].i + - z__3.i * b[i__3].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L100: */ - } - } - i__2 = i__ + j * b_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L110: */ - } -/* L120: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - temp.r = b[i__3].r, temp.i = b[i__3].i; - if (noconj) { - if (nounit) { - i__3 = i__ + i__ * a_dim1; - z__1.r = temp.r * a[i__3].r - temp.i * a[i__3] - .i, z__1.i = temp.r * a[i__3].i + - temp.i * a[i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__3 = *m; - for (k = i__ + 1; k <= i__3; ++k) { - i__4 = k + i__ * a_dim1; - i__5 = k + j * b_dim1; - z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * - b[i__5].i, z__2.i = a[i__4].r * b[ - i__5].i + a[i__4].i * b[i__5].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L130: */ - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[i__ + i__ * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__3 = *m; - for (k = i__ + 1; k <= i__3; ++k) { - d_cnjg(&z__3, &a[k + i__ * a_dim1]); - i__4 = k + j * b_dim1; - z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4] - .i, z__2.i = z__3.r * b[i__4].i + - z__3.i * b[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L140: */ - } - } - i__3 = i__ + j * b_dim1; - z__1.r = alpha->r * temp.r - alpha->i * temp.i, - z__1.i = alpha->r * temp.i + alpha->i * - temp.r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L150: */ - } -/* L160: */ - } - } - } - } else { - if (lsame_(transa, "N")) { - -/* Form B := alpha*B*A. 
*/ - - if (upper) { - for (j = *n; j >= 1; --j) { - temp.r = alpha->r, temp.i = alpha->i; - if (nounit) { - i__1 = j + j * a_dim1; - z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, - z__1.i = temp.r * a[i__1].i + temp.i * a[i__1] - .r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + j * b_dim1; - i__3 = i__ + j * b_dim1; - z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i, - z__1.i = temp.r * b[i__3].i + temp.i * b[i__3] - .r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L170: */ - } - i__1 = j - 1; - for (k = 1; k <= i__1; ++k) { - i__2 = k + j * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) { - i__2 = k + j * a_dim1; - z__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2] - .i, z__1.i = alpha->r * a[i__2].i + - alpha->i * a[i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - i__5 = i__ + k * b_dim1; - z__2.r = temp.r * b[i__5].r - temp.i * b[i__5] - .i, z__2.i = temp.r * b[i__5].i + - temp.i * b[i__5].r; - z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4] - .i + z__2.i; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L180: */ - } - } -/* L190: */ - } -/* L200: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp.r = alpha->r, temp.i = alpha->i; - if (nounit) { - i__2 = j + j * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, - z__1.i = temp.r * a[i__2].i + temp.i * a[i__2] - .r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i, - z__1.i = temp.r * b[i__4].i + temp.i * b[i__4] - .r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L210: */ - } - i__2 = *n; - for (k = j + 1; k <= i__2; ++k) { - i__3 = k + j * a_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0.) 
{ - i__3 = k + j * a_dim1; - z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3] - .i, z__1.i = alpha->r * a[i__3].i + - alpha->i * a[i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * b_dim1; - i__5 = i__ + j * b_dim1; - i__6 = i__ + k * b_dim1; - z__2.r = temp.r * b[i__6].r - temp.i * b[i__6] - .i, z__2.i = temp.r * b[i__6].i + - temp.i * b[i__6].r; - z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5] - .i + z__2.i; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L220: */ - } - } -/* L230: */ - } -/* L240: */ - } - } - } else { - -/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */ - - if (upper) { - i__1 = *n; - for (k = 1; k <= i__1; ++k) { - i__2 = k - 1; - for (j = 1; j <= i__2; ++j) { - i__3 = j + k * a_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0.) { - if (noconj) { - i__3 = j + k * a_dim1; - z__1.r = alpha->r * a[i__3].r - alpha->i * a[ - i__3].i, z__1.i = alpha->r * a[i__3] - .i + alpha->i * a[i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - } else { - d_cnjg(&z__2, &a[j + k * a_dim1]); - z__1.r = alpha->r * z__2.r - alpha->i * - z__2.i, z__1.i = alpha->r * z__2.i + - alpha->i * z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * b_dim1; - i__5 = i__ + j * b_dim1; - i__6 = i__ + k * b_dim1; - z__2.r = temp.r * b[i__6].r - temp.i * b[i__6] - .i, z__2.i = temp.r * b[i__6].i + - temp.i * b[i__6].r; - z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5] - .i + z__2.i; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L250: */ - } - } -/* L260: */ - } - temp.r = alpha->r, temp.i = alpha->i; - if (nounit) { - if (noconj) { - i__2 = k + k * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, - z__1.i = temp.r * a[i__2].i + temp.i * a[ - i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } else { - d_cnjg(&z__2, &a[k + k * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, 
temp.i = z__1.i; - } - } - if (temp.r != 1. || temp.i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + k * b_dim1; - i__4 = i__ + k * b_dim1; - z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i, - z__1.i = temp.r * b[i__4].i + temp.i * b[ - i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L270: */ - } - } -/* L280: */ - } - } else { - for (k = *n; k >= 1; --k) { - i__1 = *n; - for (j = k + 1; j <= i__1; ++j) { - i__2 = j + k * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) { - if (noconj) { - i__2 = j + k * a_dim1; - z__1.r = alpha->r * a[i__2].r - alpha->i * a[ - i__2].i, z__1.i = alpha->r * a[i__2] - .i + alpha->i * a[i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } else { - d_cnjg(&z__2, &a[j + k * a_dim1]); - z__1.r = alpha->r * z__2.r - alpha->i * - z__2.i, z__1.i = alpha->r * z__2.i + - alpha->i * z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - i__5 = i__ + k * b_dim1; - z__2.r = temp.r * b[i__5].r - temp.i * b[i__5] - .i, z__2.i = temp.r * b[i__5].i + - temp.i * b[i__5].r; - z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4] - .i + z__2.i; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L290: */ - } - } -/* L300: */ - } - temp.r = alpha->r, temp.i = alpha->i; - if (nounit) { - if (noconj) { - i__1 = k + k * a_dim1; - z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, - z__1.i = temp.r * a[i__1].i + temp.i * a[ - i__1].r; - temp.r = z__1.r, temp.i = z__1.i; - } else { - d_cnjg(&z__2, &a[k + k * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - } - if (temp.r != 1. || temp.i != 0.) 
{ - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + k * b_dim1; - i__3 = i__ + k * b_dim1; - z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i, - z__1.i = temp.r * b[i__3].i + temp.i * b[ - i__3].r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L310: */ - } - } -/* L320: */ - } - } - } - } - - return 0; - -/* End of ZTRMM . */ - -} /* ztrmm_ */ - -/* Subroutine */ int ztrmv_(char *uplo, char *trans, char *diag, integer *n, - doublecomplex *a, integer *lda, doublecomplex *x, integer *incx) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, ix, jx, kx, info; - static doublecomplex temp; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical noconj, nounit; - - -/* - Purpose - ======= - - ZTRMV performs one of the matrix-vector operations - - x := A*x, or x := A'*x, or x := conjg( A' )*x, - - where x is an n element vector and A is an n by n unit, or non-unit, - upper or lower triangular matrix. - - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANS - CHARACTER*1. - On entry, TRANS specifies the operation to be performed as - follows: - - TRANS = 'N' or 'n' x := A*x. - - TRANS = 'T' or 't' x := A'*x. - - TRANS = 'C' or 'c' x := conjg( A' )*x. - - Unchanged on exit. - - DIAG - CHARACTER*1. - On entry, DIAG specifies whether or not A is unit - triangular as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - N - INTEGER. 
- On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - X - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element vector x. On exit, X is overwritten with the - tranformed vector x. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. - Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (((! lsame_(trans, "N") && ! lsame_(trans, - "T")) && ! lsame_(trans, "C"))) { - info = 2; - } else if ((! lsame_(diag, "U") && ! 
lsame_(diag, - "N"))) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*lda < max(1,*n)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } - if (info != 0) { - xerbla_("ZTRMV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - - noconj = lsame_(trans, "T"); - nounit = lsame_(diag, "N"); - -/* - Set up the start point in X if the increment is not unity. This - will be ( N - 1 )*INCX too small for descending loops. -*/ - - if (*incx <= 0) { - kx = 1 - (*n - 1) * *incx; - } else if (*incx != 1) { - kx = 1; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. -*/ - - if (lsame_(trans, "N")) { - -/* Form x := A*x. */ - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - if (x[i__2].r != 0. || x[i__2].i != 0.) { - i__2 = j; - temp.r = x[i__2].r, temp.i = x[i__2].i; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__; - i__4 = i__; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[ - i__5].r; - z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i + - z__2.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; -/* L10: */ - } - if (nounit) { - i__2 = j; - i__3 = j; - i__4 = j + j * a_dim1; - z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[ - i__4].i, z__1.i = x[i__3].r * a[i__4].i + - x[i__3].i * a[i__4].r; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - } - } -/* L20: */ - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - if (x[i__2].r != 0. || x[i__2].i != 0.) 
{ - i__2 = jx; - temp.r = x[i__2].r, temp.i = x[i__2].i; - ix = kx; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = ix; - i__4 = ix; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[ - i__5].r; - z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i + - z__2.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - ix += *incx; -/* L30: */ - } - if (nounit) { - i__2 = jx; - i__3 = jx; - i__4 = j + j * a_dim1; - z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[ - i__4].i, z__1.i = x[i__3].r * a[i__4].i + - x[i__3].i * a[i__4].r; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - } - } - jx += *incx; -/* L40: */ - } - } - } else { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - i__1 = j; - if (x[i__1].r != 0. || x[i__1].i != 0.) { - i__1 = j; - temp.r = x[i__1].r, temp.i = x[i__1].i; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - i__2 = i__; - i__3 = i__; - i__4 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i, - z__2.i = temp.r * a[i__4].i + temp.i * a[ - i__4].r; - z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + - z__2.i; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; -/* L50: */ - } - if (nounit) { - i__1 = j; - i__2 = j; - i__3 = j + j * a_dim1; - z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ - i__3].i, z__1.i = x[i__2].r * a[i__3].i + - x[i__2].i * a[i__3].r; - x[i__1].r = z__1.r, x[i__1].i = z__1.i; - } - } -/* L60: */ - } - } else { - kx += (*n - 1) * *incx; - jx = kx; - for (j = *n; j >= 1; --j) { - i__1 = jx; - if (x[i__1].r != 0. || x[i__1].i != 0.) 
{ - i__1 = jx; - temp.r = x[i__1].r, temp.i = x[i__1].i; - ix = kx; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - i__2 = ix; - i__3 = ix; - i__4 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i, - z__2.i = temp.r * a[i__4].i + temp.i * a[ - i__4].r; - z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + - z__2.i; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - ix -= *incx; -/* L70: */ - } - if (nounit) { - i__1 = jx; - i__2 = jx; - i__3 = j + j * a_dim1; - z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ - i__3].i, z__1.i = x[i__2].r * a[i__3].i + - x[i__2].i * a[i__3].r; - x[i__1].r = z__1.r, x[i__1].i = z__1.i; - } - } - jx -= *incx; -/* L80: */ - } - } - } - } else { - -/* Form x := A'*x or x := conjg( A' )*x. */ - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - i__1 = j; - temp.r = x[i__1].r, temp.i = x[i__1].i; - if (noconj) { - if (nounit) { - i__1 = j + j * a_dim1; - z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, - z__1.i = temp.r * a[i__1].i + temp.i * a[ - i__1].r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - i__1 = i__ + j * a_dim1; - i__2 = i__; - z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ - i__2].i, z__2.i = a[i__1].r * x[i__2].i + - a[i__1].i * x[i__2].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L90: */ - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__1 = i__; - z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, - z__2.i = z__3.r * x[i__1].i + z__3.i * x[ - i__1].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L100: */ - } - } - i__1 = j; - x[i__1].r = temp.r, x[i__1].i = temp.i; -/* L110: */ - } - } else { - jx = kx + 
(*n - 1) * *incx; - for (j = *n; j >= 1; --j) { - i__1 = jx; - temp.r = x[i__1].r, temp.i = x[i__1].i; - ix = jx; - if (noconj) { - if (nounit) { - i__1 = j + j * a_dim1; - z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, - z__1.i = temp.r * a[i__1].i + temp.i * a[ - i__1].r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - i__1 = i__ + j * a_dim1; - i__2 = ix; - z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ - i__2].i, z__2.i = a[i__1].r * x[i__2].i + - a[i__1].i * x[i__2].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L120: */ - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__1 = ix; - z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, - z__2.i = z__3.r * x[i__1].i + z__3.i * x[ - i__1].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L130: */ - } - } - i__1 = jx; - x[i__1].r = temp.r, x[i__1].i = temp.i; - jx -= *incx; -/* L140: */ - } - } - } else { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - temp.r = x[i__2].r, temp.i = x[i__2].i; - if (noconj) { - if (nounit) { - i__2 = j + j * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, - z__1.i = temp.r * a[i__2].i + temp.i * a[ - i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ - i__4].i, z__2.i = a[i__3].r * x[i__4].i + - a[i__3].i * x[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L150: */ - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * 
z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = i__; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[ - i__3].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L160: */ - } - } - i__2 = j; - x[i__2].r = temp.r, x[i__2].i = temp.i; -/* L170: */ - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - temp.r = x[i__2].r, temp.i = x[i__2].i; - ix = jx; - if (noconj) { - if (nounit) { - i__2 = j + j * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, - z__1.i = temp.r * a[i__2].i + temp.i * a[ - i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - i__3 = i__ + j * a_dim1; - i__4 = ix; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ - i__4].i, z__2.i = a[i__3].r * x[i__4].i + - a[i__3].i * x[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L180: */ - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = ix; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[ - i__3].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L190: */ - } - } - i__2 = jx; - x[i__2].r = temp.r, x[i__2].i = temp.i; - jx += *incx; -/* L200: */ - } - } - } - } - - return 0; - -/* End of ZTRMV . 
*/ - -} /* ztrmv_ */ - -/* Subroutine */ int ztrsm_(char *side, char *uplo, char *transa, char *diag, - integer *m, integer *n, doublecomplex *alpha, doublecomplex *a, - integer *lda, doublecomplex *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, - i__6, i__7; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg( - doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, k, info; - static doublecomplex temp; - static logical lside; - extern logical lsame_(char *, char *); - static integer nrowa; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical noconj, nounit; - - -/* - Purpose - ======= - - ZTRSM solves one of the matrix equations - - op( A )*X = alpha*B, or X*op( A ) = alpha*B, - - where alpha is a scalar, X and B are m by n matrices, A is a unit, or - non-unit, upper or lower triangular matrix and op( A ) is one of - - op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ). - - The matrix X is overwritten on B. - - Parameters - ========== - - SIDE - CHARACTER*1. - On entry, SIDE specifies whether op( A ) appears on the left - or right of X as follows: - - SIDE = 'L' or 'l' op( A )*X = alpha*B. - - SIDE = 'R' or 'r' X*op( A ) = alpha*B. - - Unchanged on exit. - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix A is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANSA - CHARACTER*1. - On entry, TRANSA specifies the form of op( A ) to be used in - the matrix multiplication as follows: - - TRANSA = 'N' or 'n' op( A ) = A. - - TRANSA = 'T' or 't' op( A ) = A'. - - TRANSA = 'C' or 'c' op( A ) = conjg( A' ). - - Unchanged on exit. - - DIAG - CHARACTER*1. 
- On entry, DIAG specifies whether or not A is unit triangular - as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - M - INTEGER. - On entry, M specifies the number of rows of B. M must be at - least zero. - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the number of columns of B. N must be - at least zero. - Unchanged on exit. - - ALPHA - COMPLEX*16 . - On entry, ALPHA specifies the scalar alpha. When alpha is - zero then A is not referenced and B need not be set before - entry. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m - when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. - Before entry with UPLO = 'U' or 'u', the leading k by k - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading k by k - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. When SIDE = 'L' or 'l' then - LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' - then LDA must be at least max( 1, n ). - Unchanged on exit. - - B - COMPLEX*16 array of DIMENSION ( LDB, n ). - Before entry, the leading m by n part of the array B must - contain the right-hand side matrix B, and on exit is - overwritten by the solution matrix X. - - LDB - INTEGER. - On entry, LDB specifies the first dimension of B as declared - in the calling (sub) program. LDB must be at least - max( 1, m ). - Unchanged on exit. - - - Level 3 Blas routine. 
- - -- Written on 8-February-1989. - Jack Dongarra, Argonne National Laboratory. - Iain Duff, AERE Harwell. - Jeremy Du Croz, Numerical Algorithms Group Ltd. - Sven Hammarling, Numerical Algorithms Group Ltd. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - lside = lsame_(side, "L"); - if (lside) { - nrowa = *m; - } else { - nrowa = *n; - } - noconj = lsame_(transa, "T"); - nounit = lsame_(diag, "N"); - upper = lsame_(uplo, "U"); - - info = 0; - if ((! lside && ! lsame_(side, "R"))) { - info = 1; - } else if ((! upper && ! lsame_(uplo, "L"))) { - info = 2; - } else if (((! lsame_(transa, "N") && ! lsame_( - transa, "T")) && ! lsame_(transa, "C"))) { - info = 3; - } else if ((! lsame_(diag, "U") && ! lsame_(diag, - "N"))) { - info = 4; - } else if (*m < 0) { - info = 5; - } else if (*n < 0) { - info = 6; - } else if (*lda < max(1,nrowa)) { - info = 9; - } else if (*ldb < max(1,*m)) { - info = 11; - } - if (info != 0) { - xerbla_("ZTRSM ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - -/* And when alpha.eq.zero. */ - - if ((alpha->r == 0. && alpha->i == 0.)) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - b[i__3].r = 0., b[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - return 0; - } - -/* Start the operations. */ - - if (lside) { - if (lsame_(transa, "N")) { - -/* Form B := alpha*inv( A )*B. */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (alpha->r != 1. || alpha->i != 0.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4] - .i, z__1.i = alpha->r * b[i__4].i + - alpha->i * b[i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L30: */ - } - } - for (k = *m; k >= 1; --k) { - i__2 = k + j * b_dim1; - if (b[i__2].r != 0. || b[i__2].i != 0.) { - if (nounit) { - i__2 = k + j * b_dim1; - z_div(&z__1, &b[k + j * b_dim1], &a[k + k * - a_dim1]); - b[i__2].r = z__1.r, b[i__2].i = z__1.i; - } - i__2 = k - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - i__5 = k + j * b_dim1; - i__6 = i__ + k * a_dim1; - z__2.r = b[i__5].r * a[i__6].r - b[i__5].i * - a[i__6].i, z__2.i = b[i__5].r * a[ - i__6].i + b[i__5].i * a[i__6].r; - z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4] - .i - z__2.i; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L40: */ - } - } -/* L50: */ - } -/* L60: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (alpha->r != 1. || alpha->i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4] - .i, z__1.i = alpha->r * b[i__4].i + - alpha->i * b[i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L70: */ - } - } - i__2 = *m; - for (k = 1; k <= i__2; ++k) { - i__3 = k + j * b_dim1; - if (b[i__3].r != 0. || b[i__3].i != 0.) 
{ - if (nounit) { - i__3 = k + j * b_dim1; - z_div(&z__1, &b[k + j * b_dim1], &a[k + k * - a_dim1]); - b[i__3].r = z__1.r, b[i__3].i = z__1.i; - } - i__3 = *m; - for (i__ = k + 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * b_dim1; - i__5 = i__ + j * b_dim1; - i__6 = k + j * b_dim1; - i__7 = i__ + k * a_dim1; - z__2.r = b[i__6].r * a[i__7].r - b[i__6].i * - a[i__7].i, z__2.i = b[i__6].r * a[ - i__7].i + b[i__6].i * a[i__7].r; - z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5] - .i - z__2.i; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L80: */ - } - } -/* L90: */ - } -/* L100: */ - } - } - } else { - -/* - Form B := alpha*inv( A' )*B - or B := alpha*inv( conjg( A' ) )*B. -*/ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i, - z__1.i = alpha->r * b[i__3].i + alpha->i * b[ - i__3].r; - temp.r = z__1.r, temp.i = z__1.i; - if (noconj) { - i__3 = i__ - 1; - for (k = 1; k <= i__3; ++k) { - i__4 = k + i__ * a_dim1; - i__5 = k + j * b_dim1; - z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * - b[i__5].i, z__2.i = a[i__4].r * b[ - i__5].i + a[i__4].i * b[i__5].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L110: */ - } - if (nounit) { - z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - i__3 = i__ - 1; - for (k = 1; k <= i__3; ++k) { - d_cnjg(&z__3, &a[k + i__ * a_dim1]); - i__4 = k + j * b_dim1; - z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4] - .i, z__2.i = z__3.r * b[i__4].i + - z__3.i * b[i__4].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L120: */ - } - if (nounit) { - d_cnjg(&z__2, &a[i__ + i__ * a_dim1]); - z_div(&z__1, &temp, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - } - i__3 = i__ + j * b_dim1; - b[i__3].r = temp.r, b[i__3].i = temp.i; -/* L130: */ - } -/* L140: */ - } - } 
else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (i__ = *m; i__ >= 1; --i__) { - i__2 = i__ + j * b_dim1; - z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i, - z__1.i = alpha->r * b[i__2].i + alpha->i * b[ - i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - if (noconj) { - i__2 = *m; - for (k = i__ + 1; k <= i__2; ++k) { - i__3 = k + i__ * a_dim1; - i__4 = k + j * b_dim1; - z__2.r = a[i__3].r * b[i__4].r - a[i__3].i * - b[i__4].i, z__2.i = a[i__3].r * b[ - i__4].i + a[i__3].i * b[i__4].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L150: */ - } - if (nounit) { - z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - i__2 = *m; - for (k = i__ + 1; k <= i__2; ++k) { - d_cnjg(&z__3, &a[k + i__ * a_dim1]); - i__3 = k + j * b_dim1; - z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3] - .i, z__2.i = z__3.r * b[i__3].i + - z__3.i * b[i__3].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L160: */ - } - if (nounit) { - d_cnjg(&z__2, &a[i__ + i__ * a_dim1]); - z_div(&z__1, &temp, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - } - i__2 = i__ + j * b_dim1; - b[i__2].r = temp.r, b[i__2].i = temp.i; -/* L170: */ - } -/* L180: */ - } - } - } - } else { - if (lsame_(transa, "N")) { - -/* Form B := alpha*B*inv( A ). */ - - if (upper) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (alpha->r != 1. || alpha->i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4] - .i, z__1.i = alpha->r * b[i__4].i + - alpha->i * b[i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L190: */ - } - } - i__2 = j - 1; - for (k = 1; k <= i__2; ++k) { - i__3 = k + j * a_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0.) 
{ - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * b_dim1; - i__5 = i__ + j * b_dim1; - i__6 = k + j * a_dim1; - i__7 = i__ + k * b_dim1; - z__2.r = a[i__6].r * b[i__7].r - a[i__6].i * - b[i__7].i, z__2.i = a[i__6].r * b[ - i__7].i + a[i__6].i * b[i__7].r; - z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5] - .i - z__2.i; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L200: */ - } - } -/* L210: */ - } - if (nounit) { - z_div(&z__1, &c_b359, &a[j + j * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i, - z__1.i = temp.r * b[i__4].i + temp.i * b[ - i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L220: */ - } - } -/* L230: */ - } - } else { - for (j = *n; j >= 1; --j) { - if (alpha->r != 1. || alpha->i != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + j * b_dim1; - i__3 = i__ + j * b_dim1; - z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3] - .i, z__1.i = alpha->r * b[i__3].i + - alpha->i * b[i__3].r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L240: */ - } - } - i__1 = *n; - for (k = j + 1; k <= i__1; ++k) { - i__2 = k + j * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - i__5 = k + j * a_dim1; - i__6 = i__ + k * b_dim1; - z__2.r = a[i__5].r * b[i__6].r - a[i__5].i * - b[i__6].i, z__2.i = a[i__5].r * b[ - i__6].i + a[i__5].i * b[i__6].r; - z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4] - .i - z__2.i; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L250: */ - } - } -/* L260: */ - } - if (nounit) { - z_div(&z__1, &c_b359, &a[j + j * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + j * b_dim1; - i__3 = i__ + j * b_dim1; - z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i, - z__1.i = temp.r * b[i__3].i + temp.i * b[ - i__3].r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L270: */ - } - } -/* L280: */ - } - } - } else { - -/* - Form B := alpha*B*inv( A' ) - or B := alpha*B*inv( conjg( A' ) ). -*/ - - if (upper) { - for (k = *n; k >= 1; --k) { - if (nounit) { - if (noconj) { - z_div(&z__1, &c_b359, &a[k + k * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } else { - d_cnjg(&z__2, &a[k + k * a_dim1]); - z_div(&z__1, &c_b359, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + k * b_dim1; - i__3 = i__ + k * b_dim1; - z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i, - z__1.i = temp.r * b[i__3].i + temp.i * b[ - i__3].r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L290: */ - } - } - i__1 = k - 1; - for (j = 1; j <= i__1; ++j) { - i__2 = j + k * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) 
{ - if (noconj) { - i__2 = j + k * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - } else { - d_cnjg(&z__1, &a[j + k * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * b_dim1; - i__5 = i__ + k * b_dim1; - z__2.r = temp.r * b[i__5].r - temp.i * b[i__5] - .i, z__2.i = temp.r * b[i__5].i + - temp.i * b[i__5].r; - z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4] - .i - z__2.i; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L300: */ - } - } -/* L310: */ - } - if (alpha->r != 1. || alpha->i != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + k * b_dim1; - i__3 = i__ + k * b_dim1; - z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3] - .i, z__1.i = alpha->r * b[i__3].i + - alpha->i * b[i__3].r; - b[i__2].r = z__1.r, b[i__2].i = z__1.i; -/* L320: */ - } - } -/* L330: */ - } - } else { - i__1 = *n; - for (k = 1; k <= i__1; ++k) { - if (nounit) { - if (noconj) { - z_div(&z__1, &c_b359, &a[k + k * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } else { - d_cnjg(&z__2, &a[k + k * a_dim1]); - z_div(&z__1, &c_b359, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + k * b_dim1; - i__4 = i__ + k * b_dim1; - z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i, - z__1.i = temp.r * b[i__4].i + temp.i * b[ - i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L340: */ - } - } - i__2 = *n; - for (j = k + 1; j <= i__2; ++j) { - i__3 = j + k * a_dim1; - if (a[i__3].r != 0. || a[i__3].i != 0.) 
{ - if (noconj) { - i__3 = j + k * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - } else { - d_cnjg(&z__1, &a[j + k * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - i__3 = *m; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * b_dim1; - i__5 = i__ + j * b_dim1; - i__6 = i__ + k * b_dim1; - z__2.r = temp.r * b[i__6].r - temp.i * b[i__6] - .i, z__2.i = temp.r * b[i__6].i + - temp.i * b[i__6].r; - z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5] - .i - z__2.i; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L350: */ - } - } -/* L360: */ - } - if (alpha->r != 1. || alpha->i != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + k * b_dim1; - i__4 = i__ + k * b_dim1; - z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4] - .i, z__1.i = alpha->r * b[i__4].i + - alpha->i * b[i__4].r; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L370: */ - } - } -/* L380: */ - } - } - } - } - - return 0; - -/* End of ZTRSM . */ - -} /* ztrsm_ */ - -/* Subroutine */ int ztrsv_(char *uplo, char *trans, char *diag, integer *n, - doublecomplex *a, integer *lda, doublecomplex *x, integer *incx) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2, z__3; - - /* Builtin functions */ - void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg( - doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, ix, jx, kx, info; - static doublecomplex temp; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical noconj, nounit; - - -/* - Purpose - ======= - - ZTRSV solves one of the systems of equations - - A*x = b, or A'*x = b, or conjg( A' )*x = b, - - where b and x are n element vectors and A is an n by n unit, or - non-unit, upper or lower triangular matrix. - - No test for singularity or near-singularity is included in this - routine. Such tests must be performed before calling this routine. 
- - Parameters - ========== - - UPLO - CHARACTER*1. - On entry, UPLO specifies whether the matrix is an upper or - lower triangular matrix as follows: - - UPLO = 'U' or 'u' A is an upper triangular matrix. - - UPLO = 'L' or 'l' A is a lower triangular matrix. - - Unchanged on exit. - - TRANS - CHARACTER*1. - On entry, TRANS specifies the equations to be solved as - follows: - - TRANS = 'N' or 'n' A*x = b. - - TRANS = 'T' or 't' A'*x = b. - - TRANS = 'C' or 'c' conjg( A' )*x = b. - - Unchanged on exit. - - DIAG - CHARACTER*1. - On entry, DIAG specifies whether or not A is unit - triangular as follows: - - DIAG = 'U' or 'u' A is assumed to be unit triangular. - - DIAG = 'N' or 'n' A is not assumed to be unit - triangular. - - Unchanged on exit. - - N - INTEGER. - On entry, N specifies the order of the matrix A. - N must be at least zero. - Unchanged on exit. - - A - COMPLEX*16 array of DIMENSION ( LDA, n ). - Before entry with UPLO = 'U' or 'u', the leading n by n - upper triangular part of the array A must contain the upper - triangular matrix and the strictly lower triangular part of - A is not referenced. - Before entry with UPLO = 'L' or 'l', the leading n by n - lower triangular part of the array A must contain the lower - triangular matrix and the strictly upper triangular part of - A is not referenced. - Note that when DIAG = 'U' or 'u', the diagonal elements of - A are not referenced either, but are assumed to be unity. - Unchanged on exit. - - LDA - INTEGER. - On entry, LDA specifies the first dimension of A as declared - in the calling (sub) program. LDA must be at least - max( 1, n ). - Unchanged on exit. - - X - COMPLEX*16 array of dimension at least - ( 1 + ( n - 1 )*abs( INCX ) ). - Before entry, the incremented array X must contain the n - element right-hand side vector b. On exit, X is overwritten - with the solution vector x. - - INCX - INTEGER. - On entry, INCX specifies the increment for the elements of - X. INCX must not be zero. 
- Unchanged on exit. - - - Level 2 Blas routine. - - -- Written on 22-October-1986. - Jack Dongarra, Argonne National Lab. - Jeremy Du Croz, Nag Central Office. - Sven Hammarling, Nag Central Office. - Richard Hanson, Sandia National Labs. - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - - /* Function Body */ - info = 0; - if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) { - info = 1; - } else if (((! lsame_(trans, "N") && ! lsame_(trans, - "T")) && ! lsame_(trans, "C"))) { - info = 2; - } else if ((! lsame_(diag, "U") && ! lsame_(diag, - "N"))) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*lda < max(1,*n)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } - if (info != 0) { - xerbla_("ZTRSV ", &info); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - - noconj = lsame_(trans, "T"); - nounit = lsame_(diag, "N"); - -/* - Set up the start point in X if the increment is not unity. This - will be ( N - 1 )*INCX too small for descending loops. -*/ - - if (*incx <= 0) { - kx = 1 - (*n - 1) * *incx; - } else if (*incx != 1) { - kx = 1; - } - -/* - Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. -*/ - - if (lsame_(trans, "N")) { - -/* Form x := inv( A )*x. */ - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - i__1 = j; - if (x[i__1].r != 0. || x[i__1].i != 0.) 
{ - if (nounit) { - i__1 = j; - z_div(&z__1, &x[j], &a[j + j * a_dim1]); - x[i__1].r = z__1.r, x[i__1].i = z__1.i; - } - i__1 = j; - temp.r = x[i__1].r, temp.i = x[i__1].i; - for (i__ = j - 1; i__ >= 1; --i__) { - i__1 = i__; - i__2 = i__; - i__3 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i, - z__2.i = temp.r * a[i__3].i + temp.i * a[ - i__3].r; - z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i - - z__2.i; - x[i__1].r = z__1.r, x[i__1].i = z__1.i; -/* L10: */ - } - } -/* L20: */ - } - } else { - jx = kx + (*n - 1) * *incx; - for (j = *n; j >= 1; --j) { - i__1 = jx; - if (x[i__1].r != 0. || x[i__1].i != 0.) { - if (nounit) { - i__1 = jx; - z_div(&z__1, &x[jx], &a[j + j * a_dim1]); - x[i__1].r = z__1.r, x[i__1].i = z__1.i; - } - i__1 = jx; - temp.r = x[i__1].r, temp.i = x[i__1].i; - ix = jx; - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - i__1 = ix; - i__2 = ix; - i__3 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i, - z__2.i = temp.r * a[i__3].i + temp.i * a[ - i__3].r; - z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i - - z__2.i; - x[i__1].r = z__1.r, x[i__1].i = z__1.i; -/* L30: */ - } - } - jx -= *incx; -/* L40: */ - } - } - } else { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - if (x[i__2].r != 0. || x[i__2].i != 0.) { - if (nounit) { - i__2 = j; - z_div(&z__1, &x[j], &a[j + j * a_dim1]); - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - } - i__2 = j; - temp.r = x[i__2].r, temp.i = x[i__2].i; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__; - i__4 = i__; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[ - i__5].r; - z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i - - z__2.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; -/* L50: */ - } - } -/* L60: */ - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jx; - if (x[i__2].r != 0. || x[i__2].i != 0.) 
{ - if (nounit) { - i__2 = jx; - z_div(&z__1, &x[jx], &a[j + j * a_dim1]); - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - } - i__2 = jx; - temp.r = x[i__2].r, temp.i = x[i__2].i; - ix = jx; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - i__3 = ix; - i__4 = ix; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[ - i__5].r; - z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i - - z__2.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; -/* L70: */ - } - } - jx += *incx; -/* L80: */ - } - } - } - } else { - -/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */ - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - temp.r = x[i__2].r, temp.i = x[i__2].i; - if (noconj) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ - i__4].i, z__2.i = a[i__3].r * x[i__4].i + - a[i__3].i * x[i__4].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L90: */ - } - if (nounit) { - z_div(&z__1, &temp, &a[j + j * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = i__; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[ - i__3].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L100: */ - } - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z_div(&z__1, &temp, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - } - i__2 = j; - x[i__2].r = temp.r, x[i__2].i = temp.i; -/* L110: */ - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - ix = kx; - i__2 = jx; - temp.r = x[i__2].r, temp.i = x[i__2].i; - if (noconj) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; 
- i__4 = ix; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ - i__4].i, z__2.i = a[i__3].r * x[i__4].i + - a[i__3].i * x[i__4].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - ix += *incx; -/* L120: */ - } - if (nounit) { - z_div(&z__1, &temp, &a[j + j * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = ix; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[ - i__3].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - ix += *incx; -/* L130: */ - } - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z_div(&z__1, &temp, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - } - i__2 = jx; - x[i__2].r = temp.r, x[i__2].i = temp.i; - jx += *incx; -/* L140: */ - } - } - } else { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - i__1 = j; - temp.r = x[i__1].r, temp.i = x[i__1].i; - if (noconj) { - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - i__2 = i__ + j * a_dim1; - i__3 = i__; - z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[ - i__3].i, z__2.i = a[i__2].r * x[i__3].i + - a[i__2].i * x[i__3].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L150: */ - } - if (nounit) { - z_div(&z__1, &temp, &a[j + j * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__2 = i__; - z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i, - z__2.i = z__3.r * x[i__2].i + z__3.i * x[ - i__2].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; -/* L160: */ - } - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z_div(&z__1, &temp, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - } - i__1 = j; - x[i__1].r = temp.r, x[i__1].i = temp.i; -/* 
L170: */ - } - } else { - kx += (*n - 1) * *incx; - jx = kx; - for (j = *n; j >= 1; --j) { - ix = kx; - i__1 = jx; - temp.r = x[i__1].r, temp.i = x[i__1].i; - if (noconj) { - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - i__2 = i__ + j * a_dim1; - i__3 = ix; - z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[ - i__3].i, z__2.i = a[i__2].r * x[i__3].i + - a[i__2].i * x[i__3].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - ix -= *incx; -/* L180: */ - } - if (nounit) { - z_div(&z__1, &temp, &a[j + j * a_dim1]); - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__2 = ix; - z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i, - z__2.i = z__3.r * x[i__2].i + z__3.i * x[ - i__2].r; - z__1.r = temp.r - z__2.r, z__1.i = temp.i - - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - ix -= *incx; -/* L190: */ - } - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z_div(&z__1, &temp, &z__2); - temp.r = z__1.r, temp.i = z__1.i; - } - } - i__1 = jx; - x[i__1].r = temp.r, x[i__1].i = temp.i; - jx -= *incx; -/* L200: */ - } - } - } - } - - return 0; - -/* End of ZTRSV . */ - -} /* ztrsv_ */ - diff --git a/numpy-1.6.2/numpy/linalg/dlamch.c b/numpy-1.6.2/numpy/linalg/dlamch.c deleted file mode 100644 index bf1dfdb059..0000000000 --- a/numpy-1.6.2/numpy/linalg/dlamch.c +++ /dev/null @@ -1,951 +0,0 @@ -#include -#include "f2c.h" - -/* If config.h is available, we only need dlamc3 */ -#ifndef HAVE_CONFIG -doublereal dlamch_(char *cmach) -{ -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMCH determines double precision machine parameters. 
- - Arguments - ========= - - CMACH (input) CHARACTER*1 - Specifies the value to be returned by DLAMCH: - = 'E' or 'e', DLAMCH := eps - = 'S' or 's , DLAMCH := sfmin - = 'B' or 'b', DLAMCH := base - = 'P' or 'p', DLAMCH := eps*base - = 'N' or 'n', DLAMCH := t - = 'R' or 'r', DLAMCH := rnd - = 'M' or 'm', DLAMCH := emin - = 'U' or 'u', DLAMCH := rmin - = 'L' or 'l', DLAMCH := emax - = 'O' or 'o', DLAMCH := rmax - - where - - eps = relative machine precision - sfmin = safe minimum, such that 1/sfmin does not overflow - base = base of the machine - prec = eps*base - t = number of (base) digits in the mantissa - rnd = 1.0 when rounding occurs in addition, 0.0 otherwise - emin = minimum exponent before (gradual) underflow - rmin = underflow threshold - base**(emin-1) - emax = largest exponent before overflow - rmax = overflow threshold - (base**emax)*(1-eps) - - ===================================================================== -*/ -/* >>Start of File<< - Initialized data */ - static logical first = TRUE_; - /* System generated locals */ - integer i__1; - doublereal ret_val; - /* Builtin functions */ - double pow_di(doublereal *, integer *); - /* Local variables */ - static doublereal base; - static integer beta; - static doublereal emin, prec, emax; - static integer imin, imax; - static logical lrnd; - static doublereal rmin, rmax, t, rmach; - extern logical lsame_(char *, char *); - static doublereal small, sfmin; - extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *, - doublereal *, integer *, doublereal *, integer *, doublereal *); - static integer it; - static doublereal rnd, eps; - - - - if (first) { - first = FALSE_; - dlamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax); - base = (doublereal) beta; - t = (doublereal) it; - if (lrnd) { - rnd = 1.; - i__1 = 1 - it; - eps = pow_di(&base, &i__1) / 2; - } else { - rnd = 0.; - i__1 = 1 - it; - eps = pow_di(&base, &i__1); - } - prec = eps * base; - emin = (doublereal) imin; - emax = 
(doublereal) imax; - sfmin = rmin; - small = 1. / rmax; - if (small >= sfmin) { - -/* Use SMALL plus a bit, to avoid the possibility of rou -nding - causing overflow when computing 1/sfmin. */ - - sfmin = small * (eps + 1.); - } - } - - if (lsame_(cmach, "E")) { - rmach = eps; - } else if (lsame_(cmach, "S")) { - rmach = sfmin; - } else if (lsame_(cmach, "B")) { - rmach = base; - } else if (lsame_(cmach, "P")) { - rmach = prec; - } else if (lsame_(cmach, "N")) { - rmach = t; - } else if (lsame_(cmach, "R")) { - rmach = rnd; - } else if (lsame_(cmach, "M")) { - rmach = emin; - } else if (lsame_(cmach, "U")) { - rmach = rmin; - } else if (lsame_(cmach, "L")) { - rmach = emax; - } else if (lsame_(cmach, "O")) { - rmach = rmax; - } - - ret_val = rmach; - return ret_val; - -/* End of DLAMCH */ - -} /* dlamch_ */ - - -/* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical - *ieee1) -{ -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMC1 determines the machine parameters given by BETA, T, RND, and - IEEE1. - - Arguments - ========= - - BETA (output) INTEGER - The base of the machine. - - T (output) INTEGER - The number of ( BETA ) digits in the mantissa. - - RND (output) LOGICAL - Specifies whether proper rounding ( RND = .TRUE. ) or - chopping ( RND = .FALSE. ) occurs in addition. This may not - - be a reliable guide to the way in which the machine performs - - its arithmetic. - - IEEE1 (output) LOGICAL - Specifies whether rounding appears to be done in the IEEE - 'round to nearest' style. - - Further Details - =============== - - The routine is based on the routine ENVRON by Malcolm and - incorporates suggestions by Gentleman and Marovich. See - - Malcolm M. A. (1972) Algorithms to reveal properties of - floating-point arithmetic. Comms. of the ACM, 15, 949-951. 
- - Gentleman W. M. and Marovich S. B. (1974) More on algorithms - that reveal properties of floating point arithmetic units. - Comms. of the ACM, 17, 276-277. - - ===================================================================== -*/ - /* Initialized data */ - static logical first = TRUE_; - /* System generated locals */ - doublereal d__1, d__2; - /* Local variables */ - static logical lrnd; - static doublereal a, b, c, f; - static integer lbeta; - static doublereal savec; - extern doublereal dlamc3_(doublereal *, doublereal *); - static logical lieee1; - static doublereal t1, t2; - static integer lt; - static doublereal one, qtr; - - - - if (first) { - first = FALSE_; - one = 1.; - -/* LBETA, LIEEE1, LT and LRND are the local values of BE -TA, - IEEE1, T and RND. - - Throughout this routine we use the function DLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. - - Compute a = 2.0**m with the smallest positive integer m s -uch - that - - fl( a + 1.0 ) = a. */ - - a = 1.; - c = 1.; - -/* + WHILE( C.EQ.ONE )LOOP */ -L10: - if (c == one) { - a *= 2; - c = dlamc3_(&a, &one); - d__1 = -a; - c = dlamc3_(&c, &d__1); - goto L10; - } -/* + END WHILE - - Now compute b = 2.0**m with the smallest positive integer -m - such that - - fl( a + b ) .gt. a. */ - - b = 1.; - c = dlamc3_(&a, &b); - -/* + WHILE( C.EQ.A )LOOP */ -L20: - if (c == a) { - b *= 2; - c = dlamc3_(&a, &b); - goto L20; - } -/* + END WHILE - - Now compute the base. a and c are neighbouring floating po -int - numbers in the interval ( beta**t, beta**( t + 1 ) ) and - so - their difference is beta. Adding 0.25 to c is to ensure that - it - is truncated to beta and not ( beta - 1 ). */ - - qtr = one / 4; - savec = c; - d__1 = -a; - c = dlamc3_(&c, &d__1); - lbeta = (integer) (c + qtr); - -/* Now determine whether rounding or chopping occurs, by addin -g a - bit less than beta/2 and a bit more than beta/2 to - a. 
*/ - - b = (doublereal) lbeta; - d__1 = b / 2; - d__2 = -b / 100; - f = dlamc3_(&d__1, &d__2); - c = dlamc3_(&f, &a); - if (c == a) { - lrnd = TRUE_; - } else { - lrnd = FALSE_; - } - d__1 = b / 2; - d__2 = b / 100; - f = dlamc3_(&d__1, &d__2); - c = dlamc3_(&f, &a); - if (lrnd && c == a) { - lrnd = FALSE_; - } - -/* Try and decide whether rounding is done in the IEEE 'round - to - nearest' style. B/2 is half a unit in the last place of the -two - numbers A and SAVEC. Furthermore, A is even, i.e. has last -bit - zero, and SAVEC is odd. Thus adding B/2 to A should not cha -nge - A, but adding B/2 to SAVEC should change SAVEC. */ - - d__1 = b / 2; - t1 = dlamc3_(&d__1, &a); - d__1 = b / 2; - t2 = dlamc3_(&d__1, &savec); - lieee1 = t1 == a && t2 > savec && lrnd; - -/* Now find the mantissa, t. It should be the integer part - of - log to the base beta of a, however it is safer to determine - t - by powering. So we find t as the smallest positive integer -for - which - - fl( beta**t + 1.0 ) = 1.0. */ - - lt = 0; - a = 1.; - c = 1.; - -/* + WHILE( C.EQ.ONE )LOOP */ -L30: - if (c == one) { - ++lt; - a *= lbeta; - c = dlamc3_(&a, &one); - d__1 = -a; - c = dlamc3_(&c, &d__1); - goto L30; - } -/* + END WHILE */ - - } - - *beta = lbeta; - *t = lt; - *rnd = lrnd; - *ieee1 = lieee1; - return 0; - -/* End of DLAMC1 */ - -} /* dlamc1_ */ - - -/* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd, - doublereal *eps, integer *emin, doublereal *rmin, integer *emax, - doublereal *rmax) -{ -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMC2 determines the machine parameters specified in its argument - list. - - Arguments - ========= - - BETA (output) INTEGER - The base of the machine. - - T (output) INTEGER - The number of ( BETA ) digits in the mantissa. 
- - RND (output) LOGICAL - Specifies whether proper rounding ( RND = .TRUE. ) or - chopping ( RND = .FALSE. ) occurs in addition. This may not - - be a reliable guide to the way in which the machine performs - - its arithmetic. - - EPS (output) DOUBLE PRECISION - The smallest positive number such that - - fl( 1.0 - EPS ) .LT. 1.0, - - where fl denotes the computed value. - - EMIN (output) INTEGER - The minimum exponent before (gradual) underflow occurs. - - RMIN (output) DOUBLE PRECISION - The smallest normalized number for the machine, given by - BASE**( EMIN - 1 ), where BASE is the floating point value - - of BETA. - - EMAX (output) INTEGER - The maximum exponent before overflow occurs. - - RMAX (output) DOUBLE PRECISION - The largest positive number for the machine, given by - BASE**EMAX * ( 1 - EPS ), where BASE is the floating point - - value of BETA. - - Further Details - =============== - - The computation of EPS is based on a routine PARANOIA by - W. Kahan of the University of California at Berkeley. 
- - ===================================================================== -*/ - - /* Initialized data */ - static logical first = TRUE_; - static logical iwarn = FALSE_; - /* System generated locals */ - integer i__1; - doublereal d__1, d__2, d__3, d__4, d__5; - /* Builtin functions */ - double pow_di(doublereal *, integer *); - /* Local variables */ - static logical ieee; - static doublereal half; - static logical lrnd; - static doublereal leps, zero, a, b, c; - static integer i, lbeta; - static doublereal rbase; - static integer lemin, lemax, gnmin; - static doublereal small; - static integer gpmin; - static doublereal third, lrmin, lrmax, sixth; - extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *, - logical *); - extern doublereal dlamc3_(doublereal *, doublereal *); - static logical lieee1; - extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *), - dlamc5_(integer *, integer *, integer *, logical *, integer *, - doublereal *); - static integer lt, ngnmin, ngpmin; - static doublereal one, two; - - - - if (first) { - first = FALSE_; - zero = 0.; - one = 1.; - two = 2.; - -/* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values - of - BETA, T, RND, EPS, EMIN and RMIN. - - Throughout this routine we use the function DLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. - - DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. -*/ - - dlamc1_(&lbeta, <, &lrnd, &lieee1); - -/* Start to find EPS. */ - - b = (doublereal) lbeta; - i__1 = -lt; - a = pow_di(&b, &i__1); - leps = a; - -/* Try some tricks to see whether or not this is the correct E -PS. 
*/ - - b = two / 3; - half = one / 2; - d__1 = -half; - sixth = dlamc3_(&b, &d__1); - third = dlamc3_(&sixth, &sixth); - d__1 = -half; - b = dlamc3_(&third, &d__1); - b = dlamc3_(&b, &sixth); - b = abs(b); - if (b < leps) { - b = leps; - } - - leps = 1.; - -/* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */ -L10: - if (leps > b && b > zero) { - leps = b; - d__1 = half * leps; -/* Computing 5th power */ - d__3 = two, d__4 = d__3, d__3 *= d__3; -/* Computing 2nd power */ - d__5 = leps; - d__2 = d__4 * (d__3 * d__3) * (d__5 * d__5); - c = dlamc3_(&d__1, &d__2); - d__1 = -c; - c = dlamc3_(&half, &d__1); - b = dlamc3_(&half, &c); - d__1 = -b; - c = dlamc3_(&half, &d__1); - b = dlamc3_(&half, &c); - goto L10; - } -/* + END WHILE */ - - if (a < leps) { - leps = a; - } - -/* Computation of EPS complete. - - Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3 -)). - Keep dividing A by BETA until (gradual) underflow occurs. T -his - is detected when we cannot recover the previous A. */ - - rbase = one / lbeta; - small = one; - for (i = 1; i <= 3; ++i) { - d__1 = small * rbase; - small = dlamc3_(&d__1, &zero); -/* L20: */ - } - a = dlamc3_(&one, &small); - dlamc4_(&ngpmin, &one, &lbeta); - d__1 = -one; - dlamc4_(&ngnmin, &d__1, &lbeta); - dlamc4_(&gpmin, &a, &lbeta); - d__1 = -a; - dlamc4_(&gnmin, &d__1, &lbeta); - ieee = FALSE_; - - if (ngpmin == ngnmin && gpmin == gnmin) { - if (ngpmin == gpmin) { - lemin = ngpmin; -/* ( Non twos-complement machines, no gradual under -flow; - e.g., VAX ) */ - } else if (gpmin - ngpmin == 3) { - lemin = ngpmin - 1 + lt; - ieee = TRUE_; -/* ( Non twos-complement machines, with gradual und -erflow; - e.g., IEEE standard followers ) */ - } else { - lemin = min(ngpmin,gpmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else if (ngpmin == gpmin && ngnmin == gnmin) { - if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) { - lemin = max(ngpmin,ngnmin); -/* ( Twos-complement machines, no gradual underflow -; - e.g., CYBER 205 
) */ - } else { - lemin = min(ngpmin,ngnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin) - { - if (gpmin - min(ngpmin,ngnmin) == 3) { - lemin = max(ngpmin,ngnmin) - 1 + lt; -/* ( Twos-complement machines with gradual underflo -w; - no known machine ) */ - } else { - lemin = min(ngpmin,ngnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else { -/* Computing MIN */ - i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin); - lemin = min(i__1,gnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } -/* ** - Comment out this if block if EMIN is ok */ - if (iwarn) { - first = TRUE_; - printf("\n\n WARNING. The value EMIN may be incorrect:- "); - printf("EMIN = %8i\n",lemin); - printf("If, after inspection, the value EMIN looks acceptable"); - printf("please comment out \n the IF block as marked within the"); - printf("code of routine DLAMC2, \n otherwise supply EMIN"); - printf("explicitly.\n"); - } -/* ** - - Assume IEEE arithmetic if we found denormalised numbers abo -ve, - or if arithmetic seems to round in the IEEE style, determi -ned - in routine DLAMC1. A true IEEE machine should have both thi -ngs - true; however, faulty machines may have one or the other. */ - - ieee = ieee || lieee1; - -/* Compute RMIN by successive division by BETA. We could comp -ute - RMIN as BASE**( EMIN - 1 ), but some machines underflow dur -ing - this computation. */ - - lrmin = 1.; - i__1 = 1 - lemin; - for (i = 1; i <= 1-lemin; ++i) { - d__1 = lrmin * rbase; - lrmin = dlamc3_(&d__1, &zero); -/* L30: */ - } - -/* Finally, call DLAMC5 to compute EMAX and RMAX. 
*/ - - dlamc5_(&lbeta, <, &lemin, &ieee, &lemax, &lrmax); - } - - *beta = lbeta; - *t = lt; - *rnd = lrnd; - *eps = leps; - *emin = lemin; - *rmin = lrmin; - *emax = lemax; - *rmax = lrmax; - - return 0; - - -/* End of DLAMC2 */ - -} /* dlamc2_ */ -#endif - - -doublereal dlamc3_(doublereal *a, doublereal *b) -{ -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMC3 is intended to force A and B to be stored prior to doing - - the addition of A and B , for use in situations where optimizers - - might hold one of these in a register. - - Arguments - ========= - - A, B (input) DOUBLE PRECISION - The values A and B. - - ===================================================================== -*/ -/* >>Start of File<< - System generated locals */ - volatile doublereal ret_val; - - - - ret_val = *a + *b; - - return ret_val; - -/* End of DLAMC3 */ - -} /* dlamc3_ */ - - -#ifndef HAVE_CONFIG -/* Subroutine */ int dlamc4_(integer *emin, doublereal *start, integer *base) -{ -/* -- LAPACK auxiliary routine (version 2.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMC4 is a service routine for DLAMC2. - - Arguments - ========= - - EMIN (output) EMIN - The minimum exponent before (gradual) underflow, computed by - - setting A = START and dividing by BASE until the previous A - can not be recovered. - - START (input) DOUBLE PRECISION - The starting point for determining EMIN. - - BASE (input) INTEGER - The base of the machine. 
- - ===================================================================== -*/ - /* System generated locals */ - integer i__1; - doublereal d__1; - /* Local variables */ - static doublereal zero, a; - static integer i; - static doublereal rbase, b1, b2, c1, c2, d1, d2; - extern doublereal dlamc3_(doublereal *, doublereal *); - static doublereal one; - - - - a = *start; - one = 1.; - rbase = one / *base; - zero = 0.; - *emin = 1; - d__1 = a * rbase; - b1 = dlamc3_(&d__1, &zero); - c1 = a; - c2 = a; - d1 = a; - d2 = a; -/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. - $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */ -L10: - if (c1 == a && c2 == a && d1 == a && d2 == a) { - --(*emin); - a = b1; - d__1 = a / *base; - b1 = dlamc3_(&d__1, &zero); - d__1 = b1 * *base; - c1 = dlamc3_(&d__1, &zero); - d1 = zero; - i__1 = *base; - for (i = 1; i <= *base; ++i) { - d1 += b1; -/* L20: */ - } - d__1 = a * rbase; - b2 = dlamc3_(&d__1, &zero); - d__1 = b2 / rbase; - c2 = dlamc3_(&d__1, &zero); - d2 = zero; - i__1 = *base; - for (i = 1; i <= *base; ++i) { - d2 += b2; -/* L30: */ - } - goto L10; - } -/* + END WHILE */ - - return 0; - -/* End of DLAMC4 */ - -} /* dlamc4_ */ - - -/* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin, - logical *ieee, integer *emax, doublereal *rmax) -{ -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMC5 attempts to compute RMAX, the largest machine floating-point - number, without overflow. It assumes that EMAX + abs(EMIN) sum - approximately to a power of 2. It will fail on machines where this - assumption does not hold, for example, the Cyber 205 (EMIN = -28625, - - EMAX = 28718). It will also fail if the value supplied for EMIN is - too large (i.e. too close to zero), probably with overflow. 
- - Arguments - ========= - - BETA (input) INTEGER - The base of floating-point arithmetic. - - P (input) INTEGER - The number of base BETA digits in the mantissa of a - floating-point value. - - EMIN (input) INTEGER - The minimum exponent before (gradual) underflow. - - IEEE (input) LOGICAL - A logical flag specifying whether or not the arithmetic - system is thought to comply with the IEEE standard. - - EMAX (output) INTEGER - The largest exponent before overflow - - RMAX (output) DOUBLE PRECISION - The largest machine floating-point number. - - ===================================================================== - - - - First compute LEXP and UEXP, two powers of 2 that bound - abs(EMIN). We then assume that EMAX + abs(EMIN) will sum - approximately to the bound that is closest to abs(EMIN). - (EMAX is the exponent of the required number RMAX). */ - /* Table of constant values */ - static doublereal c_b5 = 0.; - - /* System generated locals */ - integer i__1; - doublereal d__1; - /* Local variables */ - static integer lexp; - static doublereal oldy; - static integer uexp, i; - static doublereal y, z; - static integer nbits; - extern doublereal dlamc3_(doublereal *, doublereal *); - static doublereal recbas; - static integer exbits, expsum, try__; - - - - lexp = 1; - exbits = 1; -L10: - try__ = lexp << 1; - if (try__ <= -(*emin)) { - lexp = try__; - ++exbits; - goto L10; - } - if (lexp == -(*emin)) { - uexp = lexp; - } else { - uexp = try__; - ++exbits; - } - -/* Now -LEXP is less than or equal to EMIN, and -UEXP is greater - than or equal to EMIN. EXBITS is the number of bits needed to - store the exponent. */ - - if (uexp + *emin > -lexp - *emin) { - expsum = lexp << 1; - } else { - expsum = uexp << 1; - } - -/* EXPSUM is the exponent range, approximately equal to - EMAX - EMIN + 1 . */ - - *emax = expsum + *emin - 1; - nbits = exbits + 1 + *p; - -/* NBITS is the total number of bits needed to store a - floating-point number. 
*/ - - if (nbits % 2 == 1 && *beta == 2) { - -/* Either there are an odd number of bits used to store a - floating-point number, which is unlikely, or some bits are - - not used in the representation of numbers, which is possible -, - (e.g. Cray machines) or the mantissa has an implicit bit, - (e.g. IEEE machines, Dec Vax machines), which is perhaps the - - most likely. We have to assume the last alternative. - If this is true, then we need to reduce EMAX by one because - - there must be some way of representing zero in an implicit-b -it - system. On machines like Cray, we are reducing EMAX by one - - unnecessarily. */ - - --(*emax); - } - - if (*ieee) { - -/* Assume we are on an IEEE machine which reserves one exponent - - for infinity and NaN. */ - - --(*emax); - } - -/* Now create RMAX, the largest machine number, which should - be equal to (1.0 - BETA**(-P)) * BETA**EMAX . - - First compute 1.0 - BETA**(-P), being careful that the - result is less than 1.0 . */ - - recbas = 1. / *beta; - z = *beta - 1.; - y = 0.; - i__1 = *p; - for (i = 1; i <= *p; ++i) { - z *= recbas; - if (y < 1.) { - oldy = y; - } - y = dlamc3_(&y, &z); -/* L20: */ - } - if (y >= 1.) { - y = oldy; - } - -/* Now multiply by BETA**EMAX to get RMAX. */ - - i__1 = *emax; - for (i = 1; i <= *emax; ++i) { - d__1 = y * *beta; - y = dlamc3_(&d__1, &c_b5); -/* L30: */ - } - - *rmax = y; - return 0; - -/* End of DLAMC5 */ - -} /* dlamc5_ */ -#endif diff --git a/numpy-1.6.2/numpy/linalg/dlapack_lite.c b/numpy-1.6.2/numpy/linalg/dlapack_lite.c deleted file mode 100644 index d2c1d8129b..0000000000 --- a/numpy-1.6.2/numpy/linalg/dlapack_lite.c +++ /dev/null @@ -1,36008 +0,0 @@ -#define MAXITERLOOPS 100 - -/* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. 
-*/ -#include "f2c.h" - -#ifdef HAVE_CONFIG -#include "config.h" -#else -extern doublereal dlamch_(char *); -#define EPSILON dlamch_("Epsilon") -#define SAFEMINIMUM dlamch_("Safe minimum") -#define PRECISION dlamch_("Precision") -#define BASE dlamch_("Base") -#endif - -extern doublereal dlapy2_(doublereal *x, doublereal *y); - - - -/* Table of constant values */ - -static integer c__9 = 9; -static integer c__0 = 0; -static doublereal c_b15 = 1.; -static integer c__1 = 1; -static doublereal c_b29 = 0.; -static doublereal c_b94 = -.125; -static doublereal c_b151 = -1.; -static integer c_n1 = -1; -static integer c__3 = 3; -static integer c__2 = 2; -static integer c__8 = 8; -static integer c__4 = 4; -static integer c__65 = 65; -static integer c__6 = 6; -static integer c__15 = 15; -static logical c_false = FALSE_; -static integer c__10 = 10; -static integer c__11 = 11; -static doublereal c_b2804 = 2.; -static logical c_true = TRUE_; -static real c_b3825 = 0.f; -static real c_b3826 = 1.f; - -/* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal * - d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, - integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer * - iwork, integer *info) -{ - /* System generated locals */ - integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; - doublereal d__1; - - /* Builtin functions */ - double d_sign(doublereal *, doublereal *), log(doublereal); - - /* Local variables */ - static integer i__, j, k; - static doublereal p, r__; - static integer z__, ic, ii, kk; - static doublereal cs; - static integer is, iu; - static doublereal sn; - static integer nm1; - static doublereal eps; - static integer ivt, difl, difr, ierr, perm, mlvl, sqre; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer * - , doublereal *, integer *), 
dswap_(integer *, doublereal *, - integer *, doublereal *, integer *); - static integer poles, iuplo, nsize, start; - extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - integer *, integer *, doublereal *, integer *); - - extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, integer *, integer *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *, - integer *), dlascl_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, integer *, doublereal *, integer *, - integer *), dlasdq_(char *, integer *, integer *, integer - *, integer *, integer *, doublereal *, doublereal *, doublereal *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *), dlaset_(char *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int xerbla_(char *, integer *); - static integer givcol; - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - static integer icompq; - static doublereal orgnrm; - static integer givnum, givptr, qstart, smlsiz, wstart, smlszp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - December 1, 1999 - - - Purpose - ======= - - DBDSDC computes the singular value decomposition (SVD) of a real - N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT, - using a divide and conquer method, where S is a diagonal matrix - with non-negative diagonal elements (the singular values of B), and - U and VT are orthogonal matrices of left and right singular vectors, - respectively. DBDSDC can be used to compute all singular values, - and optionally, singular vectors or singular vectors in compact form. - - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. - It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. See DLASD3 for details. - - The code currently call DLASDQ if singular values only are desired. - However, it can be slightly modified to compute singular values - using the divide and conquer method. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': B is upper bidiagonal. - = 'L': B is lower bidiagonal. - - COMPQ (input) CHARACTER*1 - Specifies whether singular vectors are to be computed - as follows: - = 'N': Compute singular values only; - = 'P': Compute singular values and compute singular - vectors in compact form; - = 'I': Compute singular values and singular vectors. - - N (input) INTEGER - The order of the matrix B. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the n diagonal elements of the bidiagonal matrix B. - On exit, if INFO=0, the singular values of B. - - E (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the elements of E contain the offdiagonal - elements of the bidiagonal matrix whose SVD is desired. 
- On exit, E has been destroyed. - - U (output) DOUBLE PRECISION array, dimension (LDU,N) - If COMPQ = 'I', then: - On exit, if INFO = 0, U contains the left singular vectors - of the bidiagonal matrix. - For other values of COMPQ, U is not referenced. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= 1. - If singular vectors are desired, then LDU >= max( 1, N ). - - VT (output) DOUBLE PRECISION array, dimension (LDVT,N) - If COMPQ = 'I', then: - On exit, if INFO = 0, VT' contains the right singular - vectors of the bidiagonal matrix. - For other values of COMPQ, VT is not referenced. - - LDVT (input) INTEGER - The leading dimension of the array VT. LDVT >= 1. - If singular vectors are desired, then LDVT >= max( 1, N ). - - Q (output) DOUBLE PRECISION array, dimension (LDQ) - If COMPQ = 'P', then: - On exit, if INFO = 0, Q and IQ contain the left - and right singular vectors in a compact form, - requiring O(N log N) space instead of 2*N**2. - In particular, Q contains all the DOUBLE PRECISION data in - LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1)))) - words of memory, where SMLSIZ is returned by ILAENV and - is equal to the maximum size of the subproblems at the - bottom of the computation tree (usually about 25). - For other values of COMPQ, Q is not referenced. - - IQ (output) INTEGER array, dimension (LDIQ) - If COMPQ = 'P', then: - On exit, if INFO = 0, Q and IQ contain the left - and right singular vectors in a compact form, - requiring O(N log N) space instead of 2*N**2. - In particular, IQ contains all INTEGER data in - LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1)))) - words of memory, where SMLSIZ is returned by ILAENV and - is equal to the maximum size of the subproblems at the - bottom of the computation tree (usually about 25). - For other values of COMPQ, IQ is not referenced. - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK) - If COMPQ = 'N' then LWORK >= (4 * N). - If COMPQ = 'P' then LWORK >= (6 * N). 
- If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N). - - IWORK (workspace) INTEGER array, dimension (8*N) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: The algorithm failed to compute an singular value. - The update process of divide and conquer failed. - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - --q; - --iq; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - iuplo = 0; - if (lsame_(uplo, "U")) { - iuplo = 1; - } - if (lsame_(uplo, "L")) { - iuplo = 2; - } - if (lsame_(compq, "N")) { - icompq = 0; - } else if (lsame_(compq, "P")) { - icompq = 1; - } else if (lsame_(compq, "I")) { - icompq = 2; - } else { - icompq = -1; - } - if (iuplo == 0) { - *info = -1; - } else if (icompq < 0) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ldu < 1 || (icompq == 2 && *ldu < *n)) { - *info = -7; - } else if (*ldvt < 1 || (icompq == 2 && *ldvt < *n)) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DBDSDC", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - if (*n == 1) { - if (icompq == 1) { - q[1] = d_sign(&c_b15, &d__[1]); - q[smlsiz * *n + 1] = 1.; - } else if (icompq == 2) { - u[u_dim1 + 1] = d_sign(&c_b15, &d__[1]); - vt[vt_dim1 + 1] = 1.; - } - d__[1] = abs(d__[1]); - return 0; - } - nm1 = *n - 1; - -/* - If matrix lower bidiagonal, rotate to be upper bidiagonal - by applying Givens rotations on the left -*/ - - 
wstart = 1; - qstart = 3; - if (icompq == 1) { - dcopy_(n, &d__[1], &c__1, &q[1], &c__1); - i__1 = *n - 1; - dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1); - } - if (iuplo == 2) { - qstart = 5; - wstart = ((*n) << (1)) - 1; - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); - d__[i__] = r__; - e[i__] = sn * d__[i__ + 1]; - d__[i__ + 1] = cs * d__[i__ + 1]; - if (icompq == 1) { - q[i__ + ((*n) << (1))] = cs; - q[i__ + *n * 3] = sn; - } else if (icompq == 2) { - work[i__] = cs; - work[nm1 + i__] = -sn; - } -/* L10: */ - } - } - -/* If ICOMPQ = 0, use DLASDQ to compute the singular values. */ - - if (icompq == 0) { - dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[ - vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[ - wstart], info); - goto L40; - } - -/* - If N is smaller than the minimum divide size SMLSIZ, then solve - the problem with another solver. -*/ - - if (*n <= smlsiz) { - if (icompq == 2) { - dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu); - dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt); - dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset] - , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[ - wstart], info); - } else if (icompq == 1) { - iu = 1; - ivt = iu + *n; - dlaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n); - dlaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n); - dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + ( - qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[ - iu + (qstart - 1) * *n], n, &work[wstart], info); - } - goto L40; - } - - if (icompq == 2) { - dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu); - dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt); - } - -/* Scale. */ - - orgnrm = dlanst_("M", n, &d__[1], &e[1]); - if (orgnrm == 0.) 
{ - return 0; - } - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, & - ierr); - - eps = EPSILON; - - mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) / - log(2.)) + 1; - smlszp = smlsiz + 1; - - if (icompq == 1) { - iu = 1; - ivt = smlsiz + 1; - difl = ivt + smlszp; - difr = difl + mlvl; - z__ = difr + ((mlvl) << (1)); - ic = z__ + mlvl; - is = ic + 1; - poles = is + 1; - givnum = poles + ((mlvl) << (1)); - - k = 1; - givptr = 2; - perm = 3; - givcol = perm + mlvl; - } - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) < eps) { - d__[i__] = d_sign(&eps, &d__[i__]); - } -/* L20: */ - } - - start = 1; - sqre = 0; - - i__1 = nm1; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { - -/* - Subproblem found. First determine its size and then - apply divide and conquer on it. -*/ - - if (i__ < nm1) { - -/* A subproblem with E(I) small for I < NM1. */ - - nsize = i__ - start + 1; - } else if ((d__1 = e[i__], abs(d__1)) >= eps) { - -/* A subproblem with E(NM1) not too small but I = NM1. */ - - nsize = *n - start + 1; - } else { - -/* - A subproblem with E(NM1) small. This implies an - 1-by-1 subproblem at D(N). Solve this 1-by-1 problem - first. 
-*/ - - nsize = i__ - start + 1; - if (icompq == 2) { - u[*n + *n * u_dim1] = d_sign(&c_b15, &d__[*n]); - vt[*n + *n * vt_dim1] = 1.; - } else if (icompq == 1) { - q[*n + (qstart - 1) * *n] = d_sign(&c_b15, &d__[*n]); - q[*n + (smlsiz + qstart - 1) * *n] = 1.; - } - d__[*n] = (d__1 = d__[*n], abs(d__1)); - } - if (icompq == 2) { - dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start + - start * u_dim1], ldu, &vt[start + start * vt_dim1], - ldvt, &smlsiz, &iwork[1], &work[wstart], info); - } else { - dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[ - start], &q[start + (iu + qstart - 2) * *n], n, &q[ - start + (ivt + qstart - 2) * *n], &iq[start + k * *n], - &q[start + (difl + qstart - 2) * *n], &q[start + ( - difr + qstart - 2) * *n], &q[start + (z__ + qstart - - 2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[ - start + givptr * *n], &iq[start + givcol * *n], n, & - iq[start + perm * *n], &q[start + (givnum + qstart - - 2) * *n], &q[start + (ic + qstart - 2) * *n], &q[ - start + (is + qstart - 2) * *n], &work[wstart], & - iwork[1], info); - if (*info != 0) { - return 0; - } - } - start = i__ + 1; - } -/* L30: */ - } - -/* Unscale */ - - dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr); -L40: - -/* Use Selection Sort to minimize swaps of singular vectors */ - - i__1 = *n; - for (ii = 2; ii <= i__1; ++ii) { - i__ = ii - 1; - kk = i__; - p = d__[i__]; - i__2 = *n; - for (j = ii; j <= i__2; ++j) { - if (d__[j] > p) { - kk = j; - p = d__[j]; - } -/* L50: */ - } - if (kk != i__) { - d__[kk] = d__[i__]; - d__[i__] = p; - if (icompq == 1) { - iq[i__] = kk; - } else if (icompq == 2) { - dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], & - c__1); - dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt); - } - } else if (icompq == 1) { - iq[i__] = i__; - } -/* L60: */ - } - -/* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */ - - if (icompq == 1) { - if (iuplo == 1) { - iq[*n] = 1; - } else { - iq[*n] = 0; - } 
- } - -/* - If B is lower bidiagonal, update U by those Givens rotations - which rotated B to be upper bidiagonal -*/ - - if ((iuplo == 2 && icompq == 2)) { - dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu); - } - - return 0; - -/* End of DBDSDC */ - -} /* dbdsdc_ */ - -/* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer * - nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt, - integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer * - ldc, doublereal *work, integer *info) -{ - /* System generated locals */ - integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, - i__2; - doublereal d__1, d__2, d__3, d__4; - - /* Builtin functions */ - double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign( - doublereal *, doublereal *); - - /* Local variables */ - static doublereal f, g, h__; - static integer i__, j, m; - static doublereal r__, cs; - static integer ll; - static doublereal sn, mu; - static integer nm1, nm12, nm13, lll; - static doublereal eps, sll, tol, abse; - static integer idir; - static doublereal abss; - static integer oldm; - static doublereal cosl; - static integer isub, iter; - static doublereal unfl, sinl, cosr, smin, smax, sinr; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *), dlas2_( - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *), dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - static doublereal oldcs; - extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *); - static integer oldll; - static doublereal shift, sigmn, oldsn; - extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer maxit; - static doublereal sminl, sigmx; - static logical lower; - extern /* 
Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *, - doublereal *, integer *), dlasv2_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *); - - extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *), xerbla_(char *, - integer *); - static doublereal sminoa, thresh; - static logical rotate; - static doublereal sminlo, tolmul; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DBDSQR computes the singular value decomposition (SVD) of a real - N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' - denotes the transpose of P), where S is a diagonal matrix with - non-negative diagonal elements (the singular values of B), and Q - and P are orthogonal matrices. - - The routine computes S, and optionally computes U * Q, P' * VT, - or Q' * C, for given real input matrices U, VT, and C. - - See "Computing Small Singular Values of Bidiagonal Matrices With - Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, - LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, - no. 5, pp. 873-912, Sept 1990) and - "Accurate singular values and differential qd algorithms," by - B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics - Department, University of California at Berkeley, July 1992 - for a detailed description of the algorithm. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': B is upper bidiagonal; - = 'L': B is lower bidiagonal. - - N (input) INTEGER - The order of the matrix B. N >= 0. - - NCVT (input) INTEGER - The number of columns of the matrix VT. NCVT >= 0. - - NRU (input) INTEGER - The number of rows of the matrix U. NRU >= 0. - - NCC (input) INTEGER - The number of columns of the matrix C. NCC >= 0. 
- - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the n diagonal elements of the bidiagonal matrix B. - On exit, if INFO=0, the singular values of B in decreasing - order. - - E (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the elements of E contain the - offdiagonal elements of the bidiagonal matrix whose SVD - is desired. On normal exit (INFO = 0), E is destroyed. - If the algorithm does not converge (INFO > 0), D and E - will contain the diagonal and superdiagonal elements of a - bidiagonal matrix orthogonally equivalent to the one given - as input. E(N) is used for workspace. - - VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) - On entry, an N-by-NCVT matrix VT. - On exit, VT is overwritten by P' * VT. - VT is not referenced if NCVT = 0. - - LDVT (input) INTEGER - The leading dimension of the array VT. - LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. - - U (input/output) DOUBLE PRECISION array, dimension (LDU, N) - On entry, an NRU-by-N matrix U. - On exit, U is overwritten by U * Q. - U is not referenced if NRU = 0. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= max(1,NRU). - - C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) - On entry, an N-by-NCC matrix C. - On exit, C is overwritten by Q' * C. - C is not referenced if NCC = 0. - - LDC (input) INTEGER - The leading dimension of the array C. - LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. - - WORK (workspace) DOUBLE PRECISION array, dimension (4*N) - - INFO (output) INTEGER - = 0: successful exit - < 0: If INFO = -i, the i-th argument had an illegal value - > 0: the algorithm did not converge; D and E contain the - elements of a bidiagonal matrix which is orthogonally - similar to the input matrix B; if INFO = i, i - elements of E have not converged to zero. 
- - Internal Parameters - =================== - - TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8))) - TOLMUL controls the convergence criterion of the QR loop. - If it is positive, TOLMUL*EPS is the desired relative - precision in the computed singular values. - If it is negative, abs(TOLMUL*EPS*sigma_max) is the - desired absolute accuracy in the computed singular - values (corresponds to relative accuracy - abs(TOLMUL*EPS) in the largest singular value. - abs(TOLMUL) should be between 1 and 1/EPS, and preferably - between 10 (for fast convergence) and .1/EPS - (for there to be some accuracy in the results). - Default is to lose at either one eighth or 2 of the - available decimal digits in each computed singular value - (whichever is smaller). - - MAXITR INTEGER, default = 6 - MAXITR controls the maximum number of passes of the - algorithm through its inner loop. The algorithms stops - (and so fails to converge) if the number of passes - through the inner loop exceeds MAXITR*N**2. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - lower = lsame_(uplo, "L"); - if ((! lsame_(uplo, "U") && ! 
lower)) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ncvt < 0) { - *info = -3; - } else if (*nru < 0) { - *info = -4; - } else if (*ncc < 0) { - *info = -5; - } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n))) - { - *info = -9; - } else if (*ldu < max(1,*nru)) { - *info = -11; - } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) { - *info = -13; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DBDSQR", &i__1); - return 0; - } - if (*n == 0) { - return 0; - } - if (*n == 1) { - goto L160; - } - -/* ROTATE is true if any singular vectors desired, false otherwise */ - - rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; - -/* If no singular vectors desired, use qd algorithm */ - - if (! rotate) { - dlasq1_(n, &d__[1], &e[1], &work[1], info); - return 0; - } - - nm1 = *n - 1; - nm12 = nm1 + nm1; - nm13 = nm12 + nm1; - idir = 0; - -/* Get machine constants */ - - eps = EPSILON; - unfl = SAFEMINIMUM; - -/* - If matrix lower bidiagonal, rotate to be upper bidiagonal - by applying Givens rotations on the left -*/ - - if (lower) { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); - d__[i__] = r__; - e[i__] = sn * d__[i__ + 1]; - d__[i__ + 1] = cs * d__[i__ + 1]; - work[i__] = cs; - work[nm1 + i__] = sn; -/* L10: */ - } - -/* Update singular vectors if desired */ - - if (*nru > 0) { - dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset], - ldu); - } - if (*ncc > 0) { - dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset], - ldc); - } - } - -/* - Compute singular values to relative accuracy TOL - (By setting TOL to be negative, algorithm will compute - singular values to absolute accuracy ABS(TOL)*norm(input matrix)) - - Computing MAX - Computing MIN -*/ - d__3 = 100., d__4 = pow_dd(&eps, &c_b94); - d__1 = 10., d__2 = min(d__3,d__4); - tolmul = max(d__1,d__2); - tol = tolmul * eps; - -/* Compute approximate maximum, minimum singular values 
*/ - - smax = 0.; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1)); - smax = max(d__2,d__3); -/* L20: */ - } - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1)); - smax = max(d__2,d__3); -/* L30: */ - } - sminl = 0.; - if (tol >= 0.) { - -/* Relative accuracy desired */ - - sminoa = abs(d__[1]); - if (sminoa == 0.) { - goto L50; - } - mu = sminoa; - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1] - , abs(d__1)))); - sminoa = min(sminoa,mu); - if (sminoa == 0.) { - goto L50; - } -/* L40: */ - } -L50: - sminoa /= sqrt((doublereal) (*n)); -/* Computing MAX */ - d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl; - thresh = max(d__1,d__2); - } else { - -/* - Absolute accuracy desired - - Computing MAX -*/ - d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl; - thresh = max(d__1,d__2); - } - -/* - Prepare for main iteration loop for the singular values - (MAXIT is the maximum number of passes through the inner - loop permitted before nonconvergence signalled.) -*/ - - maxit = *n * 6 * *n; - iter = 0; - oldll = -1; - oldm = -1; - -/* M points to last element of unconverged part of matrix */ - - m = *n; - -/* Begin main iteration loop */ - -L60: - -/* Check for convergence or exceeding iteration count */ - - if (m <= 1) { - goto L160; - } - if (iter > maxit) { - goto L200; - } - -/* Find diagonal block of matrix to work on */ - - if ((tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh)) { - d__[m] = 0.; - } - smax = (d__1 = d__[m], abs(d__1)); - smin = smax; - i__1 = m - 1; - for (lll = 1; lll <= i__1; ++lll) { - ll = m - lll; - abss = (d__1 = d__[ll], abs(d__1)); - abse = (d__1 = e[ll], abs(d__1)); - if ((tol < 0. 
&& abss <= thresh)) { - d__[ll] = 0.; - } - if (abse <= thresh) { - goto L80; - } - smin = min(smin,abss); -/* Computing MAX */ - d__1 = max(smax,abss); - smax = max(d__1,abse); -/* L70: */ - } - ll = 0; - goto L90; -L80: - e[ll] = 0.; - -/* Matrix splits since E(LL) = 0 */ - - if (ll == m - 1) { - -/* Convergence of bottom singular value, return to top of loop */ - - --m; - goto L60; - } -L90: - ++ll; - -/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */ - - if (ll == m - 1) { - -/* 2 by 2 block, handle separately */ - - dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr, - &sinl, &cosl); - d__[m - 1] = sigmx; - e[m - 1] = 0.; - d__[m] = sigmn; - -/* Compute singular vectors, if desired */ - - if (*ncvt > 0) { - drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, & - cosr, &sinr); - } - if (*nru > 0) { - drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], & - c__1, &cosl, &sinl); - } - if (*ncc > 0) { - drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, & - cosl, &sinl); - } - m += -2; - goto L60; - } - -/* - If working on new submatrix, choose shift direction - (from larger end diagonal element towards smaller) -*/ - - if (ll > oldm || m < oldll) { - if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) { - -/* Chase bulge from top (big end) to bottom (small end) */ - - idir = 1; - } else { - -/* Chase bulge from bottom (big end) to top (small end) */ - - idir = 2; - } - } - -/* Apply convergence tests */ - - if (idir == 1) { - -/* - Run convergence test in forward direction - First apply standard test to bottom of matrix -*/ - - if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs( - d__1)) || (tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh) - ) { - e[m - 1] = 0.; - goto L60; - } - - if (tol >= 0.) 
{ - -/* - If relative accuracy desired, - apply convergence criterion forward -*/ - - mu = (d__1 = d__[ll], abs(d__1)); - sminl = mu; - i__1 = m - 1; - for (lll = ll; lll <= i__1; ++lll) { - if ((d__1 = e[lll], abs(d__1)) <= tol * mu) { - e[lll] = 0.; - goto L60; - } - sminlo = sminl; - mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[ - lll], abs(d__1)))); - sminl = min(sminl,mu); -/* L100: */ - } - } - - } else { - -/* - Run convergence test in backward direction - First apply standard test to top of matrix -*/ - - if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1) - ) || (tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh)) { - e[ll] = 0.; - goto L60; - } - - if (tol >= 0.) { - -/* - If relative accuracy desired, - apply convergence criterion backward -*/ - - mu = (d__1 = d__[m], abs(d__1)); - sminl = mu; - i__1 = ll; - for (lll = m - 1; lll >= i__1; --lll) { - if ((d__1 = e[lll], abs(d__1)) <= tol * mu) { - e[lll] = 0.; - goto L60; - } - sminlo = sminl; - mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll] - , abs(d__1)))); - sminl = min(sminl,mu); -/* L110: */ - } - } - } - oldll = ll; - oldm = m; - -/* - Compute shift. First, test if shifting would ruin relative - accuracy, and if so set the shift to zero. - - Computing MAX -*/ - d__1 = eps, d__2 = tol * .01; - if ((tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2))) { - -/* Use a zero shift to avoid loss of relative accuracy */ - - shift = 0.; - } else { - -/* Compute the shift from 2-by-2 block at end of matrix */ - - if (idir == 1) { - sll = (d__1 = d__[ll], abs(d__1)); - dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__); - } else { - sll = (d__1 = d__[m], abs(d__1)); - dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__); - } - -/* Test if shift negligible, and if so set to zero */ - - if (sll > 0.) 
{ -/* Computing 2nd power */ - d__1 = shift / sll; - if (d__1 * d__1 < eps) { - shift = 0.; - } - } - } - -/* Increment iteration count */ - - iter = iter + m - ll; - -/* If SHIFT = 0, do simplified QR iteration */ - - if (shift == 0.) { - if (idir == 1) { - -/* - Chase bulge from top to bottom - Save cosines and sines for later singular vector updates -*/ - - cs = 1.; - oldcs = 1.; - i__1 = m - 1; - for (i__ = ll; i__ <= i__1; ++i__) { - d__1 = d__[i__] * cs; - dlartg_(&d__1, &e[i__], &cs, &sn, &r__); - if (i__ > ll) { - e[i__ - 1] = oldsn * r__; - } - d__1 = oldcs * r__; - d__2 = d__[i__ + 1] * sn; - dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]); - work[i__ - ll + 1] = cs; - work[i__ - ll + 1 + nm1] = sn; - work[i__ - ll + 1 + nm12] = oldcs; - work[i__ - ll + 1 + nm13] = oldsn; -/* L120: */ - } - h__ = d__[m] * cs; - d__[m] = h__ * oldcs; - e[m - 1] = h__ * oldsn; - -/* Update singular vectors */ - - if (*ncvt > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[ - ll + vt_dim1], ldvt); - } - if (*nru > 0) { - i__1 = m - ll + 1; - dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 - + 1], &u[ll * u_dim1 + 1], ldu); - } - if (*ncc > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 - + 1], &c__[ll + c_dim1], ldc); - } - -/* Test convergence */ - - if ((d__1 = e[m - 1], abs(d__1)) <= thresh) { - e[m - 1] = 0.; - } - - } else { - -/* - Chase bulge from bottom to top - Save cosines and sines for later singular vector updates -*/ - - cs = 1.; - oldcs = 1.; - i__1 = ll + 1; - for (i__ = m; i__ >= i__1; --i__) { - d__1 = d__[i__] * cs; - dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__); - if (i__ < m) { - e[i__] = oldsn * r__; - } - d__1 = oldcs * r__; - d__2 = d__[i__ - 1] * sn; - dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]); - work[i__ - ll] = cs; - work[i__ - ll + nm1] = -sn; - work[i__ - ll + nm12] = oldcs; - work[i__ - ll + nm13] = -oldsn; -/* L130: */ - } - h__ = d__[ll] * 
cs; - d__[ll] = h__ * oldcs; - e[ll] = h__ * oldsn; - -/* Update singular vectors */ - - if (*ncvt > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[ - nm13 + 1], &vt[ll + vt_dim1], ldvt); - } - if (*nru > 0) { - i__1 = m - ll + 1; - dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll * - u_dim1 + 1], ldu); - } - if (*ncc > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[ - ll + c_dim1], ldc); - } - -/* Test convergence */ - - if ((d__1 = e[ll], abs(d__1)) <= thresh) { - e[ll] = 0.; - } - } - } else { - -/* Use nonzero shift */ - - if (idir == 1) { - -/* - Chase bulge from top to bottom - Save cosines and sines for later singular vector updates -*/ - - f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[ - ll]) + shift / d__[ll]); - g = e[ll]; - i__1 = m - 1; - for (i__ = ll; i__ <= i__1; ++i__) { - dlartg_(&f, &g, &cosr, &sinr, &r__); - if (i__ > ll) { - e[i__ - 1] = r__; - } - f = cosr * d__[i__] + sinr * e[i__]; - e[i__] = cosr * e[i__] - sinr * d__[i__]; - g = sinr * d__[i__ + 1]; - d__[i__ + 1] = cosr * d__[i__ + 1]; - dlartg_(&f, &g, &cosl, &sinl, &r__); - d__[i__] = r__; - f = cosl * e[i__] + sinl * d__[i__ + 1]; - d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__]; - if (i__ < m - 1) { - g = sinl * e[i__ + 1]; - e[i__ + 1] = cosl * e[i__ + 1]; - } - work[i__ - ll + 1] = cosr; - work[i__ - ll + 1 + nm1] = sinr; - work[i__ - ll + 1 + nm12] = cosl; - work[i__ - ll + 1 + nm13] = sinl; -/* L140: */ - } - e[m - 1] = f; - -/* Update singular vectors */ - - if (*ncvt > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[ - ll + vt_dim1], ldvt); - } - if (*nru > 0) { - i__1 = m - ll + 1; - dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 - + 1], &u[ll * u_dim1 + 1], ldu); - } - if (*ncc > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 - + 1], &c__[ll + c_dim1], ldc); - } - -/* 
Test convergence */ - - if ((d__1 = e[m - 1], abs(d__1)) <= thresh) { - e[m - 1] = 0.; - } - - } else { - -/* - Chase bulge from bottom to top - Save cosines and sines for later singular vector updates -*/ - - f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[m] - ) + shift / d__[m]); - g = e[m - 1]; - i__1 = ll + 1; - for (i__ = m; i__ >= i__1; --i__) { - dlartg_(&f, &g, &cosr, &sinr, &r__); - if (i__ < m) { - e[i__] = r__; - } - f = cosr * d__[i__] + sinr * e[i__ - 1]; - e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__]; - g = sinr * d__[i__ - 1]; - d__[i__ - 1] = cosr * d__[i__ - 1]; - dlartg_(&f, &g, &cosl, &sinl, &r__); - d__[i__] = r__; - f = cosl * e[i__ - 1] + sinl * d__[i__ - 1]; - d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1]; - if (i__ > ll + 1) { - g = sinl * e[i__ - 2]; - e[i__ - 2] = cosl * e[i__ - 2]; - } - work[i__ - ll] = cosr; - work[i__ - ll + nm1] = -sinr; - work[i__ - ll + nm12] = cosl; - work[i__ - ll + nm13] = -sinl; -/* L150: */ - } - e[ll] = f; - -/* Test convergence */ - - if ((d__1 = e[ll], abs(d__1)) <= thresh) { - e[ll] = 0.; - } - -/* Update singular vectors if desired */ - - if (*ncvt > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[ - nm13 + 1], &vt[ll + vt_dim1], ldvt); - } - if (*nru > 0) { - i__1 = m - ll + 1; - dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll * - u_dim1 + 1], ldu); - } - if (*ncc > 0) { - i__1 = m - ll + 1; - dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[ - ll + c_dim1], ldc); - } - } - } - -/* QR iteration finished, go back and check convergence */ - - goto L60; - -/* All singular values converged, so make them positive */ - -L160: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if (d__[i__] < 0.) 
{ - d__[i__] = -d__[i__]; - -/* Change sign of singular vectors, if desired */ - - if (*ncvt > 0) { - dscal_(ncvt, &c_b151, &vt[i__ + vt_dim1], ldvt); - } - } -/* L170: */ - } - -/* - Sort the singular values into decreasing order (insertion sort on - singular values, but only one transposition per singular vector) -*/ - - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Scan for smallest D(I) */ - - isub = 1; - smin = d__[1]; - i__2 = *n + 1 - i__; - for (j = 2; j <= i__2; ++j) { - if (d__[j] <= smin) { - isub = j; - smin = d__[j]; - } -/* L180: */ - } - if (isub != *n + 1 - i__) { - -/* Swap singular values and vectors */ - - d__[isub] = d__[*n + 1 - i__]; - d__[*n + 1 - i__] = smin; - if (*ncvt > 0) { - dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ + - vt_dim1], ldvt); - } - if (*nru > 0) { - dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) * - u_dim1 + 1], &c__1); - } - if (*ncc > 0) { - dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ + - c_dim1], ldc); - } - } -/* L190: */ - } - goto L220; - -/* Maximum number of iterations exceeded, failure to converge */ - -L200: - *info = 0; - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - if (e[i__] != 0.) 
{ - ++(*info); - } -/* L210: */ - } -L220: - return 0; - -/* End of DBDSQR */ - -} /* dbdsqr_ */ - -/* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo, - integer *ihi, doublereal *scale, integer *m, doublereal *v, integer * - ldv, integer *info) -{ - /* System generated locals */ - integer v_dim1, v_offset, i__1; - - /* Local variables */ - static integer i__, k; - static doublereal s; - static integer ii; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, - doublereal *, integer *); - static logical leftv; - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical rightv; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DGEBAK forms the right or left eigenvectors of a real general matrix - by backward transformation on the computed eigenvectors of the - balanced matrix output by DGEBAL. - - Arguments - ========= - - JOB (input) CHARACTER*1 - Specifies the type of backward transformation required: - = 'N', do nothing, return immediately; - = 'P', do backward transformation for permutation only; - = 'S', do backward transformation for scaling only; - = 'B', do backward transformations for both permutation and - scaling. - JOB must be the same as the argument JOB supplied to DGEBAL. - - SIDE (input) CHARACTER*1 - = 'R': V contains right eigenvectors; - = 'L': V contains left eigenvectors. - - N (input) INTEGER - The number of rows of the matrix V. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - The integers ILO and IHI determined by DGEBAL. - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. 
- - SCALE (input) DOUBLE PRECISION array, dimension (N) - Details of the permutation and scaling factors, as returned - by DGEBAL. - - M (input) INTEGER - The number of columns of the matrix V. M >= 0. - - V (input/output) DOUBLE PRECISION array, dimension (LDV,M) - On entry, the matrix of right or left eigenvectors to be - transformed, as returned by DHSEIN or DTREVC. - On exit, V is overwritten by the transformed eigenvectors. - - LDV (input) INTEGER - The leading dimension of the array V. LDV >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - ===================================================================== - - - Decode and Test the input parameters -*/ - - /* Parameter adjustments */ - --scale; - v_dim1 = *ldv; - v_offset = 1 + v_dim1 * 1; - v -= v_offset; - - /* Function Body */ - rightv = lsame_(side, "R"); - leftv = lsame_(side, "L"); - - *info = 0; - if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S")) - && ! lsame_(job, "B"))) { - *info = -1; - } else if ((! rightv && ! leftv)) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -4; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -5; - } else if (*m < 0) { - *info = -7; - } else if (*ldv < max(1,*n)) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEBAK", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - if (*m == 0) { - return 0; - } - if (lsame_(job, "N")) { - return 0; - } - - if (*ilo == *ihi) { - goto L30; - } - -/* Backward balance */ - - if (lsame_(job, "S") || lsame_(job, "B")) { - - if (rightv) { - i__1 = *ihi; - for (i__ = *ilo; i__ <= i__1; ++i__) { - s = scale[i__]; - dscal_(m, &s, &v[i__ + v_dim1], ldv); -/* L10: */ - } - } - - if (leftv) { - i__1 = *ihi; - for (i__ = *ilo; i__ <= i__1; ++i__) { - s = 1. 
/ scale[i__]; - dscal_(m, &s, &v[i__ + v_dim1], ldv); -/* L20: */ - } - } - - } - -/* - Backward permutation - - For I = ILO-1 step -1 until 1, - IHI+1 step 1 until N do -- -*/ - -L30: - if (lsame_(job, "P") || lsame_(job, "B")) { - if (rightv) { - i__1 = *n; - for (ii = 1; ii <= i__1; ++ii) { - i__ = ii; - if ((i__ >= *ilo && i__ <= *ihi)) { - goto L40; - } - if (i__ < *ilo) { - i__ = *ilo - ii; - } - k = (integer) scale[i__]; - if (k == i__) { - goto L40; - } - dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); -L40: - ; - } - } - - if (leftv) { - i__1 = *n; - for (ii = 1; ii <= i__1; ++ii) { - i__ = ii; - if ((i__ >= *ilo && i__ <= *ihi)) { - goto L50; - } - if (i__ < *ilo) { - i__ = *ilo - ii; - } - k = (integer) scale[i__]; - if (k == i__) { - goto L50; - } - dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); -L50: - ; - } - } - } - - return 0; - -/* End of DGEBAK */ - -} /* dgebak_ */ - -/* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer * - lda, integer *ilo, integer *ihi, doublereal *scale, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal d__1, d__2; - - /* Local variables */ - static doublereal c__, f, g; - static integer i__, j, k, l, m; - static doublereal r__, s, ca, ra; - static integer ica, ira, iexc; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, - doublereal *, integer *); - static doublereal sfmin1, sfmin2, sfmax1, sfmax2; - - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical noconv; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DGEBAL balances a general real matrix A. This involves, first, - permuting A by a similarity transformation to isolate eigenvalues - in the first 1 to ILO-1 and last IHI+1 to N elements on the - diagonal; and second, applying a diagonal similarity transformation - to rows and columns ILO to IHI to make the rows and columns as - close in norm as possible. Both steps are optional. - - Balancing may reduce the 1-norm of the matrix, and improve the - accuracy of the computed eigenvalues and/or eigenvectors. - - Arguments - ========= - - JOB (input) CHARACTER*1 - Specifies the operations to be performed on A: - = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 - for i = 1,...,N; - = 'P': permute only; - = 'S': scale only; - = 'B': both permute and scale. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the input matrix A. - On exit, A is overwritten by the balanced matrix. - If JOB = 'N', A is not referenced. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - ILO (output) INTEGER - IHI (output) INTEGER - ILO and IHI are set to integers such that on exit - A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. - If JOB = 'N' or 'S', ILO = 1 and IHI = N. - - SCALE (output) DOUBLE PRECISION array, dimension (N) - Details of the permutations and scaling factors applied to - A. If P(j) is the index of the row and column interchanged - with row and column j and D(j) is the scaling factor - applied to row and column j, then - SCALE(j) = P(j) for j = 1,...,ILO-1 - = D(j) for j = ILO,...,IHI - = P(j) for j = IHI+1,...,N. - The order in which the interchanges are made is N to IHI+1, - then 1 to ILO-1. - - INFO (output) INTEGER - = 0: successful exit. 
- < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The permutations consist of row and column interchanges which put - the matrix in the form - - ( T1 X Y ) - P A P = ( 0 B Z ) - ( 0 0 T2 ) - - where T1 and T2 are upper triangular matrices whose eigenvalues lie - along the diagonal. The column indices ILO and IHI mark the starting - and ending columns of the submatrix B. Balancing consists of applying - a diagonal similarity transformation inv(D) * B * D to make the - 1-norms of each row of B and its corresponding column nearly equal. - The output matrix is - - ( T1 X*D Y ) - ( 0 inv(D)*B*D inv(D)*Z ). - ( 0 0 T2 ) - - Information about the permutations P and the diagonal matrix D is - returned in the vector SCALE. - - This subroutine is based on the EISPACK routine BALANC. - - Modified by Tzu-Yi Chen, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --scale; - - /* Function Body */ - *info = 0; - if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S")) - && ! lsame_(job, "B"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEBAL", &i__1); - return 0; - } - - k = 1; - l = *n; - - if (*n == 0) { - goto L210; - } - - if (lsame_(job, "N")) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - scale[i__] = 1.; -/* L10: */ - } - goto L210; - } - - if (lsame_(job, "S")) { - goto L120; - } - -/* Permutation to isolate eigenvalues if possible */ - - goto L50; - -/* Row and column exchange. 
*/ - -L20: - scale[m] = (doublereal) j; - if (j == m) { - goto L30; - } - - dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1); - i__1 = *n - k + 1; - dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda); - -L30: - switch (iexc) { - case 1: goto L40; - case 2: goto L80; - } - -/* Search for rows isolating an eigenvalue and push them down. */ - -L40: - if (l == 1) { - goto L210; - } - --l; - -L50: - for (j = l; j >= 1; --j) { - - i__1 = l; - for (i__ = 1; i__ <= i__1; ++i__) { - if (i__ == j) { - goto L60; - } - if (a[j + i__ * a_dim1] != 0.) { - goto L70; - } -L60: - ; - } - - m = l; - iexc = 1; - goto L20; -L70: - ; - } - - goto L90; - -/* Search for columns isolating an eigenvalue and push them left. */ - -L80: - ++k; - -L90: - i__1 = l; - for (j = k; j <= i__1; ++j) { - - i__2 = l; - for (i__ = k; i__ <= i__2; ++i__) { - if (i__ == j) { - goto L100; - } - if (a[i__ + j * a_dim1] != 0.) { - goto L110; - } -L100: - ; - } - - m = k; - iexc = 2; - goto L20; -L110: - ; - } - -L120: - i__1 = l; - for (i__ = k; i__ <= i__1; ++i__) { - scale[i__] = 1.; -/* L130: */ - } - - if (lsame_(job, "P")) { - goto L210; - } - -/* - Balance the submatrix in rows K to L. - - Iterative loop for norm reduction -*/ - - sfmin1 = SAFEMINIMUM / PRECISION; - sfmax1 = 1. / sfmin1; - sfmin2 = sfmin1 * 8.; - sfmax2 = 1. / sfmin2; -L140: - noconv = FALSE_; - - i__1 = l; - for (i__ = k; i__ <= i__1; ++i__) { - c__ = 0.; - r__ = 0.; - - i__2 = l; - for (j = k; j <= i__2; ++j) { - if (j == i__) { - goto L150; - } - c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1)); - r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1)); -L150: - ; - } - ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1); - ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1)); - i__2 = *n - k + 1; - ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda); - ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1)); - -/* Guard against zero C or R due to underflow. */ - - if (c__ == 0. || r__ == 0.) 
{ - goto L200; - } - g = r__ / 8.; - f = 1.; - s = c__ + r__; -L160: -/* Computing MAX */ - d__1 = max(f,c__); -/* Computing MIN */ - d__2 = min(r__,g); - if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) { - goto L170; - } - f *= 8.; - c__ *= 8.; - ca *= 8.; - r__ /= 8.; - g /= 8.; - ra /= 8.; - goto L160; - -L170: - g = c__ / 8.; -L180: -/* Computing MIN */ - d__1 = min(f,c__), d__1 = min(d__1,g); - if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) { - goto L190; - } - f /= 8.; - c__ /= 8.; - g /= 8.; - ca /= 8.; - r__ *= 8.; - ra *= 8.; - goto L180; - -/* Now balance. */ - -L190: - if (c__ + r__ >= s * .95) { - goto L200; - } - if ((f < 1. && scale[i__] < 1.)) { - if (f * scale[i__] <= sfmin1) { - goto L200; - } - } - if ((f > 1. && scale[i__] > 1.)) { - if (scale[i__] >= sfmax1 / f) { - goto L200; - } - } - g = 1. / f; - scale[i__] *= f; - noconv = TRUE_; - - i__2 = *n - k + 1; - dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda); - dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1); - -L200: - ; - } - - if (noconv) { - goto L140; - } - -L210: - *ilo = k; - *ihi = l; - - return 0; - -/* End of DGEBAL */ - -} /* dgebal_ */ - -/* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer * - lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal * - taup, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *), dlarfg_(integer *, doublereal *, - doublereal *, integer *, doublereal *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DGEBD2 reduces a real general m by n matrix A to upper or lower - bidiagonal form B by an orthogonal transformation: Q' * A * P = B. - - If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. - - Arguments - ========= - - M (input) INTEGER - The number of rows in the matrix A. M >= 0. - - N (input) INTEGER - The number of columns in the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the m by n general matrix to be reduced. - On exit, - if m >= n, the diagonal and the first superdiagonal are - overwritten with the upper bidiagonal matrix B; the - elements below the diagonal, with the array TAUQ, represent - the orthogonal matrix Q as a product of elementary - reflectors, and the elements above the first superdiagonal, - with the array TAUP, represent the orthogonal matrix P as - a product of elementary reflectors; - if m < n, the diagonal and the first subdiagonal are - overwritten with the lower bidiagonal matrix B; the - elements below the first subdiagonal, with the array TAUQ, - represent the orthogonal matrix Q as a product of - elementary reflectors, and the elements above the diagonal, - with the array TAUP, represent the orthogonal matrix P as - a product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - D (output) DOUBLE PRECISION array, dimension (min(M,N)) - The diagonal elements of the bidiagonal matrix B: - D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) - The off-diagonal elements of the bidiagonal matrix B: - if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; - if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. 
- - TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the orthogonal matrix Q. See Further Details. - - TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the orthogonal matrix P. See Further Details. - - WORK (workspace) DOUBLE PRECISION array, dimension (max(M,N)) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrices Q and P are represented as products of elementary - reflectors: - - If m >= n, - - Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are real scalars, and v and u are real vectors; - v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); - u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); - tauq is stored in TAUQ(i) and taup in TAUP(i). - - If m < n, - - Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are real scalars, and v and u are real vectors; - v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); - u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); - tauq is stored in TAUQ(i) and taup in TAUP(i). 
- - The contents of A on exit are illustrated by the following examples: - - m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): - - ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) - ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) - ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) - ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) - ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) - ( v1 v2 v3 v4 v5 ) - - where d and e denote diagonal and off-diagonal elements of B, vi - denotes an element of the vector defining H(i), and ui an element of - the vector defining G(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tauq; - --taup; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info < 0) { - i__1 = -(*info); - xerbla_("DGEBD2", &i__1); - return 0; - } - - if (*m >= *n) { - -/* Reduce to upper bidiagonal form */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ - - i__2 = *m - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * - a_dim1], &c__1, &tauq[i__]); - d__[i__] = a[i__ + i__ * a_dim1]; - a[i__ + i__ * a_dim1] = 1.; - -/* Apply H(i) to A(i:m,i+1:n) from the left */ - - i__2 = *m - i__ + 1; - i__3 = *n - i__; - dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tauq[ - i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); - a[i__ + i__ * a_dim1] = d__[i__]; - - if (i__ < *n) { - -/* - Generate elementary reflector G(i) to annihilate - A(i,i+2:n) -*/ - - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min( - i__3,*n) * a_dim1], lda, &taup[i__]); - e[i__] = a[i__ + (i__ + 1) * a_dim1]; - a[i__ + (i__ + 1) * a_dim1] = 1.; - 
-/* Apply G(i) to A(i+1:m,i+1:n) from the right */ - - i__2 = *m - i__; - i__3 = *n - i__; - dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1], - lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], - lda, &work[1]); - a[i__ + (i__ + 1) * a_dim1] = e[i__]; - } else { - taup[i__] = 0.; - } -/* L10: */ - } - } else { - -/* Reduce to lower bidiagonal form */ - - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */ - - i__2 = *n - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * - a_dim1], lda, &taup[i__]); - d__[i__] = a[i__ + i__ * a_dim1]; - a[i__ + i__ * a_dim1] = 1.; - -/* Apply G(i) to A(i+1:m,i:n) from the right */ - - i__2 = *m - i__; - i__3 = *n - i__ + 1; -/* Computing MIN */ - i__4 = i__ + 1; - dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[ - i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]); - a[i__ + i__ * a_dim1] = d__[i__]; - - if (i__ < *m) { - -/* - Generate elementary reflector H(i) to annihilate - A(i+2:m,i) -*/ - - i__2 = *m - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) + - i__ * a_dim1], &c__1, &tauq[i__]); - e[i__] = a[i__ + 1 + i__ * a_dim1]; - a[i__ + 1 + i__ * a_dim1] = 1.; - -/* Apply H(i) to A(i+1:m,i+1:n) from the left */ - - i__2 = *m - i__; - i__3 = *n - i__; - dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], & - c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], - lda, &work[1]); - a[i__ + 1 + i__ * a_dim1] = e[i__]; - } else { - tauq[i__] = 0.; - } -/* L20: */ - } - } - return 0; - -/* End of DGEBD2 */ - -} /* dgebd2_ */ - -/* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer * - lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal * - taup, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - 
- /* Local variables */ - static integer i__, j, nb, nx; - static doublereal ws; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer nbmin, iinfo, minmn; - extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *), dlabrd_(integer *, integer *, integer * - , doublereal *, integer *, doublereal *, doublereal *, doublereal - *, doublereal *, doublereal *, integer *, doublereal *, integer *) - , xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer ldwrkx, ldwrky, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DGEBRD reduces a general real M-by-N matrix A to upper or lower - bidiagonal form B by an orthogonal transformation: Q**T * A * P = B. - - If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. - - Arguments - ========= - - M (input) INTEGER - The number of rows in the matrix A. M >= 0. - - N (input) INTEGER - The number of columns in the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the M-by-N general matrix to be reduced. 
- On exit, - if m >= n, the diagonal and the first superdiagonal are - overwritten with the upper bidiagonal matrix B; the - elements below the diagonal, with the array TAUQ, represent - the orthogonal matrix Q as a product of elementary - reflectors, and the elements above the first superdiagonal, - with the array TAUP, represent the orthogonal matrix P as - a product of elementary reflectors; - if m < n, the diagonal and the first subdiagonal are - overwritten with the lower bidiagonal matrix B; the - elements below the first subdiagonal, with the array TAUQ, - represent the orthogonal matrix Q as a product of - elementary reflectors, and the elements above the diagonal, - with the array TAUP, represent the orthogonal matrix P as - a product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - D (output) DOUBLE PRECISION array, dimension (min(M,N)) - The diagonal elements of the bidiagonal matrix B: - D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) - The off-diagonal elements of the bidiagonal matrix B: - if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; - if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. - - TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the orthogonal matrix Q. See Further Details. - - TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the orthogonal matrix P. See Further Details. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The length of the array WORK. LWORK >= max(1,M,N). - For optimum performance LWORK >= (M+N)*NB, where NB - is the optimal blocksize. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrices Q and P are represented as products of elementary - reflectors: - - If m >= n, - - Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are real scalars, and v and u are real vectors; - v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); - u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); - tauq is stored in TAUQ(i) and taup in TAUP(i). - - If m < n, - - Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are real scalars, and v and u are real vectors; - v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); - u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); - tauq is stored in TAUQ(i) and taup in TAUP(i). - - The contents of A on exit are illustrated by the following examples: - - m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): - - ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) - ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) - ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) - ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) - ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) - ( v1 v2 v3 v4 v5 ) - - where d and e denote diagonal and off-diagonal elements of B, vi - denotes an element of the vector defining H(i), and ui an element of - the vector defining G(i). 
- - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tauq; - --taup; - --work; - - /* Function Body */ - *info = 0; -/* Computing MAX */ - i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nb = max(i__1,i__2); - lwkopt = (*m + *n) * nb; - work[1] = (doublereal) lwkopt; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = max(1,*m); - if ((*lwork < max(i__1,*n) && ! lquery)) { - *info = -10; - } - } - if (*info < 0) { - i__1 = -(*info); - xerbla_("DGEBRD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - minmn = min(*m,*n); - if (minmn == 0) { - work[1] = 1.; - return 0; - } - - ws = (doublereal) max(*m,*n); - ldwrkx = *m; - ldwrky = *n; - - if ((nb > 1 && nb < minmn)) { - -/* - Set the crossover point NX. - - Computing MAX -*/ - i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - -/* Determine when to switch from blocked to unblocked code. */ - - if (nx < minmn) { - ws = (doublereal) ((*m + *n) * nb); - if ((doublereal) (*lwork) < ws) { - -/* - Not enough work space for the optimal NB, consider using - a smaller block size. -*/ - - nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - if (*lwork >= (*m + *n) * nbmin) { - nb = *lwork / (*m + *n); - } else { - nb = 1; - nx = minmn; - } - } - } - } else { - nx = minmn; - } - - i__1 = minmn - nx; - i__2 = nb; - for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - -/* - Reduce rows and columns i:i+nb-1 to bidiagonal form and return - the matrices X and Y which are needed to update the unreduced - part of the matrix -*/ - - i__3 = *m - i__ + 1; - i__4 = *n - i__ + 1; - dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[ - i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx - * nb + 1], &ldwrky); - -/* - Update the trailing submatrix A(i+nb:m,i+nb:n), using an update - of the form A := A - V*Y' - X*U' -*/ - - i__3 = *m - i__ - nb + 1; - i__4 = *n - i__ - nb + 1; - dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b151, &a[ - i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], & - ldwrky, &c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda); - i__3 = *m - i__ - nb + 1; - i__4 = *n - i__ - nb + 1; - dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b151, & - work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, & - c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda); - -/* Copy diagonal and off-diagonal elements of B back into A */ - - if (*m >= *n) { - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - a[j + j * a_dim1] = d__[j]; - a[j + (j + 1) * a_dim1] = e[j]; -/* L10: */ - } - } else { - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - a[j + j * a_dim1] = d__[j]; - a[j + 1 + j * a_dim1] = e[j]; -/* L20: */ - } - } -/* L30: */ - } - -/* Use unblocked code to reduce the remainder of the matrix */ - - i__2 = *m - i__ + 1; - i__1 = *n - i__ + 1; - dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], & - tauq[i__], &taup[i__], &work[1], &iinfo); - work[1] = ws; - return 0; - -/* End of DGEBRD */ - -} /* dgebrd_ */ - -/* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal * - a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl, - integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work, - integer *lwork, integer *info) -{ - /* System generated locals */ - integer 
a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, - i__2, i__3, i__4; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, k; - static doublereal r__, cs, sn; - static integer ihi; - static doublereal scl; - static integer ilo; - static doublereal dum[1], eps; - static integer ibal; - static char side[1]; - static integer maxb; - static doublereal anrm; - static integer ierr, itau; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - static integer iwrk, nout; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern doublereal dlapy2_(doublereal *, doublereal *); - extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_( - char *, char *, integer *, integer *, integer *, doublereal *, - integer *, doublereal *, integer *, integer *), - dgebal_(char *, integer *, doublereal *, integer *, integer *, - integer *, doublereal *, integer *); - static logical scalea; - - static doublereal cscale; - extern doublereal dlange_(char *, integer *, integer *, doublereal *, - integer *, doublereal *); - extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - integer *), dlascl_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, integer *, doublereal *, integer *, - integer *); - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *), - dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *), xerbla_(char *, integer *); - static logical select[1]; - extern integer ilaenv_(integer *, char *, char *, integer *, 
integer *, - integer *, integer *, ftnlen, ftnlen); - static doublereal bignum; - extern /* Subroutine */ int dorghr_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - integer *), dhseqr_(char *, char *, integer *, integer *, integer - *, doublereal *, integer *, doublereal *, doublereal *, - doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, doublereal *, integer *); - static integer minwrk, maxwrk; - static logical wantvl; - static doublereal smlnum; - static integer hswork; - static logical lquery, wantvr; - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - December 8, 1999 - - - Purpose - ======= - - DGEEV computes for an N-by-N real nonsymmetric matrix A, the - eigenvalues and, optionally, the left and/or right eigenvectors. - - The right eigenvector v(j) of A satisfies - A * v(j) = lambda(j) * v(j) - where lambda(j) is its eigenvalue. - The left eigenvector u(j) of A satisfies - u(j)**H * A = lambda(j) * u(j)**H - where u(j)**H denotes the conjugate transpose of u(j). - - The computed eigenvectors are normalized to have Euclidean norm - equal to 1 and largest component real. - - Arguments - ========= - - JOBVL (input) CHARACTER*1 - = 'N': left eigenvectors of A are not computed; - = 'V': left eigenvectors of A are computed. - - JOBVR (input) CHARACTER*1 - = 'N': right eigenvectors of A are not computed; - = 'V': right eigenvectors of A are computed. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the N-by-N matrix A. - On exit, A has been overwritten. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). 
- - WR (output) DOUBLE PRECISION array, dimension (N) - WI (output) DOUBLE PRECISION array, dimension (N) - WR and WI contain the real and imaginary parts, - respectively, of the computed eigenvalues. Complex - conjugate pairs of eigenvalues appear consecutively - with the eigenvalue having the positive imaginary part - first. - - VL (output) DOUBLE PRECISION array, dimension (LDVL,N) - If JOBVL = 'V', the left eigenvectors u(j) are stored one - after another in the columns of VL, in the same order - as their eigenvalues. - If JOBVL = 'N', VL is not referenced. - If the j-th eigenvalue is real, then u(j) = VL(:,j), - the j-th column of VL. - If the j-th and (j+1)-st eigenvalues form a complex - conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and - u(j+1) = VL(:,j) - i*VL(:,j+1). - - LDVL (input) INTEGER - The leading dimension of the array VL. LDVL >= 1; if - JOBVL = 'V', LDVL >= N. - - VR (output) DOUBLE PRECISION array, dimension (LDVR,N) - If JOBVR = 'V', the right eigenvectors v(j) are stored one - after another in the columns of VR, in the same order - as their eigenvalues. - If JOBVR = 'N', VR is not referenced. - If the j-th eigenvalue is real, then v(j) = VR(:,j), - the j-th column of VR. - If the j-th and (j+1)-st eigenvalues form a complex - conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and - v(j+1) = VR(:,j) - i*VR(:,j+1). - - LDVR (input) INTEGER - The leading dimension of the array VR. LDVR >= 1; if - JOBVR = 'V', LDVR >= N. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,3*N), and - if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good - performance, LWORK must generally be larger. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = i, the QR algorithm failed to compute all the - eigenvalues, and no eigenvectors have been computed; - elements i+1:N of WR and WI contain eigenvalues which - have converged. - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --wr; - --wi; - vl_dim1 = *ldvl; - vl_offset = 1 + vl_dim1 * 1; - vl -= vl_offset; - vr_dim1 = *ldvr; - vr_offset = 1 + vr_dim1 * 1; - vr -= vr_offset; - --work; - - /* Function Body */ - *info = 0; - lquery = *lwork == -1; - wantvl = lsame_(jobvl, "V"); - wantvr = lsame_(jobvr, "V"); - if ((! wantvl && ! lsame_(jobvl, "N"))) { - *info = -1; - } else if ((! wantvr && ! lsame_(jobvr, "N"))) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if (*ldvl < 1 || (wantvl && *ldvl < *n)) { - *info = -9; - } else if (*ldvr < 1 || (wantvr && *ldvr < *n)) { - *info = -11; - } - -/* - Compute workspace - (Note: Comments in the code beginning "Workspace:" describe the - minimal amount of workspace needed at that point in the code, - as well as the preferred amount for good performance. - NB refers to the optimal block size for the immediately - following subroutine, as returned by ILAENV. - HSWORK refers to the workspace preferred by DHSEQR, as - calculated below. HSWORK is computed assuming ILO=1 and IHI=N, - the worst case.) 
-*/ - - minwrk = 1; - if ((*info == 0 && (*lwork >= 1 || lquery))) { - maxwrk = ((*n) << (1)) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1, - n, &c__0, (ftnlen)6, (ftnlen)1); - if ((! wantvl && ! wantvr)) { -/* Computing MAX */ - i__1 = 1, i__2 = *n * 3; - minwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = ilaenv_(&c__8, "DHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen) - 6, (ftnlen)2); - maxb = max(i__1,2); -/* - Computing MIN - Computing MAX -*/ - i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "EN", n, &c__1, n, & - c_n1, (ftnlen)6, (ftnlen)2); - i__1 = min(maxb,*n), i__2 = max(i__3,i__4); - k = min(i__1,i__2); -/* Computing MAX */ - i__1 = k * (k + 2), i__2 = (*n) << (1); - hswork = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n + - hswork; - maxwrk = max(i__1,i__2); - } else { -/* Computing MAX */ - i__1 = 1, i__2 = (*n) << (2); - minwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1, - "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = ilaenv_(&c__8, "DHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen) - 6, (ftnlen)2); - maxb = max(i__1,2); -/* - Computing MIN - Computing MAX -*/ - i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "SV", n, &c__1, n, & - c_n1, (ftnlen)6, (ftnlen)2); - i__1 = min(maxb,*n), i__2 = max(i__3,i__4); - k = min(i__1,i__2); -/* Computing MAX */ - i__1 = k * (k + 2), i__2 = (*n) << (1); - hswork = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n + - hswork; - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = (*n) << (2); - maxwrk = max(i__1,i__2); - } - work[1] = (doublereal) maxwrk; - } - if ((*lwork < minwrk && ! 
lquery)) { - *info = -13; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEEV ", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* Get machine constants */ - - eps = PRECISION; - smlnum = SAFEMINIMUM; - bignum = 1. / smlnum; - dlabad_(&smlnum, &bignum); - smlnum = sqrt(smlnum) / eps; - bignum = 1. / smlnum; - -/* Scale A if max element outside range [SMLNUM,BIGNUM] */ - - anrm = dlange_("M", n, n, &a[a_offset], lda, dum); - scalea = FALSE_; - if ((anrm > 0. && anrm < smlnum)) { - scalea = TRUE_; - cscale = smlnum; - } else if (anrm > bignum) { - scalea = TRUE_; - cscale = bignum; - } - if (scalea) { - dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, & - ierr); - } - -/* - Balance the matrix - (Workspace: need N) -*/ - - ibal = 1; - dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr); - -/* - Reduce to upper Hessenberg form - (Workspace: need 3*N, prefer 2*N+N*NB) -*/ - - itau = ibal + *n; - iwrk = itau + *n; - i__1 = *lwork - iwrk + 1; - dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, - &ierr); - - if (wantvl) { - -/* - Want left eigenvectors - Copy Householder vectors to VL -*/ - - *(unsigned char *)side = 'L'; - dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl) - ; - -/* - Generate orthogonal matrix in VL - (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) -*/ - - i__1 = *lwork - iwrk + 1; - dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], - &i__1, &ierr); - -/* - Perform QR iteration, accumulating Schur vectors in VL - (Workspace: need N+1, prefer N+HSWORK (see comments) ) -*/ - - iwrk = itau; - i__1 = *lwork - iwrk + 1; - dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & - vl[vl_offset], ldvl, &work[iwrk], &i__1, info); - - if (wantvr) { - -/* - Want left and right eigenvectors - Copy Schur vectors to VR -*/ - - *(unsigned char *)side = 'B'; - dlacpy_("F", n, n, 
&vl[vl_offset], ldvl, &vr[vr_offset], ldvr); - } - - } else if (wantvr) { - -/* - Want right eigenvectors - Copy Householder vectors to VR -*/ - - *(unsigned char *)side = 'R'; - dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr) - ; - -/* - Generate orthogonal matrix in VR - (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) -*/ - - i__1 = *lwork - iwrk + 1; - dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], - &i__1, &ierr); - -/* - Perform QR iteration, accumulating Schur vectors in VR - (Workspace: need N+1, prefer N+HSWORK (see comments) ) -*/ - - iwrk = itau; - i__1 = *lwork - iwrk + 1; - dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & - vr[vr_offset], ldvr, &work[iwrk], &i__1, info); - - } else { - -/* - Compute eigenvalues only - (Workspace: need N+1, prefer N+HSWORK (see comments) ) -*/ - - iwrk = itau; - i__1 = *lwork - iwrk + 1; - dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & - vr[vr_offset], ldvr, &work[iwrk], &i__1, info); - } - -/* If INFO > 0 from DHSEQR, then quit */ - - if (*info > 0) { - goto L50; - } - - if (wantvl || wantvr) { - -/* - Compute left and/or right eigenvectors - (Workspace: need 4*N) -*/ - - dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, - &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr); - } - - if (wantvl) { - -/* - Undo balancing of left eigenvectors - (Workspace: need N) -*/ - - dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl, - &ierr); - -/* Normalize left eigenvectors and make largest component real */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if (wi[i__] == 0.) { - scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); - dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); - } else if (wi[i__] > 0.) { - d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); - d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1); - scl = 1. 
/ dlapy2_(&d__1, &d__2); - dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); - dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1); - i__2 = *n; - for (k = 1; k <= i__2; ++k) { -/* Computing 2nd power */ - d__1 = vl[k + i__ * vl_dim1]; -/* Computing 2nd power */ - d__2 = vl[k + (i__ + 1) * vl_dim1]; - work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2; -/* L10: */ - } - k = idamax_(n, &work[iwrk], &c__1); - dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1], - &cs, &sn, &r__); - drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) * - vl_dim1 + 1], &c__1, &cs, &sn); - vl[k + (i__ + 1) * vl_dim1] = 0.; - } -/* L20: */ - } - } - - if (wantvr) { - -/* - Undo balancing of right eigenvectors - (Workspace: need N) -*/ - - dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr, - &ierr); - -/* Normalize right eigenvectors and make largest component real */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if (wi[i__] == 0.) { - scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); - dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); - } else if (wi[i__] > 0.) { - d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); - d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1); - scl = 1. 
/ dlapy2_(&d__1, &d__2); - dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); - dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1); - i__2 = *n; - for (k = 1; k <= i__2; ++k) { -/* Computing 2nd power */ - d__1 = vr[k + i__ * vr_dim1]; -/* Computing 2nd power */ - d__2 = vr[k + (i__ + 1) * vr_dim1]; - work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2; -/* L30: */ - } - k = idamax_(n, &work[iwrk], &c__1); - dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1], - &cs, &sn, &r__); - drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) * - vr_dim1 + 1], &c__1, &cs, &sn); - vr[k + (i__ + 1) * vr_dim1] = 0.; - } -/* L40: */ - } - } - -/* Undo scaling if necessary */ - -L50: - if (scalea) { - i__1 = *n - *info; -/* Computing MAX */ - i__3 = *n - *info; - i__2 = max(i__3,1); - dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info + - 1], &i__2, &ierr); - i__1 = *n - *info; -/* Computing MAX */ - i__3 = *n - *info; - i__2 = max(i__3,1); - dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info + - 1], &i__2, &ierr); - if (*info > 0) { - i__1 = ilo - 1; - dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1], - n, &ierr); - i__1 = ilo - 1; - dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1], - n, &ierr); - } - } - - work[1] = (doublereal) maxwrk; - return 0; - -/* End of DGEEV */ - -} /* dgeev_ */ - -/* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi, - doublereal *a, integer *lda, doublereal *tau, doublereal *work, - integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__; - static doublereal aii; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *), dlarfg_(integer *, doublereal *, - doublereal *, integer *, doublereal *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DGEHD2 reduces a real general matrix A to upper Hessenberg form H by - an orthogonal similarity transformation: Q' * A * Q = H . - - Arguments - ========= - - N (input) INTEGER - The order of the matrix A. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that A is already upper triangular in rows - and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally - set by a previous call to DGEBAL; otherwise they should be - set to 1 and N respectively. See Further Details. - 1 <= ILO <= IHI <= max(1,N). - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the n by n general matrix to be reduced. - On exit, the upper triangle and the first subdiagonal of A - are overwritten with the upper Hessenberg matrix H, and the - elements below the first subdiagonal, with the array TAU, - represent the orthogonal matrix Q as a product of elementary - reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (output) DOUBLE PRECISION array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace) DOUBLE PRECISION array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrix Q is represented as a product of (ihi-ilo) elementary - reflectors - - Q = H(ilo) H(ilo+1) . . . H(ihi-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on - exit in A(i+2:ihi,i), and tau in TAU(i). 
- - The contents of A are illustrated by the following example, with - n = 7, ilo = 2 and ihi = 6: - - on entry, on exit, - - ( a a a a a a a ) ( a a h h h h a ) - ( a a a a a a ) ( a h h h h a ) - ( a a a a a a ) ( h h h h h h ) - ( a a a a a a ) ( v2 h h h h h ) - ( a a a a a a ) ( v2 v3 h h h h ) - ( a a a a a a ) ( v2 v3 v4 h h h ) - ( a ) ( a ) - - where a denotes an element of the original matrix A, h denotes a - modified element of the upper Hessenberg matrix H, and vi denotes an - element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*n < 0) { - *info = -1; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -2; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEHD2", &i__1); - return 0; - } - - i__1 = *ihi - 1; - for (i__ = *ilo; i__ <= i__1; ++i__) { - -/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */ - - i__2 = *ihi - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * - a_dim1], &c__1, &tau[i__]); - aii = a[i__ + 1 + i__ * a_dim1]; - a[i__ + 1 + i__ * a_dim1] = 1.; - -/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */ - - i__2 = *ihi - i__; - dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ - i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]); - -/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */ - - i__2 = *ihi - i__; - i__3 = *n - i__; - dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ - i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); - - a[i__ + 1 + i__ * a_dim1] = aii; -/* L10: */ - } - - return 0; - -/* End of DGEHD2 */ - -} /* dgehd2_ */ - -/* Subroutine */ int 
dgehrd_(integer *n, integer *ilo, integer *ihi, - doublereal *a, integer *lda, doublereal *tau, doublereal *work, - integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__; - static doublereal t[4160] /* was [65][64] */; - static integer ib; - static doublereal ei; - static integer nb, nh, nx, iws; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer nbmin, iinfo; - extern /* Subroutine */ int dgehd2_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *), - dlarfb_(char *, char *, char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *), dlahrd_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DGEHRD reduces a real general matrix A to upper Hessenberg form H by - an orthogonal similarity transformation: Q' * A * Q = H . - - Arguments - ========= - - N (input) INTEGER - The order of the matrix A. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that A is already upper triangular in rows - and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally - set by a previous call to DGEBAL; otherwise they should be - set to 1 and N respectively. See Further Details. 
- 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the N-by-N general matrix to be reduced. - On exit, the upper triangle and the first subdiagonal of A - are overwritten with the upper Hessenberg matrix H, and the - elements below the first subdiagonal, with the array TAU, - represent the orthogonal matrix Q as a product of elementary - reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (output) DOUBLE PRECISION array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to - zero. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The length of the array WORK. LWORK >= max(1,N). - For optimum performance LWORK >= N*NB, where NB is the - optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrix Q is represented as a product of (ihi-ilo) elementary - reflectors - - Q = H(ilo) H(ilo+1) . . . H(ihi-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on - exit in A(i+2:ihi,i), and tau in TAU(i). 
- - The contents of A are illustrated by the following example, with - n = 7, ilo = 2 and ihi = 6: - - on entry, on exit, - - ( a a a a a a a ) ( a a h h h h a ) - ( a a a a a a ) ( a h h h h a ) - ( a a a a a a ) ( h h h h h h ) - ( a a a a a a ) ( v2 h h h h h ) - ( a a a a a a ) ( v2 v3 h h h h ) - ( a a a a a a ) ( v2 v3 v4 h h h ) - ( a ) ( a ) - - where a denotes an element of the original matrix A, h denotes a - modified element of the upper Hessenberg matrix H, and vi denotes an - element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; -/* Computing MIN */ - i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( - ftnlen)6, (ftnlen)1); - nb = min(i__1,i__2); - lwkopt = *n * nb; - work[1] = (doublereal) lwkopt; - lquery = *lwork == -1; - if (*n < 0) { - *info = -1; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -2; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if ((*lwork < max(1,*n) && ! lquery)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEHRD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */ - - i__1 = *ilo - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - tau[i__] = 0.; -/* L10: */ - } - i__1 = *n - 1; - for (i__ = max(1,*ihi); i__ <= i__1; ++i__) { - tau[i__] = 0.; -/* L20: */ - } - -/* Quick return if possible */ - - nh = *ihi - *ilo + 1; - if (nh <= 1) { - work[1] = 1.; - return 0; - } - -/* - Determine the block size. 
- - Computing MIN -*/ - i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( - ftnlen)6, (ftnlen)1); - nb = min(i__1,i__2); - nbmin = 2; - iws = 1; - if ((nb > 1 && nb < nh)) { - -/* - Determine when to cross over from blocked to unblocked code - (last block is always handled by unblocked code). - - Computing MAX -*/ - i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < nh) { - -/* Determine if workspace is large enough for blocked code. */ - - iws = *n * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: determine the - minimum value of NB, and reduce NB or force use of - unblocked code. - - Computing MAX -*/ - i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, & - c_n1, (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - if (*lwork >= *n * nbmin) { - nb = *lwork / *n; - } else { - nb = 1; - } - } - } - } - ldwork = *n; - - if (nb < nbmin || nb >= nh) { - -/* Use unblocked code below */ - - i__ = *ilo; - - } else { - -/* Use blocked code */ - - i__1 = *ihi - 1 - nx; - i__2 = nb; - for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__3 = nb, i__4 = *ihi - i__; - ib = min(i__3,i__4); - -/* - Reduce columns i:i+ib-1 to Hessenberg form, returning the - matrices V and T of the block reflector H = I - V*T*V' - which performs the reduction, and also the matrix Y = A*V*T -*/ - - dlahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, & - c__65, &work[1], &ldwork); - -/* - Apply the block reflector H to A(1:ihi,i+ib:ihi) from the - right, computing A := A - Y * V'. V(i+ib,ib-1) must be set - to 1. 
-*/ - - ei = a[i__ + ib + (i__ + ib - 1) * a_dim1]; - a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.; - i__3 = *ihi - i__ - ib + 1; - dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b151, & - work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, & - c_b15, &a[(i__ + ib) * a_dim1 + 1], lda); - a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei; - -/* - Apply the block reflector H to A(i+1:ihi,i+ib:n) from the - left -*/ - - i__3 = *ihi - i__; - i__4 = *n - i__ - ib + 1; - dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, & - i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[ - i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork); -/* L30: */ - } - } - -/* Use unblocked code to reduce the rest of the matrix */ - - dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo); - work[1] = (doublereal) iws; - - return 0; - -/* End of DGEHRD */ - -} /* dgehrd_ */ - -/* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer * - lda, doublereal *tau, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, k; - static doublereal aii; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *), dlarfg_(integer *, doublereal *, - doublereal *, integer *, doublereal *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DGELQ2 computes an LQ factorization of a real m by n matrix A: - A = L * Q. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the m by n matrix A. 
- On exit, the elements on and below the diagonal of the array - contain the m by min(m,n) lower trapezoidal matrix L (L is - lower triangular if m <= n); the elements above the diagonal, - with the array TAU, represent the orthogonal matrix Q as a - product of elementary reflectors (see Further Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace) DOUBLE PRECISION array, dimension (M) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(k) . . . H(2) H(1), where k = min(m,n). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), - and tau in TAU(i). 
- - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGELQ2", &i__1); - return 0; - } - - k = min(*m,*n); - - i__1 = k; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */ - - i__2 = *n - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1] - , lda, &tau[i__]); - if (i__ < *m) { - -/* Apply H(i) to A(i+1:m,i:n) from the right */ - - aii = a[i__ + i__ * a_dim1]; - a[i__ + i__ * a_dim1] = 1.; - i__2 = *m - i__; - i__3 = *n - i__ + 1; - dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[ - i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); - a[i__ + i__ * a_dim1] = aii; - } -/* L10: */ - } - return 0; - -/* End of DGELQ2 */ - -} /* dgelq2_ */ - -/* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer * - lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, k, ib, nb, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *), dlarfb_(char *, - char *, char *, char *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer 
*, - integer *, integer *, ftnlen, ftnlen); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DGELQF computes an LQ factorization of a real M-by-N matrix A: - A = L * Q. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, the elements on and below the diagonal of the array - contain the m-by-min(m,n) lower trapezoidal matrix L (L is - lower triangular if m <= n); the elements above the diagonal, - with the array TAU, represent the orthogonal matrix Q as a - product of elementary reflectors (see Further Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,M). - For optimum performance LWORK >= M*NB, where NB is the - optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(k) . . . H(2) H(1), where k = min(m,n). 
- - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), - and tau in TAU(i). - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) - 1); - lwkopt = *m * nb; - work[1] = (doublereal) lwkopt; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } else if ((*lwork < max(1,*m) && ! lquery)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGELQF", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - k = min(*m,*n); - if (k == 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *m; - if ((nb > 1 && nb < k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < k) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *m; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, & - c_n1, (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < k) && nx < k)) { - -/* Use blocked code initially */ - - i__1 = k - nx; - i__2 = nb; - for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__3 = k - i__ + 1; - ib = min(i__3,nb); - -/* - Compute the LQ factorization of the current block - A(i:i+ib-1,i:n) -*/ - - i__3 = *n - i__ + 1; - dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ - 1], &iinfo); - if (i__ + ib <= *m) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__3 = *n - i__ + 1; - dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H to A(i+ib:m,i:n) from the right */ - - i__3 = *m - i__ - ib + 1; - i__4 = *n - i__ + 1; - dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3, - &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & - ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + - 1], &ldwork); - } -/* L10: */ - } - } else { - i__ = 1; - } - -/* Use unblocked code to factor the last or only block. */ - - if (i__ <= k) { - i__2 = *m - i__ + 1; - i__1 = *n - i__ + 1; - dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] - , &iinfo); - } - - work[1] = (doublereal) iws; - return 0; - -/* End of DGELQF */ - -} /* dgelqf_ */ - -/* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs, - doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal * - s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork, - integer *iwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; - - /* Builtin functions */ - double log(doublereal); - - /* Local variables */ - static integer ie, il, mm; - static doublereal eps, anrm, bnrm; - static integer itau, nlvl, iascl, ibscl; - static doublereal sfmin; - static integer minmn, maxmn, itaup, itauq, mnthr, nwork; - extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebrd_( - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, doublereal *, doublereal 
*, doublereal *, integer *, - integer *); - extern doublereal dlamch_(char *), dlange_(char *, integer *, - integer *, doublereal *, integer *, doublereal *); - extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *, integer *), - dlalsd_(char *, integer *, integer *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *, integer *), dlascl_(char *, - integer *, integer *, doublereal *, doublereal *, integer *, - integer *, doublereal *, integer *, integer *), dgeqrf_( - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *, integer *), dlacpy_(char *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, - doublereal *, doublereal *, integer *), xerbla_(char *, - integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static doublereal bignum; - extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, integer *); - static integer wlalsd; - extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *); - static integer ldwork; - extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *); - static integer minwrk, maxwrk; - static doublereal smlnum; - static logical lquery; - static integer smlsiz; - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DGELSD computes the minimum-norm solution to a real linear least - squares problem: - minimize 2-norm(| b - A*x |) - using the singular value decomposition (SVD) of A. A is an M-by-N - matrix which may be rank-deficient. - - Several right hand side vectors b and solution vectors x can be - handled in a single call; they are stored as the columns of the - M-by-NRHS right hand side matrix B and the N-by-NRHS solution - matrix X. - - The problem is solved in three steps: - (1) Reduce the coefficient matrix A to bidiagonal form with - Householder transformations, reducing the original problem - into a "bidiagonal least squares problem" (BLS) - (2) Solve the BLS using a divide and conquer approach. - (3) Apply back all the Householder tranformations to solve - the original least squares problem. - - The effective rank of A is determined by treating as zero those - singular values which are less than RCOND times the largest singular - value. - - The divide and conquer algorithm makes very mild assumptions about - floating point arithmetic. It will work on machines with a guard - digit in add/subtract, or on those binary machines without guard - digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or - Cray-2. It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Arguments - ========= - - M (input) INTEGER - The number of rows of A. M >= 0. - - N (input) INTEGER - The number of columns of A. N >= 0. - - NRHS (input) INTEGER - The number of right hand sides, i.e., the number of columns - of the matrices B and X. NRHS >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, A has been destroyed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). 
- - B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) - On entry, the M-by-NRHS right hand side matrix B. - On exit, B is overwritten by the N-by-NRHS solution - matrix X. If m >= n and RANK = n, the residual - sum-of-squares for the solution in the i-th column is given - by the sum of squares of elements n+1:m in that column. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,max(M,N)). - - S (output) DOUBLE PRECISION array, dimension (min(M,N)) - The singular values of A in decreasing order. - The condition number of A in the 2-norm = S(1)/S(min(m,n)). - - RCOND (input) DOUBLE PRECISION - RCOND is used to determine the effective rank of A. - Singular values S(i) <= RCOND*S(1) are treated as zero. - If RCOND < 0, machine precision is used instead. - - RANK (output) INTEGER - The effective rank of A, i.e., the number of singular values - which are greater than RCOND*S(1). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK must be at least 1. - The exact minimum amount of workspace needed depends on M, - N and NRHS. As long as LWORK is at least - 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, - if M is greater than or equal to N or - 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, - if M is less than N, the code will execute correctly. - SMLSIZ is returned by ILAENV and is equal to the maximum - size of the subproblems at the bottom of the computation - tree (usually about 25), and - NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) - For good performance, LWORK should generally be larger. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - IWORK (workspace) INTEGER array, dimension (LIWORK) - LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, - where MINMN = MIN( M,N ). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: the algorithm for computing the SVD failed to converge; - if INFO = i, i off-diagonal elements of an intermediate - bidiagonal form did not converge to zero. - - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input arguments. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - --s; - --work; - --iwork; - - /* Function Body */ - *info = 0; - minmn = min(*m,*n); - maxmn = max(*m,*n); - mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, ( - ftnlen)1); - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*nrhs < 0) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if (*ldb < max(1,maxmn)) { - *info = -7; - } - - smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - -/* - Compute workspace. - (Note: Comments in the code beginning "Workspace:" describe the - minimal amount of workspace needed at that point in the code, - as well as the preferred amount for good performance. - NB refers to the optimal block size for the immediately - following subroutine, as returned by ILAENV.) 
-*/ - - minwrk = 1; - minmn = max(1,minmn); -/* Computing MAX */ - i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) / - log(2.)) + 1; - nlvl = max(i__1,0); - - if (*info == 0) { - maxwrk = 0; - mm = *m; - if ((*m >= *n && *m >= mnthr)) { - -/* Path 1a - overdetermined, with many more rows than columns. */ - - mm = *n; -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, - n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT", - m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2); - maxwrk = max(i__1,i__2); - } - if (*m >= *n) { - -/* - Path 1 - overdetermined or exactly determined. - - Computing MAX -*/ - i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD" - , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR", - "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR", - "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing 2nd power */ - i__1 = smlsiz + 1; - wlalsd = *n * 9 + ((*n) << (1)) * smlsiz + ((*n) << (3)) * nlvl + - *n * *nrhs + i__1 * i__1; -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n * 3 + wlalsd; - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2), - i__2 = *n * 3 + wlalsd; - minwrk = max(i__1,i__2); - } - if (*n > *m) { -/* Computing 2nd power */ - i__1 = smlsiz + 1; - wlalsd = *m * 9 + ((*m) << (1)) * smlsiz + ((*m) << (3)) * nlvl + - *m * *nrhs + i__1 * i__1; - if (*n >= mnthr) { - -/* - Path 2a - underdetermined, with many more columns - than rows. 
-*/ - - maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, - &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1)) - * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs * - ilaenv_(&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, ( - ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) * - ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1, ( - ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); - if (*nrhs > 1) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs; - maxwrk = max(i__1,i__2); - } else { -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1)); - maxwrk = max(i__1,i__2); - } -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ", - "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + wlalsd; - maxwrk = max(i__1,i__2); - } else { - -/* Path 2 - remaining underdetermined cases. */ - - maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m, - n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR" - , "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR", - "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * 3 + wlalsd; - maxwrk = max(i__1,i__2); - } -/* Computing MAX */ - i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2), - i__2 = *m * 3 + wlalsd; - minwrk = max(i__1,i__2); - } - minwrk = min(minwrk,maxwrk); - work[1] = (doublereal) maxwrk; - if ((*lwork < minwrk && ! 
lquery)) { - *info = -12; - } - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGELSD", &i__1); - return 0; - } else if (lquery) { - goto L10; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0) { - *rank = 0; - return 0; - } - -/* Get machine parameters. */ - - eps = PRECISION; - sfmin = SAFEMINIMUM; - smlnum = sfmin / eps; - bignum = 1. / smlnum; - dlabad_(&smlnum, &bignum); - -/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */ - - anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]); - iascl = 0; - if ((anrm > 0. && anrm < smlnum)) { - -/* Scale matrix norm up to SMLNUM. */ - - dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, - info); - iascl = 1; - } else if (anrm > bignum) { - -/* Scale matrix norm down to BIGNUM. */ - - dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, - info); - iascl = 2; - } else if (anrm == 0.) { - -/* Matrix all zero. Return zero solution. */ - - i__1 = max(*m,*n); - dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); - dlaset_("F", &minmn, &c__1, &c_b29, &c_b29, &s[1], &c__1); - *rank = 0; - goto L10; - } - -/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */ - - bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]); - ibscl = 0; - if ((bnrm > 0. && bnrm < smlnum)) { - -/* Scale matrix norm up to SMLNUM. */ - - dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb, - info); - ibscl = 1; - } else if (bnrm > bignum) { - -/* Scale matrix norm down to BIGNUM. */ - - dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb, - info); - ibscl = 2; - } - -/* If M < N make sure certain entries of B are zero. */ - - if (*m < *n) { - i__1 = *n - *m; - dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb); - } - -/* Overdetermined case. */ - - if (*m >= *n) { - -/* Path 1 - overdetermined or exactly determined. */ - - mm = *m; - if (*m >= mnthr) { - -/* Path 1a - overdetermined, with many more rows than columns. 
*/ - - mm = *n; - itau = 1; - nwork = itau + *n; - -/* - Compute A=Q*R. - (Workspace: need 2*N, prefer N+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, - info); - -/* - Multiply B by transpose(Q). - (Workspace: need N+NRHS, prefer N+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[ - b_offset], ldb, &work[nwork], &i__1, info); - -/* Zero out below R. */ - - if (*n > 1) { - i__1 = *n - 1; - i__2 = *n - 1; - dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2], - lda); - } - } - - ie = 1; - itauq = ie + *n; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in A. - (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & - work[itaup], &work[nwork], &i__1, info); - -/* - Multiply B by transpose of left bidiagonalizing vectors of R. - (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq], - &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Solve the bidiagonal least squares problem. */ - - dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb, - rcond, rank, &work[nwork], &iwork[1], info); - if (*info != 0) { - goto L10; - } - -/* Multiply B by right bidiagonalizing vectors of R. */ - - i__1 = *lwork - nwork + 1; - dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], & - b[b_offset], ldb, &work[nwork], &i__1, info); - - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 = - max(i__1,*nrhs), i__2 = *n - *m * 3; - if ((*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2) - )) { - -/* - Path 2a - underdetermined, with many more columns than rows - and sufficient workspace for an efficient algorithm. 
-*/ - - ldwork = *m; -/* - Computing MAX - Computing MAX -*/ - i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 = - max(i__3,*nrhs), i__4 = *n - *m * 3; - i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * * - lda + *m + *m * *nrhs; - if (*lwork >= max(i__1,i__2)) { - ldwork = *lda; - } - itau = 1; - nwork = *m + 1; - -/* - Compute A=L*Q. - (Workspace: need 2*M, prefer M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, - info); - il = nwork; - -/* Copy L to WORK(IL), zeroing out above its diagonal. */ - - dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork); - i__1 = *m - 1; - i__2 = *m - 1; - dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwork], & - ldwork); - ie = il + ldwork * *m; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in WORK(IL). - (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq], - &work[itaup], &work[nwork], &i__1, info); - -/* - Multiply B by transpose of left bidiagonalizing vectors of L. - (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[ - itauq], &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Solve the bidiagonal least squares problem. */ - - dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset], - ldb, rcond, rank, &work[nwork], &iwork[1], info); - if (*info != 0) { - goto L10; - } - -/* Multiply B by right bidiagonalizing vectors of L. */ - - i__1 = *lwork - nwork + 1; - dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[ - itaup], &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Zero out below first M rows of B. */ - - i__1 = *n - *m; - dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], - ldb); - nwork = itau + *m; - -/* - Multiply transpose(Q) by B. 
- (Workspace: need M+NRHS, prefer M+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[ - b_offset], ldb, &work[nwork], &i__1, info); - - } else { - -/* Path 2 - remaining underdetermined cases. */ - - ie = 1; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize A. - (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & - work[itaup], &work[nwork], &i__1, info); - -/* - Multiply B by transpose of left bidiagonalizing vectors. - (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq] - , &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Solve the bidiagonal least squares problem. */ - - dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset], - ldb, rcond, rank, &work[nwork], &iwork[1], info); - if (*info != 0) { - goto L10; - } - -/* Multiply B by right bidiagonalizing vectors of A. */ - - i__1 = *lwork - nwork + 1; - dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup] - , &b[b_offset], ldb, &work[nwork], &i__1, info); - - } - } - -/* Undo scaling. 
*/ - - if (iascl == 1) { - dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb, - info); - dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & - minmn, info); - } else if (iascl == 2) { - dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb, - info); - dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & - minmn, info); - } - if (ibscl == 1) { - dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb, - info); - } else if (ibscl == 2) { - dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb, - info); - } - -L10: - work[1] = (doublereal) maxwrk; - return 0; - -/* End of DGELSD */ - -} /* dgelsd_ */ - -/* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer * - lda, doublereal *tau, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, k; - static doublereal aii; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *), dlarfg_(integer *, doublereal *, - doublereal *, integer *, doublereal *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DGEQR2 computes a QR factorization of a real m by n matrix A: - A = Q * R. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the m by n matrix A. 
- On exit, the elements on and above the diagonal of the array - contain the min(m,n) by n upper trapezoidal matrix R (R is - upper triangular if m >= n); the elements below the diagonal, - with the array TAU, represent the orthogonal matrix Q as a - product of elementary reflectors (see Further Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace) DOUBLE PRECISION array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(1) H(2) . . . H(k), where k = min(m,n). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), - and tau in TAU(i). 
- - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEQR2", &i__1); - return 0; - } - - k = min(*m,*n); - - i__1 = k; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ - - i__2 = *m - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1] - , &c__1, &tau[i__]); - if (i__ < *n) { - -/* Apply H(i) to A(i:m,i+1:n) from the left */ - - aii = a[i__ + i__ * a_dim1]; - a[i__ + i__ * a_dim1] = 1.; - i__2 = *m - i__ + 1; - i__3 = *n - i__; - dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[ - i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); - a[i__ + i__ * a_dim1] = aii; - } -/* L10: */ - } - return 0; - -/* End of DGEQR2 */ - -} /* dgeqr2_ */ - -/* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer * - lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, k, ib, nb, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *), dlarfb_(char *, - char *, char *, char *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, 
integer *, - integer *, integer *, ftnlen, ftnlen); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DGEQRF computes a QR factorization of a real M-by-N matrix A: - A = Q * R. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, the elements on and above the diagonal of the array - contain the min(M,N)-by-N upper trapezoidal matrix R (R is - upper triangular if m >= n); the elements below the diagonal, - with the array TAU, represent the orthogonal matrix Q as a - product of min(m,n) elementary reflectors (see Further - Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,N). - For optimum performance LWORK >= N*NB, where NB is - the optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(1) H(2) . . . H(k), where k = min(m,n). 
- - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), - and tau in TAU(i). - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) - 1); - lwkopt = *n * nb; - work[1] = (doublereal) lwkopt; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } else if ((*lwork < max(1,*n) && ! lquery)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGEQRF", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - k = min(*m,*n); - if (k == 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *n; - if ((nb > 1 && nb < k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < k) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *n; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, & - c_n1, (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < k) && nx < k)) { - -/* Use blocked code initially */ - - i__1 = k - nx; - i__2 = nb; - for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__3 = k - i__ + 1; - ib = min(i__3,nb); - -/* - Compute the QR factorization of the current block - A(i:m,i:i+ib-1) -*/ - - i__3 = *m - i__ + 1; - dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ - 1], &iinfo); - if (i__ + ib <= *n) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__3 = *m - i__ + 1; - dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H' to A(i:m,i+ib:n) from the left */ - - i__3 = *m - i__ + 1; - i__4 = *n - i__ - ib + 1; - dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, & - i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & - ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib - + 1], &ldwork); - } -/* L10: */ - } - } else { - i__ = 1; - } - -/* Use unblocked code to factor the last or only block. */ - - if (i__ <= k) { - i__2 = *m - i__ + 1; - i__1 = *n - i__ + 1; - dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] - , &iinfo); - } - - work[1] = (doublereal) iws; - return 0; - -/* End of DGEQRF */ - -} /* dgeqrf_ */ - -/* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal * - a, integer *lda, doublereal *s, doublereal *u, integer *ldu, - doublereal *vt, integer *ldvt, doublereal *work, integer *lwork, - integer *iwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, - i__2, i__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, ie, il, ir, iu, blk; - static doublereal dum[1], eps; - static integer ivt, iscl; - static doublereal anrm; - static integer idum[1], ierr, itau; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer 
*); - extern logical lsame_(char *, char *); - static integer chunk, minmn, wrkbl, itaup, itauq, mnthr; - static logical wntqa; - static integer nwork; - static logical wntqn, wntqo, wntqs; - extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal - *, doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *, integer *), dgebrd_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, integer *); - extern doublereal dlamch_(char *), dlange_(char *, integer *, - integer *, doublereal *, integer *, doublereal *); - static integer bdspac; - extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *, integer *), - dlascl_(char *, integer *, integer *, doublereal *, doublereal *, - integer *, integer *, doublereal *, integer *, integer *), - dgeqrf_(integer *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *, integer *), dlacpy_(char *, - integer *, integer *, doublereal *, integer *, doublereal *, - integer *), dlaset_(char *, integer *, integer *, - doublereal *, doublereal *, doublereal *, integer *), - xerbla_(char *, integer *), dorgbr_(char *, integer *, - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static doublereal bignum; - extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - integer *), dorgqr_(integer *, integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *, integer *); 
- static integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt; - static doublereal smlnum; - static logical wntqas, lquery; - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DGESDD computes the singular value decomposition (SVD) of a real - M-by-N matrix A, optionally computing the left and right singular - vectors. If singular vectors are desired, it uses a - divide-and-conquer algorithm. - - The SVD is written - - A = U * SIGMA * transpose(V) - - where SIGMA is an M-by-N matrix which is zero except for its - min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and - V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA - are the singular values of A; they are real and non-negative, and - are returned in descending order. The first min(m,n) columns of - U and V are the left and right singular vectors of A. - - Note that the routine returns VT = V**T, not V. - - The divide and conquer algorithm makes very mild assumptions about - floating point arithmetic. It will work on machines with a guard - digit in add/subtract, or on those binary machines without guard - digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or - Cray-2. It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. 
- - Arguments - ========= - - JOBZ (input) CHARACTER*1 - Specifies options for computing all or part of the matrix U: - = 'A': all M columns of U and all N rows of V**T are - returned in the arrays U and VT; - = 'S': the first min(M,N) columns of U and the first - min(M,N) rows of V**T are returned in the arrays U - and VT; - = 'O': If M >= N, the first N columns of U are overwritten - on the array A and all rows of V**T are returned in - the array VT; - otherwise, all columns of U are returned in the - array U and the first M rows of V**T are overwritten - in the array VT; - = 'N': no columns of U or rows of V**T are computed. - - M (input) INTEGER - The number of rows of the input matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the input matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, - if JOBZ = 'O', A is overwritten with the first N columns - of U (the left singular vectors, stored - columnwise) if M >= N; - A is overwritten with the first M rows - of V**T (the right singular vectors, stored - rowwise) otherwise. - if JOBZ .ne. 'O', the contents of A are destroyed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - S (output) DOUBLE PRECISION array, dimension (min(M,N)) - The singular values of A, sorted so that S(i) >= S(i+1). - - U (output) DOUBLE PRECISION array, dimension (LDU,UCOL) - UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; - UCOL = min(M,N) if JOBZ = 'S'. - If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M - orthogonal matrix U; - if JOBZ = 'S', U contains the first min(M,N) columns of U - (the left singular vectors, stored columnwise); - if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= 1; if - JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. 
- - VT (output) DOUBLE PRECISION array, dimension (LDVT,N) - If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the - N-by-N orthogonal matrix V**T; - if JOBZ = 'S', VT contains the first min(M,N) rows of - V**T (the right singular vectors, stored rowwise); - if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. - - LDVT (input) INTEGER - The leading dimension of the array VT. LDVT >= 1; if - JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; - if JOBZ = 'S', LDVT >= min(M,N). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK; - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= 1. - If JOBZ = 'N', - LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)). - If JOBZ = 'O', - LWORK >= 3*min(M,N)*min(M,N) + - max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). - If JOBZ = 'S' or 'A' - LWORK >= 3*min(M,N)*min(M,N) + - max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). - For good performance, LWORK should generally be larger. - If LWORK < 0 but other input arguments are legal, WORK(1) - returns the optimal LWORK. - - IWORK (workspace) INTEGER array, dimension (8*min(M,N)) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: DBDSDC did not converge, updating process failed. - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --s; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - --work; - --iwork; - - /* Function Body */ - *info = 0; - minmn = min(*m,*n); - mnthr = (integer) (minmn * 11. 
/ 6.); - wntqa = lsame_(jobz, "A"); - wntqs = lsame_(jobz, "S"); - wntqas = wntqa || wntqs; - wntqo = lsame_(jobz, "O"); - wntqn = lsame_(jobz, "N"); - minwrk = 1; - maxwrk = 1; - lquery = *lwork == -1; - - if (! (wntqa || wntqs || wntqo || wntqn)) { - *info = -1; - } else if (*m < 0) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if (*ldu < 1 || (wntqas && *ldu < *m) || ((wntqo && *m < *n) && * - ldu < *m)) { - *info = -8; - } else if (*ldvt < 1 || (wntqa && *ldvt < *n) || (wntqs && *ldvt < minmn) - || ((wntqo && *m >= *n) && *ldvt < *n)) { - *info = -10; - } - -/* - Compute workspace - (Note: Comments in the code beginning "Workspace:" describe the - minimal amount of workspace needed at that point in the code, - as well as the preferred amount for good performance. - NB refers to the optimal block size for the immediately - following subroutine, as returned by ILAENV.) -*/ - - if (((*info == 0 && *m > 0) && *n > 0)) { - if (*m >= *n) { - -/* Compute space needed for DBDSDC */ - - if (wntqn) { - bdspac = *n * 7; - } else { - bdspac = *n * 3 * *n + ((*n) << (2)); - } - if (*m >= mnthr) { - if (wntqn) { - -/* Path 1 (M much larger than N, JOBZ='N') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n; - maxwrk = max(i__1,i__2); - minwrk = bdspac + *n; - } else if (wntqo) { - -/* Path 2 (M much larger than N, JOBZ='O') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR", - " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) 
* ilaenv_(& - c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + ((*n) << (1)) * *n; - minwrk = bdspac + ((*n) << (1)) * *n + *n * 3; - } else if (wntqs) { - -/* Path 3 (M much larger than N, JOBZ='S') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR", - " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + *n * *n; - minwrk = bdspac + *n * *n + *n * 3; - } else if (wntqa) { - -/* Path 4 (M much larger than N, JOBZ='A') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR", - " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", n, n, &c_n1, 
&c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + *n * *n; - minwrk = bdspac + *n * *n + *n * 3; - } - } else { - -/* Path 5 (M at least N, but not much larger) */ - - wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m, - n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - if (wntqn) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n * 3; - maxwrk = max(i__1,i__2); - minwrk = *n * 3 + max(*m,bdspac); - } else if (wntqo) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + *m * *n; -/* Computing MAX */ - i__1 = *m, i__2 = *n * *n + bdspac; - minwrk = *n * 3 + max(i__1,i__2); - } else if (wntqs) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *n * 3; - maxwrk = max(i__1,i__2); - minwrk = *n * 3 + max(*m,bdspac); - } else if (wntqa) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, n, &c_n1, (ftnlen)6, 
(ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = bdspac + *n * 3; - maxwrk = max(i__1,i__2); - minwrk = *n * 3 + max(*m,bdspac); - } - } - } else { - -/* Compute space needed for DBDSDC */ - - if (wntqn) { - bdspac = *m * 7; - } else { - bdspac = *m * 3 * *m + ((*m) << (2)); - } - if (*n >= mnthr) { - if (wntqn) { - -/* Path 1t (N much larger than M, JOBZ='N') */ - - wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m; - maxwrk = max(i__1,i__2); - minwrk = bdspac + *m; - } else if (wntqo) { - -/* Path 2t (N much larger than M, JOBZ='O') */ - - wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ", - " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + ((*m) << (1)) * *m; - minwrk = bdspac + ((*m) << (1)) * *m + *m * 3; - } else if (wntqs) { - -/* Path 3t (N much larger than M, JOBZ='S') */ - - 
wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ", - " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + *m * *m; - minwrk = bdspac + *m * *m + *m * 3; - } else if (wntqa) { - -/* Path 4t (N much larger than M, JOBZ='A') */ - - wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ", - " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(& - c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen) - 6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + *m * *m; - minwrk = bdspac + *m * *m + *m * 3; - } - } else { - -/* Path 5t (N greater than M, but not much larger) */ - - wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m, - n, 
&c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - if (wntqn) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - maxwrk = max(i__1,i__2); - minwrk = *m * 3 + max(*n,bdspac); - } else if (wntqo) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - wrkbl = max(i__1,i__2); - maxwrk = wrkbl + *m * *n; -/* Computing MAX */ - i__1 = *n, i__2 = *m * *m + bdspac; - minwrk = *m * 3 + max(i__1,i__2); - } else if (wntqs) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - maxwrk = max(i__1,i__2); - minwrk = *m * 3 + max(*n,bdspac); - } else if (wntqa) { -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" - , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = bdspac + *m * 3; - maxwrk = max(i__1,i__2); - minwrk = *m * 3 + max(*n,bdspac); - } - } - } - work[1] = (doublereal) maxwrk; - } - - if ((*lwork < minwrk && ! 
lquery)) { - *info = -12; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGESDD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - if (*lwork >= 1) { - work[1] = 1.; - } - return 0; - } - -/* Get machine constants */ - - eps = PRECISION; - smlnum = sqrt(SAFEMINIMUM) / eps; - bignum = 1. / smlnum; - -/* Scale A if max element outside range [SMLNUM,BIGNUM] */ - - anrm = dlange_("M", m, n, &a[a_offset], lda, dum); - iscl = 0; - if ((anrm > 0. && anrm < smlnum)) { - iscl = 1; - dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, & - ierr); - } else if (anrm > bignum) { - iscl = 1; - dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, & - ierr); - } - - if (*m >= *n) { - -/* - A has at least as many rows as columns. If A has sufficiently - more rows than columns, first reduce using the QR - decomposition (if sufficient workspace available) -*/ - - if (*m >= mnthr) { - - if (wntqn) { - -/* - Path 1 (M much larger than N, JOBZ='N') - No singular vectors to be computed -*/ - - itau = 1; - nwork = itau + *n; - -/* - Compute A=Q*R - (Workspace: need 2*N, prefer N+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Zero out below R */ - - i__1 = *n - 1; - i__2 = *n - 1; - dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2], - lda); - ie = 1; - itauq = ie + *n; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in A - (Workspace: need 4*N, prefer 3*N+2*N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - nwork = ie + *n; - -/* - Perform bidiagonal SVD, computing singular values only - (Workspace: need N+BDSPAC) -*/ - - dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1, - dum, idum, &work[nwork], &iwork[1], info); - - } else if (wntqo) { - -/* - Path 2 (M 
much larger than N, JOBZ = 'O') - N left singular vectors to be overwritten on A and - N right singular vectors to be computed in VT -*/ - - ir = 1; - -/* WORK(IR) is LDWRKR by N */ - - if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) { - ldwrkr = *lda; - } else { - ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n; - } - itau = ir + ldwrkr * *n; - nwork = itau + *n; - -/* - Compute A=Q*R - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Copy R to WORK(IR), zeroing out below it */ - - dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); - i__1 = *n - 1; - i__2 = *n - 1; - dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &work[ir + 1], & - ldwrkr); - -/* - Generate Q in A - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], - &i__1, &ierr); - ie = itau; - itauq = ie + *n; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in VT, copying result to WORK(IR) - (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - -/* WORK(IU) is N by N */ - - iu = nwork; - nwork = iu + *n * *n; - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in WORK(IU) and computing right - singular vectors of bidiagonal matrix in VT - (Workspace: need N+N*N+BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite WORK(IU) by left singular vectors of R - and VT by right singular vectors of R - (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ - itauq], &work[iu], n, &work[nwork], &i__1, &ierr); - 
i__1 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - -/* - Multiply Q in A by left singular vectors of R in - WORK(IU), storing result in WORK(IR) and copying to A - (Workspace: need 2*N*N, prefer N*N+M*N) -*/ - - i__1 = *m; - i__2 = ldwrkr; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += - i__2) { -/* Computing MIN */ - i__3 = *m - i__ + 1; - chunk = min(i__3,ldwrkr); - dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1], - lda, &work[iu], n, &c_b29, &work[ir], &ldwrkr); - dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + - a_dim1], lda); -/* L10: */ - } - - } else if (wntqs) { - -/* - Path 3 (M much larger than N, JOBZ='S') - N left singular vectors to be computed in U and - N right singular vectors to be computed in VT -*/ - - ir = 1; - -/* WORK(IR) is N by N */ - - ldwrkr = *n; - itau = ir + ldwrkr * *n; - nwork = itau + *n; - -/* - Compute A=Q*R - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - -/* Copy R to WORK(IR), zeroing out below it */ - - dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); - i__2 = *n - 1; - i__1 = *n - 1; - dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &work[ir + 1], & - ldwrkr); - -/* - Generate Q in A - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], - &i__2, &ierr); - ie = itau; - itauq = ie + *n; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in WORK(IR) - (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagoal matrix in U and computing right singular - vectors of 
bidiagonal matrix in VT - (Workspace: need N+BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite U by left singular vectors of R and VT - by right singular vectors of R - (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - - i__2 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply Q in A by left singular vectors of R in - WORK(IR), storing result in U - (Workspace: need N*N) -*/ - - dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr); - dgemm_("N", "N", m, n, n, &c_b15, &a[a_offset], lda, &work[ir] - , &ldwrkr, &c_b29, &u[u_offset], ldu); - - } else if (wntqa) { - -/* - Path 4 (M much larger than N, JOBZ='A') - M left singular vectors to be computed in U and - N right singular vectors to be computed in VT -*/ - - iu = 1; - -/* WORK(IU) is N by N */ - - ldwrku = *n; - itau = iu + ldwrku * *n; - nwork = itau + *n; - -/* - Compute A=Q*R, copying result to U - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu); - -/* - Generate Q in U - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - i__2 = *lwork - nwork + 1; - dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork], - &i__2, &ierr); - -/* Produce R in A, zeroing out other entries */ - - i__2 = *n - 1; - i__1 = *n - 1; - dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &a[a_dim1 + 2], - lda); - ie = itau; - itauq = ie + *n; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in A - (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) -*/ - - i__2 = *lwork - nwork + 
1; - dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in WORK(IU) and computing right - singular vectors of bidiagonal matrix in VT - (Workspace: need N+N*N+BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite WORK(IU) by left singular vectors of R and VT - by right singular vectors of R - (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[ - itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & - ierr); - i__2 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply Q in U by left singular vectors of R in - WORK(IU), storing result in A - (Workspace: need N*N) -*/ - - dgemm_("N", "N", m, n, n, &c_b15, &u[u_offset], ldu, &work[iu] - , &ldwrku, &c_b29, &a[a_offset], lda); - -/* Copy left singular vectors of A from A to U */ - - dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu); - - } - - } else { - -/* - M .LT. 
MNTHR - - Path 5 (M at least N, but not much larger) - Reduce to bidiagonal form without QR decomposition -*/ - - ie = 1; - itauq = ie + *n; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize A - (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & - work[itaup], &work[nwork], &i__2, &ierr); - if (wntqn) { - -/* - Perform bidiagonal SVD, only computing singular values - (Workspace: need N+BDSPAC) -*/ - - dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1, - dum, idum, &work[nwork], &iwork[1], info); - } else if (wntqo) { - iu = nwork; - if (*lwork >= *m * *n + *n * 3 + bdspac) { - -/* WORK( IU ) is M by N */ - - ldwrku = *m; - nwork = iu + ldwrku * *n; - dlaset_("F", m, n, &c_b29, &c_b29, &work[iu], &ldwrku); - } else { - -/* WORK( IU ) is N by N */ - - ldwrku = *n; - nwork = iu + ldwrku * *n; - -/* WORK(IR) is LDWRKR by N */ - - ir = nwork; - ldwrkr = (*lwork - *n * *n - *n * 3) / *n; - } - nwork = iu + ldwrku * *n; - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in WORK(IU) and computing right - singular vectors of bidiagonal matrix in VT - (Workspace: need N+N*N+BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, & - vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[ - 1], info); - -/* - Overwrite VT by right singular vectors of A - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - - if (*lwork >= *m * *n + *n * 3 + bdspac) { - -/* - Overwrite WORK(IU) by left singular vectors of A - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ - itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & - ierr); - -/* Copy left singular vectors of A from 
WORK(IU) to A */ - - dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda); - } else { - -/* - Generate Q in A - (Workspace: need N*N+2*N, prefer N*N+N+N*NB) -*/ - - i__2 = *lwork - nwork + 1; - dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], & - work[nwork], &i__2, &ierr); - -/* - Multiply Q in A by left singular vectors of - bidiagonal matrix in WORK(IU), storing result in - WORK(IR) and copying to A - (Workspace: need 2*N*N, prefer N*N+M*N) -*/ - - i__2 = *m; - i__1 = ldwrkr; - for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += - i__1) { -/* Computing MIN */ - i__3 = *m - i__ + 1; - chunk = min(i__3,ldwrkr); - dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + - a_dim1], lda, &work[iu], &ldwrku, &c_b29, & - work[ir], &ldwrkr); - dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + - a_dim1], lda); -/* L20: */ - } - } - - } else if (wntqs) { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in VT - (Workspace: need N+BDSPAC) -*/ - - dlaset_("F", m, n, &c_b29, &c_b29, &u[u_offset], ldu); - dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite U by left singular vectors of A and VT - by right singular vectors of A - (Workspace: need 3*N, prefer 2*N+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - i__1 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - } else if (wntqa) { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in VT - (Workspace: need N+BDSPAC) -*/ - - dlaset_("F", m, m, &c_b29, &c_b29, &u[u_offset], ldu); - dbdsdc_("U", "I", n, &s[1], 
&work[ie], &u[u_offset], ldu, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* Set the right corner of U to identity matrix */ - - i__1 = *m - *n; - i__2 = *m - *n; - dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &u[*n + 1 + (*n + - 1) * u_dim1], ldu); - -/* - Overwrite U by left singular vectors of A and VT - by right singular vectors of A - (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - i__1 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - } - - } - - } else { - -/* - A has more columns than rows. If A has sufficiently more - columns than rows, first reduce using the LQ decomposition (if - sufficient workspace available) -*/ - - if (*n >= mnthr) { - - if (wntqn) { - -/* - Path 1t (N much larger than M, JOBZ='N') - No singular vectors to be computed -*/ - - itau = 1; - nwork = itau + *m; - -/* - Compute A=L*Q - (Workspace: need 2*M, prefer M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Zero out above L */ - - i__1 = *m - 1; - i__2 = *m - 1; - dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &a[((a_dim1) << (1) - ) + 1], lda); - ie = 1; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in A - (Workspace: need 4*M, prefer 3*M+2*M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - nwork = ie + *m; - -/* - Perform bidiagonal SVD, computing singular values only - (Workspace: need M+BDSPAC) -*/ - - dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1, - dum, idum, &work[nwork], &iwork[1], info); - - } else if (wntqo) { - -/* - Path 2t (N much larger than M, JOBZ='O') - M 
right singular vectors to be overwritten on A and - M left singular vectors to be computed in U -*/ - - ivt = 1; - -/* IVT is M by M */ - - il = ivt + *m * *m; - if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) { - -/* WORK(IL) is M by N */ - - ldwrkl = *m; - chunk = *n; - } else { - ldwrkl = *m; - chunk = (*lwork - *m * *m) / *m; - } - itau = il + ldwrkl * *m; - nwork = itau + *m; - -/* - Compute A=L*Q - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Copy L to WORK(IL), zeroing about above it */ - - dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); - i__1 = *m - 1; - i__2 = *m - 1; - dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwrkl], - &ldwrkl); - -/* - Generate Q in A - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], - &i__1, &ierr); - ie = itau; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in WORK(IL) - (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U, and computing right singular - vectors of bidiagonal matrix in WORK(IVT) - (Workspace: need M+M*M+BDSPAC) -*/ - - dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & - work[ivt], m, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite U by left singular vectors of L and WORK(IVT) - by right singular vectors of L - (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - i__1 = *lwork - nwork + 1; - dormbr_("P", "R", "T", m, m, m, 
&work[il], &ldwrkl, &work[ - itaup], &work[ivt], m, &work[nwork], &i__1, &ierr); - -/* - Multiply right singular vectors of L in WORK(IVT) by Q - in A, storing result in WORK(IL) and copying to A - (Workspace: need 2*M*M, prefer M*M+M*N) -*/ - - i__1 = *n; - i__2 = chunk; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += - i__2) { -/* Computing MIN */ - i__3 = *n - i__ + 1; - blk = min(i__3,chunk); - dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], m, &a[ - i__ * a_dim1 + 1], lda, &c_b29, &work[il], & - ldwrkl); - dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1 - + 1], lda); -/* L30: */ - } - - } else if (wntqs) { - -/* - Path 3t (N much larger than M, JOBZ='S') - M right singular vectors to be computed in VT and - M left singular vectors to be computed in U -*/ - - il = 1; - -/* WORK(IL) is M by M */ - - ldwrkl = *m; - itau = il + ldwrkl * *m; - nwork = itau + *m; - -/* - Compute A=L*Q - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - -/* Copy L to WORK(IL), zeroing out above it */ - - dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); - i__2 = *m - 1; - i__1 = *m - 1; - dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &work[il + ldwrkl], - &ldwrkl); - -/* - Generate Q in A - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], - &i__2, &ierr); - ie = itau; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in WORK(IU), copying result to U - (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in VT - (Workspace: 
need M+BDSPAC) -*/ - - dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite U by left singular vectors of L and VT - by right singular vectors of L - (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - i__2 = *lwork - nwork + 1; - dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply right singular vectors of L in WORK(IL) by - Q in A, storing result in VT - (Workspace: need M*M) -*/ - - dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl); - dgemm_("N", "N", m, n, m, &c_b15, &work[il], &ldwrkl, &a[ - a_offset], lda, &c_b29, &vt[vt_offset], ldvt); - - } else if (wntqa) { - -/* - Path 4t (N much larger than M, JOBZ='A') - N right singular vectors to be computed in VT and - M left singular vectors to be computed in U -*/ - - ivt = 1; - -/* WORK(IVT) is M by M */ - - ldwkvt = *m; - itau = ivt + ldwkvt * *m; - nwork = itau + *m; - -/* - Compute A=L*Q, copying result to VT - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - -/* - Generate Q in VT - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[ - nwork], &i__2, &ierr); - -/* Produce L in A, zeroing out other entries */ - - i__2 = *m - 1; - i__1 = *m - 1; - dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &a[((a_dim1) << (1) - ) + 1], lda); - ie = itau; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in A - (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) -*/ - - i__2 = *lwork - nwork + 1; - 
dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in WORK(IVT) - (Workspace: need M+M*M+BDSPAC) -*/ - - dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & - work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1] - , info); - -/* - Overwrite U by left singular vectors of L and WORK(IVT) - by right singular vectors of L - (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - i__2 = *lwork - nwork + 1; - dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[ - itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply right singular vectors of L in WORK(IVT) by - Q in VT, storing result in A - (Workspace: need M*M) -*/ - - dgemm_("N", "N", m, n, m, &c_b15, &work[ivt], &ldwkvt, &vt[ - vt_offset], ldvt, &c_b29, &a[a_offset], lda); - -/* Copy right singular vectors of A from A to VT */ - - dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - - } - - } else { - -/* - N .LT. 
MNTHR - - Path 5t (N greater than M, but not much larger) - Reduce to bidiagonal form without LQ decomposition -*/ - - ie = 1; - itauq = ie + *m; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize A - (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) -*/ - - i__2 = *lwork - nwork + 1; - dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & - work[itaup], &work[nwork], &i__2, &ierr); - if (wntqn) { - -/* - Perform bidiagonal SVD, only computing singular values - (Workspace: need M+BDSPAC) -*/ - - dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1, - dum, idum, &work[nwork], &iwork[1], info); - } else if (wntqo) { - ldwkvt = *m; - ivt = nwork; - if (*lwork >= *m * *n + *m * 3 + bdspac) { - -/* WORK( IVT ) is M by N */ - - dlaset_("F", m, n, &c_b29, &c_b29, &work[ivt], &ldwkvt); - nwork = ivt + ldwkvt * *n; - } else { - -/* WORK( IVT ) is M by M */ - - nwork = ivt + ldwkvt * *m; - il = nwork; - -/* WORK(IL) is M by CHUNK */ - - chunk = (*lwork - *m * *m - *m * 3) / *m; - } - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in WORK(IVT) - (Workspace: need M*M+BDSPAC) -*/ - - dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & - work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1] - , info); - -/* - Overwrite U by left singular vectors of A - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - - if (*lwork >= *m * *n + *m * 3 + bdspac) { - -/* - Overwrite WORK(IVT) by left singular vectors of A - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[ - itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, - &ierr); - -/* Copy right singular vectors of A from WORK(IVT) to A */ - - dlacpy_("F", m, n, 
&work[ivt], &ldwkvt, &a[a_offset], lda); - } else { - -/* - Generate P**T in A - (Workspace: need M*M+2*M, prefer M*M+M+M*NB) -*/ - - i__2 = *lwork - nwork + 1; - dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], & - work[nwork], &i__2, &ierr); - -/* - Multiply Q in A by right singular vectors of - bidiagonal matrix in WORK(IVT), storing result in - WORK(IL) and copying to A - (Workspace: need 2*M*M, prefer M*M+M*N) -*/ - - i__2 = *n; - i__1 = chunk; - for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += - i__1) { -/* Computing MIN */ - i__3 = *n - i__ + 1; - blk = min(i__3,chunk); - dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], & - ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b29, & - work[il], m); - dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 + - 1], lda); -/* L40: */ - } - } - } else if (wntqs) { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in VT - (Workspace: need M+BDSPAC) -*/ - - dlaset_("F", m, n, &c_b29, &c_b29, &vt[vt_offset], ldvt); - dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ - vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* - Overwrite U by left singular vectors of A and VT - by right singular vectors of A - (Workspace: need 3*M, prefer 2*M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - i__1 = *lwork - nwork + 1; - dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - } else if (wntqa) { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in U and computing right singular - vectors of bidiagonal matrix in VT - (Workspace: need M+BDSPAC) -*/ - - dlaset_("F", n, n, &c_b29, &c_b29, &vt[vt_offset], ldvt); - dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ - vt_offset], 
ldvt, dum, idum, &work[nwork], &iwork[1], - info); - -/* Set the right corner of VT to identity matrix */ - - i__1 = *n - *m; - i__2 = *n - *m; - dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &vt[*m + 1 + (*m + - 1) * vt_dim1], ldvt); - -/* - Overwrite U by left singular vectors of A and VT - by right singular vectors of A - (Workspace: need 2*M+N, prefer 2*M+N*NB) -*/ - - i__1 = *lwork - nwork + 1; - dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - i__1 = *lwork - nwork + 1; - dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - } - - } - - } - -/* Undo scaling if necessary */ - - if (iscl == 1) { - if (anrm > bignum) { - dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & - minmn, &ierr); - } - if (anrm < smlnum) { - dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & - minmn, &ierr); - } - } - -/* Return optimal workspace in WORK(1) */ - - work[1] = (doublereal) maxwrk; - - return 0; - -/* End of DGESDD */ - -} /* dgesdd_ */ - -/* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer - *lda, integer *ipiv, doublereal *b, integer *ldb, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1; - - /* Local variables */ - extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *, - integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *, - integer *, integer *, doublereal *, integer *, integer *); - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - March 31, 1993 - - - Purpose - ======= - - DGESV computes the solution to a real system of linear equations - A * X = B, - where A is an N-by-N matrix and X and B are N-by-NRHS matrices. 
- - The LU decomposition with partial pivoting and row interchanges is - used to factor A as - A = P * L * U, - where P is a permutation matrix, L is unit lower triangular, and U is - upper triangular. The factored form of A is then used to solve the - system of equations A * X = B. - - Arguments - ========= - - N (input) INTEGER - The number of linear equations, i.e., the order of the - matrix A. N >= 0. - - NRHS (input) INTEGER - The number of right hand sides, i.e., the number of columns - of the matrix B. NRHS >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the N-by-N coefficient matrix A. - On exit, the factors L and U from the factorization - A = P*L*U; the unit diagonal elements of L are not stored. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - IPIV (output) INTEGER array, dimension (N) - The pivot indices that define the permutation matrix P; - row i of the matrix was interchanged with row IPIV(i). - - B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) - On entry, the N-by-NRHS matrix of right hand side matrix B. - On exit, if INFO = 0, the N-by-NRHS solution matrix X. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, U(i,i) is exactly zero. The factorization - has been completed, but the factor U is exactly - singular, so the solution could not be computed. - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - *info = 0; - if (*n < 0) { - *info = -1; - } else if (*nrhs < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } else if (*ldb < max(1,*n)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGESV ", &i__1); - return 0; - } - -/* Compute the LU factorization of A. */ - - dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info); - if (*info == 0) { - -/* Solve the system A*X = B, overwriting B with X. */ - - dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[ - b_offset], ldb, info); - } - return 0; - -/* End of DGESV */ - -} /* dgesv_ */ - -/* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer * - lda, integer *ipiv, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublereal d__1; - - /* Local variables */ - static integer j, jp; - extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *), dscal_(integer *, doublereal *, doublereal *, integer - *), dswap_(integer *, doublereal *, integer *, doublereal *, - integer *); - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1992 - - - Purpose - ======= - - DGETF2 computes an LU factorization of a general m-by-n matrix A - using partial pivoting with row interchanges. 
- - The factorization has the form - A = P * L * U - where P is a permutation matrix, L is lower triangular with unit - diagonal elements (lower trapezoidal if m > n), and U is upper - triangular (upper trapezoidal if m < n). - - This is the right-looking Level 2 BLAS version of the algorithm. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the m by n matrix to be factored. - On exit, the factors L and U from the factorization - A = P*L*U; the unit diagonal elements of L are not stored. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - IPIV (output) INTEGER array, dimension (min(M,N)) - The pivot indices; for 1 <= i <= min(M,N), row i of the - matrix was interchanged with row IPIV(i). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization - has been completed, but the factor U is exactly - singular, and division by zero will occur if it is used - to solve a system of equations. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGETF2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - return 0; - } - - i__1 = min(*m,*n); - for (j = 1; j <= i__1; ++j) { - -/* Find pivot and test for singularity. */ - - i__2 = *m - j + 1; - jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1); - ipiv[j] = jp; - if (a[jp + j * a_dim1] != 0.) 
{ - -/* Apply the interchange to columns 1:N. */ - - if (jp != j) { - dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda); - } - -/* Compute elements J+1:M of J-th column. */ - - if (j < *m) { - i__2 = *m - j; - d__1 = 1. / a[j + j * a_dim1]; - dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); - } - - } else if (*info == 0) { - - *info = j; - } - - if (j < min(*m,*n)) { - -/* Update trailing submatrix. */ - - i__2 = *m - j; - i__3 = *n - j; - dger_(&i__2, &i__3, &c_b151, &a[j + 1 + j * a_dim1], &c__1, &a[j - + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], - lda); - } -/* L10: */ - } - return 0; - -/* End of DGETF2 */ - -} /* dgetf2_ */ - -/* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer * - lda, integer *ipiv, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - - /* Local variables */ - static integer i__, j, jb, nb; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer iinfo; - extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, - integer *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *), dgetf2_( - integer *, integer *, doublereal *, integer *, integer *, integer - *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *, - integer *, integer *, integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - March 31, 1993 - - - Purpose - ======= - - DGETRF computes an LU factorization of a general M-by-N matrix A - using partial pivoting with row interchanges. 
- - The factorization has the form - A = P * L * U - where P is a permutation matrix, L is lower triangular with unit - diagonal elements (lower trapezoidal if m > n), and U is upper - triangular (upper trapezoidal if m < n). - - This is the right-looking Level 3 BLAS version of the algorithm. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the M-by-N matrix to be factored. - On exit, the factors L and U from the factorization - A = P*L*U; the unit diagonal elements of L are not stored. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - IPIV (output) INTEGER array, dimension (min(M,N)) - The pivot indices; for 1 <= i <= min(M,N), row i of the - matrix was interchanged with row IPIV(i). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, U(i,i) is exactly zero. The factorization - has been completed, but the factor U is exactly - singular, and division by zero will occur if it is used - to solve a system of equations. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGETRF", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - return 0; - } - -/* Determine the block size for this environment. */ - - nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) - 1); - if (nb <= 1 || nb >= min(*m,*n)) { - -/* Use unblocked code. 
*/ - - dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info); - } else { - -/* Use blocked code. */ - - i__1 = min(*m,*n); - i__2 = nb; - for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { -/* Computing MIN */ - i__3 = min(*m,*n) - j + 1; - jb = min(i__3,nb); - -/* - Factor diagonal and subdiagonal blocks and test for exact - singularity. -*/ - - i__3 = *m - j + 1; - dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo); - -/* Adjust INFO and the pivot indices. */ - - if ((*info == 0 && iinfo > 0)) { - *info = iinfo + j - 1; - } -/* Computing MIN */ - i__4 = *m, i__5 = j + jb - 1; - i__3 = min(i__4,i__5); - for (i__ = j; i__ <= i__3; ++i__) { - ipiv[i__] = j - 1 + ipiv[i__]; -/* L10: */ - } - -/* Apply interchanges to columns 1:J-1. */ - - i__3 = j - 1; - i__4 = j + jb - 1; - dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1); - - if (j + jb <= *n) { - -/* Apply interchanges to columns J+JB:N. */ - - i__3 = *n - j - jb + 1; - i__4 = j + jb - 1; - dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, & - ipiv[1], &c__1); - -/* Compute block row of U. */ - - i__3 = *n - j - jb + 1; - dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, & - c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) * - a_dim1], lda); - if (j + jb <= *m) { - -/* Update trailing submatrix. 
*/ - - i__3 = *m - j - jb + 1; - i__4 = *n - j - jb + 1; - dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb, - &c_b151, &a[j + jb + j * a_dim1], lda, &a[j + (j - + jb) * a_dim1], lda, &c_b15, &a[j + jb + (j + jb) - * a_dim1], lda); - } - } -/* L20: */ - } - } - return 0; - -/* End of DGETRF */ - -} /* dgetrf_ */ - -/* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs, - doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer * - ldb, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1; - - /* Local variables */ - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, - integer *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *), xerbla_( - char *, integer *), dlaswp_(integer *, doublereal *, - integer *, integer *, integer *, integer *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - March 31, 1993 - - - Purpose - ======= - - DGETRS solves a system of linear equations - A * X = B or A' * X = B - with a general N-by-N matrix A using the LU factorization computed - by DGETRF. - - Arguments - ========= - - TRANS (input) CHARACTER*1 - Specifies the form of the system of equations: - = 'N': A * X = B (No transpose) - = 'T': A'* X = B (Transpose) - = 'C': A'* X = B (Conjugate transpose = Transpose) - - N (input) INTEGER - The order of the matrix A. N >= 0. - - NRHS (input) INTEGER - The number of right hand sides, i.e., the number of columns - of the matrix B. NRHS >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,N) - The factors L and U from the factorization A = P*L*U - as computed by DGETRF. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). 
- - IPIV (input) INTEGER array, dimension (N) - The pivot indices from DGETRF; for 1<=i<=N, row i of the - matrix was interchanged with row IPIV(i). - - B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) - On entry, the right hand side matrix B. - On exit, the solution matrix X. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - *info = 0; - notran = lsame_(trans, "N"); - if (((! notran && ! lsame_(trans, "T")) && ! lsame_( - trans, "C"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*nrhs < 0) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if (*ldb < max(1,*n)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DGETRS", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0 || *nrhs == 0) { - return 0; - } - - if (notran) { - -/* - Solve A * X = B. - - Apply row interchanges to the right hand sides. -*/ - - dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1); - -/* Solve L*X = B, overwriting B with X. */ - - dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b15, &a[ - a_offset], lda, &b[b_offset], ldb); - -/* Solve U*X = B, overwriting B with X. */ - - dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b15, & - a[a_offset], lda, &b[b_offset], ldb); - } else { - -/* - Solve A' * X = B. - - Solve U'*X = B, overwriting B with X. -*/ - - dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b15, &a[ - a_offset], lda, &b[b_offset], ldb); - -/* Solve L'*X = B, overwriting B with X. 
*/ - - dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b15, &a[ - a_offset], lda, &b[b_offset], ldb); - -/* Apply row interchanges to the solution vectors. */ - - dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1); - } - - return 0; - -/* End of DGETRS */ - -} /* dgetrs_ */ - -/* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo, - integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, - doublereal *wi, doublereal *z__, integer *ldz, doublereal *work, - integer *lwork, integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4, - i__5; - doublereal d__1, d__2; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__, j, k, l; - static doublereal s[225] /* was [15][15] */, v[16]; - static integer i1, i2, ii, nh, nr, ns, nv; - static doublereal vv[16]; - static integer itn; - static doublereal tau; - static integer its; - static doublereal ulp, tst1; - static integer maxb; - static doublereal absw; - static integer ierr; - static doublereal unfl, temp, ovfl; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *); - static integer itemp; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static logical initz, wantt, wantz; - extern doublereal dlapy2_(doublereal *, doublereal *); - extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); - - extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, - integer *, doublereal *); - extern integer idamax_(integer *, doublereal *, integer *); - extern doublereal 
dlanhs_(char *, integer *, doublereal *, integer *, - doublereal *); - extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *, integer *, doublereal *, integer *, - integer *), dlacpy_(char *, integer *, integer *, doublereal *, - integer *, doublereal *, integer *), dlaset_(char *, - integer *, integer *, doublereal *, doublereal *, doublereal *, - integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int xerbla_(char *, integer *), dlarfx_( - char *, integer *, integer *, doublereal *, doublereal *, - doublereal *, integer *, doublereal *); - static doublereal smlnum; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DHSEQR computes the eigenvalues of a real upper Hessenberg matrix H - and, optionally, the matrices T and Z from the Schur decomposition - H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur - form), and Z is the orthogonal matrix of Schur vectors. - - Optionally Z may be postmultiplied into an input orthogonal matrix Q, - so that this routine can give the Schur factorization of a matrix A - which has been reduced to the Hessenberg form H by the orthogonal - matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. - - Arguments - ========= - - JOB (input) CHARACTER*1 - = 'E': compute eigenvalues only; - = 'S': compute eigenvalues and the Schur form T. - - COMPZ (input) CHARACTER*1 - = 'N': no Schur vectors are computed; - = 'I': Z is initialized to the unit matrix and the matrix Z - of Schur vectors of H is returned; - = 'V': Z must contain an orthogonal matrix Q on entry, and - the product Q*Z is returned. - - N (input) INTEGER - The order of the matrix H. N >= 0. 
- - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that H is already upper triangular in rows - and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally - set by a previous call to DGEBAL, and then passed to SGEHRD - when the matrix output by DGEBAL is reduced to Hessenberg - form. Otherwise ILO and IHI should be set to 1 and N - respectively. - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - H (input/output) DOUBLE PRECISION array, dimension (LDH,N) - On entry, the upper Hessenberg matrix H. - On exit, if JOB = 'S', H contains the upper quasi-triangular - matrix T from the Schur decomposition (the Schur form); - 2-by-2 diagonal blocks (corresponding to complex conjugate - pairs of eigenvalues) are returned in standard form, with - H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. If JOB = 'E', - the contents of H are unspecified on exit. - - LDH (input) INTEGER - The leading dimension of the array H. LDH >= max(1,N). - - WR (output) DOUBLE PRECISION array, dimension (N) - WI (output) DOUBLE PRECISION array, dimension (N) - The real and imaginary parts, respectively, of the computed - eigenvalues. If two eigenvalues are computed as a complex - conjugate pair, they are stored in consecutive elements of - WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and - WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in the - same order as on the diagonal of the Schur form returned in - H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 - diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and - WI(i+1) = -WI(i). - - Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) - If COMPZ = 'N': Z is not referenced. - If COMPZ = 'I': on entry, Z need not be set, and on exit, Z - contains the orthogonal matrix Z of the Schur vectors of H. - If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q, - which is assumed to be equal to the unit matrix except for - the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z. 
- Normally Q is the orthogonal matrix generated by DORGHR after - the call to DGEHRD which formed the Hessenberg matrix H. - - LDZ (input) INTEGER - The leading dimension of the array Z. - LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,N). - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, DHSEQR failed to compute all of the - eigenvalues in a total of 30*(IHI-ILO+1) iterations; - elements 1:ilo-1 and i+1:n of WR and WI contain those - eigenvalues which have been successfully computed. - - ===================================================================== - - - Decode and test the input parameters -*/ - - /* Parameter adjustments */ - h_dim1 = *ldh; - h_offset = 1 + h_dim1 * 1; - h__ -= h_offset; - --wr; - --wi; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - --work; - - /* Function Body */ - wantt = lsame_(job, "S"); - initz = lsame_(compz, "I"); - wantz = initz || lsame_(compz, "V"); - - *info = 0; - work[1] = (doublereal) max(1,*n); - lquery = *lwork == -1; - if ((! lsame_(job, "E") && ! wantt)) { - *info = -1; - } else if ((! lsame_(compz, "N") && ! wantz)) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -4; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -5; - } else if (*ldh < max(1,*n)) { - *info = -7; - } else if (*ldz < 1 || (wantz && *ldz < max(1,*n))) { - *info = -11; - } else if ((*lwork < max(1,*n) && ! 
lquery)) { - *info = -13; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DHSEQR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Initialize Z, if necessary */ - - if (initz) { - dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); - } - -/* Store the eigenvalues isolated by DGEBAL. */ - - i__1 = *ilo - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - wr[i__] = h__[i__ + i__ * h_dim1]; - wi[i__] = 0.; -/* L10: */ - } - i__1 = *n; - for (i__ = *ihi + 1; i__ <= i__1; ++i__) { - wr[i__] = h__[i__ + i__ * h_dim1]; - wi[i__] = 0.; -/* L20: */ - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - if (*ilo == *ihi) { - wr[*ilo] = h__[*ilo + *ilo * h_dim1]; - wi[*ilo] = 0.; - return 0; - } - -/* - Set rows and columns ILO to IHI to zero below the first - subdiagonal. -*/ - - i__1 = *ihi - 2; - for (j = *ilo; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j + 2; i__ <= i__2; ++i__) { - h__[i__ + j * h_dim1] = 0.; -/* L30: */ - } -/* L40: */ - } - nh = *ihi - *ilo + 1; - -/* - Determine the order of the multi-shift QR algorithm to be used. - - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = job; - i__3[1] = 1, a__1[1] = compz; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - ns = ilaenv_(&c__4, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, ( - ftnlen)2); -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = job; - i__3[1] = 1, a__1[1] = compz; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - maxb = ilaenv_(&c__8, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, ( - ftnlen)2); - if (ns <= 2 || ns > nh || maxb >= nh) { - -/* Use the standard double-shift algorithm */ - - dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[ - 1], ilo, ihi, &z__[z_offset], ldz, info); - return 0; - } - maxb = max(3,maxb); -/* Computing MIN */ - i__1 = min(ns,maxb); - ns = min(i__1,15); - -/* - Now 2 < NS <= MAXB < NH. - - Set machine-dependent constants for the stopping criterion. 
- If norm(H) <= sqrt(OVFL), overflow should not occur. -*/ - - unfl = SAFEMINIMUM; - ovfl = 1. / unfl; - dlabad_(&unfl, &ovfl); - ulp = PRECISION; - smlnum = unfl * (nh / ulp); - -/* - I1 and I2 are the indices of the first row and last column of H - to which transformations must be applied. If eigenvalues only are - being computed, I1 and I2 are set inside the main loop. -*/ - - if (wantt) { - i1 = 1; - i2 = *n; - } - -/* ITN is the total number of multiple-shift QR iterations allowed. */ - - itn = nh * 30; - -/* - The main loop begins here. I is the loop index and decreases from - IHI to ILO in steps of at most MAXB. Each iteration of the loop - works with the active submatrix in rows and columns L to I. - Eigenvalues I+1 to IHI have already converged. Either L = ILO or - H(L,L-1) is negligible so that the matrix splits. -*/ - - i__ = *ihi; -L50: - l = *ilo; - if (i__ < *ilo) { - goto L170; - } - -/* - Perform multiple-shift QR iterations on rows and columns ILO to I - until a submatrix of order at most MAXB splits off at the bottom - because a subdiagonal element has become negligible. -*/ - - i__1 = itn; - for (its = 0; its <= i__1; ++its) { - -/* Look for a single small subdiagonal element. */ - - i__2 = l + 1; - for (k = i__; k >= i__2; --k) { - tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 = - h__[k + k * h_dim1], abs(d__2)); - if (tst1 == 0.) { - i__4 = i__ - l + 1; - tst1 = dlanhs_("1", &i__4, &h__[l + l * h_dim1], ldh, &work[1] - ); - } -/* Computing MAX */ - d__2 = ulp * tst1; - if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2, - smlnum)) { - goto L70; - } -/* L60: */ - } -L70: - l = k; - if (l > *ilo) { - -/* H(L,L-1) is negligible. */ - - h__[l + (l - 1) * h_dim1] = 0.; - } - -/* Exit from loop if a submatrix of order <= MAXB has split off. */ - - if (l >= i__ - maxb + 1) { - goto L160; - } - -/* - Now the active submatrix is in rows and columns L to I. 
If - eigenvalues only are being computed, only the active submatrix - need be transformed. -*/ - - if (! wantt) { - i1 = l; - i2 = i__; - } - - if (its == 20 || its == 30) { - -/* Exceptional shifts. */ - - i__2 = i__; - for (ii = i__ - ns + 1; ii <= i__2; ++ii) { - wr[ii] = ((d__1 = h__[ii + (ii - 1) * h_dim1], abs(d__1)) + ( - d__2 = h__[ii + ii * h_dim1], abs(d__2))) * 1.5; - wi[ii] = 0.; -/* L80: */ - } - } else { - -/* Use eigenvalues of trailing submatrix of order NS as shifts. */ - - dlacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) * - h_dim1], ldh, s, &c__15); - dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &wr[i__ - - ns + 1], &wi[i__ - ns + 1], &c__1, &ns, &z__[z_offset], - ldz, &ierr); - if (ierr > 0) { - -/* - If DLAHQR failed to compute all NS eigenvalues, use the - unconverged diagonal elements as the remaining shifts. -*/ - - i__2 = ierr; - for (ii = 1; ii <= i__2; ++ii) { - wr[i__ - ns + ii] = s[ii + ii * 15 - 16]; - wi[i__ - ns + ii] = 0.; -/* L90: */ - } - } - } - -/* - Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns)) - where G is the Hessenberg submatrix H(L:I,L:I) and w is - the vector of shifts (stored in WR and WI). The result is - stored in the local array V. -*/ - - v[0] = 1.; - i__2 = ns + 1; - for (ii = 2; ii <= i__2; ++ii) { - v[ii - 1] = 0.; -/* L100: */ - } - nv = 1; - i__2 = i__; - for (j = i__ - ns + 1; j <= i__2; ++j) { - if (wi[j] >= 0.) { - if (wi[j] == 0.) { - -/* real shift */ - - i__4 = nv + 1; - dcopy_(&i__4, v, &c__1, vv, &c__1); - i__4 = nv + 1; - d__1 = -wr[j]; - dgemv_("No transpose", &i__4, &nv, &c_b15, &h__[l + l * - h_dim1], ldh, vv, &c__1, &d__1, v, &c__1); - ++nv; - } else if (wi[j] > 0.) 
{ - -/* complex conjugate pair of shifts */ - - i__4 = nv + 1; - dcopy_(&i__4, v, &c__1, vv, &c__1); - i__4 = nv + 1; - d__1 = wr[j] * -2.; - dgemv_("No transpose", &i__4, &nv, &c_b15, &h__[l + l * - h_dim1], ldh, v, &c__1, &d__1, vv, &c__1); - i__4 = nv + 1; - itemp = idamax_(&i__4, vv, &c__1); -/* Computing MAX */ - d__2 = (d__1 = vv[itemp - 1], abs(d__1)); - temp = 1. / max(d__2,smlnum); - i__4 = nv + 1; - dscal_(&i__4, &temp, vv, &c__1); - absw = dlapy2_(&wr[j], &wi[j]); - temp = temp * absw * absw; - i__4 = nv + 2; - i__5 = nv + 1; - dgemv_("No transpose", &i__4, &i__5, &c_b15, &h__[l + l * - h_dim1], ldh, vv, &c__1, &temp, v, &c__1); - nv += 2; - } - -/* - Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero, - reset it to the unit vector. -*/ - - itemp = idamax_(&nv, v, &c__1); - temp = (d__1 = v[itemp - 1], abs(d__1)); - if (temp == 0.) { - v[0] = 1.; - i__4 = nv; - for (ii = 2; ii <= i__4; ++ii) { - v[ii - 1] = 0.; -/* L110: */ - } - } else { - temp = max(temp,smlnum); - d__1 = 1. / temp; - dscal_(&nv, &d__1, v, &c__1); - } - } -/* L120: */ - } - -/* Multiple-shift QR step */ - - i__2 = i__ - 1; - for (k = l; k <= i__2; ++k) { - -/* - The first iteration of this loop determines a reflection G - from the vector V and applies it from left and right to H, - thus creating a nonzero bulge below the subdiagonal. - - Each subsequent iteration determines a reflection G to - restore the Hessenberg form in the (K-1)th column, and thus - chases the bulge one step toward the bottom of the active - submatrix. NR is the order of G. 
- - Computing MIN -*/ - i__4 = ns + 1, i__5 = i__ - k + 1; - nr = min(i__4,i__5); - if (k > l) { - dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1); - } - dlarfg_(&nr, v, &v[1], &c__1, &tau); - if (k > l) { - h__[k + (k - 1) * h_dim1] = v[0]; - i__4 = i__; - for (ii = k + 1; ii <= i__4; ++ii) { - h__[ii + (k - 1) * h_dim1] = 0.; -/* L130: */ - } - } - v[0] = 1.; - -/* - Apply G from the left to transform the rows of the matrix in - columns K to I2. -*/ - - i__4 = i2 - k + 1; - dlarfx_("Left", &nr, &i__4, v, &tau, &h__[k + k * h_dim1], ldh, & - work[1]); - -/* - Apply G from the right to transform the columns of the - matrix in rows I1 to min(K+NR,I). - - Computing MIN -*/ - i__5 = k + nr; - i__4 = min(i__5,i__) - i1 + 1; - dlarfx_("Right", &i__4, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh, - &work[1]); - - if (wantz) { - -/* Accumulate transformations in the matrix Z */ - - dlarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1], - ldz, &work[1]); - } -/* L140: */ - } - -/* L150: */ - } - -/* Failure to converge in remaining number of iterations */ - - *info = i__; - return 0; - -L160: - -/* - A submatrix of order <= MAXB in rows and columns L to I has split - off. Use the double-shift QR algorithm to handle it. -*/ - - dlahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &wr[1], &wi[1], - ilo, ihi, &z__[z_offset], ldz, info); - if (*info > 0) { - return 0; - } - -/* - Decrement number of remaining iterations, and return to start of - the main loop with a new value of I. -*/ - - itn -= its; - i__ = l - 1; - goto L50; - -L170: - work[1] = (doublereal) max(1,*n); - return 0; - -/* End of DHSEQR */ - -} /* dhseqr_ */ - -/* Subroutine */ int dlabad_(doublereal *small, doublereal *large) -{ - /* Builtin functions */ - double d_lg10(doublereal *), sqrt(doublereal); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLABAD takes as input the values computed by DLAMCH for underflow and - overflow, and returns the square root of each of these values if the - log of LARGE is sufficiently large. This subroutine is intended to - identify machines with a large exponent range, such as the Crays, and - redefine the underflow and overflow limits to be the square roots of - the values computed by DLAMCH. This subroutine is needed because - DLAMCH does not compensate for poor arithmetic in the upper half of - the exponent range, as is found on a Cray. - - Arguments - ========= - - SMALL (input/output) DOUBLE PRECISION - On entry, the underflow threshold as computed by DLAMCH. - On exit, if LOG10(LARGE) is sufficiently large, the square - root of SMALL, otherwise unchanged. - - LARGE (input/output) DOUBLE PRECISION - On entry, the overflow threshold as computed by DLAMCH. - On exit, if LOG10(LARGE) is sufficiently large, the square - root of LARGE, otherwise unchanged. - - ===================================================================== - - - If it looks like we're on a Cray, take the square root of - SMALL and LARGE to avoid overflow and underflow problems. 
-*/ - - if (d_lg10(large) > 2e3) { - *small = sqrt(*small); - *large = sqrt(*large); - } - - return 0; - -/* End of DLABAD */ - -} /* dlabad_ */ - -/* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal * - a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq, - doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer - *ldy) -{ - /* System generated locals */ - integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, - i__3; - - /* Local variables */ - static integer i__; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *), dgemv_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *), dlarfg_(integer *, doublereal *, - doublereal *, integer *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLABRD reduces the first NB rows and columns of a real general - m by n matrix A to upper or lower bidiagonal form by an orthogonal - transformation Q' * A * P, and returns the matrices X and Y which - are needed to apply the transformation to the unreduced part of A. - - If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower - bidiagonal form. - - This is an auxiliary routine called by DGEBRD - - Arguments - ========= - - M (input) INTEGER - The number of rows in the matrix A. - - N (input) INTEGER - The number of columns in the matrix A. - - NB (input) INTEGER - The number of leading rows and columns of A to be reduced. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the m by n general matrix to be reduced. - On exit, the first NB rows and columns of the matrix are - overwritten; the rest of the array is unchanged. 
- If m >= n, elements on and below the diagonal in the first NB - columns, with the array TAUQ, represent the orthogonal - matrix Q as a product of elementary reflectors; and - elements above the diagonal in the first NB rows, with the - array TAUP, represent the orthogonal matrix P as a product - of elementary reflectors. - If m < n, elements below the diagonal in the first NB - columns, with the array TAUQ, represent the orthogonal - matrix Q as a product of elementary reflectors, and - elements on and above the diagonal in the first NB rows, - with the array TAUP, represent the orthogonal matrix P as - a product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - D (output) DOUBLE PRECISION array, dimension (NB) - The diagonal elements of the first NB rows and columns of - the reduced matrix. D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (NB) - The off-diagonal elements of the first NB rows and columns of - the reduced matrix. - - TAUQ (output) DOUBLE PRECISION array dimension (NB) - The scalar factors of the elementary reflectors which - represent the orthogonal matrix Q. See Further Details. - - TAUP (output) DOUBLE PRECISION array, dimension (NB) - The scalar factors of the elementary reflectors which - represent the orthogonal matrix P. See Further Details. - - X (output) DOUBLE PRECISION array, dimension (LDX,NB) - The m-by-nb matrix X required to update the unreduced part - of A. - - LDX (input) INTEGER - The leading dimension of the array X. LDX >= M. - - Y (output) DOUBLE PRECISION array, dimension (LDY,NB) - The n-by-nb matrix Y required to update the unreduced part - of A. - - LDY (output) INTEGER - The leading dimension of the array Y. LDY >= N. - - Further Details - =============== - - The matrices Q and P are represented as products of elementary - reflectors: - - Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . 
G(nb) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are real scalars, and v and u are real vectors. - - If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in - A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in - A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in - A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in - A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - The elements of the vectors v and u together form the m-by-nb matrix - V and the nb-by-n matrix U' which are needed, with X and Y, to apply - the transformation to the unreduced part of the matrix, using a block - update of the form: A := A - V*Y' - X*U'. - - The contents of A on exit are illustrated by the following examples - with nb = 2: - - m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): - - ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) - ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) - ( v1 v2 a a a ) ( v1 1 a a a a ) - ( v1 v2 a a a ) ( v1 v2 a a a a ) - ( v1 v2 a a a ) ( v1 v2 a a a a ) - ( v1 v2 a a a ) - - where a denotes an element of the original matrix which is unchanged, - vi denotes an element of the vector defining H(i), and ui an element - of the vector defining G(i). 
- - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tauq; - --taup; - x_dim1 = *ldx; - x_offset = 1 + x_dim1 * 1; - x -= x_offset; - y_dim1 = *ldy; - y_offset = 1 + y_dim1 * 1; - y -= y_offset; - - /* Function Body */ - if (*m <= 0 || *n <= 0) { - return 0; - } - - if (*m >= *n) { - -/* Reduce to upper bidiagonal form */ - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Update A(i:m,i) */ - - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1], - lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + i__ * a_dim1] - , &c__1); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + x_dim1], - ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[i__ + i__ * - a_dim1], &c__1); - -/* Generate reflection Q(i) to annihilate A(i+1:m,i) */ - - i__2 = *m - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * - a_dim1], &c__1, &tauq[i__]); - d__[i__] = a[i__ + i__ * a_dim1]; - if (i__ < *n) { - a[i__ + i__ * a_dim1] = 1.; - -/* Compute Y(i+1:n,i) */ - - i__2 = *m - i__ + 1; - i__3 = *n - i__; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + (i__ + 1) * - a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, - &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + a_dim1], - lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ * - y_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 + - y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[ - i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &x[i__ + x_dim1], - ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b29, 
&y[i__ * - y_dim1 + 1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * - a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15, - &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *n - i__; - dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1); - -/* Update A(i,i+1:n) */ - - i__2 = *n - i__; - dgemv_("No transpose", &i__2, &i__, &c_b151, &y[i__ + 1 + - y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + - (i__ + 1) * a_dim1], lda); - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * - a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[ - i__ + (i__ + 1) * a_dim1], lda); - -/* Generate reflection P(i) to annihilate A(i,i+2:n) */ - - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min( - i__3,*n) * a_dim1], lda, &taup[i__]); - e[i__] = a[i__ + (i__ + 1) * a_dim1]; - a[i__ + (i__ + 1) * a_dim1] = 1.; - -/* Compute X(i+1:m,i) */ - - i__2 = *m - i__; - i__3 = *n - i__; - dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + ( - i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], - lda, &c_b29, &x[i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *n - i__; - dgemv_("Transpose", &i__2, &i__, &c_b15, &y[i__ + 1 + y_dim1], - ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[ - i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - dgemv_("No transpose", &i__2, &i__, &c_b151, &a[i__ + 1 + - a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) * - a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, & - c_b29, &x[i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 + - x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *m - i__; - dscal_(&i__2, &taup[i__], &x[i__ + 
1 + i__ * x_dim1], &c__1); - } -/* L10: */ - } - } else { - -/* Reduce to lower bidiagonal form */ - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Update A(i,i:n) */ - - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + y_dim1], - ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1] - , lda); - i__2 = i__ - 1; - i__3 = *n - i__ + 1; - dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[i__ * a_dim1 + 1], - lda, &x[i__ + x_dim1], ldx, &c_b15, &a[i__ + i__ * a_dim1] - , lda); - -/* Generate reflection P(i) to annihilate A(i,i+1:n) */ - - i__2 = *n - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * - a_dim1], lda, &taup[i__]); - d__[i__] = a[i__ + i__ * a_dim1]; - if (i__ < *m) { - a[i__ + i__ * a_dim1] = 1.; - -/* Compute X(i+1:m,i) */ - - i__2 = *m - i__; - i__3 = *n - i__ + 1; - dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + i__ - * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, & - x[i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &y[i__ + y_dim1], - ldy, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[i__ * - x_dim1 + 1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + - a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__ + 1; - dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ * a_dim1 - + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[ - i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 + - x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *m - i__; - dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); - -/* Update A(i+1:m,i) */ - - i__2 = *m - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, 
&i__3, &c_b151, &a[i__ + 1 + - a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + - 1 + i__ * a_dim1], &c__1); - i__2 = *m - i__; - dgemv_("No transpose", &i__2, &i__, &c_b151, &x[i__ + 1 + - x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[ - i__ + 1 + i__ * a_dim1], &c__1); - -/* Generate reflection Q(i) to annihilate A(i+2:m,i) */ - - i__2 = *m - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) + - i__ * a_dim1], &c__1, &tauq[i__]); - e[i__] = a[i__ + 1 + i__ * a_dim1]; - a[i__ + 1 + i__ * a_dim1] = 1.; - -/* Compute Y(i+1:n,i) */ - - i__2 = *m - i__; - i__3 = *n - i__; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (i__ + - 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, - &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1] - , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[ - i__ * y_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 + - y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[ - i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__; - dgemv_("Transpose", &i__2, &i__, &c_b15, &x[i__ + 1 + x_dim1], - ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[ - i__ * y_dim1 + 1], &c__1); - i__2 = *n - i__; - dgemv_("Transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) * - a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15, - &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *n - i__; - dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1); - } -/* L20: */ - } - } - return 0; - -/* End of DLABRD */ - -} /* dlabrd_ */ - -/* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal * - a, integer *lda, doublereal *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j; - extern logical lsame_(char *, char 
*); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLACPY copies all or part of a two-dimensional matrix A to another - matrix B. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies the part of the matrix A to be copied to B. - = 'U': Upper triangular part - = 'L': Lower triangular part - Otherwise: All of the matrix A - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,N) - The m by n matrix A. If UPLO = 'U', only the upper triangle - or trapezoid is accessed; if UPLO = 'L', only the lower - triangle or trapezoid is accessed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - B (output) DOUBLE PRECISION array, dimension (LDB,N) - On exit, B = A in the locations specified by UPLO. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,M). 
- - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = min(j,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; -/* L10: */ - } -/* L20: */ - } - } else if (lsame_(uplo, "L")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; -/* L30: */ - } -/* L40: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; -/* L50: */ - } -/* L60: */ - } - } - return 0; - -/* End of DLACPY */ - -} /* dlacpy_ */ - -/* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__, - doublereal *d__, doublereal *p, doublereal *q) -{ - static doublereal e, f; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLADIV performs complex division in real arithmetic - - a + i*b - p + i*q = --------- - c + i*d - - The algorithm is due to Robert L. Smith and can be found - in D. Knuth, The art of Computer Programming, Vol.2, p.195 - - Arguments - ========= - - A (input) DOUBLE PRECISION - B (input) DOUBLE PRECISION - C (input) DOUBLE PRECISION - D (input) DOUBLE PRECISION - The scalars a, b, c, and d in the above expression. - - P (output) DOUBLE PRECISION - Q (output) DOUBLE PRECISION - The scalars p and q in the above expression. 
- - ===================================================================== -*/ - - - if (abs(*d__) < abs(*c__)) { - e = *d__ / *c__; - f = *c__ + *d__ * e; - *p = (*a + *b * e) / f; - *q = (*b - *a * e) / f; - } else { - e = *c__ / *d__; - f = *d__ + *c__ * e; - *p = (*b + *a * e) / f; - *q = (-(*a) + *b * e) / f; - } - - return 0; - -/* End of DLADIV */ - -} /* dladiv_ */ - -/* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__, - doublereal *rt1, doublereal *rt2) -{ - /* System generated locals */ - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal ab, df, tb, sm, rt, adf, acmn, acmx; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix - [ A B ] - [ B C ]. - On return, RT1 is the eigenvalue of larger absolute value, and RT2 - is the eigenvalue of smaller absolute value. - - Arguments - ========= - - A (input) DOUBLE PRECISION - The (1,1) element of the 2-by-2 matrix. - - B (input) DOUBLE PRECISION - The (1,2) and (2,1) elements of the 2-by-2 matrix. - - C (input) DOUBLE PRECISION - The (2,2) element of the 2-by-2 matrix. - - RT1 (output) DOUBLE PRECISION - The eigenvalue of larger absolute value. - - RT2 (output) DOUBLE PRECISION - The eigenvalue of smaller absolute value. - - Further Details - =============== - - RT1 is accurate to a few ulps barring over/underflow. - - RT2 may be inaccurate if there is massive cancellation in the - determinant A*C-B*B; higher precision or correctly rounded or - correctly truncated arithmetic would be needed to compute RT2 - accurately in all cases. - - Overflow is possible only if RT1 is within a factor of 5 of overflow. - Underflow is harmless if the input data is 0 or exceeds - underflow_threshold / macheps. 
- - ===================================================================== - - - Compute the eigenvalues -*/ - - sm = *a + *c__; - df = *a - *c__; - adf = abs(df); - tb = *b + *b; - ab = abs(tb); - if (abs(*a) > abs(*c__)) { - acmx = *a; - acmn = *c__; - } else { - acmx = *c__; - acmn = *a; - } - if (adf > ab) { -/* Computing 2nd power */ - d__1 = ab / adf; - rt = adf * sqrt(d__1 * d__1 + 1.); - } else if (adf < ab) { -/* Computing 2nd power */ - d__1 = adf / ab; - rt = ab * sqrt(d__1 * d__1 + 1.); - } else { - -/* Includes case AB=ADF=0 */ - - rt = ab * sqrt(2.); - } - if (sm < 0.) { - *rt1 = (sm - rt) * .5; - -/* - Order of execution important. - To get fully accurate smaller eigenvalue, - next line needs to be executed in higher precision. -*/ - - *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; - } else if (sm > 0.) { - *rt1 = (sm + rt) * .5; - -/* - Order of execution important. - To get fully accurate smaller eigenvalue, - next line needs to be executed in higher precision. -*/ - - *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; - } else { - -/* Includes case RT1 = RT2 = 0 */ - - *rt1 = rt * .5; - *rt2 = rt * -.5; - } - return 0; - -/* End of DLAE2 */ - -} /* dlae2_ */ - -/* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n, - doublereal *d__, doublereal *e, doublereal *q, integer *ldq, - doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork, - integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; - doublereal d__1; - - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2; - static doublereal temp; - static integer curr; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer iperm; - extern /* 
Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer indxq, iwrem; - extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *, - integer *, integer *, doublereal *, integer *, doublereal *, - integer *, integer *); - static integer iqptr; - extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *, - integer *, integer *, integer *, doublereal *, doublereal *, - integer *, integer *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, integer *, integer *, doublereal - *, doublereal *, integer *, integer *); - static integer tlvls; - extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *); - static integer igivcl; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer igivnm, submat, curprb, subpbs, igivpt; - extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, integer *); - static integer curlvl, matsiz, iprmpt, smlsiz; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLAED0 computes all eigenvalues and corresponding eigenvectors of a - symmetric tridiagonal matrix using the divide and conquer method. - - Arguments - ========= - - ICOMPQ (input) INTEGER - = 0: Compute eigenvalues only. - = 1: Compute eigenvectors of original dense symmetric matrix - also. On entry, Q contains the orthogonal matrix used - to reduce the original matrix to tridiagonal form. - = 2: Compute eigenvalues and eigenvectors of tridiagonal - matrix. - - QSIZ (input) INTEGER - The dimension of the orthogonal matrix used to reduce - the full matrix to tridiagonal form. 
QSIZ >= N if ICOMPQ = 1. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the main diagonal of the tridiagonal matrix. - On exit, its eigenvalues. - - E (input) DOUBLE PRECISION array, dimension (N-1) - The off-diagonal elements of the tridiagonal matrix. - On exit, E has been destroyed. - - Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) - On entry, Q must contain an N-by-N orthogonal matrix. - If ICOMPQ = 0 Q is not referenced. - If ICOMPQ = 1 On entry, Q is a subset of the columns of the - orthogonal matrix used to reduce the full - matrix to tridiagonal form corresponding to - the subset of the full matrix which is being - decomposed at this time. - If ICOMPQ = 2 On entry, Q will be the identity matrix. - On exit, Q contains the eigenvectors of the - tridiagonal matrix. - - LDQ (input) INTEGER - The leading dimension of the array Q. If eigenvectors are - desired, then LDQ >= max(1,N). In any case, LDQ >= 1. - - QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N) - Referenced only when ICOMPQ = 1. Used to store parts of - the eigenvector matrix when the updating matrix multiplies - take place. - - LDQS (input) INTEGER - The leading dimension of the array QSTORE. If ICOMPQ = 1, - then LDQS >= max(1,N). In any case, LDQS >= 1. - - WORK (workspace) DOUBLE PRECISION array, - If ICOMPQ = 0 or 1, the dimension of WORK must be at least - 1 + 3*N + 2*N*lg N + 2*N**2 - ( lg( N ) = smallest integer k - such that 2^k >= N ) - If ICOMPQ = 2, the dimension of WORK must be at least - 4*N + N**2. - - IWORK (workspace) INTEGER array, - If ICOMPQ = 0 or 1, the dimension of IWORK must be at least - 6 + 6*N + 5*N*lg N. - ( lg( N ) = smallest integer k - such that 2^k >= N ) - If ICOMPQ = 2, the dimension of IWORK must be at least - 3 + 5*N. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- > 0: The algorithm failed to compute an eigenvalue while - working on the submatrix lying in rows and columns - INFO/(N+1) through mod(INFO,N+1). - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - qstore_dim1 = *ldqs; - qstore_offset = 1 + qstore_dim1 * 1; - qstore -= qstore_offset; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 2) { - *info = -1; - } else if ((*icompq == 1 && *qsiz < max(0,*n))) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ldq < max(1,*n)) { - *info = -7; - } else if (*ldqs < max(1,*n)) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED0", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - -/* - Determine the size and placement of the submatrices, and save in - the leading elements of IWORK. -*/ - - iwork[1] = *n; - subpbs = 1; - tlvls = 0; -L10: - if (iwork[subpbs] > smlsiz) { - for (j = subpbs; j >= 1; --j) { - iwork[j * 2] = (iwork[j] + 1) / 2; - iwork[((j) << (1)) - 1] = iwork[j] / 2; -/* L20: */ - } - ++tlvls; - subpbs <<= 1; - goto L10; - } - i__1 = subpbs; - for (j = 2; j <= i__1; ++j) { - iwork[j] += iwork[j - 1]; -/* L30: */ - } - -/* - Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 - using rank-1 modifications (cuts). 
-*/ - - spm1 = subpbs - 1; - i__1 = spm1; - for (i__ = 1; i__ <= i__1; ++i__) { - submat = iwork[i__] + 1; - smm1 = submat - 1; - d__[smm1] -= (d__1 = e[smm1], abs(d__1)); - d__[submat] -= (d__1 = e[smm1], abs(d__1)); -/* L40: */ - } - - indxq = ((*n) << (2)) + 3; - if (*icompq != 2) { - -/* - Set up workspaces for eigenvalues only/accumulate new vectors - routine -*/ - - temp = log((doublereal) (*n)) / log(2.); - lgn = (integer) temp; - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - iprmpt = indxq + *n + 1; - iperm = iprmpt + *n * lgn; - iqptr = iperm + *n * lgn; - igivpt = iqptr + *n + 2; - igivcl = igivpt + *n * lgn; - - igivnm = 1; - iq = igivnm + ((*n) << (1)) * lgn; -/* Computing 2nd power */ - i__1 = *n; - iwrem = iq + i__1 * i__1 + 1; - -/* Initialize pointers */ - - i__1 = subpbs; - for (i__ = 0; i__ <= i__1; ++i__) { - iwork[iprmpt + i__] = 1; - iwork[igivpt + i__] = 1; -/* L50: */ - } - iwork[iqptr] = 1; - } - -/* - Solve each submatrix eigenproblem at the bottom of the divide and - conquer tree. 
-*/ - - curr = 0; - i__1 = spm1; - for (i__ = 0; i__ <= i__1; ++i__) { - if (i__ == 0) { - submat = 1; - matsiz = iwork[1]; - } else { - submat = iwork[i__] + 1; - matsiz = iwork[i__ + 1] - iwork[i__]; - } - if (*icompq == 2) { - dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat + - submat * q_dim1], ldq, &work[1], info); - if (*info != 0) { - goto L130; - } - } else { - dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 + - iwork[iqptr + curr]], &matsiz, &work[1], info); - if (*info != 0) { - goto L130; - } - if (*icompq == 1) { - dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b15, &q[submat * - q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]], - &matsiz, &c_b29, &qstore[submat * qstore_dim1 + 1], - ldqs); - } -/* Computing 2nd power */ - i__2 = matsiz; - iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2; - ++curr; - } - k = 1; - i__2 = iwork[i__ + 1]; - for (j = submat; j <= i__2; ++j) { - iwork[indxq + j] = k; - ++k; -/* L60: */ - } -/* L70: */ - } - -/* - Successively merge eigensystems of adjacent submatrices - into eigensystem for the corresponding larger matrix. - - while ( SUBPBS > 1 ) -*/ - - curlvl = 1; -L80: - if (subpbs > 1) { - spm2 = subpbs - 2; - i__1 = spm2; - for (i__ = 0; i__ <= i__1; i__ += 2) { - if (i__ == 0) { - submat = 1; - matsiz = iwork[2]; - msd2 = iwork[1]; - curprb = 0; - } else { - submat = iwork[i__] + 1; - matsiz = iwork[i__ + 2] - iwork[i__]; - msd2 = matsiz / 2; - ++curprb; - } - -/* - Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) - into an eigensystem of size MATSIZ. - DLAED1 is used only for the full eigensystem of a tridiagonal - matrix. - DLAED7 handles the cases in which eigenvalues only or eigenvalues - and eigenvectors of a full symmetric matrix (which was reduced to - tridiagonal form) are desired. 
-*/ - - if (*icompq == 2) { - dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1], - ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], & - msd2, &work[1], &iwork[subpbs + 1], info); - } else { - dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[ - submat], &qstore[submat * qstore_dim1 + 1], ldqs, & - iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, & - work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm] - , &iwork[igivpt], &iwork[igivcl], &work[igivnm], & - work[iwrem], &iwork[subpbs + 1], info); - } - if (*info != 0) { - goto L130; - } - iwork[i__ / 2 + 1] = iwork[i__ + 2]; -/* L90: */ - } - subpbs /= 2; - ++curlvl; - goto L80; - } - -/* - end while - - Re-merge the eigenvalues/vectors which were deflated at the final - merge step. -*/ - - if (*icompq == 1) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - j = iwork[indxq + i__]; - work[i__] = d__[j]; - dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 - + 1], &c__1); -/* L100: */ - } - dcopy_(n, &work[1], &c__1, &d__[1], &c__1); - } else if (*icompq == 2) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - j = iwork[indxq + i__]; - work[i__] = d__[j]; - dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1); -/* L110: */ - } - dcopy_(n, &work[1], &c__1, &d__[1], &c__1); - dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq); - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - j = iwork[indxq + i__]; - work[i__] = d__[j]; -/* L120: */ - } - dcopy_(n, &work[1], &c__1, &d__[1], &c__1); - } - goto L140; - -L130: - *info = submat * (*n + 1) + submat + matsiz - 1; - -L140: - return 0; - -/* End of DLAED0 */ - -} /* dlaed0_ */ - -/* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q, - integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, - doublereal *work, integer *iwork, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, i__1, i__2; - - /* Local variables */ - static integer i__, k, n1, 
n2, is, iw, iz, iq2, zpp1, indx, indxc; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer indxp; - extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *, - integer *, integer *, integer *, integer *), dlaed3_(integer *, - integer *, integer *, doublereal *, doublereal *, integer *, - doublereal *, doublereal *, doublereal *, integer *, integer *, - doublereal *, doublereal *, integer *); - static integer idlmda; - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), xerbla_(char *, integer *); - static integer coltyp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLAED1 computes the updated eigensystem of a diagonal - matrix after modification by a rank-one symmetric matrix. This - routine is used only for the eigenproblem which requires all - eigenvalues and eigenvectors of a tridiagonal matrix. DLAED7 handles - the case in which eigenvalues only or eigenvalues and eigenvectors - of a full symmetric matrix (which was reduced to tridiagonal form) - are desired. - - T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) - - where Z = Q'u, u is a vector of length N with ones in the - CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. - - The eigenvectors of the original matrix are stored in Q, and the - eigenvalues are in D. The algorithm consists of three stages: - - The first stage consists of deflating the size of the problem - when there are multiple eigenvalues or if there is a zero in - the Z vector. For each such occurence the dimension of the - secular equation problem is reduced by one. 
This stage is - performed by the routine DLAED2. - - The second stage consists of calculating the updated - eigenvalues. This is done by finding the roots of the secular - equation via the routine DLAED4 (as called by DLAED3). - This routine also calculates the eigenvectors of the current - problem. - - The final stage consists of computing the updated eigenvectors - directly using the updated eigenvalues. The eigenvectors for - the current problem are multiplied with the eigenvectors from - the overall problem. - - Arguments - ========= - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the eigenvalues of the rank-1-perturbed matrix. - On exit, the eigenvalues of the repaired matrix. - - Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) - On entry, the eigenvectors of the rank-1-perturbed matrix. - On exit, the eigenvectors of the repaired tridiagonal matrix. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). - - INDXQ (input/output) INTEGER array, dimension (N) - On entry, the permutation which separately sorts the two - subproblems in D into ascending order. - On exit, the permutation which will reintegrate the - subproblems back into sorted order, - i.e. D( INDXQ( I = 1, N ) ) will be in ascending order. - - RHO (input) DOUBLE PRECISION - The subdiagonal entry used to create the rank-1 modification. - - CUTPNT (input) INTEGER - The location of the last eigenvalue in the leading sub-matrix. - min(1,N) <= CUTPNT <= N/2. - - WORK (workspace) DOUBLE PRECISION array, dimension (4*N + N**2) - - IWORK (workspace) INTEGER array, dimension (4*N) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- > 0: if INFO = 1, an eigenvalue did not converge - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - Modified by Francoise Tisseur, University of Tennessee. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --indxq; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -1; - } else if (*ldq < max(1,*n)) { - *info = -4; - } else /* if(complicated condition) */ { -/* Computing MIN */ - i__1 = 1, i__2 = *n / 2; - if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) { - *info = -7; - } - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED1", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* - The following values are integer pointers which indicate - the portion of the workspace - used by a particular array in DLAED2 and DLAED3. -*/ - - iz = 1; - idlmda = iz + *n; - iw = idlmda + *n; - iq2 = iw + *n; - - indx = 1; - indxc = indx + *n; - coltyp = indxc + *n; - indxp = coltyp + *n; - - -/* - Form the z-vector which consists of the last row of Q_1 and the - first row of Q_2. -*/ - - dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1); - zpp1 = *cutpnt + 1; - i__1 = *n - *cutpnt; - dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1); - -/* Deflate eigenvalues. */ - - dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[ - iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[ - indxc], &iwork[indxp], &iwork[coltyp], info); - - if (*info != 0) { - goto L20; - } - -/* Solve Secular Equation. 
*/ - - if (k != 0) { - is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp + - 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2; - dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda], - &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[ - is], info); - if (*info != 0) { - goto L20; - } - -/* Prepare the INDXQ sorting permutation. */ - - n1 = k; - n2 = *n - k; - dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - indxq[i__] = i__; -/* L10: */ - } - } - -L20: - return 0; - -/* End of DLAED1 */ - -} /* dlaed1_ */ - -/* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal * - d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, - doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2, - integer *indx, integer *indxc, integer *indxp, integer *coltyp, - integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, i__1, i__2; - doublereal d__1, d__2, d__3, d__4; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal c__; - static integer i__, j; - static doublereal s, t; - static integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1; - static doublereal eps, tau, tol; - static integer psm[4], imax, jmax; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - static integer ctot[4]; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *), dcopy_(integer *, doublereal *, integer *, doublereal - *, integer *); - - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), dlacpy_(char *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLAED2 merges the two sets of eigenvalues together into a single - sorted set. Then it tries to deflate the size of the problem. - There are two ways in which deflation can occur: when two or more - eigenvalues are close together or if there is a tiny entry in the - Z vector. For each such occurrence the order of the related secular - equation problem is reduced by one. - - Arguments - ========= - - K (output) INTEGER - The number of non-deflated eigenvalues, and the order of the - related secular equation. 0 <= K <=N. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - N1 (input) INTEGER - The location of the last eigenvalue in the leading sub-matrix. - min(1,N) <= N1 <= N/2. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, D contains the eigenvalues of the two submatrices to - be combined. - On exit, D contains the trailing (N-K) updated eigenvalues - (those which were deflated) sorted into increasing order. - - Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) - On entry, Q contains the eigenvectors of two submatrices in - the two square blocks with corners at (1,1), (N1,N1) - and (N1+1, N1+1), (N,N). - On exit, Q contains the trailing (N-K) updated eigenvectors - (those which were deflated) in its last N-K columns. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). - - INDXQ (input/output) INTEGER array, dimension (N) - The permutation which separately sorts the two sub-problems - in D into ascending order. Note that elements in the second - half of this permutation must first have N1 added to their - values. Destroyed on exit. 
- - RHO (input/output) DOUBLE PRECISION - On entry, the off-diagonal element associated with the rank-1 - cut which originally split the two submatrices which are now - being recombined. - On exit, RHO has been modified to the value required by - DLAED3. - - Z (input) DOUBLE PRECISION array, dimension (N) - On entry, Z contains the updating vector (the last - row of the first sub-eigenvector matrix and the first row of - the second sub-eigenvector matrix). - On exit, the contents of Z have been destroyed by the updating - process. - - DLAMDA (output) DOUBLE PRECISION array, dimension (N) - A copy of the first K eigenvalues which will be used by - DLAED3 to form the secular equation. - - W (output) DOUBLE PRECISION array, dimension (N) - The first k values of the final deflation-altered z-vector - which will be passed to DLAED3. - - Q2 (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2) - A copy of the first K eigenvectors which will be used by - DLAED3 in a matrix multiply (DGEMM) to solve for the new - eigenvectors. - - INDX (workspace) INTEGER array, dimension (N) - The permutation used to sort the contents of DLAMDA into - ascending order. - - INDXC (output) INTEGER array, dimension (N) - The permutation used to arrange the columns of the deflated - Q matrix into three groups: the first group contains non-zero - elements only at and above N1, the second contains - non-zero elements only below N1, and the third is dense. - - INDXP (workspace) INTEGER array, dimension (N) - The permutation used to place deflated values of D at the end - of the array. INDXP(1:K) points to the nondeflated D-values - and INDXP(K+1:N) points to the deflated eigenvalues. - - COLTYP (workspace/output) INTEGER array, dimension (N) - During execution, a label which will indicate which of the - following types a column in the Q2 matrix is: - 1 : non-zero in the upper half only; - 2 : dense; - 3 : non-zero in the lower half only; - 4 : deflated. 
- On exit, COLTYP(i) is the number of columns of type i, - for i=1 to 4 only. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - Modified by Francoise Tisseur, University of Tennessee. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --indxq; - --z__; - --dlamda; - --w; - --q2; - --indx; - --indxc; - --indxp; - --coltyp; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -2; - } else if (*ldq < max(1,*n)) { - *info = -6; - } else /* if(complicated condition) */ { -/* Computing MIN */ - i__1 = 1, i__2 = *n / 2; - if (min(i__1,i__2) > *n1 || *n / 2 < *n1) { - *info = -3; - } - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - n2 = *n - *n1; - n1p1 = *n1 + 1; - - if (*rho < 0.) { - dscal_(&n2, &c_b151, &z__[n1p1], &c__1); - } - -/* - Normalize z so that norm(z) = 1. Since z is the concatenation of - two normalized vectors, norm2(z) = sqrt(2). -*/ - - t = 1. 
/ sqrt(2.); - dscal_(n, &t, &z__[1], &c__1); - -/* RHO = ABS( norm(z)**2 * RHO ) */ - - *rho = (d__1 = *rho * 2., abs(d__1)); - -/* Sort the eigenvalues into increasing order */ - - i__1 = *n; - for (i__ = n1p1; i__ <= i__1; ++i__) { - indxq[i__] += *n1; -/* L10: */ - } - -/* re-integrate the deflated parts from the last pass */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dlamda[i__] = d__[indxq[i__]]; -/* L20: */ - } - dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]); - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - indx[i__] = indxq[indxc[i__]]; -/* L30: */ - } - -/* Calculate the allowable deflation tolerance */ - - imax = idamax_(n, &z__[1], &c__1); - jmax = idamax_(n, &d__[1], &c__1); - eps = EPSILON; -/* Computing MAX */ - d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2)) - ; - tol = eps * 8. * max(d__3,d__4); - -/* - If the rank-1 modifier is small enough, no more needs to be done - except to reorganize Q so that its columns correspond with the - elements in D. -*/ - - if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { - *k = 0; - iq2 = 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__ = indx[j]; - dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1); - dlamda[j] = d__[i__]; - iq2 += *n; -/* L40: */ - } - dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq); - dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1); - goto L190; - } - -/* - If there are multiple eigenvalues then the problem deflates. Here - the number of equal eigenvalues are found. As each equal - eigenvalue is found, an elementary reflector is computed to rotate - the corresponding eigensubspace so that the corresponding - components of Z are zero in this new basis. 
-*/ - - i__1 = *n1; - for (i__ = 1; i__ <= i__1; ++i__) { - coltyp[i__] = 1; -/* L50: */ - } - i__1 = *n; - for (i__ = n1p1; i__ <= i__1; ++i__) { - coltyp[i__] = 3; -/* L60: */ - } - - - *k = 0; - k2 = *n + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - nj = indx[j]; - if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - coltyp[nj] = 4; - indxp[k2] = nj; - if (j == *n) { - goto L100; - } - } else { - pj = nj; - goto L80; - } -/* L70: */ - } -L80: - ++j; - nj = indx[j]; - if (j > *n) { - goto L100; - } - if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - coltyp[nj] = 4; - indxp[k2] = nj; - } else { - -/* Check if eigenvalues are close enough to allow deflation. */ - - s = z__[pj]; - c__ = z__[nj]; - -/* - Find sqrt(a**2+b**2) without overflow or - destructive underflow. -*/ - - tau = dlapy2_(&c__, &s); - t = d__[nj] - d__[pj]; - c__ /= tau; - s = -s / tau; - if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { - -/* Deflation is possible. */ - - z__[nj] = tau; - z__[pj] = 0.; - if (coltyp[nj] != coltyp[pj]) { - coltyp[nj] = 2; - } - coltyp[pj] = 4; - drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, & - c__, &s); -/* Computing 2nd power */ - d__1 = c__; -/* Computing 2nd power */ - d__2 = s; - t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2); -/* Computing 2nd power */ - d__1 = s; -/* Computing 2nd power */ - d__2 = c__; - d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2); - d__[pj] = t; - --k2; - i__ = 1; -L90: - if (k2 + i__ <= *n) { - if (d__[pj] < d__[indxp[k2 + i__]]) { - indxp[k2 + i__ - 1] = indxp[k2 + i__]; - indxp[k2 + i__] = pj; - ++i__; - goto L90; - } else { - indxp[k2 + i__ - 1] = pj; - } - } else { - indxp[k2 + i__ - 1] = pj; - } - pj = nj; - } else { - ++(*k); - dlamda[*k] = d__[pj]; - w[*k] = z__[pj]; - indxp[*k] = pj; - pj = nj; - } - } - goto L80; -L100: - -/* Record the last eigenvalue. 
*/ - - ++(*k); - dlamda[*k] = d__[pj]; - w[*k] = z__[pj]; - indxp[*k] = pj; - -/* - Count up the total number of the various types of columns, then - form a permutation which positions the four column types into - four uniform groups (although one or more of these groups may be - empty). -*/ - - for (j = 1; j <= 4; ++j) { - ctot[j - 1] = 0; -/* L110: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - ct = coltyp[j]; - ++ctot[ct - 1]; -/* L120: */ - } - -/* PSM(*) = Position in SubMatrix (of types 1 through 4) */ - - psm[0] = 1; - psm[1] = ctot[0] + 1; - psm[2] = psm[1] + ctot[1]; - psm[3] = psm[2] + ctot[2]; - *k = *n - ctot[3]; - -/* - Fill out the INDXC array so that the permutation which it induces - will place all type-1 columns first, all type-2 columns next, - then all type-3's, and finally all type-4's. -*/ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - js = indxp[j]; - ct = coltyp[js]; - indx[psm[ct - 1]] = js; - indxc[psm[ct - 1]] = j; - ++psm[ct - 1]; -/* L130: */ - } - -/* - Sort the eigenvalues and corresponding eigenvectors into DLAMDA - and Q2 respectively. The eigenvalues/vectors which were not - deflated go into the first K slots of DLAMDA and Q2 respectively, - while those which were deflated go into the last N - K slots. 
-*/ - - i__ = 1; - iq1 = 1; - iq2 = (ctot[0] + ctot[1]) * *n1 + 1; - i__1 = ctot[0]; - for (j = 1; j <= i__1; ++j) { - js = indx[i__]; - dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1); - z__[i__] = d__[js]; - ++i__; - iq1 += *n1; -/* L140: */ - } - - i__1 = ctot[1]; - for (j = 1; j <= i__1; ++j) { - js = indx[i__]; - dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1); - dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1); - z__[i__] = d__[js]; - ++i__; - iq1 += *n1; - iq2 += n2; -/* L150: */ - } - - i__1 = ctot[2]; - for (j = 1; j <= i__1; ++j) { - js = indx[i__]; - dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1); - z__[i__] = d__[js]; - ++i__; - iq2 += n2; -/* L160: */ - } - - iq1 = iq2; - i__1 = ctot[3]; - for (j = 1; j <= i__1; ++j) { - js = indx[i__]; - dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1); - iq2 += *n; - z__[i__] = d__[js]; - ++i__; -/* L170: */ - } - -/* - The deflated eigenvalues and their corresponding vectors go back - into the last N - K slots of D and Q respectively. -*/ - - dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq); - i__1 = *n - *k; - dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1); - -/* Copy CTOT into COLTYP for referencing in DLAED3. 
*/ - - for (j = 1; j <= 4; ++j) { - coltyp[j] = ctot[j - 1]; -/* L180: */ - } - -L190: - return 0; - -/* End of DLAED2 */ - -} /* dlaed2_ */ - -/* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal * - d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda, - doublereal *q2, integer *indx, integer *ctot, doublereal *w, - doublereal *s, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, i__1, i__2; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer i__, j, n2, n12, ii, n23, iq2; - static doublereal temp; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *), - dcopy_(integer *, doublereal *, integer *, doublereal *, integer - *), dlaed4_(integer *, integer *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, integer *); - extern doublereal dlamc3_(doublereal *, doublereal *); - extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *), - dlaset_(char *, integer *, integer *, doublereal *, doublereal *, - doublereal *, integer *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - June 30, 1999 - - - Purpose - ======= - - DLAED3 finds the roots of the secular equation, as defined by the - values in D, W, and RHO, between 1 and K. It makes the - appropriate calls to DLAED4 and then updates the eigenvectors by - multiplying the matrix of eigenvectors of the pair of eigensystems - being combined by the matrix of eigenvectors of the K-by-K system - which is solved here. 
- - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. - It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Arguments - ========= - - K (input) INTEGER - The number of terms in the rational function to be solved by - DLAED4. K >= 0. - - N (input) INTEGER - The number of rows and columns in the Q matrix. - N >= K (deflation may result in N>K). - - N1 (input) INTEGER - The location of the last eigenvalue in the leading submatrix. - min(1,N) <= N1 <= N/2. - - D (output) DOUBLE PRECISION array, dimension (N) - D(I) contains the updated eigenvalues for - 1 <= I <= K. - - Q (output) DOUBLE PRECISION array, dimension (LDQ,N) - Initially the first K columns are used as workspace. - On output the columns 1 to K contain - the updated eigenvectors. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). - - RHO (input) DOUBLE PRECISION - The value of the parameter in the rank one update equation. - RHO >= 0 required. - - DLAMDA (input/output) DOUBLE PRECISION array, dimension (K) - The first K elements of this array contain the old roots - of the deflated updating problem. These are the poles - of the secular equation. May be changed on output by - having lowest order bit set to zero on Cray X-MP, Cray Y-MP, - Cray-2, or Cray C-90, as described above. - - Q2 (input) DOUBLE PRECISION array, dimension (LDQ2, N) - The first K columns of this matrix contain the non-deflated - eigenvectors for the split problem. - - INDX (input) INTEGER array, dimension (N) - The permutation used to arrange the columns of the deflated - Q matrix into three groups (see DLAED2). - The rows of the eigenvectors found by DLAED4 must be likewise - permuted before the matrix multiply can take place. 
- - CTOT (input) INTEGER array, dimension (4) - A count of the total number of the various types of columns - in Q, as described in INDX. The fourth column type is any - column which has been deflated. - - W (input/output) DOUBLE PRECISION array, dimension (K) - The first K elements of this array contain the components - of the deflation-adjusted updating vector. Destroyed on - output. - - S (workspace) DOUBLE PRECISION array, dimension (N1 + 1)*K - Will contain the eigenvectors of the repaired matrix which - will be multiplied by the previously accumulated eigenvectors - to update the system. - - LDS (input) INTEGER - The leading dimension of S. LDS >= max(1,K). - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an eigenvalue did not converge - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - Modified by Francoise Tisseur, University of Tennessee. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --dlamda; - --q2; - --indx; - --ctot; - --w; - --s; - - /* Function Body */ - *info = 0; - - if (*k < 0) { - *info = -1; - } else if (*n < *k) { - *info = -2; - } else if (*ldq < max(1,*n)) { - *info = -6; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED3", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*k == 0) { - return 0; - } - -/* - Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can - be computed with high relative accuracy (barring over/underflow). - This is a problem on machines without a guard digit in - add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). 
- The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), - which on any of these machines zeros out the bottommost - bit of DLAMDA(I) if it is 1; this makes the subsequent - subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation - occurs. On binary machines with a guard digit (almost all - machines) it does not change DLAMDA(I) at all. On hexadecimal - and decimal machines with a guard digit, it slightly - changes the bottommost bits of DLAMDA(I). It does not account - for hexadecimal or decimal machines without guard digits - (we know of none). We use a subroutine call to compute - 2*DLAMBDA(I) to prevent optimizing compilers from eliminating - this code. -*/ - - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__]; -/* L10: */ - } - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j], - info); - -/* If the zero finder fails, the computation is terminated. */ - - if (*info != 0) { - goto L120; - } -/* L20: */ - } - - if (*k == 1) { - goto L110; - } - if (*k == 2) { - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - w[1] = q[j * q_dim1 + 1]; - w[2] = q[j * q_dim1 + 2]; - ii = indx[1]; - q[j * q_dim1 + 1] = w[ii]; - ii = indx[2]; - q[j * q_dim1 + 2] = w[ii]; -/* L30: */ - } - goto L110; - } - -/* Compute updated W. 
*/ - - dcopy_(k, &w[1], &c__1, &s[1], &c__1); - -/* Initialize W(I) = Q(I,I) */ - - i__1 = *ldq + 1; - dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1); - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); -/* L40: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); -/* L50: */ - } -/* L60: */ - } - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - d__1 = sqrt(-w[i__]); - w[i__] = d_sign(&d__1, &s[i__]); -/* L70: */ - } - -/* Compute eigenvectors of the modified rank-1 modification. */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *k; - for (i__ = 1; i__ <= i__2; ++i__) { - s[i__] = w[i__] / q[i__ + j * q_dim1]; -/* L80: */ - } - temp = dnrm2_(k, &s[1], &c__1); - i__2 = *k; - for (i__ = 1; i__ <= i__2; ++i__) { - ii = indx[i__]; - q[i__ + j * q_dim1] = s[ii] / temp; -/* L90: */ - } -/* L100: */ - } - -/* Compute the updated eigenvectors. 
*/ - -L110: - - n2 = *n - *n1; - n12 = ctot[1] + ctot[2]; - n23 = ctot[2] + ctot[3]; - - dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23); - iq2 = *n1 * n12 + 1; - if (n23 != 0) { - dgemm_("N", "N", &n2, k, &n23, &c_b15, &q2[iq2], &n2, &s[1], &n23, & - c_b29, &q[*n1 + 1 + q_dim1], ldq); - } else { - dlaset_("A", &n2, k, &c_b29, &c_b29, &q[*n1 + 1 + q_dim1], ldq); - } - - dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12); - if (n12 != 0) { - dgemm_("N", "N", n1, k, &n12, &c_b15, &q2[1], n1, &s[1], &n12, &c_b29, - &q[q_offset], ldq); - } else { - dlaset_("A", n1, k, &c_b29, &c_b29, &q[q_dim1 + 1], ldq); - } - - -L120: - return 0; - -/* End of DLAED3 */ - -} /* dlaed3_ */ - -/* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__, - doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam, - integer *info) -{ - /* System generated locals */ - integer i__1; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal a, b, c__; - static integer j; - static doublereal w; - static integer ii; - static doublereal dw, zz[3]; - static integer ip1; - static doublereal del, eta, phi, eps, tau, psi; - static integer iim1, iip1; - static doublereal dphi, dpsi; - static integer iter; - static doublereal temp, prew, temp1, dltlb, dltub, midpt; - static integer niter; - static logical swtch; - extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *), dlaed6_(integer *, - logical *, doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *); - static logical swtch3; - - static logical orgati; - static doublereal erretm, rhoinv; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. 
of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - December 23, 1999 - - - Purpose - ======= - - This subroutine computes the I-th updated eigenvalue of a symmetric - rank-one modification to a diagonal matrix whose elements are - given in the array d, and that - - D(i) < D(j) for i < j - - and that RHO > 0. This is arranged by the calling routine, and is - no loss in generality. The rank-one modified system is thus - - diag( D ) + RHO * Z * Z_transpose. - - where we assume the Euclidean norm of Z is 1. - - The method consists of approximating the rational functions in the - secular equation by simpler interpolating rational functions. - - Arguments - ========= - - N (input) INTEGER - The length of all arrays. - - I (input) INTEGER - The index of the eigenvalue to be computed. 1 <= I <= N. - - D (input) DOUBLE PRECISION array, dimension (N) - The original eigenvalues. It is assumed that they are in - order, D(I) < D(J) for I < J. - - Z (input) DOUBLE PRECISION array, dimension (N) - The components of the updating vector. - - DELTA (output) DOUBLE PRECISION array, dimension (N) - If N .ne. 1, DELTA contains (D(j) - lambda_I) in its j-th - component. If N = 1, then DELTA(1) = 1. The vector DELTA - contains the information necessary to construct the - eigenvectors. - - RHO (input) DOUBLE PRECISION - The scalar in the symmetric updating formula. - - DLAM (output) DOUBLE PRECISION - The computed lambda_I, the I-th updated eigenvalue. - - INFO (output) INTEGER - = 0: successful exit - > 0: if INFO = 1, the updating process failed. - - Internal Parameters - =================== - - Logical variable ORGATI (origin-at-i?) is used for distinguishing - whether D(i) or D(i+1) is treated as the origin. - - ORGATI = .true. origin at i - ORGATI = .false. origin at i+1 - - Logical variable SWTCH3 (switch-for-3-poles?) is for noting - if we are working with THREE poles! 
- - MAXIT is the maximum number of iterations allowed for each - eigenvalue. - - Further Details - =============== - - Based on contributions by - Ren-Cang Li, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Since this routine is called in an inner loop, we do no argument - checking. - - Quick return for N=1 and 2. -*/ - - /* Parameter adjustments */ - --delta; - --z__; - --d__; - - /* Function Body */ - *info = 0; - if (*n == 1) { - -/* Presumably, I=1 upon entry */ - - *dlam = d__[1] + *rho * z__[1] * z__[1]; - delta[1] = 1.; - return 0; - } - if (*n == 2) { - dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam); - return 0; - } - -/* Compute machine epsilon */ - - eps = EPSILON; - rhoinv = 1. / *rho; - -/* The case I = N */ - - if (*i__ == *n) { - -/* Initialize some basic variables */ - - ii = *n - 1; - niter = 1; - -/* Calculate initial guess */ - - midpt = *rho / 2.; - -/* - If ||Z||_2 is not one, then TEMP should be set to - RHO * ||Z||_2^2 / TWO -*/ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[*i__] - midpt; -/* L10: */ - } - - psi = 0.; - i__1 = *n - 2; - for (j = 1; j <= i__1; ++j) { - psi += z__[j] * z__[j] / delta[j]; -/* L20: */ - } - - c__ = rhoinv + psi; - w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[* - n]; - - if (w <= 0.) { - temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho) - + z__[*n] * z__[*n] / *rho; - if (c__ <= temp) { - tau = *rho; - } else { - del = d__[*n] - d__[*n - 1]; - a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n] - ; - b = z__[*n] * z__[*n] * del; - if (a < 0.) { - tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); - } else { - tau = (a + sqrt(a * a + b * 4. 
* c__)) / (c__ * 2.); - } - } - -/* - It can be proved that - D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO -*/ - - dltlb = midpt; - dltub = *rho; - } else { - del = d__[*n] - d__[*n - 1]; - a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; - b = z__[*n] * z__[*n] * del; - if (a < 0.) { - tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); - } else { - tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); - } - -/* - It can be proved that - D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2 -*/ - - dltlb = 0.; - dltub = midpt; - } - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[*i__] - tau; -/* L30: */ - } - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / delta[j]; - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L40: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - temp = z__[*n] / delta[*n]; - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi - + dphi); - - w = rhoinv + phi + psi; - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - *dlam = d__[*i__] + tau; - goto L250; - } - - if (w <= 0.) { - dltlb = max(dltlb,tau); - } else { - dltub = min(dltub,tau); - } - -/* Calculate the new step */ - - ++niter; - c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi; - a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * ( - dpsi + dphi); - b = delta[*n - 1] * delta[*n] * w; - if (c__ < 0.) { - c__ = abs(c__); - } - if (c__ == 0.) { -/* - ETA = B/A - ETA = RHO - TAU -*/ - eta = dltub - tau; - } else if (a >= 0.) { - eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ - * 2.); - } else { - eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) - ); - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. 
However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta > 0.) { - eta = -w / (dpsi + dphi); - } - temp = tau + eta; - if (temp > dltub || temp < dltlb) { - if (w < 0.) { - eta = (dltub - tau) / 2.; - } else { - eta = (dltlb - tau) / 2.; - } - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; -/* L50: */ - } - - tau += eta; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / delta[j]; - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L60: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - temp = z__[*n] / delta[*n]; - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi - + dphi); - - w = rhoinv + phi + psi; - -/* Main loop to update the values of the array DELTA */ - - iter = niter + 1; - - for (niter = iter; niter <= MAXITERLOOPS; ++niter) { - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - *dlam = d__[*i__] + tau; - goto L250; - } - - if (w <= 0.) { - dltlb = max(dltlb,tau); - } else { - dltub = min(dltub,tau); - } - -/* Calculate the new step */ - - c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi; - a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * - (dpsi + dphi); - b = delta[*n - 1] * delta[*n] * w; - if (a >= 0.) { - eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( - d__1)))); - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta > 0.) 
{ - eta = -w / (dpsi + dphi); - } - temp = tau + eta; - if (temp > dltub || temp < dltlb) { - if (w < 0.) { - eta = (dltub - tau) / 2.; - } else { - eta = (dltlb - tau) / 2.; - } - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; -/* L70: */ - } - - tau += eta; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / delta[j]; - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L80: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - temp = z__[*n] / delta[*n]; - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * ( - dpsi + dphi); - - w = rhoinv + phi + psi; -/* L90: */ - } - -/* Return with INFO = 1, NITER = MAXIT and not converged */ - - *info = 1; - *dlam = d__[*i__] + tau; - goto L250; - -/* End for the case I = N */ - - } else { - -/* The case for I < N */ - - niter = 1; - ip1 = *i__ + 1; - -/* Calculate initial guess */ - - del = d__[ip1] - d__[*i__]; - midpt = del / 2.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[*i__] - midpt; -/* L100: */ - } - - psi = 0.; - i__1 = *i__ - 1; - for (j = 1; j <= i__1; ++j) { - psi += z__[j] * z__[j] / delta[j]; -/* L110: */ - } - - phi = 0.; - i__1 = *i__ + 2; - for (j = *n; j >= i__1; --j) { - phi += z__[j] * z__[j] / delta[j]; -/* L120: */ - } - c__ = rhoinv + psi + phi; - w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] / - delta[ip1]; - - if (w > 0.) { - -/* - d(i)< the ith eigenvalue < (d(i)+d(i+1))/2 - - We choose d(i) as origin. -*/ - - orgati = TRUE_; - a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; - b = z__[*i__] * z__[*i__] * del; - if (a > 0.) { - tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( - d__1)))); - } else { - tau = (a - sqrt((d__1 = a * a - b * 4. 
* c__, abs(d__1)))) / ( - c__ * 2.); - } - dltlb = 0.; - dltub = midpt; - } else { - -/* - (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1) - - We choose d(i+1) as origin. -*/ - - orgati = FALSE_; - a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; - b = z__[ip1] * z__[ip1] * del; - if (a < 0.) { - tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( - d__1)))); - } else { - tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / - (c__ * 2.); - } - dltlb = -midpt; - dltub = 0.; - } - - if (orgati) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[*i__] - tau; -/* L130: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[ip1] - tau; -/* L140: */ - } - } - if (orgati) { - ii = *i__; - } else { - ii = *i__ + 1; - } - iim1 = ii - 1; - iip1 = ii + 1; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / delta[j]; - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L150: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / delta[j]; - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; -/* L160: */ - } - - w = rhoinv + phi + psi; - -/* - W is the value of the secular function with - its ii-th element removed. -*/ - - swtch3 = FALSE_; - if (orgati) { - if (w < 0.) { - swtch3 = TRUE_; - } - } else { - if (w > 0.) { - swtch3 = TRUE_; - } - } - if (ii == 1 || ii == *n) { - swtch3 = FALSE_; - } - - temp = z__[ii] / delta[ii]; - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w += temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. 
+ - abs(tau) * dw; - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - if (orgati) { - *dlam = d__[*i__] + tau; - } else { - *dlam = d__[ip1] + tau; - } - goto L250; - } - - if (w <= 0.) { - dltlb = max(dltlb,tau); - } else { - dltub = min(dltub,tau); - } - -/* Calculate the new step */ - - ++niter; - if (! swtch3) { - if (orgati) { -/* Computing 2nd power */ - d__1 = z__[*i__] / delta[*i__]; - c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 * - d__1); - } else { -/* Computing 2nd power */ - d__1 = z__[ip1] / delta[ip1]; - c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 * - d__1); - } - a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] * - dw; - b = delta[*i__] * delta[ip1] * w; - if (c__ == 0.) { - if (a == 0.) { - if (orgati) { - a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] * - (dpsi + dphi); - } else { - a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] * - (dpsi + dphi); - } - } - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( - d__1)))); - } - } else { - -/* Interpolation using THREE most relevant poles */ - - temp = rhoinv + psi + phi; - if (orgati) { - temp1 = z__[iim1] / delta[iim1]; - temp1 *= temp1; - c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[ - iip1]) * temp1; - zz[0] = z__[iim1] * z__[iim1]; - zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi); - } else { - temp1 = z__[iip1] / delta[iip1]; - temp1 *= temp1; - c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[ - iim1]) * temp1; - zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1)); - zz[2] = z__[iip1] * z__[iip1]; - } - zz[1] = z__[ii] * z__[ii]; - dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info); - if (*info != 0) { - goto L250; - } - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. 
However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta >= 0.) { - eta = -w / dw; - } - temp = tau + eta; - if (temp > dltub || temp < dltlb) { - if (w < 0.) { - eta = (dltub - tau) / 2.; - } else { - eta = (dltlb - tau) / 2.; - } - } - - prew = w; - -/* L170: */ - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; -/* L180: */ - } - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / delta[j]; - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L190: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / delta[j]; - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; -/* L200: */ - } - - temp = z__[ii] / delta[ii]; - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w = rhoinv + phi + psi + temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + ( - d__1 = tau + eta, abs(d__1)) * dw; - - swtch = FALSE_; - if (orgati) { - if (-w > abs(prew) / 10.) { - swtch = TRUE_; - } - } else { - if (w > abs(prew) / 10.) { - swtch = TRUE_; - } - } - - tau += eta; - -/* Main loop to update the values of the array DELTA */ - - iter = niter + 1; - - for (niter = iter; niter <= MAXITERLOOPS; ++niter) { - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - if (orgati) { - *dlam = d__[*i__] + tau; - } else { - *dlam = d__[ip1] + tau; - } - goto L250; - } - - if (w <= 0.) { - dltlb = max(dltlb,tau); - } else { - dltub = min(dltub,tau); - } - -/* Calculate the new step */ - - if (! swtch3) { - if (! 
swtch) { - if (orgati) { -/* Computing 2nd power */ - d__1 = z__[*i__] / delta[*i__]; - c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * ( - d__1 * d__1); - } else { -/* Computing 2nd power */ - d__1 = z__[ip1] / delta[ip1]; - c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * - (d__1 * d__1); - } - } else { - temp = z__[ii] / delta[ii]; - if (orgati) { - dpsi += temp * temp; - } else { - dphi += temp * temp; - } - c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi; - } - a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] - * dw; - b = delta[*i__] * delta[ip1] * w; - if (c__ == 0.) { - if (a == 0.) { - if (! swtch) { - if (orgati) { - a = z__[*i__] * z__[*i__] + delta[ip1] * - delta[ip1] * (dpsi + dphi); - } else { - a = z__[ip1] * z__[ip1] + delta[*i__] * delta[ - *i__] * (dpsi + dphi); - } - } else { - a = delta[*i__] * delta[*i__] * dpsi + delta[ip1] - * delta[ip1] * dphi; - } - } - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) - / (c__ * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, - abs(d__1)))); - } - } else { - -/* Interpolation using THREE most relevant poles */ - - temp = rhoinv + psi + phi; - if (swtch) { - c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi; - zz[0] = delta[iim1] * delta[iim1] * dpsi; - zz[2] = delta[iip1] * delta[iip1] * dphi; - } else { - if (orgati) { - temp1 = z__[iim1] / delta[iim1]; - temp1 *= temp1; - c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - - d__[iip1]) * temp1; - zz[0] = z__[iim1] * z__[iim1]; - zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + - dphi); - } else { - temp1 = z__[iip1] / delta[iip1]; - temp1 *= temp1; - c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - - d__[iim1]) * temp1; - zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - - temp1)); - zz[2] = z__[iip1] * z__[iip1]; - } - } - dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, - info); - if (*info != 0) { - goto L250; - } - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta >= 0.) { - eta = -w / dw; - } - temp = tau + eta; - if (temp > dltub || temp < dltlb) { - if (w < 0.) 
{ - eta = (dltub - tau) / 2.; - } else { - eta = (dltlb - tau) / 2.; - } - } - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; -/* L210: */ - } - - tau += eta; - prew = w; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / delta[j]; - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L220: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / delta[j]; - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; -/* L230: */ - } - - temp = z__[ii] / delta[ii]; - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w = rhoinv + phi + psi + temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. - + abs(tau) * dw; - if ((w * prew > 0. && abs(w) > abs(prew) / 10.)) { - swtch = ! swtch; - } - -/* L240: */ - } - -/* Return with INFO = 1, NITER = MAXIT and not converged */ - - *info = 1; - if (orgati) { - *dlam = d__[*i__] + tau; - } else { - *dlam = d__[ip1] + tau; - } - - } - -L250: - - return 0; - -/* End of DLAED4 */ - -} /* dlaed4_ */ - -/* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__, - doublereal *delta, doublereal *rho, doublereal *dlam) -{ - /* System generated locals */ - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal b, c__, w, del, tau, temp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - September 30, 1994 - - - Purpose - ======= - - This subroutine computes the I-th eigenvalue of a symmetric rank-one - modification of a 2-by-2 diagonal matrix - - diag( D ) + RHO * Z * transpose(Z) . 
- - The diagonal elements in the array D are assumed to satisfy - - D(i) < D(j) for i < j . - - We also assume RHO > 0 and that the Euclidean norm of the vector - Z is one. - - Arguments - ========= - - I (input) INTEGER - The index of the eigenvalue to be computed. I = 1 or I = 2. - - D (input) DOUBLE PRECISION array, dimension (2) - The original eigenvalues. We assume D(1) < D(2). - - Z (input) DOUBLE PRECISION array, dimension (2) - The components of the updating vector. - - DELTA (output) DOUBLE PRECISION array, dimension (2) - The vector DELTA contains the information necessary - to construct the eigenvectors. - - RHO (input) DOUBLE PRECISION - The scalar in the symmetric updating formula. - - DLAM (output) DOUBLE PRECISION - The computed lambda_I, the I-th updated eigenvalue. - - Further Details - =============== - - Based on contributions by - Ren-Cang Li, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --delta; - --z__; - --d__; - - /* Function Body */ - del = d__[2] - d__[1]; - if (*i__ == 1) { - w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.; - if (w > 0.) { - b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[1] * z__[1] * del; - -/* B > ZERO, always */ - - tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); - *dlam = d__[1] + tau; - delta[1] = -z__[1] / tau; - delta[2] = z__[2] / (del - tau); - } else { - b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[2] * z__[2] * del; - if (b > 0.) { - tau = c__ * -2. 
/ (b + sqrt(b * b + c__ * 4.)); - } else { - tau = (b - sqrt(b * b + c__ * 4.)) / 2.; - } - *dlam = d__[2] + tau; - delta[1] = -z__[1] / (del + tau); - delta[2] = -z__[2] / tau; - } - temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]); - delta[1] /= temp; - delta[2] /= temp; - } else { - -/* Now I=2 */ - - b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[2] * z__[2] * del; - if (b > 0.) { - tau = (b + sqrt(b * b + c__ * 4.)) / 2.; - } else { - tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); - } - *dlam = d__[2] + tau; - delta[1] = -z__[1] / (del + tau); - delta[2] = -z__[2] / tau; - temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]); - delta[1] /= temp; - delta[2] /= temp; - } - return 0; - -/* End OF DLAED5 */ - -} /* dlaed5_ */ - -/* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal * - rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal * - tau, integer *info) -{ - /* Initialized data */ - - static logical first = TRUE_; - - /* System generated locals */ - integer i__1; - doublereal d__1, d__2, d__3, d__4; - - /* Builtin functions */ - double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *); - - /* Local variables */ - static doublereal a, b, c__, f; - static integer i__; - static doublereal fc, df, ddf, eta, eps, base; - static integer iter; - static doublereal temp, temp1, temp2, temp3, temp4; - static logical scale; - static integer niter; - static doublereal small1, small2, sminv1, sminv2; - - static doublereal dscale[3], sclfac, zscale[3], erretm, sclinv; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - June 30, 1999 - - - Purpose - ======= - - DLAED6 computes the positive or negative root (closest to the origin) - of - z(1) z(2) z(3) - f(x) = rho + --------- + ---------- + --------- - d(1)-x d(2)-x d(3)-x - - It is assumed that - - if ORGATI = .true. 
the root is between d(2) and d(3); - otherwise it is between d(1) and d(2) - - This routine will be called by DLAED4 when necessary. In most cases, - the root sought is the smallest in magnitude, though it might not be - in some extremely rare situations. - - Arguments - ========= - - KNITER (input) INTEGER - Refer to DLAED4 for its significance. - - ORGATI (input) LOGICAL - If ORGATI is true, the needed root is between d(2) and - d(3); otherwise it is between d(1) and d(2). See - DLAED4 for further details. - - RHO (input) DOUBLE PRECISION - Refer to the equation f(x) above. - - D (input) DOUBLE PRECISION array, dimension (3) - D satisfies d(1) < d(2) < d(3). - - Z (input) DOUBLE PRECISION array, dimension (3) - Each of the elements in z must be positive. - - FINIT (input) DOUBLE PRECISION - The value of f at 0. It is more accurate than the one - evaluated inside this routine (if someone wants to do - so). - - TAU (output) DOUBLE PRECISION - The root of the equation f(x). - - INFO (output) INTEGER - = 0: successful exit - > 0: if INFO = 1, failure to converge - - Further Details - =============== - - Based on contributions by - Ren-Cang Li, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== -*/ - - /* Parameter adjustments */ - --z__; - --d__; - - /* Function Body */ - - *info = 0; - - niter = 1; - *tau = 0.; - if (*kniter == 2) { - if (*orgati) { - temp = (d__[3] - d__[2]) / 2.; - c__ = *rho + z__[1] / (d__[1] - d__[2] - temp); - a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3]; - b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2]; - } else { - temp = (d__[1] - d__[2]) / 2.; - c__ = *rho + z__[3] / (d__[3] - d__[2] - temp); - a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2]; - b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1]; - } -/* Computing MAX */ - d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); - temp = max(d__1,d__2); - a /= 
temp; - b /= temp; - c__ /= temp; - if (c__ == 0.) { - *tau = b / a; - } else if (a <= 0.) { - *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)) - )); - } - temp = *rho + z__[1] / (d__[1] - *tau) + z__[2] / (d__[2] - *tau) + - z__[3] / (d__[3] - *tau); - if (abs(*finit) <= abs(temp)) { - *tau = 0.; - } - } - -/* - On first call to routine, get machine parameters for - possible scaling to avoid overflow -*/ - - if (first) { - eps = EPSILON; - base = BASE; - i__1 = (integer) (log(SAFEMINIMUM) / log(base) / 3.); - small1 = pow_di(&base, &i__1); - sminv1 = 1. / small1; - small2 = small1 * small1; - sminv2 = sminv1 * sminv1; - first = FALSE_; - } - -/* - Determine if scaling of inputs necessary to avoid overflow - when computing 1/TEMP**3 -*/ - - if (*orgati) { -/* Computing MIN */ - d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - * - tau, abs(d__2)); - temp = min(d__3,d__4); - } else { -/* Computing MIN */ - d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - * - tau, abs(d__2)); - temp = min(d__3,d__4); - } - scale = FALSE_; - if (temp <= small1) { - scale = TRUE_; - if (temp <= small2) { - -/* Scale up by power of radix nearest 1/SAFMIN**(2/3) */ - - sclfac = sminv2; - sclinv = small2; - } else { - -/* Scale up by power of radix nearest 1/SAFMIN**(1/3) */ - - sclfac = sminv1; - sclinv = small1; - } - -/* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */ - - for (i__ = 1; i__ <= 3; ++i__) { - dscale[i__ - 1] = d__[i__] * sclfac; - zscale[i__ - 1] = z__[i__] * sclfac; -/* L10: */ - } - *tau *= sclfac; - } else { - -/* Copy D and Z to DSCALE and ZSCALE */ - - for (i__ = 1; i__ <= 3; ++i__) { - dscale[i__ - 1] = d__[i__]; - zscale[i__ - 1] = z__[i__]; -/* L20: */ - } - } - - fc = 0.; - df = 0.; - ddf = 0.; - for (i__ = 1; i__ <= 3; ++i__) { - temp = 1. 
/ (dscale[i__ - 1] - *tau); - temp1 = zscale[i__ - 1] * temp; - temp2 = temp1 * temp; - temp3 = temp2 * temp; - fc += temp1 / dscale[i__ - 1]; - df += temp2; - ddf += temp3; -/* L30: */ - } - f = *finit + *tau * fc; - - if (abs(f) <= 0.) { - goto L60; - } - -/* - Iteration begins - - It is not hard to see that - - 1) Iterations will go up monotonically - if FINIT < 0; - - 2) Iterations will go down monotonically - if FINIT > 0. -*/ - - iter = niter + 1; - - for (niter = iter; niter <= MAXITERLOOPS; ++niter) { - - if (*orgati) { - temp1 = dscale[1] - *tau; - temp2 = dscale[2] - *tau; - } else { - temp1 = dscale[0] - *tau; - temp2 = dscale[1] - *tau; - } - a = (temp1 + temp2) * f - temp1 * temp2 * df; - b = temp1 * temp2 * f; - c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf; -/* Computing MAX */ - d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); - temp = max(d__1,d__2); - a /= temp; - b /= temp; - c__ /= temp; - if (c__ == 0.) { - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ - * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) - ); - } - if (f * eta >= 0.) { - eta = -f / df; - } - - temp = eta + *tau; - if (*orgati) { - if ((eta > 0. && temp >= dscale[2])) { - eta = (dscale[2] - *tau) / 2.; - } - if ((eta < 0. && temp <= dscale[1])) { - eta = (dscale[1] - *tau) / 2.; - } - } else { - if ((eta > 0. && temp >= dscale[1])) { - eta = (dscale[1] - *tau) / 2.; - } - if ((eta < 0. && temp <= dscale[0])) { - eta = (dscale[0] - *tau) / 2.; - } - } - *tau += eta; - - fc = 0.; - erretm = 0.; - df = 0.; - ddf = 0.; - for (i__ = 1; i__ <= 3; ++i__) { - temp = 1. 
/ (dscale[i__ - 1] - *tau); - temp1 = zscale[i__ - 1] * temp; - temp2 = temp1 * temp; - temp3 = temp2 * temp; - temp4 = temp1 / dscale[i__ - 1]; - fc += temp4; - erretm += abs(temp4); - df += temp2; - ddf += temp3; -/* L40: */ - } - f = *finit + *tau * fc; - erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df; - if (abs(f) <= eps * erretm) { - goto L60; - } -/* L50: */ - } - *info = 1; -L60: - -/* Undo scaling */ - - if (scale) { - *tau *= sclinv; - } - return 0; - -/* End of DLAED6 */ - -} /* dlaed6_ */ - -/* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz, - integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__, - doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer - *cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer * - perm, integer *givptr, integer *givcol, doublereal *givnum, - doublereal *work, integer *iwork, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, i__1, i__2; - - /* Builtin functions */ - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer indxc, indxp; - extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *, integer *, - doublereal *, integer *, integer *, integer *), dlaed9_(integer *, - integer *, integer *, integer *, doublereal *, doublereal *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - integer *, integer *), dlaeda_(integer *, integer *, integer *, - integer *, integer *, integer *, integer *, integer *, doublereal - *, 
doublereal *, integer *, doublereal *, doublereal *, integer *) - ; - static integer idlmda; - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), xerbla_(char *, integer *); - static integer coltyp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DLAED7 computes the updated eigensystem of a diagonal - matrix after modification by a rank-one symmetric matrix. This - routine is used only for the eigenproblem which requires all - eigenvalues and optionally eigenvectors of a dense symmetric matrix - that has been reduced to tridiagonal form. DLAED1 handles - the case in which all eigenvalues and eigenvectors of a symmetric - tridiagonal matrix are desired. - - T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) - - where Z = Q'u, u is a vector of length N with ones in the - CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. - - The eigenvectors of the original matrix are stored in Q, and the - eigenvalues are in D. The algorithm consists of three stages: - - The first stage consists of deflating the size of the problem - when there are multiple eigenvalues or if there is a zero in - the Z vector. For each such occurence the dimension of the - secular equation problem is reduced by one. This stage is - performed by the routine DLAED8. - - The second stage consists of calculating the updated - eigenvalues. This is done by finding the roots of the secular - equation via the routine DLAED4 (as called by DLAED9). - This routine also calculates the eigenvectors of the current - problem. - - The final stage consists of computing the updated eigenvectors - directly using the updated eigenvalues. The eigenvectors for - the current problem are multiplied with the eigenvectors from - the overall problem. 
- - Arguments - ========= - - ICOMPQ (input) INTEGER - = 0: Compute eigenvalues only. - = 1: Compute eigenvectors of original dense symmetric matrix - also. On entry, Q contains the orthogonal matrix used - to reduce the original matrix to tridiagonal form. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - QSIZ (input) INTEGER - The dimension of the orthogonal matrix used to reduce - the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. - - TLVLS (input) INTEGER - The total number of merging levels in the overall divide and - conquer tree. - - CURLVL (input) INTEGER - The current level in the overall merge routine, - 0 <= CURLVL <= TLVLS. - - CURPBM (input) INTEGER - The current problem in the current level in the overall - merge routine (counting from upper left to lower right). - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the eigenvalues of the rank-1-perturbed matrix. - On exit, the eigenvalues of the repaired matrix. - - Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) - On entry, the eigenvectors of the rank-1-perturbed matrix. - On exit, the eigenvectors of the repaired tridiagonal matrix. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). - - INDXQ (output) INTEGER array, dimension (N) - The permutation which will reintegrate the subproblem just - solved back into sorted order, i.e., D( INDXQ( I = 1, N ) ) - will be in ascending order. - - RHO (input) DOUBLE PRECISION - The subdiagonal element used to create the rank-1 - modification. - - CUTPNT (input) INTEGER - Contains the location of the last eigenvalue in the leading - sub-matrix. min(1,N) <= CUTPNT <= N. - - QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1) - Stores eigenvectors of submatrices encountered during - divide and conquer, packed together. QPTR points to - beginning of the submatrices. 
- - QPTR (input/output) INTEGER array, dimension (N+2) - List of indices pointing to beginning of submatrices stored - in QSTORE. The submatrices are numbered starting at the - bottom left of the divide and conquer tree, from left to - right and bottom to top. - - PRMPTR (input) INTEGER array, dimension (N lg N) - Contains a list of pointers which indicate where in PERM a - level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) - indicates the size of the permutation and also the size of - the full, non-deflated problem. - - PERM (input) INTEGER array, dimension (N lg N) - Contains the permutations (from deflation and sorting) to be - applied to each eigenblock. - - GIVPTR (input) INTEGER array, dimension (N lg N) - Contains a list of pointers which indicate where in GIVCOL a - level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) - indicates the number of Givens rotations. - - GIVCOL (input) INTEGER array, dimension (2, N lg N) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. - - GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) - Each number indicates the S value to be used in the - corresponding Givens rotation. - - WORK (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N) - - IWORK (workspace) INTEGER array, dimension (4*N) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an eigenvalue did not converge - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --indxq; - --qstore; - --qptr; - --prmptr; - --perm; - --givptr; - givcol -= 3; - givnum -= 3; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if ((*icompq == 1 && *qsiz < *n)) { - *info = -4; - } else if (*ldq < max(1,*n)) { - *info = -9; - } else if (min(1,*n) > *cutpnt || *n < *cutpnt) { - *info = -12; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED7", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* - The following values are for bookkeeping purposes only. They are - integer pointers which indicate the portion of the workspace - used by a particular array in DLAED8 and DLAED9. -*/ - - if (*icompq == 1) { - ldq2 = *qsiz; - } else { - ldq2 = *n; - } - - iz = 1; - idlmda = iz + *n; - iw = idlmda + *n; - iq2 = iw + *n; - is = iq2 + *n * ldq2; - - indx = 1; - indxc = indx + *n; - coltyp = indxc + *n; - indxp = coltyp + *n; - -/* - Form the z-vector which consists of the last row of Q_1 and the - first row of Q_2. -*/ - - ptr = pow_ii(&c__2, tlvls) + 1; - i__1 = *curlvl - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = *tlvls - i__; - ptr += pow_ii(&c__2, &i__2); -/* L10: */ - } - curr = ptr + *curpbm; - dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], & - givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz - + *n], info); - -/* - When solving the final problem, we no longer need the stored data, - so we will overwrite the data from this level onto the previously - used storage space. -*/ - - if (*curlvl == *tlvls) { - qptr[curr] = 1; - prmptr[curr] = 1; - givptr[curr] = 1; - } - -/* Sort and Deflate eigenvalues. 
*/ - - dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho, - cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], & - perm[prmptr[curr]], &givptr[curr + 1], &givcol[((givptr[curr]) << - (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1], &iwork[indxp], & - iwork[indx], info); - prmptr[curr + 1] = prmptr[curr] + *n; - givptr[curr + 1] += givptr[curr]; - -/* Solve Secular Equation. */ - - if (k != 0) { - dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda], - &work[iw], &qstore[qptr[curr]], &k, info); - if (*info != 0) { - goto L30; - } - if (*icompq == 1) { - dgemm_("N", "N", qsiz, &k, &k, &c_b15, &work[iq2], &ldq2, &qstore[ - qptr[curr]], &k, &c_b29, &q[q_offset], ldq); - } -/* Computing 2nd power */ - i__1 = k; - qptr[curr + 1] = qptr[curr] + i__1 * i__1; - -/* Prepare the INDXQ sorting permutation. */ - - n1 = k; - n2 = *n - k; - dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); - } else { - qptr[curr + 1] = qptr[curr]; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - indxq[i__] = i__; -/* L20: */ - } - } - -L30: - return 0; - -/* End of DLAED7 */ - -} /* dlaed7_ */ - -/* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer - *qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, - doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda, - doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer - *givptr, integer *givcol, doublereal *givnum, integer *indxp, integer - *indx, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal c__; - static integer i__, j; - static doublereal s, t; - static integer k2, n1, n2, jp, n1p1; - static doublereal eps, tau, tol; - static integer jlam, imax, jmax; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer 
*, doublereal *, doublereal *), dscal_( - integer *, doublereal *, doublereal *, integer *), dcopy_(integer - *, doublereal *, integer *, doublereal *, integer *); - - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), dlacpy_(char *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - September 30, 1994 - - - Purpose - ======= - - DLAED8 merges the two sets of eigenvalues together into a single - sorted set. Then it tries to deflate the size of the problem. - There are two ways in which deflation can occur: when two or more - eigenvalues are close together or if there is a tiny element in the - Z vector. For each such occurrence the order of the related secular - equation problem is reduced by one. - - Arguments - ========= - - ICOMPQ (input) INTEGER - = 0: Compute eigenvalues only. - = 1: Compute eigenvectors of original dense symmetric matrix - also. On entry, Q contains the orthogonal matrix used - to reduce the original matrix to tridiagonal form. - - K (output) INTEGER - The number of non-deflated eigenvalues, and the order of the - related secular equation. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - QSIZ (input) INTEGER - The dimension of the orthogonal matrix used to reduce - the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the eigenvalues of the two submatrices to be - combined. On exit, the trailing (N-K) updated eigenvalues - (those which were deflated) sorted into increasing order. - - Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) - If ICOMPQ = 0, Q is not referenced. 
Otherwise, - on entry, Q contains the eigenvectors of the partially solved - system which has been previously updated in matrix - multiplies with other partially solved eigensystems. - On exit, Q contains the trailing (N-K) updated eigenvectors - (those which were deflated) in its last N-K columns. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). - - INDXQ (input) INTEGER array, dimension (N) - The permutation which separately sorts the two sub-problems - in D into ascending order. Note that elements in the second - half of this permutation must first have CUTPNT added to - their values in order to be accurate. - - RHO (input/output) DOUBLE PRECISION - On entry, the off-diagonal element associated with the rank-1 - cut which originally split the two submatrices which are now - being recombined. - On exit, RHO has been modified to the value required by - DLAED3. - - CUTPNT (input) INTEGER - The location of the last eigenvalue in the leading - sub-matrix. min(1,N) <= CUTPNT <= N. - - Z (input) DOUBLE PRECISION array, dimension (N) - On entry, Z contains the updating vector (the last row of - the first sub-eigenvector matrix and the first row of the - second sub-eigenvector matrix). - On exit, the contents of Z are destroyed by the updating - process. - - DLAMDA (output) DOUBLE PRECISION array, dimension (N) - A copy of the first K eigenvalues which will be used by - DLAED3 to form the secular equation. - - Q2 (output) DOUBLE PRECISION array, dimension (LDQ2,N) - If ICOMPQ = 0, Q2 is not referenced. Otherwise, - a copy of the first K eigenvectors which will be used by - DLAED7 in a matrix multiply (DGEMM) to update the new - eigenvectors. - - LDQ2 (input) INTEGER - The leading dimension of the array Q2. LDQ2 >= max(1,N). - - W (output) DOUBLE PRECISION array, dimension (N) - The first k values of the final deflation-altered z-vector and - will be passed to DLAED3. 
- - PERM (output) INTEGER array, dimension (N) - The permutations (from deflation and sorting) to be applied - to each eigenblock. - - GIVPTR (output) INTEGER - The number of Givens rotations which took place in this - subproblem. - - GIVCOL (output) INTEGER array, dimension (2, N) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. - - GIVNUM (output) DOUBLE PRECISION array, dimension (2, N) - Each number indicates the S value to be used in the - corresponding Givens rotation. - - INDXP (workspace) INTEGER array, dimension (N) - The permutation used to place deflated values of D at the end - of the array. INDXP(1:K) points to the nondeflated D-values - and INDXP(K+1:N) points to the deflated eigenvalues. - - INDX (workspace) INTEGER array, dimension (N) - The permutation used to sort the contents of D into ascending - order. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --indxq; - --z__; - --dlamda; - q2_dim1 = *ldq2; - q2_offset = 1 + q2_dim1 * 1; - q2 -= q2_offset; - --w; - --perm; - givcol -= 3; - givnum -= 3; - --indxp; - --indx; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*n < 0) { - *info = -3; - } else if ((*icompq == 1 && *qsiz < *n)) { - *info = -4; - } else if (*ldq < max(1,*n)) { - *info = -7; - } else if (*cutpnt < min(1,*n) || *cutpnt > *n) { - *info = -10; - } else if (*ldq2 < max(1,*n)) { - *info = -14; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED8", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - n1 = *cutpnt; - n2 = *n - n1; - n1p1 = n1 + 1; - - if (*rho < 0.) { - dscal_(&n2, &c_b151, &z__[n1p1], &c__1); - } - -/* Normalize z so that norm(z) = 1 */ - - t = 1. / sqrt(2.); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - indx[j] = j; -/* L10: */ - } - dscal_(n, &t, &z__[1], &c__1); - *rho = (d__1 = *rho * 2., abs(d__1)); - -/* Sort the eigenvalues into increasing order */ - - i__1 = *n; - for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) { - indxq[i__] += *cutpnt; -/* L20: */ - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dlamda[i__] = d__[indxq[i__]]; - w[i__] = z__[indxq[i__]]; -/* L30: */ - } - i__ = 1; - j = *cutpnt + 1; - dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]); - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - d__[i__] = dlamda[indx[i__]]; - z__[i__] = w[indx[i__]]; -/* L40: */ - } - -/* Calculate the allowable deflation tolerence */ - - imax = idamax_(n, &z__[1], &c__1); - jmax = idamax_(n, &d__[1], &c__1); - eps = EPSILON; - tol = eps * 8. * (d__1 = d__[jmax], abs(d__1)); - -/* - If the rank-1 modifier is small enough, no more needs to be done - except to reorganize Q so that its columns correspond with the - elements in D. 
-*/ - - if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { - *k = 0; - if (*icompq == 0) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - perm[j] = indxq[indx[j]]; -/* L50: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - perm[j] = indxq[indx[j]]; - dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 - + 1], &c__1); -/* L60: */ - } - dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq); - } - return 0; - } - -/* - If there are multiple eigenvalues then the problem deflates. Here - the number of equal eigenvalues are found. As each equal - eigenvalue is found, an elementary reflector is computed to rotate - the corresponding eigensubspace so that the corresponding - components of Z are zero in this new basis. -*/ - - *k = 0; - *givptr = 0; - k2 = *n + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - indxp[k2] = j; - if (j == *n) { - goto L110; - } - } else { - jlam = j; - goto L80; - } -/* L70: */ - } -L80: - ++j; - if (j > *n) { - goto L100; - } - if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - indxp[k2] = j; - } else { - -/* Check if eigenvalues are close enough to allow deflation. */ - - s = z__[jlam]; - c__ = z__[j]; - -/* - Find sqrt(a**2+b**2) without overflow or - destructive underflow. -*/ - - tau = dlapy2_(&c__, &s); - t = d__[j] - d__[jlam]; - c__ /= tau; - s = -s / tau; - if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { - -/* Deflation is possible. 
*/ - - z__[j] = tau; - z__[jlam] = 0.; - -/* Record the appropriate Givens rotation */ - - ++(*givptr); - givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]]; - givcol[((*givptr) << (1)) + 2] = indxq[indx[j]]; - givnum[((*givptr) << (1)) + 1] = c__; - givnum[((*givptr) << (1)) + 2] = s; - if (*icompq == 1) { - drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[ - indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s); - } - t = d__[jlam] * c__ * c__ + d__[j] * s * s; - d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__; - d__[jlam] = t; - --k2; - i__ = 1; -L90: - if (k2 + i__ <= *n) { - if (d__[jlam] < d__[indxp[k2 + i__]]) { - indxp[k2 + i__ - 1] = indxp[k2 + i__]; - indxp[k2 + i__] = jlam; - ++i__; - goto L90; - } else { - indxp[k2 + i__ - 1] = jlam; - } - } else { - indxp[k2 + i__ - 1] = jlam; - } - jlam = j; - } else { - ++(*k); - w[*k] = z__[jlam]; - dlamda[*k] = d__[jlam]; - indxp[*k] = jlam; - jlam = j; - } - } - goto L80; -L100: - -/* Record the last eigenvalue. */ - - ++(*k); - w[*k] = z__[jlam]; - dlamda[*k] = d__[jlam]; - indxp[*k] = jlam; - -L110: - -/* - Sort the eigenvalues and corresponding eigenvectors into DLAMDA - and Q2 respectively. The eigenvalues/vectors which were not - deflated go into the first K slots of DLAMDA and Q2 respectively, - while those which were deflated go into the last N - K slots. -*/ - - if (*icompq == 0) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - jp = indxp[j]; - dlamda[j] = d__[jp]; - perm[j] = indxq[indx[jp]]; -/* L120: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - jp = indxp[j]; - dlamda[j] = d__[jp]; - perm[j] = indxq[indx[jp]]; - dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1] - , &c__1); -/* L130: */ - } - } - -/* - The deflated eigenvalues and their corresponding vectors go back - into the last N - K slots of D and Q respectively. 
-*/ - - if (*k < *n) { - if (*icompq == 0) { - i__1 = *n - *k; - dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); - } else { - i__1 = *n - *k; - dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); - i__1 = *n - *k; - dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(* - k + 1) * q_dim1 + 1], ldq); - } - } - - return 0; - -/* End of DLAED8 */ - -} /* dlaed8_ */ - -/* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop, - integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal * - rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds, - integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer i__, j; - static doublereal temp; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *), dlaed4_(integer *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *); - extern doublereal dlamc3_(doublereal *, doublereal *); - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - September 30, 1994 - - - Purpose - ======= - - DLAED9 finds the roots of the secular equation, as defined by the - values in D, Z, and RHO, between KSTART and KSTOP. It makes the - appropriate calls to DLAED4 and then stores the new matrix of - eigenvectors for use in calculating the next level of Z vectors. - - Arguments - ========= - - K (input) INTEGER - The number of terms in the rational function to be solved by - DLAED4. K >= 0. 
- - KSTART (input) INTEGER - KSTOP (input) INTEGER - The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP - are to be computed. 1 <= KSTART <= KSTOP <= K. - - N (input) INTEGER - The number of rows and columns in the Q matrix. - N >= K (delation may result in N > K). - - D (output) DOUBLE PRECISION array, dimension (N) - D(I) contains the updated eigenvalues - for KSTART <= I <= KSTOP. - - Q (workspace) DOUBLE PRECISION array, dimension (LDQ,N) - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max( 1, N ). - - RHO (input) DOUBLE PRECISION - The value of the parameter in the rank one update equation. - RHO >= 0 required. - - DLAMDA (input) DOUBLE PRECISION array, dimension (K) - The first K elements of this array contain the old roots - of the deflated updating problem. These are the poles - of the secular equation. - - W (input) DOUBLE PRECISION array, dimension (K) - The first K elements of this array contain the components - of the deflation-adjusted updating vector. - - S (output) DOUBLE PRECISION array, dimension (LDS, K) - Will contain the eigenvectors of the repaired matrix which - will be stored for subsequent Z vector calculation and - multiplied by the previously accumulated eigenvectors - to update the system. - - LDS (input) INTEGER - The leading dimension of S. LDS >= max( 1, K ). - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an eigenvalue did not converge - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --dlamda; - --w; - s_dim1 = *lds; - s_offset = 1 + s_dim1 * 1; - s -= s_offset; - - /* Function Body */ - *info = 0; - - if (*k < 0) { - *info = -1; - } else if (*kstart < 1 || *kstart > max(1,*k)) { - *info = -2; - } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) { - *info = -3; - } else if (*n < *k) { - *info = -4; - } else if (*ldq < max(1,*k)) { - *info = -7; - } else if (*lds < max(1,*k)) { - *info = -12; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAED9", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*k == 0) { - return 0; - } - -/* - Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can - be computed with high relative accuracy (barring over/underflow). - This is a problem on machines without a guard digit in - add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). - The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), - which on any of these machines zeros out the bottommost - bit of DLAMDA(I) if it is 1; this makes the subsequent - subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation - occurs. On binary machines with a guard digit (almost all - machines) it does not change DLAMDA(I) at all. On hexadecimal - and decimal machines with a guard digit, it slightly - changes the bottommost bits of DLAMDA(I). It does not account - for hexadecimal or decimal machines without guard digits - (we know of none). We use a subroutine call to compute - 2*DLAMBDA(I) to prevent optimizing compilers from eliminating - this code. -*/ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__]; -/* L10: */ - } - - i__1 = *kstop; - for (j = *kstart; j <= i__1; ++j) { - dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j], - info); - -/* If the zero finder fails, the computation is terminated. 
*/ - - if (*info != 0) { - goto L120; - } -/* L20: */ - } - - if (*k == 1 || *k == 2) { - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = *k; - for (j = 1; j <= i__2; ++j) { - s[j + i__ * s_dim1] = q[j + i__ * q_dim1]; -/* L30: */ - } -/* L40: */ - } - goto L120; - } - -/* Compute updated W. */ - - dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1); - -/* Initialize W(I) = Q(I,I) */ - - i__1 = *ldq + 1; - dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1); - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); -/* L50: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); -/* L60: */ - } -/* L70: */ - } - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - d__1 = sqrt(-w[i__]); - w[i__] = d_sign(&d__1, &s[i__ + s_dim1]); -/* L80: */ - } - -/* Compute eigenvectors of the modified rank-1 modification. */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *k; - for (i__ = 1; i__ <= i__2; ++i__) { - q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1]; -/* L90: */ - } - temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1); - i__2 = *k; - for (i__ = 1; i__ <= i__2; ++i__) { - s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp; -/* L100: */ - } -/* L110: */ - } - -L120: - return 0; - -/* End of DLAED9 */ - -} /* dlaed9_ */ - -/* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl, - integer *curpbm, integer *prmptr, integer *perm, integer *givptr, - integer *givcol, doublereal *givnum, doublereal *q, integer *qptr, - doublereal *z__, doublereal *ztemp, integer *info) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - - /* Builtin functions */ - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - - /* Local variables */ - static integer i__, k, mid, ptr; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, 
doublereal *); - static integer curr, bsiz1, bsiz2, psiz1, psiz2, zptr1; - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *), dcopy_(integer *, - doublereal *, integer *, doublereal *, integer *), xerbla_(char *, - integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DLAEDA computes the Z vector corresponding to the merge step in the - CURLVLth step of the merge process with TLVLS steps for the CURPBMth - problem. - - Arguments - ========= - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - TLVLS (input) INTEGER - The total number of merging levels in the overall divide and - conquer tree. - - CURLVL (input) INTEGER - The current level in the overall merge routine, - 0 <= curlvl <= tlvls. - - CURPBM (input) INTEGER - The current problem in the current level in the overall - merge routine (counting from upper left to lower right). - - PRMPTR (input) INTEGER array, dimension (N lg N) - Contains a list of pointers which indicate where in PERM a - level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) - indicates the size of the permutation and incidentally the - size of the full, non-deflated problem. - - PERM (input) INTEGER array, dimension (N lg N) - Contains the permutations (from deflation and sorting) to be - applied to each eigenblock. - - GIVPTR (input) INTEGER array, dimension (N lg N) - Contains a list of pointers which indicate where in GIVCOL a - level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) - indicates the number of Givens rotations. - - GIVCOL (input) INTEGER array, dimension (2, N lg N) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. 
- - GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) - Each number indicates the S value to be used in the - corresponding Givens rotation. - - Q (input) DOUBLE PRECISION array, dimension (N**2) - Contains the square eigenblocks from previous levels, the - starting positions for blocks are given by QPTR. - - QPTR (input) INTEGER array, dimension (N+2) - Contains a list of pointers which indicate where in Q an - eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates - the size of the block. - - Z (output) DOUBLE PRECISION array, dimension (N) - On output this vector contains the updating vector (the last - row of the first sub-eigenvector matrix and the first row of - the second sub-eigenvector matrix). - - ZTEMP (workspace) DOUBLE PRECISION array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --ztemp; - --z__; - --qptr; - --q; - givnum -= 3; - givcol -= 3; - --givptr; - --perm; - --prmptr; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -1; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLAEDA", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* Determine location of first number in second half. */ - - mid = *n / 2 + 1; - -/* Gather last/first rows of appropriate eigenblocks into center of Z */ - - ptr = 1; - -/* - Determine location of lowest level subproblem in the full storage - scheme -*/ - - i__1 = *curlvl - 1; - curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1; - -/* - Determine size of these matrices. 
We add HALF to the value of - the SQRT in case the machine underestimates one of these square - roots. -*/ - - bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5); - bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) + - .5); - i__1 = mid - bsiz1 - 1; - for (k = 1; k <= i__1; ++k) { - z__[k] = 0.; -/* L10: */ - } - dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], & - c__1); - dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1); - i__1 = *n; - for (k = mid + bsiz2; k <= i__1; ++k) { - z__[k] = 0.; -/* L20: */ - } - -/* - Loop thru remaining levels 1 -> CURLVL applying the Givens - rotations and permutation and then multiplying the center matrices - against the current Z. -*/ - - ptr = pow_ii(&c__2, tlvls) + 1; - i__1 = *curlvl - 1; - for (k = 1; k <= i__1; ++k) { - i__2 = *curlvl - k; - i__3 = *curlvl - k - 1; - curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) - - 1; - psiz1 = prmptr[curr + 1] - prmptr[curr]; - psiz2 = prmptr[curr + 2] - prmptr[curr + 1]; - zptr1 = mid - psiz1; - -/* Apply Givens at CURR and CURR+1 */ - - i__2 = givptr[curr + 1] - 1; - for (i__ = givptr[curr]; i__ <= i__2; ++i__) { - drot_(&c__1, &z__[zptr1 + givcol[((i__) << (1)) + 1] - 1], &c__1, - &z__[zptr1 + givcol[((i__) << (1)) + 2] - 1], &c__1, & - givnum[((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]); -/* L30: */ - } - i__2 = givptr[curr + 2] - 1; - for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) { - drot_(&c__1, &z__[mid - 1 + givcol[((i__) << (1)) + 1]], &c__1, & - z__[mid - 1 + givcol[((i__) << (1)) + 2]], &c__1, &givnum[ - ((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]); -/* L40: */ - } - psiz1 = prmptr[curr + 1] - prmptr[curr]; - psiz2 = prmptr[curr + 2] - prmptr[curr + 1]; - i__2 = psiz1 - 1; - for (i__ = 0; i__ <= i__2; ++i__) { - ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1]; -/* L50: */ - } - i__2 = psiz2 - 1; - for (i__ = 0; i__ <= i__2; ++i__) { - ztemp[psiz1 + i__ + 1] = 
z__[mid + perm[prmptr[curr + 1] + i__] - - 1]; -/* L60: */ - } - -/* - Multiply Blocks at CURR and CURR+1 - - Determine size of these matrices. We add HALF to the value of - the SQRT in case the machine underestimates one of these - square roots. -*/ - - bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + - .5); - bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1]) - ) + .5); - if (bsiz1 > 0) { - dgemv_("T", &bsiz1, &bsiz1, &c_b15, &q[qptr[curr]], &bsiz1, & - ztemp[1], &c__1, &c_b29, &z__[zptr1], &c__1); - } - i__2 = psiz1 - bsiz1; - dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1); - if (bsiz2 > 0) { - dgemv_("T", &bsiz2, &bsiz2, &c_b15, &q[qptr[curr + 1]], &bsiz2, & - ztemp[psiz1 + 1], &c__1, &c_b29, &z__[mid], &c__1); - } - i__2 = psiz2 - bsiz2; - dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], & - c__1); - - i__2 = *tlvls - k; - ptr += pow_ii(&c__2, &i__2); -/* L70: */ - } - - return 0; - -/* End of DLAEDA */ - -} /* dlaeda_ */ - -/* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__, - doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1) -{ - /* System generated locals */ - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal ab, df, cs, ct, tb, sm, tn, rt, adf, acs; - static integer sgn1, sgn2; - static doublereal acmn, acmx; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix - [ A B ] - [ B C ]. 
- On return, RT1 is the eigenvalue of larger absolute value, RT2 is the - eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right - eigenvector for RT1, giving the decomposition - - [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ] - [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ]. - - Arguments - ========= - - A (input) DOUBLE PRECISION - The (1,1) element of the 2-by-2 matrix. - - B (input) DOUBLE PRECISION - The (1,2) element and the conjugate of the (2,1) element of - the 2-by-2 matrix. - - C (input) DOUBLE PRECISION - The (2,2) element of the 2-by-2 matrix. - - RT1 (output) DOUBLE PRECISION - The eigenvalue of larger absolute value. - - RT2 (output) DOUBLE PRECISION - The eigenvalue of smaller absolute value. - - CS1 (output) DOUBLE PRECISION - SN1 (output) DOUBLE PRECISION - The vector (CS1, SN1) is a unit right eigenvector for RT1. - - Further Details - =============== - - RT1 is accurate to a few ulps barring over/underflow. - - RT2 may be inaccurate if there is massive cancellation in the - determinant A*C-B*B; higher precision or correctly rounded or - correctly truncated arithmetic would be needed to compute RT2 - accurately in all cases. - - CS1 and SN1 are accurate to a few ulps barring over/underflow. - - Overflow is possible only if RT1 is within a factor of 5 of overflow. - Underflow is harmless if the input data is 0 or exceeds - underflow_threshold / macheps. - - ===================================================================== - - - Compute the eigenvalues -*/ - - sm = *a + *c__; - df = *a - *c__; - adf = abs(df); - tb = *b + *b; - ab = abs(tb); - if (abs(*a) > abs(*c__)) { - acmx = *a; - acmn = *c__; - } else { - acmx = *c__; - acmn = *a; - } - if (adf > ab) { -/* Computing 2nd power */ - d__1 = ab / adf; - rt = adf * sqrt(d__1 * d__1 + 1.); - } else if (adf < ab) { -/* Computing 2nd power */ - d__1 = adf / ab; - rt = ab * sqrt(d__1 * d__1 + 1.); - } else { - -/* Includes case AB=ADF=0 */ - - rt = ab * sqrt(2.); - } - if (sm < 0.) 
{ - *rt1 = (sm - rt) * .5; - sgn1 = -1; - -/* - Order of execution important. - To get fully accurate smaller eigenvalue, - next line needs to be executed in higher precision. -*/ - - *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; - } else if (sm > 0.) { - *rt1 = (sm + rt) * .5; - sgn1 = 1; - -/* - Order of execution important. - To get fully accurate smaller eigenvalue, - next line needs to be executed in higher precision. -*/ - - *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; - } else { - -/* Includes case RT1 = RT2 = 0 */ - - *rt1 = rt * .5; - *rt2 = rt * -.5; - sgn1 = 1; - } - -/* Compute the eigenvector */ - - if (df >= 0.) { - cs = df + rt; - sgn2 = 1; - } else { - cs = df - rt; - sgn2 = -1; - } - acs = abs(cs); - if (acs > ab) { - ct = -tb / cs; - *sn1 = 1. / sqrt(ct * ct + 1.); - *cs1 = ct * *sn1; - } else { - if (ab == 0.) { - *cs1 = 1.; - *sn1 = 0.; - } else { - tn = -cs / tb; - *cs1 = 1. / sqrt(tn * tn + 1.); - *sn1 = tn * *cs1; - } - } - if (sgn1 == sgn2) { - tn = *cs1; - *cs1 = -(*sn1); - *sn1 = tn; - } - return 0; - -/* End of DLAEV2 */ - -} /* dlaev2_ */ - -/* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n, - integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal - *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, - integer *ldz, integer *info) -{ - /* System generated locals */ - integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer i__, j, k, l, m; - static doublereal s, v[3]; - static integer i1, i2; - static doublereal t1, t2, t3, v1, v2, v3, h00, h10, h11, h12, h21, h22, - h33, h44; - static integer nh; - static doublereal cs; - static integer nr; - static doublereal sn; - static integer nz; - static doublereal ave, h33s, h44s; - static integer itn, its; - static doublereal ulp, sum, tst1, h43h34, disc, unfl, ovfl; - extern /* 
Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - static doublereal work[1]; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *), dlanv2_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *), dlabad_( - doublereal *, doublereal *); - - extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, - integer *, doublereal *); - extern doublereal dlanhs_(char *, integer *, doublereal *, integer *, - doublereal *); - static doublereal smlnum; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLAHQR is an auxiliary routine called by DHSEQR to update the - eigenvalues and Schur decomposition already computed by DHSEQR, by - dealing with the Hessenberg submatrix in rows and columns ILO to IHI. - - Arguments - ========= - - WANTT (input) LOGICAL - = .TRUE. : the full Schur form T is required; - = .FALSE.: only eigenvalues are required. - - WANTZ (input) LOGICAL - = .TRUE. : the matrix of Schur vectors Z is required; - = .FALSE.: Schur vectors are not required. - - N (input) INTEGER - The order of the matrix H. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that H is already upper quasi-triangular in - rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless - ILO = 1). DLAHQR works primarily with the Hessenberg - submatrix in rows and columns ILO to IHI, but applies - transformations to all of H if WANTT is .TRUE.. - 1 <= ILO <= max(1,IHI); IHI <= N. - - H (input/output) DOUBLE PRECISION array, dimension (LDH,N) - On entry, the upper Hessenberg matrix H. 
- On exit, if WANTT is .TRUE., H is upper quasi-triangular in - rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in - standard form. If WANTT is .FALSE., the contents of H are - unspecified on exit. - - LDH (input) INTEGER - The leading dimension of the array H. LDH >= max(1,N). - - WR (output) DOUBLE PRECISION array, dimension (N) - WI (output) DOUBLE PRECISION array, dimension (N) - The real and imaginary parts, respectively, of the computed - eigenvalues ILO to IHI are stored in the corresponding - elements of WR and WI. If two eigenvalues are computed as a - complex conjugate pair, they are stored in consecutive - elements of WR and WI, say the i-th and (i+1)th, with - WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the - eigenvalues are stored in the same order as on the diagonal - of the Schur form returned in H, with WR(i) = H(i,i), and, if - H(i:i+1,i:i+1) is a 2-by-2 diagonal block, - WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). - - ILOZ (input) INTEGER - IHIZ (input) INTEGER - Specify the rows of Z to which transformations must be - applied if WANTZ is .TRUE.. - 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. - - Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) - If WANTZ is .TRUE., on entry Z must contain the current - matrix Z of transformations accumulated by DHSEQR, and on - exit Z has been updated; transformations are applied only to - the submatrix Z(ILOZ:IHIZ,ILO:IHI). - If WANTZ is .FALSE., Z is not referenced. - - LDZ (input) INTEGER - The leading dimension of the array Z. LDZ >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - > 0: DLAHQR failed to compute all the eigenvalues ILO to IHI - in a total of 30*(IHI-ILO+1) iterations; if INFO = i, - elements i+1:ihi of WR and WI contain those eigenvalues - which have been successfully computed. 
- - Further Details - =============== - - 2-96 Based on modifications by - David Day, Sandia National Laboratory, USA - - ===================================================================== -*/ - - - /* Parameter adjustments */ - h_dim1 = *ldh; - h_offset = 1 + h_dim1 * 1; - h__ -= h_offset; - --wr; - --wi; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - - /* Function Body */ - *info = 0; - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - if (*ilo == *ihi) { - wr[*ilo] = h__[*ilo + *ilo * h_dim1]; - wi[*ilo] = 0.; - return 0; - } - - nh = *ihi - *ilo + 1; - nz = *ihiz - *iloz + 1; - -/* - Set machine-dependent constants for the stopping criterion. - If norm(H) <= sqrt(OVFL), overflow should not occur. -*/ - - unfl = SAFEMINIMUM; - ovfl = 1. / unfl; - dlabad_(&unfl, &ovfl); - ulp = PRECISION; - smlnum = unfl * (nh / ulp); - -/* - I1 and I2 are the indices of the first row and last column of H - to which transformations must be applied. If eigenvalues only are - being computed, I1 and I2 are set inside the main loop. -*/ - - if (*wantt) { - i1 = 1; - i2 = *n; - } - -/* ITN is the total number of QR iterations allowed. */ - - itn = nh * 30; - -/* - The main loop begins here. I is the loop index and decreases from - IHI to ILO in steps of 1 or 2. Each iteration of the loop works - with the active submatrix in rows and columns L to I. - Eigenvalues I+1 to IHI have already converged. Either L = ILO or - H(L,L-1) is negligible so that the matrix splits. -*/ - - i__ = *ihi; -L10: - l = *ilo; - if (i__ < *ilo) { - goto L150; - } - -/* - Perform QR iterations on rows and columns ILO to I until a - submatrix of order 1 or 2 splits off at the bottom because a - subdiagonal element has become negligible. -*/ - - i__1 = itn; - for (its = 0; its <= i__1; ++its) { - -/* Look for a single small subdiagonal element. 
*/ - - i__2 = l + 1; - for (k = i__; k >= i__2; --k) { - tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 = - h__[k + k * h_dim1], abs(d__2)); - if (tst1 == 0.) { - i__3 = i__ - l + 1; - tst1 = dlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, work); - } -/* Computing MAX */ - d__2 = ulp * tst1; - if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2, - smlnum)) { - goto L30; - } -/* L20: */ - } -L30: - l = k; - if (l > *ilo) { - -/* H(L,L-1) is negligible */ - - h__[l + (l - 1) * h_dim1] = 0.; - } - -/* Exit from loop if a submatrix of order 1 or 2 has split off. */ - - if (l >= i__ - 1) { - goto L140; - } - -/* - Now the active submatrix is in rows and columns L to I. If - eigenvalues only are being computed, only the active submatrix - need be transformed. -*/ - - if (! (*wantt)) { - i1 = l; - i2 = i__; - } - - if (its == 10 || its == 20) { - -/* Exceptional shift. */ - - s = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 = - h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2)); - h44 = s * .75 + h__[i__ + i__ * h_dim1]; - h33 = h44; - h43h34 = s * -.4375 * s; - } else { - -/* - Prepare to use Francis' double shift - (i.e. 2nd degree generalized Rayleigh quotient) -*/ - - h44 = h__[i__ + i__ * h_dim1]; - h33 = h__[i__ - 1 + (i__ - 1) * h_dim1]; - h43h34 = h__[i__ + (i__ - 1) * h_dim1] * h__[i__ - 1 + i__ * - h_dim1]; - s = h__[i__ - 1 + (i__ - 2) * h_dim1] * h__[i__ - 1 + (i__ - 2) * - h_dim1]; - disc = (h33 - h44) * .5; - disc = disc * disc + h43h34; - if (disc > 0.) { - -/* Real roots: use Wilkinson's shift twice */ - - disc = sqrt(disc); - ave = (h33 + h44) * .5; - if (abs(h33) - abs(h44) > 0.) { - h33 = h33 * h44 - h43h34; - h44 = h33 / (d_sign(&disc, &ave) + ave); - } else { - h44 = d_sign(&disc, &ave) + ave; - } - h33 = h44; - h43h34 = 0.; - } - } - -/* Look for two consecutive small subdiagonal elements. 
*/ - - i__2 = l; - for (m = i__ - 2; m >= i__2; --m) { -/* - Determine the effect of starting the double-shift QR - iteration at row M, and see if this would make H(M,M-1) - negligible. -*/ - - h11 = h__[m + m * h_dim1]; - h22 = h__[m + 1 + (m + 1) * h_dim1]; - h21 = h__[m + 1 + m * h_dim1]; - h12 = h__[m + (m + 1) * h_dim1]; - h44s = h44 - h11; - h33s = h33 - h11; - v1 = (h33s * h44s - h43h34) / h21 + h12; - v2 = h22 - h11 - h33s - h44s; - v3 = h__[m + 2 + (m + 1) * h_dim1]; - s = abs(v1) + abs(v2) + abs(v3); - v1 /= s; - v2 /= s; - v3 /= s; - v[0] = v1; - v[1] = v2; - v[2] = v3; - if (m == l) { - goto L50; - } - h00 = h__[m - 1 + (m - 1) * h_dim1]; - h10 = h__[m + (m - 1) * h_dim1]; - tst1 = abs(v1) * (abs(h00) + abs(h11) + abs(h22)); - if (abs(h10) * (abs(v2) + abs(v3)) <= ulp * tst1) { - goto L50; - } -/* L40: */ - } -L50: - -/* Double-shift QR step */ - - i__2 = i__ - 1; - for (k = m; k <= i__2; ++k) { - -/* - The first iteration of this loop determines a reflection G - from the vector V and applies it from left and right to H, - thus creating a nonzero bulge below the subdiagonal. - - Each subsequent iteration determines a reflection G to - restore the Hessenberg form in the (K-1)th column, and thus - chases the bulge one step toward the bottom of the active - submatrix. NR is the order of G. - - Computing MIN -*/ - i__3 = 3, i__4 = i__ - k + 1; - nr = min(i__3,i__4); - if (k > m) { - dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1); - } - dlarfg_(&nr, v, &v[1], &c__1, &t1); - if (k > m) { - h__[k + (k - 1) * h_dim1] = v[0]; - h__[k + 1 + (k - 1) * h_dim1] = 0.; - if (k < i__ - 1) { - h__[k + 2 + (k - 1) * h_dim1] = 0.; - } - } else if (m > l) { - h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1]; - } - v2 = v[1]; - t2 = t1 * v2; - if (nr == 3) { - v3 = v[2]; - t3 = t1 * v3; - -/* - Apply G from the left to transform the rows of the matrix - in columns K to I2. 
-*/ - - i__3 = i2; - for (j = k; j <= i__3; ++j) { - sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1] - + v3 * h__[k + 2 + j * h_dim1]; - h__[k + j * h_dim1] -= sum * t1; - h__[k + 1 + j * h_dim1] -= sum * t2; - h__[k + 2 + j * h_dim1] -= sum * t3; -/* L60: */ - } - -/* - Apply G from the right to transform the columns of the - matrix in rows I1 to min(K+3,I). - - Computing MIN -*/ - i__4 = k + 3; - i__3 = min(i__4,i__); - for (j = i1; j <= i__3; ++j) { - sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1] - + v3 * h__[j + (k + 2) * h_dim1]; - h__[j + k * h_dim1] -= sum * t1; - h__[j + (k + 1) * h_dim1] -= sum * t2; - h__[j + (k + 2) * h_dim1] -= sum * t3; -/* L70: */ - } - - if (*wantz) { - -/* Accumulate transformations in the matrix Z */ - - i__3 = *ihiz; - for (j = *iloz; j <= i__3; ++j) { - sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) * - z_dim1] + v3 * z__[j + (k + 2) * z_dim1]; - z__[j + k * z_dim1] -= sum * t1; - z__[j + (k + 1) * z_dim1] -= sum * t2; - z__[j + (k + 2) * z_dim1] -= sum * t3; -/* L80: */ - } - } - } else if (nr == 2) { - -/* - Apply G from the left to transform the rows of the matrix - in columns K to I2. -*/ - - i__3 = i2; - for (j = k; j <= i__3; ++j) { - sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]; - h__[k + j * h_dim1] -= sum * t1; - h__[k + 1 + j * h_dim1] -= sum * t2; -/* L90: */ - } - -/* - Apply G from the right to transform the columns of the - matrix in rows I1 to min(K+3,I). 
-*/ - - i__3 = i__; - for (j = i1; j <= i__3; ++j) { - sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1] - ; - h__[j + k * h_dim1] -= sum * t1; - h__[j + (k + 1) * h_dim1] -= sum * t2; -/* L100: */ - } - - if (*wantz) { - -/* Accumulate transformations in the matrix Z */ - - i__3 = *ihiz; - for (j = *iloz; j <= i__3; ++j) { - sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) * - z_dim1]; - z__[j + k * z_dim1] -= sum * t1; - z__[j + (k + 1) * z_dim1] -= sum * t2; -/* L110: */ - } - } - } -/* L120: */ - } - -/* L130: */ - } - -/* Failure to converge in remaining number of iterations */ - - *info = i__; - return 0; - -L140: - - if (l == i__) { - -/* H(I,I-1) is negligible: one eigenvalue has converged. */ - - wr[i__] = h__[i__ + i__ * h_dim1]; - wi[i__] = 0.; - } else if (l == i__ - 1) { - -/* - H(I-1,I-2) is negligible: a pair of eigenvalues have converged. - - Transform the 2-by-2 submatrix to standard Schur form, - and compute and store the eigenvalues. -*/ - - dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ * - h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ * - h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs, - &sn); - - if (*wantt) { - -/* Apply the transformation to the rest of H. */ - - if (i2 > i__) { - i__1 = i2 - i__; - drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[ - i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn); - } - i__1 = i__ - i1 - 1; - drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ * - h_dim1], &c__1, &cs, &sn); - } - if (*wantz) { - -/* Apply the transformation to Z. */ - - drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz + - i__ * z_dim1], &c__1, &cs, &sn); - } - } - -/* - Decrement number of remaining iterations, and return to start of - the main loop with new value of I. 
-*/ - - itn -= its; - i__ = l - 1; - goto L10; - -L150: - return 0; - -/* End of DLAHQR */ - -} /* dlahqr_ */ - -/* Subroutine */ int dlahrd_(integer *n, integer *k, integer *nb, doublereal * - a, integer *lda, doublereal *tau, doublereal *t, integer *ldt, - doublereal *y, integer *ldy) -{ - /* System generated locals */ - integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, - i__3; - doublereal d__1; - - /* Local variables */ - static integer i__; - static doublereal ei; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *), dgemv_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *), dcopy_(integer *, doublereal *, - integer *, doublereal *, integer *), daxpy_(integer *, doublereal - *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char - *, char *, char *, integer *, doublereal *, integer *, doublereal - *, integer *), dlarfg_(integer *, - doublereal *, doublereal *, integer *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLAHRD reduces the first NB columns of a real general n-by-(n-k+1) - matrix A so that elements below the k-th subdiagonal are zero. The - reduction is performed by an orthogonal similarity transformation - Q' * A * Q. The routine returns the matrices V and T which determine - Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. - - This is an auxiliary routine called by DGEHRD. - - Arguments - ========= - - N (input) INTEGER - The order of the matrix A. - - K (input) INTEGER - The offset for the reduction. Elements below the k-th - subdiagonal in the first NB columns are reduced to zero. - - NB (input) INTEGER - The number of columns to be reduced. 
- - A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1) - On entry, the n-by-(n-k+1) general matrix A. - On exit, the elements on and above the k-th subdiagonal in - the first NB columns are overwritten with the corresponding - elements of the reduced matrix; the elements below the k-th - subdiagonal, with the array TAU, represent the matrix Q as a - product of elementary reflectors. The other columns of A are - unchanged. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (output) DOUBLE PRECISION array, dimension (NB) - The scalar factors of the elementary reflectors. See Further - Details. - - T (output) DOUBLE PRECISION array, dimension (LDT,NB) - The upper triangular matrix T. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= NB. - - Y (output) DOUBLE PRECISION array, dimension (LDY,NB) - The n-by-nb matrix Y. - - LDY (input) INTEGER - The leading dimension of the array Y. LDY >= N. - - Further Details - =============== - - The matrix Q is represented as a product of nb elementary reflectors - - Q = H(1) H(2) . . . H(nb). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in - A(i+k+1:n,i), and tau in TAU(i). - - The elements of the vectors v together form the (n-k+1)-by-nb matrix - V which is needed, with T and Y, to apply the transformation to the - unreduced part of the matrix, using an update of the form: - A := (I - V*T*V') * (A - Y*V'). - - The contents of A on exit are illustrated by the following example - with n = 7, k = 3 and nb = 2: - - ( a h a a a ) - ( a h a a a ) - ( a h a a a ) - ( h h a a a ) - ( v1 h a a a ) - ( v1 v2 a a a ) - ( v1 v2 a a a ) - - where a denotes an element of the original matrix A, h denotes a - modified element of the upper Hessenberg matrix H, and vi denotes an - element of the vector defining H(i). 
- - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - --tau; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - y_dim1 = *ldy; - y_offset = 1 + y_dim1 * 1; - y -= y_offset; - - /* Function Body */ - if (*n <= 1) { - return 0; - } - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - if (i__ > 1) { - -/* - Update A(1:n,i) - - Compute i-th column of A - Y * V' -*/ - - i__2 = i__ - 1; - dgemv_("No transpose", n, &i__2, &c_b151, &y[y_offset], ldy, &a[* - k + i__ - 1 + a_dim1], lda, &c_b15, &a[i__ * a_dim1 + 1], - &c__1); - -/* - Apply I - V * T' * V' to this column (call it b) from the - left, using the last column of T as workspace - - Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) - ( V2 ) ( b2 ) - - where V1 is unit lower triangular - - w := V1' * b1 -*/ - - i__2 = i__ - 1; - dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + - 1], &c__1); - i__2 = i__ - 1; - dtrmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1], - lda, &t[*nb * t_dim1 + 1], &c__1); - -/* w := w + V2'*b2 */ - - i__2 = *n - *k - i__ + 1; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], - lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b15, &t[*nb * - t_dim1 + 1], &c__1); - -/* w := T'*w */ - - i__2 = i__ - 1; - dtrmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt, - &t[*nb * t_dim1 + 1], &c__1); - -/* b2 := b2 - V2*w */ - - i__2 = *n - *k - i__ + 1; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[*k + i__ + - a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b15, &a[*k - + i__ + i__ * a_dim1], &c__1); - -/* b1 := b1 - V1*w */ - - i__2 = i__ - 1; - dtrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1] - , lda, &t[*nb * t_dim1 + 1], &c__1); - i__2 = i__ - 1; - daxpy_(&i__2, &c_b151, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + - i__ * a_dim1], 
&c__1); - - a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei; - } - -/* - Generate the elementary reflector H(i) to annihilate - A(k+i+1:n,i) -*/ - - i__2 = *n - *k - i__ + 1; -/* Computing MIN */ - i__3 = *k + i__ + 1; - dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ * - a_dim1], &c__1, &tau[i__]); - ei = a[*k + i__ + i__ * a_dim1]; - a[*k + i__ + i__ * a_dim1] = 1.; - -/* Compute Y(1:n,i) */ - - i__2 = *n - *k - i__ + 1; - dgemv_("No transpose", n, &i__2, &c_b15, &a[(i__ + 1) * a_dim1 + 1], - lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ * - y_dim1 + 1], &c__1); - i__2 = *n - *k - i__ + 1; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda, - &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &t[i__ * t_dim1 + - 1], &c__1); - i__2 = i__ - 1; - dgemv_("No transpose", n, &i__2, &c_b151, &y[y_offset], ldy, &t[i__ * - t_dim1 + 1], &c__1, &c_b15, &y[i__ * y_dim1 + 1], &c__1); - dscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1); - -/* Compute T(1:i,i) */ - - i__2 = i__ - 1; - d__1 = -tau[i__]; - dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1); - i__2 = i__ - 1; - dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt, - &t[i__ * t_dim1 + 1], &c__1) - ; - t[i__ + i__ * t_dim1] = tau[i__]; - -/* L10: */ - } - a[*k + *nb + *nb * a_dim1] = ei; - - return 0; - -/* End of DLAHRD */ - -} /* dlahrd_ */ - -/* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw, - doublereal *smin, doublereal *ca, doublereal *a, integer *lda, - doublereal *d1, doublereal *d2, doublereal *b, integer *ldb, - doublereal *wr, doublereal *wi, doublereal *x, integer *ldx, - doublereal *scale, doublereal *xnorm, integer *info) -{ - /* Initialized data */ - - static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; - static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; - static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2, - 4,3,2,1 }; - - /* System generated locals */ - integer a_dim1, a_offset, 
b_dim1, b_offset, x_dim1, x_offset; - doublereal d__1, d__2, d__3, d__4, d__5, d__6; - static doublereal equiv_0[4], equiv_1[4]; - - /* Local variables */ - static integer j; -#define ci (equiv_0) -#define cr (equiv_1) - static doublereal bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22, - cr21, cr22, li21, csi, ui11, lr21, ui12, ui22; -#define civ (equiv_0) - static doublereal csr, ur11, ur12, ur22; -#define crv (equiv_1) - static doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs; - static integer icmax; - static doublereal bnorm, cnorm, smini; - - extern /* Subroutine */ int dladiv_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *); - static doublereal bignum, smlnum; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLALN2 solves a system of the form (ca A - w D ) X = s B - or (ca A' - w D) X = s B with possible scaling ("s") and - perturbation of A. (A' means A-transpose.) - - A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA - real diagonal matrix, w is a real or complex value, and X and B are - NA x 1 matrices -- real if w is real, complex if w is complex. NA - may be 1 or 2. - - If w is complex, X and B are represented as NA x 2 matrices, - the first column of each being the real part and the second - being the imaginary part. - - "s" is a scaling factor (.LE. 1), computed by DLALN2, which is - so chosen that X can be computed without overflow. X is further - scaled if necessary to assure that norm(ca A - w D)*norm(X) is less - than overflow. - - If both singular values of (ca A - w D) are less than SMIN, - SMIN*identity will be used instead of (ca A - w D). If only one - singular value is less than SMIN, one element of (ca A - w D) will be - perturbed enough to make the smallest singular value roughly SMIN. 
- If both singular values are at least SMIN, (ca A - w D) will not be - perturbed. In any case, the perturbation will be at most some small - multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values - are computed by infinity-norm approximations, and thus will only be - correct to a factor of 2 or so. - - Note: all input quantities are assumed to be smaller than overflow - by a reasonable factor. (See BIGNUM.) - - Arguments - ========== - - LTRANS (input) LOGICAL - =.TRUE.: A-transpose will be used. - =.FALSE.: A will be used (not transposed.) - - NA (input) INTEGER - The size of the matrix A. It may (only) be 1 or 2. - - NW (input) INTEGER - 1 if "w" is real, 2 if "w" is complex. It may only be 1 - or 2. - - SMIN (input) DOUBLE PRECISION - The desired lower bound on the singular values of A. This - should be a safe distance away from underflow or overflow, - say, between (underflow/machine precision) and (machine - precision * overflow ). (See BIGNUM and ULP.) - - CA (input) DOUBLE PRECISION - The coefficient c, which A is multiplied by. - - A (input) DOUBLE PRECISION array, dimension (LDA,NA) - The NA x NA matrix A. - - LDA (input) INTEGER - The leading dimension of A. It must be at least NA. - - D1 (input) DOUBLE PRECISION - The 1,1 element in the diagonal matrix D. - - D2 (input) DOUBLE PRECISION - The 2,2 element in the diagonal matrix D. Not used if NW=1. - - B (input) DOUBLE PRECISION array, dimension (LDB,NW) - The NA x NW matrix B (right-hand side). If NW=2 ("w" is - complex), column 1 contains the real part of B and column 2 - contains the imaginary part. - - LDB (input) INTEGER - The leading dimension of B. It must be at least NA. - - WR (input) DOUBLE PRECISION - The real part of the scalar "w". - - WI (input) DOUBLE PRECISION - The imaginary part of the scalar "w". Not used if NW=1. - - X (output) DOUBLE PRECISION array, dimension (LDX,NW) - The NA x NW matrix X (unknowns), as computed by DLALN2. 
- If NW=2 ("w" is complex), on exit, column 1 will contain - the real part of X and column 2 will contain the imaginary - part. - - LDX (input) INTEGER - The leading dimension of X. It must be at least NA. - - SCALE (output) DOUBLE PRECISION - The scale factor that B must be multiplied by to insure - that overflow does not occur when computing X. Thus, - (ca A - w D) X will be SCALE*B, not B (ignoring - perturbations of A.) It will be at most 1. - - XNORM (output) DOUBLE PRECISION - The infinity-norm of X, when X is regarded as an NA x NW - real matrix. - - INFO (output) INTEGER - An error flag. It will be set to zero if no error occurs, - a negative number if an argument is in error, or a positive - number if ca A - w D had to be perturbed. - The possible values are: - = 0: No error occurred, and (ca A - w D) did not have to be - perturbed. - = 1: (ca A - w D) had to be perturbed to make its smallest - (or only) singular value greater than SMIN. - NOTE: In the interests of speed, this routine does not - check the inputs for errors. - - ===================================================================== -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - x_dim1 = *ldx; - x_offset = 1 + x_dim1 * 1; - x -= x_offset; - - /* Function Body */ - -/* Compute BIGNUM */ - - smlnum = 2. * SAFEMINIMUM; - bignum = 1. / smlnum; - smini = max(*smin,smlnum); - -/* Don't check for input errors */ - - *info = 0; - -/* Standard Initializations */ - - *scale = 1.; - - if (*na == 1) { - -/* 1 x 1 (i.e., scalar) system C X = B */ - - if (*nw == 1) { - -/* - Real 1x1 system. - - C = ca A - w D -*/ - - csr = *ca * a[a_dim1 + 1] - *wr * *d1; - cnorm = abs(csr); - -/* If | C | < SMINI, use C = SMINI */ - - if (cnorm < smini) { - csr = smini; - cnorm = smini; - *info = 1; - } - -/* Check scaling for X = B / C */ - - bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)); - if ((cnorm < 1. 
&& bnorm > 1.)) { - if (bnorm > bignum * cnorm) { - *scale = 1. / bnorm; - } - } - -/* Compute X */ - - x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr; - *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); - } else { - -/* - Complex 1x1 system (w is complex) - - C = ca A - w D -*/ - - csr = *ca * a[a_dim1 + 1] - *wr * *d1; - csi = -(*wi) * *d1; - cnorm = abs(csr) + abs(csi); - -/* If | C | < SMINI, use C = SMINI */ - - if (cnorm < smini) { - csr = smini; - csi = 0.; - cnorm = smini; - *info = 1; - } - -/* Check scaling for X = B / C */ - - bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1) << - (1)) + 1], abs(d__2)); - if ((cnorm < 1. && bnorm > 1.)) { - if (bnorm > bignum * cnorm) { - *scale = 1. / bnorm; - } - } - -/* Compute X */ - - d__1 = *scale * b[b_dim1 + 1]; - d__2 = *scale * b[((b_dim1) << (1)) + 1]; - dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[((x_dim1) << - (1)) + 1]); - *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[((x_dim1) - << (1)) + 1], abs(d__2)); - } - - } else { - -/* - 2x2 System - - Compute the real part of C = ca A - w D (or ca A' - w D ) -*/ - - cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1; - cr[3] = *ca * a[((a_dim1) << (1)) + 2] - *wr * *d2; - if (*ltrans) { - cr[2] = *ca * a[a_dim1 + 2]; - cr[1] = *ca * a[((a_dim1) << (1)) + 1]; - } else { - cr[1] = *ca * a[a_dim1 + 2]; - cr[2] = *ca * a[((a_dim1) << (1)) + 1]; - } - - if (*nw == 1) { - -/* - Real 2x2 system (w is real) - - Find the largest element in C -*/ - - cmax = 0.; - icmax = 0; - - for (j = 1; j <= 4; ++j) { - if ((d__1 = crv[j - 1], abs(d__1)) > cmax) { - cmax = (d__1 = crv[j - 1], abs(d__1)); - icmax = j; - } -/* L10: */ - } - -/* If norm(C) < SMINI, use SMINI*identity. */ - - if (cmax < smini) { -/* Computing MAX */ - d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[ - b_dim1 + 2], abs(d__2)); - bnorm = max(d__3,d__4); - if ((smini < 1. && bnorm > 1.)) { - if (bnorm > bignum * smini) { - *scale = 1. 
/ bnorm; - } - } - temp = *scale / smini; - x[x_dim1 + 1] = temp * b[b_dim1 + 1]; - x[x_dim1 + 2] = temp * b[b_dim1 + 2]; - *xnorm = temp * bnorm; - *info = 1; - return 0; - } - -/* Gaussian elimination with complete pivoting. */ - - ur11 = crv[icmax - 1]; - cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1]; - ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1]; - cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1]; - ur11r = 1. / ur11; - lr21 = ur11r * cr21; - ur22 = cr22 - ur12 * lr21; - -/* If smaller pivot < SMINI, use SMINI */ - - if (abs(ur22) < smini) { - ur22 = smini; - *info = 1; - } - if (rswap[icmax - 1]) { - br1 = b[b_dim1 + 2]; - br2 = b[b_dim1 + 1]; - } else { - br1 = b[b_dim1 + 1]; - br2 = b[b_dim1 + 2]; - } - br2 -= lr21 * br1; -/* Computing MAX */ - d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2); - bbnd = max(d__2,d__3); - if ((bbnd > 1. && abs(ur22) < 1.)) { - if (bbnd >= bignum * abs(ur22)) { - *scale = 1. / bbnd; - } - } - - xr2 = br2 * *scale / ur22; - xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12); - if (zswap[icmax - 1]) { - x[x_dim1 + 1] = xr2; - x[x_dim1 + 2] = xr1; - } else { - x[x_dim1 + 1] = xr1; - x[x_dim1 + 2] = xr2; - } -/* Computing MAX */ - d__1 = abs(xr1), d__2 = abs(xr2); - *xnorm = max(d__1,d__2); - -/* Further scaling if norm(A) norm(X) > overflow */ - - if ((*xnorm > 1. 
&& cmax > 1.)) { - if (*xnorm > bignum / cmax) { - temp = cmax / bignum; - x[x_dim1 + 1] = temp * x[x_dim1 + 1]; - x[x_dim1 + 2] = temp * x[x_dim1 + 2]; - *xnorm = temp * *xnorm; - *scale = temp * *scale; - } - } - } else { - -/* - Complex 2x2 system (w is complex) - - Find the largest element in C -*/ - - ci[0] = -(*wi) * *d1; - ci[1] = 0.; - ci[2] = 0.; - ci[3] = -(*wi) * *d2; - cmax = 0.; - icmax = 0; - - for (j = 1; j <= 4; ++j) { - if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs( - d__2)) > cmax) { - cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1] - , abs(d__2)); - icmax = j; - } -/* L20: */ - } - -/* If norm(C) < SMINI, use SMINI*identity. */ - - if (cmax < smini) { -/* Computing MAX */ - d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1) - << (1)) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 + - 2], abs(d__3)) + (d__4 = b[((b_dim1) << (1)) + 2], - abs(d__4)); - bnorm = max(d__5,d__6); - if ((smini < 1. && bnorm > 1.)) { - if (bnorm > bignum * smini) { - *scale = 1. / bnorm; - } - } - temp = *scale / smini; - x[x_dim1 + 1] = temp * b[b_dim1 + 1]; - x[x_dim1 + 2] = temp * b[b_dim1 + 2]; - x[((x_dim1) << (1)) + 1] = temp * b[((b_dim1) << (1)) + 1]; - x[((x_dim1) << (1)) + 2] = temp * b[((b_dim1) << (1)) + 2]; - *xnorm = temp * bnorm; - *info = 1; - return 0; - } - -/* Gaussian elimination with complete pivoting. */ - - ur11 = crv[icmax - 1]; - ui11 = civ[icmax - 1]; - cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1]; - ci21 = civ[ipivot[((icmax) << (2)) - 3] - 1]; - ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1]; - ui12 = civ[ipivot[((icmax) << (2)) - 2] - 1]; - cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1]; - ci22 = civ[ipivot[((icmax) << (2)) - 1] - 1]; - if (icmax == 1 || icmax == 4) { - -/* Code when off-diagonals of pivoted C are real */ - - if (abs(ur11) > abs(ui11)) { - temp = ui11 / ur11; -/* Computing 2nd power */ - d__1 = temp; - ur11r = 1. 
/ (ur11 * (d__1 * d__1 + 1.)); - ui11r = -temp * ur11r; - } else { - temp = ur11 / ui11; -/* Computing 2nd power */ - d__1 = temp; - ui11r = -1. / (ui11 * (d__1 * d__1 + 1.)); - ur11r = -temp * ui11r; - } - lr21 = cr21 * ur11r; - li21 = cr21 * ui11r; - ur12s = ur12 * ur11r; - ui12s = ur12 * ui11r; - ur22 = cr22 - ur12 * lr21; - ui22 = ci22 - ur12 * li21; - } else { - -/* Code when diagonals of pivoted C are real */ - - ur11r = 1. / ur11; - ui11r = 0.; - lr21 = cr21 * ur11r; - li21 = ci21 * ur11r; - ur12s = ur12 * ur11r; - ui12s = ui12 * ur11r; - ur22 = cr22 - ur12 * lr21 + ui12 * li21; - ui22 = -ur12 * li21 - ui12 * lr21; - } - u22abs = abs(ur22) + abs(ui22); - -/* If smaller pivot < SMINI, use SMINI */ - - if (u22abs < smini) { - ur22 = smini; - ui22 = 0.; - *info = 1; - } - if (rswap[icmax - 1]) { - br2 = b[b_dim1 + 1]; - br1 = b[b_dim1 + 2]; - bi2 = b[((b_dim1) << (1)) + 1]; - bi1 = b[((b_dim1) << (1)) + 2]; - } else { - br1 = b[b_dim1 + 1]; - br2 = b[b_dim1 + 2]; - bi1 = b[((b_dim1) << (1)) + 1]; - bi2 = b[((b_dim1) << (1)) + 2]; - } - br2 = br2 - lr21 * br1 + li21 * bi1; - bi2 = bi2 - li21 * br1 - lr21 * bi1; -/* Computing MAX */ - d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r)) - ), d__2 = abs(br2) + abs(bi2); - bbnd = max(d__1,d__2); - if ((bbnd > 1. && u22abs < 1.)) { - if (bbnd >= bignum * u22abs) { - *scale = 1. 
/ bbnd; - br1 = *scale * br1; - bi1 = *scale * bi1; - br2 = *scale * br2; - bi2 = *scale * bi2; - } - } - - dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2); - xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2; - xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2; - if (zswap[icmax - 1]) { - x[x_dim1 + 1] = xr2; - x[x_dim1 + 2] = xr1; - x[((x_dim1) << (1)) + 1] = xi2; - x[((x_dim1) << (1)) + 2] = xi1; - } else { - x[x_dim1 + 1] = xr1; - x[x_dim1 + 2] = xr2; - x[((x_dim1) << (1)) + 1] = xi1; - x[((x_dim1) << (1)) + 2] = xi2; - } -/* Computing MAX */ - d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2); - *xnorm = max(d__1,d__2); - -/* Further scaling if norm(A) norm(X) > overflow */ - - if ((*xnorm > 1. && cmax > 1.)) { - if (*xnorm > bignum / cmax) { - temp = cmax / bignum; - x[x_dim1 + 1] = temp * x[x_dim1 + 1]; - x[x_dim1 + 2] = temp * x[x_dim1 + 2]; - x[((x_dim1) << (1)) + 1] = temp * x[((x_dim1) << (1)) + 1] - ; - x[((x_dim1) << (1)) + 2] = temp * x[((x_dim1) << (1)) + 2] - ; - *xnorm = temp * *xnorm; - *scale = temp * *scale; - } - } - } - } - - return 0; - -/* End of DLALN2 */ - -} /* dlaln2_ */ - -#undef crv -#undef civ -#undef cr -#undef ci - - -/* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr, - integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal - *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol, - integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal * - poles, doublereal *difl, doublereal *difr, doublereal *z__, integer * - k, doublereal *c__, doublereal *s, doublereal *work, integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset, - difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, - poles_offset, i__1, i__2; - doublereal d__1; - - /* Local variables */ - static integer i__, j, m, n; - static doublereal dj; - static integer nlp1; - static doublereal temp; - extern /* Subroutine */ 
int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - extern doublereal dnrm2_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - static doublereal diflj, difrj, dsigj; - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *), dcopy_(integer *, - doublereal *, integer *, doublereal *, integer *); - extern doublereal dlamc3_(doublereal *, doublereal *); - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dlacpy_(char *, integer *, integer - *, doublereal *, integer *, doublereal *, integer *), - xerbla_(char *, integer *); - static doublereal dsigjp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - December 1, 1999 - - - Purpose - ======= - - DLALS0 applies back the multiplying factors of either the left or the - right singular vector matrix of a diagonal matrix appended by a row - to the right hand side matrix B in solving the least squares problem - using the divide-and-conquer SVD approach. - - For the left singular vector matrix, three types of orthogonal - matrices are involved: - - (1L) Givens rotations: the number of such rotations is GIVPTR; the - pairs of columns/rows they were applied to are stored in GIVCOL; - and the C- and S-values of these rotations are stored in GIVNUM. - - (2L) Permutation. The (NL+1)-st row of B is to be moved to the first - row, and for J=2:N, PERM(J)-th row of B is to be moved to the - J-th row. - - (3L) The left singular vector matrix of the remaining matrix. 
- - For the right singular vector matrix, four types of orthogonal - matrices are involved: - - (1R) The right singular vector matrix of the remaining matrix. - - (2R) If SQRE = 1, one extra Givens rotation to generate the right - null space. - - (3R) The inverse transformation of (2L). - - (4R) The inverse transformation of (1L). - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether singular vectors are to be computed in - factored form: - = 0: Left singular vector matrix. - = 1: Right singular vector matrix. - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. - - The bidiagonal matrix has row dimension N = NL + NR + 1, - and column dimension M = N + SQRE. - - NRHS (input) INTEGER - The number of columns of B and BX. NRHS must be at least 1. - - B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) - On input, B contains the right hand sides of the least - squares problem in rows 1 through M. On output, B contains - the solution X in rows 1 through N. - - LDB (input) INTEGER - The leading dimension of B. LDB must be at least - max(1,MAX( M, N ) ). - - BX (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) - - LDBX (input) INTEGER - The leading dimension of BX. - - PERM (input) INTEGER array, dimension ( N ) - The permutations (from deflation and sorting) applied - to the two blocks. - - GIVPTR (input) INTEGER - The number of Givens rotations which took place in this - subproblem. - - GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) - Each pair of numbers indicates a pair of rows/columns - involved in a Givens rotation. - - LDGCOL (input) INTEGER - The leading dimension of GIVCOL, must be at least N. 
- - GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - Each number indicates the C or S value used in the - corresponding Givens rotation. - - LDGNUM (input) INTEGER - The leading dimension of arrays DIFR, POLES and - GIVNUM, must be at least K. - - POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - On entry, POLES(1:K, 1) contains the new singular - values obtained from solving the secular equation, and - POLES(1:K, 2) is an array containing the poles in the secular - equation. - - DIFL (input) DOUBLE PRECISION array, dimension ( K ). - On entry, DIFL(I) is the distance between I-th updated - (undeflated) singular value and the I-th (undeflated) old - singular value. - - DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ). - On entry, DIFR(I, 1) contains the distances between I-th - updated (undeflated) singular value and the I+1-th - (undeflated) old singular value. And DIFR(I, 2) is the - normalizing factor for the I-th right singular vector. - - Z (input) DOUBLE PRECISION array, dimension ( K ) - Contain the components of the deflation-adjusted updating row - vector. - - K (input) INTEGER - Contains the dimension of the non-deflated matrix, - This is the order of the related secular equation. 1 <= K <=N. - - C (input) DOUBLE PRECISION - C contains garbage if SQRE =0 and the C-value of a Givens - rotation related to the right null space if SQRE = 1. - - S (input) DOUBLE PRECISION - S contains garbage if SQRE =0 and the S-value of a Givens - rotation related to the right null space if SQRE = 1. - - WORK (workspace) DOUBLE PRECISION array, dimension ( K ) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - bx_dim1 = *ldbx; - bx_offset = 1 + bx_dim1 * 1; - bx -= bx_offset; - --perm; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - difr_dim1 = *ldgnum; - difr_offset = 1 + difr_dim1 * 1; - difr -= difr_offset; - poles_dim1 = *ldgnum; - poles_offset = 1 + poles_dim1 * 1; - poles -= poles_offset; - givnum_dim1 = *ldgnum; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - --difl; - --z__; - --work; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*nl < 1) { - *info = -2; - } else if (*nr < 1) { - *info = -3; - } else if (*sqre < 0 || *sqre > 1) { - *info = -4; - } - - n = *nl + *nr + 1; - - if (*nrhs < 1) { - *info = -5; - } else if (*ldb < n) { - *info = -7; - } else if (*ldbx < n) { - *info = -9; - } else if (*givptr < 0) { - *info = -11; - } else if (*ldgcol < n) { - *info = -13; - } else if (*ldgnum < n) { - *info = -15; - } else if (*k < 1) { - *info = -20; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLALS0", &i__1); - return 0; - } - - m = n + *sqre; - nlp1 = *nl + 1; - - if (*icompq == 0) { - -/* - Apply back orthogonal transformations from the left. - - Step (1L): apply back the Givens rotations performed. -*/ - - i__1 = *givptr; - for (i__ = 1; i__ <= i__1; ++i__) { - drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1], - ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[ - i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1]) - ; -/* L10: */ - } - -/* Step (2L): permute rows of B. 
*/ - - dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx); - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1], - ldbx); -/* L20: */ - } - -/* - Step (3L): apply the inverse of the left singular vector - matrix to BX. -*/ - - if (*k == 1) { - dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb); - if (z__[1] < 0.) { - dscal_(nrhs, &c_b151, &b[b_offset], ldb); - } - } else { - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - diflj = difl[j]; - dj = poles[j + poles_dim1]; - dsigj = -poles[j + ((poles_dim1) << (1))]; - if (j < *k) { - difrj = -difr[j + difr_dim1]; - dsigjp = -poles[j + 1 + ((poles_dim1) << (1))]; - } - if (z__[j] == 0. || poles[j + ((poles_dim1) << (1))] == 0.) { - work[j] = 0.; - } else { - work[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] / - diflj / (poles[j + ((poles_dim1) << (1))] + dj); - } - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))] - == 0.) { - work[i__] = 0.; - } else { - work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[ - i__] / (dlamc3_(&poles[i__ + ((poles_dim1) << - (1))], &dsigj) - diflj) / (poles[i__ + (( - poles_dim1) << (1))] + dj); - } -/* L30: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))] - == 0.) { - work[i__] = 0.; - } else { - work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[ - i__] / (dlamc3_(&poles[i__ + ((poles_dim1) << - (1))], &dsigjp) + difrj) / (poles[i__ + (( - poles_dim1) << (1))] + dj); - } -/* L40: */ - } - work[1] = -1.; - temp = dnrm2_(k, &work[1], &c__1); - dgemv_("T", k, nrhs, &c_b15, &bx[bx_offset], ldbx, &work[1], & - c__1, &c_b29, &b[j + b_dim1], ldb); - dlascl_("G", &c__0, &c__0, &temp, &c_b15, &c__1, nrhs, &b[j + - b_dim1], ldb, info); -/* L50: */ - } - } - -/* Move the deflated rows of BX to B also. 
*/ - - if (*k < max(m,n)) { - i__1 = n - *k; - dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1 - + b_dim1], ldb); - } - } else { - -/* - Apply back the right orthogonal transformations. - - Step (1R): apply back the new right singular vector matrix - to B. -*/ - - if (*k == 1) { - dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx); - } else { - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dsigj = poles[j + ((poles_dim1) << (1))]; - if (z__[j] == 0.) { - work[j] = 0.; - } else { - work[j] = -z__[j] / difl[j] / (dsigj + poles[j + - poles_dim1]) / difr[j + ((difr_dim1) << (1))]; - } - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - if (z__[j] == 0.) { - work[i__] = 0.; - } else { - d__1 = -poles[i__ + 1 + ((poles_dim1) << (1))]; - work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[ - i__ + difr_dim1]) / (dsigj + poles[i__ + - poles_dim1]) / difr[i__ + ((difr_dim1) << (1)) - ]; - } -/* L60: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - if (z__[j] == 0.) { - work[i__] = 0.; - } else { - d__1 = -poles[i__ + ((poles_dim1) << (1))]; - work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[ - i__]) / (dsigj + poles[i__ + poles_dim1]) / - difr[i__ + ((difr_dim1) << (1))]; - } -/* L70: */ - } - dgemv_("T", k, nrhs, &c_b15, &b[b_offset], ldb, &work[1], & - c__1, &c_b29, &bx[j + bx_dim1], ldbx); -/* L80: */ - } - } - -/* - Step (2R): if SQRE = 1, apply back the rotation that is - related to the right null space of the subproblem. -*/ - - if (*sqre == 1) { - dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx); - drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__, - s); - } - if (*k < max(m,n)) { - i__1 = n - *k; - dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 + - bx_dim1], ldbx); - } - -/* Step (3R): permute rows of B. 
*/ - - dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb); - if (*sqre == 1) { - dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb); - } - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1], - ldb); -/* L90: */ - } - -/* Step (4R): apply back the Givens rotations performed. */ - - for (i__ = *givptr; i__ >= 1; --i__) { - d__1 = -givnum[i__ + givnum_dim1]; - drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1], - ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[ - i__ + ((givnum_dim1) << (1))], &d__1); -/* L100: */ - } - } - - return 0; - -/* End of DLALS0 */ - -} /* dlals0_ */ - -/* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n, - integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer * - ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k, - doublereal *difl, doublereal *difr, doublereal *z__, doublereal * - poles, integer *givptr, integer *givcol, integer *ldgcol, integer * - perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal * - work, integer *iwork, integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1, - b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1, - difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, - u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, - i__2; - - /* Builtin functions */ - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, - ndb1, nlp1, lvl2, nrp1, nlvl, sqre; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer inode, ndiml, ndimr; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *), 
dlals0_(integer *, integer *, integer *, - integer *, integer *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, doublereal *, doublereal *, doublereal *, - integer *), dlasdt_(integer *, integer *, integer *, integer *, - integer *, integer *, integer *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLALSA is an itermediate step in solving the least squares problem - by computing the SVD of the coefficient matrix in compact form (The - singular vectors are computed as products of simple orthorgonal - matrices.). - - If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector - matrix of an upper bidiagonal matrix to the right hand side; and if - ICOMPQ = 1, DLALSA applies the right singular vector matrix to the - right hand side. The singular vector matrices were generated in - compact form by DLALSA. - - Arguments - ========= - - - ICOMPQ (input) INTEGER - Specifies whether the left or the right singular vector - matrix is involved. - = 0: Left singular vector matrix - = 1: Right singular vector matrix - - SMLSIZ (input) INTEGER - The maximum size of the subproblems at the bottom of the - computation tree. - - N (input) INTEGER - The row and column dimensions of the upper bidiagonal matrix. - - NRHS (input) INTEGER - The number of columns of B and BX. NRHS must be at least 1. - - B (input) DOUBLE PRECISION array, dimension ( LDB, NRHS ) - On input, B contains the right hand sides of the least - squares problem in rows 1 through M. On output, B contains - the solution X in rows 1 through N. - - LDB (input) INTEGER - The leading dimension of B in the calling subprogram. - LDB must be at least max(1,MAX( M, N ) ). 
- - BX (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) - On exit, the result of applying the left or right singular - vector matrix to B. - - LDBX (input) INTEGER - The leading dimension of BX. - - U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ). - On entry, U contains the left singular vector matrices of all - subproblems at the bottom level. - - LDU (input) INTEGER, LDU = > N. - The leading dimension of arrays U, VT, DIFL, DIFR, - POLES, GIVNUM, and Z. - - VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ). - On entry, VT' contains the right singular vector matrices of - all subproblems at the bottom level. - - K (input) INTEGER array, dimension ( N ). - - DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). - where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. - - DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). - On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record - distances between singular values on the I-th level and - singular values on the (I -1)-th level, and DIFR(*, 2 * I) - record the normalizing factors of the right singular vectors - matrices of subproblems on I-th level. - - Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). - On entry, Z(1, I) contains the components of the deflation- - adjusted updating row vector for subproblems on the I-th - level. - - POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). - On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old - singular values involved in the secular equations on the I-th - level. - - GIVPTR (input) INTEGER array, dimension ( N ). - On entry, GIVPTR( I ) records the number of Givens - rotations performed on the I-th problem on the computation - tree. - - GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). - On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the - locations of Givens rotations performed on the I-th level on - the computation tree. - - LDGCOL (input) INTEGER, LDGCOL = > N. 
- The leading dimension of arrays GIVCOL and PERM. - - PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). - On entry, PERM(*, I) records permutations done on the I-th - level of the computation tree. - - GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). - On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- - values of Givens rotations performed on the I-th level on the - computation tree. - - C (input) DOUBLE PRECISION array, dimension ( N ). - On entry, if the I-th subproblem is not square, - C( I ) contains the C-value of a Givens rotation related to - the right null space of the I-th subproblem. - - S (input) DOUBLE PRECISION array, dimension ( N ). - On entry, if the I-th subproblem is not square, - S( I ) contains the S-value of a Givens rotation related to - the right null space of the I-th subproblem. - - WORK (workspace) DOUBLE PRECISION array. - The dimension must be at least N. - - IWORK (workspace) INTEGER array. - The dimension must be at least 3 * N - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - bx_dim1 = *ldbx; - bx_offset = 1 + bx_dim1 * 1; - bx -= bx_offset; - givnum_dim1 = *ldu; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - poles_dim1 = *ldu; - poles_offset = 1 + poles_dim1 * 1; - poles -= poles_offset; - z_dim1 = *ldu; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - difr_dim1 = *ldu; - difr_offset = 1 + difr_dim1 * 1; - difr -= difr_offset; - difl_dim1 = *ldu; - difl_offset = 1 + difl_dim1 * 1; - difl -= difl_offset; - vt_dim1 = *ldu; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - --k; - --givptr; - perm_dim1 = *ldgcol; - perm_offset = 1 + perm_dim1 * 1; - perm -= perm_offset; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - --c__; - --s; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*smlsiz < 3) { - *info = -2; - } else if (*n < *smlsiz) { - *info = -3; - } else if (*nrhs < 1) { - *info = -4; - } else if (*ldb < *n) { - *info = -6; - } else if (*ldbx < *n) { - *info = -8; - } else if (*ldu < *n) { - *info = -10; - } else if (*ldgcol < *n) { - *info = -19; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLALSA", &i__1); - return 0; - } - -/* Book-keeping and setting up the computation tree. */ - - inode = 1; - ndiml = inode + *n; - ndimr = ndiml + *n; - - dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], - smlsiz); - -/* - The following code applies back the left singular vector factors. - For applying back the right singular vector factors, go to 50. -*/ - - if (*icompq == 1) { - goto L50; - } - -/* - The nodes on the bottom level of the tree were solved - by DLASDQ. The corresponding left and right singular vector - matrices are in explicit form. First apply back the left - singular vector matrices. 
-*/ - - ndb1 = (nd + 1) / 2; - i__1 = nd; - for (i__ = ndb1; i__ <= i__1; ++i__) { - -/* - IC : center row of each node - NL : number of rows of left subproblem - NR : number of rows of right subproblem - NLF: starting row of the left subproblem - NRF: starting row of the right subproblem -*/ - - i1 = i__ - 1; - ic = iwork[inode + i1]; - nl = iwork[ndiml + i1]; - nr = iwork[ndimr + i1]; - nlf = ic - nl; - nrf = ic + 1; - dgemm_("T", "N", &nl, nrhs, &nl, &c_b15, &u[nlf + u_dim1], ldu, &b[ - nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx); - dgemm_("T", "N", &nr, nrhs, &nr, &c_b15, &u[nrf + u_dim1], ldu, &b[ - nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx); -/* L10: */ - } - -/* - Next copy the rows of B that correspond to unchanged rows - in the bidiagonal matrix to BX. -*/ - - i__1 = nd; - for (i__ = 1; i__ <= i__1; ++i__) { - ic = iwork[inode + i__ - 1]; - dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx); -/* L20: */ - } - -/* - Finally go through the left singular vector matrices of all - the other subproblems bottom-up on the tree. 
-*/ - - j = pow_ii(&c__2, &nlvl); - sqre = 0; - - for (lvl = nlvl; lvl >= 1; --lvl) { - lvl2 = ((lvl) << (1)) - 1; - -/* - find the first node LF and last node LL on - the current level LVL -*/ - - if (lvl == 1) { - lf = 1; - ll = 1; - } else { - i__1 = lvl - 1; - lf = pow_ii(&c__2, &i__1); - ll = ((lf) << (1)) - 1; - } - i__1 = ll; - for (i__ = lf; i__ <= i__1; ++i__) { - im1 = i__ - 1; - ic = iwork[inode + im1]; - nl = iwork[ndiml + im1]; - nr = iwork[ndimr + im1]; - nlf = ic - nl; - nrf = ic + 1; - --j; - dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, & - b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], & - givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & - givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * - poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + - lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ - j], &s[j], &work[1], info); -/* L30: */ - } -/* L40: */ - } - goto L90; - -/* ICOMPQ = 1: applying back the right singular vector factors. */ - -L50: - -/* - First now go through the right singular vector matrices of all - the tree nodes top-down. -*/ - - j = 0; - i__1 = nlvl; - for (lvl = 1; lvl <= i__1; ++lvl) { - lvl2 = ((lvl) << (1)) - 1; - -/* - Find the first node LF and last node LL on - the current level LVL. 
-*/ - - if (lvl == 1) { - lf = 1; - ll = 1; - } else { - i__2 = lvl - 1; - lf = pow_ii(&c__2, &i__2); - ll = ((lf) << (1)) - 1; - } - i__2 = lf; - for (i__ = ll; i__ >= i__2; --i__) { - im1 = i__ - 1; - ic = iwork[inode + im1]; - nl = iwork[ndiml + im1]; - nr = iwork[ndimr + im1]; - nlf = ic - nl; - nrf = ic + 1; - if (i__ == ll) { - sqre = 0; - } else { - sqre = 1; - } - ++j; - dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[ - nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], & - givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & - givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * - poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + - lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ - j], &s[j], &work[1], info); -/* L60: */ - } -/* L70: */ - } - -/* - The nodes on the bottom level of the tree were solved - by DLASDQ. The corresponding right singular vector - matrices are in explicit form. Apply them back. -*/ - - ndb1 = (nd + 1) / 2; - i__1 = nd; - for (i__ = ndb1; i__ <= i__1; ++i__) { - i1 = i__ - 1; - ic = iwork[inode + i1]; - nl = iwork[ndiml + i1]; - nr = iwork[ndimr + i1]; - nlp1 = nl + 1; - if (i__ == nd) { - nrp1 = nr; - } else { - nrp1 = nr + 1; - } - nlf = ic - nl; - nrf = ic + 1; - dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b15, &vt[nlf + vt_dim1], ldu, - &b[nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx); - dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b15, &vt[nrf + vt_dim1], ldu, - &b[nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx); -/* L80: */ - } - -L90: - - return 0; - -/* End of DLALSA */ - -} /* dlalsa_ */ - -/* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer - *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb, - doublereal *rcond, integer *rank, doublereal *work, integer *iwork, - integer *info) -{ - /* System generated locals */ - integer b_dim1, b_offset, i__1, i__2; - doublereal d__1; - - /* Builtin functions */ - double log(doublereal), 
d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer c__, i__, j, k; - static doublereal r__; - static integer s, u, z__; - static doublereal cs; - static integer bx; - static doublereal sn; - static integer st, vt, nm1, st1; - static doublereal eps; - static integer iwk; - static doublereal tol; - static integer difl, difr, perm, nsub; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - static integer nlvl, sqre, bxst; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *), - dcopy_(integer *, doublereal *, integer *, doublereal *, integer - *); - static integer poles, sizei, nsize, nwork, icmpq1, icmpq2; - - extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, integer *, integer *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *, - integer *), dlalsa_(integer *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - doublereal *, doublereal *, integer *, integer *, integer *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - integer *, integer *), dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *); - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer - *, integer *, integer *, doublereal *, doublereal *, doublereal *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *), dlacpy_(char *, integer 
*, - integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *), dlaset_(char *, integer *, integer *, - doublereal *, doublereal *, doublereal *, integer *), - xerbla_(char *, integer *); - static integer givcol; - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, - integer *); - static doublereal orgnrm; - static integer givnum, givptr, smlszp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLALSD uses the singular value decomposition of A to solve the least - squares problem of finding X to minimize the Euclidean norm of each - column of A*X-B, where A is N-by-N upper bidiagonal, and X and B - are N-by-NRHS. The solution X overwrites B. - - The singular values of A smaller than RCOND times the largest - singular value are treated as zero in solving the least squares - problem; in this case a minimum norm solution is returned. - The actual singular values are returned in D in ascending order. - - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. - It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': D and E define an upper bidiagonal matrix. - = 'L': D and E define a lower bidiagonal matrix. - - SMLSIZ (input) INTEGER - The maximum size of the subproblems at the bottom of the - computation tree. - - N (input) INTEGER - The dimension of the bidiagonal matrix. N >= 0. 
- - NRHS (input) INTEGER - The number of columns of B. NRHS must be at least 1. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry D contains the main diagonal of the bidiagonal - matrix. On exit, if INFO = 0, D contains its singular values. - - E (input) DOUBLE PRECISION array, dimension (N-1) - Contains the super-diagonal entries of the bidiagonal matrix. - On exit, E has been destroyed. - - B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) - On input, B contains the right hand sides of the least - squares problem. On output, B contains the solution X. - - LDB (input) INTEGER - The leading dimension of B in the calling subprogram. - LDB must be at least max(1,N). - - RCOND (input) DOUBLE PRECISION - The singular values of A less than or equal to RCOND times - the largest singular value are treated as zero in solving - the least squares problem. If RCOND is negative, - machine precision is used instead. - For example, if diag(S)*X=B were the least squares problem, - where diag(S) is a diagonal matrix of singular values, the - solution would be X(i) = B(i) / S(i) if S(i) is greater than - RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to - RCOND*max(S). - - RANK (output) INTEGER - The number of singular values of A greater than RCOND times - the largest singular value. - - WORK (workspace) DOUBLE PRECISION array, dimension at least - (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2), - where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1). - - IWORK (workspace) INTEGER array, dimension at least - (3*N*NLVL + 11*N) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: The algorithm failed to compute an singular value while - working on the submatrix lying in rows and columns - INFO/(N+1) through MOD(INFO,N+1). 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -3; - } else if (*nrhs < 1) { - *info = -4; - } else if (*ldb < 1 || *ldb < *n) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLALSD", &i__1); - return 0; - } - - eps = EPSILON; - -/* Set up the tolerance. */ - - if (*rcond <= 0. || *rcond >= 1.) { - *rcond = eps; - } - - *rank = 0; - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } else if (*n == 1) { - if (d__[1] == 0.) { - dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); - } else { - *rank = 1; - dlascl_("G", &c__0, &c__0, &d__[1], &c_b15, &c__1, nrhs, &b[ - b_offset], ldb, info); - d__[1] = abs(d__[1]); - } - return 0; - } - -/* Rotate the matrix if it is lower bidiagonal. */ - - if (*(unsigned char *)uplo == 'L') { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); - d__[i__] = r__; - e[i__] = sn * d__[i__ + 1]; - d__[i__ + 1] = cs * d__[i__ + 1]; - if (*nrhs == 1) { - drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], & - c__1, &cs, &sn); - } else { - work[((i__) << (1)) - 1] = cs; - work[i__ * 2] = sn; - } -/* L10: */ - } - if (*nrhs > 1) { - i__1 = *nrhs; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = *n - 1; - for (j = 1; j <= i__2; ++j) { - cs = work[((j) << (1)) - 1]; - sn = work[j * 2]; - drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ * - b_dim1], &c__1, &cs, &sn); -/* L20: */ - } -/* L30: */ - } - } - } - -/* Scale. 
*/ - - nm1 = *n - 1; - orgnrm = dlanst_("M", n, &d__[1], &e[1]); - if (orgnrm == 0.) { - dlaset_("A", n, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); - return 0; - } - - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, info); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, - info); - -/* - If N is smaller than the minimum divide size SMLSIZ, then solve - the problem with another solver. -*/ - - if (*n <= *smlsiz) { - nwork = *n * *n + 1; - dlaset_("A", n, n, &c_b29, &c_b15, &work[1], n); - dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, & - work[1], n, &b[b_offset], ldb, &work[nwork], info); - if (*info != 0) { - return 0; - } - tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if (d__[i__] <= tol) { - dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[i__ + b_dim1], - ldb); - } else { - dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &b[ - i__ + b_dim1], ldb, info); - ++(*rank); - } -/* L40: */ - } - dgemm_("T", "N", n, nrhs, n, &c_b15, &work[1], n, &b[b_offset], ldb, & - c_b29, &work[nwork], n); - dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb); - -/* Unscale. */ - - dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, - info); - dlasrt_("D", n, &d__[1], info); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], - ldb, info); - - return 0; - } - -/* Book-keeping and setting up some constants. 
*/ - - nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) / - log(2.)) + 1; - - smlszp = *smlsiz + 1; - - u = 1; - vt = *smlsiz * *n + 1; - difl = vt + smlszp * *n; - difr = difl + nlvl * *n; - z__ = difr + ((nlvl * *n) << (1)); - c__ = z__ + nlvl * *n; - s = c__ + *n; - poles = s + *n; - givnum = poles + ((nlvl) << (1)) * *n; - bx = givnum + ((nlvl) << (1)) * *n; - nwork = bx + *n * *nrhs; - - sizei = *n + 1; - k = sizei + *n; - givptr = k + *n; - perm = givptr + *n; - givcol = perm + nlvl * *n; - iwk = givcol + ((nlvl * *n) << (1)); - - st = 1; - sqre = 0; - icmpq1 = 1; - icmpq2 = 0; - nsub = 0; - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) < eps) { - d__[i__] = d_sign(&eps, &d__[i__]); - } -/* L50: */ - } - - i__1 = nm1; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { - ++nsub; - iwork[nsub] = st; - -/* - Subproblem found. First determine its size and then - apply divide and conquer on it. -*/ - - if (i__ < nm1) { - -/* A subproblem with E(I) small for I < NM1. */ - - nsize = i__ - st + 1; - iwork[sizei + nsub - 1] = nsize; - } else if ((d__1 = e[i__], abs(d__1)) >= eps) { - -/* A subproblem with E(NM1) not too small but I = NM1. */ - - nsize = *n - st + 1; - iwork[sizei + nsub - 1] = nsize; - } else { - -/* - A subproblem with E(NM1) small. This implies an - 1-by-1 subproblem at D(N), which is not solved - explicitly. -*/ - - nsize = i__ - st + 1; - iwork[sizei + nsub - 1] = nsize; - ++nsub; - iwork[nsub] = *n; - iwork[sizei + nsub - 1] = 1; - dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n); - } - st1 = st - 1; - if (nsize == 1) { - -/* - This is a 1-by-1 subproblem and is not solved - explicitly. -*/ - - dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); - } else if (nsize <= *smlsiz) { - -/* This is a small subproblem and is solved by DLASDQ. 
*/ - - dlaset_("A", &nsize, &nsize, &c_b29, &c_b15, &work[vt + st1], - n); - dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[ - st], &work[vt + st1], n, &work[nwork], n, &b[st + - b_dim1], ldb, &work[nwork], info); - if (*info != 0) { - return 0; - } - dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx + - st1], n); - } else { - -/* A large problem. Solve it using divide and conquer. */ - - dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], & - work[u + st1], n, &work[vt + st1], &iwork[k + st1], & - work[difl + st1], &work[difr + st1], &work[z__ + st1], - &work[poles + st1], &iwork[givptr + st1], &iwork[ - givcol + st1], n, &iwork[perm + st1], &work[givnum + - st1], &work[c__ + st1], &work[s + st1], &work[nwork], - &iwork[iwk], info); - if (*info != 0) { - return 0; - } - bxst = bx + st1; - dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, & - work[bxst], n, &work[u + st1], n, &work[vt + st1], & - iwork[k + st1], &work[difl + st1], &work[difr + st1], - &work[z__ + st1], &work[poles + st1], &iwork[givptr + - st1], &iwork[givcol + st1], n, &iwork[perm + st1], & - work[givnum + st1], &work[c__ + st1], &work[s + st1], - &work[nwork], &iwork[iwk], info); - if (*info != 0) { - return 0; - } - } - st = i__ + 1; - } -/* L60: */ - } - -/* Apply the singular values and treat the tiny ones as zero. */ - - tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* - Some of the elements in D can be negative because 1-by-1 - subproblems were not solved explicitly. -*/ - - if ((d__1 = d__[i__], abs(d__1)) <= tol) { - dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &work[bx + i__ - 1], n); - } else { - ++(*rank); - dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &work[ - bx + i__ - 1], n, info); - } - d__[i__] = (d__1 = d__[i__], abs(d__1)); -/* L70: */ - } - -/* Now apply back the right singular vectors. 
*/ - - icmpq2 = 1; - i__1 = nsub; - for (i__ = 1; i__ <= i__1; ++i__) { - st = iwork[i__]; - st1 = st - 1; - nsize = iwork[sizei + i__ - 1]; - bxst = bx + st1; - if (nsize == 1) { - dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb); - } else if (nsize <= *smlsiz) { - dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b15, &work[vt + st1], n, - &work[bxst], n, &c_b29, &b[st + b_dim1], ldb); - } else { - dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st + - b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[ - k + st1], &work[difl + st1], &work[difr + st1], &work[z__ - + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[ - givcol + st1], n, &iwork[perm + st1], &work[givnum + st1], - &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[ - iwk], info); - if (*info != 0) { - return 0; - } - } -/* L80: */ - } - -/* Unscale and sort the singular values. */ - - dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info); - dlasrt_("D", n, &d__[1], info); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb, - info); - - return 0; - -/* End of DLALSD */ - -} /* dlalsd_ */ - -/* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer - *dtrd1, integer *dtrd2, integer *index) -{ - /* System generated locals */ - integer i__1; - - /* Local variables */ - static integer i__, ind1, ind2, n1sv, n2sv; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DLAMRG will create a permutation list which will merge the elements - of A (which is composed of two independently sorted sets) into a - single set which is sorted in ascending order. - - Arguments - ========= - - N1 (input) INTEGER - N2 (input) INTEGER - These arguements contain the respective lengths of the two - sorted lists to be merged. 
- - A (input) DOUBLE PRECISION array, dimension (N1+N2) - The first N1 elements of A contain a list of numbers which - are sorted in either ascending or descending order. Likewise - for the final N2 elements. - - DTRD1 (input) INTEGER - DTRD2 (input) INTEGER - These are the strides to be taken through the array A. - Allowable strides are 1 and -1. They indicate whether a - subset of A is sorted in ascending (DTRDx = 1) or descending - (DTRDx = -1) order. - - INDEX (output) INTEGER array, dimension (N1+N2) - On exit this array will contain a permutation such that - if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be - sorted in ascending order. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --index; - --a; - - /* Function Body */ - n1sv = *n1; - n2sv = *n2; - if (*dtrd1 > 0) { - ind1 = 1; - } else { - ind1 = *n1; - } - if (*dtrd2 > 0) { - ind2 = *n1 + 1; - } else { - ind2 = *n1 + *n2; - } - i__ = 1; -/* while ( (N1SV > 0) & (N2SV > 0) ) */ -L10: - if ((n1sv > 0 && n2sv > 0)) { - if (a[ind1] <= a[ind2]) { - index[i__] = ind1; - ++i__; - ind1 += *dtrd1; - --n1sv; - } else { - index[i__] = ind2; - ++i__; - ind2 += *dtrd2; - --n2sv; - } - goto L10; - } -/* end while */ - if (n1sv == 0) { - i__1 = n2sv; - for (n1sv = 1; n1sv <= i__1; ++n1sv) { - index[i__] = ind2; - ++i__; - ind2 += *dtrd2; -/* L20: */ - } - } else { -/* N2SV .EQ. 
0 */ - i__1 = n1sv; - for (n2sv = 1; n2sv <= i__1; ++n2sv) { - index[i__] = ind1; - ++i__; - ind1 += *dtrd1; -/* L30: */ - } - } - - return 0; - -/* End of DLAMRG */ - -} /* dlamrg_ */ - -doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer - *lda, doublereal *work) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal ret_val, d__1, d__2, d__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, j; - static doublereal sum, scale; - extern logical lsame_(char *, char *); - static doublereal value; - extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLANGE returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - real matrix A. - - Description - =========== - - DLANGE returns the value - - DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in DLANGE as described - above. - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. When M = 0, - DLANGE is set to zero. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. When N = 0, - DLANGE is set to zero. 
- - A (input) DOUBLE PRECISION array, dimension (LDA,N) - The m by n matrix A. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(M,1). - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), - where LWORK >= M when NORM = 'I'; otherwise, WORK is not - referenced. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --work; - - /* Function Body */ - if (min(*m,*n) == 0) { - value = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1)); - value = max(d__2,d__3); -/* L10: */ - } -/* L20: */ - } - } else if (lsame_(norm, "O") || *(unsigned char *) - norm == '1') { - -/* Find norm1(A). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = 0.; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - sum += (d__1 = a[i__ + j * a_dim1], abs(d__1)); -/* L30: */ - } - value = max(value,sum); -/* L40: */ - } - } else if (lsame_(norm, "I")) { - -/* Find normI(A). */ - - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - work[i__] = 0.; -/* L50: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1)); -/* L60: */ - } -/* L70: */ - } - value = 0.; - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = work[i__]; - value = max(d__1,d__2); -/* L80: */ - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). 
*/ - - scale = 0.; - sum = 1.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum); -/* L90: */ - } - value = scale * sqrt(sum); - } - - ret_val = value; - return ret_val; - -/* End of DLANGE */ - -} /* dlange_ */ - -doublereal dlanhs_(char *norm, integer *n, doublereal *a, integer *lda, - doublereal *work) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublereal ret_val, d__1, d__2, d__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, j; - static doublereal sum, scale; - extern logical lsame_(char *, char *); - static doublereal value; - extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLANHS returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - Hessenberg matrix A. - - Description - =========== - - DLANHS returns the value - - DLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in DLANHS as described - above. - - N (input) INTEGER - The order of the matrix A. N >= 0. When N = 0, DLANHS is - set to zero. 
- - A (input) DOUBLE PRECISION array, dimension (LDA,N) - The n by n upper Hessenberg matrix A; the part of A below the - first sub-diagonal is not referenced. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(N,1). - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), - where LWORK >= N when NORM = 'I'; otherwise, WORK is not - referenced. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --work; - - /* Function Body */ - if (*n == 0) { - value = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1)); - value = max(d__2,d__3); -/* L10: */ - } -/* L20: */ - } - } else if (lsame_(norm, "O") || *(unsigned char *) - norm == '1') { - -/* Find norm1(A). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = 0.; -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { - sum += (d__1 = a[i__ + j * a_dim1], abs(d__1)); -/* L30: */ - } - value = max(value,sum); -/* L40: */ - } - } else if (lsame_(norm, "I")) { - -/* Find normI(A). 
*/ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - work[i__] = 0.; -/* L50: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { - work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1)); -/* L60: */ - } -/* L70: */ - } - value = 0.; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = work[i__]; - value = max(d__1,d__2); -/* L80: */ - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). */ - - scale = 0.; - sum = 1.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum); -/* L90: */ - } - value = scale * sqrt(sum); - } - - ret_val = value; - return ret_val; - -/* End of DLANHS */ - -} /* dlanhs_ */ - -doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e) -{ - /* System generated locals */ - integer i__1; - doublereal ret_val, d__1, d__2, d__3, d__4, d__5; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__; - static doublereal sum, scale; - extern logical lsame_(char *, char *); - static doublereal anorm; - extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLANST returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - real symmetric tridiagonal matrix A. 
- - Description - =========== - - DLANST returns the value - - DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in DLANST as described - above. - - N (input) INTEGER - The order of the matrix A. N >= 0. When N = 0, DLANST is - set to zero. - - D (input) DOUBLE PRECISION array, dimension (N) - The diagonal elements of A. - - E (input) DOUBLE PRECISION array, dimension (N-1) - The (n-1) sub-diagonal or super-diagonal elements of A. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --e; - --d__; - - /* Function Body */ - if (*n <= 0) { - anorm = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). */ - - anorm = (d__1 = d__[*n], abs(d__1)); - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1)); - anorm = max(d__2,d__3); -/* Computing MAX */ - d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1)); - anorm = max(d__2,d__3); -/* L10: */ - } - } else if (lsame_(norm, "O") || *(unsigned char *) - norm == '1' || lsame_(norm, "I")) { - -/* Find norm1(A). 
*/ - - if (*n == 1) { - anorm = abs(d__[1]); - } else { -/* Computing MAX */ - d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs( - d__1)) + (d__2 = d__[*n], abs(d__2)); - anorm = max(d__3,d__4); - i__1 = *n - 1; - for (i__ = 2; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[ - i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3)); - anorm = max(d__4,d__5); -/* L20: */ - } - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). */ - - scale = 0.; - sum = 1.; - if (*n > 1) { - i__1 = *n - 1; - dlassq_(&i__1, &e[1], &c__1, &scale, &sum); - sum *= 2; - } - dlassq_(n, &d__[1], &c__1, &scale, &sum); - anorm = scale * sqrt(sum); - } - - ret_val = anorm; - return ret_val; - -/* End of DLANST */ - -} /* dlanst_ */ - -doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer - *lda, doublereal *work) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal ret_val, d__1, d__2, d__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, j; - static doublereal sum, absa, scale; - extern logical lsame_(char *, char *); - static doublereal value; - extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLANSY returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - real symmetric matrix A. 
- - Description - =========== - - DLANSY returns the value - - DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in DLANSY as described - above. - - UPLO (input) CHARACTER*1 - Specifies whether the upper or lower triangular part of the - symmetric matrix A is to be referenced. - = 'U': Upper triangular part of A is referenced - = 'L': Lower triangular part of A is referenced - - N (input) INTEGER - The order of the matrix A. N >= 0. When N = 0, DLANSY is - set to zero. - - A (input) DOUBLE PRECISION array, dimension (LDA,N) - The symmetric matrix A. If UPLO = 'U', the leading n by n - upper triangular part of A contains the upper triangular part - of the matrix A, and the strictly lower triangular part of A - is not referenced. If UPLO = 'L', the leading n by n lower - triangular part of A contains the lower triangular part of - the matrix A, and the strictly upper triangular part of A is - not referenced. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(N,1). - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), - where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, - WORK is not referenced. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --work; - - /* Function Body */ - if (*n == 0) { - value = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). 
*/ - - value = 0.; - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j; - for (i__ = 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs( - d__1)); - value = max(d__2,d__3); -/* L10: */ - } -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs( - d__1)); - value = max(d__2,d__3); -/* L30: */ - } -/* L40: */ - } - } - } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') { - -/* Find normI(A) ( = norm1(A), since A is symmetric). */ - - value = 0.; - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = 0.; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - absa = (d__1 = a[i__ + j * a_dim1], abs(d__1)); - sum += absa; - work[i__] += absa; -/* L50: */ - } - work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1)); -/* L60: */ - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = work[i__]; - value = max(d__1,d__2); -/* L70: */ - } - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - work[i__] = 0.; -/* L80: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1)); - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - absa = (d__1 = a[i__ + j * a_dim1], abs(d__1)); - sum += absa; - work[i__] += absa; -/* L90: */ - } - value = max(value,sum); -/* L100: */ - } - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). 
*/ - - scale = 0.; - sum = 1.; - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - i__2 = j - 1; - dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum); -/* L110: */ - } - } else { - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - i__2 = *n - j; - dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum); -/* L120: */ - } - } - sum *= 2; - i__1 = *lda + 1; - dlassq_(n, &a[a_offset], &i__1, &scale, &sum); - value = scale * sqrt(sum); - } - - ret_val = value; - return ret_val; - -/* End of DLANSY */ - -} /* dlansy_ */ - -/* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__, - doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r, - doublereal *rt2i, doublereal *cs, doublereal *sn) -{ - /* System generated locals */ - doublereal d__1, d__2; - - /* Builtin functions */ - double d_sign(doublereal *, doublereal *), sqrt(doublereal); - - /* Local variables */ - static doublereal p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau, - temp, scale, bcmax, bcmis, sigma; - - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric - matrix in standard form: - - [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ] - [ C D ] [ SN CS ] [ CC DD ] [-SN CS ] - - where either - 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or - 2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex - conjugate eigenvalues. - - Arguments - ========= - - A (input/output) DOUBLE PRECISION - B (input/output) DOUBLE PRECISION - C (input/output) DOUBLE PRECISION - D (input/output) DOUBLE PRECISION - On entry, the elements of the input matrix. - On exit, they are overwritten by the elements of the - standardised Schur form. 
- - RT1R (output) DOUBLE PRECISION - RT1I (output) DOUBLE PRECISION - RT2R (output) DOUBLE PRECISION - RT2I (output) DOUBLE PRECISION - The real and imaginary parts of the eigenvalues. If the - eigenvalues are a complex conjugate pair, RT1I > 0. - - CS (output) DOUBLE PRECISION - SN (output) DOUBLE PRECISION - Parameters of the rotation matrix. - - Further Details - =============== - - Modified by V. Sima, Research Institute for Informatics, Bucharest, - Romania, to reduce the risk of cancellation errors, - when computing real eigenvalues, and to ensure, if possible, that - abs(RT1R) >= abs(RT2R). - - ===================================================================== -*/ - - - eps = PRECISION; - if (*c__ == 0.) { - *cs = 1.; - *sn = 0.; - goto L10; - - } else if (*b == 0.) { - -/* Swap rows and columns */ - - *cs = 0.; - *sn = 1.; - temp = *d__; - *d__ = *a; - *a = temp; - *b = -(*c__); - *c__ = 0.; - goto L10; - } else if ((*a - *d__ == 0. && d_sign(&c_b15, b) != d_sign(&c_b15, c__))) - { - *cs = 1.; - *sn = 0.; - goto L10; - } else { - - temp = *a - *d__; - p = temp * .5; -/* Computing MAX */ - d__1 = abs(*b), d__2 = abs(*c__); - bcmax = max(d__1,d__2); -/* Computing MIN */ - d__1 = abs(*b), d__2 = abs(*c__); - bcmis = min(d__1,d__2) * d_sign(&c_b15, b) * d_sign(&c_b15, c__); -/* Computing MAX */ - d__1 = abs(p); - scale = max(d__1,bcmax); - z__ = p / scale * p + bcmax / scale * bcmis; - -/* - If Z is of the order of the machine accuracy, postpone the - decision on the nature of eigenvalues -*/ - - if (z__ >= eps * 4.) { - -/* Real eigenvalues. Compute A and D. */ - - d__1 = sqrt(scale) * sqrt(z__); - z__ = p + d_sign(&d__1, &p); - *a = *d__ + z__; - *d__ -= bcmax / z__ * bcmis; - -/* Compute B and the rotation matrix */ - - tau = dlapy2_(c__, &z__); - *cs = z__ / tau; - *sn = *c__ / tau; - *b -= *c__; - *c__ = 0.; - } else { - -/* - Complex eigenvalues, or real (almost) equal eigenvalues. - Make diagonal elements equal. 
-*/ - - sigma = *b + *c__; - tau = dlapy2_(&sigma, &temp); - *cs = sqrt((abs(sigma) / tau + 1.) * .5); - *sn = -(p / (tau * *cs)) * d_sign(&c_b15, &sigma); - -/* - Compute [ AA BB ] = [ A B ] [ CS -SN ] - [ CC DD ] [ C D ] [ SN CS ] -*/ - - aa = *a * *cs + *b * *sn; - bb = -(*a) * *sn + *b * *cs; - cc = *c__ * *cs + *d__ * *sn; - dd = -(*c__) * *sn + *d__ * *cs; - -/* - Compute [ A B ] = [ CS SN ] [ AA BB ] - [ C D ] [-SN CS ] [ CC DD ] -*/ - - *a = aa * *cs + cc * *sn; - *b = bb * *cs + dd * *sn; - *c__ = -aa * *sn + cc * *cs; - *d__ = -bb * *sn + dd * *cs; - - temp = (*a + *d__) * .5; - *a = temp; - *d__ = temp; - - if (*c__ != 0.) { - if (*b != 0.) { - if (d_sign(&c_b15, b) == d_sign(&c_b15, c__)) { - -/* Real eigenvalues: reduce to upper triangular form */ - - sab = sqrt((abs(*b))); - sac = sqrt((abs(*c__))); - d__1 = sab * sac; - p = d_sign(&d__1, c__); - tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1))); - *a = temp + p; - *d__ = temp - p; - *b -= *c__; - *c__ = 0.; - cs1 = sab * tau; - sn1 = sac * tau; - temp = *cs * cs1 - *sn * sn1; - *sn = *cs * sn1 + *sn * cs1; - *cs = temp; - } - } else { - *b = -(*c__); - *c__ = 0.; - temp = *cs; - *cs = -(*sn); - *sn = temp; - } - } - } - - } - -L10: - -/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */ - - *rt1r = *a; - *rt2r = *d__; - if (*c__ == 0.) { - *rt1i = 0.; - *rt2i = 0.; - } else { - *rt1i = sqrt((abs(*b))) * sqrt((abs(*c__))); - *rt2i = -(*rt1i); - } - return 0; - -/* End of DLANV2 */ - -} /* dlanv2_ */ - -doublereal dlapy2_(doublereal *x, doublereal *y) -{ - /* System generated locals */ - doublereal ret_val, d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal w, z__, xabs, yabs; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary - overflow. - - Arguments - ========= - - X (input) DOUBLE PRECISION - Y (input) DOUBLE PRECISION - X and Y specify the values x and y. - - ===================================================================== -*/ - - - xabs = abs(*x); - yabs = abs(*y); - w = max(xabs,yabs); - z__ = min(xabs,yabs); - if (z__ == 0.) { - ret_val = w; - } else { -/* Computing 2nd power */ - d__1 = z__ / w; - ret_val = w * sqrt(d__1 * d__1 + 1.); - } - return ret_val; - -/* End of DLAPY2 */ - -} /* dlapy2_ */ - -doublereal dlapy3_(doublereal *x, doublereal *y, doublereal *z__) -{ - /* System generated locals */ - doublereal ret_val, d__1, d__2, d__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal w, xabs, yabs, zabs; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause - unnecessary overflow. - - Arguments - ========= - - X (input) DOUBLE PRECISION - Y (input) DOUBLE PRECISION - Z (input) DOUBLE PRECISION - X, Y and Z specify the values x, y and z. - - ===================================================================== -*/ - - - xabs = abs(*x); - yabs = abs(*y); - zabs = abs(*z__); -/* Computing MAX */ - d__1 = max(xabs,yabs); - w = max(d__1,zabs); - if (w == 0.) 
{ - ret_val = 0.; - } else { -/* Computing 2nd power */ - d__1 = xabs / w; -/* Computing 2nd power */ - d__2 = yabs / w; -/* Computing 2nd power */ - d__3 = zabs / w; - ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3); - } - return ret_val; - -/* End of DLAPY3 */ - -} /* dlapy3_ */ - -/* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v, - integer *incv, doublereal *tau, doublereal *c__, integer *ldc, - doublereal *work) -{ - /* System generated locals */ - integer c_dim1, c_offset; - doublereal d__1; - - /* Local variables */ - extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLARF applies a real elementary reflector H to a real m by n matrix - C, from either the left or the right. H is represented in the form - - H = I - tau * v * v' - - where tau is a real scalar and v is a real vector. - - If tau = 0, then H is taken to be the unit matrix. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': form H * C - = 'R': form C * H - - M (input) INTEGER - The number of rows of the matrix C. - - N (input) INTEGER - The number of columns of the matrix C. - - V (input) DOUBLE PRECISION array, dimension - (1 + (M-1)*abs(INCV)) if SIDE = 'L' - or (1 + (N-1)*abs(INCV)) if SIDE = 'R' - The vector v in the representation of H. V is not used if - TAU = 0. - - INCV (input) INTEGER - The increment between elements of v. INCV <> 0. 
- - TAU (input) DOUBLE PRECISION - The value tau in the representation of H. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by the matrix H * C if SIDE = 'L', - or C * H if SIDE = 'R'. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace) DOUBLE PRECISION array, dimension - (N) if SIDE = 'L' - or (M) if SIDE = 'R' - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --v; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - if (lsame_(side, "L")) { - -/* Form H * C */ - - if (*tau != 0.) { - -/* w := C' * v */ - - dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], - incv, &c_b29, &work[1], &c__1); - -/* C := C - v * w' */ - - d__1 = -(*tau); - dger_(m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset], - ldc); - } - } else { - -/* Form C * H */ - - if (*tau != 0.) 
{ - -/* w := C * v */ - - dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], - incv, &c_b29, &work[1], &c__1); - -/* C := C - w * v' */ - - d__1 = -(*tau); - dger_(m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset], - ldc); - } - } - return 0; - -/* End of DLARF */ - -} /* dlarf_ */ - -/* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char * - storev, integer *m, integer *n, integer *k, doublereal *v, integer * - ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc, - doublereal *work, integer *ldwork) -{ - /* System generated locals */ - integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1, - work_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *), dtrmm_(char *, char *, char *, char *, - integer *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *); - static char transt[1]; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLARFB applies a real block reflector H or its transpose H' to a - real m by n matrix C, from either the left or the right. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply H or H' from the Left - = 'R': apply H or H' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply H (No transpose) - = 'T': apply H' (Transpose) - - DIRECT (input) CHARACTER*1 - Indicates how H is formed from a product of elementary - reflectors - = 'F': H = H(1) H(2) . . . H(k) (Forward) - = 'B': H = H(k) . . . 
H(2) H(1) (Backward) - - STOREV (input) CHARACTER*1 - Indicates how the vectors which define the elementary - reflectors are stored: - = 'C': Columnwise - = 'R': Rowwise - - M (input) INTEGER - The number of rows of the matrix C. - - N (input) INTEGER - The number of columns of the matrix C. - - K (input) INTEGER - The order of the matrix T (= the number of elementary - reflectors whose product defines the block reflector). - - V (input) DOUBLE PRECISION array, dimension - (LDV,K) if STOREV = 'C' - (LDV,M) if STOREV = 'R' and SIDE = 'L' - (LDV,N) if STOREV = 'R' and SIDE = 'R' - The matrix V. See further details. - - LDV (input) INTEGER - The leading dimension of the array V. - If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); - if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); - if STOREV = 'R', LDV >= K. - - T (input) DOUBLE PRECISION array, dimension (LDT,K) - The triangular k by k matrix T in the representation of the - block reflector. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= K. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by H*C or H'*C or C*H or C*H'. - - LDC (input) INTEGER - The leading dimension of the array C. LDA >= max(1,M). - - WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K) - - LDWORK (input) INTEGER - The leading dimension of the array WORK. - If SIDE = 'L', LDWORK >= max(1,N); - if SIDE = 'R', LDWORK >= max(1,M). 
- - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - v_dim1 = *ldv; - v_offset = 1 + v_dim1 * 1; - v -= v_offset; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - work_dim1 = *ldwork; - work_offset = 1 + work_dim1 * 1; - work -= work_offset; - - /* Function Body */ - if (*m <= 0 || *n <= 0) { - return 0; - } - - if (lsame_(trans, "N")) { - *(unsigned char *)transt = 'T'; - } else { - *(unsigned char *)transt = 'N'; - } - - if (lsame_(storev, "C")) { - - if (lsame_(direct, "F")) { - -/* - Let V = ( V1 ) (first K rows) - ( V2 ) - where V1 is unit lower triangular. -*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) - - W := C1' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], - &c__1); -/* L10: */ - } - -/* W := W * V1 */ - - dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15, - &v[v_offset], ldv, &work[work_offset], ldwork); - if (*m > *k) { - -/* W := W + C2'*V2 */ - - i__1 = *m - *k; - dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, & - c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1], - ldv, &c_b15, &work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V * W' */ - - if (*m > *k) { - -/* C2 := C2 - V2 * W' */ - - i__1 = *m - *k; - dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151, - &v[*k + 1 + v_dim1], ldv, &work[work_offset], - ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc); - } - -/* W := W * V1' */ - - dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, & - v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 
= *n; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1]; -/* L20: */ - } -/* L30: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V = (C1*V1 + C2*V2) (stored in WORK) - - W := C1 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * - work_dim1 + 1], &c__1); -/* L40: */ - } - -/* W := W * V1 */ - - dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15, - &v[v_offset], ldv, &work[work_offset], ldwork); - if (*n > *k) { - -/* W := W + C2 * V2 */ - - i__1 = *n - *k; - dgemm_("No transpose", "No transpose", m, k, &i__1, & - c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + - 1 + v_dim1], ldv, &c_b15, &work[work_offset], - ldwork); - } - -/* W := W * T or W * T' */ - - dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V' */ - - if (*n > *k) { - -/* C2 := C2 - W * V2' */ - - i__1 = *n - *k; - dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151, - &work[work_offset], ldwork, &v[*k + 1 + v_dim1], - ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc); - } - -/* W := W * V1' */ - - dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, & - v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1]; -/* L50: */ - } -/* L60: */ - } - } - - } else { - -/* - Let V = ( V1 ) - ( V2 ) (last K rows) - where V2 is unit upper triangular. 
-*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) - - W := C2' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * - work_dim1 + 1], &c__1); -/* L70: */ - } - -/* W := W * V2 */ - - dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15, - &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], - ldwork); - if (*m > *k) { - -/* W := W + C1'*V1 */ - - i__1 = *m - *k; - dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, & - c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & - work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V * W' */ - - if (*m > *k) { - -/* C1 := C1 - V1 * W' */ - - i__1 = *m - *k; - dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151, - &v[v_offset], ldv, &work[work_offset], ldwork, & - c_b15, &c__[c_offset], ldc) - ; - } - -/* W := W * V2' */ - - dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, & - v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], - ldwork); - -/* C2 := C2 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j * - work_dim1]; -/* L80: */ - } -/* L90: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V = (C1*V1 + C2*V2) (stored in WORK) - - W := C2 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ - j * work_dim1 + 1], &c__1); -/* L100: */ - } - -/* W := W * V2 */ - - dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15, - &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], - ldwork); - if (*n > *k) { - -/* W := W + C1 * V1 */ - - i__1 = *n - *k; - dgemm_("No transpose", "No transpose", m, k, &i__1, & - 
c_b15, &c__[c_offset], ldc, &v[v_offset], ldv, & - c_b15, &work[work_offset], ldwork); - } - -/* W := W * T or W * T' */ - - dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V' */ - - if (*n > *k) { - -/* C1 := C1 - W * V1' */ - - i__1 = *n - *k; - dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151, - &work[work_offset], ldwork, &v[v_offset], ldv, & - c_b15, &c__[c_offset], ldc) - ; - } - -/* W := W * V2' */ - - dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, & - v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], - ldwork); - -/* C2 := C2 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j * - work_dim1]; -/* L110: */ - } -/* L120: */ - } - } - } - - } else if (lsame_(storev, "R")) { - - if (lsame_(direct, "F")) { - -/* - Let V = ( V1 V2 ) (V1: first K columns) - where V1 is unit upper triangular. 
-*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) - - W := C1' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], - &c__1); -/* L130: */ - } - -/* W := W * V1' */ - - dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, & - v[v_offset], ldv, &work[work_offset], ldwork); - if (*m > *k) { - -/* W := W + C2'*V2' */ - - i__1 = *m - *k; - dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, & - c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 + - 1], ldv, &c_b15, &work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V' * W' */ - - if (*m > *k) { - -/* C2 := C2 - V2' * W' */ - - i__1 = *m - *k; - dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[ - (*k + 1) * v_dim1 + 1], ldv, &work[work_offset], - ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc); - } - -/* W := W * V1 */ - - dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15, - &v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1]; -/* L140: */ - } -/* L150: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V' = (C1*V1' + C2*V2') (stored in WORK) - - W := C1 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * - work_dim1 + 1], &c__1); -/* L160: */ - } - -/* W := W * V1' */ - - dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, & - v[v_offset], ldv, &work[work_offset], ldwork); - if (*n > *k) { - -/* W := W + C2 * V2' */ - - i__1 = *n - *k; - dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, & - c__[(*k + 1) * c_dim1 + 1], ldc, 
&v[(*k + 1) * - v_dim1 + 1], ldv, &c_b15, &work[work_offset], - ldwork); - } - -/* W := W * T or W * T' */ - - dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V */ - - if (*n > *k) { - -/* C2 := C2 - W * V2 */ - - i__1 = *n - *k; - dgemm_("No transpose", "No transpose", m, &i__1, k, & - c_b151, &work[work_offset], ldwork, &v[(*k + 1) * - v_dim1 + 1], ldv, &c_b15, &c__[(*k + 1) * c_dim1 - + 1], ldc); - } - -/* W := W * V1 */ - - dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15, - &v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1]; -/* L170: */ - } -/* L180: */ - } - - } - - } else { - -/* - Let V = ( V1 V2 ) (V2: last K columns) - where V2 is unit lower triangular. -*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) - - W := C2' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * - work_dim1 + 1], &c__1); -/* L190: */ - } - -/* W := W * V2' */ - - dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, & - v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[work_offset] - , ldwork); - if (*m > *k) { - -/* W := W + C1'*V1' */ - - i__1 = *m - *k; - dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, & - c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & - work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V' * W' */ - - if (*m > *k) { - -/* C1 := C1 - V1' * W' */ - - i__1 = *m - *k; - dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[ - v_offset], ldv, &work[work_offset], ldwork, & - c_b15, &c__[c_offset], ldc); - } - -/* W := 
W * V2 */ - - dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15, - &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[ - work_offset], ldwork); - -/* C2 := C2 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j * - work_dim1]; -/* L200: */ - } -/* L210: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V' = (C1*V1' + C2*V2') (stored in WORK) - - W := C2 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ - j * work_dim1 + 1], &c__1); -/* L220: */ - } - -/* W := W * V2' */ - - dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, & - v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[work_offset] - , ldwork); - if (*n > *k) { - -/* W := W + C1 * V1' */ - - i__1 = *n - *k; - dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, & - c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & - work[work_offset], ldwork); - } - -/* W := W * T or W * T' */ - - dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V */ - - if (*n > *k) { - -/* C1 := C1 - W * V1 */ - - i__1 = *n - *k; - dgemm_("No transpose", "No transpose", m, &i__1, k, & - c_b151, &work[work_offset], ldwork, &v[v_offset], - ldv, &c_b15, &c__[c_offset], ldc); - } - -/* W := W * V2 */ - - dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15, - &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[ - work_offset], ldwork); - -/* C1 := C1 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j * - work_dim1]; -/* L230: */ - } -/* L240: */ - } - - } - - } - } - - return 0; - -/* End of DLARFB */ - -} /* dlarfb_ */ - -/* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x, - integer *incx, doublereal *tau) -{ - /* 
System generated locals */ - integer i__1; - doublereal d__1; - - /* Builtin functions */ - double d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer j, knt; - static doublereal beta; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - static doublereal xnorm; - - static doublereal safmin, rsafmn; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DLARFG generates a real elementary reflector H of order n, such - that - - H * ( alpha ) = ( beta ), H' * H = I. - ( x ) ( 0 ) - - where alpha and beta are scalars, and x is an (n-1)-element real - vector. H is represented in the form - - H = I - tau * ( 1 ) * ( 1 v' ) , - ( v ) - - where tau is a real scalar and v is a real (n-1)-element - vector. - - If the elements of x are all zero, then tau = 0 and H is taken to be - the unit matrix. - - Otherwise 1 <= tau <= 2. - - Arguments - ========= - - N (input) INTEGER - The order of the elementary reflector. - - ALPHA (input/output) DOUBLE PRECISION - On entry, the value alpha. - On exit, it is overwritten with the value beta. - - X (input/output) DOUBLE PRECISION array, dimension - (1+(N-2)*abs(INCX)) - On entry, the vector x. - On exit, it is overwritten with the vector v. - - INCX (input) INTEGER - The increment between elements of X. INCX > 0. - - TAU (output) DOUBLE PRECISION - The value tau. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*n <= 1) { - *tau = 0.; - return 0; - } - - i__1 = *n - 1; - xnorm = dnrm2_(&i__1, &x[1], incx); - - if (xnorm == 0.) 
{ - -/* H = I */ - - *tau = 0.; - } else { - -/* general case */ - - d__1 = dlapy2_(alpha, &xnorm); - beta = -d_sign(&d__1, alpha); - safmin = SAFEMINIMUM / EPSILON; - if (abs(beta) < safmin) { - -/* XNORM, BETA may be inaccurate; scale X and recompute them */ - - rsafmn = 1. / safmin; - knt = 0; -L10: - ++knt; - i__1 = *n - 1; - dscal_(&i__1, &rsafmn, &x[1], incx); - beta *= rsafmn; - *alpha *= rsafmn; - if (abs(beta) < safmin) { - goto L10; - } - -/* New BETA is at most 1, at least SAFMIN */ - - i__1 = *n - 1; - xnorm = dnrm2_(&i__1, &x[1], incx); - d__1 = dlapy2_(alpha, &xnorm); - beta = -d_sign(&d__1, alpha); - *tau = (beta - *alpha) / beta; - i__1 = *n - 1; - d__1 = 1. / (*alpha - beta); - dscal_(&i__1, &d__1, &x[1], incx); - -/* If ALPHA is subnormal, it may lose relative accuracy */ - - *alpha = beta; - i__1 = knt; - for (j = 1; j <= i__1; ++j) { - *alpha *= safmin; -/* L20: */ - } - } else { - *tau = (beta - *alpha) / beta; - i__1 = *n - 1; - d__1 = 1. / (*alpha - beta); - dscal_(&i__1, &d__1, &x[1], incx); - *alpha = beta; - } - } - - return 0; - -/* End of DLARFG */ - -} /* dlarfg_ */ - -/* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer * - k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t, - integer *ldt) -{ - /* System generated locals */ - integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3; - doublereal d__1; - - /* Local variables */ - static integer i__, j; - static doublereal vii; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *), dtrmv_(char *, - char *, char *, integer *, doublereal *, integer *, doublereal *, - integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLARFT forms the triangular factor T of a real block reflector H - of order n, which is defined as a product of k elementary reflectors. - - If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; - - If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. - - If STOREV = 'C', the vector which defines the elementary reflector - H(i) is stored in the i-th column of the array V, and - - H = I - V * T * V' - - If STOREV = 'R', the vector which defines the elementary reflector - H(i) is stored in the i-th row of the array V, and - - H = I - V' * T * V - - Arguments - ========= - - DIRECT (input) CHARACTER*1 - Specifies the order in which the elementary reflectors are - multiplied to form the block reflector: - = 'F': H = H(1) H(2) . . . H(k) (Forward) - = 'B': H = H(k) . . . H(2) H(1) (Backward) - - STOREV (input) CHARACTER*1 - Specifies how the vectors which define the elementary - reflectors are stored (see also Further Details): - = 'C': columnwise - = 'R': rowwise - - N (input) INTEGER - The order of the block reflector H. N >= 0. - - K (input) INTEGER - The order of the triangular factor T (= the number of - elementary reflectors). K >= 1. - - V (input/output) DOUBLE PRECISION array, dimension - (LDV,K) if STOREV = 'C' - (LDV,N) if STOREV = 'R' - The matrix V. See further details. - - LDV (input) INTEGER - The leading dimension of the array V. - If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i). - - T (output) DOUBLE PRECISION array, dimension (LDT,K) - The k by k triangular factor T of the block reflector. - If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is - lower triangular. The rest of the array is not used. 
- - LDT (input) INTEGER - The leading dimension of the array T. LDT >= K. - - Further Details - =============== - - The shape of the matrix V and the storage of the vectors which define - the H(i) is best illustrated by the following example with n = 5 and - k = 3. The elements equal to 1 are not stored; the corresponding - array elements are modified but restored on exit. The rest of the - array is not used. - - DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': - - V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) - ( v1 1 ) ( 1 v2 v2 v2 ) - ( v1 v2 1 ) ( 1 v3 v3 ) - ( v1 v2 v3 ) - ( v1 v2 v3 ) - - DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': - - V = ( v1 v2 v3 ) V = ( v1 v1 1 ) - ( v1 v2 v3 ) ( v2 v2 v2 1 ) - ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) - ( 1 v3 ) - ( 1 ) - - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - v_dim1 = *ldv; - v_offset = 1 + v_dim1 * 1; - v -= v_offset; - --tau; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - - /* Function Body */ - if (*n == 0) { - return 0; - } - - if (lsame_(direct, "F")) { - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - if (tau[i__] == 0.) 
{ - -/* H(i) = I */ - - i__2 = i__; - for (j = 1; j <= i__2; ++j) { - t[j + i__ * t_dim1] = 0.; -/* L10: */ - } - } else { - -/* general case */ - - vii = v[i__ + i__ * v_dim1]; - v[i__ + i__ * v_dim1] = 1.; - if (lsame_(storev, "C")) { - -/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */ - - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - d__1 = -tau[i__]; - dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1], - ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b29, &t[ - i__ * t_dim1 + 1], &c__1); - } else { - -/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */ - - i__2 = i__ - 1; - i__3 = *n - i__ + 1; - d__1 = -tau[i__]; - dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ * - v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, & - c_b29, &t[i__ * t_dim1 + 1], &c__1); - } - v[i__ + i__ * v_dim1] = vii; - -/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */ - - i__2 = i__ - 1; - dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[ - t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1); - t[i__ + i__ * t_dim1] = tau[i__]; - } -/* L20: */ - } - } else { - for (i__ = *k; i__ >= 1; --i__) { - if (tau[i__] == 0.) 
{ - -/* H(i) = I */ - - i__1 = *k; - for (j = i__; j <= i__1; ++j) { - t[j + i__ * t_dim1] = 0.; -/* L30: */ - } - } else { - -/* general case */ - - if (i__ < *k) { - if (lsame_(storev, "C")) { - vii = v[*n - *k + i__ + i__ * v_dim1]; - v[*n - *k + i__ + i__ * v_dim1] = 1.; - -/* - T(i+1:k,i) := - - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i) -*/ - - i__1 = *n - *k + i__; - i__2 = *k - i__; - d__1 = -tau[i__]; - dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1) - * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], & - c__1, &c_b29, &t[i__ + 1 + i__ * t_dim1], & - c__1); - v[*n - *k + i__ + i__ * v_dim1] = vii; - } else { - vii = v[i__ + (*n - *k + i__) * v_dim1]; - v[i__ + (*n - *k + i__) * v_dim1] = 1.; - -/* - T(i+1:k,i) := - - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)' -*/ - - i__1 = *k - i__; - i__2 = *n - *k + i__; - d__1 = -tau[i__]; - dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ + - 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, & - c_b29, &t[i__ + 1 + i__ * t_dim1], &c__1); - v[i__ + (*n - *k + i__) * v_dim1] = vii; - } - -/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */ - - i__1 = *k - i__; - dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ - + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * - t_dim1], &c__1) - ; - } - t[i__ + i__ * t_dim1] = tau[i__]; - } -/* L40: */ - } - } - return 0; - -/* End of DLARFT */ - -} /* dlarft_ */ - -/* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal * - v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) -{ - /* System generated locals */ - integer c_dim1, c_offset, i__1; - doublereal d__1; - - /* Local variables */ - static integer j; - static doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, - v6, v7, v8, v9, t10, v10, sum; - extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, 
integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLARFX applies a real elementary reflector H to a real m by n - matrix C, from either the left or the right. H is represented in the - form - - H = I - tau * v * v' - - where tau is a real scalar and v is a real vector. - - If tau = 0, then H is taken to be the unit matrix - - This version uses inline code if H has order < 11. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': form H * C - = 'R': form C * H - - M (input) INTEGER - The number of rows of the matrix C. - - N (input) INTEGER - The number of columns of the matrix C. - - V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L' - or (N) if SIDE = 'R' - The vector v in the representation of H. - - TAU (input) DOUBLE PRECISION - The value tau in the representation of H. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by the matrix H * C if SIDE = 'L', - or C * H if SIDE = 'R'. - - LDC (input) INTEGER - The leading dimension of the array C. LDA >= (1,M). - - WORK (workspace) DOUBLE PRECISION array, dimension - (N) if SIDE = 'L' - or (M) if SIDE = 'R' - WORK is not referenced if H has order < 11. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --v; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - if (*tau == 0.) { - return 0; - } - if (lsame_(side, "L")) { - -/* Form H * C, where H has order m. 
*/ - - switch (*m) { - case 1: goto L10; - case 2: goto L30; - case 3: goto L50; - case 4: goto L70; - case 5: goto L90; - case 6: goto L110; - case 7: goto L130; - case 8: goto L150; - case 9: goto L170; - case 10: goto L190; - } - -/* - Code for general M - - w := C'*v -*/ - - dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &c__1, & - c_b29, &work[1], &c__1); - -/* C := C - tau * v * w' */ - - d__1 = -(*tau); - dger_(m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc) - ; - goto L410; -L10: - -/* Special code for 1 x 1 Householder */ - - t1 = 1. - *tau * v[1] * v[1]; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1]; -/* L20: */ - } - goto L410; -L30: - -/* Special code for 2 x 2 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; -/* L40: */ - } - goto L410; -L50: - -/* Special code for 3 x 3 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; -/* L60: */ - } - goto L410; -L70: - -/* Special code for 4 x 4 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; -/* L80: */ - } - goto L410; -L90: - -/* Special code for 5 x 
5 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; -/* L100: */ - } - goto L410; -L110: - -/* Special code for 6 x 6 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; -/* L120: */ - } - goto L410; -L130: - -/* Special code for 7 x 7 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - 
c__[j * c_dim1 + 7] -= sum * t7; -/* L140: */ - } - goto L410; -L150: - -/* Special code for 8 x 8 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7] + v8 * c__[j * c_dim1 + 8]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - c__[j * c_dim1 + 8] -= sum * t8; -/* L160: */ - } - goto L410; -L170: - -/* Special code for 9 x 9 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * - c_dim1 + 9]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - c__[j * c_dim1 + 8] -= sum * t8; - c__[j * c_dim1 + 9] -= sum * t9; -/* L180: */ - } - goto L410; -L190: - -/* Special code for 10 x 10 Householder */ - - v1 = v[1]; - 
t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - v10 = v[10]; - t10 = *tau * v10; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * - c_dim1 + 9] + v10 * c__[j * c_dim1 + 10]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - c__[j * c_dim1 + 8] -= sum * t8; - c__[j * c_dim1 + 9] -= sum * t9; - c__[j * c_dim1 + 10] -= sum * t10; -/* L200: */ - } - goto L410; - } else { - -/* Form C * H, where H has order n. */ - - switch (*n) { - case 1: goto L210; - case 2: goto L230; - case 3: goto L250; - case 4: goto L270; - case 5: goto L290; - case 6: goto L310; - case 7: goto L330; - case 8: goto L350; - case 9: goto L370; - case 10: goto L390; - } - -/* - Code for general N - - w := C * v -*/ - - dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], & - c__1, &c_b29, &work[1], &c__1); - -/* C := C - tau * w * v' */ - - d__1 = -(*tau); - dger_(m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc) - ; - goto L410; -L210: - -/* Special code for 1 x 1 Householder */ - - t1 = 1. 
- *tau * v[1] * v[1]; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - c__[j + c_dim1] = t1 * c__[j + c_dim1]; -/* L220: */ - } - goto L410; -L230: - -/* Special code for 2 x 2 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; -/* L240: */ - } - goto L410; -L250: - -/* Special code for 3 x 3 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + c_dim1 * 3]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; -/* L260: */ - } - goto L410; -L270: - -/* Special code for 4 x 4 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; -/* L280: */ - } - goto L410; -L290: - -/* Special code for 5 x 5 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] + - v5 * c__[j + c_dim1 * 5]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; - c__[j + c_dim1 * 5] 
-= sum * t5; -/* L300: */ - } - goto L410; -L310: - -/* Special code for 6 x 6 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] + - v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; -/* L320: */ - } - goto L410; -L330: - -/* Special code for 7 x 7 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] + - v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * - c__[j + c_dim1 * 7]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; -/* L340: */ - } - goto L410; -L350: - -/* Special code for 8 x 8 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + 
c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] + - v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * - c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; - c__[j + ((c_dim1) << (3))] -= sum * t8; -/* L360: */ - } - goto L410; -L370: - -/* Special code for 9 x 9 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] + - v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * - c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] + - v9 * c__[j + c_dim1 * 9]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; - c__[j + ((c_dim1) << (3))] -= sum * t8; - c__[j + c_dim1 * 9] -= sum * t9; -/* L380: */ - } - goto L410; -L390: - -/* Special code for 10 x 10 Householder */ - - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - v10 = v[10]; - t10 = *tau * v10; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3 - * c__[j 
+ c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] + - v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * - c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] + - v9 * c__[j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10]; - c__[j + c_dim1] -= sum * t1; - c__[j + ((c_dim1) << (1))] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + ((c_dim1) << (2))] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; - c__[j + ((c_dim1) << (3))] -= sum * t8; - c__[j + c_dim1 * 9] -= sum * t9; - c__[j + c_dim1 * 10] -= sum * t10; -/* L400: */ - } - goto L410; - } -L410: - return 0; - -/* End of DLARFX */ - -} /* dlarfx_ */ - -/* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs, - doublereal *sn, doublereal *r__) -{ - /* Initialized data */ - - static logical first = TRUE_; - - /* System generated locals */ - integer i__1; - doublereal d__1, d__2; - - /* Builtin functions */ - double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal); - - /* Local variables */ - static integer i__; - static doublereal f1, g1, eps, scale; - static integer count; - static doublereal safmn2, safmx2; - - static doublereal safmin; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DLARTG generate a plane rotation so that - - [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1. - [ -SN CS ] [ G ] [ 0 ] - - This is a slower, more accurate version of the BLAS1 routine DROTG, - with the following other differences: - F and G are unchanged on return. - If G=0, then CS=1 and SN=0. - If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any - floating point operations (saves work in DBDSQR when - there are zeros on the diagonal). - - If F exceeds G in magnitude, CS will be positive. 
- - Arguments - ========= - - F (input) DOUBLE PRECISION - The first component of vector to be rotated. - - G (input) DOUBLE PRECISION - The second component of vector to be rotated. - - CS (output) DOUBLE PRECISION - The cosine of the rotation. - - SN (output) DOUBLE PRECISION - The sine of the rotation. - - R (output) DOUBLE PRECISION - The nonzero component of the rotated vector. - - ===================================================================== -*/ - - - if (first) { - first = FALSE_; - safmin = SAFEMINIMUM; - eps = EPSILON; - d__1 = BASE; - i__1 = (integer) (log(safmin / eps) / log(BASE) / - 2.); - safmn2 = pow_di(&d__1, &i__1); - safmx2 = 1. / safmn2; - } - if (*g == 0.) { - *cs = 1.; - *sn = 0.; - *r__ = *f; - } else if (*f == 0.) { - *cs = 0.; - *sn = 1.; - *r__ = *g; - } else { - f1 = *f; - g1 = *g; -/* Computing MAX */ - d__1 = abs(f1), d__2 = abs(g1); - scale = max(d__1,d__2); - if (scale >= safmx2) { - count = 0; -L10: - ++count; - f1 *= safmn2; - g1 *= safmn2; -/* Computing MAX */ - d__1 = abs(f1), d__2 = abs(g1); - scale = max(d__1,d__2); - if (scale >= safmx2) { - goto L10; - } -/* Computing 2nd power */ - d__1 = f1; -/* Computing 2nd power */ - d__2 = g1; - *r__ = sqrt(d__1 * d__1 + d__2 * d__2); - *cs = f1 / *r__; - *sn = g1 / *r__; - i__1 = count; - for (i__ = 1; i__ <= i__1; ++i__) { - *r__ *= safmx2; -/* L20: */ - } - } else if (scale <= safmn2) { - count = 0; -L30: - ++count; - f1 *= safmx2; - g1 *= safmx2; -/* Computing MAX */ - d__1 = abs(f1), d__2 = abs(g1); - scale = max(d__1,d__2); - if (scale <= safmn2) { - goto L30; - } -/* Computing 2nd power */ - d__1 = f1; -/* Computing 2nd power */ - d__2 = g1; - *r__ = sqrt(d__1 * d__1 + d__2 * d__2); - *cs = f1 / *r__; - *sn = g1 / *r__; - i__1 = count; - for (i__ = 1; i__ <= i__1; ++i__) { - *r__ *= safmn2; -/* L40: */ - } - } else { -/* Computing 2nd power */ - d__1 = f1; -/* Computing 2nd power */ - d__2 = g1; - *r__ = sqrt(d__1 * d__1 + d__2 * d__2); - *cs = f1 / *r__; - *sn = g1 / *r__; 
- } - if ((abs(*f) > abs(*g) && *cs < 0.)) { - *cs = -(*cs); - *sn = -(*sn); - *r__ = -(*r__); - } - } - return 0; - -/* End of DLARTG */ - -} /* dlartg_ */ - -/* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__, - doublereal *ssmin, doublereal *ssmax) -{ - /* System generated locals */ - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal c__, fa, ga, ha, as, at, au, fhmn, fhmx; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DLAS2 computes the singular values of the 2-by-2 matrix - [ F G ] - [ 0 H ]. - On return, SSMIN is the smaller singular value and SSMAX is the - larger singular value. - - Arguments - ========= - - F (input) DOUBLE PRECISION - The (1,1) element of the 2-by-2 matrix. - - G (input) DOUBLE PRECISION - The (1,2) element of the 2-by-2 matrix. - - H (input) DOUBLE PRECISION - The (2,2) element of the 2-by-2 matrix. - - SSMIN (output) DOUBLE PRECISION - The smaller singular value. - - SSMAX (output) DOUBLE PRECISION - The larger singular value. - - Further Details - =============== - - Barring over/underflow, all output quantities are correct to within - a few units in the last place (ulps), even in the absence of a guard - digit in addition/subtraction. - - In IEEE arithmetic, the code works correctly if one matrix element is - infinite. - - Overflow will not occur unless the largest singular value itself - overflows, or is within a few ulps of overflow. (On machines with - partial overflow, like the Cray, overflow may occur if the largest - singular value is within a factor of 2 of overflow.) - - Underflow is harmless if underflow is gradual. Otherwise, results - may correspond to a matrix modified by perturbations of size near - the underflow threshold. 
- - ==================================================================== -*/ - - - fa = abs(*f); - ga = abs(*g); - ha = abs(*h__); - fhmn = min(fa,ha); - fhmx = max(fa,ha); - if (fhmn == 0.) { - *ssmin = 0.; - if (fhmx == 0.) { - *ssmax = ga; - } else { -/* Computing 2nd power */ - d__1 = min(fhmx,ga) / max(fhmx,ga); - *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.); - } - } else { - if (ga < fhmx) { - as = fhmn / fhmx + 1.; - at = (fhmx - fhmn) / fhmx; -/* Computing 2nd power */ - d__1 = ga / fhmx; - au = d__1 * d__1; - c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au)); - *ssmin = fhmn * c__; - *ssmax = fhmx / c__; - } else { - au = fhmx / ga; - if (au == 0.) { - -/* - Avoid possible harmful underflow if exponent range - asymmetric (true SSMIN may not underflow even if - AU underflows) -*/ - - *ssmin = fhmn * fhmx / ga; - *ssmax = ga; - } else { - as = fhmn / fhmx + 1.; - at = (fhmx - fhmn) / fhmx; -/* Computing 2nd power */ - d__1 = as * au; -/* Computing 2nd power */ - d__2 = at * au; - c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.)); - *ssmin = fhmn * c__ * au; - *ssmin += *ssmin; - *ssmax = ga / (c__ + c__); - } - } - } - return 0; - -/* End of DLAS2 */ - -} /* dlas2_ */ - -/* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku, - doublereal *cfrom, doublereal *cto, integer *m, integer *n, - doublereal *a, integer *lda, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - - /* Local variables */ - static integer i__, j, k1, k2, k3, k4; - static doublereal mul, cto1; - static logical done; - static doublereal ctoc; - extern logical lsame_(char *, char *); - static integer itype; - static doublereal cfrom1; - - static doublereal cfromc; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal bignum, smlnum; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DLASCL multiplies the M by N real matrix A by the real scalar - CTO/CFROM. This is done without over/underflow as long as the final - result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that - A may be full, upper triangular, lower triangular, upper Hessenberg, - or banded. - - Arguments - ========= - - TYPE (input) CHARACTER*1 - TYPE indices the storage type of the input matrix. - = 'G': A is a full matrix. - = 'L': A is a lower triangular matrix. - = 'U': A is an upper triangular matrix. - = 'H': A is an upper Hessenberg matrix. - = 'B': A is a symmetric band matrix with lower bandwidth KL - and upper bandwidth KU and with the only the lower - half stored. - = 'Q': A is a symmetric band matrix with lower bandwidth KL - and upper bandwidth KU and with the only the upper - half stored. - = 'Z': A is a band matrix with lower bandwidth KL and upper - bandwidth KU. - - KL (input) INTEGER - The lower bandwidth of A. Referenced only if TYPE = 'B', - 'Q' or 'Z'. - - KU (input) INTEGER - The upper bandwidth of A. Referenced only if TYPE = 'B', - 'Q' or 'Z'. - - CFROM (input) DOUBLE PRECISION - CTO (input) DOUBLE PRECISION - The matrix A is multiplied by CTO/CFROM. A(I,J) is computed - without over/underflow if the final result CTO*A(I,J)/CFROM - can be represented without over/underflow. CFROM must be - nonzero. - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,M) - The matrix to be multiplied by CTO/CFROM. See TYPE for the - storage type. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - INFO (output) INTEGER - 0 - successful exit - <0 - if INFO = -i, the i-th argument had an illegal value. 
- - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - *info = 0; - - if (lsame_(type__, "G")) { - itype = 0; - } else if (lsame_(type__, "L")) { - itype = 1; - } else if (lsame_(type__, "U")) { - itype = 2; - } else if (lsame_(type__, "H")) { - itype = 3; - } else if (lsame_(type__, "B")) { - itype = 4; - } else if (lsame_(type__, "Q")) { - itype = 5; - } else if (lsame_(type__, "Z")) { - itype = 6; - } else { - itype = -1; - } - - if (itype == -1) { - *info = -1; - } else if (*cfrom == 0.) { - *info = -4; - } else if (*m < 0) { - *info = -6; - } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m)) - { - *info = -7; - } else if ((itype <= 3 && *lda < max(1,*m))) { - *info = -9; - } else if (itype >= 4) { -/* Computing MAX */ - i__1 = *m - 1; - if (*kl < 0 || *kl > max(i__1,0)) { - *info = -2; - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = *n - 1; - if (*ku < 0 || *ku > max(i__1,0) || ((itype == 4 || itype == 5) && - *kl != *ku)) { - *info = -3; - } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda < - *ku + 1) || (itype == 6 && *lda < ((*kl) << (1)) + *ku + - 1)) { - *info = -9; - } - } - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASCL", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0 || *m == 0) { - return 0; - } - -/* Get machine parameters */ - - smlnum = SAFEMINIMUM; - bignum = 1. 
/ smlnum; - - cfromc = *cfrom; - ctoc = *cto; - -L10: - cfrom1 = cfromc * smlnum; - cto1 = ctoc / bignum; - if ((abs(cfrom1) > abs(ctoc) && ctoc != 0.)) { - mul = smlnum; - done = FALSE_; - cfromc = cfrom1; - } else if (abs(cto1) > abs(cfromc)) { - mul = bignum; - done = FALSE_; - ctoc = cto1; - } else { - mul = ctoc / cfromc; - done = TRUE_; - } - - if (itype == 0) { - -/* Full matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L20: */ - } -/* L30: */ - } - - } else if (itype == 1) { - -/* Lower triangular matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L40: */ - } -/* L50: */ - } - - } else if (itype == 2) { - -/* Upper triangular matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = min(j,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L60: */ - } -/* L70: */ - } - - } else if (itype == 3) { - -/* Upper Hessenberg matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = j + 1; - i__2 = min(i__3,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L80: */ - } -/* L90: */ - } - - } else if (itype == 4) { - -/* Lower half of a symmetric band matrix */ - - k3 = *kl + 1; - k4 = *n + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = k3, i__4 = k4 - j; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L100: */ - } -/* L110: */ - } - - } else if (itype == 5) { - -/* Upper half of a symmetric band matrix */ - - k1 = *ku + 2; - k3 = *ku + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MAX */ - i__2 = k1 - j; - i__3 = k3; - for (i__ = max(i__2,1); i__ <= i__3; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L120: */ - } -/* L130: */ - } - - } else if (itype == 6) { - -/* Band matrix */ - - k1 = *kl + *ku + 2; - k2 
= *kl + 1; - k3 = ((*kl) << (1)) + *ku + 1; - k4 = *kl + *ku + 1 + *m; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MAX */ - i__3 = k1 - j; -/* Computing MIN */ - i__4 = k3, i__5 = k4 - j; - i__2 = min(i__4,i__5); - for (i__ = max(i__3,k2); i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] *= mul; -/* L140: */ - } -/* L150: */ - } - - } - - if (! done) { - goto L10; - } - - return 0; - -/* End of DLASCL */ - -} /* dlascl_ */ - -/* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__, - doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer * - ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer * - info) -{ - /* System generated locals */ - integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; - - /* Builtin functions */ - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf, - iwk, lvl, ndb1, nlp1, nrp1; - static doublereal beta; - static integer idxq, nlvl; - static doublereal alpha; - static integer inode, ndiml, idxqc, ndimr, itemp, sqrei; - extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *, - doublereal *, integer *, integer *, integer *, doublereal *, - integer *), dlasdq_(char *, integer *, integer *, integer *, - integer *, integer *, doublereal *, doublereal *, doublereal *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *), dlasdt_(integer *, integer *, - integer *, integer *, integer *, integer *, integer *), xerbla_( - char *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - Using a divide and conquer approach, DLASD0 computes the singular - value decomposition (SVD) of a real upper bidiagonal N-by-M - matrix B with diagonal D and offdiagonal E, where M = N + SQRE. - The algorithm computes orthogonal matrices U and VT such that - B = U * S * VT. The singular values S are overwritten on D. - - A related subroutine, DLASDA, computes only the singular values, - and optionally, the singular vectors in compact form. - - Arguments - ========= - - N (input) INTEGER - On entry, the row dimension of the upper bidiagonal matrix. - This is also the dimension of the main diagonal array D. - - SQRE (input) INTEGER - Specifies the column dimension of the bidiagonal matrix. - = 0: The bidiagonal matrix has column dimension M = N; - = 1: The bidiagonal matrix has column dimension M = N+1; - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry D contains the main diagonal of the bidiagonal - matrix. - On exit D, if INFO = 0, contains its singular values. - - E (input) DOUBLE PRECISION array, dimension (M-1) - Contains the subdiagonal entries of the bidiagonal matrix. - On exit, E has been destroyed. - - U (output) DOUBLE PRECISION array, dimension at least (LDQ, N) - On exit, U contains the left singular vectors. - - LDU (input) INTEGER - On entry, leading dimension of U. - - VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M) - On exit, VT' contains the right singular vectors. - - LDVT (input) INTEGER - On entry, leading dimension of VT. - - SMLSIZ (input) INTEGER - On entry, maximum size of the subproblems at the - bottom of the computation tree. - - IWORK INTEGER work array. - Dimension must be at least (8 * N) - - WORK DOUBLE PRECISION work array. - Dimension must be at least (3 * M**2 + 2 * M) - - INFO (output) INTEGER - = 0: successful exit. 
- < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an singular value did not converge - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - --iwork; - --work; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -1; - } else if (*sqre < 0 || *sqre > 1) { - *info = -2; - } - - m = *n + *sqre; - - if (*ldu < *n) { - *info = -6; - } else if (*ldvt < m) { - *info = -8; - } else if (*smlsiz < 3) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD0", &i__1); - return 0; - } - -/* If the input matrix is too small, call DLASDQ to find the SVD. */ - - if (*n <= *smlsiz) { - dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset], - ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info); - return 0; - } - -/* Set up the computation tree. */ - - inode = 1; - ndiml = inode + *n; - ndimr = ndiml + *n; - idxq = ndimr + *n; - iwk = idxq + *n; - dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], - smlsiz); - -/* - For the nodes on bottom level of the tree, solve - their subproblems by DLASDQ. 
-*/ - - ndb1 = (nd + 1) / 2; - ncc = 0; - i__1 = nd; - for (i__ = ndb1; i__ <= i__1; ++i__) { - -/* - IC : center row of each node - NL : number of rows of left subproblem - NR : number of rows of right subproblem - NLF: starting row of the left subproblem - NRF: starting row of the right subproblem -*/ - - i1 = i__ - 1; - ic = iwork[inode + i1]; - nl = iwork[ndiml + i1]; - nlp1 = nl + 1; - nr = iwork[ndimr + i1]; - nrp1 = nr + 1; - nlf = ic - nl; - nrf = ic + 1; - sqrei = 1; - dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[ - nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[ - nlf + nlf * u_dim1], ldu, &work[1], info); - if (*info != 0) { - return 0; - } - itemp = idxq + nlf - 2; - i__2 = nl; - for (j = 1; j <= i__2; ++j) { - iwork[itemp + j] = j; -/* L10: */ - } - if (i__ == nd) { - sqrei = *sqre; - } else { - sqrei = 1; - } - nrp1 = nr + sqrei; - dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[ - nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[ - nrf + nrf * u_dim1], ldu, &work[1], info); - if (*info != 0) { - return 0; - } - itemp = idxq + ic; - i__2 = nr; - for (j = 1; j <= i__2; ++j) { - iwork[itemp + j - 1] = j; -/* L20: */ - } -/* L30: */ - } - -/* Now conquer each subproblem bottom-up. */ - - for (lvl = nlvl; lvl >= 1; --lvl) { - -/* - Find the first node LF and last node LL on the - current level LVL. 
-*/ - - if (lvl == 1) { - lf = 1; - ll = 1; - } else { - i__1 = lvl - 1; - lf = pow_ii(&c__2, &i__1); - ll = ((lf) << (1)) - 1; - } - i__1 = ll; - for (i__ = lf; i__ <= i__1; ++i__) { - im1 = i__ - 1; - ic = iwork[inode + im1]; - nl = iwork[ndiml + im1]; - nr = iwork[ndimr + im1]; - nlf = ic - nl; - if ((*sqre == 0 && i__ == ll)) { - sqrei = *sqre; - } else { - sqrei = 1; - } - idxqc = idxq + nlf - 1; - alpha = d__[ic]; - beta = e[ic]; - dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf * - u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[ - idxqc], &iwork[iwk], &work[1], info); - if (*info != 0) { - return 0; - } -/* L40: */ - } -/* L50: */ - } - - return 0; - -/* End of DLASD0 */ - -} /* dlasd0_ */ - -/* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre, - doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u, - integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer * - iwork, doublereal *work, integer *info) -{ - /* System generated locals */ - integer u_dim1, u_offset, vt_dim1, vt_offset, i__1; - doublereal d__1, d__2; - - /* Local variables */ - static integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2, - idxc, idxp, ldvt2; - extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, integer *, - integer *, integer *, integer *, integer *, integer *), dlasd3_( - integer *, integer *, integer *, integer *, doublereal *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, doublereal *, integer *), - dlascl_(char *, integer *, integer *, doublereal *, doublereal *, - integer *, integer *, doublereal *, integer *, integer *), - dlamrg_(integer *, integer *, doublereal *, integer *, 
integer *, - integer *); - static integer isigma; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal orgnrm; - static integer coltyp; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B, - where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0. - - A related subroutine DLASD7 handles the case in which the singular - values (and the singular vectors in factored form) are desired. - - DLASD1 computes the SVD as follows: - - ( D1(in) 0 0 0 ) - B = U(in) * ( Z1' a Z2' b ) * VT(in) - ( 0 0 D2(in) 0 ) - - = U(out) * ( D(out) 0) * VT(out) - - where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M - with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros - elsewhere; and the entry b is empty if SQRE = 0. - - The left singular vectors of the original matrix are stored in U, and - the transpose of the right singular vectors are stored in VT, and the - singular values are in D. The algorithm consists of three stages: - - The first stage consists of deflating the size of the problem - when there are multiple singular values or when there are zeros in - the Z vector. For each such occurence the dimension of the - secular equation problem is reduced by one. This stage is - performed by the routine DLASD2. - - The second stage consists of calculating the updated - singular values. This is done by finding the square roots of the - roots of the secular equation via the routine DLASD4 (as called - by DLASD3). This routine also calculates the singular vectors of - the current problem. - - The final stage consists of computing the updated singular vectors - directly using the updated singular values. 
The singular vectors - for the current problem are multiplied with the singular vectors - from the overall problem. - - Arguments - ========= - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. - - The bidiagonal matrix has row dimension N = NL + NR + 1, - and column dimension M = N + SQRE. - - D (input/output) DOUBLE PRECISION array, - dimension (N = NL+NR+1). - On entry D(1:NL,1:NL) contains the singular values of the - upper block; and D(NL+2:N) contains the singular values of - the lower block. On exit D(1:N) contains the singular values - of the modified matrix. - - ALPHA (input) DOUBLE PRECISION - Contains the diagonal element associated with the added row. - - BETA (input) DOUBLE PRECISION - Contains the off-diagonal element associated with the added - row. - - U (input/output) DOUBLE PRECISION array, dimension(LDU,N) - On entry U(1:NL, 1:NL) contains the left singular vectors of - the upper block; U(NL+2:N, NL+2:N) contains the left singular - vectors of the lower block. On exit U contains the left - singular vectors of the bidiagonal matrix. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= max( 1, N ). - - VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) - where M = N + SQRE. - On entry VT(1:NL+1, 1:NL+1)' contains the right singular - vectors of the upper block; VT(NL+2:M, NL+2:M)' contains - the right singular vectors of the lower block. On exit - VT' contains the right singular vectors of the - bidiagonal matrix. - - LDVT (input) INTEGER - The leading dimension of the array VT. LDVT >= max( 1, M ). - - IDXQ (output) INTEGER array, dimension(N) - This contains the permutation which will reintegrate the - subproblem just solved back into sorted order, i.e. 
- D( IDXQ( I = 1, N ) ) will be in ascending order. - - IWORK (workspace) INTEGER array, dimension( 4 * N ) - - WORK (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M ) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an singular value did not converge - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - --idxq; - --iwork; - --work; - - /* Function Body */ - *info = 0; - - if (*nl < 1) { - *info = -1; - } else if (*nr < 1) { - *info = -2; - } else if (*sqre < 0 || *sqre > 1) { - *info = -3; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD1", &i__1); - return 0; - } - - n = *nl + *nr + 1; - m = n + *sqre; - -/* - The following values are for bookkeeping purposes only. They are - integer pointers which indicate the portion of the workspace - used by a particular array in DLASD2 and DLASD3. -*/ - - ldu2 = n; - ldvt2 = m; - - iz = 1; - isigma = iz + m; - iu2 = isigma + n; - ivt2 = iu2 + ldu2 * n; - iq = ivt2 + ldvt2 * m; - - idx = 1; - idxc = idx + n; - coltyp = idxc + n; - idxp = coltyp + n; - -/* - Scale. - - Computing MAX -*/ - d__1 = abs(*alpha), d__2 = abs(*beta); - orgnrm = max(d__1,d__2); - d__[*nl + 1] = 0.; - i__1 = n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { - orgnrm = (d__1 = d__[i__], abs(d__1)); - } -/* L10: */ - } - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info); - *alpha /= orgnrm; - *beta /= orgnrm; - -/* Deflate singular values. 
*/ - - dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset], - ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, & - work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], & - idxq[1], &iwork[coltyp], info); - -/* Solve Secular Equation and update singular vectors. */ - - ldq = k; - dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[ - u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[ - ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info); - if (*info != 0) { - return 0; - } - -/* Unscale. */ - - dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info); - -/* Prepare the IDXQ sorting permutation. */ - - n1 = k; - n2 = n - k; - dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]); - - return 0; - -/* End of DLASD1 */ - -} /* dlasd1_ */ - -/* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer - *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal * - beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, - doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2, - integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer * - idxq, integer *coltyp, integer *info) -{ - /* System generated locals */ - integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, - vt2_dim1, vt2_offset, i__1; - doublereal d__1, d__2; - - /* Local variables */ - static doublereal c__; - static integer i__, j, m, n; - static doublereal s; - static integer k2; - static doublereal z1; - static integer ct, jp; - static doublereal eps, tau, tol; - static integer psm[4], nlp1, nlp2, idxi, idxj; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - static integer ctot[4], idxjp; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer jprev; - - extern /* Subroutine */ int dlamrg_(integer *, 
integer *, doublereal *, - integer *, integer *, integer *), dlacpy_(char *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, - doublereal *, doublereal *, integer *), xerbla_(char *, - integer *); - static doublereal hlftol; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASD2 merges the two sets of singular values together into a single - sorted set. Then it tries to deflate the size of the problem. - There are two ways in which deflation can occur: when two or more - singular values are close together or if there is a tiny entry in the - Z vector. For each such occurrence the order of the related secular - equation problem is reduced by one. - - DLASD2 is called from DLASD1. - - Arguments - ========= - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. - - The bidiagonal matrix has N = NL + NR + 1 rows and - M = N + SQRE >= N columns. - - K (output) INTEGER - Contains the dimension of the non-deflated matrix, - This is the order of the related secular equation. 1 <= K <=N. - - D (input/output) DOUBLE PRECISION array, dimension(N) - On entry D contains the singular values of the two submatrices - to be combined. On exit D contains the trailing (N-K) updated - singular values (those which were deflated) sorted into - increasing order. - - ALPHA (input) DOUBLE PRECISION - Contains the diagonal element associated with the added row. - - BETA (input) DOUBLE PRECISION - Contains the off-diagonal element associated with the added - row. 
- - U (input/output) DOUBLE PRECISION array, dimension(LDU,N) - On entry U contains the left singular vectors of two - submatrices in the two square blocks with corners at (1,1), - (NL, NL), and (NL+2, NL+2), (N,N). - On exit U contains the trailing (N-K) updated left singular - vectors (those which were deflated) in its last N-K columns. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= N. - - Z (output) DOUBLE PRECISION array, dimension(N) - On exit Z contains the updating row vector in the secular - equation. - - DSIGMA (output) DOUBLE PRECISION array, dimension (N) - Contains a copy of the diagonal elements (K-1 singular values - and one zero) in the secular equation. - - U2 (output) DOUBLE PRECISION array, dimension(LDU2,N) - Contains a copy of the first K-1 left singular vectors which - will be used by DLASD3 in a matrix multiply (DGEMM) to solve - for the new left singular vectors. U2 is arranged into four - blocks. The first block contains a column with 1 at NL+1 and - zero everywhere else; the second block contains non-zero - entries only at and above NL; the third contains non-zero - entries only below NL+1; and the fourth is dense. - - LDU2 (input) INTEGER - The leading dimension of the array U2. LDU2 >= N. - - VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) - On entry VT' contains the right singular vectors of two - submatrices in the two square blocks with corners at (1,1), - (NL+1, NL+1), and (NL+2, NL+2), (M,M). - On exit VT' contains the trailing (N-K) updated right singular - vectors (those which were deflated) in its last N-K columns. - In case SQRE =1, the last row of VT spans the right null - space. - - LDVT (input) INTEGER - The leading dimension of the array VT. LDVT >= M. - - VT2 (output) DOUBLE PRECISION array, dimension(LDVT2,N) - VT2' contains a copy of the first K right singular vectors - which will be used by DLASD3 in a matrix multiply (DGEMM) to - solve for the new right singular vectors. 
VT2 is arranged into - three blocks. The first block contains a row that corresponds - to the special 0 diagonal element in SIGMA; the second block - contains non-zeros only at and before NL +1; the third block - contains non-zeros only at and after NL +2. - - LDVT2 (input) INTEGER - The leading dimension of the array VT2. LDVT2 >= M. - - IDXP (workspace) INTEGER array, dimension(N) - This will contain the permutation used to place deflated - values of D at the end of the array. On output IDXP(2:K) - points to the nondeflated D-values and IDXP(K+1:N) - points to the deflated singular values. - - IDX (workspace) INTEGER array, dimension(N) - This will contain the permutation used to sort the contents of - D into ascending order. - - IDXC (output) INTEGER array, dimension(N) - This will contain the permutation used to arrange the columns - of the deflated U matrix into three groups: the first group - contains non-zero entries only at and above NL, the second - contains non-zero entries only below NL+2, and the third is - dense. - - COLTYP (workspace/output) INTEGER array, dimension(N) - As workspace, this will contain a label which will indicate - which of the following types a column in the U2 matrix or a - row in the VT2 matrix is: - 1 : non-zero in the upper half only - 2 : non-zero in the lower half only - 3 : dense - 4 : deflated - - On exit, it is an array of dimension 4, with COLTYP(I) being - the dimension of the I-th type columns. - - IDXQ (input) INTEGER array, dimension(N) - This contains the permutation which separately sorts the two - sub-problems in D into ascending order. Note that entries in - the first hlaf of this permutation must first be moved one - position backward; and entries in the second half - must first have NL+1 added to their values. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --z__; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - --dsigma; - u2_dim1 = *ldu2; - u2_offset = 1 + u2_dim1 * 1; - u2 -= u2_offset; - vt2_dim1 = *ldvt2; - vt2_offset = 1 + vt2_dim1 * 1; - vt2 -= vt2_offset; - --idxp; - --idx; - --idxc; - --idxq; - --coltyp; - - /* Function Body */ - *info = 0; - - if (*nl < 1) { - *info = -1; - } else if (*nr < 1) { - *info = -2; - } else if ((*sqre != 1 && *sqre != 0)) { - *info = -3; - } - - n = *nl + *nr + 1; - m = n + *sqre; - - if (*ldu < n) { - *info = -10; - } else if (*ldvt < m) { - *info = -12; - } else if (*ldu2 < n) { - *info = -15; - } else if (*ldvt2 < m) { - *info = -17; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD2", &i__1); - return 0; - } - - nlp1 = *nl + 1; - nlp2 = *nl + 2; - -/* - Generate the first part of the vector Z; and move the singular - values in the first part of D one position backward. -*/ - - z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1]; - z__[1] = z1; - for (i__ = *nl; i__ >= 1; --i__) { - z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1]; - d__[i__ + 1] = d__[i__]; - idxq[i__ + 1] = idxq[i__] + 1; -/* L10: */ - } - -/* Generate the second part of the vector Z. */ - - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1]; -/* L20: */ - } - -/* Initialize some reference arrays. 
*/ - - i__1 = nlp1; - for (i__ = 2; i__ <= i__1; ++i__) { - coltyp[i__] = 1; -/* L30: */ - } - i__1 = n; - for (i__ = nlp2; i__ <= i__1; ++i__) { - coltyp[i__] = 2; -/* L40: */ - } - -/* Sort the singular values into increasing order */ - - i__1 = n; - for (i__ = nlp2; i__ <= i__1; ++i__) { - idxq[i__] += nlp1; -/* L50: */ - } - -/* - DSIGMA, IDXC, IDXC, and the first column of U2 - are used as storage space. -*/ - - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - dsigma[i__] = d__[idxq[i__]]; - u2[i__ + u2_dim1] = z__[idxq[i__]]; - idxc[i__] = coltyp[idxq[i__]]; -/* L60: */ - } - - dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]); - - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - idxi = idx[i__] + 1; - d__[i__] = dsigma[idxi]; - z__[i__] = u2[idxi + u2_dim1]; - coltyp[i__] = idxc[idxi]; -/* L70: */ - } - -/* Calculate the allowable deflation tolerance */ - - eps = EPSILON; -/* Computing MAX */ - d__1 = abs(*alpha), d__2 = abs(*beta); - tol = max(d__1,d__2); -/* Computing MAX */ - d__2 = (d__1 = d__[n], abs(d__1)); - tol = eps * 8. * max(d__2,tol); - -/* - There are 2 kinds of deflation -- first a value in the z-vector - is small, second two (or more) singular values are very close - together (their difference is small). - - If the value in the z-vector is small, we simply permute the - array so that the corresponding singular value is moved to the - end. - - If two values in the D-vector are close, we perform a two-sided - rotation designed to make one of the corresponding z-vector - entries zero, and then permute the array so that the deflated - singular value is moved to the end. - - If there are multiple singular values then the problem deflates. - Here the number of equal singular values are found. As each equal - singular value is found, an elementary reflector is computed to - rotate the corresponding singular subspace so that the - corresponding components of Z are zero in this new basis. 
-*/ - - *k = 1; - k2 = n + 1; - i__1 = n; - for (j = 2; j <= i__1; ++j) { - if ((d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - idxp[k2] = j; - coltyp[j] = 4; - if (j == n) { - goto L120; - } - } else { - jprev = j; - goto L90; - } -/* L80: */ - } -L90: - j = jprev; -L100: - ++j; - if (j > n) { - goto L110; - } - if ((d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - idxp[k2] = j; - coltyp[j] = 4; - } else { - -/* Check if singular values are close enough to allow deflation. */ - - if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { - -/* Deflation is possible. */ - - s = z__[jprev]; - c__ = z__[j]; - -/* - Find sqrt(a**2+b**2) without overflow or - destructive underflow. -*/ - - tau = dlapy2_(&c__, &s); - c__ /= tau; - s = -s / tau; - z__[j] = tau; - z__[jprev] = 0.; - -/* - Apply back the Givens rotation to the left and right - singular vector matrices. -*/ - - idxjp = idxq[idx[jprev] + 1]; - idxj = idxq[idx[j] + 1]; - if (idxjp <= nlp1) { - --idxjp; - } - if (idxj <= nlp1) { - --idxj; - } - drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], & - c__1, &c__, &s); - drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, & - c__, &s); - if (coltyp[j] != coltyp[jprev]) { - coltyp[j] = 3; - } - coltyp[jprev] = 4; - --k2; - idxp[k2] = jprev; - jprev = j; - } else { - ++(*k); - u2[*k + u2_dim1] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; - jprev = j; - } - } - goto L100; -L110: - -/* Record the last singular value. */ - - ++(*k); - u2[*k + u2_dim1] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; - -L120: - -/* - Count up the total number of the various types of columns, then - form a permutation which positions the four column types into - four groups of uniform structure (although one or more of these - groups may be empty). 
-*/ - - for (j = 1; j <= 4; ++j) { - ctot[j - 1] = 0; -/* L130: */ - } - i__1 = n; - for (j = 2; j <= i__1; ++j) { - ct = coltyp[j]; - ++ctot[ct - 1]; -/* L140: */ - } - -/* PSM(*) = Position in SubMatrix (of types 1 through 4) */ - - psm[0] = 2; - psm[1] = ctot[0] + 2; - psm[2] = psm[1] + ctot[1]; - psm[3] = psm[2] + ctot[2]; - -/* - Fill out the IDXC array so that the permutation which it induces - will place all type-1 columns first, all type-2 columns next, - then all type-3's, and finally all type-4's, starting from the - second column. This applies similarly to the rows of VT. -*/ - - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - ct = coltyp[jp]; - idxc[psm[ct - 1]] = j; - ++psm[ct - 1]; -/* L150: */ - } - -/* - Sort the singular values and corresponding singular vectors into - DSIGMA, U2, and VT2 respectively. The singular values/vectors - which were not deflated go into the first K slots of DSIGMA, U2, - and VT2 respectively, while those which were deflated go into the - last N - K slots, except that the first column/row will be treated - separately. -*/ - - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - dsigma[j] = d__[jp]; - idxj = idxq[idx[idxp[idxc[j]]] + 1]; - if (idxj <= nlp1) { - --idxj; - } - dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1); - dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2); -/* L160: */ - } - -/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */ - - dsigma[1] = 0.; - hlftol = tol / 2.; - if (abs(dsigma[2]) <= hlftol) { - dsigma[2] = hlftol; - } - if (m > n) { - z__[1] = dlapy2_(&z1, &z__[m]); - if (z__[1] <= tol) { - c__ = 1.; - s = 0.; - z__[1] = tol; - } else { - c__ = z1 / z__[1]; - s = z__[m] / z__[1]; - } - } else { - if (abs(z1) <= tol) { - z__[1] = tol; - } else { - z__[1] = z1; - } - } - -/* Move the rest of the updating row to Z. 
*/ - - i__1 = *k - 1; - dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1); - -/* - Determine the first column of U2, the first row of VT2 and the - last row of VT. -*/ - - dlaset_("A", &n, &c__1, &c_b29, &c_b29, &u2[u2_offset], ldu2); - u2[nlp1 + u2_dim1] = 1.; - if (m > n) { - i__1 = nlp1; - for (i__ = 1; i__ <= i__1; ++i__) { - vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1]; - vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1]; -/* L170: */ - } - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1]; - vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1]; -/* L180: */ - } - } else { - dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2); - } - if (m > n) { - dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2); - } - -/* - The deflated singular values and their corresponding vectors go - into the back of D, U, and V respectively. -*/ - - if (n > *k) { - i__1 = n - *k; - dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1); - i__1 = n - *k; - dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1) - * u_dim1 + 1], ldu); - i__1 = n - *k; - dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 + - vt_dim1], ldvt); - } - -/* Copy CTOT into COLTYP for referencing in DLASD3. 
*/ - - for (j = 1; j <= 4; ++j) { - coltyp[j] = ctot[j - 1]; -/* L190: */ - } - - return 0; - -/* End of DLASD2 */ - -} /* dlasd2_ */ - -/* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer - *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma, - doublereal *u, integer *ldu, doublereal *u2, integer *ldu2, - doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2, - integer *idxc, integer *ctot, doublereal *z__, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, - vt_offset, vt2_dim1, vt2_offset, i__1, i__2; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer i__, j, m, n, jc; - static doublereal rho; - static integer nlp1, nlp2, nrp1; - static doublereal temp; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer ctemp; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer ktemp; - extern doublereal dlamc3_(doublereal *, doublereal *); - extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *), dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dlacpy_(char *, integer *, integer - *, doublereal *, integer *, doublereal *, integer *), - xerbla_(char *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. 
of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASD3 finds all the square roots of the roots of the secular - equation, as defined by the values in D and Z. It makes the - appropriate calls to DLASD4 and then updates the singular - vectors by matrix multiplication. - - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. - It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - DLASD3 is called from DLASD1. - - Arguments - ========= - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. - - The bidiagonal matrix has N = NL + NR + 1 rows and - M = N + SQRE >= N columns. - - K (input) INTEGER - The size of the secular equation, 1 =< K = < N. - - D (output) DOUBLE PRECISION array, dimension(K) - On exit the square roots of the roots of the secular equation, - in ascending order. - - Q (workspace) DOUBLE PRECISION array, - dimension at least (LDQ,K). - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= K. - - DSIGMA (input) DOUBLE PRECISION array, dimension(K) - The first K elements of this array contain the old roots - of the deflated updating problem. These are the poles - of the secular equation. - - U (input) DOUBLE PRECISION array, dimension (LDU, N) - The last N - K columns of this matrix contain the deflated - left singular vectors. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= N. 
- - U2 (input) DOUBLE PRECISION array, dimension (LDU2, N) - The first K columns of this matrix contain the non-deflated - left singular vectors for the split problem. - - LDU2 (input) INTEGER - The leading dimension of the array U2. LDU2 >= N. - - VT (input) DOUBLE PRECISION array, dimension (LDVT, M) - The last M - K columns of VT' contain the deflated - right singular vectors. - - LDVT (input) INTEGER - The leading dimension of the array VT. LDVT >= N. - - VT2 (input) DOUBLE PRECISION array, dimension (LDVT2, N) - The first K columns of VT2' contain the non-deflated - right singular vectors for the split problem. - - LDVT2 (input) INTEGER - The leading dimension of the array VT2. LDVT2 >= N. - - IDXC (input) INTEGER array, dimension ( N ) - The permutation used to arrange the columns of U (and rows of - VT) into three groups: the first group contains non-zero - entries only at and above (or before) NL +1; the second - contains non-zero entries only at and below (or after) NL+2; - and the third is dense. The first column of U and the row of - VT are treated separately, however. - - The rows of the singular vectors found by DLASD4 - must be likewise permuted before the matrix multiplies can - take place. - - CTOT (input) INTEGER array, dimension ( 4 ) - A count of the total number of the various types of columns - in U (or rows in VT), as described in IDXC. The fourth column - type is any column which has been deflated. - - Z (input) DOUBLE PRECISION array, dimension (K) - The first K elements of this array contain the components - of the deflation-adjusted updating row vector. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- > 0: if INFO = 1, an singular value did not converge - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --dsigma; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - u2_dim1 = *ldu2; - u2_offset = 1 + u2_dim1 * 1; - u2 -= u2_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - vt2_dim1 = *ldvt2; - vt2_offset = 1 + vt2_dim1 * 1; - vt2 -= vt2_offset; - --idxc; - --ctot; - --z__; - - /* Function Body */ - *info = 0; - - if (*nl < 1) { - *info = -1; - } else if (*nr < 1) { - *info = -2; - } else if ((*sqre != 1 && *sqre != 0)) { - *info = -3; - } - - n = *nl + *nr + 1; - m = n + *sqre; - nlp1 = *nl + 1; - nlp2 = *nl + 2; - - if (*k < 1 || *k > n) { - *info = -4; - } else if (*ldq < *k) { - *info = -7; - } else if (*ldu < n) { - *info = -10; - } else if (*ldu2 < n) { - *info = -12; - } else if (*ldvt < m) { - *info = -14; - } else if (*ldvt2 < m) { - *info = -16; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD3", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*k == 1) { - d__[1] = abs(z__[1]); - dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt); - if (z__[1] > 0.) { - dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1); - } else { - i__1 = n; - for (i__ = 1; i__ <= i__1; ++i__) { - u[i__ + u_dim1] = -u2[i__ + u2_dim1]; -/* L10: */ - } - } - return 0; - } - -/* - Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can - be computed with high relative accuracy (barring over/underflow). - This is a problem on machines without a guard digit in - add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). 
- The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), - which on any of these machines zeros out the bottommost - bit of DSIGMA(I) if it is 1; this makes the subsequent - subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation - occurs. On binary machines with a guard digit (almost all - machines) it does not change DSIGMA(I) at all. On hexadecimal - and decimal machines with a guard digit, it slightly - changes the bottommost bits of DSIGMA(I). It does not account - for hexadecimal or decimal machines without guard digits - (we know of none). We use a subroutine call to compute - 2*DLAMBDA(I) to prevent optimizing compilers from eliminating - this code. -*/ - - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; -/* L20: */ - } - -/* Keep a copy of Z. */ - - dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1); - -/* Normalize Z. */ - - rho = dnrm2_(k, &z__[1], &c__1); - dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info); - rho *= rho; - -/* Find the new singular values. */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j], - &vt[j * vt_dim1 + 1], info); - -/* If the zero finder fails, the computation is terminated. */ - - if (*info != 0) { - return 0; - } -/* L30: */ - } - -/* Compute updated Z. 
*/ - - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1]; - i__2 = i__ - 1; - for (j = 1; j <= i__2; ++j) { - z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ - i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]); -/* L40: */ - } - i__2 = *k - 1; - for (j = i__; j <= i__2; ++j) { - z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ - i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]); -/* L50: */ - } - d__2 = sqrt((d__1 = z__[i__], abs(d__1))); - z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]); -/* L60: */ - } - -/* - Compute left singular vectors of the modified diagonal matrix, - and store related information for the right singular vectors. -*/ - - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ * - vt_dim1 + 1]; - u[i__ * u_dim1 + 1] = -1.; - i__2 = *k; - for (j = 2; j <= i__2; ++j) { - vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__ - * vt_dim1]; - u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1]; -/* L70: */ - } - temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1); - q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp; - i__2 = *k; - for (j = 2; j <= i__2; ++j) { - jc = idxc[j]; - q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp; -/* L80: */ - } -/* L90: */ - } - -/* Update the left singular vector matrix. 
*/ - - if (*k == 2) { - dgemm_("N", "N", &n, k, k, &c_b15, &u2[u2_offset], ldu2, &q[q_offset], - ldq, &c_b29, &u[u_offset], ldu); - goto L100; - } - if (ctot[1] > 0) { - dgemm_("N", "N", nl, k, &ctot[1], &c_b15, &u2[((u2_dim1) << (1)) + 1], - ldu2, &q[q_dim1 + 2], ldq, &c_b29, &u[u_dim1 + 1], ldu); - if (ctot[3] > 0) { - ktemp = ctot[1] + 2 + ctot[2]; - dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1] - , ldu2, &q[ktemp + q_dim1], ldq, &c_b15, &u[u_dim1 + 1], - ldu); - } - } else if (ctot[3] > 0) { - ktemp = ctot[1] + 2 + ctot[2]; - dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1], - ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[u_dim1 + 1], ldu); - } else { - dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu); - } - dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu); - ktemp = ctot[1] + 2; - ctemp = ctot[2] + ctot[3]; - dgemm_("N", "N", nr, k, &ctemp, &c_b15, &u2[nlp2 + ktemp * u2_dim1], ldu2, - &q[ktemp + q_dim1], ldq, &c_b29, &u[nlp2 + u_dim1], ldu); - -/* Generate the right singular vectors. */ - -L100: - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1); - q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp; - i__2 = *k; - for (j = 2; j <= i__2; ++j) { - jc = idxc[j]; - q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp; -/* L110: */ - } -/* L120: */ - } - -/* Update the right singular vector matrix. 
*/ - - if (*k == 2) { - dgemm_("N", "N", k, &m, k, &c_b15, &q[q_offset], ldq, &vt2[vt2_offset] - , ldvt2, &c_b29, &vt[vt_offset], ldvt); - return 0; - } - ktemp = ctot[1] + 1; - dgemm_("N", "N", k, &nlp1, &ktemp, &c_b15, &q[q_dim1 + 1], ldq, &vt2[ - vt2_dim1 + 1], ldvt2, &c_b29, &vt[vt_dim1 + 1], ldvt); - ktemp = ctot[1] + 2 + ctot[2]; - if (ktemp <= *ldvt2) { - dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b15, &q[ktemp * q_dim1 + 1], - ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b15, &vt[vt_dim1 + 1], - ldvt); - } - - ktemp = ctot[1] + 1; - nrp1 = *nr + *sqre; - if (ktemp > 1) { - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - q[i__ + ktemp * q_dim1] = q[i__ + q_dim1]; -/* L130: */ - } - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1]; -/* L140: */ - } - } - ctemp = ctot[2] + 1 + ctot[3]; - dgemm_("N", "N", k, &nrp1, &ctemp, &c_b15, &q[ktemp * q_dim1 + 1], ldq, & - vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b29, &vt[nlp2 * vt_dim1 + - 1], ldvt); - - return 0; - -/* End of DLASD3 */ - -} /* dlasd3_ */ - - -/* Subroutine */ int dlasd4_(integer *n, integer *i__, doublereal *d__, - doublereal *z__, doublereal *delta, doublereal *rho, doublereal * - sigma, doublereal *work, integer *info) -{ - /* System generated locals */ - integer i__1; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal a, b, c__; - static integer j; - static doublereal w, dd[3]; - static integer ii; - static doublereal dw, zz[3]; - static integer ip1; - static doublereal eta, phi, eps, tau, psi; - static integer iim1, iip1; - static doublereal dphi, dpsi; - static integer iter; - static doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq, - dtiip; - static integer niter; - static doublereal dtisq; - static logical swtch; - static doublereal dtnsq; - extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *, - doublereal *, doublereal *, doublereal *, 
doublereal *, integer *) - , dlasd5_(integer *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *); - static doublereal delsq2, dtnsq1; - static logical swtch3; - - static logical orgati; - static doublereal erretm, dtipsq, rhoinv; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - October 31, 1999 - - - Purpose - ======= - - This subroutine computes the square root of the I-th updated - eigenvalue of a positive symmetric rank-one modification to - a positive diagonal matrix whose entries are given as the squares - of the corresponding entries in the array d, and that - - 0 <= D(i) < D(j) for i < j - - and that RHO > 0. This is arranged by the calling routine, and is - no loss in generality. The rank-one modified system is thus - - diag( D ) * diag( D ) + RHO * Z * Z_transpose. - - where we assume the Euclidean norm of Z is 1. - - The method consists of approximating the rational functions in the - secular equation by simpler interpolating rational functions. - - Arguments - ========= - - N (input) INTEGER - The length of all arrays. - - I (input) INTEGER - The index of the eigenvalue to be computed. 1 <= I <= N. - - D (input) DOUBLE PRECISION array, dimension ( N ) - The original eigenvalues. It is assumed that they are in - order, 0 <= D(I) < D(J) for I < J. - - Z (input) DOUBLE PRECISION array, dimension ( N ) - The components of the updating vector. - - DELTA (output) DOUBLE PRECISION array, dimension ( N ) - If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th - component. If N = 1, then DELTA(1) = 1. The vector DELTA - contains the information necessary to construct the - (singular) eigenvectors. - - RHO (input) DOUBLE PRECISION - The scalar in the symmetric updating formula. - - SIGMA (output) DOUBLE PRECISION - The computed lambda_I, the I-th updated eigenvalue. 
- - WORK (workspace) DOUBLE PRECISION array, dimension ( N ) - If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th - component. If N = 1, then WORK( 1 ) = 1. - - INFO (output) INTEGER - = 0: successful exit - > 0: if INFO = 1, the updating process failed. - - Internal Parameters - =================== - - Logical variable ORGATI (origin-at-i?) is used for distinguishing - whether D(i) or D(i+1) is treated as the origin. - - ORGATI = .true. origin at i - ORGATI = .false. origin at i+1 - - Logical variable SWTCH3 (switch-for-3-poles?) is for noting - if we are working with THREE poles! - - MAXIT is the maximum number of iterations allowed for each - eigenvalue. - - Further Details - =============== - - Based on contributions by - Ren-Cang Li, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Since this routine is called in an inner loop, we do no argument - checking. - - Quick return for N=1 and 2. -*/ - - /* Parameter adjustments */ - --work; - --delta; - --z__; - --d__; - - /* Function Body */ - *info = 0; - if (*n == 1) { - -/* Presumably, I=1 upon entry */ - - *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]); - delta[1] = 1.; - work[1] = 1.; - return 0; - } - if (*n == 2) { - dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]); - return 0; - } - -/* Compute machine epsilon */ - - eps = EPSILON; - rhoinv = 1. 
/ *rho; - -/* The case I = N */ - - if (*i__ == *n) { - -/* Initialize some basic variables */ - - ii = *n - 1; - niter = 1; - -/* Calculate initial guess */ - - temp = *rho / 2.; - -/* - If ||Z||_2 is not one, then TEMP should be set to - RHO * ||Z||_2^2 / TWO -*/ - - temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp)); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[*n] + temp1; - delta[j] = d__[j] - d__[*n] - temp1; -/* L10: */ - } - - psi = 0.; - i__1 = *n - 2; - for (j = 1; j <= i__1; ++j) { - psi += z__[j] * z__[j] / (delta[j] * work[j]); -/* L20: */ - } - - c__ = rhoinv + psi; - w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[* - n] / (delta[*n] * work[*n]); - - if (w <= 0.) { - temp1 = sqrt(d__[*n] * d__[*n] + *rho); - temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[* - n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] * - z__[*n] / *rho; - -/* - The following TAU is to approximate - SIGMA_n^2 - D( N )*D( N ) -*/ - - if (c__ <= temp) { - tau = *rho; - } else { - delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); - a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[* - n]; - b = z__[*n] * z__[*n] * delsq; - if (a < 0.) { - tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); - } else { - tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); - } - } - -/* - It can be proved that - D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO -*/ - - } else { - delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); - a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; - b = z__[*n] * z__[*n] * delsq; - -/* - The following TAU is to approximate - SIGMA_n^2 - D( N )*D( N ) -*/ - - if (a < 0.) { - tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); - } else { - tau = (a + sqrt(a * a + b * 4. 
* c__)) / (c__ * 2.); - } - -/* - It can be proved that - D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2 -*/ - - } - -/* The following ETA is to approximate SIGMA_n - D( N ) */ - - eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau)); - - *sigma = d__[*n] + eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[*i__] - eta; - work[j] = d__[j] + d__[*i__] + eta; -/* L30: */ - } - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (delta[j] * work[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L40: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - temp = z__[*n] / (delta[*n] * work[*n]); - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi - + dphi); - - w = rhoinv + phi + psi; - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - goto L240; - } - -/* Calculate the new step */ - - ++niter; - dtnsq1 = work[*n - 1] * delta[*n - 1]; - dtnsq = work[*n] * delta[*n]; - c__ = w - dtnsq1 * dpsi - dtnsq * dphi; - a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi); - b = dtnsq * dtnsq1 * w; - if (c__ < 0.) { - c__ = abs(c__); - } - if (c__ == 0.) { - eta = *rho - *sigma * *sigma; - } else if (a >= 0.) { - eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ - * 2.); - } else { - eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) - ); - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta > 0.) 
{ - eta = -w / (dpsi + dphi); - } - temp = eta - dtnsq; - if (temp > *rho) { - eta = *rho + dtnsq; - } - - tau += eta; - eta /= *sigma + sqrt(eta + *sigma * *sigma); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; - work[j] += eta; -/* L50: */ - } - - *sigma += eta; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L60: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - temp = z__[*n] / (work[*n] * delta[*n]); - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi - + dphi); - - w = rhoinv + phi + psi; - -/* Main loop to update the values of the array DELTA */ - - iter = niter + 1; - - for (niter = iter; niter <= MAXITERLOOPS; ++niter) { - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - goto L240; - } - -/* Calculate the new step */ - - dtnsq1 = work[*n - 1] * delta[*n - 1]; - dtnsq = work[*n] * delta[*n]; - c__ = w - dtnsq1 * dpsi - dtnsq * dphi; - a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi); - b = dtnsq1 * dtnsq * w; - if (a >= 0.) { - eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( - d__1)))); - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta > 0.) { - eta = -w / (dpsi + dphi); - } - temp = eta - dtnsq; - if (temp <= 0.) 
{ - eta /= 2.; - } - - tau += eta; - eta /= *sigma + sqrt(eta + *sigma * *sigma); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; - work[j] += eta; -/* L70: */ - } - - *sigma += eta; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L80: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - temp = z__[*n] / (work[*n] * delta[*n]); - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * ( - dpsi + dphi); - - w = rhoinv + phi + psi; -/* L90: */ - } - -/* Return with INFO = 1, NITER = MAXIT and not converged */ - - *info = 1; - goto L240; - -/* End for the case I = N */ - - } else { - -/* The case for I < N */ - - niter = 1; - ip1 = *i__ + 1; - -/* Calculate initial guess */ - - delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]); - delsq2 = delsq / 2.; - temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2)); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[*i__] + temp; - delta[j] = d__[j] - d__[*i__] - temp; -/* L100: */ - } - - psi = 0.; - i__1 = *i__ - 1; - for (j = 1; j <= i__1; ++j) { - psi += z__[j] * z__[j] / (work[j] * delta[j]); -/* L110: */ - } - - phi = 0.; - i__1 = *i__ + 2; - for (j = *n; j >= i__1; --j) { - phi += z__[j] * z__[j] / (work[j] * delta[j]); -/* L120: */ - } - c__ = rhoinv + psi + phi; - w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[ - ip1] * z__[ip1] / (work[ip1] * delta[ip1]); - - if (w > 0.) { - -/* - d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2 - - We choose d(i) as origin. -*/ - - orgati = TRUE_; - sg2lb = 0.; - sg2ub = delsq2; - a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; - b = z__[*i__] * z__[*i__] * delsq; - if (a > 0.) { - tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, abs( - d__1)))); - } else { - tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } - -/* - TAU now is an estimation of SIGMA^2 - D( I )^2. The - following, however, is the corresponding estimation of - SIGMA - D( I ). -*/ - - eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau)); - } else { - -/* - (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2/2 - - We choose d(i+1) as origin. -*/ - - orgati = FALSE_; - sg2lb = -delsq2; - sg2ub = 0.; - a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; - b = z__[ip1] * z__[ip1] * delsq; - if (a < 0.) { - tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( - d__1)))); - } else { - tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / - (c__ * 2.); - } - -/* - TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The - following, however, is the corresponding estimation of - SIGMA - D( IP1 ). -*/ - - eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau, - abs(d__1)))); - } - - if (orgati) { - ii = *i__; - *sigma = d__[*i__] + eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[*i__] + eta; - delta[j] = d__[j] - d__[*i__] - eta; -/* L130: */ - } - } else { - ii = *i__ + 1; - *sigma = d__[ip1] + eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[ip1] + eta; - delta[j] = d__[j] - d__[ip1] - eta; -/* L140: */ - } - } - iim1 = ii - 1; - iip1 = ii + 1; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L150: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / (work[j] * delta[j]); - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; -/* L160: */ - } - - w = rhoinv + phi + psi; - -/* - W is the value of 
the secular function with - its ii-th element removed. -*/ - - swtch3 = FALSE_; - if (orgati) { - if (w < 0.) { - swtch3 = TRUE_; - } - } else { - if (w > 0.) { - swtch3 = TRUE_; - } - } - if (ii == 1 || ii == *n) { - swtch3 = FALSE_; - } - - temp = z__[ii] / (work[ii] * delta[ii]); - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w += temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + - abs(tau) * dw; - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - goto L240; - } - - if (w <= 0.) { - sg2lb = max(sg2lb,tau); - } else { - sg2ub = min(sg2ub,tau); - } - -/* Calculate the new step */ - - ++niter; - if (! swtch3) { - dtipsq = work[ip1] * delta[ip1]; - dtisq = work[*i__] * delta[*i__]; - if (orgati) { -/* Computing 2nd power */ - d__1 = z__[*i__] / dtisq; - c__ = w - dtipsq * dw + delsq * (d__1 * d__1); - } else { -/* Computing 2nd power */ - d__1 = z__[ip1] / dtipsq; - c__ = w - dtisq * dw - delsq * (d__1 * d__1); - } - a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; - b = dtipsq * dtisq * w; - if (c__ == 0.) { - if (a == 0.) { - if (orgati) { - a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + - dphi); - } else { - a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi + - dphi); - } - } - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, abs( - d__1)))); - } - } else { - -/* Interpolation using THREE most relevant poles */ - - dtiim = work[iim1] * delta[iim1]; - dtiip = work[iip1] * delta[iip1]; - temp = rhoinv + psi + phi; - if (orgati) { - temp1 = z__[iim1] / dtiim; - temp1 *= temp1; - c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * - (d__[iim1] + d__[iip1]) * temp1; - zz[0] = z__[iim1] * z__[iim1]; - if (dpsi < temp1) { - zz[2] = dtiip * dtiip * dphi; - } else { - zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); - } - } else { - temp1 = z__[iip1] / dtiip; - temp1 *= temp1; - c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * - (d__[iim1] + d__[iip1]) * temp1; - if (dphi < temp1) { - zz[0] = dtiim * dtiim * dpsi; - } else { - zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); - } - zz[2] = z__[iip1] * z__[iip1]; - } - zz[1] = z__[ii] * z__[ii]; - dd[0] = dtiim; - dd[1] = delta[ii] * work[ii]; - dd[2] = dtiip; - dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info); - if (*info != 0) { - goto L240; - } - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta >= 0.) { - eta = -w / dw; - } - if (orgati) { - temp1 = work[*i__] * delta[*i__]; - temp = eta - temp1; - } else { - temp1 = work[ip1] * delta[ip1]; - temp = eta - temp1; - } - if (temp > sg2ub || temp < sg2lb) { - if (w < 0.) 
{ - eta = (sg2ub - tau) / 2.; - } else { - eta = (sg2lb - tau) / 2.; - } - } - - tau += eta; - eta /= *sigma + sqrt(*sigma * *sigma + eta); - - prew = w; - - *sigma += eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] += eta; - delta[j] -= eta; -/* L170: */ - } - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L180: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / (work[j] * delta[j]); - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; -/* L190: */ - } - - temp = z__[ii] / (work[ii] * delta[ii]); - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w = rhoinv + phi + psi + temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + - abs(tau) * dw; - - if (w <= 0.) { - sg2lb = max(sg2lb,tau); - } else { - sg2ub = min(sg2ub,tau); - } - - swtch = FALSE_; - if (orgati) { - if (-w > abs(prew) / 10.) { - swtch = TRUE_; - } - } else { - if (w > abs(prew) / 10.) { - swtch = TRUE_; - } - } - -/* Main loop to update the values of the array DELTA and WORK */ - - iter = niter + 1; - - for (niter = iter; niter <= MAXITERLOOPS; ++niter) { - -/* Test for convergence */ - - if (abs(w) <= eps * erretm) { - goto L240; - } - -/* Calculate the new step */ - - if (! swtch3) { - dtipsq = work[ip1] * delta[ip1]; - dtisq = work[*i__] * delta[*i__]; - if (! 
swtch) { - if (orgati) { -/* Computing 2nd power */ - d__1 = z__[*i__] / dtisq; - c__ = w - dtipsq * dw + delsq * (d__1 * d__1); - } else { -/* Computing 2nd power */ - d__1 = z__[ip1] / dtipsq; - c__ = w - dtisq * dw - delsq * (d__1 * d__1); - } - } else { - temp = z__[ii] / (work[ii] * delta[ii]); - if (orgati) { - dpsi += temp * temp; - } else { - dphi += temp * temp; - } - c__ = w - dtisq * dpsi - dtipsq * dphi; - } - a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; - b = dtipsq * dtisq * w; - if (c__ == 0.) { - if (a == 0.) { - if (! swtch) { - if (orgati) { - a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * - (dpsi + dphi); - } else { - a = z__[ip1] * z__[ip1] + dtisq * dtisq * ( - dpsi + dphi); - } - } else { - a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi; - } - } - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) - / (c__ * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, - abs(d__1)))); - } - } else { - -/* Interpolation using THREE most relevant poles */ - - dtiim = work[iim1] * delta[iim1]; - dtiip = work[iip1] * delta[iip1]; - temp = rhoinv + psi + phi; - if (swtch) { - c__ = temp - dtiim * dpsi - dtiip * dphi; - zz[0] = dtiim * dtiim * dpsi; - zz[2] = dtiip * dtiip * dphi; - } else { - if (orgati) { - temp1 = z__[iim1] / dtiim; - temp1 *= temp1; - temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[ - iip1]) * temp1; - c__ = temp - dtiip * (dpsi + dphi) - temp2; - zz[0] = z__[iim1] * z__[iim1]; - if (dpsi < temp1) { - zz[2] = dtiip * dtiip * dphi; - } else { - zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); - } - } else { - temp1 = z__[iip1] / dtiip; - temp1 *= temp1; - temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[ - iip1]) * temp1; - c__ = temp - dtiim * (dpsi + dphi) - temp2; - if (dphi < temp1) { - zz[0] = dtiim * dtiim * dpsi; - } else { - zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); - } - zz[2] = z__[iip1] * z__[iip1]; - } - } - dd[0] = dtiim; - dd[1] = delta[ii] * 
work[ii]; - dd[2] = dtiip; - dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info); - if (*info != 0) { - goto L240; - } - } - -/* - Note, eta should be positive if w is negative, and - eta should be negative otherwise. However, - if for some reason caused by roundoff, eta*w > 0, - we simply use one Newton step instead. This way - will guarantee eta*w < 0. -*/ - - if (w * eta >= 0.) { - eta = -w / dw; - } - if (orgati) { - temp1 = work[*i__] * delta[*i__]; - temp = eta - temp1; - } else { - temp1 = work[ip1] * delta[ip1]; - temp = eta - temp1; - } - if (temp > sg2ub || temp < sg2lb) { - if (w < 0.) { - eta = (sg2ub - tau) / 2.; - } else { - eta = (sg2lb - tau) / 2.; - } - } - - tau += eta; - eta /= *sigma + sqrt(*sigma * *sigma + eta); - - *sigma += eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] += eta; - delta[j] -= eta; -/* L200: */ - } - - prew = w; - -/* Evaluate PSI and the derivative DPSI */ - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; -/* L210: */ - } - erretm = abs(erretm); - -/* Evaluate PHI and the derivative DPHI */ - - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / (work[j] * delta[j]); - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; -/* L220: */ - } - - temp = z__[ii] / (work[ii] * delta[ii]); - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w = rhoinv + phi + psi + temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. - + abs(tau) * dw; - if ((w * prew > 0. && abs(w) > abs(prew) / 10.)) { - swtch = ! swtch; - } - - if (w <= 0.) 
{ - sg2lb = max(sg2lb,tau); - } else { - sg2ub = min(sg2ub,tau); - } - -/* L230: */ - } - -/* Return with INFO = 1, NITER = MAXIT and not converged */ - - *info = 1; - - } - -L240: - return 0; - -/* End of DLASD4 */ - -} /* dlasd4_ */ - -/* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__, - doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal * - work) -{ - /* System generated locals */ - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal b, c__, w, del, tau, delsq; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - June 30, 1999 - - - Purpose - ======= - - This subroutine computes the square root of the I-th eigenvalue - of a positive symmetric rank-one modification of a 2-by-2 diagonal - matrix - - diag( D ) * diag( D ) + RHO * Z * transpose(Z) . - - The diagonal entries in the array D are assumed to satisfy - - 0 <= D(i) < D(j) for i < j . - - We also assume RHO > 0 and that the Euclidean norm of the vector - Z is one. - - Arguments - ========= - - I (input) INTEGER - The index of the eigenvalue to be computed. I = 1 or I = 2. - - D (input) DOUBLE PRECISION array, dimension ( 2 ) - The original eigenvalues. We assume 0 <= D(1) < D(2). - - Z (input) DOUBLE PRECISION array, dimension ( 2 ) - The components of the updating vector. - - DELTA (output) DOUBLE PRECISION array, dimension ( 2 ) - Contains (D(j) - lambda_I) in its j-th component. - The vector DELTA contains the information necessary - to construct the eigenvectors. - - RHO (input) DOUBLE PRECISION - The scalar in the symmetric updating formula. - - DSIGMA (output) DOUBLE PRECISION - The computed lambda_I, the I-th updated eigenvalue. - - WORK (workspace) DOUBLE PRECISION array, dimension ( 2 ) - WORK contains (D(j) + sigma_I) in its j-th component. 
- - Further Details - =============== - - Based on contributions by - Ren-Cang Li, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --work; - --delta; - --z__; - --d__; - - /* Function Body */ - del = d__[2] - d__[1]; - delsq = del * (d__[2] + d__[1]); - if (*i__ == 1) { - w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] * - z__[1] / (d__[1] * 3. + d__[2])) / del + 1.; - if (w > 0.) { - b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[1] * z__[1] * delsq; - -/* - B > ZERO, always - - The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 ) -*/ - - tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); - -/* The following TAU is DSIGMA - D( 1 ) */ - - tau /= d__[1] + sqrt(d__[1] * d__[1] + tau); - *dsigma = d__[1] + tau; - delta[1] = -tau; - delta[2] = del - tau; - work[1] = d__[1] * 2. + tau; - work[2] = d__[1] + tau + d__[2]; -/* - DELTA( 1 ) = -Z( 1 ) / TAU - DELTA( 2 ) = Z( 2 ) / ( DEL-TAU ) -*/ - } else { - b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[2] * z__[2] * delsq; - -/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */ - - if (b > 0.) { - tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); - } else { - tau = (b - sqrt(b * b + c__ * 4.)) / 2.; - } - -/* The following TAU is DSIGMA - D( 2 ) */ - - tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1))); - *dsigma = d__[2] + tau; - delta[1] = -(del + tau); - delta[2] = -tau; - work[1] = d__[1] + tau + d__[2]; - work[2] = d__[2] * 2. 
+ tau; -/* - DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) - DELTA( 2 ) = -Z( 2 ) / TAU -*/ - } -/* - TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) - DELTA( 1 ) = DELTA( 1 ) / TEMP - DELTA( 2 ) = DELTA( 2 ) / TEMP -*/ - } else { - -/* Now I=2 */ - - b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[2] * z__[2] * delsq; - -/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */ - - if (b > 0.) { - tau = (b + sqrt(b * b + c__ * 4.)) / 2.; - } else { - tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); - } - -/* The following TAU is DSIGMA - D( 2 ) */ - - tau /= d__[2] + sqrt(d__[2] * d__[2] + tau); - *dsigma = d__[2] + tau; - delta[1] = -(del + tau); - delta[2] = -tau; - work[1] = d__[1] + tau + d__[2]; - work[2] = d__[2] * 2. + tau; -/* - DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) - DELTA( 2 ) = -Z( 2 ) / TAU - TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) - DELTA( 1 ) = DELTA( 1 ) / TEMP - DELTA( 2 ) = DELTA( 2 ) / TEMP -*/ - } - return 0; - -/* End of DLASD5 */ - -} /* dlasd5_ */ - -/* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr, - integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl, - doublereal *alpha, doublereal *beta, integer *idxq, integer *perm, - integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, - integer *ldgnum, doublereal *poles, doublereal *difl, doublereal * - difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s, - doublereal *work, integer *iwork, integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, - poles_dim1, poles_offset, i__1; - doublereal d__1, d__2; - - /* Local variables */ - static integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *), dlasd7_(integer *, integer *, integer *, - integer *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, 
doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, integer *, integer *, - integer *, integer *, integer *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, integer *), dlasd8_( - integer *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, integer *, doublereal *, - doublereal *, integer *), dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dlamrg_(integer *, integer *, - doublereal *, integer *, integer *, integer *); - static integer isigma; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal orgnrm; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASD6 computes the SVD of an updated upper bidiagonal matrix B - obtained by merging two smaller ones by appending a row. This - routine is used only for the problem which requires all singular - values and optionally singular vector matrices in factored form. - B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE. - A related subroutine, DLASD1, handles the case in which all singular - values and singular vectors of the bidiagonal matrix are desired. - - DLASD6 computes the SVD as follows: - - ( D1(in) 0 0 0 ) - B = U(in) * ( Z1' a Z2' b ) * VT(in) - ( 0 0 D2(in) 0 ) - - = U(out) * ( D(out) 0) * VT(out) - - where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M - with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros - elsewhere; and the entry b is empty if SQRE = 0. - - The singular values of B can be computed using D1, D2, the first - components of all the right singular vectors of the lower block, and - the last components of all the right singular vectors of the upper - block. 
These components are stored and updated in VF and VL, - respectively, in DLASD6. Hence U and VT are not explicitly - referenced. - - The singular values are stored in D. The algorithm consists of two - stages: - - The first stage consists of deflating the size of the problem - when there are multiple singular values or if there is a zero - in the Z vector. For each such occurence the dimension of the - secular equation problem is reduced by one. This stage is - performed by the routine DLASD7. - - The second stage consists of calculating the updated - singular values. This is done by finding the roots of the - secular equation via the routine DLASD4 (as called by DLASD8). - This routine also updates VF and VL and computes the distances - between the updated singular values and the old singular - values. - - DLASD6 is called from DLASDA. - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether singular vectors are to be computed in - factored form: - = 0: Compute singular values only. - = 1: Compute singular vectors in factored form as well. - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. - - The bidiagonal matrix has row dimension N = NL + NR + 1, - and column dimension M = N + SQRE. - - D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ). - On entry D(1:NL,1:NL) contains the singular values of the - upper block, and D(NL+2:N) contains the singular values - of the lower block. On exit D(1:N) contains the singular - values of the modified matrix. - - VF (input/output) DOUBLE PRECISION array, dimension ( M ) - On entry, VF(1:NL+1) contains the first components of all - right singular vectors of the upper block; and VF(NL+2:M) - contains the first components of all right singular vectors - of the lower block. 
On exit, VF contains the first components - of all right singular vectors of the bidiagonal matrix. - - VL (input/output) DOUBLE PRECISION array, dimension ( M ) - On entry, VL(1:NL+1) contains the last components of all - right singular vectors of the upper block; and VL(NL+2:M) - contains the last components of all right singular vectors of - the lower block. On exit, VL contains the last components of - all right singular vectors of the bidiagonal matrix. - - ALPHA (input) DOUBLE PRECISION - Contains the diagonal element associated with the added row. - - BETA (input) DOUBLE PRECISION - Contains the off-diagonal element associated with the added - row. - - IDXQ (output) INTEGER array, dimension ( N ) - This contains the permutation which will reintegrate the - subproblem just solved back into sorted order, i.e. - D( IDXQ( I = 1, N ) ) will be in ascending order. - - PERM (output) INTEGER array, dimension ( N ) - The permutations (from deflation and sorting) to be applied - to each block. Not referenced if ICOMPQ = 0. - - GIVPTR (output) INTEGER - The number of Givens rotations which took place in this - subproblem. Not referenced if ICOMPQ = 0. - - GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. Not referenced if ICOMPQ = 0. - - LDGCOL (input) INTEGER - leading dimension of GIVCOL, must be at least N. - - GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - Each number indicates the C or S value to be used in the - corresponding Givens rotation. Not referenced if ICOMPQ = 0. - - LDGNUM (input) INTEGER - The leading dimension of GIVNUM and POLES, must be at least N. - - POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - On exit, POLES(1,*) is an array containing the new singular - values obtained from solving the secular equation, and - POLES(2,*) is an array containing the poles in the secular - equation. Not referenced if ICOMPQ = 0. 
- - DIFL (output) DOUBLE PRECISION array, dimension ( N ) - On exit, DIFL(I) is the distance between I-th updated - (undeflated) singular value and the I-th (undeflated) old - singular value. - - DIFR (output) DOUBLE PRECISION array, - dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and - dimension ( N ) if ICOMPQ = 0. - On exit, DIFR(I, 1) is the distance between I-th updated - (undeflated) singular value and the I+1-th (undeflated) old - singular value. - - If ICOMPQ = 1, DIFR(1:K,2) is an array containing the - normalizing factors for the right singular vector matrix. - - See DLASD8 for details on DIFL and DIFR. - - Z (output) DOUBLE PRECISION array, dimension ( M ) - The first elements of this array contain the components - of the deflation-adjusted updating row vector. - - K (output) INTEGER - Contains the dimension of the non-deflated matrix, - This is the order of the related secular equation. 1 <= K <=N. - - C (output) DOUBLE PRECISION - C contains garbage if SQRE =0 and the C-value of a Givens - rotation related to the right null space if SQRE = 1. - - S (output) DOUBLE PRECISION - S contains garbage if SQRE =0 and the S-value of a Givens - rotation related to the right null space if SQRE = 1. - - WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M ) - - IWORK (workspace) INTEGER array, dimension ( 3 * N ) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an singular value did not converge - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --vf; - --vl; - --idxq; - --perm; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - poles_dim1 = *ldgnum; - poles_offset = 1 + poles_dim1 * 1; - poles -= poles_offset; - givnum_dim1 = *ldgnum; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - --difl; - --difr; - --z__; - --work; - --iwork; - - /* Function Body */ - *info = 0; - n = *nl + *nr + 1; - m = n + *sqre; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*nl < 1) { - *info = -2; - } else if (*nr < 1) { - *info = -3; - } else if (*sqre < 0 || *sqre > 1) { - *info = -4; - } else if (*ldgcol < n) { - *info = -14; - } else if (*ldgnum < n) { - *info = -16; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD6", &i__1); - return 0; - } - -/* - The following values are for bookkeeping purposes only. They are - integer pointers which indicate the portion of the workspace - used by a particular array in DLASD7 and DLASD8. -*/ - - isigma = 1; - iw = isigma + n; - ivfw = iw + m; - ivlw = ivfw + m; - - idx = 1; - idxc = idx + n; - idxp = idxc + n; - -/* - Scale. - - Computing MAX -*/ - d__1 = abs(*alpha), d__2 = abs(*beta); - orgnrm = max(d__1,d__2); - d__[*nl + 1] = 0.; - i__1 = n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { - orgnrm = (d__1 = d__[i__], abs(d__1)); - } -/* L10: */ - } - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info); - *alpha /= orgnrm; - *beta /= orgnrm; - -/* Sort and Deflate singular values. */ - - dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], & - work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], & - iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[ - givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s, - info); - -/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. 
*/ - - dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1], - ldgnum, &work[isigma], &work[iw], info); - -/* Save the poles if ICOMPQ = 1. */ - - if (*icompq == 1) { - dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1); - dcopy_(k, &work[isigma], &c__1, &poles[((poles_dim1) << (1)) + 1], & - c__1); - } - -/* Unscale. */ - - dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info); - -/* Prepare the IDXQ sorting permutation. */ - - n1 = *k; - n2 = n - *k; - dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]); - - return 0; - -/* End of DLASD6 */ - -} /* dlasd6_ */ - -/* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr, - integer *sqre, integer *k, doublereal *d__, doublereal *z__, - doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl, - doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal * - dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm, - integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, - integer *ldgnum, doublereal *c__, doublereal *s, integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1; - doublereal d__1, d__2; - - /* Local variables */ - static integer i__, j, m, n, k2; - static doublereal z1; - static integer jp; - static doublereal eps, tau, tol; - static integer nlp1, nlp2, idxi, idxj; - extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *); - static integer idxjp; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer jprev; - - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), xerbla_(char *, integer *); - static doublereal hlftol; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. 
of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASD7 merges the two sets of singular values together into a single - sorted set. Then it tries to deflate the size of the problem. There - are two ways in which deflation can occur: when two or more singular - values are close together or if there is a tiny entry in the Z - vector. For each such occurrence the order of the related - secular equation problem is reduced by one. - - DLASD7 is called from DLASD6. - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether singular vectors are to be computed - in compact form, as follows: - = 0: Compute singular values only. - = 1: Compute singular vectors of upper - bidiagonal matrix in compact form. - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. - - The bidiagonal matrix has - N = NL + NR + 1 rows and - M = N + SQRE >= N columns. - - K (output) INTEGER - Contains the dimension of the non-deflated matrix, this is - the order of the related secular equation. 1 <= K <=N. - - D (input/output) DOUBLE PRECISION array, dimension ( N ) - On entry D contains the singular values of the two submatrices - to be combined. On exit D contains the trailing (N-K) updated - singular values (those which were deflated) sorted into - increasing order. - - Z (output) DOUBLE PRECISION array, dimension ( M ) - On exit Z contains the updating row vector in the secular - equation. - - ZW (workspace) DOUBLE PRECISION array, dimension ( M ) - Workspace for Z. 
- - VF (input/output) DOUBLE PRECISION array, dimension ( M ) - On entry, VF(1:NL+1) contains the first components of all - right singular vectors of the upper block; and VF(NL+2:M) - contains the first components of all right singular vectors - of the lower block. On exit, VF contains the first components - of all right singular vectors of the bidiagonal matrix. - - VFW (workspace) DOUBLE PRECISION array, dimension ( M ) - Workspace for VF. - - VL (input/output) DOUBLE PRECISION array, dimension ( M ) - On entry, VL(1:NL+1) contains the last components of all - right singular vectors of the upper block; and VL(NL+2:M) - contains the last components of all right singular vectors - of the lower block. On exit, VL contains the last components - of all right singular vectors of the bidiagonal matrix. - - VLW (workspace) DOUBLE PRECISION array, dimension ( M ) - Workspace for VL. - - ALPHA (input) DOUBLE PRECISION - Contains the diagonal element associated with the added row. - - BETA (input) DOUBLE PRECISION - Contains the off-diagonal element associated with the added - row. - - DSIGMA (output) DOUBLE PRECISION array, dimension ( N ) - Contains a copy of the diagonal elements (K-1 singular values - and one zero) in the secular equation. - - IDX (workspace) INTEGER array, dimension ( N ) - This will contain the permutation used to sort the contents of - D into ascending order. - - IDXP (workspace) INTEGER array, dimension ( N ) - This will contain the permutation used to place deflated - values of D at the end of the array. On output IDXP(2:K) - points to the nondeflated D-values and IDXP(K+1:N) - points to the deflated singular values. - - IDXQ (input) INTEGER array, dimension ( N ) - This contains the permutation which separately sorts the two - sub-problems in D into ascending order. Note that entries in - the first half of this permutation must first be moved one - position backward; and entries in the second half - must first have NL+1 added to their values. 
- - PERM (output) INTEGER array, dimension ( N ) - The permutations (from deflation and sorting) to be applied - to each singular block. Not referenced if ICOMPQ = 0. - - GIVPTR (output) INTEGER - The number of Givens rotations which took place in this - subproblem. Not referenced if ICOMPQ = 0. - - GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. Not referenced if ICOMPQ = 0. - - LDGCOL (input) INTEGER - The leading dimension of GIVCOL, must be at least N. - - GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - Each number indicates the C or S value to be used in the - corresponding Givens rotation. Not referenced if ICOMPQ = 0. - - LDGNUM (input) INTEGER - The leading dimension of GIVNUM, must be at least N. - - C (output) DOUBLE PRECISION - C contains garbage if SQRE =0 and the C-value of a Givens - rotation related to the right null space if SQRE = 1. - - S (output) DOUBLE PRECISION - S contains garbage if SQRE =0 and the S-value of a Givens - rotation related to the right null space if SQRE = 1. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --z__; - --zw; - --vf; - --vfw; - --vl; - --vlw; - --dsigma; - --idx; - --idxp; - --idxq; - --perm; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - givnum_dim1 = *ldgnum; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - - /* Function Body */ - *info = 0; - n = *nl + *nr + 1; - m = n + *sqre; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*nl < 1) { - *info = -2; - } else if (*nr < 1) { - *info = -3; - } else if (*sqre < 0 || *sqre > 1) { - *info = -4; - } else if (*ldgcol < n) { - *info = -22; - } else if (*ldgnum < n) { - *info = -24; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD7", &i__1); - return 0; - } - - nlp1 = *nl + 1; - nlp2 = *nl + 2; - if (*icompq == 1) { - *givptr = 0; - } - -/* - Generate the first part of the vector Z and move the singular - values in the first part of D one position backward. -*/ - - z1 = *alpha * vl[nlp1]; - vl[nlp1] = 0.; - tau = vf[nlp1]; - for (i__ = *nl; i__ >= 1; --i__) { - z__[i__ + 1] = *alpha * vl[i__]; - vl[i__] = 0.; - vf[i__ + 1] = vf[i__]; - d__[i__ + 1] = d__[i__]; - idxq[i__ + 1] = idxq[i__] + 1; -/* L10: */ - } - vf[1] = tau; - -/* Generate the second part of the vector Z. */ - - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - z__[i__] = *beta * vf[i__]; - vf[i__] = 0.; -/* L20: */ - } - -/* Sort the singular values into increasing order */ - - i__1 = n; - for (i__ = nlp2; i__ <= i__1; ++i__) { - idxq[i__] += nlp1; -/* L30: */ - } - -/* DSIGMA, IDXC, IDXC, and ZW are used as storage space. 
*/ - - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - dsigma[i__] = d__[idxq[i__]]; - zw[i__] = z__[idxq[i__]]; - vfw[i__] = vf[idxq[i__]]; - vlw[i__] = vl[idxq[i__]]; -/* L40: */ - } - - dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]); - - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - idxi = idx[i__] + 1; - d__[i__] = dsigma[idxi]; - z__[i__] = zw[idxi]; - vf[i__] = vfw[idxi]; - vl[i__] = vlw[idxi]; -/* L50: */ - } - -/* Calculate the allowable deflation tolerence */ - - eps = EPSILON; -/* Computing MAX */ - d__1 = abs(*alpha), d__2 = abs(*beta); - tol = max(d__1,d__2); -/* Computing MAX */ - d__2 = (d__1 = d__[n], abs(d__1)); - tol = eps * 64. * max(d__2,tol); - -/* - There are 2 kinds of deflation -- first a value in the z-vector - is small, second two (or more) singular values are very close - together (their difference is small). - - If the value in the z-vector is small, we simply permute the - array so that the corresponding singular value is moved to the - end. - - If two values in the D-vector are close, we perform a two-sided - rotation designed to make one of the corresponding z-vector - entries zero, and then permute the array so that the deflated - singular value is moved to the end. - - If there are multiple singular values then the problem deflates. - Here the number of equal singular values are found. As each equal - singular value is found, an elementary reflector is computed to - rotate the corresponding singular subspace so that the - corresponding components of Z are zero in this new basis. -*/ - - *k = 1; - k2 = n + 1; - i__1 = n; - for (j = 2; j <= i__1; ++j) { - if ((d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - idxp[k2] = j; - if (j == n) { - goto L100; - } - } else { - jprev = j; - goto L70; - } -/* L60: */ - } -L70: - j = jprev; -L80: - ++j; - if (j > n) { - goto L90; - } - if ((d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. 
*/ - - --k2; - idxp[k2] = j; - } else { - -/* Check if singular values are close enough to allow deflation. */ - - if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { - -/* Deflation is possible. */ - - *s = z__[jprev]; - *c__ = z__[j]; - -/* - Find sqrt(a**2+b**2) without overflow or - destructive underflow. -*/ - - tau = dlapy2_(c__, s); - z__[j] = tau; - z__[jprev] = 0.; - *c__ /= tau; - *s = -(*s) / tau; - -/* Record the appropriate Givens rotation */ - - if (*icompq == 1) { - ++(*givptr); - idxjp = idxq[idx[jprev] + 1]; - idxj = idxq[idx[j] + 1]; - if (idxjp <= nlp1) { - --idxjp; - } - if (idxj <= nlp1) { - --idxj; - } - givcol[*givptr + ((givcol_dim1) << (1))] = idxjp; - givcol[*givptr + givcol_dim1] = idxj; - givnum[*givptr + ((givnum_dim1) << (1))] = *c__; - givnum[*givptr + givnum_dim1] = *s; - } - drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s); - drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s); - --k2; - idxp[k2] = jprev; - jprev = j; - } else { - ++(*k); - zw[*k] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; - jprev = j; - } - } - goto L80; -L90: - -/* Record the last singular value. */ - - ++(*k); - zw[*k] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; - -L100: - -/* - Sort the singular values into DSIGMA. The singular values which - were not deflated go into the first K slots of DSIGMA, except - that DSIGMA(1) is treated separately. -*/ - - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - dsigma[j] = d__[jp]; - vfw[j] = vf[jp]; - vlw[j] = vl[jp]; -/* L110: */ - } - if (*icompq == 1) { - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - perm[j] = idxq[idx[jp] + 1]; - if (perm[j] <= nlp1) { - --perm[j]; - } -/* L120: */ - } - } - -/* - The deflated singular values go back into the last N - K slots of - D. -*/ - - i__1 = n - *k; - dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1); - -/* - Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and - VL(M). 
-*/ - - dsigma[1] = 0.; - hlftol = tol / 2.; - if (abs(dsigma[2]) <= hlftol) { - dsigma[2] = hlftol; - } - if (m > n) { - z__[1] = dlapy2_(&z1, &z__[m]); - if (z__[1] <= tol) { - *c__ = 1.; - *s = 0.; - z__[1] = tol; - } else { - *c__ = z1 / z__[1]; - *s = -z__[m] / z__[1]; - } - drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s); - drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s); - } else { - if (abs(z1) <= tol) { - z__[1] = tol; - } else { - z__[1] = z1; - } - } - -/* Restore Z, VF, and VL. */ - - i__1 = *k - 1; - dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1); - i__1 = n - 1; - dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1); - i__1 = n - 1; - dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1); - - return 0; - -/* End of DLASD7 */ - -} /* dlasd7_ */ - -/* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__, - doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl, - doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal * - work, integer *info) -{ - /* System generated locals */ - integer difr_dim1, difr_offset, i__1, i__2; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer i__, j; - static doublereal dj, rho; - static integer iwk1, iwk2, iwk3; - extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, - integer *); - static doublereal temp; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - static integer iwk2i, iwk3i; - static doublereal diflj, difrj, dsigj; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - extern doublereal dlamc3_(doublereal *, doublereal *); - extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *), dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, 
integer *), dlaset_(char *, integer *, integer - *, doublereal *, doublereal *, doublereal *, integer *), - xerbla_(char *, integer *); - static doublereal dsigjp; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASD8 finds the square roots of the roots of the secular equation, - as defined by the values in DSIGMA and Z. It makes the appropriate - calls to DLASD4, and stores, for each element in D, the distance - to its two nearest poles (elements in DSIGMA). It also updates - the arrays VF and VL, the first and last components of all the - right singular vectors of the original bidiagonal matrix. - - DLASD8 is called from DLASD6. - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether singular vectors are to be computed in - factored form in the calling routine: - = 0: Compute singular values only. - = 1: Compute singular vectors in factored form as well. - - K (input) INTEGER - The number of terms in the rational function to be solved - by DLASD4. K >= 1. - - D (output) DOUBLE PRECISION array, dimension ( K ) - On output, D contains the updated singular values. - - Z (input) DOUBLE PRECISION array, dimension ( K ) - The first K elements of this array contain the components - of the deflation-adjusted updating row vector. - - VF (input/output) DOUBLE PRECISION array, dimension ( K ) - On entry, VF contains information passed through DBEDE8. - On exit, VF contains the first K components of the first - components of all right singular vectors of the bidiagonal - matrix. - - VL (input/output) DOUBLE PRECISION array, dimension ( K ) - On entry, VL contains information passed through DBEDE8. - On exit, VL contains the first K components of the last - components of all right singular vectors of the bidiagonal - matrix. 
- - DIFL (output) DOUBLE PRECISION array, dimension ( K ) - On exit, DIFL(I) = D(I) - DSIGMA(I). - - DIFR (output) DOUBLE PRECISION array, - dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and - dimension ( K ) if ICOMPQ = 0. - On exit, DIFR(I,1) = D(I) - DSIGMA(I+1), DIFR(K,1) is not - defined and will not be referenced. - - If ICOMPQ = 1, DIFR(1:K,2) is an array containing the - normalizing factors for the right singular vector matrix. - - LDDIFR (input) INTEGER - The leading dimension of DIFR, must be at least K. - - DSIGMA (input) DOUBLE PRECISION array, dimension ( K ) - The first K elements of this array contain the old roots - of the deflated updating problem. These are the poles - of the secular equation. - - WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an singular value did not converge - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --z__; - --vf; - --vl; - --difl; - difr_dim1 = *lddifr; - difr_offset = 1 + difr_dim1 * 1; - difr -= difr_offset; - --dsigma; - --work; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*k < 1) { - *info = -2; - } else if (*lddifr < *k) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASD8", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*k == 1) { - d__[1] = abs(z__[1]); - difl[1] = d__[1]; - if (*icompq == 1) { - difl[2] = 1.; - difr[((difr_dim1) << (1)) + 1] = 1.; - } - return 0; - } - -/* - Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can - be computed with high relative accuracy (barring over/underflow). 
- This is a problem on machines without a guard digit in - add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). - The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), - which on any of these machines zeros out the bottommost - bit of DSIGMA(I) if it is 1; this makes the subsequent - subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation - occurs. On binary machines with a guard digit (almost all - machines) it does not change DSIGMA(I) at all. On hexadecimal - and decimal machines with a guard digit, it slightly - changes the bottommost bits of DSIGMA(I). It does not account - for hexadecimal or decimal machines without guard digits - (we know of none). We use a subroutine call to compute - 2*DLAMBDA(I) to prevent optimizing compilers from eliminating - this code. -*/ - - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; -/* L10: */ - } - -/* Book keeping. */ - - iwk1 = 1; - iwk2 = iwk1 + *k; - iwk3 = iwk2 + *k; - iwk2i = iwk2 - 1; - iwk3i = iwk3 - 1; - -/* Normalize Z. */ - - rho = dnrm2_(k, &z__[1], &c__1); - dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info); - rho *= rho; - -/* Initialize WORK(IWK3). */ - - dlaset_("A", k, &c__1, &c_b15, &c_b15, &work[iwk3], k); - -/* - Compute the updated singular values, the arrays DIFL, DIFR, - and the updated Z. -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[ - iwk2], info); - -/* If the root finder fails, the computation is terminated. 
*/ - - if (*info != 0) { - return 0; - } - work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j]; - difl[j] = -work[j]; - difr[j + difr_dim1] = -work[j + 1]; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + - i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ - j]); -/* L20: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + - i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ - j]); -/* L30: */ - } -/* L40: */ - } - -/* Compute updated Z. */ - - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1))); - z__[i__] = d_sign(&d__2, &z__[i__]); -/* L50: */ - } - -/* Update VF and VL. */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - diflj = difl[j]; - dj = d__[j]; - dsigj = -dsigma[j]; - if (j < *k) { - difrj = -difr[j + difr_dim1]; - dsigjp = -dsigma[j + 1]; - } - work[j] = -z__[j] / diflj / (dsigma[j] + dj); - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / ( - dsigma[i__] + dj); -/* L60: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) / - (dsigma[i__] + dj); -/* L70: */ - } - temp = dnrm2_(k, &work[1], &c__1); - work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp; - work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp; - if (*icompq == 1) { - difr[j + ((difr_dim1) << (1))] = temp; - } -/* L80: */ - } - - dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1); - dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1); - - return 0; - -/* End of DLASD8 */ - -} /* dlasd8_ */ - -/* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n, - integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer - *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr, - 
doublereal *z__, doublereal *poles, integer *givptr, integer *givcol, - integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__, - doublereal *s, doublereal *work, integer *iwork, integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1, - difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, - poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, - z_dim1, z_offset, i__1, i__2; - - /* Builtin functions */ - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc, - nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1; - static doublereal beta; - static integer idxq, nlvl; - static doublereal alpha; - static integer inode, ndiml, ndimr, idxqi, itemp; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer sqrei; - extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, integer *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, doublereal *, - doublereal *, integer *, integer *); - static integer nwork1, nwork2; - extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer - *, integer *, integer *, doublereal *, doublereal *, doublereal *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *), dlasdt_(integer *, integer *, - integer *, integer *, integer *, integer *, integer *), dlaset_( - char *, integer *, integer *, doublereal *, doublereal *, - doublereal *, integer *), xerbla_(char *, integer *); - static integer smlszp; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - Using a divide and conquer approach, DLASDA computes the singular - value decomposition (SVD) of a real upper bidiagonal N-by-M matrix - B with diagonal D and offdiagonal E, where M = N + SQRE. The - algorithm computes the singular values in the SVD B = U * S * VT. - The orthogonal matrices U and VT are optionally computed in - compact form. - - A related subroutine, DLASD0, computes the singular values and - the singular vectors in explicit form. - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether singular vectors are to be computed - in compact form, as follows - = 0: Compute singular values only. - = 1: Compute singular vectors of upper bidiagonal - matrix in compact form. - - SMLSIZ (input) INTEGER - The maximum size of the subproblems at the bottom of the - computation tree. - - N (input) INTEGER - The row dimension of the upper bidiagonal matrix. This is - also the dimension of the main diagonal array D. - - SQRE (input) INTEGER - Specifies the column dimension of the bidiagonal matrix. - = 0: The bidiagonal matrix has column dimension M = N; - = 1: The bidiagonal matrix has column dimension M = N + 1. - - D (input/output) DOUBLE PRECISION array, dimension ( N ) - On entry D contains the main diagonal of the bidiagonal - matrix. On exit D, if INFO = 0, contains its singular values. - - E (input) DOUBLE PRECISION array, dimension ( M-1 ) - Contains the subdiagonal entries of the bidiagonal matrix. - On exit, E has been destroyed. - - U (output) DOUBLE PRECISION array, - dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced - if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left - singular vector matrices of all subproblems at the bottom - level. - - LDU (input) INTEGER, LDU = > N. - The leading dimension of arrays U, VT, DIFL, DIFR, POLES, - GIVNUM, and Z. 
- - VT (output) DOUBLE PRECISION array, - dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced - if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right - singular vector matrices of all subproblems at the bottom - level. - - K (output) INTEGER array, - dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0. - If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th - secular equation on the computation tree. - - DIFL (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ), - where NLVL = floor(log_2 (N/SMLSIZ))). - - DIFR (output) DOUBLE PRECISION array, - dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and - dimension ( N ) if ICOMPQ = 0. - If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1) - record distances between singular values on the I-th - level and singular values on the (I -1)-th level, and - DIFR(1:N, 2 * I ) contains the normalizing factors for - the right singular vector matrix. See DLASD8 for details. - - Z (output) DOUBLE PRECISION array, - dimension ( LDU, NLVL ) if ICOMPQ = 1 and - dimension ( N ) if ICOMPQ = 0. - The first K elements of Z(1, I) contain the components of - the deflation-adjusted updating row vector for subproblems - on the I-th level. - - POLES (output) DOUBLE PRECISION array, - dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced - if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and - POLES(1, 2*I) contain the new and old singular values - involved in the secular equations on the I-th level. - - GIVPTR (output) INTEGER array, - dimension ( N ) if ICOMPQ = 1, and not referenced if - ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records - the number of Givens rotations performed on the I-th - problem on the computation tree. - - GIVCOL (output) INTEGER array, - dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not - referenced if ICOMPQ = 0. 
If ICOMPQ = 1, on exit, for each I, - GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations - of Givens rotations performed on the I-th level on the - computation tree. - - LDGCOL (input) INTEGER, LDGCOL = > N. - The leading dimension of arrays GIVCOL and PERM. - - PERM (output) INTEGER array, - dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced - if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records - permutations done on the I-th level of the computation tree. - - GIVNUM (output) DOUBLE PRECISION array, - dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not - referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, - GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S- - values of Givens rotations performed on the I-th level on - the computation tree. - - C (output) DOUBLE PRECISION array, - dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. - If ICOMPQ = 1 and the I-th subproblem is not square, on exit, - C( I ) contains the C-value of a Givens rotation related to - the right null space of the I-th subproblem. - - S (output) DOUBLE PRECISION array, dimension ( N ) if - ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 - and the I-th subproblem is not square, on exit, S( I ) - contains the S-value of a Givens rotation related to - the right null space of the I-th subproblem. - - WORK (workspace) DOUBLE PRECISION array, dimension - (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)). - - IWORK (workspace) INTEGER array. - Dimension must be at least (7 * N). - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an singular value did not converge - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --e; - givnum_dim1 = *ldu; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - poles_dim1 = *ldu; - poles_offset = 1 + poles_dim1 * 1; - poles -= poles_offset; - z_dim1 = *ldu; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - difr_dim1 = *ldu; - difr_offset = 1 + difr_dim1 * 1; - difr -= difr_offset; - difl_dim1 = *ldu; - difl_offset = 1 + difl_dim1 * 1; - difl -= difl_offset; - vt_dim1 = *ldu; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - --k; - --givptr; - perm_dim1 = *ldgcol; - perm_offset = 1 + perm_dim1 * 1; - perm -= perm_offset; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - --c__; - --s; - --work; - --iwork; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*smlsiz < 3) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*sqre < 0 || *sqre > 1) { - *info = -4; - } else if (*ldu < *n + *sqre) { - *info = -8; - } else if (*ldgcol < *n) { - *info = -17; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASDA", &i__1); - return 0; - } - - m = *n + *sqre; - -/* If the input matrix is too small, call DLASDQ to find the SVD. */ - - if (*n <= *smlsiz) { - if (*icompq == 0) { - dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[ - vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, & - work[1], info); - } else { - dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset] - , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], - info); - } - return 0; - } - -/* Book-keeping and set up the computation tree. 
*/ - - inode = 1; - ndiml = inode + *n; - ndimr = ndiml + *n; - idxq = ndimr + *n; - iwk = idxq + *n; - - ncc = 0; - nru = 0; - - smlszp = *smlsiz + 1; - vf = 1; - vl = vf + m; - nwork1 = vl + m; - nwork2 = nwork1 + smlszp * smlszp; - - dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], - smlsiz); - -/* - for the nodes on bottom level of the tree, solve - their subproblems by DLASDQ. -*/ - - ndb1 = (nd + 1) / 2; - i__1 = nd; - for (i__ = ndb1; i__ <= i__1; ++i__) { - -/* - IC : center row of each node - NL : number of rows of left subproblem - NR : number of rows of right subproblem - NLF: starting row of the left subproblem - NRF: starting row of the right subproblem -*/ - - i1 = i__ - 1; - ic = iwork[inode + i1]; - nl = iwork[ndiml + i1]; - nlp1 = nl + 1; - nr = iwork[ndimr + i1]; - nlf = ic - nl; - nrf = ic + 1; - idxqi = idxq + nlf - 2; - vfi = vf + nlf - 1; - vli = vl + nlf - 1; - sqrei = 1; - if (*icompq == 0) { - dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &work[nwork1], &smlszp); - dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], & - work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2], - &nl, &work[nwork2], info); - itemp = nwork1 + nl * smlszp; - dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1); - dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1); - } else { - dlaset_("A", &nl, &nl, &c_b29, &c_b15, &u[nlf + u_dim1], ldu); - dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &vt[nlf + vt_dim1], - ldu); - dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], & - vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf + - u_dim1], ldu, &work[nwork1], info); - dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1); - dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1) - ; - } - if (*info != 0) { - return 0; - } - i__2 = nl; - for (j = 1; j <= i__2; ++j) { - iwork[idxqi + j] = j; -/* L10: */ - } - if ((i__ == nd && *sqre == 0)) { - sqrei = 0; - } else { - sqrei = 1; - } - idxqi += nlp1; - vfi += 
nlp1; - vli += nlp1; - nrp1 = nr + sqrei; - if (*icompq == 0) { - dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &work[nwork1], &smlszp); - dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], & - work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2], - &nr, &work[nwork2], info); - itemp = nwork1 + (nrp1 - 1) * smlszp; - dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1); - dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1); - } else { - dlaset_("A", &nr, &nr, &c_b29, &c_b15, &u[nrf + u_dim1], ldu); - dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &vt[nrf + vt_dim1], - ldu); - dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], & - vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf + - u_dim1], ldu, &work[nwork1], info); - dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1); - dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1) - ; - } - if (*info != 0) { - return 0; - } - i__2 = nr; - for (j = 1; j <= i__2; ++j) { - iwork[idxqi + j] = j; -/* L20: */ - } -/* L30: */ - } - -/* Now conquer each subproblem bottom-up. */ - - j = pow_ii(&c__2, &nlvl); - for (lvl = nlvl; lvl >= 1; --lvl) { - lvl2 = ((lvl) << (1)) - 1; - -/* - Find the first node LF and last node LL on - the current level LVL. 
-*/ - - if (lvl == 1) { - lf = 1; - ll = 1; - } else { - i__1 = lvl - 1; - lf = pow_ii(&c__2, &i__1); - ll = ((lf) << (1)) - 1; - } - i__1 = ll; - for (i__ = lf; i__ <= i__1; ++i__) { - im1 = i__ - 1; - ic = iwork[inode + im1]; - nl = iwork[ndiml + im1]; - nr = iwork[ndimr + im1]; - nlf = ic - nl; - nrf = ic + 1; - if (i__ == ll) { - sqrei = *sqre; - } else { - sqrei = 1; - } - vfi = vf + nlf - 1; - vli = vl + nlf - 1; - idxqi = idxq + nlf - 1; - alpha = d__[ic]; - beta = e[ic]; - if (*icompq == 0) { - dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], & - work[vli], &alpha, &beta, &iwork[idxqi], &perm[ - perm_offset], &givptr[1], &givcol[givcol_offset], - ldgcol, &givnum[givnum_offset], ldu, &poles[ - poles_offset], &difl[difl_offset], &difr[difr_offset], - &z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1], - &iwork[iwk], info); - } else { - --j; - dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], & - work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf + - lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 * - givcol_dim1], ldgcol, &givnum[nlf + lvl2 * - givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], & - difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 * - difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j], - &s[j], &work[nwork1], &iwork[iwk], info); - } - if (*info != 0) { - return 0; - } -/* L40: */ - } -/* L50: */ - } - - return 0; - -/* End of DLASDA */ - -} /* dlasda_ */ - -/* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer * - ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e, - doublereal *vt, integer *ldvt, doublereal *u, integer *ldu, - doublereal *c__, integer *ldc, doublereal *work, integer *info) -{ - /* System generated locals */ - integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, - i__2; - - /* Local variables */ - static integer i__, j; - static doublereal r__, cs, sn; - static integer np1, isub; - static doublereal smin; - static integer sqre1; - extern logical lsame_(char 
*, char *); - extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *), dswap_(integer *, doublereal *, integer * - , doublereal *, integer *); - static integer iuplo; - extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *), xerbla_(char *, - integer *), dbdsqr_(char *, integer *, integer *, integer - *, integer *, doublereal *, doublereal *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *); - static logical rotate; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASDQ computes the singular value decomposition (SVD) of a real - (upper or lower) bidiagonal matrix with diagonal D and offdiagonal - E, accumulating the transformations if desired. Letting B denote - the input bidiagonal matrix, the algorithm computes orthogonal - matrices Q and P such that B = Q * S * P' (P' denotes the transpose - of P). The singular values S are overwritten on D. - - The input matrix U is changed to U * Q if desired. - The input matrix VT is changed to P' * VT if desired. - The input matrix C is changed to Q' * C if desired. - - See "Computing Small Singular Values of Bidiagonal Matrices With - Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, - LAPACK Working Note #3, for a detailed description of the algorithm. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - On entry, UPLO specifies whether the input bidiagonal matrix - is upper or lower bidiagonal, and wether it is square are - not. - UPLO = 'U' or 'u' B is upper bidiagonal. - UPLO = 'L' or 'l' B is lower bidiagonal. - - SQRE (input) INTEGER - = 0: then the input matrix is N-by-N. 
- = 1: then the input matrix is N-by-(N+1) if UPLU = 'U' and - (N+1)-by-N if UPLU = 'L'. - - The bidiagonal matrix has - N = NL + NR + 1 rows and - M = N + SQRE >= N columns. - - N (input) INTEGER - On entry, N specifies the number of rows and columns - in the matrix. N must be at least 0. - - NCVT (input) INTEGER - On entry, NCVT specifies the number of columns of - the matrix VT. NCVT must be at least 0. - - NRU (input) INTEGER - On entry, NRU specifies the number of rows of - the matrix U. NRU must be at least 0. - - NCC (input) INTEGER - On entry, NCC specifies the number of columns of - the matrix C. NCC must be at least 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, D contains the diagonal entries of the - bidiagonal matrix whose SVD is desired. On normal exit, - D contains the singular values in ascending order. - - E (input/output) DOUBLE PRECISION array. - dimension is (N-1) if SQRE = 0 and N if SQRE = 1. - On entry, the entries of E contain the offdiagonal entries - of the bidiagonal matrix whose SVD is desired. On normal - exit, E will contain 0. If the algorithm does not converge, - D and E will contain the diagonal and superdiagonal entries - of a bidiagonal matrix orthogonally equivalent to the one - given as input. - - VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) - On entry, contains a matrix which on exit has been - premultiplied by P', dimension N-by-NCVT if SQRE = 0 - and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0). - - LDVT (input) INTEGER - On entry, LDVT specifies the leading dimension of VT as - declared in the calling (sub) program. LDVT must be at - least 1. If NCVT is nonzero LDVT must also be at least N. - - U (input/output) DOUBLE PRECISION array, dimension (LDU, N) - On entry, contains a matrix which on exit has been - postmultiplied by Q, dimension NRU-by-N if SQRE = 0 - and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0). 
- - LDU (input) INTEGER - On entry, LDU specifies the leading dimension of U as - declared in the calling (sub) program. LDU must be at - least max( 1, NRU ) . - - C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) - On entry, contains an N-by-NCC matrix which on exit - has been premultiplied by Q' dimension N-by-NCC if SQRE = 0 - and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0). - - LDC (input) INTEGER - On entry, LDC specifies the leading dimension of C as - declared in the calling (sub) program. LDC must be at - least 1. If NCC is nonzero, LDC must also be at least N. - - WORK (workspace) DOUBLE PRECISION array, dimension (4*N) - Workspace. Only referenced if one of NCVT, NRU, or NCC is - nonzero, and if N is at least 2. - - INFO (output) INTEGER - On exit, a value of 0 indicates a successful exit. - If INFO < 0, argument number -INFO is illegal. - If INFO > 0, the algorithm did not converge, and INFO - specifies how many superdiagonals did not converge. - - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --e; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - iuplo = 0; - if (lsame_(uplo, "U")) { - iuplo = 1; - } - if (lsame_(uplo, "L")) { - iuplo = 2; - } - if (iuplo == 0) { - *info = -1; - } else if (*sqre < 0 || *sqre > 1) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ncvt < 0) { - *info = -4; - } else if (*nru < 0) { - *info = -5; - } else if (*ncc < 0) { - *info = -6; - } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n))) - { - *info = -10; - } else if (*ldu < max(1,*nru)) { - *info = -12; - } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) { - *info = -14; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASDQ", &i__1); - return 0; - } - if (*n == 0) { - return 0; - } - -/* ROTATE is true if any singular vectors desired, false otherwise */ - - rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; - np1 = *n + 1; - sqre1 = *sqre; - -/* - If matrix non-square upper bidiagonal, rotate to be lower - bidiagonal. The rotations are on the right. -*/ - - if ((iuplo == 1 && sqre1 == 1)) { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); - d__[i__] = r__; - e[i__] = sn * d__[i__ + 1]; - d__[i__ + 1] = cs * d__[i__ + 1]; - if (rotate) { - work[i__] = cs; - work[*n + i__] = sn; - } -/* L10: */ - } - dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__); - d__[*n] = r__; - e[*n] = 0.; - if (rotate) { - work[*n] = cs; - work[*n + *n] = sn; - } - iuplo = 2; - sqre1 = 0; - -/* Update singular vectors if desired. */ - - if (*ncvt > 0) { - dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[ - vt_offset], ldvt); - } - } - -/* - If matrix lower bidiagonal, rotate to be upper bidiagonal - by applying Givens rotations on the left. 
-*/ - - if (iuplo == 2) { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); - d__[i__] = r__; - e[i__] = sn * d__[i__ + 1]; - d__[i__ + 1] = cs * d__[i__ + 1]; - if (rotate) { - work[i__] = cs; - work[*n + i__] = sn; - } -/* L20: */ - } - -/* - If matrix (N+1)-by-N lower bidiagonal, one additional - rotation is needed. -*/ - - if (sqre1 == 1) { - dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__); - d__[*n] = r__; - if (rotate) { - work[*n] = cs; - work[*n + *n] = sn; - } - } - -/* Update singular vectors if desired. */ - - if (*nru > 0) { - if (sqre1 == 0) { - dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[ - u_offset], ldu); - } else { - dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[ - u_offset], ldu); - } - } - if (*ncc > 0) { - if (sqre1 == 0) { - dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[ - c_offset], ldc); - } else { - dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[ - c_offset], ldc); - } - } - } - -/* - Call DBDSQR to compute the SVD of the reduced real - N-by-N upper bidiagonal matrix. -*/ - - dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[ - u_offset], ldu, &c__[c_offset], ldc, &work[1], info); - -/* - Sort the singular values into ascending order (insertion sort on - singular values, but only one transposition per singular vector) -*/ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Scan for smallest D(I). */ - - isub = i__; - smin = d__[i__]; - i__2 = *n; - for (j = i__ + 1; j <= i__2; ++j) { - if (d__[j] < smin) { - isub = j; - smin = d__[j]; - } -/* L30: */ - } - if (isub != i__) { - -/* Swap singular values and vectors. 
*/ - - d__[isub] = d__[i__]; - d__[i__] = smin; - if (*ncvt > 0) { - dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1], - ldvt); - } - if (*nru > 0) { - dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1] - , &c__1); - } - if (*ncc > 0) { - dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc) - ; - } - } -/* L40: */ - } - - return 0; - -/* End of DLASDQ */ - -} /* dlasdq_ */ - -/* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer * - inode, integer *ndiml, integer *ndimr, integer *msub) -{ - /* System generated locals */ - integer i__1, i__2; - - /* Builtin functions */ - double log(doublereal); - - /* Local variables */ - static integer i__, il, ir, maxn; - static doublereal temp; - static integer nlvl, llst, ncrnt; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASDT creates a tree of subproblems for bidiagonal divide and - conquer. - - Arguments - ========= - - N (input) INTEGER - On entry, the number of diagonal elements of the - bidiagonal matrix. - - LVL (output) INTEGER - On exit, the number of levels on the computation tree. - - ND (output) INTEGER - On exit, the number of nodes on the tree. - - INODE (output) INTEGER array, dimension ( N ) - On exit, centers of subproblems. - - NDIML (output) INTEGER array, dimension ( N ) - On exit, row dimensions of left children. - - NDIMR (output) INTEGER array, dimension ( N ) - On exit, row dimensions of right children. - - MSUB (input) INTEGER. - On entry, the maximum row dimension each subproblem at the - bottom of the tree can be of. 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Find the number of levels on the tree. -*/ - - /* Parameter adjustments */ - --ndimr; - --ndiml; - --inode; - - /* Function Body */ - maxn = max(1,*n); - temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.); - *lvl = (integer) temp + 1; - - i__ = *n / 2; - inode[1] = i__ + 1; - ndiml[1] = i__; - ndimr[1] = *n - i__ - 1; - il = 0; - ir = 1; - llst = 1; - i__1 = *lvl - 1; - for (nlvl = 1; nlvl <= i__1; ++nlvl) { - -/* - Constructing the tree at (NLVL+1)-st level. The number of - nodes created on this level is LLST * 2. -*/ - - i__2 = llst - 1; - for (i__ = 0; i__ <= i__2; ++i__) { - il += 2; - ir += 2; - ncrnt = llst + i__; - ndiml[il] = ndiml[ncrnt] / 2; - ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1; - inode[il] = inode[ncrnt] - ndimr[il] - 1; - ndiml[ir] = ndimr[ncrnt] / 2; - ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1; - inode[ir] = inode[ncrnt] + ndiml[ir] + 1; -/* L10: */ - } - llst <<= 1; -/* L20: */ - } - *nd = ((llst) << (1)) - 1; - - return 0; - -/* End of DLASDT */ - -} /* dlasdt_ */ - -/* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal * - alpha, doublereal *beta, doublereal *a, integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j; - extern logical lsame_(char *, char *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLASET initializes an m-by-n matrix A to BETA on the diagonal and - ALPHA on the offdiagonals. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies the part of the matrix A to be set. 
- = 'U': Upper triangular part is set; the strictly lower - triangular part of A is not changed. - = 'L': Lower triangular part is set; the strictly upper - triangular part of A is not changed. - Otherwise: All of the matrix A is set. - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - ALPHA (input) DOUBLE PRECISION - The constant to which the offdiagonal elements are to be set. - - BETA (input) DOUBLE PRECISION - The constant to which the diagonal elements are to be set. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On exit, the leading m-by-n submatrix of A is set as follows: - - if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n, - if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n, - otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j, - - and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - if (lsame_(uplo, "U")) { - -/* - Set the strictly upper triangular or trapezoidal part of the - array to ALPHA. -*/ - - i__1 = *n; - for (j = 2; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = j - 1; - i__2 = min(i__3,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = *alpha; -/* L10: */ - } -/* L20: */ - } - - } else if (lsame_(uplo, "L")) { - -/* - Set the strictly lower triangular or trapezoidal part of the - array to ALPHA. -*/ - - i__1 = min(*m,*n); - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j + 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = *alpha; -/* L30: */ - } -/* L40: */ - } - - } else { - -/* Set the leading m-by-n submatrix to ALPHA. 
*/ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = *alpha; -/* L50: */ - } -/* L60: */ - } - } - -/* Set the first min(M,N) diagonal elements to BETA. */ - - i__1 = min(*m,*n); - for (i__ = 1; i__ <= i__1; ++i__) { - a[i__ + i__ * a_dim1] = *beta; -/* L70: */ - } - - return 0; - -/* End of DLASET */ - -} /* dlaset_ */ - -/* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e, - doublereal *work, integer *info) -{ - /* System generated locals */ - integer i__1, i__2; - doublereal d__1, d__2, d__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__; - static doublereal eps; - extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal - *, doublereal *, doublereal *); - static doublereal scale; - static integer iinfo; - static doublereal sigmn; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static doublereal sigmx; - extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *); - - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *); - static doublereal safmin; - extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_( - char *, integer *, doublereal *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASQ1 computes the singular values of a real N-by-N bidiagonal - matrix with diagonal D and off-diagonal E. The singular values - are computed to high relative accuracy, in the absence of - denormalization, underflow and overflow. The algorithm was first - presented in - - "Accurate singular values and differential qd algorithms" by K. V. - Fernando and B. N. 
Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230, - 1994, - - and the present implementation is described in "An implementation of - the dqds Algorithm (Positive Case)", LAPACK Working Note. - - Arguments - ========= - - N (input) INTEGER - The number of rows and columns in the matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, D contains the diagonal elements of the - bidiagonal matrix whose SVD is desired. On normal exit, - D contains the singular values in decreasing order. - - E (input/output) DOUBLE PRECISION array, dimension (N) - On entry, elements E(1:N-1) contain the off-diagonal elements - of the bidiagonal matrix whose SVD is desired. - On exit, E is overwritten. - - WORK (workspace) DOUBLE PRECISION array, dimension (4*N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: the algorithm failed - = 1, a split was marked by a positive value in E - = 2, current block of Z not diagonalized after 30*N - iterations (in inner while loop) - = 3, termination criterion of outer while loop not met - (program created more than N unreduced blocks) - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --work; - --e; - --d__; - - /* Function Body */ - *info = 0; - if (*n < 0) { - *info = -2; - i__1 = -(*info); - xerbla_("DLASQ1", &i__1); - return 0; - } else if (*n == 0) { - return 0; - } else if (*n == 1) { - d__[1] = abs(d__[1]); - return 0; - } else if (*n == 2) { - dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx); - d__[1] = sigmx; - d__[2] = sigmn; - return 0; - } - -/* Estimate the largest singular value. 
*/ - - sigmx = 0.; - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - d__[i__] = (d__1 = d__[i__], abs(d__1)); -/* Computing MAX */ - d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1)); - sigmx = max(d__2,d__3); -/* L10: */ - } - d__[*n] = (d__1 = d__[*n], abs(d__1)); - -/* Early return if SIGMX is zero (matrix is already diagonal). */ - - if (sigmx == 0.) { - dlasrt_("D", n, &d__[1], &iinfo); - return 0; - } - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = sigmx, d__2 = d__[i__]; - sigmx = max(d__1,d__2); -/* L20: */ - } - -/* - Copy D and E into WORK (in the Z format) and scale (squaring the - input data makes scaling by a power of the radix pointless). -*/ - - eps = PRECISION; - safmin = SAFEMINIMUM; - scale = sqrt(eps / safmin); - dcopy_(n, &d__[1], &c__1, &work[1], &c__2); - i__1 = *n - 1; - dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2); - i__1 = ((*n) << (1)) - 1; - i__2 = ((*n) << (1)) - 1; - dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2, - &iinfo); - -/* Compute the q's and e's. 
*/ - - i__1 = ((*n) << (1)) - 1; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing 2nd power */ - d__1 = work[i__]; - work[i__] = d__1 * d__1; -/* L30: */ - } - work[*n * 2] = 0.; - - dlasq2_(n, &work[1], info); - - if (*info == 0) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - d__[i__] = sqrt(work[i__]); -/* L40: */ - } - dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, & - iinfo); - } - - return 0; - -/* End of DLASQ1 */ - -} /* dlasq1_ */ - -/* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal d__, e; - static integer k; - static doublereal s, t; - static integer i0, i4, n0, pp; - static doublereal eps, tol; - static integer ipn4; - static doublereal tol2; - static logical ieee; - static integer nbig; - static doublereal dmin__, emin, emax; - static integer ndiv, iter; - static doublereal qmin, temp, qmax, zmax; - static integer splt, nfail; - static doublereal desig, trace, sigma; - static integer iinfo; - extern /* Subroutine */ int dlasq3_(integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, doublereal *, doublereal *, - integer *, integer *, integer *, logical *); - - static integer iwhila, iwhilb; - static doublereal oldemn, safmin; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, - integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASQ2 computes all the eigenvalues of the symmetric positive - definite tridiagonal matrix associated with the qd array Z to high - relative accuracy are computed to high relative accuracy, in the - absence of denormalization, underflow and overflow. - - To see the relation of Z to the tridiagonal matrix, let L be a - unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and - let U be an upper bidiagonal matrix with 1's above and diagonal - Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the - symmetric tridiagonal to which it is similar. - - Note : DLASQ2 defines a logical variable, IEEE, which is true - on machines which follow ieee-754 floating-point standard in their - handling of infinities and NaNs, and false otherwise. This variable - is passed to DLASQ3. - - Arguments - ========= - - N (input) INTEGER - The number of rows and columns in the matrix. N >= 0. - - Z (workspace) DOUBLE PRECISION array, dimension ( 4*N ) - On entry Z holds the qd array. On exit, entries 1 to N hold - the eigenvalues in decreasing order, Z( 2*N+1 ) holds the - trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If - N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 ) - holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of - shifts that failed. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if the i-th argument is a scalar and had an illegal - value, then INFO = -i, if the i-th argument is an - array and the j-entry had an illegal value, then - INFO = -(i*100+j) - > 0: the algorithm failed - = 1, a split was marked by a positive value in E - = 2, current block of Z not diagonalized after 30*N - iterations (in inner while loop) - = 3, termination criterion of outer while loop not met - (program created more than N unreduced blocks) - - Further Details - =============== - Local Variables: I0:N0 defines a current unreduced segment of Z. - The shifts are accumulated in SIGMA. Iteration count is in ITER. - Ping-pong is controlled by PP (alternates between 0 and 1). - - ===================================================================== - - - Test the input arguments. - (in case DLASQ2 is not called by DLASQ1) -*/ - - /* Parameter adjustments */ - --z__; - - /* Function Body */ - *info = 0; - eps = PRECISION; - safmin = SAFEMINIMUM; - tol = eps * 100.; -/* Computing 2nd power */ - d__1 = tol; - tol2 = d__1 * d__1; - - if (*n < 0) { - *info = -1; - xerbla_("DLASQ2", &c__1); - return 0; - } else if (*n == 0) { - return 0; - } else if (*n == 1) { - -/* 1-by-1 case. */ - - if (z__[1] < 0.) { - *info = -201; - xerbla_("DLASQ2", &c__2); - } - return 0; - } else if (*n == 2) { - -/* 2-by-2 case. */ - - if (z__[2] < 0. || z__[3] < 0.) { - *info = -2; - xerbla_("DLASQ2", &c__2); - return 0; - } else if (z__[3] > z__[1]) { - d__ = z__[3]; - z__[3] = z__[1]; - z__[1] = d__; - } - z__[5] = z__[1] + z__[2] + z__[3]; - if (z__[2] > z__[3] * tol2) { - t = (z__[1] - z__[3] + z__[2]) * .5; - s = z__[3] * (z__[2] / t); - if (s <= t) { - s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) 
+ 1.))); - } else { - s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s))); - } - t = z__[1] + (s + z__[2]); - z__[3] *= z__[1] / t; - z__[1] = t; - } - z__[2] = z__[3]; - z__[6] = z__[2] + z__[1]; - return 0; - } - -/* Check for negative data and compute sums of q's and e's. */ - - z__[*n * 2] = 0.; - emin = z__[2]; - qmax = 0.; - zmax = 0.; - d__ = 0.; - e = 0.; - - i__1 = (*n - 1) << (1); - for (k = 1; k <= i__1; k += 2) { - if (z__[k] < 0.) { - *info = -(k + 200); - xerbla_("DLASQ2", &c__2); - return 0; - } else if (z__[k + 1] < 0.) { - *info = -(k + 201); - xerbla_("DLASQ2", &c__2); - return 0; - } - d__ += z__[k]; - e += z__[k + 1]; -/* Computing MAX */ - d__1 = qmax, d__2 = z__[k]; - qmax = max(d__1,d__2); -/* Computing MIN */ - d__1 = emin, d__2 = z__[k + 1]; - emin = min(d__1,d__2); -/* Computing MAX */ - d__1 = max(qmax,zmax), d__2 = z__[k + 1]; - zmax = max(d__1,d__2); -/* L10: */ - } - if (z__[((*n) << (1)) - 1] < 0.) { - *info = -(((*n) << (1)) + 199); - xerbla_("DLASQ2", &c__2); - return 0; - } - d__ += z__[((*n) << (1)) - 1]; -/* Computing MAX */ - d__1 = qmax, d__2 = z__[((*n) << (1)) - 1]; - qmax = max(d__1,d__2); - zmax = max(qmax,zmax); - -/* Check for diagonality. */ - - if (e == 0.) { - i__1 = *n; - for (k = 2; k <= i__1; ++k) { - z__[k] = z__[((k) << (1)) - 1]; -/* L20: */ - } - dlasrt_("D", n, &z__[1], &iinfo); - z__[((*n) << (1)) - 1] = d__; - return 0; - } - - trace = d__ + e; - -/* Check for zero data. */ - - if (trace == 0.) { - z__[((*n) << (1)) - 1] = 0.; - return 0; - } - -/* Check whether the machine is IEEE conformable. */ - - ieee = (ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, ( - ftnlen)6, (ftnlen)1) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1, - &c__2, &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1); - -/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). 
*/ - - for (k = (*n) << (1); k >= 2; k += -2) { - z__[k * 2] = 0.; - z__[((k) << (1)) - 1] = z__[k]; - z__[((k) << (1)) - 2] = 0.; - z__[((k) << (1)) - 3] = z__[k - 1]; -/* L30: */ - } - - i0 = 1; - n0 = *n; - -/* Reverse the qd-array, if warranted. */ - - if (z__[((i0) << (2)) - 3] * 1.5 < z__[((n0) << (2)) - 3]) { - ipn4 = (i0 + n0) << (2); - i__1 = (i0 + n0 - 1) << (1); - for (i4 = (i0) << (2); i4 <= i__1; i4 += 4) { - temp = z__[i4 - 3]; - z__[i4 - 3] = z__[ipn4 - i4 - 3]; - z__[ipn4 - i4 - 3] = temp; - temp = z__[i4 - 1]; - z__[i4 - 1] = z__[ipn4 - i4 - 5]; - z__[ipn4 - i4 - 5] = temp; -/* L40: */ - } - } - -/* Initial split checking via dqd and Li's test. */ - - pp = 0; - - for (k = 1; k <= 2; ++k) { - - d__ = z__[((n0) << (2)) + pp - 3]; - i__1 = ((i0) << (2)) + pp; - for (i4 = ((n0 - 1) << (2)) + pp; i4 >= i__1; i4 += -4) { - if (z__[i4 - 1] <= tol2 * d__) { - z__[i4 - 1] = -0.; - d__ = z__[i4 - 3]; - } else { - d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1])); - } -/* L50: */ - } - -/* dqd maps Z to ZZ plus Li's test. */ - - emin = z__[((i0) << (2)) + pp + 1]; - d__ = z__[((i0) << (2)) + pp - 3]; - i__1 = ((n0 - 1) << (2)) + pp; - for (i4 = ((i0) << (2)) + pp; i4 <= i__1; i4 += 4) { - z__[i4 - ((pp) << (1)) - 2] = d__ + z__[i4 - 1]; - if (z__[i4 - 1] <= tol2 * d__) { - z__[i4 - 1] = -0.; - z__[i4 - ((pp) << (1)) - 2] = d__; - z__[i4 - ((pp) << (1))] = 0.; - d__ = z__[i4 + 1]; - } else if ((safmin * z__[i4 + 1] < z__[i4 - ((pp) << (1)) - 2] && - safmin * z__[i4 - ((pp) << (1)) - 2] < z__[i4 + 1])) { - temp = z__[i4 + 1] / z__[i4 - ((pp) << (1)) - 2]; - z__[i4 - ((pp) << (1))] = z__[i4 - 1] * temp; - d__ *= temp; - } else { - z__[i4 - ((pp) << (1))] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - - ((pp) << (1)) - 2]); - d__ = z__[i4 + 1] * (d__ / z__[i4 - ((pp) << (1)) - 2]); - } -/* Computing MIN */ - d__1 = emin, d__2 = z__[i4 - ((pp) << (1))]; - emin = min(d__1,d__2); -/* L60: */ - } - z__[((n0) << (2)) - pp - 2] = d__; - -/* Now find qmax. 
*/ - - qmax = z__[((i0) << (2)) - pp - 2]; - i__1 = ((n0) << (2)) - pp - 2; - for (i4 = ((i0) << (2)) - pp + 2; i4 <= i__1; i4 += 4) { -/* Computing MAX */ - d__1 = qmax, d__2 = z__[i4]; - qmax = max(d__1,d__2); -/* L70: */ - } - -/* Prepare for the next iteration on K. */ - - pp = 1 - pp; -/* L80: */ - } - - iter = 2; - nfail = 0; - ndiv = (n0 - i0) << (1); - - i__1 = *n + 1; - for (iwhila = 1; iwhila <= i__1; ++iwhila) { - if (n0 < 1) { - goto L150; - } - -/* - While array unfinished do - - E(N0) holds the value of SIGMA when submatrix in I0:N0 - splits from the rest of the array, but is negated. -*/ - - desig = 0.; - if (n0 == *n) { - sigma = 0.; - } else { - sigma = -z__[((n0) << (2)) - 1]; - } - if (sigma < 0.) { - *info = 1; - return 0; - } - -/* - Find last unreduced submatrix's top index I0, find QMAX and - EMIN. Find Gershgorin-type bound if Q's much greater than E's. -*/ - - emax = 0.; - if (n0 > i0) { - emin = (d__1 = z__[((n0) << (2)) - 5], abs(d__1)); - } else { - emin = 0.; - } - qmin = z__[((n0) << (2)) - 3]; - qmax = qmin; - for (i4 = (n0) << (2); i4 >= 8; i4 += -4) { - if (z__[i4 - 5] <= 0.) { - goto L100; - } - if (qmin >= emax * 4.) { -/* Computing MIN */ - d__1 = qmin, d__2 = z__[i4 - 3]; - qmin = min(d__1,d__2); -/* Computing MAX */ - d__1 = emax, d__2 = z__[i4 - 5]; - emax = max(d__1,d__2); - } -/* Computing MAX */ - d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5]; - qmax = max(d__1,d__2); -/* Computing MIN */ - d__1 = emin, d__2 = z__[i4 - 5]; - emin = min(d__1,d__2); -/* L90: */ - } - i4 = 4; - -L100: - i0 = i4 / 4; - -/* Store EMIN for passing to DLASQ3. */ - - z__[((n0) << (2)) - 1] = emin; - -/* - Put -(initial shift) into DMIN. - - Computing MAX -*/ - d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax); - dmin__ = -max(d__1,d__2); - -/* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. 
*/ - - pp = 0; - - nbig = (n0 - i0 + 1) * 30; - i__2 = nbig; - for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) { - if (i0 > n0) { - goto L130; - } - -/* While submatrix unfinished take a good dqds step. */ - - dlasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, & - nfail, &iter, &ndiv, &ieee); - - pp = 1 - pp; - -/* When EMIN is very small check for splits. */ - - if ((pp == 0 && n0 - i0 >= 3)) { - if (z__[n0 * 4] <= tol2 * qmax || z__[((n0) << (2)) - 1] <= - tol2 * sigma) { - splt = i0 - 1; - qmax = z__[((i0) << (2)) - 3]; - emin = z__[((i0) << (2)) - 1]; - oldemn = z__[i0 * 4]; - i__3 = (n0 - 3) << (2); - for (i4 = (i0) << (2); i4 <= i__3; i4 += 4) { - if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <= - tol2 * sigma) { - z__[i4 - 1] = -sigma; - splt = i4 / 4; - qmax = 0.; - emin = z__[i4 + 3]; - oldemn = z__[i4 + 4]; - } else { -/* Computing MAX */ - d__1 = qmax, d__2 = z__[i4 + 1]; - qmax = max(d__1,d__2); -/* Computing MIN */ - d__1 = emin, d__2 = z__[i4 - 1]; - emin = min(d__1,d__2); -/* Computing MIN */ - d__1 = oldemn, d__2 = z__[i4]; - oldemn = min(d__1,d__2); - } -/* L110: */ - } - z__[((n0) << (2)) - 1] = emin; - z__[n0 * 4] = oldemn; - i0 = splt + 1; - } - } - -/* L120: */ - } - - *info = 2; - return 0; - -/* end IWHILB */ - -L130: - -/* L140: */ - ; - } - - *info = 3; - return 0; - -/* end IWHILA */ - -L150: - -/* Move q's to the front. */ - - i__1 = *n; - for (k = 2; k <= i__1; ++k) { - z__[k] = z__[((k) << (2)) - 3]; -/* L160: */ - } - -/* Sort and compute sum of eigenvalues. */ - - dlasrt_("D", n, &z__[1], &iinfo); - - e = 0.; - for (k = *n; k >= 1; --k) { - e += z__[k]; -/* L170: */ - } - -/* Store trace, sum(eigenvalues) and information on performance. */ - - z__[((*n) << (1)) + 1] = trace; - z__[((*n) << (1)) + 2] = e; - z__[((*n) << (1)) + 3] = (doublereal) iter; -/* Computing 2nd power */ - i__1 = *n; - z__[((*n) << (1)) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1); - z__[((*n) << (1)) + 5] = nfail * 100. 
/ (doublereal) iter; - return 0; - -/* End of DLASQ2 */ - -} /* dlasq2_ */ - -/* Subroutine */ int dlasq3_(integer *i0, integer *n0, doublereal *z__, - integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig, - doublereal *qmax, integer *nfail, integer *iter, integer *ndiv, - logical *ieee) -{ - /* Initialized data */ - - static integer ttype = 0; - static doublereal dmin1 = 0.; - static doublereal dmin2 = 0.; - static doublereal dn = 0.; - static doublereal dn1 = 0.; - static doublereal dn2 = 0.; - static doublereal tau = 0.; - - /* System generated locals */ - integer i__1; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal s, t; - static integer j4, nn; - static doublereal eps, tol; - static integer n0in, ipn4; - static doublereal tol2, temp; - extern /* Subroutine */ int dlasq4_(integer *, integer *, doublereal *, - integer *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *) - , dlasq5_(integer *, integer *, doublereal *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, logical *), dlasq6_( - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *); - - static doublereal safmin; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - May 17, 2000 - - - Purpose - ======= - - DLASQ3 checks for deflation, computes a shift (TAU) and calls dqds. - In case of failure it changes shifts, and tries again until output - is positive. - - Arguments - ========= - - I0 (input) INTEGER - First index. - - N0 (input) INTEGER - Last index. - - Z (input) DOUBLE PRECISION array, dimension ( 4*N ) - Z holds the qd array. 
- - PP (input) INTEGER - PP=0 for ping, PP=1 for pong. - - DMIN (output) DOUBLE PRECISION - Minimum value of d. - - SIGMA (output) DOUBLE PRECISION - Sum of shifts used in current segment. - - DESIG (input/output) DOUBLE PRECISION - Lower order part of SIGMA - - QMAX (input) DOUBLE PRECISION - Maximum value of q. - - NFAIL (output) INTEGER - Number of times shift was too big. - - ITER (output) INTEGER - Number of iterations. - - NDIV (output) INTEGER - Number of divisions. - - TTYPE (output) INTEGER - Shift type. - - IEEE (input) LOGICAL - Flag for IEEE or non IEEE arithmetic (passed to DLASQ5). - - ===================================================================== -*/ - - /* Parameter adjustments */ - --z__; - - /* Function Body */ - - n0in = *n0; - eps = PRECISION; - safmin = SAFEMINIMUM; - tol = eps * 100.; -/* Computing 2nd power */ - d__1 = tol; - tol2 = d__1 * d__1; - -/* Check for deflation. */ - -L10: - - if (*n0 < *i0) { - return 0; - } - if (*n0 == *i0) { - goto L20; - } - nn = ((*n0) << (2)) + *pp; - if (*n0 == *i0 + 1) { - goto L40; - } - -/* Check whether E(N0-1) is negligible, 1 eigenvalue. */ - - if ((z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - ((*pp) << (1) - ) - 4] > tol2 * z__[nn - 7])) { - goto L30; - } - -L20: - - z__[((*n0) << (2)) - 3] = z__[((*n0) << (2)) + *pp - 3] + *sigma; - --(*n0); - goto L10; - -/* Check whether E(N0-2) is negligible, 2 eigenvalues. */ - -L30: - - if ((z__[nn - 9] > tol2 * *sigma && z__[nn - ((*pp) << (1)) - 8] > tol2 * - z__[nn - 11])) { - goto L50; - } - -L40: - - if (z__[nn - 3] > z__[nn - 7]) { - s = z__[nn - 3]; - z__[nn - 3] = z__[nn - 7]; - z__[nn - 7] = s; - } - if (z__[nn - 5] > z__[nn - 3] * tol2) { - t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5; - s = z__[nn - 3] * (z__[nn - 5] / t); - if (s <= t) { - s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) 
+ 1.))); - } else { - s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s))); - } - t = z__[nn - 7] + (s + z__[nn - 5]); - z__[nn - 3] *= z__[nn - 7] / t; - z__[nn - 7] = t; - } - z__[((*n0) << (2)) - 7] = z__[nn - 7] + *sigma; - z__[((*n0) << (2)) - 3] = z__[nn - 3] + *sigma; - *n0 += -2; - goto L10; - -L50: - -/* Reverse the qd-array, if warranted. */ - - if (*dmin__ <= 0. || *n0 < n0in) { - if (z__[((*i0) << (2)) + *pp - 3] * 1.5 < z__[((*n0) << (2)) + *pp - - 3]) { - ipn4 = (*i0 + *n0) << (2); - i__1 = (*i0 + *n0 - 1) << (1); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - temp = z__[j4 - 3]; - z__[j4 - 3] = z__[ipn4 - j4 - 3]; - z__[ipn4 - j4 - 3] = temp; - temp = z__[j4 - 2]; - z__[j4 - 2] = z__[ipn4 - j4 - 2]; - z__[ipn4 - j4 - 2] = temp; - temp = z__[j4 - 1]; - z__[j4 - 1] = z__[ipn4 - j4 - 5]; - z__[ipn4 - j4 - 5] = temp; - temp = z__[j4]; - z__[j4] = z__[ipn4 - j4 - 4]; - z__[ipn4 - j4 - 4] = temp; -/* L60: */ - } - if (*n0 - *i0 <= 4) { - z__[((*n0) << (2)) + *pp - 1] = z__[((*i0) << (2)) + *pp - 1]; - z__[((*n0) << (2)) - *pp] = z__[((*i0) << (2)) - *pp]; - } -/* Computing MIN */ - d__1 = dmin2, d__2 = z__[((*n0) << (2)) + *pp - 1]; - dmin2 = min(d__1,d__2); -/* Computing MIN */ - d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*i0) << (2)) + - *pp - 1], d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2)) - + *pp + 3]; - z__[((*n0) << (2)) + *pp - 1] = min(d__1,d__2); -/* Computing MIN */ - d__1 = z__[((*n0) << (2)) - *pp], d__2 = z__[((*i0) << (2)) - *pp] - , d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2)) - *pp - + 4]; - z__[((*n0) << (2)) - *pp] = min(d__1,d__2); -/* Computing MAX */ - d__1 = *qmax, d__2 = z__[((*i0) << (2)) + *pp - 3], d__1 = max( - d__1,d__2), d__2 = z__[((*i0) << (2)) + *pp + 1]; - *qmax = max(d__1,d__2); - *dmin__ = -0.; - } - } - -/* - L70: - - Computing MIN -*/ - d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*n0) << (2)) + *pp - 9] - , d__1 = min(d__1,d__2), d__2 = dmin2 + z__[((*n0) << (2)) - *pp]; - if 
(*dmin__ < 0. || safmin * *qmax < min(d__1,d__2)) { - -/* Choose a shift. */ - - dlasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, &dmin1, &dmin2, &dn, &dn1, - &dn2, &tau, &ttype); - -/* Call dqds until DMIN > 0. */ - -L80: - - dlasq5_(i0, n0, &z__[1], pp, &tau, dmin__, &dmin1, &dmin2, &dn, &dn1, - &dn2, ieee); - - *ndiv += *n0 - *i0 + 2; - ++(*iter); - -/* Check status. */ - - if ((*dmin__ >= 0. && dmin1 > 0.)) { - -/* Success. */ - - goto L100; - - } else if ((((*dmin__ < 0. && dmin1 > 0.) && z__[((*n0 - 1) << (2)) - - *pp] < tol * (*sigma + dn1)) && abs(dn) < tol * *sigma)) { - -/* Convergence hidden by negative DN. */ - - z__[((*n0 - 1) << (2)) - *pp + 2] = 0.; - *dmin__ = 0.; - goto L100; - } else if (*dmin__ < 0.) { - -/* TAU too big. Select new TAU and try again. */ - - ++(*nfail); - if (ttype < -22) { - -/* Failed twice. Play it safe. */ - - tau = 0.; - } else if (dmin1 > 0.) { - -/* Late failure. Gives excellent shift. */ - - tau = (tau + *dmin__) * (1. - eps * 2.); - ttype += -11; - } else { - -/* Early failure. Divide by 4. */ - - tau *= .25; - ttype += -12; - } - goto L80; - } else if (*dmin__ != *dmin__) { - -/* NaN. */ - - tau = 0.; - goto L80; - } else { - -/* Possible underflow. Play it safe. */ - - goto L90; - } - } - -/* Risk of underflow. 
*/ - -L90: - dlasq6_(i0, n0, &z__[1], pp, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2); - *ndiv += *n0 - *i0 + 2; - ++(*iter); - tau = 0.; - -L100: - if (tau < *sigma) { - *desig += tau; - t = *sigma + *desig; - *desig -= t - *sigma; - } else { - t = *sigma + tau; - *desig = *sigma - (t - tau) + *desig; - } - *sigma = t; - - return 0; - -/* End of DLASQ3 */ - -} /* dlasq3_ */ - -/* Subroutine */ int dlasq4_(integer *i0, integer *n0, doublereal *z__, - integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1, - doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, - doublereal *tau, integer *ttype) -{ - /* Initialized data */ - - static doublereal g = 0.; - - /* System generated locals */ - integer i__1; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal s, a2, b1, b2; - static integer i4, nn, np; - static doublereal gam, gap1, gap2; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASQ4 computes an approximation TAU to the smallest eigenvalue - using values of d from the previous transform. - - I0 (input) INTEGER - First index. - - N0 (input) INTEGER - Last index. - - Z (input) DOUBLE PRECISION array, dimension ( 4*N ) - Z holds the qd array. - - PP (input) INTEGER - PP=0 for ping, PP=1 for pong. - - NOIN (input) INTEGER - The value of N0 at start of EIGTEST. - - DMIN (input) DOUBLE PRECISION - Minimum value of d. - - DMIN1 (input) DOUBLE PRECISION - Minimum value of d, excluding D( N0 ). - - DMIN2 (input) DOUBLE PRECISION - Minimum value of d, excluding D( N0 ) and D( N0-1 ). - - DN (input) DOUBLE PRECISION - d(N) - - DN1 (input) DOUBLE PRECISION - d(N-1) - - DN2 (input) DOUBLE PRECISION - d(N-2) - - TAU (output) DOUBLE PRECISION - This is the shift. - - TTYPE (output) INTEGER - Shift type. 
- - Further Details - =============== - CNST1 = 9/16 - - ===================================================================== -*/ - - /* Parameter adjustments */ - --z__; - - /* Function Body */ - -/* - A negative DMIN forces the shift to take that absolute value - TTYPE records the type of shift. -*/ - - if (*dmin__ <= 0.) { - *tau = -(*dmin__); - *ttype = -1; - return 0; - } - - nn = ((*n0) << (2)) + *pp; - if (*n0in == *n0) { - -/* No eigenvalues deflated. */ - - if (*dmin__ == *dn || *dmin__ == *dn1) { - - b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]); - b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]); - a2 = z__[nn - 7] + z__[nn - 5]; - -/* Cases 2 and 3. */ - - if ((*dmin__ == *dn && *dmin1 == *dn1)) { - gap2 = *dmin2 - a2 - *dmin2 * .25; - if ((gap2 > 0. && gap2 > b2)) { - gap1 = a2 - *dn - b2 / gap2 * b2; - } else { - gap1 = a2 - *dn - (b1 + b2); - } - if ((gap1 > 0. && gap1 > b1)) { -/* Computing MAX */ - d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5; - s = max(d__1,d__2); - *ttype = -2; - } else { - s = 0.; - if (*dn > b1) { - s = *dn - b1; - } - if (a2 > b1 + b2) { -/* Computing MIN */ - d__1 = s, d__2 = a2 - (b1 + b2); - s = min(d__1,d__2); - } -/* Computing MAX */ - d__1 = s, d__2 = *dmin__ * .333; - s = max(d__1,d__2); - *ttype = -3; - } - } else { - -/* Case 4. */ - - *ttype = -4; - s = *dmin__ * .25; - if (*dmin__ == *dn) { - gam = *dn; - a2 = 0.; - if (z__[nn - 5] > z__[nn - 7]) { - return 0; - } - b2 = z__[nn - 5] / z__[nn - 7]; - np = nn - 9; - } else { - np = nn - ((*pp) << (1)); - b2 = z__[np - 2]; - gam = *dn1; - if (z__[np - 4] > z__[np - 2]) { - return 0; - } - a2 = z__[np - 4] / z__[np - 2]; - if (z__[nn - 9] > z__[nn - 11]) { - return 0; - } - b2 = z__[nn - 9] / z__[nn - 11]; - np = nn - 13; - } - -/* Approximate contribution to norm squared from I < NN-1. */ - - a2 += b2; - i__1 = ((*i0) << (2)) - 1 + *pp; - for (i4 = np; i4 >= i__1; i4 += -4) { - if (b2 == 0.) 
{ - goto L20; - } - b1 = b2; - if (z__[i4] > z__[i4 - 2]) { - return 0; - } - b2 *= z__[i4] / z__[i4 - 2]; - a2 += b2; - if (max(b2,b1) * 100. < a2 || .563 < a2) { - goto L20; - } -/* L10: */ - } -L20: - a2 *= 1.05; - -/* Rayleigh quotient residual bound. */ - - if (a2 < .563) { - s = gam * (1. - sqrt(a2)) / (a2 + 1.); - } - } - } else if (*dmin__ == *dn2) { - -/* Case 5. */ - - *ttype = -5; - s = *dmin__ * .25; - -/* Compute contribution to norm squared from I > NN-2. */ - - np = nn - ((*pp) << (1)); - b1 = z__[np - 2]; - b2 = z__[np - 6]; - gam = *dn2; - if (z__[np - 8] > b2 || z__[np - 4] > b1) { - return 0; - } - a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.); - -/* Approximate contribution to norm squared from I < NN-2. */ - - if (*n0 - *i0 > 2) { - b2 = z__[nn - 13] / z__[nn - 15]; - a2 += b2; - i__1 = ((*i0) << (2)) - 1 + *pp; - for (i4 = nn - 17; i4 >= i__1; i4 += -4) { - if (b2 == 0.) { - goto L40; - } - b1 = b2; - if (z__[i4] > z__[i4 - 2]) { - return 0; - } - b2 *= z__[i4] / z__[i4 - 2]; - a2 += b2; - if (max(b2,b1) * 100. < a2 || .563 < a2) { - goto L40; - } -/* L30: */ - } -L40: - a2 *= 1.05; - } - - if (a2 < .563) { - s = gam * (1. - sqrt(a2)) / (a2 + 1.); - } - } else { - -/* Case 6, no information to guide us. */ - - if (*ttype == -6) { - g += (1. - g) * .333; - } else if (*ttype == -18) { - g = .083250000000000005; - } else { - g = .25; - } - s = g * *dmin__; - *ttype = -6; - } - - } else if (*n0in == *n0 + 1) { - -/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */ - - if ((*dmin1 == *dn1 && *dmin2 == *dn2)) { - -/* Cases 7 and 8. */ - - *ttype = -7; - s = *dmin1 * .333; - if (z__[nn - 5] > z__[nn - 7]) { - return 0; - } - b1 = z__[nn - 5] / z__[nn - 7]; - b2 = b1; - if (b2 == 0.) { - goto L60; - } - i__1 = ((*i0) << (2)) - 1 + *pp; - for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) { - a2 = b1; - if (z__[i4] > z__[i4 - 2]) { - return 0; - } - b1 *= z__[i4] / z__[i4 - 2]; - b2 += b1; - if (max(b1,a2) * 100. 
< b2) { - goto L60; - } -/* L50: */ - } -L60: - b2 = sqrt(b2 * 1.05); -/* Computing 2nd power */ - d__1 = b2; - a2 = *dmin1 / (d__1 * d__1 + 1.); - gap2 = *dmin2 * .5 - a2; - if ((gap2 > 0. && gap2 > b2 * a2)) { -/* Computing MAX */ - d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); - s = max(d__1,d__2); - } else { -/* Computing MAX */ - d__1 = s, d__2 = a2 * (1. - b2 * 1.01); - s = max(d__1,d__2); - *ttype = -8; - } - } else { - -/* Case 9. */ - - s = *dmin1 * .25; - if (*dmin1 == *dn1) { - s = *dmin1 * .5; - } - *ttype = -9; - } - - } else if (*n0in == *n0 + 2) { - -/* - Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. - - Cases 10 and 11. -*/ - - if ((*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7])) { - *ttype = -10; - s = *dmin2 * .333; - if (z__[nn - 5] > z__[nn - 7]) { - return 0; - } - b1 = z__[nn - 5] / z__[nn - 7]; - b2 = b1; - if (b2 == 0.) { - goto L80; - } - i__1 = ((*i0) << (2)) - 1 + *pp; - for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) { - if (z__[i4] > z__[i4 - 2]) { - return 0; - } - b1 *= z__[i4] / z__[i4 - 2]; - b2 += b1; - if (b1 * 100. < b2) { - goto L80; - } -/* L70: */ - } -L80: - b2 = sqrt(b2 * 1.05); -/* Computing 2nd power */ - d__1 = b2; - a2 = *dmin2 / (d__1 * d__1 + 1.); - gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[ - nn - 9]) - a2; - if ((gap2 > 0. && gap2 > b2 * a2)) { -/* Computing MAX */ - d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); - s = max(d__1,d__2); - } else { -/* Computing MAX */ - d__1 = s, d__2 = a2 * (1. - b2 * 1.01); - s = max(d__1,d__2); - } - } else { - s = *dmin2 * .25; - *ttype = -11; - } - } else if (*n0in > *n0 + 2) { - -/* Case 12, more than two eigenvalues deflated. No information. 
*/ - - s = 0.; - *ttype = -12; - } - - *tau = s; - return 0; - -/* End of DLASQ4 */ - -} /* dlasq4_ */ - -/* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__, - integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1, - doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2, - logical *ieee) -{ - /* System generated locals */ - integer i__1; - doublereal d__1, d__2; - - /* Local variables */ - static doublereal d__; - static integer j4, j4p2; - static doublereal emin, temp; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - May 17, 2000 - - - Purpose - ======= - - DLASQ5 computes one dqds transform in ping-pong form, one - version for IEEE machines another for non IEEE machines. - - Arguments - ========= - - I0 (input) INTEGER - First index. - - N0 (input) INTEGER - Last index. - - Z (input) DOUBLE PRECISION array, dimension ( 4*N ) - Z holds the qd array. EMIN is stored in Z(4*N0) to avoid - an extra argument. - - PP (input) INTEGER - PP=0 for ping, PP=1 for pong. - - TAU (input) DOUBLE PRECISION - This is the shift. - - DMIN (output) DOUBLE PRECISION - Minimum value of d. - - DMIN1 (output) DOUBLE PRECISION - Minimum value of d, excluding D( N0 ). - - DMIN2 (output) DOUBLE PRECISION - Minimum value of d, excluding D( N0 ) and D( N0-1 ). - - DN (output) DOUBLE PRECISION - d(N0), the last value of d. - - DNM1 (output) DOUBLE PRECISION - d(N0-1). - - DNM2 (output) DOUBLE PRECISION - d(N0-2). - - IEEE (input) LOGICAL - Flag for IEEE or non IEEE arithmetic. 
- - ===================================================================== -*/ - - - /* Parameter adjustments */ - --z__; - - /* Function Body */ - if (*n0 - *i0 - 1 <= 0) { - return 0; - } - - j4 = ((*i0) << (2)) + *pp - 3; - emin = z__[j4 + 4]; - d__ = z__[j4] - *tau; - *dmin__ = d__; - *dmin1 = -z__[j4]; - - if (*ieee) { - -/* Code for IEEE arithmetic. */ - - if (*pp == 0) { - i__1 = (*n0 - 3) << (2); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - z__[j4 - 2] = d__ + z__[j4 - 1]; - temp = z__[j4 + 1] / z__[j4 - 2]; - d__ = d__ * temp - *tau; - *dmin__ = min(*dmin__,d__); - z__[j4] = z__[j4 - 1] * temp; -/* Computing MIN */ - d__1 = z__[j4]; - emin = min(d__1,emin); -/* L10: */ - } - } else { - i__1 = (*n0 - 3) << (2); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - z__[j4 - 3] = d__ + z__[j4]; - temp = z__[j4 + 2] / z__[j4 - 3]; - d__ = d__ * temp - *tau; - *dmin__ = min(*dmin__,d__); - z__[j4 - 1] = z__[j4] * temp; -/* Computing MIN */ - d__1 = z__[j4 - 1]; - emin = min(d__1,emin); -/* L20: */ - } - } - -/* Unroll last two steps. */ - - *dnm2 = d__; - *dmin2 = *dmin__; - j4 = ((*n0 - 2) << (2)) - *pp; - j4p2 = j4 + ((*pp) << (1)) - 1; - z__[j4 - 2] = *dnm2 + z__[j4p2]; - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; - *dmin__ = min(*dmin__,*dnm1); - - *dmin1 = *dmin__; - j4 += 4; - j4p2 = j4 + ((*pp) << (1)) - 1; - z__[j4 - 2] = *dnm1 + z__[j4p2]; - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; - *dmin__ = min(*dmin__,*dn); - - } else { - -/* Code for non IEEE arithmetic. */ - - if (*pp == 0) { - i__1 = (*n0 - 3) << (2); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - z__[j4 - 2] = d__ + z__[j4 - 1]; - if (d__ < 0.) 
{ - return 0; - } else { - z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); - d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau; - } - *dmin__ = min(*dmin__,d__); -/* Computing MIN */ - d__1 = emin, d__2 = z__[j4]; - emin = min(d__1,d__2); -/* L30: */ - } - } else { - i__1 = (*n0 - 3) << (2); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - z__[j4 - 3] = d__ + z__[j4]; - if (d__ < 0.) { - return 0; - } else { - z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); - d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau; - } - *dmin__ = min(*dmin__,d__); -/* Computing MIN */ - d__1 = emin, d__2 = z__[j4 - 1]; - emin = min(d__1,d__2); -/* L40: */ - } - } - -/* Unroll last two steps. */ - - *dnm2 = d__; - *dmin2 = *dmin__; - j4 = ((*n0 - 2) << (2)) - *pp; - j4p2 = j4 + ((*pp) << (1)) - 1; - z__[j4 - 2] = *dnm2 + z__[j4p2]; - if (*dnm2 < 0.) { - return 0; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; - } - *dmin__ = min(*dmin__,*dnm1); - - *dmin1 = *dmin__; - j4 += 4; - j4p2 = j4 + ((*pp) << (1)) - 1; - z__[j4 - 2] = *dnm1 + z__[j4p2]; - if (*dnm1 < 0.) { - return 0; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; - } - *dmin__ = min(*dmin__,*dn); - - } - - z__[j4 + 2] = *dn; - z__[((*n0) << (2)) - *pp] = emin; - return 0; - -/* End of DLASQ5 */ - -} /* dlasq5_ */ - -/* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__, - integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, - doublereal *dn, doublereal *dnm1, doublereal *dnm2) -{ - /* System generated locals */ - integer i__1; - doublereal d__1, d__2; - - /* Local variables */ - static doublereal d__; - static integer j4, j4p2; - static doublereal emin, temp; - - static doublereal safmin; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - DLASQ6 computes one dqd (shift equal to zero) transform in - ping-pong form, with protection against underflow and overflow. - - Arguments - ========= - - I0 (input) INTEGER - First index. - - N0 (input) INTEGER - Last index. - - Z (input) DOUBLE PRECISION array, dimension ( 4*N ) - Z holds the qd array. EMIN is stored in Z(4*N0) to avoid - an extra argument. - - PP (input) INTEGER - PP=0 for ping, PP=1 for pong. - - DMIN (output) DOUBLE PRECISION - Minimum value of d. - - DMIN1 (output) DOUBLE PRECISION - Minimum value of d, excluding D( N0 ). - - DMIN2 (output) DOUBLE PRECISION - Minimum value of d, excluding D( N0 ) and D( N0-1 ). - - DN (output) DOUBLE PRECISION - d(N0), the last value of d. - - DNM1 (output) DOUBLE PRECISION - d(N0-1). - - DNM2 (output) DOUBLE PRECISION - d(N0-2). - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --z__; - - /* Function Body */ - if (*n0 - *i0 - 1 <= 0) { - return 0; - } - - safmin = SAFEMINIMUM; - j4 = ((*i0) << (2)) + *pp - 3; - emin = z__[j4 + 4]; - d__ = z__[j4]; - *dmin__ = d__; - - if (*pp == 0) { - i__1 = (*n0 - 3) << (2); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - z__[j4 - 2] = d__ + z__[j4 - 1]; - if (z__[j4 - 2] == 0.) 
{ - z__[j4] = 0.; - d__ = z__[j4 + 1]; - *dmin__ = d__; - emin = 0.; - } else if ((safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4 - - 2] < z__[j4 + 1])) { - temp = z__[j4 + 1] / z__[j4 - 2]; - z__[j4] = z__[j4 - 1] * temp; - d__ *= temp; - } else { - z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); - d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]); - } - *dmin__ = min(*dmin__,d__); -/* Computing MIN */ - d__1 = emin, d__2 = z__[j4]; - emin = min(d__1,d__2); -/* L10: */ - } - } else { - i__1 = (*n0 - 3) << (2); - for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) { - z__[j4 - 3] = d__ + z__[j4]; - if (z__[j4 - 3] == 0.) { - z__[j4 - 1] = 0.; - d__ = z__[j4 + 2]; - *dmin__ = d__; - emin = 0.; - } else if ((safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4 - - 3] < z__[j4 + 2])) { - temp = z__[j4 + 2] / z__[j4 - 3]; - z__[j4 - 1] = z__[j4] * temp; - d__ *= temp; - } else { - z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); - d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]); - } - *dmin__ = min(*dmin__,d__); -/* Computing MIN */ - d__1 = emin, d__2 = z__[j4 - 1]; - emin = min(d__1,d__2); -/* L20: */ - } - } - -/* Unroll last two steps. */ - - *dnm2 = d__; - *dmin2 = *dmin__; - j4 = ((*n0 - 2) << (2)) - *pp; - j4p2 = j4 + ((*pp) << (1)) - 1; - z__[j4 - 2] = *dnm2 + z__[j4p2]; - if (z__[j4 - 2] == 0.) { - z__[j4] = 0.; - *dnm1 = z__[j4p2 + 2]; - *dmin__ = *dnm1; - emin = 0.; - } else if ((safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < - z__[j4p2 + 2])) { - temp = z__[j4p2 + 2] / z__[j4 - 2]; - z__[j4] = z__[j4p2] * temp; - *dnm1 = *dnm2 * temp; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]); - } - *dmin__ = min(*dmin__,*dnm1); - - *dmin1 = *dmin__; - j4 += 4; - j4p2 = j4 + ((*pp) << (1)) - 1; - z__[j4 - 2] = *dnm1 + z__[j4p2]; - if (z__[j4 - 2] == 0.) 
{ - z__[j4] = 0.; - *dn = z__[j4p2 + 2]; - *dmin__ = *dn; - emin = 0.; - } else if ((safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < - z__[j4p2 + 2])) { - temp = z__[j4p2 + 2] / z__[j4 - 2]; - z__[j4] = z__[j4p2] * temp; - *dn = *dnm1 * temp; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]); - } - *dmin__ = min(*dmin__,*dn); - - z__[j4 + 2] = *dn; - z__[((*n0) << (2)) - *pp] = emin; - return 0; - -/* End of DLASQ6 */ - -} /* dlasq6_ */ - -/* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m, - integer *n, doublereal *c__, doublereal *s, doublereal *a, integer * - lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, info; - static doublereal temp; - extern logical lsame_(char *, char *); - static doublereal ctemp, stemp; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLASR performs the transformation - - A := P*A, when SIDE = 'L' or 'l' ( Left-hand side ) - - A := A*P', when SIDE = 'R' or 'r' ( Right-hand side ) - - where A is an m by n real matrix and P is an orthogonal matrix, - consisting of a sequence of plane rotations determined by the - parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l' - and z = n when SIDE = 'R' or 'r' ): - - When DIRECT = 'F' or 'f' ( Forward sequence ) then - - P = P( z - 1 )*...*P( 2 )*P( 1 ), - - and when DIRECT = 'B' or 'b' ( Backward sequence ) then - - P = P( 1 )*P( 2 )*...*P( z - 1 ), - - where P( k ) is a plane rotation matrix for the following planes: - - when PIVOT = 'V' or 'v' ( Variable pivot ), - the plane ( k, k + 1 ) - - when PIVOT = 'T' or 't' ( Top pivot ), - the plane ( 1, k + 1 ) - - when PIVOT = 'B' or 'b' ( Bottom pivot ), - the plane ( k, z ) - - c( k ) and s( k ) must contain the cosine and sine that define the - matrix P( k ). The two by two plane rotation part of the matrix - P( k ), R( k ), is assumed to be of the form - - R( k ) = ( c( k ) s( k ) ). - ( -s( k ) c( k ) ) - - This version vectorises across rows of the array A when SIDE = 'L'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - Specifies whether the plane rotation matrix P is applied to - A on the left or the right. - = 'L': Left, compute A := P*A - = 'R': Right, compute A:= A*P' - - DIRECT (input) CHARACTER*1 - Specifies whether P is a forward or backward sequence of - plane rotations. - = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 ) - = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 ) - - PIVOT (input) CHARACTER*1 - Specifies the plane for which P(k) is a plane rotation - matrix. 
- = 'V': Variable pivot, the plane (k,k+1) - = 'T': Top pivot, the plane (1,k+1) - = 'B': Bottom pivot, the plane (k,z) - - M (input) INTEGER - The number of rows of the matrix A. If m <= 1, an immediate - return is effected. - - N (input) INTEGER - The number of columns of the matrix A. If n <= 1, an - immediate return is effected. - - C, S (input) DOUBLE PRECISION arrays, dimension - (M-1) if SIDE = 'L' - (N-1) if SIDE = 'R' - c(k) and s(k) contain the cosine and sine that define the - matrix P(k). The two by two plane rotation part of the - matrix P(k), R(k), is assumed to be of the form - R( k ) = ( c( k ) s( k ) ). - ( -s( k ) c( k ) ) - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - The m by n matrix A. On exit, A is overwritten by P*A if - SIDE = 'R' or by A*P' if SIDE = 'L'. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - --c__; - --s; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if (! (lsame_(side, "L") || lsame_(side, "R"))) { - info = 1; - } else if (! (lsame_(pivot, "V") || lsame_(pivot, - "T") || lsame_(pivot, "B"))) { - info = 2; - } else if (! (lsame_(direct, "F") || lsame_(direct, - "B"))) { - info = 3; - } else if (*m < 0) { - info = 4; - } else if (*n < 0) { - info = 5; - } else if (*lda < max(1,*m)) { - info = 9; - } - if (info != 0) { - xerbla_("DLASR ", &info); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - return 0; - } - if (lsame_(side, "L")) { - -/* Form P * A */ - - if (lsame_(pivot, "V")) { - if (lsame_(direct, "F")) { - i__1 = *m - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = a[j + 1 + i__ * a_dim1]; - a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp * - a[j + i__ * a_dim1]; - a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j - + i__ * a_dim1]; -/* L10: */ - } - } -/* L20: */ - } - } else if (lsame_(direct, "B")) { - for (j = *m - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = a[j + 1 + i__ * a_dim1]; - a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp * - a[j + i__ * a_dim1]; - a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j - + i__ * a_dim1]; -/* L30: */ - } - } -/* L40: */ - } - } - } else if (lsame_(pivot, "T")) { - if (lsame_(direct, "F")) { - i__1 = *m; - for (j = 2; j <= i__1; ++j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = a[j + i__ * a_dim1]; - a[j + i__ * a_dim1] = ctemp * temp - stemp * a[ - i__ * a_dim1 + 1]; - a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[ - i__ * a_dim1 + 1]; -/* L50: */ - } - } -/* L60: */ - } - } else if (lsame_(direct, "B")) { - for (j = *m; j >= 2; --j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = a[j + i__ * a_dim1]; - a[j + i__ * a_dim1] = ctemp * temp - stemp * a[ - i__ * a_dim1 + 1]; - a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[ - i__ * a_dim1 + 1]; -/* L70: */ - } - } -/* L80: */ - } - } - } else if (lsame_(pivot, "B")) { - if (lsame_(direct, "F")) { - i__1 = *m - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = a[j + i__ * a_dim1]; - a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1] - + ctemp * temp; - a[*m + i__ * a_dim1] = ctemp * a[*m + i__ * - a_dim1] - stemp * temp; -/* L90: */ - } - } -/* L100: */ - } - } else if (lsame_(direct, "B")) { - for (j = *m - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = a[j + i__ * a_dim1]; - a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1] - + ctemp * temp; - a[*m + i__ * a_dim1] = ctemp * a[*m + i__ * - a_dim1] - stemp * temp; -/* L110: */ - } - } -/* L120: */ - } - } - } - } else if (lsame_(side, "R")) { - -/* Form A * P' */ - - if (lsame_(pivot, "V")) { - if (lsame_(direct, "F")) { - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = a[i__ + (j + 1) * a_dim1]; - a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp * - a[i__ + j * a_dim1]; - a[i__ + j * a_dim1] = stemp * temp + ctemp * a[ - i__ + j * a_dim1]; -/* L130: */ - } - } -/* L140: */ - } - } else if (lsame_(direct, "B")) { - for (j = *n - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = a[i__ + (j + 1) * a_dim1]; - a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp * - a[i__ + j * a_dim1]; - a[i__ + j * a_dim1] = stemp * temp + ctemp * a[ - i__ + j * a_dim1]; -/* L150: */ - } - } -/* L160: */ - } - } - } else if (lsame_(pivot, "T")) { - if (lsame_(direct, "F")) { - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = a[i__ + j * a_dim1]; - a[i__ + j * a_dim1] = ctemp * temp - stemp * a[ - i__ + a_dim1]; - a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ + - a_dim1]; -/* L170: */ - } - } -/* L180: */ - } - } else if (lsame_(direct, "B")) { - for (j = *n; j >= 2; --j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = a[i__ + j * a_dim1]; - a[i__ + j * a_dim1] = ctemp * temp - stemp * a[ - i__ + a_dim1]; - a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ + - a_dim1]; -/* L190: */ - } - } -/* L200: */ - } - } - } else if (lsame_(pivot, "B")) { - if (lsame_(direct, "F")) { - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - temp = a[i__ + j * a_dim1]; - a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1] - + ctemp * temp; - a[i__ + *n * a_dim1] = ctemp * a[i__ + *n * - a_dim1] - stemp * temp; -/* L210: */ - } - } -/* L220: */ - } - } else if (lsame_(direct, "B")) { - for (j = *n - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = a[i__ + j * a_dim1]; - a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1] - + ctemp * temp; - a[i__ + *n * a_dim1] = ctemp * a[i__ + *n * - a_dim1] - stemp * temp; -/* L230: */ - } - } -/* L240: */ - } - } - } - } - - return 0; - -/* End of DLASR */ - -} /* dlasr_ */ - -/* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer * - info) -{ - /* System generated locals */ - integer i__1, i__2; - - /* Local variables */ - static integer i__, j; - static doublereal d1, d2, d3; - static integer dir; - static doublereal tmp; - static integer endd; - extern logical lsame_(char *, char *); - static integer stack[64] /* was [2][32] */; - static doublereal dmnmx; - static integer start; - extern /* Subroutine */ int xerbla_(char *, integer *); - static integer stkpnt; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - Sort the numbers in D in increasing order (if ID = 'I') or - in decreasing order (if ID = 'D' ). - - Use Quick Sort, reverting to Insertion sort on arrays of - size <= 20. Dimension of STACK limits N to about 2**32. - - Arguments - ========= - - ID (input) CHARACTER*1 - = 'I': sort D in increasing order; - = 'D': sort D in decreasing order. - - N (input) INTEGER - The length of the array D. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the array to be sorted. - On exit, D has been sorted into increasing order - (D(1) <= ... <= D(N) ) or into decreasing order - (D(1) >= ... >= D(N) ), depending on ID. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input paramters. 
-*/ - - /* Parameter adjustments */ - --d__; - - /* Function Body */ - *info = 0; - dir = -1; - if (lsame_(id, "D")) { - dir = 0; - } else if (lsame_(id, "I")) { - dir = 1; - } - if (dir == -1) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DLASRT", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n <= 1) { - return 0; - } - - stkpnt = 1; - stack[0] = 1; - stack[1] = *n; -L10: - start = stack[((stkpnt) << (1)) - 2]; - endd = stack[((stkpnt) << (1)) - 1]; - --stkpnt; - if ((endd - start <= 20 && endd - start > 0)) { - -/* Do Insertion sort on D( START:ENDD ) */ - - if (dir == 0) { - -/* Sort into decreasing order */ - - i__1 = endd; - for (i__ = start + 1; i__ <= i__1; ++i__) { - i__2 = start + 1; - for (j = i__; j >= i__2; --j) { - if (d__[j] > d__[j - 1]) { - dmnmx = d__[j]; - d__[j] = d__[j - 1]; - d__[j - 1] = dmnmx; - } else { - goto L30; - } -/* L20: */ - } -L30: - ; - } - - } else { - -/* Sort into increasing order */ - - i__1 = endd; - for (i__ = start + 1; i__ <= i__1; ++i__) { - i__2 = start + 1; - for (j = i__; j >= i__2; --j) { - if (d__[j] < d__[j - 1]) { - dmnmx = d__[j]; - d__[j] = d__[j - 1]; - d__[j - 1] = dmnmx; - } else { - goto L50; - } -/* L40: */ - } -L50: - ; - } - - } - - } else if (endd - start > 20) { - -/* - Partition D( START:ENDD ) and stack parts, largest one first - - Choose partition entry as median of 3 -*/ - - d1 = d__[start]; - d2 = d__[endd]; - i__ = (start + endd) / 2; - d3 = d__[i__]; - if (d1 < d2) { - if (d3 < d1) { - dmnmx = d1; - } else if (d3 < d2) { - dmnmx = d3; - } else { - dmnmx = d2; - } - } else { - if (d3 < d2) { - dmnmx = d2; - } else if (d3 < d1) { - dmnmx = d3; - } else { - dmnmx = d1; - } - } - - if (dir == 0) { - -/* Sort into decreasing order */ - - i__ = start - 1; - j = endd + 1; -L60: -L70: - --j; - if (d__[j] < dmnmx) { - goto L70; - } -L80: - ++i__; - if (d__[i__] > dmnmx) { - goto L80; - } - if (i__ < j) { - tmp = d__[i__]; - 
d__[i__] = d__[j]; - d__[j] = tmp; - goto L60; - } - if (j - start > endd - j - 1) { - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = start; - stack[((stkpnt) << (1)) - 1] = j; - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = j + 1; - stack[((stkpnt) << (1)) - 1] = endd; - } else { - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = j + 1; - stack[((stkpnt) << (1)) - 1] = endd; - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = start; - stack[((stkpnt) << (1)) - 1] = j; - } - } else { - -/* Sort into increasing order */ - - i__ = start - 1; - j = endd + 1; -L90: -L100: - --j; - if (d__[j] > dmnmx) { - goto L100; - } -L110: - ++i__; - if (d__[i__] < dmnmx) { - goto L110; - } - if (i__ < j) { - tmp = d__[i__]; - d__[i__] = d__[j]; - d__[j] = tmp; - goto L90; - } - if (j - start > endd - j - 1) { - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = start; - stack[((stkpnt) << (1)) - 1] = j; - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = j + 1; - stack[((stkpnt) << (1)) - 1] = endd; - } else { - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = j + 1; - stack[((stkpnt) << (1)) - 1] = endd; - ++stkpnt; - stack[((stkpnt) << (1)) - 2] = start; - stack[((stkpnt) << (1)) - 1] = j; - } - } - } - if (stkpnt > 0) { - goto L10; - } - return 0; - -/* End of DLASRT */ - -} /* dlasrt_ */ - -/* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx, - doublereal *scale, doublereal *sumsq) -{ - /* System generated locals */ - integer i__1, i__2; - doublereal d__1; - - /* Local variables */ - static integer ix; - static doublereal absxi; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASSQ returns the values scl and smsq such that - - ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, - - where x( i ) = X( 1 + ( i - 1 )*INCX ). 
The value of sumsq is - assumed to be non-negative and scl returns the value - - scl = max( scale, abs( x( i ) ) ). - - scale and sumsq must be supplied in SCALE and SUMSQ and - scl and smsq are overwritten on SCALE and SUMSQ respectively. - - The routine makes only one pass through the vector x. - - Arguments - ========= - - N (input) INTEGER - The number of elements to be used from the vector X. - - X (input) DOUBLE PRECISION array, dimension (N) - The vector for which a scaled sum of squares is computed. - x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. - - INCX (input) INTEGER - The increment between successive values of the vector X. - INCX > 0. - - SCALE (input/output) DOUBLE PRECISION - On entry, the value scale in the equation above. - On exit, SCALE is overwritten with scl , the scaling factor - for the sum of squares. - - SUMSQ (input/output) DOUBLE PRECISION - On entry, the value sumsq in the equation above. - On exit, SUMSQ is overwritten with smsq , the basic sum of - squares from which scl has been factored out. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*n > 0) { - i__1 = (*n - 1) * *incx + 1; - i__2 = *incx; - for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { - if (x[ix] != 0.) 
{ - absxi = (d__1 = x[ix], abs(d__1)); - if (*scale < absxi) { -/* Computing 2nd power */ - d__1 = *scale / absxi; - *sumsq = *sumsq * (d__1 * d__1) + 1; - *scale = absxi; - } else { -/* Computing 2nd power */ - d__1 = absxi / *scale; - *sumsq += d__1 * d__1; - } - } -/* L10: */ - } - } - return 0; - -/* End of DLASSQ */ - -} /* dlassq_ */ - -/* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__, - doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal * - csr, doublereal *snl, doublereal *csl) -{ - /* System generated locals */ - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static doublereal a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt, - clt, crt, slt, srt; - static integer pmax; - static doublereal temp; - static logical swap; - static doublereal tsign; - - static logical gasmal; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLASV2 computes the singular value decomposition of a 2-by-2 - triangular matrix - [ F G ] - [ 0 H ]. - On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the - smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and - right singular vectors for abs(SSMAX), giving the decomposition - - [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ] - [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ]. - - Arguments - ========= - - F (input) DOUBLE PRECISION - The (1,1) element of the 2-by-2 matrix. - - G (input) DOUBLE PRECISION - The (1,2) element of the 2-by-2 matrix. - - H (input) DOUBLE PRECISION - The (2,2) element of the 2-by-2 matrix. - - SSMIN (output) DOUBLE PRECISION - abs(SSMIN) is the smaller singular value. - - SSMAX (output) DOUBLE PRECISION - abs(SSMAX) is the larger singular value. 
- - SNL (output) DOUBLE PRECISION - CSL (output) DOUBLE PRECISION - The vector (CSL, SNL) is a unit left singular vector for the - singular value abs(SSMAX). - - SNR (output) DOUBLE PRECISION - CSR (output) DOUBLE PRECISION - The vector (CSR, SNR) is a unit right singular vector for the - singular value abs(SSMAX). - - Further Details - =============== - - Any input parameter may be aliased with any output parameter. - - Barring over/underflow and assuming a guard digit in subtraction, all - output quantities are correct to within a few units in the last - place (ulps). - - In IEEE arithmetic, the code works correctly if one matrix element is - infinite. - - Overflow will not occur unless the largest singular value itself - overflows or is within a few ulps of overflow. (On machines with - partial overflow, like the Cray, overflow may occur if the largest - singular value is within a factor of 2 of overflow.) - - Underflow is harmless if underflow is gradual. Otherwise, results - may correspond to a matrix modified by perturbations of size near - the underflow threshold. - - ===================================================================== -*/ - - - ft = *f; - fa = abs(ft); - ht = *h__; - ha = abs(*h__); - -/* - PMAX points to the maximum absolute element of matrix - PMAX = 1 if F largest in absolute values - PMAX = 2 if G largest in absolute values - PMAX = 3 if H largest in absolute values -*/ - - pmax = 1; - swap = ha > fa; - if (swap) { - pmax = 3; - temp = ft; - ft = ht; - ht = temp; - temp = fa; - fa = ha; - ha = temp; - -/* Now FA .ge. HA */ - - } - gt = *g; - ga = abs(gt); - if (ga == 0.) { - -/* Diagonal matrix */ - - *ssmin = ha; - *ssmax = fa; - clt = 1.; - crt = 1.; - slt = 0.; - srt = 0.; - } else { - gasmal = TRUE_; - if (ga > fa) { - pmax = 2; - if (fa / ga < EPSILON) { - -/* Case of very large GA */ - - gasmal = FALSE_; - *ssmax = ga; - if (ha > 1.) 
{ - *ssmin = fa / (ga / ha); - } else { - *ssmin = fa / ga * ha; - } - clt = 1.; - slt = ht / gt; - srt = 1.; - crt = ft / gt; - } - } - if (gasmal) { - -/* Normal case */ - - d__ = fa - ha; - if (d__ == fa) { - -/* Copes with infinite F or H */ - - l = 1.; - } else { - l = d__ / fa; - } - -/* Note that 0 .le. L .le. 1 */ - - m = gt / ft; - -/* Note that abs(M) .le. 1/macheps */ - - t = 2. - l; - -/* Note that T .ge. 1 */ - - mm = m * m; - tt = t * t; - s = sqrt(tt + mm); - -/* Note that 1 .le. S .le. 1 + 1/macheps */ - - if (l == 0.) { - r__ = abs(m); - } else { - r__ = sqrt(l * l + mm); - } - -/* Note that 0 .le. R .le. 1 + 1/macheps */ - - a = (s + r__) * .5; - -/* Note that 1 .le. A .le. 1 + abs(M) */ - - *ssmin = ha / a; - *ssmax = fa * a; - if (mm == 0.) { - -/* Note that M is very tiny */ - - if (l == 0.) { - t = d_sign(&c_b2804, &ft) * d_sign(&c_b15, >); - } else { - t = gt / d_sign(&d__, &ft) + m / t; - } - } else { - t = (m / (s + t) + m / (r__ + l)) * (a + 1.); - } - l = sqrt(t * t + 4.); - crt = 2. 
/ l; - srt = t / l; - clt = (crt + srt * m) / a; - slt = ht / ft * srt / a; - } - } - if (swap) { - *csl = srt; - *snl = crt; - *csr = slt; - *snr = clt; - } else { - *csl = clt; - *snl = slt; - *csr = crt; - *snr = srt; - } - -/* Correct signs of SSMAX and SSMIN */ - - if (pmax == 1) { - tsign = d_sign(&c_b15, csr) * d_sign(&c_b15, csl) * d_sign(&c_b15, f); - } - if (pmax == 2) { - tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, csl) * d_sign(&c_b15, g); - } - if (pmax == 3) { - tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, snl) * d_sign(&c_b15, - h__); - } - *ssmax = d_sign(ssmax, &tsign); - d__1 = tsign * d_sign(&c_b15, f) * d_sign(&c_b15, h__); - *ssmin = d_sign(ssmin, &d__1); - return 0; - -/* End of DLASV2 */ - -} /* dlasv2_ */ - -/* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer - *k1, integer *k2, integer *ipiv, integer *incx) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc; - static doublereal temp; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DLASWP performs a series of row interchanges on the matrix A. - One row interchange is initiated for each of rows K1 through K2 of A. - - Arguments - ========= - - N (input) INTEGER - The number of columns of the matrix A. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the matrix of column dimension N to which the row - interchanges will be applied. - On exit, the permuted matrix. - - LDA (input) INTEGER - The leading dimension of the array A. - - K1 (input) INTEGER - The first element of IPIV for which a row interchange will - be done. - - K2 (input) INTEGER - The last element of IPIV for which a row interchange will - be done. 
- - IPIV (input) INTEGER array, dimension (M*abs(INCX)) - The vector of pivot indices. Only the elements in positions - K1 through K2 of IPIV are accessed. - IPIV(K) = L implies rows K and L are to be interchanged. - - INCX (input) INTEGER - The increment between successive values of IPIV. If IPIV - is negative, the pivots are applied in reverse order. - - Further Details - =============== - - Modified by - R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA - - ===================================================================== - - - Interchange row I with row IPIV(I) for each of rows K1 through K2. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - - /* Function Body */ - if (*incx > 0) { - ix0 = *k1; - i1 = *k1; - i2 = *k2; - inc = 1; - } else if (*incx < 0) { - ix0 = (1 - *k2) * *incx + 1; - i1 = *k2; - i2 = *k1; - inc = -1; - } else { - return 0; - } - - n32 = (*n / 32) << (5); - if (n32 != 0) { - i__1 = n32; - for (j = 1; j <= i__1; j += 32) { - ix = ix0; - i__2 = i2; - i__3 = inc; - for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) - { - ip = ipiv[ix]; - if (ip != i__) { - i__4 = j + 31; - for (k = j; k <= i__4; ++k) { - temp = a[i__ + k * a_dim1]; - a[i__ + k * a_dim1] = a[ip + k * a_dim1]; - a[ip + k * a_dim1] = temp; -/* L10: */ - } - } - ix += *incx; -/* L20: */ - } -/* L30: */ - } - } - if (n32 != *n) { - ++n32; - ix = ix0; - i__1 = i2; - i__3 = inc; - for (i__ = i1; i__3 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__3) { - ip = ipiv[ix]; - if (ip != i__) { - i__2 = *n; - for (k = n32; k <= i__2; ++k) { - temp = a[i__ + k * a_dim1]; - a[i__ + k * a_dim1] = a[ip + k * a_dim1]; - a[ip + k * a_dim1] = temp; -/* L40: */ - } - } - ix += *incx; -/* L50: */ - } - } - - return 0; - -/* End of DLASWP */ - -} /* dlaswp_ */ - -/* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal * - a, integer *lda, doublereal *e, doublereal *tau, doublereal *w, - integer *ldw) -{ - /* System generated locals */ - integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, iw; - extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, - integer *); - static doublereal alpha; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *), daxpy_(integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *), - dsymv_(char *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, - doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLATRD reduces NB rows and columns of a real symmetric matrix A to - symmetric tridiagonal form by an orthogonal similarity - transformation Q' * A * Q, and returns the matrices V and W which are - needed to apply the transformation to the unreduced part of A. 
- - If UPLO = 'U', DLATRD reduces the last NB rows and columns of a - matrix, of which the upper triangle is supplied; - if UPLO = 'L', DLATRD reduces the first NB rows and columns of a - matrix, of which the lower triangle is supplied. - - This is an auxiliary routine called by DSYTRD. - - Arguments - ========= - - UPLO (input) CHARACTER - Specifies whether the upper or lower triangular part of the - symmetric matrix A is stored: - = 'U': Upper triangular - = 'L': Lower triangular - - N (input) INTEGER - The order of the matrix A. - - NB (input) INTEGER - The number of rows and columns to be reduced. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the symmetric matrix A. If UPLO = 'U', the leading - n-by-n upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading n-by-n lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - On exit: - if UPLO = 'U', the last NB columns have been reduced to - tridiagonal form, with the diagonal elements overwriting - the diagonal elements of A; the elements above the diagonal - with the array TAU, represent the orthogonal matrix Q as a - product of elementary reflectors; - if UPLO = 'L', the first NB columns have been reduced to - tridiagonal form, with the diagonal elements overwriting - the diagonal elements of A; the elements below the diagonal - with the array TAU, represent the orthogonal matrix Q as a - product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= (1,N). 
- - E (output) DOUBLE PRECISION array, dimension (N-1) - If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal - elements of the last NB columns of the reduced matrix; - if UPLO = 'L', E(1:nb) contains the subdiagonal elements of - the first NB columns of the reduced matrix. - - TAU (output) DOUBLE PRECISION array, dimension (N-1) - The scalar factors of the elementary reflectors, stored in - TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. - See Further Details. - - W (output) DOUBLE PRECISION array, dimension (LDW,NB) - The n-by-nb matrix W required to update the unreduced part - of A. - - LDW (input) INTEGER - The leading dimension of the array W. LDW >= max(1,N). - - Further Details - =============== - - If UPLO = 'U', the matrix Q is represented as a product of elementary - reflectors - - Q = H(n) H(n-1) . . . H(n-nb+1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), - and tau in TAU(i-1). - - If UPLO = 'L', the matrix Q is represented as a product of elementary - reflectors - - Q = H(1) H(2) . . . H(nb). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), - and tau in TAU(i). - - The elements of the vectors v together form the n-by-nb matrix V - which is needed, with W, to apply the transformation to the unreduced - part of the matrix, using a symmetric rank-2k update of the form: - A := A - V*W' - W*V'. 
- - The contents of A on exit are illustrated by the following examples - with n = 5 and nb = 2: - - if UPLO = 'U': if UPLO = 'L': - - ( a a a v4 v5 ) ( d ) - ( a a v4 v5 ) ( 1 d ) - ( a 1 v5 ) ( v1 1 a ) - ( d 1 ) ( v1 v2 a a ) - ( d ) ( v1 v2 a a a ) - - where d denotes a diagonal element of the reduced matrix, a denotes - an element of the original matrix that is unchanged, and vi denotes - an element of the vector defining H(i). - - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --e; - --tau; - w_dim1 = *ldw; - w_offset = 1 + w_dim1 * 1; - w -= w_offset; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - - if (lsame_(uplo, "U")) { - -/* Reduce last NB columns of upper triangle */ - - i__1 = *n - *nb + 1; - for (i__ = *n; i__ >= i__1; --i__) { - iw = i__ - *n + *nb; - if (i__ < *n) { - -/* Update A(1:i,i) */ - - i__2 = *n - i__; - dgemv_("No transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) * - a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & - c_b15, &a[i__ * a_dim1 + 1], &c__1); - i__2 = *n - i__; - dgemv_("No transpose", &i__, &i__2, &c_b151, &w[(iw + 1) * - w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & - c_b15, &a[i__ * a_dim1 + 1], &c__1); - } - if (i__ > 1) { - -/* - Generate elementary reflector H(i) to annihilate - A(1:i-2,i) -*/ - - i__2 = i__ - 1; - dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 + - 1], &c__1, &tau[i__ - 1]); - e[i__ - 1] = a[i__ - 1 + i__ * a_dim1]; - a[i__ - 1 + i__ * a_dim1] = 1.; - -/* Compute W(1:i-1,i) */ - - i__2 = i__ - 1; - dsymv_("Upper", &i__2, &c_b15, &a[a_offset], lda, &a[i__ * - a_dim1 + 1], &c__1, &c_b29, &w[iw * w_dim1 + 1], & - c__1); - if (i__ < *n) { - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[(iw + 1) * - w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, & - c_b29, &w[i__ + 1 + iw * w_dim1], 
&c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) - * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & - c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) * - a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, & - c_b29, &w[i__ + 1 + iw * w_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[(iw + 1) - * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & - c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1); - } - i__2 = i__ - 1; - dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1); - i__2 = i__ - 1; - alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1], - &c__1, &a[i__ * a_dim1 + 1], &c__1); - i__2 = i__ - 1; - daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * - w_dim1 + 1], &c__1); - } - -/* L10: */ - } - } else { - -/* Reduce first NB columns of lower triangle */ - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Update A(i:n,i) */ - - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1], - lda, &w[i__ + w_dim1], ldw, &c_b15, &a[i__ + i__ * a_dim1] - , &c__1); - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + w_dim1], - ldw, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1] - , &c__1); - if (i__ < *n) { - -/* - Generate elementary reflector H(i) to annihilate - A(i+2:n,i) -*/ - - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + - i__ * a_dim1], &c__1, &tau[i__]); - e[i__] = a[i__ + 1 + i__ * a_dim1]; - a[i__ + 1 + i__ * a_dim1] = 1.; - -/* Compute W(i+1:n,i) */ - - i__2 = *n - i__; - dsymv_("Lower", &i__2, &c_b15, &a[i__ + 1 + (i__ + 1) * - a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & - c_b29, &w[i__ + 1 + i__ * w_dim1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - 
dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[i__ + 1 + w_dim1] - , ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[ - i__ * w_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + - a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[ - i__ + 1 + i__ * w_dim1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1] - , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[ - i__ * w_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + 1 + - w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[ - i__ + 1 + i__ * w_dim1], &c__1); - i__2 = *n - i__; - dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1); - i__2 = *n - i__; - alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ * - w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1); - i__2 = *n - i__; - daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ - i__ + 1 + i__ * w_dim1], &c__1); - } - -/* L20: */ - } - } - - return 0; - -/* End of DLATRD */ - -} /* dlatrd_ */ - -/* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal * - a, integer *lda, doublereal *tau, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal d__1; - - /* Local variables */ - static integer i__, j, l; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *), dlarf_(char *, integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DORG2R generates an m by n real matrix Q with orthonormal columns, - which is defined as the first n columns of a product of k elementary - reflectors of order m - - Q = H(1) H(2) . . . H(k) - - as returned by DGEQRF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. M >= N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. N >= K >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the i-th column must contain the vector which - defines the elementary reflector H(i), for i = 1,2,...,k, as - returned by DGEQRF in the first k columns of its array - argument A. - On exit, the m-by-n matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEQRF. 
- - WORK (workspace) DOUBLE PRECISION array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0 || *n > *m) { - *info = -2; - } else if (*k < 0 || *k > *n) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORG2R", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n <= 0) { - return 0; - } - -/* Initialise columns k+1:n to columns of the unit matrix */ - - i__1 = *n; - for (j = *k + 1; j <= i__1; ++j) { - i__2 = *m; - for (l = 1; l <= i__2; ++l) { - a[l + j * a_dim1] = 0.; -/* L10: */ - } - a[j + j * a_dim1] = 1.; -/* L20: */ - } - - for (i__ = *k; i__ >= 1; --i__) { - -/* Apply H(i) to A(i:m,i:n) from the left */ - - if (i__ < *n) { - a[i__ + i__ * a_dim1] = 1.; - i__1 = *m - i__ + 1; - i__2 = *n - i__; - dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[ - i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); - } - if (i__ < *m) { - i__1 = *m - i__; - d__1 = -tau[i__]; - dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1); - } - a[i__ + i__ * a_dim1] = 1. 
- tau[i__]; - -/* Set A(1:i-1,i) to zero */ - - i__1 = i__ - 1; - for (l = 1; l <= i__1; ++l) { - a[l + i__ * a_dim1] = 0.; -/* L30: */ - } -/* L40: */ - } - return 0; - -/* End of DORG2R */ - -} /* dorg2r_ */ - -/* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k, - doublereal *a, integer *lda, doublereal *tau, doublereal *work, - integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, nb, mn; - extern logical lsame_(char *, char *); - static integer iinfo; - static logical wantq; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int dorglq_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - integer *), dorgqr_(integer *, integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *, integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORGBR generates one of the real orthogonal matrices Q or P**T - determined by DGEBRD when reducing a real matrix A to bidiagonal - form: A = Q * B * P**T. Q and P**T are defined as products of - elementary reflectors H(i) or G(i) respectively. - - If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q - is of order M: - if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n - columns of Q, where m >= n >= k; - if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an - M-by-M matrix. - - If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T - is of order N: - if k < n, P**T = G(k) . . . 
G(2) G(1) and DORGBR returns the first m - rows of P**T, where n >= m >= k; - if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as - an N-by-N matrix. - - Arguments - ========= - - VECT (input) CHARACTER*1 - Specifies whether the matrix Q or the matrix P**T is - required, as defined in the transformation applied by DGEBRD: - = 'Q': generate Q; - = 'P': generate P**T. - - M (input) INTEGER - The number of rows of the matrix Q or P**T to be returned. - M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q or P**T to be returned. - N >= 0. - If VECT = 'Q', M >= N >= min(M,K); - if VECT = 'P', N >= M >= min(N,K). - - K (input) INTEGER - If VECT = 'Q', the number of columns in the original M-by-K - matrix reduced by DGEBRD. - If VECT = 'P', the number of rows in the original K-by-N - matrix reduced by DGEBRD. - K >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the vectors which define the elementary reflectors, - as returned by DGEBRD. - On exit, the M-by-N matrix Q or P**T. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (input) DOUBLE PRECISION array, dimension - (min(M,K)) if VECT = 'Q' - (min(N,K)) if VECT = 'P' - TAU(i) must contain the scalar factor of the elementary - reflector H(i) or G(i), which determines Q or P**T, as - returned by DGEBRD in its array argument TAUQ or TAUP. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,min(M,N)). - For optimum performance LWORK >= min(M,N)*NB, where NB - is the optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - wantq = lsame_(vect, "Q"); - mn = min(*m,*n); - lquery = *lwork == -1; - if ((! wantq && ! lsame_(vect, "P"))) { - *info = -1; - } else if (*m < 0) { - *info = -2; - } else if (*n < 0 || (wantq && (*n > *m || *n < min(*m,*k))) || (! wantq - && (*m > *n || *m < min(*n,*k)))) { - *info = -3; - } else if (*k < 0) { - *info = -4; - } else if (*lda < max(1,*m)) { - *info = -6; - } else if ((*lwork < max(1,mn) && ! lquery)) { - *info = -9; - } - - if (*info == 0) { - if (wantq) { - nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, ( - ftnlen)1); - } else { - nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, ( - ftnlen)1); - } - lwkopt = max(1,mn) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORGBR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - work[1] = 1.; - return 0; - } - - if (wantq) { - -/* - Form Q, determined by a call to DGEBRD to reduce an m-by-k - matrix -*/ - - if (*m >= *k) { - -/* If m >= k, assume m >= n >= k */ - - dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & - iinfo); - - } else { - -/* - If m < k, assume m = n - - Shift the vectors which define the elementary reflectors one - column to the right, and set the first row and column of Q - to those of the unit matrix -*/ - - for (j = *m; j >= 2; --j) { - a[j * a_dim1 + 1] = 0.; - i__1 = *m; - for (i__ = j + 1; i__ <= i__1; ++i__) { - a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; -/* L10: */ - } -/* L20: */ - } - a[a_dim1 + 1] = 1.; - i__1 = *m; - for (i__ = 2; i__ <= 
i__1; ++i__) { - a[i__ + a_dim1] = 0.; -/* L30: */ - } - if (*m > 1) { - -/* Form Q(2:m,2:m) */ - - i__1 = *m - 1; - i__2 = *m - 1; - i__3 = *m - 1; - dorgqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, & - tau[1], &work[1], lwork, &iinfo); - } - } - } else { - -/* - Form P', determined by a call to DGEBRD to reduce a k-by-n - matrix -*/ - - if (*k < *n) { - -/* If k < n, assume k <= m <= n */ - - dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & - iinfo); - - } else { - -/* - If k >= n, assume m = n - - Shift the vectors which define the elementary reflectors one - row downward, and set the first row and column of P' to - those of the unit matrix -*/ - - a[a_dim1 + 1] = 1.; - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - a[i__ + a_dim1] = 0.; -/* L40: */ - } - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - for (i__ = j - 1; i__ >= 2; --i__) { - a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1]; -/* L50: */ - } - a[j * a_dim1 + 1] = 0.; -/* L60: */ - } - if (*n > 1) { - -/* Form P'(2:n,2:n) */ - - i__1 = *n - 1; - i__2 = *n - 1; - i__3 = *n - 1; - dorglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, & - tau[1], &work[1], lwork, &iinfo); - } - } - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORGBR */ - -} /* dorgbr_ */ - -/* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi, - doublereal *a, integer *lda, doublereal *tau, doublereal *work, - integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - - /* Local variables */ - static integer i__, j, nb, nh, iinfo; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine 
(version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORGHR generates a real orthogonal matrix Q which is defined as the - product of IHI-ILO elementary reflectors of order N, as returned by - DGEHRD: - - Q = H(ilo) H(ilo+1) . . . H(ihi-1). - - Arguments - ========= - - N (input) INTEGER - The order of the matrix Q. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - ILO and IHI must have the same values as in the previous call - of DGEHRD. Q is equal to the unit matrix except in the - submatrix Q(ilo+1:ihi,ilo+1:ihi). - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the vectors which define the elementary reflectors, - as returned by DGEHRD. - On exit, the N-by-N orthogonal matrix Q. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (input) DOUBLE PRECISION array, dimension (N-1) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEHRD. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= IHI-ILO. - For optimum performance LWORK >= (IHI-ILO)*NB, where NB is - the optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nh = *ihi - *ilo; - lquery = *lwork == -1; - if (*n < 0) { - *info = -1; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -2; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if ((*lwork < max(1,nh) && ! lquery)) { - *info = -8; - } - - if (*info == 0) { - nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, ( - ftnlen)1); - lwkopt = max(1,nh) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORGHR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - work[1] = 1.; - return 0; - } - -/* - Shift the vectors which define the elementary reflectors one - column to the right, and set the first ilo and the last n-ihi - rows and columns to those of the unit matrix -*/ - - i__1 = *ilo + 1; - for (j = *ihi; j >= i__1; --j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = 0.; -/* L10: */ - } - i__2 = *ihi; - for (i__ = j + 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; -/* L20: */ - } - i__2 = *n; - for (i__ = *ihi + 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = 0.; -/* L30: */ - } -/* L40: */ - } - i__1 = *ilo; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = 0.; -/* L50: */ - } - a[j + j * a_dim1] = 1.; -/* L60: */ - } - i__1 = *n; - for (j = *ihi + 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = 0.; -/* L70: */ - } - a[j + j * a_dim1] = 1.; -/* 
L80: */ - } - - if (nh > 0) { - -/* Generate Q(ilo+1:ihi,ilo+1:ihi) */ - - dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[* - ilo], &work[1], lwork, &iinfo); - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORGHR */ - -} /* dorghr_ */ - -/* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal * - a, integer *lda, doublereal *tau, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal d__1; - - /* Local variables */ - static integer i__, j, l; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *), dlarf_(char *, integer *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORGL2 generates an m by n real matrix Q with orthonormal rows, - which is defined as the first m rows of a product of k elementary - reflectors of order n - - Q = H(k) . . . H(2) H(1) - - as returned by DGELQF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. N >= M. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. M >= K >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the i-th row must contain the vector which defines - the elementary reflector H(i), for i = 1,2,...,k, as returned - by DGELQF in the first k rows of its array argument A. - On exit, the m-by-n matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). 
- - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGELQF. - - WORK (workspace) DOUBLE PRECISION array, dimension (M) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < *m) { - *info = -2; - } else if (*k < 0 || *k > *m) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORGL2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m <= 0) { - return 0; - } - - if (*k < *m) { - -/* Initialise rows k+1:m to rows of the unit matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (l = *k + 1; l <= i__2; ++l) { - a[l + j * a_dim1] = 0.; -/* L10: */ - } - if ((j > *k && j <= *m)) { - a[j + j * a_dim1] = 1.; - } -/* L20: */ - } - } - - for (i__ = *k; i__ >= 1; --i__) { - -/* Apply H(i) to A(i:m,i:n) from the right */ - - if (i__ < *n) { - if (i__ < *m) { - a[i__ + i__ * a_dim1] = 1.; - i__1 = *m - i__; - i__2 = *n - i__ + 1; - dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, & - tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); - } - i__1 = *n - i__; - d__1 = -tau[i__]; - dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda); - } - a[i__ + i__ * a_dim1] = 1. 
- tau[i__]; - -/* Set A(i,1:i-1) to zero */ - - i__1 = i__ - 1; - for (l = 1; l <= i__1; ++l) { - a[i__ + l * a_dim1] = 0.; -/* L30: */ - } -/* L40: */ - } - return 0; - -/* End of DORGL2 */ - -} /* dorgl2_ */ - -/* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal * - a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *), - dlarfb_(char *, char *, char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORGLQ generates an M-by-N real matrix Q with orthonormal rows, - which is defined as the first M rows of a product of K elementary - reflectors of order N - - Q = H(k) . . . H(2) H(1) - - as returned by DGELQF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. N >= M. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. M >= K >= 0. 
- - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the i-th row must contain the vector which defines - the elementary reflector H(i), for i = 1,2,...,k, as returned - by DGELQF in the first k rows of its array argument A. - On exit, the M-by-N matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGELQF. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,M). - For optimum performance LWORK >= M*NB, where NB is - the optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); - lwkopt = max(1,*m) * nb; - work[1] = (doublereal) lwkopt; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < *m) { - *info = -2; - } else if (*k < 0 || *k > *m) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if ((*lwork < max(1,*m) && ! 
lquery)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORGLQ", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m <= 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *m; - if ((nb > 1 && nb < *k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < *k) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *m; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1, - (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < *k) && nx < *k)) { - -/* - Use blocked code after the last block. - The first kk rows are handled by the block method. -*/ - - ki = (*k - nx - 1) / nb * nb; -/* Computing MIN */ - i__1 = *k, i__2 = ki + nb; - kk = min(i__1,i__2); - -/* Set A(kk+1:m,1:kk) to zero. */ - - i__1 = kk; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = kk + 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - kk = 0; - } - -/* Use unblocked code for the last or only block. */ - - if (kk < *m) { - i__1 = *m - kk; - i__2 = *n - kk; - i__3 = *k - kk; - dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & - tau[kk + 1], &work[1], &iinfo); - } - - if (kk > 0) { - -/* Use blocked code */ - - i__1 = -nb; - for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { -/* Computing MIN */ - i__2 = nb, i__3 = *k - i__ + 1; - ib = min(i__2,i__3); - if (i__ + ib <= *m) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . 
H(i+ib-1) -*/ - - i__2 = *n - i__ + 1; - dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H' to A(i+ib:m,i:n) from the right */ - - i__2 = *m - i__ - ib + 1; - i__3 = *n - i__ + 1; - dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, & - i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & - ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + - 1], &ldwork); - } - -/* Apply H' to columns i:n of current block */ - - i__2 = *n - i__ + 1; - dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & - work[1], &iinfo); - -/* Set columns 1:i-1 of current block to zero */ - - i__2 = i__ - 1; - for (j = 1; j <= i__2; ++j) { - i__3 = i__ + ib - 1; - for (l = i__; l <= i__3; ++l) { - a[l + j * a_dim1] = 0.; -/* L30: */ - } -/* L40: */ - } -/* L50: */ - } - } - - work[1] = (doublereal) iws; - return 0; - -/* End of DORGLQ */ - -} /* dorglq_ */ - -/* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal * - a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *), - dlarfb_(char *, char *, char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORGQR generates an M-by-N real matrix Q with orthonormal columns, - which is defined as the first N columns of a product of K elementary - reflectors of order M - - Q = H(1) H(2) . . . H(k) - - as returned by DGEQRF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. M >= N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. N >= K >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the i-th column must contain the vector which - defines the elementary reflector H(i), for i = 1,2,...,k, as - returned by DGEQRF in the first k columns of its array - argument A. - On exit, the M-by-N matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEQRF. - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,N). - For optimum performance LWORK >= N*NB, where NB is the - optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); - lwkopt = max(1,*n) * nb; - work[1] = (doublereal) lwkopt; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0 || *n > *m) { - *info = -2; - } else if (*k < 0 || *k > *n) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if ((*lwork < max(1,*n) && ! lquery)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORGQR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n <= 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *n; - if ((nb > 1 && nb < *k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < *k) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *n; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1, - (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < *k) && nx < *k)) { - -/* - Use blocked code after the last block. - The first kk columns are handled by the block method. -*/ - - ki = (*k - nx - 1) / nb * nb; -/* Computing MIN */ - i__1 = *k, i__2 = ki + nb; - kk = min(i__1,i__2); - -/* Set A(1:kk,kk+1:n) to zero. 
*/ - - i__1 = *n; - for (j = kk + 1; j <= i__1; ++j) { - i__2 = kk; - for (i__ = 1; i__ <= i__2; ++i__) { - a[i__ + j * a_dim1] = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - kk = 0; - } - -/* Use unblocked code for the last or only block. */ - - if (kk < *n) { - i__1 = *m - kk; - i__2 = *n - kk; - i__3 = *k - kk; - dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & - tau[kk + 1], &work[1], &iinfo); - } - - if (kk > 0) { - -/* Use blocked code */ - - i__1 = -nb; - for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { -/* Computing MIN */ - i__2 = nb, i__3 = *k - i__ + 1; - ib = min(i__2,i__3); - if (i__ + ib <= *n) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__2 = *m - i__ + 1; - dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H to A(i:m,i+ib:n) from the left */ - - i__2 = *m - i__ + 1; - i__3 = *n - i__ - ib + 1; - dlarfb_("Left", "No transpose", "Forward", "Columnwise", & - i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[ - 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, & - work[ib + 1], &ldwork); - } - -/* Apply H to rows i:m of current block */ - - i__2 = *m - i__ + 1; - dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & - work[1], &iinfo); - -/* Set rows 1:i-1 of current block to zero */ - - i__2 = i__ + ib - 1; - for (j = i__; j <= i__2; ++j) { - i__3 = i__ - 1; - for (l = 1; l <= i__3; ++l) { - a[l + j * a_dim1] = 0.; -/* L30: */ - } -/* L40: */ - } -/* L50: */ - } - } - - work[1] = (doublereal) iws; - return 0; - -/* End of DORGQR */ - -} /* dorgqr_ */ - -/* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n, - integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; - - /* Local variables */ - 
static integer i__, i1, i2, i3, mi, ni, nq; - static doublereal aii; - static logical left; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DORM2L overwrites the general real m by n matrix C with - - Q * C if SIDE = 'L' and TRANS = 'N', or - - Q'* C if SIDE = 'L' and TRANS = 'T', or - - C * Q if SIDE = 'R' and TRANS = 'N', or - - C * Q' if SIDE = 'R' and TRANS = 'T', - - where Q is a real orthogonal matrix defined as the product of k - elementary reflectors - - Q = H(k) . . . H(2) H(1) - - as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q' from the Left - = 'R': apply Q or Q' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply Q (No transpose) - = 'T': apply Q' (Transpose) - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - DGEQLF in the last k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). 
- - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEQLF. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace) DOUBLE PRECISION array, dimension - (N) if SIDE = 'L', - (M) if SIDE = 'R' - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - -/* NQ is the order of Q */ - - if (left) { - nq = *m; - } else { - nq = *n; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORM2L", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - return 0; - } - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = 1; - } else { - i1 = *k; - i2 = 1; - i3 = -1; - } - - if (left) { - ni = *n; - } else { - mi = *m; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (left) { - -/* H(i) is applied to C(1:m-k+i,1:n) */ - - mi = *m - *k + i__; - } else { - -/* H(i) is applied to C(1:m,1:n-k+i) */ - - ni = *n - *k + i__; - } - -/* Apply H(i) */ - - aii = a[nq - *k + i__ + i__ * a_dim1]; - a[nq - *k + i__ + i__ * a_dim1] = 1.; - dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[ - c_offset], ldc, &work[1]); - a[nq - *k + i__ + i__ * a_dim1] = aii; -/* L10: */ - } - return 0; - -/* End of DORM2L */ - -} /* dorm2l_ */ - -/* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n, - integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; - - /* Local variables */ - static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; - static doublereal aii; - static logical left; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DORM2R overwrites the general real m by n matrix C with - - Q * C if SIDE = 'L' and TRANS = 'N', or - - Q'* C if SIDE = 'L' and TRANS = 'T', or - - C * Q if SIDE = 'R' and TRANS = 'N', or - - C * Q' if SIDE = 'R' and TRANS = 'T', - - where Q is a real orthogonal matrix defined as the product of k - elementary reflectors - - Q = H(1) H(2) . . . H(k) - - as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n - if SIDE = 'R'. 
- - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q' from the Left - = 'R': apply Q or Q' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply Q (No transpose) - = 'T': apply Q' (Transpose) - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - DGEQRF in the first k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEQRF. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace) DOUBLE PRECISION array, dimension - (N) if SIDE = 'L', - (M) if SIDE = 'R' - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - -/* NQ is the order of Q */ - - if (left) { - nq = *m; - } else { - nq = *n; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORM2R", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - return 0; - } - - if ((left && ! notran) || (! left && notran)) { - i1 = 1; - i2 = *k; - i3 = 1; - } else { - i1 = *k; - i2 = 1; - i3 = -1; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (left) { - -/* H(i) is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H(i) is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H(i) */ - - aii = a[i__ + i__ * a_dim1]; - a[i__ + i__ * a_dim1] = 1.; - dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[ - ic + jc * c_dim1], ldc, &work[1]); - a[i__ + i__ * a_dim1] = aii; -/* L10: */ - } - return 0; - -/* End of DORM2R */ - -} /* dorm2r_ */ - -/* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m, - integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, - doublereal *c__, integer *ldc, doublereal *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i1, i2, nb, mi, ni, nq, nw; - static logical left; - extern logical lsame_(char *, char *); - static integer iinfo; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *); - static logical notran; - extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *); - static logical applyq; - static char transt[1]; - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C - with - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'T': Q**T * C C * Q**T - - If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C - with - SIDE = 'L' SIDE = 'R' - TRANS = 'N': P * C C * P - TRANS = 'T': P**T * C C * P**T - - Here Q and P**T are the orthogonal matrices determined by DGEBRD when - reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and - P**T are defined as products of elementary reflectors H(i) and G(i) - respectively. - - Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the - order of the orthogonal matrix Q or P**T that is applied. - - If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: - if nq >= k, Q = H(1) H(2) . . . H(k); - if nq < k, Q = H(1) H(2) . . . H(nq-1). - - If VECT = 'P', A is assumed to have been a K-by-NQ matrix: - if k < nq, P = G(1) G(2) . . . G(k); - if k >= nq, P = G(1) G(2) . . . G(nq-1). - - Arguments - ========= - - VECT (input) CHARACTER*1 - = 'Q': apply Q or Q**T; - = 'P': apply P or P**T. - - SIDE (input) CHARACTER*1 - = 'L': apply Q, Q**T, P or P**T from the Left; - = 'R': apply Q, Q**T, P or P**T from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q or P; - = 'T': Transpose, apply Q**T or P**T. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - If VECT = 'Q', the number of columns in the original - matrix reduced by DGEBRD. - If VECT = 'P', the number of rows in the original - matrix reduced by DGEBRD. - K >= 0. 
- - A (input) DOUBLE PRECISION array, dimension - (LDA,min(nq,K)) if VECT = 'Q' - (LDA,nq) if VECT = 'P' - The vectors which define the elementary reflectors H(i) and - G(i), whose products determine the matrices Q and P, as - returned by DGEBRD. - - LDA (input) INTEGER - The leading dimension of the array A. - If VECT = 'Q', LDA >= max(1,nq); - if VECT = 'P', LDA >= max(1,min(nq,K)). - - TAU (input) DOUBLE PRECISION array, dimension (min(nq,K)) - TAU(i) must contain the scalar factor of the elementary - reflector H(i) or G(i) which determines Q or P, as returned - by DGEBRD in the array argument TAUQ or TAUP. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q - or P*C or P**T*C or C*P or C*P**T. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - applyq = lsame_(vect, "Q"); - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q or P and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! applyq && ! lsame_(vect, "P"))) { - *info = -1; - } else if ((! left && ! lsame_(side, "R"))) { - *info = -2; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -3; - } else if (*m < 0) { - *info = -4; - } else if (*n < 0) { - *info = -5; - } else if (*k < 0) { - *info = -6; - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = 1, i__2 = min(nq,*k); - if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2) - )) { - *info = -8; - } else if (*ldc < max(1,*m)) { - *info = -11; - } else if ((*lwork < max(1,nw) && ! 
lquery)) { - *info = -13; - } - } - - if (*info == 0) { - if (applyq) { - if (left) { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *m - 1; - i__2 = *m - 1; - nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *n - 1; - i__2 = *n - 1; - nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } else { - if (left) { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *m - 1; - i__2 = *m - 1; - nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *n - 1; - i__2 = *n - 1; - nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } - lwkopt = max(1,nw) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORMBR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - work[1] = 1.; - if (*m == 0 || *n == 0) { - return 0; - } - - if (applyq) { - -/* Apply Q */ - - if (nq >= *k) { - -/* Q was determined by a call to DGEBRD with nq >= k */ - - dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], lwork, &iinfo); - } else if (nq > 1) { - -/* Q was determined by a call to DGEBRD with nq < k */ - - if (left) { - mi = *m - 1; - ni = *n; - i1 = 2; - i2 = 1; - } else { - mi = *m; - ni = *n - 1; - i1 = 1; - i2 = 2; - } - i__1 = nq - 1; - dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1] - , &c__[i1 + 
i2 * c_dim1], ldc, &work[1], lwork, &iinfo); - } - } else { - -/* Apply P */ - - if (notran) { - *(unsigned char *)transt = 'T'; - } else { - *(unsigned char *)transt = 'N'; - } - if (nq > *k) { - -/* P was determined by a call to DGEBRD with nq > k */ - - dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], lwork, &iinfo); - } else if (nq > 1) { - -/* P was determined by a call to DGEBRD with nq <= k */ - - if (left) { - mi = *m - 1; - ni = *n; - i1 = 2; - i2 = 1; - } else { - mi = *m; - ni = *n - 1; - i1 = 1; - i2 = 2; - } - i__1 = nq - 1; - dormlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1], - lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], - lwork, &iinfo); - } - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORMBR */ - -} /* dormbr_ */ - -/* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n, - integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; - - /* Local variables */ - static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; - static doublereal aii; - static logical left; - extern /* Subroutine */ int dlarf_(char *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DORML2 overwrites the general real m by n matrix C with - - Q * C if SIDE = 'L' and TRANS = 'N', or - - Q'* C if SIDE = 'L' and TRANS = 'T', or - - C * Q if SIDE = 'R' and TRANS = 'N', or - - C * Q' if SIDE = 'R' and TRANS = 'T', - - where Q is a real orthogonal matrix defined as the product of k - elementary reflectors - - Q = H(k) . . . H(2) H(1) - - as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q' from the Left - = 'R': apply Q or Q' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply Q (No transpose) - = 'T': apply Q' (Transpose) - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) DOUBLE PRECISION array, dimension - (LDA,M) if SIDE = 'L', - (LDA,N) if SIDE = 'R' - The i-th row must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - DGELQF in the first k rows of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,K). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGELQF. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace) DOUBLE PRECISION array, dimension - (N) if SIDE = 'L', - (M) if SIDE = 'R' - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - -/* NQ is the order of Q */ - - if (left) { - nq = *m; - } else { - nq = *n; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,*k)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORML2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - return 0; - } - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = 1; - } else { - i1 = *k; - i2 = 1; - i3 = -1; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (left) { - -/* H(i) is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H(i) is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H(i) */ - - aii = a[i__ + i__ * a_dim1]; - a[i__ + i__ * a_dim1] = 1.; - dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[ - ic + jc * c_dim1], ldc, &work[1]); - a[i__ + i__ * a_dim1] = aii; -/* L10: */ - } - return 0; - -/* End of DORML2 */ - -} /* dorml2_ */ - -/* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n, - integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, - i__5; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__; - static doublereal t[4160] /* was [65][64] */; - static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws; - static logical left; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *), dlarfb_(char - *, char *, char *, char *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static logical notran; - static integer ldwork; - static char transt[1]; - static integer lwkopt; - static logical lquery; - - -/* 
- -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORMLQ overwrites the general real M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'T': Q**T * C C * Q**T - - where Q is a real orthogonal matrix defined as the product of k - elementary reflectors - - Q = H(k) . . . H(2) H(1) - - as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**T from the Left; - = 'R': apply Q or Q**T from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'T': Transpose, apply Q**T. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) DOUBLE PRECISION array, dimension - (LDA,M) if SIDE = 'L', - (LDA,N) if SIDE = 'R' - The i-th row must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - DGELQF in the first k rows of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,K). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGELQF. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,*k)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! lquery)) { - *info = -12; - } - - if (*info == 0) { - -/* - Determine the block size. NB may be at most NBMAX, where NBMAX - is used to define the local array T. 
- - Computing MIN - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nb = min(i__1,i__2); - lwkopt = max(1,nw) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORMLQ", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - ldwork = nw; - if ((nb > 1 && nb < *k)) { - iws = nw * nb; - if (*lwork < iws) { - nb = *lwork / ldwork; -/* - Computing MAX - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nbmin = max(i__1,i__2); - } - } else { - iws = nw; - } - - if (nb < nbmin || nb >= *k) { - -/* Use unblocked code */ - - dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], &iinfo); - } else { - -/* Use blocked code */ - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = nb; - } else { - i1 = (*k - 1) / nb * nb + 1; - i2 = 1; - i3 = -nb; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - if (notran) { - *(unsigned char *)transt = 'T'; - } else { - *(unsigned char *)transt = 'N'; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__4 = nb, i__5 = *k - i__ + 1; - ib = min(i__4,i__5); - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . 
H(i+ib-1) -*/ - - i__4 = nq - i__ + 1; - dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1], - lda, &tau[i__], t, &c__65); - if (left) { - -/* H or H' is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H or H' is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H or H' */ - - dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__ - + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1], - ldc, &work[1], &ldwork); -/* L10: */ - } - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORMLQ */ - -} /* dormlq_ */ - -/* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n, - integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, - i__5; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__; - static doublereal t[4160] /* was [65][64] */; - static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws; - static logical left; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *), dlarfb_(char - *, char *, char *, char *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static logical notran; - static integer 
ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORMQL overwrites the general real M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'T': Q**T * C C * Q**T - - where Q is a real orthogonal matrix defined as the product of k - elementary reflectors - - Q = H(k) . . . H(2) H(1) - - as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**T from the Left; - = 'R': apply Q or Q**T from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'T': Transpose, apply Q**T. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - DGEQLF in the last k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEQLF. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! lquery)) { - *info = -12; - } - - if (*info == 0) { - -/* - Determine the block size. NB may be at most NBMAX, where NBMAX - is used to define the local array T. 
- - Computing MIN - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nb = min(i__1,i__2); - lwkopt = max(1,nw) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORMQL", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - ldwork = nw; - if ((nb > 1 && nb < *k)) { - iws = nw * nb; - if (*lwork < iws) { - nb = *lwork / ldwork; -/* - Computing MAX - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nbmin = max(i__1,i__2); - } - } else { - iws = nw; - } - - if (nb < nbmin || nb >= *k) { - -/* Use unblocked code */ - - dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], &iinfo); - } else { - -/* Use blocked code */ - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = nb; - } else { - i1 = (*k - 1) / nb * nb + 1; - i2 = 1; - i3 = -nb; - } - - if (left) { - ni = *n; - } else { - mi = *m; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__4 = nb, i__5 = *k - i__ + 1; - ib = min(i__4,i__5); - -/* - Form the triangular factor of the block reflector - H = H(i+ib-1) . . . 
H(i+1) H(i) -*/ - - i__4 = nq - *k + i__ + ib - 1; - dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1] - , lda, &tau[i__], t, &c__65); - if (left) { - -/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */ - - mi = *m - *k + i__ + ib - 1; - } else { - -/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */ - - ni = *n - *k + i__ + ib - 1; - } - -/* Apply H or H' */ - - dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[ - i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, & - work[1], &ldwork); -/* L10: */ - } - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORMQL */ - -} /* dormql_ */ - -/* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n, - integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, - i__5; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__; - static doublereal t[4160] /* was [65][64] */; - static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws; - static logical left; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *), dlarfb_(char - *, char *, char *, char *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal - *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static 
logical notran; - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORMQR overwrites the general real M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'T': Q**T * C C * Q**T - - where Q is a real orthogonal matrix defined as the product of k - elementary reflectors - - Q = H(1) H(2) . . . H(k) - - as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**T from the Left; - = 'R': apply Q or Q**T from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'T': Transpose, apply Q**T. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - DGEQRF in the first k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) DOUBLE PRECISION array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DGEQRF. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "T"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! lquery)) { - *info = -12; - } - - if (*info == 0) { - -/* - Determine the block size. NB may be at most NBMAX, where NBMAX - is used to define the local array T. 
- - Computing MIN - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nb = min(i__1,i__2); - lwkopt = max(1,nw) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DORMQR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - work[1] = 1.; - return 0; - } - - nbmin = 2; - ldwork = nw; - if ((nb > 1 && nb < *k)) { - iws = nw * nb; - if (*lwork < iws) { - nb = *lwork / ldwork; -/* - Computing MAX - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nbmin = max(i__1,i__2); - } - } else { - iws = nw; - } - - if (nb < nbmin || nb >= *k) { - -/* Use unblocked code */ - - dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], &iinfo); - } else { - -/* Use blocked code */ - - if ((left && ! notran) || (! left && notran)) { - i1 = 1; - i2 = *k; - i3 = nb; - } else { - i1 = (*k - 1) / nb * nb + 1; - i2 = 1; - i3 = -nb; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__4 = nb, i__5 = *k - i__ + 1; - ib = min(i__4,i__5); - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . 
H(i+ib-1) -*/ - - i__4 = nq - i__ + 1; - dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], t, &c__65) - ; - if (left) { - -/* H or H' is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H or H' is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H or H' */ - - dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[ - i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * - c_dim1], ldc, &work[1], &ldwork); -/* L10: */ - } - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORMQR */ - -} /* dormqr_ */ - -/* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m, - integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal * - c__, integer *ldc, doublereal *work, integer *lwork, integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i1, i2, nb, mi, ni, nq, nw; - static logical left; - extern logical lsame_(char *, char *); - static integer iinfo; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *), - dormqr_(char *, char *, integer *, integer *, integer *, - doublereal *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *, integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DORMTR overwrites the general real M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'T': Q**T * C C * Q**T - - where Q is a real orthogonal matrix of order nq, with nq = m if - SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of - nq-1 elementary reflectors, as returned by DSYTRD: - - if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); - - if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**T from the Left; - = 'R': apply Q or Q**T from the Right. - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A contains elementary reflectors - from DSYTRD; - = 'L': Lower triangle of A contains elementary reflectors - from DSYTRD. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'T': Transpose, apply Q**T. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - A (input) DOUBLE PRECISION array, dimension - (LDA,M) if SIDE = 'L' - (LDA,N) if SIDE = 'R' - The vectors which define the elementary reflectors, as - returned by DSYTRD. - - LDA (input) INTEGER - The leading dimension of the array A. - LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. - - TAU (input) DOUBLE PRECISION array, dimension - (M-1) if SIDE = 'L' - (N-1) if SIDE = 'R' - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by DSYTRD. - - C (input/output) DOUBLE PRECISION array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. 
- - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - upper = lsame_(uplo, "U"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! upper && ! lsame_(uplo, "L"))) { - *info = -2; - } else if ((! lsame_(trans, "N") && ! lsame_(trans, - "T"))) { - *info = -3; - } else if (*m < 0) { - *info = -4; - } else if (*n < 0) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! 
lquery)) { - *info = -12; - } - - if (*info == 0) { - if (upper) { - if (left) { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *m - 1; - i__3 = *m - 1; - nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *n - 1; - i__3 = *n - 1; - nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } else { - if (left) { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *m - 1; - i__3 = *m - 1; - nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *n - 1; - i__3 = *n - 1; - nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } - lwkopt = max(1,nw) * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__2 = -(*info); - xerbla_("DORMTR", &i__2); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || nq == 1) { - work[1] = 1.; - return 0; - } - - if (left) { - mi = *m - 1; - ni = *n; - } else { - mi = *m; - ni = *n - 1; - } - - if (upper) { - -/* Q was determined by a call to DSYTRD with UPLO = 'U' */ - - i__2 = nq - 1; - dormql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda, - &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo); - } else { - -/* Q was determined by a call to DSYTRD with UPLO = 'L' */ - - if (left) { - i1 = 2; - i2 = 1; - } else { - i1 = 1; - i2 = 2; - } - i__2 = nq - 1; - dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 
2], lda, &tau[1], & - c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); - } - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DORMTR */ - -} /* dormtr_ */ - -/* Subroutine */ int dpotf2_(char *uplo, integer *n, doublereal *a, integer * - lda, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer j; - static doublereal ajj; - extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, - integer *); - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *); - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - DPOTF2 computes the Cholesky factorization of a real symmetric - positive definite matrix A. - - The factorization has the form - A = U' * U , if UPLO = 'U', or - A = L * L', if UPLO = 'L', - where U is an upper triangular matrix and L is lower triangular. - - This is the unblocked version of the algorithm, calling Level 2 BLAS. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies whether the upper or lower triangular part of the - symmetric matrix A is stored. - = 'U': Upper triangular - = 'L': Lower triangular - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the symmetric matrix A. 
If UPLO = 'U', the leading - n by n upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading n by n lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - - On exit, if INFO = 0, the factor U or L from the Cholesky - factorization A = U'*U or A = L*L'. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not - positive definite, and the factorization could not be - completed. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DPOTF2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - if (upper) { - -/* Compute the Cholesky factorization A = U'*U. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - -/* Compute U(J,J) and test for non-positive-definiteness. */ - - i__2 = j - 1; - ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1, - &a[j * a_dim1 + 1], &c__1); - if (ajj <= 0.) { - a[j + j * a_dim1] = ajj; - goto L30; - } - ajj = sqrt(ajj); - a[j + j * a_dim1] = ajj; - -/* Compute elements J+1:N of row J. 
*/ - - if (j < *n) { - i__2 = j - 1; - i__3 = *n - j; - dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(j + 1) * - a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b15, & - a[j + (j + 1) * a_dim1], lda); - i__2 = *n - j; - d__1 = 1. / ajj; - dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda); - } -/* L10: */ - } - } else { - -/* Compute the Cholesky factorization A = L*L'. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - -/* Compute L(J,J) and test for non-positive-definiteness. */ - - i__2 = j - 1; - ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j - + a_dim1], lda); - if (ajj <= 0.) { - a[j + j * a_dim1] = ajj; - goto L30; - } - ajj = sqrt(ajj); - a[j + j * a_dim1] = ajj; - -/* Compute elements J+1:N of column J. */ - - if (j < *n) { - i__2 = *n - j; - i__3 = j - 1; - dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[j + 1 + - a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + 1 + - j * a_dim1], &c__1); - i__2 = *n - j; - d__1 = 1. / ajj; - dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); - } -/* L20: */ - } - } - goto L40; - -L30: - *info = j; - -L40: - return 0; - -/* End of DPOTF2 */ - -} /* dpotf2_ */ - -/* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer * - lda, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer j, jb, nb; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, - integer *, integer *, doublereal *, doublereal *, integer *, - doublereal *, integer *); - static logical upper; - extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, doublereal *, - integer *), dpotf2_(char *, integer *, - 
doublereal *, integer *, integer *), xerbla_(char *, - integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - March 31, 1993 - - - Purpose - ======= - - DPOTRF computes the Cholesky factorization of a real symmetric - positive definite matrix A. - - The factorization has the form - A = U**T * U, if UPLO = 'U', or - A = L * L**T, if UPLO = 'L', - where U is an upper triangular matrix and L is lower triangular. - - This is the block version of the algorithm, calling Level 3 BLAS. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A is stored; - = 'L': Lower triangle of A is stored. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the symmetric matrix A. If UPLO = 'U', the leading - N-by-N upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading N-by-N lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - - On exit, if INFO = 0, the factor U or L from the Cholesky - factorization A = U**T*U or A = L*L**T. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, the leading minor of order i is not - positive definite, and the factorization could not be - completed. - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DPOTRF", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* Determine the block size for this environment. */ - - nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, ( - ftnlen)1); - if (nb <= 1 || nb >= *n) { - -/* Use unblocked code. */ - - dpotf2_(uplo, n, &a[a_offset], lda, info); - } else { - -/* Use blocked code. */ - - if (upper) { - -/* Compute the Cholesky factorization A = U'*U. */ - - i__1 = *n; - i__2 = nb; - for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { - -/* - Update and factorize the current diagonal block and test - for non-positive-definiteness. - - Computing MIN -*/ - i__3 = nb, i__4 = *n - j + 1; - jb = min(i__3,i__4); - i__3 = j - 1; - dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b151, &a[j * - a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda); - dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info); - if (*info != 0) { - goto L30; - } - if (j + jb <= *n) { - -/* Compute the current block row. */ - - i__3 = *n - j - jb + 1; - i__4 = j - 1; - dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, & - c_b151, &a[j * a_dim1 + 1], lda, &a[(j + jb) * - a_dim1 + 1], lda, &c_b15, &a[j + (j + jb) * - a_dim1], lda); - i__3 = *n - j - jb + 1; - dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, & - i__3, &c_b15, &a[j + j * a_dim1], lda, &a[j + (j - + jb) * a_dim1], lda); - } -/* L10: */ - } - - } else { - -/* Compute the Cholesky factorization A = L*L'. */ - - i__2 = *n; - i__1 = nb; - for (j = 1; i__1 < 0 ? 
j >= i__2 : j <= i__2; j += i__1) { - -/* - Update and factorize the current diagonal block and test - for non-positive-definiteness. - - Computing MIN -*/ - i__3 = nb, i__4 = *n - j + 1; - jb = min(i__3,i__4); - i__3 = j - 1; - dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b151, &a[j + - a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda); - dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info); - if (*info != 0) { - goto L30; - } - if (j + jb <= *n) { - -/* Compute the current block column. */ - - i__3 = *n - j - jb + 1; - i__4 = j - 1; - dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, & - c_b151, &a[j + jb + a_dim1], lda, &a[j + a_dim1], - lda, &c_b15, &a[j + jb + j * a_dim1], lda); - i__3 = *n - j - jb + 1; - dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, & - jb, &c_b15, &a[j + j * a_dim1], lda, &a[j + jb + - j * a_dim1], lda); - } -/* L20: */ - } - } - } - goto L40; - -L30: - *info = *info + j - 1; - -L40: - return 0; - -/* End of DPOTRF */ - -} /* dpotrf_ */ - -/* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__, - doublereal *e, doublereal *z__, integer *ldz, doublereal *work, - integer *lwork, integer *iwork, integer *liwork, integer *info) -{ - /* System generated locals */ - integer z_dim1, z_offset, i__1, i__2; - doublereal d__1, d__2; - - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - - /* Local variables */ - static integer i__, j, k, m; - static doublereal p; - static integer ii, end, lgn; - static doublereal eps, tiny; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer lwmin; - extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *, - 
doublereal *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, integer *, integer *); - static integer start; - - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dlacpy_(char *, integer *, integer - *, doublereal *, integer *, doublereal *, integer *), - dlaset_(char *, integer *, integer *, doublereal *, doublereal *, - doublereal *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int xerbla_(char *, integer *); - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, - integer *), dlasrt_(char *, integer *, doublereal *, integer *); - static integer liwmin, icompz; - extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, integer *); - static doublereal orgnrm; - static logical lquery; - static integer smlsiz, dtrtrw, storez; - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DSTEDC computes all eigenvalues and, optionally, eigenvectors of a - symmetric tridiagonal matrix using the divide and conquer method. - The eigenvectors of a full or band real symmetric matrix can also be - found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this - matrix to tridiagonal form. - - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. 
- It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. See DLAED3 for details. - - Arguments - ========= - - COMPZ (input) CHARACTER*1 - = 'N': Compute eigenvalues only. - = 'I': Compute eigenvectors of tridiagonal matrix also. - = 'V': Compute eigenvectors of original dense symmetric - matrix also. On entry, Z contains the orthogonal - matrix used to reduce the original matrix to - tridiagonal form. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the diagonal elements of the tridiagonal matrix. - On exit, if INFO = 0, the eigenvalues in ascending order. - - E (input/output) DOUBLE PRECISION array, dimension (N-1) - On entry, the subdiagonal elements of the tridiagonal matrix. - On exit, E has been destroyed. - - Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) - On entry, if COMPZ = 'V', then Z contains the orthogonal - matrix used in the reduction to tridiagonal form. - On exit, if INFO = 0, then if COMPZ = 'V', Z contains the - orthonormal eigenvectors of the original symmetric matrix, - and if COMPZ = 'I', Z contains the orthonormal eigenvectors - of the symmetric tridiagonal matrix. - If COMPZ = 'N', then Z is not referenced. - - LDZ (input) INTEGER - The leading dimension of the array Z. LDZ >= 1. - If eigenvectors are desired, then LDZ >= max(1,N). - - WORK (workspace/output) DOUBLE PRECISION array, - dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If COMPZ = 'N' or N <= 1 then LWORK must be at least 1. - If COMPZ = 'V' and N > 1 then LWORK must be at least - ( 1 + 3*N + 2*N*lg N + 3*N**2 ), - where lg( N ) = smallest integer k such - that 2**k >= N. - If COMPZ = 'I' and N > 1 then LWORK must be at least - ( 1 + 4*N + N**2 ). 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - IWORK (workspace/output) INTEGER array, dimension (LIWORK) - On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. - - LIWORK (input) INTEGER - The dimension of the array IWORK. - If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1. - If COMPZ = 'V' and N > 1 then LIWORK must be at least - ( 6 + 6*N + 5*N*lg N ). - If COMPZ = 'I' and N > 1 then LIWORK must be at least - ( 3 + 5*N ). - - If LIWORK = -1, then a workspace query is assumed; the - routine only calculates the optimal size of the IWORK array, - returns this value as the first entry of the IWORK array, and - no error message related to LIWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: The algorithm failed to compute an eigenvalue while - working on the submatrix lying in rows and columns - INFO/(N+1) through mod(INFO,N+1). - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - Modified by Francoise Tisseur, University of Tennessee. - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --e; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - --work; - --iwork; - - /* Function Body */ - *info = 0; - lquery = *lwork == -1 || *liwork == -1; - - if (lsame_(compz, "N")) { - icompz = 0; - } else if (lsame_(compz, "V")) { - icompz = 1; - } else if (lsame_(compz, "I")) { - icompz = 2; - } else { - icompz = -1; - } - if (*n <= 1 || icompz <= 0) { - liwmin = 1; - lwmin = 1; - } else { - lgn = (integer) (log((doublereal) (*n)) / log(2.)); - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - if (icompz == 1) { -/* Computing 2nd power */ - i__1 = *n; - lwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3; - liwmin = *n * 6 + 6 + *n * 5 * lgn; - } else if (icompz == 2) { -/* Computing 2nd power */ - i__1 = *n; - lwmin = ((*n) << (2)) + 1 + i__1 * i__1; - liwmin = *n * 5 + 3; - } - } - if (icompz < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) { - *info = -6; - } else if ((*lwork < lwmin && ! lquery)) { - *info = -8; - } else if ((*liwork < liwmin && ! lquery)) { - *info = -10; - } - - if (*info == 0) { - work[1] = (doublereal) lwmin; - iwork[1] = liwmin; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DSTEDC", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - if (*n == 1) { - if (icompz != 0) { - z__[z_dim1 + 1] = 1.; - } - return 0; - } - - smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - -/* - If the following conditional clause is removed, then the routine - will use the Divide and Conquer routine to compute only the - eigenvalues, which requires (3N + 3N**2) real workspace and - (2 + 5N + 2N lg(N)) integer workspace. 
- Since on many architectures DSTERF is much faster than any other - algorithm for finding eigenvalues only, it is used here - as the default. - - If COMPZ = 'N', use DSTERF to compute the eigenvalues. -*/ - - if (icompz == 0) { - dsterf_(n, &d__[1], &e[1], info); - return 0; - } - -/* - If N is smaller than the minimum divide size (SMLSIZ+1), then - solve the problem with another solver. -*/ - - if (*n <= smlsiz) { - if (icompz == 0) { - dsterf_(n, &d__[1], &e[1], info); - return 0; - } else if (icompz == 2) { - dsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], - info); - return 0; - } else { - dsteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], - info); - return 0; - } - } - -/* - If COMPZ = 'V', the Z matrix must be stored elsewhere for later - use. -*/ - - if (icompz == 1) { - storez = *n * *n + 1; - } else { - storez = 1; - } - - if (icompz == 2) { - dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); - } - -/* Scale. */ - - orgnrm = dlanst_("M", n, &d__[1], &e[1]); - if (orgnrm == 0.) { - return 0; - } - - eps = EPSILON; - - start = 1; - -/* while ( START <= N ) */ - -L10: - if (start <= *n) { - -/* - Let END be the position of the next subdiagonal entry such that - E( END ) <= TINY or END = N if no such subdiagonal exists. The - matrix identified by the elements between START and END - constitutes an independent sub-problem. -*/ - - end = start; -L20: - if (end < *n) { - tiny = eps * sqrt((d__1 = d__[end], abs(d__1))) * sqrt((d__2 = - d__[end + 1], abs(d__2))); - if ((d__1 = e[end], abs(d__1)) > tiny) { - ++end; - goto L20; - } - } - -/* (Sub) Problem determined. Compute its size and solve it. */ - - m = end - start + 1; - if (m == 1) { - start = end + 1; - goto L10; - } - if (m > smlsiz) { - *info = smlsiz; - -/* Scale. 
*/ - - orgnrm = dlanst_("M", &m, &d__[start], &e[start]); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &m, &c__1, &d__[start] - , &m, info); - i__1 = m - 1; - i__2 = m - 1; - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &i__1, &c__1, &e[ - start], &i__2, info); - - if (icompz == 1) { - dtrtrw = 1; - } else { - dtrtrw = start; - } - dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[dtrtrw + - start * z_dim1], ldz, &work[1], n, &work[storez], &iwork[ - 1], info); - if (*info != 0) { - *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m - + 1) + start - 1; - return 0; - } - -/* Scale back. */ - - dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &m, &c__1, &d__[start] - , &m, info); - - } else { - if (icompz == 1) { - -/* - Since QR won't update a Z matrix which is larger than the - length of D, we must solve the sub-problem in a workspace and - then multiply back into Z. -*/ - - dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &work[ - m * m + 1], info); - dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[ - storez], n); - dgemm_("N", "N", n, &m, &m, &c_b15, &work[storez], ldz, &work[ - 1], &m, &c_b29, &z__[start * z_dim1 + 1], ldz); - } else if (icompz == 2) { - dsteqr_("I", &m, &d__[start], &e[start], &z__[start + start * - z_dim1], ldz, &work[1], info); - } else { - dsterf_(&m, &d__[start], &e[start], info); - } - if (*info != 0) { - *info = start * (*n + 1) + end; - return 0; - } - } - - start = end + 1; - goto L10; - } - -/* - endwhile - - If the problem split any number of times, then the eigenvalues - will not be properly ordered. Here we permute the eigenvalues - (and the associated eigenvectors) into ascending order. 
-*/ - - if (m != *n) { - if (icompz == 0) { - -/* Use Quick Sort */ - - dlasrt_("I", n, &d__[1], info); - - } else { - -/* Use Selection Sort to minimize swaps of eigenvectors */ - - i__1 = *n; - for (ii = 2; ii <= i__1; ++ii) { - i__ = ii - 1; - k = i__; - p = d__[i__]; - i__2 = *n; - for (j = ii; j <= i__2; ++j) { - if (d__[j] < p) { - k = j; - p = d__[j]; - } -/* L30: */ - } - if (k != i__) { - d__[k] = d__[i__]; - d__[i__] = p; - dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 - + 1], &c__1); - } -/* L40: */ - } - } - } - - work[1] = (doublereal) lwmin; - iwork[1] = liwmin; - - return 0; - -/* End of DSTEDC */ - -} /* dstedc_ */ - -/* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__, - doublereal *e, doublereal *z__, integer *ldz, doublereal *work, - integer *info) -{ - /* System generated locals */ - integer z_dim1, z_offset, i__1, i__2; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static doublereal b, c__, f, g; - static integer i__, j, k, l, m; - static doublereal p, r__, s; - static integer l1, ii, mm, lm1, mm1, nm1; - static doublereal rt1, rt2, eps; - static integer lsv; - static doublereal tst, eps2; - static integer lend, jtot; - extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal - *, doublereal *, doublereal *); - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *); - static doublereal anorm; - extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, - doublereal *, integer *), dlaev2_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *); - static integer lendm1, lendp1; - - static integer iscale; - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, 
doublereal *, - integer *, integer *), dlaset_(char *, integer *, integer - *, doublereal *, doublereal *, doublereal *, integer *); - static doublereal safmin; - extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *); - static doublereal safmax; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, - integer *); - static integer lendsv; - static doublereal ssfmin; - static integer nmaxit, icompz; - static doublereal ssfmax; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - DSTEQR computes all eigenvalues and, optionally, eigenvectors of a - symmetric tridiagonal matrix using the implicit QL or QR method. - The eigenvectors of a full or band symmetric matrix can also be found - if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to - tridiagonal form. - - Arguments - ========= - - COMPZ (input) CHARACTER*1 - = 'N': Compute eigenvalues only. - = 'V': Compute eigenvalues and eigenvectors of the original - symmetric matrix. On entry, Z must contain the - orthogonal matrix used to reduce the original matrix - to tridiagonal form. - = 'I': Compute eigenvalues and eigenvectors of the - tridiagonal matrix. Z is initialized to the identity - matrix. - - N (input) INTEGER - The order of the matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the diagonal elements of the tridiagonal matrix. - On exit, if INFO = 0, the eigenvalues in ascending order. - - E (input/output) DOUBLE PRECISION array, dimension (N-1) - On entry, the (n-1) subdiagonal elements of the tridiagonal - matrix. - On exit, E has been destroyed. 
- - Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) - On entry, if COMPZ = 'V', then Z contains the orthogonal - matrix used in the reduction to tridiagonal form. - On exit, if INFO = 0, then if COMPZ = 'V', Z contains the - orthonormal eigenvectors of the original symmetric matrix, - and if COMPZ = 'I', Z contains the orthonormal eigenvectors - of the symmetric tridiagonal matrix. - If COMPZ = 'N', then Z is not referenced. - - LDZ (input) INTEGER - The leading dimension of the array Z. LDZ >= 1, and if - eigenvectors are desired, then LDZ >= max(1,N). - - WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) - If COMPZ = 'N', then WORK is not referenced. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: the algorithm has failed to find all the eigenvalues in - a total of 30*N iterations; if INFO = i, then i - elements of E have not converged to zero; on exit, D - and E contain the elements of a symmetric tridiagonal - matrix which is orthogonally similar to the original - matrix. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - --work; - - /* Function Body */ - *info = 0; - - if (lsame_(compz, "N")) { - icompz = 0; - } else if (lsame_(compz, "V")) { - icompz = 1; - } else if (lsame_(compz, "I")) { - icompz = 2; - } else { - icompz = -1; - } - if (icompz < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) { - *info = -6; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DSTEQR", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - if (*n == 1) { - if (icompz == 2) { - z__[z_dim1 + 1] = 1.; - } - return 0; - } - -/* Determine the unit roundoff and over/underflow thresholds. 
*/ - - eps = EPSILON; -/* Computing 2nd power */ - d__1 = eps; - eps2 = d__1 * d__1; - safmin = SAFEMINIMUM; - safmax = 1. / safmin; - ssfmax = sqrt(safmax) / 3.; - ssfmin = sqrt(safmin) / eps2; - -/* - Compute the eigenvalues and eigenvectors of the tridiagonal - matrix. -*/ - - if (icompz == 2) { - dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); - } - - nmaxit = *n * 30; - jtot = 0; - -/* - Determine where the matrix splits and choose QL or QR iteration - for each block, according to whether top or bottom diagonal - element is smaller. -*/ - - l1 = 1; - nm1 = *n - 1; - -L10: - if (l1 > *n) { - goto L160; - } - if (l1 > 1) { - e[l1 - 1] = 0.; - } - if (l1 <= nm1) { - i__1 = nm1; - for (m = l1; m <= i__1; ++m) { - tst = (d__1 = e[m], abs(d__1)); - if (tst == 0.) { - goto L30; - } - if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m - + 1], abs(d__2))) * eps) { - e[m] = 0.; - goto L30; - } -/* L20: */ - } - } - m = *n; - -L30: - l = l1; - lsv = l; - lend = m; - lendsv = lend; - l1 = m + 1; - if (lend == l) { - goto L10; - } - -/* Scale submatrix in rows and columns L to LEND */ - - i__1 = lend - l + 1; - anorm = dlanst_("I", &i__1, &d__[l], &e[l]); - iscale = 0; - if (anorm == 0.) { - goto L10; - } - if (anorm > ssfmax) { - iscale = 1; - i__1 = lend - l + 1; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, - info); - i__1 = lend - l; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, - info); - } else if (anorm < ssfmin) { - iscale = 2; - i__1 = lend - l + 1; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, - info); - i__1 = lend - l; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, - info); - } - -/* Choose between QL and QR iteration */ - - if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { - lend = lsv; - l = lendsv; - } - - if (lend > l) { - -/* - QL Iteration - - Look for small subdiagonal element. 
-*/ - -L40: - if (l != lend) { - lendm1 = lend - 1; - i__1 = lendm1; - for (m = l; m <= i__1; ++m) { -/* Computing 2nd power */ - d__2 = (d__1 = e[m], abs(d__1)); - tst = d__2 * d__2; - if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m - + 1], abs(d__2)) + safmin) { - goto L60; - } -/* L50: */ - } - } - - m = lend; - -L60: - if (m < lend) { - e[m] = 0.; - } - p = d__[l]; - if (m == l) { - goto L80; - } - -/* - If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 - to compute its eigensystem. -*/ - - if (m == l + 1) { - if (icompz > 0) { - dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s); - work[l] = c__; - work[*n - 1 + l] = s; - dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], & - z__[l * z_dim1 + 1], ldz); - } else { - dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2); - } - d__[l] = rt1; - d__[l + 1] = rt2; - e[l] = 0.; - l += 2; - if (l <= lend) { - goto L40; - } - goto L140; - } - - if (jtot == nmaxit) { - goto L140; - } - ++jtot; - -/* Form shift. */ - - g = (d__[l + 1] - p) / (e[l] * 2.); - r__ = dlapy2_(&g, &c_b15); - g = d__[m] - p + e[l] / (g + d_sign(&r__, &g)); - - s = 1.; - c__ = 1.; - p = 0.; - -/* Inner loop */ - - mm1 = m - 1; - i__1 = l; - for (i__ = mm1; i__ >= i__1; --i__) { - f = s * e[i__]; - b = c__ * e[i__]; - dlartg_(&g, &f, &c__, &s, &r__); - if (i__ != m - 1) { - e[i__ + 1] = r__; - } - g = d__[i__ + 1] - p; - r__ = (d__[i__] - g) * s + c__ * 2. * b; - p = s * r__; - d__[i__ + 1] = g + p; - g = c__ * r__ - b; - -/* If eigenvectors are desired, then save rotations. */ - - if (icompz > 0) { - work[i__] = c__; - work[*n - 1 + i__] = -s; - } - -/* L70: */ - } - -/* If eigenvectors are desired, then apply saved rotations. */ - - if (icompz > 0) { - mm = m - l + 1; - dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l - * z_dim1 + 1], ldz); - } - - d__[l] -= p; - e[l] = g; - goto L40; - -/* Eigenvalue found. 
*/ - -L80: - d__[l] = p; - - ++l; - if (l <= lend) { - goto L40; - } - goto L140; - - } else { - -/* - QR Iteration - - Look for small superdiagonal element. -*/ - -L90: - if (l != lend) { - lendp1 = lend + 1; - i__1 = lendp1; - for (m = l; m >= i__1; --m) { -/* Computing 2nd power */ - d__2 = (d__1 = e[m - 1], abs(d__1)); - tst = d__2 * d__2; - if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m - - 1], abs(d__2)) + safmin) { - goto L110; - } -/* L100: */ - } - } - - m = lend; - -L110: - if (m > lend) { - e[m - 1] = 0.; - } - p = d__[l]; - if (m == l) { - goto L130; - } - -/* - If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 - to compute its eigensystem. -*/ - - if (m == l - 1) { - if (icompz > 0) { - dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s) - ; - work[m] = c__; - work[*n - 1 + m] = s; - dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], & - z__[(l - 1) * z_dim1 + 1], ldz); - } else { - dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2); - } - d__[l - 1] = rt1; - d__[l] = rt2; - e[l - 1] = 0.; - l += -2; - if (l >= lend) { - goto L90; - } - goto L140; - } - - if (jtot == nmaxit) { - goto L140; - } - ++jtot; - -/* Form shift. */ - - g = (d__[l - 1] - p) / (e[l - 1] * 2.); - r__ = dlapy2_(&g, &c_b15); - g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g)); - - s = 1.; - c__ = 1.; - p = 0.; - -/* Inner loop */ - - lm1 = l - 1; - i__1 = lm1; - for (i__ = m; i__ <= i__1; ++i__) { - f = s * e[i__]; - b = c__ * e[i__]; - dlartg_(&g, &f, &c__, &s, &r__); - if (i__ != m) { - e[i__ - 1] = r__; - } - g = d__[i__] - p; - r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b; - p = s * r__; - d__[i__] = g + p; - g = c__ * r__ - b; - -/* If eigenvectors are desired, then save rotations. */ - - if (icompz > 0) { - work[i__] = c__; - work[*n - 1 + i__] = s; - } - -/* L120: */ - } - -/* If eigenvectors are desired, then apply saved rotations. 
*/ - - if (icompz > 0) { - mm = l - m + 1; - dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m - * z_dim1 + 1], ldz); - } - - d__[l] -= p; - e[lm1] = g; - goto L90; - -/* Eigenvalue found. */ - -L130: - d__[l] = p; - - --l; - if (l >= lend) { - goto L90; - } - goto L140; - - } - -/* Undo scaling if necessary */ - -L140: - if (iscale == 1) { - i__1 = lendsv - lsv + 1; - dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], - n, info); - i__1 = lendsv - lsv; - dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n, - info); - } else if (iscale == 2) { - i__1 = lendsv - lsv + 1; - dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], - n, info); - i__1 = lendsv - lsv; - dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n, - info); - } - -/* - Check for no convergence to an eigenvalue after a total - of N*MAXIT iterations. -*/ - - if (jtot < nmaxit) { - goto L10; - } - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - if (e[i__] != 0.) { - ++(*info); - } -/* L150: */ - } - goto L190; - -/* Order eigenvalues and eigenvectors. 
*/ - -L160: - if (icompz == 0) { - -/* Use Quick Sort */ - - dlasrt_("I", n, &d__[1], info); - - } else { - -/* Use Selection Sort to minimize swaps of eigenvectors */ - - i__1 = *n; - for (ii = 2; ii <= i__1; ++ii) { - i__ = ii - 1; - k = i__; - p = d__[i__]; - i__2 = *n; - for (j = ii; j <= i__2; ++j) { - if (d__[j] < p) { - k = j; - p = d__[j]; - } -/* L170: */ - } - if (k != i__) { - d__[k] = d__[i__]; - d__[i__] = p; - dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], - &c__1); - } -/* L180: */ - } - } - -L190: - return 0; - -/* End of DSTEQR */ - -} /* dsteqr_ */ - -/* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e, - integer *info) -{ - /* System generated locals */ - integer i__1; - doublereal d__1, d__2, d__3; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static doublereal c__; - static integer i__, l, m; - static doublereal p, r__, s; - static integer l1; - static doublereal bb, rt1, rt2, eps, rte; - static integer lsv; - static doublereal eps2, oldc; - static integer lend, jtot; - extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal - *, doublereal *, doublereal *); - static doublereal gamma, alpha, sigma, anorm; - - static integer iscale; - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *); - static doublereal oldgam, safmin; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal safmax; - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, - integer *); - static integer lendsv; - static doublereal ssfmin; - static integer nmaxit; - static doublereal ssfmax; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DSTERF computes all eigenvalues of a symmetric tridiagonal matrix - using the Pal-Walker-Kahan variant of the QL or QR algorithm. - - Arguments - ========= - - N (input) INTEGER - The order of the matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the n diagonal elements of the tridiagonal matrix. - On exit, if INFO = 0, the eigenvalues in ascending order. - - E (input/output) DOUBLE PRECISION array, dimension (N-1) - On entry, the (n-1) subdiagonal elements of the tridiagonal - matrix. - On exit, E has been destroyed. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: the algorithm failed to find all of the eigenvalues in - a total of 30*N iterations; if INFO = i, then i - elements of E have not converged to zero. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --e; - --d__; - - /* Function Body */ - *info = 0; - -/* Quick return if possible */ - - if (*n < 0) { - *info = -1; - i__1 = -(*info); - xerbla_("DSTERF", &i__1); - return 0; - } - if (*n <= 1) { - return 0; - } - -/* Determine the unit roundoff for this environment. */ - - eps = EPSILON; -/* Computing 2nd power */ - d__1 = eps; - eps2 = d__1 * d__1; - safmin = SAFEMINIMUM; - safmax = 1. / safmin; - ssfmax = sqrt(safmax) / 3.; - ssfmin = sqrt(safmin) / eps2; - -/* Compute the eigenvalues of the tridiagonal matrix. */ - - nmaxit = *n * 30; - sigma = 0.; - jtot = 0; - -/* - Determine where the matrix splits and choose QL or QR iteration - for each block, according to whether top or bottom diagonal - element is smaller. 
-*/ - - l1 = 1; - -L10: - if (l1 > *n) { - goto L170; - } - if (l1 > 1) { - e[l1 - 1] = 0.; - } - i__1 = *n - 1; - for (m = l1; m <= i__1; ++m) { - if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) * - sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) { - e[m] = 0.; - goto L30; - } -/* L20: */ - } - m = *n; - -L30: - l = l1; - lsv = l; - lend = m; - lendsv = lend; - l1 = m + 1; - if (lend == l) { - goto L10; - } - -/* Scale submatrix in rows and columns L to LEND */ - - i__1 = lend - l + 1; - anorm = dlanst_("I", &i__1, &d__[l], &e[l]); - iscale = 0; - if (anorm > ssfmax) { - iscale = 1; - i__1 = lend - l + 1; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, - info); - i__1 = lend - l; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, - info); - } else if (anorm < ssfmin) { - iscale = 2; - i__1 = lend - l + 1; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, - info); - i__1 = lend - l; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, - info); - } - - i__1 = lend - 1; - for (i__ = l; i__ <= i__1; ++i__) { -/* Computing 2nd power */ - d__1 = e[i__]; - e[i__] = d__1 * d__1; -/* L40: */ - } - -/* Choose between QL and QR iteration */ - - if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { - lend = lsv; - l = lendsv; - } - - if (lend >= l) { - -/* - QL Iteration - - Look for small subdiagonal element. -*/ - -L50: - if (l != lend) { - i__1 = lend - 1; - for (m = l; m <= i__1; ++m) { - if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m - + 1], abs(d__1))) { - goto L70; - } -/* L60: */ - } - } - m = lend; - -L70: - if (m < lend) { - e[m] = 0.; - } - p = d__[l]; - if (m == l) { - goto L90; - } - -/* - If remaining matrix is 2 by 2, use DLAE2 to compute its - eigenvalues. 
-*/ - - if (m == l + 1) { - rte = sqrt(e[l]); - dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2); - d__[l] = rt1; - d__[l + 1] = rt2; - e[l] = 0.; - l += 2; - if (l <= lend) { - goto L50; - } - goto L150; - } - - if (jtot == nmaxit) { - goto L150; - } - ++jtot; - -/* Form shift. */ - - rte = sqrt(e[l]); - sigma = (d__[l + 1] - p) / (rte * 2.); - r__ = dlapy2_(&sigma, &c_b15); - sigma = p - rte / (sigma + d_sign(&r__, &sigma)); - - c__ = 1.; - s = 0.; - gamma = d__[m] - sigma; - p = gamma * gamma; - -/* Inner loop */ - - i__1 = l; - for (i__ = m - 1; i__ >= i__1; --i__) { - bb = e[i__]; - r__ = p + bb; - if (i__ != m - 1) { - e[i__ + 1] = s * r__; - } - oldc = c__; - c__ = p / r__; - s = bb / r__; - oldgam = gamma; - alpha = d__[i__]; - gamma = c__ * (alpha - sigma) - s * oldgam; - d__[i__ + 1] = oldgam + (alpha - gamma); - if (c__ != 0.) { - p = gamma * gamma / c__; - } else { - p = oldc * bb; - } -/* L80: */ - } - - e[l] = s * p; - d__[l] = sigma + gamma; - goto L50; - -/* Eigenvalue found. */ - -L90: - d__[l] = p; - - ++l; - if (l <= lend) { - goto L50; - } - goto L150; - - } else { - -/* - QR Iteration - - Look for small superdiagonal element. -*/ - -L100: - i__1 = lend + 1; - for (m = l; m >= i__1; --m) { - if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m - - 1], abs(d__1))) { - goto L120; - } -/* L110: */ - } - m = lend; - -L120: - if (m > lend) { - e[m - 1] = 0.; - } - p = d__[l]; - if (m == l) { - goto L140; - } - -/* - If remaining matrix is 2 by 2, use DLAE2 to compute its - eigenvalues. -*/ - - if (m == l - 1) { - rte = sqrt(e[l - 1]); - dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2); - d__[l] = rt1; - d__[l - 1] = rt2; - e[l - 1] = 0.; - l += -2; - if (l >= lend) { - goto L100; - } - goto L150; - } - - if (jtot == nmaxit) { - goto L150; - } - ++jtot; - -/* Form shift. 
*/ - - rte = sqrt(e[l - 1]); - sigma = (d__[l - 1] - p) / (rte * 2.); - r__ = dlapy2_(&sigma, &c_b15); - sigma = p - rte / (sigma + d_sign(&r__, &sigma)); - - c__ = 1.; - s = 0.; - gamma = d__[m] - sigma; - p = gamma * gamma; - -/* Inner loop */ - - i__1 = l - 1; - for (i__ = m; i__ <= i__1; ++i__) { - bb = e[i__]; - r__ = p + bb; - if (i__ != m) { - e[i__ - 1] = s * r__; - } - oldc = c__; - c__ = p / r__; - s = bb / r__; - oldgam = gamma; - alpha = d__[i__ + 1]; - gamma = c__ * (alpha - sigma) - s * oldgam; - d__[i__] = oldgam + (alpha - gamma); - if (c__ != 0.) { - p = gamma * gamma / c__; - } else { - p = oldc * bb; - } -/* L130: */ - } - - e[l - 1] = s * p; - d__[l] = sigma + gamma; - goto L100; - -/* Eigenvalue found. */ - -L140: - d__[l] = p; - - --l; - if (l >= lend) { - goto L100; - } - goto L150; - - } - -/* Undo scaling if necessary */ - -L150: - if (iscale == 1) { - i__1 = lendsv - lsv + 1; - dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], - n, info); - } - if (iscale == 2) { - i__1 = lendsv - lsv + 1; - dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], - n, info); - } - -/* - Check for no convergence to an eigenvalue after a total - of N*MAXIT iterations. -*/ - - if (jtot < nmaxit) { - goto L10; - } - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - if (e[i__] != 0.) { - ++(*info); - } -/* L160: */ - } - goto L180; - -/* Sort eigenvalues in increasing order. 
*/ - -L170: - dlasrt_("I", n, &d__[1], info); - -L180: - return 0; - -/* End of DSTERF */ - -} /* dsterf_ */ - -/* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal * - a, integer *lda, doublereal *w, doublereal *work, integer *lwork, - integer *iwork, integer *liwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal eps; - static integer inde; - static doublereal anrm, rmin, rmax; - static integer lopt; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - static doublereal sigma; - extern logical lsame_(char *, char *); - static integer iinfo, lwmin, liopt; - static logical lower, wantz; - static integer indwk2, llwrk2; - - static integer iscale; - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dstedc_(char *, integer *, - doublereal *, doublereal *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, integer *), dlacpy_( - char *, integer *, integer *, doublereal *, integer *, doublereal - *, integer *); - static doublereal safmin; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal bignum; - static integer indtau; - extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, - integer *); - extern doublereal dlansy_(char *, char *, integer *, doublereal *, - integer *, doublereal *); - static integer indwrk, liwmin; - extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *, - integer *); - static integer llwork; - static 
doublereal smlnum; - static logical lquery; - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DSYEVD computes all eigenvalues and, optionally, eigenvectors of a - real symmetric matrix A. If eigenvectors are desired, it uses a - divide and conquer algorithm. - - The divide and conquer algorithm makes very mild assumptions about - floating point arithmetic. It will work on machines with a guard - digit in add/subtract, or on those binary machines without guard - digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or - Cray-2. It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Because of large use of BLAS of level 3, DSYEVD needs N**2 more - workspace than DSYEVX. - - Arguments - ========= - - JOBZ (input) CHARACTER*1 - = 'N': Compute eigenvalues only; - = 'V': Compute eigenvalues and eigenvectors. - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A is stored; - = 'L': Lower triangle of A is stored. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA, N) - On entry, the symmetric matrix A. If UPLO = 'U', the - leading N-by-N upper triangular part of A contains the - upper triangular part of the matrix A. If UPLO = 'L', - the leading N-by-N lower triangular part of A contains - the lower triangular part of the matrix A. - On exit, if JOBZ = 'V', then if INFO = 0, A contains the - orthonormal eigenvectors of the matrix A. - If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') - or the upper triangle (if UPLO='U') of A, including the - diagonal, is destroyed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - W (output) DOUBLE PRECISION array, dimension (N) - If INFO = 0, the eigenvalues in ascending order. 
- - WORK (workspace/output) DOUBLE PRECISION array, - dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If N <= 1, LWORK must be at least 1. - If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1. - If JOBZ = 'V' and N > 1, LWORK must be at least - 1 + 6*N + 2*N**2. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - IWORK (workspace/output) INTEGER array, dimension (LIWORK) - On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. - - LIWORK (input) INTEGER - The dimension of the array IWORK. - If N <= 1, LIWORK must be at least 1. - If JOBZ = 'N' and N > 1, LIWORK must be at least 1. - If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. - - If LIWORK = -1, then a workspace query is assumed; the - routine only calculates the optimal size of the IWORK array, - returns this value as the first entry of the IWORK array, and - no error message related to LIWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, the algorithm failed to converge; i - off-diagonal elements of an intermediate tridiagonal - form did not converge to zero. - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - Modified by Francoise Tisseur, University of Tennessee. - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --w; - --work; - --iwork; - - /* Function Body */ - wantz = lsame_(jobz, "V"); - lower = lsame_(uplo, "L"); - lquery = *lwork == -1 || *liwork == -1; - - *info = 0; - if (*n <= 1) { - liwmin = 1; - lwmin = 1; - lopt = lwmin; - liopt = liwmin; - } else { - if (wantz) { - liwmin = *n * 5 + 3; -/* Computing 2nd power */ - i__1 = *n; - lwmin = *n * 6 + 1 + ((i__1 * i__1) << (1)); - } else { - liwmin = 1; - lwmin = ((*n) << (1)) + 1; - } - lopt = lwmin; - liopt = liwmin; - } - if (! (wantz || lsame_(jobz, "N"))) { - *info = -1; - } else if (! (lower || lsame_(uplo, "U"))) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if ((*lwork < lwmin && ! lquery)) { - *info = -8; - } else if ((*liwork < liwmin && ! lquery)) { - *info = -10; - } - - if (*info == 0) { - work[1] = (doublereal) lopt; - iwork[1] = liopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DSYEVD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - if (*n == 1) { - w[1] = a[a_dim1 + 1]; - if (wantz) { - a[a_dim1 + 1] = 1.; - } - return 0; - } - -/* Get machine constants. */ - - safmin = SAFEMINIMUM; - eps = PRECISION; - smlnum = safmin / eps; - bignum = 1. / smlnum; - rmin = sqrt(smlnum); - rmax = sqrt(bignum); - -/* Scale matrix to allowable range, if necessary. */ - - anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]); - iscale = 0; - if ((anrm > 0. && anrm < rmin)) { - iscale = 1; - sigma = rmin / anrm; - } else if (anrm > rmax) { - iscale = 1; - sigma = rmax / anrm; - } - if (iscale == 1) { - dlascl_(uplo, &c__0, &c__0, &c_b15, &sigma, n, n, &a[a_offset], lda, - info); - } - -/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. 
*/ - - inde = 1; - indtau = inde + *n; - indwrk = indtau + *n; - llwork = *lwork - indwrk + 1; - indwk2 = indwrk + *n * *n; - llwrk2 = *lwork - indwk2 + 1; - - dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], & - work[indwrk], &llwork, &iinfo); - lopt = (integer) (((*n) << (1)) + work[indwrk]); - -/* - For eigenvalues only, call DSTERF. For eigenvectors, first call - DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the - tridiagonal matrix, then call DORMTR to multiply it by the - Householder transformations stored in A. -*/ - - if (! wantz) { - dsterf_(n, &w[1], &work[inde], info); - } else { - dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], & - llwrk2, &iwork[1], liwork, info); - dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[ - indwrk], n, &work[indwk2], &llwrk2, &iinfo); - dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda); -/* - Computing MAX - Computing 2nd power -*/ - i__3 = *n; - i__1 = lopt, i__2 = *n * 6 + 1 + ((i__3 * i__3) << (1)); - lopt = max(i__1,i__2); - } - -/* If matrix was scaled, then rescale eigenvalues appropriately. */ - - if (iscale == 1) { - d__1 = 1. 
/ sigma; - dscal_(n, &d__1, &w[1], &c__1); - } - - work[1] = (doublereal) lopt; - iwork[1] = liopt; - - return 0; - -/* End of DSYEVD */ - -} /* dsyevd_ */ - -/* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer * - lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__; - extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, - integer *); - static doublereal taui; - extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - integer *); - static doublereal alpha; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *); - static logical upper; - extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *), dlarfg_(integer *, doublereal *, - doublereal *, integer *, doublereal *), xerbla_(char *, integer * - ); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal - form T by an orthogonal similarity transformation: Q' * A * Q = T. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies whether the upper or lower triangular part of the - symmetric matrix A is stored: - = 'U': Upper triangular - = 'L': Lower triangular - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the symmetric matrix A. 
If UPLO = 'U', the leading - n-by-n upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading n-by-n lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - On exit, if UPLO = 'U', the diagonal and first superdiagonal - of A are overwritten by the corresponding elements of the - tridiagonal matrix T, and the elements above the first - superdiagonal, with the array TAU, represent the orthogonal - matrix Q as a product of elementary reflectors; if UPLO - = 'L', the diagonal and first subdiagonal of A are over- - written by the corresponding elements of the tridiagonal - matrix T, and the elements below the first subdiagonal, with - the array TAU, represent the orthogonal matrix Q as a product - of elementary reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - D (output) DOUBLE PRECISION array, dimension (N) - The diagonal elements of the tridiagonal matrix T: - D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (N-1) - The off-diagonal elements of the tridiagonal matrix T: - E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. - - TAU (output) DOUBLE PRECISION array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - If UPLO = 'U', the matrix Q is represented as a product of elementary - reflectors - - Q = H(n-1) . . . H(2) H(1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in - A(1:i-1,i+1), and tau in TAU(i). 
- - If UPLO = 'L', the matrix Q is represented as a product of elementary - reflectors - - Q = H(1) H(2) . . . H(n-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), - and tau in TAU(i). - - The contents of A on exit are illustrated by the following examples - with n = 5: - - if UPLO = 'U': if UPLO = 'L': - - ( d e v2 v3 v4 ) ( d ) - ( d e v3 v4 ) ( e d ) - ( d e v4 ) ( v1 e d ) - ( d e ) ( v1 v2 e d ) - ( d ) ( v1 v2 v3 e d ) - - where d and e denote diagonal and off-diagonal elements of T, and vi - denotes an element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tau; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DSYTD2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n <= 0) { - return 0; - } - - if (upper) { - -/* Reduce the upper triangle of A */ - - for (i__ = *n - 1; i__ >= 1; --i__) { - -/* - Generate elementary reflector H(i) = I - tau * v * v' - to annihilate A(1:i-1,i+1) -*/ - - dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1 - + 1], &c__1, &taui); - e[i__] = a[i__ + (i__ + 1) * a_dim1]; - - if (taui != 0.) 
{ - -/* Apply H(i) from both sides to A(1:i,1:i) */ - - a[i__ + (i__ + 1) * a_dim1] = 1.; - -/* Compute x := tau * A * v storing x in TAU(1:i) */ - - dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * - a_dim1 + 1], &c__1, &c_b29, &tau[1], &c__1) - ; - -/* Compute w := x - 1/2 * tau * (x'*v) * v */ - - alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1) - * a_dim1 + 1], &c__1); - daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[ - 1], &c__1); - -/* - Apply the transformation as a rank-2 update: - A := A - v * w' - w * v' -*/ - - dsyr2_(uplo, &i__, &c_b151, &a[(i__ + 1) * a_dim1 + 1], &c__1, - &tau[1], &c__1, &a[a_offset], lda); - - a[i__ + (i__ + 1) * a_dim1] = e[i__]; - } - d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1]; - tau[i__] = taui; -/* L10: */ - } - d__[1] = a[a_dim1 + 1]; - } else { - -/* Reduce the lower triangle of A */ - - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* - Generate elementary reflector H(i) = I - tau * v * v' - to annihilate A(i+2:n,i) -*/ - - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * - a_dim1], &c__1, &taui); - e[i__] = a[i__ + 1 + i__ * a_dim1]; - - if (taui != 0.) 
{ - -/* Apply H(i) from both sides to A(i+1:n,i+1:n) */ - - a[i__ + 1 + i__ * a_dim1] = 1.; - -/* Compute x := tau * A * v storing y in TAU(i:n-1) */ - - i__2 = *n - i__; - dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], - lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &tau[ - i__], &c__1); - -/* Compute w := x - 1/2 * tau * (x'*v) * v */ - - i__2 = *n - i__; - alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ + - 1 + i__ * a_dim1], &c__1); - i__2 = *n - i__; - daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ - i__], &c__1); - -/* - Apply the transformation as a rank-2 update: - A := A - v * w' - w * v' -*/ - - i__2 = *n - i__; - dsyr2_(uplo, &i__2, &c_b151, &a[i__ + 1 + i__ * a_dim1], & - c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * - a_dim1], lda); - - a[i__ + 1 + i__ * a_dim1] = e[i__]; - } - d__[i__] = a[i__ + i__ * a_dim1]; - tau[i__] = taui; -/* L20: */ - } - d__[*n] = a[*n + *n * a_dim1]; - } - - return 0; - -/* End of DSYTD2 */ - -} /* dsytd2_ */ - -/* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer * - lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal * - work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, nb, kk, nx, iws; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - static logical upper; - extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *, - integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal - *, doublereal *, integer *, doublereal *, integer *, doublereal *, - doublereal *, integer *), dlatrd_(char *, - integer *, integer *, doublereal *, integer *, doublereal *, - doublereal *, doublereal *, integer *), xerbla_(char *, - integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, 
ftnlen); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DSYTRD reduces a real symmetric matrix A to real symmetric - tridiagonal form T by an orthogonal similarity transformation: - Q**T * A * Q = T. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A is stored; - = 'L': Lower triangle of A is stored. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) DOUBLE PRECISION array, dimension (LDA,N) - On entry, the symmetric matrix A. If UPLO = 'U', the leading - N-by-N upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading N-by-N lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - On exit, if UPLO = 'U', the diagonal and first superdiagonal - of A are overwritten by the corresponding elements of the - tridiagonal matrix T, and the elements above the first - superdiagonal, with the array TAU, represent the orthogonal - matrix Q as a product of elementary reflectors; if UPLO - = 'L', the diagonal and first subdiagonal of A are over- - written by the corresponding elements of the tridiagonal - matrix T, and the elements below the first subdiagonal, with - the array TAU, represent the orthogonal matrix Q as a product - of elementary reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - D (output) DOUBLE PRECISION array, dimension (N) - The diagonal elements of the tridiagonal matrix T: - D(i) = A(i,i). 
- - E (output) DOUBLE PRECISION array, dimension (N-1) - The off-diagonal elements of the tridiagonal matrix T: - E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. - - TAU (output) DOUBLE PRECISION array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= 1. - For optimum performance LWORK >= N*NB, where NB is the - optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - If UPLO = 'U', the matrix Q is represented as a product of elementary - reflectors - - Q = H(n-1) . . . H(2) H(1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in - A(1:i-1,i+1), and tau in TAU(i). - - If UPLO = 'L', the matrix Q is represented as a product of elementary - reflectors - - Q = H(1) H(2) . . . H(n-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), - and tau in TAU(i). - - The contents of A on exit are illustrated by the following examples - with n = 5: - - if UPLO = 'U': if UPLO = 'L': - - ( d e v2 v3 v4 ) ( d ) - ( d e v3 v4 ) ( e d ) - ( d e v4 ) ( v1 e d ) - ( d e ) ( v1 v2 e d ) - ( d ) ( v1 v2 v3 e d ) - - where d and e denote diagonal and off-diagonal elements of T, and vi - denotes an element of the vector defining H(i). 
- - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tau; - --work; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - lquery = *lwork == -1; - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } else if ((*lwork < 1 && ! lquery)) { - *info = -9; - } - - if (*info == 0) { - -/* Determine the block size. */ - - nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, - (ftnlen)1); - lwkopt = *n * nb; - work[1] = (doublereal) lwkopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("DSYTRD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - work[1] = 1.; - return 0; - } - - nx = *n; - iws = 1; - if ((nb > 1 && nb < *n)) { - -/* - Determine when to cross over from blocked to unblocked code - (last block is always handled by unblocked code). - - Computing MAX -*/ - i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, & - c_n1, (ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < *n) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *n; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: determine the - minimum value of NB, and reduce NB or force use of - unblocked code by setting NX = N. - - Computing MAX -*/ - i__1 = *lwork / ldwork; - nb = max(i__1,1); - nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - if (nb < nbmin) { - nx = *n; - } - } - } else { - nx = *n; - } - } else { - nb = 1; - } - - if (upper) { - -/* - Reduce the upper triangle of A. - Columns 1:kk are handled by the unblocked method. 
-*/ - - kk = *n - (*n - nx + nb - 1) / nb * nb; - i__1 = kk + 1; - i__2 = -nb; - for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += - i__2) { - -/* - Reduce columns i:i+nb-1 to tridiagonal form and form the - matrix W which is needed to update the unreduced part of - the matrix -*/ - - i__3 = i__ + nb - 1; - dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], & - work[1], &ldwork); - -/* - Update the unreduced submatrix A(1:i-1,1:i-1), using an - update of the form: A := A - V*W' - W*V' -*/ - - i__3 = i__ - 1; - dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ * - a_dim1 + 1], lda, &work[1], &ldwork, &c_b15, &a[a_offset], - lda); - -/* - Copy superdiagonal elements back into A, and diagonal - elements into D -*/ - - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - a[j - 1 + j * a_dim1] = e[j - 1]; - d__[j] = a[j + j * a_dim1]; -/* L10: */ - } -/* L20: */ - } - -/* Use unblocked code to reduce the last or only block */ - - dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo); - } else { - -/* Reduce the lower triangle of A */ - - i__2 = *n - nx; - i__1 = nb; - for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { - -/* - Reduce columns i:i+nb-1 to tridiagonal form and form the - matrix W which is needed to update the unreduced part of - the matrix -*/ - - i__3 = *n - i__ + 1; - dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], & - tau[i__], &work[1], &ldwork); - -/* - Update the unreduced submatrix A(i+ib:n,i+ib:n), using - an update of the form: A := A - V*W' - W*V' -*/ - - i__3 = *n - i__ - nb + 1; - dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ + nb + - i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b15, &a[ - i__ + nb + (i__ + nb) * a_dim1], lda); - -/* - Copy subdiagonal elements back into A, and diagonal - elements into D -*/ - - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - a[j + 1 + j * a_dim1] = e[j]; - d__[j] = a[j + j * a_dim1]; -/* L30: */ - } -/* L40: */ - } - -/* Use unblocked code to reduce the last or only block */ - - i__1 = *n - i__ + 1; - dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], - &tau[i__], &iinfo); - } - - work[1] = (doublereal) lwkopt; - return 0; - -/* End of DSYTRD */ - -} /* dsytrd_ */ - -/* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select, - integer *n, doublereal *t, integer *ldt, doublereal *vl, integer * - ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m, - doublereal *work, integer *info) -{ - /* System generated locals */ - integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, - i__2, i__3; - doublereal d__1, d__2, d__3, d__4; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, j, k; - static doublereal x[4] /* was [2][2] */; - static integer j1, j2, n2, ii, ki, ip, is; - static doublereal wi, wr, rec, ulp, beta, emax; - static logical pair; - extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, - integer *); - static logical allv; - static integer ierr; - static doublereal unfl, ovfl, smin; - static logical over; - 
static doublereal vmax; - static integer jnxt; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - static doublereal scale; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *); - static doublereal remax; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static logical leftv, bothv; - extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, - integer *, doublereal *, integer *); - static doublereal vcrit; - static logical somev; - static doublereal xnorm; - extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *, - doublereal *, doublereal *, doublereal *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, doublereal * - , doublereal *, integer *, doublereal *, doublereal *, integer *), - dlabad_(doublereal *, doublereal *); - - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal bignum; - static logical rightv; - static doublereal smlnum; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - DTREVC computes some or all of the right and/or left eigenvectors of - a real upper quasi-triangular matrix T. - - The right eigenvector x and the left eigenvector y of T corresponding - to an eigenvalue w are defined by: - - T*x = w*x, y'*T = w*y' - - where y' denotes the conjugate transpose of the vector y. - - If all eigenvectors are requested, the routine may either return the - matrices X and/or Y of right or left eigenvectors of T, or the - products Q*X and/or Q*Y, where Q is an input orthogonal - matrix. 
If T was obtained from the real-Schur factorization of an - original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of - right or left eigenvectors of A. - - T must be in Schur canonical form (as returned by DHSEQR), that is, - block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each - 2-by-2 diagonal block has its diagonal elements equal and its - off-diagonal elements of opposite sign. Corresponding to each 2-by-2 - diagonal block is a complex conjugate pair of eigenvalues and - eigenvectors; only one eigenvector of the pair is computed, namely - the one corresponding to the eigenvalue with positive imaginary part. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'R': compute right eigenvectors only; - = 'L': compute left eigenvectors only; - = 'B': compute both right and left eigenvectors. - - HOWMNY (input) CHARACTER*1 - = 'A': compute all right and/or left eigenvectors; - = 'B': compute all right and/or left eigenvectors, - and backtransform them using the input matrices - supplied in VR and/or VL; - = 'S': compute selected right and/or left eigenvectors, - specified by the logical array SELECT. - - SELECT (input/output) LOGICAL array, dimension (N) - If HOWMNY = 'S', SELECT specifies the eigenvectors to be - computed. - If HOWMNY = 'A' or 'B', SELECT is not referenced. - To select the real eigenvector corresponding to a real - eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select - the complex eigenvector corresponding to a complex conjugate - pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be - set to .TRUE.; then on exit SELECT(j) is .TRUE. and - SELECT(j+1) is .FALSE.. - - N (input) INTEGER - The order of the matrix T. N >= 0. - - T (input) DOUBLE PRECISION array, dimension (LDT,N) - The upper quasi-triangular matrix T in Schur canonical form. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= max(1,N). 
- - VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) - On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must - contain an N-by-N matrix Q (usually the orthogonal matrix Q - of Schur vectors returned by DHSEQR). - On exit, if SIDE = 'L' or 'B', VL contains: - if HOWMNY = 'A', the matrix Y of left eigenvectors of T; - VL has the same quasi-lower triangular form - as T'. If T(i,i) is a real eigenvalue, then - the i-th column VL(i) of VL is its - corresponding eigenvector. If T(i:i+1,i:i+1) - is a 2-by-2 block whose eigenvalues are - complex-conjugate eigenvalues of T, then - VL(i)+sqrt(-1)*VL(i+1) is the complex - eigenvector corresponding to the eigenvalue - with positive real part. - if HOWMNY = 'B', the matrix Q*Y; - if HOWMNY = 'S', the left eigenvectors of T specified by - SELECT, stored consecutively in the columns - of VL, in the same order as their - eigenvalues. - A complex eigenvector corresponding to a complex eigenvalue - is stored in two consecutive columns, the first holding the - real part, and the second the imaginary part. - If SIDE = 'R', VL is not referenced. - - LDVL (input) INTEGER - The leading dimension of the array VL. LDVL >= max(1,N) if - SIDE = 'L' or 'B'; LDVL >= 1 otherwise. - - VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) - On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must - contain an N-by-N matrix Q (usually the orthogonal matrix Q - of Schur vectors returned by DHSEQR). - On exit, if SIDE = 'R' or 'B', VR contains: - if HOWMNY = 'A', the matrix X of right eigenvectors of T; - VR has the same quasi-upper triangular form - as T. If T(i,i) is a real eigenvalue, then - the i-th column VR(i) of VR is its - corresponding eigenvector. If T(i:i+1,i:i+1) - is a 2-by-2 block whose eigenvalues are - complex-conjugate eigenvalues of T, then - VR(i)+sqrt(-1)*VR(i+1) is the complex - eigenvector corresponding to the eigenvalue - with positive real part. 
- if HOWMNY = 'B', the matrix Q*X; - if HOWMNY = 'S', the right eigenvectors of T specified by - SELECT, stored consecutively in the columns - of VR, in the same order as their - eigenvalues. - A complex eigenvector corresponding to a complex eigenvalue - is stored in two consecutive columns, the first holding the - real part and the second the imaginary part. - If SIDE = 'L', VR is not referenced. - - LDVR (input) INTEGER - The leading dimension of the array VR. LDVR >= max(1,N) if - SIDE = 'R' or 'B'; LDVR >= 1 otherwise. - - MM (input) INTEGER - The number of columns in the arrays VL and/or VR. MM >= M. - - M (output) INTEGER - The number of columns in the arrays VL and/or VR actually - used to store the eigenvectors. - If HOWMNY = 'A' or 'B', M is set to N. - Each selected real eigenvector occupies one column and each - selected complex eigenvector occupies two columns. - - WORK (workspace) DOUBLE PRECISION array, dimension (3*N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The algorithm used in this program is basically backward (forward) - substitution, with scaling to make the the code robust against - possible overflow. - - Each eigenvector is normalized so that the element of largest - magnitude has magnitude 1; here the magnitude of a complex number - (x,y) is taken to be |x| + |y|. 
- - ===================================================================== - - - Decode and test the input parameters -*/ - - /* Parameter adjustments */ - --select; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - vl_dim1 = *ldvl; - vl_offset = 1 + vl_dim1 * 1; - vl -= vl_offset; - vr_dim1 = *ldvr; - vr_offset = 1 + vr_dim1 * 1; - vr -= vr_offset; - --work; - - /* Function Body */ - bothv = lsame_(side, "B"); - rightv = lsame_(side, "R") || bothv; - leftv = lsame_(side, "L") || bothv; - - allv = lsame_(howmny, "A"); - over = lsame_(howmny, "B"); - somev = lsame_(howmny, "S"); - - *info = 0; - if ((! rightv && ! leftv)) { - *info = -1; - } else if (((! allv && ! over) && ! somev)) { - *info = -2; - } else if (*n < 0) { - *info = -4; - } else if (*ldt < max(1,*n)) { - *info = -6; - } else if (*ldvl < 1 || (leftv && *ldvl < *n)) { - *info = -8; - } else if (*ldvr < 1 || (rightv && *ldvr < *n)) { - *info = -10; - } else { - -/* - Set M to the number of columns required to store the selected - eigenvectors, standardize the array SELECT if necessary, and - test MM. -*/ - - if (somev) { - *m = 0; - pair = FALSE_; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (pair) { - pair = FALSE_; - select[j] = FALSE_; - } else { - if (j < *n) { - if (t[j + 1 + j * t_dim1] == 0.) { - if (select[j]) { - ++(*m); - } - } else { - pair = TRUE_; - if (select[j] || select[j + 1]) { - select[j] = TRUE_; - *m += 2; - } - } - } else { - if (select[*n]) { - ++(*m); - } - } - } -/* L10: */ - } - } else { - *m = *n; - } - - if (*mm < *m) { - *info = -11; - } - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("DTREVC", &i__1); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - -/* Set the constants to control overflow. */ - - unfl = SAFEMINIMUM; - ovfl = 1. / unfl; - dlabad_(&unfl, &ovfl); - ulp = PRECISION; - smlnum = unfl * (*n / ulp); - bignum = (1. 
- ulp) / smlnum; - -/* - Compute 1-norm of each column of strictly upper triangular - part of T to control overflow in triangular solver. -*/ - - work[1] = 0.; - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - work[j] = 0.; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1)); -/* L20: */ - } -/* L30: */ - } - -/* - Index IP is used to specify the real or complex eigenvalue: - IP = 0, real eigenvalue, - 1, first of conjugate complex pair: (wr,wi) - -1, second of conjugate complex pair: (wr,wi) -*/ - - n2 = (*n) << (1); - - if (rightv) { - -/* Compute right eigenvectors. */ - - ip = 0; - is = *m; - for (ki = *n; ki >= 1; --ki) { - - if (ip == 1) { - goto L130; - } - if (ki == 1) { - goto L40; - } - if (t[ki + (ki - 1) * t_dim1] == 0.) { - goto L40; - } - ip = -1; - -L40: - if (somev) { - if (ip == 0) { - if (! select[ki]) { - goto L130; - } - } else { - if (! select[ki - 1]) { - goto L130; - } - } - } - -/* Compute the KI-th eigenvalue (WR,WI). */ - - wr = t[ki + ki * t_dim1]; - wi = 0.; - if (ip != 0) { - wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) * - sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2))); - } -/* Computing MAX */ - d__1 = ulp * (abs(wr) + abs(wi)); - smin = max(d__1,smlnum); - - if (ip == 0) { - -/* Real right eigenvector */ - - work[ki + *n] = 1.; - -/* Form right-hand side */ - - i__1 = ki - 1; - for (k = 1; k <= i__1; ++k) { - work[k + *n] = -t[k + ki * t_dim1]; -/* L50: */ - } - -/* - Solve the upper quasi-triangular system: - (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK. -*/ - - jnxt = ki - 1; - for (j = ki - 1; j >= 1; --j) { - if (j > jnxt) { - goto L60; - } - j1 = j; - j2 = j; - jnxt = j - 1; - if (j > 1) { - if (t[j + (j - 1) * t_dim1] != 0.) 
{ - j1 = j - 1; - jnxt = j - 2; - } - } - - if (j1 == j2) { - -/* 1-by-1 diagonal block */ - - dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j + - j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * - n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, - &ierr); - -/* - Scale X(1,1) to avoid overflow when updating - the right-hand side. -*/ - - if (xnorm > 1.) { - if (work[j] > bignum / xnorm) { - x[0] /= xnorm; - scale /= xnorm; - } - } - -/* Scale if necessary */ - - if (scale != 1.) { - dscal_(&ki, &scale, &work[*n + 1], &c__1); - } - work[j + *n] = x[0]; - -/* Update right-hand side */ - - i__1 = j - 1; - d__1 = -x[0]; - daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ - *n + 1], &c__1); - - } else { - -/* 2-by-2 diagonal block */ - - dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b15, &t[j - - 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, & - work[j - 1 + *n], n, &wr, &c_b29, x, &c__2, & - scale, &xnorm, &ierr); - -/* - Scale X(1,1) and X(2,1) to avoid overflow when - updating the right-hand side. -*/ - - if (xnorm > 1.) { -/* Computing MAX */ - d__1 = work[j - 1], d__2 = work[j]; - beta = max(d__1,d__2); - if (beta > bignum / xnorm) { - x[0] /= xnorm; - x[1] /= xnorm; - scale /= xnorm; - } - } - -/* Scale if necessary */ - - if (scale != 1.) { - dscal_(&ki, &scale, &work[*n + 1], &c__1); - } - work[j - 1 + *n] = x[0]; - work[j + *n] = x[1]; - -/* Update right-hand side */ - - i__1 = j - 2; - d__1 = -x[0]; - daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, - &work[*n + 1], &c__1); - i__1 = j - 2; - d__1 = -x[1]; - daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ - *n + 1], &c__1); - } -L60: - ; - } - -/* Copy the vector x or Q*x to VR and normalize. */ - - if (! over) { - dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], & - c__1); - - ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1); - remax = 1. 
/ (d__1 = vr[ii + is * vr_dim1], abs(d__1)); - dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); - - i__1 = *n; - for (k = ki + 1; k <= i__1; ++k) { - vr[k + is * vr_dim1] = 0.; -/* L70: */ - } - } else { - if (ki > 1) { - i__1 = ki - 1; - dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & - work[*n + 1], &c__1, &work[ki + *n], &vr[ki * - vr_dim1 + 1], &c__1); - } - - ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1); - remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1)); - dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); - } - - } else { - -/* - Complex right eigenvector. - - Initial solve - [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0. - [ (T(KI,KI-1) T(KI,KI) ) ] -*/ - - if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[ - ki + (ki - 1) * t_dim1], abs(d__2))) { - work[ki - 1 + *n] = 1.; - work[ki + n2] = wi / t[ki - 1 + ki * t_dim1]; - } else { - work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1]; - work[ki + n2] = 1.; - } - work[ki + *n] = 0.; - work[ki - 1 + n2] = 0.; - -/* Form right-hand side */ - - i__1 = ki - 2; - for (k = 1; k <= i__1; ++k) { - work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) * - t_dim1]; - work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1]; -/* L80: */ - } - -/* - Solve upper quasi-triangular system: - (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2) -*/ - - jnxt = ki - 2; - for (j = ki - 2; j >= 1; --j) { - if (j > jnxt) { - goto L90; - } - j1 = j; - j2 = j; - jnxt = j - 1; - if (j > 1) { - if (t[j + (j - 1) * t_dim1] != 0.) { - j1 = j - 1; - jnxt = j - 2; - } - } - - if (j1 == j2) { - -/* 1-by-1 diagonal block */ - - dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j + - j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * - n], n, &wr, &wi, x, &c__2, &scale, &xnorm, & - ierr); - -/* - Scale X(1,1) and X(1,2) to avoid overflow when - updating the right-hand side. -*/ - - if (xnorm > 1.) 
{ - if (work[j] > bignum / xnorm) { - x[0] /= xnorm; - x[2] /= xnorm; - scale /= xnorm; - } - } - -/* Scale if necessary */ - - if (scale != 1.) { - dscal_(&ki, &scale, &work[*n + 1], &c__1); - dscal_(&ki, &scale, &work[n2 + 1], &c__1); - } - work[j + *n] = x[0]; - work[j + n2] = x[2]; - -/* Update the right-hand side */ - - i__1 = j - 1; - d__1 = -x[0]; - daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ - *n + 1], &c__1); - i__1 = j - 1; - d__1 = -x[2]; - daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ - n2 + 1], &c__1); - - } else { - -/* 2-by-2 diagonal block */ - - dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b15, &t[j - - 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, & - work[j - 1 + *n], n, &wr, &wi, x, &c__2, & - scale, &xnorm, &ierr); - -/* - Scale X to avoid overflow when updating - the right-hand side. -*/ - - if (xnorm > 1.) { -/* Computing MAX */ - d__1 = work[j - 1], d__2 = work[j]; - beta = max(d__1,d__2); - if (beta > bignum / xnorm) { - rec = 1. / xnorm; - x[0] *= rec; - x[2] *= rec; - x[1] *= rec; - x[3] *= rec; - scale *= rec; - } - } - -/* Scale if necessary */ - - if (scale != 1.) { - dscal_(&ki, &scale, &work[*n + 1], &c__1); - dscal_(&ki, &scale, &work[n2 + 1], &c__1); - } - work[j - 1 + *n] = x[0]; - work[j + *n] = x[1]; - work[j - 1 + n2] = x[2]; - work[j + n2] = x[3]; - -/* Update the right-hand side */ - - i__1 = j - 2; - d__1 = -x[0]; - daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, - &work[*n + 1], &c__1); - i__1 = j - 2; - d__1 = -x[1]; - daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ - *n + 1], &c__1); - i__1 = j - 2; - d__1 = -x[2]; - daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, - &work[n2 + 1], &c__1); - i__1 = j - 2; - d__1 = -x[3]; - daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ - n2 + 1], &c__1); - } -L90: - ; - } - -/* Copy the vector x or Q*x to VR and normalize. */ - - if (! 
over) { - dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1 - + 1], &c__1); - dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], & - c__1); - - emax = 0.; - i__1 = ki; - for (k = 1; k <= i__1; ++k) { -/* Computing MAX */ - d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1] - , abs(d__1)) + (d__2 = vr[k + is * vr_dim1], - abs(d__2)); - emax = max(d__3,d__4); -/* L100: */ - } - - remax = 1. / emax; - dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1); - dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); - - i__1 = *n; - for (k = ki + 1; k <= i__1; ++k) { - vr[k + (is - 1) * vr_dim1] = 0.; - vr[k + is * vr_dim1] = 0.; -/* L110: */ - } - - } else { - - if (ki > 2) { - i__1 = ki - 2; - dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & - work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[( - ki - 1) * vr_dim1 + 1], &c__1); - i__1 = ki - 2; - dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & - work[n2 + 1], &c__1, &work[ki + n2], &vr[ki * - vr_dim1 + 1], &c__1); - } else { - dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1 - + 1], &c__1); - dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], & - c__1); - } - - emax = 0.; - i__1 = *n; - for (k = 1; k <= i__1; ++k) { -/* Computing MAX */ - d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1] - , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1], - abs(d__2)); - emax = max(d__3,d__4); -/* L120: */ - } - remax = 1. / emax; - dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1); - dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); - } - } - - --is; - if (ip != 0) { - --is; - } -L130: - if (ip == 1) { - ip = 0; - } - if (ip == -1) { - ip = 1; - } -/* L140: */ - } - } - - if (leftv) { - -/* Compute left eigenvectors. */ - - ip = 0; - is = 1; - i__1 = *n; - for (ki = 1; ki <= i__1; ++ki) { - - if (ip == -1) { - goto L250; - } - if (ki == *n) { - goto L150; - } - if (t[ki + 1 + ki * t_dim1] == 0.) { - goto L150; - } - ip = 1; - -L150: - if (somev) { - if (! 
select[ki]) { - goto L250; - } - } - -/* Compute the KI-th eigenvalue (WR,WI). */ - - wr = t[ki + ki * t_dim1]; - wi = 0.; - if (ip != 0) { - wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) * - sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2))); - } -/* Computing MAX */ - d__1 = ulp * (abs(wr) + abs(wi)); - smin = max(d__1,smlnum); - - if (ip == 0) { - -/* Real left eigenvector. */ - - work[ki + *n] = 1.; - -/* Form right-hand side */ - - i__2 = *n; - for (k = ki + 1; k <= i__2; ++k) { - work[k + *n] = -t[ki + k * t_dim1]; -/* L160: */ - } - -/* - Solve the quasi-triangular system: - (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK -*/ - - vmax = 1.; - vcrit = bignum; - - jnxt = ki + 1; - i__2 = *n; - for (j = ki + 1; j <= i__2; ++j) { - if (j < jnxt) { - goto L170; - } - j1 = j; - j2 = j; - jnxt = j + 1; - if (j < *n) { - if (t[j + 1 + j * t_dim1] != 0.) { - j2 = j + 1; - jnxt = j + 2; - } - } - - if (j1 == j2) { - -/* - 1-by-1 diagonal block - - Scale if necessary to avoid overflow when forming - the right-hand side. -*/ - - if (work[j] > vcrit) { - rec = 1. / vmax; - i__3 = *n - ki + 1; - dscal_(&i__3, &rec, &work[ki + *n], &c__1); - vmax = 1.; - vcrit = bignum; - } - - i__3 = j - ki - 1; - work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1], - &c__1, &work[ki + 1 + *n], &c__1); - -/* Solve (T(J,J)-WR)'*X = WORK */ - - dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j + - j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * - n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, - &ierr); - -/* Scale if necessary */ - - if (scale != 1.) { - i__3 = *n - ki + 1; - dscal_(&i__3, &scale, &work[ki + *n], &c__1); - } - work[j + *n] = x[0]; -/* Computing MAX */ - d__2 = (d__1 = work[j + *n], abs(d__1)); - vmax = max(d__2,vmax); - vcrit = bignum / vmax; - - } else { - -/* - 2-by-2 diagonal block - - Scale if necessary to avoid overflow when forming - the right-hand side. 
- - Computing MAX -*/ - d__1 = work[j], d__2 = work[j + 1]; - beta = max(d__1,d__2); - if (beta > vcrit) { - rec = 1. / vmax; - i__3 = *n - ki + 1; - dscal_(&i__3, &rec, &work[ki + *n], &c__1); - vmax = 1.; - vcrit = bignum; - } - - i__3 = j - ki - 1; - work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1], - &c__1, &work[ki + 1 + *n], &c__1); - - i__3 = j - ki - 1; - work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) * - t_dim1], &c__1, &work[ki + 1 + *n], &c__1); - -/* - Solve - [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 ) - [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 ) -*/ - - dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b15, &t[j + - j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * - n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, - &ierr); - -/* Scale if necessary */ - - if (scale != 1.) { - i__3 = *n - ki + 1; - dscal_(&i__3, &scale, &work[ki + *n], &c__1); - } - work[j + *n] = x[0]; - work[j + 1 + *n] = x[1]; - -/* Computing MAX */ - d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2 - = work[j + 1 + *n], abs(d__2)), d__3 = max( - d__3,d__4); - vmax = max(d__3,vmax); - vcrit = bignum / vmax; - - } -L170: - ; - } - -/* Copy the vector x or Q*x to VL and normalize. */ - - if (! over) { - i__2 = *n - ki + 1; - dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is * - vl_dim1], &c__1); - - i__2 = *n - ki + 1; - ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - - 1; - remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1)); - i__2 = *n - ki + 1; - dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); - - i__2 = ki - 1; - for (k = 1; k <= i__2; ++k) { - vl[k + is * vl_dim1] = 0.; -/* L180: */ - } - - } else { - - if (ki < *n) { - i__2 = *n - ki; - dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 1) * vl_dim1 - + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[ - ki + *n], &vl[ki * vl_dim1 + 1], &c__1); - } - - ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1); - remax = 1. 
/ (d__1 = vl[ii + ki * vl_dim1], abs(d__1)); - dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1); - - } - - } else { - -/* - Complex left eigenvector. - - Initial solve: - ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0. - ((T(KI+1,KI) T(KI+1,KI+1)) ) -*/ - - if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 = - t[ki + 1 + ki * t_dim1], abs(d__2))) { - work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1]; - work[ki + 1 + n2] = 1.; - } else { - work[ki + *n] = 1.; - work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1]; - } - work[ki + 1 + *n] = 0.; - work[ki + n2] = 0.; - -/* Form right-hand side */ - - i__2 = *n; - for (k = ki + 2; k <= i__2; ++k) { - work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1]; - work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1] - ; -/* L190: */ - } - -/* - Solve complex quasi-triangular system: - ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2 -*/ - - vmax = 1.; - vcrit = bignum; - - jnxt = ki + 2; - i__2 = *n; - for (j = ki + 2; j <= i__2; ++j) { - if (j < jnxt) { - goto L200; - } - j1 = j; - j2 = j; - jnxt = j + 1; - if (j < *n) { - if (t[j + 1 + j * t_dim1] != 0.) { - j2 = j + 1; - jnxt = j + 2; - } - } - - if (j1 == j2) { - -/* - 1-by-1 diagonal block - - Scale if necessary to avoid overflow when - forming the right-hand side elements. -*/ - - if (work[j] > vcrit) { - rec = 1. 
/ vmax; - i__3 = *n - ki + 1; - dscal_(&i__3, &rec, &work[ki + *n], &c__1); - i__3 = *n - ki + 1; - dscal_(&i__3, &rec, &work[ki + n2], &c__1); - vmax = 1.; - vcrit = bignum; - } - - i__3 = j - ki - 2; - work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], - &c__1, &work[ki + 2 + *n], &c__1); - i__3 = j - ki - 2; - work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], - &c__1, &work[ki + 2 + n2], &c__1); - -/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */ - - d__1 = -wi; - dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j + - j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * - n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, & - ierr); - -/* Scale if necessary */ - - if (scale != 1.) { - i__3 = *n - ki + 1; - dscal_(&i__3, &scale, &work[ki + *n], &c__1); - i__3 = *n - ki + 1; - dscal_(&i__3, &scale, &work[ki + n2], &c__1); - } - work[j + *n] = x[0]; - work[j + n2] = x[2]; -/* Computing MAX */ - d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2 - = work[j + n2], abs(d__2)), d__3 = max(d__3, - d__4); - vmax = max(d__3,vmax); - vcrit = bignum / vmax; - - } else { - -/* - 2-by-2 diagonal block - - Scale if necessary to avoid overflow when forming - the right-hand side elements. - - Computing MAX -*/ - d__1 = work[j], d__2 = work[j + 1]; - beta = max(d__1,d__2); - if (beta > vcrit) { - rec = 1. 
/ vmax; - i__3 = *n - ki + 1; - dscal_(&i__3, &rec, &work[ki + *n], &c__1); - i__3 = *n - ki + 1; - dscal_(&i__3, &rec, &work[ki + n2], &c__1); - vmax = 1.; - vcrit = bignum; - } - - i__3 = j - ki - 2; - work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], - &c__1, &work[ki + 2 + *n], &c__1); - - i__3 = j - ki - 2; - work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], - &c__1, &work[ki + 2 + n2], &c__1); - - i__3 = j - ki - 2; - work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) * - t_dim1], &c__1, &work[ki + 2 + *n], &c__1); - - i__3 = j - ki - 2; - work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) * - t_dim1], &c__1, &work[ki + 2 + n2], &c__1); - -/* - Solve 2-by-2 complex linear equation - ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B - ([T(j+1,j) T(j+1,j+1)] ) -*/ - - d__1 = -wi; - dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b15, &t[j + - j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * - n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, & - ierr); - -/* Scale if necessary */ - - if (scale != 1.) { - i__3 = *n - ki + 1; - dscal_(&i__3, &scale, &work[ki + *n], &c__1); - i__3 = *n - ki + 1; - dscal_(&i__3, &scale, &work[ki + n2], &c__1); - } - work[j + *n] = x[0]; - work[j + n2] = x[2]; - work[j + 1 + *n] = x[1]; - work[j + 1 + n2] = x[3]; -/* Computing MAX */ - d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1, - d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2) - , d__2 = abs(x[3]), d__1 = max(d__1,d__2); - vmax = max(d__1,vmax); - vcrit = bignum / vmax; - - } -L200: - ; - } - -/* - Copy the vector x or Q*x to VL and normalize. - - L210: -*/ - if (! 
over) { - i__2 = *n - ki + 1; - dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is * - vl_dim1], &c__1); - i__2 = *n - ki + 1; - dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) * - vl_dim1], &c__1); - - emax = 0.; - i__2 = *n; - for (k = ki; k <= i__2; ++k) { -/* Computing MAX */ - d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs( - d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1], - abs(d__2)); - emax = max(d__3,d__4); -/* L220: */ - } - remax = 1. / emax; - i__2 = *n - ki + 1; - dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); - i__2 = *n - ki + 1; - dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1) - ; - - i__2 = ki - 1; - for (k = 1; k <= i__2; ++k) { - vl[k + is * vl_dim1] = 0.; - vl[k + (is + 1) * vl_dim1] = 0.; -/* L230: */ - } - } else { - if (ki < *n - 1) { - i__2 = *n - ki - 1; - dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1 - + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[ - ki + *n], &vl[ki * vl_dim1 + 1], &c__1); - i__2 = *n - ki - 1; - dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1 - + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[ - ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], & - c__1); - } else { - dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], & - c__1); - dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1 - + 1], &c__1); - } - - emax = 0.; - i__2 = *n; - for (k = 1; k <= i__2; ++k) { -/* Computing MAX */ - d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs( - d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1], - abs(d__2)); - emax = max(d__3,d__4); -/* L240: */ - } - remax = 1. 
		    / emax;
		    dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
		    dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);

		}

	    }

	    ++is;
	    if (ip != 0) {
		++is;
	    }
L250:
	    if (ip == -1) {
		ip = 0;
	    }
	    if (ip == 1) {
		ip = -1;
	    }

/* L260: */
	}

    }

    return 0;

/* End of DTREVC */

} /* dtrevc_ */

integer ieeeck_(integer *ispec, real *zero, real *one)
{
    /* System generated locals */
    integer ret_val;

    /* Local variables */
    static real nan1, nan2, nan3, nan4, nan5, nan6, neginf, posinf, negzro,
	    newzro;


/*
    -- LAPACK auxiliary routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1998


    Purpose
    =======

    IEEECK is called from ILAENV to verify that Infinity and
    possibly NaN arithmetic is safe (i.e. will not trap).

    Arguments
    =========

    ISPEC   (input) INTEGER
            Specifies whether to test just for infinity arithmetic
            or whether to test for infinity and NaN arithmetic.
            = 0: Verify infinity arithmetic only.
            = 1: Verify infinity and NaN arithmetic.

    ZERO    (input) REAL
            Must contain the value 0.0
            This is passed to prevent the compiler from optimizing
            away this code.

    ONE     (input) REAL
            Must contain the value 1.0
            This is passed to prevent the compiler from optimizing
            away this code.

    RETURN VALUE:  INTEGER
            = 0:  Arithmetic failed to produce the correct answers
            = 1:  Arithmetic produced the correct answers
*/

/*
   Optimistically assume IEEE-conformant, non-trapping arithmetic;
   each failed probe below clears the flag and bails out early.
*/
    ret_val = 1;

/* 1/0 must produce +Inf, and +Inf must compare greater than one. */
    posinf = *one / *zero;
    if (posinf <= *one) {
	ret_val = 0;
	return ret_val;
    }

/* -1/0 must produce -Inf, which must compare less than zero. */
    neginf = -(*one) / *zero;
    if (neginf >= *zero) {
	ret_val = 0;
	return ret_val;
    }

/* -Inf + 1 stays -Inf; 1/(-Inf) must yield negative zero, which
   still compares equal to zero. */
    negzro = *one / (neginf + *one);
    if (negzro != *zero) {
	ret_val = 0;
	return ret_val;
    }

/* Dividing by negative zero must give -Inf again. */
    neginf = *one / negzro;
    if (neginf >= *zero) {
	ret_val = 0;
	return ret_val;
    }

/* -0 + 0 must produce a zero that compares equal to zero. */
    newzro = negzro + *zero;
    if (newzro != *zero) {
	ret_val = 0;
	return ret_val;
    }

/* 1/(that zero) must again be +Inf. */
    posinf = *one / newzro;
    if (posinf <= *one) {
	ret_val = 0;
	return ret_val;
    }

/* (-Inf) * (+Inf) must remain -Inf (negative). */
    neginf *= posinf;
    if (neginf >= *zero) {
	ret_val = 0;
	return ret_val;
    }

/* (+Inf) * (+Inf) must remain +Inf (greater than one). */
    posinf *= posinf;
    if (posinf <= *one) {
	ret_val = 0;
	return ret_val;
    }


/*     Return if we were only asked to check infinity arithmetic */

    if (*ispec == 0) {
	return ret_val;
    }

/*
   NaN probes: each expression below is an IEEE invalid operation and
   must yield a NaN.  A NaN is detected by the property that it does
   NOT compare equal to itself; if any probe compares equal, NaN
   arithmetic is not trustworthy and 0 is returned.
*/
    nan1 = posinf + neginf;

    nan2 = posinf / neginf;

    nan3 = posinf / posinf;

    nan4 = posinf * *zero;

    nan5 = neginf * negzro;

    nan6 = nan5 * 0.f;

    if (nan1 == nan1) {
	ret_val = 0;
	return ret_val;
    }

    if (nan2 == nan2) {
	ret_val = 0;
	return ret_val;
    }

    if (nan3 == nan3) {
	ret_val = 0;
	return ret_val;
    }

    if (nan4 == nan4) {
	ret_val = 0;
	return ret_val;
    }

    if (nan5 == nan5) {
	ret_val = 0;
	return ret_val;
    }

    if (nan6 == nan6) {
	ret_val = 0;
	return ret_val;
    }

    return ret_val;
} /* ieeeck_ */

integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1,
	integer *n2, integer *n3, integer *n4, ftnlen name_len, ftnlen
	opts_len)
{
    /* System generated locals */
    integer ret_val;

    /* Builtin functions */
    /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
    integer s_cmp(char *, char *, ftnlen, ftnlen);

    /* Local variables */
    static integer i__;
    static char c1[1], c2[2], c3[3], c4[2];
    static integer 
ic, nb, iz, nx; - static logical cname, sname; - static integer nbmin; - extern integer ieeeck_(integer *, real *, real *); - static char subnam[6]; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ILAENV is called from the LAPACK routines to choose problem-dependent - parameters for the local environment. See ISPEC for a description of - the parameters. - - This version provides a set of parameters which should give good, - but not optimal, performance on many of the currently available - computers. Users are encouraged to modify this subroutine to set - the tuning parameters for their particular machine using the option - and problem size information in the arguments. - - This routine will not function correctly if it is converted to all - lower case. Converting it to all upper case is allowed. - - Arguments - ========= - - ISPEC (input) INTEGER - Specifies the parameter to be returned as the value of - ILAENV. - = 1: the optimal blocksize; if this value is 1, an unblocked - algorithm will give the best performance. - = 2: the minimum block size for which the block routine - should be used; if the usable block size is less than - this value, an unblocked routine should be used. - = 3: the crossover point (in a block routine, for N less - than this value, an unblocked routine should be used) - = 4: the number of shifts, used in the nonsymmetric - eigenvalue routines - = 5: the minimum column dimension for blocking to be used; - rectangular blocks must have dimension at least k by m, - where k is given by ILAENV(2,...) and m by ILAENV(5,...) - = 6: the crossover point for the SVD (when reducing an m by n - matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds - this value, a QR factorization is used first to reduce - the matrix to a triangular form.) 
- = 7: the number of processors - = 8: the crossover point for the multishift QR and QZ methods - for nonsymmetric eigenvalue problems. - = 9: maximum size of the subproblems at the bottom of the - computation tree in the divide-and-conquer algorithm - (used by xGELSD and xGESDD) - =10: ieee NaN arithmetic can be trusted not to trap - =11: infinity arithmetic can be trusted not to trap - - NAME (input) CHARACTER*(*) - The name of the calling subroutine, in either upper case or - lower case. - - OPTS (input) CHARACTER*(*) - The character options to the subroutine NAME, concatenated - into a single character string. For example, UPLO = 'U', - TRANS = 'T', and DIAG = 'N' for a triangular routine would - be specified as OPTS = 'UTN'. - - N1 (input) INTEGER - N2 (input) INTEGER - N3 (input) INTEGER - N4 (input) INTEGER - Problem dimensions for the subroutine NAME; these may not all - be required. - - (ILAENV) (output) INTEGER - >= 0: the value of the parameter specified by ISPEC - < 0: if ILAENV = -k, the k-th argument had an illegal value. - - Further Details - =============== - - The following conventions have been used when calling ILAENV from the - LAPACK routines: - 1) OPTS is a concatenation of all of the character options to - subroutine NAME, in the same order that they appear in the - argument list for NAME, even if they are not used in determining - the value of the parameter specified by ISPEC. - 2) The problem dimensions N1, N2, N3, N4 are specified in the order - that they appear in the argument list for NAME. N1 is used - first, N2 second, and so on, and unused problem dimensions are - passed a value of -1. - 3) The parameter value returned by ILAENV is checked for validity in - the calling subroutine. 
For example, ILAENV is used to retrieve - the optimal blocksize for STRTRI as follows: - - NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 ) - IF( NB.LE.1 ) NB = MAX( 1, N ) - - ===================================================================== -*/ - - - switch (*ispec) { - case 1: goto L100; - case 2: goto L100; - case 3: goto L100; - case 4: goto L400; - case 5: goto L500; - case 6: goto L600; - case 7: goto L700; - case 8: goto L800; - case 9: goto L900; - case 10: goto L1000; - case 11: goto L1100; - } - -/* Invalid value for ISPEC */ - - ret_val = -1; - return ret_val; - -L100: - -/* Convert NAME to upper case if the first character is lower case. */ - - ret_val = 1; - s_copy(subnam, name__, (ftnlen)6, name_len); - ic = *(unsigned char *)subnam; - iz = 'Z'; - if (iz == 90 || iz == 122) { - -/* ASCII character set */ - - if ((ic >= 97 && ic <= 122)) { - *(unsigned char *)subnam = (char) (ic - 32); - for (i__ = 2; i__ <= 6; ++i__) { - ic = *(unsigned char *)&subnam[i__ - 1]; - if ((ic >= 97 && ic <= 122)) { - *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32); - } -/* L10: */ - } - } - - } else if (iz == 233 || iz == 169) { - -/* EBCDIC character set */ - - if ((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153) || (ic >= - 162 && ic <= 169)) { - *(unsigned char *)subnam = (char) (ic + 64); - for (i__ = 2; i__ <= 6; ++i__) { - ic = *(unsigned char *)&subnam[i__ - 1]; - if ((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153) || ( - ic >= 162 && ic <= 169)) { - *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64); - } -/* L20: */ - } - } - - } else if (iz == 218 || iz == 250) { - -/* Prime machines: ASCII+128 */ - - if ((ic >= 225 && ic <= 250)) { - *(unsigned char *)subnam = (char) (ic - 32); - for (i__ = 2; i__ <= 6; ++i__) { - ic = *(unsigned char *)&subnam[i__ - 1]; - if ((ic >= 225 && ic <= 250)) { - *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32); - } -/* L30: */ - } - } - } - - *(unsigned char *)c1 = *(unsigned char *)subnam; - sname = 
*(unsigned char *)c1 == 'S' || *(unsigned char *)c1 == 'D'; - cname = *(unsigned char *)c1 == 'C' || *(unsigned char *)c1 == 'Z'; - if (! (cname || sname)) { - return ret_val; - } - s_copy(c2, subnam + 1, (ftnlen)2, (ftnlen)2); - s_copy(c3, subnam + 3, (ftnlen)3, (ftnlen)3); - s_copy(c4, c3 + 1, (ftnlen)2, (ftnlen)2); - - switch (*ispec) { - case 1: goto L110; - case 2: goto L200; - case 3: goto L300; - } - -L110: - -/* - ISPEC = 1: block size - - In these examples, separate code is provided for setting NB for - real and complex. We assume that NB will take the same value in - single or double precision. -*/ - - nb = 1; - - if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 64; - } else { - nb = 64; - } - } else if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, - "RQF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen) - 3, (ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) - == 0) { - if (sname) { - nb = 32; - } else { - nb = 32; - } - } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 32; - } else { - nb = 32; - } - } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 32; - } else { - nb = 32; - } - } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 64; - } else { - nb = 64; - } - } - } else if (s_cmp(c2, "PO", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 64; - } else { - nb = 64; - } - } - } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 64; - } else { - nb = 64; - } - } else if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) { - nb = 32; - } else if ((sname && s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0)) { - nb = 64; - } - } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) { - if (s_cmp(c3, "TRF", (ftnlen)3, 
(ftnlen)3) == 0) { - nb = 64; - } else if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { - nb = 32; - } else if (s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) { - nb = 64; - } - } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) { - if (*(unsigned char *)c3 == 'G') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nb = 32; - } - } else if (*(unsigned char *)c3 == 'M') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nb = 32; - } - } - } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) { - if (*(unsigned char *)c3 == 'G') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nb = 32; - } - } else if (*(unsigned char *)c3 == 'M') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nb = 32; - } - } - } else if (s_cmp(c2, "GB", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRF", 
(ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - if (*n4 <= 64) { - nb = 1; - } else { - nb = 32; - } - } else { - if (*n4 <= 64) { - nb = 1; - } else { - nb = 32; - } - } - } - } else if (s_cmp(c2, "PB", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - if (*n2 <= 64) { - nb = 1; - } else { - nb = 32; - } - } else { - if (*n2 <= 64) { - nb = 1; - } else { - nb = 32; - } - } - } - } else if (s_cmp(c2, "TR", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 64; - } else { - nb = 64; - } - } - } else if (s_cmp(c2, "LA", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "UUM", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nb = 64; - } else { - nb = 64; - } - } - } else if ((sname && s_cmp(c2, "ST", (ftnlen)2, (ftnlen)2) == 0)) { - if (s_cmp(c3, "EBZ", (ftnlen)3, (ftnlen)3) == 0) { - nb = 1; - } - } - ret_val = nb; - return ret_val; - -L200: - -/* ISPEC = 2: minimum block size */ - - nbmin = 2; - if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", ( - ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, ( - ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) - { - if (sname) { - nbmin = 2; - } else { - nbmin = 2; - } - } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nbmin = 2; - } else { - nbmin = 2; - } - } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nbmin = 2; - } else { - nbmin = 2; - } - } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nbmin = 2; - } else { - nbmin = 2; - } - } - } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nbmin = 8; - } else { - nbmin = 8; - } - } else if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) { - nbmin = 2; - } - } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) { - if 
(s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { - nbmin = 2; - } - } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) { - if (*(unsigned char *)c3 == 'G') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nbmin = 2; - } - } else if (*(unsigned char *)c3 == 'M') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nbmin = 2; - } - } - } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) { - if (*(unsigned char *)c3 == 'G') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nbmin = 2; - } - } else if (*(unsigned char *)c3 == 'M') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nbmin = 2; - } - } - } - ret_val = nbmin; - return ret_val; - -L300: - -/* ISPEC = 3: crossover point */ - - nx = 0; - if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { - if (s_cmp(c3, "QRF", (ftnlen)3, 
(ftnlen)3) == 0 || s_cmp(c3, "RQF", ( - ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, ( - ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) - { - if (sname) { - nx = 128; - } else { - nx = 128; - } - } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nx = 128; - } else { - nx = 128; - } - } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { - if (sname) { - nx = 128; - } else { - nx = 128; - } - } - } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { - if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) { - nx = 32; - } - } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) { - if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { - nx = 32; - } - } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) { - if (*(unsigned char *)c3 == 'G') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nx = 128; - } - } - } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) { - if (*(unsigned char *)c3 == 'G') { - if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", - (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( - ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == - 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( - c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( - ftnlen)2, (ftnlen)2) == 0) { - nx = 128; - } - } - } - ret_val = nx; - return ret_val; - -L400: - -/* ISPEC = 4: number of shifts (used by xHSEQR) */ - - ret_val = 6; - return ret_val; - -L500: - -/* ISPEC = 5: minimum column dimension (not used) */ - - ret_val = 2; - return ret_val; - -L600: - -/* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */ - - ret_val = 
(integer) ((real) min(*n1,*n2) * 1.6f); - return ret_val; - -L700: - -/* ISPEC = 7: number of processors (not used) */ - - ret_val = 1; - return ret_val; - -L800: - -/* ISPEC = 8: crossover point for multishift (used by xHSEQR) */ - - ret_val = 50; - return ret_val; - -L900: - -/* - ISPEC = 9: maximum size of the subproblems at the bottom of the - computation tree in the divide-and-conquer algorithm - (used by xGELSD and xGESDD) -*/ - - ret_val = 25; - return ret_val; - -L1000: - -/* - ISPEC = 10: ieee NaN arithmetic can be trusted not to trap - - ILAENV = 0 -*/ - ret_val = 1; - if (ret_val == 1) { - ret_val = ieeeck_(&c__0, &c_b3825, &c_b3826); - } - return ret_val; - -L1100: - -/* - ISPEC = 11: infinity arithmetic can be trusted not to trap - - ILAENV = 0 -*/ - ret_val = 1; - if (ret_val == 1) { - ret_val = ieeeck_(&c__1, &c_b3825, &c_b3826); - } - return ret_val; - -/* End of ILAENV */ - -} /* ilaenv_ */ - diff --git a/numpy-1.6.2/numpy/linalg/f2c.h b/numpy-1.6.2/numpy/linalg/f2c.h deleted file mode 100644 index e27d7ae577..0000000000 --- a/numpy-1.6.2/numpy/linalg/f2c.h +++ /dev/null @@ -1,217 +0,0 @@ -/* f2c.h -- Standard Fortran to C header file */ - -/** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." 
- - - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ - -#ifndef F2C_INCLUDE -#define F2C_INCLUDE - -typedef int integer; -typedef char *address; -typedef short int shortint; -typedef float real; -typedef double doublereal; -typedef struct { real r, i; } complex; -typedef struct { doublereal r, i; } doublecomplex; -typedef int logical; -typedef short int shortlogical; -typedef char logical1; -typedef char integer1; - -#define TRUE_ (1) -#define FALSE_ (0) - -/* Extern is for use with -E */ -#ifndef Extern -#define Extern extern -#endif - -/* I/O stuff */ - -#ifdef f2c_i2 -/* for -i2 */ -typedef short flag; -typedef short ftnlen; -typedef short ftnint; -#else -typedef int flag; -typedef int ftnlen; -typedef int ftnint; -#endif - -/*external read, write*/ -typedef struct -{ flag cierr; - ftnint ciunit; - flag ciend; - char *cifmt; - ftnint cirec; -} cilist; - -/*internal read, write*/ -typedef struct -{ flag icierr; - char *iciunit; - flag iciend; - char *icifmt; - ftnint icirlen; - ftnint icirnum; -} icilist; - -/*open*/ -typedef struct -{ flag oerr; - ftnint ounit; - char *ofnm; - ftnlen ofnmlen; - char *osta; - char *oacc; - char *ofm; - ftnint orl; - char *oblnk; -} olist; - -/*close*/ -typedef struct -{ flag cerr; - ftnint cunit; - char *csta; -} cllist; - -/*rewind, backspace, endfile*/ -typedef struct -{ flag aerr; - ftnint aunit; -} alist; - -/* inquire */ -typedef struct -{ flag inerr; - ftnint inunit; - char *infile; - ftnlen infilen; - ftnint *inex; /*parameters in standard's order*/ - ftnint *inopen; - ftnint *innum; - ftnint *innamed; - char *inname; - ftnlen innamlen; - char *inacc; - ftnlen inacclen; - char *inseq; - ftnlen inseqlen; - char *indir; - ftnlen indirlen; - char *infmt; - ftnlen infmtlen; - char *inform; - ftnint informlen; - char *inunf; - ftnlen inunflen; - ftnint *inrecl; - ftnint *innrec; - char *inblank; - ftnlen inblanklen; -} inlist; - -#define VOID void - -union Multitype { /* for multiple entry points */ - 
shortint h; - integer i; - real r; - doublereal d; - complex c; - doublecomplex z; - }; - -typedef union Multitype Multitype; - -typedef long Long; /* No longer used; formerly in Namelist */ - -struct Vardesc { /* for Namelist */ - char *name; - char *addr; - ftnlen *dims; - int type; - }; -typedef struct Vardesc Vardesc; - -struct Namelist { - char *name; - Vardesc **vars; - int nvars; - }; -typedef struct Namelist Namelist; - -#ifndef abs -#define abs(x) ((x) >= 0 ? (x) : -(x)) -#endif -#define dabs(x) (doublereal)abs(x) -#ifndef min -#define min(a,b) ((a) <= (b) ? (a) : (b)) -#endif -#ifndef max -#define max(a,b) ((a) >= (b) ? (a) : (b)) -#endif -#define dmin(a,b) (doublereal)min(a,b) -#define dmax(a,b) (doublereal)max(a,b) - -/* procedure parameter types for -A and -C++ */ - -#define F2C_proc_par_types 1 -#ifdef __cplusplus -typedef int /* Unknown procedure type */ (*U_fp)(...); -typedef shortint (*J_fp)(...); -typedef integer (*I_fp)(...); -typedef real (*R_fp)(...); -typedef doublereal (*D_fp)(...), (*E_fp)(...); -typedef /* Complex */ VOID (*C_fp)(...); -typedef /* Double Complex */ VOID (*Z_fp)(...); -typedef logical (*L_fp)(...); -typedef shortlogical (*K_fp)(...); -typedef /* Character */ VOID (*H_fp)(...); -typedef /* Subroutine */ int (*S_fp)(...); -#else -typedef int /* Unknown procedure type */ (*U_fp)(void); -typedef shortint (*J_fp)(void); -typedef integer (*I_fp)(void); -typedef real (*R_fp)(void); -typedef doublereal (*D_fp)(void), (*E_fp)(void); -typedef /* Complex */ VOID (*C_fp)(void); -typedef /* Double Complex */ VOID (*Z_fp)(void); -typedef logical (*L_fp)(void); -typedef shortlogical (*K_fp)(void); -typedef /* Character */ VOID (*H_fp)(void); -typedef /* Subroutine */ int (*S_fp)(void); -#endif -/* E_fp is for real functions when -R is not specified */ -typedef VOID C_f; /* complex function */ -typedef VOID H_f; /* character function */ -typedef VOID Z_f; /* double complex function */ -typedef doublereal E_f; /* real function with -R not 
specified */ - -/* undef any lower-case symbols that your C compiler predefines, e.g.: */ - -#ifndef Skip_f2c_Undefs -#undef cray -#undef gcos -#undef mc68010 -#undef mc68020 -#undef mips -#undef pdp11 -#undef sgi -#undef sparc -#undef sun -#undef sun2 -#undef sun3 -#undef sun4 -#undef u370 -#undef u3b -#undef u3b2 -#undef u3b5 -#undef unix -#undef vax -#endif -#endif diff --git a/numpy-1.6.2/numpy/linalg/f2c_lite.c b/numpy-1.6.2/numpy/linalg/f2c_lite.c deleted file mode 100644 index 6402271c94..0000000000 --- a/numpy-1.6.2/numpy/linalg/f2c_lite.c +++ /dev/null @@ -1,492 +0,0 @@ -#include -#include -#include -#include -#include "f2c.h" - - -extern void s_wsfe(cilist *f) {;} -extern void e_wsfe(void) {;} -extern void do_fio(integer *c, char *s, ftnlen l) {;} - -/* You'll want this if you redo the *_lite.c files with the -C option - * to f2c for checking array subscripts. (It's not suggested you do that - * for production use, of course.) */ -extern int -s_rnge(char *var, int index, char *routine, int lineno) -{ - fprintf(stderr, "array index out-of-bounds for %s[%d] in routine %s:%d\n", - var, index, routine, lineno); - fflush(stderr); - abort(); -} - - -#ifdef KR_headers -extern double sqrt(); -double f__cabs(real, imag) double real, imag; -#else -#undef abs - -double f__cabs(double real, double imag) -#endif -{ -double temp; - -if(real < 0) - real = -real; -if(imag < 0) - imag = -imag; -if(imag > real){ - temp = real; - real = imag; - imag = temp; -} -if((imag+real) == real) - return((double)real); - -temp = imag/real; -temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/ -return(temp); -} - - - VOID -#ifdef KR_headers -d_cnjg(r, z) doublecomplex *r, *z; -#else -d_cnjg(doublecomplex *r, doublecomplex *z) -#endif -{ -r->r = z->r; -r->i = - z->i; -} - - -#ifdef KR_headers -double d_imag(z) doublecomplex *z; -#else -double d_imag(doublecomplex *z) -#endif -{ -return(z->i); -} - - -#define log10e 0.43429448190325182765 - -#ifdef KR_headers -double log(); -double 
d_lg10(x) doublereal *x; -#else -#undef abs - -double d_lg10(doublereal *x) -#endif -{ -return( log10e * log(*x) ); -} - - -#ifdef KR_headers -double d_sign(a,b) doublereal *a, *b; -#else -double d_sign(doublereal *a, doublereal *b) -#endif -{ -double x; -x = (*a >= 0 ? *a : - *a); -return( *b >= 0 ? x : -x); -} - - -#ifdef KR_headers -double floor(); -integer i_dnnt(x) doublereal *x; -#else -#undef abs - -integer i_dnnt(doublereal *x) -#endif -{ -return( (*x)>=0 ? - floor(*x + .5) : -floor(.5 - *x) ); -} - - -#ifdef KR_headers -double pow(); -double pow_dd(ap, bp) doublereal *ap, *bp; -#else -#undef abs - -double pow_dd(doublereal *ap, doublereal *bp) -#endif -{ -return(pow(*ap, *bp) ); -} - - -#ifdef KR_headers -double pow_di(ap, bp) doublereal *ap; integer *bp; -#else -double pow_di(doublereal *ap, integer *bp) -#endif -{ -double pow, x; -integer n; -unsigned long u; - -pow = 1; -x = *ap; -n = *bp; - -if(n != 0) - { - if(n < 0) - { - n = -n; - x = 1/x; - } - for(u = n; ; ) - { - if(u & 01) - pow *= x; - if(u >>= 1) - x *= x; - else - break; - } - } -return(pow); -} -/* Unless compiled with -DNO_OVERWRITE, this variant of s_cat allows the - * target of a concatenation to appear on its right-hand side (contrary - * to the Fortran 77 Standard, but in accordance with Fortran 90). 
- */ -#define NO_OVERWRITE - - -#ifndef NO_OVERWRITE - -#undef abs -#ifdef KR_headers - extern char *F77_aloc(); - extern void free(); - extern void exit_(); -#else - - extern char *F77_aloc(ftnlen, char*); -#endif - -#endif /* NO_OVERWRITE */ - - VOID -#ifdef KR_headers -s_cat(lp, rpp, rnp, np, ll) char *lp, *rpp[]; ftnlen rnp[], *np, ll; -#else -s_cat(char *lp, char *rpp[], ftnlen rnp[], ftnlen *np, ftnlen ll) -#endif -{ - ftnlen i, nc; - char *rp; - ftnlen n = *np; -#ifndef NO_OVERWRITE - ftnlen L, m; - char *lp0, *lp1; - - lp0 = 0; - lp1 = lp; - L = ll; - i = 0; - while(i < n) { - rp = rpp[i]; - m = rnp[i++]; - if (rp >= lp1 || rp + m <= lp) { - if ((L -= m) <= 0) { - n = i; - break; - } - lp1 += m; - continue; - } - lp0 = lp; - lp = lp1 = F77_aloc(L = ll, "s_cat"); - break; - } - lp1 = lp; -#endif /* NO_OVERWRITE */ - for(i = 0 ; i < n ; ++i) { - nc = ll; - if(rnp[i] < nc) - nc = rnp[i]; - ll -= nc; - rp = rpp[i]; - while(--nc >= 0) - *lp++ = *rp++; - } - while(--ll >= 0) - *lp++ = ' '; -#ifndef NO_OVERWRITE - if (lp0) { - memmove(lp0, lp1, L); - free(lp1); - } -#endif - } - - -/* compare two strings */ - -#ifdef KR_headers -integer s_cmp(a0, b0, la, lb) char *a0, *b0; ftnlen la, lb; -#else -integer s_cmp(char *a0, char *b0, ftnlen la, ftnlen lb) -#endif -{ -register unsigned char *a, *aend, *b, *bend; -a = (unsigned char *)a0; -b = (unsigned char *)b0; -aend = a + la; -bend = b + lb; - -if(la <= lb) - { - while(a < aend) - if(*a != *b) - return( *a - *b ); - else - { ++a; ++b; } - - while(b < bend) - if(*b != ' ') - return( ' ' - *b ); - else ++b; - } - -else - { - while(b < bend) - if(*a == *b) - { ++a; ++b; } - else - return( *a - *b ); - while(a < aend) - if(*a != ' ') - return(*a - ' '); - else ++a; - } -return(0); -} -/* Unless compiled with -DNO_OVERWRITE, this variant of s_copy allows the - * target of an assignment to appear on its right-hand side (contrary - * to the Fortran 77 Standard, but in accordance with Fortran 90), - * as in a(2:5) = a(4:7) . 
- */ - - - -/* assign strings: a = b */ - -#ifdef KR_headers -VOID s_copy(a, b, la, lb) register char *a, *b; ftnlen la, lb; -#else -void s_copy(register char *a, register char *b, ftnlen la, ftnlen lb) -#endif -{ - register char *aend, *bend; - - aend = a + la; - - if(la <= lb) -#ifndef NO_OVERWRITE - if (a <= b || a >= b + la) -#endif - while(a < aend) - *a++ = *b++; -#ifndef NO_OVERWRITE - else - for(b += la; a < aend; ) - *--aend = *--b; -#endif - - else { - bend = b + lb; -#ifndef NO_OVERWRITE - if (a <= b || a >= bend) -#endif - while(b < bend) - *a++ = *b++; -#ifndef NO_OVERWRITE - else { - a += lb; - while(b < bend) - *--a = *--bend; - a += lb; - } -#endif - while(a < aend) - *a++ = ' '; - } - } - - -#ifdef KR_headers -double f__cabs(); -double z_abs(z) doublecomplex *z; -#else -double f__cabs(double, double); -double z_abs(doublecomplex *z) -#endif -{ -return( f__cabs( z->r, z->i ) ); -} - - -#ifdef KR_headers -extern void sig_die(); -VOID z_div(c, a, b) doublecomplex *a, *b, *c; -#else -extern void sig_die(char*, int); -void z_div(doublecomplex *c, doublecomplex *a, doublecomplex *b) -#endif -{ -double ratio, den; -double abr, abi; - -if( (abr = b->r) < 0.) - abr = - abr; -if( (abi = b->i) < 0.) - abi = - abi; -if( abr <= abi ) - { - /*Let IEEE Infinties handle this ;( */ - /*if(abi == 0) - sig_die("complex division by zero", 1);*/ - ratio = b->r / b->i ; - den = b->i * (1 + ratio*ratio); - c->r = (a->r*ratio + a->i) / den; - c->i = (a->i*ratio - a->r) / den; - } - -else - { - ratio = b->i / b->r ; - den = b->r * (1 + ratio*ratio); - c->r = (a->r + a->i*ratio) / den; - c->i = (a->i - a->r*ratio) / den; - } - -} - - -#ifdef KR_headers -double sqrt(), f__cabs(); -VOID z_sqrt(r, z) doublecomplex *r, *z; -#else -#undef abs - -extern double f__cabs(double, double); -void z_sqrt(doublecomplex *r, doublecomplex *z) -#endif -{ -double mag; - -if( (mag = f__cabs(z->r, z->i)) == 0.) 
- r->r = r->i = 0.; -else if(z->r > 0) - { - r->r = sqrt(0.5 * (mag + z->r) ); - r->i = z->i / r->r / 2; - } -else - { - r->i = sqrt(0.5 * (mag - z->r) ); - if(z->i < 0) - r->i = - r->i; - r->r = z->i / r->i / 2; - } -} -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef KR_headers -integer pow_ii(ap, bp) integer *ap, *bp; -#else -integer pow_ii(integer *ap, integer *bp) -#endif -{ - integer pow, x, n; - unsigned long u; - - x = *ap; - n = *bp; - - if (n <= 0) { - if (n == 0 || x == 1) - return 1; - if (x != -1) - return x == 0 ? 1/x : 0; - n = -n; - } - u = n; - for(pow = 1; ; ) - { - if(u & 01) - pow *= x; - if(u >>= 1) - x *= x; - else - break; - } - return(pow); - } -#ifdef __cplusplus -} -#endif - -#ifdef KR_headers -extern void f_exit(); -VOID s_stop(s, n) char *s; ftnlen n; -#else -#undef abs -#undef min -#undef max -#ifdef __cplusplus -extern "C" { -#endif -#ifdef __cplusplus -extern "C" { -#endif -void f_exit(void); - -int s_stop(char *s, ftnlen n) -#endif -{ -int i; - -if(n > 0) - { - fprintf(stderr, "STOP "); - for(i = 0; iflags & CONTIGUOUS)) { - PyErr_Format(LapackError, - "Parameter %s is not contiguous in lapack_lite.%s", - obname, funname); - return 0; - } else if (!(((PyArrayObject *)ob)->descr->type_num == t)) { - PyErr_Format(LapackError, - "Parameter %s is not of type %s in lapack_lite.%s", - obname, tname, funname); - return 0; - } else if (((PyArrayObject *)ob)->descr->byteorder != '=' && - ((PyArrayObject *)ob)->descr->byteorder != '|') { - PyErr_Format(LapackError, - "Parameter %s has non-native byte order in lapack_lite.%s", - obname, funname); - return 0; - } else { - return 1; - } -} - -#define CHDATA(p) ((char *) (((PyArrayObject *)p)->data)) -#define SHDATA(p) ((short int *) (((PyArrayObject *)p)->data)) -#define DDATA(p) ((double *) (((PyArrayObject *)p)->data)) -#define FDATA(p) ((float *) (((PyArrayObject *)p)->data)) -#define CDATA(p) ((f2c_complex *) (((PyArrayObject *)p)->data)) -#define ZDATA(p) ((f2c_doublecomplex *) 
(((PyArrayObject *)p)->data)) -#define IDATA(p) ((int *) (((PyArrayObject *)p)->data)) - -static PyObject * -lapack_lite_dgeev(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - char jobvl; - char jobvr; - int n; - PyObject *a; - int lda; - PyObject *wr; - PyObject *wi; - PyObject *vl; - int ldvl; - PyObject *vr; - int ldvr; - PyObject *work; - int lwork; - int info; - TRY(PyArg_ParseTuple(args,"cciOiOOOiOiOii", - &jobvl,&jobvr,&n,&a,&lda,&wr,&wi,&vl,&ldvl, - &vr,&ldvr,&work,&lwork,&info)); - - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgeev")); - TRY(check_object(wr,PyArray_DOUBLE,"wr","PyArray_DOUBLE","dgeev")); - TRY(check_object(wi,PyArray_DOUBLE,"wi","PyArray_DOUBLE","dgeev")); - TRY(check_object(vl,PyArray_DOUBLE,"vl","PyArray_DOUBLE","dgeev")); - TRY(check_object(vr,PyArray_DOUBLE,"vr","PyArray_DOUBLE","dgeev")); - TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgeev")); - - lapack_lite_status__ = \ - FNAME(dgeev)(&jobvl,&jobvr,&n,DDATA(a),&lda,DDATA(wr),DDATA(wi), - DDATA(vl),&ldvl,DDATA(vr),&ldvr,DDATA(work),&lwork, - &info); - - return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","dgeev_", - lapack_lite_status__,"jobvl",jobvl,"jobvr",jobvr, - "n",n,"lda",lda,"ldvl",ldvl,"ldvr",ldvr, - "lwork",lwork,"info",info); -} - -static PyObject * -lapack_lite_dsyevd(PyObject *NPY_UNUSED(self), PyObject *args) -{ - /* Arguments */ - /* ========= */ - - char jobz; - /* JOBZ (input) CHARACTER*1 */ - /* = 'N': Compute eigenvalues only; */ - /* = 'V': Compute eigenvalues and eigenvectors. */ - - char uplo; - /* UPLO (input) CHARACTER*1 */ - /* = 'U': Upper triangle of A is stored; */ - /* = 'L': Lower triangle of A is stored. */ - - int n; - /* N (input) INTEGER */ - /* The order of the matrix A. N >= 0. */ - - PyObject *a; - /* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */ - /* On entry, the symmetric matrix A. 
If UPLO = 'U', the */ - /* leading N-by-N upper triangular part of A contains the */ - /* upper triangular part of the matrix A. If UPLO = 'L', */ - /* the leading N-by-N lower triangular part of A contains */ - /* the lower triangular part of the matrix A. */ - /* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */ - /* orthonormal eigenvectors of the matrix A. */ - /* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */ - /* or the upper triangle (if UPLO='U') of A, including the */ - /* diagonal, is destroyed. */ - - int lda; - /* LDA (input) INTEGER */ - /* The leading dimension of the array A. LDA >= max(1,N). */ - - PyObject *w; - /* W (output) DOUBLE PRECISION array, dimension (N) */ - /* If INFO = 0, the eigenvalues in ascending order. */ - - PyObject *work; - /* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */ - /* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */ - - int lwork; - /* LWORK (input) INTEGER */ - /* The length of the array WORK. LWORK >= max(1,3*N-1). */ - /* For optimal efficiency, LWORK >= (NB+2)*N, */ - /* where NB is the blocksize for DSYTRD returned by ILAENV. */ - - PyObject *iwork; - int liwork; - - int info; - /* INFO (output) INTEGER */ - /* = 0: successful exit */ - /* < 0: if INFO = -i, the i-th argument had an illegal value */ - /* > 0: if INFO = i, the algorithm failed to converge; i */ - /* off-diagonal elements of an intermediate tridiagonal */ - /* form did not converge to zero. 
*/ - - int lapack_lite_status__; - - TRY(PyArg_ParseTuple(args,"cciOiOOiOii", - &jobz,&uplo,&n,&a,&lda,&w,&work,&lwork, - &iwork,&liwork,&info)); - - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dsyevd")); - TRY(check_object(w,PyArray_DOUBLE,"w","PyArray_DOUBLE","dsyevd")); - TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dsyevd")); - TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","dsyevd")); - - lapack_lite_status__ = \ - FNAME(dsyevd)(&jobz,&uplo,&n,DDATA(a),&lda,DDATA(w),DDATA(work), - &lwork,IDATA(iwork),&liwork,&info); - - return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i}","dsyevd_", - lapack_lite_status__,"jobz",jobz,"uplo",uplo, - "n",n,"lda",lda,"lwork",lwork,"liwork",liwork,"info",info); -} - -static PyObject * -lapack_lite_zheevd(PyObject *NPY_UNUSED(self), PyObject *args) -{ - /* Arguments */ - /* ========= */ - - char jobz; - /* JOBZ (input) CHARACTER*1 */ - /* = 'N': Compute eigenvalues only; */ - /* = 'V': Compute eigenvalues and eigenvectors. */ - - char uplo; - /* UPLO (input) CHARACTER*1 */ - /* = 'U': Upper triangle of A is stored; */ - /* = 'L': Lower triangle of A is stored. */ - - int n; - /* N (input) INTEGER */ - /* The order of the matrix A. N >= 0. */ - - PyObject *a; - /* A (input/output) COMPLEX*16 array, dimension (LDA, N) */ - /* On entry, the Hermitian matrix A. If UPLO = 'U', the */ - /* leading N-by-N upper triangular part of A contains the */ - /* upper triangular part of the matrix A. If UPLO = 'L', */ - /* the leading N-by-N lower triangular part of A contains */ - /* the lower triangular part of the matrix A. */ - /* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */ - /* orthonormal eigenvectors of the matrix A. */ - /* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */ - /* or the upper triangle (if UPLO='U') of A, including the */ - /* diagonal, is destroyed. */ - - int lda; - /* LDA (input) INTEGER */ - /* The leading dimension of the array A. LDA >= max(1,N). 
*/ - - PyObject *w; - /* W (output) DOUBLE PRECISION array, dimension (N) */ - /* If INFO = 0, the eigenvalues in ascending order. */ - - PyObject *work; - /* WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) */ - /* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */ - - int lwork; - /* LWORK (input) INTEGER */ - /* The length of the array WORK. LWORK >= max(1,3*N-1). */ - /* For optimal efficiency, LWORK >= (NB+2)*N, */ - /* where NB is the blocksize for DSYTRD returned by ILAENV. */ - - PyObject *rwork; - /* RWORK (workspace) DOUBLE PRECISION array, dimension (max(1, 3*N-2)) */ - int lrwork; - - PyObject *iwork; - int liwork; - - int info; - /* INFO (output) INTEGER */ - /* = 0: successful exit */ - /* < 0: if INFO = -i, the i-th argument had an illegal value */ - /* > 0: if INFO = i, the algorithm failed to converge; i */ - /* off-diagonal elements of an intermediate tridiagonal */ - /* form did not converge to zero. */ - - int lapack_lite_status__; - - TRY(PyArg_ParseTuple(args,"cciOiOOiOiOii", - &jobz,&uplo,&n,&a,&lda,&w,&work,&lwork,&rwork, - &lrwork,&iwork,&liwork,&info)); - - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zheevd")); - TRY(check_object(w,PyArray_DOUBLE,"w","PyArray_DOUBLE","zheevd")); - TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zheevd")); - TRY(check_object(w,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zheevd")); - TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","zheevd")); - - lapack_lite_status__ = \ - FNAME(zheevd)(&jobz,&uplo,&n,ZDATA(a),&lda,DDATA(w),ZDATA(work), - &lwork,DDATA(rwork),&lrwork,IDATA(iwork),&liwork,&info); - - return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","zheevd_", - lapack_lite_status__,"jobz",jobz,"uplo",uplo,"n",n, - "lda",lda,"lwork",lwork,"lrwork",lrwork, - "liwork",liwork,"info",info); -} - -static PyObject * -lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m; - int n; - int nrhs; - PyObject 
*a; - int lda; - PyObject *b; - int ldb; - PyObject *s; - double rcond; - int rank; - PyObject *work; - PyObject *iwork; - int lwork; - int info; - TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOi", - &m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond, - &rank,&work,&lwork,&iwork,&info)); - - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgelsd")); - TRY(check_object(b,PyArray_DOUBLE,"b","PyArray_DOUBLE","dgelsd")); - TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","dgelsd")); - TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgelsd")); - TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","dgelsd")); - - lapack_lite_status__ = \ - FNAME(dgelsd)(&m,&n,&nrhs,DDATA(a),&lda,DDATA(b),&ldb, - DDATA(s),&rcond,&rank,DDATA(work),&lwork, - IDATA(iwork),&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:d,s:i,s:i,s:i}","dgelsd_", - lapack_lite_status__,"m",m,"n",n,"nrhs",nrhs, - "lda",lda,"ldb",ldb,"rcond",rcond,"rank",rank, - "lwork",lwork,"info",info); -} - -static PyObject * -lapack_lite_dgesv(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int n; - int nrhs; - PyObject *a; - int lda; - PyObject *ipiv; - PyObject *b; - int ldb; - int info; - TRY(PyArg_ParseTuple(args,"iiOiOOii",&n,&nrhs,&a,&lda,&ipiv,&b,&ldb,&info)); - - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgesv")); - TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","dgesv")); - TRY(check_object(b,PyArray_DOUBLE,"b","PyArray_DOUBLE","dgesv")); - - lapack_lite_status__ = \ - FNAME(dgesv)(&n,&nrhs,DDATA(a),&lda,IDATA(ipiv),DDATA(b),&ldb,&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgesv_", - lapack_lite_status__,"n",n,"nrhs",nrhs,"lda",lda, - "ldb",ldb,"info",info); -} - -static PyObject * -lapack_lite_dgesdd(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - char jobz; - int m; - int n; - PyObject *a; - int lda; - PyObject *s; - PyObject *u; - int ldu; - PyObject *vt; - int ldvt; - PyObject *work; - 
int lwork; - PyObject *iwork; - int info; - TRY(PyArg_ParseTuple(args,"ciiOiOOiOiOiOi", - &jobz,&m,&n,&a,&lda,&s,&u,&ldu,&vt,&ldvt, - &work,&lwork,&iwork,&info)); - - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgesdd")); - TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","dgesdd")); - TRY(check_object(u,PyArray_DOUBLE,"u","PyArray_DOUBLE","dgesdd")); - TRY(check_object(vt,PyArray_DOUBLE,"vt","PyArray_DOUBLE","dgesdd")); - TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgesdd")); - TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","dgesdd")); - - lapack_lite_status__ = \ - FNAME(dgesdd)(&jobz,&m,&n,DDATA(a),&lda,DDATA(s),DDATA(u),&ldu, - DDATA(vt),&ldvt,DDATA(work),&lwork,IDATA(iwork), - &info); - - if (info == 0 && lwork == -1) { - /* We need to check the result because - sometimes the "optimal" value is actually - too small. - Change it to the maximum of the minimum and the optimal. - */ - long work0 = (long) *DDATA(work); - int mn = MIN(m,n); - int mx = MAX(m,n); - - switch(jobz){ - case 'N': - work0 = MAX(work0,3*mn + MAX(mx,6*mn)+500); - break; - case 'O': - work0 = MAX(work0,3*mn*mn + \ - MAX(mx,5*mn*mn+4*mn+500)); - break; - case 'S': - case 'A': - work0 = MAX(work0,3*mn*mn + \ - MAX(mx,4*mn*(mn+1))+500); - break; - } - *DDATA(work) = (double) work0; - } - return Py_BuildValue("{s:i,s:c,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","dgesdd_", - lapack_lite_status__,"jobz",jobz,"m",m,"n",n, - "lda",lda,"ldu",ldu,"ldvt",ldvt,"lwork",lwork, - "info",info); -} - -static PyObject * -lapack_lite_dgetrf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m; - int n; - PyObject *a; - int lda; - PyObject *ipiv; - int info; - TRY(PyArg_ParseTuple(args,"iiOiOi",&m,&n,&a,&lda,&ipiv,&info)); - - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgetrf")); - TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","dgetrf")); - - lapack_lite_status__ = \ - FNAME(dgetrf)(&m,&n,DDATA(a),&lda,IDATA(ipiv),&info); - - 
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i}","dgetrf_",lapack_lite_status__, - "m",m,"n",n,"lda",lda,"info",info); -} - -static PyObject * -lapack_lite_dpotrf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int n; - PyObject *a; - int lda; - char uplo; - int info; - - TRY(PyArg_ParseTuple(args,"ciOii",&uplo,&n,&a,&lda,&info)); - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dpotrf")); - - lapack_lite_status__ = \ - FNAME(dpotrf)(&uplo,&n,DDATA(a),&lda,&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i}","dpotrf_",lapack_lite_status__, - "n",n,"lda",lda,"info",info); -} - -static PyObject * -lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m, n, lwork; - PyObject *a, *tau, *work; - int lda; - int info; - - TRY(PyArg_ParseTuple(args,"iiOiOOii",&m,&n,&a,&lda,&tau,&work,&lwork,&info)); - - /* check objects and convert to right storage order */ - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dgeqrf")); - TRY(check_object(tau,PyArray_DOUBLE,"tau","PyArray_DOUBLE","dgeqrf")); - TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dgeqrf")); - - lapack_lite_status__ = \ - FNAME(dgeqrf)(&m, &n, DDATA(a), &lda, DDATA(tau), - DDATA(work), &lwork, &info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgeqrf_", - lapack_lite_status__,"m",m,"n",n,"lda",lda, - "lwork",lwork,"info",info); -} - - -static PyObject * -lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m, n, k, lwork; - PyObject *a, *tau, *work; - int lda; - int info; - - TRY(PyArg_ParseTuple(args,"iiiOiOOii", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info)); - TRY(check_object(a,PyArray_DOUBLE,"a","PyArray_DOUBLE","dorgqr")); - TRY(check_object(tau,PyArray_DOUBLE,"tau","PyArray_DOUBLE","dorgqr")); - TRY(check_object(work,PyArray_DOUBLE,"work","PyArray_DOUBLE","dorgqr")); - lapack_lite_status__ = \ - FNAME(dorgqr)(&m, &n, &k, DDATA(a), &lda, DDATA(tau), 
DDATA(work), &lwork, &info); - - return Py_BuildValue("{s:i,s:i}","dorgqr_",lapack_lite_status__, - "info",info); -} - - -static PyObject * -lapack_lite_zgeev(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - char jobvl; - char jobvr; - int n; - PyObject *a; - int lda; - PyObject *w; - PyObject *vl; - int ldvl; - PyObject *vr; - int ldvr; - PyObject *work; - int lwork; - PyObject *rwork; - int info; - TRY(PyArg_ParseTuple(args,"cciOiOOiOiOiOi", - &jobvl,&jobvr,&n,&a,&lda,&w,&vl,&ldvl, - &vr,&ldvr,&work,&lwork,&rwork,&info)); - - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgeev")); - TRY(check_object(w,PyArray_CDOUBLE,"w","PyArray_CDOUBLE","zgeev")); - TRY(check_object(vl,PyArray_CDOUBLE,"vl","PyArray_CDOUBLE","zgeev")); - TRY(check_object(vr,PyArray_CDOUBLE,"vr","PyArray_CDOUBLE","zgeev")); - TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgeev")); - TRY(check_object(rwork,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zgeev")); - - lapack_lite_status__ = \ - FNAME(zgeev)(&jobvl,&jobvr,&n,ZDATA(a),&lda,ZDATA(w),ZDATA(vl), - &ldvl,ZDATA(vr),&ldvr,ZDATA(work),&lwork, - DDATA(rwork),&info); - - return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","zgeev_", - lapack_lite_status__,"jobvl",jobvl,"jobvr",jobvr, - "n",n,"lda",lda,"ldvl",ldvl,"ldvr",ldvr, - "lwork",lwork,"info",info); -} - -static PyObject * -lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m; - int n; - int nrhs; - PyObject *a; - int lda; - PyObject *b; - int ldb; - PyObject *s; - double rcond; - int rank; - PyObject *work; - int lwork; - PyObject *rwork; - PyObject *iwork; - int info; - TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOOi", - &m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond, - &rank,&work,&lwork,&rwork,&iwork,&info)); - - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgelsd")); - TRY(check_object(b,PyArray_CDOUBLE,"b","PyArray_CDOUBLE","zgelsd")); - 
TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","zgelsd")); - TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgelsd")); - TRY(check_object(rwork,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zgelsd")); - TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","zgelsd")); - - lapack_lite_status__ = \ - FNAME(zgelsd)(&m,&n,&nrhs,ZDATA(a),&lda,ZDATA(b),&ldb,DDATA(s),&rcond, - &rank,ZDATA(work),&lwork,DDATA(rwork),IDATA(iwork),&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgelsd_", - lapack_lite_status__,"m",m,"n",n,"nrhs",nrhs,"lda",lda, - "ldb",ldb,"rank",rank,"lwork",lwork,"info",info); -} - -static PyObject * -lapack_lite_zgesv(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int n; - int nrhs; - PyObject *a; - int lda; - PyObject *ipiv; - PyObject *b; - int ldb; - int info; - TRY(PyArg_ParseTuple(args,"iiOiOOii",&n,&nrhs,&a,&lda,&ipiv,&b,&ldb,&info)); - - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgesv")); - TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","zgesv")); - TRY(check_object(b,PyArray_CDOUBLE,"b","PyArray_CDOUBLE","zgesv")); - - lapack_lite_status__ = \ - FNAME(zgesv)(&n,&nrhs,ZDATA(a),&lda,IDATA(ipiv),ZDATA(b),&ldb,&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgesv_", - lapack_lite_status__,"n",n,"nrhs",nrhs,"lda",lda, - "ldb",ldb,"info",info); -} - -static PyObject * -lapack_lite_zgesdd(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - char jobz; - int m; - int n; - PyObject *a; - int lda; - PyObject *s; - PyObject *u; - int ldu; - PyObject *vt; - int ldvt; - PyObject *work; - int lwork; - PyObject *rwork; - PyObject *iwork; - int info; - TRY(PyArg_ParseTuple(args,"ciiOiOOiOiOiOOi", - &jobz,&m,&n,&a,&lda,&s,&u,&ldu, - &vt,&ldvt,&work,&lwork,&rwork,&iwork,&info)); - - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgesdd")); - TRY(check_object(s,PyArray_DOUBLE,"s","PyArray_DOUBLE","zgesdd")); - 
TRY(check_object(u,PyArray_CDOUBLE,"u","PyArray_CDOUBLE","zgesdd")); - TRY(check_object(vt,PyArray_CDOUBLE,"vt","PyArray_CDOUBLE","zgesdd")); - TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgesdd")); - TRY(check_object(rwork,PyArray_DOUBLE,"rwork","PyArray_DOUBLE","zgesdd")); - TRY(check_object(iwork,PyArray_INT,"iwork","PyArray_INT","zgesdd")); - - lapack_lite_status__ = \ - FNAME(zgesdd)(&jobz,&m,&n,ZDATA(a),&lda,DDATA(s),ZDATA(u),&ldu, - ZDATA(vt),&ldvt,ZDATA(work),&lwork,DDATA(rwork), - IDATA(iwork),&info); - - return Py_BuildValue("{s:i,s:c,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgesdd_", - lapack_lite_status__,"jobz",jobz,"m",m,"n",n, - "lda",lda,"ldu",ldu,"ldvt",ldvt,"lwork",lwork, - "info",info); -} - -static PyObject * -lapack_lite_zgetrf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m; - int n; - PyObject *a; - int lda; - PyObject *ipiv; - int info; - TRY(PyArg_ParseTuple(args,"iiOiOi",&m,&n,&a,&lda,&ipiv,&info)); - - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgetrf")); - TRY(check_object(ipiv,PyArray_INT,"ipiv","PyArray_INT","zgetrf")); - - lapack_lite_status__ = \ - FNAME(zgetrf)(&m,&n,ZDATA(a),&lda,IDATA(ipiv),&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i}","zgetrf_", - lapack_lite_status__,"m",m,"n",n,"lda",lda,"info",info); -} - -static PyObject * -lapack_lite_zpotrf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int n; - PyObject *a; - int lda; - char uplo; - int info; - - TRY(PyArg_ParseTuple(args,"ciOii",&uplo,&n,&a,&lda,&info)); - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zpotrf")); - lapack_lite_status__ = \ - FNAME(zpotrf)(&uplo,&n,ZDATA(a),&lda,&info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i}","zpotrf_", - lapack_lite_status__,"n",n,"lda",lda,"info",info); -} - -static PyObject * -lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m, n, lwork; - PyObject *a, *tau, *work; - int 
lda; - int info; - - TRY(PyArg_ParseTuple(args,"iiOiOOii",&m,&n,&a,&lda,&tau,&work,&lwork,&info)); - -/* check objects and convert to right storage order */ - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zgeqrf")); - TRY(check_object(tau,PyArray_CDOUBLE,"tau","PyArray_CDOUBLE","zgeqrf")); - TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zgeqrf")); - - lapack_lite_status__ = \ - FNAME(zgeqrf)(&m, &n, ZDATA(a), &lda, ZDATA(tau), ZDATA(work), &lwork, &info); - - return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgeqrf_",lapack_lite_status__,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info); -} - - -static PyObject * -lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args) -{ - int lapack_lite_status__; - int m, n, k, lwork; - PyObject *a, *tau, *work; - int lda; - int info; - - TRY(PyArg_ParseTuple(args,"iiiOiOOii", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info)); - TRY(check_object(a,PyArray_CDOUBLE,"a","PyArray_CDOUBLE","zungqr")); - TRY(check_object(tau,PyArray_CDOUBLE,"tau","PyArray_CDOUBLE","zungqr")); - TRY(check_object(work,PyArray_CDOUBLE,"work","PyArray_CDOUBLE","zungqr")); - - - lapack_lite_status__ = \ - FNAME(zungqr)(&m, &n, &k, ZDATA(a), &lda, ZDATA(tau), ZDATA(work), - &lwork, &info); - - return Py_BuildValue("{s:i,s:i}","zungqr_",lapack_lite_status__, - "info",info); -} - - - -#define STR(x) #x -#define lameth(name) {STR(name), lapack_lite_##name, METH_VARARGS, NULL} -static struct PyMethodDef lapack_lite_module_methods[] = { - lameth(zheevd), - lameth(dsyevd), - lameth(dgeev), - lameth(dgelsd), - lameth(dgesv), - lameth(dgesdd), - lameth(dgetrf), - lameth(dpotrf), - lameth(dgeqrf), - lameth(dorgqr), - lameth(zgeev), - lameth(zgelsd), - lameth(zgesv), - lameth(zgesdd), - lameth(zgetrf), - lameth(zpotrf), - lameth(zgeqrf), - lameth(zungqr), - { NULL,NULL,0, NULL} -}; - -static char lapack_lite_module_documentation[] = ""; - - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef moduledef = { - 
PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if PY_MAJOR_VERSION >= 3 -#define RETVAL m -PyObject *PyInit_lapack_lite(void) -#else -#define RETVAL -PyMODINIT_FUNC -initlapack_lite(void) -#endif -{ - PyObject *m,*d; -#if PY_MAJOR_VERSION >= 3 - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule4("lapack_lite", lapack_lite_module_methods, - lapack_lite_module_documentation, - (PyObject*)NULL,PYTHON_API_VERSION); -#endif - if (m == NULL) { - return RETVAL; - } - import_array(); - d = PyModule_GetDict(m); - LapackError = PyErr_NewException("lapack_lite.LapackError", NULL, NULL); - PyDict_SetItemString(d, "LapackError", LapackError); - - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/linalg/linalg.py b/numpy-1.6.2/numpy/linalg/linalg.py deleted file mode 100644 index 3434bd6564..0000000000 --- a/numpy-1.6.2/numpy/linalg/linalg.py +++ /dev/null @@ -1,1983 +0,0 @@ -"""Lite version of scipy.linalg. - -Notes ------ -This module is a lite version of the linalg.py module in SciPy which -contains high-level Python interface to the LAPACK library. The lite -version only accesses the following LAPACK functions: dgesv, zgesv, -dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, -zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. 
-""" - -__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', - 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', - 'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank', - 'LinAlgError'] - -from numpy.core import array, asarray, zeros, empty, transpose, \ - intc, single, double, csingle, cdouble, inexact, complexfloating, \ - newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \ - maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \ - isfinite, size, finfo, absolute, log, exp -from numpy.lib import triu -from numpy.linalg import lapack_lite -from numpy.matrixlib.defmatrix import matrix_power -from numpy.compat import asbytes - -# For Python2/3 compatibility -_N = asbytes('N') -_V = asbytes('V') -_A = asbytes('A') -_S = asbytes('S') -_L = asbytes('L') - -fortran_int = intc - -# Error object -class LinAlgError(Exception): - """ - Generic Python-exception-derived object raised by linalg functions. - - General purpose exception class, derived from Python's exception.Exception - class, programmatically raised in linalg functions when a Linear - Algebra-related condition would prevent further correct execution of the - function. 
- - Parameters - ---------- - None - - Examples - -------- - >>> from numpy import linalg as LA - >>> LA.inv(np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - File "...linalg.py", line 350, - in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) - File "...linalg.py", line 249, - in solve - raise LinAlgError, 'Singular matrix' - numpy.linalg.linalg.LinAlgError: Singular matrix - - """ - pass - -def _makearray(a): - new = asarray(a) - wrap = getattr(a, "__array_prepare__", new.__array_wrap__) - return new, wrap - -def isComplexType(t): - return issubclass(t, complexfloating) - -_real_types_map = {single : single, - double : double, - csingle : single, - cdouble : double} - -_complex_types_map = {single : csingle, - double : cdouble, - csingle : csingle, - cdouble : cdouble} - -def _realType(t, default=double): - return _real_types_map.get(t, default) - -def _complexType(t, default=cdouble): - return _complex_types_map.get(t, default) - -def _linalgRealType(t): - """Cast the type t to either double or cdouble.""" - return double - -_complex_types_map = {single : csingle, - double : cdouble, - csingle : csingle, - cdouble : cdouble} - -def _commonType(*arrays): - # in lite version, use higher precision (always double or cdouble) - result_type = single - is_complex = False - for a in arrays: - if issubclass(a.dtype.type, inexact): - if isComplexType(a.dtype.type): - is_complex = True - rt = _realType(a.dtype.type, default=None) - if rt is None: - # unsupported inexact scalar - raise TypeError("array type %s is unsupported in linalg" % - (a.dtype.name,)) - else: - rt = double - if rt is double: - result_type = double - if is_complex: - t = cdouble - result_type = _complex_types_map[result_type] - else: - t = double - return t, result_type - -# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). 
- -_fastCT = fastCopyAndTranspose - -def _to_native_byte_order(*arrays): - ret = [] - for arr in arrays: - if arr.dtype.byteorder not in ('=', '|'): - ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) - else: - ret.append(arr) - if len(ret) == 1: - return ret[0] - else: - return ret - -def _fastCopyAndTranspose(type, *arrays): - cast_arrays = () - for a in arrays: - if a.dtype.type is type: - cast_arrays = cast_arrays + (_fastCT(a),) - else: - cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) - if len(cast_arrays) == 1: - return cast_arrays[0] - else: - return cast_arrays - -def _assertRank2(*arrays): - for a in arrays: - if len(a.shape) != 2: - raise LinAlgError, '%d-dimensional array given. Array must be \ - two-dimensional' % len(a.shape) - -def _assertSquareness(*arrays): - for a in arrays: - if max(a.shape) != min(a.shape): - raise LinAlgError, 'Array must be square' - -def _assertFinite(*arrays): - for a in arrays: - if not (isfinite(a).all()): - raise LinAlgError, "Array must not contain infs or NaNs" - -def _assertNonEmpty(*arrays): - for a in arrays: - if size(a) == 0: - raise LinAlgError("Arrays cannot be empty") - - -# Linear equations - -def tensorsolve(a, b, axes=None): - """ - Solve the tensor equation ``a x = b`` for x. - - It is assumed that all indices of `x` are summed over in the product, - together with the rightmost indices of `a`, as is done in, for example, - ``tensordot(a, x, axes=len(b.shape))``. - - Parameters - ---------- - a : array_like - Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals - the shape of that sub-tensor of `a` consisting of the appropriate - number of its rightmost indices, and must be such that - ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be - 'square'). - b : array_like - Right-hand tensor, which can be of any shape. - axes : tuple of ints, optional - Axes in `a` to reorder to the right, before inversion. - If None (default), no reordering is done. 
- - Returns - ------- - x : ndarray, shape Q - - Raises - ------ - LinAlgError - If `a` is singular or not 'square' (in the above sense). - - See Also - -------- - tensordot, tensorinv, einsum - - Examples - -------- - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) - >>> b = np.random.randn(2*3, 4) - >>> x = np.linalg.tensorsolve(a, b) - >>> x.shape - (2, 3, 4) - >>> np.allclose(np.tensordot(a, x, axes=3), b) - True - - """ - a,wrap = _makearray(a) - b = asarray(b) - an = a.ndim - - if axes is not None: - allaxes = range(0, an) - for k in axes: - allaxes.remove(k) - allaxes.insert(an, k) - a = a.transpose(allaxes) - - oldshape = a.shape[-(an-b.ndim):] - prod = 1 - for k in oldshape: - prod *= k - - a = a.reshape(-1, prod) - b = b.ravel() - res = wrap(solve(a, b)) - res.shape = oldshape - return res - -def solve(a, b): - """ - Solve a linear matrix equation, or system of linear scalar equations. - - Computes the "exact" solution, `x`, of the well-determined, i.e., full - rank, linear matrix equation `ax = b`. - - Parameters - ---------- - a : array_like, shape (M, M) - Coefficient matrix. - b : array_like, shape (M,) or (M, N) - Ordinate or "dependent variable" values. - - Returns - ------- - x : ndarray, shape (M,) or (M, N) depending on b - Solution to the system a x = b - - Raises - ------ - LinAlgError - If `a` is singular or not square. - - Notes - ----- - `solve` is a wrapper for the LAPACK routines `dgesv`_ and - `zgesv`_, the former being used if `a` is real-valued, the latter if - it is complex-valued. The solution to the system of linear equations - is computed using an LU decomposition [1]_ with partial pivoting and - row interchanges. - - .. _dgesv: http://www.netlib.org/lapack/double/dgesv.f - - .. 
_zgesv: http://www.netlib.org/lapack/complex16/zgesv.f - - `a` must be square and of full-rank, i.e., all rows (or, equivalently, - columns) must be linearly independent; if either is not true, use - `lstsq` for the least-squares best "solution" of the - system/equation. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pg. 22. - - Examples - -------- - Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: - - >>> a = np.array([[3,1], [1,2]]) - >>> b = np.array([9,8]) - >>> x = np.linalg.solve(a, b) - >>> x - array([ 2., 3.]) - - Check that the solution is correct: - - >>> (np.dot(a, x) == b).all() - True - - """ - a, _ = _makearray(a) - b, wrap = _makearray(b) - one_eq = len(b.shape) == 1 - if one_eq: - b = b[:, newaxis] - _assertRank2(a, b) - _assertSquareness(a) - n_eq = a.shape[0] - n_rhs = b.shape[1] - if n_eq != b.shape[0]: - raise LinAlgError, 'Incompatible dimensions' - t, result_t = _commonType(a, b) -# lapack_routine = _findLapackRoutine('gesv', t) - if isComplexType(t): - lapack_routine = lapack_lite.zgesv - else: - lapack_routine = lapack_lite.dgesv - a, b = _fastCopyAndTranspose(t, a, b) - a, b = _to_native_byte_order(a, b) - pivots = zeros(n_eq, fortran_int) - results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0) - if results['info'] > 0: - raise LinAlgError, 'Singular matrix' - if one_eq: - return wrap(b.ravel().astype(result_t)) - else: - return wrap(b.transpose().astype(result_t)) - - -def tensorinv(a, ind=2): - """ - Compute the 'inverse' of an N-dimensional array. - - The result is an inverse for `a` relative to the tensordot operation - ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, - ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the - tensordot operation. - - Parameters - ---------- - a : array_like - Tensor to 'invert'. Its shape must be 'square', i. 
e., - ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. - ind : int, optional - Number of first indices that are involved in the inverse sum. - Must be a positive integer, default is 2. - - Returns - ------- - b : ndarray - `a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``. - - Raises - ------ - LinAlgError - If `a` is singular or not 'square' (in the above sense). - - See Also - -------- - tensordot, tensorsolve - - Examples - -------- - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) - >>> ainv = np.linalg.tensorinv(a, ind=2) - >>> ainv.shape - (8, 3, 4, 6) - >>> b = np.random.randn(4, 6) - >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) - True - - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) - >>> ainv = np.linalg.tensorinv(a, ind=1) - >>> ainv.shape - (8, 3, 24) - >>> b = np.random.randn(24) - >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) - True - - """ - a = asarray(a) - oldshape = a.shape - prod = 1 - if ind > 0: - invshape = oldshape[ind:] + oldshape[:ind] - for k in oldshape[ind:]: - prod *= k - else: - raise ValueError, "Invalid ind argument." - a = a.reshape(prod, -1) - ia = inv(a) - return ia.reshape(*invshape) - - -# Matrix inversion - -def inv(a): - """ - Compute the (multiplicative) inverse of a matrix. - - Given a square matrix `a`, return the matrix `ainv` satisfying - ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. - - Parameters - ---------- - a : array_like, shape (M, M) - Matrix to be inverted. - - Returns - ------- - ainv : ndarray or matrix, shape (M, M) - (Multiplicative) inverse of the matrix `a`. - - Raises - ------ - LinAlgError - If `a` is singular or not square. 
- - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1., 2.], [3., 4.]]) - >>> ainv = LA.inv(a) - >>> np.allclose(np.dot(a, ainv), np.eye(2)) - True - >>> np.allclose(np.dot(ainv, a), np.eye(2)) - True - - If a is a matrix object, then the return value is a matrix as well: - - >>> ainv = LA.inv(np.matrix(a)) - >>> ainv - matrix([[-2. , 1. ], - [ 1.5, -0.5]]) - - """ - a, wrap = _makearray(a) - return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) - - -# Cholesky decomposition - -def cholesky(a): - """ - Cholesky decomposition. - - Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, - where `L` is lower-triangular and .H is the conjugate transpose operator - (which is the ordinary transpose if `a` is real-valued). `a` must be - Hermitian (symmetric if real-valued) and positive-definite. Only `L` is - actually returned. - - Parameters - ---------- - a : array_like, shape (M, M) - Hermitian (symmetric if all elements are real), positive-definite - input matrix. - - Returns - ------- - L : ndarray, or matrix object if `a` is, shape (M, M) - Lower-triangular Cholesky factor of a. - - Raises - ------ - LinAlgError - If the decomposition fails, for example, if `a` is not - positive-definite. - - Notes - ----- - The Cholesky decomposition is often used as a fast way of solving - - .. math:: A \\mathbf{x} = \\mathbf{b} - - (when `A` is both Hermitian/symmetric and positive-definite). - - First, we solve for :math:`\\mathbf{y}` in - - .. math:: L \\mathbf{y} = \\mathbf{b}, - - and then for :math:`\\mathbf{x}` in - - .. math:: L.H \\mathbf{x} = \\mathbf{y}. 
- - Examples - -------- - >>> A = np.array([[1,-2j],[2j,5]]) - >>> A - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> L = np.linalg.cholesky(A) - >>> L - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - >>> np.dot(L, L.T.conj()) # verify that L * L.H = A - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? - >>> np.linalg.cholesky(A) # an ndarray object is returned - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - >>> # But a matrix object is returned if A is a matrix object - >>> LA.cholesky(np.matrix(A)) - matrix([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - - """ - a, wrap = _makearray(a) - _assertRank2(a) - _assertSquareness(a) - t, result_t = _commonType(a) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - m = a.shape[0] - n = a.shape[1] - if isComplexType(t): - lapack_routine = lapack_lite.zpotrf - else: - lapack_routine = lapack_lite.dpotrf - results = lapack_routine(_L, n, a, m, 0) - if results['info'] > 0: - raise LinAlgError, 'Matrix is not positive definite - \ - Cholesky decomposition cannot be computed' - s = triu(a, k=0).transpose() - if (s.dtype != result_t): - s = s.astype(result_t) - return wrap(s) - -# QR decompostion - -def qr(a, mode='full'): - """ - Compute the qr factorization of a matrix. - - Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is - upper-triangular. - - Parameters - ---------- - a : array_like - Matrix to be factored, of shape (M, N). - mode : {'full', 'r', 'economic'}, optional - Specifies the values to be returned. 'full' is the default. - Economic mode is slightly faster then 'r' mode if only `r` is needed. - - Returns - ------- - q : ndarray of float or complex, optional - The orthonormal matrix, of shape (M, K). Only returned if - ``mode='full'``. - r : ndarray of float or complex, optional - The upper-triangular matrix, of shape (K, N) with K = min(M, N). - Only returned when ``mode='full'`` or ``mode='r'``. 
- a2 : ndarray of float or complex, optional - Array of shape (M, N), only returned when ``mode='economic``'. - The diagonal and the upper triangle of `a2` contains `r`, while - the rest of the matrix is undefined. - - Raises - ------ - LinAlgError - If factoring fails. - - Notes - ----- - This is an interface to the LAPACK routines dgeqrf, zgeqrf, - dorgqr, and zungqr. - - For more information on the qr factorization, see for example: - http://en.wikipedia.org/wiki/QR_factorization - - Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`, - all the return values will be matrices too. - - Examples - -------- - >>> a = np.random.randn(9, 6) - >>> q, r = np.linalg.qr(a) - >>> np.allclose(a, np.dot(q, r)) # a does equal qr - True - >>> r2 = np.linalg.qr(a, mode='r') - >>> r3 = np.linalg.qr(a, mode='economic') - >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' - True - >>> # But only triu parts are guaranteed equal when mode='economic' - >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) - True - - Example illustrating a common use of `qr`: solving of least squares - problems - - What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for - the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points - and you'll see that it should be y0 = 0, m = 1.) The answer is provided - by solving the over-determined matrix equation ``Ax = b``, where:: - - A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) - x = array([[y0], [m]]) - b = array([[1], [0], [2], [1]]) - - If A = qr such that q is orthonormal (which is always possible via - Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, - however, we simply use `lstsq`.) 
- - >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) - >>> A - array([[0, 1], - [1, 1], - [1, 1], - [2, 1]]) - >>> b = np.array([1, 0, 2, 1]) - >>> q, r = LA.qr(A) - >>> p = np.dot(q.T, b) - >>> np.dot(LA.inv(r), p) - array([ 1.1e-16, 1.0e+00]) - - """ - a, wrap = _makearray(a) - _assertRank2(a) - m, n = a.shape - t, result_t = _commonType(a) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - mn = min(m, n) - tau = zeros((mn,), t) - if isComplexType(t): - lapack_routine = lapack_lite.zgeqrf - routine_name = 'zgeqrf' - else: - lapack_routine = lapack_lite.dgeqrf - routine_name = 'dgeqrf' - - # calculate optimal size of work data 'work' - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, n, a, m, tau, work, -1, 0) - if results['info'] != 0: - raise LinAlgError, '%s returns %d' % (routine_name, results['info']) - - # do qr decomposition - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(m, n, a, m, tau, work, lwork, 0) - - if results['info'] != 0: - raise LinAlgError, '%s returns %d' % (routine_name, results['info']) - - # economic mode. Isn't actually economic. 
- if mode[0] == 'e': - if t != result_t : - a = a.astype(result_t) - return a.T - - # generate r - r = _fastCopyAndTranspose(result_t, a[:,:mn]) - for i in range(mn): - r[i,:i].fill(0.0) - - # 'r'-mode, that is, calculate only r - if mode[0] == 'r': - return r - - # from here on: build orthonormal matrix q from a - - if isComplexType(t): - lapack_routine = lapack_lite.zungqr - routine_name = 'zungqr' - else: - lapack_routine = lapack_lite.dorgqr - routine_name = 'dorgqr' - - # determine optimal lwork - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0) - if results['info'] != 0: - raise LinAlgError, '%s returns %d' % (routine_name, results['info']) - - # compute q - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0) - if results['info'] != 0: - raise LinAlgError, '%s returns %d' % (routine_name, results['info']) - - q = _fastCopyAndTranspose(result_t, a[:mn,:]) - - return wrap(q), wrap(r) - - -# Eigenvalues - - -def eigvals(a): - """ - Compute the eigenvalues of a general matrix. - - Main difference between `eigvals` and `eig`: the eigenvectors aren't - returned. - - Parameters - ---------- - a : array_like, shape (M, M) - A complex- or real-valued matrix whose eigenvalues will be computed. - - Returns - ------- - w : ndarray, shape (M,) - The eigenvalues, each repeated according to its multiplicity. - They are not necessarily ordered, nor are they necessarily - real for real matrices. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eig : eigenvalues and right eigenvectors of general arrays - eigvalsh : eigenvalues of symmetric or Hermitian arrays. - eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. 
- - Notes - ----- - This is a simple interface to the LAPACK routines dgeev and zgeev - that sets those routines' flags to return only the eigenvalues of - general real and complex arrays, respectively. - - Examples - -------- - Illustration, using the fact that the eigenvalues of a diagonal matrix - are its diagonal elements, that multiplying a matrix on the left - by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose - of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, - if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as - ``A``: - - >>> from numpy import linalg as LA - >>> x = np.random.random() - >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) - >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) - (1.0, 1.0, 0.0) - - Now multiply a diagonal matrix by Q on one side and by Q.T on the other: - - >>> D = np.diag((-1,1)) - >>> LA.eigvals(D) - array([-1., 1.]) - >>> A = np.dot(Q, D) - >>> A = np.dot(A, Q.T) - >>> LA.eigvals(A) - array([ 1., -1.]) - - """ - a, wrap = _makearray(a) - _assertRank2(a) - _assertSquareness(a) - _assertFinite(a) - t, result_t = _commonType(a) - real_t = _linalgRealType(t) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - n = a.shape[0] - dummy = zeros((1,), t) - if isComplexType(t): - lapack_routine = lapack_lite.zgeev - w = zeros((n,), t) - rwork = zeros((n,), real_t) - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(_N, _N, n, a, n, w, - dummy, 1, dummy, 1, work, -1, rwork, 0) - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(_N, _N, n, a, n, w, - dummy, 1, dummy, 1, work, lwork, rwork, 0) - else: - lapack_routine = lapack_lite.dgeev - wr = zeros((n,), t) - wi = zeros((n,), t) - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(_N, _N, n, a, n, wr, wi, - dummy, 1, dummy, 1, work, -1, 0) - lwork = int(work[0]) - work = zeros((lwork,), t) - results = 
lapack_routine(_N, _N, n, a, n, wr, wi, - dummy, 1, dummy, 1, work, lwork, 0) - if all(wi == 0.): - w = wr - result_t = _realType(result_t) - else: - w = wr+1j*wi - result_t = _complexType(result_t) - if results['info'] > 0: - raise LinAlgError, 'Eigenvalues did not converge' - return w.astype(result_t) - - -def eigvalsh(a, UPLO='L'): - """ - Compute the eigenvalues of a Hermitian or real symmetric matrix. - - Main difference from eigh: the eigenvectors are not computed. - - Parameters - ---------- - a : array_like, shape (M, M) - A complex- or real-valued matrix whose eigenvalues are to be - computed. - UPLO : {'L', 'U'}, optional - Specifies whether the calculation is done with the lower triangular - part of `a` ('L', default) or the upper triangular part ('U'). - - Returns - ------- - w : ndarray, shape (M,) - The eigenvalues, not necessarily ordered, each repeated according to - its multiplicity. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. - eigvals : eigenvalues of general real or complex arrays. - eig : eigenvalues and right eigenvectors of general real or complex - arrays. - - Notes - ----- - This is a simple interface to the LAPACK routines dsyevd and zheevd - that sets those routines' flags to return only the eigenvalues of - real symmetric and complex Hermitian arrays, respectively. 
- - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, -2j], [2j, 5]]) - >>> LA.eigvalsh(a) - array([ 0.17157288+0.j, 5.82842712+0.j]) - - """ - UPLO = asbytes(UPLO) - a, wrap = _makearray(a) - _assertRank2(a) - _assertSquareness(a) - t, result_t = _commonType(a) - real_t = _linalgRealType(t) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - n = a.shape[0] - liwork = 5*n+3 - iwork = zeros((liwork,), fortran_int) - if isComplexType(t): - lapack_routine = lapack_lite.zheevd - w = zeros((n,), real_t) - lwork = 1 - work = zeros((lwork,), t) - lrwork = 1 - rwork = zeros((lrwork,), real_t) - results = lapack_routine(_N, UPLO, n, a, n, w, work, -1, - rwork, -1, iwork, liwork, 0) - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - lrwork = int(rwork[0]) - rwork = zeros((lrwork,), real_t) - results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork, - rwork, lrwork, iwork, liwork, 0) - else: - lapack_routine = lapack_lite.dsyevd - w = zeros((n,), t) - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(_N, UPLO, n, a, n, w, work, -1, - iwork, liwork, 0) - lwork = int(work[0]) - work = zeros((lwork,), t) - results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork, - iwork, liwork, 0) - if results['info'] > 0: - raise LinAlgError, 'Eigenvalues did not converge' - return w.astype(result_t) - -def _convertarray(a): - t, result_t = _commonType(a) - a = _fastCT(a.astype(t)) - return a, t, result_t - - -# Eigenvectors - - -def eig(a): - """ - Compute the eigenvalues and right eigenvectors of a square array. - - Parameters - ---------- - a : array_like, shape (M, M) - A square array of real or complex elements. - - Returns - ------- - w : ndarray, shape (M,) - The eigenvalues, each repeated according to its multiplicity. - The eigenvalues are not necessarily ordered, nor are they - necessarily real for real arrays (though for real arrays - complex-valued eigenvalues should occur in conjugate pairs). 
- - v : ndarray, shape (M, M) - The normalized (unit "length") eigenvectors, such that the - column ``v[:,i]`` is the eigenvector corresponding to the - eigenvalue ``w[i]``. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric) - array. - - eigvals : eigenvalues of a non-symmetric array. - - Notes - ----- - This is a simple interface to the LAPACK routines dgeev and zgeev - which compute the eigenvalues and eigenvectors of, respectively, - general real- and complex-valued square arrays. - - The number `w` is an eigenvalue of `a` if there exists a vector - `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and - `v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]`` - for :math:`i \\in \\{0,...,M-1\\}`. - - The array `v` of eigenvectors may not be of maximum rank, that is, some - of the columns may be linearly dependent, although round-off error may - obscure that fact. If the eigenvalues are all different, then theoretically - the eigenvectors are linearly independent. Likewise, the (complex-valued) - matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., - if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate - transpose of `a`. - - Finally, it is emphasized that `v` consists of the *right* (as in - right-hand side) eigenvectors of `a`. A vector `y` satisfying - ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* - eigenvector of `a`, and, in general, the left and right eigenvectors - of a matrix are not necessarily the (perhaps conjugate) transposes - of each other. - - References - ---------- - G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, - Academic Press, Inc., 1980, Various pp. - - Examples - -------- - >>> from numpy import linalg as LA - - (Almost) trivial example with real e-values and e-vectors. 
- - >>> w, v = LA.eig(np.diag((1, 2, 3))) - >>> w; v - array([ 1., 2., 3.]) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - Real matrix possessing complex e-values and e-vectors; note that the - e-values are complex conjugates of each other. - - >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) - >>> w; v - array([ 1. + 1.j, 1. - 1.j]) - array([[ 0.70710678+0.j , 0.70710678+0.j ], - [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) - - Complex-valued matrix with real e-values (but complex-valued e-vectors); - note that a.conj().T = a, i.e., a is Hermitian. - - >>> a = np.array([[1, 1j], [-1j, 1]]) - >>> w, v = LA.eig(a) - >>> w; v - array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} - array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], - [ 0.70710678+0.j , 0.00000000+0.70710678j]]) - - Be careful about round-off error! - - >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) - >>> # Theor. e-values are 1 +/- 1e-9 - >>> w, v = LA.eig(a) - >>> w; v - array([ 1., 1.]) - array([[ 1., 0.], - [ 0., 1.]]) - - """ - a, wrap = _makearray(a) - _assertRank2(a) - _assertSquareness(a) - _assertFinite(a) - a, t, result_t = _convertarray(a) # convert to double or cdouble type - a = _to_native_byte_order(a) - real_t = _linalgRealType(t) - n = a.shape[0] - dummy = zeros((1,), t) - if isComplexType(t): - # Complex routines take different arguments - lapack_routine = lapack_lite.zgeev - w = zeros((n,), t) - v = zeros((n, n), t) - lwork = 1 - work = zeros((lwork,), t) - rwork = zeros((2*n,), real_t) - results = lapack_routine(_N, _V, n, a, n, w, - dummy, 1, v, n, work, -1, rwork, 0) - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(_N, _V, n, a, n, w, - dummy, 1, v, n, work, lwork, rwork, 0) - else: - lapack_routine = lapack_lite.dgeev - wr = zeros((n,), t) - wi = zeros((n,), t) - vr = zeros((n, n), t) - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(_N, _V, n, a, n, wr, wi, - dummy, 1, vr, n, work, -1, 0) - 
lwork = int(work[0]) - work = zeros((lwork,), t) - results = lapack_routine(_N, _V, n, a, n, wr, wi, - dummy, 1, vr, n, work, lwork, 0) - if all(wi == 0.0): - w = wr - v = vr - result_t = _realType(result_t) - else: - w = wr+1j*wi - v = array(vr, w.dtype) - ind = flatnonzero(wi != 0.0) # indices of complex e-vals - for i in range(len(ind)//2): - v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]] - v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]] - result_t = _complexType(result_t) - - if results['info'] > 0: - raise LinAlgError, 'Eigenvalues did not converge' - vt = v.transpose().astype(result_t) - return w.astype(result_t), wrap(vt) - - -def eigh(a, UPLO='L'): - """ - Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix. - - Returns two objects, a 1-D array containing the eigenvalues of `a`, and - a 2-D square array or matrix (depending on the input type) of the - corresponding eigenvectors (in columns). - - Parameters - ---------- - a : array_like, shape (M, M) - A complex Hermitian or real symmetric matrix. - UPLO : {'L', 'U'}, optional - Specifies whether the calculation is done with the lower triangular - part of `a` ('L', default) or the upper triangular part ('U'). - - Returns - ------- - w : ndarray, shape (M,) - The eigenvalues, not necessarily ordered. - v : ndarray, or matrix object if `a` is, shape (M, M) - The column ``v[:, i]`` is the normalized eigenvector corresponding - to the eigenvalue ``w[i]``. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigvalsh : eigenvalues of symmetric or Hermitian arrays. - eig : eigenvalues and right eigenvectors for non-symmetric arrays. - eigvals : eigenvalues of non-symmetric arrays. - - Notes - ----- - This is a simple interface to the LAPACK routines dsyevd and zheevd, - which compute the eigenvalues and eigenvectors of real symmetric and - complex Hermitian arrays, respectively. 
- - The eigenvalues of real symmetric or complex Hermitian matrices are - always real. [1]_ The array `v` of (column) eigenvectors is unitary - and `a`, `w`, and `v` satisfy the equations - ``dot(a, v[:, i]) = w[i] * v[:, i]``. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pg. 222. - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, -2j], [2j, 5]]) - >>> a - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> w, v = LA.eigh(a) - >>> w; v - array([ 0.17157288, 5.82842712]) - array([[-0.92387953+0.j , -0.38268343+0.j ], - [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) - - >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair - array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) - >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair - array([ 0.+0.j, 0.+0.j]) - - >>> A = np.matrix(a) # what happens if input is a matrix object - >>> A - matrix([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> w, v = LA.eigh(A) - >>> w; v - array([ 0.17157288, 5.82842712]) - matrix([[-0.92387953+0.j , -0.38268343+0.j ], - [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) - - """ - UPLO = asbytes(UPLO) - a, wrap = _makearray(a) - _assertRank2(a) - _assertSquareness(a) - t, result_t = _commonType(a) - real_t = _linalgRealType(t) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - n = a.shape[0] - liwork = 5*n+3 - iwork = zeros((liwork,), fortran_int) - if isComplexType(t): - lapack_routine = lapack_lite.zheevd - w = zeros((n,), real_t) - lwork = 1 - work = zeros((lwork,), t) - lrwork = 1 - rwork = zeros((lrwork,), real_t) - results = lapack_routine(_V, UPLO, n, a, n, w, work, -1, - rwork, -1, iwork, liwork, 0) - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - lrwork = int(rwork[0]) - rwork = zeros((lrwork,), real_t) - results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork, - rwork, lrwork, iwork, 
liwork, 0) - else: - lapack_routine = lapack_lite.dsyevd - w = zeros((n,), t) - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(_V, UPLO, n, a, n, w, work, -1, - iwork, liwork, 0) - lwork = int(work[0]) - work = zeros((lwork,), t) - results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork, - iwork, liwork, 0) - if results['info'] > 0: - raise LinAlgError, 'Eigenvalues did not converge' - at = a.transpose().astype(result_t) - return w.astype(_realType(result_t)), wrap(at) - - -# Singular value decomposition - -def svd(a, full_matrices=1, compute_uv=1): - """ - Singular Value Decomposition. - - Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v` - are unitary and `s` is a 1-d array of `a`'s singular values. - - Parameters - ---------- - a : array_like - A real or complex matrix of shape (`M`, `N`) . - full_matrices : bool, optional - If True (default), `u` and `v` have the shapes (`M`, `M`) and - (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`) - and (`K`, `N`), respectively, where `K` = min(`M`, `N`). - compute_uv : bool, optional - Whether or not to compute `u` and `v` in addition to `s`. True - by default. - - Returns - ------- - u : ndarray - Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`) - depending on value of ``full_matrices``. - s : ndarray - The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is - a 1-d array of length min(`M`, `N`). - v : ndarray - Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on - ``full_matrices``. - - Raises - ------ - LinAlgError - If SVD computation does not converge. - - Notes - ----- - The SVD is commonly written as ``a = U S V.H``. The `v` returned - by this function is ``V.H`` and ``u = U``. - - If ``U`` is a unitary matrix, it means that it - satisfies ``U.H = inv(U)``. - - The rows of `v` are the eigenvectors of ``a.H a``. The columns - of `u` are the eigenvectors of ``a a.H``. 
For row ``i`` in - `v` and column ``i`` in `u`, the corresponding eigenvalue is - ``s[i]**2``. - - If `a` is a `matrix` object (as opposed to an `ndarray`), then so - are all the return values. - - Examples - -------- - >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) - - Reconstruction based on full SVD: - - >>> U, s, V = np.linalg.svd(a, full_matrices=True) - >>> U.shape, V.shape, s.shape - ((9, 6), (6, 6), (6,)) - >>> S = np.zeros((9, 6), dtype=complex) - >>> S[:6, :6] = np.diag(s) - >>> np.allclose(a, np.dot(U, np.dot(S, V))) - True - - Reconstruction based on reduced SVD: - - >>> U, s, V = np.linalg.svd(a, full_matrices=False) - >>> U.shape, V.shape, s.shape - ((9, 6), (6, 6), (6,)) - >>> S = np.diag(s) - >>> np.allclose(a, np.dot(U, np.dot(S, V))) - True - - """ - a, wrap = _makearray(a) - _assertRank2(a) - _assertNonEmpty(a) - m, n = a.shape - t, result_t = _commonType(a) - real_t = _linalgRealType(t) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - s = zeros((min(n, m),), real_t) - if compute_uv: - if full_matrices: - nu = m - nvt = n - option = _A - else: - nu = min(n, m) - nvt = min(n, m) - option = _S - u = zeros((nu, m), t) - vt = zeros((n, nvt), t) - else: - option = _N - nu = 1 - nvt = 1 - u = empty((1, 1), t) - vt = empty((1, 1), t) - - iwork = zeros((8*min(m, n),), fortran_int) - if isComplexType(t): - lapack_routine = lapack_lite.zgesdd - lrwork = min(m,n)*max(5*min(m,n)+7, 2*max(m,n)+2*min(m,n)+1) - rwork = zeros((lrwork,), real_t) - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt, - work, -1, rwork, iwork, 0) - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt, - work, lwork, rwork, iwork, 0) - else: - lapack_routine = lapack_lite.dgesdd - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt, - work, -1, iwork, 0) - lwork = int(work[0]) - work = 
zeros((lwork,), t) - results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt, - work, lwork, iwork, 0) - if results['info'] > 0: - raise LinAlgError, 'SVD did not converge' - s = s.astype(_realType(result_t)) - if compute_uv: - u = u.transpose().astype(result_t) - vt = vt.transpose().astype(result_t) - return wrap(u), s, wrap(vt) - else: - return s - -def cond(x, p=None): - """ - Compute the condition number of a matrix. - - This function is capable of returning the condition number using - one of seven different norms, depending on the value of `p` (see - Parameters below). - - Parameters - ---------- - x : array_like, shape (M, N) - The matrix whose condition number is sought. - p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional - Order of the norm: - - ===== ============================ - p norm for matrices - ===== ============================ - None 2-norm, computed directly using the ``SVD`` - 'fro' Frobenius norm - inf max(sum(abs(x), axis=1)) - -inf min(sum(abs(x), axis=1)) - 1 max(sum(abs(x), axis=0)) - -1 min(sum(abs(x), axis=0)) - 2 2-norm (largest sing. value) - -2 smallest singular value - ===== ============================ - - inf means the numpy.inf object, and the Frobenius norm is - the root-of-sum-of-squares norm. - - Returns - ------- - c : {float, inf} - The condition number of the matrix. May be infinite. - - See Also - -------- - numpy.linalg.linalg.norm - - Notes - ----- - The condition number of `x` is defined as the norm of `x` times the - norm of the inverse of `x` [1]_; the norm can be the usual L2-norm - (root-of-sum-of-squares) or one of a number of other matrix norms. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, - Academic Press, Inc., 1980, pg. 285. 
- - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) - >>> a - array([[ 1, 0, -1], - [ 0, 1, 0], - [ 1, 0, 1]]) - >>> LA.cond(a) - 1.4142135623730951 - >>> LA.cond(a, 'fro') - 3.1622776601683795 - >>> LA.cond(a, np.inf) - 2.0 - >>> LA.cond(a, -np.inf) - 1.0 - >>> LA.cond(a, 1) - 2.0 - >>> LA.cond(a, -1) - 1.0 - >>> LA.cond(a, 2) - 1.4142135623730951 - >>> LA.cond(a, -2) - 0.70710678118654746 - >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) - 0.70710678118654746 - - """ - x = asarray(x) # in case we have a matrix - if p is None: - s = svd(x,compute_uv=False) - return s[0]/s[-1] - else: - return norm(x,p)*norm(inv(x),p) - - -def matrix_rank(M, tol=None): - """ - Return matrix rank of array using SVD method - - Rank of the array is the number of SVD singular values of the - array that are greater than `tol`. - - Parameters - ---------- - M : array_like - array of <=2 dimensions - tol : {None, float} - threshold below which SVD values are considered zero. If `tol` is - None, and ``S`` is an array with singular values for `M`, and - ``eps`` is the epsilon value for datatype of ``S``, then `tol` is - set to ``S.max() * eps``. - - Notes - ----- - Golub and van Loan [1]_ define "numerical rank deficiency" as using - tol=eps*S[0] (where S[0] is the maximum singular value and thus the - 2-norm of the matrix). This is one definition of rank deficiency, - and the one we use here. When floating point roundoff is the main - concern, then "numerical rank deficiency" is a reasonable choice. In - some cases you may prefer other definitions. The most useful measure - of the tolerance depends on the operations you intend to use on your - matrix. For example, if your data come from uncertain measurements - with uncertainties greater than floating point epsilon, choosing a - tolerance near that uncertainty may be preferable. 
The tolerance - may be absolute if the uncertainties are absolute rather than - relative. - - References - ---------- - .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*. - Baltimore: Johns Hopkins University Press, 1996. - - Examples - -------- - >>> matrix_rank(np.eye(4)) # Full rank matrix - 4 - >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix - >>> matrix_rank(I) - 3 - >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 - 1 - >>> matrix_rank(np.zeros((4,))) - 0 - - """ - M = asarray(M) - if M.ndim > 2: - raise TypeError('array should have 2 or fewer dimensions') - if M.ndim < 2: - return int(not all(M==0)) - S = svd(M, compute_uv=False) - if tol is None: - tol = S.max() * finfo(S.dtype).eps - return sum(S > tol) - - -# Generalized inverse - -def pinv(a, rcond=1e-15 ): - """ - Compute the (Moore-Penrose) pseudo-inverse of a matrix. - - Calculate the generalized inverse of a matrix using its - singular-value decomposition (SVD) and including all - *large* singular values. - - Parameters - ---------- - a : array_like, shape (M, N) - Matrix to be pseudo-inverted. - rcond : float - Cutoff for small singular values. - Singular values smaller (in modulus) than - `rcond` * largest_singular_value (again, in modulus) - are set to zero. - - Returns - ------- - B : ndarray, shape (N, M) - The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so - is `B`. - - Raises - ------ - LinAlgError - If the SVD computation does not converge. - - Notes - ----- - The pseudo-inverse of a matrix A, denoted :math:`A^+`, is - defined as: "the matrix that 'solves' [the least-squares problem] - :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then - :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. 
- - It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular - value decomposition of A, then - :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are - orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting - of A's so-called singular values, (followed, typically, by - zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix - consisting of the reciprocals of A's singular values - (again, followed by zeros). [1]_ - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pp. 139-142. - - Examples - -------- - The following example checks that ``a * a+ * a == a`` and - ``a+ * a * a+ == a+``: - - >>> a = np.random.randn(9, 6) - >>> B = np.linalg.pinv(a) - >>> np.allclose(a, np.dot(a, np.dot(B, a))) - True - >>> np.allclose(B, np.dot(B, np.dot(a, B))) - True - - """ - a, wrap = _makearray(a) - _assertNonEmpty(a) - a = a.conjugate() - u, s, vt = svd(a, 0) - m = u.shape[0] - n = vt.shape[1] - cutoff = rcond*maximum.reduce(s) - for i in range(min(n, m)): - if s[i] > cutoff: - s[i] = 1./s[i] - else: - s[i] = 0.; - res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u))) - return wrap(res) - -# Determinant - -def slogdet(a): - """ - Compute the sign and (natural) logarithm of the determinant of an array. - - If an array has a very small or very large determinant, than a call to - `det` may overflow or underflow. This routine is more robust against such - issues, because it computes the logarithm of the determinant rather than - the determinant itself. - - Parameters - ---------- - a : array_like - Input array, has to be a square 2-D array. - - Returns - ------- - sign : float or complex - A number representing the sign of the determinant. For a real matrix, - this is 1, 0, or -1. For a complex matrix, this is a complex number - with absolute value 1 (i.e., it is on the unit circle), or else 0. 
- logdet : float - The natural log of the absolute value of the determinant. - - If the determinant is zero, then `sign` will be 0 and `logdet` will be - -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``. - - See Also - -------- - det - - Notes - ----- - The determinant is computed via LU factorization using the LAPACK - routine z/dgetrf. - - .. versionadded:: 1.6.0. - - Examples - -------- - The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: - - >>> a = np.array([[1, 2], [3, 4]]) - >>> (sign, logdet) = np.linalg.slogdet(a) - >>> (sign, logdet) - (-1, 0.69314718055994529) - >>> sign * np.exp(logdet) - -2.0 - - This routine succeeds where ordinary `det` does not: - - >>> np.linalg.det(np.eye(500) * 0.1) - 0.0 - >>> np.linalg.slogdet(np.eye(500) * 0.1) - (1, -1151.2925464970228) - - """ - a = asarray(a) - _assertRank2(a) - _assertSquareness(a) - t, result_t = _commonType(a) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - n = a.shape[0] - if isComplexType(t): - lapack_routine = lapack_lite.zgetrf - else: - lapack_routine = lapack_lite.dgetrf - pivots = zeros((n,), fortran_int) - results = lapack_routine(n, n, a, n, pivots, 0) - info = results['info'] - if (info < 0): - raise TypeError, "Illegal input to Fortran routine" - elif (info > 0): - return (t(0.0), _realType(t)(-Inf)) - sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2) - d = diagonal(a) - absd = absolute(d) - sign *= multiply.reduce(d / absd) - log(absd, absd) - logdet = add.reduce(absd, axis=-1) - return sign, logdet - -def det(a): - """ - Compute the determinant of an array. - - Parameters - ---------- - a : array_like, shape (M, M) - Input array. - - Returns - ------- - det : ndarray - Determinant of `a`. - - Notes - ----- - The determinant is computed via LU factorization using the LAPACK - routine z/dgetrf. 
- - Examples - -------- - The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: - - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.linalg.det(a) - -2.0 - - See Also - -------- - slogdet : Another way to representing the determinant, more suitable - for large matrices where underflow/overflow may occur. - - """ - sign, logdet = slogdet(a) - return sign * exp(logdet) - -# Linear Least Squares - -def lstsq(a, b, rcond=-1): - """ - Return the least-squares solution to a linear matrix equation. - - Solves the equation `a x = b` by computing a vector `x` that - minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may - be under-, well-, or over- determined (i.e., the number of - linearly independent rows of `a` can be less than, equal to, or - greater than its number of linearly independent columns). If `a` - is square and of full rank, then `x` (but for round-off error) is - the "exact" solution of the equation. - - Parameters - ---------- - a : array_like, shape (M, N) - "Coefficient" matrix. - b : array_like, shape (M,) or (M, K) - Ordinate or "dependent variable" values. If `b` is two-dimensional, - the least-squares solution is calculated for each of the `K` columns - of `b`. - rcond : float, optional - Cut-off ratio for small singular values of `a`. - Singular values are set to zero if they are smaller than `rcond` - times the largest singular value of `a`. - - Returns - ------- - x : ndarray, shape (N,) or (N, K) - Least-squares solution. The shape of `x` depends on the shape of - `b`. - residues : ndarray, shape (), (1,), or (K,) - Sums of residues; squared Euclidean 2-norm for each column in - ``b - a*x``. - If the rank of `a` is < N or > M, this is an empty array. - If `b` is 1-dimensional, this is a (1,) shape array. - Otherwise the shape is (K,). - rank : int - Rank of matrix `a`. - s : ndarray, shape (min(M,N),) - Singular values of `a`. - - Raises - ------ - LinAlgError - If computation does not converge. 
- - Notes - ----- - If `b` is a matrix, then all array results are returned as matrices. - - Examples - -------- - Fit a line, ``y = mx + c``, through some noisy data-points: - - >>> x = np.array([0, 1, 2, 3]) - >>> y = np.array([-1, 0.2, 0.9, 2.1]) - - By examining the coefficients, we see that the line should have a - gradient of roughly 1 and cut the y-axis at, more or less, -1. - - We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` - and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: - - >>> A = np.vstack([x, np.ones(len(x))]).T - >>> A - array([[ 0., 1.], - [ 1., 1.], - [ 2., 1.], - [ 3., 1.]]) - - >>> m, c = np.linalg.lstsq(A, y)[0] - >>> print m, c - 1.0 -0.95 - - Plot the data along with the fitted line: - - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o', label='Original data', markersize=10) - >>> plt.plot(x, m*x + c, 'r', label='Fitted line') - >>> plt.legend() - >>> plt.show() - - """ - import math - a, _ = _makearray(a) - b, wrap = _makearray(b) - is_1d = len(b.shape) == 1 - if is_1d: - b = b[:, newaxis] - _assertRank2(a, b) - m = a.shape[0] - n = a.shape[1] - n_rhs = b.shape[1] - ldb = max(n, m) - if m != b.shape[0]: - raise LinAlgError, 'Incompatible dimensions' - t, result_t = _commonType(a, b) - result_real_t = _realType(result_t) - real_t = _linalgRealType(t) - bstar = zeros((ldb, n_rhs), t) - bstar[:b.shape[0],:n_rhs] = b.copy() - a, bstar = _fastCopyAndTranspose(t, a, bstar) - a, bstar = _to_native_byte_order(a, bstar) - s = zeros((min(m, n),), real_t) - nlvl = max( 0, int( math.log( float(min(m, n))/2. 
) ) + 1 ) - iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int) - if isComplexType(t): - lapack_routine = lapack_lite.zgelsd - lwork = 1 - rwork = zeros((lwork,), real_t) - work = zeros((lwork,), t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, -1, rwork, iwork, 0) - lwork = int(abs(work[0])) - rwork = zeros((lwork,), real_t) - a_real = zeros((m, n), real_t) - bstar_real = zeros((ldb, n_rhs,), real_t) - results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m, - bstar_real, ldb, s, rcond, - 0, rwork, -1, iwork, 0) - lrwork = int(rwork[0]) - work = zeros((lwork,), t) - rwork = zeros((lrwork,), real_t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, lwork, rwork, iwork, 0) - else: - lapack_routine = lapack_lite.dgelsd - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, -1, iwork, 0) - lwork = int(work[0]) - work = zeros((lwork,), t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, lwork, iwork, 0) - if results['info'] > 0: - raise LinAlgError, 'SVD did not converge in Linear Least Squares' - resids = array([], result_real_t) - if is_1d: - x = array(ravel(bstar)[:n], dtype=result_t, copy=True) - if results['rank'] == n and m > n: - if isComplexType(t): - resids = array([sum(abs(ravel(bstar)[n:])**2)], - dtype=result_real_t) - else: - resids = array([sum((ravel(bstar)[n:])**2)], - dtype=result_real_t) - else: - x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True) - if results['rank'] == n and m > n: - if isComplexType(t): - resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype( - result_real_t) - else: - resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype( - result_real_t) - - st = s[:min(n, m)].copy().astype(result_real_t) - return wrap(x), wrap(resids), results['rank'], st - -def norm(x, ord=None): - """ - Matrix or vector norm. 
- - This function is able to return one of seven different matrix norms, - or one of an infinite number of vector norms (described below), depending - on the value of the ``ord`` parameter. - - Parameters - ---------- - x : array_like, shape (M,) or (M, N) - Input array. - ord : {non-zero int, inf, -inf, 'fro'}, optional - Order of the norm (see table under ``Notes``). inf means numpy's - `inf` object. - - Returns - ------- - n : float - Norm of the matrix or vector. - - Notes - ----- - For values of ``ord <= 0``, the result is, strictly speaking, not a - mathematical 'norm', but it may still be useful for various numerical - purposes. - - The following norms can be calculated: - - ===== ============================ ========================== - ord norm for matrices norm for vectors - ===== ============================ ========================== - None Frobenius norm 2-norm - 'fro' Frobenius norm -- - inf max(sum(abs(x), axis=1)) max(abs(x)) - -inf min(sum(abs(x), axis=1)) min(abs(x)) - 0 -- sum(x != 0) - 1 max(sum(abs(x), axis=0)) as below - -1 min(sum(abs(x), axis=0)) as below - 2 2-norm (largest sing. value) as below - -2 smallest singular value as below - other -- sum(abs(x)**ord)**(1./ord) - ===== ============================ ========================== - - The Frobenius norm is given by [1]_: - - :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` - - References - ---------- - .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, - Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15 - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.arange(9) - 4 - >>> a - array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) - >>> b = a.reshape((3, 3)) - >>> b - array([[-4, -3, -2], - [-1, 0, 1], - [ 2, 3, 4]]) - - >>> LA.norm(a) - 7.745966692414834 - >>> LA.norm(b) - 7.745966692414834 - >>> LA.norm(b, 'fro') - 7.745966692414834 - >>> LA.norm(a, np.inf) - 4 - >>> LA.norm(b, np.inf) - 9 - >>> LA.norm(a, -np.inf) - 0 - >>> LA.norm(b, -np.inf) - 2 - - >>> LA.norm(a, 1) - 20 - >>> LA.norm(b, 1) - 7 - >>> LA.norm(a, -1) - -4.6566128774142013e-010 - >>> LA.norm(b, -1) - 6 - >>> LA.norm(a, 2) - 7.745966692414834 - >>> LA.norm(b, 2) - 7.3484692283495345 - - >>> LA.norm(a, -2) - nan - >>> LA.norm(b, -2) - 1.8570331885190563e-016 - >>> LA.norm(a, 3) - 5.8480354764257312 - >>> LA.norm(a, -3) - nan - - """ - x = asarray(x) - if ord is None: # check the default case first and handle it immediately - return sqrt(add.reduce((x.conj() * x).ravel().real)) - - nd = x.ndim - if nd == 1: - if ord == Inf: - return abs(x).max() - elif ord == -Inf: - return abs(x).min() - elif ord == 0: - return (x != 0).sum() # Zero norm - elif ord == 1: - return abs(x).sum() # special case for speedup - elif ord == 2: - return sqrt(((x.conj()*x).real).sum()) # special case for speedup - else: - try: - ord + 1 - except TypeError: - raise ValueError, "Invalid norm order for vectors." - return ((abs(x)**ord).sum())**(1.0/ord) - elif nd == 2: - if ord == 2: - return svd(x, compute_uv=0).max() - elif ord == -2: - return svd(x, compute_uv=0).min() - elif ord == 1: - return abs(x).sum(axis=0).max() - elif ord == Inf: - return abs(x).sum(axis=1).max() - elif ord == -1: - return abs(x).sum(axis=0).min() - elif ord == -Inf: - return abs(x).sum(axis=1).min() - elif ord in ['fro','f']: - return sqrt(add.reduce((x.conj() * x).real.ravel())) - else: - raise ValueError, "Invalid norm order for matrices." - else: - raise ValueError, "Improper number of dimensions to norm." 
diff --git a/numpy-1.6.2/numpy/linalg/python_xerbla.c b/numpy-1.6.2/numpy/linalg/python_xerbla.c deleted file mode 100644 index 4e5a68413b..0000000000 --- a/numpy-1.6.2/numpy/linalg/python_xerbla.c +++ /dev/null @@ -1,37 +0,0 @@ -#include "Python.h" -#include "f2c.h" - -/* - From the original manpage: - -------------------------- - XERBLA is an error handler for the LAPACK routines. - It is called by an LAPACK routine if an input parameter has an invalid value. - A message is printed and execution stops. - - Instead of printing a message and stopping the execution, a - ValueError is raised with the message. - - Parameters: - ----------- - srname: Subroutine name to use in error message, maximum six characters. - Spaces at the end are skipped. - info: Number of the invalid parameter. -*/ - -int xerbla_(char *srname, integer *info) -{ - const char* format = "On entry to %.*s" \ - " parameter number %d had an illegal value"; - char buf[57 + 6 + 4]; /* 57 for strlen(format), - 6 for name, 4 for param. num. 
*/ - - int len = 0; /* length of subroutine name*/ - while( len<6 && srname[len]!='\0' ) - len++; - while( len && srname[len-1]==' ' ) - len--; - - snprintf(buf, sizeof(buf), format, len, srname, *info); - PyErr_SetString(PyExc_ValueError, buf); - return 0; -} diff --git a/numpy-1.6.2/numpy/linalg/setup.py b/numpy-1.6.2/numpy/linalg/setup.py deleted file mode 100644 index 1fb7a3acd0..0000000000 --- a/numpy-1.6.2/numpy/linalg/setup.py +++ /dev/null @@ -1,37 +0,0 @@ - -import sys - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('linalg',parent_package,top_path) - - config.add_data_dir('tests') - - # Configure lapack_lite - lapack_info = get_info('lapack_opt',0) # and {} - def get_lapack_lite_sources(ext, build_dir): - if not lapack_info: - print("### Warning: Using unoptimized lapack ###") - return ext.depends[:-1] - else: - if sys.platform=='win32': - print("### Warning: python_xerbla.c is disabled ###") - return ext.depends[:1] - return ext.depends[:2] - - config.add_extension('lapack_lite', - sources = [get_lapack_lite_sources], - depends= ['lapack_litemodule.c', - 'python_xerbla.c', - 'zlapack_lite.c', 'dlapack_lite.c', - 'blas_lite.c', 'dlamch.c', - 'f2c_lite.c','f2c.h'], - extra_info = lapack_info - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/linalg/setupscons.py b/numpy-1.6.2/numpy/linalg/setupscons.py deleted file mode 100644 index fd05ce9aff..0000000000 --- a/numpy-1.6.2/numpy/linalg/setupscons.py +++ /dev/null @@ -1,19 +0,0 @@ - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('linalg',parent_package,top_path) - - config.add_data_dir('tests') - - 
config.add_sconscript('SConstruct', - source_files = ['lapack_litemodule.c', - 'zlapack_lite.c', 'dlapack_lite.c', - 'blas_lite.c', 'dlamch.c', - 'f2c_lite.c','f2c.h']) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/linalg/tests/test_build.py b/numpy-1.6.2/numpy/linalg/tests/test_build.py deleted file mode 100644 index 6b6c24d617..0000000000 --- a/numpy-1.6.2/numpy/linalg/tests/test_build.py +++ /dev/null @@ -1,50 +0,0 @@ -from subprocess import call, PIPE, Popen -import sys -import re - -import numpy as np -from numpy.linalg import lapack_lite -from numpy.testing import TestCase, dec - -from numpy.compat import asbytes_nested - -class FindDependenciesLdd: - def __init__(self): - self.cmd = ['ldd'] - - try: - st = call(self.cmd, stdout=PIPE, stderr=PIPE) - except OSError: - raise RuntimeError("command %s cannot be run" % self.cmd) - - def get_dependencies(self, file): - p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if not (p.returncode == 0): - raise RuntimeError("Failed to check dependencies for %s" % libfile) - - return stdout - - def grep_dependencies(self, file, deps): - stdout = self.get_dependencies(file) - - rdeps = dict([(dep, re.compile(dep)) for dep in deps]) - founds = [] - for l in stdout.splitlines(): - for k, v in rdeps.items(): - if v.search(l): - founds.append(k) - - return founds - -class TestF77Mismatch(TestCase): - @dec.skipif(not(sys.platform[:5] == 'linux'), - "Skipping fortran compiler mismatch on non Linux platform") - def test_lapack(self): - f = FindDependenciesLdd() - deps = f.grep_dependencies(lapack_lite.__file__, - asbytes_nested(['libg2c', 'libgfortran'])) - self.assertFalse(len(deps) > 1, -"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to -cause random crashes and wrong results. 
See numpy INSTALL.txt for more -information.""") diff --git a/numpy-1.6.2/numpy/linalg/tests/test_linalg.py b/numpy-1.6.2/numpy/linalg/tests/test_linalg.py deleted file mode 100644 index cccd51d1f8..0000000000 --- a/numpy-1.6.2/numpy/linalg/tests/test_linalg.py +++ /dev/null @@ -1,435 +0,0 @@ -""" Test functions for linalg module -""" - -import numpy as np -from numpy.testing import * -from numpy import array, single, double, csingle, cdouble, dot, identity -from numpy import multiply, atleast_2d, inf, asarray, matrix -from numpy import linalg -from numpy.linalg import matrix_power, norm, matrix_rank - -def ifthen(a, b): - return not a or b - -old_assert_almost_equal = assert_almost_equal -def imply(a, b): - return not a or b - -def assert_almost_equal(a, b, **kw): - if asarray(a).dtype.type in (single, csingle): - decimal = 6 - else: - decimal = 12 - old_assert_almost_equal(a, b, decimal=decimal, **kw) - -class LinalgTestCase: - def test_single(self): - a = array([[1.,2.], [3.,4.]], dtype=single) - b = array([2., 1.], dtype=single) - self.do(a, b) - - def test_double(self): - a = array([[1.,2.], [3.,4.]], dtype=double) - b = array([2., 1.], dtype=double) - self.do(a, b) - - def test_double_2(self): - a = array([[1.,2.], [3.,4.]], dtype=double) - b = array([[2., 1., 4.], [3., 4., 6.]], dtype=double) - self.do(a, b) - - def test_csingle(self): - a = array([[1.+2j,2+3j], [3+4j,4+5j]], dtype=csingle) - b = array([2.+1j, 1.+2j], dtype=csingle) - self.do(a, b) - - def test_cdouble(self): - a = array([[1.+2j,2+3j], [3+4j,4+5j]], dtype=cdouble) - b = array([2.+1j, 1.+2j], dtype=cdouble) - self.do(a, b) - - def test_cdouble_2(self): - a = array([[1.+2j,2+3j], [3+4j,4+5j]], dtype=cdouble) - b = array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble) - self.do(a, b) - - def test_empty(self): - a = atleast_2d(array([], dtype = double)) - b = atleast_2d(array([], dtype = double)) - try: - self.do(a, b) - raise AssertionError("%s should fail with empty matrices", 
self.__name__[5:]) - except linalg.LinAlgError, e: - pass - - def test_nonarray(self): - a = [[1,2], [3,4]] - b = [2, 1] - self.do(a,b) - - def test_matrix_b_only(self): - """Check that matrix type is preserved.""" - a = array([[1.,2.], [3.,4.]]) - b = matrix([2., 1.]).T - self.do(a, b) - - def test_matrix_a_and_b(self): - """Check that matrix type is preserved.""" - a = matrix([[1.,2.], [3.,4.]]) - b = matrix([2., 1.]).T - self.do(a, b) - - -class LinalgNonsquareTestCase: - def test_single_nsq_1(self): - a = array([[1.,2.,3.], [3.,4.,6.]], dtype=single) - b = array([2., 1.], dtype=single) - self.do(a, b) - - def test_single_nsq_2(self): - a = array([[1.,2.], [3.,4.], [5.,6.]], dtype=single) - b = array([2., 1., 3.], dtype=single) - self.do(a, b) - - def test_double_nsq_1(self): - a = array([[1.,2.,3.], [3.,4.,6.]], dtype=double) - b = array([2., 1.], dtype=double) - self.do(a, b) - - def test_double_nsq_2(self): - a = array([[1.,2.], [3.,4.], [5.,6.]], dtype=double) - b = array([2., 1., 3.], dtype=double) - self.do(a, b) - - def test_csingle_nsq_1(self): - a = array([[1.+1j,2.+2j,3.-3j], [3.-5j,4.+9j,6.+2j]], dtype=csingle) - b = array([2.+1j, 1.+2j], dtype=csingle) - self.do(a, b) - - def test_csingle_nsq_2(self): - a = array([[1.+1j,2.+2j], [3.-3j,4.-9j], [5.-4j,6.+8j]], dtype=csingle) - b = array([2.+1j, 1.+2j, 3.-3j], dtype=csingle) - self.do(a, b) - - def test_cdouble_nsq_1(self): - a = array([[1.+1j,2.+2j,3.-3j], [3.-5j,4.+9j,6.+2j]], dtype=cdouble) - b = array([2.+1j, 1.+2j], dtype=cdouble) - self.do(a, b) - - def test_cdouble_nsq_2(self): - a = array([[1.+1j,2.+2j], [3.-3j,4.-9j], [5.-4j,6.+8j]], dtype=cdouble) - b = array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble) - self.do(a, b) - - def test_cdouble_nsq_1_2(self): - a = array([[1.+1j,2.+2j,3.-3j], [3.-5j,4.+9j,6.+2j]], dtype=cdouble) - b = array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble) - self.do(a, b) - - def test_cdouble_nsq_2_2(self): - a = array([[1.+1j,2.+2j], [3.-3j,4.-9j], [5.-4j,6.+8j]], 
dtype=cdouble) - b = array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble) - self.do(a, b) - - -class TestSolve(LinalgTestCase, TestCase): - def do(self, a, b): - x = linalg.solve(a, b) - assert_almost_equal(b, dot(a, x)) - assert imply(isinstance(b, matrix), isinstance(x, matrix)) - -class TestInv(LinalgTestCase, TestCase): - def do(self, a, b): - a_inv = linalg.inv(a) - assert_almost_equal(dot(a, a_inv), identity(asarray(a).shape[0])) - assert imply(isinstance(a, matrix), isinstance(a_inv, matrix)) - -class TestEigvals(LinalgTestCase, TestCase): - def do(self, a, b): - ev = linalg.eigvals(a) - evalues, evectors = linalg.eig(a) - assert_almost_equal(ev, evalues) - -class TestEig(LinalgTestCase, TestCase): - def do(self, a, b): - evalues, evectors = linalg.eig(a) - assert_almost_equal(dot(a, evectors), multiply(evectors, evalues)) - assert imply(isinstance(a, matrix), isinstance(evectors, matrix)) - -class TestSVD(LinalgTestCase, TestCase): - def do(self, a, b): - u, s, vt = linalg.svd(a, 0) - assert_almost_equal(a, dot(multiply(u, s), vt)) - assert imply(isinstance(a, matrix), isinstance(u, matrix)) - assert imply(isinstance(a, matrix), isinstance(vt, matrix)) - -class TestCondSVD(LinalgTestCase, TestCase): - def do(self, a, b): - c = asarray(a) # a might be a matrix - s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0]/s[-1], linalg.cond(a), decimal=5) - -class TestCond2(LinalgTestCase, TestCase): - def do(self, a, b): - c = asarray(a) # a might be a matrix - s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0]/s[-1], linalg.cond(a,2), decimal=5) - -class TestCondInf(TestCase): - def test(self): - A = array([[1.,0,0],[0,-2.,0],[0,0,3.]]) - assert_almost_equal(linalg.cond(A,inf),3.) 
- -class TestPinv(LinalgTestCase, TestCase): - def do(self, a, b): - a_ginv = linalg.pinv(a) - assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0])) - assert imply(isinstance(a, matrix), isinstance(a_ginv, matrix)) - -class TestDet(LinalgTestCase, TestCase): - def do(self, a, b): - d = linalg.det(a) - (s, ld) = linalg.slogdet(a) - if asarray(a).dtype.type in (single, double): - ad = asarray(a).astype(double) - else: - ad = asarray(a).astype(cdouble) - ev = linalg.eigvals(ad) - assert_almost_equal(d, multiply.reduce(ev)) - assert_almost_equal(s * np.exp(ld), multiply.reduce(ev)) - if s != 0: - assert_almost_equal(np.abs(s), 1) - else: - assert_equal(ld, -inf) - - def test_zero(self): - assert_equal(linalg.det([[0.0]]), 0.0) - assert_equal(type(linalg.det([[0.0]])), double) - assert_equal(linalg.det([[0.0j]]), 0.0) - assert_equal(type(linalg.det([[0.0j]])), cdouble) - - assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) - assert_equal(type(linalg.slogdet([[0.0]])[0]), double) - assert_equal(type(linalg.slogdet([[0.0]])[1]), double) - assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) - assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) - assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) - -class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase, TestCase): - def do(self, a, b): - arr = np.asarray(a) - m, n = arr.shape - u, s, vt = linalg.svd(a, 0) - x, residuals, rank, sv = linalg.lstsq(a, b) - if m <= n: - assert_almost_equal(b, dot(a, x)) - assert_equal(rank, m) - else: - assert_equal(rank, n) - assert_almost_equal(sv, sv.__array_wrap__(s)) - if rank == n and m > n: - expect_resids = (np.asarray(abs(np.dot(a, x) - b))**2).sum(axis=0) - expect_resids = np.asarray(expect_resids) - if len(np.asarray(b).shape) == 1: - expect_resids.shape = (1,) - assert_equal(residuals.shape, expect_resids.shape) - else: - expect_resids = type(x)([]) - assert_almost_equal(residuals, expect_resids) - assert_(np.issubdtype(residuals.dtype, np.floating)) - 
assert imply(isinstance(b, matrix), isinstance(x, matrix)) - assert imply(isinstance(b, matrix), isinstance(residuals, matrix)) - -class TestMatrixPower: - R90 = array([[0,1],[-1,0]]) - Arb22 = array([[4,-7],[-2,10]]) - noninv = array([[1,0],[0,0]]) - arbfloat = array([[0.1,3.2],[1.2,0.7]]) - - large = identity(10) - t = large[1,:].copy() - large[1,:] = large[0,:] - large[0,:] = t - - def test_large_power(self): - assert_equal(matrix_power(self.R90,2L**100+2**10+2**5+1),self.R90) - - def test_large_power_trailing_zero(self): - assert_equal(matrix_power(self.R90,2L**100+2**10+2**5),identity(2)) - - def testip_zero(self): - def tz(M): - mz = matrix_power(M,0) - assert_equal(mz, identity(M.shape[0])) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def testip_one(self): - def tz(M): - mz = matrix_power(M,1) - assert_equal(mz, M) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def testip_two(self): - def tz(M): - mz = matrix_power(M,2) - assert_equal(mz, dot(M,M)) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def testip_invert(self): - def tz(M): - mz = matrix_power(M,-1) - assert_almost_equal(identity(M.shape[0]), dot(mz,M)) - for M in [self.R90, self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def test_invert_noninvertible(self): - import numpy.linalg - assert_raises(numpy.linalg.linalg.LinAlgError, - lambda: matrix_power(self.noninv,-1)) - -class TestBoolPower(TestCase): - def test_square(self): - A = array([[True,False],[True,True]]) - assert_equal(matrix_power(A,2),A) - - -class HermitianTestCase(object): - def test_single(self): - a = array([[1.,2.], [2.,1.]], dtype=single) - self.do(a) - - def test_double(self): - a = array([[1.,2.], [2.,1.]], dtype=double) - self.do(a) - - def test_csingle(self): - a = array([[1.,2+3j], [2-3j,1]], dtype=csingle) - self.do(a) - - def test_cdouble(self): 
- a = array([[1.,2+3j], [2-3j,1]], dtype=cdouble) - self.do(a) - - def test_empty(self): - a = atleast_2d(array([], dtype = double)) - assert_raises(linalg.LinAlgError, self.do, a) - - def test_nonarray(self): - a = [[1,2], [2,1]] - self.do(a) - - def test_matrix_b_only(self): - """Check that matrix type is preserved.""" - a = array([[1.,2.], [2.,1.]]) - self.do(a) - - def test_matrix_a_and_b(self): - """Check that matrix type is preserved.""" - a = matrix([[1.,2.], [2.,1.]]) - self.do(a) - -class TestEigvalsh(HermitianTestCase, TestCase): - def do(self, a): - # note that eigenvalue arrays must be sorted since - # their order isn't guaranteed. - ev = linalg.eigvalsh(a) - evalues, evectors = linalg.eig(a) - ev.sort() - evalues.sort() - assert_almost_equal(ev, evalues) - -class TestEigh(HermitianTestCase, TestCase): - def do(self, a): - # note that eigenvalue arrays must be sorted since - # their order isn't guaranteed. - ev, evc = linalg.eigh(a) - evalues, evectors = linalg.eig(a) - ev.sort() - evalues.sort() - assert_almost_equal(ev, evalues) - -class _TestNorm(TestCase): - dt = None - dec = None - def test_empty(self): - assert_equal(norm([]), 0.0) - assert_equal(norm(array([], dtype=self.dt)), 0.0) - assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) - - def test_vector(self): - a = [1.0,2.0,3.0,4.0] - b = [-1.0,-2.0,-3.0,-4.0] - c = [-1.0, 2.0,-3.0, 4.0] - - def _test(v): - np.testing.assert_almost_equal(norm(v), 30**0.5, decimal=self.dec) - np.testing.assert_almost_equal(norm(v,inf), 4.0, decimal=self.dec) - np.testing.assert_almost_equal(norm(v,-inf), 1.0, decimal=self.dec) - np.testing.assert_almost_equal(norm(v,1), 10.0, decimal=self.dec) - np.testing.assert_almost_equal(norm(v,-1), 12.0/25, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v,2), 30**0.5, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v,-2), ((205./144)**-0.5), - decimal=self.dec) - np.testing.assert_almost_equal(norm(v,0), 4, decimal=self.dec) - - for v in (a, 
b, c,): - _test(v) - - for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), - array(c, dtype=self.dt)): - _test(v) - - def test_matrix(self): - A = matrix([[1.,3.],[5.,7.]], dtype=self.dt) - A = matrix([[1.,3.],[5.,7.]], dtype=self.dt) - assert_almost_equal(norm(A), 84**0.5) - assert_almost_equal(norm(A,'fro'), 84**0.5) - assert_almost_equal(norm(A,inf), 12.0) - assert_almost_equal(norm(A,-inf), 4.0) - assert_almost_equal(norm(A,1), 10.0) - assert_almost_equal(norm(A,-1), 6.0) - assert_almost_equal(norm(A,2), 9.1231056256176615) - assert_almost_equal(norm(A,-2), 0.87689437438234041) - - self.assertRaises(ValueError, norm, A, 'nofro') - self.assertRaises(ValueError, norm, A, -3) - self.assertRaises(ValueError, norm, A, 0) - -class TestNormDouble(_TestNorm): - dt = np.double - dec= 12 - -class TestNormSingle(_TestNorm): - dt = np.float32 - dec = 6 - - -def test_matrix_rank(): - # Full rank matrix - yield assert_equal, 4, matrix_rank(np.eye(4)) - # rank deficient matrix - I=np.eye(4); I[-1,-1] = 0. 
- yield assert_equal, matrix_rank(I), 3 - # All zeros - zero rank - yield assert_equal, matrix_rank(np.zeros((4,4))), 0 - # 1 dimension - rank 1 unless all 0 - yield assert_equal, matrix_rank([1, 0, 0, 0]), 1 - yield assert_equal, matrix_rank(np.zeros((4,))), 0 - # accepts array-like - yield assert_equal, matrix_rank([1]), 1 - # greater than 2 dimensions raises error - yield assert_raises, TypeError, matrix_rank, np.zeros((2,2,2)) - # works on scalar - yield assert_equal, matrix_rank(1), 1 - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/linalg/tests/test_regression.py b/numpy-1.6.2/numpy/linalg/tests/test_regression.py deleted file mode 100644 index b3188f99c2..0000000000 --- a/numpy-1.6.2/numpy/linalg/tests/test_regression.py +++ /dev/null @@ -1,71 +0,0 @@ -""" Test functions for linalg module -""" - -from numpy.testing import * -import numpy as np -from numpy import linalg, arange, float64, array, dot, transpose - -rlevel = 1 - -class TestRegression(TestCase): - def test_eig_build(self, level = rlevel): - """Ticket #652""" - rva = array([1.03221168e+02 +0.j, - -1.91843603e+01 +0.j, - -6.04004526e-01+15.84422474j, - -6.04004526e-01-15.84422474j, - -1.13692929e+01 +0.j, - -6.57612485e-01+10.41755503j, - -6.57612485e-01-10.41755503j, - 1.82126812e+01 +0.j, - 1.06011014e+01 +0.j , - 7.80732773e+00 +0.j , - -7.65390898e-01 +0.j, - 1.51971555e-15 +0.j , - -1.51308713e-15 +0.j]) - a = arange(13*13, dtype = float64) - a.shape = (13,13) - a = a%17 - va, ve = linalg.eig(a) - va.sort() - rva.sort() - assert_array_almost_equal(va, rva) - - def test_eigh_build(self, level = rlevel): - """Ticket 662.""" - rvals = [68.60568999, 89.57756725, 106.67185574] - - cov = array([[ 77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) - - vals, vecs = linalg.eigh(cov) - assert_array_almost_equal(vals, rvals) - - def test_svd_build(self, level = rlevel): - """Ticket 627.""" - a = 
array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]]) - m, n = a.shape - u, s, vh = linalg.svd(a) - - b = dot(transpose(u[:, n:]), a) - - assert_array_almost_equal(b, np.zeros((2, 2))) - - def test_norm_vector_badarg(self): - """Regression for #786: Froebenius norm for vectors raises - TypeError.""" - self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') - - def test_lapack_endian(self): - # For bug #1482 - a = array([[5.7998084, -2.1825367 ], - [-2.1825367, 9.85910595]], dtype='>f8') - b = array(a, dtype='= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - The integers ILO and IHI determined by ZGEBAL. - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - SCALE (input) DOUBLE PRECISION array, dimension (N) - Details of the permutation and scaling factors, as returned - by ZGEBAL. - - M (input) INTEGER - The number of columns of the matrix V. M >= 0. - - V (input/output) COMPLEX*16 array, dimension (LDV,M) - On entry, the matrix of right or left eigenvectors to be - transformed, as returned by ZHSEIN or ZTREVC. - On exit, V is overwritten by the transformed eigenvectors. - - LDV (input) INTEGER - The leading dimension of the array V. LDV >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - ===================================================================== - - - Decode and Test the input parameters -*/ - - /* Parameter adjustments */ - --scale; - v_dim1 = *ldv; - v_offset = 1 + v_dim1 * 1; - v -= v_offset; - - /* Function Body */ - rightv = lsame_(side, "R"); - leftv = lsame_(side, "L"); - - *info = 0; - if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S")) - && ! lsame_(job, "B"))) { - *info = -1; - } else if ((! rightv && ! 
leftv)) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -4; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -5; - } else if (*m < 0) { - *info = -7; - } else if (*ldv < max(1,*n)) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEBAK", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - if (*m == 0) { - return 0; - } - if (lsame_(job, "N")) { - return 0; - } - - if (*ilo == *ihi) { - goto L30; - } - -/* Backward balance */ - - if (lsame_(job, "S") || lsame_(job, "B")) { - - if (rightv) { - i__1 = *ihi; - for (i__ = *ilo; i__ <= i__1; ++i__) { - s = scale[i__]; - zdscal_(m, &s, &v[i__ + v_dim1], ldv); -/* L10: */ - } - } - - if (leftv) { - i__1 = *ihi; - for (i__ = *ilo; i__ <= i__1; ++i__) { - s = 1. / scale[i__]; - zdscal_(m, &s, &v[i__ + v_dim1], ldv); -/* L20: */ - } - } - - } - -/* - Backward permutation - - For I = ILO-1 step -1 until 1, - IHI+1 step 1 until N do -- -*/ - -L30: - if (lsame_(job, "P") || lsame_(job, "B")) { - if (rightv) { - i__1 = *n; - for (ii = 1; ii <= i__1; ++ii) { - i__ = ii; - if ((i__ >= *ilo && i__ <= *ihi)) { - goto L40; - } - if (i__ < *ilo) { - i__ = *ilo - ii; - } - k = (integer) scale[i__]; - if (k == i__) { - goto L40; - } - zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); -L40: - ; - } - } - - if (leftv) { - i__1 = *n; - for (ii = 1; ii <= i__1; ++ii) { - i__ = ii; - if ((i__ >= *ilo && i__ <= *ihi)) { - goto L50; - } - if (i__ < *ilo) { - i__ = *ilo - ii; - } - k = (integer) scale[i__]; - if (k == i__) { - goto L50; - } - zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); -L50: - ; - } - } - } - - return 0; - -/* End of ZGEBAK */ - -} /* zgebak_ */ - -/* Subroutine */ int zgebal_(char *job, integer *n, doublecomplex *a, integer - *lda, integer *ilo, integer *ihi, doublereal *scale, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - 
doublereal d__1, d__2; - - /* Builtin functions */ - double d_imag(doublecomplex *), z_abs(doublecomplex *); - - /* Local variables */ - static doublereal c__, f, g; - static integer i__, j, k, l, m; - static doublereal r__, s, ca, ra; - static integer ica, ira, iexc; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static doublereal sfmin1, sfmin2, sfmax1, sfmax2; - - extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_( - integer *, doublereal *, doublecomplex *, integer *); - extern integer izamax_(integer *, doublecomplex *, integer *); - static logical noconv; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZGEBAL balances a general complex matrix A. This involves, first, - permuting A by a similarity transformation to isolate eigenvalues - in the first 1 to ILO-1 and last IHI+1 to N elements on the - diagonal; and second, applying a diagonal similarity transformation - to rows and columns ILO to IHI to make the rows and columns as - close in norm as possible. Both steps are optional. - - Balancing may reduce the 1-norm of the matrix, and improve the - accuracy of the computed eigenvalues and/or eigenvectors. - - Arguments - ========= - - JOB (input) CHARACTER*1 - Specifies the operations to be performed on A: - = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 - for i = 1,...,N; - = 'P': permute only; - = 'S': scale only; - = 'B': both permute and scale. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the input matrix A. - On exit, A is overwritten by the balanced matrix. - If JOB = 'N', A is not referenced. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. 
LDA >= max(1,N). - - ILO (output) INTEGER - IHI (output) INTEGER - ILO and IHI are set to integers such that on exit - A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. - If JOB = 'N' or 'S', ILO = 1 and IHI = N. - - SCALE (output) DOUBLE PRECISION array, dimension (N) - Details of the permutations and scaling factors applied to - A. If P(j) is the index of the row and column interchanged - with row and column j and D(j) is the scaling factor - applied to row and column j, then - SCALE(j) = P(j) for j = 1,...,ILO-1 - = D(j) for j = ILO,...,IHI - = P(j) for j = IHI+1,...,N. - The order in which the interchanges are made is N to IHI+1, - then 1 to ILO-1. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The permutations consist of row and column interchanges which put - the matrix in the form - - ( T1 X Y ) - P A P = ( 0 B Z ) - ( 0 0 T2 ) - - where T1 and T2 are upper triangular matrices whose eigenvalues lie - along the diagonal. The column indices ILO and IHI mark the starting - and ending columns of the submatrix B. Balancing consists of applying - a diagonal similarity transformation inv(D) * B * D to make the - 1-norms of each row of B and its corresponding column nearly equal. - The output matrix is - - ( T1 X*D Y ) - ( 0 inv(D)*B*D inv(D)*Z ). - ( 0 0 T2 ) - - Information about the permutations P and the diagonal matrix D is - returned in the vector SCALE. - - This subroutine is based on the EISPACK routine CBAL. - - Modified by Tzu-Yi Chen, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --scale; - - /* Function Body */ - *info = 0; - if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S")) - && ! 
lsame_(job, "B"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEBAL", &i__1); - return 0; - } - - k = 1; - l = *n; - - if (*n == 0) { - goto L210; - } - - if (lsame_(job, "N")) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - scale[i__] = 1.; -/* L10: */ - } - goto L210; - } - - if (lsame_(job, "S")) { - goto L120; - } - -/* Permutation to isolate eigenvalues if possible */ - - goto L50; - -/* Row and column exchange. */ - -L20: - scale[m] = (doublereal) j; - if (j == m) { - goto L30; - } - - zswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1); - i__1 = *n - k + 1; - zswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda); - -L30: - switch (iexc) { - case 1: goto L40; - case 2: goto L80; - } - -/* Search for rows isolating an eigenvalue and push them down. */ - -L40: - if (l == 1) { - goto L210; - } - --l; - -L50: - for (j = l; j >= 1; --j) { - - i__1 = l; - for (i__ = 1; i__ <= i__1; ++i__) { - if (i__ == j) { - goto L60; - } - i__2 = j + i__ * a_dim1; - if (a[i__2].r != 0. || d_imag(&a[j + i__ * a_dim1]) != 0.) { - goto L70; - } -L60: - ; - } - - m = l; - iexc = 1; - goto L20; -L70: - ; - } - - goto L90; - -/* Search for columns isolating an eigenvalue and push them left. */ - -L80: - ++k; - -L90: - i__1 = l; - for (j = k; j <= i__1; ++j) { - - i__2 = l; - for (i__ = k; i__ <= i__2; ++i__) { - if (i__ == j) { - goto L100; - } - i__3 = i__ + j * a_dim1; - if (a[i__3].r != 0. || d_imag(&a[i__ + j * a_dim1]) != 0.) { - goto L110; - } -L100: - ; - } - - m = k; - iexc = 2; - goto L20; -L110: - ; - } - -L120: - i__1 = l; - for (i__ = k; i__ <= i__1; ++i__) { - scale[i__] = 1.; -/* L130: */ - } - - if (lsame_(job, "P")) { - goto L210; - } - -/* - Balance the submatrix in rows K to L. - - Iterative loop for norm reduction -*/ - - sfmin1 = SAFEMINIMUM / PRECISION; - sfmax1 = 1. / sfmin1; - sfmin2 = sfmin1 * 8.; - sfmax2 = 1. 
/ sfmin2; -L140: - noconv = FALSE_; - - i__1 = l; - for (i__ = k; i__ <= i__1; ++i__) { - c__ = 0.; - r__ = 0.; - - i__2 = l; - for (j = k; j <= i__2; ++j) { - if (j == i__) { - goto L150; - } - i__3 = j + i__ * a_dim1; - c__ += (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j + i__ * - a_dim1]), abs(d__2)); - i__3 = i__ + j * a_dim1; - r__ += (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + j * - a_dim1]), abs(d__2)); -L150: - ; - } - ica = izamax_(&l, &a[i__ * a_dim1 + 1], &c__1); - ca = z_abs(&a[ica + i__ * a_dim1]); - i__2 = *n - k + 1; - ira = izamax_(&i__2, &a[i__ + k * a_dim1], lda); - ra = z_abs(&a[i__ + (ira + k - 1) * a_dim1]); - -/* Guard against zero C or R due to underflow. */ - - if (c__ == 0. || r__ == 0.) { - goto L200; - } - g = r__ / 8.; - f = 1.; - s = c__ + r__; -L160: -/* Computing MAX */ - d__1 = max(f,c__); -/* Computing MIN */ - d__2 = min(r__,g); - if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) { - goto L170; - } - f *= 8.; - c__ *= 8.; - ca *= 8.; - r__ /= 8.; - g /= 8.; - ra /= 8.; - goto L160; - -L170: - g = c__ / 8.; -L180: -/* Computing MIN */ - d__1 = min(f,c__), d__1 = min(d__1,g); - if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) { - goto L190; - } - f /= 8.; - c__ /= 8.; - g /= 8.; - ca /= 8.; - r__ *= 8.; - ra *= 8.; - goto L180; - -/* Now balance. */ - -L190: - if (c__ + r__ >= s * .95) { - goto L200; - } - if ((f < 1. && scale[i__] < 1.)) { - if (f * scale[i__] <= sfmin1) { - goto L200; - } - } - if ((f > 1. && scale[i__] > 1.)) { - if (scale[i__] >= sfmax1 / f) { - goto L200; - } - } - g = 1. 
/ f; - scale[i__] *= f; - noconv = TRUE_; - - i__2 = *n - k + 1; - zdscal_(&i__2, &g, &a[i__ + k * a_dim1], lda); - zdscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1); - -L200: - ; - } - - if (noconv) { - goto L140; - } - -L210: - *ilo = k; - *ihi = l; - - return 0; - -/* End of ZGEBAL */ - -} /* zgebal_ */ - -/* Subroutine */ int zgebd2_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublereal *d__, doublereal *e, doublecomplex *tauq, - doublecomplex *taup, doublecomplex *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublecomplex z__1; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__; - static doublecomplex alpha; - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *, - integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGEBD2 reduces a complex general m by n matrix A to upper or lower - real bidiagonal form B by a unitary transformation: Q' * A * P = B. - - If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. - - Arguments - ========= - - M (input) INTEGER - The number of rows in the matrix A. M >= 0. - - N (input) INTEGER - The number of columns in the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the m by n general matrix to be reduced. 
- On exit, - if m >= n, the diagonal and the first superdiagonal are - overwritten with the upper bidiagonal matrix B; the - elements below the diagonal, with the array TAUQ, represent - the unitary matrix Q as a product of elementary - reflectors, and the elements above the first superdiagonal, - with the array TAUP, represent the unitary matrix P as - a product of elementary reflectors; - if m < n, the diagonal and the first subdiagonal are - overwritten with the lower bidiagonal matrix B; the - elements below the first subdiagonal, with the array TAUQ, - represent the unitary matrix Q as a product of - elementary reflectors, and the elements above the diagonal, - with the array TAUP, represent the unitary matrix P as - a product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - D (output) DOUBLE PRECISION array, dimension (min(M,N)) - The diagonal elements of the bidiagonal matrix B: - D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) - The off-diagonal elements of the bidiagonal matrix B: - if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; - if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. - - TAUQ (output) COMPLEX*16 array dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the unitary matrix Q. See Further Details. - - TAUP (output) COMPLEX*16 array, dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the unitary matrix P. See Further Details. - - WORK (workspace) COMPLEX*16 array, dimension (max(M,N)) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrices Q and P are represented as products of elementary - reflectors: - - If m >= n, - - Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . 
G(n-1) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are complex scalars, and v and u are complex - vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in - A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in - A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - If m < n, - - Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are complex scalars, v and u are complex vectors; - v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); - u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); - tauq is stored in TAUQ(i) and taup in TAUP(i). - - The contents of A on exit are illustrated by the following examples: - - m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): - - ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) - ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) - ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) - ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) - ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) - ( v1 v2 v3 v4 v5 ) - - where d and e denote diagonal and off-diagonal elements of B, vi - denotes an element of the vector defining H(i), and ui an element of - the vector defining G(i). 
- - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tauq; - --taup; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info < 0) { - i__1 = -(*info); - xerbla_("ZGEBD2", &i__1); - return 0; - } - - if (*m >= *n) { - -/* Reduce to upper bidiagonal form */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ - - i__2 = i__ + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *m - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, & - tauq[i__]); - i__2 = i__; - d__[i__2] = alpha.r; - i__2 = i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Apply H(i)' to A(i:m,i+1:n) from the left */ - - i__2 = *m - i__ + 1; - i__3 = *n - i__; - d_cnjg(&z__1, &tauq[i__]); - zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &z__1, - &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); - i__2 = i__ + i__ * a_dim1; - i__3 = i__; - a[i__2].r = d__[i__3], a[i__2].i = 0.; - - if (i__ < *n) { - -/* - Generate elementary reflector G(i) to annihilate - A(i,i+2:n) -*/ - - i__2 = *n - i__; - zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); - i__2 = i__ + (i__ + 1) * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, & - taup[i__]); - i__2 = i__; - e[i__2] = alpha.r; - i__2 = i__ + (i__ + 1) * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Apply G(i) to A(i+1:m,i+1:n) from the right */ - - i__2 = *m - i__; - i__3 = *n - i__; - zlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1], - lda, &taup[i__], &a[i__ + 1 + (i__ + 1) 
* a_dim1], - lda, &work[1]); - i__2 = *n - i__; - zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); - i__2 = i__ + (i__ + 1) * a_dim1; - i__3 = i__; - a[i__2].r = e[i__3], a[i__2].i = 0.; - } else { - i__2 = i__; - taup[i__2].r = 0., taup[i__2].i = 0.; - } -/* L10: */ - } - } else { - -/* Reduce to lower bidiagonal form */ - - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */ - - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); - i__2 = i__ + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, & - taup[i__]); - i__2 = i__; - d__[i__2] = alpha.r; - i__2 = i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Apply G(i) to A(i+1:m,i:n) from the right */ - - i__2 = *m - i__; - i__3 = *n - i__ + 1; -/* Computing MIN */ - i__4 = i__ + 1; - zlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[ - i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]); - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); - i__2 = i__ + i__ * a_dim1; - i__3 = i__; - a[i__2].r = d__[i__3], a[i__2].i = 0.; - - if (i__ < *m) { - -/* - Generate elementary reflector H(i) to annihilate - A(i+2:m,i) -*/ - - i__2 = i__ + 1 + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *m - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, - &tauq[i__]); - i__2 = i__; - e[i__2] = alpha.r; - i__2 = i__ + 1 + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Apply H(i)' to A(i+1:m,i+1:n) from the left */ - - i__2 = *m - i__; - i__3 = *n - i__; - d_cnjg(&z__1, &tauq[i__]); - zlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], & - c__1, &z__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, & - work[1]); - i__2 = i__ + 1 + i__ * a_dim1; - i__3 = i__; - a[i__2].r = e[i__3], a[i__2].i = 0.; - 
} else { - i__2 = i__; - tauq[i__2].r = 0., tauq[i__2].i = 0.; - } -/* L20: */ - } - } - return 0; - -/* End of ZGEBD2 */ - -} /* zgebd2_ */ - -/* Subroutine */ int zgebrd_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublereal *d__, doublereal *e, doublecomplex *tauq, - doublecomplex *taup, doublecomplex *work, integer *lwork, integer * - info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublereal d__1; - doublecomplex z__1; - - /* Local variables */ - static integer i__, j, nb, nx; - static doublereal ws; - static integer nbmin, iinfo, minmn; - extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, - integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), zgebd2_(integer *, integer *, - doublecomplex *, integer *, doublereal *, doublereal *, - doublecomplex *, doublecomplex *, doublecomplex *, integer *), - xerbla_(char *, integer *), zlabrd_(integer *, integer *, - integer *, doublecomplex *, integer *, doublereal *, doublereal *, - doublecomplex *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer ldwrkx, ldwrky, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZGEBRD reduces a general complex M-by-N matrix A to upper or lower - bidiagonal form B by a unitary transformation: Q**H * A * P = B. - - If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. - - Arguments - ========= - - M (input) INTEGER - The number of rows in the matrix A. M >= 0. - - N (input) INTEGER - The number of columns in the matrix A. N >= 0. 
- - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the M-by-N general matrix to be reduced. - On exit, - if m >= n, the diagonal and the first superdiagonal are - overwritten with the upper bidiagonal matrix B; the - elements below the diagonal, with the array TAUQ, represent - the unitary matrix Q as a product of elementary - reflectors, and the elements above the first superdiagonal, - with the array TAUP, represent the unitary matrix P as - a product of elementary reflectors; - if m < n, the diagonal and the first subdiagonal are - overwritten with the lower bidiagonal matrix B; the - elements below the first subdiagonal, with the array TAUQ, - represent the unitary matrix Q as a product of - elementary reflectors, and the elements above the diagonal, - with the array TAUP, represent the unitary matrix P as - a product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - D (output) DOUBLE PRECISION array, dimension (min(M,N)) - The diagonal elements of the bidiagonal matrix B: - D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) - The off-diagonal elements of the bidiagonal matrix B: - if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; - if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. - - TAUQ (output) COMPLEX*16 array dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the unitary matrix Q. See Further Details. - - TAUP (output) COMPLEX*16 array, dimension (min(M,N)) - The scalar factors of the elementary reflectors which - represent the unitary matrix P. See Further Details. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The length of the array WORK. LWORK >= max(1,M,N). - For optimum performance LWORK >= (M+N)*NB, where NB - is the optimal blocksize. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrices Q and P are represented as products of elementary - reflectors: - - If m >= n, - - Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are complex scalars, and v and u are complex - vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in - A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in - A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - If m < n, - - Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are complex scalars, and v and u are complex - vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in - A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in - A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - The contents of A on exit are illustrated by the following examples: - - m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): - - ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) - ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) - ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) - ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) - ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) - ( v1 v2 v3 v4 v5 ) - - where d and e denote diagonal and off-diagonal elements of B, vi - denotes an element of the vector defining H(i), and ui an element of - the vector defining G(i). 
- - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tauq; - --taup; - --work; - - /* Function Body */ - *info = 0; -/* Computing MAX */ - i__1 = 1, i__2 = ilaenv_(&c__1, "ZGEBRD", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nb = max(i__1,i__2); - lwkopt = (*m + *n) * nb; - d__1 = (doublereal) lwkopt; - work[1].r = d__1, work[1].i = 0.; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = max(1,*m); - if ((*lwork < max(i__1,*n) && ! lquery)) { - *info = -10; - } - } - if (*info < 0) { - i__1 = -(*info); - xerbla_("ZGEBRD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - minmn = min(*m,*n); - if (minmn == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - ws = (doublereal) max(*m,*n); - ldwrkx = *m; - ldwrky = *n; - - if ((nb > 1 && nb < minmn)) { - -/* - Set the crossover point NX. - - Computing MAX -*/ - i__1 = nb, i__2 = ilaenv_(&c__3, "ZGEBRD", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - -/* Determine when to switch from blocked to unblocked code. */ - - if (nx < minmn) { - ws = (doublereal) ((*m + *n) * nb); - if ((doublereal) (*lwork) < ws) { - -/* - Not enough work space for the optimal NB, consider using - a smaller block size. -*/ - - nbmin = ilaenv_(&c__2, "ZGEBRD", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - if (*lwork >= (*m + *n) * nbmin) { - nb = *lwork / (*m + *n); - } else { - nb = 1; - nx = minmn; - } - } - } - } else { - nx = minmn; - } - - i__1 = minmn - nx; - i__2 = nb; - for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - -/* - Reduce rows and columns i:i+ib-1 to bidiagonal form and return - the matrices X and Y which are needed to update the unreduced - part of the matrix -*/ - - i__3 = *m - i__ + 1; - i__4 = *n - i__ + 1; - zlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[ - i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx - * nb + 1], &ldwrky); - -/* - Update the trailing submatrix A(i+ib:m,i+ib:n), using - an update of the form A := A - V*Y' - X*U' -*/ - - i__3 = *m - i__ - nb + 1; - i__4 = *n - i__ - nb + 1; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", &i__3, &i__4, &nb, & - z__1, &a[i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + - nb + 1], &ldwrky, &c_b60, &a[i__ + nb + (i__ + nb) * a_dim1], - lda); - i__3 = *m - i__ - nb + 1; - i__4 = *n - i__ - nb + 1; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &z__1, & - work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, & - c_b60, &a[i__ + nb + (i__ + nb) * a_dim1], lda); - -/* Copy diagonal and off-diagonal elements of B back into A */ - - if (*m >= *n) { - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - i__4 = j + j * a_dim1; - i__5 = j; - a[i__4].r = d__[i__5], a[i__4].i = 0.; - i__4 = j + (j + 1) * a_dim1; - i__5 = j; - a[i__4].r = e[i__5], a[i__4].i = 0.; -/* L10: */ - } - } else { - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - i__4 = j + j * a_dim1; - i__5 = j; - a[i__4].r = d__[i__5], a[i__4].i = 0.; - i__4 = j + 1 + j * a_dim1; - i__5 = j; - a[i__4].r = e[i__5], a[i__4].i = 0.; -/* L20: */ - } - } -/* L30: */ - } - -/* Use unblocked code to reduce the remainder of the matrix */ - - i__2 = *m - i__ + 1; - i__1 = *n - i__ + 1; - zgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], & - tauq[i__], &taup[i__], &work[1], &iinfo); - work[1].r = ws, work[1].i = 0.; - return 0; - -/* End of ZGEBRD */ - -} /* zgebrd_ */ - -/* Subroutine 
*/ int zgeev_(char *jobvl, char *jobvr, integer *n, - doublecomplex *a, integer *lda, doublecomplex *w, doublecomplex *vl, - integer *ldvl, doublecomplex *vr, integer *ldvr, doublecomplex *work, - integer *lwork, doublereal *rwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, - i__2, i__3, i__4; - doublereal d__1, d__2; - doublecomplex z__1, z__2; - - /* Builtin functions */ - double sqrt(doublereal), d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, k, ihi; - static doublereal scl; - static integer ilo; - static doublereal dum[1], eps; - static doublecomplex tmp; - static integer ibal; - static char side[1]; - static integer maxb; - static doublereal anrm; - static integer ierr, itau, iwrk, nout; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *), dlabad_(doublereal *, doublereal *); - extern doublereal dznrm2_(integer *, doublecomplex *, integer *); - static logical scalea; - - static doublereal cscale; - extern /* Subroutine */ int zgebak_(char *, char *, integer *, integer *, - integer *, doublereal *, integer *, doublecomplex *, integer *, - integer *), zgebal_(char *, integer *, - doublecomplex *, integer *, integer *, integer *, doublereal *, - integer *); - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static logical select[1]; - extern /* Subroutine */ int zdscal_(integer *, doublereal *, - doublecomplex *, integer *); - static doublereal bignum; - extern doublereal zlange_(char *, integer *, integer *, doublecomplex *, - integer *, doublereal *); - extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *, - doublecomplex *, 
integer *, doublecomplex *, doublecomplex *, - integer *, integer *), zlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublecomplex *, - integer *, integer *), zlacpy_(char *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, integer *); - static integer minwrk, maxwrk; - static logical wantvl; - static doublereal smlnum; - static integer hswork, irwork; - extern /* Subroutine */ int zhseqr_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *), ztrevc_(char *, char *, logical *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, integer *, integer *, doublecomplex *, - doublereal *, integer *); - static logical lquery, wantvr; - extern /* Subroutine */ int zunghr_(integer *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *); - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZGEEV computes for an N-by-N complex nonsymmetric matrix A, the - eigenvalues and, optionally, the left and/or right eigenvectors. - - The right eigenvector v(j) of A satisfies - A * v(j) = lambda(j) * v(j) - where lambda(j) is its eigenvalue. - The left eigenvector u(j) of A satisfies - u(j)**H * A = lambda(j) * u(j)**H - where u(j)**H denotes the conjugate transpose of u(j). - - The computed eigenvectors are normalized to have Euclidean norm - equal to 1 and largest component real. - - Arguments - ========= - - JOBVL (input) CHARACTER*1 - = 'N': left eigenvectors of A are not computed; - = 'V': left eigenvectors of are computed. 
- - JOBVR (input) CHARACTER*1 - = 'N': right eigenvectors of A are not computed; - = 'V': right eigenvectors of A are computed. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the N-by-N matrix A. - On exit, A has been overwritten. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - W (output) COMPLEX*16 array, dimension (N) - W contains the computed eigenvalues. - - VL (output) COMPLEX*16 array, dimension (LDVL,N) - If JOBVL = 'V', the left eigenvectors u(j) are stored one - after another in the columns of VL, in the same order - as their eigenvalues. - If JOBVL = 'N', VL is not referenced. - u(j) = VL(:,j), the j-th column of VL. - - LDVL (input) INTEGER - The leading dimension of the array VL. LDVL >= 1; if - JOBVL = 'V', LDVL >= N. - - VR (output) COMPLEX*16 array, dimension (LDVR,N) - If JOBVR = 'V', the right eigenvectors v(j) are stored one - after another in the columns of VR, in the same order - as their eigenvalues. - If JOBVR = 'N', VR is not referenced. - v(j) = VR(:,j), the j-th column of VR. - - LDVR (input) INTEGER - The leading dimension of the array VR. LDVR >= 1; if - JOBVR = 'V', LDVR >= N. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,2*N). - For good performance, LWORK must generally be larger. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. 
- > 0: if INFO = i, the QR algorithm failed to compute all the - eigenvalues, and no eigenvectors have been computed; - elements and i+1:N of W contain eigenvalues which have - converged. - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --w; - vl_dim1 = *ldvl; - vl_offset = 1 + vl_dim1 * 1; - vl -= vl_offset; - vr_dim1 = *ldvr; - vr_offset = 1 + vr_dim1 * 1; - vr -= vr_offset; - --work; - --rwork; - - /* Function Body */ - *info = 0; - lquery = *lwork == -1; - wantvl = lsame_(jobvl, "V"); - wantvr = lsame_(jobvr, "V"); - if ((! wantvl && ! lsame_(jobvl, "N"))) { - *info = -1; - } else if ((! wantvr && ! lsame_(jobvr, "N"))) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if (*ldvl < 1 || (wantvl && *ldvl < *n)) { - *info = -8; - } else if (*ldvr < 1 || (wantvr && *ldvr < *n)) { - *info = -10; - } - -/* - Compute workspace - (Note: Comments in the code beginning "Workspace:" describe the - minimal amount of workspace needed at that point in the code, - as well as the preferred amount for good performance. - CWorkspace refers to complex workspace, and RWorkspace to real - workspace. NB refers to the optimal block size for the - immediately following subroutine, as returned by ILAENV. - HSWORK refers to the workspace preferred by ZHSEQR, as - calculated below. HSWORK is computed assuming ILO=1 and IHI=N, - the worst case.) -*/ - - minwrk = 1; - if ((*info == 0 && (*lwork >= 1 || lquery))) { - maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &c__0, ( - ftnlen)6, (ftnlen)1); - if ((! wantvl && ! 
wantvr)) { -/* Computing MAX */ - i__1 = 1, i__2 = (*n) << (1); - minwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = ilaenv_(&c__8, "ZHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen) - 6, (ftnlen)2); - maxb = max(i__1,2); -/* - Computing MIN - Computing MAX -*/ - i__3 = 2, i__4 = ilaenv_(&c__4, "ZHSEQR", "EN", n, &c__1, n, & - c_n1, (ftnlen)6, (ftnlen)2); - i__1 = min(maxb,*n), i__2 = max(i__3,i__4); - k = min(i__1,i__2); -/* Computing MAX */ - i__1 = k * (k + 2), i__2 = (*n) << (1); - hswork = max(i__1,i__2); - maxwrk = max(maxwrk,hswork); - } else { -/* Computing MAX */ - i__1 = 1, i__2 = (*n) << (1); - minwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR", - " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = ilaenv_(&c__8, "ZHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen) - 6, (ftnlen)2); - maxb = max(i__1,2); -/* - Computing MIN - Computing MAX -*/ - i__3 = 2, i__4 = ilaenv_(&c__4, "ZHSEQR", "SV", n, &c__1, n, & - c_n1, (ftnlen)6, (ftnlen)2); - i__1 = min(maxb,*n), i__2 = max(i__3,i__4); - k = min(i__1,i__2); -/* Computing MAX */ - i__1 = k * (k + 2), i__2 = (*n) << (1); - hswork = max(i__1,i__2); -/* Computing MAX */ - i__1 = max(maxwrk,hswork), i__2 = (*n) << (1); - maxwrk = max(i__1,i__2); - } - work[1].r = (doublereal) maxwrk, work[1].i = 0.; - } - if ((*lwork < minwrk && ! lquery)) { - *info = -12; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEEV ", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* Get machine constants */ - - eps = PRECISION; - smlnum = SAFEMINIMUM; - bignum = 1. / smlnum; - dlabad_(&smlnum, &bignum); - smlnum = sqrt(smlnum) / eps; - bignum = 1. / smlnum; - -/* Scale A if max element outside range [SMLNUM,BIGNUM] */ - - anrm = zlange_("M", n, n, &a[a_offset], lda, dum); - scalea = FALSE_; - if ((anrm > 0. 
&& anrm < smlnum)) { - scalea = TRUE_; - cscale = smlnum; - } else if (anrm > bignum) { - scalea = TRUE_; - cscale = bignum; - } - if (scalea) { - zlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, & - ierr); - } - -/* - Balance the matrix - (CWorkspace: none) - (RWorkspace: need N) -*/ - - ibal = 1; - zgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr); - -/* - Reduce to upper Hessenberg form - (CWorkspace: need 2*N, prefer N+N*NB) - (RWorkspace: none) -*/ - - itau = 1; - iwrk = itau + *n; - i__1 = *lwork - iwrk + 1; - zgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, - &ierr); - - if (wantvl) { - -/* - Want left eigenvectors - Copy Householder vectors to VL -*/ - - *(unsigned char *)side = 'L'; - zlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl) - ; - -/* - Generate unitary matrix in VL - (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) - (RWorkspace: none) -*/ - - i__1 = *lwork - iwrk + 1; - zunghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], - &i__1, &ierr); - -/* - Perform QR iteration, accumulating Schur vectors in VL - (CWorkspace: need 1, prefer HSWORK (see comments) ) - (RWorkspace: none) -*/ - - iwrk = itau; - i__1 = *lwork - iwrk + 1; - zhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vl[ - vl_offset], ldvl, &work[iwrk], &i__1, info); - - if (wantvr) { - -/* - Want left and right eigenvectors - Copy Schur vectors to VR -*/ - - *(unsigned char *)side = 'B'; - zlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr); - } - - } else if (wantvr) { - -/* - Want right eigenvectors - Copy Householder vectors to VR -*/ - - *(unsigned char *)side = 'R'; - zlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr) - ; - -/* - Generate unitary matrix in VR - (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) - (RWorkspace: none) -*/ - - i__1 = *lwork - iwrk + 1; - zunghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], - &i__1, &ierr); - -/* - Perform QR 
iteration, accumulating Schur vectors in VR - (CWorkspace: need 1, prefer HSWORK (see comments) ) - (RWorkspace: none) -*/ - - iwrk = itau; - i__1 = *lwork - iwrk + 1; - zhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[ - vr_offset], ldvr, &work[iwrk], &i__1, info); - - } else { - -/* - Compute eigenvalues only - (CWorkspace: need 1, prefer HSWORK (see comments) ) - (RWorkspace: none) -*/ - - iwrk = itau; - i__1 = *lwork - iwrk + 1; - zhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[ - vr_offset], ldvr, &work[iwrk], &i__1, info); - } - -/* If INFO > 0 from ZHSEQR, then quit */ - - if (*info > 0) { - goto L50; - } - - if (wantvl || wantvr) { - -/* - Compute left and/or right eigenvectors - (CWorkspace: need 2*N) - (RWorkspace: need 2*N) -*/ - - irwork = ibal + *n; - ztrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, - &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[irwork], - &ierr); - } - - if (wantvl) { - -/* - Undo balancing of left eigenvectors - (CWorkspace: none) - (RWorkspace: need N) -*/ - - zgebak_("B", "L", n, &ilo, &ihi, &rwork[ibal], n, &vl[vl_offset], - ldvl, &ierr); - -/* Normalize left eigenvectors and make largest component real */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - scl = 1. 
/ dznrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); - zdscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); - i__2 = *n; - for (k = 1; k <= i__2; ++k) { - i__3 = k + i__ * vl_dim1; -/* Computing 2nd power */ - d__1 = vl[i__3].r; -/* Computing 2nd power */ - d__2 = d_imag(&vl[k + i__ * vl_dim1]); - rwork[irwork + k - 1] = d__1 * d__1 + d__2 * d__2; -/* L10: */ - } - k = idamax_(n, &rwork[irwork], &c__1); - d_cnjg(&z__2, &vl[k + i__ * vl_dim1]); - d__1 = sqrt(rwork[irwork + k - 1]); - z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1; - tmp.r = z__1.r, tmp.i = z__1.i; - zscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1); - i__2 = k + i__ * vl_dim1; - i__3 = k + i__ * vl_dim1; - d__1 = vl[i__3].r; - z__1.r = d__1, z__1.i = 0.; - vl[i__2].r = z__1.r, vl[i__2].i = z__1.i; -/* L20: */ - } - } - - if (wantvr) { - -/* - Undo balancing of right eigenvectors - (CWorkspace: none) - (RWorkspace: need N) -*/ - - zgebak_("B", "R", n, &ilo, &ihi, &rwork[ibal], n, &vr[vr_offset], - ldvr, &ierr); - -/* Normalize right eigenvectors and make largest component real */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - scl = 1. 
/ dznrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); - zdscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); - i__2 = *n; - for (k = 1; k <= i__2; ++k) { - i__3 = k + i__ * vr_dim1; -/* Computing 2nd power */ - d__1 = vr[i__3].r; -/* Computing 2nd power */ - d__2 = d_imag(&vr[k + i__ * vr_dim1]); - rwork[irwork + k - 1] = d__1 * d__1 + d__2 * d__2; -/* L30: */ - } - k = idamax_(n, &rwork[irwork], &c__1); - d_cnjg(&z__2, &vr[k + i__ * vr_dim1]); - d__1 = sqrt(rwork[irwork + k - 1]); - z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1; - tmp.r = z__1.r, tmp.i = z__1.i; - zscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1); - i__2 = k + i__ * vr_dim1; - i__3 = k + i__ * vr_dim1; - d__1 = vr[i__3].r; - z__1.r = d__1, z__1.i = 0.; - vr[i__2].r = z__1.r, vr[i__2].i = z__1.i; -/* L40: */ - } - } - -/* Undo scaling if necessary */ - -L50: - if (scalea) { - i__1 = *n - *info; -/* Computing MAX */ - i__3 = *n - *info; - i__2 = max(i__3,1); - zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1] - , &i__2, &ierr); - if (*info > 0) { - i__1 = ilo - 1; - zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n, - &ierr); - } - } - - work[1].r = (doublereal) maxwrk, work[1].i = 0.; - return 0; - -/* End of ZGEEV */ - -} /* zgeev_ */ - -/* Subroutine */ int zgehd2_(integer *n, integer *ilo, integer *ihi, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__; - static doublecomplex alpha; - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGEHD2 reduces a complex general matrix A to upper Hessenberg form H - by a unitary similarity transformation: Q' * A * Q = H . - - Arguments - ========= - - N (input) INTEGER - The order of the matrix A. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that A is already upper triangular in rows - and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally - set by a previous call to ZGEBAL; otherwise they should be - set to 1 and N respectively. See Further Details. - 1 <= ILO <= IHI <= max(1,N). - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the n by n general matrix to be reduced. - On exit, the upper triangle and the first subdiagonal of A - are overwritten with the upper Hessenberg matrix H, and the - elements below the first subdiagonal, with the array TAU, - represent the unitary matrix Q as a product of elementary - reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (output) COMPLEX*16 array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace) COMPLEX*16 array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrix Q is represented as a product of (ihi-ilo) elementary - reflectors - - Q = H(ilo) H(ilo+1) . . . H(ihi-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on - exit in A(i+2:ihi,i), and tau in TAU(i). 
- - The contents of A are illustrated by the following example, with - n = 7, ilo = 2 and ihi = 6: - - on entry, on exit, - - ( a a a a a a a ) ( a a h h h h a ) - ( a a a a a a ) ( a h h h h a ) - ( a a a a a a ) ( h h h h h h ) - ( a a a a a a ) ( v2 h h h h h ) - ( a a a a a a ) ( v2 v3 h h h h ) - ( a a a a a a ) ( v2 v3 v4 h h h ) - ( a ) ( a ) - - where a denotes an element of the original matrix A, h denotes a - modified element of the upper Hessenberg matrix H, and vi denotes an - element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*n < 0) { - *info = -1; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -2; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEHD2", &i__1); - return 0; - } - - i__1 = *ihi - 1; - for (i__ = *ilo; i__ <= i__1; ++i__) { - -/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */ - - i__2 = i__ + 1 + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *ihi - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[ - i__]); - i__2 = i__ + 1 + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */ - - i__2 = *ihi - i__; - zlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ - i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]); - -/* Apply H(i)' to A(i+1:ihi,i+1:n) from the left */ - - i__2 = *ihi - i__; - i__3 = *n - i__; - d_cnjg(&z__1, &tau[i__]); - zlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &z__1, - &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); - - i__2 = i__ + 1 + i__ * a_dim1; - a[i__2].r = 
alpha.r, a[i__2].i = alpha.i; -/* L10: */ - } - - return 0; - -/* End of ZGEHD2 */ - -} /* zgehd2_ */ - -/* Subroutine */ int zgehrd_(integer *n, integer *ilo, integer *ihi, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublecomplex z__1; - - /* Local variables */ - static integer i__; - static doublecomplex t[4160] /* was [65][64] */; - static integer ib; - static doublecomplex ei; - static integer nb, nh, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, - integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), zgehd2_(integer *, integer *, integer - *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *), - zlahrd_(integer *, integer *, integer *, doublecomplex *, integer - *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZGEHRD reduces a complex general matrix A to upper Hessenberg form H - by a unitary similarity transformation: Q' * A * Q = H . - - Arguments - ========= - - N (input) INTEGER - The order of the matrix A. N >= 0. 
- - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that A is already upper triangular in rows - and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally - set by a previous call to ZGEBAL; otherwise they should be - set to 1 and N respectively. See Further Details. - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the N-by-N general matrix to be reduced. - On exit, the upper triangle and the first subdiagonal of A - are overwritten with the upper Hessenberg matrix H, and the - elements below the first subdiagonal, with the array TAU, - represent the unitary matrix Q as a product of elementary - reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (output) COMPLEX*16 array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to - zero. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The length of the array WORK. LWORK >= max(1,N). - For optimum performance LWORK >= N*NB, where NB is the - optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - The matrix Q is represented as a product of (ihi-ilo) elementary - reflectors - - Q = H(ilo) H(ilo+1) . . . H(ihi-1). 
- - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on - exit in A(i+2:ihi,i), and tau in TAU(i). - - The contents of A are illustrated by the following example, with - n = 7, ilo = 2 and ihi = 6: - - on entry, on exit, - - ( a a a a a a a ) ( a a h h h h a ) - ( a a a a a a ) ( a h h h h a ) - ( a a a a a a ) ( h h h h h h ) - ( a a a a a a ) ( v2 h h h h h ) - ( a a a a a a ) ( v2 v3 h h h h ) - ( a a a a a a ) ( v2 v3 v4 h h h ) - ( a ) ( a ) - - where a denotes an element of the original matrix A, h denotes a - modified element of the upper Hessenberg matrix H, and vi denotes an - element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; -/* Computing MIN */ - i__1 = 64, i__2 = ilaenv_(&c__1, "ZGEHRD", " ", n, ilo, ihi, &c_n1, ( - ftnlen)6, (ftnlen)1); - nb = min(i__1,i__2); - lwkopt = *n * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - lquery = *lwork == -1; - if (*n < 0) { - *info = -1; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -2; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if ((*lwork < max(1,*n) && ! 
lquery)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEHRD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */ - - i__1 = *ilo - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - tau[i__2].r = 0., tau[i__2].i = 0.; -/* L10: */ - } - i__1 = *n - 1; - for (i__ = max(1,*ihi); i__ <= i__1; ++i__) { - i__2 = i__; - tau[i__2].r = 0., tau[i__2].i = 0.; -/* L20: */ - } - -/* Quick return if possible */ - - nh = *ihi - *ilo + 1; - if (nh <= 1) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - iws = 1; - if ((nb > 1 && nb < nh)) { - -/* - Determine when to cross over from blocked to unblocked code - (last block is always handled by unblocked code). - - Computing MAX -*/ - i__1 = nb, i__2 = ilaenv_(&c__3, "ZGEHRD", " ", n, ilo, ihi, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < nh) { - -/* Determine if workspace is large enough for blocked code. */ - - iws = *n * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: determine the - minimum value of NB, and reduce NB or force use of - unblocked code. - - Computing MAX -*/ - i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEHRD", " ", n, ilo, ihi, & - c_n1, (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - if (*lwork >= *n * nbmin) { - nb = *lwork / *n; - } else { - nb = 1; - } - } - } - } - ldwork = *n; - - if (nb < nbmin || nb >= nh) { - -/* Use unblocked code below */ - - i__ = *ilo; - - } else { - -/* Use blocked code */ - - i__1 = *ihi - 1 - nx; - i__2 = nb; - for (i__ = *ilo; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__3 = nb, i__4 = *ihi - i__; - ib = min(i__3,i__4); - -/* - Reduce columns i:i+ib-1 to Hessenberg form, returning the - matrices V and T of the block reflector H = I - V*T*V' - which performs the reduction, and also the matrix Y = A*V*T -*/ - - zlahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, & - c__65, &work[1], &ldwork); - -/* - Apply the block reflector H to A(1:ihi,i+ib:ihi) from the - right, computing A := A - Y * V'. V(i+ib,ib-1) must be set - to 1. -*/ - - i__3 = i__ + ib + (i__ + ib - 1) * a_dim1; - ei.r = a[i__3].r, ei.i = a[i__3].i; - i__3 = i__ + ib + (i__ + ib - 1) * a_dim1; - a[i__3].r = 1., a[i__3].i = 0.; - i__3 = *ihi - i__ - ib + 1; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", ihi, &i__3, &ib, & - z__1, &work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, - &c_b60, &a[(i__ + ib) * a_dim1 + 1], lda); - i__3 = i__ + ib + (i__ + ib - 1) * a_dim1; - a[i__3].r = ei.r, a[i__3].i = ei.i; - -/* - Apply the block reflector H to A(i+1:ihi,i+ib:n) from the - left -*/ - - i__3 = *ihi - i__; - i__4 = *n - i__ - ib + 1; - zlarfb_("Left", "Conjugate transpose", "Forward", "Columnwise", & - i__3, &i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, & - c__65, &a[i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], & - ldwork); -/* L30: */ - } - } - -/* Use unblocked code to reduce the rest of the matrix */ - - zgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo); - work[1].r = (doublereal) iws, work[1].i = 0.; - - return 0; - -/* End of ZGEHRD */ - -} /* zgehrd_ */ - -/* Subroutine */ int zgelq2_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublecomplex *tau, doublecomplex *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, k; - static doublecomplex alpha; - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - 
doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *, - integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGELQ2 computes an LQ factorization of a complex m by n matrix A: - A = L * Q. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the m by n matrix A. - On exit, the elements on and below the diagonal of the array - contain the m by min(m,n) lower trapezoidal matrix L (L is - lower triangular if m <= n); the elements above the diagonal, - with the array TAU, represent the unitary matrix Q as a - product of elementary reflectors (see Further Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) COMPLEX*16 array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace) COMPLEX*16 array, dimension (M) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(k)' . . . H(2)' H(1)', where k = min(m,n). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in - A(i,i+1:n), and tau in TAU(i). 
- - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGELQ2", &i__1); - return 0; - } - - k = min(*m,*n); - - i__1 = k; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */ - - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); - i__2 = i__ + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &tau[i__] - ); - if (i__ < *m) { - -/* Apply H(i) to A(i+1:m,i:n) from the right */ - - i__2 = i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - i__2 = *m - i__; - i__3 = *n - i__ + 1; - zlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[ - i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); - } - i__2 = i__ + i__ * a_dim1; - a[i__2].r = alpha.r, a[i__2].i = alpha.i; - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); -/* L10: */ - } - return 0; - -/* End of ZGELQ2 */ - -} /* zgelq2_ */ - -/* Subroutine */ int zgelqf_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, k, ib, nb, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int zgelq2_(integer *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *), xerbla_( - char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, 
ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZGELQF computes an LQ factorization of a complex M-by-N matrix A: - A = L * Q. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, the elements on and below the diagonal of the array - contain the m-by-min(m,n) lower trapezoidal matrix L (L is - lower triangular if m <= n); the elements above the diagonal, - with the array TAU, represent the unitary matrix Q as a - product of elementary reflectors (see Further Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) COMPLEX*16 array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,M). - For optimum performance LWORK >= M*NB, where NB is the - optimal blocksize. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(k)' . . . H(2)' H(1)', where k = min(m,n). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in - A(i,i+1:n), and tau in TAU(i). - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "ZGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) - 1); - lwkopt = *m * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } else if ((*lwork < max(1,*m) && ! lquery)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGELQF", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - k = min(*m,*n); - if (k == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *m; - if ((nb > 1 && nb < k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "ZGELQF", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < k) { - -/* Determine if workspace is large enough for blocked code. 
*/ - - ldwork = *m; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "ZGELQF", " ", m, n, &c_n1, & - c_n1, (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < k) && nx < k)) { - -/* Use blocked code initially */ - - i__1 = k - nx; - i__2 = nb; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__3 = k - i__ + 1; - ib = min(i__3,nb); - -/* - Compute the LQ factorization of the current block - A(i:i+ib-1,i:n) -*/ - - i__3 = *n - i__ + 1; - zgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ - 1], &iinfo); - if (i__ + ib <= *m) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__3 = *n - i__ + 1; - zlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H to A(i+ib:m,i:n) from the right */ - - i__3 = *m - i__ - ib + 1; - i__4 = *n - i__ + 1; - zlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3, - &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & - ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + - 1], &ldwork); - } -/* L10: */ - } - } else { - i__ = 1; - } - -/* Use unblocked code to factor the last or only block. 
*/ - - if (i__ <= k) { - i__2 = *m - i__ + 1; - i__1 = *n - i__ + 1; - zgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] - , &iinfo); - } - - work[1].r = (doublereal) iws, work[1].i = 0.; - return 0; - -/* End of ZGELQF */ - -} /* zgelqf_ */ - -/* Subroutine */ int zgelsd_(integer *m, integer *n, integer *nrhs, - doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, - doublereal *s, doublereal *rcond, integer *rank, doublecomplex *work, - integer *lwork, doublereal *rwork, integer *iwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; - doublereal d__1; - doublecomplex z__1; - - /* Local variables */ - static integer ie, il, mm; - static doublereal eps, anrm, bnrm; - static integer itau, iascl, ibscl; - static doublereal sfmin; - static integer minmn, maxmn, itaup, itauq, mnthr, nwork; - extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); - - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dlaset_(char *, integer *, integer - *, doublereal *, doublereal *, doublereal *, integer *), - xerbla_(char *, integer *), zgebrd_(integer *, integer *, - doublecomplex *, integer *, doublereal *, doublereal *, - doublecomplex *, doublecomplex *, doublecomplex *, integer *, - integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern doublereal zlange_(char *, integer *, integer *, doublecomplex *, - integer *, doublereal *); - static doublereal bignum; - extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *, integer * - ), zlalsd_(char *, integer *, integer *, integer *, doublereal *, - doublereal *, doublecomplex *, integer *, doublereal *, integer *, - doublecomplex *, doublereal *, integer *, integer 
*), - zlascl_(char *, integer *, integer *, doublereal *, doublereal *, - integer *, integer *, doublecomplex *, integer *, integer *), zgeqrf_(integer *, integer *, doublecomplex *, integer *, - doublecomplex *, doublecomplex *, integer *, integer *); - static integer ldwork; - extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *), - zlaset_(char *, integer *, integer *, doublecomplex *, - doublecomplex *, doublecomplex *, integer *); - static integer minwrk, maxwrk; - static doublereal smlnum; - extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *, - integer *, integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer * - ); - static logical lquery; - static integer nrwork, smlsiz; - extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *); - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - ZGELSD computes the minimum-norm solution to a real linear least - squares problem: - minimize 2-norm(| b - A*x |) - using the singular value decomposition (SVD) of A. A is an M-by-N - matrix which may be rank-deficient. - - Several right hand side vectors b and solution vectors x can be - handled in a single call; they are stored as the columns of the - M-by-NRHS right hand side matrix B and the N-by-NRHS solution - matrix X. 
- - The problem is solved in three steps: - (1) Reduce the coefficient matrix A to bidiagonal form with - Householder tranformations, reducing the original problem - into a "bidiagonal least squares problem" (BLS) - (2) Solve the BLS using a divide and conquer approach. - (3) Apply back all the Householder tranformations to solve - the original least squares problem. - - The effective rank of A is determined by treating as zero those - singular values which are less than RCOND times the largest singular - value. - - The divide and conquer algorithm makes very mild assumptions about - floating point arithmetic. It will work on machines with a guard - digit in add/subtract, or on those binary machines without guard - digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or - Cray-2. It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - NRHS (input) INTEGER - The number of right hand sides, i.e., the number of columns - of the matrices B and X. NRHS >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, A has been destroyed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) - On entry, the M-by-NRHS right hand side matrix B. - On exit, B is overwritten by the N-by-NRHS solution matrix X. - If m >= n and RANK = n, the residual sum-of-squares for - the solution in the i-th column is given by the sum of - squares of elements n+1:m in that column. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,M,N). - - S (output) DOUBLE PRECISION array, dimension (min(M,N)) - The singular values of A in decreasing order. - The condition number of A in the 2-norm = S(1)/S(min(m,n)). 
- - RCOND (input) DOUBLE PRECISION - RCOND is used to determine the effective rank of A. - Singular values S(i) <= RCOND*S(1) are treated as zero. - If RCOND < 0, machine precision is used instead. - - RANK (output) INTEGER - The effective rank of A, i.e., the number of singular values - which are greater than RCOND*S(1). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK must be at least 1. - The exact minimum amount of workspace needed depends on M, - N and NRHS. As long as LWORK is at least - 2 * N + N * NRHS - if M is greater than or equal to N or - 2 * M + M * NRHS - if M is less than N, the code will execute correctly. - For good performance, LWORK should generally be larger. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - RWORK (workspace) DOUBLE PRECISION array, dimension at least - 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + - (SMLSIZ+1)**2 - if M is greater than or equal to N or - 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS + - (SMLSIZ+1)**2 - if M is less than N, the code will execute correctly. - SMLSIZ is returned by ILAENV and is equal to the maximum - size of the subproblems at the bottom of the computation - tree (usually about 25), and - NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) - - IWORK (workspace) INTEGER array, dimension (LIWORK) - LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, - where MINMN = MIN( M,N ). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: the algorithm for computing the SVD failed to converge; - if INFO = i, i off-diagonal elements of an intermediate - bidiagonal form did not converge to zero. 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input arguments. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - --s; - --work; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - minmn = min(*m,*n); - maxmn = max(*m,*n); - mnthr = ilaenv_(&c__6, "ZGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, ( - ftnlen)1); - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*nrhs < 0) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if (*ldb < max(1,maxmn)) { - *info = -7; - } - - smlsiz = ilaenv_(&c__9, "ZGELSD", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - -/* - Compute workspace. - (Note: Comments in the code beginning "Workspace:" describe the - minimal amount of workspace needed at that point in the code, - as well as the preferred amount for good performance. - NB refers to the optimal block size for the immediately - following subroutine, as returned by ILAENV.) -*/ - - minwrk = 1; - if (*info == 0) { - maxwrk = 0; - mm = *m; - if ((*m >= *n && *m >= mnthr)) { - -/* Path 1a - overdetermined, with many more rows than columns. */ - - mm = *n; -/* Computing MAX */ - i__1 = maxwrk, i__2 = *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *nrhs * ilaenv_(&c__1, "ZUNMQR", "LC", m, - nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2); - maxwrk = max(i__1,i__2); - } - if (*m >= *n) { - -/* - Path 1 - overdetermined or exactly determined. 
- - Computing MAX -*/ - i__1 = maxwrk, i__2 = ((*n) << (1)) + (mm + *n) * ilaenv_(&c__1, - "ZGEBRD", " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1) - ; - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *nrhs * ilaenv_(&c__1, - "ZUNMBR", "QLC", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen) - 3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1, - "ZUNMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * *nrhs; - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = ((*n) << (1)) + mm, i__2 = ((*n) << (1)) + *n * *nrhs; - minwrk = max(i__1,i__2); - } - if (*n > *m) { - if (*n >= mnthr) { - -/* - Path 2a - underdetermined, with many more columns - than rows. -*/ - - maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &c_n1, - &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1)) - * ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs * - ilaenv_(&c__1, "ZUNMBR", "QLC", m, nrhs, m, &c_n1, ( - ftnlen)6, (ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) * - ilaenv_(&c__1, "ZUNMLQ", "LC", n, nrhs, m, &c_n1, ( - ftnlen)6, (ftnlen)2); - maxwrk = max(i__1,i__2); - if (*nrhs > 1) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs; - maxwrk = max(i__1,i__2); - } else { -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1)); - maxwrk = max(i__1,i__2); - } -/* Computing MAX */ - i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *m * *nrhs; - maxwrk = max(i__1,i__2); - } else { - -/* Path 2 - underdetermined. 
*/ - - maxwrk = ((*m) << (1)) + (*n + *m) * ilaenv_(&c__1, "ZGEBRD", - " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *nrhs * ilaenv_(&c__1, - "ZUNMBR", "QLC", m, nrhs, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * *nrhs; - maxwrk = max(i__1,i__2); - } -/* Computing MAX */ - i__1 = ((*m) << (1)) + *n, i__2 = ((*m) << (1)) + *m * *nrhs; - minwrk = max(i__1,i__2); - } - minwrk = min(minwrk,maxwrk); - d__1 = (doublereal) maxwrk; - z__1.r = d__1, z__1.i = 0.; - work[1].r = z__1.r, work[1].i = z__1.i; - if ((*lwork < minwrk && ! lquery)) { - *info = -12; - } - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGELSD", &i__1); - return 0; - } else if (lquery) { - goto L10; - } - -/* Quick return if possible. */ - - if (*m == 0 || *n == 0) { - *rank = 0; - return 0; - } - -/* Get machine parameters. */ - - eps = PRECISION; - sfmin = SAFEMINIMUM; - smlnum = sfmin / eps; - bignum = 1. / smlnum; - dlabad_(&smlnum, &bignum); - -/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */ - - anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]); - iascl = 0; - if ((anrm > 0. && anrm < smlnum)) { - -/* Scale matrix norm up to SMLNUM */ - - zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, - info); - iascl = 1; - } else if (anrm > bignum) { - -/* Scale matrix norm down to BIGNUM. */ - - zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, - info); - iascl = 2; - } else if (anrm == 0.) { - -/* Matrix all zero. Return zero solution. 
*/ - - i__1 = max(*m,*n); - zlaset_("F", &i__1, nrhs, &c_b59, &c_b59, &b[b_offset], ldb); - dlaset_("F", &minmn, &c__1, &c_b324, &c_b324, &s[1], &c__1) - ; - *rank = 0; - goto L10; - } - -/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */ - - bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]); - ibscl = 0; - if ((bnrm > 0. && bnrm < smlnum)) { - -/* Scale matrix norm up to SMLNUM. */ - - zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb, - info); - ibscl = 1; - } else if (bnrm > bignum) { - -/* Scale matrix norm down to BIGNUM. */ - - zlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb, - info); - ibscl = 2; - } - -/* If M < N make sure B(M+1:N,:) = 0 */ - - if (*m < *n) { - i__1 = *n - *m; - zlaset_("F", &i__1, nrhs, &c_b59, &c_b59, &b[*m + 1 + b_dim1], ldb); - } - -/* Overdetermined case. */ - - if (*m >= *n) { - -/* Path 1 - overdetermined or exactly determined. */ - - mm = *m; - if (*m >= mnthr) { - -/* Path 1a - overdetermined, with many more rows than columns */ - - mm = *n; - itau = 1; - nwork = itau + *n; - -/* - Compute A=Q*R. - (RWorkspace: need N) - (CWorkspace: need N, prefer N*NB) -*/ - - i__1 = *lwork - nwork + 1; - zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, - info); - -/* - Multiply B by transpose(Q). - (RWorkspace: need N) - (CWorkspace: need NRHS, prefer NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - zunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[ - b_offset], ldb, &work[nwork], &i__1, info); - -/* Zero out below R. */ - - if (*n > 1) { - i__1 = *n - 1; - i__2 = *n - 1; - zlaset_("L", &i__1, &i__2, &c_b59, &c_b59, &a[a_dim1 + 2], - lda); - } - } - - itauq = 1; - itaup = itauq + *n; - nwork = itaup + *n; - ie = 1; - nrwork = ie + *n; - -/* - Bidiagonalize R in A. 
- (RWorkspace: need N) - (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], & - work[itaup], &work[nwork], &i__1, info); - -/* - Multiply B by transpose of left bidiagonalizing vectors of R. - (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq], - &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Solve the bidiagonal least squares problem. */ - - zlalsd_("U", &smlsiz, n, nrhs, &s[1], &rwork[ie], &b[b_offset], ldb, - rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], info); - if (*info != 0) { - goto L10; - } - -/* Multiply B by right bidiagonalizing vectors of R. */ - - i__1 = *lwork - nwork + 1; - zunmbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], & - b[b_offset], ldb, &work[nwork], &i__1, info); - - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 = - max(i__1,*nrhs), i__2 = *n - *m * 3; - if ((*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2) - )) { - -/* - Path 2a - underdetermined, with many more columns than rows - and sufficient workspace for an efficient algorithm. -*/ - - ldwork = *m; -/* - Computing MAX - Computing MAX -*/ - i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 = - max(i__3,*nrhs), i__4 = *n - *m * 3; - i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * * - lda + *m + *m * *nrhs; - if (*lwork >= max(i__1,i__2)) { - ldwork = *lda; - } - itau = 1; - nwork = *m + 1; - -/* - Compute A=L*Q. - (CWorkspace: need 2*M, prefer M+M*NB) -*/ - - i__1 = *lwork - nwork + 1; - zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, - info); - il = nwork; - -/* Copy L to WORK(IL), zeroing out above its diagonal. 
*/ - - zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork); - i__1 = *m - 1; - i__2 = *m - 1; - zlaset_("U", &i__1, &i__2, &c_b59, &c_b59, &work[il + ldwork], & - ldwork); - itauq = il + ldwork * *m; - itaup = itauq + *m; - nwork = itaup + *m; - ie = 1; - nrwork = ie + *m; - -/* - Bidiagonalize L in WORK(IL). - (RWorkspace: need M) - (CWorkspace: need M*M+4*M, prefer M*M+4*M+2*M*NB) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq], - &work[itaup], &work[nwork], &i__1, info); - -/* - Multiply B by transpose of left bidiagonalizing vectors of L. - (CWorkspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[ - itauq], &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Solve the bidiagonal least squares problem. */ - - zlalsd_("U", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset], - ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], - info); - if (*info != 0) { - goto L10; - } - -/* Multiply B by right bidiagonalizing vectors of L. */ - - i__1 = *lwork - nwork + 1; - zunmbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[ - itaup], &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Zero out below first M rows of B. */ - - i__1 = *n - *m; - zlaset_("F", &i__1, nrhs, &c_b59, &c_b59, &b[*m + 1 + b_dim1], - ldb); - nwork = itau + *m; - -/* - Multiply transpose(Q) by B. - (CWorkspace: need NRHS, prefer NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - zunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[ - b_offset], ldb, &work[nwork], &i__1, info); - - } else { - -/* Path 2 - remaining underdetermined cases. */ - - itauq = 1; - itaup = itauq + *m; - nwork = itaup + *m; - ie = 1; - nrwork = ie + *m; - -/* - Bidiagonalize A. 
- (RWorkspace: need M) - (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], - &work[itaup], &work[nwork], &i__1, info); - -/* - Multiply B by transpose of left bidiagonalizing vectors. - (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB) -*/ - - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq] - , &b[b_offset], ldb, &work[nwork], &i__1, info); - -/* Solve the bidiagonal least squares problem. */ - - zlalsd_("L", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset], - ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], - info); - if (*info != 0) { - goto L10; - } - -/* Multiply B by right bidiagonalizing vectors of A. */ - - i__1 = *lwork - nwork + 1; - zunmbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup] - , &b[b_offset], ldb, &work[nwork], &i__1, info); - - } - } - -/* Undo scaling. */ - - if (iascl == 1) { - zlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb, - info); - dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & - minmn, info); - } else if (iascl == 2) { - zlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb, - info); - dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & - minmn, info); - } - if (ibscl == 1) { - zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb, - info); - } else if (ibscl == 2) { - zlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb, - info); - } - -L10: - d__1 = (doublereal) maxwrk; - z__1.r = d__1, z__1.i = 0.; - work[1].r = z__1.r, work[1].i = z__1.i; - return 0; - -/* End of ZGELSD */ - -} /* zgelsd_ */ - -/* Subroutine */ int zgeqr2_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublecomplex *tau, doublecomplex *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Builtin functions 
*/ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, k; - static doublecomplex alpha; - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGEQR2 computes a QR factorization of a complex m by n matrix A: - A = Q * R. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the m by n matrix A. - On exit, the elements on and above the diagonal of the array - contain the min(m,n) by n upper trapezoidal matrix R (R is - upper triangular if m >= n); the elements below the diagonal, - with the array TAU, represent the unitary matrix Q as a - product of elementary reflectors (see Further Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) COMPLEX*16 array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace) COMPLEX*16 array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(1) H(2) . . . H(k), where k = min(m,n). 
- - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), - and tau in TAU(i). - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEQR2", &i__1); - return 0; - } - - k = min(*m,*n); - - i__1 = k; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ - - i__2 = *m - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - zlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1] - , &c__1, &tau[i__]); - if (i__ < *n) { - -/* Apply H(i)' to A(i:m,i+1:n) from the left */ - - i__2 = i__ + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - i__2 = *m - i__ + 1; - i__3 = *n - i__; - d_cnjg(&z__1, &tau[i__]); - zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &z__1, - &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); - i__2 = i__ + i__ * a_dim1; - a[i__2].r = alpha.r, a[i__2].i = alpha.i; - } -/* L10: */ - } - return 0; - -/* End of ZGEQR2 */ - -} /* zgeqr2_ */ - -/* Subroutine */ int zgeqrf_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, k, ib, nb, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int zgeqr2_(integer *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *), xerbla_( - 
char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZGEQRF computes a QR factorization of a complex M-by-N matrix A: - A = Q * R. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, the elements on and above the diagonal of the array - contain the min(M,N)-by-N upper trapezoidal matrix R (R is - upper triangular if m >= n); the elements below the diagonal, - with the array TAU, represent the unitary matrix Q as a - product of min(m,n) elementary reflectors (see Further - Details). - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - TAU (output) COMPLEX*16 array, dimension (min(M,N)) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,N). - For optimum performance LWORK >= N*NB, where NB is - the optimal blocksize. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The matrix Q is represented as a product of elementary reflectors - - Q = H(1) H(2) . . . H(k), where k = min(m,n). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), - and tau in TAU(i). - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) - 1); - lwkopt = *n * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } else if ((*lwork < max(1,*n) && ! lquery)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGEQRF", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - k = min(*m,*n); - if (k == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *n; - if ((nb > 1 && nb < k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "ZGEQRF", " ", m, n, &c_n1, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < k) { - -/* Determine if workspace is large enough for blocked code. 
*/ - - ldwork = *n; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEQRF", " ", m, n, &c_n1, & - c_n1, (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < k) && nx < k)) { - -/* Use blocked code initially */ - - i__1 = k - nx; - i__2 = nb; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__3 = k - i__ + 1; - ib = min(i__3,nb); - -/* - Compute the QR factorization of the current block - A(i:m,i:i+ib-1) -*/ - - i__3 = *m - i__ + 1; - zgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ - 1], &iinfo); - if (i__ + ib <= *n) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__3 = *m - i__ + 1; - zlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H' to A(i:m,i+ib:n) from the left */ - - i__3 = *m - i__ + 1; - i__4 = *n - i__ - ib + 1; - zlarfb_("Left", "Conjugate transpose", "Forward", "Columnwise" - , &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, & - work[1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, - &work[ib + 1], &ldwork); - } -/* L10: */ - } - } else { - i__ = 1; - } - -/* Use unblocked code to factor the last or only block. 
*/ - - if (i__ <= k) { - i__2 = *m - i__ + 1; - i__1 = *n - i__ + 1; - zgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] - , &iinfo); - } - - work[1].r = (doublereal) iws, work[1].i = 0.; - return 0; - -/* End of ZGEQRF */ - -} /* zgeqrf_ */ - -/* Subroutine */ int zgesdd_(char *jobz, integer *m, integer *n, - doublecomplex *a, integer *lda, doublereal *s, doublecomplex *u, - integer *ldu, doublecomplex *vt, integer *ldvt, doublecomplex *work, - integer *lwork, doublereal *rwork, integer *iwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, - i__2, i__3; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer i__, ie, il, ir, iu, blk; - static doublereal dum[1], eps; - static integer iru, ivt, iscl; - static doublereal anrm; - static integer idum[1], ierr, itau, irvt; - extern logical lsame_(char *, char *); - static integer chunk, minmn; - extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, - integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static integer wrkbl, itaup, itauq; - static logical wntqa; - static integer nwork; - static logical wntqn, wntqo, wntqs; - extern /* Subroutine */ int zlacp2_(char *, integer *, integer *, - doublereal *, integer *, doublecomplex *, integer *); - static integer mnthr1, mnthr2; - extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal - *, doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *, doublereal *, integer *, integer *); - - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), xerbla_(char *, integer *), - zgebrd_(integer *, integer *, doublecomplex *, integer *, - doublereal *, doublereal *, doublecomplex *, 
doublecomplex *, - doublecomplex *, integer *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static doublereal bignum; - extern doublereal zlange_(char *, integer *, integer *, doublecomplex *, - integer *, doublereal *); - extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *, integer * - ), zlacrm_(integer *, integer *, doublecomplex *, integer *, - doublereal *, integer *, doublecomplex *, integer *, doublereal *) - , zlarcm_(integer *, integer *, doublereal *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublereal *), zlascl_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, integer *, doublecomplex *, integer *, - integer *), zgeqrf_(integer *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *, integer * - ); - static integer ldwrkl; - extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *), - zlaset_(char *, integer *, integer *, doublecomplex *, - doublecomplex *, doublecomplex *, integer *); - static integer ldwrkr, minwrk, ldwrku, maxwrk; - extern /* Subroutine */ int zungbr_(char *, integer *, integer *, integer - *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *); - static integer ldwkvt; - static doublereal smlnum; - static logical wntqas; - extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *, - integer *, integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer * - ), zunglq_(integer *, integer *, integer * - , doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *); - static logical lquery; - static integer nrwork; - extern /* Subroutine */ int zungqr_(integer *, integer *, integer *, - 
doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *); - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - ZGESDD computes the singular value decomposition (SVD) of a complex - M-by-N matrix A, optionally computing the left and/or right singular - vectors, by using divide-and-conquer method. The SVD is written - - A = U * SIGMA * conjugate-transpose(V) - - where SIGMA is an M-by-N matrix which is zero except for its - min(m,n) diagonal elements, U is an M-by-M unitary matrix, and - V is an N-by-N unitary matrix. The diagonal elements of SIGMA - are the singular values of A; they are real and non-negative, and - are returned in descending order. The first min(m,n) columns of - U and V are the left and right singular vectors of A. - - Note that the routine returns VT = V**H, not V. - - The divide and conquer algorithm makes very mild assumptions about - floating point arithmetic. It will work on machines with a guard - digit in add/subtract, or on those binary machines without guard - digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or - Cray-2. It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. 
- - Arguments - ========= - - JOBZ (input) CHARACTER*1 - Specifies options for computing all or part of the matrix U: - = 'A': all M columns of U and all N rows of V**H are - returned in the arrays U and VT; - = 'S': the first min(M,N) columns of U and the first - min(M,N) rows of V**H are returned in the arrays U - and VT; - = 'O': If M >= N, the first N columns of U are overwritten - on the array A and all rows of V**H are returned in - the array VT; - otherwise, all columns of U are returned in the - array U and the first M rows of V**H are overwritten - in the array VT; - = 'N': no columns of U or rows of V**H are computed. - - M (input) INTEGER - The number of rows of the input matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the input matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the M-by-N matrix A. - On exit, - if JOBZ = 'O', A is overwritten with the first N columns - of U (the left singular vectors, stored - columnwise) if M >= N; - A is overwritten with the first M rows - of V**H (the right singular vectors, stored - rowwise) otherwise. - if JOBZ .ne. 'O', the contents of A are destroyed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - S (output) DOUBLE PRECISION array, dimension (min(M,N)) - The singular values of A, sorted so that S(i) >= S(i+1). - - U (output) COMPLEX*16 array, dimension (LDU,UCOL) - UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; - UCOL = min(M,N) if JOBZ = 'S'. - If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M - unitary matrix U; - if JOBZ = 'S', U contains the first min(M,N) columns of U - (the left singular vectors, stored columnwise); - if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. - - LDU (input) INTEGER - The leading dimension of the array U. LDU >= 1; if - JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. 
- - VT (output) COMPLEX*16 array, dimension (LDVT,N) - If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the - N-by-N unitary matrix V**H; - if JOBZ = 'S', VT contains the first min(M,N) rows of - V**H (the right singular vectors, stored rowwise); - if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. - - LDVT (input) INTEGER - The leading dimension of the array VT. LDVT >= 1; if - JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; - if JOBZ = 'S', LDVT >= min(M,N). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= 1. - if JOBZ = 'N', LWORK >= 2*min(M,N)+max(M,N). - if JOBZ = 'O', - LWORK >= 2*min(M,N)*min(M,N)+2*min(M,N)+max(M,N). - if JOBZ = 'S' or 'A', - LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N). - For good performance, LWORK should generally be larger. - If LWORK < 0 but other input arguments are legal, WORK(1) - returns the optimal LWORK. - - RWORK (workspace) DOUBLE PRECISION array, dimension (LRWORK) - If JOBZ = 'N', LRWORK >= 7*min(M,N). - Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 5*min(M,N) - - IWORK (workspace) INTEGER array, dimension (8*min(M,N)) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: The updating process of DBDSDC did not converge. 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Huan Ren, Computer Science Division, University of - California at Berkeley, USA - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --s; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - --work; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - minmn = min(*m,*n); - mnthr1 = (integer) (minmn * 17. / 9.); - mnthr2 = (integer) (minmn * 5. / 3.); - wntqa = lsame_(jobz, "A"); - wntqs = lsame_(jobz, "S"); - wntqas = wntqa || wntqs; - wntqo = lsame_(jobz, "O"); - wntqn = lsame_(jobz, "N"); - minwrk = 1; - maxwrk = 1; - lquery = *lwork == -1; - - if (! (wntqa || wntqs || wntqo || wntqn)) { - *info = -1; - } else if (*m < 0) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if (*ldu < 1 || (wntqas && *ldu < *m) || ((wntqo && *m < *n) && * - ldu < *m)) { - *info = -8; - } else if (*ldvt < 1 || (wntqa && *ldvt < *n) || (wntqs && *ldvt < minmn) - || ((wntqo && *m >= *n) && *ldvt < *n)) { - *info = -10; - } - -/* - Compute workspace - (Note: Comments in the code beginning "Workspace:" describe the - minimal amount of workspace needed at that point in the code, - as well as the preferred amount for good performance. - CWorkspace refers to complex workspace, and RWorkspace to - real workspace. NB refers to the optimal block size for the - immediately following subroutine, as returned by ILAENV.) 
-*/ - - if (((*info == 0 && *m > 0) && *n > 0)) { - if (*m >= *n) { - -/* - There is no complex work space needed for bidiagonal SVD - The real work space needed for bidiagonal SVD is BDSPAC, - BDSPAC = 3*N*N + 4*N -*/ - - if (*m >= mnthr1) { - if (wntqn) { - -/* Path 1 (M much larger than N, JOBZ='N') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); - maxwrk = wrkbl; - minwrk = *n * 3; - } else if (wntqo) { - -/* Path 2 (M much larger than N, JOBZ='O') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR", - " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); - maxwrk = *m * *n + *n * *n + wrkbl; - minwrk = ((*n) << (1)) * *n + *n * 3; - } else if (wntqs) { - -/* Path 3 (M much larger than N, JOBZ='S') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR", - " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1, - (ftnlen)6, 
(ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); - maxwrk = *n * *n + wrkbl; - minwrk = *n * *n + *n * 3; - } else if (wntqa) { - -/* Path 4 (M much larger than N, JOBZ='A') */ - - wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "ZUNGQR", - " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); - maxwrk = *n * *n + wrkbl; - minwrk = *n * *n + ((*n) << (1)) + *m; - } - } else if (*m >= mnthr2) { - -/* Path 5 (M much larger than N, but not as much as MNTHR1) */ - - maxwrk = ((*n) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD", - " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - minwrk = ((*n) << (1)) + *m; - if (wntqo) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "Q", m, n, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); - maxwrk += *m * *n; - minwrk += *n * *n; - } else if (wntqs) { -/* 
Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "Q", m, n, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); - } else if (wntqa) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); - } - } else { - -/* Path 6 (M at least N, but not much larger) */ - - maxwrk = ((*n) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD", - " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - minwrk = ((*n) << (1)) + *m; - if (wntqo) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "QLN", m, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); - maxwrk += *m * *n; - minwrk += *n * *n; - } else if (wntqs) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNMBR", "QLN", m, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); - } else if (wntqa) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "PRC", n, n, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*n) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = 
max(i__1,i__2); - } - } - } else { - -/* - There is no complex work space needed for bidiagonal SVD - The real work space needed for bidiagonal SVD is BDSPAC, - BDSPAC = 3*M*M + 4*M -*/ - - if (*n >= mnthr1) { - if (wntqn) { - -/* Path 1t (N much larger than M, JOBZ='N') */ - - maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + ((*m) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - maxwrk = max(i__1,i__2); - minwrk = *m * 3; - } else if (wntqo) { - -/* Path 2t (N much larger than M, JOBZ='O') */ - - wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "ZUNGLQ", - " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); - maxwrk = *m * *n + *m * *m + wrkbl; - minwrk = ((*m) << (1)) * *m + *m * 3; - } else if (wntqs) { - -/* Path 3t (N much larger than M, JOBZ='S') */ - - wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "ZUNGLQ", - " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing 
MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); - maxwrk = *m * *m + wrkbl; - minwrk = *m * *m + *m * 3; - } else if (wntqa) { - -/* Path 4t (N much larger than M, JOBZ='A') */ - - wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, & - c_n1, &c_n1, (ftnlen)6, (ftnlen)1); -/* Computing MAX */ - i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "ZUNGLQ", - " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) * - ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); -/* Computing MAX */ - i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - wrkbl = max(i__1,i__2); - maxwrk = *m * *m + wrkbl; - minwrk = *m * *m + ((*m) << (1)) + *n; - } - } else if (*n >= mnthr2) { - -/* Path 5t (N much larger than M, but not as much as MNTHR1) */ - - maxwrk = ((*m) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD", - " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - minwrk = ((*m) << (1)) + *n; - if (wntqo) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "P", m, n, m, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); - maxwrk += *m * *n; - minwrk += *m * *m; - } else if (wntqs) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) 
+ *m * ilaenv_(&c__1, - "ZUNGBR", "P", m, n, m, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); - } else if (wntqa) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "P", n, n, m, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen) - 1); - maxwrk = max(i__1,i__2); - } - } else { - -/* Path 6t (N greater than M, but not much larger) */ - - maxwrk = ((*m) << (1)) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD", - " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); - minwrk = ((*m) << (1)) + *n; - if (wntqo) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "PRC", m, n, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNMBR", "QLN", m, m, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); - maxwrk += *m * *n; - minwrk += *m * *m; - } else if (wntqs) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "PRC", m, n, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); - } else if (wntqa) { -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *n * ilaenv_(&c__1, - "ZUNGBR", "PRC", n, n, m, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); -/* Computing MAX */ - i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1, - "ZUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, ( - ftnlen)3); - maxwrk = max(i__1,i__2); - } - } - } - maxwrk = 
max(maxwrk,minwrk); - work[1].r = (doublereal) maxwrk, work[1].i = 0.; - } - - if ((*lwork < minwrk && ! lquery)) { - *info = -13; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGESDD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - if (*lwork >= 1) { - work[1].r = 1., work[1].i = 0.; - } - return 0; - } - -/* Get machine constants */ - - eps = PRECISION; - smlnum = sqrt(SAFEMINIMUM) / eps; - bignum = 1. / smlnum; - -/* Scale A if max element outside range [SMLNUM,BIGNUM] */ - - anrm = zlange_("M", m, n, &a[a_offset], lda, dum); - iscl = 0; - if ((anrm > 0. && anrm < smlnum)) { - iscl = 1; - zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, & - ierr); - } else if (anrm > bignum) { - iscl = 1; - zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, & - ierr); - } - - if (*m >= *n) { - -/* - A has at least as many rows as columns. If A has sufficiently - more rows than columns, first reduce using the QR - decomposition (if sufficient workspace available) -*/ - - if (*m >= mnthr1) { - - if (wntqn) { - -/* - Path 1 (M much larger than N, JOBZ='N') - No singular vectors to be computed -*/ - - itau = 1; - nwork = itau + *n; - -/* - Compute A=Q*R - (CWorkspace: need 2*N, prefer N+N*NB) - (RWorkspace: need 0) -*/ - - i__1 = *lwork - nwork + 1; - zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Zero out below R */ - - i__1 = *n - 1; - i__2 = *n - 1; - zlaset_("L", &i__1, &i__2, &c_b59, &c_b59, &a[a_dim1 + 2], - lda); - ie = 1; - itauq = 1; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in A - (CWorkspace: need 3*N, prefer 2*N+2*N*NB) - (RWorkspace: need N) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - nrwork = ie + *n; - -/* - Perform bidiagonal SVD, compute singular values only - (CWorkspace: 0) - 
(RWorkspace: need BDSPAC) -*/ - - dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, & - c__1, dum, idum, &rwork[nrwork], &iwork[1], info); - - } else if (wntqo) { - -/* - Path 2 (M much larger than N, JOBZ='O') - N left singular vectors to be overwritten on A and - N right singular vectors to be computed in VT -*/ - - iu = 1; - -/* WORK(IU) is N by N */ - - ldwrku = *n; - ir = iu + ldwrku * *n; - if (*lwork >= *m * *n + *n * *n + *n * 3) { - -/* WORK(IR) is M by N */ - - ldwrkr = *m; - } else { - ldwrkr = (*lwork - *n * *n - *n * 3) / *n; - } - itau = ir + ldwrkr * *n; - nwork = itau + *n; - -/* - Compute A=Q*R - (CWorkspace: need N*N+2*N, prefer M*N+N+N*NB) - (RWorkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Copy R to WORK( IR ), zeroing out below it */ - - zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); - i__1 = *n - 1; - i__2 = *n - 1; - zlaset_("L", &i__1, &i__2, &c_b59, &c_b59, &work[ir + 1], & - ldwrkr); - -/* - Generate Q in A - (CWorkspace: need 2*N, prefer N+N*NB) - (RWorkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], - &i__1, &ierr); - ie = 1; - itauq = itau; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in WORK(IR) - (CWorkspace: need N*N+3*N, prefer M*N+2*N+2*N*NB) - (RWorkspace: need N) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of R in WORK(IRU) and computing right singular vectors - of R in WORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = ie + *n; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex 
matrix WORK(IU) - Overwrite WORK(IU) by the left singular vectors of R - (CWorkspace: need 2*N*N+3*N, prefer M*N+N*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku); - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ - itauq], &work[iu], &ldwrku, &work[nwork], &i__1, & - ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by the right singular vectors of R - (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - -/* - Multiply Q in A by left singular vectors of R in - WORK(IU), storing result in WORK(IR) and copying to A - (CWorkspace: need 2*N*N, prefer N*N+M*N) - (RWorkspace: 0) -*/ - - i__1 = *m; - i__2 = ldwrkr; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += - i__2) { -/* Computing MIN */ - i__3 = *m - i__ + 1; - chunk = min(i__3,ldwrkr); - zgemm_("N", "N", &chunk, n, n, &c_b60, &a[i__ + a_dim1], - lda, &work[iu], &ldwrku, &c_b59, &work[ir], & - ldwrkr); - zlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + - a_dim1], lda); -/* L10: */ - } - - } else if (wntqs) { - -/* - Path 3 (M much larger than N, JOBZ='S') - N left singular vectors to be computed in U and - N right singular vectors to be computed in VT -*/ - - ir = 1; - -/* WORK(IR) is N by N */ - - ldwrkr = *n; - itau = ir + ldwrkr * *n; - nwork = itau + *n; - -/* - Compute A=Q*R - (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - -/* Copy R to WORK(IR), zeroing out below it */ - - zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); - i__2 = *n - 1; - i__1 = *n - 1; - zlaset_("L", &i__2, &i__1, &c_b59, &c_b59, &work[ir 
+ 1], & - ldwrkr); - -/* - Generate Q in A - (CWorkspace: need 2*N, prefer N+N*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], - &i__2, &ierr); - ie = 1; - itauq = itau; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in WORK(IR) - (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) - (RWorkspace: need N) -*/ - - i__2 = *lwork - nwork + 1; - zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = ie + *n; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of R - (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of R - (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply Q in A by left singular vectors of R in - WORK(IR), storing result in U - (CWorkspace: need N*N) - (RWorkspace: 0) -*/ - - zlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr); - zgemm_("N", "N", m, n, n, &c_b60, &a[a_offset], 
lda, &work[ir] - , &ldwrkr, &c_b59, &u[u_offset], ldu); - - } else if (wntqa) { - -/* - Path 4 (M much larger than N, JOBZ='A') - M left singular vectors to be computed in U and - N right singular vectors to be computed in VT -*/ - - iu = 1; - -/* WORK(IU) is N by N */ - - ldwrku = *n; - itau = iu + ldwrku * *n; - nwork = itau + *n; - -/* - Compute A=Q*R, copying result to U - (CWorkspace: need 2*N, prefer N+N*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu); - -/* - Generate Q in U - (CWorkspace: need N+M, prefer N+M*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork], - &i__2, &ierr); - -/* Produce R in A, zeroing out below it */ - - i__2 = *n - 1; - i__1 = *n - 1; - zlaset_("L", &i__2, &i__1, &c_b59, &c_b59, &a[a_dim1 + 2], - lda); - ie = 1; - itauq = itau; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize R in A - (CWorkspace: need 3*N, prefer 2*N+2*N*NB) - (RWorkspace: need N) -*/ - - i__2 = *lwork - nwork + 1; - zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - iru = ie + *n; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix WORK(IU) - Overwrite WORK(IU) by left singular vectors of R - (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku); - i__2 = *lwork - nwork + 1; - zunmbr_("Q", "L", 
"N", n, n, n, &a[a_offset], lda, &work[ - itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & - ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of R - (CWorkspace: need 3*N, prefer 2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply Q in U by left singular vectors of R in - WORK(IU), storing result in A - (CWorkspace: need N*N) - (RWorkspace: 0) -*/ - - zgemm_("N", "N", m, n, n, &c_b60, &u[u_offset], ldu, &work[iu] - , &ldwrku, &c_b59, &a[a_offset], lda); - -/* Copy left singular vectors of A from A to U */ - - zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu); - - } - - } else if (*m >= mnthr2) { - -/* - MNTHR2 <= M < MNTHR1 - - Path 5 (M much larger than N, but not as much as MNTHR1) - Reduce to bidiagonal form without QR decomposition, use - ZUNGBR and matrix multiplication to compute singular vectors -*/ - - ie = 1; - nrwork = ie + *n; - itauq = 1; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize A - (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) - (RWorkspace: need N) -*/ - - i__2 = *lwork - nwork + 1; - zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], - &work[itaup], &work[nwork], &i__2, &ierr); - if (wntqn) { - -/* - Compute singular values only - (Cworkspace: 0) - (Rworkspace: need BDSPAC) -*/ - - dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, & - c__1, dum, idum, &rwork[nrwork], &iwork[1], info); - } else if (wntqo) { - iu = nwork; - iru = nrwork; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - -/* - Copy A to VT, generate P**H - (Cworkspace: need 2*N, prefer N+N*NB) - (Rworkspace: 0) -*/ - - zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zungbr_("P", n, n, n, &vt[vt_offset], ldvt, 
&work[itaup], & - work[nwork], &i__2, &ierr); - -/* - Generate Q in A - (CWorkspace: need 2*N, prefer N+N*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[ - nwork], &i__2, &ierr); - - if (*lwork >= *m * *n + *n * 3) { - -/* WORK( IU ) is M by N */ - - ldwrku = *m; - } else { - -/* WORK(IU) is LDWRKU by N */ - - ldwrku = (*lwork - *n * 3) / *n; - } - nwork = iu + ldwrku * *n; - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Multiply real matrix RWORK(IRVT) by P**H in VT, - storing the result in WORK(IU), copying to VT - (Cworkspace: need 0) - (Rworkspace: need 3*N*N) -*/ - - zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &work[iu] - , &ldwrku, &rwork[nrwork]); - zlacpy_("F", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt); - -/* - Multiply Q in A by real matrix RWORK(IRU), storing the - result in WORK(IU), copying to A - (CWorkspace: need N*N, prefer M*N) - (Rworkspace: need 3*N*N, prefer N*N+2*M*N) -*/ - - nrwork = irvt; - i__2 = *m; - i__1 = ldwrku; - for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += - i__1) { -/* Computing MIN */ - i__3 = *m - i__ + 1; - chunk = min(i__3,ldwrku); - zlacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru], n, - &work[iu], &ldwrku, &rwork[nrwork]); - zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ + - a_dim1], lda); -/* L20: */ - } - - } else if (wntqs) { - -/* - Copy A to VT, generate P**H - (Cworkspace: need 2*N, prefer N+N*NB) - (Rworkspace: 0) -*/ - - zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], & - work[nwork], &i__1, &ierr); - -/* - Copy A to U, generate Q - (Cworkspace: need 2*N, prefer N+N*NB) - (Rworkspace: 0) -*/ - - zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zungbr_("Q", m, n, n, &u[u_offset], ldu, &work[itauq], &work[ - nwork], &i__1, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = nrwork; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Multiply real matrix RWORK(IRVT) by P**H in VT, - storing the result in A, copying to VT - (Cworkspace: need 0) - (Rworkspace: need 3*N*N) -*/ - - zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[ - a_offset], lda, &rwork[nrwork]); - zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - -/* - Multiply Q in U by real matrix RWORK(IRU), storing the - result in A, copying to U - (CWorkspace: need 0) - (Rworkspace: need N*N+2*M*N) -*/ - - nrwork = irvt; - zlacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset], - lda, &rwork[nrwork]); - zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu); - } else { - -/* - Copy A to VT, generate P**H - (Cworkspace: 
need 2*N, prefer N+N*NB) - (Rworkspace: 0) -*/ - - zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], & - work[nwork], &i__1, &ierr); - -/* - Copy A to U, generate Q - (Cworkspace: need 2*N, prefer N+N*NB) - (Rworkspace: 0) -*/ - - zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[ - nwork], &i__1, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = nrwork; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Multiply real matrix RWORK(IRVT) by P**H in VT, - storing the result in A, copying to VT - (Cworkspace: need 0) - (Rworkspace: need 3*N*N) -*/ - - zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[ - a_offset], lda, &rwork[nrwork]); - zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - -/* - Multiply Q in U by real matrix RWORK(IRU), storing the - result in A, copying to U - (CWorkspace: 0) - (Rworkspace: need 3*N*N) -*/ - - nrwork = irvt; - zlacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset], - lda, &rwork[nrwork]); - zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu); - } - - } else { - -/* - M .LT. 
MNTHR2 - - Path 6 (M at least N, but not much larger) - Reduce to bidiagonal form without QR decomposition - Use ZUNMBR to compute singular vectors -*/ - - ie = 1; - nrwork = ie + *n; - itauq = 1; - itaup = itauq + *n; - nwork = itaup + *n; - -/* - Bidiagonalize A - (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) - (RWorkspace: need N) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], - &work[itaup], &work[nwork], &i__1, &ierr); - if (wntqn) { - -/* - Compute singular values only - (Cworkspace: 0) - (Rworkspace: need BDSPAC) -*/ - - dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, & - c__1, dum, idum, &rwork[nrwork], &iwork[1], info); - } else if (wntqo) { - iu = nwork; - iru = nrwork; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - if (*lwork >= *m * *n + *n * 3) { - -/* WORK( IU ) is M by N */ - - ldwrku = *m; - } else { - -/* WORK( IU ) is LDWRKU by N */ - - ldwrku = (*lwork - *n * 3) / *n; - } - nwork = iu + ldwrku * *n; - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of A - (Cworkspace: need 2*N, prefer N+N*NB) - (Rworkspace: need 0) -*/ - - zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - - if (*lwork >= *m * *n + *n * 3) { - -/* - Copy real matrix RWORK(IRU) to complex matrix WORK(IU) - Overwrite WORK(IU) by left singular vectors of A, copying - to A - (Cworkspace: need M*N+2*N, prefer M*N+N+N*NB) - (Rworkspace: need 0) -*/ - - 
zlaset_("F", m, n, &c_b59, &c_b59, &work[iu], &ldwrku); - zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku); - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ - itauq], &work[iu], &ldwrku, &work[nwork], &i__1, & - ierr); - zlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda); - } else { - -/* - Generate Q in A - (Cworkspace: need 2*N, prefer N+N*NB) - (Rworkspace: need 0) -*/ - - i__1 = *lwork - nwork + 1; - zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], & - work[nwork], &i__1, &ierr); - -/* - Multiply Q in A by real matrix RWORK(IRU), storing the - result in WORK(IU), copying to A - (CWorkspace: need N*N, prefer M*N) - (Rworkspace: need 3*N*N, prefer N*N+2*M*N) -*/ - - nrwork = irvt; - i__1 = *m; - i__2 = ldwrku; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += - i__2) { -/* Computing MIN */ - i__3 = *m - i__ + 1; - chunk = min(i__3,ldwrku); - zlacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru], - n, &work[iu], &ldwrku, &rwork[nrwork]); - zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ + - a_dim1], lda); -/* L30: */ - } - } - - } else if (wntqs) { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = nrwork; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of A - (CWorkspace: need 3*N, prefer 2*N+N*NB) - (RWorkspace: 0) -*/ - - zlaset_("F", m, n, &c_b59, &c_b59, &u[u_offset], ldu); - zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - -/* - 
Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of A - (CWorkspace: need 3*N, prefer 2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - } else { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = nrwork; - irvt = iru + *n * *n; - nrwork = irvt + *n * *n; - dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, & - rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* Set the right corner of U to identity matrix */ - - zlaset_("F", m, m, &c_b59, &c_b59, &u[u_offset], ldu); - i__2 = *m - *n; - i__1 = *m - *n; - zlaset_("F", &i__2, &i__1, &c_b59, &c_b60, &u[*n + 1 + (*n + - 1) * u_dim1], ldu); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of A - (CWorkspace: need 2*N+M, prefer 2*N+M*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of A - (CWorkspace: need 3*N, prefer 2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & - ierr); - } - - } - - } else { - -/* - A has more columns than rows. 
If A has sufficiently more - columns than rows, first reduce using the LQ decomposition - (if sufficient workspace available) -*/ - - if (*n >= mnthr1) { - - if (wntqn) { - -/* - Path 1t (N much larger than M, JOBZ='N') - No singular vectors to be computed -*/ - - itau = 1; - nwork = itau + *m; - -/* - Compute A=L*Q - (CWorkspace: need 2*M, prefer M+M*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - -/* Zero out above L */ - - i__2 = *m - 1; - i__1 = *m - 1; - zlaset_("U", &i__2, &i__1, &c_b59, &c_b59, &a[((a_dim1) << (1) - ) + 1], lda); - ie = 1; - itauq = 1; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in A - (CWorkspace: need 3*M, prefer 2*M+2*M*NB) - (RWorkspace: need M) -*/ - - i__2 = *lwork - nwork + 1; - zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - nrwork = ie + *m; - -/* - Perform bidiagonal SVD, compute singular values only - (CWorkspace: 0) - (RWorkspace: need BDSPAC) -*/ - - dbdsdc_("U", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, & - c__1, dum, idum, &rwork[nrwork], &iwork[1], info); - - } else if (wntqo) { - -/* - Path 2t (N much larger than M, JOBZ='O') - M right singular vectors to be overwritten on A and - M left singular vectors to be computed in U -*/ - - ivt = 1; - ldwkvt = *m; - -/* WORK(IVT) is M by M */ - - il = ivt + ldwkvt * *m; - if (*lwork >= *m * *n + *m * *m + *m * 3) { - -/* WORK(IL) M by N */ - - ldwrkl = *m; - chunk = *n; - } else { - -/* WORK(IL) is M by CHUNK */ - - ldwrkl = *m; - chunk = (*lwork - *m * *m - *m * 3) / *m; - } - itau = il + ldwrkl * chunk; - nwork = itau + *m; - -/* - Compute A=L*Q - (CWorkspace: need 2*M, prefer M+M*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__2, &ierr); - -/* Copy L to WORK(IL), zeroing about above it */ - - zlacpy_("L", m, m, &a[a_offset], 
lda, &work[il], &ldwrkl); - i__2 = *m - 1; - i__1 = *m - 1; - zlaset_("U", &i__2, &i__1, &c_b59, &c_b59, &work[il + ldwrkl], - &ldwrkl); - -/* - Generate Q in A - (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) - (RWorkspace: 0) -*/ - - i__2 = *lwork - nwork + 1; - zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], - &i__2, &ierr); - ie = 1; - itauq = itau; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in WORK(IL) - (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) - (RWorkspace: need M) -*/ - - i__2 = *lwork - nwork + 1; - zgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = ie + *m; - irvt = iru + *m * *m; - nrwork = irvt + *m * *m; - dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix WORK(IU) - Overwrite WORK(IU) by the left singular vectors of L - (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) - Overwrite WORK(IVT) by the right singular vectors of L - (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt); - i__2 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[ - itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, & - ierr); - -/* - Multiply right singular vectors of L in WORK(IL) by Q - in A, storing result in 
WORK(IL) and copying to A - (CWorkspace: need 2*M*M, prefer M*M+M*N)) - (RWorkspace: 0) -*/ - - i__2 = *n; - i__1 = chunk; - for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += - i__1) { -/* Computing MIN */ - i__3 = *n - i__ + 1; - blk = min(i__3,chunk); - zgemm_("N", "N", m, &blk, m, &c_b60, &work[ivt], m, &a[ - i__ * a_dim1 + 1], lda, &c_b59, &work[il], & - ldwrkl); - zlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1 - + 1], lda); -/* L40: */ - } - - } else if (wntqs) { - -/* - Path 3t (N much larger than M, JOBZ='S') - M right singular vectors to be computed in VT and - M left singular vectors to be computed in U -*/ - - il = 1; - -/* WORK(IL) is M by M */ - - ldwrkl = *m; - itau = il + ldwrkl * *m; - nwork = itau + *m; - -/* - Compute A=L*Q - (CWorkspace: need 2*M, prefer M+M*NB) - (RWorkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - -/* Copy L to WORK(IL), zeroing out above it */ - - zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); - i__1 = *m - 1; - i__2 = *m - 1; - zlaset_("U", &i__1, &i__2, &c_b59, &c_b59, &work[il + ldwrkl], - &ldwrkl); - -/* - Generate Q in A - (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) - (RWorkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], - &i__1, &ierr); - ie = 1; - itauq = itau; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in WORK(IL) - (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) - (RWorkspace: need M) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = ie + *m; - irvt = iru + *m * *m; - nrwork = irvt + 
*m * *m; - dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of L - (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by left singular vectors of L - (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - -/* - Copy VT to WORK(IL), multiply right singular vectors of L - in WORK(IL) by Q in A, storing result in VT - (CWorkspace: need M*M) - (RWorkspace: 0) -*/ - - zlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl); - zgemm_("N", "N", m, n, m, &c_b60, &work[il], &ldwrkl, &a[ - a_offset], lda, &c_b59, &vt[vt_offset], ldvt); - - } else if (wntqa) { - -/* - Path 9t (N much larger than M, JOBZ='A') - N right singular vectors to be computed in VT and - M left singular vectors to be computed in U -*/ - - ivt = 1; - -/* WORK(IVT) is M by M */ - - ldwkvt = *m; - itau = ivt + ldwkvt * *m; - nwork = itau + *m; - -/* - Compute A=L*Q, copying result to VT - (CWorkspace: need 2*M, prefer M+M*NB) - (RWorkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & - i__1, &ierr); - zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - -/* - Generate Q in VT - (CWorkspace: need M+N, prefer M+N*NB) - (RWorkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[ - nwork], &i__1, 
&ierr); - -/* Produce L in A, zeroing out above it */ - - i__1 = *m - 1; - i__2 = *m - 1; - zlaset_("U", &i__1, &i__2, &c_b59, &c_b59, &a[((a_dim1) << (1) - ) + 1], lda); - ie = 1; - itauq = itau; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize L in A - (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) - (RWorkspace: need M) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[ - itauq], &work[itaup], &work[nwork], &i__1, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - iru = ie + *m; - irvt = iru + *m * *m; - nrwork = irvt + *m * *m; - dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of L - (CWorkspace: need 3*M, prefer 2*M+M*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) - Overwrite WORK(IVT) by right singular vectors of L - (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) - (RWorkspace: 0) -*/ - - zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt); - i__1 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", m, m, m, &a[a_offset], lda, &work[ - itaup], &work[ivt], &ldwkvt, &work[nwork], &i__1, & - ierr); - -/* - Multiply right singular vectors of L in WORK(IVT) by - Q in VT, storing result in A - (CWorkspace: need M*M) - (RWorkspace: 0) -*/ - - zgemm_("N", "N", m, n, m, &c_b60, &work[ivt], &ldwkvt, &vt[ - vt_offset], ldvt, &c_b59, &a[a_offset], lda); - -/* Copy right singular vectors of A from A to VT */ 
- - zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - - } - - } else if (*n >= mnthr2) { - -/* - MNTHR2 <= N < MNTHR1 - - Path 5t (N much larger than M, but not as much as MNTHR1) - Reduce to bidiagonal form without QR decomposition, use - ZUNGBR and matrix multiplication to compute singular vectors -*/ - - - ie = 1; - nrwork = ie + *m; - itauq = 1; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize A - (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) - (RWorkspace: M) -*/ - - i__1 = *lwork - nwork + 1; - zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], - &work[itaup], &work[nwork], &i__1, &ierr); - - if (wntqn) { - -/* - Compute singular values only - (Cworkspace: 0) - (Rworkspace: need BDSPAC) -*/ - - dbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, & - c__1, dum, idum, &rwork[nrwork], &iwork[1], info); - } else if (wntqo) { - irvt = nrwork; - iru = irvt + *m * *m; - nrwork = iru + *m * *m; - ivt = nwork; - -/* - Copy A to U, generate Q - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: 0) -*/ - - zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[ - nwork], &i__1, &ierr); - -/* - Generate P**H in A - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: 0) -*/ - - i__1 = *lwork - nwork + 1; - zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[ - nwork], &i__1, &ierr); - - ldwkvt = *m; - if (*lwork >= *m * *n + *m * 3) { - -/* WORK( IVT ) is M by N */ - - nwork = ivt + ldwkvt * *n; - chunk = *n; - } else { - -/* WORK( IVT ) is M by CHUNK */ - - chunk = (*lwork - *m * 3) / *m; - nwork = ivt + ldwkvt * chunk; - } - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - 
rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Multiply Q in U by real matrix RWORK(IRVT) - storing the result in WORK(IVT), copying to U - (Cworkspace: need 0) - (Rworkspace: need 2*M*M) -*/ - - zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &work[ivt], & - ldwkvt, &rwork[nrwork]); - zlacpy_("F", m, m, &work[ivt], &ldwkvt, &u[u_offset], ldu); - -/* - Multiply RWORK(IRVT) by P**H in A, storing the - result in WORK(IVT), copying to A - (CWorkspace: need M*M, prefer M*N) - (Rworkspace: need 2*M*M, prefer 2*M*N) -*/ - - nrwork = iru; - i__1 = *n; - i__2 = chunk; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += - i__2) { -/* Computing MIN */ - i__3 = *n - i__ + 1; - blk = min(i__3,chunk); - zlarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1], - lda, &work[ivt], &ldwkvt, &rwork[nrwork]); - zlacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ * - a_dim1 + 1], lda); -/* L50: */ - } - } else if (wntqs) { - -/* - Copy A to U, generate Q - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: 0) -*/ - - zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[ - nwork], &i__2, &ierr); - -/* - Copy A to VT, generate P**H - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: 0) -*/ - - zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zungbr_("P", m, n, m, &vt[vt_offset], ldvt, &work[itaup], & - work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - irvt = nrwork; - iru = irvt + *m * *m; - nrwork = iru + *m * *m; - dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Multiply Q in U by real matrix RWORK(IRU), storing the - 
result in A, copying to U - (CWorkspace: need 0) - (Rworkspace: need 3*M*M) -*/ - - zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset], - lda, &rwork[nrwork]); - zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu); - -/* - Multiply real matrix RWORK(IRVT) by P**H in VT, - storing the result in A, copying to VT - (Cworkspace: need 0) - (Rworkspace: need M*M+2*M*N) -*/ - - nrwork = iru; - zlarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[ - a_offset], lda, &rwork[nrwork]); - zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - } else { - -/* - Copy A to U, generate Q - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: 0) -*/ - - zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[ - nwork], &i__2, &ierr); - -/* - Copy A to VT, generate P**H - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: 0) -*/ - - zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - i__2 = *lwork - nwork + 1; - zungbr_("P", n, n, m, &vt[vt_offset], ldvt, &work[itaup], & - work[nwork], &i__2, &ierr); - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - irvt = nrwork; - iru = irvt + *m * *m; - nrwork = iru + *m * *m; - dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Multiply Q in U by real matrix RWORK(IRU), storing the - result in A, copying to U - (CWorkspace: need 0) - (Rworkspace: need 3*M*M) -*/ - - zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset], - lda, &rwork[nrwork]); - zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu); - -/* - Multiply real matrix RWORK(IRVT) by P**H in VT, - storing the result in A, copying to VT - (Cworkspace: need 0) - (Rworkspace: need 
M*M+2*M*N) -*/ - - zlarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[ - a_offset], lda, &rwork[nrwork]); - zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); - } - - } else { - -/* - N .LT. MNTHR2 - - Path 6t (N greater than M, but not much larger) - Reduce to bidiagonal form without LQ decomposition - Use ZUNMBR to compute singular vectors -*/ - - ie = 1; - nrwork = ie + *m; - itauq = 1; - itaup = itauq + *m; - nwork = itaup + *m; - -/* - Bidiagonalize A - (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) - (RWorkspace: M) -*/ - - i__2 = *lwork - nwork + 1; - zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], - &work[itaup], &work[nwork], &i__2, &ierr); - if (wntqn) { - -/* - Compute singular values only - (Cworkspace: 0) - (Rworkspace: need BDSPAC) -*/ - - dbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, & - c__1, dum, idum, &rwork[nrwork], &iwork[1], info); - } else if (wntqo) { - ldwkvt = *m; - ivt = nwork; - if (*lwork >= *m * *n + *m * 3) { - -/* WORK( IVT ) is M by N */ - - zlaset_("F", m, n, &c_b59, &c_b59, &work[ivt], &ldwkvt); - nwork = ivt + ldwkvt * *n; - } else { - -/* WORK( IVT ) is M by CHUNK */ - - chunk = (*lwork - *m * 3) / *m; - nwork = ivt + ldwkvt * chunk; - } - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - irvt = nrwork; - iru = irvt + *m * *m; - nrwork = iru + *m * *m; - dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of A - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: need 0) -*/ - - zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu); - i__2 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], 
ldu, &work[nwork], &i__2, &ierr); - - if (*lwork >= *m * *n + *m * 3) { - -/* - Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) - Overwrite WORK(IVT) by right singular vectors of A, - copying to A - (Cworkspace: need M*N+2*M, prefer M*N+M+M*NB) - (Rworkspace: need 0) -*/ - - zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt); - i__2 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[ - itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, - &ierr); - zlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda); - } else { - -/* - Generate P**H in A - (Cworkspace: need 2*M, prefer M+M*NB) - (Rworkspace: need 0) -*/ - - i__2 = *lwork - nwork + 1; - zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], & - work[nwork], &i__2, &ierr); - -/* - Multiply Q in A by real matrix RWORK(IRU), storing the - result in WORK(IU), copying to A - (CWorkspace: need M*M, prefer M*N) - (Rworkspace: need 3*M*M, prefer M*M+2*M*N) -*/ - - nrwork = iru; - i__2 = *n; - i__1 = chunk; - for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += - i__1) { -/* Computing MIN */ - i__3 = *n - i__ + 1; - blk = min(i__3,chunk); - zlarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1] - , lda, &work[ivt], &ldwkvt, &rwork[nrwork]); - zlacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ * - a_dim1 + 1], lda); -/* L60: */ - } - } - } else if (wntqs) { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - irvt = nrwork; - iru = irvt + *m * *m; - nrwork = iru + *m * *m; - dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of A - (CWorkspace: need 3*M, prefer 2*M+M*NB) - (RWorkspace: M*M) -*/ - - zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of A - (CWorkspace: need 3*M, prefer 2*M+M*NB) - (RWorkspace: M*M) -*/ - - zlaset_("F", m, n, &c_b59, &c_b59, &vt[vt_offset], ldvt); - zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - } else { - -/* - Perform bidiagonal SVD, computing left singular vectors - of bidiagonal matrix in RWORK(IRU) and computing right - singular vectors of bidiagonal matrix in RWORK(IRVT) - (CWorkspace: need 0) - (RWorkspace: need BDSPAC) -*/ - - irvt = nrwork; - iru = irvt + *m * *m; - nrwork = iru + *m * *m; - - dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, & - rwork[irvt], m, dum, idum, &rwork[nrwork], 
&iwork[1], - info); - -/* - Copy real matrix RWORK(IRU) to complex matrix U - Overwrite U by left singular vectors of A - (CWorkspace: need 3*M, prefer 2*M+M*NB) - (RWorkspace: M*M) -*/ - - zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu); - i__1 = *lwork - nwork + 1; - zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ - itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); - -/* Set the right corner of VT to identity matrix */ - - i__1 = *n - *m; - i__2 = *n - *m; - zlaset_("F", &i__1, &i__2, &c_b59, &c_b60, &vt[*m + 1 + (*m + - 1) * vt_dim1], ldvt); - -/* - Copy real matrix RWORK(IRVT) to complex matrix VT - Overwrite VT by right singular vectors of A - (CWorkspace: need 2*M+N, prefer 2*M+N*NB) - (RWorkspace: M*M) -*/ - - zlaset_("F", n, n, &c_b59, &c_b59, &vt[vt_offset], ldvt); - zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt); - i__1 = *lwork - nwork + 1; - zunmbr_("P", "R", "C", n, n, m, &a[a_offset], lda, &work[ - itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & - ierr); - } - - } - - } - -/* Undo scaling if necessary */ - - if (iscl == 1) { - if (anrm > bignum) { - dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & - minmn, &ierr); - } - if (anrm < smlnum) { - dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & - minmn, &ierr); - } - } - -/* Return optimal workspace in WORK(1) */ - - work[1].r = (doublereal) maxwrk, work[1].i = 0.; - - return 0; - -/* End of ZGESDD */ - -} /* zgesdd_ */ - -/* Subroutine */ int zgesv_(integer *n, integer *nrhs, doublecomplex *a, - integer *lda, integer *ipiv, doublecomplex *b, integer *ldb, integer * - info) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1; - - /* Local variables */ - extern /* Subroutine */ int xerbla_(char *, integer *), zgetrf_( - integer *, integer *, doublecomplex *, integer *, integer *, - integer *), zgetrs_(char *, integer *, integer *, doublecomplex *, - integer *, integer *, doublecomplex *, integer *, 
integer *); - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - March 31, 1993 - - - Purpose - ======= - - ZGESV computes the solution to a complex system of linear equations - A * X = B, - where A is an N-by-N matrix and X and B are N-by-NRHS matrices. - - The LU decomposition with partial pivoting and row interchanges is - used to factor A as - A = P * L * U, - where P is a permutation matrix, L is unit lower triangular, and U is - upper triangular. The factored form of A is then used to solve the - system of equations A * X = B. - - Arguments - ========= - - N (input) INTEGER - The number of linear equations, i.e., the order of the - matrix A. N >= 0. - - NRHS (input) INTEGER - The number of right hand sides, i.e., the number of columns - of the matrix B. NRHS >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the N-by-N coefficient matrix A. - On exit, the factors L and U from the factorization - A = P*L*U; the unit diagonal elements of L are not stored. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - IPIV (output) INTEGER array, dimension (N) - The pivot indices that define the permutation matrix P; - row i of the matrix was interchanged with row IPIV(i). - - B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) - On entry, the N-by-NRHS matrix of right hand side matrix B. - On exit, if INFO = 0, the N-by-NRHS solution matrix X. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, U(i,i) is exactly zero. The factorization - has been completed, but the factor U is exactly - singular, so the solution could not be computed. 
- - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - *info = 0; - if (*n < 0) { - *info = -1; - } else if (*nrhs < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } else if (*ldb < max(1,*n)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGESV ", &i__1); - return 0; - } - -/* Compute the LU factorization of A. */ - - zgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info); - if (*info == 0) { - -/* Solve the system A*X = B, overwriting B with X. */ - - zgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[ - b_offset], ldb, info); - } - return 0; - -/* End of ZGESV */ - -} /* zgesv_ */ - -/* Subroutine */ int zgetf2_(integer *m, integer *n, doublecomplex *a, - integer *lda, integer *ipiv, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Builtin functions */ - void z_div(doublecomplex *, doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer j, jp; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *), zgeru_(integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, integer *), zswap_(integer *, - doublecomplex *, integer *, doublecomplex *, integer *), xerbla_( - char *, integer *); - extern integer izamax_(integer *, doublecomplex *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGETF2 computes an LU factorization of a general m-by-n matrix A - using partial pivoting with row interchanges. 
- - The factorization has the form - A = P * L * U - where P is a permutation matrix, L is lower triangular with unit - diagonal elements (lower trapezoidal if m > n), and U is upper - triangular (upper trapezoidal if m < n). - - This is the right-looking Level 2 BLAS version of the algorithm. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the m by n matrix to be factored. - On exit, the factors L and U from the factorization - A = P*L*U; the unit diagonal elements of L are not stored. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - IPIV (output) INTEGER array, dimension (min(M,N)) - The pivot indices; for 1 <= i <= min(M,N), row i of the - matrix was interchanged with row IPIV(i). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization - has been completed, but the factor U is exactly - singular, and division by zero will occur if it is used - to solve a system of equations. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGETF2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - return 0; - } - - i__1 = min(*m,*n); - for (j = 1; j <= i__1; ++j) { - -/* Find pivot and test for singularity. 
*/ - - i__2 = *m - j + 1; - jp = j - 1 + izamax_(&i__2, &a[j + j * a_dim1], &c__1); - ipiv[j] = jp; - i__2 = jp + j * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) { - -/* Apply the interchange to columns 1:N. */ - - if (jp != j) { - zswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda); - } - -/* Compute elements J+1:M of J-th column. */ - - if (j < *m) { - i__2 = *m - j; - z_div(&z__1, &c_b60, &a[j + j * a_dim1]); - zscal_(&i__2, &z__1, &a[j + 1 + j * a_dim1], &c__1); - } - - } else if (*info == 0) { - - *info = j; - } - - if (j < min(*m,*n)) { - -/* Update trailing submatrix. */ - - i__2 = *m - j; - i__3 = *n - j; - z__1.r = -1., z__1.i = -0.; - zgeru_(&i__2, &i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &a[j + - (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda) - ; - } -/* L10: */ - } - return 0; - -/* End of ZGETF2 */ - -} /* zgetf2_ */ - -/* Subroutine */ int zgetrf_(integer *m, integer *n, doublecomplex *a, - integer *lda, integer *ipiv, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1; - - /* Local variables */ - static integer i__, j, jb, nb, iinfo; - extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, - integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), ztrsm_(char *, char *, char *, char *, - integer *, integer *, doublecomplex *, doublecomplex *, integer * - , doublecomplex *, integer *), - zgetf2_(integer *, integer *, doublecomplex *, integer *, integer - *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *, - integer *, integer *, integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGETRF computes an LU factorization of a general M-by-N matrix A - using partial pivoting with row interchanges. - - The factorization has the form - A = P * L * U - where P is a permutation matrix, L is lower triangular with unit - diagonal elements (lower trapezoidal if m > n), and U is upper - triangular (upper trapezoidal if m < n). - - This is the right-looking Level 3 BLAS version of the algorithm. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the M-by-N matrix to be factored. - On exit, the factors L and U from the factorization - A = P*L*U; the unit diagonal elements of L are not stored. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - IPIV (output) INTEGER array, dimension (min(M,N)) - The pivot indices; for 1 <= i <= min(M,N), row i of the - matrix was interchanged with row IPIV(i). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, U(i,i) is exactly zero. The factorization - has been completed, but the factor U is exactly - singular, and division by zero will occur if it is used - to solve a system of equations. - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*m)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGETRF", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - return 0; - } - -/* Determine the block size for this environment. */ - - nb = ilaenv_(&c__1, "ZGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) - 1); - if (nb <= 1 || nb >= min(*m,*n)) { - -/* Use unblocked code. */ - - zgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info); - } else { - -/* Use blocked code. */ - - i__1 = min(*m,*n); - i__2 = nb; - for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { -/* Computing MIN */ - i__3 = min(*m,*n) - j + 1; - jb = min(i__3,nb); - -/* - Factor diagonal and subdiagonal blocks and test for exact - singularity. -*/ - - i__3 = *m - j + 1; - zgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo); - -/* Adjust INFO and the pivot indices. */ - - if ((*info == 0 && iinfo > 0)) { - *info = iinfo + j - 1; - } -/* Computing MIN */ - i__4 = *m, i__5 = j + jb - 1; - i__3 = min(i__4,i__5); - for (i__ = j; i__ <= i__3; ++i__) { - ipiv[i__] = j - 1 + ipiv[i__]; -/* L10: */ - } - -/* Apply interchanges to columns 1:J-1. */ - - i__3 = j - 1; - i__4 = j + jb - 1; - zlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1); - - if (j + jb <= *n) { - -/* Apply interchanges to columns J+JB:N. */ - - i__3 = *n - j - jb + 1; - i__4 = j + jb - 1; - zlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, & - ipiv[1], &c__1); - -/* Compute block row of U. */ - - i__3 = *n - j - jb + 1; - ztrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, & - c_b60, &a[j + j * a_dim1], lda, &a[j + (j + jb) * - a_dim1], lda); - if (j + jb <= *m) { - -/* Update trailing submatrix. 
*/ - - i__3 = *m - j - jb + 1; - i__4 = *n - j - jb + 1; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "No transpose", &i__3, &i__4, &jb, - &z__1, &a[j + jb + j * a_dim1], lda, &a[j + (j + - jb) * a_dim1], lda, &c_b60, &a[j + jb + (j + jb) * - a_dim1], lda); - } - } -/* L20: */ - } - } - return 0; - -/* End of ZGETRF */ - -} /* zgetrf_ */ - -/* Subroutine */ int zgetrs_(char *trans, integer *n, integer *nrhs, - doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b, - integer *ldb, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1; - - /* Local variables */ - extern logical lsame_(char *, char *); - extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *, - integer *, integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *), - xerbla_(char *, integer *); - static logical notran; - extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *, - integer *, integer *, integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZGETRS solves a system of linear equations - A * X = B, A**T * X = B, or A**H * X = B - with a general N-by-N matrix A using the LU factorization computed - by ZGETRF. - - Arguments - ========= - - TRANS (input) CHARACTER*1 - Specifies the form of the system of equations: - = 'N': A * X = B (No transpose) - = 'T': A**T * X = B (Transpose) - = 'C': A**H * X = B (Conjugate transpose) - - N (input) INTEGER - The order of the matrix A. N >= 0. - - NRHS (input) INTEGER - The number of right hand sides, i.e., the number of columns - of the matrix B. NRHS >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,N) - The factors L and U from the factorization A = P*L*U - as computed by ZGETRF. - - LDA (input) INTEGER - The leading dimension of the array A. 
LDA >= max(1,N). - - IPIV (input) INTEGER array, dimension (N) - The pivot indices from ZGETRF; for 1<=i<=N, row i of the - matrix was interchanged with row IPIV(i). - - B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) - On entry, the right hand side matrix B. - On exit, the solution matrix X. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - *info = 0; - notran = lsame_(trans, "N"); - if (((! notran && ! lsame_(trans, "T")) && ! lsame_( - trans, "C"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*nrhs < 0) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if (*ldb < max(1,*n)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZGETRS", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0 || *nrhs == 0) { - return 0; - } - - if (notran) { - -/* - Solve A * X = B. - - Apply row interchanges to the right hand sides. -*/ - - zlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1); - -/* Solve L*X = B, overwriting B with X. */ - - ztrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b60, &a[ - a_offset], lda, &b[b_offset], ldb); - -/* Solve U*X = B, overwriting B with X. */ - - ztrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b60, & - a[a_offset], lda, &b[b_offset], ldb); - } else { - -/* - Solve A**T * X = B or A**H * X = B. - - Solve U'*X = B, overwriting B with X. -*/ - - ztrsm_("Left", "Upper", trans, "Non-unit", n, nrhs, &c_b60, &a[ - a_offset], lda, &b[b_offset], ldb); - -/* Solve L'*X = B, overwriting B with X. 
*/ - - ztrsm_("Left", "Lower", trans, "Unit", n, nrhs, &c_b60, &a[a_offset], - lda, &b[b_offset], ldb); - -/* Apply row interchanges to the solution vectors. */ - - zlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1); - } - - return 0; - -/* End of ZGETRS */ - -} /* zgetrs_ */ - -/* Subroutine */ int zheevd_(char *jobz, char *uplo, integer *n, - doublecomplex *a, integer *lda, doublereal *w, doublecomplex *work, - integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork, - integer *liwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal eps; - static integer inde; - static doublereal anrm; - static integer imax; - static doublereal rmin, rmax; - static integer lopt; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - static doublereal sigma; - extern logical lsame_(char *, char *); - static integer iinfo, lwmin, liopt; - static logical lower; - static integer llrwk, lropt; - static logical wantz; - static integer indwk2, llwrk2; - - static integer iscale; - static doublereal safmin; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal bignum; - extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *, - integer *, doublereal *); - static integer indtau; - extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, - integer *), zlascl_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, integer *, doublecomplex *, integer *, - integer *), zstedc_(char *, integer *, doublereal *, - doublereal *, doublecomplex *, integer *, doublecomplex *, - integer *, doublereal *, integer *, integer *, integer *, integer - *); - static integer indrwk, indwrk, liwmin; - extern /* Subroutine */ int zhetrd_(char *, integer *, doublecomplex *, - integer *, doublereal *, doublereal 
*, doublecomplex *, - doublecomplex *, integer *, integer *), zlacpy_(char *, - integer *, integer *, doublecomplex *, integer *, doublecomplex *, - integer *); - static integer lrwmin, llwork; - static doublereal smlnum; - static logical lquery; - extern /* Subroutine */ int zunmtr_(char *, char *, char *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *); - - -/* - -- LAPACK driver routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZHEEVD computes all eigenvalues and, optionally, eigenvectors of a - complex Hermitian matrix A. If eigenvectors are desired, it uses a - divide and conquer algorithm. - - The divide and conquer algorithm makes very mild assumptions about - floating point arithmetic. It will work on machines with a guard - digit in add/subtract, or on those binary machines without guard - digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or - Cray-2. It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Arguments - ========= - - JOBZ (input) CHARACTER*1 - = 'N': Compute eigenvalues only; - = 'V': Compute eigenvalues and eigenvectors. - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A is stored; - = 'L': Lower triangle of A is stored. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA, N) - On entry, the Hermitian matrix A. If UPLO = 'U', the - leading N-by-N upper triangular part of A contains the - upper triangular part of the matrix A. If UPLO = 'L', - the leading N-by-N lower triangular part of A contains - the lower triangular part of the matrix A. - On exit, if JOBZ = 'V', then if INFO = 0, A contains the - orthonormal eigenvectors of the matrix A. 
- If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') - or the upper triangle (if UPLO='U') of A, including the - diagonal, is destroyed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - W (output) DOUBLE PRECISION array, dimension (N) - If INFO = 0, the eigenvalues in ascending order. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The length of the array WORK. - If N <= 1, LWORK must be at least 1. - If JOBZ = 'N' and N > 1, LWORK must be at least N + 1. - If JOBZ = 'V' and N > 1, LWORK must be at least 2*N + N**2. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - RWORK (workspace/output) DOUBLE PRECISION array, - dimension (LRWORK) - On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. - - LRWORK (input) INTEGER - The dimension of the array RWORK. - If N <= 1, LRWORK must be at least 1. - If JOBZ = 'N' and N > 1, LRWORK must be at least N. - If JOBZ = 'V' and N > 1, LRWORK must be at least - 1 + 5*N + 2*N**2. - - If LRWORK = -1, then a workspace query is assumed; the - routine only calculates the optimal size of the RWORK array, - returns this value as the first entry of the RWORK array, and - no error message related to LRWORK is issued by XERBLA. - - IWORK (workspace/output) INTEGER array, dimension (LIWORK) - On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. - - LIWORK (input) INTEGER - The dimension of the array IWORK. - If N <= 1, LIWORK must be at least 1. - If JOBZ = 'N' and N > 1, LIWORK must be at least 1. - If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. 
- - If LIWORK = -1, then a workspace query is assumed; the - routine only calculates the optimal size of the IWORK array, - returns this value as the first entry of the IWORK array, and - no error message related to LIWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, the algorithm failed to converge; i - off-diagonal elements of an intermediate tridiagonal - form did not converge to zero. - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --w; - --work; - --rwork; - --iwork; - - /* Function Body */ - wantz = lsame_(jobz, "V"); - lower = lsame_(uplo, "L"); - lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1; - - *info = 0; - if (*n <= 1) { - lwmin = 1; - lrwmin = 1; - liwmin = 1; - lopt = lwmin; - lropt = lrwmin; - liopt = liwmin; - } else { - if (wantz) { - lwmin = ((*n) << (1)) + *n * *n; -/* Computing 2nd power */ - i__1 = *n; - lrwmin = *n * 5 + 1 + ((i__1 * i__1) << (1)); - liwmin = *n * 5 + 3; - } else { - lwmin = *n + 1; - lrwmin = *n; - liwmin = 1; - } - lopt = lwmin; - lropt = lrwmin; - liopt = liwmin; - } - if (! (wantz || lsame_(jobz, "N"))) { - *info = -1; - } else if (! (lower || lsame_(uplo, "U"))) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if ((*lwork < lwmin && ! lquery)) { - *info = -8; - } else if ((*lrwork < lrwmin && ! lquery)) { - *info = -10; - } else if ((*liwork < liwmin && ! 
lquery)) { - *info = -12; - } - - if (*info == 0) { - work[1].r = (doublereal) lopt, work[1].i = 0.; - rwork[1] = (doublereal) lropt; - iwork[1] = liopt; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZHEEVD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - if (*n == 1) { - i__1 = a_dim1 + 1; - w[1] = a[i__1].r; - if (wantz) { - i__1 = a_dim1 + 1; - a[i__1].r = 1., a[i__1].i = 0.; - } - return 0; - } - -/* Get machine constants. */ - - safmin = SAFEMINIMUM; - eps = PRECISION; - smlnum = safmin / eps; - bignum = 1. / smlnum; - rmin = sqrt(smlnum); - rmax = sqrt(bignum); - -/* Scale matrix to allowable range, if necessary. */ - - anrm = zlanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]); - iscale = 0; - if ((anrm > 0. && anrm < rmin)) { - iscale = 1; - sigma = rmin / anrm; - } else if (anrm > rmax) { - iscale = 1; - sigma = rmax / anrm; - } - if (iscale == 1) { - zlascl_(uplo, &c__0, &c__0, &c_b1015, &sigma, n, n, &a[a_offset], lda, - info); - } - -/* Call ZHETRD to reduce Hermitian matrix to tridiagonal form. */ - - inde = 1; - indtau = 1; - indwrk = indtau + *n; - indrwk = inde + *n; - indwk2 = indwrk + *n * *n; - llwork = *lwork - indwrk + 1; - llwrk2 = *lwork - indwk2 + 1; - llrwk = *lrwork - indrwk + 1; - zhetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], & - work[indwrk], &llwork, &iinfo); -/* Computing MAX */ - i__1 = indwrk; - d__1 = (doublereal) lopt, d__2 = (doublereal) (*n) + work[i__1].r; - lopt = (integer) max(d__1,d__2); - -/* - For eigenvalues only, call DSTERF. For eigenvectors, first call - ZSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the - tridiagonal matrix, then call ZUNMTR to multiply it to the - Householder transformations represented as Householder vectors in - A. -*/ - - if (! 
wantz) { - dsterf_(n, &w[1], &rwork[inde], info); - } else { - zstedc_("I", n, &w[1], &rwork[inde], &work[indwrk], n, &work[indwk2], - &llwrk2, &rwork[indrwk], &llrwk, &iwork[1], liwork, info); - zunmtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[ - indwrk], n, &work[indwk2], &llwrk2, &iinfo); - zlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda); -/* - Computing MAX - Computing 2nd power -*/ - i__3 = *n; - i__4 = indwk2; - i__1 = lopt, i__2 = *n + i__3 * i__3 + (integer) work[i__4].r; - lopt = max(i__1,i__2); - } - -/* If matrix was scaled, then rescale eigenvalues appropriately. */ - - if (iscale == 1) { - if (*info == 0) { - imax = *n; - } else { - imax = *info - 1; - } - d__1 = 1. / sigma; - dscal_(&imax, &d__1, &w[1], &c__1); - } - - work[1].r = (doublereal) lopt, work[1].i = 0.; - rwork[1] = (doublereal) lropt; - iwork[1] = liopt; - - return 0; - -/* End of ZHEEVD */ - -} /* zheevd_ */ - -/* Subroutine */ int zhetd2_(char *uplo, integer *n, doublecomplex *a, - integer *lda, doublereal *d__, doublereal *e, doublecomplex *tau, - integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublereal d__1; - doublecomplex z__1, z__2, z__3, z__4; - - /* Local variables */ - static integer i__; - static doublecomplex taui; - extern /* Subroutine */ int zher2_(char *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static doublecomplex alpha; - extern logical lsame_(char *, char *); - extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *); - extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, doublecomplex *, integer *); - static logical upper; - extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *), 
xerbla_( - char *, integer *), zlarfg_(integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - ZHETD2 reduces a complex Hermitian matrix A to real symmetric - tridiagonal form T by a unitary similarity transformation: - Q' * A * Q = T. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies whether the upper or lower triangular part of the - Hermitian matrix A is stored: - = 'U': Upper triangular - = 'L': Lower triangular - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the Hermitian matrix A. If UPLO = 'U', the leading - n-by-n upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading n-by-n lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - On exit, if UPLO = 'U', the diagonal and first superdiagonal - of A are overwritten by the corresponding elements of the - tridiagonal matrix T, and the elements above the first - superdiagonal, with the array TAU, represent the unitary - matrix Q as a product of elementary reflectors; if UPLO - = 'L', the diagonal and first subdiagonal of A are over- - written by the corresponding elements of the tridiagonal - matrix T, and the elements below the first subdiagonal, with - the array TAU, represent the unitary matrix Q as a product - of elementary reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - D (output) DOUBLE PRECISION array, dimension (N) - The diagonal elements of the tridiagonal matrix T: - D(i) = A(i,i). 
- - E (output) DOUBLE PRECISION array, dimension (N-1) - The off-diagonal elements of the tridiagonal matrix T: - E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. - - TAU (output) COMPLEX*16 array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - If UPLO = 'U', the matrix Q is represented as a product of elementary - reflectors - - Q = H(n-1) . . . H(2) H(1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in - A(1:i-1,i+1), and tau in TAU(i). - - If UPLO = 'L', the matrix Q is represented as a product of elementary - reflectors - - Q = H(1) H(2) . . . H(n-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), - and tau in TAU(i). - - The contents of A on exit are illustrated by the following examples - with n = 5: - - if UPLO = 'U': if UPLO = 'L': - - ( d e v2 v3 v4 ) ( d ) - ( d e v3 v4 ) ( e d ) - ( d e v4 ) ( v1 e d ) - ( d e ) ( v1 v2 e d ) - ( d ) ( v1 v2 v3 e d ) - - where d and e denote diagonal and off-diagonal elements of T, and vi - denotes an element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tau; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - if ((! upper && ! 
lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZHETD2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n <= 0) { - return 0; - } - - if (upper) { - -/* Reduce the upper triangle of A */ - - i__1 = *n + *n * a_dim1; - i__2 = *n + *n * a_dim1; - d__1 = a[i__2].r; - a[i__1].r = d__1, a[i__1].i = 0.; - for (i__ = *n - 1; i__ >= 1; --i__) { - -/* - Generate elementary reflector H(i) = I - tau * v * v' - to annihilate A(1:i-1,i+1) -*/ - - i__1 = i__ + (i__ + 1) * a_dim1; - alpha.r = a[i__1].r, alpha.i = a[i__1].i; - zlarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui); - i__1 = i__; - e[i__1] = alpha.r; - - if (taui.r != 0. || taui.i != 0.) { - -/* Apply H(i) from both sides to A(1:i,1:i) */ - - i__1 = i__ + (i__ + 1) * a_dim1; - a[i__1].r = 1., a[i__1].i = 0.; - -/* Compute x := tau * A * v storing x in TAU(1:i) */ - - zhemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * - a_dim1 + 1], &c__1, &c_b59, &tau[1], &c__1) - ; - -/* Compute w := x - 1/2 * tau * (x'*v) * v */ - - z__3.r = -.5, z__3.i = -0.; - z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r * - taui.i + z__3.i * taui.r; - zdotc_(&z__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1] - , &c__1); - z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * - z__4.i + z__2.i * z__4.r; - alpha.r = z__1.r, alpha.i = z__1.i; - zaxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[ - 1], &c__1); - -/* - Apply the transformation as a rank-2 update: - A := A - v * w' - w * v' -*/ - - z__1.r = -1., z__1.i = -0.; - zher2_(uplo, &i__, &z__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, & - tau[1], &c__1, &a[a_offset], lda); - - } else { - i__1 = i__ + i__ * a_dim1; - i__2 = i__ + i__ * a_dim1; - d__1 = a[i__2].r; - a[i__1].r = d__1, a[i__1].i = 0.; - } - i__1 = i__ + (i__ + 1) * a_dim1; - i__2 = i__; - a[i__1].r = e[i__2], a[i__1].i = 0.; - i__1 = 
i__ + 1; - i__2 = i__ + 1 + (i__ + 1) * a_dim1; - d__[i__1] = a[i__2].r; - i__1 = i__; - tau[i__1].r = taui.r, tau[i__1].i = taui.i; -/* L10: */ - } - i__1 = a_dim1 + 1; - d__[1] = a[i__1].r; - } else { - -/* Reduce the lower triangle of A */ - - i__1 = a_dim1 + 1; - i__2 = a_dim1 + 1; - d__1 = a[i__2].r; - a[i__1].r = d__1, a[i__1].i = 0.; - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* - Generate elementary reflector H(i) = I - tau * v * v' - to annihilate A(i+2:n,i) -*/ - - i__2 = i__ + 1 + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, & - taui); - i__2 = i__; - e[i__2] = alpha.r; - - if (taui.r != 0. || taui.i != 0.) { - -/* Apply H(i) from both sides to A(i+1:n,i+1:n) */ - - i__2 = i__ + 1 + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute x := tau * A * v storing y in TAU(i:n-1) */ - - i__2 = *n - i__; - zhemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], - lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b59, &tau[ - i__], &c__1); - -/* Compute w := x - 1/2 * tau * (x'*v) * v */ - - z__3.r = -.5, z__3.i = -0.; - z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r * - taui.i + z__3.i * taui.r; - i__2 = *n - i__; - zdotc_(&z__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ * - a_dim1], &c__1); - z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * - z__4.i + z__2.i * z__4.r; - alpha.r = z__1.r, alpha.i = z__1.i; - i__2 = *n - i__; - zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ - i__], &c__1); - -/* - Apply the transformation as a rank-2 update: - A := A - v * w' - w * v' -*/ - - i__2 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zher2_(uplo, &i__2, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1, - &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1], - lda); - - } else { - i__2 = i__ + 1 + (i__ + 1) * a_dim1; - i__3 = i__ + 1 + (i__ + 1) * a_dim1; - d__1 = a[i__3].r; - 
a[i__2].r = d__1, a[i__2].i = 0.; - } - i__2 = i__ + 1 + i__ * a_dim1; - i__3 = i__; - a[i__2].r = e[i__3], a[i__2].i = 0.; - i__2 = i__; - i__3 = i__ + i__ * a_dim1; - d__[i__2] = a[i__3].r; - i__2 = i__; - tau[i__2].r = taui.r, tau[i__2].i = taui.i; -/* L20: */ - } - i__1 = *n; - i__2 = *n + *n * a_dim1; - d__[i__1] = a[i__2].r; - } - - return 0; - -/* End of ZHETD2 */ - -} /* zhetd2_ */ - -/* Subroutine */ int zhetrd_(char *uplo, integer *n, doublecomplex *a, - integer *lda, doublereal *d__, doublereal *e, doublecomplex *tau, - doublecomplex *work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1; - - /* Local variables */ - static integer i__, j, nb, kk, nx, iws; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - static logical upper; - extern /* Subroutine */ int zhetd2_(char *, integer *, doublecomplex *, - integer *, doublereal *, doublereal *, doublecomplex *, integer *), zher2k_(char *, char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublereal *, doublecomplex *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlatrd_(char *, integer *, integer *, - doublecomplex *, integer *, doublereal *, doublecomplex *, - doublecomplex *, integer *); - static integer ldwork, lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZHETRD reduces a complex Hermitian matrix A to real symmetric - tridiagonal form T by a unitary similarity transformation: - Q**H * A * Q = T. 
- - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A is stored; - = 'L': Lower triangle of A is stored. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the Hermitian matrix A. If UPLO = 'U', the leading - N-by-N upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading N-by-N lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - On exit, if UPLO = 'U', the diagonal and first superdiagonal - of A are overwritten by the corresponding elements of the - tridiagonal matrix T, and the elements above the first - superdiagonal, with the array TAU, represent the unitary - matrix Q as a product of elementary reflectors; if UPLO - = 'L', the diagonal and first subdiagonal of A are over- - written by the corresponding elements of the tridiagonal - matrix T, and the elements below the first subdiagonal, with - the array TAU, represent the unitary matrix Q as a product - of elementary reflectors. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - D (output) DOUBLE PRECISION array, dimension (N) - The diagonal elements of the tridiagonal matrix T: - D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (N-1) - The off-diagonal elements of the tridiagonal matrix T: - E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. - - TAU (output) COMPLEX*16 array, dimension (N-1) - The scalar factors of the elementary reflectors (see Further - Details). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= 1. 
- For optimum performance LWORK >= N*NB, where NB is the - optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - If UPLO = 'U', the matrix Q is represented as a product of elementary - reflectors - - Q = H(n-1) . . . H(2) H(1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in - A(1:i-1,i+1), and tau in TAU(i). - - If UPLO = 'L', the matrix Q is represented as a product of elementary - reflectors - - Q = H(1) H(2) . . . H(n-1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), - and tau in TAU(i). - - The contents of A on exit are illustrated by the following examples - with n = 5: - - if UPLO = 'U': if UPLO = 'L': - - ( d e v2 v3 v4 ) ( d ) - ( d e v3 v4 ) ( e d ) - ( d e v4 ) ( v1 e d ) - ( d e ) ( v1 v2 e d ) - ( d ) ( v1 v2 v3 e d ) - - where d and e denote diagonal and off-diagonal elements of T, and vi - denotes an element of the vector defining H(i). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tau; - --work; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - lquery = *lwork == -1; - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } else if ((*lwork < 1 && ! 
lquery)) { - *info = -9; - } - - if (*info == 0) { - -/* Determine the block size. */ - - nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, - (ftnlen)1); - lwkopt = *n * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZHETRD", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nx = *n; - iws = 1; - if ((nb > 1 && nb < *n)) { - -/* - Determine when to cross over from blocked to unblocked code - (last block is always handled by unblocked code). - - Computing MAX -*/ - i__1 = nb, i__2 = ilaenv_(&c__3, "ZHETRD", uplo, n, &c_n1, &c_n1, & - c_n1, (ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < *n) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *n; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: determine the - minimum value of NB, and reduce NB or force use of - unblocked code by setting NX = N. - - Computing MAX -*/ - i__1 = *lwork / ldwork; - nb = max(i__1,1); - nbmin = ilaenv_(&c__2, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1, - (ftnlen)6, (ftnlen)1); - if (nb < nbmin) { - nx = *n; - } - } - } else { - nx = *n; - } - } else { - nb = 1; - } - - if (upper) { - -/* - Reduce the upper triangle of A. - Columns 1:kk are handled by the unblocked method. -*/ - - kk = *n - (*n - nx + nb - 1) / nb * nb; - i__1 = kk + 1; - i__2 = -nb; - for (i__ = *n - nb + 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += - i__2) { - -/* - Reduce columns i:i+nb-1 to tridiagonal form and form the - matrix W which is needed to update the unreduced part of - the matrix -*/ - - i__3 = i__ + nb - 1; - zlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], & - work[1], &ldwork); - -/* - Update the unreduced submatrix A(1:i-1,1:i-1), using an - update of the form: A := A - V*W' - W*V' -*/ - - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zher2k_(uplo, "No transpose", &i__3, &nb, &z__1, &a[i__ * a_dim1 - + 1], lda, &work[1], &ldwork, &c_b1015, &a[a_offset], lda); - -/* - Copy superdiagonal elements back into A, and diagonal - elements into D -*/ - - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - i__4 = j - 1 + j * a_dim1; - i__5 = j - 1; - a[i__4].r = e[i__5], a[i__4].i = 0.; - i__4 = j; - i__5 = j + j * a_dim1; - d__[i__4] = a[i__5].r; -/* L10: */ - } -/* L20: */ - } - -/* Use unblocked code to reduce the last or only block */ - - zhetd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo); - } else { - -/* Reduce the lower triangle of A */ - - i__2 = *n - nx; - i__1 = nb; - for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { - -/* - Reduce columns i:i+nb-1 to tridiagonal form and form the - matrix W which is needed to update the unreduced part of - the matrix -*/ - - i__3 = *n - i__ + 1; - zlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], & - tau[i__], &work[1], &ldwork); - -/* - Update the unreduced submatrix A(i+nb:n,i+nb:n), using - an update of the form: A := A - V*W' - W*V' -*/ - - i__3 = *n - i__ - nb + 1; - z__1.r = -1., z__1.i = -0.; - zher2k_(uplo, "No transpose", &i__3, &nb, &z__1, &a[i__ + nb + - i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b1015, &a[ - i__ + nb + (i__ + nb) * a_dim1], lda); - -/* - Copy subdiagonal elements back into A, and diagonal - elements into D -*/ - - i__3 = i__ + nb - 1; - for (j = i__; j <= i__3; ++j) { - i__4 = j + 1 + j * a_dim1; - i__5 = j; - a[i__4].r = e[i__5], a[i__4].i = 0.; - i__4 = j; - i__5 = j + j * a_dim1; - d__[i__4] = a[i__5].r; -/* L30: */ - } -/* L40: */ - } - -/* Use unblocked code to reduce the last or only block */ - - i__1 = *n - i__ + 1; - zhetd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], - &tau[i__], &iinfo); - } - - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZHETRD */ - -} /* zhetrd_ */ - -/* Subroutine */ int zhseqr_(char *job, char *compz, integer *n, integer *ilo, - integer *ihi, doublecomplex *h__, integer *ldh, doublecomplex *w, - doublecomplex *z__, integer *ldz, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4[2], - i__5, i__6; - doublereal d__1, d__2, d__3, d__4; - doublecomplex z__1; - char ch__1[2]; - - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__, j, k, l; - static doublecomplex s[225] /* was 
[15][15] */, v[16]; - static integer i1, i2, ii, nh, nr, ns, nv; - static doublecomplex vv[16]; - static integer itn; - static doublecomplex tau; - static integer its; - static doublereal ulp, tst1; - static integer maxb, ierr; - static doublereal unfl; - static doublecomplex temp; - static doublereal ovfl; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *); - static integer itemp; - static doublereal rtemp; - extern /* Subroutine */ int zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *); - static logical initz, wantt, wantz; - static doublereal rwork[1]; - extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - extern doublereal dlapy2_(doublereal *, doublereal *); - extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); - - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zdscal_(integer *, doublereal *, - doublecomplex *, integer *), zlarfg_(integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *); - extern integer izamax_(integer *, doublecomplex *, integer *); - extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *, - doublereal *); - extern /* Subroutine */ int zlahqr_(logical *, logical *, integer *, - integer *, integer *, doublecomplex *, integer *, doublecomplex *, - integer *, integer *, doublecomplex *, integer *, integer *), - zlacpy_(char *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *), zlaset_(char *, integer *, - integer *, doublecomplex *, doublecomplex *, doublecomplex *, - integer *), zlarfx_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, doublecomplex *, 
integer *, - doublecomplex *); - static doublereal smlnum; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZHSEQR computes the eigenvalues of a complex upper Hessenberg - matrix H, and, optionally, the matrices T and Z from the Schur - decomposition H = Z T Z**H, where T is an upper triangular matrix - (the Schur form), and Z is the unitary matrix of Schur vectors. - - Optionally Z may be postmultiplied into an input unitary matrix Q, - so that this routine can give the Schur factorization of a matrix A - which has been reduced to the Hessenberg form H by the unitary - matrix Q: A = Q*H*Q**H = (QZ)*T*(QZ)**H. - - Arguments - ========= - - JOB (input) CHARACTER*1 - = 'E': compute eigenvalues only; - = 'S': compute eigenvalues and the Schur form T. - - COMPZ (input) CHARACTER*1 - = 'N': no Schur vectors are computed; - = 'I': Z is initialized to the unit matrix and the matrix Z - of Schur vectors of H is returned; - = 'V': Z must contain an unitary matrix Q on entry, and - the product Q*Z is returned. - - N (input) INTEGER - The order of the matrix H. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that H is already upper triangular in rows - and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally - set by a previous call to ZGEBAL, and then passed to CGEHRD - when the matrix output by ZGEBAL is reduced to Hessenberg - form. Otherwise ILO and IHI should be set to 1 and N - respectively. - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - H (input/output) COMPLEX*16 array, dimension (LDH,N) - On entry, the upper Hessenberg matrix H. - On exit, if JOB = 'S', H contains the upper triangular matrix - T from the Schur decomposition (the Schur form). If - JOB = 'E', the contents of H are unspecified on exit. 
- - LDH (input) INTEGER - The leading dimension of the array H. LDH >= max(1,N). - - W (output) COMPLEX*16 array, dimension (N) - The computed eigenvalues. If JOB = 'S', the eigenvalues are - stored in the same order as on the diagonal of the Schur form - returned in H, with W(i) = H(i,i). - - Z (input/output) COMPLEX*16 array, dimension (LDZ,N) - If COMPZ = 'N': Z is not referenced. - If COMPZ = 'I': on entry, Z need not be set, and on exit, Z - contains the unitary matrix Z of the Schur vectors of H. - If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q, - which is assumed to be equal to the unit matrix except for - the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z. - Normally Q is the unitary matrix generated by ZUNGHR after - the call to ZGEHRD which formed the Hessenberg matrix H. - - LDZ (input) INTEGER - The leading dimension of the array Z. - LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,N). - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, ZHSEQR failed to compute all the - eigenvalues in a total of 30*(IHI-ILO+1) iterations; - elements 1:ilo-1 and i+1:n of W contain those - eigenvalues which have been successfully computed. 
- - ===================================================================== - - - Decode and test the input parameters -*/ - - /* Parameter adjustments */ - h_dim1 = *ldh; - h_offset = 1 + h_dim1 * 1; - h__ -= h_offset; - --w; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - --work; - - /* Function Body */ - wantt = lsame_(job, "S"); - initz = lsame_(compz, "I"); - wantz = initz || lsame_(compz, "V"); - - *info = 0; - i__1 = max(1,*n); - work[1].r = (doublereal) i__1, work[1].i = 0.; - lquery = *lwork == -1; - if ((! lsame_(job, "E") && ! wantt)) { - *info = -1; - } else if ((! lsame_(compz, "N") && ! wantz)) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -4; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -5; - } else if (*ldh < max(1,*n)) { - *info = -7; - } else if (*ldz < 1 || (wantz && *ldz < max(1,*n))) { - *info = -10; - } else if ((*lwork < max(1,*n) && ! lquery)) { - *info = -12; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZHSEQR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Initialize Z, if necessary */ - - if (initz) { - zlaset_("Full", n, n, &c_b59, &c_b60, &z__[z_offset], ldz); - } - -/* Store the eigenvalues isolated by ZGEBAL. */ - - i__1 = *ilo - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__ + i__ * h_dim1; - w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i; -/* L10: */ - } - i__1 = *n; - for (i__ = *ihi + 1; i__ <= i__1; ++i__) { - i__2 = i__; - i__3 = i__ + i__ * h_dim1; - w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i; -/* L20: */ - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - if (*ilo == *ihi) { - i__1 = *ilo; - i__2 = *ilo + *ilo * h_dim1; - w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i; - return 0; - } - -/* - Set rows and columns ILO to IHI to zero below the first - subdiagonal. 
-*/ - - i__1 = *ihi - 2; - for (j = *ilo; j <= i__1; ++j) { - i__2 = *n; - for (i__ = j + 2; i__ <= i__2; ++i__) { - i__3 = i__ + j * h_dim1; - h__[i__3].r = 0., h__[i__3].i = 0.; -/* L30: */ - } -/* L40: */ - } - nh = *ihi - *ilo + 1; - -/* - I1 and I2 are the indices of the first row and last column of H - to which transformations must be applied. If eigenvalues only are - being computed, I1 and I2 are re-set inside the main loop. -*/ - - if (wantt) { - i1 = 1; - i2 = *n; - } else { - i1 = *ilo; - i2 = *ihi; - } - -/* Ensure that the subdiagonal elements are real. */ - - i__1 = *ihi; - for (i__ = *ilo + 1; i__ <= i__1; ++i__) { - i__2 = i__ + (i__ - 1) * h_dim1; - temp.r = h__[i__2].r, temp.i = h__[i__2].i; - if (d_imag(&temp) != 0.) { - d__1 = temp.r; - d__2 = d_imag(&temp); - rtemp = dlapy2_(&d__1, &d__2); - i__2 = i__ + (i__ - 1) * h_dim1; - h__[i__2].r = rtemp, h__[i__2].i = 0.; - z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp; - temp.r = z__1.r, temp.i = z__1.i; - if (i2 > i__) { - i__2 = i2 - i__; - d_cnjg(&z__1, &temp); - zscal_(&i__2, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh); - } - i__2 = i__ - i1; - zscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1); - if (i__ < *ihi) { - i__2 = i__ + 1 + i__ * h_dim1; - i__3 = i__ + 1 + i__ * h_dim1; - z__1.r = temp.r * h__[i__3].r - temp.i * h__[i__3].i, z__1.i = - temp.r * h__[i__3].i + temp.i * h__[i__3].r; - h__[i__2].r = z__1.r, h__[i__2].i = z__1.i; - } - if (wantz) { - zscal_(&nh, &temp, &z__[*ilo + i__ * z_dim1], &c__1); - } - } -/* L50: */ - } - -/* - Determine the order of the multi-shift QR algorithm to be used. 
- - Writing concatenation -*/ - i__4[0] = 1, a__1[0] = job; - i__4[1] = 1, a__1[1] = compz; - s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2); - ns = ilaenv_(&c__4, "ZHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, ( - ftnlen)2); -/* Writing concatenation */ - i__4[0] = 1, a__1[0] = job; - i__4[1] = 1, a__1[1] = compz; - s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2); - maxb = ilaenv_(&c__8, "ZHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, ( - ftnlen)2); - if (ns <= 1 || ns > nh || maxb >= nh) { - -/* Use the standard double-shift algorithm */ - - zlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], ilo, - ihi, &z__[z_offset], ldz, info); - return 0; - } - maxb = max(2,maxb); -/* Computing MIN */ - i__1 = min(ns,maxb); - ns = min(i__1,15); - -/* - Now 1 < NS <= MAXB < NH. - - Set machine-dependent constants for the stopping criterion. - If norm(H) <= sqrt(OVFL), overflow should not occur. -*/ - - unfl = SAFEMINIMUM; - ovfl = 1. / unfl; - dlabad_(&unfl, &ovfl); - ulp = PRECISION; - smlnum = unfl * (nh / ulp); - -/* ITN is the total number of multiple-shift QR iterations allowed. */ - - itn = nh * 30; - -/* - The main loop begins here. I is the loop index and decreases from - IHI to ILO in steps of at most MAXB. Each iteration of the loop - works with the active submatrix in rows and columns L to I. - Eigenvalues I+1 to IHI have already converged. Either L = ILO, or - H(L,L-1) is negligible so that the matrix splits. -*/ - - i__ = *ihi; -L60: - if (i__ < *ilo) { - goto L180; - } - -/* - Perform multiple-shift QR iterations on rows and columns ILO to I - until a submatrix of order at most MAXB splits off at the bottom - because a subdiagonal element has become negligible. -*/ - - l = *ilo; - i__1 = itn; - for (its = 0; its <= i__1; ++its) { - -/* Look for a single small subdiagonal element. 
*/ - - i__2 = l + 1; - for (k = i__; k >= i__2; --k) { - i__3 = k - 1 + (k - 1) * h_dim1; - i__5 = k + k * h_dim1; - tst1 = (d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[k - - 1 + (k - 1) * h_dim1]), abs(d__2)) + ((d__3 = h__[i__5].r, - abs(d__3)) + (d__4 = d_imag(&h__[k + k * h_dim1]), abs( - d__4))); - if (tst1 == 0.) { - i__3 = i__ - l + 1; - tst1 = zlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, rwork); - } - i__3 = k + (k - 1) * h_dim1; -/* Computing MAX */ - d__2 = ulp * tst1; - if ((d__1 = h__[i__3].r, abs(d__1)) <= max(d__2,smlnum)) { - goto L80; - } -/* L70: */ - } -L80: - l = k; - if (l > *ilo) { - -/* H(L,L-1) is negligible. */ - - i__2 = l + (l - 1) * h_dim1; - h__[i__2].r = 0., h__[i__2].i = 0.; - } - -/* Exit from loop if a submatrix of order <= MAXB has split off. */ - - if (l >= i__ - maxb + 1) { - goto L170; - } - -/* - Now the active submatrix is in rows and columns L to I. If - eigenvalues only are being computed, only the active submatrix - need be transformed. -*/ - - if (! wantt) { - i1 = l; - i2 = i__; - } - - if (its == 20 || its == 30) { - -/* Exceptional shifts. */ - - i__2 = i__; - for (ii = i__ - ns + 1; ii <= i__2; ++ii) { - i__3 = ii; - i__5 = ii + (ii - 1) * h_dim1; - i__6 = ii + ii * h_dim1; - d__3 = ((d__1 = h__[i__5].r, abs(d__1)) + (d__2 = h__[i__6].r, - abs(d__2))) * 1.5; - w[i__3].r = d__3, w[i__3].i = 0.; -/* L90: */ - } - } else { - -/* Use eigenvalues of trailing submatrix of order NS as shifts. */ - - zlacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) * - h_dim1], ldh, s, &c__15); - zlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &w[i__ - - ns + 1], &c__1, &ns, &z__[z_offset], ldz, &ierr); - if (ierr > 0) { - -/* - If ZLAHQR failed to compute all NS eigenvalues, use the - unconverged diagonal elements as the remaining shifts. 
-*/ - - i__2 = ierr; - for (ii = 1; ii <= i__2; ++ii) { - i__3 = i__ - ns + ii; - i__5 = ii + ii * 15 - 16; - w[i__3].r = s[i__5].r, w[i__3].i = s[i__5].i; -/* L100: */ - } - } - } - -/* - Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns)) - where G is the Hessenberg submatrix H(L:I,L:I) and w is - the vector of shifts (stored in W). The result is - stored in the local array V. -*/ - - v[0].r = 1., v[0].i = 0.; - i__2 = ns + 1; - for (ii = 2; ii <= i__2; ++ii) { - i__3 = ii - 1; - v[i__3].r = 0., v[i__3].i = 0.; -/* L110: */ - } - nv = 1; - i__2 = i__; - for (j = i__ - ns + 1; j <= i__2; ++j) { - i__3 = nv + 1; - zcopy_(&i__3, v, &c__1, vv, &c__1); - i__3 = nv + 1; - i__5 = j; - z__1.r = -w[i__5].r, z__1.i = -w[i__5].i; - zgemv_("No transpose", &i__3, &nv, &c_b60, &h__[l + l * h_dim1], - ldh, vv, &c__1, &z__1, v, &c__1); - ++nv; - -/* - Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero, - reset it to the unit vector. -*/ - - itemp = izamax_(&nv, v, &c__1); - i__3 = itemp - 1; - rtemp = (d__1 = v[i__3].r, abs(d__1)) + (d__2 = d_imag(&v[itemp - - 1]), abs(d__2)); - if (rtemp == 0.) { - v[0].r = 1., v[0].i = 0.; - i__3 = nv; - for (ii = 2; ii <= i__3; ++ii) { - i__5 = ii - 1; - v[i__5].r = 0., v[i__5].i = 0.; -/* L120: */ - } - } else { - rtemp = max(rtemp,smlnum); - d__1 = 1. / rtemp; - zdscal_(&nv, &d__1, v, &c__1); - } -/* L130: */ - } - -/* Multiple-shift QR step */ - - i__2 = i__ - 1; - for (k = l; k <= i__2; ++k) { - -/* - The first iteration of this loop determines a reflection G - from the vector V and applies it from left and right to H, - thus creating a nonzero bulge below the subdiagonal. - - Each subsequent iteration determines a reflection G to - restore the Hessenberg form in the (K-1)th column, and thus - chases the bulge one step toward the bottom of the active - submatrix. NR is the order of G. 
- - Computing MIN -*/ - i__3 = ns + 1, i__5 = i__ - k + 1; - nr = min(i__3,i__5); - if (k > l) { - zcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1); - } - zlarfg_(&nr, v, &v[1], &c__1, &tau); - if (k > l) { - i__3 = k + (k - 1) * h_dim1; - h__[i__3].r = v[0].r, h__[i__3].i = v[0].i; - i__3 = i__; - for (ii = k + 1; ii <= i__3; ++ii) { - i__5 = ii + (k - 1) * h_dim1; - h__[i__5].r = 0., h__[i__5].i = 0.; -/* L140: */ - } - } - v[0].r = 1., v[0].i = 0.; - -/* - Apply G' from the left to transform the rows of the matrix - in columns K to I2. -*/ - - i__3 = i2 - k + 1; - d_cnjg(&z__1, &tau); - zlarfx_("Left", &nr, &i__3, v, &z__1, &h__[k + k * h_dim1], ldh, & - work[1]); - -/* - Apply G from the right to transform the columns of the - matrix in rows I1 to min(K+NR,I). - - Computing MIN -*/ - i__5 = k + nr; - i__3 = min(i__5,i__) - i1 + 1; - zlarfx_("Right", &i__3, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh, - &work[1]); - - if (wantz) { - -/* Accumulate transformations in the matrix Z */ - - zlarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1], - ldz, &work[1]); - } -/* L150: */ - } - -/* Ensure that H(I,I-1) is real. */ - - i__2 = i__ + (i__ - 1) * h_dim1; - temp.r = h__[i__2].r, temp.i = h__[i__2].i; - if (d_imag(&temp) != 0.) { - d__1 = temp.r; - d__2 = d_imag(&temp); - rtemp = dlapy2_(&d__1, &d__2); - i__2 = i__ + (i__ - 1) * h_dim1; - h__[i__2].r = rtemp, h__[i__2].i = 0.; - z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp; - temp.r = z__1.r, temp.i = z__1.i; - if (i2 > i__) { - i__2 = i2 - i__; - d_cnjg(&z__1, &temp); - zscal_(&i__2, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh); - } - i__2 = i__ - i1; - zscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1); - if (wantz) { - zscal_(&nh, &temp, &z__[*ilo + i__ * z_dim1], &c__1); - } - } - -/* L160: */ - } - -/* Failure to converge in remaining number of iterations */ - - *info = i__; - return 0; - -L170: - -/* - A submatrix of order <= MAXB in rows and columns L to I has split - off. 
Use the double-shift QR algorithm to handle it. -*/ - - zlahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &w[1], ilo, ihi, - &z__[z_offset], ldz, info); - if (*info > 0) { - return 0; - } - -/* - Decrement number of remaining iterations, and return to start of - the main loop with a new value of I. -*/ - - itn -= its; - i__ = l - 1; - goto L60; - -L180: - i__1 = max(1,*n); - work[1].r = (doublereal) i__1, work[1].i = 0.; - return 0; - -/* End of ZHSEQR */ - -} /* zhseqr_ */ - -/* Subroutine */ int zlabrd_(integer *m, integer *n, integer *nb, - doublecomplex *a, integer *lda, doublereal *d__, doublereal *e, - doublecomplex *tauq, doublecomplex *taup, doublecomplex *x, integer * - ldx, doublecomplex *y, integer *ldy) -{ - /* System generated locals */ - integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, - i__3; - doublecomplex z__1; - - /* Local variables */ - static integer i__; - static doublecomplex alpha; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *), zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *), - zlarfg_(integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *), zlacgv_(integer *, doublecomplex *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLABRD reduces the first NB rows and columns of a complex general - m by n matrix A to upper or lower real bidiagonal form by a unitary - transformation Q' * A * P, and returns the matrices X and Y which - are needed to apply the transformation to the unreduced part of A. - - If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower - bidiagonal form. 
- - This is an auxiliary routine called by ZGEBRD - - Arguments - ========= - - M (input) INTEGER - The number of rows in the matrix A. - - N (input) INTEGER - The number of columns in the matrix A. - - NB (input) INTEGER - The number of leading rows and columns of A to be reduced. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the m by n general matrix to be reduced. - On exit, the first NB rows and columns of the matrix are - overwritten; the rest of the array is unchanged. - If m >= n, elements on and below the diagonal in the first NB - columns, with the array TAUQ, represent the unitary - matrix Q as a product of elementary reflectors; and - elements above the diagonal in the first NB rows, with the - array TAUP, represent the unitary matrix P as a product - of elementary reflectors. - If m < n, elements below the diagonal in the first NB - columns, with the array TAUQ, represent the unitary - matrix Q as a product of elementary reflectors, and - elements on and above the diagonal in the first NB rows, - with the array TAUP, represent the unitary matrix P as - a product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - D (output) DOUBLE PRECISION array, dimension (NB) - The diagonal elements of the first NB rows and columns of - the reduced matrix. D(i) = A(i,i). - - E (output) DOUBLE PRECISION array, dimension (NB) - The off-diagonal elements of the first NB rows and columns of - the reduced matrix. - - TAUQ (output) COMPLEX*16 array dimension (NB) - The scalar factors of the elementary reflectors which - represent the unitary matrix Q. See Further Details. - - TAUP (output) COMPLEX*16 array, dimension (NB) - The scalar factors of the elementary reflectors which - represent the unitary matrix P. See Further Details. - - X (output) COMPLEX*16 array, dimension (LDX,NB) - The m-by-nb matrix X required to update the unreduced part - of A. 
- - LDX (input) INTEGER - The leading dimension of the array X. LDX >= max(1,M). - - Y (output) COMPLEX*16 array, dimension (LDY,NB) - The n-by-nb matrix Y required to update the unreduced part - of A. - - LDY (output) INTEGER - The leading dimension of the array Y. LDY >= max(1,N). - - Further Details - =============== - - The matrices Q and P are represented as products of elementary - reflectors: - - Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb) - - Each H(i) and G(i) has the form: - - H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' - - where tauq and taup are complex scalars, and v and u are complex - vectors. - - If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in - A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in - A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in - A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in - A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). - - The elements of the vectors v and u together form the m-by-nb matrix - V and the nb-by-n matrix U' which are needed, with X and Y, to apply - the transformation to the unreduced part of the matrix, using a block - update of the form: A := A - V*Y' - X*U'. - - The contents of A on exit are illustrated by the following examples - with nb = 2: - - m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): - - ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) - ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) - ( v1 v2 a a a ) ( v1 1 a a a a ) - ( v1 v2 a a a ) ( v1 v2 a a a a ) - ( v1 v2 a a a ) ( v1 v2 a a a a ) - ( v1 v2 a a a ) - - where a denotes an element of the original matrix which is unchanged, - vi denotes an element of the vector defining H(i), and ui an element - of the vector defining G(i). 
- - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --d__; - --e; - --tauq; - --taup; - x_dim1 = *ldx; - x_offset = 1 + x_dim1 * 1; - x -= x_offset; - y_dim1 = *ldy; - y_offset = 1 + y_dim1 * 1; - y -= y_offset; - - /* Function Body */ - if (*m <= 0 || *n <= 0) { - return 0; - } - - if (*m >= *n) { - -/* Reduce to upper bidiagonal form */ - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Update A(i:m,i) */ - - i__2 = i__ - 1; - zlacgv_(&i__2, &y[i__ + y_dim1], ldy); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda, - &y[i__ + y_dim1], ldy, &c_b60, &a[i__ + i__ * a_dim1], & - c__1); - i__2 = i__ - 1; - zlacgv_(&i__2, &y[i__ + y_dim1], ldy); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + x_dim1], ldx, - &a[i__ * a_dim1 + 1], &c__1, &c_b60, &a[i__ + i__ * - a_dim1], &c__1); - -/* Generate reflection Q(i) to annihilate A(i+1:m,i) */ - - i__2 = i__ + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *m - i__ + 1; -/* Computing MIN */ - i__3 = i__ + 1; - zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, & - tauq[i__]); - i__2 = i__; - d__[i__2] = alpha.r; - if (i__ < *n) { - i__2 = i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute Y(i+1:n,i) */ - - i__2 = *m - i__ + 1; - i__3 = *n - i__; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ + ( - i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], & - c__1, &c_b59, &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ + - a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b59, & - y[i__ * y_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - z__1.r = -1., 
z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 + - y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b60, &y[ - i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__ + 1; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &x[i__ + - x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b59, & - y[i__ * y_dim1 + 1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ + - 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, & - c_b60, &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *n - i__; - zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1); - -/* Update A(i,i+1:n) */ - - i__2 = *n - i__; - zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); - zlacgv_(&i__, &a[i__ + a_dim1], lda); - i__2 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__, &z__1, &y[i__ + 1 + - y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b60, &a[i__ + - (i__ + 1) * a_dim1], lda); - zlacgv_(&i__, &a[i__ + a_dim1], lda); - i__2 = i__ - 1; - zlacgv_(&i__2, &x[i__ + x_dim1], ldx); - i__2 = i__ - 1; - i__3 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ + - 1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b60, - &a[i__ + (i__ + 1) * a_dim1], lda); - i__2 = i__ - 1; - zlacgv_(&i__2, &x[i__ + x_dim1], ldx); - -/* Generate reflection P(i) to annihilate A(i,i+2:n) */ - - i__2 = i__ + (i__ + 1) * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, & - taup[i__]); - i__2 = i__; - e[i__2] = alpha.r; - i__2 = i__ + (i__ + 1) * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute X(i+1:m,i) */ - - i__2 = *m - i__; - i__3 = *n - i__; - zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[i__ + 1 + ( - i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], - lda, &c_b59, &x[i__ + 1 + i__ * x_dim1], 
&c__1); - i__2 = *n - i__; - zgemv_("Conjugate transpose", &i__2, &i__, &c_b60, &y[i__ + 1 - + y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, & - c_b59, &x[i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__, &z__1, &a[i__ + 1 + - a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[(i__ + 1) * - a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, & - c_b59, &x[i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 + - x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *m - i__; - zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *n - i__; - zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); - } -/* L10: */ - } - } else { - -/* Reduce to lower bidiagonal form */ - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Update A(i,i:n) */ - - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); - i__2 = i__ - 1; - zlacgv_(&i__2, &a[i__ + a_dim1], lda); - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + y_dim1], ldy, - &a[i__ + a_dim1], lda, &c_b60, &a[i__ + i__ * a_dim1], - lda); - i__2 = i__ - 1; - zlacgv_(&i__2, &a[i__ + a_dim1], lda); - i__2 = i__ - 1; - zlacgv_(&i__2, &x[i__ + x_dim1], ldx); - i__2 = i__ - 1; - i__3 = *n - i__ + 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[i__ * - a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b60, &a[i__ + - i__ * a_dim1], lda); - i__2 = i__ - 1; - zlacgv_(&i__2, &x[i__ + x_dim1], ldx); - -/* Generate reflection P(i) to annihilate A(i,i+1:n) */ - - i__2 = i__ + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__ + 1; -/* 
Computing MIN */ - i__3 = i__ + 1; - zlarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, & - taup[i__]); - i__2 = i__; - d__[i__2] = alpha.r; - if (i__ < *m) { - i__2 = i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute X(i+1:m,i) */ - - i__2 = *m - i__; - i__3 = *n - i__ + 1; - zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[i__ + 1 + i__ - * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b59, & - x[i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &y[i__ + - y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b59, &x[ - i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + - a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__ + 1; - zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[i__ * a_dim1 - + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b59, &x[ - i__ * x_dim1 + 1], &c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 + - x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b60, &x[ - i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *m - i__; - zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); - -/* Update A(i+1:m,i) */ - - i__2 = i__ - 1; - zlacgv_(&i__2, &y[i__ + y_dim1], ldy); - i__2 = *m - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + - a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b60, &a[i__ + - 1 + i__ * a_dim1], &c__1); - i__2 = i__ - 1; - zlacgv_(&i__2, &y[i__ + y_dim1], ldy); - i__2 = *m - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__, &z__1, &x[i__ + 1 + - x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b60, &a[ - i__ + 1 + i__ * a_dim1], &c__1); - -/* Generate 
reflection Q(i) to annihilate A(i+2:m,i) */ - - i__2 = i__ + 1 + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *m - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, - &tauq[i__]); - i__2 = i__; - e[i__2] = alpha.r; - i__2 = i__ + 1 + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute Y(i+1:n,i) */ - - i__2 = *m - i__; - i__3 = *n - i__; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ + - 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * - a_dim1], &c__1, &c_b59, &y[i__ + 1 + i__ * y_dim1], & - c__1); - i__2 = *m - i__; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ + - 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & - c_b59, &y[i__ * y_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 + - y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b60, &y[ - i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *m - i__; - zgemv_("Conjugate transpose", &i__2, &i__, &c_b60, &x[i__ + 1 - + x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, & - c_b59, &y[i__ * y_dim1 + 1], &c__1); - i__2 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("Conjugate transpose", &i__, &i__2, &z__1, &a[(i__ + 1) - * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, & - c_b60, &y[i__ + 1 + i__ * y_dim1], &c__1); - i__2 = *n - i__; - zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1); - } else { - i__2 = *n - i__ + 1; - zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda); - } -/* L20: */ - } - } - return 0; - -/* End of ZLABRD */ - -} /* zlabrd_ */ - -/* Subroutine */ int zlacgv_(integer *n, doublecomplex *x, integer *incx) -{ - /* System generated locals */ - integer i__1, i__2; - doublecomplex z__1; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, ioff; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - 
Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLACGV conjugates a complex vector of length N. - - Arguments - ========= - - N (input) INTEGER - The length of the vector X. N >= 0. - - X (input/output) COMPLEX*16 array, dimension - (1+(N-1)*abs(INCX)) - On entry, the vector of length N to be conjugated. - On exit, X is overwritten with conjg(X). - - INCX (input) INTEGER - The spacing between successive elements of X. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*incx == 1) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - d_cnjg(&z__1, &x[i__]); - x[i__2].r = z__1.r, x[i__2].i = z__1.i; -/* L10: */ - } - } else { - ioff = 1; - if (*incx < 0) { - ioff = 1 - (*n - 1) * *incx; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ioff; - d_cnjg(&z__1, &x[ioff]); - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - ioff += *incx; -/* L20: */ - } - } - return 0; - -/* End of ZLACGV */ - -} /* zlacgv_ */ - -/* Subroutine */ int zlacp2_(char *uplo, integer *m, integer *n, doublereal * - a, integer *lda, doublecomplex *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, j; - extern logical lsame_(char *, char *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZLACP2 copies all or part of a real two-dimensional matrix A to a - complex matrix B. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies the part of the matrix A to be copied to B. 
- = 'U': Upper triangular part - = 'L': Lower triangular part - Otherwise: All of the matrix A - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input) DOUBLE PRECISION array, dimension (LDA,N) - The m by n matrix A. If UPLO = 'U', only the upper trapezium - is accessed; if UPLO = 'L', only the lower trapezium is - accessed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - B (output) COMPLEX*16 array, dimension (LDB,N) - On exit, B = A in the locations specified by UPLO. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,M). - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = min(j,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * a_dim1; - b[i__3].r = a[i__4], b[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - - } else if (lsame_(uplo, "L")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * a_dim1; - b[i__3].r = a[i__4], b[i__3].i = 0.; -/* L30: */ - } -/* L40: */ - } - - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * a_dim1; - b[i__3].r = a[i__4], b[i__3].i = 0.; -/* L50: */ - } -/* L60: */ - } - } - - return 0; - -/* End of ZLACP2 */ - -} /* zlacp2_ */ - -/* Subroutine */ int zlacpy_(char *uplo, integer *m, integer *n, - doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; - - 
/* Local variables */ - static integer i__, j; - extern logical lsame_(char *, char *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - ZLACPY copies all or part of a two-dimensional matrix A to another - matrix B. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies the part of the matrix A to be copied to B. - = 'U': Upper triangular part - = 'L': Lower triangular part - Otherwise: All of the matrix A - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,N) - The m by n matrix A. If UPLO = 'U', only the upper trapezium - is accessed; if UPLO = 'L', only the lower trapezium is - accessed. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - B (output) COMPLEX*16 array, dimension (LDB,N) - On exit, B = A in the locations specified by UPLO. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >= max(1,M). 
- - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - - /* Function Body */ - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = min(j,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * a_dim1; - b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i; -/* L10: */ - } -/* L20: */ - } - - } else if (lsame_(uplo, "L")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * a_dim1; - b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i; -/* L30: */ - } -/* L40: */ - } - - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - i__4 = i__ + j * a_dim1; - b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i; -/* L50: */ - } -/* L60: */ - } - } - - return 0; - -/* End of ZLACPY */ - -} /* zlacpy_ */ - -/* Subroutine */ int zlacrm_(integer *m, integer *n, doublecomplex *a, - integer *lda, doublereal *b, integer *ldb, doublecomplex *c__, - integer *ldc, doublereal *rwork) -{ - /* System generated locals */ - integer b_dim1, b_offset, a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, - i__3, i__4, i__5; - doublereal d__1; - doublecomplex z__1; - - /* Builtin functions */ - double d_imag(doublecomplex *); - - /* Local variables */ - static integer i__, j, l; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLACRM performs a very simple matrix-matrix multiplication: - C := A * B, - where A is M by N and complex; B is N by N and real; - C is M by N and complex. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix A and of the matrix C. - M >= 0. - - N (input) INTEGER - The number of columns and rows of the matrix B and - the number of columns of the matrix C. - N >= 0. - - A (input) COMPLEX*16 array, dimension (LDA, N) - A contains the M by N matrix A. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >=max(1,M). - - B (input) DOUBLE PRECISION array, dimension (LDB, N) - B contains the N by N matrix B. - - LDB (input) INTEGER - The leading dimension of the array B. LDB >=max(1,N). - - C (input) COMPLEX*16 array, dimension (LDC, N) - C contains the M by N matrix C. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >=max(1,N). - - RWORK (workspace) DOUBLE PRECISION array, dimension (2*M*N) - - ===================================================================== - - - Quick return if possible. 
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --rwork; - - /* Function Body */ - if (*m == 0 || *n == 0) { - return 0; - } - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - rwork[(j - 1) * *m + i__] = a[i__3].r; -/* L10: */ - } -/* L20: */ - } - - l = *m * *n + 1; - dgemm_("N", "N", m, n, n, &c_b1015, &rwork[1], m, &b[b_offset], ldb, & - c_b324, &rwork[l], m); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = l + (j - 1) * *m + i__ - 1; - c__[i__3].r = rwork[i__4], c__[i__3].i = 0.; -/* L30: */ - } -/* L40: */ - } - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - rwork[(j - 1) * *m + i__] = d_imag(&a[i__ + j * a_dim1]); -/* L50: */ - } -/* L60: */ - } - dgemm_("N", "N", m, n, n, &c_b1015, &rwork[1], m, &b[b_offset], ldb, & - c_b324, &rwork[l], m); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - d__1 = c__[i__4].r; - i__5 = l + (j - 1) * *m + i__ - 1; - z__1.r = d__1, z__1.i = rwork[i__5]; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L70: */ - } -/* L80: */ - } - - return 0; - -/* End of ZLACRM */ - -} /* zlacrm_ */ - -/* Double Complex */ VOID zladiv_(doublecomplex * ret_val, doublecomplex *x, - doublecomplex *y) -{ - /* System generated locals */ - doublereal d__1, d__2, d__3, d__4; - doublecomplex z__1; - - /* Builtin functions */ - double d_imag(doublecomplex *); - - /* Local variables */ - static doublereal zi, zr; - extern /* Subroutine */ int dladiv_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary 
routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLADIV := X / Y, where X and Y are complex. The computation of X / Y - will not overflow on an intermediary step unless the results - overflows. - - Arguments - ========= - - X (input) COMPLEX*16 - Y (input) COMPLEX*16 - The complex scalars X and Y. - - ===================================================================== -*/ - - - d__1 = x->r; - d__2 = d_imag(x); - d__3 = y->r; - d__4 = d_imag(y); - dladiv_(&d__1, &d__2, &d__3, &d__4, &zr, &zi); - z__1.r = zr, z__1.i = zi; - ret_val->r = z__1.r, ret_val->i = z__1.i; - - return ; - -/* End of ZLADIV */ - -} /* zladiv_ */ - -/* Subroutine */ int zlaed0_(integer *qsiz, integer *n, doublereal *d__, - doublereal *e, doublecomplex *q, integer *ldq, doublecomplex *qstore, - integer *ldqs, doublereal *rwork, integer *iwork, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; - doublereal d__1; - - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2; - static doublereal temp; - static integer curr, iperm; - extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, - doublereal *, integer *); - static integer indxq, iwrem, iqptr, tlvls; - extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *), zlaed7_(integer *, integer *, - integer *, integer *, integer *, integer *, doublereal *, - doublecomplex *, integer *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, integer *, integer *, - doublereal *, doublecomplex *, doublereal *, integer *, integer *) - ; - static integer igivcl; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer 
ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlacrm_(integer *, integer *, doublecomplex *, - integer *, doublereal *, integer *, doublecomplex *, integer *, - doublereal *); - static integer igivnm, submat, curprb, subpbs, igivpt; - extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, integer *); - static integer curlvl, matsiz, iprmpt, smlsiz; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - Using the divide and conquer method, ZLAED0 computes all eigenvalues - of a symmetric tridiagonal matrix which is one diagonal block of - those from reducing a dense or band Hermitian matrix and - corresponding eigenvectors of the dense or band matrix. - - Arguments - ========= - - QSIZ (input) INTEGER - The dimension of the unitary matrix used to reduce - the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the diagonal elements of the tridiagonal matrix. - On exit, the eigenvalues in ascending order. - - E (input/output) DOUBLE PRECISION array, dimension (N-1) - On entry, the off-diagonal elements of the tridiagonal matrix. - On exit, E has been destroyed. - - Q (input/output) COMPLEX*16 array, dimension (LDQ,N) - On entry, Q must contain an QSIZ x N matrix whose columns - unitarily orthonormal. It is a part of the unitary matrix - that reduces the full dense Hermitian matrix to a - (reducible) symmetric tridiagonal matrix. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). 
- - IWORK (workspace) INTEGER array, - the dimension of IWORK must be at least - 6 + 6*N + 5*N*lg N - ( lg( N ) = smallest integer k - such that 2^k >= N ) - - RWORK (workspace) DOUBLE PRECISION array, - dimension (1 + 3*N + 2*N*lg N + 3*N**2) - ( lg( N ) = smallest integer k - such that 2^k >= N ) - - QSTORE (workspace) COMPLEX*16 array, dimension (LDQS, N) - Used to store parts of - the eigenvector matrix when the updating matrix multiplies - take place. - - LDQS (input) INTEGER - The leading dimension of the array QSTORE. - LDQS >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: The algorithm failed to compute an eigenvalue while - working on the submatrix lying in rows and columns - INFO/(N+1) through mod(INFO,N+1). - - ===================================================================== - - Warning: N could be as big as QSIZ! - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - qstore_dim1 = *ldqs; - qstore_offset = 1 + qstore_dim1 * 1; - qstore -= qstore_offset; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - -/* - IF( ICOMPQ .LT. 0 .OR. ICOMPQ .GT. 2 ) THEN - INFO = -1 - ELSE IF( ( ICOMPQ .EQ. 1 ) .AND. ( QSIZ .LT. MAX( 0, N ) ) ) - $ THEN -*/ - if (*qsiz < max(0,*n)) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ldq < max(1,*n)) { - *info = -6; - } else if (*ldqs < max(1,*n)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLAED0", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - smlsiz = ilaenv_(&c__9, "ZLAED0", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - -/* - Determine the size and placement of the submatrices, and save in - the leading elements of IWORK. 
-*/ - - iwork[1] = *n; - subpbs = 1; - tlvls = 0; -L10: - if (iwork[subpbs] > smlsiz) { - for (j = subpbs; j >= 1; --j) { - iwork[j * 2] = (iwork[j] + 1) / 2; - iwork[((j) << (1)) - 1] = iwork[j] / 2; -/* L20: */ - } - ++tlvls; - subpbs <<= 1; - goto L10; - } - i__1 = subpbs; - for (j = 2; j <= i__1; ++j) { - iwork[j] += iwork[j - 1]; -/* L30: */ - } - -/* - Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 - using rank-1 modifications (cuts). -*/ - - spm1 = subpbs - 1; - i__1 = spm1; - for (i__ = 1; i__ <= i__1; ++i__) { - submat = iwork[i__] + 1; - smm1 = submat - 1; - d__[smm1] -= (d__1 = e[smm1], abs(d__1)); - d__[submat] -= (d__1 = e[smm1], abs(d__1)); -/* L40: */ - } - - indxq = ((*n) << (2)) + 3; - -/* - Set up workspaces for eigenvalues only/accumulate new vectors - routine -*/ - - temp = log((doublereal) (*n)) / log(2.); - lgn = (integer) temp; - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - iprmpt = indxq + *n + 1; - iperm = iprmpt + *n * lgn; - iqptr = iperm + *n * lgn; - igivpt = iqptr + *n + 2; - igivcl = igivpt + *n * lgn; - - igivnm = 1; - iq = igivnm + ((*n) << (1)) * lgn; -/* Computing 2nd power */ - i__1 = *n; - iwrem = iq + i__1 * i__1 + 1; -/* Initialize pointers */ - i__1 = subpbs; - for (i__ = 0; i__ <= i__1; ++i__) { - iwork[iprmpt + i__] = 1; - iwork[igivpt + i__] = 1; -/* L50: */ - } - iwork[iqptr] = 1; - -/* - Solve each submatrix eigenproblem at the bottom of the divide and - conquer tree. 
-*/ - - curr = 0; - i__1 = spm1; - for (i__ = 0; i__ <= i__1; ++i__) { - if (i__ == 0) { - submat = 1; - matsiz = iwork[1]; - } else { - submat = iwork[i__] + 1; - matsiz = iwork[i__ + 1] - iwork[i__]; - } - ll = iq - 1 + iwork[iqptr + curr]; - dsteqr_("I", &matsiz, &d__[submat], &e[submat], &rwork[ll], &matsiz, & - rwork[1], info); - zlacrm_(qsiz, &matsiz, &q[submat * q_dim1 + 1], ldq, &rwork[ll], & - matsiz, &qstore[submat * qstore_dim1 + 1], ldqs, &rwork[iwrem] - ); -/* Computing 2nd power */ - i__2 = matsiz; - iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2; - ++curr; - if (*info > 0) { - *info = submat * (*n + 1) + submat + matsiz - 1; - return 0; - } - k = 1; - i__2 = iwork[i__ + 1]; - for (j = submat; j <= i__2; ++j) { - iwork[indxq + j] = k; - ++k; -/* L60: */ - } -/* L70: */ - } - -/* - Successively merge eigensystems of adjacent submatrices - into eigensystem for the corresponding larger matrix. - - while ( SUBPBS > 1 ) -*/ - - curlvl = 1; -L80: - if (subpbs > 1) { - spm2 = subpbs - 2; - i__1 = spm2; - for (i__ = 0; i__ <= i__1; i__ += 2) { - if (i__ == 0) { - submat = 1; - matsiz = iwork[2]; - msd2 = iwork[1]; - curprb = 0; - } else { - submat = iwork[i__] + 1; - matsiz = iwork[i__ + 2] - iwork[i__]; - msd2 = matsiz / 2; - ++curprb; - } - -/* - Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) - into an eigensystem of size MATSIZ. ZLAED7 handles the case - when the eigenvectors of a full or band Hermitian matrix (which - was reduced to tridiagonal form) are desired. - - I am free to use Q as a valuable working space until Loop 150. 
-*/ - - zlaed7_(&matsiz, &msd2, qsiz, &tlvls, &curlvl, &curprb, &d__[ - submat], &qstore[submat * qstore_dim1 + 1], ldqs, &e[ - submat + msd2 - 1], &iwork[indxq + submat], &rwork[iq], & - iwork[iqptr], &iwork[iprmpt], &iwork[iperm], &iwork[ - igivpt], &iwork[igivcl], &rwork[igivnm], &q[submat * - q_dim1 + 1], &rwork[iwrem], &iwork[subpbs + 1], info); - if (*info > 0) { - *info = submat * (*n + 1) + submat + matsiz - 1; - return 0; - } - iwork[i__ / 2 + 1] = iwork[i__ + 2]; -/* L90: */ - } - subpbs /= 2; - ++curlvl; - goto L80; - } - -/* - end while - - Re-merge the eigenvalues/vectors which were deflated at the final - merge step. -*/ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - j = iwork[indxq + i__]; - rwork[i__] = d__[j]; - zcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1] - , &c__1); -/* L100: */ - } - dcopy_(n, &rwork[1], &c__1, &d__[1], &c__1); - - return 0; - -/* End of ZLAED0 */ - -} /* zlaed0_ */ - -/* Subroutine */ int zlaed7_(integer *n, integer *cutpnt, integer *qsiz, - integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__, - doublecomplex *q, integer *ldq, doublereal *rho, integer *indxq, - doublereal *qstore, integer *qptr, integer *prmptr, integer *perm, - integer *givptr, integer *givcol, doublereal *givnum, doublecomplex * - work, doublereal *rwork, integer *iwork, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, i__1, i__2; - - /* Builtin functions */ - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, k, n1, n2, iq, iw, iz, ptr, ind1, ind2, indx, curr, - indxc, indxp; - extern /* Subroutine */ int dlaed9_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - doublereal *, doublereal *, doublereal *, integer *, integer *), - zlaed8_(integer *, integer *, integer *, doublecomplex *, integer - *, doublereal *, doublereal *, integer *, doublereal *, - doublereal *, doublecomplex *, integer *, 
doublereal *, integer *, - integer *, integer *, integer *, integer *, integer *, - doublereal *, integer *), dlaeda_(integer *, integer *, integer *, - integer *, integer *, integer *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, doublereal *, - integer *); - static integer idlmda; - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), xerbla_(char *, integer *), zlacrm_(integer *, integer *, doublecomplex *, integer *, - doublereal *, integer *, doublecomplex *, integer *, doublereal * - ); - static integer coltyp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLAED7 computes the updated eigensystem of a diagonal - matrix after modification by a rank-one symmetric matrix. This - routine is used only for the eigenproblem which requires all - eigenvalues and optionally eigenvectors of a dense or banded - Hermitian matrix that has been reduced to tridiagonal form. - - T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) - - where Z = Q'u, u is a vector of length N with ones in the - CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. - - The eigenvectors of the original matrix are stored in Q, and the - eigenvalues are in D. The algorithm consists of three stages: - - The first stage consists of deflating the size of the problem - when there are multiple eigenvalues or if there is a zero in - the Z vector. For each such occurence the dimension of the - secular equation problem is reduced by one. This stage is - performed by the routine DLAED2. - - The second stage consists of calculating the updated - eigenvalues. This is done by finding the roots of the secular - equation via the routine DLAED4 (as called by SLAED3). - This routine also calculates the eigenvectors of the current - problem. 
- - The final stage consists of computing the updated eigenvectors - directly using the updated eigenvalues. The eigenvectors for - the current problem are multiplied with the eigenvectors from - the overall problem. - - Arguments - ========= - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - CUTPNT (input) INTEGER - Contains the location of the last eigenvalue in the leading - sub-matrix. min(1,N) <= CUTPNT <= N. - - QSIZ (input) INTEGER - The dimension of the unitary matrix used to reduce - the full matrix to tridiagonal form. QSIZ >= N. - - TLVLS (input) INTEGER - The total number of merging levels in the overall divide and - conquer tree. - - CURLVL (input) INTEGER - The current level in the overall merge routine, - 0 <= curlvl <= tlvls. - - CURPBM (input) INTEGER - The current problem in the current level in the overall - merge routine (counting from upper left to lower right). - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the eigenvalues of the rank-1-perturbed matrix. - On exit, the eigenvalues of the repaired matrix. - - Q (input/output) COMPLEX*16 array, dimension (LDQ,N) - On entry, the eigenvectors of the rank-1-perturbed matrix. - On exit, the eigenvectors of the repaired tridiagonal matrix. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max(1,N). - - RHO (input) DOUBLE PRECISION - Contains the subdiagonal element used to create the rank-1 - modification. - - INDXQ (output) INTEGER array, dimension (N) - This contains the permutation which will reintegrate the - subproblem just solved back into sorted order, - ie. D( INDXQ( I = 1, N ) ) will be in ascending order. 
- - IWORK (workspace) INTEGER array, dimension (4*N) - - RWORK (workspace) DOUBLE PRECISION array, - dimension (3*N+2*QSIZ*N) - - WORK (workspace) COMPLEX*16 array, dimension (QSIZ*N) - - QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1) - Stores eigenvectors of submatrices encountered during - divide and conquer, packed together. QPTR points to - beginning of the submatrices. - - QPTR (input/output) INTEGER array, dimension (N+2) - List of indices pointing to beginning of submatrices stored - in QSTORE. The submatrices are numbered starting at the - bottom left of the divide and conquer tree, from left to - right and bottom to top. - - PRMPTR (input) INTEGER array, dimension (N lg N) - Contains a list of pointers which indicate where in PERM a - level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) - indicates the size of the permutation and also the size of - the full, non-deflated problem. - - PERM (input) INTEGER array, dimension (N lg N) - Contains the permutations (from deflation and sorting) to be - applied to each eigenblock. - - GIVPTR (input) INTEGER array, dimension (N lg N) - Contains a list of pointers which indicate where in GIVCOL a - level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) - indicates the number of Givens rotations. - - GIVCOL (input) INTEGER array, dimension (2, N lg N) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. - - GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) - Each number indicates the S value to be used in the - corresponding Givens rotation. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: if INFO = 1, an eigenvalue did not converge - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --indxq; - --qstore; - --qptr; - --prmptr; - --perm; - --givptr; - givcol -= 3; - givnum -= 3; - --work; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - -/* - IF( ICOMPQ.LT.0 .OR. ICOMPQ.GT.1 ) THEN - INFO = -1 - ELSE IF( N.LT.0 ) THEN -*/ - if (*n < 0) { - *info = -1; - } else if (min(1,*n) > *cutpnt || *n < *cutpnt) { - *info = -2; - } else if (*qsiz < *n) { - *info = -3; - } else if (*ldq < max(1,*n)) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLAED7", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* - The following values are for bookkeeping purposes only. They are - integer pointers which indicate the portion of the workspace - used by a particular array in DLAED2 and SLAED3. -*/ - - iz = 1; - idlmda = iz + *n; - iw = idlmda + *n; - iq = iw + *n; - - indx = 1; - indxc = indx + *n; - coltyp = indxc + *n; - indxp = coltyp + *n; - -/* - Form the z-vector which consists of the last row of Q_1 and the - first row of Q_2. -*/ - - ptr = pow_ii(&c__2, tlvls) + 1; - i__1 = *curlvl - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = *tlvls - i__; - ptr += pow_ii(&c__2, &i__2); -/* L10: */ - } - curr = ptr + *curpbm; - dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], & - givcol[3], &givnum[3], &qstore[1], &qptr[1], &rwork[iz], &rwork[ - iz + *n], info); - -/* - When solving the final problem, we no longer need the stored data, - so we will overwrite the data from this level onto the previously - used storage space. -*/ - - if (*curlvl == *tlvls) { - qptr[curr] = 1; - prmptr[curr] = 1; - givptr[curr] = 1; - } - -/* Sort and Deflate eigenvalues. 
*/ - - zlaed8_(&k, n, qsiz, &q[q_offset], ldq, &d__[1], rho, cutpnt, &rwork[iz], - &rwork[idlmda], &work[1], qsiz, &rwork[iw], &iwork[indxp], &iwork[ - indx], &indxq[1], &perm[prmptr[curr]], &givptr[curr + 1], &givcol[ - ((givptr[curr]) << (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1] - , info); - prmptr[curr + 1] = prmptr[curr] + *n; - givptr[curr + 1] += givptr[curr]; - -/* Solve Secular Equation. */ - - if (k != 0) { - dlaed9_(&k, &c__1, &k, n, &d__[1], &rwork[iq], &k, rho, &rwork[idlmda] - , &rwork[iw], &qstore[qptr[curr]], &k, info); - zlacrm_(qsiz, &k, &work[1], qsiz, &qstore[qptr[curr]], &k, &q[ - q_offset], ldq, &rwork[iq]); -/* Computing 2nd power */ - i__1 = k; - qptr[curr + 1] = qptr[curr] + i__1 * i__1; - if (*info != 0) { - return 0; - } - -/* Prepare the INDXQ sorting premutation. */ - - n1 = k; - n2 = *n - k; - ind1 = 1; - ind2 = *n; - dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); - } else { - qptr[curr + 1] = qptr[curr]; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - indxq[i__] = i__; -/* L20: */ - } - } - - return 0; - -/* End of ZLAED7 */ - -} /* zlaed7_ */ - -/* Subroutine */ int zlaed8_(integer *k, integer *n, integer *qsiz, - doublecomplex *q, integer *ldq, doublereal *d__, doublereal *rho, - integer *cutpnt, doublereal *z__, doublereal *dlamda, doublecomplex * - q2, integer *ldq2, doublereal *w, integer *indxp, integer *indx, - integer *indxq, integer *perm, integer *givptr, integer *givcol, - doublereal *givnum, integer *info) -{ - /* System generated locals */ - integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; - doublereal d__1; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static doublereal c__; - static integer i__, j; - static doublereal s, t; - static integer k2, n1, n2, jp, n1p1; - static doublereal eps, tau, tol; - static integer jlam, imax, jmax; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *), dcopy_(integer *, doublereal *, integer *, 
doublereal - *, integer *), zdrot_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublereal *, doublereal *), zcopy_( - integer *, doublecomplex *, integer *, doublecomplex *, integer *) - ; - - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, - integer *, integer *, integer *), xerbla_(char *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *, - integer *, doublecomplex *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab, - Courant Institute, NAG Ltd., and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLAED8 merges the two sets of eigenvalues together into a single - sorted set. Then it tries to deflate the size of the problem. - There are two ways in which deflation can occur: when two or more - eigenvalues are close together or if there is a tiny element in the - Z vector. For each such occurrence the order of the related secular - equation problem is reduced by one. - - Arguments - ========= - - K (output) INTEGER - Contains the number of non-deflated eigenvalues. - This is the order of the related secular equation. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - QSIZ (input) INTEGER - The dimension of the unitary matrix used to reduce - the dense or band matrix to tridiagonal form. - QSIZ >= N if ICOMPQ = 1. - - Q (input/output) COMPLEX*16 array, dimension (LDQ,N) - On entry, Q contains the eigenvectors of the partially solved - system which has been previously updated in matrix - multiplies with other partially solved eigensystems. - On exit, Q contains the trailing (N-K) updated eigenvectors - (those which were deflated) in its last N-K columns. - - LDQ (input) INTEGER - The leading dimension of the array Q. LDQ >= max( 1, N ). 
- - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, D contains the eigenvalues of the two submatrices to - be combined. On exit, D contains the trailing (N-K) updated - eigenvalues (those which were deflated) sorted into increasing - order. - - RHO (input/output) DOUBLE PRECISION - Contains the off diagonal element associated with the rank-1 - cut which originally split the two submatrices which are now - being recombined. RHO is modified during the computation to - the value required by DLAED3. - - CUTPNT (input) INTEGER - Contains the location of the last eigenvalue in the leading - sub-matrix. MIN(1,N) <= CUTPNT <= N. - - Z (input) DOUBLE PRECISION array, dimension (N) - On input this vector contains the updating vector (the last - row of the first sub-eigenvector matrix and the first row of - the second sub-eigenvector matrix). The contents of Z are - destroyed during the updating process. - - DLAMDA (output) DOUBLE PRECISION array, dimension (N) - Contains a copy of the first K eigenvalues which will be used - by DLAED3 to form the secular equation. - - Q2 (output) COMPLEX*16 array, dimension (LDQ2,N) - If ICOMPQ = 0, Q2 is not referenced. Otherwise, - Contains a copy of the first K eigenvectors which will be used - by DLAED7 in a matrix multiply (DGEMM) to update the new - eigenvectors. - - LDQ2 (input) INTEGER - The leading dimension of the array Q2. LDQ2 >= max( 1, N ). - - W (output) DOUBLE PRECISION array, dimension (N) - This will hold the first k values of the final - deflation-altered z-vector and will be passed to DLAED3. - - INDXP (workspace) INTEGER array, dimension (N) - This will contain the permutation used to place deflated - values of D at the end of the array. On output INDXP(1:K) - points to the nondeflated D-values and INDXP(K+1:N) - points to the deflated eigenvalues. - - INDX (workspace) INTEGER array, dimension (N) - This will contain the permutation used to sort the contents of - D into ascending order. 
- - INDXQ (input) INTEGER array, dimension (N) - This contains the permutation which separately sorts the two - sub-problems in D into ascending order. Note that elements in - the second half of this permutation must first have CUTPNT - added to their values in order to be accurate. - - PERM (output) INTEGER array, dimension (N) - Contains the permutations (from deflation and sorting) to be - applied to each eigenblock. - - GIVPTR (output) INTEGER - Contains the number of Givens rotations which took place in - this subproblem. - - GIVCOL (output) INTEGER array, dimension (2, N) - Each pair of numbers indicates a pair of columns to take place - in a Givens rotation. - - GIVNUM (output) DOUBLE PRECISION array, dimension (2, N) - Each number indicates the S value to be used in the - corresponding Givens rotation. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - q_dim1 = *ldq; - q_offset = 1 + q_dim1 * 1; - q -= q_offset; - --d__; - --z__; - --dlamda; - q2_dim1 = *ldq2; - q2_offset = 1 + q2_dim1 * 1; - q2 -= q2_offset; - --w; - --indxp; - --indx; - --indxq; - --perm; - givcol -= 3; - givnum -= 3; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -2; - } else if (*qsiz < *n) { - *info = -3; - } else if (*ldq < max(1,*n)) { - *info = -5; - } else if (*cutpnt < min(1,*n) || *cutpnt > *n) { - *info = -8; - } else if (*ldq2 < max(1,*n)) { - *info = -12; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLAED8", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - n1 = *cutpnt; - n2 = *n - n1; - n1p1 = n1 + 1; - - if (*rho < 0.) { - dscal_(&n2, &c_b1294, &z__[n1p1], &c__1); - } - -/* Normalize z so that norm(z) = 1 */ - - t = 1. 
/ sqrt(2.); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - indx[j] = j; -/* L10: */ - } - dscal_(n, &t, &z__[1], &c__1); - *rho = (d__1 = *rho * 2., abs(d__1)); - -/* Sort the eigenvalues into increasing order */ - - i__1 = *n; - for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) { - indxq[i__] += *cutpnt; -/* L20: */ - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - dlamda[i__] = d__[indxq[i__]]; - w[i__] = z__[indxq[i__]]; -/* L30: */ - } - i__ = 1; - j = *cutpnt + 1; - dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]); - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - d__[i__] = dlamda[indx[i__]]; - z__[i__] = w[indx[i__]]; -/* L40: */ - } - -/* Calculate the allowable deflation tolerance */ - - imax = idamax_(n, &z__[1], &c__1); - jmax = idamax_(n, &d__[1], &c__1); - eps = EPSILON; - tol = eps * 8. * (d__1 = d__[jmax], abs(d__1)); - -/* - If the rank-1 modifier is small enough, no more needs to be done - -- except to reorganize Q so that its columns correspond with the - elements in D. -*/ - - if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { - *k = 0; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - perm[j] = indxq[indx[j]]; - zcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1] - , &c__1); -/* L50: */ - } - zlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq); - return 0; - } - -/* - If there are multiple eigenvalues then the problem deflates. Here - the number of equal eigenvalues are found. As each equal - eigenvalue is found, an elementary reflector is computed to rotate - the corresponding eigensubspace so that the corresponding - components of Z are zero in this new basis. -*/ - - *k = 0; - *givptr = 0; - k2 = *n + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. 
*/ - - --k2; - indxp[k2] = j; - if (j == *n) { - goto L100; - } - } else { - jlam = j; - goto L70; - } -/* L60: */ - } -L70: - ++j; - if (j > *n) { - goto L90; - } - if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { - -/* Deflate due to small z component. */ - - --k2; - indxp[k2] = j; - } else { - -/* Check if eigenvalues are close enough to allow deflation. */ - - s = z__[jlam]; - c__ = z__[j]; - -/* - Find sqrt(a**2+b**2) without overflow or - destructive underflow. -*/ - - tau = dlapy2_(&c__, &s); - t = d__[j] - d__[jlam]; - c__ /= tau; - s = -s / tau; - if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { - -/* Deflation is possible. */ - - z__[j] = tau; - z__[jlam] = 0.; - -/* Record the appropriate Givens rotation */ - - ++(*givptr); - givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]]; - givcol[((*givptr) << (1)) + 2] = indxq[indx[j]]; - givnum[((*givptr) << (1)) + 1] = c__; - givnum[((*givptr) << (1)) + 2] = s; - zdrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[ - indx[j]] * q_dim1 + 1], &c__1, &c__, &s); - t = d__[jlam] * c__ * c__ + d__[j] * s * s; - d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__; - d__[jlam] = t; - --k2; - i__ = 1; -L80: - if (k2 + i__ <= *n) { - if (d__[jlam] < d__[indxp[k2 + i__]]) { - indxp[k2 + i__ - 1] = indxp[k2 + i__]; - indxp[k2 + i__] = jlam; - ++i__; - goto L80; - } else { - indxp[k2 + i__ - 1] = jlam; - } - } else { - indxp[k2 + i__ - 1] = jlam; - } - jlam = j; - } else { - ++(*k); - w[*k] = z__[jlam]; - dlamda[*k] = d__[jlam]; - indxp[*k] = jlam; - jlam = j; - } - } - goto L70; -L90: - -/* Record the last eigenvalue. */ - - ++(*k); - w[*k] = z__[jlam]; - dlamda[*k] = d__[jlam]; - indxp[*k] = jlam; - -L100: - -/* - Sort the eigenvalues and corresponding eigenvectors into DLAMDA - and Q2 respectively. The eigenvalues/vectors which were not - deflated go into the first K slots of DLAMDA and Q2 respectively, - while those which were deflated go into the last N - K slots. 
-*/ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - jp = indxp[j]; - dlamda[j] = d__[jp]; - perm[j] = indxq[indx[jp]]; - zcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], & - c__1); -/* L110: */ - } - -/* - The deflated eigenvalues and their corresponding vectors go back - into the last N - K slots of D and Q respectively. -*/ - - if (*k < *n) { - i__1 = *n - *k; - dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); - i__1 = *n - *k; - zlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k + - 1) * q_dim1 + 1], ldq); - } - - return 0; - -/* End of ZLAED8 */ - -} /* zlaed8_ */ - -/* Subroutine */ int zlahqr_(logical *wantt, logical *wantz, integer *n, - integer *ilo, integer *ihi, doublecomplex *h__, integer *ldh, - doublecomplex *w, integer *iloz, integer *ihiz, doublecomplex *z__, - integer *ldz, integer *info) -{ - /* System generated locals */ - integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5; - doublereal d__1, d__2, d__3, d__4, d__5, d__6; - doublecomplex z__1, z__2, z__3, z__4; - - /* Builtin functions */ - double d_imag(doublecomplex *); - void z_sqrt(doublecomplex *, doublecomplex *), d_cnjg(doublecomplex *, - doublecomplex *); - double z_abs(doublecomplex *); - - /* Local variables */ - static integer i__, j, k, l, m; - static doublereal s; - static doublecomplex t, u, v[2], x, y; - static integer i1, i2; - static doublecomplex t1; - static doublereal t2; - static doublecomplex v2; - static doublereal h10; - static doublecomplex h11; - static doublereal h21; - static doublecomplex h22; - static integer nh, nz; - static doublecomplex h11s; - static integer itn, its; - static doublereal ulp; - static doublecomplex sum; - static doublereal tst1; - static doublecomplex temp; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *); - static doublereal rtemp, rwork[1]; - extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, - doublecomplex 
*, integer *); - - extern /* Subroutine */ int zlarfg_(integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *); - extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *, - doublecomplex *); - extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *, - doublereal *); - static doublereal smlnum; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZLAHQR is an auxiliary routine called by ZHSEQR to update the - eigenvalues and Schur decomposition already computed by ZHSEQR, by - dealing with the Hessenberg submatrix in rows and columns ILO to IHI. - - Arguments - ========= - - WANTT (input) LOGICAL - = .TRUE. : the full Schur form T is required; - = .FALSE.: only eigenvalues are required. - - WANTZ (input) LOGICAL - = .TRUE. : the matrix of Schur vectors Z is required; - = .FALSE.: Schur vectors are not required. - - N (input) INTEGER - The order of the matrix H. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - It is assumed that H is already upper triangular in rows and - columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). - ZLAHQR works primarily with the Hessenberg submatrix in rows - and columns ILO to IHI, but applies transformations to all of - H if WANTT is .TRUE.. - 1 <= ILO <= max(1,IHI); IHI <= N. - - H (input/output) COMPLEX*16 array, dimension (LDH,N) - On entry, the upper Hessenberg matrix H. - On exit, if WANTT is .TRUE., H is upper triangular in rows - and columns ILO:IHI, with any 2-by-2 diagonal blocks in - standard form. If WANTT is .FALSE., the contents of H are - unspecified on exit. - - LDH (input) INTEGER - The leading dimension of the array H. LDH >= max(1,N). - - W (output) COMPLEX*16 array, dimension (N) - The computed eigenvalues ILO to IHI are stored in the - corresponding elements of W. 
If WANTT is .TRUE., the - eigenvalues are stored in the same order as on the diagonal - of the Schur form returned in H, with W(i) = H(i,i). - - ILOZ (input) INTEGER - IHIZ (input) INTEGER - Specify the rows of Z to which transformations must be - applied if WANTZ is .TRUE.. - 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. - - Z (input/output) COMPLEX*16 array, dimension (LDZ,N) - If WANTZ is .TRUE., on entry Z must contain the current - matrix Z of transformations accumulated by ZHSEQR, and on - exit Z has been updated; transformations are applied only to - the submatrix Z(ILOZ:IHIZ,ILO:IHI). - If WANTZ is .FALSE., Z is not referenced. - - LDZ (input) INTEGER - The leading dimension of the array Z. LDZ >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - > 0: if INFO = i, ZLAHQR failed to compute all the - eigenvalues ILO to IHI in a total of 30*(IHI-ILO+1) - iterations; elements i+1:ihi of W contain those - eigenvalues which have been successfully computed. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - h_dim1 = *ldh; - h_offset = 1 + h_dim1 * 1; - h__ -= h_offset; - --w; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - - /* Function Body */ - *info = 0; - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - if (*ilo == *ihi) { - i__1 = *ilo; - i__2 = *ilo + *ilo * h_dim1; - w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i; - return 0; - } - - nh = *ihi - *ilo + 1; - nz = *ihiz - *iloz + 1; - -/* - Set machine-dependent constants for the stopping criterion. - If norm(H) <= sqrt(OVFL), overflow should not occur. -*/ - - ulp = PRECISION; - smlnum = SAFEMINIMUM / ulp; - -/* - I1 and I2 are the indices of the first row and last column of H - to which transformations must be applied. If eigenvalues only are - being computed, I1 and I2 are set inside the main loop. -*/ - - if (*wantt) { - i1 = 1; - i2 = *n; - } - -/* ITN is the total number of QR iterations allowed. 
*/ - - itn = nh * 30; - -/* - The main loop begins here. I is the loop index and decreases from - IHI to ILO in steps of 1. Each iteration of the loop works - with the active submatrix in rows and columns L to I. - Eigenvalues I+1 to IHI have already converged. Either L = ILO, or - H(L,L-1) is negligible so that the matrix splits. -*/ - - i__ = *ihi; -L10: - if (i__ < *ilo) { - goto L130; - } - -/* - Perform QR iterations on rows and columns ILO to I until a - submatrix of order 1 splits off at the bottom because a - subdiagonal element has become negligible. -*/ - - l = *ilo; - i__1 = itn; - for (its = 0; its <= i__1; ++its) { - -/* Look for a single small subdiagonal element. */ - - i__2 = l + 1; - for (k = i__; k >= i__2; --k) { - i__3 = k - 1 + (k - 1) * h_dim1; - i__4 = k + k * h_dim1; - tst1 = (d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[k - - 1 + (k - 1) * h_dim1]), abs(d__2)) + ((d__3 = h__[i__4].r, - abs(d__3)) + (d__4 = d_imag(&h__[k + k * h_dim1]), abs( - d__4))); - if (tst1 == 0.) { - i__3 = i__ - l + 1; - tst1 = zlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, rwork); - } - i__3 = k + (k - 1) * h_dim1; -/* Computing MAX */ - d__2 = ulp * tst1; - if ((d__1 = h__[i__3].r, abs(d__1)) <= max(d__2,smlnum)) { - goto L30; - } -/* L20: */ - } -L30: - l = k; - if (l > *ilo) { - -/* H(L,L-1) is negligible */ - - i__2 = l + (l - 1) * h_dim1; - h__[i__2].r = 0., h__[i__2].i = 0.; - } - -/* Exit from loop if a submatrix of order 1 has split off. */ - - if (l >= i__) { - goto L120; - } - -/* - Now the active submatrix is in rows and columns L to I. If - eigenvalues only are being computed, only the active submatrix - need be transformed. -*/ - - if (! (*wantt)) { - i1 = l; - i2 = i__; - } - - if (its == 10 || its == 20) { - -/* Exceptional shift. 
*/ - - i__2 = i__ + (i__ - 1) * h_dim1; - s = (d__1 = h__[i__2].r, abs(d__1)) * .75; - i__2 = i__ + i__ * h_dim1; - z__1.r = s + h__[i__2].r, z__1.i = h__[i__2].i; - t.r = z__1.r, t.i = z__1.i; - } else { - -/* Wilkinson's shift. */ - - i__2 = i__ + i__ * h_dim1; - t.r = h__[i__2].r, t.i = h__[i__2].i; - i__2 = i__ - 1 + i__ * h_dim1; - i__3 = i__ + (i__ - 1) * h_dim1; - d__1 = h__[i__3].r; - z__1.r = d__1 * h__[i__2].r, z__1.i = d__1 * h__[i__2].i; - u.r = z__1.r, u.i = z__1.i; - if (u.r != 0. || u.i != 0.) { - i__2 = i__ - 1 + (i__ - 1) * h_dim1; - z__2.r = h__[i__2].r - t.r, z__2.i = h__[i__2].i - t.i; - z__1.r = z__2.r * .5, z__1.i = z__2.i * .5; - x.r = z__1.r, x.i = z__1.i; - z__3.r = x.r * x.r - x.i * x.i, z__3.i = x.r * x.i + x.i * - x.r; - z__2.r = z__3.r + u.r, z__2.i = z__3.i + u.i; - z_sqrt(&z__1, &z__2); - y.r = z__1.r, y.i = z__1.i; - if (x.r * y.r + d_imag(&x) * d_imag(&y) < 0.) { - z__1.r = -y.r, z__1.i = -y.i; - y.r = z__1.r, y.i = z__1.i; - } - z__3.r = x.r + y.r, z__3.i = x.i + y.i; - zladiv_(&z__2, &u, &z__3); - z__1.r = t.r - z__2.r, z__1.i = t.i - z__2.i; - t.r = z__1.r, t.i = z__1.i; - } - } - -/* Look for two consecutive small subdiagonal elements. */ - - i__2 = l + 1; - for (m = i__ - 1; m >= i__2; --m) { - -/* - Determine the effect of starting the single-shift QR - iteration at row M, and see if this would make H(M,M-1) - negligible. 
-*/ - - i__3 = m + m * h_dim1; - h11.r = h__[i__3].r, h11.i = h__[i__3].i; - i__3 = m + 1 + (m + 1) * h_dim1; - h22.r = h__[i__3].r, h22.i = h__[i__3].i; - z__1.r = h11.r - t.r, z__1.i = h11.i - t.i; - h11s.r = z__1.r, h11s.i = z__1.i; - i__3 = m + 1 + m * h_dim1; - h21 = h__[i__3].r; - s = (d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(d__2)) - + abs(h21); - z__1.r = h11s.r / s, z__1.i = h11s.i / s; - h11s.r = z__1.r, h11s.i = z__1.i; - h21 /= s; - v[0].r = h11s.r, v[0].i = h11s.i; - v[1].r = h21, v[1].i = 0.; - i__3 = m + (m - 1) * h_dim1; - h10 = h__[i__3].r; - tst1 = ((d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs( - d__2))) * ((d__3 = h11.r, abs(d__3)) + (d__4 = d_imag(& - h11), abs(d__4)) + ((d__5 = h22.r, abs(d__5)) + (d__6 = - d_imag(&h22), abs(d__6)))); - if ((d__1 = h10 * h21, abs(d__1)) <= ulp * tst1) { - goto L50; - } -/* L40: */ - } - i__2 = l + l * h_dim1; - h11.r = h__[i__2].r, h11.i = h__[i__2].i; - i__2 = l + 1 + (l + 1) * h_dim1; - h22.r = h__[i__2].r, h22.i = h__[i__2].i; - z__1.r = h11.r - t.r, z__1.i = h11.i - t.i; - h11s.r = z__1.r, h11s.i = z__1.i; - i__2 = l + 1 + l * h_dim1; - h21 = h__[i__2].r; - s = (d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(d__2)) + - abs(h21); - z__1.r = h11s.r / s, z__1.i = h11s.i / s; - h11s.r = z__1.r, h11s.i = z__1.i; - h21 /= s; - v[0].r = h11s.r, v[0].i = h11s.i; - v[1].r = h21, v[1].i = 0.; -L50: - -/* Single-shift QR step */ - - i__2 = i__ - 1; - for (k = m; k <= i__2; ++k) { - -/* - The first iteration of this loop determines a reflection G - from the vector V and applies it from left and right to H, - thus creating a nonzero bulge below the subdiagonal. - - Each subsequent iteration determines a reflection G to - restore the Hessenberg form in the (K-1)th column, and thus - chases the bulge one step toward the bottom of the active - submatrix. - - V(2) is always real before the call to ZLARFG, and hence - after the call T2 ( = T1*V(2) ) is also real. 
-*/ - - if (k > m) { - zcopy_(&c__2, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1); - } - zlarfg_(&c__2, v, &v[1], &c__1, &t1); - if (k > m) { - i__3 = k + (k - 1) * h_dim1; - h__[i__3].r = v[0].r, h__[i__3].i = v[0].i; - i__3 = k + 1 + (k - 1) * h_dim1; - h__[i__3].r = 0., h__[i__3].i = 0.; - } - v2.r = v[1].r, v2.i = v[1].i; - z__1.r = t1.r * v2.r - t1.i * v2.i, z__1.i = t1.r * v2.i + t1.i * - v2.r; - t2 = z__1.r; - -/* - Apply G from the left to transform the rows of the matrix - in columns K to I2. -*/ - - i__3 = i2; - for (j = k; j <= i__3; ++j) { - d_cnjg(&z__3, &t1); - i__4 = k + j * h_dim1; - z__2.r = z__3.r * h__[i__4].r - z__3.i * h__[i__4].i, z__2.i = - z__3.r * h__[i__4].i + z__3.i * h__[i__4].r; - i__5 = k + 1 + j * h_dim1; - z__4.r = t2 * h__[i__5].r, z__4.i = t2 * h__[i__5].i; - z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; - sum.r = z__1.r, sum.i = z__1.i; - i__4 = k + j * h_dim1; - i__5 = k + j * h_dim1; - z__1.r = h__[i__5].r - sum.r, z__1.i = h__[i__5].i - sum.i; - h__[i__4].r = z__1.r, h__[i__4].i = z__1.i; - i__4 = k + 1 + j * h_dim1; - i__5 = k + 1 + j * h_dim1; - z__2.r = sum.r * v2.r - sum.i * v2.i, z__2.i = sum.r * v2.i + - sum.i * v2.r; - z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i - z__2.i; - h__[i__4].r = z__1.r, h__[i__4].i = z__1.i; -/* L60: */ - } - -/* - Apply G from the right to transform the columns of the - matrix in rows I1 to min(K+2,I). 
- - Computing MIN -*/ - i__4 = k + 2; - i__3 = min(i__4,i__); - for (j = i1; j <= i__3; ++j) { - i__4 = j + k * h_dim1; - z__2.r = t1.r * h__[i__4].r - t1.i * h__[i__4].i, z__2.i = - t1.r * h__[i__4].i + t1.i * h__[i__4].r; - i__5 = j + (k + 1) * h_dim1; - z__3.r = t2 * h__[i__5].r, z__3.i = t2 * h__[i__5].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - sum.r = z__1.r, sum.i = z__1.i; - i__4 = j + k * h_dim1; - i__5 = j + k * h_dim1; - z__1.r = h__[i__5].r - sum.r, z__1.i = h__[i__5].i - sum.i; - h__[i__4].r = z__1.r, h__[i__4].i = z__1.i; - i__4 = j + (k + 1) * h_dim1; - i__5 = j + (k + 1) * h_dim1; - d_cnjg(&z__3, &v2); - z__2.r = sum.r * z__3.r - sum.i * z__3.i, z__2.i = sum.r * - z__3.i + sum.i * z__3.r; - z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i - z__2.i; - h__[i__4].r = z__1.r, h__[i__4].i = z__1.i; -/* L70: */ - } - - if (*wantz) { - -/* Accumulate transformations in the matrix Z */ - - i__3 = *ihiz; - for (j = *iloz; j <= i__3; ++j) { - i__4 = j + k * z_dim1; - z__2.r = t1.r * z__[i__4].r - t1.i * z__[i__4].i, z__2.i = - t1.r * z__[i__4].i + t1.i * z__[i__4].r; - i__5 = j + (k + 1) * z_dim1; - z__3.r = t2 * z__[i__5].r, z__3.i = t2 * z__[i__5].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - sum.r = z__1.r, sum.i = z__1.i; - i__4 = j + k * z_dim1; - i__5 = j + k * z_dim1; - z__1.r = z__[i__5].r - sum.r, z__1.i = z__[i__5].i - - sum.i; - z__[i__4].r = z__1.r, z__[i__4].i = z__1.i; - i__4 = j + (k + 1) * z_dim1; - i__5 = j + (k + 1) * z_dim1; - d_cnjg(&z__3, &v2); - z__2.r = sum.r * z__3.r - sum.i * z__3.i, z__2.i = sum.r * - z__3.i + sum.i * z__3.r; - z__1.r = z__[i__5].r - z__2.r, z__1.i = z__[i__5].i - - z__2.i; - z__[i__4].r = z__1.r, z__[i__4].i = z__1.i; -/* L80: */ - } - } - - if ((k == m && m > l)) { - -/* - If the QR step was started at row M > L because two - consecutive small subdiagonals were found, then extra - scaling must be performed to ensure that H(M,M-1) remains - real. -*/ - - z__1.r = 1. - t1.r, z__1.i = 0. 
- t1.i; - temp.r = z__1.r, temp.i = z__1.i; - d__1 = z_abs(&temp); - z__1.r = temp.r / d__1, z__1.i = temp.i / d__1; - temp.r = z__1.r, temp.i = z__1.i; - i__3 = m + 1 + m * h_dim1; - i__4 = m + 1 + m * h_dim1; - d_cnjg(&z__2, &temp); - z__1.r = h__[i__4].r * z__2.r - h__[i__4].i * z__2.i, z__1.i = - h__[i__4].r * z__2.i + h__[i__4].i * z__2.r; - h__[i__3].r = z__1.r, h__[i__3].i = z__1.i; - if (m + 2 <= i__) { - i__3 = m + 2 + (m + 1) * h_dim1; - i__4 = m + 2 + (m + 1) * h_dim1; - z__1.r = h__[i__4].r * temp.r - h__[i__4].i * temp.i, - z__1.i = h__[i__4].r * temp.i + h__[i__4].i * - temp.r; - h__[i__3].r = z__1.r, h__[i__3].i = z__1.i; - } - i__3 = i__; - for (j = m; j <= i__3; ++j) { - if (j != m + 1) { - if (i2 > j) { - i__4 = i2 - j; - zscal_(&i__4, &temp, &h__[j + (j + 1) * h_dim1], - ldh); - } - i__4 = j - i1; - d_cnjg(&z__1, &temp); - zscal_(&i__4, &z__1, &h__[i1 + j * h_dim1], &c__1); - if (*wantz) { - d_cnjg(&z__1, &temp); - zscal_(&nz, &z__1, &z__[*iloz + j * z_dim1], & - c__1); - } - } -/* L90: */ - } - } -/* L100: */ - } - -/* Ensure that H(I,I-1) is real. */ - - i__2 = i__ + (i__ - 1) * h_dim1; - temp.r = h__[i__2].r, temp.i = h__[i__2].i; - if (d_imag(&temp) != 0.) { - rtemp = z_abs(&temp); - i__2 = i__ + (i__ - 1) * h_dim1; - h__[i__2].r = rtemp, h__[i__2].i = 0.; - z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp; - temp.r = z__1.r, temp.i = z__1.i; - if (i2 > i__) { - i__2 = i2 - i__; - d_cnjg(&z__1, &temp); - zscal_(&i__2, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh); - } - i__2 = i__ - i1; - zscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1); - if (*wantz) { - zscal_(&nz, &temp, &z__[*iloz + i__ * z_dim1], &c__1); - } - } - -/* L110: */ - } - -/* Failure to converge in remaining number of iterations */ - - *info = i__; - return 0; - -L120: - -/* H(I,I-1) is negligible: one eigenvalue has converged. 
*/ - - i__1 = i__; - i__2 = i__ + i__ * h_dim1; - w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i; - -/* - Decrement number of remaining iterations, and return to start of - the main loop with new value of I. -*/ - - itn -= its; - i__ = l - 1; - goto L10; - -L130: - return 0; - -/* End of ZLAHQR */ - -} /* zlahqr_ */ - -/* Subroutine */ int zlahrd_(integer *n, integer *k, integer *nb, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *t, - integer *ldt, doublecomplex *y, integer *ldy) -{ - /* System generated locals */ - integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, - i__3; - doublecomplex z__1; - - /* Local variables */ - static integer i__; - static doublecomplex ei; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *), zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *), - zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, - integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *, integer *), ztrmv_(char *, char *, - char *, integer *, doublecomplex *, integer *, doublecomplex *, - integer *), zlarfg_(integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *), - zlacgv_(integer *, doublecomplex *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZLAHRD reduces the first NB columns of a complex general n-by-(n-k+1) - matrix A so that elements below the k-th subdiagonal are zero. The - reduction is performed by a unitary similarity transformation - Q' * A * Q. The routine returns the matrices V and T which determine - Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. 
- - This is an auxiliary routine called by ZGEHRD. - - Arguments - ========= - - N (input) INTEGER - The order of the matrix A. - - K (input) INTEGER - The offset for the reduction. Elements below the k-th - subdiagonal in the first NB columns are reduced to zero. - - NB (input) INTEGER - The number of columns to be reduced. - - A (input/output) COMPLEX*16 array, dimension (LDA,N-K+1) - On entry, the n-by-(n-k+1) general matrix A. - On exit, the elements on and above the k-th subdiagonal in - the first NB columns are overwritten with the corresponding - elements of the reduced matrix; the elements below the k-th - subdiagonal, with the array TAU, represent the matrix Q as a - product of elementary reflectors. The other columns of A are - unchanged. See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (output) COMPLEX*16 array, dimension (NB) - The scalar factors of the elementary reflectors. See Further - Details. - - T (output) COMPLEX*16 array, dimension (LDT,NB) - The upper triangular matrix T. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= NB. - - Y (output) COMPLEX*16 array, dimension (LDY,NB) - The n-by-nb matrix Y. - - LDY (input) INTEGER - The leading dimension of the array Y. LDY >= max(1,N). - - Further Details - =============== - - The matrix Q is represented as a product of nb elementary reflectors - - Q = H(1) H(2) . . . H(nb). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in - A(i+k+1:n,i), and tau in TAU(i). - - The elements of the vectors v together form the (n-k+1)-by-nb matrix - V which is needed, with T and Y, to apply the transformation to the - unreduced part of the matrix, using an update of the form: - A := (I - V*T*V') * (A - Y*V'). 
- - The contents of A on exit are illustrated by the following example - with n = 7, k = 3 and nb = 2: - - ( a h a a a ) - ( a h a a a ) - ( a h a a a ) - ( h h a a a ) - ( v1 h a a a ) - ( v1 v2 a a a ) - ( v1 v2 a a a ) - - where a denotes an element of the original matrix A, h denotes a - modified element of the upper Hessenberg matrix H, and vi denotes an - element of the vector defining H(i). - - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - --tau; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - y_dim1 = *ldy; - y_offset = 1 + y_dim1 * 1; - y -= y_offset; - - /* Function Body */ - if (*n <= 1) { - return 0; - } - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - if (i__ > 1) { - -/* - Update A(1:n,i) - - Compute i-th column of A - Y * V' -*/ - - i__2 = i__ - 1; - zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda); - i__2 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", n, &i__2, &z__1, &y[y_offset], ldy, &a[*k - + i__ - 1 + a_dim1], lda, &c_b60, &a[i__ * a_dim1 + 1], & - c__1); - i__2 = i__ - 1; - zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda); - -/* - Apply I - V * T' * V' to this column (call it b) from the - left, using the last column of T as workspace - - Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) - ( V2 ) ( b2 ) - - where V1 is unit lower triangular - - w := V1' * b1 -*/ - - i__2 = i__ - 1; - zcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + - 1], &c__1); - i__2 = i__ - 1; - ztrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a[*k + 1 + - a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1); - -/* w := w + V2'*b2 */ - - i__2 = *n - *k - i__ + 1; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[*k + i__ + - a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b60, - &t[*nb * t_dim1 + 1], &c__1); - -/* w := T'*w */ - - i__2 = i__ - 
1; - ztrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[ - t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1); - -/* b2 := b2 - V2*w */ - - i__2 = *n - *k - i__ + 1; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[*k + i__ + a_dim1], - lda, &t[*nb * t_dim1 + 1], &c__1, &c_b60, &a[*k + i__ + - i__ * a_dim1], &c__1); - -/* b1 := b1 - V1*w */ - - i__2 = i__ - 1; - ztrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1] - , lda, &t[*nb * t_dim1 + 1], &c__1); - i__2 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zaxpy_(&i__2, &z__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__ - * a_dim1], &c__1); - - i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1; - a[i__2].r = ei.r, a[i__2].i = ei.i; - } - -/* - Generate the elementary reflector H(i) to annihilate - A(k+i+1:n,i) -*/ - - i__2 = *k + i__ + i__ * a_dim1; - ei.r = a[i__2].r, ei.i = a[i__2].i; - i__2 = *n - *k - i__ + 1; -/* Computing MIN */ - i__3 = *k + i__ + 1; - zlarfg_(&i__2, &ei, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]) - ; - i__2 = *k + i__ + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute Y(1:n,i) */ - - i__2 = *n - *k - i__ + 1; - zgemv_("No transpose", n, &i__2, &c_b60, &a[(i__ + 1) * a_dim1 + 1], - lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b59, &y[i__ * - y_dim1 + 1], &c__1); - i__2 = *n - *k - i__ + 1; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[*k + i__ + - a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b59, &t[ - i__ * t_dim1 + 1], &c__1); - i__2 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", n, &i__2, &z__1, &y[y_offset], ldy, &t[i__ * - t_dim1 + 1], &c__1, &c_b60, &y[i__ * y_dim1 + 1], &c__1); - zscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1); - -/* Compute T(1:i,i) */ - - i__2 = i__ - 1; - i__3 = i__; - z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i; - zscal_(&i__2, &z__1, &t[i__ * t_dim1 + 1], &c__1); - i__2 = i__ - 1; - ztrmv_("Upper", "No transpose", "Non-unit", &i__2, 
&t[t_offset], ldt, - &t[i__ * t_dim1 + 1], &c__1) - ; - i__2 = i__ + i__ * t_dim1; - i__3 = i__; - t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i; - -/* L10: */ - } - i__1 = *k + *nb + *nb * a_dim1; - a[i__1].r = ei.r, a[i__1].i = ei.i; - - return 0; - -/* End of ZLAHRD */ - -} /* zlahrd_ */ - -/* Subroutine */ int zlals0_(integer *icompq, integer *nl, integer *nr, - integer *sqre, integer *nrhs, doublecomplex *b, integer *ldb, - doublecomplex *bx, integer *ldbx, integer *perm, integer *givptr, - integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, - doublereal *poles, doublereal *difl, doublereal *difr, doublereal * - z__, integer *k, doublereal *c__, doublereal *s, doublereal *rwork, - integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1, - givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset, - bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5; - doublereal d__1; - doublecomplex z__1; - - /* Builtin functions */ - double d_imag(doublecomplex *); - - /* Local variables */ - static integer i__, j, m, n; - static doublereal dj; - static integer nlp1, jcol; - static doublereal temp; - static integer jrow; - extern doublereal dnrm2_(integer *, doublereal *, integer *); - static doublereal diflj, difrj, dsigj; - extern /* Subroutine */ int dgemv_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, doublereal *, integer *, - doublereal *, doublereal *, integer *), zdrot_(integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublereal *, doublereal *); - extern doublereal dlamc3_(doublereal *, doublereal *); - extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *), xerbla_(char *, integer *); - static doublereal dsigjp; - extern /* Subroutine */ int zdscal_(integer *, doublereal *, - doublecomplex *, integer *), zlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, 
integer *, doublecomplex * - , integer *, integer *), zlacpy_(char *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - December 1, 1999 - - - Purpose - ======= - - ZLALS0 applies back the multiplying factors of either the left or the - right singular vector matrix of a diagonal matrix appended by a row - to the right hand side matrix B in solving the least squares problem - using the divide-and-conquer SVD approach. - - For the left singular vector matrix, three types of orthogonal - matrices are involved: - - (1L) Givens rotations: the number of such rotations is GIVPTR; the - pairs of columns/rows they were applied to are stored in GIVCOL; - and the C- and S-values of these rotations are stored in GIVNUM. - - (2L) Permutation. The (NL+1)-st row of B is to be moved to the first - row, and for J=2:N, PERM(J)-th row of B is to be moved to the - J-th row. - - (3L) The left singular vector matrix of the remaining matrix. - - For the right singular vector matrix, four types of orthogonal - matrices are involved: - - (1R) The right singular vector matrix of the remaining matrix. - - (2R) If SQRE = 1, one extra Givens rotation to generate the right - null space. - - (3R) The inverse transformation of (2L). - - (4R) The inverse transformation of (1L). - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether singular vectors are to be computed in - factored form: - = 0: Left singular vector matrix. - = 1: Right singular vector matrix. - - NL (input) INTEGER - The row dimension of the upper block. NL >= 1. - - NR (input) INTEGER - The row dimension of the lower block. NR >= 1. - - SQRE (input) INTEGER - = 0: the lower block is an NR-by-NR square matrix. - = 1: the lower block is an NR-by-(NR+1) rectangular matrix. 
- - The bidiagonal matrix has row dimension N = NL + NR + 1, - and column dimension M = N + SQRE. - - NRHS (input) INTEGER - The number of columns of B and BX. NRHS must be at least 1. - - B (input/output) COMPLEX*16 array, dimension ( LDB, NRHS ) - On input, B contains the right hand sides of the least - squares problem in rows 1 through M. On output, B contains - the solution X in rows 1 through N. - - LDB (input) INTEGER - The leading dimension of B. LDB must be at least - max(1,MAX( M, N ) ). - - BX (workspace) COMPLEX*16 array, dimension ( LDBX, NRHS ) - - LDBX (input) INTEGER - The leading dimension of BX. - - PERM (input) INTEGER array, dimension ( N ) - The permutations (from deflation and sorting) applied - to the two blocks. - - GIVPTR (input) INTEGER - The number of Givens rotations which took place in this - subproblem. - - GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) - Each pair of numbers indicates a pair of rows/columns - involved in a Givens rotation. - - LDGCOL (input) INTEGER - The leading dimension of GIVCOL, must be at least N. - - GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - Each number indicates the C or S value used in the - corresponding Givens rotation. - - LDGNUM (input) INTEGER - The leading dimension of arrays DIFR, POLES and - GIVNUM, must be at least K. - - POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) - On entry, POLES(1:K, 1) contains the new singular - values obtained from solving the secular equation, and - POLES(1:K, 2) is an array containing the poles in the secular - equation. - - DIFL (input) DOUBLE PRECISION array, dimension ( K ). - On entry, DIFL(I) is the distance between I-th updated - (undeflated) singular value and the I-th (undeflated) old - singular value. - - DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ). - On entry, DIFR(I, 1) contains the distances between I-th - updated (undeflated) singular value and the I+1-th - (undeflated) old singular value. 
And DIFR(I, 2) is the - normalizing factor for the I-th right singular vector. - - Z (input) DOUBLE PRECISION array, dimension ( K ) - Contain the components of the deflation-adjusted updating row - vector. - - K (input) INTEGER - Contains the dimension of the non-deflated matrix, - This is the order of the related secular equation. 1 <= K <=N. - - C (input) DOUBLE PRECISION - C contains garbage if SQRE =0 and the C-value of a Givens - rotation related to the right null space if SQRE = 1. - - S (input) DOUBLE PRECISION - S contains garbage if SQRE =0 and the S-value of a Givens - rotation related to the right null space if SQRE = 1. - - RWORK (workspace) DOUBLE PRECISION array, dimension - ( K*(1+NRHS) + 2*NRHS ) - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - bx_dim1 = *ldbx; - bx_offset = 1 + bx_dim1 * 1; - bx -= bx_offset; - --perm; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - difr_dim1 = *ldgnum; - difr_offset = 1 + difr_dim1 * 1; - difr -= difr_offset; - poles_dim1 = *ldgnum; - poles_offset = 1 + poles_dim1 * 1; - poles -= poles_offset; - givnum_dim1 = *ldgnum; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - --difl; - --z__; - --rwork; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*nl < 1) { - *info = -2; - } else if (*nr < 1) { - *info = -3; - } else if (*sqre < 0 || *sqre > 1) { - *info = -4; - } - - n = *nl + *nr + 1; - - if (*nrhs < 1) { - *info = -5; - } else if (*ldb < n) { - *info = -7; - } else if (*ldbx < n) { - *info = -9; - } else if (*givptr < 0) { - *info = -11; - } else if (*ldgcol < n) { - *info = -13; - } else if (*ldgnum < n) { - *info = -15; - } else if (*k < 1) { - *info = -20; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLALS0", &i__1); - return 0; - } - - m = n + *sqre; - nlp1 = *nl + 1; - - if (*icompq == 0) { - -/* - Apply back orthogonal transformations from the left. - - Step (1L): apply back the Givens rotations performed. -*/ - - i__1 = *givptr; - for (i__ = 1; i__ <= i__1; ++i__) { - zdrot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1], - ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[ - i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1]) - ; -/* L10: */ - } - -/* Step (2L): permute rows of B. */ - - zcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx); - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - zcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1], - ldbx); -/* L20: */ - } - -/* - Step (3L): apply the inverse of the left singular vector - matrix to BX. 
-*/ - - if (*k == 1) { - zcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb); - if (z__[1] < 0.) { - zdscal_(nrhs, &c_b1294, &b[b_offset], ldb); - } - } else { - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - diflj = difl[j]; - dj = poles[j + poles_dim1]; - dsigj = -poles[j + ((poles_dim1) << (1))]; - if (j < *k) { - difrj = -difr[j + difr_dim1]; - dsigjp = -poles[j + 1 + ((poles_dim1) << (1))]; - } - if (z__[j] == 0. || poles[j + ((poles_dim1) << (1))] == 0.) { - rwork[j] = 0.; - } else { - rwork[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] / - diflj / (poles[j + ((poles_dim1) << (1))] + dj); - } - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))] - == 0.) { - rwork[i__] = 0.; - } else { - rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[ - i__] / (dlamc3_(&poles[i__ + ((poles_dim1) << - (1))], &dsigj) - diflj) / (poles[i__ + (( - poles_dim1) << (1))] + dj); - } -/* L30: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))] - == 0.) { - rwork[i__] = 0.; - } else { - rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[ - i__] / (dlamc3_(&poles[i__ + ((poles_dim1) << - (1))], &dsigjp) + difrj) / (poles[i__ + (( - poles_dim1) << (1))] + dj); - } -/* L40: */ - } - rwork[1] = -1.; - temp = dnrm2_(k, &rwork[1], &c__1); - -/* - Since B and BX are complex, the following call to DGEMV - is performed in two steps (real and imaginary parts). 
- - CALL DGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO, - $ B( J, 1 ), LDB ) -*/ - - i__ = *k + ((*nrhs) << (1)); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = *k; - for (jrow = 1; jrow <= i__3; ++jrow) { - ++i__; - i__4 = jrow + jcol * bx_dim1; - rwork[i__] = bx[i__4].r; -/* L50: */ - } -/* L60: */ - } - dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1) - )], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1], & - c__1); - i__ = *k + ((*nrhs) << (1)); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = *k; - for (jrow = 1; jrow <= i__3; ++jrow) { - ++i__; - rwork[i__] = d_imag(&bx[jrow + jcol * bx_dim1]); -/* L70: */ - } -/* L80: */ - } - dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1) - )], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1 + * - nrhs], &c__1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = j + jcol * b_dim1; - i__4 = jcol + *k; - i__5 = jcol + *k + *nrhs; - z__1.r = rwork[i__4], z__1.i = rwork[i__5]; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L90: */ - } - zlascl_("G", &c__0, &c__0, &temp, &c_b1015, &c__1, nrhs, &b[j - + b_dim1], ldb, info); -/* L100: */ - } - } - -/* Move the deflated rows of BX to B also. */ - - if (*k < max(m,n)) { - i__1 = n - *k; - zlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1 - + b_dim1], ldb); - } - } else { - -/* - Apply back the right orthogonal transformations. - - Step (1R): apply back the new right singular vector matrix - to B. -*/ - - if (*k == 1) { - zcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx); - } else { - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - dsigj = poles[j + ((poles_dim1) << (1))]; - if (z__[j] == 0.) { - rwork[j] = 0.; - } else { - rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j + - poles_dim1]) / difr[j + ((difr_dim1) << (1))]; - } - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - if (z__[j] == 0.) 
{ - rwork[i__] = 0.; - } else { - d__1 = -poles[i__ + 1 + ((poles_dim1) << (1))]; - rwork[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[ - i__ + difr_dim1]) / (dsigj + poles[i__ + - poles_dim1]) / difr[i__ + ((difr_dim1) << (1)) - ]; - } -/* L110: */ - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - if (z__[j] == 0.) { - rwork[i__] = 0.; - } else { - d__1 = -poles[i__ + ((poles_dim1) << (1))]; - rwork[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[ - i__]) / (dsigj + poles[i__ + poles_dim1]) / - difr[i__ + ((difr_dim1) << (1))]; - } -/* L120: */ - } - -/* - Since B and BX are complex, the following call to DGEMV - is performed in two steps (real and imaginary parts). - - CALL DGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO, - $ BX( J, 1 ), LDBX ) -*/ - - i__ = *k + ((*nrhs) << (1)); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = *k; - for (jrow = 1; jrow <= i__3; ++jrow) { - ++i__; - i__4 = jrow + jcol * b_dim1; - rwork[i__] = b[i__4].r; -/* L130: */ - } -/* L140: */ - } - dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1) - )], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1], & - c__1); - i__ = *k + ((*nrhs) << (1)); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = *k; - for (jrow = 1; jrow <= i__3; ++jrow) { - ++i__; - rwork[i__] = d_imag(&b[jrow + jcol * b_dim1]); -/* L150: */ - } -/* L160: */ - } - dgemv_("T", k, nrhs, &c_b1015, &rwork[*k + 1 + ((*nrhs) << (1) - )], k, &rwork[1], &c__1, &c_b324, &rwork[*k + 1 + * - nrhs], &c__1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = j + jcol * bx_dim1; - i__4 = jcol + *k; - i__5 = jcol + *k + *nrhs; - z__1.r = rwork[i__4], z__1.i = rwork[i__5]; - bx[i__3].r = z__1.r, bx[i__3].i = z__1.i; -/* L170: */ - } -/* L180: */ - } - } - -/* - Step (2R): if SQRE = 1, apply back the rotation that is - related to the right null space of the subproblem. 
-*/ - - if (*sqre == 1) { - zcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx); - zdrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__, - s); - } - if (*k < max(m,n)) { - i__1 = n - *k; - zlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 + - bx_dim1], ldbx); - } - -/* Step (3R): permute rows of B. */ - - zcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb); - if (*sqre == 1) { - zcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb); - } - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - zcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1], - ldb); -/* L190: */ - } - -/* Step (4R): apply back the Givens rotations performed. */ - - for (i__ = *givptr; i__ >= 1; --i__) { - d__1 = -givnum[i__ + givnum_dim1]; - zdrot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1], - ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[ - i__ + ((givnum_dim1) << (1))], &d__1); -/* L200: */ - } - } - - return 0; - -/* End of ZLALS0 */ - -} /* zlals0_ */ - -/* Subroutine */ int zlalsa_(integer *icompq, integer *smlsiz, integer *n, - integer *nrhs, doublecomplex *b, integer *ldb, doublecomplex *bx, - integer *ldbx, doublereal *u, integer *ldu, doublereal *vt, integer * - k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal * - poles, integer *givptr, integer *givcol, integer *ldgcol, integer * - perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal * - rwork, integer *iwork, integer *info) -{ - /* System generated locals */ - integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1, - difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, - poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, - z_dim1, z_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1, - i__2, i__3, i__4, i__5, i__6; - doublecomplex z__1; - - /* Builtin functions */ - double d_imag(doublecomplex *); - integer pow_ii(integer *, integer *); - - /* Local variables */ - static integer i__, j, 
i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, - ndb1, nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer jreal, inode, ndiml, ndimr; - extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *), zlals0_(integer *, integer *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, integer *, integer *, integer *, - integer *, doublereal *, integer *, doublereal *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, doublereal *, - doublereal *, integer *), dlasdt_(integer *, integer *, integer * - , integer *, integer *, integer *, integer *), xerbla_(char *, - integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZLALSA is an itermediate step in solving the least squares problem - by computing the SVD of the coefficient matrix in compact form (The - singular vectors are computed as products of simple orthorgonal - matrices.). - - If ICOMPQ = 0, ZLALSA applies the inverse of the left singular vector - matrix of an upper bidiagonal matrix to the right hand side; and if - ICOMPQ = 1, ZLALSA applies the right singular vector matrix to the - right hand side. The singular vector matrices were generated in - compact form by ZLALSA. - - Arguments - ========= - - ICOMPQ (input) INTEGER - Specifies whether the left or the right singular vector - matrix is involved. - = 0: Left singular vector matrix - = 1: Right singular vector matrix - - SMLSIZ (input) INTEGER - The maximum size of the subproblems at the bottom of the - computation tree. 
- - N (input) INTEGER - The row and column dimensions of the upper bidiagonal matrix. - - NRHS (input) INTEGER - The number of columns of B and BX. NRHS must be at least 1. - - B (input) COMPLEX*16 array, dimension ( LDB, NRHS ) - On input, B contains the right hand sides of the least - squares problem in rows 1 through M. On output, B contains - the solution X in rows 1 through N. - - LDB (input) INTEGER - The leading dimension of B in the calling subprogram. - LDB must be at least max(1,MAX( M, N ) ). - - BX (output) COMPLEX*16 array, dimension ( LDBX, NRHS ) - On exit, the result of applying the left or right singular - vector matrix to B. - - LDBX (input) INTEGER - The leading dimension of BX. - - U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ). - On entry, U contains the left singular vector matrices of all - subproblems at the bottom level. - - LDU (input) INTEGER, LDU = > N. - The leading dimension of arrays U, VT, DIFL, DIFR, - POLES, GIVNUM, and Z. - - VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ). - On entry, VT' contains the right singular vector matrices of - all subproblems at the bottom level. - - K (input) INTEGER array, dimension ( N ). - - DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). - where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. - - DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). - On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record - distances between singular values on the I-th level and - singular values on the (I -1)-th level, and DIFR(*, 2 * I) - record the normalizing factors of the right singular vectors - matrices of subproblems on I-th level. - - Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). - On entry, Z(1, I) contains the components of the deflation- - adjusted updating row vector for subproblems on the I-th - level. - - POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). 
- On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old - singular values involved in the secular equations on the I-th - level. - - GIVPTR (input) INTEGER array, dimension ( N ). - On entry, GIVPTR( I ) records the number of Givens - rotations performed on the I-th problem on the computation - tree. - - GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). - On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the - locations of Givens rotations performed on the I-th level on - the computation tree. - - LDGCOL (input) INTEGER, LDGCOL = > N. - The leading dimension of arrays GIVCOL and PERM. - - PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). - On entry, PERM(*, I) records permutations done on the I-th - level of the computation tree. - - GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). - On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- - values of Givens rotations performed on the I-th level on the - computation tree. - - C (input) DOUBLE PRECISION array, dimension ( N ). - On entry, if the I-th subproblem is not square, - C( I ) contains the C-value of a Givens rotation related to - the right null space of the I-th subproblem. - - S (input) DOUBLE PRECISION array, dimension ( N ). - On entry, if the I-th subproblem is not square, - S( I ) contains the S-value of a Givens rotation related to - the right null space of the I-th subproblem. - - RWORK (workspace) DOUBLE PRECISION array, dimension at least - max ( N, (SMLSZ+1)*NRHS*3 ). - - IWORK (workspace) INTEGER array. - The dimension must be at least 3 * N - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - bx_dim1 = *ldbx; - bx_offset = 1 + bx_dim1 * 1; - bx -= bx_offset; - givnum_dim1 = *ldu; - givnum_offset = 1 + givnum_dim1 * 1; - givnum -= givnum_offset; - poles_dim1 = *ldu; - poles_offset = 1 + poles_dim1 * 1; - poles -= poles_offset; - z_dim1 = *ldu; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - difr_dim1 = *ldu; - difr_offset = 1 + difr_dim1 * 1; - difr -= difr_offset; - difl_dim1 = *ldu; - difl_offset = 1 + difl_dim1 * 1; - difl -= difl_offset; - vt_dim1 = *ldu; - vt_offset = 1 + vt_dim1 * 1; - vt -= vt_offset; - u_dim1 = *ldu; - u_offset = 1 + u_dim1 * 1; - u -= u_offset; - --k; - --givptr; - perm_dim1 = *ldgcol; - perm_offset = 1 + perm_dim1 * 1; - perm -= perm_offset; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1 * 1; - givcol -= givcol_offset; - --c__; - --s; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*smlsiz < 3) { - *info = -2; - } else if (*n < *smlsiz) { - *info = -3; - } else if (*nrhs < 1) { - *info = -4; - } else if (*ldb < *n) { - *info = -6; - } else if (*ldbx < *n) { - *info = -8; - } else if (*ldu < *n) { - *info = -10; - } else if (*ldgcol < *n) { - *info = -19; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLALSA", &i__1); - return 0; - } - -/* Book-keeping and setting up the computation tree. */ - - inode = 1; - ndiml = inode + *n; - ndimr = ndiml + *n; - - dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], - smlsiz); - -/* - The following code applies back the left singular vector factors. 
- For applying back the right singular vector factors, go to 170. -*/ - - if (*icompq == 1) { - goto L170; - } - -/* - The nodes on the bottom level of the tree were solved - by DLASDQ. The corresponding left and right singular vector - matrices are in explicit form. First apply back the left - singular vector matrices. -*/ - - ndb1 = (nd + 1) / 2; - i__1 = nd; - for (i__ = ndb1; i__ <= i__1; ++i__) { - -/* - IC : center row of each node - NL : number of rows of left subproblem - NR : number of rows of right subproblem - NLF: starting row of the left subproblem - NRF: starting row of the right subproblem -*/ - - i1 = i__ - 1; - ic = iwork[inode + i1]; - nl = iwork[ndiml + i1]; - nr = iwork[ndimr + i1]; - nlf = ic - nl; - nrf = ic + 1; - -/* - Since B and BX are complex, the following call to DGEMM - is performed in two steps (real and imaginary parts). - - CALL DGEMM( 'T', 'N', NL, NRHS, NL, ONE, U( NLF, 1 ), LDU, - $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX ) -*/ - - j = (nl * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nlf + nl - 1; - for (jrow = nlf; jrow <= i__3; ++jrow) { - ++j; - i__4 = jrow + jcol * b_dim1; - rwork[j] = b[i__4].r; -/* L10: */ - } -/* L20: */ - } - dgemm_("T", "N", &nl, nrhs, &nl, &c_b1015, &u[nlf + u_dim1], ldu, & - rwork[((nl * *nrhs) << (1)) + 1], &nl, &c_b324, &rwork[1], & - nl); - j = (nl * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nlf + nl - 1; - for (jrow = nlf; jrow <= i__3; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L30: */ - } -/* L40: */ - } - dgemm_("T", "N", &nl, nrhs, &nl, &c_b1015, &u[nlf + u_dim1], ldu, & - rwork[((nl * *nrhs) << (1)) + 1], &nl, &c_b324, &rwork[nl * * - nrhs + 1], &nl); - jreal = 0; - jimag = nl * *nrhs; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nlf + nl - 1; - for (jrow = nlf; jrow <= i__3; ++jrow) { - ++jreal; - ++jimag; - i__4 = jrow + jcol * bx_dim1; - i__5 = jreal; - i__6 = jimag; 
- z__1.r = rwork[i__5], z__1.i = rwork[i__6]; - bx[i__4].r = z__1.r, bx[i__4].i = z__1.i; -/* L50: */ - } -/* L60: */ - } - -/* - Since B and BX are complex, the following call to DGEMM - is performed in two steps (real and imaginary parts). - - CALL DGEMM( 'T', 'N', NR, NRHS, NR, ONE, U( NRF, 1 ), LDU, - $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX ) -*/ - - j = (nr * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nrf + nr - 1; - for (jrow = nrf; jrow <= i__3; ++jrow) { - ++j; - i__4 = jrow + jcol * b_dim1; - rwork[j] = b[i__4].r; -/* L70: */ - } -/* L80: */ - } - dgemm_("T", "N", &nr, nrhs, &nr, &c_b1015, &u[nrf + u_dim1], ldu, & - rwork[((nr * *nrhs) << (1)) + 1], &nr, &c_b324, &rwork[1], & - nr); - j = (nr * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nrf + nr - 1; - for (jrow = nrf; jrow <= i__3; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L90: */ - } -/* L100: */ - } - dgemm_("T", "N", &nr, nrhs, &nr, &c_b1015, &u[nrf + u_dim1], ldu, & - rwork[((nr * *nrhs) << (1)) + 1], &nr, &c_b324, &rwork[nr * * - nrhs + 1], &nr); - jreal = 0; - jimag = nr * *nrhs; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nrf + nr - 1; - for (jrow = nrf; jrow <= i__3; ++jrow) { - ++jreal; - ++jimag; - i__4 = jrow + jcol * bx_dim1; - i__5 = jreal; - i__6 = jimag; - z__1.r = rwork[i__5], z__1.i = rwork[i__6]; - bx[i__4].r = z__1.r, bx[i__4].i = z__1.i; -/* L110: */ - } -/* L120: */ - } - -/* L130: */ - } - -/* - Next copy the rows of B that correspond to unchanged rows - in the bidiagonal matrix to BX. -*/ - - i__1 = nd; - for (i__ = 1; i__ <= i__1; ++i__) { - ic = iwork[inode + i__ - 1]; - zcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx); -/* L140: */ - } - -/* - Finally go through the left singular vector matrices of all - the other subproblems bottom-up on the tree. 
-*/ - - j = pow_ii(&c__2, &nlvl); - sqre = 0; - - for (lvl = nlvl; lvl >= 1; --lvl) { - lvl2 = ((lvl) << (1)) - 1; - -/* - find the first node LF and last node LL on - the current level LVL -*/ - - if (lvl == 1) { - lf = 1; - ll = 1; - } else { - i__1 = lvl - 1; - lf = pow_ii(&c__2, &i__1); - ll = ((lf) << (1)) - 1; - } - i__1 = ll; - for (i__ = lf; i__ <= i__1; ++i__) { - im1 = i__ - 1; - ic = iwork[inode + im1]; - nl = iwork[ndiml + im1]; - nr = iwork[ndimr + im1]; - nlf = ic - nl; - nrf = ic + 1; - --j; - zlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, & - b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], & - givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & - givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * - poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + - lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ - j], &s[j], &rwork[1], info); -/* L150: */ - } -/* L160: */ - } - goto L330; - -/* ICOMPQ = 1: applying back the right singular vector factors. */ - -L170: - -/* - First now go through the right singular vector matrices of all - the tree nodes top-down. -*/ - - j = 0; - i__1 = nlvl; - for (lvl = 1; lvl <= i__1; ++lvl) { - lvl2 = ((lvl) << (1)) - 1; - -/* - Find the first node LF and last node LL on - the current level LVL. 
-*/ - - if (lvl == 1) { - lf = 1; - ll = 1; - } else { - i__2 = lvl - 1; - lf = pow_ii(&c__2, &i__2); - ll = ((lf) << (1)) - 1; - } - i__2 = lf; - for (i__ = ll; i__ >= i__2; --i__) { - im1 = i__ - 1; - ic = iwork[inode + im1]; - nl = iwork[ndiml + im1]; - nr = iwork[ndimr + im1]; - nlf = ic - nl; - nrf = ic + 1; - if (i__ == ll) { - sqre = 0; - } else { - sqre = 1; - } - ++j; - zlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[ - nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], & - givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & - givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * - poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + - lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ - j], &s[j], &rwork[1], info); -/* L180: */ - } -/* L190: */ - } - -/* - The nodes on the bottom level of the tree were solved - by DLASDQ. The corresponding right singular vector - matrices are in explicit form. Apply them back. -*/ - - ndb1 = (nd + 1) / 2; - i__1 = nd; - for (i__ = ndb1; i__ <= i__1; ++i__) { - i1 = i__ - 1; - ic = iwork[inode + i1]; - nl = iwork[ndiml + i1]; - nr = iwork[ndimr + i1]; - nlp1 = nl + 1; - if (i__ == nd) { - nrp1 = nr; - } else { - nrp1 = nr + 1; - } - nlf = ic - nl; - nrf = ic + 1; - -/* - Since B and BX are complex, the following call to DGEMM is - performed in two steps (real and imaginary parts). 
- - CALL DGEMM( 'T', 'N', NLP1, NRHS, NLP1, ONE, VT( NLF, 1 ), LDU, - $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX ) -*/ - - j = (nlp1 * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nlf + nlp1 - 1; - for (jrow = nlf; jrow <= i__3; ++jrow) { - ++j; - i__4 = jrow + jcol * b_dim1; - rwork[j] = b[i__4].r; -/* L200: */ - } -/* L210: */ - } - dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1015, &vt[nlf + vt_dim1], - ldu, &rwork[((nlp1 * *nrhs) << (1)) + 1], &nlp1, &c_b324, & - rwork[1], &nlp1); - j = (nlp1 * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nlf + nlp1 - 1; - for (jrow = nlf; jrow <= i__3; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L220: */ - } -/* L230: */ - } - dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1015, &vt[nlf + vt_dim1], - ldu, &rwork[((nlp1 * *nrhs) << (1)) + 1], &nlp1, &c_b324, & - rwork[nlp1 * *nrhs + 1], &nlp1); - jreal = 0; - jimag = nlp1 * *nrhs; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nlf + nlp1 - 1; - for (jrow = nlf; jrow <= i__3; ++jrow) { - ++jreal; - ++jimag; - i__4 = jrow + jcol * bx_dim1; - i__5 = jreal; - i__6 = jimag; - z__1.r = rwork[i__5], z__1.i = rwork[i__6]; - bx[i__4].r = z__1.r, bx[i__4].i = z__1.i; -/* L240: */ - } -/* L250: */ - } - -/* - Since B and BX are complex, the following call to DGEMM is - performed in two steps (real and imaginary parts). 
- - CALL DGEMM( 'T', 'N', NRP1, NRHS, NRP1, ONE, VT( NRF, 1 ), LDU, - $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX ) -*/ - - j = (nrp1 * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nrf + nrp1 - 1; - for (jrow = nrf; jrow <= i__3; ++jrow) { - ++j; - i__4 = jrow + jcol * b_dim1; - rwork[j] = b[i__4].r; -/* L260: */ - } -/* L270: */ - } - dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1015, &vt[nrf + vt_dim1], - ldu, &rwork[((nrp1 * *nrhs) << (1)) + 1], &nrp1, &c_b324, & - rwork[1], &nrp1); - j = (nrp1 * *nrhs) << (1); - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nrf + nrp1 - 1; - for (jrow = nrf; jrow <= i__3; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L280: */ - } -/* L290: */ - } - dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1015, &vt[nrf + vt_dim1], - ldu, &rwork[((nrp1 * *nrhs) << (1)) + 1], &nrp1, &c_b324, & - rwork[nrp1 * *nrhs + 1], &nrp1); - jreal = 0; - jimag = nrp1 * *nrhs; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = nrf + nrp1 - 1; - for (jrow = nrf; jrow <= i__3; ++jrow) { - ++jreal; - ++jimag; - i__4 = jrow + jcol * bx_dim1; - i__5 = jreal; - i__6 = jimag; - z__1.r = rwork[i__5], z__1.i = rwork[i__6]; - bx[i__4].r = z__1.r, bx[i__4].i = z__1.i; -/* L300: */ - } -/* L310: */ - } - -/* L320: */ - } - -L330: - - return 0; - -/* End of ZLALSA */ - -} /* zlalsa_ */ - -/* Subroutine */ int zlalsd_(char *uplo, integer *smlsiz, integer *n, integer - *nrhs, doublereal *d__, doublereal *e, doublecomplex *b, integer *ldb, - doublereal *rcond, integer *rank, doublecomplex *work, doublereal * - rwork, integer *iwork, integer *info) -{ - /* System generated locals */ - integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6; - doublereal d__1; - doublecomplex z__1; - - /* Builtin functions */ - double d_imag(doublecomplex *), log(doublereal), d_sign(doublereal *, - doublereal *); - - /* Local variables */ - static integer c__, i__, j, k; - static doublereal r__; - 
static integer s, u, z__; - static doublereal cs; - static integer bx; - static doublereal sn; - static integer st, vt, nm1, st1; - static doublereal eps; - static integer iwk; - static doublereal tol; - static integer difl, difr, jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow, - irwu, jimag; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - static integer jreal, irwib, poles, sizei, irwrb, nsize; - extern /* Subroutine */ int zdrot_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublereal *, doublereal *), zcopy_( - integer *, doublecomplex *, integer *, doublecomplex *, integer *) - ; - static integer irwvt, icmpq1, icmpq2; - - extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *, - doublereal *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, integer *, integer *, integer *, - doublereal *, doublereal *, doublereal *, doublereal *, integer *, - integer *), dlascl_(char *, integer *, integer *, doublereal *, - doublereal *, integer *, integer *, doublereal *, integer *, - integer *); - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer - *, integer *, integer *, doublereal *, doublereal *, doublereal *, - integer *, doublereal *, integer *, doublereal *, integer *, - doublereal *, integer *), dlaset_(char *, integer *, - integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *), xerbla_(char *, integer *); - static integer givcol; - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int zlalsa_(integer *, integer *, integer *, - integer *, doublecomplex *, integer *, 
doublecomplex *, integer *, - doublereal *, integer *, doublereal *, integer *, doublereal *, - doublereal *, doublereal *, doublereal *, integer *, integer *, - integer *, integer *, doublereal *, doublereal *, doublereal *, - doublereal *, integer *, integer *), zlascl_(char *, integer *, - integer *, doublereal *, doublereal *, integer *, integer *, - doublecomplex *, integer *, integer *), dlasrt_(char *, - integer *, doublereal *, integer *), zlacpy_(char *, - integer *, integer *, doublecomplex *, integer *, doublecomplex *, - integer *), zlaset_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, doublecomplex *, integer *); - static doublereal orgnrm; - static integer givnum, givptr, nrwork, irwwrk, smlszp; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1999 - - - Purpose - ======= - - ZLALSD uses the singular value decomposition of A to solve the least - squares problem of finding X to minimize the Euclidean norm of each - column of A*X-B, where A is N-by-N upper bidiagonal, and X and B - are N-by-NRHS. The solution X overwrites B. - - The singular values of A smaller than RCOND times the largest - singular value are treated as zero in solving the least squares - problem; in this case a minimum norm solution is returned. - The actual singular values are returned in D in ascending order. - - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. - It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': D and E define an upper bidiagonal matrix. - = 'L': D and E define a lower bidiagonal matrix. 
- - SMLSIZ (input) INTEGER - The maximum size of the subproblems at the bottom of the - computation tree. - - N (input) INTEGER - The dimension of the bidiagonal matrix. N >= 0. - - NRHS (input) INTEGER - The number of columns of B. NRHS must be at least 1. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry D contains the main diagonal of the bidiagonal - matrix. On exit, if INFO = 0, D contains its singular values. - - E (input) DOUBLE PRECISION array, dimension (N-1) - Contains the super-diagonal entries of the bidiagonal matrix. - On exit, E has been destroyed. - - B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) - On input, B contains the right hand sides of the least - squares problem. On output, B contains the solution X. - - LDB (input) INTEGER - The leading dimension of B in the calling subprogram. - LDB must be at least max(1,N). - - RCOND (input) DOUBLE PRECISION - The singular values of A less than or equal to RCOND times - the largest singular value are treated as zero in solving - the least squares problem. If RCOND is negative, - machine precision is used instead. - For example, if diag(S)*X=B were the least squares problem, - where diag(S) is a diagonal matrix of singular values, the - solution would be X(i) = B(i) / S(i) if S(i) is greater than - RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to - RCOND*max(S). - - RANK (output) INTEGER - The number of singular values of A greater than RCOND times - the largest singular value. - - WORK (workspace) COMPLEX*16 array, dimension at least - (N * NRHS). - - RWORK (workspace) DOUBLE PRECISION array, dimension at least - (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + (SMLSIZ+1)**2), - where - NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) - - IWORK (workspace) INTEGER array, dimension at least - (3*N*NLVL + 11*N). - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. 
- > 0: The algorithm failed to compute an singular value while - working on the submatrix lying in rows and columns - INFO/(N+1) through MOD(INFO,N+1). - - Further Details - =============== - - Based on contributions by - Ming Gu and Ren-Cang Li, Computer Science Division, University of - California at Berkeley, USA - Osni Marques, LBNL/NERSC, USA - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - --d__; - --e; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - --work; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - - if (*n < 0) { - *info = -3; - } else if (*nrhs < 1) { - *info = -4; - } else if (*ldb < 1 || *ldb < *n) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLALSD", &i__1); - return 0; - } - - eps = EPSILON; - -/* Set up the tolerance. */ - - if (*rcond <= 0. || *rcond >= 1.) { - *rcond = eps; - } - - *rank = 0; - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } else if (*n == 1) { - if (d__[1] == 0.) { - zlaset_("A", &c__1, nrhs, &c_b59, &c_b59, &b[b_offset], ldb); - } else { - *rank = 1; - zlascl_("G", &c__0, &c__0, &d__[1], &c_b1015, &c__1, nrhs, &b[ - b_offset], ldb, info); - d__[1] = abs(d__[1]); - } - return 0; - } - -/* Rotate the matrix if it is lower bidiagonal. 
*/ - - if (*(unsigned char *)uplo == 'L') { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); - d__[i__] = r__; - e[i__] = sn * d__[i__ + 1]; - d__[i__ + 1] = cs * d__[i__ + 1]; - if (*nrhs == 1) { - zdrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], & - c__1, &cs, &sn); - } else { - rwork[((i__) << (1)) - 1] = cs; - rwork[i__ * 2] = sn; - } -/* L10: */ - } - if (*nrhs > 1) { - i__1 = *nrhs; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = *n - 1; - for (j = 1; j <= i__2; ++j) { - cs = rwork[((j) << (1)) - 1]; - sn = rwork[j * 2]; - zdrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ - * b_dim1], &c__1, &cs, &sn); -/* L20: */ - } -/* L30: */ - } - } - } - -/* Scale. */ - - nm1 = *n - 1; - orgnrm = dlanst_("M", n, &d__[1], &e[1]); - if (orgnrm == 0.) { - zlaset_("A", n, nrhs, &c_b59, &c_b59, &b[b_offset], ldb); - return 0; - } - - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, n, &c__1, &d__[1], n, info); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, &nm1, &c__1, &e[1], &nm1, - info); - -/* - If N is smaller than the minimum divide size SMLSIZ, then solve - the problem with another solver. -*/ - - if (*n <= *smlsiz) { - irwu = 1; - irwvt = irwu + *n * *n; - irwwrk = irwvt + *n * *n; - irwrb = irwwrk; - irwib = irwrb + *n * *nrhs; - irwb = irwib + *n * *nrhs; - dlaset_("A", n, n, &c_b324, &c_b1015, &rwork[irwu], n); - dlaset_("A", n, n, &c_b324, &c_b1015, &rwork[irwvt], n); - dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n, - &rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info); - if (*info != 0) { - return 0; - } - -/* - In the real version, B is passed to DLASDQ and multiplied - internally by Q'. Here B is complex and that product is - computed below in two steps (real and imaginary parts). 
-*/ - - j = irwb - 1; - i__1 = *nrhs; - for (jcol = 1; jcol <= i__1; ++jcol) { - i__2 = *n; - for (jrow = 1; jrow <= i__2; ++jrow) { - ++j; - i__3 = jrow + jcol * b_dim1; - rwork[j] = b[i__3].r; -/* L40: */ - } -/* L50: */ - } - dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwu], n, &rwork[irwb], - n, &c_b324, &rwork[irwrb], n); - j = irwb - 1; - i__1 = *nrhs; - for (jcol = 1; jcol <= i__1; ++jcol) { - i__2 = *n; - for (jrow = 1; jrow <= i__2; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L60: */ - } -/* L70: */ - } - dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwu], n, &rwork[irwb], - n, &c_b324, &rwork[irwib], n); - jreal = irwrb - 1; - jimag = irwib - 1; - i__1 = *nrhs; - for (jcol = 1; jcol <= i__1; ++jcol) { - i__2 = *n; - for (jrow = 1; jrow <= i__2; ++jrow) { - ++jreal; - ++jimag; - i__3 = jrow + jcol * b_dim1; - i__4 = jreal; - i__5 = jimag; - z__1.r = rwork[i__4], z__1.i = rwork[i__5]; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L80: */ - } -/* L90: */ - } - - tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if (d__[i__] <= tol) { - zlaset_("A", &c__1, nrhs, &c_b59, &c_b59, &b[i__ + b_dim1], - ldb); - } else { - zlascl_("G", &c__0, &c__0, &d__[i__], &c_b1015, &c__1, nrhs, & - b[i__ + b_dim1], ldb, info); - ++(*rank); - } -/* L100: */ - } - -/* - Since B is complex, the following call to DGEMM is performed - in two steps (real and imaginary parts). That is for V * B - (in the real version of the code V' is stored in WORK). 
- - CALL DGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO, - $ WORK( NWORK ), N ) -*/ - - j = irwb - 1; - i__1 = *nrhs; - for (jcol = 1; jcol <= i__1; ++jcol) { - i__2 = *n; - for (jrow = 1; jrow <= i__2; ++jrow) { - ++j; - i__3 = jrow + jcol * b_dim1; - rwork[j] = b[i__3].r; -/* L110: */ - } -/* L120: */ - } - dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwvt], n, &rwork[irwb], - n, &c_b324, &rwork[irwrb], n); - j = irwb - 1; - i__1 = *nrhs; - for (jcol = 1; jcol <= i__1; ++jcol) { - i__2 = *n; - for (jrow = 1; jrow <= i__2; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L130: */ - } -/* L140: */ - } - dgemm_("T", "N", n, nrhs, n, &c_b1015, &rwork[irwvt], n, &rwork[irwb], - n, &c_b324, &rwork[irwib], n); - jreal = irwrb - 1; - jimag = irwib - 1; - i__1 = *nrhs; - for (jcol = 1; jcol <= i__1; ++jcol) { - i__2 = *n; - for (jrow = 1; jrow <= i__2; ++jrow) { - ++jreal; - ++jimag; - i__3 = jrow + jcol * b_dim1; - i__4 = jreal; - i__5 = jimag; - z__1.r = rwork[i__4], z__1.i = rwork[i__5]; - b[i__3].r = z__1.r, b[i__3].i = z__1.i; -/* L150: */ - } -/* L160: */ - } - -/* Unscale. */ - - dlascl_("G", &c__0, &c__0, &c_b1015, &orgnrm, n, &c__1, &d__[1], n, - info); - dlasrt_("D", n, &d__[1], info); - zlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, n, nrhs, &b[b_offset], - ldb, info); - - return 0; - } - -/* Book-keeping and setting up some constants. 
*/ - - nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) / - log(2.)) + 1; - - smlszp = *smlsiz + 1; - - u = 1; - vt = *smlsiz * *n + 1; - difl = vt + smlszp * *n; - difr = difl + nlvl * *n; - z__ = difr + ((nlvl * *n) << (1)); - c__ = z__ + nlvl * *n; - s = c__ + *n; - poles = s + *n; - givnum = poles + ((nlvl) << (1)) * *n; - nrwork = givnum + ((nlvl) << (1)) * *n; - bx = 1; - - irwrb = nrwork; - irwib = irwrb + *smlsiz * *nrhs; - irwb = irwib + *smlsiz * *nrhs; - - sizei = *n + 1; - k = sizei + *n; - givptr = k + *n; - perm = givptr + *n; - givcol = perm + nlvl * *n; - iwk = givcol + ((nlvl * *n) << (1)); - - st = 1; - sqre = 0; - icmpq1 = 1; - icmpq2 = 0; - nsub = 0; - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) < eps) { - d__[i__] = d_sign(&eps, &d__[i__]); - } -/* L170: */ - } - - i__1 = nm1; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { - ++nsub; - iwork[nsub] = st; - -/* - Subproblem found. First determine its size and then - apply divide and conquer on it. -*/ - - if (i__ < nm1) { - -/* A subproblem with E(I) small for I < NM1. */ - - nsize = i__ - st + 1; - iwork[sizei + nsub - 1] = nsize; - } else if ((d__1 = e[i__], abs(d__1)) >= eps) { - -/* A subproblem with E(NM1) not too small but I = NM1. */ - - nsize = *n - st + 1; - iwork[sizei + nsub - 1] = nsize; - } else { - -/* - A subproblem with E(NM1) small. This implies an - 1-by-1 subproblem at D(N), which is not solved - explicitly. -*/ - - nsize = i__ - st + 1; - iwork[sizei + nsub - 1] = nsize; - ++nsub; - iwork[nsub] = *n; - iwork[sizei + nsub - 1] = 1; - zcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n); - } - st1 = st - 1; - if (nsize == 1) { - -/* - This is a 1-by-1 subproblem and is not solved - explicitly. -*/ - - zcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); - } else if (nsize <= *smlsiz) { - -/* This is a small subproblem and is solved by DLASDQ. 
*/ - - dlaset_("A", &nsize, &nsize, &c_b324, &c_b1015, &rwork[vt + - st1], n); - dlaset_("A", &nsize, &nsize, &c_b324, &c_b1015, &rwork[u + - st1], n); - dlasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], & - e[st], &rwork[vt + st1], n, &rwork[u + st1], n, & - rwork[nrwork], &c__1, &rwork[nrwork], info) - ; - if (*info != 0) { - return 0; - } - -/* - In the real version, B is passed to DLASDQ and multiplied - internally by Q'. Here B is complex and that product is - computed below in two steps (real and imaginary parts). -*/ - - j = irwb - 1; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = st + nsize - 1; - for (jrow = st; jrow <= i__3; ++jrow) { - ++j; - i__4 = jrow + jcol * b_dim1; - rwork[j] = b[i__4].r; -/* L180: */ - } -/* L190: */ - } - dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[u + - st1], n, &rwork[irwb], &nsize, &c_b324, &rwork[irwrb], - &nsize); - j = irwb - 1; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = st + nsize - 1; - for (jrow = st; jrow <= i__3; ++jrow) { - ++j; - rwork[j] = d_imag(&b[jrow + jcol * b_dim1]); -/* L200: */ - } -/* L210: */ - } - dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[u + - st1], n, &rwork[irwb], &nsize, &c_b324, &rwork[irwib], - &nsize); - jreal = irwrb - 1; - jimag = irwib - 1; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = st + nsize - 1; - for (jrow = st; jrow <= i__3; ++jrow) { - ++jreal; - ++jimag; - i__4 = jrow + jcol * b_dim1; - i__5 = jreal; - i__6 = jimag; - z__1.r = rwork[i__5], z__1.i = rwork[i__6]; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L220: */ - } -/* L230: */ - } - - zlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx + - st1], n); - } else { - -/* A large problem. Solve it using divide and conquer. 
*/ - - dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], & - rwork[u + st1], n, &rwork[vt + st1], &iwork[k + st1], - &rwork[difl + st1], &rwork[difr + st1], &rwork[z__ + - st1], &rwork[poles + st1], &iwork[givptr + st1], & - iwork[givcol + st1], n, &iwork[perm + st1], &rwork[ - givnum + st1], &rwork[c__ + st1], &rwork[s + st1], & - rwork[nrwork], &iwork[iwk], info); - if (*info != 0) { - return 0; - } - bxst = bx + st1; - zlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, & - work[bxst], n, &rwork[u + st1], n, &rwork[vt + st1], & - iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1] - , &rwork[z__ + st1], &rwork[poles + st1], &iwork[ - givptr + st1], &iwork[givcol + st1], n, &iwork[perm + - st1], &rwork[givnum + st1], &rwork[c__ + st1], &rwork[ - s + st1], &rwork[nrwork], &iwork[iwk], info); - if (*info != 0) { - return 0; - } - } - st = i__ + 1; - } -/* L240: */ - } - -/* Apply the singular values and treat the tiny ones as zero. */ - - tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* - Some of the elements in D can be negative because 1-by-1 - subproblems were not solved explicitly. -*/ - - if ((d__1 = d__[i__], abs(d__1)) <= tol) { - zlaset_("A", &c__1, nrhs, &c_b59, &c_b59, &work[bx + i__ - 1], n); - } else { - ++(*rank); - zlascl_("G", &c__0, &c__0, &d__[i__], &c_b1015, &c__1, nrhs, & - work[bx + i__ - 1], n, info); - } - d__[i__] = (d__1 = d__[i__], abs(d__1)); -/* L250: */ - } - -/* Now apply back the right singular vectors. */ - - icmpq2 = 1; - i__1 = nsub; - for (i__ = 1; i__ <= i__1; ++i__) { - st = iwork[i__]; - st1 = st - 1; - nsize = iwork[sizei + i__ - 1]; - bxst = bx + st1; - if (nsize == 1) { - zcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb); - } else if (nsize <= *smlsiz) { - -/* - Since B and BX are complex, the following call to DGEMM - is performed in two steps (real and imaginary parts). 
- - CALL DGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE, - $ RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO, - $ B( ST, 1 ), LDB ) -*/ - - j = bxst - *n - 1; - jreal = irwb - 1; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - j += *n; - i__3 = nsize; - for (jrow = 1; jrow <= i__3; ++jrow) { - ++jreal; - i__4 = j + jrow; - rwork[jreal] = work[i__4].r; -/* L260: */ - } -/* L270: */ - } - dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[vt + st1], - n, &rwork[irwb], &nsize, &c_b324, &rwork[irwrb], &nsize); - j = bxst - *n - 1; - jimag = irwb - 1; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - j += *n; - i__3 = nsize; - for (jrow = 1; jrow <= i__3; ++jrow) { - ++jimag; - rwork[jimag] = d_imag(&work[j + jrow]); -/* L280: */ - } -/* L290: */ - } - dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1015, &rwork[vt + st1], - n, &rwork[irwb], &nsize, &c_b324, &rwork[irwib], &nsize); - jreal = irwrb - 1; - jimag = irwib - 1; - i__2 = *nrhs; - for (jcol = 1; jcol <= i__2; ++jcol) { - i__3 = st + nsize - 1; - for (jrow = st; jrow <= i__3; ++jrow) { - ++jreal; - ++jimag; - i__4 = jrow + jcol * b_dim1; - i__5 = jreal; - i__6 = jimag; - z__1.r = rwork[i__5], z__1.i = rwork[i__6]; - b[i__4].r = z__1.r, b[i__4].i = z__1.i; -/* L300: */ - } -/* L310: */ - } - } else { - zlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st + - b_dim1], ldb, &rwork[u + st1], n, &rwork[vt + st1], & - iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1], & - rwork[z__ + st1], &rwork[poles + st1], &iwork[givptr + - st1], &iwork[givcol + st1], n, &iwork[perm + st1], &rwork[ - givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &rwork[ - nrwork], &iwork[iwk], info); - if (*info != 0) { - return 0; - } - } -/* L320: */ - } - -/* Unscale and sort the singular values. 
*/ - - dlascl_("G", &c__0, &c__0, &c_b1015, &orgnrm, n, &c__1, &d__[1], n, info); - dlasrt_("D", n, &d__[1], info); - zlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, n, nrhs, &b[b_offset], ldb, - info); - - return 0; - -/* End of ZLALSD */ - -} /* zlalsd_ */ - -doublereal zlange_(char *norm, integer *m, integer *n, doublecomplex *a, - integer *lda, doublereal *work) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal ret_val, d__1, d__2; - - /* Builtin functions */ - double z_abs(doublecomplex *), sqrt(doublereal); - - /* Local variables */ - static integer i__, j; - static doublereal sum, scale; - extern logical lsame_(char *, char *); - static doublereal value; - extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLANGE returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - complex matrix A. - - Description - =========== - - ZLANGE returns the value - - ZLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in ZLANGE as described - above. - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. When M = 0, - ZLANGE is set to zero. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. 
When N = 0, - ZLANGE is set to zero. - - A (input) COMPLEX*16 array, dimension (LDA,N) - The m by n matrix A. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(M,1). - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), - where LWORK >= M when NORM = 'I'; otherwise, WORK is not - referenced. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --work; - - /* Function Body */ - if (min(*m,*n) == 0) { - value = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]); - value = max(d__1,d__2); -/* L10: */ - } -/* L20: */ - } - } else if (lsame_(norm, "O") || *(unsigned char *) - norm == '1') { - -/* Find norm1(A). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = 0.; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - sum += z_abs(&a[i__ + j * a_dim1]); -/* L30: */ - } - value = max(value,sum); -/* L40: */ - } - } else if (lsame_(norm, "I")) { - -/* Find normI(A). */ - - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - work[i__] = 0.; -/* L50: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - work[i__] += z_abs(&a[i__ + j * a_dim1]); -/* L60: */ - } -/* L70: */ - } - value = 0.; - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = work[i__]; - value = max(d__1,d__2); -/* L80: */ - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). 
*/ - - scale = 0.; - sum = 1.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - zlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum); -/* L90: */ - } - value = scale * sqrt(sum); - } - - ret_val = value; - return ret_val; - -/* End of ZLANGE */ - -} /* zlange_ */ - -doublereal zlanhe_(char *norm, char *uplo, integer *n, doublecomplex *a, - integer *lda, doublereal *work) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2; - doublereal ret_val, d__1, d__2, d__3; - - /* Builtin functions */ - double z_abs(doublecomplex *), sqrt(doublereal); - - /* Local variables */ - static integer i__, j; - static doublereal sum, absa, scale; - extern logical lsame_(char *, char *); - static doublereal value; - extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLANHE returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - complex hermitian matrix A. - - Description - =========== - - ZLANHE returns the value - - ZLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in ZLANHE as described - above. - - UPLO (input) CHARACTER*1 - Specifies whether the upper or lower triangular part of the - hermitian matrix A is to be referenced. 
- = 'U': Upper triangular part of A is referenced - = 'L': Lower triangular part of A is referenced - - N (input) INTEGER - The order of the matrix A. N >= 0. When N = 0, ZLANHE is - set to zero. - - A (input) COMPLEX*16 array, dimension (LDA,N) - The hermitian matrix A. If UPLO = 'U', the leading n by n - upper triangular part of A contains the upper triangular part - of the matrix A, and the strictly lower triangular part of A - is not referenced. If UPLO = 'L', the leading n by n lower - triangular part of A contains the lower triangular part of - the matrix A, and the strictly upper triangular part of A is - not referenced. Note that the imaginary parts of the diagonal - elements need not be set and are assumed to be zero. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(N,1). - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), - where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, - WORK is not referenced. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --work; - - /* Function Body */ - if (*n == 0) { - value = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). 
*/ - - value = 0.; - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]); - value = max(d__1,d__2); -/* L10: */ - } -/* Computing MAX */ - i__2 = j + j * a_dim1; - d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1)); - value = max(d__2,d__3); -/* L20: */ - } - } else { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MAX */ - i__2 = j + j * a_dim1; - d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1)); - value = max(d__2,d__3); - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]); - value = max(d__1,d__2); -/* L30: */ - } -/* L40: */ - } - } - } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') { - -/* Find normI(A) ( = norm1(A), since A is hermitian). */ - - value = 0.; - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = 0.; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - absa = z_abs(&a[i__ + j * a_dim1]); - sum += absa; - work[i__] += absa; -/* L50: */ - } - i__2 = j + j * a_dim1; - work[j] = sum + (d__1 = a[i__2].r, abs(d__1)); -/* L60: */ - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = work[i__]; - value = max(d__1,d__2); -/* L70: */ - } - } else { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - work[i__] = 0.; -/* L80: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j + j * a_dim1; - sum = work[j] + (d__1 = a[i__2].r, abs(d__1)); - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - absa = z_abs(&a[i__ + j * a_dim1]); - sum += absa; - work[i__] += absa; -/* L90: */ - } - value = max(value,sum); -/* L100: */ - } - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). 
*/ - - scale = 0.; - sum = 1.; - if (lsame_(uplo, "U")) { - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - i__2 = j - 1; - zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum); -/* L110: */ - } - } else { - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - i__2 = *n - j; - zlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum); -/* L120: */ - } - } - sum *= 2; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + i__ * a_dim1; - if (a[i__2].r != 0.) { - i__2 = i__ + i__ * a_dim1; - absa = (d__1 = a[i__2].r, abs(d__1)); - if (scale < absa) { -/* Computing 2nd power */ - d__1 = scale / absa; - sum = sum * (d__1 * d__1) + 1.; - scale = absa; - } else { -/* Computing 2nd power */ - d__1 = absa / scale; - sum += d__1 * d__1; - } - } -/* L130: */ - } - value = scale * sqrt(sum); - } - - ret_val = value; - return ret_val; - -/* End of ZLANHE */ - -} /* zlanhe_ */ - -doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda, - doublereal *work) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublereal ret_val, d__1, d__2; - - /* Builtin functions */ - double z_abs(doublecomplex *), sqrt(doublereal); - - /* Local variables */ - static integer i__, j; - static doublereal sum, scale; - extern logical lsame_(char *, char *); - static doublereal value; - extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, - doublereal *, doublereal *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLANHS returns the value of the one norm, or the Frobenius norm, or - the infinity norm, or the element of largest absolute value of a - Hessenberg matrix A. 
- - Description - =========== - - ZLANHS returns the value - - ZLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm' - ( - ( norm1(A), NORM = '1', 'O' or 'o' - ( - ( normI(A), NORM = 'I' or 'i' - ( - ( normF(A), NORM = 'F', 'f', 'E' or 'e' - - where norm1 denotes the one norm of a matrix (maximum column sum), - normI denotes the infinity norm of a matrix (maximum row sum) and - normF denotes the Frobenius norm of a matrix (square root of sum of - squares). Note that max(abs(A(i,j))) is not a matrix norm. - - Arguments - ========= - - NORM (input) CHARACTER*1 - Specifies the value to be returned in ZLANHS as described - above. - - N (input) INTEGER - The order of the matrix A. N >= 0. When N = 0, ZLANHS is - set to zero. - - A (input) COMPLEX*16 array, dimension (LDA,N) - The n by n upper Hessenberg matrix A; the part of A below the - first sub-diagonal is not referenced. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(N,1). - - WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), - where LWORK >= N when NORM = 'I'; otherwise, WORK is not - referenced. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --work; - - /* Function Body */ - if (*n == 0) { - value = 0.; - } else if (lsame_(norm, "M")) { - -/* Find max(abs(A(i,j))). */ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]); - value = max(d__1,d__2); -/* L10: */ - } -/* L20: */ - } - } else if (lsame_(norm, "O") || *(unsigned char *) - norm == '1') { - -/* Find norm1(A). 
*/ - - value = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = 0.; -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { - sum += z_abs(&a[i__ + j * a_dim1]); -/* L30: */ - } - value = max(value,sum); -/* L40: */ - } - } else if (lsame_(norm, "I")) { - -/* Find normI(A). */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - work[i__] = 0.; -/* L50: */ - } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ <= i__2; ++i__) { - work[i__] += z_abs(&a[i__ + j * a_dim1]); -/* L60: */ - } -/* L70: */ - } - value = 0.; - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { -/* Computing MAX */ - d__1 = value, d__2 = work[i__]; - value = max(d__1,d__2); -/* L80: */ - } - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - -/* Find normF(A). */ - - scale = 0.; - sum = 1.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = *n, i__4 = j + 1; - i__2 = min(i__3,i__4); - zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum); -/* L90: */ - } - value = scale * sqrt(sum); - } - - ret_val = value; - return ret_val; - -/* End of ZLANHS */ - -} /* zlanhs_ */ - -/* Subroutine */ int zlarcm_(integer *m, integer *n, doublereal *a, integer * - lda, doublecomplex *b, integer *ldb, doublecomplex *c__, integer *ldc, - doublereal *rwork) -{ - /* System generated locals */ - integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, - i__3, i__4, i__5; - doublereal d__1; - doublecomplex z__1; - - /* Builtin functions */ - double d_imag(doublecomplex *); - - /* Local variables */ - static integer i__, j, l; - extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, - integer *, doublereal *, doublereal *, integer *, doublereal *, - integer *, doublereal *, doublereal *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1999


    Purpose
    =======

    ZLARCM performs a very simple matrix-matrix multiplication:
             C := A * B,
    where A is M by M and real; B is M by N and complex;
    C is M by N and complex.

    Arguments
    =========

    M       (input) INTEGER
            The number of rows of the matrix A and of the matrix C.
            M >= 0.

    N       (input) INTEGER
            The number of columns of the matrix B and
            the number of columns of the matrix C.
            N >= 0.

    A       (input) DOUBLE PRECISION array, dimension (LDA, M)
            A contains the M by M matrix A.

    LDA     (input) INTEGER
            The leading dimension of the array A. LDA >=max(1,M).

    B       (input) COMPLEX*16 array, dimension (LDB, N)
            B contains the M by N matrix B.

    LDB     (input) INTEGER
            The leading dimension of the array B. LDB >=max(1,M).

    C       (output) COMPLEX*16 array, dimension (LDC, N)
            On exit, C contains the M by N matrix C = A*B.

    LDC     (input) INTEGER
            The leading dimension of the array C. LDC >=max(1,M).

    RWORK   (workspace) DOUBLE PRECISION array, dimension (2*M*N)

    =====================================================================


    Quick return if possible.
-*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1 * 1; - b -= b_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --rwork; - - /* Function Body */ - if (*m == 0 || *n == 0) { - return 0; - } - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * b_dim1; - rwork[(j - 1) * *m + i__] = b[i__3].r; -/* L10: */ - } -/* L20: */ - } - - l = *m * *n + 1; - dgemm_("N", "N", m, n, m, &c_b1015, &a[a_offset], lda, &rwork[1], m, & - c_b324, &rwork[l], m); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = l + (j - 1) * *m + i__ - 1; - c__[i__3].r = rwork[i__4], c__[i__3].i = 0.; -/* L30: */ - } -/* L40: */ - } - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - rwork[(j - 1) * *m + i__] = d_imag(&b[i__ + j * b_dim1]); -/* L50: */ - } -/* L60: */ - } - dgemm_("N", "N", m, n, m, &c_b1015, &a[a_offset], lda, &rwork[1], m, & - c_b324, &rwork[l], m); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - d__1 = c__[i__4].r; - i__5 = l + (j - 1) * *m + i__ - 1; - z__1.r = d__1, z__1.i = rwork[i__5]; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L70: */ - } -/* L80: */ - } - - return 0; - -/* End of ZLARCM */ - -} /* zlarcm_ */ - -/* Subroutine */ int zlarf_(char *side, integer *m, integer *n, doublecomplex - *v, integer *incv, doublecomplex *tau, doublecomplex *c__, integer * - ldc, doublecomplex *work) -{ - /* System generated locals */ - integer c_dim1, c_offset; - doublecomplex z__1; - - /* Local variables */ - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, - 
doublecomplex *, integer *), zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLARF applies a complex elementary reflector H to a complex M-by-N - matrix C, from either the left or the right. H is represented in the - form - - H = I - tau * v * v' - - where tau is a complex scalar and v is a complex vector. - - If tau = 0, then H is taken to be the unit matrix. - - To apply H' (the conjugate transpose of H), supply conjg(tau) instead - tau. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': form H * C - = 'R': form C * H - - M (input) INTEGER - The number of rows of the matrix C. - - N (input) INTEGER - The number of columns of the matrix C. - - V (input) COMPLEX*16 array, dimension - (1 + (M-1)*abs(INCV)) if SIDE = 'L' - or (1 + (N-1)*abs(INCV)) if SIDE = 'R' - The vector v in the representation of H. V is not used if - TAU = 0. - - INCV (input) INTEGER - The increment between elements of v. INCV <> 0. - - TAU (input) COMPLEX*16 - The value tau in the representation of H. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by the matrix H * C if SIDE = 'L', - or C * H if SIDE = 'R'. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace) COMPLEX*16 array, dimension - (N) if SIDE = 'L' - or (M) if SIDE = 'R' - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --v; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - if (lsame_(side, "L")) { - -/* Form H * C */ - - if (tau->r != 0. || tau->i != 0.) 
{ - -/* w := C' * v */ - - zgemv_("Conjugate transpose", m, n, &c_b60, &c__[c_offset], ldc, & - v[1], incv, &c_b59, &work[1], &c__1); - -/* C := C - v * w' */ - - z__1.r = -tau->r, z__1.i = -tau->i; - zgerc_(m, n, &z__1, &v[1], incv, &work[1], &c__1, &c__[c_offset], - ldc); - } - } else { - -/* Form C * H */ - - if (tau->r != 0. || tau->i != 0.) { - -/* w := C * v */ - - zgemv_("No transpose", m, n, &c_b60, &c__[c_offset], ldc, &v[1], - incv, &c_b59, &work[1], &c__1); - -/* C := C - w * v' */ - - z__1.r = -tau->r, z__1.i = -tau->i; - zgerc_(m, n, &z__1, &work[1], &c__1, &v[1], incv, &c__[c_offset], - ldc); - } - } - return 0; - -/* End of ZLARF */ - -} /* zlarf_ */ - -/* Subroutine */ int zlarfb_(char *side, char *trans, char *direct, char * - storev, integer *m, integer *n, integer *k, doublecomplex *v, integer - *ldv, doublecomplex *t, integer *ldt, doublecomplex *c__, integer * - ldc, doublecomplex *work, integer *ldwork) -{ - /* System generated locals */ - integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1, - work_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, - integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), zcopy_(integer *, doublecomplex *, - integer *, doublecomplex *, integer *), ztrmm_(char *, char *, - char *, char *, integer *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *, - integer *); - static char transt[1]; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLARFB applies a complex block reflector H or its transpose H' to a - complex M-by-N matrix C, from either the left or the right. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply H or H' from the Left - = 'R': apply H or H' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply H (No transpose) - = 'C': apply H' (Conjugate transpose) - - DIRECT (input) CHARACTER*1 - Indicates how H is formed from a product of elementary - reflectors - = 'F': H = H(1) H(2) . . . H(k) (Forward) - = 'B': H = H(k) . . . H(2) H(1) (Backward) - - STOREV (input) CHARACTER*1 - Indicates how the vectors which define the elementary - reflectors are stored: - = 'C': Columnwise - = 'R': Rowwise - - M (input) INTEGER - The number of rows of the matrix C. - - N (input) INTEGER - The number of columns of the matrix C. - - K (input) INTEGER - The order of the matrix T (= the number of elementary - reflectors whose product defines the block reflector). - - V (input) COMPLEX*16 array, dimension - (LDV,K) if STOREV = 'C' - (LDV,M) if STOREV = 'R' and SIDE = 'L' - (LDV,N) if STOREV = 'R' and SIDE = 'R' - The matrix V. See further details. - - LDV (input) INTEGER - The leading dimension of the array V. - If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); - if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); - if STOREV = 'R', LDV >= K. - - T (input) COMPLEX*16 array, dimension (LDT,K) - The triangular K-by-K matrix T in the representation of the - block reflector. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= K. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by H*C or H'*C or C*H or C*H'. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace) COMPLEX*16 array, dimension (LDWORK,K) - - LDWORK (input) INTEGER - The leading dimension of the array WORK. - If SIDE = 'L', LDWORK >= max(1,N); - if SIDE = 'R', LDWORK >= max(1,M). - - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - v_dim1 = *ldv; - v_offset = 1 + v_dim1 * 1; - v -= v_offset; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - work_dim1 = *ldwork; - work_offset = 1 + work_dim1 * 1; - work -= work_offset; - - /* Function Body */ - if (*m <= 0 || *n <= 0) { - return 0; - } - - if (lsame_(trans, "N")) { - *(unsigned char *)transt = 'C'; - } else { - *(unsigned char *)transt = 'N'; - } - - if (lsame_(storev, "C")) { - - if (lsame_(direct, "F")) { - -/* - Let V = ( V1 ) (first K rows) - ( V2 ) - where V1 is unit lower triangular. -*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) - - W := C1' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], - &c__1); - zlacgv_(n, &work[j * work_dim1 + 1], &c__1); -/* L10: */ - } - -/* W := W * V1 */ - - ztrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b60, - &v[v_offset], ldv, &work[work_offset], ldwork); - if (*m > *k) { - -/* W := W + C2'*V2 */ - - i__1 = *m - *k; - zgemm_("Conjugate transpose", "No transpose", n, k, &i__1, - &c_b60, &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + - v_dim1], ldv, &c_b60, &work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - ztrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V * W' */ - - if (*m > *k) { - -/* C2 := C2 - V2 * W' */ - - i__1 = *m - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", &i__1, n, k, - &z__1, &v[*k + 1 + 
v_dim1], ldv, &work[ - work_offset], ldwork, &c_b60, &c__[*k + 1 + - c_dim1], ldc); - } - -/* W := W * V1' */ - - ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", n, k, - &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = j + i__ * c_dim1; - i__4 = j + i__ * c_dim1; - d_cnjg(&z__2, &work[i__ + j * work_dim1]); - z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i - - z__2.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L20: */ - } -/* L30: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V = (C1*V1 + C2*V2) (stored in WORK) - - W := C1 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * - work_dim1 + 1], &c__1); -/* L40: */ - } - -/* W := W * V1 */ - - ztrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b60, - &v[v_offset], ldv, &work[work_offset], ldwork); - if (*n > *k) { - -/* W := W + C2 * V2 */ - - i__1 = *n - *k; - zgemm_("No transpose", "No transpose", m, k, &i__1, & - c_b60, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + - 1 + v_dim1], ldv, &c_b60, &work[work_offset], - ldwork); - } - -/* W := W * T or W * T' */ - - ztrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V' */ - - if (*n > *k) { - -/* C2 := C2 - W * V2' */ - - i__1 = *n - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", m, &i__1, k, - &z__1, &work[work_offset], ldwork, &v[*k + 1 + - v_dim1], ldv, &c_b60, &c__[(*k + 1) * c_dim1 + 1], - ldc); - } - -/* W := W * V1' */ - - ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", m, k, - &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 
= i__ + j * c_dim1; - i__5 = i__ + j * work_dim1; - z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[ - i__4].i - work[i__5].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L50: */ - } -/* L60: */ - } - } - - } else { - -/* - Let V = ( V1 ) - ( V2 ) (last K rows) - where V2 is unit upper triangular. -*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) - - W := C2' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * - work_dim1 + 1], &c__1); - zlacgv_(n, &work[j * work_dim1 + 1], &c__1); -/* L70: */ - } - -/* W := W * V2 */ - - ztrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b60, - &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], - ldwork); - if (*m > *k) { - -/* W := W + C1'*V1 */ - - i__1 = *m - *k; - zgemm_("Conjugate transpose", "No transpose", n, k, &i__1, - &c_b60, &c__[c_offset], ldc, &v[v_offset], ldv, & - c_b60, &work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - ztrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V * W' */ - - if (*m > *k) { - -/* C1 := C1 - V1 * W' */ - - i__1 = *m - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", &i__1, n, k, - &z__1, &v[v_offset], ldv, &work[work_offset], - ldwork, &c_b60, &c__[c_offset], ldc); - } - -/* W := W * V2' */ - - ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", n, k, - &c_b60, &v[*m - *k + 1 + v_dim1], ldv, &work[ - work_offset], ldwork); - -/* C2 := C2 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = *m - *k + j + i__ * c_dim1; - i__4 = *m - *k + j + i__ * c_dim1; - d_cnjg(&z__2, &work[i__ + j * work_dim1]); - z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i - - z__2.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L80: */ - } -/* L90: */ - } - - } else 
if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V = (C1*V1 + C2*V2) (stored in WORK) - - W := C2 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ - j * work_dim1 + 1], &c__1); -/* L100: */ - } - -/* W := W * V2 */ - - ztrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b60, - &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], - ldwork); - if (*n > *k) { - -/* W := W + C1 * V1 */ - - i__1 = *n - *k; - zgemm_("No transpose", "No transpose", m, k, &i__1, & - c_b60, &c__[c_offset], ldc, &v[v_offset], ldv, & - c_b60, &work[work_offset], ldwork); - } - -/* W := W * T or W * T' */ - - ztrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V' */ - - if (*n > *k) { - -/* C1 := C1 - W * V1' */ - - i__1 = *n - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", m, &i__1, k, - &z__1, &work[work_offset], ldwork, &v[v_offset], - ldv, &c_b60, &c__[c_offset], ldc); - } - -/* W := W * V2' */ - - ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", m, k, - &c_b60, &v[*n - *k + 1 + v_dim1], ldv, &work[ - work_offset], ldwork); - -/* C2 := C2 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + (*n - *k + j) * c_dim1; - i__4 = i__ + (*n - *k + j) * c_dim1; - i__5 = i__ + j * work_dim1; - z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[ - i__4].i - work[i__5].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L110: */ - } -/* L120: */ - } - } - } - - } else if (lsame_(storev, "R")) { - - if (lsame_(direct, "F")) { - -/* - Let V = ( V1 V2 ) (V1: first K columns) - where V1 is unit upper triangular. 
-*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) - - W := C1' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], - &c__1); - zlacgv_(n, &work[j * work_dim1 + 1], &c__1); -/* L130: */ - } - -/* W := W * V1' */ - - ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", n, k, - &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork); - if (*m > *k) { - -/* W := W + C2'*V2' */ - - i__1 = *m - *k; - zgemm_("Conjugate transpose", "Conjugate transpose", n, k, - &i__1, &c_b60, &c__[*k + 1 + c_dim1], ldc, &v[(* - k + 1) * v_dim1 + 1], ldv, &c_b60, &work[ - work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - ztrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V' * W' */ - - if (*m > *k) { - -/* C2 := C2 - V2' * W' */ - - i__1 = *m - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("Conjugate transpose", "Conjugate transpose", & - i__1, n, k, &z__1, &v[(*k + 1) * v_dim1 + 1], ldv, - &work[work_offset], ldwork, &c_b60, &c__[*k + 1 - + c_dim1], ldc); - } - -/* W := W * V1 */ - - ztrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b60, - &v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = j + i__ * c_dim1; - i__4 = j + i__ * c_dim1; - d_cnjg(&z__2, &work[i__ + j * work_dim1]); - z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i - - z__2.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L140: */ - } -/* L150: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V' = (C1*V1' + C2*V2') (stored in WORK) - - W := C1 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * - work_dim1 + 1], &c__1); -/* L160: */ - } - -/* W := W 
* V1' */ - - ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", m, k, - &c_b60, &v[v_offset], ldv, &work[work_offset], ldwork); - if (*n > *k) { - -/* W := W + C2 * V2' */ - - i__1 = *n - *k; - zgemm_("No transpose", "Conjugate transpose", m, k, &i__1, - &c_b60, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k - + 1) * v_dim1 + 1], ldv, &c_b60, &work[ - work_offset], ldwork); - } - -/* W := W * T or W * T' */ - - ztrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V */ - - if (*n > *k) { - -/* C2 := C2 - W * V2 */ - - i__1 = *n - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "No transpose", m, &i__1, k, &z__1, - &work[work_offset], ldwork, &v[(*k + 1) * v_dim1 - + 1], ldv, &c_b60, &c__[(*k + 1) * c_dim1 + 1], - ldc); - } - -/* W := W * V1 */ - - ztrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b60, - &v[v_offset], ldv, &work[work_offset], ldwork); - -/* C1 := C1 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * c_dim1; - i__4 = i__ + j * c_dim1; - i__5 = i__ + j * work_dim1; - z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[ - i__4].i - work[i__5].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L170: */ - } -/* L180: */ - } - - } - - } else { - -/* - Let V = ( V1 V2 ) (V2: last K columns) - where V2 is unit lower triangular. 
-*/ - - if (lsame_(side, "L")) { - -/* - Form H * C or H' * C where C = ( C1 ) - ( C2 ) - - W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) - - W := C2' -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * - work_dim1 + 1], &c__1); - zlacgv_(n, &work[j * work_dim1 + 1], &c__1); -/* L190: */ - } - -/* W := W * V2' */ - - ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", n, k, - &c_b60, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[ - work_offset], ldwork); - if (*m > *k) { - -/* W := W + C1'*V1' */ - - i__1 = *m - *k; - zgemm_("Conjugate transpose", "Conjugate transpose", n, k, - &i__1, &c_b60, &c__[c_offset], ldc, &v[v_offset], - ldv, &c_b60, &work[work_offset], ldwork); - } - -/* W := W * T' or W * T */ - - ztrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - V' * W' */ - - if (*m > *k) { - -/* C1 := C1 - V1' * W' */ - - i__1 = *m - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("Conjugate transpose", "Conjugate transpose", & - i__1, n, k, &z__1, &v[v_offset], ldv, &work[ - work_offset], ldwork, &c_b60, &c__[c_offset], ldc); - } - -/* W := W * V2 */ - - ztrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b60, - &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[ - work_offset], ldwork); - -/* C2 := C2 - W' */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = *m - *k + j + i__ * c_dim1; - i__4 = *m - *k + j + i__ * c_dim1; - d_cnjg(&z__2, &work[i__ + j * work_dim1]); - z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i - - z__2.i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L200: */ - } -/* L210: */ - } - - } else if (lsame_(side, "R")) { - -/* - Form C * H or C * H' where C = ( C1 C2 ) - - W := C * V' = (C1*V1' + C2*V2') (stored in WORK) - - W := C2 -*/ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - zcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ - j * work_dim1 + 
1], &c__1); -/* L220: */ - } - -/* W := W * V2' */ - - ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", m, k, - &c_b60, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[ - work_offset], ldwork); - if (*n > *k) { - -/* W := W + C1 * V1' */ - - i__1 = *n - *k; - zgemm_("No transpose", "Conjugate transpose", m, k, &i__1, - &c_b60, &c__[c_offset], ldc, &v[v_offset], ldv, & - c_b60, &work[work_offset], ldwork); - } - -/* W := W * T or W * T' */ - - ztrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b60, &t[ - t_offset], ldt, &work[work_offset], ldwork); - -/* C := C - W * V */ - - if (*n > *k) { - -/* C1 := C1 - W * V1 */ - - i__1 = *n - *k; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "No transpose", m, &i__1, k, &z__1, - &work[work_offset], ldwork, &v[v_offset], ldv, & - c_b60, &c__[c_offset], ldc); - } - -/* W := W * V2 */ - - ztrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b60, - &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[ - work_offset], ldwork); - -/* C1 := C1 - W */ - - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + (*n - *k + j) * c_dim1; - i__4 = i__ + (*n - *k + j) * c_dim1; - i__5 = i__ + j * work_dim1; - z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[ - i__4].i - work[i__5].i; - c__[i__3].r = z__1.r, c__[i__3].i = z__1.i; -/* L230: */ - } -/* L240: */ - } - - } - - } - } - - return 0; - -/* End of ZLARFB */ - -} /* zlarfb_ */ - -/* Subroutine */ int zlarfg_(integer *n, doublecomplex *alpha, doublecomplex * - x, integer *incx, doublecomplex *tau) -{ - /* System generated locals */ - integer i__1; - doublereal d__1, d__2; - doublecomplex z__1, z__2; - - /* Builtin functions */ - double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static integer j, knt; - static doublereal beta, alphi, alphr; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *); - static doublereal xnorm; - extern 
doublereal dlapy3_(doublereal *, doublereal *, doublereal *), - dznrm2_(integer *, doublecomplex *, integer *), dlamch_(char *); - static doublereal safmin; - extern /* Subroutine */ int zdscal_(integer *, doublereal *, - doublecomplex *, integer *); - static doublereal rsafmn; - extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *, - doublecomplex *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLARFG generates a complex elementary reflector H of order n, such - that - - H' * ( alpha ) = ( beta ), H' * H = I. - ( x ) ( 0 ) - - where alpha and beta are scalars, with beta real, and x is an - (n-1)-element complex vector. H is represented in the form - - H = I - tau * ( 1 ) * ( 1 v' ) , - ( v ) - - where tau is a complex scalar and v is a complex (n-1)-element - vector. Note that H is not hermitian. - - If the elements of x are all zero and alpha is real, then tau = 0 - and H is taken to be the unit matrix. - - Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . - - Arguments - ========= - - N (input) INTEGER - The order of the elementary reflector. - - ALPHA (input/output) COMPLEX*16 - On entry, the value alpha. - On exit, it is overwritten with the value beta. - - X (input/output) COMPLEX*16 array, dimension - (1+(N-2)*abs(INCX)) - On entry, the vector x. - On exit, it is overwritten with the vector v. - - INCX (input) INTEGER - The increment between elements of X. INCX > 0. - - TAU (output) COMPLEX*16 - The value tau. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*n <= 0) { - tau->r = 0., tau->i = 0.; - return 0; - } - - i__1 = *n - 1; - xnorm = dznrm2_(&i__1, &x[1], incx); - alphr = alpha->r; - alphi = d_imag(alpha); - - if ((xnorm == 0. 
&& alphi == 0.)) { - -/* H = I */ - - tau->r = 0., tau->i = 0.; - } else { - -/* general case */ - - d__1 = dlapy3_(&alphr, &alphi, &xnorm); - beta = -d_sign(&d__1, &alphr); - safmin = SAFEMINIMUM / EPSILON; - rsafmn = 1. / safmin; - - if (abs(beta) < safmin) { - -/* XNORM, BETA may be inaccurate; scale X and recompute them */ - - knt = 0; -L10: - ++knt; - i__1 = *n - 1; - zdscal_(&i__1, &rsafmn, &x[1], incx); - beta *= rsafmn; - alphi *= rsafmn; - alphr *= rsafmn; - if (abs(beta) < safmin) { - goto L10; - } - -/* New BETA is at most 1, at least SAFMIN */ - - i__1 = *n - 1; - xnorm = dznrm2_(&i__1, &x[1], incx); - z__1.r = alphr, z__1.i = alphi; - alpha->r = z__1.r, alpha->i = z__1.i; - d__1 = dlapy3_(&alphr, &alphi, &xnorm); - beta = -d_sign(&d__1, &alphr); - d__1 = (beta - alphr) / beta; - d__2 = -alphi / beta; - z__1.r = d__1, z__1.i = d__2; - tau->r = z__1.r, tau->i = z__1.i; - z__2.r = alpha->r - beta, z__2.i = alpha->i; - zladiv_(&z__1, &c_b60, &z__2); - alpha->r = z__1.r, alpha->i = z__1.i; - i__1 = *n - 1; - zscal_(&i__1, alpha, &x[1], incx); - -/* If ALPHA is subnormal, it may lose relative accuracy */ - - alpha->r = beta, alpha->i = 0.; - i__1 = knt; - for (j = 1; j <= i__1; ++j) { - z__1.r = safmin * alpha->r, z__1.i = safmin * alpha->i; - alpha->r = z__1.r, alpha->i = z__1.i; -/* L20: */ - } - } else { - d__1 = (beta - alphr) / beta; - d__2 = -alphi / beta; - z__1.r = d__1, z__1.i = d__2; - tau->r = z__1.r, tau->i = z__1.i; - z__2.r = alpha->r - beta, z__2.i = alpha->i; - zladiv_(&z__1, &c_b60, &z__2); - alpha->r = z__1.r, alpha->i = z__1.i; - i__1 = *n - 1; - zscal_(&i__1, alpha, &x[1], incx); - alpha->r = beta, alpha->i = 0.; - } - } - - return 0; - -/* End of ZLARFG */ - -} /* zlarfg_ */ - -/* Subroutine */ int zlarft_(char *direct, char *storev, integer *n, integer * - k, doublecomplex *v, integer *ldv, doublecomplex *tau, doublecomplex * - t, integer *ldt) -{ - /* System generated locals */ - integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, 
i__3, i__4; - doublecomplex z__1; - - /* Local variables */ - static integer i__, j; - static doublecomplex vii; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *), - ztrmv_(char *, char *, char *, integer *, doublecomplex *, - integer *, doublecomplex *, integer *), - zlacgv_(integer *, doublecomplex *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLARFT forms the triangular factor T of a complex block reflector H - of order n, which is defined as a product of k elementary reflectors. - - If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; - - If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. - - If STOREV = 'C', the vector which defines the elementary reflector - H(i) is stored in the i-th column of the array V, and - - H = I - V * T * V' - - If STOREV = 'R', the vector which defines the elementary reflector - H(i) is stored in the i-th row of the array V, and - - H = I - V' * T * V - - Arguments - ========= - - DIRECT (input) CHARACTER*1 - Specifies the order in which the elementary reflectors are - multiplied to form the block reflector: - = 'F': H = H(1) H(2) . . . H(k) (Forward) - = 'B': H = H(k) . . . H(2) H(1) (Backward) - - STOREV (input) CHARACTER*1 - Specifies how the vectors which define the elementary - reflectors are stored (see also Further Details): - = 'C': columnwise - = 'R': rowwise - - N (input) INTEGER - The order of the block reflector H. N >= 0. - - K (input) INTEGER - The order of the triangular factor T (= the number of - elementary reflectors). K >= 1. 
- - V (input/output) COMPLEX*16 array, dimension - (LDV,K) if STOREV = 'C' - (LDV,N) if STOREV = 'R' - The matrix V. See further details. - - LDV (input) INTEGER - The leading dimension of the array V. - If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i). - - T (output) COMPLEX*16 array, dimension (LDT,K) - The k by k triangular factor T of the block reflector. - If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is - lower triangular. The rest of the array is not used. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= K. - - Further Details - =============== - - The shape of the matrix V and the storage of the vectors which define - the H(i) is best illustrated by the following example with n = 5 and - k = 3. The elements equal to 1 are not stored; the corresponding - array elements are modified but restored on exit. The rest of the - array is not used. - - DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': - - V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) - ( v1 1 ) ( 1 v2 v2 v2 ) - ( v1 v2 1 ) ( 1 v3 v3 ) - ( v1 v2 v3 ) - ( v1 v2 v3 ) - - DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': - - V = ( v1 v2 v3 ) V = ( v1 v1 1 ) - ( v1 v2 v3 ) ( v2 v2 v2 1 ) - ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) - ( 1 v3 ) - ( 1 ) - - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - v_dim1 = *ldv; - v_offset = 1 + v_dim1 * 1; - v -= v_offset; - --tau; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - - /* Function Body */ - if (*n == 0) { - return 0; - } - - if (lsame_(direct, "F")) { - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - if ((tau[i__2].r == 0. 
&& tau[i__2].i == 0.)) { - -/* H(i) = I */ - - i__2 = i__; - for (j = 1; j <= i__2; ++j) { - i__3 = j + i__ * t_dim1; - t[i__3].r = 0., t[i__3].i = 0.; -/* L10: */ - } - } else { - -/* general case */ - - i__2 = i__ + i__ * v_dim1; - vii.r = v[i__2].r, vii.i = v[i__2].i; - i__2 = i__ + i__ * v_dim1; - v[i__2].r = 1., v[i__2].i = 0.; - if (lsame_(storev, "C")) { - -/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */ - - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - i__4 = i__; - z__1.r = -tau[i__4].r, z__1.i = -tau[i__4].i; - zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &v[i__ - + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, & - c_b59, &t[i__ * t_dim1 + 1], &c__1); - } else { - -/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */ - - if (i__ < *n) { - i__2 = *n - i__; - zlacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv); - } - i__2 = i__ - 1; - i__3 = *n - i__ + 1; - i__4 = i__; - z__1.r = -tau[i__4].r, z__1.i = -tau[i__4].i; - zgemv_("No transpose", &i__2, &i__3, &z__1, &v[i__ * - v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, & - c_b59, &t[i__ * t_dim1 + 1], &c__1); - if (i__ < *n) { - i__2 = *n - i__; - zlacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv); - } - } - i__2 = i__ + i__ * v_dim1; - v[i__2].r = vii.r, v[i__2].i = vii.i; - -/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */ - - i__2 = i__ - 1; - ztrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[ - t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1); - i__2 = i__ + i__ * t_dim1; - i__3 = i__; - t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i; - } -/* L20: */ - } - } else { - for (i__ = *k; i__ >= 1; --i__) { - i__1 = i__; - if ((tau[i__1].r == 0. 
&& tau[i__1].i == 0.)) { - -/* H(i) = I */ - - i__1 = *k; - for (j = i__; j <= i__1; ++j) { - i__2 = j + i__ * t_dim1; - t[i__2].r = 0., t[i__2].i = 0.; -/* L30: */ - } - } else { - -/* general case */ - - if (i__ < *k) { - if (lsame_(storev, "C")) { - i__1 = *n - *k + i__ + i__ * v_dim1; - vii.r = v[i__1].r, vii.i = v[i__1].i; - i__1 = *n - *k + i__ + i__ * v_dim1; - v[i__1].r = 1., v[i__1].i = 0.; - -/* - T(i+1:k,i) := - - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i) -*/ - - i__1 = *n - *k + i__; - i__2 = *k - i__; - i__3 = i__; - z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i; - zgemv_("Conjugate transpose", &i__1, &i__2, &z__1, &v[ - (i__ + 1) * v_dim1 + 1], ldv, &v[i__ * v_dim1 - + 1], &c__1, &c_b59, &t[i__ + 1 + i__ * - t_dim1], &c__1); - i__1 = *n - *k + i__ + i__ * v_dim1; - v[i__1].r = vii.r, v[i__1].i = vii.i; - } else { - i__1 = i__ + (*n - *k + i__) * v_dim1; - vii.r = v[i__1].r, vii.i = v[i__1].i; - i__1 = i__ + (*n - *k + i__) * v_dim1; - v[i__1].r = 1., v[i__1].i = 0.; - -/* - T(i+1:k,i) := - - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)' -*/ - - i__1 = *n - *k + i__ - 1; - zlacgv_(&i__1, &v[i__ + v_dim1], ldv); - i__1 = *k - i__; - i__2 = *n - *k + i__; - i__3 = i__; - z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i; - zgemv_("No transpose", &i__1, &i__2, &z__1, &v[i__ + - 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, & - c_b59, &t[i__ + 1 + i__ * t_dim1], &c__1); - i__1 = *n - *k + i__ - 1; - zlacgv_(&i__1, &v[i__ + v_dim1], ldv); - i__1 = i__ + (*n - *k + i__) * v_dim1; - v[i__1].r = vii.r, v[i__1].i = vii.i; - } - -/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */ - - i__1 = *k - i__; - ztrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ - + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * - t_dim1], &c__1) - ; - } - i__1 = i__ + i__ * t_dim1; - i__2 = i__; - t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i; - } -/* L40: */ - } - } - return 0; - -/* End of ZLARFT */ - -} /* zlarft_ */ - -/* Subroutine */ int zlarfx_(char *side, integer *m, integer *n, - 
doublecomplex *v, doublecomplex *tau, doublecomplex *c__, integer * - ldc, doublecomplex *work) -{ - /* System generated locals */ - integer c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, - i__9, i__10, i__11; - doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8, z__9, z__10, - z__11, z__12, z__13, z__14, z__15, z__16, z__17, z__18, z__19; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer j; - static doublecomplex t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, - v5, v6, v7, v8, v9, t10, v10, sum; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *), zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLARFX applies a complex elementary reflector H to a complex m by n - matrix C, from either the left or the right. H is represented in the - form - - H = I - tau * v * v' - - where tau is a complex scalar and v is a complex vector. - - If tau = 0, then H is taken to be the unit matrix - - This version uses inline code if H has order < 11. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': form H * C - = 'R': form C * H - - M (input) INTEGER - The number of rows of the matrix C. - - N (input) INTEGER - The number of columns of the matrix C. - - V (input) COMPLEX*16 array, dimension (M) if SIDE = 'L' - or (N) if SIDE = 'R' - The vector v in the representation of H. - - TAU (input) COMPLEX*16 - The value tau in the representation of H. 
- - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the m by n matrix C. - On exit, C is overwritten by the matrix H * C if SIDE = 'L', - or C * H if SIDE = 'R'. - - LDC (input) INTEGER - The leading dimension of the array C. LDA >= max(1,M). - - WORK (workspace) COMPLEX*16 array, dimension (N) if SIDE = 'L' - or (M) if SIDE = 'R' - WORK is not referenced if H has order < 11. - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --v; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - if ((tau->r == 0. && tau->i == 0.)) { - return 0; - } - if (lsame_(side, "L")) { - -/* Form H * C, where H has order m. */ - - switch (*m) { - case 1: goto L10; - case 2: goto L30; - case 3: goto L50; - case 4: goto L70; - case 5: goto L90; - case 6: goto L110; - case 7: goto L130; - case 8: goto L150; - case 9: goto L170; - case 10: goto L190; - } - -/* - Code for general M - - w := C'*v -*/ - - zgemv_("Conjugate transpose", m, n, &c_b60, &c__[c_offset], ldc, &v[1] - , &c__1, &c_b59, &work[1], &c__1); - -/* C := C - tau * v * w' */ - - z__1.r = -tau->r, z__1.i = -tau->i; - zgerc_(m, n, &z__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], - ldc); - goto L410; -L10: - -/* Special code for 1 x 1 Householder */ - - z__3.r = tau->r * v[1].r - tau->i * v[1].i, z__3.i = tau->r * v[1].i - + tau->i * v[1].r; - d_cnjg(&z__4, &v[1]); - z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = z__3.r * z__4.i - + z__3.i * z__4.r; - z__1.r = 1. - z__2.r, z__1.i = 0. 
- z__2.i; - t1.r = z__1.r, t1.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, z__1.i = t1.r * - c__[i__3].i + t1.i * c__[i__3].r; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L20: */ - } - goto L410; -L30: - -/* Special code for 2 x 2 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__2.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__3.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L40: */ - } - goto L410; -L50: - -/* Special code for 3 x 3 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, 
&v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__3.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__4.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i; - i__4 = j * c_dim1 + 3; - z__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__5.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L60: */ - } - goto L410; -L70: - -/* Special code for 4 x 4 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = 
tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__4.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__5.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i; - i__4 = j * c_dim1 + 3; - z__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__6.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i; - i__5 = j * c_dim1 + 4; - z__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__7.i = v4.r * - c__[i__5].i + v4.i * c__[i__5].r; - z__1.r = z__2.r + z__7.r, z__1.i = z__2.i + z__7.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - 
i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L80: */ - } - goto L410; -L90: - -/* Special code for 5 x 5 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - d_cnjg(&z__1, &v[5]); - v5.r = z__1.r, v5.i = z__1.i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__5.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__6.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__4.r = z__5.r + z__6.r, z__4.i = z__5.i + z__6.i; - i__4 = j * c_dim1 + 3; - z__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__7.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i; - i__5 = j * c_dim1 + 4; - z__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__8.i = v4.r * - c__[i__5].i + v4.i * c__[i__5].r; - 
z__2.r = z__3.r + z__8.r, z__2.i = z__3.i + z__8.i; - i__6 = j * c_dim1 + 5; - z__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__9.i = v5.r * - c__[i__6].i + v5.i * c__[i__6].r; - z__1.r = z__2.r + z__9.r, z__1.i = z__2.i + z__9.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 5; - i__3 = j * c_dim1 + 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L100: */ - } - goto L410; -L110: - -/* Special code for 6 x 6 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, 
v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - d_cnjg(&z__1, &v[5]); - v5.r = z__1.r, v5.i = z__1.i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - d_cnjg(&z__1, &v[6]); - v6.r = z__1.r, v6.i = z__1.i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__6.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__7.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__5.r = z__6.r + z__7.r, z__5.i = z__6.i + z__7.i; - i__4 = j * c_dim1 + 3; - z__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__8.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__4.r = z__5.r + z__8.r, z__4.i = z__5.i + z__8.i; - i__5 = j * c_dim1 + 4; - z__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__9.i = v4.r * - c__[i__5].i + v4.i * c__[i__5].r; - z__3.r = z__4.r + z__9.r, z__3.i = z__4.i + z__9.i; - i__6 = j * c_dim1 + 5; - z__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__10.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__2.r = z__3.r + z__10.r, z__2.i = z__3.i + z__10.i; - i__7 = j * c_dim1 + 6; - z__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__11.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__1.r = z__2.r + z__11.r, z__1.i = z__2.i + z__11.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = 
sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 5; - i__3 = j * c_dim1 + 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 6; - i__3 = j * c_dim1 + 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L120: */ - } - goto L410; -L130: - -/* Special code for 7 x 7 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - 
t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - d_cnjg(&z__1, &v[5]); - v5.r = z__1.r, v5.i = z__1.i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - d_cnjg(&z__1, &v[6]); - v6.r = z__1.r, v6.i = z__1.i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - d_cnjg(&z__1, &v[7]); - v7.r = z__1.r, v7.i = z__1.i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__7.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__8.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__6.r = z__7.r + z__8.r, z__6.i = z__7.i + z__8.i; - i__4 = j * c_dim1 + 3; - z__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__9.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__5.r = z__6.r + z__9.r, z__5.i = z__6.i + z__9.i; - i__5 = j * c_dim1 + 4; - z__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__10.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__4.r = z__5.r + z__10.r, z__4.i = z__5.i + z__10.i; - i__6 = j * c_dim1 + 5; - z__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__11.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__3.r = z__4.r + z__11.r, z__3.i = z__4.i + z__11.i; - i__7 = j * c_dim1 + 6; - z__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__12.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__2.r = z__3.r + z__12.r, z__2.i = z__3.i + z__12.i; - i__8 = j * c_dim1 + 7; - z__13.r = v7.r * c__[i__8].r - 
v7.i * c__[i__8].i, z__13.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__1.r = z__2.r + z__13.r, z__1.i = z__2.i + z__13.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 5; - i__3 = j * c_dim1 + 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 6; - i__3 = j * c_dim1 + 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 7; - i__3 = j * c_dim1 + 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L140: */ - } - goto L410; -L150: - -/* Special code for 8 x 8 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, 
&v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - d_cnjg(&z__1, &v[5]); - v5.r = z__1.r, v5.i = z__1.i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - d_cnjg(&z__1, &v[6]); - v6.r = z__1.r, v6.i = z__1.i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - d_cnjg(&z__1, &v[7]); - v7.r = z__1.r, v7.i = z__1.i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - d_cnjg(&z__1, &v[8]); - v8.r = z__1.r, v8.i = z__1.i; - d_cnjg(&z__2, &v8); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t8.r = z__1.r, t8.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__8.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__9.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__7.r = z__8.r + z__9.r, z__7.i = z__8.i + z__9.i; - i__4 = j * c_dim1 + 3; - z__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__10.i = v3.r - * 
c__[i__4].i + v3.i * c__[i__4].r; - z__6.r = z__7.r + z__10.r, z__6.i = z__7.i + z__10.i; - i__5 = j * c_dim1 + 4; - z__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__11.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__5.r = z__6.r + z__11.r, z__5.i = z__6.i + z__11.i; - i__6 = j * c_dim1 + 5; - z__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__12.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__4.r = z__5.r + z__12.r, z__4.i = z__5.i + z__12.i; - i__7 = j * c_dim1 + 6; - z__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__13.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__3.r = z__4.r + z__13.r, z__3.i = z__4.i + z__13.i; - i__8 = j * c_dim1 + 7; - z__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__14.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__2.r = z__3.r + z__14.r, z__2.i = z__3.i + z__14.i; - i__9 = j * c_dim1 + 8; - z__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__15.i = v8.r - * c__[i__9].i + v8.i * c__[i__9].r; - z__1.r = z__2.r + z__15.r, z__1.i = z__2.i + z__15.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, 
c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 5; - i__3 = j * c_dim1 + 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 6; - i__3 = j * c_dim1 + 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 7; - i__3 = j * c_dim1 + 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 8; - i__3 = j * c_dim1 + 8; - z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i + - sum.i * t8.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L160: */ - } - goto L410; -L170: - -/* Special code for 9 x 9 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - d_cnjg(&z__1, &v[5]); - v5.r = z__1.r, v5.i = z__1.i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, 
z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - d_cnjg(&z__1, &v[6]); - v6.r = z__1.r, v6.i = z__1.i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - d_cnjg(&z__1, &v[7]); - v7.r = z__1.r, v7.i = z__1.i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - d_cnjg(&z__1, &v[8]); - v8.r = z__1.r, v8.i = z__1.i; - d_cnjg(&z__2, &v8); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t8.r = z__1.r, t8.i = z__1.i; - d_cnjg(&z__1, &v[9]); - v9.r = z__1.r, v9.i = z__1.i; - d_cnjg(&z__2, &v9); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t9.r = z__1.r, t9.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__9.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__10.i = v2.r - * c__[i__3].i + v2.i * c__[i__3].r; - z__8.r = z__9.r + z__10.r, z__8.i = z__9.i + z__10.i; - i__4 = j * c_dim1 + 3; - z__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__11.i = v3.r - * c__[i__4].i + v3.i * c__[i__4].r; - z__7.r = z__8.r + z__11.r, z__7.i = z__8.i + z__11.i; - i__5 = j * c_dim1 + 4; - z__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__12.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__6.r = z__7.r + z__12.r, z__6.i = z__7.i + z__12.i; - i__6 = j * c_dim1 + 5; - z__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__13.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__5.r = z__6.r + z__13.r, z__5.i = z__6.i + z__13.i; - i__7 = j * c_dim1 + 6; - z__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__14.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__4.r = z__5.r + z__14.r, z__4.i = z__5.i + z__14.i; - 
i__8 = j * c_dim1 + 7; - z__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__15.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__3.r = z__4.r + z__15.r, z__3.i = z__4.i + z__15.i; - i__9 = j * c_dim1 + 8; - z__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__16.i = v8.r - * c__[i__9].i + v8.i * c__[i__9].r; - z__2.r = z__3.r + z__16.r, z__2.i = z__3.i + z__16.i; - i__10 = j * c_dim1 + 9; - z__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__17.i = - v9.r * c__[i__10].i + v9.i * c__[i__10].r; - z__1.r = z__2.r + z__17.r, z__1.i = z__2.i + z__17.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 5; - i__3 = j * c_dim1 + 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 6; - i__3 = j * c_dim1 + 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - 
c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 7; - i__3 = j * c_dim1 + 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 8; - i__3 = j * c_dim1 + 8; - z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i + - sum.i * t8.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 9; - i__3 = j * c_dim1 + 9; - z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i + - sum.i * t9.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L180: */ - } - goto L410; -L190: - -/* Special code for 10 x 10 Householder */ - - d_cnjg(&z__1, &v[1]); - v1.r = z__1.r, v1.i = z__1.i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - d_cnjg(&z__1, &v[2]); - v2.r = z__1.r, v2.i = z__1.i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - d_cnjg(&z__1, &v[3]); - v3.r = z__1.r, v3.i = z__1.i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - d_cnjg(&z__1, &v[4]); - v4.r = z__1.r, v4.i = z__1.i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - d_cnjg(&z__1, &v[5]); - v5.r = z__1.r, v5.i = z__1.i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - d_cnjg(&z__1, &v[6]); - v6.r = z__1.r, v6.i = z__1.i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r 
* z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - d_cnjg(&z__1, &v[7]); - v7.r = z__1.r, v7.i = z__1.i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - d_cnjg(&z__1, &v[8]); - v8.r = z__1.r, v8.i = z__1.i; - d_cnjg(&z__2, &v8); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t8.r = z__1.r, t8.i = z__1.i; - d_cnjg(&z__1, &v[9]); - v9.r = z__1.r, v9.i = z__1.i; - d_cnjg(&z__2, &v9); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t9.r = z__1.r, t9.i = z__1.i; - d_cnjg(&z__1, &v[10]); - v10.r = z__1.r, v10.i = z__1.i; - d_cnjg(&z__2, &v10); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t10.r = z__1.r, t10.i = z__1.i; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j * c_dim1 + 1; - z__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__10.i = v1.r - * c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j * c_dim1 + 2; - z__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__11.i = v2.r - * c__[i__3].i + v2.i * c__[i__3].r; - z__9.r = z__10.r + z__11.r, z__9.i = z__10.i + z__11.i; - i__4 = j * c_dim1 + 3; - z__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__12.i = v3.r - * c__[i__4].i + v3.i * c__[i__4].r; - z__8.r = z__9.r + z__12.r, z__8.i = z__9.i + z__12.i; - i__5 = j * c_dim1 + 4; - z__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__13.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__7.r = z__8.r + z__13.r, z__7.i = z__8.i + z__13.i; - i__6 = j * c_dim1 + 5; - z__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__14.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__6.r = z__7.r + z__14.r, z__6.i = z__7.i + z__14.i; - i__7 = j * c_dim1 + 6; - z__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__15.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__5.r = z__6.r + z__15.r, z__5.i = z__6.i + z__15.i; - i__8 = j 
* c_dim1 + 7; - z__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__16.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__4.r = z__5.r + z__16.r, z__4.i = z__5.i + z__16.i; - i__9 = j * c_dim1 + 8; - z__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__17.i = v8.r - * c__[i__9].i + v8.i * c__[i__9].r; - z__3.r = z__4.r + z__17.r, z__3.i = z__4.i + z__17.i; - i__10 = j * c_dim1 + 9; - z__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__18.i = - v9.r * c__[i__10].i + v9.i * c__[i__10].r; - z__2.r = z__3.r + z__18.r, z__2.i = z__3.i + z__18.i; - i__11 = j * c_dim1 + 10; - z__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, z__19.i = - v10.r * c__[i__11].i + v10.i * c__[i__11].r; - z__1.r = z__2.r + z__19.r, z__1.i = z__2.i + z__19.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j * c_dim1 + 1; - i__3 = j * c_dim1 + 1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 2; - i__3 = j * c_dim1 + 2; - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 3; - i__3 = j * c_dim1 + 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 4; - i__3 = j * c_dim1 + 4; - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 5; - i__3 = j * c_dim1 + 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * 
c_dim1 + 6; - i__3 = j * c_dim1 + 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 7; - i__3 = j * c_dim1 + 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 8; - i__3 = j * c_dim1 + 8; - z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i + - sum.i * t8.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 9; - i__3 = j * c_dim1 + 9; - z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i + - sum.i * t9.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j * c_dim1 + 10; - i__3 = j * c_dim1 + 10; - z__2.r = sum.r * t10.r - sum.i * t10.i, z__2.i = sum.r * t10.i + - sum.i * t10.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L200: */ - } - goto L410; - } else { - -/* Form C * H, where H has order n. 
*/ - - switch (*n) { - case 1: goto L210; - case 2: goto L230; - case 3: goto L250; - case 4: goto L270; - case 5: goto L290; - case 6: goto L310; - case 7: goto L330; - case 8: goto L350; - case 9: goto L370; - case 10: goto L390; - } - -/* - Code for general N - - w := C * v -*/ - - zgemv_("No transpose", m, n, &c_b60, &c__[c_offset], ldc, &v[1], & - c__1, &c_b59, &work[1], &c__1); - -/* C := C - tau * w * v' */ - - z__1.r = -tau->r, z__1.i = -tau->i; - zgerc_(m, n, &z__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], - ldc); - goto L410; -L210: - -/* Special code for 1 x 1 Householder */ - - z__3.r = tau->r * v[1].r - tau->i * v[1].i, z__3.i = tau->r * v[1].i - + tau->i * v[1].r; - d_cnjg(&z__4, &v[1]); - z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = z__3.r * z__4.i - + z__3.i * z__4.r; - z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i; - t1.r = z__1.r, t1.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, z__1.i = t1.r * - c__[i__3].i + t1.i * c__[i__3].r; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L220: */ - } - goto L410; -L230: - -/* Special code for 2 x 2 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__2.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__3.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; 
- z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L240: */ - } - goto L410; -L250: - -/* Special code for 3 x 3 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__3.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__4.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i; - i__4 = j + c_dim1 * 3; - z__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__5.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - 
sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L260: */ - } - goto L410; -L270: - -/* Special code for 4 x 4 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__4.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__5.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i; - i__4 = j + c_dim1 * 3; - z__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__6.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i; - i__5 = j + ((c_dim1) << (2)); - z__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__7.i = v4.r * - c__[i__5].i + v4.i * c__[i__5].r; - z__1.r = z__2.r + z__7.r, z__1.i = z__2.i + z__7.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i 
* t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + ((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L280: */ - } - goto L410; -L290: - -/* Special code for 5 x 5 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - v5.r = v[5].r, v5.i = v[5].i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, 
z__5.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__6.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__4.r = z__5.r + z__6.r, z__4.i = z__5.i + z__6.i; - i__4 = j + c_dim1 * 3; - z__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__7.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i; - i__5 = j + ((c_dim1) << (2)); - z__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__8.i = v4.r * - c__[i__5].i + v4.i * c__[i__5].r; - z__2.r = z__3.r + z__8.r, z__2.i = z__3.i + z__8.i; - i__6 = j + c_dim1 * 5; - z__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__9.i = v5.r * - c__[i__6].i + v5.i * c__[i__6].r; - z__1.r = z__2.r + z__9.r, z__1.i = z__2.i + z__9.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + ((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 5; - i__3 = j + c_dim1 * 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - 
z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L300: */ - } - goto L410; -L310: - -/* Special code for 6 x 6 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - v5.r = v[5].r, v5.i = v[5].i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - v6.r = v[6].r, v6.i = v[6].i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__6.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__7.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__5.r = z__6.r + z__7.r, z__5.i = z__6.i + z__7.i; - i__4 = j + c_dim1 * 3; - z__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__8.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__4.r = z__5.r + z__8.r, z__4.i = z__5.i + z__8.i; - i__5 = j + ((c_dim1) << (2)); - z__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__9.i = v4.r * - c__[i__5].i + v4.i * c__[i__5].r; - z__3.r = z__4.r + z__9.r, z__3.i = z__4.i + z__9.i; - i__6 = j + c_dim1 * 5; - z__10.r = v5.r * c__[i__6].r - 
v5.i * c__[i__6].i, z__10.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__2.r = z__3.r + z__10.r, z__2.i = z__3.i + z__10.i; - i__7 = j + c_dim1 * 6; - z__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__11.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__1.r = z__2.r + z__11.r, z__1.i = z__2.i + z__11.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + ((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 5; - i__3 = j + c_dim1 * 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 6; - i__3 = j + c_dim1 * 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L320: */ - } - goto L410; -L330: - -/* Special code for 7 x 7 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i 
= tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - v5.r = v[5].r, v5.i = v[5].i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - v6.r = v[6].r, v6.i = v[6].i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - v7.r = v[7].r, v7.i = v[7].i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__7.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__8.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - z__6.r = z__7.r + z__8.r, z__6.i = z__7.i + z__8.i; - i__4 = j + c_dim1 * 3; - z__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__9.i = v3.r * - c__[i__4].i + v3.i * c__[i__4].r; - z__5.r = z__6.r + z__9.r, z__5.i = z__6.i + z__9.i; - i__5 = j + ((c_dim1) << (2)); - z__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__10.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__4.r = z__5.r + z__10.r, z__4.i = z__5.i + z__10.i; - i__6 = j + c_dim1 * 5; - z__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__11.i = v5.r - * c__[i__6].i + v5.i * 
c__[i__6].r; - z__3.r = z__4.r + z__11.r, z__3.i = z__4.i + z__11.i; - i__7 = j + c_dim1 * 6; - z__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__12.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__2.r = z__3.r + z__12.r, z__2.i = z__3.i + z__12.i; - i__8 = j + c_dim1 * 7; - z__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__13.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__1.r = z__2.r + z__13.r, z__1.i = z__2.i + z__13.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + ((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 5; - i__3 = j + c_dim1 * 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 6; - i__3 = j + c_dim1 * 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 7; - i__3 = j + c_dim1 * 7; - z__2.r = 
sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L340: */ - } - goto L410; -L350: - -/* Special code for 8 x 8 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - v5.r = v[5].r, v5.i = v[5].i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - v6.r = v[6].r, v6.i = v[6].i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - v7.r = v[7].r, v7.i = v[7].i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - v8.r = v[8].r, v8.i = v[8].i; - d_cnjg(&z__2, &v8); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t8.r = z__1.r, t8.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__8.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__9.i = v2.r * - c__[i__3].i + v2.i * c__[i__3].r; - 
z__7.r = z__8.r + z__9.r, z__7.i = z__8.i + z__9.i; - i__4 = j + c_dim1 * 3; - z__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__10.i = v3.r - * c__[i__4].i + v3.i * c__[i__4].r; - z__6.r = z__7.r + z__10.r, z__6.i = z__7.i + z__10.i; - i__5 = j + ((c_dim1) << (2)); - z__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__11.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__5.r = z__6.r + z__11.r, z__5.i = z__6.i + z__11.i; - i__6 = j + c_dim1 * 5; - z__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__12.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__4.r = z__5.r + z__12.r, z__4.i = z__5.i + z__12.i; - i__7 = j + c_dim1 * 6; - z__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__13.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__3.r = z__4.r + z__13.r, z__3.i = z__4.i + z__13.i; - i__8 = j + c_dim1 * 7; - z__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__14.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__2.r = z__3.r + z__14.r, z__2.i = z__3.i + z__14.i; - i__9 = j + ((c_dim1) << (3)); - z__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__15.i = v8.r - * c__[i__9].i + v8.i * c__[i__9].r; - z__1.r = z__2.r + z__15.r, z__1.i = z__2.i + z__15.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + 
((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 5; - i__3 = j + c_dim1 * 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 6; - i__3 = j + c_dim1 * 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 7; - i__3 = j + c_dim1 * 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (3)); - i__3 = j + ((c_dim1) << (3)); - z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i + - sum.i * t8.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L360: */ - } - goto L410; -L370: - -/* Special code for 9 x 9 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - v5.r = v[5].r, v5.i 
= v[5].i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - v6.r = v[6].r, v6.i = v[6].i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - v7.r = v[7].r, v7.i = v[7].i; - d_cnjg(&z__2, &v7); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - v8.r = v[8].r, v8.i = v[8].i; - d_cnjg(&z__2, &v8); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t8.r = z__1.r, t8.i = z__1.i; - v9.r = v[9].r, v9.i = v[9].i; - d_cnjg(&z__2, &v9); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t9.r = z__1.r, t9.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__9.i = v1.r * - c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__10.i = v2.r - * c__[i__3].i + v2.i * c__[i__3].r; - z__8.r = z__9.r + z__10.r, z__8.i = z__9.i + z__10.i; - i__4 = j + c_dim1 * 3; - z__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__11.i = v3.r - * c__[i__4].i + v3.i * c__[i__4].r; - z__7.r = z__8.r + z__11.r, z__7.i = z__8.i + z__11.i; - i__5 = j + ((c_dim1) << (2)); - z__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__12.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__6.r = z__7.r + z__12.r, z__6.i = z__7.i + z__12.i; - i__6 = j + c_dim1 * 5; - z__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__13.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__5.r = z__6.r + z__13.r, z__5.i = z__6.i + z__13.i; - i__7 = j + c_dim1 * 6; - z__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__14.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__4.r = z__5.r + z__14.r, z__4.i = z__5.i + z__14.i; - i__8 = j + 
c_dim1 * 7; - z__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__15.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__3.r = z__4.r + z__15.r, z__3.i = z__4.i + z__15.i; - i__9 = j + ((c_dim1) << (3)); - z__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__16.i = v8.r - * c__[i__9].i + v8.i * c__[i__9].r; - z__2.r = z__3.r + z__16.r, z__2.i = z__3.i + z__16.i; - i__10 = j + c_dim1 * 9; - z__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__17.i = - v9.r * c__[i__10].i + v9.i * c__[i__10].r; - z__1.r = z__2.r + z__17.r, z__1.i = z__2.i + z__17.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + ((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 5; - i__3 = j + c_dim1 * 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 6; - i__3 = j + c_dim1 * 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - 
z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 7; - i__3 = j + c_dim1 * 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (3)); - i__3 = j + ((c_dim1) << (3)); - z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i + - sum.i * t8.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 9; - i__3 = j + c_dim1 * 9; - z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i + - sum.i * t9.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L380: */ - } - goto L410; -L390: - -/* Special code for 10 x 10 Householder */ - - v1.r = v[1].r, v1.i = v[1].i; - d_cnjg(&z__2, &v1); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t1.r = z__1.r, t1.i = z__1.i; - v2.r = v[2].r, v2.i = v[2].i; - d_cnjg(&z__2, &v2); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t2.r = z__1.r, t2.i = z__1.i; - v3.r = v[3].r, v3.i = v[3].i; - d_cnjg(&z__2, &v3); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t3.r = z__1.r, t3.i = z__1.i; - v4.r = v[4].r, v4.i = v[4].i; - d_cnjg(&z__2, &v4); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t4.r = z__1.r, t4.i = z__1.i; - v5.r = v[5].r, v5.i = v[5].i; - d_cnjg(&z__2, &v5); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t5.r = z__1.r, t5.i = z__1.i; - v6.r = v[6].r, v6.i = v[6].i; - d_cnjg(&z__2, &v6); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t6.r = z__1.r, t6.i = z__1.i; - v7.r = v[7].r, v7.i = v[7].i; - d_cnjg(&z__2, &v7); - 
z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t7.r = z__1.r, t7.i = z__1.i; - v8.r = v[8].r, v8.i = v[8].i; - d_cnjg(&z__2, &v8); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t8.r = z__1.r, t8.i = z__1.i; - v9.r = v[9].r, v9.i = v[9].i; - d_cnjg(&z__2, &v9); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t9.r = z__1.r, t9.i = z__1.i; - v10.r = v[10].r, v10.i = v[10].i; - d_cnjg(&z__2, &v10); - z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i - + tau->i * z__2.r; - t10.r = z__1.r, t10.i = z__1.i; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - i__2 = j + c_dim1; - z__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__10.i = v1.r - * c__[i__2].i + v1.i * c__[i__2].r; - i__3 = j + ((c_dim1) << (1)); - z__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__11.i = v2.r - * c__[i__3].i + v2.i * c__[i__3].r; - z__9.r = z__10.r + z__11.r, z__9.i = z__10.i + z__11.i; - i__4 = j + c_dim1 * 3; - z__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__12.i = v3.r - * c__[i__4].i + v3.i * c__[i__4].r; - z__8.r = z__9.r + z__12.r, z__8.i = z__9.i + z__12.i; - i__5 = j + ((c_dim1) << (2)); - z__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__13.i = v4.r - * c__[i__5].i + v4.i * c__[i__5].r; - z__7.r = z__8.r + z__13.r, z__7.i = z__8.i + z__13.i; - i__6 = j + c_dim1 * 5; - z__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__14.i = v5.r - * c__[i__6].i + v5.i * c__[i__6].r; - z__6.r = z__7.r + z__14.r, z__6.i = z__7.i + z__14.i; - i__7 = j + c_dim1 * 6; - z__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__15.i = v6.r - * c__[i__7].i + v6.i * c__[i__7].r; - z__5.r = z__6.r + z__15.r, z__5.i = z__6.i + z__15.i; - i__8 = j + c_dim1 * 7; - z__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__16.i = v7.r - * c__[i__8].i + v7.i * c__[i__8].r; - z__4.r = z__5.r + z__16.r, z__4.i = z__5.i + z__16.i; - i__9 = j + ((c_dim1) << 
(3)); - z__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__17.i = v8.r - * c__[i__9].i + v8.i * c__[i__9].r; - z__3.r = z__4.r + z__17.r, z__3.i = z__4.i + z__17.i; - i__10 = j + c_dim1 * 9; - z__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__18.i = - v9.r * c__[i__10].i + v9.i * c__[i__10].r; - z__2.r = z__3.r + z__18.r, z__2.i = z__3.i + z__18.i; - i__11 = j + c_dim1 * 10; - z__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, z__19.i = - v10.r * c__[i__11].i + v10.i * c__[i__11].r; - z__1.r = z__2.r + z__19.r, z__1.i = z__2.i + z__19.i; - sum.r = z__1.r, sum.i = z__1.i; - i__2 = j + c_dim1; - i__3 = j + c_dim1; - z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i + - sum.i * t1.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (1)); - i__3 = j + ((c_dim1) << (1)); - z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i + - sum.i * t2.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 3; - i__3 = j + c_dim1 * 3; - z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i + - sum.i * t3.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (2)); - i__3 = j + ((c_dim1) << (2)); - z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i + - sum.i * t4.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 5; - i__3 = j + c_dim1 * 5; - z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i + - sum.i * t5.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 6; - i__3 = j + c_dim1 * 6; - z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i + - sum.i * t6.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - 
z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 7; - i__3 = j + c_dim1 * 7; - z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i + - sum.i * t7.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + ((c_dim1) << (3)); - i__3 = j + ((c_dim1) << (3)); - z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i + - sum.i * t8.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 9; - i__3 = j + c_dim1 * 9; - z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i + - sum.i * t9.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; - i__2 = j + c_dim1 * 10; - i__3 = j + c_dim1 * 10; - z__2.r = sum.r * t10.r - sum.i * t10.i, z__2.i = sum.r * t10.i + - sum.i * t10.r; - z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; - c__[i__2].r = z__1.r, c__[i__2].i = z__1.i; -/* L400: */ - } - goto L410; - } -L410: - return 0; - -/* End of ZLARFX */ - -} /* zlarfx_ */ - -/* Subroutine */ int zlascl_(char *type__, integer *kl, integer *ku, - doublereal *cfrom, doublereal *cto, integer *m, integer *n, - doublecomplex *a, integer *lda, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1; - - /* Local variables */ - static integer i__, j, k1, k2, k3, k4; - static doublereal mul, cto1; - static logical done; - static doublereal ctoc; - extern logical lsame_(char *, char *); - static integer itype; - static doublereal cfrom1; - - static doublereal cfromc; - extern /* Subroutine */ int xerbla_(char *, integer *); - static doublereal bignum, smlnum; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - February 29, 1992 - - - Purpose - ======= - - ZLASCL multiplies the M by N complex matrix A by the real scalar - CTO/CFROM. This is done without over/underflow as long as the final - result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that - A may be full, upper triangular, lower triangular, upper Hessenberg, - or banded. - - Arguments - ========= - - TYPE (input) CHARACTER*1 - TYPE indices the storage type of the input matrix. - = 'G': A is a full matrix. - = 'L': A is a lower triangular matrix. - = 'U': A is an upper triangular matrix. - = 'H': A is an upper Hessenberg matrix. - = 'B': A is a symmetric band matrix with lower bandwidth KL - and upper bandwidth KU and with the only the lower - half stored. - = 'Q': A is a symmetric band matrix with lower bandwidth KL - and upper bandwidth KU and with the only the upper - half stored. - = 'Z': A is a band matrix with lower bandwidth KL and upper - bandwidth KU. - - KL (input) INTEGER - The lower bandwidth of A. Referenced only if TYPE = 'B', - 'Q' or 'Z'. - - KU (input) INTEGER - The upper bandwidth of A. Referenced only if TYPE = 'B', - 'Q' or 'Z'. - - CFROM (input) DOUBLE PRECISION - CTO (input) DOUBLE PRECISION - The matrix A is multiplied by CTO/CFROM. A(I,J) is computed - without over/underflow if the final result CTO*A(I,J)/CFROM - can be represented without over/underflow. CFROM must be - nonzero. - - M (input) INTEGER - The number of rows of the matrix A. M >= 0. - - N (input) INTEGER - The number of columns of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,M) - The matrix to be multiplied by CTO/CFROM. See TYPE for the - storage type. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - INFO (output) INTEGER - 0 - successful exit - <0 - if INFO = -i, the i-th argument had an illegal value. 
- - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - *info = 0; - - if (lsame_(type__, "G")) { - itype = 0; - } else if (lsame_(type__, "L")) { - itype = 1; - } else if (lsame_(type__, "U")) { - itype = 2; - } else if (lsame_(type__, "H")) { - itype = 3; - } else if (lsame_(type__, "B")) { - itype = 4; - } else if (lsame_(type__, "Q")) { - itype = 5; - } else if (lsame_(type__, "Z")) { - itype = 6; - } else { - itype = -1; - } - - if (itype == -1) { - *info = -1; - } else if (*cfrom == 0.) { - *info = -4; - } else if (*m < 0) { - *info = -6; - } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m)) - { - *info = -7; - } else if ((itype <= 3 && *lda < max(1,*m))) { - *info = -9; - } else if (itype >= 4) { -/* Computing MAX */ - i__1 = *m - 1; - if (*kl < 0 || *kl > max(i__1,0)) { - *info = -2; - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = *n - 1; - if (*ku < 0 || *ku > max(i__1,0) || ((itype == 4 || itype == 5) && - *kl != *ku)) { - *info = -3; - } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda < - *ku + 1) || (itype == 6 && *lda < ((*kl) << (1)) + *ku + - 1)) { - *info = -9; - } - } - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLASCL", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0 || *m == 0) { - return 0; - } - -/* Get machine parameters */ - - smlnum = SAFEMINIMUM; - bignum = 1. 
/ smlnum; - - cfromc = *cfrom; - ctoc = *cto; - -L10: - cfrom1 = cfromc * smlnum; - cto1 = ctoc / bignum; - if ((abs(cfrom1) > abs(ctoc) && ctoc != 0.)) { - mul = smlnum; - done = FALSE_; - cfromc = cfrom1; - } else if (abs(cto1) > abs(cfromc)) { - mul = bignum; - done = FALSE_; - ctoc = cto1; - } else { - mul = ctoc / cfromc; - done = TRUE_; - } - - if (itype == 0) { - -/* Full matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L20: */ - } -/* L30: */ - } - - } else if (itype == 1) { - -/* Lower triangular matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L40: */ - } -/* L50: */ - } - - } else if (itype == 2) { - -/* Upper triangular matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = min(j,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L60: */ - } -/* L70: */ - } - - } else if (itype == 3) { - -/* Upper Hessenberg matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = j + 1; - i__2 = min(i__3,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L80: */ - } -/* L90: */ - } - - } else if (itype == 4) { - -/* Lower half of a symmetric band matrix */ - - k3 = *kl + 1; - k4 = *n + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = k3, i__4 = k4 - j; - i__2 = min(i__3,i__4); - for (i__ = 1; i__ 
<= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L100: */ - } -/* L110: */ - } - - } else if (itype == 5) { - -/* Upper half of a symmetric band matrix */ - - k1 = *ku + 2; - k3 = *ku + 1; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MAX */ - i__2 = k1 - j; - i__3 = k3; - for (i__ = max(i__2,1); i__ <= i__3; ++i__) { - i__2 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L120: */ - } -/* L130: */ - } - - } else if (itype == 6) { - -/* Band matrix */ - - k1 = *kl + *ku + 2; - k2 = *kl + 1; - k3 = ((*kl) << (1)) + *ku + 1; - k4 = *kl + *ku + 1 + *m; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MAX */ - i__3 = k1 - j; -/* Computing MIN */ - i__4 = k3, i__5 = k4 - j; - i__2 = min(i__4,i__5); - for (i__ = max(i__3,k2); i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L140: */ - } -/* L150: */ - } - - } - - if (! done) { - goto L10; - } - - return 0; - -/* End of ZLASCL */ - -} /* zlascl_ */ - -/* Subroutine */ int zlaset_(char *uplo, integer *m, integer *n, - doublecomplex *alpha, doublecomplex *beta, doublecomplex *a, integer * - lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j; - extern logical lsame_(char *, char *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLASET initializes a 2-D array A to BETA on the diagonal and - ALPHA on the offdiagonals. 
- - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies the part of the matrix A to be set. - = 'U': Upper triangular part is set. The lower triangle - is unchanged. - = 'L': Lower triangular part is set. The upper triangle - is unchanged. - Otherwise: All of the matrix A is set. - - M (input) INTEGER - On entry, M specifies the number of rows of A. - - N (input) INTEGER - On entry, N specifies the number of columns of A. - - ALPHA (input) COMPLEX*16 - All the offdiagonal array elements are set to ALPHA. - - BETA (input) COMPLEX*16 - All the diagonal array elements are set to BETA. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the m by n matrix A. - On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n, i.ne.j; - A(i,i) = BETA , 1 <= i <= min(m,n) - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - if (lsame_(uplo, "U")) { - -/* - Set the diagonal to BETA and the strictly upper triangular - part of the array to ALPHA. -*/ - - i__1 = *n; - for (j = 2; j <= i__1; ++j) { -/* Computing MIN */ - i__3 = j - 1; - i__2 = min(i__3,*m); - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = alpha->r, a[i__3].i = alpha->i; -/* L10: */ - } -/* L20: */ - } - i__1 = min(*n,*m); - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + i__ * a_dim1; - a[i__2].r = beta->r, a[i__2].i = beta->i; -/* L30: */ - } - - } else if (lsame_(uplo, "L")) { - -/* - Set the diagonal to BETA and the strictly lower triangular - part of the array to ALPHA. 
-*/ - - i__1 = min(*m,*n); - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = alpha->r, a[i__3].i = alpha->i; -/* L40: */ - } -/* L50: */ - } - i__1 = min(*n,*m); - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + i__ * a_dim1; - a[i__2].r = beta->r, a[i__2].i = beta->i; -/* L60: */ - } - - } else { - -/* - Set the array to BETA on the diagonal and ALPHA on the - offdiagonal. -*/ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = alpha->r, a[i__3].i = alpha->i; -/* L70: */ - } -/* L80: */ - } - i__1 = min(*m,*n); - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + i__ * a_dim1; - a[i__2].r = beta->r, a[i__2].i = beta->i; -/* L90: */ - } - } - - return 0; - -/* End of ZLASET */ - -} /* zlaset_ */ - -/* Subroutine */ int zlasr_(char *side, char *pivot, char *direct, integer *m, - integer *n, doublereal *c__, doublereal *s, doublecomplex *a, - integer *lda) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublecomplex z__1, z__2, z__3; - - /* Local variables */ - static integer i__, j, info; - static doublecomplex temp; - extern logical lsame_(char *, char *); - static doublereal ctemp, stemp; - extern /* Subroutine */ int xerbla_(char *, integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - ZLASR performs the transformation - - A := P*A, when SIDE = 'L' or 'l' ( Left-hand side ) - - A := A*P', when SIDE = 'R' or 'r' ( Right-hand side ) - - where A is an m by n complex matrix and P is an orthogonal matrix, - consisting of a sequence of plane rotations determined by the - parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l' - and z = n when SIDE = 'R' or 'r' ): - - When DIRECT = 'F' or 'f' ( Forward sequence ) then - - P = P( z - 1 )*...*P( 2 )*P( 1 ), - - and when DIRECT = 'B' or 'b' ( Backward sequence ) then - - P = P( 1 )*P( 2 )*...*P( z - 1 ), - - where P( k ) is a plane rotation matrix for the following planes: - - when PIVOT = 'V' or 'v' ( Variable pivot ), - the plane ( k, k + 1 ) - - when PIVOT = 'T' or 't' ( Top pivot ), - the plane ( 1, k + 1 ) - - when PIVOT = 'B' or 'b' ( Bottom pivot ), - the plane ( k, z ) - - c( k ) and s( k ) must contain the cosine and sine that define the - matrix P( k ). The two by two plane rotation part of the matrix - P( k ), R( k ), is assumed to be of the form - - R( k ) = ( c( k ) s( k ) ). - ( -s( k ) c( k ) ) - - Arguments - ========= - - SIDE (input) CHARACTER*1 - Specifies whether the plane rotation matrix P is applied to - A on the left or the right. - = 'L': Left, compute A := P*A - = 'R': Right, compute A:= A*P' - - DIRECT (input) CHARACTER*1 - Specifies whether P is a forward or backward sequence of - plane rotations. - = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 ) - = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 ) - - PIVOT (input) CHARACTER*1 - Specifies the plane for which P(k) is a plane rotation - matrix. - = 'V': Variable pivot, the plane (k,k+1) - = 'T': Top pivot, the plane (1,k+1) - = 'B': Bottom pivot, the plane (k,z) - - M (input) INTEGER - The number of rows of the matrix A. If m <= 1, an immediate - return is effected. 
- - N (input) INTEGER - The number of columns of the matrix A. If n <= 1, an - immediate return is effected. - - C, S (input) DOUBLE PRECISION arrays, dimension - (M-1) if SIDE = 'L' - (N-1) if SIDE = 'R' - c(k) and s(k) contain the cosine and sine that define the - matrix P(k). The two by two plane rotation part of the - matrix P(k), R(k), is assumed to be of the form - R( k ) = ( c( k ) s( k ) ). - ( -s( k ) c( k ) ) - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - The m by n matrix A. On exit, A is overwritten by P*A if - SIDE = 'R' or by A*P' if SIDE = 'L'. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,M). - - ===================================================================== - - - Test the input parameters -*/ - - /* Parameter adjustments */ - --c__; - --s; - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - info = 0; - if (! (lsame_(side, "L") || lsame_(side, "R"))) { - info = 1; - } else if (! (lsame_(pivot, "V") || lsame_(pivot, - "T") || lsame_(pivot, "B"))) { - info = 2; - } else if (! (lsame_(direct, "F") || lsame_(direct, - "B"))) { - info = 3; - } else if (*m < 0) { - info = 4; - } else if (*n < 0) { - info = 5; - } else if (*lda < max(1,*m)) { - info = 9; - } - if (info != 0) { - xerbla_("ZLASR ", &info); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - return 0; - } - if (lsame_(side, "L")) { - -/* Form P * A */ - - if (lsame_(pivot, "V")) { - if (lsame_(direct, "F")) { - i__1 = *m - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = j + 1 + i__ * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - i__3 = j + 1 + i__ * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__4 = j + i__ * a_dim1; - z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[ - i__4].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - i__3 = j + i__ * a_dim1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__4 = j + i__ * a_dim1; - z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[ - i__4].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L10: */ - } - } -/* L20: */ - } - } else if (lsame_(direct, "B")) { - for (j = *m - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = j + 1 + i__ * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - i__2 = j + 1 + i__ * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__3 = j + i__ * a_dim1; - z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[ - i__3].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; - i__2 = j + i__ * a_dim1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__3 = j + i__ * a_dim1; - z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[ - i__3].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L30: */ - } - } -/* L40: */ - } - } - } else if (lsame_(pivot, "T")) { - if (lsame_(direct, "F")) { - i__1 = *m; - for (j = 2; j <= i__1; ++j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = j + i__ * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - i__3 = j + i__ * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__4 = i__ * a_dim1 + 1; - z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[ - i__4].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - i__3 = i__ * a_dim1 + 1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__4 = i__ * a_dim1 + 1; - z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[ - i__4].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L50: */ - } - } -/* L60: */ - } - } else if (lsame_(direct, "B")) { - for (j = *m; j >= 2; --j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = j + i__ * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - i__2 = j + i__ * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__3 = i__ * a_dim1 + 1; - z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[ - i__3].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; - i__2 = i__ * a_dim1 + 1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__3 = i__ * a_dim1 + 1; - z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[ - i__3].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L70: */ - } - } -/* L80: */ - } - } - } else if (lsame_(pivot, "B")) { - if (lsame_(direct, "F")) { - i__1 = *m - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = j + i__ * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - i__3 = j + i__ * a_dim1; - i__4 = *m + i__ * a_dim1; - z__2.r = stemp * a[i__4].r, z__2.i = stemp * a[ - i__4].i; - z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - i__3 = *m + i__ * a_dim1; - i__4 = *m + i__ * a_dim1; - z__2.r = ctemp * a[i__4].r, z__2.i = ctemp * a[ - i__4].i; - z__3.r = stemp * temp.r, z__3.i = stemp * temp.i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L90: */ - } - } -/* L100: */ - } - } else if (lsame_(direct, "B")) { - for (j = *m - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = j + i__ * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - i__2 = j + i__ * a_dim1; - i__3 = *m + i__ * a_dim1; - z__2.r = stemp * a[i__3].r, z__2.i = stemp * a[ - i__3].i; - z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; - i__2 = *m + i__ * a_dim1; - i__3 = *m + i__ * a_dim1; - z__2.r = ctemp * a[i__3].r, z__2.i = ctemp * a[ - i__3].i; - z__3.r = stemp * temp.r, z__3.i = stemp * temp.i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L110: */ - } - } -/* L120: */ - } - } - } - } else if (lsame_(side, "R")) { - -/* Form A * P' */ - - if (lsame_(pivot, "V")) { - if (lsame_(direct, "F")) { - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + (j + 1) * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - i__3 = i__ + (j + 1) * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__4 = i__ + j * a_dim1; - z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[ - i__4].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - i__3 = i__ + j * a_dim1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__4 = i__ + j * a_dim1; - z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[ - i__4].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L130: */ - } - } -/* L140: */ - } - } else if (lsame_(direct, "B")) { - for (j = *n - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + (j + 1) * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - i__2 = i__ + (j + 1) * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__3 = i__ + j * a_dim1; - z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[ - i__3].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; - i__2 = i__ + j * a_dim1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__3 = i__ + j * a_dim1; - z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[ - i__3].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L150: */ - } - } -/* L160: */ - } - } - } else if (lsame_(pivot, "T")) { - if (lsame_(direct, "F")) { - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - i__3 = i__ + j * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__4 = i__ + a_dim1; - z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[ - i__4].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - i__3 = i__ + a_dim1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__4 = i__ + a_dim1; - z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[ - i__4].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L170: */ - } - } -/* L180: */ - } - } else if (lsame_(direct, "B")) { - for (j = *n; j >= 2; --j) { - ctemp = c__[j - 1]; - stemp = s[j - 1]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + j * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - i__2 = i__ + j * a_dim1; - z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i; - i__3 = i__ + a_dim1; - z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[ - i__3].i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; - i__2 = i__ + a_dim1; - z__2.r = stemp * temp.r, z__2.i = stemp * temp.i; - i__3 = i__ + a_dim1; - z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[ - i__3].i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L190: */ - } - } -/* L200: */ - } - } - } else if (lsame_(pivot, "B")) { - if (lsame_(direct, "F")) { - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) 
{ - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - temp.r = a[i__3].r, temp.i = a[i__3].i; - i__3 = i__ + j * a_dim1; - i__4 = i__ + *n * a_dim1; - z__2.r = stemp * a[i__4].r, z__2.i = stemp * a[ - i__4].i; - z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - i__3 = i__ + *n * a_dim1; - i__4 = i__ + *n * a_dim1; - z__2.r = ctemp * a[i__4].r, z__2.i = ctemp * a[ - i__4].i; - z__3.r = stemp * temp.r, z__3.i = stemp * temp.i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; -/* L210: */ - } - } -/* L220: */ - } - } else if (lsame_(direct, "B")) { - for (j = *n - 1; j >= 1; --j) { - ctemp = c__[j]; - stemp = s[j]; - if (ctemp != 1. || stemp != 0.) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + j * a_dim1; - temp.r = a[i__2].r, temp.i = a[i__2].i; - i__2 = i__ + j * a_dim1; - i__3 = i__ + *n * a_dim1; - z__2.r = stemp * a[i__3].r, z__2.i = stemp * a[ - i__3].i; - z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; - i__2 = i__ + *n * a_dim1; - i__3 = i__ + *n * a_dim1; - z__2.r = ctemp * a[i__3].r, z__2.i = ctemp * a[ - i__3].i; - z__3.r = stemp * temp.r, z__3.i = stemp * temp.i; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - - z__3.i; - a[i__2].r = z__1.r, a[i__2].i = z__1.i; -/* L230: */ - } - } -/* L240: */ - } - } - } - } - - return 0; - -/* End of ZLASR */ - -} /* zlasr_ */ - -/* Subroutine */ int zlassq_(integer *n, doublecomplex *x, integer *incx, - doublereal *scale, doublereal *sumsq) -{ - /* System generated locals */ - integer i__1, i__2, i__3; - doublereal d__1; - - /* Builtin functions */ - double d_imag(doublecomplex *); - - /* Local variables */ - static integer ix; - static doublereal temp1; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZLASSQ returns the values scl and ssq such that - - ( scl**2 )*ssq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, - - where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is - assumed to be at least unity and the value of ssq will then satisfy - - 1.0 .le. ssq .le. ( sumsq + 2*n ). - - scale is assumed to be non-negative and scl returns the value - - scl = max( scale, abs( real( x( i ) ) ), abs( aimag( x( i ) ) ) ), - i - - scale and sumsq must be supplied in SCALE and SUMSQ respectively. - SCALE and SUMSQ are overwritten by scl and ssq respectively. - - The routine makes only one pass through the vector X. - - Arguments - ========= - - N (input) INTEGER - The number of elements to be used from the vector X. - - X (input) COMPLEX*16 array, dimension (N) - The vector x as described above. - x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. - - INCX (input) INTEGER - The increment between successive values of the vector X. - INCX > 0. - - SCALE (input/output) DOUBLE PRECISION - On entry, the value scale in the equation above. - On exit, SCALE is overwritten with the value scl . - - SUMSQ (input/output) DOUBLE PRECISION - On entry, the value sumsq in the equation above. - On exit, SUMSQ is overwritten with the value ssq . - - ===================================================================== -*/ - - - /* Parameter adjustments */ - --x; - - /* Function Body */ - if (*n > 0) { - i__1 = (*n - 1) * *incx + 1; - i__2 = *incx; - for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { - i__3 = ix; - if (x[i__3].r != 0.) 
{ - i__3 = ix; - temp1 = (d__1 = x[i__3].r, abs(d__1)); - if (*scale < temp1) { -/* Computing 2nd power */ - d__1 = *scale / temp1; - *sumsq = *sumsq * (d__1 * d__1) + 1; - *scale = temp1; - } else { -/* Computing 2nd power */ - d__1 = temp1 / *scale; - *sumsq += d__1 * d__1; - } - } - if (d_imag(&x[ix]) != 0.) { - temp1 = (d__1 = d_imag(&x[ix]), abs(d__1)); - if (*scale < temp1) { -/* Computing 2nd power */ - d__1 = *scale / temp1; - *sumsq = *sumsq * (d__1 * d__1) + 1; - *scale = temp1; - } else { -/* Computing 2nd power */ - d__1 = temp1 / *scale; - *sumsq += d__1 * d__1; - } - } -/* L10: */ - } - } - - return 0; - -/* End of ZLASSQ */ - -} /* zlassq_ */ - -/* Subroutine */ int zlaswp_(integer *n, doublecomplex *a, integer *lda, - integer *k1, integer *k2, integer *ipiv, integer *incx) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6; - - /* Local variables */ - static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc; - static doublecomplex temp; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZLASWP performs a series of row interchanges on the matrix A. - One row interchange is initiated for each of rows K1 through K2 of A. - - Arguments - ========= - - N (input) INTEGER - The number of columns of the matrix A. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the matrix of column dimension N to which the row - interchanges will be applied. - On exit, the permuted matrix. - - LDA (input) INTEGER - The leading dimension of the array A. - - K1 (input) INTEGER - The first element of IPIV for which a row interchange will - be done. - - K2 (input) INTEGER - The last element of IPIV for which a row interchange will - be done. - - IPIV (input) INTEGER array, dimension (M*abs(INCX)) - The vector of pivot indices. 
Only the elements in positions - K1 through K2 of IPIV are accessed. - IPIV(K) = L implies rows K and L are to be interchanged. - - INCX (input) INTEGER - The increment between successive values of IPIV. If IPIV - is negative, the pivots are applied in reverse order. - - Further Details - =============== - - Modified by - R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA - - ===================================================================== - - - Interchange row I with row IPIV(I) for each of rows K1 through K2. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --ipiv; - - /* Function Body */ - if (*incx > 0) { - ix0 = *k1; - i1 = *k1; - i2 = *k2; - inc = 1; - } else if (*incx < 0) { - ix0 = (1 - *k2) * *incx + 1; - i1 = *k2; - i2 = *k1; - inc = -1; - } else { - return 0; - } - - n32 = (*n / 32) << (5); - if (n32 != 0) { - i__1 = n32; - for (j = 1; j <= i__1; j += 32) { - ix = ix0; - i__2 = i2; - i__3 = inc; - for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) - { - ip = ipiv[ix]; - if (ip != i__) { - i__4 = j + 31; - for (k = j; k <= i__4; ++k) { - i__5 = i__ + k * a_dim1; - temp.r = a[i__5].r, temp.i = a[i__5].i; - i__5 = i__ + k * a_dim1; - i__6 = ip + k * a_dim1; - a[i__5].r = a[i__6].r, a[i__5].i = a[i__6].i; - i__5 = ip + k * a_dim1; - a[i__5].r = temp.r, a[i__5].i = temp.i; -/* L10: */ - } - } - ix += *incx; -/* L20: */ - } -/* L30: */ - } - } - if (n32 != *n) { - ++n32; - ix = ix0; - i__1 = i2; - i__3 = inc; - for (i__ = i1; i__3 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__3) { - ip = ipiv[ix]; - if (ip != i__) { - i__2 = *n; - for (k = n32; k <= i__2; ++k) { - i__4 = i__ + k * a_dim1; - temp.r = a[i__4].r, temp.i = a[i__4].i; - i__4 = i__ + k * a_dim1; - i__5 = ip + k * a_dim1; - a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i; - i__4 = ip + k * a_dim1; - a[i__4].r = temp.r, a[i__4].i = temp.i; -/* L40: */ - } - } - ix += *incx; -/* L50: */ - } - } - - return 0; - -/* End of ZLASWP */ - -} /* zlaswp_ */ - -/* Subroutine */ int zlatrd_(char *uplo, integer *n, integer *nb, - doublecomplex *a, integer *lda, doublereal *e, doublecomplex *tau, - doublecomplex *w, integer *ldw) -{ - /* System generated locals */ - integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; - doublereal d__1; - doublecomplex z__1, z__2, z__3, z__4; - - /* Local variables */ - static integer i__, iw; - static doublecomplex alpha; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *); - extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *); - extern /* Subroutine */ int zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *), - zhemv_(char *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *), zaxpy_(integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *, - integer *); - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to - Hermitian tridiagonal form by a unitary similarity - transformation Q' * A * Q, and returns the matrices V and W which are - needed to apply the transformation to the unreduced part of A. - - If UPLO = 'U', ZLATRD reduces the last NB rows and columns of a - matrix, of which the upper triangle is supplied; - if UPLO = 'L', ZLATRD reduces the first NB rows and columns of a - matrix, of which the lower triangle is supplied. - - This is an auxiliary routine called by ZHETRD. - - Arguments - ========= - - UPLO (input) CHARACTER - Specifies whether the upper or lower triangular part of the - Hermitian matrix A is stored: - = 'U': Upper triangular - = 'L': Lower triangular - - N (input) INTEGER - The order of the matrix A. - - NB (input) INTEGER - The number of rows and columns to be reduced. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the Hermitian matrix A. If UPLO = 'U', the leading - n-by-n upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading n-by-n lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. 
- On exit: - if UPLO = 'U', the last NB columns have been reduced to - tridiagonal form, with the diagonal elements overwriting - the diagonal elements of A; the elements above the diagonal - with the array TAU, represent the unitary matrix Q as a - product of elementary reflectors; - if UPLO = 'L', the first NB columns have been reduced to - tridiagonal form, with the diagonal elements overwriting - the diagonal elements of A; the elements below the diagonal - with the array TAU, represent the unitary matrix Q as a - product of elementary reflectors. - See Further Details. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - E (output) DOUBLE PRECISION array, dimension (N-1) - If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal - elements of the last NB columns of the reduced matrix; - if UPLO = 'L', E(1:nb) contains the subdiagonal elements of - the first NB columns of the reduced matrix. - - TAU (output) COMPLEX*16 array, dimension (N-1) - The scalar factors of the elementary reflectors, stored in - TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. - See Further Details. - - W (output) COMPLEX*16 array, dimension (LDW,NB) - The n-by-nb matrix W required to update the unreduced part - of A. - - LDW (input) INTEGER - The leading dimension of the array W. LDW >= max(1,N). - - Further Details - =============== - - If UPLO = 'U', the matrix Q is represented as a product of elementary - reflectors - - Q = H(n) H(n-1) . . . H(n-nb+1). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), - and tau in TAU(i-1). - - If UPLO = 'L', the matrix Q is represented as a product of elementary - reflectors - - Q = H(1) H(2) . . . H(nb). 
- - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a complex scalar, and v is a complex vector with - v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), - and tau in TAU(i). - - The elements of the vectors v together form the n-by-nb matrix V - which is needed, with W, to apply the transformation to the unreduced - part of the matrix, using a Hermitian rank-2k update of the form: - A := A - V*W' - W*V'. - - The contents of A on exit are illustrated by the following examples - with n = 5 and nb = 2: - - if UPLO = 'U': if UPLO = 'L': - - ( a a a v4 v5 ) ( d ) - ( a a v4 v5 ) ( 1 d ) - ( a 1 v5 ) ( v1 1 a ) - ( d 1 ) ( v1 v2 a a ) - ( d ) ( v1 v2 a a a ) - - where d denotes a diagonal element of the reduced matrix, a denotes - an element of the original matrix that is unchanged, and vi denotes - an element of the vector defining H(i). - - ===================================================================== - - - Quick return if possible -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --e; - --tau; - w_dim1 = *ldw; - w_offset = 1 + w_dim1 * 1; - w -= w_offset; - - /* Function Body */ - if (*n <= 0) { - return 0; - } - - if (lsame_(uplo, "U")) { - -/* Reduce last NB columns of upper triangle */ - - i__1 = *n - *nb + 1; - for (i__ = *n; i__ >= i__1; --i__) { - iw = i__ - *n + *nb; - if (i__ < *n) { - -/* Update A(1:i,i) */ - - i__2 = i__ + i__ * a_dim1; - i__3 = i__ + i__ * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - i__2 = *n - i__; - zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw); - i__2 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__, &i__2, &z__1, &a[(i__ + 1) * - a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & - c_b60, &a[i__ * a_dim1 + 1], &c__1); - i__2 = *n - i__; - zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw); - i__2 = *n - i__; - zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); - i__2 = *n - i__; - z__1.r = 
-1., z__1.i = -0.; - zgemv_("No transpose", &i__, &i__2, &z__1, &w[(iw + 1) * - w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & - c_b60, &a[i__ * a_dim1 + 1], &c__1); - i__2 = *n - i__; - zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); - i__2 = i__ + i__ * a_dim1; - i__3 = i__ + i__ * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - } - if (i__ > 1) { - -/* - Generate elementary reflector H(i) to annihilate - A(1:i-2,i) -*/ - - i__2 = i__ - 1 + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = i__ - 1; - zlarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ - - 1]); - i__2 = i__ - 1; - e[i__2] = alpha.r; - i__2 = i__ - 1 + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute W(1:i-1,i) */ - - i__2 = i__ - 1; - zhemv_("Upper", &i__2, &c_b60, &a[a_offset], lda, &a[i__ * - a_dim1 + 1], &c__1, &c_b59, &w[iw * w_dim1 + 1], & - c__1); - if (i__ < *n) { - i__2 = i__ - 1; - i__3 = *n - i__; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &w[( - iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], - &c__1, &c_b59, &w[i__ + 1 + iw * w_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[(i__ + 1) * - a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & - c__1, &c_b60, &w[iw * w_dim1 + 1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[( - i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], - &c__1, &c_b59, &w[i__ + 1 + iw * w_dim1], &c__1); - i__2 = i__ - 1; - i__3 = *n - i__; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &w[(iw + 1) * - w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & - c__1, &c_b60, &w[iw * w_dim1 + 1], &c__1); - } - i__2 = i__ - 1; - zscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1); - z__3.r = -.5, z__3.i = -0.; - i__2 = i__ - 1; - z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = - z__3.r * tau[i__2].i + z__3.i * 
tau[i__2].r; - i__3 = i__ - 1; - zdotc_(&z__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * - a_dim1 + 1], &c__1); - z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * - z__4.i + z__2.i * z__4.r; - alpha.r = z__1.r, alpha.i = z__1.i; - i__2 = i__ - 1; - zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * - w_dim1 + 1], &c__1); - } - -/* L10: */ - } - } else { - -/* Reduce first NB columns of lower triangle */ - - i__1 = *nb; - for (i__ = 1; i__ <= i__1; ++i__) { - -/* Update A(i:n,i) */ - - i__2 = i__ + i__ * a_dim1; - i__3 = i__ + i__ * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - i__2 = i__ - 1; - zlacgv_(&i__2, &w[i__ + w_dim1], ldw); - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda, - &w[i__ + w_dim1], ldw, &c_b60, &a[i__ + i__ * a_dim1], & - c__1); - i__2 = i__ - 1; - zlacgv_(&i__2, &w[i__ + w_dim1], ldw); - i__2 = i__ - 1; - zlacgv_(&i__2, &a[i__ + a_dim1], lda); - i__2 = *n - i__ + 1; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + w_dim1], ldw, - &a[i__ + a_dim1], lda, &c_b60, &a[i__ + i__ * a_dim1], & - c__1); - i__2 = i__ - 1; - zlacgv_(&i__2, &a[i__ + a_dim1], lda); - i__2 = i__ + i__ * a_dim1; - i__3 = i__ + i__ * a_dim1; - d__1 = a[i__3].r; - a[i__2].r = d__1, a[i__2].i = 0.; - if (i__ < *n) { - -/* - Generate elementary reflector H(i) to annihilate - A(i+2:n,i) -*/ - - i__2 = i__ + 1 + i__ * a_dim1; - alpha.r = a[i__2].r, alpha.i = a[i__2].i; - i__2 = *n - i__; -/* Computing MIN */ - i__3 = i__ + 2; - zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, - &tau[i__]); - i__2 = i__; - e[i__2] = alpha.r; - i__2 = i__ + 1 + i__ * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - -/* Compute W(i+1:n,i) */ - - i__2 = *n - i__; - zhemv_("Lower", &i__2, &c_b60, &a[i__ + 1 + (i__ + 1) * - a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & - c_b59, &w[i__ + 1 + i__ * w_dim1], 
&c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &w[i__ + - 1 + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, & - c_b59, &w[i__ * w_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + - a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b60, &w[ - i__ + 1 + i__ * w_dim1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ + - 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & - c_b59, &w[i__ * w_dim1 + 1], &c__1); - i__2 = *n - i__; - i__3 = i__ - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + 1 + - w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b60, &w[ - i__ + 1 + i__ * w_dim1], &c__1); - i__2 = *n - i__; - zscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1); - z__3.r = -.5, z__3.i = -0.; - i__2 = i__; - z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = - z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; - i__3 = *n - i__; - zdotc_(&z__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[ - i__ + 1 + i__ * a_dim1], &c__1); - z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * - z__4.i + z__2.i * z__4.r; - alpha.r = z__1.r, alpha.i = z__1.i; - i__2 = *n - i__; - zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ - i__ + 1 + i__ * w_dim1], &c__1); - } - -/* L20: */ - } - } - - return 0; - -/* End of ZLATRD */ - -} /* zlatrd_ */ - -/* Subroutine */ int zlatrs_(char *uplo, char *trans, char *diag, char * - normin, integer *n, doublecomplex *a, integer *lda, doublecomplex *x, - doublereal *scale, doublereal *cnorm, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublereal d__1, d__2, d__3, d__4; - doublecomplex z__1, z__2, z__3, z__4; - - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex 
*); - - /* Local variables */ - static integer i__, j; - static doublereal xj, rec, tjj; - static integer jinc; - static doublereal xbnd; - static integer imax; - static doublereal tmax; - static doublecomplex tjjs; - static doublereal xmax, grow; - extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, - integer *); - extern logical lsame_(char *, char *); - static doublereal tscal; - static doublecomplex uscal; - static integer jlast; - static doublecomplex csumj; - extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *); - static logical upper; - extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *); - extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *), ztrsv_( - char *, char *, char *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *), dlabad_( - doublereal *, doublereal *); - - extern integer idamax_(integer *, doublereal *, integer *); - extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_( - integer *, doublereal *, doublecomplex *, integer *); - static doublereal bignum; - extern integer izamax_(integer *, doublecomplex *, integer *); - extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *, - doublecomplex *); - static logical notran; - static integer jfirst; - extern doublereal dzasum_(integer *, doublecomplex *, integer *); - static doublereal smlnum; - static logical nounit; - - -/* - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1992 - - - Purpose - ======= - - ZLATRS solves one of the triangular systems - - A * x = s*b, A**T * x = s*b, or A**H * x = s*b, - - with scaling to prevent overflow. 
Here A is an upper or lower - triangular matrix, A**T denotes the transpose of A, A**H denotes the - conjugate transpose of A, x and b are n-element vectors, and s is a - scaling factor, usually less than or equal to 1, chosen so that the - components of x will be less than the overflow threshold. If the - unscaled problem will not cause overflow, the Level 2 BLAS routine - ZTRSV is called. If the matrix A is singular (A(j,j) = 0 for some j), - then s is set to 0 and a non-trivial solution to A*x = 0 is returned. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies whether the matrix A is upper or lower triangular. - = 'U': Upper triangular - = 'L': Lower triangular - - TRANS (input) CHARACTER*1 - Specifies the operation applied to A. - = 'N': Solve A * x = s*b (No transpose) - = 'T': Solve A**T * x = s*b (Transpose) - = 'C': Solve A**H * x = s*b (Conjugate transpose) - - DIAG (input) CHARACTER*1 - Specifies whether or not the matrix A is unit triangular. - = 'N': Non-unit triangular - = 'U': Unit triangular - - NORMIN (input) CHARACTER*1 - Specifies whether CNORM has been set or not. - = 'Y': CNORM contains the column norms on entry - = 'N': CNORM is not set on entry. On exit, the norms will - be computed and stored in CNORM. - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,N) - The triangular matrix A. If UPLO = 'U', the leading n by n - upper triangular part of the array A contains the upper - triangular matrix, and the strictly lower triangular part of - A is not referenced. If UPLO = 'L', the leading n by n lower - triangular part of the array A contains the lower triangular - matrix, and the strictly upper triangular part of A is not - referenced. If DIAG = 'U', the diagonal elements of A are - also not referenced and are assumed to be 1. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max (1,N). 
- - X (input/output) COMPLEX*16 array, dimension (N) - On entry, the right hand side b of the triangular system. - On exit, X is overwritten by the solution vector x. - - SCALE (output) DOUBLE PRECISION - The scaling factor s for the triangular system - A * x = s*b, A**T * x = s*b, or A**H * x = s*b. - If SCALE = 0, the matrix A is singular or badly scaled, and - the vector x is an exact or approximate solution to A*x = 0. - - CNORM (input or output) DOUBLE PRECISION array, dimension (N) - - If NORMIN = 'Y', CNORM is an input argument and CNORM(j) - contains the norm of the off-diagonal part of the j-th column - of A. If TRANS = 'N', CNORM(j) must be greater than or equal - to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) - must be greater than or equal to the 1-norm. - - If NORMIN = 'N', CNORM is an output argument and CNORM(j) - returns the 1-norm of the offdiagonal part of the j-th column - of A. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - - Further Details - ======= ======= - - A rough bound on x is computed; if that is less than overflow, ZTRSV - is called, otherwise, specific code is used which checks for possible - overflow or divide-by-zero at every operation. - - A columnwise scheme is used for solving A*x = b. The basic algorithm - if A is lower triangular is - - x[1:n] := b[1:n] - for j = 1, ..., n - x(j) := x(j) / A(j,j) - x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] - end - - Define bounds on the components of x after j iterations of the loop: - M(j) = bound on x[1:j] - G(j) = bound on x[j+1:n] - Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. - - Then for iteration j+1 we have - M(j+1) <= G(j) / | A(j+1,j+1) | - G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | - <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) - - where CNORM(j+1) is greater than or equal to the infinity-norm of - column j+1 of A, not counting the diagonal. 
Hence - - G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) - 1<=i<=j - and - - |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) - 1<=i< j - - Since |x(j)| <= M(j), we use the Level 2 BLAS routine ZTRSV if the - reciprocal of the largest M(j), j=1,..,n, is larger than - max(underflow, 1/overflow). - - The bound on x(j) is also used to determine when a step in the - columnwise method can be performed without fear of overflow. If - the computed bound is greater than a large constant, x is scaled to - prevent overflow, but if the bound overflows, x is set to 0, x(j) to - 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. - - Similarly, a row-wise scheme is used to solve A**T *x = b or - A**H *x = b. The basic algorithm for A upper triangular is - - for j = 1, ..., n - x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) - end - - We simultaneously compute two bounds - G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j - M(j) = bound on x(i), 1<=i<=j - - The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we - add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. - Then the bound on x(j) is - - M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | - - <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) - 1<=i<=j - - and we can safely call ZTRSV if 1/M(n) and 1/G(n) are both greater - than max(underflow, 1/overflow). - - ===================================================================== -*/ - - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --x; - --cnorm; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - notran = lsame_(trans, "N"); - nounit = lsame_(diag, "N"); - -/* Test the input parameters. */ - - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (((! notran && ! lsame_(trans, "T")) && ! - lsame_(trans, "C"))) { - *info = -2; - } else if ((! nounit && ! lsame_(diag, "U"))) { - *info = -3; - } else if ((! 
lsame_(normin, "Y") && ! lsame_( - normin, "N"))) { - *info = -4; - } else if (*n < 0) { - *info = -5; - } else if (*lda < max(1,*n)) { - *info = -7; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZLATRS", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* Determine machine dependent parameters to control overflow. */ - - smlnum = SAFEMINIMUM; - bignum = 1. / smlnum; - dlabad_(&smlnum, &bignum); - smlnum /= PRECISION; - bignum = 1. / smlnum; - *scale = 1.; - - if (lsame_(normin, "N")) { - -/* Compute the 1-norm of each column, not including the diagonal. */ - - if (upper) { - -/* A is upper triangular. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = j - 1; - cnorm[j] = dzasum_(&i__2, &a[j * a_dim1 + 1], &c__1); -/* L10: */ - } - } else { - -/* A is lower triangular. */ - - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - i__2 = *n - j; - cnorm[j] = dzasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1); -/* L20: */ - } - cnorm[*n] = 0.; - } - } - -/* - Scale the column norms by TSCAL if the maximum element in CNORM is - greater than BIGNUM/2. -*/ - - imax = idamax_(n, &cnorm[1], &c__1); - tmax = cnorm[imax]; - if (tmax <= bignum * .5) { - tscal = 1.; - } else { - tscal = .5 / (smlnum * tmax); - dscal_(n, &tscal, &cnorm[1], &c__1); - } - -/* - Compute a bound on the computed solution vector to see if the - Level 2 BLAS routine ZTRSV can be used. -*/ - - xmax = 0.; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { -/* Computing MAX */ - i__2 = j; - d__3 = xmax, d__4 = (d__1 = x[i__2].r / 2., abs(d__1)) + (d__2 = - d_imag(&x[j]) / 2., abs(d__2)); - xmax = max(d__3,d__4); -/* L30: */ - } - xbnd = xmax; - - if (notran) { - -/* Compute the growth in A * x = b. */ - - if (upper) { - jfirst = *n; - jlast = 1; - jinc = -1; - } else { - jfirst = 1; - jlast = *n; - jinc = 1; - } - - if (tscal != 1.) { - grow = 0.; - goto L60; - } - - if (nounit) { - -/* - A is non-unit triangular. - - Compute GROW = 1/G(j) and XBND = 1/M(j). 
- Initially, G(0) = max{x(i), i=1,...,n}. -*/ - - grow = .5 / max(xbnd,smlnum); - xbnd = grow; - i__1 = jlast; - i__2 = jinc; - for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { - -/* Exit the loop if the growth factor is too small. */ - - if (grow <= smlnum) { - goto L60; - } - - i__3 = j + j * a_dim1; - tjjs.r = a[i__3].r, tjjs.i = a[i__3].i; - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs( - d__2)); - - if (tjj >= smlnum) { - -/* - M(j) = G(j-1) / abs(A(j,j)) - - Computing MIN -*/ - d__1 = xbnd, d__2 = min(1.,tjj) * grow; - xbnd = min(d__1,d__2); - } else { - -/* M(j) could overflow, set XBND to 0. */ - - xbnd = 0.; - } - - if (tjj + cnorm[j] >= smlnum) { - -/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */ - - grow *= tjj / (tjj + cnorm[j]); - } else { - -/* G(j) could overflow, set GROW to 0. */ - - grow = 0.; - } -/* L40: */ - } - grow = xbnd; - } else { - -/* - A is unit triangular. - - Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. - - Computing MIN -*/ - d__1 = 1., d__2 = .5 / max(xbnd,smlnum); - grow = min(d__1,d__2); - i__2 = jlast; - i__1 = jinc; - for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) { - -/* Exit the loop if the growth factor is too small. */ - - if (grow <= smlnum) { - goto L60; - } - -/* G(j) = G(j-1)*( 1 + CNORM(j) ) */ - - grow *= 1. / (cnorm[j] + 1.); -/* L50: */ - } - } -L60: - - ; - } else { - -/* Compute the growth in A**T * x = b or A**H * x = b. */ - - if (upper) { - jfirst = 1; - jlast = *n; - jinc = 1; - } else { - jfirst = *n; - jlast = 1; - jinc = -1; - } - - if (tscal != 1.) { - grow = 0.; - goto L90; - } - - if (nounit) { - -/* - A is non-unit triangular. - - Compute GROW = 1/G(j) and XBND = 1/M(j). - Initially, M(0) = max{x(i), i=1,...,n}. -*/ - - grow = .5 / max(xbnd,smlnum); - xbnd = grow; - i__1 = jlast; - i__2 = jinc; - for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { - -/* Exit the loop if the growth factor is too small. 
*/ - - if (grow <= smlnum) { - goto L90; - } - -/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */ - - xj = cnorm[j] + 1.; -/* Computing MIN */ - d__1 = grow, d__2 = xbnd / xj; - grow = min(d__1,d__2); - - i__3 = j + j * a_dim1; - tjjs.r = a[i__3].r, tjjs.i = a[i__3].i; - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs( - d__2)); - - if (tjj >= smlnum) { - -/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */ - - if (xj > tjj) { - xbnd *= tjj / xj; - } - } else { - -/* M(j) could overflow, set XBND to 0. */ - - xbnd = 0.; - } -/* L70: */ - } - grow = min(grow,xbnd); - } else { - -/* - A is unit triangular. - - Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. - - Computing MIN -*/ - d__1 = 1., d__2 = .5 / max(xbnd,smlnum); - grow = min(d__1,d__2); - i__2 = jlast; - i__1 = jinc; - for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) { - -/* Exit the loop if the growth factor is too small. */ - - if (grow <= smlnum) { - goto L90; - } - -/* G(j) = ( 1 + CNORM(j) )*G(j-1) */ - - xj = cnorm[j] + 1.; - grow /= xj; -/* L80: */ - } - } -L90: - ; - } - - if (grow * tscal > smlnum) { - -/* - Use the Level 2 BLAS solve if the reciprocal of the bound on - elements of X is not too small. -*/ - - ztrsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1); - } else { - -/* Use a Level 1 BLAS solve, scaling intermediate results. */ - - if (xmax > bignum * .5) { - -/* - Scale X so that its components are less than or equal to - BIGNUM in absolute value. -*/ - - *scale = bignum * .5 / xmax; - zdscal_(n, scale, &x[1], &c__1); - xmax = bignum; - } else { - xmax *= 2.; - } - - if (notran) { - -/* Solve A * x = b */ - - i__1 = jlast; - i__2 = jinc; - for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { - -/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. 
*/ - - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]), - abs(d__2)); - if (nounit) { - i__3 = j + j * a_dim1; - z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3].i; - tjjs.r = z__1.r, tjjs.i = z__1.i; - } else { - tjjs.r = tscal, tjjs.i = 0.; - if (tscal == 1.) { - goto L110; - } - } - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs( - d__2)); - if (tjj > smlnum) { - -/* abs(A(j,j)) > SMLNUM: */ - - if (tjj < 1.) { - if (xj > tjj * bignum) { - -/* Scale x by 1/b(j). */ - - rec = 1. / xj; - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - } - i__3 = j; - zladiv_(&z__1, &x[j], &tjjs); - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]) - , abs(d__2)); - } else if (tjj > 0.) { - -/* 0 < abs(A(j,j)) <= SMLNUM: */ - - if (xj > tjj * bignum) { - -/* - Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM - to avoid overflow when dividing by A(j,j). -*/ - - rec = tjj * bignum / xj; - if (cnorm[j] > 1.) { - -/* - Scale by 1/CNORM(j) to avoid overflow when - multiplying x(j) times column j. -*/ - - rec /= cnorm[j]; - } - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - i__3 = j; - zladiv_(&z__1, &x[j], &tjjs); - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]) - , abs(d__2)); - } else { - -/* - A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and - scale = 0, and compute a solution to A*x = 0. -*/ - - i__3 = *n; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__; - x[i__4].r = 0., x[i__4].i = 0.; -/* L100: */ - } - i__3 = j; - x[i__3].r = 1., x[i__3].i = 0.; - xj = 1.; - *scale = 0.; - xmax = 0.; - } -L110: - -/* - Scale x if necessary to avoid overflow when adding a - multiple of column j of A. -*/ - - if (xj > 1.) { - rec = 1. / xj; - if (cnorm[j] > (bignum - xmax) * rec) { - -/* Scale x by 1/(2*abs(x(j))). 
*/ - - rec *= .5; - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - } - } else if (xj * cnorm[j] > bignum - xmax) { - -/* Scale x by 1/2. */ - - zdscal_(n, &c_b2210, &x[1], &c__1); - *scale *= .5; - } - - if (upper) { - if (j > 1) { - -/* - Compute the update - x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) -*/ - - i__3 = j - 1; - i__4 = j; - z__2.r = -x[i__4].r, z__2.i = -x[i__4].i; - z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i; - zaxpy_(&i__3, &z__1, &a[j * a_dim1 + 1], &c__1, &x[1], - &c__1); - i__3 = j - 1; - i__ = izamax_(&i__3, &x[1], &c__1); - i__3 = i__; - xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag( - &x[i__]), abs(d__2)); - } - } else { - if (j < *n) { - -/* - Compute the update - x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) -*/ - - i__3 = *n - j; - i__4 = j; - z__2.r = -x[i__4].r, z__2.i = -x[i__4].i; - z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i; - zaxpy_(&i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, & - x[j + 1], &c__1); - i__3 = *n - j; - i__ = j + izamax_(&i__3, &x[j + 1], &c__1); - i__3 = i__; - xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag( - &x[i__]), abs(d__2)); - } - } -/* L120: */ - } - - } else if (lsame_(trans, "T")) { - -/* Solve A**T * x = b */ - - i__2 = jlast; - i__1 = jinc; - for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) { - -/* - Compute x(j) = b(j) - sum A(k,j)*x(k). - k<>j -*/ - - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]), - abs(d__2)); - uscal.r = tscal, uscal.i = 0.; - rec = 1. / max(xmax,1.); - if (cnorm[j] > (bignum - xj) * rec) { - -/* If x(j) could overflow, scale x by 1/(2*XMAX). */ - - rec *= .5; - if (nounit) { - i__3 = j + j * a_dim1; - z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3] - .i; - tjjs.r = z__1.r, tjjs.i = z__1.i; - } else { - tjjs.r = tscal, tjjs.i = 0.; - } - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), - abs(d__2)); - if (tjj > 1.) { - -/* - Divide by A(j,j) when scaling x if A(j,j) > 1. 
- - Computing MIN -*/ - d__1 = 1., d__2 = rec * tjj; - rec = min(d__1,d__2); - zladiv_(&z__1, &uscal, &tjjs); - uscal.r = z__1.r, uscal.i = z__1.i; - } - if (rec < 1.) { - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - } - - csumj.r = 0., csumj.i = 0.; - if ((uscal.r == 1. && uscal.i == 0.)) { - -/* - If the scaling needed for A in the dot product is 1, - call ZDOTU to perform the dot product. -*/ - - if (upper) { - i__3 = j - 1; - zdotu_(&z__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1], - &c__1); - csumj.r = z__1.r, csumj.i = z__1.i; - } else if (j < *n) { - i__3 = *n - j; - zdotu_(&z__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, & - x[j + 1], &c__1); - csumj.r = z__1.r, csumj.i = z__1.i; - } - } else { - -/* Otherwise, use in-line code for the dot product. */ - - if (upper) { - i__3 = j - 1; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * a_dim1; - z__3.r = a[i__4].r * uscal.r - a[i__4].i * - uscal.i, z__3.i = a[i__4].r * uscal.i + a[ - i__4].i * uscal.r; - i__5 = i__; - z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i, - z__2.i = z__3.r * x[i__5].i + z__3.i * x[ - i__5].r; - z__1.r = csumj.r + z__2.r, z__1.i = csumj.i + - z__2.i; - csumj.r = z__1.r, csumj.i = z__1.i; -/* L130: */ - } - } else if (j < *n) { - i__3 = *n; - for (i__ = j + 1; i__ <= i__3; ++i__) { - i__4 = i__ + j * a_dim1; - z__3.r = a[i__4].r * uscal.r - a[i__4].i * - uscal.i, z__3.i = a[i__4].r * uscal.i + a[ - i__4].i * uscal.r; - i__5 = i__; - z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i, - z__2.i = z__3.r * x[i__5].i + z__3.i * x[ - i__5].r; - z__1.r = csumj.r + z__2.r, z__1.i = csumj.i + - z__2.i; - csumj.r = z__1.r, csumj.i = z__1.i; -/* L140: */ - } - } - } - - z__1.r = tscal, z__1.i = 0.; - if ((uscal.r == z__1.r && uscal.i == z__1.i)) { - -/* - Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) - was not used to scale the dotproduct. 
-*/ - - i__3 = j; - i__4 = j; - z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i - - csumj.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]) - , abs(d__2)); - if (nounit) { - i__3 = j + j * a_dim1; - z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3] - .i; - tjjs.r = z__1.r, tjjs.i = z__1.i; - } else { - tjjs.r = tscal, tjjs.i = 0.; - if (tscal == 1.) { - goto L160; - } - } - -/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */ - - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), - abs(d__2)); - if (tjj > smlnum) { - -/* abs(A(j,j)) > SMLNUM: */ - - if (tjj < 1.) { - if (xj > tjj * bignum) { - -/* Scale X by 1/abs(x(j)). */ - - rec = 1. / xj; - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - } - i__3 = j; - zladiv_(&z__1, &x[j], &tjjs); - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - } else if (tjj > 0.) { - -/* 0 < abs(A(j,j)) <= SMLNUM: */ - - if (xj > tjj * bignum) { - -/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */ - - rec = tjj * bignum / xj; - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - i__3 = j; - zladiv_(&z__1, &x[j], &tjjs); - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - } else { - -/* - A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and - scale = 0 and compute a solution to A**T *x = 0. -*/ - - i__3 = *n; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__; - x[i__4].r = 0., x[i__4].i = 0.; -/* L150: */ - } - i__3 = j; - x[i__3].r = 1., x[i__3].i = 0.; - *scale = 0.; - xmax = 0.; - } -L160: - ; - } else { - -/* - Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot - product has already been divided by 1/A(j,j). 
-*/ - - i__3 = j; - zladiv_(&z__2, &x[j], &tjjs); - z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - } -/* Computing MAX */ - i__3 = j; - d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = - d_imag(&x[j]), abs(d__2)); - xmax = max(d__3,d__4); -/* L170: */ - } - - } else { - -/* Solve A**H * x = b */ - - i__1 = jlast; - i__2 = jinc; - for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { - -/* - Compute x(j) = b(j) - sum A(k,j)*x(k). - k<>j -*/ - - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]), - abs(d__2)); - uscal.r = tscal, uscal.i = 0.; - rec = 1. / max(xmax,1.); - if (cnorm[j] > (bignum - xj) * rec) { - -/* If x(j) could overflow, scale x by 1/(2*XMAX). */ - - rec *= .5; - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i; - tjjs.r = z__1.r, tjjs.i = z__1.i; - } else { - tjjs.r = tscal, tjjs.i = 0.; - } - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), - abs(d__2)); - if (tjj > 1.) { - -/* - Divide by A(j,j) when scaling x if A(j,j) > 1. - - Computing MIN -*/ - d__1 = 1., d__2 = rec * tjj; - rec = min(d__1,d__2); - zladiv_(&z__1, &uscal, &tjjs); - uscal.r = z__1.r, uscal.i = z__1.i; - } - if (rec < 1.) { - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - } - - csumj.r = 0., csumj.i = 0.; - if ((uscal.r == 1. && uscal.i == 0.)) { - -/* - If the scaling needed for A in the dot product is 1, - call ZDOTC to perform the dot product. -*/ - - if (upper) { - i__3 = j - 1; - zdotc_(&z__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1], - &c__1); - csumj.r = z__1.r, csumj.i = z__1.i; - } else if (j < *n) { - i__3 = *n - j; - zdotc_(&z__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, & - x[j + 1], &c__1); - csumj.r = z__1.r, csumj.i = z__1.i; - } - } else { - -/* Otherwise, use in-line code for the dot product. 
*/ - - if (upper) { - i__3 = j - 1; - for (i__ = 1; i__ <= i__3; ++i__) { - d_cnjg(&z__4, &a[i__ + j * a_dim1]); - z__3.r = z__4.r * uscal.r - z__4.i * uscal.i, - z__3.i = z__4.r * uscal.i + z__4.i * - uscal.r; - i__4 = i__; - z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, - z__2.i = z__3.r * x[i__4].i + z__3.i * x[ - i__4].r; - z__1.r = csumj.r + z__2.r, z__1.i = csumj.i + - z__2.i; - csumj.r = z__1.r, csumj.i = z__1.i; -/* L180: */ - } - } else if (j < *n) { - i__3 = *n; - for (i__ = j + 1; i__ <= i__3; ++i__) { - d_cnjg(&z__4, &a[i__ + j * a_dim1]); - z__3.r = z__4.r * uscal.r - z__4.i * uscal.i, - z__3.i = z__4.r * uscal.i + z__4.i * - uscal.r; - i__4 = i__; - z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, - z__2.i = z__3.r * x[i__4].i + z__3.i * x[ - i__4].r; - z__1.r = csumj.r + z__2.r, z__1.i = csumj.i + - z__2.i; - csumj.r = z__1.r, csumj.i = z__1.i; -/* L190: */ - } - } - } - - z__1.r = tscal, z__1.i = 0.; - if ((uscal.r == z__1.r && uscal.i == z__1.i)) { - -/* - Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) - was not used to scale the dotproduct. -*/ - - i__3 = j; - i__4 = j; - z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i - - csumj.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - i__3 = j; - xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]) - , abs(d__2)); - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i; - tjjs.r = z__1.r, tjjs.i = z__1.i; - } else { - tjjs.r = tscal, tjjs.i = 0.; - if (tscal == 1.) { - goto L210; - } - } - -/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */ - - tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), - abs(d__2)); - if (tjj > smlnum) { - -/* abs(A(j,j)) > SMLNUM: */ - - if (tjj < 1.) { - if (xj > tjj * bignum) { - -/* Scale X by 1/abs(x(j)). */ - - rec = 1. 
/ xj; - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - } - i__3 = j; - zladiv_(&z__1, &x[j], &tjjs); - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - } else if (tjj > 0.) { - -/* 0 < abs(A(j,j)) <= SMLNUM: */ - - if (xj > tjj * bignum) { - -/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */ - - rec = tjj * bignum / xj; - zdscal_(n, &rec, &x[1], &c__1); - *scale *= rec; - xmax *= rec; - } - i__3 = j; - zladiv_(&z__1, &x[j], &tjjs); - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - } else { - -/* - A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and - scale = 0 and compute a solution to A**H *x = 0. -*/ - - i__3 = *n; - for (i__ = 1; i__ <= i__3; ++i__) { - i__4 = i__; - x[i__4].r = 0., x[i__4].i = 0.; -/* L200: */ - } - i__3 = j; - x[i__3].r = 1., x[i__3].i = 0.; - *scale = 0.; - xmax = 0.; - } -L210: - ; - } else { - -/* - Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot - product has already been divided by 1/A(j,j). -*/ - - i__3 = j; - zladiv_(&z__2, &x[j], &tjjs); - z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; - } -/* Computing MAX */ - i__3 = j; - d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = - d_imag(&x[j]), abs(d__2)); - xmax = max(d__3,d__4); -/* L220: */ - } - } - *scale /= tscal; - } - -/* Scale the column norms by 1/TSCAL for return. */ - - if (tscal != 1.) { - d__1 = 1. 
/ tscal; - dscal_(n, &d__1, &cnorm[1], &c__1); - } - - return 0; - -/* End of ZLATRS */ - -} /* zlatrs_ */ - -/* Subroutine */ int zpotf2_(char *uplo, integer *n, doublecomplex *a, - integer *lda, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublereal d__1; - doublecomplex z__1, z__2; - - /* Builtin functions */ - double sqrt(doublereal); - - /* Local variables */ - static integer j; - static doublereal ajj; - extern logical lsame_(char *, char *); - extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *); - extern /* Subroutine */ int zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *); - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_( - integer *, doublereal *, doublecomplex *, integer *), zlacgv_( - integer *, doublecomplex *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZPOTF2 computes the Cholesky factorization of a complex Hermitian - positive definite matrix A. - - The factorization has the form - A = U' * U , if UPLO = 'U', or - A = L * L', if UPLO = 'L', - where U is an upper triangular matrix and L is lower triangular. - - This is the unblocked version of the algorithm, calling Level 2 BLAS. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - Specifies whether the upper or lower triangular part of the - Hermitian matrix A is stored. - = 'U': Upper triangular - = 'L': Lower triangular - - N (input) INTEGER - The order of the matrix A. N >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the Hermitian matrix A. 
If UPLO = 'U', the leading - n by n upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading n by n lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - - On exit, if INFO = 0, the factor U or L from the Cholesky - factorization A = U'*U or A = L*L'. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not - positive definite, and the factorization could not be - completed. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZPOTF2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - if (upper) { - -/* Compute the Cholesky factorization A = U'*U. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - -/* Compute U(J,J) and test for non-positive-definiteness. */ - - i__2 = j + j * a_dim1; - d__1 = a[i__2].r; - i__3 = j - 1; - zdotc_(&z__2, &i__3, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1] - , &c__1); - z__1.r = d__1 - z__2.r, z__1.i = -z__2.i; - ajj = z__1.r; - if (ajj <= 0.) { - i__2 = j + j * a_dim1; - a[i__2].r = ajj, a[i__2].i = 0.; - goto L30; - } - ajj = sqrt(ajj); - i__2 = j + j * a_dim1; - a[i__2].r = ajj, a[i__2].i = 0.; - -/* Compute elements J+1:N of row J. 
*/ - - if (j < *n) { - i__2 = j - 1; - zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1); - i__2 = j - 1; - i__3 = *n - j; - z__1.r = -1., z__1.i = -0.; - zgemv_("Transpose", &i__2, &i__3, &z__1, &a[(j + 1) * a_dim1 - + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b60, &a[j + ( - j + 1) * a_dim1], lda); - i__2 = j - 1; - zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1); - i__2 = *n - j; - d__1 = 1. / ajj; - zdscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda); - } -/* L10: */ - } - } else { - -/* Compute the Cholesky factorization A = L*L'. */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - -/* Compute L(J,J) and test for non-positive-definiteness. */ - - i__2 = j + j * a_dim1; - d__1 = a[i__2].r; - i__3 = j - 1; - zdotc_(&z__2, &i__3, &a[j + a_dim1], lda, &a[j + a_dim1], lda); - z__1.r = d__1 - z__2.r, z__1.i = -z__2.i; - ajj = z__1.r; - if (ajj <= 0.) { - i__2 = j + j * a_dim1; - a[i__2].r = ajj, a[i__2].i = 0.; - goto L30; - } - ajj = sqrt(ajj); - i__2 = j + j * a_dim1; - a[i__2].r = ajj, a[i__2].i = 0.; - -/* Compute elements J+1:N of column J. */ - - if (j < *n) { - i__2 = j - 1; - zlacgv_(&i__2, &a[j + a_dim1], lda); - i__2 = *n - j; - i__3 = j - 1; - z__1.r = -1., z__1.i = -0.; - zgemv_("No transpose", &i__2, &i__3, &z__1, &a[j + 1 + a_dim1] - , lda, &a[j + a_dim1], lda, &c_b60, &a[j + 1 + j * - a_dim1], &c__1); - i__2 = j - 1; - zlacgv_(&i__2, &a[j + a_dim1], lda); - i__2 = *n - j; - d__1 = 1. 
/ ajj; - zdscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); - } -/* L20: */ - } - } - goto L40; - -L30: - *info = j; - -L40: - return 0; - -/* End of ZPOTF2 */ - -} /* zpotf2_ */ - -/* Subroutine */ int zpotrf_(char *uplo, integer *n, doublecomplex *a, - integer *lda, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - doublecomplex z__1; - - /* Local variables */ - static integer j, jb, nb; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, - integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), zherk_(char *, char *, integer *, - integer *, doublereal *, doublecomplex *, integer *, doublereal *, - doublecomplex *, integer *); - static logical upper; - extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *, - integer *, integer *, doublecomplex *, doublecomplex *, integer *, - doublecomplex *, integer *), - zpotf2_(char *, integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZPOTRF computes the Cholesky factorization of a complex Hermitian - positive definite matrix A. - - The factorization has the form - A = U**H * U, if UPLO = 'U', or - A = L * L**H, if UPLO = 'L', - where U is an upper triangular matrix and L is lower triangular. - - This is the block version of the algorithm, calling Level 3 BLAS. - - Arguments - ========= - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A is stored; - = 'L': Lower triangle of A is stored. - - N (input) INTEGER - The order of the matrix A. N >= 0. 
- - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the Hermitian matrix A. If UPLO = 'U', the leading - N-by-N upper triangular part of A contains the upper - triangular part of the matrix A, and the strictly lower - triangular part of A is not referenced. If UPLO = 'L', the - leading N-by-N lower triangular part of A contains the lower - triangular part of the matrix A, and the strictly upper - triangular part of A is not referenced. - - On exit, if INFO = 0, the factor U or L from the Cholesky - factorization A = U**H*U or A = L*L**H. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: if INFO = i, the leading minor of order i is not - positive definite, and the factorization could not be - completed. - - ===================================================================== - - - Test the input parameters. -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - - /* Function Body */ - *info = 0; - upper = lsame_(uplo, "U"); - if ((! upper && ! lsame_(uplo, "L"))) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*lda < max(1,*n)) { - *info = -4; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZPOTRF", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - -/* Determine the block size for this environment. */ - - nb = ilaenv_(&c__1, "ZPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, ( - ftnlen)1); - if (nb <= 1 || nb >= *n) { - -/* Use unblocked code. */ - - zpotf2_(uplo, n, &a[a_offset], lda, info); - } else { - -/* Use blocked code. */ - - if (upper) { - -/* Compute the Cholesky factorization A = U'*U. */ - - i__1 = *n; - i__2 = nb; - for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { - -/* - Update and factorize the current diagonal block and test - for non-positive-definiteness. 
- - Computing MIN -*/ - i__3 = nb, i__4 = *n - j + 1; - jb = min(i__3,i__4); - i__3 = j - 1; - zherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b1294, & - a[j * a_dim1 + 1], lda, &c_b1015, &a[j + j * a_dim1], - lda); - zpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info); - if (*info != 0) { - goto L30; - } - if (j + jb <= *n) { - -/* Compute the current block row. */ - - i__3 = *n - j - jb + 1; - i__4 = j - 1; - z__1.r = -1., z__1.i = -0.; - zgemm_("Conjugate transpose", "No transpose", &jb, &i__3, - &i__4, &z__1, &a[j * a_dim1 + 1], lda, &a[(j + jb) - * a_dim1 + 1], lda, &c_b60, &a[j + (j + jb) * - a_dim1], lda); - i__3 = *n - j - jb + 1; - ztrsm_("Left", "Upper", "Conjugate transpose", "Non-unit", - &jb, &i__3, &c_b60, &a[j + j * a_dim1], lda, &a[ - j + (j + jb) * a_dim1], lda); - } -/* L10: */ - } - - } else { - -/* Compute the Cholesky factorization A = L*L'. */ - - i__2 = *n; - i__1 = nb; - for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) { - -/* - Update and factorize the current diagonal block and test - for non-positive-definiteness. - - Computing MIN -*/ - i__3 = nb, i__4 = *n - j + 1; - jb = min(i__3,i__4); - i__3 = j - 1; - zherk_("Lower", "No transpose", &jb, &i__3, &c_b1294, &a[j + - a_dim1], lda, &c_b1015, &a[j + j * a_dim1], lda); - zpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info); - if (*info != 0) { - goto L30; - } - if (j + jb <= *n) { - -/* Compute the current block column. 
*/ - - i__3 = *n - j - jb + 1; - i__4 = j - 1; - z__1.r = -1., z__1.i = -0.; - zgemm_("No transpose", "Conjugate transpose", &i__3, &jb, - &i__4, &z__1, &a[j + jb + a_dim1], lda, &a[j + - a_dim1], lda, &c_b60, &a[j + jb + j * a_dim1], - lda); - i__3 = *n - j - jb + 1; - ztrsm_("Right", "Lower", "Conjugate transpose", "Non-unit" - , &i__3, &jb, &c_b60, &a[j + j * a_dim1], lda, &a[ - j + jb + j * a_dim1], lda); - } -/* L20: */ - } - } - } - goto L40; - -L30: - *info = *info + j - 1; - -L40: - return 0; - -/* End of ZPOTRF */ - -} /* zpotrf_ */ - -/* Subroutine */ int zstedc_(char *compz, integer *n, doublereal *d__, - doublereal *e, doublecomplex *z__, integer *ldz, doublecomplex *work, - integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork, - integer *liwork, integer *info) -{ - /* System generated locals */ - integer z_dim1, z_offset, i__1, i__2, i__3, i__4; - doublereal d__1, d__2; - - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - - /* Local variables */ - static integer i__, j, k, m; - static doublereal p; - static integer ii, ll, end, lgn; - static doublereal eps, tiny; - extern logical lsame_(char *, char *); - static integer lwmin, start; - extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *), zlaed0_(integer *, integer *, - doublereal *, doublereal *, doublecomplex *, integer *, - doublecomplex *, integer *, doublereal *, integer *, integer *); - - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *), dstedc_(char *, integer *, - doublereal *, doublereal *, doublereal *, integer *, doublereal *, - integer *, integer *, integer *, integer *), dlaset_( - char *, integer *, integer *, doublereal *, doublereal *, - doublereal *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer 
*, integer *, - integer *, integer *, ftnlen, ftnlen); - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, - integer *), zlacrm_(integer *, integer *, doublecomplex *, - integer *, doublereal *, integer *, doublecomplex *, integer *, - doublereal *); - static integer liwmin, icompz; - extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, - doublereal *, doublereal *, integer *, doublereal *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *, - integer *, doublecomplex *, integer *); - static doublereal orgnrm; - static integer lrwmin; - static logical lquery; - static integer smlsiz; - extern /* Subroutine */ int zsteqr_(char *, integer *, doublereal *, - doublereal *, doublecomplex *, integer *, doublereal *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZSTEDC computes all eigenvalues and, optionally, eigenvectors of a - symmetric tridiagonal matrix using the divide and conquer method. - The eigenvectors of a full or band complex Hermitian matrix can also - be found if ZHETRD or ZHPTRD or ZHBTRD has been used to reduce this - matrix to tridiagonal form. - - This code makes very mild assumptions about floating point - arithmetic. It will work on machines with a guard digit in - add/subtract, or on those binary machines without guard digits - which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. - It could conceivably fail on hexadecimal or decimal machines - without guard digits, but we know of none. See DLAED3 for details. - - Arguments - ========= - - COMPZ (input) CHARACTER*1 - = 'N': Compute eigenvalues only. - = 'I': Compute eigenvectors of tridiagonal matrix also. - = 'V': Compute eigenvectors of original Hermitian matrix - also. 
On entry, Z contains the unitary matrix used - to reduce the original matrix to tridiagonal form. - - N (input) INTEGER - The dimension of the symmetric tridiagonal matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the diagonal elements of the tridiagonal matrix. - On exit, if INFO = 0, the eigenvalues in ascending order. - - E (input/output) DOUBLE PRECISION array, dimension (N-1) - On entry, the subdiagonal elements of the tridiagonal matrix. - On exit, E has been destroyed. - - Z (input/output) COMPLEX*16 array, dimension (LDZ,N) - On entry, if COMPZ = 'V', then Z contains the unitary - matrix used in the reduction to tridiagonal form. - On exit, if INFO = 0, then if COMPZ = 'V', Z contains the - orthonormal eigenvectors of the original Hermitian matrix, - and if COMPZ = 'I', Z contains the orthonormal eigenvectors - of the symmetric tridiagonal matrix. - If COMPZ = 'N', then Z is not referenced. - - LDZ (input) INTEGER - The leading dimension of the array Z. LDZ >= 1. - If eigenvectors are desired, then LDZ >= max(1,N). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If COMPZ = 'N' or 'I', or N <= 1, LWORK must be at least 1. - If COMPZ = 'V' and N > 1, LWORK must be at least N*N. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - RWORK (workspace/output) DOUBLE PRECISION array, - dimension (LRWORK) - On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. - - LRWORK (input) INTEGER - The dimension of the array RWORK. - If COMPZ = 'N' or N <= 1, LRWORK must be at least 1. 
- If COMPZ = 'V' and N > 1, LRWORK must be at least - 1 + 3*N + 2*N*lg N + 3*N**2 , - where lg( N ) = smallest integer k such - that 2**k >= N. - If COMPZ = 'I' and N > 1, LRWORK must be at least - 1 + 4*N + 2*N**2 . - - If LRWORK = -1, then a workspace query is assumed; the - routine only calculates the optimal size of the RWORK array, - returns this value as the first entry of the RWORK array, and - no error message related to LRWORK is issued by XERBLA. - - IWORK (workspace/output) INTEGER array, dimension (LIWORK) - On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. - - LIWORK (input) INTEGER - The dimension of the array IWORK. - If COMPZ = 'N' or N <= 1, LIWORK must be at least 1. - If COMPZ = 'V' or N > 1, LIWORK must be at least - 6 + 6*N + 5*N*lg N. - If COMPZ = 'I' or N > 1, LIWORK must be at least - 3 + 5*N . - - If LIWORK = -1, then a workspace query is assumed; the - routine only calculates the optimal size of the IWORK array, - returns this value as the first entry of the IWORK array, and - no error message related to LIWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value. - > 0: The algorithm failed to compute an eigenvalue while - working on the submatrix lying in rows and columns - INFO/(N+1) through mod(INFO,N+1). - - Further Details - =============== - - Based on contributions by - Jeff Rutter, Computer Science Division, University of California - at Berkeley, USA - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --e; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - --work; - --rwork; - --iwork; - - /* Function Body */ - *info = 0; - lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1; - - if (lsame_(compz, "N")) { - icompz = 0; - } else if (lsame_(compz, "V")) { - icompz = 1; - } else if (lsame_(compz, "I")) { - icompz = 2; - } else { - icompz = -1; - } - if (*n <= 1 || icompz <= 0) { - lwmin = 1; - liwmin = 1; - lrwmin = 1; - } else { - lgn = (integer) (log((doublereal) (*n)) / log(2.)); - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - if (pow_ii(&c__2, &lgn) < *n) { - ++lgn; - } - if (icompz == 1) { - lwmin = *n * *n; -/* Computing 2nd power */ - i__1 = *n; - lrwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3; - liwmin = *n * 6 + 6 + *n * 5 * lgn; - } else if (icompz == 2) { - lwmin = 1; -/* Computing 2nd power */ - i__1 = *n; - lrwmin = ((*n) << (2)) + 1 + ((i__1 * i__1) << (1)); - liwmin = *n * 5 + 3; - } - } - if (icompz < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) { - *info = -6; - } else if ((*lwork < lwmin && ! lquery)) { - *info = -8; - } else if ((*lrwork < lrwmin && ! lquery)) { - *info = -10; - } else if ((*liwork < liwmin && ! 
lquery)) { - *info = -12; - } - - if (*info == 0) { - work[1].r = (doublereal) lwmin, work[1].i = 0.; - rwork[1] = (doublereal) lrwmin; - iwork[1] = liwmin; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZSTEDC", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - if (*n == 1) { - if (icompz != 0) { - i__1 = z_dim1 + 1; - z__[i__1].r = 1., z__[i__1].i = 0.; - } - return 0; - } - - smlsiz = ilaenv_(&c__9, "ZSTEDC", " ", &c__0, &c__0, &c__0, &c__0, ( - ftnlen)6, (ftnlen)1); - -/* - If the following conditional clause is removed, then the routine - will use the Divide and Conquer routine to compute only the - eigenvalues, which requires (3N + 3N**2) real workspace and - (2 + 5N + 2N lg(N)) integer workspace. - Since on many architectures DSTERF is much faster than any other - algorithm for finding eigenvalues only, it is used here - as the default. - - If COMPZ = 'N', use DSTERF to compute the eigenvalues. -*/ - - if (icompz == 0) { - dsterf_(n, &d__[1], &e[1], info); - return 0; - } - -/* - If N is smaller than the minimum divide size (SMLSIZ+1), then - solve the problem with another solver. -*/ - - if (*n <= smlsiz) { - if (icompz == 0) { - dsterf_(n, &d__[1], &e[1], info); - return 0; - } else if (icompz == 2) { - zsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1], - info); - return 0; - } else { - zsteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1], - info); - return 0; - } - } - -/* If COMPZ = 'I', we simply call DSTEDC instead. 
*/ - - if (icompz == 2) { - dlaset_("Full", n, n, &c_b324, &c_b1015, &rwork[1], n); - ll = *n * *n + 1; - i__1 = *lrwork - ll + 1; - dstedc_("I", n, &d__[1], &e[1], &rwork[1], n, &rwork[ll], &i__1, & - iwork[1], liwork, info); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * z_dim1; - i__4 = (j - 1) * *n + i__; - z__[i__3].r = rwork[i__4], z__[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - return 0; - } - -/* - From now on, only option left to be handled is COMPZ = 'V', - i.e. ICOMPZ = 1. - - Scale. -*/ - - orgnrm = dlanst_("M", n, &d__[1], &e[1]); - if (orgnrm == 0.) { - return 0; - } - - eps = EPSILON; - - start = 1; - -/* while ( START <= N ) */ - -L30: - if (start <= *n) { - -/* - Let END be the position of the next subdiagonal entry such that - E( END ) <= TINY or END = N if no such subdiagonal exists. The - matrix identified by the elements between START and END - constitutes an independent sub-problem. -*/ - - end = start; -L40: - if (end < *n) { - tiny = eps * sqrt((d__1 = d__[end], abs(d__1))) * sqrt((d__2 = - d__[end + 1], abs(d__2))); - if ((d__1 = e[end], abs(d__1)) > tiny) { - ++end; - goto L40; - } - } - -/* (Sub) Problem determined. Compute its size and solve it. */ - - m = end - start + 1; - if (m > smlsiz) { - *info = smlsiz; - -/* Scale. */ - - orgnrm = dlanst_("M", &m, &d__[start], &e[start]); - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, &m, &c__1, &d__[ - start], &m, info); - i__1 = m - 1; - i__2 = m - 1; - dlascl_("G", &c__0, &c__0, &orgnrm, &c_b1015, &i__1, &c__1, &e[ - start], &i__2, info); - - zlaed0_(n, &m, &d__[start], &e[start], &z__[start * z_dim1 + 1], - ldz, &work[1], n, &rwork[1], &iwork[1], info); - if (*info > 0) { - *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m - + 1) + start - 1; - return 0; - } - -/* Scale back. 
*/ - - dlascl_("G", &c__0, &c__0, &c_b1015, &orgnrm, &m, &c__1, &d__[ - start], &m, info); - - } else { - dsteqr_("I", &m, &d__[start], &e[start], &rwork[1], &m, &rwork[m * - m + 1], info); - zlacrm_(n, &m, &z__[start * z_dim1 + 1], ldz, &rwork[1], &m, & - work[1], n, &rwork[m * m + 1]); - zlacpy_("A", n, &m, &work[1], n, &z__[start * z_dim1 + 1], ldz); - if (*info > 0) { - *info = start * (*n + 1) + end; - return 0; - } - } - - start = end + 1; - goto L30; - } - -/* - endwhile - - If the problem split any number of times, then the eigenvalues - will not be properly ordered. Here we permute the eigenvalues - (and the associated eigenvectors) into ascending order. -*/ - - if (m != *n) { - -/* Use Selection Sort to minimize swaps of eigenvectors */ - - i__1 = *n; - for (ii = 2; ii <= i__1; ++ii) { - i__ = ii - 1; - k = i__; - p = d__[i__]; - i__2 = *n; - for (j = ii; j <= i__2; ++j) { - if (d__[j] < p) { - k = j; - p = d__[j]; - } -/* L50: */ - } - if (k != i__) { - d__[k] = d__[i__]; - d__[i__] = p; - zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], - &c__1); - } -/* L60: */ - } - } - - work[1].r = (doublereal) lwmin, work[1].i = 0.; - rwork[1] = (doublereal) lrwmin; - iwork[1] = liwmin; - - return 0; - -/* End of ZSTEDC */ - -} /* zstedc_ */ - -/* Subroutine */ int zsteqr_(char *compz, integer *n, doublereal *d__, - doublereal *e, doublecomplex *z__, integer *ldz, doublereal *work, - integer *info) -{ - /* System generated locals */ - integer z_dim1, z_offset, i__1, i__2; - doublereal d__1, d__2; - - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - - /* Local variables */ - static doublereal b, c__, f, g; - static integer i__, j, k, l, m; - static doublereal p, r__, s; - static integer l1, ii, mm, lm1, mm1, nm1; - static doublereal rt1, rt2, eps; - static integer lsv; - static doublereal tst, eps2; - static integer lend, jtot; - extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal - *, 
doublereal *, doublereal *); - extern logical lsame_(char *, char *); - static doublereal anorm; - extern /* Subroutine */ int zlasr_(char *, char *, char *, integer *, - integer *, doublereal *, doublereal *, doublecomplex *, integer *), zswap_(integer *, doublecomplex *, - integer *, doublecomplex *, integer *), dlaev2_(doublereal *, - doublereal *, doublereal *, doublereal *, doublereal *, - doublereal *, doublereal *); - static integer lendm1, lendp1; - - static integer iscale; - extern /* Subroutine */ int dlascl_(char *, integer *, integer *, - doublereal *, doublereal *, integer *, integer *, doublereal *, - integer *, integer *); - static doublereal safmin; - extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, - doublereal *, doublereal *, doublereal *); - static doublereal safmax; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); - extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, - integer *); - static integer lendsv; - static doublereal ssfmin; - static integer nmaxit, icompz; - static doublereal ssfmax; - extern /* Subroutine */ int zlaset_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, doublecomplex *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZSTEQR computes all eigenvalues and, optionally, eigenvectors of a - symmetric tridiagonal matrix using the implicit QL or QR method. - The eigenvectors of a full or band complex Hermitian matrix can also - be found if ZHETRD or ZHPTRD or ZHBTRD has been used to reduce this - matrix to tridiagonal form. - - Arguments - ========= - - COMPZ (input) CHARACTER*1 - = 'N': Compute eigenvalues only. - = 'V': Compute eigenvalues and eigenvectors of the original - Hermitian matrix. 
On entry, Z must contain the - unitary matrix used to reduce the original matrix - to tridiagonal form. - = 'I': Compute eigenvalues and eigenvectors of the - tridiagonal matrix. Z is initialized to the identity - matrix. - - N (input) INTEGER - The order of the matrix. N >= 0. - - D (input/output) DOUBLE PRECISION array, dimension (N) - On entry, the diagonal elements of the tridiagonal matrix. - On exit, if INFO = 0, the eigenvalues in ascending order. - - E (input/output) DOUBLE PRECISION array, dimension (N-1) - On entry, the (n-1) subdiagonal elements of the tridiagonal - matrix. - On exit, E has been destroyed. - - Z (input/output) COMPLEX*16 array, dimension (LDZ, N) - On entry, if COMPZ = 'V', then Z contains the unitary - matrix used in the reduction to tridiagonal form. - On exit, if INFO = 0, then if COMPZ = 'V', Z contains the - orthonormal eigenvectors of the original Hermitian matrix, - and if COMPZ = 'I', Z contains the orthonormal eigenvectors - of the symmetric tridiagonal matrix. - If COMPZ = 'N', then Z is not referenced. - - LDZ (input) INTEGER - The leading dimension of the array Z. LDZ >= 1, and if - eigenvectors are desired, then LDZ >= max(1,N). - - WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) - If COMPZ = 'N', then WORK is not referenced. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - > 0: the algorithm has failed to find all the eigenvalues in - a total of 30*N iterations; if INFO = i, then i - elements of E have not converged to zero; on exit, D - and E contain the elements of a symmetric tridiagonal - matrix which is unitarily similar to the original - matrix. - - ===================================================================== - - - Test the input parameters. 
-*/ - - /* Parameter adjustments */ - --d__; - --e; - z_dim1 = *ldz; - z_offset = 1 + z_dim1 * 1; - z__ -= z_offset; - --work; - - /* Function Body */ - *info = 0; - - if (lsame_(compz, "N")) { - icompz = 0; - } else if (lsame_(compz, "V")) { - icompz = 1; - } else if (lsame_(compz, "I")) { - icompz = 2; - } else { - icompz = -1; - } - if (icompz < 0) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) { - *info = -6; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZSTEQR", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - return 0; - } - - if (*n == 1) { - if (icompz == 2) { - i__1 = z_dim1 + 1; - z__[i__1].r = 1., z__[i__1].i = 0.; - } - return 0; - } - -/* Determine the unit roundoff and over/underflow thresholds. */ - - eps = EPSILON; -/* Computing 2nd power */ - d__1 = eps; - eps2 = d__1 * d__1; - safmin = SAFEMINIMUM; - safmax = 1. / safmin; - ssfmax = sqrt(safmax) / 3.; - ssfmin = sqrt(safmin) / eps2; - -/* - Compute the eigenvalues and eigenvectors of the tridiagonal - matrix. -*/ - - if (icompz == 2) { - zlaset_("Full", n, n, &c_b59, &c_b60, &z__[z_offset], ldz); - } - - nmaxit = *n * 30; - jtot = 0; - -/* - Determine where the matrix splits and choose QL or QR iteration - for each block, according to whether top or bottom diagonal - element is smaller. -*/ - - l1 = 1; - nm1 = *n - 1; - -L10: - if (l1 > *n) { - goto L160; - } - if (l1 > 1) { - e[l1 - 1] = 0.; - } - if (l1 <= nm1) { - i__1 = nm1; - for (m = l1; m <= i__1; ++m) { - tst = (d__1 = e[m], abs(d__1)); - if (tst == 0.) 
{ - goto L30; - } - if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m - + 1], abs(d__2))) * eps) { - e[m] = 0.; - goto L30; - } -/* L20: */ - } - } - m = *n; - -L30: - l = l1; - lsv = l; - lend = m; - lendsv = lend; - l1 = m + 1; - if (lend == l) { - goto L10; - } - -/* Scale submatrix in rows and columns L to LEND */ - - i__1 = lend - l + 1; - anorm = dlanst_("I", &i__1, &d__[l], &e[l]); - iscale = 0; - if (anorm == 0.) { - goto L10; - } - if (anorm > ssfmax) { - iscale = 1; - i__1 = lend - l + 1; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, - info); - i__1 = lend - l; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, - info); - } else if (anorm < ssfmin) { - iscale = 2; - i__1 = lend - l + 1; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, - info); - i__1 = lend - l; - dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, - info); - } - -/* Choose between QL and QR iteration */ - - if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { - lend = lsv; - l = lendsv; - } - - if (lend > l) { - -/* - QL Iteration - - Look for small subdiagonal element. -*/ - -L40: - if (l != lend) { - lendm1 = lend - 1; - i__1 = lendm1; - for (m = l; m <= i__1; ++m) { -/* Computing 2nd power */ - d__2 = (d__1 = e[m], abs(d__1)); - tst = d__2 * d__2; - if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m - + 1], abs(d__2)) + safmin) { - goto L60; - } -/* L50: */ - } - } - - m = lend; - -L60: - if (m < lend) { - e[m] = 0.; - } - p = d__[l]; - if (m == l) { - goto L80; - } - -/* - If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 - to compute its eigensystem. 
-*/ - - if (m == l + 1) { - if (icompz > 0) { - dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s); - work[l] = c__; - work[*n - 1 + l] = s; - zlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], & - z__[l * z_dim1 + 1], ldz); - } else { - dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2); - } - d__[l] = rt1; - d__[l + 1] = rt2; - e[l] = 0.; - l += 2; - if (l <= lend) { - goto L40; - } - goto L140; - } - - if (jtot == nmaxit) { - goto L140; - } - ++jtot; - -/* Form shift. */ - - g = (d__[l + 1] - p) / (e[l] * 2.); - r__ = dlapy2_(&g, &c_b1015); - g = d__[m] - p + e[l] / (g + d_sign(&r__, &g)); - - s = 1.; - c__ = 1.; - p = 0.; - -/* Inner loop */ - - mm1 = m - 1; - i__1 = l; - for (i__ = mm1; i__ >= i__1; --i__) { - f = s * e[i__]; - b = c__ * e[i__]; - dlartg_(&g, &f, &c__, &s, &r__); - if (i__ != m - 1) { - e[i__ + 1] = r__; - } - g = d__[i__ + 1] - p; - r__ = (d__[i__] - g) * s + c__ * 2. * b; - p = s * r__; - d__[i__ + 1] = g + p; - g = c__ * r__ - b; - -/* If eigenvectors are desired, then save rotations. */ - - if (icompz > 0) { - work[i__] = c__; - work[*n - 1 + i__] = -s; - } - -/* L70: */ - } - -/* If eigenvectors are desired, then apply saved rotations. */ - - if (icompz > 0) { - mm = m - l + 1; - zlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l - * z_dim1 + 1], ldz); - } - - d__[l] -= p; - e[l] = g; - goto L40; - -/* Eigenvalue found. */ - -L80: - d__[l] = p; - - ++l; - if (l <= lend) { - goto L40; - } - goto L140; - - } else { - -/* - QR Iteration - - Look for small superdiagonal element. 
-*/ - -L90: - if (l != lend) { - lendp1 = lend + 1; - i__1 = lendp1; - for (m = l; m >= i__1; --m) { -/* Computing 2nd power */ - d__2 = (d__1 = e[m - 1], abs(d__1)); - tst = d__2 * d__2; - if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m - - 1], abs(d__2)) + safmin) { - goto L110; - } -/* L100: */ - } - } - - m = lend; - -L110: - if (m > lend) { - e[m - 1] = 0.; - } - p = d__[l]; - if (m == l) { - goto L130; - } - -/* - If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 - to compute its eigensystem. -*/ - - if (m == l - 1) { - if (icompz > 0) { - dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s) - ; - work[m] = c__; - work[*n - 1 + m] = s; - zlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], & - z__[(l - 1) * z_dim1 + 1], ldz); - } else { - dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2); - } - d__[l - 1] = rt1; - d__[l] = rt2; - e[l - 1] = 0.; - l += -2; - if (l >= lend) { - goto L90; - } - goto L140; - } - - if (jtot == nmaxit) { - goto L140; - } - ++jtot; - -/* Form shift. */ - - g = (d__[l - 1] - p) / (e[l - 1] * 2.); - r__ = dlapy2_(&g, &c_b1015); - g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g)); - - s = 1.; - c__ = 1.; - p = 0.; - -/* Inner loop */ - - lm1 = l - 1; - i__1 = lm1; - for (i__ = m; i__ <= i__1; ++i__) { - f = s * e[i__]; - b = c__ * e[i__]; - dlartg_(&g, &f, &c__, &s, &r__); - if (i__ != m) { - e[i__ - 1] = r__; - } - g = d__[i__] - p; - r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b; - p = s * r__; - d__[i__] = g + p; - g = c__ * r__ - b; - -/* If eigenvectors are desired, then save rotations. */ - - if (icompz > 0) { - work[i__] = c__; - work[*n - 1 + i__] = s; - } - -/* L120: */ - } - -/* If eigenvectors are desired, then apply saved rotations. */ - - if (icompz > 0) { - mm = l - m + 1; - zlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m - * z_dim1 + 1], ldz); - } - - d__[l] -= p; - e[lm1] = g; - goto L90; - -/* Eigenvalue found. 
*/ - -L130: - d__[l] = p; - - --l; - if (l >= lend) { - goto L90; - } - goto L140; - - } - -/* Undo scaling if necessary */ - -L140: - if (iscale == 1) { - i__1 = lendsv - lsv + 1; - dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], - n, info); - i__1 = lendsv - lsv; - dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n, - info); - } else if (iscale == 2) { - i__1 = lendsv - lsv + 1; - dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], - n, info); - i__1 = lendsv - lsv; - dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n, - info); - } - -/* - Check for no convergence to an eigenvalue after a total - of N*MAXIT iterations. -*/ - - if (jtot == nmaxit) { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - if (e[i__] != 0.) { - ++(*info); - } -/* L150: */ - } - return 0; - } - goto L10; - -/* Order eigenvalues and eigenvectors. */ - -L160: - if (icompz == 0) { - -/* Use Quick Sort */ - - dlasrt_("I", n, &d__[1], info); - - } else { - -/* Use Selection Sort to minimize swaps of eigenvectors */ - - i__1 = *n; - for (ii = 2; ii <= i__1; ++ii) { - i__ = ii - 1; - k = i__; - p = d__[i__]; - i__2 = *n; - for (j = ii; j <= i__2; ++j) { - if (d__[j] < p) { - k = j; - p = d__[j]; - } -/* L170: */ - } - if (k != i__) { - d__[k] = d__[i__]; - d__[i__] = p; - zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], - &c__1); - } -/* L180: */ - } - } - return 0; - -/* End of ZSTEQR */ - -} /* zsteqr_ */ - -/* Subroutine */ int ztrevc_(char *side, char *howmny, logical *select, - integer *n, doublecomplex *t, integer *ldt, doublecomplex *vl, - integer *ldvl, doublecomplex *vr, integer *ldvr, integer *mm, integer - *m, doublecomplex *work, doublereal *rwork, integer *info) -{ - /* System generated locals */ - integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, - i__2, i__3, i__4, i__5; - doublereal d__1, d__2, d__3; - doublecomplex z__1, z__2; - - /* Builtin functions */ - double 
d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, k, ii, ki, is; - static doublereal ulp; - static logical allv; - static doublereal unfl, ovfl, smin; - static logical over; - static doublereal scale; - extern logical lsame_(char *, char *); - static doublereal remax; - static logical leftv, bothv; - extern /* Subroutine */ int zgemv_(char *, integer *, integer *, - doublecomplex *, doublecomplex *, integer *, doublecomplex *, - integer *, doublecomplex *, doublecomplex *, integer *); - static logical somev; - extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, - doublecomplex *, integer *), dlabad_(doublereal *, doublereal *); - - extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_( - integer *, doublereal *, doublecomplex *, integer *); - extern integer izamax_(integer *, doublecomplex *, integer *); - static logical rightv; - extern doublereal dzasum_(integer *, doublecomplex *, integer *); - static doublereal smlnum; - extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublereal *, doublereal *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZTREVC computes some or all of the right and/or left eigenvectors of - a complex upper triangular matrix T. - - The right eigenvector x and the left eigenvector y of T corresponding - to an eigenvalue w are defined by: - - T*x = w*x, y'*T = w*y' - - where y' denotes the conjugate transpose of the vector y. - - If all eigenvectors are requested, the routine may either return the - matrices X and/or Y of right or left eigenvectors of T, or the - products Q*X and/or Q*Y, where Q is an input unitary - matrix. 
If T was obtained from the Schur factorization of an - original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of - right or left eigenvectors of A. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'R': compute right eigenvectors only; - = 'L': compute left eigenvectors only; - = 'B': compute both right and left eigenvectors. - - HOWMNY (input) CHARACTER*1 - = 'A': compute all right and/or left eigenvectors; - = 'B': compute all right and/or left eigenvectors, - and backtransform them using the input matrices - supplied in VR and/or VL; - = 'S': compute selected right and/or left eigenvectors, - specified by the logical array SELECT. - - SELECT (input) LOGICAL array, dimension (N) - If HOWMNY = 'S', SELECT specifies the eigenvectors to be - computed. - If HOWMNY = 'A' or 'B', SELECT is not referenced. - To select the eigenvector corresponding to the j-th - eigenvalue, SELECT(j) must be set to .TRUE.. - - N (input) INTEGER - The order of the matrix T. N >= 0. - - T (input/output) COMPLEX*16 array, dimension (LDT,N) - The upper triangular matrix T. T is modified, but restored - on exit. - - LDT (input) INTEGER - The leading dimension of the array T. LDT >= max(1,N). - - VL (input/output) COMPLEX*16 array, dimension (LDVL,MM) - On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must - contain an N-by-N matrix Q (usually the unitary matrix Q of - Schur vectors returned by ZHSEQR). - On exit, if SIDE = 'L' or 'B', VL contains: - if HOWMNY = 'A', the matrix Y of left eigenvectors of T; - VL is lower triangular. The i-th column - VL(i) of VL is the eigenvector corresponding - to T(i,i). - if HOWMNY = 'B', the matrix Q*Y; - if HOWMNY = 'S', the left eigenvectors of T specified by - SELECT, stored consecutively in the columns - of VL, in the same order as their - eigenvalues. - If SIDE = 'R', VL is not referenced. - - LDVL (input) INTEGER - The leading dimension of the array VL. LDVL >= max(1,N) if - SIDE = 'L' or 'B'; LDVL >= 1 otherwise. 
- - VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) - On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must - contain an N-by-N matrix Q (usually the unitary matrix Q of - Schur vectors returned by ZHSEQR). - On exit, if SIDE = 'R' or 'B', VR contains: - if HOWMNY = 'A', the matrix X of right eigenvectors of T; - VR is upper triangular. The i-th column - VR(i) of VR is the eigenvector corresponding - to T(i,i). - if HOWMNY = 'B', the matrix Q*X; - if HOWMNY = 'S', the right eigenvectors of T specified by - SELECT, stored consecutively in the columns - of VR, in the same order as their - eigenvalues. - If SIDE = 'L', VR is not referenced. - - LDVR (input) INTEGER - The leading dimension of the array VR. LDVR >= max(1,N) if - SIDE = 'R' or 'B'; LDVR >= 1 otherwise. - - MM (input) INTEGER - The number of columns in the arrays VL and/or VR. MM >= M. - - M (output) INTEGER - The number of columns in the arrays VL and/or VR actually - used to store the eigenvectors. If HOWMNY = 'A' or 'B', M - is set to N. Each selected eigenvector occupies one - column. - - WORK (workspace) COMPLEX*16 array, dimension (2*N) - - RWORK (workspace) DOUBLE PRECISION array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - Further Details - =============== - - The algorithm used in this program is basically backward (forward) - substitution, with scaling to make the the code robust against - possible overflow. - - Each eigenvector is normalized so that the element of largest - magnitude has magnitude 1; here the magnitude of a complex number - (x,y) is taken to be |x| + |y|. 
- - ===================================================================== - - - Decode and test the input parameters -*/ - - /* Parameter adjustments */ - --select; - t_dim1 = *ldt; - t_offset = 1 + t_dim1 * 1; - t -= t_offset; - vl_dim1 = *ldvl; - vl_offset = 1 + vl_dim1 * 1; - vl -= vl_offset; - vr_dim1 = *ldvr; - vr_offset = 1 + vr_dim1 * 1; - vr -= vr_offset; - --work; - --rwork; - - /* Function Body */ - bothv = lsame_(side, "B"); - rightv = lsame_(side, "R") || bothv; - leftv = lsame_(side, "L") || bothv; - - allv = lsame_(howmny, "A"); - over = lsame_(howmny, "B"); - somev = lsame_(howmny, "S"); - -/* - Set M to the number of columns required to store the selected - eigenvectors. -*/ - - if (somev) { - *m = 0; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (select[j]) { - ++(*m); - } -/* L10: */ - } - } else { - *m = *n; - } - - *info = 0; - if ((! rightv && ! leftv)) { - *info = -1; - } else if (((! allv && ! over) && ! somev)) { - *info = -2; - } else if (*n < 0) { - *info = -4; - } else if (*ldt < max(1,*n)) { - *info = -6; - } else if (*ldvl < 1 || (leftv && *ldvl < *n)) { - *info = -8; - } else if (*ldvr < 1 || (rightv && *ldvr < *n)) { - *info = -10; - } else if (*mm < *m) { - *info = -11; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZTREVC", &i__1); - return 0; - } - -/* Quick return if possible. */ - - if (*n == 0) { - return 0; - } - -/* Set the constants to control overflow. */ - - unfl = SAFEMINIMUM; - ovfl = 1. / unfl; - dlabad_(&unfl, &ovfl); - ulp = PRECISION; - smlnum = unfl * (*n / ulp); - -/* Store the diagonal elements of T in working array WORK. */ - - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + *n; - i__3 = i__ + i__ * t_dim1; - work[i__2].r = t[i__3].r, work[i__2].i = t[i__3].i; -/* L20: */ - } - -/* - Compute 1-norm of each column of strictly upper triangular - part of T to control overflow in triangular solver. 
-*/ - - rwork[1] = 0.; - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - i__2 = j - 1; - rwork[j] = dzasum_(&i__2, &t[j * t_dim1 + 1], &c__1); -/* L30: */ - } - - if (rightv) { - -/* Compute right eigenvectors. */ - - is = *m; - for (ki = *n; ki >= 1; --ki) { - - if (somev) { - if (! select[ki]) { - goto L80; - } - } -/* Computing MAX */ - i__1 = ki + ki * t_dim1; - d__3 = ulp * ((d__1 = t[i__1].r, abs(d__1)) + (d__2 = d_imag(&t[ - ki + ki * t_dim1]), abs(d__2))); - smin = max(d__3,smlnum); - - work[1].r = 1., work[1].i = 0.; - -/* Form right-hand side. */ - - i__1 = ki - 1; - for (k = 1; k <= i__1; ++k) { - i__2 = k; - i__3 = k + ki * t_dim1; - z__1.r = -t[i__3].r, z__1.i = -t[i__3].i; - work[i__2].r = z__1.r, work[i__2].i = z__1.i; -/* L40: */ - } - -/* - Solve the triangular system: - (T(1:KI-1,1:KI-1) - T(KI,KI))*X = SCALE*WORK. -*/ - - i__1 = ki - 1; - for (k = 1; k <= i__1; ++k) { - i__2 = k + k * t_dim1; - i__3 = k + k * t_dim1; - i__4 = ki + ki * t_dim1; - z__1.r = t[i__3].r - t[i__4].r, z__1.i = t[i__3].i - t[i__4] - .i; - t[i__2].r = z__1.r, t[i__2].i = z__1.i; - i__2 = k + k * t_dim1; - if ((d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[k + k * - t_dim1]), abs(d__2)) < smin) { - i__3 = k + k * t_dim1; - t[i__3].r = smin, t[i__3].i = 0.; - } -/* L50: */ - } - - if (ki > 1) { - i__1 = ki - 1; - zlatrs_("Upper", "No transpose", "Non-unit", "Y", &i__1, &t[ - t_offset], ldt, &work[1], &scale, &rwork[1], info); - i__1 = ki; - work[i__1].r = scale, work[i__1].i = 0.; - } - -/* Copy the vector x or Q*x to VR and normalize. */ - - if (! over) { - zcopy_(&ki, &work[1], &c__1, &vr[is * vr_dim1 + 1], &c__1); - - ii = izamax_(&ki, &vr[is * vr_dim1 + 1], &c__1); - i__1 = ii + is * vr_dim1; - remax = 1. 
/ ((d__1 = vr[i__1].r, abs(d__1)) + (d__2 = d_imag( - &vr[ii + is * vr_dim1]), abs(d__2))); - zdscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); - - i__1 = *n; - for (k = ki + 1; k <= i__1; ++k) { - i__2 = k + is * vr_dim1; - vr[i__2].r = 0., vr[i__2].i = 0.; -/* L60: */ - } - } else { - if (ki > 1) { - i__1 = ki - 1; - z__1.r = scale, z__1.i = 0.; - zgemv_("N", n, &i__1, &c_b60, &vr[vr_offset], ldvr, &work[ - 1], &c__1, &z__1, &vr[ki * vr_dim1 + 1], &c__1); - } - - ii = izamax_(n, &vr[ki * vr_dim1 + 1], &c__1); - i__1 = ii + ki * vr_dim1; - remax = 1. / ((d__1 = vr[i__1].r, abs(d__1)) + (d__2 = d_imag( - &vr[ii + ki * vr_dim1]), abs(d__2))); - zdscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); - } - -/* Set back the original diagonal elements of T. */ - - i__1 = ki - 1; - for (k = 1; k <= i__1; ++k) { - i__2 = k + k * t_dim1; - i__3 = k + *n; - t[i__2].r = work[i__3].r, t[i__2].i = work[i__3].i; -/* L70: */ - } - - --is; -L80: - ; - } - } - - if (leftv) { - -/* Compute left eigenvectors. */ - - is = 1; - i__1 = *n; - for (ki = 1; ki <= i__1; ++ki) { - - if (somev) { - if (! select[ki]) { - goto L130; - } - } -/* Computing MAX */ - i__2 = ki + ki * t_dim1; - d__3 = ulp * ((d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[ - ki + ki * t_dim1]), abs(d__2))); - smin = max(d__3,smlnum); - - i__2 = *n; - work[i__2].r = 1., work[i__2].i = 0.; - -/* Form right-hand side. */ - - i__2 = *n; - for (k = ki + 1; k <= i__2; ++k) { - i__3 = k; - d_cnjg(&z__2, &t[ki + k * t_dim1]); - z__1.r = -z__2.r, z__1.i = -z__2.i; - work[i__3].r = z__1.r, work[i__3].i = z__1.i; -/* L90: */ - } - -/* - Solve the triangular system: - (T(KI+1:N,KI+1:N) - T(KI,KI))'*X = SCALE*WORK. 
-*/ - - i__2 = *n; - for (k = ki + 1; k <= i__2; ++k) { - i__3 = k + k * t_dim1; - i__4 = k + k * t_dim1; - i__5 = ki + ki * t_dim1; - z__1.r = t[i__4].r - t[i__5].r, z__1.i = t[i__4].i - t[i__5] - .i; - t[i__3].r = z__1.r, t[i__3].i = z__1.i; - i__3 = k + k * t_dim1; - if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[k + k * - t_dim1]), abs(d__2)) < smin) { - i__4 = k + k * t_dim1; - t[i__4].r = smin, t[i__4].i = 0.; - } -/* L100: */ - } - - if (ki < *n) { - i__2 = *n - ki; - zlatrs_("Upper", "Conjugate transpose", "Non-unit", "Y", & - i__2, &t[ki + 1 + (ki + 1) * t_dim1], ldt, &work[ki + - 1], &scale, &rwork[1], info); - i__2 = ki; - work[i__2].r = scale, work[i__2].i = 0.; - } - -/* Copy the vector x or Q*x to VL and normalize. */ - - if (! over) { - i__2 = *n - ki + 1; - zcopy_(&i__2, &work[ki], &c__1, &vl[ki + is * vl_dim1], &c__1) - ; - - i__2 = *n - ki + 1; - ii = izamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1; - i__2 = ii + is * vl_dim1; - remax = 1. / ((d__1 = vl[i__2].r, abs(d__1)) + (d__2 = d_imag( - &vl[ii + is * vl_dim1]), abs(d__2))); - i__2 = *n - ki + 1; - zdscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); - - i__2 = ki - 1; - for (k = 1; k <= i__2; ++k) { - i__3 = k + is * vl_dim1; - vl[i__3].r = 0., vl[i__3].i = 0.; -/* L110: */ - } - } else { - if (ki < *n) { - i__2 = *n - ki; - z__1.r = scale, z__1.i = 0.; - zgemv_("N", n, &i__2, &c_b60, &vl[(ki + 1) * vl_dim1 + 1], - ldvl, &work[ki + 1], &c__1, &z__1, &vl[ki * - vl_dim1 + 1], &c__1); - } - - ii = izamax_(n, &vl[ki * vl_dim1 + 1], &c__1); - i__2 = ii + ki * vl_dim1; - remax = 1. / ((d__1 = vl[i__2].r, abs(d__1)) + (d__2 = d_imag( - &vl[ii + ki * vl_dim1]), abs(d__2))); - zdscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1); - } - -/* Set back the original diagonal elements of T. 
*/ - - i__2 = *n; - for (k = ki + 1; k <= i__2; ++k) { - i__3 = k + k * t_dim1; - i__4 = k + *n; - t[i__3].r = work[i__4].r, t[i__3].i = work[i__4].i; -/* L120: */ - } - - ++is; -L130: - ; - } - } - - return 0; - -/* End of ZTREVC */ - -} /* ztrevc_ */ - -/* Subroutine */ int zung2r_(integer *m, integer *n, integer *k, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Local variables */ - static integer i__, j, l; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *), zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZUNG2R generates an m by n complex matrix Q with orthonormal columns, - which is defined as the first n columns of a product of k elementary - reflectors of order m - - Q = H(1) H(2) . . . H(k) - - as returned by ZGEQRF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. M >= N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. N >= K >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the i-th column must contain the vector which - defines the elementary reflector H(i), for i = 1,2,...,k, as - returned by ZGEQRF in the first k columns of its array - argument A. - On exit, the m by n matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). 
- - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEQRF. - - WORK (workspace) COMPLEX*16 array, dimension (N) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < 0 || *n > *m) { - *info = -2; - } else if (*k < 0 || *k > *n) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNG2R", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*n <= 0) { - return 0; - } - -/* Initialise columns k+1:n to columns of the unit matrix */ - - i__1 = *n; - for (j = *k + 1; j <= i__1; ++j) { - i__2 = *m; - for (l = 1; l <= i__2; ++l) { - i__3 = l + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L10: */ - } - i__2 = j + j * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; -/* L20: */ - } - - for (i__ = *k; i__ >= 1; --i__) { - -/* Apply H(i) to A(i:m,i:n) from the left */ - - if (i__ < *n) { - i__1 = i__ + i__ * a_dim1; - a[i__1].r = 1., a[i__1].i = 0.; - i__1 = *m - i__ + 1; - i__2 = *n - i__; - zlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[ - i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); - } - if (i__ < *m) { - i__1 = *m - i__; - i__2 = i__; - z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i; - zscal_(&i__1, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1); - } - i__1 = i__ + i__ * a_dim1; - i__2 = i__; - z__1.r = 1. - tau[i__2].r, z__1.i = 0. 
- tau[i__2].i; - a[i__1].r = z__1.r, a[i__1].i = z__1.i; - -/* Set A(1:i-1,i) to zero */ - - i__1 = i__ - 1; - for (l = 1; l <= i__1; ++l) { - i__2 = l + i__ * a_dim1; - a[i__2].r = 0., a[i__2].i = 0.; -/* L30: */ - } -/* L40: */ - } - return 0; - -/* End of ZUNG2R */ - -} /* zung2r_ */ - -/* Subroutine */ int zungbr_(char *vect, integer *m, integer *n, integer *k, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - - /* Local variables */ - static integer i__, j, nb, mn; - extern logical lsame_(char *, char *); - static integer iinfo; - static logical wantq; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer lwkopt; - static logical lquery; - extern /* Subroutine */ int zunglq_(integer *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *), zungqr_(integer *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNGBR generates one of the complex unitary matrices Q or P**H - determined by ZGEBRD when reducing a complex matrix A to bidiagonal - form: A = Q * B * P**H. Q and P**H are defined as products of - elementary reflectors H(i) or G(i) respectively. - - If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q - is of order M: - if m >= k, Q = H(1) H(2) . . . H(k) and ZUNGBR returns the first n - columns of Q, where m >= n >= k; - if m < k, Q = H(1) H(2) . . . H(m-1) and ZUNGBR returns Q as an - M-by-M matrix. 
- - If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**H - is of order N: - if k < n, P**H = G(k) . . . G(2) G(1) and ZUNGBR returns the first m - rows of P**H, where n >= m >= k; - if k >= n, P**H = G(n-1) . . . G(2) G(1) and ZUNGBR returns P**H as - an N-by-N matrix. - - Arguments - ========= - - VECT (input) CHARACTER*1 - Specifies whether the matrix Q or the matrix P**H is - required, as defined in the transformation applied by ZGEBRD: - = 'Q': generate Q; - = 'P': generate P**H. - - M (input) INTEGER - The number of rows of the matrix Q or P**H to be returned. - M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q or P**H to be returned. - N >= 0. - If VECT = 'Q', M >= N >= min(M,K); - if VECT = 'P', N >= M >= min(N,K). - - K (input) INTEGER - If VECT = 'Q', the number of columns in the original M-by-K - matrix reduced by ZGEBRD. - If VECT = 'P', the number of rows in the original K-by-N - matrix reduced by ZGEBRD. - K >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the vectors which define the elementary reflectors, - as returned by ZGEBRD. - On exit, the M-by-N matrix Q or P**H. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= M. - - TAU (input) COMPLEX*16 array, dimension - (min(M,K)) if VECT = 'Q' - (min(N,K)) if VECT = 'P' - TAU(i) must contain the scalar factor of the elementary - reflector H(i) or G(i), which determines Q or P**H, as - returned by ZGEBRD in its array argument TAUQ or TAUP. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,min(M,N)). - For optimum performance LWORK >= min(M,N)*NB, where NB - is the optimal blocksize. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - wantq = lsame_(vect, "Q"); - mn = min(*m,*n); - lquery = *lwork == -1; - if ((! wantq && ! lsame_(vect, "P"))) { - *info = -1; - } else if (*m < 0) { - *info = -2; - } else if (*n < 0 || (wantq && (*n > *m || *n < min(*m,*k))) || (! wantq - && (*m > *n || *m < min(*n,*k)))) { - *info = -3; - } else if (*k < 0) { - *info = -4; - } else if (*lda < max(1,*m)) { - *info = -6; - } else if ((*lwork < max(1,mn) && ! 
lquery)) { - *info = -9; - } - - if (*info == 0) { - if (wantq) { - nb = ilaenv_(&c__1, "ZUNGQR", " ", m, n, k, &c_n1, (ftnlen)6, ( - ftnlen)1); - } else { - nb = ilaenv_(&c__1, "ZUNGLQ", " ", m, n, k, &c_n1, (ftnlen)6, ( - ftnlen)1); - } - lwkopt = max(1,mn) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNGBR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - if (wantq) { - -/* - Form Q, determined by a call to ZGEBRD to reduce an m-by-k - matrix -*/ - - if (*m >= *k) { - -/* If m >= k, assume m >= n >= k */ - - zungqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & - iinfo); - - } else { - -/* - If m < k, assume m = n - - Shift the vectors which define the elementary reflectors one - column to the right, and set the first row and column of Q - to those of the unit matrix -*/ - - for (j = *m; j >= 2; --j) { - i__1 = j * a_dim1 + 1; - a[i__1].r = 0., a[i__1].i = 0.; - i__1 = *m; - for (i__ = j + 1; i__ <= i__1; ++i__) { - i__2 = i__ + j * a_dim1; - i__3 = i__ + (j - 1) * a_dim1; - a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i; -/* L10: */ - } -/* L20: */ - } - i__1 = a_dim1 + 1; - a[i__1].r = 1., a[i__1].i = 0.; - i__1 = *m; - for (i__ = 2; i__ <= i__1; ++i__) { - i__2 = i__ + a_dim1; - a[i__2].r = 0., a[i__2].i = 0.; -/* L30: */ - } - if (*m > 1) { - -/* Form Q(2:m,2:m) */ - - i__1 = *m - 1; - i__2 = *m - 1; - i__3 = *m - 1; - zungqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, & - tau[1], &work[1], lwork, &iinfo); - } - } - } else { - -/* - Form P', determined by a call to ZGEBRD to reduce a k-by-n - matrix -*/ - - if (*k < *n) { - -/* If k < n, assume k <= m <= n */ - - zunglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & - iinfo); - - } else { - -/* - If k >= n, assume m = n - - Shift the vectors which define the elementary reflectors one - 
row downward, and set the first row and column of P' to - those of the unit matrix -*/ - - i__1 = a_dim1 + 1; - a[i__1].r = 1., a[i__1].i = 0.; - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - i__2 = i__ + a_dim1; - a[i__2].r = 0., a[i__2].i = 0.; -/* L40: */ - } - i__1 = *n; - for (j = 2; j <= i__1; ++j) { - for (i__ = j - 1; i__ >= 2; --i__) { - i__2 = i__ + j * a_dim1; - i__3 = i__ - 1 + j * a_dim1; - a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i; -/* L50: */ - } - i__2 = j * a_dim1 + 1; - a[i__2].r = 0., a[i__2].i = 0.; -/* L60: */ - } - if (*n > 1) { - -/* Form P'(2:n,2:n) */ - - i__1 = *n - 1; - i__2 = *n - 1; - i__3 = *n - 1; - zunglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, & - tau[1], &work[1], lwork, &iinfo); - } - } - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNGBR */ - -} /* zungbr_ */ - -/* Subroutine */ int zunghr_(integer *n, integer *ilo, integer *ihi, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, j, nb, nh, iinfo; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer lwkopt; - static logical lquery; - extern /* Subroutine */ int zungqr_(integer *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNGHR generates a complex unitary matrix Q which is defined as the - product of IHI-ILO elementary reflectors of order N, as returned by - ZGEHRD: - - Q = H(ilo) H(ilo+1) . . . H(ihi-1). 
- - Arguments - ========= - - N (input) INTEGER - The order of the matrix Q. N >= 0. - - ILO (input) INTEGER - IHI (input) INTEGER - ILO and IHI must have the same values as in the previous call - of ZGEHRD. Q is equal to the unit matrix except in the - submatrix Q(ilo+1:ihi,ilo+1:ihi). - 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the vectors which define the elementary reflectors, - as returned by ZGEHRD. - On exit, the N-by-N unitary matrix Q. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,N). - - TAU (input) COMPLEX*16 array, dimension (N-1) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEHRD. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= IHI-ILO. - For optimum performance LWORK >= (IHI-ILO)*NB, where NB is - the optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nh = *ihi - *ilo; - lquery = *lwork == -1; - if (*n < 0) { - *info = -1; - } else if (*ilo < 1 || *ilo > max(1,*n)) { - *info = -2; - } else if (*ihi < min(*ilo,*n) || *ihi > *n) { - *info = -3; - } else if (*lda < max(1,*n)) { - *info = -5; - } else if ((*lwork < max(1,nh) && ! 
lquery)) { - *info = -8; - } - - if (*info == 0) { - nb = ilaenv_(&c__1, "ZUNGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, ( - ftnlen)1); - lwkopt = max(1,nh) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNGHR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - -/* - Shift the vectors which define the elementary reflectors one - column to the right, and set the first ilo and the last n-ihi - rows and columns to those of the unit matrix -*/ - - i__1 = *ilo + 1; - for (j = *ihi; j >= i__1; --j) { - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L10: */ - } - i__2 = *ihi; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + (j - 1) * a_dim1; - a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i; -/* L20: */ - } - i__2 = *n; - for (i__ = *ihi + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L30: */ - } -/* L40: */ - } - i__1 = *ilo; - for (j = 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L50: */ - } - i__2 = j + j * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; -/* L60: */ - } - i__1 = *n; - for (j = *ihi + 1; j <= i__1; ++j) { - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L70: */ - } - i__2 = j + j * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; -/* L80: */ - } - - if (nh > 0) { - -/* Generate Q(ilo+1:ihi,ilo+1:ihi) */ - - zungqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[* - ilo], &work[1], lwork, &iinfo); - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNGHR */ - -} /* zunghr_ */ - -/* Subroutine */ int zungl2_(integer *m, integer *n, integer *k, - 
doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3; - doublecomplex z__1, z__2; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, j, l; - extern /* Subroutine */ int zscal_(integer *, doublecomplex *, - doublecomplex *, integer *), zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNGL2 generates an m-by-n complex matrix Q with orthonormal rows, - which is defined as the first m rows of a product of k elementary - reflectors of order n - - Q = H(k)' . . . H(2)' H(1)' - - as returned by ZGELQF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. N >= M. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. M >= K >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the i-th row must contain the vector which defines - the elementary reflector H(i), for i = 1,2,...,k, as returned - by ZGELQF in the first k rows of its array argument A. - On exit, the m by n matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGELQF. 
- - WORK (workspace) COMPLEX*16 array, dimension (M) - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - if (*m < 0) { - *info = -1; - } else if (*n < *m) { - *info = -2; - } else if (*k < 0 || *k > *m) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNGL2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m <= 0) { - return 0; - } - - if (*k < *m) { - -/* Initialise rows k+1:m to rows of the unit matrix */ - - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (l = *k + 1; l <= i__2; ++l) { - i__3 = l + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L10: */ - } - if ((j > *k && j <= *m)) { - i__2 = j + j * a_dim1; - a[i__2].r = 1., a[i__2].i = 0.; - } -/* L20: */ - } - } - - for (i__ = *k; i__ >= 1; --i__) { - -/* Apply H(i)' to A(i:m,i:n) from the right */ - - if (i__ < *n) { - i__1 = *n - i__; - zlacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda); - if (i__ < *m) { - i__1 = i__ + i__ * a_dim1; - a[i__1].r = 1., a[i__1].i = 0.; - i__1 = *m - i__; - i__2 = *n - i__ + 1; - d_cnjg(&z__1, &tau[i__]); - zlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, & - z__1, &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); - } - i__1 = *n - i__; - i__2 = i__; - z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i; - zscal_(&i__1, &z__1, &a[i__ + (i__ + 1) * a_dim1], lda); - i__1 = *n - i__; - zlacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda); - } - i__1 = i__ + i__ * a_dim1; - d_cnjg(&z__2, &tau[i__]); - z__1.r = 1. - z__2.r, z__1.i = 0. 
- z__2.i; - a[i__1].r = z__1.r, a[i__1].i = z__1.i; - -/* Set A(i,1:i-1) to zero */ - - i__1 = i__ - 1; - for (l = 1; l <= i__1; ++l) { - i__2 = i__ + l * a_dim1; - a[i__2].r = 0., a[i__2].i = 0.; -/* L30: */ - } -/* L40: */ - } - return 0; - -/* End of ZUNGL2 */ - -} /* zungl2_ */ - -/* Subroutine */ int zunglq_(integer *m, integer *n, integer *k, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int zungl2_(integer *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static logical lquery; - static integer lwkopt; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNGLQ generates an M-by-N complex matrix Q with orthonormal rows, - which is defined as the first M rows of a product of K elementary - reflectors of order N - - Q = H(k)' . . . H(2)' H(1)' - - as returned by ZGELQF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. N >= M. 
- - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. M >= K >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the i-th row must contain the vector which defines - the elementary reflector H(i), for i = 1,2,...,k, as returned - by ZGELQF in the first k rows of its array argument A. - On exit, the M-by-N matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGELQF. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,M). - For optimum performance LWORK >= M*NB, where NB is - the optimal blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit; - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "ZUNGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); - lwkopt = max(1,*m) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < *m) { - *info = -2; - } else if (*k < 0 || *k > *m) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if ((*lwork < max(1,*m) && ! 
lquery)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNGLQ", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m <= 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *m; - if ((nb > 1 && nb < *k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGLQ", " ", m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < *k) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *m; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. -*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGLQ", " ", m, n, k, &c_n1, - (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < *k) && nx < *k)) { - -/* - Use blocked code after the last block. - The first kk rows are handled by the block method. -*/ - - ki = (*k - nx - 1) / nb * nb; -/* Computing MIN */ - i__1 = *k, i__2 = ki + nb; - kk = min(i__1,i__2); - -/* Set A(kk+1:m,1:kk) to zero. */ - - i__1 = kk; - for (j = 1; j <= i__1; ++j) { - i__2 = *m; - for (i__ = kk + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - kk = 0; - } - -/* Use unblocked code for the last or only block. */ - - if (kk < *m) { - i__1 = *m - kk; - i__2 = *n - kk; - i__3 = *k - kk; - zungl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & - tau[kk + 1], &work[1], &iinfo); - } - - if (kk > 0) { - -/* Use blocked code */ - - i__1 = -nb; - for (i__ = ki + 1; i__1 < 0 ? 
i__ >= 1 : i__ <= 1; i__ += i__1) { -/* Computing MIN */ - i__2 = nb, i__3 = *k - i__ + 1; - ib = min(i__2,i__3); - if (i__ + ib <= *m) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__2 = *n - i__ + 1; - zlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H' to A(i+ib:m,i:n) from the right */ - - i__2 = *m - i__ - ib + 1; - i__3 = *n - i__ + 1; - zlarfb_("Right", "Conjugate transpose", "Forward", "Rowwise", - &i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[ - 1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ - ib + 1], &ldwork); - } - -/* Apply H' to columns i:n of current block */ - - i__2 = *n - i__ + 1; - zungl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & - work[1], &iinfo); - -/* Set columns 1:i-1 of current block to zero */ - - i__2 = i__ - 1; - for (j = 1; j <= i__2; ++j) { - i__3 = i__ + ib - 1; - for (l = i__; l <= i__3; ++l) { - i__4 = l + j * a_dim1; - a[i__4].r = 0., a[i__4].i = 0.; -/* L30: */ - } -/* L40: */ - } -/* L50: */ - } - } - - work[1].r = (doublereal) iws, work[1].i = 0.; - return 0; - -/* End of ZUNGLQ */ - -} /* zunglq_ */ - -/* Subroutine */ int zungqr_(integer *m, integer *n, integer *k, - doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex * - work, integer *lwork, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, i__1, i__2, i__3, i__4; - - /* Local variables */ - static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo; - extern /* Subroutine */ int zung2r_(integer *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - 
doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNGQR generates an M-by-N complex matrix Q with orthonormal columns, - which is defined as the first N columns of a product of K elementary - reflectors of order M - - Q = H(1) H(2) . . . H(k) - - as returned by ZGEQRF. - - Arguments - ========= - - M (input) INTEGER - The number of rows of the matrix Q. M >= 0. - - N (input) INTEGER - The number of columns of the matrix Q. M >= N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines the - matrix Q. N >= K >= 0. - - A (input/output) COMPLEX*16 array, dimension (LDA,N) - On entry, the i-th column must contain the vector which - defines the elementary reflector H(i), for i = 1,2,...,k, as - returned by ZGEQRF in the first k columns of its array - argument A. - On exit, the M-by-N matrix Q. - - LDA (input) INTEGER - The first dimension of the array A. LDA >= max(1,M). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEQRF. - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. LWORK >= max(1,N). - For optimum performance LWORK >= N*NB, where NB is the - optimal blocksize. 
- - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument has an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - --work; - - /* Function Body */ - *info = 0; - nb = ilaenv_(&c__1, "ZUNGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); - lwkopt = max(1,*n) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - lquery = *lwork == -1; - if (*m < 0) { - *info = -1; - } else if (*n < 0 || *n > *m) { - *info = -2; - } else if (*k < 0 || *k > *n) { - *info = -3; - } else if (*lda < max(1,*m)) { - *info = -5; - } else if ((*lwork < max(1,*n) && ! lquery)) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNGQR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*n <= 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - nx = 0; - iws = *n; - if ((nb > 1 && nb < *k)) { - -/* - Determine when to cross over from blocked to unblocked code. - - Computing MAX -*/ - i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGQR", " ", m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)1); - nx = max(i__1,i__2); - if (nx < *k) { - -/* Determine if workspace is large enough for blocked code. */ - - ldwork = *n; - iws = ldwork * nb; - if (*lwork < iws) { - -/* - Not enough workspace to use optimal NB: reduce NB and - determine the minimum value of NB. 
-*/ - - nb = *lwork / ldwork; -/* Computing MAX */ - i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGQR", " ", m, n, k, &c_n1, - (ftnlen)6, (ftnlen)1); - nbmin = max(i__1,i__2); - } - } - } - - if (((nb >= nbmin && nb < *k) && nx < *k)) { - -/* - Use blocked code after the last block. - The first kk columns are handled by the block method. -*/ - - ki = (*k - nx - 1) / nb * nb; -/* Computing MIN */ - i__1 = *k, i__2 = ki + nb; - kk = min(i__1,i__2); - -/* Set A(1:kk,kk+1:n) to zero. */ - - i__1 = *n; - for (j = kk + 1; j <= i__1; ++j) { - i__2 = kk; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - a[i__3].r = 0., a[i__3].i = 0.; -/* L10: */ - } -/* L20: */ - } - } else { - kk = 0; - } - -/* Use unblocked code for the last or only block. */ - - if (kk < *n) { - i__1 = *m - kk; - i__2 = *n - kk; - i__3 = *k - kk; - zung2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & - tau[kk + 1], &work[1], &iinfo); - } - - if (kk > 0) { - -/* Use blocked code */ - - i__1 = -nb; - for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { -/* Computing MIN */ - i__2 = nb, i__3 = *k - i__ + 1; - ib = min(i__2,i__3); - if (i__ + ib <= *n) { - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . 
H(i+ib-1) -*/ - - i__2 = *m - i__ + 1; - zlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], &work[1], &ldwork); - -/* Apply H to A(i:m,i+ib:n) from the left */ - - i__2 = *m - i__ + 1; - i__3 = *n - i__ - ib + 1; - zlarfb_("Left", "No transpose", "Forward", "Columnwise", & - i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[ - 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, & - work[ib + 1], &ldwork); - } - -/* Apply H to rows i:m of current block */ - - i__2 = *m - i__ + 1; - zung2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & - work[1], &iinfo); - -/* Set rows 1:i-1 of current block to zero */ - - i__2 = i__ + ib - 1; - for (j = i__; j <= i__2; ++j) { - i__3 = i__ - 1; - for (l = 1; l <= i__3; ++l) { - i__4 = l + j * a_dim1; - a[i__4].r = 0., a[i__4].i = 0.; -/* L30: */ - } -/* L40: */ - } -/* L50: */ - } - } - - work[1].r = (doublereal) iws, work[1].i = 0.; - return 0; - -/* End of ZUNGQR */ - -} /* zungqr_ */ - -/* Subroutine */ int zunm2l_(char *side, char *trans, integer *m, integer *n, - integer *k, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, i1, i2, i3, mi, ni, nq; - static doublecomplex aii; - static logical left; - static doublecomplex taui; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZUNM2L overwrites the general complex m-by-n matrix C with - - Q * C if SIDE = 'L' and TRANS = 'N', or - - Q'* C if SIDE = 'L' and TRANS = 'C', or - - C * Q if SIDE = 'R' and TRANS = 'N', or - - C * Q' if SIDE = 'R' and TRANS = 'C', - - where Q is a complex unitary matrix defined as the product of k - elementary reflectors - - Q = H(k) . . . H(2) H(1) - - as returned by ZGEQLF. Q is of order m if SIDE = 'L' and of order n - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q' from the Left - = 'R': apply Q or Q' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply Q (No transpose) - = 'C': apply Q' (Conjugate transpose) - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - ZGEQLF in the last k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEQLF. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the m-by-n matrix C. - On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace) COMPLEX*16 array, dimension - (N) if SIDE = 'L', - (M) if SIDE = 'R' - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - -/* NQ is the order of Q */ - - if (left) { - nq = *m; - } else { - nq = *n; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "C"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNM2L", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - return 0; - } - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = 1; - } else { - i1 = *k; - i2 = 1; - i3 = -1; - } - - if (left) { - ni = *n; - } else { - mi = *m; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (left) { - -/* H(i) or H(i)' is applied to C(1:m-k+i,1:n) */ - - mi = *m - *k + i__; - } else { - -/* H(i) or H(i)' is applied to C(1:m,1:n-k+i) */ - - ni = *n - *k + i__; - } - -/* Apply H(i) or H(i)' */ - - if (notran) { - i__3 = i__; - taui.r = tau[i__3].r, taui.i = tau[i__3].i; - } else { - d_cnjg(&z__1, &tau[i__]); - taui.r = z__1.r, taui.i = z__1.i; - } - i__3 = nq - *k + i__ + i__ * a_dim1; - aii.r = a[i__3].r, aii.i = a[i__3].i; - i__3 = nq - *k + i__ + i__ * a_dim1; - a[i__3].r = 1., a[i__3].i = 0.; - zlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &taui, &c__[ - c_offset], ldc, &work[1]); - i__3 = nq - *k + i__ + i__ * a_dim1; - a[i__3].r = aii.r, a[i__3].i = aii.i; -/* L10: */ - } - return 0; - -/* End of ZUNM2L */ - -} /* zunm2l_ */ - -/* Subroutine */ int zunm2r_(char *side, char *trans, integer *m, integer *n, - integer *k, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; - static doublecomplex aii; - static logical left; - static doublecomplex taui; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZUNM2R overwrites the general complex m-by-n matrix C with - - Q * C if SIDE = 'L' and TRANS = 'N', or - - Q'* C if SIDE = 'L' and TRANS = 'C', or - - C * Q if SIDE = 'R' and TRANS = 'N', or - - C * Q' if SIDE = 'R' and TRANS = 'C', - - where Q is a complex unitary matrix defined as the product of k - elementary reflectors - - Q = H(1) H(2) . . . H(k) - - as returned by ZGEQRF. Q is of order m if SIDE = 'L' and of order n - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q' from the Left - = 'R': apply Q or Q' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply Q (No transpose) - = 'C': apply Q' (Conjugate transpose) - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - ZGEQRF in the first k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEQRF. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the m-by-n matrix C. - On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace) COMPLEX*16 array, dimension - (N) if SIDE = 'L', - (M) if SIDE = 'R' - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - -/* NQ is the order of Q */ - - if (left) { - nq = *m; - } else { - nq = *n; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "C"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNM2R", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - return 0; - } - - if ((left && ! notran) || (! left && notran)) { - i1 = 1; - i2 = *k; - i3 = 1; - } else { - i1 = *k; - i2 = 1; - i3 = -1; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (left) { - -/* H(i) or H(i)' is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H(i) or H(i)' is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H(i) or H(i)' */ - - if (notran) { - i__3 = i__; - taui.r = tau[i__3].r, taui.i = tau[i__3].i; - } else { - d_cnjg(&z__1, &tau[i__]); - taui.r = z__1.r, taui.i = z__1.i; - } - i__3 = i__ + i__ * a_dim1; - aii.r = a[i__3].r, aii.i = a[i__3].i; - i__3 = i__ + i__ * a_dim1; - a[i__3].r = 1., a[i__3].i = 0.; - zlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &taui, &c__[ic - + jc * c_dim1], ldc, &work[1]); - i__3 = i__ + i__ * a_dim1; - a[i__3].r = aii.r, a[i__3].i = aii.i; -/* L10: */ - } - return 0; - -/* End of ZUNM2R */ - -} /* zunm2r_ */ - -/* Subroutine */ int zunmbr_(char *vect, char *side, char *trans, integer *m, - integer *n, integer *k, doublecomplex *a, integer *lda, doublecomplex - *tau, doublecomplex *c__, integer *ldc, doublecomplex *work, integer * - lwork, integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i1, i2, nb, mi, ni, nq, nw; - static logical left; - extern logical lsame_(char *, char *); - static integer iinfo; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static logical notran, applyq; - static char transt[1]; - static integer lwkopt; - static logical lquery; - extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *, - integer *, 
doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - If VECT = 'Q', ZUNMBR overwrites the general complex M-by-N matrix C - with - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'C': Q**H * C C * Q**H - - If VECT = 'P', ZUNMBR overwrites the general complex M-by-N matrix C - with - SIDE = 'L' SIDE = 'R' - TRANS = 'N': P * C C * P - TRANS = 'C': P**H * C C * P**H - - Here Q and P**H are the unitary matrices determined by ZGEBRD when - reducing a complex matrix A to bidiagonal form: A = Q * B * P**H. Q - and P**H are defined as products of elementary reflectors H(i) and - G(i) respectively. - - Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the - order of the unitary matrix Q or P**H that is applied. - - If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: - if nq >= k, Q = H(1) H(2) . . . H(k); - if nq < k, Q = H(1) H(2) . . . H(nq-1). - - If VECT = 'P', A is assumed to have been a K-by-NQ matrix: - if k < nq, P = G(1) G(2) . . . G(k); - if k >= nq, P = G(1) G(2) . . . G(nq-1). - - Arguments - ========= - - VECT (input) CHARACTER*1 - = 'Q': apply Q or Q**H; - = 'P': apply P or P**H. - - SIDE (input) CHARACTER*1 - = 'L': apply Q, Q**H, P or P**H from the Left; - = 'R': apply Q, Q**H, P or P**H from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q or P; - = 'C': Conjugate transpose, apply Q**H or P**H. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - If VECT = 'Q', the number of columns in the original - matrix reduced by ZGEBRD. - If VECT = 'P', the number of rows in the original - matrix reduced by ZGEBRD. - K >= 0. 
- - A (input) COMPLEX*16 array, dimension - (LDA,min(nq,K)) if VECT = 'Q' - (LDA,nq) if VECT = 'P' - The vectors which define the elementary reflectors H(i) and - G(i), whose products determine the matrices Q and P, as - returned by ZGEBRD. - - LDA (input) INTEGER - The leading dimension of the array A. - If VECT = 'Q', LDA >= max(1,nq); - if VECT = 'P', LDA >= max(1,min(nq,K)). - - TAU (input) COMPLEX*16 array, dimension (min(nq,K)) - TAU(i) must contain the scalar factor of the elementary - reflector H(i) or G(i) which determines Q or P, as returned - by ZGEBRD in the array argument TAUQ or TAUP. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q - or P*C or P**H*C or C*P or C*P**H. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - applyq = lsame_(vect, "Q"); - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q or P and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! applyq && ! lsame_(vect, "P"))) { - *info = -1; - } else if ((! left && ! lsame_(side, "R"))) { - *info = -2; - } else if ((! notran && ! lsame_(trans, "C"))) { - *info = -3; - } else if (*m < 0) { - *info = -4; - } else if (*n < 0) { - *info = -5; - } else if (*k < 0) { - *info = -6; - } else /* if(complicated condition) */ { -/* Computing MAX */ - i__1 = 1, i__2 = min(nq,*k); - if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2) - )) { - *info = -8; - } else if (*ldc < max(1,*m)) { - *info = -11; - } else if ((*lwork < max(1,nw) && ! 
lquery)) { - *info = -13; - } - } - - if (*info == 0) { - if (applyq) { - if (left) { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *m - 1; - i__2 = *m - 1; - nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &i__1, n, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *n - 1; - i__2 = *n - 1; - nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &i__1, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } else { - if (left) { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *m - 1; - i__2 = *m - 1; - nb = ilaenv_(&c__1, "ZUNMLQ", ch__1, &i__1, n, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = *n - 1; - i__2 = *n - 1; - nb = ilaenv_(&c__1, "ZUNMLQ", ch__1, m, &i__1, &i__2, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } - lwkopt = max(1,nw) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNMBR", &i__1); - return 0; - } else if (lquery) { - } - -/* Quick return if possible */ - - work[1].r = 1., work[1].i = 0.; - if (*m == 0 || *n == 0) { - return 0; - } - - if (applyq) { - -/* Apply Q */ - - if (nq >= *k) { - -/* Q was determined by a call to ZGEBRD with nq >= k */ - - zunmqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], lwork, &iinfo); - } else if (nq > 1) { - -/* Q was determined by a call to ZGEBRD with nq < k */ - - if (left) { - mi = *m - 1; - ni = *n; - i1 = 2; - i2 = 1; - } else { - mi = *m; - ni = *n - 1; - i1 = 1; - i2 = 2; - } - i__1 = nq - 1; - zunmqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], 
lda, &tau[1] - , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); - } - } else { - -/* Apply P */ - - if (notran) { - *(unsigned char *)transt = 'C'; - } else { - *(unsigned char *)transt = 'N'; - } - if (nq > *k) { - -/* P was determined by a call to ZGEBRD with nq > k */ - - zunmlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], lwork, &iinfo); - } else if (nq > 1) { - -/* P was determined by a call to ZGEBRD with nq <= k */ - - if (left) { - mi = *m - 1; - ni = *n; - i1 = 2; - i2 = 1; - } else { - mi = *m; - ni = *n - 1; - i1 = 1; - i2 = 2; - } - i__1 = nq - 1; - zunmlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1], - lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], - lwork, &iinfo); - } - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNMBR */ - -} /* zunmbr_ */ - -/* Subroutine */ int zunml2_(char *side, char *trans, integer *m, integer *n, - integer *k, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info) -{ - /* System generated locals */ - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; - doublecomplex z__1; - - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - - /* Local variables */ - static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; - static doublecomplex aii; - static logical left; - static doublecomplex taui; - extern logical lsame_(char *, char *); - extern /* Subroutine */ int zlarf_(char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *); - static logical notran; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - ZUNML2 overwrites the general complex m-by-n matrix C with - - Q * C if SIDE = 'L' and TRANS = 'N', or - - Q'* C if SIDE = 'L' and TRANS = 'C', or - - C * Q if SIDE = 'R' and TRANS = 'N', or - - C * Q' if SIDE = 'R' and TRANS = 'C', - - where Q is a complex unitary matrix defined as the product of k - elementary reflectors - - Q = H(k)' . . . H(2)' H(1)' - - as returned by ZGELQF. Q is of order m if SIDE = 'L' and of order n - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q' from the Left - = 'R': apply Q or Q' from the Right - - TRANS (input) CHARACTER*1 - = 'N': apply Q (No transpose) - = 'C': apply Q' (Conjugate transpose) - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) COMPLEX*16 array, dimension - (LDA,M) if SIDE = 'L', - (LDA,N) if SIDE = 'R' - The i-th row must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - ZGELQF in the first k rows of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,K). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGELQF. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the m-by-n matrix C. - On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). 
- - WORK (workspace) COMPLEX*16 array, dimension - (N) if SIDE = 'L', - (M) if SIDE = 'R' - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - -/* NQ is the order of Q */ - - if (left) { - nq = *m; - } else { - nq = *n; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "C"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,*k)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNML2", &i__1); - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - return 0; - } - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = 1; - } else { - i1 = *k; - i2 = 1; - i3 = -1; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (left) { - -/* H(i) or H(i)' is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H(i) or H(i)' is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H(i) or H(i)' */ - - if (notran) { - d_cnjg(&z__1, &tau[i__]); - taui.r = z__1.r, taui.i = z__1.i; - } else { - i__3 = i__; - taui.r = tau[i__3].r, taui.i = tau[i__3].i; - } - if (i__ < nq) { - i__3 = nq - i__; - zlacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda); - } - i__3 = i__ + i__ * a_dim1; - aii.r = a[i__3].r, aii.i = a[i__3].i; - i__3 = i__ + i__ * a_dim1; - a[i__3].r = 1., a[i__3].i = 0.; - zlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &taui, &c__[ic + - jc * c_dim1], ldc, &work[1]); - i__3 = i__ + i__ * a_dim1; - a[i__3].r = aii.r, a[i__3].i = aii.i; - if (i__ < nq) { - i__3 = nq - i__; - zlacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda); - } -/* L10: */ - } - return 0; - -/* End of ZUNML2 */ - -} /* zunml2_ */ - -/* Subroutine */ int zunmlq_(char *side, char *trans, integer *m, integer *n, - integer *k, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, - i__5; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__; - static doublecomplex t[4160] /* was [65][64] */; - static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws; - static logical left; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - extern /* Subroutine */ int zunml2_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, 
char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static logical notran; - static integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static char transt[1]; - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNMLQ overwrites the general complex M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'C': Q**H * C C * Q**H - - where Q is a complex unitary matrix defined as the product of k - elementary reflectors - - Q = H(k)' . . . H(2)' H(1)' - - as returned by ZGELQF. Q is of order M if SIDE = 'L' and of order N - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**H from the Left; - = 'R': apply Q or Q**H from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'C': Conjugate transpose, apply Q**H. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) COMPLEX*16 array, dimension - (LDA,M) if SIDE = 'L', - (LDA,N) if SIDE = 'R' - The i-th row must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - ZGELQF in the first k rows of its array argument A. 
- A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. LDA >= max(1,K). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGELQF. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! 
lsame_(trans, "C"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,*k)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! lquery)) { - *info = -12; - } - - if (*info == 0) { - -/* - Determine the block size. NB may be at most NBMAX, where NBMAX - is used to define the local array T. - - Computing MIN - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMLQ", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nb = min(i__1,i__2); - lwkopt = max(1,nw) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNMLQ", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - ldwork = nw; - if ((nb > 1 && nb < *k)) { - iws = nw * nb; - if (*lwork < iws) { - nb = *lwork / ldwork; -/* - Computing MAX - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMLQ", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nbmin = max(i__1,i__2); - } - } else { - iws = nw; - } - - if (nb < nbmin || nb >= *k) { - -/* Use unblocked code */ - - zunml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], &iinfo); - } else { - -/* Use blocked code */ - - if ((left && notran) || (! left && ! 
notran)) { - i1 = 1; - i2 = *k; - i3 = nb; - } else { - i1 = (*k - 1) / nb * nb + 1; - i2 = 1; - i3 = -nb; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - if (notran) { - *(unsigned char *)transt = 'C'; - } else { - *(unsigned char *)transt = 'N'; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__4 = nb, i__5 = *k - i__ + 1; - ib = min(i__4,i__5); - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . H(i+ib-1) -*/ - - i__4 = nq - i__ + 1; - zlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1], - lda, &tau[i__], t, &c__65); - if (left) { - -/* H or H' is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H or H' is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H or H' */ - - zlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__ - + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1], - ldc, &work[1], &ldwork); -/* L10: */ - } - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNMLQ */ - -} /* zunmlq_ */ - -/* Subroutine */ int zunmql_(char *side, char *trans, integer *m, integer *n, - integer *k, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, - i__5; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__; - static doublecomplex t[4160] /* was [65][64] */; - static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws; - static logical left; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - extern /* Subroutine */ int zunm2l_(char *, char *, integer *, integer *, - integer 
*, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static logical notran; - static integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNMQL overwrites the general complex M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'C': Q**H * C C * Q**H - - where Q is a complex unitary matrix defined as the product of k - elementary reflectors - - Q = H(k) . . . H(2) H(1) - - as returned by ZGEQLF. Q is of order M if SIDE = 'L' and of order N - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**H from the Left; - = 'R': apply Q or Q**H from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'C': Transpose, apply Q**H. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. 
- - A (input) COMPLEX*16 array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - ZGEQLF in the last k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEQLF. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. 
- - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "C"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! lquery)) { - *info = -12; - } - - if (*info == 0) { - -/* - Determine the block size. NB may be at most NBMAX, where NBMAX - is used to define the local array T. 
- - Computing MIN - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMQL", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nb = min(i__1,i__2); - lwkopt = max(1,nw) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNMQL", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - ldwork = nw; - if ((nb > 1 && nb < *k)) { - iws = nw * nb; - if (*lwork < iws) { - nb = *lwork / ldwork; -/* - Computing MAX - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMQL", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nbmin = max(i__1,i__2); - } - } else { - iws = nw; - } - - if (nb < nbmin || nb >= *k) { - -/* Use unblocked code */ - - zunm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], &iinfo); - } else { - -/* Use blocked code */ - - if ((left && notran) || (! left && ! notran)) { - i1 = 1; - i2 = *k; - i3 = nb; - } else { - i1 = (*k - 1) / nb * nb + 1; - i2 = 1; - i3 = -nb; - } - - if (left) { - ni = *n; - } else { - mi = *m; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__4 = nb, i__5 = *k - i__ + 1; - ib = min(i__4,i__5); - -/* - Form the triangular factor of the block reflector - H = H(i+ib-1) . . . 
H(i+1) H(i) -*/ - - i__4 = nq - *k + i__ + ib - 1; - zlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1] - , lda, &tau[i__], t, &c__65); - if (left) { - -/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */ - - mi = *m - *k + i__ + ib - 1; - } else { - -/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */ - - ni = *n - *k + i__ + ib - 1; - } - -/* Apply H or H' */ - - zlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[ - i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, & - work[1], &ldwork); -/* L10: */ - } - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNMQL */ - -} /* zunmql_ */ - -/* Subroutine */ int zunmqr_(char *side, char *trans, integer *m, integer *n, - integer *k, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, - i__5; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i__; - static doublecomplex t[4160] /* was [65][64] */; - static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws; - static logical left; - extern logical lsame_(char *, char *); - static integer nbmin, iinfo; - extern /* Subroutine */ int zunm2r_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, - integer *, integer *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *, doublecomplex *, integer *, - doublecomplex *, integer *); - static logical notran; - static 
integer ldwork; - extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, - doublecomplex *, integer *, doublecomplex *, doublecomplex *, - integer *); - static integer lwkopt; - static logical lquery; - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNMQR overwrites the general complex M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'C': Q**H * C C * Q**H - - where Q is a complex unitary matrix defined as the product of k - elementary reflectors - - Q = H(1) H(2) . . . H(k) - - as returned by ZGEQRF. Q is of order M if SIDE = 'L' and of order N - if SIDE = 'R'. - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**H from the Left; - = 'R': apply Q or Q**H from the Right. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'C': Conjugate transpose, apply Q**H. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - K (input) INTEGER - The number of elementary reflectors whose product defines - the matrix Q. - If SIDE = 'L', M >= K >= 0; - if SIDE = 'R', N >= K >= 0. - - A (input) COMPLEX*16 array, dimension (LDA,K) - The i-th column must contain the vector which defines the - elementary reflector H(i), for i = 1,2,...,k, as returned by - ZGEQRF in the first k columns of its array argument A. - A is modified by the routine but restored on exit. - - LDA (input) INTEGER - The leading dimension of the array A. - If SIDE = 'L', LDA >= max(1,M); - if SIDE = 'R', LDA >= max(1,N). - - TAU (input) COMPLEX*16 array, dimension (K) - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZGEQRF. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. 
- On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. - - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >= M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - notran = lsame_(trans, "N"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! notran && ! lsame_(trans, "C"))) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*k < 0 || *k > nq) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! lquery)) { - *info = -12; - } - - if (*info == 0) { - -/* - Determine the block size. NB may be at most NBMAX, where NBMAX - is used to define the local array T. 
- - Computing MIN - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMQR", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nb = min(i__1,i__2); - lwkopt = max(1,nw) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__1 = -(*info); - xerbla_("ZUNMQR", &i__1); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || *k == 0) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - nbmin = 2; - ldwork = nw; - if ((nb > 1 && nb < *k)) { - iws = nw * nb; - if (*lwork < iws) { - nb = *lwork / ldwork; -/* - Computing MAX - Writing concatenation -*/ - i__3[0] = 1, a__1[0] = side; - i__3[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); - i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMQR", ch__1, m, n, k, &c_n1, ( - ftnlen)6, (ftnlen)2); - nbmin = max(i__1,i__2); - } - } else { - iws = nw; - } - - if (nb < nbmin || nb >= *k) { - -/* Use unblocked code */ - - zunm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ - c_offset], ldc, &work[1], &iinfo); - } else { - -/* Use blocked code */ - - if ((left && ! notran) || (! left && notran)) { - i1 = 1; - i2 = *k; - i3 = nb; - } else { - i1 = (*k - 1) / nb * nb + 1; - i2 = 1; - i3 = -nb; - } - - if (left) { - ni = *n; - jc = 1; - } else { - mi = *m; - ic = 1; - } - - i__1 = i2; - i__2 = i3; - for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { -/* Computing MIN */ - i__4 = nb, i__5 = *k - i__ + 1; - ib = min(i__4,i__5); - -/* - Form the triangular factor of the block reflector - H = H(i) H(i+1) . . . 
H(i+ib-1) -*/ - - i__4 = nq - i__ + 1; - zlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ * - a_dim1], lda, &tau[i__], t, &c__65) - ; - if (left) { - -/* H or H' is applied to C(i:m,1:n) */ - - mi = *m - i__ + 1; - ic = i__; - } else { - -/* H or H' is applied to C(1:m,i:n) */ - - ni = *n - i__ + 1; - jc = i__; - } - -/* Apply H or H' */ - - zlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[ - i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * - c_dim1], ldc, &work[1], &ldwork); -/* L10: */ - } - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNMQR */ - -} /* zunmqr_ */ - -/* Subroutine */ int zunmtr_(char *side, char *uplo, char *trans, integer *m, - integer *n, doublecomplex *a, integer *lda, doublecomplex *tau, - doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork, - integer *info) -{ - /* System generated locals */ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; - char ch__1[2]; - - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - /* Local variables */ - static integer i1, i2, nb, mi, ni, nq, nw; - static logical left; - extern logical lsame_(char *, char *); - static integer iinfo; - static logical upper; - extern /* Subroutine */ int xerbla_(char *, integer *); - extern integer ilaenv_(integer *, char *, char *, integer *, integer *, - integer *, integer *, ftnlen, ftnlen); - static integer lwkopt; - static logical lquery; - extern /* Subroutine */ int zunmql_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *, - integer *, doublecomplex *, integer *, doublecomplex *, - doublecomplex *, integer *, doublecomplex *, integer *, integer *); - - -/* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1999 - - - Purpose - ======= - - ZUNMTR overwrites the general complex M-by-N matrix C with - - SIDE = 'L' SIDE = 'R' - TRANS = 'N': Q * C C * Q - TRANS = 'C': Q**H * C C * Q**H - - where Q is a complex unitary matrix of order nq, with nq = m if - SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of - nq-1 elementary reflectors, as returned by ZHETRD: - - if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); - - if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). - - Arguments - ========= - - SIDE (input) CHARACTER*1 - = 'L': apply Q or Q**H from the Left; - = 'R': apply Q or Q**H from the Right. - - UPLO (input) CHARACTER*1 - = 'U': Upper triangle of A contains elementary reflectors - from ZHETRD; - = 'L': Lower triangle of A contains elementary reflectors - from ZHETRD. - - TRANS (input) CHARACTER*1 - = 'N': No transpose, apply Q; - = 'C': Conjugate transpose, apply Q**H. - - M (input) INTEGER - The number of rows of the matrix C. M >= 0. - - N (input) INTEGER - The number of columns of the matrix C. N >= 0. - - A (input) COMPLEX*16 array, dimension - (LDA,M) if SIDE = 'L' - (LDA,N) if SIDE = 'R' - The vectors which define the elementary reflectors, as - returned by ZHETRD. - - LDA (input) INTEGER - The leading dimension of the array A. - LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. - - TAU (input) COMPLEX*16 array, dimension - (M-1) if SIDE = 'L' - (N-1) if SIDE = 'R' - TAU(i) must contain the scalar factor of the elementary - reflector H(i), as returned by ZHETRD. - - C (input/output) COMPLEX*16 array, dimension (LDC,N) - On entry, the M-by-N matrix C. - On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. - - LDC (input) INTEGER - The leading dimension of the array C. LDC >= max(1,M). - - WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) - On exit, if INFO = 0, WORK(1) returns the optimal LWORK. 
- - LWORK (input) INTEGER - The dimension of the array WORK. - If SIDE = 'L', LWORK >= max(1,N); - if SIDE = 'R', LWORK >= max(1,M). - For optimum performance LWORK >= N*NB if SIDE = 'L', and - LWORK >=M*NB if SIDE = 'R', where NB is the optimal - blocksize. - - If LWORK = -1, then a workspace query is assumed; the routine - only calculates the optimal size of the WORK array, returns - this value as the first entry of the WORK array, and no error - message related to LWORK is issued by XERBLA. - - INFO (output) INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value - - ===================================================================== - - - Test the input arguments -*/ - - /* Parameter adjustments */ - a_dim1 = *lda; - a_offset = 1 + a_dim1 * 1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1 * 1; - c__ -= c_offset; - --work; - - /* Function Body */ - *info = 0; - left = lsame_(side, "L"); - upper = lsame_(uplo, "U"); - lquery = *lwork == -1; - -/* NQ is the order of Q and NW is the minimum dimension of WORK */ - - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if ((! left && ! lsame_(side, "R"))) { - *info = -1; - } else if ((! upper && ! lsame_(uplo, "L"))) { - *info = -2; - } else if ((! lsame_(trans, "N") && ! lsame_(trans, - "C"))) { - *info = -3; - } else if (*m < 0) { - *info = -4; - } else if (*n < 0) { - *info = -5; - } else if (*lda < max(1,nq)) { - *info = -7; - } else if (*ldc < max(1,*m)) { - *info = -10; - } else if ((*lwork < max(1,nw) && ! 
lquery)) { - *info = -12; - } - - if (*info == 0) { - if (upper) { - if (left) { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *m - 1; - i__3 = *m - 1; - nb = ilaenv_(&c__1, "ZUNMQL", ch__1, &i__2, n, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *n - 1; - i__3 = *n - 1; - nb = ilaenv_(&c__1, "ZUNMQL", ch__1, m, &i__2, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } else { - if (left) { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *m - 1; - i__3 = *m - 1; - nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &i__2, n, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } else { -/* Writing concatenation */ - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); - i__2 = *n - 1; - i__3 = *n - 1; - nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &i__2, &i__3, &c_n1, ( - ftnlen)6, (ftnlen)2); - } - } - lwkopt = max(1,nw) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - } - - if (*info != 0) { - i__2 = -(*info); - xerbla_("ZUNMTR", &i__2); - return 0; - } else if (lquery) { - return 0; - } - -/* Quick return if possible */ - - if (*m == 0 || *n == 0 || nq == 1) { - work[1].r = 1., work[1].i = 0.; - return 0; - } - - if (left) { - mi = *m - 1; - ni = *n; - } else { - mi = *m; - ni = *n - 1; - } - - if (upper) { - -/* Q was determined by a call to ZHETRD with UPLO = 'U' */ - - i__2 = nq - 1; - zunmql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda, - &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo); - } else { - -/* Q was determined by a call to ZHETRD with UPLO = 'L' */ - - if (left) { - i1 = 2; - i2 = 1; - } else { - i1 = 1; - i2 = 2; - } - i__2 = nq - 1; - zunmqr_(side, 
trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], & - c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); - } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; - return 0; - -/* End of ZUNMTR */ - -} /* zunmtr_ */ - diff --git a/numpy-1.6.2/numpy/ma/__init__.py b/numpy-1.6.2/numpy/ma/__init__.py deleted file mode 100644 index 17caa9e025..0000000000 --- a/numpy-1.6.2/numpy/ma/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -============= -Masked Arrays -============= - -Arrays sometimes contain invalid or missing data. When doing operations -on such arrays, we wish to suppress invalid values, which is the purpose masked -arrays fulfill (an example of typical use is given below). - -For example, examine the following array: - ->>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) - -When we try to calculate the mean of the data, the result is undetermined: - ->>> np.mean(x) -nan - -The mean is calculated using roughly ``np.sum(x)/len(x)``, but since -any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter -masked arrays: - ->>> m = np.ma.masked_array(x, np.isnan(x)) ->>> m -masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], - mask = [False False False True False False False True], - fill_value=1e+20) - -Here, we construct a masked array that suppress all ``NaN`` values. We -may now proceed to calculate the mean of the other values: - ->>> np.mean(m) -2.6666666666666665 - -.. [1] Not-a-Number, a floating point value that is the result of an - invalid operation. 
- -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import core -from core import * - -import extras -from extras import * - -__all__ = ['core', 'extras'] -__all__ += core.__all__ -__all__ += extras.__all__ - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/ma/bench.py b/numpy-1.6.2/numpy/ma/bench.py deleted file mode 100644 index 2cc8f6a808..0000000000 --- a/numpy-1.6.2/numpy/ma/bench.py +++ /dev/null @@ -1,165 +0,0 @@ -#! python -# encoding: utf-8 - -import timeit -#import IPython.ipapi -#ip = IPython.ipapi.get() -#from IPython import ipmagic -import numpy -#from numpy import ma -#from numpy.ma import filled -#from numpy.ma.testutils import assert_equal - - -#####--------------------------------------------------------------------------- -#---- --- Global variables --- -#####--------------------------------------------------------------------------- - -# Small arrays .................................. -xs = numpy.random.uniform(-1,1,6).reshape(2,3) -ys = numpy.random.uniform(-1,1,6).reshape(2,3) -zs = xs + 1j * ys -m1 = [[True, False, False], [False, False, True]] -m2 = [[True, False, True], [False, False, True]] -nmxs = numpy.ma.array(xs, mask=m1) -nmys = numpy.ma.array(ys, mask=m2) -nmzs = numpy.ma.array(zs, mask=m1) -# Big arrays .................................... 
-xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -zl = xl + 1j * yl -maskx = xl > 0.8 -masky = yl < -0.8 -nmxl = numpy.ma.array(xl, mask=maskx) -nmyl = numpy.ma.array(yl, mask=masky) -nmzl = numpy.ma.array(zl, mask=maskx) - -#####--------------------------------------------------------------------------- -#---- --- Functions --- -#####--------------------------------------------------------------------------- - -def timer(s, v='', nloop=500, nrep=3): - units = ["s", "ms", "µs", "ns"] - scaling = [1, 1e3, 1e6, 1e9] - print "%s : %-50s : " % (v,s), - varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] - setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) - Timer = timeit.Timer(stmt=s, setup=setup) - best = min(Timer.repeat(nrep, nloop)) / nloop - if best > 0.0: - order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) - else: - order = 3 - print "%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, - 3, - best * scaling[order], - units[order]) -# ip.magic('timeit -n%i %s' % (nloop,s)) - - - -def compare_functions_1v(func, nloop=500, - xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): - funcname = func.__name__ - print "-"*50 - print "%s on small arrays" % funcname - module, data = "numpy.ma","nmxs" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - # - print "%s on large arrays" % funcname - module, data = "numpy.ma","nmxl" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - return - -def compare_methods(methodname, args, vars='x', nloop=500, test=True, - xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): - print "-"*50 - print "%s on small arrays" % methodname - data, ver = "nm%ss" % vars, 'numpy.ma' - timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) - # - print "%s on large arrays" % methodname - data, ver = "nm%sl" % vars, 'numpy.ma' - timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, 
nloop=nloop) - return - -def compare_functions_2v(func, nloop=500, test=True, - xs=xs, nmxs=nmxs, - ys=ys, nmys=nmys, - xl=xl, nmxl=nmxl, - yl=yl, nmyl=nmyl): - funcname = func.__name__ - print "-"*50 - print "%s on small arrays" % funcname - module, data = "numpy.ma","nmxs,nmys" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - # - print "%s on large arrays" % funcname - module, data = "numpy.ma","nmxl,nmyl" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - return - - -############################################################################### - - -################################################################################ -if __name__ == '__main__': -# # Small arrays .................................. -# xs = numpy.random.uniform(-1,1,6).reshape(2,3) -# ys = numpy.random.uniform(-1,1,6).reshape(2,3) -# zs = xs + 1j * ys -# m1 = [[True, False, False], [False, False, True]] -# m2 = [[True, False, True], [False, False, True]] -# nmxs = numpy.ma.array(xs, mask=m1) -# nmys = numpy.ma.array(ys, mask=m2) -# nmzs = numpy.ma.array(zs, mask=m1) -# mmxs = maskedarray.array(xs, mask=m1) -# mmys = maskedarray.array(ys, mask=m2) -# mmzs = maskedarray.array(zs, mask=m1) -# # Big arrays .................................... -# xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -# yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -# zl = xl + 1j * yl -# maskx = xl > 0.8 -# masky = yl < -0.8 -# nmxl = numpy.ma.array(xl, mask=maskx) -# nmyl = numpy.ma.array(yl, mask=masky) -# nmzl = numpy.ma.array(zl, mask=maskx) -# mmxl = maskedarray.array(xl, mask=maskx, shrink=True) -# mmyl = maskedarray.array(yl, mask=masky, shrink=True) -# mmzl = maskedarray.array(zl, mask=maskx, shrink=True) -# - compare_functions_1v(numpy.sin) - compare_functions_1v(numpy.log) - compare_functions_1v(numpy.sqrt) - #.................................................................... 
- compare_functions_2v(numpy.multiply) - compare_functions_2v(numpy.divide) - compare_functions_2v(numpy.power) - #.................................................................... - compare_methods('ravel','', nloop=1000) - compare_methods('conjugate','','z', nloop=1000) - compare_methods('transpose','', nloop=1000) - compare_methods('compressed','', nloop=1000) - compare_methods('__getitem__','0', nloop=1000) - compare_methods('__getitem__','(0,0)', nloop=1000) - compare_methods('__getitem__','[0,-1]', nloop=1000) - compare_methods('__setitem__','0, 17', nloop=1000, test=False) - compare_methods('__setitem__','(0,0), 17', nloop=1000, test=False) - #.................................................................... - print "-"*50 - print "__setitem__ on small arrays" - timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000) - - print "-"*50 - print "__setitem__ on large arrays" - timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000) - - #.................................................................... - print "-"*50 - print "where on small arrays" - timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ',nloop=1000) - print "-"*50 - print "where on large arrays" - timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ',nloop=100) diff --git a/numpy-1.6.2/numpy/ma/core.py b/numpy-1.6.2/numpy/ma/core.py deleted file mode 100644 index 936df17f30..0000000000 --- a/numpy-1.6.2/numpy/ma/core.py +++ /dev/null @@ -1,7165 +0,0 @@ -""" -numpy.ma : a package to handle missing or invalid values. - -This package was initially written for numarray by Paul F. Dubois -at Lawrence Livermore National Laboratory. -In 2006, the package was completely rewritten by Pierre Gerard-Marchant -(University of Georgia) to make the MaskedArray class a subclass of ndarray, -and to improve support of structured arrays. - - -Copyright 1999, 2000, 2001 Regents of the University of California. -Released for unlimited redistribution. 
- -* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. -* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant - (pgmdevlist_AT_gmail_DOT_com) -* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) - -.. moduleauthor:: Pierre Gerard-Marchant - -""" -# pylint: disable-msg=E1002 - -__author__ = "Pierre GF Gerard-Marchant" -__docformat__ = "restructuredtext en" - -__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray', - 'bool_', - 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', - 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', - 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', - 'arctanh', 'argmax', 'argmin', 'argsort', 'around', - 'array', 'asarray', 'asanyarray', - 'bitwise_and', 'bitwise_or', 'bitwise_xor', - 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', - 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', - 'count', 'cumprod', 'cumsum', - 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump', - 'dumps', - 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', - 'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide', - 'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex', - 'fromfunction', - 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', - 'harden_mask', 'hypot', - 'identity', 'ids', 'indices', 'inner', 'innerproduct', - 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', - 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2', - 'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', - 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', - 'masked', 'masked_array', 'masked_equal', 'masked_greater', - 'masked_greater_equal', 'masked_inside', 'masked_invalid', - 'masked_less', 'masked_less_equal', 'masked_not_equal', - 'masked_object', 'masked_outside', 'masked_print_option', - 'masked_singleton', 'masked_values', 'masked_where', 'max', 
'maximum', - 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', - 'mod', 'multiply', 'mvoid', - 'negative', 'nomask', 'nonzero', 'not_equal', - 'ones', 'outer', 'outerproduct', - 'power', 'prod', 'product', 'ptp', 'put', 'putmask', - 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', - 'right_shift', 'round_', 'round', - 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', - 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', - 'swapaxes', - 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', - 'var', 'where', - 'zeros'] - -import cPickle - -import numpy as np -from numpy import ndarray, amax, amin, iscomplexobj, bool_ -from numpy import array as narray - -import numpy.core.umath as umath -import numpy.core.numerictypes as ntypes -from numpy.compat import getargspec, formatargspec -from numpy import expand_dims as n_expand_dims -import warnings - -import sys -if sys.version_info[0] >= 3: - from functools import reduce - -MaskType = np.bool_ -nomask = MaskType(0) - -def doc_note(initialdoc, note): - """ - Adds a Notes section to an existing docstring. - """ - if initialdoc is None: - return - if note is None: - return initialdoc - newdoc = """ - %s - - Notes - ----- - %s - """ - return newdoc % (initialdoc, note) - -def get_object_signature(obj): - """ - Get the signature from obj - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError, errmsg: - sig = '' -# msg = "Unable to retrieve the signature of %s '%s'\n"\ -# "(Initial error message: %s)" -# warnings.warn(msg % (type(obj), -# getattr(obj, '__name__', '???'), -# errmsg)) - return sig - - -#####-------------------------------------------------------------------------- -#---- --- Exceptions --- -#####-------------------------------------------------------------------------- -class MAError(Exception): - """Class for masked array related errors.""" - pass -class MaskError(MAError): - "Class for mask related errors." 
- pass - - -#####-------------------------------------------------------------------------- -#---- --- Filling options --- -#####-------------------------------------------------------------------------- -# b: boolean - c: complex - f: floats - i: integer - O: object - S: string -default_filler = {'b': True, - 'c' : 1.e20 + 0.0j, - 'f' : 1.e20, - 'i' : 999999, - 'O' : '?', - 'S' : 'N/A', - 'u' : 999999, - 'V' : '???', - 'U' : 'N/A', - } -max_filler = ntypes._minvals -max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) -min_filler = ntypes._maxvals -min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) -if 'float128' in ntypes.typeDict: - max_filler.update([(np.float128, -np.inf)]) - min_filler.update([(np.float128, +np.inf)]) - - -def default_fill_value(obj): - """ - Return the default fill value for the argument object. - - The default filling value depends on the datatype of the input - array or the type of the input scalar: - - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' - string 'N/A' - ======== ======== - - - Parameters - ---------- - obj : ndarray, dtype or scalar - The array data-type or scalar for which the default fill value - is returned. - - Returns - ------- - fill_value : scalar - The default fill value. 
- - Examples - -------- - >>> np.ma.default_fill_value(1) - 999999 - >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) - 1e+20 - >>> np.ma.default_fill_value(np.dtype(complex)) - (1e+20+0j) - - """ - if hasattr(obj, 'dtype'): - defval = _check_fill_value(None, obj.dtype) - elif isinstance(obj, np.dtype): - if obj.subdtype: - defval = default_filler.get(obj.subdtype[0].kind, '?') - else: - defval = default_filler.get(obj.kind, '?') - elif isinstance(obj, float): - defval = default_filler['f'] - elif isinstance(obj, int) or isinstance(obj, long): - defval = default_filler['i'] - elif isinstance(obj, str): - defval = default_filler['S'] - elif isinstance(obj, unicode): - defval = default_filler['U'] - elif isinstance(obj, complex): - defval = default_filler['c'] - else: - defval = default_filler['O'] - return defval - - -def _recursive_extremum_fill_value(ndtype, extremum): - names = ndtype.names - if names: - deflist = [] - for name in names: - fval = _recursive_extremum_fill_value(ndtype[name], extremum) - deflist.append(fval) - return tuple(deflist) - return extremum[ndtype] - - -def minimum_fill_value(obj): - """ - Return the maximum value that can be represented by the dtype of an object. - - This function is useful for calculating a fill value suitable for - taking the minimum of an array with a given dtype. - - Parameters - ---------- - obj : ndarray or dtype - An object that can be queried for it's numeric type. - - Returns - ------- - val : scalar - The maximum representable value. - - Raises - ------ - TypeError - If `obj` isn't a suitable numeric type. - - See Also - -------- - maximum_fill_value : The inverse function. - set_fill_value : Set the filling value of a masked array. - MaskedArray.fill_value : Return current fill value. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.int8() - >>> ma.minimum_fill_value(a) - 127 - >>> a = np.int32() - >>> ma.minimum_fill_value(a) - 2147483647 - - An array of numeric data can also be passed. 
- - >>> a = np.array([1, 2, 3], dtype=np.int8) - >>> ma.minimum_fill_value(a) - 127 - >>> a = np.array([1, 2, 3], dtype=np.float32) - >>> ma.minimum_fill_value(a) - inf - - """ - errmsg = "Unsuitable type for calculating minimum." - if hasattr(obj, 'dtype'): - return _recursive_extremum_fill_value(obj.dtype, min_filler) - elif isinstance(obj, float): - return min_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return min_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return min_filler[ntypes.typeDict['uint']] - elif isinstance(obj, np.dtype): - return min_filler[obj] - else: - raise TypeError(errmsg) - - -def maximum_fill_value(obj): - """ - Return the minimum value that can be represented by the dtype of an object. - - This function is useful for calculating a fill value suitable for - taking the maximum of an array with a given dtype. - - Parameters - ---------- - obj : {ndarray, dtype} - An object that can be queried for it's numeric type. - - Returns - ------- - val : scalar - The minimum representable value. - - Raises - ------ - TypeError - If `obj` isn't a suitable numeric type. - - See Also - -------- - minimum_fill_value : The inverse function. - set_fill_value : Set the filling value of a masked array. - MaskedArray.fill_value : Return current fill value. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.int8() - >>> ma.maximum_fill_value(a) - -128 - >>> a = np.int32() - >>> ma.maximum_fill_value(a) - -2147483648 - - An array of numeric data can also be passed. - - >>> a = np.array([1, 2, 3], dtype=np.int8) - >>> ma.maximum_fill_value(a) - -128 - >>> a = np.array([1, 2, 3], dtype=np.float32) - >>> ma.maximum_fill_value(a) - -inf - - """ - errmsg = "Unsuitable type for calculating maximum." 
- if hasattr(obj, 'dtype'): - return _recursive_extremum_fill_value(obj.dtype, max_filler) - elif isinstance(obj, float): - return max_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return max_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return max_filler[ntypes.typeDict['uint']] - elif isinstance(obj, np.dtype): - return max_filler[obj] - else: - raise TypeError(errmsg) - - -def _recursive_set_default_fill_value(dtypedescr): - deflist = [] - for currentdescr in dtypedescr: - currenttype = currentdescr[1] - if isinstance(currenttype, list): - deflist.append(tuple(_recursive_set_default_fill_value(currenttype))) - else: - deflist.append(default_fill_value(np.dtype(currenttype))) - return tuple(deflist) - -def _recursive_set_fill_value(fillvalue, dtypedescr): - fillvalue = np.resize(fillvalue, len(dtypedescr)) - output_value = [] - for (fval, descr) in zip(fillvalue, dtypedescr): - cdtype = descr[1] - if isinstance(cdtype, list): - output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) - else: - output_value.append(np.array(fval, dtype=cdtype).item()) - return tuple(output_value) - - -def _check_fill_value(fill_value, ndtype): - """ - Private function validating the given `fill_value` for the given dtype. - - If fill_value is None, it is set to the default corresponding to the dtype - if this latter is standard (no fields). If the datatype is flexible (named - fields), fill_value is set to a tuple whose elements are the default fill - values corresponding to each field. - - If fill_value is not None, its value is forced to the given dtype. 
- - """ - ndtype = np.dtype(ndtype) - fields = ndtype.fields - if fill_value is None: - if fields: - descr = ndtype.descr - fill_value = np.array(_recursive_set_default_fill_value(descr), - dtype=ndtype,) - else: - fill_value = default_fill_value(ndtype) - elif fields: - fdtype = [(_[0], _[1]) for _ in ndtype.descr] - if isinstance(fill_value, (ndarray, np.void)): - try: - fill_value = np.array(fill_value, copy=False, dtype=fdtype) - except ValueError: - err_msg = "Unable to transform %s to dtype %s" - raise ValueError(err_msg % (fill_value, fdtype)) - else: - descr = ndtype.descr - fill_value = np.asarray(fill_value, dtype=object) - fill_value = np.array(_recursive_set_fill_value(fill_value, descr), - dtype=ndtype) - else: - if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'): - fill_value = default_fill_value(ndtype) - else: - # In case we want to convert 1e+20 to int... - try: - fill_value = np.array(fill_value, copy=False, dtype=ndtype)#.item() - except OverflowError: - fill_value = default_fill_value(ndtype) - return np.array(fill_value) - - -def set_fill_value(a, fill_value): - """ - Set the filling value of a, if a is a masked array. - - This function changes the fill value of the masked array `a` in place. - If `a` is not a masked array, the function returns silently, without - doing anything. - - Parameters - ---------- - a : array_like - Input array. - fill_value : dtype - Filling value. A consistency test is performed to make sure - the value is compatible with the dtype of `a`. - - Returns - ------- - None - Nothing returned by this function. - - See Also - -------- - maximum_fill_value : Return the default fill value for a dtype. - MaskedArray.fill_value : Return current fill value. - MaskedArray.set_fill_value : Equivalent method. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(5) - >>> a - array([0, 1, 2, 3, 4]) - >>> a = ma.masked_where(a < 3, a) - >>> a - masked_array(data = [-- -- -- 3 4], - mask = [ True True True False False], - fill_value=999999) - >>> ma.set_fill_value(a, -999) - >>> a - masked_array(data = [-- -- -- 3 4], - mask = [ True True True False False], - fill_value=-999) - - Nothing happens if `a` is not a masked array. - - >>> a = range(5) - >>> a - [0, 1, 2, 3, 4] - >>> ma.set_fill_value(a, 100) - >>> a - [0, 1, 2, 3, 4] - >>> a = np.arange(5) - >>> a - array([0, 1, 2, 3, 4]) - >>> ma.set_fill_value(a, 100) - >>> a - array([0, 1, 2, 3, 4]) - - """ - if isinstance(a, MaskedArray): - a.set_fill_value(fill_value) - return - -def get_fill_value(a): - """ - Return the filling value of a, if any. Otherwise, returns the - default filling value for that type. - - """ - if isinstance(a, MaskedArray): - result = a.fill_value - else: - result = default_fill_value(a) - return result - -def common_fill_value(a, b): - """ - Return the common filling value of two masked arrays, if any. - - If ``a.fill_value == b.fill_value``, return the fill value, - otherwise return None. - - Parameters - ---------- - a, b : MaskedArray - The masked arrays for which to compare fill values. - - Returns - ------- - fill_value : scalar or None - The common fill value, or None. - - Examples - -------- - >>> x = np.ma.array([0, 1.], fill_value=3) - >>> y = np.ma.array([0, 1.], fill_value=3) - >>> np.ma.common_fill_value(x, y) - 3.0 - - """ - t1 = get_fill_value(a) - t2 = get_fill_value(b) - if t1 == t2: - return t1 - return None - - -#####-------------------------------------------------------------------------- -def filled(a, fill_value=None): - """ - Return input as an array with masked data replaced by a fill value. - - If `a` is not a `MaskedArray`, `a` itself is returned. - If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to - ``a.fill_value``. 
- - Parameters - ---------- - a : MaskedArray or array_like - An input object. - fill_value : scalar, optional - Filling value. Default is None. - - Returns - ------- - a : ndarray - The filled array. - - See Also - -------- - compressed - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], - ... [1, 0, 0], - ... [0, 0, 0]]) - >>> x.filled() - array([[999999, 1, 2], - [999999, 4, 5], - [ 6, 7, 8]]) - - """ - if hasattr(a, 'filled'): - return a.filled(fill_value) - elif isinstance(a, ndarray): - # Should we check for contiguity ? and a.flags['CONTIGUOUS']: - return a - elif isinstance(a, dict): - return np.array(a, 'O') - else: - return np.array(a) - -#####-------------------------------------------------------------------------- -def get_masked_subclass(*arrays): - """ - Return the youngest subclass of MaskedArray from a list of (masked) arrays. - In case of siblings, the first listed takes over. - - """ - if len(arrays) == 1: - arr = arrays[0] - if isinstance(arr, MaskedArray): - rcls = type(arr) - else: - rcls = MaskedArray - else: - arrcls = [type(a) for a in arrays] - rcls = arrcls[0] - if not issubclass(rcls, MaskedArray): - rcls = MaskedArray - for cls in arrcls[1:]: - if issubclass(cls, rcls): - rcls = cls - # Don't return MaskedConstant as result: revert to MaskedArray - if rcls.__name__ == 'MaskedConstant': - return MaskedArray - return rcls - -#####-------------------------------------------------------------------------- -def getdata(a, subok=True): - """ - Return the data of a masked array as an ndarray. - - Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, - else return `a` as a ndarray or subclass (depending on `subok`) if not. - - Parameters - ---------- - a : array_like - Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. - subok : bool - Whether to force the output to be a `pure` ndarray (False) or to - return a subclass of ndarray if appropriate (True, default). 
- - See Also - -------- - getmask : Return the mask of a masked array, or nomask. - getmaskarray : Return the mask of a masked array, or full array of False. - - Examples - -------- - - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) - >>> ma.getdata(a) - array([[1, 2], - [3, 4]]) - - Equivalently use the ``MaskedArray`` `data` attribute. - - >>> a.data - array([[1, 2], - [3, 4]]) - - """ - try: - data = a._data - except AttributeError: - data = np.array(a, copy=False, subok=subok) - if not subok: - return data.view(ndarray) - return data -get_data = getdata - - -def fix_invalid(a, mask=nomask, copy=True, fill_value=None): - """ - Return input with invalid data masked and replaced by a fill value. - - Invalid data means values of `nan`, `inf`, etc. - - Parameters - ---------- - a : array_like - Input array, a (subclass of) ndarray. - copy : bool, optional - Whether to use a copy of `a` (True) or to fix `a` in place (False). - Default is True. - fill_value : scalar, optional - Value used for fixing invalid data. Default is None, in which case - the ``a.fill_value`` is used. - - Returns - ------- - b : MaskedArray - The input array with invalid entries fixed. - - Notes - ----- - A copy is performed by default. 
- - Examples - -------- - >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) - >>> x - masked_array(data = [-- -1.0 nan inf], - mask = [ True False False False], - fill_value = 1e+20) - >>> np.ma.fix_invalid(x) - masked_array(data = [-- -1.0 -- --], - mask = [ True False True True], - fill_value = 1e+20) - - >>> fixed = np.ma.fix_invalid(x) - >>> fixed.data - array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20, - 1.00000000e+20]) - >>> x.data - array([ 1., -1., NaN, Inf]) - - """ - a = masked_array(a, copy=copy, mask=mask, subok=True) - #invalid = (numpy.isnan(a._data) | numpy.isinf(a._data)) - invalid = np.logical_not(np.isfinite(a._data)) - if not invalid.any(): - return a - a._mask |= invalid - if fill_value is None: - fill_value = a.fill_value - a._data[invalid] = fill_value - return a - - - -#####-------------------------------------------------------------------------- -#---- --- Ufuncs --- -#####-------------------------------------------------------------------------- -ufunc_domain = {} -ufunc_fills = {} - -class _DomainCheckInterval: - """ - Define a valid interval, so that : - - ``domain_check_interval(a,b)(x) == True`` where - ``x < a`` or ``x > b``. - - """ - def __init__(self, a, b): - "domain_check_interval(a,b)(x) = true where x < a or y > b" - if (a > b): - (a, b) = (b, a) - self.a = a - self.b = b - - def __call__ (self, x): - "Execute the call behavior." - return umath.logical_or(umath.greater (x, self.b), - umath.less(x, self.a)) - - - -class _DomainTan: - """Define a valid interval for the `tan` function, so that: - - ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` - - """ - def __init__(self, eps): - "domain_tan(eps) = true where abs(cos(x)) < eps)" - self.eps = eps - - def __call__ (self, x): - "Executes the call behavior." 
- return umath.less(umath.absolute(umath.cos(x)), self.eps) - - - -class _DomainSafeDivide: - """Define a domain for safe division.""" - def __init__ (self, tolerance=None): - self.tolerance = tolerance - - def __call__ (self, a, b): - # Delay the selection of the tolerance to here in order to reduce numpy - # import times. The calculation of these parameters is a substantial - # component of numpy's import time. - if self.tolerance is None: - self.tolerance = np.finfo(float).tiny - return umath.absolute(a) * self.tolerance >= umath.absolute(b) - - - -class _DomainGreater: - """DomainGreater(v)(x) is True where x <= v.""" - def __init__(self, critical_value): - "DomainGreater(v)(x) = true where x <= v" - self.critical_value = critical_value - - def __call__ (self, x): - "Executes the call behavior." - return umath.less_equal(x, self.critical_value) - - - -class _DomainGreaterEqual: - """DomainGreaterEqual(v)(x) is True where x < v.""" - def __init__(self, critical_value): - "DomainGreaterEqual(v)(x) = true where x < v" - self.critical_value = critical_value - - def __call__ (self, x): - "Executes the call behavior." - return umath.less(x, self.critical_value) - -#.............................................................................. -class _MaskedUnaryOperation: - """ - Defines masked version of unary operations, where invalid values are - pre-masked. - - Parameters - ---------- - mufunc : callable - The function for which to define a masked version. Made available - as ``_MaskedUnaryOperation.f``. - fill : scalar, optional - Filling value, default is 0. - domain : class instance - Domain for the function. Should be one of the ``_Domain*`` - classes. Default is None. - - """ - def __init__ (self, mufunc, fill=0, domain=None): - """ _MaskedUnaryOperation(aufunc, fill=0, domain=None) - aufunc(fill) must be defined - self(x) returns aufunc(x) - with masked values where domain(x) is true or getmask(x) is true. 
- """ - self.f = mufunc - self.fill = fill - self.domain = domain - self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) - self.__name__ = getattr(mufunc, "__name__", str(mufunc)) - ufunc_domain[mufunc] = domain - ufunc_fills[mufunc] = fill - # - def __call__ (self, a, *args, **kwargs): - "Execute the call behavior." - d = getdata(a) - # Case 1.1. : Domained function - if self.domain is not None: - # Save the error status - err_status_ini = np.geterr() - try: - np.seterr(divide='ignore', invalid='ignore') - result = self.f(d, *args, **kwargs) - finally: - np.seterr(**err_status_ini) - # Make a mask - m = ~umath.isfinite(result) - m |= self.domain(d) - m |= getmask(a) - # Case 1.2. : Function without a domain - else: - # Get the result and the mask - result = self.f(d, *args, **kwargs) - m = getmask(a) - # Case 2.1. : The result is scalarscalar - if not result.ndim: - if m: - return masked - return result - # Case 2.2. The result is an array - # We need to fill the invalid data back w/ the input - # Now, that's plain silly: in C, we would just skip the element and keep - # the original, but we do have to do it that way in Python - if m is not nomask: - # In case result has a lower dtype than the inputs (as in equal) - try: - np.putmask(result, m, d) - except TypeError: - pass - # Transform to - if isinstance(a, MaskedArray): - subtype = type(a) - else: - subtype = MaskedArray - result = result.view(subtype) - result._mask = m - result._update_from(a) - return result - # - def __str__ (self): - return "Masked version of %s. [Invalid values are masked]" % str(self.f) - - - -class _MaskedBinaryOperation: - """ - Define masked version of binary operations, where invalid - values are pre-masked. - - Parameters - ---------- - mbfunc : function - The function for which to define a masked version. Made available - as ``_MaskedBinaryOperation.f``. - domain : class instance - Default domain for the function. Should be one of the ``_Domain*`` - classes. Default is None. 
- fillx : scalar, optional - Filling value for the first argument, default is 0. - filly : scalar, optional - Filling value for the second argument, default is 0. - - """ - def __init__ (self, mbfunc, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = mbfunc - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) - self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) - ufunc_domain[mbfunc] = None - ufunc_fills[mbfunc] = (fillx, filly) - - def __call__ (self, a, b, *args, **kwargs): - "Execute the call behavior." - # Get the data, as ndarray - (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) - # Get the mask - (ma, mb) = (getmask(a), getmask(b)) - if ma is nomask: - if mb is nomask: - m = nomask - else: - m = umath.logical_or(getmaskarray(a), mb) - elif mb is nomask: - m = umath.logical_or(ma, getmaskarray(b)) - else: - m = umath.logical_or(ma, mb) - # Get the result - err_status_ini = np.geterr() - try: - np.seterr(divide='ignore', invalid='ignore') - result = self.f(da, db, *args, **kwargs) - finally: - np.seterr(**err_status_ini) - # Case 1. : scalar - if not result.ndim: - if m: - return masked - return result - # Case 2. 
: array - # Revert result to da where masked - if m.any(): - np.putmask(result, m, 0) - # This only makes sense if the operation preserved the dtype - if result.dtype == da.dtype: - result += m * da - # Transforms to a (subclass of) MaskedArray - result = result.view(get_masked_subclass(a, b)) - result._mask = m - # Update the optional info from the inputs - if isinstance(b, MaskedArray): - if isinstance(a, MaskedArray): - result._update_from(a) - else: - result._update_from(b) - elif isinstance(a, MaskedArray): - result._update_from(a) - return result - - - def reduce(self, target, axis=0, dtype=None): - """Reduce `target` along the given `axis`.""" - if isinstance(target, MaskedArray): - tclass = type(target) - else: - tclass = MaskedArray - m = getmask(target) - t = filled(target, self.filly) - if t.shape == (): - t = t.reshape(1) - if m is not nomask: - m = make_mask(m, copy=1) - m.shape = (1,) - if m is nomask: - return self.f.reduce(t, axis).view(tclass) - t = t.view(tclass) - t._mask = m - tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype) - mr = umath.logical_and.reduce(m, axis) - tr = tr.view(tclass) - if mr.ndim > 0: - tr._mask = mr - return tr - elif mr: - return masked - return tr - - def outer (self, a, b): - """Return the function applied to the outer product of a and b. - - """ - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = umath.logical_or.outer(ma, mb) - if (not m.ndim) and m: - return masked - (da, db) = (getdata(a), getdata(b)) - d = self.f.outer(da, db) - if m is not nomask: - np.putmask(d, m, da) - if d.shape: - d = d.view(get_masked_subclass(a, b)) - d._mask = m - return d - - def accumulate (self, target, axis=0): - """Accumulate `target` along `axis` after filling with y fill - value. 
- - """ - if isinstance(target, MaskedArray): - tclass = type(target) - else: - tclass = MaskedArray - t = filled(target, self.filly) - return self.f.accumulate(t, axis).view(tclass) - - def __str__ (self): - return "Masked version of " + str(self.f) - - - -class _DomainedBinaryOperation: - """ - Define binary operations that have a domain, like divide. - - They have no reduce, outer or accumulate. - - Parameters - ---------- - mbfunc : function - The function for which to define a masked version. Made available - as ``_DomainedBinaryOperation.f``. - domain : class instance - Default domain for the function. Should be one of the ``_Domain*`` - classes. - fillx : scalar, optional - Filling value for the first argument, default is 0. - filly : scalar, optional - Filling value for the second argument, default is 0. - - """ - def __init__ (self, dbfunc, domain, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = dbfunc - self.domain = domain - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) - self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) - ufunc_domain[dbfunc] = domain - ufunc_fills[dbfunc] = (fillx, filly) - - def __call__(self, a, b, *args, **kwargs): - "Execute the call behavior." 
- # Get the data and the mask - (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) - (ma, mb) = (getmask(a), getmask(b)) - # Get the result - err_status_ini = np.geterr() - try: - np.seterr(divide='ignore', invalid='ignore') - result = self.f(da, db, *args, **kwargs) - finally: - np.seterr(**err_status_ini) - # Get the mask as a combination of ma, mb and invalid - m = ~umath.isfinite(result) - m |= ma - m |= mb - # Apply the domain - domain = ufunc_domain.get(self.f, None) - if domain is not None: - m |= filled(domain(da, db), True) - # Take care of the scalar case first - if (not m.ndim): - if m: - return masked - else: - return result - # When the mask is True, put back da - np.putmask(result, m, 0) - result += m * da - result = result.view(get_masked_subclass(a, b)) - result._mask = m - if isinstance(b, MaskedArray): - if isinstance(a, MaskedArray): - result._update_from(a) - else: - result._update_from(b) - elif isinstance(a, MaskedArray): - result._update_from(a) - return result - - def __str__ (self): - return "Masked version of " + str(self.f) - -#.............................................................................. 
-# Unary ufuncs -exp = _MaskedUnaryOperation(umath.exp) -conjugate = _MaskedUnaryOperation(umath.conjugate) -sin = _MaskedUnaryOperation(umath.sin) -cos = _MaskedUnaryOperation(umath.cos) -tan = _MaskedUnaryOperation(umath.tan) -arctan = _MaskedUnaryOperation(umath.arctan) -arcsinh = _MaskedUnaryOperation(umath.arcsinh) -sinh = _MaskedUnaryOperation(umath.sinh) -cosh = _MaskedUnaryOperation(umath.cosh) -tanh = _MaskedUnaryOperation(umath.tanh) -abs = absolute = _MaskedUnaryOperation(umath.absolute) -fabs = _MaskedUnaryOperation(umath.fabs) -negative = _MaskedUnaryOperation(umath.negative) -floor = _MaskedUnaryOperation(umath.floor) -ceil = _MaskedUnaryOperation(umath.ceil) -around = _MaskedUnaryOperation(np.round_) -logical_not = _MaskedUnaryOperation(umath.logical_not) -# Domained unary ufuncs ....................................................... -sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, - _DomainGreaterEqual(0.0)) -log = _MaskedUnaryOperation(umath.log, 1.0, - _DomainGreater(0.0)) -log2 = _MaskedUnaryOperation(umath.log2, 1.0, - _DomainGreater(0.0)) -log10 = _MaskedUnaryOperation(umath.log10, 1.0, - _DomainGreater(0.0)) -tan = _MaskedUnaryOperation(umath.tan, 0.0, - _DomainTan(1e-35)) -arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, - _DomainCheckInterval(-1.0, 1.0)) -arccos = _MaskedUnaryOperation(umath.arccos, 0.0, - _DomainCheckInterval(-1.0, 1.0)) -arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, - _DomainGreaterEqual(1.0)) -arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, - _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) -# Binary ufuncs ............................................................... 
-add = _MaskedBinaryOperation(umath.add) -subtract = _MaskedBinaryOperation(umath.subtract) -multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) -arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) -equal = _MaskedBinaryOperation(umath.equal) -equal.reduce = None -not_equal = _MaskedBinaryOperation(umath.not_equal) -not_equal.reduce = None -less_equal = _MaskedBinaryOperation(umath.less_equal) -less_equal.reduce = None -greater_equal = _MaskedBinaryOperation(umath.greater_equal) -greater_equal.reduce = None -less = _MaskedBinaryOperation(umath.less) -less.reduce = None -greater = _MaskedBinaryOperation(umath.greater) -greater.reduce = None -logical_and = _MaskedBinaryOperation(umath.logical_and) -alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce -logical_or = _MaskedBinaryOperation(umath.logical_or) -sometrue = logical_or.reduce -logical_xor = _MaskedBinaryOperation(umath.logical_xor) -bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) -bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) -bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) -hypot = _MaskedBinaryOperation(umath.hypot) -# Domained binary ufuncs ...................................................... 
-divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) -true_divide = _DomainedBinaryOperation(umath.true_divide, - _DomainSafeDivide(), 0, 1) -floor_divide = _DomainedBinaryOperation(umath.floor_divide, - _DomainSafeDivide(), 0, 1) -remainder = _DomainedBinaryOperation(umath.remainder, - _DomainSafeDivide(), 0, 1) -fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) -mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) - - -#####-------------------------------------------------------------------------- -#---- --- Mask creation functions --- -#####-------------------------------------------------------------------------- - -def _recursive_make_descr(datatype, newtype=bool_): - "Private function allowing recursion in make_descr." - # Do we have some name fields ? - if datatype.names: - descr = [] - for name in datatype.names: - field = datatype.fields[name] - if len(field) == 3: - # Prepend the title to the name - name = (field[-1], name) - descr.append((name, _recursive_make_descr(field[0], newtype))) - return descr - # Is this some kind of composite a la (np.float,2) - elif datatype.subdtype: - mdescr = list(datatype.subdtype) - mdescr[0] = newtype - return tuple(mdescr) - else: - return newtype - -def make_mask_descr(ndtype): - """ - Construct a dtype description list from a given dtype. - - Returns a new dtype object, with the type of all fields in `ndtype` to a - boolean type. Field names are not altered. - - Parameters - ---------- - ndtype : dtype - The dtype to convert. - - Returns - ------- - result : dtype - A dtype that looks like `ndtype`, the type of all fields is boolean. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> dtype = np.dtype({'names':['foo', 'bar'], - 'formats':[np.float32, np.int]}) - >>> dtype - dtype([('foo', '>> ma.make_mask_descr(dtype) - dtype([('foo', '|b1'), ('bar', '|b1')]) - >>> ma.make_mask_descr(np.float32) - - - """ - # Make sure we do have a dtype - if not isinstance(ndtype, np.dtype): - ndtype = np.dtype(ndtype) - return np.dtype(_recursive_make_descr(ndtype, np.bool)) - -def getmask(a): - """ - Return the mask of a masked array, or nomask. - - Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the - mask is not `nomask`, else return `nomask`. To guarantee a full array - of booleans of the same shape as a, use `getmaskarray`. - - Parameters - ---------- - a : array_like - Input `MaskedArray` for which the mask is required. - - See Also - -------- - getdata : Return the data of a masked array as an ndarray. - getmaskarray : Return the mask of a masked array, or full array of False. - - Examples - -------- - - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) - >>> ma.getmask(a) - array([[False, True], - [False, False]], dtype=bool) - - Equivalently use the `MaskedArray` `mask` attribute. - - >>> a.mask - array([[False, True], - [False, False]], dtype=bool) - - Result when mask == `nomask` - - >>> b = ma.masked_array([[1,2],[3,4]]) - >>> b - masked_array(data = - [[1 2] - [3 4]], - mask = - False, - fill_value=999999) - >>> ma.nomask - False - >>> ma.getmask(b) == ma.nomask - True - >>> b.mask == ma.nomask - True - - """ - return getattr(a, '_mask', nomask) -get_mask = getmask - -def getmaskarray(arr): - """ - Return the mask of a masked array, or full boolean array of False. - - Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and - the mask is not `nomask`, else return a full boolean array of False of - the same shape as `arr`. 
- - Parameters - ---------- - arr : array_like - Input `MaskedArray` for which the mask is required. - - See Also - -------- - getmask : Return the mask of a masked array, or nomask. - getdata : Return the data of a masked array as an ndarray. - - Examples - -------- - - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) - >>> ma.getmaskarray(a) - array([[False, True], - [False, False]], dtype=bool) - - Result when mask == ``nomask`` - - >>> b = ma.masked_array([[1,2],[3,4]]) - >>> b - masked_array(data = - [[1 2] - [3 4]], - mask = - False, - fill_value=999999) - >>> >ma.getmaskarray(b) - array([[False, False], - [False, False]], dtype=bool) - - """ - mask = getmask(arr) - if mask is nomask: - mask = make_mask_none(np.shape(arr), getdata(arr).dtype) - return mask - -def is_mask(m): - """ - Return True if m is a valid, standard mask. - - This function does not check the contents of the input, only that the - type is MaskType. In particular, this function returns False if the - mask has a flexible dtype. - - Parameters - ---------- - m : array_like - Array to test. - - Returns - ------- - result : bool - True if `m.dtype.type` is MaskType, False otherwise. - - See Also - -------- - isMaskedArray : Test whether input is an instance of MaskedArray. - - Examples - -------- - >>> import numpy.ma as ma - >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) - >>> m - masked_array(data = [-- 1 -- 2 3], - mask = [ True False True False False], - fill_value=999999) - >>> ma.is_mask(m) - False - >>> ma.is_mask(m.mask) - True - - Input must be an ndarray (or have similar attributes) - for it to be considered a valid mask. - - >>> m = [False, True, False] - >>> ma.is_mask(m) - False - >>> m = np.array([False, True, False]) - >>> m - array([False, True, False], dtype=bool) - >>> ma.is_mask(m) - True - - Arrays with complex dtypes don't return True. 
- - >>> dtype = np.dtype({'names':['monty', 'pithon'], - 'formats':[np.bool, np.bool]}) - >>> dtype - dtype([('monty', '|b1'), ('pithon', '|b1')]) - >>> m = np.array([(True, False), (False, True), (True, False)], - dtype=dtype) - >>> m - array([(True, False), (False, True), (True, False)], - dtype=[('monty', '|b1'), ('pithon', '|b1')]) - >>> ma.is_mask(m) - False - - """ - try: - return m.dtype.type is MaskType - except AttributeError: - return False - -def make_mask(m, copy=False, shrink=True, dtype=MaskType): - """ - Create a boolean mask from an array. - - Return `m` as a boolean mask, creating a copy if necessary or requested. - The function can accept any sequence that is convertible to integers, - or ``nomask``. Does not require that contents must be 0s and 1s, values - of 0 are interepreted as False, everything else as True. - - Parameters - ---------- - m : array_like - Potential mask. - copy : bool, optional - Whether to return a copy of `m` (True) or `m` itself (False). - shrink : bool, optional - Whether to shrink `m` to ``nomask`` if all its values are False. - dtype : dtype, optional - Data-type of the output mask. By default, the output mask has - a dtype of MaskType (bool). If the dtype is flexible, each field - has a boolean dtype. - - Returns - ------- - result : ndarray - A boolean mask derived from `m`. - - Examples - -------- - >>> import numpy.ma as ma - >>> m = [True, False, True, True] - >>> ma.make_mask(m) - array([ True, False, True, True], dtype=bool) - >>> m = [1, 0, 1, 1] - >>> ma.make_mask(m) - array([ True, False, True, True], dtype=bool) - >>> m = [1, 0, 2, -3] - >>> ma.make_mask(m) - array([ True, False, True, True], dtype=bool) - - Effect of the `shrink` parameter. - - >>> m = np.zeros(4) - >>> m - array([ 0., 0., 0., 0.]) - >>> ma.make_mask(m) - False - >>> ma.make_mask(m, shrink=False) - array([False, False, False, False], dtype=bool) - - Using a flexible `dtype`. 
- - >>> m = [1, 0, 1, 1] - >>> n = [0, 1, 0, 0] - >>> arr = [] - >>> for man, mouse in zip(m, n): - ... arr.append((man, mouse)) - >>> arr - [(1, 0), (0, 1), (1, 0), (1, 0)] - >>> dtype = np.dtype({'names':['man', 'mouse'], - 'formats':[np.int, np.int]}) - >>> arr = np.array(arr, dtype=dtype) - >>> arr - array([(1, 0), (0, 1), (1, 0), (1, 0)], - dtype=[('man', '>> ma.make_mask(arr, dtype=dtype) - array([(True, False), (False, True), (True, False), (True, False)], - dtype=[('man', '|b1'), ('mouse', '|b1')]) - - """ - if m is nomask: - return nomask - elif isinstance(m, ndarray): - # We won't return after this point to make sure we can shrink the mask - # Fill the mask in case there are missing data - m = filled(m, True) - # Make sure the input dtype is valid - dtype = make_mask_descr(dtype) - if m.dtype == dtype: - if copy: - result = m.copy() - else: - result = m - else: - result = np.array(m, dtype=dtype, copy=copy) - else: - result = np.array(filled(m, True), dtype=MaskType) - # Bas les masques ! - if shrink and (not result.dtype.names) and (not result.any()): - return nomask - else: - return result - - -def make_mask_none(newshape, dtype=None): - """ - Return a boolean mask of the given shape, filled with False. - - This function returns a boolean ndarray with all entries False, that can - be used in common mask manipulations. If a complex dtype is specified, the - type of each field is converted to a boolean type. - - Parameters - ---------- - newshape : tuple - A tuple indicating the shape of the mask. - dtype: {None, dtype}, optional - If None, use a MaskType instance. Otherwise, use a new datatype with - the same fields as `dtype`, converted to boolean types. - - Returns - ------- - result : ndarray - An ndarray of appropriate shape and dtype, filled with False. - - See Also - -------- - make_mask : Create a boolean mask from an array. - make_mask_descr : Construct a dtype description list from a given dtype. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> ma.make_mask_none((3,)) - array([False, False, False], dtype=bool) - - Defining a more complex dtype. - - >>> dtype = np.dtype({'names':['foo', 'bar'], - 'formats':[np.float32, np.int]}) - >>> dtype - dtype([('foo', '>> ma.make_mask_none((3,), dtype=dtype) - array([(False, False), (False, False), (False, False)], - dtype=[('foo', '|b1'), ('bar', '|b1')]) - - """ - if dtype is None: - result = np.zeros(newshape, dtype=MaskType) - else: - result = np.zeros(newshape, dtype=make_mask_descr(dtype)) - return result - -def mask_or (m1, m2, copy=False, shrink=True): - """ - Combine two masks with the ``logical_or`` operator. - - The result may be a view on `m1` or `m2` if the other is `nomask` - (i.e. False). - - Parameters - ---------- - m1, m2 : array_like - Input masks. - copy : bool, optional - If copy is False and one of the inputs is `nomask`, return a view - of the other input mask. Defaults to False. - shrink : bool, optional - Whether to shrink the output to `nomask` if all its values are - False. Defaults to True. - - Returns - ------- - mask : output mask - The result masks values that are masked in either `m1` or `m2`. - - Raises - ------ - ValueError - If `m1` and `m2` have different flexible dtypes. 
- - Examples - -------- - >>> m1 = np.ma.make_mask([0, 1, 1, 0]) - >>> m2 = np.ma.make_mask([1, 0, 0, 0]) - >>> np.ma.mask_or(m1, m2) - array([ True, True, True, False], dtype=bool) - - """ - def _recursive_mask_or(m1, m2, newmask): - names = m1.dtype.names - for name in names: - current1 = m1[name] - if current1.dtype.names: - _recursive_mask_or(current1, m2[name], newmask[name]) - else: - umath.logical_or(current1, m2[name], newmask[name]) - return - # - if (m1 is nomask) or (m1 is False): - dtype = getattr(m2, 'dtype', MaskType) - return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) - if (m2 is nomask) or (m2 is False): - dtype = getattr(m1, 'dtype', MaskType) - return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) - if m1 is m2 and is_mask(m1): - return m1 - (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) - if (dtype1 != dtype2): - raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) - if dtype1.names: - newmask = np.empty_like(m1) - _recursive_mask_or(m1, m2, newmask) - return newmask - return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) - - -def flatten_mask(mask): - """ - Returns a completely flattened version of the mask, where nested fields - are collapsed. - - Parameters - ---------- - mask : array_like - Input array, which will be interpreted as booleans. - - Returns - ------- - flattened_mask : ndarray of bools - The flattened input. 
- - Examples - -------- - >>> mask = np.array([0, 0, 1], dtype=np.bool) - >>> flatten_mask(mask) - array([False, False, True], dtype=bool) - - >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - >>> flatten_mask(mask) - array([False, False, False, True], dtype=bool) - - >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) - >>> flatten_mask(mask) - array([False, False, False, False, False, True], dtype=bool) - - """ - # - def _flatmask(mask): - "Flatten the mask and returns a (maybe nested) sequence of booleans." - mnames = mask.dtype.names - if mnames: - return [flatten_mask(mask[name]) for name in mnames] - else: - return mask - # - def _flatsequence(sequence): - "Generates a flattened version of the sequence." - try: - for element in sequence: - if hasattr(element, '__iter__'): - for f in _flatsequence(element): - yield f - else: - yield element - except TypeError: - yield sequence - # - mask = np.asarray(mask) - flattened = _flatsequence(_flatmask(mask)) - return np.array([_ for _ in flattened], dtype=bool) - - -def _check_mask_axis(mask, axis): - "Check whether there are masked values along the given axis" - if mask is not nomask: - return mask.all(axis=axis) - return nomask - - -#####-------------------------------------------------------------------------- -#--- --- Masking functions --- -#####-------------------------------------------------------------------------- - -def masked_where(condition, a, copy=True): - """ - Mask an array where a condition is met. - - Return `a` as an array masked where `condition` is True. - Any masked values of `a` or `condition` are also masked in the output. - - Parameters - ---------- - condition : array_like - Masking condition. When `condition` tests floating point values for - equality, consider using ``masked_values`` instead. - a : array_like - Array to mask. 
- copy : bool - If True (default) make a copy of `a` in the result. If False modify - `a` in place and return a view. - - Returns - ------- - result : MaskedArray - The result of masking `a` where `condition` is True. - - See Also - -------- - masked_values : Mask using floating point equality. - masked_equal : Mask where equal to a given value. - masked_not_equal : Mask where `not` equal to a given value. - masked_less_equal : Mask where less than or equal to a given value. - masked_greater_equal : Mask where greater than or equal to a given value. - masked_less : Mask where less than a given value. - masked_greater : Mask where greater than a given value. - masked_inside : Mask inside a given interval. - masked_outside : Mask outside a given interval. - masked_invalid : Mask invalid values (NaNs or infs). - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_where(a <= 2, a) - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) - - Mask array `b` conditional on `a`. - - >>> b = ['a', 'b', 'c', 'd'] - >>> ma.masked_where(a == 2, b) - masked_array(data = [a b -- d], - mask = [False False True False], - fill_value=N/A) - - Effect of the `copy` argument. - - >>> c = ma.masked_where(a <= 2, a) - >>> c - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) - >>> c[0] = 99 - >>> c - masked_array(data = [99 -- -- 3], - mask = [False True True False], - fill_value=999999) - >>> a - array([0, 1, 2, 3]) - >>> c = ma.masked_where(a <= 2, a, copy=False) - >>> c[0] = 99 - >>> c - masked_array(data = [99 -- -- 3], - mask = [False True True False], - fill_value=999999) - >>> a - array([99, 1, 2, 3]) - - When `condition` or `a` contain masked values. 
- - >>> a = np.arange(4) - >>> a = ma.masked_where(a == 2, a) - >>> a - masked_array(data = [0 1 -- 3], - mask = [False False True False], - fill_value=999999) - >>> b = np.arange(4) - >>> b = ma.masked_where(b == 0, b) - >>> b - masked_array(data = [-- 1 2 3], - mask = [ True False False False], - fill_value=999999) - >>> ma.masked_where(a == 3, b) - masked_array(data = [-- 1 -- --], - mask = [ True False True True], - fill_value=999999) - - """ - # Make sure that condition is a valid standard-type mask. - cond = make_mask(condition) - a = np.array(a, copy=copy, subok=True) - - (cshape, ashape) = (cond.shape, a.shape) - if cshape and cshape != ashape: - raise IndexError("Inconsistant shape between the condition and the input"\ - " (got %s and %s)" % (cshape, ashape)) - if hasattr(a, '_mask'): - cond = mask_or(cond, a._mask) - cls = type(a) - else: - cls = MaskedArray - result = a.view(cls) - result._mask = cond - return result - - -def masked_greater(x, value, copy=True): - """ - Mask an array where greater than a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x > value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_greater(a, 2) - masked_array(data = [0 1 2 --], - mask = [False False False True], - fill_value=999999) - - """ - return masked_where(greater(x, value), x, copy=copy) - - -def masked_greater_equal(x, value, copy=True): - """ - Mask an array where greater than or equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x >= value). - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_greater_equal(a, 2) - masked_array(data = [0 1 -- --], - mask = [False False True True], - fill_value=999999) - - """ - return masked_where(greater_equal(x, value), x, copy=copy) - - -def masked_less(x, value, copy=True): - """ - Mask an array where less than a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x < value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_less(a, 2) - masked_array(data = [-- -- 2 3], - mask = [ True True False False], - fill_value=999999) - - """ - return masked_where(less(x, value), x, copy=copy) - - -def masked_less_equal(x, value, copy=True): - """ - Mask an array where less than or equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x <= value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_less_equal(a, 2) - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) - - """ - return masked_where(less_equal(x, value), x, copy=copy) - - -def masked_not_equal(x, value, copy=True): - """ - Mask an array where `not` equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x != value). - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_not_equal(a, 2) - masked_array(data = [-- -- 2 --], - mask = [ True True False True], - fill_value=999999) - - """ - return masked_where(not_equal(x, value), x, copy=copy) - - -def masked_equal(x, value, copy=True): - """ - Mask an array where equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x == value). For floating point arrays, - consider using ``masked_values(x, value)``. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_values : Mask using floating point equality. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_equal(a, 2) - masked_array(data = [0 1 -- 3], - mask = [False False True False], - fill_value=999999) - - """ - # An alternative implementation relies on filling first: probably not needed. - # d = filled(x, 0) - # c = umath.equal(d, value) - # m = mask_or(c, getmask(x)) - # return array(d, mask=m, copy=copy) - output = masked_where(equal(x, value), x, copy=copy) - output.fill_value = value - return output - - -def masked_inside(x, v1, v2, copy=True): - """ - Mask an array inside a given interval. - - Shortcut to ``masked_where``, where `condition` is True for `x` inside - the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` - can be given in either order. - - See Also - -------- - masked_where : Mask where a condition is met. - - Notes - ----- - The array `x` is prefilled with its filling value. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] - >>> ma.masked_inside(x, -0.3, 0.3) - masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], - mask = [False False True True False False], - fill_value=1e+20) - - The order of `v1` and `v2` doesn't matter. 
- - >>> ma.masked_inside(x, 0.3, -0.3) - masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], - mask = [False False True True False False], - fill_value=1e+20) - - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf >= v1) & (xf <= v2) - return masked_where(condition, x, copy=copy) - - -def masked_outside(x, v1, v2, copy=True): - """ - Mask an array outside a given interval. - - Shortcut to ``masked_where``, where `condition` is True for `x` outside - the interval [v1,v2] (x < v1)|(x > v2). - The boundaries `v1` and `v2` can be given in either order. - - See Also - -------- - masked_where : Mask where a condition is met. - - Notes - ----- - The array `x` is prefilled with its filling value. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] - >>> ma.masked_outside(x, -0.3, 0.3) - masked_array(data = [-- -- 0.01 0.2 -- --], - mask = [ True True False False True True], - fill_value=1e+20) - - The order of `v1` and `v2` doesn't matter. - - >>> ma.masked_outside(x, 0.3, -0.3) - masked_array(data = [-- -- 0.01 0.2 -- --], - mask = [ True True False False True True], - fill_value=1e+20) - - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf < v1) | (xf > v2) - return masked_where(condition, x, copy=copy) - - -def masked_object(x, value, copy=True, shrink=True): - """ - Mask the array `x` where the data are exactly equal to value. - - This function is similar to `masked_values`, but only suitable - for object arrays: for floating point, use `masked_values` instead. - - Parameters - ---------- - x : array_like - Array to mask - value : object - Comparison value - copy : {True, False}, optional - Whether to return a copy of `x`. - shrink : {True, False}, optional - Whether to collapse a mask full of False to nomask - - Returns - ------- - result : MaskedArray - The result of masking `x` where equal to `value`. - - See Also - -------- - masked_where : Mask where a condition is met. 
- masked_equal : Mask where equal to a given value (integers). - masked_values : Mask using floating point equality. - - Examples - -------- - >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) - >>> # don't eat spoiled food - >>> eat = ma.masked_object(food, 'green_eggs') - >>> print eat - [-- ham] - >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) - >>> eat = ma.masked_object(fresh_food, 'green_eggs') - >>> print eat - [cheese ham pineapple] - - Note that `mask` is set to ``nomask`` if possible. - - >>> eat - masked_array(data = [cheese ham pineapple], - mask = False, - fill_value=?) - - """ - if isMaskedArray(x): - condition = umath.equal(x._data, value) - mask = x._mask - else: - condition = umath.equal(np.asarray(x), value) - mask = nomask - mask = mask_or(mask, make_mask(condition, shrink=shrink)) - return masked_array(x, mask=mask, copy=copy, fill_value=value) - - -def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): - """ - Mask using floating point equality. - - Return a MaskedArray, masked where the data in array `x` are approximately - equal to `value`, i.e. where the following condition is True - - (abs(x - value) <= atol+rtol*abs(value)) - - The fill_value is set to `value` and the mask is set to ``nomask`` if - possible. For integers, consider using ``masked_equal``. - - Parameters - ---------- - x : array_like - Array to mask. - value : float - Masking value. - rtol : float, optional - Tolerance parameter. - atol : float, optional - Tolerance parameter (1e-8). - copy : bool, optional - Whether to return a copy of `x`. - shrink : bool, optional - Whether to collapse a mask full of False to ``nomask``. - - Returns - ------- - result : MaskedArray - The result of masking `x` where approximately equal to `value`. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_equal : Mask where equal to a given value (integers). 
- - Examples - -------- - >>> import numpy.ma as ma - >>> x = np.array([1, 1.1, 2, 1.1, 3]) - >>> ma.masked_values(x, 1.1) - masked_array(data = [1.0 -- 2.0 -- 3.0], - mask = [False True False True False], - fill_value=1.1) - - Note that `mask` is set to ``nomask`` if possible. - - >>> ma.masked_values(x, 1.5) - masked_array(data = [ 1. 1.1 2. 1.1 3. ], - mask = False, - fill_value=1.5) - - For integers, the fill value will be different in general to the - result of ``masked_equal``. - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - >>> ma.masked_values(x, 2) - masked_array(data = [0 1 -- 3 4], - mask = [False False True False False], - fill_value=2) - >>> ma.masked_equal(x, 2) - masked_array(data = [0 1 -- 3 4], - mask = [False False True False False], - fill_value=999999) - - """ - mabs = umath.absolute - xnew = filled(x, value) - if issubclass(xnew.dtype.type, np.floating): - condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value)) - mask = getattr(x, '_mask', nomask) - else: - condition = umath.equal(xnew, value) - mask = nomask - mask = mask_or(mask, make_mask(condition, shrink=shrink)) - return masked_array(xnew, mask=mask, copy=copy, fill_value=value) - - -def masked_invalid(a, copy=True): - """ - Mask an array where invalid values occur (NaNs or infs). - - This function is a shortcut to ``masked_where``, with - `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. - Only applies to arrays with a dtype where NaNs or infs make sense - (i.e. floating point types), but accepts any array_like object. - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=np.float) - >>> a[2] = np.NaN - >>> a[3] = np.PINF - >>> a - array([ 0., 1., NaN, Inf, 4.]) - >>> ma.masked_invalid(a) - masked_array(data = [0.0 1.0 -- -- 4.0], - mask = [False False True True False], - fill_value=1e+20) - - """ - a = np.array(a, copy=copy, subok=True) - mask = getattr(a, '_mask', None) - if mask is not None: - condition = ~(np.isfinite(getdata(a))) - if mask is not nomask: - condition |= mask - cls = type(a) - else: - condition = ~(np.isfinite(a)) - cls = MaskedArray - result = a.view(cls) - result._mask = condition - return result - - -#####-------------------------------------------------------------------------- -#---- --- Printing options --- -#####-------------------------------------------------------------------------- - -class _MaskedPrintOption: - """ - Handle the string used to represent missing data in a masked array. - - """ - def __init__ (self, display): - "Create the masked_print_option object." - self._display = display - self._enabled = True - - def display(self): - "Display the string to print for masked values." - return self._display - - def set_display (self, s): - "Set the string to print for masked values." - self._display = s - - def enabled(self): - "Is the use of the display value enabled?" - return self._enabled - - def enable(self, shrink=1): - "Set the enabling shrink to `shrink`." - self._enabled = shrink - - def __str__ (self): - return str(self._display) - - __repr__ = __str__ - -#if you single index into a masked location you get this object. -masked_print_option = _MaskedPrintOption('--') - - -def _recursive_printoption(result, mask, printopt): - """ - Puts printoptions in result where mask is True. 
- Private function allowing for recursion - """ - names = result.dtype.names - for name in names: - (curdata, curmask) = (result[name], mask[name]) - if curdata.dtype.names: - _recursive_printoption(curdata, curmask, printopt) - else: - np.putmask(curdata, curmask, printopt) - return - -_print_templates = dict(long_std="""\ -masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, - %(nlen)s fill_value = %(fill)s) -""", - short_std="""\ -masked_%(name)s(data = %(data)s, - %(nlen)s mask = %(mask)s, -%(nlen)s fill_value = %(fill)s) -""", - long_flx="""\ -masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, -%(nlen)s fill_value = %(fill)s, - %(nlen)s dtype = %(dtype)s) -""", - short_flx="""\ -masked_%(name)s(data = %(data)s, -%(nlen)s mask = %(mask)s, -%(nlen)s fill_value = %(fill)s, -%(nlen)s dtype = %(dtype)s) -""") - -#####-------------------------------------------------------------------------- -#---- --- MaskedArray class --- -#####-------------------------------------------------------------------------- - -def _recursive_filled(a, mask, fill_value): - """ - Recursively fill `a` with `fill_value`. - Private function - """ - names = a.dtype.names - for name in names: - current = a[name] - if current.dtype.names: - _recursive_filled(current, mask[name], fill_value[name]) - else: - np.putmask(current, mask[name], fill_value[name]) - - - -def flatten_structured_array(a): - """ - Flatten a structured array. - - The data type of the output is chosen such that it can represent all of the - (nested) fields. - - Parameters - ---------- - a : structured array - - Returns - ------- - output : masked array or ndarray - A flattened masked array if the input is a masked array, otherwise a - standard ndarray. 
- - Examples - -------- - >>> ndtype = [('a', int), ('b', float)] - >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) - >>> flatten_structured_array(a) - array([[1., 1.], - [2., 2.]]) - - """ - # - def flatten_sequence(iterable): - """Flattens a compound of nested iterables.""" - for elm in iter(iterable): - if hasattr(elm, '__iter__'): - for f in flatten_sequence(elm): - yield f - else: - yield elm - # - a = np.asanyarray(a) - inishape = a.shape - a = a.ravel() - if isinstance(a, MaskedArray): - out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) - out = out.view(MaskedArray) - out._mask = np.array([tuple(flatten_sequence(d.item())) - for d in getmaskarray(a)]) - else: - out = np.array([tuple(flatten_sequence(d.item())) for d in a]) - if len(inishape) > 1: - newshape = list(out.shape) - newshape[0] = inishape - out.shape = tuple(flatten_sequence(newshape)) - return out - - - -class _arraymethod(object): - """ - Define a wrapper for basic array methods. - - Upon call, returns a masked array, where the new ``_data`` array is - the output of the corresponding method called on the original - ``_data``. - - If `onmask` is True, the new mask is the output of the method called - on the initial mask. Otherwise, the new mask is just a reference - to the initial mask. - - Attributes - ---------- - _onmask : bool - Holds the `onmask` parameter. - obj : object - The object calling `_arraymethod`. - - Parameters - ---------- - funcname : str - Name of the function to apply on data. - onmask : bool - Whether the mask must be processed also (True) or left - alone (False). Default is True. Make available as `_onmask` - attribute. - - """ - def __init__(self, funcname, onmask=True): - self.__name__ = funcname - self._onmask = onmask - self.obj = None - self.__doc__ = self.getdoc() - # - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- methdoc = getattr(ndarray, self.__name__, None) or \ - getattr(np, self.__name__, None) - if methdoc is not None: - return methdoc.__doc__ - # - def __get__(self, obj, objtype=None): - self.obj = obj - return self - # - def __call__(self, *args, **params): - methodname = self.__name__ - instance = self.obj - # Fallback : if the instance has not been initialized, use the first arg - if instance is None: - args = list(args) - instance = args.pop(0) - data = instance._data - mask = instance._mask - cls = type(instance) - result = getattr(data, methodname)(*args, **params).view(cls) - result._update_from(instance) - if result.ndim: - if not self._onmask: - result.__setmask__(mask) - elif mask is not nomask: - result.__setmask__(getattr(mask, methodname)(*args, **params)) - else: - if mask.ndim and (not mask.dtype.names and mask.all()): - return masked - return result - - - -class MaskedIterator(object): - """ - Flat iterator object to iterate over masked arrays. - - A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array - `x`. It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. - - Iteration is done in C-contiguous style, with the last index varying the - fastest. The iterator can also be indexed using basic slicing or - advanced indexing. - - See Also - -------- - MaskedArray.flat : Return a flat iterator over an array. - MaskedArray.flatten : Returns a flattened copy of an array. - - Notes - ----- - `MaskedIterator` is not exported by the `ma` module. Instead of - instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. - - Examples - -------- - >>> x = np.ma.array(arange(6).reshape(2, 3)) - >>> fl = x.flat - >>> type(fl) - - >>> for item in fl: - ... print item - ... 
- 0 - 1 - 2 - 3 - 4 - 5 - - Extracting more than a single element b indexing the `MaskedIterator` - returns a masked array: - - >>> fl[2:4] - masked_array(data = [2 3], - mask = False, - fill_value = 999999) - - """ - def __init__(self, ma): - self.ma = ma - self.dataiter = ma._data.flat - # - if ma._mask is nomask: - self.maskiter = None - else: - self.maskiter = ma._mask.flat - - def __iter__(self): - return self - - def __getitem__(self, indx): - result = self.dataiter.__getitem__(indx).view(type(self.ma)) - if self.maskiter is not None: - _mask = self.maskiter.__getitem__(indx) - _mask.shape = result.shape - result._mask = _mask - return result - - ### This won't work is ravel makes a copy - def __setitem__(self, index, value): - self.dataiter[index] = getdata(value) - if self.maskiter is not None: - self.maskiter[index] = getmaskarray(value) - - def next(self): - """ - Return the next value, or raise StopIteration. - - Examples - -------- - >>> x = np.ma.array([3, 2], mask=[0, 1]) - >>> fl = x.flat - >>> fl.next() - 3 - >>> fl.next() - masked_array(data = --, - mask = True, - fill_value = 1e+20) - >>> fl.next() - Traceback (most recent call last): - File "", line 1, in - File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next - d = self.dataiter.next() - StopIteration - - """ - d = self.dataiter.next() - if self.maskiter is not None and self.maskiter.next(): - d = masked - return d - - - - -class MaskedArray(ndarray): - """ - An array class with possibly masked values. - - Masked values of True exclude the corresponding element from any - computation. - - Construction:: - - x = MaskedArray(data, mask=nomask, dtype=None, - copy=False, subok=True, ndmin=0, fill_value=None, - keep_mask=True, hard_mask=None, shrink=True) - - Parameters - ---------- - data : array_like - Input data. - mask : sequence, optional - Mask. Must be convertible to an array of booleans with the same - shape as `data`. True indicates a masked (i.e. invalid) data. 
- dtype : dtype, optional - Data type of the output. - If `dtype` is None, the type of the data argument (``data.dtype``) - is used. If `dtype` is not None and different from ``data.dtype``, - a copy is performed. - copy : bool, optional - Whether to copy the input data (True), or to use a reference instead. - Default is False. - subok : bool, optional - Whether to return a subclass of `MaskedArray` if possible (True) or a - plain `MaskedArray`. Default is True. - ndmin : int, optional - Minimum number of dimensions. Default is 0. - fill_value : scalar, optional - Value used to fill in the masked values when necessary. - If None, a default based on the data-type is used. - keep_mask : bool, optional - Whether to combine `mask` with the mask of the input data, if any - (True), or to use only `mask` for the output (False). Default is True. - hard_mask : bool, optional - Whether to use a hard mask or not. With a hard mask, masked values - cannot be unmasked. Default is False. - shrink : bool, optional - Whether to force compression of an empty mask. Default is True. - - """ - - __array_priority__ = 15 - _defaultmask = nomask - _defaulthardmask = False - _baseclass = ndarray - - def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, - subok=True, ndmin=0, fill_value=None, - keep_mask=True, hard_mask=None, shrink=True, - **options): - """ - Create a new masked array from scratch. - - Notes - ----- - A masked array can also be created by taking a .view(MaskedArray). - - """ - # Process data............ - _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin) - _baseclass = getattr(data, '_baseclass', type(_data)) - # Check that we're not erasing the mask.......... - if isinstance(data, MaskedArray) and (data.shape != _data.shape): - copy = True - # Careful, cls might not always be MaskedArray... 
- if not isinstance(data, cls) or not subok: - _data = ndarray.view(_data, cls) - else: - _data = ndarray.view(_data, type(data)) - # Backwards compatibility w/ numpy.core.ma ....... - if hasattr(data, '_mask') and not isinstance(data, ndarray): - _data._mask = data._mask - _sharedmask = True - # Process mask ............................... - # Number of named fields (or zero if none) - names_ = _data.dtype.names or () - # Type of the mask - if names_: - mdtype = make_mask_descr(_data.dtype) - else: - mdtype = MaskType - # Case 1. : no mask in input ............ - if mask is nomask: - # Erase the current mask ? - if not keep_mask: - # With a reduced version - if shrink: - _data._mask = nomask - # With full version - else: - _data._mask = np.zeros(_data.shape, dtype=mdtype) - # Check whether we missed something - elif isinstance(data, (tuple, list)): - try: - # If data is a sequence of masked array - mask = np.array([getmaskarray(m) for m in data], - dtype=mdtype) - except ValueError: - # If data is nested - mask = nomask - # Force shrinking of the mask if needed (and possible) - if (mdtype == MaskType) and mask.any(): - _data._mask = mask - _data._sharedmask = False - else: - if copy: - _data._mask = _data._mask.copy() - _data._sharedmask = False - # Reset the shape of the original mask - if getmask(data) is not nomask: - data._mask.shape = data.shape - else: - _data._sharedmask = True - # Case 2. : With a mask in input ........ 
- else: - # Read the mask with the current mdtype - try: - mask = np.array(mask, copy=copy, dtype=mdtype) - # Or assume it's a sequence of bool/int - except TypeError: - mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - # Make sure the mask and the data have the same shape - if mask.shape != _data.shape: - (nd, nm) = (_data.size, mask.size) - if nm == 1: - mask = np.resize(mask, _data.shape) - elif nm == nd: - mask = np.reshape(mask, _data.shape) - else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." - raise MaskError, msg % (nd, nm) - copy = True - # Set the mask to the new value - if _data._mask is nomask: - _data._mask = mask - _data._sharedmask = not copy - else: - if not keep_mask: - _data._mask = mask - _data._sharedmask = not copy - else: - if names_: - def _recursive_or(a, b): - "do a|=b on each field of a, recursively" - for name in a.dtype.names: - (af, bf) = (a[name], b[name]) - if af.dtype.names: - _recursive_or(af, bf) - else: - af |= bf - return - _recursive_or(_data._mask, mask) - else: - _data._mask = np.logical_or(mask, _data._mask) - _data._sharedmask = False - # Update fill_value....... - if fill_value is None: - fill_value = getattr(data, '_fill_value', None) - # But don't run the check unless we have something to check.... - if fill_value is not None: - _data._fill_value = _check_fill_value(fill_value, _data.dtype) - # Process extra options .. - if hard_mask is None: - _data._hardmask = getattr(data, '_hardmask', False) - else: - _data._hardmask = hard_mask - _data._baseclass = _baseclass - return _data - # - def _update_from(self, obj): - """Copies some attributes of obj to self. 
- """ - if obj is not None and isinstance(obj, ndarray): - _baseclass = type(obj) - else: - _baseclass = ndarray - # We need to copy the _basedict to avoid backward propagation - _optinfo = {} - _optinfo.update(getattr(obj, '_optinfo', {})) - _optinfo.update(getattr(obj, '_basedict', {})) - if not isinstance(obj, MaskedArray): - _optinfo.update(getattr(obj, '__dict__', {})) - _dict = dict(_fill_value=getattr(obj, '_fill_value', None), - _hardmask=getattr(obj, '_hardmask', False), - _sharedmask=getattr(obj, '_sharedmask', False), - _isfield=getattr(obj, '_isfield', False), - _baseclass=getattr(obj, '_baseclass', _baseclass), - _optinfo=_optinfo, - _basedict=_optinfo) - self.__dict__.update(_dict) - self.__dict__.update(_optinfo) - return - - - def __array_finalize__(self, obj): - """Finalizes the masked array. - """ - # Get main attributes ......... - self._update_from(obj) - if isinstance(obj, ndarray): - odtype = obj.dtype - if odtype.names: - _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype)) - else: - _mask = getattr(obj, '_mask', nomask) - else: - _mask = nomask - self._mask = _mask - # Finalize the mask ........... - if self._mask is not nomask: - try: - self._mask.shape = self.shape - except ValueError: - self._mask = nomask - except (TypeError, AttributeError): - # When _mask.shape is not writable (because it's a void) - pass - # Finalize the fill_value for structured arrays - if self.dtype.names: - if self._fill_value is None: - self._fill_value = _check_fill_value(None, self.dtype) - return - - - def __array_wrap__(self, obj, context=None): - """ - Special hook for ufuncs. - Wraps the numpy array and sets the mask according to context. - """ - result = obj.view(type(self)) - result._update_from(self) - #.......... - if context is not None: - result._mask = result._mask.copy() - (func, args, _) = context - m = reduce(mask_or, [getmaskarray(arg) for arg in args]) - # Get the domain mask................ 
- domain = ufunc_domain.get(func, None) - if domain is not None: - # Take the domain, and make sure it's a ndarray - if len(args) > 2: - d = filled(reduce(domain, args), True) - else: - d = filled(domain(*args), True) - # Fill the result where the domain is wrong - try: - # Binary domain: take the last value - fill_value = ufunc_fills[func][-1] - except TypeError: - # Unary domain: just use this one - fill_value = ufunc_fills[func] - except KeyError: - # Domain not recognized, use fill_value instead - fill_value = self.fill_value - result = result.copy() - np.putmask(result, d, fill_value) - # Update the mask - if m is nomask: - if d is not nomask: - m = d - else: - # Don't modify inplace, we risk back-propagation - m = (m | d) - # Make sure the mask has the proper size - if result.shape == () and m: - return masked - else: - result._mask = m - result._sharedmask = False - #.... - return result - - - def view(self, dtype=None, type=None): - if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - except TypeError: - output = ndarray.view(self, dtype) - else: - output = ndarray.view(self, dtype, type) - # Should we update the mask ? - if (getattr(output, '_mask', nomask) is not nomask): - if dtype is None: - dtype = output.dtype - mdtype = make_mask_descr(dtype) - output._mask = self._mask.view(mdtype, ndarray) - # Try to reset the shape of the mask (if we don't have a void) - try: - output._mask.shape = output.shape - except (AttributeError, TypeError): - pass - # Make sure to reset the _fill_value if needed - if getattr(output, '_fill_value', None) is not None: - output._fill_value = None - return output - view.__doc__ = ndarray.view.__doc__ - - - def astype(self, newtype): - """ - Returns a copy of the MaskedArray cast to given newtype. 
- - Returns - ------- - output : MaskedArray - A copy of self cast to input newtype. - The returned record shape matches self.shape. - - Examples - -------- - >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1.0 -- 3.1] - [-- 5.0 --] - [7.0 -- 9.0]] - >>> print x.astype(int32) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - - """ - newtype = np.dtype(newtype) - output = self._data.astype(newtype).view(type(self)) - output._update_from(self) - names = output.dtype.names - if names is None: - output._mask = self._mask.astype(bool) - else: - if self._mask is nomask: - output._mask = nomask - else: - output._mask = self._mask.astype([(n, bool) for n in names]) - # Don't check _fill_value if it's None, that'll speed things up - if self._fill_value is not None: - output._fill_value = _check_fill_value(self._fill_value, newtype) - return output - - - def __getitem__(self, indx): - """x.__getitem__(y) <==> x[y] - - Return the item described by i, as a masked array. - - """ - # This test is useful, but we should keep things light... -# if getmask(indx) is not nomask: -# msg = "Masked arrays must be filled before they can be used as indices!" -# raise IndexError, msg - _data = ndarray.view(self, ndarray) - dout = ndarray.__getitem__(_data, indx) - # We could directly use ndarray.__getitem__ on self... - # But then we would have to modify __array_finalize__ to prevent the - # mask of being reshaped if it hasn't been set up properly yet... - # So it's easier to stick to the current version - _mask = self._mask - if not getattr(dout, 'ndim', False): - # A record ................ - if isinstance(dout, np.void): - mask = _mask[indx] -# If we can make mvoid a subclass of np.void, that'd be what we'd need -# return mvoid(dout, mask=mask) - if flatten_mask(mask).any(): - dout = mvoid(dout, mask=mask) - else: - return dout - # Just a scalar............ - elif _mask is not nomask and _mask[indx]: - return masked - else: - # Force dout to MA ........ 
- dout = dout.view(type(self)) - # Inherit attributes from self - dout._update_from(self) - # Check the fill_value .... - if isinstance(indx, basestring): - if self._fill_value is not None: - dout._fill_value = self._fill_value[indx] - dout._isfield = True - # Update the mask if needed - if _mask is not nomask: - dout._mask = _mask[indx] - dout._sharedmask = True -# Note: Don't try to check for m.any(), that'll take too long... - return dout - - def __setitem__(self, indx, value): - """x.__setitem__(i, y) <==> x[i]=y - - Set item described by index. If value is masked, masks those - locations. - - """ - if self is masked: - raise MaskError, 'Cannot alter the masked element.' - # This test is useful, but we should keep things light... -# if getmask(indx) is not nomask: -# msg = "Masked arrays must be filled before they can be used as indices!" -# raise IndexError, msg - _data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass')) - _mask = ndarray.__getattribute__(self, '_mask') - if isinstance(indx, basestring): - ndarray.__setitem__(_data, indx, value) - if _mask is nomask: - self._mask = _mask = make_mask_none(self.shape, self.dtype) - _mask[indx] = getmask(value) - return - #........................................ - _dtype = ndarray.__getattribute__(_data, 'dtype') - nbfields = len(_dtype.names or ()) - #........................................ - if value is masked: - # The mask wasn't set: create a full version... - if _mask is nomask: - _mask = self._mask = make_mask_none(self.shape, _dtype) - # Now, set the mask to its value. - if nbfields: - _mask[indx] = tuple([True] * nbfields) - else: - _mask[indx] = True - if not self._isfield: - self._sharedmask = False - return - #........................................ 
- # Get the _data part of the new value - dval = value - # Get the _mask part of the new value - mval = getattr(value, '_mask', nomask) - if nbfields and mval is nomask: - mval = tuple([False] * nbfields) - if _mask is nomask: - # Set the data, then the mask - ndarray.__setitem__(_data, indx, dval) - if mval is not nomask: - _mask = self._mask = make_mask_none(self.shape, _dtype) - ndarray.__setitem__(_mask, indx, mval) - elif not self._hardmask: - # Unshare the mask if necessary to avoid propagation - if not self._isfield: - self.unshare_mask() - _mask = ndarray.__getattribute__(self, '_mask') - # Set the data, then the mask - ndarray.__setitem__(_data, indx, dval) - ndarray.__setitem__(_mask, indx, mval) - elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): - indx = indx * umath.logical_not(_mask) - ndarray.__setitem__(_data, indx, dval) - else: - if nbfields: - err_msg = "Flexible 'hard' masks are not yet supported..." - raise NotImplementedError(err_msg) - mindx = mask_or(_mask[indx], mval, copy=True) - dindx = self._data[indx] - if dindx.size > 1: - dindx[~mindx] = dval - elif mindx is nomask: - dindx = dval - ndarray.__setitem__(_data, indx, dindx) - _mask[indx] = mindx - return - - - def __getslice__(self, i, j): - """x.__getslice__(i, j) <==> x[i:j] - - Return the slice described by (i, j). The use of negative - indices is not supported. - - """ - return self.__getitem__(slice(i, j)) - - def __setslice__(self, i, j, value): - """x.__setslice__(i, j, value) <==> x[i:j]=value - - Set the slice (i,j) of a to value. If value is masked, mask - those locations. - - """ - self.__setitem__(slice(i, j), value) - - - def __setmask__(self, mask, copy=False): - """Set the mask. - - """ - idtype = ndarray.__getattribute__(self, 'dtype') - current_mask = ndarray.__getattribute__(self, '_mask') - if mask is masked: - mask = True - # Make sure the mask is set - if (current_mask is nomask): - # Just don't do anything is there's nothing to do... 
- if mask is nomask: - return - current_mask = self._mask = make_mask_none(self.shape, idtype) - # No named fields......... - if idtype.names is None: - # Hardmask: don't unmask the data - if self._hardmask: - current_mask |= mask - # Softmask: set everything to False - else: - current_mask.flat = mask - # Named fields w/ ............ - else: - mdtype = current_mask.dtype - mask = np.array(mask, copy=False) - # Mask is a singleton - if not mask.ndim: - # It's a boolean : make a record - if mask.dtype.kind == 'b': - mask = np.array(tuple([mask.item()]*len(mdtype)), - dtype=mdtype) - # It's a record: make sure the dtype is correct - else: - mask = mask.astype(mdtype) - # Mask is a sequence - else: - # Make sure the new mask is a ndarray with the proper dtype - try: - mask = np.array(mask, copy=copy, dtype=mdtype) - # Or assume it's a sequence of bool/int - except TypeError: - mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - # Hardmask: don't unmask the data - if self._hardmask: - for n in idtype.names: - current_mask[n] |= mask[n] - # Softmask: set everything to False - else: - current_mask.flat = mask - # Reshape if needed - if current_mask.shape: - current_mask.shape = self.shape - return - _set_mask = __setmask__ - #.... - def _get_mask(self): - """Return the current mask. - - """ - # We could try to force a reshape, but that wouldn't work in some cases. -# return self._mask.reshape(self.shape) - return self._mask - mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") - - - def _get_recordmask(self): - """ - Return the mask of the records. - A record is masked when all the fields are masked. - - """ - _mask = ndarray.__getattribute__(self, '_mask').view(ndarray) - if _mask.dtype.names is None: - return _mask - return np.all(flatten_structured_array(_mask), axis= -1) - - - def _set_recordmask(self): - """Return the mask of the records. - A record is masked when all the fields are masked. 
- - """ - raise NotImplementedError("Coming soon: setting the mask per records!") - recordmask = property(fget=_get_recordmask) - - #............................................ - def harden_mask(self): - """ - Force the mask to hard. - - Whether the mask of a masked array is hard or soft is determined by - its `hardmask` property. `harden_mask` sets `hardmask` to True. - - See Also - -------- - hardmask - - """ - self._hardmask = True - return self - - def soften_mask(self): - """ - Force the mask to soft. - - Whether the mask of a masked array is hard or soft is determined by - its `hardmask` property. `soften_mask` sets `hardmask` to False. - - See Also - -------- - hardmask - - """ - self._hardmask = False - return self - - hardmask = property(fget=lambda self: self._hardmask, - doc="Hardness of the mask") - - - def unshare_mask(self): - """ - Copy the mask and set the sharedmask flag to False. - - Whether the mask is shared between masked arrays can be seen from - the `sharedmask` property. `unshare_mask` ensures the mask is not shared. - A copy of the mask is only made if it was shared. - - See Also - -------- - sharedmask - - """ - if self._sharedmask: - self._mask = self._mask.copy() - self._sharedmask = False - return self - - sharedmask = property(fget=lambda self: self._sharedmask, - doc="Share status of the mask (read-only).") - - def shrink_mask(self): - """ - Reduce a mask to nomask when possible. - - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) - >>> x.mask - array([[False, False], - [False, False]], dtype=bool) - >>> x.shrink_mask() - >>> x.mask - False - - """ - m = self._mask - if m.ndim and not m.any(): - self._mask = nomask - return self - - #............................................ 
- - baseclass = property(fget=lambda self:self._baseclass, - doc="Class of the underlying data (read-only).") - - def _get_data(self): - """Return the current data, as a view of the original - underlying data. - - """ - return ndarray.view(self, self._baseclass) - _data = property(fget=_get_data) - data = property(fget=_get_data) - - def _get_flat(self): - "Return a flat iterator." - return MaskedIterator(self) - # - def _set_flat (self, value): - "Set a flattened version of self to value." - y = self.ravel() - y[:] = value - # - flat = property(fget=_get_flat, fset=_set_flat, - doc="Flat version of the array.") - - - def get_fill_value(self): - """ - Return the filling value of the masked array. - - Returns - ------- - fill_value : scalar - The filling value. - - Examples - -------- - >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: - ... np.ma.array([0, 1], dtype=dt).get_fill_value() - ... - 999999 - 999999 - 1e+20 - (1e+20+0j) - - >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.get_fill_value() - -inf - - """ - if self._fill_value is None: - self._fill_value = _check_fill_value(None, self.dtype) - return self._fill_value[()] - - def set_fill_value(self, value=None): - """ - Set the filling value of the masked array. - - Parameters - ---------- - value : scalar, optional - The new filling value. Default is None, in which case a default - based on the data type is used. - - See Also - -------- - ma.set_fill_value : Equivalent function. 
- - Examples - -------- - >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.fill_value - -inf - >>> x.set_fill_value(np.pi) - >>> x.fill_value - 3.1415926535897931 - - Reset to default: - - >>> x.set_fill_value() - >>> x.fill_value - 1e+20 - - """ - target = _check_fill_value(value, self.dtype) - _fill_value = self._fill_value - if _fill_value is None: - # Create the attribute if it was undefined - self._fill_value = target - else: - # Don't overwrite the attribute, just fill it (for propagation) - _fill_value[()] = target - - fill_value = property(fget=get_fill_value, fset=set_fill_value, - doc="Filling value.") - - - def filled(self, fill_value=None): - """ - Return a copy of self, with masked values filled with a given value. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries (None by default). - If None, the `fill_value` attribute of the array is used instead. - - Returns - ------- - filled_array : ndarray - A copy of ``self`` with invalid entries replaced by *fill_value* - (be it the function argument or the attribute of ``self``. - - Notes - ----- - The result is **not** a MaskedArray! - - Examples - -------- - >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) - >>> x.filled() - array([1, 2, -999, 4, -999]) - >>> type(x.filled()) - - - Subclassing is preserved. 
This means that if the data part of the masked - array is a matrix, `filled` returns a matrix: - - >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.filled() - matrix([[ 1, 999999], - [999999, 4]]) - - """ - m = self._mask - if m is nomask: - return self._data - # - if fill_value is None: - fill_value = self.fill_value - else: - fill_value = _check_fill_value(fill_value, self.dtype) - # - if self is masked_singleton: - return np.asanyarray(fill_value) - # - if m.dtype.names: - result = self._data.copy() - _recursive_filled(result, self._mask, fill_value) - elif not m.any(): - return self._data - else: - result = self._data.copy() - try: - np.putmask(result, m, fill_value) - except (TypeError, AttributeError): - fill_value = narray(fill_value, dtype=object) - d = result.astype(object) - result = np.choose(m, (d, fill_value)) - except IndexError: - #ok, if scalar - if self._data.shape: - raise - elif m: - result = np.array(fill_value, dtype=self.dtype) - else: - result = self._data - return result - - def compressed(self): - """ - Return all the non-masked data as a 1-D array. - - Returns - ------- - data : ndarray - A new `ndarray` holding the non-masked data is returned. - - Notes - ----- - The result is **not** a MaskedArray! - - Examples - -------- - >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) - >>> x.compressed() - array([0, 1]) - >>> type(x.compressed()) - - - """ - data = ndarray.ravel(self._data) - if self._mask is not nomask: - data = data.compress(np.logical_not(ndarray.ravel(self._mask))) - return data - - - def compress(self, condition, axis=None, out=None): - """ - Return `a` where condition is ``True``. - - If condition is a `MaskedArray`, missing values are considered - as ``False``. - - Parameters - ---------- - condition : var - Boolean 1-d array selecting which entries to return. If len(condition) - is less than the size of a along the axis, then output is truncated - to length of condition array. 
- axis : {None, int}, optional - Axis along which the operation must be performed. - out : {None, ndarray}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - Returns - ------- - result : MaskedArray - A :class:`MaskedArray` object. - - Notes - ----- - Please note the difference with :meth:`compressed` ! - The output of :meth:`compress` has a mask, the output of - :meth:`compressed` does not. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> x.compress([1, 0, 1]) - masked_array(data = [1 3], - mask = [False False], - fill_value=999999) - - >>> x.compress([1, 0, 1], axis=1) - masked_array(data = - [[1 3] - [-- --] - [7 9]], - mask = - [[False False] - [ True True] - [False False]], - fill_value=999999) - - """ - # Get the basic components - (_data, _mask) = (self._data, self._mask) - # Force the condition to a regular ndarray (forget the missing values...) - condition = np.array(condition, copy=False, subok=False) - # - _new = _data.compress(condition, axis=axis, out=out).view(type(self)) - _new._update_from(self) - if _mask is not nomask: - _new._mask = _mask.compress(condition, axis=axis) - return _new - - #............................................ - def __str__(self): - """String representation. 
- - """ - if masked_print_option.enabled(): - f = masked_print_option - if self is masked: - return str(f) - m = self._mask - if m is nomask: - res = self._data - else: - if m.shape == (): - if m.dtype.names: - m = m.view((bool, len(m.dtype))) - if m.any(): - r = np.array(self._data.tolist(), dtype=object) - np.putmask(r, m, f) - return str(tuple(r)) - else: - return str(self._data) - elif m: - return str(f) - else: - return str(self._data) - # convert to object array to make filled work - names = self.dtype.names - if names is None: - res = self._data.astype("|O8") - res[m] = f - else: - rdtype = _recursive_make_descr(self.dtype, "|O8") - res = self._data.astype(rdtype) - _recursive_printoption(res, m, f) - else: - res = self.filled(self.fill_value) - return str(res) - - def __repr__(self): - """Literal string representation. - - """ - n = len(self.shape) - name = repr(self._data).split('(')[0] - parameters = dict(name=name, nlen=" " * len(name), - data=str(self), mask=str(self._mask), - fill=str(self.fill_value), dtype=str(self.dtype)) - if self.dtype.names: - if n <= 1: - return _print_templates['short_flx'] % parameters - return _print_templates['long_flx'] % parameters - elif n <= 1: - return _print_templates['short_std'] % parameters - return _print_templates['long_std'] % parameters - - - def __eq__(self, other): - "Check whether other equals self elementwise" - if self is masked: - return masked - omask = getattr(other, '_mask', nomask) - if omask is nomask: - check = ndarray.__eq__(self.filled(0), other) - try: - check = check.view(type(self)) - check._mask = self._mask - except AttributeError: - # Dang, we have a bool instead of an array: return the bool - return check - else: - odata = filled(other, 0) - check = ndarray.__eq__(self.filled(0), odata).view(type(self)) - if self._mask is nomask: - check._mask = omask - else: - mask = mask_or(self._mask, omask) - if mask.dtype.names: - if mask.size > 1: - axis = 1 - else: - axis = None - try: - mask = 
mask.view((bool_, len(self.dtype))).all(axis) - except ValueError: - mask = np.all([[f[n].all() for n in mask.dtype.names] - for f in mask], axis=axis) - check._mask = mask - return check - # - def __ne__(self, other): - "Check whether other doesn't equal self elementwise" - if self is masked: - return masked - omask = getattr(other, '_mask', nomask) - if omask is nomask: - check = ndarray.__ne__(self.filled(0), other) - try: - check = check.view(type(self)) - check._mask = self._mask - except AttributeError: - # In case check is a boolean (or a numpy.bool) - return check - else: - odata = filled(other, 0) - check = ndarray.__ne__(self.filled(0), odata).view(type(self)) - if self._mask is nomask: - check._mask = omask - else: - mask = mask_or(self._mask, omask) - if mask.dtype.names: - if mask.size > 1: - axis = 1 - else: - axis = None - try: - mask = mask.view((bool_, len(self.dtype))).all(axis) - except ValueError: - mask = np.all([[f[n].all() for n in mask.dtype.names] - for f in mask], axis=axis) - check._mask = mask - return check - # - def __add__(self, other): - "Add other to self, and return a new masked array." - return add(self, other) - # - def __radd__(self, other): - "Add other to self, and return a new masked array." - return add(self, other) - # - def __sub__(self, other): - "Subtract other to self, and return a new masked array." - return subtract(self, other) - # - def __rsub__(self, other): - "Subtract other to self, and return a new masked array." - return subtract(other, self) - # - def __mul__(self, other): - "Multiply other by self, and return a new masked array." - return multiply(self, other) - # - def __rmul__(self, other): - "Multiply other by self, and return a new masked array." - return multiply(self, other) - # - def __div__(self, other): - "Divide other into self, and return a new masked array." - return divide(self, other) - # - def __truediv__(self, other): - "Divide other into self, and return a new masked array." 
- return true_divide(self, other) - # - def __rtruediv__(self, other): - "Divide other into self, and return a new masked array." - return true_divide(other, self) - # - def __floordiv__(self, other): - "Divide other into self, and return a new masked array." - return floor_divide(self, other) - # - def __rfloordiv__(self, other): - "Divide other into self, and return a new masked array." - return floor_divide(other, self) - # - def __pow__(self, other): - "Raise self to the power other, masking the potential NaNs/Infs" - return power(self, other) - # - def __rpow__(self, other): - "Raise self to the power other, masking the potential NaNs/Infs" - return power(other, self) - #............................................ - def __iadd__(self, other): - "Add other to self in-place." - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - else: - if m is not nomask: - self._mask += m - ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other))) - return self - #.... - def __isub__(self, other): - "Subtract other from self in-place." - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - elif m is not nomask: - self._mask += m - ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other))) - return self - #.... - def __imul__(self, other): - "Multiply self by other in-place." - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - elif m is not nomask: - self._mask += m - ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other))) - return self - #.... - def __idiv__(self, other): - "Divide self by other in-place." 
- other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.divide] - other_data = np.where(dom_mask, fval, other_data) -# self._mask = mask_or(self._mask, new_mask) - self._mask |= new_mask - ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data)) - return self - #.... - def __ifloordiv__(self, other): - "Floor divide self by other in-place." - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.floor_divide] - other_data = np.where(dom_mask, fval, other_data) -# self._mask = mask_or(self._mask, new_mask) - self._mask |= new_mask - ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data)) - return self - #.... - def __itruediv__(self, other): - "True divide self by other in-place." - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.true_divide] - other_data = np.where(dom_mask, fval, other_data) -# self._mask = mask_or(self._mask, new_mask) - self._mask |= new_mask - ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data)) - return self - #... - def __ipow__(self, other): - "Raise self to the power other, in place." 
- other_data = getdata(other) - other_mask = getmask(other) - err_status = np.geterr() - try: - np.seterr(divide='ignore', invalid='ignore') - ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data)) - finally: - np.seterr(**err_status) - invalid = np.logical_not(np.isfinite(self._data)) - if invalid.any(): - if self._mask is not nomask: - self._mask |= invalid - else: - self._mask = invalid - np.putmask(self._data, invalid, self.fill_value) - new_mask = mask_or(other_mask, invalid) - self._mask = mask_or(self._mask, new_mask) - return self - #............................................ - def __float__(self): - "Convert to float." - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted "\ - "to Python scalars") - elif self._mask: - warnings.warn("Warning: converting a masked element to nan.") - return np.nan - return float(self.item()) - - def __int__(self): - "Convert to int." - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted "\ - "to Python scalars") - elif self._mask: - raise MaskError, 'Cannot convert masked element to a Python int.' - return int(self.item()) - - - def get_imag(self): - """ - Return the imaginary part of the masked array. - - The returned array is a view on the imaginary part of the `MaskedArray` - whose `get_imag` method is called. - - Parameters - ---------- - None - - Returns - ------- - result : MaskedArray - The imaginary part of the masked array. - - See Also - -------- - get_real, real, imag - - Examples - -------- - >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.get_imag() - masked_array(data = [1.0 -- 1.6], - mask = [False True False], - fill_value = 1e+20) - - """ - result = self._data.imag.view(type(self)) - result.__setmask__(self._mask) - return result - imag = property(fget=get_imag, doc="Imaginary part.") - - def get_real(self): - """ - Return the real part of the masked array. 
- - The returned array is a view on the real part of the `MaskedArray` - whose `get_real` method is called. - - Parameters - ---------- - None - - Returns - ------- - result : MaskedArray - The real part of the masked array. - - See Also - -------- - get_imag, real, imag - - Examples - -------- - >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.get_real() - masked_array(data = [1.0 -- 3.45], - mask = [False True False], - fill_value = 1e+20) - - """ - result = self._data.real.view(type(self)) - result.__setmask__(self._mask) - return result - real = property(fget=get_real, doc="Real part") - - - #............................................ - def count(self, axis=None): - """ - Count the non-masked elements of the array along the given axis. - - Parameters - ---------- - axis : int, optional - Axis along which to count the non-masked elements. If `axis` is - `None`, all non-masked elements are counted. - - Returns - ------- - result : int or ndarray - If `axis` is `None`, an integer count is returned. When `axis` is - not `None`, an array with shape determined by the lengths of the - remaining axes, is returned. - - See Also - -------- - count_masked : Count masked elements in array or along a given axis. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.arange(6).reshape((2, 3)) - >>> a[1, :] = ma.masked - >>> a - masked_array(data = - [[0 1 2] - [-- -- --]], - mask = - [[False False False] - [ True True True]], - fill_value = 999999) - >>> a.count() - 3 - - When the `axis` keyword is specified an array of appropriate size is - returned. 
- - >>> a.count(axis=0) - array([1, 1, 1]) - >>> a.count(axis=1) - array([3, 0]) - - """ - m = self._mask - s = self.shape - ls = len(s) - if m is nomask: - if ls == 0: - return 1 - if ls == 1: - return s[0] - if axis is None: - return self.size - else: - n = s[axis] - t = list(s) - del t[axis] - return np.ones(t) * n - n1 = np.size(m, axis) - n2 = m.astype(int).sum(axis) - if axis is None: - return (n1 - n2) - else: - return narray(n1 - n2) - #............................................ - flatten = _arraymethod('flatten') - # - def ravel(self): - """ - Returns a 1D version of self, as a view. - - Returns - ------- - MaskedArray - Output view is of shape ``(self.size,)`` (or - ``(np.ma.product(self.shape),)``). - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print x.ravel() - [1 -- 3 -- 5 -- 7 -- 9] - - """ - r = ndarray.ravel(self._data).view(type(self)) - r._update_from(self) - if self._mask is not nomask: - r._mask = ndarray.ravel(self._mask).reshape(r.shape) - else: - r._mask = nomask - return r - # - repeat = _arraymethod('repeat') - # - def reshape (self, *s, **kwargs): - """ - Give a new shape to the array without changing its data. - - Returns a masked array containing the same data, but with a new shape. - The result is a view on the original array; if this is not possible, a - ValueError is raised. - - Parameters - ---------- - shape : int or tuple of ints - The new shape should be compatible with the original shape. If an - integer is supplied, then the result will be a 1-D array of that - length. - order : {'C', 'F'}, optional - Determines whether the array data should be viewed as in C - (row-major) or FORTRAN (column-major) order. - - Returns - ------- - reshaped_array : array - A new view on the array. - - See Also - -------- - reshape : Equivalent function in the masked array module. - numpy.ndarray.reshape : Equivalent method on ndarray object. 
- numpy.reshape : Equivalent function in the NumPy module. - - Notes - ----- - The reshaping operation cannot guarantee that a copy will not be made, - to modify the shape in place, use ``a.shape = s`` - - Examples - -------- - >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) - >>> print x - [[-- 2] - [3 --]] - >>> x = x.reshape((4,1)) - >>> print x - [[--] - [2] - [3] - [--]] - - """ - kwargs.update(order=kwargs.get('order', 'C')) - result = self._data.reshape(*s, **kwargs).view(type(self)) - result._update_from(self) - mask = self._mask - if mask is not nomask: - result._mask = mask.reshape(*s, **kwargs) - return result - # - def resize(self, newshape, refcheck=True, order=False): - """ - .. warning:: - - This method does nothing, except raise a ValueError exception. A - masked array does not own its data and therefore cannot safely be - resized in place. Use the `numpy.ma.resize` function instead. - - This method is difficult to implement safely and may be deprecated in - future releases of NumPy. - - """ - # Note : the 'order' keyword looks broken, let's just drop it -# try: -# ndarray.resize(self, newshape, refcheck=refcheck) -# if self.mask is not nomask: -# self._mask.resize(newshape, refcheck=refcheck) -# except ValueError: -# raise ValueError("Cannot resize an array that has been referenced " -# "or is referencing another array in this way.\n" -# "Use the numpy.ma.resize function.") -# return None - errmsg = "A masked array does not own its data "\ - "and therefore cannot be resized.\n" \ - "Use the numpy.ma.resize function instead." - raise ValueError(errmsg) - # - def put(self, indices, values, mode='raise'): - """ - Set storage-indexed locations to corresponding values. - - Sets self._data.flat[n] = values[n] for each n in indices. - If `values` is shorter than `indices` then it will repeat. - If `values` has some masked values, the initial mask is updated - in consequence, else the corresponding values are unmasked. 
- - Parameters - ---------- - indices : 1-D array_like - Target indices, interpreted as integers. - values : array_like - Values to place in self._data copy at target indices. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - 'raise' : raise an error. - 'wrap' : wrap around. - 'clip' : clip to the range. - - Notes - ----- - `values` can be a scalar or length 1 array. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> x.put([0,4,8],[10,20,30]) - >>> print x - [[10 -- 3] - [-- 20 --] - [7 -- 30]] - - >>> x.put(4,999) - >>> print x - [[10 -- 3] - [-- 999 --] - [7 -- 30]] - - """ - m = self._mask - # Hard mask: Get rid of the values/indices that fall on masked data - if self._hardmask and self._mask is not nomask: - mask = self._mask[indices] - indices = narray(indices, copy=False) - values = narray(values, copy=False, subok=True) - values.resize(indices.shape) - indices = indices[~mask] - values = values[~mask] - #.... - self._data.put(indices, values, mode=mode) - #.... - if m is nomask: - m = getmask(values) - else: - m = m.copy() - if getmask(values) is nomask: - m.put(indices, False, mode=mode) - else: - m.put(indices, values._mask, mode=mode) - m = make_mask(m, copy=False, shrink=True) - self._mask = m - #............................................ - def ids (self): - """ - Return the addresses of the data and mask areas. - - Parameters - ---------- - None - - Examples - -------- - >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) - >>> x.ids() - (166670640, 166659832) - - If the array has no mask, the address of `nomask` is returned. 
This address - is typically not close to the data in memory: - - >>> x = np.ma.array([1, 2, 3]) - >>> x.ids() - (166691080, 3083169284L) - - """ - if self._mask is nomask: - return (self.ctypes.data, id(nomask)) - return (self.ctypes.data, self._mask.ctypes.data) - - def iscontiguous(self): - """ - Return a boolean indicating whether the data is contiguous. - - Parameters - ---------- - None - - Examples - -------- - >>> x = np.ma.array([1, 2, 3]) - >>> x.iscontiguous() - True - - `iscontiguous` returns one of the flags of the masked array: - - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : True - OWNDATA : False - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - - """ - return self.flags['CONTIGUOUS'] - - #............................................ - def all(self, axis=None, out=None): - """ - Check if all of the elements of `a` are true. - - Performs a :func:`logical_and` over the given axis and returns the result. - Masked values are considered as True during computation. - For convenience, the output array is masked where ALL the values along the - current axis are masked: if the output would have been a scalar and that - all the values are masked, then the output is `masked`. - - Parameters - ---------- - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. 
- - See Also - -------- - all : equivalent function - - Examples - -------- - >>> np.ma.array([1,2,3]).all() - True - >>> a = np.ma.array([1,2,3], mask=True) - >>> (a.all() is np.ma.masked) - True - - """ - mask = _check_mask_axis(self._mask, axis) - if out is None: - d = self.filled(True).all(axis=axis).view(type(self)) - if d.ndim: - d.__setmask__(mask) - elif mask: - return masked - return d - self.filled(True).all(axis=axis, out=out) - if isinstance(out, MaskedArray): - if out.ndim or mask: - out.__setmask__(mask) - return out - - - def any(self, axis=None, out=None): - """ - Check if any of the elements of `a` are true. - - Performs a logical_or over the given axis and returns the result. - Masked values are considered as False during computation. - - Parameters - ---------- - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array and return a scalar. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - See Also - -------- - any : equivalent function - - """ - mask = _check_mask_axis(self._mask, axis) - if out is None: - d = self.filled(False).any(axis=axis).view(type(self)) - if d.ndim: - d.__setmask__(mask) - elif mask: - d = masked - return d - self.filled(False).any(axis=axis, out=out) - if isinstance(out, MaskedArray): - if out.ndim or mask: - out.__setmask__(mask) - return out - - - def nonzero(self): - """ - Return the indices of unmasked elements that are not zero. - - Returns a tuple of arrays, one for each dimension, containing the - indices of the non-zero elements in that dimension. The corresponding - non-zero values can be obtained with:: - - a[a.nonzero()] - - To group the indices by element, rather than dimension, use - instead:: - - np.transpose(a.nonzero()) - - The result of this is always a 2d array, with a row for each non-zero - element. 
- - Parameters - ---------- - None - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - numpy.nonzero : - Function operating on ndarrays. - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.array(np.eye(3)) - >>> x - masked_array(data = - [[ 1. 0. 0.] - [ 0. 1. 0.] - [ 0. 0. 1.]], - mask = - False, - fill_value=1e+20) - >>> x.nonzero() - (array([0, 1, 2]), array([0, 1, 2])) - - Masked elements are ignored. - - >>> x[1, 1] = ma.masked - >>> x - masked_array(data = - [[1.0 0.0 0.0] - [0.0 -- 0.0] - [0.0 0.0 1.0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=1e+20) - >>> x.nonzero() - (array([0, 2]), array([0, 2])) - - Indices can also be grouped by element. - - >>> np.transpose(x.nonzero()) - array([[0, 0], - [2, 2]]) - - A common use for ``nonzero`` is to find the indices of an array, where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, ma.nonzero(a > 3) - yields the indices of the `a` where the condition is true. - - >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) - >>> a > 3 - masked_array(data = - [[False False False] - [ True True True] - [ True True True]], - mask = - False, - fill_value=999999) - >>> ma.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - The ``nonzero`` method of the condition array can also be called. - - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - return narray(self.filled(0), copy=False).nonzero() - - - def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - (this docstring should be overwritten) - """ - #!!!: implement out + test! 
- m = self._mask - if m is nomask: - result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, - axis2=axis2, out=out) - return result.astype(dtype) - else: - D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) - return D.astype(dtype).filled(0).sum(axis=None, out=out) - trace.__doc__ = ndarray.trace.__doc__ - - def sum(self, axis=None, dtype=None, out=None): - """ - Return the sum of the array elements over the given axis. - Masked elements are set to 0 internally. - - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the sum is computed. The default - (`axis` = None) is to compute over the flattened array. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and - the type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. - out : {None, ndarray}, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Returns - ------- - sum_along_axis : MaskedArray or scalar - An array with the same shape as self, with the specified - axis removed. If self is a 0-d array, or if `axis` is None, a scalar - is returned. If an output array is specified, a reference to - `out` is returned. 
- - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print x.sum() - 25 - >>> print x.sum(axis=1) - [4 5 16] - >>> print x.sum(axis=0) - [8 5 12] - >>> print type(x.sum(axis=0, dtype=np.int64)[0]) - - - """ - _mask = ndarray.__getattribute__(self, '_mask') - newmask = _check_mask_axis(_mask, axis) - # No explicit output - if out is None: - result = self.filled(0).sum(axis, dtype=dtype) - rndim = getattr(result, 'ndim', 0) - if rndim: - result = result.view(type(self)) - result.__setmask__(newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(0).sum(axis, dtype=dtype, out=out) - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - return out - - - def cumsum(self, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the elements along the given axis. - The cumulative sum is calculated over the flattened array by - default, otherwise over the specified axis. - - Masked values are set to 0 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. - - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the sum is computed. The default (`axis` = None) is to - compute over the flattened array. `axis` may be negative, in which case - it counts from the last to the first axis. - dtype : {None, dtype}, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. 
It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Returns - ------- - cumsum : ndarray. - A new array holding the result is returned unless ``out`` is - specified, in which case a reference to ``out`` is returned. - - Notes - ----- - The mask is lost if `out` is not a valid :class:`MaskedArray` ! - - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) - >>> print marr.cumsum() - [0 1 3 -- -- -- 9 16 24 33] - - """ - result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(self.mask) - return out - result = result.view(type(self)) - result.__setmask__(self._mask) - return result - - - def prod(self, axis=None, dtype=None, out=None): - """ - Return the product of the array elements over the given axis. - Masked elements are set to 1 internally for computation. - - Parameters - ---------- - axis : {None, int}, optional - Axis over which the product is taken. If None is used, then the - product is over all the array elements. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If ``dtype`` has the value ``None`` - and the type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - Returns - ------- - product_along_axis : {array, scalar}, see dtype parameter above. - Returns an array whose shape is the same as a with the specified - axis removed. Returns a 0d array when a is 1d or axis=None. 
- Returns a reference to the specified output array if specified. - - See Also - -------- - prod : equivalent function - - Notes - ----- - Arithmetic is modular when using integer types, and no error is raised - on overflow. - - Examples - -------- - >>> np.prod([1.,2.]) - 2.0 - >>> np.prod([1.,2.], dtype=np.int32) - 2 - >>> np.prod([[1.,2.],[3.,4.]]) - 24.0 - >>> np.prod([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - """ - _mask = ndarray.__getattribute__(self, '_mask') - newmask = _check_mask_axis(_mask, axis) - # No explicit output - if out is None: - result = self.filled(1).prod(axis, dtype=dtype) - rndim = getattr(result, 'ndim', 0) - if rndim: - result = result.view(type(self)) - result.__setmask__(newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(1).prod(axis, dtype=dtype, out=out) - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - return out - - product = prod - - def cumprod(self, axis=None, dtype=None, out=None): - """ - Return the cumulative product of the elements along the given axis. - The cumulative product is taken over the flattened array by - default, otherwise over the specified axis. - - Masked values are set to 1 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. - - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the product is computed. The default - (`axis` = None) is to compute over the flattened array. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If ``dtype`` has the value ``None`` - and the type of ``a`` is an integer type of precision less than the - default platform integer, then the default platform integer precision - is used. 
Otherwise, the dtype is the same as that of ``a``. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless out is specified, - in which case a reference to out is returned. - - Notes - ----- - The mask is lost if `out` is not a valid MaskedArray ! - - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - """ - result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(self._mask) - return out - result = result.view(type(self)) - result.__setmask__(self._mask) - return result - - - def mean(self, axis=None, dtype=None, out=None): - """ - Returns the average of the array elements. - - Masked entries are ignored. - The average is taken over the flattened array by default, otherwise over - the specified axis. Refer to `numpy.mean` for the full documentation. - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : dtype, optional - Type to use in computing the mean. For integer inputs, the default - is float64; for floating point, inputs it is the same as the input - dtype. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - Returns - ------- - mean : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - See Also - -------- - numpy.ma.mean : Equivalent function. 
- numpy.mean : Equivalent function on non-masked arrays. - numpy.ma.average: Weighted average. - - Examples - -------- - >>> a = np.ma.array([1,2,3], mask=[False, False, True]) - >>> a - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) - >>> a.mean() - 1.5 - - """ - if self._mask is nomask: - result = super(MaskedArray, self).mean(axis=axis, dtype=dtype) - else: - dsum = self.sum(axis=axis, dtype=dtype) - cnt = self.count(axis=axis) - if cnt.shape == () and (cnt == 0): - result = masked - else: - result = dsum * 1. / cnt - if out is not None: - out.flat = result - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = getattr(result, '_mask', nomask) - return out - return result - - def anom(self, axis=None, dtype=None): - """ - Compute the anomalies (deviations from the arithmetic mean) - along the given axis. - - Returns an array of anomalies, with the same shape as the input and - where the arithmetic mean is computed along the given axis. - - Parameters - ---------- - axis : int, optional - Axis over which the anomalies are taken. - The default is to use the mean of the flattened array as reference. - dtype : dtype, optional - Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. - - See Also - -------- - mean : Compute the mean of the array. - - Examples - -------- - >>> a = np.ma.array([1,2,3]) - >>> a.anom() - masked_array(data = [-1. 0. 
1.], - mask = False, - fill_value = 1e+20) - - """ - m = self.mean(axis, dtype) - if not axis: - return (self - m) - else: - return (self - expand_dims(m, axis)) - - def var(self, axis=None, dtype=None, out=None, ddof=0): - "" - # Easy case: nomask, business as usual - if self._mask is nomask: - return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof) - # Some data are masked, yay! - cnt = self.count(axis=axis) - ddof - danom = self.anom(axis=axis, dtype=dtype) - if iscomplexobj(self): - danom = umath.absolute(danom) ** 2 - else: - danom *= danom - dvar = divide(danom.sum(axis), cnt).view(type(self)) - # Apply the mask if it's not a scalar - if dvar.ndim: - dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0)) - dvar._update_from(self) - elif getattr(dvar, '_mask', False): - # Make sure that masked is returned when the scalar is masked. - dvar = masked - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(True) - elif out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or "\ - "more location." - raise MaskError(errmsg) - else: - out.flat = np.nan - return out - # In case with have an explicit output - if out is not None: - # Set the data - out.flat = dvar - # Set the mask if needed - if isinstance(out, MaskedArray): - out.__setmask__(dvar.mask) - return out - return dvar - var.__doc__ = np.var.__doc__ - - - def std(self, axis=None, dtype=None, out=None, ddof=0): - "" - dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof) - if dvar is not masked: - dvar = sqrt(dvar) - if out is not None: - out **= 0.5 - return out - return dvar - std.__doc__ = np.std.__doc__ - - #............................................ - def round(self, decimals=0, out=None): - """ - Return an array rounded a to the given number of decimals. - - Refer to `numpy.around` for full documentation. 
- - See Also - -------- - numpy.around : equivalent function - - """ - result = self._data.round(decimals=decimals, out=out).view(type(self)) - result._mask = self._mask - result._update_from(self) - # No explicit output: we're done - if out is None: - return result - if isinstance(out, MaskedArray): - out.__setmask__(self._mask) - return out - round.__doc__ = ndarray.round.__doc__ - - #............................................ - def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None): - """ - Return an ndarray of indices that sort the array along the - specified axis. Masked values are filled beforehand to - `fill_value`. - - Parameters - ---------- - axis : int, optional - Axis along which to sort. The default is -1 (last axis). - If None, the flattened array is used. - fill_value : var, optional - Value used to fill the array before sorting. - The default is the `fill_value` attribute of the input array. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - Returns - ------- - index_array : ndarray, int - Array of indices that sort `a` along the specified axis. - In other words, ``a[index_array]`` yields a sorted `a`. - - See Also - -------- - sort : Describes sorting algorithms used. - lexsort : Indirect stable sort with multiple keys. - ndarray.sort : Inplace sort. - - Notes - ----- - See `sort` for notes on the different sorting algorithms. 
- - Examples - -------- - >>> a = np.ma.array([3,2,1], mask=[False, False, True]) - >>> a - masked_array(data = [3 2 --], - mask = [False False True], - fill_value = 999999) - >>> a.argsort() - array([1, 0, 2]) - - """ - if fill_value is None: - fill_value = default_fill_value(self) - d = self.filled(fill_value).view(ndarray) - return d.argsort(axis=axis, kind=kind, order=order) - - - def argmin(self, axis=None, fill_value=None, out=None): - """ - Return array of indices to the minimum values along the given axis. - - Parameters - ---------- - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - fill_value : {var}, optional - Value used to fill in the masked values. If None, the output of - minimum_fill_value(self._data) is used instead. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - Returns - ------- - {ndarray, scalar} - If multi-dimension input, returns a new ndarray of indices to the - minimum values along the given axis. Otherwise, returns a scalar - of index to the minimum values along the given axis. - - Examples - -------- - >>> x = np.ma.array(arange(4), mask=[1,1,0,0]) - >>> x.shape = (2,2) - >>> print x - [[-- --] - [2 3]] - >>> print x.argmin(axis=0, fill_value=-1) - [0 0] - >>> print x.argmin(axis=0, fill_value=9) - [1 1] - - """ - if fill_value is None: - fill_value = minimum_fill_value(self) - d = self.filled(fill_value).view(ndarray) - return d.argmin(axis, out=out) - - - def argmax(self, axis=None, fill_value=None, out=None): - """ - Returns array of indices of the maximum values along the given axis. - Masked values are treated as if they had the value fill_value. - - Parameters - ---------- - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - fill_value : {var}, optional - Value used to fill in the masked values. 
If None, the output of - maximum_fill_value(self._data) is used instead. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - Returns - ------- - index_array : {integer_array} - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a.argmax() - 5 - >>> a.argmax(0) - array([1, 1, 1]) - >>> a.argmax(1) - array([2, 2]) - - """ - if fill_value is None: - fill_value = maximum_fill_value(self._data) - d = self.filled(fill_value).view(ndarray) - return d.argmax(axis, out=out) - - - def sort(self, axis= -1, kind='quicksort', order=None, - endwith=True, fill_value=None): - """ - Sort the array, in-place - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. - endwith : {True, False}, optional - Whether missing values (if any) should be forced in the upper indices - (at the end of the array) (True) or lower indices (at the beginning). - fill_value : {var}, optional - Value used internally for the masked values. - If ``fill_value`` is not None, it supersedes ``endwith``. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - - Notes - ----- - See ``sort`` for notes on the different sorting algorithms. 
- - Examples - -------- - >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) - >>> # Default - >>> a.sort() - >>> print a - [1 3 5 -- --] - - >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) - >>> # Put missing values in the front - >>> a.sort(endwith=False) - >>> print a - [-- -- 1 3 5] - - >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) - >>> # fill_value takes over endwith - >>> a.sort(endwith=False, fill_value=3) - >>> print a - [1 -- -- 3 5] - - """ - if self._mask is nomask: - ndarray.sort(self, axis=axis, kind=kind, order=order) - else: - if self is masked: - return self - if fill_value is None: - if endwith: - filler = minimum_fill_value(self) - else: - filler = maximum_fill_value(self) - else: - filler = fill_value - idx = np.indices(self.shape) - idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind, - order=order) - idx_l = idx.tolist() - tmp_mask = self._mask[idx_l].flat - tmp_data = self._data[idx_l].flat - self._data.flat = tmp_data - self._mask.flat = tmp_mask - return - - #............................................ - def min(self, axis=None, out=None, fill_value=None): - """ - Return the minimum along a given axis. - - Parameters - ---------- - axis : {None, int}, optional - Axis along which to operate. By default, ``axis`` is None and the - flattened input is used. - out : array_like, optional - Alternative output array in which to place the result. Must be of - the same shape and buffer length as the expected output. - fill_value : {var}, optional - Value used to fill in the masked values. - If None, use the output of `minimum_fill_value`. - - Returns - ------- - amin : array_like - New array holding the result. - If ``out`` was specified, ``out`` is returned. - - See Also - -------- - minimum_fill_value - Returns the minimum filling value for a given datatype. 
- - """ - _mask = ndarray.__getattribute__(self, '_mask') - newmask = _check_mask_axis(_mask, axis) - if fill_value is None: - fill_value = minimum_fill_value(self) - # No explicit output - if out is None: - result = self.filled(fill_value).min(axis=axis, out=out).view(type(self)) - if result.ndim: - # Set the mask - result.__setmask__(newmask) - # Get rid of Infs - if newmask.ndim: - np.putmask(result, newmask, result.fill_value) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(fill_value).min(axis=axis, out=out) - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - else: - if out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or more"\ - " location." - raise MaskError(errmsg) - np.putmask(out, newmask, np.nan) - return out - - def mini(self, axis=None): - """ - Return the array minimum along the specified axis. - - Parameters - ---------- - axis : int, optional - The axis along which to find the minima. Default is None, in which case - the minimum value in the whole array is returned. - - Returns - ------- - min : scalar or MaskedArray - If `axis` is None, the result is a scalar. Otherwise, if `axis` is - given and the array is at least 2-D, the result is a masked array with - dimension one smaller than the array on which `mini` is called. - - Examples - -------- - >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) - >>> print x - [[0 --] - [2 3] - [4 --]] - >>> x.mini() - 0 - >>> x.mini(axis=0) - masked_array(data = [0 3], - mask = [False False], - fill_value = 999999) - >>> print x.mini(axis=1) - [0 2 4] - - """ - if axis is None: - return minimum(self) - else: - return minimum.reduce(self, axis) - - #........................ - def max(self, axis=None, out=None, fill_value=None): - """ - Return the maximum along a given axis. 
- - Parameters - ---------- - axis : {None, int}, optional - Axis along which to operate. By default, ``axis`` is None and the - flattened input is used. - out : array_like, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - fill_value : {var}, optional - Value used to fill in the masked values. - If None, use the output of maximum_fill_value(). - - Returns - ------- - amax : array_like - New array holding the result. - If ``out`` was specified, ``out`` is returned. - - See Also - -------- - maximum_fill_value - Returns the maximum filling value for a given datatype. - - """ - _mask = ndarray.__getattribute__(self, '_mask') - newmask = _check_mask_axis(_mask, axis) - if fill_value is None: - fill_value = maximum_fill_value(self) - # No explicit output - if out is None: - result = self.filled(fill_value).max(axis=axis, out=out).view(type(self)) - if result.ndim: - # Set the mask - result.__setmask__(newmask) - # Get rid of Infs - if newmask.ndim: - np.putmask(result, newmask, result.fill_value) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(fill_value).max(axis=axis, out=out) - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - else: - - if out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or more"\ - " location." - raise MaskError(errmsg) - np.putmask(out, newmask, np.nan) - return out - - def ptp(self, axis=None, out=None, fill_value=None): - """ - Return (maximum - minimum) along the the given dimension - (i.e. peak-to-peak value). - - Parameters - ---------- - axis : {None, int}, optional - Axis along which to find the peaks. If None (default) the - flattened array is used. - out : {None, array_like}, optional - Alternative output array in which to place the result. 
It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - fill_value : {var}, optional - Value used to fill in the masked values. - - Returns - ------- - ptp : ndarray. - A new array holding the result, unless ``out`` was - specified, in which case a reference to ``out`` is returned. - - """ - if out is None: - result = self.max(axis=axis, fill_value=fill_value) - result -= self.min(axis=axis, fill_value=fill_value) - return result - out.flat = self.max(axis=axis, out=out, fill_value=fill_value) - out -= self.min(axis=axis, fill_value=fill_value) - return out - - def take(self, indices, axis=None, out=None, mode='raise'): - """ - """ - (_data, _mask) = (self._data, self._mask) - cls = type(self) - # Make sure the indices are not masked - maskindices = getattr(indices, '_mask', nomask) - if maskindices is not nomask: - indices = indices.filled(0) - # Get the data - if out is None: - out = _data.take(indices, axis=axis, mode=mode).view(cls) - else: - np.take(_data, indices, axis=axis, mode=mode, out=out) - # Get the mask - if isinstance(out, MaskedArray): - if _mask is nomask: - outmask = maskindices - else: - outmask = _mask.take(indices, axis=axis, mode=mode) - outmask |= maskindices - out.__setmask__(outmask) - return out - - - # Array methods --------------------------------------- - copy = _arraymethod('copy') - diagonal = _arraymethod('diagonal') - transpose = _arraymethod('transpose') - T = property(fget=lambda self:self.transpose()) - swapaxes = _arraymethod('swapaxes') - clip = _arraymethod('clip', onmask=False) - copy = _arraymethod('copy') - squeeze = _arraymethod('squeeze') - #-------------------------------------------- - def tolist(self, fill_value=None): - """ - Return the data portion of the masked array as a hierarchical Python list. - - Data items are converted to the nearest compatible Python type. - Masked values are converted to `fill_value`. 
If `fill_value` is None, - the corresponding entries in the output list will be ``None``. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries. Default is None. - - Returns - ------- - result : list - The Python list representation of the masked array. - - Examples - -------- - >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) - >>> x.tolist() - [[1, None, 3], [None, 5, None], [7, None, 9]] - >>> x.tolist(-999) - [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] - - """ - _mask = self._mask - # No mask ? Just return .data.tolist ? - if _mask is nomask: - return self._data.tolist() - # Explicit fill_value: fill the array and get the list - if fill_value is not None: - return self.filled(fill_value).tolist() - # Structured array ............. - names = self.dtype.names - if names: - result = self._data.astype([(_, object) for _ in names]) - for n in names: - result[n][_mask[n]] = None - return result.tolist() - # Standard arrays ............... - if _mask is nomask: - return [None] - # Set temps to save time when dealing w/ marrays... - inishape = self.shape - result = np.array(self._data.ravel(), dtype=object) - result[_mask.ravel()] = None - result.shape = inishape - return result.tolist() -# if fill_value is not None: -# return self.filled(fill_value).tolist() -# result = self.filled().tolist() -# # Set temps to save time when dealing w/ mrecarrays... 
-# _mask = self._mask -# if _mask is nomask: -# return result -# nbdims = self.ndim -# dtypesize = len(self.dtype) -# if nbdims == 0: -# return tuple([None] * dtypesize) -# elif nbdims == 1: -# maskedidx = _mask.nonzero()[0].tolist() -# if dtypesize: -# nodata = tuple([None] * dtypesize) -# else: -# nodata = None -# [operator.setitem(result, i, nodata) for i in maskedidx] -# else: -# for idx in zip(*[i.tolist() for i in _mask.nonzero()]): -# tmp = result -# for i in idx[:-1]: -# tmp = tmp[i] -# tmp[idx[-1]] = None -# return result - #........................ - def tostring(self, fill_value=None, order='C'): - """ - Return the array data as a string containing the raw bytes in the array. - - The array is filled with a fill value before the string conversion. - - Parameters - ---------- - fill_value : scalar, optional - Value used to fill in the masked values. Deafult is None, in which - case `MaskedArray.fill_value` is used. - order : {'C','F','A'}, optional - Order of the data item in the copy. Default is 'C'. - - - 'C' -- C order (row major). - - 'F' -- Fortran order (column major). - - 'A' -- Any, current order of array. - - None -- Same as 'A'. - - See Also - -------- - ndarray.tostring - tolist, tofile - - Notes - ----- - As for `ndarray.tostring`, information about the shape, dtype, etc., - but also about `fill_value`, will be lost. - - Examples - -------- - >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.tostring() - '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00' - - """ - return self.filled(fill_value).tostring(order=order) - #........................ - def tofile(self, fid, sep="", format="%s"): - """ - Save a masked array to a file in binary format. - - .. warning:: - This function is not implemented yet. - - Raises - ------ - NotImplementedError - When `tofile` is called. 
- - """ - raise NotImplementedError("Not implemented yet, sorry...") - - def toflex(self): - """ - Transforms a masked array into a flexible-type array. - - The flexible type array that is returned will have two fields: - - * the ``_data`` field stores the ``_data`` part of the array. - * the ``_mask`` field stores the ``_mask`` part of the array. - - Parameters - ---------- - None - - Returns - ------- - record : ndarray - A new flexible-type `ndarray` with two fields: the first element - containing a value, the second element containing the corresponding - mask boolean. The returned record shape matches self.shape. - - Notes - ----- - A side-effect of transforming a masked array into a flexible `ndarray` is - that meta information (``fill_value``, ...) will be lost. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print x.toflex() - [[(1, False) (2, True) (3, False)] - [(4, True) (5, False) (6, True)] - [(7, False) (8, True) (9, False)]] - - """ - # Get the basic dtype .... - ddtype = self.dtype - # Make sure we have a mask - _mask = self._mask - if _mask is None: - _mask = make_mask_none(self.shape, ddtype) - # And get its dtype - mdtype = self._mask.dtype - # - record = np.ndarray(shape=self.shape, - dtype=[('_data', ddtype), ('_mask', mdtype)]) - record['_data'] = self._data - record['_mask'] = self._mask - return record - torecords = toflex - #-------------------------------------------- - # Pickling - def __getstate__(self): - """Return the internal state of the masked array, for pickling - purposes. 
- - """ - cf = 'CF'[self.flags.fnc] - state = (1, - self.shape, - self.dtype, - self.flags.fnc, - self._data.tostring(cf), - #self._data.tolist(), - getmaskarray(self).tostring(cf), - #getmaskarray(self).tolist(), - self._fill_value, - ) - return state - # - def __setstate__(self, state): - """Restore the internal state of the masked array, for - pickling purposes. ``state`` is typically the output of the - ``__getstate__`` output, and is a 5-tuple: - - - class name - - a tuple giving the shape of the data - - a typecode for the data - - a binary string for the data - - a binary string for the mask. - - """ - (_, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) - self.fill_value = flv - # - def __reduce__(self): - """Return a 3-tuple for pickling a MaskedArray. - - """ - return (_mareconstruct, - (self.__class__, self._baseclass, (0,), 'b',), - self.__getstate__()) - # - def __deepcopy__(self, memo=None): - from copy import deepcopy - copied = MaskedArray.__new__(type(self), self, copy=True) - if memo is None: - memo = {} - memo[id(self)] = copied - for (k, v) in self.__dict__.iteritems(): - copied.__dict__[k] = deepcopy(v, memo) - return copied - - -def _mareconstruct(subtype, baseclass, baseshape, basetype,): - """Internal function that builds a new MaskedArray from the - information stored in a pickle. - - """ - _data = ndarray.__new__(baseclass, baseshape, basetype) - _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) - return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) - - - - - - -class mvoid(MaskedArray): - """ - Fake a 'void' object to use for masked array with structured dtypes. 
- """ - # - def __new__(self, data, mask=nomask, dtype=None, fill_value=None): - dtype = dtype or data.dtype - _data = ndarray((), dtype=dtype) - _data[()] = data - _data = _data.view(self) - if mask is not nomask: - if isinstance(mask, np.void): - _data._mask = mask - else: - try: - # Mask is already a 0D array - _data._mask = np.void(mask) - except TypeError: - # Transform the mask to a void - mdtype = make_mask_descr(dtype) - _data._mask = np.array(mask, dtype=mdtype)[()] - if fill_value is not None: - _data.fill_value = fill_value - return _data - - def _get_data(self): - # Make sure that the _data part is a np.void - return self.view(ndarray)[()] - _data = property(fget=_get_data) - - def __getitem__(self, indx): - "Get the index..." - m = self._mask - if m is not nomask and m[indx]: - return masked - return self._data[indx] - - def __setitem__(self, indx, value): - self._data[indx] = value - self._mask[indx] |= getattr(value, "_mask", False) - - def __str__(self): - m = self._mask - if (m is nomask): - return self._data.__str__() - m = tuple(m) - if (not any(m)): - return self._data.__str__() - r = self._data.tolist() - p = masked_print_option - if not p.enabled(): - p = 'N/A' - else: - p = str(p) - r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)] - return "(%s)" % ", ".join(r) - - def __repr__(self): - m = self._mask - if (m is nomask): - return self._data.__repr__() - m = tuple(m) - if not any(m): - return self._data.__repr__() - p = masked_print_option - if not p.enabled(): - return self.filled(self.fill_value).__repr__() - p = str(p) - r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)] - return "(%s)" % ", ".join(r) - - def __iter__(self): - "Defines an iterator for mvoid" - (_data, _mask) = (self._data, self._mask) - if _mask is nomask: - for d in _data: - yield d - else: - for (d, m) in zip(_data, _mask): - if m: - yield masked - else: - yield d - - def filled(self, fill_value=None): - """ - Return a copy with masked fields filled 
with a given value. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries (None by default). - If None, the `fill_value` attribute is used instead. - - Returns - ------- - filled_void: - A `np.void` object - - See Also - -------- - MaskedArray.filled - - """ - return asarray(self).filled(fill_value)[()] - - def tolist(self): - """ - Transforms the mvoid object into a tuple. - - Masked fields are replaced by None. - - Returns - ------- - returned_tuple - Tuple of fields - """ - _mask = self._mask - if _mask is nomask: - return self._data.tolist() - result = [] - for (d, m) in zip(self._data, self._mask): - if m: - result.append(None) - else: - # .item() makes sure we return a standard Python object - result.append(d.item()) - return tuple(result) - - - -#####-------------------------------------------------------------------------- -#---- --- Shortcuts --- -#####--------------------------------------------------------------------------- -def isMaskedArray(x): - """ - Test whether input is an instance of MaskedArray. - - This function returns True if `x` is an instance of MaskedArray - and returns False otherwise. Any object is accepted as input. - - Parameters - ---------- - x : object - Object to test. - - Returns - ------- - result : bool - True if `x` is a MaskedArray. - - See Also - -------- - isMA : Alias to isMaskedArray. - isarray : Alias to isMaskedArray. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.eye(3, 3) - >>> a - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> m = ma.masked_values(a, 0) - >>> m - masked_array(data = - [[1.0 -- --] - [-- 1.0 --] - [-- -- 1.0]], - mask = - [[False True True] - [ True False True] - [ True True False]], - fill_value=0.0) - >>> ma.isMaskedArray(a) - False - >>> ma.isMaskedArray(m) - True - >>> ma.isMaskedArray([0, 1, 2]) - False - - """ - return isinstance(x, MaskedArray) -isarray = isMaskedArray -isMA = isMaskedArray #backward compatibility - -# We define the masked singleton as a float for higher precedence... -# Note that it can be tricky sometimes w/ type comparison - -class MaskedConstant(MaskedArray): - # - _data = data = np.array(0.) - _mask = mask = np.array(True) - _baseclass = ndarray - # - def __new__(self): - return self._data.view(self) - # - def __array_finalize__(self, obj): - return - # - def __array_wrap__(self, obj): - return self - # - def __str__(self): - return str(masked_print_option._display) - # - def __repr__(self): - return 'masked' - # - def flatten(self): - return masked_array([self._data], dtype=float, mask=[True]) - -masked = masked_singleton = MaskedConstant() - - - -masked_array = MaskedArray - -def array(data, dtype=None, copy=False, order=False, - mask=nomask, fill_value=None, - keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0, - ): - """array(data, dtype=None, copy=False, order=False, mask=nomask, - fill_value=None, keep_mask=True, hard_mask=False, shrink=True, - subok=True, ndmin=0) - - Acts as shortcut to MaskedArray, with options in a different order - for convenience. And backwards compatibility... 
- - """ - #!!!: we should try to put 'order' somwehere - return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, - keep_mask=keep_mask, hard_mask=hard_mask, - fill_value=fill_value, ndmin=ndmin, shrink=shrink) -array.__doc__ = masked_array.__doc__ - -def is_masked(x): - """ - Determine whether input has masked values. - - Accepts any object as input, but always returns False unless the - input is a MaskedArray containing masked values. - - Parameters - ---------- - x : array_like - Array to check for masked values. - - Returns - ------- - result : bool - True if `x` is a MaskedArray with masked values, False otherwise. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) - >>> x - masked_array(data = [-- 1 -- 2 3], - mask = [ True False True False False], - fill_value=999999) - >>> ma.is_masked(x) - True - >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) - >>> x - masked_array(data = [0 1 0 2 3], - mask = False, - fill_value=999999) - >>> ma.is_masked(x) - False - - Always returns False if `x` isn't a MaskedArray. - - >>> x = [False, True, False] - >>> ma.is_masked(x) - False - >>> x = 'a string' - >>> ma.is_masked(x) - False - - """ - m = getmask(x) - if m is nomask: - return False - elif m.any(): - return True - return False - - -#####--------------------------------------------------------------------------- -#---- --- Extrema functions --- -#####--------------------------------------------------------------------------- -class _extrema_operation(object): - """ - Generic class for maximum/minimum functions. - - .. note:: - This is the base class for `_maximum_operation` and - `_minimum_operation`. - - """ - def __call__(self, a, b=None): - "Executes the call behavior." - if b is None: - return self.reduce(a) - return where(self.compare(a, b), a, b) - #......... - def reduce(self, target, axis=None): - "Reduce target along the given axis." 
- target = narray(target, copy=False, subok=True) - m = getmask(target) - if axis is not None: - kargs = { 'axis' : axis } - else: - kargs = {} - target = target.ravel() - if not (m is nomask): - m = m.ravel() - if m is nomask: - t = self.ufunc.reduce(target, **kargs) - else: - target = target.filled(self.fill_value_func(target)).view(type(target)) - t = self.ufunc.reduce(target, **kargs) - m = umath.logical_and.reduce(m, **kargs) - if hasattr(t, '_mask'): - t._mask = m - elif m: - t = masked - return t - #......... - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - result = self.ufunc.outer(filled(a), filled(b)) - if not isinstance(result, MaskedArray): - result = result.view(MaskedArray) - result._mask = m - return result - -#............................ -class _minimum_operation(_extrema_operation): - "Object to calculate minima" - def __init__ (self): - """minimum(a, b) or minimum(a) -In one argument case, returns the scalar minimum. - """ - self.ufunc = umath.minimum - self.afunc = amin - self.compare = less - self.fill_value_func = minimum_fill_value - -#............................ -class _maximum_operation(_extrema_operation): - "Object to calculate maxima" - def __init__ (self): - """maximum(a, b) or maximum(a) - In one argument case returns the scalar maximum. - """ - self.ufunc = umath.maximum - self.afunc = amax - self.compare = greater - self.fill_value_func = maximum_fill_value - -#.......................................................... 
-def min(obj, axis=None, out=None, fill_value=None): - try: - return obj.min(axis=axis, fill_value=fill_value, out=out) - except (AttributeError, TypeError): - # If obj doesn't have a max method, - # ...or if the method doesn't accept a fill_value argument - return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out) -min.__doc__ = MaskedArray.min.__doc__ - -def max(obj, axis=None, out=None, fill_value=None): - try: - return obj.max(axis=axis, fill_value=fill_value, out=out) - except (AttributeError, TypeError): - # If obj doesn't have a max method, - # ...or if the method doesn't accept a fill_value argument - return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out) -max.__doc__ = MaskedArray.max.__doc__ - -def ptp(obj, axis=None, out=None, fill_value=None): - """a.ptp(axis=None) = a.max(axis)-a.min(axis)""" - try: - return obj.ptp(axis, out=out, fill_value=fill_value) - except (AttributeError, TypeError): - # If obj doesn't have a max method, - # ...or if the method doesn't accept a fill_value argument - return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out) -ptp.__doc__ = MaskedArray.ptp.__doc__ - - -#####--------------------------------------------------------------------------- -#---- --- Definition of functions from the corresponding methods --- -#####--------------------------------------------------------------------------- -class _frommethod: - """ - Define functions from existing MaskedArray methods. - - Parameters - ---------- - methodname : str - Name of the method to transform. - - """ - def __init__(self, methodname): - self.__name__ = methodname - self.__doc__ = self.getdoc() - # - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None)) - return doc - # - def __call__(self, a, *args, **params): - # Get the method from the array (if possible) - method_name = self.__name__ - method = getattr(a, method_name, None) - if method is not None: - return method(*args, **params) - # Still here ? Then a is not a MaskedArray - method = getattr(MaskedArray, method_name, None) - if method is not None: - return method(MaskedArray(a), *args, **params) - # Still here ? OK, let's call the corresponding np function - method = getattr(np, method_name) - return method(a, *args, **params) - -all = _frommethod('all') -anomalies = anom = _frommethod('anom') -any = _frommethod('any') -compress = _frommethod('compress') -cumprod = _frommethod('cumprod') -cumsum = _frommethod('cumsum') -copy = _frommethod('copy') -diagonal = _frommethod('diagonal') -harden_mask = _frommethod('harden_mask') -ids = _frommethod('ids') -maximum = _maximum_operation() -mean = _frommethod('mean') -minimum = _minimum_operation() -nonzero = _frommethod('nonzero') -prod = _frommethod('prod') -product = _frommethod('prod') -ravel = _frommethod('ravel') -repeat = _frommethod('repeat') -shrink_mask = _frommethod('shrink_mask') -soften_mask = _frommethod('soften_mask') -std = _frommethod('std') -sum = _frommethod('sum') -swapaxes = _frommethod('swapaxes') -#take = _frommethod('take') -trace = _frommethod('trace') -var = _frommethod('var') - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - """ - a = masked_array(a) - return a.take(indices, axis=axis, out=out, mode=mode) - - -#.............................................................................. -def power(a, b, third=None): - """ - Returns element-wise base array raised to power from second array. 
- - This is the masked array version of `numpy.power`. For details see - `numpy.power`. - - See Also - -------- - numpy.power - - Notes - ----- - The *out* argument to `numpy.power` is not supported, `third` has to be - None. - - """ - if third is not None: - raise MaskError, "3-argument power not supported." - # Get the masks - ma = getmask(a) - mb = getmask(b) - m = mask_or(ma, mb) - # Get the rawdata - fa = getdata(a) - fb = getdata(b) - # Get the type of the result (so that we preserve subclasses) - if isinstance(a, MaskedArray): - basetype = type(a) - else: - basetype = MaskedArray - # Get the result and view it as a (subclass of) MaskedArray - err_status = np.geterr() - try: - np.seterr(divide='ignore', invalid='ignore') - result = np.where(m, fa, umath.power(fa, fb)).view(basetype) - finally: - np.seterr(**err_status) - result._update_from(a) - # Find where we're in trouble w/ NaNs and Infs - invalid = np.logical_not(np.isfinite(result.view(ndarray))) - # Add the initial mask - if m is not nomask: - if not (result.ndim): - return masked - result._mask = np.logical_or(m, invalid) - # Fix the invalid parts - if invalid.any(): - if not result.ndim: - return masked - elif result._mask is nomask: - result._mask = invalid - result._data[invalid] = result.fill_value - return result - -# if fb.dtype.char in typecodes["Integer"]: -# return masked_array(umath.power(fa, fb), m) -# m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) -# if m is nomask: -# return masked_array(umath.power(fa, fb)) -# else: -# fa = fa.copy() -# if m.all(): -# fa.flat = 1 -# else: -# np.putmask(fa,m,1) -# return masked_array(umath.power(fa, fb), m) - -#.............................................................................. -def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): - "Function version of the eponymous method." 
- if fill_value is None: - fill_value = default_fill_value(a) - d = filled(a, fill_value) - if axis is None: - return d.argsort(kind=kind, order=order) - return d.argsort(axis, kind=kind, order=order) -argsort.__doc__ = MaskedArray.argsort.__doc__ - -def argmin(a, axis=None, fill_value=None): - "Function version of the eponymous method." - if fill_value is None: - fill_value = default_fill_value(a) - d = filled(a, fill_value) - return d.argmin(axis=axis) -argmin.__doc__ = MaskedArray.argmin.__doc__ - -def argmax(a, axis=None, fill_value=None): - "Function version of the eponymous method." - if fill_value is None: - fill_value = default_fill_value(a) - try: - fill_value = -fill_value - except: - pass - d = filled(a, fill_value) - return d.argmax(axis=axis) -argmin.__doc__ = MaskedArray.argmax.__doc__ - -def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None): - "Function version of the eponymous method." - a = narray(a, copy=True, subok=True) - if axis is None: - a = a.flatten() - axis = 0 - if fill_value is None: - if endwith: - filler = minimum_fill_value(a) - else: - filler = maximum_fill_value(a) - else: - filler = fill_value -# return - indx = np.indices(a.shape).tolist() - indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order) - return a[indx] -sort.__doc__ = MaskedArray.sort.__doc__ - - -def compressed(x): - """ - Return all the non-masked data as a 1-D array. - - This function is equivalent to calling the "compressed" method of a - `MaskedArray`, see `MaskedArray.compressed` for details. - - See Also - -------- - MaskedArray.compressed - Equivalent method. - - """ - if getmask(x) is nomask: - return np.asanyarray(x) - else: - return x.compressed() - -def concatenate(arrays, axis=0): - """ - Concatenate a sequence of arrays along the given axis. 
- - Parameters - ---------- - arrays : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. Default is 0. - - Returns - ------- - result : MaskedArray - The concatenated array with any masked entries preserved. - - See Also - -------- - numpy.concatenate : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.arange(3) - >>> a[1] = ma.masked - >>> b = ma.arange(2, 5) - >>> a - masked_array(data = [0 -- 2], - mask = [False True False], - fill_value = 999999) - >>> b - masked_array(data = [2 3 4], - mask = False, - fill_value = 999999) - >>> ma.concatenate([a, b]) - masked_array(data = [0 -- 2 2 3 4], - mask = [False True False False False False], - fill_value = 999999) - - """ - d = np.concatenate([getdata(a) for a in arrays], axis) - rcls = get_masked_subclass(*arrays) - data = d.view(rcls) - # Check whether one of the arrays has a non-empty mask... - for x in arrays: - if getmask(x) is not nomask: - break - else: - return data - # OK, so we have to concatenate the masks - dm = np.concatenate([getmaskarray(a) for a in arrays], axis) - # If we decide to keep a '_shrinkmask' option, we want to check that ... - # ... all of them are True, and then check for dm.any() -# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays]) -# if shrink and not dm.any(): - if not dm.dtype.fields and not dm.any(): - data._mask = nomask - else: - data._mask = dm.reshape(d.shape) - return data - -def count(a, axis=None): - if isinstance(a, MaskedArray): - return a.count(axis) - return masked_array(a, copy=False).count(axis) -count.__doc__ = MaskedArray.count.__doc__ - - -def diag(v, k=0): - """ - Extract a diagonal or construct a diagonal array. 
- - This function is the equivalent of `numpy.diag` that takes masked - values into account, see `numpy.diag` for details. - - See Also - -------- - numpy.diag : Equivalent function for ndarrays. - - """ - output = np.diag(v, k).view(MaskedArray) - if getmask(v) is not nomask: - output._mask = np.diag(v._mask, k) - return output - - -def expand_dims(x, axis): - """ - Expand the shape of an array. - - Expands the shape of the array by including a new axis before the one - specified by the `axis` parameter. This function behaves the same as - `numpy.expand_dims` but preserves masked elements. - - See Also - -------- - numpy.expand_dims : Equivalent function in top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.array([1, 2, 4]) - >>> x[1] = ma.masked - >>> x - masked_array(data = [1 -- 4], - mask = [False True False], - fill_value = 999999) - >>> np.expand_dims(x, axis=0) - array([[1, 2, 4]]) - >>> ma.expand_dims(x, axis=0) - masked_array(data = - [[1 -- 4]], - mask = - [[False True False]], - fill_value = 999999) - - The same result can be achieved using slicing syntax with `np.newaxis`. - - >>> x[np.newaxis, :] - masked_array(data = - [[1 -- 4]], - mask = - [[False True False]], - fill_value = 999999) - - """ - result = n_expand_dims(x, axis) - if isinstance(x, MaskedArray): - new_shape = result.shape - result = x.view() - result.shape = new_shape - if result._mask is not nomask: - result._mask.shape = new_shape - return result - -#...................................... -def left_shift (a, n): - """ - Shift the bits of an integer to the left. - - This is the masked array version of `numpy.left_shift`, for details - see that function. 
- - See Also - -------- - numpy.left_shift - - """ - m = getmask(a) - if m is nomask: - d = umath.left_shift(filled(a), n) - return masked_array(d) - else: - d = umath.left_shift(filled(a, 0), n) - return masked_array(d, mask=m) - -def right_shift (a, n): - """ - Shift the bits of an integer to the right. - - This is the masked array version of `numpy.right_shift`, for details - see that function. - - See Also - -------- - numpy.right_shift - - """ - m = getmask(a) - if m is nomask: - d = umath.right_shift(filled(a), n) - return masked_array(d) - else: - d = umath.right_shift(filled(a, 0), n) - return masked_array(d, mask=m) - -#...................................... -def put(a, indices, values, mode='raise'): - """ - Set storage-indexed locations to corresponding values. - - This function is equivalent to `MaskedArray.put`, see that method - for details. - - See Also - -------- - MaskedArray.put - - """ - # We can't use 'frommethod', the order of arguments is different - try: - return a.put(indices, values, mode=mode) - except AttributeError: - return narray(a, copy=False).put(indices, values, mode=mode) - -def putmask(a, mask, values): #, mode='raise'): - """ - Changes elements of an array based on conditional and input values. - - This is the masked array version of `numpy.putmask`, for details see - `numpy.putmask`. - - See Also - -------- - numpy.putmask - - Notes - ----- - Using a masked array as `values` will **not** transform a `ndarray` into - a `MaskedArray`. 
- - """ - # We can't use 'frommethod', the order of arguments is different - if not isinstance(a, MaskedArray): - a = a.view(MaskedArray) - (valdata, valmask) = (getdata(values), getmask(values)) - if getmask(a) is nomask: - if valmask is not nomask: - a._sharedmask = True - a._mask = make_mask_none(a.shape, a.dtype) - np.putmask(a._mask, mask, valmask) - elif a._hardmask: - if valmask is not nomask: - m = a._mask.copy() - np.putmask(m, mask, valmask) - a.mask |= m - else: - if valmask is nomask: - valmask = getmaskarray(values) - np.putmask(a._mask, mask, valmask) - np.putmask(a._data, mask, valdata) - return - -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - This function is exactly equivalent to `numpy.transpose`. - - See Also - -------- - numpy.transpose : Equivalent function in top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.arange(4).reshape((2,2)) - >>> x[1, 1] = ma.masked - >>>> x - masked_array(data = - [[0 1] - [2 --]], - mask = - [[False False] - [False True]], - fill_value = 999999) - >>> ma.transpose(x) - masked_array(data = - [[0 2] - [1 --]], - mask = - [[False False] - [False True]], - fill_value = 999999) - - """ - #We can't use 'frommethod', as 'transpose' doesn't take keywords - try: - return a.transpose(axes) - except AttributeError: - return narray(a, copy=False).transpose(axes).view(MaskedArray) - -def reshape(a, new_shape, order='C'): - """ - Returns an array containing the same data with a new shape. - - Refer to `MaskedArray.reshape` for full documentation. - - See Also - -------- - MaskedArray.reshape : equivalent function - - """ - #We can't use 'frommethod', it whine about some parameters. Dmmit. - try: - return a.reshape(new_shape, order=order) - except AttributeError: - _tmp = narray(a, copy=False).reshape(new_shape, order=order) - return _tmp.view(MaskedArray) - -def resize(x, new_shape): - """ - Return a new masked array with the specified size and shape. 
- - This is the masked equivalent of the `numpy.resize` function. The new - array is filled with repeated copies of `x` (in the order that the - data are stored in memory). If `x` is masked, the new array will be - masked, and the new mask will be a repetition of the old one. - - See Also - -------- - numpy.resize : Equivalent function in the top level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.array([[1, 2] ,[3, 4]]) - >>> a[0, 1] = ma.masked - >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value = 999999) - >>> np.resize(a, (3, 3)) - array([[1, 2, 3], - [4, 1, 2], - [3, 4, 1]]) - >>> ma.resize(a, (3, 3)) - masked_array(data = - [[1 -- 3] - [4 1 --] - [3 4 1]], - mask = - [[False True False] - [False False True] - [False False False]], - fill_value = 999999) - - A MaskedArray is always returned, regardless of the input type. - - >>> a = np.array([[1, 2] ,[3, 4]]) - >>> ma.resize(a, (3, 3)) - masked_array(data = - [[1 2 3] - [4 1 2] - [3 4 1]], - mask = - False, - fill_value = 999999) - - """ - # We can't use _frommethods here, as N.resize is notoriously whiny. - m = getmask(x) - if m is not nomask: - m = np.resize(m, new_shape) - result = np.resize(x, new_shape).view(get_masked_subclass(x)) - if result.ndim: - result._mask = m - return result - - -#................................................ -def rank(obj): - "maskedarray version of the numpy function." - return np.rank(getdata(obj)) -rank.__doc__ = np.rank.__doc__ -# -def shape(obj): - "maskedarray version of the numpy function." - return np.shape(getdata(obj)) -shape.__doc__ = np.shape.__doc__ -# -def size(obj, axis=None): - "maskedarray version of the numpy function." - return np.size(getdata(obj), axis) -size.__doc__ = np.size.__doc__ -#................................................ 
- -#####-------------------------------------------------------------------------- -#---- --- Extra functions --- -#####-------------------------------------------------------------------------- -def where (condition, x=None, y=None): - """ - Return a masked array with elements from x or y, depending on condition. - - Returns a masked array, shaped like condition, where the elements - are from `x` when `condition` is True, and from `y` otherwise. - If neither `x` nor `y` are given, the function returns a tuple of - indices where `condition` is True (the result of - ``condition.nonzero()``). - - Parameters - ---------- - condition : array_like, bool - The condition to meet. For each True element, yield the corresponding - element from `x`, otherwise from `y`. - x, y : array_like, optional - Values from which to choose. `x` and `y` need to have the same shape - as condition, or be broadcast-able to that shape. - - Returns - ------- - out : MaskedArray or tuple of ndarrays - The resulting masked array if `x` and `y` were given, otherwise - the result of ``condition.nonzero()``. - - See Also - -------- - numpy.where : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], - ... [1, 0, 1], - ... [0, 1, 0]]) - >>> print x - [[0.0 -- 2.0] - [-- 4.0 --] - [6.0 -- 8.0]] - >>> np.ma.where(x > 5) # return the indices where x > 5 - (array([2, 2]), array([0, 2])) - - >>> print np.ma.where(x > 5, x, -3.1416) - [[-3.1416 -- -3.1416] - [-- -3.1416 --] - [6.0 -- 8.0]] - - """ - if x is None and y is None: - return filled(condition, 0).nonzero() - elif x is None or y is None: - raise ValueError, "Either both or neither x and y should be given." - # Get the condition ............... - fc = filled(condition, 0).astype(MaskType) - notfc = np.logical_not(fc) - # Get the data ...................................... 
- xv = getdata(x) - yv = getdata(y) - if x is masked: - ndtype = yv.dtype - elif y is masked: - ndtype = xv.dtype - else: - ndtype = np.find_common_type([xv.dtype, yv.dtype], []) - # Construct an empty array and fill it - d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray) - _data = d._data - np.putmask(_data, fc, xv.astype(ndtype)) - np.putmask(_data, notfc, yv.astype(ndtype)) - # Create an empty mask and fill it - _mask = d._mask = np.zeros(fc.shape, dtype=MaskType) - np.putmask(_mask, fc, getmask(x)) - np.putmask(_mask, notfc, getmask(y)) - _mask |= getmaskarray(condition) - if not _mask.any(): - d._mask = nomask - return d - -def choose (indices, choices, out=None, mode='raise'): - """ - Use an index array to construct a new array from a set of choices. - - Given an array of integers and a set of n choice arrays, this method - will create a new array that merges each of the choice arrays. Where a - value in `a` is i, the new array will have the value that choices[i] - contains in the same place. - - Parameters - ---------- - a : ndarray of ints - This array must contain integers in ``[0, n-1]``, where n is the - number of choices. - choices : sequence of arrays - Choice arrays. The index array and all of the choices should be - broadcastable to the same shape. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and `dtype`. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' : raise an error - * 'wrap' : wrap around - * 'clip' : clip to the range - - Returns - ------- - merged_array : array - - See Also - -------- - choose : equivalent function - - Examples - -------- - >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) - >>> a = np.array([2, 1, 0]) - >>> np.ma.choose(a, choice) - masked_array(data = [3 2 1], - mask = False, - fill_value=999999) - - """ - def fmask (x): - "Returns the filled array, or True if masked." 
- if x is masked: - return True - return filled(x) - def nmask (x): - "Returns the mask, True if ``masked``, False if ``nomask``." - if x is masked: - return True - return getmask(x) - # Get the indices...... - c = filled(indices, 0) - # Get the masks........ - masks = [nmask(x) for x in choices] - data = [fmask(x) for x in choices] - # Construct the mask - outputmask = np.choose(c, masks, mode=mode) - outputmask = make_mask(mask_or(outputmask, getmask(indices)), - copy=0, shrink=True) - # Get the choices...... - d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(outputmask) - return out - d.__setmask__(outputmask) - return d - - -def round_(a, decimals=0, out=None): - """ - Return a copy of a, rounded to 'decimals' places. - - When 'decimals' is negative, it specifies the number of positions - to the left of the decimal point. The real and imaginary parts of - complex numbers are rounded separately. Nothing is done if the - array is not of float type and 'decimals' is greater than or equal - to 0. - - Parameters - ---------- - decimals : int - Number of decimals to round to. May be negative. - out : array_like - Existing array to use for output. - If not given, returns a default copy of a. - - Notes - ----- - If out is given and does not have a mask attribute, the mask of a - is lost! - - """ - if out is None: - return np.round_(a, decimals, out) - else: - np.round_(getdata(a), decimals, out) - if hasattr(out, '_mask'): - out._mask = getmask(a) - return out -round = round_ - -def inner(a, b): - """ - Returns the inner product of a and b for arrays of floating point types. - - Like the generic NumPy equivalent the product sum is over the last dimension - of a and b. - - Notes - ----- - The first argument is not conjugated. 
- - """ - fa = filled(a, 0) - fb = filled(b, 0) - if len(fa.shape) == 0: - fa.shape = (1,) - if len(fb.shape) == 0: - fb.shape = (1,) - return np.inner(fa, fb).view(MaskedArray) -inner.__doc__ = doc_note(np.inner.__doc__, - "Masked values are replaced by 0.") -innerproduct = inner - -def outer(a, b): - "maskedarray version of the numpy function." - fa = filled(a, 0).ravel() - fb = filled(b, 0).ravel() - d = np.outer(fa, fb) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - return masked_array(d) - ma = getmaskarray(a) - mb = getmaskarray(b) - m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0) - return masked_array(d, mask=m) -outer.__doc__ = doc_note(np.outer.__doc__, - "Masked values are replaced by 0.") -outerproduct = outer - -def allequal (a, b, fill_value=True): - """ - Return True if all entries of a and b are equal, using - fill_value as a truth value where either or both are masked. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - fill_value : bool, optional - Whether masked values in a or b are considered equal (True) or not - (False). - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance, False otherwise. If either array contains NaN, - then False is returned. 
- - See Also - -------- - all, any - numpy.ma.allclose - - Examples - -------- - >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) - >>> a - masked_array(data = [10000000000.0 1e-07 --], - mask = [False False True], - fill_value=1e+20) - - >>> b = array([1e10, 1e-7, -42.0]) - >>> b - array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) - >>> ma.allequal(a, b, fill_value=False) - False - >>> ma.allequal(a, b) - True - - """ - m = mask_or(getmask(a), getmask(b)) - if m is nomask: - x = getdata(a) - y = getdata(b) - d = umath.equal(x, y) - return d.all() - elif fill_value: - x = getdata(a) - y = getdata(b) - d = umath.equal(x, y) - dm = array(d, mask=m, copy=False) - return dm.filled(True).all(None) - else: - return False - -def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - This function is equivalent to `allclose` except that masked values - are treated as equal (default) or unequal, depending on the `masked_equal` - argument. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - masked_equal : bool, optional - Whether masked values in `a` and `b` are considered equal (True) or not - (False). They are considered equal by default. - rtol : float, optional - Relative tolerance. The relative difference is equal to ``rtol * b``. - Default is 1e-5. - atol : float, optional - Absolute tolerance. The absolute difference is equal to `atol`. - Default is 1e-8. - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance, False otherwise. If either array contains NaN, then - False is returned. - - See Also - -------- - all, any - numpy.allclose : the non-masked `allclose`. 
- - Notes - ----- - If the following equation is element-wise True, then `allclose` returns - True:: - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - Return True if all elements of `a` and `b` are equal subject to - given tolerances. - - Examples - -------- - >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) - >>> a - masked_array(data = [10000000000.0 1e-07 --], - mask = [False False True], - fill_value = 1e+20) - >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) - False - - >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) - True - >>> ma.allclose(a, b, masked_equal=False) - False - - Masked values are not compared directly. - - >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) - True - >>> ma.allclose(a, b, masked_equal=False) - False - - """ - x = masked_array(a, copy=False) - y = masked_array(b, copy=False) - m = mask_or(getmask(x), getmask(y)) - xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) - # If we have some infs, they should fall at the same place. - if not np.all(xinf == filled(np.isinf(y), False)): - return False - # No infs at all - if not np.any(xinf): - d = filled(umath.less_equal(umath.absolute(x - y), - atol + rtol * umath.absolute(y)), - masked_equal) - return np.all(d) - if not np.all(filled(x[xinf] == y[xinf], masked_equal)): - return False - x = x[~xinf] - y = y[~xinf] - d = filled(umath.less_equal(umath.absolute(x - y), - atol + rtol * umath.absolute(y)), - masked_equal) - return np.all(d) - -#.............................................................................. -def asarray(a, dtype=None, order=None): - """ - Convert the input to a masked array of the given data-type. - - No copy is performed if the input is already an `ndarray`. 
If `a` is - a subclass of `MaskedArray`, a base class `MaskedArray` is returned. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to a masked array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists, ndarrays and masked arrays. - dtype : dtype, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. - - Returns - ------- - out : MaskedArray - Masked array interpretation of `a`. - - See Also - -------- - asanyarray : Similar to `asarray`, but conserves subclasses. - - Examples - -------- - >>> x = np.arange(10.).reshape(2, 5) - >>> x - array([[ 0., 1., 2., 3., 4.], - [ 5., 6., 7., 8., 9.]]) - >>> np.ma.asarray(x) - masked_array(data = - [[ 0. 1. 2. 3. 4.] - [ 5. 6. 7. 8. 9.]], - mask = - False, - fill_value = 1e+20) - >>> type(np.ma.asarray(x)) - - - """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) - -def asanyarray(a, dtype=None): - """ - Convert the input to a masked array, conserving subclasses. - - If `a` is a subclass of `MaskedArray`, its class is conserved. - No copy is performed if the input is already an `ndarray`. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. - dtype : dtype, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. - - Returns - ------- - out : MaskedArray - MaskedArray interpretation of `a`. - - See Also - -------- - asarray : Similar to `asanyarray`, but does not conserve subclass. - - Examples - -------- - >>> x = np.arange(10.).reshape(2, 5) - >>> x - array([[ 0., 1., 2., 3., 4.], - [ 5., 6., 7., 8., 9.]]) - >>> np.ma.asanyarray(x) - masked_array(data = - [[ 0. 1. 2. 3. 4.] 
- [ 5. 6. 7. 8. 9.]], - mask = - False, - fill_value = 1e+20) - >>> type(np.ma.asanyarray(x)) - - - """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) - - -#####-------------------------------------------------------------------------- -#---- --- Pickling --- -#####-------------------------------------------------------------------------- -def dump(a, F): - """ - Pickle a masked array to a file. - - This is a wrapper around ``cPickle.dump``. - - Parameters - ---------- - a : MaskedArray - The array to be pickled. - F : str or file-like object - The file to pickle `a` to. If a string, the full path to the file. - - """ - if not hasattr(F, 'readline'): - F = open(F, 'w') - return cPickle.dump(a, F) - -def dumps(a): - """ - Return a string corresponding to the pickling of a masked array. - - This is a wrapper around ``cPickle.dumps``. - - Parameters - ---------- - a : MaskedArray - The array for which the string representation of the pickle is - returned. - - """ - return cPickle.dumps(a) - -def load(F): - """ - Wrapper around ``cPickle.load`` which accepts either a file-like object - or a filename. - - Parameters - ---------- - F : str or file - The file or file name to load. - - See Also - -------- - dump : Pickle an array - - Notes - ----- - This is different from `numpy.load`, which does not use cPickle but loads - the NumPy binary .npy format. - - """ - if not hasattr(F, 'readline'): - F = open(F, 'r') - return cPickle.load(F) - -def loads(strg): - """ - Load a pickle from the current string. - - The result of ``cPickle.loads(strg)`` is returned. - - Parameters - ---------- - strg : str - The string to load. - - See Also - -------- - dumps : Return a string corresponding to the pickling of a masked array. - - """ - return cPickle.loads(strg) - -################################################################################ -def fromfile(file, dtype=float, count= -1, sep=''): - raise NotImplementedError("Not yet implemented. 
Sorry") - - -def fromflex(fxarray): - """ - Build a masked array from a suitable flexible-type array. - - The input array has to have a data-type with ``_data`` and ``_mask`` - fields. This type of array is output by `MaskedArray.toflex`. - - Parameters - ---------- - fxarray : ndarray - The structured input array, containing ``_data`` and ``_mask`` - fields. If present, other fields are discarded. - - Returns - ------- - result : MaskedArray - The constructed masked array. - - See Also - -------- - MaskedArray.toflex : Build a flexible-type array from a masked array. - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) - >>> rec = x.toflex() - >>> rec - array([[(0, False), (1, True), (2, False)], - [(3, True), (4, False), (5, True)], - [(6, False), (7, True), (8, False)]], - dtype=[('_data', '>> x2 = np.ma.fromflex(rec) - >>> x2 - masked_array(data = - [[0 -- 2] - [-- 4 --] - [6 -- 8]], - mask = - [[False True False] - [ True False True] - [False True False]], - fill_value = 999999) - - Extra fields can be present in the structured array but are discarded: - - >>> dt = [('_data', '>> rec2 = np.zeros((2, 2), dtype=dt) - >>> rec2 - array([[(0, False, 0.0), (0, False, 0.0)], - [(0, False, 0.0), (0, False, 0.0)]], - dtype=[('_data', '>> y = np.ma.fromflex(rec2) - >>> y - masked_array(data = - [[0 0] - [0 0]], - mask = - [[False False] - [False False]], - fill_value = 999999) - - """ - return masked_array(fxarray['_data'], mask=fxarray['_mask']) - - - -class _convert2ma: - """ - Convert functions from numpy to numpy.ma. - - Parameters - ---------- - _methodname : string - Name of the method to transform. - - """ - __doc__ = None - # - def __init__(self, funcname, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc() - self._extras = params or {} - # - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - # Add the signature of the function at the beginning of the doc - if sig: - sig = "%s%s\n" % (self._func.__name__, sig) - doc = sig + doc - return doc - # - def __call__(self, a, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(a, *args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result - -arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False)) -clip = np.clip -diff = np.diff -empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False)) -empty_like = _convert2ma('empty_like') -frombuffer = _convert2ma('frombuffer') -fromfunction = _convert2ma('fromfunction') -identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False)) -indices = np.indices -ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False)) -ones_like = np.ones_like -squeeze = np.squeeze -zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False)) -zeros_like = np.zeros_like - -############################################################################### diff --git a/numpy-1.6.2/numpy/ma/extras.py b/numpy-1.6.2/numpy/ma/extras.py deleted file mode 100644 index 1c28eadb86..0000000000 --- a/numpy-1.6.2/numpy/ma/extras.py +++ /dev/null @@ -1,1894 +0,0 @@ -""" -Masked arrays add-ons. - -A collection of utilities for `numpy.ma`. 
- -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -__all__ = ['apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', - 'atleast_3d', 'average', - 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols', - 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef', - 'cov', - 'diagflat', 'dot', 'dstack', - 'ediff1d', - 'flatnotmasked_contiguous', 'flatnotmasked_edges', - 'hsplit', 'hstack', - 'in1d', 'intersect1d', - 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', - 'masked_all_like', 'median', 'mr_', - 'notmasked_contiguous', 'notmasked_edges', - 'polyfit', - 'row_stack', - 'setdiff1d', 'setxor1d', - 'unique', 'union1d', - 'vander', 'vstack', - ] - -import itertools -import warnings - -import core as ma -from core import MaskedArray, MAError, add, array, asarray, concatenate, count, \ - filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, \ - mask_or, nomask, ones, sort, zeros -#from core import * - -import numpy as np -from numpy import ndarray, array as nxarray -import numpy.core.umath as umath -from numpy.lib.index_tricks import AxisConcatenator -from numpy.linalg import lstsq - - -#............................................................................... -def issequence(seq): - """Is seq a sequence (ndarray, list or tuple)?""" - if isinstance(seq, (ndarray, tuple, list)): - return True - return False - -def count_masked(arr, axis=None): - """ - Count the number of masked elements along the given axis. - - Parameters - ---------- - arr : array_like - An array with (possibly) masked elements. - axis : int, optional - Axis along which to count. If None (default), a flattened - version of the array is used. 
- - Returns - ------- - count : int, ndarray - The total number of masked elements (axis=None) or the number - of masked elements along each slice of the given axis. - - See Also - -------- - MaskedArray.count : Count non-masked elements. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(9).reshape((3,3)) - >>> a = ma.array(a) - >>> a[1, 0] = ma.masked - >>> a[1, 2] = ma.masked - >>> a[2, 1] = ma.masked - >>> a - masked_array(data = - [[0 1 2] - [-- 4 --] - [6 -- 8]], - mask = - [[False False False] - [ True False True] - [False True False]], - fill_value=999999) - >>> ma.count_masked(a) - 3 - - When the `axis` keyword is used an array is returned. - - >>> ma.count_masked(a, axis=0) - array([1, 1, 1]) - >>> ma.count_masked(a, axis=1) - array([0, 2, 1]) - - """ - m = getmaskarray(arr) - return m.sum(axis) - -def masked_all(shape, dtype=float): - """ - Empty masked array with all elements masked. - - Return an empty masked array of the given shape and dtype, where all the - data are masked. - - Parameters - ---------- - shape : tuple - Shape of the required MaskedArray. - dtype : dtype, optional - Data type of the output. - - Returns - ------- - a : MaskedArray - A masked array with all data masked. - - See Also - -------- - masked_all_like : Empty masked array modelled on an existing array. - - Examples - -------- - >>> import numpy.ma as ma - >>> ma.masked_all((3, 3)) - masked_array(data = - [[-- -- --] - [-- -- --] - [-- -- --]], - mask = - [[ True True True] - [ True True True] - [ True True True]], - fill_value=1e+20) - - The `dtype` parameter defines the underlying data type. - - >>> a = ma.masked_all((3, 3)) - >>> a.dtype - dtype('float64') - >>> a = ma.masked_all((3, 3), dtype=np.int32) - >>> a.dtype - dtype('int32') - - """ - a = masked_array(np.empty(shape, dtype), - mask=np.ones(shape, make_mask_descr(dtype))) - return a - -def masked_all_like(arr): - """ - Empty masked array with the properties of an existing array. 
- - Return an empty masked array of the same shape and dtype as - the array `arr`, where all the data are masked. - - Parameters - ---------- - arr : ndarray - An array describing the shape and dtype of the required MaskedArray. - - Returns - ------- - a : MaskedArray - A masked array with all data masked. - - Raises - ------ - AttributeError - If `arr` doesn't have a shape attribute (i.e. not an ndarray) - - See Also - -------- - masked_all : Empty masked array with all elements masked. - - Examples - -------- - >>> import numpy.ma as ma - >>> arr = np.zeros((2, 3), dtype=np.float32) - >>> arr - array([[ 0., 0., 0.], - [ 0., 0., 0.]], dtype=float32) - >>> ma.masked_all_like(arr) - masked_array(data = - [[-- -- --] - [-- -- --]], - mask = - [[ True True True] - [ True True True]], - fill_value=1e+20) - - The dtype of the masked array matches the dtype of `arr`. - - >>> arr.dtype - dtype('float32') - >>> ma.masked_all_like(arr).dtype - dtype('float32') - - """ - a = np.empty_like(arr).view(MaskedArray) - a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) - return a - - -#####-------------------------------------------------------------------------- -#---- --- Standard functions --- -#####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - - Parameters - ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). - - """ - - def __init__(self, funcname): - self.__name__ = funcname - self.__doc__ = self.getdoc() - - def getdoc(self): - """ - Retrieve the docstring and signature from the function. 
- - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. - - .. warning:: - If the function docstring already contained a Notes section, the - new docstring will have two Notes sections instead of appending a note - to the existing section. - - Parameters - ---------- - None - - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = self.__name__ + ma.get_object_signature(npfunc) - locdoc = "Notes\n-----\nThe function is applied to both the _data"\ - " and the _mask, if any." - return '\n'.join((sig, doc, locdoc)) - return - - - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - if len(args) == 1: - x = args[0] - if isinstance(x, ndarray): - _d = func(x.__array__(), **params) - _m = func(getmaskarray(x), **params) - return masked_array(_d, mask=_m) - elif isinstance(x, tuple) or isinstance(x, list): - _d = func(tuple([np.asarray(a) for a in x]), **params) - _m = func(tuple([getmaskarray(a) for a in x]), **params) - return masked_array(_d, mask=_m) - else: - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - return res - -atleast_1d = _fromnxfunction('atleast_1d') -atleast_2d = _fromnxfunction('atleast_2d') -atleast_3d = _fromnxfunction('atleast_3d') -#atleast_1d = np.atleast_1d -#atleast_2d = np.atleast_2d -#atleast_3d = np.atleast_3d - -vstack = row_stack = _fromnxfunction('vstack') -hstack = _fromnxfunction('hstack') -column_stack = _fromnxfunction('column_stack') -dstack = _fromnxfunction('dstack') - -hsplit = _fromnxfunction('hsplit') - -diagflat = _fromnxfunction('diagflat') - - 
-#####-------------------------------------------------------------------------- -#---- -#####-------------------------------------------------------------------------- -def flatten_inplace(seq): - """Flatten a sequence in place.""" - k = 0 - while (k != len(seq)): - while hasattr(seq[k], '__iter__'): - seq[k:(k + 1)] = seq[k] - k += 1 - return seq - - -def apply_along_axis(func1d, axis, arr, *args, **kwargs): - """ - (This docstring should be overwritten) - """ - arr = array(arr, copy=False, subok=True) - nd = arr.ndim - if axis < 0: - axis += nd - if (axis >= nd): - raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." - % (axis, nd)) - ind = [0] * (nd - 1) - i = np.zeros(nd, 'O') - indlist = range(nd) - indlist.remove(axis) - i[axis] = slice(None, None) - outshape = np.asarray(arr.shape).take(indlist) - i.put(indlist, ind) - j = i.copy() - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - # if res is a number, then we have a smaller output array - asscalar = np.isscalar(res) - if not asscalar: - try: - len(res) - except TypeError: - asscalar = True - # Note: we shouldn't set the dtype of the output from the first result... 
- #...so we force the type to object, and build a list of dtypes - #...we'll just take the largest, to avoid some downcasting - dtypes = [] - if asscalar: - dtypes.append(np.asarray(res).dtype) - outarr = zeros(outshape, object) - outarr[tuple(ind)] = res - Ntot = np.product(outshape) - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= outshape[n]) and (n > (1 - nd)): - ind[n - 1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(ind)] = res - dtypes.append(asarray(res).dtype) - k += 1 - else: - res = array(res, copy=False, subok=True) - j = i.copy() - j[axis] = ([slice(None, None)] * res.ndim) - j.put(indlist, ind) - Ntot = np.product(outshape) - holdshape = outshape - outshape = list(arr.shape) - outshape[axis] = res.shape - dtypes.append(asarray(res).dtype) - outshape = flatten_inplace(outshape) - outarr = zeros(outshape, object) - outarr[tuple(flatten_inplace(j.tolist()))] = res - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= holdshape[n]) and (n > (1 - nd)): - ind[n - 1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - j.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(flatten_inplace(j.tolist()))] = res - dtypes.append(asarray(res).dtype) - k += 1 - max_dtypes = np.dtype(np.asarray(dtypes).max()) - if not hasattr(arr, '_mask'): - result = np.asarray(outarr, dtype=max_dtypes) - else: - result = asarray(outarr, dtype=max_dtypes) - result.fill_value = ma.default_fill_value(result) - return result -apply_along_axis.__doc__ = np.apply_along_axis.__doc__ - - -def apply_over_axes(func, a, axes): - """ - (This docstring will be overwritten) - """ - val = np.asarray(a) - msk = getmaskarray(a) - N = a.ndim - if array(axes).ndim == 0: - axes = (axes,) - for axis in axes: - if axis < 0: axis = N + axis - args = (val, axis) - res = ma.array(func(*(val, axis)), mask=func(*(msk, 
axis))) - if res.ndim == val.ndim: - (val, msk) = (res._data, res._mask) - else: - res = ma.expand_dims(res, axis) - if res.ndim == val.ndim: - (val, msk) = (res._data, res._mask) - else: - raise ValueError("Function is not returning"\ - " an array of correct shape") - return val -apply_over_axes.__doc__ = np.apply_over_axes.__doc__ - - -def average(a, axis=None, weights=None, returned=False): - """ - Return the weighted average of array over the given axis. - - Parameters - ---------- - a : array_like - Data to be averaged. - Masked entries are not taken into account in the computation. - axis : int, optional - Axis along which the variance is computed. The default is to compute - the variance of the flattened array. - weights : array_like, optional - The importance that each element has in the computation of the average. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If ``weights=None``, then all data in `a` are assumed to have a - weight equal to one. - returned : bool, optional - Flag indicating whether a tuple ``(result, sum of weights)`` - should be returned as output (True), or just the result (False). - Default is False. - - Returns - ------- - average, [sum_of_weights] : (tuple of) scalar or MaskedArray - The average along the specified axis. When returned is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is `np.float64` - if `a` is of integer type, otherwise it is of the same type as `a`. - If returned, `sum_of_weights` is of the same type as `average`. - - Examples - -------- - >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) - >>> np.ma.average(a, weights=[3, 1, 0, 0]) - 1.25 - - >>> x = np.ma.arange(6.).reshape(3, 2) - >>> print x - [[ 0. 1.] - [ 2. 3.] - [ 4. 5.]] - >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], - ... 
returned=True) - >>> print avg - [2.66666666667 3.66666666667] - - """ - a = asarray(a) - mask = a.mask - ash = a.shape - if ash == (): - ash = (1,) - if axis is None: - if mask is nomask: - if weights is None: - n = a.sum(axis=None) - d = float(a.size) - else: - w = filled(weights, 0.0).ravel() - n = umath.add.reduce(a._data.ravel() * w) - d = umath.add.reduce(w) - del w - else: - if weights is None: - n = a.filled(0).sum(axis=None) - d = float(umath.add.reduce((~mask).ravel())) - else: - w = array(filled(weights, 0.0), float, mask=mask).ravel() - n = add.reduce(a.ravel() * w) - d = add.reduce(w) - del w - else: - if mask is nomask: - if weights is None: - d = ash[axis] * 1.0 - n = add.reduce(a._data, axis, dtype=float) - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = np.array(w, float, copy=0) - n = add.reduce(a * w, axis) - d = add.reduce(w, axis) - del w - elif wsh == (ash[axis],): - ni = ash[axis] - r = [None] * len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w[" + repr(tuple(r)) + "] * ones(ash, float)") - n = add.reduce(a * w, axis, dtype=float) - d = add.reduce(w, axis, dtype=float) - del w, r - else: - raise ValueError, 'average: weights wrong shape.' - else: - if weights is None: - n = add.reduce(a, axis, dtype=float) - d = umath.add.reduce((-mask), axis=axis, dtype=float) - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = array(w, dtype=float, mask=mask, copy=0) - n = add.reduce(a * w, axis, dtype=float) - d = add.reduce(w, axis, dtype=float) - elif wsh == (ash[axis],): - ni = ash[axis] - r = [None] * len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w[" + repr(tuple(r)) + \ - "] * masked_array(ones(ash, float), mask)") - n = add.reduce(a * w, axis, dtype=float) - d = add.reduce(w, axis, dtype=float) - else: - raise ValueError, 'average: weights wrong shape.' 
- del w - if n is masked or d is masked: - return masked - result = n / d - del n - - if isinstance(result, MaskedArray): - if ((axis is None) or (axis == 0 and a.ndim == 1)) and \ - (result.mask is nomask): - result = result._data - if returned: - if not isinstance(d, MaskedArray): - d = masked_array(d) - if isinstance(d, ndarray) and (not d.shape == result.shape): - d = ones(result.shape, dtype=float) * d - if returned: - return result, d - else: - return result - - - -def median(a, axis=None, out=None, overwrite_input=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional - Axis along which the medians are computed. The default (None) is - to compute the median along a flattened version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. Note that, if `overwrite_input` is True, and the input - is not already an `ndarray`, an error will be raised. - - Returns - ------- - median : ndarray - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. - Return data-type is `float64` for integers and floats smaller than - `float64`, or the input data-type, otherwise. 
- - See Also - -------- - mean - - Notes - ----- - Given a vector ``V`` with ``N`` non masked values, the median of ``V`` - is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. - ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` - when ``N`` is even. - - Examples - -------- - >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) - >>> np.ma.extras.median(x) - 1.5 - - >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) - >>> np.ma.extras.median(x) - 2.5 - >>> np.ma.extras.median(x, axis=-1, overwrite_input=True) - masked_array(data = [ 2. 5.], - mask = False, - fill_value = 1e+20) - - """ - def _median1D(data): - counts = filled(count(data), 0) - (idx, rmd) = divmod(counts, 2) - if rmd: - choice = slice(idx, idx + 1) - else: - choice = slice(idx - 1, idx + 1) - return data[choice].mean(0) - # - if overwrite_input: - if axis is None: - asorted = a.ravel() - asorted.sort() - else: - a.sort(axis=axis) - asorted = a - else: - asorted = sort(a, axis=axis) - if axis is None: - result = _median1D(asorted) - else: - result = apply_along_axis(_median1D, axis, asorted) - if out is not None: - out = result - return result - - - - -#.............................................................................. -def compress_rowcols(x, axis=None): - """ - Suppress the rows and/or columns of a 2-D array that contain - masked values. - - The suppression behavior is selected with the `axis` parameter. - - - If axis is None, both rows and columns are suppressed. - - If axis is 0, only rows are suppressed. - - If axis is 1 or -1, only columns are suppressed. - - Parameters - ---------- - axis : int, optional - Axis along which to perform the operation. Default is None. - - Returns - ------- - compressed_array : ndarray - The compressed array. - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], - ... [1, 0, 0], - ... 
[0, 0, 0]]) - >>> x - masked_array(data = - [[-- 1 2] - [-- 4 5] - [6 7 8]], - mask = - [[ True False False] - [ True False False] - [False False False]], - fill_value = 999999) - - >>> np.ma.extras.compress_rowcols(x) - array([[7, 8]]) - >>> np.ma.extras.compress_rowcols(x, 0) - array([[6, 7, 8]]) - >>> np.ma.extras.compress_rowcols(x, 1) - array([[1, 2], - [4, 5], - [7, 8]]) - - """ - x = asarray(x) - if x.ndim != 2: - raise NotImplementedError, "compress2d works for 2D arrays only." - m = getmask(x) - # Nothing is masked: return x - if m is nomask or not m.any(): - return x._data - # All is masked: return empty - if m.all(): - return nxarray([]) - # Builds a list of rows/columns indices - (idxr, idxc) = (range(len(x)), range(x.shape[1])) - masked = m.nonzero() - if not axis: - for i in np.unique(masked[0]): - idxr.remove(i) - if axis in [None, 1, -1]: - for j in np.unique(masked[1]): - idxc.remove(j) - return x._data[idxr][:, idxc] - -def compress_rows(a): - """ - Suppress whole rows of a 2-D array that contain masked values. - - This is equivalent to ``np.ma.extras.compress_rowcols(a, 0)``, see - `extras.compress_rowcols` for details. - - See Also - -------- - extras.compress_rowcols - - """ - return compress_rowcols(a, 0) - -def compress_cols(a): - """ - Suppress whole columns of a 2-D array that contain masked values. - - This is equivalent to ``np.ma.extras.compress_rowcols(a, 1)``, see - `extras.compress_rowcols` for details. - - See Also - -------- - extras.compress_rowcols - - """ - return compress_rowcols(a, 1) - -def mask_rowcols(a, axis=None): - """ - Mask rows and/or columns of a 2D array that contain masked values. - - Mask whole rows and/or columns of a 2D array that contain - masked values. The masking behavior is selected using the - `axis` parameter. - - - If `axis` is None, rows *and* columns are masked. - - If `axis` is 0, only rows are masked. - - If `axis` is 1 or -1, only columns are masked. 
- - Parameters - ---------- - a : array_like, MaskedArray - The array to mask. If not a MaskedArray instance (or if no array - elements are masked). The result is a MaskedArray with `mask` set - to `nomask` (False). Must be a 2D array. - axis : int, optional - Axis along which to perform the operation. If None, applies to a - flattened version of the array. - - Returns - ------- - a : MaskedArray - A modified version of the input array, masked depending on the value - of the `axis` parameter. - - Raises - ------ - NotImplementedError - If input array `a` is not 2D. - - See Also - -------- - mask_rows : Mask rows of a 2D array that contain masked values. - mask_cols : Mask cols of a 2D array that contain masked values. - masked_where : Mask where a condition is met. - - Notes - ----- - The input array's mask is modified by this function. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_rowcols(a) - masked_array(data = - [[0 -- 0] - [-- -- --] - [0 -- 0]], - mask = - [[False True False] - [ True True True] - [False True False]], - fill_value=999999) - - """ - a = asarray(a) - if a.ndim != 2: - raise NotImplementedError, "compress2d works for 2D arrays only." - m = getmask(a) - # Nothing is masked: return a - if m is nomask or not m.any(): - return a - maskedval = m.nonzero() - a._mask = a._mask.copy() - if not axis: - a[np.unique(maskedval[0])] = masked - if axis in [None, 1, -1]: - a[:, np.unique(maskedval[1])] = masked - return a - -def mask_rows(a, axis=None): - """ - Mask rows of a 2D array that contain masked values. - - This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. 
- - See Also - -------- - mask_rowcols : Mask rows and/or columns of a 2D array. - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_rows(a) - masked_array(data = - [[0 0 0] - [-- -- --] - [0 0 0]], - mask = - [[False False False] - [ True True True] - [False False False]], - fill_value=999999) - - """ - return mask_rowcols(a, 0) - -def mask_cols(a, axis=None): - """ - Mask columns of a 2D array that contain masked values. - - This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. - - See Also - -------- - mask_rowcols : Mask rows and/or columns of a 2D array. - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_cols(a) - masked_array(data = - [[0 -- 0] - [0 -- 0] - [0 -- 0]], - mask = - [[False True False] - [False True False] - [False True False]], - fill_value=999999) - - """ - return mask_rowcols(a, 1) - - -def dot(a, b, strict=False): - """ - Return the dot product of two arrays. - - .. note:: - Works only with 2-D arrays at the moment. - - This function is the equivalent of `numpy.dot` that takes masked values - into account, see `numpy.dot` for details. - - Parameters - ---------- - a, b : ndarray - Inputs arrays. 
- strict : bool, optional - Whether masked data are propagated (True) or set to 0 (False) for the - computation. Default is False. - Propagating the mask means that if a masked value appears in a row or - column, the whole row or column is considered masked. - - See Also - -------- - numpy.dot : Equivalent function for ndarrays. - - Examples - -------- - >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) - >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) - >>> np.ma.dot(a, b) - masked_array(data = - [[21 26] - [45 64]], - mask = - [[False False] - [False False]], - fill_value = 999999) - >>> np.ma.dot(a, b, strict=True) - masked_array(data = - [[-- --] - [-- 64]], - mask = - [[ True True] - [ True False]], - fill_value = 999999) - - """ - #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension - if strict and (a.ndim == 2) and (b.ndim == 2): - a = mask_rows(a) - b = mask_cols(b) - # - d = np.dot(filled(a, 0), filled(b, 0)) - # - am = (~getmaskarray(a)) - bm = (~getmaskarray(b)) - m = ~np.dot(am, bm) - return masked_array(d, mask=m) - -#####-------------------------------------------------------------------------- -#---- --- arraysetops --- -#####-------------------------------------------------------------------------- - -def ediff1d(arr, to_end=None, to_begin=None): - """ - Compute the differences between consecutive elements of an array. - - This function is the equivalent of `numpy.ediff1d` that takes masked - values into account, see `numpy.ediff1d` for details. - - See Also - -------- - numpy.ediff1d : Equivalent function for ndarrays. - - """ - arr = ma.asanyarray(arr).flat - ed = arr[1:] - arr[:-1] - arrays = [ed] - # - if to_begin is not None: - arrays.insert(0, to_begin) - if to_end is not None: - arrays.append(to_end) - # - if len(arrays) != 1: - # We'll save ourselves a copy of a potentially large array in the common - # case where neither to_begin or to_end was given. 
- ed = hstack(arrays) - # - return ed - - -def unique(ar1, return_index=False, return_inverse=False): - """ - Finds the unique elements of an array. - - Masked values are considered the same element (masked). The output array - is always a masked array. See `numpy.unique` for more details. - - See Also - -------- - numpy.unique : Equivalent function for ndarrays. - - """ - output = np.unique(ar1, - return_index=return_index, - return_inverse=return_inverse) - if isinstance(output, tuple): - output = list(output) - output[0] = output[0].view(MaskedArray) - output = tuple(output) - else: - output = output.view(MaskedArray) - return output - - -def intersect1d(ar1, ar2, assume_unique=False): - """ - Returns the unique elements common to both arrays. - - Masked values are considered equal one to the other. - The output is always a masked array. - - See `numpy.intersect1d` for more details. - - See Also - -------- - numpy.intersect1d : Equivalent function for ndarrays. - - Examples - -------- - >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - >>> intersect1d(x, y) - masked_array(data = [1 3 --], - mask = [False False True], - fill_value = 999999) - - """ - if assume_unique: - aux = ma.concatenate((ar1, ar2)) - else: - # Might be faster than unique( intersect1d( ar1, ar2 ) )? - aux = ma.concatenate((unique(ar1), unique(ar2))) - aux.sort() - return aux[aux[1:] == aux[:-1]] - - -def setxor1d(ar1, ar2, assume_unique=False): - """ - Set exclusive-or of 1-D arrays with unique elements. - - The output is always a masked array. See `numpy.setxor1d` for more details. - - See Also - -------- - numpy.setxor1d : Equivalent function for ndarrays. 
- - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = ma.concatenate((ar1, ar2)) - if aux.size == 0: - return aux - aux.sort() - auxf = aux.filled() -# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 - flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) -# flag2 = ediff1d( flag ) == 0 - flag2 = (flag[1:] == flag[:-1]) - return aux[flag2] - -def in1d(ar1, ar2, assume_unique=False): - """ - Test whether each element of an array is also present in a second - array. - - The output is always a masked array. See `numpy.in1d` for more details. - - See Also - -------- - numpy.in1d : Equivalent function for ndarrays. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if not assume_unique: - ar1, rev_idx = unique(ar1, return_inverse=True) - ar2 = unique(ar2) - - ar = ma.concatenate((ar1, ar2)) - # We need this to be a stable sort, so always use 'mergesort' - # here. The values from the first array should always come before - # the values from the second array. - order = ar.argsort(kind='mergesort') - sar = ar[order] - equal_adj = (sar[1:] == sar[:-1]) - flag = ma.concatenate((equal_adj, [False])) - indx = order.argsort(kind='mergesort')[:len(ar1)] - - if assume_unique: - return flag[indx] - else: - return flag[indx][rev_idx] - - -def union1d(ar1, ar2): - """ - Union of two arrays. - - The output is always a masked array. See `numpy.union1d` for more details. - - See also - -------- - numpy.union1d : Equivalent function for ndarrays. - - """ - return unique(ma.concatenate((ar1, ar2))) - - -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Set difference of 1D arrays with unique elements. - - The output is always a masked array. See `numpy.setdiff1d` for more - details. - - See Also - -------- - numpy.setdiff1d : Equivalent function for ndarrays. 
- - Examples - -------- - >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) - >>> np.ma.extras.setdiff1d(x, [1, 2]) - masked_array(data = [3 --], - mask = [False True], - fill_value = 999999) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - aux = in1d(ar1, ar2, assume_unique=True) - if aux.size == 0: - return aux - else: - return ma.asarray(ar1)[aux == 0] - - -#####-------------------------------------------------------------------------- -#---- --- Covariance --- -#####-------------------------------------------------------------------------- - - - - -def _covhelper(x, y=None, rowvar=True, allow_masked=True): - """ - Private function for the computation of covariance and correlation - coefficients. - - """ - x = ma.array(x, ndmin=2, copy=True, dtype=float) - xmask = ma.getmaskarray(x) - # Quick exit if we can't process masked data - if not allow_masked and xmask.any(): - raise ValueError("Cannot process masked data...") - # - if x.shape[0] == 1: - rowvar = True - # Make sure that rowvar is either 0 or 1 - rowvar = int(bool(rowvar)) - axis = 1 - rowvar - if rowvar: - tup = (slice(None), None) - else: - tup = (None, slice(None)) - # - if y is None: - xnotmask = np.logical_not(xmask).astype(int) - else: - y = array(y, copy=False, ndmin=2, dtype=float) - ymask = ma.getmaskarray(y) - if not allow_masked and ymask.any(): - raise ValueError("Cannot process masked data...") - if xmask.any() or ymask.any(): - if y.shape == x.shape: - # Define some common mask - common_mask = np.logical_or(xmask, ymask) - if common_mask is not nomask: - x.unshare_mask() - y.unshare_mask() - xmask = x._mask = y._mask = ymask = common_mask - x = ma.concatenate((x, y), axis) - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) - x -= x.mean(axis=rowvar)[tup] - return (x, xnotmask, rowvar) - - -def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): - """ - Estimate the covariance matrix. 
- - Except for the handling of missing data this function does the same as - `numpy.cov`. For more details and examples, see `numpy.cov`. - - By default, masked values are recognized as such. If `x` and `y` have the - same shape, a common mask is allocated: if ``x[i,j]`` is masked, then - ``y[i,j]`` will also be masked. - Setting `allow_masked` to False will raise an exception if values are - missing in either of the input arrays. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - form as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N-1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, - then normalization is by ``N``. This keyword can be overridden by - the keyword ``ddof`` in numpy versions >= 1.5. - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises a `ValueError` exception when some values are missing. - ddof : {None, int}, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - - Raises - ------ - ValueError: - Raised if some values are missing and `allow_masked` is False. 
- - See Also - -------- - numpy.cov - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError("ddof must be an integer") - # Set up ddof - if ddof is None: - if bias: - ddof = 0 - else: - ddof = 1 - - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof - result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof - result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - return result - - -def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): - """ - Return correlation coefficients of the input array. - - Except for the handling of missing data this function does the same as - `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N-1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is 1, - then normalization is by ``N``. This keyword can be overridden by - the keyword ``ddof`` in numpy versions >= 1.5. - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises an exception. - ddof : {None, int}, optional - .. 
versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - See Also - -------- - numpy.corrcoef : Equivalent function in top-level NumPy module. - cov : Estimate the covariance matrix. - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError("ddof must be an integer") - # Set up ddof - if ddof is None: - if bias: - ddof = 0 - else: - ddof = 1 - - # Get the data - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - # Compute the covariance matrix - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof - c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof - c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - # Check whether we have a scalar - try: - diag = ma.diagonal(c) - except ValueError: - return 1 - # - if xnotmask.all(): - _denom = ma.sqrt(ma.multiply.outer(diag, diag)) - else: - _denom = diagflat(diag) - n = x.shape[1 - rowvar] - if rowvar: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, - ddof=1 - bias) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - else: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols(vstack((x[:, i], x[:, j]))).var(axis=1, - ddof=1 - bias) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - return c / _denom - -#####-------------------------------------------------------------------------- -#---- --- Concatenation helpers --- -#####-------------------------------------------------------------------------- - -class MAxisConcatenator(AxisConcatenator): - """ - Translate slice objects to concatenation along an axis. - - For documentation on usage, see `mr_class`. 
- - See Also - -------- - mr_class - - """ - - def __init__(self, axis=0): - AxisConcatenator.__init__(self, axis, matrix=False) - - def __getitem__(self, key): - if isinstance(key, str): - raise MAError, "Unavailable for masked array." - if type(key) is not tuple: - key = (key,) - objs = [] - scalars = [] - final_dtypedescr = None - for k in range(len(key)): - scalar = False - if type(key[k]) is slice: - step = key[k].step - start = key[k].start - stop = key[k].stop - if start is None: - start = 0 - if step is None: - step = 1 - if type(step) is type(1j): - size = int(abs(step)) - newobj = np.linspace(start, stop, num=size) - else: - newobj = np.arange(start, stop, step) - elif type(key[k]) is str: - if (key[k] in 'rc'): - self.matrix = True - self.col = (key[k] == 'c') - continue - try: - self.axis = int(key[k]) - continue - except (ValueError, TypeError): - raise ValueError, "Unknown special directive" - elif type(key[k]) in np.ScalarType: - newobj = asarray([key[k]]) - scalars.append(k) - scalar = True - else: - newobj = key[k] - objs.append(newobj) - if isinstance(newobj, ndarray) and not scalar: - if final_dtypedescr is None: - final_dtypedescr = newobj.dtype - elif newobj.dtype > final_dtypedescr: - final_dtypedescr = newobj.dtype - if final_dtypedescr is not None: - for k in scalars: - objs[k] = objs[k].astype(final_dtypedescr) - res = concatenate(tuple(objs), axis=self.axis) - return self._retval(res) - -class mr_class(MAxisConcatenator): - """ - Translate slice objects to concatenation along the first axis. - - This is the masked array version of `lib.index_tricks.RClass`. 
- - See Also - -------- - lib.index_tricks.RClass - - Examples - -------- - >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) - - """ - def __init__(self): - MAxisConcatenator.__init__(self, 0) - -mr_ = mr_class() - -#####-------------------------------------------------------------------------- -#---- Find unmasked data --- -#####-------------------------------------------------------------------------- - -def flatnotmasked_edges(a): - """ - Find the indices of the first and last unmasked values. - - Expects a 1-D `MaskedArray`, returns None if all values are masked. - - Parameters - ---------- - arr : array_like - Input 1-D `MaskedArray` - - Returns - ------- - edges : ndarray or None - The indices of first and last non-masked value in the array. - Returns None if all values are masked. - - See Also - -------- - flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 1-D arrays. - - Examples - -------- - >>> a = np.ma.arange(10) - >>> flatnotmasked_edges(a) - [0,-1] - - >>> mask = (a < 3) | (a > 8) | (a == 5) - >>> a[mask] = np.ma.masked - >>> np.array(a[~a.mask]) - array([3, 4, 6, 7, 8]) - - >>> flatnotmasked_edges(a) - array([3, 8]) - - >>> a[:] = np.ma.masked - >>> print flatnotmasked_edges(ma) - None - - """ - m = getmask(a) - if m is nomask or not np.any(m): - return np.array([0, a.size - 1]) - unmasked = np.flatnonzero(~m) - if len(unmasked) > 0: - return unmasked[[0, -1]] - else: - return None - - -def notmasked_edges(a, axis=None): - """ - Find the indices of the first and last unmasked values along an axis. - - If all values are masked, return None. Otherwise, return a list - of two tuples, corresponding to the indices of the first and last - unmasked values respectively. - - Parameters - ---------- - a : array_like - The input array. - axis : int, optional - Axis along which to perform the operation. 
- If None (default), applies to a flattened version of the array. - - Returns - ------- - edges : ndarray or list - An array of start and end indexes if there are any masked data in - the array. If there are no masked data in the array, `edges` is a - list of the first and last index. - - See Also - -------- - flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous, - clump_masked, clump_unmasked - - Examples - -------- - >>> a = np.arange(9).reshape((3, 3)) - >>> m = np.zeros_like(a) - >>> m[1:, 1:] = 1 - - >>> am = np.ma.array(a, mask=m) - >>> np.array(am[~am.mask]) - array([0, 1, 2, 3, 6]) - - >>> np.ma.extras.notmasked_edges(ma) - array([0, 6]) - - """ - a = asarray(a) - if axis is None or a.ndim == 1: - return flatnotmasked_edges(a) - m = getmaskarray(a) - idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) - return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), - tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] - - -def flatnotmasked_contiguous(a): - """ - Find contiguous unmasked data in a masked array along the given axis. - - Parameters - ---------- - a : narray - The input array. - - Returns - ------- - slice_list : list - A sorted sequence of slices (start index, end index). - - See Also - -------- - flatnotmasked_edges, notmasked_contiguous, notmasked_edges, - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 2-D arrays at most. 
- - Examples - -------- - >>> a = np.ma.arange(10) - >>> np.ma.extras.flatnotmasked_contiguous(a) - slice(0, 10, None) - - >>> mask = (a < 3) | (a > 8) | (a == 5) - >>> a[mask] = np.ma.masked - >>> np.array(a[~a.mask]) - array([3, 4, 6, 7, 8]) - - >>> np.ma.extras.flatnotmasked_contiguous(a) - [slice(3, 5, None), slice(6, 9, None)] - >>> a[:] = np.ma.masked - >>> print np.ma.extras.flatnotmasked_edges(a) - None - - """ - m = getmask(a) - if m is nomask: - return slice(0, a.size, None) - i = 0 - result = [] - for (k, g) in itertools.groupby(m.ravel()): - n = len(list(g)) - if not k: - result.append(slice(i, i + n)) - i += n - return result or None - -def notmasked_contiguous(a, axis=None): - """ - Find contiguous unmasked data in a masked array along the given axis. - - Parameters - ---------- - a : array_like - The input array. - axis : int, optional - Axis along which to perform the operation. - If None (default), applies to a flattened version of the array. - - Returns - ------- - endpoints : list - A list of slices (start and end indexes) of unmasked indexes - in the array. - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 2-D arrays at most. - - Examples - -------- - >>> a = np.arange(9).reshape((3, 3)) - >>> mask = np.zeros_like(a) - >>> mask[1:, 1:] = 1 - - >>> ma = np.ma.array(a, mask=mask) - >>> np.array(ma[~ma.mask]) - array([0, 1, 2, 3, 6]) - - >>> np.ma.extras.notmasked_contiguous(ma) - [slice(0, 4, None), slice(6, 7, None)] - - """ - a = asarray(a) - nd = a.ndim - if nd > 2: - raise NotImplementedError, "Currently limited to atmost 2D array." 
- if axis is None or nd == 1: - return flatnotmasked_contiguous(a) - # - result = [] - # - other = (axis + 1) % 2 - idx = [0, 0] - idx[axis] = slice(None, None) - # - for i in range(a.shape[other]): - idx[other] = i - result.append(flatnotmasked_contiguous(a[idx]) or None) - return result - - -def _ezclump(mask): - """ - Finds the clumps (groups of data with the same values) for a 1D bool array. - - Returns a series of slices. - """ - #def clump_masked(a): - if mask.ndim > 1: - mask = mask.ravel() - idx = (mask[1:] - mask[:-1]).nonzero() - idx = idx[0] + 1 - slices = [slice(left, right) - for (left, right) in zip(itertools.chain([0], idx), - itertools.chain(idx, [len(mask)]),)] - return slices - - -def clump_unmasked(a): - """ - Return list of slices corresponding to the unmasked clumps of a 1-D array. - (A "clump" is defined as a contiguous region of the array). - - Parameters - ---------- - a : ndarray - A one-dimensional masked array. - - Returns - ------- - slices : list of slice - The list of slices, one for each continuous region of unmasked - elements in `a`. - - Notes - ----- - .. versionadded:: 1.4.0 - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, - notmasked_contiguous, clump_masked - - Examples - -------- - >>> a = np.ma.masked_array(np.arange(10)) - >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked - >>> np.ma.extras.clump_unmasked(a) - [slice(3, 6, None), slice(7, 8, None)] - - """ - mask = getattr(a, '_mask', nomask) - if mask is nomask: - return [slice(0, a.size)] - slices = _ezclump(mask) - if a[0] is masked: - result = slices[1::2] - else: - result = slices[::2] - return result - - -def clump_masked(a): - """ - Returns a list of slices corresponding to the masked clumps of a 1-D array. - (A "clump" is defined as a contiguous region of the array). - - Parameters - ---------- - a : ndarray - A one-dimensional masked array. 
- - Returns - ------- - slices : list of slice - The list of slices, one for each continuous region of masked elements - in `a`. - - Notes - ----- - .. versionadded:: 1.4.0 - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, - notmasked_contiguous, clump_unmasked - - Examples - -------- - >>> a = np.ma.masked_array(np.arange(10)) - >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked - >>> np.ma.extras.clump_masked(a) - [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] - - """ - mask = ma.getmask(a) - if mask is nomask: - return [] - slices = _ezclump(mask) - if len(slices): - if a[0] is masked: - slices = slices[::2] - else: - slices = slices[1::2] - return slices - - - -#####-------------------------------------------------------------------------- -#---- Polynomial fit --- -#####-------------------------------------------------------------------------- - -def vander(x, n=None): - """ - Masked values in the input array result in rows of zeros. - """ - _vander = np.vander(x, n) - m = getmask(x) - if m is not nomask: - _vander[m] = 0 - return _vander -vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) - - -def polyfit(x, y, deg, rcond=None, full=False): - """ - Any masked values in x is propagated in y, and vice-versa. - """ - order = int(deg) + 1 - x = asarray(x) - mx = getmask(x) - y = asarray(y) - if y.ndim == 1: - m = mask_or(mx, getmask(y)) - elif y.ndim == 2: - y = mask_rows(y) - my = getmask(y) - if my is not nomask: - m = mask_or(mx, my[:, 0]) - else: - m = mx - else: - raise TypeError, "Expected a 1D or 2D array for y!" 
- if m is not nomask: - x[m] = y[m] = masked - # Set rcond - if rcond is None : - rcond = len(x) * np.finfo(x.dtype).eps - # Scale x to improve condition number - scale = abs(x).max() - if scale != 0 : - x = x / scale - # solve least squares equation for powers of x - v = vander(x, order) - c, resids, rank, s = lstsq(v, y.filled(0), rcond) - # warn on rank reduction, which indicates an ill conditioned matrix - if rank != order and not full: - warnings.warn("Polyfit may be poorly conditioned", np.RankWarning) - # scale returned coefficients - if scale != 0 : - if c.ndim == 1 : - c /= np.vander([scale], order)[0] - else : - c /= np.vander([scale], order).T - if full : - return c, resids, rank, s, rcond - else : - return c -polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) - -################################################################################ diff --git a/numpy-1.6.2/numpy/ma/mrecords.py b/numpy-1.6.2/numpy/ma/mrecords.py deleted file mode 100644 index 79fa6e15b8..0000000000 --- a/numpy-1.6.2/numpy/ma/mrecords.py +++ /dev/null @@ -1,720 +0,0 @@ -""":mod:`numpy.ma..mrecords` - -Defines the equivalent of :class:`numpy.recarrays` for masked arrays, -where fields can be accessed as attributes. -Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes -and the masking of individual fields. - -:author: Pierre Gerard-Marchant -""" -#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask', -#!!!: or whatever restricted keywords. -#!!!: An idea would be to no bother in the first place, and then rename the -#!!!: invalid fields with a trailing underscore... -#!!!: Maybe we could just overload the parser function ? 
- - -__author__ = "Pierre GF Gerard-Marchant" - -import sys - -import numpy as np -from numpy import bool_, dtype, \ - ndarray, recarray, array as narray -import numpy.core.numerictypes as ntypes -from numpy.core.records import fromarrays as recfromarrays, \ - fromrecords as recfromrecords - -_byteorderconv = np.core.records._byteorderconv -_typestr = ntypes._typestr - -import numpy.ma as ma -from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \ - getdata, getmaskarray, filled - -_check_fill_value = ma.core._check_fill_value - -import warnings - -__all__ = ['MaskedRecords', 'mrecarray', - 'fromarrays', 'fromrecords', 'fromtextfile', 'addfield', - ] - -reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] - -def _getformats(data): - "Returns the formats of each array of arraylist as a comma-separated string." - if hasattr(data, 'dtype'): - return ",".join([desc[1] for desc in data.dtype.descr]) - - formats = '' - for obj in data: - obj = np.asarray(obj) - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, ntypes.flexible): - formats += `obj.itemsize` - formats += ',' - return formats[:-1] - -def _checknames(descr, names=None): - """Checks that the field names of the descriptor ``descr`` are not some -reserved keywords. If this is the case, a default 'f%i' is substituted. -If the argument `names` is not None, updates the field names to valid names. 
- """ - ndescr = len(descr) - default_names = ['f%i' % i for i in range(ndescr)] - if names is None: - new_names = default_names - else: - if isinstance(names, (tuple, list)): - new_names = names - elif isinstance(names, str): - new_names = names.split(',') - else: - raise NameError("illegal input names %s" % `names`) - nnames = len(new_names) - if nnames < ndescr: - new_names += default_names[nnames:] - ndescr = [] - for (n, d, t) in zip(new_names, default_names, descr.descr): - if n in reserved_fields: - if t[0] in reserved_fields: - ndescr.append((d, t[1])) - else: - ndescr.append(t) - else: - ndescr.append((n, t[1])) - return np.dtype(ndescr) - - -def _get_fieldmask(self): - mdescr = [(n, '|b1') for n in self.dtype.names] - fdmask = np.empty(self.shape, dtype=mdescr) - fdmask.flat = tuple([False] * len(mdescr)) - return fdmask - - -class MaskedRecords(MaskedArray, object): - """ - -*IVariables*: - _data : {recarray} - Underlying data, as a record array. - _mask : {boolean array} - Mask of the records. A record is masked when all its fields are masked. - _fieldmask : {boolean recarray} - Record array of booleans, setting the mask of each individual field of each record. - _fill_value : {record} - Filling values for each field. - """ - #............................................ 
- def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, - formats=None, names=None, titles=None, - byteorder=None, aligned=False, - mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, - copy=False, - **options): - # - self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, - strides=strides, formats=formats, names=names, - titles=titles, byteorder=byteorder, - aligned=aligned,) - # - mdtype = ma.make_mask_descr(self.dtype) - if mask is nomask or not np.size(mask): - if not keep_mask: - self._mask = tuple([False] * len(mdtype)) - else: - mask = np.array(mask, copy=copy) - if mask.shape != self.shape: - (nd, nm) = (self.size, mask.size) - if nm == 1: - mask = np.resize(mask, self.shape) - elif nm == nd: - mask = np.reshape(mask, self.shape) - else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." - raise MAError(msg % (nd, nm)) - copy = True - if not keep_mask: - self.__setmask__(mask) - self._sharedmask = True - else: - if mask.dtype == mdtype: - _mask = mask - else: - _mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - self._mask = _mask - return self - #...................................................... - def __array_finalize__(self, obj): - # Make sure we have a _fieldmask by default .. - _mask = getattr(obj, '_mask', None) - if _mask is None: - objmask = getattr(obj, '_mask', nomask) - _dtype = ndarray.__getattribute__(self, 'dtype') - if objmask is nomask: - _mask = ma.make_mask_none(self.shape, dtype=_dtype) - else: - mdescr = ma.make_mask_descr(_dtype) - _mask = narray([tuple([m] * len(mdescr)) for m in objmask], - dtype=mdescr).view(recarray) - # Update some of the attributes - _dict = self.__dict__ - _dict.update(_mask=_mask) - self._update_from(obj) - if _dict['_baseclass'] == ndarray: - _dict['_baseclass'] = recarray - return - - - def _getdata(self): - "Returns the data as a recarray." 
- return ndarray.view(self, recarray) - _data = property(fget=_getdata) - - def _getfieldmask(self): - "Alias to mask" - return self._mask - _fieldmask = property(fget=_getfieldmask) - - def __len__(self): - "Returns the length" - # We have more than one record - if self.ndim: - return len(self._data) - # We have only one record: return the nb of fields - return len(self.dtype) - - def __getattribute__(self, attr): - try: - return object.__getattribute__(self, attr) - except AttributeError: # attr must be a fieldname - pass - fielddict = ndarray.__getattribute__(self, 'dtype').fields - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError, "record array has no attribute %s" % attr - # So far, so good... - _localdict = ndarray.__getattribute__(self, '__dict__') - _data = ndarray.view(self, _localdict['_baseclass']) - obj = _data.getfield(*res) - if obj.dtype.fields: - raise NotImplementedError("MaskedRecords is currently limited to"\ - "simple records...") - # Get some special attributes - # Reset the object's mask - hasmasked = False - _mask = _localdict.get('_mask', None) - if _mask is not None: - try: - _mask = _mask[attr] - except IndexError: - # Couldn't find a mask: use the default (nomask) - pass - hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any() - if (obj.shape or hasmasked): - obj = obj.view(MaskedArray) - obj._baseclass = ndarray - obj._isfield = True - obj._mask = _mask - # Reset the field values - _fill_value = _localdict.get('_fill_value', None) - if _fill_value is not None: - try: - obj._fill_value = _fill_value[attr] - except ValueError: - obj._fill_value = None - else: - obj = obj.item() - return obj - - - def __setattr__(self, attr, val): - "Sets the attribute attr to the value val." - # Should we call __setmask__ first ? 
- if attr in ['mask', 'fieldmask']: - self.__setmask__(val) - return - # Create a shortcut (so that we don't have to call getattr all the time) - _localdict = object.__getattribute__(self, '__dict__') - # Check whether we're creating a new field - newattr = attr not in _localdict - try: - # Is attr a generic attribute ? - ret = object.__setattr__(self, attr, val) - except: - # Not a generic attribute: exit if it's not a valid field - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - optinfo = ndarray.__getattribute__(self, '_optinfo') or {} - if not (attr in fielddict or attr in optinfo): - exctype, value = sys.exc_info()[:2] - raise exctype, value - else: - # Get the list of names ...... - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - # Check the attribute - if attr not in fielddict: - return ret - if newattr: # We just added this one - try: # or this setattr worked on an internal - # attribute. - object.__delattr__(self, attr) - except: - return ret - # Let's try to set the field - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError, "record array has no attribute %s" % attr - # - if val is masked: - _fill_value = _localdict['_fill_value'] - if _fill_value is not None: - dval = _localdict['_fill_value'][attr] - else: - dval = val - mval = True - else: - dval = filled(val) - mval = getmaskarray(val) - obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) - _localdict['_mask'].__setitem__(attr, mval) - return obj - - - def __getitem__(self, indx): - """Returns all the fields sharing the same fieldname base. -The fieldname base is either `_data` or `_mask`.""" - _localdict = self.__dict__ - _mask = ndarray.__getattribute__(self, '_mask') - _data = ndarray.view(self, _localdict['_baseclass']) - # We want a field ........ 
- if isinstance(indx, basestring): - #!!!: Make sure _sharedmask is True to propagate back to _fieldmask - #!!!: Don't use _set_mask, there are some copies being made... - #!!!: ...that break propagation - #!!!: Don't force the mask to nomask, that wrecks easy masking - obj = _data[indx].view(MaskedArray) - obj._mask = _mask[indx] - obj._sharedmask = True - fval = _localdict['_fill_value'] - if fval is not None: - obj._fill_value = fval[indx] - # Force to masked if the mask is True - if not obj.ndim and obj._mask: - return masked - return obj - # We want some elements .. - # First, the data ........ - obj = np.array(_data[indx], copy=False).view(mrecarray) - obj._mask = np.array(_mask[indx], copy=False).view(recarray) - return obj - #.... - def __setitem__(self, indx, value): - "Sets the given record to value." - MaskedArray.__setitem__(self, indx, value) - if isinstance(indx, basestring): - self._mask[indx] = ma.getmaskarray(value) - - - def __str__(self): - "Calculates the string representation." - if self.size > 1: - mstr = ["(%s)" % ",".join([str(i) for i in s]) - for s in zip(*[getattr(self, f) for f in self.dtype.names])] - return "[%s]" % ", ".join(mstr) - else: - mstr = ["%s" % ",".join([str(i) for i in s]) - for s in zip([getattr(self, f) for f in self.dtype.names])] - return "(%s)" % ", ".join(mstr) - # - def __repr__(self): - "Calculates the repr representation." - _names = self.dtype.names - fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) - reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] - reprstr.insert(0, 'masked_records(') - reprstr.extend([fmt % (' fill_value', self.fill_value), - ' )']) - return str("\n".join(reprstr)) -# #...................................................... - def view(self, dtype=None, type=None): - """Returns a view of the mrecarray.""" - # OK, basic copy-paste from MaskedArray.view... 
- if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - # Here again... - elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - # OK, there's the change - except TypeError: - dtype = np.dtype(dtype) - # we need to revert to MaskedArray, but keeping the possibility - # ...of subclasses (eg, TimeSeriesRecords), so we'll force a type - # ...set to the first parent - if dtype.fields is None: - basetype = self.__class__.__bases__[0] - output = self.__array__().view(dtype, basetype) - output._update_from(self) - else: - output = ndarray.view(self, dtype) - output._fill_value = None - else: - output = ndarray.view(self, dtype, type) - # Update the mask, just like in MaskedArray.view - if (getattr(output, '_mask', nomask) is not nomask): - mdtype = ma.make_mask_descr(output.dtype) - output._mask = self._mask.view(mdtype, ndarray) - output._mask.shape = output.shape - return output - - def harden_mask(self): - "Forces the mask to hard" - self._hardmask = True - def soften_mask(self): - "Forces the mask to soft" - self._hardmask = False - - def copy(self): - """Returns a copy of the masked record.""" - _localdict = self.__dict__ - copied = self._data.copy().view(type(self)) - copied._mask = self._mask.copy() - return copied - - def tolist(self, fill_value=None): - """Copy the data portion of the array to a hierarchical python - list and returns that list. - - Data items are converted to the nearest compatible Python - type. Masked values are converted to fill_value. If - fill_value is None, the corresponding entries in the output - list will be ``None``. 
- - """ - if fill_value is not None: - return self.filled(fill_value).tolist() - result = narray(self.filled().tolist(), dtype=object) - mask = narray(self._mask.tolist()) - result[mask] = None - return result.tolist() - #-------------------------------------------- - # Pickling - def __getstate__(self): - """Return the internal state of the masked array, for pickling purposes. - - """ - state = (1, - self.shape, - self.dtype, - self.flags.fnc, - self._data.tostring(), - self._mask.tostring(), - self._fill_value, - ) - return state - # - def __setstate__(self, state): - """Restore the internal state of the masked array, for pickling purposes. - ``state`` is typically the output of the ``__getstate__`` output, and is a - 5-tuple: - - - class name - - a tuple giving the shape of the data - - a typecode for the data - - a binary string for the data - - a binary string for the mask. - - """ - (ver, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) - self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) - self.fill_value = flv - # - def __reduce__(self): - """Return a 3-tuple for pickling a MaskedArray. - - """ - return (_mrreconstruct, - (self.__class__, self._baseclass, (0,), 'b',), - self.__getstate__()) - -def _mrreconstruct(subtype, baseclass, baseshape, basetype,): - """Internal function that builds a new MaskedArray from the - information stored in a pickle. 
- - """ - _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) -# _data._mask = ndarray.__new__(ndarray, baseshape, 'b1') -# return _data - _mask = ndarray.__new__(ndarray, baseshape, 'b1') - return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) - - -mrecarray = MaskedRecords - -#####--------------------------------------------------------------------------- -#---- --- Constructors --- -#####--------------------------------------------------------------------------- - -def fromarrays(arraylist, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, - fill_value=None): - """Creates a mrecarray from a (flat) list of masked arrays. - - Parameters - ---------- - arraylist : sequence - A list of (masked) arrays. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - dtype : {None, dtype}, optional - Data type descriptor. - shape : {None, integer}, optional - Number of records. If None, shape is defined from the shape of the - first array in the list. - formats : {None, sequence}, optional - Sequence of formats for each individual field. If None, the formats will - be autodetected by inspecting the fields and selecting the highest dtype - possible. - names : {None, sequence}, optional - Sequence of the names of each field. - fill_value : {None, sequence}, optional - Sequence of data to be used as filling values. - - Notes - ----- - Lists of tuples should be preferred over lists of lists for faster processing. 
- """ - datalist = [getdata(x) for x in arraylist] - masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] - _array = recfromarrays(datalist, - dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, aligned=aligned, - byteorder=byteorder).view(mrecarray) - _array._mask.flat = zip(*masklist) - if fill_value is not None: - _array.fill_value = fill_value - return _array - - -#.............................................................................. -def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None, - fill_value=None, mask=nomask): - """Creates a MaskedRecords from a list of records. - - Parameters - ---------- - reclist : sequence - A list of records. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - dtype : {None, dtype}, optional - Data type descriptor. - shape : {None,int}, optional - Number of records. If None, ``shape`` is defined from the shape of the - first array in the list. - formats : {None, sequence}, optional - Sequence of formats for each individual field. If None, the formats will - be autodetected by inspecting the fields and selecting the highest dtype - possible. - names : {None, sequence}, optional - Sequence of the names of each field. - fill_value : {None, sequence}, optional - Sequence of data to be used as filling values. - mask : {nomask, sequence}, optional. - External mask to apply on the data. - - Notes - ----- - Lists of tuples should be preferred over lists of lists for faster processing. - """ - # Grab the initial _fieldmask, if needed: - _mask = getattr(reclist, '_mask', None) - # Get the list of records..... 
- try: - nfields = len(reclist[0]) - except TypeError: - nfields = len(reclist[0].dtype) - if isinstance(reclist, ndarray): - # Make sure we don't have some hidden mask - if isinstance(reclist, MaskedArray): - reclist = reclist.filled().view(ndarray) - # Grab the initial dtype, just in case - if dtype is None: - dtype = reclist.dtype - reclist = reclist.tolist() - mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, - aligned=aligned, byteorder=byteorder).view(mrecarray) - # Set the fill_value if needed - if fill_value is not None: - mrec.fill_value = fill_value - # Now, let's deal w/ the mask - if mask is not nomask: - mask = np.array(mask, copy=False) - maskrecordlength = len(mask.dtype) - if maskrecordlength: - mrec._mask.flat = mask - elif len(mask.shape) == 2: - mrec._mask.flat = [tuple(m) for m in mask] - else: - mrec.__setmask__(mask) - if _mask is not None: - mrec._mask[:] = _mask - return mrec - -def _guessvartypes(arr): - """Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise -conversion. Returns a list of dtypes. -The array is first converted to ndarray. If the array is 2D, the test is performed -on the first line. An exception is raised if the file is 3D or more. - """ - vartypes = [] - arr = np.asarray(arr) - if len(arr.shape) == 2 : - arr = arr[0] - elif len(arr.shape) > 2: - raise ValueError, "The array should be 2D at most!" - # Start the conversion loop ....... - for f in arr: - try: - int(f) - except ValueError: - try: - float(f) - except ValueError: - try: - val = complex(f) - except ValueError: - vartypes.append(arr.dtype) - else: - vartypes.append(np.dtype(complex)) - else: - vartypes.append(np.dtype(float)) - else: - vartypes.append(np.dtype(int)) - return vartypes - -def openfile(fname): - "Opens the file handle of file `fname`" - # A file handle ................... 
- if hasattr(fname, 'readline'): - return fname - # Try to open the file and guess its type - try: - f = open(fname) - except IOError: - raise IOError, "No such file: '%s'" % fname - if f.readline()[:2] != "\\x": - f.seek(0, 0) - return f - raise NotImplementedError, "Wow, binary file" - - -def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', - varnames=None, vartypes=None): - """Creates a mrecarray from data stored in the file `filename`. - - Parameters - ---------- - filename : {file name/handle} - Handle of an opened file. - delimitor : {None, string}, optional - Alphanumeric character used to separate columns in the file. - If None, any (group of) white spacestring(s) will be used. - commentchar : {'#', string}, optional - Alphanumeric character used to mark the start of a comment. - missingchar : {'', string}, optional - String indicating missing data, and used to create the masks. - varnames : {None, sequence}, optional - Sequence of the variable names. If None, a list will be created from - the first non empty line of the file. - vartypes : {None, sequence}, optional - Sequence of the variables dtypes. If None, it will be estimated from - the first non-commented line. - - - Ultra simple: the varnames are in the header, one line""" - # Try to open the file ...................... - f = openfile(fname) - # Get the first non-empty line as the varnames - while True: - line = f.readline() - firstline = line[:line.find(commentchar)].strip() - _varnames = firstline.split(delimitor) - if len(_varnames) > 1: - break - if varnames is None: - varnames = _varnames - # Get the data .............................. - _variables = masked_array([line.strip().split(delimitor) for line in f - if line[0] != commentchar and len(line) > 1]) - (_, nfields) = _variables.shape - # Try to guess the dtype .................... 
- if vartypes is None: - vartypes = _guessvartypes(_variables[0]) - else: - vartypes = [np.dtype(v) for v in vartypes] - if len(vartypes) != nfields: - msg = "Attempting to %i dtypes for %i fields!" - msg += " Reverting to default." - warnings.warn(msg % (len(vartypes), nfields)) - vartypes = _guessvartypes(_variables[0]) - # Construct the descriptor .................. - mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] - mfillv = [ma.default_fill_value(f) for f in vartypes] - # Get the data and the mask ................. - # We just need a list of masked_arrays. It's easier to create it like that: - _mask = (_variables.T == missingchar) - _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) - for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] - return fromarrays(_datalist, dtype=mdescr) - -#.................................................................... -def addfield(mrecord, newfield, newfieldname=None): - """Adds a new field to the masked record array, using `newfield` as data -and `newfieldname` as name. If `newfieldname` is None, the new field name is -set to 'fi', where `i` is the number of existing fields. - """ - _data = mrecord._data - _mask = mrecord._mask - if newfieldname is None or newfieldname in reserved_fields: - newfieldname = 'f%i' % len(_data.dtype) - newfield = ma.array(newfield) - # Get the new data ............ - # Create a new empty recarray - newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) - newdata = recarray(_data.shape, newdtype) - # Add the exisintg field - [newdata.setfield(_data.getfield(*f), *f) - for f in _data.dtype.fields.values()] - # Add the new field - newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) - newdata = newdata.view(MaskedRecords) - # Get the new mask ............. 
- # Create a new empty recarray - newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) - newmask = recarray(_data.shape, newmdtype) - # Add the old masks - [newmask.setfield(_mask.getfield(*f), *f) - for f in _mask.dtype.fields.values()] - # Add the mask of the new field - newmask.setfield(getmaskarray(newfield), - *newmask.dtype.fields[newfieldname]) - newdata._mask = newmask - return newdata diff --git a/numpy-1.6.2/numpy/ma/setup.py b/numpy-1.6.2/numpy/ma/setup.py deleted file mode 100644 index 0247466554..0000000000 --- a/numpy-1.6.2/numpy/ma/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import os - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('ma',parent_package,top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git a/numpy-1.6.2/numpy/ma/setupscons.py b/numpy-1.6.2/numpy/ma/setupscons.py deleted file mode 100644 index 0247466554..0000000000 --- a/numpy-1.6.2/numpy/ma/setupscons.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import os - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('ma',parent_package,top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git 
a/numpy-1.6.2/numpy/ma/tests/test_core.py b/numpy-1.6.2/numpy/ma/tests/test_core.py deleted file mode 100644 index 0707010899..0000000000 --- a/numpy-1.6.2/numpy/ma/tests/test_core.py +++ /dev/null @@ -1,3526 +0,0 @@ -# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -""" -__author__ = "Pierre GF Gerard-Marchant" - -import types -import warnings - -import numpy as np -import numpy.core.fromnumeric as fromnumeric -from numpy import ndarray -from numpy.ma.testutils import * - -import numpy.ma.core -from numpy.ma.core import * - -from numpy.compat import asbytes, asbytes_nested - -pi = np.pi - -import sys -if sys.version_info[0] >= 3: - from functools import reduce - -#.............................................................................. -class TestMaskedArray(TestCase): - "Base test class for MaskedArrays." - - def setUp (self): - "Base data definition." - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - - - def test_basicattributes(self): - "Tests some basic array attributes." 
- a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a.ndim, 1) - assert_equal(b.ndim, 1) - assert_equal(a.size, 3) - assert_equal(b.size, 3) - assert_equal(a.shape, (3,)) - assert_equal(b.shape, (3,)) - - - def test_basic0d(self): - "Checks masking a scalar" - x = masked_array(0) - assert_equal(str(x), '0') - x = masked_array(0, mask=True) - assert_equal(str(x), str(masked_print_option)) - x = masked_array(0, mask=False) - assert_equal(str(x), '0') - x = array(0, mask=1) - self.assertTrue(x.filled().dtype is x._data.dtype) - - def test_basic1d(self): - "Test of basic array creation and properties in 1 dimension." - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - self.assertTrue(not isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertTrue((xm - ym).filled(0).any()) - fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) - s = x.shape - assert_equal(np.shape(xm), s) - assert_equal(xm.shape, s) - assert_equal(xm.dtype, x.dtype) - assert_equal(zm.dtype, z.dtype) - assert_equal(xm.size , reduce(lambda x, y:x * y, s)) - assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) - assert_array_equal(xm, xf) - assert_array_equal(filled(xm, 1.e20), xf) - assert_array_equal(x, xm) - - - def test_basic2d(self): - "Test of basic array creation and properties in 2 dimensions." - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - # - self.assertTrue(not isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - assert_equal(shape(xm), s) - assert_equal(xm.shape, s) - assert_equal(xm.size , reduce(lambda x, y:x * y, s)) - assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) - assert_equal(xm, xf) - assert_equal(filled(xm, 1.e20), xf) - assert_equal(x, xm) - - def test_concatenate_basic(self): - "Tests concatenations." 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # basic concatenation - assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) - assert_equal(np.concatenate((x, y)), concatenate((x, y))) - assert_equal(np.concatenate((x, y)), concatenate((xm, y))) - assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) - - def test_concatenate_alongaxis(self): - "Tests concatenations." - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # Concatenation along an axis - s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s - assert_equal(xm.mask, np.reshape(m1, s)) - assert_equal(ym.mask, np.reshape(m2, s)) - xmym = concatenate((xm, ym), 1) - assert_equal(np.concatenate((x, y), 1), xmym) - assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) - # - x = zeros(2) - y = array(ones(2), mask=[False, True]) - z = concatenate((x, y)) - assert_array_equal(z, [0, 0, 1, 1]) - assert_array_equal(z.mask, [False, False, False, True]) - z = concatenate((y, x)) - assert_array_equal(z, [1, 1, 0, 0]) - assert_array_equal(z.mask, [False, True, False, False]) - - def test_concatenate_flexible(self): - "Tests the concatenation on flexible arrays." - data = masked_array(zip(np.random.rand(10), - np.arange(10)), - dtype=[('a', float), ('b', int)]) - # - test = concatenate([data[:5], data[5:]]) - assert_equal_records(test, data) - - def test_creation_ndmin(self): - "Check the use of ndmin" - x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) - assert_equal(x.shape, (1, 3)) - assert_equal(x._data, [[1, 2, 3]]) - assert_equal(x._mask, [[1, 0, 0]]) - - def test_creation_ndmin_from_maskedarray(self): - "Make sure we're not losing the original mask w/ ndmin" - x = array([1, 2, 3]) - x[-1] = masked - xx = array(x, ndmin=2, dtype=float) - assert_equal(x.shape, x._mask.shape) - assert_equal(xx.shape, xx._mask.shape) - - def test_creation_maskcreation(self): - "Tests how masks are initialized at the creation of Maskedarrays." 
- data = arange(24, dtype=float) - data[[3, 6, 15]] = masked - dma_1 = MaskedArray(data) - assert_equal(dma_1.mask, data.mask) - dma_2 = MaskedArray(dma_1) - assert_equal(dma_2.mask, dma_1.mask) - dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) - fail_if_equal(dma_3.mask, dma_1.mask) - - def test_creation_with_list_of_maskedarrays(self): - "Tests creaating a masked array from alist of masked arrays." - x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) - data = array((x, x[::-1])) - assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) - assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) - # - x.mask = nomask - data = array((x, x[::-1])) - assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) - self.assertTrue(data.mask is nomask) - - def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - xm.fill_value = -9999 - xm._hardmask = True - xmm = asarray(xm) - assert_equal(xmm._data, xm._data) - assert_equal(xmm._mask, xm._mask) - assert_equal(xmm.fill_value, xm.fill_value) - assert_equal(xmm._hardmask, xm._hardmask) - - def test_fix_invalid(self): - "Checks fix_invalid." - err_status_ini = np.geterr() - try: - np.seterr(invalid='ignore') - data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) - data_fixed = fix_invalid(data) - assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) - assert_equal(data_fixed._mask, [1., 0., 1.]) - finally: - np.seterr(**err_status_ini) - - def test_maskedelement(self): - "Test of masked element" - x = arange(6) - x[1] = masked - self.assertTrue(str(masked) == '--') - self.assertTrue(x[1] is masked) - assert_equal(filled(x[1], 0), 0) - # don't know why these should raise an exception... 
- #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) - #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) - #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) - #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) - - def test_set_element_as_object(self): - """Tests setting elements with object""" - a = empty(1, dtype=object) - x = (1, 2, 3, 4, 5) - a[0] = x - assert_equal(a[0], x) - self.assertTrue(a[0] is x) - # - import datetime - dt = datetime.datetime.now() - a[0] = dt - self.assertTrue(a[0] is dt) - - - def test_indexing(self): - "Tests conversions and indexing" - x1 = np.array([1, 2, 4, 3]) - x2 = array(x1, mask=[1, 0, 0, 0]) - x3 = array(x1, mask=[0, 1, 0, 1]) - x4 = array(x1) - # test conversion to strings - junk, garbage = str(x2), repr(x2) - assert_equal(np.sort(x1), sort(x2, endwith=False)) - # tests of indexing - assert_(type(x2[1]) is type(x1[1])) - assert_(x1[1] == x2[1]) - assert_(x2[0] is masked) - assert_equal(x1[2], x2[2]) - assert_equal(x1[2:5], x2[2:5]) - assert_equal(x1[:], x2[:]) - assert_equal(x1[1:], x3[1:]) - x1[2] = 9 - x2[2] = 9 - assert_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - assert_equal(x1, x2) - x2[1] = masked - assert_equal(x1, x2) - x2[1:3] = masked - assert_equal(x1, x2) - x2[:] = x1 - x2[1] = masked - assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) - x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) - x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) - assert_(allequal(x4, array([1, 2, 3, 4]))) - x1 = np.arange(5) * 1.0 - x2 = masked_values(x1, 3.0) - assert_equal(x1, x2) - assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) - assert_equal(3.0, x2.fill_value) - x1 = array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - s1 = x1[1] - s2 = x2[1] - assert_equal(type(s2), str) - assert_equal(type(s1), str) - assert_equal(s1, s2) - assert_(x1[1:1].shape 
== (0,)) - - - def test_copy(self): - "Tests of some subtle points of copying and sizing." - n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - self.assertTrue(m is m2) - m3 = make_mask(m, copy=1) - self.assertTrue(m is not m3) - - x1 = np.arange(5) - y1 = array(x1, mask=m) - #self.assertTrue( y1._data is x1) - assert_equal(y1._data.__array_interface__, x1.__array_interface__) - self.assertTrue(allequal(x1, y1.data)) - #self.assertTrue( y1.mask is m) - assert_equal(y1._mask.__array_interface__, m.__array_interface__) - - y1a = array(y1) - self.assertTrue(y1a._data.__array_interface__ == y1._data.__array_interface__) - self.assertTrue(y1a.mask is y1.mask) - - y2 = array(x1, mask=m) - self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) - #self.assertTrue( y2.mask is m) - self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) - self.assertTrue(y2[2] is masked) - y2[2] = 9 - self.assertTrue(y2[2] is not masked) - #self.assertTrue( y2.mask is not m) - self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) - self.assertTrue(allequal(y2.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - assert_equal(concatenate([x4, x4]), y4) - assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = repeat(x4, 2, axis=0) - assert_equal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert_equal(y5, y7) - y8 = x4.repeat(2, 0) - assert_equal(y5, y8) - - y9 = x4.copy() - assert_equal(y9._data, x4._data) - assert_equal(y9._mask, x4._mask) - # - x = masked_array([1, 2, 3], mask=[0, 1, 0]) - # Copy is False by default - y = masked_array(x) - assert_equal(y._data.ctypes.data, x._data.ctypes.data) - assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) - y = masked_array(x, copy=True) - assert_not_equal(y._data.ctypes.data, 
x._data.ctypes.data) - assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) - - - def test_deepcopy(self): - from copy import deepcopy - a = array([0, 1, 2], mask=[False, True, False]) - copied = deepcopy(a) - assert_equal(copied.mask, a.mask) - assert_not_equal(id(a._mask), id(copied._mask)) - # - copied[1] = 1 - assert_equal(copied.mask, [0, 0, 0]) - assert_equal(a.mask, [0, 1, 0]) - # - copied = deepcopy(a) - assert_equal(copied.mask, a.mask) - copied.mask[1] = False - assert_equal(copied.mask, [0, 0, 0]) - assert_equal(a.mask, [0, 1, 0]) - - - def test_pickling(self): - "Tests pickling" - import cPickle - a = arange(10) - a[::3] = masked - a.fill_value = 999 - a_pickled = cPickle.loads(a.dumps()) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled._data, a._data) - assert_equal(a_pickled.fill_value, 999) - - def test_pickling_subbaseclass(self): - "Test pickling w/ a subclass of ndarray" - import cPickle - a = array(np.matrix(range(10)), mask=[1, 0, 1, 0, 0] * 2) - a_pickled = cPickle.loads(a.dumps()) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - self.assertTrue(isinstance(a_pickled._data, np.matrix)) - - def test_pickling_wstructured(self): - "Tests pickling w/ structured array" - import cPickle - a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], - dtype=[('a', int), ('b', float)]) - a_pickled = cPickle.loads(a.dumps()) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - - def test_pickling_keepalignment(self): - "Tests pickling w/ F_CONTIGUOUS arrays" - import cPickle - a = arange(10) - a.shape = (-1, 2) - b = a.T - test = cPickle.loads(cPickle.dumps(b)) - assert_equal(test, b) - -# def test_pickling_oddity(self): -# "Test some pickling oddity" -# import cPickle -# a = array([{'a':1}, {'b':2}, 3], dtype=object) -# test = cPickle.loads(cPickle.dumps(a)) -# assert_equal(test, a) - - def test_single_element_subscript(self): - "Tests single element subscripts of Maskedarrays." 
- a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a[0].shape, ()) - assert_equal(b[0].shape, ()) - assert_equal(b[1].shape, ()) - - - def test_topython(self): - "Tests some communication issues with Python." - assert_equal(1, int(array(1))) - assert_equal(1.0, float(array(1))) - assert_equal(1, int(array([[[1]]]))) - assert_equal(1.0, float(array([[1]]))) - self.assertRaises(TypeError, float, array([1, 1])) - # - warnings.simplefilter('ignore', UserWarning) - assert_(np.isnan(float(array([1], mask=[1])))) - warnings.simplefilter('default', UserWarning) - # - a = array([1, 2, 3], mask=[1, 0, 0]) - self.assertRaises(TypeError, lambda:float(a)) - assert_equal(float(a[-1]), 3.) - self.assertTrue(np.isnan(float(a[0]))) - self.assertRaises(TypeError, int, a) - assert_equal(int(a[-1]), 3) - self.assertRaises(MAError, lambda:int(a[0])) - - - def test_oddfeatures_1(self): - "Test of other odd features" - x = arange(20) - x = x.reshape(4, 5) - x.flat[5] = 12 - assert_(x[1, 0] == 12) - z = x + 10j * x - assert_equal(z.real, x) - assert_equal(z.imag, 10 * x) - assert_equal((z * conjugate(z)).real, 101 * x * x) - z.imag[...] = 0.0 - # - x = arange(10) - x[3] = masked - assert_(str(x[3]) == str(masked)) - c = x >= 8 - assert_(count(where(c, masked, masked)) == 0) - assert_(shape(where(c, masked, masked)) == c.shape) - # - z = masked_where(c, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - assert_equal(x, z) - - - def test_oddfeatures_2(self): - "Tests some more features." 
- x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - - def test_oddfeatures_3(self): - """Tests some generic features.""" - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) - - - def test_filled_w_flexible_dtype(self): - "Test filled w/ flexible dtype" - flexi = array([(1, 1, 1)], - dtype=[('i', int), ('s', '|S8'), ('f', float)]) - flexi[0] = masked - assert_equal(flexi.filled(), - np.array([(default_fill_value(0), - default_fill_value('0'), - default_fill_value(0.),)], dtype=flexi.dtype)) - flexi[0] = masked - assert_equal(flexi.filled(1), - np.array([(1, '1', 1.)], dtype=flexi.dtype)) - - def test_filled_w_mvoid(self): - "Test filled w/ mvoid" - ndtype = [('a', int), ('b', float)] - a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) - # Filled using default - test = a.filled() - assert_equal(tuple(test), (1, default_fill_value(1.))) - # Explicit fill_value - test = a.filled((-1, -1)) - assert_equal(tuple(test), (1, -1)) - # Using predefined filling values - a.fill_value = (-999, -999) - assert_equal(tuple(a.filled()), (1, -999)) - - - def test_filled_w_nested_dtype(self): - "Test filled w/ nested dtype" - ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] - a = array([(1, (1, 1)), (2, (2, 2))], - mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) - test = a.filled(0) - control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) - assert_equal(test, control) - # - test = a['B'].filled(0) - control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) - assert_equal(test, control) - - - def test_optinfo_propagation(self): - "Checks that _optinfo dictionary isn't back-propagated" - x = array([1, 2, 3, ], dtype=float) - x._optinfo['info'] = '???' 
- y = x.copy() - assert_equal(y._optinfo['info'], '???') - y._optinfo['info'] = '!!!' - assert_equal(x._optinfo['info'], '???') - - - def test_fancy_printoptions(self): - "Test printing a masked array w/ fancy dtype." - fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - test = array([(1, (2, 3.0)), (4, (5, 6.0))], - mask=[(1, (0, 1)), (0, (1, 0))], - dtype=fancydtype) - control = "[(--, (2, --)) (4, (--, 6.0))]" - assert_equal(str(test), control) - - - def test_flatten_structured_array(self): - "Test flatten_structured_array on arrays" - # On ndarray - ndtype = [('a', int), ('b', float)] - a = np.array([(1, 1), (2, 2)], dtype=ndtype) - test = flatten_structured_array(a) - control = np.array([[1., 1.], [2., 2.]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - # On masked_array - a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = flatten_structured_array(a) - control = array([[1., 1.], [2., 2.]], - mask=[[0, 1], [1, 0]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - assert_equal(test.mask, control.mask) - # On masked array with nested structure - ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] - a = array([(1, (1, 1.1)), (2, (2, 2.2))], - mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) - test = flatten_structured_array(a) - control = array([[1., 1., 1.1], [2., 2., 2.2]], - mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - assert_equal(test.mask, control.mask) - # Keeping the initial shape - ndtype = [('a', int), ('b', float)] - a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) - test = flatten_structured_array(a) - control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - - - - def test_void0d(self): - "Test creating a mvoid object" - ndtype = [('a', int), ('b', int)] - a = 
np.array([(1, 2,)], dtype=ndtype)[0] - f = mvoid(a) - assert(isinstance(f, mvoid)) - # - a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] - assert(isinstance(a, mvoid)) - # - a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - f = mvoid(a._data[0], a._mask[0]) - assert(isinstance(f, mvoid)) - - def test_mvoid_getitem(self): - "Test mvoid.__getitem__" - ndtype = [('a', int), ('b', int)] - a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) - # w/o mask - f = a[0] - self.assertTrue(isinstance(f, np.void)) - assert_equal((f[0], f['a']), (1, 1)) - assert_equal(f['b'], 2) - # w/ mask - f = a[1] - self.assertTrue(isinstance(f, mvoid)) - self.assertTrue(f[0] is masked) - self.assertTrue(f['a'] is masked) - assert_equal(f[1], 4) - - def test_mvoid_iter(self): - "Test iteration on __getitem__" - ndtype = [('a', int), ('b', int)] - a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) - # w/o mask - assert_equal(list(a[0]), [1, 2]) - # w/ mask - assert_equal(list(a[1]), [masked, 4]) - - def test_mvoid_print(self): - "Test printing a mvoid" - mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) - assert_equal(str(mx[0]), "(1, 1)") - mx['b'][0] = masked - ini_display = masked_print_option._display - masked_print_option.set_display("-X-") - try: - assert_equal(str(mx[0]), "(1, -X-)") - assert_equal(repr(mx[0]), "(1, -X-)") - finally: - masked_print_option.set_display(ini_display) - -#------------------------------------------------------------------------------ - -class TestMaskedArrayArithmetic(TestCase): - "Base test class for MaskedArrays." - - def setUp (self): - "Base data definition." - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. 
- m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') - - def tearDown(self): - np.seterr(**self.err_status) - - def test_basic_arithmetic (self): - "Test of basic arithmetic." - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - a2d = array([[1, 2], [0, 4]]) - a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - assert_equal(a2d * a2d, a2d * a2dm) - assert_equal(a2d + a2d, a2d + a2dm) - assert_equal(a2d - a2d, a2d - a2dm) - for s in [(12,), (4, 3), (2, 6)]: - x = x.reshape(s) - y = y.reshape(s) - xm = xm.reshape(s) - ym = ym.reshape(s) - xf = xf.reshape(s) - assert_equal(-x, -xm) - assert_equal(x + y, xm + ym) - assert_equal(x - y, xm - ym) - assert_equal(x * y, xm * ym) - assert_equal(x / y, xm / ym) - assert_equal(a10 + y, a10 + ym) - assert_equal(a10 - y, a10 - ym) - assert_equal(a10 * y, a10 * ym) - assert_equal(a10 / y, a10 / ym) - assert_equal(x + a10, xm + a10) - assert_equal(x - a10, xm - a10) - assert_equal(x * a10, xm * a10) - assert_equal(x / a10, xm / a10) - assert_equal(x ** 2, xm ** 2) - assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5) - assert_equal(x ** y, xm ** ym) - assert_equal(np.add(x, y), add(xm, ym)) - assert_equal(np.subtract(x, y), subtract(xm, ym)) - assert_equal(np.multiply(x, y), multiply(xm, ym)) - assert_equal(np.divide(x, y), divide(xm, ym)) - - - def test_divide_on_different_shapes(self): - x = arange(6, dtype=float) - x.shape = (2, 3) - y = arange(3, dtype=float) - # - z = x / y - assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) - assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) - # - z = x / y[None, :] - assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) - assert_equal(z.mask, [[1, 0, 0], [1, 0, 
0]]) - # - y = arange(2, dtype=float) - z = x / y[:, None] - assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]]) - assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]]) - - - def test_mixed_arithmetic(self): - "Tests mixed arithmetics." - na = np.array([1]) - ma = array([1]) - self.assertTrue(isinstance(na + ma, MaskedArray)) - self.assertTrue(isinstance(ma + na, MaskedArray)) - - - def test_limits_arithmetic(self): - tiny = np.finfo(float).tiny - a = array([tiny, 1. / tiny, 0.]) - assert_equal(getmaskarray(a / 2), [0, 0, 0]) - assert_equal(getmaskarray(2 / a), [1, 0, 1]) - - - def test_masked_singleton_arithmetic(self): - "Tests some scalar arithmetics on MaskedArrays." - # Masked singleton should remain masked no matter what - xm = array(0, mask=1) - self.assertTrue((1 / array(0)).mask) - self.assertTrue((1 + xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue(maximum(xm, xm).mask) - self.assertTrue(minimum(xm, xm).mask) - - - def test_masked_singleton_equality(self): - "Tests (in)equality on masked snigleton" - a = array([1, 2, 3], mask=[1, 1, 0]) - assert((a[0] == 0) is masked) - assert((a[0] != 0) is masked) - assert_equal((a[-1] == 0), False) - assert_equal((a[-1] != 0), True) - - - def test_arithmetic_with_masked_singleton(self): - "Checks that there's no collapsing to masked" - x = masked_array([1, 2]) - y = x * masked - assert_equal(y.shape, x.shape) - assert_equal(y._mask, [True, True]) - y = x[0] * masked - assert_(y is masked) - y = x + masked - assert_equal(y.shape, x.shape) - assert_equal(y._mask, [True, True]) - - - def test_arithmetic_with_masked_singleton_on_1d_singleton(self): - "Check that we're not losing the shape of a singleton" - x = masked_array([1, ]) - y = x + masked - assert_equal(y.shape, x.shape) - assert_equal(y.mask, [True, ]) - - - def test_scalar_arithmetic(self): - x = array(0, mask=0) - assert_equal(x.filled().ctypes.data, x.ctypes.data) - # Make sure we don't lose the shape in some circumstances - xm = array((0, 0)) / 0. 
- assert_equal(xm.shape, (2,)) - assert_equal(xm.mask, [1, 1]) - - - def test_basic_ufuncs (self): - "Test various functions such as sin, cos." - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(np.cos(x), cos(xm)) - assert_equal(np.cosh(x), cosh(xm)) - assert_equal(np.sin(x), sin(xm)) - assert_equal(np.sinh(x), sinh(xm)) - assert_equal(np.tan(x), tan(xm)) - assert_equal(np.tanh(x), tanh(xm)) - assert_equal(np.sqrt(abs(x)), sqrt(xm)) - assert_equal(np.log(abs(x)), log(xm)) - assert_equal(np.log10(abs(x)), log10(xm)) - assert_equal(np.exp(x), exp(xm)) - assert_equal(np.arcsin(z), arcsin(zm)) - assert_equal(np.arccos(z), arccos(zm)) - assert_equal(np.arctan(z), arctan(zm)) - assert_equal(np.arctan2(x, y), arctan2(xm, ym)) - assert_equal(np.absolute(x), absolute(xm)) - assert_equal(np.equal(x, y), equal(xm, ym)) - assert_equal(np.not_equal(x, y), not_equal(xm, ym)) - assert_equal(np.less(x, y), less(xm, ym)) - assert_equal(np.greater(x, y), greater(xm, ym)) - assert_equal(np.less_equal(x, y), less_equal(xm, ym)) - assert_equal(np.greater_equal(x, y), greater_equal(xm, ym)) - assert_equal(np.conjugate(x), conjugate(xm)) - - - def test_count_func (self): - "Tests count" - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - if sys.version_info[0] >= 3: - self.assertTrue(isinstance(count(ott), np.integer)) - else: - self.assertTrue(isinstance(count(ott), int)) - assert_equal(3, count(ott)) - assert_equal(1, count(1)) - assert_equal(0, array(1, mask=[1])) - ott = ott.reshape((2, 2)) - assert_(isinstance(count(ott, 0), ndarray)) - if sys.version_info[0] >= 3: - assert_(isinstance(count(ott), np.integer)) - else: - assert_(isinstance(count(ott), types.IntType)) - assert_equal(3, count(ott)) - assert_(getmask(count(ott, 0)) is nomask) - assert_equal([1, 2], count(ott, 0)) - - - def test_minmax_func (self): - "Tests minimum and maximum." 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - xr = np.ravel(x) #max doesn't work if shaped - xmr = ravel(xm) - assert_equal(max(xr), maximum(xmr)) #true because of careful selection of data - assert_equal(min(xr), minimum(xmr)) #true because of careful selection of data - # - assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]) - assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]) - x = arange(5) - y = arange(5) - 2 - x[3] = masked - y[0] = masked - assert_equal(minimum(x, y), where(less(x, y), x, y)) - assert_equal(maximum(x, y), where(greater(x, y), x, y)) - assert_(minimum(x) == 0) - assert_(maximum(x) == 4) - # - x = arange(4).reshape(2, 2) - x[-1, -1] = masked - assert_equal(maximum(x), 2) - - - def test_minimummaximum_func(self): - a = np.ones((2, 2)) - aminimum = minimum(a, a) - self.assertTrue(isinstance(aminimum, MaskedArray)) - assert_equal(aminimum, np.minimum(a, a)) - # - aminimum = minimum.outer(a, a) - self.assertTrue(isinstance(aminimum, MaskedArray)) - assert_equal(aminimum, np.minimum.outer(a, a)) - # - amaximum = maximum(a, a) - self.assertTrue(isinstance(amaximum, MaskedArray)) - assert_equal(amaximum, np.maximum(a, a)) - # - amaximum = maximum.outer(a, a) - self.assertTrue(isinstance(amaximum, MaskedArray)) - assert_equal(amaximum, np.maximum.outer(a, a)) - - - def test_minmax_reduce(self): - "Test np.min/maximum.reduce on array w/ full False mask" - a = array([1, 2, 3], mask=[False, False, False]) - b = np.maximum.reduce(a) - assert_equal(b, 3) - - def test_minmax_funcs_with_output(self): - "Tests the min/max functions with explicit outputs" - mask = np.random.rand(12).round() - xm = array(np.random.uniform(0, 10, 12), mask=mask) - xm.shape = (3, 4) - for funcname in ('min', 'max'): - # Initialize - npfunc = getattr(np, funcname) - mafunc = getattr(numpy.ma.core, funcname) - # Use the np version - nout = np.empty((4,), dtype=int) - try: - result = npfunc(xm, axis=0, out=nout) - except MaskError: - pass - nout = np.empty((4,), 
dtype=float) - result = npfunc(xm, axis=0, out=nout) - self.assertTrue(result is nout) - # Use the ma version - nout.fill(-999) - result = mafunc(xm, axis=0, out=nout) - self.assertTrue(result is nout) - - - def test_minmax_methods(self): - "Additional tests on max/min" - (_, _, _, _, _, xm, _, _, _, _) = self.d - xm.shape = (xm.size,) - assert_equal(xm.max(), 10) - self.assertTrue(xm[0].max() is masked) - self.assertTrue(xm[0].max(0) is masked) - self.assertTrue(xm[0].max(-1) is masked) - assert_equal(xm.min(), -10.) - self.assertTrue(xm[0].min() is masked) - self.assertTrue(xm[0].min(0) is masked) - self.assertTrue(xm[0].min(-1) is masked) - assert_equal(xm.ptp(), 20.) - self.assertTrue(xm[0].ptp() is masked) - self.assertTrue(xm[0].ptp(0) is masked) - self.assertTrue(xm[0].ptp(-1) is masked) - # - x = array([1, 2, 3], mask=True) - self.assertTrue(x.min() is masked) - self.assertTrue(x.max() is masked) - self.assertTrue(x.ptp() is masked) - - - def test_addsumprod (self): - "Tests add, sum, product." 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(np.add.reduce(x), add.reduce(x)) - assert_equal(np.add.accumulate(x), add.accumulate(x)) - assert_equal(4, sum(array(4), axis=0)) - assert_equal(4, sum(array(4), axis=0)) - assert_equal(np.sum(x, axis=0), sum(x, axis=0)) - assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)) - assert_equal(np.sum(x, 0), sum(x, 0)) - assert_equal(np.product(x, axis=0), product(x, axis=0)) - assert_equal(np.product(x, 0), product(x, 0)) - assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0)) - s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s - if len(s) > 1: - assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) - assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) - assert_equal(np.sum(x, 1), sum(x, 1)) - assert_equal(np.product(x, 1), product(x, 1)) - - - def test_binops_d2D(self): - "Test binary operations on 2D data" - a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) - b = array([[2., 3.], [4., 5.], [6., 7.]]) - # - test = a * b - control = array([[2., 3.], [2., 2.], [3., 3.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b * a - control = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - a = array([[1.], [2.], [3.]]) - b = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [0, 0], [0, 1]]) - test = a * b - control = array([[2, 3], [8, 10], [18, 3]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b * a - control = array([[2, 3], [8, 10], [18, 7]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - - def 
test_domained_binops_d2D(self): - "Test domained binary operations on 2D data" - a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) - b = array([[2., 3.], [4., 5.], [6., 7.]]) - # - test = a / b - control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b / a - control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - a = array([[1.], [2.], [3.]]) - b = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [0, 0], [0, 1]]) - test = a / b - control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b / a - control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - - def test_noshrinking(self): - "Check that we don't shrink a mask when not wanted" - # Binary operations - a = masked_array([1, 2, 3], mask=[False, False, False], shrink=False) - b = a + 1 - assert_equal(b.mask, [0, 0, 0]) - # In place binary operation - a += 1 - assert_equal(a.mask, [0, 0, 0]) - # Domained binary operation - b = a / 1. - assert_equal(b.mask, [0, 0, 0]) - # In place binary operation - a /= 1. 
- assert_equal(a.mask, [0, 0, 0]) - - - def test_mod(self): - "Tests mod" - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(mod(x, y), mod(xm, ym)) - test = mod(ym, xm) - assert_equal(test, np.mod(ym, xm)) - assert_equal(test.mask, mask_or(xm.mask, ym.mask)) - test = mod(xm, ym) - assert_equal(test, np.mod(xm, ym)) - assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) - - def test_TakeTransposeInnerOuter(self): - "Test of take, transpose, inner, outer products" - x = arange(24) - y = np.arange(24) - x[5:6] = masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) - assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) - assert_equal(np.inner(filled(x, 0), filled(y, 0)), - inner(x, y)) - assert_equal(np.outer(filled(x, 0), filled(y, 0)), - outer(x, y)) - y = array(['abc', 1, 'def', 2, 3], object) - y[2] = masked - t = take(y, [0, 3, 4]) - assert_(t[0] == 'abc') - assert_(t[1] == 2) - assert_(t[2] == 3) - - - def test_imag_real(self): - "Check complex" - xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) - assert_equal(xx.imag, [10, 2]) - assert_equal(xx.imag.filled(), [1e+20, 2]) - assert_equal(xx.imag.dtype, xx._data.imag.dtype) - assert_equal(xx.real, [1, 20]) - assert_equal(xx.real.filled(), [1e+20, 20]) - assert_equal(xx.real.dtype, xx._data.real.dtype) - - - def test_methods_with_output(self): - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - # - funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) - # - for funcname in funclist: - npfunc = getattr(np, funcname) - xmmeth = getattr(xm, funcname) - # A ndarray as explicit input - output = np.empty(4, dtype=float) - output.fill(-9999) - result = npfunc(xm, axis=0, out=output) - # ... 
the result should be the given output - self.assertTrue(result is output) - assert_equal(result, xmmeth(axis=0, out=output)) - # - output = empty(4, dtype=int) - result = xmmeth(axis=0, out=output) - self.assertTrue(result is output) - self.assertTrue(output[0] is masked) - - - def test_eq_on_structured(self): - "Test the equality of structured arrays" - ndtype = [('A', int), ('B', int)] - a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) - test = (a == a) - assert_equal(test, [True, True]) - assert_equal(test.mask, [False, False]) - b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - test = (a == b) - assert_equal(test, [False, True]) - assert_equal(test.mask, [True, False]) - b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = (a == b) - assert_equal(test, [True, False]) - assert_equal(test.mask, [False, False]) - - - def test_ne_on_structured(self): - "Test the equality of structured arrays" - ndtype = [('A', int), ('B', int)] - a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) - test = (a != a) - assert_equal(test, [False, False]) - assert_equal(test.mask, [False, False]) - b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - test = (a != b) - assert_equal(test, [True, False]) - assert_equal(test.mask, [True, False]) - b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = (a != b) - assert_equal(test, [False, True]) - assert_equal(test.mask, [False, False]) - - - def test_eq_w_None(self): - # With partial mask - a = array([1, 2], mask=[0, 1]) - assert_equal(a == None, False) - assert_equal(a.data == None, False) - assert_equal(a.mask == None, False) - assert_equal(a != None, True) - # With nomask - a = array([1, 2], mask=False) - assert_equal(a == None, False) - assert_equal(a != None, True) - # With complete mask - a = array([1, 2], mask=True) - assert_equal(a == None, False) - assert_equal(a != None, True) - # With masked - a = masked - assert_equal(a == None, 
masked) - - def test_eq_w_scalar(self): - a = array(1) - assert_equal(a == 1, True) - assert_equal(a == 0, False) - assert_equal(a != 1, False) - assert_equal(a != 0, True) - - - def test_numpyarithmetics(self): - "Check that the mask is not back-propagated when using numpy functions" - a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) - control = masked_array([np.nan, np.nan, 0, np.log(2), -1], - mask=[1, 1, 0, 0, 1]) - # - test = log(a) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(a.mask, [0, 0, 0, 0, 1]) - # - test = np.log(a) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(a.mask, [0, 0, 0, 0, 1]) - -#------------------------------------------------------------------------------ - -class TestMaskedArrayAttributes(TestCase): - - def test_keepmask(self): - "Tests the keep mask flag" - x = masked_array([1, 2, 3], mask=[1, 0, 0]) - mx = masked_array(x) - assert_equal(mx.mask, x.mask) - mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) - assert_equal(mx.mask, [0, 1, 0]) - mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) - assert_equal(mx.mask, [1, 1, 0]) - # We default to true - mx = masked_array(x, mask=[0, 1, 0]) - assert_equal(mx.mask, [1, 1, 0]) - - def test_hardmask(self): - "Test hard_mask" - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - xh = array(d, mask=m, hard_mask=True) - # We need to copy, to avoid updating d in xh ! 
- xs = array(d, mask=m, hard_mask=False, copy=True) - xh[[1, 4]] = [10, 40] - xs[[1, 4]] = [10, 40] - assert_equal(xh._data, [0, 10, 2, 3, 4]) - assert_equal(xs._data, [0, 10, 2, 3, 40]) - #assert_equal(xh.mask.ctypes._data, m.ctypes._data) - assert_equal(xs.mask, [0, 0, 0, 1, 0]) - self.assertTrue(xh._hardmask) - self.assertTrue(not xs._hardmask) - xh[1:4] = [10, 20, 30] - xs[1:4] = [10, 20, 30] - assert_equal(xh._data, [0, 10, 20, 3, 4]) - assert_equal(xs._data, [0, 10, 20, 30, 40]) - #assert_equal(xh.mask.ctypes._data, m.ctypes._data) - assert_equal(xs.mask, nomask) - xh[0] = masked - xs[0] = masked - assert_equal(xh.mask, [1, 0, 0, 1, 1]) - assert_equal(xs.mask, [1, 0, 0, 0, 0]) - xh[:] = 1 - xs[:] = 1 - assert_equal(xh._data, [0, 1, 1, 3, 4]) - assert_equal(xs._data, [1, 1, 1, 1, 1]) - assert_equal(xh.mask, [1, 0, 0, 1, 1]) - assert_equal(xs.mask, nomask) - # Switch to soft mask - xh.soften_mask() - xh[:] = arange(5) - assert_equal(xh._data, [0, 1, 2, 3, 4]) - assert_equal(xh.mask, nomask) - # Switch back to hard mask - xh.harden_mask() - xh[xh < 3] = masked - assert_equal(xh._data, [0, 1, 2, 3, 4]) - assert_equal(xh._mask, [1, 1, 1, 0, 0]) - xh[filled(xh > 1, False)] = 5 - assert_equal(xh._data, [0, 1, 2, 5, 5]) - assert_equal(xh._mask, [1, 1, 1, 0, 0]) - # - xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) - xh[0] = 0 - assert_equal(xh._data, [[1, 0], [3, 4]]) - assert_equal(xh._mask, [[1, 0], [0, 0]]) - xh[-1, -1] = 5 - assert_equal(xh._data, [[1, 0], [3, 5]]) - assert_equal(xh._mask, [[1, 0], [0, 0]]) - xh[filled(xh < 5, False)] = 2 - assert_equal(xh._data, [[1, 2], [2, 5]]) - assert_equal(xh._mask, [[1, 0], [0, 0]]) - - def test_hardmask_again(self): - "Another test of hardmask" - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - xh = array(d, mask=m, hard_mask=True) - xh[4:5] = 999 - #assert_equal(xh.mask.ctypes._data, m.ctypes._data) - xh[0:1] = 999 - assert_equal(xh._data, [999, 1, 2, 3, 4]) - - def 
test_hardmask_oncemore_yay(self): - "OK, yet another test of hardmask" - "Make sure that harden_mask/soften_mask//unshare_mask retursn self" - a = array([1, 2, 3], mask=[1, 0, 0]) - b = a.harden_mask() - assert_equal(a, b) - b[0] = 0 - assert_equal(a, b) - assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) - a = b.soften_mask() - a[0] = 0 - assert_equal(a, b) - assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) - - - def test_smallmask(self): - "Checks the behaviour of _smallmask" - a = arange(10) - a[1] = masked - a[1] = 1 - assert_equal(a._mask, nomask) - a = arange(10) - a._smallmask = False - a[1] = masked - a[1] = 1 - assert_equal(a._mask, zeros(10)) - - - def test_shrink_mask(self): - "Tests .shrink_mask()" - a = array([1, 2, 3], mask=[0, 0, 0]) - b = a.shrink_mask() - assert_equal(a, b) - assert_equal(a.mask, nomask) - - - def test_flat(self): - "Test flat on masked_matrices" - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) - control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) - assert_equal(test, control) - # - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] - assert_equal(test, control) - -#------------------------------------------------------------------------------ - -class TestFillingValues(TestCase): - # - def test_check_on_scalar(self): - "Test _check_fill_value" - _check_fill_value = np.ma.core._check_fill_value - # - fval = _check_fill_value(0, int) - assert_equal(fval, 0) - fval = _check_fill_value(None, int) - assert_equal(fval, default_fill_value(0)) - # - fval = _check_fill_value(0, "|S3") - assert_equal(fval, asbytes("0")) - fval = _check_fill_value(None, "|S3") - assert_equal(fval, default_fill_value("|S3")) - # - fval = _check_fill_value(1e+20, int) - assert_equal(fval, default_fill_value(0)) - - - def test_check_on_fields(self): - "Tests _check_fill_value with records" - _check_fill_value = 
np.ma.core._check_fill_value - ndtype = [('a', int), ('b', float), ('c', "|S3")] - # A check on a list should return a single record - fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) - # A check on None should output the defaults - fval = _check_fill_value(None, ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), [default_fill_value(0), - default_fill_value(0.), - asbytes(default_fill_value("0"))]) - #.....Using a structured type as fill_value should work - fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) - fval = _check_fill_value(fill_val, ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) - - #.....Using a flexible type w/ a different type shouldn't matter - # BEHAVIOR in 1.5 and earlier: match structured types by position - #fill_val = np.array((-999, -12345678.9, "???"), - # dtype=[("A", int), ("B", float), ("C", "|S3")]) - # BEHAVIOR in 1.6 and later: match structured types by name - fill_val = np.array(("???", -999, -12345678.9), - dtype=[("c", "|S3"), ("a", int), ("b", float), ]) - fval = _check_fill_value(fill_val, ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) - - #.....Using an object-array shouldn't matter either - fill_value = np.array((-999, -12345678.9, "???"), dtype=object) - fval = _check_fill_value(fill_val, ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) - # - fill_value = np.array((-999, -12345678.9, "???")) - fval = _check_fill_value(fill_val, ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) - #.....One-field-only flexible type should work as well - ndtype = [("a", int)] - fval = _check_fill_value(-999999999, 
ndtype) - self.assertTrue(isinstance(fval, ndarray)) - assert_equal(fval.item(), (-999999999,)) - - - def test_fillvalue_conversion(self): - "Tests the behavior of fill_value during conversion" - # We had a tailored comment to make sure special attributes are properly - # dealt with - a = array(asbytes_nested(['3', '4', '5'])) - a._optinfo.update({'comment':"updated!"}) - # - b = array(a, dtype=int) - assert_equal(b._data, [3, 4, 5]) - assert_equal(b.fill_value, default_fill_value(0)) - # - b = array(a, dtype=float) - assert_equal(b._data, [3, 4, 5]) - assert_equal(b.fill_value, default_fill_value(0.)) - # - b = a.astype(int) - assert_equal(b._data, [3, 4, 5]) - assert_equal(b.fill_value, default_fill_value(0)) - assert_equal(b._optinfo['comment'], "updated!") - # - b = a.astype([('a', '|S3')]) - assert_equal(b['a']._data, a._data) - assert_equal(b['a'].fill_value, a.fill_value) - - - def test_fillvalue(self): - "Yet more fun with the fill_value" - data = masked_array([1, 2, 3], fill_value= -999) - series = data[[0, 2, 1]] - assert_equal(series._fill_value, data._fill_value) - # - mtype = [('f', float), ('s', '|S3')] - x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) - x.fill_value = 999 - assert_equal(x.fill_value.item(), [999., asbytes('999')]) - assert_equal(x['f'].fill_value, 999) - assert_equal(x['s'].fill_value, asbytes('999')) - # - x.fill_value = (9, '???') - assert_equal(x.fill_value.item(), (9, asbytes('???'))) - assert_equal(x['f'].fill_value, 9) - assert_equal(x['s'].fill_value, asbytes('???')) - # - x = array([1, 2, 3.1]) - x.fill_value = 999 - assert_equal(np.asarray(x.fill_value).dtype, float) - assert_equal(x.fill_value, 999.) 
- assert_equal(x._fill_value, np.array(999.)) - - - def test_fillvalue_exotic_dtype(self): - "Tests yet more exotic flexible dtypes" - _check_fill_value = np.ma.core._check_fill_value - ndtype = [('i', int), ('s', '|S8'), ('f', float)] - control = np.array((default_fill_value(0), - default_fill_value('0'), - default_fill_value(0.),), - dtype=ndtype) - assert_equal(_check_fill_value(None, ndtype), control) - # The shape shouldn't matter - ndtype = [('f0', float, (2, 2))] - control = np.array((default_fill_value(0.),), - dtype=[('f0', float)]).astype(ndtype) - assert_equal(_check_fill_value(None, ndtype), control) - control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) - assert_equal(_check_fill_value(0, ndtype), control) - # - ndtype = np.dtype("int, (2,3)float, float") - control = np.array((default_fill_value(0), - default_fill_value(0.), - default_fill_value(0.),), - dtype="int, float, float").astype(ndtype) - test = _check_fill_value(None, ndtype) - assert_equal(test, control) - control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) - assert_equal(_check_fill_value(0, ndtype), control) - - - def test_extremum_fill_value(self): - "Tests extremum fill values for flexible type." 
- a = array([(1, (2, 3)), (4, (5, 6))], - dtype=[('A', int), ('B', [('BA', int), ('BB', int)])]) - test = a.fill_value - assert_equal(test['A'], default_fill_value(a['A'])) - assert_equal(test['B']['BA'], default_fill_value(a['B']['BA'])) - assert_equal(test['B']['BB'], default_fill_value(a['B']['BB'])) - # - test = minimum_fill_value(a) - assert_equal(test[0], minimum_fill_value(a['A'])) - assert_equal(test[1][0], minimum_fill_value(a['B']['BA'])) - assert_equal(test[1][1], minimum_fill_value(a['B']['BB'])) - assert_equal(test[1], minimum_fill_value(a['B'])) - # - test = maximum_fill_value(a) - assert_equal(test[0], maximum_fill_value(a['A'])) - assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) - assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) - assert_equal(test[1], maximum_fill_value(a['B'])) - - def test_fillvalue_individual_fields(self): - "Test setting fill_value on individual fields" - ndtype = [('a', int), ('b', int)] - # Explicit fill_value - a = array(zip([1, 2, 3], [4, 5, 6]), - fill_value=(-999, -999), dtype=ndtype) - f = a._fill_value - aa = a['a'] - aa.set_fill_value(10) - assert_equal(aa._fill_value, np.array(10)) - assert_equal(tuple(a.fill_value), (10, -999)) - a.fill_value['b'] = -10 - assert_equal(tuple(a.fill_value), (10, -10)) - # Implicit fill_value - t = array(zip([1, 2, 3], [4, 5, 6]), dtype=[('a', int), ('b', int)]) - tt = t['a'] - tt.set_fill_value(10) - assert_equal(tt._fill_value, np.array(10)) - assert_equal(tuple(t.fill_value), (10, default_fill_value(0))) - - def test_fillvalue_implicit_structured_array(self): - "Check that fill_value is always defined for structured arrays" - ndtype = ('b', float) - adtype = ('a', float) - a = array([(1.,), (2.,)], mask=[(False,), (False,)], - fill_value=(np.nan,), dtype=np.dtype([adtype])) - b = empty(a.shape, dtype=[adtype, ndtype]) - b['a'] = a['a'] - b['a'].set_fill_value(a['a'].fill_value) - f = b._fill_value[()] - assert(np.isnan(f[0])) - assert_equal(f[-1], 
default_fill_value(1.)) - - def test_fillvalue_as_arguments(self): - "Test adding a fill_value parameter to empty/ones/zeros" - a = empty(3, fill_value=999.) - assert_equal(a.fill_value, 999.) - # - a = ones(3, fill_value=999., dtype=float) - assert_equal(a.fill_value, 999.) - # - a = zeros(3, fill_value=0., dtype=complex) - assert_equal(a.fill_value, 0.) - # - a = identity(3, fill_value=0., dtype=complex) - assert_equal(a.fill_value, 0.) - -#------------------------------------------------------------------------------ - -class TestUfuncs(TestCase): - "Test class for the application of ufuncs on MaskedArrays." - - def setUp(self): - "Base data definition." - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), - array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') - - def tearDown(self): - np.seterr(**self.err_status) - - def test_testUfuncRegression(self): - "Tests new ufuncs on MaskedArrays." - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', - 'sin', 'cos', 'tan', - 'arcsin', 'arccos', 'arctan', - 'sinh', 'cosh', 'tanh', - 'arcsinh', - 'arccosh', - 'arctanh', - 'absolute', 'fabs', 'negative', - # 'nonzero', 'around', - 'floor', 'ceil', - # 'sometrue', 'alltrue', - 'logical_not', - 'add', 'subtract', 'multiply', - 'divide', 'true_divide', 'floor_divide', - 'remainder', 'fmod', 'hypot', 'arctan2', - 'equal', 'not_equal', 'less_equal', 'greater_equal', - 'less', 'greater', - 'logical_and', 'logical_or', 'logical_xor', - ]: - try: - uf = getattr(umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(numpy.ma.core, f) - args = self.d[:uf.nin] - ur = uf(*args) - mr = mf(*args) - assert_equal(ur.filled(0), mr.filled(0), f) - assert_mask_equal(ur.mask, mr.mask, err_msg=f) - - def test_reduce(self): - "Tests reduce on MaskedArrays." 
- a = self.d[0] - self.assertTrue(not alltrue(a, axis=0)) - self.assertTrue(sometrue(a, axis=0)) - assert_equal(sum(a[:3], axis=0), 0) - assert_equal(product(a, axis=0), 0) - assert_equal(add.reduce(a), pi) - - def test_minmax(self): - "Tests extrema on MaskedArrays." - a = arange(1, 13).reshape(3, 4) - amask = masked_where(a < 5, a) - assert_equal(amask.max(), a.max()) - assert_equal(amask.min(), 5) - assert_equal(amask.max(0), a.max(0)) - assert_equal(amask.min(0), [5, 6, 7, 8]) - self.assertTrue(amask.max(1)[0].mask) - self.assertTrue(amask.min(1)[0].mask) - - def test_ndarray_mask(self): - "Check that the mask of the result is a ndarray (not a MaskedArray...)" - a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) - test = np.sqrt(a) - control = masked_array([-1, 0, 1, np.sqrt(2), -1], - mask=[1, 0, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - self.assertTrue(not isinstance(test.mask, MaskedArray)) - -#------------------------------------------------------------------------------ - -class TestMaskedArrayInPlaceArithmetics(TestCase): - "Test MaskedArray Arithmetics" - - def setUp(self): - x = arange(10) - y = arange(10) - xm = arange(10) - xm[2] = masked - self.intdata = (x, y, xm) - self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) - - def test_inplace_addition_scalar(self): - """Test of inplace additions""" - (x, y, xm) = self.intdata - xm[2] = masked - x += 1 - assert_equal(x, y + 1) - xm += 1 - assert_equal(xm, y + 1) - # - (x, _, xm) = self.floatdata - id1 = x.data.ctypes._data - x += 1. - assert_(id1 == x.data.ctypes._data) - assert_equal(x, y + 1.) 
- - def test_inplace_addition_array(self): - """Test of inplace additions""" - (x, y, xm) = self.intdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x += a - xm += a - assert_equal(x, y + a) - assert_equal(xm, y + a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - def test_inplace_subtraction_scalar(self): - """Test of inplace subtractions""" - (x, y, xm) = self.intdata - x -= 1 - assert_equal(x, y - 1) - xm -= 1 - assert_equal(xm, y - 1) - - def test_inplace_subtraction_array(self): - """Test of inplace subtractions""" - (x, y, xm) = self.floatdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x -= a - xm -= a - assert_equal(x, y - a) - assert_equal(xm, y - a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - def test_inplace_multiplication_scalar(self): - """Test of inplace multiplication""" - (x, y, xm) = self.floatdata - x *= 2.0 - assert_equal(x, y * 2) - xm *= 2.0 - assert_equal(xm, y * 2) - - def test_inplace_multiplication_array(self): - """Test of inplace multiplication""" - (x, y, xm) = self.floatdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x *= a - xm *= a - assert_equal(x, y * a) - assert_equal(xm, y * a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - def test_inplace_division_scalar_int(self): - """Test of inplace division""" - (x, y, xm) = self.intdata - x = arange(10) * 2 - xm = arange(10) * 2 - xm[2] = masked - x /= 2 - assert_equal(x, y) - xm /= 2 - assert_equal(xm, y) - - def test_inplace_division_scalar_float(self): - """Test of inplace division""" - (x, y, xm) = self.floatdata - x /= 2.0 - assert_equal(x, y / 2.0) - xm /= arange(10) - assert_equal(xm, ones((10,))) - - def test_inplace_division_array_float(self): - """Test of inplace division""" - (x, y, xm) = self.floatdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x /= a - xm /= a - assert_equal(x, y / a) - assert_equal(xm, y / a) - assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) - - def 
test_inplace_division_misc(self): - # - x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] - y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - # - z = xm / ym - assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) - assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) - #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) - # - xm = xm.copy() - xm /= ym - assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) - assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) - #assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) - - - def test_datafriendly_add(self): - "Test keeping data w/ (inplace) addition" - x = array([1, 2, 3], mask=[0, 0, 1]) - # Test add w/ scalar - xx = x + 1 - assert_equal(xx.data, [2, 3, 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test iadd w/ scalar - x += 1 - assert_equal(x.data, [2, 3, 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test add w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x + array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(xx.data, [1, 4, 3]) - assert_equal(xx.mask, [1, 0, 1]) - # Test iadd w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - x += array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(x.data, [1, 4, 3]) - assert_equal(x.mask, [1, 0, 1]) - - - def test_datafriendly_sub(self): - "Test keeping data w/ (inplace) subtraction" - # Test sub w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x - 1 - assert_equal(xx.data, [0, 1, 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test isub w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - x -= 1 - assert_equal(x.data, [0, 1, 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test sub w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x - array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(xx.data, [1, 0, 3]) - 
assert_equal(xx.mask, [1, 0, 1]) - # Test isub w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - x -= array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(x.data, [1, 0, 3]) - assert_equal(x.mask, [1, 0, 1]) - - - def test_datafriendly_mul(self): - "Test keeping data w/ (inplace) multiplication" - # Test mul w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x * 2 - assert_equal(xx.data, [2, 4, 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test imul w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - x *= 2 - assert_equal(x.data, [2, 4, 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test mul w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x * array([10, 20, 30], mask=[1, 0, 0]) - assert_equal(xx.data, [1, 40, 3]) - assert_equal(xx.mask, [1, 0, 1]) - # Test imul w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - x *= array([10, 20, 30], mask=[1, 0, 0]) - assert_equal(x.data, [1, 40, 3]) - assert_equal(x.mask, [1, 0, 1]) - - - def test_datafriendly_div(self): - "Test keeping data w/ (inplace) division" - # Test div on scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x / 2. - assert_equal(xx.data, [1 / 2., 2 / 2., 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test idiv on scalar - x = array([1., 2., 3.], mask=[0, 0, 1]) - x /= 2. - assert_equal(x.data, [1 / 2., 2 / 2., 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test div on array - x = array([1., 2., 3.], mask=[0, 0, 1]) - xx = x / array([10., 20., 30.], mask=[1, 0, 0]) - assert_equal(xx.data, [1., 2. / 20., 3.]) - assert_equal(xx.mask, [1, 0, 1]) - # Test idiv on array - x = array([1., 2., 3.], mask=[0, 0, 1]) - x /= array([10., 20., 30.], mask=[1, 0, 0]) - assert_equal(x.data, [1., 2 / 20., 3.]) - assert_equal(x.mask, [1, 0, 1]) - - - def test_datafriendly_pow(self): - "Test keeping data w/ (inplace) power" - # Test pow on scalar - x = array([1., 2., 3.], mask=[0, 0, 1]) - xx = x ** 2.5 - assert_equal(xx.data, [1., 2. 
** 2.5, 3.]) - assert_equal(xx.mask, [0, 0, 1]) - # Test ipow on scalar - x **= 2.5 - assert_equal(x.data, [1., 2. ** 2.5, 3]) - assert_equal(x.mask, [0, 0, 1]) - - - def test_datafriendly_add_arrays(self): - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 0]) - a += b - assert_equal(a, [[2, 2], [4, 4]]) - if a.mask is not nomask: - assert_equal(a.mask, [[0, 0], [0, 0]]) - # - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 1]) - a += b - assert_equal(a, [[2, 2], [4, 4]]) - assert_equal(a.mask, [[0, 1], [0, 1]]) - - - def test_datafriendly_sub_arrays(self): - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 0]) - a -= b - assert_equal(a, [[0, 0], [2, 2]]) - if a.mask is not nomask: - assert_equal(a.mask, [[0, 0], [0, 0]]) - # - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 1]) - a -= b - assert_equal(a, [[0, 0], [2, 2]]) - assert_equal(a.mask, [[0, 1], [0, 1]]) - - - def test_datafriendly_mul_arrays(self): - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 0]) - a *= b - assert_equal(a, [[1, 1], [3, 3]]) - if a.mask is not nomask: - assert_equal(a.mask, [[0, 0], [0, 0]]) - # - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 1]) - a *= b - assert_equal(a, [[1, 1], [3, 3]]) - assert_equal(a.mask, [[0, 1], [0, 1]]) - -#------------------------------------------------------------------------------ - -class TestMaskedArrayMethods(TestCase): - "Test class for miscellaneous MaskedArrays methods." - def setUp(self): - "Base data definition." 
- x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = np.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) - - def test_generic_methods(self): - "Tests some MaskedArray methods." - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a.any(), a._data.any()) - assert_equal(a.all(), a._data.all()) - assert_equal(a.argmax(), a._data.argmax()) - assert_equal(a.argmin(), a._data.argmin()) - assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) - assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) - assert_equal(a.conj(), a._data.conj()) - assert_equal(a.conjugate(), a._data.conjugate()) - # - m = array([[1, 2], [3, 4]]) - assert_equal(m.diagonal(), m._data.diagonal()) - assert_equal(a.sum(), a._data.sum()) - assert_equal(a.take([1, 2]), a._data.take([1, 2])) - assert_equal(m.transpose(), m._data.transpose()) - - - def test_allclose(self): - "Tests allclose on arrays" - a = np.random.rand(10) - b = a + np.random.rand(10) * 1e-8 - self.assertTrue(allclose(a, b)) - # Test allclose w/ infs - a[0] = np.inf - self.assertTrue(not allclose(a, b)) - b[0] = np.inf - self.assertTrue(allclose(a, b)) - # Test all close w/ masked - a = 
masked_array(a) - a[-1] = masked - self.assertTrue(allclose(a, b, masked_equal=True)) - self.assertTrue(not allclose(a, b, masked_equal=False)) - # Test comparison w/ scalar - a *= 1e-8 - a[0] = 0 - self.assertTrue(allclose(a, 0, masked_equal=True)) - - - def test_allany(self): - """Checks the any/all methods/functions.""" - x = np.array([[ 0.13, 0.26, 0.90], - [ 0.28, 0.33, 0.63], - [ 0.31, 0.87, 0.70]]) - m = np.array([[ True, False, False], - [False, False, False], - [True, True, False]], dtype=np.bool_) - mx = masked_array(x, mask=m) - xbig = np.array([[False, False, True], - [False, False, True], - [False, True, True]], dtype=np.bool_) - mxbig = (mx > 0.5) - mxsmall = (mx < 0.5) - # - assert_((mxbig.all() == False)) - assert_((mxbig.any() == True)) - assert_equal(mxbig.all(0), [False, False, True]) - assert_equal(mxbig.all(1), [False, False, True]) - assert_equal(mxbig.any(0), [False, False, True]) - assert_equal(mxbig.any(1), [True, True, True]) - # - assert_((mxsmall.all() == False)) - assert_((mxsmall.any() == True)) - assert_equal(mxsmall.all(0), [True, True, False]) - assert_equal(mxsmall.all(1), [False, False, False]) - assert_equal(mxsmall.any(0), [True, True, False]) - assert_equal(mxsmall.any(1), [True, True, False]) - - - def test_allany_onmatrices(self): - x = np.array([[ 0.13, 0.26, 0.90], - [ 0.28, 0.33, 0.63], - [ 0.31, 0.87, 0.70]]) - X = np.matrix(x) - m = np.array([[ True, False, False], - [False, False, False], - [True, True, False]], dtype=np.bool_) - mX = masked_array(X, mask=m) - mXbig = (mX > 0.5) - mXsmall = (mX < 0.5) - # - assert_((mXbig.all() == False)) - assert_((mXbig.any() == True)) - assert_equal(mXbig.all(0), np.matrix([False, False, True])) - assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) - assert_equal(mXbig.any(0), np.matrix([False, False, True])) - assert_equal(mXbig.any(1), np.matrix([ True, True, True]).T) - # - assert_((mXsmall.all() == False)) - assert_((mXsmall.any() == True)) - 
assert_equal(mXsmall.all(0), np.matrix([True, True, False])) - assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) - assert_equal(mXsmall.any(0), np.matrix([True, True, False])) - assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) - - - def test_allany_oddities(self): - "Some fun with all and any" - store = empty(1, dtype=bool) - full = array([1, 2, 3], mask=True) - # - self.assertTrue(full.all() is masked) - full.all(out=store) - self.assertTrue(store) - self.assertTrue(store._mask, True) - self.assertTrue(store is not masked) - # - store = empty(1, dtype=bool) - self.assertTrue(full.any() is masked) - full.any(out=store) - self.assertTrue(not store) - self.assertTrue(store._mask, True) - self.assertTrue(store is not masked) - - - def test_argmax_argmin(self): - "Tests argmin & argmax on MaskedArrays." - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - # - assert_equal(mx.argmin(), 35) - assert_equal(mX.argmin(), 35) - assert_equal(m2x.argmin(), 4) - assert_equal(m2X.argmin(), 4) - assert_equal(mx.argmax(), 28) - assert_equal(mX.argmax(), 28) - assert_equal(m2x.argmax(), 31) - assert_equal(m2X.argmax(), 31) - # - assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) - assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) - assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) - assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) - # - assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) - assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) - assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) - assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) - - - def test_clip(self): - "Tests clip on MaskedArrays." 
- x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) - mx = array(x, mask=m) - clipped = mx.clip(2, 8) - assert_equal(clipped.mask, mx.mask) - assert_equal(clipped._data, x.clip(2, 8)) - assert_equal(clipped._data, mx._data.clip(2, 8)) - - - def test_compress(self): - "test compress" - a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) - condition = (a > 1.5) & (a < 3.5) - assert_equal(a.compress(condition), [2., 3.]) - # - a[[2, 3]] = masked - b = a.compress(condition) - assert_equal(b._data, [2., 3.]) - assert_equal(b._mask, [0, 1]) - assert_equal(b.fill_value, 9999) - assert_equal(b, a[condition]) - # - condition = (a < 4.) - b = a.compress(condition) - assert_equal(b._data, [1., 2., 3.]) - assert_equal(b._mask, [0, 0, 1]) - assert_equal(b.fill_value, 9999) - assert_equal(b, a[condition]) - # - a = masked_array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0]]) - b = a.compress(a.ravel() >= 22) - assert_equal(b._data, [30, 40, 50, 60]) - assert_equal(b._mask, [1, 1, 0, 0]) - # - x = np.array([3, 1, 2]) - b = a.compress(x >= 2, axis=1) - assert_equal(b._data, [[10, 30], [40, 60]]) - assert_equal(b._mask, [[0, 1], [1, 0]]) - - - def test_compressed(self): - "Tests compressed" - a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) - b = a.compressed() - assert_equal(b, a) - a[0] = masked - b = a.compressed() - assert_equal(b, [2, 3, 4]) - # - a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) - b = a.compressed() - assert_equal(b, a) - self.assertTrue(isinstance(b, np.matrix)) - a[0, 0] = masked - b = a.compressed() - assert_equal(b, [[2, 3, 4]]) - - - def test_empty(self): - "Tests empty/like" - datatype = 
[('a', int), ('b', float), ('c', '|S8')] - a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], - dtype=datatype) - assert_equal(len(a.fill_value.item()), len(datatype)) - # - b = empty_like(a) - assert_equal(b.shape, a.shape) - assert_equal(b.fill_value, a.fill_value) - # - b = empty(len(a), dtype=datatype) - assert_equal(b.shape, a.shape) - assert_equal(b.fill_value, a.fill_value) - - - def test_put(self): - "Tests put." - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - x = array(d, mask=m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) - x[[1, 4]] = [10, 40] - #self.assertTrue(x.mask is not m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is not masked) - assert_equal(x, [0, 10, 2, -1, 40]) - # - x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) - i = [0, 2, 4, 6] - x.put(i, [6, 4, 2, 0]) - assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) - assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) - x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) - assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) - assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) - # - x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) - put(x, i, [6, 4, 2, 0]) - assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) - assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) - put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) - assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) - assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) - - - def test_put_hardmask(self): - "Tests put on hardmask" - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - xh = array(d + 1, mask=m, hard_mask=True, copy=True) - xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) - assert_equal(xh._data, [3, 4, 2, 4, 5]) - - - def test_putmask(self): - x = arange(6) + 1 - mx = array(x, mask=[0, 0, 0, 1, 1, 1]) - mask = [0, 0, 1, 0, 0, 1] - # w/o mask, w/o masked values - xx = x.copy() - putmask(xx, mask, 99) - assert_equal(xx, [1, 2, 99, 4, 
5, 99]) - # w/ mask, w/o masked values - mxx = mx.copy() - putmask(mxx, mask, 99) - assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) - assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) - # w/o mask, w/ masked values - values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) - xx = x.copy() - putmask(xx, mask, values) - assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) - assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) - # w/ mask, w/ masked values - mxx = mx.copy() - putmask(mxx, mask, values) - assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) - assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) - # w/ mask, w/ masked values + hardmask - mxx = mx.copy() - mxx.harden_mask() - putmask(mxx, mask, values) - assert_equal(mxx, [1, 2, 30, 4, 5, 60]) - - - def test_ravel(self): - "Tests ravel" - a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) - aravel = a.ravel() - assert_equal(a._mask.shape, a.shape) - a = array([0, 0], mask=[1, 1]) - aravel = a.ravel() - assert_equal(a._mask.shape, a.shape) - a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) - aravel = a.ravel() - assert_equal(a.shape, (1, 5)) - assert_equal(a._mask.shape, a.shape) - # Checks that small_mask is preserved - a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) - assert_equal(a.ravel()._mask, [0, 0, 0, 0]) - # Test that the fill_value is preserved - a.fill_value = -99 - a.shape = (2, 2) - ar = a.ravel() - assert_equal(ar._mask, [0, 0, 0, 0]) - assert_equal(ar._data, [1, 2, 3, 4]) - assert_equal(ar.fill_value, -99) - - - def test_reshape(self): - "Tests reshape" - x = arange(4) - x[0] = masked - y = x.reshape(2, 2) - assert_equal(y.shape, (2, 2,)) - assert_equal(y._mask.shape, (2, 2,)) - assert_equal(x.shape, (4,)) - assert_equal(x._mask.shape, (4,)) - - - def test_sort(self): - "Test sort" - x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) - # - sortedx = sort(x) - assert_equal(sortedx._data, [1, 2, 3, 4]) - assert_equal(sortedx._mask, [0, 0, 0, 1]) - # - sortedx = sort(x, endwith=False) - 
assert_equal(sortedx._data, [4, 1, 2, 3]) - assert_equal(sortedx._mask, [1, 0, 0, 0]) - # - x.sort() - assert_equal(x._data, [1, 2, 3, 4]) - assert_equal(x._mask, [0, 0, 0, 1]) - # - x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) - x.sort(endwith=False) - assert_equal(x._data, [4, 1, 2, 3]) - assert_equal(x._mask, [1, 0, 0, 0]) - # - x = [1, 4, 2, 3] - sortedx = sort(x) - self.assertTrue(not isinstance(sorted, MaskedArray)) - # - x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8) - sortedx = sort(x, endwith=False) - assert_equal(sortedx._data, [-2, -1, 0, 1, 2]) - x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8) - sortedx = sort(x, endwith=False) - assert_equal(sortedx._data, [1, 2, -2, -1, 0]) - assert_equal(sortedx._mask, [1, 1, 0, 0, 0]) - - - def test_sort_2d(self): - "Check sort of 2D array." - # 2D array w/o mask - a = masked_array([[8, 4, 1], [2, 0, 9]]) - a.sort(0) - assert_equal(a, [[2, 0, 1], [8, 4, 9]]) - a = masked_array([[8, 4, 1], [2, 0, 9]]) - a.sort(1) - assert_equal(a, [[1, 4, 8], [0, 2, 9]]) - # 2D array w/mask - a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) - a.sort(0) - assert_equal(a, [[2, 0, 1], [8, 4, 9]]) - assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) - a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) - a.sort(1) - assert_equal(a, [[1, 4, 8], [0, 2, 9]]) - assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) - # 3D - a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], - [[1, 2, 3], [7, 8, 9], [4, 5, 6]], - [[7, 8, 9], [1, 2, 3], [4, 5, 6]], - [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) - a[a % 4 == 0] = masked - am = a.copy() - an = a.filled(99) - am.sort(0) - an.sort(0) - assert_equal(am, an) - am = a.copy() - an = a.filled(99) - am.sort(1) - an.sort(1) - assert_equal(am, an) - am = a.copy() - an = a.filled(99) - am.sort(2) - an.sort(2) - assert_equal(am, an) - - - def test_sort_flexible(self): - "Test sort on flexible dtype." 
- a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], - mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], - dtype=[('A', int), ('B', int)]) - # - test = sort(a) - b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], - mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, b) - assert_equal(test.mask, b.mask) - # - test = sort(a, endwith=False) - b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ], - mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ], - dtype=[('A', int), ('B', int)]) - assert_equal(test, b) - assert_equal(test.mask, b.mask) - - def test_argsort(self): - "Test argsort" - a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) - assert_equal(np.argsort(a), argsort(a)) - - - def test_squeeze(self): - "Check squeeze" - data = masked_array([[1, 2, 3]]) - assert_equal(data.squeeze(), [1, 2, 3]) - data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) - assert_equal(data.squeeze(), [1, 2, 3]) - assert_equal(data.squeeze()._mask, [1, 1, 1]) - data = masked_array([[1]], mask=True) - self.assertTrue(data.squeeze() is masked) - - - def test_swapaxes(self): - "Tests swapaxes on MaskedArrays." 
- x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mX = array(x, mask=m).reshape(6, 6) - mXX = mX.reshape(3, 2, 2, 3) - # - mXswapped = mX.swapaxes(0, 1) - assert_equal(mXswapped[-1], mX[:, -1]) - - mXXswapped = mXX.swapaxes(0, 2) - assert_equal(mXXswapped.shape, (2, 2, 3, 3)) - - - def test_take(self): - "Tests take" - x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) - assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) - assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) - assert_equal(x.take([[0, 1], [0, 1]]), - masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) - # - x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) - assert_equal(x.take([0, 2], axis=1), - array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) - assert_equal(take(x, [0, 2], axis=1), - array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) - - def test_take_masked_indices(self): - "Test take w/ masked indices" - a = np.array((40, 18, 37, 9, 22)) - indices = np.arange(3)[None, :] + np.arange(5)[:, None] - mindices = array(indices, mask=(indices >= len(a))) - # No mask - test = take(a, mindices, mode='clip') - ctrl = array([[40, 18, 37], - [18, 37, 9], - [37, 9, 22], - [ 9, 22, 22], - [22, 22, 22]]) - assert_equal(test, ctrl) - # Masked indices - test = take(a, mindices) - ctrl = array([[40, 18, 37], - [18, 37, 9], - [37, 9, 22], - [ 9, 22, 40], - [22, 40, 40]]) - ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - # Masked input + masked indices - a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) - test = take(a, mindices) - ctrl[0, 1] = ctrl[1, 
0] = masked - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - - - def test_tolist(self): - "Tests to list" - # ... on 1D - x = array(np.arange(12)) - x[[1, -2]] = masked - xlist = x.tolist() - self.assertTrue(xlist[1] is None) - self.assertTrue(xlist[-2] is None) - # ... on 2D - x.shape = (3, 4) - xlist = x.tolist() - ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] - assert_equal(xlist[0], [0, None, 2, 3]) - assert_equal(xlist[1], [4, 5, 6, 7]) - assert_equal(xlist[2], [8, 9, None, 11]) - assert_equal(xlist, ctrl) - # ... on structured array w/ masked records - x = array(zip([1, 2, 3], - [1.1, 2.2, 3.3], - ['one', 'two', 'thr']), - dtype=[('a', int), ('b', float), ('c', '|S8')]) - x[-1] = masked - assert_equal(x.tolist(), - [(1, 1.1, asbytes('one')), - (2, 2.2, asbytes('two')), - (None, None, None)]) - # ... on structured array w/ masked fields - a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], - dtype=[('a', int), ('b', int)]) - test = a.tolist() - assert_equal(test, [[1, None], [3, 4]]) - # ... 
on mvoid - a = a[0] - test = a.tolist() - assert_equal(test, [1, None]) - - def test_tolist_specialcase(self): - "Test mvoid.tolist: make sure we return a standard Python object" - a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) - # w/o mask: each entry is a np.void whose elements are standard Python - for entry in a: - for item in entry.tolist(): - assert(not isinstance(item, np.generic)) - # w/ mask: each entry is a ma.void whose elements should be standard Python - a.mask[0] = (0, 1) - for entry in a: - for item in entry.tolist(): - assert(not isinstance(item, np.generic)) - - - def test_toflex(self): - "Test the conversion to records" - data = arange(10) - record = data.toflex() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) - # - data[[0, 1, 2, -1]] = masked - record = data.toflex() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) - # - ndtype = [('i', int), ('s', '|S3'), ('f', float)] - data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), - 'ABCDEFGHIJKLM', - np.random.rand(10))], - dtype=ndtype) - data[[0, 1, 2, -1]] = masked - record = data.toflex() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) - # - ndtype = np.dtype("int, (2,3)float, float") - data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), - np.random.rand(10), - np.random.rand(10))], - dtype=ndtype) - data[[0, 1, 2, -1]] = masked - record = data.toflex() - assert_equal_records(record['_data'], data._data) - assert_equal_records(record['_mask'], data._mask) - - - def test_fromflex(self): - "Test the reconstruction of a masked_array from a record" - a = array([1, 2, 3]) - test = fromflex(a.toflex()) - assert_equal(test, a) - assert_equal(test.mask, a.mask) - # - a = array([1, 2, 3], mask=[0, 0, 1]) - test = fromflex(a.toflex()) - assert_equal(test, a) - assert_equal(test.mask, a.mask) - # - a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), 
(0, 1)], - dtype=[('A', int), ('B', float)]) - test = fromflex(a.toflex()) - assert_equal(test, a) - assert_equal(test.data, a.data) - - - def test_arraymethod(self): - "Test a _arraymethod w/ n argument" - marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) - control = masked_array([[1], [2], [3], [4], [5]], - mask=[0, 0, 1, 0, 0]) - assert_equal(marray.T, control) - assert_equal(marray.transpose(), control) - # - assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) - - -#------------------------------------------------------------------------------ - - -class TestMaskedArrayMathMethods(TestCase): - - def setUp(self): - "Base data definition." - x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = np.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) - - - def test_cumsumprod(self): - "Tests cumsum & cumprod on MaskedArrays." 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - mXcp = mX.cumsum(0) - assert_equal(mXcp._data, mX.filled(0).cumsum(0)) - mXcp = mX.cumsum(1) - assert_equal(mXcp._data, mX.filled(0).cumsum(1)) - # - mXcp = mX.cumprod(0) - assert_equal(mXcp._data, mX.filled(1).cumprod(0)) - mXcp = mX.cumprod(1) - assert_equal(mXcp._data, mX.filled(1).cumprod(1)) - - - def test_cumsumprod_with_output(self): - "Tests cumsum/cumprod w/ output" - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - # - for funcname in ('cumsum', 'cumprod'): - npfunc = getattr(np, funcname) - xmmeth = getattr(xm, funcname) - - # A ndarray as explicit input - output = np.empty((3, 4), dtype=float) - output.fill(-9999) - result = npfunc(xm, axis=0, out=output) - # ... the result should be the given output - self.assertTrue(result is output) - assert_equal(result, xmmeth(axis=0, out=output)) - # - output = empty((3, 4), dtype=int) - result = xmmeth(axis=0, out=output) - self.assertTrue(result is output) - - - def test_ptp(self): - "Tests ptp on MaskedArrays." 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - (n, m) = X.shape - assert_equal(mx.ptp(), mx.compressed().ptp()) - rows = np.zeros(n, np.float) - cols = np.zeros(m, np.float) - for k in range(m): - cols[k] = mX[:, k].compressed().ptp() - for k in range(n): - rows[k] = mX[k].compressed().ptp() - assert_equal(mX.ptp(0), cols) - assert_equal(mX.ptp(1), rows) - - - def test_sum_object(self): - "Test sum on object dtype" - a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object) - assert_equal(a.sum(), 5) - a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) - assert_equal(a.sum(axis=0), [5, 7, 9]) - - def test_prod_object(self): - "Test prod on object dtype" - a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object) - assert_equal(a.prod(), 2 * 3) - a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) - assert_equal(a.prod(axis=0), [4, 10, 18]) - - def test_meananom_object(self): - "Test mean/anom on object dtype" - a = masked_array([1, 2, 3], dtype=np.object) - assert_equal(a.mean(), 2) - assert_equal(a.anom(), [-1, 0, 1]) - - - def test_trace(self): - "Tests trace on MaskedArrays." - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - mXdiag = mX.diagonal() - assert_equal(mX.trace(), mX.diagonal().compressed().sum()) - assert_almost_equal(mX.trace(), - X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0)) - - - def test_varstd(self): - "Tests var & std on MaskedArrays." 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - assert_almost_equal(mX.var(axis=None), mX.compressed().var()) - assert_almost_equal(mX.std(axis=None), mX.compressed().std()) - assert_almost_equal(mX.std(axis=None, ddof=1), - mX.compressed().std(ddof=1)) - assert_almost_equal(mX.var(axis=None, ddof=1), - mX.compressed().var(ddof=1)) - assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) - assert_equal(mX.var().shape, X.var().shape) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2)) - assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2)) - for k in range(6): - assert_almost_equal(mXvar1[k], mX[k].compressed().var()) - assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) - assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - - - def test_varstd_specialcases(self): - "Test a special case for var" - nout = np.empty(1, dtype=float) - mout = empty(1, dtype=float) - # - x = array(arange(10), mask=True) - for methodname in ('var', 'std'): - method = getattr(x, methodname) - self.assertTrue(method() is masked) - self.assertTrue(method(0) is masked) - self.assertTrue(method(-1) is masked) - # Using a masked array as explicit output - _ = method(out=mout) - self.assertTrue(mout is not masked) - assert_equal(mout.mask, True) - # Using a ndarray as explicit output - _ = method(out=nout) - self.assertTrue(np.isnan(nout)) - # - x = array(arange(10), mask=True) - x[-1] = 9 - for methodname in ('var', 'std'): - method = getattr(x, methodname) - self.assertTrue(method(ddof=1) is masked) - self.assertTrue(method(0, ddof=1) is masked) - self.assertTrue(method(-1, ddof=1) is masked) - # Using a masked array as explicit output - _ = method(out=mout, ddof=1) - self.assertTrue(mout is not masked) - assert_equal(mout.mask, True) - # Using a ndarray as explicit output - _ = method(out=nout, ddof=1) - self.assertTrue(np.isnan(nout)) - - - def 
test_varstd_ddof(self): - a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) - test = a.std(axis=0, ddof=0) - assert_equal(test.filled(0), [0, 0, 0]) - assert_equal(test.mask, [0, 0, 1]) - test = a.std(axis=0, ddof=1) - assert_equal(test.filled(0), [0, 0, 0]) - assert_equal(test.mask, [0, 0, 1]) - test = a.std(axis=0, ddof=2) - assert_equal(test.filled(0), [0, 0, 0]) - assert_equal(test.mask, [1, 1, 1]) - - - def test_diag(self): - "Test diag" - x = arange(9).reshape((3, 3)) - x[1, 1] = masked - out = np.diag(x) - assert_equal(out, [0, 4, 8]) - out = diag(x) - assert_equal(out, [0, 4, 8]) - assert_equal(out.mask, [0, 1, 0]) - out = diag(out) - control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], - mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(out, control) - - - def test_axis_methods_nomask(self): - "Test the combination nomask & methods w/ axis" - a = array([[1, 2, 3], [4, 5, 6]]) - # - assert_equal(a.sum(0), [5, 7, 9]) - assert_equal(a.sum(-1), [6, 15]) - assert_equal(a.sum(1), [6, 15]) - # - assert_equal(a.prod(0), [4, 10, 18]) - assert_equal(a.prod(-1), [6, 120]) - assert_equal(a.prod(1), [6, 120]) - # - assert_equal(a.min(0), [1, 2, 3]) - assert_equal(a.min(-1), [1, 4]) - assert_equal(a.min(1), [1, 4]) - # - assert_equal(a.max(0), [4, 5, 6]) - assert_equal(a.max(-1), [3, 6]) - assert_equal(a.max(1), [3, 6]) - -#------------------------------------------------------------------------------ - -class TestMaskedArrayMathMethodsComplex(TestCase): - "Test class for miscellaneous MaskedArrays methods." - def setUp(self): - "Base data definition." 
- x = np.array([ 8.375j, 7.545j, 8.828j, 8.5j , 1.757j, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479j, - 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = np.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) - - - def test_varstd(self): - "Tests var & std on MaskedArrays." - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - assert_almost_equal(mX.var(axis=None), mX.compressed().var()) - assert_almost_equal(mX.std(axis=None), mX.compressed().std()) - assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) - assert_equal(mX.var().shape, X.var().shape) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2)) - assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2)) - for k in range(6): - assert_almost_equal(mXvar1[k], mX[k].compressed().var()) - assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) - assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - - -#------------------------------------------------------------------------------ - -class TestMaskedArrayFunctions(TestCase): - "Test class for miscellaneous functions." 
- - def setUp(self): - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - - def test_masked_where_bool(self): - x = [1, 2] - y = masked_where(False, x) - assert_equal(y, [1, 2]) - assert_equal(y[1], 2) - - def test_masked_equal_wlist(self): - x = [1, 2, 3] - mx = masked_equal(x, 3) - assert_equal(mx, x) - assert_equal(mx._mask, [0, 0, 1]) - mx = masked_not_equal(x, 3) - assert_equal(mx, x) - assert_equal(mx._mask, [1, 1, 0]) - - def test_masked_equal_fill_value(self): - x = [1, 2, 3] - mx = masked_equal(x, 3) - assert_equal(mx._mask, [0, 0, 1]) - assert_equal(mx.fill_value, 3) - - def test_masked_where_condition(self): - "Tests masking functions." 
- x = array([1., 2., 3., 4., 5.]) - x[2] = masked - assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) - assert_equal(masked_where(greater_equal(x, 2), x), masked_greater_equal(x, 2)) - assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) - assert_equal(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)) - assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) - assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5]) - - - def test_masked_where_oddities(self): - """Tests some generic features.""" - atest = ones((10, 10, 10), dtype=float) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest, atest) - assert_equal(atest, ctest) - - - def test_masked_where_shape_constraint(self): - a = arange(10) - try: - test = masked_equal(1, a) - except IndexError: - pass - else: - raise AssertionError("Should have failed...") - test = masked_equal(a, 1) - assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) - - - def test_masked_otherfunctions(self): - assert_equal(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4]) - assert_equal(masked_outside(range(5), 1, 3), [199, 1, 2, 3, 199]) - assert_equal(masked_inside(array(range(5), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0]) - assert_equal(masked_outside(array(range(5), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1]) - assert_equal(masked_equal(array(range(5), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0]) - assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1]) - - - def test_round(self): - a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], - mask=[0, 1, 0, 0, 0]) - assert_equal(a.round(), [1., 2., 3., 5., 6.]) - assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) - assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) - b = 
empty_like(a) - a.round(out=b) - assert_equal(b, [1., 2., 3., 5., 6.]) - - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - - def test_round_with_output(self): - "Testing round with an explicit output" - - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - - # A ndarray as explicit input - output = np.empty((3, 4), dtype=float) - output.fill(-9999) - result = np.round(xm, decimals=2, out=output) - # ... the result should be the given output - self.assertTrue(result is output) - assert_equal(result, xm.round(decimals=2, out=output)) - # - output = empty((3, 4), dtype=float) - result = xm.round(decimals=2, out=output) - self.assertTrue(result is output) - - - def test_identity(self): - a = identity(5) - self.assertTrue(isinstance(a, MaskedArray)) - assert_equal(a, np.identity(5)) - - - def test_power(self): - x = -1.1 - assert_almost_equal(power(x, 2.), 1.21) - self.assertTrue(power(x, masked) is masked) - x = array([-1.1, -1.1, 1.1, 1.1, 0.]) - b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) - y = power(x, b) - assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) - assert_equal(y._mask, [1, 0, 0, 0, 1]) - b.mask = nomask - y = power(x, b) - assert_equal(y._mask, [1, 0, 0, 0, 1]) - z = x ** b - assert_equal(z._mask, y._mask) - assert_almost_equal(z, y) - assert_almost_equal(z._data, y._data) - x **= b - assert_equal(x._mask, y._mask) - assert_almost_equal(x, y) - assert_almost_equal(x._data, y._data) - - def test_power_w_broadcasting(self): - "Test power w/ broadcasting" - a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) - a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) - b1 = np.array([2, 4, 3]) - b1m = array(b1, mask=[0, 1, 0]) - b2 = np.array([b1, b1]) - b2m = 
array(b2, mask=[[0, 1, 0], [0, 1, 0]]) - # - ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], - mask=[[1, 1, 0], [0, 1, 1]]) - # No broadcasting, base & exp w/ mask - test = a2m ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - # No broadcasting, base w/ mask, exp w/o mask - test = a2m ** b2 - assert_equal(test, ctrl) - assert_equal(test.mask, a2m.mask) - # No broadcasting, base w/o mask, exp w/ mask - test = a2 ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, b2m.mask) - # - ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], - mask=[[0, 1, 0], [0, 1, 0]]) - test = b1 ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - test = b2m ** b1 - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - - - def test_where(self): - "Test the where function" - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. 
- m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - # - d = where(xm > 2, xm, -9) - assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) - assert_equal(d._mask, xm._mask) - d = where(xm > 2, -9, ym) - assert_equal(d, [5., 0., 3., 2., -1., -9., -9., -10., -9., 1., 0., -9.]) - assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) - d = where(xm > 2, xm, masked) - assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) - tmp = xm._mask.copy() - tmp[(xm <= 2).filled(True)] = True - assert_equal(d._mask, tmp) - # - ixm = xm.astype(int) - d = where(ixm > 2, ixm, masked) - assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) - assert_equal(d.dtype, ixm.dtype) - - - def test_where_with_masked_choice(self): - x = arange(10) - x[3] = masked - c = x >= 8 - # Set False to masked - z = where(c , x, masked) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is masked) - assert_(z[7] is masked) - assert_(z[8] is not masked) - assert_(z[9] is not masked) - assert_equal(x, z) - # Set True to masked - z = where(c , masked, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - - def test_where_with_masked_condition(self): - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - # - x = arange(1, 6) - x[-1] = masked - y = arange(1, 6) * 10 - y[2] = masked - c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) - cm = 
c.filled(1) - z = where(c, x, y) - zm = where(cm, x, y) - assert_equal(z, zm) - assert_(getmask(zm) is nomask) - assert_equal(zm, [1, 2, 3, 40, 50]) - z = where(c, masked, 1) - assert_equal(z, [99, 99, 99, 1, 1]) - z = where(c, 1, masked) - assert_equal(z, [99, 1, 1, 99, 99]) - - def test_where_type(self): - "Test the type conservation with where" - x = np.arange(4, dtype=np.int32) - y = np.arange(4, dtype=np.float32) * 2.2 - test = where(x > 1.5, y, x).dtype - control = np.find_common_type([np.int32, np.float32], []) - assert_equal(test, control) - - - def test_choose(self): - "Test choose" - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - chosen = choose([2, 3, 1, 0], choices) - assert_equal(chosen, array([20, 31, 12, 3])) - chosen = choose([2, 4, 1, 0], choices, mode='clip') - assert_equal(chosen, array([20, 31, 12, 3])) - chosen = choose([2, 4, 1, 0], choices, mode='wrap') - assert_equal(chosen, array([20, 1, 12, 3])) - # Check with some masked indices - indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) - chosen = choose(indices_, choices, mode='wrap') - assert_equal(chosen, array([99, 1, 12, 99])) - assert_equal(chosen.mask, [1, 0, 0, 1]) - # Check with some masked choices - choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], - [1, 0, 0, 0], [0, 0, 0, 0]]) - indices_ = [2, 3, 1, 0] - chosen = choose(indices_, choices, mode='wrap') - assert_equal(chosen, array([20, 31, 12, 3])) - assert_equal(chosen.mask, [1, 0, 0, 1]) - - - def test_choose_with_out(self): - "Test choose with an explicit out keyword" - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - store = empty(4, dtype=int) - chosen = choose([2, 3, 1, 0], choices, out=store) - assert_equal(store, array([20, 31, 12, 3])) - self.assertTrue(store is chosen) - # Check with some masked indices + out - store = empty(4, dtype=int) - indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) - chosen = choose(indices_, choices, mode='wrap', 
out=store) - assert_equal(store, array([99, 31, 12, 99])) - assert_equal(store.mask, [1, 0, 0, 1]) - # Check with some masked choices + out ina ndarray ! - choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], - [1, 0, 0, 0], [0, 0, 0, 0]]) - indices_ = [2, 3, 1, 0] - store = empty(4, dtype=int).view(ndarray) - chosen = choose(indices_, choices, mode='wrap', out=store) - assert_equal(store, array([999999, 31, 12, 999999])) - - - def test_reshape(self): - a = arange(10) - a[0] = masked - # Try the default - b = a.reshape((5, 2)) - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['C']) - # Try w/ arguments as list instead of tuple - b = a.reshape(5, 2) - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['C']) - # Try w/ order - b = a.reshape((5, 2), order='F') - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['F']) - # Try w/ order - b = a.reshape(5, 2, order='F') - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['F']) - # - c = np.reshape(a, (2, 5)) - self.assertTrue(isinstance(c, MaskedArray)) - assert_equal(c.shape, (2, 5)) - self.assertTrue(c[0, 0] is masked) - self.assertTrue(c.flags['C']) - - - def test_make_mask_descr(self): - "Test make_mask_descr" - # Flexible - ntype = [('a', np.float), ('b', np.float)] - test = make_mask_descr(ntype) - assert_equal(test, [('a', np.bool), ('b', np.bool)]) - # Standard w/ shape - ntype = (np.float, 2) - test = make_mask_descr(ntype) - assert_equal(test, (np.bool, 2)) - # Standard standard - ntype = np.float - test = make_mask_descr(ntype) - assert_equal(test, np.dtype(np.bool)) - # Nested - ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])] - test = make_mask_descr(ntype) - control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) - assert_equal(test, control) - # Named+ shape - ntype = [('a', (np.float, 2))] - test = make_mask_descr(ntype) - assert_equal(test, np.dtype([('a', (np.bool, 2))])) - # 2 names - ntype = [(('A', 'a'), float)] - test = 
make_mask_descr(ntype) - assert_equal(test, np.dtype([(('A', 'a'), bool)])) - - - def test_make_mask(self): - "Test make_mask" - # w/ a list as an input - mask = [0, 1] - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [0, 1]) - # w/ a ndarray as an input - mask = np.array([0, 1], dtype=np.bool) - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [0, 1]) - # w/ a flexible-type ndarray as an input - use default - mdtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [1, 1]) - # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test.dtype, mdtype) - assert_equal(test, mask) - # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', np.float), ('b', np.float)] - bdtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test.dtype, bdtype) - assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) - - - def test_mask_or(self): - # Initialize - mtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) - # Test using nomask as input - test = mask_or(mask, nomask) - assert_equal(test, mask) - test = mask_or(nomask, mask) - assert_equal(test, mask) - # Using False as input - test = mask_or(mask, False) - assert_equal(test, mask) - # Using True as input. 
Won't work, but keep it for the kicks - # test = mask_or(mask, True) - # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) - # assert_equal(test, control) - # Using another array w / the same dtype - other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) - test = mask_or(mask, other) - control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) - assert_equal(test, control) - # Using another array w / a different dtype - othertype = [('A', np.bool), ('B', np.bool)] - other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) - try: - test = mask_or(mask, other) - except ValueError: - pass - # Using nested arrays - dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] - amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) - bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) - cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) - assert_equal(mask_or(amask, bmask), cntrl) - - - def test_flatten_mask(self): - "Tests flatten mask" - # Standarad dtype - mask = np.array([0, 0, 1], dtype=np.bool) - assert_equal(flatten_mask(mask), mask) - # Flexible dtype - mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - test = flatten_mask(mask) - control = np.array([0, 0, 0, 1], dtype=bool) - assert_equal(test, control) - - mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - data = [(0, (0, 0)), (0, (0, 1))] - mask = np.array(data, dtype=mdtype) - test = flatten_mask(mask) - control = np.array([ 0, 0, 0, 0, 0, 1], dtype=bool) - assert_equal(test, control) - - - def test_on_ndarray(self): - "Test functions on ndarrays" - a = np.array([1, 2, 3, 4]) - m = array(a, mask=False) - test = anom(a) - assert_equal(test, m.anom()) - test = reshape(a, (2, 2)) - assert_equal(test, m.reshape(2, 2)) - -#------------------------------------------------------------------------------ - -class TestMaskedFields(TestCase): - # - def setUp(self): - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 
5.5] - slist = ['one', 'two', 'three', 'four', 'five'] - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mdtype = [('a', bool), ('b', bool), ('c', bool)] - mask = [0, 1, 0, 0, 1] - base = array(zip(ilist, flist, slist), mask=mask, dtype=ddtype) - self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) - - def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] - # Set w/ nomask or masked - base.mask = nomask - assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) - base.mask = masked - assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) - # Set w/ simple boolean - base.mask = False - assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) - base.mask = True - assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) - # Set w/ list - base.mask = [0, 0, 0, 1, 1] - assert_equal_records(base._mask, - np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], - dtype=mdtype)) - - def test_set_record_element(self): - "Check setting an element of a record)" - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[0] = (pi, pi, 'pi') - - assert_equal(base_a.dtype, int) - assert_equal(base_a._data, [3, 2, 3, 4, 5]) - - assert_equal(base_b.dtype, float) - assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) - - assert_equal(base_c.dtype, '|S8') - assert_equal(base_c._data, - asbytes_nested(['pi', 'two', 'three', 'four', 'five'])) - - def test_set_record_slice(self): - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[:3] = (pi, pi, 'pi') - - assert_equal(base_a.dtype, int) - assert_equal(base_a._data, [3, 3, 3, 4, 5]) - - assert_equal(base_b.dtype, float) - assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) - - assert_equal(base_c.dtype, '|S8') - assert_equal(base_c._data, - asbytes_nested(['pi', 'pi', 'pi', 'four', 'five'])) - - def test_mask_element(self): - "Check record access" - base = 
self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[0] = masked - # - for n in ('a', 'b', 'c'): - assert_equal(base[n].mask, [1, 1, 0, 0, 1]) - assert_equal(base[n]._data, base._data[n]) - # - def test_getmaskarray(self): - "Test getmaskarray on flexible dtype" - ndtype = [('a', int), ('b', float)] - test = empty(3, dtype=ndtype) - assert_equal(getmaskarray(test), - np.array([(0, 0) , (0, 0), (0, 0)], - dtype=[('a', '|b1'), ('b', '|b1')])) - test[:] = masked - assert_equal(getmaskarray(test), - np.array([(1, 1) , (1, 1), (1, 1)], - dtype=[('a', '|b1'), ('b', '|b1')])) - # - def test_view(self): - "Test view w/ flexible dtype" - iterator = zip(np.arange(10), np.random.rand(10)) - data = np.array(iterator) - a = array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - controlmask = np.array([1] + 19 * [0], dtype=bool) - # Transform globally to simple dtype - test = a.view(float) - assert_equal(test, data.ravel()) - assert_equal(test.mask, controlmask) - # Transform globally to dty - test = a.view((float, 2)) - assert_equal(test, data) - assert_equal(test.mask, controlmask.reshape(-1, 2)) - # - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - self.assertTrue(isinstance(test, np.matrix)) - # - def test_getitem(self): - ndtype = [('a', float), ('b', float)] - a = array(zip(np.random.rand(10), np.arange(10)), dtype=ndtype) - a.mask = np.array(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 0, 0, 0, 0, 1, 0]), - dtype=[('a', bool), ('b', bool)]) - # No mask - self.assertTrue(isinstance(a[1], np.void)) - # One element masked - self.assertTrue(isinstance(a[0], MaskedArray)) - assert_equal_records(a[0]._data, a._data[0]) - assert_equal_records(a[0]._mask, a._mask[0]) - # All element masked - self.assertTrue(isinstance(a[-2], MaskedArray)) - assert_equal_records(a[-2]._data, a._data[-2]) - assert_equal_records(a[-2]._mask, a._mask[-2]) - 
-#------------------------------------------------------------------------------ - -class TestMaskedView(TestCase): - # - def setUp(self): - iterator = zip(np.arange(10), np.random.rand(10)) - data = np.array(iterator) - a = array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) - # - def test_view_to_nothing(self): - (data, a, controlmask) = self.data - test = a.view() - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test._data, a._data) - assert_equal(test._mask, a._mask) - - # - def test_view_to_type(self): - (data, a, controlmask) = self.data - test = a.view(np.ndarray) - self.assertTrue(not isinstance(test, MaskedArray)) - assert_equal(test, a._data) - assert_equal_records(test, data.view(a.dtype).squeeze()) - # - def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data - # View globally - test = a.view(float) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test, data.ravel()) - assert_equal(test.mask, controlmask) - # - def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data - # - test = a.view([('A', float), ('B', float)]) - assert_equal(test.mask.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a']) - assert_equal(test['B'], a['b']) - # - test = a[0].view([('A', float), ('B', float)]) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.mask.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a'][0]) - assert_equal(test['B'], a['b'][0]) - # - test = a[-1].view([('A', float), ('B', float)]) - self.assertTrue(not isinstance(test, MaskedArray)) - assert_equal(test.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a'][-1]) - assert_equal(test['B'], a['b'][-1]) - - # - def test_view_to_subdtype(self): - (data, a, controlmask) = self.data - # View globally - test = a.view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test, data) - 
assert_equal(test.mask, controlmask.reshape(-1, 2)) - # View on 1 masked element - test = a[0].view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test, data[0]) - assert_equal(test.mask, (1, 0)) - # View on 1 unmasked element - test = a[-1].view((float, 2)) - self.assertTrue(not isinstance(test, MaskedArray)) - assert_equal(test, data[-1]) - # - def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data - # - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - self.assertTrue(isinstance(test, np.matrix)) - self.assertTrue(not isinstance(test, MaskedArray)) - -def test_masked_array(): - a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) - assert_equal(np.argwhere(a), [[1], [3]]) - -############################################################################### -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/ma/tests/test_extras.py b/numpy-1.6.2/numpy/ma/tests/test_extras.py deleted file mode 100644 index ac33ec0e5f..0000000000 --- a/numpy-1.6.2/numpy/ma/tests/test_extras.py +++ /dev/null @@ -1,848 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511 -"""Tests suite for MaskedArray. 
-Adapted from the original test_ma by Pierre Gerard-Marchant - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import numpy as np -from numpy.testing import TestCase, run_module_suite -from numpy.ma.testutils import * -from numpy.ma.core import * -from numpy.ma.extras import * - - -class TestGeneric(TestCase): - # - def test_masked_all(self): - "Tests masked_all" - # Standard dtype - test = masked_all((2,), dtype=float) - control = array([1, 1], mask=[1, 1], dtype=float) - assert_equal(test, control) - # Flexible dtype - dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) - test = masked_all((2,), dtype=dt) - control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) - assert_equal(test, control) - test = masked_all((2, 2), dtype=dt) - control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], - mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], - dtype=dt) - assert_equal(test, control) - # Nested dtype - dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) - test = masked_all((2,), dtype=dt) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - assert_equal(test, control) - test = masked_all((2,), dtype=dt) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - assert_equal(test, control) - test = masked_all((1, 1), dtype=dt) - control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) - assert_equal(test, control) - - - def test_masked_all_like(self): - "Tests masked_all" - # Standard dtype - base = array([1, 2], dtype=float) - test = masked_all_like(base) - control = array([1, 1], mask=[1, 1], dtype=float) - assert_equal(test, control) - # Flexible dtype - dt = 
np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) - base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) - test = masked_all_like(base) - control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) - assert_equal(test, control) - # Nested dtype - dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - test = masked_all_like(control) - assert_equal(test, control) - - def test_clump_masked(self): - "Test clump_masked" - a = masked_array(np.arange(10)) - a[[0, 1, 2, 6, 8, 9]] = masked - # - test = clump_masked(a) - control = [slice(0, 3), slice(6, 7), slice(8, 10)] - assert_equal(test, control) - - def test_clump_unmasked(self): - "Test clump_unmasked" - a = masked_array(np.arange(10)) - a[[0, 1, 2, 6, 8, 9]] = masked - test = clump_unmasked(a) - control = [slice(3, 6), slice(7, 8), ] - assert_equal(test, control) - - def test_flatnotmasked_contiguous(self): - "Test flatnotmasked_contiguous" - a = arange(10) - # No mask - test = flatnotmasked_contiguous(a) - assert_equal(test, slice(0, a.size)) - # Some mask - a[(a < 3) | (a > 8) | (a == 5)] = masked - test = flatnotmasked_contiguous(a) - assert_equal(test, [slice(3, 5), slice(6, 9)]) - # - a[:] = masked - test = flatnotmasked_contiguous(a) - assert_equal(test, None) - - -class TestAverage(TestCase): - "Several tests of average. Why so many ? Good point..." - def test_testAverage1(self): - "Test of average." 
- ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) - assert_equal(2.0, average(ott, axis=0)) - assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) - result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) - assert_equal(2.0, result) - self.assertTrue(wts == 4.0) - ott[:] = masked - assert_equal(average(ott, axis=0).mask, [True]) - ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) - ott = ott.reshape(2, 2) - ott[:, 1] = masked - assert_equal(average(ott, axis=0), [2.0, 0.0]) - assert_equal(average(ott, axis=1).mask[0], [True]) - assert_equal([2., 0.], average(ott, axis=0)) - result, wts = average(ott, axis=0, returned=1) - assert_equal(wts, [1., 0.]) - - def test_testAverage2(self): - "More tests of average." - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = arange(6, dtype=float_) - assert_equal(average(x, axis=0), 2.5) - assert_equal(average(x, axis=0, weights=w1), 2.5) - y = array([arange(6, dtype=float_), 2.0 * arange(6)]) - assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) - assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) - assert_equal(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0]) - assert_equal(average(y, None, weights=w2), 20. / 6.) - assert_equal(average(y, axis=0, weights=w2), - [0., 1., 2., 3., 4., 10.]) - assert_equal(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0]) - m1 = zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - assert_equal(average(masked_array(x, m1), axis=0), 2.5) - assert_equal(average(masked_array(x, m2), axis=0), 2.5) - assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) - assert_equal(average(masked_array(x, m5), axis=0), 0.0) - assert_equal(count(average(masked_array(x, m4), axis=0)), 0) - z = masked_array(y, m3) - assert_equal(average(z, None), 20. / 6.) 
- assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - assert_equal(average(z, axis=1), [2.5, 5.0]) - assert_equal(average(z, axis=0, weights=w2), - [0., 1., 99., 99., 4.0, 10.0]) - - def test_testAverage3(self): - "Yet more tests of average!" - a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) - assert_equal(shape(r1) , shape(w1)) - assert_equal(r1.shape , w1.shape) - r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) - assert_equal(shape(w2) , shape(r2)) - r2, w2 = average(ones((2, 2, 3)), returned=1) - assert_equal(shape(w2) , shape(r2)) - r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) - assert_equal(shape(w2), shape(r2)) - a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[False, False], [True, False]]) - a2da = average(a2d, axis=0) - assert_equal(a2da, [0.5, 3.0]) - a2dma = average(a2dm, axis=0) - assert_equal(a2dma, [1.0, 3.0]) - a2dma = average(a2dm, axis=None) - assert_equal(a2dma, 7. / 3.) - a2dma = average(a2dm, axis=1) - assert_equal(a2dma, [1.5, 4.0]) - - def test_onintegers_with_mask(self): - "Test average on integers with mask" - a = average(array([1, 2])) - assert_equal(a, 1.5) - a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) - assert_equal(a, 1.5) - - -class TestConcatenator(TestCase): - """ - Tests for mr_, the equivalent of r_ for masked arrays. - """ - - def test_1d(self): - "Tests mr_ on 1D arrays." - assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) - b = ones(5) - m = [1, 0, 0, 0, 0] - d = masked_array(b, mask=m) - c = mr_[d, 0, 0, d] - self.assertTrue(isinstance(c, MaskedArray) or isinstance(c, core.MaskedArray)) - assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) - assert_array_equal(c.mask, mr_[m, 0, 0, m]) - - def test_2d(self): - "Tests mr_ on 2D arrays." 
- a_1 = rand(5, 5) - a_2 = rand(5, 5) - m_1 = np.round_(rand(5, 5), 0) - m_2 = np.round_(rand(5, 5), 0) - b_1 = masked_array(a_1, mask=m_1) - b_2 = masked_array(a_2, mask=m_2) - d = mr_['1', b_1, b_2] # append columns - self.assertTrue(d.shape == (5, 10)) - assert_array_equal(d[:, :5], b_1) - assert_array_equal(d[:, 5:], b_2) - assert_array_equal(d.mask, np.r_['1', m_1, m_2]) - d = mr_[b_1, b_2] - self.assertTrue(d.shape == (10, 5)) - assert_array_equal(d[:5, :], b_1) - assert_array_equal(d[5:, :], b_2) - assert_array_equal(d.mask, np.r_[m_1, m_2]) - - - -class TestNotMasked(TestCase): - """ - Tests notmasked_edges and notmasked_contiguous. - """ - - def test_edges(self): - "Tests unmasked_edges" - data = masked_array(np.arange(25).reshape(5, 5), - mask=[[0, 0, 1, 0, 0], - [0, 0, 0, 1, 1], - [1, 1, 0, 0, 0], - [0, 0, 0, 0, 0], - [1, 1, 1, 0, 0]],) - test = notmasked_edges(data, None) - assert_equal(test, [0, 24]) - test = notmasked_edges(data, 0) - assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data, 1) - assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) - assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) - # - test = notmasked_edges(data.data, None) - assert_equal(test, [0, 24]) - test = notmasked_edges(data.data, 0) - assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data.data, -1) - assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) - assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) - # - data[-2] = masked - test = notmasked_edges(data, 0) - assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data, -1) - assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) - assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) - - - def test_contiguous(self): - "Tests 
notmasked_contiguous" - a = masked_array(np.arange(24).reshape(3, 8), - mask=[[0, 0, 0, 0, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 1, 0], ]) - tmp = notmasked_contiguous(a, None) - assert_equal(tmp[-1], slice(23, 24, None)) - assert_equal(tmp[-2], slice(16, 22, None)) - assert_equal(tmp[-3], slice(0, 4, None)) - # - tmp = notmasked_contiguous(a, 0) - self.assertTrue(len(tmp[-1]) == 1) - self.assertTrue(tmp[-2] is None) - assert_equal(tmp[-3], tmp[-1]) - self.assertTrue(len(tmp[0]) == 2) - # - tmp = notmasked_contiguous(a, 1) - assert_equal(tmp[0][-1], slice(0, 4, None)) - self.assertTrue(tmp[1] is None) - assert_equal(tmp[2][-1], slice(7, 8, None)) - assert_equal(tmp[2][-2], slice(0, 6, None)) - - - -class Test2DFunctions(TestCase): - "Tests 2D functions" - def test_compress2d(self): - "Tests compress2d" - x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) - assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) - assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) - x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) - assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) - assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[8]]) - assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) - assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - assert_equal(compress_rowcols(x).size, 0) - assert_equal(compress_rowcols(x, 0).size, 0) - assert_equal(compress_rowcols(x, 1).size, 0) - # - def test_mask_rowcols(self): - "Tests mask_rowcols." 
- x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1).mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - assert_equal(mask_rowcols(x, 0).mask, [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1).mask, [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) - assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1,).mask, [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - self.assertTrue(mask_rowcols(x).all() is masked) - self.assertTrue(mask_rowcols(x, 0).all() is masked) - self.assertTrue(mask_rowcols(x, 1).all() is masked) - self.assertTrue(mask_rowcols(x).mask.all()) - self.assertTrue(mask_rowcols(x, 0).mask.all()) - self.assertTrue(mask_rowcols(x, 1).mask.all()) - # - def test_dot(self): - "Tests dot product" - n = np.arange(1, 7) - # - m = [1, 0, 0, 0, 0, 0] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[1, 1], [1, 0]]) - c = dot(b, a, True) - assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - m = [0, 0, 0, 0, 0, 1] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[0, 1], [1, 1]]) - c = dot(b, a, True) - assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) - c = dot(a, b, False) - 
assert_equal(c, np.dot(a.filled(0), b.filled(0))) - assert_equal(c, dot(a, b)) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - m = [0, 0, 0, 0, 0, 0] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b) - assert_equal(c.mask, nomask) - c = dot(b, a) - assert_equal(c.mask, nomask) - # - a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[1, 1], [0, 0]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) - assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[0, 0], [1, 1]]) - c = dot(a, b) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) - assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[1, 0], [1, 1]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) - assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - - - -class TestApplyAlongAxis(TestCase): - # - "Tests 2D functions" - def test_3d(self): - a = arange(12.).reshape(2, 2, 3) - def myfunc(b): - return b[1] - xa = apply_along_axis(myfunc, 2, a) - assert_equal(xa, [[1, 4], [7, 10]]) - - - -class TestApplyOverAxes(TestCase): - "Tests apply_over_axes" - def test_basic(self): - a = arange(24).reshape(2, 3, 4) - test = 
apply_over_axes(np.sum, a, [0, 2]) - ctrl = np.array([[[ 60], [ 92], [124]]]) - assert_equal(test, ctrl) - a[(a % 2).astype(np.bool)] = masked - test = apply_over_axes(np.sum, a, [0, 2]) - ctrl = np.array([[[ 30], [ 44], [60]]]) - - -class TestMedian(TestCase): - # - def test_2d(self): - "Tests median w/ 2D" - (n, p) = (101, 30) - x = masked_array(np.linspace(-1., 1., n),) - x[:10] = x[-10:] = masked - z = masked_array(np.empty((n, p), dtype=float)) - z[:, 0] = x[:] - idx = np.arange(len(x)) - for i in range(1, p): - np.random.shuffle(idx) - z[:, i] = x[idx] - assert_equal(median(z[:, 0]), 0) - assert_equal(median(z), 0) - assert_equal(median(z, axis=0), np.zeros(p)) - assert_equal(median(z.T, axis=1), np.zeros(p)) - # - def test_2d_waxis(self): - "Tests median w/ 2D arrays and different axis." - x = masked_array(np.arange(30).reshape(10, 3)) - x[:3] = x[-3:] = masked - assert_equal(median(x), 14.5) - assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) - assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) - assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) - # - def test_3d(self): - "Tests median w/ 3D" - x = np.ma.arange(24).reshape(3, 4, 2) - x[x % 3 == 0] = masked - assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) - assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) - x = np.ma.arange(24).reshape(4, 3, 2) - x[x % 5 == 0] = masked - assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) - - - -class TestCov(TestCase): - - def setUp(self): - self.data = array(np.random.rand(12)) - - def test_1d_wo_missing(self): - "Test cov on 1D variable w/o missing values" - x = self.data - assert_almost_equal(np.cov(x), cov(x)) - assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(x, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - - def test_2d_wo_missing(self): - "Test cov on 1 2D variable w/o missing values" - x = self.data.reshape(3, 
4) - assert_almost_equal(np.cov(x), cov(x)) - assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(x, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - - def test_1d_w_missing(self): - "Test cov 1 1D variable w/missing values" - x = self.data - x[-1] = masked - x -= x.mean() - nx = x.compressed() - assert_almost_equal(np.cov(nx), cov(x)) - assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(nx, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - # - try: - cov(x, allow_masked=False) - except ValueError: - pass - # - # 2 1D variables w/ missing values - nx = x[1:-1] - assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) - assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), - cov(x, x[::-1], rowvar=False)) - assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), - cov(x, x[::-1], rowvar=False, bias=True)) - - def test_2d_w_missing(self): - "Test cov on 2D variable w/ missing value" - x = self.data - x[-1] = masked - x = x.reshape(3, 4) - valid = np.logical_not(getmaskarray(x)).astype(int) - frac = np.dot(valid, valid.T) - xf = (x - x.mean(1)[:, None]).filled(0) - assert_almost_equal(cov(x), np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) - assert_almost_equal(cov(x, bias=True), - np.cov(xf, bias=True) * x.shape[1] / frac) - frac = np.dot(valid.T, valid) - xf = (x - x.mean(0)).filled(0) - assert_almost_equal(cov(x, rowvar=False), - np.cov(xf, rowvar=False) * (x.shape[0] - 1) / (frac - 1.)) - assert_almost_equal(cov(x, rowvar=False, bias=True), - np.cov(xf, rowvar=False, bias=True) * x.shape[0] / frac) - - - -class TestCorrcoef(TestCase): - - def setUp(self): - self.data = array(np.random.rand(12)) - - def test_ddof(self): - "Test ddof keyword" - x = self.data - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - - - def test_1d_wo_missing(self): - "Test cov on 1D variable w/o missing values" - x = self.data - 
assert_almost_equal(np.corrcoef(x), corrcoef(x)) - assert_almost_equal(np.corrcoef(x, rowvar=False), - corrcoef(x, rowvar=False)) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - - def test_2d_wo_missing(self): - "Test corrcoef on 1 2D variable w/o missing values" - x = self.data.reshape(3, 4) - assert_almost_equal(np.corrcoef(x), corrcoef(x)) - assert_almost_equal(np.corrcoef(x, rowvar=False), - corrcoef(x, rowvar=False)) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - - def test_1d_w_missing(self): - "Test corrcoef 1 1D variable w/missing values" - x = self.data - x[-1] = masked - x -= x.mean() - nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) - assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - # - try: - corrcoef(x, allow_masked=False) - except ValueError: - pass - # - # 2 1D variables w/ missing values - nx = x[1:-1] - assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) - assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), - corrcoef(x, x[::-1], rowvar=False)) - assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True), - corrcoef(x, x[::-1], rowvar=False, bias=True)) - - def test_2d_w_missing(self): - "Test corrcoef on 2D variable w/ missing value" - x = self.data - x[-1] = masked - x = x.reshape(3, 4) - - test = corrcoef(x) - control = np.corrcoef(x) - assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - - - -class TestPolynomial(TestCase): - # - def test_polyfit(self): - "Tests polyfit" - # On ndarrays - x = np.random.rand(10) - y = np.random.rand(20).reshape(-1, 2) - assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) - # ON 1D maskedarrays - x = x.view(MaskedArray) - x[0] = masked - y = y.view(MaskedArray) - y[0, 0] = y[-1, -1] = masked - # 
- (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - - - -class TestArraySetOps(TestCase): - # - def test_unique_onlist(self): - "Test unique on list" - data = [1, 1, 1, 2, 2, 3] - test = unique(data, return_index=True, return_inverse=True) - self.assertTrue(isinstance(test[0], MaskedArray)) - assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) - assert_equal(test[1], [0, 3, 5]) - assert_equal(test[2], [0, 0, 0, 1, 1, 2]) - - def test_unique_onmaskedarray(self): - "Test unique on masked data w/use_mask=True" - data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) - assert_equal(test[1], [0, 3, 5, 2]) - assert_equal(test[2], [0, 0, 3, 1, 3, 2]) - # - data.fill_value = 3 - data = masked_array([1, 1, 1, 2, 2, 3], - mask=[0, 0, 1, 0, 1, 0], fill_value=3) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) - assert_equal(test[1], [0, 3, 5, 2]) - assert_equal(test[2], [0, 0, 3, 1, 3, 2]) - - def test_unique_allmasked(self): - "Test all masked" - data = masked_array([1, 1, 1], mask=True) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, ], mask=[True])) - assert_equal(test[1], [0]) - assert_equal(test[2], [0, 0, 0]) - # 
- "Test masked" - data = masked - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array(masked)) - assert_equal(test[1], [0]) - assert_equal(test[2], [0]) - - def test_ediff1d(self): - "Tests mediff1d" - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) - test = ediff1d(x) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - def test_ediff1d_tobegin(self): - "Test ediff1d w/ to_begin" - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_begin=masked) - control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_begin=[1, 2, 3]) - control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - def test_ediff1d_toend(self): - "Test ediff1d w/ to_end" - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_end=masked) - control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=[1, 2, 3]) - control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - def test_ediff1d_tobegin_toend(self): - "Test ediff1d w/ to_begin and to_end" - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_end=masked, to_begin=masked) - control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) - 
control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - def test_ediff1d_ndarray(self): - "Test ediff1d w/ a ndarray" - x = np.arange(5) - test = ediff1d(x) - control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) - assert_equal(test, control) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=masked, to_begin=masked) - control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - - def test_intersect1d(self): - "Test intersect1d" - x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - test = intersect1d(x, y) - control = array([1, 3, -1], mask=[0, 0, 1]) - assert_equal(test, control) - - - def test_setxor1d(self): - "Test setxor1d" - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = setxor1d(a, b) - assert_equal(test, array([3, 4, 7])) - # - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = [1, 2, 3, 4, 5] - test = setxor1d(a, b) - assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) - # - a = array([1, 2, 3]) - b = array([6, 5, 4]) - test = setxor1d(a, b) - assert(isinstance(test, MaskedArray)) - assert_equal(test, [1, 2, 3, 4, 5, 6]) - # - a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) - b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) - test = setxor1d(a, b) - assert(isinstance(test, MaskedArray)) - assert_equal(test, [1, 2, 3, 4, 5, 6]) - # - assert_array_equal([], setxor1d([], [])) - - - def test_in1d(self): - "Test in1d" - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = in1d(a, b) - assert_equal(test, [True, True, True, 
False, True]) - # - a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 5, -1], mask=[0, 0, 1]) - test = in1d(a, b) - assert_equal(test, [True, True, False, True, True]) - # - assert_array_equal([], in1d([], [])) - - - def test_union1d(self): - "Test union1d" - a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = union1d(a, b) - control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) - assert_equal(test, control) - # - assert_array_equal([], union1d([], [])) - - - def test_setdiff1d(self): - "Test setdiff1d" - a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) - b = array([2, 4, 3, 3, 2, 1, 5]) - test = setdiff1d(a, b) - assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) - # - a = arange(10) - b = arange(8) - assert_equal(setdiff1d(a, b), array([8, 9])) - - - def test_setdiff1d_char_array(self): - "Test setdiff1d_charray" - a = np.array(['a', 'b', 'c']) - b = np.array(['a', 'b', 's']) - assert_array_equal(setdiff1d(a, b), np.array(['c'])) - - - - - -class TestShapeBase(TestCase): - # - def test_atleast2d(self): - "Test atleast_2d" - a = masked_array([0, 1, 2], mask=[0, 1, 0]) - b = atleast_2d(a) - assert_equal(b.shape, (1, 3)) - assert_equal(b.mask.shape, b.data.shape) - assert_equal(a.shape, (3,)) - assert_equal(a.mask.shape, a.data.shape) - - -############################################################################### -#------------------------------------------------------------------------------ -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/ma/tests/test_mrecords.py b/numpy-1.6.2/numpy/ma/tests/test_mrecords.py deleted file mode 100644 index f068d4e503..0000000000 --- a/numpy-1.6.2/numpy/ma/tests/test_mrecords.py +++ /dev/null @@ -1,501 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for mrecords. 
- -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import sys -import numpy as np -from numpy import recarray -from numpy.core.records import fromrecords as recfromrecords, \ - fromarrays as recfromarrays - -from numpy.compat import asbytes, asbytes_nested - -import numpy.ma.testutils -from numpy.ma.testutils import * - -import numpy.ma as ma -from numpy.ma import masked, nomask - -from numpy.ma.mrecords import MaskedRecords, mrecarray, fromarrays, \ - fromtextfile, fromrecords, addfield - -#.............................................................................. -class TestMRecords(TestCase): - "Base test class for MaskedArrays." - def __init__(self, *args, **kwds): - TestCase.__init__(self, *args, **kwds) - self.setup() - - def setup(self): - "Generic setup" - ilist = [1,2,3,4,5] - flist = [1.1,2.2,3.3,4.4,5.5] - slist = asbytes_nested(['one','two','three','four','five']) - ddtype = [('a',int),('b',float),('c','|S8')] - mask = [0,1,0,0,1] - self.base = ma.array(list(zip(ilist,flist,slist)), - mask=mask, dtype=ddtype) - - def test_byview(self): - "Test creation by view" - base = self.base - mbase = base.view(mrecarray) - assert_equal(mbase.recordmask, base.recordmask) - assert_equal_records(mbase._mask, base._mask) - assert isinstance(mbase._data, recarray) - assert_equal_records(mbase._data, base._data.view(recarray)) - for field in ('a','b','c'): - assert_equal(base[field], mbase[field]) - assert_equal_records(mbase.view(mrecarray), mbase) - - def test_get(self): - "Tests fields retrieval" - base = self.base.copy() - mbase = base.view(mrecarray) - # As fields.......... - for field in ('a','b','c'): - assert_equal(getattr(mbase,field), mbase[field]) - assert_equal(base[field], mbase[field]) - # as elements ....... 
- mbase_first = mbase[0] - assert isinstance(mbase_first, mrecarray) - assert_equal(mbase_first.dtype, mbase.dtype) - assert_equal(mbase_first.tolist(), (1,1.1,asbytes('one'))) - # Used to be mask, now it's recordmask - assert_equal(mbase_first.recordmask, nomask) - assert_equal(mbase_first._mask.item(), (False, False, False)) - assert_equal(mbase_first['a'], mbase['a'][0]) - mbase_last = mbase[-1] - assert isinstance(mbase_last, mrecarray) - assert_equal(mbase_last.dtype, mbase.dtype) - assert_equal(mbase_last.tolist(), (None,None,None)) - # Used to be mask, now it's recordmask - assert_equal(mbase_last.recordmask, True) - assert_equal(mbase_last._mask.item(), (True, True, True)) - assert_equal(mbase_last['a'], mbase['a'][-1]) - assert (mbase_last['a'] is masked) - # as slice .......... - mbase_sl = mbase[:2] - assert isinstance(mbase_sl, mrecarray) - assert_equal(mbase_sl.dtype, mbase.dtype) - # Used to be mask, now it's recordmask - assert_equal(mbase_sl.recordmask, [0,1]) - assert_equal_records(mbase_sl.mask, - np.array([(False,False,False),(True,True,True)], - dtype=mbase._mask.dtype)) - assert_equal_records(mbase_sl, base[:2].view(mrecarray)) - for field in ('a','b','c'): - assert_equal(getattr(mbase_sl,field), base[:2][field]) - - def test_set_fields(self): - "Tests setting fields." - base = self.base.copy() - mbase = base.view(mrecarray) - mbase = mbase.copy() - mbase.fill_value = (999999,1e20,'N/A') - # Change the data, the mask should be conserved - mbase.a._data[:] = 5 - assert_equal(mbase['a']._data, [5,5,5,5,5]) - assert_equal(mbase['a']._mask, [0,1,0,0,1]) - # Change the elements, and the mask will follow - mbase.a = 1 - assert_equal(mbase['a']._data, [1]*5) - assert_equal(ma.getmaskarray(mbase['a']), [0]*5) - # Use to be _mask, now it's recordmask - assert_equal(mbase.recordmask, [False]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0,0,0),(0,1,1),(0,0,0),(0,0,0),(0,1,1)], - dtype=bool)) - # Set a field to mask ........................ 
- mbase.c = masked - # Use to be mask, and now it's still mask ! - assert_equal(mbase.c.mask, [1]*5) - assert_equal(mbase.c.recordmask, [1]*5) - assert_equal(ma.getmaskarray(mbase['c']), [1]*5) - assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0,0,1),(0,1,1),(0,0,1),(0,0,1),(0,1,1)], - dtype=bool)) - # Set fields by slices ....................... - mbase = base.view(mrecarray).copy() - mbase.a[3:] = 5 - assert_equal(mbase.a, [1,2,3,5,5]) - assert_equal(mbase.a._mask, [0,1,0,0,0]) - mbase.b[3:] = masked - assert_equal(mbase.b, base['b']) - assert_equal(mbase.b._mask, [0,1,0,1,1]) - # Set fields globally.......................... - ndtype = [('alpha','|S1'),('num',int)] - data = ma.array([('a',1),('b',2),('c',3)], dtype=ndtype) - rdata = data.view(MaskedRecords) - val = ma.array([10,20,30], mask=[1,0,0]) - # - import warnings - warnings.simplefilter("ignore") - rdata['num'] = val - assert_equal(rdata.num, val) - assert_equal(rdata.num.mask, [1,0,0]) - - def test_set_fields_mask(self): - "Tests setting the mask of a field." - base = self.base.copy() - # This one has already a mask.... - mbase = base.view(mrecarray) - mbase['a'][-2] = masked - assert_equal(mbase.a, [1,2,3,4,5]) - assert_equal(mbase.a._mask, [0,1,0,1,1]) - # This one has not yet - mbase = fromarrays([np.arange(5), np.random.rand(5)], - dtype=[('a',int),('b',float)]) - mbase['a'][-2] = masked - assert_equal(mbase.a, [0,1,2,3,4]) - assert_equal(mbase.a._mask, [0,0,0,1,0]) - # - def test_set_mask(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Set the mask to True ....................... - mbase.mask = masked - assert_equal(ma.getmaskarray(mbase['b']), [1]*5) - assert_equal(mbase['a']._mask, mbase['b']._mask) - assert_equal(mbase['a']._mask, mbase['c']._mask) - assert_equal(mbase._mask.tolist(), - np.array([(1,1,1)]*5, dtype=bool)) - # Delete the mask ............................ 
- mbase.mask = nomask - assert_equal(ma.getmaskarray(mbase['c']), [0]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0,0,0)]*5, dtype=bool)) - # - def test_set_mask_fromarray(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Sets the mask w/ an array - mbase.mask = [1,0,0,0,1] - assert_equal(mbase.a.mask, [1,0,0,0,1]) - assert_equal(mbase.b.mask, [1,0,0,0,1]) - assert_equal(mbase.c.mask, [1,0,0,0,1]) - # Yay, once more ! - mbase.mask = [0,0,0,0,1] - assert_equal(mbase.a.mask, [0,0,0,0,1]) - assert_equal(mbase.b.mask, [0,0,0,0,1]) - assert_equal(mbase.c.mask, [0,0,0,0,1]) - # - def test_set_mask_fromfields(self): - mbase = self.base.copy().view(mrecarray) - # - nmask = np.array([(0,1,0),(0,1,0),(1,0,1),(1,0,1),(0,0,0)], - dtype=[('a',bool),('b',bool),('c',bool)]) - mbase.mask = nmask - assert_equal(mbase.a.mask, [0,0,1,1,0]) - assert_equal(mbase.b.mask, [1,1,0,0,0]) - assert_equal(mbase.c.mask, [0,0,1,1,0]) - # Reinitalizes and redo - mbase.mask = False - mbase.fieldmask = nmask - assert_equal(mbase.a.mask, [0,0,1,1,0]) - assert_equal(mbase.b.mask, [1,1,0,0,0]) - assert_equal(mbase.c.mask, [0,0,1,1,0]) - # - def test_set_elements(self): - base = self.base.copy() - # Set an element to mask ..................... - mbase = base.view(mrecarray).copy() - mbase[-2] = masked - assert_equal(mbase._mask.tolist(), - np.array([(0,0,0),(1,1,1),(0,0,0),(1,1,1),(1,1,1)], - dtype=bool)) - # Used to be mask, now it's recordmask! - assert_equal(mbase.recordmask, [0,1,0,1,1]) - # Set slices ................................. 
- mbase = base.view(mrecarray).copy() - mbase[:2] = (5,5,5) - assert_equal(mbase.a._data, [5,5,3,4,5]) - assert_equal(mbase.a._mask, [0,0,0,0,1]) - assert_equal(mbase.b._data, [5.,5.,3.3,4.4,5.5]) - assert_equal(mbase.b._mask, [0,0,0,0,1]) - assert_equal(mbase.c._data, - asbytes_nested(['5','5','three','four','five'])) - assert_equal(mbase.b._mask, [0,0,0,0,1]) - # - mbase = base.view(mrecarray).copy() - mbase[:2] = masked - assert_equal(mbase.a._data, [1,2,3,4,5]) - assert_equal(mbase.a._mask, [1,1,0,0,1]) - assert_equal(mbase.b._data, [1.1,2.2,3.3,4.4,5.5]) - assert_equal(mbase.b._mask, [1,1,0,0,1]) - assert_equal(mbase.c._data, - asbytes_nested(['one','two','three','four','five'])) - assert_equal(mbase.b._mask, [1,1,0,0,1]) - # - def test_setslices_hardmask(self): - "Tests setting slices w/ hardmask." - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - try: - mbase[-2:] = (5,5,5) - assert_equal(mbase.a._data, [1,2,3,5,5]) - assert_equal(mbase.b._data, [1.1,2.2,3.3,5,5.5]) - assert_equal(mbase.c._data, - asbytes_nested(['one','two','three','5','five'])) - assert_equal(mbase.a._mask, [0,1,0,0,1]) - assert_equal(mbase.b._mask, mbase.a._mask) - assert_equal(mbase.b._mask, mbase.c._mask) - except NotImplementedError: - # OK, not implemented yet... - pass - except AssertionError: - raise - else: - raise Exception("Flexible hard masks should be supported !") - # Not using a tuple should crash - try: - mbase[-2:] = 3 - except (NotImplementedError, TypeError): - pass - else: - raise TypeError("Should have expected a readable buffer object!") - - - def test_hardmask(self): - "Test hardmask" - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - self.assertTrue(mbase._hardmask) - mbase.mask = nomask - assert_equal_records(mbase._mask, base._mask) - mbase.soften_mask() - self.assertTrue(not mbase._hardmask) - mbase.mask = nomask - # So, the mask of a field is no longer set to nomask... 
- assert_equal_records(mbase._mask, - ma.make_mask_none(base.shape,base.dtype)) - self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask) - assert_equal(mbase['a']._mask,mbase['b']._mask) - # - def test_pickling(self): - "Test pickling" - import cPickle - base = self.base.copy() - mrec = base.view(mrecarray) - _ = cPickle.dumps(mrec) - mrec_ = cPickle.loads(_) - assert_equal(mrec_.dtype, mrec.dtype) - assert_equal_records(mrec_._data, mrec._data) - assert_equal(mrec_._mask, mrec._mask) - assert_equal_records(mrec_._mask, mrec._mask) - # - def test_filled(self): - "Test filling the array" - _a = ma.array([1,2,3],mask=[0,0,1],dtype=int) - _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float) - _c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8') - ddtype = [('a',int),('b',float),('c','|S8')] - mrec = fromarrays([_a,_b,_c], dtype=ddtype, - fill_value=(99999,99999.,'N/A')) - mrecfilled = mrec.filled() - assert_equal(mrecfilled['a'], np.array((1,2,99999), dtype=int)) - assert_equal(mrecfilled['b'], np.array((1.1,2.2,99999.), dtype=float)) - assert_equal(mrecfilled['c'], np.array(('one','two','N/A'), dtype='|S8')) - # - def test_tolist(self): - "Test tolist." 
- _a = ma.array([1,2,3],mask=[0,0,1],dtype=int) - _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float) - _c = ma.array(['one','two','three'],mask=[1,0,0],dtype='|S8') - ddtype = [('a',int),('b',float),('c','|S8')] - mrec = fromarrays([_a,_b,_c], dtype=ddtype, - fill_value=(99999,99999.,'N/A')) - # - assert_equal(mrec.tolist(), - [(1,1.1,None),(2,2.2,asbytes('two')), - (None,None,asbytes('three'))]) - - - # - def test_withnames(self): - "Test the creation w/ format and names" - x = mrecarray(1, formats=float, names='base') - x[0]['base'] = 10 - assert_equal(x['base'][0], 10) - # - def test_exotic_formats(self): - "Test that 'exotic' formats are processed properly" - easy = mrecarray(1, dtype=[('i',int), ('s','|S8'), ('f',float)]) - easy[0] = masked - assert_equal(easy.filled(1).item(), (1,asbytes('1'),1.)) - # - solo = mrecarray(1, dtype=[('f0', '= 3: - from functools import reduce - -pi = numpy.pi -def eq(v, w, msg=''): - result = allclose(v, w) - if not result: - print """Not eq:%s -%s ----- -%s""" % (msg, str(v), str(w)) - return result - -class TestMa(TestCase): - def setUp (self): - x = numpy.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = numpy.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] - xm = array(x, mask=m1) - ym = array(y, mask=m2) - z = numpy.array([-.5, 0., .5, .8]) - zm = array(z, mask=[0, 1, 0, 0]) - xf = numpy.where(m1, 1e+20, x) - s = x.shape - xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) - - def test_testBasic1d(self): - "Test of basic array creation and properties in 1 dimension." 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.assertFalse(isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertEqual(shape(xm), s) - self.assertEqual(xm.shape, s) - self.assertEqual(xm.dtype, x.dtype) - self.assertEqual(xm.size , reduce(lambda x, y:x * y, s)) - self.assertEqual(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) - self.assertTrue(eq(xm, xf)) - self.assertTrue(eq(filled(xm, 1.e20), xf)) - self.assertTrue(eq(x, xm)) - - def test_testBasic2d(self): - "Test of basic array creation and properties in 2 dimensions." - for s in [(4, 3), (6, 2)]: - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - - self.assertFalse(isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertEqual(shape(xm), s) - self.assertEqual(xm.shape, s) - self.assertEqual(xm.size , reduce(lambda x, y:x * y, s)) - self.assertEqual(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) - self.assertTrue(eq(xm, xf)) - self.assertTrue(eq(filled(xm, 1.e20), xf)) - self.assertTrue(eq(x, xm)) - self.setUp() - - def test_testArithmetic (self): - "Test of basic arithmetic." 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - a2d = array([[1, 2], [0, 4]]) - a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - self.assertTrue(eq (a2d * a2d, a2d * a2dm)) - self.assertTrue(eq (a2d + a2d, a2d + a2dm)) - self.assertTrue(eq (a2d - a2d, a2d - a2dm)) - for s in [(12,), (4, 3), (2, 6)]: - x = x.reshape(s) - y = y.reshape(s) - xm = xm.reshape(s) - ym = ym.reshape(s) - xf = xf.reshape(s) - self.assertTrue(eq(-x, -xm)) - self.assertTrue(eq(x + y, xm + ym)) - self.assertTrue(eq(x - y, xm - ym)) - self.assertTrue(eq(x * y, xm * ym)) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - try: - self.assertTrue(eq(x / y, xm / ym)) - finally: - numpy.seterr(**olderr) - self.assertTrue(eq(a10 + y, a10 + ym)) - self.assertTrue(eq(a10 - y, a10 - ym)) - self.assertTrue(eq(a10 * y, a10 * ym)) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - try: - self.assertTrue(eq(a10 / y, a10 / ym)) - finally: - numpy.seterr(**olderr) - self.assertTrue(eq(x + a10, xm + a10)) - self.assertTrue(eq(x - a10, xm - a10)) - self.assertTrue(eq(x * a10, xm * a10)) - self.assertTrue(eq(x / a10, xm / a10)) - self.assertTrue(eq(x ** 2, xm ** 2)) - self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5)) - self.assertTrue(eq(x ** y, xm ** ym)) - self.assertTrue(eq(numpy.add(x, y), add(xm, ym))) - self.assertTrue(eq(numpy.subtract(x, y), subtract(xm, ym))) - self.assertTrue(eq(numpy.multiply(x, y), multiply(xm, ym))) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - try: - self.assertTrue(eq(numpy.divide(x, y), divide(xm, ym))) - finally: - numpy.seterr(**olderr) - - - def test_testMixedArithmetic(self): - na = numpy.array([1]) - ma = array([1]) - self.assertTrue(isinstance(na + ma, MaskedArray)) - self.assertTrue(isinstance(ma + na, MaskedArray)) - - def test_testUfuncs1 (self): - "Test various functions such as sin, cos." 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.assertTrue (eq(numpy.cos(x), cos(xm))) - self.assertTrue (eq(numpy.cosh(x), cosh(xm))) - self.assertTrue (eq(numpy.sin(x), sin(xm))) - self.assertTrue (eq(numpy.sinh(x), sinh(xm))) - self.assertTrue (eq(numpy.tan(x), tan(xm))) - self.assertTrue (eq(numpy.tanh(x), tanh(xm))) - olderr = numpy.seterr(divide='ignore', invalid='ignore') - try: - self.assertTrue (eq(numpy.sqrt(abs(x)), sqrt(xm))) - self.assertTrue (eq(numpy.log(abs(x)), log(xm))) - self.assertTrue (eq(numpy.log10(abs(x)), log10(xm))) - finally: - numpy.seterr(**olderr) - self.assertTrue (eq(numpy.exp(x), exp(xm))) - self.assertTrue (eq(numpy.arcsin(z), arcsin(zm))) - self.assertTrue (eq(numpy.arccos(z), arccos(zm))) - self.assertTrue (eq(numpy.arctan(z), arctan(zm))) - self.assertTrue (eq(numpy.arctan2(x, y), arctan2(xm, ym))) - self.assertTrue (eq(numpy.absolute(x), absolute(xm))) - self.assertTrue (eq(numpy.equal(x, y), equal(xm, ym))) - self.assertTrue (eq(numpy.not_equal(x, y), not_equal(xm, ym))) - self.assertTrue (eq(numpy.less(x, y), less(xm, ym))) - self.assertTrue (eq(numpy.greater(x, y), greater(xm, ym))) - self.assertTrue (eq(numpy.less_equal(x, y), less_equal(xm, ym))) - self.assertTrue (eq(numpy.greater_equal(x, y), greater_equal(xm, ym))) - self.assertTrue (eq(numpy.conjugate(x), conjugate(xm))) - self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, ym)))) - self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((x, y)))) - self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, y)))) - self.assertTrue (eq(numpy.concatenate((x, y, x)), concatenate((x, ym, x)))) - - def test_xtestCount (self): - "Test count" - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - if sys.version_info[0] >= 3: - self.assertTrue(isinstance(count(ott), numpy.integer)) - else: - self.assertTrue(isinstance(count(ott), types.IntType)) - self.assertEqual(3, count(ott)) - self.assertEqual(1, count(1)) - self.assertTrue (eq(0, array(1, 
mask=[1]))) - ott = ott.reshape((2, 2)) - assert isinstance(count(ott, 0), numpy.ndarray) - if sys.version_info[0] >= 3: - assert isinstance(count(ott), numpy.integer) - else: - assert isinstance(count(ott), types.IntType) - self.assertTrue (eq(3, count(ott))) - assert getmask(count(ott, 0)) is nomask - self.assertTrue (eq([1, 2], count(ott, 0))) - - def test_testMinMax (self): - "Test minimum and maximum." - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - xr = numpy.ravel(x) #max doesn't work if shaped - xmr = ravel(xm) - - #true because of careful selection of data - self.assertTrue(eq(max(xr), maximum(xmr))) - - #true because of careful selection of data - self.assertTrue(eq(min(xr), minimum(xmr))) - - def test_testAddSumProd (self): - "Test add, sum, product." - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.assertTrue (eq(numpy.add.reduce(x), add.reduce(x))) - self.assertTrue (eq(numpy.add.accumulate(x), add.accumulate(x))) - self.assertTrue (eq(4, sum(array(4), axis=0))) - self.assertTrue (eq(4, sum(array(4), axis=0))) - self.assertTrue (eq(numpy.sum(x, axis=0), sum(x, axis=0))) - self.assertTrue (eq(numpy.sum(filled(xm, 0), axis=0), sum(xm, axis=0))) - self.assertTrue (eq(numpy.sum(x, 0), sum(x, 0))) - self.assertTrue (eq(numpy.product(x, axis=0), product(x, axis=0))) - self.assertTrue (eq(numpy.product(x, 0), product(x, 0))) - self.assertTrue (eq(numpy.product(filled(xm, 1), axis=0), - product(xm, axis=0))) - if len(s) > 1: - self.assertTrue (eq(numpy.concatenate((x, y), 1), - concatenate((xm, ym), 1))) - self.assertTrue (eq(numpy.add.reduce(x, 1), add.reduce(x, 1))) - self.assertTrue (eq(numpy.sum(x, 1), sum(x, 1))) - self.assertTrue (eq(numpy.product(x, 1), product(x, 1))) - - - def test_testCI(self): - "Test of conversions and indexing" - x1 = numpy.array([1, 2, 4, 3]) - x2 = array(x1, mask=[1, 0, 0, 0]) - x3 = array(x1, mask=[0, 1, 0, 1]) - x4 = array(x1) - # test conversion to strings - junk, garbage = str(x2), repr(x2) - assert 
eq(numpy.sort(x1), sort(x2, fill_value=0)) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] - assert x2[0] is masked - assert eq(x1[2], x2[2]) - assert eq(x1[2:5], x2[2:5]) - assert eq(x1[:], x2[:]) - assert eq(x1[1:], x3[1:]) - x1[2] = 9 - x2[2] = 9 - assert eq(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - assert eq(x1, x2) - x2[1] = masked - assert eq(x1, x2) - x2[1:3] = masked - assert eq(x1, x2) - x2[:] = x1 - x2[1] = masked - assert allequal(getmask(x2), array([0, 1, 0, 0])) - x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert allequal(getmask(x3), array([0, 1, 1, 0])) - x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert allequal(getmask(x4), array([0, 1, 1, 0])) - assert allequal(x4, array([1, 2, 3, 4])) - x1 = numpy.arange(5) * 1.0 - x2 = masked_values(x1, 3.0) - assert eq(x1, x2) - assert allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask) - assert eq(3.0, x2.fill_value) - x1 = array([1, 'hello', 2, 3], object) - x2 = numpy.array([1, 'hello', 2, 3], object) - s1 = x1[1] - s2 = x2[1] - self.assertEqual(type(s2), str) - self.assertEqual(type(s1), str) - self.assertEqual(s1, s2) - assert x1[1:1].shape == (0,) - - def test_testCopySize(self): - "Tests of some subtle points of copying and sizing." 
- n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - self.assertTrue(m is m2) - m3 = make_mask(m, copy=1) - self.assertTrue(m is not m3) - - x1 = numpy.arange(5) - y1 = array(x1, mask=m) - self.assertTrue(y1._data is not x1) - self.assertTrue(allequal(x1, y1._data)) - self.assertTrue(y1.mask is m) - - y1a = array(y1, copy=0) - self.assertTrue(y1a.mask is y1.mask) - - y2 = array(x1, mask=m, copy=0) - self.assertTrue(y2.mask is m) - self.assertTrue(y2[2] is masked) - y2[2] = 9 - self.assertTrue(y2[2] is not masked) - self.assertTrue(y2.mask is not m) - self.assertTrue(allequal(y2.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - self.assertTrue(eq(concatenate([x4, x4]), y4)) - self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) - y6 = repeat(x4, 2, axis=0) - self.assertTrue(eq(y5, y6)) - - def test_testPut(self): - "Test of put" - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - x = array(d, mask=m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) - x[[1, 4]] = [10, 40] - self.assertTrue(x.mask is not m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is not masked) - self.assertTrue(eq(x, [0, 10, 2, -1, 40])) - - x = array(d, mask=m) - x.put([0, 1, 2], [-1, 100, 200]) - self.assertTrue(eq(x, [-1, 100, 200, 0, 0])) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) - - def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] - i = numpy.nonzero(m)[0] - put(ym, i, zm) - assert all(take(ym, i, axis=0) == zm) - - def test_testOddFeatures(self): - "Test of other odd features" - x = arange(20); x = x.reshape(4, 5) - x.flat[5] = 12 - assert x[1, 0] == 12 - z = x + 10j * x - assert eq(z.real, x) - assert eq(z.imag, 10 * x) - assert eq((z * 
conjugate(z)).real, 101 * x * x) - z.imag[...] = 0.0 - - x = arange(10) - x[3] = masked - assert str(x[3]) == str(masked) - c = x >= 8 - assert count(where(c, masked, masked)) == 0 - assert shape(where(c, masked, masked)) == c.shape - z = where(c , x, masked) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is masked - assert z[7] is masked - assert z[8] is not masked - assert z[9] is not masked - assert eq(x, z) - z = where(c , masked, x) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is not masked - assert z[7] is not masked - assert z[8] is masked - assert z[9] is masked - z = masked_where(c, x) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is not masked - assert z[7] is not masked - assert z[8] is masked - assert z[9] is masked - assert eq(x, z) - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert eq(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert eq(z, [1., 2., 0., -4., -5]) - assert z[0] is masked - assert z[1] is not masked - assert z[2] is masked - assert eq(masked_where(greater(x, 2), x), masked_greater(x, 2)) - assert eq(masked_where(greater_equal(x, 2), x), - masked_greater_equal(x, 2)) - assert eq(masked_where(less(x, 2), x), masked_less(x, 2)) - assert eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)) - assert eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert eq(masked_where(equal(x, 2), x), masked_equal(x, 2)) - assert eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert eq(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4]) - assert eq(masked_outside(range(5), 1, 3), [199, 1, 2, 3, 199]) - assert eq(masked_inside(array(range(5), mask=[1, 0, 0, 0, 0]), 1, 3).mask, - [1, 1, 1, 1, 0]) - assert eq(masked_outside(array(range(5), mask=[0, 1, 0, 0, 0]), 1, 3).mask, - [1, 1, 0, 0, 1]) - assert eq(masked_equal(array(range(5), mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 
0]) - assert eq(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 1]) - assert eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5]) - atest = ones((10, 10, 10), dtype=float32) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest, atest) - assert eq(atest, ctest) - z = choose(c, (-x, x)) - assert eq(z, [1., 2., 0., -4., -5]) - assert z[0] is masked - assert z[1] is not masked - assert z[2] is masked - x = arange(6) - x[5] = masked - y = arange(6) * 10 - y[2] = masked - c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) - cm = c.filled(1) - z = where(c, x, y) - zm = where(cm, x, y) - assert eq(z, zm) - assert getmask(zm) is nomask - assert eq(zm, [0, 1, 2, 30, 40, 50]) - z = where(c, masked, 1) - assert eq(z, [99, 99, 99, 1, 1, 1]) - z = where(c, 1, masked) - assert eq(z, [99, 1, 1, 99, 99, 99]) - - def test_testMinMax(self): - "Test of minumum, maximum." - assert eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]) - assert eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]) - x = arange(5) - y = arange(5) - 2 - x[3] = masked - y[0] = masked - assert eq(minimum(x, y), where(less(x, y), x, y)) - assert eq(maximum(x, y), where(greater(x, y), x, y)) - assert minimum(x) == 0 - assert maximum(x) == 4 - - def test_testTakeTransposeInnerOuter(self): - "Test of take, transpose, inner, outer products" - x = arange(24) - y = numpy.arange(24) - x[5:6] = masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert eq(numpy.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) - assert eq(numpy.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) - assert eq(numpy.inner(filled(x, 0), filled(y, 0)), - inner(x, y)) - assert eq(numpy.outer(filled(x, 0), filled(y, 0)), - outer(x, y)) - y = array(['abc', 1, 'def', 2, 3], object) - y[2] = masked - t = take(y, [0, 3, 4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - - def test_testInplace(self): - """Test of inplace operations and rich comparisons""" - y = arange(10) 
- - x = arange(10) - xm = arange(10) - xm[2] = masked - x += 1 - assert eq(x, y + 1) - xm += 1 - assert eq(x, y + 1) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x -= 1 - assert eq(x, y - 1) - xm -= 1 - assert eq(xm, y - 1) - - x = arange(10) * 1.0 - xm = arange(10) * 1.0 - xm[2] = masked - x *= 2.0 - assert eq(x, y * 2) - xm *= 2.0 - assert eq(xm, y * 2) - - x = arange(10) * 2 - xm = arange(10) - xm[2] = masked - x /= 2 - assert eq(x, y) - xm /= 2 - assert eq(x, y) - - x = arange(10) * 1.0 - xm = arange(10) * 1.0 - xm[2] = masked - x /= 2.0 - assert eq(x, y / 2.0) - xm /= arange(10) - assert eq(xm, ones((10,))) - - x = arange(10).astype(float32) - xm = arange(10) - xm[2] = masked - x += 1. - assert eq(x, y + 1.) - - def test_testPickle(self): - "Test of pickling" - import pickle - x = arange(12) - x[4:10:2] = masked - x = x.reshape(4, 3) - s = pickle.dumps(x) - y = pickle.loads(s) - assert eq(x, y) - - def test_testMasked(self): - "Test of masked element" - xx = arange(6) - xx[1] = masked - self.assertTrue(str(masked) == '--') - self.assertTrue(xx[1] is masked) - self.assertEqual(filled(xx[1], 0), 0) - # don't know why these should raise an exception... - #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) - #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) - #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) - #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) - - def test_testAverage1(self): - "Test of average." 
- ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assertTrue(eq(2.0, average(ott, axis=0))) - self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) - result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assertTrue(eq(2.0, result)) - self.assertTrue(wts == 4.0) - ott[:] = masked - self.assertTrue(average(ott, axis=0) is masked) - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = masked - self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0])) - self.assertTrue(average(ott, axis=1)[0] is masked) - self.assertTrue(eq([2., 0.], average(ott, axis=0))) - result, wts = average(ott, axis=0, returned=1) - self.assertTrue(eq(wts, [1., 0.])) - - def test_testAverage2(self): - "More tests of average." - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = arange(6) - self.assertTrue(allclose(average(x, axis=0), 2.5)) - self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5)) - y = array([arange(6), 2.0 * arange(6)]) - self.assertTrue(allclose(average(y, None), - numpy.add.reduce(numpy.arange(6)) * 3. / 12.)) - self.assertTrue(allclose(average(y, axis=0), numpy.arange(6) * 3. / 2.)) - self.assertTrue(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0])) - self.assertTrue(allclose(average(y, None, weights=w2), 20. 
/ 6.)) - self.assertTrue(allclose(average(y, axis=0, weights=w2), - [0., 1., 2., 3., 4., 10.])) - self.assertTrue(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0])) - m1 = zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5)) - self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5)) - self.assertTrue(average(masked_array(x, m4), axis=0) is masked) - self.assertEqual(average(masked_array(x, m5), axis=0), 0.0) - self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0) - z = masked_array(y, m3) - self.assertTrue(allclose(average(z, None), 20. / 6.)) - self.assertTrue(allclose(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])) - self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0])) - self.assertTrue(allclose(average(z, axis=0, weights=w2), - [0., 1., 99., 99., 4.0, 10.0])) - - a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) - self.assertEqual(shape(r1) , shape(w1)) - self.assertEqual(r1.shape , w1.shape) - r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) - self.assertEqual(shape(w2) , shape(r2)) - r2, w2 = average(ones((2, 2, 3)), returned=1) - self.assertEqual(shape(w2) , shape(r2)) - r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) - self.assertTrue(shape(w2) == shape(r2)) - a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - a2da = average(a2d, axis=0) - self.assertTrue(eq (a2da, [0.5, 3.0])) - a2dma = average(a2dm, axis=0) - self.assertTrue(eq(a2dma, [1.0, 3.0])) - a2dma = average(a2dm, axis=None) - self.assertTrue(eq(a2dma, 7. 
/ 3.)) - a2dma = average(a2dm, axis=1) - self.assertTrue(eq(a2dma, [1.5, 4.0])) - - def test_testToPython(self): - self.assertEqual(1, int(array(1))) - self.assertEqual(1.0, float(array(1))) - self.assertEqual(1, int(array([[[1]]]))) - self.assertEqual(1.0, float(array([[1]]))) - self.assertRaises(TypeError, float, array([1, 1])) - self.assertRaises(ValueError, bool, array([0, 1])) - self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1])) - - def test_testScalarArithmetic(self): - xm = array(0, mask=1) - #TODO FIXME: Find out what the following raises a warning in r8247 - err_status = numpy.geterr() - try: - numpy.seterr(divide='ignore') - self.assertTrue((1 / array(0)).mask) - finally: - numpy.seterr(**err_status) - self.assertTrue((1 + xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue(maximum(xm, xm).mask) - self.assertTrue(minimum(xm, xm).mask) - self.assertTrue(xm.filled().dtype is xm._data.dtype) - x = array(0, mask=0) - self.assertTrue(x.filled() == x._data) - self.assertEqual(str(xm), str(masked_print_option)) - - def test_testArrayMethods(self): - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - self.assertTrue(eq(a.any(), a._data.any())) - self.assertTrue(eq(a.all(), a._data.all())) - self.assertTrue(eq(a.argmax(), a._data.argmax())) - self.assertTrue(eq(a.argmin(), a._data.argmin())) - self.assertTrue(eq(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))) - self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) - self.assertTrue(eq(a.conj(), a._data.conj())) - self.assertTrue(eq(a.conjugate(), a._data.conjugate())) - m = array([[1, 2], [3, 4]]) - self.assertTrue(eq(m.diagonal(), m._data.diagonal())) - self.assertTrue(eq(a.sum(), a._data.sum())) - self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2]))) - self.assertTrue(eq(m.transpose(), m._data.transpose())) - - def test_testArrayAttributes(self): - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - 
self.assertEqual(a.ndim, 1) - - def test_testAPI(self): - self.assertFalse([m for m in dir(numpy.ndarray) - if m not in dir(MaskedArray) and not m.startswith('_')]) - - def test_testSingleElementSubscript(self): - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - self.assertEqual(a[0].shape, ()) - self.assertEqual(b[0].shape, ()) - self.assertEqual(b[1].shape, ()) - -class TestUfuncs(TestCase): - def setUp(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), - array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - - - def test_testUfuncRegression(self): - f_invalid_ignore = ['sqrt', 'arctanh', 'arcsin', 'arccos', - 'arccosh', 'arctanh', 'log', 'log10', 'divide', - 'true_divide', 'floor_divide', 'remainder', 'fmod'] - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', - 'sin', 'cos', 'tan', - 'arcsin', 'arccos', 'arctan', - 'sinh', 'cosh', 'tanh', - 'arcsinh', - 'arccosh', - 'arctanh', - 'absolute', 'fabs', 'negative', - # 'nonzero', 'around', - 'floor', 'ceil', - # 'sometrue', 'alltrue', - 'logical_not', - 'add', 'subtract', 'multiply', - 'divide', 'true_divide', 'floor_divide', - 'remainder', 'fmod', 'hypot', 'arctan2', - 'equal', 'not_equal', 'less_equal', 'greater_equal', - 'less', 'greater', - 'logical_and', 'logical_or', 'logical_xor', - ]: - try: - uf = getattr(umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(numpy.ma, f) - args = self.d[:uf.nin] - olderr = numpy.geterr() - try: - if f in f_invalid_ignore: - numpy.seterr(invalid='ignore') - if f in ['arctanh', 'log', 'log10']: - numpy.seterr(divide='ignore') - ur = uf(*args) - mr = mf(*args) - finally: - numpy.seterr(**olderr) - self.assertTrue(eq(ur.filled(0), mr.filled(0), f)) - self.assertTrue(eqmask(ur.mask, mr.mask)) - - def test_reduce(self): - a = self.d[0] - self.assertFalse(alltrue(a, axis=0)) - self.assertTrue(sometrue(a, axis=0)) - self.assertEqual(sum(a[:3], axis=0), 0) - self.assertEqual(product(a, axis=0), 0) - - def 
test_minmax(self): - a = arange(1, 13).reshape(3, 4) - amask = masked_where(a < 5, a) - self.assertEqual(amask.max(), a.max()) - self.assertEqual(amask.min(), 5) - self.assertTrue((amask.max(0) == a.max(0)).all()) - self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all()) - self.assertTrue(amask.max(1)[0].mask) - self.assertTrue(amask.min(1)[0].mask) - - def test_nonzero(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) - self.assertTrue(eq(nonzero(x), [0])) - - -class TestArrayMethods(TestCase): - - def setUp(self): - x = numpy.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = numpy.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = numpy.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX) - - #------------------------------------------------------ - def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXdiag = mX.diagonal() - self.assertEqual(mX.trace(), mX.diagonal().compressed().sum()) - self.assertTrue(eq(mX.trace(), - X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0))) - - def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - clipped = mx.clip(2, 8) - self.assertTrue(eq(clipped.mask, mx.mask)) - self.assertTrue(eq(clipped._data, x.clip(2, 8))) - self.assertTrue(eq(clipped._data, 
mx._data.clip(2, 8))) - - def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape - self.assertEqual(mx.ptp(), mx.compressed().ptp()) - rows = numpy.zeros(n, numpy.float_) - cols = numpy.zeros(m, numpy.float_) - for k in range(m): - cols[k] = mX[:, k].compressed().ptp() - for k in range(n): - rows[k] = mX[k].compressed().ptp() - self.assertTrue(eq(mX.ptp(0), cols)) - self.assertTrue(eq(mX.ptp(1), rows)) - - def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXswapped = mX.swapaxes(0, 1) - self.assertTrue(eq(mXswapped[-1], mX[:, -1])) - mXXswapped = mXX.swapaxes(0, 2) - self.assertEqual(mXXswapped.shape, (2, 2, 3, 3)) - - - def test_cumprod(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXcp = mX.cumprod(0) - self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0))) - mXcp = mX.cumprod(1) - self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1))) - - def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXcp = mX.cumsum(0) - self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0))) - mXcp = mX.cumsum(1) - self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1))) - - def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - self.assertTrue(eq(mX.var(axis=None), mX.compressed().var())) - self.assertTrue(eq(mX.std(axis=None), mX.compressed().std())) - self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) - self.assertTrue(eq(mX.var().shape, X.var().shape)) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - for k in range(6): - self.assertTrue(eq(mXvar1[k], mX[k].compressed().var())) - self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var())) - self.assertTrue(eq(numpy.sqrt(mXvar0[k]), - mX[:, k].compressed().std())) - - -def eqmask(m1, m2): - if m1 is nomask: - return m2 is nomask - if m2 is nomask: - return m1 is nomask - return (m1 == m2).all() - -#def timingTest(): -# for f in [testf, testinplace]: -# for n in [1000,10000,50000]: -# t = testta(n, f) -# t1 = testtb(n, f) -# t2 = testtc(n, f) -# print 
f.test_name -# print """\ -#n = %7d -#numpy time (ms) %6.1f -#MA maskless ratio %6.1f -#MA masked ratio %6.1f -#""" % (n, t*1000.0, t1/t, t2/t) - -#def testta(n, f): -# x=numpy.arange(n) + 1.0 -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testtb(n, f): -# x=arange(n) + 1.0 -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testtc(n, f): -# x=arange(n) + 1.0 -# x[0] = masked -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testf(x): -# for i in range(25): -# y = x **2 + 2.0 * x - 1.0 -# w = x **2 + 1.0 -# z = (y / w) ** 2 -# return z -#testf.test_name = 'Simple arithmetic' - -#def testinplace(x): -# for i in range(25): -# y = x**2 -# y += 2.0*x -# y -= 1.0 -# y /= x -# return y -#testinplace.test_name = 'Inplace operations' - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/ma/tests/test_regression.py b/numpy-1.6.2/numpy/ma/tests/test_regression.py deleted file mode 100644 index 62e4ee0ae5..0000000000 --- a/numpy-1.6.2/numpy/ma/tests/test_regression.py +++ /dev/null @@ -1,49 +0,0 @@ -from numpy.testing import * -import numpy as np - -rlevel = 1 - -class TestRegression(TestCase): - def test_masked_array_create(self,level=rlevel): - """Ticket #17""" - x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0]) - assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]]) - - def test_masked_array(self,level=rlevel): - """Ticket #61""" - x = np.ma.array(1,mask=[1]) - - def test_mem_masked_where(self,level=rlevel): - """Ticket #62""" - from numpy.ma import masked_where, MaskType - a = np.zeros((1,1)) - b = np.zeros(a.shape, MaskType) - c = masked_where(b,a) - a-c - - def test_masked_array_multiply(self,level=rlevel): - """Ticket #254""" - a = np.ma.zeros((4,1)) - a[2,0] = np.ma.masked - b = np.zeros((4,2)) - a*b - b*a - - def test_masked_array_repeat(self, level=rlevel): - """Ticket #271""" - np.ma.array([1],mask=False).repeat(10) - - def test_masked_array_repr_unicode(self): - 
"""Ticket #1256""" - repr(np.ma.array(u"Unicode")) - - def test_atleast_2d(self): - """Ticket #1559""" - a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) - b = np.atleast_2d(a) - assert_(a.mask.ndim == 1) - assert_(b.mask.ndim == 2) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/ma/tests/test_subclassing.py b/numpy-1.6.2/numpy/ma/tests/test_subclassing.py deleted file mode 100644 index fb72ca773f..0000000000 --- a/numpy-1.6.2/numpy/ma/tests/test_subclassing.py +++ /dev/null @@ -1,181 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import numpy as np -from numpy.testing import * -from numpy.ma.testutils import * -from numpy.ma.core import * - -class SubArray(np.ndarray): - """Defines a generic np.ndarray subclass, that stores some metadata - in the dictionary `info`.""" - def __new__(cls,arr,info={}): - x = np.asanyarray(arr).view(cls) - x.info = info - return x - def __array_finalize__(self, obj): - self.info = getattr(obj,'info',{}) - return - def __add__(self, other): - result = np.ndarray.__add__(self, other) - result.info.update({'added':result.info.pop('added',0)+1}) - return result - -subarray = SubArray - -class MSubArray(SubArray,MaskedArray): - def __new__(cls, data, info={}, mask=nomask): - subarr = SubArray(data, info) - _data = MaskedArray.__new__(cls, data=subarr, mask=mask) - _data.info = subarr.info - return _data - def __array_finalize__(self,obj): - MaskedArray.__array_finalize__(self,obj) - SubArray.__array_finalize__(self, obj) - return - def _get_series(self): - _view = 
self.view(MaskedArray) - _view._sharedmask = False - return _view - _series = property(fget=_get_series) - -msubarray = MSubArray - -class MMatrix(MaskedArray, np.matrix,): - def __new__(cls, data, mask=nomask): - mat = np.matrix(data) - _data = MaskedArray.__new__(cls, data=mat, mask=mask) - return _data - def __array_finalize__(self,obj): - np.matrix.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self,obj) - return - def _get_series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - _series = property(fget=_get_series) - -mmatrix = MMatrix - -class TestSubclassing(TestCase): - """Test suite for masked subclasses of ndarray.""" - - def setUp(self): - x = np.arange(5) - mx = mmatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) - - def test_data_subclassing(self): - "Tests whether the subclass is kept." - x = np.arange(5) - m = [0,0,1,0,0] - xsub = SubArray(x) - xmsub = masked_array(xsub, mask=m) - self.assertTrue(isinstance(xmsub, MaskedArray)) - assert_equal(xmsub._data, xsub) - self.assertTrue(isinstance(xmsub._data, SubArray)) - - def test_maskedarray_subclassing(self): - "Tests subclassing MaskedArray" - (x, mx) = self.data - self.assertTrue(isinstance(mx._data, np.matrix)) - - def test_masked_unary_operations(self): - "Tests masked_unary_operation" - (x, mx) = self.data - olderr = np.seterr(divide='ignore') - try: - self.assertTrue(isinstance(log(mx), mmatrix)) - assert_equal(log(x), np.log(x)) - finally: - np.seterr(**olderr) - - def test_masked_binary_operations(self): - "Tests masked_binary_operation" - (x, mx) = self.data - # Result should be a mmatrix - self.assertTrue(isinstance(add(mx,mx), mmatrix)) - self.assertTrue(isinstance(add(mx,x), mmatrix)) - # Result should work - assert_equal(add(mx,x), mx+x) - self.assertTrue(isinstance(add(mx,mx)._data, np.matrix)) - self.assertTrue(isinstance(add.outer(mx,mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx,mx), mmatrix)) - 
self.assertTrue(isinstance(hypot(mx,x), mmatrix)) - - def test_masked_binary_operations(self): - "Tests domained_masked_binary_operation" - (x, mx) = self.data - xmx = masked_array(mx.data.__array__(), mask=mx.mask) - self.assertTrue(isinstance(divide(mx,mx), mmatrix)) - self.assertTrue(isinstance(divide(mx,x), mmatrix)) - assert_equal(divide(mx, mx), divide(xmx, xmx)) - - def test_attributepropagation(self): - x = array(arange(5), mask=[0]+[1]*4) - my = masked_array(subarray(x)) - ym = msubarray(x) - # - z = (my+1) - self.assertTrue(isinstance(z,MaskedArray)) - self.assertTrue(not isinstance(z, MSubArray)) - self.assertTrue(isinstance(z._data, SubArray)) - assert_equal(z._data.info, {}) - # - z = (ym+1) - self.assertTrue(isinstance(z, MaskedArray)) - self.assertTrue(isinstance(z, MSubArray)) - self.assertTrue(isinstance(z._data, SubArray)) - self.assertTrue(z._data.info['added'] > 0) - # - ym._set_mask([1,0,0,0,1]) - assert_equal(ym._mask, [1,0,0,0,1]) - ym._series._set_mask([0,0,0,0,1]) - assert_equal(ym._mask, [0,0,0,0,1]) - # - xsub = subarray(x, info={'name':'x'}) - mxsub = masked_array(xsub) - self.assertTrue(hasattr(mxsub, 'info')) - assert_equal(mxsub.info, xsub.info) - - def test_subclasspreservation(self): - "Checks that masked_array(...,subok=True) preserves the class." 
- x = np.arange(5) - m = [0,0,1,0,0] - xinfo = [(i,j) for (i,j) in zip(x,m)] - xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) - # - mxsub = masked_array(xsub, subok=False) - self.assertTrue(not isinstance(mxsub, MSubArray)) - self.assertTrue(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = asarray(xsub) - self.assertTrue(not isinstance(mxsub, MSubArray)) - self.assertTrue(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = masked_array(xsub, subok=True) - self.assertTrue(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, xsub._mask) - # - mxsub = asanyarray(xsub) - self.assertTrue(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, m) - - -################################################################################ -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.6.2/numpy/ma/testutils.py b/numpy-1.6.2/numpy/ma/testutils.py deleted file mode 100644 index 5cfc9f2ab2..0000000000 --- a/numpy-1.6.2/numpy/ma/testutils.py +++ /dev/null @@ -1,236 +0,0 @@ -"""Miscellaneous functions for testing masked arrays and subclasses - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = "1.0" -__revision__ = "$Revision: 3529 $" -__date__ = "$Date: 2007-11-13 10:01:14 +0200 (Tue, 13 Nov 2007) $" - - -import operator - -import numpy as np -from numpy import ndarray, float_ -import numpy.core.umath as umath -from numpy.testing import * -import numpy.testing.utils as utils - -from core import mask_or, getmask, masked_array, nomask, masked, filled, \ - equal, less - -#------------------------------------------------------------------------------ -def approx (a, b, fill_value=True, rtol=1e-5, atol=1e-8): - """Returns true if all components of a and 
b are equal subject to given tolerances. - -If fill_value is True, masked values considered equal. Otherwise, masked values -are considered unequal. -The relative error rtol should be positive and << 1.0 -The absolute error atol comes into play for those elements of b that are very -small or zero; it says how small a must be also. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - if d1.dtype.char == "O" or d2.dtype.char == "O": - return np.equal(d1, d2).ravel() - x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) - y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) - d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) - return d.ravel() - - -def almost(a, b, decimal=6, fill_value=True): - """Returns True if a and b are equal up to decimal places. -If fill_value is True, masked values considered equal. Otherwise, masked values -are considered unequal. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - if d1.dtype.char == "O" or d2.dtype.char == "O": - return np.equal(d1, d2).ravel() - x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) - y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) - d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) - return d.ravel() - - -#................................................ -def _assert_equal_on_sequences(actual, desired, err_msg=''): - "Asserts the equality of two non-array sequences." - assert_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) - return - -def assert_equal_records(a, b): - """Asserts that two records are equal. 
Pretty crude for now.""" - assert_equal(a.dtype, b.dtype) - for f in a.dtype.names: - (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) - if not (af is masked) and not (bf is masked): - assert_equal(operator.getitem(a, f), operator.getitem(b, f)) - return - - -def assert_equal(actual, desired, err_msg=''): - "Asserts that two items are equal." - # Case #1: dictionary ..... - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): - if not k in actual: - raise AssertionError("%s not in %s" % (k, actual)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) - return - # Case #2: lists ..... - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - return _assert_equal_on_sequences(actual, desired, err_msg='') - if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): - msg = build_err_msg([actual, desired], err_msg,) - if not desired == actual: - raise AssertionError(msg) - return - # Case #4. 
arrays or equivalent - if ((actual is masked) and not (desired is masked)) or \ - ((desired is masked) and not (actual is masked)): - msg = build_err_msg([actual, desired], - err_msg, header='', names=('x', 'y')) - raise ValueError(msg) - actual = np.array(actual, copy=False, subok=True) - desired = np.array(desired, copy=False, subok=True) - (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) - if actual_dtype.char == "S" and desired_dtype.char == "S": - return _assert_equal_on_sequences(actual.tolist(), - desired.tolist(), - err_msg='') -# elif actual_dtype.char in "OV" and desired_dtype.char in "OV": -# if (actual_dtype != desired_dtype) and actual_dtype: -# msg = build_err_msg([actual_dtype, desired_dtype], -# err_msg, header='', names=('actual', 'desired')) -# raise ValueError(msg) -# return _assert_equal_on_sequences(actual.tolist(), -# desired.tolist(), -# err_msg='') - return assert_array_equal(actual, desired, err_msg) - - -def fail_if_equal(actual, desired, err_msg='',): - """Raises an assertion error if two items are equal. 
- """ - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): - if not k in actual: - raise AssertionError(repr(k)) - fail_if_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) - return - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - fail_if_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - fail_if_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) - return - if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): - return fail_if_array_equal(actual, desired, err_msg) - msg = build_err_msg([actual, desired], err_msg) - if not desired != actual: - raise AssertionError(msg) -assert_not_equal = fail_if_equal - - -def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): - """Asserts that two items are almost equal. - The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal) - """ - if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): - return assert_array_almost_equal(actual, desired, decimal=decimal, - err_msg=err_msg, verbose=verbose) - msg = build_err_msg([actual, desired], - err_msg=err_msg, verbose=verbose) - if not round(abs(desired - actual), decimal) == 0: - raise AssertionError(msg) - - -assert_close = assert_almost_equal - - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', - fill_value=True): - """Asserts that a comparison relation between two masked arrays is satisfied - elementwise.""" - # Fill the data first -# xf = filled(x) -# yf = filled(y) - # Allocate a common mask and refill - m = mask_or(getmask(x), getmask(y)) - x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) - y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) - if ((x is masked) and not (y is masked)) or \ - ((y is masked) and not (x is masked)): - msg = 
build_err_msg([x, y], err_msg=err_msg, verbose=verbose, - header=header, names=('x', 'y')) - raise ValueError(msg) - # OK, now run the basic tests on filled versions - return utils.assert_array_compare(comparison, - x.filled(fill_value), - y.filled(fill_value), - err_msg=err_msg, - verbose=verbose, header=header) - - -def assert_array_equal(x, y, err_msg='', verbose=True): - """Checks the elementwise equality of two masked arrays.""" - assert_array_compare(operator.__eq__, x, y, - err_msg=err_msg, verbose=verbose, - header='Arrays are not equal') - - -def fail_if_array_equal(x, y, err_msg='', verbose=True): - "Raises an assertion error if two masked arrays are not equal (elementwise)." - def compare(x, y): - return (not np.alltrue(approx(x, y))) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not equal') - - -def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): - """Checks the elementwise equality of two masked arrays, up to a given - number of decimals.""" - def compare(x, y): - "Returns the result of the loose comparison between x and y)." - return approx(x, y, rtol=10. ** -decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not almost equal') - - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """Checks the elementwise equality of two masked arrays, up to a given - number of decimals.""" - def compare(x, y): - "Returns the result of the loose comparison between x and y)." - return almost(x, y, decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not almost equal') - - -def assert_array_less(x, y, err_msg='', verbose=True): - "Checks that x is smaller than y elementwise." 
- assert_array_compare(operator.__lt__, x, y, - err_msg=err_msg, verbose=verbose, - header='Arrays are not less-ordered') - - -def assert_mask_equal(m1, m2, err_msg=''): - """Asserts the equality of two masks.""" - if m1 is nomask: - assert(m2 is nomask) - if m2 is nomask: - assert(m1 is nomask) - assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/numpy-1.6.2/numpy/ma/timer_comparison.py b/numpy-1.6.2/numpy/ma/timer_comparison.py deleted file mode 100644 index 57cccf0b40..0000000000 --- a/numpy-1.6.2/numpy/ma/timer_comparison.py +++ /dev/null @@ -1,458 +0,0 @@ -import timeit - -import sys -import numpy as np -from numpy import float_ -import np.core.fromnumeric as fromnumeric - -from np.testing.utils import build_err_msg - -np.seterr(all='ignore') - -pi = np.pi - -if sys.version_info[0] >= 3: - from functools import reduce - -class moduletester: - def __init__(self, module): - self.module = module - self.allequal = module.allequal - self.arange = module.arange - self.array = module.array -# self.average = module.average - self.concatenate = module.concatenate - self.count = module.count - self.equal = module.equal - self.filled = module.filled - self.getmask = module.getmask - self.getmaskarray = module.getmaskarray - self.id = id - self.inner = module.inner - self.make_mask = module.make_mask - self.masked = module.masked - self.masked_array = module.masked_array - self.masked_values = module.masked_values - self.mask_or = module.mask_or - self.nomask = module.nomask - self.ones = module.ones - self.outer = module.outer - self.repeat = module.repeat - self.resize = module.resize - self.sort = module.sort - self.take = module.take - self.transpose = module.transpose - self.zeros = module.zeros - self.MaskType = module.MaskType - try: - self.umath = module.umath - except AttributeError: - self.umath = module.core.umath - self.testnames = [] - - def assert_array_compare(self, comparison, x, y, err_msg='', header='', - fill_value=True): - """Asserts that a 
comparison relation between two masked arrays is satisfied - elementwise.""" - xf = self.filled(x) - yf = self.filled(y) - m = self.mask_or(self.getmask(x), self.getmask(y)) - - x = self.filled(self.masked_array(xf, mask=m), fill_value) - y = self.filled(self.masked_array(yf, mask=m), fill_value) - if (x.dtype.char != "O"): - x = x.astype(float_) - if isinstance(x, np.ndarray) and x.size > 1: - x[np.isnan(x)] = 0 - elif np.isnan(x): - x = 0 - if (y.dtype.char != "O"): - y = y.astype(float_) - if isinstance(y, np.ndarray) and y.size > 1: - y[np.isnan(y)] = 0 - elif np.isnan(y): - y = 0 - try: - cond = (x.shape==() or y.shape==()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + '\n(shapes %s, %s mismatch)' % (x.shape, - y.shape), - header=header, - names=('x', 'y')) - assert cond, msg - val = comparison(x,y) - if m is not self.nomask and fill_value: - val = self.masked_array(val, mask=m) - if isinstance(val, bool): - cond = val - reduced = [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % (match,), - header=header, - names=('x', 'y')) - assert cond, msg - except ValueError: - msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) - raise ValueError(msg) - - def assert_array_equal(self, x, y, err_msg=''): - """Checks the elementwise equality of two masked arrays.""" - self.assert_array_compare(self.equal, x, y, err_msg=err_msg, - header='Arrays are not equal') - - def test_0(self): - "Tests creation" - x = np.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - xm = self.masked_array(x, mask=m) - xm[0] - - def test_1(self): - "Tests creation" - x = np.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. 
- m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1] - xm = self.masked_array(x, mask=m1) - ym = self.masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = self.masked_array(z, mask=[0,1,0,0]) - xf = np.where(m1, 1.e+20, x) - xm.set_fill_value(1.e+20) - - assert((xm-ym).filled(0).any()) - #fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) - s = x.shape - assert(xm.size == reduce(lambda x,y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x,y:x+y, m1)) - - for s in [(4,3), (6,2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - - assert(self.count(xm) == len(m1) - reduce(lambda x,y:x+y, m1)) - - def test_2(self): - "Tests conversions and indexing" - x1 = np.array([1,2,4,3]) - x2 = self.array(x1, mask=[1,0,0,0]) - x3 = self.array(x1, mask=[0,1,0,1]) - x4 = self.array(x1) - # test conversion to strings - junk, garbage = str(x2), repr(x2) -# assert_equal(np.sort(x1), self.sort(x2, fill_value=0)) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] -# assert self.allequal(x1[2],x2[2]) -# assert self.allequal(x1[2:5],x2[2:5]) -# assert self.allequal(x1[:],x2[:]) -# assert self.allequal(x1[1:], x3[1:]) - x1[2] = 9 - x2[2] = 9 - self.assert_array_equal(x1,x2) - x1[1:3] = 99 - x2[1:3] = 99 -# assert self.allequal(x1,x2) - x2[1] = self.masked -# assert self.allequal(x1,x2) - x2[1:3] = self.masked -# assert self.allequal(x1,x2) - x2[:] = x1 - x2[1] = self.masked -# assert self.allequal(self.getmask(x2),self.array([0,1,0,0])) - x3[:] = self.masked_array([1,2,3,4],[0,1,1,0]) -# assert self.allequal(self.getmask(x3), self.array([0,1,1,0])) - x4[:] = self.masked_array([1,2,3,4],[0,1,1,0]) -# assert self.allequal(self.getmask(x4), self.array([0,1,1,0])) -# assert self.allequal(x4, self.array([1,2,3,4])) - x1 = np.arange(5)*1.0 - x2 = self.masked_values(x1, 3.0) -# assert self.allequal(x1,x2) -# assert self.allequal(self.array([0,0,0,1,0], self.MaskType), 
x2.mask) - x1 = self.array([1,'hello',2,3],object) - x2 = np.array([1,'hello',2,3],object) - s1 = x1[1] - s2 = x2[1] - assert x1[1:1].shape == (0,) - # Tests copy-size - n = [0,0,1,0,0] - m = self.make_mask(n) - m2 = self.make_mask(m) - assert(m is m2) - m3 = self.make_mask(m, copy=1) - assert(m is not m3) - - - def test_3(self): - "Tests resize/repeat" - x4 = self.arange(4) - x4[2] = self.masked - y4 = self.resize(x4, (8,)) - assert self.allequal(self.concatenate([x4,x4]), y4) - assert self.allequal(self.getmask(y4),[0,0,1,0,0,0,1,0]) - y5 = self.repeat(x4, (2,2,2,2), axis=0) - self.assert_array_equal(y5, [0,0,1,1,2,2,3,3]) - y6 = self.repeat(x4, 2, axis=0) - assert self.allequal(y5, y6) - y7 = x4.repeat((2,2,2,2), axis=0) - assert self.allequal(y5,y7) - y8 = x4.repeat(2,0) - assert self.allequal(y5,y8) - - #---------------------------------- - def test_4(self): - "Test of take, transpose, inner, outer products" - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2,3,4) - y = y.reshape(2,3,4) - assert self.allequal(np.transpose(y,(2,0,1)), self.transpose(x,(2,0,1))) - assert self.allequal(np.take(y, (2,0,1), 1), self.take(x, (2,0,1), 1)) - assert self.allequal(np.inner(self.filled(x,0), self.filled(y,0)), - self.inner(x, y)) - assert self.allequal(np.outer(self.filled(x,0), self.filled(y,0)), - self.outer(x, y)) - y = self.array(['abc', 1, 'def', 2, 3], object) - y[2] = self.masked - t = self.take(y,[0,3,4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - #---------------------------------- - def test_5(self): - "Tests inplace w/ scalar" - - x = self.arange(10) - y = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x += 1 - assert self.allequal(x, y+1) - xm += 1 - assert self.allequal(xm, y+1) - - x = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x -= 1 - assert self.allequal(x, y-1) - xm -= 1 - assert self.allequal(xm, y-1) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = 
self.masked - x *= 2.0 - assert self.allequal(x, y*2) - xm *= 2.0 - assert self.allequal(xm, y*2) - - x = self.arange(10)*2 - xm = self.arange(10)*2 - xm[2] = self.masked - x /= 2 - assert self.allequal(x, y) - xm /= 2 - assert self.allequal(xm, y) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x /= 2.0 - assert self.allequal(x, y/2.0) - xm /= self.arange(10) - self.assert_array_equal(xm, self.ones((10,))) - - x = self.arange(10).astype(float_) - xm = self.arange(10) - xm[2] = self.masked - id1 = self.id(x.raw_data()) - x += 1. - #assert id1 == self.id(x.raw_data()) - assert self.allequal(x, y+1.) - - - def test_6(self): - "Tests inplace w/ array" - - x = self.arange(10, dtype=float_) - y = self.arange(10) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x += a - xm += a - assert self.allequal(x,y+a) - assert self.allequal(xm,y+a) - assert self.allequal(xm.mask, self.mask_or(m,a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x -= a - xm -= a - assert self.allequal(x,y-a) - assert self.allequal(xm,y-a) - assert self.allequal(xm.mask, self.mask_or(m,a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x *= a - xm *= a - assert self.allequal(x,y*a) - assert self.allequal(xm,y*a) - assert self.allequal(xm.mask, self.mask_or(m,a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x /= a - xm /= a - - #---------------------------------- - def test_7(self): - "Tests ufunc" - d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0,1]+[0]*6), - self.array([1.0, 0, -1, pi/2]*2, 
mask=[1,0]+[0]*6),) - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', -# 'sin', 'cos', 'tan', -# 'arcsin', 'arccos', 'arctan', -# 'sinh', 'cosh', 'tanh', -# 'arcsinh', -# 'arccosh', -# 'arctanh', -# 'absolute', 'fabs', 'negative', -# # 'nonzero', 'around', -# 'floor', 'ceil', -# # 'sometrue', 'alltrue', -# 'logical_not', -# 'add', 'subtract', 'multiply', -# 'divide', 'true_divide', 'floor_divide', -# 'remainder', 'fmod', 'hypot', 'arctan2', -# 'equal', 'not_equal', 'less_equal', 'greater_equal', -# 'less', 'greater', -# 'logical_and', 'logical_or', 'logical_xor', - ]: - #print f - try: - uf = getattr(self.umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(self.module, f) - args = d[:uf.nin] - ur = uf(*args) - mr = mf(*args) - self.assert_array_equal(ur.filled(0), mr.filled(0), f) - self.assert_array_equal(ur._mask, mr._mask) - - #---------------------------------- - def test_99(self): - # test average - ott = self.array([0.,1.,2.,3.], mask=[1,0,0,0]) - self.assert_array_equal(2.0, self.average(ott,axis=0)) - self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) - result, wts = self.average(ott, weights=[1.,1.,2.,1.], returned=1) - self.assert_array_equal(2.0, result) - assert(wts == 4.0) - ott[:] = self.masked - assert(self.average(ott,axis=0) is self.masked) - ott = self.array([0.,1.,2.,3.], mask=[1,0,0,0]) - ott = ott.reshape(2,2) - ott[:,1] = self.masked - self.assert_array_equal(self.average(ott,axis=0), [2.0, 0.0]) - assert(self.average(ott,axis=1)[0] is self.masked) - self.assert_array_equal([2.,0.], self.average(ott, axis=0)) - result, wts = self.average(ott, axis=0, returned=1) - self.assert_array_equal(wts, [1., 0.]) - w1 = [0,1,1,1,1,0] - w2 = [[0,1,1,1,1,0],[1,0,0,0,0,1]] - x = self.arange(6) - self.assert_array_equal(self.average(x, axis=0), 2.5) - self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) - y = self.array([self.arange(6), 2.0*self.arange(6)]) - 
self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) - self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) - self.assert_array_equal(self.average(y, axis=1), [self.average(x,axis=0), self.average(x,axis=0) * 2.0]) - self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) - self.assert_array_equal(self.average(y, axis=0, weights=w2), [0.,1.,2.,3.,4.,10.]) - self.assert_array_equal(self.average(y, axis=1), [self.average(x,axis=0), self.average(x,axis=0) * 2.0]) - m1 = self.zeros(6) - m2 = [0,0,1,1,0,0] - m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]] - m4 = self.ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assert_array_equal(self.average(self.masked_array(x, m1),axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m2),axis=0), 2.5) - # assert(self.average(masked_array(x, m4),axis=0) is masked) - self.assert_array_equal(self.average(self.masked_array(x, m5),axis=0), 0.0) - self.assert_array_equal(self.count(self.average(self.masked_array(x, m4),axis=0)), 0) - z = self.masked_array(y, m3) - self.assert_array_equal(self.average(z, None), 20./6.) 
- self.assert_array_equal(self.average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5]) - self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) - self.assert_array_equal(self.average(z,axis=0, weights=w2), [0.,1., 99., 99., 4.0, 10.0]) - #------------------------ - def test_A(self): - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2,3,4) - - -################################################################################ -if __name__ == '__main__': - - setup_base = "from __main__ import moduletester \n"\ - "import numpy\n" \ - "tester = moduletester(module)\n" -# setup_new = "import np.ma.core_ini as module\n"+setup_base - setup_cur = "import np.ma.core as module\n"+setup_base -# setup_alt = "import np.ma.core_alt as module\n"+setup_base -# setup_tmp = "import np.ma.core_tmp as module\n"+setup_base - - (nrepeat, nloop) = (10, 10) - - if 1: - for i in range(1,8): - func = 'tester.test_%i()' % i -# new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10) - cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) -# alt = timeit.Timer(func, setup_alt).repeat(nrepeat, nloop*10) -# tmp = timeit.Timer(func, setup_tmp).repeat(nrepeat, nloop*10) -# new = np.sort(new) - cur = np.sort(cur) -# alt = np.sort(alt) -# tmp = np.sort(tmp) - print "#%i" % i +50*'.' 
- print eval("moduletester.test_%i.__doc__" % i) -# print "core_ini : %.3f - %.3f" % (new[0], new[1]) - print "core_current : %.3f - %.3f" % (cur[0], cur[1]) -# print "core_alt : %.3f - %.3f" % (alt[0], alt[1]) -# print "core_tmp : %.3f - %.3f" % (tmp[0], tmp[1]) diff --git a/numpy-1.6.2/numpy/ma/version.py b/numpy-1.6.2/numpy/ma/version.py deleted file mode 100644 index 7a925f1a85..0000000000 --- a/numpy-1.6.2/numpy/ma/version.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Version number""" - -version = '1.00' -release = False - -if not release: - import core - import extras - revision = [core.__revision__.split(':')[-1][:-1].strip(), - extras.__revision__.split(':')[-1][:-1].strip(),] - version += '.dev%04i' % max([int(rev) for rev in revision]) diff --git a/numpy-1.6.2/numpy/matlib.py b/numpy-1.6.2/numpy/matlib.py deleted file mode 100644 index f55f763c3d..0000000000 --- a/numpy-1.6.2/numpy/matlib.py +++ /dev/null @@ -1,356 +0,0 @@ -import numpy as np -from numpy.matrixlib.defmatrix import matrix, asmatrix -# need * as we're copying the numpy namespace -from numpy import * - -__version__ = np.__version__ - -__all__ = np.__all__[:] # copy numpy namespace -__all__ += ['rand', 'randn', 'repmat'] - -def empty(shape, dtype=None, order='C'): - """ - Return a new matrix of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty matrix. - dtype : data-type, optional - Desired output data-type. - order : {'C', 'F'}, optional - Whether to store multi-dimensional data in C (row-major) or - Fortran (column-major) order in memory. - - See Also - -------- - empty_like, zeros - - Notes - ----- - `empty`, unlike `zeros`, does not set the matrix values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. 
- - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.empty((2, 2)) # filled with random data - matrix([[ 6.76425276e-320, 9.79033856e-307], - [ 7.39337286e-309, 3.22135945e-309]]) #random - >>> np.matlib.empty((2, 2), dtype=int) - matrix([[ 6600475, 0], - [ 6586976, 22740995]]) #random - - """ - return ndarray.__new__(matrix, shape, dtype, order=order) - -def ones(shape, dtype=None, order='C'): - """ - Matrix of ones. - - Return a matrix of given shape and type, filled with ones. - - Parameters - ---------- - shape : {sequence of ints, int} - Shape of the matrix - dtype : data-type, optional - The desired data-type for the matrix, default is np.float64. - order : {'C', 'F'}, optional - Whether to store matrix in C- or Fortran-contiguous order, - default is 'C'. - - Returns - ------- - out : matrix - Matrix of ones of given shape, dtype, and order. - - See Also - -------- - ones : Array of ones. - matlib.zeros : Zero matrix. - - Notes - ----- - If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, - `out` becomes a single row matrix of shape ``(1,N)``. - - Examples - -------- - >>> np.matlib.ones((2,3)) - matrix([[ 1., 1., 1.], - [ 1., 1., 1.]]) - - >>> np.matlib.ones(2) - matrix([[ 1., 1.]]) - - """ - a = ndarray.__new__(matrix, shape, dtype, order=order) - a.fill(1) - return a - -def zeros(shape, dtype=None, order='C'): - """ - Return a matrix of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the matrix - dtype : data-type, optional - The desired data-type for the matrix, default is float. - order : {'C', 'F'}, optional - Whether to store the result in C- or Fortran-contiguous order, - default is 'C'. - - Returns - ------- - out : matrix - Zero matrix of given shape, dtype, and order. - - See Also - -------- - numpy.zeros : Equivalent array function. - matlib.ones : Return a matrix of ones. - - Notes - ----- - If `shape` has length one i.e. 
``(N,)``, or is a scalar ``N``, - `out` becomes a single row matrix of shape ``(1,N)``. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.zeros((2, 3)) - matrix([[ 0., 0., 0.], - [ 0., 0., 0.]]) - - >>> np.matlib.zeros(2) - matrix([[ 0., 0.]]) - - """ - a = ndarray.__new__(matrix, shape, dtype, order=order) - a.fill(0) - return a - -def identity(n,dtype=None): - """ - Returns the square identity matrix of given size. - - Parameters - ---------- - n : int - Size of the returned identity matrix. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. - - Returns - ------- - out : matrix - `n` x `n` matrix with its main diagonal set to one, - and all other elements zero. - - See Also - -------- - numpy.identity : Equivalent array function. - matlib.eye : More general matrix identity function. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.identity(3, dtype=int) - matrix([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) - - """ - a = array([1]+n*[0],dtype=dtype) - b = empty((n,n),dtype=dtype) - b.flat = a - return b - -def eye(n,M=None, k=0, dtype=float): - """ - Return a matrix with ones on the diagonal and zeros elsewhere. - - Parameters - ---------- - n : int - Number of rows in the output. - M : int, optional - Number of columns in the output, defaults to `n`. - k : int, optional - Index of the diagonal: 0 refers to the main diagonal, - a positive value refers to an upper diagonal, - and a negative value to a lower diagonal. - dtype : dtype, optional - Data-type of the returned matrix. - - Returns - ------- - I : matrix - A `n` x `M` matrix where all elements are equal to zero, - except for the `k`-th diagonal, whose values are equal to one. - - See Also - -------- - numpy.eye : Equivalent array function. - identity : Square identity matrix. 
- - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.eye(3, k=1, dtype=float) - matrix([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) - - """ - return asmatrix(np.eye(n,M,k,dtype)) - -def rand(*args): - """ - Return a matrix of random values with given shape. - - Create a matrix of the given shape and propagate it with - random samples from a uniform distribution over ``[0, 1)``. - - Parameters - ---------- - \\*args : Arguments - Shape of the output. - If given as N integers, each integer specifies the size of one - dimension. - If given as a tuple, this tuple gives the complete shape. - - Returns - ------- - out : ndarray - The matrix of random values with shape given by `\\*args`. - - See Also - -------- - randn, numpy.random.rand - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.rand(2, 3) - matrix([[ 0.68340382, 0.67926887, 0.83271405], - [ 0.00793551, 0.20468222, 0.95253525]]) #random - >>> np.matlib.rand((2, 3)) - matrix([[ 0.84682055, 0.73626594, 0.11308016], - [ 0.85429008, 0.3294825 , 0.89139555]]) #random - - If the first argument is a tuple, other arguments are ignored: - - >>> np.matlib.rand((2, 3), 4) - matrix([[ 0.46898646, 0.15163588, 0.95188261], - [ 0.59208621, 0.09561818, 0.00583606]]) #random - - """ - if isinstance(args[0], tuple): - args = args[0] - return asmatrix(np.random.rand(*args)) - -def randn(*args): - """ - Return a random matrix with data from the "standard normal" distribution. - - `randn` generates a matrix filled with random floats sampled from a - univariate "normal" (Gaussian) distribution of mean 0 and variance 1. - - Parameters - ---------- - \\*args : Arguments - Shape of the output. - If given as N integers, each integer specifies the size of one - dimension. If given as a tuple, this tuple gives the complete shape. - - Returns - ------- - Z : matrix of floats - A matrix of floating-point samples drawn from the standard normal - distribution. 
- - See Also - -------- - rand, random.randn - - Notes - ----- - For random samples from :math:`N(\\mu, \\sigma^2)`, use: - - ``sigma * np.matlib.randn(...) + mu`` - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.randn(1) - matrix([[-0.09542833]]) #random - >>> np.matlib.randn(1, 2, 3) - matrix([[ 0.16198284, 0.0194571 , 0.18312985], - [-0.7509172 , 1.61055 , 0.45298599]]) #random - - Two-by-four matrix of samples from :math:`N(3, 6.25)`: - - >>> 2.5 * np.matlib.randn((2, 4)) + 3 - matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922], - [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random - - """ - if isinstance(args[0], tuple): - args = args[0] - return asmatrix(np.random.randn(*args)) - -def repmat(a, m, n): - """ - Repeat a 0-D to 2-D array or matrix MxN times. - - Parameters - ---------- - a : array_like - The array or matrix to be repeated. - m, n : int - The number of times `a` is repeated along the first and second axes. - - Returns - ------- - out : ndarray - The result of repeating `a`. 
- - Examples - -------- - >>> import numpy.matlib - >>> a0 = np.array(1) - >>> np.matlib.repmat(a0, 2, 3) - array([[1, 1, 1], - [1, 1, 1]]) - - >>> a1 = np.arange(4) - >>> np.matlib.repmat(a1, 2, 2) - array([[0, 1, 2, 3, 0, 1, 2, 3], - [0, 1, 2, 3, 0, 1, 2, 3]]) - - >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) - >>> np.matlib.repmat(a2, 2, 3) - matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5, 3, 4, 5], - [0, 1, 2, 0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5, 3, 4, 5]]) - - """ - a = asanyarray(a) - ndim = a.ndim - if ndim == 0: - origrows, origcols = (1,1) - elif ndim == 1: - origrows, origcols = (1, a.shape[0]) - else: - origrows, origcols = a.shape - rows = origrows * m - cols = origcols * n - c = a.reshape(1,a.size).repeat(m, 0).reshape(rows, origcols).repeat(n,0) - return c.reshape(rows, cols) diff --git a/numpy-1.6.2/numpy/matrixlib/__init__.py b/numpy-1.6.2/numpy/matrixlib/__init__.py deleted file mode 100644 index 468a8829d5..0000000000 --- a/numpy-1.6.2/numpy/matrixlib/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Sub-package containing the matrix class and related functions.""" -from defmatrix import * - -__all__ = defmatrix.__all__ - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/matrixlib/defmatrix.py b/numpy-1.6.2/numpy/matrixlib/defmatrix.py deleted file mode 100644 index a5aa84f6d0..0000000000 --- a/numpy-1.6.2/numpy/matrixlib/defmatrix.py +++ /dev/null @@ -1,1074 +0,0 @@ -__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] - -import sys -import numpy.core.numeric as N -from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray -from numpy.core.numerictypes import issubdtype - -# make translation table -_numchars = '0123456789.-+jeEL' - -if sys.version_info[0] >= 3: - class _NumCharTable: - def __getitem__(self, i): - if chr(i) in _numchars: - return chr(i) - else: - return None - _table = _NumCharTable() - def _eval(astr): - return 
eval(astr.translate(_table)) -else: - _table = [None]*256 - for k in range(256): - _table[k] = chr(k) - _table = ''.join(_table) - - _todelete = [] - for k in _table: - if k not in _numchars: - _todelete.append(k) - _todelete = ''.join(_todelete) - del k - - def _eval(astr): - return eval(astr.translate(_table,_todelete)) - -def _convert_from_string(data): - rows = data.split(';') - newdata = [] - count = 0 - for row in rows: - trow = row.split(',') - newrow = [] - for col in trow: - temp = col.split() - newrow.extend(map(_eval,temp)) - if count == 0: - Ncols = len(newrow) - elif len(newrow) != Ncols: - raise ValueError, "Rows not the same size." - count += 1 - newdata.append(newrow) - return newdata - -def asmatrix(data, dtype=None): - """ - Interpret the input as a matrix. - - Unlike `matrix`, `asmatrix` does not make a copy if the input is already - a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. - - Parameters - ---------- - data : array_like - Input data. - - Returns - ------- - mat : matrix - `data` interpreted as a matrix. - - Examples - -------- - >>> x = np.array([[1, 2], [3, 4]]) - - >>> m = np.asmatrix(x) - - >>> x[0,0] = 5 - - >>> m - matrix([[5, 2], - [3, 4]]) - - """ - return matrix(data, dtype=dtype, copy=False) - -def matrix_power(M,n): - """ - Raise a square matrix to the (integer) power `n`. - - For positive integers `n`, the power is computed by repeated matrix - squarings and matrix multiplications. If ``n == 0``, the identity matrix - of the same shape as M is returned. If ``n < 0``, the inverse - is computed and then raised to the ``abs(n)``. - - Parameters - ---------- - M : ndarray or matrix object - Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``, - with `m` a positive integer. - n : int - The exponent can be any integer or long integer, positive, - negative, or zero. 
- - Returns - ------- - M**n : ndarray or matrix object - The return value is the same shape and type as `M`; - if the exponent is positive or zero then the type of the - elements is the same as those of `M`. If the exponent is - negative the elements are floating-point. - - Raises - ------ - LinAlgError - If the matrix is not numerically invertible. - - See Also - -------- - matrix - Provides an equivalent function as the exponentiation operator - (``**``, not ``^``). - - Examples - -------- - >>> from numpy import linalg as LA - >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit - >>> LA.matrix_power(i, 3) # should = -i - array([[ 0, -1], - [ 1, 0]]) - >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix - matrix([[ 0, -1], - [ 1, 0]]) - >>> LA.matrix_power(i, 0) - array([[1, 0], - [0, 1]]) - >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements - array([[ 0., 1.], - [-1., 0.]]) - - Somewhat more sophisticated example - - >>> q = np.zeros((4, 4)) - >>> q[0:2, 0:2] = -i - >>> q[2:4, 2:4] = i - >>> q # one of the three quarternion units not equal to 1 - array([[ 0., -1., 0., 0.], - [ 1., 0., 0., 0.], - [ 0., 0., 0., 1.], - [ 0., 0., -1., 0.]]) - >>> LA.matrix_power(q, 2) # = -np.eye(4) - array([[-1., 0., 0., 0.], - [ 0., -1., 0., 0.], - [ 0., 0., -1., 0.], - [ 0., 0., 0., -1.]]) - - """ - M = asanyarray(M) - if len(M.shape) != 2 or M.shape[0] != M.shape[1]: - raise ValueError("input must be a square array") - if not issubdtype(type(n),int): - raise TypeError("exponent must be an integer") - - from numpy.linalg import inv - - if n==0: - M = M.copy() - M[:] = identity(M.shape[0]) - return M - elif n<0: - M = inv(M) - n *= -1 - - result = M - if n <= 3: - for _ in range(n-1): - result=N.dot(result,M) - return result - - # binary decomposition to reduce the number of Matrix - # multiplications for n > 3. 
- beta = binary_repr(n) - Z,q,t = M,0,len(beta) - while beta[t-q-1] == '0': - Z = N.dot(Z,Z) - q += 1 - result = Z - for k in range(q+1,t): - Z = N.dot(Z,Z) - if beta[t-k-1] == '1': - result = N.dot(result,Z) - return result - - -class matrix(N.ndarray): - """ - matrix(data, dtype=None, copy=True) - - Returns a matrix from an array-like object, or from a string of data. - A matrix is a specialized 2-D array that retains its 2-D nature - through operations. It has certain special operators, such as ``*`` - (matrix multiplication) and ``**`` (matrix power). - - Parameters - ---------- - data : array_like or string - If `data` is a string, it is interpreted as a matrix with commas - or spaces separating columns, and semicolons separating rows. - dtype : data-type - Data-type of the output matrix. - copy : bool - If `data` is already an `ndarray`, then this flag determines - whether the data is copied (the default), or whether a view is - constructed. - - See Also - -------- - array - - Examples - -------- - >>> a = np.matrix('1 2; 3 4') - >>> print a - [[1 2] - [3 4]] - - >>> np.matrix([[1, 2], [3, 4]]) - matrix([[1, 2], - [3, 4]]) - - """ - __array_priority__ = 10.0 - def __new__(subtype, data, dtype=None, copy=True): - if isinstance(data, matrix): - dtype2 = data.dtype - if (dtype is None): - dtype = dtype2 - if (dtype2 == dtype) and (not copy): - return data - return data.astype(dtype) - - if isinstance(data, N.ndarray): - if dtype is None: - intype = data.dtype - else: - intype = N.dtype(dtype) - new = data.view(subtype) - if intype != data.dtype: - return new.astype(intype) - if copy: return new.copy() - else: return new - - if isinstance(data, str): - data = _convert_from_string(data) - - # now convert data to an array - arr = N.array(data, dtype=dtype, copy=copy) - ndim = arr.ndim - shape = arr.shape - if (ndim > 2): - raise ValueError, "matrix must be 2-dimensional" - elif ndim == 0: - shape = (1,1) - elif ndim == 1: - shape = (1,shape[0]) - - order = False - 
if (ndim == 2) and arr.flags.fortran: - order = True - - if not (order or arr.flags.contiguous): - arr = arr.copy() - - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) - return ret - - def __array_finalize__(self, obj): - self._getitem = False - if (isinstance(obj, matrix) and obj._getitem): return - ndim = self.ndim - if (ndim == 2): - return - if (ndim > 2): - newshape = tuple([x for x in self.shape if x > 1]) - ndim = len(newshape) - if ndim == 2: - self.shape = newshape - return - elif (ndim > 2): - raise ValueError, "shape too large to be a matrix." - else: - newshape = self.shape - if ndim == 0: - self.shape = (1,1) - elif ndim == 1: - self.shape = (1,newshape[0]) - return - - def __getitem__(self, index): - self._getitem = True - - try: - out = N.ndarray.__getitem__(self, index) - finally: - self._getitem = False - - if not isinstance(out, N.ndarray): - return out - - if out.ndim == 0: - return out[()] - if out.ndim == 1: - sh = out.shape[0] - # Determine when we should have a column array - try: - n = len(index) - except: - n = 0 - if n > 1 and isscalar(index[1]): - out.shape = (sh,1) - else: - out.shape = (1,sh) - return out - - def __mul__(self, other): - if isinstance(other,(N.ndarray, list, tuple)) : - # This promotes 1-D vectors to row vectors - return N.dot(self, asmatrix(other)) - if isscalar(other) or not hasattr(other, '__rmul__') : - return N.dot(self, other) - return NotImplemented - - def __rmul__(self, other): - return N.dot(other, self) - - def __imul__(self, other): - self[:] = self * other - return self - - def __pow__(self, other): - return matrix_power(self, other) - - def __ipow__(self, other): - self[:] = self ** other - return self - - def __rpow__(self, other): - return NotImplemented - - def __repr__(self): - s = repr(self.__array__()).replace('array', 'matrix') - # now, 'matrix' has 6 letters, and 'array' 5, so the columns don't - # line up anymore. We need to add a space. 
- l = s.splitlines() - for i in range(1, len(l)): - if l[i]: - l[i] = ' ' + l[i] - return '\n'.join(l) - - def __str__(self): - return str(self.__array__()) - - def _align(self, axis): - """A convenience function for operations that need to preserve axis - orientation. - """ - if axis is None: - return self[0,0] - elif axis==0: - return self - elif axis==1: - return self.transpose() - else: - raise ValueError, "unsupported axis" - - # Necessary because base-class tolist expects dimension - # reduction by x[0] - def tolist(self): - """ - Return the matrix as a (possibly nested) list. - - See `ndarray.tolist` for full documentation. - - See Also - -------- - ndarray.tolist - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.tolist() - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] - - """ - return self.__array__().tolist() - - # To preserve orientation of result... - def sum(self, axis=None, dtype=None, out=None): - """ - Returns the sum of the matrix elements, along the given axis. - - Refer to `numpy.sum` for full documentation. - - See Also - -------- - numpy.sum - - Notes - ----- - This is the same as `ndarray.sum`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. - - Examples - -------- - >>> x = np.matrix([[1, 2], [4, 3]]) - >>> x.sum() - 10 - >>> x.sum(axis=1) - matrix([[3], - [7]]) - >>> x.sum(axis=1, dtype='float') - matrix([[ 3.], - [ 7.]]) - >>> out = np.zeros((1, 2), dtype='float') - >>> x.sum(axis=1, dtype='float', out=out) - matrix([[ 3.], - [ 7.]]) - - """ - return N.ndarray.sum(self, axis, dtype, out)._align(axis) - - def mean(self, axis=None, dtype=None, out=None): - """ - Returns the average of the matrix elements along the given axis. - - Refer to `numpy.mean` for full documentation. 
- - See Also - -------- - numpy.mean - - Notes - ----- - Same as `ndarray.mean` except that, where that returns an `ndarray`, - this returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.mean() - 5.5 - >>> x.mean(0) - matrix([[ 4., 5., 6., 7.]]) - >>> x.mean(1) - matrix([[ 1.5], - [ 5.5], - [ 9.5]]) - - """ - return N.ndarray.mean(self, axis, dtype, out)._align(axis) - - def std(self, axis=None, dtype=None, out=None, ddof=0): - """ - Return the standard deviation of the array elements along the given axis. - - Refer to `numpy.std` for full documentation. - - See Also - -------- - numpy.std - - Notes - ----- - This is the same as `ndarray.std`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.std() - 3.4520525295346629 - >>> x.std(0) - matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) - >>> x.std(1) - matrix([[ 1.11803399], - [ 1.11803399], - [ 1.11803399]]) - - """ - return N.ndarray.std(self, axis, dtype, out, ddof)._align(axis) - - def var(self, axis=None, dtype=None, out=None, ddof=0): - """ - Returns the variance of the matrix elements, along the given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var - - Notes - ----- - This is the same as `ndarray.var`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. 
- - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.var() - 11.916666666666666 - >>> x.var(0) - matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) - >>> x.var(1) - matrix([[ 1.25], - [ 1.25], - [ 1.25]]) - - """ - return N.ndarray.var(self, axis, dtype, out, ddof)._align(axis) - - def prod(self, axis=None, dtype=None, out=None): - """ - Return the product of the array elements over the given axis. - - Refer to `prod` for full documentation. - - See Also - -------- - prod, ndarray.prod - - Notes - ----- - Same as `ndarray.prod`, except, where that returns an `ndarray`, this - returns a `matrix` object instead. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.prod() - 0 - >>> x.prod(0) - matrix([[ 0, 45, 120, 231]]) - >>> x.prod(1) - matrix([[ 0], - [ 840], - [7920]]) - - """ - return N.ndarray.prod(self, axis, dtype, out)._align(axis) - - def any(self, axis=None, out=None): - """ - Test whether any array element along a given axis evaluates to True. - - Refer to `numpy.any` for full documentation. - - Parameters - ---------- - axis: int, optional - Axis along which logical OR is performed - out: ndarray, optional - Output to existing array instead of creating new one, must have - same shape as expected output - - Returns - ------- - any : bool, ndarray - Returns a single bool if `axis` is ``None``; otherwise, - returns `ndarray` - - """ - return N.ndarray.any(self, axis, out)._align(axis) - - def all(self, axis=None, out=None): - """ - Test whether all matrix elements along a given axis evaluate to True. - - Parameters - ---------- - See `numpy.all` for complete descriptions - - See Also - -------- - numpy.all - - Notes - ----- - This is the same as `ndarray.all`, but it returns a `matrix` object. 
- - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> y = x[0]; y - matrix([[0, 1, 2, 3]]) - >>> (x == y) - matrix([[ True, True, True, True], - [False, False, False, False], - [False, False, False, False]], dtype=bool) - >>> (x == y).all() - False - >>> (x == y).all(0) - matrix([[False, False, False, False]], dtype=bool) - >>> (x == y).all(1) - matrix([[ True], - [False], - [False]], dtype=bool) - - """ - return N.ndarray.all(self, axis, out)._align(axis) - - def max(self, axis=None, out=None): - """ - Return the maximum value along an axis. - - Parameters - ---------- - See `amax` for complete descriptions - - See Also - -------- - amax, ndarray.max - - Notes - ----- - This is the same as `ndarray.max`, but returns a `matrix` object - where `ndarray.max` would return an ndarray. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.max() - 11 - >>> x.max(0) - matrix([[ 8, 9, 10, 11]]) - >>> x.max(1) - matrix([[ 3], - [ 7], - [11]]) - - """ - return N.ndarray.max(self, axis, out)._align(axis) - - def argmax(self, axis=None, out=None): - """ - Indices of the maximum values along an axis. - - Parameters - ---------- - See `numpy.argmax` for complete descriptions - - See Also - -------- - numpy.argmax - - Notes - ----- - This is the same as `ndarray.argmax`, but returns a `matrix` object - where `ndarray.argmax` would return an `ndarray`. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.argmax() - 11 - >>> x.argmax(0) - matrix([[2, 2, 2, 2]]) - >>> x.argmax(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.argmax(self, axis, out)._align(axis) - - def min(self, axis=None, out=None): - """ - Return the minimum value along an axis. 
- - Parameters - ---------- - See `amin` for complete descriptions. - - See Also - -------- - amin, ndarray.min - - Notes - ----- - This is the same as `ndarray.min`, but returns a `matrix` object - where `ndarray.min` would return an ndarray. - - Examples - -------- - >>> x = -np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, -1, -2, -3], - [ -4, -5, -6, -7], - [ -8, -9, -10, -11]]) - >>> x.min() - -11 - >>> x.min(0) - matrix([[ -8, -9, -10, -11]]) - >>> x.min(1) - matrix([[ -3], - [ -7], - [-11]]) - - """ - return N.ndarray.min(self, axis, out)._align(axis) - - def argmin(self, axis=None, out=None): - """ - Return the indices of the minimum values along an axis. - - Parameters - ---------- - See `numpy.argmin` for complete descriptions. - - See Also - -------- - numpy.argmin - - Notes - ----- - This is the same as `ndarray.argmin`, but returns a `matrix` object - where `ndarray.argmin` would return an `ndarray`. - - Examples - -------- - >>> x = -np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, -1, -2, -3], - [ -4, -5, -6, -7], - [ -8, -9, -10, -11]]) - >>> x.argmin() - 11 - >>> x.argmin(0) - matrix([[2, 2, 2, 2]]) - >>> x.argmin(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.argmin(self, axis, out)._align(axis) - - def ptp(self, axis=None, out=None): - """ - Peak-to-peak (maximum - minimum) value along the given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp - - Notes - ----- - Same as `ndarray.ptp`, except, where that would return an `ndarray` object, - this returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.ptp() - 11 - >>> x.ptp(0) - matrix([[8, 8, 8, 8]]) - >>> x.ptp(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.ptp(self, axis, out)._align(axis) - - def getI(self): - """ - Returns the (multiplicative) inverse of invertible `self`. 
- - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - If `self` is non-singular, `ret` is such that ``ret * self`` == - ``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return - ``True``. - - Raises - ------ - numpy.linalg.linalg.LinAlgError: Singular matrix - If `self` is singular. - - See Also - -------- - linalg.inv - - Examples - -------- - >>> m = np.matrix('[1, 2; 3, 4]'); m - matrix([[1, 2], - [3, 4]]) - >>> m.getI() - matrix([[-2. , 1. ], - [ 1.5, -0.5]]) - >>> m.getI() * m - matrix([[ 1., 0.], - [ 0., 1.]]) - - """ - M,N = self.shape - if M == N: - from numpy.dual import inv as func - else: - from numpy.dual import pinv as func - return asmatrix(func(self)) - - def getA(self): - """ - Return `self` as an `ndarray` object. - - Equivalent to ``np.asarray(self)``. - - Parameters - ---------- - None - - Returns - ------- - ret : ndarray - `self` as an `ndarray` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.getA() - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - """ - return self.__array__() - - def getA1(self): - """ - Return `self` as a flattened `ndarray`. - - Equivalent to ``np.asarray(x).ravel()`` - - Parameters - ---------- - None - - Returns - ------- - ret : ndarray - `self`, 1-D, as an `ndarray` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.getA1() - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - - """ - return self.__array__().ravel() - - def getT(self): - """ - Returns the transpose of the matrix. - - Does *not* conjugate! For the complex conjugate transpose, use `getH`. - - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - The (non-conjugated) transpose of the matrix. 
- - See Also - -------- - transpose, getH - - Examples - -------- - >>> m = np.matrix('[1, 2; 3, 4]') - >>> m - matrix([[1, 2], - [3, 4]]) - >>> m.getT() - matrix([[1, 3], - [2, 4]]) - - """ - return self.transpose() - - def getH(self): - """ - Returns the (complex) conjugate transpose of `self`. - - Equivalent to ``np.transpose(self)`` if `self` is real-valued. - - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - complex conjugate transpose of `self` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))) - >>> z = x - 1j*x; z - matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], - [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], - [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) - >>> z.getH() - matrix([[ 0. +0.j, 4. +4.j, 8. +8.j], - [ 1. +1.j, 5. +5.j, 9. +9.j], - [ 2. +2.j, 6. +6.j, 10.+10.j], - [ 3. +3.j, 7. +7.j, 11.+11.j]]) - - """ - if issubclass(self.dtype.type, N.complexfloating): - return self.transpose().conjugate() - else: - return self.transpose() - - T = property(getT, None, doc="transpose") - A = property(getA, None, doc="base array") - A1 = property(getA1, None, doc="1-d base array") - H = property(getH, None, doc="hermitian (conjugate) transpose") - I = property(getI, None, doc="inverse") - -def _from_string(str,gdict,ldict): - rows = str.split(';') - rowtup = [] - for row in rows: - trow = row.split(',') - newrow = [] - for x in trow: - newrow.extend(x.split()) - trow = newrow - coltup = [] - for col in trow: - col = col.strip() - try: - thismat = ldict[col] - except KeyError: - try: - thismat = gdict[col] - except KeyError: - raise KeyError, "%s not found" % (col,) - - coltup.append(thismat) - rowtup.append(concatenate(coltup,axis=-1)) - return concatenate(rowtup,axis=0) - - -def bmat(obj, ldict=None, gdict=None): - """ - Build a matrix object from a string, nested sequence, or array. - - Parameters - ---------- - obj : str or array_like - Input data. 
Names of variables in the current scope may be - referenced, even if `obj` is a string. - - Returns - ------- - out : matrix - Returns a matrix object, which is a specialized 2-D array. - - See Also - -------- - matrix - - Examples - -------- - >>> A = np.mat('1 1; 1 1') - >>> B = np.mat('2 2; 2 2') - >>> C = np.mat('3 4; 5 6') - >>> D = np.mat('7 8; 9 0') - - All the following expressions construct the same block matrix: - - >>> np.bmat([[A, B], [C, D]]) - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - >>> np.bmat('A,B; C,D') - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - - """ - if isinstance(obj, str): - if gdict is None: - # get previous frame - frame = sys._getframe().f_back - glob_dict = frame.f_globals - loc_dict = frame.f_locals - else: - glob_dict = gdict - loc_dict = ldict - - return matrix(_from_string(obj, glob_dict, loc_dict)) - - if isinstance(obj, (tuple, list)): - # [[A,B],[C,D]] - arr_rows = [] - for row in obj: - if isinstance(row, N.ndarray): # not 2-d - return matrix(concatenate(obj,axis=-1)) - else: - arr_rows.append(concatenate(row,axis=-1)) - return matrix(concatenate(arr_rows,axis=0)) - if isinstance(obj, N.ndarray): - return matrix(obj) - -mat = asmatrix diff --git a/numpy-1.6.2/numpy/matrixlib/setup.py b/numpy-1.6.2/numpy/matrixlib/setup.py deleted file mode 100644 index 85b090094e..0000000000 --- a/numpy-1.6.2/numpy/matrixlib/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -import os - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matrixlib', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git 
a/numpy-1.6.2/numpy/matrixlib/setupscons.py b/numpy-1.6.2/numpy/matrixlib/setupscons.py deleted file mode 100644 index 85b090094e..0000000000 --- a/numpy-1.6.2/numpy/matrixlib/setupscons.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -import os - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matrixlib', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git a/numpy-1.6.2/numpy/matrixlib/tests/test_defmatrix.py b/numpy-1.6.2/numpy/matrixlib/tests/test_defmatrix.py deleted file mode 100644 index ccb68f0e71..0000000000 --- a/numpy-1.6.2/numpy/matrixlib/tests/test_defmatrix.py +++ /dev/null @@ -1,376 +0,0 @@ -from numpy.testing import * -from numpy.core import * -from numpy import matrix, asmatrix, bmat -from numpy.matrixlib.defmatrix import matrix_power -from numpy.matrixlib import mat -import numpy as np - -class TestCtor(TestCase): - def test_basic(self): - A = array([[1,2],[3,4]]) - mA = matrix(A) - assert all(mA.A == A) - - B = bmat("A,A;A,A") - C = bmat([[A,A], [A,A]]) - D = array([[1,2,1,2], - [3,4,3,4], - [1,2,1,2], - [3,4,3,4]]) - assert all(B.A == D) - assert all(C.A == D) - - E = array([[5,6],[7,8]]) - AEresult = matrix([[1,2,5,6],[3,4,7,8]]) - assert all(bmat([A,E]) == AEresult) - - vec = arange(5) - mvec = matrix(vec) - assert mvec.shape == (1,5) - - def test_bmat_nondefault_str(self): - A = array([[1,2],[3,4]]) - B = array([[5,6],[7,8]]) - Aresult = array([[1,2,1,2], - [3,4,3,4], - [1,2,1,2], - [3,4,3,4]]) - Bresult = array([[5,6,5,6], - [7,8,7,8], - [5,6,5,6], - [7,8,7,8]]) - mixresult = array([[1,2,5,6], - [3,4,7,8], - [5,6,1,2], - [7,8,3,4]]) - assert all(bmat("A,A;A,A") == Aresult) - assert all(bmat("A,A;A,A",ldict={'A':B}) == Aresult) - assert_raises(TypeError, bmat, "A,A;A,A",gdict={'A':B}) 
- assert all(bmat("A,A;A,A",ldict={'A':A},gdict={'A':B}) == Aresult) - b2 = bmat("A,B;C,D",ldict={'A':A,'B':B},gdict={'C':B,'D':A}) - assert all(b2 == mixresult) - - -class TestProperties(TestCase): - def test_sum(self): - """Test whether matrix.sum(axis=1) preserves orientation. - Fails in NumPy <= 0.9.6.2127. - """ - M = matrix([[1,2,0,0], - [3,4,0,0], - [1,2,1,2], - [3,4,3,4]]) - sum0 = matrix([8,12,4,6]) - sum1 = matrix([3,7,6,14]).T - sumall = 30 - assert_array_equal(sum0, M.sum(axis=0)) - assert_array_equal(sum1, M.sum(axis=1)) - assert sumall == M.sum() - - - def test_prod(self): - x = matrix([[1,2,3],[4,5,6]]) - assert x.prod() == 720 - assert all(x.prod(0) == matrix([[4,10,18]])) - assert all(x.prod(1) == matrix([[6],[120]])) - - y = matrix([0,1,3]) - assert y.prod() == 0 - - def test_max(self): - x = matrix([[1,2,3],[4,5,6]]) - assert x.max() == 6 - assert all(x.max(0) == matrix([[4,5,6]])) - assert all(x.max(1) == matrix([[3],[6]])) - - def test_min(self): - x = matrix([[1,2,3],[4,5,6]]) - assert x.min() == 1 - assert all(x.min(0) == matrix([[1,2,3]])) - assert all(x.min(1) == matrix([[1],[4]])) - - def test_ptp(self): - x = np.arange(4).reshape((2,2)) - assert x.ptp() == 3 - assert all(x.ptp(0) == array([2, 2])) - assert all(x.ptp(1) == array([1, 1])) - - def test_var(self): - x = np.arange(9).reshape((3,3)) - mx = x.view(np.matrix) - assert_equal(x.var(ddof=0), mx.var(ddof=0)) - assert_equal(x.var(ddof=1), mx.var(ddof=1)) - - def test_basic(self): - import numpy.linalg as linalg - - A = array([[1., 2.], - [3., 4.]]) - mA = matrix(A) - assert allclose(linalg.inv(A), mA.I) - assert all(array(transpose(A) == mA.T)) - assert all(array(transpose(A) == mA.H)) - assert all(A == mA.A) - - B = A + 2j*A - mB = matrix(B) - assert allclose(linalg.inv(B), mB.I) - assert all(array(transpose(B) == mB.T)) - assert all(array(conjugate(transpose(B)) == mB.H)) - - def test_pinv(self): - x = matrix(arange(6).reshape(2,3)) - xpinv = matrix([[-0.77777778, 0.27777778], - 
[-0.11111111, 0.11111111], - [ 0.55555556, -0.05555556]]) - assert_almost_equal(x.I, xpinv) - - def test_comparisons(self): - A = arange(100).reshape(10,10) - mA = matrix(A) - mB = matrix(A) + 0.1 - assert all(mB == A+0.1) - assert all(mB == matrix(A+0.1)) - assert not any(mB == matrix(A-0.1)) - assert all(mA < mB) - assert all(mA <= mB) - assert all(mA <= mA) - assert not any(mA < mA) - - assert not any(mB < mA) - assert all(mB >= mA) - assert all(mB >= mB) - assert not any(mB > mB) - - assert all(mA == mA) - assert not any(mA == mB) - assert all(mB != mA) - - assert not all(abs(mA) > 0) - assert all(abs(mB > 0)) - - def test_asmatrix(self): - A = arange(100).reshape(10,10) - mA = asmatrix(A) - A[0,0] = -10 - assert A[0,0] == mA[0,0] - - def test_noaxis(self): - A = matrix([[1,0],[0,1]]) - assert A.sum() == matrix(2) - assert A.mean() == matrix(0.5) - - def test_repr(self): - A = matrix([[1,0],[0,1]]) - assert repr(A) == "matrix([[1, 0],\n [0, 1]])" - -class TestCasting(TestCase): - def test_basic(self): - A = arange(100).reshape(10,10) - mA = matrix(A) - - mB = mA.copy() - O = ones((10,10), float64) * 0.1 - mB = mB + O - assert mB.dtype.type == float64 - assert all(mA != mB) - assert all(mB == mA+0.1) - - mC = mA.copy() - O = ones((10,10), complex128) - mC = mC * O - assert mC.dtype.type == complex128 - assert all(mA != mB) - - -class TestAlgebra(TestCase): - def test_basic(self): - import numpy.linalg as linalg - - A = array([[1., 2.], - [3., 4.]]) - mA = matrix(A) - - B = identity(2) - for i in xrange(6): - assert allclose((mA ** i).A, B) - B = dot(B, A) - - Ainv = linalg.inv(A) - B = identity(2) - for i in xrange(6): - assert allclose((mA ** -i).A, B) - B = dot(B, Ainv) - - assert allclose((mA * mA).A, dot(A, A)) - assert allclose((mA + mA).A, (A + A)) - assert allclose((3*mA).A, (3*A)) - - mA2 = matrix(A) - mA2 *= 3 - assert allclose(mA2.A, 3*A) - - def test_pow(self): - """Test raising a matrix to an integer power works as expected.""" - m = matrix("1. 
2.; 3. 4.") - m2 = m.copy() - m2 **= 2 - mi = m.copy() - mi **= -1 - m4 = m2.copy() - m4 **= 2 - assert_array_almost_equal(m2, m**2) - assert_array_almost_equal(m4, np.dot(m2, m2)) - assert_array_almost_equal(np.dot(mi, m), np.eye(2)) - - def test_notimplemented(self): - '''Check that 'not implemented' operations produce a failure.''' - A = matrix([[1., 2.], - [3., 4.]]) - - # __rpow__ - try: - 1.0**A - except TypeError: - pass - else: - self.fail("matrix.__rpow__ doesn't raise a TypeError") - - # __mul__ with something not a list, ndarray, tuple, or scalar - try: - A*object() - except TypeError: - pass - else: - self.fail("matrix.__mul__ with non-numeric object doesn't raise" - "a TypeError") - -class TestMatrixReturn(TestCase): - def test_instance_methods(self): - a = matrix([1.0], dtype='f8') - methodargs = { - 'astype' : ('intc',), - 'clip' : (0.0, 1.0), - 'compress' : ([1],), - 'repeat' : (1,), - 'reshape' : (1,), - 'swapaxes' : (0,0), - 'dot': np.array([1.0]), - } - excluded_methods = [ - 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', - 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', - 'searchsorted', 'setflags', 'setfield', 'sort', 'take', - 'tofile', 'tolist', 'tostring', 'all', 'any', 'sum', - 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', 'setasflat' - ] - for attrib in dir(a): - if attrib.startswith('_') or attrib in excluded_methods: - continue - f = getattr(a, attrib) - if callable(f): - # reset contents of a - a.astype('f8') - a.fill(1.0) - if attrib in methodargs: - args = methodargs[attrib] - else: - args = () - b = f(*args) - assert type(b) is matrix, "%s" % attrib - assert type(a.real) is matrix - assert type(a.imag) is matrix - c,d = matrix([0.0]).nonzero() - assert type(c) is matrix - assert type(d) is matrix - - -class TestIndexing(TestCase): - def test_basic(self): - x = asmatrix(zeros((3,2),float)) - y = zeros((3,1),float) - y[:,0] = [0.8,0.2,0.3] - x[:,1] = y>0.5 - 
assert_equal(x, [[0,1],[0,0],[0,0]]) - - -class TestNewScalarIndexing(TestCase): - def setUp(self): - self.a = matrix([[1, 2],[3,4]]) - - def test_dimesions(self): - a = self.a - x = a[0] - assert_equal(x.ndim, 2) - - def test_array_from_matrix_list(self): - a = self.a - x = array([a, a]) - assert_equal(x.shape, [2,2,2]) - - def test_array_to_list(self): - a = self.a - assert_equal(a.tolist(),[[1, 2], [3, 4]]) - - def test_fancy_indexing(self): - a = self.a - x = a[1, [0,1,0]] - assert isinstance(x, matrix) - assert_equal(x, matrix([[3, 4, 3]])) - x = a[[1,0]] - assert isinstance(x, matrix) - assert_equal(x, matrix([[3, 4], [1, 2]])) - x = a[[[1],[0]],[[1,0],[0,1]]] - assert isinstance(x, matrix) - assert_equal(x, matrix([[4, 3], [1, 2]])) - - def test_matrix_element(self): - x = matrix([[1,2,3],[4,5,6]]) - assert_equal(x[0][0],matrix([[1,2,3]])) - assert_equal(x[0][0].shape,(1,3)) - assert_equal(x[0].shape,(1,3)) - assert_equal(x[:,0].shape,(2,1)) - - x = matrix(0) - assert_equal(x[0,0],0) - assert_equal(x[0],0) - assert_equal(x[:,0].shape,x.shape) - - def test_scalar_indexing(self): - x = asmatrix(zeros((3,2),float)) - assert_equal(x[0,0],x[0][0]) - - def test_row_column_indexing(self): - x = asmatrix(np.eye(2)) - assert_array_equal(x[0,:],[[1,0]]) - assert_array_equal(x[1,:],[[0,1]]) - assert_array_equal(x[:,0],[[1],[0]]) - assert_array_equal(x[:,1],[[0],[1]]) - - def test_boolean_indexing(self): - A = arange(6) - A.shape = (3,2) - x = asmatrix(A) - assert_array_equal(x[:,array([True,False])],x[:,0]) - assert_array_equal(x[array([True,False,False]),:],x[0,:]) - - def test_list_indexing(self): - A = arange(6) - A.shape = (3,2) - x = asmatrix(A) - assert_array_equal(x[:,[1,0]],x[:,::-1]) - assert_array_equal(x[[2,1,0],:],x[::-1,:]) - -class TestPower(TestCase): - def test_returntype(self): - a = array([[0,1],[0,0]]) - assert type(matrix_power(a, 2)) is ndarray - a = mat(a) - assert type(matrix_power(a, 2)) is matrix - - def test_list(self): - 
assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/matrixlib/tests/test_multiarray.py b/numpy-1.6.2/numpy/matrixlib/tests/test_multiarray.py deleted file mode 100644 index 9f2dce7e47..0000000000 --- a/numpy-1.6.2/numpy/matrixlib/tests/test_multiarray.py +++ /dev/null @@ -1,16 +0,0 @@ -import numpy as np -from numpy.testing import * - -class TestView(TestCase): - def test_type(self): - x = np.array([1,2,3]) - assert(isinstance(x.view(np.matrix),np.matrix)) - - def test_keywords(self): - x = np.array([(1,2)],dtype=[('a',np.int8),('b',np.int8)]) - # We must be specific about the endianness here: - y = x.view(dtype=' - -#define _libnumarray_MODULE -#include "include/numpy/libnumarray.h" -#include "numpy/npy_3kcompat.h" -#include - -#if (defined(__unix__) || defined(unix)) && !defined(USG) -#include -#endif - -#if defined(__GLIBC__) || defined(__APPLE__) || defined(__MINGW32__) || (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) -#include -#elif defined(__CYGWIN__) -#include "numpy/fenv/fenv.h" -#include "numpy/fenv/fenv.c" -#endif - -static PyObject *pCfuncClass; -static PyTypeObject CfuncType; -static PyObject *pHandleErrorFunc; - -static int -deferred_libnumarray_init(void) -{ -static int initialized=0; - - if (initialized) return 0; - - pCfuncClass = (PyObject *) &CfuncType; - Py_INCREF(pCfuncClass); - - pHandleErrorFunc = - NA_initModuleGlobal("numpy.numarray.util", "handleError"); - - if (!pHandleErrorFunc) goto _fail; - - - /* _exit: */ - initialized = 1; - return 0; - -_fail: - initialized = 0; - return -1; -} - - - -/**********************************************************************/ -/* Buffer Utility Functions */ -/**********************************************************************/ - -static PyObject * -getBuffer( PyObject *obj) -{ - if (!obj) return PyErr_Format(PyExc_RuntimeError, - "NULL object passed to getBuffer()"); - if 
(((PyObject*)obj)->ob_type->tp_as_buffer == NULL) { - return PyObject_CallMethod(obj, "__buffer__", NULL); - } else { - Py_INCREF(obj); /* Since CallMethod returns a new object when it - succeeds, We'll need to DECREF later to free it. - INCREF ordinary buffers here so we don't have to - remember where the buffer came from at DECREF time. - */ - return obj; - } -} - -/* Either it defines the buffer API, or it is an instance which returns - a buffer when obj.__buffer__() is called */ -static int -isBuffer (PyObject *obj) -{ - PyObject *buf = getBuffer(obj); - int ans = 0; - if (buf) { - ans = buf->ob_type->tp_as_buffer != NULL; - Py_DECREF(buf); - } else { - PyErr_Clear(); - } - return ans; -} - -/**********************************************************************/ - -static int -getWriteBufferDataPtr(PyObject *buffobj, void **buff) -{ -#if defined(NPY_PY3K) - /* FIXME: XXX - needs implementation */ - PyErr_SetString(PyExc_RuntimeError, - "XXX: getWriteBufferDataPtr is not implemented"); - return -1; -#else - int rval = -1; - PyObject *buff2; - if ((buff2 = getBuffer(buffobj))) - { - if (buff2->ob_type->tp_as_buffer->bf_getwritebuffer) - rval = buff2->ob_type->tp_as_buffer->bf_getwritebuffer(buff2, - 0, buff); - Py_DECREF(buff2); - } - return rval; -#endif -} - -/**********************************************************************/ - -static int -isBufferWriteable (PyObject *buffobj) -{ - void *ptr; - int rval = -1; - rval = getWriteBufferDataPtr(buffobj, &ptr); - if (rval == -1) - PyErr_Clear(); /* Since we're just "testing", it's not really an error */ - return rval != -1; -} - -/**********************************************************************/ - -static int -getReadBufferDataPtr(PyObject *buffobj, void **buff) -{ -#if defined(NPY_PY3K) - /* FIXME: XXX - needs implementation */ - PyErr_SetString(PyExc_RuntimeError, - "XXX: getWriteBufferDataPtr is not implemented"); - return -1; -#else - int rval = -1; - PyObject *buff2; - if ((buff2 = 
getBuffer(buffobj))) { - if (buff2->ob_type->tp_as_buffer->bf_getreadbuffer) - rval = buff2->ob_type->tp_as_buffer->bf_getreadbuffer(buff2, - 0, buff); - Py_DECREF(buff2); - } - return rval; -#endif -} - -/**********************************************************************/ - -static int -getBufferSize(PyObject *buffobj) -{ -#if defined(NPY_PY3K) - /* FIXME: XXX - needs implementation */ - PyErr_SetString(PyExc_RuntimeError, - "XXX: getWriteBufferDataPtr is not implemented"); - return -1; -#else - Py_ssize_t size=0; - PyObject *buff2; - if ((buff2 = getBuffer(buffobj))) - { - (void) buff2->ob_type->tp_as_buffer->bf_getsegcount(buff2, &size); - Py_DECREF(buff2); - } - else - size = -1; - return size; -#endif -} - - -static double numarray_zero = 0.0; - -static double raiseDivByZero(void) -{ - return 1.0/numarray_zero; -} - -static double raiseNegDivByZero(void) -{ - return -1.0/numarray_zero; -} - -static double num_log(double x) -{ - if (x == 0.0) - return raiseNegDivByZero(); - else - return log(x); -} - -static double num_log10(double x) -{ - if (x == 0.0) - return raiseNegDivByZero(); - else - return log10(x); -} - -static double num_pow(double x, double y) -{ - int z = (int) y; - if ((x < 0.0) && (y != z)) - return raiseDivByZero(); - else - return pow(x, y); -} - -/* Inverse hyperbolic trig functions from Numeric */ -static double num_acosh(double x) -{ - return log(x + sqrt((x-1.0)*(x+1.0))); -} - -static double num_asinh(double xx) -{ - double x; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - return sign*log(x + sqrt(x*x+1.0)); -} - -static double num_atanh(double x) -{ - return 0.5*log((1.0+x)/(1.0-x)); -} - -/* NUM_CROUND (in numcomplex.h) also calls num_round */ -static double num_round(double x) -{ - return (x >= 0) ? floor(x+0.5) : ceil(x-0.5); -} - - -/* The following routine is used in the event of a detected integer * - ** divide by zero so that a floating divide by zero is generated. 
* - ** This is done since numarray uses the floating point exception * - ** sticky bits to detect errors. The last bit is an attempt to * - ** prevent optimization of the divide by zero away, the input value * - ** should always be 0 * - */ - -static int int_dividebyzero_error(long NPY_UNUSED(value), long NPY_UNUSED(unused)) { - double dummy; - dummy = 1./numarray_zero; - if (dummy) /* to prevent optimizer from eliminating expression */ - return 0; - else - return 1; -} - -/* Likewise for Integer overflows */ -#if defined(__GLIBC__) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__MINGW32__) || (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) -static int int_overflow_error(Float64 value) { /* For x86_64 */ - feraiseexcept(FE_OVERFLOW); - return (int) value; -} -#else -static int int_overflow_error(Float64 value) { - double dummy; - dummy = pow(1.e10, fabs(value/2)); - if (dummy) /* to prevent optimizer from eliminating expression */ - return (int) value; - else - return 1; -} -#endif - -static int umult64_overflow(UInt64 a, UInt64 b) -{ - UInt64 ah, al, bh, bl, w, x, y, z; - - ah = (a >> 32); - al = (a & 0xFFFFFFFFL); - bh = (b >> 32); - bl = (b & 0xFFFFFFFFL); - - /* 128-bit product: z*2**64 + (x+y)*2**32 + w */ - w = al*bl; - x = bh*al; - y = ah*bl; - z = ah*bh; - - /* *c = ((x + y)<<32) + w; */ - return z || (x>>32) || (y>>32) || - (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + (w >> 32)) >> 32); -} - -static int smult64_overflow(Int64 a0, Int64 b0) -{ - UInt64 a, b; - UInt64 ah, al, bh, bl, w, x, y, z; - - /* Convert to non-negative quantities */ - if (a0 < 0) { a = -a0; } else { a = a0; } - if (b0 < 0) { b = -b0; } else { b = b0; } - - ah = (a >> 32); - al = (a & 0xFFFFFFFFL); - bh = (b >> 32); - bl = (b & 0xFFFFFFFFL); - - w = al*bl; - x = bh*al; - y = ah*bl; - z = ah*bh; - - /* - UInt64 c = ((x + y)<<32) + w; - if ((a0 < 0) ^ (b0 < 0)) - *c = -c; - else - *c = c - */ - - return z || (x>>31) || (y>>31) || - (((x & 0xFFFFFFFFL) + (y & 0xFFFFFFFFL) + 
(w >> 32)) >> 31); -} - - -static void -NA_Done(void) -{ - return; -} - -static PyArrayObject * -NA_NewAll(int ndim, maybelong *shape, NumarrayType type, - void *buffer, maybelong byteoffset, maybelong bytestride, - int byteorder, int aligned, int writeable) -{ - PyArrayObject *result = NA_NewAllFromBuffer( - ndim, shape, type, Py_None, byteoffset, bytestride, - byteorder, aligned, writeable); - - if (result) { - if (!NA_NumArrayCheck((PyObject *) result)) { - PyErr_Format( PyExc_TypeError, - "NA_NewAll: non-NumArray result"); - result = NULL; - } else { - if (buffer) { - memcpy(result->data, buffer, NA_NBYTES(result)); - } else { - memset(result->data, 0, NA_NBYTES(result)); - } - } - } - return result; -} - -static PyArrayObject * -NA_NewAllStrides(int ndim, maybelong *shape, maybelong *strides, - NumarrayType type, void *buffer, maybelong byteoffset, - int byteorder, int aligned, int writeable) -{ - int i; - PyArrayObject *result = NA_NewAll(ndim, shape, type, buffer, - byteoffset, 0, - byteorder, aligned, writeable); - for(i=0; istrides[i] = strides[i]; - return result; -} - - -static PyArrayObject * -NA_New(void *buffer, NumarrayType type, int ndim, ...) -{ - int i; - maybelong shape[MAXDIM]; - va_list ap; - va_start(ap, ndim); - for(i=0; i out.copyFrom(shadow) */ - Py_DECREF(shadow); - Py_INCREF(Py_None); - rval = Py_None; - return rval; - } -} - -static long NA_getBufferPtrAndSize(PyObject *buffobj, int readonly, void **ptr) -{ - long rval; - if (readonly) - rval = getReadBufferDataPtr(buffobj, ptr); - else - rval = getWriteBufferDataPtr(buffobj, ptr); - return rval; -} - - -static int NA_checkIo(char *name, - int wantIn, int wantOut, int gotIn, int gotOut) -{ - if (wantIn != gotIn) { - PyErr_Format(_Error, - "%s: wrong # of input buffers. Expected %d. Got %d.", - name, wantIn, gotIn); - return -1; - } - if (wantOut != gotOut) { - PyErr_Format(_Error, - "%s: wrong # of output buffers. Expected %d. 
Got %d.", - name, wantOut, gotOut); - return -1; - } - return 0; -} - -static int NA_checkOneCBuffer(char *name, long niter, - void *buffer, long bsize, size_t typesize) -{ - Int64 lniter = niter, ltypesize = typesize; - - if (lniter*ltypesize > bsize) { - PyErr_Format(_Error, - "%s: access out of buffer. niter=%d typesize=%d bsize=%d", - name, (int) niter, (int) typesize, (int) bsize); - return -1; - } - if ((typesize <= sizeof(Float64)) && (((long) buffer) % typesize)) { - PyErr_Format(_Error, - "%s: buffer not aligned on %d byte boundary.", - name, (int) typesize); - return -1; - } - return 0; -} - - -static int NA_checkNCBuffers(char *name, int N, long niter, - void **buffers, long *bsizes, - Int8 *typesizes, Int8 *iters) -{ - int i; - for (i=0; i= 0) { /* Skip dimension == 0. */ - omax = MAX(omax, tmax); - omin = MIN(omin, tmin); - if (align && (ABS(stride[i]) % alignsize)) { - PyErr_Format(_Error, - "%s: stride %d not aligned on %d byte boundary.", - name, (int) stride[i], (int) alignsize); - return -1; - } - if (omax + itemsize > buffersize) { - PyErr_Format(_Error, - "%s: access beyond buffer. offset=%d buffersize=%d", - name, (int) (omax+itemsize-1), (int) buffersize); - return -1; - } - if (omin < 0) { - PyErr_Format(_Error, - "%s: access before buffer. offset=%d buffersize=%d", - name, (int) omin, (int) buffersize); - return -1; - } - } - } - return 0; -} - -/* Function to call standard C Ufuncs - ** - ** The C Ufuncs expect contiguous 1-d data numarray, input and output numarray - ** iterate with standard increments of one data element over all numarray. - ** (There are some exceptions like arrayrangexxx which use one or more of - ** the data numarray as parameter or other sources of information and do not - ** iterate over every buffer). - ** - ** Arguments: - ** - ** Number of iterations (simple integer value). - ** Number of input numarray. - ** Number of output numarray. - ** Tuple of tuples, one tuple per input/output array. 
Each of these - ** tuples consists of a buffer object and a byte offset to start. - ** - ** Returns None - */ - - -static PyObject * -NA_callCUFuncCore(PyObject *self, - long niter, long ninargs, long noutargs, - PyObject **BufferObj, long *offset) -{ - CfuncObject *me = (CfuncObject *) self; - char *buffers[MAXARGS]; - long bsizes[MAXARGS]; - long i, pnargs = ninargs + noutargs; - UFUNC ufuncptr; - - if (pnargs > MAXARGS) - return PyErr_Format(PyExc_RuntimeError, "NA_callCUFuncCore: too many parameters"); - - if (!PyObject_IsInstance(self, (PyObject *) &CfuncType) - || me->descr.type != CFUNC_UFUNC) - return PyErr_Format(PyExc_TypeError, - "NA_callCUFuncCore: problem with cfunc."); - - for (i=0; idescr.name, (int) offset[i], (int) i); - if ((bsizes[i] = NA_getBufferPtrAndSize(BufferObj[i], readonly, - (void *) &buffers[i])) < 0) - return PyErr_Format(_Error, - "%s: Problem with %s buffer[%d].", - me->descr.name, - readonly ? "read" : "write", (int) i); - buffers[i] += offset[i]; - bsizes[i] -= offset[i]; /* "shorten" buffer size by offset. 
*/ - } - - ufuncptr = (UFUNC) me->descr.fptr; - - /* If it's not a self-checking ufunc, check arg count match, - buffer size, and alignment for all buffers */ - if (!me->descr.chkself && - (NA_checkIo(me->descr.name, - me->descr.wantIn, me->descr.wantOut, ninargs, noutargs) || - NA_checkNCBuffers(me->descr.name, pnargs, - niter, (void **) buffers, bsizes, - me->descr.sizes, me->descr.iters))) - return NULL; - - /* Since the parameters are valid, call the C Ufunc */ - if (!(*ufuncptr)(niter, ninargs, noutargs, (void **)buffers, bsizes)) { - Py_INCREF(Py_None); - return Py_None; - } else { - return NULL; - } -} - -static PyObject * -callCUFunc(PyObject *self, PyObject *args) { - PyObject *DataArgs, *ArgTuple; - long pnargs, ninargs, noutargs, niter, i; - CfuncObject *me = (CfuncObject *) self; - PyObject *BufferObj[MAXARGS]; - long offset[MAXARGS]; - - if (!PyArg_ParseTuple(args, "lllO", - &niter, &ninargs, &noutargs, &DataArgs)) - return PyErr_Format(_Error, - "%s: Problem with argument list", me->descr.name); - - /* check consistency of stated inputs/outputs and supplied buffers */ - pnargs = PyObject_Length(DataArgs); - if ((pnargs != (ninargs+noutargs)) || (pnargs > MAXARGS)) - return PyErr_Format(_Error, - "%s: wrong buffer count for function", me->descr.name); - - /* Unpack buffers and offsets, get data pointers */ - for (i=0; idescr.name); - } - return NA_callCUFuncCore(self, niter, ninargs, noutargs, BufferObj, offset); -} - -static PyObject * -callStrideConvCFunc(PyObject *self, PyObject *args) { - PyObject *inbuffObj, *outbuffObj, *shapeObj; - PyObject *inbstridesObj, *outbstridesObj; - CfuncObject *me = (CfuncObject *) self; - int nshape, ninbstrides, noutbstrides; - maybelong shape[MAXDIM], inbstrides[MAXDIM], - outbstrides[MAXDIM], *outbstrides1 = outbstrides; - long inboffset, outboffset, nbytes=0; - - if (!PyArg_ParseTuple(args, "OOlOOlO|l", - &shapeObj, &inbuffObj, &inboffset, &inbstridesObj, - &outbuffObj, &outboffset, &outbstridesObj, - &nbytes)) { 
- return PyErr_Format(_Error, - "%s: Problem with argument list", - me->descr.name); - } - - nshape = NA_maybeLongsFromIntTuple(MAXDIM, shape, shapeObj); - if (nshape < 0) return NULL; - - ninbstrides = NA_maybeLongsFromIntTuple(MAXDIM, inbstrides, inbstridesObj); - if (ninbstrides < 0) return NULL; - - noutbstrides= NA_maybeLongsFromIntTuple(MAXDIM, outbstrides, outbstridesObj); - if (noutbstrides < 0) return NULL; - - if (nshape && (nshape != ninbstrides)) { - return PyErr_Format(_Error, - "%s: Missmatch between input iteration and strides tuples", - me->descr.name); - } - - if (nshape && (nshape != noutbstrides)) { - if (noutbstrides < 1 || - outbstrides[ noutbstrides - 1 ])/* allow 0 for reductions. */ - return PyErr_Format(_Error, - "%s: Missmatch between output " - "iteration and strides tuples", - me->descr.name); - } - - return NA_callStrideConvCFuncCore( - self, nshape, shape, - inbuffObj, inboffset, ninbstrides, inbstrides, - outbuffObj, outboffset, noutbstrides, outbstrides1, nbytes); -} - -static int -_NA_callStridingHelper(PyObject *aux, long dim, - long nnumarray, PyArrayObject *numarray[], char *data[], - CFUNC_STRIDED_FUNC f) -{ - int i, j, status=0; - dim -= 1; - for(i=0; idimensions[dim]; i++) { - for (j=0; jstrides[dim]*i; - if (dim == 0) - status |= f(aux, nnumarray, numarray, data); - else - status |= _NA_callStridingHelper( - aux, dim, nnumarray, numarray, data, f); - for (j=0; jstrides[dim]*i; - } - return status; -} - - -static PyObject * -callStridingCFunc(PyObject *self, PyObject *args) { - CfuncObject *me = (CfuncObject *) self; - PyObject *aux; - PyArrayObject *numarray[MAXARRAYS]; - char *data[MAXARRAYS]; - CFUNC_STRIDED_FUNC f; - int i; - - int nnumarray = PySequence_Length(args)-1; - if ((nnumarray < 1) || (nnumarray > MAXARRAYS)) - return PyErr_Format(_Error, "%s, too many or too few numarray.", - me->descr.name); - - aux = PySequence_GetItem(args, 0); - if (!aux) - return NULL; - - for(i=0; idescr.name, i); - if 
(!NA_NDArrayCheck(otemp)) - return PyErr_Format(PyExc_TypeError, - "%s arg[%d] is not an array.", - me->descr.name, i); - numarray[i] = (PyArrayObject *) otemp; - data[i] = numarray[i]->data; - Py_DECREF(otemp); - if (!NA_updateDataPtr(numarray[i])) - return NULL; - } - - /* Cast function pointer and perform stride operation */ - f = (CFUNC_STRIDED_FUNC) me->descr.fptr; - - if (_NA_callStridingHelper(aux, numarray[0]->nd, - nnumarray, numarray, data, f)) { - return NULL; - } else { - Py_INCREF(Py_None); - return Py_None; - } -} - -/* Convert a standard C numeric value to a Python numeric value. - ** - ** Handles both nonaligned and/or byteswapped C data. - ** - ** Input arguments are: - ** - ** Buffer object that contains the C numeric value. - ** Offset (in bytes) into the buffer that the data is located at. - ** The size of the C numeric data item in bytes. - ** Flag indicating if the C data is byteswapped from the processor's - ** natural representation. - ** - ** Returns a Python numeric value. - */ - -static PyObject * -NumTypeAsPyValue(PyObject *self, PyObject *args) { - PyObject *bufferObj; - long offset, itemsize, byteswap, i, buffersize; - Py_complex temp; /* to hold copies of largest possible type */ - void *buffer; - char *tempptr; - CFUNCasPyValue funcptr; - CfuncObject *me = (CfuncObject *) self; - - if (!PyArg_ParseTuple(args, "Olll", - &bufferObj, &offset, &itemsize, &byteswap)) - return PyErr_Format(_Error, - "NumTypeAsPyValue: Problem with argument list"); - - if ((buffersize = NA_getBufferPtrAndSize(bufferObj, 1, &buffer)) < 0) - return PyErr_Format(_Error, - "NumTypeAsPyValue: Problem with array buffer"); - - if (offset < 0) - return PyErr_Format(_Error, - "NumTypeAsPyValue: invalid negative offset: %d", (int) offset); - - /* Guarantee valid buffer pointer */ - if (offset+itemsize > buffersize) - return PyErr_Format(_Error, - "NumTypeAsPyValue: buffer too small for offset and itemsize."); - - /* Do byteswapping. 
Guarantee double alignment by using temp. */ - tempptr = (char *) &temp; - if (!byteswap) { - for (i=0; idescr.fptr; - - /* Call function to build PyObject. Bad parameters to this function - may render call meaningless, but "temp" guarantees that its safe. */ - return (*funcptr)((void *)(&temp)); -} - -/* Convert a Python numeric value to a standard C numeric value. - ** - ** Handles both nonaligned and/or byteswapped C data. - ** - ** Input arguments are: - ** - ** The Python numeric value to be converted. - ** Buffer object to contain the C numeric value. - ** Offset (in bytes) into the buffer that the data is to be copied to. - ** The size of the C numeric data item in bytes. - ** Flag indicating if the C data is byteswapped from the processor's - ** natural representation. - ** - ** Returns None - */ - -static PyObject * -NumTypeFromPyValue(PyObject *self, PyObject *args) { - PyObject *bufferObj, *valueObj; - long offset, itemsize, byteswap, i, buffersize; - Py_complex temp; /* to hold copies of largest possible type */ - void *buffer; - char *tempptr; - CFUNCfromPyValue funcptr; - CfuncObject *me = (CfuncObject *) self; - - if (!PyArg_ParseTuple(args, "OOlll", - &valueObj, &bufferObj, &offset, &itemsize, &byteswap)) - return PyErr_Format(_Error, - "%s: Problem with argument list", me->descr.name); - - if ((buffersize = NA_getBufferPtrAndSize(bufferObj, 0, &buffer)) < 0) - return PyErr_Format(_Error, - "%s: Problem with array buffer (read only?)", me->descr.name); - - funcptr = (CFUNCfromPyValue) me->descr.fptr; - - /* Convert python object into "temp". Always safe. */ - if (!((*funcptr)(valueObj, (void *)( &temp)))) - return PyErr_Format(_Error, - "%s: Problem converting value", me->descr.name); - - /* Check buffer offset. 
*/ - if (offset < 0) - return PyErr_Format(_Error, - "%s: invalid negative offset: %d", me->descr.name, (int) offset); - - if (offset+itemsize > buffersize) - return PyErr_Format(_Error, - "%s: buffer too small(%d) for offset(%d) and itemsize(%d)", - me->descr.name, (int) buffersize, (int) offset, (int) itemsize); - - /* Copy "temp" to array buffer. */ - tempptr = (char *) &temp; - if (!byteswap) { - for (i=0; idescr.type) { - case CFUNC_UFUNC: - return callCUFunc(self, argsTuple); - break; - case CFUNC_STRIDING: - return callStrideConvCFunc(self, argsTuple); - break; - case CFUNC_NSTRIDING: - return callStridingCFunc(self, argsTuple); - case CFUNC_FROM_PY_VALUE: - return NumTypeFromPyValue(self, argsTuple); - break; - case CFUNC_AS_PY_VALUE: - return NumTypeAsPyValue(self, argsTuple); - break; - default: - return PyErr_Format( _Error, - "cfunc_call: Can't dispatch cfunc '%s' with type: %d.", - me->descr.name, me->descr.type); - } -} - -static PyTypeObject CfuncType; - -static void -cfunc_dealloc(PyObject* self) -{ - PyObject_Del(self); -} - -static PyObject * -cfunc_repr(PyObject *self) -{ - char buf[256]; - CfuncObject *me = (CfuncObject *) self; - sprintf(buf, "", - me->descr.name, (unsigned long ) me->descr.fptr, - me->descr.chkself, me->descr.align, - me->descr.wantIn, me->descr.wantOut); - return PyUString_FromString(buf); -} - -static PyTypeObject CfuncType = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(0,0) -#else - PyObject_HEAD_INIT(0) - 0, /* ob_size */ -#endif - "Cfunc", - sizeof(CfuncObject), - 0, - cfunc_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - cfunc_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - cfunc_call, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* 
tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif - }; - -/* CfuncObjects are created at the c-level only. They ensure that each - cfunc is called via the correct python-c-wrapper as defined by its - CfuncDescriptor. The wrapper, in turn, does conversions and buffer size - and alignment checking. Allowing these to be created at the python level - would enable them to be created *wrong* at the python level, and thereby - enable python code to *crash* python. - */ -static PyObject* -NA_new_cfunc(CfuncDescriptor *cfd) -{ - CfuncObject* cfunc; - - /* Should be done once at init. - Do now since there is no init. */ - ((PyObject*)&CfuncType)->ob_type = &PyType_Type; - - cfunc = PyObject_New(CfuncObject, &CfuncType); - - if (!cfunc) { - return PyErr_Format(_Error, - "NA_new_cfunc: failed creating '%s'", - cfd->name); - } - - cfunc->descr = *cfd; - - return (PyObject*)cfunc; -} - -static int NA_add_cfunc(PyObject *dict, char *keystr, CfuncDescriptor *descr) -{ - PyObject *c = (PyObject *) NA_new_cfunc(descr); - if (!c) return -1; - return PyDict_SetItemString(dict, keystr, c); -} - -static PyArrayObject* -NA_InputArray(PyObject *a, NumarrayType t, int requires) -{ - PyArray_Descr *descr; - if (t == tAny) descr = NULL; - else descr = PyArray_DescrFromType(t); - return (PyArrayObject *) \ - PyArray_CheckFromAny(a, descr, 0, 0, requires, NULL); -} - -/* satisfies ensures that 'a' meets a set of requirements and matches - the specified type. 
- */ -static int -satisfies(PyArrayObject *a, int requirements, NumarrayType t) -{ - int type_ok = (a->descr->type_num == t) || (t == tAny); - - if (PyArray_ISCARRAY(a)) - return type_ok; - if (PyArray_ISBYTESWAPPED(a) && (requirements & NUM_NOTSWAPPED)) - return 0; - if (!PyArray_ISALIGNED(a) && (requirements & NUM_ALIGNED)) - return 0; - if (!PyArray_ISCONTIGUOUS(a) && (requirements & NUM_CONTIGUOUS)) - return 0; - if (!PyArray_ISWRITABLE(a) && (requirements & NUM_WRITABLE)) - return 0; - if (requirements & NUM_COPY) - return 0; - return type_ok; -} - - -static PyArrayObject * -NA_OutputArray(PyObject *a, NumarrayType t, int requires) -{ - PyArray_Descr *dtype; - PyArrayObject *ret; - - if (!PyArray_Check(a) || !PyArray_ISWRITEABLE(a)) { - PyErr_Format(PyExc_TypeError, - "NA_OutputArray: only writeable arrays work for output."); - return NULL; - } - - if (satisfies((PyArrayObject *)a, requires, t)) { - Py_INCREF(a); - return (PyArrayObject *)a; - } - if (t == tAny) { - dtype = PyArray_DESCR(a); - Py_INCREF(dtype); - } - else { - dtype = PyArray_DescrFromType(t); - } - ret = (PyArrayObject *)PyArray_Empty(PyArray_NDIM(a), PyArray_DIMS(a), - dtype, 0); - ret->flags |= NPY_UPDATEIFCOPY; - ret->base = a; - PyArray_FLAGS(a) &= ~NPY_WRITEABLE; - Py_INCREF(a); - return ret; -} - - -/* NA_IoArray is a combination of NA_InputArray and NA_OutputArray. - - Unlike NA_OutputArray, if a temporary is required it is initialized to a copy - of the input array. - - Unlike NA_InputArray, deallocating any resulting temporary array results in a - copy from the temporary back to the original. - */ -static PyArrayObject * -NA_IoArray(PyObject *a, NumarrayType t, int requires) -{ - PyArrayObject *shadow = NA_InputArray(a, t, requires | NPY_UPDATEIFCOPY ); - - if (!shadow) return NULL; - - /* Guard against non-writable, but otherwise satisfying requires. - In this case, shadow == a. 
- */ - if (!PyArray_ISWRITABLE(shadow)) { - PyErr_Format(PyExc_TypeError, - "NA_IoArray: I/O array must be writable array"); - PyArray_XDECREF_ERR(shadow); - return NULL; - } - - return shadow; -} - -/* NA_OptionalOutputArray works like NA_OutputArray, but handles the case - where the output array 'optional' is omitted entirely at the python level, - resulting in 'optional'==Py_None. When 'optional' is Py_None, the return - value is cloned (but with NumarrayType 't') from 'master', typically an input - array with the same shape as the output array. - */ -static PyArrayObject * -NA_OptionalOutputArray(PyObject *optional, NumarrayType t, int requires, - PyArrayObject *master) -{ - if ((optional == Py_None) || (optional == NULL)) { - PyObject *rval; - PyArray_Descr *descr; - if (t == tAny) descr=NULL; - else descr = PyArray_DescrFromType(t); - rval = PyArray_FromArray( - master, descr, NUM_C_ARRAY | NUM_COPY | NUM_WRITABLE); - return (PyArrayObject *)rval; - } else { - return NA_OutputArray(optional, t, requires); - } -} - -Complex64 NA_get_Complex64(PyArrayObject *a, long offset) -{ - Complex32 v0; - Complex64 v; - - switch(a->descr->type_num) { - case tComplex32: - v0 = NA_GETP(a, Complex32, (NA_PTR(a)+offset)); - v.r = v0.r; - v.i = v0.i; - break; - case tComplex64: - v = NA_GETP(a, Complex64, (NA_PTR(a)+offset)); - break; - default: - v.r = NA_get_Float64(a, offset); - v.i = 0; - break; - } - return v; -} - -void NA_set_Complex64(PyArrayObject *a, long offset, Complex64 v) -{ - Complex32 v0; - - switch(a->descr->type_num) { - case tComplex32: - v0.r = v.r; - v0.i = v.i; - NA_SETP(a, Complex32, (NA_PTR(a)+offset), v0); - break; - case tComplex64: - NA_SETP(a, Complex64, (NA_PTR(a)+offset), v); - break; - default: - NA_set_Float64(a, offset, v.r); - break; - } -} - -Int64 NA_get_Int64(PyArrayObject *a, long offset) -{ - switch(a->descr->type_num) { - case tBool: - return NA_GETP(a, Bool, (NA_PTR(a)+offset)) != 0; - case tInt8: - return NA_GETP(a, Int8, 
(NA_PTR(a)+offset)); - case tUInt8: - return NA_GETP(a, UInt8, (NA_PTR(a)+offset)); - case tInt16: - return NA_GETP(a, Int16, (NA_PTR(a)+offset)); - case tUInt16: - return NA_GETP(a, UInt16, (NA_PTR(a)+offset)); - case tInt32: - return NA_GETP(a, Int32, (NA_PTR(a)+offset)); - case tUInt32: - return NA_GETP(a, UInt32, (NA_PTR(a)+offset)); - case tInt64: - return NA_GETP(a, Int64, (NA_PTR(a)+offset)); - case tUInt64: - return NA_GETP(a, UInt64, (NA_PTR(a)+offset)); - case tFloat32: - return NA_GETP(a, Float32, (NA_PTR(a)+offset)); - case tFloat64: - return NA_GETP(a, Float64, (NA_PTR(a)+offset)); - case tComplex32: - return NA_GETP(a, Float32, (NA_PTR(a)+offset)); - case tComplex64: - return NA_GETP(a, Float64, (NA_PTR(a)+offset)); - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_get_Int64", - a->descr->type_num); - PyErr_Print(); - } - return 0; /* suppress warning */ -} - -void NA_set_Int64(PyArrayObject *a, long offset, Int64 v) -{ - Bool b; - - switch(a->descr->type_num) { - case tBool: - b = (v != 0); - NA_SETP(a, Bool, (NA_PTR(a)+offset), b); - break; - case tInt8: NA_SETP(a, Int8, (NA_PTR(a)+offset), v); - break; - case tUInt8: NA_SETP(a, UInt8, (NA_PTR(a)+offset), v); - break; - case tInt16: NA_SETP(a, Int16, (NA_PTR(a)+offset), v); - break; - case tUInt16: NA_SETP(a, UInt16, (NA_PTR(a)+offset), v); - break; - case tInt32: NA_SETP(a, Int32, (NA_PTR(a)+offset), v); - break; - case tUInt32: NA_SETP(a, UInt32, (NA_PTR(a)+offset), v); - break; - case tInt64: NA_SETP(a, Int64, (NA_PTR(a)+offset), v); - break; - case tUInt64: NA_SETP(a, UInt64, (NA_PTR(a)+offset), v); - break; - case tFloat32: - NA_SETP(a, Float32, (NA_PTR(a)+offset), v); - break; - case tFloat64: - NA_SETP(a, Float64, (NA_PTR(a)+offset), v); - break; - case tComplex32: - NA_SETP(a, Float32, (NA_PTR(a)+offset), v); - NA_SETP(a, Float32, (NA_PTR(a)+offset+sizeof(Float32)), 0); - break; - case tComplex64: - NA_SETP(a, Float64, (NA_PTR(a)+offset), v); - NA_SETP(a, Float64, 
(NA_PTR(a)+offset+sizeof(Float64)), 0); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_set_Int64", - a->descr->type_num); - PyErr_Print(); - } -} - -/* NA_get_offset computes the offset specified by the set of indices. - If N > 0, the indices are taken from the outer dimensions of the array. - If N < 0, the indices are taken from the inner dimensions of the array. - If N == 0, the offset is 0. - */ -long NA_get_offset(PyArrayObject *a, int N, ...) -{ - int i; - long offset = 0; - va_list ap; - va_start(ap, N); - if (N > 0) { /* compute offset of "outer" indices. */ - for(i=0; istrides[i]; - } else { /* compute offset of "inner" indices. */ - N = -N; - for(i=0; istrides[a->nd-N+i]; - } - va_end(ap); - return offset; -} - -Float64 NA_get_Float64(PyArrayObject *a, long offset) -{ - switch(a->descr->type_num) { - case tBool: - return NA_GETP(a, Bool, (NA_PTR(a)+offset)) != 0; - case tInt8: - return NA_GETP(a, Int8, (NA_PTR(a)+offset)); - case tUInt8: - return NA_GETP(a, UInt8, (NA_PTR(a)+offset)); - case tInt16: - return NA_GETP(a, Int16, (NA_PTR(a)+offset)); - case tUInt16: - return NA_GETP(a, UInt16, (NA_PTR(a)+offset)); - case tInt32: - return NA_GETP(a, Int32, (NA_PTR(a)+offset)); - case tUInt32: - return NA_GETP(a, UInt32, (NA_PTR(a)+offset)); - case tInt64: - return NA_GETP(a, Int64, (NA_PTR(a)+offset)); -#if HAS_UINT64 - case tUInt64: - return NA_GETP(a, UInt64, (NA_PTR(a)+offset)); -#endif - case tFloat32: - return NA_GETP(a, Float32, (NA_PTR(a)+offset)); - case tFloat64: - return NA_GETP(a, Float64, (NA_PTR(a)+offset)); - case tComplex32: /* Since real value is first */ - return NA_GETP(a, Float32, (NA_PTR(a)+offset)); - case tComplex64: /* Since real value is first */ - return NA_GETP(a, Float64, (NA_PTR(a)+offset)); - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_get_Float64", - a->descr->type_num); - } - return 0; /* suppress warning */ -} - -void NA_set_Float64(PyArrayObject *a, long offset, Float64 v) -{ 
- Bool b; - - switch(a->descr->type_num) { - case tBool: - b = (v != 0); - NA_SETP(a, Bool, (NA_PTR(a)+offset), b); - break; - case tInt8: NA_SETP(a, Int8, (NA_PTR(a)+offset), v); - break; - case tUInt8: NA_SETP(a, UInt8, (NA_PTR(a)+offset), v); - break; - case tInt16: NA_SETP(a, Int16, (NA_PTR(a)+offset), v); - break; - case tUInt16: NA_SETP(a, UInt16, (NA_PTR(a)+offset), v); - break; - case tInt32: NA_SETP(a, Int32, (NA_PTR(a)+offset), v); - break; - case tUInt32: NA_SETP(a, UInt32, (NA_PTR(a)+offset), v); - break; - case tInt64: NA_SETP(a, Int64, (NA_PTR(a)+offset), v); - break; -#if HAS_UINT64 - case tUInt64: NA_SETP(a, UInt64, (NA_PTR(a)+offset), v); - break; -#endif - case tFloat32: - NA_SETP(a, Float32, (NA_PTR(a)+offset), v); - break; - case tFloat64: - NA_SETP(a, Float64, (NA_PTR(a)+offset), v); - break; - case tComplex32: { - NA_SETP(a, Float32, (NA_PTR(a)+offset), v); - NA_SETP(a, Float32, (NA_PTR(a)+offset+sizeof(Float32)), 0); - break; - } - case tComplex64: { - NA_SETP(a, Float64, (NA_PTR(a)+offset), v); - NA_SETP(a, Float64, (NA_PTR(a)+offset+sizeof(Float64)), 0); - break; - } - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_set_Float64", - a->descr->type_num ); - PyErr_Print(); - } -} - - -Float64 NA_get1_Float64(PyArrayObject *a, long i) -{ - long offset = i * a->strides[0]; - return NA_get_Float64(a, offset); -} - -Float64 NA_get2_Float64(PyArrayObject *a, long i, long j) -{ - long offset = i * a->strides[0] - + j * a->strides[1]; - return NA_get_Float64(a, offset); -} - -Float64 NA_get3_Float64(PyArrayObject *a, long i, long j, long k) -{ - long offset = i * a->strides[0] - + j * a->strides[1] - + k * a->strides[2]; - return NA_get_Float64(a, offset); -} - -void NA_set1_Float64(PyArrayObject *a, long i, Float64 v) -{ - long offset = i * a->strides[0]; - NA_set_Float64(a, offset, v); -} - -void NA_set2_Float64(PyArrayObject *a, long i, long j, Float64 v) -{ - long offset = i * a->strides[0] - + j * a->strides[1]; - 
NA_set_Float64(a, offset, v); -} - -void NA_set3_Float64(PyArrayObject *a, long i, long j, long k, Float64 v) -{ - long offset = i * a->strides[0] - + j * a->strides[1] - + k * a->strides[2]; - NA_set_Float64(a, offset, v); -} - -Complex64 NA_get1_Complex64(PyArrayObject *a, long i) -{ - long offset = i * a->strides[0]; - return NA_get_Complex64(a, offset); -} - -Complex64 NA_get2_Complex64(PyArrayObject *a, long i, long j) -{ - long offset = i * a->strides[0] - + j * a->strides[1]; - return NA_get_Complex64(a, offset); -} - -Complex64 NA_get3_Complex64(PyArrayObject *a, long i, long j, long k) -{ - long offset = i * a->strides[0] - + j * a->strides[1] - + k * a->strides[2]; - return NA_get_Complex64(a, offset); -} - -void NA_set1_Complex64(PyArrayObject *a, long i, Complex64 v) -{ - long offset = i * a->strides[0]; - NA_set_Complex64(a, offset, v); -} - -void NA_set2_Complex64(PyArrayObject *a, long i, long j, Complex64 v) -{ - long offset = i * a->strides[0] - + j * a->strides[1]; - NA_set_Complex64(a, offset, v); -} - -void NA_set3_Complex64(PyArrayObject *a, long i, long j, long k, Complex64 v) -{ - long offset = i * a->strides[0] - + j * a->strides[1] - + k * a->strides[2]; - NA_set_Complex64(a, offset, v); -} - -Int64 NA_get1_Int64(PyArrayObject *a, long i) -{ - long offset = i * a->strides[0]; - return NA_get_Int64(a, offset); -} - -Int64 NA_get2_Int64(PyArrayObject *a, long i, long j) -{ - long offset = i * a->strides[0] - + j * a->strides[1]; - return NA_get_Int64(a, offset); -} - -Int64 NA_get3_Int64(PyArrayObject *a, long i, long j, long k) -{ - long offset = i * a->strides[0] - + j * a->strides[1] - + k * a->strides[2]; - return NA_get_Int64(a, offset); -} - -void NA_set1_Int64(PyArrayObject *a, long i, Int64 v) -{ - long offset = i * a->strides[0]; - NA_set_Int64(a, offset, v); -} - -void NA_set2_Int64(PyArrayObject *a, long i, long j, Int64 v) -{ - long offset = i * a->strides[0] - + j * a->strides[1]; - NA_set_Int64(a, offset, v); -} - -void 
NA_set3_Int64(PyArrayObject *a, long i, long j, long k, Int64 v) -{ - long offset = i * a->strides[0] - + j * a->strides[1] - + k * a->strides[2]; - NA_set_Int64(a, offset, v); -} - -/* SET_CMPLX could be made faster by factoring it into 3 seperate loops. -*/ -#define NA_SET_CMPLX(a, type, base, cnt, in) \ -{ \ - int i; \ - int stride = a->strides[ a->nd - 1]; \ - NA_SET1D(a, type, base, cnt, in); \ - base = NA_PTR(a) + offset + sizeof(type); \ - for(i=0; idescr->type_num) { - case tBool: - NA_GET1D(a, Bool, base, cnt, out); - break; - case tInt8: - NA_GET1D(a, Int8, base, cnt, out); - break; - case tUInt8: - NA_GET1D(a, UInt8, base, cnt, out); - break; - case tInt16: - NA_GET1D(a, Int16, base, cnt, out); - break; - case tUInt16: - NA_GET1D(a, UInt16, base, cnt, out); - break; - case tInt32: - NA_GET1D(a, Int32, base, cnt, out); - break; - case tUInt32: - NA_GET1D(a, UInt32, base, cnt, out); - break; - case tInt64: - NA_GET1D(a, Int64, base, cnt, out); - break; -#if HAS_UINT64 - case tUInt64: - NA_GET1D(a, UInt64, base, cnt, out); - break; -#endif - case tFloat32: - NA_GET1D(a, Float32, base, cnt, out); - break; - case tFloat64: - NA_GET1D(a, Float64, base, cnt, out); - break; - case tComplex32: - NA_GET1D(a, Float32, base, cnt, out); - break; - case tComplex64: - NA_GET1D(a, Float64, base, cnt, out); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_get1D_Float64", - a->descr->type_num); - PyErr_Print(); - return -1; - } - return 0; -} - -static Float64 * -NA_alloc1D_Float64(PyArrayObject *a, long offset, int cnt) -{ - Float64 *result = PyMem_New(Float64, (size_t)cnt); - if (!result) return NULL; - if (NA_get1D_Float64(a, offset, cnt, result) < 0) { - PyMem_Free(result); - return NULL; - } - return result; -} - -static int -NA_set1D_Float64(PyArrayObject *a, long offset, int cnt, Float64*in) -{ - char *base = NA_PTR(a) + offset; - - switch(a->descr->type_num) { - case tBool: - NA_SET1D(a, Bool, base, cnt, in); - break; - case tInt8: - 
NA_SET1D(a, Int8, base, cnt, in); - break; - case tUInt8: - NA_SET1D(a, UInt8, base, cnt, in); - break; - case tInt16: - NA_SET1D(a, Int16, base, cnt, in); - break; - case tUInt16: - NA_SET1D(a, UInt16, base, cnt, in); - break; - case tInt32: - NA_SET1D(a, Int32, base, cnt, in); - break; - case tUInt32: - NA_SET1D(a, UInt32, base, cnt, in); - break; - case tInt64: - NA_SET1D(a, Int64, base, cnt, in); - break; -#if HAS_UINT64 - case tUInt64: - NA_SET1D(a, UInt64, base, cnt, in); - break; -#endif - case tFloat32: - NA_SET1D(a, Float32, base, cnt, in); - break; - case tFloat64: - NA_SET1D(a, Float64, base, cnt, in); - break; - case tComplex32: - NA_SET_CMPLX(a, Float32, base, cnt, in); - break; - case tComplex64: - NA_SET_CMPLX(a, Float64, base, cnt, in); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_set1D_Float64", - a->descr->type_num); - PyErr_Print(); - return -1; - } - return 0; -} - -static int -NA_get1D_Int64(PyArrayObject *a, long offset, int cnt, Int64*out) -{ - char *base = NA_PTR(a) + offset; - - switch(a->descr->type_num) { - case tBool: - NA_GET1D(a, Bool, base, cnt, out); - break; - case tInt8: - NA_GET1D(a, Int8, base, cnt, out); - break; - case tUInt8: - NA_GET1D(a, UInt8, base, cnt, out); - break; - case tInt16: - NA_GET1D(a, Int16, base, cnt, out); - break; - case tUInt16: - NA_GET1D(a, UInt16, base, cnt, out); - break; - case tInt32: - NA_GET1D(a, Int32, base, cnt, out); - break; - case tUInt32: - NA_GET1D(a, UInt32, base, cnt, out); - break; - case tInt64: - NA_GET1D(a, Int64, base, cnt, out); - break; - case tUInt64: - NA_GET1D(a, UInt64, base, cnt, out); - break; - case tFloat32: - NA_GET1D(a, Float32, base, cnt, out); - break; - case tFloat64: - NA_GET1D(a, Float64, base, cnt, out); - break; - case tComplex32: - NA_GET1D(a, Float32, base, cnt, out); - break; - case tComplex64: - NA_GET1D(a, Float64, base, cnt, out); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_get1D_Int64", - 
a->descr->type_num); - PyErr_Print(); - return -1; - } - return 0; -} - -static Int64 * -NA_alloc1D_Int64(PyArrayObject *a, long offset, int cnt) -{ - Int64 *result = PyMem_New(Int64, (size_t)cnt); - if (!result) return NULL; - if (NA_get1D_Int64(a, offset, cnt, result) < 0) { - PyMem_Free(result); - return NULL; - } - return result; -} - -static int -NA_set1D_Int64(PyArrayObject *a, long offset, int cnt, Int64*in) -{ - char *base = NA_PTR(a) + offset; - - switch(a->descr->type_num) { - case tBool: - NA_SET1D(a, Bool, base, cnt, in); - break; - case tInt8: - NA_SET1D(a, Int8, base, cnt, in); - break; - case tUInt8: - NA_SET1D(a, UInt8, base, cnt, in); - break; - case tInt16: - NA_SET1D(a, Int16, base, cnt, in); - break; - case tUInt16: - NA_SET1D(a, UInt16, base, cnt, in); - break; - case tInt32: - NA_SET1D(a, Int32, base, cnt, in); - break; - case tUInt32: - NA_SET1D(a, UInt32, base, cnt, in); - break; - case tInt64: - NA_SET1D(a, Int64, base, cnt, in); - break; - case tUInt64: - NA_SET1D(a, UInt64, base, cnt, in); - break; - case tFloat32: - NA_SET1D(a, Float32, base, cnt, in); - break; - case tFloat64: - NA_SET1D(a, Float64, base, cnt, in); - break; - case tComplex32: - NA_SET_CMPLX(a, Float32, base, cnt, in); - break; - case tComplex64: - NA_SET_CMPLX(a, Float64, base, cnt, in); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_set1D_Int64", - a->descr->type_num); - PyErr_Print(); - return -1; - } - return 0; -} - -static int -NA_get1D_Complex64(PyArrayObject *a, long offset, int cnt, Complex64*out) -{ - char *base = NA_PTR(a) + offset; - - switch(a->descr->type_num) { - case tComplex64: - NA_GET1D(a, Complex64, base, cnt, out); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unsupported type %d in NA_get1D_Complex64", - a->descr->type_num); - PyErr_Print(); - return -1; - } - return 0; -} - -static int -NA_set1D_Complex64(PyArrayObject *a, long offset, int cnt, Complex64*in) -{ - char *base = NA_PTR(a) + offset; - - 
switch(a->descr->type_num) { - case tComplex64: - NA_SET1D(a, Complex64, base, cnt, in); - break; - default: - PyErr_Format( PyExc_TypeError, - "Unsupported type %d in NA_set1D_Complex64", - a->descr->type_num); - PyErr_Print(); - return -1; - } - return 0; -} - - -/* NA_ShapeEqual returns 1 if 'a' and 'b' have the same shape, 0 otherwise. -*/ -static int -NA_ShapeEqual(PyArrayObject *a, PyArrayObject *b) -{ - int i; - - if (!NA_NDArrayCheck((PyObject *) a) || - !NA_NDArrayCheck((PyObject*) b)) { - PyErr_Format( - PyExc_TypeError, - "NA_ShapeEqual: non-array as parameter."); - return -1; - } - if (a->nd != b->nd) - return 0; - for(i=0; ind; i++) - if (a->dimensions[i] != b->dimensions[i]) - return 0; - return 1; -} - -/* NA_ShapeLessThan returns 1 if a.shape[i] < b.shape[i] for all i, else 0. - If they have a different number of dimensions, it compares the innermost - overlapping dimensions of each. - */ -static int -NA_ShapeLessThan(PyArrayObject *a, PyArrayObject *b) -{ - int i; - int mindim, aoff, boff; - if (!NA_NDArrayCheck((PyObject *) a) || - !NA_NDArrayCheck((PyObject *) b)) { - PyErr_Format(PyExc_TypeError, - "NA_ShapeLessThan: non-array as parameter."); - return -1; - } - mindim = MIN(a->nd, b->nd); - aoff = a->nd - mindim; - boff = b->nd - mindim; - for(i=0; idimensions[i+aoff] >= b->dimensions[i+boff]) - return 0; - return 1; -} - -static int -NA_ByteOrder(void) -{ - unsigned long byteorder_test; - byteorder_test = 1; - if (*((char *) &byteorder_test)) - return NUM_LITTLE_ENDIAN; - else - return NUM_BIG_ENDIAN; -} - -static Bool -NA_IeeeSpecial32( Float32 *f, Int32 *mask) -{ - return NA_IeeeMask32(*f, *mask); -} - -static Bool -NA_IeeeSpecial64( Float64 *f, Int32 *mask) -{ - return NA_IeeeMask64(*f, *mask); -} - -static PyArrayObject * -NA_updateDataPtr(PyArrayObject *me) -{ - return me; -} - - -#define ELEM(x) (sizeof(x)/sizeof(x[0])) - -typedef struct -{ - char *name; - int typeno; -} NumarrayTypeNameMapping; - -static NumarrayTypeNameMapping 
NumarrayTypeNameMap[] = { - {"Any", tAny}, - {"Bool", tBool}, - {"Int8", tInt8}, - {"UInt8", tUInt8}, - {"Int16", tInt16}, - {"UInt16", tUInt16}, - {"Int32", tInt32}, - {"UInt32", tUInt32}, - {"Int64", tInt64}, - {"UInt64", tUInt64}, - {"Float32", tFloat32}, - {"Float64", tFloat64}, - {"Complex32", tComplex32}, - {"Complex64", tComplex64}, - {"Object", tObject}, - {"Long", tLong}, -}; - - -/* Convert NumarrayType 'typeno' into the string of the type's name. */ -static char * -NA_typeNoToName(int typeno) -{ - size_t i; - PyObject *typeObj; - int typeno2; - - for(i=0; ind == 0))) - return dims; - slen = PySequence_Length(a); - if (slen < 0) { - PyErr_Format(_Error, - "getShape: couldn't get sequence length."); - return -1; - } - if (!slen) { - *shape = 0; - return dims+1; - } else if (dims < MAXDIM) { - PyObject *item0 = PySequence_GetItem(a, 0); - if (item0) { - *shape = PySequence_Length(a); - dims = getShape(item0, ++shape, dims+1); - Py_DECREF(item0); - } else { - PyErr_Format(_Error, - "getShape: couldn't get sequence item."); - return -1; - } - } else { - PyErr_Format(_Error, - "getShape: sequence object nested more than MAXDIM deep."); - return -1; - } - return dims; -} - - - -typedef enum { - NOTHING, - NUMBER, - SEQUENCE -} SequenceConstraint; - -static int -setArrayFromSequence(PyArrayObject *a, PyObject *s, int dim, long offset) -{ - SequenceConstraint mustbe = NOTHING; - int i, seqlen=-1, slen = PySequence_Length(s); - - if (dim > a->nd) { - PyErr_Format(PyExc_ValueError, - "setArrayFromSequence: sequence/array dimensions mismatch."); - return -1; - } - - if (slen != a->dimensions[dim]) { - PyErr_Format(PyExc_ValueError, - "setArrayFromSequence: sequence/array shape mismatch."); - return -1; - } - - for(i=0; ind == 0)) && - ((mustbe == NOTHING) || (mustbe == NUMBER))) { - if (NA_setFromPythonScalar(a, offset, o) < 0) - return -2; - mustbe = NUMBER; - } else if (PyBytes_Check(o)) { - PyErr_SetString( PyExc_ValueError, - "setArrayFromSequence: strings can't 
define numeric numarray."); - return -3; - } else if (PySequence_Check(o)) { - - if ((mustbe == NOTHING) || (mustbe == SEQUENCE)) { - if (mustbe == NOTHING) { - mustbe = SEQUENCE; - seqlen = PySequence_Length(o); - } else if (PySequence_Length(o) != seqlen) { - PyErr_SetString( - PyExc_ValueError, - "Nested sequences with different lengths."); - return -5; - } - setArrayFromSequence(a, o, dim+1, offset); - } else { - PyErr_SetString(PyExc_ValueError, - "Nested sequences with different lengths."); - return -4; - } - } else { - PyErr_SetString(PyExc_ValueError, "Invalid sequence."); - return -6; - } - Py_DECREF(o); - offset += a->strides[dim]; - } - return 0; -} - -static PyObject * -NA_setArrayFromSequence(PyArrayObject *a, PyObject *s) -{ - maybelong shape[MAXDIM]; - - if (!PySequence_Check(s)) - return PyErr_Format( PyExc_TypeError, - "NA_setArrayFromSequence: (array, seq) expected."); - - if (getShape(s, shape, 0) < 0) - return NULL; - - if (!NA_updateDataPtr(a)) - return NULL; - - if (setArrayFromSequence(a, s, 0, 0) < 0) - return NULL; - - Py_INCREF(Py_None); - return Py_None; -} - -enum { - BOOL_SCALAR, - INT_SCALAR, - LONG_SCALAR, - FLOAT_SCALAR, - COMPLEX_SCALAR -}; - - -static int -_NA_maxType(PyObject *seq, int limit) -{ - if (limit > MAXDIM) { - PyErr_Format( PyExc_ValueError, - "NA_maxType: sequence nested too deep." 
); - return -1; - } - if (NA_NumArrayCheck(seq)) { - switch(PyArray(seq)->descr->type_num) { - case tBool: - return BOOL_SCALAR; - case tInt8: - case tUInt8: - case tInt16: - case tUInt16: - case tInt32: - case tUInt32: - return INT_SCALAR; - case tInt64: - case tUInt64: - return LONG_SCALAR; - case tFloat32: - case tFloat64: - return FLOAT_SCALAR; - case tComplex32: - case tComplex64: - return COMPLEX_SCALAR; - default: - PyErr_Format(PyExc_TypeError, - "Expecting a python numeric type, got something else."); - return -1; - } - } else if (PySequence_Check(seq) && !PyBytes_Check(seq)) { - long i, maxtype=BOOL_SCALAR, slen; - - slen = PySequence_Length(seq); - if (slen < 0) return -1; - - if (slen == 0) return INT_SCALAR; - - for(i=0; i maxtype) { - maxtype = newmax; - } - Py_DECREF(o); - } - return maxtype; - } else { -#if PY_VERSION_HEX >= 0x02030000 - if (PyBool_Check(seq)) - return BOOL_SCALAR; - else -#endif -#if defined(NPY_PY3K) - if (PyInt_Check(seq)) - return INT_SCALAR; - else if (PyLong_Check(seq)) -#else - if (PyLong_Check(seq)) -#endif - return LONG_SCALAR; - else if (PyFloat_Check(seq)) - return FLOAT_SCALAR; - else if (PyComplex_Check(seq)) - return COMPLEX_SCALAR; - else { - PyErr_Format(PyExc_TypeError, - "Expecting a python numeric type, got something else."); - return -1; - } - } -} - -static int -NA_maxType(PyObject *seq) -{ - int rval; - rval = _NA_maxType(seq, 0); - return rval; -} - -static int -NA_isPythonScalar(PyObject *o) -{ - int rval; - rval = PyInt_Check(o) || - PyLong_Check(o) || - PyFloat_Check(o) || - PyComplex_Check(o) || - (PyBytes_Check(o) && (PyBytes_Size(o) == 1)); - return rval; -} - -#if (NPY_SIZEOF_INTP == 8) -#define PlatBigInt PyInt_FromLong -#define PlatBigUInt PyLong_FromUnsignedLong -#else -#define PlatBigInt PyLong_FromLongLong -#define PlatBigUInt PyLong_FromUnsignedLongLong -#endif - - -static PyObject * -NA_getPythonScalar(PyArrayObject *a, long offset) -{ - int type = a->descr->type_num; - PyObject *rval = NULL; - - 
switch(type) { - case tBool: - case tInt8: - case tUInt8: - case tInt16: - case tUInt16: - case tInt32: { - Int64 v = NA_get_Int64(a, offset); - rval = PyInt_FromLong(v); - break; - } - case tUInt32: { - Int64 v = NA_get_Int64(a, offset); - rval = PlatBigUInt(v); - break; - } - case tInt64: { - Int64 v = NA_get_Int64(a, offset); - rval = PlatBigInt( v); - break; - } - case tUInt64: { - Int64 v = NA_get_Int64(a, offset); - rval = PlatBigUInt( v); - break; - } - case tFloat32: - case tFloat64: { - Float64 v = NA_get_Float64(a, offset); - rval = PyFloat_FromDouble( v ); - break; - } - case tComplex32: - case tComplex64: - { - Complex64 v = NA_get_Complex64(a, offset); - rval = PyComplex_FromDoubles(v.r, v.i); - break; - } - default: - rval = PyErr_Format(PyExc_TypeError, - "NA_getPythonScalar: bad type %d\n", - type); - } - return rval; -} - -static int -NA_overflow(PyArrayObject *a, Float64 v) -{ - if ((a->flags & CHECKOVERFLOW) == 0) return 0; - - switch(a->descr->type_num) { - case tBool: - return 0; - case tInt8: - if ((v < -128) || (v > 127)) goto _fail; - return 0; - case tUInt8: - if ((v < 0) || (v > 255)) goto _fail; - return 0; - case tInt16: - if ((v < -32768) || (v > 32767)) goto _fail; - return 0; - case tUInt16: - if ((v < 0) || (v > 65535)) goto _fail; - return 0; - case tInt32: - if ((v < -2147483648.) || - (v > 2147483647.)) goto _fail; - return 0; - case tUInt32: - if ((v < 0) || (v > 4294967295.)) goto _fail; - return 0; - case tInt64: - if ((v < -9223372036854775808.) 
|| - (v > 9223372036854775807.)) goto _fail; - return 0; -#if HAS_UINT64 - case tUInt64: - if ((v < 0) || - (v > 18446744073709551615.)) goto _fail; - return 0; -#endif - case tFloat32: - if ((v < -FLT_MAX) || (v > FLT_MAX)) goto _fail; - return 0; - case tFloat64: - return 0; - case tComplex32: - if ((v < -FLT_MAX) || (v > FLT_MAX)) goto _fail; - return 0; - case tComplex64: - return 0; - default: - PyErr_Format( PyExc_TypeError, - "Unknown type %d in NA_overflow", - a->descr->type_num ); - PyErr_Print(); - return -1; - } -_fail: - PyErr_Format(PyExc_OverflowError, "value out of range for array"); - return -1; -} - -static int -_setFromPythonScalarCore(PyArrayObject *a, long offset, PyObject*value, int entries) -{ - Int64 v; - if (entries >= 100) { - PyErr_Format(PyExc_RuntimeError, - "NA_setFromPythonScalar: __tonumtype__ conversion chain too long"); - return -1; - } else if (PyInt_Check(value)) { - v = PyInt_AsLong(value); - if (NA_overflow(a, v) < 0) - return -1; - NA_set_Int64(a, offset, v); - } else if (PyLong_Check(value)) { - if (a->descr->type_num == tInt64) { - v = (Int64) PyLong_AsLongLong( value ); - } else if (a->descr->type_num == tUInt64) { - v = (UInt64) PyLong_AsUnsignedLongLong( value ); - } else if (a->descr->type_num == tUInt32) { - v = PyLong_AsUnsignedLong(value); - } else { - v = PyLong_AsLongLong(value); - } - if (PyErr_Occurred()) - return -1; - if (NA_overflow(a, v) < 0) - return -1; - NA_set_Int64(a, offset, v); - } else if (PyFloat_Check(value)) { - Float64 v = PyFloat_AsDouble(value); - if (NA_overflow(a, v) < 0) - return -1; - NA_set_Float64(a, offset, v); - } else if (PyComplex_Check(value)) { - Complex64 vc; - vc.r = PyComplex_RealAsDouble(value); - vc.i = PyComplex_ImagAsDouble(value); - if (NA_overflow(a, vc.r) < 0) - return -1; - if (NA_overflow(a, vc.i) < 0) - return -1; - NA_set_Complex64(a, offset, vc); - } else if (PyObject_HasAttrString(value, "__tonumtype__")) { - int rval; - PyObject *type = 
NA_typeNoToTypeObject(a->descr->type_num); - if (!type) return -1; - value = PyObject_CallMethod( - value, "__tonumtype__", "(N)", type); - if (!value) return -1; - rval = _setFromPythonScalarCore(a, offset, value, entries+1); - Py_DECREF(value); - return rval; - } else if (PyBytes_Check(value)) { - long size = PyBytes_Size(value); - if ((size <= 0) || (size > 1)) { - PyErr_Format( PyExc_ValueError, - "NA_setFromPythonScalar: len(string) must be 1."); - return -1; - } - NA_set_Int64(a, offset, *PyBytes_AsString(value)); - } else { - PyErr_Format(PyExc_TypeError, - "NA_setFromPythonScalar: bad value type."); - return -1; - } - return 0; -} - -static int -NA_setFromPythonScalar(PyArrayObject *a, long offset, PyObject *value) -{ - if (a->flags & WRITABLE) - return _setFromPythonScalarCore(a, offset, value, 0); - else { - PyErr_Format( - PyExc_ValueError, "NA_setFromPythonScalar: assigment to readonly array buffer"); - return -1; - } -} - - -static int -NA_NDArrayCheck(PyObject *obj) { - return PyArray_Check(obj); -} - -static int -NA_NumArrayCheck(PyObject *obj) { - return PyArray_Check(obj); -} - -static int -NA_ComplexArrayCheck(PyObject *a) -{ - int rval = NA_NumArrayCheck(a); - if (rval > 0) { - PyArrayObject *arr = (PyArrayObject *) a; - switch(arr->descr->type_num) { - case tComplex64: case tComplex32: - return 1; - default: - return 0; - } - } - return rval; -} - -static unsigned long -NA_elements(PyArrayObject *a) -{ - int i; - unsigned long n = 1; - for(i = 0; ind; i++) - n *= a->dimensions[i]; - return n; -} - -static int -NA_typeObjectToTypeNo(PyObject *typeObj) -{ - PyArray_Descr *dtype; - int i; - if (PyArray_DescrConverter(typeObj, &dtype) == NPY_FAIL) i=-1; - else i=dtype->type_num; - return i; -} - -static int -NA_copyArray(PyArrayObject *to, const PyArrayObject *from) -{ - return PyArray_CopyInto(to, (PyArrayObject *)from); -} - -static PyArrayObject * -NA_copy(PyArrayObject *from) -{ - return (PyArrayObject *)PyArray_NewCopy(from, 0); -} - - -static 
PyObject * -NA_getType( PyObject *type) -{ - PyArray_Descr *typeobj = NULL; - if (!type && PyArray_DescrConverter(type, &typeobj) == NPY_FAIL) { - PyErr_Format(PyExc_ValueError, "NA_getType: unknown type."); - typeobj = NULL; - } - return (PyObject *)typeobj; -} - - -/* Call a standard "stride" function - ** - ** Stride functions always take one input and one output array. - ** They can handle n-dimensional data with arbitrary strides (of - ** either sign) for both the input and output numarray. Typically - ** these functions are used to copy data, byteswap, or align data. - ** - ** - ** It expects the following arguments: - ** - ** Number of iterations for each dimension as a tuple - ** Input Buffer Object - ** Offset in bytes for input buffer - ** Input strides (in bytes) for each dimension as a tuple - ** Output Buffer Object - ** Offset in bytes for output buffer - ** Output strides (in bytes) for each dimension as a tuple - ** An integer (Optional), typically the number of bytes to copy per - * element. - ** - ** Returns None - ** - ** The arguments expected by the standard stride functions that this - ** function calls are: - ** - ** Number of dimensions to iterate over - ** Long int value (from the optional last argument to - ** callStrideConvCFunc) - ** often unused by the C Function - ** An array of long ints. Each is the number of iterations for each - ** dimension. NOTE: the previous argument as well as the stride - ** arguments are reversed in order with respect to how they are - ** used in Python. Fastest changing dimension is the first element - ** in the numarray! - ** A void pointer to the input data buffer. - ** The starting offset for the input data buffer in bytes (long int). - ** An array of long int input strides (in bytes) [reversed as with - ** the iteration array] - ** A void pointer to the output data buffer. - ** The starting offset for the output data buffer in bytes (long int). 
- ** An array of long int output strides (in bytes) [also reversed] - */ - - -static PyObject * -NA_callStrideConvCFuncCore( - PyObject *self, int nshape, maybelong *shape, - PyObject *inbuffObj, long inboffset, - int NPY_UNUSED(ninbstrides), maybelong *inbstrides, - PyObject *outbuffObj, long outboffset, - int NPY_UNUSED(noutbstrides), maybelong *outbstrides, - long nbytes) -{ - CfuncObject *me = (CfuncObject *) self; - CFUNC_STRIDE_CONV_FUNC funcptr; - void *inbuffer, *outbuffer; - long inbsize, outbsize; - maybelong i, lshape[MAXDIM], in_strides[MAXDIM], out_strides[MAXDIM]; - maybelong shape_0, inbstr_0, outbstr_0; - - if (nshape == 0) { /* handle rank-0 numarray. */ - nshape = 1; - shape = &shape_0; - inbstrides = &inbstr_0; - outbstrides = &outbstr_0; - shape[0] = 1; - inbstrides[0] = outbstrides[0] = 0; - } - - for(i=0; idescr.type != CFUNC_STRIDING) - return PyErr_Format(PyExc_TypeError, - "NA_callStrideConvCFuncCore: problem with cfunc"); - - if ((inbsize = NA_getBufferPtrAndSize(inbuffObj, 1, &inbuffer)) < 0) - return PyErr_Format(_Error, - "%s: Problem with input buffer", me->descr.name); - - if ((outbsize = NA_getBufferPtrAndSize(outbuffObj, 0, &outbuffer)) < 0) - return PyErr_Format(_Error, - "%s: Problem with output buffer (read only?)", - me->descr.name); - - /* Check buffer alignment and bounds */ - if (NA_checkOneStriding(me->descr.name, nshape, lshape, - inboffset, in_strides, inbsize, - (me->descr.sizes[0] == -1) ? - nbytes : me->descr.sizes[0], - me->descr.align) || - NA_checkOneStriding(me->descr.name, nshape, lshape, - outboffset, out_strides, outbsize, - (me->descr.sizes[1] == -1) ? 
- nbytes : me->descr.sizes[1], - me->descr.align)) - return NULL; - - /* Cast function pointer and perform stride operation */ - funcptr = (CFUNC_STRIDE_CONV_FUNC) me->descr.fptr; - if ((*funcptr)(nshape-1, nbytes, lshape, - inbuffer, inboffset, in_strides, - outbuffer, outboffset, out_strides) == 0) { - Py_INCREF(Py_None); - return Py_None; - } else { - return NULL; - } -} - -static void -NA_stridesFromShape(int nshape, maybelong *shape, maybelong bytestride, - maybelong *strides) -{ - int i; - if (nshape > 0) { - for(i=0; i=0; i--) - strides[i] = strides[i+1]*shape[i+1]; - } -} - -static int -NA_OperatorCheck(PyObject *NPY_UNUSED(op)) { - return 0; -} - -static int -NA_ConverterCheck(PyObject *NPY_UNUSED(op)) { - return 0; -} - -static int -NA_UfuncCheck(PyObject *NPY_UNUSED(op)) { - return 0; -} - -static int -NA_CfuncCheck(PyObject *op) { - return PyObject_TypeCheck(op, &CfuncType); -} - -static int -NA_getByteOffset(PyArrayObject *NPY_UNUSED(array), int NPY_UNUSED(nindices), - maybelong *NPY_UNUSED(indices), long *NPY_UNUSED(offset)) -{ - return 0; -} - -static int -NA_swapAxes(PyArrayObject *array, int x, int y) -{ - long temp; - - if (((PyObject *) array) == Py_None) return 0; - - if (array->nd < 2) return 0; - - if (x < 0) x += array->nd; - if (y < 0) y += array->nd; - - if ((x < 0) || (x >= array->nd) || - (y < 0) || (y >= array->nd)) { - PyErr_Format(PyExc_ValueError, - "Specified dimension does not exist"); - return -1; - } - - temp = array->dimensions[x]; - array->dimensions[x] = array->dimensions[y]; - array->dimensions[y] = temp; - - temp = array->strides[x]; - array->strides[x] = array->strides[y]; - array->strides[y] = temp; - - PyArray_UpdateFlags(array, NPY_UPDATE_ALL); - - return 0; -} - -static PyObject * -NA_initModuleGlobal(char *modulename, char *globalname) -{ - PyObject *module, *dict, *global = NULL; - module = PyImport_ImportModule(modulename); - if (!module) { - PyErr_Format(PyExc_RuntimeError, - "Can't import '%s' module", - 
modulename); - goto _exit; - } - dict = PyModule_GetDict(module); - global = PyDict_GetItemString(dict, globalname); - if (!global) { - PyErr_Format(PyExc_RuntimeError, - "Can't find '%s' global in '%s' module.", - globalname, modulename); - goto _exit; - } - Py_DECREF(module); - Py_INCREF(global); -_exit: - return global; -} - - NumarrayType -NA_NumarrayType(PyObject *seq) -{ - int maxtype = NA_maxType(seq); - int rval; - switch(maxtype) { - case BOOL_SCALAR: - rval = tBool; - goto _exit; - case INT_SCALAR: - case LONG_SCALAR: - rval = tLong; /* tLong corresponds to C long int, - not Python long int */ - goto _exit; - case FLOAT_SCALAR: - rval = tFloat64; - goto _exit; - case COMPLEX_SCALAR: - rval = tComplex64; - goto _exit; - default: - PyErr_Format(PyExc_TypeError, - "expecting Python numeric scalar value; got something else."); - rval = -1; - } -_exit: - return rval; -} - -/* ignores bytestride */ -static PyArrayObject * -NA_NewAllFromBuffer(int ndim, maybelong *shape, NumarrayType type, - PyObject *bufferObject, maybelong byteoffset, - maybelong NPY_UNUSED(bytestride), int byteorder, - int NPY_UNUSED(aligned), int NPY_UNUSED(writeable)) -{ - PyArrayObject *self = NULL; - PyArray_Descr *dtype; - - if (type == tAny) - type = tDefault; - - dtype = PyArray_DescrFromType(type); - if (dtype == NULL) return NULL; - - if (byteorder != NA_ByteOrder()) { - PyArray_Descr *temp; - temp = PyArray_DescrNewByteorder(dtype, PyArray_SWAP); - Py_DECREF(dtype); - if (temp == NULL) return NULL; - dtype = temp; - } - - if (bufferObject == Py_None || bufferObject == NULL) { - self = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, dtype, - ndim, shape, NULL, NULL, - 0, NULL); - } - else { - npy_intp size = 1; - int i; - PyArrayObject *newself; - PyArray_Dims newdims; - for(i=0; iob_type == &PyArray_Type); -} - -static int -NA_NDArrayCheckExact(PyObject *op) { - return (op->ob_type == &PyArray_Type); -} - -static int -NA_OperatorCheckExact(PyObject *NPY_UNUSED(op)) { - 
return 0; -} - -static int -NA_ConverterCheckExact(PyObject *NPY_UNUSED(op)) { - return 0; -} - -static int -NA_UfuncCheckExact(PyObject *NPY_UNUSED(op)) { - return 0; -} - - -static int -NA_CfuncCheckExact(PyObject *op) { - return op->ob_type == &CfuncType; -} - -static char * -NA_getArrayData(PyArrayObject *obj) -{ - if (!NA_NDArrayCheck((PyObject *) obj)) { - PyErr_Format(PyExc_TypeError, - "expected an NDArray"); - } - return obj->data; -} - -/* Byteswap is not a flag of the array --- it is implicit in the data-type */ -static void -NA_updateByteswap(PyArrayObject *NPY_UNUSED(self)) -{ - return; -} - -static PyArray_Descr * -NA_DescrFromType(int type) -{ - if (type == tAny) - type = tDefault; - return PyArray_DescrFromType(type); -} - -static PyObject * -NA_Cast(PyArrayObject *a, int type) -{ - return PyArray_Cast(a, type); -} - - -/* The following function has much platform dependent code since - ** there is no platform-independent way of checking Floating Point - ** status bits - */ - -/* OSF/Alpha (Tru64) ---------------------------------------------*/ -#if defined(__osf__) && defined(__alpha) - -static int -NA_checkFPErrors(void) -{ - unsigned long fpstatus; - int retstatus; - -#include /* Should migrate to global scope */ - - fpstatus = ieee_get_fp_control(); - /* clear status bits as well as disable exception mode if on */ - ieee_set_fp_control( 0 ); - retstatus = - pyFPE_DIVIDE_BY_ZERO* (int)((IEEE_STATUS_DZE & fpstatus) != 0) - + pyFPE_OVERFLOW * (int)((IEEE_STATUS_OVF & fpstatus) != 0) - + pyFPE_UNDERFLOW * (int)((IEEE_STATUS_UNF & fpstatus) != 0) - + pyFPE_INVALID * (int)((IEEE_STATUS_INV & fpstatus) != 0); - - return retstatus; -} - -/* MS Windows -----------------------------------------------------*/ -#elif defined(_MSC_VER) - -#include - -static int -NA_checkFPErrors(void) -{ - int fpstatus = (int) _clear87(); - int retstatus = - pyFPE_DIVIDE_BY_ZERO * ((SW_ZERODIVIDE & fpstatus) != 0) - + pyFPE_OVERFLOW * ((SW_OVERFLOW & fpstatus) != 0) - + 
pyFPE_UNDERFLOW * ((SW_UNDERFLOW & fpstatus) != 0) - + pyFPE_INVALID * ((SW_INVALID & fpstatus) != 0); - - - return retstatus; -} - -/* Solaris --------------------------------------------------------*/ -/* --------ignoring SunOS ieee_flags approach, someone else can - ** deal with that! */ -#elif defined(sun) -#include - -static int -NA_checkFPErrors(void) -{ - int fpstatus; - int retstatus; - - fpstatus = (int) fpgetsticky(); - retstatus = pyFPE_DIVIDE_BY_ZERO * ((FP_X_DZ & fpstatus) != 0) - + pyFPE_OVERFLOW * ((FP_X_OFL & fpstatus) != 0) - + pyFPE_UNDERFLOW * ((FP_X_UFL & fpstatus) != 0) - + pyFPE_INVALID * ((FP_X_INV & fpstatus) != 0); - (void) fpsetsticky(0); - - return retstatus; -} - -#elif defined(__GLIBC__) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__MINGW32__) || (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) - -static int -NA_checkFPErrors(void) -{ - int fpstatus = (int) fetestexcept( - FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INVALID); - int retstatus = - pyFPE_DIVIDE_BY_ZERO * ((FE_DIVBYZERO & fpstatus) != 0) - + pyFPE_OVERFLOW * ((FE_OVERFLOW & fpstatus) != 0) - + pyFPE_UNDERFLOW * ((FE_UNDERFLOW & fpstatus) != 0) - + pyFPE_INVALID * ((FE_INVALID & fpstatus) != 0); - (void) feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | - FE_UNDERFLOW | FE_INVALID); - return retstatus; -} - -#else - -static int -NA_checkFPErrors(void) -{ - return 0; -} - -#endif - -static void -NA_clearFPErrors() -{ - NA_checkFPErrors(); -} - -/* Not supported yet */ -static int -NA_checkAndReportFPErrors(char *name) -{ - int error = NA_checkFPErrors(); - if (error) { - PyObject *ans; - char msg[128]; - strcpy(msg, " in "); - strncat(msg, name, 100); - ans = PyObject_CallFunction(pHandleErrorFunc, "(is)", error, msg); - if (!ans) return -1; - Py_DECREF(ans); /* Py_None */ - } - return 0; - -} - - -#define WITHIN32(v, f) (((v) >= f##_MIN32) && ((v) <= f##_MAX32)) -#define WITHIN64(v, f) (((v) >= f##_MIN64) && ((v) <= f##_MAX64)) - -static Bool -NA_IeeeMask32( 
Float32 f, Int32 mask) -{ - Int32 category; - UInt32 v = *(UInt32 *) &f; - - if (v & BIT(31)) { - if (WITHIN32(v, NEG_NORMALIZED)) { - category = MSK_NEG_NOR; - } else if (WITHIN32(v, NEG_DENORMALIZED)) { - category = MSK_NEG_DEN; - } else if (WITHIN32(v, NEG_SIGNAL_NAN)) { - category = MSK_NEG_SNAN; - } else if (WITHIN32(v, NEG_QUIET_NAN)) { - category = MSK_NEG_QNAN; - } else if (v == NEG_INFINITY_MIN32) { - category = MSK_NEG_INF; - } else if (v == NEG_ZERO_MIN32) { - category = MSK_NEG_ZERO; - } else if (v == INDETERMINATE_MIN32) { - category = MSK_INDETERM; - } else { - category = MSK_BUG; - } - } else { - if (WITHIN32(v, POS_NORMALIZED)) { - category = MSK_POS_NOR; - } else if (WITHIN32(v, POS_DENORMALIZED)) { - category = MSK_POS_DEN; - } else if (WITHIN32(v, POS_SIGNAL_NAN)) { - category = MSK_POS_SNAN; - } else if (WITHIN32(v, POS_QUIET_NAN)) { - category = MSK_POS_QNAN; - } else if (v == POS_INFINITY_MIN32) { - category = MSK_POS_INF; - } else if (v == POS_ZERO_MIN32) { - category = MSK_POS_ZERO; - } else { - category = MSK_BUG; - } - } - return (category & mask) != 0; -} - -static Bool -NA_IeeeMask64( Float64 f, Int32 mask) -{ - Int32 category; - UInt64 v = *(UInt64 *) &f; - - if (v & BIT(63)) { - if (WITHIN64(v, NEG_NORMALIZED)) { - category = MSK_NEG_NOR; - } else if (WITHIN64(v, NEG_DENORMALIZED)) { - category = MSK_NEG_DEN; - } else if (WITHIN64(v, NEG_SIGNAL_NAN)) { - category = MSK_NEG_SNAN; - } else if (WITHIN64(v, NEG_QUIET_NAN)) { - category = MSK_NEG_QNAN; - } else if (v == NEG_INFINITY_MIN64) { - category = MSK_NEG_INF; - } else if (v == NEG_ZERO_MIN64) { - category = MSK_NEG_ZERO; - } else if (v == INDETERMINATE_MIN64) { - category = MSK_INDETERM; - } else { - category = MSK_BUG; - } - } else { - if (WITHIN64(v, POS_NORMALIZED)) { - category = MSK_POS_NOR; - } else if (WITHIN64(v, POS_DENORMALIZED)) { - category = MSK_POS_DEN; - } else if (WITHIN64(v, POS_SIGNAL_NAN)) { - category = MSK_POS_SNAN; - } else if (WITHIN64(v, POS_QUIET_NAN)) { - 
category = MSK_POS_QNAN; - } else if (v == POS_INFINITY_MIN64) { - category = MSK_POS_INF; - } else if (v == POS_ZERO_MIN64) { - category = MSK_POS_ZERO; - } else { - category = MSK_BUG; - } - } - return (category & mask) != 0; -} - -static PyArrayObject * -NA_FromDimsStridesDescrAndData(int nd, maybelong *d, maybelong *s, PyArray_Descr *descr, char *data) -{ - return (PyArrayObject *)\ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, d, - s, data, 0, NULL); -} - -static PyArrayObject * -NA_FromDimsTypeAndData(int nd, maybelong *d, int type, char *data) -{ - PyArray_Descr *descr = NA_DescrFromType(type); - return NA_FromDimsStridesDescrAndData(nd, d, NULL, descr, data); -} - -static PyArrayObject * -NA_FromDimsStridesTypeAndData(int nd, maybelong *shape, maybelong *strides, - int type, char *data) -{ - PyArray_Descr *descr = NA_DescrFromType(type); - return NA_FromDimsStridesDescrAndData(nd, shape, strides, descr, data); -} - - -typedef struct -{ - NumarrayType type_num; - char suffix[5]; - int itemsize; -} scipy_typestr; - -static scipy_typestr scipy_descriptors[ ] = { - { tAny, "", 0}, - - { tBool, "b1", 1}, - - { tInt8, "i1", 1}, - { tUInt8, "u1", 1}, - - { tInt16, "i2", 2}, - { tUInt16, "u2", 2}, - - { tInt32, "i4", 4}, - { tUInt32, "u4", 4}, - - { tInt64, "i8", 8}, - { tUInt64, "u8", 8}, - - { tFloat32, "f4", 4}, - { tFloat64, "f8", 8}, - - { tComplex32, "c8", 8}, - { tComplex64, "c16", 16} -}; - - -static int -NA_scipy_typestr(NumarrayType t, int byteorder, char *typestr) -{ - size_t i; - if (byteorder) - strcpy(typestr, ">"); - else - strcpy(typestr, "<"); - for(i=0; itype_num == t) { - strncat(typestr, ts->suffix, 4); - return 0; - } - } - return -1; -} - -static PyArrayObject * -NA_FromArrayStruct(PyObject *obj) -{ - return (PyArrayObject *)PyArray_FromStructInterface(obj); -} - - -static PyObject *_Error; - -void *libnumarray_API[] = { - (void*) getBuffer, - (void*) isBuffer, - (void*) getWriteBufferDataPtr, - (void*) isBufferWriteable, - (void*) 
getReadBufferDataPtr, - (void*) getBufferSize, - (void*) num_log, - (void*) num_log10, - (void*) num_pow, - (void*) num_acosh, - (void*) num_asinh, - (void*) num_atanh, - (void*) num_round, - (void*) int_dividebyzero_error, - (void*) int_overflow_error, - (void*) umult64_overflow, - (void*) smult64_overflow, - (void*) NA_Done, - (void*) NA_NewAll, - (void*) NA_NewAllStrides, - (void*) NA_New, - (void*) NA_Empty, - (void*) NA_NewArray, - (void*) NA_vNewArray, - (void*) NA_ReturnOutput, - (void*) NA_getBufferPtrAndSize, - (void*) NA_checkIo, - (void*) NA_checkOneCBuffer, - (void*) NA_checkNCBuffers, - (void*) NA_checkOneStriding, - (void*) NA_new_cfunc, - (void*) NA_add_cfunc, - (void*) NA_InputArray, - (void*) NA_OutputArray, - (void*) NA_IoArray, - (void*) NA_OptionalOutputArray, - (void*) NA_get_offset, - (void*) NA_get_Float64, - (void*) NA_set_Float64, - (void*) NA_get_Complex64, - (void*) NA_set_Complex64, - (void*) NA_get_Int64, - (void*) NA_set_Int64, - (void*) NA_get1_Float64, - (void*) NA_get2_Float64, - (void*) NA_get3_Float64, - (void*) NA_set1_Float64, - (void*) NA_set2_Float64, - (void*) NA_set3_Float64, - (void*) NA_get1_Complex64, - (void*) NA_get2_Complex64, - (void*) NA_get3_Complex64, - (void*) NA_set1_Complex64, - (void*) NA_set2_Complex64, - (void*) NA_set3_Complex64, - (void*) NA_get1_Int64, - (void*) NA_get2_Int64, - (void*) NA_get3_Int64, - (void*) NA_set1_Int64, - (void*) NA_set2_Int64, - (void*) NA_set3_Int64, - (void*) NA_get1D_Float64, - (void*) NA_set1D_Float64, - (void*) NA_get1D_Int64, - (void*) NA_set1D_Int64, - (void*) NA_get1D_Complex64, - (void*) NA_set1D_Complex64, - (void*) NA_ShapeEqual, - (void*) NA_ShapeLessThan, - (void*) NA_ByteOrder, - (void*) NA_IeeeSpecial32, - (void*) NA_IeeeSpecial64, - (void*) NA_updateDataPtr, - (void*) NA_typeNoToName, - (void*) NA_nameToTypeNo, - (void*) NA_typeNoToTypeObject, - (void*) NA_intTupleFromMaybeLongs, - (void*) NA_maybeLongsFromIntTuple, - (void*) NA_intTupleProduct, - (void*) 
NA_isIntegerSequence, - (void*) NA_setArrayFromSequence, - (void*) NA_maxType, - (void*) NA_isPythonScalar, - (void*) NA_getPythonScalar, - (void*) NA_setFromPythonScalar, - (void*) NA_NDArrayCheck, - (void*) NA_NumArrayCheck, - (void*) NA_ComplexArrayCheck, - (void*) NA_elements, - (void*) NA_typeObjectToTypeNo, - (void*) NA_copyArray, - (void*) NA_copy, - (void*) NA_getType, - (void*) NA_callCUFuncCore, - (void*) NA_callStrideConvCFuncCore, - (void*) NA_stridesFromShape, - (void*) NA_OperatorCheck, - (void*) NA_ConverterCheck, - (void*) NA_UfuncCheck, - (void*) NA_CfuncCheck, - (void*) NA_getByteOffset, - (void*) NA_swapAxes, - (void*) NA_initModuleGlobal, - (void*) NA_NumarrayType, - (void*) NA_NewAllFromBuffer, - (void*) NA_alloc1D_Float64, - (void*) NA_alloc1D_Int64, - (void*) NA_updateAlignment, - (void*) NA_updateContiguous, - (void*) NA_updateStatus, - (void*) NA_NumArrayCheckExact, - (void*) NA_NDArrayCheckExact, - (void*) NA_OperatorCheckExact, - (void*) NA_ConverterCheckExact, - (void*) NA_UfuncCheckExact, - (void*) NA_CfuncCheckExact, - (void*) NA_getArrayData, - (void*) NA_updateByteswap, - (void*) NA_DescrFromType, - (void*) NA_Cast, - (void*) NA_checkFPErrors, - (void*) NA_clearFPErrors, - (void*) NA_checkAndReportFPErrors, - (void*) NA_IeeeMask32, - (void*) NA_IeeeMask64, - (void*) _NA_callStridingHelper, - (void*) NA_FromDimsStridesDescrAndData, - (void*) NA_FromDimsTypeAndData, - (void*) NA_FromDimsStridesTypeAndData, - (void*) NA_scipy_typestr, - (void*) NA_FromArrayStruct -}; - -#if (!defined(METHOD_TABLE_EXISTS)) -static PyMethodDef _libnumarrayMethods[] = { - {NULL, NULL, 0, NULL} /* Sentinel */ -}; -#endif - -/* boiler plate API init */ -#if defined(NPY_PY3K) - -#define RETVAL m - -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_capi", - NULL, - -1, - _libnumarrayMethods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__capi(void) -#else - -#define RETVAL - -PyMODINIT_FUNC init_capi(void) -#endif -{ - PyObject *m; 
- PyObject *c_api_object; - - _Error = PyErr_NewException("numpy.numarray._capi.error", NULL, NULL); - - /* Create a CObject containing the API pointer array's address */ -#if defined(NPY_PY3K) - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("_capi", _libnumarrayMethods); -#endif - -#if defined(NPY_PY3K) - c_api_object = PyCapsule_New((void *)libnumarray_API, NULL, NULL); - if (c_api_object == NULL) { - PyErr_Clear(); - } -#else - c_api_object = PyCObject_FromVoidPtr((void *)libnumarray_API, NULL); -#endif - - if (c_api_object != NULL) { - /* Create a name for this object in the module's namespace */ - PyObject *d = PyModule_GetDict(m); - - PyDict_SetItemString(d, "_C_API", c_api_object); - PyDict_SetItemString(d, "error", _Error); - Py_DECREF(c_api_object); - } - else { - return RETVAL; - } - if (PyModule_AddObject(m, "__version__", PyUString_FromString("0.9")) < 0) { - return RETVAL; - } - if (_import_array() < 0) { - return RETVAL; - } - deferred_libnumarray_init(); - return RETVAL; -} diff --git a/numpy-1.6.2/numpy/numarray/alter_code1.py b/numpy-1.6.2/numpy/numarray/alter_code1.py deleted file mode 100644 index ae950e7e01..0000000000 --- a/numpy-1.6.2/numpy/numarray/alter_code1.py +++ /dev/null @@ -1,265 +0,0 @@ -""" -This module converts code written for numarray to run with numpy - -Makes the following changes: - * Changes import statements - - import numarray.package - --> import numpy.numarray.package as numarray_package - with all numarray.package in code changed to numarray_package - - import numarray --> import numpy.numarray as numarray - import numarray.package as --> import numpy.numarray.package as - - from numarray import --> from numpy.numarray import - from numarray.package import - --> from numpy.numarray.package import - - package can be convolve, image, nd_image, mlab, linear_algebra, ma, - matrix, fft, random_array - - - * Makes search and replace changes to: - - .imaginary --> .imag - - .flat --> .ravel() (most of the time) - - 
.byteswapped() --> .byteswap(False) - - .byteswap() --> .byteswap(True) - - .info() --> numarray.info(self) - - .isaligned() --> .flags.aligned - - .isbyteswapped() --> (not .dtype.isnative) - - .typecode() --> .dtype.char - - .iscontiguous() --> .flags.contiguous - - .is_c_array() --> .flags.carray and .dtype.isnative - - .is_fortran_contiguous() --> .flags.fortran - - .is_f_array() --> .dtype.isnative and .flags.farray - - .itemsize() --> .itemsize - - .nelements() --> .size - - self.new(type) --> numarray.newobj(self, type) - - .repeat(r) --> .repeat(r, axis=0) - - .size() --> .size - - self.type() -- numarray.typefrom(self) - - .typecode() --> .dtype.char - - .stddev() --> .std() - - .togglebyteorder() --> numarray.togglebyteorder(self) - - .getshape() --> .shape - - .setshape(obj) --> .shape=obj - - .getflat() --> .ravel() - - .getreal() --> .real - - .setreal() --> .real = - - .getimag() --> .imag - - .setimag() --> .imag = - - .getimaginary() --> .imag - - .setimaginary() --> .imag - -""" -__all__ = ['convertfile', 'convertall', 'converttree', 'convertsrc'] - -import sys -import os -import re -import glob - -def changeimports(fstr, name, newname): - importstr = 'import %s' % name - importasstr = 'import %s as ' % name - fromstr = 'from %s import ' % name - fromall=0 - - name_ = name - if ('.' 
in name): - name_ = name.replace('.','_') - - fstr = re.sub(r'(import\s+[^,\n\r]+,\s*)(%s)' % name, - "\\1%s as %s" % (newname, name), fstr) - fstr = fstr.replace(importasstr, 'import %s as ' % newname) - fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name_)) - if (name_ != name): - fstr = fstr.replace(name, name_) - - ind = 0 - Nlen = len(fromstr) - Nlen2 = len("from %s import " % newname) - while 1: - found = fstr.find(fromstr,ind) - if (found < 0): - break - ind = found + Nlen - if fstr[ind] == '*': - continue - fstr = "%sfrom %s import %s" % (fstr[:found], newname, fstr[ind:]) - ind += Nlen2 - Nlen - return fstr, fromall - -flatindex_re = re.compile('([.]flat(\s*?[[=]))') - - -def addimport(astr): - # find the first line with import on it - ind = astr.find('import') - start = astr.rfind(os.linesep, 0, ind) - astr = "%s%s%s%s" % (astr[:start], os.linesep, - "import numpy.numarray as numarray", - astr[start:]) - return astr - -def replaceattr(astr): - astr = astr.replace(".imaginary", ".imag") - astr = astr.replace(".byteswapped()",".byteswap(False)") - astr = astr.replace(".byteswap()", ".byteswap(True)") - astr = astr.replace(".isaligned()", ".flags.aligned") - astr = astr.replace(".iscontiguous()",".flags.contiguous") - astr = astr.replace(".is_fortran_contiguous()",".flags.fortran") - astr = astr.replace(".itemsize()",".itemsize") - astr = astr.replace(".size()",".size") - astr = astr.replace(".nelements()",".size") - astr = astr.replace(".typecode()",".dtype.char") - astr = astr.replace(".stddev()",".std()") - astr = astr.replace(".getshape()", ".shape") - astr = astr.replace(".getflat()", ".ravel()") - astr = astr.replace(".getreal", ".real") - astr = astr.replace(".getimag", ".imag") - astr = astr.replace(".getimaginary", ".imag") - - # preserve uses of flat that should be o.k. 
- tmpstr = flatindex_re.sub(r"@@@@\2",astr) - # replace other uses of flat - tmpstr = tmpstr.replace(".flat",".ravel()") - # put back .flat where it was valid - astr = tmpstr.replace("@@@@", ".flat") - return astr - -info_re = re.compile(r'(\S+)\s*[.]\s*info\s*[(]\s*[)]') -new_re = re.compile(r'(\S+)\s*[.]\s*new\s*[(]\s*(\S+)\s*[)]') -toggle_re = re.compile(r'(\S+)\s*[.]\s*togglebyteorder\s*[(]\s*[)]') -type_re = re.compile(r'(\S+)\s*[.]\s*type\s*[(]\s*[)]') - -isbyte_re = re.compile(r'(\S+)\s*[.]\s*isbyteswapped\s*[(]\s*[)]') -iscarr_re = re.compile(r'(\S+)\s*[.]\s*is_c_array\s*[(]\s*[)]') -isfarr_re = re.compile(r'(\S+)\s*[.]\s*is_f_array\s*[(]\s*[)]') -repeat_re = re.compile(r'(\S+)\s*[.]\s*repeat\s*[(]\s*(\S+)\s*[)]') - -setshape_re = re.compile(r'(\S+)\s*[.]\s*setshape\s*[(]\s*(\S+)\s*[)]') -setreal_re = re.compile(r'(\S+)\s*[.]\s*setreal\s*[(]\s*(\S+)\s*[)]') -setimag_re = re.compile(r'(\S+)\s*[.]\s*setimag\s*[(]\s*(\S+)\s*[)]') -setimaginary_re = re.compile(r'(\S+)\s*[.]\s*setimaginary\s*[(]\s*(\S+)\s*[)]') -def replaceother(astr): - # self.info() --> numarray.info(self) - # self.new(type) --> numarray.newobj(self, type) - # self.togglebyteorder() --> numarray.togglebyteorder(self) - # self.type() --> numarray.typefrom(self) - (astr, n1) = info_re.subn('numarray.info(\\1)', astr) - (astr, n2) = new_re.subn('numarray.newobj(\\1, \\2)', astr) - (astr, n3) = toggle_re.subn('numarray.togglebyteorder(\\1)', astr) - (astr, n4) = type_re.subn('numarray.typefrom(\\1)', astr) - if (n1+n2+n3+n4 > 0): - astr = addimport(astr) - - astr = isbyte_re.sub('not \\1.dtype.isnative', astr) - astr = iscarr_re.sub('\\1.dtype.isnative and \\1.flags.carray', astr) - astr = isfarr_re.sub('\\1.dtype.isnative and \\1.flags.farray', astr) - astr = repeat_re.sub('\\1.repeat(\\2, axis=0)', astr) - astr = setshape_re.sub('\\1.shape = \\2', astr) - astr = setreal_re.sub('\\1.real = \\2', astr) - astr = setimag_re.sub('\\1.imag = \\2', astr) - astr = setimaginary_re.sub('\\1.imag = \\2', 
astr) - return astr - -import datetime -def fromstr(filestr): - savestr = filestr[:] - filestr, fromall = changeimports(filestr, 'numarray', 'numpy.numarray') - base = 'numarray' - newbase = 'numpy.numarray' - for sub in ['', 'convolve', 'image', 'nd_image', 'mlab', 'linear_algebra', - 'ma', 'matrix', 'fft', 'random_array']: - if sub != '': - sub = '.'+sub - filestr, fromall = changeimports(filestr, base+sub, newbase+sub) - - filestr = replaceattr(filestr) - filestr = replaceother(filestr) - if savestr != filestr: - name = os.path.split(sys.argv[0])[-1] - today = datetime.date.today().strftime('%b %d, %Y') - filestr = '## Automatically adapted for '\ - 'numpy.numarray %s by %s\n\n%s' % (today, name, filestr) - return filestr, 1 - return filestr, 0 - -def makenewfile(name, filestr): - fid = file(name, 'w') - fid.write(filestr) - fid.close() - -def convertfile(filename, orig=1): - """Convert the filename given from using Numarray to using NumPy - - Copies the file to filename.orig and then over-writes the file - with the updated code - """ - fid = open(filename) - filestr = fid.read() - fid.close() - filestr, changed = fromstr(filestr) - if changed: - if orig: - base, ext = os.path.splitext(filename) - os.rename(filename, base+".orig") - else: - os.remove(filename) - makenewfile(filename, filestr) - -def fromargs(args): - filename = args[1] - convertfile(filename) - -def convertall(direc=os.path.curdir, orig=1): - """Convert all .py files to use numpy.oldnumeric (from Numeric) in the directory given - - For each file, a backup of .py is made as - .py.orig. A new file named .py - is then written with the updated code. 
- """ - files = glob.glob(os.path.join(direc,'*.py')) - for afile in files: - if afile[-8:] == 'setup.py': continue - convertfile(afile, orig) - -header_re = re.compile(r'(numarray/libnumarray.h)') - -def convertsrc(direc=os.path.curdir, ext=None, orig=1): - """Replace Numeric/arrayobject.h with numpy/oldnumeric.h in all files in the - directory with extension give by list ext (if ext is None, then all files are - replaced).""" - if ext is None: - files = glob.glob(os.path.join(direc,'*')) - else: - files = [] - for aext in ext: - files.extend(glob.glob(os.path.join(direc,"*.%s" % aext))) - for afile in files: - fid = open(afile) - fstr = fid.read() - fid.close() - fstr, n = header_re.subn(r'numpy/libnumarray.h',fstr) - if n > 0: - if orig: - base, ext = os.path.splitext(afile) - os.rename(afile, base+".orig") - else: - os.remove(afile) - makenewfile(afile, fstr) - -def _func(arg, dirname, fnames): - convertall(dirname, orig=0) - convertsrc(dirname, ['h','c'], orig=0) - -def converttree(direc=os.path.curdir): - """Convert all .py files in the tree given - - """ - os.path.walk(direc, _func, None) - - -if __name__ == '__main__': - converttree(sys.argv) diff --git a/numpy-1.6.2/numpy/numarray/alter_code2.py b/numpy-1.6.2/numpy/numarray/alter_code2.py deleted file mode 100644 index 4bb773850c..0000000000 --- a/numpy-1.6.2/numpy/numarray/alter_code2.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -This module converts code written for numpy.numarray to work -with numpy - -FIXME: finish this. 
- -""" -#__all__ = ['convertfile', 'convertall', 'converttree'] -__all__ = [] - -import warnings -warnings.warn("numpy.numarray.alter_code2 is not working yet.") -import sys - -import os -import glob - -def makenewfile(name, filestr): - fid = file(name, 'w') - fid.write(filestr) - fid.close() - -def getandcopy(name): - fid = file(name) - filestr = fid.read() - fid.close() - base, ext = os.path.splitext(name) - makenewfile(base+'.orig', filestr) - return filestr - -def convertfile(filename): - """Convert the filename given from using Numeric to using NumPy - - Copies the file to filename.orig and then over-writes the file - with the updated code - """ - filestr = getandcopy(filename) - filestr = fromstr(filestr) - makenewfile(filename, filestr) - -def fromargs(args): - filename = args[1] - convertfile(filename) - -def convertall(direc=os.path.curdir): - """Convert all .py files to use NumPy (from Numeric) in the directory given - - For each file, a backup of .py is made as - .py.orig. A new file named .py - is then written with the updated code. 
- """ - files = glob.glob(os.path.join(direc,'*.py')) - for afile in files: - convertfile(afile) - -def _func(arg, dirname, fnames): - convertall(dirname) - -def converttree(direc=os.path.curdir): - """Convert all .py files in the tree given - - """ - os.path.walk(direc, _func, None) - - -if __name__ == '__main__': - fromargs(sys.argv) diff --git a/numpy-1.6.2/numpy/numarray/compat.py b/numpy-1.6.2/numpy/numarray/compat.py deleted file mode 100644 index e0d13a7c28..0000000000 --- a/numpy-1.6.2/numpy/numarray/compat.py +++ /dev/null @@ -1,4 +0,0 @@ - -__all__ = ['NewAxis', 'ArrayType'] - -from numpy import newaxis as NewAxis, ndarray as ArrayType diff --git a/numpy-1.6.2/numpy/numarray/convolve.py b/numpy-1.6.2/numpy/numarray/convolve.py deleted file mode 100644 index 68a4730a19..0000000000 --- a/numpy-1.6.2/numpy/numarray/convolve.py +++ /dev/null @@ -1,14 +0,0 @@ -try: - from stsci.convolve import * -except ImportError: - try: - from scipy.stsci.convolve import * - except ImportError: - msg = \ -"""The convolve package is not installed. - -It can be downloaded by checking out the latest source from -http://svn.scipy.org/svn/scipy/trunk/Lib/stsci or by downloading and -installing all of SciPy from http://www.scipy.org. 
-""" - raise ImportError(msg) diff --git a/numpy-1.6.2/numpy/numarray/fft.py b/numpy-1.6.2/numpy/numarray/fft.py deleted file mode 100644 index c7ac6a27ed..0000000000 --- a/numpy-1.6.2/numpy/numarray/fft.py +++ /dev/null @@ -1,7 +0,0 @@ - -from numpy.oldnumeric.fft import * -import numpy.oldnumeric.fft as nof - -__all__ = nof.__all__ - -del nof diff --git a/numpy-1.6.2/numpy/numarray/functions.py b/numpy-1.6.2/numpy/numarray/functions.py deleted file mode 100644 index 1c2141c98b..0000000000 --- a/numpy-1.6.2/numpy/numarray/functions.py +++ /dev/null @@ -1,498 +0,0 @@ -# missing Numarray defined names (in from numarray import *) -##__all__ = ['ClassicUnpickler', 'Complex32_fromtype', -## 'Complex64_fromtype', 'ComplexArray', 'Error', -## 'MAX_ALIGN', 'MAX_INT_SIZE', 'MAX_LINE_WIDTH', -## 'NDArray', 'NewArray', 'NumArray', -## 'NumError', 'PRECISION', 'Py2NumType', -## 'PyINT_TYPES', 'PyLevel2Type', 'PyNUMERIC_TYPES', 'PyREAL_TYPES', -## 'SUPPRESS_SMALL', -## 'SuitableBuffer', 'USING_BLAS', -## 'UsesOpPriority', -## 'codegenerator', 'generic', 'libnumarray', 'libnumeric', -## 'make_ufuncs', 'memory', -## 'numarrayall', 'numarraycore', 'numinclude', 'safethread', -## 'typecode', 'typecodes', 'typeconv', 'ufunc', 'ufuncFactory', -## 'ieeemask'] - -__all__ = ['asarray', 'ones', 'zeros', 'array', 'where'] -__all__ += ['vdot', 'dot', 'matrixmultiply', 'ravel', 'indices', - 'arange', 'concatenate', 'all', 'allclose', 'alltrue', 'and_', - 'any', 'argmax', 'argmin', 'argsort', 'around', 'array_equal', - 'array_equiv', 'arrayrange', 'array_str', 'array_repr', - 'array2list', 'average', 'choose', 'CLIP', 'RAISE', 'WRAP', - 'clip', 'compress', 'copy', 'copy_reg', - 'diagonal', 'divide_remainder', 'e', 'explicit_type', 'pi', - 'flush_caches', 'fromfile', 'os', 'sys', 'STRICT', - 'SLOPPY', 'WARN', 'EarlyEOFError', 'SizeMismatchError', - 'SizeMismatchWarning', 'FileSeekWarning', 'fromstring', - 'fromfunction', 'fromlist', 'getShape', 'getTypeObject', - 'identity', 'info', 
'innerproduct', 'inputarray', - 'isBigEndian', 'kroneckerproduct', 'lexsort', 'math', - 'operator', 'outerproduct', 'put', 'putmask', 'rank', - 'repeat', 'reshape', 'resize', 'round', 'searchsorted', - 'shape', 'size', 'sometrue', 'sort', 'swapaxes', 'take', - 'tcode', 'tname', 'tensormultiply', 'trace', 'transpose', - 'types', 'value', 'cumsum', 'cumproduct', 'nonzero', 'newobj', - 'togglebyteorder' - ] - -import copy -import copy_reg -import types -import os -import sys -import math -import operator - -from numpy import dot as matrixmultiply, dot, vdot, ravel, concatenate, all,\ - allclose, any, argsort, array_equal, array_equiv,\ - array_str, array_repr, CLIP, RAISE, WRAP, clip, concatenate, \ - diagonal, e, pi, inner as innerproduct, nonzero, \ - outer as outerproduct, kron as kroneckerproduct, lexsort, putmask, rank, \ - resize, searchsorted, shape, size, sort, swapaxes, trace, transpose -import numpy as np - -from numerictypes import typefrom - -if sys.version_info[0] >= 3: - import copyreg as copy_reg - -isBigEndian = sys.byteorder != 'little' -value = tcode = 'f' -tname = 'Float32' - -# If dtype is not None, then it is used -# If type is not None, then it is used -# If typecode is not None then it is used -# If use_default is True, then the default -# data-type is returned if all are None -def type2dtype(typecode, type, dtype, use_default=True): - if dtype is None: - if type is None: - if use_default or typecode is not None: - dtype = np.dtype(typecode) - else: - dtype = np.dtype(type) - if use_default and dtype is None: - dtype = np.dtype('int') - return dtype - -def fromfunction(shape, dimensions, type=None, typecode=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, 1) - return np.fromfunction(shape, dimensions, dtype=dtype) -def ones(shape, type=None, typecode=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, 1) - return np.ones(shape, dtype) - -def zeros(shape, type=None, typecode=None, dtype=None): - dtype = 
type2dtype(typecode, type, dtype, 1) - return np.zeros(shape, dtype) - -def where(condition, x=None, y=None, out=None): - if x is None and y is None: - arr = np.where(condition) - else: - arr = np.where(condition, x, y) - if out is not None: - out[...] = arr - return out - return arr - -def indices(shape, type=None): - return np.indices(shape, type) - -def arange(a1, a2=None, stride=1, type=None, shape=None, - typecode=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, 0) - return np.arange(a1, a2, stride, dtype) - -arrayrange = arange - -def alltrue(x, axis=0): - return np.alltrue(x, axis) - -def and_(a, b): - """Same as a & b - """ - return a & b - -def divide_remainder(a, b): - a, b = asarray(a), asarray(b) - return (a/b,a%b) - -def around(array, digits=0, output=None): - ret = np.around(array, digits, output) - if output is None: - return ret - return - -def array2list(arr): - return arr.tolist() - - -def choose(selector, population, outarr=None, clipmode=RAISE): - a = np.asarray(selector) - ret = a.choose(population, out=outarr, mode=clipmode) - if outarr is None: - return ret - return - -def compress(condition, a, axis=0): - return np.compress(condition, a, axis) - -# only returns a view -def explicit_type(a): - x = a.view() - return x - -# stub -def flush_caches(): - pass - - -class EarlyEOFError(Exception): - "Raised in fromfile() if EOF unexpectedly occurs." - pass - -class SizeMismatchError(Exception): - "Raised in fromfile() if file size does not match shape." - pass - -class SizeMismatchWarning(Warning): - "Issued in fromfile() if file size does not match shape." 
- pass - -class FileSeekWarning(Warning): - "Issued in fromfile() if there is unused data and seek() fails" - pass - - -STRICT, SLOPPY, WARN = range(3) - -_BLOCKSIZE=1024 - -# taken and adapted directly from numarray -def fromfile(infile, type=None, shape=None, sizing=STRICT, - typecode=None, dtype=None): - if isinstance(infile, (str, unicode)): - infile = open(infile, 'rb') - dtype = type2dtype(typecode, type, dtype, True) - if shape is None: - shape = (-1,) - if not isinstance(shape, tuple): - shape = (shape,) - - if (list(shape).count(-1)>1): - raise ValueError("At most one unspecified dimension in shape") - - if -1 not in shape: - if sizing != STRICT: - raise ValueError("sizing must be STRICT if size complete") - arr = np.empty(shape, dtype) - bytesleft=arr.nbytes - bytesread=0 - while(bytesleft > _BLOCKSIZE): - data = infile.read(_BLOCKSIZE) - if len(data) != _BLOCKSIZE: - raise EarlyEOFError("Unexpected EOF reading data for size complete array") - arr.data[bytesread:bytesread+_BLOCKSIZE]=data - bytesread += _BLOCKSIZE - bytesleft -= _BLOCKSIZE - if bytesleft > 0: - data = infile.read(bytesleft) - if len(data) != bytesleft: - raise EarlyEOFError("Unexpected EOF reading data for size complete array") - arr.data[bytesread:bytesread+bytesleft]=data - return arr - - - ##shape is incompletely specified - ##read until EOF - ##implementation 1: naively use memory blocks - ##problematic because memory allocation can be double what is - ##necessary (!) - - ##the most common case, namely reading in data from an unchanging - ##file whose size may be determined before allocation, should be - ##quick -- only one allocation will be needed. 
- - recsize = dtype.itemsize * np.product([i for i in shape if i != -1]) - blocksize = max(_BLOCKSIZE/recsize, 1)*recsize - - ##try to estimate file size - try: - curpos=infile.tell() - infile.seek(0,2) - endpos=infile.tell() - infile.seek(curpos) - except (AttributeError, IOError): - initsize=blocksize - else: - initsize=max(1,(endpos-curpos)/recsize)*recsize - - buf = np.newbuffer(initsize) - - bytesread=0 - while 1: - data=infile.read(blocksize) - if len(data) != blocksize: ##eof - break - ##do we have space? - if len(buf) < bytesread+blocksize: - buf=_resizebuf(buf,len(buf)+blocksize) - ## or rather a=resizebuf(a,2*len(a)) ? - assert len(buf) >= bytesread+blocksize - buf[bytesread:bytesread+blocksize]=data - bytesread += blocksize - - if len(data) % recsize != 0: - if sizing == STRICT: - raise SizeMismatchError("Filesize does not match specified shape") - if sizing == WARN: - _warnings.warn("Filesize does not match specified shape", - SizeMismatchWarning) - try: - infile.seek(-(len(data) % recsize),1) - except AttributeError: - _warnings.warn("Could not rewind (no seek support)", - FileSeekWarning) - except IOError: - _warnings.warn("Could not rewind (IOError in seek)", - FileSeekWarning) - datasize = (len(data)/recsize) * recsize - if len(buf) != bytesread+datasize: - buf=_resizebuf(buf,bytesread+datasize) - buf[bytesread:bytesread+datasize]=data[:datasize] - ##deduce shape from len(buf) - shape = list(shape) - uidx = shape.index(-1) - shape[uidx]=len(buf) / recsize - - a = np.ndarray(shape=shape, dtype=type, buffer=buf) - if a.dtype.char == '?': - np.not_equal(a, 0, a) - return a - -def fromstring(datastring, type=None, shape=None, typecode=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, True) - if shape is None: - count = -1 - else: - count = np.product(shape) - res = np.fromstring(datastring, dtype=dtype, count=count) - if shape is not None: - res.shape = shape - return res - - -# check_overflow is ignored -def fromlist(seq, type=None, 
shape=None, check_overflow=0, typecode=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, False) - return np.array(seq, dtype) - -def array(sequence=None, typecode=None, copy=1, savespace=0, - type=None, shape=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, 0) - if sequence is None: - if shape is None: - return None - if dtype is None: - dtype = 'l' - return np.empty(shape, dtype) - if isinstance(sequence, file): - return fromfile(sequence, dtype=dtype, shape=shape) - if isinstance(sequence, str): - return fromstring(sequence, dtype=dtype, shape=shape) - if isinstance(sequence, buffer): - arr = np.frombuffer(sequence, dtype=dtype) - else: - arr = np.array(sequence, dtype, copy=copy) - if shape is not None: - arr.shape = shape - return arr - -def asarray(seq, type=None, typecode=None, dtype=None): - if isinstance(seq, np.ndarray) and type is None and \ - typecode is None and dtype is None: - return seq - return array(seq, type=type, typecode=typecode, copy=0, dtype=dtype) - -inputarray = asarray - - -def getTypeObject(sequence, type): - if type is not None: - return type - try: - return typefrom(np.array(sequence)) - except: - raise TypeError("Can't determine a reasonable type from sequence") - -def getShape(shape, *args): - try: - if shape is () and not args: - return () - if len(args) > 0: - shape = (shape, ) + args - else: - shape = tuple(shape) - dummy = np.array(shape) - if not issubclass(dummy.dtype.type, np.integer): - raise TypeError - if len(dummy) > np.MAXDIMS: - raise TypeError - except: - raise TypeError("Shape must be a sequence of integers") - return shape - - -def identity(n, type=None, typecode=None, dtype=None): - dtype = type2dtype(typecode, type, dtype, True) - return np.identity(n, dtype) - -def info(obj, output=sys.stdout, numpy=0): - if numpy: - bp = lambda x: x - else: - bp = lambda x: int(x) - cls = getattr(obj, '__class__', type(obj)) - if numpy: - nm = getattr(cls, '__name__', cls) - else: - nm = cls - print >> 
output, "class: ", nm - print >> output, "shape: ", obj.shape - strides = obj.strides - print >> output, "strides: ", strides - if not numpy: - print >> output, "byteoffset: 0" - if len(strides) > 0: - bs = obj.strides[0] - else: - bs = obj.itemsize - print >> output, "bytestride: ", bs - print >> output, "itemsize: ", obj.itemsize - print >> output, "aligned: ", bp(obj.flags.aligned) - print >> output, "contiguous: ", bp(obj.flags.contiguous) - if numpy: - print >> output, "fortran: ", obj.flags.fortran - if not numpy: - print >> output, "buffer: ", repr(obj.data) - if not numpy: - extra = " (DEBUG ONLY)" - tic = "'" - else: - extra = "" - tic = "" - print >> output, "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra) - print >> output, "byteorder: ", - endian = obj.dtype.byteorder - if endian in ['|','=']: - print >> output, "%s%s%s" % (tic, sys.byteorder, tic) - byteswap = False - elif endian == '>': - print >> output, "%sbig%s" % (tic, tic) - byteswap = sys.byteorder != "big" - else: - print >> output, "%slittle%s" % (tic, tic) - byteswap = sys.byteorder != "little" - print >> output, "byteswap: ", bp(byteswap) - if not numpy: - print >> output, "type: ", typefrom(obj).name - else: - print >> output, "type: %s" % obj.dtype - -#clipmode is ignored if axis is not 0 and array is not 1d -def put(array, indices, values, axis=0, clipmode=RAISE): - if not isinstance(array, np.ndarray): - raise TypeError("put only works on subclass of ndarray") - work = asarray(array) - if axis == 0: - if array.ndim == 1: - work.put(indices, values, clipmode) - else: - work[indices] = values - elif isinstance(axis, (int, long, np.integer)): - work = work.swapaxes(0, axis) - work[indices] = values - work = work.swapaxes(0, axis) - else: - def_axes = range(work.ndim) - for x in axis: - def_axes.remove(x) - axis = list(axis)+def_axes - work = work.transpose(axis) - work[indices] = values - work = work.transpose(axis) - -def repeat(array, repeats, axis=0): - return 
np.repeat(array, repeats, axis) - - -def reshape(array, shape, *args): - if len(args) > 0: - shape = (shape,) + args - return np.reshape(array, shape) - - -import warnings as _warnings -def round(*args, **keys): - _warnings.warn("round() is deprecated. Switch to around()", - DeprecationWarning) - return around(*args, **keys) - -def sometrue(array, axis=0): - return np.sometrue(array, axis) - -#clipmode is ignored if axis is not an integer -def take(array, indices, axis=0, outarr=None, clipmode=RAISE): - array = np.asarray(array) - if isinstance(axis, (int, long, np.integer)): - res = array.take(indices, axis, outarr, clipmode) - if outarr is None: - return res - return - else: - def_axes = range(array.ndim) - for x in axis: - def_axes.remove(x) - axis = list(axis) + def_axes - work = array.transpose(axis) - res = work[indices] - if outarr is None: - return res - outarr[...] = res - return - -def tensormultiply(a1, a2): - a1, a2 = np.asarray(a1), np.asarray(a2) - if (a1.shape[-1] != a2.shape[0]): - raise ValueError("Unmatched dimensions") - shape = a1.shape[:-1] + a2.shape[1:] - return np.reshape(dot(np.reshape(a1, (-1, a1.shape[-1])), - np.reshape(a2, (a2.shape[0],-1))), - shape) - -def cumsum(a1, axis=0, out=None, type=None, dim=0): - return np.asarray(a1).cumsum(axis,dtype=type,out=out) - -def cumproduct(a1, axis=0, out=None, type=None, dim=0): - return np.asarray(a1).cumprod(axis,dtype=type,out=out) - -def argmax(x, axis=-1): - return np.argmax(x, axis) - -def argmin(x, axis=-1): - return np.argmin(x, axis) - -def newobj(self, type): - if type is None: - return np.empty_like(self) - else: - return np.empty(self.shape, type) - -def togglebyteorder(self): - self.dtype=self.dtype.newbyteorder() - -def average(a, axis=0, weights=None, returned=0): - return np.average(a, axis, weights, returned) diff --git a/numpy-1.6.2/numpy/numarray/image.py b/numpy-1.6.2/numpy/numarray/image.py deleted file mode 100644 index 3235289050..0000000000 --- 
a/numpy-1.6.2/numpy/numarray/image.py +++ /dev/null @@ -1,14 +0,0 @@ -try: - from stsci.image import * -except ImportError: - try: - from scipy.stsci.image import * - except ImportError: - msg = \ -"""The image package is not installed - -It can be downloaded by checking out the latest source from -http://svn.scipy.org/svn/scipy/trunk/Lib/stsci or by downloading and -installing all of SciPy from http://www.scipy.org. -""" - raise ImportError(msg) diff --git a/numpy-1.6.2/numpy/numarray/include/numpy/arraybase.h b/numpy-1.6.2/numpy/numarray/include/numpy/arraybase.h deleted file mode 100644 index a964979ce1..0000000000 --- a/numpy-1.6.2/numpy/numarray/include/numpy/arraybase.h +++ /dev/null @@ -1,71 +0,0 @@ -#if !defined(__arraybase_h) -#define _arraybase_h 1 - -#define SZ_BUF 79 -#define MAXDIM NPY_MAXDIMS -#define MAXARGS 18 - -typedef npy_intp maybelong; -typedef npy_bool Bool; -typedef npy_int8 Int8; -typedef npy_uint8 UInt8; -typedef npy_int16 Int16; -typedef npy_uint16 UInt16; -typedef npy_int32 Int32; -typedef npy_uint32 UInt32; -typedef npy_int64 Int64; -typedef npy_uint64 UInt64; -typedef npy_float32 Float32; -typedef npy_float64 Float64; - -typedef enum -{ - tAny=-1, - tBool=PyArray_BOOL, - tInt8=PyArray_INT8, - tUInt8=PyArray_UINT8, - tInt16=PyArray_INT16, - tUInt16=PyArray_UINT16, - tInt32=PyArray_INT32, - tUInt32=PyArray_UINT32, - tInt64=PyArray_INT64, - tUInt64=PyArray_UINT64, - tFloat32=PyArray_FLOAT32, - tFloat64=PyArray_FLOAT64, - tComplex32=PyArray_COMPLEX64, - tComplex64=PyArray_COMPLEX128, - tObject=PyArray_OBJECT, /* placeholder... 
does nothing */ - tMaxType=PyArray_NTYPES, - tDefault = tFloat64, -#if NPY_BITSOF_LONG == 64 - tLong = tInt64, -#else - tLong = tInt32, -#endif -} NumarrayType; - -#define nNumarrayType PyArray_NTYPES - -#define HAS_UINT64 1 - -typedef enum -{ - NUM_LITTLE_ENDIAN=0, - NUM_BIG_ENDIAN = 1 -} NumarrayByteOrder; - -typedef struct { Float32 r, i; } Complex32; -typedef struct { Float64 r, i; } Complex64; - -#define WRITABLE NPY_WRITEABLE -#define CHECKOVERFLOW 0x800 -#define UPDATEDICT 0x1000 -#define FORTRAN_CONTIGUOUS NPY_FORTRAN -#define IS_CARRAY (NPY_CONTIGUOUS | NPY_ALIGNED) - -#define PyArray(m) ((PyArrayObject *)(m)) -#define PyArray_ISFORTRAN_CONTIGUOUS(m) (((PyArray(m))->flags & FORTRAN_CONTIGUOUS) != 0) -#define PyArray_ISWRITABLE PyArray_ISWRITEABLE - - -#endif diff --git a/numpy-1.6.2/numpy/numarray/include/numpy/cfunc.h b/numpy-1.6.2/numpy/numarray/include/numpy/cfunc.h deleted file mode 100644 index b581be08f0..0000000000 --- a/numpy-1.6.2/numpy/numarray/include/numpy/cfunc.h +++ /dev/null @@ -1,78 +0,0 @@ -#if !defined(__cfunc__) -#define __cfunc__ 1 - -typedef PyObject *(*CFUNCasPyValue)(void *); -typedef int (*UFUNC)(long, long, long, void **, long*); -/* typedef void (*CFUNC_2ARG)(long, void *, void *); */ -/* typedef void (*CFUNC_3ARG)(long, void *, void *, void *); */ -typedef int (*CFUNCfromPyValue)(PyObject *, void *); -typedef int (*CFUNC_STRIDE_CONV_FUNC)(long, long, maybelong *, - void *, long, maybelong*, void *, long, maybelong *); - -typedef int (*CFUNC_STRIDED_FUNC)(PyObject *, long, PyArrayObject **, - char **data); - -#define MAXARRAYS 16 - -typedef enum { - CFUNC_UFUNC, - CFUNC_STRIDING, - CFUNC_NSTRIDING, - CFUNC_AS_PY_VALUE, - CFUNC_FROM_PY_VALUE -} eCfuncType; - -typedef struct { - char *name; - void *fptr; /* Pointer to "un-wrapped" c function */ - eCfuncType type; /* UFUNC, STRIDING, AsPyValue, FromPyValue */ - Bool chkself; /* CFUNC does own alignment/bounds checking */ - Bool align; /* CFUNC requires aligned buffer pointers */ - 
Int8 wantIn, wantOut; /* required input/output arg counts. */ - Int8 sizes[MAXARRAYS]; /* array of align/itemsizes. */ - Int8 iters[MAXARRAYS]; /* array of element counts. 0 --> niter. */ -} CfuncDescriptor; - -typedef struct { - PyObject_HEAD - CfuncDescriptor descr; -} CfuncObject; - -#define SELF_CHECKED_CFUNC_DESCR(name, type) \ - static CfuncDescriptor name##_descr = { #name, (void *) name, type, 1 } - -#define CHECK_ALIGN 1 - -#define CFUNC_DESCR(name, type, align, iargs, oargs, s1, s2, s3, i1, i2, i3) \ - static CfuncDescriptor name##_descr = \ - { #name, (void *)name, type, 0, align, iargs, oargs, {s1, s2, s3}, {i1, i2, i3} } - -#define UFUNC_DESCR1(name, s1) \ - CFUNC_DESCR(name, CFUNC_UFUNC, CHECK_ALIGN, 0, 1, s1, 0, 0, 0, 0, 0) - -#define UFUNC_DESCR2(name, s1, s2) \ - CFUNC_DESCR(name, CFUNC_UFUNC, CHECK_ALIGN, 1, 1, s1, s2, 0, 0, 0, 0) - -#define UFUNC_DESCR3(name, s1, s2, s3) \ - CFUNC_DESCR(name, CFUNC_UFUNC, CHECK_ALIGN, 2, 1, s1, s2, s3, 0, 0, 0) - -#define UFUNC_DESCR3sv(name, s1, s2, s3) \ - CFUNC_DESCR(name, CFUNC_UFUNC, CHECK_ALIGN, 2, 1, s1, s2, s3, 1, 0, 0) - -#define UFUNC_DESCR3vs(name, s1, s2, s3) \ - CFUNC_DESCR(name, CFUNC_UFUNC, CHECK_ALIGN, 2, 1, s1, s2, s3, 0, 1, 0) - -#define STRIDING_DESCR2(name, align, s1, s2) \ - CFUNC_DESCR(name, CFUNC_STRIDING, align, 1, 1, s1, s2, 0, 0, 0, 0) - -#define NSTRIDING_DESCR1(name) \ - CFUNC_DESCR(name, CFUNC_NSTRIDING, 0, 0, 1, 0, 0, 0, 0, 0, 0) - -#define NSTRIDING_DESCR2(name) \ - CFUNC_DESCR(name, CFUNC_NSTRIDING, 0, 1, 1, 0, 0, 0, 0, 0, 0) - -#define NSTRIDING_DESCR3(name) \ - CFUNC_DESCR(name, CFUNC_NSTRIDING, 0, 2, 1, 0, 0, 0, 0, 0, 0) - -#endif - diff --git a/numpy-1.6.2/numpy/numarray/include/numpy/ieeespecial.h b/numpy-1.6.2/numpy/numarray/include/numpy/ieeespecial.h deleted file mode 100644 index 0f3fff2a92..0000000000 --- a/numpy-1.6.2/numpy/numarray/include/numpy/ieeespecial.h +++ /dev/null @@ -1,124 +0,0 @@ -/* 32-bit special value ranges */ - -#if defined(_MSC_VER) -#define MKINT(x) 
(x##UL) -#define MKINT64(x) (x##Ui64) -#define BIT(x) (1Ui64 << (x)) -#else -#define MKINT(x) (x##U) -#define MKINT64(x) (x##ULL) -#define BIT(x) (1ULL << (x)) -#endif - - -#define NEG_QUIET_NAN_MIN32 MKINT(0xFFC00001) -#define NEG_QUIET_NAN_MAX32 MKINT(0xFFFFFFFF) - -#define INDETERMINATE_MIN32 MKINT(0xFFC00000) -#define INDETERMINATE_MAX32 MKINT(0xFFC00000) - -#define NEG_SIGNAL_NAN_MIN32 MKINT(0xFF800001) -#define NEG_SIGNAL_NAN_MAX32 MKINT(0xFFBFFFFF) - -#define NEG_INFINITY_MIN32 MKINT(0xFF800000) - -#define NEG_NORMALIZED_MIN32 MKINT(0x80800000) -#define NEG_NORMALIZED_MAX32 MKINT(0xFF7FFFFF) - -#define NEG_DENORMALIZED_MIN32 MKINT(0x80000001) -#define NEG_DENORMALIZED_MAX32 MKINT(0x807FFFFF) - -#define NEG_ZERO_MIN32 MKINT(0x80000000) -#define NEG_ZERO_MAX32 MKINT(0x80000000) - -#define POS_ZERO_MIN32 MKINT(0x00000000) -#define POS_ZERO_MAX32 MKINT(0x00000000) - -#define POS_DENORMALIZED_MIN32 MKINT(0x00000001) -#define POS_DENORMALIZED_MAX32 MKINT(0x007FFFFF) - -#define POS_NORMALIZED_MIN32 MKINT(0x00800000) -#define POS_NORMALIZED_MAX32 MKINT(0x7F7FFFFF) - -#define POS_INFINITY_MIN32 MKINT(0x7F800000) -#define POS_INFINITY_MAX32 MKINT(0x7F800000) - -#define POS_SIGNAL_NAN_MIN32 MKINT(0x7F800001) -#define POS_SIGNAL_NAN_MAX32 MKINT(0x7FBFFFFF) - -#define POS_QUIET_NAN_MIN32 MKINT(0x7FC00000) -#define POS_QUIET_NAN_MAX32 MKINT(0x7FFFFFFF) - -/* 64-bit special value ranges */ - -#define NEG_QUIET_NAN_MIN64 MKINT64(0xFFF8000000000001) -#define NEG_QUIET_NAN_MAX64 MKINT64(0xFFFFFFFFFFFFFFFF) - -#define INDETERMINATE_MIN64 MKINT64(0xFFF8000000000000) -#define INDETERMINATE_MAX64 MKINT64(0xFFF8000000000000) - -#define NEG_SIGNAL_NAN_MIN64 MKINT64(0xFFF7FFFFFFFFFFFF) -#define NEG_SIGNAL_NAN_MAX64 MKINT64(0xFFF0000000000001) - -#define NEG_INFINITY_MIN64 MKINT64(0xFFF0000000000000) - -#define NEG_NORMALIZED_MIN64 MKINT64(0xFFEFFFFFFFFFFFFF) -#define NEG_NORMALIZED_MAX64 MKINT64(0x8010000000000000) - -#define NEG_DENORMALIZED_MIN64 MKINT64(0x800FFFFFFFFFFFFF) 
-#define NEG_DENORMALIZED_MAX64 MKINT64(0x8000000000000001) - -#define NEG_ZERO_MIN64 MKINT64(0x8000000000000000) -#define NEG_ZERO_MAX64 MKINT64(0x8000000000000000) - -#define POS_ZERO_MIN64 MKINT64(0x0000000000000000) -#define POS_ZERO_MAX64 MKINT64(0x0000000000000000) - -#define POS_DENORMALIZED_MIN64 MKINT64(0x0000000000000001) -#define POS_DENORMALIZED_MAX64 MKINT64(0x000FFFFFFFFFFFFF) - -#define POS_NORMALIZED_MIN64 MKINT64(0x0010000000000000) -#define POS_NORMALIZED_MAX64 MKINT64(0x7FEFFFFFFFFFFFFF) - -#define POS_INFINITY_MIN64 MKINT64(0x7FF0000000000000) -#define POS_INFINITY_MAX64 MKINT64(0x7FF0000000000000) - -#define POS_SIGNAL_NAN_MIN64 MKINT64(0x7FF0000000000001) -#define POS_SIGNAL_NAN_MAX64 MKINT64(0x7FF7FFFFFFFFFFFF) - -#define POS_QUIET_NAN_MIN64 MKINT64(0x7FF8000000000000) -#define POS_QUIET_NAN_MAX64 MKINT64(0x7FFFFFFFFFFFFFFF) - -typedef enum -{ - POS_QNAN_BIT, - NEG_QNAN_BIT, - POS_SNAN_BIT, - NEG_SNAN_BIT, - POS_INF_BIT, - NEG_INF_BIT, - POS_DEN_BIT, - NEG_DEN_BIT, - POS_NOR_BIT, - NEG_NOR_BIT, - POS_ZERO_BIT, - NEG_ZERO_BIT, - INDETERM_BIT, - BUG_BIT -} ieee_selects; - -#define MSK_POS_QNAN BIT(POS_QNAN_BIT) -#define MSK_POS_SNAN BIT(POS_SNAN_BIT) -#define MSK_POS_INF BIT(POS_INF_BIT) -#define MSK_POS_DEN BIT(POS_DEN_BIT) -#define MSK_POS_NOR BIT(POS_NOR_BIT) -#define MSK_POS_ZERO BIT(POS_ZERO_BIT) -#define MSK_NEG_QNAN BIT(NEG_QNAN_BIT) -#define MSK_NEG_SNAN BIT(NEG_SNAN_BIT) -#define MSK_NEG_INF BIT(NEG_INF_BIT) -#define MSK_NEG_DEN BIT(NEG_DEN_BIT) -#define MSK_NEG_NOR BIT(NEG_NOR_BIT) -#define MSK_NEG_ZERO BIT(NEG_ZERO_BIT) -#define MSK_INDETERM BIT(INDETERM_BIT) -#define MSK_BUG BIT(BUG_BIT) - diff --git a/numpy-1.6.2/numpy/numarray/include/numpy/libnumarray.h b/numpy-1.6.2/numpy/numarray/include/numpy/libnumarray.h deleted file mode 100644 index c69f10d8ee..0000000000 --- a/numpy-1.6.2/numpy/numarray/include/numpy/libnumarray.h +++ /dev/null @@ -1,630 +0,0 @@ -/* Compatibility with numarray. Do not use in new code. 
- */ - -#ifndef NUMPY_LIBNUMARRAY_H -#define NUMPY_LIBNUMARRAY_H - -#include "numpy/arrayobject.h" -#include "arraybase.h" -#include "nummacro.h" -#include "numcomplex.h" -#include "ieeespecial.h" -#include "cfunc.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* Header file for libnumarray */ - -#if !defined(_libnumarray_MODULE) - -/* -Extensions constructed from seperate compilation units can access the -C-API defined here by defining "libnumarray_UNIQUE_SYMBOL" to a global -name unique to the extension. Doing this circumvents the requirement -to import libnumarray into each compilation unit, but is nevertheless -mildly discouraged as "outside the Python norm" and potentially -leading to problems. Looking around at "existing Python art", most -extension modules are monolithic C files, and likely for good reason. -*/ - -/* C API address pointer */ -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **libnumarray_API; -#else -#if defined(libnumarray_UNIQUE_SYMBOL) -void **libnumarray_API; -#else -static void **libnumarray_API; -#endif -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define _import_libnumarray() \ - { \ - PyObject *module = PyImport_ImportModule("numpy.numarray._capi"); \ - if (module != NULL) { \ - PyObject *module_dict = PyModule_GetDict(module); \ - PyObject *c_api_object = \ - PyDict_GetItemString(module_dict, "_C_API"); \ - if (c_api_object && PyCapsule_CheckExact(c_api_object)) { \ - libnumarray_API = (void **)PyCapsule_GetPointer(c_api_object, NULL); \ - } else { \ - PyErr_Format(PyExc_ImportError, \ - "Can't get API for module 'numpy.numarray._capi'"); \ - } \ - } \ - } - -#else -#define _import_libnumarray() \ - { \ - PyObject *module = PyImport_ImportModule("numpy.numarray._capi"); \ - if (module != NULL) { \ - PyObject *module_dict = PyModule_GetDict(module); \ - PyObject *c_api_object = \ - PyDict_GetItemString(module_dict, "_C_API"); \ - if (c_api_object && PyCObject_Check(c_api_object)) { \ - libnumarray_API = (void 
**)PyCObject_AsVoidPtr(c_api_object); \ - } else { \ - PyErr_Format(PyExc_ImportError, \ - "Can't get API for module 'numpy.numarray._capi'"); \ - } \ - } \ - } -#endif - -#define import_libnumarray() _import_libnumarray(); if (PyErr_Occurred()) { PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.numarray._capi failed to import.\n"); return; } - -#endif - - -#define libnumarray_FatalApiError (Py_FatalError("Call to API function without first calling import_libnumarray() in " __FILE__), NULL) - - -/* Macros defining components of function prototypes */ - -#ifdef _libnumarray_MODULE - /* This section is used when compiling libnumarray */ - -static PyObject *_Error; - -static PyObject* getBuffer (PyObject*o); - -static int isBuffer (PyObject*o); - -static int getWriteBufferDataPtr (PyObject*o,void**p); - -static int isBufferWriteable (PyObject*o); - -static int getReadBufferDataPtr (PyObject*o,void**p); - -static int getBufferSize (PyObject*o); - -static double num_log (double x); - -static double num_log10 (double x); - -static double num_pow (double x, double y); - -static double num_acosh (double x); - -static double num_asinh (double x); - -static double num_atanh (double x); - -static double num_round (double x); - -static int int_dividebyzero_error (long value, long unused); - -static int int_overflow_error (Float64 value); - -static int umult64_overflow (UInt64 a, UInt64 b); - -static int smult64_overflow (Int64 a0, Int64 b0); - -static void NA_Done (void); - -static PyArrayObject* NA_NewAll (int ndim, maybelong* shape, NumarrayType type, void* buffer, maybelong byteoffset, maybelong bytestride, int byteorder, int aligned, int writeable); - -static PyArrayObject* NA_NewAllStrides (int ndim, maybelong* shape, maybelong* strides, NumarrayType type, void* buffer, maybelong byteoffset, int byteorder, int aligned, int writeable); - -static PyArrayObject* NA_New (void* buffer, NumarrayType type, int ndim,...); - -static PyArrayObject* NA_Empty (int ndim, 
maybelong* shape, NumarrayType type); - -static PyArrayObject* NA_NewArray (void* buffer, NumarrayType type, int ndim, ...); - -static PyArrayObject* NA_vNewArray (void* buffer, NumarrayType type, int ndim, maybelong *shape); - -static PyObject* NA_ReturnOutput (PyObject*,PyArrayObject*); - -static long NA_getBufferPtrAndSize (PyObject*,int,void**); - -static int NA_checkIo (char*,int,int,int,int); - -static int NA_checkOneCBuffer (char*,long,void*,long,size_t); - -static int NA_checkNCBuffers (char*,int,long,void**,long*,Int8*,Int8*); - -static int NA_checkOneStriding (char*,long,maybelong*,long,maybelong*,long,long,int); - -static PyObject* NA_new_cfunc (CfuncDescriptor*); - -static int NA_add_cfunc (PyObject*,char*,CfuncDescriptor*); - -static PyArrayObject* NA_InputArray (PyObject*,NumarrayType,int); - -static PyArrayObject* NA_OutputArray (PyObject*,NumarrayType,int); - -static PyArrayObject* NA_IoArray (PyObject*,NumarrayType,int); - -static PyArrayObject* NA_OptionalOutputArray (PyObject*,NumarrayType,int,PyArrayObject*); - -static long NA_get_offset (PyArrayObject*,int,...); - -static Float64 NA_get_Float64 (PyArrayObject*,long); - -static void NA_set_Float64 (PyArrayObject*,long,Float64); - -static Complex64 NA_get_Complex64 (PyArrayObject*,long); - -static void NA_set_Complex64 (PyArrayObject*,long,Complex64); - -static Int64 NA_get_Int64 (PyArrayObject*,long); - -static void NA_set_Int64 (PyArrayObject*,long,Int64); - -static Float64 NA_get1_Float64 (PyArrayObject*,long); - -static Float64 NA_get2_Float64 (PyArrayObject*,long,long); - -static Float64 NA_get3_Float64 (PyArrayObject*,long,long,long); - -static void NA_set1_Float64 (PyArrayObject*,long,Float64); - -static void NA_set2_Float64 (PyArrayObject*,long,long,Float64); - -static void NA_set3_Float64 (PyArrayObject*,long,long,long,Float64); - -static Complex64 NA_get1_Complex64 (PyArrayObject*,long); - -static Complex64 NA_get2_Complex64 (PyArrayObject*,long,long); - -static Complex64 
NA_get3_Complex64 (PyArrayObject*,long,long,long); - -static void NA_set1_Complex64 (PyArrayObject*,long,Complex64); - -static void NA_set2_Complex64 (PyArrayObject*,long,long,Complex64); - -static void NA_set3_Complex64 (PyArrayObject*,long,long,long,Complex64); - -static Int64 NA_get1_Int64 (PyArrayObject*,long); - -static Int64 NA_get2_Int64 (PyArrayObject*,long,long); - -static Int64 NA_get3_Int64 (PyArrayObject*,long,long,long); - -static void NA_set1_Int64 (PyArrayObject*,long,Int64); - -static void NA_set2_Int64 (PyArrayObject*,long,long,Int64); - -static void NA_set3_Int64 (PyArrayObject*,long,long,long,Int64); - -static int NA_get1D_Float64 (PyArrayObject*,long,int,Float64*); - -static int NA_set1D_Float64 (PyArrayObject*,long,int,Float64*); - -static int NA_get1D_Int64 (PyArrayObject*,long,int,Int64*); - -static int NA_set1D_Int64 (PyArrayObject*,long,int,Int64*); - -static int NA_get1D_Complex64 (PyArrayObject*,long,int,Complex64*); - -static int NA_set1D_Complex64 (PyArrayObject*,long,int,Complex64*); - -static int NA_ShapeEqual (PyArrayObject*,PyArrayObject*); - -static int NA_ShapeLessThan (PyArrayObject*,PyArrayObject*); - -static int NA_ByteOrder (void); - -static Bool NA_IeeeSpecial32 (Float32*,Int32*); - -static Bool NA_IeeeSpecial64 (Float64*,Int32*); - -static PyArrayObject* NA_updateDataPtr (PyArrayObject*); - -static char* NA_typeNoToName (int); - -static int NA_nameToTypeNo (char*); - -static PyObject* NA_typeNoToTypeObject (int); - -static PyObject* NA_intTupleFromMaybeLongs (int,maybelong*); - -static long NA_maybeLongsFromIntTuple (int,maybelong*,PyObject*); - -static int NA_intTupleProduct (PyObject *obj, long *product); - -static long NA_isIntegerSequence (PyObject*); - -static PyObject* NA_setArrayFromSequence (PyArrayObject*,PyObject*); - -static int NA_maxType (PyObject*); - -static int NA_isPythonScalar (PyObject *obj); - -static PyObject* NA_getPythonScalar (PyArrayObject*,long); - -static int NA_setFromPythonScalar 
(PyArrayObject*,long,PyObject*); - -static int NA_NDArrayCheck (PyObject*); - -static int NA_NumArrayCheck (PyObject*); - -static int NA_ComplexArrayCheck (PyObject*); - -static unsigned long NA_elements (PyArrayObject*); - -static int NA_typeObjectToTypeNo (PyObject*); - -static int NA_copyArray (PyArrayObject* to, const PyArrayObject* from); - -static PyArrayObject* NA_copy (PyArrayObject*); - -static PyObject* NA_getType (PyObject *typeobj_or_name); - -static PyObject * NA_callCUFuncCore (PyObject *cfunc, long niter, long ninargs, long noutargs, PyObject **BufferObj, long *offset); - -static PyObject * NA_callStrideConvCFuncCore (PyObject *cfunc, int nshape, maybelong *shape, PyObject *inbuffObj, long inboffset, int nstrides0, maybelong *inbstrides, PyObject *outbuffObj, long outboffset, int nstrides1, maybelong *outbstrides, long nbytes); - -static void NA_stridesFromShape (int nshape, maybelong *shape, maybelong bytestride, maybelong *strides); - -static int NA_OperatorCheck (PyObject *obj); - -static int NA_ConverterCheck (PyObject *obj); - -static int NA_UfuncCheck (PyObject *obj); - -static int NA_CfuncCheck (PyObject *obj); - -static int NA_getByteOffset (PyArrayObject *array, int nindices, maybelong *indices, long *offset); - -static int NA_swapAxes (PyArrayObject *array, int x, int y); - -static PyObject * NA_initModuleGlobal (char *module, char *global); - -static NumarrayType NA_NumarrayType (PyObject *seq); - -static PyArrayObject * NA_NewAllFromBuffer (int ndim, maybelong *shape, NumarrayType type, PyObject *bufferObject, maybelong byteoffset, maybelong bytestride, int byteorder, int aligned, int writeable); - -static Float64 * NA_alloc1D_Float64 (PyArrayObject *a, long offset, int cnt); - -static Int64 * NA_alloc1D_Int64 (PyArrayObject *a, long offset, int cnt); - -static void NA_updateAlignment (PyArrayObject *self); - -static void NA_updateContiguous (PyArrayObject *self); - -static void NA_updateStatus (PyArrayObject *self); - -static int 
NA_NumArrayCheckExact (PyObject *op); - -static int NA_NDArrayCheckExact (PyObject *op); - -static int NA_OperatorCheckExact (PyObject *op); - -static int NA_ConverterCheckExact (PyObject *op); - -static int NA_UfuncCheckExact (PyObject *op); - -static int NA_CfuncCheckExact (PyObject *op); - -static char * NA_getArrayData (PyArrayObject *ap); - -static void NA_updateByteswap (PyArrayObject *ap); - -static PyArray_Descr * NA_DescrFromType (int type); - -static PyObject * NA_Cast (PyArrayObject *a, int type); - -static int NA_checkFPErrors (void); - -static void NA_clearFPErrors (void); - -static int NA_checkAndReportFPErrors (char *name); - -static Bool NA_IeeeMask32 (Float32,Int32); - -static Bool NA_IeeeMask64 (Float64,Int32); - -static int _NA_callStridingHelper (PyObject *aux, long dim, long nnumarray, PyArrayObject *numarray[], char *data[], CFUNC_STRIDED_FUNC f); - -static PyArrayObject * NA_FromDimsStridesDescrAndData (int nd, maybelong *dims, maybelong *strides, PyArray_Descr *descr, char *data); - -static PyArrayObject * NA_FromDimsTypeAndData (int nd, maybelong *dims, int type, char *data); - -static PyArrayObject * NA_FromDimsStridesTypeAndData (int nd, maybelong *dims, maybelong *strides, int type, char *data); - -static int NA_scipy_typestr (NumarrayType t, int byteorder, char *typestr); - -static PyArrayObject * NA_FromArrayStruct (PyObject *a); - - -#else - /* This section is used in modules that use libnumarray */ - -#define getBuffer (libnumarray_API ? (*(PyObject* (*) (PyObject*o) ) libnumarray_API[ 0 ]) : (*(PyObject* (*) (PyObject*o) ) libnumarray_FatalApiError)) - -#define isBuffer (libnumarray_API ? (*(int (*) (PyObject*o) ) libnumarray_API[ 1 ]) : (*(int (*) (PyObject*o) ) libnumarray_FatalApiError)) - -#define getWriteBufferDataPtr (libnumarray_API ? (*(int (*) (PyObject*o,void**p) ) libnumarray_API[ 2 ]) : (*(int (*) (PyObject*o,void**p) ) libnumarray_FatalApiError)) - -#define isBufferWriteable (libnumarray_API ? 
(*(int (*) (PyObject*o) ) libnumarray_API[ 3 ]) : (*(int (*) (PyObject*o) ) libnumarray_FatalApiError)) - -#define getReadBufferDataPtr (libnumarray_API ? (*(int (*) (PyObject*o,void**p) ) libnumarray_API[ 4 ]) : (*(int (*) (PyObject*o,void**p) ) libnumarray_FatalApiError)) - -#define getBufferSize (libnumarray_API ? (*(int (*) (PyObject*o) ) libnumarray_API[ 5 ]) : (*(int (*) (PyObject*o) ) libnumarray_FatalApiError)) - -#define num_log (libnumarray_API ? (*(double (*) (double x) ) libnumarray_API[ 6 ]) : (*(double (*) (double x) ) libnumarray_FatalApiError)) - -#define num_log10 (libnumarray_API ? (*(double (*) (double x) ) libnumarray_API[ 7 ]) : (*(double (*) (double x) ) libnumarray_FatalApiError)) - -#define num_pow (libnumarray_API ? (*(double (*) (double x, double y) ) libnumarray_API[ 8 ]) : (*(double (*) (double x, double y) ) libnumarray_FatalApiError)) - -#define num_acosh (libnumarray_API ? (*(double (*) (double x) ) libnumarray_API[ 9 ]) : (*(double (*) (double x) ) libnumarray_FatalApiError)) - -#define num_asinh (libnumarray_API ? (*(double (*) (double x) ) libnumarray_API[ 10 ]) : (*(double (*) (double x) ) libnumarray_FatalApiError)) - -#define num_atanh (libnumarray_API ? (*(double (*) (double x) ) libnumarray_API[ 11 ]) : (*(double (*) (double x) ) libnumarray_FatalApiError)) - -#define num_round (libnumarray_API ? (*(double (*) (double x) ) libnumarray_API[ 12 ]) : (*(double (*) (double x) ) libnumarray_FatalApiError)) - -#define int_dividebyzero_error (libnumarray_API ? (*(int (*) (long value, long unused) ) libnumarray_API[ 13 ]) : (*(int (*) (long value, long unused) ) libnumarray_FatalApiError)) - -#define int_overflow_error (libnumarray_API ? (*(int (*) (Float64 value) ) libnumarray_API[ 14 ]) : (*(int (*) (Float64 value) ) libnumarray_FatalApiError)) - -#define umult64_overflow (libnumarray_API ? 
(*(int (*) (UInt64 a, UInt64 b) ) libnumarray_API[ 15 ]) : (*(int (*) (UInt64 a, UInt64 b) ) libnumarray_FatalApiError)) - -#define smult64_overflow (libnumarray_API ? (*(int (*) (Int64 a0, Int64 b0) ) libnumarray_API[ 16 ]) : (*(int (*) (Int64 a0, Int64 b0) ) libnumarray_FatalApiError)) - -#define NA_Done (libnumarray_API ? (*(void (*) (void) ) libnumarray_API[ 17 ]) : (*(void (*) (void) ) libnumarray_FatalApiError)) - -#define NA_NewAll (libnumarray_API ? (*(PyArrayObject* (*) (int ndim, maybelong* shape, NumarrayType type, void* buffer, maybelong byteoffset, maybelong bytestride, int byteorder, int aligned, int writeable) ) libnumarray_API[ 18 ]) : (*(PyArrayObject* (*) (int ndim, maybelong* shape, NumarrayType type, void* buffer, maybelong byteoffset, maybelong bytestride, int byteorder, int aligned, int writeable) ) libnumarray_FatalApiError)) - -#define NA_NewAllStrides (libnumarray_API ? (*(PyArrayObject* (*) (int ndim, maybelong* shape, maybelong* strides, NumarrayType type, void* buffer, maybelong byteoffset, int byteorder, int aligned, int writeable) ) libnumarray_API[ 19 ]) : (*(PyArrayObject* (*) (int ndim, maybelong* shape, maybelong* strides, NumarrayType type, void* buffer, maybelong byteoffset, int byteorder, int aligned, int writeable) ) libnumarray_FatalApiError)) - -#define NA_New (libnumarray_API ? (*(PyArrayObject* (*) (void* buffer, NumarrayType type, int ndim,...) ) libnumarray_API[ 20 ]) : (*(PyArrayObject* (*) (void* buffer, NumarrayType type, int ndim,...) ) libnumarray_FatalApiError)) - -#define NA_Empty (libnumarray_API ? (*(PyArrayObject* (*) (int ndim, maybelong* shape, NumarrayType type) ) libnumarray_API[ 21 ]) : (*(PyArrayObject* (*) (int ndim, maybelong* shape, NumarrayType type) ) libnumarray_FatalApiError)) - -#define NA_NewArray (libnumarray_API ? (*(PyArrayObject* (*) (void* buffer, NumarrayType type, int ndim, ...) ) libnumarray_API[ 22 ]) : (*(PyArrayObject* (*) (void* buffer, NumarrayType type, int ndim, ...) 
) libnumarray_FatalApiError)) - -#define NA_vNewArray (libnumarray_API ? (*(PyArrayObject* (*) (void* buffer, NumarrayType type, int ndim, maybelong *shape) ) libnumarray_API[ 23 ]) : (*(PyArrayObject* (*) (void* buffer, NumarrayType type, int ndim, maybelong *shape) ) libnumarray_FatalApiError)) - -#define NA_ReturnOutput (libnumarray_API ? (*(PyObject* (*) (PyObject*,PyArrayObject*) ) libnumarray_API[ 24 ]) : (*(PyObject* (*) (PyObject*,PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_getBufferPtrAndSize (libnumarray_API ? (*(long (*) (PyObject*,int,void**) ) libnumarray_API[ 25 ]) : (*(long (*) (PyObject*,int,void**) ) libnumarray_FatalApiError)) - -#define NA_checkIo (libnumarray_API ? (*(int (*) (char*,int,int,int,int) ) libnumarray_API[ 26 ]) : (*(int (*) (char*,int,int,int,int) ) libnumarray_FatalApiError)) - -#define NA_checkOneCBuffer (libnumarray_API ? (*(int (*) (char*,long,void*,long,size_t) ) libnumarray_API[ 27 ]) : (*(int (*) (char*,long,void*,long,size_t) ) libnumarray_FatalApiError)) - -#define NA_checkNCBuffers (libnumarray_API ? (*(int (*) (char*,int,long,void**,long*,Int8*,Int8*) ) libnumarray_API[ 28 ]) : (*(int (*) (char*,int,long,void**,long*,Int8*,Int8*) ) libnumarray_FatalApiError)) - -#define NA_checkOneStriding (libnumarray_API ? (*(int (*) (char*,long,maybelong*,long,maybelong*,long,long,int) ) libnumarray_API[ 29 ]) : (*(int (*) (char*,long,maybelong*,long,maybelong*,long,long,int) ) libnumarray_FatalApiError)) - -#define NA_new_cfunc (libnumarray_API ? (*(PyObject* (*) (CfuncDescriptor*) ) libnumarray_API[ 30 ]) : (*(PyObject* (*) (CfuncDescriptor*) ) libnumarray_FatalApiError)) - -#define NA_add_cfunc (libnumarray_API ? (*(int (*) (PyObject*,char*,CfuncDescriptor*) ) libnumarray_API[ 31 ]) : (*(int (*) (PyObject*,char*,CfuncDescriptor*) ) libnumarray_FatalApiError)) - -#define NA_InputArray (libnumarray_API ? 
(*(PyArrayObject* (*) (PyObject*,NumarrayType,int) ) libnumarray_API[ 32 ]) : (*(PyArrayObject* (*) (PyObject*,NumarrayType,int) ) libnumarray_FatalApiError)) - -#define NA_OutputArray (libnumarray_API ? (*(PyArrayObject* (*) (PyObject*,NumarrayType,int) ) libnumarray_API[ 33 ]) : (*(PyArrayObject* (*) (PyObject*,NumarrayType,int) ) libnumarray_FatalApiError)) - -#define NA_IoArray (libnumarray_API ? (*(PyArrayObject* (*) (PyObject*,NumarrayType,int) ) libnumarray_API[ 34 ]) : (*(PyArrayObject* (*) (PyObject*,NumarrayType,int) ) libnumarray_FatalApiError)) - -#define NA_OptionalOutputArray (libnumarray_API ? (*(PyArrayObject* (*) (PyObject*,NumarrayType,int,PyArrayObject*) ) libnumarray_API[ 35 ]) : (*(PyArrayObject* (*) (PyObject*,NumarrayType,int,PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_get_offset (libnumarray_API ? (*(long (*) (PyArrayObject*,int,...) ) libnumarray_API[ 36 ]) : (*(long (*) (PyArrayObject*,int,...) ) libnumarray_FatalApiError)) - -#define NA_get_Float64 (libnumarray_API ? (*(Float64 (*) (PyArrayObject*,long) ) libnumarray_API[ 37 ]) : (*(Float64 (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_set_Float64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,Float64) ) libnumarray_API[ 38 ]) : (*(void (*) (PyArrayObject*,long,Float64) ) libnumarray_FatalApiError)) - -#define NA_get_Complex64 (libnumarray_API ? (*(Complex64 (*) (PyArrayObject*,long) ) libnumarray_API[ 39 ]) : (*(Complex64 (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_set_Complex64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,Complex64) ) libnumarray_API[ 40 ]) : (*(void (*) (PyArrayObject*,long,Complex64) ) libnumarray_FatalApiError)) - -#define NA_get_Int64 (libnumarray_API ? (*(Int64 (*) (PyArrayObject*,long) ) libnumarray_API[ 41 ]) : (*(Int64 (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_set_Int64 (libnumarray_API ? 
(*(void (*) (PyArrayObject*,long,Int64) ) libnumarray_API[ 42 ]) : (*(void (*) (PyArrayObject*,long,Int64) ) libnumarray_FatalApiError)) - -#define NA_get1_Float64 (libnumarray_API ? (*(Float64 (*) (PyArrayObject*,long) ) libnumarray_API[ 43 ]) : (*(Float64 (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_get2_Float64 (libnumarray_API ? (*(Float64 (*) (PyArrayObject*,long,long) ) libnumarray_API[ 44 ]) : (*(Float64 (*) (PyArrayObject*,long,long) ) libnumarray_FatalApiError)) - -#define NA_get3_Float64 (libnumarray_API ? (*(Float64 (*) (PyArrayObject*,long,long,long) ) libnumarray_API[ 45 ]) : (*(Float64 (*) (PyArrayObject*,long,long,long) ) libnumarray_FatalApiError)) - -#define NA_set1_Float64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,Float64) ) libnumarray_API[ 46 ]) : (*(void (*) (PyArrayObject*,long,Float64) ) libnumarray_FatalApiError)) - -#define NA_set2_Float64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,long,Float64) ) libnumarray_API[ 47 ]) : (*(void (*) (PyArrayObject*,long,long,Float64) ) libnumarray_FatalApiError)) - -#define NA_set3_Float64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,long,long,Float64) ) libnumarray_API[ 48 ]) : (*(void (*) (PyArrayObject*,long,long,long,Float64) ) libnumarray_FatalApiError)) - -#define NA_get1_Complex64 (libnumarray_API ? (*(Complex64 (*) (PyArrayObject*,long) ) libnumarray_API[ 49 ]) : (*(Complex64 (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_get2_Complex64 (libnumarray_API ? (*(Complex64 (*) (PyArrayObject*,long,long) ) libnumarray_API[ 50 ]) : (*(Complex64 (*) (PyArrayObject*,long,long) ) libnumarray_FatalApiError)) - -#define NA_get3_Complex64 (libnumarray_API ? (*(Complex64 (*) (PyArrayObject*,long,long,long) ) libnumarray_API[ 51 ]) : (*(Complex64 (*) (PyArrayObject*,long,long,long) ) libnumarray_FatalApiError)) - -#define NA_set1_Complex64 (libnumarray_API ? 
(*(void (*) (PyArrayObject*,long,Complex64) ) libnumarray_API[ 52 ]) : (*(void (*) (PyArrayObject*,long,Complex64) ) libnumarray_FatalApiError)) - -#define NA_set2_Complex64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,long,Complex64) ) libnumarray_API[ 53 ]) : (*(void (*) (PyArrayObject*,long,long,Complex64) ) libnumarray_FatalApiError)) - -#define NA_set3_Complex64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,long,long,Complex64) ) libnumarray_API[ 54 ]) : (*(void (*) (PyArrayObject*,long,long,long,Complex64) ) libnumarray_FatalApiError)) - -#define NA_get1_Int64 (libnumarray_API ? (*(Int64 (*) (PyArrayObject*,long) ) libnumarray_API[ 55 ]) : (*(Int64 (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_get2_Int64 (libnumarray_API ? (*(Int64 (*) (PyArrayObject*,long,long) ) libnumarray_API[ 56 ]) : (*(Int64 (*) (PyArrayObject*,long,long) ) libnumarray_FatalApiError)) - -#define NA_get3_Int64 (libnumarray_API ? (*(Int64 (*) (PyArrayObject*,long,long,long) ) libnumarray_API[ 57 ]) : (*(Int64 (*) (PyArrayObject*,long,long,long) ) libnumarray_FatalApiError)) - -#define NA_set1_Int64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,Int64) ) libnumarray_API[ 58 ]) : (*(void (*) (PyArrayObject*,long,Int64) ) libnumarray_FatalApiError)) - -#define NA_set2_Int64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,long,Int64) ) libnumarray_API[ 59 ]) : (*(void (*) (PyArrayObject*,long,long,Int64) ) libnumarray_FatalApiError)) - -#define NA_set3_Int64 (libnumarray_API ? (*(void (*) (PyArrayObject*,long,long,long,Int64) ) libnumarray_API[ 60 ]) : (*(void (*) (PyArrayObject*,long,long,long,Int64) ) libnumarray_FatalApiError)) - -#define NA_get1D_Float64 (libnumarray_API ? (*(int (*) (PyArrayObject*,long,int,Float64*) ) libnumarray_API[ 61 ]) : (*(int (*) (PyArrayObject*,long,int,Float64*) ) libnumarray_FatalApiError)) - -#define NA_set1D_Float64 (libnumarray_API ? 
(*(int (*) (PyArrayObject*,long,int,Float64*) ) libnumarray_API[ 62 ]) : (*(int (*) (PyArrayObject*,long,int,Float64*) ) libnumarray_FatalApiError)) - -#define NA_get1D_Int64 (libnumarray_API ? (*(int (*) (PyArrayObject*,long,int,Int64*) ) libnumarray_API[ 63 ]) : (*(int (*) (PyArrayObject*,long,int,Int64*) ) libnumarray_FatalApiError)) - -#define NA_set1D_Int64 (libnumarray_API ? (*(int (*) (PyArrayObject*,long,int,Int64*) ) libnumarray_API[ 64 ]) : (*(int (*) (PyArrayObject*,long,int,Int64*) ) libnumarray_FatalApiError)) - -#define NA_get1D_Complex64 (libnumarray_API ? (*(int (*) (PyArrayObject*,long,int,Complex64*) ) libnumarray_API[ 65 ]) : (*(int (*) (PyArrayObject*,long,int,Complex64*) ) libnumarray_FatalApiError)) - -#define NA_set1D_Complex64 (libnumarray_API ? (*(int (*) (PyArrayObject*,long,int,Complex64*) ) libnumarray_API[ 66 ]) : (*(int (*) (PyArrayObject*,long,int,Complex64*) ) libnumarray_FatalApiError)) - -#define NA_ShapeEqual (libnumarray_API ? (*(int (*) (PyArrayObject*,PyArrayObject*) ) libnumarray_API[ 67 ]) : (*(int (*) (PyArrayObject*,PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_ShapeLessThan (libnumarray_API ? (*(int (*) (PyArrayObject*,PyArrayObject*) ) libnumarray_API[ 68 ]) : (*(int (*) (PyArrayObject*,PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_ByteOrder (libnumarray_API ? (*(int (*) (void) ) libnumarray_API[ 69 ]) : (*(int (*) (void) ) libnumarray_FatalApiError)) - -#define NA_IeeeSpecial32 (libnumarray_API ? (*(Bool (*) (Float32*,Int32*) ) libnumarray_API[ 70 ]) : (*(Bool (*) (Float32*,Int32*) ) libnumarray_FatalApiError)) - -#define NA_IeeeSpecial64 (libnumarray_API ? (*(Bool (*) (Float64*,Int32*) ) libnumarray_API[ 71 ]) : (*(Bool (*) (Float64*,Int32*) ) libnumarray_FatalApiError)) - -#define NA_updateDataPtr (libnumarray_API ? 
(*(PyArrayObject* (*) (PyArrayObject*) ) libnumarray_API[ 72 ]) : (*(PyArrayObject* (*) (PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_typeNoToName (libnumarray_API ? (*(char* (*) (int) ) libnumarray_API[ 73 ]) : (*(char* (*) (int) ) libnumarray_FatalApiError)) - -#define NA_nameToTypeNo (libnumarray_API ? (*(int (*) (char*) ) libnumarray_API[ 74 ]) : (*(int (*) (char*) ) libnumarray_FatalApiError)) - -#define NA_typeNoToTypeObject (libnumarray_API ? (*(PyObject* (*) (int) ) libnumarray_API[ 75 ]) : (*(PyObject* (*) (int) ) libnumarray_FatalApiError)) - -#define NA_intTupleFromMaybeLongs (libnumarray_API ? (*(PyObject* (*) (int,maybelong*) ) libnumarray_API[ 76 ]) : (*(PyObject* (*) (int,maybelong*) ) libnumarray_FatalApiError)) - -#define NA_maybeLongsFromIntTuple (libnumarray_API ? (*(long (*) (int,maybelong*,PyObject*) ) libnumarray_API[ 77 ]) : (*(long (*) (int,maybelong*,PyObject*) ) libnumarray_FatalApiError)) - -#define NA_intTupleProduct (libnumarray_API ? (*(int (*) (PyObject *obj, long *product) ) libnumarray_API[ 78 ]) : (*(int (*) (PyObject *obj, long *product) ) libnumarray_FatalApiError)) - -#define NA_isIntegerSequence (libnumarray_API ? (*(long (*) (PyObject*) ) libnumarray_API[ 79 ]) : (*(long (*) (PyObject*) ) libnumarray_FatalApiError)) - -#define NA_setArrayFromSequence (libnumarray_API ? (*(PyObject* (*) (PyArrayObject*,PyObject*) ) libnumarray_API[ 80 ]) : (*(PyObject* (*) (PyArrayObject*,PyObject*) ) libnumarray_FatalApiError)) - -#define NA_maxType (libnumarray_API ? (*(int (*) (PyObject*) ) libnumarray_API[ 81 ]) : (*(int (*) (PyObject*) ) libnumarray_FatalApiError)) - -#define NA_isPythonScalar (libnumarray_API ? (*(int (*) (PyObject *obj) ) libnumarray_API[ 82 ]) : (*(int (*) (PyObject *obj) ) libnumarray_FatalApiError)) - -#define NA_getPythonScalar (libnumarray_API ? 
(*(PyObject* (*) (PyArrayObject*,long) ) libnumarray_API[ 83 ]) : (*(PyObject* (*) (PyArrayObject*,long) ) libnumarray_FatalApiError)) - -#define NA_setFromPythonScalar (libnumarray_API ? (*(int (*) (PyArrayObject*,long,PyObject*) ) libnumarray_API[ 84 ]) : (*(int (*) (PyArrayObject*,long,PyObject*) ) libnumarray_FatalApiError)) - -#define NA_NDArrayCheck (libnumarray_API ? (*(int (*) (PyObject*) ) libnumarray_API[ 85 ]) : (*(int (*) (PyObject*) ) libnumarray_FatalApiError)) - -#define NA_NumArrayCheck (libnumarray_API ? (*(int (*) (PyObject*) ) libnumarray_API[ 86 ]) : (*(int (*) (PyObject*) ) libnumarray_FatalApiError)) - -#define NA_ComplexArrayCheck (libnumarray_API ? (*(int (*) (PyObject*) ) libnumarray_API[ 87 ]) : (*(int (*) (PyObject*) ) libnumarray_FatalApiError)) - -#define NA_elements (libnumarray_API ? (*(unsigned long (*) (PyArrayObject*) ) libnumarray_API[ 88 ]) : (*(unsigned long (*) (PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_typeObjectToTypeNo (libnumarray_API ? (*(int (*) (PyObject*) ) libnumarray_API[ 89 ]) : (*(int (*) (PyObject*) ) libnumarray_FatalApiError)) - -#define NA_copyArray (libnumarray_API ? (*(int (*) (PyArrayObject* to, const PyArrayObject* from) ) libnumarray_API[ 90 ]) : (*(int (*) (PyArrayObject* to, const PyArrayObject* from) ) libnumarray_FatalApiError)) - -#define NA_copy (libnumarray_API ? (*(PyArrayObject* (*) (PyArrayObject*) ) libnumarray_API[ 91 ]) : (*(PyArrayObject* (*) (PyArrayObject*) ) libnumarray_FatalApiError)) - -#define NA_getType (libnumarray_API ? (*(PyObject* (*) (PyObject *typeobj_or_name) ) libnumarray_API[ 92 ]) : (*(PyObject* (*) (PyObject *typeobj_or_name) ) libnumarray_FatalApiError)) - -#define NA_callCUFuncCore (libnumarray_API ? 
(*(PyObject * (*) (PyObject *cfunc, long niter, long ninargs, long noutargs, PyObject **BufferObj, long *offset) ) libnumarray_API[ 93 ]) : (*(PyObject * (*) (PyObject *cfunc, long niter, long ninargs, long noutargs, PyObject **BufferObj, long *offset) ) libnumarray_FatalApiError)) - -#define NA_callStrideConvCFuncCore (libnumarray_API ? (*(PyObject * (*) (PyObject *cfunc, int nshape, maybelong *shape, PyObject *inbuffObj, long inboffset, int nstrides0, maybelong *inbstrides, PyObject *outbuffObj, long outboffset, int nstrides1, maybelong *outbstrides, long nbytes) ) libnumarray_API[ 94 ]) : (*(PyObject * (*) (PyObject *cfunc, int nshape, maybelong *shape, PyObject *inbuffObj, long inboffset, int nstrides0, maybelong *inbstrides, PyObject *outbuffObj, long outboffset, int nstrides1, maybelong *outbstrides, long nbytes) ) libnumarray_FatalApiError)) - -#define NA_stridesFromShape (libnumarray_API ? (*(void (*) (int nshape, maybelong *shape, maybelong bytestride, maybelong *strides) ) libnumarray_API[ 95 ]) : (*(void (*) (int nshape, maybelong *shape, maybelong bytestride, maybelong *strides) ) libnumarray_FatalApiError)) - -#define NA_OperatorCheck (libnumarray_API ? (*(int (*) (PyObject *obj) ) libnumarray_API[ 96 ]) : (*(int (*) (PyObject *obj) ) libnumarray_FatalApiError)) - -#define NA_ConverterCheck (libnumarray_API ? (*(int (*) (PyObject *obj) ) libnumarray_API[ 97 ]) : (*(int (*) (PyObject *obj) ) libnumarray_FatalApiError)) - -#define NA_UfuncCheck (libnumarray_API ? (*(int (*) (PyObject *obj) ) libnumarray_API[ 98 ]) : (*(int (*) (PyObject *obj) ) libnumarray_FatalApiError)) - -#define NA_CfuncCheck (libnumarray_API ? (*(int (*) (PyObject *obj) ) libnumarray_API[ 99 ]) : (*(int (*) (PyObject *obj) ) libnumarray_FatalApiError)) - -#define NA_getByteOffset (libnumarray_API ? 
(*(int (*) (PyArrayObject *array, int nindices, maybelong *indices, long *offset) ) libnumarray_API[ 100 ]) : (*(int (*) (PyArrayObject *array, int nindices, maybelong *indices, long *offset) ) libnumarray_FatalApiError)) - -#define NA_swapAxes (libnumarray_API ? (*(int (*) (PyArrayObject *array, int x, int y) ) libnumarray_API[ 101 ]) : (*(int (*) (PyArrayObject *array, int x, int y) ) libnumarray_FatalApiError)) - -#define NA_initModuleGlobal (libnumarray_API ? (*(PyObject * (*) (char *module, char *global) ) libnumarray_API[ 102 ]) : (*(PyObject * (*) (char *module, char *global) ) libnumarray_FatalApiError)) - -#define NA_NumarrayType (libnumarray_API ? (*(NumarrayType (*) (PyObject *seq) ) libnumarray_API[ 103 ]) : (*(NumarrayType (*) (PyObject *seq) ) libnumarray_FatalApiError)) - -#define NA_NewAllFromBuffer (libnumarray_API ? (*(PyArrayObject * (*) (int ndim, maybelong *shape, NumarrayType type, PyObject *bufferObject, maybelong byteoffset, maybelong bytestride, int byteorder, int aligned, int writeable) ) libnumarray_API[ 104 ]) : (*(PyArrayObject * (*) (int ndim, maybelong *shape, NumarrayType type, PyObject *bufferObject, maybelong byteoffset, maybelong bytestride, int byteorder, int aligned, int writeable) ) libnumarray_FatalApiError)) - -#define NA_alloc1D_Float64 (libnumarray_API ? (*(Float64 * (*) (PyArrayObject *a, long offset, int cnt) ) libnumarray_API[ 105 ]) : (*(Float64 * (*) (PyArrayObject *a, long offset, int cnt) ) libnumarray_FatalApiError)) - -#define NA_alloc1D_Int64 (libnumarray_API ? (*(Int64 * (*) (PyArrayObject *a, long offset, int cnt) ) libnumarray_API[ 106 ]) : (*(Int64 * (*) (PyArrayObject *a, long offset, int cnt) ) libnumarray_FatalApiError)) - -#define NA_updateAlignment (libnumarray_API ? (*(void (*) (PyArrayObject *self) ) libnumarray_API[ 107 ]) : (*(void (*) (PyArrayObject *self) ) libnumarray_FatalApiError)) - -#define NA_updateContiguous (libnumarray_API ? 
(*(void (*) (PyArrayObject *self) ) libnumarray_API[ 108 ]) : (*(void (*) (PyArrayObject *self) ) libnumarray_FatalApiError)) - -#define NA_updateStatus (libnumarray_API ? (*(void (*) (PyArrayObject *self) ) libnumarray_API[ 109 ]) : (*(void (*) (PyArrayObject *self) ) libnumarray_FatalApiError)) - -#define NA_NumArrayCheckExact (libnumarray_API ? (*(int (*) (PyObject *op) ) libnumarray_API[ 110 ]) : (*(int (*) (PyObject *op) ) libnumarray_FatalApiError)) - -#define NA_NDArrayCheckExact (libnumarray_API ? (*(int (*) (PyObject *op) ) libnumarray_API[ 111 ]) : (*(int (*) (PyObject *op) ) libnumarray_FatalApiError)) - -#define NA_OperatorCheckExact (libnumarray_API ? (*(int (*) (PyObject *op) ) libnumarray_API[ 112 ]) : (*(int (*) (PyObject *op) ) libnumarray_FatalApiError)) - -#define NA_ConverterCheckExact (libnumarray_API ? (*(int (*) (PyObject *op) ) libnumarray_API[ 113 ]) : (*(int (*) (PyObject *op) ) libnumarray_FatalApiError)) - -#define NA_UfuncCheckExact (libnumarray_API ? (*(int (*) (PyObject *op) ) libnumarray_API[ 114 ]) : (*(int (*) (PyObject *op) ) libnumarray_FatalApiError)) - -#define NA_CfuncCheckExact (libnumarray_API ? (*(int (*) (PyObject *op) ) libnumarray_API[ 115 ]) : (*(int (*) (PyObject *op) ) libnumarray_FatalApiError)) - -#define NA_getArrayData (libnumarray_API ? (*(char * (*) (PyArrayObject *ap) ) libnumarray_API[ 116 ]) : (*(char * (*) (PyArrayObject *ap) ) libnumarray_FatalApiError)) - -#define NA_updateByteswap (libnumarray_API ? (*(void (*) (PyArrayObject *ap) ) libnumarray_API[ 117 ]) : (*(void (*) (PyArrayObject *ap) ) libnumarray_FatalApiError)) - -#define NA_DescrFromType (libnumarray_API ? (*(PyArray_Descr * (*) (int type) ) libnumarray_API[ 118 ]) : (*(PyArray_Descr * (*) (int type) ) libnumarray_FatalApiError)) - -#define NA_Cast (libnumarray_API ? 
(*(PyObject * (*) (PyArrayObject *a, int type) ) libnumarray_API[ 119 ]) : (*(PyObject * (*) (PyArrayObject *a, int type) ) libnumarray_FatalApiError)) - -#define NA_checkFPErrors (libnumarray_API ? (*(int (*) (void) ) libnumarray_API[ 120 ]) : (*(int (*) (void) ) libnumarray_FatalApiError)) - -#define NA_clearFPErrors (libnumarray_API ? (*(void (*) (void) ) libnumarray_API[ 121 ]) : (*(void (*) (void) ) libnumarray_FatalApiError)) - -#define NA_checkAndReportFPErrors (libnumarray_API ? (*(int (*) (char *name) ) libnumarray_API[ 122 ]) : (*(int (*) (char *name) ) libnumarray_FatalApiError)) - -#define NA_IeeeMask32 (libnumarray_API ? (*(Bool (*) (Float32,Int32) ) libnumarray_API[ 123 ]) : (*(Bool (*) (Float32,Int32) ) libnumarray_FatalApiError)) - -#define NA_IeeeMask64 (libnumarray_API ? (*(Bool (*) (Float64,Int32) ) libnumarray_API[ 124 ]) : (*(Bool (*) (Float64,Int32) ) libnumarray_FatalApiError)) - -#define _NA_callStridingHelper (libnumarray_API ? (*(int (*) (PyObject *aux, long dim, long nnumarray, PyArrayObject *numarray[], char *data[], CFUNC_STRIDED_FUNC f) ) libnumarray_API[ 125 ]) : (*(int (*) (PyObject *aux, long dim, long nnumarray, PyArrayObject *numarray[], char *data[], CFUNC_STRIDED_FUNC f) ) libnumarray_FatalApiError)) - -#define NA_FromDimsStridesDescrAndData (libnumarray_API ? (*(PyArrayObject * (*) (int nd, maybelong *dims, maybelong *strides, PyArray_Descr *descr, char *data) ) libnumarray_API[ 126 ]) : (*(PyArrayObject * (*) (int nd, maybelong *dims, maybelong *strides, PyArray_Descr *descr, char *data) ) libnumarray_FatalApiError)) - -#define NA_FromDimsTypeAndData (libnumarray_API ? (*(PyArrayObject * (*) (int nd, maybelong *dims, int type, char *data) ) libnumarray_API[ 127 ]) : (*(PyArrayObject * (*) (int nd, maybelong *dims, int type, char *data) ) libnumarray_FatalApiError)) - -#define NA_FromDimsStridesTypeAndData (libnumarray_API ? 
(*(PyArrayObject * (*) (int nd, maybelong *dims, maybelong *strides, int type, char *data) ) libnumarray_API[ 128 ]) : (*(PyArrayObject * (*) (int nd, maybelong *dims, maybelong *strides, int type, char *data) ) libnumarray_FatalApiError)) - -#define NA_scipy_typestr (libnumarray_API ? (*(int (*) (NumarrayType t, int byteorder, char *typestr) ) libnumarray_API[ 129 ]) : (*(int (*) (NumarrayType t, int byteorder, char *typestr) ) libnumarray_FatalApiError)) - -#define NA_FromArrayStruct (libnumarray_API ? (*(PyArrayObject * (*) (PyObject *a) ) libnumarray_API[ 130 ]) : (*(PyArrayObject * (*) (PyObject *a) ) libnumarray_FatalApiError)) - -#endif - - /* Total number of C API pointers */ -#define libnumarray_API_pointers 131 - -#ifdef __cplusplus -} -#endif - -#endif /* NUMPY_LIBNUMARRAY_H */ diff --git a/numpy-1.6.2/numpy/numarray/include/numpy/numcomplex.h b/numpy-1.6.2/numpy/numarray/include/numpy/numcomplex.h deleted file mode 100644 index 9ed4198c7e..0000000000 --- a/numpy-1.6.2/numpy/numarray/include/numpy/numcomplex.h +++ /dev/null @@ -1,252 +0,0 @@ -/* See numarray.h for Complex32, Complex64: - -typedef struct { Float32 r, i; } Complex32; -typedef struct { Float64 r, i; } Complex64; - -*/ -typedef struct { Float32 a, theta; } PolarComplex32; -typedef struct { Float64 a, theta; } PolarComplex64; - -#define NUM_SQ(x) ((x)*(x)) - -#define NUM_CABSSQ(p) (NUM_SQ((p).r) + NUM_SQ((p).i)) - -#define NUM_CABS(p) sqrt(NUM_CABSSQ(p)) - -#define NUM_C_TO_P(c, p) (p).a = NUM_CABS(c); \ - (p).theta = atan2((c).i, (c).r); - -#define NUM_P_TO_C(p, c) (c).r = (p).a*cos((p).theta); \ - (c).i = (p).a*sin((p).theta); - -#define NUM_CASS(p, q) (q).r = (p).r, (q).i = (p).i - -#define NUM_CADD(p, q, s) (s).r = (p).r + (q).r, \ - (s).i = (p).i + (q).i - -#define NUM_CSUB(p, q, s) (s).r = (p).r - (q).r, \ - (s).i = (p).i - (q).i - -#define NUM_CMUL(p, q, s) \ - { Float64 rp = (p).r; \ - Float64 rq = (q).r; \ - (s).r = rp*rq - (p).i*(q).i; \ - (s).i = rp*(q).i + rq*(p).i; \ - } - 
-#define NUM_CDIV(p, q, s) \ - { \ - Float64 rp = (p).r; \ - Float64 ip = (p).i; \ - Float64 rq = (q).r; \ - if ((q).i != 0) { \ - Float64 temp = NUM_CABSSQ(q); \ - (s).r = (rp*rq+(p).i*(q).i)/temp; \ - (s).i = (rq*(p).i-(q).i*rp)/temp; \ - } else { \ - (s).r = rp/rq; \ - (s).i = ip/rq; \ - } \ - } - -#define NUM_CREM(p, q, s) \ - { Complex64 r; \ - NUM_CDIV(p, q, r); \ - r.r = floor(r.r); \ - r.i = 0; \ - NUM_CMUL(r, q, r); \ - NUM_CSUB(p, r, s); \ - } - -#define NUM_CMINUS(p, s) (s).r = -(p).r; (s).i = -(p).i; -#define NUM_CNEG NUM_CMINUS - -#define NUM_CEQ(p, q) (((p).r == (q).r) && ((p).i == (q).i)) -#define NUM_CNE(p, q) (((p).r != (q).r) || ((p).i != (q).i)) -#define NUM_CLT(p, q) ((p).r < (q).r) -#define NUM_CGT(p, q) ((p).r > (q).r) -#define NUM_CLE(p, q) ((p).r <= (q).r) -#define NUM_CGE(p, q) ((p).r >= (q).r) - -/* e**z = e**x * (cos(y)+ i*sin(y)) where z = x + i*y - so e**z = e**x * cos(y) + i * e**x * sin(y) -*/ -#define NUM_CEXP(p, s) \ - { Float64 ex = exp((p).r); \ - (s).r = ex * cos((p).i); \ - (s).i = ex * sin((p).i); \ - } - -/* e**w = z; w = u + i*v; z = r * e**(i*theta); - -e**u * e**(i*v) = r * e**(i*theta); - -log(z) = w; log(z) = log(r) + i*theta; - */ -#define NUM_CLOG(p, s) \ - { PolarComplex64 temp; NUM_C_TO_P(p, temp); \ - (s).r = num_log(temp.a); \ - (s).i = temp.theta; \ - } - -#define NUM_LOG10_E 0.43429448190325182 - -#define NUM_CLOG10(p, s) \ - { NUM_CLOG(p, s); \ - (s).r *= NUM_LOG10_E; \ - (s).i *= NUM_LOG10_E; \ - } - -/* s = p ** q */ -#define NUM_CPOW(p, q, s) { if (NUM_CABSSQ(p) == 0) { \ - if ((q).r == 0 && (q).i == 0) { \ - (s).r = (s).i = 1; \ - } else { \ - (s).r = (s).i = 0; \ - } \ - } else { \ - NUM_CLOG(p, s); \ - NUM_CMUL(s, q, s); \ - NUM_CEXP(s, s); \ - } \ - } - -#define NUM_CSQRT(p, s) { Complex64 temp; temp.r = 0.5; temp.i=0; \ - NUM_CPOW(p, temp, s); \ - } - -#define NUM_CSQR(p, s) { Complex64 temp; temp.r = 2.0; temp.i=0; \ - NUM_CPOW(p, temp, s); \ - } - -#define NUM_CSIN(p, s) { Float64 sp = sin((p).r); \ - 
Float64 cp = cos((p).r); \ - (s).r = cosh((p).i) * sp; \ - (s).i = sinh((p).i) * cp; \ - } - -#define NUM_CCOS(p, s) { Float64 sp = sin((p).r); \ - Float64 cp = cos((p).r); \ - (s).r = cosh((p).i) * cp; \ - (s).i = -sinh((p).i) * sp; \ - } - -#define NUM_CTAN(p, s) { Complex64 ss, cs; \ - NUM_CSIN(p, ss); \ - NUM_CCOS(p, cs); \ - NUM_CDIV(ss, cs, s); \ - } - -#define NUM_CSINH(p, s) { Float64 sp = sin((p).i); \ - Float64 cp = cos((p).i); \ - (s).r = sinh((p).r) * cp; \ - (s).i = cosh((p).r) * sp; \ - } - -#define NUM_CCOSH(p, s) { Float64 sp = sin((p).i); \ - Float64 cp = cos((p).i); \ - (s).r = cosh((p).r) * cp; \ - (s).i = sinh((p).r) * sp; \ - } - -#define NUM_CTANH(p, s) { Complex64 ss, cs; \ - NUM_CSINH(p, ss); \ - NUM_CCOSH(p, cs); \ - NUM_CDIV(ss, cs, s); \ - } - -#define NUM_CRPOW(p, v, s) { Complex64 cr; cr.r = v; cr.i = 0; \ - NUM_CPOW(p,cr,s); \ - } - -#define NUM_CRMUL(p, v, s) (s).r = (p).r * v; (s).i = (p).i * v; - -#define NUM_CIMUL(p, s) { Float64 temp = (s).r; \ - (s).r = -(p).i; (s).i = temp; \ - } - -/* asin(z) = -i * log(i*z + (1 - z**2)**0.5) */ -#define NUM_CASIN(p, s) { Complex64 p1; NUM_CASS(p, p1); \ - NUM_CIMUL(p, p1); \ - NUM_CMUL(p, p, s); \ - NUM_CNEG(s, s); \ - (s).r += 1; \ - NUM_CRPOW(s, 0.5, s); \ - NUM_CADD(p1, s, s); \ - NUM_CLOG(s, s); \ - NUM_CIMUL(s, s); \ - NUM_CNEG(s, s); \ - } - -/* acos(z) = -i * log(z + i*(1 - z**2)**0.5) */ -#define NUM_CACOS(p, s) { Complex64 p1; NUM_CASS(p, p1); \ - NUM_CMUL(p, p, s); \ - NUM_CNEG(s, s); \ - (s).r += 1; \ - NUM_CRPOW(s, 0.5, s); \ - NUM_CIMUL(s, s); \ - NUM_CADD(p1, s, s); \ - NUM_CLOG(s, s); \ - NUM_CIMUL(s, s); \ - NUM_CNEG(s, s); \ - } - -/* atan(z) = i/2 * log( (i+z) / (i - z) ) */ -#define NUM_CATAN(p, s) { Complex64 p1, p2; \ - NUM_CASS(p, p1); NUM_CNEG(p, p2); \ - p1.i += 1; \ - p2.i += 1; \ - NUM_CDIV(p1, p2, s); \ - NUM_CLOG(s, s); \ - NUM_CIMUL(s, s); \ - NUM_CRMUL(s, 0.5, s); \ - } - -/* asinh(z) = log( z + (z**2 + 1)**0.5 ) */ -#define NUM_CASINH(p, s) { Complex64 p1; 
NUM_CASS(p, p1); \ - NUM_CMUL(p, p, s); \ - (s).r += 1; \ - NUM_CRPOW(s, 0.5, s); \ - NUM_CADD(p1, s, s); \ - NUM_CLOG(s, s); \ - } - -/* acosh(z) = log( z + (z**2 - 1)**0.5 ) */ -#define NUM_CACOSH(p, s) { Complex64 p1; NUM_CASS(p, p1); \ - NUM_CMUL(p, p, s); \ - (s).r -= 1; \ - NUM_CRPOW(s, 0.5, s); \ - NUM_CADD(p1, s, s); \ - NUM_CLOG(s, s); \ - } - -/* atanh(z) = 1/2 * log( (1+z)/(1-z) ) */ -#define NUM_CATANH(p, s) { Complex64 p1, p2; \ - NUM_CASS(p, p1); NUM_CNEG(p, p2); \ - p1.r += 1; \ - p2.r += 1; \ - NUM_CDIV(p1, p2, s); \ - NUM_CLOG(s, s); \ - NUM_CRMUL(s, 0.5, s); \ - } - - -#define NUM_CMIN(p, q) (NUM_CLE(p, q) ? p : q) -#define NUM_CMAX(p, q) (NUM_CGE(p, q) ? p : q) - -#define NUM_CNZ(p) (((p).r != 0) || ((p).i != 0)) -#define NUM_CLAND(p, q) (NUM_CNZ(p) & NUM_CNZ(q)) -#define NUM_CLOR(p, q) (NUM_CNZ(p) | NUM_CNZ(q)) -#define NUM_CLXOR(p, q) (NUM_CNZ(p) ^ NUM_CNZ(q)) -#define NUM_CLNOT(p) (!NUM_CNZ(p)) - -#define NUM_CFLOOR(p, s) (s).r = floor((p).r); (s).i = floor((p).i); -#define NUM_CCEIL(p, s) (s).r = ceil((p).r); (s).i = ceil((p).i); - -#define NUM_CFABS(p, s) (s).r = fabs((p).r); (s).i = fabs((p).i); -#define NUM_CROUND(p, s) (s).r = num_round((p).r); (s).i = num_round((p).i); -#define NUM_CHYPOT(p, q, s) { Complex64 t; \ - NUM_CSQR(p, s); NUM_CSQR(q, t); \ - NUM_CADD(s, t, s); \ - NUM_CSQRT(s, s); \ - } diff --git a/numpy-1.6.2/numpy/numarray/include/numpy/nummacro.h b/numpy-1.6.2/numpy/numarray/include/numpy/nummacro.h deleted file mode 100644 index e9acd6e31c..0000000000 --- a/numpy-1.6.2/numpy/numarray/include/numpy/nummacro.h +++ /dev/null @@ -1,447 +0,0 @@ -/* Primarily for compatibility with numarray C-API */ - -#if !defined(_ndarraymacro) -#define _ndarraymacro - -/* The structs defined here are private implementation details of numarray -which are subject to change w/o notice. 
-*/ - -#define PY_BOOL_CHAR "b" -#define PY_INT8_CHAR "b" -#define PY_INT16_CHAR "h" -#define PY_INT32_CHAR "i" -#define PY_FLOAT32_CHAR "f" -#define PY_FLOAT64_CHAR "d" -#define PY_UINT8_CHAR "h" -#define PY_UINT16_CHAR "i" -#define PY_UINT32_CHAR "i" /* Unless longer int available */ -#define PY_COMPLEX64_CHAR "D" -#define PY_COMPLEX128_CHAR "D" - -#define PY_LONG_CHAR "l" -#define PY_LONG_LONG_CHAR "L" - -#define pyFPE_DIVIDE_BY_ZERO 1 -#define pyFPE_OVERFLOW 2 -#define pyFPE_UNDERFLOW 4 -#define pyFPE_INVALID 8 - -#define isNonZERO(x) (x != 0) /* to convert values to boolean 1's or 0's */ - -typedef enum -{ - NUM_CONTIGUOUS=1, - NUM_NOTSWAPPED=0x0200, - NUM_ALIGNED=0x0100, - NUM_WRITABLE=0x0400, - NUM_COPY=0x0020, - - NUM_C_ARRAY = (NUM_CONTIGUOUS | NUM_ALIGNED | NUM_NOTSWAPPED), - NUM_UNCONVERTED = 0 -} NumRequirements; - -#define UNCONVERTED 0 -#define C_ARRAY (NUM_CONTIGUOUS | NUM_NOTSWAPPED | NUM_ALIGNED) - -#define MUST_BE_COMPUTED 2 - -#define NUM_FLOORDIVIDE(a,b,out) (out) = floor((a)/(b)) - -#define NA_Begin() Py_Initialize(); import_libnumarray(); -#define NA_End() NA_Done(); Py_Finalize(); - -#define NA_OFFSETDATA(num) ((void *) PyArray_DATA(num)) - -/* unaligned NA_COPY functions */ -#define NA_COPY1(i, o) (*(o) = *(i)) -#define NA_COPY2(i, o) NA_COPY1(i, o), NA_COPY1(i+1, o+1) -#define NA_COPY4(i, o) NA_COPY2(i, o), NA_COPY2(i+2, o+2) -#define NA_COPY8(i, o) NA_COPY4(i, o), NA_COPY4(i+4, o+4) -#define NA_COPY16(i, o) NA_COPY8(i, o), NA_COPY8(i+8, o+8) - -/* byteswapping macros: these fail if i==o */ -#define NA_SWAP1(i, o) NA_COPY1(i, o) -#define NA_SWAP2(i, o) NA_SWAP1(i, o+1), NA_SWAP1(i+1, o) -#define NA_SWAP4(i, o) NA_SWAP2(i, o+2), NA_SWAP2(i+2, o) -#define NA_SWAP8(i, o) NA_SWAP4(i, o+4), NA_SWAP4(i+4, o) -#define NA_SWAP16(i, o) NA_SWAP8(i, o+8), NA_SWAP8(i+8, o) - -/* complex byteswaps must swap each part (real, imag) independently */ -#define NA_COMPLEX_SWAP8(i, o) NA_SWAP4(i, o), NA_SWAP4(i+4, o+4) -#define NA_COMPLEX_SWAP16(i, o) 
NA_SWAP8(i, o), NA_SWAP8(i+8, o+8) - -/* byteswapping macros: these work even if i == o */ -#define NA_TSWAP1(i, o, t) NA_COPY1(i, t), NA_SWAP1(t, o) -#define NA_TSWAP2(i, o, t) NA_COPY2(i, t), NA_SWAP2(t, o) -#define NA_TSWAP4(i, o, t) NA_COPY4(i, t), NA_SWAP4(t, o) -#define NA_TSWAP8(i, o, t) NA_COPY8(i, t), NA_SWAP8(t, o) - -/* fast copy functions for %N aligned i and o */ -#define NA_ACOPY1(i, o) (((Int8 *)o)[0] = ((Int8 *)i)[0]) -#define NA_ACOPY2(i, o) (((Int16 *)o)[0] = ((Int16 *)i)[0]) -#define NA_ACOPY4(i, o) (((Int32 *)o)[0] = ((Int32 *)i)[0]) -#define NA_ACOPY8(i, o) (((Float64 *)o)[0] = ((Float64 *)i)[0]) -#define NA_ACOPY16(i, o) (((Complex64 *)o)[0] = ((Complex64 *)i)[0]) - -/* from here down, type("ai") is NDInfo* */ - -#define NA_PTR(ai) ((char *) NA_OFFSETDATA((ai))) -#define NA_PTR1(ai, i) (NA_PTR(ai) + \ - (i)*(ai)->strides[0]) -#define NA_PTR2(ai, i, j) (NA_PTR(ai) + \ - (i)*(ai)->strides[0] + \ - (j)*(ai)->strides[1]) -#define NA_PTR3(ai, i, j, k) (NA_PTR(ai) + \ - (i)*(ai)->strides[0] + \ - (j)*(ai)->strides[1] + \ - (k)*(ai)->strides[2]) - -#define NA_SET_TEMP(ai, type, v) (((type *) &__temp__)[0] = v) - -#define NA_SWAPComplex64 NA_COMPLEX_SWAP16 -#define NA_SWAPComplex32 NA_COMPLEX_SWAP8 -#define NA_SWAPFloat64 NA_SWAP8 -#define NA_SWAPFloat32 NA_SWAP4 -#define NA_SWAPInt64 NA_SWAP8 -#define NA_SWAPUInt64 NA_SWAP8 -#define NA_SWAPInt32 NA_SWAP4 -#define NA_SWAPUInt32 NA_SWAP4 -#define NA_SWAPInt16 NA_SWAP2 -#define NA_SWAPUInt16 NA_SWAP2 -#define NA_SWAPInt8 NA_SWAP1 -#define NA_SWAPUInt8 NA_SWAP1 -#define NA_SWAPBool NA_SWAP1 - -#define NA_COPYComplex64 NA_COPY16 -#define NA_COPYComplex32 NA_COPY8 -#define NA_COPYFloat64 NA_COPY8 -#define NA_COPYFloat32 NA_COPY4 -#define NA_COPYInt64 NA_COPY8 -#define NA_COPYUInt64 NA_COPY8 -#define NA_COPYInt32 NA_COPY4 -#define NA_COPYUInt32 NA_COPY4 -#define NA_COPYInt16 NA_COPY2 -#define NA_COPYUInt16 NA_COPY2 -#define NA_COPYInt8 NA_COPY1 -#define NA_COPYUInt8 NA_COPY1 -#define NA_COPYBool NA_COPY1 - 
-#ifdef __cplusplus -extern "C" { -#endif - -#define _makeGetPb(type) \ -static type _NA_GETPb_##type(char *ptr) \ -{ \ - type temp; \ - NA_SWAP##type(ptr, (char *)&temp); \ - return temp; \ -} - -#define _makeGetPa(type) \ -static type _NA_GETPa_##type(char *ptr) \ -{ \ - type temp; \ - NA_COPY##type(ptr, (char *)&temp); \ - return temp; \ -} - -_makeGetPb(Complex64) -_makeGetPb(Complex32) -_makeGetPb(Float64) -_makeGetPb(Float32) -_makeGetPb(Int64) -_makeGetPb(UInt64) -_makeGetPb(Int32) -_makeGetPb(UInt32) -_makeGetPb(Int16) -_makeGetPb(UInt16) -_makeGetPb(Int8) -_makeGetPb(UInt8) -_makeGetPb(Bool) - -_makeGetPa(Complex64) -_makeGetPa(Complex32) -_makeGetPa(Float64) -_makeGetPa(Float32) -_makeGetPa(Int64) -_makeGetPa(UInt64) -_makeGetPa(Int32) -_makeGetPa(UInt32) -_makeGetPa(Int16) -_makeGetPa(UInt16) -_makeGetPa(Int8) -_makeGetPa(UInt8) -_makeGetPa(Bool) - -#undef _makeGetPb -#undef _makeGetPa - -#define _makeSetPb(type) \ -static void _NA_SETPb_##type(char *ptr, type v) \ -{ \ - NA_SWAP##type(((char *)&v), ptr); \ - return; \ -} - -#define _makeSetPa(type) \ -static void _NA_SETPa_##type(char *ptr, type v) \ -{ \ - NA_COPY##type(((char *)&v), ptr); \ - return; \ -} - -_makeSetPb(Complex64) -_makeSetPb(Complex32) -_makeSetPb(Float64) -_makeSetPb(Float32) -_makeSetPb(Int64) -_makeSetPb(UInt64) -_makeSetPb(Int32) -_makeSetPb(UInt32) -_makeSetPb(Int16) -_makeSetPb(UInt16) -_makeSetPb(Int8) -_makeSetPb(UInt8) -_makeSetPb(Bool) - -_makeSetPa(Complex64) -_makeSetPa(Complex32) -_makeSetPa(Float64) -_makeSetPa(Float32) -_makeSetPa(Int64) -_makeSetPa(UInt64) -_makeSetPa(Int32) -_makeSetPa(UInt32) -_makeSetPa(Int16) -_makeSetPa(UInt16) -_makeSetPa(Int8) -_makeSetPa(UInt8) -_makeSetPa(Bool) - -#undef _makeSetPb -#undef _makeSetPa - -#ifdef __cplusplus - } -#endif - -/* ========================== ptr get/set ================================ */ - -/* byteswapping */ -#define NA_GETPb(ai, type, ptr) _NA_GETPb_##type(ptr) - -/* aligning */ -#define NA_GETPa(ai, type, ptr) 
_NA_GETPa_##type(ptr) - -/* fast (aligned, !byteswapped) */ -#define NA_GETPf(ai, type, ptr) (*((type *) (ptr))) - -#define NA_GETP(ai, type, ptr) \ - (PyArray_ISCARRAY(ai) ? NA_GETPf(ai, type, ptr) \ - : (PyArray_ISBYTESWAPPED(ai) ? \ - NA_GETPb(ai, type, ptr) \ - : NA_GETPa(ai, type, ptr))) - -/* NOTE: NA_SET* macros cannot be used as values. */ - -/* byteswapping */ -#define NA_SETPb(ai, type, ptr, v) _NA_SETPb_##type(ptr, v) - -/* aligning */ -#define NA_SETPa(ai, type, ptr, v) _NA_SETPa_##type(ptr, v) - -/* fast (aligned, !byteswapped) */ -#define NA_SETPf(ai, type, ptr, v) ((*((type *) ptr)) = (v)) - -#define NA_SETP(ai, type, ptr, v) \ - if (PyArray_ISCARRAY(ai)) { \ - NA_SETPf((ai), type, (ptr), (v)); \ - } else if (PyArray_ISBYTESWAPPED(ai)) { \ - NA_SETPb((ai), type, (ptr), (v)); \ - } else \ - NA_SETPa((ai), type, (ptr), (v)) - -/* ========================== 1 index get/set ============================ */ - -/* byteswapping */ -#define NA_GET1b(ai, type, i) NA_GETPb(ai, type, NA_PTR1(ai, i)) -/* aligning */ -#define NA_GET1a(ai, type, i) NA_GETPa(ai, type, NA_PTR1(ai, i)) -/* fast (aligned, !byteswapped) */ -#define NA_GET1f(ai, type, i) NA_GETPf(ai, type, NA_PTR1(ai, i)) -/* testing */ -#define NA_GET1(ai, type, i) NA_GETP(ai, type, NA_PTR1(ai, i)) - -/* byteswapping */ -#define NA_SET1b(ai, type, i, v) NA_SETPb(ai, type, NA_PTR1(ai, i), v) -/* aligning */ -#define NA_SET1a(ai, type, i, v) NA_SETPa(ai, type, NA_PTR1(ai, i), v) -/* fast (aligned, !byteswapped) */ -#define NA_SET1f(ai, type, i, v) NA_SETPf(ai, type, NA_PTR1(ai, i), v) -/* testing */ -#define NA_SET1(ai, type, i, v) NA_SETP(ai, type, NA_PTR1(ai, i), v) - -/* ========================== 2 index get/set ============================= */ - -/* byteswapping */ -#define NA_GET2b(ai, type, i, j) NA_GETPb(ai, type, NA_PTR2(ai, i, j)) -/* aligning */ -#define NA_GET2a(ai, type, i, j) NA_GETPa(ai, type, NA_PTR2(ai, i, j)) -/* fast (aligned, !byteswapped) */ -#define NA_GET2f(ai, type, i, j) 
NA_GETPf(ai, type, NA_PTR2(ai, i, j)) -/* testing */ -#define NA_GET2(ai, type, i, j) NA_GETP(ai, type, NA_PTR2(ai, i, j)) - -/* byteswapping */ -#define NA_SET2b(ai, type, i, j, v) NA_SETPb(ai, type, NA_PTR2(ai, i, j), v) -/* aligning */ -#define NA_SET2a(ai, type, i, j, v) NA_SETPa(ai, type, NA_PTR2(ai, i, j), v) -/* fast (aligned, !byteswapped) */ -#define NA_SET2f(ai, type, i, j, v) NA_SETPf(ai, type, NA_PTR2(ai, i, j), v) - -#define NA_SET2(ai, type, i, j, v) NA_SETP(ai, type, NA_PTR2(ai, i, j), v) - -/* ========================== 3 index get/set ============================= */ - -/* byteswapping */ -#define NA_GET3b(ai, type, i, j, k) NA_GETPb(ai, type, NA_PTR3(ai, i, j, k)) -/* aligning */ -#define NA_GET3a(ai, type, i, j, k) NA_GETPa(ai, type, NA_PTR3(ai, i, j, k)) -/* fast (aligned, !byteswapped) */ -#define NA_GET3f(ai, type, i, j, k) NA_GETPf(ai, type, NA_PTR3(ai, i, j, k)) -/* testing */ -#define NA_GET3(ai, type, i, j, k) NA_GETP(ai, type, NA_PTR3(ai, i, j, k)) - -/* byteswapping */ -#define NA_SET3b(ai, type, i, j, k, v) \ - NA_SETPb(ai, type, NA_PTR3(ai, i, j, k), v) -/* aligning */ -#define NA_SET3a(ai, type, i, j, k, v) \ - NA_SETPa(ai, type, NA_PTR3(ai, i, j, k), v) -/* fast (aligned, !byteswapped) */ -#define NA_SET3f(ai, type, i, j, k, v) \ - NA_SETPf(ai, type, NA_PTR3(ai, i, j, k), v) -#define NA_SET3(ai, type, i, j, k, v) \ - NA_SETP(ai, type, NA_PTR3(ai, i, j, k), v) - -/* ========================== 1D get/set ================================== */ - -#define NA_GET1Db(ai, type, base, cnt, out) \ - { int i, stride = ai->strides[ai->nd-1]; \ - for(i=0; istrides[ai->nd-1]; \ - for(i=0; istrides[ai->nd-1]; \ - for(i=0; istrides[ai->nd-1]; \ - for(i=0; istrides[ai->nd-1]; \ - for(i=0; istrides[ai->nd-1]; \ - for(i=0; i=(y)) ? (x) : (y)) -#endif - -#if !defined(ABS) -#define ABS(x) (((x) >= 0) ? 
(x) : -(x)) -#endif - -#define ELEM(x) (sizeof(x)/sizeof(x[0])) - -#define BOOLEAN_BITWISE_NOT(x) ((x) ^ 1) - -#define NA_NBYTES(a) (a->descr->elsize * NA_elements(a)) - -#if defined(NA_SMP) -#define BEGIN_THREADS Py_BEGIN_ALLOW_THREADS -#define END_THREADS Py_END_ALLOW_THREADS -#else -#define BEGIN_THREADS -#define END_THREADS -#endif - -#if !defined(NA_isnan) - -#define U32(u) (* (Int32 *) &(u) ) -#define U64(u) (* (Int64 *) &(u) ) - -#define NA_isnan32(u) \ - ( (( U32(u) & 0x7f800000) == 0x7f800000) && ((U32(u) & 0x007fffff) != 0)) ? 1:0 - -#if !defined(_MSC_VER) -#define NA_isnan64(u) \ - ( (( U64(u) & 0x7ff0000000000000LL) == 0x7ff0000000000000LL) && ((U64(u) & 0x000fffffffffffffLL) != 0)) ? 1:0 -#else -#define NA_isnan64(u) \ - ( (( U64(u) & 0x7ff0000000000000i64) == 0x7ff0000000000000i64) && ((U64(u) & 0x000fffffffffffffi64) != 0)) ? 1:0 -#endif - -#define NA_isnanC32(u) (NA_isnan32(((Complex32 *)&(u))->r) || NA_isnan32(((Complex32 *)&(u))->i)) -#define NA_isnanC64(u) (NA_isnan64(((Complex64 *)&(u))->r) || NA_isnan64(((Complex64 *)&(u))->i)) - -#endif /* NA_isnan */ - - -#endif /* _ndarraymacro */ diff --git a/numpy-1.6.2/numpy/numarray/linear_algebra.py b/numpy-1.6.2/numpy/numarray/linear_algebra.py deleted file mode 100644 index 238dff9522..0000000000 --- a/numpy-1.6.2/numpy/numarray/linear_algebra.py +++ /dev/null @@ -1,15 +0,0 @@ - -from numpy.oldnumeric.linear_algebra import * - -import numpy.oldnumeric.linear_algebra as nol - -__all__ = list(nol.__all__) -__all__ += ['qr_decomposition'] - -from numpy.linalg import qr as _qr - -def qr_decomposition(a, mode='full'): - res = _qr(a, mode) - if mode == 'full': - return res - return (None, res) diff --git a/numpy-1.6.2/numpy/numarray/ma.py b/numpy-1.6.2/numpy/numarray/ma.py deleted file mode 100644 index 5c7a19cf2f..0000000000 --- a/numpy-1.6.2/numpy/numarray/ma.py +++ /dev/null @@ -1,2 +0,0 @@ - -from numpy.oldnumeric.ma import * diff --git a/numpy-1.6.2/numpy/numarray/matrix.py 
b/numpy-1.6.2/numpy/numarray/matrix.py deleted file mode 100644 index 86d79bbe21..0000000000 --- a/numpy-1.6.2/numpy/numarray/matrix.py +++ /dev/null @@ -1,7 +0,0 @@ - -__all__ = ['Matrix'] - -from numpy import matrix as _matrix - -def Matrix(data, typecode=None, copy=1, savespace=0): - return _matrix(data, typecode, copy=copy) diff --git a/numpy-1.6.2/numpy/numarray/mlab.py b/numpy-1.6.2/numpy/numarray/mlab.py deleted file mode 100644 index 05f234d376..0000000000 --- a/numpy-1.6.2/numpy/numarray/mlab.py +++ /dev/null @@ -1,7 +0,0 @@ - -from numpy.oldnumeric.mlab import * -import numpy.oldnumeric.mlab as nom - -__all__ = nom.__all__ - -del nom diff --git a/numpy-1.6.2/numpy/numarray/nd_image.py b/numpy-1.6.2/numpy/numarray/nd_image.py deleted file mode 100644 index dff7fa066f..0000000000 --- a/numpy-1.6.2/numpy/numarray/nd_image.py +++ /dev/null @@ -1,14 +0,0 @@ -try: - from ndimage import * -except ImportError: - try: - from scipy.ndimage import * - except ImportError: - msg = \ -"""The nd_image package is not installed - -It can be downloaded by checking out the latest source from -http://svn.scipy.org/svn/scipy/trunk/Lib/ndimage or by downloading and -installing all of SciPy from http://www.scipy.org. -""" - raise ImportError(msg) diff --git a/numpy-1.6.2/numpy/numarray/numerictypes.py b/numpy-1.6.2/numpy/numarray/numerictypes.py deleted file mode 100644 index 7bc91612ec..0000000000 --- a/numpy-1.6.2/numpy/numarray/numerictypes.py +++ /dev/null @@ -1,548 +0,0 @@ -"""numerictypes: Define the numeric type objects - -This module is designed so 'from numerictypes import *' is safe. 
-Exported symbols include: - - Dictionary with all registered number types (including aliases): - typeDict - - Numeric type objects: - Bool - Int8 Int16 Int32 Int64 - UInt8 UInt16 UInt32 UInt64 - Float32 Double64 - Complex32 Complex64 - - Numeric type classes: - NumericType - BooleanType - SignedType - UnsignedType - IntegralType - SignedIntegralType - UnsignedIntegralType - FloatingType - ComplexType - -$Id: numerictypes.py,v 1.55 2005/12/01 16:22:03 jaytmiller Exp $ -""" - -__all__ = ['NumericType','HasUInt64','typeDict','IsType', - 'BooleanType', 'SignedType', 'UnsignedType', 'IntegralType', - 'SignedIntegralType', 'UnsignedIntegralType', 'FloatingType', - 'ComplexType', 'AnyType', 'ObjectType', 'Any', 'Object', - 'Bool', 'Int8', 'Int16', 'Int32', 'Int64', 'Float32', - 'Float64', 'UInt8', 'UInt16', 'UInt32', 'UInt64', - 'Complex32', 'Complex64', 'Byte', 'Short', 'Int','Long', - 'Float', 'Complex', 'genericTypeRank', 'pythonTypeRank', - 'pythonTypeMap', 'scalarTypeMap', 'genericCoercions', - 'typecodes', 'genericPromotionExclusions','MaximumType', - 'getType','scalarTypes', 'typefrom'] - -MAX_ALIGN = 8 -MAX_INT_SIZE = 8 - -import numpy -LP64 = numpy.intp(0).itemsize == 8 - -HasUInt64 = 1 -try: - numpy.int64(0) -except: - HasUInt64 = 0 - -#from typeconv import typeConverters as _typeConverters -#import numinclude -#from _numerictype import _numerictype, typeDict - -# Enumeration of numarray type codes -typeDict = {} - -_tAny = 0 -_tBool = 1 -_tInt8 = 2 -_tUInt8 = 3 -_tInt16 = 4 -_tUInt16 = 5 -_tInt32 = 6 -_tUInt32 = 7 -_tInt64 = 8 -_tUInt64 = 9 -_tFloat32 = 10 -_tFloat64 = 11 -_tComplex32 = 12 -_tComplex64 = 13 -_tObject = 14 - -def IsType(rep): - """Determines whether the given object or string, 'rep', represents - a numarray type.""" - return isinstance(rep, NumericType) or rep in typeDict - -def _register(name, type, force=0): - """Register the type object. Raise an exception if it is already registered - unless force is true. 
- """ - if name in typeDict and not force: - raise ValueError("Type %s has already been registered" % name) - typeDict[name] = type - return type - - -class NumericType(object): - """Numeric type class - - Used both as a type identification and the repository of - characteristics and conversion functions. - """ - def __new__(type, name, bytes, default, typeno): - """__new__() implements a 'quasi-singleton pattern because attempts - to create duplicate types return the first created instance of that - particular type parameterization, i.e. the second time you try to - create "Int32", you get the original Int32, not a new one. - """ - if name in typeDict: - self = typeDict[name] - if self.bytes != bytes or self.default != default or \ - self.typeno != typeno: - raise ValueError("Redeclaration of existing NumericType "\ - "with different parameters.") - return self - else: - self = object.__new__(type) - self.name = "no name" - self.bytes = None - self.default = None - self.typeno = -1 - return self - - def __init__(self, name, bytes, default, typeno): - if not isinstance(name, str): - raise TypeError("name must be a string") - self.name = name - self.bytes = bytes - self.default = default - self.typeno = typeno - self._conv = None - _register(self.name, self) - - def __getnewargs__(self): - """support the pickling protocol.""" - return (self.name, self.bytes, self.default, self.typeno) - - def __getstate__(self): - """support pickling protocol... 
no __setstate__ required.""" - False - -class BooleanType(NumericType): - pass - -class SignedType: - """Marker class used for signed type check""" - pass - -class UnsignedType: - """Marker class used for unsigned type check""" - pass - -class IntegralType(NumericType): - pass - -class SignedIntegralType(IntegralType, SignedType): - pass - -class UnsignedIntegralType(IntegralType, UnsignedType): - pass - -class FloatingType(NumericType): - pass - -class ComplexType(NumericType): - pass - -class AnyType(NumericType): - pass - -class ObjectType(NumericType): - pass - -# C-API Type Any - -Any = AnyType("Any", None, None, _tAny) - -Object = ObjectType("Object", None, None, _tObject) - -# Numeric Types: - -Bool = BooleanType("Bool", 1, 0, _tBool) -Int8 = SignedIntegralType( "Int8", 1, 0, _tInt8) -Int16 = SignedIntegralType("Int16", 2, 0, _tInt16) -Int32 = SignedIntegralType("Int32", 4, 0, _tInt32) -Int64 = SignedIntegralType("Int64", 8, 0, _tInt64) - -Float32 = FloatingType("Float32", 4, 0.0, _tFloat32) -Float64 = FloatingType("Float64", 8, 0.0, _tFloat64) - -UInt8 = UnsignedIntegralType( "UInt8", 1, 0, _tUInt8) -UInt16 = UnsignedIntegralType("UInt16", 2, 0, _tUInt16) -UInt32 = UnsignedIntegralType("UInt32", 4, 0, _tUInt32) -UInt64 = UnsignedIntegralType("UInt64", 8, 0, _tUInt64) - -Complex32 = ComplexType("Complex32", 8, complex(0.0), _tComplex32) -Complex64 = ComplexType("Complex64", 16, complex(0.0), _tComplex64) - -Object.dtype = 'O' -Bool.dtype = '?' 
-Int8.dtype = 'i1' -Int16.dtype = 'i2' -Int32.dtype = 'i4' -Int64.dtype = 'i8' - -UInt8.dtype = 'u1' -UInt16.dtype = 'u2' -UInt32.dtype = 'u4' -UInt64.dtype = 'u8' - -Float32.dtype = 'f4' -Float64.dtype = 'f8' - -Complex32.dtype = 'c8' -Complex64.dtype = 'c16' - -# Aliases - -Byte = _register("Byte", Int8) -Short = _register("Short", Int16) -Int = _register("Int", Int32) -if LP64: - Long = _register("Long", Int64) - if HasUInt64: - _register("ULong", UInt64) - MaybeLong = _register("MaybeLong", Int64) - __all__.append('MaybeLong') -else: - Long = _register("Long", Int32) - _register("ULong", UInt32) - MaybeLong = _register("MaybeLong", Int32) - __all__.append('MaybeLong') - - -_register("UByte", UInt8) -_register("UShort", UInt16) -_register("UInt", UInt32) -Float = _register("Float", Float64) -Complex = _register("Complex", Complex64) - -# short forms - -_register("b1", Bool) -_register("u1", UInt8) -_register("u2", UInt16) -_register("u4", UInt32) -_register("i1", Int8) -_register("i2", Int16) -_register("i4", Int32) - -_register("i8", Int64) -if HasUInt64: - _register("u8", UInt64) - -_register("f4", Float32) -_register("f8", Float64) -_register("c8", Complex32) -_register("c16", Complex64) - -# NumPy forms - -_register("1", Int8) -_register("B", Bool) -_register("c", Int8) -_register("b", UInt8) -_register("s", Int16) -_register("w", UInt16) -_register("i", Int32) -_register("N", Int64) -_register("u", UInt32) -_register("U", UInt64) - -if LP64: - _register("l", Int64) -else: - _register("l", Int32) - -_register("d", Float64) -_register("f", Float32) -_register("D", Complex64) -_register("F", Complex32) - -# scipy.base forms - -def _scipy_alias(scipy_type, numarray_type): - _register(scipy_type, eval(numarray_type)) - globals()[scipy_type] = globals()[numarray_type] - -_scipy_alias("bool_", "Bool") -_scipy_alias("bool8", "Bool") -_scipy_alias("int8", "Int8") -_scipy_alias("uint8", "UInt8") -_scipy_alias("int16", "Int16") -_scipy_alias("uint16", "UInt16") 
-_scipy_alias("int32", "Int32") -_scipy_alias("uint32", "UInt32") -_scipy_alias("int64", "Int64") -_scipy_alias("uint64", "UInt64") - -_scipy_alias("float64", "Float64") -_scipy_alias("float32", "Float32") -_scipy_alias("complex128", "Complex64") -_scipy_alias("complex64", "Complex32") - -# The rest is used by numeric modules to determine conversions - -# Ranking of types from lowest to highest (sorta) -if not HasUInt64: - genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16', - 'Int32', 'UInt32', 'Int64', - 'Float32','Float64', 'Complex32', 'Complex64', 'Object'] -else: - genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16', - 'Int32', 'UInt32', 'Int64', 'UInt64', - 'Float32','Float64', 'Complex32', 'Complex64', 'Object'] - -pythonTypeRank = [ bool, int, long, float, complex ] - -# The next line is not platform independent XXX Needs to be generalized -if not LP64: - pythonTypeMap = { - int:("Int32","int"), - long:("Int64","int"), - float:("Float64","float"), - complex:("Complex64","complex")} - - scalarTypeMap = { - int:"Int32", - long:"Int64", - float:"Float64", - complex:"Complex64"} -else: - pythonTypeMap = { - int:("Int64","int"), - long:("Int64","int"), - float:("Float64","float"), - complex:("Complex64","complex")} - - scalarTypeMap = { - int:"Int64", - long:"Int64", - float:"Float64", - complex:"Complex64"} - -pythonTypeMap.update({bool:("Bool","bool") }) -scalarTypeMap.update({bool:"Bool"}) - -# Generate coercion matrix - -def _initGenericCoercions(): - global genericCoercions - genericCoercions = {} - - # vector with ... 
- for ntype1 in genericTypeRank: - nt1 = typeDict[ntype1] - rank1 = genericTypeRank.index(ntype1) - ntypesize1, inttype1, signedtype1 = nt1.bytes, \ - isinstance(nt1, IntegralType), isinstance(nt1, SignedIntegralType) - for ntype2 in genericTypeRank: - # vector - nt2 = typeDict[ntype2] - ntypesize2, inttype2, signedtype2 = nt2.bytes, \ - isinstance(nt2, IntegralType), isinstance(nt2, SignedIntegralType) - rank2 = genericTypeRank.index(ntype2) - if (signedtype1 != signedtype2) and inttype1 and inttype2: - # mixing of signed and unsigned ints is a special case - # If unsigned same size or larger, final size needs to be bigger - # if possible - if signedtype1: - if ntypesize2 >= ntypesize1: - size = min(2*ntypesize2, MAX_INT_SIZE) - else: - size = ntypesize1 - else: - if ntypesize1 >= ntypesize2: - size = min(2*ntypesize1, MAX_INT_SIZE) - else: - size = ntypesize2 - outtype = "Int"+str(8*size) - else: - if rank1 >= rank2: - outtype = ntype1 - else: - outtype = ntype2 - genericCoercions[(ntype1, ntype2)] = outtype - - for ntype2 in pythonTypeRank: - # scalar - mapto, kind = pythonTypeMap[ntype2] - if ((inttype1 and kind=="int") or (not inttype1 and kind=="float")): - # both are of the same "kind" thus vector type dominates - outtype = ntype1 - else: - rank2 = genericTypeRank.index(mapto) - if rank1 >= rank2: - outtype = ntype1 - else: - outtype = mapto - genericCoercions[(ntype1, ntype2)] = outtype - genericCoercions[(ntype2, ntype1)] = outtype - - # scalar-scalar - for ntype1 in pythonTypeRank: - maptype1 = scalarTypeMap[ntype1] - genericCoercions[(ntype1,)] = maptype1 - for ntype2 in pythonTypeRank: - maptype2 = scalarTypeMap[ntype2] - genericCoercions[(ntype1, ntype2)] = genericCoercions[(maptype1, maptype2)] - - # Special cases more easily dealt with outside of the loop - genericCoercions[("Complex32", "Float64")] = "Complex64" - genericCoercions[("Float64", "Complex32")] = "Complex64" - genericCoercions[("Complex32", "Int64")] = "Complex64" - 
genericCoercions[("Int64", "Complex32")] = "Complex64" - genericCoercions[("Complex32", "UInt64")] = "Complex64" - genericCoercions[("UInt64", "Complex32")] = "Complex64" - - genericCoercions[("Int64","Float32")] = "Float64" - genericCoercions[("Float32", "Int64")] = "Float64" - genericCoercions[("UInt64","Float32")] = "Float64" - genericCoercions[("Float32", "UInt64")] = "Float64" - - genericCoercions[(float, "Bool")] = "Float64" - genericCoercions[("Bool", float)] = "Float64" - - genericCoercions[(float,float,float)] = "Float64" # for scipy.special - genericCoercions[(int,int,float)] = "Float64" # for scipy.special - -_initGenericCoercions() - -# If complex is subclassed, the following may not be necessary -genericPromotionExclusions = { - 'Bool': (), - 'Int8': (), - 'Int16': (), - 'Int32': ('Float32','Complex32'), - 'UInt8': (), - 'UInt16': (), - 'UInt32': ('Float32','Complex32'), - 'Int64' : ('Float32','Complex32'), - 'UInt64' : ('Float32','Complex32'), - 'Float32': (), - 'Float64': ('Complex32',), - 'Complex32':(), - 'Complex64':() -} # e.g., don't allow promotion from Float64 to Complex32 or Int64 to Float32 - -# Numeric typecodes -typecodes = {'Integer': '1silN', - 'UnsignedInteger': 'bBwuU', - 'Float': 'fd', - 'Character': 'c', - 'Complex': 'FD' } - -if HasUInt64: - _MaximumType = { - Bool : UInt64, - - Int8 : Int64, - Int16 : Int64, - Int32 : Int64, - Int64 : Int64, - - UInt8 : UInt64, - UInt16 : UInt64, - UInt32 : UInt64, - UInt8 : UInt64, - - Float32 : Float64, - Float64 : Float64, - - Complex32 : Complex64, - Complex64 : Complex64 - } -else: - _MaximumType = { - Bool : Int64, - - Int8 : Int64, - Int16 : Int64, - Int32 : Int64, - Int64 : Int64, - - UInt8 : Int64, - UInt16 : Int64, - UInt32 : Int64, - UInt8 : Int64, - - Float32 : Float64, - Float64 : Float64, - - Complex32 : Complex64, - Complex64 : Complex64 - } - -def MaximumType(t): - """returns the type of highest precision of the same general kind as 't'""" - return _MaximumType[t] - - -def 
getType(type): - """Return the numeric type object for type - - type may be the name of a type object or the actual object - """ - if isinstance(type, NumericType): - return type - try: - return typeDict[type] - except KeyError: - raise TypeError("Not a numeric type") - -scalarTypes = (bool,int,long,float,complex) - -_scipy_dtypechar = { - Int8 : 'b', - UInt8 : 'B', - Int16 : 'h', - UInt16 : 'H', - Int32 : 'i', - UInt32 : 'I', - Int64 : 'q', - UInt64 : 'Q', - Float32 : 'f', - Float64 : 'd', - Complex32 : 'F', # Note the switchup here: - Complex64 : 'D' # numarray.Complex32 == scipy.complex64, etc. - } - -_scipy_dtypechar_inverse = {} -for key,value in _scipy_dtypechar.items(): - _scipy_dtypechar_inverse[value] = key - -_val = numpy.int_(0).itemsize -if _val == 8: - _scipy_dtypechar_inverse['l'] = Int64 - _scipy_dtypechar_inverse['L'] = UInt64 -elif _val == 4: - _scipy_dtypechar_inverse['l'] = Int32 - _scipy_dtypechar_inverse['L'] = UInt32 - -del _val - -if LP64: - _scipy_dtypechar_inverse['p'] = Int64 - _scipy_dtypechar_inverse['P'] = UInt64 -else: - _scipy_dtypechar_inverse['p'] = Int32 - _scipy_dtypechar_inverse['P'] = UInt32 - -def typefrom(obj): - return _scipy_dtypechar_inverse[obj.dtype.char] diff --git a/numpy-1.6.2/numpy/numarray/random_array.py b/numpy-1.6.2/numpy/numarray/random_array.py deleted file mode 100644 index d70e2694a5..0000000000 --- a/numpy-1.6.2/numpy/numarray/random_array.py +++ /dev/null @@ -1,9 +0,0 @@ - -__all__ = ['ArgumentError', 'F', 'beta', 'binomial', 'chi_square', - 'exponential', 'gamma', 'get_seed', 'multinomial', - 'multivariate_normal', 'negative_binomial', 'noncentral_F', - 'noncentral_chi_square', 'normal', 'permutation', 'poisson', - 'randint', 'random', 'random_integers', 'standard_normal', - 'uniform', 'seed'] - -from numpy.oldnumeric.random_array import * diff --git a/numpy-1.6.2/numpy/numarray/session.py b/numpy-1.6.2/numpy/numarray/session.py deleted file mode 100644 index 0982742abc..0000000000 --- 
a/numpy-1.6.2/numpy/numarray/session.py +++ /dev/null @@ -1,346 +0,0 @@ -""" This module contains a "session saver" which saves the state of a -NumPy session to a file. At a later time, a different Python -process can be started and the saved session can be restored using -load(). - -The session saver relies on the Python pickle protocol to save and -restore objects. Objects which are not themselves picklable (e.g. -modules) can sometimes be saved by "proxy", particularly when they -are global constants of some kind. If it's not known that proxying -will work, a warning is issued at save time. If a proxy fails to -reload properly (e.g. because it's not a global constant), a warning -is issued at reload time and that name is bound to a _ProxyFailure -instance which tries to identify what should have been restored. - -First, some unfortunate (probably unnecessary) concessions to doctest -to keep the test run free of warnings. - ->>> del _PROXY_ALLOWED ->>> del __builtins__ - -By default, save() stores every variable in the caller's namespace: - ->>> import numpy as na ->>> a = na.arange(10) ->>> save() - -Alternately, save() can be passed a comma seperated string of variables: - ->>> save("a,na") - -Alternately, save() can be passed a dictionary, typically one you already -have lying around somewhere rather than created inline as shown here: - ->>> save(dictionary={"a":a,"na":na}) - -If both variables and a dictionary are specified, the variables to be -saved are taken from the dictionary. - ->>> save(variables="a,na",dictionary={"a":a,"na":na}) - -Remove names from the session namespace - ->>> del a, na - -By default, load() restores every variable/object in the session file -to the caller's namespace. 
- ->>> load() - -load() can be passed a comma seperated string of variables to be -restored from the session file to the caller's namespace: - ->>> load("a,na") - -load() can also be passed a dictionary to *restore to*: - ->>> d = {} ->>> load(dictionary=d) - -load can be passed both a list variables of variables to restore and a -dictionary to restore to: - ->>> load(variables="a,na", dictionary=d) - ->>> na.all(a == na.arange(10)) -1 ->>> na.__name__ -'numpy' - -NOTE: session saving is faked for modules using module proxy objects. -Saved modules are re-imported at load time but any "state" in the module -which is not restored by a simple import is lost. - -""" - -__all__ = ['load', 'save'] - -import sys -import pickle - -SAVEFILE="session.dat" -VERBOSE = False # global import-time override - -def _foo(): pass - -_PROXY_ALLOWED = (type(sys), # module - type(_foo), # function - type(None)) # None - -def _update_proxy_types(): - """Suppress warnings for known un-picklables with working proxies.""" - pass - -def _unknown(_type): - """returns True iff _type isn't known as OK to proxy""" - return (_type is not None) and (_type not in _PROXY_ALLOWED) - -# caller() from the following article with one extra f_back added. -# from http://www.python.org/search/hypermail/python-1994q1/0506.html -# SUBJECT: import ( how to put a symbol into caller's namespace ) -# SENDER: Steven D. 
Majewski (sdm7g@elvis.med.virginia.edu) -# DATE: Thu, 24 Mar 1994 15:38:53 -0500 - -def _caller(): - """caller() returns the frame object of the function's caller.""" - try: - 1 + '' # make an error happen - except: # and return the caller's caller's frame - return sys.exc_traceback.tb_frame.f_back.f_back.f_back - -def _callers_globals(): - """callers_globals() returns the global dictionary of the caller.""" - frame = _caller() - return frame.f_globals - -def _callers_modules(): - """returns a list containing the names of all the modules in the caller's - global namespace.""" - g = _callers_globals() - mods = [] - for k,v in g.items(): - if type(v) == type(sys): - mods.append(getattr(v,"__name__")) - return mods - -def _errout(*args): - for a in args: - print >>sys.stderr, a, - print >>sys.stderr - -def _verbose(*args): - if VERBOSE: - _errout(*args) - -class _ProxyingFailure: - """Object which is bound to a variable for a proxy pickle which failed to reload""" - def __init__(self, module, name, type=None): - self.module = module - self.name = name - self.type = type - def __repr__(self): - return "ProxyingFailure('%s','%s','%s')" % (self.module, self.name, self.type) - -class _ModuleProxy(object): - """Proxy object which fakes pickling a module""" - def __new__(_type, name, save=False): - if save: - _verbose("proxying module", name) - self = object.__new__(_type) - self.name = name - else: - _verbose("loading module proxy", name) - try: - self = _loadmodule(name) - except ImportError: - _errout("warning: module", name,"import failed.") - return self - - def __getnewargs__(self): - return (self.name,) - - def __getstate__(self): - return False - -def _loadmodule(module): - if module not in sys.modules: - modules = module.split(".") - s = "" - for i in range(len(modules)): - s = ".".join(modules[:i+1]) - exec "import " + s - return sys.modules[module] - -class _ObjectProxy(object): - """Proxy object which fakes pickling an arbitrary object. 
Only global - constants can really be proxied.""" - def __new__(_type, module, name, _type2, save=False): - if save: - if _unknown(_type2): - _errout("warning: proxying object", module + "." + name, - "of type", _type2, "because it wouldn't pickle...", - "it may not reload later.") - else: - _verbose("proxying object", module, name) - self = object.__new__(_type) - self.module, self.name, self.type = module, name, str(_type2) - else: - _verbose("loading object proxy", module, name) - try: - m = _loadmodule(module) - except (ImportError, KeyError): - _errout("warning: loading object proxy", module + "." + name, - "module import failed.") - return _ProxyingFailure(module,name,_type2) - try: - self = getattr(m, name) - except AttributeError: - _errout("warning: object proxy", module + "." + name, - "wouldn't reload from", m) - return _ProxyingFailure(module,name,_type2) - return self - - def __getnewargs__(self): - return (self.module, self.name, self.type) - - def __getstate__(self): - return False - - -class _SaveSession(object): - """Tag object which marks the end of a save session and holds the - saved session variable names as a list of strings in the same - order as the session pickles.""" - def __new__(_type, keys, save=False): - if save: - _verbose("saving session", keys) - else: - _verbose("loading session", keys) - self = object.__new__(_type) - self.keys = keys - return self - - def __getnewargs__(self): - return (self.keys,) - - def __getstate__(self): - return False - -class ObjectNotFound(RuntimeError): - pass - -def _locate(modules, object): - for mname in modules: - m = sys.modules[mname] - if m: - for k,v in m.__dict__.items(): - if v is object: - return m.__name__, k - else: - raise ObjectNotFound(k) - -def save(variables=None, file=SAVEFILE, dictionary=None, verbose=False): - - """saves variables from a numpy session to a file. Variables - which won't pickle are "proxied" if possible. - - 'variables' a string of comma seperated variables: e.g. 
"a,b,c" - Defaults to dictionary.keys(). - - 'file' a filename or file object for the session file. - - 'dictionary' the dictionary in which to look up the variables. - Defaults to the caller's globals() - - 'verbose' print additional debug output when True. - """ - - global VERBOSE - VERBOSE = verbose - - _update_proxy_types() - - if isinstance(file, str): - file = open(file, "wb") - - if dictionary is None: - dictionary = _callers_globals() - - if variables is None: - keys = dictionary.keys() - else: - keys = variables.split(",") - - source_modules = _callers_modules() + sys.modules.keys() - - p = pickle.Pickler(file, protocol=2) - - _verbose("variables:",keys) - for k in keys: - v = dictionary[k] - _verbose("saving", k, type(v)) - try: # Try to write an ordinary pickle - p.dump(v) - _verbose("pickled", k) - except (pickle.PicklingError, TypeError, SystemError): - # Use proxies for stuff that won't pickle - if isinstance(v, type(sys)): # module - proxy = _ModuleProxy(v.__name__, save=True) - else: - try: - module, name = _locate(source_modules, v) - except ObjectNotFound: - _errout("warning: couldn't find object",k, - "in any module... skipping.") - continue - else: - proxy = _ObjectProxy(module, name, type(v), save=True) - p.dump(proxy) - o = _SaveSession(keys, save=True) - p.dump(o) - file.close() - -def load(variables=None, file=SAVEFILE, dictionary=None, verbose=False): - - """load a numpy session from a file and store the specified - 'variables' into 'dictionary'. - - 'variables' a string of comma seperated variables: e.g. "a,b,c" - Defaults to dictionary.keys(). - - 'file' a filename or file object for the session file. - - 'dictionary' the dictionary in which to look up the variables. - Defaults to the caller's globals() - - 'verbose' print additional debug output when True. 
- """ - - global VERBOSE - VERBOSE = verbose - - if isinstance(file, str): - file = open(file, "rb") - if dictionary is None: - dictionary = _callers_globals() - values = [] - p = pickle.Unpickler(file) - while 1: - o = p.load() - if isinstance(o, _SaveSession): - session = dict(zip(o.keys, values)) - _verbose("updating dictionary with session variables.") - if variables is None: - keys = session.keys() - else: - keys = variables.split(",") - for k in keys: - dictionary[k] = session[k] - return None - else: - _verbose("unpickled object", str(o)) - values.append(o) - -def test(): - import doctest, numpy.numarray.session - return doctest.testmod(numpy.numarray.session) diff --git a/numpy-1.6.2/numpy/numarray/setup.py b/numpy-1.6.2/numpy/numarray/setup.py deleted file mode 100644 index 6419902179..0000000000 --- a/numpy-1.6.2/numpy/numarray/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('numarray',parent_package,top_path) - - config.add_data_files('include/numpy/*') - - config.add_extension('_capi', - sources=['_capi.c'], - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/numarray/setupscons.py b/numpy-1.6.2/numpy/numarray/setupscons.py deleted file mode 100644 index 173612ae8b..0000000000 --- a/numpy-1.6.2/numpy/numarray/setupscons.py +++ /dev/null @@ -1,14 +0,0 @@ -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('numarray',parent_package,top_path) - - config.add_data_files('include/numpy/') - config.add_sconscript('SConstruct', source_files = ['_capi.c']) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git 
a/numpy-1.6.2/numpy/numarray/ufuncs.py b/numpy-1.6.2/numpy/numarray/ufuncs.py deleted file mode 100644 index 3fb5671ce8..0000000000 --- a/numpy-1.6.2/numpy/numarray/ufuncs.py +++ /dev/null @@ -1,22 +0,0 @@ - -__all__ = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh', - 'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_not', - 'bitwise_or', 'bitwise_xor', 'ceil', 'cos', 'cosh', 'divide', - 'equal', 'exp', 'fabs', 'floor', 'floor_divide', - 'fmod', 'greater', 'greater_equal', 'hypot', 'isnan', - 'less', 'less_equal', 'log', 'log10', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'lshift', 'maximum', 'minimum', - 'minus', 'multiply', 'negative', 'not_equal', - 'power', 'product', 'remainder', 'rshift', 'sin', 'sinh', 'sqrt', - 'subtract', 'sum', 'tan', 'tanh', 'true_divide', - 'conjugate', 'sign'] - -from numpy import absolute as abs, absolute, add, arccos, arccosh, arcsin, \ - arcsinh, arctan, arctan2, arctanh, bitwise_and, invert as bitwise_not, \ - bitwise_or, bitwise_xor, ceil, cos, cosh, divide, \ - equal, exp, fabs, floor, floor_divide, fmod, greater, greater_equal, \ - hypot, isnan, less, less_equal, log, log10, logical_and, \ - logical_not, logical_or, logical_xor, left_shift as lshift, \ - maximum, minimum, negative as minus, multiply, negative, \ - not_equal, power, product, remainder, right_shift as rshift, sin, \ - sinh, sqrt, subtract, sum, tan, tanh, true_divide, conjugate, sign diff --git a/numpy-1.6.2/numpy/numarray/util.py b/numpy-1.6.2/numpy/numarray/util.py deleted file mode 100644 index 9555474a8e..0000000000 --- a/numpy-1.6.2/numpy/numarray/util.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -import numpy as np - -__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError', - 'handleError', 'get_numarray_include_dirs'] - -class MathDomainError(ArithmeticError): - pass - - -class UnderflowError(ArithmeticError): - pass - - -class NumOverflowError(OverflowError, ArithmeticError): - pass - - -def 
handleError(errorStatus, sourcemsg): - """Take error status and use error mode to handle it.""" - modes = np.geterr() - if errorStatus & np.FPE_INVALID: - if modes['invalid'] == "warn": - print "Warning: Encountered invalid numeric result(s)", sourcemsg - if modes['invalid'] == "raise": - raise MathDomainError(sourcemsg) - if errorStatus & np.FPE_DIVIDEBYZERO: - if modes['dividebyzero'] == "warn": - print "Warning: Encountered divide by zero(s)", sourcemsg - if modes['dividebyzero'] == "raise": - raise ZeroDivisionError(sourcemsg) - if errorStatus & np.FPE_OVERFLOW: - if modes['overflow'] == "warn": - print "Warning: Encountered overflow(s)", sourcemsg - if modes['overflow'] == "raise": - raise NumOverflowError(sourcemsg) - if errorStatus & np.FPE_UNDERFLOW: - if modes['underflow'] == "warn": - print "Warning: Encountered underflow(s)", sourcemsg - if modes['underflow'] == "raise": - raise UnderflowError(sourcemsg) - - -def get_numarray_include_dirs(): - base = os.path.dirname(np.__file__) - newdirs = [os.path.join(base, 'numarray', 'include')] - return newdirs diff --git a/numpy-1.6.2/numpy/oldnumeric/__init__.py b/numpy-1.6.2/numpy/oldnumeric/__init__.py deleted file mode 100644 index 05712c02c4..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Don't add these to the __all__ variable though -from numpy import * - -def _move_axis_to_0(a, axis): - if axis == 0: - return a - n = len(a.shape) - if axis < 0: - axis += n - axes = range(1, axis+1) + [0,] + range(axis+1, n) - return transpose(a, axes) - -# Add these -from compat import * -from functions import * -from precision import * -from ufuncs import * -from misc import * - -import compat -import precision -import functions -import misc -import ufuncs - -import numpy -__version__ = numpy.__version__ -del numpy - -__all__ = ['__version__'] -__all__ += compat.__all__ -__all__ += precision.__all__ -__all__ += functions.__all__ -__all__ += ufuncs.__all__ -__all__ += 
misc.__all__ - -del compat -del functions -del precision -del ufuncs -del misc - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/oldnumeric/alter_code1.py b/numpy-1.6.2/numpy/oldnumeric/alter_code1.py deleted file mode 100644 index 87538a8559..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/alter_code1.py +++ /dev/null @@ -1,240 +0,0 @@ -""" -This module converts code written for Numeric to run with numpy - -Makes the following changes: - * Changes import statements (warns of use of from Numeric import *) - * Changes import statements (using numerix) ... - * Makes search and replace changes to: - - .typecode() - - .iscontiguous() - - .byteswapped() - - .itemsize() - - .toscalar() - * Converts .flat to .ravel() except for .flat = xxx or .flat[xxx] - * Replace xxx.spacesaver() with True - * Convert xx.savespace(?) to pass + ## xx.savespace(?) - - * Converts uses of 'b' to 'B' in the typecode-position of - functions: - eye, tri (in position 4) - ones, zeros, identity, empty, array, asarray, arange, - fromstring, indices, array_constructor (in position 2) - - and methods: - astype --- only argument - -- converts uses of '1', 's', 'w', and 'u' to - -- 'b', 'h', 'H', and 'I' - - * Converts uses of type(...) 
is - isinstance(..., ) -""" -__all__ = ['convertfile', 'convertall', 'converttree', 'convertsrc'] - -import sys -import os -import re -import glob - - -_func4 = ['eye', 'tri'] -_meth1 = ['astype'] -_func2 = ['ones', 'zeros', 'identity', 'fromstring', 'indices', - 'empty', 'array', 'asarray', 'arange', 'array_constructor'] - -_chars = {'1':'b','s':'h','w':'H','u':'I'} - -func_re = {} -meth_re = {} - -for name in _func2: - _astr = r"""(%s\s*[(][^,]*?[,][^'"]*?['"])b(['"][^)]*?[)])"""%name - func_re[name] = re.compile(_astr, re.DOTALL) - -for name in _func4: - _astr = r"""(%s\s*[(][^,]*?[,][^,]*?[,][^,]*?[,][^'"]*?['"])b(['"][^)]*?[)])"""%name - func_re[name] = re.compile(_astr, re.DOTALL) - -for name in _meth1: - _astr = r"""(.%s\s*[(][^'"]*?['"])b(['"][^)]*?[)])"""%name - func_re[name] = re.compile(_astr, re.DOTALL) - -for char in _chars.keys(): - _astr = r"""(.astype\s*[(][^'"]*?['"])%s(['"][^)]*?[)])"""%char - meth_re[char] = re.compile(_astr, re.DOTALL) - -def fixtypechars(fstr): - for name in _func2 + _func4 + _meth1: - fstr = func_re[name].sub('\\1B\\2',fstr) - for char in _chars.keys(): - fstr = meth_re[char].sub('\\1%s\\2'%_chars[char], fstr) - return fstr - -flatindex_re = re.compile('([.]flat(\s*?[[=]))') - -def changeimports(fstr, name, newname): - importstr = 'import %s' % name - importasstr = 'import %s as ' % name - fromstr = 'from %s import ' % name - fromall=0 - - fstr = re.sub(r'(import\s+[^,\n\r]+,\s*)(%s)' % name, - "\\1%s as %s" % (newname, name), fstr) - fstr = fstr.replace(importasstr, 'import %s as ' % newname) - fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name)) - - ind = 0 - Nlen = len(fromstr) - Nlen2 = len("from %s import " % newname) - while 1: - found = fstr.find(fromstr,ind) - if (found < 0): - break - ind = found + Nlen - if fstr[ind] == '*': - continue - fstr = "%sfrom %s import %s" % (fstr[:found], newname, fstr[ind:]) - ind += Nlen2 - Nlen - return fstr, fromall - -istest_re = {} -_types = ['float', 'int', 'complex', 
'ArrayType', 'FloatType', - 'IntType', 'ComplexType'] -for name in _types: - _astr = r'type\s*[(]([^)]*)[)]\s+(?:is|==)\s+(.*?%s)'%name - istest_re[name] = re.compile(_astr) -def fixistesting(astr): - for name in _types: - astr = istest_re[name].sub('isinstance(\\1, \\2)', astr) - return astr - -def replaceattr(astr): - astr = astr.replace(".typecode()",".dtype.char") - astr = astr.replace(".iscontiguous()",".flags.contiguous") - astr = astr.replace(".byteswapped()",".byteswap()") - astr = astr.replace(".toscalar()", ".item()") - astr = astr.replace(".itemsize()",".itemsize") - # preserve uses of flat that should be o.k. - tmpstr = flatindex_re.sub(r"@@@@\2",astr) - # replace other uses of flat - tmpstr = tmpstr.replace(".flat",".ravel()") - # put back .flat where it was valid - astr = tmpstr.replace("@@@@", ".flat") - return astr - -svspc2 = re.compile(r'([^,(\s]+[.]spacesaver[(][)])') -svspc3 = re.compile(r'(\S+[.]savespace[(].*[)])') -#shpe = re.compile(r'(\S+\s*)[.]shape\s*=[^=]\s*(.+)') -def replaceother(astr): - astr = svspc2.sub('True',astr) - astr = svspc3.sub(r'pass ## \1', astr) - #astr = shpe.sub('\\1=\\1.reshape(\\2)', astr) - return astr - -import datetime -def fromstr(filestr): - savestr = filestr[:] - filestr = fixtypechars(filestr) - filestr = fixistesting(filestr) - filestr, fromall1 = changeimports(filestr, 'Numeric', 'numpy.oldnumeric') - filestr, fromall1 = changeimports(filestr, 'multiarray','numpy.oldnumeric') - filestr, fromall1 = changeimports(filestr, 'umath', 'numpy.oldnumeric') - filestr, fromall1 = changeimports(filestr, 'Precision', 'numpy.oldnumeric.precision') - filestr, fromall1 = changeimports(filestr, 'UserArray', 'numpy.oldnumeric.user_array') - filestr, fromall1 = changeimports(filestr, 'ArrayPrinter', 'numpy.oldnumeric.array_printer') - filestr, fromall2 = changeimports(filestr, 'numerix', 'numpy.oldnumeric') - filestr, fromall3 = changeimports(filestr, 'scipy_base', 'numpy.oldnumeric') - filestr, fromall3 = 
changeimports(filestr, 'Matrix', 'numpy.oldnumeric.matrix') - filestr, fromall3 = changeimports(filestr, 'MLab', 'numpy.oldnumeric.mlab') - filestr, fromall3 = changeimports(filestr, 'LinearAlgebra', 'numpy.oldnumeric.linear_algebra') - filestr, fromall3 = changeimports(filestr, 'RNG', 'numpy.oldnumeric.rng') - filestr, fromall3 = changeimports(filestr, 'RNG.Statistics', 'numpy.oldnumeric.rng_stats') - filestr, fromall3 = changeimports(filestr, 'RandomArray', 'numpy.oldnumeric.random_array') - filestr, fromall3 = changeimports(filestr, 'FFT', 'numpy.oldnumeric.fft') - filestr, fromall3 = changeimports(filestr, 'MA', 'numpy.oldnumeric.ma') - fromall = fromall1 or fromall2 or fromall3 - filestr = replaceattr(filestr) - filestr = replaceother(filestr) - if savestr != filestr: - today = datetime.date.today().strftime('%b %d, %Y') - name = os.path.split(sys.argv[0])[-1] - filestr = '## Automatically adapted for '\ - 'numpy.oldnumeric %s by %s\n\n%s' % (today, name, filestr) - return filestr, 1 - return filestr, 0 - -def makenewfile(name, filestr): - fid = file(name, 'w') - fid.write(filestr) - fid.close() - -def convertfile(filename, orig=1): - """Convert the filename given from using Numeric to using NumPy - - Copies the file to filename.orig and then over-writes the file - with the updated code - """ - fid = open(filename) - filestr = fid.read() - fid.close() - filestr, changed = fromstr(filestr) - if changed: - if orig: - base, ext = os.path.splitext(filename) - os.rename(filename, base+".orig") - else: - os.remove(filename) - makenewfile(filename, filestr) - -def fromargs(args): - filename = args[1] - converttree(filename) - -def convertall(direc=os.path.curdir, orig=1): - """Convert all .py files to use numpy.oldnumeric (from Numeric) in the directory given - - For each changed file, a backup of .py is made as - .py.orig. A new file named .py - is then written with the updated code. 
- """ - files = glob.glob(os.path.join(direc,'*.py')) - for afile in files: - if afile[-8:] == 'setup.py': continue # skip these - convertfile(afile, orig) - -header_re = re.compile(r'(Numeric/arrayobject.h)') - -def convertsrc(direc=os.path.curdir, ext=None, orig=1): - """Replace Numeric/arrayobject.h with numpy/oldnumeric.h in all files in the - directory with extension give by list ext (if ext is None, then all files are - replaced).""" - if ext is None: - files = glob.glob(os.path.join(direc,'*')) - else: - files = [] - for aext in ext: - files.extend(glob.glob(os.path.join(direc,"*.%s" % aext))) - for afile in files: - fid = open(afile) - fstr = fid.read() - fid.close() - fstr, n = header_re.subn(r'numpy/oldnumeric.h',fstr) - if n > 0: - if orig: - base, ext = os.path.splitext(afile) - os.rename(afile, base+".orig") - else: - os.remove(afile) - makenewfile(afile, fstr) - -def _func(arg, dirname, fnames): - convertall(dirname, orig=0) - convertsrc(dirname, ext=['h','c'], orig=0) - -def converttree(direc=os.path.curdir): - """Convert all .py files and source code files in the tree given - """ - os.path.walk(direc, _func, None) - - -if __name__ == '__main__': - fromargs(sys.argv) diff --git a/numpy-1.6.2/numpy/oldnumeric/alter_code2.py b/numpy-1.6.2/numpy/oldnumeric/alter_code2.py deleted file mode 100644 index baa6b9d265..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/alter_code2.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -This module converts code written for numpy.oldnumeric to work -with numpy - -FIXME: Flesh this out. 
- -Makes the following changes: - * Converts typecharacters '1swu' to 'bhHI' respectively - when used as typecodes - * Changes import statements - * Change typecode= to dtype= - * Eliminates savespace=xxx keyword arguments - * Removes it when keyword is not given as well - * replaces matrixmultiply with dot - * converts functions that don't give axis= keyword that have changed - * converts functions that don't give typecode= keyword that have changed - * converts use of capitalized type-names - * converts old function names in oldnumeric.linear_algebra, - oldnumeric.random_array, and oldnumeric.fft - -""" -#__all__ = ['convertfile', 'convertall', 'converttree'] -__all__ = [] - -import warnings -warnings.warn("numpy.oldnumeric.alter_code2 is not working yet.") - -import sys -import os -import re -import glob - -# To convert typecharacters we need to -# Not very safe. Disabled for now.. -def replacetypechars(astr): - astr = astr.replace("'s'","'h'") - astr = astr.replace("'b'","'B'") - astr = astr.replace("'1'","'b'") - astr = astr.replace("'w'","'H'") - astr = astr.replace("'u'","'I'") - return astr - -def changeimports(fstr, name, newname): - importstr = 'import %s' % name - importasstr = 'import %s as ' % name - fromstr = 'from %s import ' % name - fromall=0 - - fstr = fstr.replace(importasstr, 'import %s as ' % newname) - fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name)) - - ind = 0 - Nlen = len(fromstr) - Nlen2 = len("from %s import " % newname) - while 1: - found = fstr.find(fromstr,ind) - if (found < 0): - break - ind = found + Nlen - if fstr[ind] == '*': - continue - fstr = "%sfrom %s import %s" % (fstr[:found], newname, fstr[ind:]) - ind += Nlen2 - Nlen - return fstr, fromall - -def replaceattr(astr): - astr = astr.replace("matrixmultiply","dot") - return astr - -def replaceother(astr): - astr = re.sub(r'typecode\s*=', 'dtype=', astr) - astr = astr.replace('ArrayType', 'ndarray') - astr = astr.replace('NewAxis', 'newaxis') - return astr - 
-import datetime -def fromstr(filestr): - #filestr = replacetypechars(filestr) - filestr, fromall1 = changeimports(filestr, 'numpy.oldnumeric', 'numpy') - filestr, fromall1 = changeimports(filestr, 'numpy.core.multiarray', 'numpy') - filestr, fromall1 = changeimports(filestr, 'numpy.core.umath', 'numpy') - filestr, fromall3 = changeimports(filestr, 'LinearAlgebra', - 'numpy.linalg.old') - filestr, fromall3 = changeimports(filestr, 'RNG', 'numpy.random.oldrng') - filestr, fromall3 = changeimports(filestr, 'RNG.Statistics', 'numpy.random.oldrngstats') - filestr, fromall3 = changeimports(filestr, 'RandomArray', 'numpy.random.oldrandomarray') - filestr, fromall3 = changeimports(filestr, 'FFT', 'numpy.fft.old') - filestr, fromall3 = changeimports(filestr, 'MA', 'numpy.core.ma') - fromall = fromall1 or fromall2 or fromall3 - filestr = replaceattr(filestr) - filestr = replaceother(filestr) - today = datetime.date.today().strftime('%b %d, %Y') - name = os.path.split(sys.argv[0])[-1] - filestr = '## Automatically adapted for '\ - 'numpy %s by %s\n\n%s' % (today, name, filestr) - return filestr - -def makenewfile(name, filestr): - fid = file(name, 'w') - fid.write(filestr) - fid.close() - -def getandcopy(name): - fid = file(name) - filestr = fid.read() - fid.close() - base, ext = os.path.splitext(name) - makenewfile(base+'.orig', filestr) - return filestr - -def convertfile(filename): - """Convert the filename given from using Numeric to using NumPy - - Copies the file to filename.orig and then over-writes the file - with the updated code - """ - filestr = getandcopy(filename) - filestr = fromstr(filestr) - makenewfile(filename, filestr) - -def fromargs(args): - filename = args[1] - convertfile(filename) - -def convertall(direc=os.path.curdir): - """Convert all .py files to use NumPy (from Numeric) in the directory given - - For each file, a backup of .py is made as - .py.orig. A new file named .py - is then written with the updated code. 
- """ - files = glob.glob(os.path.join(direc,'*.py')) - for afile in files: - convertfile(afile) - -def _func(arg, dirname, fnames): - convertall(dirname) - -def converttree(direc=os.path.curdir): - """Convert all .py files in the tree given - - """ - os.path.walk(direc, _func, None) - -if __name__ == '__main__': - fromargs(sys.argv) diff --git a/numpy-1.6.2/numpy/oldnumeric/array_printer.py b/numpy-1.6.2/numpy/oldnumeric/array_printer.py deleted file mode 100644 index 95f3f42c77..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/array_printer.py +++ /dev/null @@ -1,16 +0,0 @@ - -__all__ = ['array2string'] - -from numpy import array2string as _array2string - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', - array_output=0): - if array_output: - prefix="array(" - style=repr - else: - prefix = "" - style=str - return _array2string(a, max_line_width, precision, - suppress_small, separator, prefix, style) diff --git a/numpy-1.6.2/numpy/oldnumeric/arrayfns.py b/numpy-1.6.2/numpy/oldnumeric/arrayfns.py deleted file mode 100644 index 230b200a9d..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/arrayfns.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Backward compatible with arrayfns from Numeric -""" - -__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', - 'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span', - 'to_corners', 'zmin_zmax'] - -import numpy as np -from numpy import asarray - -class error(Exception): - pass - -def array_set(vals1, indices, vals2): - indices = asarray(indices) - if indices.ndim != 1: - raise ValueError, "index array must be 1-d" - if not isinstance(vals1, np.ndarray): - raise TypeError, "vals1 must be an ndarray" - vals1 = asarray(vals1) - vals2 = asarray(vals2) - if vals1.ndim != vals2.ndim or vals1.ndim < 1: - raise error, "vals1 and vals2 must have same number of dimensions (>=1)" - vals1[indices] = vals2 - -from numpy import digitize -from numpy import bincount as histogram - -def 
index_sort(arr): - return asarray(arr).argsort(kind='heap') - -def interp(y, x, z, typ=None): - """y(z) interpolated by treating y(x) as piecewise function - """ - res = np.interp(z, x, y) - if typ is None or typ == 'd': - return res - if typ == 'f': - return res.astype('f') - - raise error, "incompatible typecode" - -def nz(x): - x = asarray(x,dtype=np.ubyte) - if x.ndim != 1: - raise TypeError, "intput must have 1 dimension." - indxs = np.flatnonzero(x != 0) - return indxs[-1].item()+1 - -def reverse(x, n): - x = asarray(x,dtype='d') - if x.ndim != 2: - raise ValueError, "input must be 2-d" - y = np.empty_like(x) - if n == 0: - y[...] = x[::-1,:] - elif n == 1: - y[...] = x[:,::-1] - return y - -def span(lo, hi, num, d2=0): - x = np.linspace(lo, hi, num) - if d2 <= 0: - return x - else: - ret = np.empty((d2,num),x.dtype) - ret[...] = x - return ret - -def zmin_zmax(z, ireg): - z = asarray(z, dtype=float) - ireg = asarray(ireg, dtype=int) - if z.shape != ireg.shape or z.ndim != 2: - raise ValueError, "z and ireg must be the same shape and 2-d" - ix, iy = np.nonzero(ireg) - # Now, add more indices - x1m = ix - 1 - y1m = iy-1 - i1 = x1m>=0 - i2 = y1m>=0 - i3 = i1 & i2 - nix = np.r_[ix, x1m[i1], x1m[i1], ix[i2] ] - niy = np.r_[iy, iy[i1], y1m[i3], y1m[i2]] - # remove any negative indices - zres = z[nix,niy] - return zres.min().item(), zres.max().item() - - -def find_mask(fs, node_edges): - raise NotImplementedError - -def to_corners(arr, nv, nvsum): - raise NotImplementedError - - -def construct3(mask, itype): - raise NotImplementedError diff --git a/numpy-1.6.2/numpy/oldnumeric/compat.py b/numpy-1.6.2/numpy/oldnumeric/compat.py deleted file mode 100644 index 607dd0b904..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/compat.py +++ /dev/null @@ -1,117 +0,0 @@ -# Compatibility module containing deprecated names - -__all__ = ['NewAxis', - 'UFuncType', 'UfuncType', 'ArrayType', 'arraytype', - 'LittleEndian', 'arrayrange', 'matrixmultiply', - 'array_constructor', 
'pickle_array', - 'DumpArray', 'LoadArray', 'multiarray', - # from cPickle - 'dump', 'dumps', 'load', 'loads', - 'Unpickler', 'Pickler' - ] - -import numpy.core.multiarray as multiarray -import numpy.core.umath as um -from numpy.core.numeric import array -import functions -import sys - -from cPickle import dump, dumps - -mu = multiarray - -#Use this to add a new axis to an array -#compatibility only -NewAxis = None - -#deprecated -UFuncType = type(um.sin) -UfuncType = type(um.sin) -ArrayType = mu.ndarray -arraytype = mu.ndarray - -LittleEndian = (sys.byteorder == 'little') - -from numpy import deprecate - -# backward compatibility -arrayrange = deprecate(functions.arange, 'arrayrange', 'arange') - -# deprecated names -matrixmultiply = deprecate(mu.dot, 'matrixmultiply', 'dot') - -def DumpArray(m, fp): - m.dump(fp) - -def LoadArray(fp): - import cPickle - return cPickle.load(fp) - -def array_constructor(shape, typecode, thestr, Endian=LittleEndian): - if typecode == "O": - x = array(thestr, "O") - else: - x = mu.fromstring(thestr, typecode) - x.shape = shape - if LittleEndian != Endian: - return x.byteswap(True) - else: - return x - -def pickle_array(a): - if a.dtype.hasobject: - return (array_constructor, - a.shape, a.dtype.char, a.tolist(), LittleEndian) - else: - return (array_constructor, - (a.shape, a.dtype.char, a.tostring(), LittleEndian)) - -def loads(astr): - import cPickle - arr = cPickle.loads(astr.replace('Numeric', 'numpy.oldnumeric')) - return arr - -def load(fp): - return loads(fp.read()) - -def _LoadArray(fp): - import typeconv - ln = fp.readline().split() - if ln[0][0] == 'A': ln[0] = ln[0][1:] - typecode = ln[0][0] - endian = ln[0][1] - itemsize = int(ln[0][2:]) - shape = [int(x) for x in ln[1:]] - sz = itemsize - for val in shape: - sz *= val - dstr = fp.read(sz) - m = mu.fromstring(dstr, typeconv.convtypecode(typecode)) - m.shape = shape - - if (LittleEndian and endian == 'B') or (not LittleEndian and endian == 'L'): - return m.byteswap(True) - 
else: - return m - -import pickle, copy -if sys.version_info[0] >= 3: - class Unpickler(pickle.Unpickler): - # XXX: should we implement this? It's not completely straightforward - # to do. - def __init__(self, *a, **kw): - raise NotImplementedError( - "numpy.oldnumeric.Unpickler is not supported on Python 3") -else: - class Unpickler(pickle.Unpickler): - def load_array(self): - self.stack.append(_LoadArray(self)) - - dispatch = copy.copy(pickle.Unpickler.dispatch) - dispatch['A'] = load_array - -class Pickler(pickle.Pickler): - def __init__(self, *args, **kwds): - raise NotImplementedError, "Don't pickle new arrays with this" - def save_array(self, object): - raise NotImplementedError, "Don't pickle new arrays with this" diff --git a/numpy-1.6.2/numpy/oldnumeric/fft.py b/numpy-1.6.2/numpy/oldnumeric/fft.py deleted file mode 100644 index 67f30c7509..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/fft.py +++ /dev/null @@ -1,21 +0,0 @@ - -__all__ = ['fft', 'fft2d', 'fftnd', 'hermite_fft', 'inverse_fft', - 'inverse_fft2d', 'inverse_fftnd', - 'inverse_hermite_fft', 'inverse_real_fft', - 'inverse_real_fft2d', 'inverse_real_fftnd', - 'real_fft', 'real_fft2d', 'real_fftnd'] - -from numpy.fft import fft -from numpy.fft import fft2 as fft2d -from numpy.fft import fftn as fftnd -from numpy.fft import hfft as hermite_fft -from numpy.fft import ifft as inverse_fft -from numpy.fft import ifft2 as inverse_fft2d -from numpy.fft import ifftn as inverse_fftnd -from numpy.fft import ihfft as inverse_hermite_fft -from numpy.fft import irfft as inverse_real_fft -from numpy.fft import irfft2 as inverse_real_fft2d -from numpy.fft import irfftn as inverse_real_fftnd -from numpy.fft import rfft as real_fft -from numpy.fft import rfft2 as real_fft2d -from numpy.fft import rfftn as real_fftnd diff --git a/numpy-1.6.2/numpy/oldnumeric/fix_default_axis.py b/numpy-1.6.2/numpy/oldnumeric/fix_default_axis.py deleted file mode 100644 index 8483de85e5..0000000000 --- 
a/numpy-1.6.2/numpy/oldnumeric/fix_default_axis.py +++ /dev/null @@ -1,291 +0,0 @@ -""" -This module adds the default axis argument to code which did not specify it -for the functions where the default was changed in NumPy. - -The functions changed are - -add -1 ( all second argument) -====== -nansum -nanmax -nanmin -nanargmax -nanargmin -argmax -argmin -compress 3 - - -add 0 -====== -take 3 -repeat 3 -sum # might cause problems with builtin. -product -sometrue -alltrue -cumsum -cumproduct -average -ptp -cumprod -prod -std -mean -""" -__all__ = ['convertfile', 'convertall', 'converttree'] - -import sys -import os -import re -import glob - - -_args3 = ['compress', 'take', 'repeat'] -_funcm1 = ['nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', - 'argmax', 'argmin', 'compress'] -_func0 = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue', - 'cumsum', 'cumproduct', 'average', 'ptp', 'cumprod', 'prod', - 'std', 'mean'] - -_all = _func0 + _funcm1 -func_re = {} - -for name in _all: - _astr = r"""%s\s*[(]"""%name - func_re[name] = re.compile(_astr) - - -import string -disallowed = '_' + string.uppercase + string.lowercase + string.digits - -def _add_axis(fstr, name, repl): - alter = 0 - if name in _args3: - allowed_comma = 1 - else: - allowed_comma = 0 - newcode = "" - last = 0 - for obj in func_re[name].finditer(fstr): - nochange = 0 - start, end = obj.span() - if fstr[start-1] in disallowed: - continue - if fstr[start-1] == '.' 
\ - and fstr[start-6:start-1] != 'numpy' \ - and fstr[start-2:start-1] != 'N' \ - and fstr[start-9:start-1] != 'numarray' \ - and fstr[start-8:start-1] != 'numerix' \ - and fstr[start-8:start-1] != 'Numeric': - continue - if fstr[start-1] in ['\t',' ']: - k = start-2 - while fstr[k] in ['\t',' ']: - k -= 1 - if fstr[k-2:k+1] == 'def' or \ - fstr[k-4:k+1] == 'class': - continue - k = end - stack = 1 - ncommas = 0 - N = len(fstr) - while stack: - if k>=N: - nochange =1 - break - if fstr[k] == ')': - stack -= 1 - elif fstr[k] == '(': - stack += 1 - elif stack == 1 and fstr[k] == ',': - ncommas += 1 - if ncommas > allowed_comma: - nochange = 1 - break - k += 1 - if nochange: - continue - alter += 1 - newcode = "%s%s,%s)" % (newcode, fstr[last:k-1], repl) - last = k - if not alter: - newcode = fstr - else: - newcode = "%s%s" % (newcode, fstr[last:]) - return newcode, alter - -def _import_change(fstr, names): - # Four possibilities - # 1.) import numpy with subsequent use of numpy. - # change this to import numpy.oldnumeric as numpy - # 2.) import numpy as XXXX with subsequent use of - # XXXX. ==> import numpy.oldnumeric as XXXX - # 3.) from numpy import * - # with subsequent use of one of the names - # 4.) from numpy import ..., , ... (could span multiple - # lines. 
==> remove all names from list and - # add from numpy.oldnumeric import - - num = 0 - # case 1 - importstr = "import numpy" - ind = fstr.find(importstr) - if (ind > 0): - found = 0 - for name in names: - ind2 = fstr.find("numpy.%s" % name, ind) - if (ind2 > 0): - found = 1 - break - if found: - fstr = "%s%s%s" % (fstr[:ind], "import numpy.oldnumeric as numpy", - fstr[ind+len(importstr):]) - num += 1 - - # case 2 - importre = re.compile("""import numpy as ([A-Za-z0-9_]+)""") - modules = importre.findall(fstr) - if len(modules) > 0: - for module in modules: - found = 0 - for name in names: - ind2 = fstr.find("%s.%s" % (module, name)) - if (ind2 > 0): - found = 1 - break - if found: - importstr = "import numpy as %s" % module - ind = fstr.find(importstr) - fstr = "%s%s%s" % (fstr[:ind], - "import numpy.oldnumeric as %s" % module, - fstr[ind+len(importstr):]) - num += 1 - - # case 3 - importstr = "from numpy import *" - ind = fstr.find(importstr) - if (ind > 0): - found = 0 - for name in names: - ind2 = fstr.find(name, ind) - if (ind2 > 0) and fstr[ind2-1] not in disallowed: - found = 1 - break - if found: - fstr = "%s%s%s" % (fstr[:ind], - "from numpy.oldnumeric import *", - fstr[ind+len(importstr):]) - num += 1 - - # case 4 - ind = 0 - importstr = "from numpy import" - N = len(importstr) - while 1: - ind = fstr.find(importstr, ind) - if (ind < 0): - break - ind += N - ptr = ind+1 - stack = 1 - while stack: - if fstr[ptr] == '\\': - stack += 1 - elif fstr[ptr] == '\n': - stack -= 1 - ptr += 1 - substr = fstr[ind:ptr] - found = 0 - substr = substr.replace('\n',' ') - substr = substr.replace('\\','') - importnames = [x.strip() for x in substr.split(',')] - # determine if any of names are in importnames - addnames = [] - for name in names: - if name in importnames: - importnames.remove(name) - addnames.append(name) - if len(addnames) > 0: - fstr = "%s%s\n%s\n%s" % \ - (fstr[:ind], - "from numpy import %s" % \ - ", ".join(importnames), - "from numpy.oldnumeric import %s" 
% \ - ", ".join(addnames), - fstr[ptr:]) - num += 1 - - return fstr, num - -def add_axis(fstr, import_change=False): - total = 0 - if not import_change: - for name in _funcm1: - fstr, num = _add_axis(fstr, name, 'axis=-1') - total += num - for name in _func0: - fstr, num = _add_axis(fstr, name, 'axis=0') - total += num - return fstr, total - else: - fstr, num = _import_change(fstr, _funcm1+_func0) - return fstr, num - - -def makenewfile(name, filestr): - fid = file(name, 'w') - fid.write(filestr) - fid.close() - -def getfile(name): - fid = file(name) - filestr = fid.read() - fid.close() - return filestr - -def copyfile(name, fstr): - base, ext = os.path.splitext(name) - makenewfile(base+'.orig', fstr) - return - -def convertfile(filename, import_change=False): - """Convert the filename given from using Numeric to using NumPy - - Copies the file to filename.orig and then over-writes the file - with the updated code - """ - filestr = getfile(filename) - newstr, total = add_axis(filestr, import_change) - if total > 0: - print "Changing ", filename - copyfile(filename, filestr) - makenewfile(filename, newstr) - sys.stdout.flush() - -def fromargs(args): - filename = args[1] - convertfile(filename) - -def convertall(direc=os.path.curdir, import_change=False): - """Convert all .py files in the directory given - - For each file, a backup of .py is made as - .py.orig. A new file named .py - is then written with the updated code. 
- """ - files = glob.glob(os.path.join(direc,'*.py')) - for afile in files: - convertfile(afile, import_change) - -def _func(arg, dirname, fnames): - convertall(dirname, import_change=arg) - -def converttree(direc=os.path.curdir, import_change=False): - """Convert all .py files in the tree given - - """ - os.path.walk(direc, _func, import_change) - -if __name__ == '__main__': - fromargs(sys.argv) diff --git a/numpy-1.6.2/numpy/oldnumeric/functions.py b/numpy-1.6.2/numpy/oldnumeric/functions.py deleted file mode 100644 index 5b2b1a8bfd..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/functions.py +++ /dev/null @@ -1,124 +0,0 @@ -# Functions that should behave the same as Numeric and need changing - -import numpy as np -import numpy.core.multiarray as mu -import numpy.core.numeric as nn -from typeconv import convtypecode, convtypecode2 - -__all__ = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue', - 'cumsum', 'cumproduct', 'compress', 'fromfunction', - 'ones', 'empty', 'identity', 'zeros', 'array', 'asarray', - 'nonzero', 'reshape', 'arange', 'fromstring', 'ravel', 'trace', - 'indices', 'where','sarray','cross_product', 'argmax', 'argmin', - 'average'] - -def take(a, indicies, axis=0): - return np.take(a, indicies, axis) - -def repeat(a, repeats, axis=0): - return np.repeat(a, repeats, axis) - -def sum(x, axis=0): - return np.sum(x, axis) - -def product(x, axis=0): - return np.product(x, axis) - -def sometrue(x, axis=0): - return np.sometrue(x, axis) - -def alltrue(x, axis=0): - return np.alltrue(x, axis) - -def cumsum(x, axis=0): - return np.cumsum(x, axis) - -def cumproduct(x, axis=0): - return np.cumproduct(x, axis) - -def argmax(x, axis=-1): - return np.argmax(x, axis) - -def argmin(x, axis=-1): - return np.argmin(x, axis) - -def compress(condition, m, axis=-1): - return np.compress(condition, m, axis) - -def fromfunction(args, dimensions): - return np.fromfunction(args, dimensions, dtype=int) - -def ones(shape, typecode='l', savespace=0, dtype=None): - 
"""ones(shape, dtype=int) returns an array of the given - dimensions which is initialized to all ones. - """ - dtype = convtypecode(typecode,dtype) - a = mu.empty(shape, dtype) - a.fill(1) - return a - -def zeros(shape, typecode='l', savespace=0, dtype=None): - """zeros(shape, dtype=int) returns an array of the given - dimensions which is initialized to all zeros - """ - dtype = convtypecode(typecode,dtype) - return mu.zeros(shape, dtype) - -def identity(n,typecode='l', dtype=None): - """identity(n) returns the identity 2-d array of shape n x n. - """ - dtype = convtypecode(typecode, dtype) - return nn.identity(n, dtype) - -def empty(shape, typecode='l', dtype=None): - dtype = convtypecode(typecode, dtype) - return mu.empty(shape, dtype) - -def array(sequence, typecode=None, copy=1, savespace=0, dtype=None): - dtype = convtypecode2(typecode, dtype) - return mu.array(sequence, dtype, copy=copy) - -def sarray(a, typecode=None, copy=False, dtype=None): - dtype = convtypecode2(typecode, dtype) - return mu.array(a, dtype, copy) - -def asarray(a, typecode=None, dtype=None): - dtype = convtypecode2(typecode, dtype) - return mu.array(a, dtype, copy=0) - -def nonzero(a): - res = np.nonzero(a) - if len(res) == 1: - return res[0] - else: - raise ValueError, "Input argument must be 1d" - -def reshape(a, shape): - return np.reshape(a, shape) - -def arange(start, stop=None, step=1, typecode=None, dtype=None): - dtype = convtypecode2(typecode, dtype) - return mu.arange(start, stop, step, dtype) - -def fromstring(string, typecode='l', count=-1, dtype=None): - dtype = convtypecode(typecode, dtype) - return mu.fromstring(string, dtype, count=count) - -def ravel(m): - return np.ravel(m) - -def trace(a, offset=0, axis1=0, axis2=1): - return np.trace(a, offset=0, axis1=0, axis2=1) - -def indices(dimensions, typecode=None, dtype=None): - dtype = convtypecode(typecode, dtype) - return np.indices(dimensions, dtype) - -def where(condition, x, y): - return np.where(condition, x, y) - -def 
cross_product(a, b, axis1=-1, axis2=-1): - return np.cross(a, b, axis1, axis2) - -def average(a, axis=0, weights=None, returned=False): - return np.average(a, axis, weights, returned) diff --git a/numpy-1.6.2/numpy/oldnumeric/linear_algebra.py b/numpy-1.6.2/numpy/oldnumeric/linear_algebra.py deleted file mode 100644 index 2e7a264fe1..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/linear_algebra.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Backward compatible with LinearAlgebra from Numeric -""" -# This module is a lite version of the linalg.py module in SciPy which contains -# high-level Python interface to the LAPACK library. The lite version -# only accesses the following LAPACK functions: dgesv, zgesv, dgeev, -# zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, dpotrf. - - -__all__ = ['LinAlgError', 'solve_linear_equations', - 'inverse', 'cholesky_decomposition', 'eigenvalues', - 'Heigenvalues', 'generalized_inverse', - 'determinant', 'singular_value_decomposition', - 'eigenvectors', 'Heigenvectors', - 'linear_least_squares' - ] - -from numpy.core import transpose -import numpy.linalg as linalg - -# Linear equations - -LinAlgError = linalg.LinAlgError - -def solve_linear_equations(a, b): - return linalg.solve(a,b) - -# Matrix inversion - -def inverse(a): - return linalg.inv(a) - -# Cholesky decomposition - -def cholesky_decomposition(a): - return linalg.cholesky(a) - -# Eigenvalues - -def eigenvalues(a): - return linalg.eigvals(a) - -def Heigenvalues(a, UPLO='L'): - return linalg.eigvalsh(a,UPLO) - -# Eigenvectors - -def eigenvectors(A): - w, v = linalg.eig(A) - return w, transpose(v) - -def Heigenvectors(A): - w, v = linalg.eigh(A) - return w, transpose(v) - -# Generalized inverse - -def generalized_inverse(a, rcond = 1.e-10): - return linalg.pinv(a, rcond) - -# Determinant - -def determinant(a): - return linalg.det(a) - -# Linear Least Squares - -def linear_least_squares(a, b, rcond=1.e-10): - """returns x,resids,rank,s -where x minimizes 2-norm(|b - Ax|) 
- resids is the sum square residuals - rank is the rank of A - s is the rank of the singular values of A in descending order - -If b is a matrix then x is also a matrix with corresponding columns. -If the rank of A is less than the number of columns of A or greater than -the number of rows, then residuals will be returned as an empty array -otherwise resids = sum((b-dot(A,x)**2). -Singular values less than s[0]*rcond are treated as zero. -""" - return linalg.lstsq(a,b,rcond) - -def singular_value_decomposition(A, full_matrices=0): - return linalg.svd(A, full_matrices) diff --git a/numpy-1.6.2/numpy/oldnumeric/ma.py b/numpy-1.6.2/numpy/oldnumeric/ma.py deleted file mode 100644 index 1284c6019f..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/ma.py +++ /dev/null @@ -1,2270 +0,0 @@ -"""MA: a facility for dealing with missing observations -MA is generally used as a numpy.array look-alike. -by Paul F. Dubois. - -Copyright 1999, 2000, 2001 Regents of the University of California. -Released for unlimited redistribution. -Adapted for numpy_core 2005 by Travis Oliphant and -(mainly) Paul Dubois. - -""" -import types, sys - -import numpy.core.umath as umath -import numpy.core.fromnumeric as fromnumeric -from numpy.core.numeric import newaxis, ndarray, inf -from numpy.core.fromnumeric import amax, amin -from numpy.core.numerictypes import bool_, typecodes -import numpy.core.numeric as numeric -import warnings - -if sys.version_info[0] >= 3: - from functools import reduce - -# Ufunc domain lookup for __array_wrap__ -ufunc_domain = {} -# Ufunc fills lookup for __array__ -ufunc_fills = {} - -MaskType = bool_ -nomask = MaskType(0) -divide_tolerance = 1.e-35 - -class MAError (Exception): - def __init__ (self, args=None): - "Create an exception" - - # The .args attribute must be a tuple. 
- if not isinstance(args, tuple): - args = (args,) - self.args = args - def __str__(self): - "Calculate the string representation" - return str(self.args[0]) - __repr__ = __str__ - -class _MaskedPrintOption: - "One instance of this class, masked_print_option, is created." - def __init__ (self, display): - "Create the masked print option object." - self.set_display(display) - self._enabled = 1 - - def display (self): - "Show what prints for masked values." - return self._display - - def set_display (self, s): - "set_display(s) sets what prints for masked values." - self._display = s - - def enabled (self): - "Is the use of the display value enabled?" - return self._enabled - - def enable(self, flag=1): - "Set the enabling flag to flag." - self._enabled = flag - - def __str__ (self): - return str(self._display) - - __repr__ = __str__ - -#if you single index into a masked location you get this object. -masked_print_option = _MaskedPrintOption('--') - -# Use single element arrays or scalars. -default_real_fill_value = 1.e20 -default_complex_fill_value = 1.e20 + 0.0j -default_character_fill_value = '-' -default_integer_fill_value = 999999 -default_object_fill_value = '?' - -def default_fill_value (obj): - "Function to calculate default fill value for an object." 
- if isinstance(obj, types.FloatType): - return default_real_fill_value - elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType): - return default_integer_fill_value - elif isinstance(obj, types.StringType): - return default_character_fill_value - elif isinstance(obj, types.ComplexType): - return default_complex_fill_value - elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray): - x = obj.dtype.char - if x in typecodes['Float']: - return default_real_fill_value - if x in typecodes['Integer']: - return default_integer_fill_value - if x in typecodes['Complex']: - return default_complex_fill_value - if x in typecodes['Character']: - return default_character_fill_value - if x in typecodes['UnsignedInteger']: - return umath.absolute(default_integer_fill_value) - return default_object_fill_value - else: - return default_object_fill_value - -def minimum_fill_value (obj): - "Function to calculate default fill value suitable for taking minima." - if isinstance(obj, types.FloatType): - return numeric.inf - elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType): - return sys.maxint - elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray): - x = obj.dtype.char - if x in typecodes['Float']: - return numeric.inf - if x in typecodes['Integer']: - return sys.maxint - if x in typecodes['UnsignedInteger']: - return sys.maxint - else: - raise TypeError, 'Unsuitable type for calculating minimum.' - -def maximum_fill_value (obj): - "Function to calculate default fill value suitable for taking maxima." 
- if isinstance(obj, types.FloatType): - return -inf - elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType): - return -sys.maxint - elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray): - x = obj.dtype.char - if x in typecodes['Float']: - return -inf - if x in typecodes['Integer']: - return -sys.maxint - if x in typecodes['UnsignedInteger']: - return 0 - else: - raise TypeError, 'Unsuitable type for calculating maximum.' - -def set_fill_value (a, fill_value): - "Set fill value of a if it is a masked array." - if isMaskedArray(a): - a.set_fill_value (fill_value) - -def getmask (a): - """Mask of values in a; could be nomask. - Returns nomask if a is not a masked array. - To get an array for sure use getmaskarray.""" - if isinstance(a, MaskedArray): - return a.raw_mask() - else: - return nomask - -def getmaskarray (a): - """Mask of values in a; an array of zeros if mask is nomask - or not a masked array, and is a byte-sized integer. - Do not try to add up entries, for example. - """ - m = getmask(a) - if m is nomask: - return make_mask_none(shape(a)) - else: - return m - -def is_mask (m): - """Is m a legal mask? Does not check contents, only type. - """ - try: - return m.dtype.type is MaskType - except AttributeError: - return False - -def make_mask (m, copy=0, flag=0): - """make_mask(m, copy=0, flag=0) - return m as a mask, creating a copy if necessary or requested. - Can accept any sequence of integers or nomask. Does not check - that contents must be 0s and 1s. - if flag, return nomask if m contains no true elements. 
- """ - if m is nomask: - return nomask - elif isinstance(m, ndarray): - if m.dtype.type is MaskType: - if copy: - result = numeric.array(m, dtype=MaskType, copy=copy) - else: - result = m - else: - result = m.astype(MaskType) - else: - result = filled(m, True).astype(MaskType) - - if flag and not fromnumeric.sometrue(fromnumeric.ravel(result)): - return nomask - else: - return result - -def make_mask_none (s): - "Return a mask of all zeros of shape s." - result = numeric.zeros(s, dtype=MaskType) - result.shape = s - return result - -def mask_or (m1, m2): - """Logical or of the mask candidates m1 and m2, treating nomask as false. - Result may equal m1 or m2 if the other is nomask. - """ - if m1 is nomask: return make_mask(m2) - if m2 is nomask: return make_mask(m1) - if m1 is m2 and is_mask(m1): return m1 - return make_mask(umath.logical_or(m1, m2)) - -def filled (a, value = None): - """a as a contiguous numeric array with any masked areas replaced by value - if value is None or the special element "masked", get_fill_value(a) - is used instead. - - If a is already a contiguous numeric array, a itself is returned. - - filled(a) can be used to be sure that the result is numeric when - passing an object a to other software ignorant of MA, in particular to - numeric itself. - """ - if isinstance(a, MaskedArray): - return a.filled(value) - elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']: - return a - elif isinstance(a, types.DictType): - return numeric.array(a, 'O') - else: - return numeric.array(a) - -def get_fill_value (a): - """ - The fill value of a, if it has one; otherwise, the default fill value - for that type. 
- """ - if isMaskedArray(a): - result = a.fill_value() - else: - result = default_fill_value(a) - return result - -def common_fill_value (a, b): - "The common fill_value of a and b, if there is one, or None" - t1 = get_fill_value(a) - t2 = get_fill_value(b) - if t1 == t2: return t1 - return None - -# Domain functions return 1 where the argument(s) are not in the domain. -class domain_check_interval: - "domain_check_interval(a,b)(x) = true where x < a or y > b" - def __init__(self, y1, y2): - "domain_check_interval(a,b)(x) = true where x < a or y > b" - self.y1 = y1 - self.y2 = y2 - - def __call__ (self, x): - "Execute the call behavior." - return umath.logical_or(umath.greater (x, self.y2), - umath.less(x, self.y1) - ) - -class domain_tan: - "domain_tan(eps) = true where abs(cos(x)) < eps)" - def __init__(self, eps): - "domain_tan(eps) = true where abs(cos(x)) < eps)" - self.eps = eps - - def __call__ (self, x): - "Execute the call behavior." - return umath.less(umath.absolute(umath.cos(x)), self.eps) - -class domain_greater: - "domain_greater(v)(x) = true where x <= v" - def __init__(self, critical_value): - "domain_greater(v)(x) = true where x <= v" - self.critical_value = critical_value - - def __call__ (self, x): - "Execute the call behavior." - return umath.less_equal (x, self.critical_value) - -class domain_greater_equal: - "domain_greater_equal(v)(x) = true where x < v" - def __init__(self, critical_value): - "domain_greater_equal(v)(x) = true where x < v" - self.critical_value = critical_value - - def __call__ (self, x): - "Execute the call behavior." - return umath.less (x, self.critical_value) - -class masked_unary_operation: - def __init__ (self, aufunc, fill=0, domain=None): - """ masked_unary_operation(aufunc, fill=0, domain=None) - aufunc(fill) must be defined - self(x) returns aufunc(x) - with masked values where domain(x) is true or getmask(x) is true. 
- """ - self.f = aufunc - self.fill = fill - self.domain = domain - self.__doc__ = getattr(aufunc, "__doc__", str(aufunc)) - self.__name__ = getattr(aufunc, "__name__", str(aufunc)) - ufunc_domain[aufunc] = domain - ufunc_fills[aufunc] = fill, - - def __call__ (self, a, *args, **kwargs): - "Execute the call behavior." -# numeric tries to return scalars rather than arrays when given scalars. - m = getmask(a) - d1 = filled(a, self.fill) - if self.domain is not None: - m = mask_or(m, self.domain(d1)) - result = self.f(d1, *args, **kwargs) - return masked_array(result, m) - - def __str__ (self): - return "Masked version of " + str(self.f) - - -class domain_safe_divide: - def __init__ (self, tolerance=divide_tolerance): - self.tolerance = tolerance - def __call__ (self, a, b): - return umath.absolute(a) * self.tolerance >= umath.absolute(b) - -class domained_binary_operation: - """Binary operations that have a domain, like divide. These are complicated - so they are a separate class. They have no reduce, outer or accumulate. - """ - def __init__ (self, abfunc, domain, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = abfunc - self.domain = domain - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(abfunc, "__doc__", str(abfunc)) - self.__name__ = getattr(abfunc, "__name__", str(abfunc)) - ufunc_domain[abfunc] = domain - ufunc_fills[abfunc] = fillx, filly - - def __call__(self, a, b): - "Execute the call behavior." 
- ma = getmask(a) - mb = getmask(b) - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) - t = self.domain(d1, d2) - - if fromnumeric.sometrue(t, None): - d2 = where(t, self.filly, d2) - mb = mask_or(mb, t) - m = mask_or(ma, mb) - result = self.f(d1, d2) - return masked_array(result, m) - - def __str__ (self): - return "Masked version of " + str(self.f) - -class masked_binary_operation: - def __init__ (self, abfunc, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = abfunc - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(abfunc, "__doc__", str(abfunc)) - ufunc_domain[abfunc] = None - ufunc_fills[abfunc] = fillx, filly - - def __call__ (self, a, b, *args, **kwargs): - "Execute the call behavior." - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) - result = self.f(d1, d2, *args, **kwargs) - if isinstance(result, ndarray) \ - and m.ndim != 0 \ - and m.shape != result.shape: - m = mask_or(getmaskarray(a), getmaskarray(b)) - return masked_array(result, m) - - def reduce (self, target, axis=0, dtype=None): - """Reduce target along the given axis with this function.""" - m = getmask(target) - t = filled(target, self.filly) - if t.shape == (): - t = t.reshape(1) - if m is not nomask: - m = make_mask(m, copy=1) - m.shape = (1,) - if m is nomask: - t = self.f.reduce(t, axis) - else: - t = masked_array (t, m) - # XXX: "or t.dtype" below is a workaround for what appears - # XXX: to be a bug in reduce. - t = self.f.reduce(filled(t, self.filly), axis, - dtype=dtype or t.dtype) - m = umath.logical_and.reduce(m, axis) - if isinstance(t, ndarray): - return masked_array(t, m, get_fill_value(target)) - elif m: - return masked - else: - return t - - def outer (self, a, b): - "Return the function applied to the outer product of a and b." 
- ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)) - return masked_array(d, m) - - def accumulate (self, target, axis=0): - """Accumulate target along axis after filling with y fill value.""" - t = filled(target, self.filly) - return masked_array (self.f.accumulate (t, axis)) - def __str__ (self): - return "Masked version of " + str(self.f) - -sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0)) -log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0)) -log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0)) -exp = masked_unary_operation(umath.exp) -conjugate = masked_unary_operation(umath.conjugate) -sin = masked_unary_operation(umath.sin) -cos = masked_unary_operation(umath.cos) -tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35)) -arcsin = masked_unary_operation(umath.arcsin, 0.0, domain_check_interval(-1.0, 1.0)) -arccos = masked_unary_operation(umath.arccos, 0.0, domain_check_interval(-1.0, 1.0)) -arctan = masked_unary_operation(umath.arctan) -# Missing from numeric -arcsinh = masked_unary_operation(umath.arcsinh) -arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0)) -arctanh = masked_unary_operation(umath.arctanh, 0.0, domain_check_interval(-1.0+1e-15, 1.0-1e-15)) -sinh = masked_unary_operation(umath.sinh) -cosh = masked_unary_operation(umath.cosh) -tanh = masked_unary_operation(umath.tanh) -absolute = masked_unary_operation(umath.absolute) -fabs = masked_unary_operation(umath.fabs) -negative = masked_unary_operation(umath.negative) - -def nonzero(a): - """returns the indices of the elements of a which are not zero - and not masked - """ - return numeric.asarray(filled(a, 0).nonzero()) - -around = masked_unary_operation(fromnumeric.round_) -floor = masked_unary_operation(umath.floor) -ceil = 
masked_unary_operation(umath.ceil) -logical_not = masked_unary_operation(umath.logical_not) - -add = masked_binary_operation(umath.add) -subtract = masked_binary_operation(umath.subtract) -subtract.reduce = None -multiply = masked_binary_operation(umath.multiply, 1, 1) -divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1) -true_divide = domained_binary_operation(umath.true_divide, domain_safe_divide(), 0, 1) -floor_divide = domained_binary_operation(umath.floor_divide, domain_safe_divide(), 0, 1) -remainder = domained_binary_operation(umath.remainder, domain_safe_divide(), 0, 1) -fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1) -hypot = masked_binary_operation(umath.hypot) -arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0) -arctan2.reduce = None -equal = masked_binary_operation(umath.equal) -equal.reduce = None -not_equal = masked_binary_operation(umath.not_equal) -not_equal.reduce = None -less_equal = masked_binary_operation(umath.less_equal) -less_equal.reduce = None -greater_equal = masked_binary_operation(umath.greater_equal) -greater_equal.reduce = None -less = masked_binary_operation(umath.less) -less.reduce = None -greater = masked_binary_operation(umath.greater) -greater.reduce = None -logical_and = masked_binary_operation(umath.logical_and) -alltrue = masked_binary_operation(umath.logical_and, 1, 1).reduce -logical_or = masked_binary_operation(umath.logical_or) -sometrue = logical_or.reduce -logical_xor = masked_binary_operation(umath.logical_xor) -bitwise_and = masked_binary_operation(umath.bitwise_and) -bitwise_or = masked_binary_operation(umath.bitwise_or) -bitwise_xor = masked_binary_operation(umath.bitwise_xor) - -def rank (object): - return fromnumeric.rank(filled(object)) - -def shape (object): - return fromnumeric.shape(filled(object)) - -def size (object, axis=None): - return fromnumeric.size(filled(object), axis) - -class MaskedArray (object): - """Arrays with possibly masked values. 
- Masked values of 1 exclude the corresponding element from - any computation. - - Construction: - x = array(data, dtype=None, copy=True, order=False, - mask = nomask, fill_value=None) - - If copy=False, every effort is made not to copy the data: - If data is a MaskedArray, and argument mask=nomask, - then the candidate data is data.data and the - mask used is data.mask. If data is a numeric array, - it is used as the candidate raw data. - If dtype is not None and - is != data.dtype.char then a data copy is required. - Otherwise, the candidate is used. - - If a data copy is required, raw data stored is the result of: - numeric.array(data, dtype=dtype.char, copy=copy) - - If mask is nomask there are no masked values. Otherwise mask must - be convertible to an array of booleans with the same shape as x. - - fill_value is used to fill in masked values when necessary, - such as when printing and in method/function filled(). - The fill_value is not used for computation within this module. - """ - __array_priority__ = 10.1 - def __init__(self, data, dtype=None, copy=True, order=False, - mask=nomask, fill_value=None): - """array(data, dtype=None, copy=True, order=False, mask=nomask, fill_value=None) - If data already a numeric array, its dtype becomes the default value of dtype. 
- """ - if dtype is None: - tc = None - else: - tc = numeric.dtype(dtype) - need_data_copied = copy - if isinstance(data, MaskedArray): - c = data.data - if tc is None: - tc = c.dtype - elif tc != c.dtype: - need_data_copied = True - if mask is nomask: - mask = data.mask - elif mask is not nomask: #attempting to change the mask - need_data_copied = True - - elif isinstance(data, ndarray): - c = data - if tc is None: - tc = c.dtype - elif tc != c.dtype: - need_data_copied = True - else: - need_data_copied = False #because I'll do it now - c = numeric.array(data, dtype=tc, copy=True, order=order) - tc = c.dtype - - if need_data_copied: - if tc == c.dtype: - self._data = numeric.array(c, dtype=tc, copy=True, order=order) - else: - self._data = c.astype(tc) - else: - self._data = c - - if mask is nomask: - self._mask = nomask - self._shared_mask = 0 - else: - self._mask = make_mask (mask) - if self._mask is nomask: - self._shared_mask = 0 - else: - self._shared_mask = (self._mask is mask) - nm = size(self._mask) - nd = size(self._data) - if nm != nd: - if nm == 1: - self._mask = fromnumeric.resize(self._mask, self._data.shape) - self._shared_mask = 0 - elif nd == 1: - self._data = fromnumeric.resize(self._data, self._mask.shape) - self._data.shape = self._mask.shape - else: - raise MAError, "Mask and data not compatible." - elif nm == 1 and shape(self._mask) != shape(self._data): - self.unshare_mask() - self._mask.shape = self._data.shape - - self.set_fill_value(fill_value) - - def __array__ (self, t=None, context=None): - "Special hook for numeric. Converts to numeric if possible." - if self._mask is not nomask: - if fromnumeric.ravel(self._mask).any(): - if context is None: - warnings.warn("Cannot automatically convert masked array to "\ - "numeric because data\n is masked in one or "\ - "more locations."); - return self._data - #raise MAError, \ - # """Cannot automatically convert masked array to numeric because data - # is masked in one or more locations. 
- # """ - else: - func, args, i = context - fills = ufunc_fills.get(func) - if fills is None: - raise MAError, "%s not known to ma" % func - return self.filled(fills[i]) - else: # Mask is all false - # Optimize to avoid future invocations of this section. - self._mask = nomask - self._shared_mask = 0 - if t: - return self._data.astype(t) - else: - return self._data - - def __array_wrap__ (self, array, context=None): - """Special hook for ufuncs. - - Wraps the numpy array and sets the mask according to - context. - """ - if context is None: - return MaskedArray(array, copy=False, mask=nomask) - func, args = context[:2] - domain = ufunc_domain[func] - m = reduce(mask_or, [getmask(a) for a in args]) - if domain is not None: - m = mask_or(m, domain(*[getattr(a, '_data', a) - for a in args])) - if m is not nomask: - try: - shape = array.shape - except AttributeError: - pass - else: - if m.shape != shape: - m = reduce(mask_or, [getmaskarray(a) for a in args]) - - return MaskedArray(array, copy=False, mask=m) - - def _get_shape(self): - "Return the current shape." - return self._data.shape - - def _set_shape (self, newshape): - "Set the array's shape." - self._data.shape = newshape - if self._mask is not nomask: - self._mask = self._mask.copy() - self._mask.shape = newshape - - def _get_flat(self): - """Calculate the flat value. - """ - if self._mask is nomask: - return masked_array(self._data.ravel(), mask=nomask, - fill_value = self.fill_value()) - else: - return masked_array(self._data.ravel(), - mask=self._mask.ravel(), - fill_value = self.fill_value()) - - def _set_flat (self, value): - "x.flat = value" - y = self.ravel() - y[:] = value - - def _get_real(self): - "Get the real part of a complex array." 
- if self._mask is nomask: - return masked_array(self._data.real, mask=nomask, - fill_value = self.fill_value()) - else: - return masked_array(self._data.real, mask=self._mask, - fill_value = self.fill_value()) - - def _set_real (self, value): - "x.real = value" - y = self.real - y[...] = value - - def _get_imaginary(self): - "Get the imaginary part of a complex array." - if self._mask is nomask: - return masked_array(self._data.imag, mask=nomask, - fill_value = self.fill_value()) - else: - return masked_array(self._data.imag, mask=self._mask, - fill_value = self.fill_value()) - - def _set_imaginary (self, value): - "x.imaginary = value" - y = self.imaginary - y[...] = value - - def __str__(self): - """Calculate the str representation, using masked for fill if - it is enabled. Otherwise fill with fill value. - """ - if masked_print_option.enabled(): - f = masked_print_option - # XXX: Without the following special case masked - # XXX: would print as "[--]", not "--". Can we avoid - # XXX: checks for masked by choosing a different value - # XXX: for the masked singleton? 2005-01-05 -- sasha - if self is masked: - return str(f) - m = self._mask - if m is not nomask and m.shape == () and m: - return str(f) - # convert to object array to make filled work - self = self.astype(object) - else: - f = self.fill_value() - res = self.filled(f) - return str(res) - - def __repr__(self): - """Calculate the repr representation, using masked for fill if - it is enabled. Otherwise fill with fill value. 
- """ - with_mask = """\ -array(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s) -""" - with_mask1 = """\ -array(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s) -""" - without_mask = """array( - %(data)s)""" - without_mask1 = """array(%(data)s)""" - - n = len(self.shape) - if self._mask is nomask: - if n <= 1: - return without_mask1 % {'data':str(self.filled())} - return without_mask % {'data':str(self.filled())} - else: - if n <= 1: - return with_mask % { - 'data': str(self.filled()), - 'mask': str(self._mask), - 'fill': str(self.fill_value()) - } - return with_mask % { - 'data': str(self.filled()), - 'mask': str(self._mask), - 'fill': str(self.fill_value()) - } - without_mask1 = """array(%(data)s)""" - if self._mask is nomask: - return without_mask % {'data':str(self.filled())} - else: - return with_mask % { - 'data': str(self.filled()), - 'mask': str(self._mask), - 'fill': str(self.fill_value()) - } - - def __float__(self): - "Convert self to float." - self.unmask() - if self._mask is not nomask: - raise MAError, 'Cannot convert masked element to a Python float.' - return float(self.data.item()) - - def __int__(self): - "Convert self to int." - self.unmask() - if self._mask is not nomask: - raise MAError, 'Cannot convert masked element to a Python int.' - return int(self.data.item()) - - def __getitem__(self, i): - "Get item described by i. Not a copy as in previous versions." - self.unshare_mask() - m = self._mask - dout = self._data[i] - if m is nomask: - try: - if dout.size == 1: - return dout - else: - return masked_array(dout, fill_value=self._fill_value) - except AttributeError: - return dout - mi = m[i] - if mi.size == 1: - if mi: - return masked - else: - return dout - else: - return masked_array(dout, mi, fill_value=self._fill_value) - -# -------- -# setitem and setslice notes -# note that if value is masked, it means to mask those locations. -# setting a value changes the mask to match the value in those locations. 
- - def __setitem__(self, index, value): - "Set item described by index. If value is masked, mask those locations." - d = self._data - if self is masked: - raise MAError, 'Cannot alter masked elements.' - if value is masked: - if self._mask is nomask: - self._mask = make_mask_none(d.shape) - self._shared_mask = False - else: - self.unshare_mask() - self._mask[index] = True - return - m = getmask(value) - value = filled(value).astype(d.dtype) - d[index] = value - if m is nomask: - if self._mask is not nomask: - self.unshare_mask() - self._mask[index] = False - else: - if self._mask is nomask: - self._mask = make_mask_none(d.shape) - self._shared_mask = True - else: - self.unshare_mask() - self._mask[index] = m - - def __nonzero__(self): - """returns true if any element is non-zero or masked - - """ - # XXX: This changes bool conversion logic from MA. - # XXX: In MA bool(a) == len(a) != 0, but in numpy - # XXX: scalars do not have len - m = self._mask - d = self._data - return bool(m is not nomask and m.any() - or d is not nomask and d.any()) - - def __len__ (self): - """Return length of first dimension. 
This is weird but Python's - slicing behavior depends on it.""" - return len(self._data) - - def __and__(self, other): - "Return bitwise_and" - return bitwise_and(self, other) - - def __or__(self, other): - "Return bitwise_or" - return bitwise_or(self, other) - - def __xor__(self, other): - "Return bitwise_xor" - return bitwise_xor(self, other) - - __rand__ = __and__ - __ror__ = __or__ - __rxor__ = __xor__ - - def __abs__(self): - "Return absolute(self)" - return absolute(self) - - def __neg__(self): - "Return negative(self)" - return negative(self) - - def __pos__(self): - "Return array(self)" - return array(self) - - def __add__(self, other): - "Return add(self, other)" - return add(self, other) - - __radd__ = __add__ - - def __mod__ (self, other): - "Return remainder(self, other)" - return remainder(self, other) - - def __rmod__ (self, other): - "Return remainder(other, self)" - return remainder(other, self) - - def __lshift__ (self, n): - return left_shift(self, n) - - def __rshift__ (self, n): - return right_shift(self, n) - - def __sub__(self, other): - "Return subtract(self, other)" - return subtract(self, other) - - def __rsub__(self, other): - "Return subtract(other, self)" - return subtract(other, self) - - def __mul__(self, other): - "Return multiply(self, other)" - return multiply(self, other) - - __rmul__ = __mul__ - - def __div__(self, other): - "Return divide(self, other)" - return divide(self, other) - - def __rdiv__(self, other): - "Return divide(other, self)" - return divide(other, self) - - def __truediv__(self, other): - "Return divide(self, other)" - return true_divide(self, other) - - def __rtruediv__(self, other): - "Return divide(other, self)" - return true_divide(other, self) - - def __floordiv__(self, other): - "Return divide(self, other)" - return floor_divide(self, other) - - def __rfloordiv__(self, other): - "Return divide(other, self)" - return floor_divide(other, self) - - def __pow__(self, other, third=None): - "Return power(self, 
other, third)" - return power(self, other, third) - - def __sqrt__(self): - "Return sqrt(self)" - return sqrt(self) - - def __iadd__(self, other): - "Add other to self in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' - - if self._mask is nomask: - self._data += f - m = getmask(other) - self._mask = m - self._shared_mask = m is not nomask - else: - result = add(self, masked_array(f, mask=getmask(other))) - self._data = result.data - self._mask = result.mask - self._shared_mask = 1 - return self - - def __imul__(self, other): - "Add other to self in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' 
- else: - raise TypeError, 'Incorrect type for in-place operation.' - - if self._mask is nomask: - self._data *= f - m = getmask(other) - self._mask = m - self._shared_mask = m is not nomask - else: - result = multiply(self, masked_array(f, mask=getmask(other))) - self._data = result.data - self._mask = result.mask - self._shared_mask = 1 - return self - - def __isub__(self, other): - "Subtract other from self in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' - - if self._mask is nomask: - self._data -= f - m = getmask(other) - self._mask = m - self._shared_mask = m is not nomask - else: - result = subtract(self, masked_array(f, mask=getmask(other))) - self._data = result.data - self._mask = result.mask - self._shared_mask = 1 - return self - - - - def __idiv__(self, other): - "Divide self by other in place." - t = self._data.dtype.char - f = filled(other, 0) - t1 = f.dtype.char - if t == t1: - pass - elif t in typecodes['Integer']: - if t1 in typecodes['Integer']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' 
- elif t in typecodes['Float']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - elif t in typecodes['Complex']: - if t1 in typecodes['Integer']: - f = f.astype(t) - elif t1 in typecodes['Float']: - f = f.astype(t) - elif t1 in typecodes['Complex']: - f = f.astype(t) - else: - raise TypeError, 'Incorrect type for in-place operation.' - else: - raise TypeError, 'Incorrect type for in-place operation.' - mo = getmask(other) - result = divide(self, masked_array(f, mask=mo)) - self._data = result.data - dm = result.raw_mask() - if dm is not self._mask: - self._mask = dm - self._shared_mask = 1 - return self - - def __eq__(self, other): - return equal(self,other) - - def __ne__(self, other): - return not_equal(self,other) - - def __lt__(self, other): - return less(self,other) - - def __le__(self, other): - return less_equal(self,other) - - def __gt__(self, other): - return greater(self,other) - - def __ge__(self, other): - return greater_equal(self,other) - - def astype (self, tc): - "return self as array of given type." - d = self._data.astype(tc) - return array(d, mask=self._mask) - - def byte_swapped(self): - """Returns the raw data field, byte_swapped. Included for consistency - with numeric but doesn't make sense in this context. - """ - return self._data.byte_swapped() - - def compressed (self): - "A 1-D array of all the non-masked data." - d = fromnumeric.ravel(self._data) - if self._mask is nomask: - return array(d) - else: - m = 1 - fromnumeric.ravel(self._mask) - c = fromnumeric.compress(m, d) - return array(c, copy=0) - - def count (self, axis = None): - "Count of the non-masked elements in a, or along a certain axis." 
- m = self._mask - s = self._data.shape - ls = len(s) - if m is nomask: - if ls == 0: - return 1 - if ls == 1: - return s[0] - if axis is None: - return reduce(lambda x, y:x*y, s) - else: - n = s[axis] - t = list(s) - del t[axis] - return ones(t) * n - if axis is None: - w = fromnumeric.ravel(m).astype(int) - n1 = size(w) - if n1 == 1: - n2 = w[0] - else: - n2 = umath.add.reduce(w) - return n1 - n2 - else: - n1 = size(m, axis) - n2 = sum(m.astype(int), axis) - return n1 - n2 - - def dot (self, other): - "s.dot(other) = innerproduct(s, other)" - return innerproduct(self, other) - - def fill_value(self): - "Get the current fill value." - return self._fill_value - - def filled (self, fill_value=None): - """A numeric array with masked values filled. If fill_value is None, - use self.fill_value(). - - If mask is nomask, copy data only if not contiguous. - Result is always a contiguous, numeric array. -# Is contiguous really necessary now? - """ - d = self._data - m = self._mask - if m is nomask: - if d.flags['CONTIGUOUS']: - return d - else: - return d.copy() - else: - if fill_value is None: - value = self._fill_value - else: - value = fill_value - - if self is masked: - result = numeric.array(value) - else: - try: - result = numeric.array(d, dtype=d.dtype, copy=1) - result[m] = value - except (TypeError, AttributeError): - #ok, can't put that value in here - value = numeric.array(value, dtype=object) - d = d.astype(object) - result = fromnumeric.choose(m, (d, value)) - return result - - def ids (self): - """Return the ids of the data and mask areas""" - return (id(self._data), id(self._mask)) - - def iscontiguous (self): - "Is the data contiguous?" - return self._data.flags['CONTIGUOUS'] - - def itemsize(self): - "Item size of each data item." - return self._data.itemsize - - - def outer(self, other): - "s.outer(other) = outerproduct(s, other)" - return outerproduct(self, other) - - def put (self, values): - """Set the non-masked entries of self to filled(values). 
- No change to mask - """ - iota = numeric.arange(self.size) - d = self._data - if self._mask is nomask: - ind = iota - else: - ind = fromnumeric.compress(1 - self._mask, iota) - d[ind] = filled(values).astype(d.dtype) - - def putmask (self, values): - """Set the masked entries of self to filled(values). - Mask changed to nomask. - """ - d = self._data - if self._mask is not nomask: - d[self._mask] = filled(values).astype(d.dtype) - self._shared_mask = 0 - self._mask = nomask - - def ravel (self): - """Return a 1-D view of self.""" - if self._mask is nomask: - return masked_array(self._data.ravel()) - else: - return masked_array(self._data.ravel(), self._mask.ravel()) - - def raw_data (self): - """ Obsolete; use data property instead. - The raw data; portions may be meaningless. - May be noncontiguous. Expert use only.""" - return self._data - data = property(fget=raw_data, - doc="The data, but values at masked locations are meaningless.") - - def raw_mask (self): - """ Obsolete; use mask property instead. - May be noncontiguous. Expert use only. - """ - return self._mask - mask = property(fget=raw_mask, - doc="The mask, may be nomask. Values where mask true are meaningless.") - - def reshape (self, *s): - """This array reshaped to shape s""" - d = self._data.reshape(*s) - if self._mask is nomask: - return masked_array(d) - else: - m = self._mask.reshape(*s) - return masked_array(d, m) - - def set_fill_value (self, v=None): - "Set the fill value to v. Omit v to restore default." - if v is None: - v = default_fill_value (self.raw_data()) - self._fill_value = v - - def _get_ndim(self): - return self._data.ndim - ndim = property(_get_ndim, doc=numeric.ndarray.ndim.__doc__) - - def _get_size (self): - return self._data.size - size = property(fget=_get_size, doc="Number of elements in the array.") -## CHECK THIS: signature of numeric.array.size? 
- - def _get_dtype(self): - return self._data.dtype - dtype = property(fget=_get_dtype, doc="type of the array elements.") - - def item(self, *args): - "Return Python scalar if possible" - if self._mask is not nomask: - m = self._mask.item(*args) - try: - if m[0]: - return masked - except IndexError: - return masked - return self._data.item(*args) - - def itemset(self, *args): - "Set Python scalar into array" - item = args[-1] - args = args[:-1] - self[args] = item - - def tolist(self, fill_value=None): - "Convert to list" - return self.filled(fill_value).tolist() - - def tostring(self, fill_value=None): - "Convert to string" - return self.filled(fill_value).tostring() - - def unmask (self): - "Replace the mask by nomask if possible." - if self._mask is nomask: return - m = make_mask(self._mask, flag=1) - if m is nomask: - self._mask = nomask - self._shared_mask = 0 - - def unshare_mask (self): - "If currently sharing mask, make a copy." - if self._shared_mask: - self._mask = make_mask (self._mask, copy=1, flag=0) - self._shared_mask = 0 - - def _get_ctypes(self): - return self._data.ctypes - - def _get_T(self): - if (self.ndim < 2): - return self - return self.transpose() - - shape = property(_get_shape, _set_shape, - doc = 'tuple giving the shape of the array') - - flat = property(_get_flat, _set_flat, - doc = 'Access array in flat form.') - - real = property(_get_real, _set_real, - doc = 'Access the real part of the array') - - imaginary = property(_get_imaginary, _set_imaginary, - doc = 'Access the imaginary part of the array') - - imag = imaginary - - ctypes = property(_get_ctypes, None, doc="ctypes") - - T = property(_get_T, None, doc="get transpose") - -#end class MaskedArray - -array = MaskedArray - -def isMaskedArray (x): - "Is x a masked array, that is, an instance of MaskedArray?" 
- return isinstance(x, MaskedArray) - -isarray = isMaskedArray -isMA = isMaskedArray #backward compatibility - -def allclose (a, b, fill_value=1, rtol=1.e-5, atol=1.e-8): - """ Returns true if all components of a and b are equal - subject to given tolerances. - If fill_value is 1, masked values considered equal. - If fill_value is 0, masked values considered unequal. - The relative error rtol should be positive and << 1.0 - The absolute error atol comes into play for those elements - of b that are very small or zero; it says how small a must be also. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) - y = filled(array(d2, copy=0, mask=m), 1).astype(float) - d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) - return fromnumeric.alltrue(fromnumeric.ravel(d)) - -def allequal (a, b, fill_value=1): - """ - True if all entries of a and b are equal, using - fill_value as a truth value where either or both are masked. - """ - m = mask_or(getmask(a), getmask(b)) - if m is nomask: - x = filled(a) - y = filled(b) - d = umath.equal(x, y) - return fromnumeric.alltrue(fromnumeric.ravel(d)) - elif fill_value: - x = filled(a) - y = filled(b) - d = umath.equal(x, y) - dm = array(d, mask=m, copy=0) - return fromnumeric.alltrue(fromnumeric.ravel(filled(dm, 1))) - else: - return 0 - -def masked_values (data, value, rtol=1.e-5, atol=1.e-8, copy=1): - """ - masked_values(data, value, rtol=1.e-5, atol=1.e-8) - Create a masked array; mask is nomask if possible. - If copy==0, and otherwise possible, result - may share data values with original array. - Let d = filled(data, value). Returns d - masked where abs(data-value)<= atol + rtol * abs(value) - if d is of a floating point type. 
Otherwise returns - masked_object(d, value, copy) - """ - abs = umath.absolute - d = filled(data, value) - if issubclass(d.dtype.type, numeric.floating): - m = umath.less_equal(abs(d-value), atol+rtol*abs(value)) - m = make_mask(m, flag=1) - return array(d, mask = m, copy=copy, - fill_value=value) - else: - return masked_object(d, value, copy=copy) - -def masked_object (data, value, copy=1): - "Create array masked where exactly data equal to value" - d = filled(data, value) - dm = make_mask(umath.equal(d, value), flag=1) - return array(d, mask=dm, copy=copy, fill_value=value) - -def arange(start, stop=None, step=1, dtype=None): - """Just like range() except it returns a array whose type can be specified - by the keyword argument dtype. - """ - return array(numeric.arange(start, stop, step, dtype)) - -arrayrange = arange - -def fromstring (s, t): - "Construct a masked array from a string. Result will have no mask." - return masked_array(numeric.fromstring(s, t)) - -def left_shift (a, n): - "Left shift n bits" - m = getmask(a) - if m is nomask: - d = umath.left_shift(filled(a), n) - return masked_array(d) - else: - d = umath.left_shift(filled(a, 0), n) - return masked_array(d, m) - -def right_shift (a, n): - "Right shift n bits" - m = getmask(a) - if m is nomask: - d = umath.right_shift(filled(a), n) - return masked_array(d) - else: - d = umath.right_shift(filled(a, 0), n) - return masked_array(d, m) - -def resize (a, new_shape): - """resize(a, new_shape) returns a new array with the specified shape. - The original array's total size can be any size.""" - m = getmask(a) - if m is not nomask: - m = fromnumeric.resize(m, new_shape) - result = array(fromnumeric.resize(filled(a), new_shape), mask=m) - result.set_fill_value(get_fill_value(a)) - return result - -def new_repeat(a, repeats, axis=None): - """repeat elements of a repeats times along axis - repeats is a sequence of length a.shape[axis] - telling how many times to repeat each element. 
- """ - af = filled(a) - if isinstance(repeats, types.IntType): - if axis is None: - num = af.size - else: - num = af.shape[axis] - repeats = tuple([repeats]*num) - - m = getmask(a) - if m is not nomask: - m = fromnumeric.repeat(m, repeats, axis) - d = fromnumeric.repeat(af, repeats, axis) - result = masked_array(d, m) - result.set_fill_value(get_fill_value(a)) - return result - - - -def identity(n): - """identity(n) returns the identity matrix of shape n x n. - """ - return array(numeric.identity(n)) - -def indices (dimensions, dtype=None): - """indices(dimensions,dtype=None) returns an array representing a grid - of indices with row-only, and column-only variation. - """ - return array(numeric.indices(dimensions, dtype)) - -def zeros (shape, dtype=float): - """zeros(n, dtype=float) = - an array of all zeros of the given length or shape.""" - return array(numeric.zeros(shape, dtype)) - -def ones (shape, dtype=float): - """ones(n, dtype=float) = - an array of all ones of the given length or shape.""" - return array(numeric.ones(shape, dtype)) - -def count (a, axis = None): - "Count of the non-masked elements in a, or along a certain axis." - a = masked_array(a) - return a.count(axis) - -def power (a, b, third=None): - "a**b" - if third is not None: - raise MAError, "3-argument power not supported." 
- ma = getmask(a) - mb = getmask(b) - m = mask_or(ma, mb) - fa = filled(a, 1) - fb = filled(b, 1) - if fb.dtype.char in typecodes["Integer"]: - return masked_array(umath.power(fa, fb), m) - md = make_mask(umath.less(fa, 0), flag=1) - m = mask_or(m, md) - if m is nomask: - return masked_array(umath.power(fa, fb)) - else: - fa = numeric.where(m, 1, fa) - return masked_array(umath.power(fa, fb), m) - -def masked_array (a, mask=nomask, fill_value=None): - """masked_array(a, mask=nomask) = - array(a, mask=mask, copy=0, fill_value=fill_value) - """ - return array(a, mask=mask, copy=0, fill_value=fill_value) - -def sum (target, axis=None, dtype=None): - if axis is None: - target = ravel(target) - axis = 0 - return add.reduce(target, axis, dtype) - -def product (target, axis=None, dtype=None): - if axis is None: - target = ravel(target) - axis = 0 - return multiply.reduce(target, axis, dtype) - -def new_average (a, axis=None, weights=None, returned = 0): - """average(a, axis=None, weights=None) - Computes average along indicated axis. - If axis is None, average over the entire array - Inputs can be integer or floating types; result is of type float. - - If weights are given, result is sum(a*weights,axis=0)/(sum(weights,axis=0)*1.0) - weights must have a's shape or be the 1-d with length the size - of a in the given axis. - - If returned, return a tuple: the result and the sum of the weights - or count of values. Results will have the same shape. 
- - masked values in the weights will be set to 0.0 - """ - a = masked_array(a) - mask = a.mask - ash = a.shape - if ash == (): - ash = (1,) - if axis is None: - if mask is nomask: - if weights is None: - n = add.reduce(a.raw_data().ravel()) - d = reduce(lambda x, y: x * y, ash, 1.0) - else: - w = filled(weights, 0.0).ravel() - n = umath.add.reduce(a.raw_data().ravel() * w) - d = umath.add.reduce(w) - del w - else: - if weights is None: - n = add.reduce(a.ravel()) - w = fromnumeric.choose(mask, (1.0, 0.0)).ravel() - d = umath.add.reduce(w) - del w - else: - w = array(filled(weights, 0.0), float, mask=mask).ravel() - n = add.reduce(a.ravel() * w) - d = add.reduce(w) - del w - else: - if mask is nomask: - if weights is None: - d = ash[axis] * 1.0 - n = umath.add.reduce(a.raw_data(), axis) - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = numeric.array(w, float, copy=0) - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - del w - elif wsh == (ash[axis],): - r = [newaxis]*len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w["+ repr(tuple(r)) + "] * ones(ash, float)") - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - del w, r - else: - raise ValueError, 'average: weights wrong shape.' - else: - if weights is None: - n = add.reduce(a, axis) - w = numeric.choose(mask, (1.0, 0.0)) - d = umath.add.reduce(w, axis) - del w - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = array(w, float, mask=mask, copy=0) - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - elif wsh == (ash[axis],): - r = [newaxis]*len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w["+ repr(tuple(r)) + "] * masked_array(ones(ash, float), mask)") - n = add.reduce(a*w, axis) - d = add.reduce(w, axis) - else: - raise ValueError, 'average: weights wrong shape.' 
- del w - #print n, d, repr(mask), repr(weights) - if n is masked or d is masked: return masked - result = divide (n, d) - del n - - if isinstance(result, MaskedArray): - result.unmask() - if returned: - if not isinstance(d, MaskedArray): - d = masked_array(d) - if not d.shape == result.shape: - d = ones(result.shape, float) * d - d.unmask() - if returned: - return result, d - else: - return result - -def where (condition, x, y): - """where(condition, x, y) is x where condition is nonzero, y otherwise. - condition must be convertible to an integer array. - Answer is always the shape of condition. - The type depends on x and y. It is integer if both x and y are - the value masked. - """ - fc = filled(not_equal(condition, 0), 0) - xv = filled(x) - xm = getmask(x) - yv = filled(y) - ym = getmask(y) - d = numeric.choose(fc, (yv, xv)) - md = numeric.choose(fc, (ym, xm)) - m = getmask(condition) - m = make_mask(mask_or(m, md), copy=0, flag=1) - return masked_array(d, m) - -def choose (indices, t, out=None, mode='raise'): - "Returns array shaped like indices with elements chosen from t" - def fmask (x): - if x is masked: return 1 - return filled(x) - def nmask (x): - if x is masked: return 1 - m = getmask(x) - if m is nomask: return 0 - return m - c = filled(indices, 0) - masks = [nmask(x) for x in t] - a = [fmask(x) for x in t] - d = numeric.choose(c, a) - m = numeric.choose(c, masks) - m = make_mask(mask_or(m, getmask(indices)), copy=0, flag=1) - return masked_array(d, m) - -def masked_where(condition, x, copy=1): - """Return x as an array masked where condition is true. - Also masked where x or condition masked. 
- """ - cm = filled(condition,1) - m = mask_or(getmask(x), cm) - return array(filled(x), copy=copy, mask=m) - -def masked_greater(x, value, copy=1): - "masked_greater(x, value) = x masked where x > value" - return masked_where(greater(x, value), x, copy) - -def masked_greater_equal(x, value, copy=1): - "masked_greater_equal(x, value) = x masked where x >= value" - return masked_where(greater_equal(x, value), x, copy) - -def masked_less(x, value, copy=1): - "masked_less(x, value) = x masked where x < value" - return masked_where(less(x, value), x, copy) - -def masked_less_equal(x, value, copy=1): - "masked_less_equal(x, value) = x masked where x <= value" - return masked_where(less_equal(x, value), x, copy) - -def masked_not_equal(x, value, copy=1): - "masked_not_equal(x, value) = x masked where x != value" - d = filled(x, 0) - c = umath.not_equal(d, value) - m = mask_or(c, getmask(x)) - return array(d, mask=m, copy=copy) - -def masked_equal(x, value, copy=1): - """masked_equal(x, value) = x masked where x == value - For floating point consider masked_values(x, value) instead. - """ - d = filled(x, 0) - c = umath.equal(d, value) - m = mask_or(c, getmask(x)) - return array(d, mask=m, copy=copy) - -def masked_inside(x, v1, v2, copy=1): - """x with mask of all values of x that are inside [v1,v2] - v1 and v2 can be given in either order. - """ - if v2 < v1: - t = v2 - v2 = v1 - v1 = t - d = filled(x, 0) - c = umath.logical_and(umath.less_equal(d, v2), umath.greater_equal(d, v1)) - m = mask_or(c, getmask(x)) - return array(d, mask = m, copy=copy) - -def masked_outside(x, v1, v2, copy=1): - """x with mask of all values of x that are outside [v1,v2] - v1 and v2 can be given in either order. - """ - if v2 < v1: - t = v2 - v2 = v1 - v1 = t - d = filled(x, 0) - c = umath.logical_or(umath.less(d, v1), umath.greater(d, v2)) - m = mask_or(c, getmask(x)) - return array(d, mask = m, copy=copy) - -def reshape (a, *newshape): - "Copy of a with a new shape." 
- m = getmask(a) - d = filled(a).reshape(*newshape) - if m is nomask: - return masked_array(d) - else: - return masked_array(d, mask=numeric.reshape(m, *newshape)) - -def ravel (a): - "a as one-dimensional, may share data and mask" - m = getmask(a) - d = fromnumeric.ravel(filled(a)) - if m is nomask: - return masked_array(d) - else: - return masked_array(d, mask=numeric.ravel(m)) - -def concatenate (arrays, axis=0): - "Concatenate the arrays along the given axis" - d = [] - for x in arrays: - d.append(filled(x)) - d = numeric.concatenate(d, axis) - for x in arrays: - if getmask(x) is not nomask: break - else: - return masked_array(d) - dm = [] - for x in arrays: - dm.append(getmaskarray(x)) - dm = numeric.concatenate(dm, axis) - return masked_array(d, mask=dm) - -def swapaxes (a, axis1, axis2): - m = getmask(a) - d = masked_array(a).data - if m is nomask: - return masked_array(data=numeric.swapaxes(d, axis1, axis2)) - else: - return masked_array(data=numeric.swapaxes(d, axis1, axis2), - mask=numeric.swapaxes(m, axis1, axis2),) - - -def new_take (a, indices, axis=None, out=None, mode='raise'): - "returns selection of items from a." - m = getmask(a) - # d = masked_array(a).raw_data() - d = masked_array(a).data - if m is nomask: - return masked_array(numeric.take(d, indices, axis)) - else: - return masked_array(numeric.take(d, indices, axis), - mask = numeric.take(m, indices, axis)) - -def transpose(a, axes=None): - "reorder dimensions per tuple axes" - m = getmask(a) - d = filled(a) - if m is nomask: - return masked_array(numeric.transpose(d, axes)) - else: - return masked_array(numeric.transpose(d, axes), - mask = numeric.transpose(m, axes)) - - -def put(a, indices, values, mode='raise'): - """sets storage-indexed locations to corresponding values. - - Values and indices are filled if necessary. 
- - """ - d = a.raw_data() - ind = filled(indices) - v = filled(values) - numeric.put (d, ind, v) - m = getmask(a) - if m is not nomask: - a.unshare_mask() - numeric.put(a.raw_mask(), ind, 0) - -def putmask(a, mask, values): - "putmask(a, mask, values) sets a where mask is true." - if mask is nomask: - return - numeric.putmask(a.raw_data(), mask, values) - m = getmask(a) - if m is nomask: return - a.unshare_mask() - numeric.putmask(a.raw_mask(), mask, 0) - -def inner(a, b): - """inner(a,b) returns the dot product of two arrays, which has - shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the - product of the elements from the last dimensions of a and b. - Masked elements are replace by zeros. - """ - fa = filled(a, 0) - fb = filled(b, 0) - if len(fa.shape) == 0: fa.shape = (1,) - if len(fb.shape) == 0: fb.shape = (1,) - return masked_array(numeric.inner(fa, fb)) - -innerproduct = inner - -def outer(a, b): - """outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))""" - fa = filled(a, 0).ravel() - fb = filled(b, 0).ravel() - d = numeric.outer(fa, fb) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - return masked_array(d) - ma = getmaskarray(a) - mb = getmaskarray(b) - m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0) - return masked_array(d, m) - -outerproduct = outer - -def dot(a, b): - """dot(a,b) returns matrix-multiplication between a and b. The product-sum - is over the last dimension of a and the second-to-last dimension of b. - Masked values are replaced by zeros. See also innerproduct. - """ - return innerproduct(filled(a, 0), numeric.swapaxes(filled(b, 0), -1, -2)) - -def compress(condition, x, dimension=-1, out=None): - """Select those parts of x for which condition is true. - Masked values in condition are considered false. 
- """ - c = filled(condition, 0) - m = getmask(x) - if m is not nomask: - m = numeric.compress(c, m, dimension) - d = numeric.compress(c, filled(x), dimension) - return masked_array(d, m) - -class _minimum_operation: - "Object to calculate minima" - def __init__ (self): - """minimum(a, b) or minimum(a) - In one argument case returns the scalar minimum. - """ - pass - - def __call__ (self, a, b=None): - "Execute the call behavior." - if b is None: - m = getmask(a) - if m is nomask: - d = amin(filled(a).ravel()) - return d - ac = a.compressed() - if len(ac) == 0: - return masked - else: - return amin(ac.raw_data()) - else: - return where(less(a, b), a, b) - - def reduce (self, target, axis=0): - """Reduce target along the given axis.""" - m = getmask(target) - if m is nomask: - t = filled(target) - return masked_array (umath.minimum.reduce (t, axis)) - else: - t = umath.minimum.reduce(filled(target, minimum_fill_value(target)), axis) - m = umath.logical_and.reduce(m, axis) - return masked_array(t, m, get_fill_value(target)) - - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - d = umath.minimum.outer(filled(a), filled(b)) - return masked_array(d, m) - -minimum = _minimum_operation () - -class _maximum_operation: - "Object to calculate maxima" - def __init__ (self): - """maximum(a, b) or maximum(a) - In one argument case returns the scalar maximum. - """ - pass - - def __call__ (self, a, b=None): - "Execute the call behavior." 
- if b is None: - m = getmask(a) - if m is nomask: - d = amax(filled(a).ravel()) - return d - ac = a.compressed() - if len(ac) == 0: - return masked - else: - return amax(ac.raw_data()) - else: - return where(greater(a, b), a, b) - - def reduce (self, target, axis=0): - """Reduce target along the given axis.""" - m = getmask(target) - if m is nomask: - t = filled(target) - return masked_array (umath.maximum.reduce (t, axis)) - else: - t = umath.maximum.reduce(filled(target, maximum_fill_value(target)), axis) - m = umath.logical_and.reduce(m, axis) - return masked_array(t, m, get_fill_value(target)) - - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - d = umath.maximum.outer(filled(a), filled(b)) - return masked_array(d, m) - -maximum = _maximum_operation () - -def sort (x, axis = -1, fill_value=None): - """If x does not have a mask, return a masked array formed from the - result of numeric.sort(x, axis). - Otherwise, fill x with fill_value. Sort it. - Set a mask where the result is equal to fill_value. - Note that this may have unintended consequences if the data contains the - fill value at a non-masked site. - - If fill_value is not given the default fill value for x's type will be - used. 
- """ - if fill_value is None: - fill_value = default_fill_value (x) - d = filled(x, fill_value) - s = fromnumeric.sort(d, axis) - if getmask(x) is nomask: - return masked_array(s) - return masked_values(s, fill_value, copy=0) - -def diagonal(a, k = 0, axis1=0, axis2=1): - """diagonal(a,k=0,axis1=0, axis2=1) = the k'th diagonal of a""" - d = fromnumeric.diagonal(filled(a), k, axis1, axis2) - m = getmask(a) - if m is nomask: - return masked_array(d, m) - else: - return masked_array(d, fromnumeric.diagonal(m, k, axis1, axis2)) - -def trace (a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """trace(a,offset=0, axis1=0, axis2=1) returns the sum along diagonals - (defined by the last two dimenions) of the array. - """ - return diagonal(a, offset, axis1, axis2).sum(dtype=dtype) - -def argsort (x, axis = -1, out=None, fill_value=None): - """Treating masked values as if they have the value fill_value, - return sort indices for sorting along given axis. - if fill_value is None, use get_fill_value(x) - Returns a numpy array. - """ - d = filled(x, fill_value) - return fromnumeric.argsort(d, axis) - -def argmin (x, axis = -1, out=None, fill_value=None): - """Treating masked values as if they have the value fill_value, - return indices for minimum values along given axis. - if fill_value is None, use get_fill_value(x). - Returns a numpy array if x has more than one dimension. - Otherwise, returns a scalar index. - """ - d = filled(x, fill_value) - return fromnumeric.argmin(d, axis) - -def argmax (x, axis = -1, out=None, fill_value=None): - """Treating masked values as if they have the value fill_value, - return sort indices for maximum along given axis. - if fill_value is None, use -get_fill_value(x) if it exists. - Returns a numpy array if x has more than one dimension. - Otherwise, returns a scalar index. 
- """ - if fill_value is None: - fill_value = default_fill_value (x) - try: - fill_value = - fill_value - except: - pass - d = filled(x, fill_value) - return fromnumeric.argmax(d, axis) - -def fromfunction (f, s): - """apply f to s to create array as in umath.""" - return masked_array(numeric.fromfunction(f, s)) - -def asarray(data, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0) - """ - if isinstance(data, MaskedArray) and \ - (dtype is None or dtype == data.dtype): - return data - return array(data, dtype=dtype, copy=0) - -# Add methods to support ndarray interface -# XXX: I is better to to change the masked_*_operation adaptors -# XXX: to wrap ndarray methods directly to create ma.array methods. -from types import MethodType -def _m(f): - return MethodType(f, None, array) -def not_implemented(*args, **kwds): - raise NotImplementedError, "not yet implemented for numpy.ma arrays" -array.all = _m(alltrue) -array.any = _m(sometrue) -array.argmax = _m(argmax) -array.argmin = _m(argmin) -array.argsort = _m(argsort) -array.base = property(_m(not_implemented)) -array.byteswap = _m(not_implemented) - -def _choose(self, *args, **kwds): - return choose(self, args) -array.choose = _m(_choose) -del _choose - -def _clip(self,a_min,a_max,out=None): - return MaskedArray(data = self.data.clip(asarray(a_min).data, - asarray(a_max).data), - mask = mask_or(self.mask, - mask_or(getmask(a_min),getmask(a_max)))) -array.clip = _m(_clip) - -def _compress(self, cond, axis=None, out=None): - return compress(cond, self, axis) -array.compress = _m(_compress) -del _compress - -array.conj = array.conjugate = _m(conjugate) -array.copy = _m(not_implemented) - -def _cumprod(self, axis=None, dtype=None, out=None): - m = self.mask - if m is not nomask: - m = umath.logical_or.accumulate(self.mask, axis) - return MaskedArray(data = self.filled(1).cumprod(axis, dtype), mask=m) -array.cumprod = _m(_cumprod) - -def _cumsum(self, axis=None, dtype=None, out=None): - m = self.mask - if 
m is not nomask: - m = umath.logical_or.accumulate(self.mask, axis) - return MaskedArray(data=self.filled(0).cumsum(axis, dtype), mask=m) -array.cumsum = _m(_cumsum) - -array.diagonal = _m(diagonal) -array.dump = _m(not_implemented) -array.dumps = _m(not_implemented) -array.fill = _m(not_implemented) -array.flags = property(_m(not_implemented)) -array.flatten = _m(ravel) -array.getfield = _m(not_implemented) - -def _max(a, axis=None, out=None): - if out is not None: - raise TypeError("Output arrays Unsupported for masked arrays") - if axis is None: - return maximum(a) - else: - return maximum.reduce(a, axis) -array.max = _m(_max) -del _max -def _min(a, axis=None, out=None): - if out is not None: - raise TypeError("Output arrays Unsupported for masked arrays") - if axis is None: - return minimum(a) - else: - return minimum.reduce(a, axis) -array.min = _m(_min) -del _min -array.mean = _m(new_average) -array.nbytes = property(_m(not_implemented)) -array.newbyteorder = _m(not_implemented) -array.nonzero = _m(nonzero) -array.prod = _m(product) - -def _ptp(a,axis=None,out=None): - return a.max(axis,out)-a.min(axis) -array.ptp = _m(_ptp) -array.repeat = _m(new_repeat) -array.resize = _m(resize) -array.searchsorted = _m(not_implemented) -array.setfield = _m(not_implemented) -array.setflags = _m(not_implemented) -array.sort = _m(not_implemented) # NB: ndarray.sort is inplace - -def _squeeze(self): - try: - result = MaskedArray(data = self.data.squeeze(), - mask = self.mask.squeeze()) - except AttributeError: - result = _wrapit(self, 'squeeze') - return result -array.squeeze = _m(_squeeze) - -array.strides = property(_m(not_implemented)) -array.sum = _m(sum) -def _swapaxes(self,axis1,axis2): - return MaskedArray(data = self.data.swapaxes(axis1, axis2), - mask = self.mask.swapaxes(axis1, axis2)) -array.swapaxes = _m(_swapaxes) -array.take = _m(new_take) -array.tofile = _m(not_implemented) -array.trace = _m(trace) -array.transpose = _m(transpose) - -def 
_var(self,axis=None,dtype=None, out=None): - if axis is None: - return numeric.asarray(self.compressed()).var() - a = self.swapaxes(axis,0) - a = a - a.mean(axis=0) - a *= a - a /= a.count(axis=0) - return a.swapaxes(0,axis).sum(axis) -def _std(self,axis=None, dtype=None, out=None): - return (self.var(axis,dtype))**0.5 -array.var = _m(_var) -array.std = _m(_std) - -array.view = _m(not_implemented) -array.round = _m(around) -del _m, MethodType, not_implemented - - -masked = MaskedArray(0, int, mask=1) - -def repeat(a, repeats, axis=0): - return new_repeat(a, repeats, axis) - -def average(a, axis=0, weights=None, returned=0): - return new_average(a, axis, weights, returned) - -def take(a, indices, axis=0): - return new_take(a, indices, axis) diff --git a/numpy-1.6.2/numpy/oldnumeric/matrix.py b/numpy-1.6.2/numpy/oldnumeric/matrix.py deleted file mode 100644 index 5f8c1ca5ea..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/matrix.py +++ /dev/null @@ -1,67 +0,0 @@ -# This module is for compatibility only. - -__all__ = ['UserArray', 'squeeze', 'Matrix', 'asarray', 'dot', 'k', 'Numeric', 'LinearAlgebra', 'identity', 'multiply', 'types', 'string'] - -import types -from user_array import UserArray, asarray -import numpy.oldnumeric as Numeric -from numpy.oldnumeric import dot, identity, multiply -import numpy.oldnumeric.linear_algebra as LinearAlgebra -from numpy import matrix as Matrix, squeeze - -# Hidden names that will be the same. 
- -_table = [None]*256 -for k in range(256): - _table[k] = chr(k) -_table = ''.join(_table) - -_numchars = '0123456789.-+jeEL' -_todelete = [] -for k in _table: - if k not in _numchars: - _todelete.append(k) -_todelete = ''.join(_todelete) - - -def _eval(astr): - return eval(astr.translate(_table,_todelete)) - -def _convert_from_string(data): - data.find - rows = data.split(';') - newdata = [] - count = 0 - for row in rows: - trow = row.split(',') - newrow = [] - for col in trow: - temp = col.split() - newrow.extend(map(_eval,temp)) - if count == 0: - Ncols = len(newrow) - elif len(newrow) != Ncols: - raise ValueError, "Rows not the same size." - count += 1 - newdata.append(newrow) - return newdata - - -_lkup = {'0':'000', - '1':'001', - '2':'010', - '3':'011', - '4':'100', - '5':'101', - '6':'110', - '7':'111'} - -def _binary(num): - ostr = oct(num) - bin = '' - for ch in ostr[1:]: - bin += _lkup[ch] - ind = 0 - while bin[ind] == '0': - ind += 1 - return bin[ind:] diff --git a/numpy-1.6.2/numpy/oldnumeric/misc.py b/numpy-1.6.2/numpy/oldnumeric/misc.py deleted file mode 100644 index ccd47efbbe..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/misc.py +++ /dev/null @@ -1,36 +0,0 @@ -# Functions that already have the correct syntax or miscellaneous functions - - -__all__ = ['sort', 'copy_reg', 'clip', 'rank', - 'sign', 'shape', 'types', 'allclose', 'size', - 'choose', 'swapaxes', 'array_str', - 'pi', 'math', 'concatenate', 'putmask', 'put', - 'around', 'vdot', 'transpose', 'array2string', 'diagonal', - 'searchsorted', 'copy', 'resize', - 'array_repr', 'e', 'StringIO', 'pickle', - 'argsort', 'convolve', 'cross_correlate', - 'dot', 'outerproduct', 'innerproduct', 'insert'] - -import types -import StringIO -import pickle -import math -import copy -import copy_reg - -import sys -if sys.version_info[0] >= 3: - import copyreg - import io - StringIO = io.BytesIO - copy_reg = copyreg - -from numpy import sort, clip, rank, sign, shape, putmask, allclose, size,\ - choose, 
swapaxes, array_str, array_repr, e, pi, put, \ - resize, around, concatenate, vdot, transpose, \ - diagonal, searchsorted, argsort, convolve, dot, \ - outer as outerproduct, inner as innerproduct, \ - correlate as cross_correlate, \ - place as insert - -from array_printer import array2string diff --git a/numpy-1.6.2/numpy/oldnumeric/mlab.py b/numpy-1.6.2/numpy/oldnumeric/mlab.py deleted file mode 100644 index e2a0262f02..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/mlab.py +++ /dev/null @@ -1,126 +0,0 @@ -# This module is for compatibility only. All functions are defined elsewhere. - -__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle', - 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', - 'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud', - 'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc', - 'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean'] - -import numpy.oldnumeric.linear_algebra as LinearAlgebra -import numpy.oldnumeric.random_array as RandomArray -from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \ - angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \ - diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \ - amax as _Nmax, amin as _Nmin, blackman, bartlett, \ - squeeze, sinc, median, fliplr, mean as _Nmean, transpose - -from numpy.linalg import eig, svd -from numpy.random import rand, randn -import numpy as np - -from typeconv import convtypecode - -def eye(N, M=None, k=0, typecode=None, dtype=None): - """ eye returns a N-by-M 2-d array where the k-th diagonal is all ones, - and everything else is zeros. 
- """ - dtype = convtypecode(typecode, dtype) - if M is None: M = N - m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)),-k) - if m.dtype != dtype: - return m.astype(dtype) - -def tri(N, M=None, k=0, typecode=None, dtype=None): - """ returns a N-by-M array where all the diagonals starting from - lower left corner up to the k-th are all ones. - """ - dtype = convtypecode(typecode, dtype) - if M is None: M = N - m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)),-k) - if m.dtype != dtype: - return m.astype(dtype) - -def trapz(y, x=None, axis=-1): - return _Ntrapz(y, x, axis=axis) - -def ptp(x, axis=0): - return _Nptp(x, axis) - -def cumprod(x, axis=0): - return _Ncumprod(x, axis) - -def max(x, axis=0): - return _Nmax(x, axis) - -def min(x, axis=0): - return _Nmin(x, axis) - -def prod(x, axis=0): - return _Nprod(x, axis) - -def std(x, axis=0): - N = asarray(x).shape[axis] - return _Nstd(x, axis)*sqrt(N/(N-1.)) - -def mean(x, axis=0): - return _Nmean(x, axis) - -# This is exactly the same cov function as in MLab -def cov(m, y=None, rowvar=0, bias=0): - if y is None: - y = m - else: - y = y - if rowvar: - m = transpose(m) - y = transpose(y) - if (m.shape[0] == 1): - m = transpose(m) - if (y.shape[0] == 1): - y = transpose(y) - N = m.shape[0] - if (y.shape[0] != N): - raise ValueError, "x and y must have the same number "\ - "of observations" - m = m - _Nmean(m,axis=0) - y = y - _Nmean(y,axis=0) - if bias: - fact = N*1.0 - else: - fact = N-1.0 - return squeeze(dot(transpose(m), conjugate(y)) / fact) - -from numpy import sqrt, multiply -def corrcoef(x, y=None): - c = cov(x, y) - d = diag(c) - return c/sqrt(multiply.outer(d,d)) - -from compat import * -from functions import * -from precision import * -from ufuncs import * -from misc import * - -import compat -import precision -import functions -import misc -import ufuncs - -import numpy -__version__ = numpy.__version__ -del numpy - -__all__ += ['__version__'] -__all__ += compat.__all__ -__all__ += 
precision.__all__ -__all__ += functions.__all__ -__all__ += ufuncs.__all__ -__all__ += misc.__all__ - -del compat -del functions -del precision -del ufuncs -del misc diff --git a/numpy-1.6.2/numpy/oldnumeric/precision.py b/numpy-1.6.2/numpy/oldnumeric/precision.py deleted file mode 100644 index c095ceb199..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/precision.py +++ /dev/null @@ -1,168 +0,0 @@ -# Lifted from Precision.py. This is for compatibility only. -# -# The character strings are still for "new" NumPy -# which is the only Incompatibility with Numeric - -__all__ = ['Character', 'Complex', 'Float', - 'PrecisionError', 'PyObject', 'Int', 'UInt', - 'UnsignedInt', 'UnsignedInteger', 'string', 'typecodes', 'zeros'] - -from functions import zeros -import string # for backwards compatibility - -typecodes = {'Character':'c', 'Integer':'bhil', 'UnsignedInteger':'BHIL', 'Float':'fd', 'Complex':'FD'} - -def _get_precisions(typecodes): - lst = [] - for t in typecodes: - lst.append( (zeros( (1,), t ).itemsize*8, t) ) - return lst - -def _fill_table(typecodes, table={}): - for key, value in typecodes.items(): - table[key] = _get_precisions(value) - return table - -_code_table = _fill_table(typecodes) - -class PrecisionError(Exception): - pass - -def _lookup(table, key, required_bits): - lst = table[key] - for bits, typecode in lst: - if bits >= required_bits: - return typecode - raise PrecisionError, key+" of "+str(required_bits)+" bits not available on this system" - -Character = 'c' - -try: - UnsignedInt8 = _lookup(_code_table, "UnsignedInteger", 8) - UInt8 = UnsignedInt8 - __all__.extend(['UnsignedInt8', 'UInt8']) -except(PrecisionError): - pass -try: - UnsignedInt16 = _lookup(_code_table, "UnsignedInteger", 16) - UInt16 = UnsignedInt16 - __all__.extend(['UnsignedInt16', 'UInt16']) -except(PrecisionError): - pass -try: - UnsignedInt32 = _lookup(_code_table, "UnsignedInteger", 32) - UInt32 = UnsignedInt32 - __all__.extend(['UnsignedInt32', 'UInt32']) 
-except(PrecisionError): - pass -try: - UnsignedInt64 = _lookup(_code_table, "UnsignedInteger", 64) - UInt64 = UnsignedInt64 - __all__.extend(['UnsignedInt64', 'UInt64']) -except(PrecisionError): - pass -try: - UnsignedInt128 = _lookup(_code_table, "UnsignedInteger", 128) - UInt128 = UnsignedInt128 - __all__.extend(['UnsignedInt128', 'UInt128']) -except(PrecisionError): - pass -UInt = UnsignedInt = UnsignedInteger = 'u' - -try: - Int0 = _lookup(_code_table, 'Integer', 0) - __all__.append('Int0') -except(PrecisionError): - pass -try: - Int8 = _lookup(_code_table, 'Integer', 8) - __all__.append('Int8') -except(PrecisionError): - pass -try: - Int16 = _lookup(_code_table, 'Integer', 16) - __all__.append('Int16') -except(PrecisionError): - pass -try: - Int32 = _lookup(_code_table, 'Integer', 32) - __all__.append('Int32') -except(PrecisionError): - pass -try: - Int64 = _lookup(_code_table, 'Integer', 64) - __all__.append('Int64') -except(PrecisionError): - pass -try: - Int128 = _lookup(_code_table, 'Integer', 128) - __all__.append('Int128') -except(PrecisionError): - pass -Int = 'l' - -try: - Float0 = _lookup(_code_table, 'Float', 0) - __all__.append('Float0') -except(PrecisionError): - pass -try: - Float8 = _lookup(_code_table, 'Float', 8) - __all__.append('Float8') -except(PrecisionError): - pass -try: - Float16 = _lookup(_code_table, 'Float', 16) - __all__.append('Float16') -except(PrecisionError): - pass -try: - Float32 = _lookup(_code_table, 'Float', 32) - __all__.append('Float32') -except(PrecisionError): - pass -try: - Float64 = _lookup(_code_table, 'Float', 64) - __all__.append('Float64') -except(PrecisionError): - pass -try: - Float128 = _lookup(_code_table, 'Float', 128) - __all__.append('Float128') -except(PrecisionError): - pass -Float = 'd' - -try: - Complex0 = _lookup(_code_table, 'Complex', 0) - __all__.append('Complex0') -except(PrecisionError): - pass -try: - Complex8 = _lookup(_code_table, 'Complex', 16) - __all__.append('Complex8') 
-except(PrecisionError): - pass -try: - Complex16 = _lookup(_code_table, 'Complex', 32) - __all__.append('Complex16') -except(PrecisionError): - pass -try: - Complex32 = _lookup(_code_table, 'Complex', 64) - __all__.append('Complex32') -except(PrecisionError): - pass -try: - Complex64 = _lookup(_code_table, 'Complex', 128) - __all__.append('Complex64') -except(PrecisionError): - pass -try: - Complex128 = _lookup(_code_table, 'Complex', 256) - __all__.append('Complex128') -except(PrecisionError): - pass -Complex = 'D' - -PyObject = 'O' diff --git a/numpy-1.6.2/numpy/oldnumeric/random_array.py b/numpy-1.6.2/numpy/oldnumeric/random_array.py deleted file mode 100644 index e84aedf1e3..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/random_array.py +++ /dev/null @@ -1,266 +0,0 @@ -# Backward compatible module for RandomArray - -__all__ = ['ArgumentError','F','beta','binomial','chi_square', 'exponential', - 'gamma', 'get_seed', 'mean_var_test', 'multinomial', - 'multivariate_normal', 'negative_binomial', 'noncentral_F', - 'noncentral_chi_square', 'normal', 'permutation', 'poisson', - 'randint', 'random', 'random_integers', 'seed', 'standard_normal', - 'uniform'] - -ArgumentError = ValueError - -import numpy.random.mtrand as mt -import numpy as np - -def seed(x=0, y=0): - if (x == 0 or y == 0): - mt.seed() - else: - mt.seed((x,y)) - -def get_seed(): - raise NotImplementedError, \ - "If you want to save the state of the random number generator.\n"\ - "Then you should use obj = numpy.random.get_state() followed by.\n"\ - "numpy.random.set_state(obj)." 
- -def random(shape=[]): - "random(n) or random([n, m, ...]) returns array of random numbers" - if shape == []: - shape = None - return mt.random_sample(shape) - -def uniform(minimum, maximum, shape=[]): - """uniform(minimum, maximum, shape=[]) returns array of given shape of random reals - in given range""" - if shape == []: - shape = None - return mt.uniform(minimum, maximum, shape) - -def randint(minimum, maximum=None, shape=[]): - """randint(min, max, shape=[]) = random integers >=min, < max - If max not given, random integers >= 0, = 0.6: - raise SystemExit, "uniform returned out of desired range" - print "randint(1, 10, shape=[50])" - print randint(1, 10, shape=[50]) - print "permutation(10)", permutation(10) - print "randint(3,9)", randint(3,9) - print "random_integers(10, shape=[20])" - print random_integers(10, shape=[20]) - s = 3.0 - x = normal(2.0, s, [10, 1000]) - if len(x.shape) != 2 or x.shape[0] != 10 or x.shape[1] != 1000: - raise SystemExit, "standard_normal returned wrong shape" - x.shape = (10000,) - mean_var_test(x, "normally distributed numbers with mean 2 and variance %f"%(s**2,), 2, s**2, 0) - x = exponential(3, 10000) - mean_var_test(x, "random numbers exponentially distributed with mean %f"%(s,), s, s**2, 2) - x = multivariate_normal(np.array([10,20]), np.array(([1,2],[2,4]))) - print "\nA multivariate normal", x - if x.shape != (2,): raise SystemExit, "multivariate_normal returned wrong shape" - x = multivariate_normal(np.array([10,20]), np.array([[1,2],[2,4]]), [4,3]) - print "A 4x3x2 array containing multivariate normals" - print x - if x.shape != (4,3,2): raise SystemExit, "multivariate_normal returned wrong shape" - x = multivariate_normal(np.array([-100,0,100]), np.array([[3,2,1],[2,2,1],[1,1,1]]), 10000) - x_mean = np.sum(x,axis=0)/10000. 
- print "Average of 10000 multivariate normals with mean [-100,0,100]" - print x_mean - x_minus_mean = x - x_mean - print "Estimated covariance of 10000 multivariate normals with covariance [[3,2,1],[2,2,1],[1,1,1]]" - print np.dot(np.transpose(x_minus_mean),x_minus_mean)/9999. - x = beta(5.0, 10.0, 10000) - mean_var_test(x, "beta(5.,10.) random numbers", 0.333, 0.014) - x = gamma(.01, 2., 10000) - mean_var_test(x, "gamma(.01,2.) random numbers", 2*100, 2*100*100) - x = chi_square(11., 10000) - mean_var_test(x, "chi squared random numbers with 11 degrees of freedom", 11, 22, 2*np.sqrt(2./11.)) - x = F(5., 10., 10000) - mean_var_test(x, "F random numbers with 5 and 10 degrees of freedom", 1.25, 1.35) - x = poisson(50., 10000) - mean_var_test(x, "poisson random numbers with mean 50", 50, 50, 0.14) - print "\nEach element is the result of 16 binomial trials with probability 0.5:" - print binomial(16, 0.5, 16) - print "\nEach element is the result of 16 negative binomial trials with probability 0.5:" - print negative_binomial(16, 0.5, [16,]) - print "\nEach row is the result of 16 multinomial trials with probabilities [0.1, 0.5, 0.1 0.3]:" - x = multinomial(16, [0.1, 0.5, 0.1], 8) - print x - print "Mean = ", np.sum(x,axis=0)/8. - -if __name__ == '__main__': - test() diff --git a/numpy-1.6.2/numpy/oldnumeric/rng.py b/numpy-1.6.2/numpy/oldnumeric/rng.py deleted file mode 100644 index 28d3f16dfc..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/rng.py +++ /dev/null @@ -1,135 +0,0 @@ -# This module re-creates the RNG interface from Numeric -# Replace import RNG with import numpy.oldnumeric.rng as RNG -# -# It is for backwards compatibility only. 
- - -__all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution', - 'NormalDistribution', 'UniformDistribution', 'error', 'ranf', - 'default_distribution', 'random_sample', 'standard_generator'] - -import numpy.random.mtrand as mt -import math - -class error(Exception): - pass - -class Distribution(object): - def __init__(self, meth, *args): - self._meth = meth - self._args = args - - def density(self,x): - raise NotImplementedError - - def __call__(self, x): - return self.density(x) - - def _onesample(self, rng): - return getattr(rng, self._meth)(*self._args) - - def _sample(self, rng, n): - kwds = {'size' : n} - return getattr(rng, self._meth)(*self._args, **kwds) - - -class ExponentialDistribution(Distribution): - def __init__(self, lambda_): - if (lambda_ <= 0): - raise error, "parameter must be positive" - Distribution.__init__(self, 'exponential', lambda_) - - def density(x): - if x < 0: - return 0.0 - else: - lambda_ = self._args[0] - return lambda_*math.exp(-lambda_*x) - -class LogNormalDistribution(Distribution): - def __init__(self, m, s): - m = float(m) - s = float(s) - if (s <= 0): - raise error, "standard deviation must be positive" - Distribution.__init__(self, 'lognormal', m, s) - sn = math.log(1.0+s*s/(m*m)); - self._mn = math.log(m)-0.5*sn - self._sn = math.sqrt(sn) - self._fac = 1.0/math.sqrt(2*math.pi)/self._sn - - def density(x): - m,s = self._args - y = (math.log(x)-self._mn)/self._sn - return self._fac*math.exp(-0.5*y*y)/x - - -class NormalDistribution(Distribution): - def __init__(self, m, s): - m = float(m) - s = float(s) - if (s <= 0): - raise error, "standard deviation must be positive" - Distribution.__init__(self, 'normal', m, s) - self._fac = 1.0/math.sqrt(2*math.pi)/s - - def density(x): - m,s = self._args - y = (x-m)/s - return self._fac*math.exp(-0.5*y*y) - -class UniformDistribution(Distribution): - def __init__(self, a, b): - a = float(a) - b = float(b) - width = b-a - if (width <=0): - raise error, "width of 
uniform distribution must be > 0" - Distribution.__init__(self, 'uniform', a, b) - self._fac = 1.0/width - - def density(x): - a, b = self._args - if (x < a) or (x >= b): - return 0.0 - else: - return self._fac - -default_distribution = UniformDistribution(0.0,1.0) - -class CreateGenerator(object): - def __init__(self, seed, dist=None): - if seed <= 0: - self._rng = mt.RandomState() - elif seed > 0: - self._rng = mt.RandomState(seed) - if dist is None: - dist = default_distribution - if not isinstance(dist, Distribution): - raise error, "Not a distribution object" - self._dist = dist - - def ranf(self): - return self._dist._onesample(self._rng) - - def sample(self, n): - return self._dist._sample(self._rng, n) - - -standard_generator = CreateGenerator(-1) - -def ranf(): - "ranf() = a random number from the standard generator." - return standard_generator.ranf() - -def random_sample(*n): - """random_sample(n) = array of n random numbers; - - random_sample(n1, n2, ...)= random array of shape (n1, n2, ..)""" - - if not n: - return standard_generator.ranf() - m = 1 - for i in n: - m = m * i - return standard_generator.sample(m).reshape(*n) diff --git a/numpy-1.6.2/numpy/oldnumeric/rng_stats.py b/numpy-1.6.2/numpy/oldnumeric/rng_stats.py deleted file mode 100644 index 8c7fec4336..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/rng_stats.py +++ /dev/null @@ -1,35 +0,0 @@ - -__all__ = ['average', 'histogram', 'standardDeviation', 'variance'] - -import numpy.oldnumeric as Numeric - -def average(data): - data = Numeric.array(data) - return Numeric.add.reduce(data)/len(data) - -def variance(data): - data = Numeric.array(data) - return Numeric.add.reduce((data-average(data,axis=0))**2)/(len(data)-1) - -def standardDeviation(data): - data = Numeric.array(data) - return Numeric.sqrt(variance(data)) - -def histogram(data, nbins, range = None): - data = Numeric.array(data, Numeric.Float) - if range is None: - min = Numeric.minimum.reduce(data) - max = Numeric.maximum.reduce(data) - 
else: - min, max = range - data = Numeric.repeat(data, - Numeric.logical_and(Numeric.less_equal(data, max), - Numeric.greater_equal(data, - min)),axis=0) - bin_width = (max-min)/nbins - data = Numeric.floor((data - min)/bin_width).astype(Numeric.Int) - histo = Numeric.add.reduce(Numeric.equal( - Numeric.arange(nbins)[:,Numeric.NewAxis], data), -1) - histo[-1] = histo[-1] + Numeric.add.reduce(Numeric.equal(nbins, data)) - bins = min + bin_width*(Numeric.arange(nbins)+0.5) - return Numeric.transpose(Numeric.array([bins, histo])) diff --git a/numpy-1.6.2/numpy/oldnumeric/setup.py b/numpy-1.6.2/numpy/oldnumeric/setup.py deleted file mode 100644 index 31b5ff3cc6..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/setup.py +++ /dev/null @@ -1,10 +0,0 @@ - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('oldnumeric',parent_package,top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/oldnumeric/setupscons.py b/numpy-1.6.2/numpy/oldnumeric/setupscons.py deleted file mode 100644 index 82e8a62013..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/setupscons.py +++ /dev/null @@ -1,8 +0,0 @@ - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - return Configuration('oldnumeric',parent_package,top_path) - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/oldnumeric/tests/test_oldnumeric.py b/numpy-1.6.2/numpy/oldnumeric/tests/test_oldnumeric.py deleted file mode 100644 index 24d709d2c8..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/tests/test_oldnumeric.py +++ /dev/null @@ -1,94 +0,0 @@ -import unittest - -from numpy.testing import * - -from numpy import array -from numpy.oldnumeric import * -from numpy.core.numeric import 
float32, float64, complex64, complex128, int8, \ - int16, int32, int64, uint, uint8, uint16, uint32, uint64 - -class test_oldtypes(unittest.TestCase): - def test_oldtypes(self, level=1): - a1 = array([0,1,0], Float) - a2 = array([0,1,0], float) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Float8) - a2 = array([0,1,0], float) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Float16) - a2 = array([0,1,0], float) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Float32) - a2 = array([0,1,0], float32) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Float64) - a2 = array([0,1,0], float64) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex) - a2 = array([0,1,0], complex) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex8) - a2 = array([0,1,0], complex) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex16) - a2 = array([0,1,0], complex) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex32) - a2 = array([0,1,0], complex64) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex64) - a2 = array([0,1,0], complex128) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Int) - a2 = array([0,1,0], int) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Int8) - a2 = array([0,1,0], int8) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Int16) - a2 = array([0,1,0], int16) - assert_array_equal(a1, a2) - a1 = array([0,1,0], Int32) - a2 = array([0,1,0], int32) - assert_array_equal(a1, a2) - try: - a1 = array([0,1,0], Int64) - a2 = array([0,1,0], int64) - assert_array_equal(a1, a2) - except NameError: - # Not all systems have 64-bit integers. 
- pass - a1 = array([0,1,0], UnsignedInt) - a2 = array([0,1,0], UnsignedInteger) - a3 = array([0,1,0], uint) - assert_array_equal(a1, a3) - assert_array_equal(a2, a3) - a1 = array([0,1,0], UInt8) - a2 = array([0,1,0], UnsignedInt8) - a3 = array([0,1,0], uint8) - assert_array_equal(a1, a3) - assert_array_equal(a2, a3) - a1 = array([0,1,0], UInt16) - a2 = array([0,1,0], UnsignedInt16) - a3 = array([0,1,0], uint16) - assert_array_equal(a1, a3) - assert_array_equal(a2, a3) - a1 = array([0,1,0], UInt32) - a2 = array([0,1,0], UnsignedInt32) - a3 = array([0,1,0], uint32) - assert_array_equal(a1, a3) - assert_array_equal(a2, a3) - try: - a1 = array([0,1,0], UInt64) - a2 = array([0,1,0], UnsignedInt64) - a3 = array([0,1,0], uint64) - assert_array_equal(a1, a3) - assert_array_equal(a2, a3) - except NameError: - # Not all systems have 64-bit integers. - pass - - -if __name__ == "__main__": - import nose - nose.main() diff --git a/numpy-1.6.2/numpy/oldnumeric/tests/test_regression.py b/numpy-1.6.2/numpy/oldnumeric/tests/test_regression.py deleted file mode 100644 index 235ae4fe5a..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/tests/test_regression.py +++ /dev/null @@ -1,10 +0,0 @@ -from numpy.testing import * - -rlevel = 1 - -class TestRegression(TestCase): - def test_numeric_random(self, level=rlevel): - """Ticket #552""" - from numpy.oldnumeric.random_array import randint - randint(0,50,[2,3]) - diff --git a/numpy-1.6.2/numpy/oldnumeric/typeconv.py b/numpy-1.6.2/numpy/oldnumeric/typeconv.py deleted file mode 100644 index 4e203d4aed..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/typeconv.py +++ /dev/null @@ -1,60 +0,0 @@ -__all__ = ['oldtype2dtype', 'convtypecode', 'convtypecode2', 'oldtypecodes'] - -import numpy as np - -oldtype2dtype = {'1': np.dtype(np.byte), - 's': np.dtype(np.short), -# 'i': np.dtype(np.intc), -# 'l': np.dtype(int), -# 'b': np.dtype(np.ubyte), - 'w': np.dtype(np.ushort), - 'u': np.dtype(np.uintc), -# 'f': np.dtype(np.single), -# 'd': np.dtype(float), -# 
'F': np.dtype(np.csingle), -# 'D': np.dtype(complex), -# 'O': np.dtype(object), -# 'c': np.dtype('c'), - None: np.dtype(int) - } - -# converts typecode=None to int -def convtypecode(typecode, dtype=None): - if dtype is None: - try: - return oldtype2dtype[typecode] - except: - return np.dtype(typecode) - else: - return dtype - -#if both typecode and dtype are None -# return None -def convtypecode2(typecode, dtype=None): - if dtype is None: - if typecode is None: - return None - else: - try: - return oldtype2dtype[typecode] - except: - return np.dtype(typecode) - else: - return dtype - -_changedtypes = {'B': 'b', - 'b': '1', - 'h': 's', - 'H': 'w', - 'I': 'u'} - -class _oldtypecodes(dict): - def __getitem__(self, obj): - char = np.dtype(obj).char - try: - return _changedtypes[char] - except KeyError: - return char - - -oldtypecodes = _oldtypecodes() diff --git a/numpy-1.6.2/numpy/oldnumeric/ufuncs.py b/numpy-1.6.2/numpy/oldnumeric/ufuncs.py deleted file mode 100644 index c26050f55e..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/ufuncs.py +++ /dev/null @@ -1,19 +0,0 @@ -__all__ = ['less', 'cosh', 'arcsinh', 'add', 'ceil', 'arctan2', 'floor_divide', - 'fmod', 'hypot', 'logical_and', 'power', 'sinh', 'remainder', 'cos', - 'equal', 'arccos', 'less_equal', 'divide', 'bitwise_or', - 'bitwise_and', 'logical_xor', 'log', 'subtract', 'invert', - 'negative', 'log10', 'arcsin', 'arctanh', 'logical_not', - 'not_equal', 'tanh', 'true_divide', 'maximum', 'arccosh', - 'logical_or', 'minimum', 'conjugate', 'tan', 'greater', - 'bitwise_xor', 'fabs', 'floor', 'sqrt', 'arctan', 'right_shift', - 'absolute', 'sin', 'multiply', 'greater_equal', 'left_shift', - 'exp', 'divide_safe'] - -from numpy import less, cosh, arcsinh, add, ceil, arctan2, floor_divide, \ - fmod, hypot, logical_and, power, sinh, remainder, cos, \ - equal, arccos, less_equal, divide, bitwise_or, bitwise_and, \ - logical_xor, log, subtract, invert, negative, log10, arcsin, \ - arctanh, logical_not, not_equal, tanh, 
true_divide, maximum, \ - arccosh, logical_or, minimum, conjugate, tan, greater, bitwise_xor, \ - fabs, floor, sqrt, arctan, right_shift, absolute, sin, \ - multiply, greater_equal, left_shift, exp, divide as divide_safe diff --git a/numpy-1.6.2/numpy/oldnumeric/user_array.py b/numpy-1.6.2/numpy/oldnumeric/user_array.py deleted file mode 100644 index 375c4013bb..0000000000 --- a/numpy-1.6.2/numpy/oldnumeric/user_array.py +++ /dev/null @@ -1,9 +0,0 @@ - - -from numpy.oldnumeric import * -from numpy.lib.user_array import container as UserArray - -import numpy.oldnumeric as nold -__all__ = nold.__all__[:] -__all__ += ['UserArray'] -del nold diff --git a/numpy-1.6.2/numpy/polynomial/__init__.py b/numpy-1.6.2/numpy/polynomial/__init__.py deleted file mode 100644 index 48c679ce1a..0000000000 --- a/numpy-1.6.2/numpy/polynomial/__init__.py +++ /dev/null @@ -1,317 +0,0 @@ -""" -A sub-package for efficiently dealing with polynomials. - -Within the documentation for this sub-package, a "finite power series," -i.e., a polynomial (also referred to simply as a "series") is represented -by a 1-D numpy array of the polynomial's coefficients, ordered from lowest -order term to highest. For example, array([1,2,3]) represents -``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial -applicable to the specific module in question, e.g., `polynomial` (which -"wraps" the "standard" basis) or `chebyshev`. For optimal performance, -all operations on polynomials, including evaluation at an argument, are -implemented as operations on the coefficients. Additional (module-specific) -information can be found in the docstring for the module of interest. 
- -""" -import warnings - -from polynomial import Polynomial -from chebyshev import Chebyshev -from legendre import Legendre -from hermite import Hermite -from hermite_e import HermiteE -from laguerre import Laguerre - -# Deprecate direct import of functions from this package -# version 1.6.0 - -from numpy.lib import deprecate - -# polynomial functions - -@deprecate(message='Please import polyline from numpy.polynomial.polynomial') -def polyline(off, scl) : - from numpy.polynomial.polynomial import polyline - return polyline(off, scl) - -@deprecate(message='Please import polyfromroots from numpy.polynomial.polynomial') -def polyfromroots(roots) : - from numpy.polynomial.polynomial import polyfromroots - return polyfromroots(roots) - -@deprecate(message='Please import polyadd from numpy.polynomial.polynomial') -def polyadd(c1, c2): - from numpy.polynomial.polynomial import polyadd - return polyadd(c1, c2) - -@deprecate(message='Please import polysub from numpy.polynomial.polynomial') -def polysub(c1, c2): - from numpy.polynomial.polynomial import polysub - return polysub(c1, c2) - -@deprecate(message='Please import polymulx from numpy.polynomial.polynomial') -def polymulx(cs): - from numpy.polynomial.polynomial import polymulx - return polymulx(cs) - -@deprecate(message='Please import polymul from numpy.polynomial.polynomial') -def polymul(c1, c2): - from numpy.polynomial.polynomial import polymul - return polymul(c1, c2) - -@deprecate(message='Please import polydiv from numpy.polynomial.polynomial') -def polydiv(c1, c2): - from numpy.polynomial.polynomial import polydiv - return polydiv(c1, c2) - -@deprecate(message='Please import polypow from numpy.polynomial.polynomial') -def polypow(cs, pow, maxpower=None) : - from numpy.polynomial.polynomial import polypow - return polypow(cs, pow, maxpower) - -@deprecate(message='Please import polyder from numpy.polynomial.polynomial') -def polyder(cs, m=1, scl=1): - from numpy.polynomial.polynomial import polyder - return 
polyder(cs, m, scl) - -@deprecate(message='Please import polyint from numpy.polynomial.polynomial') -def polyint(cs, m=1, k=[], lbnd=0, scl=1): - from numpy.polynomial.polynomial import polyint - return polyint(cs, m, k, lbnd, scl) - -@deprecate(message='Please import polyval from numpy.polynomial.polynomial') -def polyval(x, cs): - from numpy.polynomial.polynomial import polyval - return polyval(x, cs) - -@deprecate(message='Please import polyvander from numpy.polynomial.polynomial') -def polyvander(x, deg) : - from numpy.polynomial.polynomial import polyvander - return polyvander(x, deg) - -@deprecate(message='Please import polyfit from numpy.polynomial.polynomial') -def polyfit(x, y, deg, rcond=None, full=False, w=None): - from numpy.polynomial.polynomial import polyfit - return polyfit(x, y, deg, rcond, full, w) - -@deprecate(message='Please import polyroots from numpy.polynomial.polynomial') -def polyroots(cs): - from numpy.polynomial.polynomial import polyroots - return polyroots(cs) - - -# chebyshev functions - -@deprecate(message='Please import poly2cheb from numpy.polynomial.chebyshev') -def poly2cheb(pol) : - from numpy.polynomial.chebyshev import poly2cheb - return poly2cheb(pol) - -@deprecate(message='Please import cheb2poly from numpy.polynomial.chebyshev') -def cheb2poly(cs) : - from numpy.polynomial.chebyshev import cheb2poly - return cheb2poly(cs) - -@deprecate(message='Please import chebline from numpy.polynomial.chebyshev') -def chebline(off, scl) : - from numpy.polynomial.chebyshev import chebline - return chebline(off, scl) - -@deprecate(message='Please import chebfromroots from numpy.polynomial.chebyshev') -def chebfromroots(roots) : - from numpy.polynomial.chebyshev import chebfromroots - return chebfromroots(roots) - -@deprecate(message='Please import chebadd from numpy.polynomial.chebyshev') -def chebadd(c1, c2): - from numpy.polynomial.chebyshev import chebadd - return chebadd(c1, c2) - -@deprecate(message='Please import chebsub from 
numpy.polynomial.chebyshev') -def chebsub(c1, c2): - from numpy.polynomial.chebyshev import chebsub - return chebsub(c1, c2) - -@deprecate(message='Please import chebmulx from numpy.polynomial.chebyshev') -def chebmulx(cs): - from numpy.polynomial.chebyshev import chebmulx - return chebmulx(cs) - -@deprecate(message='Please import chebmul from numpy.polynomial.chebyshev') -def chebmul(c1, c2): - from numpy.polynomial.chebyshev import chebmul - return chebmul(c1, c2) - -@deprecate(message='Please import chebdiv from numpy.polynomial.chebyshev') -def chebdiv(c1, c2): - from numpy.polynomial.chebyshev import chebdiv - return chebdiv(c1, c2) - -@deprecate(message='Please import chebpow from numpy.polynomial.chebyshev') -def chebpow(cs, pow, maxpower=16) : - from numpy.polynomial.chebyshev import chebpow - return chebpow(cs, pow, maxpower) - -@deprecate(message='Please import chebder from numpy.polynomial.chebyshev') -def chebder(cs, m=1, scl=1) : - from numpy.polynomial.chebyshev import chebder - return chebder(cs, m, scl) - -@deprecate(message='Please import chebint from numpy.polynomial.chebyshev') -def chebint(cs, m=1, k=[], lbnd=0, scl=1): - from numpy.polynomial.chebyshev import chebint - return chebint(cs, m, k, lbnd, scl) - -@deprecate(message='Please import chebval from numpy.polynomial.chebyshev') -def chebval(x, cs): - from numpy.polynomial.chebyshev import chebval - return chebval(x, cs) - -@deprecate(message='Please import chebvander from numpy.polynomial.chebyshev') -def chebvander(x, deg) : - from numpy.polynomial.chebyshev import chebvander - return chebvander(x, deg) - -@deprecate(message='Please import chebfit from numpy.polynomial.chebyshev') -def chebfit(x, y, deg, rcond=None, full=False, w=None): - from numpy.polynomial.chebyshev import chebfit - return chebfit(x, y, deg, rcond, full, w) - -@deprecate(message='Please import chebroots from numpy.polynomial.chebyshev') -def chebroots(cs): - from numpy.polynomial.chebyshev import chebroots - return 
chebroots(cs) - -@deprecate(message='Please import chebpts1 from numpy.polynomial.chebyshev') -def chebpts1(npts): - from numpy.polynomial.chebyshev import chebpts1 - return chebpts1(npts) - -@deprecate(message='Please import chebpts2 from numpy.polynomial.chebyshev') -def chebpts2(npts): - from numpy.polynomial.chebyshev import chebpts2 - return chebpts2(npts) - - -# legendre functions - -@deprecate(message='Please import poly2leg from numpy.polynomial.legendre') -def poly2leg(pol) : - from numpy.polynomial.legendre import poly2leg - return poly2leg(pol) - -@deprecate(message='Please import leg2poly from numpy.polynomial.legendre') -def leg2poly(cs) : - from numpy.polynomial.legendre import leg2poly - return leg2poly(cs) - -@deprecate(message='Please import legline from numpy.polynomial.legendre') -def legline(off, scl) : - from numpy.polynomial.legendre import legline - return legline(off, scl) - -@deprecate(message='Please import legfromroots from numpy.polynomial.legendre') -def legfromroots(roots) : - from numpy.polynomial.legendre import legfromroots - return legfromroots(roots) - -@deprecate(message='Please import legadd from numpy.polynomial.legendre') -def legadd(c1, c2): - from numpy.polynomial.legendre import legadd - return legadd(c1, c2) - -@deprecate(message='Please import legsub from numpy.polynomial.legendre') -def legsub(c1, c2): - from numpy.polynomial.legendre import legsub - return legsub(c1, c2) - -@deprecate(message='Please import legmulx from numpy.polynomial.legendre') -def legmulx(cs): - from numpy.polynomial.legendre import legmulx - return legmulx(cs) - -@deprecate(message='Please import legmul from numpy.polynomial.legendre') -def legmul(c1, c2): - from numpy.polynomial.legendre import legmul - return legmul(c1, c2) - -@deprecate(message='Please import legdiv from numpy.polynomial.legendre') -def legdiv(c1, c2): - from numpy.polynomial.legendre import legdiv - return legdiv(c1, c2) - -@deprecate(message='Please import legpow from 
numpy.polynomial.legendre') -def legpow(cs, pow, maxpower=16) : - from numpy.polynomial.legendre import legpow - return legpow(cs, pow, maxpower) - -@deprecate(message='Please import legder from numpy.polynomial.legendre') -def legder(cs, m=1, scl=1) : - from numpy.polynomial.legendre import legder - return legder(cs, m, scl) - -@deprecate(message='Please import legint from numpy.polynomial.legendre') -def legint(cs, m=1, k=[], lbnd=0, scl=1): - from numpy.polynomial.legendre import legint - return legint(cs, m, k, lbnd, scl) - -@deprecate(message='Please import legval from numpy.polynomial.legendre') -def legval(x, cs): - from numpy.polynomial.legendre import legval - return legval(x, cs) - -@deprecate(message='Please import legvander from numpy.polynomial.legendre') -def legvander(x, deg) : - from numpy.polynomial.legendre import legvander - return legvander(x, deg) - -@deprecate(message='Please import legfit from numpy.polynomial.legendre') -def legfit(x, y, deg, rcond=None, full=False, w=None): - from numpy.polynomial.legendre import legfit - return legfit(x, y, deg, rcond, full, w) - -@deprecate(message='Please import legroots from numpy.polynomial.legendre') -def legroots(cs): - from numpy.polynomial.legendre import legroots - return legroots(cs) - - -# polyutils functions - -@deprecate(message='Please import trimseq from numpy.polynomial.polyutils') -def trimseq(seq) : - from numpy.polynomial.polyutils import trimseq - return trimseq(seq) - -@deprecate(message='Please import as_series from numpy.polynomial.polyutils') -def as_series(alist, trim=True) : - from numpy.polynomial.polyutils import as_series - return as_series(alist, trim) - -@deprecate(message='Please import trimcoef from numpy.polynomial.polyutils') -def trimcoef(c, tol=0) : - from numpy.polynomial.polyutils import trimcoef - return trimcoef(c, tol) - -@deprecate(message='Please import getdomain from numpy.polynomial.polyutils') -def getdomain(x) : - from numpy.polynomial.polyutils import 
getdomain - return getdomain(x) - -# Just remove this function as it screws up the documentation of the same -# named class method. -# -#@deprecate(message='Please import mapparms from numpy.polynomial.polyutils') -#def mapparms(old, new) : -# from numpy.polynomial.polyutils import mapparms -# return mapparms(old, new) - -@deprecate(message='Please import mapdomain from numpy.polynomial.polyutils') -def mapdomain(x, old, new) : - from numpy.polynomial.polyutils import mapdomain - return mapdomain(x, old, new) - - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/polynomial/chebyshev.py b/numpy-1.6.2/numpy/polynomial/chebyshev.py deleted file mode 100644 index e32cb229a3..0000000000 --- a/numpy-1.6.2/numpy/polynomial/chebyshev.py +++ /dev/null @@ -1,1450 +0,0 @@ -""" -Objects for dealing with Chebyshev series. - -This module provides a number of objects (mostly functions) useful for -dealing with Chebyshev series, including a `Chebyshev` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `chebdomain` -- Chebyshev series default domain, [-1,1]. -- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates - identically to 0. -- `chebone` -- (Coefficients of the) Chebyshev series that evaluates - identically to 1. -- `chebx` -- (Coefficients of the) Chebyshev series for the identity map, - ``f(x) = x``. - -Arithmetic ----------- -- `chebadd` -- add two Chebyshev series. -- `chebsub` -- subtract one Chebyshev series from another. -- `chebmul` -- multiply two Chebyshev series. -- `chebdiv` -- divide one Chebyshev series by another. -- `chebpow` -- raise a Chebyshev series to an positive integer power -- `chebval` -- evaluate a Chebyshev series at given points. 
- -Calculus --------- -- `chebder` -- differentiate a Chebyshev series. -- `chebint` -- integrate a Chebyshev series. - -Misc Functions --------------- -- `chebfromroots` -- create a Chebyshev series with specified roots. -- `chebroots` -- find the roots of a Chebyshev series. -- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials. -- `chebfit` -- least-squares fit returning a Chebyshev series. -- `chebpts1` -- Chebyshev points of the first kind. -- `chebpts2` -- Chebyshev points of the second kind. -- `chebtrim` -- trim leading coefficients from a Chebyshev series. -- `chebline` -- Chebyshev series representing given straight line. -- `cheb2poly` -- convert a Chebyshev series to a polynomial. -- `poly2cheb` -- convert a polynomial to a Chebyshev series. - -Classes -------- -- `Chebyshev` -- A Chebyshev series class. - -See also --------- -`numpy.polynomial` - -Notes ------ -The implementations of multiplication, division, integration, and -differentiation use the algebraic identities [1]_: - -.. math :: - T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ - z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. - -where - -.. math :: x = \\frac{z + z^{-1}}{2}. - -These identities allow a Chebyshev series to be expressed as a finite, -symmetric Laurent series. In this module, this sort of Laurent series -is referred to as a "z-series." - -References ----------- -.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev - Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 - (preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 
4) - -""" -from __future__ import division - -__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', - 'chebadd', 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', - 'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', - 'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots', - 'chebpts1', 'chebpts2', 'Chebyshev'] - -import numpy as np -import numpy.linalg as la -import polyutils as pu -import warnings -from polytemplate import polytemplate - -chebtrim = pu.trimcoef - -# -# A collection of functions for manipulating z-series. These are private -# functions and do minimal error checking. -# - -def _cseries_to_zseries(cs) : - """Covert Chebyshev series to z-series. - - Covert a Chebyshev series to the equivalent z-series. The result is - never an empty array. The dtype of the return is the same as that of - the input. No checks are run on the arguments as this routine is for - internal use. - - Parameters - ---------- - cs : 1-d ndarray - Chebyshev coefficients, ordered from low to high - - Returns - ------- - zs : 1-d ndarray - Odd length symmetric z-series, ordered from low to high. - - """ - n = cs.size - zs = np.zeros(2*n-1, dtype=cs.dtype) - zs[n-1:] = cs/2 - return zs + zs[::-1] - -def _zseries_to_cseries(zs) : - """Covert z-series to a Chebyshev series. - - Covert a z series to the equivalent Chebyshev series. The result is - never an empty array. The dtype of the return is the same as that of - the input. No checks are run on the arguments as this routine is for - internal use. - - Parameters - ---------- - zs : 1-d ndarray - Odd length symmetric z-series, ordered from low to high. - - Returns - ------- - cs : 1-d ndarray - Chebyshev coefficients, ordered from low to high. - - """ - n = (zs.size + 1)//2 - cs = zs[n-1:].copy() - cs[1:n] *= 2 - return cs - -def _zseries_mul(z1, z2) : - """Multiply two z-series. - - Multiply two z-series to produce a z-series. 
- - Parameters - ---------- - z1, z2 : 1-d ndarray - The arrays must be 1-d but this is not checked. - - Returns - ------- - product : 1-d ndarray - The product z-series. - - Notes - ----- - This is simply convolution. If symmetic/anti-symmetric z-series are - denoted by S/A then the following rules apply: - - S*S, A*A -> S - S*A, A*S -> A - - """ - return np.convolve(z1, z2) - -def _zseries_div(z1, z2) : - """Divide the first z-series by the second. - - Divide `z1` by `z2` and return the quotient and remainder as z-series. - Warning: this implementation only applies when both z1 and z2 have the - same symmetry, which is sufficient for present purposes. - - Parameters - ---------- - z1, z2 : 1-d ndarray - The arrays must be 1-d and have the same symmetry, but this is not - checked. - - Returns - ------- - - (quotient, remainder) : 1-d ndarrays - Quotient and remainder as z-series. - - Notes - ----- - This is not the same as polynomial division on account of the desired form - of the remainder. If symmetic/anti-symmetric z-series are denoted by S/A - then the following rules apply: - - S/S -> S,S - A/A -> S,A - - The restriction to types of the same symmetry could be fixed but seems like - uneeded generality. There is no natural form for the remainder in the case - where there is no symmetry. - - """ - z1 = z1.copy() - z2 = z2.copy() - len1 = len(z1) - len2 = len(z2) - if len2 == 1 : - z1 /= z2 - return z1, z1[:1]*0 - elif len1 < len2 : - return z1[:1]*0, z1 - else : - dlen = len1 - len2 - scl = z2[0] - z2 /= scl - quo = np.empty(dlen + 1, dtype=z1.dtype) - i = 0 - j = dlen - while i < j : - r = z1[i] - quo[i] = z1[i] - quo[dlen - i] = r - tmp = r*z2 - z1[i:i+len2] -= tmp - z1[j:j+len2] -= tmp - i += 1 - j -= 1 - r = z1[i] - quo[i] = r - tmp = r*z2 - z1[i:i+len2] -= tmp - quo /= scl - rem = z1[i+1:i-1+len2].copy() - return quo, rem - -def _zseries_der(zs) : - """Differentiate a z-series. - - The derivative is with respect to x, not z. 
This is achieved using the - chain rule and the value of dx/dz given in the module notes. - - Parameters - ---------- - zs : z-series - The z-series to differentiate. - - Returns - ------- - derivative : z-series - The derivative - - Notes - ----- - The zseries for x (ns) has been multiplied by two in order to avoid - using floats that are incompatible with Decimal and likely other - specialized scalar types. This scaling has been compensated by - multiplying the value of zs by two also so that the two cancels in the - division. - - """ - n = len(zs)//2 - ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs *= np.arange(-n, n+1)*2 - d, r = _zseries_div(zs, ns) - return d - -def _zseries_int(zs) : - """Integrate a z-series. - - The integral is with respect to x, not z. This is achieved by a change - of variable using dx/dz given in the module notes. - - Parameters - ---------- - zs : z-series - The z-series to integrate - - Returns - ------- - integral : z-series - The indefinite integral - - Notes - ----- - The zseries for x (ns) has been multiplied by two in order to avoid - using floats that are incompatible with Decimal and likely other - specialized scalar types. This scaling has been compensated by - dividing the resulting zs by two. - - """ - n = 1 + len(zs)//2 - ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs = _zseries_mul(zs, ns) - div = np.arange(-n, n+1)*2 - zs[:n] /= div[:n] - zs[n+1:] /= div[n+1:] - zs[n] = 0 - return zs - -# -# Chebyshev series functions -# - - -def poly2cheb(pol) : - """ - Convert a polynomial to a Chebyshev series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Chebyshev series, ordered - from lowest to highest degree. 
- - Parameters - ---------- - pol : array_like - 1-d array containing the polynomial coefficients - - Returns - ------- - cs : ndarray - 1-d array containing the coefficients of the equivalent Chebyshev - series. - - See Also - -------- - cheb2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> p = P.Polynomial(range(4)) - >>> p - Polynomial([ 0., 1., 2., 3.], [-1., 1.]) - >>> c = p.convert(kind=P.Chebyshev) - >>> c - Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.]) - >>> P.poly2cheb(range(4)) - array([ 1. , 3.25, 1. , 0.75]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1) : - res = chebadd(chebmulx(res), pol[i]) - return res - - -def cheb2poly(cs) : - """ - Convert a Chebyshev series to a polynomial. - - Convert an array representing the coefficients of a Chebyshev series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - cs : array_like - 1-d array containing the Chebyshev series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-d array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2cheb - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. 
- - Examples - -------- - >>> from numpy import polynomial as P - >>> c = P.Chebyshev(range(4)) - >>> c - Chebyshev([ 0., 1., 2., 3.], [-1., 1.]) - >>> p = c.convert(kind=P.Polynomial) - >>> p - Polynomial([ -2., -8., 4., 12.], [-1., 1.]) - >>> P.cheb2poly(range(4)) - array([ -2., -8., 4., 12.]) - - """ - from polynomial import polyadd, polysub, polymulx - - [cs] = pu.as_series([cs]) - n = len(cs) - if n < 3: - return cs - else: - c0 = cs[-2] - c1 = cs[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1) : - tmp = c0 - c0 = polysub(cs[i - 2], c1) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)) - - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Chebyshev default domain. -chebdomain = np.array([-1,1]) - -# Chebyshev coefficients representing zero. -chebzero = np.array([0]) - -# Chebyshev coefficients representing one. -chebone = np.array([1]) - -# Chebyshev coefficients representing the identity x. -chebx = np.array([0,1]) - -def chebline(off, scl) : - """ - Chebyshev series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Chebyshev series for - ``off + scl*x``. - - See Also - -------- - polyline - - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebline(3,2) - array([3, 2]) - >>> C.chebval(-3, C.chebline(3,2)) # should be -3 - -3.0 - - """ - if scl != 0 : - return np.array([off,scl]) - else : - return np.array([off]) - -def chebfromroots(roots) : - """ - Generate a Chebyshev series with the given roots. - - Return the array of coefficients for the C-series whose roots (a.k.a. - "zeros") are given by *roots*. 
The returned array of coefficients is - ordered from lowest order "term" to highest, and zeros of multiplicity - greater than one must be included in *roots* a number of times equal - to their multiplicity (e.g., if `2` is a root of multiplicity three, - then [2,2,2] must be in *roots*). - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-d array of the C-series' coefficients, ordered from low to - high. If all roots are real, ``out.dtype`` is a float type; - otherwise, ``out.dtype`` is a complex type, even if all the - coefficients in the result are real (see Examples below). - - See Also - -------- - polyfromroots - - Notes - ----- - What is returned are the :math:`c_i` such that: - - .. math:: - - \\sum_{i=0}^{n} c_i*T_i(x) = \\prod_{i=0}^{n} (x - roots[i]) - - where ``n == len(roots)`` and :math:`T_i(x)` is the `i`-th Chebyshev - (basis) polynomial over the domain `[-1,1]`. Note that, unlike - `polyfromroots`, due to the nature of the C-series basis set, the - above identity *does not* imply :math:`c_n = 1` identically (see - Examples). - - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis - array([ 0. , -0.25, 0. , 0.25]) - >>> j = complex(0,1) - >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.5+0.j, 0.0+0.j, 0.5+0.j]) - - """ - if len(roots) == 0 : - return np.ones(1) - else : - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [chebline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [chebmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = chebmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def chebadd(c1, c2): - """ - Add one Chebyshev series to another. - - Returns the sum of two Chebyshev series `c1` + `c2`. 
The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Chebyshev series of their sum. - - See Also - -------- - chebsub, chebmul, chebdiv, chebpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Chebyshev series - is a Chebyshev series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebadd(c1,c2) - array([ 4., 4., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] += c2 - ret = c1 - else : - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def chebsub(c1, c2): - """ - Subtract one Chebyshev series from another. - - Returns the difference of two Chebyshev series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Chebyshev series coefficients representing their difference. - - See Also - -------- - chebadd, chebmul, chebdiv, chebpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Chebyshev - series is a Chebyshev series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." 
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebsub(c1,c2) - array([-2., 0., 2.]) - >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) - array([ 2., 0., -2.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] -= c2 - ret = c1 - else : - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def chebmulx(cs): - """Multiply a Chebyshev series by x. - - Multiply the polynomial `cs` by x, where x is the independent - variable. - - - Parameters - ---------- - cs : array_like - 1-d array of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - # The zero series needs special treatment - if len(cs) == 1 and cs[0] == 0: - return cs - - prd = np.empty(len(cs) + 1, dtype=cs.dtype) - prd[0] = cs[0]*0 - prd[1] = cs[0] - if len(cs) > 1: - tmp = cs[1:]/2 - prd[2:] = tmp - prd[0:-2] += tmp - return prd - - -def chebmul(c1, c2): - """ - Multiply one Chebyshev series by another. - - Returns the product of two Chebyshev series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Chebyshev series coefficients representing their product. - - See Also - -------- - chebadd, chebsub, chebdiv, chebpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Chebyshev polynomial basis set. 
Thus, to express - the product as a C-series, it is typically necessary to "re-project" - the product onto said basis set, which typically produces - "un-intuitive" (but correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebmul(c1,c2) # multiplication requires "reprojection" - array([ 6.5, 12. , 12. , 4. , 1.5]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - z1 = _cseries_to_zseries(c1) - z2 = _cseries_to_zseries(c2) - prd = _zseries_mul(z1, z2) - ret = _zseries_to_cseries(prd) - return pu.trimseq(ret) - - -def chebdiv(c1, c2): - """ - Divide one Chebyshev series by another. - - Returns the quotient-with-remainder of two Chebyshev series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Chebyshev series coefficients representing the quotient and - remainder. - - See Also - -------- - chebadd, chebsub, chebmul, chebpow - - Notes - ----- - In general, the (polynomial) division of one C-series by another - results in quotient and remainder terms that are not in the Chebyshev - polynomial basis set. Thus, to express these results as C-series, it - is typically necessary to "re-project" the results onto said basis - set, which typically produces "un-intuitive" (but correct) results; - see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not - (array([ 3.]), array([-8., -4.])) - >>> c2 = (0,1,2,3) - >>> C.chebdiv(c2,c1) # neither "intuitive" - (array([ 0., 2.]), array([-2., -4.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2 : - return c1[:1]*0, c1 - elif lc2 == 1 : - return c1/c2[-1], c1[:1]*0 - else : - z1 = _cseries_to_zseries(c1) - z2 = _cseries_to_zseries(c2) - quo, rem = _zseries_div(z1, z2) - quo = pu.trimseq(_zseries_to_cseries(quo)) - rem = pu.trimseq(_zseries_to_cseries(rem)) - return quo, rem - -def chebpow(cs, pow, maxpower=16) : - """Raise a Chebyshev series to a power. - - Returns the Chebyshev series `cs` raised to the power `pow`. The - arguement `cs` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` - - Parameters - ---------- - cs : array_like - 1d array of chebyshev series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to umanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Chebyshev series of power. - - See Also - -------- - chebadd, chebsub, chebmul, chebdiv - - Examples - -------- - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - power = int(pow) - if power != pow or power < 0 : - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : - raise ValueError("Power is too large") - elif power == 0 : - return np.array([1], dtype=cs.dtype) - elif power == 1 : - return cs - else : - # This can be made more efficient by using powers of two - # in the usual way. 
- zs = _cseries_to_zseries(cs) - prd = zs - for i in range(2, power + 1) : - prd = np.convolve(prd, zs) - return _zseries_to_cseries(prd) - -def chebder(cs, m=1, scl=1) : - """ - Differentiate a Chebyshev series. - - Returns the series `cs` differentiated `m` times. At each iteration the - result is multiplied by `scl` (the scaling factor is for use in a linear - change of variable). The argument `cs` is the sequence of coefficients - from lowest order "term" to highest, e.g., [1,2,3] represents the series - ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - cs: array_like - 1-d array of Chebyshev series coefficients ordered from low to high. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - - Returns - ------- - der : ndarray - Chebyshev series of the derivative. - - See Also - -------- - chebint - - Notes - ----- - In general, the result of differentiating a C-series needs to be - "re-projected" onto the C-series basis set. Thus, typically, the - result of this function is "un-intuitive," albeit correct; see Examples - section below. 
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> cs = (1,2,3,4) - >>> C.chebder(cs) - array([ 14., 12., 24.]) - >>> C.chebder(cs,3) - array([ 96.]) - >>> C.chebder(cs,scl=-1) - array([-14., -12., -24.]) - >>> C.chebder(cs,2,-1) - array([ 12., 96.]) - - """ - cnt = int(m) - - if cnt != m: - raise ValueError, "The order of derivation must be integer" - if cnt < 0 : - raise ValueError, "The order of derivation must be non-negative" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - elif cnt >= len(cs): - return cs[:1]*0 - else : - zs = _cseries_to_zseries(cs) - for i in range(cnt): - zs = _zseries_der(zs)*scl - return _zseries_to_cseries(zs) - - -def chebint(cs, m=1, k=[], lbnd=0, scl=1): - """ - Integrate a Chebyshev series. - - Returns, as a C-series, the input C-series `cs`, integrated `m` times - from `lbnd` to `x`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `cs` is a sequence of - coefficients, from lowest order C-series "term" to highest, e.g., - [1,2,3] represents the series :math:`T_0(x) + 2T_1(x) + 3T_2(x)`. - - Parameters - ---------- - cs : array_like - 1-d array of C-series coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at zero - is the first value in the list, the value of the second integral - at zero is the second value, etc. If ``k == []`` (the default), - all constants are set to zero. If ``m == 1``, a single scalar can - be given instead of a list. 
- lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - - Returns - ------- - S : ndarray - C-series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - chebder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "re-projected" onto the C-series basis set. Thus, typically, - the result of this function is "un-intuitive," albeit correct; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> cs = (1,2,3) - >>> C.chebint(cs) - array([ 0.5, -0.5, 0.5, 0.5]) - >>> C.chebint(cs,3) - array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, - 0.00625 ]) - >>> C.chebint(cs, k=3) - array([ 3.5, -0.5, 0.5, 0.5]) - >>> C.chebint(cs,lbnd=-2) - array([ 8.5, -0.5, 0.5, 0.5]) - >>> C.chebint(cs,scl=-2) - array([-1., 1., -1., -1.]) - - """ - cnt = int(m) - if not np.iterable(k): - k = [k] - - if cnt != m: - raise ValueError, "The order of integration must be integer" - if cnt < 0 : - raise ValueError, "The order of integration must be non-negative" - if len(k) > cnt : - raise ValueError, "Too many integration constants" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : - n = len(cs) - cs *= scl - if n == 1 and cs[0] == 0: - cs[0] += k[i] - else: - zs = _cseries_to_zseries(cs) - zs = _zseries_int(zs) - cs = _zseries_to_cseries(zs) - cs[0] += k[i] - chebval(lbnd, cs) - return cs - -def chebval(x, cs): - """Evaluate a Chebyshev series. - - If `cs` is of length `n`, this function returns : - - ``p(x) = cs[0]*T_0(x) + cs[1]*T_1(x) + ... + cs[n-1]*T_{n-1}(x)`` - - If x is a sequence or array then p(x) will have the same shape as x. - If r is a ring_like object that supports multiplication and addition - by the values in `cs`, then an object of the same type is returned. - - Parameters - ---------- - x : array_like, ring_like - Array of numbers or objects that support multiplication and - addition with themselves and with the elements of `cs`. - cs : array_like - 1-d array of Chebyshev coefficients ordered from low to high. - - Returns - ------- - values : ndarray, ring_like - If the return is an ndarray then it has the same shape as `x`. - - See Also - -------- - chebfit - - Examples - -------- - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. 
- - Examples - -------- - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if isinstance(x, tuple) or isinstance(x, list) : - x = np.asarray(x) - - if len(cs) == 1 : - c0 = cs[0] - c1 = 0 - elif len(cs) == 2 : - c0 = cs[0] - c1 = cs[1] - else : - x2 = 2*x - c0 = cs[-2] - c1 = cs[-1] - for i in range(3, len(cs) + 1) : - tmp = c0 - c0 = cs[-i] - c1 - c1 = tmp + c1*x2 - return c0 + c1*x - -def chebvander(x, deg) : - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points `x`. - This isn't a true Vandermonde matrix because `x` can be an arbitrary - ndarray and the Chebyshev polynomials aren't powers. If ``V`` is the - returned matrix and `x` is a 2d array, then the elements of ``V`` are - ``V[i,j,k] = T_k(x[i,j])``, where ``T_k`` is the Chebyshev polynomial - of degree ``k``. - - Parameters - ---------- - x : array_like - Array of points. The values are converted to double or complex - doubles. If x is scalar it is converted to a 1D array. - deg : integer - Degree of the resulting matrix. - - Returns - ------- - vander : Vandermonde matrix. - The shape of the returned matrix is ``x.shape + (deg+1,)``. The last - index is the degree. - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) - # Use forward recursion to generate the entries. - v[0] = x*0 + 1 - if ideg > 0 : - x2 = 2*x - v[1] = x - for i in range(2, ideg + 1) : - v[i] = v[i-1]*x2 - v[i-2] - return np.rollaxis(v, 0, v.ndim) - - -def chebfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Chebyshev series to data. - - Return the coefficients of a Legendre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. 
If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting series - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Chebyshev coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : present when `full` = True - Residuals of the least-squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see `linalg.lstsq`. 
- - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - polyfit, legfit, lagfit, hermfit, hermefit - chebval : Evaluates a Chebyshev series. - chebvander : Vandermonde matrix of Chebyshev series. - chebweight : Chebyshev weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Chebyshev series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where :math:`w_j` are the weights. This problem is solved by setting up - as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coeficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Chebyshev series are usually better conditioned than fits - using power series, but much can depend on the distribution of the - sample points and the smoothness of the data. If the quality of the fit - is inadequate splines may be a good alternative. - - References - ---------- - .. 
[1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if len(x) != len(y): - raise TypeError, "expected x and y to have same length" - - # set up the least squares matrices - lhs = chebvander(x, deg) - rhs = y - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError, "expected 1D vector for w" - if len(x) != len(w): - raise TypeError, "expected x and w to have same length" - # apply weights - if rhs.ndim == 2: - lhs *= w[:, np.newaxis] - rhs *= w[:, np.newaxis] - else: - lhs *= w[:, np.newaxis] - rhs *= w - - # set rcond - if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps - - # scale the design matrix and solve the least squares equation - scl = np.sqrt((lhs*lhs).sum(0)) - c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full : - return c, [resids, rank, s, rcond] - else : - return c - - -def chebcompanion(cs): - """Return the scaled companion matrix of cs. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `cs` represents a single Chebyshev polynomial. This - provides better eigenvalue estimates than the unscaled case and in the - single polynomial case the eigenvalues are guaranteed to be real if - np.eigvalsh is used to obtain them. - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients ordered from low to high - degree. 
- - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(cs) == 2: - return np.array(-cs[0]/cs[1]) - - n = len(cs) - 1 - mat = np.zeros((n, n), dtype=cs.dtype) - scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[0] = np.sqrt(.5) - top[1:] = 1/2 - bot[...] = top - mat[:,-1] -= (cs[:-1]/cs[-1])*(scl/scl[-1])*.5 - return mat - - -def chebroots(cs): - """ - Compute the roots of a Chebyshev series. - - Return the roots (a.k.a "zeros") of the C-series represented by `cs`, - which is the sequence of the C-series' coefficients from lowest order - "term" to highest, e.g., [1,2,3] represents the C-series - ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - cs : array_like - 1-d array of C-series coefficients ordered from low to high. - - Returns - ------- - out : ndarray - Array of the roots. If all the roots are real, then so is the - dtype of ``out``; otherwise, ``out``'s dtype is complex. - - See Also - -------- - polyroots - - Notes - ----- - Algorithm(s) used: - - Remember: because the C-series basis set is different from the - "standard" basis set, the results of this function *may* not be what - one is expecting. - - Examples - -------- - >>> import numpy.polynomial.chebyshev as cheb - >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots - array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - return np.array([], dtype=cs.dtype) - if len(cs) == 2: - return np.array([-cs[0]/cs[1]]) - - m = chebcompanion(cs) - r = la.eigvals(m) - r.sort() - return r - - -def chebpts1(npts): - """Chebyshev points of the first kind. 
- - Chebyshev points of the first kind are the set ``{cos(x_k)}``, - where ``x_k = pi*(k + .5)/npts`` for k in ``range(npts}``. - - Parameters - ---------- - npts : int - Number of sample points desired. - - Returns - ------- - pts : ndarray - The Chebyshev points of the second kind. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - _npts = int(npts) - if _npts != npts: - raise ValueError("npts must be integer") - if _npts < 1: - raise ValueError("npts must be >= 1") - - x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts) - return np.cos(x) - - -def chebpts2(npts): - """Chebyshev points of the second kind. - - Chebyshev points of the second kind are the set ``{cos(x_k)}``, - where ``x_k = pi*/(npts - 1)`` for k in ``range(npts}``. - - Parameters - ---------- - npts : int - Number of sample points desired. - - Returns - ------- - pts : ndarray - The Chebyshev points of the second kind. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - _npts = int(npts) - if _npts != npts: - raise ValueError("npts must be integer") - if _npts < 2: - raise ValueError("npts must be >= 2") - - x = np.linspace(-np.pi, 0, _npts) - return np.cos(x) - - -# -# Chebyshev series class -# - -exec polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]') diff --git a/numpy-1.6.2/numpy/polynomial/hermite.py b/numpy-1.6.2/numpy/polynomial/hermite.py deleted file mode 100644 index 199b47e151..0000000000 --- a/numpy-1.6.2/numpy/polynomial/hermite.py +++ /dev/null @@ -1,1194 +0,0 @@ -""" -Objects for dealing with Hermite series. - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite series, including a `Hermite` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `hermdomain` -- Hermite series default domain, [-1,1]. 
-- `hermzero` -- Hermite series that evaluates identically to 0. -- `hermone` -- Hermite series that evaluates identically to 1. -- `hermx` -- Hermite series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. -- `hermadd` -- add two Hermite series. -- `hermsub` -- subtract one Hermite series from another. -- `hermmul` -- multiply two Hermite series. -- `hermdiv` -- divide one Hermite series by another. -- `hermval` -- evaluate a Hermite series at given points. - -Calculus --------- -- `hermder` -- differentiate a Hermite series. -- `hermint` -- integrate a Hermite series. - -Misc Functions --------------- -- `hermfromroots` -- create a Hermite series with specified roots. -- `hermroots` -- find the roots of a Hermite series. -- `hermvander` -- Vandermonde-like matrix for Hermite polynomials. -- `hermfit` -- least-squares fit returning a Hermite series. -- `hermtrim` -- trim leading coefficients from a Hermite series. -- `hermline` -- Hermite series of given straight line. -- `herm2poly` -- convert a Hermite series to a polynomial. -- `poly2herm` -- convert a polynomial to a Hermite series. - -Classes -------- -- `Hermite` -- A Hermite series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division - -__all__ = ['hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', - 'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermval', - 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', - 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite'] - -import numpy as np -import numpy.linalg as la -import polyutils as pu -import warnings -from polytemplate import polytemplate - -hermtrim = pu.trimcoef - -def poly2herm(pol) : - """ - poly2herm(pol) - - Convert a polynomial to a Hermite series. 
- - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Hermite series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-d array containing the polynomial coefficients - - Returns - ------- - cs : ndarray - 1-d array containing the coefficients of the equivalent Hermite - series. - - See Also - -------- - herm2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import poly2herme - >>> poly2herm(np.arange(4)) - array([ 1. , 2.75 , 0.5 , 0.375]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1) : - res = hermadd(hermmulx(res), pol[i]) - return res - - -def herm2poly(cs) : - """ - Convert a Hermite series to a polynomial. - - Convert an array representing the coefficients of a Hermite series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - cs : array_like - 1-d array containing the Hermite series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-d array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2herm - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite import herm2poly - >>> herm2poly([ 1. 
, 2.75 , 0.5 , 0.375]) - array([ 0., 1., 2., 3.]) - - """ - from polynomial import polyadd, polysub, polymulx - - [cs] = pu.as_series([cs]) - n = len(cs) - if n == 1: - return cs - if n == 2: - cs[1] *= 2 - return cs - else: - c0 = cs[-2] - c1 = cs[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1) : - tmp = c0 - c0 = polysub(cs[i - 2], c1*(2*(i - 1))) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)*2) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Hermite -hermdomain = np.array([-1,1]) - -# Hermite coefficients representing zero. -hermzero = np.array([0]) - -# Hermite coefficients representing one. -hermone = np.array([1]) - -# Hermite coefficients representing the identity x. -hermx = np.array([0, 1/2]) - - -def hermline(off, scl) : - """ - Hermite series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Hermite series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.hermite import hermline, hermval - >>> hermval(0,hermline(3, 2)) - 3.0 - >>> hermval(1,hermline(3, 2)) - 5.0 - - """ - if scl != 0 : - return np.array([off,scl/2]) - else : - return np.array([off]) - - -def hermfromroots(roots) : - """ - Generate a Hermite series with the given roots. - - Return the array of coefficients for the P-series whose roots (a.k.a. - "zeros") are given by *roots*. The returned array of coefficients is - ordered from lowest order "term" to highest, and zeros of multiplicity - greater than one must be included in *roots* a number of times equal - to their multiplicity (e.g., if `2` is a root of multiplicity three, - then [2,2,2] must be in *roots*). 
- - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-d array of the Hermite series coefficients, ordered from low to - high. If all roots are real, ``out.dtype`` is a float type; - otherwise, ``out.dtype`` is a complex type, even if all the - coefficients in the result are real (see Examples below). - - See Also - -------- - polyfromroots, chebfromroots - - Notes - ----- - What is returned are the :math:`c_i` such that: - - .. math:: - - \\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i]) - - where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Hermite - (basis) polynomial over the domain `[-1,1]`. Note that, unlike - `polyfromroots`, due to the nature of the Hermite basis set, the - above identity *does not* imply :math:`c_n = 1` identically (see - Examples). - - Examples - -------- - >>> from numpy.polynomial.hermite import hermfromroots, hermval - >>> coef = hermfromroots((-1, 0, 1)) - >>> hermval((-1, 0, 1), coef) - array([ 0., 0., 0.]) - >>> coef = hermfromroots((-1j, 1j)) - >>> hermval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) - - """ - if len(roots) == 0 : - return np.ones(1) - else : - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [hermline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [hermmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = hermmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def hermadd(c1, c2): - """ - Add one Hermite series to another. - - Returns the sum of two Hermite series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Hermite series of their sum. 
- - See Also - -------- - hermsub, hermmul, hermdiv, hermpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Hermite series - is a Hermite series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite import hermadd - >>> hermadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] += c2 - ret = c1 - else : - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermsub(c1, c2): - """ - Subtract one Hermite series from another. - - Returns the difference of two Hermite series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their difference. - - See Also - -------- - hermadd, hermmul, hermdiv, hermpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Hermite - series is a Hermite series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite import hermsub - >>> hermsub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] -= c2 - ret = c1 - else : - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermmulx(cs): - """Multiply a Hermite series by x. - - Multiply the Hermite series `cs` by x, where x is the independent - variable. 
- - - Parameters - ---------- - cs : array_like - 1-d array of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Hermite - polynomials in the form - - .. math:: - - xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.hermite import hermmulx - >>> hermmulx([1, 2, 3]) - array([ 2. , 6.5, 1. , 1.5]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - # The zero series needs special treatment - if len(cs) == 1 and cs[0] == 0: - return cs - - prd = np.empty(len(cs) + 1, dtype=cs.dtype) - prd[0] = cs[0]*0 - prd[1] = cs[0]/2 - for i in range(1, len(cs)): - prd[i + 1] = cs[i]/2 - prd[i - 1] += cs[i]*i - return prd - - -def hermmul(c1, c2): - """ - Multiply one Hermite series by another. - - Returns the product of two Hermite series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their product. - - See Also - -------- - hermadd, hermsub, hermdiv, hermpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Hermite polynomial basis set. Thus, to express - the product as a Hermite series, it is necessary to "re-project" the - product onto said basis set, which may produce "un-intuitive" (but - correct) results; see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermmul - >>> hermmul([1, 2, 3], [0, 1, 2]) - array([ 52., 29., 52., 7., 6.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - cs = c2 - xs = c1 - else: - cs = c1 - xs = c2 - - if len(cs) == 1: - c0 = cs[0]*xs - c1 = 0 - elif len(cs) == 2: - c0 = cs[0]*xs - c1 = cs[1]*xs - else : - nd = len(cs) - c0 = cs[-2]*xs - c1 = cs[-1]*xs - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = hermsub(cs[-i]*xs, c1*(2*(nd - 1))) - c1 = hermadd(tmp, hermmulx(c1)*2) - return hermadd(c0, hermmulx(c1)*2) - - -def hermdiv(c1, c2): - """ - Divide one Hermite series by another. - - Returns the quotient-with-remainder of two Hermite series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Hermite series coefficients representing the quotient and - remainder. - - See Also - -------- - hermadd, hermsub, hermmul, hermpow - - Notes - ----- - In general, the (polynomial) division of one Hermite series by another - results in quotient and remainder terms that are not in the Hermite - polynomial basis set. Thus, to express these results as a Hermite - series, it is necessary to "re-project" the results onto the Hermite - basis set, which may produce "un-intuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermdiv - >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) - >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 2., 2.])) - >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 1.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2 : - return c1[:1]*0, c1 - elif lc2 == 1 : - return c1/c2[-1], c1[:1]*0 - else : - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = hermmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def hermpow(cs, pow, maxpower=16) : - """Raise a Hermite series to a power. - - Returns the Hermite series `cs` raised to the power `pow`. The - arguement `cs` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - cs : array_like - 1d array of Hermite series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to umanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Hermite series of power. 
- - See Also - -------- - hermadd, hermsub, hermmul, hermdiv - - Examples - -------- - >>> from numpy.polynomial.hermite import hermpow - >>> hermpow([1, 2, 3], 2) - array([ 81., 52., 82., 12., 9.]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - power = int(pow) - if power != pow or power < 0 : - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : - raise ValueError("Power is too large") - elif power == 0 : - return np.array([1], dtype=cs.dtype) - elif power == 1 : - return cs - else : - # This can be made more efficient by using powers of two - # in the usual way. - prd = cs - for i in range(2, power + 1) : - prd = hermmul(prd, cs) - return prd - - -def hermder(cs, m=1, scl=1) : - """ - Differentiate a Hermite series. - - Returns the series `cs` differentiated `m` times. At each iteration the - result is multiplied by `scl` (the scaling factor is for use in a linear - change of variable). The argument `cs` is the sequence of coefficients - from lowest order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - cs: array_like - 1-d array of Hermite series coefficients ordered from low to high. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - - Returns - ------- - der : ndarray - Hermite series of the derivative. - - See Also - -------- - hermint - - Notes - ----- - In general, the result of differentiating a Hermite series does not - resemble the same operation on a power series. Thus the result of this - function may be "un-intuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermder - >>> hermder([ 1. 
, 0.5, 0.5, 0.5]) - array([ 1., 2., 3.]) - >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) - array([ 1., 2., 3.]) - - """ - cnt = int(m) - - if cnt != m: - raise ValueError, "The order of derivation must be integer" - if cnt < 0 : - raise ValueError, "The order of derivation must be non-negative" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - elif cnt >= len(cs): - return cs[:1]*0 - else : - for i in range(cnt): - n = len(cs) - 1 - cs *= scl - der = np.empty(n, dtype=cs.dtype) - for j in range(n, 0, -1): - der[j - 1] = (2*j)*cs[j] - cs = der - return cs - - -def hermint(cs, m=1, k=[], lbnd=0, scl=1): - """ - Integrate a Hermite series. - - Returns a Hermite series that is the Hermite series `cs`, integrated - `m` times from `lbnd` to `x`. At each iteration the resulting series - is **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `cs` is a sequence of - coefficients, from lowest order Hermite series "term" to highest, - e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`. - - Parameters - ---------- - cs : array_like - 1-d array of Hermite series coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. 
(Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - - Returns - ------- - S : ndarray - Hermite series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - hermder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "re-projected" onto the C-series basis set. Thus, typically, - the result of this function is "un-intuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermint - >>> hermint([1,2,3]) # integrate once, value 0 at 0. - array([ 1. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) - >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. - array([ 2. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 - array([-2. 
, 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) - array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) - - """ - cnt = int(m) - if np.isscalar(k) : - k = [k] - - if cnt != m: - raise ValueError, "The order of integration must be integer" - if cnt < 0 : - raise ValueError, "The order of integration must be non-negative" - if len(k) > cnt : - raise ValueError, "Too many integration constants" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : - n = len(cs) - cs *= scl - if n == 1 and cs[0] == 0: - cs[0] += k[i] - else: - tmp = np.empty(n + 1, dtype=cs.dtype) - tmp[0] = cs[0]*0 - tmp[1] = cs[0]/2 - for j in range(1, n): - tmp[j + 1] = cs[j]/(2*(j + 1)) - tmp[0] += k[i] - hermval(lbnd, tmp) - cs = tmp - return cs - - -def hermval(x, cs): - """Evaluate a Hermite series. - - If `cs` is of length `n`, this function returns : - - ``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)`` - - If x is a sequence or array then p(x) will have the same shape as x. - If r is a ring_like object that supports multiplication and addition - by the values in `cs`, then an object of the same type is returned. - - Parameters - ---------- - x : array_like, ring_like - Array of numbers or objects that support multiplication and - addition with themselves and with the elements of `cs`. - cs : array_like - 1-d array of Hermite coefficients ordered from low to high. - - Returns - ------- - values : ndarray, ring_like - If the return is an ndarray then it has the same shape as `x`. - - See Also - -------- - hermfit - - Examples - -------- - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermval - >>> coef = [1,2,3] - >>> hermval(1, coef) - 11.0 - >>> hermval([[1,2],[3,4]], coef) - array([[ 11., 51.], - [ 115., 203.]]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if isinstance(x, tuple) or isinstance(x, list) : - x = np.asarray(x) - - x2 = x*2 - if len(cs) == 1 : - c0 = cs[0] - c1 = 0 - elif len(cs) == 2 : - c0 = cs[0] - c1 = cs[1] - else : - nd = len(cs) - c0 = cs[-2] - c1 = cs[-1] - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = cs[-i] - c1*(2*(nd - 1)) - c1 = tmp + c1*x2 - return c0 + c1*x2 - - -def hermvander(x, deg) : - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points `x`. - This isn't a true Vandermonde matrix because `x` can be an arbitrary - ndarray and the Hermite polynomials aren't powers. If ``V`` is the - returned matrix and `x` is a 2d array, then the elements of ``V`` are - ``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Hermite polynomial - of degree ``k``. - - Parameters - ---------- - x : array_like - Array of points. The values are converted to double or complex - doubles. If x is scalar it is converted to a 1D array. - deg : integer - Degree of the resulting matrix. - - Returns - ------- - vander : Vandermonde matrix. - The shape of the returned matrix is ``x.shape + (deg+1,)``. The last - index is the degree. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermvander - >>> x = np.array([-1, 0, 1]) - >>> hermvander(x, 3) - array([[ 1., -2., 2., 4.], - [ 1., 0., -2., -0.], - [ 1., 2., 2., -4.]]) - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) - v[0] = x*0 + 1 - if ideg > 0 : - x2 = x*2 - v[1] = x2 - for i in range(2, ideg + 1) : - v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) - return np.rollaxis(v, 0, v.ndim) - - -def hermfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Hermite series to data. - - Return the coefficients of a Hermite series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. 
When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Hermite coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : present when `full` = True - Residuals of the least-squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, lagfit, polyfit, hermefit - hermval : Evaluates a Hermite series. - hermvander : Vandermonde matrix of Hermite series. - hermweight : Hermite weight function - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Hermite series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) overdetermined matrix equation - - .. 
math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coeficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Hermite series are probably most useful when the data can be - approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite - weight. In that case the wieght ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `hermweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.hermite import hermfit, hermval - >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = hermval(x, [1, 2, 3]) + err - >>> hermfit(x, y, 2) - array([ 0.97902637, 1.99849131, 3.00006 ]) - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. 
- if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if len(x) != len(y): - raise TypeError, "expected x and y to have same length" - - # set up the least squares matrices - lhs = hermvander(x, deg) - rhs = y - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError, "expected 1D vector for w" - if len(x) != len(w): - raise TypeError, "expected x and w to have same length" - # apply weights - if rhs.ndim == 2: - lhs *= w[:, np.newaxis] - rhs *= w[:, np.newaxis] - else: - lhs *= w[:, np.newaxis] - rhs *= w - - # set rcond - if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps - - # scale the design matrix and solve the least squares equation - scl = np.sqrt((lhs*lhs).sum(0)) - c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full : - return c, [resids, rank, s, rcond] - else : - return c - - -def hermcompanion(cs): - """Return the scaled companion matrix of cs. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `cs` represents a single Hermite polynomial. This - provides better eigenvalue estimates than the unscaled case and in the - single polynomial case the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). 
- - """ - accprod = np.multiply.accumulate - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(cs) == 2: - return np.array(-.5*cs[0]/cs[1]) - - n = len(cs) - 1 - mat = np.zeros((n, n), dtype=cs.dtype) - scl = np.hstack((1., np.sqrt(2.*np.arange(1,n)))) - scl = np.multiply.accumulate(scl) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(.5*np.arange(1,n)) - bot[...] = top - mat[:,-1] -= (cs[:-1]/cs[-1])*(scl/scl[-1])*.5 - return mat - - -def hermroots(cs): - """ - Compute the roots of a Hermite series. - - Return the roots (a.k.a "zeros") of the Hermite series represented by - `cs`, which is the sequence of coefficients from lowest order "term" - to highest, e.g., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2``. - - Parameters - ---------- - cs : array_like - 1-d array of Hermite series coefficients ordered from low to high. - - Returns - ------- - out : ndarray - Array of the roots. If all the roots are real, then so is the - dtype of ``out``; otherwise, ``out``'s dtype is complex. - - See Also - -------- - polyroots - chebroots - - Notes - ----- - Algorithm(s) used: - - Remember: because the Hermite series basis set is different from the - "standard" basis set, the results of this function *may* not be what - one is expecting. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermroots, hermfromroots - >>> coef = hermfromroots([-1, 0, 1]) - >>> coef - array([ 0. , 0.25 , 0. 
, 0.125]) - >>> hermroots(coef) - array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) <= 1 : - return np.array([], dtype=cs.dtype) - if len(cs) == 2 : - return np.array([-.5*cs[0]/cs[1]]) - - m = hermcompanion(cs) - r = la.eigvals(m) - r.sort() - return r - - -# -# Hermite series class -# - -exec polytemplate.substitute(name='Hermite', nick='herm', domain='[-1,1]') diff --git a/numpy-1.6.2/numpy/polynomial/hermite_e.py b/numpy-1.6.2/numpy/polynomial/hermite_e.py deleted file mode 100644 index d0efe7209b..0000000000 --- a/numpy-1.6.2/numpy/polynomial/hermite_e.py +++ /dev/null @@ -1,1190 +0,0 @@ -""" -Objects for dealing with Hermite series. - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite series, including a `Hermite` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `hermedomain` -- Hermite series default domain, [-1,1]. -- `hermezero` -- Hermite series that evaluates identically to 0. -- `hermeone` -- Hermite series that evaluates identically to 1. -- `hermex` -- Hermite series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `hermemulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. -- `hermeadd` -- add two Hermite series. -- `hermesub` -- subtract one Hermite series from another. -- `hermemul` -- multiply two Hermite series. -- `hermediv` -- divide one Hermite series by another. -- `hermeval` -- evaluate a Hermite series at given points. - -Calculus --------- -- `hermeder` -- differentiate a Hermite series. -- `hermeint` -- integrate a Hermite series. - -Misc Functions --------------- -- `hermefromroots` -- create a Hermite series with specified roots. -- `hermeroots` -- find the roots of a Hermite series. 
-- `hermevander` -- Vandermonde-like matrix for Hermite polynomials. -- `hermefit` -- least-squares fit returning a Hermite series. -- `hermetrim` -- trim leading coefficients from a Hermite series. -- `hermeline` -- Hermite series of given straight line. -- `herme2poly` -- convert a Hermite series to a polynomial. -- `poly2herme` -- convert a polynomial to a Hermite series. - -Classes -------- -- `Hermite` -- A Hermite series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division - -import numpy as np -import numpy.linalg as la -import polyutils as pu -import warnings -from polytemplate import polytemplate - -__all__ = ['hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', - 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermeval', - 'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots', - 'hermevander', 'hermefit', 'hermetrim', 'hermeroots', 'HermiteE'] - -hermetrim = pu.trimcoef - -def poly2herme(pol) : - """ - poly2herme(pol) - - Convert a polynomial to a Hermite series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Hermite series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-d array containing the polynomial coefficients - - Returns - ------- - cs : ndarray - 1-d array containing the coefficients of the equivalent Hermite - series. - - See Also - -------- - herme2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import poly2herme - >>> poly2herme(np.arange(4)) - array([ 2., 10., 2., 3.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1) : - res = hermeadd(hermemulx(res), pol[i]) - return res - - -def herme2poly(cs) : - """ - Convert a Hermite series to a polynomial. - - Convert an array representing the coefficients of a Hermite series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - cs : array_like - 1-d array containing the Hermite series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-d array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2herme - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import herme2poly - >>> herme2poly([ 2., 10., 2., 3.]) - array([ 0., 1., 2., 3.]) - - """ - from polynomial import polyadd, polysub, polymulx - - [cs] = pu.as_series([cs]) - n = len(cs) - if n == 1: - return cs - if n == 2: - return cs - else: - c0 = cs[-2] - c1 = cs[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1) : - tmp = c0 - c0 = polysub(cs[i - 2], c1*(i - 1)) - c1 = polyadd(tmp, polymulx(c1)) - return polyadd(c0, polymulx(c1)) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Hermite -hermedomain = np.array([-1,1]) - -# Hermite coefficients representing zero. -hermezero = np.array([0]) - -# Hermite coefficients representing one. 
-hermeone = np.array([1]) - -# Hermite coefficients representing the identity x. -hermex = np.array([0, 1]) - - -def hermeline(off, scl) : - """ - Hermite series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Hermite series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeline - >>> from numpy.polynomial.hermite_e import hermeline, hermeval - >>> hermeval(0,hermeline(3, 2)) - 3.0 - >>> hermeval(1,hermeline(3, 2)) - 5.0 - - """ - if scl != 0 : - return np.array([off,scl]) - else : - return np.array([off]) - - -def hermefromroots(roots) : - """ - Generate a Hermite series with the given roots. - - Return the array of coefficients for the P-series whose roots (a.k.a. - "zeros") are given by *roots*. The returned array of coefficients is - ordered from lowest order "term" to highest, and zeros of multiplicity - greater than one must be included in *roots* a number of times equal - to their multiplicity (e.g., if `2` is a root of multiplicity three, - then [2,2,2] must be in *roots*). - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-d array of the Hermite series coefficients, ordered from low to - high. If all roots are real, ``out.dtype`` is a float type; - otherwise, ``out.dtype`` is a complex type, even if all the - coefficients in the result are real (see Examples below). - - See Also - -------- - polyfromroots, chebfromroots - - Notes - ----- - What is returned are the :math:`c_i` such that: - - .. math:: - - \\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i]) - - where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Hermite - (basis) polynomial over the domain `[-1,1]`. 
Note that, unlike - `polyfromroots`, due to the nature of the Hermite basis set, the - above identity *does not* imply :math:`c_n = 1` identically (see - Examples). - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval - >>> coef = hermefromroots((-1, 0, 1)) - >>> hermeval((-1, 0, 1), coef) - array([ 0., 0., 0.]) - >>> coef = hermefromroots((-1j, 1j)) - >>> hermeval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) - - """ - if len(roots) == 0 : - return np.ones(1) - else : - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [hermeline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [hermemul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = hermemul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def hermeadd(c1, c2): - """ - Add one Hermite series to another. - - Returns the sum of two Hermite series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Hermite series of their sum. - - See Also - -------- - hermesub, hermemul, hermediv, hermepow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Hermite series - is a Hermite series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeadd - >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] += c2 - ret = c1 - else : - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermesub(c1, c2): - """ - Subtract one Hermite series from another. - - Returns the difference of two Hermite series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their difference. - - See Also - -------- - hermeadd, hermemul, hermediv, hermepow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Hermite - series is a Hermite series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermesub - >>> hermesub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] -= c2 - ret = c1 - else : - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermemulx(cs): - """Multiply a Hermite series by x. - - Multiply the Hermite series `cs` by x, where x is the independent - variable. - - - Parameters - ---------- - cs : array_like - 1-d array of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Hermite - polynomials in the form - - .. 
math:: - - xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermemulx - >>> hermemulx([1, 2, 3]) - array([ 2., 7., 2., 3.]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - # The zero series needs special treatment - if len(cs) == 1 and cs[0] == 0: - return cs - - prd = np.empty(len(cs) + 1, dtype=cs.dtype) - prd[0] = cs[0]*0 - prd[1] = cs[0] - for i in range(1, len(cs)): - prd[i + 1] = cs[i] - prd[i - 1] += cs[i]*i - return prd - - -def hermemul(c1, c2): - """ - Multiply one Hermite series by another. - - Returns the product of two Hermite series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their product. - - See Also - -------- - hermeadd, hermesub, hermediv, hermepow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Hermite polynomial basis set. Thus, to express - the product as a Hermite series, it is necessary to "re-project" the - product onto said basis set, which may produce "un-intuitive" (but - correct) results; see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermemul - >>> hermemul([1, 2, 3], [0, 1, 2]) - array([ 14., 15., 28., 7., 6.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - cs = c2 - xs = c1 - else: - cs = c1 - xs = c2 - - if len(cs) == 1: - c0 = cs[0]*xs - c1 = 0 - elif len(cs) == 2: - c0 = cs[0]*xs - c1 = cs[1]*xs - else : - nd = len(cs) - c0 = cs[-2]*xs - c1 = cs[-1]*xs - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = hermesub(cs[-i]*xs, c1*(nd - 1)) - c1 = hermeadd(tmp, hermemulx(c1)) - return hermeadd(c0, hermemulx(c1)) - - -def hermediv(c1, c2): - """ - Divide one Hermite series by another. - - Returns the quotient-with-remainder of two Hermite series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Hermite series coefficients representing the quotient and - remainder. - - See Also - -------- - hermeadd, hermesub, hermemul, hermepow - - Notes - ----- - In general, the (polynomial) division of one Hermite series by another - results in quotient and remainder terms that are not in the Hermite - polynomial basis set. Thus, to express these results as a Hermite - series, it is necessary to "re-project" the results onto the Hermite - basis set, which may produce "un-intuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermediv - >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) - >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 2.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2 : - return c1[:1]*0, c1 - elif lc2 == 1 : - return c1/c2[-1], c1[:1]*0 - else : - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = hermemul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def hermepow(cs, pow, maxpower=16) : - """Raise a Hermite series to a power. - - Returns the Hermite series `cs` raised to the power `pow`. The - arguement `cs` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - cs : array_like - 1d array of Hermite series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to umanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Hermite series of power. 
- - See Also - -------- - hermeadd, hermesub, hermemul, hermediv - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermepow - >>> hermepow([1, 2, 3], 2) - array([ 23., 28., 46., 12., 9.]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - power = int(pow) - if power != pow or power < 0 : - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : - raise ValueError("Power is too large") - elif power == 0 : - return np.array([1], dtype=cs.dtype) - elif power == 1 : - return cs - else : - # This can be made more efficient by using powers of two - # in the usual way. - prd = cs - for i in range(2, power + 1) : - prd = hermemul(prd, cs) - return prd - - -def hermeder(cs, m=1, scl=1) : - """ - Differentiate a Hermite series. - - Returns the series `cs` differentiated `m` times. At each iteration the - result is multiplied by `scl` (the scaling factor is for use in a linear - change of variable). The argument `cs` is the sequence of coefficients - from lowest order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - cs: array_like - 1-d array of Hermite series coefficients ordered from low to high. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - - Returns - ------- - der : ndarray - Hermite series of the derivative. - - See Also - -------- - hermeint - - Notes - ----- - In general, the result of differentiating a Hermite series does not - resemble the same operation on a power series. Thus the result of this - function may be "un-intuitive," albeit correct; see Examples section - below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeder - >>> hermeder([ 1., 1., 1., 1.]) - array([ 1., 2., 3.]) - >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) - array([ 1., 2., 3.]) - - """ - cnt = int(m) - - if cnt != m: - raise ValueError, "The order of derivation must be integer" - if cnt < 0 : - raise ValueError, "The order of derivation must be non-negative" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - elif cnt >= len(cs): - return cs[:1]*0 - else : - for i in range(cnt): - n = len(cs) - 1 - cs *= scl - der = np.empty(n, dtype=cs.dtype) - for j in range(n, 0, -1): - der[j - 1] = j*cs[j] - cs = der - return cs - - -def hermeint(cs, m=1, k=[], lbnd=0, scl=1): - """ - Integrate a Hermite series. - - Returns a Hermite series that is the Hermite series `cs`, integrated - `m` times from `lbnd` to `x`. At each iteration the resulting series - is **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `cs` is a sequence of - coefficients, from lowest order Hermite series "term" to highest, - e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`. - - Parameters - ---------- - cs : array_like - 1-d array of Hermite series coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. 
- lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - - Returns - ------- - S : ndarray - Hermite series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - hermeder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "re-projected" onto the C-series basis set. Thus, typically, - the result of this function is "un-intuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeint - >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. - array([ 1., 1., 1., 1.]) - >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) - >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. - array([ 2., 1., 1., 1.]) - >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 - array([-1., 1., 1., 1.]) - >>> hermeint([1, 2, 3], m=2, k=[1,2], lbnd=-1) - array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) - - """ - cnt = int(m) - if np.isscalar(k) : - k = [k] - - if cnt != m: - raise ValueError, "The order of integration must be integer" - if cnt < 0 : - raise ValueError, "The order of integration must be non-negative" - if len(k) > cnt : - raise ValueError, "Too many integration constants" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : - n = len(cs) - cs *= scl - if n == 1 and cs[0] == 0: - cs[0] += k[i] - else: - tmp = np.empty(n + 1, dtype=cs.dtype) - tmp[0] = cs[0]*0 - tmp[1] = cs[0] - for j in range(1, n): - tmp[j + 1] = cs[j]/(j + 1) - tmp[0] += k[i] - hermeval(lbnd, tmp) - cs = tmp - return cs - - -def hermeval(x, cs): - """Evaluate a Hermite series. - - If `cs` is of length `n`, this function returns : - - ``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)`` - - If x is a sequence or array then p(x) will have the same shape as x. - If r is a ring_like object that supports multiplication and addition - by the values in `cs`, then an object of the same type is returned. - - Parameters - ---------- - x : array_like, ring_like - Array of numbers or objects that support multiplication and - addition with themselves and with the elements of `cs`. - cs : array_like - 1-d array of Hermite coefficients ordered from low to high. - - Returns - ------- - values : ndarray, ring_like - If the return is an ndarray then it has the same shape as `x`. - - See Also - -------- - hermefit - - Examples - -------- - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeval - >>> coef = [1,2,3] - >>> hermeval(1, coef) - 3.0 - >>> hermeval([[1,2],[3,4]], coef) - array([[ 3., 14.], - [ 31., 54.]]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if isinstance(x, tuple) or isinstance(x, list) : - x = np.asarray(x) - - if len(cs) == 1 : - c0 = cs[0] - c1 = 0 - elif len(cs) == 2 : - c0 = cs[0] - c1 = cs[1] - else : - nd = len(cs) - c0 = cs[-2] - c1 = cs[-1] - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = cs[-i] - c1*(nd - 1) - c1 = tmp + c1*x - return c0 + c1*x - - -def hermevander(x, deg) : - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points `x`. - This isn't a true Vandermonde matrix because `x` can be an arbitrary - ndarray and the Hermite polynomials aren't powers. If ``V`` is the - returned matrix and `x` is a 2d array, then the elements of ``V`` are - ``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Hermite polynomial - of degree ``k``. - - Parameters - ---------- - x : array_like - Array of points. The values are converted to double or complex - doubles. If x is scalar it is converted to a 1D array. - deg : integer - Degree of the resulting matrix. - - Returns - ------- - vander : Vandermonde matrix. - The shape of the returned matrix is ``x.shape + (deg+1,)``. The last - index is the degree. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermevander - >>> x = np.array([-1, 0, 1]) - >>> hermevander(x, 3) - array([[ 1., -1., 0., 2.], - [ 1., 0., -1., -0.], - [ 1., 1., 0., -2.]]) - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) - v[0] = x*0 + 1 - if ideg > 0 : - v[1] = x - for i in range(2, ideg + 1) : - v[i] = (v[i-1]*x - v[i-2]*(i - 1)) - return np.rollaxis(v, 0, v.ndim) - - -def hermefit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Hermite series to data. - - Return the coefficients of a HermiteE series of degree `deg` that is - the least squares fit to the data values `y` given at points `x`. If - `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D - multiple fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. 
When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Hermite coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : present when `full` = True - Residuals of the least-squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, polyfit, hermfit, polyfit - hermeval : Evaluates a Hermite series. - hermevander : pseudo Vandermonde matrix of Hermite series. - hermeweight : HermiteE weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the HermiteE series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) overdetermined matrix equation - - .. 
math:: V(x) * c = w * y, - - where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` - are the coefficients to be solved for, and the elements of `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coeficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using HermiteE series are probably most useful when the data can - be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE - weight. In that case the wieght ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `hermeweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermefik, hermeval - >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = hermeval(x, [1, 2, 3]) + err - >>> hermefit(x, y, 2) - array([ 1.01690445, 1.99951418, 2.99948696]) - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. 
- if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if len(x) != len(y): - raise TypeError, "expected x and y to have same length" - - # set up the least squares matrices - lhs = hermevander(x, deg) - rhs = y - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError, "expected 1D vector for w" - if len(x) != len(w): - raise TypeError, "expected x and w to have same length" - # apply weights - if rhs.ndim == 2: - lhs *= w[:, np.newaxis] - rhs *= w[:, np.newaxis] - else: - lhs *= w[:, np.newaxis] - rhs *= w - - # set rcond - if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps - - # scale the design matrix and solve the least squares equation - scl = np.sqrt((lhs*lhs).sum(0)) - c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full : - return c, [resids, rank, s, rcond] - else : - return c - - -def hermecompanion(cs): - """Return the scaled companion matrix of cs. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `cs` represents a single HermiteE polynomial. This - provides better eigenvalue estimates than the unscaled case and in the - single polynomial case the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). 
- - """ - accprod = np.multiply.accumulate - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(cs) == 2: - return np.array(-cs[0]/cs[1]) - - n = len(cs) - 1 - mat = np.zeros((n, n), dtype=cs.dtype) - scl = np.hstack((1., np.sqrt(np.arange(1,n)))) - scl = np.multiply.accumulate(scl) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(np.arange(1,n)) - bot[...] = top - mat[:,-1] -= (cs[:-1]/cs[-1])*(scl/scl[-1]) - return mat - - -def hermeroots(cs): - """ - Compute the roots of a Hermite series. - - Return the roots (a.k.a "zeros") of the HermiteE series represented by - `cs`, which is the sequence of coefficients from lowest order "term" - to highest, e.g., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2``. - - Parameters - ---------- - cs : array_like - 1-d array of HermiteE series coefficients ordered from low to high. - - Returns - ------- - out : ndarray - Array of the roots. If all the roots are real, then so is the - dtype of ``out``; otherwise, ``out``'s dtype is complex. - - See Also - -------- - polyroots - chebroots - - Notes - ----- - Algorithm(s) used: - - Remember: because the Hermite series basis set is different from the - "standard" basis set, the results of this function *may* not be what - one is expecting. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots - >>> coef = hermefromroots([-1, 0, 1]) - >>> coef - array([ 0., 2., 0., 1.]) - >>> hermeroots(coef) - array([-1., 0., 1.]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) <= 1 : - return np.array([], dtype=cs.dtype) - if len(cs) == 2 : - return np.array([-cs[0]/cs[1]]) - - m = hermecompanion(cs) - r = la.eigvals(m) - r.sort() - return r - - -# -# HermiteE series class -# - -exec polytemplate.substitute(name='HermiteE', nick='herme', domain='[-1,1]') diff --git a/numpy-1.6.2/numpy/polynomial/laguerre.py b/numpy-1.6.2/numpy/polynomial/laguerre.py deleted file mode 100644 index 7b518404f8..0000000000 --- a/numpy-1.6.2/numpy/polynomial/laguerre.py +++ /dev/null @@ -1,1189 +0,0 @@ -""" -Objects for dealing with Laguerre series. - -This module provides a number of objects (mostly functions) useful for -dealing with Laguerre series, including a `Laguerre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `lagdomain` -- Laguerre series default domain, [-1,1]. -- `lagzero` -- Laguerre series that evaluates identically to 0. -- `lagone` -- Laguerre series that evaluates identically to 1. -- `lagx` -- Laguerre series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. -- `lagadd` -- add two Laguerre series. -- `lagsub` -- subtract one Laguerre series from another. -- `lagmul` -- multiply two Laguerre series. -- `lagdiv` -- divide one Laguerre series by another. -- `lagval` -- evaluate a Laguerre series at given points. - -Calculus --------- -- `lagder` -- differentiate a Laguerre series. -- `lagint` -- integrate a Laguerre series. 
- -Misc Functions --------------- -- `lagfromroots` -- create a Laguerre series with specified roots. -- `lagroots` -- find the roots of a Laguerre series. -- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials. -- `lagfit` -- least-squares fit returning a Laguerre series. -- `lagtrim` -- trim leading coefficients from a Laguerre series. -- `lagline` -- Laguerre series of given straight line. -- `lag2poly` -- convert a Laguerre series to a polynomial. -- `poly2lag` -- convert a polynomial to a Laguerre series. - -Classes -------- -- `Laguerre` -- A Laguerre series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division - -__all__ = ['lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', - 'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagval', - 'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', - 'lagvander', 'lagfit', 'lagtrim', 'lagroots', 'Laguerre'] - -import numpy as np -import numpy.linalg as la -import polyutils as pu -import warnings -from polytemplate import polytemplate - -lagtrim = pu.trimcoef - -def poly2lag(pol) : - """ - poly2lag(pol) - - Convert a polynomial to a Laguerre series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Laguerre series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-d array containing the polynomial coefficients - - Returns - ------- - cs : ndarray - 1-d array containing the coefficients of the equivalent Laguerre - series. - - See Also - -------- - lag2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import poly2lag - >>> poly2lag(np.arange(4)) - array([ 23., -63., 58., -18.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1) : - res = lagadd(lagmulx(res), pol[i]) - return res - - -def lag2poly(cs) : - """ - Convert a Laguerre series to a polynomial. - - Convert an array representing the coefficients of a Laguerre series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - cs : array_like - 1-d array containing the Laguerre series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-d array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2lag - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lag2poly - >>> lag2poly([ 23., -63., 58., -18.]) - array([ 0., 1., 2., 3.]) - - """ - from polynomial import polyadd, polysub, polymulx - - [cs] = pu.as_series([cs]) - n = len(cs) - if n == 1: - return cs - else: - c0 = cs[-2] - c1 = cs[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(cs[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) - return polyadd(c0, polysub(c1, polymulx(c1))) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Laguerre -lagdomain = np.array([0,1]) - -# Laguerre coefficients representing zero. -lagzero = np.array([0]) - -# Laguerre coefficients representing one. 
-lagone = np.array([1]) - -# Laguerre coefficients representing the identity x. -lagx = np.array([1, -1]) - - -def lagline(off, scl) : - """ - Laguerre series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Laguerre series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagline, lagval - >>> lagval(0,lagline(3, 2)) - 3.0 - >>> lagval(1,lagline(3, 2)) - 5.0 - - """ - if scl != 0 : - return np.array([off + scl, -scl]) - else : - return np.array([off]) - - -def lagfromroots(roots) : - """ - Generate a Laguerre series with the given roots. - - Return the array of coefficients for the P-series whose roots (a.k.a. - "zeros") are given by *roots*. The returned array of coefficients is - ordered from lowest order "term" to highest, and zeros of multiplicity - greater than one must be included in *roots* a number of times equal - to their multiplicity (e.g., if `2` is a root of multiplicity three, - then [2,2,2] must be in *roots*). - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-d array of the Laguerre series coefficients, ordered from low to - high. If all roots are real, ``out.dtype`` is a float type; - otherwise, ``out.dtype`` is a complex type, even if all the - coefficients in the result are real (see Examples below). - - See Also - -------- - polyfromroots, chebfromroots - - Notes - ----- - What is returned are the :math:`c_i` such that: - - .. math:: - - \\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i]) - - where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Laguerre - (basis) polynomial over the domain `[-1,1]`. 
Note that, unlike - `polyfromroots`, due to the nature of the Laguerre basis set, the - above identity *does not* imply :math:`c_n = 1` identically (see - Examples). - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagfromroots, lagval - >>> coef = lagfromroots((-1, 0, 1)) - >>> lagval((-1, 0, 1), coef) - array([ 0., 0., 0.]) - >>> coef = lagfromroots((-1j, 1j)) - >>> lagval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) - - """ - if len(roots) == 0 : - return np.ones(1) - else : - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [lagline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [lagmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = lagmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def lagadd(c1, c2): - """ - Add one Laguerre series to another. - - Returns the sum of two Laguerre series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Laguerre series of their sum. - - See Also - -------- - lagsub, lagmul, lagdiv, lagpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Laguerre series - is a Laguerre series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagadd - >>> lagadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) - - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] += c2 - ret = c1 - else : - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def lagsub(c1, c2): - """ - Subtract one Laguerre series from another. 
- - Returns the difference of two Laguerre series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Laguerre series coefficients representing their difference. - - See Also - -------- - lagadd, lagmul, lagdiv, lagpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Laguerre - series is a Laguerre series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagsub - >>> lagsub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] -= c2 - ret = c1 - else : - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def lagmulx(cs): - """Multiply a Laguerre series by x. - - Multiply the Laguerre series `cs` by x, where x is the independent - variable. - - - Parameters - ---------- - cs : array_like - 1-d array of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Laguerre - polynomials in the form - - .. 
math:: - - xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagmulx - >>> lagmulx([1, 2, 3]) - array([ -1., -1., 11., -9.]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - # The zero series needs special treatment - if len(cs) == 1 and cs[0] == 0: - return cs - - prd = np.empty(len(cs) + 1, dtype=cs.dtype) - prd[0] = cs[0] - prd[1] = -cs[0] - for i in range(1, len(cs)): - prd[i + 1] = -cs[i]*(i + 1) - prd[i] += cs[i]*(2*i + 1) - prd[i - 1] -= cs[i]*i - return prd - - -def lagmul(c1, c2): - """ - Multiply one Laguerre series by another. - - Returns the product of two Laguerre series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Laguerre series coefficients representing their product. - - See Also - -------- - lagadd, lagsub, lagdiv, lagpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Laguerre polynomial basis set. Thus, to express - the product as a Laguerre series, it is necessary to "re-project" the - product onto said basis set, which may produce "un-intuitive" (but - correct) results; see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagmul - >>> lagmul([1, 2, 3], [0, 1, 2]) - array([ 8., -13., 38., -51., 36.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - cs = c2 - xs = c1 - else: - cs = c1 - xs = c2 - - if len(cs) == 1: - c0 = cs[0]*xs - c1 = 0 - elif len(cs) == 2: - c0 = cs[0]*xs - c1 = cs[1]*xs - else : - nd = len(cs) - c0 = cs[-2]*xs - c1 = cs[-1]*xs - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = lagsub(cs[-i]*xs, (c1*(nd - 1))/nd) - c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) - return lagadd(c0, lagsub(c1, lagmulx(c1))) - - -def lagdiv(c1, c2): - """ - Divide one Laguerre series by another. - - Returns the quotient-with-remainder of two Laguerre series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Laguerre series coefficients representing the quotient and - remainder. - - See Also - -------- - lagadd, lagsub, lagmul, lagpow - - Notes - ----- - In general, the (polynomial) division of one Laguerre series by another - results in quotient and remainder terms that are not in the Laguerre - polynomial basis set. Thus, to express these results as a Laguerre - series, it is necessary to "re-project" the results onto the Laguerre - basis set, which may produce "un-intuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagdiv - >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) - >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 1.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2 : - return c1[:1]*0, c1 - elif lc2 == 1 : - return c1/c2[-1], c1[:1]*0 - else : - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = lagmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def lagpow(cs, pow, maxpower=16) : - """Raise a Laguerre series to a power. - - Returns the Laguerre series `cs` raised to the power `pow`. The - arguement `cs` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - cs : array_like - 1d array of Laguerre series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to umanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Laguerre series of power. 
- - See Also - -------- - lagadd, lagsub, lagmul, lagdiv - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagpow - >>> lagpow([1, 2, 3], 2) - array([ 14., -16., 56., -72., 54.]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - power = int(pow) - if power != pow or power < 0 : - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : - raise ValueError("Power is too large") - elif power == 0 : - return np.array([1], dtype=cs.dtype) - elif power == 1 : - return cs - else : - # This can be made more efficient by using powers of two - # in the usual way. - prd = cs - for i in range(2, power + 1) : - prd = lagmul(prd, cs) - return prd - - -def lagder(cs, m=1, scl=1) : - """ - Differentiate a Laguerre series. - - Returns the series `cs` differentiated `m` times. At each iteration the - result is multiplied by `scl` (the scaling factor is for use in a linear - change of variable). The argument `cs` is the sequence of coefficients - from lowest order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - cs: array_like - 1-d array of Laguerre series coefficients ordered from low to high. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - - Returns - ------- - der : ndarray - Laguerre series of the derivative. - - See Also - -------- - lagint - - Notes - ----- - In general, the result of differentiating a Laguerre series does not - resemble the same operation on a power series. Thus the result of this - function may be "un-intuitive," albeit correct; see Examples section - below. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagder - >>> lagder([ 1., 1., 1., -3.]) - array([ 1., 2., 3.]) - >>> lagder([ 1., 0., 0., -4., 3.], m=2) - array([ 1., 2., 3.]) - - """ - cnt = int(m) - - if cnt != m: - raise ValueError, "The order of derivation must be integer" - if cnt < 0 : - raise ValueError, "The order of derivation must be non-negative" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - elif cnt >= len(cs): - return cs[:1]*0 - else : - for i in range(cnt): - n = len(cs) - 1 - cs *= scl - der = np.empty(n, dtype=cs.dtype) - for j in range(n, 0, -1): - der[j - 1] = -cs[j] - cs[j - 1] += cs[j] - cs = der - return cs - - -def lagint(cs, m=1, k=[], lbnd=0, scl=1): - """ - Integrate a Laguerre series. - - Returns a Laguerre series that is the Laguerre series `cs`, integrated - `m` times from `lbnd` to `x`. At each iteration the resulting series - is **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `cs` is a sequence of - coefficients, from lowest order Laguerre series "term" to highest, - e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`. - - Parameters - ---------- - cs : array_like - 1-d array of Laguerre series coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. 
- lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - - Returns - ------- - S : ndarray - Laguerre series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - lagder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "re-projected" onto the C-series basis set. Thus, typically, - the result of this function is "un-intuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagint - >>> lagint([1,2,3]) - array([ 1., 1., 1., -3.]) - >>> lagint([1,2,3], m=2) - array([ 1., 0., 0., -4., 3.]) - >>> lagint([1,2,3], k=1) - array([ 2., 1., 1., -3.]) - >>> lagint([1,2,3], lbnd=-1) - array([ 11.5, 1. , 1. , -3. ]) - >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) - array([ 11.16666667, -5. , -3. , 2. 
]) - - """ - cnt = int(m) - if np.isscalar(k) : - k = [k] - - if cnt != m: - raise ValueError, "The order of integration must be integer" - if cnt < 0 : - raise ValueError, "The order of integration must be non-negative" - if len(k) > cnt : - raise ValueError, "Too many integration constants" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : - n = len(cs) - cs *= scl - if n == 1 and cs[0] == 0: - cs[0] += k[i] - else: - tmp = np.empty(n + 1, dtype=cs.dtype) - tmp[0] = cs[0] - tmp[1] = -cs[0] - for j in range(1, n): - tmp[j] += cs[j] - tmp[j + 1] = -cs[j] - tmp[0] += k[i] - lagval(lbnd, tmp) - cs = tmp - return cs - - -def lagval(x, cs): - """Evaluate a Laguerre series. - - If `cs` is of length `n`, this function returns : - - ``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)`` - - If x is a sequence or array then p(x) will have the same shape as x. - If r is a ring_like object that supports multiplication and addition - by the values in `cs`, then an object of the same type is returned. - - Parameters - ---------- - x : array_like, ring_like - Array of numbers or objects that support multiplication and - addition with themselves and with the elements of `cs`. - cs : array_like - 1-d array of Laguerre coefficients ordered from low to high. - - Returns - ------- - values : ndarray, ring_like - If the return is an ndarray then it has the same shape as `x`. - - See Also - -------- - lagfit - - Examples - -------- - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagval - >>> coef = [1,2,3] - >>> lagval(1, coef) - -0.5 - >>> lagval([[1,2],[3,4]], coef) - array([[-0.5, -4. ], - [-4.5, -2. 
]]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if isinstance(x, tuple) or isinstance(x, list) : - x = np.asarray(x) - - if len(cs) == 1 : - c0 = cs[0] - c1 = 0 - elif len(cs) == 2 : - c0 = cs[0] - c1 = cs[1] - else : - nd = len(cs) - c0 = cs[-2] - c1 = cs[-1] - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = cs[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*((2*nd - 1) - x))/nd - return c0 + c1*(1 - x) - - -def lagvander(x, deg) : - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points `x`. - This isn't a true Vandermonde matrix because `x` can be an arbitrary - ndarray and the Laguerre polynomials aren't powers. If ``V`` is the - returned matrix and `x` is a 2d array, then the elements of ``V`` are - ``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Laguerre polynomial - of degree ``k``. - - Parameters - ---------- - x : array_like - Array of points. The values are converted to double or complex - doubles. If x is scalar it is converted to a 1D array. - deg : integer - Degree of the resulting matrix. - - Returns - ------- - vander : Vandermonde matrix. - The shape of the returned matrix is ``x.shape + (deg+1,)``. The last - index is the degree. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagvander - >>> x = np.array([0, 1, 2]) - >>> lagvander(x, 3) - array([[ 1. , 1. , 1. , 1. ], - [ 1. , 0. , -0.5 , -0.66666667], - [ 1. , -1. , -1. 
, -0.33333333]]) - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) - v[0] = x*0 + 1 - if ideg > 0 : - v[1] = 1 - x - for i in range(2, ideg + 1) : - v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i - return np.rollaxis(v, 0, v.ndim) - - -def lagfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Laguerre series to data. - - Return the coefficients of a Laguerre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. 
If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Laguerre coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : present when `full` = True - Residuals of the least-squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, polyfit, hermfit, hermefit - lagval : Evaluates a Laguerre series. - lagvander : pseudo Vandermonde matrix of Laguerre series. - lagweight : Laguerre weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Laguerre series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. 
- - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coeficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Laguerre series are probably most useful when the data can - be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre - weight. In that case the wieght ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `lagweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagfit, lagval - >>> x = np.linspace(0, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = lagval(x, [1, 2, 3]) + err - >>> lagfit(x, y, 2) - array([ 0.96971004, 2.00193749, 3.00288744]) - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. 
- if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if len(x) != len(y): - raise TypeError, "expected x and y to have same length" - - # set up the least squares matrices - lhs = lagvander(x, deg) - rhs = y - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError, "expected 1D vector for w" - if len(x) != len(w): - raise TypeError, "expected x and w to have same length" - # apply weights - if rhs.ndim == 2: - lhs *= w[:, np.newaxis] - rhs *= w[:, np.newaxis] - else: - lhs *= w[:, np.newaxis] - rhs *= w - - # set rcond - if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps - - # scale the design matrix and solve the least squares equation - scl = np.sqrt((lhs*lhs).sum(0)) - c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full : - return c, [resids, rank, s, rcond] - else : - return c - - -def lagcompanion(cs): - """Return the companion matrix of cs. - - The unscaled companion matrix of the Laguerre polynomials is already - symmetric when `cs` represents a single Laguerre polynomial, so no - further scaling is needed. - - Parameters - ---------- - cs : array_like - 1-d array of Laguerre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Companion matrix of dimensions (deg, deg). 
- - """ - accprod = np.multiply.accumulate - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(cs) == 2: - return np.array(1 + cs[0]/cs[1]) - - n = len(cs) - 1 - mat = np.zeros((n, n), dtype=cs.dtype) - top = mat.reshape(-1)[1::n+1] - mid = mat.reshape(-1)[0::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = -np.arange(1,n) - mid[...] = 2.*np.arange(n) + 1. - bot[...] = top - mat[:,-1] += (cs[:-1]/cs[-1])*n - return mat - - -def lagroots(cs): - """ - Compute the roots of a Laguerre series. - - Return the roots (a.k.a "zeros") of the Laguerre series represented by - `cs`, which is the sequence of coefficients from lowest order "term" - to highest, e.g., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2``. - - Parameters - ---------- - cs : array_like - 1-d array of Laguerre series coefficients ordered from low to high. - - Returns - ------- - out : ndarray - Array of the roots. If all the roots are real, then so is the - dtype of ``out``; otherwise, ``out``'s dtype is complex. - - See Also - -------- - polyroots - chebroots - - Notes - ----- - Algorithm(s) used: - - Remember: because the Laguerre series basis set is different from the - "standard" basis set, the results of this function *may* not be what - one is expecting. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagroots, lagfromroots - >>> coef = lagfromroots([0, 1, 2]) - >>> coef - array([ 2., -8., 12., -6.]) - >>> lagroots(coef) - array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) <= 1 : - return np.array([], dtype=cs.dtype) - if len(cs) == 2 : - return np.array([1 + cs[0]/cs[1]]) - - m = lagcompanion(cs) - r = la.eigvals(m) - r.sort() - return r - - -# -# Laguerre series class -# - -exec polytemplate.substitute(name='Laguerre', nick='lag', domain='[-1,1]') diff --git a/numpy-1.6.2/numpy/polynomial/legendre.py b/numpy-1.6.2/numpy/polynomial/legendre.py deleted file mode 100644 index 94098632cd..0000000000 --- a/numpy-1.6.2/numpy/polynomial/legendre.py +++ /dev/null @@ -1,1216 +0,0 @@ -""" -Legendre Series (:mod: `numpy.polynomial.legendre`) -=================================================== - -.. currentmodule:: numpy.polynomial.polynomial - -This module provides a number of objects (mostly functions) useful for -dealing with Legendre series, including a `Legendre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- - -.. autosummary:: - :toctree: generated/ - - legdomain Legendre series default domain, [-1,1]. - legzero Legendre series that evaluates identically to 0. - legone Legendre series that evaluates identically to 1. - legx Legendre series for the identity map, ``f(x) = x``. - -Arithmetic ----------- - -.. autosummary:: - :toctree: generated/ - - legmulx multiply a Legendre series in P_i(x) by x. - legadd add two Legendre series. - legsub subtract one Legendre series from another. - legmul multiply two Legendre series. - legdiv divide one Legendre series by another. 
- legpow raise a Legendre series to an positive integer power - legval evaluate a Legendre series at given points. - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - legder differentiate a Legendre series. - legint integrate a Legendre series. - -Misc Functions --------------- - -.. autosummary:: - :toctree: generated/ - - legfromroots create a Legendre series with specified roots. - legroots find the roots of a Legendre series. - legvander Vandermonde-like matrix for Legendre polynomials. - legfit least-squares fit returning a Legendre series. - legtrim trim leading coefficients from a Legendre series. - legline Legendre series representing given straight line. - leg2poly convert a Legendre series to a polynomial. - poly2leg convert a polynomial to a Legendre series. - -Classes -------- - Legendre A Legendre series class. - -See also --------- -numpy.polynomial.polynomial -numpy.polynomial.chebyshev -numpy.polynomial.laguerre -numpy.polynomial.hermite -numpy.polynomial.hermite_e - -""" -from __future__ import division - -__all__ = ['legzero', 'legone', 'legx', 'legdomain', 'legline', - 'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', - 'legval', 'legder', 'legint', 'leg2poly', 'poly2leg', - 'legfromroots', 'legvander', 'legfit', 'legtrim', 'legroots', - 'Legendre'] - -import numpy as np -import numpy.linalg as la -import polyutils as pu -import warnings -from polytemplate import polytemplate - -legtrim = pu.trimcoef - -def poly2leg(pol) : - """ - Convert a polynomial to a Legendre series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Legendre series, ordered - from lowest to highest degree. 
- - Parameters - ---------- - pol : array_like - 1-d array containing the polynomial coefficients - - Returns - ------- - cs : ndarray - 1-d array containing the coefficients of the equivalent Legendre - series. - - See Also - -------- - leg2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> p = P.Polynomial(np.arange(4)) - >>> p - Polynomial([ 0., 1., 2., 3.], [-1., 1.]) - >>> c = P.Legendre(P.poly2leg(p.coef)) - >>> c - Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1) : - res = legadd(legmulx(res), pol[i]) - return res - - -def leg2poly(cs) : - """ - Convert a Legendre series to a polynomial. - - Convert an array representing the coefficients of a Legendre series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - cs : array_like - 1-d array containing the Legendre series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-d array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2leg - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> c = P.Legendre(range(4)) - >>> c - Legendre([ 0., 1., 2., 3.], [-1., 1.]) - >>> p = c.convert(kind=P.Polynomial) - >>> p - Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.]) - >>> P.leg2poly(range(4)) - array([-1. , -3.5, 3. 
, 7.5]) - - - """ - from polynomial import polyadd, polysub, polymulx - - [cs] = pu.as_series([cs]) - n = len(cs) - if n < 3: - return cs - else: - c0 = cs[-2] - c1 = cs[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1) : - tmp = c0 - c0 = polysub(cs[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) - return polyadd(c0, polymulx(c1)) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Legendre -legdomain = np.array([-1,1]) - -# Legendre coefficients representing zero. -legzero = np.array([0]) - -# Legendre coefficients representing one. -legone = np.array([1]) - -# Legendre coefficients representing the identity x. -legx = np.array([0,1]) - - -def legline(off, scl) : - """ - Legendre series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Legendre series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> import numpy.polynomial.legendre as L - >>> L.legline(3,2) - array([3, 2]) - >>> L.legval(-3, L.legline(3,2)) # should be -3 - -3.0 - - """ - if scl != 0 : - return np.array([off,scl]) - else : - return np.array([off]) - - -def legfromroots(roots) : - """ - Generate a Legendre series with the given roots. - - Return the array of coefficients for the P-series whose roots (a.k.a. - "zeros") are given by *roots*. The returned array of coefficients is - ordered from lowest order "term" to highest, and zeros of multiplicity - greater than one must be included in *roots* a number of times equal - to their multiplicity (e.g., if `2` is a root of multiplicity three, - then [2,2,2] must be in *roots*). - - Parameters - ---------- - roots : array_like - Sequence containing the roots. 
- - Returns - ------- - out : ndarray - 1-d array of the Legendre series coefficients, ordered from low to - high. If all roots are real, ``out.dtype`` is a float type; - otherwise, ``out.dtype`` is a complex type, even if all the - coefficients in the result are real (see Examples below). - - See Also - -------- - polyfromroots, chebfromroots - - Notes - ----- - What is returned are the :math:`c_i` such that: - - .. math:: - - \\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i]) - - where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Legendre - (basis) polynomial over the domain `[-1,1]`. Note that, unlike - `polyfromroots`, due to the nature of the Legendre basis set, the - above identity *does not* imply :math:`c_n = 1` identically (see - Examples). - - Examples - -------- - >>> import numpy.polynomial.legendre as L - >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis - array([ 0. , -0.4, 0. , 0.4]) - >>> j = complex(0,1) - >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) - - """ - if len(roots) == 0 : - return np.ones(1) - else : - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [legline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [legmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = legmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def legadd(c1, c2): - """ - Add one Legendre series to another. - - Returns the sum of two Legendre series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Legendre series of their sum. 
- - See Also - -------- - legsub, legmul, legdiv, legpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Legendre series - is a Legendre series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legadd(c1,c2) - array([ 4., 4., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] += c2 - ret = c1 - else : - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def legsub(c1, c2): - """ - Subtract one Legendre series from another. - - Returns the difference of two Legendre series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Legendre series coefficients representing their difference. - - See Also - -------- - legadd, legmul, legdiv, legpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Legendre - series is a Legendre series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legsub(c1,c2) - array([-2., 0., 2.]) - >>> L.legsub(c2,c1) # -C.legsub(c1,c2) - array([ 2., 0., -2.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] -= c2 - ret = c1 - else : - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def legmulx(cs): - """Multiply a Legendre series by x. 
- - Multiply the Legendre series `cs` by x, where x is the independent - variable. - - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Legendre - polynomials in the form - - .. math:: - - xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - # The zero series needs special treatment - if len(cs) == 1 and cs[0] == 0: - return cs - - prd = np.empty(len(cs) + 1, dtype=cs.dtype) - prd[0] = cs[0]*0 - prd[1] = cs[0] - for i in range(1, len(cs)): - j = i + 1 - k = i - 1 - s = i + j - prd[j] = (cs[i]*j)/s - prd[k] += (cs[i]*i)/s - return prd - - -def legmul(c1, c2): - """ - Multiply one Legendre series by another. - - Returns the product of two Legendre series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Legendre series coefficients representing their product. - - See Also - -------- - legadd, legsub, legdiv, legpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Legendre polynomial basis set. Thus, to express - the product as a Legendre series, it is necessary to "re-project" the - product onto said basis set, which may produce "un-intuitive" (but - correct) results; see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2) - >>> P.legmul(c1,c2) # multiplication requires "reprojection" - array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - cs = c2 - xs = c1 - else: - cs = c1 - xs = c2 - - if len(cs) == 1: - c0 = cs[0]*xs - c1 = 0 - elif len(cs) == 2: - c0 = cs[0]*xs - c1 = cs[1]*xs - else : - nd = len(cs) - c0 = cs[-2]*xs - c1 = cs[-1]*xs - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = legsub(cs[-i]*xs, (c1*(nd - 1))/nd) - c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) - return legadd(c0, legmulx(c1)) - - -def legdiv(c1, c2): - """ - Divide one Legendre series by another. - - Returns the quotient-with-remainder of two Legendre series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - quo, rem : ndarrays - Of Legendre series coefficients representing the quotient and - remainder. - - See Also - -------- - legadd, legsub, legmul, legpow - - Notes - ----- - In general, the (polynomial) division of one Legendre series by another - results in quotient and remainder terms that are not in the Legendre - polynomial basis set. Thus, to express these results as a Legendre - series, it is necessary to "re-project" the results onto the Legendre - basis set, which may produce "un-intuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not - (array([ 3.]), array([-8., -4.])) - >>> c2 = (0,1,2,3) - >>> L.legdiv(c2,c1) # neither "intuitive" - (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2 : - return c1[:1]*0, c1 - elif lc2 == 1 : - return c1/c2[-1], c1[:1]*0 - else : - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = legmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def legpow(cs, pow, maxpower=16) : - """Raise a Legendre series to a power. - - Returns the Legendre series `cs` raised to the power `pow`. The - arguement `cs` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - cs : array_like - 1d array of Legendre series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to umanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Legendre series of power. - - See Also - -------- - legadd, legsub, legmul, legdiv - - Examples - -------- - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - power = int(pow) - if power != pow or power < 0 : - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : - raise ValueError("Power is too large") - elif power == 0 : - return np.array([1], dtype=cs.dtype) - elif power == 1 : - return cs - else : - # This can be made more efficient by using powers of two - # in the usual way. 
- prd = cs - for i in range(2, power + 1) : - prd = legmul(prd, cs) - return prd - - -def legder(cs, m=1, scl=1) : - """ - Differentiate a Legendre series. - - Returns the series `cs` differentiated `m` times. At each iteration the - result is multiplied by `scl` (the scaling factor is for use in a linear - change of variable). The argument `cs` is the sequence of coefficients - from lowest order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - cs : array_like - 1-D array of Legendre series coefficients ordered from low to high. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - - Returns - ------- - der : ndarray - Legendre series of the derivative. - - See Also - -------- - legint - - Notes - ----- - In general, the result of differentiating a Legendre series does not - resemble the same operation on a power series. Thus the result of this - function may be "un-intuitive," albeit correct; see Examples section - below. 
- - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> cs = (1,2,3,4) - >>> L.legder(cs) - array([ 6., 9., 20.]) - >>> L.legder(cs,3) - array([ 60.]) - >>> L.legder(cs,scl=-1) - array([ -6., -9., -20.]) - >>> L.legder(cs,2,-1) - array([ 9., 60.]) - - """ - cnt = int(m) - - if cnt != m: - raise ValueError, "The order of derivation must be integer" - if cnt < 0 : - raise ValueError, "The order of derivation must be non-negative" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - elif cnt >= len(cs): - return cs[:1]*0 - else : - for i in range(cnt): - n = len(cs) - 1 - cs *= scl - der = np.empty(n, dtype=cs.dtype) - for j in range(n, 0, -1): - der[j - 1] = (2*j - 1)*cs[j] - cs[j - 2] += cs[j] - cs = der - return cs - - -def legint(cs, m=1, k=[], lbnd=0, scl=1): - """ - Integrate a Legendre series. - - Returns a Legendre series that is the Legendre series `cs`, integrated - `m` times from `lbnd` to `x`. At each iteration the resulting series - is **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `cs` is a sequence of - coefficients, from lowest order Legendre series "term" to highest, - e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`. - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. 
If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - - Returns - ------- - S : ndarray - Legendre series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - legder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "re-projected" onto the C-series basis set. Thus, typically, - the result of this function is "un-intuitive," albeit correct; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> cs = (1,2,3) - >>> L.legint(cs) - array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) - >>> L.legint(cs,3) - array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, - -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) - >>> L.legint(cs, k=3) - array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) - >>> L.legint(cs, lbnd=-2) - array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) - >>> L.legint(cs, scl=2) - array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) - - """ - cnt = int(m) - if np.isscalar(k) : - k = [k] - - if cnt != m: - raise ValueError, "The order of integration must be integer" - if cnt < 0 : - raise ValueError, "The order of integration must be non-negative" - if len(k) > cnt : - raise ValueError, "Too many integration constants" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt) : - n = len(cs) - cs *= scl - if n == 1 and cs[0] == 0: - cs[0] += k[i] - else: - tmp = np.empty(n + 1, dtype=cs.dtype) - tmp[0] = cs[0]*0 - tmp[1] = cs[0] - for j in range(1, n): - t = cs[j]/(2*j + 1) - tmp[j + 1] = t - tmp[j - 1] -= t - tmp[0] += k[i] - legval(lbnd, tmp) - cs = tmp - return cs - - -def legval(x, cs): - """Evaluate a Legendre series. - - If `cs` is of length `n`, this function returns : - - ``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)`` - - If x is a sequence or array then p(x) will have the same shape as x. - If r is a ring_like object that supports multiplication and addition - by the values in `cs`, then an object of the same type is returned. - - Parameters - ---------- - x : array_like, ring_like - Array of numbers or objects that support multiplication and - addition with themselves and with the elements of `cs`. - cs : array_like - 1-d array of Legendre coefficients ordered from low to high. 
- - Returns - ------- - values : ndarray, ring_like - If the return is an ndarray then it has the same shape as `x`. - - See Also - -------- - legfit - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if isinstance(x, tuple) or isinstance(x, list) : - x = np.asarray(x) - - if len(cs) == 1 : - c0 = cs[0] - c1 = 0 - elif len(cs) == 2 : - c0 = cs[0] - c1 = cs[1] - else : - nd = len(cs) - c0 = cs[-2] - c1 = cs[-1] - for i in range(3, len(cs) + 1) : - tmp = c0 - nd = nd - 1 - c0 = cs[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*x*(2*nd - 1))/nd - return c0 + c1*x - - -def legvander(x, deg) : - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points `x`. - This isn't a true Vandermonde matrix because `x` can be an arbitrary - ndarray and the Legendre polynomials aren't powers. If ``V`` is the - returned matrix and `x` is a 2d array, then the elements of ``V`` are - ``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Legendre polynomial - of degree ``k``. - - Parameters - ---------- - x : array_like - Array of points. The values are converted to double or complex - doubles. If x is scalar it is converted to a 1D array. - deg : integer - Degree of the resulting matrix. - - Returns - ------- - vander : Vandermonde matrix. - The shape of the returned matrix is ``x.shape + (deg+1,)``. The last - index is the degree. - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) - # Use forward recursion to generate the entries. This is not as accurate - # as reverse recursion in this application but it is more efficient. 
- v[0] = x*0 + 1 - if ideg > 0 : - v[1] = x - for i in range(2, ideg + 1) : - v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i - return np.rollaxis(v, 0, v.ndim) - - -def legfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Legendre series to data. - - Return the coefficients of a Legendre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. 
versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Legendre coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : present when `full` = True - Residuals of the least-squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, polyfit, lagfit, hermfit, hermefit - legval : Evaluates a Legendre series. - legvander : Vandermonde matrix of Legendre series. - legweight : Legendre weight function (= 1). - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Legendre series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where :math:`w_j` are the weights. This problem is solved by setting up - as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coeficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. 
The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Legendre series are usually better conditioned than fits - using power series, but much can depend on the distribution of the - sample points and the smoothness of the data. If the quality of the fit - is inadequate splines may be a good alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if len(x) != len(y): - raise TypeError, "expected x and y to have same length" - - # set up the least squares matrices - lhs = legvander(x, deg) - rhs = y - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError, "expected 1D vector for w" - if len(x) != len(w): - raise TypeError, "expected x and w to have same length" - # apply weights - if rhs.ndim == 2: - lhs *= w[:, np.newaxis] - rhs *= w[:, np.newaxis] - else: - lhs *= w[:, np.newaxis] - rhs *= w - - # set rcond - if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps - - # scale the design matrix and solve the least squares equation - scl = np.sqrt((lhs*lhs).sum(0)) - c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full : - return c, [resids, rank, s, rcond] - else : - return c - - -def legcompanion(cs): - """Return the scaled companion matrix of cs. 
- - The basis polynomials are scaled so that the companion matrix is - symmetric when `cs` represents a single Legendre polynomial. This - provides better eigenvalue estimates than the unscaled case and in the - single polynomial case the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(cs) == 2: - return np.array(-cs[0]/cs[1]) - - n = len(cs) - 1 - mat = np.zeros((n, n), dtype=cs.dtype) - scl = 1./np.sqrt(2*np.arange(n) + 1) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] - bot[...] = top - mat[:,-1] -= (cs[:-1]/cs[-1])*(scl/scl[-1])*(n/(2*n - 1)) - return mat - - -def legroots(cs): - """ - Compute the roots of a Legendre series. - - Returns the roots (a.k.a "zeros") of the Legendre series represented by - `cs`, which is the sequence of coefficients from lowest order "term" - to highest, e.g., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2``. - - Parameters - ---------- - cs : array_like - 1-d array of Legendre series coefficients ordered from low to high. - maxiter : int, optional - Maximum number of iterations of Newton to use in refining the - roots. - - Returns - ------- - out : ndarray - Sorted array of the roots. If all the roots are real, then so is - the dtype of ``out``; otherwise, ``out``'s dtype is complex. 
- - See Also - -------- - polyroots - chebroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the real interval [-1, 1] in the complex plane - may have large errors due to the numerical instability of the Lengendre - series for such values. Roots with multiplicity greater than 1 will - also show larger errors as the value of the series near such points is - relatively insensitive to errors in the roots. Isolated roots near the - interval [-1, 1] can be improved by a few iterations of Newton's - method. - - The Legendre series basis polynomials aren't powers of ``x`` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> import numpy.polynomial.legendre as leg - >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0 has only real roots - array([-0.85099543, -0.11407192, 0.51506735]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - return np.array([], dtype=cs.dtype) - if len(cs) == 2: - return np.array([-cs[0]/cs[1]]) - - m = legcompanion(cs) - r = la.eigvals(m) - r.sort() - return r - - -# -# Legendre series class -# - -exec polytemplate.substitute(name='Legendre', nick='leg', domain='[-1,1]') diff --git a/numpy-1.6.2/numpy/polynomial/polynomial.py b/numpy-1.6.2/numpy/polynomial/polynomial.py deleted file mode 100644 index 6f67814ad9..0000000000 --- a/numpy-1.6.2/numpy/polynomial/polynomial.py +++ /dev/null @@ -1,984 +0,0 @@ -""" -Objects for dealing with polynomials. - -This module provides a number of objects (mostly functions) useful for -dealing with polynomials, including a `Polynomial` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with polynomial objects is in -the docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `polydomain` -- Polynomial default domain, [-1,1]. 
-- `polyzero` -- (Coefficients of the) "zero polynomial." -- `polyone` -- (Coefficients of the) constant polynomial 1. -- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. - -Arithmetic ----------- -- `polyadd` -- add two polynomials. -- `polysub` -- subtract one polynomial from another. -- `polymul` -- multiply two polynomials. -- `polydiv` -- divide one polynomial by another. -- `polypow` -- raise a polynomial to an positive integer power -- `polyval` -- evaluate a polynomial at given points. - -Calculus --------- -- `polyder` -- differentiate a polynomial. -- `polyint` -- integrate a polynomial. - -Misc Functions --------------- -- `polyfromroots` -- create a polynomial with specified roots. -- `polyroots` -- find the roots of a polynomial. -- `polyvander` -- Vandermonde-like matrix for powers. -- `polyfit` -- least-squares fit returning a polynomial. -- `polytrim` -- trim leading coefficients from a polynomial. -- `polyline` -- polynomial representing given straight line. - -Classes -------- -- `Polynomial` -- polynomial class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division - -__all__ = ['polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', - 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', - 'polyval', 'polyder', 'polyint', 'polyfromroots', 'polyvander', - 'polyfit', 'polytrim', 'polyroots', 'Polynomial'] - -import numpy as np -import numpy.linalg as la -import polyutils as pu -import warnings -from polytemplate import polytemplate - -polytrim = pu.trimcoef - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Polynomial default domain. -polydomain = np.array([-1,1]) - -# Polynomial coefficients representing zero. -polyzero = np.array([0]) - -# Polynomial coefficients representing one. -polyone = np.array([1]) - -# Polynomial coefficients representing the identity x. 
-polyx = np.array([0,1]) - -# -# Polynomial series functions -# - -def polyline(off, scl) : - """ - Returns an array representing a linear polynomial. - - Parameters - ---------- - off, scl : scalars - The "y-intercept" and "slope" of the line, respectively. - - Returns - ------- - y : ndarray - This module's representation of the linear polynomial ``off + - scl*x``. - - See Also - -------- - chebline - - Examples - -------- - >>> from numpy import polynomial as P - >>> P.polyline(1,-1) - array([ 1, -1]) - >>> P.polyval(1, P.polyline(1,-1)) # should be 0 - 0.0 - - """ - if scl != 0 : - return np.array([off,scl]) - else : - return np.array([off]) - -def polyfromroots(roots) : - """ - Generate a polynomial with the given roots. - - Return the array of coefficients for the polynomial whose leading - coefficient (i.e., that of the highest order term) is `1` and whose - roots (a.k.a. "zeros") are given by *roots*. The returned array of - coefficients is ordered from lowest order term to highest, and zeros - of multiplicity greater than one must be included in *roots* a number - of times equal to their multiplicity (e.g., if `2` is a root of - multiplicity three, then [2,2,2] must be in *roots*). - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-d array of the polynomial's coefficients, ordered from low to - high. If all roots are real, ``out.dtype`` is a float type; - otherwise, ``out.dtype`` is a complex type, even if all the - coefficients in the result are real (see Examples below). - - See Also - -------- - chebfromroots - - Notes - ----- - What is returned are the :math:`a_i` such that: - - .. math:: - - \\sum_{i=0}^{n} a_ix^i = \\prod_{i=0}^{n} (x - roots[i]) - - where ``n == len(roots)``; note that this implies that `1` is always - returned for :math:`a_n`. 
- - Examples - -------- - >>> import numpy.polynomial as P - >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x - array([ 0., -1., 0., 1.]) - >>> j = complex(0,1) - >>> P.polyfromroots((-j,j)) # complex returned, though values are real - array([ 1.+0.j, 0.+0.j, 1.+0.j]) - - """ - if len(roots) == 0 : - return np.ones(1) - else : - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [polyline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [polymul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = polymul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def polyadd(c1, c2): - """ - Add one polynomial to another. - - Returns the sum of two polynomials `c1` + `c2`. The arguments are - sequences of coefficients from lowest order term to highest, i.e., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2"``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of polynomial coefficients ordered from low to high. - - Returns - ------- - out : ndarray - The coefficient array representing their sum. - - See Also - -------- - polysub, polymul, polydiv, polypow - - Examples - -------- - >>> from numpy import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> sum = P.polyadd(c1,c2); sum - array([ 4., 4., 4.]) - >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) - 28.0 - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] += c2 - ret = c1 - else : - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def polysub(c1, c2): - """ - Subtract one polynomial from another. - - Returns the difference of two polynomials `c1` - `c2`. The arguments - are sequences of coefficients from lowest order term to highest, i.e., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of polynomial coefficients ordered from low to - high. 
- - Returns - ------- - out : ndarray - Of coefficients representing their difference. - - See Also - -------- - polyadd, polymul, polydiv, polypow - - Examples - -------- - >>> from numpy import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polysub(c1,c2) - array([-2., 0., 2.]) - >>> P.polysub(c2,c1) # -P.polysub(c1,c2) - array([ 2., 0., -2.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2) : - c1[:c2.size] -= c2 - ret = c1 - else : - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def polymulx(cs): - """Multiply a polynomial by x. - - Multiply the polynomial `cs` by x, where x is the independent - variable. - - - Parameters - ---------- - cs : array_like - 1-d array of polynomial coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - # The zero series needs special treatment - if len(cs) == 1 and cs[0] == 0: - return cs - - prd = np.empty(len(cs) + 1, dtype=cs.dtype) - prd[0] = cs[0]*0 - prd[1:] = cs - return prd - - -def polymul(c1, c2): - """ - Multiply one polynomial by another. - - Returns the product of two polynomials `c1` * `c2`. The arguments are - sequences of coefficients, from lowest order term to highest, e.g., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of coefficients representing a polynomial, relative to the - "standard" basis, and ordered from lowest order term to highest. - - Returns - ------- - out : ndarray - Of the coefficients of their product. 
- - See Also - -------- - polyadd, polysub, polydiv, polypow - - Examples - -------- - >>> import numpy.polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polymul(c1,c2) - array([ 3., 8., 14., 8., 3.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - ret = np.convolve(c1, c2) - return pu.trimseq(ret) - - -def polydiv(c1, c2): - """ - Divide one polynomial by another. - - Returns the quotient-with-remainder of two polynomials `c1` / `c2`. - The arguments are sequences of coefficients, from lowest order term - to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-d arrays of polynomial coefficients ordered from low to high. - - Returns - ------- - [quo, rem] : ndarrays - Of coefficient series representing the quotient and remainder. - - See Also - -------- - polyadd, polysub, polymul, polypow - - Examples - -------- - >>> import numpy.polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polydiv(c1,c2) - (array([ 3.]), array([-8., -4.])) - >>> P.polydiv(c2,c1) - (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0 : - raise ZeroDivisionError() - - len1 = len(c1) - len2 = len(c2) - if len2 == 1 : - return c1/c2[-1], c1[:1]*0 - elif len1 < len2 : - return c1[:1]*0, c1 - else : - dlen = len1 - len2 - scl = c2[-1] - c2 = c2[:-1]/scl - i = dlen - j = len1 - 1 - while i >= 0 : - c1[i:j] -= c2*c1[j] - i -= 1 - j -= 1 - return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) - -def polypow(cs, pow, maxpower=None) : - """Raise a polynomial to a power. - - Returns the polynomial `cs` raised to the power `pow`. The argument - `cs` is a sequence of coefficients ordered from low to high. i.e., - [1,2,3] is the series ``1 + 2*x + 3*x**2.`` - - Parameters - ---------- - cs : array_like - 1d array of chebyshev series coefficients ordered from low to - high. 
- pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to umanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Chebyshev series of power. - - See Also - -------- - chebadd, chebsub, chebmul, chebdiv - - Examples - -------- - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - power = int(pow) - if power != pow or power < 0 : - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower : - raise ValueError("Power is too large") - elif power == 0 : - return np.array([1], dtype=cs.dtype) - elif power == 1 : - return cs - else : - # This can be made more efficient by using powers of two - # in the usual way. - prd = cs - for i in range(2, power + 1) : - prd = np.convolve(prd, cs) - return prd - -def polyder(cs, m=1, scl=1): - """ - Differentiate a polynomial. - - Returns the polynomial `cs` differentiated `m` times. At each - iteration the result is multiplied by `scl` (the scaling factor is for - use in a linear change of variable). The argument `cs` is the sequence - of coefficients from lowest order term to highest, e.g., [1,2,3] - represents the polynomial ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - cs: array_like - 1-d array of polynomial coefficients ordered from low to high. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change - of variable. (Default: 1) - - Returns - ------- - der : ndarray - Polynomial of the derivative. 
- - See Also - -------- - polyint - - Examples - -------- - >>> from numpy import polynomial as P - >>> cs = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 - >>> P.polyder(cs) # (d/dx)(cs) = 2 + 6x + 12x**2 - array([ 2., 6., 12.]) - >>> P.polyder(cs,3) # (d**3/dx**3)(cs) = 24 - array([ 24.]) - >>> P.polyder(cs,scl=-1) # (d/d(-x))(cs) = -2 - 6x - 12x**2 - array([ -2., -6., -12.]) - >>> P.polyder(cs,2,-1) # (d**2/d(-x)**2)(cs) = 6 + 24x - array([ 6., 24.]) - - """ - cnt = int(m) - - if cnt != m: - raise ValueError, "The order of derivation must be integer" - if cnt < 0: - raise ValueError, "The order of derivation must be non-negative" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - elif cnt >= len(cs): - return cs[:1]*0 - else : - n = len(cs) - d = np.arange(n)*scl - for i in range(cnt): - cs[i:] *= d[:n-i] - return cs[i+1:].copy() - -def polyint(cs, m=1, k=[], lbnd=0, scl=1): - """ - Integrate a polynomial. - - Returns the polynomial `cs`, integrated `m` times from `lbnd` to `x`. - At each iteration the resulting series is **multiplied** by `scl` and - an integration constant, `k`, is added. The scaling factor is for use - in a linear change of variable. ("Buyer beware": note that, depending - on what one is doing, one may want `scl` to be the reciprocal of what - one might expect; for more information, see the Notes section below.) - The argument `cs` is a sequence of coefficients, from lowest order - term to highest, e.g., [1,2,3] represents the polynomial - ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - cs : array_like - 1-d array of polynomial coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at zero - is the first value in the list, the value of the second integral - at zero is the second value, etc. If ``k == []`` (the default), - all constants are set to zero. 
If ``m == 1``, a single scalar can - be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - - Returns - ------- - S : ndarray - Coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 1``, ``len(k) > m``. - - See Also - -------- - polyder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - - perhaps not what one would have first thought. - - Examples - -------- - >>> from numpy import polynomial as P - >>> cs = (1,2,3) - >>> P.polyint(cs) # should return array([0, 1, 1, 1]) - array([ 0., 1., 1., 1.]) - >>> P.polyint(cs,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) - array([ 0. , 0. , 0. 
, 0.16666667, 0.08333333, - 0.05 ]) - >>> P.polyint(cs,k=3) # should return array([3, 1, 1, 1]) - array([ 3., 1., 1., 1.]) - >>> P.polyint(cs,lbnd=-2) # should return array([6, 1, 1, 1]) - array([ 6., 1., 1., 1.]) - >>> P.polyint(cs,scl=-2) # should return array([0, -2, -2, -2]) - array([ 0., -2., -2., -2.]) - - """ - cnt = int(m) - if not np.iterable(k): - k = [k] - - if cnt != m: - raise ValueError, "The order of integration must be integer" - if cnt < 0 : - raise ValueError, "The order of integration must be non-negative" - if len(k) > cnt : - raise ValueError, "Too many integration constants" - - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if cnt == 0: - return cs - - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(cs) - cs *= scl - if n == 1 and cs[0] == 0: - cs[0] += k[i] - else: - tmp = np.empty(n + 1, dtype=cs.dtype) - tmp[0] = cs[0]*0 - tmp[1:] = cs/np.arange(1, n + 1) - tmp[0] += k[i] - polyval(lbnd, tmp) - cs = tmp - return cs - -def polyval(x, cs): - """ - Evaluate a polynomial. - - If `cs` is of length `n`, this function returns : - - ``p(x) = cs[0] + cs[1]*x + ... + cs[n-1]*x**(n-1)`` - - If x is a sequence or array then p(x) will have the same shape as x. - If r is a ring_like object that supports multiplication and addition - by the values in `cs`, then an object of the same type is returned. - - Parameters - ---------- - x : array_like, ring_like - If x is a list or tuple, it is converted to an ndarray. Otherwise - it must support addition and multiplication with itself and the - elements of `cs`. - cs : array_like - 1-d array of Chebyshev coefficients ordered from low to high. - - Returns - ------- - values : ndarray - The return array has the same shape as `x`. - - See Also - -------- - polyfit - - Notes - ----- - The evaluation uses Horner's method. 
- - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if isinstance(x, tuple) or isinstance(x, list) : - x = np.asarray(x) - - c0 = cs[-1] + x*0 - for i in range(2, len(cs) + 1) : - c0 = cs[-i] + c0*x - return c0 - -def polyvander(x, deg) : - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points `x`. - This isn't a true Vandermonde matrix because `x` can be an arbitrary - ndarray. If ``V`` is the returned matrix and `x` is a 2d array, then - the elements of ``V`` are ``V[i,j,k] = x[i,j]**k`` - - Parameters - ---------- - x : array_like - Array of points. The values are converted to double or complex - doubles. If x is scalar it is converted to a 1D array. - deg : integer - Degree of the resulting matrix. - - Returns - ------- - vander : Vandermonde matrix. - The shape of the returned matrix is ``x.shape + (deg+1,)``. The last - index is the degree. - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) - v[0] = x*0 + 1 - if ideg > 0 : - v[1] = x - for i in range(2, ideg + 1) : - v[i] = v[i-1]*x - return np.rollaxis(v, 0, v.ndim) - -def polyfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least-squares fit of a polynomial to data. - - Return the coefficients of a polynomial of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, - - where `n` is `deg`. 
- - Parameters - ---------- - x : array_like, shape (`M`,) - x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. - y : array_like, shape (`M`,) or (`M`, `K`) - y-coordinates of the sample points. Several sets of sample points - sharing the same x-coordinates can be (independently) fit with one - call to `polyfit` by passing in for `y` a 2-D array that contains - one data set per column. - deg : int - Degree of the polynomial(s) to be fit. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than `rcond`, relative to the largest singular value, will be - ignored. The default value is ``len(x)*eps``, where `eps` is the - relative precision of the platform's float type, about 2e-16 in - most cases. - full : bool, optional - Switch determining the nature of the return value. When ``False`` - (the default) just the coefficients are returned; when ``True``, - diagnostic information from the singular value decomposition (used - to solve the fit's matrix equation) is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) - Polynomial coefficients ordered from low to high. If `y` was 2-D, - the coefficients in column `k` of `coef` represent the polynomial - fit to the data in `y`'s `k`-th column. - - [residuals, rank, singular_values, rcond] : present when `full` == True - Sum of the squared residuals (SSR) of the least-squares fit; the - effective rank of the scaled Vandermonde matrix; its singular - values; and the specified value of `rcond`. For more information, - see `linalg.lstsq`. 
- - Raises - ------ - RankWarning - Raised if the matrix in the least-squares fit is rank deficient. - The warning is only raised if `full` == False. The warnings can - be turned off by: - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, lagfit, hermfit, hermefit - polyval : Evaluates a polynomial. - polyvander : Vandermonde matrix for powers. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the polynomial `p` that minimizes - the sum of the weighted squared errors - - .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) over-determined matrix equation: - - .. math :: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected (and `full` == ``False``), a `RankWarning` will be raised. - This means that the coefficient values may be poorly determined. - Fitting to a lower order polynomial will usually get rid of the warning - (but may not be what you want, of course; if you have independent - reason(s) for choosing the degree which isn't working, you may have to: - a) reconsider those reasons, and/or b) reconsider the quality of your - data). The `rcond` parameter can also be set to a value smaller than - its default, but the resulting fit may be spurious and have large - contributions from roundoff error. - - Polynomial fits using double precision tend to "fail" at about - (polynomial) degree 20. 
Fits using Chebyshev or Legendre series are - generally better conditioned, but much can still depend on the - distribution of the sample points and the smoothness of the data. If - the quality of the fit is inadequate, splines may be a good - alternative. - - Examples - -------- - >>> from numpy import polynomial as P - >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] - >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" - >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 - array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) - >>> stats # note the large SSR, explaining the rather poor results - [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, - 0.28853036]), 1.1324274851176597e-014] - - Same thing without the added noise - - >>> y = x**3 - x - >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 - array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, - 1.00000000e+00]) - >>> stats # note the minuscule SSR - [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, - 0.50443316, 0.28853036]), 1.1324274851176597e-014] - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. 
- if deg < 0 : - raise ValueError, "expected deg >= 0" - if x.ndim != 1: - raise TypeError, "expected 1D vector for x" - if x.size == 0: - raise TypeError, "expected non-empty vector for x" - if y.ndim < 1 or y.ndim > 2 : - raise TypeError, "expected 1D or 2D array for y" - if len(x) != len(y): - raise TypeError, "expected x and y to have same length" - - # set up the least squares matrices - lhs = polyvander(x, deg) - rhs = y - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError, "expected 1D vector for w" - if len(x) != len(w): - raise TypeError, "expected x and w to have same length" - # apply weights - if rhs.ndim == 2: - lhs *= w[:, np.newaxis] - rhs *= w[:, np.newaxis] - else: - lhs *= w[:, np.newaxis] - rhs *= w - - # set rcond - if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps - - # scale the design matrix and solve the least squares equation - scl = np.sqrt((lhs*lhs).sum(0)) - c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full : - return c, [resids, rank, s, rcond] - else : - return c - - -def polycompanion(cs): - """Return the companion matrix of cs. - - - Parameters - ---------- - cs : array_like - 1-d array of series coefficients ordered from low to high degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2 : - raise ValueError('Series must have maximum degree of at least 1.') - if len(cs) == 2: - return np.array(-cs[0]/cs[1]) - - n = len(cs) - 1 - mat = np.zeros((n, n), dtype=cs.dtype) - bot = mat.reshape(-1)[n::n+1] - bot[...] = 1 - mat[:,-1] -= cs[:-1]/cs[-1] - return mat - - -def polyroots(cs): - """ - Compute the roots of a polynomial. - - Return the roots (a.k.a. 
"zeros") of the "polynomial" `cs`, the - polynomial's coefficients from lowest order term to highest - (e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``). - - Parameters - ---------- - cs : array_like of shape (M,) - 1-d array of polynomial coefficients ordered from low to high. - - Returns - ------- - out : ndarray - Array of the roots of the polynomial. If all the roots are real, - then so is the dtype of ``out``; otherwise, ``out``'s dtype is - complex. - - See Also - -------- - chebroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the power series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. 
- - Examples - -------- - >>> import numpy.polynomial.polynomial as poly - >>> poly.polyroots(poly.polyfromroots((-1,0,1))) - array([-1., 0., 1.]) - >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype - dtype('float64') - >>> j = complex(0,1) - >>> poly.polyroots(poly.polyfromroots((-j,0,j))) - array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) - - """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if len(cs) < 2: - return np.array([], dtype=cs.dtype) - if len(cs) == 2: - return np.array([-cs[0]/cs[1]]) - - m = polycompanion(cs) - r = la.eigvals(m) - r.sort() - return r - - -# -# polynomial class -# - -exec polytemplate.substitute(name='Polynomial', nick='poly', domain='[-1,1]') - diff --git a/numpy-1.6.2/numpy/polynomial/polytemplate.py b/numpy-1.6.2/numpy/polynomial/polytemplate.py deleted file mode 100644 index e390d3d778..0000000000 --- a/numpy-1.6.2/numpy/polynomial/polytemplate.py +++ /dev/null @@ -1,847 +0,0 @@ -""" -Template for the Chebyshev and Polynomial classes. - -This module houses a Python string module Template object (see, e.g., -http://docs.python.org/library/string.html#template-strings) used by -the `polynomial` and `chebyshev` modules to implement their respective -`Polynomial` and `Chebyshev` classes. It provides a mechanism for easily -creating additional specific polynomial classes (e.g., Legendre, Jacobi, -etc.) in the future, such that all these classes will have a common API. - -""" -import string -import sys - -if sys.version_info[0] >= 3: - rel_import = "from . import" -else: - rel_import = "import" - -polytemplate = string.Template(''' -from __future__ import division -import numpy as np -import warnings -REL_IMPORT polyutils as pu - -class $name(pu.PolyBase) : - """A $name series class. - - $name instances provide the standard Python numerical methods '+', - '-', '*', '//', '%', 'divmod', '**', and '()' as well as the listed - methods. 
- - Parameters - ---------- - coef : array_like - $name coefficients, in increasing order. For example, - ``(1, 2, 3)`` implies ``P_0 + 2P_1 + 3P_2`` where the - ``P_i`` are a graded polynomial basis. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to - the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is $domain. - window : (2,) array_like, optional - Window, see ``domain`` for its use. The default value is $domain. - .. versionadded:: 1.6.0 - - Attributes - ---------- - coef : (N,) array - $name coefficients, from low to high. - domain : (2,) array - Domain that is mapped to ``window``. - window : (2,) array - Window that ``domain`` is mapped to. - - Class Attributes - ---------------- - maxpower : int - Maximum power allowed, i.e., the largest number ``n`` such that - ``p(x)**n`` is allowed. This is to limit runaway polynomial size. - domain : (2,) ndarray - Default domain of the class. - window : (2,) ndarray - Default window of the class. - - Notes - ----- - It is important to specify the domain in many cases, for instance in - fitting data, because many of the important properties of the - polynomial basis only hold in a specified interval and consequently - the data must be mapped into that interval in order to benefit. - - Examples - -------- - - """ - # Limit runaway size. T_n^m has degree n*2^m - maxpower = 16 - # Default domain - domain = np.array($domain) - # Default window - window = np.array($domain) - # Don't let participate in array operations. Value doesn't matter. - __array_priority__ = 0 - - def has_samecoef(self, other): - """Check if coefficients match. - - Parameters - ---------- - other : class instance - The other class must have the ``coef`` attribute. - - Returns - ------- - bool : boolean - True if the coefficients are the same, False otherwise. - - Notes - ----- - .. 
versionadded:: 1.6.0 - - """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True - - def has_samedomain(self, other): - """Check if domains match. - - Parameters - ---------- - other : class instance - The other class must have the ``domain`` attribute. - - Returns - ------- - bool : boolean - True if the domains are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - return np.all(self.domain == other.domain) - - def has_samewindow(self, other): - """Check if windows match. - - Parameters - ---------- - other : class instance - The other class must have the ``window`` attribute. - - Returns - ------- - bool : boolean - True if the windows are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - return np.all(self.window == other.window) - - def has_sametype(self, other): - """Check if types match. - - Parameters - ---------- - other : object - Class instance. - - Returns - ------- - bool : boolean - True if other is same class as self - - Notes - ----- - .. 
versionadded:: 1.7.0 - - """ - return isinstance(other, self.__class__) - - def __init__(self, coef, domain=$domain, window=$domain) : - [coef, dom, win] = pu.as_series([coef, domain, window], trim=False) - if len(dom) != 2 : - raise ValueError("Domain has wrong number of elements.") - if len(win) != 2 : - raise ValueError("Window has wrong number of elements.") - self.coef = coef - self.domain = dom - self.window = win - - def __repr__(self): - format = "%s(%s, %s, %s)" - coef = repr(self.coef)[6:-1] - domain = repr(self.domain)[6:-1] - window = repr(self.window)[6:-1] - return format % ('$name', coef, domain, window) - - def __str__(self) : - format = "%s(%s)" - coef = str(self.coef) - return format % ('$nick', coef) - - # Pickle and copy - - def __getstate__(self) : - ret = self.__dict__.copy() - ret['coef'] = self.coef.copy() - ret['domain'] = self.domain.copy() - ret['window'] = self.window.copy() - return ret - - def __setstate__(self, dict) : - self.__dict__ = dict - - # Call - - def __call__(self, arg) : - off, scl = pu.mapparms(self.domain, self.window) - arg = off + scl*arg - return ${nick}val(arg, self.coef) - - def __iter__(self) : - return iter(self.coef) - - def __len__(self) : - return len(self.coef) - - # Numeric properties. 
- - def __neg__(self) : - return self.__class__(-self.coef, self.domain, self.window) - - def __pos__(self) : - return self - - def __add__(self, other) : - """Returns sum""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}add(self.coef, other.coef) - else : - try : - coef = ${nick}add(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __sub__(self, other) : - """Returns difference""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}sub(self.coef, other.coef) - else : - try : - coef = ${nick}sub(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __mul__(self, other) : - """Returns product""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}mul(self.coef, other.coef) - else : - try : - coef = ${nick}mul(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __div__(self, other): - # set to __floordiv__, /, for now. - return self.__floordiv__(other) - - def __truediv__(self, other) : - # there is no true divide if the rhs is not a scalar, although it - # could return the first n elements of an infinite series. - # It is hard to see where n would come from, though. 
- if np.isscalar(other) : - # this might be overly restrictive - coef = self.coef/other - return self.__class__(coef, self.domain, self.window) - else : - return NotImplemented - - def __floordiv__(self, other) : - """Returns the quotient.""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __mod__(self, other) : - """Returns the remainder.""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - return self.__class__(rem, self.domain, self.window) - - def __divmod__(self, other) : - """Returns quo, remainder""" - if isinstance(other, self.__class__) : - if not self.has_samedomain(other): - raise TypeError("Domains are not equal") - elif not self.has_samewindow(other): - raise TypeError("Windows are not equal") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - def __pow__(self, other) : - try : - coef = ${nick}pow(self.coef, other, maxpower = self.maxpower) - except : - raise - return self.__class__(coef, self.domain, self.window) - - def __radd__(self, other) : 
- try : - coef = ${nick}add(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rsub__(self, other): - try : - coef = ${nick}sub(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rmul__(self, other) : - try : - coef = ${nick}mul(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - - def __rtruediv__(self, other) : - # there is no true divide if the rhs is not a scalar, although it - # could return the first n elements of an infinite series. - # It is hard to see where n would come from, though. - if len(self.coef) == 1 : - try : - quo, rem = ${nick}div(other, self.coef[0]) - except : - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __rfloordiv__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __rmod__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - return self.__class__(rem, self.domain, self.window) - - def __rdivmod__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - # Enhance me - # some augmented arithmetic operations could be added here - - def __eq__(self, other) : - res = isinstance(other, self.__class__) \ - and self.has_samecoef(other) \ - and self.has_samedomain(other) \ - and self.has_samewindow(other) - return res - - def __ne__(self, other) : - return not self.__eq__(other) - - # - # Extra methods. - # - - def copy(self) : - """Return a copy. - - Return a copy of the current $name instance. 
- - Returns - ------- - new_instance : $name - Copy of current instance. - - """ - return self.__class__(self.coef, self.domain, self.window) - - def degree(self) : - """The degree of the series. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - return len(self) - 1 - - def cutdeg(self, deg) : - """Truncate series to the given degree. - - Reduce the degree of the $name series to `deg` by discarding the - high order terms. If `deg` is greater than the current degree a - copy of the current series is returned. This can be useful in least - squares where the coefficients of the high degree terms may be very - small. - - Parameters - ---------- - deg : non-negative int - The series is reduced to degree `deg` by discarding the high - order terms. The value of `deg` must be a non-negative integer. - - Returns - ------- - new_instance : $name - New instance of $name with reduced degree. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - return self.truncate(deg + 1) - - def trim(self, tol=0) : - """Remove small leading coefficients - - Remove leading coefficients until a coefficient is reached whose - absolute value greater than `tol` or the beginning of the series is - reached. If all the coefficients would be removed the series is set to - ``[0]``. A new $name instance is returned with the new coefficients. - The current instance remains unchanged. - - Parameters - ---------- - tol : non-negative number. - All trailing coefficients less than `tol` will be removed. - - Returns - ------- - new_instance : $name - Contains the new set of coefficients. - - """ - coef = pu.trimcoef(self.coef, tol) - return self.__class__(coef, self.domain, self.window) - - def truncate(self, size) : - """Truncate series to length `size`. - - Reduce the $name series to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. This - can be useful in least squares where the coefficients of the - high degree terms may be very small. 
- - Parameters - ---------- - size : positive int - The series is reduced to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. - - Returns - ------- - new_instance : $name - New instance of $name with truncated coefficients. - - """ - isize = int(size) - if isize != size or isize < 1 : - raise ValueError("size must be a positive integer") - if isize >= len(self.coef) : - coef = self.coef - else : - coef = self.coef[:isize] - return self.__class__(coef, self.domain, self.window) - - def convert(self, domain=None, kind=None, window=None) : - """Convert to different class and/or domain. - - Parameters - ---------- - domain : array_like, optional - The domain of the converted series. If the value is None, - the default domain of `kind` is used. - kind : class, optional - The polynomial series type class to which the current instance - should be converted. If kind is None, then the class of the - current instance is used. - window : array_like, optional - The window of the converted series. If the value is None, - the default window of `kind` is used. - - Returns - ------- - new_series_instance : `kind` - The returned class can be of different type than the current - instance and/or have a different domain. - - Notes - ----- - Conversion between domains and class types can result in - numerically ill defined series. - - Examples - -------- - - """ - if kind is None: - kind = $name - if domain is None: - domain = kind.domain - if window is None: - window = kind.window - return self(kind.identity(domain, window=window)) - - def mapparms(self) : - """Return the mapping parameters. - - The returned values define a linear map ``off + scl*x`` that is - applied to the input arguments before the series is evaluated. The - map depends on the ``domain`` and ``window``; if the current - ``domain`` is equal to the ``window`` the resulting map is the - identity. 
If the coeffients of the ``$name`` instance are to be - used by themselves outside this class, then the linear function - must be substituted for the ``x`` in the standard representation of - the base polynomials. - - Returns - ------- - off, scl : floats or complex - The mapping function is defined by ``off + scl*x``. - - Notes - ----- - If the current domain is the interval ``[l_1, r_1]`` and the window - is ``[l_2, r_2]``, then the linear mapping function ``L`` is - defined by the equations:: - - L(l_1) = l_2 - L(r_1) = r_2 - - """ - return pu.mapparms(self.domain, self.window) - - def integ(self, m=1, k=[], lbnd=None) : - """Integrate. - - Return an instance of $name that is the definite integral of the - current series. Refer to `${nick}int` for full documentation. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - k : array_like - Integration constants. The first constant is applied to the - first integration, the second to the second, and so on. The - list of values must less than or equal to `m` in length and any - missing values are set to zero. - lbnd : Scalar - The lower bound of the definite integral. - - Returns - ------- - integral : $name - The integral of the series using the same domain. - - See Also - -------- - ${nick}int : similar function. - ${nick}der : similar function for derivative. - - """ - off, scl = self.mapparms() - if lbnd is None : - lbnd = 0 - else : - lbnd = off + scl*lbnd - coef = ${nick}int(self.coef, m, k, lbnd, 1./scl) - return self.__class__(coef, self.domain, self.window) - - def deriv(self, m=1): - """Differentiate. - - Return an instance of $name that is the derivative of the current - series. Refer to `${nick}der` for full documentation. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - - Returns - ------- - derivative : $name - The derivative of the series using the same domain. - - See Also - -------- - ${nick}der : similar function. 
- ${nick}int : similar function for integration. - - """ - off, scl = self.mapparms() - coef = ${nick}der(self.coef, m, scl) - return self.__class__(coef, self.domain, self.window) - - def roots(self) : - """Return list of roots. - - Return ndarray of roots for this series. See `${nick}roots` for - full documentation. Note that the accuracy of the roots is likely to - decrease the further outside the domain they lie. - - See Also - -------- - ${nick}roots : similar function - ${nick}fromroots : function to go generate series from roots. - - """ - roots = ${nick}roots(self.coef) - return pu.mapdomain(roots, self.window, self.domain) - - def linspace(self, n=100, domain=None): - """Return x,y values at equally spaced points in domain. - - Returns x, y values at `n` equally spaced points across domain. - Here y is the value of the polynomial at the points x. This is - intended as a plotting aid. - - Parameters - ---------- - n : int, optional - Number of point pairs to return. The default value is 100. - - Returns - ------- - x, y : ndarrays - ``x`` is equal to linspace(self.domain[0], self.domain[1], n) - ``y`` is the polynomial evaluated at ``x``. - - .. versionadded:: 1.5.0 - - """ - if domain is None: - domain = self.domain - x = np.linspace(domain[0], domain[1], n) - y = self(x) - return x, y - - - - @staticmethod - def fit(x, y, deg, domain=None, rcond=None, full=False, w=None, - window=$domain): - """Least squares fit to data. - - Return a `$name` instance that is the least squares fit to the data - `y` sampled at `x`. Unlike `${nick}fit`, the domain of the returned - instance can be specified and this will often result in a superior - fit with less chance of ill conditioning. See `${nick}fit` for full - documentation of the implementation. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. 
Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial. - domain : {None, [beg, end], []}, optional - Domain to use for the returned $name instance. If ``None``, - then a minimal domain that covers the points `x` is chosen. If - ``[]`` the default domain ``$domain`` is used. The default - value is $domain in numpy 1.4.x and ``None`` in later versions. - The ``'[]`` value was added in numpy 1.5.0. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than this relative to the largest singular value will be - ignored. The default value is len(x)*eps, where eps is the - relative precision of the float type, about 2e-16 in most - cases. - full : bool, optional - Switch determining nature of return value. When it is False - (the default) just the coefficients are returned, when True - diagnostic information from the singular value decomposition is - also returned. - w : array_like, shape (M,), optional - Weights. If not None the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products - ``w[i]*y[i]`` all have the same variance. The default value is - None. - .. versionadded:: 1.5.0 - window : {[beg, end]}, optional - Window to use for the returned $name instance. The default - value is ``$domain`` - .. versionadded:: 1.6.0 - - Returns - ------- - least_squares_fit : instance of $name - The $name instance is the least squares fit to the data and - has the domain specified in the call. - - [residuals, rank, singular_values, rcond] : only if `full` = True - Residuals of the least-squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see - `linalg.lstsq`. 
- - See Also - -------- - ${nick}fit : similar function - - """ - if domain is None: - domain = pu.getdomain(x) - elif domain == []: - domain = $domain - - if window == []: - window = $domain - - xnew = pu.mapdomain(x, domain, window) - res = ${nick}fit(xnew, y, deg, w=w, rcond=rcond, full=full) - if full : - [coef, status] = res - return $name(coef, domain=domain, window=window), status - else : - coef = res - return $name(coef, domain=domain, window=window) - - @staticmethod - def fromroots(roots, domain=$domain, window=$domain) : - """Return $name instance with specified roots. - - Returns an instance of $name representing the product - ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is the - list of roots. - - Parameters - ---------- - roots : array_like - List of roots. - - Returns - ------- - object : $name - Series with the specified roots. - - See Also - -------- - ${nick}fromroots : equivalent function - - """ - if domain is None : - domain = pu.getdomain(roots) - rnew = pu.mapdomain(roots, domain, window) - coef = ${nick}fromroots(rnew) - return $name(coef, domain=domain, window=window) - - @staticmethod - def identity(domain=$domain, window=$domain) : - """Identity function. - - If ``p`` is the returned $name object, then ``p(x) == x`` for all - values of x. - - Parameters - ---------- - domain : array_like - The resulting array must be if the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. - window : array_like - The resulting array must be if the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. 
- - Returns - ------- - identity : $name object - - """ - off, scl = pu.mapparms(window, domain) - coef = ${nick}line(off, scl) - return $name(coef, domain, window) -'''.replace('REL_IMPORT', rel_import)) diff --git a/numpy-1.6.2/numpy/polynomial/polyutils.py b/numpy-1.6.2/numpy/polynomial/polyutils.py deleted file mode 100644 index 25d50837a3..0000000000 --- a/numpy-1.6.2/numpy/polynomial/polyutils.py +++ /dev/null @@ -1,394 +0,0 @@ -""" -Utililty objects for the polynomial modules. - -This module provides: error and warning objects; a polynomial base class; -and some routines used in both the `polynomial` and `chebyshev` modules. - -Error objects -------------- -- `PolyError` -- base class for this sub-package's errors. -- `PolyDomainError` -- raised when domains are "mismatched." - -Warning objects ---------------- -- `RankWarning` -- raised by a least-squares fit when a rank-deficient - matrix is encountered. - -Base class ----------- -- `PolyBase` -- The base class for the `Polynomial` and `Chebyshev` - classes. - -Functions ---------- -- `as_series` -- turns a list of array_likes into 1-D arrays of common - type. -- `trimseq` -- removes trailing zeros. -- `trimcoef` -- removes trailing coefficients that are less than a given - magnitude (thereby removing the corresponding terms). -- `getdomain` -- returns a domain appropriate for a given set of abscissae. -- `mapdomain` -- maps points between domains. -- `mapparms` -- parameters of the linear map between domains. 
- -""" -from __future__ import division - -__all__ = ['RankWarning', 'PolyError', 'PolyDomainError', 'PolyBase', - 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', - 'mapparms'] - -import warnings -import numpy as np -import sys - -# -# Warnings and Exceptions -# - -class RankWarning(UserWarning) : - """Issued by chebfit when the design matrix is rank deficient.""" - pass - -class PolyError(Exception) : - """Base class for errors in this module.""" - pass - -class PolyDomainError(PolyError) : - """Issued by the generic Poly class when two domains don't match. - - This is raised when an binary operation is passed Poly objects with - different domains. - - """ - pass - -# -# Base class for all polynomial types -# - -class PolyBase(object) : - pass - -# -# We need the any function for python < 2.5 -# -if sys.version_info[:2] < (2,5) : - def any(iterable) : - for element in iterable: - if element : - return True - return False - -# -# Helper functions to convert inputs to 1d arrays -# -def trimseq(seq) : - """Remove small Poly series coefficients. - - Parameters - ---------- - seq : sequence - Sequence of Poly series coefficients. This routine fails for - empty sequences. - - Returns - ------- - series : sequence - Subsequence with trailing zeros removed. If the resulting sequence - would be empty, return the first element. The returned sequence may - or may not be a view. - - Notes - ----- - Do not lose the type info if the sequence contains unknown objects. - - """ - if len(seq) == 0 : - return seq - else : - for i in range(len(seq) - 1, -1, -1) : - if seq[i] != 0 : - break - return seq[:i+1] - - -def as_series(alist, trim=True) : - """ - Return argument as a list of 1-d arrays. - - The returned list contains array(s) of dtype double, complex double, or - object. 
A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of - size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays - of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array - raises a Value Error if it is not first reshaped into either a 1-d or 2-d - array. - - Parameters - ---------- - a : array_like - A 1- or 2-d array_like - trim : boolean, optional - When True, trailing zeros are removed from the inputs. - When False, the inputs are passed through intact. - - Returns - ------- - [a1, a2,...] : list of 1d-arrays - A copy of the input data as a list of 1-d arrays. - - Raises - ------ - ValueError : - Raised when `as_series` cannot convert its input to 1-d arrays, or at - least one of the resulting arrays is empty. - - Examples - -------- - >>> from numpy import polynomial as P - >>> a = np.arange(4) - >>> P.as_series(a) - [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] - >>> b = np.arange(6).reshape((2,3)) - >>> P.as_series(b) - [array([ 0., 1., 2.]), array([ 3., 4., 5.])] - - """ - arrays = [np.array(a, ndmin=1, copy=0) for a in alist] - if min([a.size for a in arrays]) == 0 : - raise ValueError("Coefficient array is empty") - if any([a.ndim != 1 for a in arrays]) : - raise ValueError("Coefficient array is not 1-d") - if trim : - arrays = [trimseq(a) for a in arrays] - - if any([a.dtype == np.dtype(object) for a in arrays]) : - ret = [] - for a in arrays : - if a.dtype != np.dtype(object) : - tmp = np.empty(len(a), dtype=np.dtype(object)) - tmp[:] = a[:] - ret.append(tmp) - else : - ret.append(a.copy()) - else : - try : - dtype = np.common_type(*arrays) - except : - raise ValueError("Coefficient arrays have no common type") - ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] - return ret - - -def trimcoef(c, tol=0) : - """ - Remove "small" "trailing" coefficients from a polynomial. 
- - "Small" means "small in absolute value" and is controlled by the - parameter `tol`; "trailing" means highest order coefficient(s), e.g., in - ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) - both the 3-rd and 4-th order coefficients would be "trimmed." - - Parameters - ---------- - c : array_like - 1-d array of coefficients, ordered from lowest order to highest. - tol : number, optional - Trailing (i.e., highest order) elements with absolute value less - than or equal to `tol` (default value is zero) are removed. - - Returns - ------- - trimmed : ndarray - 1-d array with trailing zeros removed. If the resulting series - would be empty, a series containing a single zero is returned. - - Raises - ------ - ValueError - If `tol` < 0 - - See Also - -------- - trimseq - - Examples - -------- - >>> from numpy import polynomial as P - >>> P.trimcoef((0,0,3,0,5,0,0)) - array([ 0., 0., 3., 0., 5.]) - >>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed - array([ 0.]) - >>> i = complex(0,1) # works for complex - >>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) - array([ 0.0003+0.j , 0.0010-0.001j]) - - """ - if tol < 0 : - raise ValueError("tol must be non-negative") - - [c] = as_series([c]) - [ind] = np.where(np.abs(c) > tol) - if len(ind) == 0 : - return c[:1]*0 - else : - return c[:ind[-1] + 1].copy() - -def getdomain(x) : - """ - Return a domain suitable for given abscissae. - - Find a domain suitable for a polynomial or Chebyshev series - defined at the values supplied. - - Parameters - ---------- - x : array_like - 1-d array of abscissae whose domain will be determined. - - Returns - ------- - domain : ndarray - 1-d array containing two values. If the inputs are complex, then - the two returned points are the lower left and upper right corners - of the smallest rectangle (aligned with the axes) in the complex - plane containing the points `x`. 
If the inputs are real, then the - two points are the ends of the smallest interval containing the - points `x`. - - See Also - -------- - mapparms, mapdomain - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> points = np.arange(4)**2 - 5; points - array([-5, -4, -1, 4]) - >>> pu.getdomain(points) - array([-5., 4.]) - >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle - >>> pu.getdomain(c) - array([-1.-1.j, 1.+1.j]) - - """ - [x] = as_series([x], trim=False) - if x.dtype.char in np.typecodes['Complex'] : - rmin, rmax = x.real.min(), x.real.max() - imin, imax = x.imag.min(), x.imag.max() - return np.array((complex(rmin, imin), complex(rmax, imax))) - else : - return np.array((x.min(), x.max())) - -def mapparms(old, new) : - """ - Linear map parameters between domains. - - Return the parameters of the linear map ``offset + scale*x`` that maps - `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. - - Parameters - ---------- - old, new : array_like - Domains. Each domain must (successfully) convert to a 1-d array - containing precisely two values. - - Returns - ------- - offset, scale : scalars - The map ``L(x) = offset + scale*x`` maps the first domain to the - second. - - See Also - -------- - getdomain, mapdomain - - Notes - ----- - Also works for complex numbers, and thus can be used to calculate the - parameters required to map any line in the complex plane to any other - line therein. - - Examples - -------- - >>> from numpy import polynomial as P - >>> P.mapparms((-1,1),(-1,1)) - (0.0, 1.0) - >>> P.mapparms((1,-1),(-1,1)) - (0.0, -1.0) - >>> i = complex(0,1) - >>> P.mapparms((-i,-1),(1,i)) - ((1+1j), (1+0j)) - - """ - oldlen = old[1] - old[0] - newlen = new[1] - new[0] - off = (old[1]*new[0] - old[0]*new[1])/oldlen - scl = newlen/oldlen - return off, scl - -def mapdomain(x, old, new) : - """ - Apply linear map to input points. 
- - The linear map ``offset + scale*x`` that maps the domain `old` to - the domain `new` is applied to the points `x`. - - Parameters - ---------- - x : array_like - Points to be mapped. If `x` is a subtype of ndarray the subtype - will be preserved. - old, new : array_like - The two domains that determine the map. Each must (successfully) - convert to 1-d arrays containing precisely two values. - - Returns - ------- - x_out : ndarray - Array of points of the same shape as `x`, after application of the - linear map between the two domains. - - See Also - -------- - getdomain, mapparms - - Notes - ----- - Effectively, this implements: - - .. math :: - x\\_out = new[0] + m(x - old[0]) - - where - - .. math :: - m = \\frac{new[1]-new[0]}{old[1]-old[0]} - - Examples - -------- - >>> from numpy import polynomial as P - >>> old_domain = (-1,1) - >>> new_domain = (0,2*np.pi) - >>> x = np.linspace(-1,1,6); x - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) - >>> x_out = P.mapdomain(x, old_domain, new_domain); x_out - array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, - 6.28318531]) - >>> x - P.mapdomain(x_out, new_domain, old_domain) - array([ 0., 0., 0., 0., 0., 0.]) - - Also works for complex numbers (and thus can be used to map any line in - the complex plane to any other line therein). 
- - >>> i = complex(0,1) - >>> old = (-1 - i, 1 + i) - >>> new = (-1 + i, 1 - i) - >>> z = np.linspace(old[0], old[1], 6); z - array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ]) - >>> new_z = P.mapdomain(z, old, new); new_z - array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) - - """ - x = np.asanyarray(x) - off, scl = mapparms(old, new) - return off + scl*x diff --git a/numpy-1.6.2/numpy/polynomial/setup.py b/numpy-1.6.2/numpy/polynomial/setup.py deleted file mode 100644 index 173fd126cf..0000000000 --- a/numpy-1.6.2/numpy/polynomial/setup.py +++ /dev/null @@ -1,11 +0,0 @@ - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('polynomial',parent_package,top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_chebyshev.py b/numpy-1.6.2/numpy/polynomial/tests/test_chebyshev.py deleted file mode 100644 index 21e4728bff..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_chebyshev.py +++ /dev/null @@ -1,570 +0,0 @@ -"""Tests for chebyshev module. 
- -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.chebyshev as ch -from numpy.testing import * - -def trim(x) : - return ch.chebtrim(x, tol=1e-6) - -T0 = [ 1] -T1 = [ 0, 1] -T2 = [-1, 0, 2] -T3 = [ 0, -3, 0, 4] -T4 = [ 1, 0, -8, 0, 8] -T5 = [ 0, 5, 0, -20, 0, 16] -T6 = [-1, 0, 18, 0, -48, 0, 32] -T7 = [ 0, -7, 0, 56, 0, -112, 0, 64] -T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128] -T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256] - -Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] - - -class TestPrivate(TestCase) : - - def test__cseries_to_zseries(self) : - for i in range(5) : - inp = np.array([2] + [1]*i, np.double) - tgt = np.array([.5]*i + [2] + [.5]*i, np.double) - res = ch._cseries_to_zseries(inp) - assert_equal(res, tgt) - - def test__zseries_to_cseries(self) : - for i in range(5) : - inp = np.array([.5]*i + [2] + [.5]*i, np.double) - tgt = np.array([2] + [1]*i, np.double) - res = ch._zseries_to_cseries(inp) - assert_equal(res, tgt) - - -class TestConstants(TestCase) : - - def test_chebdomain(self) : - assert_equal(ch.chebdomain, [-1, 1]) - - def test_chebzero(self) : - assert_equal(ch.chebzero, [0]) - - def test_chebone(self) : - assert_equal(ch.chebone, [1]) - - def test_chebx(self) : - assert_equal(ch.chebx, [0, 1]) - - -class TestArithmetic(TestCase) : - - def test_chebadd(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = ch.chebadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebsub(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = ch.chebsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebmulx(self): - assert_equal(ch.chebmulx([0]), [0]) - assert_equal(ch.chebmulx([1]), [0,1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 
1) + [.5, 0, .5] - assert_equal(ch.chebmulx(ser), tgt) - - def test_chebmul(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(i + j + 1) - tgt[i + j] += .5 - tgt[abs(i - j)] += .5 - res = ch.chebmul([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebdiv(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = ch.chebadd(ci, cj) - quo, rem = ch.chebdiv(tgt, ci) - res = ch.chebadd(ch.chebmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebval(self) : - def f(x) : - return x*(x**2 - 1) - - #check empty input - assert_equal(ch.chebval([], [1]).size, 0) - - #check normal input) - for i in range(5) : - tgt = 1 - res = ch.chebval(1, [0]*i + [1]) - assert_almost_equal(res, tgt) - tgt = (-1)**i - res = ch.chebval(-1, [0]*i + [1]) - assert_almost_equal(res, tgt) - zeros = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = 0 - res = ch.chebval(zeros, [0]*i + [1]) - assert_almost_equal(res, tgt) - x = np.linspace(-1,1) - tgt = f(x) - res = ch.chebval(x, [0, -.25, 0, .25]) - assert_almost_equal(res, tgt) - - #check that shape is preserved - for i in range(3) : - dims = [2]*i - x = np.zeros(dims) - assert_equal(ch.chebval(x, [1]).shape, dims) - assert_equal(ch.chebval(x, [1,0]).shape, dims) - assert_equal(ch.chebval(x, [1,0,0]).shape, dims) - - -class TestCalculus(TestCase) : - - def test_chebint(self) : - # check exceptions - assert_raises(ValueError, ch.chebint, [0], .5) - assert_raises(ValueError, ch.chebint, [0], -1) - assert_raises(ValueError, ch.chebint, [0], 1, [0,0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = ch.chebint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - 
chebpol = ch.poly2cheb(pol) - chebint = ch.chebint(chebpol, m=1, k=[i]) - res = ch.cheb2poly(chebint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - chebpol = ch.poly2cheb(pol) - chebint = ch.chebint(chebpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(ch.chebval(-1, chebint), i) - - # check single integration with integration constant and scaling - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - chebpol = ch.poly2cheb(pol) - chebint = ch.chebint(chebpol, m=1, k=[i], scl=2) - res = ch.cheb2poly(chebint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = ch.chebint(tgt, m=1) - res = ch.chebint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = ch.chebint(tgt, m=1, k=[k]) - res = ch.chebint(pol, m=j, k=range(j)) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = ch.chebint(tgt, m=1, k=[k], lbnd=-1) - res = ch.chebint(pol, m=j, k=range(j), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = ch.chebint(tgt, m=1, k=[k], scl=2) - res = ch.chebint(pol, m=j, k=range(j), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebder(self) : - # check exceptions - assert_raises(ValueError, ch.chebder, [0], .5) - assert_raises(ValueError, ch.chebder, [0], -1) - - # check that zeroth deriviative does 
nothing - for i in range(5) : - tgt = [1] + [0]*i - res = ch.chebder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = ch.chebder(ch.chebint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = ch.chebder(ch.chebint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - -class TestMisc(TestCase) : - - def test_chebfromroots(self) : - res = ch.chebfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1,5) : - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = [0]*i + [1] - res = ch.chebfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res),trim(tgt)) - - def test_chebroots(self) : - assert_almost_equal(ch.chebroots([1]), []) - assert_almost_equal(ch.chebroots([1, 2]), [-.5]) - for i in range(2,5) : - tgt = np.linspace(-1, 1, i) - res = ch.chebroots(ch.chebfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebvander(self) : - # check for 1d x - x = np.arange(3) - v = ch.chebvander(x, 3) - assert_(v.shape == (3,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], ch.chebval(x, coef)) - - # check for 2d x - x = np.array([[1,2],[3,4],[5,6]]) - v = ch.chebvander(x, 3) - assert_(v.shape == (3,2,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], ch.chebval(x, coef)) - - def test_chebfit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, ch.chebfit, [1], [1], -1) - assert_raises(TypeError, ch.chebfit, [[1]], [1], 0) - assert_raises(TypeError, ch.chebfit, [], [1], 0) - assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0) - assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0) - assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0) - assert_raises(TypeError, 
ch.chebfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1,1]) - - # Test fit - x = np.linspace(0,2) - y = f(x) - # - coef3 = ch.chebfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(ch.chebval(x, coef3), y) - # - coef4 = ch.chebfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(ch.chebval(x, coef4), y) - # - coef2d = ch.chebfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = ch.chebfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = ch.chebfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) - - def test_chebtrim(self) : - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, ch.chebtrim, coef, -1) - - # Test results - assert_equal(ch.chebtrim(coef), coef[:-1]) - assert_equal(ch.chebtrim(coef, 1), coef[:-3]) - assert_equal(ch.chebtrim(coef, 2), [0]) - - def test_chebline(self) : - assert_equal(ch.chebline(3,4), [3, 4]) - - def test_cheb2poly(self) : - for i in range(10) : - assert_almost_equal(ch.cheb2poly([0]*i + [1]), Tlist[i]) - - def test_poly2cheb(self) : - for i in range(10) : - assert_almost_equal(ch.poly2cheb(Tlist[i]), [0]*i + [1]) - - def test_chebpts1(self): - #test exceptions - assert_raises(ValueError, ch.chebpts1, 1.5) - assert_raises(ValueError, ch.chebpts1, 0) - - #test points - tgt = [0] - assert_almost_equal(ch.chebpts1(1), tgt) - tgt = [-0.70710678118654746, 0.70710678118654746] - assert_almost_equal(ch.chebpts1(2), tgt) - tgt = [-0.86602540378443871, 0, 0.86602540378443871] - assert_almost_equal(ch.chebpts1(3), tgt) - tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] - assert_almost_equal(ch.chebpts1(4), tgt) - - - def test_chebpts2(self): - #test exceptions - assert_raises(ValueError, ch.chebpts2, 1.5) - assert_raises(ValueError, ch.chebpts2, 1) - - #test points - 
tgt = [-1, 1] - assert_almost_equal(ch.chebpts2(2), tgt) - tgt = [-1, 0, 1] - assert_almost_equal(ch.chebpts2(3), tgt) - tgt = [-1, -0.5, .5, 1] - assert_almost_equal(ch.chebpts2(4), tgt) - tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] - assert_almost_equal(ch.chebpts2(5), tgt) - - - - -class TestChebyshevClass(TestCase) : - - p1 = ch.Chebyshev([1,2,3]) - p2 = ch.Chebyshev([1,2,3], [0,1]) - p3 = ch.Chebyshev([1,2]) - p4 = ch.Chebyshev([2,2,3]) - p5 = ch.Chebyshev([3,2,3]) - - def test_equal(self) : - assert_(self.p1 == self.p1) - assert_(self.p2 == self.p2) - assert_(not self.p1 == self.p2) - assert_(not self.p1 == self.p3) - assert_(not self.p1 == [1,2,3]) - - def test_not_equal(self) : - assert_(not self.p1 != self.p1) - assert_(not self.p2 != self.p2) - assert_(self.p1 != self.p2) - assert_(self.p1 != self.p3) - assert_(self.p1 != [1,2,3]) - - def test_add(self) : - tgt = ch.Chebyshev([2,4,6]) - assert_(self.p1 + self.p1 == tgt) - assert_(self.p1 + [1,2,3] == tgt) - assert_([1,2,3] + self.p1 == tgt) - - def test_sub(self) : - tgt = ch.Chebyshev([1]) - assert_(self.p4 - self.p1 == tgt) - assert_(self.p4 - [1,2,3] == tgt) - assert_([2,2,3] - self.p1 == tgt) - - def test_mul(self) : - tgt = ch.Chebyshev([7.5, 10., 8., 6., 4.5]) - assert_(self.p1 * self.p1 == tgt) - assert_(self.p1 * [1,2,3] == tgt) - assert_([1,2,3] * self.p1 == tgt) - - def test_floordiv(self) : - tgt = ch.Chebyshev([1]) - assert_(self.p4 // self.p1 == tgt) - assert_(self.p4 // [1,2,3] == tgt) - assert_([2,2,3] // self.p1 == tgt) - - def test_mod(self) : - tgt = ch.Chebyshev([1]) - assert_((self.p4 % self.p1) == tgt) - assert_((self.p4 % [1,2,3]) == tgt) - assert_(([2,2,3] % self.p1) == tgt) - - def test_divmod(self) : - tquo = ch.Chebyshev([1]) - trem = ch.Chebyshev([2]) - quo, rem = divmod(self.p5, self.p1) - assert_(quo == tquo and rem == trem) - quo, rem = divmod(self.p5, [1,2,3]) - assert_(quo == tquo and rem == trem) - quo, rem = divmod([3,2,3], self.p1) - assert_(quo == tquo and rem 
== trem) - - def test_pow(self) : - tgt = ch.Chebyshev([1]) - for i in range(5) : - res = self.p1**i - assert_(res == tgt) - tgt *= self.p1 - - def test_call(self) : - # domain = [-1, 1] - x = np.linspace(-1, 1) - tgt = 3*(2*x**2 - 1) + 2*x + 1 - assert_almost_equal(self.p1(x), tgt) - - # domain = [0, 1] - x = np.linspace(0, 1) - xx = 2*x - 1 - assert_almost_equal(self.p2(x), self.p1(xx)) - - def test_degree(self) : - assert_equal(self.p1.degree(), 2) - - def test_cutdeg(self) : - assert_raises(ValueError, self.p1.cutdeg, .5) - assert_raises(ValueError, self.p1.cutdeg, -1) - assert_equal(len(self.p1.cutdeg(3)), 3) - assert_equal(len(self.p1.cutdeg(2)), 3) - assert_equal(len(self.p1.cutdeg(1)), 2) - assert_equal(len(self.p1.cutdeg(0)), 1) - - def test_convert(self) : - x = np.linspace(-1,1) - p = self.p1.convert(domain=[0,1]) - assert_almost_equal(p(x), self.p1(x)) - - def test_mapparms(self) : - parms = self.p2.mapparms() - assert_almost_equal(parms, [-1, 2]) - - def test_trim(self) : - coef = [1, 1e-6, 1e-12, 0] - p = ch.Chebyshev(coef) - assert_equal(p.trim().coef, coef[:3]) - assert_equal(p.trim(1e-10).coef, coef[:2]) - assert_equal(p.trim(1e-5).coef, coef[:1]) - - def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) - assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) - - def test_copy(self) : - p = self.p1.copy() - assert_(self.p1 == p) - - def test_integ(self) : - p = self.p2.integ() - assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 0, scl=.5)) - p = self.p2.integ(lbnd=0) - assert_almost_equal(p(0), 0) - p = self.p2.integ(1, 1) - assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 1, scl=.5)) - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.coef, ch.chebint([1,2,3], 2, [1,2], scl=.5)) - - def test_deriv(self) : - p = self.p2.integ(2, [1, 2]) - 
assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) - assert_almost_equal(p.deriv(2).coef, self.p2.coef) - - def test_roots(self) : - p = ch.Chebyshev(ch.poly2cheb([0, -1, 0, 1]), [0, 1]) - res = p.roots() - tgt = [0, .5, 1] - assert_almost_equal(res, tgt) - - def test_linspace(self): - xdes = np.linspace(0, 1, 20) - ydes = self.p2(xdes) - xres, yres = self.p2.linspace(20) - assert_almost_equal(xres, xdes) - assert_almost_equal(yres, ydes) - - def test_fromroots(self) : - roots = [0, .5, 1] - p = ch.Chebyshev.fromroots(roots, domain=[0, 1]) - res = p.coef - tgt = ch.poly2cheb([0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - def test_fit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - x = np.linspace(0,3) - y = f(x) - - # test default value of domain - p = ch.Chebyshev.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) - - # test that fit works in given domains - p = ch.Chebyshev.fit(x, y, 3, None) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [0,3]) - p = ch.Chebyshev.fit(x, y, 3, []) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [-1, 1]) - # test that fit accepts weights. - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - p = ch.Chebyshev.fit(x, yw, 3, w=w) - assert_almost_equal(p(x), y) - - def test_identity(self) : - x = np.linspace(0,3) - p = ch.Chebyshev.identity() - assert_almost_equal(p(x), x) - p = ch.Chebyshev.identity([1,3]) - assert_almost_equal(p(x), x) -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_hermite.py b/numpy-1.6.2/numpy/polynomial/tests/test_hermite.py deleted file mode 100644 index dea32f24a7..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_hermite.py +++ /dev/null @@ -1,537 +0,0 @@ -"""Tests for hermendre module. 
- -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.hermite as herm -import numpy.polynomial.polynomial as poly -from numpy.testing import * - -H0 = np.array([ 1]) -H1 = np.array([0, 2]) -H2 = np.array([ -2, 0, 4]) -H3 = np.array([0, -12, 0, 8]) -H4 = np.array([ 12, 0, -48, 0, 16]) -H5 = np.array([0, 120, 0, -160, 0, 32]) -H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) -H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) -H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) -H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) - -Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] - - -def trim(x) : - return herm.hermtrim(x, tol=1e-6) - - -class TestConstants(TestCase) : - - def test_hermdomain(self) : - assert_equal(herm.hermdomain, [-1, 1]) - - def test_hermzero(self) : - assert_equal(herm.hermzero, [0]) - - def test_hermone(self) : - assert_equal(herm.hermone, [1]) - - def test_hermx(self) : - assert_equal(herm.hermx, [0, .5]) - - -class TestArithmetic(TestCase) : - x = np.linspace(-3, 3, 100) - y0 = poly.polyval(x, H0) - y1 = poly.polyval(x, H1) - y2 = poly.polyval(x, H2) - y3 = poly.polyval(x, H3) - y4 = poly.polyval(x, H4) - y5 = poly.polyval(x, H5) - y6 = poly.polyval(x, H6) - y7 = poly.polyval(x, H7) - y8 = poly.polyval(x, H8) - y9 = poly.polyval(x, H9) - y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9] - - def test_hermval(self) : - def f(x) : - return x*(x**2 - 1) - - #check empty input - assert_equal(herm.hermval([], [1]).size, 0) - - #check normal input) - for i in range(10) : - msg = "At i=%d" % i - ser = np.zeros - tgt = self.y[i] - res = herm.hermval(self.x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3) : - dims = [2]*i - x = np.zeros(dims) - assert_equal(herm.hermval(x, [1]).shape, dims) - assert_equal(herm.hermval(x, [1,0]).shape, dims) - assert_equal(herm.hermval(x, [1,0,0]).shape, dims) - - def test_hermadd(self) : - for i in 
range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herm.hermadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermsub(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herm.hermsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermmulx(self): - assert_equal(herm.hermmulx([0]), [0]) - assert_equal(herm.hermmulx([1]), [0,.5]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, .5] - assert_equal(herm.hermmulx(ser), tgt) - - def test_hermmul(self) : - # check values of result - for i in range(5) : - pol1 = [0]*i + [1] - val1 = herm.hermval(self.x, pol1) - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - pol2 = [0]*j + [1] - val2 = herm.hermval(self.x, pol2) - pol3 = herm.hermmul(pol1, pol2) - val3 = herm.hermval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermdiv(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = herm.hermadd(ci, cj) - quo, rem = herm.hermdiv(tgt, ci) - res = herm.hermadd(herm.hermmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestCalculus(TestCase) : - - def test_hermint(self) : - # check exceptions - assert_raises(ValueError, herm.hermint, [0], .5) - assert_raises(ValueError, herm.hermint, [0], -1) - assert_raises(ValueError, herm.hermint, [0], 1, [0,0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herm.hermint([0], m=i, k=k) - assert_almost_equal(res, [0, .5]) - - # check single integration with integration constant - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermpol = 
herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i]) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herm.hermval(-1, hermint), i) - - # check single integration with integration constant and scaling - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herm.hermint(tgt, m=1) - res = herm.hermint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herm.hermint(tgt, m=1, k=[k]) - res = herm.hermint(pol, m=j, k=range(j)) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) - res = herm.hermint(pol, m=j, k=range(j), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herm.hermint(tgt, m=1, k=[k], scl=2) - res = herm.hermint(pol, m=j, k=range(j), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermder(self) : - # check exceptions - assert_raises(ValueError, herm.hermder, [0], .5) - assert_raises(ValueError, herm.hermder, [0], -1) - - # check 
that zeroth deriviative does nothing - for i in range(5) : - tgt = [1] + [0]*i - res = herm.hermder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = herm.hermder(herm.hermint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - -class TestMisc(TestCase) : - - def test_hermfromroots(self) : - res = herm.hermfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1,5) : - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herm.hermfromroots(roots) - res = herm.hermval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herm.herm2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermroots(self) : - assert_almost_equal(herm.hermroots([1]), []) - assert_almost_equal(herm.hermroots([1, 1]), [-.5]) - for i in range(2,5) : - tgt = np.linspace(-1, 1, i) - res = herm.hermroots(herm.hermfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermvander(self) : - # check for 1d x - x = np.arange(3) - v = herm.hermvander(x, 3) - assert_(v.shape == (3,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], herm.hermval(x, coef)) - - # check for 2d x - x = np.array([[1,2],[3,4],[5,6]]) - v = herm.hermvander(x, 3) - assert_(v.shape == (3,2,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], herm.hermval(x, coef)) - - def test_hermfit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, herm.hermfit, [1], [1], -1) - assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) - assert_raises(TypeError, herm.hermfit, [], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], 
[[[1]]], 0) - assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1,1]) - - # Test fit - x = np.linspace(0,2) - y = f(x) - # - coef3 = herm.hermfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herm.hermval(x, coef3), y) - # - coef4 = herm.hermfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - # - coef2d = herm.hermfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herm.hermfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herm.hermfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) - - def test_hermtrim(self) : - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herm.hermtrim, coef, -1) - - # Test results - assert_equal(herm.hermtrim(coef), coef[:-1]) - assert_equal(herm.hermtrim(coef, 1), coef[:-3]) - assert_equal(herm.hermtrim(coef, 2), [0]) - - def test_hermline(self) : - assert_equal(herm.hermline(3,4), [3, 2]) - - def test_herm2poly(self) : - for i in range(10) : - assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) - - def test_poly2herm(self) : - for i in range(10) : - assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) - - -def assert_poly_almost_equal(p1, p2): - assert_almost_equal(p1.coef, p2.coef) - assert_equal(p1.domain, p2.domain) - - -class TestHermiteClass(TestCase) : - - p1 = herm.Hermite([1,2,3]) - p2 = herm.Hermite([1,2,3], [0,1]) - p3 = herm.Hermite([1,2]) - p4 = herm.Hermite([2,2,3]) - p5 = herm.Hermite([3,2,3]) - - def test_equal(self) : - assert_(self.p1 == self.p1) - assert_(self.p2 == self.p2) - assert_(not self.p1 == self.p2) - assert_(not self.p1 == self.p3) - assert_(not 
self.p1 == [1,2,3]) - - def test_not_equal(self) : - assert_(not self.p1 != self.p1) - assert_(not self.p2 != self.p2) - assert_(self.p1 != self.p2) - assert_(self.p1 != self.p3) - assert_(self.p1 != [1,2,3]) - - def test_add(self) : - tgt = herm.Hermite([2,4,6]) - assert_(self.p1 + self.p1 == tgt) - assert_(self.p1 + [1,2,3] == tgt) - assert_([1,2,3] + self.p1 == tgt) - - def test_sub(self) : - tgt = herm.Hermite([1]) - assert_(self.p4 - self.p1 == tgt) - assert_(self.p4 - [1,2,3] == tgt) - assert_([2,2,3] - self.p1 == tgt) - - def test_mul(self) : - tgt = herm.Hermite([ 81., 52., 82., 12., 9.]) - assert_poly_almost_equal(self.p1 * self.p1, tgt) - assert_poly_almost_equal(self.p1 * [1,2,3], tgt) - assert_poly_almost_equal([1,2,3] * self.p1, tgt) - - def test_floordiv(self) : - tgt = herm.Hermite([1]) - assert_(self.p4 // self.p1 == tgt) - assert_(self.p4 // [1,2,3] == tgt) - assert_([2,2,3] // self.p1 == tgt) - - def test_mod(self) : - tgt = herm.Hermite([1]) - assert_((self.p4 % self.p1) == tgt) - assert_((self.p4 % [1,2,3]) == tgt) - assert_(([2,2,3] % self.p1) == tgt) - - def test_divmod(self) : - tquo = herm.Hermite([1]) - trem = herm.Hermite([2]) - quo, rem = divmod(self.p5, self.p1) - assert_(quo == tquo and rem == trem) - quo, rem = divmod(self.p5, [1,2,3]) - assert_(quo == tquo and rem == trem) - quo, rem = divmod([3,2,3], self.p1) - assert_(quo == tquo and rem == trem) - - def test_pow(self) : - tgt = herm.Hermite([1]) - for i in range(5) : - res = self.p1**i - assert_(res == tgt) - tgt = tgt*self.p1 - - def test_call(self) : - # domain = [-1, 1] - x = np.linspace(-1, 1) - tgt = 3*(4*x**2 - 2) + 2*(2*x) + 1 - assert_almost_equal(self.p1(x), tgt) - - # domain = [0, 1] - x = np.linspace(0, 1) - xx = 2*x - 1 - assert_almost_equal(self.p2(x), self.p1(xx)) - - def test_degree(self) : - assert_equal(self.p1.degree(), 2) - - def test_cutdeg(self) : - assert_raises(ValueError, self.p1.cutdeg, .5) - assert_raises(ValueError, self.p1.cutdeg, -1) - 
assert_equal(len(self.p1.cutdeg(3)), 3) - assert_equal(len(self.p1.cutdeg(2)), 3) - assert_equal(len(self.p1.cutdeg(1)), 2) - assert_equal(len(self.p1.cutdeg(0)), 1) - - def test_convert(self) : - x = np.linspace(-1,1) - p = self.p1.convert(domain=[0,1]) - assert_almost_equal(p(x), self.p1(x)) - - def test_mapparms(self) : - parms = self.p2.mapparms() - assert_almost_equal(parms, [-1, 2]) - - def test_trim(self) : - coef = [1, 1e-6, 1e-12, 0] - p = herm.Hermite(coef) - assert_equal(p.trim().coef, coef[:3]) - assert_equal(p.trim(1e-10).coef, coef[:2]) - assert_equal(p.trim(1e-5).coef, coef[:1]) - - def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) - assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) - - def test_copy(self) : - p = self.p1.copy() - assert_(self.p1 == p) - - def test_integ(self) : - p = self.p2.integ() - assert_almost_equal(p.coef, herm.hermint([1,2,3], 1, 0, scl=.5)) - p = self.p2.integ(lbnd=0) - assert_almost_equal(p(0), 0) - p = self.p2.integ(1, 1) - assert_almost_equal(p.coef, herm.hermint([1,2,3], 1, 1, scl=.5)) - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.coef, herm.hermint([1,2,3], 2, [1,2], scl=.5)) - - def test_deriv(self) : - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) - assert_almost_equal(p.deriv(2).coef, self.p2.coef) - - def test_roots(self) : - p = herm.Hermite(herm.poly2herm([0, -1, 0, 1]), [0, 1]) - res = p.roots() - tgt = [0, .5, 1] - assert_almost_equal(res, tgt) - - def test_linspace(self): - xdes = np.linspace(0, 1, 20) - ydes = self.p2(xdes) - xres, yres = self.p2.linspace(20) - assert_almost_equal(xres, xdes) - assert_almost_equal(yres, ydes) - - def test_fromroots(self) : - roots = [0, .5, 1] - p = herm.Hermite.fromroots(roots, domain=[0, 1]) - res = p.coef - tgt = 
herm.poly2herm([0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - def test_fit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - x = np.linspace(0,3) - y = f(x) - - # test default value of domain - p = herm.Hermite.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) - - # test that fit works in given domains - p = herm.Hermite.fit(x, y, 3, None) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [0,3]) - p = herm.Hermite.fit(x, y, 3, []) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [-1, 1]) - # test that fit accepts weights. - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - p = herm.Hermite.fit(x, yw, 3, w=w) - assert_almost_equal(p(x), y) - - def test_identity(self) : - x = np.linspace(0,3) - p = herm.Hermite.identity() - assert_almost_equal(p(x), x) - p = herm.Hermite.identity([1,3]) - assert_almost_equal(p(x), x) -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_hermite_e.py b/numpy-1.6.2/numpy/polynomial/tests/test_hermite_e.py deleted file mode 100644 index 97f3a1c72b..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_hermite_e.py +++ /dev/null @@ -1,536 +0,0 @@ -"""Tests for hermeendre module. 
- -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.hermite_e as herme -import numpy.polynomial.polynomial as poly -from numpy.testing import * - -He0 = np.array([ 1 ]) -He1 = np.array([ 0 , 1 ]) -He2 = np.array([ -1 ,0 , 1 ]) -He3 = np.array([ 0 , -3 ,0 , 1 ]) -He4 = np.array([ 3 ,0 , -6 ,0 , 1 ]) -He5 = np.array([ 0 , 15 ,0 , -10 ,0 , 1 ]) -He6 = np.array([ -15 ,0 , 45 ,0 , -15 ,0 , 1 ]) -He7 = np.array([ 0 , -105 ,0 , 105 ,0 , -21 ,0 , 1 ]) -He8 = np.array([ 105 ,0 , -420 ,0 , 210 ,0 , -28 ,0 , 1 ]) -He9 = np.array([ 0 , 945 ,0 , -1260 ,0 , 378 ,0 , -36 ,0 , 1 ]) - -Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] - -def trim(x) : - return herme.hermetrim(x, tol=1e-6) - - -class TestConstants(TestCase) : - - def test_hermedomain(self) : - assert_equal(herme.hermedomain, [-1, 1]) - - def test_hermezero(self) : - assert_equal(herme.hermezero, [0]) - - def test_hermeone(self) : - assert_equal(herme.hermeone, [1]) - - def test_hermex(self) : - assert_equal(herme.hermex, [0, 1]) - - -class TestArithmetic(TestCase) : - x = np.linspace(-3, 3, 100) - y0 = poly.polyval(x, He0) - y1 = poly.polyval(x, He1) - y2 = poly.polyval(x, He2) - y3 = poly.polyval(x, He3) - y4 = poly.polyval(x, He4) - y5 = poly.polyval(x, He5) - y6 = poly.polyval(x, He6) - y7 = poly.polyval(x, He7) - y8 = poly.polyval(x, He8) - y9 = poly.polyval(x, He9) - y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9] - - def test_hermeval(self) : - def f(x) : - return x*(x**2 - 1) - - #check empty input - assert_equal(herme.hermeval([], [1]).size, 0) - - #check normal input) - for i in range(10) : - msg = "At i=%d" % i - ser = np.zeros - tgt = self.y[i] - res = herme.hermeval(self.x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3) : - dims = [2]*i - x = np.zeros(dims) - assert_equal(herme.hermeval(x, [1]).shape, dims) - assert_equal(herme.hermeval(x, [1,0]).shape, dims) - assert_equal(herme.hermeval(x, 
[1,0,0]).shape, dims) - - def test_hermeadd(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herme.hermeadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermesub(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herme.hermesub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermemulx(self): - assert_equal(herme.hermemulx([0]), [0]) - assert_equal(herme.hermemulx([1]), [0,1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, 1] - assert_equal(herme.hermemulx(ser), tgt) - - def test_hermemul(self) : - # check values of result - for i in range(5) : - pol1 = [0]*i + [1] - val1 = herme.hermeval(self.x, pol1) - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - pol2 = [0]*j + [1] - val2 = herme.hermeval(self.x, pol2) - pol3 = herme.hermemul(pol1, pol2) - val3 = herme.hermeval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermediv(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = herme.hermeadd(ci, cj) - quo, rem = herme.hermediv(tgt, ci) - res = herme.hermeadd(herme.hermemul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestCalculus(TestCase) : - - def test_hermeint(self) : - # check exceptions - assert_raises(ValueError, herme.hermeint, [0], .5) - assert_raises(ValueError, herme.hermeint, [0], -1) - assert_raises(ValueError, herme.hermeint, [0], 1, [0,0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herme.hermeint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in 
range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i]) - res = herme.herme2poly(hermeint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herme.hermeval(-1, hermeint), i) - - # check single integration with integration constant and scaling - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) - res = herme.herme2poly(hermeint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herme.hermeint(tgt, m=1) - res = herme.hermeint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herme.hermeint(tgt, m=1, k=[k]) - res = herme.hermeint(pol, m=j, k=range(j)) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) - res = herme.hermeint(pol, m=j, k=range(j), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) - res = herme.hermeint(pol, m=j, k=range(j), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermeder(self) : - # 
check exceptions - assert_raises(ValueError, herme.hermeder, [0], .5) - assert_raises(ValueError, herme.hermeder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5) : - tgt = [1] + [0]*i - res = herme.hermeder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = herme.hermeder(herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - -class TestMisc(TestCase) : - - def test_hermefromroots(self) : - res = herme.hermefromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1,5) : - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herme.hermefromroots(roots) - res = herme.hermeval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herme.herme2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermeroots(self) : - assert_almost_equal(herme.hermeroots([1]), []) - assert_almost_equal(herme.hermeroots([1, 1]), [-1]) - for i in range(2,5) : - tgt = np.linspace(-1, 1, i) - res = herme.hermeroots(herme.hermefromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermevander(self) : - # check for 1d x - x = np.arange(3) - v = herme.hermevander(x, 3) - assert_(v.shape == (3,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], herme.hermeval(x, coef)) - - # check for 2d x - x = np.array([[1,2],[3,4],[5,6]]) - v = herme.hermevander(x, 3) - assert_(v.shape == (3,2,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], herme.hermeval(x, coef)) - - def test_hermefit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, 
herme.hermefit, [1], [1], -1) - assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) - assert_raises(TypeError, herme.hermefit, [], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) - assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1,1]) - - # Test fit - x = np.linspace(0,2) - y = f(x) - # - coef3 = herme.hermefit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herme.hermeval(x, coef3), y) - # - coef4 = herme.hermefit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herme.hermeval(x, coef4), y) - # - coef2d = herme.hermefit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herme.hermefit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herme.hermefit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) - - def test_hermetrim(self) : - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herme.hermetrim, coef, -1) - - # Test results - assert_equal(herme.hermetrim(coef), coef[:-1]) - assert_equal(herme.hermetrim(coef, 1), coef[:-3]) - assert_equal(herme.hermetrim(coef, 2), [0]) - - def test_hermeline(self) : - assert_equal(herme.hermeline(3,4), [3, 4]) - - def test_herme2poly(self) : - for i in range(10) : - assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) - - def test_poly2herme(self) : - for i in range(10) : - assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) - - -def assert_poly_almost_equal(p1, p2): - assert_almost_equal(p1.coef, p2.coef) - assert_equal(p1.domain, p2.domain) - - -class TestHermiteEClass(TestCase) : - - p1 = herme.HermiteE([1,2,3]) - p2 = herme.HermiteE([1,2,3], [0,1]) - p3 = 
herme.HermiteE([1,2]) - p4 = herme.HermiteE([2,2,3]) - p5 = herme.HermiteE([3,2,3]) - - def test_equal(self) : - assert_(self.p1 == self.p1) - assert_(self.p2 == self.p2) - assert_(not self.p1 == self.p2) - assert_(not self.p1 == self.p3) - assert_(not self.p1 == [1,2,3]) - - def test_not_equal(self) : - assert_(not self.p1 != self.p1) - assert_(not self.p2 != self.p2) - assert_(self.p1 != self.p2) - assert_(self.p1 != self.p3) - assert_(self.p1 != [1,2,3]) - - def test_add(self) : - tgt = herme.HermiteE([2,4,6]) - assert_(self.p1 + self.p1 == tgt) - assert_(self.p1 + [1,2,3] == tgt) - assert_([1,2,3] + self.p1 == tgt) - - def test_sub(self) : - tgt = herme.HermiteE([1]) - assert_(self.p4 - self.p1 == tgt) - assert_(self.p4 - [1,2,3] == tgt) - assert_([2,2,3] - self.p1 == tgt) - - def test_mul(self) : - tgt = herme.HermiteE([ 23., 28., 46., 12., 9.]) - assert_poly_almost_equal(self.p1 * self.p1, tgt) - assert_poly_almost_equal(self.p1 * [1,2,3], tgt) - assert_poly_almost_equal([1,2,3] * self.p1, tgt) - - def test_floordiv(self) : - tgt = herme.HermiteE([1]) - assert_(self.p4 // self.p1 == tgt) - assert_(self.p4 // [1,2,3] == tgt) - assert_([2,2,3] // self.p1 == tgt) - - def test_mod(self) : - tgt = herme.HermiteE([1]) - assert_((self.p4 % self.p1) == tgt) - assert_((self.p4 % [1,2,3]) == tgt) - assert_(([2,2,3] % self.p1) == tgt) - - def test_divmod(self) : - tquo = herme.HermiteE([1]) - trem = herme.HermiteE([2]) - quo, rem = divmod(self.p5, self.p1) - assert_(quo == tquo and rem == trem) - quo, rem = divmod(self.p5, [1,2,3]) - assert_(quo == tquo and rem == trem) - quo, rem = divmod([3,2,3], self.p1) - assert_(quo == tquo and rem == trem) - - def test_pow(self) : - tgt = herme.HermiteE([1]) - for i in range(5) : - res = self.p1**i - assert_(res == tgt) - tgt = tgt*self.p1 - - def test_call(self) : - # domain = [-1, 1] - x = np.linspace(-1, 1) - tgt = 3*(x**2 - 1) + 2*(x) + 1 - assert_almost_equal(self.p1(x), tgt) - - # domain = [0, 1] - x = np.linspace(0, 1) - xx 
= 2*x - 1 - assert_almost_equal(self.p2(x), self.p1(xx)) - - def test_degree(self) : - assert_equal(self.p1.degree(), 2) - - def test_cutdeg(self) : - assert_raises(ValueError, self.p1.cutdeg, .5) - assert_raises(ValueError, self.p1.cutdeg, -1) - assert_equal(len(self.p1.cutdeg(3)), 3) - assert_equal(len(self.p1.cutdeg(2)), 3) - assert_equal(len(self.p1.cutdeg(1)), 2) - assert_equal(len(self.p1.cutdeg(0)), 1) - - def test_convert(self) : - x = np.linspace(-1,1) - p = self.p1.convert(domain=[0,1]) - assert_almost_equal(p(x), self.p1(x)) - - def test_mapparms(self) : - parms = self.p2.mapparms() - assert_almost_equal(parms, [-1, 2]) - - def test_trim(self) : - coef = [1, 1e-6, 1e-12, 0] - p = herme.HermiteE(coef) - assert_equal(p.trim().coef, coef[:3]) - assert_equal(p.trim(1e-10).coef, coef[:2]) - assert_equal(p.trim(1e-5).coef, coef[:1]) - - def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) - assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) - - def test_copy(self) : - p = self.p1.copy() - assert_(self.p1 == p) - - def test_integ(self) : - p = self.p2.integ() - assert_almost_equal(p.coef, herme.hermeint([1,2,3], 1, 0, scl=.5)) - p = self.p2.integ(lbnd=0) - assert_almost_equal(p(0), 0) - p = self.p2.integ(1, 1) - assert_almost_equal(p.coef, herme.hermeint([1,2,3], 1, 1, scl=.5)) - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.coef, herme.hermeint([1,2,3], 2, [1,2], scl=.5)) - - def test_deriv(self) : - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) - assert_almost_equal(p.deriv(2).coef, self.p2.coef) - - def test_roots(self) : - p = herme.HermiteE(herme.poly2herme([0, -1, 0, 1]), [0, 1]) - res = p.roots() - tgt = [0, .5, 1] - assert_almost_equal(res, tgt) - - def test_linspace(self): - xdes = np.linspace(0, 1, 20) - 
ydes = self.p2(xdes) - xres, yres = self.p2.linspace(20) - assert_almost_equal(xres, xdes) - assert_almost_equal(yres, ydes) - - def test_fromroots(self) : - roots = [0, .5, 1] - p = herme.HermiteE.fromroots(roots, domain=[0, 1]) - res = p.coef - tgt = herme.poly2herme([0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - def test_fit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - x = np.linspace(0,3) - y = f(x) - - # test default value of domain - p = herme.HermiteE.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) - - # test that fit works in given domains - p = herme.HermiteE.fit(x, y, 3, None) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [0,3]) - p = herme.HermiteE.fit(x, y, 3, []) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [-1, 1]) - # test that fit accepts weights. - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - p = herme.HermiteE.fit(x, yw, 3, w=w) - assert_almost_equal(p(x), y) - - def test_identity(self) : - x = np.linspace(0,3) - p = herme.HermiteE.identity() - assert_almost_equal(p(x), x) - p = herme.HermiteE.identity([1,3]) - assert_almost_equal(p(x), x) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_laguerre.py b/numpy-1.6.2/numpy/polynomial/tests/test_laguerre.py deleted file mode 100644 index f3a4a930bf..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_laguerre.py +++ /dev/null @@ -1,530 +0,0 @@ -"""Tests for hermendre module. 
- -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.laguerre as lag -import numpy.polynomial.polynomial as poly -from numpy.testing import * - -L0 = np.array([1 ])/1 -L1 = np.array([1 , -1 ])/1 -L2 = np.array([2 , -4 , 1 ])/2 -L3 = np.array([6 , -18 , 9 , -1 ])/6 -L4 = np.array([24 , -96 , 72 , -16 , 1 ])/24 -L5 = np.array([120 , -600 , 600 , -200 , 25 , -1 ])/120 -L6 = np.array([720 , -4320 , 5400 , -2400 , 450 , -36 , 1 ])/720 - -Llist = [L0, L1, L2, L3, L4, L5, L6] - -def trim(x) : - return lag.lagtrim(x, tol=1e-6) - - -class TestConstants(TestCase) : - - def test_lagdomain(self) : - assert_equal(lag.lagdomain, [0, 1]) - - def test_lagzero(self) : - assert_equal(lag.lagzero, [0]) - - def test_lagone(self) : - assert_equal(lag.lagone, [1]) - - def test_lagx(self) : - assert_equal(lag.lagx, [1, -1]) - - -class TestArithmetic(TestCase) : - x = np.linspace(-3, 3, 100) - y0 = poly.polyval(x, L0) - y1 = poly.polyval(x, L1) - y2 = poly.polyval(x, L2) - y3 = poly.polyval(x, L3) - y4 = poly.polyval(x, L4) - y5 = poly.polyval(x, L5) - y6 = poly.polyval(x, L6) - y = [y0, y1, y2, y3, y4, y5, y6] - - def test_lagval(self) : - def f(x) : - return x*(x**2 - 1) - - #check empty input - assert_equal(lag.lagval([], [1]).size, 0) - - #check normal input) - for i in range(7) : - msg = "At i=%d" % i - ser = np.zeros - tgt = self.y[i] - res = lag.lagval(self.x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3) : - dims = [2]*i - x = np.zeros(dims) - assert_equal(lag.lagval(x, [1]).shape, dims) - assert_equal(lag.lagval(x, [1,0]).shape, dims) - assert_equal(lag.lagval(x, [1,0,0]).shape, dims) - - def test_lagadd(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = lag.lagadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagsub(self) : - for i in 
range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = lag.lagsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagmulx(self): - assert_equal(lag.lagmulx([0]), [0]) - assert_equal(lag.lagmulx([1]), [1,-1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] - assert_almost_equal(lag.lagmulx(ser), tgt) - - def test_lagmul(self) : - # check values of result - for i in range(5) : - pol1 = [0]*i + [1] - val1 = lag.lagval(self.x, pol1) - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - pol2 = [0]*j + [1] - val2 = lag.lagval(self.x, pol2) - pol3 = lag.lagmul(pol1, pol2) - val3 = lag.lagval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_lagdiv(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = lag.lagadd(ci, cj) - quo, rem = lag.lagdiv(tgt, ci) - res = lag.lagadd(lag.lagmul(quo, ci), rem) - assert_almost_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestCalculus(TestCase) : - - def test_lagint(self) : - # check exceptions - assert_raises(ValueError, lag.lagint, [0], .5) - assert_raises(ValueError, lag.lagint, [0], -1) - assert_raises(ValueError, lag.lagint, [0], 1, [0,0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = lag.lagint([0], m=i, k=k) - assert_almost_equal(res, [1, -1]) - - # check single integration with integration constant - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i]) - res = lag.lag2poly(lagint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - lagpol = lag.poly2lag(pol) 
- lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(lag.lagval(-1, lagint), i) - - # check single integration with integration constant and scaling - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) - res = lag.lag2poly(lagint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = lag.lagint(tgt, m=1) - res = lag.lagint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = lag.lagint(tgt, m=1, k=[k]) - res = lag.lagint(pol, m=j, k=range(j)) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) - res = lag.lagint(pol, m=j, k=range(j), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = lag.lagint(tgt, m=1, k=[k], scl=2) - res = lag.lagint(pol, m=j, k=range(j), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagder(self) : - # check exceptions - assert_raises(ValueError, lag.lagder, [0], .5) - assert_raises(ValueError, lag.lagder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5) : - tgt = [1] + [0]*i - res = lag.lagder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = lag.lagder(lag.lagint(tgt, m=j), m=j) - 
assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - -class TestMisc(TestCase) : - - def test_lagfromroots(self) : - res = lag.lagfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1,5) : - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = lag.lagfromroots(roots) - res = lag.lagval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(lag.lag2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_lagroots(self) : - assert_almost_equal(lag.lagroots([1]), []) - assert_almost_equal(lag.lagroots([0, 1]), [1]) - for i in range(2,5) : - tgt = np.linspace(0, 3, i) - res = lag.lagroots(lag.lagfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagvander(self) : - # check for 1d x - x = np.arange(3) - v = lag.lagvander(x, 3) - assert_(v.shape == (3,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], lag.lagval(x, coef)) - - # check for 2d x - x = np.array([[1,2],[3,4],[5,6]]) - v = lag.lagvander(x, 3) - assert_(v.shape == (3,2,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], lag.lagval(x, coef)) - - def test_lagfit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, lag.lagfit, [1], [1], -1) - assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) - assert_raises(TypeError, lag.lagfit, [], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) - assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1,1]) - - # Test fit - x = np.linspace(0,2) - y = f(x) - # - coef3 = lag.lagfit(x, y, 3) - assert_equal(len(coef3), 4) - 
assert_almost_equal(lag.lagval(x, coef3), y) - # - coef4 = lag.lagfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(lag.lagval(x, coef4), y) - # - coef2d = lag.lagfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = lag.lagfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = lag.lagfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) - - def test_lagtrim(self) : - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, lag.lagtrim, coef, -1) - - # Test results - assert_equal(lag.lagtrim(coef), coef[:-1]) - assert_equal(lag.lagtrim(coef, 1), coef[:-3]) - assert_equal(lag.lagtrim(coef, 2), [0]) - - def test_lagline(self) : - assert_equal(lag.lagline(3,4), [7, -4]) - - def test_lag2poly(self) : - for i in range(7) : - assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) - - def test_poly2lag(self) : - for i in range(7) : - assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) - - -def assert_poly_almost_equal(p1, p2): - assert_almost_equal(p1.coef, p2.coef) - assert_equal(p1.domain, p2.domain) - - -class TestLaguerreClass(TestCase) : - - p1 = lag.Laguerre([1,2,3]) - p2 = lag.Laguerre([1,2,3], [0,1]) - p3 = lag.Laguerre([1,2]) - p4 = lag.Laguerre([2,2,3]) - p5 = lag.Laguerre([3,2,3]) - - def test_equal(self) : - assert_(self.p1 == self.p1) - assert_(self.p2 == self.p2) - assert_(not self.p1 == self.p2) - assert_(not self.p1 == self.p3) - assert_(not self.p1 == [1,2,3]) - - def test_not_equal(self) : - assert_(not self.p1 != self.p1) - assert_(not self.p2 != self.p2) - assert_(self.p1 != self.p2) - assert_(self.p1 != self.p3) - assert_(self.p1 != [1,2,3]) - - def test_add(self) : - tgt = lag.Laguerre([2,4,6]) - assert_(self.p1 + self.p1 == tgt) - assert_(self.p1 + [1,2,3] == tgt) - assert_([1,2,3] + self.p1 == tgt) - - def test_sub(self) : - tgt = 
lag.Laguerre([1]) - assert_(self.p4 - self.p1 == tgt) - assert_(self.p4 - [1,2,3] == tgt) - assert_([2,2,3] - self.p1 == tgt) - - def test_mul(self) : - tgt = lag.Laguerre([ 14., -16., 56., -72., 54.]) - assert_poly_almost_equal(self.p1 * self.p1, tgt) - assert_poly_almost_equal(self.p1 * [1,2,3], tgt) - assert_poly_almost_equal([1,2,3] * self.p1, tgt) - - def test_floordiv(self) : - tgt = lag.Laguerre([1]) - assert_(self.p4 // self.p1 == tgt) - assert_(self.p4 // [1,2,3] == tgt) - assert_([2,2,3] // self.p1 == tgt) - - def test_mod(self) : - tgt = lag.Laguerre([1]) - assert_((self.p4 % self.p1) == tgt) - assert_((self.p4 % [1,2,3]) == tgt) - assert_(([2,2,3] % self.p1) == tgt) - - def test_divmod(self) : - tquo = lag.Laguerre([1]) - trem = lag.Laguerre([2]) - quo, rem = divmod(self.p5, self.p1) - assert_(quo == tquo and rem == trem) - quo, rem = divmod(self.p5, [1,2,3]) - assert_(quo == tquo and rem == trem) - quo, rem = divmod([3,2,3], self.p1) - assert_(quo == tquo and rem == trem) - - def test_pow(self) : - tgt = lag.Laguerre([1]) - for i in range(5) : - res = self.p1**i - assert_(res == tgt) - tgt = tgt*self.p1 - - def test_call(self) : - # domain = [0, 1] - x = np.linspace(0, 1) - tgt = 3*(.5*x**2 - 2*x + 1) + 2*(-x + 1) + 1 - assert_almost_equal(self.p1(x), tgt) - - # domain = [0, 1] - x = np.linspace(.5, 1) - xx = 2*x - 1 - assert_almost_equal(self.p2(x), self.p1(xx)) - - def test_degree(self) : - assert_equal(self.p1.degree(), 2) - - def test_cutdeg(self) : - assert_raises(ValueError, self.p1.cutdeg, .5) - assert_raises(ValueError, self.p1.cutdeg, -1) - assert_equal(len(self.p1.cutdeg(3)), 3) - assert_equal(len(self.p1.cutdeg(2)), 3) - assert_equal(len(self.p1.cutdeg(1)), 2) - assert_equal(len(self.p1.cutdeg(0)), 1) - - def test_convert(self) : - x = np.linspace(-1,1) - p = self.p1.convert(domain=[0,1]) - assert_almost_equal(p(x), self.p1(x)) - - def test_mapparms(self) : - parms = self.p2.mapparms() - assert_almost_equal(parms, [-1, 2]) - - def 
test_trim(self) : - coef = [1, 1e-6, 1e-12, 0] - p = lag.Laguerre(coef) - assert_equal(p.trim().coef, coef[:3]) - assert_equal(p.trim(1e-10).coef, coef[:2]) - assert_equal(p.trim(1e-5).coef, coef[:1]) - - def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) - assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) - - def test_copy(self) : - p = self.p1.copy() - assert_(self.p1 == p) - - def test_integ(self) : - p = self.p2.integ() - assert_almost_equal(p.coef, lag.lagint([1,2,3], 1, 0, scl=.5)) - p = self.p2.integ(lbnd=0) - assert_almost_equal(p(0), 0) - p = self.p2.integ(1, 1) - assert_almost_equal(p.coef, lag.lagint([1,2,3], 1, 1, scl=.5)) - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.coef, lag.lagint([1,2,3], 2, [1,2], scl=.5)) - - def test_deriv(self) : - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) - assert_almost_equal(p.deriv(2).coef, self.p2.coef) - - def test_roots(self) : - p = lag.Laguerre(lag.poly2lag([0, -1, 0, 1]), [0, 1]) - res = p.roots() - tgt = [0, .5, 1] - assert_almost_equal(res, tgt) - - def test_linspace(self): - xdes = np.linspace(0, 1, 20) - ydes = self.p2(xdes) - xres, yres = self.p2.linspace(20) - assert_almost_equal(xres, xdes) - assert_almost_equal(yres, ydes) - - def test_fromroots(self) : - roots = [0, .5, 1] - p = lag.Laguerre.fromroots(roots, domain=[0, 1]) - res = p.coef - tgt = lag.poly2lag([0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - def test_fit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - x = np.linspace(0,3) - y = f(x) - - # test default value of domain - p = lag.Laguerre.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) - - # test that fit works in given domains - p = lag.Laguerre.fit(x, y, 3, None) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [0,3]) 
- p = lag.Laguerre.fit(x, y, 3, []) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [-1, 1]) - # test that fit accepts weights. - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - p = lag.Laguerre.fit(x, yw, 3, w=w) - assert_almost_equal(p(x), y) - - def test_identity(self) : - x = np.linspace(0,3) - p = lag.Laguerre.identity() - assert_almost_equal(p(x), x) - p = lag.Laguerre.identity([1,3]) - assert_almost_equal(p(x), x) -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_legendre.py b/numpy-1.6.2/numpy/polynomial/tests/test_legendre.py deleted file mode 100644 index a35e6605e2..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_legendre.py +++ /dev/null @@ -1,537 +0,0 @@ -"""Tests for legendre module. - -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.legendre as leg -import numpy.polynomial.polynomial as poly -from numpy.testing import * - -P0 = np.array([ 1]) -P1 = np.array([ 0, 1]) -P2 = np.array([-1, 0, 3])/2 -P3 = np.array([ 0, -3, 0, 5])/2 -P4 = np.array([ 3, 0, -30, 0, 35])/8 -P5 = np.array([ 0, 15, 0, -70, 0, 63])/8 -P6 = np.array([-5, 0, 105, 0,-315, 0, 231])/16 -P7 = np.array([ 0,-35, 0, 315, 0, -693, 0, 429])/16 -P8 = np.array([35, 0,-1260, 0,6930, 0,-12012, 0,6435])/128 -P9 = np.array([ 0,315, 0,-4620, 0,18018, 0,-25740, 0,12155])/128 - -Plist = [P0, P1, P2, P3, P4, P5, P6, P7, P8, P9] - -def trim(x) : - return leg.legtrim(x, tol=1e-6) - - -class TestConstants(TestCase) : - - def test_legdomain(self) : - assert_equal(leg.legdomain, [-1, 1]) - - def test_legzero(self) : - assert_equal(leg.legzero, [0]) - - def test_legone(self) : - assert_equal(leg.legone, [1]) - - def test_legx(self) : - assert_equal(leg.legx, [0, 1]) - - -class TestArithmetic(TestCase) : - x = np.linspace(-1, 1, 100) - y0 = poly.polyval(x, P0) - y1 = poly.polyval(x, P1) - y2 = poly.polyval(x, P2) - y3 = poly.polyval(x, P3) - y4 = poly.polyval(x, P4) - y5 
= poly.polyval(x, P5) - y6 = poly.polyval(x, P6) - y7 = poly.polyval(x, P7) - y8 = poly.polyval(x, P8) - y9 = poly.polyval(x, P9) - y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9] - - def test_legval(self) : - def f(x) : - return x*(x**2 - 1) - - #check empty input - assert_equal(leg.legval([], [1]).size, 0) - - #check normal input) - for i in range(10) : - msg = "At i=%d" % i - ser = np.zeros - tgt = self.y[i] - res = leg.legval(self.x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3) : - dims = [2]*i - x = np.zeros(dims) - assert_equal(leg.legval(x, [1]).shape, dims) - assert_equal(leg.legval(x, [1,0]).shape, dims) - assert_equal(leg.legval(x, [1,0,0]).shape, dims) - - def test_legadd(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = leg.legadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legsub(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = leg.legsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legmulx(self): - assert_equal(leg.legmulx([0]), [0]) - assert_equal(leg.legmulx([1]), [0,1]) - for i in range(1, 5): - tmp = 2*i + 1 - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] - assert_equal(leg.legmulx(ser), tgt) - - def test_legmul(self) : - # check values of result - for i in range(5) : - pol1 = [0]*i + [1] - val1 = leg.legval(self.x, pol1) - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - pol2 = [0]*j + [1] - val2 = leg.legval(self.x, pol2) - pol3 = leg.legmul(pol1, pol2) - val3 = leg.legval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_legdiv(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % 
(i,j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = leg.legadd(ci, cj) - quo, rem = leg.legdiv(tgt, ci) - res = leg.legadd(leg.legmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestCalculus(TestCase) : - - def test_legint(self) : - # check exceptions - assert_raises(ValueError, leg.legint, [0], .5) - assert_raises(ValueError, leg.legint, [0], -1) - assert_raises(ValueError, leg.legint, [0], 1, [0,0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = leg.legint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i]) - res = leg.leg2poly(legint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(leg.legval(-1, legint), i) - - # check single integration with integration constant and scaling - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i], scl=2) - res = leg.leg2poly(legint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = leg.legint(tgt, m=1) - res = leg.legint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = leg.legint(tgt, m=1, k=[k]) - res = leg.legint(pol, m=j, k=range(j)) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple 
integrations with lbnd - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) - res = leg.legint(pol, m=j, k=range(j), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = leg.legint(tgt, m=1, k=[k], scl=2) - res = leg.legint(pol, m=j, k=range(j), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legder(self) : - # check exceptions - assert_raises(ValueError, leg.legder, [0], .5) - assert_raises(ValueError, leg.legder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5) : - tgt = [1] + [0]*i - res = leg.legder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = leg.legder(leg.legint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - -class TestMisc(TestCase) : - - def test_legfromroots(self) : - res = leg.legfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1,5) : - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = leg.legfromroots(roots) - res = leg.legval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(leg.leg2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_legroots(self) : - assert_almost_equal(leg.legroots([1]), []) - assert_almost_equal(leg.legroots([1, 2]), [-.5]) - for i in range(2,5) : - tgt = np.linspace(-1, 1, i) - res = leg.legroots(leg.legfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legvander(self) : - # check for 1d x - x = 
np.arange(3) - v = leg.legvander(x, 3) - assert_(v.shape == (3,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], leg.legval(x, coef)) - - # check for 2d x - x = np.array([[1,2],[3,4],[5,6]]) - v = leg.legvander(x, 3) - assert_(v.shape == (3,2,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], leg.legval(x, coef)) - - def test_legfit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, leg.legfit, [1], [1], -1) - assert_raises(TypeError, leg.legfit, [[1]], [1], 0) - assert_raises(TypeError, leg.legfit, [], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) - assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1,1]) - - # Test fit - x = np.linspace(0,2) - y = f(x) - # - coef3 = leg.legfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(leg.legval(x, coef3), y) - # - coef4 = leg.legfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(leg.legval(x, coef4), y) - # - coef2d = leg.legfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = leg.legfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) - - def test_legtrim(self) : - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, leg.legtrim, coef, -1) - - # Test results - assert_equal(leg.legtrim(coef), coef[:-1]) - assert_equal(leg.legtrim(coef, 1), coef[:-3]) - assert_equal(leg.legtrim(coef, 2), [0]) - - def test_legline(self) : - assert_equal(leg.legline(3,4), [3, 4]) - - def test_leg2poly(self) : - for i in range(10) : - 
assert_almost_equal(leg.leg2poly([0]*i + [1]), Plist[i]) - - def test_poly2leg(self) : - for i in range(10) : - assert_almost_equal(leg.poly2leg(Plist[i]), [0]*i + [1]) - - -def assert_poly_almost_equal(p1, p2): - assert_almost_equal(p1.coef, p2.coef) - assert_equal(p1.domain, p2.domain) - - -class TestLegendreClass(TestCase) : - - p1 = leg.Legendre([1,2,3]) - p2 = leg.Legendre([1,2,3], [0,1]) - p3 = leg.Legendre([1,2]) - p4 = leg.Legendre([2,2,3]) - p5 = leg.Legendre([3,2,3]) - - def test_equal(self) : - assert_(self.p1 == self.p1) - assert_(self.p2 == self.p2) - assert_(not self.p1 == self.p2) - assert_(not self.p1 == self.p3) - assert_(not self.p1 == [1,2,3]) - - def test_not_equal(self) : - assert_(not self.p1 != self.p1) - assert_(not self.p2 != self.p2) - assert_(self.p1 != self.p2) - assert_(self.p1 != self.p3) - assert_(self.p1 != [1,2,3]) - - def test_add(self) : - tgt = leg.Legendre([2,4,6]) - assert_(self.p1 + self.p1 == tgt) - assert_(self.p1 + [1,2,3] == tgt) - assert_([1,2,3] + self.p1 == tgt) - - def test_sub(self) : - tgt = leg.Legendre([1]) - assert_(self.p4 - self.p1 == tgt) - assert_(self.p4 - [1,2,3] == tgt) - assert_([2,2,3] - self.p1 == tgt) - - def test_mul(self) : - tgt = leg.Legendre([4.13333333, 8.8, 11.23809524, 7.2, 4.62857143]) - assert_poly_almost_equal(self.p1 * self.p1, tgt) - assert_poly_almost_equal(self.p1 * [1,2,3], tgt) - assert_poly_almost_equal([1,2,3] * self.p1, tgt) - - def test_floordiv(self) : - tgt = leg.Legendre([1]) - assert_(self.p4 // self.p1 == tgt) - assert_(self.p4 // [1,2,3] == tgt) - assert_([2,2,3] // self.p1 == tgt) - - def test_mod(self) : - tgt = leg.Legendre([1]) - assert_((self.p4 % self.p1) == tgt) - assert_((self.p4 % [1,2,3]) == tgt) - assert_(([2,2,3] % self.p1) == tgt) - - def test_divmod(self) : - tquo = leg.Legendre([1]) - trem = leg.Legendre([2]) - quo, rem = divmod(self.p5, self.p1) - assert_(quo == tquo and rem == trem) - quo, rem = divmod(self.p5, [1,2,3]) - assert_(quo == tquo and rem == trem) - 
quo, rem = divmod([3,2,3], self.p1) - assert_(quo == tquo and rem == trem) - - def test_pow(self) : - tgt = leg.Legendre([1]) - for i in range(5) : - res = self.p1**i - assert_(res == tgt) - tgt = tgt*self.p1 - - def test_call(self) : - # domain = [-1, 1] - x = np.linspace(-1, 1) - tgt = 3*(1.5*x**2 - .5) + 2*x + 1 - assert_almost_equal(self.p1(x), tgt) - - # domain = [0, 1] - x = np.linspace(0, 1) - xx = 2*x - 1 - assert_almost_equal(self.p2(x), self.p1(xx)) - - def test_degree(self) : - assert_equal(self.p1.degree(), 2) - - def test_cutdeg(self) : - assert_raises(ValueError, self.p1.cutdeg, .5) - assert_raises(ValueError, self.p1.cutdeg, -1) - assert_equal(len(self.p1.cutdeg(3)), 3) - assert_equal(len(self.p1.cutdeg(2)), 3) - assert_equal(len(self.p1.cutdeg(1)), 2) - assert_equal(len(self.p1.cutdeg(0)), 1) - - def test_convert(self) : - x = np.linspace(-1,1) - p = self.p1.convert(domain=[0,1]) - assert_almost_equal(p(x), self.p1(x)) - - def test_mapparms(self) : - parms = self.p2.mapparms() - assert_almost_equal(parms, [-1, 2]) - - def test_trim(self) : - coef = [1, 1e-6, 1e-12, 0] - p = leg.Legendre(coef) - assert_equal(p.trim().coef, coef[:3]) - assert_equal(p.trim(1e-10).coef, coef[:2]) - assert_equal(p.trim(1e-5).coef, coef[:1]) - - def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) - assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) - - def test_copy(self) : - p = self.p1.copy() - assert_(self.p1 == p) - - def test_integ(self) : - p = self.p2.integ() - assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 0, scl=.5)) - p = self.p2.integ(lbnd=0) - assert_almost_equal(p(0), 0) - p = self.p2.integ(1, 1) - assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 1, scl=.5)) - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.coef, leg.legint([1,2,3], 2, [1,2], scl=.5)) - - 
def test_deriv(self) : - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) - assert_almost_equal(p.deriv(2).coef, self.p2.coef) - - def test_roots(self) : - p = leg.Legendre(leg.poly2leg([0, -1, 0, 1]), [0, 1]) - res = p.roots() - tgt = [0, .5, 1] - assert_almost_equal(res, tgt) - - def test_linspace(self): - xdes = np.linspace(0, 1, 20) - ydes = self.p2(xdes) - xres, yres = self.p2.linspace(20) - assert_almost_equal(xres, xdes) - assert_almost_equal(yres, ydes) - - def test_fromroots(self) : - roots = [0, .5, 1] - p = leg.Legendre.fromroots(roots, domain=[0, 1]) - res = p.coef - tgt = leg.poly2leg([0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - def test_fit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - x = np.linspace(0,3) - y = f(x) - - # test default value of domain - p = leg.Legendre.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) - - # test that fit works in given domains - p = leg.Legendre.fit(x, y, 3, None) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [0,3]) - p = leg.Legendre.fit(x, y, 3, []) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [-1, 1]) - # test that fit accepts weights. - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - p = leg.Legendre.fit(x, yw, 3, w=w) - assert_almost_equal(p(x), y) - - def test_identity(self) : - x = np.linspace(0,3) - p = leg.Legendre.identity() - assert_almost_equal(p(x), x) - p = leg.Legendre.identity([1,3]) - assert_almost_equal(p(x), x) -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_polynomial.py b/numpy-1.6.2/numpy/polynomial/tests/test_polynomial.py deleted file mode 100644 index 4b93ea118c..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_polynomial.py +++ /dev/null @@ -1,508 +0,0 @@ -"""Tests for polynomial module. 
- -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.polynomial as poly -from numpy.testing import * - -def trim(x) : - return poly.polytrim(x, tol=1e-6) - -T0 = [ 1] -T1 = [ 0, 1] -T2 = [-1, 0, 2] -T3 = [ 0, -3, 0, 4] -T4 = [ 1, 0, -8, 0, 8] -T5 = [ 0, 5, 0, -20, 0, 16] -T6 = [-1, 0, 18, 0, -48, 0, 32] -T7 = [ 0, -7, 0, 56, 0, -112, 0, 64] -T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128] -T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256] - -Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] - - -class TestConstants(TestCase) : - - def test_polydomain(self) : - assert_equal(poly.polydomain, [-1, 1]) - - def test_polyzero(self) : - assert_equal(poly.polyzero, [0]) - - def test_polyone(self) : - assert_equal(poly.polyone, [1]) - - def test_polyx(self) : - assert_equal(poly.polyx, [0, 1]) - - -class TestArithmetic(TestCase) : - - def test_polyadd(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = poly.polyadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polysub(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = poly.polysub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polymulx(self): - assert_equal(poly.polymulx([0]), [0]) - assert_equal(poly.polymulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i + 1) + [1] - assert_equal(poly.polymulx(ser), tgt) - - def test_polymul(self) : - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(i + j + 1) - tgt[i + j] += 1 - res = poly.polymul([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polydiv(self) : - # check zero division - assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) - - # check scalar division - quo, 
rem = poly.polydiv([2],[2]) - assert_equal((quo, rem), (1, 0)) - quo, rem = poly.polydiv([2,2],[2]) - assert_equal((quo, rem), ((1,1), 0)) - - # check rest. - for i in range(5) : - for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - ci = [0]*i + [1,2] - cj = [0]*j + [1,2] - tgt = poly.polyadd(ci, cj) - quo, rem = poly.polydiv(tgt, ci) - res = poly.polyadd(poly.polymul(quo, ci), rem) - assert_equal(res, tgt, err_msg=msg) - - def test_polyval(self) : - def f(x) : - return x*(x**2 - 1) - - #check empty input - assert_equal(poly.polyval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1,1) - for i in range(5) : - tgt = x**i - res = poly.polyval(x, [0]*i + [1]) - assert_almost_equal(res, tgt) - tgt = f(x) - res = poly.polyval(x, [0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - #check that shape is preserved - for i in range(3) : - dims = [2]*i - x = np.zeros(dims) - assert_equal(poly.polyval(x, [1]).shape, dims) - assert_equal(poly.polyval(x, [1,0]).shape, dims) - assert_equal(poly.polyval(x, [1,0,0]).shape, dims) - - -class TestCalculus(TestCase) : - - def test_polyint(self) : - # check exceptions - assert_raises(ValueError, poly.polyint, [0], .5) - assert_raises(ValueError, poly.polyint, [0], -1) - assert_raises(ValueError, poly.polyint, [0], 1, [0,0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = poly.polyint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - res = poly.polyint(pol, m=1, k=[i]) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5) : - scl = i + 1 - pol = [0]*i + [1] - res = poly.polyint(pol, m=1, k=[i], lbnd=-1) - assert_almost_equal(poly.polyval(-1, res), i) - - # check single integration with integration constant and scaling - for i in range(5) : - scl = i + 1 - 
pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - res = poly.polyint(pol, m=1, k=[i], scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = poly.polyint(tgt, m=1) - res = poly.polyint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = poly.polyint(tgt, m=1, k=[k]) - res = poly.polyint(pol, m=j, k=range(j)) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) - res = poly.polyint(pol, m=j, k=range(j), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5) : - for j in range(2,5) : - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j) : - tgt = poly.polyint(tgt, m=1, k=[k], scl=2) - res = poly.polyint(pol, m=j, k=range(j), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyder(self) : - # check exceptions - assert_raises(ValueError, poly.polyder, [0], .5) - assert_raises(ValueError, poly.polyder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5) : - tgt = [1] + [0]*i - res = poly.polyder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = poly.polyder(poly.polyint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5) : - for j in range(2,5) : - tgt = [1] + [0]*i - res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - 
-class TestMisc(TestCase) : - - def test_polyfromroots(self) : - res = poly.polyfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1,5) : - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = Tlist[i] - res = poly.polyfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res),trim(tgt)) - - def test_polyroots(self) : - assert_almost_equal(poly.polyroots([1]), []) - assert_almost_equal(poly.polyroots([1, 2]), [-.5]) - for i in range(2,5) : - tgt = np.linspace(-1, 1, i) - res = poly.polyroots(poly.polyfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyvander(self) : - # check for 1d x - x = np.arange(3) - v = poly.polyvander(x, 3) - assert_(v.shape == (3,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], poly.polyval(x, coef)) - - # check for 2d x - x = np.array([[1,2],[3,4],[5,6]]) - v = poly.polyvander(x, 3) - assert_(v.shape == (3,2,4)) - for i in range(4) : - coef = [0]*i + [1] - assert_almost_equal(v[...,i], poly.polyval(x, coef)) - - def test_polyfit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, poly.polyfit, [1], [1], -1) - assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) - assert_raises(TypeError, poly.polyfit, [], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) - assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1,1]) - - # Test fit - x = np.linspace(0,2) - y = f(x) - # - coef3 = poly.polyfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(poly.polyval(x, coef3), y) - # - coef4 = poly.polyfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(poly.polyval(x, coef4), y) - # - coef2d = poly.polyfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) - # test weighting - w = 
np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - wcoef3 = poly.polyfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = poly.polyfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) - - def test_polytrim(self) : - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, poly.polytrim, coef, -1) - - # Test results - assert_equal(poly.polytrim(coef), coef[:-1]) - assert_equal(poly.polytrim(coef, 1), coef[:-3]) - assert_equal(poly.polytrim(coef, 2), [0]) - - def test_polyline(self) : - assert_equal(poly.polyline(3,4), [3, 4]) - -class TestPolynomialClass(TestCase) : - - p1 = poly.Polynomial([1,2,3]) - p2 = poly.Polynomial([1,2,3], [0,1]) - p3 = poly.Polynomial([1,2]) - p4 = poly.Polynomial([2,2,3]) - p5 = poly.Polynomial([3,2,3]) - - def test_equal(self) : - assert_(self.p1 == self.p1) - assert_(self.p2 == self.p2) - assert_(not self.p1 == self.p2) - assert_(not self.p1 == self.p3) - assert_(not self.p1 == [1,2,3]) - - def test_not_equal(self) : - assert_(not self.p1 != self.p1) - assert_(not self.p2 != self.p2) - assert_(self.p1 != self.p2) - assert_(self.p1 != self.p3) - assert_(self.p1 != [1,2,3]) - - def test_add(self) : - tgt = poly.Polynomial([2,4,6]) - assert_(self.p1 + self.p1 == tgt) - assert_(self.p1 + [1,2,3] == tgt) - assert_([1,2,3] + self.p1 == tgt) - - def test_sub(self) : - tgt = poly.Polynomial([1]) - assert_(self.p4 - self.p1 == tgt) - assert_(self.p4 - [1,2,3] == tgt) - assert_([2,2,3] - self.p1 == tgt) - - def test_mul(self) : - tgt = poly.Polynomial([1,4,10,12,9]) - assert_(self.p1 * self.p1 == tgt) - assert_(self.p1 * [1,2,3] == tgt) - assert_([1,2,3] * self.p1 == tgt) - - def test_floordiv(self) : - tgt = poly.Polynomial([1]) - assert_(self.p4 // self.p1 == tgt) - assert_(self.p4 // [1,2,3] == tgt) - assert_([2,2,3] // self.p1 == tgt) - - def test_mod(self) : - tgt = poly.Polynomial([1]) - assert_((self.p4 % self.p1) == tgt) - assert_((self.p4 % [1,2,3]) == 
tgt) - assert_(([2,2,3] % self.p1) == tgt) - - def test_divmod(self) : - tquo = poly.Polynomial([1]) - trem = poly.Polynomial([2]) - quo, rem = divmod(self.p5, self.p1) - assert_(quo == tquo and rem == trem) - quo, rem = divmod(self.p5, [1,2,3]) - assert_(quo == tquo and rem == trem) - quo, rem = divmod([3,2,3], self.p1) - assert_(quo == tquo and rem == trem) - - def test_pow(self) : - tgt = poly.Polynomial([1]) - for i in range(5) : - res = self.p1**i - assert_(res == tgt) - tgt *= self.p1 - - def test_call(self) : - # domain = [-1, 1] - x = np.linspace(-1, 1) - tgt = (3*x + 2)*x + 1 - assert_almost_equal(self.p1(x), tgt) - - # domain = [0, 1] - x = np.linspace(0, 1) - xx = 2*x - 1 - assert_almost_equal(self.p2(x), self.p1(xx)) - - def test_degree(self) : - assert_equal(self.p1.degree(), 2) - - def test_cutdeg(self) : - assert_raises(ValueError, self.p1.cutdeg, .5) - assert_raises(ValueError, self.p1.cutdeg, -1) - assert_equal(len(self.p1.cutdeg(3)), 3) - assert_equal(len(self.p1.cutdeg(2)), 3) - assert_equal(len(self.p1.cutdeg(1)), 2) - assert_equal(len(self.p1.cutdeg(0)), 1) - - def test_convert(self) : - x = np.linspace(-1,1) - p = self.p1.convert(domain=[0,1]) - assert_almost_equal(p(x), self.p1(x)) - - def test_mapparms(self) : - parms = self.p2.mapparms() - assert_almost_equal(parms, [-1, 2]) - - def test_trim(self) : - coef = [1, 1e-6, 1e-12, 0] - p = poly.Polynomial(coef) - assert_equal(p.trim().coef, coef[:3]) - assert_equal(p.trim(1e-10).coef, coef[:2]) - assert_equal(p.trim(1e-5).coef, coef[:1]) - - def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) - assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) - - def test_copy(self) : - p = self.p1.copy() - assert_(self.p1 == p) - - def test_integ(self) : - p = self.p2.integ() - assert_almost_equal(p.coef, 
poly.polyint([1,2,3], 1, 0, scl=.5)) - p = self.p2.integ(lbnd=0) - assert_almost_equal(p(0), 0) - p = self.p2.integ(1, 1) - assert_almost_equal(p.coef, poly.polyint([1,2,3], 1, 1, scl=.5)) - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.coef, poly.polyint([1,2,3], 2, [1, 2], scl=.5)) - - def test_deriv(self) : - p = self.p2.integ(2, [1, 2]) - assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) - assert_almost_equal(p.deriv(2).coef, self.p2.coef) - - def test_roots(self) : - p = poly.Polynomial([0, -1, 0, 1], [0, 1]) - res = p.roots() - tgt = [0, .5, 1] - assert_almost_equal(res, tgt) - - def test_linspace(self): - xdes = np.linspace(0, 1, 20) - ydes = self.p2(xdes) - xres, yres = self.p2.linspace(20) - assert_almost_equal(xres, xdes) - assert_almost_equal(yres, ydes) - - def test_fromroots(self) : - roots = [0, .5, 1] - p = poly.Polynomial.fromroots(roots, domain=[0, 1]) - res = p.coef - tgt = [0, -1, 0, 1] - assert_almost_equal(res, tgt) - - def test_fit(self) : - def f(x) : - return x*(x - 1)*(x - 2) - x = np.linspace(0,3) - y = f(x) - - # test default value of domain - p = poly.Polynomial.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) - - # test that fit works in given domains - p = poly.Polynomial.fit(x, y, 3, None) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [0,3]) - p = poly.Polynomial.fit(x, y, 3, []) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, [-1, 1]) - # test that fit accepts weights. 
- w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - p = poly.Polynomial.fit(x, yw, 3, w=w) - assert_almost_equal(p(x), y) - - def test_identity(self) : - x = np.linspace(0,3) - p = poly.Polynomial.identity() - assert_almost_equal(p(x), x) - p = poly.Polynomial.identity([1,3]) - assert_almost_equal(p(x), x) -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_polyutils.py b/numpy-1.6.2/numpy/polynomial/tests/test_polyutils.py deleted file mode 100644 index 14bf8bb789..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_polyutils.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Tests for polyutils module. - -""" -from __future__ import division - -import numpy as np -import numpy.polynomial.polyutils as pu -from numpy.testing import * - -class TestMisc(TestCase) : - - def test_trimseq(self) : - for i in range(5) : - tgt = [1] - res = pu.trimseq([1] + [0]*5) - assert_equal(res, tgt) - - def test_as_series(self) : - # check exceptions - assert_raises(ValueError, pu.as_series, [[]]) - assert_raises(ValueError, pu.as_series, [[[1,2]]]) - assert_raises(ValueError, pu.as_series, [[1],['a']]) - # check common types - types = ['i', 'd', 'O'] - for i in range(len(types)) : - for j in range(i) : - ci = np.ones(1, types[i]) - cj = np.ones(1, types[j]) - [resi, resj] = pu.as_series([ci, cj]) - assert_(resi.dtype.char == resj.dtype.char) - assert_(resj.dtype.char == types[i]) - - def test_trimcoef(self) : - coef = [2, -1, 1, 0] - # Test exceptions - assert_raises(ValueError, pu.trimcoef, coef, -1) - # Test results - assert_equal(pu.trimcoef(coef), coef[:-1]) - assert_equal(pu.trimcoef(coef, 1), coef[:-3]) - assert_equal(pu.trimcoef(coef, 2), [0]) - - -class TestDomain(TestCase) : - - def test_getdomain(self) : - # test for real values - x = [1, 10, 3, -1] - tgt = [-1,10] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - # test for complex values - x = [1 + 1j, 1 - 1j, 0, 2] - tgt = [-1j, 2 + 1j] - res = 
pu.getdomain(x) - assert_almost_equal(res, tgt) - - def test_mapdomain(self) : - # test for real values - dom1 = [0,4] - dom2 = [1,3] - tgt = dom2 - res = pu. mapdomain(dom1, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = dom2 - x = dom1 - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for multidimensional arrays - dom1 = [0,4] - dom2 = [1,3] - tgt = np.array([dom2, dom2]) - x = np.array([dom1, dom1]) - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test that subtypes are preserved. - dom1 = [0,4] - dom2 = [1,3] - x = np.matrix([dom1, dom1]) - res = pu.mapdomain(x, dom1, dom2) - assert_(isinstance(res, np.matrix)) - - def test_mapparms(self) : - # test for real values - dom1 = [0,4] - dom2 = [1,3] - tgt = [1, .5] - res = pu. mapparms(dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = [-1 + 1j, 1 - 1j] - res = pu.mapparms(dom1, dom2) - assert_almost_equal(res, tgt) diff --git a/numpy-1.6.2/numpy/polynomial/tests/test_printing.py b/numpy-1.6.2/numpy/polynomial/tests/test_printing.py deleted file mode 100644 index 9803d931c1..0000000000 --- a/numpy-1.6.2/numpy/polynomial/tests/test_printing.py +++ /dev/null @@ -1,81 +0,0 @@ -import numpy.polynomial as poly -from numpy.testing import TestCase, run_module_suite, assert_ - -class test_str(TestCase): - def test_polynomial_str(self): - res = str(poly.Polynomial([0,1])) - tgt = 'poly([0., 1.])' - assert_(res, tgt) - - - def test_chebyshev_str(self): - res = str(poly.Chebyshev([0,1])) - tgt = 'leg([0., 1.])' - assert_(res, tgt) - - - def test_legendre_str(self): - res = str(poly.Legendre([0,1])) - tgt = 'leg([0., 1.])' - assert_(res, tgt) - - - def test_hermite_str(self): - res = str(poly.Hermite([0,1])) - tgt = 'herm([0., 1.])' - assert_(res, tgt) - - - def test_hermiteE_str(self): - res = str(poly.HermiteE([0,1])) - tgt 
= 'herme([0., 1.])' - assert_(res, tgt) - - - def test_laguerre_str(self): - res = str(poly.Laguerre([0,1])) - tgt = 'lag([0., 1.])' - assert_(res, tgt) - - -class test_repr(TestCase): - def test_polynomial_str(self): - res = repr(poly.Polynomial([0,1])) - tgt = 'Polynomial([0., 1.])' - assert_(res, tgt) - - - def test_chebyshev_str(self): - res = repr(poly.Chebyshev([0,1])) - tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - - def test_legendre_repr(self): - res = repr(poly.Legendre([0,1])) - tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - - def test_hermite_repr(self): - res = repr(poly.Hermite([0,1])) - tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - - def test_hermiteE_repr(self): - res = repr(poly.HermiteE([0,1])) - tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - - def test_laguerre_repr(self): - res = repr(poly.Laguerre([0,1])) - tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])' - assert_(res, tgt) - - -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/random/SConscript b/numpy-1.6.2/numpy/random/SConscript deleted file mode 100644 index a2acb0a668..0000000000 --- a/numpy-1.6.2/numpy/random/SConscript +++ /dev/null @@ -1,50 +0,0 @@ -# Last Change: Wed Nov 19 09:00 PM 2008 J -# vim:syntax=python -import os - -from numscons import GetNumpyEnvironment, scons_get_mathlib - -from setup import needs_mingw_ftime_workaround - -def CheckWincrypt(context): - from copy import deepcopy - src = """\ -/* check to see if _WIN32 is defined */ -int main(int argc, char *argv[]) -{ -#ifdef _WIN32 - return 0; -#else - return 1; -#endif -} -""" - - context.Message("Checking if using wincrypt ... 
") - st = context.env.TryRun(src, '.C') - if st[0] == 0: - context.Result('No') - else: - context.Result('Yes') - return st[0] - -env = GetNumpyEnvironment(ARGUMENTS) - -mlib = scons_get_mathlib(env) -env.AppendUnique(LIBS = mlib) - -# On windows, see if we should use Advapi32 -if os.name == 'nt': - config = env.NumpyConfigure(custom_tests = {'CheckWincrypt' : CheckWincrypt}) - if config.CheckWincrypt: - config.env.AppendUnique(LIBS = 'Advapi32') - config.Finish() - -if needs_mingw_ftime_workaround(): - env.Append(CPPDEFINES=['NPY_NEEDS_MINGW_TIME_WORKAROUND']) - -sources = [os.path.join('mtrand', x) for x in - ['mtrand.c', 'randomkit.c', 'initarray.c', 'distributions.c']] - -# XXX: Pyrex dependency -env.NumpyPythonExtension('mtrand', source = sources) diff --git a/numpy-1.6.2/numpy/random/SConstruct b/numpy-1.6.2/numpy/random/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/numpy-1.6.2/numpy/random/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/numpy-1.6.2/numpy/random/__init__.py b/numpy-1.6.2/numpy/random/__init__.py deleted file mode 100644 index 8c3a333688..0000000000 --- a/numpy-1.6.2/numpy/random/__init__.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -======================== -Random Number Generation -======================== - -==================== ========================================================= -Utility functions -============================================================================== -random Uniformly distributed values of a given shape. -bytes Uniformly distributed random bytes. -random_integers Uniformly distributed integers in a given range. -random_sample Uniformly distributed floats in a given range. -permutation Randomly permute a sequence / generate a random sequence. -shuffle Randomly permute a sequence in place. -seed Seed the random number generator. 
-==================== ========================================================= - -==================== ========================================================= -Compatibility functions -============================================================================== -rand Uniformly distributed values. -randn Normally distributed values. -ranf Uniformly distributed floating point numbers. -randint Uniformly distributed integers in a given range. -==================== ========================================================= - -==================== ========================================================= -Univariate distributions -============================================================================== -beta Beta distribution over ``[0, 1]``. -binomial Binomial distribution. -chisquare :math:`\\chi^2` distribution. -exponential Exponential distribution. -f F (Fisher-Snedecor) distribution. -gamma Gamma distribution. -geometric Geometric distribution. -gumbel Gumbel distribution. -hypergeometric Hypergeometric distribution. -laplace Laplace distribution. -logistic Logistic distribution. -lognormal Log-normal distribution. -logseries Logarithmic series distribution. -negative_binomial Negative binomial distribution. -noncentral_chisquare Non-central chi-square distribution. -noncentral_f Non-central F distribution. -normal Normal / Gaussian distribution. -pareto Pareto distribution. -poisson Poisson distribution. -power Power distribution. -rayleigh Rayleigh distribution. -triangular Triangular distribution. -uniform Uniform distribution. -vonmises Von Mises circular distribution. -wald Wald (inverse Gaussian) distribution. -weibull Weibull distribution. -zipf Zipf's distribution over ranked data. 
-==================== ========================================================= - -==================== ========================================================= -Multivariate distributions -============================================================================== -dirichlet Multivariate generalization of Beta distribution. -multinomial Multivariate generalization of the binomial distribution. -multivariate_normal Multivariate generalization of the normal distribution. -==================== ========================================================= - -==================== ========================================================= -Standard distributions -============================================================================== -standard_cauchy Standard Cauchy-Lorentz distribution. -standard_exponential Standard exponential distribution. -standard_gamma Standard Gamma distribution. -standard_normal Standard normal distribution. -standard_t Standard Student's t-distribution. -==================== ========================================================= - -==================== ========================================================= -Internal functions -============================================================================== -get_state Get tuple representing internal state of generator. -set_state Set state of generator. -==================== ========================================================= - -""" -# To get sub-modules -from info import __doc__, __all__ -from mtrand import * - -# Some aliases: -ranf = random = sample = random_sample -__all__.extend(['ranf','random','sample']) - -def __RandomState_ctor(): - """Return a RandomState instance. - - This function exists solely to assist (un)pickling. 
- """ - return RandomState() - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.6.2/numpy/random/info.py b/numpy-1.6.2/numpy/random/info.py deleted file mode 100644 index 6139e57841..0000000000 --- a/numpy-1.6.2/numpy/random/info.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -======================== -Random Number Generation -======================== - -==================== ========================================================= -Utility functions -============================================================================== -random Uniformly distributed values of a given shape. -bytes Uniformly distributed random bytes. -random_integers Uniformly distributed integers in a given range. -random_sample Uniformly distributed floats in a given range. -permutation Randomly permute a sequence / generate a random sequence. -shuffle Randomly permute a sequence in place. -seed Seed the random number generator. -==================== ========================================================= - -==================== ========================================================= -Compatibility functions -============================================================================== -rand Uniformly distributed values. -randn Normally distributed values. -ranf Uniformly distributed floating point numbers. -randint Uniformly distributed integers in a given range. -==================== ========================================================= - -==================== ========================================================= -Univariate distributions -============================================================================== -beta Beta distribution over ``[0, 1]``. -binomial Binomial distribution. -chisquare :math:`\\chi^2` distribution. -exponential Exponential distribution. -f F (Fisher-Snedecor) distribution. -gamma Gamma distribution. -geometric Geometric distribution. -gumbel Gumbel distribution. 
-hypergeometric Hypergeometric distribution. -laplace Laplace distribution. -logistic Logistic distribution. -lognormal Log-normal distribution. -logseries Logarithmic series distribution. -negative_binomial Negative binomial distribution. -noncentral_chisquare Non-central chi-square distribution. -noncentral_f Non-central F distribution. -normal Normal / Gaussian distribution. -pareto Pareto distribution. -poisson Poisson distribution. -power Power distribution. -rayleigh Rayleigh distribution. -triangular Triangular distribution. -uniform Uniform distribution. -vonmises Von Mises circular distribution. -wald Wald (inverse Gaussian) distribution. -weibull Weibull distribution. -zipf Zipf's distribution over ranked data. -==================== ========================================================= - -==================== ========================================================= -Multivariate distributions -============================================================================== -dirichlet Multivariate generalization of Beta distribution. -multinomial Multivariate generalization of the binomial distribution. -multivariate_normal Multivariate generalization of the normal distribution. -==================== ========================================================= - -==================== ========================================================= -Standard distributions -============================================================================== -standard_cauchy Standard Cauchy-Lorentz distribution. -standard_exponential Standard exponential distribution. -standard_gamma Standard Gamma distribution. -standard_normal Standard normal distribution. -standard_t Standard Student's t-distribution. 
-==================== ========================================================= - -==================== ========================================================= -Internal functions -============================================================================== -get_state Get tuple representing internal state of generator. -set_state Set state of generator. -==================== ========================================================= - -""" - -depends = ['core'] - -__all__ = [ - 'beta', - 'binomial', - 'bytes', - 'chisquare', - 'exponential', - 'f', - 'gamma', - 'geometric', - 'get_state', - 'gumbel', - 'hypergeometric', - 'laplace', - 'logistic', - 'lognormal', - 'logseries', - 'multinomial', - 'multivariate_normal', - 'negative_binomial', - 'noncentral_chisquare', - 'noncentral_f', - 'normal', - 'pareto', - 'permutation', - 'poisson', - 'power', - 'rand', - 'randint', - 'randn', - 'random_integers', - 'random_sample', - 'rayleigh', - 'seed', - 'set_state', - 'shuffle', - 'standard_cauchy', - 'standard_exponential', - 'standard_gamma', - 'standard_normal', - 'standard_t', - 'triangular', - 'uniform', - 'vonmises', - 'wald', - 'weibull', - 'zipf' -] diff --git a/numpy-1.6.2/numpy/random/mtrand/Python.pxi b/numpy-1.6.2/numpy/random/mtrand/Python.pxi deleted file mode 100644 index 01d47af500..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/Python.pxi +++ /dev/null @@ -1,57 +0,0 @@ -# :Author: Robert Kern -# :Copyright: 2004, Enthought, Inc. -# :License: BSD Style - - -cdef extern from "Python.h": - # Not part of the Python API, but we might as well define it here. - # Note that the exact type doesn't actually matter for Pyrex. 
- ctypedef int size_t - - # String API - char* PyString_AsString(object string) - char* PyString_AS_STRING(object string) - object PyString_FromString(char* c_string) - object PyString_FromStringAndSize(char* c_string, int length) - - # Float API - double PyFloat_AsDouble(object ob) - long PyInt_AsLong(object ob) - - # Memory API - void* PyMem_Malloc(size_t n) - void* PyMem_Realloc(void* buf, size_t n) - void PyMem_Free(void* buf) - - void Py_DECREF(object obj) - void Py_XDECREF(object obj) - void Py_INCREF(object obj) - void Py_XINCREF(object obj) - - # CObject API -# If this is uncommented it needs to be fixed to use PyCapsule -# for Python >= 3.0 -# -# ctypedef void (*destructor1)(void* cobj) -# ctypedef void (*destructor2)(void* cobj, void* desc) -# int PyCObject_Check(object p) -# object PyCObject_FromVoidPtr(void* cobj, destructor1 destr) -# object PyCObject_FromVoidPtrAndDesc(void* cobj, void* desc, -# destructor2 destr) -# void* PyCObject_AsVoidPtr(object self) -# void* PyCObject_GetDesc(object self) -# int PyCObject_SetVoidPtr(object self, void* cobj) - - # TypeCheck API - int PyFloat_Check(object obj) - int PyInt_Check(object obj) - - # Error API - int PyErr_Occurred() - void PyErr_Clear() - -cdef extern from "string.h": - void *memcpy(void *s1, void *s2, int n) - -cdef extern from "math.h": - double fabs(double x) diff --git a/numpy-1.6.2/numpy/random/mtrand/distributions.c b/numpy-1.6.2/numpy/random/mtrand/distributions.c deleted file mode 100644 index d792cf86da..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/distributions.c +++ /dev/null @@ -1,884 +0,0 @@ -/* Copyright 2005 Robert Kern (robert.kern@gmail.com) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the 
Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* The implementations of rk_hypergeometric_hyp(), rk_hypergeometric_hrua(), - * and rk_triangular() were adapted from Ivan Frohne's rv.py which has this - * license: - * - * Copyright 1998 by Ivan Frohne; Wasilla, Alaska, U.S.A. - * All Rights Reserved - * - * Permission to use, copy, modify and distribute this software and its - * documentation for any purpose, free of charge, is granted subject to the - * following conditions: - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the software. - * - * THE SOFTWARE AND DOCUMENTATION IS PROVIDED WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO MERCHANTABILITY, FITNESS - * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR - * OR COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM OR DAMAGES IN A CONTRACT - * ACTION, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR ITS DOCUMENTATION. - */ - -#include -#include "distributions.h" -#include - -#ifndef min -#define min(x,y) ((xy)?x:y) -#endif - -#ifndef M_PI -#define M_PI 3.14159265358979323846264338328 -#endif -/* log-gamma function to support some of these distributions. 
The - * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their - * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc. - */ -extern double loggam(double x); -double loggam(double x) -{ - double x0, x2, xp, gl, gl0; - long k, n; - - static double a[10] = {8.333333333333333e-02,-2.777777777777778e-03, - 7.936507936507937e-04,-5.952380952380952e-04, - 8.417508417508418e-04,-1.917526917526918e-03, - 6.410256410256410e-03,-2.955065359477124e-02, - 1.796443723688307e-01,-1.39243221690590e+00}; - x0 = x; - n = 0; - if ((x == 1.0) || (x == 2.0)) - { - return 0.0; - } - else if (x <= 7.0) - { - n = (long)(7 - x); - x0 = x + n; - } - x2 = 1.0/(x0*x0); - xp = 2*M_PI; - gl0 = a[9]; - for (k=8; k>=0; k--) - { - gl0 *= x2; - gl0 += a[k]; - } - gl = gl0/x0 + 0.5*log(xp) + (x0-0.5)*log(x0) - x0; - if (x <= 7.0) - { - for (k=1; k<=n; k++) - { - gl -= log(x0-1.0); - x0 -= 1.0; - } - } - return gl; -} - -double rk_normal(rk_state *state, double loc, double scale) -{ - return loc + scale*rk_gauss(state); -} - -double rk_standard_exponential(rk_state *state) -{ - /* We use -log(1-U) since U is [0, 1) */ - return -log(1.0 - rk_double(state)); -} - -double rk_exponential(rk_state *state, double scale) -{ - return scale * rk_standard_exponential(state); -} - -double rk_uniform(rk_state *state, double loc, double scale) -{ - return loc + scale*rk_double(state); -} - -double rk_standard_gamma(rk_state *state, double shape) -{ - double b, c; - double U, V, X, Y; - - if (shape == 1.0) - { - return rk_standard_exponential(state); - } - else if (shape < 1.0) - { - for (;;) - { - U = rk_double(state); - V = rk_standard_exponential(state); - if (U <= 1.0 - shape) - { - X = pow(U, 1./shape); - if (X <= V) - { - return X; - } - } - else - { - Y = -log((1-U)/shape); - X = pow(1.0 - shape + shape*Y, 1./shape); - if (X <= (V + Y)) - { - return X; - } - } - } - } - else - { - b = shape - 1./3.; - c = 1./sqrt(9*b); - for (;;) - { - do - { - X = rk_gauss(state); - V = 
1.0 + c*X; - } while (V <= 0.0); - - V = V*V*V; - U = rk_double(state); - if (U < 1.0 - 0.0331*(X*X)*(X*X)) return (b*V); - if (log(U) < 0.5*X*X + b*(1. - V + log(V))) return (b*V); - } - } -} - -double rk_gamma(rk_state *state, double shape, double scale) -{ - return scale * rk_standard_gamma(state, shape); -} - -double rk_beta(rk_state *state, double a, double b) -{ - double Ga, Gb; - - if ((a <= 1.0) && (b <= 1.0)) - { - double U, V, X, Y; - /* Use Jonk's algorithm */ - - while (1) - { - U = rk_double(state); - V = rk_double(state); - X = pow(U, 1.0/a); - Y = pow(V, 1.0/b); - - if ((X + Y) <= 1.0) - { - return X / (X + Y); - } - } - } - else - { - Ga = rk_standard_gamma(state, a); - Gb = rk_standard_gamma(state, b); - return Ga/(Ga + Gb); - } -} - -double rk_chisquare(rk_state *state, double df) -{ - return 2.0*rk_standard_gamma(state, df/2.0); -} - -double rk_noncentral_chisquare(rk_state *state, double df, double nonc) -{ - double Chi2, N; - - Chi2 = rk_chisquare(state, df-1); - N = rk_gauss(state) + sqrt(nonc); - return Chi2 + N*N; -} - -double rk_f(rk_state *state, double dfnum, double dfden) -{ - return ((rk_chisquare(state, dfnum) * dfden) / - (rk_chisquare(state, dfden) * dfnum)); -} - -double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc) -{ - double t = rk_noncentral_chisquare(state, dfnum, nonc) * dfden; - return t / (rk_chisquare(state, dfden) * dfnum); -} - -long rk_binomial_btpe(rk_state *state, long n, double p) -{ - double r,q,fm,p1,xm,xl,xr,c,laml,lamr,p2,p3,p4; - double a,u,v,s,F,rho,t,A,nrq,x1,x2,f1,f2,z,z2,w,w2,x; - long m,y,k,i; - - if (!(state->has_binomial) || - (state->nsave != n) || - (state->psave != p)) - { - /* initialize */ - state->nsave = n; - state->psave = p; - state->has_binomial = 1; - state->r = r = min(p, 1.0-p); - state->q = q = 1.0 - r; - state->fm = fm = n*r+r; - state->m = m = (long)floor(state->fm); - state->p1 = p1 = floor(2.195*sqrt(n*r*q)-4.6*q) + 0.5; - state->xm = xm = m + 0.5; - state->xl 
= xl = xm - p1; - state->xr = xr = xm + p1; - state->c = c = 0.134 + 20.5/(15.3 + m); - a = (fm - xl)/(fm-xl*r); - state->laml = laml = a*(1.0 + a/2.0); - a = (xr - fm)/(xr*q); - state->lamr = lamr = a*(1.0 + a/2.0); - state->p2 = p2 = p1*(1.0 + 2.0*c); - state->p3 = p3 = p2 + c/laml; - state->p4 = p4 = p3 + c/lamr; - } - else - { - r = state->r; - q = state->q; - fm = state->fm; - m = state->m; - p1 = state->p1; - xm = state->xm; - xl = state->xl; - xr = state->xr; - c = state->c; - laml = state->laml; - lamr = state->lamr; - p2 = state->p2; - p3 = state->p3; - p4 = state->p4; - } - - /* sigh ... */ - Step10: - nrq = n*r*q; - u = rk_double(state)*p4; - v = rk_double(state); - if (u > p1) goto Step20; - y = (long)floor(xm - p1*v + u); - goto Step60; - - Step20: - if (u > p2) goto Step30; - x = xl + (u - p1)/c; - v = v*c + 1.0 - fabs(m - x + 0.5)/p1; - if (v > 1.0) goto Step10; - y = (long)floor(x); - goto Step50; - - Step30: - if (u > p3) goto Step40; - y = (long)floor(xl + log(v)/laml); - if (y < 0) goto Step10; - v = v*(u-p2)*laml; - goto Step50; - - Step40: - y = (int)floor(xr - log(v)/lamr); - if (y > n) goto Step10; - v = v*(u-p3)*lamr; - - Step50: - k = fabs(y - m); - if ((k > 20) && (k < ((nrq)/2.0 - 1))) goto Step52; - - s = r/q; - a = s*(n+1); - F = 1.0; - if (m < y) - { - for (i=m; i<=y; i++) - { - F *= (a/i - s); - } - } - else if (m > y) - { - for (i=y; i<=m; i++) - { - F /= (a/i - s); - } - } - else - { - if (v > F) goto Step10; - goto Step60; - } - - Step52: - rho = (k/(nrq))*((k*(k/3.0 + 0.625) + 0.16666666666666666)/nrq + 0.5); - t = -k*k/(2*nrq); - A = log(v); - if (A < (t - rho)) goto Step60; - if (A > (t + rho)) goto Step10; - - x1 = y+1; - f1 = m+1; - z = n+1-m; - w = n-y+1; - x2 = x1*x1; - f2 = f1*f1; - z2 = z*z; - w2 = w*w; - if (A > (xm*log(f1/x1) - + (n-m+0.5)*log(z/w) - + (y-m)*log(w*r/(x1*q)) - + (13680.-(462.-(132.-(99.-140./f2)/f2)/f2)/f2)/f1/166320. - + (13680.-(462.-(132.-(99.-140./z2)/z2)/z2)/z2)/z/166320. 
- + (13680.-(462.-(132.-(99.-140./x2)/x2)/x2)/x2)/x1/166320. - + (13680.-(462.-(132.-(99.-140./w2)/w2)/w2)/w2)/w/166320.)) - { - goto Step10; - } - - Step60: - if (p > 0.5) - { - y = n - y; - } - - return y; -} - -long rk_binomial_inversion(rk_state *state, long n, double p) -{ - double q, qn, np, px, U; - long X, bound; - - if (!(state->has_binomial) || - (state->nsave != n) || - (state->psave != p)) - { - state->nsave = n; - state->psave = p; - state->has_binomial = 1; - state->q = q = 1.0 - p; - state->r = qn = exp(n * log(q)); - state->c = np = n*p; - state->m = bound = min(n, np + 10.0*sqrt(np*q + 1)); - } else - { - q = state->q; - qn = state->r; - np = state->c; - bound = state->m; - } - X = 0; - px = qn; - U = rk_double(state); - while (U > px) - { - X++; - if (X > bound) - { - X = 0; - px = qn; - U = rk_double(state); - } else - { - U -= px; - px = ((n-X+1) * p * px)/(X*q); - } - } - return X; -} - -long rk_binomial(rk_state *state, long n, double p) -{ - double q; - - if (p <= 0.5) - { - if (p*n <= 30.0) - { - return rk_binomial_inversion(state, n, p); - } - else - { - return rk_binomial_btpe(state, n, p); - } - } - else - { - q = 1.0-p; - if (q*n <= 30.0) - { - return n - rk_binomial_inversion(state, n, q); - } - else - { - return n - rk_binomial_btpe(state, n, q); - } - } - -} - -long rk_negative_binomial(rk_state *state, double n, double p) -{ - double Y; - - Y = rk_gamma(state, n, (1-p)/p); - return rk_poisson(state, Y); -} - -long rk_poisson_mult(rk_state *state, double lam) -{ - long X; - double prod, U, enlam; - - enlam = exp(-lam); - X = 0; - prod = 1.0; - while (1) - { - U = rk_double(state); - prod *= U; - if (prod > enlam) - { - X += 1; - } - else - { - return X; - } - } -} - -#define LS2PI 0.91893853320467267 -#define TWELFTH 0.083333333333333333333333 -long rk_poisson_ptrs(rk_state *state, double lam) -{ - long k; - double U, V, slam, loglam, a, b, invalpha, vr, us; - - slam = sqrt(lam); - loglam = log(lam); - b = 0.931 + 2.53*slam; - a = 
-0.059 + 0.02483*b; - invalpha = 1.1239 + 1.1328/(b-3.4); - vr = 0.9277 - 3.6224/(b-2); - - while (1) - { - U = rk_double(state) - 0.5; - V = rk_double(state); - us = 0.5 - fabs(U); - k = (long)floor((2*a/us + b)*U + lam + 0.43); - if ((us >= 0.07) && (V <= vr)) - { - return k; - } - if ((k < 0) || - ((us < 0.013) && (V > us))) - { - continue; - } - if ((log(V) + log(invalpha) - log(a/(us*us)+b)) <= - (-lam + k*loglam - loggam(k+1))) - { - return k; - } - - - } - -} - -long rk_poisson(rk_state *state, double lam) -{ - if (lam >= 10) - { - return rk_poisson_ptrs(state, lam); - } - else if (lam == 0) - { - return 0; - } - else - { - return rk_poisson_mult(state, lam); - } -} - -double rk_standard_cauchy(rk_state *state) -{ - return rk_gauss(state) / rk_gauss(state); -} - -double rk_standard_t(rk_state *state, double df) -{ - double N, G, X; - - N = rk_gauss(state); - G = rk_standard_gamma(state, df/2); - X = sqrt(df/2)*N/sqrt(G); - return X; -} - -/* Uses the rejection algorithm compared against the wrapped Cauchy - distribution suggested by Best and Fisher and documented in - Chapter 9 of Luc's Non-Uniform Random Variate Generation. 
- http://cg.scs.carleton.ca/~luc/rnbookindex.html - (but corrected to match the algorithm in R and Python) -*/ -double rk_vonmises(rk_state *state, double mu, double kappa) -{ - double r, rho, s; - double U, V, W, Y, Z; - double result, mod; - int neg; - - if (kappa < 1e-8) - { - return M_PI * (2*rk_double(state)-1); - } - else - { - r = 1 + sqrt(1 + 4*kappa*kappa); - rho = (r - sqrt(2*r))/(2*kappa); - s = (1 + rho*rho)/(2*rho); - - while (1) - { - U = rk_double(state); - Z = cos(M_PI*U); - W = (1 + s*Z)/(s + Z); - Y = kappa * (s - W); - V = rk_double(state); - if ((Y*(2-Y) - V >= 0) || (log(Y/V)+1 - Y >= 0)) - { - break; - } - } - - U = rk_double(state); - - result = acos(W); - if (U < 0.5) - { - result = -result; - } - result += mu; - neg = (result < 0); - mod = fabs(result); - mod = (fmod(mod+M_PI, 2*M_PI)-M_PI); - if (neg) - { - mod *= -1; - } - - return mod; - } -} - -double rk_pareto(rk_state *state, double a) -{ - return exp(rk_standard_exponential(state)/a) - 1; -} - -double rk_weibull(rk_state *state, double a) -{ - return pow(rk_standard_exponential(state), 1./a); -} - -double rk_power(rk_state *state, double a) -{ - return pow(1 - exp(-rk_standard_exponential(state)), 1./a); -} - -double rk_laplace(rk_state *state, double loc, double scale) -{ - double U; - - U = rk_double(state); - if (U < 0.5) - { - U = loc + scale * log(U + U); - } else - { - U = loc - scale * log(2.0 - U - U); - } - return U; -} - -double rk_gumbel(rk_state *state, double loc, double scale) -{ - double U; - - U = 1.0 - rk_double(state); - return loc - scale * log(-log(U)); -} - -double rk_logistic(rk_state *state, double loc, double scale) -{ - double U; - - U = rk_double(state); - return loc + scale * log(U/(1.0 - U)); -} - -double rk_lognormal(rk_state *state, double mean, double sigma) -{ - return exp(rk_normal(state, mean, sigma)); -} - -double rk_rayleigh(rk_state *state, double mode) -{ - return mode*sqrt(-2.0 * log(1.0 - rk_double(state))); -} - -double rk_wald(rk_state 
*state, double mean, double scale) -{ - double U, X, Y; - double mu_2l; - - mu_2l = mean / (2*scale); - Y = rk_gauss(state); - Y = mean*Y*Y; - X = mean + mu_2l*(Y - sqrt(4*scale*Y + Y*Y)); - U = rk_double(state); - if (U <= mean/(mean+X)) - { - return X; - } else - { - return mean*mean/X; - } -} - -long rk_zipf(rk_state *state, double a) -{ - double T, U, V; - long X; - double am1, b; - - am1 = a - 1.0; - b = pow(2.0, am1); - do - { - U = 1.0-rk_double(state); - V = rk_double(state); - X = (long)floor(pow(U, -1.0/am1)); - /* The real result may be above what can be represented in a signed - * long. It will get casted to -sys.maxint-1. Since this is - * a straightforward rejection algorithm, we can just reject this value - * in the rejection condition below. This function then models a Zipf - * distribution truncated to sys.maxint. - */ - T = pow(1.0 + 1.0/X, am1); - } while (((V*X*(T-1.0)/(b-1.0)) > (T/b)) || X < 1); - return X; -} - -long rk_geometric_search(rk_state *state, double p) -{ - double U; - long X; - double sum, prod, q; - - X = 1; - sum = prod = p; - q = 1.0 - p; - U = rk_double(state); - while (U > sum) - { - prod *= q; - sum += prod; - X++; - } - return X; -} - -long rk_geometric_inversion(rk_state *state, double p) -{ - return (long)ceil(log(1.0-rk_double(state))/log(1.0-p)); -} - -long rk_geometric(rk_state *state, double p) -{ - if (p >= 0.333333333333333333333333) - { - return rk_geometric_search(state, p); - } else - { - return rk_geometric_inversion(state, p); - } -} - -long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample) -{ - long d1, K, Z; - double d2, U, Y; - - d1 = bad + good - sample; - d2 = (double)min(bad, good); - - Y = d2; - K = sample; - while (Y > 0.0) - { - U = rk_double(state); - Y -= (long)floor(U + Y/(d1 + K)); - K--; - if (K == 0) break; - } - Z = (long)(d2 - Y); - if (good > bad) Z = sample - Z; - return Z; -} - -/* D1 = 2*sqrt(2/e) */ -/* D2 = 3 - 2*sqrt(3/e) */ -#define D1 1.7155277699214135 -#define 
D2 0.8989161620588988 -long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample) -{ - long mingoodbad, maxgoodbad, popsize, m, d9; - double d4, d5, d6, d7, d8, d10, d11; - long Z; - double T, W, X, Y; - - mingoodbad = min(good, bad); - popsize = good + bad; - maxgoodbad = max(good, bad); - m = min(sample, popsize - sample); - d4 = ((double)mingoodbad) / popsize; - d5 = 1.0 - d4; - d6 = m*d4 + 0.5; - d7 = sqrt((popsize - m) * sample * d4 *d5 / (popsize-1) + 0.5); - d8 = D1*d7 + D2; - d9 = (long)floor((double)((m+1)*(mingoodbad+1))/(popsize+2)); - d10 = (loggam(d9+1) + loggam(mingoodbad-d9+1) + loggam(m-d9+1) + - loggam(maxgoodbad-m+d9+1)); - d11 = min(min(m, mingoodbad)+1.0, floor(d6+16*d7)); - /* 16 for 16-decimal-digit precision in D1 and D2 */ - - while (1) - { - X = rk_double(state); - Y = rk_double(state); - W = d6 + d8*(Y- 0.5)/X; - - /* fast rejection: */ - if ((W < 0.0) || (W >= d11)) continue; - - Z = (long)floor(W); - T = d10 - (loggam(Z+1) + loggam(mingoodbad-Z+1) + loggam(m-Z+1) + - loggam(maxgoodbad-m+Z+1)); - - /* fast acceptance: */ - if ((X*(4.0-X)-3.0) <= T) break; - - /* fast rejection: */ - if (X*(X-T) >= 1) continue; - - if (2.0*log(X) <= T) break; /* acceptance */ - } - - /* this is a correction to HRUA* by Ivan Frohne in rv.py */ - if (good > bad) Z = m - Z; - - /* another fix from rv.py to allow sample to exceed popsize/2 */ - if (m < sample) Z = good - Z; - - return Z; -} -#undef D1 -#undef D2 - -long rk_hypergeometric(rk_state *state, long good, long bad, long sample) -{ - if (sample > 10) - { - return rk_hypergeometric_hrua(state, good, bad, sample); - } else - { - return rk_hypergeometric_hyp(state, good, bad, sample); - } -} - -double rk_triangular(rk_state *state, double left, double mode, double right) -{ - double base, leftbase, ratio, leftprod, rightprod; - double U; - - base = right - left; - leftbase = mode - left; - ratio = leftbase / base; - leftprod = leftbase*base; - rightprod = (right - mode)*base; - - U = 
rk_double(state); - if (U <= ratio) - { - return left + sqrt(U*leftprod); - } else - { - return right - sqrt((1.0 - U) * rightprod); - } -} - -long rk_logseries(rk_state *state, double p) -{ - double q, r, U, V; - long result; - - r = log(1.0 - p); - - while (1) { - V = rk_double(state); - if (V >= p) { - return 1; - } - U = rk_double(state); - q = 1.0 - exp(r*U); - if (V <= q*q) { - result = (long)floor(1 + log(V)/log(q)); - if (result < 1) { - continue; - } - else { - return result; - } - } - if (V >= q) { - return 1; - } - return 2; - } -} diff --git a/numpy-1.6.2/numpy/random/mtrand/distributions.h b/numpy-1.6.2/numpy/random/mtrand/distributions.h deleted file mode 100644 index 6f60a4ff30..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/distributions.h +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright 2005 Robert Kern (robert.kern@gmail.com) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef _RK_DISTR_ -#define _RK_DISTR_ - -#include "randomkit.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* References: - * - * Devroye, Luc. _Non-Uniform Random Variate Generation_. - * Springer-Verlag, New York, 1986. - * http://cgm.cs.mcgill.ca/~luc/rnbookindex.html - * - * Kachitvichyanukul, V. and Schmeiser, B. W. Binomial Random Variate - * Generation. Communications of the ACM, 31, 2 (February, 1988) 216. - * - * Hoermann, W. The Transformed Rejection Method for Generating Poisson Random - * Variables. Insurance: Mathematics and Economics, (to appear) - * http://citeseer.csail.mit.edu/151115.html - * - * Marsaglia, G. and Tsang, W. W. A Simple Method for Generating Gamma - * Variables. ACM Transactions on Mathematical Software, Vol. 26, No. 3, - * September 2000, Pages 363–372. - */ - -/* Normal distribution with mean=loc and standard deviation=scale. */ -extern double rk_normal(rk_state *state, double loc, double scale); - -/* Standard exponential distribution (mean=1) computed by inversion of the - * CDF. */ -extern double rk_standard_exponential(rk_state *state); - -/* Exponential distribution with mean=scale. */ -extern double rk_exponential(rk_state *state, double scale); - -/* Uniform distribution on interval [loc, loc+scale). */ -extern double rk_uniform(rk_state *state, double loc, double scale); - -/* Standard gamma distribution with shape parameter. - * When shape < 1, the algorithm given by (Devroye p. 304) is used. - * When shape == 1, a Exponential variate is generated. - * When shape > 1, the small and fast method of (Marsaglia and Tsang 2000) - * is used. - */ -extern double rk_standard_gamma(rk_state *state, double shape); - -/* Gamma distribution with shape and scale. */ -extern double rk_gamma(rk_state *state, double shape, double scale); - -/* Beta distribution computed by combining two gamma variates (Devroye p. 432). 
- */ -extern double rk_beta(rk_state *state, double a, double b); - -/* Chi^2 distribution computed by transforming a gamma variate (it being a - * special case Gamma(df/2, 2)). */ -extern double rk_chisquare(rk_state *state, double df); - -/* Noncentral Chi^2 distribution computed by modifying a Chi^2 variate. */ -extern double rk_noncentral_chisquare(rk_state *state, double df, double nonc); - -/* F distribution computed by taking the ratio of two Chi^2 variates. */ -extern double rk_f(rk_state *state, double dfnum, double dfden); - -/* Noncentral F distribution computed by taking the ratio of a noncentral Chi^2 - * and a Chi^2 variate. */ -extern double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc); - -/* Binomial distribution with n Bernoulli trials with success probability p. - * When n*p <= 30, the "Second waiting time method" given by (Devroye p. 525) is - * used. Otherwise, the BTPE algorithm of (Kachitvichyanukul and Schmeiser 1988) - * is used. */ -extern long rk_binomial(rk_state *state, long n, double p); - -/* Binomial distribution using BTPE. */ -extern long rk_binomial_btpe(rk_state *state, long n, double p); - -/* Binomial distribution using inversion and chop-down */ -extern long rk_binomial_inversion(rk_state *state, long n, double p); - -/* Negative binomial distribution computed by generating a Gamma(n, (1-p)/p) - * variate Y and returning a Poisson(Y) variate (Devroye p. 543). */ -extern long rk_negative_binomial(rk_state *state, double n, double p); - -/* Poisson distribution with mean=lam. - * When lam < 10, a basic algorithm using repeated multiplications of uniform - * variates is used (Devroye p. 504). - * When lam >= 10, algorithm PTRS from (Hoermann 1992) is used. - */ -extern long rk_poisson(rk_state *state, double lam); - -/* Poisson distribution computed by repeated multiplication of uniform variates. 
- */ -extern long rk_poisson_mult(rk_state *state, double lam); - -/* Poisson distribution computer by the PTRS algorithm. */ -extern long rk_poisson_ptrs(rk_state *state, double lam); - -/* Standard Cauchy distribution computed by dividing standard gaussians - * (Devroye p. 451). */ -extern double rk_standard_cauchy(rk_state *state); - -/* Standard t-distribution with df degrees of freedom (Devroye p. 445 as - * corrected in the Errata). */ -extern double rk_standard_t(rk_state *state, double df); - -/* von Mises circular distribution with center mu and shape kappa on [-pi,pi] - * (Devroye p. 476 as corrected in the Errata). */ -extern double rk_vonmises(rk_state *state, double mu, double kappa); - -/* Pareto distribution via inversion (Devroye p. 262) */ -extern double rk_pareto(rk_state *state, double a); - -/* Weibull distribution via inversion (Devroye p. 262) */ -extern double rk_weibull(rk_state *state, double a); - -/* Power distribution via inversion (Devroye p. 262) */ -extern double rk_power(rk_state *state, double a); - -/* Laplace distribution */ -extern double rk_laplace(rk_state *state, double loc, double scale); - -/* Gumbel distribution */ -extern double rk_gumbel(rk_state *state, double loc, double scale); - -/* Logistic distribution */ -extern double rk_logistic(rk_state *state, double loc, double scale); - -/* Log-normal distribution */ -extern double rk_lognormal(rk_state *state, double mean, double sigma); - -/* Rayleigh distribution */ -extern double rk_rayleigh(rk_state *state, double mode); - -/* Wald distribution */ -extern double rk_wald(rk_state *state, double mean, double scale); - -/* Zipf distribution */ -extern long rk_zipf(rk_state *state, double a); - -/* Geometric distribution */ -extern long rk_geometric(rk_state *state, double p); -extern long rk_geometric_search(rk_state *state, double p); -extern long rk_geometric_inversion(rk_state *state, double p); - -/* Hypergeometric distribution */ -extern long rk_hypergeometric(rk_state 
*state, long good, long bad, long sample); -extern long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample); -extern long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample); - -/* Triangular distribution */ -extern double rk_triangular(rk_state *state, double left, double mode, double right); - -/* Logarithmic series distribution */ -extern long rk_logseries(rk_state *state, double p); - -#ifdef __cplusplus -} -#endif - - -#endif /* _RK_DISTR_ */ diff --git a/numpy-1.6.2/numpy/random/mtrand/initarray.c b/numpy-1.6.2/numpy/random/mtrand/initarray.c deleted file mode 100644 index beff785100..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/initarray.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * These function have been adapted from Python 2.4.1's _randommodule.c - * - * The following changes have been made to it in 2005 by Robert Kern: - * - * * init_by_array has been declared extern, has a void return, and uses the - * rk_state structure to hold its data. - * - * The original file has the following verbatim comments: - * - * ------------------------------------------------------------------ - * The code in this module was based on a download from: - * http://www.math.keio.ac.jp/~matumoto/MT2002/emt19937ar.html - * - * It was modified in 2002 by Raymond Hettinger as follows: - * - * * the principal computational lines untouched except for tabbing. - * - * * renamed genrand_res53() to random_random() and wrapped - * in python calling/return code. - * - * * genrand_int32() and the helper functions, init_genrand() - * and init_by_array(), were declared static, wrapped in - * Python calling/return code. also, their global data - * references were replaced with structure references. - * - * * unused functions from the original were deleted. - * new, original C python code was added to implement the - * Random() interface. 
- * - * The following are the verbatim comments from the original code: - * - * A C-program for MT19937, with initialization improved 2002/1/26. - * Coded by Takuji Nishimura and Makoto Matsumoto. - * - * Before using, initialize the state by using init_genrand(seed) - * or init_by_array(init_key, key_length). - * - * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * 3. The names of its contributors may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Any feedback is very welcome. 
- * http://www.math.keio.ac.jp/matumoto/emt.html - * email: matumoto@math.keio.ac.jp - */ - -#include "initarray.h" - -static void -init_genrand(rk_state *self, unsigned long s); - -/* initializes mt[RK_STATE_LEN] with a seed */ -static void -init_genrand(rk_state *self, unsigned long s) -{ - int mti; - unsigned long *mt = self->key; - - mt[0] = s & 0xffffffffUL; - for (mti = 1; mti < RK_STATE_LEN; mti++) { - /* - * See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. - * In the previous versions, MSBs of the seed affect - * only MSBs of the array mt[]. - * 2002/01/09 modified by Makoto Matsumoto - */ - mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti); - /* for > 32 bit machines */ - mt[mti] &= 0xffffffffUL; - } - self->pos = mti; - return; -} - - -/* - * initialize by an array with array-length - * init_key is the array for initializing keys - * key_length is its length - */ -extern void -init_by_array(rk_state *self, unsigned long init_key[], npy_intp key_length) -{ - /* was signed in the original code. RDH 12/16/2002 */ - npy_intp i = 1; - npy_intp j = 0; - unsigned long *mt = self->key; - npy_intp k; - - init_genrand(self, 19650218UL); - k = (RK_STATE_LEN > key_length ? 
RK_STATE_LEN : key_length); - for (; k; k--) { - /* non linear */ - mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) - + init_key[j] + j; - /* for > 32 bit machines */ - mt[i] &= 0xffffffffUL; - i++; - j++; - if (i >= RK_STATE_LEN) { - mt[0] = mt[RK_STATE_LEN - 1]; - i = 1; - } - if (j >= key_length) { - j = 0; - } - } - for (k = RK_STATE_LEN - 1; k; k--) { - mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL)) - - i; /* non linear */ - mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ - i++; - if (i >= RK_STATE_LEN) { - mt[0] = mt[RK_STATE_LEN - 1]; - i = 1; - } - } - - mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */ - self->gauss = 0; - self->has_gauss = 0; - self->has_binomial = 0; -} diff --git a/numpy-1.6.2/numpy/random/mtrand/initarray.h b/numpy-1.6.2/numpy/random/mtrand/initarray.h deleted file mode 100644 index 4cd27f5a9e..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/initarray.h +++ /dev/null @@ -1,7 +0,0 @@ -#include "Python.h" -#include "numpy/arrayobject.h" -#include "randomkit.h" - -extern void -init_by_array(rk_state *self, unsigned long init_key[], - npy_intp key_length); diff --git a/numpy-1.6.2/numpy/random/mtrand/mtrand.c.REMOVED.git-id b/numpy-1.6.2/numpy/random/mtrand/mtrand.c.REMOVED.git-id deleted file mode 100644 index ca92c5266f..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/mtrand.c.REMOVED.git-id +++ /dev/null @@ -1 +0,0 @@ -cdd748e756938a85ccdd2b5ae6cc21b3de3bc2a9 \ No newline at end of file diff --git a/numpy-1.6.2/numpy/random/mtrand/mtrand.pyx b/numpy-1.6.2/numpy/random/mtrand/mtrand.pyx deleted file mode 100644 index 719e1bc155..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/mtrand.pyx +++ /dev/null @@ -1,4378 +0,0 @@ -# mtrand.pyx -- A Pyrex wrapper of Jean-Sebastien Roy's RandomKit -# -# Copyright 2005 Robert Kern (robert.kern@gmail.com) -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files 
(the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -include "Python.pxi" -include "numpy.pxi" - -cdef extern from "math.h": - double exp(double x) - double log(double x) - double floor(double x) - double sin(double x) - double cos(double x) - -cdef extern from "mtrand_py_helper.h": - object empty_py_bytes(npy_intp length, void **bytes) - -cdef extern from "randomkit.h": - - ctypedef struct rk_state: - unsigned long key[624] - int pos - int has_gauss - double gauss - - ctypedef enum rk_error: - RK_NOERR = 0 - RK_ENODEV = 1 - RK_ERR_MAX = 2 - - char *rk_strerror[2] - - # 0xFFFFFFFFUL - unsigned long RK_MAX - - void rk_seed(unsigned long seed, rk_state *state) - rk_error rk_randomseed(rk_state *state) - unsigned long rk_random(rk_state *state) - long rk_long(rk_state *state) - unsigned long rk_ulong(rk_state *state) - unsigned long rk_interval(unsigned long max, rk_state *state) - double rk_double(rk_state *state) - void rk_fill(void *buffer, size_t size, rk_state *state) - rk_error rk_devfill(void *buffer, size_t size, int strong) - rk_error rk_altfill(void *buffer, size_t size, int strong, - 
rk_state *state) - double rk_gauss(rk_state *state) - -cdef extern from "distributions.h": - - double rk_normal(rk_state *state, double loc, double scale) - double rk_standard_exponential(rk_state *state) - double rk_exponential(rk_state *state, double scale) - double rk_uniform(rk_state *state, double loc, double scale) - double rk_standard_gamma(rk_state *state, double shape) - double rk_gamma(rk_state *state, double shape, double scale) - double rk_beta(rk_state *state, double a, double b) - double rk_chisquare(rk_state *state, double df) - double rk_noncentral_chisquare(rk_state *state, double df, double nonc) - double rk_f(rk_state *state, double dfnum, double dfden) - double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc) - double rk_standard_cauchy(rk_state *state) - double rk_standard_t(rk_state *state, double df) - double rk_vonmises(rk_state *state, double mu, double kappa) - double rk_pareto(rk_state *state, double a) - double rk_weibull(rk_state *state, double a) - double rk_power(rk_state *state, double a) - double rk_laplace(rk_state *state, double loc, double scale) - double rk_gumbel(rk_state *state, double loc, double scale) - double rk_logistic(rk_state *state, double loc, double scale) - double rk_lognormal(rk_state *state, double mode, double sigma) - double rk_rayleigh(rk_state *state, double mode) - double rk_wald(rk_state *state, double mean, double scale) - double rk_triangular(rk_state *state, double left, double mode, double right) - - long rk_binomial(rk_state *state, long n, double p) - long rk_binomial_btpe(rk_state *state, long n, double p) - long rk_binomial_inversion(rk_state *state, long n, double p) - long rk_negative_binomial(rk_state *state, double n, double p) - long rk_poisson(rk_state *state, double lam) - long rk_poisson_mult(rk_state *state, double lam) - long rk_poisson_ptrs(rk_state *state, double lam) - long rk_zipf(rk_state *state, double a) - long rk_geometric(rk_state *state, double p) - long 
rk_hypergeometric(rk_state *state, long good, long bad, long sample) - long rk_logseries(rk_state *state, double p) - -ctypedef double (* rk_cont0)(rk_state *state) -ctypedef double (* rk_cont1)(rk_state *state, double a) -ctypedef double (* rk_cont2)(rk_state *state, double a, double b) -ctypedef double (* rk_cont3)(rk_state *state, double a, double b, double c) - -ctypedef long (* rk_disc0)(rk_state *state) -ctypedef long (* rk_discnp)(rk_state *state, long n, double p) -ctypedef long (* rk_discdd)(rk_state *state, double n, double p) -ctypedef long (* rk_discnmN)(rk_state *state, long n, long m, long N) -ctypedef long (* rk_discd)(rk_state *state, double a) - - -cdef extern from "initarray.h": - void init_by_array(rk_state *self, unsigned long *init_key, - npy_intp key_length) - -# Initialize numpy -import_array() - -import numpy as np - -cdef object cont0_array(rk_state *state, rk_cont0 func, object size): - cdef double *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state) - else: - array = np.empty(size, np.float64) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state) - return array - - -cdef object cont1_array_sc(rk_state *state, rk_cont1 func, object size, double a): - cdef double *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, a) - else: - array = np.empty(size, np.float64) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, a) - return array - -cdef object cont1_array(rk_state *state, rk_cont1 func, object size, ndarray oa): - cdef double *array_data - cdef double *oa_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - cdef flatiter itera - cdef broadcast multi - - if size is None: - array = PyArray_SimpleNew(oa.nd, oa.dimensions, NPY_DOUBLE) - 
length = PyArray_SIZE(array) - array_data = array.data - itera = PyArray_IterNew(oa) - for i from 0 <= i < length: - array_data[i] = func(state, ((itera.dataptr))[0]) - PyArray_ITER_NEXT(itera) - else: - array = np.empty(size, np.float64) - array_data = array.data - multi = PyArray_MultiIterNew(2, array, - oa) - if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, oa_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - return array - -cdef object cont2_array_sc(rk_state *state, rk_cont2 func, object size, double a, - double b): - cdef double *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, a, b) - else: - array = np.empty(size, np.float64) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, a, b) - return array - - -cdef object cont2_array(rk_state *state, rk_cont2 func, object size, - ndarray oa, ndarray ob): - cdef double *array_data - cdef double *oa_data - cdef double *ob_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - cdef broadcast multi - - if size is None: - multi = PyArray_MultiIterNew(2, oa, ob) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) - array_data = array.data - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 0) - ob_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, oa_data[0], ob_data[0]) - PyArray_MultiIter_NEXT(multi) - else: - array = np.empty(size, np.float64) - array_data = array.data - multi = PyArray_MultiIterNew(3, array, oa, ob) - if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 1) - ob_data = PyArray_MultiIter_DATA(multi, 2) - 
array_data[i] = func(state, oa_data[0], ob_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - PyArray_MultiIter_NEXTi(multi, 2) - return array - -cdef object cont3_array_sc(rk_state *state, rk_cont3 func, object size, double a, - double b, double c): - - cdef double *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, a, b, c) - else: - array = np.empty(size, np.float64) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, a, b, c) - return array - -cdef object cont3_array(rk_state *state, rk_cont3 func, object size, ndarray oa, - ndarray ob, ndarray oc): - - cdef double *array_data - cdef double *oa_data - cdef double *ob_data - cdef double *oc_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - cdef broadcast multi - - if size is None: - multi = PyArray_MultiIterNew(3, oa, ob, oc) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) - array_data = array.data - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 0) - ob_data = PyArray_MultiIter_DATA(multi, 1) - oc_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) - PyArray_MultiIter_NEXT(multi) - else: - array = np.empty(size, np.float64) - array_data = array.data - multi = PyArray_MultiIterNew(4, array, oa, - ob, oc) - if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 1) - ob_data = PyArray_MultiIter_DATA(multi, 2) - oc_data = PyArray_MultiIter_DATA(multi, 3) - array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) - PyArray_MultiIter_NEXT(multi) - return array - -cdef object disc0_array(rk_state *state, rk_disc0 func, object size): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef 
npy_intp i - - if size is None: - return func(state) - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state) - return array - -cdef object discnp_array_sc(rk_state *state, rk_discnp func, object size, long n, double p): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, n, p) - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, n, p) - return array - -cdef object discnp_array(rk_state *state, rk_discnp func, object size, ndarray on, ndarray op): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - cdef double *op_data - cdef long *on_data - cdef broadcast multi - - if size is None: - multi = PyArray_MultiIterNew(2, on, op) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) - array_data = array.data - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 0) - op_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXT(multi) - else: - array = np.empty(size, int) - array_data = array.data - multi = PyArray_MultiIterNew(3, array, on, op) - if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 1) - op_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - PyArray_MultiIter_NEXTi(multi, 2) - - return array - -cdef object discdd_array_sc(rk_state *state, rk_discdd func, object size, double n, double p): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, n, p) 
- else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, n, p) - return array - -cdef object discdd_array(rk_state *state, rk_discdd func, object size, ndarray on, ndarray op): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - cdef double *op_data - cdef double *on_data - cdef broadcast multi - - if size is None: - multi = PyArray_MultiIterNew(2, on, op) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) - array_data = array.data - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 0) - op_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXT(multi) - else: - array = np.empty(size, int) - array_data = array.data - multi = PyArray_MultiIterNew(3, array, on, op) - if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 1) - op_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - PyArray_MultiIter_NEXTi(multi, 2) - - return array - -cdef object discnmN_array_sc(rk_state *state, rk_discnmN func, object size, - long n, long m, long N): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, n, m, N) - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, n, m, N) - return array - -cdef object discnmN_array(rk_state *state, rk_discnmN func, object size, - ndarray on, ndarray om, ndarray oN): - cdef long *array_data - cdef long *on_data - cdef long *om_data - cdef long *oN_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp 
i - cdef broadcast multi - - if size is None: - multi = PyArray_MultiIterNew(3, on, om, oN) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) - array_data = array.data - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 0) - om_data = PyArray_MultiIter_DATA(multi, 1) - oN_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) - PyArray_MultiIter_NEXT(multi) - else: - array = np.empty(size, int) - array_data = array.data - multi = PyArray_MultiIterNew(4, array, on, om, - oN) - if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 1) - om_data = PyArray_MultiIter_DATA(multi, 2) - oN_data = PyArray_MultiIter_DATA(multi, 3) - array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) - PyArray_MultiIter_NEXT(multi) - - return array - -cdef object discd_array_sc(rk_state *state, rk_discd func, object size, double a): - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if size is None: - return func(state, a) - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - array_data[i] = func(state, a) - return array - -cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa): - cdef long *array_data - cdef double *oa_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - cdef broadcast multi - cdef flatiter itera - - if size is None: - array = PyArray_SimpleNew(oa.nd, oa.dimensions, NPY_LONG) - length = PyArray_SIZE(array) - array_data = array.data - itera = PyArray_IterNew(oa) - for i from 0 <= i < length: - array_data[i] = func(state, ((itera.dataptr))[0]) - PyArray_ITER_NEXT(itera) - else: - array = np.empty(size, int) - array_data = array.data - multi = PyArray_MultiIterNew(2, array, oa) - 
if (multi.size != PyArray_SIZE(array)): - raise ValueError("size is not compatible with inputs") - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, oa_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - return array - -cdef double kahan_sum(double *darr, npy_intp n): - cdef double c, y, t, sum - cdef npy_intp i - sum = darr[0] - c = 0.0 - for i from 1 <= i < n: - y = darr[i] - c - t = sum + y - c = (t-sum) - y - sum = t - return sum - -cdef class RandomState: - """ - RandomState(seed=None) - - Container for the Mersenne Twister pseudo-random number generator. - - `RandomState` exposes a number of methods for generating random numbers - drawn from a variety of probability distributions. In addition to the - distribution-specific arguments, each method takes a keyword argument - `size` that defaults to ``None``. If `size` is ``None``, then a single - value is generated and returned. If `size` is an integer, then a 1-D - array filled with generated values is returned. If `size` is a tuple, - then an array with that shape is filled and returned. - - Parameters - ---------- - seed : int or array_like, optional - Random seed initializing the pseudo-random number generator. - Can be an integer, an array (or other sequence) of integers of - any length, or ``None`` (the default). - If `seed` is ``None``, then `RandomState` will try to read data from - ``/dev/urandom`` (or the Windows analogue) if available or seed from - the clock otherwise. - - Notes - ----- - The Python stdlib module "random" also contains a Mersenne Twister - pseudo-random number generator with a number of methods that are similar - to the ones available in `RandomState`. `RandomState`, besides being - NumPy-aware, has the advantage that it provides a much larger number - of probability distributions to choose from. 
- - """ - cdef rk_state *internal_state - poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 - - def __init__(self, seed=None): - self.internal_state = PyMem_Malloc(sizeof(rk_state)) - - self.seed(seed) - - def __dealloc__(self): - if self.internal_state != NULL: - PyMem_Free(self.internal_state) - self.internal_state = NULL - - def seed(self, seed=None): - """ - seed(seed=None) - - Seed the generator. - - This method is called when `RandomState` is initialized. It can be - called again to re-seed the generator. For details, see `RandomState`. - - Parameters - ---------- - seed : int or array_like, optional - Seed for `RandomState`. - - See Also - -------- - RandomState - - """ - cdef rk_error errcode - cdef ndarray obj "arrayObject_obj" - if seed is None: - errcode = rk_randomseed(self.internal_state) - elif type(seed) is int: - rk_seed(seed, self.internal_state) - elif isinstance(seed, np.integer): - iseed = int(seed) - rk_seed(iseed, self.internal_state) - else: - obj = PyArray_ContiguousFromObject(seed, NPY_LONG, 1, 1) - init_by_array(self.internal_state, (obj.data), - obj.dimensions[0]) - - def get_state(self): - """ - get_state() - - Return a tuple representing the internal state of the generator. - - For more details, see `set_state`. - - Returns - ------- - out : tuple(str, ndarray of 624 uints, int, int, float) - The returned tuple has the following items: - - 1. the string 'MT19937'. - 2. a 1-D array of 624 unsigned integer keys. - 3. an integer ``pos``. - 4. an integer ``has_gauss``. - 5. a float ``cached_gaussian``. - - See Also - -------- - set_state - - Notes - ----- - `set_state` and `get_state` are not needed to work with any of the - random distributions in NumPy. If the internal state is manually altered, - the user should know exactly what he/she is doing. 
- - """ - cdef ndarray state "arrayObject_state" - state = np.empty(624, np.uint) - memcpy((state.data), (self.internal_state.key), 624*sizeof(long)) - state = np.asarray(state, np.uint32) - return ('MT19937', state, self.internal_state.pos, - self.internal_state.has_gauss, self.internal_state.gauss) - - def set_state(self, state): - """ - set_state(state) - - Set the internal state of the generator from a tuple. - - For use if one has reason to manually (re-)set the internal state of the - "Mersenne Twister"[1]_ pseudo-random number generating algorithm. - - Parameters - ---------- - state : tuple(str, ndarray of 624 uints, int, int, float) - The `state` tuple has the following items: - - 1. the string 'MT19937', specifying the Mersenne Twister algorithm. - 2. a 1-D array of 624 unsigned integers ``keys``. - 3. an integer ``pos``. - 4. an integer ``has_gauss``. - 5. a float ``cached_gaussian``. - - Returns - ------- - out : None - Returns 'None' on success. - - See Also - -------- - get_state - - Notes - ----- - `set_state` and `get_state` are not needed to work with any of the - random distributions in NumPy. If the internal state is manually altered, - the user should know exactly what he/she is doing. - - For backwards compatibility, the form (str, array of 624 uints, int) is - also accepted although it is missing some information about the cached - Gaussian value: ``state = ('MT19937', keys, pos)``. - - References - ---------- - .. [1] M. Matsumoto and T. Nishimura, "Mersenne Twister: A - 623-dimensionally equidistributed uniform pseudorandom number - generator," *ACM Trans. on Modeling and Computer Simulation*, - Vol. 8, No. 1, pp. 3-30, Jan. 1998. 
- - """ - cdef ndarray obj "arrayObject_obj" - cdef int pos - algorithm_name = state[0] - if algorithm_name != 'MT19937': - raise ValueError("algorithm must be 'MT19937'") - key, pos = state[1:3] - if len(state) == 3: - has_gauss = 0 - cached_gaussian = 0.0 - else: - has_gauss, cached_gaussian = state[3:5] - try: - obj = PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) - except TypeError: - # compatibility -- could be an older pickle - obj = PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) - if obj.dimensions[0] != 624: - raise ValueError("state must be 624 longs") - memcpy((self.internal_state.key), (obj.data), 624*sizeof(long)) - self.internal_state.pos = pos - self.internal_state.has_gauss = has_gauss - self.internal_state.gauss = cached_gaussian - - # Pickling support: - def __getstate__(self): - return self.get_state() - - def __setstate__(self, state): - self.set_state(state) - - def __reduce__(self): - return (np.random.__RandomState_ctor, (), self.get_state()) - - # Basic distributions: - def random_sample(self, size=None): - """ - random_sample(size=None) - - Return random floats in the half-open interval [0.0, 1.0). - - Results are from the "continuous uniform" distribution over the - stated interval. To sample :math:`Unif[a, b), b > a` multiply - the output of `random_sample` by `(b-a)` and add `a`:: - - (b - a) * random_sample() + a - - Parameters - ---------- - size : int or tuple of ints, optional - Defines the shape of the returned array of random floats. If None - (the default), returns a single float. - - Returns - ------- - out : float or ndarray of floats - Array of random floats of shape `size` (unless ``size=None``, in which - case a single float is returned). 
- - Examples - -------- - >>> np.random.random_sample() - 0.47108547995356098 - >>> type(np.random.random_sample()) - - >>> np.random.random_sample((5,)) - array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) - - Three-by-two array of random numbers from [-5, 0): - - >>> 5 * np.random.random_sample((3, 2)) - 5 - array([[-3.99149989, -0.52338984], - [-2.99091858, -0.79479508], - [-1.23204345, -1.75224494]]) - - """ - return cont0_array(self.internal_state, rk_double, size) - - def tomaxint(self, size=None): - """ - tomaxint(size=None) - - Random integers between 0 and ``sys.maxint``, inclusive. - - Return a sample of uniformly distributed random integers in the interval - [0, ``sys.maxint``]. - - Parameters - ---------- - size : tuple of ints, int, optional - Shape of output. If this is, for example, (m,n,k), m*n*k samples - are generated. If no shape is specified, a single sample is - returned. - - Returns - ------- - out : ndarray - Drawn samples, with shape `size`. - - See Also - -------- - randint : Uniform sampling over a given half-open interval of integers. - random_integers : Uniform sampling over a given closed interval of - integers. - - Examples - -------- - >>> RS = np.random.mtrand.RandomState() # need a RandomState object - >>> RS.tomaxint((2,2,2)) - array([[[1170048599, 1600360186], - [ 739731006, 1947757578]], - [[1871712945, 752307660], - [1601631370, 1479324245]]]) - >>> import sys - >>> sys.maxint - 2147483647 - >>> RS.tomaxint((2,2,2)) < sys.maxint - array([[[ True, True], - [ True, True]], - [[ True, True], - [ True, True]]], dtype=bool) - - """ - return disc0_array(self.internal_state, rk_long, size) - - def randint(self, low, high=None, size=None): - """ - randint(low, high=None, size=None) - - Return random integers from `low` (inclusive) to `high` (exclusive). - - Return random integers from the "discrete uniform" distribution in the - "half-open" interval [`low`, `high`). 
If `high` is None (the default), - then results are from [0, `low`). - - Parameters - ---------- - low : int - Lowest (signed) integer to be drawn from the distribution (unless - ``high=None``, in which case this parameter is the *highest* such - integer). - high : int, optional - If provided, one above the largest (signed) integer to be drawn - from the distribution (see above for behavior if ``high=None``). - size : int or tuple of ints, optional - Output shape. Default is None, in which case a single int is - returned. - - Returns - ------- - out : int or ndarray of ints - `size`-shaped array of random integers from the appropriate - distribution, or a single such random int if `size` not provided. - - See Also - -------- - random.random_integers : similar to `randint`, only for the closed - interval [`low`, `high`], and 1 is the lowest value if `high` is - omitted. In particular, this other one is the one to use to generate - uniformly distributed discrete non-integers. - - Examples - -------- - >>> np.random.randint(2, size=10) - array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) - >>> np.random.randint(1, size=10) - array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - - Generate a 2 x 4 array of ints between 0 and 4, inclusive: - - >>> np.random.randint(5, size=(2, 4)) - array([[4, 0, 2, 1], - [3, 2, 2, 0]]) - - """ - cdef long lo, hi, rv - cdef unsigned long diff - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i - - if high is None: - lo = 0 - hi = low - else: - lo = low - hi = high - - if lo >= hi : - raise ValueError("low >= high") - - diff = hi - lo - 1UL - if size is None: - rv = lo + rk_interval(diff, self. internal_state) - return rv - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = array.data - for i from 0 <= i < length: - rv = lo + rk_interval(diff, self. internal_state) - array_data[i] = rv - return array - - def bytes(self, npy_intp length): - """ - bytes(length) - - Return random bytes. 
- - Parameters - ---------- - length : int - Number of random bytes. - - Returns - ------- - out : str - String of length `length`. - - Examples - -------- - >>> np.random.bytes(10) - ' eh\\x85\\x022SZ\\xbf\\xa4' #random - - """ - cdef void *bytes - bytestring = empty_py_bytes(length, &bytes) - rk_fill(bytes, length, self.internal_state) - return bytestring - - def uniform(self, low=0.0, high=1.0, size=None): - """ - uniform(low=0.0, high=1.0, size=1) - - Draw samples from a uniform distribution. - - Samples are uniformly distributed over the half-open interval - ``[low, high)`` (includes low, but excludes high). In other words, - any value within the given interval is equally likely to be drawn - by `uniform`. - - Parameters - ---------- - low : float, optional - Lower boundary of the output interval. All values generated will be - greater than or equal to low. The default value is 0. - high : float - Upper boundary of the output interval. All values generated will be - less than high. The default value is 1.0. - size : int or tuple of ints, optional - Shape of output. If the given size is, for example, (m,n,k), - m*n*k samples are generated. If no shape is specified, a single sample - is returned. - - Returns - ------- - out : ndarray - Drawn samples, with shape `size`. - - See Also - -------- - randint : Discrete uniform distribution, yielding integers. - random_integers : Discrete uniform distribution over the closed - interval ``[low, high]``. - random_sample : Floats uniformly distributed over ``[0, 1)``. - random : Alias for `random_sample`. - rand : Convenience function that accepts dimensions as input, e.g., - ``rand(2,2)`` would generate a 2-by-2 array of floats, - uniformly distributed over ``[0, 1)``. - - Notes - ----- - The probability density function of the uniform distribution is - - .. math:: p(x) = \\frac{1}{b - a} - - anywhere within the interval ``[a, b)``, and zero elsewhere. 
- - Examples - -------- - Draw samples from the distribution: - - >>> s = np.random.uniform(-1,0,1000) - - All values are within the given interval: - - >>> np.all(s >= -1) - True - >>> np.all(s < 0) - True - - Display the histogram of the samples, along with the - probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 15, normed=True) - >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray olow, ohigh, odiff - cdef double flow, fhigh - cdef object temp - - flow = PyFloat_AsDouble(low) - fhigh = PyFloat_AsDouble(high) - if not PyErr_Occurred(): - return cont2_array_sc(self.internal_state, rk_uniform, size, flow, fhigh-flow) - PyErr_Clear() - olow = PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ALIGNED) - ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ALIGNED) - temp = np.subtract(ohigh, olow) - Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting - # rules because EnsureArray steals a reference - odiff = PyArray_EnsureArray(temp) - return cont2_array(self.internal_state, rk_uniform, size, olow, odiff) - - def rand(self, *args): - """ - rand(d0, d1, ..., dn) - - Random values in a given shape. - - Create an array of the given shape and propagate it with - random samples from a uniform distribution - over ``[0, 1)``. - - Parameters - ---------- - d0, d1, ..., dn : int - Shape of the output. - - Returns - ------- - out : ndarray, shape ``(d0, d1, ..., dn)`` - Random values. - - See Also - -------- - random - - Notes - ----- - This is a convenience function. If you want an interface that - takes a shape-tuple as the first argument, refer to - `random`. 
- - Examples - -------- - >>> np.random.rand(3,2) - array([[ 0.14022471, 0.96360618], #random - [ 0.37601032, 0.25528411], #random - [ 0.49313049, 0.94909878]]) #random - - """ - if len(args) == 0: - return self.random_sample() - else: - return self.random_sample(size=args) - - def randn(self, *args): - """ - randn([d1, ..., dn]) - - Return a sample (or samples) from the "standard normal" distribution. - - If positive, int_like or int-convertible arguments are provided, - `randn` generates an array of shape ``(d1, ..., dn)``, filled - with random floats sampled from a univariate "normal" (Gaussian) - distribution of mean 0 and variance 1 (if any of the :math:`d_i` are - floats, they are first converted to integers by truncation). A single - float randomly sampled from the distribution is returned if no - argument is provided. - - This is a convenience function. If you want an interface that takes a - tuple as the first argument, use `numpy.random.standard_normal` instead. - - Parameters - ---------- - d1, ..., dn : `n` ints, optional - The dimensions of the returned array, should be all positive. - - Returns - ------- - Z : ndarray or float - A ``(d1, ..., dn)``-shaped array of floating-point samples from - the standard normal distribution, or a single such float if - no parameters were supplied. - - See Also - -------- - random.standard_normal : Similar, but takes a tuple as its argument. - - Notes - ----- - For random samples from :math:`N(\\mu, \\sigma^2)`, use: - - ``sigma * np.random.randn(...) 
+ mu`` - - Examples - -------- - >>> np.random.randn() - 2.1923875335537315 #random - - Two-by-four array of samples from N(3, 6.25): - - >>> 2.5 * np.random.randn(2, 4) + 3 - array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random - [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random - - """ - if len(args) == 0: - return self.standard_normal() - else: - return self.standard_normal(args) - - def random_integers(self, low, high=None, size=None): - """ - random_integers(low, high=None, size=None) - - Return random integers between `low` and `high`, inclusive. - - Return random integers from the "discrete uniform" distribution in the - closed interval [`low`, `high`]. If `high` is None (the default), - then results are from [1, `low`]. - - Parameters - ---------- - low : int - Lowest (signed) integer to be drawn from the distribution (unless - ``high=None``, in which case this parameter is the *highest* such - integer). - high : int, optional - If provided, the largest (signed) integer to be drawn from the - distribution (see above for behavior if ``high=None``). - size : int or tuple of ints, optional - Output shape. Default is None, in which case a single int is returned. - - Returns - ------- - out : int or ndarray of ints - `size`-shaped array of random integers from the appropriate - distribution, or a single such random int if `size` not provided. - - See Also - -------- - random.randint : Similar to `random_integers`, only for the half-open - interval [`low`, `high`), and 0 is the lowest value if `high` is - omitted. - - Notes - ----- - To sample from N evenly spaced floating-point numbers between a and b, - use:: - - a + (b - a) * (np.random.random_integers(N) - 1) / (N - 1.) 
- - Examples - -------- - >>> np.random.random_integers(5) - 4 - >>> type(np.random.random_integers(5)) - - >>> np.random.random_integers(5, size=(3.,2.)) - array([[5, 4], - [3, 3], - [4, 5]]) - - Choose five random numbers from the set of five evenly-spaced - numbers between 0 and 2.5, inclusive (*i.e.*, from the set - :math:`{0, 5/8, 10/8, 15/8, 20/8}`): - - >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4. - array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ]) - - Roll two six sided dice 1000 times and sum the results: - - >>> d1 = np.random.random_integers(1, 6, 1000) - >>> d2 = np.random.random_integers(1, 6, 1000) - >>> dsums = d1 + d2 - - Display results as a histogram: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(dsums, 11, normed=True) - >>> plt.show() - - """ - if high is None: - high = low - low = 1 - return self.randint(low, high+1, size) - - # Complicated, continuous distributions: - def standard_normal(self, size=None): - """ - standard_normal(size=None) - - Returns samples from a Standard Normal distribution (mean=0, stdev=1). - - Parameters - ---------- - size : int or tuple of ints, optional - Output shape. Default is None, in which case a single value is - returned. - - Returns - ------- - out : float or ndarray - Drawn samples. - - Examples - -------- - >>> s = np.random.standard_normal(8000) - >>> s - array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random - -0.38672696, -0.4685006 ]) #random - >>> s.shape - (8000,) - >>> s = np.random.standard_normal(size=(3, 4, 2)) - >>> s.shape - (3, 4, 2) - - """ - return cont0_array(self.internal_state, rk_gauss, size) - - def normal(self, loc=0.0, scale=1.0, size=None): - """ - normal(loc=0.0, scale=1.0, size=None) - - Draw random samples from a normal (Gaussian) distribution. 
- - The probability density function of the normal distribution, first - derived by De Moivre and 200 years later by both Gauss and Laplace - independently [2]_, is often called the bell curve because of - its characteristic shape (see the example below). - - The normal distributions occurs often in nature. For example, it - describes the commonly occurring distribution of samples influenced - by a large number of tiny, random disturbances, each with its own - unique distribution [2]_. - - Parameters - ---------- - loc : float - Mean ("centre") of the distribution. - scale : float - Standard deviation (spread or "width") of the distribution. - size : tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - See Also - -------- - scipy.stats.distributions.norm : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Gaussian distribution is - - .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }} - e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} }, - - where :math:`\\mu` is the mean and :math:`\\sigma` the standard deviation. - The square of the standard deviation, :math:`\\sigma^2`, is called the - variance. - - The function has its peak at the mean, and its "spread" increases with - the standard deviation (the function reaches 0.607 times its maximum at - :math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that - `numpy.random.normal` is more likely to return samples lying close to the - mean, rather than those far away. - - References - ---------- - .. [1] Wikipedia, "Normal distribution", - http://en.wikipedia.org/wiki/Normal_distribution - .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability, Random - Variables and Random Signal Principles", 4th ed., 2001, - pp. 51, 51, 125. 
- - Examples - -------- - Draw samples from the distribution: - - >>> mu, sigma = 0, 0.1 # mean and standard deviation - >>> s = np.random.normal(mu, sigma, 1000) - - Verify the mean and the variance: - - >>> abs(mu - np.mean(s)) < 0.01 - True - - >>> abs(sigma - np.std(s, ddof=1)) < 0.01 - True - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 30, normed=True) - >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * - ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ), - ... linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray oloc, oscale - cdef double floc, fscale - - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fscale <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale) - - PyErr_Clear() - - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oscale, 0)): - raise ValueError("scale <= 0") - return cont2_array(self.internal_state, rk_normal, size, oloc, oscale) - - def beta(self, a, b, size=None): - """ - beta(a, b, size=None) - - The Beta distribution over ``[0, 1]``. - - The Beta distribution is a special case of the Dirichlet distribution, - and is related to the Gamma distribution. It has the probability - distribution function - - .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1} - (1 - x)^{\\beta - 1}, - - where the normalisation, B, is the beta function, - - .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1} - (1 - t)^{\\beta - 1} dt. - - It is often seen in Bayesian inference and order statistics. - - Parameters - ---------- - a : float - Alpha, non-negative. - b : float - Beta, non-negative. - size : tuple of ints, optional - The number of samples to draw. The ouput is packed according to - the size given. 
- - Returns - ------- - out : ndarray - Array of the given shape, containing values drawn from a - Beta distribution. - - """ - cdef ndarray oa, ob - cdef double fa, fb - - fa = PyFloat_AsDouble(a) - fb = PyFloat_AsDouble(b) - if not PyErr_Occurred(): - if fa <= 0: - raise ValueError("a <= 0") - if fb <= 0: - raise ValueError("b <= 0") - return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb) - - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ALIGNED) - ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oa, 0)): - raise ValueError("a <= 0") - if np.any(np.less_equal(ob, 0)): - raise ValueError("b <= 0") - return cont2_array(self.internal_state, rk_beta, size, oa, ob) - - def exponential(self, scale=1.0, size=None): - """ - exponential(scale=1.0, size=None) - - Exponential distribution. - - Its probability density function is - - .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}), - - for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter, - which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`. - The rate parameter is an alternative, widely used parameterization - of the exponential distribution [3]_. - - The exponential distribution is a continuous analogue of the - geometric distribution. It describes many common situations, such as - the size of raindrops measured over many rainstorms [1]_, or the time - between page requests to Wikipedia [2]_. - - Parameters - ---------- - scale : float - The scale parameter, :math:`\\beta = 1/\\lambda`. - size : tuple of ints - Number of samples to draw. The output is shaped - according to `size`. - - References - ---------- - .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and - Random Signal Principles", 4th ed, 2001, p. 57. - .. [2] "Poisson Process", Wikipedia, - http://en.wikipedia.org/wiki/Poisson_process - .. 
[3] "Exponential Distribution, Wikipedia, - http://en.wikipedia.org/wiki/Exponential_distribution - - """ - cdef ndarray oscale - cdef double fscale - - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fscale <= 0: - raise ValueError("scale <= 0") - return cont1_array_sc(self.internal_state, rk_exponential, size, fscale) - - PyErr_Clear() - - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oscale, 0.0)): - raise ValueError("scale <= 0") - return cont1_array(self.internal_state, rk_exponential, size, oscale) - - def standard_exponential(self, size=None): - """ - standard_exponential(size=None) - - Draw samples from the standard exponential distribution. - - `standard_exponential` is identical to the exponential distribution - with a scale parameter of 1. - - Parameters - ---------- - size : int or tuple of ints - Shape of the output. - - Returns - ------- - out : float or ndarray - Drawn samples. - - Examples - -------- - Output a 3x8000 array: - - >>> n = np.random.standard_exponential((3, 8000)) - - """ - return cont0_array(self.internal_state, rk_standard_exponential, size) - - def standard_gamma(self, shape, size=None): - """ - standard_gamma(shape, size=None) - - Draw samples from a Standard Gamma distribution. - - Samples are drawn from a Gamma distribution with specified parameters, - shape (sometimes designated "k") and scale=1. - - Parameters - ---------- - shape : float - Parameter, should be > 0. - size : int or tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : ndarray or scalar - The drawn samples. - - See Also - -------- - scipy.stats.distributions.gamma : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Gamma distribution is - - .. 
math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)}, - - where :math:`k` is the shape and :math:`\\theta` the scale, - and :math:`\\Gamma` is the Gamma function. - - The Gamma distribution is often used to model the times to failure of - electronic components, and arises naturally in processes for which the - waiting times between Poisson distributed events are relevant. - - References - ---------- - .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A - Wolfram Web Resource. - http://mathworld.wolfram.com/GammaDistribution.html - .. [2] Wikipedia, "Gamma-distribution", - http://en.wikipedia.org/wiki/Gamma-distribution - - Examples - -------- - Draw samples from the distribution: - - >>> shape, scale = 2., 1. # mean and width - >>> s = np.random.standard_gamma(shape, 1000000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> import scipy.special as sps - >>> count, bins, ignored = plt.hist(s, 50, normed=True) - >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\ - ... (sps.gamma(shape) * scale**shape)) - >>> plt.plot(bins, y, linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray oshape - cdef double fshape - - fshape = PyFloat_AsDouble(shape) - if not PyErr_Occurred(): - if fshape <= 0: - raise ValueError("shape <= 0") - return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape) - - PyErr_Clear() - oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oshape, 0.0)): - raise ValueError("shape <= 0") - return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) - - def gamma(self, shape, scale=1.0, size=None): - """ - gamma(shape, scale=1.0, size=None) - - Draw samples from a Gamma distribution. - - Samples are drawn from a Gamma distribution with specified parameters, - `shape` (sometimes designated "k") and `scale` (sometimes designated - "theta"), where both parameters are > 0. 
- - Parameters - ---------- - shape : scalar > 0 - The shape of the gamma distribution. - scale : scalar > 0, optional - The scale of the gamma distribution. Default is equal to 1. - size : shape_tuple, optional - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - out : ndarray, float - Returns one sample unless `size` parameter is specified. - - See Also - -------- - scipy.stats.distributions.gamma : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Gamma distribution is - - .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)}, - - where :math:`k` is the shape and :math:`\\theta` the scale, - and :math:`\\Gamma` is the Gamma function. - - The Gamma distribution is often used to model the times to failure of - electronic components, and arises naturally in processes for which the - waiting times between Poisson distributed events are relevant. - - References - ---------- - .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A - Wolfram Web Resource. - http://mathworld.wolfram.com/GammaDistribution.html - .. [2] Wikipedia, "Gamma-distribution", - http://en.wikipedia.org/wiki/Gamma-distribution - - Examples - -------- - Draw samples from the distribution: - - >>> shape, scale = 2., 2. # mean and dispersion - >>> s = np.random.gamma(shape, scale, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> import scipy.special as sps - >>> count, bins, ignored = plt.hist(s, 50, normed=True) - >>> y = bins**(shape-1)*(np.exp(-bins/scale) / - ... 
(sps.gamma(shape)*scale**shape)) - >>> plt.plot(bins, y, linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray oshape, oscale - cdef double fshape, fscale - - fshape = PyFloat_AsDouble(shape) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fshape <= 0: - raise ValueError("shape <= 0") - if fscale <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale) - - PyErr_Clear() - oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oshape, 0.0)): - raise ValueError("shape <= 0") - if np.any(np.less_equal(oscale, 0.0)): - raise ValueError("scale <= 0") - return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale) - - def f(self, dfnum, dfden, size=None): - """ - f(dfnum, dfden, size=None) - - Draw samples from a F distribution. - - Samples are drawn from an F distribution with specified parameters, - `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of freedom - in denominator), where both parameters should be greater than zero. - - The random variate of the F distribution (also known as the - Fisher distribution) is a continuous probability distribution - that arises in ANOVA tests, and is the ratio of two chi-square - variates. - - Parameters - ---------- - dfnum : float - Degrees of freedom in numerator. Should be greater than zero. - dfden : float - Degrees of freedom in denominator. Should be greater than zero. - size : {tuple, int}, optional - Output shape. If the given shape is, e.g., ``(m, n, k)``, - then ``m * n * k`` samples are drawn. By default only one sample - is returned. - - Returns - ------- - samples : {ndarray, scalar} - Samples from the Fisher distribution. - - See Also - -------- - scipy.stats.distributions.f : probability density function, - distribution or cumulative density function, etc. 
- - Notes - ----- - - The F statistic is used to compare in-group variances to between-group - variances. Calculating the distribution depends on the sampling, and - so it is a function of the respective degrees of freedom in the - problem. The variable `dfnum` is the number of samples minus one, the - between-groups degrees of freedom, while `dfden` is the within-groups - degrees of freedom, the sum of the number of samples in each group - minus the number of groups. - - References - ---------- - .. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill, - Fifth Edition, 2002. - .. [2] Wikipedia, "F-distribution", - http://en.wikipedia.org/wiki/F-distribution - - Examples - -------- - An example from Glantz[1], pp 47-40. - Two groups, children of diabetics (25 people) and children from people - without diabetes (25 controls). Fasting blood glucose was measured, - case group had a mean value of 86.1, controls had a mean value of - 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these - data consistent with the null hypothesis that the parents diabetic - status does not affect their children's blood glucose levels? - Calculating the F statistic from the data gives a value of 36.01. - - Draw samples from the distribution: - - >>> dfnum = 1. # between group degrees of freedom - >>> dfden = 48. # within groups degrees of freedom - >>> s = np.random.f(dfnum, dfden, 1000) - - The lower bound for the top 1% of the samples is : - - >>> sort(s)[-10] - 7.61988120985 - - So there is about a 1% chance that the F statistic will exceed 7.62, - the measured value is 36, so the null hypothesis is rejected at the 1% - level. 
- - """ - cdef ndarray odfnum, odfden - cdef double fdfnum, fdfden - - fdfnum = PyFloat_AsDouble(dfnum) - fdfden = PyFloat_AsDouble(dfden) - if not PyErr_Occurred(): - if fdfnum <= 0: - raise ValueError("shape <= 0") - if fdfden <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden) - - PyErr_Clear() - - odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ALIGNED) - odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(odfnum, 0.0)): - raise ValueError("dfnum <= 0") - if np.any(np.less_equal(odfden, 0.0)): - raise ValueError("dfden <= 0") - return cont2_array(self.internal_state, rk_f, size, odfnum, odfden) - - def noncentral_f(self, dfnum, dfden, nonc, size=None): - """ - noncentral_f(dfnum, dfden, nonc, size=None) - - Draw samples from the noncentral F distribution. - - Samples are drawn from an F distribution with specified parameters, - `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of - freedom in denominator), where both parameters > 1. - `nonc` is the non-centrality parameter. - - Parameters - ---------- - dfnum : int - Parameter, should be > 1. - dfden : int - Parameter, should be > 1. - nonc : float - Parameter, should be >= 0. - size : int or tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : scalar or ndarray - Drawn samples. - - Notes - ----- - When calculating the power of an experiment (power = probability of - rejecting the null hypothesis when a specific alternative is true) the - non-central F statistic becomes important. When the null hypothesis is - true, the F statistic follows a central F distribution. When the null - hypothesis is not true, then it follows a non-central F statistic. - - References - ---------- - Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram - Web Resource. 
http://mathworld.wolfram.com/NoncentralF-Distribution.html - - Wikipedia, "Noncentral F distribution", - http://en.wikipedia.org/wiki/Noncentral_F-distribution - - Examples - -------- - In a study, testing for a specific alternative to the null hypothesis - requires use of the Noncentral F distribution. We need to calculate the - area in the tail of the distribution that exceeds the value of the F - distribution for the null hypothesis. We'll plot the two probability - distributions for comparison. - - >>> dfnum = 3 # between group deg of freedom - >>> dfden = 20 # within groups degrees of freedom - >>> nonc = 3.0 - >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000) - >>> NF = np.histogram(nc_vals, bins=50, normed=True) - >>> c_vals = np.random.f(dfnum, dfden, 1000000) - >>> F = np.histogram(c_vals, bins=50, normed=True) - >>> plt.plot(F[1][1:], F[0]) - >>> plt.plot(NF[1][1:], NF[0]) - >>> plt.show() - - """ - cdef ndarray odfnum, odfden, ononc - cdef double fdfnum, fdfden, fnonc - - fdfnum = PyFloat_AsDouble(dfnum) - fdfden = PyFloat_AsDouble(dfden) - fnonc = PyFloat_AsDouble(nonc) - if not PyErr_Occurred(): - if fdfnum <= 1: - raise ValueError("dfnum <= 1") - if fdfden <= 0: - raise ValueError("dfden <= 0") - if fnonc < 0: - raise ValueError("nonc < 0") - return cont3_array_sc(self.internal_state, rk_noncentral_f, size, - fdfnum, fdfden, fnonc) - - PyErr_Clear() - - odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ALIGNED) - odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ALIGNED) - ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ALIGNED) - - if np.any(np.less_equal(odfnum, 1.0)): - raise ValueError("dfnum <= 1") - if np.any(np.less_equal(odfden, 0.0)): - raise ValueError("dfden <= 0") - if np.any(np.less(ononc, 0.0)): - raise ValueError("nonc < 0") - return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, - odfden, ononc) - - def chisquare(self, df, size=None): - """ - chisquare(df, size=None) - - Draw samples from a chi-square 
distribution. - - When `df` independent random variables, each with standard normal - distributions (mean 0, variance 1), are squared and summed, the - resulting distribution is chi-square (see Notes). This distribution - is often used in hypothesis testing. - - Parameters - ---------- - df : int - Number of degrees of freedom. - size : tuple of ints, int, optional - Size of the returned array. By default, a scalar is - returned. - - Returns - ------- - output : ndarray - Samples drawn from the distribution, packed in a `size`-shaped - array. - - Raises - ------ - ValueError - When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``) - is given. - - Notes - ----- - The variable obtained by summing the squares of `df` independent, - standard normally distributed random variables: - - .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i - - is chi-square distributed, denoted - - .. math:: Q \\sim \\chi^2_k. - - The probability density function of the chi-squared distribution is - - .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)} - x^{k/2 - 1} e^{-x/2}, - - where :math:`\\Gamma` is the gamma function, - - .. math:: \\Gamma(x) = \\int_0^{-\\infty} t^{x - 1} e^{-t} dt. - - References - ---------- - `NIST/SEMATECH e-Handbook of Statistical Methods - `_ - - Examples - -------- - >>> np.random.chisquare(2,4) - array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) - - """ - cdef ndarray odf - cdef double fdf - - fdf = PyFloat_AsDouble(df) - if not PyErr_Occurred(): - if fdf <= 0: - raise ValueError("df <= 0") - return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf) - - PyErr_Clear() - - odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(odf, 0.0)): - raise ValueError("df <= 0") - return cont1_array(self.internal_state, rk_chisquare, size, odf) - - def noncentral_chisquare(self, df, nonc, size=None): - """ - noncentral_chisquare(df, nonc, size=None) - - Draw samples from a noncentral chi-square distribution. 
- - The noncentral :math:`\\chi^2` distribution is a generalisation of - the :math:`\\chi^2` distribution. - - Parameters - ---------- - df : int - Degrees of freedom, should be >= 1. - nonc : float - Non-centrality, should be > 0. - size : int or tuple of ints - Shape of the output. - - Notes - ----- - The probability density function for the noncentral Chi-square distribution - is - - .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0} - \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}P_{Y_{df+2i}}(x), - - where :math:`Y_{q}` is the Chi-square with q degrees of freedom. - - In Delhi (2007), it is noted that the noncentral chi-square is useful in - bombing and coverage problems, the probability of killing the point target - given by the noncentral chi-squared distribution. - - References - ---------- - .. [1] Delhi, M.S. Holla, "On a noncentral chi-square distribution in the - analysis of weapon systems effectiveness", Metrika, Volume 15, - Number 1 / December, 1970. - .. [2] Wikipedia, "Noncentral chi-square distribution" - http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution - - Examples - -------- - Draw values from the distribution and plot the histogram - - >>> import matplotlib.pyplot as plt - >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000), - ... bins=200, normed=True) - >>> plt.show() - - Draw values from a noncentral chisquare with very small noncentrality, - and compare to a chisquare. - - >>> plt.figure() - >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000), - ... bins=np.arange(0., 25, .1), normed=True) - >>> values2 = plt.hist(np.random.chisquare(3, 100000), - ... bins=np.arange(0., 25, .1), normed=True) - >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob') - >>> plt.show() - - Demonstrate how large values of non-centrality lead to a more symmetric - distribution. - - >>> plt.figure() - >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000), - ... 
bins=200, normed=True) - >>> plt.show() - - """ - cdef ndarray odf, ononc - cdef double fdf, fnonc - fdf = PyFloat_AsDouble(df) - fnonc = PyFloat_AsDouble(nonc) - if not PyErr_Occurred(): - if fdf <= 1: - raise ValueError("df <= 0") - if fnonc <= 0: - raise ValueError("nonc <= 0") - return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, - size, fdf, fnonc) - - PyErr_Clear() - - odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ALIGNED) - ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(odf, 0.0)): - raise ValueError("df <= 1") - if np.any(np.less_equal(ononc, 0.0)): - raise ValueError("nonc < 0") - return cont2_array(self.internal_state, rk_noncentral_chisquare, size, - odf, ononc) - - def standard_cauchy(self, size=None): - """ - standard_cauchy(size=None) - - Standard Cauchy distribution with mode = 0. - - Also known as the Lorentz distribution. - - Parameters - ---------- - size : int or tuple of ints - Shape of the output. - - Returns - ------- - samples : ndarray or scalar - The drawn samples. - - Notes - ----- - The probability density function for the full Cauchy distribution is - - .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+ - (\\frac{x-x_0}{\\gamma})^2 \\bigr] } - - and the Standard Cauchy distribution just sets :math:`x_0=0` and - :math:`\\gamma=1` - - The Cauchy distribution arises in the solution to the driven harmonic - oscillator problem, and also describes spectral line broadening. It - also describes the distribution of values at which a line tilted at - a random angle will cut the x axis. - - When studying hypothesis tests that assume normality, seeing how the - tests perform on data from a Cauchy distribution is a good indicator of - their sensitivity to a heavy-tailed distribution, since the Cauchy looks - very much like a Gaussian distribution, but with heavier tails. 
- - References - ---------- - ..[1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy - Distribution", - http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm - ..[2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A - Wolfram Web Resource. - http://mathworld.wolfram.com/CauchyDistribution.html - ..[3] Wikipedia, "Cauchy distribution" - http://en.wikipedia.org/wiki/Cauchy_distribution - - Examples - -------- - Draw samples and plot the distribution: - - >>> s = np.random.standard_cauchy(1000000) - >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well - >>> plt.hist(s, bins=100) - >>> plt.show() - - """ - return cont0_array(self.internal_state, rk_standard_cauchy, size) - - def standard_t(self, df, size=None): - """ - standard_t(df, size=None) - - Standard Student's t distribution with df degrees of freedom. - - A special case of the hyperbolic distribution. - As `df` gets large, the result resembles that of the standard normal - distribution (`standard_normal`). - - Parameters - ---------- - df : int - Degrees of freedom, should be > 0. - size : int or tuple of ints, optional - Output shape. Default is None, in which case a single value is - returned. - - Returns - ------- - samples : ndarray or scalar - Drawn samples. - - Notes - ----- - The probability density function for the t distribution is - - .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df} - \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2} - - The t test is based on an assumption that the data come from a Normal - distribution. The t test provides a way to test whether the sample mean - (that is the mean calculated from the data) is a good estimate of the true - mean. - - The derivation of the t-distribution was forst published in 1908 by William - Gisset while working for the Guinness Brewery in Dublin. Due to proprietary - issues, he had to publish under a pseudonym, and so he used the name - Student. 
- - References - ---------- - .. [1] Dalgaard, Peter, "Introductory Statistics With R", - Springer, 2002. - .. [2] Wikipedia, "Student's t-distribution" - http://en.wikipedia.org/wiki/Student's_t-distribution - - Examples - -------- - From Dalgaard page 83 [1]_, suppose the daily energy intake for 11 - women in Kj is: - - >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\ - ... 7515, 8230, 8770]) - - Does their energy intake deviate systematically from the recommended - value of 7725 kJ? - - We have 10 degrees of freedom, so is the sample mean within 95% of the - recommended value? - - >>> s = np.random.standard_t(10, size=100000) - >>> np.mean(intake) - 6753.636363636364 - >>> intake.std(ddof=1) - 1142.1232221373727 - - Calculate the t statistic, setting the ddof parameter to the unbiased - value so the divisor in the standard deviation will be degrees of - freedom, N-1. - - >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake))) - >>> import matplotlib.pyplot as plt - >>> h = plt.hist(s, bins=100, normed=True) - - For a one-sided t-test, how far out in the distribution does the t - statistic appear? - - >>> >>> np.sum(s PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(odf, 0.0)): - raise ValueError("df <= 0") - return cont1_array(self.internal_state, rk_standard_t, size, odf) - - def vonmises(self, mu, kappa, size=None): - """ - vonmises(mu, kappa, size=None) - - Draw samples from a von Mises distribution. - - Samples are drawn from a von Mises distribution with specified mode - (mu) and dispersion (kappa), on the interval [-pi, pi]. - - The von Mises distribution (also known as the circular normal - distribution) is a continuous probability distribution on the unit - circle. It may be thought of as the circular analogue of the normal - distribution. - - Parameters - ---------- - mu : float - Mode ("center") of the distribution. - kappa : float - Dispersion of the distribution, has to be >=0. 
- size : int or tuple of int - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : scalar or ndarray - The returned samples, which are in the interval [-pi, pi]. - - See Also - -------- - scipy.stats.distributions.vonmises : probability density function, - distribution, or cumulative density function, etc. - - Notes - ----- - The probability density for the von Mises distribution is - - .. math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)}, - - where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion, - and :math:`I_0(\\kappa)` is the modified Bessel function of order 0. - - The von Mises is named for Richard Edler von Mises, who was born in - Austria-Hungary, in what is now the Ukraine. He fled to the United - States in 1939 and became a professor at Harvard. He worked in - probability theory, aerodynamics, fluid mechanics, and philosophy of - science. - - References - ---------- - Abramowitz, M. and Stegun, I. A. (ed.), *Handbook of Mathematical - Functions*, New York: Dover, 1965. - - von Mises, R., *Mathematical Theory of Probability and Statistics*, - New York: Academic Press, 1964. - - Examples - -------- - Draw samples from the distribution: - - >>> mu, kappa = 0.0, 4.0 # mean and dispersion - >>> s = np.random.vonmises(mu, kappa, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> import scipy.special as sps - >>> count, bins, ignored = plt.hist(s, 50, normed=True) - >>> x = np.arange(-np.pi, np.pi, 2*np.pi/50.) 
- >>> y = -np.exp(kappa*np.cos(x-mu))/(2*np.pi*sps.jn(0,kappa)) - >>> plt.plot(x, y/max(y), linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray omu, okappa - cdef double fmu, fkappa - - fmu = PyFloat_AsDouble(mu) - fkappa = PyFloat_AsDouble(kappa) - if not PyErr_Occurred(): - if fkappa < 0: - raise ValueError("kappa < 0") - return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa) - - PyErr_Clear() - - omu = PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ALIGNED) - okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less(okappa, 0.0)): - raise ValueError("kappa < 0") - return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa) - - def pareto(self, a, size=None): - """ - pareto(a, size=None) - - Draw samples from a Pareto II or Lomax distribution with specified shape. - - The Lomax or Pareto II distribution is a shifted Pareto distribution. The - classical Pareto distribution can be obtained from the Lomax distribution - by adding the location parameter m, see below. The smallest value of the - Lomax distribution is zero while for the classical Pareto distribution it - is m, where the standard Pareto distribution has location m=1. - Lomax can also be considered as a simplified version of the Generalized - Pareto distribution (available in SciPy), with the scale set to one and - the location set to zero. - - The Pareto distribution must be greater than zero, and is unbounded above. - It is also known as the "80-20 rule". In this distribution, 80 percent of - the weights are in the lowest 20 percent of the range, while the other 20 - percent fill the remaining 80 percent of the range. - - Parameters - ---------- - shape : float, > 0. - Shape of the distribution. - size : tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. 
- - See Also - -------- - scipy.stats.distributions.lomax.pdf : probability density function, - distribution or cumulative density function, etc. - scipy.stats.distributions.genpareto.pdf : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Pareto distribution is - - .. math:: p(x) = \\frac{am^a}{x^{a+1}} - - where :math:`a` is the shape and :math:`m` the location - - The Pareto distribution, named after the Italian economist Vilfredo Pareto, - is a power law probability distribution useful in many real world problems. - Outside the field of economics it is generally referred to as the Bradford - distribution. Pareto developed the distribution to describe the - distribution of wealth in an economy. It has also found use in insurance, - web page access statistics, oil field sizes, and many other problems, - including the download frequency for projects in Sourceforge [1]. It is - one of the so-called "fat-tailed" distributions. - - - References - ---------- - .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of - Sourceforge projects. - .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne. - .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme - Values, Birkhauser Verlag, Basel, pp 23-30. - .. [4] Wikipedia, "Pareto distribution", - http://en.wikipedia.org/wiki/Pareto_distribution - - Examples - -------- - Draw samples from the distribution: - - >>> a, m = 3., 1. 
# shape and mode - >>> s = np.random.pareto(a, 1000) + m - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='center') - >>> fit = a*m**a/bins**(a+1) - >>> plt.plot(bins, max(count)*fit/max(fit),linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray oa - cdef double fa - - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): - if fa <= 0: - raise ValueError("a <= 0") - return cont1_array_sc(self.internal_state, rk_pareto, size, fa) - - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oa, 0.0)): - raise ValueError("a <= 0") - return cont1_array(self.internal_state, rk_pareto, size, oa) - - def weibull(self, a, size=None): - """ - weibull(a, size=None) - - Weibull distribution. - - Draw samples from a 1-parameter Weibull distribution with the given - shape parameter `a`. - - .. math:: X = (-ln(U))^{1/a} - - Here, U is drawn from the uniform distribution over (0,1]. - - The more common 2-parameter Weibull, including a scale parameter - :math:`\\lambda` is just :math:`X = \\lambda(-ln(U))^{1/a}`. - - Parameters - ---------- - a : float - Shape of the distribution. - size : tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - See Also - -------- - scipy.stats.distributions.weibull : probability density function, - distribution or cumulative density function, etc. - - gumbel, scipy.stats.distributions.genextreme - - Notes - ----- - The Weibull (or Type III asymptotic extreme value distribution for smallest - values, SEV Type III, or Rosin-Rammler distribution) is one of a class of - Generalized Extreme Value (GEV) distributions used in modeling extreme - value problems. This class includes the Gumbel and Frechet distributions. - - The probability density for the Weibull distribution is - - .. 
math:: p(x) = \\frac{a} - {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a}, - - where :math:`a` is the shape and :math:`\\lambda` the scale. - - The function has its peak (the mode) at - :math:`\\lambda(\\frac{a-1}{a})^{1/a}`. - - When ``a = 1``, the Weibull distribution reduces to the exponential - distribution. - - References - ---------- - .. [1] Waloddi Weibull, Professor, Royal Technical University, Stockholm, - 1939 "A Statistical Theory Of The Strength Of Materials", - Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939, - Generalstabens Litografiska Anstalts Forlag, Stockholm. - .. [2] Waloddi Weibull, 1951 "A Statistical Distribution Function of Wide - Applicability", Journal Of Applied Mechanics ASME Paper. - .. [3] Wikipedia, "Weibull distribution", - http://en.wikipedia.org/wiki/Weibull_distribution - - Examples - -------- - Draw samples from the distribution: - - >>> a = 5. # shape - >>> s = np.random.weibull(a, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> x = np.arange(1,100.)/50. - >>> def weib(x,n,a): - ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a) - - >>> count, bins, ignored = plt.hist(np.random.weibull(5.,1000)) - >>> x = np.arange(1,100.)/50. - >>> scale = count.max()/weib(x, 1., 5.).max() - >>> plt.plot(x, weib(x, 1., 5.)*scale) - >>> plt.show() - - """ - cdef ndarray oa - cdef double fa - - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): - if fa <= 0: - raise ValueError("a <= 0") - return cont1_array_sc(self.internal_state, rk_weibull, size, fa) - - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oa, 0.0)): - raise ValueError("a <= 0") - return cont1_array(self.internal_state, rk_weibull, size, oa) - - def power(self, a, size=None): - """ - power(a, size=None) - - Draws samples in [0, 1] from a power distribution with positive - exponent a - 1. 
- - Also known as the power function distribution. - - Parameters - ---------- - a : float - parameter, > 0 - size : tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : {ndarray, scalar} - The returned samples lie in [0, 1]. - - Raises - ------ - ValueError - If a<1. - - Notes - ----- - The probability density function is - - .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0. - - The power function distribution is just the inverse of the Pareto - distribution. It may also be seen as a special case of the Beta - distribution. - - It is used, for example, in modeling the over-reporting of insurance - claims. - - References - ---------- - .. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions - in economics and actuarial sciences", Wiley, 2003. - .. [2] Heckert, N. A. and Filliben, James J. (2003). NIST Handbook 148: - Dataplot Reference Manual, Volume 2: Let Subcommands and Library - Functions", National Institute of Standards and Technology Handbook - Series, June 2003. - http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf - - Examples - -------- - Draw samples from the distribution: - - >>> a = 5. # shape - >>> samples = 1000 - >>> s = np.random.power(a, samples) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, bins=30) - >>> x = np.linspace(0, 1, 100) - >>> y = a*x**(a-1.) - >>> normed_y = samples*np.diff(bins)[0]*y - >>> plt.plot(x, normed_y) - >>> plt.show() - - Compare the power function distribution to the inverse of the Pareto. 
- - >>> from scipy import stats - >>> rvs = np.random.power(5, 1000000) - >>> rvsp = np.random.pareto(5, 1000000) - >>> xx = np.linspace(0,1,100) - >>> powpdf = stats.powerlaw.pdf(xx,5) - - >>> plt.figure() - >>> plt.hist(rvs, bins=50, normed=True) - >>> plt.plot(xx,powpdf,'r-') - >>> plt.title('np.random.power(5)') - - >>> plt.figure() - >>> plt.hist(1./(1.+rvsp), bins=50, normed=True) - >>> plt.plot(xx,powpdf,'r-') - >>> plt.title('inverse of 1 + np.random.pareto(5)') - - >>> plt.figure() - >>> plt.hist(1./(1.+rvsp), bins=50, normed=True) - >>> plt.plot(xx,powpdf,'r-') - >>> plt.title('inverse of stats.pareto(5)') - - """ - cdef ndarray oa - cdef double fa - - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): - if fa <= 0: - raise ValueError("a <= 0") - return cont1_array_sc(self.internal_state, rk_power, size, fa) - - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oa, 0.0)): - raise ValueError("a <= 0") - return cont1_array(self.internal_state, rk_power, size, oa) - - def laplace(self, loc=0.0, scale=1.0, size=None): - """ - laplace(loc=0.0, scale=1.0, size=None) - - Draw samples from the Laplace or double exponential distribution with - specified location (or mean) and scale (decay). - - The Laplace distribution is similar to the Gaussian/normal distribution, - but is sharper at the peak and has fatter tails. It represents the - difference between two independent, identically distributed exponential - random variables. - - Parameters - ---------- - loc : float - The position, :math:`\\mu`, of the distribution peak. - scale : float - :math:`\\lambda`, the exponential decay. - - Notes - ----- - It has the probability density function - - .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda} - \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right). 
- - The first law of Laplace, from 1774, states that the frequency of an error - can be expressed as an exponential function of the absolute magnitude of - the error, which leads to the Laplace distribution. For many problems in - Economics and Health sciences, this distribution seems to model the data - better than the standard Gaussian distribution - - - References - ---------- - .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). Handbook of Mathematical - Functions with Formulas, Graphs, and Mathematical Tables, 9th - printing. New York: Dover, 1972. - - .. [2] The Laplace distribution and generalizations - By Samuel Kotz, Tomasz J. Kozubowski, Krzysztof Podgorski, - Birkhauser, 2001. - - .. [3] Weisstein, Eric W. "Laplace Distribution." - From MathWorld--A Wolfram Web Resource. - http://mathworld.wolfram.com/LaplaceDistribution.html - - .. [4] Wikipedia, "Laplace distribution", - http://en.wikipedia.org/wiki/Laplace_distribution - - Examples - -------- - Draw samples from the distribution - - >>> loc, scale = 0., 1. - >>> s = np.random.laplace(loc, scale, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 30, normed=True) - >>> x = np.arange(-8., 8., .01) - >>> pdf = np.exp(-abs(x-loc/scale))/(2.*scale) - >>> plt.plot(x, pdf) - - Plot Gaussian for comparison: - - >>> g = (1/(scale * np.sqrt(2 * np.pi)) * - ... 
np.exp( - (x - loc)**2 / (2 * scale**2) )) - >>> plt.plot(x,g) - - """ - cdef ndarray oloc, oscale - cdef double floc, fscale - - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fscale <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale) - - PyErr_Clear() - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oscale, 0.0)): - raise ValueError("scale <= 0") - return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) - - def gumbel(self, loc=0.0, scale=1.0, size=None): - """ - gumbel(loc=0.0, scale=1.0, size=None) - - Gumbel distribution. - - Draw samples from a Gumbel distribution with specified location and scale. - For more information on the Gumbel distribution, see Notes and References - below. - - Parameters - ---------- - loc : float - The location of the mode of the distribution. - scale : float - The scale parameter of the distribution. - size : tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - out : ndarray - The samples - - See Also - -------- - scipy.stats.gumbel_l - scipy.stats.gumbel_r - scipy.stats.genextreme - probability density function, distribution, or cumulative density - function, etc. for each of the above - weibull - - Notes - ----- - The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value - Type I) distribution is one of a class of Generalized Extreme Value (GEV) - distributions used in modeling extreme value problems. The Gumbel is a - special case of the Extreme Value Type I distribution for maximums from - distributions with "exponential-like" tails. - - The probability density for the Gumbel distribution is - - .. 
math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/ - \\beta}}, - - where :math:`\\mu` is the mode, a location parameter, and :math:`\\beta` is - the scale parameter. - - The Gumbel (named for German mathematician Emil Julius Gumbel) was used - very early in the hydrology literature, for modeling the occurrence of - flood events. It is also used for modeling maximum wind speed and rainfall - rates. It is a "fat-tailed" distribution - the probability of an event in - the tail of the distribution is larger than if one used a Gaussian, hence - the surprisingly frequent occurrence of 100-year floods. Floods were - initially modeled as a Gaussian process, which underestimated the frequency - of extreme events. - - - It is one of a class of extreme value distributions, the Generalized - Extreme Value (GEV) distributions, which also includes the Weibull and - Frechet. - - The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance of - :math:`\\frac{\\pi^2}{6}\\beta^2`. - - References - ---------- - Gumbel, E. J., *Statistics of Extremes*, New York: Columbia University - Press, 1958. - - Reiss, R.-D. and Thomas, M., *Statistical Analysis of Extreme Values from - Insurance, Finance, Hydrology and Other Fields*, Basel: Birkhauser Verlag, - 2001. - - Examples - -------- - Draw samples from the distribution: - - >>> mu, beta = 0, 0.1 # location and scale - >>> s = np.random.gumbel(mu, beta, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 30, normed=True) - >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta) - ... * np.exp( -np.exp( -(bins - mu) /beta) ), - ... linewidth=2, color='r') - >>> plt.show() - - Show how an extreme value distribution can arise from a Gaussian process - and compare to a Gaussian: - - >>> means = [] - >>> maxima = [] - >>> for i in range(0,1000) : - ... 
a = np.random.normal(mu, beta, 1000) - ... means.append(a.mean()) - ... maxima.append(a.max()) - >>> count, bins, ignored = plt.hist(maxima, 30, normed=True) - >>> beta = np.std(maxima)*np.pi/np.sqrt(6) - >>> mu = np.mean(maxima) - 0.57721*beta - >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta) - ... * np.exp(-np.exp(-(bins - mu)/beta)), - ... linewidth=2, color='r') - >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi)) - ... * np.exp(-(bins - mu)**2 / (2 * beta**2)), - ... linewidth=2, color='g') - >>> plt.show() - - """ - cdef ndarray oloc, oscale - cdef double floc, fscale - - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fscale <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale) - - PyErr_Clear() - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oscale, 0.0)): - raise ValueError("scale <= 0") - return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) - - def logistic(self, loc=0.0, scale=1.0, size=None): - """ - logistic(loc=0.0, scale=1.0, size=None) - - Draw samples from a Logistic distribution. - - Samples are drawn from a Logistic distribution with specified - parameters, loc (location or mean, also median), and scale (>0). - - Parameters - ---------- - loc : float - - scale : float > 0. - - size : {tuple, int} - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : {ndarray, scalar} - where the values are all integers in [0, n]. - - See Also - -------- - scipy.stats.distributions.logistic : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Logistic distribution is - - .. 
math:: P(x) = P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2}, - - where :math:`\\mu` = location and :math:`s` = scale. - - The Logistic distribution is used in Extreme Value problems where it - can act as a mixture of Gumbel distributions, in Epidemiology, and by - the World Chess Federation (FIDE) where it is used in the Elo ranking - system, assuming the performance of each player is a logistically - distributed random variable. - - References - ---------- - .. [1] Reiss, R.-D. and Thomas M. (2001), Statistical Analysis of Extreme - Values, from Insurance, Finance, Hydrology and Other Fields, - Birkhauser Verlag, Basel, pp 132-133. - .. [2] Weisstein, Eric W. "Logistic Distribution." From - MathWorld--A Wolfram Web Resource. - http://mathworld.wolfram.com/LogisticDistribution.html - .. [3] Wikipedia, "Logistic-distribution", - http://en.wikipedia.org/wiki/Logistic-distribution - - Examples - -------- - Draw samples from the distribution: - - >>> loc, scale = 10, 1 - >>> s = np.random.logistic(loc, scale, 10000) - >>> count, bins, ignored = plt.hist(s, bins=50) - - # plot against distribution - - >>> def logist(x, loc, scale): - ... return exp((loc-x)/scale)/(scale*(1+exp((loc-x)/scale))**2) - >>> plt.plot(bins, logist(bins, loc, scale)*count.max()/\\ - ... 
logist(bins, loc, scale).max()) - >>> plt.show() - - """ - cdef ndarray oloc, oscale - cdef double floc, fscale - - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fscale <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale) - - PyErr_Clear() - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oscale, 0.0)): - raise ValueError("scale <= 0") - return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) - - def lognormal(self, mean=0.0, sigma=1.0, size=None): - """ - lognormal(mean=0.0, sigma=1.0, size=None) - - Return samples drawn from a log-normal distribution. - - Draw samples from a log-normal distribution with specified mean, standard - deviation, and shape. Note that the mean and standard deviation are not the - values for the distribution itself, but of the underlying normal - distribution it is derived from. - - - Parameters - ---------- - mean : float - Mean value of the underlying normal distribution - sigma : float, >0. - Standard deviation of the underlying normal distribution - size : tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - See Also - -------- - scipy.stats.lognorm : probability density function, distribution, - cumulative density function, etc. - - Notes - ----- - A variable `x` has a log-normal distribution if `log(x)` is normally - distributed. - - The probability density function for the log-normal distribution is - - .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}} - e^{(-\\frac{(ln(x)-\\mu)^2}{2\\sigma^2})} - - where :math:`\\mu` is the mean and :math:`\\sigma` is the standard deviation - of the normally distributed logarithm of the variable. 
- - A log-normal distribution results if a random variable is the *product* of - a large number of independent, identically-distributed variables in the - same way that a normal distribution results if the variable is the *sum* - of a large number of independent, identically-distributed variables - (see the last example). It is one of the so-called "fat-tailed" - distributions. - - The log-normal distribution is commonly used to model the lifespan of units - with fatigue-stress failure modes. Since this includes - most mechanical systems, the log-normal distribution has widespread - application. - - It is also commonly used to model oil field sizes, species abundance, and - latent periods of infectious diseases. - - References - ---------- - .. [1] Eckhard Limpert, Werner A. Stahel, and Markus Abbt, "Log-normal - Distributions across the Sciences: Keys and Clues", May 2001 - Vol. 51 No. 5 BioScience - http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf - .. [2] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme - Values, Birkhauser Verlag, Basel, pp 31-32. - .. [3] Wikipedia, "Lognormal distribution", - http://en.wikipedia.org/wiki/Lognormal_distribution - - Examples - -------- - Draw samples from the distribution: - - >>> mu, sigma = 3., 1. # mean and standard deviation - >>> s = np.random.lognormal(mu, sigma, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='mid') - - >>> x = np.linspace(min(bins), max(bins), 10000) - >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2)) - ... / (x * sigma * np.sqrt(2 * np.pi))) - - >>> plt.plot(x, pdf, linewidth=2, color='r') - >>> plt.axis('tight') - >>> plt.show() - - Demonstrate that taking the products of random samples from a uniform - distribution can be fit well by a log-normal probability density function. 
- - >>> # Generate a thousand samples: each is the product of 100 random - >>> # values, drawn from a normal distribution. - >>> b = [] - >>> for i in range(1000): - ... a = 10. + np.random.random(100) - ... b.append(np.product(a)) - - >>> b = np.array(b) / np.min(b) # scale values to be positive - - >>> count, bins, ignored = plt.hist(b, 100, normed=True, align='center') - - >>> sigma = np.std(np.log(b)) - >>> mu = np.mean(np.log(b)) - - >>> x = np.linspace(min(bins), max(bins), 10000) - >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2)) - ... / (x * sigma * np.sqrt(2 * np.pi))) - - >>> plt.plot(x, pdf, color='r', linewidth=2) - >>> plt.show() - - """ - cdef ndarray omean, osigma - cdef double fmean, fsigma - - fmean = PyFloat_AsDouble(mean) - fsigma = PyFloat_AsDouble(sigma) - - if not PyErr_Occurred(): - if fsigma <= 0: - raise ValueError("sigma <= 0") - return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma) - - PyErr_Clear() - - omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ALIGNED) - osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(osigma, 0.0)): - raise ValueError("sigma <= 0.0") - return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) - - def rayleigh(self, scale=1.0, size=None): - """ - rayleigh(scale=1.0, size=None) - - Draw samples from a Rayleigh distribution. - - The :math:`\\chi` and Weibull distributions are generalizations of the - Rayleigh. - - Parameters - ---------- - scale : scalar - Scale, also equals the mode. Should be >= 0. - size : int or tuple of ints, optional - Shape of the output. Default is None, in which case a single - value is returned. - - Notes - ----- - The probability density function for the Rayleigh distribution is - - .. 
math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}} - - The Rayleigh distribution arises if the wind speed and wind direction are - both gaussian variables, then the vector wind velocity forms a Rayleigh - distribution. The Rayleigh distribution is used to model the expected - output from wind turbines. - - References - ---------- - ..[1] Brighton Webs Ltd., Rayleigh Distribution, - http://www.brighton-webs.co.uk/distributions/rayleigh.asp - ..[2] Wikipedia, "Rayleigh distribution" - http://en.wikipedia.org/wiki/Rayleigh_distribution - - Examples - -------- - Draw values from the distribution and plot the histogram - - >>> values = hist(np.random.rayleigh(3, 100000), bins=200, normed=True) - - Wave heights tend to follow a Rayleigh distribution. If the mean wave - height is 1 meter, what fraction of waves are likely to be larger than 3 - meters? - - >>> meanvalue = 1 - >>> modevalue = np.sqrt(2 / np.pi) * meanvalue - >>> s = np.random.rayleigh(modevalue, 1000000) - - The percentage of waves larger than 3 meters is: - - >>> 100.*sum(s>3)/1000000. - 0.087300000000000003 - - """ - cdef ndarray oscale - cdef double fscale - - fscale = PyFloat_AsDouble(scale) - - if not PyErr_Occurred(): - if fscale <= 0: - raise ValueError("scale <= 0") - return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale) - - PyErr_Clear() - - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oscale, 0.0)): - raise ValueError("scale <= 0.0") - return cont1_array(self.internal_state, rk_rayleigh, size, oscale) - - def wald(self, mean, scale, size=None): - """ - wald(mean, scale, size=None) - - Draw samples from a Wald, or Inverse Gaussian, distribution. - - As the scale approaches infinity, the distribution becomes more like a - Gaussian. - - Some references claim that the Wald is an Inverse Gaussian with mean=1, but - this is by no means universal. 
- - The Inverse Gaussian distribution was first studied in relationship to - Brownian motion. In 1956 M.C.K. Tweedie used the name Inverse Gaussian - because there is an inverse relationship between the time to cover a unit - distance and distance covered in unit time. - - Parameters - ---------- - mean : scalar - Distribution mean, should be > 0. - scale : scalar - Scale parameter, should be >= 0. - size : int or tuple of ints, optional - Output shape. Default is None, in which case a single value is - returned. - - Returns - ------- - samples : ndarray or scalar - Drawn sample, all greater than zero. - - Notes - ----- - The probability density function for the Wald distribution is - - .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^ - \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x} - - As noted above the Inverse Gaussian distribution first arise from attempts - to model Brownian Motion. It is also a competitor to the Weibull for use in - reliability modeling and modeling stock returns and interest rate - processes. - - References - ---------- - ..[1] Brighton Webs Ltd., Wald Distribution, - http://www.brighton-webs.co.uk/distributions/wald.asp - ..[2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian - Distribution: Theory : Methodology, and Applications", CRC Press, - 1988. 
- ..[3] Wikipedia, "Wald distribution" - http://en.wikipedia.org/wiki/Wald_distribution - - Examples - -------- - Draw values from the distribution and plot the histogram: - - >>> import matplotlib.pyplot as plt - >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True) - >>> plt.show() - - """ - cdef ndarray omean, oscale - cdef double fmean, fscale - - fmean = PyFloat_AsDouble(mean) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): - if fmean <= 0: - raise ValueError("mean <= 0") - if fscale <= 0: - raise ValueError("scale <= 0") - return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale) - - PyErr_Clear() - omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(omean,0.0)): - raise ValueError("mean <= 0.0") - elif np.any(np.less_equal(oscale,0.0)): - raise ValueError("scale <= 0.0") - return cont2_array(self.internal_state, rk_wald, size, omean, oscale) - - - - def triangular(self, left, mode, right, size=None): - """ - triangular(left, mode, right, size=None) - - Draw samples from the triangular distribution. - - The triangular distribution is a continuous probability distribution with - lower limit left, peak at mode, and upper limit right. Unlike the other - distributions, these parameters directly define the shape of the pdf. - - Parameters - ---------- - left : scalar - Lower limit. - mode : scalar - The value where the peak of the distribution occurs. - The value should fulfill the condition ``left <= mode <= right``. - right : scalar - Upper limit, should be larger than `left`. - size : int or tuple of ints, optional - Output shape. Default is None, in which case a single value is - returned. - - Returns - ------- - samples : ndarray or scalar - The returned samples all lie in the interval [left, right]. - - Notes - ----- - The probability density function for the Triangular distribution is - - .. 
math:: P(x;l, m, r) = \\begin{cases} - \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\ - \\frac{2(m-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\ - 0& \\text{otherwise}. - \\end{cases} - - The triangular distribution is often used in ill-defined problems where the - underlying distribution is not known, but some knowledge of the limits and - mode exists. Often it is used in simulations. - - References - ---------- - ..[1] Wikipedia, "Triangular distribution" - http://en.wikipedia.org/wiki/Triangular_distribution - - Examples - -------- - Draw values from the distribution and plot the histogram: - - >>> import matplotlib.pyplot as plt - >>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200, - ... normed=True) - >>> plt.show() - - """ - cdef ndarray oleft, omode, oright - cdef double fleft, fmode, fright - - fleft = PyFloat_AsDouble(left) - fright = PyFloat_AsDouble(right) - fmode = PyFloat_AsDouble(mode) - if not PyErr_Occurred(): - if fleft > fmode: - raise ValueError("left > mode") - if fmode > fright: - raise ValueError("mode > right") - if fleft == fright: - raise ValueError("left == right") - return cont3_array_sc(self.internal_state, rk_triangular, size, fleft, - fmode, fright) - - PyErr_Clear() - oleft = PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ALIGNED) - omode = PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ALIGNED) - oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ALIGNED) - - if np.any(np.greater(oleft, omode)): - raise ValueError("left > mode") - if np.any(np.greater(omode, oright)): - raise ValueError("mode > right") - if np.any(np.equal(oleft, oright)): - raise ValueError("left == right") - return cont3_array(self.internal_state, rk_triangular, size, oleft, - omode, oright) - - # Complicated, discrete distributions: - def binomial(self, n, p, size=None): - """ - binomial(n, p, size=None) - - Draw samples from a binomial distribution. 
- - Samples are drawn from a Binomial distribution with specified - parameters, n trials and p probability of success where - n an integer > 0 and p is in the interval [0,1]. (n may be - input as a float, but it is truncated to an integer in use) - - Parameters - ---------- - n : float (but truncated to an integer) - parameter, > 0. - p : float - parameter, >= 0 and <=1. - size : {tuple, int} - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : {ndarray, scalar} - where the values are all integers in [0, n]. - - See Also - -------- - scipy.stats.distributions.binom : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Binomial distribution is - - .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N}, - - where :math:`n` is the number of trials, :math:`p` is the probability - of success, and :math:`N` is the number of successes. - - When estimating the standard error of a proportion in a population by - using a random sample, the normal distribution works well unless the - product p*n <=5, where p = population proportion estimate, and n = - number of samples, in which case the binomial distribution is used - instead. For example, a sample of 15 people shows 4 who are left - handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4, - so the binomial distribution should be used in this case. - - References - ---------- - .. [1] Dalgaard, Peter, "Introductory Statistics with R", - Springer-Verlag, 2002. - .. [2] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill, - Fifth Edition, 2002. - .. [3] Lentner, Marvin, "Elementary Applied Statistics", Bogden - and Quigley, 1972. - .. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A - Wolfram Web Resource. - http://mathworld.wolfram.com/BinomialDistribution.html - .. 
[5] Wikipedia, "Binomial-distribution", - http://en.wikipedia.org/wiki/Binomial_distribution - - Examples - -------- - Draw samples from the distribution: - - >>> n, p = 10, .5 # number of trials, probability of each trial - >>> s = np.random.binomial(n, p, 1000) - # result of flipping a coin 10 times, tested 1000 times. - - A real world example. A company drills 9 wild-cat oil exploration - wells, each with an estimated probability of success of 0.1. All nine - wells fail. What is the probability of that happening? - - Let's do 20,000 trials of the model, and count the number that - generate zero positive results. - - >>> sum(np.random.binomial(9,0.1,20000)==0)/20000. - answer = 0.38885, or 38%. - - """ - cdef ndarray on, op - cdef long ln - cdef double fp - - fp = PyFloat_AsDouble(p) - ln = PyInt_AsLong(n) - if not PyErr_Occurred(): - if ln <= 0: - raise ValueError("n <= 0") - if fp < 0: - raise ValueError("p < 0") - elif fp > 1: - raise ValueError("p > 1") - return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp) - - PyErr_Clear() - - on = PyArray_FROM_OTF(n, NPY_LONG, NPY_ALIGNED) - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(n, 0)): - raise ValueError("n <= 0") - if np.any(np.less(p, 0)): - raise ValueError("p < 0") - if np.any(np.greater(p, 1)): - raise ValueError("p > 1") - return discnp_array(self.internal_state, rk_binomial, size, on, op) - - def negative_binomial(self, n, p, size=None): - """ - negative_binomial(n, p, size=None) - - Draw samples from a negative_binomial distribution. - - Samples are drawn from a negative_Binomial distribution with specified - parameters, `n` trials and `p` probability of success where `n` is an - integer > 0 and `p` is in the interval [0, 1]. - - Parameters - ---------- - n : int - Parameter, > 0. - p : float - Parameter, >= 0 and <=1. - size : int or tuple of ints - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. 
- - Returns - ------- - samples : int or ndarray of ints - Drawn samples. - - Notes - ----- - The probability density for the Negative Binomial distribution is - - .. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N}, - - where :math:`n-1` is the number of successes, :math:`p` is the probability - of success, and :math:`N+n-1` is the number of trials. - - The negative binomial distribution gives the probability of n-1 successes - and N failures in N+n-1 trials, and success on the (N+n)th trial. - - If one throws a die repeatedly until the third time a "1" appears, then the - probability distribution of the number of non-"1"s that appear before the - third "1" is a negative binomial distribution. - - References - ---------- - .. [1] Weisstein, Eric W. "Negative Binomial Distribution." From - MathWorld--A Wolfram Web Resource. - http://mathworld.wolfram.com/NegativeBinomialDistribution.html - .. [2] Wikipedia, "Negative binomial distribution", - http://en.wikipedia.org/wiki/Negative_binomial_distribution - - Examples - -------- - Draw samples from the distribution: - - A real world example. A company drills wild-cat oil exploration wells, each - with an estimated probability of success of 0.1. What is the probability - of having one success for each successive well, that is what is the - probability of a single success after drilling 5 wells, after 6 wells, - etc.? - - >>> s = np.random.negative_binomial(1, 0.1, 100000) - >>> for i in range(1, 11): - ... 
probability = sum(s 1: - raise ValueError("p > 1") - return discdd_array_sc(self.internal_state, rk_negative_binomial, - size, fn, fp) - - PyErr_Clear() - - on = PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ALIGNED) - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(n, 0)): - raise ValueError("n <= 0") - if np.any(np.less(p, 0)): - raise ValueError("p < 0") - if np.any(np.greater(p, 1)): - raise ValueError("p > 1") - return discdd_array(self.internal_state, rk_negative_binomial, size, - on, op) - - def poisson(self, lam=1.0, size=None): - """ - poisson(lam=1.0, size=None) - - Draw samples from a Poisson distribution. - - The Poisson distribution is the limit of the Binomial - distribution for large N. - - Parameters - ---------- - lam : float - Expectation of interval, should be >= 0. - size : int or tuple of ints, optional - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Notes - ----- - The Poisson distribution - - .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!} - - For events with an expected separation :math:`\\lambda` the Poisson - distribution :math:`f(k; \\lambda)` describes the probability of - :math:`k` events occurring within the observed interval :math:`\\lambda`. - - Because the output is limited to the range of the C long type, a - ValueError is raised when `lam` is within 10 sigma of the maximum - representable value. - - References - ---------- - .. [1] Weisstein, Eric W. "Poisson Distribution." From MathWorld--A Wolfram - Web Resource. http://mathworld.wolfram.com/PoissonDistribution.html - .. 
[2] Wikipedia, "Poisson distribution", - http://en.wikipedia.org/wiki/Poisson_distribution - - Examples - -------- - Draw samples from the distribution: - - >>> import numpy as np - >>> s = np.random.poisson(5, 10000) - - Display histogram of the sample: - - >>> import matplotlib.pyplot as plt - >>> count, bins, ignored = plt.hist(s, 14, normed=True) - >>> plt.show() - - """ - cdef ndarray olam - cdef double flam - flam = PyFloat_AsDouble(lam) - if not PyErr_Occurred(): - if lam < 0: - raise ValueError("lam < 0") - if lam > self.poisson_lam_max: - raise ValueError("lam value too large") - return discd_array_sc(self.internal_state, rk_poisson, size, flam) - - PyErr_Clear() - - olam = PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less(olam, 0)): - raise ValueError("lam < 0") - if np.any(np.greater(olam, self.poisson_lam_max)): - raise ValueError("lam value too large.") - return discd_array(self.internal_state, rk_poisson, size, olam) - - def zipf(self, a, size=None): - """ - zipf(a, size=None) - - Draw samples from a Zipf distribution. - - Samples are drawn from a Zipf distribution with specified parameter - `a` > 1. - - The Zipf distribution (also known as the zeta distribution) is a - continuous probability distribution that satisfies Zipf's law: the - frequency of an item is inversely proportional to its rank in a - frequency table. - - Parameters - ---------- - a : float > 1 - Distribution parameter. - size : int or tuple of int, optional - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn; a single integer is equivalent in - its result to providing a mono-tuple, i.e., a 1-D array of length - *size* is returned. The default is None, in which case a single - scalar is returned. - - Returns - ------- - samples : scalar or ndarray - The returned samples are greater than or equal to one. 
- - See Also - -------- - scipy.stats.distributions.zipf : probability density function, - distribution, or cumulative density function, etc. - - Notes - ----- - The probability density for the Zipf distribution is - - .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)}, - - where :math:`\\zeta` is the Riemann Zeta function. - - It is named for the American linguist George Kingsley Zipf, who noted - that the frequency of any word in a sample of a language is inversely - proportional to its rank in the frequency table. - - References - ---------- - Zipf, G. K., *Selected Studies of the Principle of Relative Frequency - in Language*, Cambridge, MA: Harvard Univ. Press, 1932. - - Examples - -------- - Draw samples from the distribution: - - >>> a = 2. # parameter - >>> s = np.random.zipf(a, 1000) - - Display the histogram of the samples, along with - the probability density function: - - >>> import matplotlib.pyplot as plt - >>> import scipy.special as sps - Truncate s values at 50 so plot is interesting - >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True) - >>> x = np.arange(1., 50.) - >>> y = x**(-a)/sps.zetac(a) - >>> plt.plot(x, y/max(y), linewidth=2, color='r') - >>> plt.show() - - """ - cdef ndarray oa - cdef double fa - - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): - if fa <= 1.0: - raise ValueError("a <= 1.0") - return discd_array_sc(self.internal_state, rk_zipf, size, fa) - - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(oa, 1.0)): - raise ValueError("a <= 1.0") - return discd_array(self.internal_state, rk_zipf, size, oa) - - def geometric(self, p, size=None): - """ - geometric(p, size=None) - - Draw samples from the geometric distribution. - - Bernoulli trials are experiments with one of two outcomes: - success or failure (an example of such an experiment is flipping - a coin). The geometric distribution models the number of trials - that must be run in order to achieve success. 
It is therefore - supported on the positive integers, ``k = 1, 2, ...``. - - The probability mass function of the geometric distribution is - - .. math:: f(k) = (1 - p)^{k - 1} p - - where `p` is the probability of success of an individual trial. - - Parameters - ---------- - p : float - The probability of success of an individual trial. - size : tuple of ints - Number of values to draw from the distribution. The output - is shaped according to `size`. - - Returns - ------- - out : ndarray - Samples from the geometric distribution, shaped according to - `size`. - - Examples - -------- - Draw ten thousand values from the geometric distribution, - with the probability of an individual success equal to 0.35: - - >>> z = np.random.geometric(p=0.35, size=10000) - - How many trials succeeded after a single run? - - >>> (z == 1).sum() / 10000. - 0.34889999999999999 #random - - """ - cdef ndarray op - cdef double fp - - fp = PyFloat_AsDouble(p) - if not PyErr_Occurred(): - if fp < 0.0: - raise ValueError("p < 0.0") - if fp > 1.0: - raise ValueError("p > 1.0") - return discd_array_sc(self.internal_state, rk_geometric, size, fp) - - PyErr_Clear() - - - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less(op, 0.0)): - raise ValueError("p < 0.0") - if np.any(np.greater(op, 1.0)): - raise ValueError("p > 1.0") - return discd_array(self.internal_state, rk_geometric, size, op) - - def hypergeometric(self, ngood, nbad, nsample, size=None): - """ - hypergeometric(ngood, nbad, nsample, size=None) - - Draw samples from a Hypergeometric distribution. - - Samples are drawn from a Hypergeometric distribution with specified - parameters, ngood (ways to make a good selection), nbad (ways to make - a bad selection), and nsample = number of items sampled, which is less - than or equal to the sum ngood + nbad. - - Parameters - ---------- - ngood : float (but truncated to an integer) - parameter, > 0. - nbad : float - parameter, >= 0. 
- nsample : float - parameter, > 0 and <= ngood+nbad - size : {tuple, int} - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : {ndarray, scalar} - where the values are all integers in [0, n]. - - See Also - -------- - scipy.stats.distributions.hypergeom : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Hypergeometric distribution is - - .. math:: P(x) = \\frac{\\binom{m}{n}\\binom{N-m}{n-x}}{\\binom{N}{n}}, - - where :math:`0 \\le x \\le m` and :math:`n+m-N \\le x \\le n` - - for P(x) the probability of x successes, n = ngood, m = nbad, and - N = number of samples. - - Consider an urn with black and white marbles in it, ngood of them - black and nbad are white. If you draw nsample balls without - replacement, then the Hypergeometric distribution describes the - distribution of black balls in the drawn sample. - - Note that this distribution is very similar to the Binomial - distribution, except that in this case, samples are drawn without - replacement, whereas in the Binomial case samples are drawn with - replacement (or the sample space is infinite). As the sample space - becomes large, this distribution approaches the Binomial. - - References - ---------- - .. [1] Lentner, Marvin, "Elementary Applied Statistics", Bogden - and Quigley, 1972. - .. [2] Weisstein, Eric W. "Hypergeometric Distribution." From - MathWorld--A Wolfram Web Resource. - http://mathworld.wolfram.com/HypergeometricDistribution.html - .. 
[3] Wikipedia, "Hypergeometric-distribution", - http://en.wikipedia.org/wiki/Hypergeometric-distribution - - Examples - -------- - Draw samples from the distribution: - - >>> ngood, nbad, nsamp = 100, 2, 10 - # number of good, number of bad, and number of samples - >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000) - >>> hist(s) - # note that it is very unlikely to grab both bad items - - Suppose you have an urn with 15 white and 15 black marbles. - If you pull 15 marbles at random, how likely is it that - 12 or more of them are one color? - - >>> s = np.random.hypergeometric(15, 15, 15, 100000) - >>> sum(s>=12)/100000. + sum(s<=3)/100000. - # answer = 0.003 ... pretty unlikely! - - """ - cdef ndarray ongood, onbad, onsample - cdef long lngood, lnbad, lnsample - - lngood = PyInt_AsLong(ngood) - lnbad = PyInt_AsLong(nbad) - lnsample = PyInt_AsLong(nsample) - if not PyErr_Occurred(): - if ngood < 1: - raise ValueError("ngood < 1") - if nbad < 1: - raise ValueError("nbad < 1") - if nsample < 1: - raise ValueError("nsample < 1") - if ngood + nbad < nsample: - raise ValueError("ngood + nbad < nsample") - return discnmN_array_sc(self.internal_state, rk_hypergeometric, size, - lngood, lnbad, lnsample) - - - PyErr_Clear() - - ongood = PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ALIGNED) - onbad = PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ALIGNED) - onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ALIGNED) - if np.any(np.less(ongood, 1)): - raise ValueError("ngood < 1") - if np.any(np.less(onbad, 1)): - raise ValueError("nbad < 1") - if np.any(np.less(onsample, 1)): - raise ValueError("nsample < 1") - if np.any(np.less(np.add(ongood, onbad),onsample)): - raise ValueError("ngood + nbad < nsample") - return discnmN_array(self.internal_state, rk_hypergeometric, size, - ongood, onbad, onsample) - - def logseries(self, p, size=None): - """ - logseries(p, size=None) - - Draw samples from a Logarithmic Series distribution. 
- - Samples are drawn from a Log Series distribution with specified - parameter, p (probability, 0 < p < 1). - - Parameters - ---------- - loc : float - - scale : float > 0. - - size : {tuple, int} - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. - - Returns - ------- - samples : {ndarray, scalar} - where the values are all integers in [0, n]. - - See Also - -------- - scipy.stats.distributions.logser : probability density function, - distribution or cumulative density function, etc. - - Notes - ----- - The probability density for the Log Series distribution is - - .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)}, - - where p = probability. - - The Log Series distribution is frequently used to represent species - richness and occurrence, first proposed by Fisher, Corbet, and - Williams in 1943 [2]. It may also be used to model the numbers of - occupants seen in cars [3]. - - References - ---------- - .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional - species diversity through the log series distribution of - occurrences: BIODIVERSITY RESEARCH Diversity & Distributions, - Volume 5, Number 5, September 1999 , pp. 187-195(9). - .. [2] Fisher, R.A,, A.S. Corbet, and C.B. Williams. 1943. The - relation between the number of species and the number of - individuals in a random sample of an animal population. - Journal of Animal Ecology, 12:42-58. - .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small - Data Sets, CRC Press, 1994. - .. [4] Wikipedia, "Logarithmic-distribution", - http://en.wikipedia.org/wiki/Logarithmic-distribution - - Examples - -------- - Draw samples from the distribution: - - >>> a = .6 - >>> s = np.random.logseries(a, 10000) - >>> count, bins, ignored = plt.hist(s) - - # plot against distribution - - >>> def logseries(k, p): - ... 
return -p**k/(k*log(1-p)) - >>> plt.plot(bins, logseries(bins, a)*count.max()/ - logseries(bins, a).max(), 'r') - >>> plt.show() - - """ - cdef ndarray op - cdef double fp - - fp = PyFloat_AsDouble(p) - if not PyErr_Occurred(): - if fp <= 0.0: - raise ValueError("p <= 0.0") - if fp >= 1.0: - raise ValueError("p >= 1.0") - return discd_array_sc(self.internal_state, rk_logseries, size, fp) - - PyErr_Clear() - - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ALIGNED) - if np.any(np.less_equal(op, 0.0)): - raise ValueError("p <= 0.0") - if np.any(np.greater_equal(op, 1.0)): - raise ValueError("p >= 1.0") - return discd_array(self.internal_state, rk_logseries, size, op) - - # Multivariate distributions: - def multivariate_normal(self, mean, cov, size=None): - """ - multivariate_normal(mean, cov[, size]) - - Draw random samples from a multivariate normal distribution. - - The multivariate normal, multinormal or Gaussian distribution is a - generalization of the one-dimensional normal distribution to higher - dimensions. Such a distribution is specified by its mean and - covariance matrix. These parameters are analogous to the mean - (average or "center") and variance (standard deviation, or "width," - squared) of the one-dimensional normal distribution. - - Parameters - ---------- - mean : 1-D array_like, of length N - Mean of the N-dimensional distribution. - cov : 2-D array_like, of shape (N, N) - Covariance matrix of the distribution. Must be symmetric and - positive semi-definite for "physically meaningful" results. - size : tuple of ints, optional - Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are - generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because - each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``. - If no shape is specified, a single (`N`-D) sample is returned. - - Returns - ------- - out : ndarray - The drawn samples, of shape *size*, if that was provided. If not, - the shape is ``(N,)``. 
- - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - - Notes - ----- - The mean is a coordinate in N-dimensional space, which represents the - location where samples are most likely to be generated. This is - analogous to the peak of the bell curve for the one-dimensional or - univariate normal distribution. - - Covariance indicates the level to which two variables vary together. - From the multivariate normal distribution, we draw N-dimensional - samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix - element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. - The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its - "spread"). - - Instead of specifying the full covariance matrix, popular - approximations include: - - - Spherical covariance (*cov* is a multiple of the identity matrix) - - Diagonal covariance (*cov* has non-negative elements, and only on - the diagonal) - - This geometrical property can be seen in two dimensions by plotting - generated data-points: - - >>> mean = [0,0] - >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - - >>> import matplotlib.pyplot as plt - >>> x,y = np.random.multivariate_normal(mean,cov,5000).T - >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - - Note that the covariance matrix must be non-negative definite. - - References - ---------- - Papoulis, A., *Probability, Random Variables, and Stochastic Processes*, - 3rd ed., New York: McGraw-Hill, 1991. - - Duda, R. O., Hart, P. E., and Stork, D. G., *Pattern Classification*, - 2nd ed., New York: Wiley, 2001. 
- - Examples - -------- - >>> mean = (1,2) - >>> cov = [[1,0],[1,0]] - >>> x = np.random.multivariate_normal(mean,cov,(3,3)) - >>> x.shape - (3, 3, 2) - - The following is probably true, given that 0.6 is roughly twice the - standard deviation: - - >>> print list( (x[0,0,:] - mean) < 0.6 ) - [True, True] - - """ - # Check preconditions on arguments - mean = np.array(mean) - cov = np.array(cov) - if size is None: - shape = [] - else: - shape = size - if len(mean.shape) != 1: - raise ValueError("mean must be 1 dimensional") - if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): - raise ValueError("cov must be 2 dimensional and square") - if mean.shape[0] != cov.shape[0]: - raise ValueError("mean and cov must have same length") - # Compute shape of output - if isinstance(shape, int): - shape = [shape] - final_shape = list(shape[:]) - final_shape.append(mean.shape[0]) - # Create a matrix of independent standard normally distributed random - # numbers. The matrix has rows with the same length as mean and as - # many rows are necessary to form a matrix of shape final_shape. - x = self.standard_normal(np.multiply.reduce(final_shape)) - x.shape = (np.multiply.reduce(final_shape[0:len(final_shape)-1]), - mean.shape[0]) - # Transform matrix of standard normals into matrix where each row - # contains multivariate normals with the desired covariance. - # Compute A such that dot(transpose(A),A) == cov. - # Then the matrix products of the rows of x and A has the desired - # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value - # decomposition of cov is such an A. - - from numpy.dual import svd - # XXX: we really should be doing this by Cholesky decomposition - (u,s,v) = svd(cov) - x = np.dot(x*np.sqrt(s),v) - # The rows of x now have the correct covariance but mean 0. Add - # mean to each row. Then each row will have mean mean. 
- np.add(mean,x,x) - x.shape = tuple(final_shape) - return x - - def multinomial(self, npy_intp n, object pvals, size=None): - """ - multinomial(n, pvals, size=None) - - Draw samples from a multinomial distribution. - - The multinomial distribution is a multivariate generalisation of the - binomial distribution. Take an experiment with one of ``p`` - possible outcomes. An example of such an experiment is throwing a dice, - where the outcome can be 1 through 6. Each sample drawn from the - distribution represents `n` such experiments. Its values, - ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the outcome - was ``i``. - - Parameters - ---------- - n : int - Number of experiments. - pvals : sequence of floats, length p - Probabilities of each of the ``p`` different outcomes. These - should sum to 1 (however, the last element is always assumed to - account for the remaining probability, as long as - ``sum(pvals[:-1]) <= 1)``. - size : tuple of ints - Given a `size` of ``(M, N, K)``, then ``M*N*K`` samples are drawn, - and the output shape becomes ``(M, N, K, p)``, since each sample - has shape ``(p,)``. - - Examples - -------- - Throw a dice 20 times: - - >>> np.random.multinomial(20, [1/6.]*6, size=1) - array([[4, 1, 7, 5, 2, 1]]) - - It landed 4 times on 1, once on 2, etc. - - Now, throw the dice 20 times, and 20 times again: - - >>> np.random.multinomial(20, [1/6.]*6, size=2) - array([[3, 4, 3, 3, 4, 3], - [2, 4, 3, 4, 0, 7]]) - - For the first run, we threw 3 times 1, 4 times 2, etc. For the second, - we threw 2 times 1, 4 times 2, etc. 
- - A loaded dice is more likely to land on number 6: - - >>> np.random.multinomial(100, [1/7.]*5) - array([13, 16, 13, 16, 42]) - - """ - cdef npy_intp d - cdef ndarray parr "arrayObject_parr", mnarr "arrayObject_mnarr" - cdef double *pix - cdef long *mnix - cdef npy_intp i, j, dn - cdef double Sum - - d = len(pvals) - parr = PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) - pix = parr.data - - if kahan_sum(pix, d-1) > (1.0 + 1e-12): - raise ValueError("sum(pvals[:-1]) > 1.0") - - if size is None: - shape = (d,) - elif type(size) is int: - shape = (size, d) - else: - shape = size + (d,) - - multin = np.zeros(shape, int) - mnarr = multin - mnix = mnarr.data - i = 0 - while i < PyArray_SIZE(mnarr): - Sum = 1.0 - dn = n - for j from 0 <= j < d-1: - mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum) - dn = dn - mnix[i+j] - if dn <= 0: - break - Sum = Sum - pix[j] - if dn > 0: - mnix[i+d-1] = dn - - i = i + d - - return multin - - def dirichlet(self, object alpha, size=None): - """ - dirichlet(alpha, size=None) - - Draw samples from the Dirichlet distribution. - - Draw `size` samples of dimension k from a Dirichlet distribution. A - Dirichlet-distributed random variable can be seen as a multivariate - generalization of a Beta distribution. Dirichlet pdf is the conjugate - prior of a multinomial in Bayesian inference. - - Parameters - ---------- - alpha : array - Parameter of the distribution (k dimension for sample of - dimension k). - size : array - Number of samples to draw. - - Returns - ------- - samples : ndarray, - The drawn samples, of shape (alpha.ndim, size). - - Notes - ----- - .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i} - - Uses the following property for computation: for each dimension, - draw a random sample y_i from a standard gamma generator of shape - `alpha_i`, then - :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is - Dirichlet distributed. - - References - ---------- - .. 
[1] David McKay, "Information Theory, Inference and Learning - Algorithms," chapter 23, - http://www.inference.phy.cam.ac.uk/mackay/ - .. [2] Wikipedia, "Dirichlet distribution", - http://en.wikipedia.org/wiki/Dirichlet_distribution - - Examples - -------- - Taking an example cited in Wikipedia, this distribution can be used if - one wanted to cut strings (each of initial length 1.0) into K pieces - with different lengths, where each piece had, on average, a designated - average length, but allowing some variation in the relative sizes of the - pieces. - - >>> s = np.random.dirichlet((10, 5, 3), 20).transpose() - - >>> plt.barh(range(20), s[0]) - >>> plt.barh(range(20), s[1], left=s[0], color='g') - >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r') - >>> plt.title("Lengths of Strings") - - """ - - #================= - # Pure python algo - #================= - #alpha = N.atleast_1d(alpha) - #k = alpha.size - - #if n == 1: - # val = N.zeros(k) - # for i in range(k): - # val[i] = sgamma(alpha[i], n) - # val /= N.sum(val) - #else: - # val = N.zeros((k, n)) - # for i in range(k): - # val[i] = sgamma(alpha[i], n) - # val /= N.sum(val, axis = 0) - # val = val.T - - #return val - - cdef npy_intp k - cdef npy_intp totsize - cdef ndarray alpha_arr, val_arr - cdef double *alpha_data, *val_data - cdef npy_intp i, j - cdef double acc, invacc - - k = len(alpha) - alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) - alpha_data = alpha_arr.data - - if size is None: - shape = (k,) - elif type(size) is int: - shape = (size, k) - else: - shape = size + (k,) - - diric = np.zeros(shape, np.float64) - val_arr = diric - val_data= val_arr.data - - i = 0 - totsize = PyArray_SIZE(val_arr) - while i < totsize: - acc = 0.0 - for j from 0 <= j < k: - val_data[i+j] = rk_standard_gamma(self.internal_state, alpha_data[j]) - acc = acc + val_data[i+j] - invacc = 1/acc - for j from 0 <= j < k: - val_data[i+j] = val_data[i+j] * invacc - i = i + k - - return diric - - # Shuffling 
and permutations: - def shuffle(self, object x): - """ - shuffle(x) - - Modify a sequence in-place by shuffling its contents. - - Parameters - ---------- - x : array_like - The array or list to be shuffled. - - Returns - ------- - None - - Examples - -------- - >>> arr = np.arange(10) - >>> np.random.shuffle(arr) - >>> arr - [1 7 5 2 9 4 3 6 0 8] - - This function only shuffles the array along the first index of a - multi-dimensional array: - - >>> arr = np.arange(9).reshape((3, 3)) - >>> np.random.shuffle(arr) - >>> arr - array([[3, 4, 5], - [6, 7, 8], - [0, 1, 2]]) - - """ - cdef npy_intp i, j - cdef int copy - - i = len(x) - 1 - try: - j = len(x[0]) - except: - j = 0 - - if (j == 0): - # adaptation of random.shuffle() - while i > 0: - j = rk_interval(i, self.internal_state) - x[i], x[j] = x[j], x[i] - i = i - 1 - else: - # make copies - copy = hasattr(x[0], 'copy') - if copy: - while(i > 0): - j = rk_interval(i, self.internal_state) - x[i], x[j] = x[j].copy(), x[i].copy() - i = i - 1 - else: - while(i > 0): - j = rk_interval(i, self.internal_state) - x[i], x[j] = x[j][:], x[i][:] - i = i - 1 - - def permutation(self, object x): - """ - permutation(x) - - Randomly permute a sequence, or return a permuted range. - - If `x` is a multi-dimensional array, it is only shuffled along its - first index. - - Parameters - ---------- - x : int or array_like - If `x` is an integer, randomly permute ``np.arange(x)``. - If `x` is an array, make a copy and shuffle the elements - randomly. - - Returns - ------- - out : ndarray - Permuted sequence or array range. 
- - Examples - -------- - >>> np.random.permutation(10) - array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) - - >>> np.random.permutation([1, 4, 9, 12, 15]) - array([15, 1, 9, 4, 12]) - - >>> arr = np.arange(9).reshape((3, 3)) - >>> np.random.permutation(arr) - array([[6, 7, 8], - [0, 1, 2], - [3, 4, 5]]) - - """ - if isinstance(x, (int, long, np.integer)): - arr = np.arange(x) - else: - arr = np.array(x) - self.shuffle(arr) - return arr - -_rand = RandomState() -seed = _rand.seed -get_state = _rand.get_state -set_state = _rand.set_state -random_sample = _rand.random_sample -randint = _rand.randint -bytes = _rand.bytes -uniform = _rand.uniform -rand = _rand.rand -randn = _rand.randn -random_integers = _rand.random_integers -standard_normal = _rand.standard_normal -normal = _rand.normal -beta = _rand.beta -exponential = _rand.exponential -standard_exponential = _rand.standard_exponential -standard_gamma = _rand.standard_gamma -gamma = _rand.gamma -f = _rand.f -noncentral_f = _rand.noncentral_f -chisquare = _rand.chisquare -noncentral_chisquare = _rand.noncentral_chisquare -standard_cauchy = _rand.standard_cauchy -standard_t = _rand.standard_t -vonmises = _rand.vonmises -pareto = _rand.pareto -weibull = _rand.weibull -power = _rand.power -laplace = _rand.laplace -gumbel = _rand.gumbel -logistic = _rand.logistic -lognormal = _rand.lognormal -rayleigh = _rand.rayleigh -wald = _rand.wald -triangular = _rand.triangular - -binomial = _rand.binomial -negative_binomial = _rand.negative_binomial -poisson = _rand.poisson -zipf = _rand.zipf -geometric = _rand.geometric -hypergeometric = _rand.hypergeometric -logseries = _rand.logseries - -multivariate_normal = _rand.multivariate_normal -multinomial = _rand.multinomial -dirichlet = _rand.dirichlet - -shuffle = _rand.shuffle -permutation = _rand.permutation diff --git a/numpy-1.6.2/numpy/random/mtrand/mtrand_py_helper.h b/numpy-1.6.2/numpy/random/mtrand/mtrand_py_helper.h deleted file mode 100644 index 266847cbe9..0000000000 --- 
a/numpy-1.6.2/numpy/random/mtrand/mtrand_py_helper.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _MTRAND_PY_HELPER_H_ -#define _MTRAND_PY_HELPER_H_ - -#include - -static PyObject *empty_py_bytes(npy_intp length, void **bytes) -{ - PyObject *b; -#if PY_MAJOR_VERSION >= 3 - b = PyBytes_FromStringAndSize(NULL, length); - if (b) { - *bytes = PyBytes_AS_STRING(b); - } -#else - b = PyString_FromStringAndSize(NULL, length); - if (b) { - *bytes = PyString_AS_STRING(b); - } -#endif - return b; -} - -#endif /* _MTRAND_PY_HELPER_H_ */ diff --git a/numpy-1.6.2/numpy/random/mtrand/numpy.pxi b/numpy-1.6.2/numpy/random/mtrand/numpy.pxi deleted file mode 100644 index b4a9c39d8d..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/numpy.pxi +++ /dev/null @@ -1,133 +0,0 @@ -# :Author: Travis Oliphant - -cdef extern from "numpy/arrayobject.h": - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - cdef enum requirements: - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum defines: - NPY_MAXDIMS - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef int npy_intp - - ctypedef extern class numpy.dtype [object PyArray_Descr]: - cdef int type_num, elsize, alignment - cdef char type, kind, byteorder, flags - cdef object fields, typeobj - - ctypedef extern class 
numpy.ndarray [object PyArrayObject]: - cdef char *data - cdef int nd - cdef npy_intp *dimensions - cdef npy_intp *strides - cdef object base - cdef dtype descr - cdef int flags - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - cdef int nd_m1 - cdef npy_intp index, size - cdef ndarray ao - cdef char *dataptr - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - cdef npy_intp *dimensions - cdef void **iters - - object PyArray_ZEROS(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - object PyArray_EMPTY(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran) - dtype PyArray_DescrFromTypeNum(NPY_TYPES type_num) - object PyArray_SimpleNew(int ndims, npy_intp* dims, NPY_TYPES type_num) - int PyArray_Check(object obj) - object PyArray_ContiguousFromAny(object obj, NPY_TYPES type, - int mindim, int maxdim) - object PyArray_ContiguousFromObject(object obj, NPY_TYPES type, - int mindim, int maxdim) - npy_intp PyArray_SIZE(ndarray arr) - npy_intp PyArray_NBYTES(ndarray arr) - void *PyArray_DATA(ndarray arr) - object PyArray_FromAny(object obj, dtype newtype, int mindim, int maxdim, - int requirements, object context) - object PyArray_FROMANY(object obj, NPY_TYPES type_num, int min, - int max, int requirements) - object PyArray_NewFromDescr(object subtype, dtype newtype, int nd, - npy_intp* dims, npy_intp* strides, void* data, - int flags, object parent) - - object PyArray_FROM_OTF(object obj, NPY_TYPES type, int flags) - object PyArray_EnsureArray(object) - - object PyArray_MultiIterNew(int n, ...) 
- - char *PyArray_MultiIter_DATA(broadcast multi, int i) - void PyArray_MultiIter_NEXTi(broadcast multi, int i) - void PyArray_MultiIter_NEXT(broadcast multi) - - object PyArray_IterNew(object arr) - void PyArray_ITER_NEXT(flatiter it) - - void import_array() diff --git a/numpy-1.6.2/numpy/random/mtrand/randomkit.c b/numpy-1.6.2/numpy/random/mtrand/randomkit.c deleted file mode 100644 index b18897e2c0..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/randomkit.c +++ /dev/null @@ -1,402 +0,0 @@ -/* Random kit 1.3 */ - -/* - * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * The rk_random and rk_seed functions algorithms and the original design of - * the Mersenne Twister RNG: - * - * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * 3. The names of its contributors may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Original algorithm for the implementation of rk_interval function from - * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by - * Magnus Jonsson. - * - * Constants used in the rk_double implementation by Isaku Wada. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -/* static char const rcsid[] = - "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ -#include -#include -#include -#include -#include -#include - -#ifdef _WIN32 -/* - * Windows - * XXX: we have to use this ugly defined(__GNUC__) because it is not easy to - * detect the compiler used in distutils itself - */ -#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND)) - -/* - * FIXME: ideally, we should set this to the real version of MSVCRT. We need - * something higher than 0x601 to enable _ftime64 and co - */ -#define __MSVCRT_VERSION__ 0x0700 -#include -#include - -/* - * mingw msvcr lib import wrongly export _ftime, which does not exist in the - * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which - * is available in those versions of the runtime - */ -#define _FTIME(x) _ftime64((x)) -#else -#include -#include -#define _FTIME(x) _ftime((x)) -#endif - -#ifndef RK_NO_WINCRYPT -/* Windows crypto */ -#ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0400 -#endif -#include -#include -#endif - -#else -/* Unix */ -#include -#include -#include -#endif - -#include "randomkit.h" - -#ifndef RK_DEV_URANDOM -#define RK_DEV_URANDOM "/dev/urandom" -#endif - -#ifndef RK_DEV_RANDOM -#define RK_DEV_RANDOM "/dev/random" -#endif - -char *rk_strerror[RK_ERR_MAX] = -{ - "no error", - "random device unvavailable" -}; - -/* static functions */ -static unsigned long rk_hash(unsigned long key); - -void -rk_seed(unsigned long seed, rk_state *state) -{ - int pos; - seed &= 0xffffffffUL; - - /* Knuth's PRNG as used in the Mersenne Twister reference implementation */ - for (pos = 0; pos < RK_STATE_LEN; pos++) { - state->key[pos] = seed; - seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL; - } - state->pos = RK_STATE_LEN; - state->gauss = 0; - state->has_gauss = 0; - state->has_binomial = 0; -} - -/* Thomas Wang 32 bits integer hash function */ -unsigned long -rk_hash(unsigned long key) -{ - key += ~(key << 15); - 
key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} - -rk_error -rk_randomseed(rk_state *state) -{ -#ifndef _WIN32 - struct timeval tv; -#else - struct _timeb tv; -#endif - int i; - - if (rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR) { - /* ensures non-zero key */ - state->key[0] |= 0x80000000UL; - state->pos = RK_STATE_LEN; - state->gauss = 0; - state->has_gauss = 0; - state->has_binomial = 0; - - for (i = 0; i < 624; i++) { - state->key[i] &= 0xffffffffUL; - } - return RK_NOERR; - } - -#ifndef _WIN32 - gettimeofday(&tv, NULL); - rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec) - ^ rk_hash(clock()), state); -#else - _FTIME(&tv); - rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state); -#endif - - return RK_ENODEV; -} - -/* Magic Mersenne Twister constants */ -#define N 624 -#define M 397 -#define MATRIX_A 0x9908b0dfUL -#define UPPER_MASK 0x80000000UL -#define LOWER_MASK 0x7fffffffUL - -/* Slightly optimised reference implementation of the Mersenne Twister */ -unsigned long -rk_random(rk_state *state) -{ - unsigned long y; - - if (state->pos == RK_STATE_LEN) { - int i; - - for (i = 0; i < N - M; i++) { - y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); - state->key[i] = state->key[i+M] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); - } - for (; i < N - 1; i++) { - y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); - state->key[i] = state->key[i+(M-N)] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); - } - y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); - state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); - - state->pos = 0; - } - y = state->key[state->pos++]; - - /* Tempering */ - y ^= (y >> 11); - y ^= (y << 7) & 0x9d2c5680UL; - y ^= (y << 15) & 0xefc60000UL; - y ^= (y >> 18); - - return y; -} - -long -rk_long(rk_state *state) -{ - return rk_ulong(state) >> 1; -} - -unsigned long 
-rk_ulong(rk_state *state) -{ -#if ULONG_MAX <= 0xffffffffUL - return rk_random(state); -#else - return (rk_random(state) << 32) | (rk_random(state)); -#endif -} - -unsigned long -rk_interval(unsigned long max, rk_state *state) -{ - unsigned long mask = max, value; - - if (max == 0) { - return 0; - } - /* Smallest bit mask >= max */ - mask |= mask >> 1; - mask |= mask >> 2; - mask |= mask >> 4; - mask |= mask >> 8; - mask |= mask >> 16; -#if ULONG_MAX > 0xffffffffUL - mask |= mask >> 32; -#endif - - /* Search a random value in [0..mask] <= max */ -#if ULONG_MAX > 0xffffffffUL - if (max <= 0xffffffffUL) { - while ((value = (rk_random(state) & mask)) > max); - } - else { - while ((value = (rk_ulong(state) & mask)) > max); - } -#else - while ((value = (rk_ulong(state) & mask)) > max); -#endif - return value; -} - -double -rk_double(rk_state *state) -{ - /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */ - long a = rk_random(state) >> 5, b = rk_random(state) >> 6; - return (a * 67108864.0 + b) / 9007199254740992.0; -} - -void -rk_fill(void *buffer, size_t size, rk_state *state) -{ - unsigned long r; - unsigned char *buf = buffer; - - for (; size >= 4; size -= 4) { - r = rk_random(state); - *(buf++) = r & 0xFF; - *(buf++) = (r >> 8) & 0xFF; - *(buf++) = (r >> 16) & 0xFF; - *(buf++) = (r >> 24) & 0xFF; - } - - if (!size) { - return; - } - r = rk_random(state); - for (; size; r >>= 8, size --) { - *(buf++) = (unsigned char)(r & 0xFF); - } -} - -rk_error -rk_devfill(void *buffer, size_t size, int strong) -{ -#ifndef _WIN32 - FILE *rfile; - int done; - - if (strong) { - rfile = fopen(RK_DEV_RANDOM, "rb"); - } - else { - rfile = fopen(RK_DEV_URANDOM, "rb"); - } - if (rfile == NULL) { - return RK_ENODEV; - } - done = fread(buffer, size, 1, rfile); - fclose(rfile); - if (done) { - return RK_NOERR; - } -#else - -#ifndef RK_NO_WINCRYPT - HCRYPTPROV hCryptProv; - BOOL done; - - if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, - 
CRYPT_VERIFYCONTEXT) || !hCryptProv) { - return RK_ENODEV; - } - done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer); - CryptReleaseContext(hCryptProv, 0); - if (done) { - return RK_NOERR; - } -#endif - -#endif - return RK_ENODEV; -} - -rk_error -rk_altfill(void *buffer, size_t size, int strong, rk_state *state) -{ - rk_error err; - - err = rk_devfill(buffer, size, strong); - if (err) { - rk_fill(buffer, size, state); - } - return err; -} - -double -rk_gauss(rk_state *state) -{ - if (state->has_gauss) { - const double tmp = state->gauss; - state->gauss = 0; - state->has_gauss = 0; - return tmp; - } - else { - double f, x1, x2, r2; - - do { - x1 = 2.0*rk_double(state) - 1.0; - x2 = 2.0*rk_double(state) - 1.0; - r2 = x1*x1 + x2*x2; - } - while (r2 >= 1.0 || r2 == 0.0); - - /* Box-Muller transform */ - f = sqrt(-2.0*log(r2)/r2); - /* Keep for next call */ - state->gauss = f*x1; - state->has_gauss = 1; - return f*x2; - } -} diff --git a/numpy-1.6.2/numpy/random/mtrand/randomkit.h b/numpy-1.6.2/numpy/random/mtrand/randomkit.h deleted file mode 100644 index e049488eeb..0000000000 --- a/numpy-1.6.2/numpy/random/mtrand/randomkit.h +++ /dev/null @@ -1,189 +0,0 @@ -/* Random kit 1.3 */ - -/* - * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */ - -/* - * Typical use: - * - * { - * rk_state state; - * unsigned long seed = 1, random_value; - * - * rk_seed(seed, &state); // Initialize the RNG - * ... - * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] - * } - * - * Instead of rk_seed, you can use rk_randomseed which will get a random seed - * from /dev/urandom (or the clock, if /dev/urandom is unavailable): - * - * { - * rk_state state; - * unsigned long random_value; - * - * rk_randomseed(&state); // Initialize the RNG with a random seed - * ... - * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] - * } - */ - -/* - * Useful macro: - * RK_DEV_RANDOM: the device used for random seeding. - * defaults to "/dev/urandom" - */ - -#include - -#ifndef _RANDOMKIT_ -#define _RANDOMKIT_ - -#define RK_STATE_LEN 624 - -typedef struct rk_state_ -{ - unsigned long key[RK_STATE_LEN]; - int pos; - int has_gauss; /* !=0: gauss contains a gaussian deviate */ - double gauss; - - /* The rk_state structure has been extended to store the following - * information for the binomial generator. If the input values of n or p - * are different than nsave and psave, then the other parameters will be - * recomputed. 
RTK 2005-09-02 */ - - int has_binomial; /* !=0: following parameters initialized for - binomial */ - double psave; - long nsave; - double r; - double q; - double fm; - long m; - double p1; - double xm; - double xl; - double xr; - double c; - double laml; - double lamr; - double p2; - double p3; - double p4; - -} -rk_state; - -typedef enum { - RK_NOERR = 0, /* no error */ - RK_ENODEV = 1, /* no RK_DEV_RANDOM device */ - RK_ERR_MAX = 2 -} rk_error; - -/* error strings */ -extern char *rk_strerror[RK_ERR_MAX]; - -/* Maximum generated random value */ -#define RK_MAX 0xFFFFFFFFUL - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Initialize the RNG state using the given seed. - */ -extern void rk_seed(unsigned long seed, rk_state *state); - -/* - * Initialize the RNG state using a random seed. - * Uses /dev/random or, when unavailable, the clock (see randomkit.c). - * Returns RK_NOERR when no errors occurs. - * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because - * there is no such device). In this case, the RNG was initialized using the - * clock. - */ -extern rk_error rk_randomseed(rk_state *state); - -/* - * Returns a random unsigned long between 0 and RK_MAX inclusive - */ -extern unsigned long rk_random(rk_state *state); - -/* - * Returns a random long between 0 and LONG_MAX inclusive - */ -extern long rk_long(rk_state *state); - -/* - * Returns a random unsigned long between 0 and ULONG_MAX inclusive - */ -extern unsigned long rk_ulong(rk_state *state); - -/* - * Returns a random unsigned long between 0 and max inclusive. - */ -extern unsigned long rk_interval(unsigned long max, rk_state *state); - -/* - * Returns a random double between 0.0 and 1.0, 1.0 excluded. 
- */ -extern double rk_double(rk_state *state); - -/* - * fill the buffer with size random bytes - */ -extern void rk_fill(void *buffer, size_t size, rk_state *state); - -/* - * fill the buffer with randombytes from the random device - * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is - * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM - * is used instead. This parameter has no effect on Windows. - * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer - * which can take a very long time on quiet systems. - */ -extern rk_error rk_devfill(void *buffer, size_t size, int strong); - -/* - * fill the buffer using rk_devfill if the random device is available and using - * rk_fill if is is not - * parameters have the same meaning as rk_fill and rk_devfill - * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is - */ -extern rk_error rk_altfill(void *buffer, size_t size, int strong, - rk_state *state); - -/* - * return a random gaussian deviate with variance unity and zero mean. - */ -extern double rk_gauss(rk_state *state); - -#ifdef __cplusplus -} -#endif - -#endif /* _RANDOMKIT_ */ diff --git a/numpy-1.6.2/numpy/random/setup.py b/numpy-1.6.2/numpy/random/setup.py deleted file mode 100644 index dde3119b7f..0000000000 --- a/numpy-1.6.2/numpy/random/setup.py +++ /dev/null @@ -1,69 +0,0 @@ -from os.path import join, split, dirname -import os -import sys -from distutils.dep_util import newer -from distutils.msvccompiler import get_build_version as get_msvc_build_version - -def needs_mingw_ftime_workaround(): - # We need the mingw workaround for _ftime if the msvc runtime version is - # 7.1 or above and we build with mingw ... - # ... 
but we can't easily detect compiler version outside distutils command - # context, so we will need to detect in randomkit whether we build with gcc - msver = get_msvc_build_version() - if msver and msver >= 8: - return True - - return False - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, get_mathlibs - config = Configuration('random',parent_package,top_path) - - def generate_libraries(ext, build_dir): - config_cmd = config.get_config_cmd() - libs = get_mathlibs() - tc = testcode_wincrypt() - if config_cmd.try_run(tc): - libs.append('Advapi32') - ext.libraries.extend(libs) - return None - - defs = [] - if needs_mingw_ftime_workaround(): - defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None)) - - libs = [] - # Configure mtrand - config.add_extension('mtrand', - sources=[join('mtrand', x) for x in - ['mtrand.c', 'randomkit.c', 'initarray.c', - 'distributions.c']]+[generate_libraries], - libraries=libs, - depends = [join('mtrand','*.h'), - join('mtrand','*.pyx'), - join('mtrand','*.pxi'), - ], - define_macros = defs, - ) - - config.add_data_files(('.', join('mtrand', 'randomkit.h'))) - config.add_data_dir('tests') - - return config - -def testcode_wincrypt(): - return """\ -/* check to see if _WIN32 is defined */ -int main(int argc, char *argv[]) -{ -#ifdef _WIN32 - return 0; -#else - return 1; -#endif -} -""" - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/random/setupscons.py b/numpy-1.6.2/numpy/random/setupscons.py deleted file mode 100644 index f5342c39ec..0000000000 --- a/numpy-1.6.2/numpy/random/setupscons.py +++ /dev/null @@ -1,40 +0,0 @@ -import glob -from os.path import join, split - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, get_mathlibs - config = Configuration('random',parent_package,top_path) - - source_files = [join('mtrand', i) for i in 
['mtrand.c', - 'mtrand.pyx', - 'numpy.pxi', - 'randomkit.c', - 'randomkit.h', - 'Python.pxi', - 'initarray.c', - 'initarray.h', - 'distributions.c', - 'distributions.h', - ]] - config.add_sconscript('SConstruct', source_files = source_files) - config.add_data_files(('.', join('mtrand', 'randomkit.h'))) - config.add_data_dir('tests') - - return config - -def testcode_wincrypt(): - return """\ -/* check to see if _WIN32 is defined */ -int main(int argc, char *argv[]) -{ -#ifdef _WIN32 - return 0; -#else - return 1; -#endif -} -""" - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.6.2/numpy/random/tests/test_random.py b/numpy-1.6.2/numpy/random/tests/test_random.py deleted file mode 100644 index e276fcd687..0000000000 --- a/numpy-1.6.2/numpy/random/tests/test_random.py +++ /dev/null @@ -1,438 +0,0 @@ -from numpy.testing import TestCase, run_module_suite, assert_,\ - assert_raises -from numpy import random -from numpy.compat import asbytes -import numpy as np - - -class TestMultinomial(TestCase): - def test_basic(self): - random.multinomial(100, [0.2, 0.8]) - - def test_zero_probability(self): - random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) - - def test_int_negative_interval(self): - assert_( -5 <= random.randint(-5,-1) < -1) - x = random.randint(-5,-1,5) - assert_(np.all(-5 <= x)) - assert_(np.all(x < -1)) - - -class TestSetState(TestCase): - def setUp(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() - - def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) - assert_(np.all(old == new)) - - def test_gaussian_reset(self): - """ Make sure the cached every-other-Gaussian is reset. 
- """ - old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_gaussian_reset_in_media_res(self): - """ When the state is saved with a cached Gaussian, make sure the cached - Gaussian is restored. - """ - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_backwards_compatibility(self): - """ Make sure we can accept old state tuples that do not have the cached - Gaussian value. - """ - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) - assert_(np.all(x1 == x2)) - assert_(np.all(x1 == x3)) - - def test_negative_binomial(self): - """ Ensure that the negative binomial results take floating point - arguments without truncation. 
- """ - self.prng.negative_binomial(0.5, 0.5) - -class TestRandomDist(TestCase): - """ Make sure the random distrobution return the correct value for a - given seed - """ - def setUp(self): - self.seed = 1234567890 - - def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) - desired = np.array([[ 0.61879477158567997, 0.59162362775974664], - [ 0.88868358904449662, 0.89165480011560816], - [ 0.4575674820298663 , 0.7781880808593471 ]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) - desired = np.array([[ 1.34016345771863121, 1.73759122771936081], - [ 1.498988344300628 , -0.2286433324536169 ], - [ 2.031033998682787 , 2.17032494605655257]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3,2)) - desired = np.array([[ 31, 3], - [-52, 41], - [-48, -66]]) - np.testing.assert_array_equal(actual, desired) - - def test_random_integers(self): - np.random.seed(self.seed) - actual = np.random.random_integers(-99, 99, size=(3,2)) - desired = np.array([[ 31, 3], - [-52, 41], - [-48, -66]]) - np.testing.assert_array_equal(actual, desired) - - def test_random_sample(self): - np.random.seed(self.seed) - actual = np.random.random_sample((3, 2)) - desired = np.array([[ 0.61879477158567997, 0.59162362775974664], - [ 0.88868358904449662, 0.89165480011560816], - [ 0.4575674820298663 , 0.7781880808593471 ]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) - desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5') - np.testing.assert_equal(actual, desired) - - def test_shuffle(self): - np.random.seed(self.seed) - alist = [1,2,3,4,5,6,7,8,9,0] - np.random.shuffle(alist) - actual = alist - desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] - 
np.testing.assert_array_equal(actual, desired) - - def test_beta(self): - np.random.seed(self.seed) - actual = np.random.beta(.1, .9, size=(3, 2)) - desired = np.array([[ 1.45341850513746058e-02, 5.31297615662868145e-04], - [ 1.85366619058432324e-06, 4.19214516800110563e-03], - [ 1.58405155108498093e-04, 1.26252891949397652e-04]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100.123, .456, size=(3, 2)) - desired = np.array([[37, 43], - [42, 48], - [46, 45]]) - np.testing.assert_array_equal(actual, desired) - - def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) - desired = np.array([[ 63.87858175501090585, 68.68407748911370447], - [ 65.77116116901505904, 47.09686762438974483], - [ 72.3828403199695174 , 74.18408615260374006]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=13) - - def test_dirichlet(self): - np.random.seed(self.seed) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) - desired = np.array([[[ 0.54539444573611562, 0.45460555426388438], - [ 0.62345816822039413, 0.37654183177960598]], - [[ 0.55206000085785778, 0.44793999914214233], - [ 0.58964023305154301, 0.41035976694845688]], - [[ 0.59266909280647828, 0.40733090719352177], - [ 0.56974431743975207, 0.43025568256024799]]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) - desired = np.array([[ 1.08342649775011624, 1.00607889924557314], - [ 2.46628830085216721, 2.49668106809923884], - [ 0.68717433461363442, 1.69175666993575979]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) - desired = np.array([[ 
1.21975394418575878, 1.75135759791559775], - [ 1.44803115017146489, 1.22108959480396262], - [ 1.02176975757740629, 1.34431827623300415]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) - desired = np.array([[ 24.60509188649287182, 28.54993563207210627], - [ 26.13476110204064184, 12.56988482927716078], - [ 31.71863275789960568, 33.30143302795922011]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) - desired = np.array([[ 8, 7], - [17, 17], - [ 5, 12]]) - np.testing.assert_array_equal(actual, desired) - - def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc = .123456789, scale = 2.0, size = (3, 2)) - desired = np.array([[ 0.19591898743416816, 0.34405539668096674], - [-1.4492522252274278 , -1.47374816298446865], - [ 1.10651090478803416, -0.69535848626236174]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) - desired = np.array([[10, 10], - [10, 10], - [ 9, 9]]) - np.testing.assert_array_equal(actual, desired) - - def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[ 0.66599721112760157, 0.52829452552221945], - [ 3.12791959514407125, 3.18202813572992005], - [-0.05391065675859356, 1.74901336242837324]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[ 1.09232835305011444, 0.8648196662399954 ], - [ 4.27818590694950185, 4.33897006346929714], - [-0.21682183359214885, 2.63373365386060332]]) - 
np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) - desired = np.array([[ 16.50698631688883822, 36.54846706092654784], - [ 22.67886599981281748, 0.71617561058995771], - [ 65.72798501792723869, 86.84341601437161273]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=13) - - def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) - desired = np.array([[ 2, 2], - [ 6, 17], - [ 3, 6]]) - np.testing.assert_array_equal(actual, desired) - - def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) - desired = np.array([[[4, 3, 5, 4, 2, 2], - [5, 2, 8, 2, 2, 1]], - [[3, 4, 3, 6, 0, 4], - [2, 1, 4, 3, 6, 4]], - [[4, 4, 2, 5, 2, 3], - [4, 3, 4, 2, 3, 4]]]) - np.testing.assert_array_equal(actual, desired) - - def test_multivariate_normal(self): - np.random.seed(self.seed) - mean= (.123456789, 10) - cov = [[1,0],[1,0]] - size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) - desired = np.array([[[ -1.47027513018564449, 10. ], - [ -1.65915081534845532, 10. ]], - [[ -2.29186329304599745, 10. ], - [ -1.77505606019580053, 10. ]], - [[ -0.54970369430044119, 10. ], - [ 0.29768848031692957, 10. 
]]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_negative_binomial(self): - np.random.seed(self.seed) - actual = np.random.negative_binomial(n = 100, p = .12345, size = (3, 2)) - desired = np.array([[848, 841], - [892, 611], - [779, 647]]) - np.testing.assert_array_equal(actual, desired) - - def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df = 5, nonc = 5, size = (3, 2)) - desired = np.array([[ 23.91905354498517511, 13.35324692733826346], - [ 31.22452661329736401, 16.60047399466177254], - [ 5.03461598262724586, 17.94973089023519464]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum = 5, dfden = 2, nonc = 1, - size = (3, 2)) - desired = np.array([[ 1.40598099674926669, 0.34207973179285761], - [ 3.57715069265772545, 7.92632662577829805], - [ 0.43741599463544162, 1.1774208752428319 ]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_normal(self): - np.random.seed(self.seed) - actual = np.random.normal(loc = .123456789, scale = 2.0, size = (3, 2)) - desired = np.array([[ 2.80378370443726244, 3.59863924443872163], - [ 3.121433477601256 , -0.33382987590723379], - [ 4.18552478636557357, 4.46410668111310471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a =.123456789, size = (3, 2)) - desired = np.array([[ 2.46852460439034849e+03, 1.41286880810518346e+03], - [ 5.28287797029485181e+07, 6.57720981047328785e+07], - [ 1.40840323350391515e+02, 1.98390255135251704e+05]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam = .123456789, size=(3, 2)) - desired = np.array([[0, 0], - [1, 0], - [0, 0]]) - 
np.testing.assert_array_equal(actual, desired) - - def test_poisson_exceptions(self): - lambig = np.iinfo('l').max - lamneg = -1 - assert_raises(ValueError, np.random.poisson, lamneg) - assert_raises(ValueError, np.random.poisson, [lamneg]*10) - assert_raises(ValueError, np.random.poisson, lambig) - assert_raises(ValueError, np.random.poisson, [lambig]*10) - - def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a =.123456789, size = (3, 2)) - desired = np.array([[ 0.02048932883240791, 0.01424192241128213], - [ 0.38446073748535298, 0.39499689943484395], - [ 0.00177699707563439, 0.13115505880863756]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_rayleigh(self): - np.random.seed(self.seed) - actual = np.random.rayleigh(scale = 10, size = (3, 2)) - desired = np.array([[ 13.8882496494248393 , 13.383318339044731 ], - [ 20.95413364294492098, 21.08285015800712614], - [ 11.06066537006854311, 17.35468505778271009]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size = (3, 2)) - desired = np.array([[ 0.77127660196445336, -6.55601161955910605], - [ 0.93582023391158309, -2.07479293013759447], - [-4.74601644297011926, 0.18338989290760804]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size = (3, 2)) - desired = np.array([[ 0.96441739162374596, 0.89556604882105506], - [ 2.1953785836319808 , 2.22243285392490542], - [ 0.6116915921431676 , 1.50592546727413201]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape = 3, size = (3, 2)) - desired = np.array([[ 5.50841531318455058, 6.62953470301903103], - [ 5.93988484943779227, 2.31044849402133989], - [ 
7.54838614231317084, 8.012756093271868 ]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size = (3, 2)) - desired = np.array([[ 1.34016345771863121, 1.73759122771936081], - [ 1.498988344300628 , -0.2286433324536169 ], - [ 2.031033998682787 , 2.17032494605655257]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df = 10, size = (3, 2)) - desired = np.array([[ 0.97140611862659965, -0.08830486548450577], - [ 1.36311143689505321, -0.55317463909867071], - [-0.18473749069684214, 0.61181537341755321]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left = 5.12, mode = 10.23, right = 20.34, - size = (3, 2)) - desired = np.array([[ 12.68117178949215784, 12.4129206149193152 ], - [ 16.20131377335158263, 16.25692138747600524], - [ 11.20400690911820263, 14.4978144835829923 ]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low = 1.23, high=10.54, size = (3, 2)) - desired = np.array([[ 6.99097932346268003, 6.73801597444323974], - [ 9.50364421400426274, 9.53130618907631089], - [ 5.48995325769805476, 8.47493103280052118]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - - def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu = 1.23, kappa = 1.54, size = (3, 2)) - desired = np.array([[ 2.28567572673902042, 2.89163838442285037], - [ 0.38198375564286025, 2.57638023113890746], - [ 1.19153771588353052, 1.83509849681825354]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_wald(self): - np.random.seed(self.seed) - actual = np.random.wald(mean = 1.23, scale 
= 1.54, size = (3, 2)) - desired = np.array([[ 3.82935265715889983, 5.13125249184285526], - [ 0.35045403618358717, 1.50832396872003538], - [ 0.24124319895843183, 0.22031101461955038]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a = 1.23, size = (3, 2)) - desired = np.array([[ 0.97097342648766727, 0.91422896443565516], - [ 1.89517770034962929, 1.91414357960479564], - [ 0.67057783752390987, 1.39494046635066793]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a = 1.23, size = (3, 2)) - desired = np.array([[66, 29], - [ 1, 1], - [ 3, 13]]) - np.testing.assert_array_equal(actual, desired) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/random/tests/test_regression.py b/numpy-1.6.2/numpy/random/tests/test_regression.py deleted file mode 100644 index f5039c09a9..0000000000 --- a/numpy-1.6.2/numpy/random/tests/test_regression.py +++ /dev/null @@ -1,57 +0,0 @@ -from numpy.testing import TestCase, run_module_suite, assert_,\ - assert_array_equal -from numpy import random -import numpy as np - - -class TestRegression(TestCase): - - def test_VonMises_range(self): - """Make sure generated random variables are in [-pi, pi]. - - Regression test for ticket #986. 
- """ - for mu in np.linspace(-7., 7., 5): - r = random.mtrand.vonmises(mu,1,50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - - def test_hypergeometric_range(self) : - """Test for ticket #921""" - assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) - - def test_logseries_convergence(self) : - """Test for ticket #923""" - N = 1000 - np.random.seed(0) - rvsn = np.random.logseries(0.8, size=N) - # these two frequency counts should be close to theoretical - # numbers with this large sample - # theoretical large N result is 0.49706795 - freq = np.sum(rvsn == 1) / float(N) - msg = "Frequency was %f, should be > 0.45" % freq - assert_(freq > 0.45, msg) - # theoretical large N result is 0.19882718 - freq = np.sum(rvsn == 2) / float(N) - msg = "Frequency was %f, should be < 0.23" % freq - assert_(freq < 0.23, msg) - - def test_permutation_longs(self): - np.random.seed(1234) - a = np.random.permutation(12) - np.random.seed(1234) - b = np.random.permutation(12L) - assert_array_equal(a, b) - - def test_hypergeometric_range(self) : - """Test for ticket #1690""" - lmax = np.iinfo('l').max - lmin = np.iinfo('l').min - try: - random.randint(lmin, lmax) - except: - raise AssertionError - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/setup.py b/numpy-1.6.2/numpy/setup.py deleted file mode 100644 index c55c85a256..0000000000 --- a/numpy-1.6.2/numpy/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('numpy',parent_package,top_path) - config.add_subpackage('distutils') - config.add_subpackage('testing') - config.add_subpackage('f2py') - config.add_subpackage('core') - config.add_subpackage('lib') - config.add_subpackage('oldnumeric') - config.add_subpackage('numarray') - config.add_subpackage('fft') - 
config.add_subpackage('linalg') - config.add_subpackage('random') - config.add_subpackage('ma') - config.add_subpackage('matrixlib') - config.add_subpackage('compat') - config.add_subpackage('polynomial') - config.add_subpackage('doc') - config.add_data_dir('doc') - config.add_data_dir('tests') - config.make_config_py() # installs __config__.py - return config - -if __name__ == '__main__': - print('This is the wrong setup.py file to run') diff --git a/numpy-1.6.2/numpy/setupscons.py b/numpy-1.6.2/numpy/setupscons.py deleted file mode 100644 index 59fa57a4de..0000000000 --- a/numpy-1.6.2/numpy/setupscons.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python -from os.path import join as pjoin - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.misc_util import scons_generate_config_py - - pkgname = 'numpy' - config = Configuration(pkgname, parent_package, top_path, - setup_name = 'setupscons.py') - config.add_subpackage('distutils') - config.add_subpackage('testing') - config.add_subpackage('f2py') - config.add_subpackage('core') - config.add_subpackage('lib') - config.add_subpackage('oldnumeric') - config.add_subpackage('numarray') - config.add_subpackage('fft') - config.add_subpackage('linalg') - config.add_subpackage('random') - config.add_subpackage('ma') - config.add_subpackage('matrixlib') - config.add_subpackage('compat') - config.add_subpackage('polynomial') - config.add_subpackage('doc') - config.add_data_dir('doc') - config.add_data_dir('tests') - - def add_config(*args, **kw): - # Generate __config__, handle inplace issues. 
- if kw['scons_cmd'].inplace: - target = pjoin(kw['pkg_name'], '__config__.py') - else: - target = pjoin(kw['scons_cmd'].build_lib, kw['pkg_name'], - '__config__.py') - scons_generate_config_py(target) - config.add_sconscript(None, post_hook = add_config) - - return config - -if __name__ == '__main__': - print 'This is the wrong setup.py file to run' diff --git a/numpy-1.6.2/numpy/testing/__init__.py b/numpy-1.6.2/numpy/testing/__init__.py deleted file mode 100644 index f391c80537..0000000000 --- a/numpy-1.6.2/numpy/testing/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Common test support for all numpy test scripts. - -This single module should provide all the common functionality for numpy tests -in a single location, so that test scripts can just import it and work right -away. -""" - -from unittest import TestCase - -import decorators as dec -from utils import * -from numpytest import * -from nosetester import NoseTester as Tester -from nosetester import run_module_suite -test = Tester().test diff --git a/numpy-1.6.2/numpy/testing/decorators.py b/numpy-1.6.2/numpy/testing/decorators.py deleted file mode 100644 index 68335cc185..0000000000 --- a/numpy-1.6.2/numpy/testing/decorators.py +++ /dev/null @@ -1,275 +0,0 @@ -""" -Decorators for labeling and modifying behavior of test objects. - -Decorators that merely return a modified version of the original -function object are straightforward. Decorators that return a new -function object need to use -:: - - nose.tools.make_decorator(original_function)(decorator) - -in returning the decorator, in order to preserve meta-data such as -function name, setup and teardown functions and so on - see -``nose.tools`` for more information. - -""" -import warnings -import sys - -from numpy.testing.utils import \ - WarningManager, WarningMessage - -def slow(t): - """ - Label a test as 'slow'. 
- - The exact definition of a slow test is obviously both subjective and - hardware-dependent, but in general any individual test that requires more - than a second or two should be labeled as slow (the whole suite consits of - thousands of tests, so even a second is significant). - - Parameters - ---------- - t : callable - The test to label as slow. - - Returns - ------- - t : callable - The decorated test `t`. - - Examples - -------- - The `numpy.testing` module includes ``import decorators as dec``. - A test can be decorated as slow like this:: - - from numpy.testing import * - - @dec.slow - def test_big(self): - print 'Big, slow test' - - """ - - t.slow = True - return t - -def setastest(tf=True): - """ - Signals to nose that this function is or is not a test. - - Parameters - ---------- - tf : bool - If True, specifies that the decorated callable is a test. - If False, specifies that the decorated callable is not a test. - Default is True. - - Notes - ----- - This decorator can't use the nose namespace, because it can be - called from a non-test module. See also ``istest`` and ``nottest`` in - ``nose.tools``. - - Examples - -------- - `setastest` can be used in the following way:: - - from numpy.testing.decorators import setastest - - @setastest(False) - def func_with_test_in_name(arg1, arg2): - pass - - """ - def set_test(t): - t.__test__ = tf - return t - return set_test - -def skipif(skip_condition, msg=None): - """ - Make function raise SkipTest exception if a given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - skip_condition : bool or callable - Flag to determine whether to skip the decorated test. - msg : str, optional - Message to give on raising a SkipTest exception. Default is None. 
- - Returns - ------- - decorator : function - Decorator which, when applied to a function, causes SkipTest - to be raised when `skip_condition` is True, and the function - to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - - def skip_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - - # Allow for both boolean or callable skip conditions. - if callable(skip_condition): - skip_val = lambda : skip_condition() - else: - skip_val = lambda : skip_condition - - def get_msg(func,msg=None): - """Skip message with information about function being skipped.""" - if msg is None: - out = 'Test skipped due to test condition' - else: - out = '\n'+msg - - return "Skipping test: %s%s" % (func.__name__,out) - - # We need to define *two* skippers because Python doesn't allow both - # return with value and yield inside the same function. - def skipper_func(*args, **kwargs): - """Skipper for normal test functions.""" - if skip_val(): - raise nose.SkipTest(get_msg(f,msg)) - else: - return f(*args, **kwargs) - - def skipper_gen(*args, **kwargs): - """Skipper for test generators.""" - if skip_val(): - raise nose.SkipTest(get_msg(f,msg)) - else: - for x in f(*args, **kwargs): - yield x - - # Choose the right skipper to use when building the actual decorator. - if nose.util.isgenerator(f): - skipper = skipper_gen - else: - skipper = skipper_func - - return nose.tools.make_decorator(f)(skipper) - - return skip_decorator - - -def knownfailureif(fail_condition, msg=None): - """ - Make function raise KnownFailureTest exception if given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. 
This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - fail_condition : bool or callable - Flag to determine whether to mark the decorated test as a known - failure (if True) or not (if False). - msg : str, optional - Message to give on raising a KnownFailureTest exception. - Default is None. - - Returns - ------- - decorator : function - Decorator, which, when applied to a function, causes SkipTest - to be raised when `skip_condition` is True, and the function - to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - if msg is None: - msg = 'Test skipped due to known failure' - - # Allow for both boolean or callable known failure conditions. - if callable(fail_condition): - fail_val = lambda : fail_condition() - else: - fail_val = lambda : fail_condition - - def knownfail_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - from noseclasses import KnownFailureTest - def knownfailer(*args, **kwargs): - if fail_val(): - raise KnownFailureTest, msg - else: - return f(*args, **kwargs) - return nose.tools.make_decorator(f)(knownfailer) - - return knownfail_decorator - -def deprecated(conditional=True): - """ - Filter deprecation warnings while running the test suite. - - This decorator can be used to filter DeprecationWarning's, to avoid - printing them during the test suite run, while checking that the test - actually raises a DeprecationWarning. - - Parameters - ---------- - conditional : bool or callable, optional - Flag to determine whether to mark test as deprecated or not. If the - condition is a callable, it is used at runtime to dynamically make the - decision. Default is True. 
- - Returns - ------- - decorator : function - The `deprecated` decorator itself. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - def deprecate_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - from noseclasses import KnownFailureTest - - def _deprecated_imp(*args, **kwargs): - # Poor man's replacement for the with statement - ctx = WarningManager(record=True) - l = ctx.__enter__() - warnings.simplefilter('always') - try: - f(*args, **kwargs) - if not len(l) > 0: - raise AssertionError("No warning raised when calling %s" - % f.__name__) - if not l[0].category is DeprecationWarning: - raise AssertionError("First warning for %s is not a " \ - "DeprecationWarning( is %s)" % (f.__name__, l[0])) - finally: - ctx.__exit__() - - if callable(conditional): - cond = conditional() - else: - cond = conditional - if cond: - return nose.tools.make_decorator(f)(_deprecated_imp) - else: - return f - return deprecate_decorator diff --git a/numpy-1.6.2/numpy/testing/noseclasses.py b/numpy-1.6.2/numpy/testing/noseclasses.py deleted file mode 100644 index f97ea91264..0000000000 --- a/numpy-1.6.2/numpy/testing/noseclasses.py +++ /dev/null @@ -1,351 +0,0 @@ -# These classes implement a doctest runner plugin for nose, a "known failure" -# error class, and a customized TestProgram for NumPy. - -# Because this module imports nose directly, it should not -# be used except by nosetester.py to avoid a general NumPy -# dependency on nose. 
- -import os -import doctest - -import nose -from nose.plugins import doctests as npd -from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin -from nose.plugins.base import Plugin -from nose.util import src, getpackage -import numpy -from nosetester import get_package_name -import inspect - -_doctest_ignore = ['generate_numpy_api.py', 'scons_support.py', - 'setupscons.py', 'setup.py'] - -# Some of the classes in this module begin with 'Numpy' to clearly distinguish -# them from the plethora of very similar names from nose/unittest/doctest - - -#----------------------------------------------------------------------------- -# Modified version of the one in the stdlib, that fixes a python bug (doctests -# not found in extension modules, http://bugs.python.org/issue3158) -class NumpyDocTestFinder(doctest.DocTestFinder): - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. - """ - if module is None: - #print '_fm C1' # dbg - return True - elif inspect.isfunction(object): - #print '_fm C2' # dbg - return module.__dict__ is object.func_globals - elif inspect.isbuiltin(object): - #print '_fm C2-1' # dbg - return module.__name__ == object.__module__ - elif inspect.isclass(object): - #print '_fm C3' # dbg - return module.__name__ == object.__module__ - elif inspect.ismethod(object): - # This one may be a bug in cython that fails to correctly set the - # __module__ attribute of methods, but since the same error is easy - # to make by extension code writers, having this safety in place - # isn't such a bad idea - #print '_fm C3-1' # dbg - return module.__name__ == object.im_class.__module__ - elif inspect.getmodule(object) is not None: - #print '_fm C4' # dbg - #print 'C4 mod',module,'obj',object # dbg - return module is inspect.getmodule(object) - elif hasattr(object, '__module__'): - #print '_fm C5' # dbg - return module.__name__ == object.__module__ - elif isinstance(object, property): - #print '_fm 
C6' # dbg - return True # [XX] no way not be sure. - else: - raise ValueError("object must be a class or function") - - - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - - doctest.DocTestFinder._find(self,tests, obj, name, module, - source_lines, globs, seen) - - # Below we re-run pieces of the above method with manual modifications, - # because the original code is buggy and fails to correctly identify - # doctests in extension modules. - - # Local shorthands - from inspect import isroutine, isclass, ismodule, isfunction, \ - ismethod - - # Look for tests in a module's contained objects. - if ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - valname1 = '%s.%s' % (name, valname) - if ( (isroutine(val) or isclass(val)) - and self._from_module(module, val) ): - - self._find(tests, val, valname1, module, source_lines, - globs, seen) - - - # Look for tests in a class's contained objects. - if isclass(obj) and self._recurse: - #print 'RECURSE into class:',obj # dbg - for valname, val in obj.__dict__.items(): - #valname1 = '%s.%s' % (name, valname) # dbg - #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg - # Special handling for staticmethod/classmethod. - if isinstance(val, staticmethod): - val = getattr(obj, valname) - if isinstance(val, classmethod): - val = getattr(obj, valname).im_func - - # Recurse to methods, properties, and nested classes. - if ((isfunction(val) or isclass(val) or - ismethod(val) or isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - -class NumpyDocTestCase(npd.DocTestCase): - """Proxy for DocTestCase: provides an address() method that - returns the correct address for the doctest case. Otherwise - acts as a proxy to the test case. 
To provide hints for address(), - an obj may also be passed -- this will be used as the test object - for purposes of determining the test address, if it is provided. - """ - - # doctests loaded via find(obj) omit the module name - # so we need to override id, __repr__ and shortDescription - # bonus: this will squash a 2.3 vs 2.4 incompatiblity - def id(self): - name = self._dt_test.name - filename = self._dt_test.filename - if filename is not None: - pk = getpackage(filename) - if pk is not None and not name.startswith(pk): - name = "%s.%s" % (pk, name) - return name - - -# second-chance checker; if the default comparison doesn't -# pass, then see if the expected output string contains flags that -# tell us to ignore the output -class NumpyOutputChecker(doctest.OutputChecker): - def check_output(self, want, got, optionflags): - ret = doctest.OutputChecker.check_output(self, want, got, - optionflags) - if not ret: - if "#random" in want: - return True - - # it would be useful to normalize endianness so that - # bigendian machines don't fail all the tests (and there are - # actually some bigendian examples in the doctests). 
Let's try - # making them all little endian - got = got.replace("'>","'<") - want= want.replace("'>","'<") - - # try to normalize out 32 and 64 bit default int sizes - for sz in [4,8]: - got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') - 'numpy' - - """ - - fullpath = filepath[:] - pkg_name = [] - while 'site-packages' in filepath or 'dist-packages' in filepath: - filepath, p2 = os.path.split(filepath) - if p2 in ('site-packages', 'dist-packages'): - break - pkg_name.append(p2) - - # if package name determination failed, just default to numpy/scipy - if not pkg_name: - if 'scipy' in fullpath: - return 'scipy' - else: - return 'numpy' - - # otherwise, reverse to get correct order and return - pkg_name.reverse() - - # don't include the outer egg directory - if pkg_name[0].endswith('.egg'): - pkg_name.pop(0) - - return '.'.join(pkg_name) - -def import_nose(): - """ Import nose only when needed. - """ - fine_nose = True - minimum_nose_version = (0,10,0) - try: - import nose - from nose.tools import raises - except ImportError: - fine_nose = False - else: - if nose.__versioninfo__ < minimum_nose_version: - fine_nose = False - - if not fine_nose: - msg = 'Need nose >= %d.%d.%d for tests - see ' \ - 'http://somethingaboutorange.com/mrl/projects/nose' % \ - minimum_nose_version - - raise ImportError(msg) - - return nose - -def run_module_suite(file_to_run = None): - if file_to_run is None: - f = sys._getframe(1) - file_to_run = f.f_locals.get('__file__', None) - assert file_to_run is not None - - import_nose().run(argv=['',file_to_run]) - -# contructs NoseTester method docstrings -def _docmethod(meth, testtype): - if not meth.__doc__: - return - - test_header = \ - '''Parameters - ---------- - label : {'fast', 'full', '', attribute identifer} - Identifies the %(testtype)ss to run. This can be a string to - pass to the nosetests executable with the '-A' option, or one of - several special values. 
- Special values are: - 'fast' - the default - which corresponds to nosetests -A option - of 'not slow'. - 'full' - fast (as above) and slow %(testtype)ss as in the - no -A option to nosetests - same as '' - None or '' - run all %(testtype)ss - attribute_identifier - string passed directly to nosetests as '-A' - verbose : integer - verbosity value for test outputs, 1-10 - extra_argv : list - List with any extra args to pass to nosetests''' \ - % {'testtype': testtype} - - meth.__doc__ = meth.__doc__ % {'test_header':test_header} - - -class NoseTester(object): - """ - Nose test runner. - - This class is made available as numpy.testing.Tester, and a test function - is typically added to a package's __init__.py like so:: - - from numpy.testing import Tester - test = Tester().test - - Calling this test function finds and runs all tests associated with the - package and all its sub-packages. - - Attributes - ---------- - package_path : str - Full path to the package to test. - package_name : str - Name of the package to test. - - Parameters - ---------- - package : module, str or None - The package to test. If a string, this should be the full path to - the package. If None (default), `package` is set to the module from - which `NoseTester` is initialized. 
- - """ - - def __init__(self, package=None): - ''' Test class init - - Parameters - ---------- - package : string or module - If string, gives full path to package - If None, extract calling module path - Default is None - ''' - package_name = None - if package is None: - f = sys._getframe(1) - package_path = f.f_locals.get('__file__', None) - assert package_path is not None - package_path = os.path.dirname(package_path) - package_name = f.f_locals.get('__name__', None) - elif isinstance(package, type(os)): - package_path = os.path.dirname(package.__file__) - package_name = getattr(package, '__name__', None) - else: - package_path = str(package) - - self.package_path = package_path - - # find the package name under test; this name is used to limit coverage - # reporting (if enabled) - if package_name is None: - package_name = get_package_name(package_path) - self.package_name = package_name - - def _test_argv(self, label, verbose, extra_argv): - ''' Generate argv for nosetest command - - %(test_header)s - ''' - argv = [__file__, self.package_path, '-s'] - if label and label != 'full': - if not isinstance(label, basestring): - raise TypeError, 'Selection label should be a string' - if label == 'fast': - label = 'not slow' - argv += ['-A', label] - argv += ['--verbosity', str(verbose)] - if extra_argv: - argv += extra_argv - return argv - - def _show_system_info(self): - nose = import_nose() - - import numpy - print "NumPy version %s" % numpy.__version__ - npdir = os.path.dirname(numpy.__file__) - print "NumPy is installed in %s" % npdir - - if 'scipy' in self.package_name: - import scipy - print "SciPy version %s" % scipy.__version__ - spdir = os.path.dirname(scipy.__file__) - print "SciPy is installed in %s" % spdir - - pyversion = sys.version.replace('\n','') - print "Python version %s" % pyversion - print "nose version %d.%d.%d" % nose.__versioninfo__ - - - def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False): - 
""" - Run tests for module using nose. - - This method does the heavy lifting for the `test` method. It takes all - the same arguments, for details see `test`. - - See Also - -------- - test - - """ - - # if doctests is in the extra args, remove it and set the doctest - # flag so the NumPy doctester is used instead - if extra_argv and '--with-doctest' in extra_argv: - extra_argv.remove('--with-doctest') - doctests = True - - argv = self._test_argv(label, verbose, extra_argv) - if doctests: - argv += ['--with-numpydoctest'] - - if coverage: - argv+=['--cover-package=%s' % self.package_name, '--with-coverage', - '--cover-tests', '--cover-erase'] - - # bypass these samples under distutils - argv += ['--exclude','f2py_ext'] - argv += ['--exclude','f2py_f90_ext'] - argv += ['--exclude','gen_ext'] - argv += ['--exclude','pyrex_ext'] - argv += ['--exclude','swig_ext'] - - nose = import_nose() - - # construct list of plugins - import nose.plugins.builtin - from noseclasses import NumpyDoctest, KnownFailure - plugins = [NumpyDoctest(), KnownFailure()] - plugins += [p() for p in nose.plugins.builtin.plugins] - return argv, plugins - - def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, - coverage=False): - """ - Run tests for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the tests to run. This can be a string to pass to the - nosetests executable with the '-A' option, or one of - several special values. - Special values are: - 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - 'full' - fast (as above) and slow tests as in the - 'no -A' option to nosetests - this is the same as ''. - None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. 
- extra_argv : list, optional - List with any extra arguments to pass to nosetests. - doctests : bool, optional - If True, run doctests in module. Default is False. - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - (This requires the `coverage module: - `_). - - Returns - ------- - result : object - Returns the result of running the tests as a - ``nose.result.TextTestResult`` object. - - Notes - ----- - Each NumPy module exposes `test` in its namespace to run all tests for it. - For example, to run all tests for numpy.lib:: - - >>> np.lib.test() - - Examples - -------- - >>> result = np.lib.test() - Running unit tests for numpy.lib - ... - Ran 976 tests in 3.933s - - OK - - >>> result.errors - [] - >>> result.knownfail - [] - - """ - - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - import utils - utils.verbose = verbose - - if doctests: - print "Running unit tests and doctests for %s" % self.package_name - else: - print "Running unit tests for %s" % self.package_name - - self._show_system_info() - - # reset doctest state on every run - import doctest - doctest.master = None - - argv, plugins = self.prepare_test_args(label, verbose, extra_argv, - doctests, coverage) - from noseclasses import NumpyTestProgram - t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) - return t.result - - def bench(self, label='fast', verbose=1, extra_argv=None): - """ - Run benchmarks for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the tests to run. This can be a string to pass to the - nosetests executable with the '-A' option, or one of - several special values. - Special values are: - 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - 'full' - fast (as above) and slow tests as in the - 'no -A' option to nosetests - this is the same as ''. 
- None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - - Returns - ------- - success : bool - Returns True if running the benchmarks works, False if an error - occurred. - - Notes - ----- - Benchmarks are like tests, but have names starting with "bench" instead - of "test", and can be found under the "benchmarks" sub-directory of the - module. - - Each NumPy module exposes `bench` in its namespace to run all benchmarks - for it. - - Examples - -------- - >>> success = np.lib.bench() - Running benchmarks for numpy.lib - ... - using 562341 items: - unique: - 0.11 - unique1d: - 0.11 - ratio: 1.0 - nUnique: 56230 == 56230 - ... - OK - - >>> success - True - - """ - - print "Running benchmarks for %s" % self.package_name - self._show_system_info() - - argv = self._test_argv(label, verbose, extra_argv) - argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] - - nose = import_nose() - return nose.run(argv=argv) - - # generate method docstrings - _docmethod(_test_argv, '(testtype)') - _docmethod(test, 'test') - _docmethod(bench, 'benchmark') - - -######################################################################## -# Doctests for NumPy-specific nose/doctest modifications - -# try the #random directive on the output line -def check_random_directive(): - ''' - >>> 2+2 - #random: may vary on your system - ''' - -# check the implicit "import numpy as np" -def check_implicit_np(): - ''' - >>> np.array([1,2,3]) - array([1, 2, 3]) - ''' - -# there's some extraneous whitespace around the correct responses -def check_whitespace_enabled(): - ''' - # whitespace after the 3 - >>> 1+2 - 3 - - # whitespace before the 7 - >>> 3+4 - 7 - ''' diff --git a/numpy-1.6.2/numpy/testing/nulltester.py b/numpy-1.6.2/numpy/testing/nulltester.py deleted file mode 
100644 index 50d5484f65..0000000000 --- a/numpy-1.6.2/numpy/testing/nulltester.py +++ /dev/null @@ -1,15 +0,0 @@ -''' Null tester to signal nose tests disabled - -Merely returns error reporting lack of nose package or version number -below requirements. - -See pkgtester, nosetester modules - -''' - -class NullTester(object): - def test(self, labels=None, *args, **kwargs): - raise ImportError, \ - 'Need nose >=0.10 for tests - see %s' % \ - 'http://somethingaboutorange.com/mrl/projects/nose' - bench = test diff --git a/numpy-1.6.2/numpy/testing/numpytest.py b/numpy-1.6.2/numpy/testing/numpytest.py deleted file mode 100644 index 683df7a010..0000000000 --- a/numpy-1.6.2/numpy/testing/numpytest.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import sys -import traceback - -__all__ = ['IgnoreException', 'importall',] - -DEBUG=0 -get_frame = sys._getframe - -class IgnoreException(Exception): - "Ignoring this exception due to disabled feature" - - -def output_exception(printstream = sys.stdout): - try: - type, value, tb = sys.exc_info() - info = traceback.extract_tb(tb) - #this is more verbose - #traceback.print_exc() - filename, lineno, function, text = info[-1] # last line only - msg = "%s:%d: %s: %s (in %s)\n" % ( - filename, lineno, type.__name__, str(value), function) - printstream.write(msg) - finally: - type = value = tb = None # clean up - return - -def importall(package): - """ - Try recursively to import all subpackages under package. 
- """ - if isinstance(package,str): - package = __import__(package) - - package_name = package.__name__ - package_dir = os.path.dirname(package.__file__) - for subpackage_name in os.listdir(package_dir): - subdir = os.path.join(package_dir, subpackage_name) - if not os.path.isdir(subdir): - continue - if not os.path.isfile(os.path.join(subdir,'__init__.py')): - continue - name = package_name+'.'+subpackage_name - try: - exec 'import %s as m' % (name) - except Exception, msg: - print 'Failed importing %s: %s' %(name, msg) - continue - importall(m) - return diff --git a/numpy-1.6.2/numpy/testing/print_coercion_tables.py b/numpy-1.6.2/numpy/testing/print_coercion_tables.py deleted file mode 100755 index 7b5320d7eb..0000000000 --- a/numpy-1.6.2/numpy/testing/print_coercion_tables.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -"""Prints type-coercion tables for the built-in NumPy types""" - -import numpy as np - -# Generic object that can be added, but doesn't do anything else -class GenericObject: - def __init__(self, v): - self.v = v - - def __add__(self, other): - return self - - def __radd__(self, other): - return self - - dtype = np.dtype('O') - -def print_cancast_table(ntypes): - print 'X', - for char in ntypes: print char, - print - for row in ntypes: - print row, - for col in ntypes: - print int(np.can_cast(row, col)), - print - -def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): - print '+', - for char in ntypes: print char, - print - for row in ntypes: - if row == 'O': - rowtype = GenericObject - else: - rowtype = np.obj2sctype(row) - - print row, - for col in ntypes: - if col == 'O': - coltype = GenericObject - else: - coltype = np.obj2sctype(col) - try: - if firstarray: - rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) - else: - rowvalue = rowtype(inputfirstvalue) - colvalue = coltype(inputsecondvalue) - if use_promote_types: - char = np.promote_types(rowvalue.dtype, 
colvalue.dtype).char - else: - value = np.add(rowvalue,colvalue) - if isinstance(value, np.ndarray): - char = value.dtype.char - else: - char = np.dtype(type(value)).char - except ValueError: - char = '!' - except OverflowError: - char = '@' - except TypeError: - char = '#' - print char, - print - -print "can cast" -print_cancast_table(np.typecodes['All']) -print -print "In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'" -print -print "scalar + scalar" -print_coercion_table(np.typecodes['All'], 0, 0, False) -print -print "scalar + neg scalar" -print_coercion_table(np.typecodes['All'], 0, -1, False) -print -print "array + scalar" -print_coercion_table(np.typecodes['All'], 0, 0, True) -print -print "array + neg scalar" -print_coercion_table(np.typecodes['All'], 0, -1, True) -print -print "promote_types" -print_coercion_table(np.typecodes['All'], 0, 0, False, True) diff --git a/numpy-1.6.2/numpy/testing/setup.py b/numpy-1.6.2/numpy/testing/setup.py deleted file mode 100755 index 6d8fc85c50..0000000000 --- a/numpy-1.6.2/numpy/testing/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testing',parent_package,top_path) - - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(maintainer = "NumPy Developers", - maintainer_email = "numpy-dev@numpy.org", - description = "NumPy test module", - url = "http://www.numpy.org", - license = "NumPy License (BSD Style)", - configuration = configuration, - ) diff --git a/numpy-1.6.2/numpy/testing/setupscons.py b/numpy-1.6.2/numpy/testing/setupscons.py deleted file mode 100755 index ad248d27fa..0000000000 --- a/numpy-1.6.2/numpy/testing/setupscons.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import 
Configuration - config = Configuration('testing',parent_package,top_path) - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(maintainer = "NumPy Developers", - maintainer_email = "numpy-dev@numpy.org", - description = "NumPy test module", - url = "http://www.numpy.org", - license = "NumPy License (BSD Style)", - configuration = configuration, - ) diff --git a/numpy-1.6.2/numpy/testing/tests/test_decorators.py b/numpy-1.6.2/numpy/testing/tests/test_decorators.py deleted file mode 100644 index 504971e612..0000000000 --- a/numpy-1.6.2/numpy/testing/tests/test_decorators.py +++ /dev/null @@ -1,156 +0,0 @@ -import numpy as np -from numpy.testing import * -from numpy.testing.noseclasses import KnownFailureTest -import nose - -def test_slow(): - @dec.slow - def slow_func(x,y,z): - pass - - assert(slow_func.slow) - -def test_setastest(): - @dec.setastest() - def f_default(a): - pass - - @dec.setastest(True) - def f_istest(a): - pass - - @dec.setastest(False) - def f_isnottest(a): - pass - - assert(f_default.__test__) - assert(f_istest.__test__) - assert(not f_isnottest.__test__) - -class DidntSkipException(Exception): - pass - -def test_skip_functions_hardcoded(): - @dec.skipif(True) - def f1(x): - raise DidntSkipException - - try: - f1('a') - except DidntSkipException: - raise Exception('Failed to skip') - except nose.SkipTest: - pass - - @dec.skipif(False) - def f2(x): - raise DidntSkipException - - try: - f2('a') - except DidntSkipException: - pass - except nose.SkipTest: - raise Exception('Skipped when not expected to') - - -def test_skip_functions_callable(): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.skipif(skip_tester) - def f1(x): - raise DidntSkipException - - try: - skip_flag = 'skip me!' 
- f1('a') - except DidntSkipException: - raise Exception('Failed to skip') - except nose.SkipTest: - pass - - @dec.skipif(skip_tester) - def f2(x): - raise DidntSkipException - - try: - skip_flag = 'five is right out!' - f2('a') - except DidntSkipException: - pass - except nose.SkipTest: - raise Exception('Skipped when not expected to') - - -def test_skip_generators_hardcoded(): - @dec.knownfailureif(True, "This test is known to fail") - def g1(x): - for i in xrange(x): - yield i - - try: - for j in g1(10): - pass - except KnownFailureTest: - pass - else: - raise Exception('Failed to mark as known failure') - - - @dec.knownfailureif(False, "This test is NOT known to fail") - def g2(x): - for i in xrange(x): - yield i - raise DidntSkipException('FAIL') - - try: - for j in g2(10): - pass - except KnownFailureTest: - raise Exception('Marked incorretly as known failure') - except DidntSkipException: - pass - - -def test_skip_generators_callable(): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.knownfailureif(skip_tester, "This test is known to fail") - def g1(x): - for i in xrange(x): - yield i - - try: - skip_flag = 'skip me!' 
- for j in g1(10): - pass - except KnownFailureTest: - pass - else: - raise Exception('Failed to mark as known failure') - - - @dec.knownfailureif(skip_tester, "This test is NOT known to fail") - def g2(x): - for i in xrange(x): - yield i - raise DidntSkipException('FAIL') - - try: - skip_flag = 'do not skip' - for j in g2(10): - pass - except KnownFailureTest: - raise Exception('Marked incorretly as known failure') - except DidntSkipException: - pass - - -if __name__ == '__main__': - run_module_suite() - - - - diff --git a/numpy-1.6.2/numpy/testing/tests/test_utils.py b/numpy-1.6.2/numpy/testing/tests/test_utils.py deleted file mode 100644 index 067782dc0e..0000000000 --- a/numpy-1.6.2/numpy/testing/tests/test_utils.py +++ /dev/null @@ -1,465 +0,0 @@ -import warnings -import sys - -import numpy as np -from numpy.testing import * -import unittest - -class _GenericTest(object): - def _test_equal(self, a, b): - self._assert_func(a, b) - - def _test_not_equal(self, a, b): - try: - self._assert_func(a, b) - passed = True - except AssertionError: - pass - else: - raise AssertionError("a and b are found equal but are not") - - def test_array_rank1_eq(self): - """Test two equal array of rank 1 are found equal.""" - a = np.array([1, 2]) - b = np.array([1, 2]) - - self._test_equal(a, b) - - def test_array_rank1_noteq(self): - """Test two different array of rank 1 are found not equal.""" - a = np.array([1, 2]) - b = np.array([2, 2]) - - self._test_not_equal(a, b) - - def test_array_rank2_eq(self): - """Test two equal array of rank 2 are found equal.""" - a = np.array([[1, 2], [3, 4]]) - b = np.array([[1, 2], [3, 4]]) - - self._test_equal(a, b) - - def test_array_diffshape(self): - """Test two arrays with different shapes are found not equal.""" - a = np.array([1, 2]) - b = np.array([[1, 2], [1, 2]]) - - self._test_not_equal(a, b) - - def test_objarray(self): - """Test object arrays.""" - a = np.array([1, 1], dtype=np.object) - self._test_equal(a, 1) - -class 
TestArrayEqual(_GenericTest, unittest.TestCase): - def setUp(self): - self._assert_func = assert_array_equal - - def test_generic_rank1(self): - """Test rank 1 array for all dtypes.""" - def foo(t): - a = np.empty(2, t) - a.fill(1) - b = a.copy() - c = a.copy() - c.fill(0) - self._test_equal(a, b) - self._test_not_equal(c, b) - - # Test numeric types and object - for t in '?bhilqpBHILQPfdgFDG': - foo(t) - - # Test strings - for t in ['S1', 'U1']: - foo(t) - - def test_generic_rank3(self): - """Test rank 3 array for all dtypes.""" - def foo(t): - a = np.empty((4, 2, 3), t) - a.fill(1) - b = a.copy() - c = a.copy() - c.fill(0) - self._test_equal(a, b) - self._test_not_equal(c, b) - - # Test numeric types and object - for t in '?bhilqpBHILQPfdgFDG': - foo(t) - - # Test strings - for t in ['S1', 'U1']: - foo(t) - - def test_nan_array(self): - """Test arrays with nan values in them.""" - a = np.array([1, 2, np.nan]) - b = np.array([1, 2, np.nan]) - - self._test_equal(a, b) - - c = np.array([1, 2, 3]) - self._test_not_equal(c, b) - - def test_string_arrays(self): - """Test two arrays with different shapes are found not equal.""" - a = np.array(['floupi', 'floupa']) - b = np.array(['floupi', 'floupa']) - - self._test_equal(a, b) - - c = np.array(['floupipi', 'floupa']) - - self._test_not_equal(c, b) - - def test_recarrays(self): - """Test record arrays.""" - a = np.empty(2, [('floupi', np.float), ('floupa', np.float)]) - a['floupi'] = [1, 2] - a['floupa'] = [1, 2] - b = a.copy() - - self._test_equal(a, b) - - c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)]) - c['floupipi'] = a['floupi'].copy() - c['floupa'] = a['floupa'].copy() - - self._test_not_equal(c, b) - -class TestEqual(TestArrayEqual): - def setUp(self): - self._assert_func = assert_equal - - def test_nan_items(self): - self._assert_func(np.nan, np.nan) - self._assert_func([np.nan], [np.nan]) - self._test_not_equal(np.nan, [np.nan]) - self._test_not_equal(np.nan, 1) - - def test_inf_items(self): - 
self._assert_func(np.inf, np.inf) - self._assert_func([np.inf], [np.inf]) - self._test_not_equal(np.inf, [np.inf]) - - def test_non_numeric(self): - self._assert_func('ab', 'ab') - self._test_not_equal('ab', 'abb') - - def test_complex_item(self): - self._assert_func(complex(1, 2), complex(1, 2)) - self._assert_func(complex(1, np.nan), complex(1, np.nan)) - self._test_not_equal(complex(1, np.nan), complex(1, 2)) - self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) - self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) - - def test_negative_zero(self): - self._test_not_equal(np.PZERO, np.NZERO) - - def test_complex(self): - x = np.array([complex(1, 2), complex(1, np.nan)]) - y = np.array([complex(1, 2), complex(1, 2)]) - self._assert_func(x, x) - self._test_not_equal(x, y) - -class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): - def setUp(self): - self._assert_func = assert_array_almost_equal - - def test_simple(self): - x = np.array([1234.2222]) - y = np.array([1234.2223]) - - self._assert_func(x, y, decimal=3) - self._assert_func(x, y, decimal=4) - self.assertRaises(AssertionError, - lambda: self._assert_func(x, y, decimal=5)) - - def test_nan(self): - anan = np.array([np.nan]) - aone = np.array([1]) - ainf = np.array([np.inf]) - self._assert_func(anan, anan) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, aone)) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, ainf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(ainf, anan)) - - def test_inf(self): - a = np.array([[1., 2.], [3., 4.]]) - b = a.copy() - a[0,0] = np.inf - self.assertRaises(AssertionError, - lambda : self._assert_func(a, b)) - -class TestAlmostEqual(_GenericTest, unittest.TestCase): - def setUp(self): - self._assert_func = assert_almost_equal - - def test_nan_item(self): - self._assert_func(np.nan, np.nan) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.nan, 1)) - 
self.assertRaises(AssertionError, - lambda : self._assert_func(np.nan, np.inf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.inf, np.nan)) - - def test_inf_item(self): - self._assert_func(np.inf, np.inf) - self._assert_func(-np.inf, -np.inf) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.inf, 1)) - - def test_simple_item(self): - self._test_not_equal(1, 2) - - def test_complex_item(self): - self._assert_func(complex(1, 2), complex(1, 2)) - self._assert_func(complex(1, np.nan), complex(1, np.nan)) - self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) - self._test_not_equal(complex(1, np.nan), complex(1, 2)) - self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) - self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) - - def test_complex(self): - x = np.array([complex(1, 2), complex(1, np.nan)]) - z = np.array([complex(1, 2), complex(np.nan, 1)]) - y = np.array([complex(1, 2), complex(1, 2)]) - self._assert_func(x, x) - self._test_not_equal(x, y) - self._test_not_equal(x, z) - -class TestApproxEqual(unittest.TestCase): - def setUp(self): - self._assert_func = assert_approx_equal - - def test_simple_arrays(self): - x = np.array([1234.22]) - y = np.array([1234.23]) - - self._assert_func(x, y, significant=5) - self._assert_func(x, y, significant=6) - self.assertRaises(AssertionError, - lambda: self._assert_func(x, y, significant=7)) - - def test_simple_items(self): - x = 1234.22 - y = 1234.23 - - self._assert_func(x, y, significant=4) - self._assert_func(x, y, significant=5) - self._assert_func(x, y, significant=6) - self.assertRaises(AssertionError, - lambda: self._assert_func(x, y, significant=7)) - - def test_nan_array(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, aone)) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, ainf)) - 
self.assertRaises(AssertionError, - lambda : self._assert_func(ainf, anan)) - - def test_nan_items(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, aone)) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, ainf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(ainf, anan)) - -class TestRaises(unittest.TestCase): - def setUp(self): - class MyException(Exception): - pass - - self.e = MyException - - def raises_exception(self, e): - raise e - - def does_not_raise_exception(self): - pass - - def test_correct_catch(self): - f = raises(self.e)(self.raises_exception)(self.e) - - def test_wrong_exception(self): - try: - f = raises(self.e)(self.raises_exception)(RuntimeError) - except RuntimeError: - return - else: - raise AssertionError("should have caught RuntimeError") - - def test_catch_no_raise(self): - try: - f = raises(self.e)(self.does_not_raise_exception)() - except AssertionError: - return - else: - raise AssertionError("should have raised an AssertionError") - -class TestWarns(unittest.TestCase): - def test_warn(self): - def f(): - warnings.warn("yo") - - before_filters = sys.modules['warnings'].filters[:] - assert_warns(UserWarning, f) - after_filters = sys.modules['warnings'].filters - - # Check that the warnings state is unchanged - assert_equal(before_filters, after_filters, - "assert_warns does not preserver warnings state") - - def test_warn_wrong_warning(self): - def f(): - warnings.warn("yo", DeprecationWarning) - - failed = False - filters = sys.modules['warnings'].filters[:] - try: - try: - # Should raise an AssertionError - assert_warns(UserWarning, f) - failed = True - except AssertionError: - pass - finally: - sys.modules['warnings'].filters = filters - - if failed: - raise AssertionError("wrong warning caught by assert_warn") - -class TestAssertAllclose(unittest.TestCase): - def 
test_simple(self): - x = 1e-3 - y = 1e-9 - - assert_allclose(x, y, atol=1) - self.assertRaises(AssertionError, assert_allclose, x, y) - - a = np.array([x, y, x, y]) - b = np.array([x, y, x, x]) - - assert_allclose(a, b, atol=1) - self.assertRaises(AssertionError, assert_allclose, a, b) - - b[-1] = y * (1 + 1e-8) - assert_allclose(a, b) - self.assertRaises(AssertionError, assert_allclose, a, b, - rtol=1e-9) - - assert_allclose(6, 10, rtol=0.5) - self.assertRaises(AssertionError, assert_allclose, 10, 6, rtol=0.5) - - -class TestArrayAlmostEqualNulp(unittest.TestCase): - def test_simple(self): - dev = np.random.randn(10) - x = np.ones(10) - y = x + dev * np.finfo(np.float64).eps - assert_array_almost_equal_nulp(x, y, nulp=2 * np.max(dev)) - - def test_simple2(self): - x = np.random.randn(10) - y = 2 * x - def failure(): - return assert_array_almost_equal_nulp(x, y, - nulp=1000) - self.assertRaises(AssertionError, failure) - - def test_big_float32(self): - x = (1e10 * np.random.randn(10)).astype(np.float32) - y = x + 1 - assert_array_almost_equal_nulp(x, y, nulp=1000) - - def test_big_float64(self): - x = 1e10 * np.random.randn(10) - y = x + 1 - def failure(): - assert_array_almost_equal_nulp(x, y, nulp=1000) - self.assertRaises(AssertionError, failure) - - def test_complex(self): - x = np.random.randn(10) + 1j * np.random.randn(10) - y = x + 1 - def failure(): - assert_array_almost_equal_nulp(x, y, nulp=1000) - self.assertRaises(AssertionError, failure) - - def test_complex2(self): - x = np.random.randn(10) - y = np.array(x, np.complex) + 1e-16 * np.random.randn(10) - - assert_array_almost_equal_nulp(x, y, nulp=1000) - -class TestULP(unittest.TestCase): - def test_equal(self): - x = np.random.randn(10) - assert_array_max_ulp(x, x, maxulp=0) - - def test_single(self): - # Generate 1 + small deviation, check that adding eps gives a few UNL - x = np.ones(10).astype(np.float32) - x += 0.01 * np.random.randn(10).astype(np.float32) - eps = np.finfo(np.float32).eps - 
assert_array_max_ulp(x, x+eps, maxulp=20) - - def test_double(self): - # Generate 1 + small deviation, check that adding eps gives a few UNL - x = np.ones(10).astype(np.float64) - x += 0.01 * np.random.randn(10).astype(np.float64) - eps = np.finfo(np.float64).eps - assert_array_max_ulp(x, x+eps, maxulp=200) - - def test_inf(self): - for dt in [np.float32, np.float64]: - inf = np.array([np.inf]).astype(dt) - big = np.array([np.finfo(dt).max]) - assert_array_max_ulp(inf, big, maxulp=200) - - def test_nan(self): - # Test that nan is 'far' from small, tiny, inf, max and min - for dt in [np.float32, np.float64]: - if dt == np.float32: - maxulp = 1e6 - else: - maxulp = 1e12 - inf = np.array([np.inf]).astype(dt) - nan = np.array([np.nan]).astype(dt) - big = np.array([np.finfo(dt).max]) - tiny = np.array([np.finfo(dt).tiny]) - zero = np.array([np.PZERO]).astype(dt) - nzero = np.array([np.NZERO]).astype(dt) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, inf, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, big, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, tiny, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, zero, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, nzero, - maxulp=maxulp)) -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.6.2/numpy/testing/utils.py b/numpy-1.6.2/numpy/testing/utils.py deleted file mode 100644 index 01ce31c4a4..0000000000 --- a/numpy-1.6.2/numpy/testing/utils.py +++ /dev/null @@ -1,1475 +0,0 @@ -""" -Utility function to facilitate testing. 
-""" - -import os -import sys -import re -import operator -import types -import warnings -from nosetester import import_nose - -__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal', - 'assert_array_equal', 'assert_array_less', 'assert_string_equal', - 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', - 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', - 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', - 'assert_', 'assert_array_almost_equal_nulp', - 'assert_array_max_ulp', 'assert_warns', 'assert_allclose'] - -verbose = 0 - -def assert_(val, msg='') : - """ - Assert that works in release mode. - - The Python built-in ``assert`` does not work when executing code in - optimized mode (the ``-O`` flag) - no byte-code is generated for it. - - For documentation on usage, refer to the Python documentation. - - """ - if not val : - raise AssertionError(msg) - -def gisnan(x): - """like isnan, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isnan and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isnan - st = isnan(x) - if isinstance(st, types.NotImplementedType): - raise TypeError("isnan not supported for this type") - return st - -def gisfinite(x): - """like isfinite, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isfinite and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. 
- - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isfinite, seterr - err = seterr(invalid='ignore') - try: - st = isfinite(x) - if isinstance(st, types.NotImplementedType): - raise TypeError("isfinite not supported for this type") - finally: - seterr(**err) - return st - -def gisinf(x): - """like isinf, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isinf and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isinf, seterr - err = seterr(invalid='ignore') - try: - st = isinf(x) - if isinstance(st, types.NotImplementedType): - raise TypeError("isinf not supported for this type") - finally: - seterr(**err) - return st - -def rand(*args): - """Returns an array of random numbers with the given shape. - - This only uses the standard library, so it is useful for testing purposes. - """ - import random - from numpy.core import zeros, float64 - results = zeros(args, float64) - f = results.flat - for i in range(len(f)): - f[i] = random.random() - return results - -if sys.platform[:5]=='linux': - def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()), - _load_time=[]): - """ Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. See man 5 proc. """ - import time - if not _load_time: - _load_time.append(time.time()) - try: - f=open(_proc_pid_stat,'r') - l = f.readline().split(' ') - f.close() - return int(l[13]) - except: - return int(100*(time.time()-_load_time[0])) - - def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())): - """ Return virtual memory size in bytes of the running python. 
- """ - try: - f=open(_proc_pid_stat,'r') - l = f.readline().split(' ') - f.close() - return int(l[22]) - except: - return -else: - # os.getpid is not in all platforms available. - # Using time is safe but inaccurate, especially when process - # was suspended or sleeping. - def jiffies(_load_time=[]): - """ Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. [Emulation with time.time]. """ - import time - if not _load_time: - _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) - def memusage(): - """ Return memory usage of running python. [Not implemented]""" - raise NotImplementedError - -if os.name=='nt' and sys.version[:3] > '2.3': - # Code "stolen" from enthought/debug/memusage.py - def GetPerformanceAttributes(object, counter, instance = None, - inum=-1, format = None, machine=None): - # NOTE: Many counters require 2 samples to give accurate results, - # including "% Processor Time" (as by definition, at any instant, a - # thread's CPU usage is either 0 or 100). To read counters like this, - # you should copy this function, but keep the counter open, and call - # CollectQueryData() each time you need to know. 
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp - # My older explanation for this was that the "AddCounter" process forced - # the CPU to 100%, but the above makes more sense :) - import win32pdh - if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) ) - hq = win32pdh.OpenQuery() - try: - hc = win32pdh.AddCounter(hq, path) - try: - win32pdh.CollectQueryData(hq) - type, val = win32pdh.GetFormattedCounterValue(hc, format) - return val - finally: - win32pdh.RemoveCounter(hc) - finally: - win32pdh.CloseQuery(hq) - - def memusage(processName="python", instance=0): - # from win32pdhutil, part of the win32all package - import win32pdh - return GetPerformanceAttributes("Process", "Virtual Bytes", - processName, instance, - win32pdh.PDH_FMT_LONG, None) - -def build_err_msg(arrays, err_msg, header='Items are not equal:', - verbose=True, - names=('ACTUAL', 'DESIRED')): - msg = ['\n' + header] - if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): - msg = [msg[0] + ' ' + err_msg] - else: - msg.append(err_msg) - if verbose: - for i, a in enumerate(arrays): - try: - r = repr(a) - except: - r = '[repr failed]' - if r.count('\n') > 3: - r = '\n'.join(r.splitlines()[:3]) - r += '...' - msg.append(' %s: %s' % (names[i], r)) - return '\n'.join(msg) - -def assert_equal(actual,desired,err_msg='',verbose=True): - """ - Raise an assertion if two objects are not equal. - - Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), - check that all elements of these objects are equal. An exception is raised - at the first conflicting values. - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. 
- - Raises - ------ - AssertionError - If actual and desired are not equal. - - Examples - -------- - >>> np.testing.assert_equal([4,5], [4,6]) - ... - : - Items are not equal: - item=1 - ACTUAL: 5 - DESIRED: 6 - - """ - if isinstance(desired, dict): - if not isinstance(actual, dict) : - raise AssertionError(repr(type(actual))) - assert_equal(len(actual),len(desired),err_msg,verbose) - for k,i in desired.items(): - if k not in actual : - raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k,err_msg), verbose) - return - if isinstance(desired, (list,tuple)) and isinstance(actual, (list,tuple)): - assert_equal(len(actual),len(desired),err_msg,verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg), verbose) - return - from numpy.core import ndarray, isscalar, signbit - from numpy.lib import iscomplexobj, real, imag - if isinstance(actual, ndarray) or isinstance(desired, ndarray): - return assert_array_equal(actual, desired, err_msg, verbose) - msg = build_err_msg([actual, desired], err_msg, verbose=verbose) - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_equal(actualr, desiredr) - assert_equal(actuali, desiredi) - except AssertionError: - raise AssertionError(msg) - - # Inf/nan/negative zero handling - try: - # isscalar test to check cases such as [np.nan] != np.nan - if isscalar(desired) != isscalar(actual): - raise AssertionError(msg) - - # If one of desired/actual is not finite, 
handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - isdesnan = gisnan(desired) - isactnan = gisnan(actual) - if isdesnan or isactnan: - if not (isdesnan and isactnan): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - elif desired == 0 and actual == 0: - if not signbit(desired) == signbit(actual): - raise AssertionError(msg) - # If TypeError or ValueError raised while using isnan and co, just handle - # as before - except (TypeError, ValueError, NotImplementedError): - pass - if desired != actual : - raise AssertionError(msg) - -def print_assert_equal(test_string,actual,desired): - """ - Test if two objects are equal, and print an error message if test fails. - - The test is performed with ``actual == desired``. - - Parameters - ---------- - test_string : str - The message supplied to AssertionError. - actual : object - The object to test for equality against `desired`. - desired : object - The expected result. - - Examples - -------- - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) - Traceback (most recent call last): - ... - AssertionError: Test XYZ of func xyz failed - ACTUAL: - [0, 1] - DESIRED: - [0, 2] - - """ - import pprint - try: - assert(actual == desired) - except AssertionError: - import cStringIO - msg = cStringIO.StringIO() - msg.write(test_string) - msg.write(' failed\nACTUAL: \n') - pprint.pprint(actual,msg) - msg.write('DESIRED: \n') - pprint.pprint(desired,msg) - raise AssertionError(msg.getvalue()) - -def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): - """ - Raise an assertion if two items are not equal up to desired precision. - - .. 
note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``. - - Given two objects (numbers or ndarrays), check that all elements of these - objects are almost equal. An exception is raised at conflicting values. - For ndarrays this delegates to assert_array_almost_equal - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - decimal : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> import numpy.testing as npt - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) - ... - : - Items are not equal: - ACTUAL: 2.3333333333333002 - DESIRED: 2.3333333399999998 - - >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), - ... np.array([1.0,2.33333334]), decimal=9) - ... - : - Arrays are not almost equal - - (mismatch 50.0%) - x: array([ 1. , 2.33333333]) - y: array([ 1. 
, 2.33333334]) - - """ - from numpy.core import ndarray - from numpy.lib import iscomplexobj, real, imag - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - msg = build_err_msg([actual, desired], err_msg, verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal)) - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_almost_equal(actualr, desiredr, decimal=decimal) - assert_almost_equal(actuali, desiredi, decimal=decimal) - except AssertionError: - raise AssertionError(msg) - - if isinstance(actual, (ndarray, tuple, list)) \ - or isinstance(desired, (ndarray, tuple, list)): - return assert_array_almost_equal(actual, desired, decimal, err_msg) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - except (NotImplementedError, TypeError): - pass - if round(abs(desired - actual),decimal) != 0 : - raise AssertionError(msg) - - -def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): - """ - Raise an assertion if two items are not equal up to significant digits. - - .. 
note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - Given two numbers, check that they are approximately equal. - Approximately equal is defined as the number of significant digits - that agree. - - Parameters - ---------- - actual : scalar - The object to check. - desired : scalar - The expected object. - significant : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, - significant=8) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, - significant=8) - ... 
- : - Items are not equal to 8 significant digits: - ACTUAL: 1.234567e-021 - DESIRED: 1.2345672000000001e-021 - - the evaluated condition that raises the exception is - - >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) - True - - """ - import numpy as np - actual, desired = map(float, (actual, desired)) - if desired==actual: - return - # Normalized the numbers to be in range (-10.0,10.0) - # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) - err = np.seterr(invalid='ignore') - try: - scale = 0.5*(np.abs(desired) + np.abs(actual)) - scale = np.power(10,np.floor(np.log10(scale))) - finally: - np.seterr(**err) - - try: - sc_desired = desired/scale - except ZeroDivisionError: - sc_desired = 0.0 - try: - sc_actual = actual/scale - except ZeroDivisionError: - sc_actual = 0.0 - msg = build_err_msg([actual, desired], err_msg, - header='Items are not equal to %d significant digits:' % - significant, - verbose=verbose) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - except (TypeError, NotImplementedError): - pass - if np.abs(sc_desired - sc_actual) >= np.power(10.,-(significant-1)) : - raise AssertionError(msg) - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, - header=''): - from numpy.core import array, isnan, isinf, any - x = array(x, copy=False, subok=True) - y = array(y, copy=False, subok=True) - - def isnumber(x): - return x.dtype.char in '?bhilqpBHILQPfdgFDG' - - def chk_same_position(x_id, y_id, hasval='nan'): - """Handling nan/inf: check that x and y have the nan/inf at the same - locations.""" - try: - assert_array_equal(x_id, y_id) - except 
AssertionError: - msg = build_err_msg([x, y], - err_msg + '\nx and y %s location mismatch:' \ - % (hasval), verbose=verbose, header=header, - names=('x', 'y')) - raise AssertionError(msg) - - try: - cond = (x.shape==() or y.shape==()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + '\n(shapes %s, %s mismatch)' % (x.shape, - y.shape), - verbose=verbose, header=header, - names=('x', 'y')) - if not cond : - raise AssertionError(msg) - - if (isnumber(x) and isnumber(y)) and (any(isnan(x)) or any(isnan(y))): - x_id = isnan(x) - y_id = isnan(y) - chk_same_position(x_id, y_id, hasval='nan') - # If only one item, it was a nan, so just return - if x.size == y.size == 1: - return - val = comparison(x[~x_id], y[~y_id]) - elif (isnumber(x) and isnumber(y)) and (any(isinf(x)) or any(isinf(y))): - x_id = isinf(x) - y_id = isinf(y) - chk_same_position(x_id, y_id, hasval='inf') - # If only one item, it was a inf, so just return - if x.size == y.size == 1: - return - val = comparison(x[~x_id], y[~y_id]) - else: - val = comparison(x,y) - - if isinstance(val, bool): - cond = val - reduced = [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % (match,), - verbose=verbose, header=header, - names=('x', 'y')) - if not cond : - raise AssertionError(msg) - except ValueError, e: - header = 'error during assertion:\n%s\n\n%s' % (e, header) - msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, - names=('x', 'y')) - raise ValueError(msg) - -def assert_array_equal(x, y, err_msg='', verbose=True): - """ - Raise an assertion if two array_like objects are not equal. - - Given two array_like objects, check that the shape is equal and all - elements of these objects are equal. An exception is raised at - shape mismatch or conflicting values. 
In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if - both objects have NaNs in the same positions. - - The usual caution for verifying equality with floating point numbers is - advised. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - The first assert does not raise an exception: - - >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], - ... [np.exp(0),2.33333, np.nan]) - - Assert fails with numerical inprecision with floats: - - >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan]) - ... - : - AssertionError: - Arrays are not equal - - (mismatch 50.0%) - x: array([ 1. , 3.14159265, NaN]) - y: array([ 1. , 3.14159265, NaN]) - - Use `assert_allclose` or one of the nulp (number of floating point values) - functions for these cases instead: - - >>> np.testing.assert_allclose([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan], - ... rtol=1e-10, atol=0) - - """ - assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, - verbose=verbose, header='Arrays are not equal') - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """ - Raise an assertion if two objects are not equal up to desired precision. - - .. 
note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test verifies identical shapes and verifies values with - ``abs(desired-actual) < 0.5 * 10**(-decimal)``. - - Given two array_like objects, check that the shape is equal and all - elements of these objects are almost equal. An exception is raised at - shape mismatch or conflicting values. In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if - both objects have NaNs in the same positions. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - decimal : int, optional - Desired precision, default is 6. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - the first assert does not raise an exception - - >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], - [1.0,2.333,np.nan]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33339,np.nan], decimal=5) - ... - : - AssertionError: - Arrays are not almost equal - - (mismatch 50.0%) - x: array([ 1. , 2.33333, NaN]) - y: array([ 1. , 2.33339, NaN]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33333, 5], decimal=5) - : - ValueError: - Arrays are not almost equal - x: array([ 1. , 2.33333, NaN]) - y: array([ 1. , 2.33333, 5. 
]) - - """ - from numpy.core import around, number, float_ - from numpy.core.numerictypes import issubdtype - from numpy.core.fromnumeric import any as npany - def compare(x, y): - try: - if npany(gisinf(x)) or npany( gisinf(y)): - xinfid = gisinf(x) - yinfid = gisinf(y) - if not xinfid == yinfid: - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - z = abs(x-y) - if not issubdtype(z.dtype, number): - z = z.astype(float_) # handle object arrays - return around(z, decimal) <= 10.0**(-decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal)) - -def assert_array_less(x, y, err_msg='', verbose=True): - """ - Raise an assertion if two array_like objects are not ordered by less than. - - Given two array_like objects, check that the shape is equal and all - elements of the first object are strictly smaller than those of the - second object. An exception is raised at shape mismatch or incorrectly - ordered values. Shape mismatch does not raise if an object has zero - dimension. In contrast to the standard usage in numpy, NaNs are - compared, no assertion is raised if both objects have NaNs in the same - positions. - - - - Parameters - ---------- - x : array_like - The smaller object to check. - y : array_like - The larger object to compare. - err_msg : string - The error message to be printed in case of failure. - verbose : bool - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. 
- - See Also - -------- - assert_array_equal: tests objects for equality - assert_array_almost_equal: test objects for equality up to precision - - - - Examples - -------- - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) - ... - : - Arrays are not less-ordered - (mismatch 50.0%) - x: array([ 1., 1., NaN]) - y: array([ 1., 2., NaN]) - - >>> np.testing.assert_array_less([1.0, 4.0], 3) - ... - : - Arrays are not less-ordered - (mismatch 50.0%) - x: array([ 1., 4.]) - y: array(3) - - >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) - ... - : - Arrays are not less-ordered - (shapes (3,), (1,) mismatch) - x: array([ 1., 2., 3.]) - y: array([4]) - - """ - assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, - verbose=verbose, - header='Arrays are not less-ordered') - -def runstring(astr, dict): - exec astr in dict - -def assert_string_equal(actual, desired): - """ - Test if two strings are equal. - - If the given strings are equal, `assert_string_equal` does nothing. - If they are not equal, an AssertionError is raised, and the diff - between the strings is shown. - - Parameters - ---------- - actual : str - The string to test for equality against the expected string. - desired : str - The expected string. - - Examples - -------- - >>> np.testing.assert_string_equal('abc', 'abc') - >>> np.testing.assert_string_equal('abc', 'abcd') - Traceback (most recent call last): - File "", line 1, in - ... - AssertionError: Differences in strings: - - abc+ abcd? 
+ - - """ - # delay import of difflib to reduce startup time - import difflib - - if not isinstance(actual, str) : - raise AssertionError(`type(actual)`) - if not isinstance(desired, str): - raise AssertionError(`type(desired)`) - if re.match(r'\A'+desired+r'\Z', actual, re.M): return - diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) - diff_list = [] - while diff: - d1 = diff.pop(0) - if d1.startswith(' '): - continue - if d1.startswith('- '): - l = [d1] - d2 = diff.pop(0) - if d2.startswith('? '): - l.append(d2) - d2 = diff.pop(0) - if not d2.startswith('+ ') : - raise AssertionError(`d2`) - l.append(d2) - d3 = diff.pop(0) - if d3.startswith('? '): - l.append(d3) - else: - diff.insert(0, d3) - if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): - continue - diff_list.extend(l) - continue - raise AssertionError(`d1`) - if not diff_list: - return - msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() - if actual != desired : - raise AssertionError(msg) - - -def rundocs(filename=None, raise_on_error=True): - """ - Run doctests found in the given file. - - By default `rundocs` raises an AssertionError on failure. - - Parameters - ---------- - filename : str - The path to the file for which the doctests are run. - raise_on_error : bool - Whether to raise an AssertionError when a doctest fails. Default is - True. - - Notes - ----- - The doctests can be run by the user/developer by adding the ``doctests`` - argument to the ``test()`` call. 
For example, to run all tests (including - doctests) for `numpy.lib`:: - - >>> np.lib.test(doctests=True) - - """ - import doctest, imp - if filename is None: - f = sys._getframe(1) - filename = f.f_globals['__file__'] - name = os.path.splitext(os.path.basename(filename))[0] - path = [os.path.dirname(filename)] - file, pathname, description = imp.find_module(name, path) - try: - m = imp.load_module(name, file, pathname, description) - finally: - file.close() - - tests = doctest.DocTestFinder().find(m) - runner = doctest.DocTestRunner(verbose=False) - - msg = [] - if raise_on_error: - out = lambda s: msg.append(s) - else: - out = None - - for test in tests: - runner.run(test, out=out) - - if runner.failures > 0 and raise_on_error: - raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) - - -def raises(*args,**kwargs): - nose = import_nose() - return nose.tools.raises(*args,**kwargs) - -def assert_raises(*args,**kwargs): - """ - assert_raises(exception_class, callable, *args, **kwargs) - - Fail unless an exception of class exception_class is thrown - by callable when invoked with arguments args and keyword - arguments kwargs. If a different type of exception is - thrown, it will not be caught, and the test case will be - deemed to have suffered an error, exactly as for an - unexpected exception. - - """ - nose = import_nose() - return nose.tools.assert_raises(*args,**kwargs) - -def decorate_methods(cls, decorator, testmatch=None): - """ - Apply a decorator to all methods in a class matching a regular expression. - - The given decorator is applied to all public methods of `cls` that are - matched by the regular expression `testmatch` - (``testmatch.search(methodname)``). Methods that are private, i.e. start - with an underscore, are ignored. - - Parameters - ---------- - cls : class - Class whose methods to decorate. - decorator : function - Decorator to apply to methods - testmatch : compiled regexp or str, optional - The regular expression. 
Default value is None, in which case the - nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) - is used. - If `testmatch` is a string, it is compiled to a regular expression - first. - - """ - if testmatch is None: - testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) - else: - testmatch = re.compile(testmatch) - cls_attr = cls.__dict__ - - # delayed import to reduce startup time - from inspect import isfunction - - methods = filter(isfunction, cls_attr.values()) - for function in methods: - try: - if hasattr(function, 'compat_func_name'): - funcname = function.compat_func_name - else: - funcname = function.__name__ - except AttributeError: - # not a function - continue - if testmatch.search(funcname) and not funcname.startswith('_'): - setattr(cls, funcname, decorator(function)) - return - - -def measure(code_str,times=1,label=None): - """ - Return elapsed time for executing code in the namespace of the caller. - - The supplied code string is compiled with the Python builtin ``compile``. - The precision of the timing is 10 milli-seconds. If the code will execute - fast on this timescale, it can be executed many times to get reasonable - timing accuracy. - - Parameters - ---------- - code_str : str - The code to be timed. - times : int, optional - The number of times the code is executed. Default is 1. The code is - only compiled once. - label : str, optional - A label to identify `code_str` with. This is passed into ``compile`` - as the second argument (for run-time error messages). - - Returns - ------- - elapsed : float - Total elapsed time in seconds for executing `code_str` `times` times. - - Examples - -------- - >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', - ... 
times=times) - >>> print "Time for a single execution : ", etime / times, "s" - Time for a single execution : 0.005 s - - """ - frame = sys._getframe(1) - locs,globs = frame.f_locals,frame.f_globals - - code = compile(code_str, - 'Test name: %s ' % label, - 'exec') - i = 0 - elapsed = jiffies() - while i < times: - i += 1 - exec code in globs,locs - elapsed = jiffies() - elapsed - return 0.01*elapsed - -def _assert_valid_refcount(op): - """ - Check that ufuncs don't mishandle refcount of object `1`. - Used in a few regression tests. - """ - import numpy as np - a = np.arange(100 * 100) - b = np.arange(100*100).reshape(100, 100) - c = b - - i = 1 - - rc = sys.getrefcount(i) - for j in range(15): - d = op(b,c) - - assert(sys.getrefcount(i) >= rc) - -def assert_allclose(actual, desired, rtol=1e-7, atol=0, - err_msg='', verbose=True): - """ - Raise an assertion if two objects are not equal up to desired tolerance. - - The test is equivalent to ``allclose(actual, desired, rtol, atol)``. - It compares the difference between `actual` and `desired` to - ``atol + rtol * abs(desired)``. - - Parameters - ---------- - actual : array_like - Array obtained. - desired : array_like - Array desired. - rtol : float, optional - Relative tolerance. - atol : float, optional - Absolute tolerance. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. 
- - See Also - -------- - assert_array_almost_equal_nulp, assert_array_max_ulp - - Examples - -------- - >>> x = [1e-5, 1e-3, 1e-1] - >>> y = np.arccos(np.cos(x)) - >>> assert_allclose(x, y, rtol=1e-5, atol=0) - - """ - import numpy as np - def compare(x, y): - return np.allclose(x, y, rtol=rtol, atol=atol) - actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) - assert_array_compare(compare, actual, desired, err_msg=str(err_msg), - verbose=verbose, header=header) - -def assert_array_almost_equal_nulp(x, y, nulp=1): - """ - Compare two arrays relatively to their spacing. - - This is a relatively robust method to compare two arrays whose amplitude - is variable. - - Parameters - ---------- - x, y : array_like - Input arrays. - nulp : int, optional - The maximum number of unit in the last place for tolerance (see Notes). - Default is 1. - - Returns - ------- - None - - Raises - ------ - AssertionError - If the spacing between `x` and `y` for one or more elements is larger - than `nulp`. - - See Also - -------- - assert_array_max_ulp : Check that all items of arrays differ in at most - N Units in the Last Place. - spacing : Return the distance between x and the nearest adjacent number. - - Notes - ----- - An assertion is raised if the following condition is not met:: - - abs(x - y) <= nulps * spacing(max(abs(x), abs(y))) - - Examples - -------- - >>> x = np.array([1., 1e-10, 1e-20]) - >>> eps = np.finfo(x.dtype).eps - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) - - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) - ------------------------------------------------------------ - Traceback (most recent call last): - ... 
- AssertionError: X and Y are not equal to 1 ULP (max is 2) - - """ - import numpy as np - ax = np.abs(x) - ay = np.abs(y) - ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): - if np.iscomplexobj(x) or np.iscomplexobj(y): - msg = "X and Y are not equal to %d ULP" % nulp - else: - max_nulp = np.max(nulp_diff(x, y)) - msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) - raise AssertionError(msg) - -def assert_array_max_ulp(a, b, maxulp=1, dtype=None): - """ - Check that all items of arrays differ in at most N Units in the Last Place. - - Parameters - ---------- - a, b : array_like - Input arrays to be compared. - maxulp : int, optional - The maximum number of units in the last place that elements of `a` and - `b` can differ. Default is 1. - dtype : dtype, optional - Data-type to convert `a` and `b` to if given. Default is None. - - Returns - ------- - ret : ndarray - Array containing number of representable floating point numbers between - items in `a` and `b`. - - Raises - ------ - AssertionError - If one or more elements differ by more than `maxulp`. - - See Also - -------- - assert_array_almost_equal_nulp : Compare two arrays relatively to their - spacing. - - Examples - -------- - >>> a = np.linspace(0., 1., 100) - >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) - - """ - import numpy as np - ret = nulp_diff(a, b, dtype) - if not np.all(ret <= maxulp): - raise AssertionError("Arrays are not almost equal up to %g ULP" % \ - maxulp) - return ret - -def nulp_diff(x, y, dtype=None): - """For each item in x and y, return the number of representable floating - points between them. - - Parameters - ---------- - x : array_like - first input array - y : array_like - second input array - - Returns - ------- - nulp: array_like - number of representable floating point numbers between each item in x - and y. 
- - Examples - -------- - # By definition, epsilon is the smallest number such as 1 + eps != 1, so - # there should be exactly one ULP between 1 and 1 + eps - >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) - 1.0 - """ - import numpy as np - if dtype: - x = np.array(x, dtype=dtype) - y = np.array(y, dtype=dtype) - else: - x = np.array(x) - y = np.array(y) - - t = np.common_type(x, y) - if np.iscomplexobj(x) or np.iscomplexobj(y): - raise NotImplementedError("_nulp not implemented for complex array") - - x = np.array(x, dtype=t) - y = np.array(y, dtype=t) - - if not x.shape == y.shape: - raise ValueError("x and y do not have the same shape: %s - %s" % \ - (x.shape, y.shape)) - - def _diff(rx, ry, vdt): - diff = np.array(rx-ry, dtype=vdt) - return np.abs(diff) - - rx = integer_repr(x) - ry = integer_repr(y) - return _diff(rx, ry, t) - -def _integer_repr(x, vdt, comp): - # Reinterpret binary representation of the float as sign-magnitude: - # take into account two-complement representation - # See also - # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm - rx = x.view(vdt) - if not (rx.size == 1): - rx[rx < 0] = comp - rx[rx<0] - else: - if rx < 0: - rx = comp - rx - - return rx - -def integer_repr(x): - """Return the signed-magnitude interpretation of the binary representation of - x.""" - import numpy as np - if x.dtype == np.float32: - return _integer_repr(x, np.int32, np.int32(-2**31)) - elif x.dtype == np.float64: - return _integer_repr(x, np.int64, np.int64(-2**63)) - else: - raise ValueError("Unsupported dtype %s" % x.dtype) - -# The following two classes are copied from python 2.6 warnings module (context -# manager) -class WarningMessage(object): - - """ - Holds the result of a single showwarning() call. - - Notes - ----- - `WarningMessage` is copied from the Python 2.6 warnings module, - so it can be used in NumPy with older Python versions. 
- - """ - - _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", - "line") - - def __init__(self, message, category, filename, lineno, file=None, - line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) - if category: - self._category_name = category.__name__ - else: - self._category_name = None - - def __str__(self): - return ("{message : %r, category : %r, filename : %r, lineno : %s, " - "line : %r}" % (self.message, self._category_name, - self.filename, self.lineno, self.line)) - -class WarningManager: - """ - A context manager that copies and restores the warnings filter upon - exiting the context. - - The 'record' argument specifies whether warnings should be captured by a - custom implementation of ``warnings.showwarning()`` and be appended to a - list returned by the context manager. Otherwise None is returned by the - context manager. The objects appended to the list are arguments whose - attributes mirror the arguments to ``showwarning()``. - - The 'module' argument is to specify an alternative module to the module - named 'warnings' and imported under that name. This argument is only useful - when testing the warnings module itself. - - Notes - ----- - `WarningManager` is a copy of the ``catch_warnings`` context manager - from the Python 2.6 warnings module, with slight modifications. - It is copied so it can be used in NumPy with older Python versions. 
- - """ - def __init__(self, record=False, module=None): - self._record = record - if module is None: - self._module = sys.modules['warnings'] - else: - self._module = module - self._entered = False - - def __enter__(self): - if self._entered: - raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - if self._record: - log = [] - def showwarning(*args, **kwargs): - log.append(WarningMessage(*args, **kwargs)) - self._module.showwarning = showwarning - return log - else: - return None - - def __exit__(self): - if not self._entered: - raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning - -def assert_warns(warning_class, func, *args, **kw): - """ - Fail unless the given callable throws the specified warning. - - A warning of class warning_class should be thrown by the callable when - invoked with arguments args and keyword arguments kwargs. - If a different type of warning is thrown, it will not be caught, and the - test case will be deemed to have suffered an error. - - Parameters - ---------- - warning_class : class - The class defining the warning that `func` is expected to throw. - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - None - - """ - - # XXX: once we may depend on python >= 2.6, this can be replaced by the - # warnings module context manager. 
- ctx = WarningManager(record=True) - l = ctx.__enter__() - warnings.simplefilter('always') - try: - func(*args, **kw) - if not len(l) > 0: - raise AssertionError("No warning raised when calling %s" - % func.__name__) - if not l[0].category is warning_class: - raise AssertionError("First warning for %s is not a " \ - "%s( is %s)" % (func.__name__, warning_class, l[0])) - finally: - ctx.__exit__() diff --git a/numpy-1.6.2/numpy/tests/test_ctypeslib.py b/numpy-1.6.2/numpy/tests/test_ctypeslib.py deleted file mode 100644 index dfe7e90aae..0000000000 --- a/numpy-1.6.2/numpy/tests/test_ctypeslib.py +++ /dev/null @@ -1,100 +0,0 @@ -import sys - -import numpy as np -from numpy.ctypeslib import ndpointer, load_library -from numpy.distutils.misc_util import get_shared_lib_extension -from numpy.testing import * - -try: - cdll = load_library('multiarray', np.core.multiarray.__file__) - _HAS_CTYPE = True -except ImportError: - _HAS_CTYPE = False - -class TestLoadLibrary(TestCase): - @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") - @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin") - def test_basic(self): - try: - cdll = load_library('multiarray', - np.core.multiarray.__file__) - except ImportError, e: - msg = "ctypes is not available on this python: skipping the test" \ - " (import error was: %s)" % str(e) - print msg - - @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") - @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin") - def test_basic2(self): - """Regression for #801: load_library with a full library name - (including extension) does not work.""" - try: - try: - so = get_shared_lib_extension(is_python_ext=True) - cdll = load_library('multiarray%s' % so, - np.core.multiarray.__file__) - except ImportError: - print "No distutils available, skipping test." 
- except ImportError, e: - msg = "ctypes is not available on this python: skipping the test" \ - " (import error was: %s)" % str(e) - print msg - -class TestNdpointer(TestCase): - def test_dtype(self): - dt = np.intc - p = ndpointer(dtype=dt) - self.assert_(p.from_param(np.array([1], dt))) - dt = 'i4') - p = ndpointer(dtype=dt) - p.from_param(np.array([1], dt)) - self.assertRaises(TypeError, p.from_param, - np.array([1], dt.newbyteorder('swap'))) - dtnames = ['x', 'y'] - dtformats = [np.intc, np.float64] - dtdescr = {'names' : dtnames, 'formats' : dtformats} - dt = np.dtype(dtdescr) - p = ndpointer(dtype=dt) - self.assert_(p.from_param(np.zeros((10,), dt))) - samedt = np.dtype(dtdescr) - p = ndpointer(dtype=samedt) - self.assert_(p.from_param(np.zeros((10,), dt))) - dt2 = np.dtype(dtdescr, align=True) - if dt.itemsize != dt2.itemsize: - self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2)) - else: - self.assert_(p.from_param(np.zeros((10,), dt2))) - - def test_ndim(self): - p = ndpointer(ndim=0) - self.assert_(p.from_param(np.array(1))) - self.assertRaises(TypeError, p.from_param, np.array([1])) - p = ndpointer(ndim=1) - self.assertRaises(TypeError, p.from_param, np.array(1)) - self.assert_(p.from_param(np.array([1]))) - p = ndpointer(ndim=2) - self.assert_(p.from_param(np.array([[1]]))) - - def test_shape(self): - p = ndpointer(shape=(1,2)) - self.assert_(p.from_param(np.array([[1,2]]))) - self.assertRaises(TypeError, p.from_param, np.array([[1],[2]])) - p = ndpointer(shape=()) - self.assert_(p.from_param(np.array(1))) - - def test_flags(self): - x = np.array([[1,2,3]], order='F') - p = ndpointer(flags='FORTRAN') - self.assert_(p.from_param(x)) - p = ndpointer(flags='CONTIGUOUS') - self.assertRaises(TypeError, p.from_param, x) - p = ndpointer(flags=x.flags.num) - self.assert_(p.from_param(x)) - self.assertRaises(TypeError, p.from_param, np.array([[1,2,3]])) - - -if __name__ == "__main__": - run_module_suite() diff --git 
a/numpy-1.6.2/numpy/tests/test_matlib.py b/numpy-1.6.2/numpy/tests/test_matlib.py deleted file mode 100644 index 0766764958..0000000000 --- a/numpy-1.6.2/numpy/tests/test_matlib.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np -import numpy.matlib -from numpy.testing import assert_array_equal, assert_ - -def test_empty(): - x = np.matlib.empty((2,)) - assert_(isinstance(x, np.matrix)) - assert_(x.shape, (1,2)) - -def test_ones(): - assert_array_equal(np.matlib.ones((2, 3)), - np.matrix([[ 1., 1., 1.], - [ 1., 1., 1.]])) - - assert_array_equal(np.matlib.ones(2), np.matrix([[ 1., 1.]])) - -def test_zeros(): - assert_array_equal(np.matlib.zeros((2, 3)), - np.matrix([[ 0., 0., 0.], - [ 0., 0., 0.]])) - - assert_array_equal(np.matlib.zeros(2), np.matrix([[ 0., 0.]])) - -def test_identity(): - x = np.matlib.identity(2, dtype=np.int) - assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) - -def test_eye(): - x = np.matlib.eye(3, k=1, dtype=int) - assert_array_equal(x, np.matrix([[ 0, 1, 0], - [ 0, 0, 1], - [ 0, 0, 0]])) - -def test_rand(): - x = np.matlib.rand(3) - # check matrix type, array would have shape (3,) - assert_(x.ndim == 2) - -def test_randn(): - x = np.matlib.randn(3) - # check matrix type, array would have shape (3,) - assert_(x.ndim == 2) - -def test_repmat(): - a1 = np.arange(4) - x = np.matlib.repmat(a1, 2, 2) - y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], - [0, 1, 2, 3, 0, 1, 2, 3]]) - assert_array_equal(x, y) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.6.2/numpy/version.py b/numpy-1.6.2/numpy/version.py deleted file mode 100644 index e932075125..0000000000 --- a/numpy-1.6.2/numpy/version.py +++ /dev/null @@ -1,10 +0,0 @@ - -# THIS FILE IS GENERATED FROM NUMPY SETUP.PY -short_version = '1.6.2' -version = '1.6.2' -full_version = '1.6.2' -git_revision = 'ca07bce202ae26b6f0a73870eb2ef0b88e0210c5' -release = True - -if not release: - version = full_version diff --git a/numpy-1.6.2/setup.py b/numpy-1.6.2/setup.py deleted file mode 
100755 index 941dbaf136..0000000000 --- a/numpy-1.6.2/setup.py +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/env python -"""NumPy: array processing for numbers, strings, records, and objects. - -NumPy is a general-purpose array-processing package designed to -efficiently manipulate large multi-dimensional arrays of arbitrary -records without sacrificing too much speed for small multi-dimensional -arrays. NumPy is built on the Numeric code base and adds features -introduced by numarray as well as an extended C-API and the ability to -create arrays of arbitrary type which also makes NumPy suitable for -interfacing with general-purpose data-base applications. - -There are also basic facilities for discrete fourier transform, -basic linear algebra and random number generation. -""" - -DOCLINES = __doc__.split("\n") - -import os -import shutil -import sys -import re -import subprocess - -if sys.version_info[0] < 3: - import __builtin__ as builtins -else: - import builtins - -CLASSIFIERS = """\ -Development Status :: 5 - Production/Stable -Intended Audience :: Science/Research -Intended Audience :: Developers -License :: OSI Approved -Programming Language :: C -Programming Language :: Python -Programming Language :: Python :: 3 -Topic :: Software Development -Topic :: Scientific/Engineering -Operating System :: Microsoft :: Windows -Operating System :: POSIX -Operating System :: Unix -Operating System :: MacOS -""" - -NAME = 'numpy' -MAINTAINER = "NumPy Developers" -MAINTAINER_EMAIL = "numpy-discussion@scipy.org" -DESCRIPTION = DOCLINES[0] -LONG_DESCRIPTION = "\n".join(DOCLINES[2:]) -URL = "http://numpy.scipy.org" -DOWNLOAD_URL = "http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103" -LICENSE = 'BSD' -CLASSIFIERS = filter(None, CLASSIFIERS.split('\n')) -AUTHOR = "Travis E. Oliphant, et.al." 
-AUTHOR_EMAIL = "oliphant@enthought.com" -PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"] -MAJOR = 1 -MINOR = 6 -MICRO = 2 -ISRELEASED = True -VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) - -# Return the git revision as a string -def git_version(): - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - GIT_REVISION = out.strip().decode('ascii') - except OSError: - GIT_REVISION = "Unknown" - - return GIT_REVISION - -# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly -# update it when the contents of directories change. -if os.path.exists('MANIFEST'): os.remove('MANIFEST') - -# This is a bit hackish: we are setting a global variable so that the main -# numpy __init__ can detect if it is being loaded by the setup routine, to -# avoid attempting to load components that aren't built yet. While ugly, it's -# a lot more robust than what was previously being used. -builtins.__NUMPY_SETUP__ = True - - -def write_version_py(filename='numpy/version.py'): - cnt = """ -# THIS FILE IS GENERATED FROM NUMPY SETUP.PY -short_version = '%(version)s' -version = '%(version)s' -full_version = '%(full_version)s' -git_revision = '%(git_revision)s' -release = %(isrelease)s - -if not release: - version = full_version -""" - # Adding the git rev number needs to be done inside write_version_py(), - # otherwise the import of numpy.version messes up the build under Python 3. 
- FULLVERSION = VERSION - if os.path.exists('.git'): - GIT_REVISION = git_version() - elif os.path.exists('numpy/version.py'): - # must be a source distribution, use existing version file - try: - from numpy.version import git_revision as GIT_REVISION - except ImportError: - raise ImportError("Unable to import git_revision. Try removing " \ - "numpy/version.py and the build directory " \ - "before building.") - else: - GIT_REVISION = "Unknown" - - if not ISRELEASED: - FULLVERSION += '.dev-' + GIT_REVISION[:7] - - a = open(filename, 'w') - try: - a.write(cnt % {'version': VERSION, - 'full_version' : FULLVERSION, - 'git_revision' : GIT_REVISION, - 'isrelease': str(ISRELEASED)}) - finally: - a.close() - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration(None, parent_package, top_path) - config.set_options(ignore_setup_xxx_py=True, - assume_default_configuration=True, - delegate_options_to_subpackages=True, - quiet=True) - - config.add_subpackage('numpy') - - config.get_version('numpy/version.py') # sets config.version - - return config - -def setup_package(): - - # Perform 2to3 if needed - local_path = os.path.dirname(os.path.abspath(sys.argv[0])) - src_path = local_path - - if sys.version_info[0] == 3: - src_path = os.path.join(local_path, 'build', 'py3k') - sys.path.insert(0, os.path.join(local_path, 'tools')) - import py3tool - print("Converting to Python3 via 2to3...") - py3tool.sync_2to3('numpy', os.path.join(src_path, 'numpy')) - - site_cfg = os.path.join(local_path, 'site.cfg') - if os.path.isfile(site_cfg): - shutil.copy(site_cfg, src_path) - - # Ugly hack to make pip work with Python 3, see #1857. - # Explanation: pip messes with __file__ which interacts badly with the - # change in directory due to the 2to3 conversion. Therefore we restore - # __file__ to what it would have been otherwise. 
- global __file__ - __file__ = os.path.join(os.curdir, os.path.basename(__file__)) - if '--egg-base' in sys.argv: - # Change pip-egg-info entry to absolute path, so pip can find it - # after changing directory. - idx = sys.argv.index('--egg-base') - if sys.argv[idx + 1] == 'pip-egg-info': - sys.argv[idx + 1] = os.path.join(local_path, 'pip-egg-info') - - old_path = os.getcwd() - os.chdir(src_path) - sys.path.insert(0, src_path) - - # Rewrite the version file everytime - write_version_py() - - # Run build - from numpy.distutils.core import setup - - try: - setup( - name=NAME, - maintainer=MAINTAINER, - maintainer_email=MAINTAINER_EMAIL, - description=DESCRIPTION, - long_description=LONG_DESCRIPTION, - url=URL, - download_url=DOWNLOAD_URL, - license=LICENSE, - classifiers=CLASSIFIERS, - author=AUTHOR, - author_email=AUTHOR_EMAIL, - platforms=PLATFORMS, - configuration=configuration ) - finally: - del sys.path[0] - os.chdir(old_path) - return - -if __name__ == '__main__': - setup_package() diff --git a/numpy-1.6.2/setupegg.py b/numpy-1.6.2/setupegg.py deleted file mode 100755 index 3ed1e0b0bc..0000000000 --- a/numpy-1.6.2/setupegg.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -""" -A setup.py script to use setuptools, which gives egg goodness, etc. - -This is used to build installers for OS X through bdist_mpkg. - -Notes ------ -Using ``python setupegg.py install`` directly results in file permissions being -set wrong, with nose refusing to run any tests. 
To run the tests anyway, use:: - - >>> np.test(extra_argv=['--exe']) - -""" - -import sys -from setuptools import setup - -if sys.version_info[0] >= 3: - import imp - setupfile = imp.load_source('setupfile', 'setup.py') - setupfile.setup_package() -else: - execfile('setup.py') diff --git a/numpy-1.6.2/setupscons.py b/numpy-1.6.2/setupscons.py deleted file mode 100755 index ff5c539903..0000000000 --- a/numpy-1.6.2/setupscons.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python -"""NumPy: array processing for numbers, strings, records, and objects. - -NumPy is a general-purpose array-processing package designed to -efficiently manipulate large multi-dimensional arrays of arbitrary -records without sacrificing too much speed for small multi-dimensional -arrays. NumPy is built on the Numeric code base and adds features -introduced by numarray as well as an extended C-API and the ability to -create arrays of arbitrary type which also makes NumPy suitable for -interfacing with general-purpose data-base applications. - -There are also basic facilities for discrete fourier transform, -basic linear algebra and random number generation. -""" - -DOCLINES = __doc__.split("\n") - -import __builtin__ -import os -import sys - -CLASSIFIERS = """\ -Development Status :: 4 - Beta -Intended Audience :: Science/Research -Intended Audience :: Developers -License :: OSI Approved -Programming Language :: C -Programming Language :: Python -Topic :: Software Development -Topic :: Scientific/Engineering -Operating System :: Microsoft :: Windows -Operating System :: POSIX -Operating System :: Unix -Operating System :: MacOS -""" - -# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly -# update it when the contents of directories change. 
-if os.path.exists('MANIFEST'): os.remove('MANIFEST') - -sys.path.insert(0, os.path.dirname(__file__)) -try: - setup_py = __import__("setup") - write_version_py = setup_py.write_version_py -finally: - sys.path.pop(0) - -# This is a bit hackish: we are setting a global variable so that the main -# numpy __init__ can detect if it is being loaded by the setup routine, to -# avoid attempting to load components that aren't built yet. While ugly, it's -# a lot more robust than what was previously being used. -__builtin__.__NUMPY_SETUP__ = True - -# DO NOT REMOVE numpy.distutils IMPORT ! This is necessary for numpy.distutils' -# monkey patching to work. -import numpy.distutils -from distutils.errors import DistutilsError -try: - import numscons -except ImportError, e: - msg = ["You cannot build numpy with scons without the numscons package "] - msg.append("(Failure was: %s)" % e) - raise DistutilsError('\n'.join(msg)) - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration(None, parent_package, top_path, setup_name = 'setupscons.py') - config.set_options(ignore_setup_xxx_py=True, - assume_default_configuration=True, - delegate_options_to_subpackages=True, - quiet=True) - - config.add_subpackage('numpy') - - config.add_data_files(('numpy','*.txt'), - ('numpy','COMPATIBILITY'), - ('numpy','site.cfg.example'), - ('numpy','setup.py')) - - config.get_version('numpy/version.py') # sets config.version - - return config - -def setup_package(): - - from numpy.distutils.core import setup - - old_path = os.getcwd() - local_path = os.path.dirname(os.path.abspath(sys.argv[0])) - os.chdir(local_path) - sys.path.insert(0,local_path) - - # Rewrite the version file everytime - if os.path.exists('numpy/version.py'): os.remove('numpy/version.py') - write_version_py() - - try: - setup( - name = 'numpy', - maintainer = "NumPy Developers", - maintainer_email = "numpy-discussion@lists.sourceforge.net", - description 
= DOCLINES[0], - long_description = "\n".join(DOCLINES[2:]), - url = "http://numeric.scipy.org", - download_url = "http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103", - license = 'BSD', - classifiers=filter(None, CLASSIFIERS.split('\n')), - author = "Travis E. Oliphant, et.al.", - author_email = "oliphant@ee.byu.edu", - platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], - configuration=configuration ) - finally: - del sys.path[0] - os.chdir(old_path) - return - -if __name__ == '__main__': - setup_package() diff --git a/numpy-1.6.2/setupsconsegg.py b/numpy-1.6.2/setupsconsegg.py deleted file mode 100755 index 2baae18674..0000000000 --- a/numpy-1.6.2/setupsconsegg.py +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python -""" -A setup.py script to use setuptools, which gives egg goodness, etc. -""" - -from setuptools import setup -execfile('setupscons.py') diff --git a/numpy-1.6.2/site.cfg.example b/numpy-1.6.2/site.cfg.example deleted file mode 100644 index 89d1087392..0000000000 --- a/numpy-1.6.2/site.cfg.example +++ /dev/null @@ -1,145 +0,0 @@ -# This file provides configuration information about non-Python dependencies for -# numpy.distutils-using packages. Create a file like this called "site.cfg" next -# to your package's setup.py file and fill in the appropriate sections. Not all -# packages will use all sections so you should leave out sections that your -# package does not use. - -# To assist automatic installation like easy_install, the user's home directory -# will also be checked for the file ~/.numpy-site.cfg . - -# The format of the file is that of the standard library's ConfigParser module. -# -# http://www.python.org/doc/current/lib/module-ConfigParser.html -# -# Each section defines settings that apply to one particular dependency. Some of -# the settings are general and apply to nearly any section and are defined here. -# Settings specific to a particular section will be defined near their section. 
-# -# libraries -# Comma-separated list of library names to add to compile the extension -# with. Note that these should be just the names, not the filenames. For -# example, the file "libfoo.so" would become simply "foo". -# libraries = lapack,f77blas,cblas,atlas -# -# library_dirs -# List of directories to add to the library search path when compiling -# extensions with this dependency. Use the character given by os.pathsep -# to separate the items in the list. Note that this character is known to -# vary on some unix-like systems; if a colon does not work, try a comma. -# This also applies to include_dirs and src_dirs (see below). -# On UN*X-type systems (OS X, most BSD and Linux systems): -# library_dirs = /usr/lib:/usr/local/lib -# On Windows: -# library_dirs = c:\mingw\lib,c:\atlas\lib -# On some BSD and Linux systems: -# library_dirs = /usr/lib,/usr/local/lib -# -# include_dirs -# List of directories to add to the header file earch path. -# include_dirs = /usr/include:/usr/local/include -# -# src_dirs -# List of directories that contain extracted source code for the -# dependency. For some dependencies, numpy.distutils will be able to build -# them from source if binaries cannot be found. The FORTRAN BLAS and -# LAPACK libraries are one example. However, most dependencies are more -# complicated and require actual installation that you need to do -# yourself. -# src_dirs = /home/rkern/src/BLAS_SRC:/home/rkern/src/LAPACK_SRC -# -# search_static_first -# Boolean (one of (0, false, no, off) for False or (1, true, yes, on) for -# True) to tell numpy.distutils to prefer static libraries (.a) over -# shared libraries (.so). It is turned off by default. -# search_static_first = false - -# Defaults -# ======== -# The settings given here will apply to all other sections if not overridden. 
-# This is a good place to add general library and include directories like -# /usr/local/{lib,include} -# -#[DEFAULT] -#library_dirs = /usr/local/lib -#include_dirs = /usr/local/include - -# Optimized BLAS and LAPACK -# ------------------------- -# Use the blas_opt and lapack_opt sections to give any settings that are -# required to link against your chosen BLAS and LAPACK, including the regular -# FORTRAN reference BLAS and also ATLAS. Some other sections still exist for -# linking against certain optimized libraries (e.g. [atlas], [lapack_atlas]), -# however, they are now deprecated and should not be used. -# -# These are typical configurations for ATLAS (assuming that the library and -# include directories have already been set in [DEFAULT]; the include directory -# is important for the BLAS C interface): -# -#[blas_opt] -#libraries = f77blas, cblas, atlas -# -#[lapack_opt] -#libraries = lapack, f77blas, cblas, atlas -# -# If your ATLAS was compiled with pthreads, the names of the libraries might be -# different: -# -#[blas_opt] -#libraries = ptf77blas, ptcblas, atlas -# -#[lapack_opt] -#libraries = lapack, ptf77blas, ptcblas, atlas - - -# UMFPACK -# ------- -# The UMFPACK library is used in scikits.umfpack to factor large sparse matrices. -# It, in turn, depends on the AMD library for reordering the matrices for -# better performance. Note that the AMD library has nothing to do with AMD -# (Advanced Micro Devices), the CPU company. -# -# UMFPACK is not needed for numpy or scipy. -# -# http://www.cise.ufl.edu/research/sparse/umfpack/ -# http://www.cise.ufl.edu/research/sparse/amd/ -# http://scikits.appspot.com/umfpack -# -#[amd] -#amd_libs = amd -# -#[umfpack] -#umfpack_libs = umfpack - - -# FFT libraries -# ------------- -# There are two FFT libraries that we can configure here: FFTW (2 and 3) and djbfft. -# Note that these libraries are not needed for numpy or scipy. 
-# -# http://fftw.org/ -# http://cr.yp.to/djbfft.html -# -# Given only this section, numpy.distutils will try to figure out which version -# of FFTW you are using. -#[fftw] -#libraries = fftw3 -# -# For djbfft, numpy.distutils will look for either djbfft.a or libdjbfft.a . -#[djbfft] -#include_dirs = /usr/local/djbfft/include -#library_dirs = /usr/local/djbfft/lib - - -# MKL -#---- -# For recent (9.0.21, for example) mkl, you need to change the names of the -# lapack library. Assuming you installed the mkl in /opt, for a 32 bits cpu: -# [mkl] -# library_dirs = /opt/intel/mkl/9.1.023/lib/32/ -# lapack_libs = mkl_lapack -# -# For 10.*, on 32 bits machines: -# [mkl] -# library_dirs = /opt/intel/mkl/10.0.1.014/lib/32/ -# lapack_libs = mkl_lapack -# mkl_libs = mkl, guide diff --git a/numpy-1.6.2/tools/py3tool.py b/numpy-1.6.2/tools/py3tool.py deleted file mode 100755 index 5be7d6c8e2..0000000000 --- a/numpy-1.6.2/tools/py3tool.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python3 -# -*- python -*- -""" -%prog SUBMODULE... - -Hack to pipe submodules of Numpy through 2to3 and build them in-place -one-by-one. - -Example usage: - - python3 tools/py3tool.py testing distutils core - -This will copy files to _py3k/numpy, add a dummy __init__.py and -version.py on the top level, and copy and 2to3 the files of the three -submodules. - -When running py3tool again, only changed files are re-processed, which -makes the test-bugfix cycle faster. 
- -""" -from optparse import OptionParser -import shutil -import os -import sys -import re -import subprocess -import fnmatch - -if os.environ.get('USE_2TO3CACHE'): - import lib2to3cache - -BASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..')) -TEMP = os.path.normpath(os.path.join(BASE, '_py3k')) - -SCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py') - -EXTRA_2TO3_FLAGS = { - '*/setup.py': '-x import', - 'numpy/core/code_generators/generate_umath.py': '-x import', - 'numpy/core/code_generators/generate_numpy_api.py': '-x import', - 'numpy/core/code_generators/generate_ufunc_api.py': '-x import', - 'numpy/core/defchararray.py': '-x unicode', - 'numpy/compat/py3k.py': '-x unicode', - 'numpy/ma/timer_comparison.py': 'skip', - 'numpy/distutils/system_info.py': '-x reduce', - 'numpy/f2py/auxfuncs.py': '-x reduce', - 'numpy/lib/arrayterator.py': '-x reduce', - 'numpy/lib/tests/test_arrayterator.py': '-x reduce', - 'numpy/ma/core.py': '-x reduce', - 'numpy/ma/tests/test_core.py': '-x reduce', - 'numpy/ma/tests/test_old_ma.py': '-x reduce', - 'numpy/ma/timer_comparison.py': '-x reduce', - 'numpy/oldnumeric/ma.py': '-x reduce', -} - -def main(): - p = OptionParser(usage=__doc__.strip()) - p.add_option("--clean", "-c", action="store_true", - help="clean source directory") - options, args = p.parse_args() - - if not args: - p.error('no submodules given') - else: - dirs = ['numpy/%s' % x for x in map(os.path.basename, args)] - - # Prepare - if not os.path.isdir(TEMP): - os.makedirs(TEMP) - - # Set up dummy files (for building only submodules) - dummy_files = { - '__init__.py': 'from numpy.version import version as __version__', - 'version.py': 'version = "1.4.0.dev"' - } - - for fn, content in dummy_files.items(): - fn = os.path.join(TEMP, 'numpy', fn) - if not os.path.isfile(fn): - try: - os.makedirs(os.path.dirname(fn)) - except OSError: - pass - f = open(fn, 'wb+') - f.write(content.encode('ascii')) - f.close() - - # Environment - pp = 
[os.path.abspath(TEMP)] - def getenv(): - env = dict(os.environ) - env.update({'PYTHONPATH': ':'.join(pp)}) - return env - - # Copy - for d in dirs: - src = os.path.join(BASE, d) - dst = os.path.join(TEMP, d) - - # Run 2to3 - sync_2to3(dst=dst, - src=src, - patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'), - clean=options.clean) - - # Run setup.py, falling back to Pdb post-mortem on exceptions - setup_py = os.path.join(dst, 'setup.py') - if os.path.isfile(setup_py): - code = """\ -import pdb, sys, traceback -p = pdb.Pdb() -try: - import __main__ - __main__.__dict__.update({ - "__name__": "__main__", "__file__": "setup.py", - "__builtins__": __builtins__}) - fp = open("setup.py", "rb") - try: - exec(compile(fp.read(), "setup.py", 'exec')) - finally: - fp.close() -except SystemExit: - raise -except: - traceback.print_exc() - t = sys.exc_info()[2] - p.interaction(None, t) -""" - ret = subprocess.call([sys.executable, '-c', code, - 'build_ext', '-i'], - cwd=dst, - env=getenv()) - if ret != 0: - raise RuntimeError("Build failed.") - - # Run nosetests - subprocess.call(['nosetests3', '-v', d], cwd=TEMP) - -def custom_mangling(filename): - import_mangling = [ - os.path.join('core', '__init__.py'), - os.path.join('core', 'numeric.py'), - os.path.join('core', '_internal.py'), - os.path.join('core', 'arrayprint.py'), - os.path.join('core', 'fromnumeric.py'), - os.path.join('numpy', '__init__.py'), - os.path.join('lib', 'npyio.py'), - os.path.join('lib', 'function_base.py'), - os.path.join('fft', 'fftpack.py'), - os.path.join('random', '__init__.py'), - ] - - if any(filename.endswith(x) for x in import_mangling): - f = open(filename, 'r') - text = f.read() - f.close() - for mod in ['multiarray', 'scalarmath', 'umath', '_sort', - '_compiled_base', 'core', 'lib', 'testing', 'fft', - 'polynomial', 'random', 'ma', 'linalg', 'compat', - 'mtrand', '_dotblas', 'version']: - text = re.sub(r'^(\s*)import %s' % mod, - r'\1from . 
import %s' % mod, - text, flags=re.M) - text = re.sub(r'^(\s*)from %s import' % mod, - r'\1from .%s import' % mod, - text, flags=re.M) - text = text.replace('from matrixlib', 'from .matrixlib') - f = open(filename, 'w') - f.write(text) - f.close() - -def walk_sync(dir1, dir2, _seen=None): - if _seen is None: - seen = {} - else: - seen = _seen - - if not dir1.endswith(os.path.sep): - dir1 = dir1 + os.path.sep - - # Walk through stuff (which we haven't yet gone through) in dir1 - for root, dirs, files in os.walk(dir1): - sub = root[len(dir1):] - if sub in seen: - dirs = [x for x in dirs if x not in seen[sub][0]] - files = [x for x in files if x not in seen[sub][1]] - seen[sub][0].extend(dirs) - seen[sub][1].extend(files) - else: - seen[sub] = (dirs, files) - if not dirs and not files: - continue - yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files - - if _seen is None: - # Walk through stuff (which we haven't yet gone through) in dir2 - for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen): - yield root1, root2, dirs, files - -def sync_2to3(src, dst, patchfile=None, clean=False): - import lib2to3.main - from io import StringIO - - to_convert = [] - - for src_dir, dst_dir, dirs, files in walk_sync(src, dst): - for fn in dirs + files: - src_fn = os.path.join(src_dir, fn) - dst_fn = os.path.join(dst_dir, fn) - - # skip temporary etc. 
files - if fn.startswith('.#') or fn.endswith('~'): - continue - - # remove non-existing - if os.path.exists(dst_fn) and not os.path.exists(src_fn): - if clean: - if os.path.isdir(dst_fn): - shutil.rmtree(dst_fn) - else: - os.unlink(dst_fn) - continue - - # make directories - if os.path.isdir(src_fn): - if not os.path.isdir(dst_fn): - os.makedirs(dst_fn) - continue - - dst_dir = os.path.dirname(dst_fn) - if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir): - os.makedirs(dst_dir) - - # don't replace up-to-date files - try: - if os.path.isfile(dst_fn) and \ - os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime: - continue - except OSError: - pass - - # copy file - shutil.copyfile(src_fn, dst_fn) - - # add .py files to 2to3 list - if dst_fn.endswith('.py'): - to_convert.append((src_fn, dst_fn)) - - # run 2to3 - flag_sets = {} - for fn, dst_fn in to_convert: - flag = '' - for pat, opt in EXTRA_2TO3_FLAGS.items(): - if fnmatch.fnmatch(fn, pat): - flag = opt - break - flag_sets.setdefault(flag, []).append(dst_fn) - - if patchfile: - p = open(patchfile, 'wb+') - else: - p = open(os.devnull, 'wb') - - for flags, filenames in flag_sets.items(): - if flags == 'skip': - continue - - _old_stdout = sys.stdout - try: - sys.stdout = StringIO() - lib2to3.main.main("lib2to3.fixes", ['-w', '-n'] + flags.split()+filenames) - finally: - sys.stdout = _old_stdout - - for fn, dst_fn in to_convert: - # perform custom mangling - custom_mangling(dst_fn) - - p.close() - -if __name__ == "__main__": - main() diff --git a/scipy-0.10.1/BENTO_BUILD.txt b/scipy-0.10.1/BENTO_BUILD.txt deleted file mode 100644 index 66cf091563..0000000000 --- a/scipy-0.10.1/BENTO_BUILD.txt +++ /dev/null @@ -1,19 +0,0 @@ -No-frill version: - - * Clone bento:: - - git clone git://github.com/cournape/Bento.git bento-git - - * Bootstrap bento:: - - cd bento-git && python bootstrap.py - - * Download waf (version 1.6.4 or 1.6.5 should work) - * In the bento_waf_branch of scipy: - - export 
WAFDIR=ROOT_OF_WAF_SVN_TRUNK # WAFDIR should be such as $WAFDIR/waflib exists - $BENTO_ROOT/bentomaker build -j 4 # 4 threads in parallel - # or with progress bar - $BENTO_ROOT/bentomaker build -p - # or with verbose output - $BENTO_ROOT/bentomaker build -v diff --git a/scipy-0.10.1/INSTALL.txt b/scipy-0.10.1/INSTALL.txt deleted file mode 100644 index 403e95bf02..0000000000 --- a/scipy-0.10.1/INSTALL.txt +++ /dev/null @@ -1,374 +0,0 @@ -Building and installing SciPy -+++++++++++++++++++++++++++++ - -See http://www.scipy.org/scipy/scipy/wiki/GetCode -for updates of this document. - -.. Contents:: - -INTRODUCTION -============ - -It is *strongly* recommended that you use the binary packages on your platform -if they are available, in particular on Windows and Mac OS X. You should not -attempt to build SciPy if you are not familiar with compiling softwares from -sources. - -PREREQUISITES -============= - -SciPy requires the following software installed for your platform: - -1) Python__ 2.4.x or newer - -__ http://www.python.org - -2) NumPy__ 1.4.1 or newer (note: SciPy trunk at times requires latest NumPy - trunk). - -__ http://www.numpy.org/ - -Windows -------- - -Compilers -~~~~~~~~~ - -It is recommended to use the mingw__ compilers on Windows: you will need gcc -(C), g++ (C++) and g77 (Fortran) compilers. - -__ http://www.mingw.org - -Blas/Lapack -~~~~~~~~~~~ - -Blas/Lapack are core routines for linear algebra (vector/matrix operations). -You should use ATLAS__ with a full LAPACK, or simple BLAS/LAPACK built with g77 -from netlib__ sources. Building those libraries on windows may be difficult, as -they assume a unix-style environment. Please use the binaries if you don't feel -comfortable with cygwin, make and similar tools. - -__ http://math-atlas.sourceforge.net/ -__ http://www.netlib.org/lapack/ - -Mac OS X --------- - -Compilers -~~~~~~~~~ - -It is recommended to use gcc. gcc is available for free when installing -Xcode__, the developer toolsuite on Mac OS X. 
You also need a fortran compiler, -which is not included with Xcode: you should use gfortran from this page: - -__ http://r.research.att.com/tools/ - -Please do NOT use gfortran from hpc.sourceforge.net, it is known to generate -buggy scipy binaries. - -__Xcode: http://developer.apple.com/TOOLS/xcode - -Blas/Lapack -~~~~~~~~~~~ - -Mac OS X includes the Accelerate framework: it should be detected without any -intervention when building SciPy. - -Linux ------ - -Most common distributions include all the dependencies. Here are some -instructions for the most common ones: - -Ubuntu >= 8.10 -~~~~~~~~~~~~~~ - -You can get all the dependencies as follows:: - - sudo apt-get install python python-dev libatlas3-base-dev gcc gfortran g++ - -Ubuntu < 8.10, Debian -~~~~~~~~~~~~~~~~~~~~~ - -You can get all the dependencies as follows:: - - sudo apt-get install python python-dev atlas3-base-dev gcc g77 g++ - -OpenSuse >= 10 -~~~~~~~~~~~~~~ - -RHEL -~~~~ - -Fedora Core -~~~~~~~~~~~ - -GETTING SCIPY -============= - -For the latest information, see the web site: - - http://www.scipy.org - - -Development version from Git ----------------------------- -Use the command:: - - git clone https://github.com/scipy/scipy.git - -Before building and installing from git, remove the old installation -(e.g. in /usr/lib/python2.4/site-packages/scipy or -$HOME/lib/python2.4/site-packages/scipy). Then type:: - - cd scipy - git clean -xdf - python setup.py install - - - -INSTALLATION -============ - -First make sure that all SciPy prerequisites are installed and working -properly. Then be sure to remove any old SciPy installations (e.g. -/usr/lib/python2.4/site-packages/scipy or $HOME/lib/python2.4/ -site-packages/scipy). On windows, if you installed scipy previously from a -binary, use the remove facility from the add/remove softwares panel, or remote -the scipy directory by hand if you installed from sources (e.g. -C:\Python24\Lib\site-packages\scipy for python 2.4). 
- -From tarballs -------------- -Unpack ``SciPy-.tar.gz``, change to the ``SciPy-/`` -directory, and run -:: - - python setup.py install - -This may take several minutes to an hour depending on the speed of your -computer. To install to a user-specific location instead, run:: - - python setup.py install --prefix=$MYDIR - -where $MYDIR is, for example, $HOME or $HOME/usr. - - ** Note 1: On Unix, you should avoid installing in /usr, but rather in - /usr/local or somewhere else. /usr is generally 'owned' by your package - manager, and you may overwrite a packaged scipy this way. - -TESTING -======= - -To test SciPy after installation (highly recommended), execute in Python - - >>> import scipy - >>> scipy.test() - -To run the full test suite use - - >>> scipy.test('full') - -Please note that you must have version 0.10 or later of the 'nose' test -framework installed in order to run the tests. More information about nose is -available on the website__. - -__ http://somethingaboutorange.com/mrl/projects/nose/ - -COMPILER NOTES -============== - -Note that SciPy is developed mainly using GNU compilers. Compilers from -other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland, -Lahey, HP, IBM are supported in the form of community feedback. - -gcc__ compiler is recommended. gcc 3.x and 4.x are known to work. -If building on OS X, you should use the provided gcc by xcode tools, and the -gfortran compiler available here: - -http://r.research.att.com/tools/ - -You can specify which Fortran compiler to use by using the following -install command:: - - python setup.py config_fc --fcompiler= install - -To see a valid list of names, run:: - - python setup.py config_fc --help-fcompiler - -IMPORTANT: It is highly recommended that all libraries that scipy uses (e.g. -blas and atlas libraries) are built with the same Fortran compiler. In most -cases, if you mix compilers, you will not be able to import scipy at best, have -crashes and random results at worse. 
- -__ http://gcc.gnu.org/ - -Using non-GNU Fortran compiler with gcc/g77 compiled Atlas/Lapack libraries ---------------------------------------------------------------------------- - -When Atlas/Lapack libraries are compiled with GNU compilers but -one wishes to build scipy with some non-GNU Fortran compiler then -linking extension modules may require -lg2c. You can specify it -in installation command line as follows:: - - python setup.py build build_ext -lg2c install - -If using non-GNU C compiler or linker, the location of g2c library can -be specified in a similar manner using -L/path/to/libg2c.a after -build_ext command. - -Intel Fortran Compiler ----------------------- - -Note that code compiled by the Intel Fortran Compiler (IFC) is not -binary compatible with code compiled by g77. Therefore, when using IFC, -all Fortran codes used in SciPy must be compiled with IFC. This also -includes the LAPACK, BLAS, and ATLAS libraries. Using GCC for compiling -C code is OK. IFC version 5.0 is not supported (because it has bugs that -cause SciPy's tests to segfault). - -Minimum IFC flags for building LAPACK and ATLAS are -:: - - -FI -w90 -w95 -cm -O3 -unroll - -Also consult 'ifc -help' for additional optimization flags suitable -for your computers CPU. - -When finishing LAPACK build, you must recompile ?lamch.f, xerbla.f -with optimization disabled (otherwise infinite loops occur when using -these routines):: - - make lapacklib # in /path/to/src/LAPACK/ - cd SRC - ifc -FI -w90 -w95 -cm -O0 -c ?lamch.f xerbla.f - cd .. - make lapacklib - - -KNOWN INSTALLATION PROBLEMS -=========================== - -BLAS sources shipped with LAPACK are incomplete ------------------------------------------------ -Some distributions (e.g. Redhat Linux 7.1) provide BLAS libraries that -are built from such incomplete sources and therefore cause import -errors like -:: - - ImportError: .../fblas.so: undefined symbol: srotmg_ - -Fix: - Use ATLAS or the official release of BLAS libraries. 
- -LAPACK library provided by ATLAS is incomplete ----------------------------------------------- -You will notice it when getting import errors like -:: - - ImportError: .../flapack.so : undefined symbol: sgesdd_ - -To be sure that SciPy is built against a complete LAPACK, check the -size of the file liblapack.a -- it should be about 6MB. The location -of liblapack.a is shown by executing -:: - - python /lib/python2.4/site-packages/numpy/distutils/system_info.py - -(or the appropriate installation directory). - -To fix: follow the instructions in - - http://math-atlas.sourceforge.net/errata.html#completelp - - to create a complete liblapack.a. Then copy liblapack.a to the same - location where libatlas.a is installed and retry with scipy build. - -Using non-GNU Fortran Compiler ------------------------------- -If import scipy shows a message -:: - - ImportError: undefined symbol: s_wsfe - -and you are using non-GNU Fortran compiler, then it means that any of -the (may be system provided) Fortran libraries such as LAPACK or BLAS -were compiled with g77. See also compilers notes above. - -Recommended fix: Recompile all Fortran libraries with the same Fortran -compiler and rebuild/reinstall scipy. - -Another fix: See `Using non-GNU Fortran compiler with gcc/g77 compiled -Atlas/Lapack libraries` section above. - - -TROUBLESHOOTING -=============== - -If you experience problems when building/installing/testing SciPy, you -can ask help from scipy-user@scipy.org or scipy-dev@scipy.org mailing -lists. Please include the following information in your message: - -NOTE: You can generate some of the following information (items 1-5,7) -in one command:: - - python -c 'from numpy.f2py.diagnose import run; run()' - -1) Platform information:: - - python -c 'import os,sys;print os.name,sys.platform' - uname -a - OS, its distribution name and version information - etc. 
- -2) Information about C,C++,Fortran compilers/linkers as reported by - the compilers when requesting their version information, e.g., - the output of - :: - - gcc -v - g77 --version - -3) Python version:: - - python -c 'import sys;print sys.version' - -4) NumPy version:: - - python -c 'import numpy;print numpy.__version__' - -5) ATLAS version, the locations of atlas and lapack libraries, building - information if any. If you have ATLAS version 3.3.6 or newer, then - give the output of the last command in - :: - - cd scipy/Lib/linalg - python setup_atlas_version.py build_ext --inplace --force - python -c 'import atlas_version' - -7) The output of the following commands - :: - - python INSTALLDIR/numpy/distutils/system_info.py - -where INSTALLDIR is, for example, /usr/lib/python2.4/site-packages/. - -8) Feel free to add any other relevant information. - For example, the full output (both stdout and stderr) of the SciPy - installation command can be very helpful. Since this output can be - rather large, ask before sending it into the mailing list (or - better yet, to one of the developers, if asked). - -9) In case of failing to import extension modules, the output of - :: - - ldd /path/to/ext_module.so - - can be useful. - -You may find the following notes useful: - - http://www.tuxedo.org/~esr/faqs/smart-questions.html - - http://www.chiark.greenend.org.uk/~sgtatham/bugs.html diff --git a/scipy-0.10.1/LATEST.txt b/scipy-0.10.1/LATEST.txt deleted file mode 100644 index 423556ba57..0000000000 --- a/scipy-0.10.1/LATEST.txt +++ /dev/null @@ -1,5 +0,0 @@ -The latest code for this project can be obtained from: - - http://github.com/scipy/scipy - -either downloaded as a package or using the Git client. diff --git a/scipy-0.10.1/LICENSE.txt b/scipy-0.10.1/LICENSE.txt deleted file mode 100644 index 4f5dd7bc8c..0000000000 --- a/scipy-0.10.1/LICENSE.txt +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2001, 2002 Enthought, Inc. -All rights reserved. 
- -Copyright (c) 2003-2009 SciPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - a. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - b. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - c. Neither the name of the Enthought nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. - diff --git a/scipy-0.10.1/MANIFEST.in b/scipy-0.10.1/MANIFEST.in deleted file mode 100644 index 0bca900d59..0000000000 --- a/scipy-0.10.1/MANIFEST.in +++ /dev/null @@ -1,26 +0,0 @@ -# Use .add_data_files and .add_data_dir methods in a appropriate -# setup.py files to include non-python files such as documentation, -# data, etc files to distribution. Avoid using MANIFEST.in for that. 
-# -include MANIFEST.in -include *.txt -include setupscons.py -include setupegg.py -include setup.py -include scipy/*.py -# Adding scons build related files not found by distutils -recursive-include scipy SConstruct SConscript -# Add files to allow Bento build -include f2py.py -include interface_gen.py -include bscript bento.info -recursive-include scipy bscript bento.info -# Add py3tool -include tools/py3tool.py -# Add documentation: we don't use add_data_dir since we do not want to include -# this at installation, only for sdist-generated tarballs -include doc/Makefile doc/postprocess.py -recursive-include doc/release * -recursive-include doc/source * -recursive-include doc/sphinxext * -prune scipy/special/tests/data/boost diff --git a/scipy-0.10.1/PKG-INFO b/scipy-0.10.1/PKG-INFO deleted file mode 100644 index 669cb961d4..0000000000 --- a/scipy-0.10.1/PKG-INFO +++ /dev/null @@ -1,41 +0,0 @@ -Metadata-Version: 1.0 -Name: scipy -Version: 0.10.1 -Summary: SciPy: Scientific Library for Python -Home-page: http://www.scipy.org -Author: SciPy Developers -Author-email: scipy-dev@scipy.org -License: BSD -Download-URL: http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531 -Description: SciPy (pronounced "Sigh Pie") is open-source software for mathematics, - science, and engineering. The SciPy library - depends on NumPy, which provides convenient and fast N-dimensional - array manipulation. The SciPy library is built to work with NumPy - arrays, and provides many user-friendly and efficient numerical - routines such as routines for numerical integration and optimization. - Together, they run on all popular operating systems, are quick to - install, and are free of charge. NumPy and SciPy are easy to use, - but powerful enough to be depended upon by some of the world's - leading scientists and engineers. If you need to manipulate - numbers on a computer and display or publish the results, - give SciPy a try! 
- - -Platform: Windows -Platform: Linux -Platform: Solaris -Platform: Mac OS-X -Platform: Unix -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Science/Research -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved -Classifier: Programming Language :: C -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Topic :: Software Development -Classifier: Topic :: Scientific/Engineering -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: POSIX -Classifier: Operating System :: Unix -Classifier: Operating System :: MacOS diff --git a/scipy-0.10.1/README.txt b/scipy-0.10.1/README.txt deleted file mode 100644 index 2e76bd4feb..0000000000 --- a/scipy-0.10.1/README.txt +++ /dev/null @@ -1,135 +0,0 @@ -================================================= -Developing SciPy -================================================= - -.. Contents:: - - -What is SciPy? --------------- - -SciPy (pronounced "Sigh Pie") is open-source software for mathematics, -science, and engineering. It includes modules for statistics, optimization, -integration, linear algebra, Fourier transforms, signal and image processing, -ODE solvers, and more. It is also the name of a very popular conference on -scientific programming with Python. - -The SciPy library depends on NumPy, which provides convenient and fast -N-dimensional array manipulation. The SciPy library is built to work with -NumPy arrays, and provides many user-friendly and efficient numerical routines -such as routines for numerical integration and optimization. Together, they -run on all popular operating systems, are quick to install, and are free of -charge. NumPy and SciPy are easy to use, but powerful enough to be depended -upon by some of the world's leading scientists and engineers. If you need to -manipulate numbers on a computer and display or publish the results, give -SciPy a try! 
- - -SciPy structure ---------------- - -SciPy aims at being a robust and efficient "super-package" of a number -of modules, each of a non-trivial size and complexity. In order for -"SciPy integration" to work flawlessly, all SciPy modules must follow -certain rules that are described in this document. Hopefully this -document will be helpful for SciPy contributors and developers as a -basic reference about the structure of the SciPy package. - -Currently SciPy consists of the following files and directories: - - INSTALL.txt - SciPy prerequisites, installation, testing, and troubleshooting. - - THANKS.txt - SciPy developers and contributors. Please keep it up to date!! - - README.txt - SciPy structure (this document). - - setup.py - Script for building and installing SciPy. - - MANIFEST.in - Additions to distutils-generated SciPy tar-balls. Its usage is - deprecated. - - scipy/ - Contains SciPy __init__.py and the directories of SciPy modules. - -SciPy modules -+++++++++++++ - -In the following, a *SciPy module* is defined as a Python package, say -xxx, that is located in the scipy/ directory. All SciPy modules should -follow the following conventions: - -* Ideally, each SciPy module should be as self-contained as possible. - That is, it should have minimal dependencies on other packages or - modules. Even dependencies on other SciPy modules should be kept to a - minimum. A dependency on NumPy is of course assumed. - -* Directory ``xxx/`` must contain - - + a file ``setup.py`` that defines - ``configuration(parent_package='',top_path=None)`` function. - See below for more details. - - + a file ``info.py``. See below more details. - -* Directory ``xxx/`` may contain - - + a directory ``tests/`` that contains files ``test_.py`` - corresponding to modules ``xxx/{.py,.so,/}``. See below for - more details. - - + a file ``MANIFEST.in`` that may contain only ``include setup.py`` line. 
- DO NOT specify sources in MANIFEST.in, you must specify all sources - in setup.py file. Otherwise released SciPy tarballs will miss these sources. - - + a directory ``docs/`` for documentation. - -For details, read: - - http://projects.scipy.org/numpy/wiki/DistutilsDoc - - -Documentation -------------- - -The documentation site is here - http://docs.scipy.org - -Web sites ---------- - -The user's site is here - http://www.scipy.org/ - -The developer's site is here - http://projects.scipy.org/scipy/wiki - - -Mailing Lists -------------- - -Please see the developer's list here - http://projects.scipy.org/mailman/listinfo/scipy-dev - - -Bug reports ------------ - -To search for bugs, please use the NIPY Bug Tracker at - http://projects.scipy.org/scipy/query - -To report a bug, please use the NIPY Bug Tracker at - http://projects.scipy.org/scipy/newticket - - -License information -------------------- - -See the file "LICENSE" for information on the history of this -software, terms & conditions for usage, and a DISCLAIMER OF ALL -WARRANTIES. - diff --git a/scipy-0.10.1/THANKS.txt b/scipy-0.10.1/THANKS.txt deleted file mode 100644 index 2d6c42a55a..0000000000 --- a/scipy-0.10.1/THANKS.txt +++ /dev/null @@ -1,106 +0,0 @@ -SciPy is an open source library of routines for science and engineering -using Python. It is a community project sponsored by Enthought, Inc. -SciPy originated with code contributions by Travis Oliphant, Pearu -Peterson, and Eric Jones. Travis Oliphant and Eric Jones each contributed -about half the initial code. Pearu Peterson developed f2py, which is the -integral to wrapping the many Fortran libraries used in SciPy. - -Since then many people have contributed to SciPy, both in code development, -suggestions, and financial support. Below is a partial list. If you've -been left off, please email the "SciPy Developers List" . - -Please add names as needed so that we can keep up with all the contributors. - -Kumar Appaiah for Dolph Chebyshev window. 
-Nathan Bell for sparsetools, help with scipy.sparse and scipy.splinalg. -Robert Cimrman for UMFpack wrapper for sparse matrix module. -David M. Cooke for improvements to system_info, and LBFGSB wrapper. -Aric Hagberg for ARPACK wrappers, help with splinalg.eigen. -Chuck Harris for Zeros package in optimize (1d root-finding algorithms). -Prabhu Ramachandran for improvements to gui_thread. -Robert Kern for improvements to stats and bug-fixes. -Jean-Sebastien Roy for fmin_tnc code which he adapted from Stephen Nash's - original Fortran. -Ed Schofield for Maximum entropy and Monte Carlo modules, help with - sparse matrix module. -Travis Vaught for numerous contributions to annual conference and community - web-site and the initial work on stats module clean up. -Jeff Whitaker for Mac OS X support. -David Cournapeau for bug-fixes, refactoring of fftpack and cluster, - implementing the numscons build, building Windows binaries and - adding single precision FFT. -Damian Eads for hierarchical clustering, dendrogram plotting, - distance functions in spatial package, vq documentation. -Anne Archibald for kd-trees and nearest neighbor in scipy.spatial. -Pauli Virtanen for Sphinx documentation generation, online documentation - framework and interpolation bugfixes. -Josef Perktold for major improvements to scipy.stats and its test suite and - fixes and tests to optimize.curve_fit and leastsq. -David Morrill for getting the scoreboard test system up and running. -Louis Luangkesorn for providing multiple tests for the stats module. -Jochen Kupper for the zoom feature in the now-deprecated plt plotting module. -Tiffany Kamm for working on the community web-site. -Mark Koudritsky for maintaining the web-site. -Andrew Straw for help with the web-page, documentation, packaging, - testing and work on the linalg module. -Stefan van der Walt for numerous bug-fixes, testing and documentation. -Jarrod Millman for release management, community coordination, and code - clean up. 
-Pierre Gerard-Marchant for statistical masked array functionality. -Alan McIntyre for updating SciPy tests to use the new NumPy test framework. -Matthew Brett for work on the Matlab file IO, bug-fixes, and improvements - to the testing framework. -Gary Strangman for the scipy.stats package. -Tiziano Zito for generalized symmetric and hermitian eigenvalue problem - solver. -Chris Burns for bug-fixes. -Per Brodtkorb for improvements to stats distributions. -Neilen Marais for testing and bug-fixing in the ARPACK wrappers. -Johannes Loehnert and Bart Vandereycken for fixes in the linalg - module. -David Huard for improvements to the interpolation interface. -David Warde-Farley for converting the ndimage docs to ReST. -Uwe Schmitt for wrapping non-negative least-squares. -Ondrej Certik for Debian packaging. -Paul Ivanov for porting Numeric-style C code to the new NumPy API. -Ariel Rokem for contributions on percentileofscore fixes and tests. -Yosef Meller for tests in the optimization module. -Ralf Gommers for release management, code clean up and improvements - to doc-string generation. -Bruce Southey for bug-fixes and improvements to scipy.stats. -Ernest Adrogué for the Skellam distribution. -Enzo Michelangeli for a fast kendall tau test. -David Simcha for a fisher exact test. -Warren Weckesser for bug-fixes, cleanups, and several new features. -Fabian Pedregosa for linear algebra bug-fixes, new features and refactoring. -Jake Vanderplas for wrapping ARPACK's generalized and shift-invert modes - and improving its tests. -Collin RM Stocks for wrapping pivoted QR decomposition. -Martin Teichmann for improving scipy.special.ellipk & agm accuracy. -Jeff Armstrong for discrete state-space and linear time-invariant functionality - in scipy.signal. -Mark Wiebe for fixing type casting after changes in Numpy. -Andrey Smirnov for improvements to FIR filter design. -Anthony Scopatz for help with code review and merging. -Lars Buitinck for improvements to io.arff. 
-Scott Sinclair for documentation improvements and some bug fixes. -Gael Varoquaux for cleanups in scipy.sparse. -Skipper Seabold for a fix to special.gammainc. -Wes McKinney for a fix to special.gamma. -Thouis (Ray) Jones for bug fixes in ndimage. -Yaroslav Halchenko for a bug fix in ndimage. -Thomas Robitaille for the IDL 'save' reader. -Fazlul Shahriar for fixes to the NetCDF3 I/O. -Chris Jordan-Squire for bug fixes, documentation improvements and - scipy.special.logit & expit. - - -Institutions ------------- - -Enthought for providing resources and finances for development of SciPy. -Brigham Young University for providing resources for students to work on SciPy. -Agilent which gave a genereous donation for support of SciPy. -UC Berkeley for providing travel money and hosting numerous sprints. -The University of Stellenbosch for funding the development of - the SciKits portal. diff --git a/scipy-0.10.1/TOCHANGE.txt b/scipy-0.10.1/TOCHANGE.txt deleted file mode 100644 index 75bee90cf1..0000000000 --- a/scipy-0.10.1/TOCHANGE.txt +++ /dev/null @@ -1,57 +0,0 @@ -================================================= -Development Plans for SciPy 1.0 -================================================= - -See http://www.scipy.org/scipy/scipy/wiki/DevelopmentPlan -for updates of this document. - -.. Contents:: - - -General --------- - -* distributions heavy use of extract and insert (could use fancy indexing?) -- but we should wait until we learn how slow fancy indexing is....) - -* Use of old Numeric C-API. Using it means an extra C-level function call, but ... 
- -* Make use of type addition to extend certain ufuncs with cephes quad types - -* Use finfo(foo).bar instead of limits.foo_bar (see r3358 and r3362) - -* Comply with Python Style Guide - - * use CamelCase for class names - -* Improve testing (e.g., increased coverage) - - - -Documentation -------------- - -See http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines - -* use new docstring format - - -Packages --------- - -* consider reorganizing the namespace - - * scipy.tests, scipy.misc, scipy.stsci - -IO (scipy.io) -+++++++++++++ - -* io rewritten to use internal writing capabilities of arrays - -Image Processing (scipy.ndimage) -++++++++++++++++++++++++++++++++ - - -Statistical Analysis (scipy.stats) -++++++++++++++++++++++++++++++++++ - -* add statistical models diff --git a/scipy-0.10.1/bento.info b/scipy-0.10.1/bento.info deleted file mode 100644 index 89df4db2c5..0000000000 --- a/scipy-0.10.1/bento.info +++ /dev/null @@ -1,82 +0,0 @@ -Name: scipy -Version: 0.10.0 -Summary: SciPy: Scientific Library for Python -Url: http://www.scipy.org -DownloadUrl: http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531 -Description: - SciPy (pronounced "Sigh Pie") is open-source software for mathematics, - science, and engineering. The SciPy library depends on NumPy, which - provides convenient and fast N-dimensional array manipulation. The SciPy - library is built to work with NumPy arrays, and provides many user-friendly - and efficient numerical routines such as routines for numerical integration - and optimization. Together, they run on all popular operating systems, are - quick to install, and are free of charge. NumPy and SciPy are easy to use, - but powerful enough to be depended upon by some of the world's leading - scientists and engineers. If you need to manipulate numbers on a computer - and display or publish the results, give SciPy a try! 
-Maintainer: SciPy Developers -MaintainerEmail: scipy-dev@scipy.org -License: BSD -Platforms: Windows,Linux,Solaris,Mac OS-X,Unix -Classifiers: - Development Status :: 4 - Beta, - Intended Audience :: Science/Research, - Intended Audience :: Developers, - License :: OSI Approved, - Programming Language :: C, - Programming Language :: Python, - Programming Language :: Python :: 3, - Topic :: Software Development, - Topic :: Scientific/Engineering, - Operating System :: Microsoft :: Windows, - Operating System :: POSIX, - Operating System :: Unix, - Operating System :: MacOS - -DataFiles: tests - TargetDir: $sitedir/scipy - SourceDir: scipy - Files: - constants/tests/*.py, - cluster/tests/*.py, - cluster/tests/*.txt, - fftpack/tests/*.py, - fftpack/tests/*.npz, - integrate/tests/*.py, - interpolate/tests/*.py, - io/arff/tests/*.py, - io/matlab/tests/*.py, - io/tests/*.py, - io/tests/data/*.nc, - io/tests/data/*.wav, - io/tests/data/*.sav, - io/matlab/tests/data/*.txt, - io/matlab/tests/data/*.mat, - io/arff/tests/data/*.arff, - lib/blas/tests/*.py, - lib/lapack/tests/*.py, - linalg/tests/*.py, - misc/tests/*.py, - misc/tests/data/*.png, - optimize/tests/*.py, - signal/tests/*.py, - sparse/linalg/dsolve/tests/*.py, - sparse/linalg/dsolve/umfpack/tests/*.py, - sparse/linalg/eigen/arpack/tests/*.py, - sparse/linalg/eigen/lobpcg/tests/*.py, - sparse/linalg/isolve/tests/*.py, - sparse/linalg/tests/*.py, - sparse/tests/*.py, - spatial/tests/*.py, - spatial/tests/*.txt, - special/tests/*.py, - special/tests/data/*.npz, - stats/tests/*.py - -Recurse: scipy - -HookFile: bscript - -Library: - Packages: - scipy diff --git a/scipy-0.10.1/bscript b/scipy-0.10.1/bscript deleted file mode 100644 index 01e80d110a..0000000000 --- a/scipy-0.10.1/bscript +++ /dev/null @@ -1,305 +0,0 @@ -import os -import sys -import os.path as op - -from numpy.distutils.misc_util \ - import \ - get_numpy_include_dirs, get_pkg_info -from numpy.distutils.conv_template \ - import \ - process_str as 
process_c_str -from numpy.distutils.from_template \ - import \ - process_str as process_f_str, resolve_includes - -from bento.commands import hooks -from bento.commands.extras.waf \ - import \ - ConfigureWafContext, BuildWafContext, register_options -from bento.installed_package_description \ - import \ - InstalledSection - -import waflib -from waflib import Options - -# FIXME: add this to numpy so that we can reuse it -class CTemplateTask(waflib.Task.Task): - color = 'BLUE' - #ext_out = ['.c', '.pyf'] - def run(self): - s = self.inputs[0] - cnt = s.read() - writestr = process_c_str(cnt) - o = self.outputs[0] - o.write(writestr) - -class FTemplateTask(waflib.Task.Task): - color = 'BLUE' - ext_out = ['.pyf'] - def run(self): - s = self.inputs[0] - lines = resolve_includes(s.abspath()) - writestr = process_f_str("".join(lines)) - o = self.outputs[0] - o.write(writestr) - return 0 - -@waflib.TaskGen.extension(".src") -def c_template(self, node): - output_name = node.name.rsplit(".", 1)[0] - output = node.parent.find_or_declare(output_name) - assert output.is_bld() - - ext = output.name.rsplit(".")[1] - if ext in ["f", "pyf", "ipyf"]: - tsk = self.create_task('FTemplateTask', node, output) - if "fc" in self.features: - self.source.append(output) - else: - raise ValueError("FTemplateTask without feature 'fc' ?") - elif ext in ["c"]: - tsk = self.create_task('CTemplateTask', node, output) - if "c" in self.features: - self.source.append(output) - else: - raise ValueError("FTemplateTask without feature 'c' ?") - else: - raise ValueError("Unknown extension in templating: %r" % ext) - -# FIXME: abstract those module gen tasks... 
-class write_module(waflib.Task.Task): - color = "CYAN" - vars = ["CONTENT"] - def run(self): - # FIXME: put actual data here - self.outputs[0].write(self.env.CONTENT) - -@waflib.TaskGen.feature("gen_pymodule") -def process_write_config(self): - if not hasattr(self, "content"): - raise ValueError("task gen %r expects a 'content' argument" % self.name) - else: - self.env.CONTENT = self.content - output = self.path.find_or_declare(self.target) - name = getattr(self, "name", None) or self.target - - bento_context = self.bld.bento_context - b_output = bento_context.build_node.make_node(output.bldpath()) - bento_context.outputs_registry.register_outputs( - "gen_pymodule", name, [b_output], bento_context.build_node, "$sitedir") - - tsk = self.create_task("write_module") - tsk.set_outputs(output) - return tsk - -@hooks.startup -def startup(context): - context.register_context("configure", ConfigureWafContext) - context.register_context("build", BuildWafContext) - -@hooks.options -def options(context): - register_options(context) - -@hooks.pre_configure -def pre_configure(context): - opts = context.waf_options_context - conf = context.waf_context - - opts.load("compiler_cxx") - opts.load("compiler_fc") - opts.load("f2py") - Options.options.check_fc = "gfortran" - Options.options.check_cxx_compiler = "g++" - if sys.platform == "win32" and conf.env.CC_NAME == "msvc": - Options.options.check_fc = "ifort" - Options.options.check_cxx_compiler = "msvc" - - conf.load("compiler_cxx") - conf.load("compiler_fc") - conf.load("f2py") - - if conf.env.CC_NAME == 'gcc': - conf.env.append_value('CFLAGS_PYEXT', "-Wfatal-errors") - conf.env.append_value('CXXFLAGS_PYEXT', "-Wfatal-errors") - - if sys.platform == "darwin": - # FIXME: fix upstream waf tool to work on mac os X - for flag in [conf.env.CFLAGS_PYEXT, conf.env.LINKFLAGS_PYEXT, - conf.env.CXXFLAGS_PYEXT, conf.env.FCFLAGS]: - remove_flag_prevalue("ppc", flag) - conf.env.append_value('FCFLAGS', ['-arch', 'i386']) - 
conf.env.append_value('CFLAGS', ['-arch', 'i386']) - conf.env["MACOSX_DEPLOYMENT_TARGET"] = "10.4" - - conf.check_fortran_verbose_flag() - conf.check_fortran_clib() - conf.check_fortran_dummy_main() - u, du, c = conf.check_fortran_mangling() - _set_mangling_var(conf, u, du, c) - -def _set_mangling_var(conf, u, du, case, f2pycompat=True): - env = conf.env - macros = [] - - if du == '_': - env['F77_UNDERSCORE_G77'] = 1 - macros.append('F77_UNDERSCORE_G77') - if f2pycompat: - macros.append('UNDERSCORE_G77') - else: - env['F77_UNDERSCORE_G77'] = 0 - - if u == '_': - env['F77_NO_APPEND_FORTRAN'] = 0 - else: - env['F77_NO_APPEND_FORTRAN'] = 1 - macros.append('F77_NO_APPEND_FORTRAN') - if f2pycompat: - macros.append('NO_APPEND_FORTRAN') - - if case == 'upper': - env['F77_UPPERCASE_FORTRAN'] = 1 - macros.append('F77_UPPERCASE_FORTRAN') - if f2pycompat: - macros.append('UPPERCASE_FORTRAN') - else: - env['F77_UPPERCASE_FORTRAN'] = 0 - - env.DEFINES.extend(macros) - -def remove_flag(name, flag): - while True: - if name in flag: - i = flag.index(name) - flag.pop(i) - else: - break - -def remove_flag_postvalue(name, flag): - while True: - if name in flag: - i = flag.index(name) - flag.pop(i) - flag.pop(i) - else: - break - -def remove_flag_prevalue(name, flag): - while True: - if name in flag: - i = flag.index(name) - flag.pop(i-1) - flag.pop(i-1) - else: - break - -@hooks.post_configure -def post_configure(context): - conf = context.waf_context - - conf.env.INCLUDES = get_numpy_include_dirs() - - conf.env.HAS_FBLAS = False - conf.env.HAS_CBLAS = False - conf.env.HAS_FLAPACK = False - conf.env.HAS_CLAPACK = False - conf.env.HAS_ATLAS = False - - if sys.platform == "win32": - mkl_libs = "mkl_lapack95,mkl_blas95,mkl_intel_c,mkl_intel_thread,mkl_core,libiomp5md".split(",") - mkl_base = r"C:\Program Files\Intel\Compiler\11.1\051" - conf.env.INCLUDES.append("%s\mkl\include" % mkl_base) - conf.env.LIBPATH.extend(["%s\mkl\ia32\lib" % mkl_base, - "%s\lib\ia32" % mkl_base]) - - try: - 
conf.check_cc(lib=mkl_libs, msg="Checking for MKL (CBLAS)", - uselib_store="CBLAS") - conf.env.HAS_CBLAS = True - - conf.check_cc(lib=mkl_libs, msg="Checking for MKL (FLAPACK)", - uselib_store="FLAPACK") - conf.env.HAS_FLAPACK = True - - conf.check_cc(lib=mkl_libs, msg="Checking for MKL (FBLAS)", - uselib_store="FBLAS") - conf.env.HAS_FBLAS = True - - except waflib.Errors.ConfigurationError: - pass - conf.env['FCFLAGS'] = [] - conf.env['FCFLAGS'].extend(['/MD']) - elif sys.platform == "darwin": - try: - conf.check(framework="Accelerate", msg="Checking for framework Accelerate", uselib_store="CBLAS") - conf.env.HAS_CBLAS = True - - conf.check(framework="Accelerate", msg="Checking for framework Accelerate", uselib_store="FLAPACK") - conf.env.HAS_FLAPACK = True - - conf.check(framework="Accelerate", msg="Checking for framework Accelerate", uselib_store="FBLAS") - conf.env.HAS_FBLAS = True - - except waflib.Errors.ConfigurationError: - pass - # FIXME: bug in waf ? - conf.env.FRAMEWORK_ST = ["-framework"] - else: - try: - conf.check_cc(lib=["f77blas", "cblas", "atlas"], uselib_store="FBLAS") - conf.env.HAS_FBLAS = True - - conf.check_cc(lib=["cblas", "atlas"], uselib_store="CBLAS") - conf.env.HAS_CBLAS = True - - conf.check_cc(lib=["lapack", "f77blas", "cblas", "atlas"], uselib_store="FLAPACK") - conf.env.HAS_FLAPACK = True - - conf.check_cc(lib=["lapack", "f77blas", "cblas", "atlas"], uselib_store="CLAPACK") - - conf.env.HAS_CLAPACK = True - except waflib.Errors.ConfigurationError: - pass - - # FIXME: waf bug ? 
Did not find a way to compile both fortran and c - # files with -fPIC - fcflags = conf.env.FCFLAGS - fcflags.extend(["-fPIC"]) - conf.env.FCFLAGS = fcflags - - conf.env.CFLAGS_cstlib = ["-fPIC"] - conf.env.FCFLAGS_cstlib = ["-fPIC"] - - if not (conf.env.HAS_FBLAS and conf.env.HAS_FLAPACK): - raise waflib.Errors.ConfigurationError("You need blas and lapack") - - npymath_info = get_pkg_info("npymath") - conf.parse_flags(npymath_info.cflags() + " " + npymath_info.libs(), "NPYMATH") - -@hooks.pre_build -def pre_build(context): - bld = context.waf_context - - def iregistrer(category, name, nodes, from_node, target_dir): - source_dir = op.join("$_srcrootdir", from_node.bldpath()) - files = [n.path_from(from_node) for n in nodes] - return InstalledSection.from_source_target_directories(category, name, source_dir, target_dir, files) - context.isection_registry.register_category("gen_pymodule", iregistrer) - context.outputs_registry.register_category("gen_pymodule", "pythonfiles") - bld(features="gen_pymodule", - target="scipy/__config__.py", - content="""\ -def show(): - pass -""", - always=True) - bld(features="gen_pymodule", - target="scipy/version.py", - content="""\ -version = '0.9.0' -short_version = version -is_released = True -""", - always=True) diff --git a/scipy-0.10.1/doc/Makefile b/scipy-0.10.1/doc/Makefile deleted file mode 100644 index ae1e7b1d29..0000000000 --- a/scipy-0.10.1/doc/Makefile +++ /dev/null @@ -1,165 +0,0 @@ -# Makefile for Sphinx documentation -# - -PYVER = -PYTHON = python$(PYVER) - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = LANG=C sphinx-build -PAPER = - -FILES= - -NEED_AUTOSUMMARY = $(shell $(PYTHON) -c 'import sphinx; print sphinx.__version__ < "0.7" and "1" or ""') - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html web pickle htmlhelp latex changes linkcheck \ - dist dist-build - -#------------------------------------------------------------------------------ - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " pickle to make pickle files (usable by e.g. sphinx-web)" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview over all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " dist PYVER=... to make a distribution-ready tree" - @echo " upload USER=... to upload results to docs.scipy.org" - -clean: - -rm -rf build/* source/generated - - -#------------------------------------------------------------------------------ -# Automated generation of all documents -#------------------------------------------------------------------------------ - -# Build the current scipy version, and extract docs from it. -# We have to be careful of some issues: -# -# - Everything must be done using the same Python version -# - We must use eggs (otherwise they might override PYTHONPATH on import). -# - Different versions of easy_install install to different directories (!) 
-# - -INSTALL_DIR = $(CURDIR)/build/inst-dist/ -INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages - -DIST_VARS=PYTHON="PYTHONPATH=$(INSTALL_PPH):$$PYTHONPATH python$(PYVER)" SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH):$$PYTHONPATH python$(PYVER) `which sphinx-build`" - -UPLOAD_TARGET = $(USER)@docs.scipy.org:/home/docserver/www-root/doc/scipy/ - -upload: - @test -e build/dist || { echo "make dist is required first"; exit 1; } - @test output-is-fine -nt build/dist || { \ - echo "Review the output in build/dist, and do 'touch output-is-fine' before uploading."; exit 1; } - rsync -r -z --delete-after -p \ - $(if $(shell test -f build/dist/scipy-ref.pdf && echo "y"),, \ - --exclude '**-ref.pdf' --exclude '**-user.pdf') \ - $(if $(shell test -f build/dist/scipy-chm.zip && echo "y"),, \ - --exclude '**-chm.zip') \ - build/dist/ $(UPLOAD_TARGET) - -dist: - make $(DIST_VARS) real-dist - -real-dist: dist-build html - test -d build/latex || make latex - make -C build/latex all-pdf - -test -d build/htmlhelp || make htmlhelp-build - -rm -rf build/dist - mkdir -p build/dist - cp -r build/html build/dist/reference - touch build/dist/index.html - perl -pi -e 's#^\s*(
  • SciPy.*?Reference Guide.*?»
  • )\s*$$#
  • Numpy and Scipy Documentation »
  • $$1#;' build/dist/*.html build/dist/*/*.html build/dist/*/*/*.html - (cd build/html && zip -9qr ../dist/scipy-html.zip .) - cp build/latex/scipy*.pdf build/dist - -zip build/dist/scipy-chm.zip build/htmlhelp/scipy.chm - cd build/dist && tar czf ../dist.tar.gz * - chmod ug=rwX,o=rX -R build/dist - find build/dist -type d -print0 | xargs -0r chmod g+s - -dist-build: - rm -f ../dist/*.egg - cd .. && $(PYTHON) setupegg.py bdist_egg - install -d $(subst :, ,$(INSTALL_PPH)) - $(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg - - -#------------------------------------------------------------------------------ -# Basic Sphinx generation rules for different formats -#------------------------------------------------------------------------------ - -generate: build/generate-stamp -build/generate-stamp: $(wildcard source/*.rst) - mkdir -p build -ifeq ($(NEED_AUTOSUMMARY),1) - $(PYTHON) \ - ./sphinxext/autosummary_generate.py source/*.rst \ - -p dump.xml -o source/generated -endif - touch build/generate-stamp - -html: generate - mkdir -p build/html build/doctrees - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES) - $(PYTHON) postprocess.py html build/html/*.html - @echo - @echo "Build finished. The HTML pages are in build/html." - -pickle: generate - mkdir -p build/pickle build/doctrees - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle $(FILES) - @echo - @echo "Build finished; now you can process the pickle files or run" - @echo " sphinx-web build/pickle" - @echo "to start the sphinx-web server." - -web: pickle - -htmlhelp: generate - mkdir -p build/htmlhelp build/doctrees - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp $(FILES) - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in build/htmlhelp." 
- -htmlhelp-build: htmlhelp build/htmlhelp/scipy.chm -%.chm: %.hhp - -hhc.exe $^ - -latex: generate - mkdir -p build/latex build/doctrees - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex $(FILES) - $(PYTHON) postprocess.py tex build/latex/*.tex - perl -pi -e 's/\t(latex.*|pdflatex) (.*)/\t-$$1 -interaction batchmode $$2/' build/latex/Makefile - @echo - @echo "Build finished; the LaTeX files are in build/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -coverage: build - mkdir -p build/coverage build/doctrees - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) build/coverage $(FILES) - @echo "Coverage finished; see c.txt and python.txt in build/coverage" - -changes: generate - mkdir -p build/changes build/doctrees - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes $(FILES) - @echo - @echo "The overview file is in build/changes." - -linkcheck: generate - mkdir -p build/linkcheck build/doctrees - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck $(FILES) - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in build/linkcheck/output.txt." diff --git a/scipy-0.10.1/doc/postprocess.py b/scipy-0.10.1/doc/postprocess.py deleted file mode 100755 index 28e1e0e4db..0000000000 --- a/scipy-0.10.1/doc/postprocess.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python -""" -%prog MODE FILES... - -Post-processes HTML and Latex files output by Sphinx. -MODE is either 'html' or 'tex'. 
- -""" -import re, optparse - -def main(): - p = optparse.OptionParser(__doc__) - options, args = p.parse_args() - - if len(args) < 1: - p.error('no mode given') - - mode = args.pop(0) - - if mode not in ('html', 'tex'): - p.error('unknown mode %s' % mode) - - for fn in args: - f = open(fn, 'r') - try: - if mode == 'html': - lines = process_html(fn, f.readlines()) - elif mode == 'tex': - lines = process_tex(f.readlines()) - finally: - f.close() - - f = open(fn, 'w') - f.write("".join(lines)) - f.close() - -def process_html(fn, lines): - return lines - -def process_tex(lines): - """ - Remove unnecessary section titles from the LaTeX file, - and convert UTF-8 non-breaking spaces to Latex nbsps. - - """ - new_lines = [] - for line in lines: - if re.match(r'^\\(section|subsection|subsubsection|paragraph|subparagraph){(numpy|scipy)\.', line): - pass # skip! - else: - new_lines.append(line) - return new_lines - -if __name__ == "__main__": - main() diff --git a/scipy-0.10.1/doc/release/0.10.0-notes.rst b/scipy-0.10.1/doc/release/0.10.0-notes.rst deleted file mode 100644 index 5c4be62560..0000000000 --- a/scipy-0.10.1/doc/release/0.10.0-notes.rst +++ /dev/null @@ -1,213 +0,0 @@ -========================== -SciPy 0.10.0 Release Notes -========================== - -.. contents:: - -SciPy 0.10.0 is the culmination of 8 months of hard work. It contains -many new features, numerous bug-fixes, improved test coverage and -better documentation. There have been a limited number of deprecations -and backwards-incompatible changes in this release, which are documented -below. All users are encouraged to upgrade to this release, as there -are a large number of bug-fixes and optimizations. Moreover, our -development attention will now shift to bug-fix releases on the 0.10.x -branch, and on adding new features on the development master branch. - -Release highlights: - - - Support for Bento as optional build system. 
- - Support for generalized eigenvalue problems, and all shift-invert modes - available in ARPACK. - -This release requires Python 2.4-2.7 or 3.1- and NumPy 1.5 or greater. - - -New features -============ - -Bento: new optional build system --------------------------------- - -Scipy can now be built with `Bento `_. -Bento has some nice features like parallel builds and partial rebuilds, that -are not possible with the default build system (distutils). For usage -instructions see BENTO_BUILD.txt in the scipy top-level directory. - -Currently Scipy has three build systems, distutils, numscons and bento. -Numscons is deprecated and is planned and will likely be removed in the next -release. - - -Generalized and shift-invert eigenvalue problems in ``scipy.sparse.linalg`` ---------------------------------------------------------------------------- - -The sparse eigenvalue problem solver functions -``scipy.sparse.eigs/eigh`` now support generalized eigenvalue -problems, and all shift-invert modes available in ARPACK. - - -Discrete-Time Linear Systems (``scipy.signal``) ------------------------------------------------ - -Support for simulating discrete-time linear systems, including -``scipy.signal.dlsim``, ``scipy.signal.dimpulse``, and ``scipy.signal.dstep``, -has been added to SciPy. Conversion of linear systems from continuous-time to -discrete-time representations is also present via the -``scipy.signal.cont2discrete`` function. - - -Enhancements to ``scipy.signal`` --------------------------------- - -A Lomb-Scargle periodogram can now be computed with the new function -``scipy.signal.lombscargle``. - -The forward-backward filter function ``scipy.signal.filtfilt`` can now -filter the data in a given axis of an n-dimensional numpy array. -(Previously it only handled a 1-dimensional array.) Options have been -added to allow more control over how the data is extended before filtering. 
- -FIR filter design with ``scipy.signal.firwin2`` now has options to create -filters of type III (zero at zero and Nyquist frequencies) and IV (zero at zero -frequency). - - -Additional decomposition options (``scipy.linalg``) ---------------------------------------------------- - -A sort keyword has been added to the Schur decomposition routine -(``scipy.linalg.schur``) to allow the sorting of eigenvalues in -the resultant Schur form. - -Additional special matrices (``scipy.linalg``) ----------------------------------------------- - -The functions ``hilbert`` and ``invhilbert`` were added to ``scipy.linalg``. - - -Enhancements to ``scipy.stats`` -------------------------------- - -* The *one-sided form* of Fisher's exact test is now also implemented in - ``stats.fisher_exact``. -* The function ``stats.chi2_contingency`` for computing the chi-square test of - independence of factors in a contingency table has been added, along with - the related utility functions ``stats.contingency.margins`` and - ``stats.contingency.expected_freq``. - - -Basic support for Harwell-Boeing file format for sparse matrices ----------------------------------------------------------------- - -Both read and write are support through a simple function-based API, as well as -a more complete API to control number format. The functions may be found in -scipy.sparse.io. - -The following features are supported: - - * Read and write sparse matrices in the CSC format - * Only real, symmetric, assembled matrix are supported (RUA format) - - -Deprecated features -=================== - -``scipy.maxentropy`` --------------------- - -The maxentropy module is unmaintained, rarely used and has not been functioning -well for several releases. Therefore it has been deprecated for this release, -and will be removed for scipy 0.11. Logistic regression in scikits.learn is a -good alternative for this functionality. The ``scipy.maxentropy.logsumexp`` -function has been moved to ``scipy.misc``. 
- - -``scipy.lib.blas`` ------------------- - -There are similar BLAS wrappers in ``scipy.linalg`` and ``scipy.lib``. These -have now been consolidated as ``scipy.linalg.blas``, and ``scipy.lib.blas`` is -deprecated. - - -Numscons build system ---------------------- - -The numscons build system is being replaced by Bento, and will be removed in -one of the next scipy releases. - - -Backwards-incompatible changes -============================== - -The deprecated name `invnorm` was removed from ``scipy.stats.distributions``, -this distribution is available as `invgauss`. - -The following deprecated nonlinear solvers from ``scipy.optimize`` have been -removed:: - - - ``broyden_modified`` (bad performance) - - ``broyden1_modified`` (bad performance) - - ``broyden_generalized`` (equivalent to ``anderson``) - - ``anderson2`` (equivalent to ``anderson``) - - ``broyden3`` (obsoleted by new limited-memory broyden methods) - - ``vackar`` (renamed to ``diagbroyden``) - - -Other changes -============= - -``scipy.constants`` has been updated with the CODATA 2010 constants. - -``__all__`` dicts have been added to all modules, which has cleaned up the -namespaces (particularly useful for interactive work). - -An API section has been added to the documentation, giving recommended import -guidelines and specifying which submodules are public and which aren't. 
- - -Authors -======= - -This release contains work by the following people (contributed at least -one patch to this release, names in alphabetical order): - -* Jeff Armstrong + -* Matthew Brett -* Lars Buitinck + -* David Cournapeau -* FI$H 2000 + -* Michael McNeil Forbes + -* Matty G + -* Christoph Gohlke -* Ralf Gommers -* Yaroslav Halchenko -* Charles Harris -* Thouis (Ray) Jones + -* Chris Jordan-Squire + -* Robert Kern -* Chris Lasher + -* Wes McKinney + -* Travis Oliphant -* Fabian Pedregosa -* Josef Perktold -* Thomas Robitaille + -* Pim Schellart + -* Anthony Scopatz + -* Skipper Seabold + -* Fazlul Shahriar + -* David Simcha + -* Scott Sinclair + -* Andrey Smirnov + -* Collin RM Stocks + -* Martin Teichmann + -* Jake Vanderplas + -* Gaël Varoquaux + -* Pauli Virtanen -* Stefan van der Walt -* Warren Weckesser -* Mark Wiebe + - -A total of 35 people contributed to this release. -People with a "+" by their names contributed a patch for the first time. - diff --git a/scipy-0.10.1/doc/release/0.10.1-notes.rst b/scipy-0.10.1/doc/release/0.10.1-notes.rst deleted file mode 100644 index b3128fa813..0000000000 --- a/scipy-0.10.1/doc/release/0.10.1-notes.rst +++ /dev/null @@ -1,33 +0,0 @@ -========================== -SciPy 0.10.1 Release Notes -========================== - -.. contents:: - -SciPy 0.10.1 is a bug-fix release with no new features compared to 0.10.0. - -Main changes ------------- - -The most important changes are:: - -1. The single precision routines of ``eigs`` and ``eigsh`` in - ``scipy.sparse.linalg`` have been disabled (they internally use double - precision now). -2. A compatibility issue related to changes in NumPy macros has been fixed, in - order to make scipy 0.10.1 compile with the upcoming numpy 1.7.0 release. - - -Other issues fixed ------------------- - -- #835: stats: nan propagation in stats.distributions -- #1202: io: netcdf segfault -- #1531: optimize: make curve_fit work with method as callable. 
-- #1560: linalg: fixed mistake in eig_banded documentation. -- #1565: ndimage: bug in ndimage.variance -- #1457: ndimage: standard_deviation does not work with sequence of indexes -- #1562: cluster: segfault in linkage function -- #1568: stats: One-sided fisher_exact() returns `p` < 1 for 0 successful attempts -- #1575: stats: zscore and zmap handle the axis keyword incorrectly - diff --git a/scipy-0.10.1/doc/release/0.7.0-notes.rst b/scipy-0.10.1/doc/release/0.7.0-notes.rst deleted file mode 100644 index 19f1cef8d4..0000000000 --- a/scipy-0.10.1/doc/release/0.7.0-notes.rst +++ /dev/null @@ -1,348 +0,0 @@ -========================= -SciPy 0.7.0 Release Notes -========================= - -.. contents:: - -SciPy 0.7.0 is the culmination of 16 months of hard work. It contains -many new features, numerous bug-fixes, improved test coverage and -better documentation. There have been a number of deprecations and -API changes in this release, which are documented below. All users -are encouraged to upgrade to this release, as there are a large number -of bug-fixes and optimizations. Moreover, our development attention -will now shift to bug-fix releases on the 0.7.x branch, and on adding -new features on the development trunk. This release requires Python -2.4 or 2.5 and NumPy 1.2 or greater. - -Please note that SciPy is still considered to have "Beta" status, as -we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a -major milestone in the development of SciPy, after which changing the -package structure or API will be much more difficult. Whilst these -pre-1.0 releases are considered to have "Beta" status, we are -committed to making them as bug-free as possible. For example, in -addition to fixing numerous bugs in this release, we have also doubled -the number of unit tests since the last release. - -However, until the 1.0 release, we are aggressively reviewing and -refining the functionality, organization, and interface. 
This is being -done in an effort to make the package as coherent, intuitive, and -useful as possible. To achieve this, we need help from the community -of users. Specifically, we need feedback regarding all aspects of the -project - everything - from which algorithms we implement, to details -about our function's call signatures. - -Over the last year, we have seen a rapid increase in community -involvement, and numerous infrastructure improvements to lower the -barrier to contributions (e.g., more explicit coding standards, -improved testing infrastructure, better documentation tools). Over -the next year, we hope to see this trend continue and invite everyone -to become more involved. - -Python 2.6 and 3.0 -================== - -A significant amount of work has gone into making SciPy compatible -with Python 2.6; however, there are still some issues in this regard. -The main issue with 2.6 support is NumPy. On UNIX (including Mac OS -X), NumPy 1.2.1 mostly works, with a few caveats. On Windows, there -are problems related to the compilation process. The upcoming NumPy -1.3 release will fix these problems. Any remaining issues with 2.6 -support for SciPy 0.7 will be addressed in a bug-fix release. - -Python 3.0 is not supported at all; it requires NumPy to be ported to -Python 3.0. This requires immense effort, since a lot of C code has -to be ported. The transition to 3.0 is still under consideration; -currently, we don't have any timeline or roadmap for this transition. - -Major documentation improvements -================================ - -SciPy documentation is greatly improved; you can view a HTML reference -manual `online `__ or download it as a PDF -file. The new reference guide was built using the popular `Sphinx tool -`__. - -This release also includes an updated tutorial, which hadn't been -available since SciPy was ported to NumPy in 2005. Though not -comprehensive, the tutorial shows how to use several essential parts -of Scipy. 
It also includes the ``ndimage`` documentation from the -``numarray`` manual. - -Nevertheless, more effort is needed on the documentation front. -Luckily, contributing to Scipy documentation is now easier than -before: if you find that a part of it requires improvements, and want -to help us out, please register a user name in our web-based -documentation editor at http://docs.scipy.org/ and correct the issues. - -Running Tests -============= - -NumPy 1.2 introduced a new testing framework based on `nose -`__. Starting with -this release, SciPy now uses the new NumPy test framework as well. -Taking advantage of the new testing framework requires ``nose`` -version 0.10, or later. One major advantage of the new framework is -that it greatly simplifies writing unit tests - which has all ready -paid off, given the rapid increase in tests. To run the full test -suite:: - - >>> import scipy - >>> scipy.test('full') - -For more information, please see `The NumPy/SciPy Testing Guide -`__. - -We have also greatly improved our test coverage. There were just over -2,000 unit tests in the 0.6.0 release; this release nearly doubles -that number, with just over 4,000 unit tests. - -Building SciPy -============== - -Support for NumScons has been added. NumScons is a tentative new build -system for NumPy/SciPy, using `SCons `__ at its -core. - -SCons is a next-generation build system, intended to replace the -venerable ``Make`` with the integrated functionality of -``autoconf``/``automake`` and ``ccache``. Scons is written in Python -and its configuration files are Python scripts. NumScons is meant to -replace NumPy's custom version of ``distutils`` providing more -advanced functionality, such as ``autoconf``, improved fortran -support, more tools, and support for ``numpy.distutils``/``scons`` -cooperation. - -Sandbox Removed -=============== - -While porting SciPy to NumPy in 2005, several packages and modules -were moved into ``scipy.sandbox``. 
The sandbox was a staging ground -for packages that were undergoing rapid development and whose APIs -were in flux. It was also a place where broken code could live. The -sandbox has served its purpose well, but was starting to create -confusion. Thus ``scipy.sandbox`` was removed. Most of the code was -moved into ``scipy``, some code was made into a ``scikit``, and the -remaining code was just deleted, as the functionality had been -replaced by other code. - -Sparse Matrices -=============== - -Sparse matrices have seen extensive improvements. There is now -support for integer dtypes such as ``int8``, ``uint32``, etc. Two new -sparse formats were added: - -* new class ``dia_matrix`` : the sparse DIAgonal format -* new class ``bsr_matrix`` : the Block CSR format - -Several new sparse matrix construction functions were added: - -* ``sparse.kron`` : sparse Kronecker product -* ``sparse.bmat`` : sparse version of ``numpy.bmat`` -* ``sparse.vstack`` : sparse version of ``numpy.vstack`` -* ``sparse.hstack`` : sparse version of ``numpy.hstack`` - -Extraction of submatrices and nonzero values have been added: - -* ``sparse.tril`` : extract lower triangle -* ``sparse.triu`` : extract upper triangle -* ``sparse.find`` : nonzero values and their indices - -``csr_matrix`` and ``csc_matrix`` now support slicing and fancy -indexing (e.g., ``A[1:3, 4:7]`` and ``A[[3,2,6,8],:]``). Conversions -among all sparse formats are now possible: - -* using member functions such as ``.tocsr()`` and ``.tolil()`` -* using the ``.asformat()`` member function, e.g. ``A.asformat('csr')`` -* using constructors ``A = lil_matrix([[1,2]]); B = csr_matrix(A)`` - -All sparse constructors now accept dense matrices and lists of lists. -For example: - -* ``A = csr_matrix( rand(3,3) )`` and ``B = lil_matrix( [[1,2],[3,4]] )`` - -The handling of diagonals in the ``spdiags`` function has been changed. -It now agrees with the MATLAB(TM) function of the same name. 
- -Numerous efficiency improvements to format conversions and sparse -matrix arithmetic have been made. Finally, this release contains -numerous bugfixes. - -Statistics package -================== - -Statistical functions for masked arrays have been added, and are -accessible through ``scipy.stats.mstats``. The functions are similar -to their counterparts in ``scipy.stats`` but they have not yet been -verified for identical interfaces and algorithms. - -Several bugs were fixed for statistical functions, of those, -``kstest`` and ``percentileofscore`` gained new keyword arguments. - -Added deprecation warning for ``mean``, ``median``, ``var``, ``std``, -``cov``, and ``corrcoef``. These functions should be replaced by their -numpy counterparts. Note, however, that some of the default options -differ between the ``scipy.stats`` and numpy versions of these -functions. - -Numerous bug fixes to ``stats.distributions``: all generic methods now -work correctly, several methods in individual distributions were -corrected. However, a few issues remain with higher moments (``skew``, -``kurtosis``) and entropy. The maximum likelihood estimator, ``fit``, -does not work out-of-the-box for some distributions - in some cases, -starting values have to be carefully chosen, in other cases, the -generic implementation of the maximum likelihood method might not be -the numerically appropriate estimation method. - -We expect more bugfixes, increases in numerical precision and -enhancements in the next release of scipy. - -Reworking of IO package -======================= - -The IO code in both NumPy and SciPy is being extensively -reworked. NumPy will be where basic code for reading and writing NumPy -arrays is located, while SciPy will house file readers and writers for -various data formats (data, audio, video, images, matlab, etc.). 
- -Several functions in ``scipy.io`` have been deprecated and will be -removed in the 0.8.0 release including ``npfile``, ``save``, ``load``, -``create_module``, ``create_shelf``, ``objload``, ``objsave``, -``fopen``, ``read_array``, ``write_array``, ``fread``, ``fwrite``, -``bswap``, ``packbits``, ``unpackbits``, and ``convert_objectarray``. -Some of these functions have been replaced by NumPy's raw reading and -writing capabilities, memory-mapping capabilities, or array methods. -Others have been moved from SciPy to NumPy, since basic array reading -and writing capability is now handled by NumPy. - -The Matlab (TM) file readers/writers have a number of improvements: - -* default version 5 -* v5 writers for structures, cell arrays, and objects -* v5 readers/writers for function handles and 64-bit integers -* new struct_as_record keyword argument to ``loadmat``, which loads - struct arrays in matlab as record arrays in numpy -* string arrays have ``dtype='U...'`` instead of ``dtype=object`` -* ``loadmat`` no longer squeezes singleton dimensions, i.e. - ``squeeze_me=False`` by default - -New Hierarchical Clustering module -================================== - -This module adds new hierarchical clustering functionality to the -``scipy.cluster`` package. The function interfaces are similar to the -functions provided MATLAB(TM)'s Statistics Toolbox to help facilitate -easier migration to the NumPy/SciPy framework. Linkage methods -implemented include single, complete, average, weighted, centroid, -median, and ward. - -In addition, several functions are provided for computing -inconsistency statistics, cophenetic distance, and maximum distance -between descendants. The ``fcluster`` and ``fclusterdata`` functions -transform a hierarchical clustering into a set of flat clusters. 
Since -these flat clusters are generated by cutting the tree into a forest of -trees, the ``leaders`` function takes a linkage and a flat clustering, -and finds the root of each tree in the forest. The ``ClusterNode`` -class represents a hierarchical clustering as a field-navigable tree -object. ``to_tree`` converts a matrix-encoded hierarchical clustering -to a ``ClusterNode`` object. Routines for converting between MATLAB -and SciPy linkage encodings are provided. Finally, a ``dendrogram`` -function plots hierarchical clusterings as a dendrogram, using -matplotlib. - -New Spatial package -=================== - -The new spatial package contains a collection of spatial algorithms -and data structures, useful for spatial statistics and clustering -applications. It includes rapidly compiled code for computing exact -and approximate nearest neighbors, as well as a pure-python kd-tree -with the same interface, but that supports annotation and a variety of -other algorithms. The API for both modules may change somewhat, as -user requirements become clearer. - -It also includes a ``distance`` module, containing a collection of -distance and dissimilarity functions for computing distances between -vectors, which is useful for spatial statistics, clustering, and -kd-trees. Distance and dissimilarity functions provided include -Bray-Curtis, Canberra, Chebyshev, City Block, Cosine, Dice, Euclidean, -Hamming, Jaccard, Kulsinski, Mahalanobis, Matching, Minkowski, -Rogers-Tanimoto, Russell-Rao, Squared Euclidean, Standardized -Euclidean, Sokal-Michener, Sokal-Sneath, and Yule. - -The ``pdist`` function computes pairwise distance between all -unordered pairs of vectors in a set of vectors. The ``cdist`` computes -the distance on all pairs of vectors in the Cartesian product of two -sets of vectors. Pairwise distance matrices are stored in condensed -form; only the upper triangular is stored. ``squareform`` converts -distance matrices between square and condensed forms. 
- -Reworked fftpack package -======================== - -FFTW2, FFTW3, MKL and DJBFFT wrappers have been removed. Only (NETLIB) -fftpack remains. By focusing on one backend, we hope to add new -features - like float32 support - more easily. - -New Constants package -===================== - -``scipy.constants`` provides a collection of physical constants and -conversion factors. These constants are taken from CODATA Recommended -Values of the Fundamental Physical Constants: 2002. They may be found -at physics.nist.gov/constants. The values are stored in the dictionary -physical_constants as a tuple containing the value, the units, and the -relative precision - in that order. All constants are in SI units, -unless otherwise stated. Several helper functions are provided. - -New Radial Basis Function module -================================ - -``scipy.interpolate`` now contains a Radial Basis Function module. -Radial basis functions can be used for smoothing/interpolating -scattered data in n-dimensions, but should be used with caution for -extrapolation outside of the observed data range. - -New complex ODE integrator -========================== - -``scipy.integrate.ode`` now contains a wrapper for the ZVODE -complex-valued ordinary differential equation solver (by Peter -N. Brown, Alan C. Hindmarsh, and George D. Byrne). - -New generalized symmetric and hermitian eigenvalue problem solver -================================================================= - -``scipy.linalg.eigh`` now contains wrappers for more LAPACK symmetric -and hermitian eigenvalue problem solvers. Users can now solve -generalized problems, select a range of eigenvalues only, and choose -to use a faster algorithm at the expense of increased memory -usage. The signature of the ``scipy.linalg.eigh`` changed accordingly. 
- -Bug fixes in the interpolation package -====================================== - -The shape of return values from ``scipy.interpolate.interp1d`` used to -be incorrect, if interpolated data had more than 2 dimensions and the -axis keyword was set to a non-default value. This has been fixed. -Moreover, ``interp1d`` returns now a scalar (0D-array) if the input -is a scalar. Users of ``scipy.interpolate.interp1d`` may need to -revise their code if it relies on the previous behavior. - -Weave clean up -============== - -There were numerous improvements to ``scipy.weave``. ``blitz++`` was -relicensed by the author to be compatible with the SciPy license. -``wx_spec.py`` was removed. - -Known problems -============== - -Here are known problems with scipy 0.7.0: - -* weave test failures on windows: those are known, and are being revised. -* weave test failure with gcc 4.3 (std::labs): this is a gcc 4.3 bug. A - workaround is to add #include in - scipy/weave/blitz/blitz/funcs.h (line 27). You can make the change in - the installed scipy (in site-packages). diff --git a/scipy-0.10.1/doc/release/0.7.1-notes.rst b/scipy-0.10.1/doc/release/0.7.1-notes.rst deleted file mode 100644 index d500a4f37f..0000000000 --- a/scipy-0.10.1/doc/release/0.7.1-notes.rst +++ /dev/null @@ -1,88 +0,0 @@ -========================= -SciPy 0.7.1 Release Notes -========================= - -.. contents:: - -SciPy 0.7.1 is a bug-fix release with no new features compared to 0.7.0. - -scipy.io -======== - -Bugs fixed: - -- Several fixes in Matlab file IO - -scipy.odr -========= - -Bugs fixed: - -- Work around a failure with Python 2.6 - -scipy.signal -============ - -Memory leak in lfilter have been fixed, as well as support for array object - -Bugs fixed: - -- #880, #925: lfilter fixes -- #871: bicgstab fails on Win32 - - -scipy.sparse -============ - -Bugs fixed: - -- #883: scipy.io.mmread with scipy.sparse.lil_matrix broken -- lil_matrix and csc_matrix reject now unexpected sequences, - cf. 
http://thread.gmane.org/gmane.comp.python.scientific.user/19996 - -scipy.special -============= - -Several bugs of varying severity were fixed in the special functions: - -- #503, #640: iv: problems at large arguments fixed by new implementation -- #623: jv: fix errors at large arguments -- #679: struve: fix wrong output for v < 0 -- #803: pbdv produces invalid output -- #804: lqmn: fix crashes on some input -- #823: betainc: fix documentation -- #834: exp1 strange behavior near negative integer values -- #852: jn_zeros: more accurate results for large s, also in jnp/yn/ynp_zeros -- #853: jv, yv, iv: invalid results for non-integer v < 0, complex x -- #854: jv, yv, iv, kv: return nan more consistently when out-of-domain -- #927: ellipj: fix segfault on Windows -- #946: ellpj: fix segfault on Mac OS X/python 2.6 combination. -- ive, jve, yve, kv, kve: with real-valued input, return nan for out-of-domain - instead of returning only the real part of the result. - -Also, when ``scipy.special.errprint(1)`` has been enabled, warning -messages are now issued as Python warnings instead of printing them to -stderr. - - -scipy.stats -=========== - -- linregress, mannwhitneyu, describe: errors fixed -- kstwobign, norm, expon, exponweib, exponpow, frechet, genexpon, rdist, - truncexpon, planck: improvements to numerical accuracy in distributions - -Windows binaries for python 2.6 -=============================== - -python 2.6 binaries for windows are now included. The binary for python 2.5 -requires numpy 1.2.0 or above, and the one for python 2.6 requires numpy -1.3.0 or above. - -Universal build for scipy -========================= - -Mac OS X binary installer is now a proper universal build, and does not depend -on gfortran anymore (libgfortran is statically linked). The python 2.5 version -of scipy requires numpy 1.2.0 or above, the python 2.6 version requires numpy -1.3.0 or above. 
diff --git a/scipy-0.10.1/doc/release/0.7.2-notes.rst b/scipy-0.10.1/doc/release/0.7.2-notes.rst deleted file mode 100644 index 1891d8f120..0000000000 --- a/scipy-0.10.1/doc/release/0.7.2-notes.rst +++ /dev/null @@ -1,10 +0,0 @@ -========================= -SciPy 0.7.2 Release Notes -========================= - -.. contents:: - -SciPy 0.7.2 is a bug-fix release with no new features compared to 0.7.1. The -only change is that all C sources from Cython code have been regenerated with -Cython 0.12.1. This fixes the incompatibility between binaries of SciPy 0.7.1 -and NumPy 1.4. diff --git a/scipy-0.10.1/doc/release/0.8.0-notes.rst b/scipy-0.10.1/doc/release/0.8.0-notes.rst deleted file mode 100644 index 69f28ab7fd..0000000000 --- a/scipy-0.10.1/doc/release/0.8.0-notes.rst +++ /dev/null @@ -1,263 +0,0 @@ -========================= -SciPy 0.8.0 Release Notes -========================= - -.. contents:: - -SciPy 0.8.0 is the culmination of 17 months of hard work. It contains -many new features, numerous bug-fixes, improved test coverage and -better documentation. There have been a number of deprecations and -API changes in this release, which are documented below. All users -are encouraged to upgrade to this release, as there are a large number -of bug-fixes and optimizations. Moreover, our development attention -will now shift to bug-fix releases on the 0.8.x branch, and on adding -new features on the development trunk. This release requires Python -2.4 - 2.6 and NumPy 1.4.1 or greater. - -Please note that SciPy is still considered to have "Beta" status, as -we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a -major milestone in the development of SciPy, after which changing the -package structure or API will be much more difficult. Whilst these -pre-1.0 releases are considered to have "Beta" status, we are -committed to making them as bug-free as possible. 
- -However, until the 1.0 release, we are aggressively reviewing and -refining the functionality, organization, and interface. This is being -done in an effort to make the package as coherent, intuitive, and -useful as possible. To achieve this, we need help from the community -of users. Specifically, we need feedback regarding all aspects of the -project - everything - from which algorithms we implement, to details -about our function's call signatures. - -Python 3 -======== - -Python 3 compatibility is planned and is currently technically -feasible, since Numpy has been ported. However, since the Python 3 -compatible Numpy 1.5 has not been released yet, support for Python 3 -in Scipy is not yet included in Scipy 0.8. SciPy 0.9, planned for fall -2010, will very likely include experimental support for Python 3. - -Major documentation improvements -================================ - -SciPy documentation is greatly improved. - -Deprecated features -=================== - -Swapping inputs for correlation functions (scipy.signal) --------------------------------------------------------- - -Concern correlate, correlate2d, convolve and convolve2d. If the second input is -larger than the first input, the inputs are swapped before calling the -underlying computation routine. This behavior is deprecated, and will be -removed in scipy 0.9.0. - -Obsolete code deprecated (scipy.misc) -------------------------------------- - -The modules `helpmod`, `ppimport` and `pexec` from `scipy.misc` are deprecated. -They will be removed from SciPy in version 0.9. - -Additional deprecations ------------------------ - -* linalg: The function `solveh_banded` currently returns a tuple containing - the Cholesky factorization and the solution to the linear system. In - SciPy 0.9, the return value will be just the solution. -* The function `constants.codata.find` will generate a DeprecationWarning. 
- In Scipy version 0.8.0, the keyword argument 'disp' was added to the - function, with the default value 'True'. In 0.9.0, the default will be - 'False'. -* The `qshape` keyword argument of `signal.chirp` is deprecated. Use - the argument `vertex_zero` instead. -* Passing the coefficients of a polynomial as the argument `f0` to - `signal.chirp` is deprecated. Use the function `signal.sweep_poly` - instead. -* The `io.recaster` module has been deprecated and will be removed in 0.9.0. - -New features -============ - -DCT support (scipy.fftpack) ---------------------------- - -New realtransforms have been added, namely dct and idct for Discrete Cosine -Transform; type I, II and III are available. - -Single precision support for fft functions (scipy.fftpack) ----------------------------------------------------------- - -fft functions can now handle single precision inputs as well: fft(x) will -return a single precision array if x is single precision. - -At the moment, for FFT sizes that are not composites of 2, 3, and 5, the -transform is computed internally in double precision to avoid rounding error in -FFTPACK. - -Correlation functions now implement the usual definition (scipy.signal) ------------------------------------------------------------------------ - -The outputs should now correspond to their matlab and R counterparts, and do -what most people expect if the old_behavior=False argument is passed: - -* correlate, convolve and their 2d counterparts do not swap their inputs - depending on their relative shape anymore; -* correlation functions now conjugate their second argument while computing - the slided sum-products, which correspond to the usual definition of - correlation. - -Additions and modification to LTI functions (scipy.signal) ----------------------------------------------------------- - -* The functions `impulse2` and `step2` were added to `scipy.signal`. 
- They use the function `scipy.signal.lsim2` to compute the impulse and - step response of a system, respectively. -* The function `scipy.signal.lsim2` was changed to pass any additional - keyword arguments to the ODE solver. - -Improved waveform generators (scipy.signal) -------------------------------------------- - -Several improvements to the `chirp` function in `scipy.signal` were made: - -* The waveform generated when `method="logarithmic"` was corrected; it - now generates a waveform that is also known as an "exponential" or - "geometric" chirp. (See http://en.wikipedia.org/wiki/Chirp.) -* A new `chirp` method, "hyperbolic", was added. -* Instead of the keyword `qshape`, `chirp` now uses the keyword - `vertex_zero`, a boolean. -* `chirp` no longer handles an arbitrary polynomial. This functionality - has been moved to a new function, `sweep_poly`. - -A new function, `sweep_poly`, was added. - -New functions and other changes in scipy.linalg ------------------------------------------------ - -The functions `cho_solve_banded`, `circulant`, `companion`, `hadamard` and -`leslie` were added to `scipy.linalg`. - -The function `block_diag` was enhanced to accept scalar and 1D arguments, -along with the usual 2D arguments. - -New function and changes in scipy.optimize ------------------------------------------- - -The `curve_fit` function has been added; it takes a function and uses -non-linear least squares to fit that to the provided data. - -The `leastsq` and `fsolve` functions now return an array of size one instead of -a scalar when solving for a single parameter. - -New sparse least squares solver -------------------------------- - -The `lsqr` function was added to `scipy.sparse`. `This routine -`_ finds a -least-squares solution to a large, sparse, linear system of equations. - -ARPACK-based sparse SVD ------------------------ - -A naive implementation of SVD for sparse matrices is available in -scipy.sparse.linalg.eigen.arpack. 
It is based on using an symmetric solver on -, and as such may not be very precise. - -Alternative behavior available for `scipy.constants.find` ---------------------------------------------------------- - -The keyword argument `disp` was added to the function `scipy.constants.find`, -with the default value `True`. When `disp` is `True`, the behavior is the -same as in Scipy version 0.7. When `False`, the function returns the list of -keys instead of printing them. (In SciPy version 0.9, the default will be -reversed.) - -Incomplete sparse LU decompositions ------------------------------------ - -Scipy now wraps SuperLU version 4.0, which supports incomplete sparse LU -decompositions. These can be accessed via `scipy.sparse.linalg.spilu`. -Upgrade to SuperLU 4.0 also fixes some known bugs. - -Faster matlab file reader and default behavior change ------------------------------------------------------- - -We've rewritten the matlab file reader in Cython and it should now read -matlab files at around the same speed that Matlab does. - -The reader reads matlab named and anonymous functions, but it can't -write them. - -Until scipy 0.8.0 we have returned arrays of matlab structs as numpy -object arrays, where the objects have attributes named for the struct -fields. As of 0.8.0, we return matlab structs as numpy structured -arrays. You can get the older behavior by using the optional -``struct_as_record=False`` keyword argument to `scipy.io.loadmat` and -friends. - -There is an inconsistency in the matlab file writer, in that it writes -numpy 1D arrays as column vectors in matlab 5 files, and row vectors in -matlab 4 files. We will change this in the next version, so both write -row vectors. There is a `FutureWarning` when calling the writer to warn -of this change; for now we suggest using the ``oned_as='row'`` keyword -argument to `scipy.io.savemat` and friends. 
- -Faster evaluation of orthogonal polynomials -------------------------------------------- - -Values of orthogonal polynomials can be evaluated with new vectorized functions -in `scipy.special`: `eval_legendre`, `eval_chebyt`, `eval_chebyu`, -`eval_chebyc`, `eval_chebys`, `eval_jacobi`, `eval_laguerre`, -`eval_genlaguerre`, `eval_hermite`, `eval_hermitenorm`, -`eval_gegenbauer`, `eval_sh_legendre`, `eval_sh_chebyt`, -`eval_sh_chebyu`, `eval_sh_jacobi`. This is faster than constructing the -full coefficient representation of the polynomials, which was previously the -only available way. - -Note that the previous orthogonal polynomial routines will now also invoke this -feature, when possible. - -Lambert W function ------------------- - -`scipy.special.lambertw` can now be used for evaluating the Lambert W -function. - -Improved hypergeometric 2F1 function ------------------------------------- - -Implementation of `scipy.special.hyp2f1` for real parameters was revised. -The new version should produce accurate values for all real parameters. - -More flexible interface for Radial basis function interpolation ---------------------------------------------------------------- - -The `scipy.interpolate.Rbf` class now accepts a callable as input for the -"function" argument, in addition to the built-in radial basis functions which -can be selected with a string argument. - -Removed features -================ - -scipy.stsci: the package was removed - -The module `scipy.misc.limits` was removed. - -scipy.io --------- - -The IO code in both NumPy and SciPy is being extensively -reworked. NumPy will be where basic code for reading and writing NumPy -arrays is located, while SciPy will house file readers and writers for -various data formats (data, audio, video, images, matlab, etc.). 
- -Several functions in `scipy.io` are removed in the 0.8.0 release including: -`npfile`, `save`, `load`, `create_module`, `create_shelf`, -`objload`, `objsave`, `fopen`, `read_array`, `write_array`, -`fread`, `fwrite`, `bswap`, `packbits`, `unpackbits`, and -`convert_objectarray`. Some of these functions have been replaced by NumPy's -raw reading and writing capabilities, memory-mapping capabilities, or array -methods. Others have been moved from SciPy to NumPy, since basic array reading -and writing capability is now handled by NumPy. diff --git a/scipy-0.10.1/doc/release/0.9.0-notes.rst b/scipy-0.10.1/doc/release/0.9.0-notes.rst deleted file mode 100644 index 80c6394f33..0000000000 --- a/scipy-0.10.1/doc/release/0.9.0-notes.rst +++ /dev/null @@ -1,230 +0,0 @@ -========================= -SciPy 0.9.0 Release Notes -========================= - -.. contents:: - -SciPy 0.9.0 is the culmination of 6 months of hard work. It contains -many new features, numerous bug-fixes, improved test coverage and -better documentation. There have been a number of deprecations and -API changes in this release, which are documented below. All users -are encouraged to upgrade to this release, as there are a large number -of bug-fixes and optimizations. Moreover, our development attention -will now shift to bug-fix releases on the 0.9.x branch, and on adding -new features on the development trunk. - -This release requires Python 2.4 - 2.7 or 3.1 - and NumPy 1.5 or greater. - -Please note that SciPy is still considered to have "Beta" status, as -we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a -major milestone in the development of SciPy, after which changing the -package structure or API will be much more difficult. Whilst these -pre-1.0 releases are considered to have "Beta" status, we are -committed to making them as bug-free as possible. - -However, until the 1.0 release, we are aggressively reviewing and -refining the functionality, organization, and interface. 
This is being -done in an effort to make the package as coherent, intuitive, and -useful as possible. To achieve this, we need help from the community -of users. Specifically, we need feedback regarding all aspects of the -project - everything - from which algorithms we implement, to details -about our function's call signatures. - - -Python 3 -======== - -Scipy 0.9.0 is the first SciPy release to support Python 3. The only module -that is not yet ported is ``scipy.weave``. - - -Scipy source code location to be changed -======================================== - -Soon after this release, Scipy will stop using SVN as the version control -system, and move to Git. The development source code for Scipy can from then on -be found at - - http://github.com/scipy/scipy - - -New features -============ - -Delaunay tesselations (``scipy.spatial``) ------------------------------------------ - -Scipy now includes routines for computing Delaunay tesselations in N -dimensions, powered by the Qhull_ computational geometry library. Such -calculations can now make use of the new ``scipy.spatial.Delaunay`` -interface. - -.. _Qhull: http://www.qhull.org/ - -N-dimensional interpolation (``scipy.interpolate``) ---------------------------------------------------- - -Support for scattered data interpolation is now significantly -improved. This version includes a ``scipy.interpolate.griddata`` -function that can perform linear and nearest-neighbour interpolation -for N-dimensional scattered data, in addition to cubic spline -(C1-smooth) interpolation in 2D and 1D. An object-oriented interface -to each interpolator type is also available. - -Nonlinear equation solvers (``scipy.optimize``) ------------------------------------------------ - -Scipy includes new routines for large-scale nonlinear equation solving -in ``scipy.optimize``. 
The following methods are implemented: - -* Newton-Krylov (``scipy.optimize.newton_krylov``) - -* (Generalized) secant methods: - - - Limited-memory Broyden methods (``scipy.optimize.broyden1``, - ``scipy.optimize.broyden2``) - - - Anderson method (``scipy.optimize.anderson``) - -* Simple iterations (``scipy.optimize.diagbroyden``, - ``scipy.optimize.excitingmixing``, ``scipy.optimize.linearmixing``) - -The ``scipy.optimize.nonlin`` module was completely rewritten, and -some of the functions were deprecated (see above). - - -New linear algebra routines (``scipy.linalg``) ----------------------------------------------- - -Scipy now contains routines for effectively solving triangular -equation systems (``scipy.linalg.solve_triangular``). - - -Improved FIR filter design functions (``scipy.signal``) -------------------------------------------------------- - -The function ``scipy.signal.firwin`` was enhanced to allow the -design of highpass, bandpass, bandstop and multi-band FIR filters. - -The function ``scipy.signal.firwin2`` was added. This function -uses the window method to create a linear phase FIR filter with -an arbitrary frequency response. - -The functions ``scipy.signal.kaiser_atten`` and ``scipy.signal.kaiser_beta`` -were added. - - -Improved statistical tests (``scipy.stats``) --------------------------------------------- - -A new function ``scipy.stats.fisher_exact`` was added, that provides Fisher's -exact test for 2x2 contingency tables. - -The function ``scipy.stats.kendalltau`` was rewritten to make it much faster -(O(n log(n)) vs O(n^2)). 
- - -Deprecated features -=================== - -Obsolete nonlinear solvers (in ``scipy.optimize``) --------------------------------------------------- - -The following nonlinear solvers from ``scipy.optimize`` are -deprecated: - -- ``broyden_modified`` (bad performance) -- ``broyden1_modified`` (bad performance) -- ``broyden_generalized`` (equivalent to ``anderson``) -- ``anderson2`` (equivalent to ``anderson``) -- ``broyden3`` (obsoleted by new limited-memory broyden methods) -- ``vackar`` (renamed to ``diagbroyden``) - - -Removed features -================ - -The deprecated modules ``helpmod``, ``pexec`` and ``ppimport`` were removed -from ``scipy.misc``. - -The ``output_type`` keyword in many ``scipy.ndimage`` interpolation functions -has been removed. - -The ``econ`` keyword in ``scipy.linalg.qr`` has been removed. The same -functionality is still available by specifying ``mode='economic'``. - - -Old correlate/convolve behavior (in ``scipy.signal``) ------------------------------------------------------ - -The old behavior for ``scipy.signal.convolve``, ``scipy.signal.convolve2d``, -``scipy.signal.correlate`` and ``scipy.signal.correlate2d`` was deprecated in -0.8.0 and has now been removed. Convolve and correlate used to swap their -arguments if the second argument has dimensions larger than the first one, and -the mode was relative to the input with the largest dimension. The current -behavior is to never swap the inputs, which is what most people expect, and is -how correlation is usually defined. - - -``scipy.stats`` ---------------- - -Many functions in ``scipy.stats`` that are either available from numpy or have -been superseded, and have been deprecated since version 0.7, have been removed: -`std`, `var`, `mean`, `median`, `cov`, `corrcoef`, `z`, `zs`, `stderr`, -`samplestd`, `samplevar`, `pdfapprox`, `pdf_moments` and `erfc`. These changes -are mirrored in ``scipy.stats.mstats``. 
- - -``scipy.sparse`` ----------------- - -Several methods of the sparse matrix classes in ``scipy.sparse`` which had -been deprecated since version 0.7 were removed: `save`, `rowcol`, `getdata`, -`listprint`, `ensure_sorted_indices`, `matvec`, `matmat` and `rmatvec`. - -The functions ``spkron``, ``speye``, ``spidentity``, ``lil_eye`` and -``lil_diags`` were removed from ``scipy.sparse``. The first three functions -are still available as ``scipy.sparse.kron``, ``scipy.sparse.eye`` and -``scipy.sparse.identity``. - -The `dims` and `nzmax` keywords were removed from the sparse matrix -constructor. The `colind` and `rowind` attributes were removed from CSR and CSC -matrices respectively. - -``scipy.sparse.linalg.arpack.speigs`` -------------------------------------- - -A duplicated interface to the ARPACK library was removed. - - -Other changes -============= - -ARPACK interface changes ------------------------- - -The interface to the ARPACK eigenvalue routines in -``scipy.sparse.linalg`` was changed for more robustness. - -The eigenvalue and SVD routines now raise ``ArpackNoConvergence`` if -the eigenvalue iteration fails to converge. If partially converged results -are desired, they can be accessed as follows:: - - import numpy as np - from scipy.sparse.linalg import eigs, ArpackNoConvergence - - m = np.random.randn(30, 30) - try: - w, v = eigs(m, 6) - except ArpackNoConvergence, err: - partially_converged_w = err.eigenvalues - partially_converged_v = err.eigenvectors - -Several bugs were also fixed. 
- -The routines were moreover renamed as follows: - - - eigen --> eigs - - eigen_symmetric --> eigsh - - svd --> svds diff --git a/scipy-0.10.1/doc/source/_static/scipy.css b/scipy-0.10.1/doc/source/_static/scipy.css deleted file mode 100644 index 70b0e6b53c..0000000000 --- a/scipy-0.10.1/doc/source/_static/scipy.css +++ /dev/null @@ -1,186 +0,0 @@ -@import "default.css"; - -/** - * Spacing fixes - */ - -div.body p, div.body dd, div.body li { - line-height: 125%; -} - -ul.simple { - margin-top: 0; - margin-bottom: 0; - padding-top: 0; - padding-bottom: 0; -} - -/* spacing around blockquoted fields in parameters/attributes/returns */ -td.field-body > blockquote { - margin-top: 0.1em; - margin-bottom: 0.5em; -} - -/* spacing around example code */ -div.highlight > pre { - padding: 2px 5px 2px 5px; -} - -/* spacing in see also definition lists */ -dl.last > dd { - margin-top: 1px; - margin-bottom: 5px; - margin-left: 30px; -} - -/* hide overflowing content in the sidebar */ -div.sphinxsidebarwrapper p.topless { - overflow: hidden; -} - -/** - * Hide dummy toctrees - */ - -ul { - padding-top: 0; - padding-bottom: 0; - margin-top: 0; - margin-bottom: 0; -} -ul li { - padding-top: 0; - padding-bottom: 0; - margin-top: 0; - margin-bottom: 0; -} -ul li a.reference { - padding-top: 0; - padding-bottom: 0; - margin-top: 0; - margin-bottom: 0; -} - -/** - * Make high-level subsections easier to distinguish from top-level ones - */ -div.body h3 { - background-color: transparent; -} - -div.body h4 { - border: none; - background-color: transparent; -} - -/** - * Scipy colors - */ - -body { - background-color: rgb(100,135,220); -} - -div.document { - background-color: rgb(230,230,230); -} - -div.sphinxsidebar { - background-color: rgb(230,230,230); - overflow: hidden; -} - -div.related { - background-color: rgb(100,135,220); -} - -div.sphinxsidebar h3 { - color: rgb(0,102,204); -} - -div.sphinxsidebar h3 a { - color: rgb(0,102,204); -} - -div.sphinxsidebar h4 { - color: 
rgb(0,82,194); -} - -div.sphinxsidebar p { - color: black; -} - -div.sphinxsidebar a { - color: #355f7c; -} - -div.sphinxsidebar ul.want-points { - list-style: disc; -} - -.field-list th { - color: rgb(0,102,204); - white-space: nowrap; -} - -/** - * Extra admonitions - */ - -div.tip { - background-color: #ffffe4; - border: 1px solid #ee6; -} - -div.plot-output { - clear-after: both; -} - -div.plot-output .figure { - float: left; - text-align: center; - margin-bottom: 0; - padding-bottom: 0; -} - -div.plot-output .caption { - margin-top: 2; - padding-top: 0; -} - -div.plot-output:after { - content: ""; - display: block; - height: 0; - clear: both; -} - - -/* -div.admonition-example { - background-color: #e4ffe4; - border: 1px solid #ccc; -}*/ - - -/** - * Styling for field lists - */ - -table.field-list th { - border-left: 1px solid #aaa !important; - padding-left: 5px; -} - -table.field-list { - border-collapse: separate; - border-spacing: 10px; -} - -/** - * Styling for footnotes - */ - -table.footnote td, table.footnote th { - border: none; -} diff --git a/scipy-0.10.1/doc/source/_static/scipyshiny_small.png b/scipy-0.10.1/doc/source/_static/scipyshiny_small.png deleted file mode 100644 index 7ef81a9e8f..0000000000 Binary files a/scipy-0.10.1/doc/source/_static/scipyshiny_small.png and /dev/null differ diff --git a/scipy-0.10.1/doc/source/_templates/autosummary/class.rst b/scipy-0.10.1/doc/source/_templates/autosummary/class.rst deleted file mode 100644 index 64c1b11e9f..0000000000 --- a/scipy-0.10.1/doc/source/_templates/autosummary/class.rst +++ /dev/null @@ -1,27 +0,0 @@ -{% extends "!autosummary/class.rst" %} - -{% block methods %} -{% if methods %} - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - {% for item in all_methods %} - {%- if not item.startswith('_') or item in ['__call__'] %} - {{ name }}.{{ item }} - {%- endif -%} - {%- endfor %} -{% endif %} -{% endblock %} - -{% block attributes %} -{% if attributes %} - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - {% for item in all_attributes %} - {%- if not item.startswith('_') %} - {{ name }}.{{ item }} - {%- endif -%} - {%- endfor %} -{% endif %} -{% endblock %} diff --git a/scipy-0.10.1/doc/source/_templates/indexsidebar.html b/scipy-0.10.1/doc/source/_templates/indexsidebar.html deleted file mode 100644 index 409743a038..0000000000 --- a/scipy-0.10.1/doc/source/_templates/indexsidebar.html +++ /dev/null @@ -1,5 +0,0 @@ -

    Resources

    - diff --git a/scipy-0.10.1/doc/source/_templates/layout.html b/scipy-0.10.1/doc/source/_templates/layout.html deleted file mode 100644 index 71e8d483b7..0000000000 --- a/scipy-0.10.1/doc/source/_templates/layout.html +++ /dev/null @@ -1,14 +0,0 @@ -{% extends "!layout.html" %} - -{% block sidebarsearch %} -{%- if sourcename %} - -{%- endif %} -{{ super() }} -{% endblock %} diff --git a/scipy-0.10.1/doc/source/api.rst b/scipy-0.10.1/doc/source/api.rst deleted file mode 100644 index aea89fb705..0000000000 --- a/scipy-0.10.1/doc/source/api.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../API.rst.txt diff --git a/scipy-0.10.1/doc/source/cluster.hierarchy.rst b/scipy-0.10.1/doc/source/cluster.hierarchy.rst deleted file mode 100644 index f680a517f6..0000000000 --- a/scipy-0.10.1/doc/source/cluster.hierarchy.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.cluster.hierarchy diff --git a/scipy-0.10.1/doc/source/cluster.rst b/scipy-0.10.1/doc/source/cluster.rst deleted file mode 100644 index 56874b5fcd..0000000000 --- a/scipy-0.10.1/doc/source/cluster.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. automodule:: scipy.cluster - -.. toctree:: - :hidden: - - cluster.vq - cluster.hierarchy diff --git a/scipy-0.10.1/doc/source/cluster.vq.rst b/scipy-0.10.1/doc/source/cluster.vq.rst deleted file mode 100644 index 2c2b0aeafd..0000000000 --- a/scipy-0.10.1/doc/source/cluster.vq.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
automodule:: scipy.cluster.vq diff --git a/scipy-0.10.1/doc/source/conf.py b/scipy-0.10.1/doc/source/conf.py deleted file mode 100644 index 3f99167273..0000000000 --- a/scipy-0.10.1/doc/source/conf.py +++ /dev/null @@ -1,297 +0,0 @@ -# -*- coding: utf-8 -*- - -import sys, os, re - -# Check Sphinx version -import sphinx -if sphinx.__version__ < "1.0.1": - raise RuntimeError("Sphinx 1.0.1 or newer required") - -needs_sphinx = '1.0' - -# ----------------------------------------------------------------------------- -# General configuration -# ----------------------------------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -sys.path.insert(0, os.path.abspath('../sphinxext')) - -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc', - 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', - 'sphinx.ext.autosummary'] - -# Determine if the matplotlib has a recent enough version of the -# plot_directive, otherwise use the local fork. -try: - from matplotlib.sphinxext import plot_directive -except ImportError: - use_matplotlib_plot_directive = False -else: - try: - use_matplotlib_plot_directive = (plot_directive.__version__ >= 2) - except AttributeError: - use_matplotlib_plot_directive = False - -if use_matplotlib_plot_directive: - extensions.append('matplotlib.sphinxext.plot_directive') -else: - extensions.append('plot_directive') - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General substitutions. -project = 'SciPy' -copyright = '2008-2009, The Scipy community' - -# The default replacements for |version| and |release|, also used in various -# other places throughout the built documents. 
-# -import scipy -# The short X.Y version (including the .devXXXX suffix if present) -version = re.sub(r'^(\d+\.\d+)\.\d+(.*)', r'\1\2', scipy.__version__) -if 'dev' in version: - # retain the .dev suffix, but clean it up - version = re.sub(r'(\.dev\d*).*?$', r'\1', version) -else: - # strip all other suffixes - version = re.sub(r'^(\d+\.\d+).*?$', r'\1', version) -# The full version, including alpha/beta/rc tags. -release = scipy.__version__ - -print "Scipy (VERSION %s) (RELEASE %s)" % (version, release) - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -default_role = "autolink" - -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = False - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - - -# ----------------------------------------------------------------------------- -# HTML output -# ----------------------------------------------------------------------------- - -# The style sheet to use for HTML and HTML Help pages. A file of that name -# must exist either in Sphinx' static/ path, or in one of the custom paths -# given in html_static_path. -html_style = 'scipy.css' - -# The name for this set of Sphinx documents. 
If None, it defaults to -# " v documentation". -html_title = "%s v%s Reference Guide (DRAFT)" % (project, version) - -# The name of an image file (within the static path) to place at the top of -# the sidebar. -html_logo = '_static/scipyshiny_small.png' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%b %d, %Y' - -# Correct index page -#html_index = "index" - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -html_sidebars = { - 'index': 'indexsidebar.html' -} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -html_additional_pages = {} - -# If false, no module index is generated. -html_use_modindex = True - -# If true, the reST sources are included in the HTML build as _sources/. -#html_copy_source = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".html"). -html_file_suffix = '.html' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'scipy' - -# Pngmath should try to align formulas properly -pngmath_use_preview = True - - -# ----------------------------------------------------------------------------- -# LaTeX output -# ----------------------------------------------------------------------------- - -# The paper size ('letter' or 'a4'). 
-#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, document class [howto/manual]). -_stdauthor = 'Written by the SciPy community' -latex_documents = [ - ('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'), -# ('user/index', 'scipy-user.tex', 'SciPy User Guide', -# _stdauthor, 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -latex_preamble = r''' -\usepackage{amsmath} -\DeclareUnicodeCharacter{00A0}{\nobreakspace} - -% In the parameters section, place a newline after the Parameters -% header -\usepackage{expdlist} -\let\latexdescription=\description -\def\description{\latexdescription{}{} \breaklabel} - -% Make Examples/etc section headers smaller and more compact -\makeatletter -\titleformat{\paragraph}{\normalsize\py@HeaderFamily}% - {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor} -\titlespacing*{\paragraph}{0pt}{1ex}{0pt} -\makeatother - -% Fix footer/header -\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}} -\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}} -''' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. 
-latex_use_modindex = False - - -# ----------------------------------------------------------------------------- -# Intersphinx configuration -# ----------------------------------------------------------------------------- -intersphinx_mapping = { - 'http://docs.python.org/dev': None, - 'http://docs.scipy.org/doc/numpy': None, -} - - -# ----------------------------------------------------------------------------- -# Numpy extensions -# ----------------------------------------------------------------------------- - -# If we want to do a phantom import from an XML file for all autodocs -phantom_import_file = 'dump.xml' - -# Generate plots for example sections -numpydoc_use_plots = True - -# ----------------------------------------------------------------------------- -# Autosummary -# ----------------------------------------------------------------------------- - -if sphinx.__version__ >= "0.7": - import glob - autosummary_generate = glob.glob("*.rst") - -# ----------------------------------------------------------------------------- -# Coverage checker -# ----------------------------------------------------------------------------- -coverage_ignore_modules = r""" - """.split() -coverage_ignore_functions = r""" - test($|_) (some|all)true bitwise_not cumproduct pkgload - generic\. 
- """.split() -coverage_ignore_classes = r""" - """.split() - -coverage_c_path = [] -coverage_c_regexes = {} -coverage_ignore_c_items = {} - - -#------------------------------------------------------------------------------ -# Plot -#------------------------------------------------------------------------------ -plot_pre_code = """ -import numpy as np -import scipy as sp -np.random.seed(123) -""" -plot_include_source = True -plot_formats = [('png', 100), 'pdf'] -plot_html_show_formats = False - -import math -phi = (math.sqrt(5) + 1)/2 - -import matplotlib -matplotlib.rcParams.update({ - 'font.size': 8, - 'axes.titlesize': 8, - 'axes.labelsize': 8, - 'xtick.labelsize': 8, - 'ytick.labelsize': 8, - 'legend.fontsize': 8, - 'figure.figsize': (3*phi, 3), - 'figure.subplot.bottom': 0.2, - 'figure.subplot.left': 0.2, - 'figure.subplot.right': 0.9, - 'figure.subplot.top': 0.85, - 'figure.subplot.wspace': 0.4, - 'text.usetex': False, -}) diff --git a/scipy-0.10.1/doc/source/constants.rst b/scipy-0.10.1/doc/source/constants.rst deleted file mode 100644 index d81f994561..0000000000 --- a/scipy-0.10.1/doc/source/constants.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.constants diff --git a/scipy-0.10.1/doc/source/fftpack.rst b/scipy-0.10.1/doc/source/fftpack.rst deleted file mode 100644 index 5e1e43d4c8..0000000000 --- a/scipy-0.10.1/doc/source/fftpack.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.fftpack diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.braycurtis.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.braycurtis.rst deleted file mode 100644 index 3e21575c28..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.braycurtis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.braycurtis -================================= - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: braycurtis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.canberra.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.canberra.rst deleted file mode 100644 index 231461cf8d..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.canberra.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.canberra -=============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: canberra \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cdist.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cdist.rst deleted file mode 100644 index 7cddc9d988..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.cdist -============================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: cdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.chebyshev.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.chebyshev.rst deleted file mode 100644 index e39da2ccf9..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.chebyshev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.chebyshev -================================ - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: chebyshev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cityblock.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cityblock.rst deleted file mode 100644 index ae9b8e9ea9..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cityblock.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.cityblock -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: cityblock \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.correlation.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.correlation.rst deleted file mode 100644 index c5500568d7..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.correlation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.correlation -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: correlation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cosine.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cosine.rst deleted file mode 100644 index 864fb5660a..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.cosine.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.cosine -============================= - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: cosine \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.dice.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.dice.rst deleted file mode 100644 index b0d1a001f4..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.dice.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.dice -=========================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: dice \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.euclidean.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.euclidean.rst deleted file mode 100644 index d26404d971..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.euclidean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.euclidean -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: euclidean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.hamming.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.hamming.rst deleted file mode 100644 index b68690448d..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.hamming.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.hamming -============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: hamming \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.is_valid_dm.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.is_valid_dm.rst deleted file mode 100644 index 225746adb7..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.is_valid_dm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.is_valid_dm -================================== - -.. 
currentmodule:: scipy.spatial.distance - -.. autofunction:: is_valid_dm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.is_valid_y.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.is_valid_y.rst deleted file mode 100644 index 0698ff83b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.is_valid_y.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.is_valid_y -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: is_valid_y \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.jaccard.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.jaccard.rst deleted file mode 100644 index adbe0c6b0f..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.jaccard.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.jaccard -============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: jaccard \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.kulsinski.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.kulsinski.rst deleted file mode 100644 index 11471ad88f..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.kulsinski.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.kulsinski -================================ - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: kulsinski \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.mahalanobis.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.mahalanobis.rst deleted file mode 100644 index 936afe57fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.mahalanobis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.mahalanobis -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: mahalanobis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.matching.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.matching.rst deleted file mode 100644 index a22ba0b20c..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.matching.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.matching -=============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: matching \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.minkowski.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.minkowski.rst deleted file mode 100644 index bfe845ae22..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.minkowski.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.minkowski -================================ - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: minkowski \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.num_obs_dm.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.num_obs_dm.rst deleted file mode 100644 index a2a1efb084..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.num_obs_dm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.num_obs_dm -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: num_obs_dm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.num_obs_y.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.num_obs_y.rst deleted file mode 100644 index 7f03b0863f..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.num_obs_y.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.num_obs_y -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: num_obs_y \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.pdist.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.pdist.rst deleted file mode 100644 index f1985b22e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.pdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.pdist -============================ - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: pdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.rogerstanimoto.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.rogerstanimoto.rst deleted file mode 100644 index 47e3e15243..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.rogerstanimoto.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.rogerstanimoto -===================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: rogerstanimoto \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.russellrao.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.russellrao.rst deleted file mode 100644 index c34216888e..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.russellrao.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.russellrao -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: russellrao \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.seuclidean.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.seuclidean.rst deleted file mode 100644 index 9e626bd5d1..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.seuclidean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.seuclidean -================================= - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: seuclidean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sokalmichener.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sokalmichener.rst deleted file mode 100644 index 5507980ae7..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sokalmichener.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.sokalmichener -==================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: sokalmichener \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sokalsneath.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sokalsneath.rst deleted file mode 100644 index 2471ed63b7..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sokalsneath.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.sokalsneath -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: sokalsneath \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sqeuclidean.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sqeuclidean.rst deleted file mode 100644 index 7a3765f39b..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.sqeuclidean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.sqeuclidean -================================== - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: sqeuclidean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.squareform.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.squareform.rst deleted file mode 100644 index 4ca0d5896f..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.squareform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.squareform -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: squareform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.yule.rst b/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.yule.rst deleted file mode 100644 index 6bf920a492..0000000000 --- a/scipy-0.10.1/doc/source/generated/generated/scipy.spatial.distance.yule.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.yule -=========================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: yule \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_count.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_count.rst deleted file mode 100644 index 8340e632f6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_count.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode.get_count -============================================= - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
automethod:: ClusterNode.get_count \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_id.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_id.rst deleted file mode 100644 index 4d83ddb5f4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_id.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode.get_id -========================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. automethod:: ClusterNode.get_id \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_left.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_left.rst deleted file mode 100644 index 8491c95f8d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_left.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode.get_left -============================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. automethod:: ClusterNode.get_left \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_right.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_right.rst deleted file mode 100644 index c5998775de..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.get_right.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode.get_right -============================================= - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
automethod:: ClusterNode.get_right \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.is_leaf.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.is_leaf.rst deleted file mode 100644 index 28a14f9fa9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.is_leaf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode.is_leaf -=========================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. automethod:: ClusterNode.is_leaf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.pre_order.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.pre_order.rst deleted file mode 100644 index 4a4641da54..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.pre_order.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode.pre_order -============================================= - -.. currentmodule:: scipy.cluster.hierarchy - -.. automethod:: ClusterNode.pre_order \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.rst deleted file mode 100644 index c77399a9fe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ClusterNode.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.cluster.hierarchy.ClusterNode -=================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autoclass:: ClusterNode - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - ClusterNode.get_count - ClusterNode.get_id - ClusterNode.get_left - ClusterNode.get_right - ClusterNode.is_leaf - ClusterNode.pre_order - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.average.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.average.rst deleted file mode 100644 index 4c6e43c98d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.average.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.average -=============================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: average \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.centroid.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.centroid.rst deleted file mode 100644 index 4c0c32e0c3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.centroid.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.centroid -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: centroid \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.complete.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.complete.rst deleted file mode 100644 index ead82036a3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.complete.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.complete -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
autofunction:: complete \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.cophenet.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.cophenet.rst deleted file mode 100644 index 9f4cf03a35..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.cophenet.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.cophenet -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: cophenet \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.correspond.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.correspond.rst deleted file mode 100644 index bb084d7b06..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.correspond.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.correspond -================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: correspond \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.dendrogram.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.dendrogram.rst deleted file mode 100644 index c573ebd553..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.dendrogram.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.dendrogram -================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: dendrogram \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.fcluster.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.fcluster.rst deleted file mode 100644 index 92cb527d3e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.fcluster.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.fcluster -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
autofunction:: fcluster \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.fclusterdata.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.fclusterdata.rst deleted file mode 100644 index 5663d7bd4a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.fclusterdata.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.fclusterdata -==================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: fclusterdata \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.from_mlab_linkage.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.from_mlab_linkage.rst deleted file mode 100644 index 160e9b921c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.from_mlab_linkage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.from_mlab_linkage -========================================= - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: from_mlab_linkage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.inconsistent.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.inconsistent.rst deleted file mode 100644 index 704dd97d94..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.inconsistent.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.inconsistent -==================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
autofunction:: inconsistent \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_isomorphic.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_isomorphic.rst deleted file mode 100644 index 31c0c0b3e8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_isomorphic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.is_isomorphic -===================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: is_isomorphic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_monotonic.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_monotonic.rst deleted file mode 100644 index 0ccebf8b2c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_monotonic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.is_monotonic -==================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: is_monotonic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_valid_im.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_valid_im.rst deleted file mode 100644 index 3f085a1e5e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_valid_im.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.is_valid_im -=================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
autofunction:: is_valid_im \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_valid_linkage.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_valid_linkage.rst deleted file mode 100644 index 3cc787051b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.is_valid_linkage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.is_valid_linkage -======================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: is_valid_linkage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.leaders.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.leaders.rst deleted file mode 100644 index 73c33968c5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.leaders.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.leaders -=============================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: leaders \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.leaves_list.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.leaves_list.rst deleted file mode 100644 index 4641a839c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.leaves_list.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.leaves_list -=================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: leaves_list \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.linkage.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.linkage.rst deleted file mode 100644 index db1790326b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.linkage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.linkage -=============================== - -.. 
currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: linkage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxRstat.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxRstat.rst deleted file mode 100644 index abb0f835c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxRstat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.maxRstat -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: maxRstat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxdists.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxdists.rst deleted file mode 100644 index 5db9bc9c10..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxdists.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.maxdists -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: maxdists \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxinconsts.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxinconsts.rst deleted file mode 100644 index 7cad603620..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.maxinconsts.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.maxinconsts -=================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: maxinconsts \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.median.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.median.rst deleted file mode 100644 index 4a2f052f18..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.median.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.median -============================== - -.. 
currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: median \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.num_obs_linkage.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.num_obs_linkage.rst deleted file mode 100644 index 85e7f40717..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.num_obs_linkage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.num_obs_linkage -======================================= - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: num_obs_linkage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.set_link_color_palette.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.set_link_color_palette.rst deleted file mode 100644 index 11434bff2e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.set_link_color_palette.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.set_link_color_palette -============================================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: set_link_color_palette \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.single.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.single.rst deleted file mode 100644 index 5626cc3843..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.single.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.single -============================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
autofunction:: single \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.to_mlab_linkage.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.to_mlab_linkage.rst deleted file mode 100644 index d00ee90450..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.to_mlab_linkage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.to_mlab_linkage -======================================= - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: to_mlab_linkage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.to_tree.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.to_tree.rst deleted file mode 100644 index 844c349030..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.to_tree.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.to_tree -=============================== - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: to_tree \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ward.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ward.rst deleted file mode 100644 index 0581897dc8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.ward.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.ward -============================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. autofunction:: ward \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.weighted.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.weighted.rst deleted file mode 100644 index d993e8e289..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.hierarchy.weighted.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.hierarchy.weighted -================================ - -.. currentmodule:: scipy.cluster.hierarchy - -.. 
autofunction:: weighted \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.kmeans.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.kmeans.rst deleted file mode 100644 index 7a3673ca94..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.kmeans.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.vq.kmeans -======================= - -.. currentmodule:: scipy.cluster.vq - -.. autofunction:: kmeans \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.kmeans2.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.kmeans2.rst deleted file mode 100644 index 9d344c08b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.kmeans2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.vq.kmeans2 -======================== - -.. currentmodule:: scipy.cluster.vq - -.. autofunction:: kmeans2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.vq.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.vq.rst deleted file mode 100644 index 64129e1fe7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.vq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.vq.vq -=================== - -.. currentmodule:: scipy.cluster.vq - -.. autofunction:: vq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.whiten.rst b/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.whiten.rst deleted file mode 100644 index 0dedf0881a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.cluster.vq.whiten.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.cluster.vq.whiten -======================= - -.. currentmodule:: scipy.cluster.vq - -.. 
autofunction:: whiten \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.C2F.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.C2F.rst deleted file mode 100644 index da7b229e5f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.C2F.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.C2F -=================== - -.. currentmodule:: scipy.constants - -.. autofunction:: C2F \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.C2K.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.C2K.rst deleted file mode 100644 index 8cae2514e8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.C2K.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.C2K -=================== - -.. currentmodule:: scipy.constants - -.. autofunction:: C2K \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.ConstantWarning.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.ConstantWarning.rst deleted file mode 100644 index 6dee19915f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.ConstantWarning.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.ConstantWarning -=============================== - -.. currentmodule:: scipy.constants - -.. autoexception:: ConstantWarning \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.F2C.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.F2C.rst deleted file mode 100644 index 5aacf1bf34..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.F2C.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.F2C -=================== - -.. currentmodule:: scipy.constants - -.. 
autofunction:: F2C \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.F2K.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.F2K.rst deleted file mode 100644 index 891ce404fb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.F2K.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.F2K -=================== - -.. currentmodule:: scipy.constants - -.. autofunction:: F2K \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.K2C.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.K2C.rst deleted file mode 100644 index 9ed182ec24..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.K2C.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.K2C -=================== - -.. currentmodule:: scipy.constants - -.. autofunction:: K2C \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.K2F.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.K2F.rst deleted file mode 100644 index 56421ddbcf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.K2F.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.K2F -=================== - -.. currentmodule:: scipy.constants - -.. autofunction:: K2F \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.find.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.find.rst deleted file mode 100644 index 542ab35064..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.find.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.find -==================== - -.. currentmodule:: scipy.constants - -.. 
autofunction:: find \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.lambda2nu.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.lambda2nu.rst deleted file mode 100644 index 8027fcf1c2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.lambda2nu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.lambda2nu -========================= - -.. currentmodule:: scipy.constants - -.. autofunction:: lambda2nu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.nu2lambda.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.nu2lambda.rst deleted file mode 100644 index 9db7b22407..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.nu2lambda.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.nu2lambda -========================= - -.. currentmodule:: scipy.constants - -.. autofunction:: nu2lambda \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.precision.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.precision.rst deleted file mode 100644 index dd9340ba79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.precision.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.precision -========================= - -.. currentmodule:: scipy.constants - -.. autofunction:: precision \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.unit.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.unit.rst deleted file mode 100644 index 8f5dff2ce5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.unit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.unit -==================== - -.. currentmodule:: scipy.constants - -.. 
autofunction:: unit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.constants.value.rst b/scipy-0.10.1/doc/source/generated/scipy.constants.value.rst deleted file mode 100644 index 894e16eef1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.constants.value.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.constants.value -===================== - -.. currentmodule:: scipy.constants - -.. autofunction:: value \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_drfft_cache.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_drfft_cache.rst deleted file mode 100644 index eff633fad9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_drfft_cache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.destroy_drfft_cache -========================================== - -.. currentmodule:: scipy.fftpack._fftpack - -.. autodata:: destroy_drfft_cache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_zfft_cache.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_zfft_cache.rst deleted file mode 100644 index 5271bbfcdf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_zfft_cache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.destroy_zfft_cache -========================================= - -.. currentmodule:: scipy.fftpack._fftpack - -.. 
autodata:: destroy_zfft_cache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_zfftnd_cache.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_zfftnd_cache.rst deleted file mode 100644 index 6ea41f0964..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.destroy_zfftnd_cache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.destroy_zfftnd_cache -=========================================== - -.. currentmodule:: scipy.fftpack._fftpack - -.. autodata:: destroy_zfftnd_cache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.drfft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.drfft.rst deleted file mode 100644 index 40fe80f2e5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.drfft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.drfft -============================ - -.. currentmodule:: scipy.fftpack._fftpack - -.. autodata:: drfft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zfft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zfft.rst deleted file mode 100644 index 4c277a997b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zfft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.zfft -=========================== - -.. currentmodule:: scipy.fftpack._fftpack - -.. autodata:: zfft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zfftnd.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zfftnd.rst deleted file mode 100644 index 7b2df4b40d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zfftnd.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.zfftnd -============================= - -.. currentmodule:: scipy.fftpack._fftpack - -.. 
autodata:: zfftnd \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zrfft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zrfft.rst deleted file mode 100644 index c9944d8648..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack._fftpack.zrfft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack._fftpack.zrfft -============================ - -.. currentmodule:: scipy.fftpack._fftpack - -.. autodata:: zrfft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.cc_diff.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.cc_diff.rst deleted file mode 100644 index 3a3afdf716..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.cc_diff.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.cc_diff -===================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: cc_diff \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.convolve.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.convolve.rst deleted file mode 100644 index bfd08d6623..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.convolve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.convolve.convolve -=============================== - -.. currentmodule:: scipy.fftpack.convolve - -.. autodata:: convolve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.convolve_z.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.convolve_z.rst deleted file mode 100644 index e3948b6f7f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.convolve_z.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.convolve.convolve_z -================================= - -.. currentmodule:: scipy.fftpack.convolve - -.. 
autodata:: convolve_z \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.destroy_convolve_cache.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.destroy_convolve_cache.rst deleted file mode 100644 index d76c5841a0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.destroy_convolve_cache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.convolve.destroy_convolve_cache -============================================= - -.. currentmodule:: scipy.fftpack.convolve - -.. autodata:: destroy_convolve_cache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.init_convolution_kernel.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.init_convolution_kernel.rst deleted file mode 100644 index e7ca1335b1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.convolve.init_convolution_kernel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.convolve.init_convolution_kernel -============================================== - -.. currentmodule:: scipy.fftpack.convolve - -.. autodata:: init_convolution_kernel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.cs_diff.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.cs_diff.rst deleted file mode 100644 index e64465c8fe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.cs_diff.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.cs_diff -===================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: cs_diff \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.dct.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.dct.rst deleted file mode 100644 index 2a3356ba89..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.dct.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.dct -================= - -.. currentmodule:: scipy.fftpack - -.. 
autofunction:: dct \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.diff.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.diff.rst deleted file mode 100644 index 493eada37b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.diff.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.diff -================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: diff \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.fft.rst deleted file mode 100644 index 4a97b1f477..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.fft -================= - -.. currentmodule:: scipy.fftpack - -.. autofunction:: fft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fft2.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.fft2.rst deleted file mode 100644 index e74b4d3f8e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fft2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.fft2 -================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: fft2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftfreq.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftfreq.rst deleted file mode 100644 index 2de631be08..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftfreq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.fftfreq -===================== - -.. currentmodule:: scipy.fftpack - -.. 
autofunction:: fftfreq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftn.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftn.rst deleted file mode 100644 index 9d80c6a00c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.fftn -================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: fftn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftshift.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftshift.rst deleted file mode 100644 index cf8a451a32..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.fftshift.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.fftshift -====================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: fftshift \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.hilbert.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.hilbert.rst deleted file mode 100644 index 5fa2e246aa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.hilbert.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.hilbert -===================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: hilbert \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.idct.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.idct.rst deleted file mode 100644 index 780603145b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.idct.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.idct -================== - -.. currentmodule:: scipy.fftpack - -.. 
autofunction:: idct \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifft.rst deleted file mode 100644 index 19f00dedee..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.ifft -================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: ifft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifft2.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifft2.rst deleted file mode 100644 index 781c93e2d7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifft2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.ifft2 -=================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: ifft2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifftn.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifftn.rst deleted file mode 100644 index 0c5b9e8cf3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifftn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.ifftn -=================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: ifftn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifftshift.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifftshift.rst deleted file mode 100644 index ace2834d2c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ifftshift.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.ifftshift -======================= - -.. currentmodule:: scipy.fftpack - -.. 
autofunction:: ifftshift \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ihilbert.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.ihilbert.rst deleted file mode 100644 index 5e028be95e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ihilbert.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.ihilbert -====================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: ihilbert \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.irfft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.irfft.rst deleted file mode 100644 index 548c6659c6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.irfft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.irfft -=================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: irfft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.itilbert.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.itilbert.rst deleted file mode 100644 index 58ce42a801..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.itilbert.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.itilbert -====================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: itilbert \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.rfft.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.rfft.rst deleted file mode 100644 index 76ab78615a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.rfft.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.rfft -================== - -.. currentmodule:: scipy.fftpack - -.. 
autofunction:: rfft \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.rfftfreq.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.rfftfreq.rst deleted file mode 100644 index 5814e924e3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.rfftfreq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.rfftfreq -====================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: rfftfreq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.sc_diff.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.sc_diff.rst deleted file mode 100644 index 37497cad82..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.sc_diff.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.sc_diff -===================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: sc_diff \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.shift.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.shift.rst deleted file mode 100644 index cd08080aaf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.shift.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.shift -=================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: shift \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ss_diff.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.ss_diff.rst deleted file mode 100644 index 42ef700b6a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.ss_diff.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.ss_diff -===================== - -.. currentmodule:: scipy.fftpack - -.. 
autofunction:: ss_diff \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.fftpack.tilbert.rst b/scipy-0.10.1/doc/source/generated/scipy.fftpack.tilbert.rst deleted file mode 100644 index 78c2a81fc1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.fftpack.tilbert.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.fftpack.tilbert -===================== - -.. currentmodule:: scipy.fftpack - -.. autofunction:: tilbert \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.integrate.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.integrate.rst deleted file mode 100644 index 1195f1779d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.integrate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.complex_ode.integrate -===================================== - -.. currentmodule:: scipy.integrate - -.. automethod:: complex_ode.integrate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.rst deleted file mode 100644 index aacb45c62e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.integrate.complex_ode -=========================== - -.. currentmodule:: scipy.integrate - -.. autoclass:: complex_ode - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - complex_ode.integrate - complex_ode.set_f_params - complex_ode.set_initial_value - complex_ode.set_integrator - complex_ode.set_jac_params - complex_ode.successful - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_f_params.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_f_params.rst deleted file mode 100644 index b096104540..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_f_params.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.complex_ode.set_f_params -======================================== - -.. currentmodule:: scipy.integrate - -.. automethod:: complex_ode.set_f_params \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_initial_value.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_initial_value.rst deleted file mode 100644 index ce80de22b9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_initial_value.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.complex_ode.set_initial_value -============================================= - -.. currentmodule:: scipy.integrate - -.. automethod:: complex_ode.set_initial_value \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_integrator.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_integrator.rst deleted file mode 100644 index e5ce518002..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_integrator.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.complex_ode.set_integrator -========================================== - -.. currentmodule:: scipy.integrate - -.. 
automethod:: complex_ode.set_integrator \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_jac_params.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_jac_params.rst deleted file mode 100644 index 16e8a8527e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.set_jac_params.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.complex_ode.set_jac_params -========================================== - -.. currentmodule:: scipy.integrate - -.. automethod:: complex_ode.set_jac_params \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.successful.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.successful.rst deleted file mode 100644 index f32346c6b4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.complex_ode.successful.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.complex_ode.successful -====================================== - -.. currentmodule:: scipy.integrate - -.. automethod:: complex_ode.successful \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.cumtrapz.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.cumtrapz.rst deleted file mode 100644 index 5a4b27b4df..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.cumtrapz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.cumtrapz -======================== - -.. currentmodule:: scipy.integrate - -.. autofunction:: cumtrapz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.dblquad.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.dblquad.rst deleted file mode 100644 index 1067812ff9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.dblquad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.dblquad -======================= - -.. currentmodule:: scipy.integrate - -.. 
autofunction:: dblquad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.fixed_quad.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.fixed_quad.rst deleted file mode 100644 index afbcb3c682..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.fixed_quad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.fixed_quad -========================== - -.. currentmodule:: scipy.integrate - -.. autofunction:: fixed_quad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.integrate.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.integrate.rst deleted file mode 100644 index 6500c18630..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.integrate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.ode.integrate -============================= - -.. currentmodule:: scipy.integrate - -.. automethod:: ode.integrate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.rst deleted file mode 100644 index 677d8587e4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.integrate.ode -=================== - -.. currentmodule:: scipy.integrate - -.. autoclass:: ode - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - ode.integrate - ode.set_f_params - ode.set_initial_value - ode.set_integrator - ode.set_jac_params - ode.successful - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_f_params.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_f_params.rst deleted file mode 100644 index 90952244cc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_f_params.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.ode.set_f_params -================================ - -.. currentmodule:: scipy.integrate - -.. automethod:: ode.set_f_params \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_initial_value.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_initial_value.rst deleted file mode 100644 index 8534e5b06a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_initial_value.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.ode.set_initial_value -===================================== - -.. currentmodule:: scipy.integrate - -.. automethod:: ode.set_initial_value \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_integrator.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_integrator.rst deleted file mode 100644 index f64fe3a598..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_integrator.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.ode.set_integrator -================================== - -.. currentmodule:: scipy.integrate - -.. 
automethod:: ode.set_integrator \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_jac_params.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_jac_params.rst deleted file mode 100644 index 39b4a45239..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.set_jac_params.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.ode.set_jac_params -================================== - -.. currentmodule:: scipy.integrate - -.. automethod:: ode.set_jac_params \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.successful.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.successful.rst deleted file mode 100644 index 342e67aa17..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.ode.successful.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.ode.successful -============================== - -.. currentmodule:: scipy.integrate - -.. automethod:: ode.successful \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.odeint.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.odeint.rst deleted file mode 100644 index 4577c3903f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.odeint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.odeint -====================== - -.. currentmodule:: scipy.integrate - -.. autofunction:: odeint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.quad.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.quad.rst deleted file mode 100644 index 5e35ab5b35..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.quad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.quad -==================== - -.. currentmodule:: scipy.integrate - -.. 
autofunction:: quad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.quadrature.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.quadrature.rst deleted file mode 100644 index f33401ccf0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.quadrature.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.quadrature -========================== - -.. currentmodule:: scipy.integrate - -.. autofunction:: quadrature \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.romb.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.romb.rst deleted file mode 100644 index 05336a8617..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.romb.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.romb -==================== - -.. currentmodule:: scipy.integrate - -.. autofunction:: romb \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.romberg.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.romberg.rst deleted file mode 100644 index 95464d2c7b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.romberg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.romberg -======================= - -.. currentmodule:: scipy.integrate - -.. autofunction:: romberg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.simps.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.simps.rst deleted file mode 100644 index fc45bcb570..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.simps.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.simps -===================== - -.. currentmodule:: scipy.integrate - -.. 
autofunction:: simps \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.tplquad.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.tplquad.rst deleted file mode 100644 index 8fc4085438..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.tplquad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.tplquad -======================= - -.. currentmodule:: scipy.integrate - -.. autofunction:: tplquad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.integrate.trapz.rst b/scipy-0.10.1/doc/source/generated/scipy.integrate.trapz.rst deleted file mode 100644 index 7b61a5b408..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.integrate.trapz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.integrate.trapz -===================== - -.. currentmodule:: scipy.integrate - -.. autofunction:: trapz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.__call__.rst deleted file mode 100644 index 979a957b79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BarycentricInterpolator.__call__ -================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: BarycentricInterpolator.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.add_xi.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.add_xi.rst deleted file mode 100644 index eebd616e2b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.add_xi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BarycentricInterpolator.add_xi -================================================ - -.. 
currentmodule:: scipy.interpolate - -.. automethod:: BarycentricInterpolator.add_xi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.rst deleted file mode 100644 index 7be7e64c8f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.rst +++ /dev/null @@ -1,21 +0,0 @@ -scipy.interpolate.BarycentricInterpolator -========================================= - -.. currentmodule:: scipy.interpolate - -.. autoclass:: BarycentricInterpolator - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - BarycentricInterpolator.__call__ - BarycentricInterpolator.add_xi - BarycentricInterpolator.set_yi - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.set_yi.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.set_yi.rst deleted file mode 100644 index a1a8c1b052..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BarycentricInterpolator.set_yi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BarycentricInterpolator.set_yi -================================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: BarycentricInterpolator.set_yi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.__call__.rst deleted file mode 100644 index 49bfc028ba..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BivariateSpline.__call__ -========================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: BivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.ev.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.ev.rst deleted file mode 100644 index 1cf3f83078..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.ev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BivariateSpline.ev -==================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: BivariateSpline.ev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_coeffs.rst deleted file mode 100644 index d6e577baee..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BivariateSpline.get_coeffs -============================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: BivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_knots.rst deleted file mode 100644 index 28702c1227..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BivariateSpline.get_knots -=========================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: BivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_residual.rst deleted file mode 100644 index 9d5e90d3ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BivariateSpline.get_residual -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: BivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.integral.rst deleted file mode 100644 index 9a9007eb8d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.BivariateSpline.integral -========================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: BivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.rst deleted file mode 100644 index 628f44b8c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.BivariateSpline.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.interpolate.BivariateSpline -================================= - -.. currentmodule:: scipy.interpolate - -.. autoclass:: BivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - BivariateSpline.__call__ - BivariateSpline.ev - BivariateSpline.get_coeffs - BivariateSpline.get_knots - BivariateSpline.get_residual - BivariateSpline.integral - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.CloughTocher2DInterpolator.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.CloughTocher2DInterpolator.__call__.rst deleted file mode 100644 index 3cda49877d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.CloughTocher2DInterpolator.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.CloughTocher2DInterpolator.__call__ -===================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: CloughTocher2DInterpolator.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.CloughTocher2DInterpolator.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.CloughTocher2DInterpolator.rst deleted file mode 100644 index 707b026f2b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.CloughTocher2DInterpolator.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.interpolate.CloughTocher2DInterpolator -============================================ - -.. currentmodule:: scipy.interpolate - -.. autoclass:: CloughTocher2DInterpolator - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - CloughTocher2DInterpolator.__call__ - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.__call__.rst deleted file mode 100644 index 6547af5682..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.__call__ -======================================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: InterpolatedUnivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.derivatives.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.derivatives.rst deleted file mode 100644 index ef94c6f120..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.derivatives.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.derivatives -========================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: InterpolatedUnivariateSpline.derivatives \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs.rst deleted file mode 100644 index 1f5fcd7fc3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs -========================================================= - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: InterpolatedUnivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_knots.rst deleted file mode 100644 index ff59416e36..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.get_knots -======================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: InterpolatedUnivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_residual.rst deleted file mode 100644 index f2c9f9edda..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.get_residual -=========================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: InterpolatedUnivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.integral.rst deleted file mode 100644 index 36ec934888..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.integral -======================================================= - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: InterpolatedUnivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.roots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.roots.rst deleted file mode 100644 index a9a968cf6f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.roots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.roots -==================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: InterpolatedUnivariateSpline.roots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.rst deleted file mode 100644 index f0a1f0125c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.rst +++ /dev/null @@ -1,26 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline -============================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: InterpolatedUnivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - InterpolatedUnivariateSpline.__call__ - InterpolatedUnivariateSpline.derivatives - InterpolatedUnivariateSpline.get_coeffs - InterpolatedUnivariateSpline.get_knots - InterpolatedUnivariateSpline.get_residual - InterpolatedUnivariateSpline.integral - InterpolatedUnivariateSpline.roots - InterpolatedUnivariateSpline.set_smoothing_factor - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor.rst deleted file mode 100644 index 901d7534ac..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor -=================================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: InterpolatedUnivariateSpline.set_smoothing_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.__call__.rst deleted file mode 100644 index d38a43d3ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.KroghInterpolator.__call__ -============================================ - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: KroghInterpolator.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.derivative.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.derivative.rst deleted file mode 100644 index 308ee656c1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.derivative.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.KroghInterpolator.derivative -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: KroghInterpolator.derivative \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.derivatives.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.derivatives.rst deleted file mode 100644 index 21bb29a966..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.derivatives.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.KroghInterpolator.derivatives -=============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: KroghInterpolator.derivatives \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.rst deleted file mode 100644 index eb55353c42..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.KroghInterpolator.rst +++ /dev/null @@ -1,21 +0,0 @@ -scipy.interpolate.KroghInterpolator -=================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: KroghInterpolator - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - KroghInterpolator.__call__ - KroghInterpolator.derivative - KroghInterpolator.derivatives - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.__call__.rst deleted file mode 100644 index 41dab86085..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQBivariateSpline.__call__ -============================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQBivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.ev.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.ev.rst deleted file mode 100644 index bebddf4e69..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.ev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQBivariateSpline.ev -======================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQBivariateSpline.ev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_coeffs.rst deleted file mode 100644 index 6e338564e2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQBivariateSpline.get_coeffs -=============================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: LSQBivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_knots.rst deleted file mode 100644 index e8c9d13ace..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQBivariateSpline.get_knots -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQBivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_residual.rst deleted file mode 100644 index 184ffbc201..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQBivariateSpline.get_residual -================================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQBivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.integral.rst deleted file mode 100644 index e6d2656aa1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQBivariateSpline.integral -============================================= - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: LSQBivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.rst deleted file mode 100644 index fa4f24bb02..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQBivariateSpline.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.interpolate.LSQBivariateSpline -==================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: LSQBivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - LSQBivariateSpline.__call__ - LSQBivariateSpline.ev - LSQBivariateSpline.get_coeffs - LSQBivariateSpline.get_knots - LSQBivariateSpline.get_residual - LSQBivariateSpline.integral - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.__call__.rst deleted file mode 100644 index 83f170c70e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.__call__ -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.derivatives.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.derivatives.rst deleted file mode 100644 index 6a6b3ba001..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.derivatives.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.derivatives -================================================= - -.. 
currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.derivatives \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_coeffs.rst deleted file mode 100644 index 38ef13654e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.get_coeffs -================================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_knots.rst deleted file mode 100644 index d26f2d9756..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.get_knots -=============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_residual.rst deleted file mode 100644 index 03cc01e9e2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.get_residual -================================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: LSQUnivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.integral.rst deleted file mode 100644 index 757af7aa56..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.integral -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.roots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.roots.rst deleted file mode 100644 index 18d12ce448..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.roots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.roots -=========================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.roots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.rst deleted file mode 100644 index ad20edfac3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.rst +++ /dev/null @@ -1,26 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline -===================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: LSQUnivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - LSQUnivariateSpline.__call__ - LSQUnivariateSpline.derivatives - LSQUnivariateSpline.get_coeffs - LSQUnivariateSpline.get_knots - LSQUnivariateSpline.get_residual - LSQUnivariateSpline.integral - LSQUnivariateSpline.roots - LSQUnivariateSpline.set_smoothing_factor - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor.rst deleted file mode 100644 index 687dce520e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor -========================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LSQUnivariateSpline.set_smoothing_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LinearNDInterpolator.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LinearNDInterpolator.__call__.rst deleted file mode 100644 index e8eaab910c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LinearNDInterpolator.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.LinearNDInterpolator.__call__ -=============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: LinearNDInterpolator.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LinearNDInterpolator.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.LinearNDInterpolator.rst deleted file mode 100644 index 114162c8c8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.LinearNDInterpolator.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.interpolate.LinearNDInterpolator -====================================== - -.. currentmodule:: scipy.interpolate - -.. 
autoclass:: LinearNDInterpolator - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - LinearNDInterpolator.__call__ - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.NearestNDInterpolator.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.NearestNDInterpolator.__call__.rst deleted file mode 100644 index 3e9b36aa55..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.NearestNDInterpolator.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.NearestNDInterpolator.__call__ -================================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: NearestNDInterpolator.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.NearestNDInterpolator.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.NearestNDInterpolator.rst deleted file mode 100644 index 6579a767fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.NearestNDInterpolator.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.interpolate.NearestNDInterpolator -======================================= - -.. currentmodule:: scipy.interpolate - -.. autoclass:: NearestNDInterpolator - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - NearestNDInterpolator.__call__ - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.__call__.rst deleted file mode 100644 index 29414830aa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.PiecewisePolynomial.__call__ -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: PiecewisePolynomial.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.append.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.append.rst deleted file mode 100644 index 9ccff6be29..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.append.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.PiecewisePolynomial.append -============================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: PiecewisePolynomial.append \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.derivative.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.derivative.rst deleted file mode 100644 index cabb132520..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.derivative.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.PiecewisePolynomial.derivative -================================================ - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: PiecewisePolynomial.derivative \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.derivatives.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.derivatives.rst deleted file mode 100644 index f2e3008870..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.derivatives.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.PiecewisePolynomial.derivatives -================================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: PiecewisePolynomial.derivatives \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.extend.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.extend.rst deleted file mode 100644 index dc4508744c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.extend.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.PiecewisePolynomial.extend -============================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: PiecewisePolynomial.extend \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.rst deleted file mode 100644 index d582887191..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.PiecewisePolynomial.rst +++ /dev/null @@ -1,23 +0,0 @@ -scipy.interpolate.PiecewisePolynomial -===================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: PiecewisePolynomial - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - PiecewisePolynomial.__call__ - PiecewisePolynomial.append - PiecewisePolynomial.derivative - PiecewisePolynomial.derivatives - PiecewisePolynomial.extend - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.Rbf.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.Rbf.__call__.rst deleted file mode 100644 index 4768d79b00..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.Rbf.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.Rbf.__call__ -============================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: Rbf.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.Rbf.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.Rbf.rst deleted file mode 100644 index ed50d3ee54..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.Rbf.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.interpolate.Rbf -===================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: Rbf - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - Rbf.__call__ - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.__call__.rst deleted file mode 100644 index 3de65928c6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.RectBivariateSpline.__call__ -============================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: RectBivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.ev.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.ev.rst deleted file mode 100644 index 522879df07..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.ev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.RectBivariateSpline.ev -======================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: RectBivariateSpline.ev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_coeffs.rst deleted file mode 100644 index bb090cd37a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.RectBivariateSpline.get_coeffs -================================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: RectBivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_knots.rst deleted file mode 100644 index 7019ddf5fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.RectBivariateSpline.get_knots -=============================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: RectBivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_residual.rst deleted file mode 100644 index bc5228d83f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.RectBivariateSpline.get_residual -================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: RectBivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.integral.rst deleted file mode 100644 index e094362356..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.RectBivariateSpline.integral -============================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: RectBivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.rst deleted file mode 100644 index 0b274471f3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.RectBivariateSpline.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.interpolate.RectBivariateSpline -===================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: RectBivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - RectBivariateSpline.__call__ - RectBivariateSpline.ev - RectBivariateSpline.get_coeffs - RectBivariateSpline.get_knots - RectBivariateSpline.get_residual - RectBivariateSpline.integral - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.__call__.rst deleted file mode 100644 index 32d2065035..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline.__call__ -================================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: SmoothBivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.ev.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.ev.rst deleted file mode 100644 index d69027c693..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.ev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline.ev -========================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: SmoothBivariateSpline.ev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_coeffs.rst deleted file mode 100644 index 129818bebe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline.get_coeffs -================================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: SmoothBivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_knots.rst deleted file mode 100644 index 64262a3c72..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline.get_knots -================================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: SmoothBivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_residual.rst deleted file mode 100644 index f6f4d29301..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline.get_residual -==================================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: SmoothBivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.integral.rst deleted file mode 100644 index 904251d55b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline.integral -================================================ - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: SmoothBivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.rst deleted file mode 100644 index 7c2482db53..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.SmoothBivariateSpline.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.interpolate.SmoothBivariateSpline -======================================= - -.. currentmodule:: scipy.interpolate - -.. autoclass:: SmoothBivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - SmoothBivariateSpline.__call__ - SmoothBivariateSpline.ev - SmoothBivariateSpline.get_coeffs - SmoothBivariateSpline.get_knots - SmoothBivariateSpline.get_residual - SmoothBivariateSpline.integral - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.__call__.rst deleted file mode 100644 index 9bbc934387..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.__call__ -=========================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.derivatives.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.derivatives.rst deleted file mode 100644 index 8e06d59a65..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.derivatives.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.derivatives -============================================== - -.. 
currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.derivatives \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_coeffs.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_coeffs.rst deleted file mode 100644 index e679c6f26c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_coeffs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.get_coeffs -============================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.get_coeffs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_knots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_knots.rst deleted file mode 100644 index cbf14f108e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_knots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.get_knots -============================================ - -.. currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.get_knots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_residual.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_residual.rst deleted file mode 100644 index 2bbb531c12..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.get_residual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.get_residual -=============================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: UnivariateSpline.get_residual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.integral.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.integral.rst deleted file mode 100644 index dfc481455a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.integral.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.integral -=========================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.integral \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.roots.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.roots.rst deleted file mode 100644 index 33657245c1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.roots.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.roots -======================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.roots \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.rst deleted file mode 100644 index 269d51cfc1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.rst +++ /dev/null @@ -1,26 +0,0 @@ -scipy.interpolate.UnivariateSpline -================================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: UnivariateSpline - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - UnivariateSpline.__call__ - UnivariateSpline.derivatives - UnivariateSpline.get_coeffs - UnivariateSpline.get_knots - UnivariateSpline.get_residual - UnivariateSpline.integral - UnivariateSpline.roots - UnivariateSpline.set_smoothing_factor - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.set_smoothing_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.set_smoothing_factor.rst deleted file mode 100644 index 2339cbbd75..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.UnivariateSpline.set_smoothing_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.UnivariateSpline.set_smoothing_factor -======================================================= - -.. currentmodule:: scipy.interpolate - -.. automethod:: UnivariateSpline.set_smoothing_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.approximate_taylor_polynomial.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.approximate_taylor_polynomial.rst deleted file mode 100644 index db97380447..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.approximate_taylor_polynomial.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.approximate_taylor_polynomial -=============================================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: approximate_taylor_polynomial \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.barycentric_interpolate.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.barycentric_interpolate.rst deleted file mode 100644 index 86989761b8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.barycentric_interpolate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.barycentric_interpolate -========================================= - -.. currentmodule:: scipy.interpolate - -.. 
autofunction:: barycentric_interpolate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.bisplev.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.bisplev.rst deleted file mode 100644 index f8e80cac5e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.bisplev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.bisplev -========================= - -.. currentmodule:: scipy.interpolate - -.. autofunction:: bisplev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.bisplrep.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.bisplrep.rst deleted file mode 100644 index a36312d202..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.bisplrep.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.bisplrep -========================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: bisplrep \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.griddata.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.griddata.rst deleted file mode 100644 index 99485fbd23..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.griddata.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.griddata -========================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: griddata \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp1d.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp1d.__call__.rst deleted file mode 100644 index 20b8fbc107..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp1d.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.interp1d.__call__ -=================================== - -.. currentmodule:: scipy.interpolate - -.. 
automethod:: interp1d.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp1d.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp1d.rst deleted file mode 100644 index b8c72fcc33..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp1d.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.interpolate.interp1d -========================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: interp1d - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - interp1d.__call__ - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp2d.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp2d.__call__.rst deleted file mode 100644 index 4f2c91afd2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp2d.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.interp2d.__call__ -=================================== - -.. currentmodule:: scipy.interpolate - -.. automethod:: interp2d.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp2d.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp2d.rst deleted file mode 100644 index d509ac44b0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.interp2d.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.interpolate.interp2d -========================== - -.. currentmodule:: scipy.interpolate - -.. autoclass:: interp2d - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - interp2d.__call__ - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.krogh_interpolate.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.krogh_interpolate.rst deleted file mode 100644 index 677c9d0210..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.krogh_interpolate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.krogh_interpolate -=================================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: krogh_interpolate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.lagrange.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.lagrange.rst deleted file mode 100644 index cf664d4088..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.lagrange.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.lagrange -========================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: lagrange \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.piecewise_polynomial_interpolate.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.piecewise_polynomial_interpolate.rst deleted file mode 100644 index b094339469..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.piecewise_polynomial_interpolate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.piecewise_polynomial_interpolate -================================================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: piecewise_polynomial_interpolate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.spalde.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.spalde.rst deleted file mode 100644 index 2059341b73..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.spalde.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.spalde -======================== - -.. 
currentmodule:: scipy.interpolate - -.. autofunction:: spalde \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splev.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.splev.rst deleted file mode 100644 index e3fa4cf563..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.splev -======================= - -.. currentmodule:: scipy.interpolate - -.. autofunction:: splev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splint.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.splint.rst deleted file mode 100644 index 8f985961b2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.splint -======================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: splint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splprep.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.splprep.rst deleted file mode 100644 index e3c77083c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splprep.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.splprep -========================= - -.. currentmodule:: scipy.interpolate - -.. autofunction:: splprep \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splrep.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.splrep.rst deleted file mode 100644 index 2237b6570f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.splrep.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.splrep -======================== - -.. currentmodule:: scipy.interpolate - -.. 
autofunction:: splrep \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.interpolate.sproot.rst b/scipy-0.10.1/doc/source/generated/scipy.interpolate.sproot.rst deleted file mode 100644 index 4915d09fed..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.interpolate.sproot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.interpolate.sproot -======================== - -.. currentmodule:: scipy.interpolate - -.. autofunction:: sproot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.arff.loadarff.rst b/scipy-0.10.1/doc/source/generated/scipy.io.arff.loadarff.rst deleted file mode 100644 index de9de00abe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.arff.loadarff.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.arff.loadarff -====================== - -.. currentmodule:: scipy.io.arff - -.. autofunction:: loadarff \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.loadmat.rst b/scipy-0.10.1/doc/source/generated/scipy.io.loadmat.rst deleted file mode 100644 index 47c222452d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.loadmat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.loadmat -================ - -.. currentmodule:: scipy.io - -.. autofunction:: loadmat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.mminfo.rst b/scipy-0.10.1/doc/source/generated/scipy.io.mminfo.rst deleted file mode 100644 index 4143fe1c9c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.mminfo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.mminfo -=============== - -.. currentmodule:: scipy.io - -.. 
autofunction:: mminfo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.mmread.rst b/scipy-0.10.1/doc/source/generated/scipy.io.mmread.rst deleted file mode 100644 index b8dc951e1e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.mmread.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.mmread -=============== - -.. currentmodule:: scipy.io - -.. autofunction:: mmread \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.mmwrite.rst b/scipy-0.10.1/doc/source/generated/scipy.io.mmwrite.rst deleted file mode 100644 index 5a3ad4da00..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.mmwrite.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.mmwrite -================ - -.. currentmodule:: scipy.io - -.. autofunction:: mmwrite \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.close.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.close.rst deleted file mode 100644 index b85fe23397..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.close.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_file.close -================================= - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_file.close \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.createDimension.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.createDimension.rst deleted file mode 100644 index 5d217c4379..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.createDimension.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_file.createDimension -=========================================== - -.. currentmodule:: scipy.io.netcdf - -.. 
automethod:: netcdf_file.createDimension \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.createVariable.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.createVariable.rst deleted file mode 100644 index 5be67f1012..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.createVariable.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_file.createVariable -========================================== - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_file.createVariable \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.flush.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.flush.rst deleted file mode 100644 index bb578aa448..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.flush.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_file.flush -================================= - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_file.flush \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.rst deleted file mode 100644 index 63e55dae76..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.rst +++ /dev/null @@ -1,23 +0,0 @@ -scipy.io.netcdf.netcdf_file -=========================== - -.. currentmodule:: scipy.io.netcdf - -.. autoclass:: netcdf_file - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - netcdf_file.close - netcdf_file.createDimension - netcdf_file.createVariable - netcdf_file.flush - netcdf_file.sync - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.sync.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.sync.rst deleted file mode 100644 index d601046ea9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_file.sync.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_file.sync -================================ - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_file.sync \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.assignValue.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.assignValue.rst deleted file mode 100644 index 9dbe12d111..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.assignValue.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_variable.assignValue -=========================================== - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_variable.assignValue \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.getValue.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.getValue.rst deleted file mode 100644 index cdd671aa59..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.getValue.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_variable.getValue -======================================== - -.. currentmodule:: scipy.io.netcdf - -.. 
automethod:: netcdf_variable.getValue \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.isrec.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.isrec.rst deleted file mode 100644 index 79ea0c66cf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.isrec.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_variable.isrec -===================================== - -.. currentmodule:: scipy.io.netcdf - -.. autoattribute:: netcdf_variable.isrec \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.itemsize.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.itemsize.rst deleted file mode 100644 index c942403969..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.itemsize.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_variable.itemsize -======================================== - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_variable.itemsize \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.rst deleted file mode 100644 index 1bfe8041ae..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.rst +++ /dev/null @@ -1,29 +0,0 @@ -scipy.io.netcdf.netcdf_variable -=============================== - -.. currentmodule:: scipy.io.netcdf - -.. autoclass:: netcdf_variable - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - netcdf_variable.assignValue - netcdf_variable.getValue - netcdf_variable.itemsize - netcdf_variable.typecode - - - - - - .. 
HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - netcdf_variable.isrec - netcdf_variable.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.shape.rst deleted file mode 100644 index 4de6b18529..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_variable.shape -===================================== - -.. currentmodule:: scipy.io.netcdf - -.. autoattribute:: netcdf_variable.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.typecode.rst b/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.typecode.rst deleted file mode 100644 index f48f264c69..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.netcdf.netcdf_variable.typecode.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.netcdf.netcdf_variable.typecode -======================================== - -.. currentmodule:: scipy.io.netcdf - -.. automethod:: netcdf_variable.typecode \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.readsav.rst b/scipy-0.10.1/doc/source/generated/scipy.io.readsav.rst deleted file mode 100644 index 3f987d313f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.readsav.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.readsav -================ - -.. currentmodule:: scipy.io - -.. 
autofunction:: readsav \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.save_as_module.rst b/scipy-0.10.1/doc/source/generated/scipy.io.save_as_module.rst deleted file mode 100644 index 75692a3ba5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.save_as_module.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.save_as_module -======================= - -.. currentmodule:: scipy.io - -.. autofunction:: save_as_module \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.savemat.rst b/scipy-0.10.1/doc/source/generated/scipy.io.savemat.rst deleted file mode 100644 index e97612293a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.savemat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.savemat -================ - -.. currentmodule:: scipy.io - -.. autofunction:: savemat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.wavfile.read.rst b/scipy-0.10.1/doc/source/generated/scipy.io.wavfile.read.rst deleted file mode 100644 index d66dafa4f4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.wavfile.read.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.wavfile.read -===================== - -.. currentmodule:: scipy.io.wavfile - -.. autofunction:: read \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.io.wavfile.write.rst b/scipy-0.10.1/doc/source/generated/scipy.io.wavfile.write.rst deleted file mode 100644 index e7d6dc2b16..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.io.wavfile.write.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.io.wavfile.write -====================== - -.. currentmodule:: scipy.io.wavfile - -.. 
autofunction:: write \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.block_diag.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.block_diag.rst deleted file mode 100644 index a58c518add..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.block_diag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.block_diag -======================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: block_diag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_factor.rst deleted file mode 100644 index ad72b3cc5a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.cho_factor -======================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: cho_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_solve.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_solve.rst deleted file mode 100644 index 3d01815acb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_solve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.cho_solve -====================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: cho_solve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_solve_banded.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_solve_banded.rst deleted file mode 100644 index bd8ad95040..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.cho_solve_banded.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.cho_solve_banded -============================= - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: cho_solve_banded \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.cholesky.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.cholesky.rst deleted file mode 100644 index bf3573b0bb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.cholesky.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.cholesky -===================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: cholesky \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.cholesky_banded.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.cholesky_banded.rst deleted file mode 100644 index 86b353e2b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.cholesky_banded.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.cholesky_banded -============================ - -.. currentmodule:: scipy.linalg - -.. autofunction:: cholesky_banded \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.circulant.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.circulant.rst deleted file mode 100644 index b4720999ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.circulant.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.circulant -====================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: circulant \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.companion.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.companion.rst deleted file mode 100644 index 0b8fadd7d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.companion.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.companion -====================== - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: companion \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.coshm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.coshm.rst deleted file mode 100644 index a0d8a57cd9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.coshm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.coshm -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: coshm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.cosm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.cosm.rst deleted file mode 100644 index 6237cc2d77..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.cosm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.cosm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: cosm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.det.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.det.rst deleted file mode 100644 index b8acc7ff31..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.det.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.det -================ - -.. currentmodule:: scipy.linalg - -.. autofunction:: det \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.diagsvd.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.diagsvd.rst deleted file mode 100644 index f38943ea02..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.diagsvd.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.diagsvd -==================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: diagsvd \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.eig.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.eig.rst deleted file mode 100644 index 50d416e730..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.eig.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.eig -================ - -.. 
currentmodule:: scipy.linalg - -.. autofunction:: eig \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.eig_banded.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.eig_banded.rst deleted file mode 100644 index c4f5a6ac48..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.eig_banded.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.eig_banded -======================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: eig_banded \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigh.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.eigh.rst deleted file mode 100644 index b00af4295f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigh.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.eigh -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: eigh \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvals.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvals.rst deleted file mode 100644 index f357d9ba85..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvals.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.eigvals -==================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: eigvals \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvals_banded.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvals_banded.rst deleted file mode 100644 index 2e5a508fff..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvals_banded.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.eigvals_banded -=========================== - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: eigvals_banded \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvalsh.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvalsh.rst deleted file mode 100644 index 30b23dbc1e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.eigvalsh.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.eigvalsh -===================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: eigvalsh \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.expm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.expm.rst deleted file mode 100644 index d324bd9fed..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.expm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.expm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: expm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.expm2.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.expm2.rst deleted file mode 100644 index ff059d3964..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.expm2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.expm2 -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: expm2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.expm3.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.expm3.rst deleted file mode 100644 index 81e42bd429..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.expm3.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.expm3 -================== - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: expm3 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.funm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.funm.rst deleted file mode 100644 index 92cd507b2e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.funm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.funm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: funm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.hadamard.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.hadamard.rst deleted file mode 100644 index 544fe58091..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.hadamard.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.hadamard -===================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: hadamard \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.hankel.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.hankel.rst deleted file mode 100644 index 9d9c04b792..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.hankel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.hankel -=================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: hankel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.hessenberg.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.hessenberg.rst deleted file mode 100644 index 418ae20317..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.hessenberg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.hessenberg -======================= - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: hessenberg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.hilbert.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.hilbert.rst deleted file mode 100644 index 2cef751632..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.hilbert.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.hilbert -==================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: hilbert \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.inv.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.inv.rst deleted file mode 100644 index 1affb8c3a0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.inv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.inv -================ - -.. currentmodule:: scipy.linalg - -.. autofunction:: inv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.invhilbert.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.invhilbert.rst deleted file mode 100644 index bb26107762..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.invhilbert.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.invhilbert -======================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: invhilbert \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.kron.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.kron.rst deleted file mode 100644 index bb15a5d51d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.kron.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.kron -================= - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: kron \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.leslie.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.leslie.rst deleted file mode 100644 index 014b5be16e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.leslie.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.leslie -=================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: leslie \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.logm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.logm.rst deleted file mode 100644 index 768d965b43..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.logm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.logm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: logm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.lstsq.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.lstsq.rst deleted file mode 100644 index a4ac4c07de..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.lstsq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.lstsq -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: lstsq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.lu.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.lu.rst deleted file mode 100644 index 2cd8bdd46a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.lu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.lu -=============== - -.. currentmodule:: scipy.linalg - -.. autofunction:: lu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.lu_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.lu_factor.rst deleted file mode 100644 index 7020430b57..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.lu_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.lu_factor -====================== - -.. 
currentmodule:: scipy.linalg - -.. autofunction:: lu_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.lu_solve.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.lu_solve.rst deleted file mode 100644 index 7dc0be072b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.lu_solve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.lu_solve -===================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: lu_solve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.norm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.norm.rst deleted file mode 100644 index 96ac919bfd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.norm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.norm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: norm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.orth.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.orth.rst deleted file mode 100644 index dbbfd19233..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.orth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.orth -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: orth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.pinv.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.pinv.rst deleted file mode 100644 index 547b20264e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.pinv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.pinv -================= - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: pinv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.pinv2.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.pinv2.rst deleted file mode 100644 index c773b739e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.pinv2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.pinv2 -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: pinv2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.qr.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.qr.rst deleted file mode 100644 index 77769e6927..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.qr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.qr -=============== - -.. currentmodule:: scipy.linalg - -.. autofunction:: qr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.rsf2csf.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.rsf2csf.rst deleted file mode 100644 index ba9dfbc399..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.rsf2csf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.rsf2csf -==================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: rsf2csf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.schur.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.schur.rst deleted file mode 100644 index 6748c7b6df..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.schur.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.schur -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: schur \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.signm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.signm.rst deleted file mode 100644 index 030857bdc9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.signm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.signm -================== - -.. 
currentmodule:: scipy.linalg - -.. autofunction:: signm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.sinhm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.sinhm.rst deleted file mode 100644 index bbe8509a56..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.sinhm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.sinhm -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: sinhm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.sinm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.sinm.rst deleted file mode 100644 index 6e20c38760..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.sinm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.sinm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: sinm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.solve.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.solve.rst deleted file mode 100644 index 124bab6820..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.solve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.solve -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: solve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.solve_banded.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.solve_banded.rst deleted file mode 100644 index 7514df8c71..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.solve_banded.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.solve_banded -========================= - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: solve_banded \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.solve_triangular.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.solve_triangular.rst deleted file mode 100644 index 3994416dbe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.solve_triangular.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.solve_triangular -============================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: solve_triangular \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.solveh_banded.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.solveh_banded.rst deleted file mode 100644 index 4778f79153..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.solveh_banded.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.solveh_banded -========================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: solveh_banded \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.sqrtm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.sqrtm.rst deleted file mode 100644 index dc4ae6e3b4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.sqrtm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.sqrtm -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: sqrtm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.svd.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.svd.rst deleted file mode 100644 index 6862b1d6f6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.svd.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.svd -================ - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: svd \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.svdvals.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.svdvals.rst deleted file mode 100644 index 41323e9a67..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.svdvals.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.svdvals -==================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: svdvals \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.tanhm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.tanhm.rst deleted file mode 100644 index e68559dfd6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.tanhm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.tanhm -================== - -.. currentmodule:: scipy.linalg - -.. autofunction:: tanhm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.tanm.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.tanm.rst deleted file mode 100644 index 880775eec7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.tanm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.tanm -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: tanm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.toeplitz.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.toeplitz.rst deleted file mode 100644 index e39337f558..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.toeplitz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.toeplitz -===================== - -.. currentmodule:: scipy.linalg - -.. 
autofunction:: toeplitz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.tri.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.tri.rst deleted file mode 100644 index 792bef5a6d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.tri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.tri -================ - -.. currentmodule:: scipy.linalg - -.. autofunction:: tri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.tril.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.tril.rst deleted file mode 100644 index 16a955a334..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.tril.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.tril -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: tril \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.linalg.triu.rst b/scipy-0.10.1/doc/source/generated/scipy.linalg.triu.rst deleted file mode 100644 index 2a31c1d4ca..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.linalg.triu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.linalg.triu -================= - -.. currentmodule:: scipy.linalg - -.. autofunction:: triu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.arrayexp.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.arrayexp.rst deleted file mode 100644 index 65e2143e2d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.arrayexp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.arrayexp -========================= - -.. currentmodule:: scipy.maxentropy - -.. 
autofunction:: arrayexp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.beginlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.beginlogging.rst deleted file mode 100644 index 756ce0089a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.beginlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.beginlogging -======================================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.beginlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.clearcache.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.clearcache.rst deleted file mode 100644 index a1c960c5af..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.clearcache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.clearcache -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.clearcache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.crossentropy.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.crossentropy.rst deleted file mode 100644 index 7ec6288b95..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.crossentropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.crossentropy -======================================= - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: basemodel.crossentropy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.dual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.dual.rst deleted file mode 100644 index 93141a9f46..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.dual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.dual -=============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.dual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.endlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.endlogging.rst deleted file mode 100644 index 6ac6e22681..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.endlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.endlogging -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.endlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.entropydual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.entropydual.rst deleted file mode 100644 index 0f67208a0a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.entropydual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.entropydual -====================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.entropydual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.fit.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.fit.rst deleted file mode 100644 index e364adca1a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.fit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.fit -============================== - -.. 
currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.fit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.grad.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.grad.rst deleted file mode 100644 index 215d149ddb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.grad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.grad -=============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.grad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.log.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.log.rst deleted file mode 100644 index 00685aabf5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.log.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.log -============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.log \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.logparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.logparams.rst deleted file mode 100644 index 4f36f3ca4b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.logparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.logparams -==================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.logparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.normconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.normconst.rst deleted file mode 100644 index f8115065ef..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.normconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.normconst -==================================== - -.. 
currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.normconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.reset.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.reset.rst deleted file mode 100644 index b7fc246a75..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.reset.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.reset -================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.reset \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.rst deleted file mode 100644 index f0d0eb4111..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.rst +++ /dev/null @@ -1,33 +0,0 @@ -scipy.maxentropy.basemodel -========================== - -.. currentmodule:: scipy.maxentropy - -.. autoclass:: basemodel - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - basemodel.beginlogging - basemodel.clearcache - basemodel.crossentropy - basemodel.dual - basemodel.endlogging - basemodel.entropydual - basemodel.fit - basemodel.grad - basemodel.log - basemodel.logparams - basemodel.normconst - basemodel.reset - basemodel.setcallback - basemodel.setparams - basemodel.setsmooth - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setcallback.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setcallback.rst deleted file mode 100644 index 523e174a45..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setcallback.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.setcallback -====================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: basemodel.setcallback \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setparams.rst deleted file mode 100644 index 790c28cb68..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.setparams -==================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.setparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setsmooth.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setsmooth.rst deleted file mode 100644 index 27f502c55e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.basemodel.setsmooth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.basemodel.setsmooth -==================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: basemodel.setsmooth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.beginlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.beginlogging.rst deleted file mode 100644 index 98ce7ea123..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.beginlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.beginlogging -====================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: bigmodel.beginlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.clearcache.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.clearcache.rst deleted file mode 100644 index 79bb5cf763..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.clearcache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.clearcache -==================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.clearcache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.crossentropy.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.crossentropy.rst deleted file mode 100644 index 8421f5d213..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.crossentropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.crossentropy -====================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.crossentropy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.dual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.dual.rst deleted file mode 100644 index 1efa23cb28..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.dual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.dual -============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.dual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.endlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.endlogging.rst deleted file mode 100644 index 61d5ee6e70..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.endlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.endlogging -==================================== - -.. 
currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.endlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.entropydual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.entropydual.rst deleted file mode 100644 index 70f9440598..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.entropydual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.entropydual -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.entropydual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.estimate.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.estimate.rst deleted file mode 100644 index 9f891b41fb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.estimate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.estimate -================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.estimate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.expectations.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.expectations.rst deleted file mode 100644 index 02cb31a611..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.expectations.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.expectations -====================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: bigmodel.expectations \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.fit.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.fit.rst deleted file mode 100644 index fd07c74569..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.fit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.fit -============================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.fit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.grad.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.grad.rst deleted file mode 100644 index 74e44ad832..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.grad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.grad -============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.grad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.log.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.log.rst deleted file mode 100644 index 2fadf3fe5c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.log.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.log -============================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.log \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.lognormconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.lognormconst.rst deleted file mode 100644 index 7518bac5c5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.lognormconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.lognormconst -====================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: bigmodel.lognormconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.logparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.logparams.rst deleted file mode 100644 index 9fffed9252..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.logparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.logparams -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.logparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.logpdf.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.logpdf.rst deleted file mode 100644 index 15b196e6cb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.logpdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.logpdf -================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.logpdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.normconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.normconst.rst deleted file mode 100644 index 0c53dfe930..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.normconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.normconst -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.normconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.pdf.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.pdf.rst deleted file mode 100644 index 8cae3befcf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.pdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.pdf -============================= - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: bigmodel.pdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.pdf_function.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.pdf_function.rst deleted file mode 100644 index fdebda72d0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.pdf_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.pdf_function -====================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.pdf_function \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.resample.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.resample.rst deleted file mode 100644 index e693049343..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.resample.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.resample -================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.resample \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.reset.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.reset.rst deleted file mode 100644 index c5cf96738e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.reset.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.reset -=============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.reset \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.rst deleted file mode 100644 index 5fa0759cfe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.rst +++ /dev/null @@ -1,44 +0,0 @@ -scipy.maxentropy.bigmodel -========================= - -.. currentmodule:: scipy.maxentropy - -.. autoclass:: bigmodel - - - - .. 
HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - bigmodel.beginlogging - bigmodel.clearcache - bigmodel.crossentropy - bigmodel.dual - bigmodel.endlogging - bigmodel.entropydual - bigmodel.estimate - bigmodel.expectations - bigmodel.fit - bigmodel.grad - bigmodel.log - bigmodel.lognormconst - bigmodel.logparams - bigmodel.logpdf - bigmodel.normconst - bigmodel.pdf - bigmodel.pdf_function - bigmodel.resample - bigmodel.reset - bigmodel.setcallback - bigmodel.setparams - bigmodel.setsampleFgen - bigmodel.setsmooth - bigmodel.settestsamples - bigmodel.stochapprox - bigmodel.test - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setcallback.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setcallback.rst deleted file mode 100644 index 2cccae725c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setcallback.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.setcallback -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.setcallback \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setparams.rst deleted file mode 100644 index 92a5a9ab04..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.setparams -=================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: bigmodel.setparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setsampleFgen.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setsampleFgen.rst deleted file mode 100644 index f3513fbe68..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setsampleFgen.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.setsampleFgen -======================================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.setsampleFgen \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setsmooth.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setsmooth.rst deleted file mode 100644 index d4374ca316..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.setsmooth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.setsmooth -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.setsmooth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.settestsamples.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.settestsamples.rst deleted file mode 100644 index 53cb0ce099..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.settestsamples.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.settestsamples -======================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: bigmodel.settestsamples \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.stochapprox.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.stochapprox.rst deleted file mode 100644 index 2912405824..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.stochapprox.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.stochapprox -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.stochapprox \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.test.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.test.rst deleted file mode 100644 index 4a8c116936..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.bigmodel.test.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.bigmodel.test -============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: bigmodel.test \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.columnmeans.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.columnmeans.rst deleted file mode 100644 index 98de267f16..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.columnmeans.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.columnmeans -============================ - -.. currentmodule:: scipy.maxentropy - -.. autofunction:: columnmeans \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.columnvariances.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.columnvariances.rst deleted file mode 100644 index 327584d83c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.columnvariances.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.columnvariances -================================ - -.. currentmodule:: scipy.maxentropy - -.. 
autofunction:: columnvariances \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.beginlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.beginlogging.rst deleted file mode 100644 index 1ca28131de..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.beginlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.beginlogging -============================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.beginlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.clearcache.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.clearcache.rst deleted file mode 100644 index e2bed1b67e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.clearcache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.clearcache -============================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.clearcache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.crossentropy.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.crossentropy.rst deleted file mode 100644 index c4f0f218c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.crossentropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.crossentropy -============================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.crossentropy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.dual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.dual.rst deleted file mode 100644 index 779f993788..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.dual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.dual -====================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.dual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.endlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.endlogging.rst deleted file mode 100644 index a427e75ff3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.endlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.endlogging -============================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.endlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.entropydual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.entropydual.rst deleted file mode 100644 index 3d192ff276..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.entropydual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.entropydual -============================================= - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.entropydual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.expectations.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.expectations.rst deleted file mode 100644 index c8a9e493c3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.expectations.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.expectations -============================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.expectations \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.fit.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.fit.rst deleted file mode 100644 index e0dde1aaa2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.fit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.fit -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.fit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.grad.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.grad.rst deleted file mode 100644 index 1e191ebad4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.grad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.grad -====================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.grad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.log.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.log.rst deleted file mode 100644 index 2f3f73ed9c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.log.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.log -===================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.log \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.lognormconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.lognormconst.rst deleted file mode 100644 index 30dfe42f78..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.lognormconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.lognormconst -============================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.lognormconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.logparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.logparams.rst deleted file mode 100644 index 387d21d7fd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.logparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.logparams -=========================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.logparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.logpmf.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.logpmf.rst deleted file mode 100644 index 4351c3b6a1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.logpmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.logpmf -======================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.logpmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.normconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.normconst.rst deleted file mode 100644 index 2cf3dada63..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.normconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.normconst -=========================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.normconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.pmf.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.pmf.rst deleted file mode 100644 index 45b85b4e63..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.pmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.pmf -===================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.pmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.pmf_function.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.pmf_function.rst deleted file mode 100644 index a06165b4f8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.pmf_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.pmf_function -============================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.pmf_function \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.probdist.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.probdist.rst deleted file mode 100644 index ba02f02cd0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.probdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.probdist -========================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.probdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.reset.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.reset.rst deleted file mode 100644 index 04f5165faf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.reset.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.reset -======================================= - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.reset \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.rst deleted file mode 100644 index e5a3044b8b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.rst +++ /dev/null @@ -1,40 +0,0 @@ -scipy.maxentropy.conditionalmodel -================================= - -.. currentmodule:: scipy.maxentropy - -.. autoclass:: conditionalmodel - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - conditionalmodel.beginlogging - conditionalmodel.clearcache - conditionalmodel.crossentropy - conditionalmodel.dual - conditionalmodel.endlogging - conditionalmodel.entropydual - conditionalmodel.expectations - conditionalmodel.fit - conditionalmodel.grad - conditionalmodel.log - conditionalmodel.lognormconst - conditionalmodel.logparams - conditionalmodel.logpmf - conditionalmodel.normconst - conditionalmodel.pmf - conditionalmodel.pmf_function - conditionalmodel.probdist - conditionalmodel.reset - conditionalmodel.setcallback - conditionalmodel.setfeaturesandsamplespace - conditionalmodel.setparams - conditionalmodel.setsmooth - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setcallback.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setcallback.rst deleted file mode 100644 index 71bd54bb2c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setcallback.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.setcallback -============================================= - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.setcallback \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setfeaturesandsamplespace.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setfeaturesandsamplespace.rst deleted file mode 100644 index b8015db037..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setfeaturesandsamplespace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.setfeaturesandsamplespace -=========================================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.setfeaturesandsamplespace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setparams.rst deleted file mode 100644 index 4097603ede..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.setparams -=========================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: conditionalmodel.setparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setsmooth.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setsmooth.rst deleted file mode 100644 index 4e7ac81e79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.conditionalmodel.setsmooth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.conditionalmodel.setsmooth -=========================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: conditionalmodel.setsmooth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.flatten.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.flatten.rst deleted file mode 100644 index 2f8be79464..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.flatten.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.flatten -======================== - -.. currentmodule:: scipy.maxentropy - -.. autofunction:: flatten \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.innerprod.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.innerprod.rst deleted file mode 100644 index 6c867df335..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.innerprod.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.innerprod -========================== - -.. currentmodule:: scipy.maxentropy - -.. autofunction:: innerprod \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.innerprodtranspose.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.innerprodtranspose.rst deleted file mode 100644 index 209ae0ce93..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.innerprodtranspose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.innerprodtranspose -=================================== - -.. currentmodule:: scipy.maxentropy - -.. autofunction:: innerprodtranspose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.logsumexp.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.logsumexp.rst deleted file mode 100644 index 7e6b7b5868..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.logsumexp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.logsumexp -========================== - -.. currentmodule:: scipy.maxentropy - -.. 
autofunction:: logsumexp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.beginlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.beginlogging.rst deleted file mode 100644 index d08065aba9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.beginlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.beginlogging -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.beginlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.clearcache.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.clearcache.rst deleted file mode 100644 index e7293b8b8c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.clearcache.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.clearcache -================================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.clearcache \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.crossentropy.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.crossentropy.rst deleted file mode 100644 index 284216149a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.crossentropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.crossentropy -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.crossentropy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.dual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.dual.rst deleted file mode 100644 index 0d22f56f79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.dual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.dual -=========================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: model.dual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.endlogging.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.endlogging.rst deleted file mode 100644 index cf526ede88..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.endlogging.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.endlogging -================================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.endlogging \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.entropydual.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.entropydual.rst deleted file mode 100644 index 9d9eea7b75..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.entropydual.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.entropydual -================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.entropydual \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.expectations.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.expectations.rst deleted file mode 100644 index 1bf02caa92..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.expectations.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.expectations -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.expectations \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.fit.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.fit.rst deleted file mode 100644 index dbfec7e490..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.fit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.fit -========================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: model.fit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.grad.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.grad.rst deleted file mode 100644 index 2d0c0a9f02..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.grad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.grad -=========================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.grad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.log.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.log.rst deleted file mode 100644 index 7cbd42d920..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.log.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.log -========================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.log \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.lognormconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.lognormconst.rst deleted file mode 100644 index d7dc127e8b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.lognormconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.lognormconst -=================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.lognormconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.logparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.logparams.rst deleted file mode 100644 index 5df13e7526..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.logparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.logparams -================================ - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: model.logparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.logpmf.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.logpmf.rst deleted file mode 100644 index e22c73e418..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.logpmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.logpmf -============================= - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.logpmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.normconst.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.normconst.rst deleted file mode 100644 index 7fd88923e8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.normconst.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.normconst -================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.normconst \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.pmf.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.pmf.rst deleted file mode 100644 index 558d5afb9a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.pmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.pmf -========================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.pmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.pmf_function.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.pmf_function.rst deleted file mode 100644 index ae21cd8dd6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.pmf_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.pmf_function -=================================== - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: model.pmf_function \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.probdist.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.probdist.rst deleted file mode 100644 index 3e9d67577c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.probdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.probdist -=============================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.probdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.reset.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.reset.rst deleted file mode 100644 index 59fc67864d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.reset.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.reset -============================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.reset \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.rst deleted file mode 100644 index 562374123e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.rst +++ /dev/null @@ -1,40 +0,0 @@ -scipy.maxentropy.model -====================== - -.. currentmodule:: scipy.maxentropy - -.. autoclass:: model - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - model.beginlogging - model.clearcache - model.crossentropy - model.dual - model.endlogging - model.entropydual - model.expectations - model.fit - model.grad - model.log - model.lognormconst - model.logparams - model.logpmf - model.normconst - model.pmf - model.pmf_function - model.probdist - model.reset - model.setcallback - model.setfeaturesandsamplespace - model.setparams - model.setsmooth - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setcallback.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setcallback.rst deleted file mode 100644 index 218399ae7d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setcallback.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.setcallback -================================== - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.setcallback \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setfeaturesandsamplespace.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setfeaturesandsamplespace.rst deleted file mode 100644 index 3218ca50ab..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setfeaturesandsamplespace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.setfeaturesandsamplespace -================================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.setfeaturesandsamplespace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setparams.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setparams.rst deleted file mode 100644 index ff97e38f20..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setparams.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.setparams -================================ - -.. currentmodule:: scipy.maxentropy - -.. 
automethod:: model.setparams \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setsmooth.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setsmooth.rst deleted file mode 100644 index e4e88ad5f2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.model.setsmooth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.model.setsmooth -================================ - -.. currentmodule:: scipy.maxentropy - -.. automethod:: model.setsmooth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.sparsefeaturematrix.rst b/scipy-0.10.1/doc/source/generated/scipy.maxentropy.sparsefeaturematrix.rst deleted file mode 100644 index 9a6649acbf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.maxentropy.sparsefeaturematrix.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.maxentropy.sparsefeaturematrix -==================================== - -.. currentmodule:: scipy.maxentropy - -.. autofunction:: sparsefeaturematrix \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.bytescale.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.bytescale.rst deleted file mode 100644 index 700a38de64..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.bytescale.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.bytescale -==================== - -.. currentmodule:: scipy.misc - -.. autofunction:: bytescale \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.central_diff_weights.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.central_diff_weights.rst deleted file mode 100644 index 09af20c66e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.central_diff_weights.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.central_diff_weights -=============================== - -.. currentmodule:: scipy.misc - -.. 
autofunction:: central_diff_weights \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.comb.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.comb.rst deleted file mode 100644 index 7b5b511d67..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.comb.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.comb -=============== - -.. currentmodule:: scipy.misc - -.. autofunction:: comb \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.derivative.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.derivative.rst deleted file mode 100644 index bee1e957d7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.derivative.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.derivative -===================== - -.. currentmodule:: scipy.misc - -.. autofunction:: derivative \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.factorial.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.factorial.rst deleted file mode 100644 index 83596472ff..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.factorial.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.factorial -==================== - -.. currentmodule:: scipy.misc - -.. autofunction:: factorial \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.factorial2.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.factorial2.rst deleted file mode 100644 index a07358cdf6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.factorial2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.factorial2 -===================== - -.. currentmodule:: scipy.misc - -.. 
autofunction:: factorial2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.factorialk.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.factorialk.rst deleted file mode 100644 index c03babfb2a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.factorialk.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.factorialk -===================== - -.. currentmodule:: scipy.misc - -.. autofunction:: factorialk \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.fromimage.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.fromimage.rst deleted file mode 100644 index 969209bc32..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.fromimage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.fromimage -==================== - -.. currentmodule:: scipy.misc - -.. autofunction:: fromimage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.imfilter.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.imfilter.rst deleted file mode 100644 index 6719eea561..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.imfilter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.imfilter -=================== - -.. currentmodule:: scipy.misc - -.. autofunction:: imfilter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.imread.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.imread.rst deleted file mode 100644 index 7790a42491..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.imread.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.imread -================= - -.. currentmodule:: scipy.misc - -.. 
autofunction:: imread \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.imresize.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.imresize.rst deleted file mode 100644 index 52a911a488..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.imresize.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.imresize -=================== - -.. currentmodule:: scipy.misc - -.. autofunction:: imresize \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.imrotate.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.imrotate.rst deleted file mode 100644 index 6ce551fc26..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.imrotate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.imrotate -=================== - -.. currentmodule:: scipy.misc - -.. autofunction:: imrotate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.imsave.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.imsave.rst deleted file mode 100644 index 4846ed697e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.imsave.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.imsave -================= - -.. currentmodule:: scipy.misc - -.. autofunction:: imsave \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.imshow.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.imshow.rst deleted file mode 100644 index 3ea8fc2f1d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.imshow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.imshow -================= - -.. currentmodule:: scipy.misc - -.. autofunction:: imshow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.info.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.info.rst deleted file mode 100644 index 447ad5a713..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.info.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.info -=============== - -.. 
currentmodule:: scipy.misc - -.. autofunction:: info \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.lena.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.lena.rst deleted file mode 100644 index d9e878eb98..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.lena.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.lena -=============== - -.. currentmodule:: scipy.misc - -.. autofunction:: lena \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.pade.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.pade.rst deleted file mode 100644 index 531ff0ddf8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.pade.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.pade -=============== - -.. currentmodule:: scipy.misc - -.. autofunction:: pade \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.radon.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.radon.rst deleted file mode 100644 index e3a57025fc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.radon.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.radon -================ - -.. currentmodule:: scipy.misc - -.. autofunction:: radon \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.misc.toimage.rst b/scipy-0.10.1/doc/source/generated/scipy.misc.toimage.rst deleted file mode 100644 index d235fa52f2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.misc.toimage.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.misc.toimage -================== - -.. currentmodule:: scipy.misc - -.. 
autofunction:: toimage \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.convolve.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.convolve.rst deleted file mode 100644 index 4f232a2b1d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.convolve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.convolve -============================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: convolve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.convolve1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.convolve1d.rst deleted file mode 100644 index 33f6563cf8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.convolve1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.convolve1d -================================ - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: convolve1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.correlate.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.correlate.rst deleted file mode 100644 index fda8852829..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.correlate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.correlate -=============================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: correlate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.correlate1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.correlate1d.rst deleted file mode 100644 index b6b4a815a8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.correlate1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.correlate1d -================================= - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: correlate1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_filter.rst deleted file mode 100644 index d10d85706f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.gaussian_filter -===================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: gaussian_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_filter1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_filter1d.rst deleted file mode 100644 index b99c4eb134..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_filter1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.gaussian_filter1d -======================================= - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: gaussian_filter1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_gradient_magnitude.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_gradient_magnitude.rst deleted file mode 100644 index c35c1db64c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_gradient_magnitude.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.gaussian_gradient_magnitude -================================================= - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: gaussian_gradient_magnitude \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_laplace.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_laplace.rst deleted file mode 100644 index 0fd24a1be6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.gaussian_laplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.gaussian_laplace -====================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: gaussian_laplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_filter.rst deleted file mode 100644 index 198465ecd3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.generic_filter -==================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: generic_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_filter1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_filter1d.rst deleted file mode 100644 index f4fe69ea00..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_filter1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.generic_filter1d -====================================== - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: generic_filter1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_gradient_magnitude.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_gradient_magnitude.rst deleted file mode 100644 index 7da3f2b0b7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_gradient_magnitude.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.generic_gradient_magnitude -================================================ - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: generic_gradient_magnitude \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_laplace.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_laplace.rst deleted file mode 100644 index ea39306cc7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.generic_laplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.generic_laplace -===================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: generic_laplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.laplace.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.laplace.rst deleted file mode 100644 index 1b364f730a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.laplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.laplace -============================= - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: laplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.maximum_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.maximum_filter.rst deleted file mode 100644 index 384a81b72f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.maximum_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.maximum_filter -==================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: maximum_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.maximum_filter1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.maximum_filter1d.rst deleted file mode 100644 index 9a54db5359..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.maximum_filter1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.maximum_filter1d -====================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: maximum_filter1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.median_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.median_filter.rst deleted file mode 100644 index 257663a9cd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.median_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.median_filter -=================================== - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: median_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.minimum_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.minimum_filter.rst deleted file mode 100644 index b3aa3e0b99..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.minimum_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.minimum_filter -==================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: minimum_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.minimum_filter1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.minimum_filter1d.rst deleted file mode 100644 index 524961f413..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.minimum_filter1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.minimum_filter1d -====================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: minimum_filter1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.percentile_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.percentile_filter.rst deleted file mode 100644 index dbedf8e914..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.percentile_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.percentile_filter -======================================= - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: percentile_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.prewitt.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.prewitt.rst deleted file mode 100644 index d6722ca200..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.prewitt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.prewitt -============================= - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: prewitt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.rank_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.rank_filter.rst deleted file mode 100644 index ad0aca53ea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.rank_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.rank_filter -================================= - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: rank_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.sobel.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.sobel.rst deleted file mode 100644 index 6564322e45..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.sobel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.sobel -=========================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: sobel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.uniform_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.uniform_filter.rst deleted file mode 100644 index be16996418..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.uniform_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.uniform_filter -==================================== - -.. currentmodule:: scipy.ndimage.filters - -.. 
autofunction:: uniform_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.uniform_filter1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.uniform_filter1d.rst deleted file mode 100644 index 7111fec4a9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.filters.uniform_filter1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.filters.uniform_filter1d -====================================== - -.. currentmodule:: scipy.ndimage.filters - -.. autofunction:: uniform_filter1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_ellipsoid.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_ellipsoid.rst deleted file mode 100644 index cdffc7547b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_ellipsoid.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.fourier.fourier_ellipsoid -======================================= - -.. currentmodule:: scipy.ndimage.fourier - -.. autofunction:: fourier_ellipsoid \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_gaussian.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_gaussian.rst deleted file mode 100644 index 48f56d57ac..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_gaussian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.fourier.fourier_gaussian -====================================== - -.. currentmodule:: scipy.ndimage.fourier - -.. 
autofunction:: fourier_gaussian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_shift.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_shift.rst deleted file mode 100644 index 7ed2be6e0c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_shift.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.fourier.fourier_shift -=================================== - -.. currentmodule:: scipy.ndimage.fourier - -.. autofunction:: fourier_shift \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_uniform.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_uniform.rst deleted file mode 100644 index 59a4ac0e4e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.fourier.fourier_uniform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.fourier.fourier_uniform -===================================== - -.. currentmodule:: scipy.ndimage.fourier - -.. autofunction:: fourier_uniform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.imread.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.imread.rst deleted file mode 100644 index 96b6d0655a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.imread.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.imread -==================== - -.. currentmodule:: scipy.ndimage - -.. autofunction:: imread \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.affine_transform.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.affine_transform.rst deleted file mode 100644 index 9459cbf950..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.affine_transform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.affine_transform -============================================ - -.. 
currentmodule:: scipy.ndimage.interpolation - -.. autofunction:: affine_transform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.geometric_transform.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.geometric_transform.rst deleted file mode 100644 index 993757599f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.geometric_transform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.geometric_transform -=============================================== - -.. currentmodule:: scipy.ndimage.interpolation - -.. autofunction:: geometric_transform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.map_coordinates.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.map_coordinates.rst deleted file mode 100644 index dc1595259d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.map_coordinates.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.map_coordinates -=========================================== - -.. currentmodule:: scipy.ndimage.interpolation - -.. autofunction:: map_coordinates \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.rotate.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.rotate.rst deleted file mode 100644 index d8e773c96c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.rotate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.rotate -================================== - -.. currentmodule:: scipy.ndimage.interpolation - -.. 
autofunction:: rotate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.shift.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.shift.rst deleted file mode 100644 index 6260b7afc6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.shift.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.shift -================================= - -.. currentmodule:: scipy.ndimage.interpolation - -.. autofunction:: shift \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.spline_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.spline_filter.rst deleted file mode 100644 index 47ac8e4604..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.spline_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.spline_filter -========================================= - -.. currentmodule:: scipy.ndimage.interpolation - -.. autofunction:: spline_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.spline_filter1d.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.spline_filter1d.rst deleted file mode 100644 index 54c11c4633..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.spline_filter1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.spline_filter1d -=========================================== - -.. currentmodule:: scipy.ndimage.interpolation - -.. 
autofunction:: spline_filter1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.zoom.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.zoom.rst deleted file mode 100644 index 74218de8e8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.interpolation.zoom.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.interpolation.zoom -================================ - -.. currentmodule:: scipy.ndimage.interpolation - -.. autofunction:: zoom \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.center_of_mass.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.center_of_mass.rst deleted file mode 100644 index 2367fc9b27..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.center_of_mass.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.center_of_mass -========================================= - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: center_of_mass \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.extrema.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.extrema.rst deleted file mode 100644 index 3c6537cd1d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.extrema.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.extrema -================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. 
autofunction:: extrema \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.find_objects.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.find_objects.rst deleted file mode 100644 index 24c784c086..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.find_objects.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.find_objects -======================================= - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: find_objects \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.histogram.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.histogram.rst deleted file mode 100644 index 82fd31ba20..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.histogram.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.histogram -==================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: histogram \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.label.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.label.rst deleted file mode 100644 index 2f3d0699f6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.label.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.label -================================ - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: label \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.maximum.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.maximum.rst deleted file mode 100644 index ad09da4088..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.maximum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.maximum -================================== - -.. 
currentmodule:: scipy.ndimage.measurements - -.. autofunction:: maximum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.maximum_position.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.maximum_position.rst deleted file mode 100644 index 4013f30a05..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.maximum_position.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.maximum_position -=========================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: maximum_position \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.mean.rst deleted file mode 100644 index 4d5b9ee785..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.mean -=============================== - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.minimum.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.minimum.rst deleted file mode 100644 index 220008597b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.minimum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.minimum -================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. 
autofunction:: minimum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.minimum_position.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.minimum_position.rst deleted file mode 100644 index c2bb7a3e59..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.minimum_position.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.minimum_position -=========================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: minimum_position \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.standard_deviation.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.standard_deviation.rst deleted file mode 100644 index 297d11f9d6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.standard_deviation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.standard_deviation -============================================= - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: standard_deviation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.sum.rst deleted file mode 100644 index 303019d1af..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.sum -============================== - -.. currentmodule:: scipy.ndimage.measurements - -.. 
autofunction:: sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.variance.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.variance.rst deleted file mode 100644 index d9dc2e671e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.variance.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.variance -=================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: variance \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.watershed_ift.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.watershed_ift.rst deleted file mode 100644 index 53ef85345c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.measurements.watershed_ift.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.measurements.watershed_ift -======================================== - -.. currentmodule:: scipy.ndimage.measurements - -.. autofunction:: watershed_ift \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_closing.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_closing.rst deleted file mode 100644 index 45949e3128..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_closing.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_closing -======================================= - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: binary_closing \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_dilation.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_dilation.rst deleted file mode 100644 index 68120b1d39..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_dilation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_dilation -======================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: binary_dilation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_erosion.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_erosion.rst deleted file mode 100644 index 882c34e7e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_erosion.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_erosion -======================================= - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: binary_erosion \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_fill_holes.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_fill_holes.rst deleted file mode 100644 index ca49cfc8c6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_fill_holes.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_fill_holes -========================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: binary_fill_holes \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_hit_or_miss.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_hit_or_miss.rst deleted file mode 100644 index 902c7d3fe3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_hit_or_miss.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_hit_or_miss -=========================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: binary_hit_or_miss \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_opening.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_opening.rst deleted file mode 100644 index 9bb4049ecb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_opening.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_opening -======================================= - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: binary_opening \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_propagation.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_propagation.rst deleted file mode 100644 index 4b384e4103..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.binary_propagation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.binary_propagation -=========================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: binary_propagation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.black_tophat.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.black_tophat.rst deleted file mode 100644 index 45d9283a25..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.black_tophat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.black_tophat -===================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: black_tophat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_bf.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_bf.rst deleted file mode 100644 index 8d2d9703b7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_bf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.distance_transform_bf -============================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: distance_transform_bf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_cdt.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_cdt.rst deleted file mode 100644 index 81ba87f586..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_cdt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.distance_transform_cdt -=============================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: distance_transform_cdt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_edt.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_edt.rst deleted file mode 100644 index 197b3bd7c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.distance_transform_edt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.distance_transform_edt -=============================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: distance_transform_edt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.generate_binary_structure.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.generate_binary_structure.rst deleted file mode 100644 index 3321bf89c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.generate_binary_structure.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.generate_binary_structure -================================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: generate_binary_structure \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_closing.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_closing.rst deleted file mode 100644 index cd4d21f3a6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_closing.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.grey_closing -===================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: grey_closing \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_dilation.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_dilation.rst deleted file mode 100644 index 03cf3c2a26..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_dilation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.grey_dilation -====================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: grey_dilation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_erosion.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_erosion.rst deleted file mode 100644 index bd84e5f8c5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_erosion.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.grey_erosion -===================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: grey_erosion \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_opening.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_opening.rst deleted file mode 100644 index a283f7c8d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.grey_opening.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.grey_opening -===================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: grey_opening \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.iterate_structure.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.iterate_structure.rst deleted file mode 100644 index 544fb916e5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.iterate_structure.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.iterate_structure -========================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: iterate_structure \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.morphological_gradient.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.morphological_gradient.rst deleted file mode 100644 index 1bd1cd530f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.morphological_gradient.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.morphological_gradient -=============================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: morphological_gradient \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.morphological_laplace.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.morphological_laplace.rst deleted file mode 100644 index d5dbf1ce0f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.morphological_laplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.morphological_laplace -============================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. 
autofunction:: morphological_laplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.white_tophat.rst b/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.white_tophat.rst deleted file mode 100644 index 08c78154a8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.ndimage.morphology.white_tophat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.ndimage.morphology.white_tophat -===================================== - -.. currentmodule:: scipy.ndimage.morphology - -.. autofunction:: white_tophat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.Data.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.Data.rst deleted file mode 100644 index ba614cc3b2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.Data.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.odr.Data -============== - -.. currentmodule:: scipy.odr - -.. autoclass:: Data - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - Data.set_meta - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.Data.set_meta.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.Data.set_meta.rst deleted file mode 100644 index 21e10dfb5b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.Data.set_meta.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.Data.set_meta -======================= - -.. currentmodule:: scipy.odr - -.. automethod:: Data.set_meta \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.Model.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.Model.rst deleted file mode 100644 index 9e42f69f48..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.Model.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.odr.Model -=============== - -.. currentmodule:: scipy.odr - -.. autoclass:: Model - - - - .. 
HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - Model.set_meta - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.Model.set_meta.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.Model.set_meta.rst deleted file mode 100644 index 6c8908f34a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.Model.set_meta.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.Model.set_meta -======================== - -.. currentmodule:: scipy.odr - -.. automethod:: Model.set_meta \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.restart.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.restart.rst deleted file mode 100644 index 4caf064bff..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.restart.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.ODR.restart -===================== - -.. currentmodule:: scipy.odr - -.. automethod:: ODR.restart \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.rst deleted file mode 100644 index 58df7e0d59..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.rst +++ /dev/null @@ -1,22 +0,0 @@ -scipy.odr.ODR -============= - -.. currentmodule:: scipy.odr - -.. autoclass:: ODR - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - ODR.restart - ODR.run - ODR.set_iprint - ODR.set_job - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.run.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.run.rst deleted file mode 100644 index 519e626440..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.run.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.ODR.run -================= - -.. currentmodule:: scipy.odr - -.. 
automethod:: ODR.run \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.set_iprint.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.set_iprint.rst deleted file mode 100644 index 8a9c25a756..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.set_iprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.ODR.set_iprint -======================== - -.. currentmodule:: scipy.odr - -.. automethod:: ODR.set_iprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.set_job.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.set_job.rst deleted file mode 100644 index cbdc0e0faf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.ODR.set_job.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.ODR.set_job -===================== - -.. currentmodule:: scipy.odr - -.. automethod:: ODR.set_job \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.Output.pprint.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.Output.pprint.rst deleted file mode 100644 index e2cd26fa1d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.Output.pprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.Output.pprint -======================= - -.. currentmodule:: scipy.odr - -.. automethod:: Output.pprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.Output.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.Output.rst deleted file mode 100644 index 6f2271f386..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.Output.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.odr.Output -================ - -.. currentmodule:: scipy.odr - -.. autoclass:: Output - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - Output.pprint - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.RealData.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.RealData.rst deleted file mode 100644 index ddb9ddc325..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.RealData.rst +++ /dev/null @@ -1,19 +0,0 @@ -scipy.odr.RealData -================== - -.. currentmodule:: scipy.odr - -.. autoclass:: RealData - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - RealData.set_meta - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.RealData.set_meta.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.RealData.set_meta.rst deleted file mode 100644 index a9e3696b05..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.RealData.set_meta.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.RealData.set_meta -=========================== - -.. currentmodule:: scipy.odr - -.. automethod:: RealData.set_meta \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.odr_error.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.odr_error.rst deleted file mode 100644 index ef7a6c1951..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.odr_error.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.odr_error -=================== - -.. currentmodule:: scipy.odr - -.. autoexception:: odr_error \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.odr.odr_stop.rst b/scipy-0.10.1/doc/source/generated/scipy.odr.odr_stop.rst deleted file mode 100644 index dbaeea667c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.odr.odr_stop.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.odr.odr_stop -================== - -.. currentmodule:: scipy.odr - -.. 
autoexception:: odr_stop \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.anderson.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.anderson.rst deleted file mode 100644 index 84c301e6ea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.anderson.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.anderson -======================= - -.. currentmodule:: scipy.optimize - -.. autofunction:: anderson \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.anneal.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.anneal.rst deleted file mode 100644 index 96183fc574..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.anneal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.anneal -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: anneal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.bisect.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.bisect.rst deleted file mode 100644 index 15512f6ea9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.bisect.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.bisect -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: bisect \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.bracket.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.bracket.rst deleted file mode 100644 index 1e067aa856..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.bracket.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.bracket -====================== - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: bracket \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.brent.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.brent.rst deleted file mode 100644 index 8480254949..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.brent.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.brent -==================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: brent \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.brenth.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.brenth.rst deleted file mode 100644 index 4a590db484..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.brenth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.brenth -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: brenth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.brentq.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.brentq.rst deleted file mode 100644 index 783fe4ee4a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.brentq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.brentq -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: brentq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.broyden1.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.broyden1.rst deleted file mode 100644 index 4ac73445be..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.broyden1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.broyden1 -======================= - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: broyden1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.broyden2.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.broyden2.rst deleted file mode 100644 index 097ded1cc6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.broyden2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.broyden2 -======================= - -.. currentmodule:: scipy.optimize - -.. autofunction:: broyden2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.brute.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.brute.rst deleted file mode 100644 index 364528b15d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.brute.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.brute -==================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: brute \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.check_grad.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.check_grad.rst deleted file mode 100644 index 5407751a6b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.check_grad.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.check_grad -========================= - -.. currentmodule:: scipy.optimize - -.. autofunction:: check_grad \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.curve_fit.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.curve_fit.rst deleted file mode 100644 index d9d924815b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.curve_fit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.curve_fit -======================== - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: curve_fit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.diagbroyden.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.diagbroyden.rst deleted file mode 100644 index d9c1a1bf95..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.diagbroyden.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.diagbroyden -========================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: diagbroyden \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.excitingmixing.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.excitingmixing.rst deleted file mode 100644 index 4854658c3b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.excitingmixing.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.excitingmixing -============================= - -.. currentmodule:: scipy.optimize - -.. autofunction:: excitingmixing \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fixed_point.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fixed_point.rst deleted file mode 100644 index e3a20e5852..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fixed_point.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fixed_point -========================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fixed_point \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin.rst deleted file mode 100644 index 039e3356b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin -=================== - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: fmin \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_bfgs.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_bfgs.rst deleted file mode 100644 index e3fdd19bcf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_bfgs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_bfgs -======================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fmin_bfgs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_cg.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_cg.rst deleted file mode 100644 index 6fbfc758b9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_cg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_cg -====================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fmin_cg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_cobyla.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_cobyla.rst deleted file mode 100644 index 0a25952ed2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_cobyla.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_cobyla -========================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fmin_cobyla \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_l_bfgs_b.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_l_bfgs_b.rst deleted file mode 100644 index 729a45d13f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_l_bfgs_b.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_l_bfgs_b -============================ - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: fmin_l_bfgs_b \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_ncg.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_ncg.rst deleted file mode 100644 index 656ae76136..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_ncg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_ncg -======================= - -.. currentmodule:: scipy.optimize - -.. autofunction:: fmin_ncg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_powell.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_powell.rst deleted file mode 100644 index 80af5531a1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_powell.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_powell -========================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fmin_powell \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_slsqp.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_slsqp.rst deleted file mode 100644 index 2e73c3ffd6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_slsqp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_slsqp -========================= - -.. currentmodule:: scipy.optimize - -.. autofunction:: fmin_slsqp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_tnc.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_tnc.rst deleted file mode 100644 index 7b44f9484d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fmin_tnc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fmin_tnc -======================= - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: fmin_tnc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fminbound.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fminbound.rst deleted file mode 100644 index d99b483ddb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fminbound.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fminbound -======================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fminbound \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.fsolve.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.fsolve.rst deleted file mode 100644 index 812dbf70bc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.fsolve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.fsolve -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: fsolve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.golden.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.golden.rst deleted file mode 100644 index 9ce24b34d4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.golden.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.golden -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: golden \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.leastsq.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.leastsq.rst deleted file mode 100644 index 1f67899389..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.leastsq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.leastsq -====================== - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: leastsq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.line_search.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.line_search.rst deleted file mode 100644 index f4b41b1960..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.line_search.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.line_search -========================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: line_search \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.linearmixing.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.linearmixing.rst deleted file mode 100644 index 8abe6c980e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.linearmixing.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.linearmixing -=========================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: linearmixing \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.newton.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.newton.rst deleted file mode 100644 index 5ce2fe6ce4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.newton -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: newton \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.newton_krylov.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.newton_krylov.rst deleted file mode 100644 index c36ef3a1a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.newton_krylov.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.newton_krylov -============================ - -.. currentmodule:: scipy.optimize - -.. 
autofunction:: newton_krylov \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.nnls.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.nnls.rst deleted file mode 100644 index 362af5dc1e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.nnls.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.nnls -=================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: nnls \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.optimize.ridder.rst b/scipy-0.10.1/doc/source/generated/scipy.optimize.ridder.rst deleted file mode 100644 index 47f0172ca1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.optimize.ridder.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.optimize.ridder -===================== - -.. currentmodule:: scipy.optimize - -.. autofunction:: ridder \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.barthann.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.barthann.rst deleted file mode 100644 index 90bb8c039a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.barthann.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.barthann -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: barthann \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.bartlett.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.bartlett.rst deleted file mode 100644 index 83c0c05a55..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.bartlett.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.bartlett -===================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: bartlett \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.bessel.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.bessel.rst deleted file mode 100644 index 17bd4dadf9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.bessel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.bessel -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: bessel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.bilinear.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.bilinear.rst deleted file mode 100644 index 177222f9b4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.bilinear.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.bilinear -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: bilinear \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.blackman.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.blackman.rst deleted file mode 100644 index 3ab2e03f57..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.blackman.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.blackman -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: blackman \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.blackmanharris.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.blackmanharris.rst deleted file mode 100644 index acb02ae56e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.blackmanharris.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.blackmanharris -=========================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: blackmanharris \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.bohman.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.bohman.rst deleted file mode 100644 index fe5770e4ed..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.bohman.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.bohman -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: bohman \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.boxcar.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.boxcar.rst deleted file mode 100644 index 7faf631007..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.boxcar.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.boxcar -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: boxcar \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.bspline.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.bspline.rst deleted file mode 100644 index 2234782f0e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.bspline.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.bspline -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: bspline \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.butter.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.butter.rst deleted file mode 100644 index aff74ca1bc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.butter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.butter -=================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: butter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.buttord.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.buttord.rst deleted file mode 100644 index 1772251fe8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.buttord.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.buttord -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: buttord \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cascade.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cascade.rst deleted file mode 100644 index 440ac6f9c6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cascade.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cascade -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cascade \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cheb1ord.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cheb1ord.rst deleted file mode 100644 index f7ccbfce09..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cheb1ord.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cheb1ord -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cheb1ord \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cheb2ord.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cheb2ord.rst deleted file mode 100644 index 5ac5de9219..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cheb2ord.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cheb2ord -===================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: cheb2ord \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.chebwin.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.chebwin.rst deleted file mode 100644 index c6f2e81f0b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.chebwin.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.chebwin -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: chebwin \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cheby1.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cheby1.rst deleted file mode 100644 index f906cd5090..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cheby1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cheby1 -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cheby1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cheby2.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cheby2.rst deleted file mode 100644 index e053e91225..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cheby2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cheby2 -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cheby2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.chirp.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.chirp.rst deleted file mode 100644 index 5170c657ac..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.chirp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.chirp -================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: chirp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cont2discrete.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cont2discrete.rst deleted file mode 100644 index f13923c80d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cont2discrete.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cont2discrete -========================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cont2discrete \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.convolve.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.convolve.rst deleted file mode 100644 index 92617b6025..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.convolve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.convolve -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: convolve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.convolve2d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.convolve2d.rst deleted file mode 100644 index 1195105e21..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.convolve2d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.convolve2d -======================= - -.. currentmodule:: scipy.signal - -.. autofunction:: convolve2d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.correlate.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.correlate.rst deleted file mode 100644 index 7e2b7f4c80..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.correlate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.correlate -====================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: correlate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.correlate2d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.correlate2d.rst deleted file mode 100644 index ab28d9b716..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.correlate2d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.correlate2d -======================== - -.. currentmodule:: scipy.signal - -.. autofunction:: correlate2d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cspline1d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cspline1d.rst deleted file mode 100644 index bf3fb7d69a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cspline1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cspline1d -====================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cspline1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.cspline2d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.cspline2d.rst deleted file mode 100644 index adbafd5db8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.cspline2d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.cspline2d -====================== - -.. currentmodule:: scipy.signal - -.. autofunction:: cspline2d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.daub.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.daub.rst deleted file mode 100644 index d7cfc35507..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.daub.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.daub -================= - -.. currentmodule:: scipy.signal - -.. 
autofunction:: daub \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.ellip.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.ellip.rst deleted file mode 100644 index b0b0e8f879..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.ellip.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.ellip -================== - -.. currentmodule:: scipy.signal - -.. autofunction:: ellip \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.ellipord.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.ellipord.rst deleted file mode 100644 index 156dc25194..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.ellipord.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.ellipord -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: ellipord \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.fftconvolve.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.fftconvolve.rst deleted file mode 100644 index 508fcd93b0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.fftconvolve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.fftconvolve -======================== - -.. currentmodule:: scipy.signal - -.. autofunction:: fftconvolve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.flattop.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.flattop.rst deleted file mode 100644 index 0dbaad4b27..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.flattop.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.flattop -==================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: flattop \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.gauss_spline.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.gauss_spline.rst deleted file mode 100644 index 02eb7c0b6c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.gauss_spline.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.gauss_spline -========================= - -.. currentmodule:: scipy.signal - -.. autofunction:: gauss_spline \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.gaussian.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.gaussian.rst deleted file mode 100644 index c81fe4362e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.gaussian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.gaussian -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: gaussian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.gausspulse.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.gausspulse.rst deleted file mode 100644 index 377ef363a0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.gausspulse.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.gausspulse -======================= - -.. currentmodule:: scipy.signal - -.. autofunction:: gausspulse \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.general_gaussian.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.general_gaussian.rst deleted file mode 100644 index d2ed32685f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.general_gaussian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.general_gaussian -============================= - -.. currentmodule:: scipy.signal - -.. 
autofunction:: general_gaussian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.get_window.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.get_window.rst deleted file mode 100644 index 68c72e3a77..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.get_window.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.get_window -======================= - -.. currentmodule:: scipy.signal - -.. autofunction:: get_window \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.hamming.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.hamming.rst deleted file mode 100644 index 23bf931731..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.hamming.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.hamming -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: hamming \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.hann.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.hann.rst deleted file mode 100644 index 231bb43030..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.hann.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.hann -================= - -.. currentmodule:: scipy.signal - -.. autofunction:: hann \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.impulse.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.impulse.rst deleted file mode 100644 index 998e19b760..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.impulse.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.impulse -==================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: impulse \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.impulse2.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.impulse2.rst deleted file mode 100644 index 8834804301..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.impulse2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.impulse2 -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: impulse2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.kaiser.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.kaiser.rst deleted file mode 100644 index a74d925872..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.kaiser.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.kaiser -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: kaiser \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lfilter.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lfilter.rst deleted file mode 100644 index 0337348a2e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lfilter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lfilter -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: lfilter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lfilter_zi.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lfilter_zi.rst deleted file mode 100644 index 2040f4e819..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lfilter_zi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lfilter_zi -======================= - -.. currentmodule:: scipy.signal - -.. 
autofunction:: lfilter_zi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lfiltic.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lfiltic.rst deleted file mode 100644 index 4b728ca40c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lfiltic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lfiltic -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: lfiltic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lsim.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lsim.rst deleted file mode 100644 index 1ad38d028b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lsim.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lsim -================= - -.. currentmodule:: scipy.signal - -.. autofunction:: lsim \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lsim2.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lsim2.rst deleted file mode 100644 index afee573079..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lsim2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lsim2 -================== - -.. currentmodule:: scipy.signal - -.. autofunction:: lsim2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.impulse.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lti.impulse.rst deleted file mode 100644 index 6753ed6b05..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.impulse.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lti.impulse -======================== - -.. currentmodule:: scipy.signal - -.. 
automethod:: lti.impulse \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.output.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lti.output.rst deleted file mode 100644 index ca5d70a261..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.output.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lti.output -======================= - -.. currentmodule:: scipy.signal - -.. automethod:: lti.output \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lti.rst deleted file mode 100644 index c99b9c614e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.rst +++ /dev/null @@ -1,21 +0,0 @@ -scipy.signal.lti -================ - -.. currentmodule:: scipy.signal - -.. autoclass:: lti - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - lti.impulse - lti.output - lti.step - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.step.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.lti.step.rst deleted file mode 100644 index 4f153efad3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.lti.step.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.lti.step -===================== - -.. currentmodule:: scipy.signal - -.. automethod:: lti.step \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.medfilt.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.medfilt.rst deleted file mode 100644 index 7f47ccbe31..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.medfilt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.medfilt -==================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: medfilt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.medfilt2d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.medfilt2d.rst deleted file mode 100644 index 4de847c175..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.medfilt2d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.medfilt2d -====================== - -.. currentmodule:: scipy.signal - -.. autofunction:: medfilt2d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.morlet.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.morlet.rst deleted file mode 100644 index 92fffb3f8c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.morlet.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.morlet -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: morlet \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.nuttall.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.nuttall.rst deleted file mode 100644 index 6dcb2cdfe9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.nuttall.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.nuttall -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: nuttall \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.order_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.order_filter.rst deleted file mode 100644 index 257db06cf4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.order_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.order_filter -========================= - -.. currentmodule:: scipy.signal - -.. 
autofunction:: order_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.parzen.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.parzen.rst deleted file mode 100644 index 894128c13b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.parzen.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.parzen -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: parzen \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.qmf.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.qmf.rst deleted file mode 100644 index 53003ffdf7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.qmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.qmf -================ - -.. currentmodule:: scipy.signal - -.. autofunction:: qmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.qspline1d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.qspline1d.rst deleted file mode 100644 index 631622194a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.qspline1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.qspline1d -====================== - -.. currentmodule:: scipy.signal - -.. autofunction:: qspline1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.qspline2d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.qspline2d.rst deleted file mode 100644 index 5ceb234e87..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.qspline2d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.qspline2d -====================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: qspline2d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.sawtooth.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.sawtooth.rst deleted file mode 100644 index 48a5717e39..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.sawtooth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.sawtooth -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: sawtooth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.sepfir2d.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.sepfir2d.rst deleted file mode 100644 index 80bf998ae0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.sepfir2d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.sepfir2d -===================== - -.. currentmodule:: scipy.signal - -.. autofunction:: sepfir2d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.slepian.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.slepian.rst deleted file mode 100644 index 2146736e5a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.slepian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.slepian -==================== - -.. currentmodule:: scipy.signal - -.. autofunction:: slepian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.spline_filter.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.spline_filter.rst deleted file mode 100644 index c82cd9a0af..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.spline_filter.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.spline_filter -========================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: spline_filter \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.square.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.square.rst deleted file mode 100644 index d1706f8b2a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.square.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.square -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: square \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.ss2tf.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.ss2tf.rst deleted file mode 100644 index 7dfb8755dd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.ss2tf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.ss2tf -================== - -.. currentmodule:: scipy.signal - -.. autofunction:: ss2tf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.ss2zpk.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.ss2zpk.rst deleted file mode 100644 index 174041e038..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.ss2zpk.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.ss2zpk -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: ss2zpk \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.step.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.step.rst deleted file mode 100644 index c9de382dda..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.step.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.step -================= - -.. currentmodule:: scipy.signal - -.. 
autofunction:: step \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.step2.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.step2.rst deleted file mode 100644 index c2ace8b063..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.step2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.step2 -================== - -.. currentmodule:: scipy.signal - -.. autofunction:: step2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.sweep_poly.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.sweep_poly.rst deleted file mode 100644 index 09d0b96681..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.sweep_poly.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.sweep_poly -======================= - -.. currentmodule:: scipy.signal - -.. autofunction:: sweep_poly \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.symiirorder1.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.symiirorder1.rst deleted file mode 100644 index 1e8a79f94c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.symiirorder1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.symiirorder1 -========================= - -.. currentmodule:: scipy.signal - -.. autofunction:: symiirorder1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.symiirorder2.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.symiirorder2.rst deleted file mode 100644 index 6ca1e9e70b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.symiirorder2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.symiirorder2 -========================= - -.. currentmodule:: scipy.signal - -.. 
autofunction:: symiirorder2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.tf2ss.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.tf2ss.rst deleted file mode 100644 index f4ed2535fe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.tf2ss.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.tf2ss -================== - -.. currentmodule:: scipy.signal - -.. autofunction:: tf2ss \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.tf2zpk.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.tf2zpk.rst deleted file mode 100644 index 792f54fe07..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.tf2zpk.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.tf2zpk -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: tf2zpk \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.triang.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.triang.rst deleted file mode 100644 index fe0deacd49..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.triang.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.triang -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: triang \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.wiener.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.wiener.rst deleted file mode 100644 index a4971314e6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.wiener.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.wiener -=================== - -.. currentmodule:: scipy.signal - -.. 
autofunction:: wiener \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.zpk2ss.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.zpk2ss.rst deleted file mode 100644 index cf59768477..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.zpk2ss.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.zpk2ss -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: zpk2ss \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.signal.zpk2tf.rst b/scipy-0.10.1/doc/source/generated/scipy.signal.zpk2tf.rst deleted file mode 100644 index 090c6d5922..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.signal.zpk2tf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.signal.zpk2tf -=================== - -.. currentmodule:: scipy.signal - -.. autofunction:: zpk2tf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.SparseEfficiencyWarning.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.SparseEfficiencyWarning.rst deleted file mode 100644 index 2bee36b7b0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.SparseEfficiencyWarning.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.SparseEfficiencyWarning -==================================== - -.. currentmodule:: scipy.sparse - -.. autoexception:: SparseEfficiencyWarning \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.SparseWarning.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.SparseWarning.rst deleted file mode 100644 index ba82501ee6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.SparseWarning.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.SparseWarning -========================== - -.. currentmodule:: scipy.sparse - -.. 
autoexception:: SparseWarning \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bmat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bmat.rst deleted file mode 100644 index 39ad5db19a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bmat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bmat -================= - -.. currentmodule:: scipy.sparse - -.. autofunction:: bmat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.asformat.rst deleted file mode 100644 index a1e8bc8095..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.asfptype.rst deleted file mode 100644 index 669c346b98..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.astype.rst deleted file mode 100644 index 9e7f90a106..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.blocksize.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.blocksize.rst deleted file mode 100644 index de647b92ca..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.blocksize.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.blocksize -================================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: bsr_matrix.blocksize \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.check_format.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.check_format.rst deleted file mode 100644 index 45924dd6df..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.check_format.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.check_format -==================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.check_format \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.conj.rst deleted file mode 100644 index 2890f8719d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.conjugate.rst deleted file mode 100644 index 17d650cb6d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.copy.rst deleted file mode 100644 index 14a0502ed2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.diagonal.rst deleted file mode 100644 index 4f2940fd9e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.dot.rst deleted file mode 100644 index ea3cfdea98..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.dtype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.dtype.rst deleted file mode 100644 index ef0191d52e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.dtype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.dtype -============================= - -.. currentmodule:: scipy.sparse - -.. 
autoattribute:: bsr_matrix.dtype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.eliminate_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.eliminate_zeros.rst deleted file mode 100644 index a9e5987748..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.eliminate_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.eliminate_zeros -======================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.eliminate_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getH.rst deleted file mode 100644 index ab558575fc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.get_shape.rst deleted file mode 100644 index cc746595ec..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getcol.rst deleted file mode 100644 index dcb3386939..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getdata.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getdata.rst deleted file mode 100644 index 3dced56270..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getdata.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getdata -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.getdata \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getformat.rst deleted file mode 100644 index 80ab157c4c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getmaxprint.rst deleted file mode 100644 index 39f380ab5c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getnnz.rst deleted file mode 100644 index dcee4f8f76..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getrow.rst deleted file mode 100644 index e378a56998..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.has_sorted_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.has_sorted_indices.rst deleted file mode 100644 index b2066c303a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.has_sorted_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.has_sorted_indices -========================================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: bsr_matrix.has_sorted_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.matmat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.matmat.rst deleted file mode 100644 index 6c75e34936..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.matmat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.matmat -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.matmat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.matvec.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.matvec.rst deleted file mode 100644 index a34be010d5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.matvec.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.matvec -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.matvec \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.mean.rst deleted file mode 100644 index a5ee76202f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.multiply.rst deleted file mode 100644 index 235cc1d501..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.nnz.rst deleted file mode 100644 index 249c11609b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: bsr_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.nonzero.rst deleted file mode 100644 index 01bd31fb3e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.prune.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.prune.rst deleted file mode 100644 index 246ea0c81a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.prune.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.prune -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.prune \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.reshape.rst deleted file mode 100644 index a3fba0c53e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.rst deleted file mode 100644 index 2c8a94bf94..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.rst +++ /dev/null @@ -1,69 +0,0 @@ -scipy.sparse.bsr_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: bsr_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - bsr_matrix.asformat - bsr_matrix.asfptype - bsr_matrix.astype - bsr_matrix.check_format - bsr_matrix.conj - bsr_matrix.conjugate - bsr_matrix.copy - bsr_matrix.diagonal - bsr_matrix.dot - bsr_matrix.eliminate_zeros - bsr_matrix.getH - bsr_matrix.get_shape - bsr_matrix.getcol - bsr_matrix.getdata - bsr_matrix.getformat - bsr_matrix.getmaxprint - bsr_matrix.getnnz - bsr_matrix.getrow - bsr_matrix.matmat - bsr_matrix.matvec - bsr_matrix.mean - bsr_matrix.multiply - bsr_matrix.nonzero - bsr_matrix.prune - bsr_matrix.reshape - bsr_matrix.set_shape - bsr_matrix.setdiag - bsr_matrix.sort_indices - bsr_matrix.sorted_indices - bsr_matrix.sum - bsr_matrix.sum_duplicates - bsr_matrix.toarray - bsr_matrix.tobsr - bsr_matrix.tocoo - bsr_matrix.tocsc - bsr_matrix.tocsr - bsr_matrix.todense - bsr_matrix.todia - bsr_matrix.todok - bsr_matrix.tolil - bsr_matrix.transpose - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - bsr_matrix.blocksize - bsr_matrix.dtype - bsr_matrix.has_sorted_indices - bsr_matrix.nnz - bsr_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.set_shape.rst deleted file mode 100644 index 023c69a9d2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.setdiag.rst deleted file mode 100644 index 755fa572ea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.setdiag -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.shape.rst deleted file mode 100644 index e29843faed..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: bsr_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sort_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sort_indices.rst deleted file mode 100644 index b148e4bff1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sort_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.sort_indices -==================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.sort_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sorted_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sorted_indices.rst deleted file mode 100644 index d15c9b519d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sorted_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.sorted_indices -====================================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.sorted_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sum.rst deleted file mode 100644 index 267cd45b2f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.sum -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sum_duplicates.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sum_duplicates.rst deleted file mode 100644 index 359047fa4f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.sum_duplicates.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.sum_duplicates -====================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.sum_duplicates \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.toarray.rst deleted file mode 100644 index 351c16bdd9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.toarray -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tobsr.rst deleted file mode 100644 index 55ffa0a136..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocoo.rst deleted file mode 100644 index f8510aea78..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocsc.rst deleted file mode 100644 index 44f93cc792..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocsr.rst deleted file mode 100644 index ae9fa32a41..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todense.rst deleted file mode 100644 index 0a82dc8aa8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todia.rst deleted file mode 100644 index 0a202653ca..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todok.rst deleted file mode 100644 index a0fbfc905b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tolil.rst deleted file mode 100644 index accd1370b2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: bsr_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.transpose.rst deleted file mode 100644 index 0020bba9ae..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.bsr_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.bsr_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: bsr_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.asformat.rst deleted file mode 100644 index 5413416999..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.asfptype.rst deleted file mode 100644 index 9e09d4a4a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.astype.rst deleted file mode 100644 index 0ae4160c40..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.conj.rst deleted file mode 100644 index 94e5ffd398..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.conjugate.rst deleted file mode 100644 index 4359fa6bf9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.copy.rst deleted file mode 100644 index 60f51fbefe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.diagonal.rst deleted file mode 100644 index 15d6decf5c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.dot.rst deleted file mode 100644 index 5ff69dc1fc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.dtype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.dtype.rst deleted file mode 100644 index 5051a6034e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.dtype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.dtype -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: coo_matrix.dtype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getH.rst deleted file mode 100644 index c971edf065..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.get_shape.rst deleted file mode 100644 index 0596c6fe0c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getcol.rst deleted file mode 100644 index fabcb06a6e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getformat.rst deleted file mode 100644 index 2a3f8147c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getmaxprint.rst deleted file mode 100644 index 926694a9ec..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getnnz.rst deleted file mode 100644 index 39cac83b01..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getrow.rst deleted file mode 100644 index e6e73b8d16..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.mean.rst deleted file mode 100644 index e22f934cb8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.multiply.rst deleted file mode 100644 index e8ac5ebe3b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.nnz.rst deleted file mode 100644 index 453f2d94e1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: coo_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.nonzero.rst deleted file mode 100644 index 0b11dab29b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.reshape.rst deleted file mode 100644 index 59db325d86..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.rst deleted file mode 100644 index 083eda03a2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.rst +++ /dev/null @@ -1,58 +0,0 @@ -scipy.sparse.coo_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: coo_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - coo_matrix.asformat - coo_matrix.asfptype - coo_matrix.astype - coo_matrix.conj - coo_matrix.conjugate - coo_matrix.copy - coo_matrix.diagonal - coo_matrix.dot - coo_matrix.getH - coo_matrix.get_shape - coo_matrix.getcol - coo_matrix.getformat - coo_matrix.getmaxprint - coo_matrix.getnnz - coo_matrix.getrow - coo_matrix.mean - coo_matrix.multiply - coo_matrix.nonzero - coo_matrix.reshape - coo_matrix.set_shape - coo_matrix.setdiag - coo_matrix.sum - coo_matrix.toarray - coo_matrix.tobsr - coo_matrix.tocoo - coo_matrix.tocsc - coo_matrix.tocsr - coo_matrix.todense - coo_matrix.todia - coo_matrix.todok - coo_matrix.tolil - coo_matrix.transpose - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - coo_matrix.dtype - coo_matrix.nnz - coo_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.set_shape.rst deleted file mode 100644 index ac0d6310a2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.setdiag.rst deleted file mode 100644 index 1c04d62ab9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.setdiag -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.shape.rst deleted file mode 100644 index 436b431104..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: coo_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.sum.rst deleted file mode 100644 index 0ec8f77acc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.sum -=========================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.toarray.rst deleted file mode 100644 index d1ac5cceda..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.toarray -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tobsr.rst deleted file mode 100644 index 1a16956dc3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocoo.rst deleted file mode 100644 index 6ff51ec0ea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocsc.rst deleted file mode 100644 index aafde02d0e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocsr.rst deleted file mode 100644 index 633163a463..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todense.rst deleted file mode 100644 index 620145aee8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todia.rst deleted file mode 100644 index af619a9cd8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todok.rst deleted file mode 100644 index 8fe5639041..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: coo_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tolil.rst deleted file mode 100644 index df3eb6c6e2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.transpose.rst deleted file mode 100644 index 48538b505a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.coo_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.coo_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: coo_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.cs_graph_components.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.cs_graph_components.rst deleted file mode 100644 index bb8d5d02c1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.cs_graph_components.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.cs_graph_components -================================ - -.. currentmodule:: scipy.sparse - -.. autofunction:: cs_graph_components \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.asformat.rst deleted file mode 100644 index cebc3a3499..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.asfptype.rst deleted file mode 100644 index dcd02c634f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.astype.rst deleted file mode 100644 index 0f118cfd62..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.check_format.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.check_format.rst deleted file mode 100644 index 1996294541..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.check_format.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.check_format -==================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.check_format \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.conj.rst deleted file mode 100644 index cc3ec1db9c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.conjugate.rst deleted file mode 100644 index f9b7c53089..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.copy.rst deleted file mode 100644 index a4529761e9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.diagonal.rst deleted file mode 100644 index 0957c6c4cd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.dot.rst deleted file mode 100644 index 01250c6990..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.dtype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.dtype.rst deleted file mode 100644 index ece5d4ea74..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.dtype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.dtype -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csc_matrix.dtype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.eliminate_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.eliminate_zeros.rst deleted file mode 100644 index 61fa8951b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.eliminate_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.eliminate_zeros -======================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.eliminate_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getH.rst deleted file mode 100644 index 31fdfff77b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.get_shape.rst deleted file mode 100644 index f9d7fabac3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getcol.rst deleted file mode 100644 index f2ae270214..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getformat.rst deleted file mode 100644 index 52d2241c55..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getmaxprint.rst deleted file mode 100644 index c2413d7bfb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getnnz.rst deleted file mode 100644 index d740ef37a3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getrow.rst deleted file mode 100644 index 964ece9f6b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.has_sorted_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.has_sorted_indices.rst deleted file mode 100644 index 13002ed4de..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.has_sorted_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.has_sorted_indices -========================================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csc_matrix.has_sorted_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.mean.rst deleted file mode 100644 index b7bcb6307d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.multiply.rst deleted file mode 100644 index eb2e3f0678..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.nnz.rst deleted file mode 100644 index 28debe2548..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csc_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.nonzero.rst deleted file mode 100644 index 768a3e7f3a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.prune.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.prune.rst deleted file mode 100644 index 0d13c9042d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.prune.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.prune -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.prune \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.reshape.rst deleted file mode 100644 index 758caa41f2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.rst deleted file mode 100644 index 85ec2c523f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.rst +++ /dev/null @@ -1,65 +0,0 @@ -scipy.sparse.csc_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: csc_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - csc_matrix.asformat - csc_matrix.asfptype - csc_matrix.astype - csc_matrix.check_format - csc_matrix.conj - csc_matrix.conjugate - csc_matrix.copy - csc_matrix.diagonal - csc_matrix.dot - csc_matrix.eliminate_zeros - csc_matrix.getH - csc_matrix.get_shape - csc_matrix.getcol - csc_matrix.getformat - csc_matrix.getmaxprint - csc_matrix.getnnz - csc_matrix.getrow - csc_matrix.mean - csc_matrix.multiply - csc_matrix.nonzero - csc_matrix.prune - csc_matrix.reshape - csc_matrix.set_shape - csc_matrix.setdiag - csc_matrix.sort_indices - csc_matrix.sorted_indices - csc_matrix.sum - csc_matrix.sum_duplicates - csc_matrix.toarray - csc_matrix.tobsr - csc_matrix.tocoo - csc_matrix.tocsc - csc_matrix.tocsr - csc_matrix.todense - csc_matrix.todia - csc_matrix.todok - csc_matrix.tolil - csc_matrix.transpose - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - csc_matrix.dtype - csc_matrix.has_sorted_indices - csc_matrix.nnz - csc_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.set_shape.rst deleted file mode 100644 index 40356e3970..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.setdiag.rst deleted file mode 100644 index 5ae0fae31e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.setdiag -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.shape.rst deleted file mode 100644 index fe8f7dafa5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csc_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sort_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sort_indices.rst deleted file mode 100644 index 76968aed1c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sort_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.sort_indices -==================================== - -.. 
currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.sort_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sorted_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sorted_indices.rst deleted file mode 100644 index 711db33e62..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sorted_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.sorted_indices -====================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.sorted_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sum.rst deleted file mode 100644 index b480777ab8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.sum -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sum_duplicates.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sum_duplicates.rst deleted file mode 100644 index ee88bb69b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.sum_duplicates.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.sum_duplicates -====================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.sum_duplicates \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.toarray.rst deleted file mode 100644 index 8b78a5730c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.toarray -=============================== - -.. 
currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tobsr.rst deleted file mode 100644 index 7788372c81..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocoo.rst deleted file mode 100644 index 4864a6dcad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocsc.rst deleted file mode 100644 index 1be82a7a6e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocsr.rst deleted file mode 100644 index cbf4fea91c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todense.rst deleted file mode 100644 index 29fc9f3dbc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todia.rst deleted file mode 100644 index cb97c90361..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todok.rst deleted file mode 100644 index 9a2883088c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tolil.rst deleted file mode 100644 index ca3acf5d05..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csc_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.transpose.rst deleted file mode 100644 index fa891625e2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csc_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csc_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csc_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.asformat.rst deleted file mode 100644 index 522552625a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.asfptype.rst deleted file mode 100644 index 81afec7d8c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.astype.rst deleted file mode 100644 index 7bb364e14b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.check_format.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.check_format.rst deleted file mode 100644 index 012311dd5b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.check_format.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.check_format -==================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.check_format \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.conj.rst deleted file mode 100644 index dbc7e0a99b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.conjugate.rst deleted file mode 100644 index 0f4ea738af..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.copy.rst deleted file mode 100644 index 5830642cc0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.diagonal.rst deleted file mode 100644 index d1ab9eb0a3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.dot.rst deleted file mode 100644 index 554e0653fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.dtype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.dtype.rst deleted file mode 100644 index 15f3da0eb6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.dtype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.dtype -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csr_matrix.dtype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.eliminate_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.eliminate_zeros.rst deleted file mode 100644 index fae30cf941..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.eliminate_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.eliminate_zeros -======================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.eliminate_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getH.rst deleted file mode 100644 index 3d33aba8cb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.get_shape.rst deleted file mode 100644 index 1629a52413..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getcol.rst deleted file mode 100644 index 234729305c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getformat.rst deleted file mode 100644 index 9b10b7f125..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getmaxprint.rst deleted file mode 100644 index 86328b01c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getnnz.rst deleted file mode 100644 index 9cee3c8b57..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getrow.rst deleted file mode 100644 index 175d8c1ff5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.has_sorted_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.has_sorted_indices.rst deleted file mode 100644 index 544fcb4012..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.has_sorted_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.has_sorted_indices -========================================== - -.. currentmodule:: scipy.sparse - -.. 
autoattribute:: csr_matrix.has_sorted_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.mean.rst deleted file mode 100644 index 4ee610c1cc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.multiply.rst deleted file mode 100644 index 5cc1c33fdb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.nnz.rst deleted file mode 100644 index ba59aea054..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csr_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.nonzero.rst deleted file mode 100644 index a9831be9e6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.prune.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.prune.rst deleted file mode 100644 index 123fbe0241..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.prune.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.prune -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.prune \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.reshape.rst deleted file mode 100644 index 2a82953f13..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.rst deleted file mode 100644 index 33477911c4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.rst +++ /dev/null @@ -1,65 +0,0 @@ -scipy.sparse.csr_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: csr_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - csr_matrix.asformat - csr_matrix.asfptype - csr_matrix.astype - csr_matrix.check_format - csr_matrix.conj - csr_matrix.conjugate - csr_matrix.copy - csr_matrix.diagonal - csr_matrix.dot - csr_matrix.eliminate_zeros - csr_matrix.getH - csr_matrix.get_shape - csr_matrix.getcol - csr_matrix.getformat - csr_matrix.getmaxprint - csr_matrix.getnnz - csr_matrix.getrow - csr_matrix.mean - csr_matrix.multiply - csr_matrix.nonzero - csr_matrix.prune - csr_matrix.reshape - csr_matrix.set_shape - csr_matrix.setdiag - csr_matrix.sort_indices - csr_matrix.sorted_indices - csr_matrix.sum - csr_matrix.sum_duplicates - csr_matrix.toarray - csr_matrix.tobsr - csr_matrix.tocoo - csr_matrix.tocsc - csr_matrix.tocsr - csr_matrix.todense - csr_matrix.todia - csr_matrix.todok - csr_matrix.tolil - csr_matrix.transpose - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - csr_matrix.dtype - csr_matrix.has_sorted_indices - csr_matrix.nnz - csr_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.set_shape.rst deleted file mode 100644 index b1aeffdb60..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.setdiag.rst deleted file mode 100644 index dbc45992cb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.setdiag -=============================== - -.. 
currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.shape.rst deleted file mode 100644 index 269011ba9a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: csr_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sort_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sort_indices.rst deleted file mode 100644 index 2e3d171433..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sort_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.sort_indices -==================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.sort_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sorted_indices.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sorted_indices.rst deleted file mode 100644 index 7422fce55a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sorted_indices.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.sorted_indices -====================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.sorted_indices \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sum.rst deleted file mode 100644 index fb148051a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.sum -=========================== - -.. 
currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sum_duplicates.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sum_duplicates.rst deleted file mode 100644 index ee9f572b95..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.sum_duplicates.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.sum_duplicates -====================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.sum_duplicates \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.toarray.rst deleted file mode 100644 index 1fed64029c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.toarray -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tobsr.rst deleted file mode 100644 index 6e18a88cd1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocoo.rst deleted file mode 100644 index b11117f71e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocsc.rst deleted file mode 100644 index a5f110b94f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocsr.rst deleted file mode 100644 index 04c1f44a96..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todense.rst deleted file mode 100644 index efd60d1527..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todia.rst deleted file mode 100644 index 48559b63a0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: csr_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todok.rst deleted file mode 100644 index b4496bdd2b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tolil.rst deleted file mode 100644 index 80480e265a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.transpose.rst deleted file mode 100644 index 051b4bce9f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.csr_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.csr_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: csr_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.asformat.rst deleted file mode 100644 index 70b5492bdb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.asfptype.rst deleted file mode 100644 index 7fc80a34c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.astype.rst deleted file mode 100644 index 6c7750b855..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.conj.rst deleted file mode 100644 index 95c9b75bb4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.conjugate.rst deleted file mode 100644 index 5926086982..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.copy.rst deleted file mode 100644 index d8a6fc89d8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.diagonal.rst deleted file mode 100644 index 0b38edb52f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.dot.rst deleted file mode 100644 index 6215a82a34..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.dtype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.dtype.rst deleted file mode 100644 index 5e3f5b53f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.dtype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.dtype -============================= - -.. currentmodule:: scipy.sparse - -.. 
autoattribute:: dia_matrix.dtype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getH.rst deleted file mode 100644 index 1b7434887b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.get_shape.rst deleted file mode 100644 index 0599c1f62a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getcol.rst deleted file mode 100644 index 2df88dcf19..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getformat.rst deleted file mode 100644 index 5668e56d29..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getmaxprint.rst deleted file mode 100644 index 4b2c22edc5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getnnz.rst deleted file mode 100644 index 4bf78028d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getrow.rst deleted file mode 100644 index f89f047988..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.mean.rst deleted file mode 100644 index 40837836f9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.multiply.rst deleted file mode 100644 index feb69e93a9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.nnz.rst deleted file mode 100644 index 47997a568c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: dia_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.nonzero.rst deleted file mode 100644 index 9f8e3953c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.reshape.rst deleted file mode 100644 index 90f4d12c87..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.rst deleted file mode 100644 index 409e9d92d3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.rst +++ /dev/null @@ -1,58 +0,0 @@ -scipy.sparse.dia_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: dia_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - dia_matrix.asformat - dia_matrix.asfptype - dia_matrix.astype - dia_matrix.conj - dia_matrix.conjugate - dia_matrix.copy - dia_matrix.diagonal - dia_matrix.dot - dia_matrix.getH - dia_matrix.get_shape - dia_matrix.getcol - dia_matrix.getformat - dia_matrix.getmaxprint - dia_matrix.getnnz - dia_matrix.getrow - dia_matrix.mean - dia_matrix.multiply - dia_matrix.nonzero - dia_matrix.reshape - dia_matrix.set_shape - dia_matrix.setdiag - dia_matrix.sum - dia_matrix.toarray - dia_matrix.tobsr - dia_matrix.tocoo - dia_matrix.tocsc - dia_matrix.tocsr - dia_matrix.todense - dia_matrix.todia - dia_matrix.todok - dia_matrix.tolil - dia_matrix.transpose - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - dia_matrix.dtype - dia_matrix.nnz - dia_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.set_shape.rst deleted file mode 100644 index 6672773da0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.setdiag.rst deleted file mode 100644 index cd0a61961e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.setdiag -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.shape.rst deleted file mode 100644 index 047d7a93fb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: dia_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.sum.rst deleted file mode 100644 index 5e671a25c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.sum -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.toarray.rst deleted file mode 100644 index 4d1630db88..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.toarray -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tobsr.rst deleted file mode 100644 index 37cab4d9ea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocoo.rst deleted file mode 100644 index 4b6b540b31..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocsc.rst deleted file mode 100644 index d33f8c89af..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocsr.rst deleted file mode 100644 index 6dfb9df931..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todense.rst deleted file mode 100644 index 18ea4a1cd9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todia.rst deleted file mode 100644 index c0ff8aeccd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todok.rst deleted file mode 100644 index f9a429077f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tolil.rst deleted file mode 100644 index 9830977652..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dia_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.transpose.rst deleted file mode 100644 index fb9d5ff8a3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dia_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dia_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dia_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.asformat.rst deleted file mode 100644 index e6f7cf6466..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.asfptype.rst deleted file mode 100644 index 14bd5decfd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.astype.rst deleted file mode 100644 index defa1afc79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.clear.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.clear.rst deleted file mode 100644 index 385b9d3b32..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.clear.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.clear -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.clear \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conj.rst deleted file mode 100644 index e57d805d87..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conjtransp.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conjtransp.rst deleted file mode 100644 index 9d44cfc356..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conjtransp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.conjtransp -================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.conjtransp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conjugate.rst deleted file mode 100644 index 90e8d9dc5a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.copy.rst deleted file mode 100644 index d51367f28d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.diagonal.rst deleted file mode 100644 index 532701b86b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.dot.rst deleted file mode 100644 index ecfcb57c87..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.get.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.get.rst deleted file mode 100644 index c68a0a355b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.get.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.get -=========================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.get \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getH.rst deleted file mode 100644 index af7a53f928..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.get_shape.rst deleted file mode 100644 index 10397d43a9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getcol.rst deleted file mode 100644 index 2ff244b3e6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getformat.rst deleted file mode 100644 index aed74e2a1d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getmaxprint.rst deleted file mode 100644 index da7bf9ad0d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getnnz.rst deleted file mode 100644 index d53e98762a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getrow.rst deleted file mode 100644 index 0a5766b544..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.has_key.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.has_key.rst deleted file mode 100644 index beeb75e5f9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.has_key.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.has_key -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.has_key \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.items.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.items.rst deleted file mode 100644 index 94a2bcb0ec..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.items.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.items -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.items \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.iteritems.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.iteritems.rst deleted file mode 100644 index 32daa672c5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.iteritems.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.iteritems -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.iteritems \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.iterkeys.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.iterkeys.rst deleted file mode 100644 index 6d0ac98e9a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.iterkeys.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.iterkeys -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.iterkeys \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.itervalues.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.itervalues.rst deleted file mode 100644 index 55616108a1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.itervalues.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.itervalues -================================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.itervalues \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.keys.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.keys.rst deleted file mode 100644 index 388e5b54bd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.keys.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.keys -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.keys \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.mean.rst deleted file mode 100644 index 8ee6c9776f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.multiply.rst deleted file mode 100644 index 8adb0fba3c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.nnz.rst deleted file mode 100644 index 591185a294..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. 
autoattribute:: dok_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.nonzero.rst deleted file mode 100644 index a3c2238304..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.pop.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.pop.rst deleted file mode 100644 index 6064e79a3f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.pop.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.pop -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.pop \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.popitem.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.popitem.rst deleted file mode 100644 index ffd6643772..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.popitem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.popitem -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.popitem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.reshape.rst deleted file mode 100644 index 280792cc1c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.resize.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.resize.rst deleted file mode 100644 index fab0b4c9ba..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.resize.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.resize -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.resize \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.rst deleted file mode 100644 index 4f6ca9c8d5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.rst +++ /dev/null @@ -1,74 +0,0 @@ -scipy.sparse.dok_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: dok_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - dok_matrix.asformat - dok_matrix.asfptype - dok_matrix.astype - dok_matrix.clear - dok_matrix.conj - dok_matrix.conjtransp - dok_matrix.conjugate - dok_matrix.copy - dok_matrix.diagonal - dok_matrix.dot - dok_matrix.get - dok_matrix.getH - dok_matrix.get_shape - dok_matrix.getcol - dok_matrix.getformat - dok_matrix.getmaxprint - dok_matrix.getnnz - dok_matrix.getrow - dok_matrix.has_key - dok_matrix.items - dok_matrix.iteritems - dok_matrix.iterkeys - dok_matrix.itervalues - dok_matrix.keys - dok_matrix.mean - dok_matrix.multiply - dok_matrix.nonzero - dok_matrix.pop - dok_matrix.popitem - dok_matrix.reshape - dok_matrix.resize - dok_matrix.set_shape - dok_matrix.setdefault - dok_matrix.setdiag - dok_matrix.split - dok_matrix.sum - dok_matrix.take - dok_matrix.toarray - dok_matrix.tobsr - dok_matrix.tocoo - dok_matrix.tocsc - dok_matrix.tocsr - dok_matrix.todense - dok_matrix.todia - dok_matrix.todok - dok_matrix.tolil - dok_matrix.transpose - dok_matrix.update - dok_matrix.values - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - dok_matrix.nnz - dok_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.set_shape.rst deleted file mode 100644 index 327196cdc7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.setdefault.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.setdefault.rst deleted file mode 100644 index 5efeaf7bbe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.setdefault.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.setdefault -================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.setdefault \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.setdiag.rst deleted file mode 100644 index 14cd59bfd3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.setdiag -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.shape.rst deleted file mode 100644 index f7fd3c0598..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: dok_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.split.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.split.rst deleted file mode 100644 index 2e758ed8cf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.split.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.split -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.split \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.sum.rst deleted file mode 100644 index a14a852c9e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.sum -=========================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.take.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.take.rst deleted file mode 100644 index 99f4cb8887..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.take.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.take -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.take \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.toarray.rst deleted file mode 100644 index 9cf076ca50..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.toarray -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tobsr.rst deleted file mode 100644 index 0294210130..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocoo.rst deleted file mode 100644 index 29b7d146d6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocsc.rst deleted file mode 100644 index 5316e1f2ea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocsr.rst deleted file mode 100644 index b6ece1b003..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todense.rst deleted file mode 100644 index 40e48ec17d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todia.rst deleted file mode 100644 index e57fe433db..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todok.rst deleted file mode 100644 index b4be5065a8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tolil.rst deleted file mode 100644 index 4559ca56c4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.transpose.rst deleted file mode 100644 index a336aea5de..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: dok_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.update.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.update.rst deleted file mode 100644 index a060376d32..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.update.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.update -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.update \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.values.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.values.rst deleted file mode 100644 index 794bab15d1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.dok_matrix.values.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.dok_matrix.values -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: dok_matrix.values \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.eye.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.eye.rst deleted file mode 100644 index 4d3a134ee9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.eye.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.eye -================ - -.. currentmodule:: scipy.sparse - -.. autofunction:: eye \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.hstack.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.hstack.rst deleted file mode 100644 index f2625ba550..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.hstack.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.hstack -=================== - -.. currentmodule:: scipy.sparse - -.. 
autofunction:: hstack \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.identity.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.identity.rst deleted file mode 100644 index a342201ebc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.identity.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.identity -===================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: identity \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.issparse.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.issparse.rst deleted file mode 100644 index f459215b49..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.issparse.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.issparse -===================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: issparse \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix.rst deleted file mode 100644 index 97e4888818..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix -======================= - -.. currentmodule:: scipy.sparse - -.. autofunction:: isspmatrix \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_bsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_bsr.rst deleted file mode 100644 index dbf2592b69..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_bsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_bsr -=========================== - -.. currentmodule:: scipy.sparse - -.. 
autofunction:: isspmatrix_bsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_coo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_coo.rst deleted file mode 100644 index 3f7ca780f1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_coo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_coo -=========================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: isspmatrix_coo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_csc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_csc.rst deleted file mode 100644 index 92ef4905f7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_csc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_csc -=========================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: isspmatrix_csc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_csr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_csr.rst deleted file mode 100644 index 4a11ecb04a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_csr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_csr -=========================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: isspmatrix_csr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_dia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_dia.rst deleted file mode 100644 index 4370f2e1f3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_dia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_dia -=========================== - -.. currentmodule:: scipy.sparse - -.. 
autofunction:: isspmatrix_dia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_dok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_dok.rst deleted file mode 100644 index e5053383a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_dok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_dok -=========================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: isspmatrix_dok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_lil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_lil.rst deleted file mode 100644 index e39a7c2c54..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.isspmatrix_lil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.isspmatrix_lil -=========================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: isspmatrix_lil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.kron.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.kron.rst deleted file mode 100644 index a3e0ec2219..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.kron.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.kron -================= - -.. currentmodule:: scipy.sparse - -.. autofunction:: kron \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.kronsum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.kronsum.rst deleted file mode 100644 index a3bcb8ebaa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.kronsum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.kronsum -==================== - -.. currentmodule:: scipy.sparse - -.. 
autofunction:: kronsum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.asformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.asformat.rst deleted file mode 100644 index 43ecf94bc2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.asformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.asformat -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.asformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.asfptype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.asfptype.rst deleted file mode 100644 index 371657b4a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.asfptype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.asfptype -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.asfptype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.astype.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.astype.rst deleted file mode 100644 index a81bdecc89..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.astype.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.astype -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.astype \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.conj.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.conj.rst deleted file mode 100644 index c7f253b12f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.conj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.conj -============================ - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.conj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.conjugate.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.conjugate.rst deleted file mode 100644 index f7007c8c5f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.conjugate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.conjugate -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.conjugate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.copy.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.copy.rst deleted file mode 100644 index f83606df76..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.copy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.copy -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.copy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.diagonal.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.diagonal.rst deleted file mode 100644 index 111659113a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.diagonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.diagonal -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.diagonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.dot.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.dot.rst deleted file mode 100644 index 99621f0814..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.dot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.dot -=========================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.dot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getH.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getH.rst deleted file mode 100644 index 40db75b892..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getH.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getH -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.getH \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.get_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.get_shape.rst deleted file mode 100644 index e41fe64414..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.get_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.get_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.get_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getcol.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getcol.rst deleted file mode 100644 index ea0573e913..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getcol.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getcol -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.getcol \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getformat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getformat.rst deleted file mode 100644 index ad1ef6b68e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getformat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getformat -================================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.getformat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getmaxprint.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getmaxprint.rst deleted file mode 100644 index 50b27bb820..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getmaxprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getmaxprint -=================================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.getmaxprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getnnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getnnz.rst deleted file mode 100644 index 8b9c44b4f2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getnnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getnnz -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.getnnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getrow.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getrow.rst deleted file mode 100644 index 969a558f44..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getrow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getrow -============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.getrow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getrowview.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getrowview.rst deleted file mode 100644 index 877d98b992..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.getrowview.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.getrowview -================================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.getrowview \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.mean.rst deleted file mode 100644 index bdcae24888..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.mean -============================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.multiply.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.multiply.rst deleted file mode 100644 index 220c53bf67..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.multiply.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.multiply -================================ - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.multiply \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.nnz.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.nnz.rst deleted file mode 100644 index 6de4b18e79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.nnz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.nnz -=========================== - -.. currentmodule:: scipy.sparse - -.. autoattribute:: lil_matrix.nnz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.nonzero.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.nonzero.rst deleted file mode 100644 index 293e01bae9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.nonzero.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.nonzero -=============================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.nonzero \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.reshape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.reshape.rst deleted file mode 100644 index 528f7ce46a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.reshape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.reshape -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.reshape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.rst deleted file mode 100644 index f5e73e3d0d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.rst +++ /dev/null @@ -1,58 +0,0 @@ -scipy.sparse.lil_matrix -======================= - -.. currentmodule:: scipy.sparse - -.. autoclass:: lil_matrix - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - lil_matrix.asformat - lil_matrix.asfptype - lil_matrix.astype - lil_matrix.conj - lil_matrix.conjugate - lil_matrix.copy - lil_matrix.diagonal - lil_matrix.dot - lil_matrix.getH - lil_matrix.get_shape - lil_matrix.getcol - lil_matrix.getformat - lil_matrix.getmaxprint - lil_matrix.getnnz - lil_matrix.getrow - lil_matrix.getrowview - lil_matrix.mean - lil_matrix.multiply - lil_matrix.nonzero - lil_matrix.reshape - lil_matrix.set_shape - lil_matrix.setdiag - lil_matrix.sum - lil_matrix.toarray - lil_matrix.tobsr - lil_matrix.tocoo - lil_matrix.tocsc - lil_matrix.tocsr - lil_matrix.todense - lil_matrix.todia - lil_matrix.todok - lil_matrix.tolil - lil_matrix.transpose - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - lil_matrix.nnz - lil_matrix.shape - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.set_shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.set_shape.rst deleted file mode 100644 index bde48a056d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.set_shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.set_shape -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.set_shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.setdiag.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.setdiag.rst deleted file mode 100644 index c8bd49a69f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.setdiag.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.setdiag -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.setdiag \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.shape.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.shape.rst deleted file mode 100644 index ab8de3f2a6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.shape.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.shape -============================= - -.. currentmodule:: scipy.sparse - -.. autoattribute:: lil_matrix.shape \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.sum.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.sum.rst deleted file mode 100644 index 580d103f1b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.sum.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.sum -=========================== - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.sum \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.toarray.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.toarray.rst deleted file mode 100644 index a7416601df..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.toarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.toarray -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.toarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tobsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tobsr.rst deleted file mode 100644 index 13fc015d97..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tobsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.tobsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.tobsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocoo.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocoo.rst deleted file mode 100644 index d3835aed49..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocoo.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.tocoo -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.tocoo \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocsc.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocsc.rst deleted file mode 100644 index 5aec2d7557..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocsc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.tocsc -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.tocsc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocsr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocsr.rst deleted file mode 100644 index 7f9a0432d6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tocsr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.tocsr -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.tocsr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todense.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todense.rst deleted file mode 100644 index e3acdce3ee..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todense.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.todense -=============================== - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.todense \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todia.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todia.rst deleted file mode 100644 index 9cd0335f48..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todia.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.todia -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.todia \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todok.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todok.rst deleted file mode 100644 index fd49839999..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.todok.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.todok -============================= - -.. currentmodule:: scipy.sparse - -.. 
automethod:: lil_matrix.todok \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tolil.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tolil.rst deleted file mode 100644 index b6bf596091..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.tolil.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.tolil -============================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.tolil \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.transpose.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.transpose.rst deleted file mode 100644 index e0f4301278..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.lil_matrix.transpose.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.lil_matrix.transpose -================================= - -.. currentmodule:: scipy.sparse - -.. automethod:: lil_matrix.transpose \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.ArpackError.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.ArpackError.rst deleted file mode 100644 index 6894d3f456..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.ArpackError.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.ArpackError -=============================== - -.. currentmodule:: scipy.sparse.linalg - -.. autoexception:: ArpackError \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.ArpackNoConvergence.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.ArpackNoConvergence.rst deleted file mode 100644 index df7c5af20d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.ArpackNoConvergence.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.ArpackNoConvergence -======================================= - -.. currentmodule:: scipy.sparse.linalg - -.. 
autoexception:: ArpackNoConvergence \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.matmat.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.matmat.rst deleted file mode 100644 index f6e259e620..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.matmat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.LinearOperator.matmat -========================================= - -.. currentmodule:: scipy.sparse.linalg - -.. automethod:: LinearOperator.matmat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.matvec.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.matvec.rst deleted file mode 100644 index b3ff3fb7d0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.matvec.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.LinearOperator.matvec -========================================= - -.. currentmodule:: scipy.sparse.linalg - -.. automethod:: LinearOperator.matvec \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.rst deleted file mode 100644 index 19fe4d6fce..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.LinearOperator.rst +++ /dev/null @@ -1,20 +0,0 @@ -scipy.sparse.linalg.LinearOperator -================================== - -.. currentmodule:: scipy.sparse.linalg - -.. autoclass:: LinearOperator - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - LinearOperator.matmat - LinearOperator.matvec - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.aslinearoperator.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.aslinearoperator.rst deleted file mode 100644 index a097460005..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.aslinearoperator.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.aslinearoperator -==================================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: aslinearoperator \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.bicg.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.bicg.rst deleted file mode 100644 index f7c1b4386d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.bicg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.bicg -======================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: bicg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.bicgstab.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.bicgstab.rst deleted file mode 100644 index 08bd77dcae..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.bicgstab.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.bicgstab -============================ - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: bicgstab \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.cg.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.cg.rst deleted file mode 100644 index 1b45abd5d0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.cg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.cg -====================== - -.. currentmodule:: scipy.sparse.linalg - -.. 
autofunction:: cg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.cgs.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.cgs.rst deleted file mode 100644 index 9c419c4b74..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.cgs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.cgs -======================= - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: cgs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.eigs.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.eigs.rst deleted file mode 100644 index a70d1d455c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.eigs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.eigs -======================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: eigs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.eigsh.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.eigsh.rst deleted file mode 100644 index ef81f5dd3b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.eigsh.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.eigsh -========================= - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: eigsh \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.factorized.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.factorized.rst deleted file mode 100644 index 5abb8c6c01..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.factorized.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.factorized -============================== - -.. currentmodule:: scipy.sparse.linalg - -.. 
autofunction:: factorized \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.gmres.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.gmres.rst deleted file mode 100644 index ccd9879fb9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.gmres.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.gmres -========================= - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: gmres \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lgmres.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lgmres.rst deleted file mode 100644 index 1c26c80a9f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lgmres.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.lgmres -========================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: lgmres \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lobpcg.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lobpcg.rst deleted file mode 100644 index 0841537187..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lobpcg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.lobpcg -========================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: lobpcg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lsqr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lsqr.rst deleted file mode 100644 index 86fdb60860..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.lsqr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.lsqr -======================== - -.. currentmodule:: scipy.sparse.linalg - -.. 
autofunction:: lsqr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.minres.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.minres.rst deleted file mode 100644 index 03314334ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.minres.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.minres -========================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: minres \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.qmr.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.qmr.rst deleted file mode 100644 index 7d645fc551..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.qmr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.qmr -======================= - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: qmr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.spilu.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.spilu.rst deleted file mode 100644 index 2d74e77528..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.spilu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.spilu -========================= - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: spilu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.splu.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.splu.rst deleted file mode 100644 index 8ad58be372..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.splu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.splu -======================== - -.. currentmodule:: scipy.sparse.linalg - -.. 
autofunction:: splu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.spsolve.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.spsolve.rst deleted file mode 100644 index 921242660f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.spsolve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.spsolve -=========================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: spsolve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.svds.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.svds.rst deleted file mode 100644 index 8424bad043..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.linalg.svds.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.linalg.svds -======================== - -.. currentmodule:: scipy.sparse.linalg - -.. autofunction:: svds \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.rand.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.rand.rst deleted file mode 100644 index ad8a0b85b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.rand.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.rand -================= - -.. currentmodule:: scipy.sparse - -.. autofunction:: rand \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.spdiags.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.spdiags.rst deleted file mode 100644 index 95dc5984a6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.spdiags.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.spdiags -==================== - -.. currentmodule:: scipy.sparse - -.. 
autofunction:: spdiags \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.tril.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.tril.rst deleted file mode 100644 index 95767f8cac..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.tril.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.tril -================= - -.. currentmodule:: scipy.sparse - -.. autofunction:: tril \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.triu.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.triu.rst deleted file mode 100644 index 4c67a2949e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.triu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.triu -================= - -.. currentmodule:: scipy.sparse - -.. autofunction:: triu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.sparse.vstack.rst b/scipy-0.10.1/doc/source/generated/scipy.sparse.vstack.rst deleted file mode 100644 index 80b859145f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.sparse.vstack.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.sparse.vstack -=================== - -.. currentmodule:: scipy.sparse - -.. autofunction:: vstack \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.convex_hull.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.convex_hull.rst deleted file mode 100644 index 2ea8d018d2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.convex_hull.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.Delaunay.convex_hull -================================== - -.. currentmodule:: scipy.spatial - -.. 
autoattribute:: Delaunay.convex_hull \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.find_simplex.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.find_simplex.rst deleted file mode 100644 index 7920bc8713..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.find_simplex.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.Delaunay.find_simplex -=================================== - -.. currentmodule:: scipy.spatial - -.. automethod:: Delaunay.find_simplex \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.lift_points.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.lift_points.rst deleted file mode 100644 index 3b1a5f3120..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.lift_points.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.Delaunay.lift_points -================================== - -.. currentmodule:: scipy.spatial - -.. automethod:: Delaunay.lift_points \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.plane_distance.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.plane_distance.rst deleted file mode 100644 index be5c2c7033..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.plane_distance.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.Delaunay.plane_distance -===================================== - -.. currentmodule:: scipy.spatial - -.. automethod:: Delaunay.plane_distance \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.rst deleted file mode 100644 index f06f50962b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.rst +++ /dev/null @@ -1,29 +0,0 @@ -scipy.spatial.Delaunay -====================== - -.. currentmodule:: scipy.spatial - -.. autoclass:: Delaunay - - - - .. 
HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - Delaunay.find_simplex - Delaunay.lift_points - Delaunay.plane_distance - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - Delaunay.convex_hull - Delaunay.transform - Delaunay.vertex_to_simplex - diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.transform.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.transform.rst deleted file mode 100644 index a73f340f25..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.transform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.Delaunay.transform -================================ - -.. currentmodule:: scipy.spatial - -.. autoattribute:: Delaunay.transform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.vertex_to_simplex.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.vertex_to_simplex.rst deleted file mode 100644 index 915a3c0997..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.Delaunay.vertex_to_simplex.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.Delaunay.vertex_to_simplex -======================================== - -.. currentmodule:: scipy.spatial - -.. autoattribute:: Delaunay.vertex_to_simplex \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.count_neighbors.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.count_neighbors.rst deleted file mode 100644 index 510d52d8ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.count_neighbors.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.KDTree.count_neighbors -==================================== - -.. currentmodule:: scipy.spatial - -.. 
automethod:: KDTree.count_neighbors \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query.rst deleted file mode 100644 index 7195341c7c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.KDTree.query -========================== - -.. currentmodule:: scipy.spatial - -.. automethod:: KDTree.query \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_ball_point.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_ball_point.rst deleted file mode 100644 index ff49b1135b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_ball_point.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.KDTree.query_ball_point -===================================== - -.. currentmodule:: scipy.spatial - -.. automethod:: KDTree.query_ball_point \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_ball_tree.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_ball_tree.rst deleted file mode 100644 index 64d047a7c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_ball_tree.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.KDTree.query_ball_tree -==================================== - -.. currentmodule:: scipy.spatial - -.. automethod:: KDTree.query_ball_tree \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_pairs.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_pairs.rst deleted file mode 100644 index 9cdac56f48..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.query_pairs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.KDTree.query_pairs -================================ - -.. currentmodule:: scipy.spatial - -.. 
automethod:: KDTree.query_pairs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.rst deleted file mode 100644 index a987b6ecff..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.rst +++ /dev/null @@ -1,24 +0,0 @@ -scipy.spatial.KDTree -==================== - -.. currentmodule:: scipy.spatial - -.. autoclass:: KDTree - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - KDTree.count_neighbors - KDTree.query - KDTree.query_ball_point - KDTree.query_ball_tree - KDTree.query_pairs - KDTree.sparse_distance_matrix - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.sparse_distance_matrix.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.sparse_distance_matrix.rst deleted file mode 100644 index cfc6d79989..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.KDTree.sparse_distance_matrix.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.KDTree.sparse_distance_matrix -=========================================== - -.. currentmodule:: scipy.spatial - -.. automethod:: KDTree.sparse_distance_matrix \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.data.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.data.rst deleted file mode 100644 index 93601e2ede..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.data.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.data -========================== - -.. currentmodule:: scipy.spatial - -.. 
autoattribute:: cKDTree.data \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.leafsize.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.leafsize.rst deleted file mode 100644 index 84669bb6e5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.leafsize.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.leafsize -============================== - -.. currentmodule:: scipy.spatial - -.. autoattribute:: cKDTree.leafsize \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.m.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.m.rst deleted file mode 100644 index c29bde045e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.m.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.m -======================= - -.. currentmodule:: scipy.spatial - -.. autoattribute:: cKDTree.m \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.maxes.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.maxes.rst deleted file mode 100644 index 1199cc2d4d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.maxes.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.maxes -=========================== - -.. currentmodule:: scipy.spatial - -.. autoattribute:: cKDTree.maxes \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.mins.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.mins.rst deleted file mode 100644 index d0ad27e662..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.mins.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.mins -========================== - -.. currentmodule:: scipy.spatial - -.. 
autoattribute:: cKDTree.mins \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.n.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.n.rst deleted file mode 100644 index 5e670b5645..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.n.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.n -======================= - -.. currentmodule:: scipy.spatial - -.. autoattribute:: cKDTree.n \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.query.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.query.rst deleted file mode 100644 index e21b5f9f4e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.query.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.cKDTree.query -=========================== - -.. currentmodule:: scipy.spatial - -.. automethod:: cKDTree.query \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.rst deleted file mode 100644 index 025c713dd3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.cKDTree.rst +++ /dev/null @@ -1,30 +0,0 @@ -scipy.spatial.cKDTree -===================== - -.. currentmodule:: scipy.spatial - -.. autoclass:: cKDTree - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - cKDTree.query - - - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - cKDTree.data - cKDTree.leafsize - cKDTree.m - cKDTree.maxes - cKDTree.mins - cKDTree.n - diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.braycurtis.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.braycurtis.rst deleted file mode 100644 index 3e21575c28..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.braycurtis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.braycurtis -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: braycurtis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.canberra.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.canberra.rst deleted file mode 100644 index 231461cf8d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.canberra.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.canberra -=============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: canberra \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cdist.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cdist.rst deleted file mode 100644 index 7cddc9d988..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.cdist -============================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: cdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.chebyshev.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.chebyshev.rst deleted file mode 100644 index e39da2ccf9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.chebyshev.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.chebyshev -================================ - -.. 
currentmodule:: scipy.spatial.distance - -.. autofunction:: chebyshev \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cityblock.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cityblock.rst deleted file mode 100644 index ae9b8e9ea9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cityblock.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.cityblock -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: cityblock \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.correlation.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.correlation.rst deleted file mode 100644 index c5500568d7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.correlation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.correlation -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: correlation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cosine.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cosine.rst deleted file mode 100644 index 864fb5660a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.cosine.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.cosine -============================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: cosine \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.dice.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.dice.rst deleted file mode 100644 index b0d1a001f4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.dice.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.dice -=========================== - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: dice \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.euclidean.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.euclidean.rst deleted file mode 100644 index d26404d971..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.euclidean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.euclidean -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: euclidean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.hamming.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.hamming.rst deleted file mode 100644 index b68690448d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.hamming.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.hamming -============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: hamming \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.is_valid_dm.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.is_valid_dm.rst deleted file mode 100644 index 225746adb7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.is_valid_dm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.is_valid_dm -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: is_valid_dm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.is_valid_y.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.is_valid_y.rst deleted file mode 100644 index 0698ff83b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.is_valid_y.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.is_valid_y -================================= - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: is_valid_y \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.jaccard.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.jaccard.rst deleted file mode 100644 index adbe0c6b0f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.jaccard.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.jaccard -============================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: jaccard \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.kulsinski.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.kulsinski.rst deleted file mode 100644 index 11471ad88f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.kulsinski.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.kulsinski -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: kulsinski \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.mahalanobis.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.mahalanobis.rst deleted file mode 100644 index 936afe57fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.mahalanobis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.mahalanobis -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: mahalanobis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.matching.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.matching.rst deleted file mode 100644 index a22ba0b20c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.matching.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.matching -=============================== - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: matching \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.minkowski.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.minkowski.rst deleted file mode 100644 index bfe845ae22..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.minkowski.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.minkowski -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: minkowski \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.num_obs_dm.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.num_obs_dm.rst deleted file mode 100644 index a2a1efb084..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.num_obs_dm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.num_obs_dm -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: num_obs_dm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.num_obs_y.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.num_obs_y.rst deleted file mode 100644 index 7f03b0863f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.num_obs_y.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.num_obs_y -================================ - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: num_obs_y \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.pdist.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.pdist.rst deleted file mode 100644 index f1985b22e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.pdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.pdist -============================ - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: pdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.rogerstanimoto.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.rogerstanimoto.rst deleted file mode 100644 index 47e3e15243..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.rogerstanimoto.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.rogerstanimoto -===================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: rogerstanimoto \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.rst deleted file mode 100644 index 0f21c6a695..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.rst +++ /dev/null @@ -1,51 +0,0 @@ -scipy.spatial.distance -====================== - -.. automodule:: scipy.spatial.distance - - - - .. rubric:: Functions - - .. autosummary:: - - braycurtis - canberra - cdist - chebyshev - cityblock - correlation - cosine - dice - euclidean - hamming - is_valid_dm - is_valid_y - jaccard - kulsinski - mahalanobis - matching - minkowski - norm - num_obs_dm - num_obs_y - pdist - rogerstanimoto - russellrao - seuclidean - sokalmichener - sokalsneath - sqeuclidean - squareform - wminkowski - yule - - - - - - - - - - \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.russellrao.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.russellrao.rst deleted file mode 100644 index c34216888e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.russellrao.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.russellrao -================================= - -.. currentmodule:: scipy.spatial.distance - -.. 
autofunction:: russellrao \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.seuclidean.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.seuclidean.rst deleted file mode 100644 index 9e626bd5d1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.seuclidean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.seuclidean -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: seuclidean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sokalmichener.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sokalmichener.rst deleted file mode 100644 index 5507980ae7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sokalmichener.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.sokalmichener -==================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: sokalmichener \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sokalsneath.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sokalsneath.rst deleted file mode 100644 index 2471ed63b7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sokalsneath.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.sokalsneath -================================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: sokalsneath \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sqeuclidean.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sqeuclidean.rst deleted file mode 100644 index 7a3765f39b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.sqeuclidean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.sqeuclidean -================================== - -.. 
currentmodule:: scipy.spatial.distance - -.. autofunction:: sqeuclidean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.squareform.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.squareform.rst deleted file mode 100644 index 4ca0d5896f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.squareform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.squareform -================================= - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: squareform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.yule.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.yule.rst deleted file mode 100644 index 6bf920a492..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.distance.yule.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.distance.yule -=========================== - -.. currentmodule:: scipy.spatial.distance - -.. autofunction:: yule \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.spatial.tsearch.rst b/scipy-0.10.1/doc/source/generated/scipy.spatial.tsearch.rst deleted file mode 100644 index 0c87a6d31b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.spatial.tsearch.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.spatial.tsearch -===================== - -.. currentmodule:: scipy.spatial - -.. autofunction:: tsearch \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ai_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ai_zeros.rst deleted file mode 100644 index ee79d78ad5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ai_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ai_zeros -====================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: ai_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.airy.rst b/scipy-0.10.1/doc/source/generated/scipy.special.airy.rst deleted file mode 100644 index 2ccfa6f3b2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.airy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.airy -================== - -.. currentmodule:: scipy.special - -.. autodata:: airy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.airye.rst b/scipy-0.10.1/doc/source/generated/scipy.special.airye.rst deleted file mode 100644 index 709a3bafc7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.airye.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.airye -=================== - -.. currentmodule:: scipy.special - -.. autodata:: airye \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.bdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.bdtr.rst deleted file mode 100644 index 24c5acabd6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.bdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.bdtr -================== - -.. currentmodule:: scipy.special - -.. autodata:: bdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.bdtrc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.bdtrc.rst deleted file mode 100644 index 7127b1ad4d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.bdtrc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.bdtrc -=================== - -.. currentmodule:: scipy.special - -.. autodata:: bdtrc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.bdtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.bdtri.rst deleted file mode 100644 index 78070f63d8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.bdtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.bdtri -=================== - -.. 
currentmodule:: scipy.special - -.. autodata:: bdtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.bei.rst b/scipy-0.10.1/doc/source/generated/scipy.special.bei.rst deleted file mode 100644 index 243d4f68d4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.bei.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.bei -================= - -.. currentmodule:: scipy.special - -.. autodata:: bei \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.bei_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.bei_zeros.rst deleted file mode 100644 index e077eddbbf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.bei_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.bei_zeros -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: bei_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.beip.rst b/scipy-0.10.1/doc/source/generated/scipy.special.beip.rst deleted file mode 100644 index 4c239f91ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.beip.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.beip -================== - -.. currentmodule:: scipy.special - -.. autodata:: beip \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.beip_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.beip_zeros.rst deleted file mode 100644 index 8fdb801a6c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.beip_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.beip_zeros -======================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: beip_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ber.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ber.rst deleted file mode 100644 index 50f1035b98..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ber.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ber -================= - -.. currentmodule:: scipy.special - -.. autodata:: ber \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ber_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ber_zeros.rst deleted file mode 100644 index 43331c6168..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ber_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ber_zeros -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: ber_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.berp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.berp.rst deleted file mode 100644 index 5d6c98fd47..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.berp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.berp -================== - -.. currentmodule:: scipy.special - -.. autodata:: berp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.berp_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.berp_zeros.rst deleted file mode 100644 index b6fcfac859..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.berp_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.berp_zeros -======================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: berp_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.besselpoly.rst b/scipy-0.10.1/doc/source/generated/scipy.special.besselpoly.rst deleted file mode 100644 index c7e8d87eea..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.besselpoly.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.besselpoly -======================== - -.. currentmodule:: scipy.special - -.. autodata:: besselpoly \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.beta.rst b/scipy-0.10.1/doc/source/generated/scipy.special.beta.rst deleted file mode 100644 index 3dc204a019..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.beta.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.beta -================== - -.. currentmodule:: scipy.special - -.. autodata:: beta \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.betainc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.betainc.rst deleted file mode 100644 index 7b0f138b8f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.betainc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.betainc -===================== - -.. currentmodule:: scipy.special - -.. autodata:: betainc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.betaincinv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.betaincinv.rst deleted file mode 100644 index b10984dc86..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.betaincinv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.betaincinv -======================== - -.. currentmodule:: scipy.special - -.. 
autodata:: betaincinv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.betaln.rst b/scipy-0.10.1/doc/source/generated/scipy.special.betaln.rst deleted file mode 100644 index 8bc712a0ca..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.betaln.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.betaln -==================== - -.. currentmodule:: scipy.special - -.. autodata:: betaln \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.bi_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.bi_zeros.rst deleted file mode 100644 index 64424ada37..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.bi_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.bi_zeros -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: bi_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.btdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.btdtr.rst deleted file mode 100644 index bb9bdea778..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.btdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.btdtr -=================== - -.. currentmodule:: scipy.special - -.. autodata:: btdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.btdtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.btdtri.rst deleted file mode 100644 index d595d1530d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.btdtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.btdtri -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: btdtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.cbrt.rst b/scipy-0.10.1/doc/source/generated/scipy.special.cbrt.rst deleted file mode 100644 index 3790ed14fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.cbrt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.cbrt -================== - -.. currentmodule:: scipy.special - -.. autodata:: cbrt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chdtr.rst deleted file mode 100644 index b7168b6c8b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chdtr -=================== - -.. currentmodule:: scipy.special - -.. autodata:: chdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chdtrc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chdtrc.rst deleted file mode 100644 index c9bc450c23..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chdtrc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chdtrc -==================== - -.. currentmodule:: scipy.special - -.. autodata:: chdtrc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chdtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chdtri.rst deleted file mode 100644 index 8637ee1c3f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chdtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chdtri -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: chdtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chebyc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chebyc.rst deleted file mode 100644 index 424e86ce93..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chebyc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chebyc -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: chebyc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chebys.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chebys.rst deleted file mode 100644 index c75e21bd7f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chebys.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chebys -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: chebys \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chebyt.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chebyt.rst deleted file mode 100644 index 9136460ab5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chebyt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chebyt -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: chebyt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.chebyu.rst b/scipy-0.10.1/doc/source/generated/scipy.special.chebyu.rst deleted file mode 100644 index f62f24c9d0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.chebyu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.chebyu -==================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: chebyu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.cosdg.rst b/scipy-0.10.1/doc/source/generated/scipy.special.cosdg.rst deleted file mode 100644 index 7a140dbca3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.cosdg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.cosdg -=================== - -.. currentmodule:: scipy.special - -.. autodata:: cosdg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.cosm1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.cosm1.rst deleted file mode 100644 index 851bc6f901..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.cosm1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.cosm1 -=================== - -.. currentmodule:: scipy.special - -.. autodata:: cosm1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.cotdg.rst b/scipy-0.10.1/doc/source/generated/scipy.special.cotdg.rst deleted file mode 100644 index d5438dc017..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.cotdg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.cotdg -=================== - -.. currentmodule:: scipy.special - -.. autodata:: cotdg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.dawsn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.dawsn.rst deleted file mode 100644 index 23f8eb7025..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.dawsn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.dawsn -=================== - -.. currentmodule:: scipy.special - -.. 
autodata:: dawsn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ellipe.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ellipe.rst deleted file mode 100644 index 64bb714a54..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ellipe.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ellipe -==================== - -.. currentmodule:: scipy.special - -.. autodata:: ellipe \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ellipeinc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ellipeinc.rst deleted file mode 100644 index 2217c80cd3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ellipeinc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ellipeinc -======================= - -.. currentmodule:: scipy.special - -.. autodata:: ellipeinc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ellipj.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ellipj.rst deleted file mode 100644 index 83b2753dc8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ellipj.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ellipj -==================== - -.. currentmodule:: scipy.special - -.. autodata:: ellipj \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ellipk.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ellipk.rst deleted file mode 100644 index 00373a5c90..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ellipk.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ellipk -==================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: ellipk \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ellipkinc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ellipkinc.rst deleted file mode 100644 index 4264d4f922..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ellipkinc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ellipkinc -======================= - -.. currentmodule:: scipy.special - -.. autodata:: ellipkinc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ellipkm1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ellipkm1.rst deleted file mode 100644 index ba7b42d8cd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ellipkm1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ellipkm1 -====================== - -.. currentmodule:: scipy.special - -.. autodata:: ellipkm1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.erf.rst b/scipy-0.10.1/doc/source/generated/scipy.special.erf.rst deleted file mode 100644 index fe44202eb7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.erf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.erf -================= - -.. currentmodule:: scipy.special - -.. autodata:: erf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.erf_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.erf_zeros.rst deleted file mode 100644 index d5b2b6f4a4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.erf_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.erf_zeros -======================= - -.. currentmodule:: scipy.special - -.. 
autofunction:: erf_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.erfc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.erfc.rst deleted file mode 100644 index f16d8177b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.erfc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.erfc -================== - -.. currentmodule:: scipy.special - -.. autodata:: erfc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.erfcinv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.erfcinv.rst deleted file mode 100644 index 92a587647e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.erfcinv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.erfcinv -===================== - -.. currentmodule:: scipy.special - -.. autofunction:: erfcinv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.erfinv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.erfinv.rst deleted file mode 100644 index 76f1ba8a68..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.erfinv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.erfinv -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: erfinv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.errprint.rst b/scipy-0.10.1/doc/source/generated/scipy.special.errprint.rst deleted file mode 100644 index 90855a910f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.errprint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.errprint -====================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: errprint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyc.rst deleted file mode 100644 index 859c5fc1d2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_chebyc -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: eval_chebyc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebys.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebys.rst deleted file mode 100644 index c5d29aeefa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebys.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_chebys -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: eval_chebys \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyt.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyt.rst deleted file mode 100644 index 6e9daf3adc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_chebyt -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: eval_chebyt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyu.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyu.rst deleted file mode 100644 index 91148d6ad1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_chebyu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_chebyu -========================= - -.. currentmodule:: scipy.special - -.. 
autofunction:: eval_chebyu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_gegenbauer.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_gegenbauer.rst deleted file mode 100644 index 8c637809d0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_gegenbauer.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_gegenbauer -============================= - -.. currentmodule:: scipy.special - -.. autofunction:: eval_gegenbauer \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_genlaguerre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_genlaguerre.rst deleted file mode 100644 index e5e38d4fcb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_genlaguerre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_genlaguerre -============================== - -.. currentmodule:: scipy.special - -.. autofunction:: eval_genlaguerre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_hermite.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_hermite.rst deleted file mode 100644 index 88efbc4393..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_hermite.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_hermite -========================== - -.. currentmodule:: scipy.special - -.. autofunction:: eval_hermite \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_hermitenorm.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_hermitenorm.rst deleted file mode 100644 index cc712784b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_hermitenorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_hermitenorm -============================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: eval_hermitenorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_jacobi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_jacobi.rst deleted file mode 100644 index dda0dc7e20..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_jacobi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_jacobi -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: eval_jacobi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_laguerre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_laguerre.rst deleted file mode 100644 index c539745b34..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_laguerre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_laguerre -=========================== - -.. currentmodule:: scipy.special - -.. autofunction:: eval_laguerre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_legendre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_legendre.rst deleted file mode 100644 index 3e050cf9bb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_legendre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_legendre -=========================== - -.. currentmodule:: scipy.special - -.. autofunction:: eval_legendre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_chebyt.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_chebyt.rst deleted file mode 100644 index bbbfa0aaba..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_chebyt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_sh_chebyt -============================ - -.. currentmodule:: scipy.special - -.. 
autofunction:: eval_sh_chebyt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_chebyu.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_chebyu.rst deleted file mode 100644 index 7e436ecc56..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_chebyu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_sh_chebyu -============================ - -.. currentmodule:: scipy.special - -.. autofunction:: eval_sh_chebyu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_jacobi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_jacobi.rst deleted file mode 100644 index b7abd52229..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_jacobi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_sh_jacobi -============================ - -.. currentmodule:: scipy.special - -.. autofunction:: eval_sh_jacobi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_legendre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_legendre.rst deleted file mode 100644 index fa4e6c6e8a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.eval_sh_legendre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.eval_sh_legendre -============================== - -.. currentmodule:: scipy.special - -.. autofunction:: eval_sh_legendre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.exp1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.exp1.rst deleted file mode 100644 index 579e7b4b0b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.exp1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.exp1 -================== - -.. currentmodule:: scipy.special - -.. 
autodata:: exp1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.exp10.rst b/scipy-0.10.1/doc/source/generated/scipy.special.exp10.rst deleted file mode 100644 index a16f9da57a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.exp10.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.exp10 -=================== - -.. currentmodule:: scipy.special - -.. autodata:: exp10 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.exp2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.exp2.rst deleted file mode 100644 index c497a349e4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.exp2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.exp2 -================== - -.. currentmodule:: scipy.special - -.. autodata:: exp2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.expi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.expi.rst deleted file mode 100644 index af3bef209a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.expi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.expi -================== - -.. currentmodule:: scipy.special - -.. autodata:: expi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.expit.rst b/scipy-0.10.1/doc/source/generated/scipy.special.expit.rst deleted file mode 100644 index da5a0966f1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.expit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.expit -=================== - -.. currentmodule:: scipy.special - -.. autodata:: expit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.expm1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.expm1.rst deleted file mode 100644 index 999d02aac2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.expm1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.expm1 -=================== - -.. 
currentmodule:: scipy.special - -.. autodata:: expm1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.expn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.expn.rst deleted file mode 100644 index d674c49717..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.expn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.expn -================== - -.. currentmodule:: scipy.special - -.. autodata:: expn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fdtr.rst deleted file mode 100644 index acc53efcc7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fdtr -================== - -.. currentmodule:: scipy.special - -.. autodata:: fdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fdtrc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fdtrc.rst deleted file mode 100644 index 70cfc956a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fdtrc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fdtrc -=================== - -.. currentmodule:: scipy.special - -.. autodata:: fdtrc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fdtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fdtri.rst deleted file mode 100644 index c4d991fe93..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fdtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fdtri -=================== - -.. currentmodule:: scipy.special - -.. 
autodata:: fdtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fresnel.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fresnel.rst deleted file mode 100644 index aee0e3a59c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fresnel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fresnel -===================== - -.. currentmodule:: scipy.special - -.. autodata:: fresnel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fresnel_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fresnel_zeros.rst deleted file mode 100644 index 14eb3a8853..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fresnel_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fresnel_zeros -=========================== - -.. currentmodule:: scipy.special - -.. autofunction:: fresnel_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fresnelc_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fresnelc_zeros.rst deleted file mode 100644 index fa41185605..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fresnelc_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fresnelc_zeros -============================ - -.. currentmodule:: scipy.special - -.. autofunction:: fresnelc_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.fresnels_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.fresnels_zeros.rst deleted file mode 100644 index 77a8d08a03..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.fresnels_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.fresnels_zeros -============================ - -.. currentmodule:: scipy.special - -.. 
autofunction:: fresnels_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gamma.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gamma.rst deleted file mode 100644 index d629e3415f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gamma -=================== - -.. currentmodule:: scipy.special - -.. autodata:: gamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gammainc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gammainc.rst deleted file mode 100644 index 98dc64a4b1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gammainc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gammainc -====================== - -.. currentmodule:: scipy.special - -.. autodata:: gammainc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gammaincc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gammaincc.rst deleted file mode 100644 index 5c73789b0b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gammaincc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gammaincc -======================= - -.. currentmodule:: scipy.special - -.. autodata:: gammaincc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gammainccinv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gammainccinv.rst deleted file mode 100644 index 44264f3eb8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gammainccinv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gammainccinv -========================== - -.. currentmodule:: scipy.special - -.. 
autodata:: gammainccinv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gammaincinv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gammaincinv.rst deleted file mode 100644 index 197af1d3b4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gammaincinv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gammaincinv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: gammaincinv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gammaln.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gammaln.rst deleted file mode 100644 index 0f7fb58c04..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gammaln.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gammaln -===================== - -.. currentmodule:: scipy.special - -.. autodata:: gammaln \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gdtr.rst deleted file mode 100644 index c08fd09738..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gdtr -================== - -.. currentmodule:: scipy.special - -.. autodata:: gdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gdtrc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gdtrc.rst deleted file mode 100644 index 74ea0932eb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gdtrc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gdtrc -=================== - -.. currentmodule:: scipy.special - -.. 
autodata:: gdtrc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gdtria.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gdtria.rst deleted file mode 100644 index 8ff33a108a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gdtria.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gdtria -==================== - -.. currentmodule:: scipy.special - -.. autodata:: gdtria \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gdtrib.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gdtrib.rst deleted file mode 100644 index 904cc259aa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gdtrib.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gdtrib -==================== - -.. currentmodule:: scipy.special - -.. autodata:: gdtrib \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gdtrix.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gdtrix.rst deleted file mode 100644 index 3dfdcf1269..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gdtrix.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gdtrix -==================== - -.. currentmodule:: scipy.special - -.. autodata:: gdtrix \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.gegenbauer.rst b/scipy-0.10.1/doc/source/generated/scipy.special.gegenbauer.rst deleted file mode 100644 index 533dd074e3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.gegenbauer.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.gegenbauer -======================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: gegenbauer \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.genlaguerre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.genlaguerre.rst deleted file mode 100644 index 7ffb95c264..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.genlaguerre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.genlaguerre -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: genlaguerre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.h1vp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.h1vp.rst deleted file mode 100644 index 0a6b1f59f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.h1vp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.h1vp -================== - -.. currentmodule:: scipy.special - -.. autofunction:: h1vp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.h2vp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.h2vp.rst deleted file mode 100644 index 2ec4a27cec..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.h2vp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.h2vp -================== - -.. currentmodule:: scipy.special - -.. autofunction:: h2vp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hankel1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hankel1.rst deleted file mode 100644 index 4305d62287..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hankel1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hankel1 -===================== - -.. currentmodule:: scipy.special - -.. 
autodata:: hankel1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hankel1e.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hankel1e.rst deleted file mode 100644 index a0d5a9a8b9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hankel1e.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hankel1e -====================== - -.. currentmodule:: scipy.special - -.. autodata:: hankel1e \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hankel2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hankel2.rst deleted file mode 100644 index 467de9b7ca..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hankel2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hankel2 -===================== - -.. currentmodule:: scipy.special - -.. autodata:: hankel2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hankel2e.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hankel2e.rst deleted file mode 100644 index e7adcc2bd4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hankel2e.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hankel2e -====================== - -.. currentmodule:: scipy.special - -.. autodata:: hankel2e \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hermite.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hermite.rst deleted file mode 100644 index 6fa7600eb2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hermite.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hermite -===================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: hermite \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hermitenorm.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hermitenorm.rst deleted file mode 100644 index 549c81dc05..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hermitenorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hermitenorm -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: hermitenorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyp0f1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyp0f1.rst deleted file mode 100644 index 83510d46ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyp0f1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyp0f1 -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: hyp0f1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyp1f1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyp1f1.rst deleted file mode 100644 index d901a79a28..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyp1f1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyp1f1 -==================== - -.. currentmodule:: scipy.special - -.. autodata:: hyp1f1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyp1f2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyp1f2.rst deleted file mode 100644 index f922553945..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyp1f2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyp1f2 -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: hyp1f2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyp2f0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyp2f0.rst deleted file mode 100644 index 1e8d4fb8da..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyp2f0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyp2f0 -==================== - -.. currentmodule:: scipy.special - -.. autodata:: hyp2f0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyp2f1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyp2f1.rst deleted file mode 100644 index 74474d3bf9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyp2f1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyp2f1 -==================== - -.. currentmodule:: scipy.special - -.. autodata:: hyp2f1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyp3f0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyp3f0.rst deleted file mode 100644 index b7be8143c5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyp3f0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyp3f0 -==================== - -.. currentmodule:: scipy.special - -.. autodata:: hyp3f0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.hyperu.rst b/scipy-0.10.1/doc/source/generated/scipy.special.hyperu.rst deleted file mode 100644 index 1033804fe7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.hyperu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.hyperu -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: hyperu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.i0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.i0.rst deleted file mode 100644 index 6cfdba81d5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.i0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.i0 -================ - -.. currentmodule:: scipy.special - -.. autodata:: i0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.i0e.rst b/scipy-0.10.1/doc/source/generated/scipy.special.i0e.rst deleted file mode 100644 index 3012c9852f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.i0e.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.i0e -================= - -.. currentmodule:: scipy.special - -.. autodata:: i0e \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.i1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.i1.rst deleted file mode 100644 index cbbb3b2811..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.i1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.i1 -================ - -.. currentmodule:: scipy.special - -.. autodata:: i1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.i1e.rst b/scipy-0.10.1/doc/source/generated/scipy.special.i1e.rst deleted file mode 100644 index 90753b78e4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.i1e.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.i1e -================= - -.. currentmodule:: scipy.special - -.. autodata:: i1e \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.it2i0k0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.it2i0k0.rst deleted file mode 100644 index a2af1d05c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.it2i0k0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.it2i0k0 -===================== - -.. currentmodule:: scipy.special - -.. 
autodata:: it2i0k0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.it2j0y0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.it2j0y0.rst deleted file mode 100644 index fbdcbc8d62..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.it2j0y0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.it2j0y0 -===================== - -.. currentmodule:: scipy.special - -.. autodata:: it2j0y0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.it2struve0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.it2struve0.rst deleted file mode 100644 index f43e9f4784..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.it2struve0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.it2struve0 -======================== - -.. currentmodule:: scipy.special - -.. autodata:: it2struve0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.iti0k0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.iti0k0.rst deleted file mode 100644 index 40c35e3e4f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.iti0k0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.iti0k0 -==================== - -.. currentmodule:: scipy.special - -.. autodata:: iti0k0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.itj0y0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.itj0y0.rst deleted file mode 100644 index 6bac482fe3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.itj0y0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.itj0y0 -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: itj0y0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.itmodstruve0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.itmodstruve0.rst deleted file mode 100644 index 37489f9588..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.itmodstruve0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.itmodstruve0 -========================== - -.. currentmodule:: scipy.special - -.. autodata:: itmodstruve0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.itstruve0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.itstruve0.rst deleted file mode 100644 index 8adcd8d20c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.itstruve0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.itstruve0 -======================= - -.. currentmodule:: scipy.special - -.. autodata:: itstruve0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.iv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.iv.rst deleted file mode 100644 index 88e25cae5c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.iv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.iv -================ - -.. currentmodule:: scipy.special - -.. autodata:: iv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ive.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ive.rst deleted file mode 100644 index 047b4367a1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ive.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ive -================= - -.. currentmodule:: scipy.special - -.. 
autodata:: ive \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ivp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ivp.rst deleted file mode 100644 index 5f53636954..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ivp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ivp -================= - -.. currentmodule:: scipy.special - -.. autofunction:: ivp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.j0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.j0.rst deleted file mode 100644 index 15bae9a3f2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.j0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.j0 -================ - -.. currentmodule:: scipy.special - -.. autodata:: j0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.j1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.j1.rst deleted file mode 100644 index 843977e665..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.j1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.j1 -================ - -.. currentmodule:: scipy.special - -.. autodata:: j1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jacobi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jacobi.rst deleted file mode 100644 index 2823c92130..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jacobi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jacobi -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: jacobi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jn.rst deleted file mode 100644 index 68b1d58950..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jn -================ - -.. currentmodule:: scipy.special - -.. 
autodata:: jn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jn_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jn_zeros.rst deleted file mode 100644 index c04a0d6a07..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jn_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jn_zeros -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: jn_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jnjnp_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jnjnp_zeros.rst deleted file mode 100644 index 8279d56caf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jnjnp_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jnjnp_zeros -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: jnjnp_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jnp_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jnp_zeros.rst deleted file mode 100644 index 7c4cf0fc9f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jnp_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jnp_zeros -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: jnp_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jnyn_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jnyn_zeros.rst deleted file mode 100644 index 289bf95109..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jnyn_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jnyn_zeros -======================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: jnyn_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jv.rst deleted file mode 100644 index 868d25496a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jv -================ - -.. currentmodule:: scipy.special - -.. autodata:: jv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jve.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jve.rst deleted file mode 100644 index e2d0ee682f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jve -================= - -.. currentmodule:: scipy.special - -.. autodata:: jve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.jvp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.jvp.rst deleted file mode 100644 index 6823824dfe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.jvp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.jvp -================= - -.. currentmodule:: scipy.special - -.. autofunction:: jvp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.k0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.k0.rst deleted file mode 100644 index 99e5766559..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.k0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.k0 -================ - -.. currentmodule:: scipy.special - -.. autodata:: k0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.k0e.rst b/scipy-0.10.1/doc/source/generated/scipy.special.k0e.rst deleted file mode 100644 index d45094d3d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.k0e.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.k0e -================= - -.. currentmodule:: scipy.special - -.. 
autodata:: k0e \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.k1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.k1.rst deleted file mode 100644 index 4093f7e235..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.k1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.k1 -================ - -.. currentmodule:: scipy.special - -.. autodata:: k1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.k1e.rst b/scipy-0.10.1/doc/source/generated/scipy.special.k1e.rst deleted file mode 100644 index c5f2a1d39d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.k1e.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.k1e -================= - -.. currentmodule:: scipy.special - -.. autodata:: k1e \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kei.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kei.rst deleted file mode 100644 index 1ae0283ca4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kei.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kei -================= - -.. currentmodule:: scipy.special - -.. autodata:: kei \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kei_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kei_zeros.rst deleted file mode 100644 index 4d57e20015..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kei_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kei_zeros -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: kei_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.keip.rst b/scipy-0.10.1/doc/source/generated/scipy.special.keip.rst deleted file mode 100644 index d305445713..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.keip.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.keip -================== - -.. 
currentmodule:: scipy.special - -.. autodata:: keip \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.keip_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.keip_zeros.rst deleted file mode 100644 index 337568cf39..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.keip_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.keip_zeros -======================== - -.. currentmodule:: scipy.special - -.. autofunction:: keip_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kelvin.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kelvin.rst deleted file mode 100644 index 34b3e02b22..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kelvin.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kelvin -==================== - -.. currentmodule:: scipy.special - -.. autodata:: kelvin \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kelvin_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kelvin_zeros.rst deleted file mode 100644 index db9d0a603a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kelvin_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kelvin_zeros -========================== - -.. currentmodule:: scipy.special - -.. autofunction:: kelvin_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ker.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ker.rst deleted file mode 100644 index 4dbbaa7379..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ker.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ker -================= - -.. currentmodule:: scipy.special - -.. 
autodata:: ker \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ker_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ker_zeros.rst deleted file mode 100644 index 696bb29332..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ker_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ker_zeros -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: ker_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kerp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kerp.rst deleted file mode 100644 index 3398567da5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kerp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kerp -================== - -.. currentmodule:: scipy.special - -.. autodata:: kerp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kerp_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kerp_zeros.rst deleted file mode 100644 index 1109e6d2f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kerp_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kerp_zeros -======================== - -.. currentmodule:: scipy.special - -.. autofunction:: kerp_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kn.rst deleted file mode 100644 index 690476f519..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kn -================ - -.. currentmodule:: scipy.special - -.. 
autodata:: kn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kolmogi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kolmogi.rst deleted file mode 100644 index 3dd10c1de3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kolmogi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kolmogi -===================== - -.. currentmodule:: scipy.special - -.. autodata:: kolmogi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kolmogorov.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kolmogorov.rst deleted file mode 100644 index b76880fd95..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kolmogorov.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kolmogorov -======================== - -.. currentmodule:: scipy.special - -.. autodata:: kolmogorov \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kv.rst deleted file mode 100644 index d27953f5d5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kv -================ - -.. currentmodule:: scipy.special - -.. autodata:: kv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kve.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kve.rst deleted file mode 100644 index 8160b11366..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kve -================= - -.. currentmodule:: scipy.special - -.. 
autodata:: kve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.kvp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.kvp.rst deleted file mode 100644 index 6250c54479..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.kvp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.kvp -================= - -.. currentmodule:: scipy.special - -.. autofunction:: kvp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.laguerre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.laguerre.rst deleted file mode 100644 index 5bb2e798b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.laguerre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.laguerre -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: laguerre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lambertw.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lambertw.rst deleted file mode 100644 index bdeae89ad3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lambertw.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lambertw -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: lambertw \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.legendre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.legendre.rst deleted file mode 100644 index 1e213d56da..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.legendre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.legendre -====================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: legendre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lmbda.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lmbda.rst deleted file mode 100644 index cefe63aedf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lmbda.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lmbda -=================== - -.. currentmodule:: scipy.special - -.. autofunction:: lmbda \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.log1p.rst b/scipy-0.10.1/doc/source/generated/scipy.special.log1p.rst deleted file mode 100644 index e43c9198e9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.log1p.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.log1p -=================== - -.. currentmodule:: scipy.special - -.. autodata:: log1p \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.logit.rst b/scipy-0.10.1/doc/source/generated/scipy.special.logit.rst deleted file mode 100644 index ba23f181d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.logit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.logit -=================== - -.. currentmodule:: scipy.special - -.. autodata:: logit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lpmn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lpmn.rst deleted file mode 100644 index 826e161531..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lpmn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lpmn -================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: lpmn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lpmv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lpmv.rst deleted file mode 100644 index 900ea1dcb2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lpmv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lpmv -================== - -.. currentmodule:: scipy.special - -.. autodata:: lpmv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lpn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lpn.rst deleted file mode 100644 index b5174cfd73..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lpn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lpn -================= - -.. currentmodule:: scipy.special - -.. autofunction:: lpn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lqmn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lqmn.rst deleted file mode 100644 index 499602af69..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lqmn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lqmn -================== - -.. currentmodule:: scipy.special - -.. autofunction:: lqmn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.lqn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.lqn.rst deleted file mode 100644 index e142230d4b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.lqn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.lqn -================= - -.. currentmodule:: scipy.special - -.. 
autofunction:: lqn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_a.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_a.rst deleted file mode 100644 index 31f9c9f3e3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_a.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_a -======================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_a \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_b.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_b.rst deleted file mode 100644 index b4f2a0df87..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_b.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_b -======================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_b \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_cem.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_cem.rst deleted file mode 100644 index 83ffc31fe0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_cem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_cem -========================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_cem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_even_coef.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_even_coef.rst deleted file mode 100644 index 4eb49ae0f0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_even_coef.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_even_coef -=============================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: mathieu_even_coef \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modcem1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modcem1.rst deleted file mode 100644 index d32556584a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modcem1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_modcem1 -============================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_modcem1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modcem2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modcem2.rst deleted file mode 100644 index 745b474002..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modcem2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_modcem2 -============================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_modcem2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modsem1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modsem1.rst deleted file mode 100644 index e0aa6165f4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modsem1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_modsem1 -============================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_modsem1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modsem2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modsem2.rst deleted file mode 100644 index 658a006977..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_modsem2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_modsem2 -============================= - -.. currentmodule:: scipy.special - -.. 
autodata:: mathieu_modsem2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_odd_coef.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_odd_coef.rst deleted file mode 100644 index 60cf70686d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_odd_coef.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_odd_coef -============================== - -.. currentmodule:: scipy.special - -.. autofunction:: mathieu_odd_coef \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_sem.rst b/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_sem.rst deleted file mode 100644 index e6781dfa9a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.mathieu_sem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.mathieu_sem -========================= - -.. currentmodule:: scipy.special - -.. autodata:: mathieu_sem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.modfresnelm.rst b/scipy-0.10.1/doc/source/generated/scipy.special.modfresnelm.rst deleted file mode 100644 index 20d615e95a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.modfresnelm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.modfresnelm -========================= - -.. currentmodule:: scipy.special - -.. autodata:: modfresnelm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.modfresnelp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.modfresnelp.rst deleted file mode 100644 index 65768bd8f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.modfresnelp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.modfresnelp -========================= - -.. currentmodule:: scipy.special - -.. 
autodata:: modfresnelp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.modstruve.rst b/scipy-0.10.1/doc/source/generated/scipy.special.modstruve.rst deleted file mode 100644 index c394bc8975..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.modstruve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.modstruve -======================= - -.. currentmodule:: scipy.special - -.. autodata:: modstruve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.multigammaln.rst b/scipy-0.10.1/doc/source/generated/scipy.special.multigammaln.rst deleted file mode 100644 index 8f898be197..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.multigammaln.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.multigammaln -========================== - -.. currentmodule:: scipy.special - -.. autofunction:: multigammaln \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.nbdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.nbdtr.rst deleted file mode 100644 index c0a9678fe5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.nbdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.nbdtr -=================== - -.. currentmodule:: scipy.special - -.. autodata:: nbdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.nbdtrc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.nbdtrc.rst deleted file mode 100644 index 11191d353c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.nbdtrc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.nbdtrc -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: nbdtrc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.nbdtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.nbdtri.rst deleted file mode 100644 index 229ea1f61c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.nbdtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.nbdtri -==================== - -.. currentmodule:: scipy.special - -.. autodata:: nbdtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ndtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ndtr.rst deleted file mode 100644 index b3b64ba6e2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ndtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ndtr -================== - -.. currentmodule:: scipy.special - -.. autodata:: ndtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ndtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ndtri.rst deleted file mode 100644 index 9b0f858736..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ndtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ndtri -=================== - -.. currentmodule:: scipy.special - -.. autodata:: ndtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_ang1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_ang1.rst deleted file mode 100644 index d4ee53754d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_ang1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_ang1 -====================== - -.. currentmodule:: scipy.special - -.. 
autodata:: obl_ang1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_ang1_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_ang1_cv.rst deleted file mode 100644 index 8e2a155b17..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_ang1_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_ang1_cv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: obl_ang1_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_cv.rst deleted file mode 100644 index 12a285660b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_cv -==================== - -.. currentmodule:: scipy.special - -.. autodata:: obl_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_cv_seq.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_cv_seq.rst deleted file mode 100644 index 85ffbf0777..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_cv_seq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_cv_seq -======================== - -.. currentmodule:: scipy.special - -.. autofunction:: obl_cv_seq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad1.rst deleted file mode 100644 index 0c1a073949..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_rad1 -====================== - -.. currentmodule:: scipy.special - -.. 
autodata:: obl_rad1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad1_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad1_cv.rst deleted file mode 100644 index 0c10e8d052..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad1_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_rad1_cv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: obl_rad1_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad2.rst deleted file mode 100644 index 05744ff62d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_rad2 -====================== - -.. currentmodule:: scipy.special - -.. autodata:: obl_rad2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad2_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad2_cv.rst deleted file mode 100644 index acb5992891..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.obl_rad2_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.obl_rad2_cv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: obl_rad2_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pbdn_seq.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pbdn_seq.rst deleted file mode 100644 index c1b7168e63..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pbdn_seq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pbdn_seq -====================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: pbdn_seq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pbdv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pbdv.rst deleted file mode 100644 index 8bc45c5638..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pbdv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pbdv -================== - -.. currentmodule:: scipy.special - -.. autodata:: pbdv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pbdv_seq.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pbdv_seq.rst deleted file mode 100644 index 761db7f4a5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pbdv_seq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pbdv_seq -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: pbdv_seq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pbvv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pbvv.rst deleted file mode 100644 index 77a74ffd14..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pbvv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pbvv -================== - -.. currentmodule:: scipy.special - -.. autodata:: pbvv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pbvv_seq.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pbvv_seq.rst deleted file mode 100644 index 821eaec534..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pbvv_seq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pbvv_seq -====================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: pbvv_seq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pbwa.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pbwa.rst deleted file mode 100644 index e35796aa9a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pbwa.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pbwa -================== - -.. currentmodule:: scipy.special - -.. autodata:: pbwa \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pdtr.rst deleted file mode 100644 index e66e709b58..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pdtr -================== - -.. currentmodule:: scipy.special - -.. autodata:: pdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pdtrc.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pdtrc.rst deleted file mode 100644 index 226ddd3f5c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pdtrc.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pdtrc -=================== - -.. currentmodule:: scipy.special - -.. autodata:: pdtrc \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pdtri.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pdtri.rst deleted file mode 100644 index d20d1a5684..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pdtri.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pdtri -=================== - -.. currentmodule:: scipy.special - -.. 
autodata:: pdtri \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.polygamma.rst b/scipy-0.10.1/doc/source/generated/scipy.special.polygamma.rst deleted file mode 100644 index 8ee3ff9f15..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.polygamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.polygamma -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: polygamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_ang1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_ang1.rst deleted file mode 100644 index eacb85cf27..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_ang1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_ang1 -====================== - -.. currentmodule:: scipy.special - -.. autodata:: pro_ang1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_ang1_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_ang1_cv.rst deleted file mode 100644 index dfbe9ca82c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_ang1_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_ang1_cv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: pro_ang1_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_cv.rst deleted file mode 100644 index baffdd7767..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_cv -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: pro_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_cv_seq.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_cv_seq.rst deleted file mode 100644 index 7f9a303598..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_cv_seq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_cv_seq -======================== - -.. currentmodule:: scipy.special - -.. autofunction:: pro_cv_seq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad1.rst deleted file mode 100644 index eb07788435..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_rad1 -====================== - -.. currentmodule:: scipy.special - -.. autodata:: pro_rad1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad1_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad1_cv.rst deleted file mode 100644 index d2f2ec1228..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad1_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_rad1_cv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: pro_rad1_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad2.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad2.rst deleted file mode 100644 index f41ce7b4b0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_rad2 -====================== - -.. currentmodule:: scipy.special - -.. 
autodata:: pro_rad2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad2_cv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad2_cv.rst deleted file mode 100644 index 21efa6e179..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.pro_rad2_cv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.pro_rad2_cv -========================= - -.. currentmodule:: scipy.special - -.. autodata:: pro_rad2_cv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.psi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.psi.rst deleted file mode 100644 index c21f8b6d00..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.psi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.psi -================= - -.. currentmodule:: scipy.special - -.. autodata:: psi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.radian.rst b/scipy-0.10.1/doc/source/generated/scipy.special.radian.rst deleted file mode 100644 index f0bbe471a5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.radian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.radian -==================== - -.. currentmodule:: scipy.special - -.. autodata:: radian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.rgamma.rst b/scipy-0.10.1/doc/source/generated/scipy.special.rgamma.rst deleted file mode 100644 index 9b6d3d2fcf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.rgamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.rgamma -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: rgamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.riccati_jn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.riccati_jn.rst deleted file mode 100644 index 59767bc525..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.riccati_jn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.riccati_jn -======================== - -.. currentmodule:: scipy.special - -.. autofunction:: riccati_jn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.riccati_yn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.riccati_yn.rst deleted file mode 100644 index ed2a447975..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.riccati_yn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.riccati_yn -======================== - -.. currentmodule:: scipy.special - -.. autofunction:: riccati_yn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.round.rst b/scipy-0.10.1/doc/source/generated/scipy.special.round.rst deleted file mode 100644 index d5b1c5aadb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.round.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.round -=================== - -.. currentmodule:: scipy.special - -.. autodata:: round \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sh_chebyt.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sh_chebyt.rst deleted file mode 100644 index 98570c14a1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sh_chebyt.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sh_chebyt -======================= - -.. currentmodule:: scipy.special - -.. 
autofunction:: sh_chebyt \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sh_chebyu.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sh_chebyu.rst deleted file mode 100644 index 9cdf6d71b5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sh_chebyu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sh_chebyu -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: sh_chebyu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sh_jacobi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sh_jacobi.rst deleted file mode 100644 index 7c4ffe4333..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sh_jacobi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sh_jacobi -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: sh_jacobi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sh_legendre.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sh_legendre.rst deleted file mode 100644 index 547d6b41ca..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sh_legendre.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sh_legendre -========================= - -.. currentmodule:: scipy.special - -.. autofunction:: sh_legendre \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.shichi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.shichi.rst deleted file mode 100644 index c97c08aeb7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.shichi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.shichi -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: shichi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sici.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sici.rst deleted file mode 100644 index 6e822a14fe..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sici.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sici -================== - -.. currentmodule:: scipy.special - -.. autodata:: sici \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sindg.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sindg.rst deleted file mode 100644 index aeb3c7c670..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sindg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sindg -=================== - -.. currentmodule:: scipy.special - -.. autodata:: sindg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.smirnov.rst b/scipy-0.10.1/doc/source/generated/scipy.special.smirnov.rst deleted file mode 100644 index a0f2b2ad5e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.smirnov.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.smirnov -===================== - -.. currentmodule:: scipy.special - -.. autodata:: smirnov \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.smirnovi.rst b/scipy-0.10.1/doc/source/generated/scipy.special.smirnovi.rst deleted file mode 100644 index affbabe9ff..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.smirnovi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.smirnovi -====================== - -.. currentmodule:: scipy.special - -.. 
autodata:: smirnovi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.spence.rst b/scipy-0.10.1/doc/source/generated/scipy.special.spence.rst deleted file mode 100644 index 19b6ed5c86..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.spence.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.spence -==================== - -.. currentmodule:: scipy.special - -.. autodata:: spence \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_harm.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_harm.rst deleted file mode 100644 index 2084325c79..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_harm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_harm -====================== - -.. currentmodule:: scipy.special - -.. autodata:: sph_harm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_in.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_in.rst deleted file mode 100644 index f748b19dbd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_in.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_in -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: sph_in \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_inkn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_inkn.rst deleted file mode 100644 index 7e62787435..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_inkn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_inkn -====================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: sph_inkn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_jn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_jn.rst deleted file mode 100644 index b5d07d5060..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_jn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_jn -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: sph_jn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_jnyn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_jnyn.rst deleted file mode 100644 index c694924bf7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_jnyn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_jnyn -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: sph_jnyn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_kn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_kn.rst deleted file mode 100644 index 77e1554830..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_kn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_kn -==================== - -.. currentmodule:: scipy.special - -.. autofunction:: sph_kn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.sph_yn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.sph_yn.rst deleted file mode 100644 index ab451c1087..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.sph_yn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.sph_yn -==================== - -.. currentmodule:: scipy.special - -.. 
autofunction:: sph_yn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.stdtr.rst b/scipy-0.10.1/doc/source/generated/scipy.special.stdtr.rst deleted file mode 100644 index 78d40468c3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.stdtr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.stdtr -=================== - -.. currentmodule:: scipy.special - -.. autodata:: stdtr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.stdtridf.rst b/scipy-0.10.1/doc/source/generated/scipy.special.stdtridf.rst deleted file mode 100644 index 384cc5b24f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.stdtridf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.stdtridf -====================== - -.. currentmodule:: scipy.special - -.. autodata:: stdtridf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.stdtrit.rst b/scipy-0.10.1/doc/source/generated/scipy.special.stdtrit.rst deleted file mode 100644 index c326693137..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.stdtrit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.stdtrit -===================== - -.. currentmodule:: scipy.special - -.. autodata:: stdtrit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.struve.rst b/scipy-0.10.1/doc/source/generated/scipy.special.struve.rst deleted file mode 100644 index b9032afc00..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.struve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.struve -==================== - -.. currentmodule:: scipy.special - -.. 
autodata:: struve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.tandg.rst b/scipy-0.10.1/doc/source/generated/scipy.special.tandg.rst deleted file mode 100644 index 1a9ef956bf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.tandg.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.tandg -=================== - -.. currentmodule:: scipy.special - -.. autodata:: tandg \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.tklmbda.rst b/scipy-0.10.1/doc/source/generated/scipy.special.tklmbda.rst deleted file mode 100644 index eedfdc6d6f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.tklmbda.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.tklmbda -===================== - -.. currentmodule:: scipy.special - -.. autodata:: tklmbda \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.wofz.rst b/scipy-0.10.1/doc/source/generated/scipy.special.wofz.rst deleted file mode 100644 index 9d951b6dec..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.wofz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.wofz -================== - -.. currentmodule:: scipy.special - -.. autodata:: wofz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.y0.rst b/scipy-0.10.1/doc/source/generated/scipy.special.y0.rst deleted file mode 100644 index 85a47cb00a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.y0.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.y0 -================ - -.. currentmodule:: scipy.special - -.. 
autodata:: y0 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.y0_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.y0_zeros.rst deleted file mode 100644 index bd209f3860..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.y0_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.y0_zeros -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: y0_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.y1.rst b/scipy-0.10.1/doc/source/generated/scipy.special.y1.rst deleted file mode 100644 index a7244afac2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.y1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.y1 -================ - -.. currentmodule:: scipy.special - -.. autodata:: y1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.y1_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.y1_zeros.rst deleted file mode 100644 index 7f06aea3c9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.y1_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.y1_zeros -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: y1_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.y1p_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.y1p_zeros.rst deleted file mode 100644 index 83e2b6e0d5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.y1p_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.y1p_zeros -======================= - -.. currentmodule:: scipy.special - -.. 
autofunction:: y1p_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.yn.rst b/scipy-0.10.1/doc/source/generated/scipy.special.yn.rst deleted file mode 100644 index db93ed9a9c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.yn.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.yn -================ - -.. currentmodule:: scipy.special - -.. autodata:: yn \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.yn_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.yn_zeros.rst deleted file mode 100644 index 46111afc43..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.yn_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.yn_zeros -====================== - -.. currentmodule:: scipy.special - -.. autofunction:: yn_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.ynp_zeros.rst b/scipy-0.10.1/doc/source/generated/scipy.special.ynp_zeros.rst deleted file mode 100644 index 980c2cd473..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.ynp_zeros.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.ynp_zeros -======================= - -.. currentmodule:: scipy.special - -.. autofunction:: ynp_zeros \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.yv.rst b/scipy-0.10.1/doc/source/generated/scipy.special.yv.rst deleted file mode 100644 index 30c4d1d600..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.yv.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.yv -================ - -.. currentmodule:: scipy.special - -.. 
autodata:: yv \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.yve.rst b/scipy-0.10.1/doc/source/generated/scipy.special.yve.rst deleted file mode 100644 index 137971ff5b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.yve.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.yve -================= - -.. currentmodule:: scipy.special - -.. autodata:: yve \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.yvp.rst b/scipy-0.10.1/doc/source/generated/scipy.special.yvp.rst deleted file mode 100644 index a4dbd83020..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.yvp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.yvp -================= - -.. currentmodule:: scipy.special - -.. autofunction:: yvp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.zeta.rst b/scipy-0.10.1/doc/source/generated/scipy.special.zeta.rst deleted file mode 100644 index 2cce636064..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.zeta.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.zeta -================== - -.. currentmodule:: scipy.special - -.. autodata:: zeta \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.special.zetac.rst b/scipy-0.10.1/doc/source/generated/scipy.special.zetac.rst deleted file mode 100644 index 00415ad906..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.special.zetac.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.special.zetac -=================== - -.. currentmodule:: scipy.special - -.. autodata:: zetac \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.alpha.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.alpha.rst deleted file mode 100644 index 18ce019e88..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.alpha.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.alpha -================= - -.. currentmodule:: scipy.stats - -.. 
autodata:: alpha \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.anderson.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.anderson.rst deleted file mode 100644 index 12b922a88e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.anderson.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.anderson -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: anderson \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.anglit.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.anglit.rst deleted file mode 100644 index f4a9291fe0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.anglit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.anglit -================== - -.. currentmodule:: scipy.stats - -.. autodata:: anglit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ansari.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ansari.rst deleted file mode 100644 index 2adb687222..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ansari.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ansari -================== - -.. currentmodule:: scipy.stats - -.. autofunction:: ansari \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.arcsine.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.arcsine.rst deleted file mode 100644 index 55306b07ed..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.arcsine.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.arcsine -=================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: arcsine \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.bartlett.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.bartlett.rst deleted file mode 100644 index ea8226c5f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.bartlett.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.bartlett -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: bartlett \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.bayes_mvs.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.bayes_mvs.rst deleted file mode 100644 index c7e166e259..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.bayes_mvs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.bayes_mvs -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: bayes_mvs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.bernoulli.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.bernoulli.rst deleted file mode 100644 index 3bafbff97b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.bernoulli.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.bernoulli -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: bernoulli \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.beta.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.beta.rst deleted file mode 100644 index 629463c1b9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.beta.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.beta -================ - -.. currentmodule:: scipy.stats - -.. 
autodata:: beta \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.betaprime.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.betaprime.rst deleted file mode 100644 index 2800370edb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.betaprime.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.betaprime -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: betaprime \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.binom.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.binom.rst deleted file mode 100644 index 4d2e155351..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.binom.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.binom -================= - -.. currentmodule:: scipy.stats - -.. autodata:: binom \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.binom_test.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.binom_test.rst deleted file mode 100644 index d0ee5c107b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.binom_test.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.binom_test -====================== - -.. currentmodule:: scipy.stats - -.. autofunction:: binom_test \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.boltzmann.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.boltzmann.rst deleted file mode 100644 index f9fb658bc1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.boltzmann.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.boltzmann -===================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: boltzmann \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.bradford.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.bradford.rst deleted file mode 100644 index 4e896eaf01..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.bradford.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.bradford -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: bradford \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.burr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.burr.rst deleted file mode 100644 index 5dd1d9400b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.burr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.burr -================ - -.. currentmodule:: scipy.stats - -.. autodata:: burr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.cauchy.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.cauchy.rst deleted file mode 100644 index 421cac77fd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.cauchy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.cauchy -================== - -.. currentmodule:: scipy.stats - -.. autodata:: cauchy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.chi.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.chi.rst deleted file mode 100644 index e249f2c427..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.chi.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.chi -=============== - -.. currentmodule:: scipy.stats - -.. autodata:: chi \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.chi2.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.chi2.rst deleted file mode 100644 index cead3a8573..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.chi2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.chi2 -================ - -.. currentmodule:: scipy.stats - -.. 
autodata:: chi2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.chi2_contingency.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.chi2_contingency.rst deleted file mode 100644 index c4d386fe3d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.chi2_contingency.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.chi2_contingency -============================ - -.. currentmodule:: scipy.stats - -.. autofunction:: chi2_contingency \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.chisquare.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.chisquare.rst deleted file mode 100644 index 3d01ad5f34..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.chisquare.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.chisquare -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: chisquare \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.cmedian.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.cmedian.rst deleted file mode 100644 index 0747485c10..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.cmedian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.cmedian -=================== - -.. currentmodule:: scipy.stats - -.. autofunction:: cmedian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.contingency.expected_freq.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.contingency.expected_freq.rst deleted file mode 100644 index e67924d097..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.contingency.expected_freq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.contingency.expected_freq -===================================== - -.. currentmodule:: scipy.stats.contingency - -.. 
autofunction:: expected_freq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.contingency.margins.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.contingency.margins.rst deleted file mode 100644 index 56600c1fd3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.contingency.margins.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.contingency.margins -=============================== - -.. currentmodule:: scipy.stats.contingency - -.. autofunction:: margins \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.cosine.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.cosine.rst deleted file mode 100644 index 7bddb0999f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.cosine.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.cosine -================== - -.. currentmodule:: scipy.stats - -.. autodata:: cosine \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.cumfreq.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.cumfreq.rst deleted file mode 100644 index 4b91ef347c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.cumfreq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.cumfreq -=================== - -.. currentmodule:: scipy.stats - -.. autofunction:: cumfreq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.describe.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.describe.rst deleted file mode 100644 index b428d9f960..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.describe.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.describe -==================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: describe \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.dgamma.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.dgamma.rst deleted file mode 100644 index 07b98cfee4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.dgamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.dgamma -================== - -.. currentmodule:: scipy.stats - -.. autodata:: dgamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.dlaplace.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.dlaplace.rst deleted file mode 100644 index 2c24e6b74d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.dlaplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.dlaplace -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: dlaplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.dweibull.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.dweibull.rst deleted file mode 100644 index 09a024c2b9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.dweibull.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.dweibull -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: dweibull \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.erlang.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.erlang.rst deleted file mode 100644 index 5abbd28512..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.erlang.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.erlang -================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: erlang \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.expon.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.expon.rst deleted file mode 100644 index d023fb932d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.expon.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.expon -================= - -.. currentmodule:: scipy.stats - -.. autodata:: expon \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.exponpow.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.exponpow.rst deleted file mode 100644 index f04c85b4b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.exponpow.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.exponpow -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: exponpow \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.exponweib.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.exponweib.rst deleted file mode 100644 index f555701478..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.exponweib.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.exponweib -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: exponweib \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.f.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.f.rst deleted file mode 100644 index b2569e003d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.f.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.f -============= - -.. currentmodule:: scipy.stats - -.. autodata:: f \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.f_oneway.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.f_oneway.rst deleted file mode 100644 index b40d406fa7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.f_oneway.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.f_oneway -==================== - -.. 
currentmodule:: scipy.stats - -.. autofunction:: f_oneway \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.fatiguelife.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.fatiguelife.rst deleted file mode 100644 index 4b7a32fc56..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.fatiguelife.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.fatiguelife -======================= - -.. currentmodule:: scipy.stats - -.. autodata:: fatiguelife \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.fisher_exact.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.fisher_exact.rst deleted file mode 100644 index c3a6d03a56..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.fisher_exact.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.fisher_exact -======================== - -.. currentmodule:: scipy.stats - -.. autofunction:: fisher_exact \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.fisk.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.fisk.rst deleted file mode 100644 index fc13bc6e2b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.fisk.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.fisk -================ - -.. currentmodule:: scipy.stats - -.. autodata:: fisk \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.fligner.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.fligner.rst deleted file mode 100644 index 0bb489db2e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.fligner.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.fligner -=================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: fligner \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.foldcauchy.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.foldcauchy.rst deleted file mode 100644 index 1901ee36f1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.foldcauchy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.foldcauchy -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: foldcauchy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.foldnorm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.foldnorm.rst deleted file mode 100644 index 69cd236b32..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.foldnorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.foldnorm -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: foldnorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.frechet_l.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.frechet_l.rst deleted file mode 100644 index fd99cb32aa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.frechet_l.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.frechet_l -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: frechet_l \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.frechet_r.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.frechet_r.rst deleted file mode 100644 index e800f75c3e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.frechet_r.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.frechet_r -===================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: frechet_r \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.friedmanchisquare.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.friedmanchisquare.rst deleted file mode 100644 index f79c49b1b1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.friedmanchisquare.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.friedmanchisquare -============================= - -.. currentmodule:: scipy.stats - -.. autofunction:: friedmanchisquare \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gamma.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gamma.rst deleted file mode 100644 index 5350d3a3eb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gamma -================= - -.. currentmodule:: scipy.stats - -.. autodata:: gamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gausshyper.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gausshyper.rst deleted file mode 100644 index f7f116584e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gausshyper.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gausshyper -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: gausshyper \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.__call__.rst deleted file mode 100644 index e4ab4348ce..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.__call__ -================================= - -.. currentmodule:: scipy.stats - -.. 
automethod:: gaussian_kde.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.covariance_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.covariance_factor.rst deleted file mode 100644 index a78486416c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.covariance_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.covariance_factor -========================================== - -.. currentmodule:: scipy.stats - -.. automethod:: gaussian_kde.covariance_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.evaluate.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.evaluate.rst deleted file mode 100644 index 1485ec4763..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.evaluate.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.evaluate -================================= - -.. currentmodule:: scipy.stats - -.. automethod:: gaussian_kde.evaluate \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_box.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_box.rst deleted file mode 100644 index 23b8ba5a5a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_box.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.integrate_box -====================================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: gaussian_kde.integrate_box \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_box_1d.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_box_1d.rst deleted file mode 100644 index 99328b0338..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_box_1d.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.integrate_box_1d -========================================= - -.. currentmodule:: scipy.stats - -.. automethod:: gaussian_kde.integrate_box_1d \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_gaussian.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_gaussian.rst deleted file mode 100644 index 2e0386b967..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_gaussian.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.integrate_gaussian -=========================================== - -.. currentmodule:: scipy.stats - -.. automethod:: gaussian_kde.integrate_gaussian \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_kde.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_kde.rst deleted file mode 100644 index 0f8e54fb14..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.integrate_kde.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.integrate_kde -====================================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: gaussian_kde.integrate_kde \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.resample.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.resample.rst deleted file mode 100644 index ecc3f79056..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.resample.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.resample -================================= - -.. currentmodule:: scipy.stats - -.. automethod:: gaussian_kde.resample \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.rst deleted file mode 100644 index dcebbe54b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.rst +++ /dev/null @@ -1,28 +0,0 @@ -scipy.stats.gaussian_kde -======================== - -.. currentmodule:: scipy.stats - -.. autoclass:: gaussian_kde - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - gaussian_kde.__call__ - gaussian_kde.covariance_factor - gaussian_kde.evaluate - gaussian_kde.integrate_box - gaussian_kde.integrate_box_1d - gaussian_kde.integrate_gaussian - gaussian_kde.integrate_kde - gaussian_kde.resample - gaussian_kde.scotts_factor - gaussian_kde.silverman_factor - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.scotts_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.scotts_factor.rst deleted file mode 100644 index e287cb6676..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.scotts_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.scotts_factor -====================================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: gaussian_kde.scotts_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.silverman_factor.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.silverman_factor.rst deleted file mode 100644 index 35d583d50a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gaussian_kde.silverman_factor.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gaussian_kde.silverman_factor -========================================= - -.. currentmodule:: scipy.stats - -.. automethod:: gaussian_kde.silverman_factor \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.genexpon.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.genexpon.rst deleted file mode 100644 index 1da6681de5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.genexpon.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.genexpon -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: genexpon \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.genextreme.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.genextreme.rst deleted file mode 100644 index 926859b456..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.genextreme.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.genextreme -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: genextreme \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gengamma.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gengamma.rst deleted file mode 100644 index db25669277..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gengamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gengamma -==================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: gengamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.genhalflogistic.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.genhalflogistic.rst deleted file mode 100644 index 0cf861726e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.genhalflogistic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.genhalflogistic -=========================== - -.. currentmodule:: scipy.stats - -.. autodata:: genhalflogistic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.genlogistic.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.genlogistic.rst deleted file mode 100644 index 5479cd0390..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.genlogistic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.genlogistic -======================= - -.. currentmodule:: scipy.stats - -.. autodata:: genlogistic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.genpareto.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.genpareto.rst deleted file mode 100644 index 19435acc80..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.genpareto.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.genpareto -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: genpareto \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.geom.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.geom.rst deleted file mode 100644 index cf417a91a6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.geom.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.geom -================ - -.. currentmodule:: scipy.stats - -.. 
autodata:: geom \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gilbrat.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gilbrat.rst deleted file mode 100644 index b9a25ed3bb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gilbrat.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gilbrat -=================== - -.. currentmodule:: scipy.stats - -.. autodata:: gilbrat \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.glm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.glm.rst deleted file mode 100644 index 3a426b0d01..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.glm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.glm -=============== - -.. currentmodule:: scipy.stats - -.. autofunction:: glm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gmean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gmean.rst deleted file mode 100644 index 536eadd42a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gmean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gmean -================= - -.. currentmodule:: scipy.stats - -.. autofunction:: gmean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gompertz.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gompertz.rst deleted file mode 100644 index 5e28b6035a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gompertz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gompertz -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: gompertz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gumbel_l.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gumbel_l.rst deleted file mode 100644 index 13dfbc6143..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gumbel_l.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gumbel_l -==================== - -.. 
currentmodule:: scipy.stats - -.. autodata:: gumbel_l \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.gumbel_r.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.gumbel_r.rst deleted file mode 100644 index 6675f1c3ef..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.gumbel_r.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.gumbel_r -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: gumbel_r \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.halfcauchy.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.halfcauchy.rst deleted file mode 100644 index 31911fcecd..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.halfcauchy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.halfcauchy -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: halfcauchy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.halflogistic.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.halflogistic.rst deleted file mode 100644 index 33f9023366..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.halflogistic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.halflogistic -======================== - -.. currentmodule:: scipy.stats - -.. autodata:: halflogistic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.halfnorm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.halfnorm.rst deleted file mode 100644 index 0c84d49179..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.halfnorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.halfnorm -==================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: halfnorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.histogram.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.histogram.rst deleted file mode 100644 index 9f135ac5b6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.histogram.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.histogram -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: histogram \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.histogram2.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.histogram2.rst deleted file mode 100644 index 3dfc4a9d2a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.histogram2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.histogram2 -====================== - -.. currentmodule:: scipy.stats - -.. autofunction:: histogram2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.hmean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.hmean.rst deleted file mode 100644 index 15378620b1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.hmean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.hmean -================= - -.. currentmodule:: scipy.stats - -.. autofunction:: hmean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.hypergeom.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.hypergeom.rst deleted file mode 100644 index ffde775c5e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.hypergeom.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.hypergeom -===================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: hypergeom \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.hypsecant.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.hypsecant.rst deleted file mode 100644 index 1074285fe3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.hypsecant.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.hypsecant -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: hypsecant \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.invgamma.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.invgamma.rst deleted file mode 100644 index 9b12acacc2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.invgamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.invgamma -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: invgamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.invgauss.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.invgauss.rst deleted file mode 100644 index b71052eb64..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.invgauss.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.invgauss -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: invgauss \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.invweibull.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.invweibull.rst deleted file mode 100644 index 0c530027dc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.invweibull.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.invweibull -====================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: invweibull \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.itemfreq.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.itemfreq.rst deleted file mode 100644 index 791f1b7040..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.itemfreq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.itemfreq -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: itemfreq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.johnsonsb.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.johnsonsb.rst deleted file mode 100644 index c283131465..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.johnsonsb.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.johnsonsb -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: johnsonsb \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.johnsonsu.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.johnsonsu.rst deleted file mode 100644 index db3434e5c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.johnsonsu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.johnsonsu -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: johnsonsu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.kendalltau.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.kendalltau.rst deleted file mode 100644 index 5e37c9f858..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.kendalltau.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.kendalltau -====================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: kendalltau \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.kruskal.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.kruskal.rst deleted file mode 100644 index d5f88df099..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.kruskal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.kruskal -=================== - -.. currentmodule:: scipy.stats - -.. autofunction:: kruskal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ks_2samp.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ks_2samp.rst deleted file mode 100644 index 3592e52f16..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ks_2samp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ks_2samp -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: ks_2samp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ksone.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ksone.rst deleted file mode 100644 index 67e18267ef..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ksone.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ksone -================= - -.. currentmodule:: scipy.stats - -.. autodata:: ksone \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.kstest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.kstest.rst deleted file mode 100644 index 5df532cd32..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.kstest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.kstest -================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: kstest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.kstwobign.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.kstwobign.rst deleted file mode 100644 index 51cd0a341c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.kstwobign.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.kstwobign -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: kstwobign \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.kurtosis.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.kurtosis.rst deleted file mode 100644 index 296bade8ab..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.kurtosis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.kurtosis -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: kurtosis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.kurtosistest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.kurtosistest.rst deleted file mode 100644 index 9c6095359f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.kurtosistest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.kurtosistest -======================== - -.. currentmodule:: scipy.stats - -.. autofunction:: kurtosistest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.laplace.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.laplace.rst deleted file mode 100644 index 48586faa22..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.laplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.laplace -=================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: laplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.levene.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.levene.rst deleted file mode 100644 index a5904c7f9e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.levene.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.levene -================== - -.. currentmodule:: scipy.stats - -.. autofunction:: levene \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.linregress.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.linregress.rst deleted file mode 100644 index 2e48737b75..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.linregress.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.linregress -====================== - -.. currentmodule:: scipy.stats - -.. autofunction:: linregress \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.loggamma.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.loggamma.rst deleted file mode 100644 index 4c6d09f345..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.loggamma.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.loggamma -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: loggamma \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.logistic.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.logistic.rst deleted file mode 100644 index 0b6236b2d1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.logistic.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.logistic -==================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: logistic \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.loglaplace.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.loglaplace.rst deleted file mode 100644 index 6962cfa0a5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.loglaplace.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.loglaplace -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: loglaplace \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.lognorm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.lognorm.rst deleted file mode 100644 index f6c40abcb0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.lognorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.lognorm -=================== - -.. currentmodule:: scipy.stats - -.. autodata:: lognorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.logser.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.logser.rst deleted file mode 100644 index 200becc057..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.logser.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.logser -================== - -.. currentmodule:: scipy.stats - -.. autodata:: logser \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.lomax.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.lomax.rst deleted file mode 100644 index d4e559676f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.lomax.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.lomax -================= - -.. currentmodule:: scipy.stats - -.. 
autodata:: lomax \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mannwhitneyu.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mannwhitneyu.rst deleted file mode 100644 index 171661856e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mannwhitneyu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mannwhitneyu -======================== - -.. currentmodule:: scipy.stats - -.. autofunction:: mannwhitneyu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.maxwell.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.maxwell.rst deleted file mode 100644 index ac930ccc64..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.maxwell.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.maxwell -=================== - -.. currentmodule:: scipy.stats - -.. autodata:: maxwell \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mielke.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mielke.rst deleted file mode 100644 index cd1daf2c03..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mielke.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mielke -================== - -.. currentmodule:: scipy.stats - -.. autodata:: mielke \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mode.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mode.rst deleted file mode 100644 index fbcd0f42ba..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mode.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mode -================ - -.. currentmodule:: scipy.stats - -.. 
autofunction:: mode \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.moment.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.moment.rst deleted file mode 100644 index baa639afad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.moment.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.moment -================== - -.. currentmodule:: scipy.stats - -.. autofunction:: moment \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mood.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mood.rst deleted file mode 100644 index e3b246fd59..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mood.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mood -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: mood \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.argstoarray.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.argstoarray.rst deleted file mode 100644 index f05b74a609..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.argstoarray.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.argstoarray -============================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: argstoarray \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.betai.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.betai.rst deleted file mode 100644 index 8db0e9d990..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.betai.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.betai -======================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: betai \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.chisquare.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.chisquare.rst deleted file mode 100644 index efdfb4da96..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.chisquare.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.chisquare -============================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: chisquare \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.count_tied_groups.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.count_tied_groups.rst deleted file mode 100644 index e9a1c874b1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.count_tied_groups.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.count_tied_groups -==================================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: count_tied_groups \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.describe.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.describe.rst deleted file mode 100644 index 855d25d8b3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.describe.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.describe -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: describe \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.f_oneway.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.f_oneway.rst deleted file mode 100644 index 63ea27696d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.f_oneway.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.f_oneway -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: f_oneway \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.f_value_wilks_lambda.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.f_value_wilks_lambda.rst deleted file mode 100644 index 7b1ee4281f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.f_value_wilks_lambda.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.f_value_wilks_lambda -======================================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: f_value_wilks_lambda \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.find_repeats.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.find_repeats.rst deleted file mode 100644 index 76c9ee8ba3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.find_repeats.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.find_repeats -=============================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: find_repeats \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.friedmanchisquare.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.friedmanchisquare.rst deleted file mode 100644 index e6e2c29c8c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.friedmanchisquare.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.friedmanchisquare -==================================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: friedmanchisquare \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.gmean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.gmean.rst deleted file mode 100644 index 49501fec74..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.gmean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.gmean -======================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: gmean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.hmean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.hmean.rst deleted file mode 100644 index 3c4f7a7e66..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.hmean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.hmean -======================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: hmean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kendalltau.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kendalltau.rst deleted file mode 100644 index 52a822ad44..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kendalltau.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.kendalltau -============================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: kendalltau \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kendalltau_seasonal.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kendalltau_seasonal.rst deleted file mode 100644 index 72cf8a9b3a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kendalltau_seasonal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.kendalltau_seasonal -====================================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: kendalltau_seasonal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kruskalwallis.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kruskalwallis.rst deleted file mode 100644 index 3388b5bfd8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kruskalwallis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.kruskalwallis -================================ - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: kruskalwallis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ks_twosamp.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ks_twosamp.rst deleted file mode 100644 index 2841ae627a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ks_twosamp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.ks_twosamp -============================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: ks_twosamp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kurtosis.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kurtosis.rst deleted file mode 100644 index 5aa18ce03b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kurtosis.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.kurtosis -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: kurtosis \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kurtosistest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kurtosistest.rst deleted file mode 100644 index f8640b71c3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.kurtosistest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.kurtosistest -=============================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: kurtosistest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.linregress.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.linregress.rst deleted file mode 100644 index fdc3071e8d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.linregress.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.linregress -============================= - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: linregress \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mannwhitneyu.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mannwhitneyu.rst deleted file mode 100644 index 95983d7330..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mannwhitneyu.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.mannwhitneyu -=============================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: mannwhitneyu \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mode.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mode.rst deleted file mode 100644 index 5a26ecf28c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mode.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.mode -======================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: mode \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.moment.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.moment.rst deleted file mode 100644 index 17863fb6f8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.moment.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.moment -========================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: moment \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mquantiles.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mquantiles.rst deleted file mode 100644 index 65710024c6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.mquantiles.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.mquantiles -============================= - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: mquantiles \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.msign.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.msign.rst deleted file mode 100644 index d0a66e3c14..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.msign.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.msign -======================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: msign \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.normaltest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.normaltest.rst deleted file mode 100644 index 84812a5d2c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.normaltest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.normaltest -============================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: normaltest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.obrientransform.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.obrientransform.rst deleted file mode 100644 index d4ab312a0a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.obrientransform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.obrientransform -================================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: obrientransform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.pearsonr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.pearsonr.rst deleted file mode 100644 index 1b0c9f525c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.pearsonr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.pearsonr -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: pearsonr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.plotting_positions.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.plotting_positions.rst deleted file mode 100644 index 13defdad13..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.plotting_positions.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.plotting_positions -===================================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: plotting_positions \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.pointbiserialr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.pointbiserialr.rst deleted file mode 100644 index 5387272aa8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.pointbiserialr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.pointbiserialr -================================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: pointbiserialr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.rankdata.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.rankdata.rst deleted file mode 100644 index 1ca2752bcb..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.rankdata.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.rankdata -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: rankdata \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.scoreatpercentile.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.scoreatpercentile.rst deleted file mode 100644 index def6403eac..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.scoreatpercentile.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.scoreatpercentile -==================================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: scoreatpercentile \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.sem.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.sem.rst deleted file mode 100644 index cea9cfa9bf..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.sem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.sem -====================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: sem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.signaltonoise.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.signaltonoise.rst deleted file mode 100644 index fdf742c91d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.signaltonoise.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.signaltonoise -================================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: signaltonoise \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.skew.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.skew.rst deleted file mode 100644 index 556c441c82..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.skew.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.skew -======================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: skew \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.skewtest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.skewtest.rst deleted file mode 100644 index 6f5a491de5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.skewtest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.skewtest -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: skewtest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.spearmanr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.spearmanr.rst deleted file mode 100644 index 7607185657..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.spearmanr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.spearmanr -============================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: spearmanr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.theilslopes.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.theilslopes.rst deleted file mode 100644 index 34a7d3d716..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.theilslopes.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.theilslopes -============================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: theilslopes \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.threshold.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.threshold.rst deleted file mode 100644 index 6252bfff93..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.threshold.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.threshold -============================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: threshold \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmax.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmax.rst deleted file mode 100644 index 17e34e110b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmax.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.tmax -======================= - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: tmax \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmean.rst deleted file mode 100644 index aa3876915e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.tmean -======================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: tmean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmin.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmin.rst deleted file mode 100644 index d69bb742c7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tmin.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.tmin -======================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: tmin \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trim.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trim.rst deleted file mode 100644 index c6895d9708..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trim.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.trim -======================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: trim \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trima.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trima.rst deleted file mode 100644 index 69bd785e80..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trima.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.trima -======================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: trima \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimboth.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimboth.rst deleted file mode 100644 index 3921fa8faa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimboth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.trimboth -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: trimboth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimmed_stde.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimmed_stde.rst deleted file mode 100644 index cfeb8d4400..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimmed_stde.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.trimmed_stde -=============================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: trimmed_stde \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimr.rst deleted file mode 100644 index 37daf10ad8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.trimr -======================== - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: trimr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimtail.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimtail.rst deleted file mode 100644 index 18fbe16f16..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.trimtail.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.trimtail -=========================== - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: trimtail \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tsem.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tsem.rst deleted file mode 100644 index d678d6938c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tsem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.tsem -======================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: tsem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_ind.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_ind.rst deleted file mode 100644 index b92c0cfd23..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_ind.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.ttest_ind -============================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: ttest_ind \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_onesamp.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_onesamp.rst deleted file mode 100644 index a2531053d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_onesamp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.ttest_onesamp -================================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: ttest_onesamp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_rel.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_rel.rst deleted file mode 100644 index d0def7852d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.ttest_rel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.ttest_rel -============================ - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: ttest_rel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tvar.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tvar.rst deleted file mode 100644 index 78f4835c94..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.tvar.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.tvar -======================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: tvar \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.variation.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.variation.rst deleted file mode 100644 index f2e2fc02f4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.variation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.variation -============================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: variation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.winsorize.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.winsorize.rst deleted file mode 100644 index f4b08f18f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.winsorize.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.winsorize -============================ - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: winsorize \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.zmap.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.zmap.rst deleted file mode 100644 index 3e9ba66bf7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.zmap.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.zmap -======================= - -.. currentmodule:: scipy.stats.mstats - -.. 
autofunction:: zmap \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.zscore.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.zscore.rst deleted file mode 100644 index 3ecf85814b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.mstats.zscore.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.mstats.zscore -========================= - -.. currentmodule:: scipy.stats.mstats - -.. autofunction:: zscore \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.nakagami.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.nakagami.rst deleted file mode 100644 index 6ca759c448..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.nakagami.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.nakagami -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: nakagami \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.nbinom.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.nbinom.rst deleted file mode 100644 index 8e143629a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.nbinom.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.nbinom -================== - -.. currentmodule:: scipy.stats - -.. autodata:: nbinom \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ncf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ncf.rst deleted file mode 100644 index 6026255a5f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ncf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ncf -=============== - -.. currentmodule:: scipy.stats - -.. 
autodata:: ncf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.nct.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.nct.rst deleted file mode 100644 index eb059fd55c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.nct.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.nct -=============== - -.. currentmodule:: scipy.stats - -.. autodata:: nct \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ncx2.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ncx2.rst deleted file mode 100644 index dcef0f4729..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ncx2.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ncx2 -================ - -.. currentmodule:: scipy.stats - -.. autodata:: ncx2 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.norm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.norm.rst deleted file mode 100644 index 130c69a412..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.norm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.norm -================ - -.. currentmodule:: scipy.stats - -.. autodata:: norm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.normaltest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.normaltest.rst deleted file mode 100644 index fa71676a5a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.normaltest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.normaltest -====================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: normaltest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.obrientransform.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.obrientransform.rst deleted file mode 100644 index dc899c3042..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.obrientransform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.obrientransform -=========================== - -.. currentmodule:: scipy.stats - -.. autofunction:: obrientransform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.oneway.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.oneway.rst deleted file mode 100644 index c2618d3312..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.oneway.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.oneway -================== - -.. currentmodule:: scipy.stats - -.. autofunction:: oneway \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.pareto.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.pareto.rst deleted file mode 100644 index 510cc15f89..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.pareto.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.pareto -================== - -.. currentmodule:: scipy.stats - -.. autodata:: pareto \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.pearsonr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.pearsonr.rst deleted file mode 100644 index 8ce80e464a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.pearsonr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.pearsonr -==================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: pearsonr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.percentileofscore.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.percentileofscore.rst deleted file mode 100644 index 2d9339c79e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.percentileofscore.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.percentileofscore -============================= - -.. currentmodule:: scipy.stats - -.. autofunction:: percentileofscore \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.planck.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.planck.rst deleted file mode 100644 index e00eeb6168..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.planck.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.planck -================== - -.. currentmodule:: scipy.stats - -.. autodata:: planck \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.pointbiserialr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.pointbiserialr.rst deleted file mode 100644 index a76b67911a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.pointbiserialr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.pointbiserialr -========================== - -.. currentmodule:: scipy.stats - -.. autofunction:: pointbiserialr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.poisson.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.poisson.rst deleted file mode 100644 index ddea0bcb0c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.poisson.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.poisson -=================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: poisson \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.powerlaw.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.powerlaw.rst deleted file mode 100644 index 41d3a3207c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.powerlaw.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.powerlaw -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: powerlaw \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.powerlognorm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.powerlognorm.rst deleted file mode 100644 index cd7ca9237d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.powerlognorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.powerlognorm -======================== - -.. currentmodule:: scipy.stats - -.. autodata:: powerlognorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.powernorm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.powernorm.rst deleted file mode 100644 index ea8661fc09..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.powernorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.powernorm -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: powernorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ppcc_max.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ppcc_max.rst deleted file mode 100644 index a4ff9405e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ppcc_max.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ppcc_max -==================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: ppcc_max \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ppcc_plot.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ppcc_plot.rst deleted file mode 100644 index 759cf33d7c..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ppcc_plot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ppcc_plot -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: ppcc_plot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.probplot.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.probplot.rst deleted file mode 100644 index f923c68ea4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.probplot.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.probplot -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: probplot \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.randint.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.randint.rst deleted file mode 100644 index 99bbbe1828..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.randint.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.randint -=================== - -.. currentmodule:: scipy.stats - -.. autodata:: randint \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ranksums.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ranksums.rst deleted file mode 100644 index 495049818e..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ranksums.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ranksums -==================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: ranksums \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rayleigh.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rayleigh.rst deleted file mode 100644 index df0f6e3c33..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rayleigh.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rayleigh -==================== - -.. currentmodule:: scipy.stats - -.. autodata:: rayleigh \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rdist.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rdist.rst deleted file mode 100644 index 8bca6dd0f5..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rdist.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rdist -================= - -.. currentmodule:: scipy.stats - -.. autodata:: rdist \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.recipinvgauss.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.recipinvgauss.rst deleted file mode 100644 index f36a3b084b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.recipinvgauss.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.recipinvgauss -========================= - -.. currentmodule:: scipy.stats - -.. autodata:: recipinvgauss \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.reciprocal.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.reciprocal.rst deleted file mode 100644 index 3b1b2ab1ad..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.reciprocal.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.reciprocal -====================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: reciprocal \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.relfreq.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.relfreq.rst deleted file mode 100644 index 08bc4615f9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.relfreq.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.relfreq -=================== - -.. currentmodule:: scipy.stats - -.. autofunction:: relfreq \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rice.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rice.rst deleted file mode 100644 index 25b53f968b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rice.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rice -================ - -.. currentmodule:: scipy.stats - -.. autodata:: rice \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.__call__.rst deleted file mode 100644 index 317ffbf7b1..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.__call__ -================================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.cdf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.cdf.rst deleted file mode 100644 index 1d8e1acdef..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.cdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.cdf -============================= - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_continuous.cdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.entropy.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.entropy.rst deleted file mode 100644 index 98f0864f27..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.entropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.entropy -================================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.entropy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.est_loc_scale.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.est_loc_scale.rst deleted file mode 100644 index 0214fda21f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.est_loc_scale.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.est_loc_scale -======================================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.est_loc_scale \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.expect.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.expect.rst deleted file mode 100644 index 31a3a9c4e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.expect.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.expect -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.expect \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.fit.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.fit.rst deleted file mode 100644 index 4e202d12fa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.fit.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.fit -============================= - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_continuous.fit \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.fit_loc_scale.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.fit_loc_scale.rst deleted file mode 100644 index ed01b1fe07..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.fit_loc_scale.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.fit_loc_scale -======================================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.fit_loc_scale \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.freeze.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.freeze.rst deleted file mode 100644 index 1b8c295c81..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.freeze.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.freeze -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.freeze \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.interval.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.interval.rst deleted file mode 100644 index d5ed413820..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.interval.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.interval -================================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.interval \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.isf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.isf.rst deleted file mode 100644 index ed3b7a5429..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.isf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.isf -============================= - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_continuous.isf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logcdf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logcdf.rst deleted file mode 100644 index dda77d3a19..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logcdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.logcdf -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.logcdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logpdf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logpdf.rst deleted file mode 100644 index b873613490..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logpdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.logpdf -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.logpdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logsf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logsf.rst deleted file mode 100644 index 433905b92d..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.logsf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.logsf -=============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.logsf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.mean.rst deleted file mode 100644 index a3b4cbf4f2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.mean -============================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_continuous.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.median.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.median.rst deleted file mode 100644 index 6849de28a2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.median.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.median -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.median \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.moment.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.moment.rst deleted file mode 100644 index e4f6e59ae4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.moment.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.moment -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.moment \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.nnlf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.nnlf.rst deleted file mode 100644 index 3233c18fa6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.nnlf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.nnlf -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.nnlf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.pdf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.pdf.rst deleted file mode 100644 index f61b805da9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.pdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.pdf -============================= - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_continuous.pdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.ppf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.ppf.rst deleted file mode 100644 index ab957c167a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.ppf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.ppf -============================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.ppf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.rst deleted file mode 100644 index 240411163f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.rst +++ /dev/null @@ -1,42 +0,0 @@ -scipy.stats.rv_continuous -========================= - -.. currentmodule:: scipy.stats - -.. autoclass:: rv_continuous - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. 
autosummary:: - :toctree: - - rv_continuous.__call__ - rv_continuous.cdf - rv_continuous.entropy - rv_continuous.est_loc_scale - rv_continuous.expect - rv_continuous.fit - rv_continuous.fit_loc_scale - rv_continuous.freeze - rv_continuous.interval - rv_continuous.isf - rv_continuous.logcdf - rv_continuous.logpdf - rv_continuous.logsf - rv_continuous.mean - rv_continuous.median - rv_continuous.moment - rv_continuous.nnlf - rv_continuous.pdf - rv_continuous.ppf - rv_continuous.rvs - rv_continuous.sf - rv_continuous.stats - rv_continuous.std - rv_continuous.var - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.rvs.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.rvs.rst deleted file mode 100644 index 433136fe23..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.rvs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.rvs -============================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.rvs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.sf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.sf.rst deleted file mode 100644 index ffa28dde48..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.sf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.sf -============================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.sf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.stats.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.stats.rst deleted file mode 100644 index e6d731c049..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.stats.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.stats -=============================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_continuous.stats \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.std.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.std.rst deleted file mode 100644 index 3f1bf65772..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.std.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.std -============================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.std \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.var.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.var.rst deleted file mode 100644 index 86bc548171..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_continuous.var.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_continuous.var -============================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_continuous.var \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.__call__.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.__call__.rst deleted file mode 100644 index c219e412d9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.__call__.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.__call__ -================================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.__call__ \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.cdf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.cdf.rst deleted file mode 100644 index 9fa8f12690..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.cdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.cdf -=========================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_discrete.cdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.entropy.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.entropy.rst deleted file mode 100644 index bb0b019951..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.entropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.entropy -=============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.entropy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.expect.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.expect.rst deleted file mode 100644 index 6c937627d7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.expect.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.expect -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.expect \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.freeze.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.freeze.rst deleted file mode 100644 index f204fa9bc6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.freeze.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.freeze -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.freeze \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.interval.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.interval.rst deleted file mode 100644 index 17399f8561..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.interval.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.interval -================================ - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_discrete.interval \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.isf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.isf.rst deleted file mode 100644 index 07ccd7e7ab..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.isf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.isf -=========================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.isf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logcdf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logcdf.rst deleted file mode 100644 index 20f5083013..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logcdf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.logcdf -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.logcdf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logpmf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logpmf.rst deleted file mode 100644 index ebc45dece7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logpmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.logpmf -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.logpmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logsf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logsf.rst deleted file mode 100644 index 4cfea1589a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.logsf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.logsf -============================= - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_discrete.logsf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.mean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.mean.rst deleted file mode 100644 index 8f3df9d679..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.mean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.mean -============================ - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.mean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.median.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.median.rst deleted file mode 100644 index a835c862d6..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.median.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.median -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.median \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.moment.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.moment.rst deleted file mode 100644 index 5dd6451313..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.moment.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.moment -============================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.moment \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.pmf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.pmf.rst deleted file mode 100644 index a406786f89..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.pmf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.pmf -=========================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_discrete.pmf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.ppf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.ppf.rst deleted file mode 100644 index cad335a600..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.ppf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.ppf -=========================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.ppf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.rst deleted file mode 100644 index 2616761a09..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.rst +++ /dev/null @@ -1,38 +0,0 @@ -scipy.stats.rv_discrete -======================= - -.. currentmodule:: scipy.stats - -.. autoclass:: rv_discrete - - - - .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. - .. autosummary:: - :toctree: - - rv_discrete.__call__ - rv_discrete.cdf - rv_discrete.entropy - rv_discrete.expect - rv_discrete.freeze - rv_discrete.interval - rv_discrete.isf - rv_discrete.logcdf - rv_discrete.logpmf - rv_discrete.logsf - rv_discrete.mean - rv_discrete.median - rv_discrete.moment - rv_discrete.pmf - rv_discrete.ppf - rv_discrete.rvs - rv_discrete.sf - rv_discrete.stats - rv_discrete.std - rv_discrete.var - - - - - diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.rvs.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.rvs.rst deleted file mode 100644 index f5839b5303..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.rvs.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.rvs -=========================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_discrete.rvs \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.sf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.sf.rst deleted file mode 100644 index 91c48c7c22..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.sf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.sf -========================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.sf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.stats.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.stats.rst deleted file mode 100644 index b74dcc56db..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.stats.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.stats -============================= - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.stats \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.std.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.std.rst deleted file mode 100644 index 6d1f83cd26..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.std.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.std -=========================== - -.. currentmodule:: scipy.stats - -.. automethod:: rv_discrete.std \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.var.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.var.rst deleted file mode 100644 index 914ef7a28b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.rv_discrete.var.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.rv_discrete.var -=========================== - -.. currentmodule:: scipy.stats - -.. 
automethod:: rv_discrete.var \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.scoreatpercentile.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.scoreatpercentile.rst deleted file mode 100644 index c324eb7183..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.scoreatpercentile.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.scoreatpercentile -============================= - -.. currentmodule:: scipy.stats - -.. autofunction:: scoreatpercentile \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.sem.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.sem.rst deleted file mode 100644 index d2093746a3..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.sem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.sem -=============== - -.. currentmodule:: scipy.stats - -.. autofunction:: sem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.semicircular.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.semicircular.rst deleted file mode 100644 index ad252d8a72..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.semicircular.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.semicircular -======================== - -.. currentmodule:: scipy.stats - -.. autodata:: semicircular \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.shapiro.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.shapiro.rst deleted file mode 100644 index cf0c09736f..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.shapiro.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.shapiro -=================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: shapiro \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.signaltonoise.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.signaltonoise.rst deleted file mode 100644 index 1055c4e870..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.signaltonoise.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.signaltonoise -========================= - -.. currentmodule:: scipy.stats - -.. autofunction:: signaltonoise \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.skew.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.skew.rst deleted file mode 100644 index 113fe23329..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.skew.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.skew -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: skew \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.skewtest.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.skewtest.rst deleted file mode 100644 index 6e557b0e94..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.skewtest.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.skewtest -==================== - -.. currentmodule:: scipy.stats - -.. autofunction:: skewtest \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.spearmanr.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.spearmanr.rst deleted file mode 100644 index 165caa6d11..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.spearmanr.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.spearmanr -===================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: spearmanr \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.t.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.t.rst deleted file mode 100644 index e77f8c77f9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.t.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.t -============= - -.. currentmodule:: scipy.stats - -.. autodata:: t \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.threshold.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.threshold.rst deleted file mode 100644 index 238ea6b200..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.threshold.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.threshold -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: threshold \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tiecorrect.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tiecorrect.rst deleted file mode 100644 index 0bf5e70214..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tiecorrect.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tiecorrect -====================== - -.. currentmodule:: scipy.stats - -.. autofunction:: tiecorrect \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tmax.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tmax.rst deleted file mode 100644 index 153e538e4a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tmax.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tmax -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: tmax \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tmean.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tmean.rst deleted file mode 100644 index 98df2a926b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tmean.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tmean -================= - -.. 
currentmodule:: scipy.stats - -.. autofunction:: tmean \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tmin.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tmin.rst deleted file mode 100644 index 2270ae8e8a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tmin.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tmin -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: tmin \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.triang.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.triang.rst deleted file mode 100644 index 736b8674ef..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.triang.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.triang -================== - -.. currentmodule:: scipy.stats - -.. autodata:: triang \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.trim1.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.trim1.rst deleted file mode 100644 index 396a79d5b4..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.trim1.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.trim1 -================= - -.. currentmodule:: scipy.stats - -.. autofunction:: trim1 \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.trimboth.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.trimboth.rst deleted file mode 100644 index afae5bc3ba..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.trimboth.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.trimboth -==================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: trimboth \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.truncexpon.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.truncexpon.rst deleted file mode 100644 index d1c3b186a8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.truncexpon.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.truncexpon -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: truncexpon \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.truncnorm.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.truncnorm.rst deleted file mode 100644 index cb590ae3ef..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.truncnorm.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.truncnorm -===================== - -.. currentmodule:: scipy.stats - -.. autodata:: truncnorm \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tsem.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tsem.rst deleted file mode 100644 index 04f762ade9..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tsem.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tsem -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: tsem \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tstd.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tstd.rst deleted file mode 100644 index b98939eef2..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tstd.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tstd -================ - -.. currentmodule:: scipy.stats - -.. 
autofunction:: tstd \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_1samp.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_1samp.rst deleted file mode 100644 index 6597774b63..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_1samp.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ttest_1samp -======================= - -.. currentmodule:: scipy.stats - -.. autofunction:: ttest_1samp \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_ind.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_ind.rst deleted file mode 100644 index 8e6f6eee31..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_ind.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ttest_ind -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: ttest_ind \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_rel.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_rel.rst deleted file mode 100644 index f22f26bf6b..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.ttest_rel.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.ttest_rel -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: ttest_rel \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tukeylambda.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tukeylambda.rst deleted file mode 100644 index 86ff7d5fcc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tukeylambda.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tukeylambda -======================= - -.. currentmodule:: scipy.stats - -.. 
autodata:: tukeylambda \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.tvar.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.tvar.rst deleted file mode 100644 index 63144d5681..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.tvar.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.tvar -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: tvar \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.uniform.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.uniform.rst deleted file mode 100644 index ddcb8c836a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.uniform.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.uniform -=================== - -.. currentmodule:: scipy.stats - -.. autodata:: uniform \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.variation.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.variation.rst deleted file mode 100644 index 9ca680d8aa..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.variation.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.variation -===================== - -.. currentmodule:: scipy.stats - -.. autofunction:: variation \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.vonmises.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.vonmises.rst deleted file mode 100644 index 878ab4d6e0..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.vonmises.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.vonmises -==================== - -.. currentmodule:: scipy.stats - -.. 
autodata:: vonmises \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.wald.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.wald.rst deleted file mode 100644 index e59fc485cc..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.wald.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.wald -================ - -.. currentmodule:: scipy.stats - -.. autodata:: wald \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.weibull_max.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.weibull_max.rst deleted file mode 100644 index 32af390902..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.weibull_max.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.weibull_max -======================= - -.. currentmodule:: scipy.stats - -.. autodata:: weibull_max \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.weibull_min.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.weibull_min.rst deleted file mode 100644 index 6032447945..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.weibull_min.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.weibull_min -======================= - -.. currentmodule:: scipy.stats - -.. autodata:: weibull_min \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.wilcoxon.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.wilcoxon.rst deleted file mode 100644 index 21ab112122..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.wilcoxon.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.wilcoxon -==================== - -.. currentmodule:: scipy.stats - -.. 
autofunction:: wilcoxon \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.wrapcauchy.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.wrapcauchy.rst deleted file mode 100644 index fb2ac0c2e7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.wrapcauchy.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.wrapcauchy -====================== - -.. currentmodule:: scipy.stats - -.. autodata:: wrapcauchy \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.zipf.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.zipf.rst deleted file mode 100644 index 881860c4a7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.zipf.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.zipf -================ - -.. currentmodule:: scipy.stats - -.. autodata:: zipf \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.zmap.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.zmap.rst deleted file mode 100644 index fe3b26be74..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.zmap.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.zmap -================ - -.. currentmodule:: scipy.stats - -.. autofunction:: zmap \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.stats.zscore.rst b/scipy-0.10.1/doc/source/generated/scipy.stats.zscore.rst deleted file mode 100644 index b5dbc63c76..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.stats.zscore.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.stats.zscore -================== - -.. currentmodule:: scipy.stats - -.. autofunction:: zscore \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.weave.blitz.rst b/scipy-0.10.1/doc/source/generated/scipy.weave.blitz.rst deleted file mode 100644 index 41ffd88cb8..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.weave.blitz.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.weave.blitz -================= - -.. 
currentmodule:: scipy.weave - -.. autofunction:: blitz \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.weave.ext_tools.rst b/scipy-0.10.1/doc/source/generated/scipy.weave.ext_tools.rst deleted file mode 100644 index cb44d7506a..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.weave.ext_tools.rst +++ /dev/null @@ -1,35 +0,0 @@ -scipy.weave.ext_tools -===================== - -.. automodule:: scipy.weave.ext_tools - - - - .. rubric:: Functions - - .. autosummary:: - - assign_variable_types - downcast - format_error_msg - generate_file_name - generate_module - indent - - - - - - .. rubric:: Classes - - .. autosummary:: - - ext_function - ext_function_from_specs - ext_module - - - - - - \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/generated/scipy.weave.inline.rst b/scipy-0.10.1/doc/source/generated/scipy.weave.inline.rst deleted file mode 100644 index 3e53730bc7..0000000000 --- a/scipy-0.10.1/doc/source/generated/scipy.weave.inline.rst +++ /dev/null @@ -1,6 +0,0 @@ -scipy.weave.inline -================== - -.. currentmodule:: scipy.weave - -.. autofunction:: inline \ No newline at end of file diff --git a/scipy-0.10.1/doc/source/index.rst b/scipy-0.10.1/doc/source/index.rst deleted file mode 100644 index 01cbd444f1..0000000000 --- a/scipy-0.10.1/doc/source/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -SciPy -===== - -:Release: |version| -:Date: |today| - -SciPy (pronounced "Sigh Pie") is open-source software for mathematics, -science, and engineering. - -.. toctree:: - :maxdepth: 2 - - tutorial/index - -.. toctree:: - :maxdepth: 1 - - api - release - -Reference ---------- - -.. 
toctree:: - :maxdepth: 1 - - cluster - constants - fftpack - integrate - interpolate - io - linalg - maxentropy - misc - ndimage - odr - optimize - signal - sparse - sparse.linalg - spatial - special - stats - stats.mstats - weave diff --git a/scipy-0.10.1/doc/source/integrate.rst b/scipy-0.10.1/doc/source/integrate.rst deleted file mode 100644 index ae1beab3b4..0000000000 --- a/scipy-0.10.1/doc/source/integrate.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.integrate diff --git a/scipy-0.10.1/doc/source/interpolate.rst b/scipy-0.10.1/doc/source/interpolate.rst deleted file mode 100644 index 8c7658cbba..0000000000 --- a/scipy-0.10.1/doc/source/interpolate.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.interpolate diff --git a/scipy-0.10.1/doc/source/io.arff.rst b/scipy-0.10.1/doc/source/io.arff.rst deleted file mode 100644 index b1a06449bd..0000000000 --- a/scipy-0.10.1/doc/source/io.arff.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.io.arff diff --git a/scipy-0.10.1/doc/source/io.rst b/scipy-0.10.1/doc/source/io.rst deleted file mode 100644 index 3d7a3ecc7e..0000000000 --- a/scipy-0.10.1/doc/source/io.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. automodule:: scipy.io - -.. toctree:: - :hidden: - - scipy.io.arff diff --git a/scipy-0.10.1/doc/source/linalg.rst b/scipy-0.10.1/doc/source/linalg.rst deleted file mode 100644 index 27d2ce4ab2..0000000000 --- a/scipy-0.10.1/doc/source/linalg.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.linalg diff --git a/scipy-0.10.1/doc/source/maxentropy.rst b/scipy-0.10.1/doc/source/maxentropy.rst deleted file mode 100644 index 072c8326e9..0000000000 --- a/scipy-0.10.1/doc/source/maxentropy.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.maxentropy diff --git a/scipy-0.10.1/doc/source/misc.rst b/scipy-0.10.1/doc/source/misc.rst deleted file mode 100644 index 9651208587..0000000000 --- a/scipy-0.10.1/doc/source/misc.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
automodule:: scipy.misc diff --git a/scipy-0.10.1/doc/source/ndimage.rst b/scipy-0.10.1/doc/source/ndimage.rst deleted file mode 100644 index e2b416fad3..0000000000 --- a/scipy-0.10.1/doc/source/ndimage.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.ndimage diff --git a/scipy-0.10.1/doc/source/odr.rst b/scipy-0.10.1/doc/source/odr.rst deleted file mode 100644 index 5c45b88a0b..0000000000 --- a/scipy-0.10.1/doc/source/odr.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.odr diff --git a/scipy-0.10.1/doc/source/optimize.nonlin.rst b/scipy-0.10.1/doc/source/optimize.nonlin.rst deleted file mode 100644 index 2794ef46e6..0000000000 --- a/scipy-0.10.1/doc/source/optimize.nonlin.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.optimize.nonlin diff --git a/scipy-0.10.1/doc/source/optimize.rst b/scipy-0.10.1/doc/source/optimize.rst deleted file mode 100644 index d35ead4db8..0000000000 --- a/scipy-0.10.1/doc/source/optimize.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. automodule:: scipy.optimize - -.. toctree:: - :hidden: - :maxdepth: 1 - - optimize.nonlin diff --git a/scipy-0.10.1/doc/source/release.0.10.0.rst b/scipy-0.10.1/doc/source/release.0.10.0.rst deleted file mode 100644 index 5e47a00b02..0000000000 --- a/scipy-0.10.1/doc/source/release.0.10.0.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../release/0.10.0-notes.rst diff --git a/scipy-0.10.1/doc/source/release.0.10.1.rst b/scipy-0.10.1/doc/source/release.0.10.1.rst deleted file mode 100644 index c9db536f4f..0000000000 --- a/scipy-0.10.1/doc/source/release.0.10.1.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../release/0.10.1-notes.rst diff --git a/scipy-0.10.1/doc/source/release.0.7.0.rst b/scipy-0.10.1/doc/source/release.0.7.0.rst deleted file mode 100644 index d9335eaa67..0000000000 --- a/scipy-0.10.1/doc/source/release.0.7.0.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
include:: ../release/0.7.0-notes.rst diff --git a/scipy-0.10.1/doc/source/release.0.7.1.rst b/scipy-0.10.1/doc/source/release.0.7.1.rst deleted file mode 100644 index 8cddca7b00..0000000000 --- a/scipy-0.10.1/doc/source/release.0.7.1.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../release/0.7.1-notes.rst diff --git a/scipy-0.10.1/doc/source/release.0.7.2.rst b/scipy-0.10.1/doc/source/release.0.7.2.rst deleted file mode 100644 index 9c6734185f..0000000000 --- a/scipy-0.10.1/doc/source/release.0.7.2.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../release/0.7.2-notes.rst diff --git a/scipy-0.10.1/doc/source/release.0.8.0.rst b/scipy-0.10.1/doc/source/release.0.8.0.rst deleted file mode 100644 index 0d0b375853..0000000000 --- a/scipy-0.10.1/doc/source/release.0.8.0.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../release/0.8.0-notes.rst diff --git a/scipy-0.10.1/doc/source/release.0.9.0.rst b/scipy-0.10.1/doc/source/release.0.9.0.rst deleted file mode 100644 index c5acfbba79..0000000000 --- a/scipy-0.10.1/doc/source/release.0.9.0.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../release/0.9.0-notes.rst diff --git a/scipy-0.10.1/doc/source/release.rst b/scipy-0.10.1/doc/source/release.rst deleted file mode 100644 index 1ff6d9062c..0000000000 --- a/scipy-0.10.1/doc/source/release.rst +++ /dev/null @@ -1,14 +0,0 @@ -************* -Release Notes -************* - -.. toctree:: - :maxdepth: 1 - - release.0.10.1 - release.0.10.0 - release.0.9.0 - release.0.8.0 - release.0.7.2 - release.0.7.1 - release.0.7.0 diff --git a/scipy-0.10.1/doc/source/signal.rst b/scipy-0.10.1/doc/source/signal.rst deleted file mode 100644 index 3576cd3979..0000000000 --- a/scipy-0.10.1/doc/source/signal.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.signal diff --git a/scipy-0.10.1/doc/source/sparse.linalg.rst b/scipy-0.10.1/doc/source/sparse.linalg.rst deleted file mode 100644 index 0184be6381..0000000000 --- a/scipy-0.10.1/doc/source/sparse.linalg.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
automodule:: scipy.sparse.linalg diff --git a/scipy-0.10.1/doc/source/sparse.rst b/scipy-0.10.1/doc/source/sparse.rst deleted file mode 100644 index d5240e9e64..0000000000 --- a/scipy-0.10.1/doc/source/sparse.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.sparse diff --git a/scipy-0.10.1/doc/source/spatial.distance.rst b/scipy-0.10.1/doc/source/spatial.distance.rst deleted file mode 100644 index 3d0ab6ae71..0000000000 --- a/scipy-0.10.1/doc/source/spatial.distance.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.spatial.distance diff --git a/scipy-0.10.1/doc/source/spatial.rst b/scipy-0.10.1/doc/source/spatial.rst deleted file mode 100644 index 41efc94998..0000000000 --- a/scipy-0.10.1/doc/source/spatial.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. automodule:: scipy.spatial - -.. toctree:: - :hidden: - - spatial.distance diff --git a/scipy-0.10.1/doc/source/special.rst b/scipy-0.10.1/doc/source/special.rst deleted file mode 100644 index 9fc2332a84..0000000000 --- a/scipy-0.10.1/doc/source/special.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.special diff --git a/scipy-0.10.1/doc/source/stats.mstats.rst b/scipy-0.10.1/doc/source/stats.mstats.rst deleted file mode 100644 index 88cf83195d..0000000000 --- a/scipy-0.10.1/doc/source/stats.mstats.rst +++ /dev/null @@ -1 +0,0 @@ -.. automodule:: scipy.stats.mstats diff --git a/scipy-0.10.1/doc/source/stats.rst b/scipy-0.10.1/doc/source/stats.rst deleted file mode 100644 index d07dd6333f..0000000000 --- a/scipy-0.10.1/doc/source/stats.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. automodule:: scipy.stats - -.. toctree:: - :hidden: - - scipy.stats.mstats diff --git a/scipy-0.10.1/doc/source/tutorial/arpack.rst b/scipy-0.10.1/doc/source/tutorial/arpack.rst deleted file mode 100644 index 5f3bedd493..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/arpack.rst +++ /dev/null @@ -1,203 +0,0 @@ -Sparse Eigenvalue Problems with ARPACK -====================================== - -.. sectionauthor:: Jake Vanderplas - -.. 
currentmodule:: scipy.sparse.linalg - - -Introduction ------------- -ARPACK is a Fortran package which provides routines for quickly finding a few -eigenvalues/eigenvectors of large sparse matrices. In order to find these -solutions, it requires only left-multiplication by the matrix in question. -This operation is performed through a *reverse-communication* interface. The -result of this structure is that ARPACK is able to find eigenvalues and -eigenvectors of any linear function mapping a vector to a vector. - -All of the functionality provided in ARPACK is contained within the two -high-level interfaces :func:`scipy.sparse.linalg.eigs` and -:func:`scipy.sparse.linalg.eigsh`. :func:`eigs` -provides interfaces to find the -eigenvalues/vectors of real or complex nonsymmetric square matrices, while -:func:`eigsh` provides interfaces for real-symmetric or complex-hermitian -matrices. - - -Basic Functionality -------------------- -ARPACK can solve either standard eigenvalue problems of the form - -.. math:: - A \mathbf{x} = \lambda \mathbf{x} - -or general eigenvalue problems of the form - -.. math:: - A \mathbf{x} = \lambda M \mathbf{x} - -The power of ARPACK is that it can compute only a specified subset of -eigenvalue/eigenvector pairs. This is accomplished through the keyword -``which``. 
The following values of ``which`` are available: - -* ``which = 'LM'`` : Eigenvectors with largest magnitude (``eigs``, ``eigsh``) -* ``which = 'SM'`` : Eigenvectors with smallest magnitude (``eigs``, ``eigsh``) -* ``which = 'LR'`` : Eigenvectors with largest real part (``eigs``) -* ``which = 'SR'`` : Eigenvectors with smallest real part (``eigs``) -* ``which = 'LI'`` : Eigenvectors with largest imaginary part (``eigs``) -* ``which = 'SI'`` : Eigenvectors with smallest imaginary part (``eigs``) -* ``which = 'LA'`` : Eigenvectors with largest amplitude (``eigsh``) -* ``which = 'SA'`` : Eigenvectors with smallest amplitude (``eigsh``) -* ``which = 'BE'`` : Eigenvectors from both ends of the spectrum (``eigsh``) - -Note that ARPACK is generally better at finding extremal eigenvalues: that -is, eigenvalues with large magnitudes. In particular, using ``which = 'SM'`` -may lead to slow execution time and/or anomalous results. A better approach -is to use *shift-invert mode*. - - -Shift-Invert Mode ------------------ -Shift invert mode relies on the following observation. For the generalized -eigenvalue problem - -.. math:: - A \mathbf{x} = \lambda M \mathbf{x} - -it can be shown that - -.. math:: - (A - \sigma M)^{-1} M \mathbf{x} = \nu \mathbf{x} - -where - -.. math:: - \nu = \frac{1}{\lambda - \sigma} - - -Examples --------- -Imagine you'd like to find the smallest and largest eigenvalues and the -corresponding eigenvectors for a large matrix. ARPACK can handle many -forms of input: dense matrices such as :func:`numpy.ndarray` instances, sparse -matrices such as :func:`scipy.sparse.csr_matrix`, or a general linear operator -derived from :func:`scipy.sparse.linalg.LinearOperator`. For this example, for -simplicity, we'll construct a symmetric, positive-definite matrix. 
- - >>> import numpy as np - >>> from scipy.linalg import eigh - >>> from scipy.sparse.linalg import eigsh - >>> np.set_printoptions(suppress=True) - >>> - >>> np.random.seed(0) - >>> X = np.random.random((100,100)) - 0.5 - >>> X = np.dot(X, X.T) #create a symmetric matrix - -We now have a symmetric matrix ``X`` with which to test the routines. First -compute a standard eigenvalue decomposition using ``eigh``: - - >>> evals_all, evecs_all = eigh(X) - -As the dimension of ``X`` grows, this routine becomes very slow. Especially -if only a few eigenvectors and eigenvalues are needed, ``ARPACK`` can be a -better option. First let's compute the largest eigenvalues (``which = 'LM'``) -of ``X`` and compare them to the known results: - - >>> evals_large, evecs_large = eigsh(X, 3, which='LM') - >>> print evals_all[-3:] - [ 29.1446102 30.05821805 31.19467646] - >>> print evals_large - [ 29.1446102 30.05821805 31.19467646] - >>> print np.dot(evecs_large.T, evecs_all[:,-3:]) - [[-1. 0. 0.] - [ 0. 1. 0.] - [-0. 0. -1.]] - -The results are as expected. ARPACK recovers the desired eigenvalues, and they -match the previously known results. Furthermore, the eigenvectors are -orthogonal, as we'd expect. Now let's attempt to solve for the eigenvalues -with smallest magnitude: - - >>> evals_small, evecs_small = eigsh(X, 3, which='SM') - scipy.sparse.linalg.eigen.arpack.arpack.ArpackNoConvergence: - ARPACK error -1: No convergence (1001 iterations, 0/3 eigenvectors converged) - -Oops. We see that as mentioned above, ``ARPACK`` is not quite as adept at -finding small eigenvalues. There are a few ways this problem can be -addressed. 
We could increase the tolerance (``tol``) to lead to faster -convergence: - - >>> evals_small, evecs_small = eigsh(X, 3, which='SM', tol=1E-2) - >>> print evals_all[:3] - [ 0.0003783 0.00122714 0.00715878] - >>> print evals_small - [ 0.00037831 0.00122714 0.00715881] - >>> print np.dot(evecs_small.T, evecs_all[:,:3]) - [[ 0.99999999 0.00000024 -0.00000049] - [-0.00000023 0.99999999 0.00000056] - [ 0.00000031 -0.00000037 0.99999852]] - -This works, but we lose the precision in the results. Another option is -to increase the maximum number of iterations (``maxiter``) from 1000 to 5000: - - >>> evals_small, evecs_small = eigsh(X, 3, which='SM', maxiter=5000) - >>> print evals_all[:3] - [ 0.0003783 0.00122714 0.00715878] - >>> print evals_small - [ 0.0003783 0.00122714 0.00715878] - >>> print np.dot(evecs_small.T, evecs_all[:,:3]) - [[ 1. 0. 0.] - [-0. 1. 0.] - [ 0. 0. -1.]] - -We get the results we'd hoped for, but the computation time is much longer. -Fortunately, ``ARPACK`` contains a mode that allows quick determination of -non-external eigenvalues: *shift-invert mode*. As mentioned above, this -mode involves transforming the eigenvalue problem to an equivalent problem -with different eigenvalues. In this case, we hope to find eigenvalues near -zero, so we'll choose ``sigma = 0``. The transformed eigenvalues will -then satisfy :math:`\nu = 1/(\sigma - \lambda) = 1/\lambda`, so our -small eigenvalues :math:`\lambda` become large eigenvalues :math:`\nu`. - - >>> evals_small, evecs_small = eigsh(X, 3, sigma=0, which='LM') - >>> print evals_all[:3] - [ 0.0003783 0.00122714 0.00715878] - >>> print evals_small - [ 0.0003783 0.00122714 0.00715878] - >>> print np.dot(evecs_small.T, evecs_all[:,:3]) - [[ 1. 0. 0.] - [ 0. -1. -0.] - [-0. -0. 1.]] - -We get the results we were hoping for, with much less computational time. -Note that the transformation from :math:`\nu \to \lambda` takes place -entirely in the background. The user need not worry about the details. 
- -The shift-invert mode provides more than just a fast way to obtain a few -small eigenvalues. Say you -desire to find internal eigenvalues and eigenvectors, e.g. those nearest to -:math:`\lambda = 1`. Simply set ``sigma = 1`` and ARPACK takes care of -the rest: - - >>> evals_mid, evecs_mid = eigsh(X, 3, sigma=1, which='LM') - >>> i_sort = np.argsort(abs(1. / (1 - evals_all)))[-3:] - >>> print evals_all[i_sort] - [ 1.16577199 0.85081388 1.06642272] - >>> print evals_mid - [ 0.85081388 1.06642272 1.16577199] - >>> print np.dot(evecs_mid.T, evecs_all[:,i_sort]) - [[-0. 1. 0.] - [-0. -0. 1.] - [ 1. 0. 0.]] - -The eigenvalues come out in a different order, but they're all there. -Note that the shift-invert mode requires the internal solution of a matrix -inverse. This is taken care of automatically by ``eigsh`` and `eigs`, -but the operation can also be specified by the user. See the docstring of -:func:`scipy.sparse.linalg.eigsh` and -:func:`scipy.sparse.linalg.eigs` for details. - - -References ----------- -.. [1] http://www.caam.rice.edu/software/ARPACK/ diff --git a/scipy-0.10.1/doc/source/tutorial/basic.rst b/scipy-0.10.1/doc/source/tutorial/basic.rst deleted file mode 100644 index 97e985e57d..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/basic.rst +++ /dev/null @@ -1,302 +0,0 @@ -Basic functions in Numpy (and top-level scipy) -============================================== - -.. sectionauthor:: Travis E. Oliphant - -.. currentmodule:: numpy - -.. contents:: - -Interaction with Numpy ------------------------- - -To begin with, all of the Numpy functions have been subsumed into the -:mod:`scipy` namespace so that all of those functions are available -without additionally importing Numpy. In addition, the universal -functions (addition, subtraction, division) have been altered to not -raise exceptions if floating-point errors are encountered; instead, -NaN's and Inf's are returned in the arrays. 
To assist in detection of -these events, several functions (:func:`sp.isnan`, :func:`sp.isfinite`, -:func:`sp.isinf`) are available. - -Finally, some of the basic functions like log, sqrt, and inverse trig -functions have been modified to return complex numbers instead of -NaN's where appropriate (*i.e.* ``sp.sqrt(-1)`` returns ``1j``). - - -Top-level scipy routines ------------------------- - -The purpose of the top level of scipy is to collect general-purpose -routines that the other sub-packages can use and to provide a simple -replacement for Numpy. Anytime you might think to import Numpy, you -can import scipy instead and remove yourself from direct dependence on -Numpy. These routines are divided into several files for -organizational purposes, but they are all available under the numpy -namespace (and the scipy namespace). There are routines for type -handling and type checking, shape and matrix manipulation, polynomial -processing, and other useful functions. Rather than giving a detailed -description of each of these functions (which is available in the -Numpy Reference Guide or by using the :func:`help`, :func:`info` and -:func:`source` commands), this tutorial will discuss some of the more -useful commands which require a little introduction to use to their -full potential. - - -Type handling -^^^^^^^^^^^^^ - -Note the difference between :func:`sp.iscomplex`/:func:`sp.isreal` and -:func:`sp.iscomplexobj`/:func:`sp.isrealobj`. The former command is -array based and returns byte arrays of ones and zeros providing the -result of the element-wise test. The latter command is object based -and returns a scalar describing the result of the test on the entire -object. - -Often it is required to get just the real and/or imaginary part of a -complex number. 
While complex numbers and arrays have attributes that -return those values, if one is not sure whether or not the object will -be complex-valued, it is better to use the functional forms -:func:`sp.real` and :func:`sp.imag` . These functions succeed for anything -that can be turned into a Numpy array. Consider also the function -:func:`sp.real_if_close` which transforms a complex-valued number with -tiny imaginary part into a real number. - -Occasionally the need to check whether or not a number is a scalar -(Python (long)int, Python float, Python complex, or rank-0 array) -occurs in coding. This functionality is provided in the convenient -function :func:`sp.isscalar` which returns a 1 or a 0. - -Finally, ensuring that objects are a certain Numpy type occurs often -enough that it has been given a convenient interface in SciPy through -the use of the :obj:`sp.cast` dictionary. The dictionary is keyed by the -type it is desired to cast to and the dictionary stores functions to -perform the casting. Thus, ``sp.cast['f'](d)`` returns an array -of :class:`sp.float32` from *d*. This function is also useful as an easy -way to get a scalar of a certain type:: - - >>> sp.cast['f'](sp.pi) - array(3.1415927410125732, dtype=float32) - -Index Tricks -^^^^^^^^^^^^ - -There are some class instances that make special use of the slicing -functionality to provide efficient means for array construction. This -part will discuss the operation of :obj:`sp.mgrid` , :obj:`sp.ogrid` , -:obj:`sp.r_` , and :obj:`sp.c_` for quickly constructing arrays. - -One familiar with MATLAB (R) may complain that it is difficult to -construct arrays from the interactive session with Python. Suppose, -for example that one wants to construct an array that begins with 3 -followed by 5 zeros and then contains 10 numbers spanning the range -1 -to 1 (inclusive on both ends). 
Before SciPy, you would need to enter -something like the following - - >>> concatenate(([3],[0]*5,arange(-1,1.002,2/9.0))) - -With the :obj:`r_` command one can enter this as - - >>> r_[3,[0]*5,-1:1:10j] - -which can ease typing and make for more readable code. Notice how -objects are concatenated, and the slicing syntax is (ab)used to -construct ranges. The other term that deserves a little explanation is -the use of the complex number 10j as the step size in the slicing -syntax. This non-standard use allows the number to be interpreted as -the number of points to produce in the range rather than as a step -size (note we would have used the long integer notation, 10L, but this -notation may go away in Python as the integers become unified). This -non-standard usage may be unsightly to some, but it gives the user the -ability to quickly construct complicated vectors in a very readable -fashion. When the number of points is specified in this way, the end- -point is inclusive. - -The "r" stands for row concatenation because if the objects between -commas are 2 dimensional arrays, they are stacked by rows (and thus -must have commensurate columns). There is an equivalent command -:obj:`c_` that stacks 2d arrays by columns but works identically to -:obj:`r_` for 1d arrays. - -Another very useful class instance which makes use of extended slicing -notation is the function :obj:`mgrid`. In the simplest case, this -function can be used to construct 1d ranges as a convenient substitute -for arange. It also allows the use of complex-numbers in the step-size -to indicate the number of points to place between the (inclusive) -end-points. The real purpose of this function however is to produce N, -N-d arrays which provide coordinate arrays for an N-dimensional -volume. 
The easiest way to understand this is with an example of its -usage: - - >>> mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> mgrid[0:5:4j,0:5:4j] - array([[[ 0. , 0. , 0. , 0. ], - [ 1.6667, 1.6667, 1.6667, 1.6667], - [ 3.3333, 3.3333, 3.3333, 3.3333], - [ 5. , 5. , 5. , 5. ]], - [[ 0. , 1.6667, 3.3333, 5. ], - [ 0. , 1.6667, 3.3333, 5. ], - [ 0. , 1.6667, 3.3333, 5. ], - [ 0. , 1.6667, 3.3333, 5. ]]]) - -Having meshed arrays like this is sometimes very useful. However, it -is not always needed just to evaluate some N-dimensional function over -a grid due to the array-broadcasting rules of Numpy and SciPy. If this -is the only purpose for generating a meshgrid, you should instead use -the function :obj:`ogrid` which generates an "open "grid using NewAxis -judiciously to create N, N-d arrays where only one dimension in each -array has length greater than 1. This will save memory and create the -same result if the only purpose for the meshgrid is to generate sample -points for evaluation of an N-d function. - - -Shape manipulation -^^^^^^^^^^^^^^^^^^ - -In this category of functions are routines for squeezing out length- -one dimensions from N-dimensional arrays, ensuring that an array is at -least 1-, 2-, or 3-dimensional, and stacking (concatenating) arrays by -rows, columns, and "pages "(in the third dimension). Routines for -splitting arrays (roughly the opposite of stacking arrays) are also -available. - - -Polynomials -^^^^^^^^^^^ - -There are two (interchangeable) ways to deal with 1-d polynomials in -SciPy. The first is to use the :class:`poly1d` class from Numpy. This -class accepts coefficients or polynomial roots to initialize a -polynomial. The polynomial object can then be manipulated in algebraic -expressions, integrated, differentiated, and evaluated. 
It even prints -like a polynomial: - - >>> p = poly1d([3,4,5]) - >>> print p - 2 - 3 x + 4 x + 5 - >>> print p*p - 4 3 2 - 9 x + 24 x + 46 x + 40 x + 25 - >>> print p.integ(k=6) - 3 2 - x + 2 x + 5 x + 6 - >>> print p.deriv() - 6 x + 4 - >>> p([4,5]) - array([ 69, 100]) - -The other way to handle polynomials is as an array of coefficients -with the first element of the array giving the coefficient of the -highest power. There are explicit functions to add, subtract, -multiply, divide, integrate, differentiate, and evaluate polynomials -represented as sequences of coefficients. - - -Vectorizing functions (vectorize) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -One of the features that NumPy provides is a class :obj:`vectorize` to -convert an ordinary Python function which accepts scalars and returns -scalars into a "vectorized-function" with the same broadcasting rules -as other Numpy functions (*i.e.* the Universal functions, or -ufuncs). For example, suppose you have a Python function named -:obj:`addsubtract` defined as: - - >>> def addsubtract(a,b): - ... if a > b: - ... return a - b - ... else: - ... return a + b - -which defines a function of two scalar variables and returns a scalar -result. The class vectorize can be used to "vectorize "this function so that :: - - >>> vec_addsubtract = vectorize(addsubtract) - -returns a function which takes array arguments and returns an array -result: - - >>> vec_addsubtract([0,3,6,9],[1,3,5,7]) - array([1, 6, 1, 2]) - -This particular function could have been written in vector form -without the use of :obj:`vectorize` . But, what if the function you have written is the result of some -optimization or integration routine. Such functions can likely only be -vectorized using ``vectorize.`` - - -Other useful functions -^^^^^^^^^^^^^^^^^^^^^^ - -There are several other functions in the scipy_base package including -most of the other functions that are also in the Numpy package. 
The -reason for duplicating these functions is to allow SciPy to -potentially alter their original interface and make it easier for -users to know how to get access to functions - - >>> from scipy import * - -Functions which should be mentioned are :obj:`mod(x,y)` which can -replace ``x % y`` when it is desired that the result take the sign of -*y* instead of *x* . Also included is :obj:`fix` which always rounds -to the nearest integer towards zero. For doing phase processing, the -functions :func:`angle`, and :obj:`unwrap` are also useful. Also, the -:obj:`linspace` and :obj:`logspace` functions return equally spaced samples -in a linear or log scale. Finally, it's useful to be aware of the indexing -capabilities of Numpy. Mention should be made of the new -function :obj:`select` which extends the functionality of :obj:`where` to -include multiple conditions and multiple choices. The calling -convention is ``select(condlist,choicelist,default=0).`` :obj:`select` is -a vectorized form of the multiple if-statement. It allows rapid -construction of a function which returns an array of results based on -a list of conditions. Each element of the return array is taken from -the array in a ``choicelist`` corresponding to the first condition in -``condlist`` that is true. For example - - >>> x = r_[-2:3] - >>> x - array([-2, -1, 0, 1, 2]) - >>> select([x > 3, x >= 0],[0,x+2]) - array([0, 0, 2, 3, 4]) - - -Common functions ----------------- - -Some functions depend on sub-packages of SciPy but should be available -from the top-level of SciPy due to their common use. These are -functions that might have been placed in scipy_base except for their -dependence on other sub-packages of SciPy. For example the -:obj:`factorial` and :obj:`comb` functions compute :math:`n!` and -:math:`n!/k!(n-k)!` using either exact integer arithmetic (thanks to -Python's Long integer object), or by using floating-point precision -and the gamma function. 
The functions :obj:`rand` and :obj:`randn` -are used so often that they warranted a place at the top level. There -are convenience functions for the interactive use: :obj:`disp` -(similar to print), and :obj:`who` (returns a list of defined -variables and memory consumption--upper bounded). Another function -returns a common image used in image processing: :obj:`lena`. - -Finally, two functions are provided that are useful for approximating -derivatives of functions using discrete-differences. The function -:obj:`central_diff_weights` returns weighting coefficients for an -equally-spaced :math:`N`-point approximation to the derivative of -order *o*. These weights must be multiplied by the function -corresponding to these points and the results added to obtain the -derivative approximation. This function is intended for use when only -samples of the function are avaiable. When the function is an object -that can be handed to a routine and evaluated, the function -:obj:`derivative` can be used to automatically evaluate the object at -the correct points to obtain an N-point approximation to the *o*-th -derivative at a given point. diff --git a/scipy-0.10.1/doc/source/tutorial/examples/1-1 b/scipy-0.10.1/doc/source/tutorial/examples/1-1 deleted file mode 100644 index 6751bc6214..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/examples/1-1 +++ /dev/null @@ -1,55 +0,0 @@ ->>> sp.info(optimize.fmin) - fmin(func, x0, args=(), xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, - full_output=0, disp=1, retall=0, callback=None) - -Minimize a function using the downhill simplex algorithm. - -Parameters ----------- -func : callable func(x,*args) - The objective function to be minimized. -x0 : ndarray - Initial guess. -args : tuple - Extra arguments passed to func, i.e. ``f(x,*args)``. -callback : callable - Called after each iteration, as callback(xk), where xk is the - current parameter vector. - -Returns -------- -xopt : ndarray - Parameter that minimizes function. 
-fopt : float - Value of function at minimum: ``fopt = func(xopt)``. -iter : int - Number of iterations performed. -funcalls : int - Number of function calls made. -warnflag : int - 1 : Maximum number of function evaluations made. - 2 : Maximum number of iterations reached. -allvecs : list - Solution at each iteration. - -Other parameters ----------------- -xtol : float - Relative error in xopt acceptable for convergence. -ftol : number - Relative error in func(xopt) acceptable for convergence. -maxiter : int - Maximum number of iterations to perform. -maxfun : number - Maximum number of function evaluations to make. -full_output : bool - Set to True if fopt and warnflag outputs are desired. -disp : bool - Set to True to print convergence messages. -retall : bool - Set to True to return list of solutions at each iteration. - -Notes ------ -Uses a Nelder-Mead simplex algorithm to find the minimum of function of -one or more variables. diff --git a/scipy-0.10.1/doc/source/tutorial/examples/4-1 b/scipy-0.10.1/doc/source/tutorial/examples/4-1 deleted file mode 100644 index 21a02a7960..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/examples/4-1 +++ /dev/null @@ -1,25 +0,0 @@ ->>> help(integrate) - Methods for Integrating Functions given function object. - - quad -- General purpose integration. - dblquad -- General purpose double integration. - tplquad -- General purpose triple integration. - fixed_quad -- Integrate func(x) using Gaussian quadrature of order n. - quadrature -- Integrate with given tolerance using Gaussian quadrature. - romberg -- Integrate func using Romberg integration. - - Methods for Integrating Functions given fixed samples. - - trapz -- Use trapezoidal rule to compute integral from samples. - cumtrapz -- Use trapezoidal rule to cumulatively compute integral. - simps -- Use Simpson's rule to compute integral from samples. - romb -- Use Romberg Integration to compute integral from - (2**k + 1) evenly-spaced samples. 
- - See the special module's orthogonal polynomials (special) for Gaussian - quadrature roots and weights for other weighting factors and regions. - - Interface to numerical integrators of ODE systems. - - odeint -- General integration of ordinary differential equations. - ode -- Integrate ODE using VODE and ZVODE routines. diff --git a/scipy-0.10.1/doc/source/tutorial/examples/5-1 b/scipy-0.10.1/doc/source/tutorial/examples/5-1 deleted file mode 100644 index ba3138ee50..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/examples/5-1 +++ /dev/null @@ -1,91 +0,0 @@ -from scipy import optimize ->>> info(optimize) -Optimization Tools -================== - - A collection of general-purpose optimization routines. - - fmin -- Nelder-Mead Simplex algorithm - (uses only function calls) - fmin_powell -- Powell's (modified) level set method (uses only - function calls) - fmin_cg -- Non-linear (Polak-Ribiere) conjugate gradient algorithm - (can use function and gradient). - fmin_bfgs -- Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno); - (can use function and gradient) - fmin_ncg -- Line-search Newton Conjugate Gradient (can use - function, gradient and Hessian). - leastsq -- Minimize the sum of squares of M equations in - N unknowns given a starting estimate. - - - Constrained Optimizers (multivariate) - - fmin_l_bfgs_b -- Zhu, Byrd, and Nocedal's L-BFGS-B constrained optimizer - (if you use this please quote their papers -- see help) - - fmin_tnc -- Truncated Newton Code originally written by Stephen Nash and - adapted to C by Jean-Sebastien Roy. - - fmin_cobyla -- Constrained Optimization BY Linear Approximation - - - Global Optimizers - - anneal -- Simulated Annealing - brute -- Brute force searching optimizer - - - Scalar function minimizers - - fminbound -- Bounded minimization of a scalar function. - brent -- 1-D function minimization using Brent method. 
- golden -- 1-D function minimization using Golden Section method - bracket -- Bracket a minimum (given two starting points) - - - Also a collection of general-purpose root-finding routines. - - fsolve -- Non-linear multi-variable equation solver. - - - Scalar function solvers - - brentq -- quadratic interpolation Brent method - brenth -- Brent method (modified by Harris with hyperbolic - extrapolation) - ridder -- Ridder's method - bisect -- Bisection method - newton -- Secant method or Newton's method - - fixed_point -- Single-variable fixed-point solver. - - A collection of general-purpose nonlinear multidimensional solvers. - - broyden1 -- Broyden's first method - is a quasi-Newton-Raphson - method for updating an approximate Jacobian and then - inverting it - broyden2 -- Broyden's second method - the same as broyden1, but - updates the inverse Jacobian directly - broyden3 -- Broyden's second method - the same as broyden2, but - instead of directly computing the inverse Jacobian, - it remembers how to construct it using vectors, and - when computing inv(J)*F, it uses those vectors to - compute this product, thus avoding the expensive NxN - matrix multiplication. - broyden_generalized -- Generalized Broyden's method, the same as broyden2, - but instead of approximating the full NxN Jacobian, - it construct it at every iteration in a way that - avoids the NxN matrix multiplication. This is not - as precise as broyden3. - anderson -- extended Anderson method, the same as the - broyden_generalized, but added w_0^2*I to before - taking inversion to improve the stability - anderson2 -- the Anderson method, the same as anderson, but - formulated differently - - Utility Functions - - line_search -- Return a step that satisfies the strong Wolfe conditions. - check_grad -- Check the supplied derivative using finite difference - techniques. 
diff --git a/scipy-0.10.1/doc/source/tutorial/examples/newton_krylov_preconditioning.py b/scipy-0.10.1/doc/source/tutorial/examples/newton_krylov_preconditioning.py deleted file mode 100644 index 27e8bf20d9..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/examples/newton_krylov_preconditioning.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np -from scipy.optimize import newton_krylov -from scipy.sparse import spdiags, spkron -from scipy.sparse.linalg import spilu, LinearOperator -from numpy import cosh, zeros_like, mgrid, zeros, eye - -# parameters -nx, ny = 75, 75 -hx, hy = 1./(nx-1), 1./(ny-1) - -P_left, P_right = 0, 0 -P_top, P_bottom = 1, 0 - -def get_preconditioner(): - """Compute the preconditioner M""" - diags_x = zeros((3, nx)) - diags_x[0,:] = 1/hx/hx - diags_x[1,:] = -2/hx/hx - diags_x[2,:] = 1/hx/hx - Lx = spdiags(diags_x, [-1,0,1], nx, nx) - - diags_y = zeros((3, ny)) - diags_y[0,:] = 1/hy/hy - diags_y[1,:] = -2/hy/hy - diags_y[2,:] = 1/hy/hy - Ly = spdiags(diags_y, [-1,0,1], ny, ny) - - J1 = spkron(Lx, eye(ny)) + spkron(eye(nx), Ly) - - # Now we have the matrix `J_1`. We need to find its inverse `M` -- - # however, since an approximate inverse is enough, we can use - # the *incomplete LU* decomposition - - J1_ilu = spilu(J1) - - # This returns an object with a method .solve() that evaluates - # the corresponding matrix-vector product. 
We need to wrap it into - # a LinearOperator before it can be passed to the Krylov methods: - - M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve) - return M - -def solve(preconditioning=True): - """Compute the solution""" - count = [0] - - def residual(P): - count[0] += 1 - - d2x = zeros_like(P) - d2y = zeros_like(P) - - d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx - d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx - d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx - - d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy - d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy - d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy - - return d2x + d2y + 5*cosh(P).mean()**2 - - # preconditioner - if preconditioning: - M = get_preconditioner() - else: - M = None - - # solve - guess = zeros((nx, ny), float) - - sol = newton_krylov(residual, guess, verbose=1, inner_M=M) - print 'Residual', abs(residual(sol)).max() - print 'Evaluations', count[0] - - return sol - -def main(): - sol = solve(preconditioning=True) - - # visualize - import matplotlib.pyplot as plt - x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] - plt.clf() - plt.pcolor(x, y, sol) - plt.clim(0, 1) - plt.colorbar() - plt.show() - -if __name__ == "__main__": - main() diff --git a/scipy-0.10.1/doc/source/tutorial/examples/normdiscr_plot1.py b/scipy-0.10.1/doc/source/tutorial/examples/normdiscr_plot1.py deleted file mode 100644 index 3a18d2a5d9..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/examples/normdiscr_plot1.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -from scipy import stats - -npoints = 20 # number of integer support points of the distribution minus 1 -npointsh = npoints / 2 -npointsf = float(npoints) -nbound = 4 #bounds for the truncated normal -normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal -grid = np.arange(-npointsh, npointsh+2, 1) #integer grid -gridlimitsnorm = (grid-0.5) / npointsh * nbound #bin limits for the truncnorm -gridlimits = grid 
- 0.5 -grid = grid[:-1] -probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound)) -gridint = grid -normdiscrete = stats.rv_discrete( - values=(gridint, np.round(probs, decimals=7)), - name='normdiscrete') - - -n_sample = 500 -np.random.seed(87655678) #fix the seed for replicability -rvs = normdiscrete.rvs(size=n_sample) -rvsnd=rvs -f,l = np.histogram(rvs, bins=gridlimits) -sfreq = np.vstack([gridint, f, probs*n_sample]).T -fs = sfreq[:,1] / float(n_sample) -ft = sfreq[:,2] / float(n_sample) -nd_std = np.sqrt(normdiscrete.stats(moments='v')) - -ind = gridint # the x locations for the groups -width = 0.35 # the width of the bars - -plt.subplot(111) -rects1 = plt.bar(ind, ft, width, color='b') -rects2 = plt.bar(ind+width, fs, width, color='r') -normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std), - color='b') - -plt.ylabel('Frequency') -plt.title('Frequency and Probability of normdiscrete') -plt.xticks(ind+width, ind ) -plt.legend((rects1[0], rects2[0]), ('true', 'sample')) - -plt.show() diff --git a/scipy-0.10.1/doc/source/tutorial/examples/normdiscr_plot2.py b/scipy-0.10.1/doc/source/tutorial/examples/normdiscr_plot2.py deleted file mode 100644 index f88e80dbf9..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/examples/normdiscr_plot2.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -from scipy import stats - -npoints = 20 # number of integer support points of the distribution minus 1 -npointsh = npoints / 2 -npointsf = float(npoints) -nbound = 4 #bounds for the truncated normal -normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal -grid = np.arange(-npointsh, npointsh+2,1) #integer grid -gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm -gridlimits = grid - 0.5 -grid = grid[:-1] -probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound)) -gridint = grid -normdiscrete = stats.rv_discrete( - values=(gridint, np.round(probs, 
decimals=7)), - name='normdiscrete') - -n_sample = 500 -np.random.seed(87655678) #fix the seed for replicability -rvs = normdiscrete.rvs(size=n_sample) -rvsnd = rvs -f,l = np.histogram(rvs,bins=gridlimits) -sfreq = np.vstack([gridint,f,probs*n_sample]).T -fs = sfreq[:,1] / float(n_sample) -ft = sfreq[:,2] / float(n_sample) -fs = sfreq[:,1].cumsum() / float(n_sample) -ft = sfreq[:,2].cumsum() / float(n_sample) -nd_std = np.sqrt(normdiscrete.stats(moments='v')) - - -ind = gridint # the x locations for the groups -width = 0.35 # the width of the bars - -plt.figure() -plt.subplot(111) -rects1 = plt.bar(ind, ft, width, color='b') -rects2 = plt.bar(ind+width, fs, width, color='r') -normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std), - color='b') - -plt.ylabel('cdf') -plt.title('Cumulative Frequency and CDF of normdiscrete') -plt.xticks(ind+width, ind ) -plt.legend( (rects1[0], rects2[0]), ('true', 'sample') ) - -plt.show() diff --git a/scipy-0.10.1/doc/source/tutorial/fftpack.rst b/scipy-0.10.1/doc/source/tutorial/fftpack.rst deleted file mode 100644 index 51dc303d13..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/fftpack.rst +++ /dev/null @@ -1,145 +0,0 @@ -Fourier Transforms (:mod:`scipy.fftpack`) -========================================= - -.. sectionauthor:: Scipy Developers - -.. currentmodule:: scipy.fftpack - -.. warning:: - - This is currently a stub page - - -.. contents:: - - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the signal from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. 
Press et al. [NR]_ -provide an accessible introduction to Fourier analysis and its -applications. - - -Fast Fourier transforms ------------------------ - -One dimensional discrete Fourier transforms -------------------------------------------- - -fft, ifft, rfft, irfft - - -Two and n dimensional discrete Fourier transforms -------------------------------------------------- - -fft in more than one dimension - - -Discrete Cosine Transforms --------------------------- - - -Return the Discrete Cosine Transform [Mak]_ of arbitrary type sequence ``x``. - -For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to -MATLAB ``dct(x)``. - -There are theoretically 8 types of the DCT [WP]_, only the first 3 types are -implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the' -Inverse DCT generally refers to DCT type 3. - -type I -~~~~~~ - -There are several definitions of the DCT-I; we use the following -(for ``norm=None``): - -.. math:: - :nowrap: - - \[ y_k = x_0 + (-1)^k x_{N-1} + 2\sum_{n=1}^{N-2} x_n - \cos\left({\pi nk\over N-1}\right), - \qquad 0 \le k < N. \] - -Only None is supported as normalization mode for DCT-I. Note also that the -DCT-I is only supported for input size > 1 - -type II -~~~~~~~ - -There are several definitions of the DCT-II; we use the following -(for ``norm=None``): - -.. math:: - :nowrap: - - \[ y_k = 2 \sum_{n=0}^{N-1} x_n - \cos \left({\pi(2n+1)k \over 2N} \right) - \qquad 0 \le k < N.\] - -If ``norm='ortho'``, :math:`y_k` is multiplied by a scaling factor `f`: - -.. math:: - :nowrap: - - \[f = \begin{cases} \sqrt{1/(4N)}, & \text{if $k = 0$} \\ - \sqrt{1/(2N)}, & \text{otherwise} \end{cases} \] - - -Which makes the corresponding matrix of coefficients orthonormal -(`OO' = Id`). - -type III -~~~~~~~~ - -There are several definitions, we use the following -(for ``norm=None``): - -.. 
math:: - :nowrap: - - \[ y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n - \cos\left({\pi n(2k+1) \over 2N}\right) - \qquad 0 \le k < N,\] - -or, for ``norm='ortho'``: - -.. math:: - :nowrap: - - \[ y_k = {x_0\over\sqrt{N}} + {1\over\sqrt{N}} \sum_{n=1}^{N-1} - x_n \cos\left({\pi n(2k+1) \over 2N}\right) - \qquad 0 \le k < N.\] - -The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up -to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of the -orthonormalized DCT-II. - -References -~~~~~~~~~~ - -.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - -.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - -.. [Mak] J. Makhoul, 1980, 'A Fast Cosine Transform in One and Two Dimensions', - `IEEE Transactions on acoustics, speech and signal processing` - vol. 28(1), pp. 27-34, http://dx.doi.org/10.1109/TASSP.1980.1163351 - -.. [WP] http://en.wikipedia.org/wiki/Discrete_cosine_transform - - -FFT convolution ---------------- - -scipy.fftpack.convolve performs a convolution of two one-dimensional -arrays in frequency domain. diff --git a/scipy-0.10.1/doc/source/tutorial/general.rst b/scipy-0.10.1/doc/source/tutorial/general.rst deleted file mode 100644 index 7d33d051e4..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/general.rst +++ /dev/null @@ -1,129 +0,0 @@ -============ -Introduction -============ - -.. contents:: - -SciPy is a collection of mathematical algorithms and convenience -functions built on the Numpy extension for Python. It adds -significant power to the interactive Python session by exposing the -user to high-level commands and classes for the manipulation and -visualization of data. 
With SciPy, an interactive Python session -becomes a data-processing and system-prototyping environment rivaling -sytems such as MATLAB, IDL, Octave, R-Lab, and SciLab. - -The additional power of using SciPy within Python, however, is that a -powerful programming language is also available for use in developing -sophisticated programs and specialized applications. Scientific -applications written in SciPy benefit from the development of -additional modules in numerous niche's of the software landscape by -developers across the world. Everything from parallel programming to -web and data-base subroutines and classes have been made available to -the Python programmer. All of this power is available in addition to -the mathematical libraries in SciPy. - -This document provides a tutorial for the first-time user of SciPy to -help get started with some of the features available in this powerful -package. It is assumed that the user has already installed the -package. Some general Python facility is also assumed such as could be -acquired by working through the Tutorial in the Python distribution. -For further introductory help the user is directed to the Numpy -documentation. - -For brevity and convenience, we will often assume that the main -packages (numpy, scipy, and matplotlib) have been imported as:: - - >>> import numpy as np - >>> import scipy as sp - >>> import matplotlib as mpl - >>> import matplotlib.pyplot as plt - -These are the import conventions that our community has adopted -after discussion on public mailing lists. You will see these -conventions used throughout NumPy and SciPy source code and -documentation. While we obviously don't require you to follow -these conventions in your own code, it is highly recommended. - -SciPy Organization ------------------- - -SciPy is organized into subpackages covering different scientific -computing domains. These are summarized in the following table: - -.. 
currentmodule:: scipy - -================== ====================================================== -Subpackage Description -================== ====================================================== -:mod:`cluster` Clustering algorithms -:mod:`constants` Physical and mathematical constants -:mod:`fftpack` Fast Fourier Transform routines -:mod:`integrate` Integration and ordinary differential equation solvers -:mod:`interpolate` Interpolation and smoothing splines -:mod:`io` Input and Output -:mod:`linalg` Linear algebra -:mod:`maxentropy` Maximum entropy methods -:mod:`ndimage` N-dimensional image processing -:mod:`odr` Orthogonal distance regression -:mod:`optimize` Optimization and root-finding routines -:mod:`signal` Signal processing -:mod:`sparse` Sparse matrices and associated routines -:mod:`spatial` Spatial data structures and algorithms -:mod:`special` Special functions -:mod:`stats` Statistical distributions and functions -:mod:`weave` C/C++ integration -================== ====================================================== - -Scipy sub-packages need to be imported separately, for example:: - - >>> from scipy import linalg, optimize - -Because of their ubiquitousness, some of the functions in these -subpackages are also made available in the scipy namespace to ease -their use in interactive sessions and programs. In addition, many -basic array functions from :mod:`numpy` are also available at the -top-level of the :mod:`scipy` package. Before looking at the -sub-packages individually, we will first look at some of these common -functions. - -Finding Documentation ---------------------- - -Scipy and Numpy have HTML and PDF versions of their documentation -available at http://docs.scipy.org/, which currently details nearly -all available functionality. However, this documentation is still -work-in-progress, and some parts may be incomplete or sparse. 
As -we are a volunteer organization and depend on the community for -growth, your participation - everything from providing feedback to -improving the documentation and code - is welcome and actively -encouraged. - -Python also provides the facility of documentation strings. The -functions and classes available in SciPy use this method for on-line -documentation. There are two methods for reading these messages and -getting help. Python provides the command :func:`help` in the pydoc -module. Entering this command with no arguments (i.e. ``>>> help`` ) -launches an interactive help session that allows searching through the -keywords and modules available to all of Python. Running the command -help with an object as the argument displays the calling signature, -and the documentation string of the object. - -The pydoc method of help is sophisticated but uses a pager to display -the text. Sometimes this can interfere with the terminal you are -running the interactive session within. A scipy-specific help system -is also available under the command ``sp.info``. The signature and -documentation string for the object passed to the ``help`` command are -printed to standard output (or to a writeable object passed as the -third argument). The second keyword argument of ``sp.info`` defines -the maximum width of the line for printing. If a module is passed as -the argument to help than a list of the functions and classes defined -in that module is printed. For example: - -.. literalinclude:: examples/1-1 - -Another useful command is :func:`source`. When given a function -written in Python as an argument, it prints out a listing of the -source code for that function. This can be helpful in learning about -an algorithm or understanding exactly what a function is doing with -its arguments. Also don't forget about the Python command ``dir`` -which can be used to look at the namespace of a module or package. 
diff --git a/scipy-0.10.1/doc/source/tutorial/index.rst b/scipy-0.10.1/doc/source/tutorial/index.rst deleted file mode 100644 index bc489bf2ce..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -************** -SciPy Tutorial -************** - -.. sectionauthor:: Travis E. Oliphant - -.. toctree:: - :maxdepth: 1 - - general - basic - special - integrate - optimize - interpolate - fftpack - signal - linalg - arpack - stats - ndimage - io - weave diff --git a/scipy-0.10.1/doc/source/tutorial/integrate.rst b/scipy-0.10.1/doc/source/tutorial/integrate.rst deleted file mode 100644 index 923e871401..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/integrate.rst +++ /dev/null @@ -1,280 +0,0 @@ -Integration (:mod:`scipy.integrate`) -==================================== - -.. sectionauthor:: Travis E. Oliphant - -.. currentmodule:: scipy.integrate - -The :mod:`scipy.integrate` sub-package provides several integration -techniques including an ordinary differential equation integrator. An -overview of the module is provided by the help command: - -.. literalinclude:: examples/4-1 - - -General integration (:func:`quad`) ----------------------------------- - -The function :obj:`quad` is provided to integrate a function of one -variable between two points. The points can be :math:`\pm\infty` -(:math:`\pm` ``inf``) to indicate infinite limits. For example, -suppose you wish to integrate a bessel function ``jv(2.5,x)`` along -the interval :math:`[0,4.5].` - -.. 
math:: - :nowrap: - - \[ I=\int_{0}^{4.5}J_{2.5}\left(x\right)\, dx.\] - -This could be computed using :obj:`quad`: - - >>> result = integrate.quad(lambda x: special.jv(2.5,x), 0, 4.5) - >>> print result - (1.1178179380783249, 7.8663172481899801e-09) - - >>> I = sqrt(2/pi)*(18.0/27*sqrt(2)*cos(4.5)-4.0/27*sqrt(2)*sin(4.5)+ - sqrt(2*pi)*special.fresnel(3/sqrt(pi))[0]) - >>> print I - 1.117817938088701 - - >>> print abs(result[0]-I) - 1.03761443881e-11 - -The first argument to quad is a "callable" Python object (*i.e* a -function, method, or class instance). Notice the use of a lambda- -function in this case as the argument. The next two arguments are the -limits of integration. The return value is a tuple, with the first -element holding the estimated value of the integral and the second -element holding an upper bound on the error. Notice, that in this -case, the true value of this integral is - -.. math:: - :nowrap: - - \[ I=\sqrt{\frac{2}{\pi}}\left(\frac{18}{27}\sqrt{2}\cos\left(4.5\right)-\frac{4}{27}\sqrt{2}\sin\left(4.5\right)+\sqrt{2\pi}\textrm{Si}\left(\frac{3}{\sqrt{\pi}}\right)\right),\] - -where - -.. math:: - :nowrap: - - \[ \textrm{Si}\left(x\right)=\int_{0}^{x}\sin\left(\frac{\pi}{2}t^{2}\right)\, dt.\] - -is the Fresnel sine integral. Note that the numerically-computed -integral is within :math:`1.04\times10^{-11}` of the exact result --- well below the reported error bound. - -Infinite inputs are also allowed in :obj:`quad` by using :math:`\pm` -``inf`` as one of the arguments. For example, suppose that a numerical -value for the exponential integral: - -.. math:: - :nowrap: - - \[ E_{n}\left(x\right)=\int_{1}^{\infty}\frac{e^{-xt}}{t^{n}}\, dt.\] - -is desired (and the fact that this integral can be computed as -``special.expn(n,x)`` is forgotten). 
The functionality of the function -:obj:`special.expn` can be replicated by defining a new function -:obj:`vec_expint` based on the routine :obj:`quad`: - - >>> from scipy.integrate import quad - >>> def integrand(t,n,x): - ... return exp(-x*t) / t**n - - >>> def expint(n,x): - ... return quad(integrand, 1, Inf, args=(n, x))[0] - - >>> vec_expint = vectorize(expint) - - >>> vec_expint(3,arange(1.0,4.0,0.5)) - array([ 0.1097, 0.0567, 0.0301, 0.0163, 0.0089, 0.0049]) - >>> special.expn(3,arange(1.0,4.0,0.5)) - array([ 0.1097, 0.0567, 0.0301, 0.0163, 0.0089, 0.0049]) - -The function which is integrated can even use the quad argument -(though the error bound may underestimate the error due to possible -numerical error in the integrand from the use of :obj:`quad` ). The integral in this case is - -.. math:: - :nowrap: - - \[ I_{n}=\int_{0}^{\infty}\int_{1}^{\infty}\frac{e^{-xt}}{t^{n}}\, dt\, dx=\frac{1}{n}.\] - ->>> result = quad(lambda x: expint(3, x), 0, inf) ->>> print result -(0.33333333324560266, 2.8548934485373678e-09) - ->>> I3 = 1.0/3.0 ->>> print I3 -0.333333333333 - ->>> print I3 - result[0] -8.77306560731e-11 - -This last example shows that multiple integration can be handled using -repeated calls to :func:`quad`. The mechanics of this for double and -triple integration have been wrapped up into the functions -:obj:`dblquad` and :obj:`tplquad`. The function, :obj:`dblquad` -performs double integration. Use the help function to be sure that the -arguments are defined in the correct order. In addition, the limits on -all inner integrals are actually functions which can be constant -functions. An example of using double integration to compute several -values of :math:`I_{n}` is shown below: - - >>> from scipy.integrate import quad, dblquad - >>> def I(n): - ... 
return dblquad(lambda t, x: exp(-x*t)/t**n, 0, Inf, lambda x: 1, lambda x: Inf) - - >>> print I(4) - (0.25000000000435768, 1.0518245707751597e-09) - >>> print I(3) - (0.33333333325010883, 2.8604069919261191e-09) - >>> print I(2) - (0.49999999999857514, 1.8855523253868967e-09) - - -Gaussian quadrature (integrate.gauss_quadtol) ---------------------------------------------- - -A few functions are also provided in order to perform simple Gaussian -quadrature over a fixed interval. The first is :obj:`fixed_quad` which -performs fixed-order Gaussian quadrature. The second function is -:obj:`quadrature` which performs Gaussian quadrature of multiple -orders until the difference in the integral estimate is beneath some -tolerance supplied by the user. These functions both use the module -:mod:`special.orthogonal` which can calculate the roots and quadrature -weights of a large variety of orthogonal polynomials (the polynomials -themselves are available as special functions returning instances of -the polynomial class --- e.g. :obj:`special.legendre `). - - -Integrating using samples -------------------------- - -There are three functions for computing integrals given only samples: -:obj:`trapz` , :obj:`simps`, and :obj:`romb` . The first two -functions use Newton-Coates formulas of order 1 and 2 respectively to -perform integration. These two functions can handle, -non-equally-spaced samples. The trapezoidal rule approximates the -function as a straight line between adjacent points, while Simpson's -rule approximates the function between three adjacent points as a -parabola. - -If the samples are equally-spaced and the number of samples available -is :math:`2^{k}+1` for some integer :math:`k`, then Romberg -integration can be used to obtain high-precision estimates of the -integral using the available samples. 
Romberg integration uses the -trapezoid rule at step-sizes related by a power of two and then -performs Richardson extrapolation on these estimates to approximate -the integral with a higher-degree of accuracy. (A different interface -to Romberg integration useful when the function can be provided is -also available as :func:`romberg`). - - -Ordinary differential equations (:func:`odeint`) ------------------------------------------------- - -Integrating a set of ordinary differential equations (ODEs) given -initial conditions is another useful example. The function -:obj:`odeint` is available in SciPy for integrating a first-order -vector differential equation: - -.. math:: - :nowrap: - - \[ \frac{d\mathbf{y}}{dt}=\mathbf{f}\left(\mathbf{y},t\right),\] - -given initial conditions :math:`\mathbf{y}\left(0\right)=y_{0}`, where -:math:`\mathbf{y}` is a length :math:`N` vector and :math:`\mathbf{f}` -is a mapping from :math:`\mathcal{R}^{N}` to :math:`\mathcal{R}^{N}.` -A higher-order ordinary differential equation can always be reduced to -a differential equation of this type by introducing intermediate -derivatives into the :math:`\mathbf{y}` vector. - -For example suppose it is desired to find the solution to the -following second-order differential equation: - -.. math:: - :nowrap: - - \[ \frac{d^{2}w}{dz^{2}}-zw(z)=0\] - -with initial conditions :math:`w\left(0\right)=\frac{1}{\sqrt[3]{3^{2}}\Gamma\left(\frac{2}{3}\right)}` and :math:`\left.\frac{dw}{dz}\right|_{z=0}=-\frac{1}{\sqrt[3]{3}\Gamma\left(\frac{1}{3}\right)}.` It is known that the solution to this differential equation with these -boundary conditions is the Airy function - -.. math:: - :nowrap: - - \[ w=\textrm{Ai}\left(z\right),\] - -which gives a means to check the integrator using :func:`special.airy `. - -First, convert this ODE into standard form by setting -:math:`\mathbf{y}=\left[\frac{dw}{dz},w\right]` and :math:`t=z`. Thus, -the differential equation becomes - -.. 
math:: - :nowrap: - - \[ \frac{d\mathbf{y}}{dt}=\left[\begin{array}{c} ty_{1}\\ y_{0}\end{array}\right]=\left[\begin{array}{cc} 0 & t\\ 1 & 0\end{array}\right]\left[\begin{array}{c} y_{0}\\ y_{1}\end{array}\right]=\left[\begin{array}{cc} 0 & t\\ 1 & 0\end{array}\right]\mathbf{y}.\] - -In other words, - -.. math:: - :nowrap: - - \[ \mathbf{f}\left(\mathbf{y},t\right)=\mathbf{A}\left(t\right)\mathbf{y}.\] - -As an interesting reminder, if :math:`\mathbf{A}\left(t\right)` -commutes with :math:`\int_{0}^{t}\mathbf{A}\left(\tau\right)\, d\tau` -under matrix multiplication, then this linear differential equation -has an exact solution using the matrix exponential: - -.. math:: - :nowrap: - - \[ \mathbf{y}\left(t\right)=\exp\left(\int_{0}^{t}\mathbf{A}\left(\tau\right)d\tau\right)\mathbf{y}\left(0\right),\] - -However, in this case, :math:`\mathbf{A}\left(t\right)` and its integral do not commute. - -There are many optional inputs and outputs available when using odeint -which can help tune the solver. These additional inputs and outputs -are not needed much of the time, however, and the three required input -arguments and the output solution suffice. The required inputs are the -function defining the derivative, *fprime*, the initial conditions -vector, *y0*, and the time points to obtain a solution, *t*, (with -the initial value point as the first element of this sequence). The -output to :obj:`odeint` is a matrix where each row contains the -solution vector at each requested time point (thus, the initial -conditions are given in the first output row). - -The following example illustrates the use of odeint including the -usage of the *Dfun* option which allows the user to specify a gradient -(with respect to :math:`\mathbf{y}` ) of the function, -:math:`\mathbf{f}\left(\mathbf{y},t\right)`. 
- - >>> from scipy.integrate import odeint - >>> from scipy.special import gamma, airy - >>> y1_0 = 1.0/3**(2.0/3.0)/gamma(2.0/3.0) - >>> y0_0 = -1.0/3**(1.0/3.0)/gamma(1.0/3.0) - >>> y0 = [y0_0, y1_0] - >>> def func(y, t): - ... return [t*y[1],y[0]] - - >>> def gradient(y,t): - ... return [[0,t],[1,0]] - - >>> x = arange(0,4.0, 0.01) - >>> t = x - >>> ychk = airy(x)[0] - >>> y = odeint(func, y0, t) - >>> y2 = odeint(func, y0, t, Dfun=gradient) - - >>> print ychk[:36:6] - [ 0.355028 0.339511 0.324068 0.308763 0.293658 0.278806] - - >>> print y[:36:6,1] - [ 0.355028 0.339511 0.324067 0.308763 0.293658 0.278806] - - >>> print y2[:36:6,1] - [ 0.355028 0.339511 0.324067 0.308763 0.293658 0.278806] diff --git a/scipy-0.10.1/doc/source/tutorial/interpolate.rst b/scipy-0.10.1/doc/source/tutorial/interpolate.rst deleted file mode 100644 index 2eee3bfa5f..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/interpolate.rst +++ /dev/null @@ -1,469 +0,0 @@ -======================================== -Interpolation (:mod:`scipy.interpolate`) -======================================== - -.. sectionauthor:: Travis E. Oliphant - -.. sectionauthor:: Pauli Virtanen - -.. currentmodule:: scipy.interpolate - -.. contents:: - -There are several general interpolation facilities available in SciPy, -for data in 1, 2, and higher dimensions: - -- A class representing an interpolant (:class:`interp1d`) in 1-D, - offering several interpolation methods. - -- Convenience function :func:`griddata` offering a simple interface to - interpolation in N dimensions (N = 1, 2, 3, 4, ...). - Object-oriented interface for the underlying routines is also - available. - -- Functions for 1- and 2-dimensional (smoothed) cubic-spline - interpolation, based on the FORTRAN library FITPACK. There are both - procedural and object-oriented interfaces for the FITPACK library. - -- Interpolation using Radial Basis Functions. 
- - -1-D interpolation (:class:`interp1d`) -===================================== - -The interp1d class in scipy.interpolate is a convenient method to -create a function based on fixed data points which can be evaluated -anywhere within the domain defined by the given data using linear -interpolation. An instance of this class is created by passing the 1-d -vectors comprising the data. The instance of this class defines a -__call__ method and can therefore by treated like a function which -interpolates between known data values to obtain unknown values (it -also has a docstring for help). Behavior at the boundary can be -specified at instantiation time. The following example demonstrates -its use, for linear and cubic spline interpolation: - -.. plot:: - - >>> from scipy.interpolate import interp1d - - >>> x = np.linspace(0, 10, 10) - >>> y = np.exp(-x/3.0) - >>> f = interp1d(x, y) - >>> f2 = interp1d(x, y, kind='cubic') - - >>> xnew = np.linspace(0, 10, 40) - >>> import matplotlib.pyplot as plt - >>> plt.plot(x,y,'o',xnew,f(xnew),'-', xnew, f2(xnew),'--') - >>> plt.legend(['data', 'linear', 'cubic'], loc='best') - >>> plt.show() - -.. :caption: One-dimensional interpolation using the -.. class :obj:`interpolate.interp1d` - - -Multivariate data interpolation (:func:`griddata`) -================================================== - -Suppose you have multidimensional data, for instance for an underlying -function *f(x, y)* you only know the values at points *(x[i], y[i])* -that do not form a regular grid. - -.. 
plot:: - - Suppose we want to interpolate the 2-D function - - >>> def func(x, y): - >>> return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 - - on a grid in [0, 1]x[0, 1] - - >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] - - but we only know its values at 1000 data points: - - >>> points = np.random.rand(1000, 2) - >>> values = func(points[:,0], points[:,1]) - - This can be done with `griddata` -- below we try out all of the - interpolation methods: - - >>> from scipy.interpolate import griddata - >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest') - >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear') - >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic') - - One can see that the exact result is reproduced by all of the - methods to some degree, but for this smooth function the piecewise - cubic interpolant gives the best results: - - >>> import matplotlib.pyplot as plt - >>> plt.subplot(221) - >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') - >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1) - >>> plt.title('Original') - >>> plt.subplot(222) - >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') - >>> plt.title('Nearest') - >>> plt.subplot(223) - >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') - >>> plt.title('Linear') - >>> plt.subplot(224) - >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') - >>> plt.title('Cubic') - >>> plt.gcf().set_size_inches(6, 6) - >>> plt.show() - - -Spline interpolation -==================== - -Spline interpolation in 1-d: Procedural (interpolate.splXXX) ------------------------------------------------------------- - -Spline interpolation requires two essential steps: (1) a spline -representation of the curve is computed, and (2) the spline is -evaluated at the desired points. 
In order to find the spline -representation, there are two different ways to represent a curve and -obtain (smoothing) spline coefficients: directly and parametrically. -The direct method finds the spline representation of a curve in a two- -dimensional plane using the function :obj:`splrep`. The -first two arguments are the only ones required, and these provide the -:math:`x` and :math:`y` components of the curve. The normal output is -a 3-tuple, :math:`\left(t,c,k\right)` , containing the knot-points, -:math:`t` , the coefficients :math:`c` and the order :math:`k` of the -spline. The default spline order is cubic, but this can be changed -with the input keyword, *k.* - -For curves in :math:`N` -dimensional space the function -:obj:`splprep` allows defining the curve -parametrically. For this function only 1 input argument is -required. This input is a list of :math:`N` -arrays representing the -curve in :math:`N` -dimensional space. The length of each array is the -number of curve points, and each array provides one component of the -:math:`N` -dimensional data point. The parameter variable is given -with the keword argument, *u,* which defaults to an equally-spaced -monotonic sequence between :math:`0` and :math:`1` . The default -output consists of two objects: a 3-tuple, :math:`\left(t,c,k\right)` -, containing the spline representation and the parameter variable -:math:`u.` - -The keyword argument, *s* , is used to specify the amount of smoothing -to perform during the spline fit. The default value of :math:`s` is -:math:`s=m-\sqrt{2m}` where :math:`m` is the number of data-points -being fit. 
Therefore, **if no smoothing is desired a value of** -:math:`\mathbf{s}=0` **should be passed to the routines.** - -Once the spline representation of the data has been determined, -functions are available for evaluating the spline -(:func:`splev`) and its derivatives -(:func:`splev`, :func:`spalde`) at any point -and the integral of the spline between any two points ( -:func:`splint`). In addition, for cubic splines ( :math:`k=3` -) with 8 or more knots, the roots of the spline can be estimated ( -:func:`sproot`). These functions are demonstrated in the -example that follows. - -.. plot:: - - >>> import numpy as np - >>> import matplotlib.pyplot as plt - >>> from scipy import interpolate - - Cubic-spline - - >>> x = np.arange(0,2*np.pi+np.pi/4,2*np.pi/8) - >>> y = np.sin(x) - >>> tck = interpolate.splrep(x,y,s=0) - >>> xnew = np.arange(0,2*np.pi,np.pi/50) - >>> ynew = interpolate.splev(xnew,tck,der=0) - - >>> plt.figure() - >>> plt.plot(x,y,'x',xnew,ynew,xnew,np.sin(xnew),x,y,'b') - >>> plt.legend(['Linear','Cubic Spline', 'True']) - >>> plt.axis([-0.05,6.33,-1.05,1.05]) - >>> plt.title('Cubic-spline interpolation') - >>> plt.show() - - Derivative of spline - - >>> yder = interpolate.splev(xnew,tck,der=1) - >>> plt.figure() - >>> plt.plot(xnew,yder,xnew,np.cos(xnew),'--') - >>> plt.legend(['Cubic Spline', 'True']) - >>> plt.axis([-0.05,6.33,-1.05,1.05]) - >>> plt.title('Derivative estimation from spline') - >>> plt.show() - - Integral of spline - - >>> def integ(x,tck,constant=-1): - >>> x = np.atleast_1d(x) - >>> out = np.zeros(x.shape, dtype=x.dtype) - >>> for n in xrange(len(out)): - >>> out[n] = interpolate.splint(0,x[n],tck) - >>> out += constant - >>> return out - >>> - >>> yint = integ(xnew,tck) - >>> plt.figure() - >>> plt.plot(xnew,yint,xnew,-np.cos(xnew),'--') - >>> plt.legend(['Cubic Spline', 'True']) - >>> plt.axis([-0.05,6.33,-1.05,1.05]) - >>> plt.title('Integral estimation from spline') - >>> plt.show() - - Roots of spline - - >>> print 
interpolate.sproot(tck) - [ 0. 3.1416] - - Parametric spline - - >>> t = np.arange(0,1.1,.1) - >>> x = np.sin(2*np.pi*t) - >>> y = np.cos(2*np.pi*t) - >>> tck,u = interpolate.splprep([x,y],s=0) - >>> unew = np.arange(0,1.01,0.01) - >>> out = interpolate.splev(unew,tck) - >>> plt.figure() - >>> plt.plot(x,y,'x',out[0],out[1],np.sin(2*np.pi*unew),np.cos(2*np.pi*unew),x,y,'b') - >>> plt.legend(['Linear','Cubic Spline', 'True']) - >>> plt.axis([-1.05,1.05,-1.05,1.05]) - >>> plt.title('Spline of parametrically-defined curve') - >>> plt.show() - -Spline interpolation in 1-d: Object-oriented (:class:`UnivariateSpline`) ------------------------------------------------------------------------------ - -The spline-fitting capabilities described above are also available via -an objected-oriented interface. The one dimensional splines are -objects of the `UnivariateSpline` class, and are created with the -:math:`x` and :math:`y` components of the curve provided as arguments -to the constructor. The class defines __call__, allowing the object -to be called with the x-axis values at which the spline should be -evaluated, returning the interpolated y-values. This is shown in -the example below for the subclass `InterpolatedUnivariateSpline`. -The methods :meth:`integral `, -:meth:`derivatives `, and -:meth:`roots ` methods are also available -on `UnivariateSpline` objects, allowing definite integrals, -derivatives, and roots to be computed for the spline. - -The UnivariateSpline class can also be used to smooth data by -providing a non-zero value of the smoothing parameter `s`, with the -same meaning as the `s` keyword of the :obj:`splrep` function -described above. This results in a spline that has fewer knots -than the number of data points, and hence is no longer strictly -an interpolating spline, but rather a smoothing spline. If this -is not desired, the `InterpolatedUnivariateSpline` class is available. 
-It is a subclass of `UnivariateSpline` that always passes through all -points (equivalent to forcing the smoothing parameter to 0). This -class is demonstrated in the example below. - -The `LSQUnivarateSpline` is the other subclass of `UnivarateSpline`. -It allows the user to specify the number and location of internal -knots as explicitly with the parameter `t`. This allows creation -of customized splines with non-linear spacing, to interpolate in -some domains and smooth in others, or change the character of the -spline. - - -.. plot:: - - >>> import numpy as np - >>> import matplotlib.pyplot as plt - >>> from scipy import interpolate - - InterpolatedUnivariateSpline - - >>> x = np.arange(0,2*np.pi+np.pi/4,2*np.pi/8) - >>> y = np.sin(x) - >>> s = interpolate.InterpolatedUnivariateSpline(x,y) - >>> xnew = np.arange(0,2*np.pi,np.pi/50) - >>> ynew = s(xnew) - - >>> plt.figure() - >>> plt.plot(x,y,'x',xnew,ynew,xnew,np.sin(xnew),x,y,'b') - >>> plt.legend(['Linear','InterpolatedUnivariateSpline', 'True']) - >>> plt.axis([-0.05,6.33,-1.05,1.05]) - >>> plt.title('InterpolatedUnivariateSpline') - >>> plt.show() - - LSQUnivarateSpline with non-uniform knots - - >>> t = [np.pi/2-.1,np.pi/2+.1,3*np.pi/2-.1,3*np.pi/2+.1] - >>> s = interpolate.LSQUnivariateSpline(x,y,t,k=2) - >>> ynew = s(xnew) - - >>> plt.figure() - >>> plt.plot(x,y,'x',xnew,ynew,xnew,np.sin(xnew),x,y,'b') - >>> plt.legend(['Linear','LSQUnivariateSpline', 'True']) - >>> plt.axis([-0.05,6.33,-1.05,1.05]) - >>> plt.title('Spline with Specified Interior Knots') - >>> plt.show() - - -Two-dimensional spline representation: Procedural (:func:`bisplrep`) --------------------------------------------------------------------- - -For (smooth) spline-fitting to a two dimensional surface, the function -:func:`bisplrep` is available. 
This function takes as required inputs -the **1-D** arrays *x*, *y*, and *z* which represent points on the -surface :math:`z=f\left(x,y\right).` The default output is a list -:math:`\left[tx,ty,c,kx,ky\right]` whose entries represent -respectively, the components of the knot positions, the coefficients -of the spline, and the order of the spline in each coordinate. It is -convenient to hold this list in a single object, *tck,* so that it can -be passed easily to the function :obj:`bisplev`. The -keyword, *s* , can be used to change the amount of smoothing performed -on the data while determining the appropriate spline. The default -value is :math:`s=m-\sqrt{2m}` where :math:`m` is the number of data -points in the *x, y,* and *z* vectors. As a result, if no smoothing is -desired, then :math:`s=0` should be passed to -:obj:`bisplrep` . - -To evaluate the two-dimensional spline and it's partial derivatives -(up to the order of the spline), the function -:obj:`bisplev` is required. This function takes as the -first two arguments **two 1-D arrays** whose cross-product specifies -the domain over which to evaluate the spline. The third argument is -the *tck* list returned from :obj:`bisplrep`. If desired, -the fourth and fifth arguments provide the orders of the partial -derivative in the :math:`x` and :math:`y` direction respectively. - -It is important to note that two dimensional interpolation should not -be used to find the spline representation of images. The algorithm -used is not amenable to large numbers of input points. The signal -processing toolbox contains more appropriate algorithms for finding -the spline representation of an image. The two dimensional -interpolation commands are intended for use when interpolating a two -dimensional function as shown in the example that follows. This -example uses the :obj:`mgrid ` command in SciPy which is -useful for defining a "mesh-grid "in many dimensions. 
(See also the -:obj:`ogrid ` command if the full-mesh is not -needed). The number of output arguments and the number of dimensions -of each argument is determined by the number of indexing objects -passed in :obj:`mgrid `. - -.. plot:: - - >>> import numpy as np - >>> from scipy import interpolate - >>> import matplotlib.pyplot as plt - - Define function over sparse 20x20 grid - - >>> x,y = np.mgrid[-1:1:20j,-1:1:20j] - >>> z = (x+y)*np.exp(-6.0*(x*x+y*y)) - - >>> plt.figure() - >>> plt.pcolor(x,y,z) - >>> plt.colorbar() - >>> plt.title("Sparsely sampled function.") - >>> plt.show() - - Interpolate function over new 70x70 grid - - >>> xnew,ynew = np.mgrid[-1:1:70j,-1:1:70j] - >>> tck = interpolate.bisplrep(x,y,z,s=0) - >>> znew = interpolate.bisplev(xnew[:,0],ynew[0,:],tck) - - >>> plt.figure() - >>> plt.pcolor(xnew,ynew,znew) - >>> plt.colorbar() - >>> plt.title("Interpolated function.") - >>> plt.show() - -.. :caption: Example of two-dimensional spline interpolation. - - -Two-dimensional spline representation: Object-oriented (:class:`BivariateSpline`) ---------------------------------------------------------------------------------- - -The :class:`BivariateSpline` class is the 2-dimensional analog of the -:class:`UnivariateSpline` class. It and its subclasses implement -the FITPACK functions described above in an object oriented fashion, -allowing objects to be instantiated that can be called to compute -the spline value by passing in the two coordinates as the two -arguments. - - -Using radial basis functions for smoothing/interpolation -======================================================== - -Radial basis functions can be used for smoothing/interpolating scattered -data in n-dimensions, but should be used with caution for extrapolation -outside of the observed data range. - -1-d Example ------------ - -This example compares the usage of the Rbf and UnivariateSpline classes -from the scipy.interpolate module. - -.. 
plot:: - - >>> import numpy as np - >>> from scipy.interpolate import Rbf, InterpolatedUnivariateSpline - >>> import matplotlib.pyplot as plt - - >>> # setup data - >>> x = np.linspace(0, 10, 9) - >>> y = np.sin(x) - >>> xi = np.linspace(0, 10, 101) - - >>> # use fitpack2 method - >>> ius = InterpolatedUnivariateSpline(x, y) - >>> yi = ius(xi) - - >>> plt.subplot(2, 1, 1) - >>> plt.plot(x, y, 'bo') - >>> plt.plot(xi, yi, 'g') - >>> plt.plot(xi, np.sin(xi), 'r') - >>> plt.title('Interpolation using univariate spline') - - >>> # use RBF method - >>> rbf = Rbf(x, y) - >>> fi = rbf(xi) - - >>> plt.subplot(2, 1, 2) - >>> plt.plot(x, y, 'bo') - >>> plt.plot(xi, fi, 'g') - >>> plt.plot(xi, np.sin(xi), 'r') - >>> plt.title('Interpolation using RBF - multiquadrics') - >>> plt.show() - -.. :caption: Example of one-dimensional RBF interpolation. - -2-d Example ------------ - -This example shows how to interpolate scattered 2d data. - -.. plot:: - - >>> import numpy as np - >>> from scipy.interpolate import Rbf - >>> import matplotlib.pyplot as plt - >>> from matplotlib import cm - - >>> # 2-d tests - setup scattered data - >>> x = np.random.rand(100)*4.0-2.0 - >>> y = np.random.rand(100)*4.0-2.0 - >>> z = x*np.exp(-x**2-y**2) - >>> ti = np.linspace(-2.0, 2.0, 100) - >>> XI, YI = np.meshgrid(ti, ti) - - >>> # use RBF - >>> rbf = Rbf(x, y, z, epsilon=2) - >>> ZI = rbf(XI, YI) - - >>> # plot the result - >>> n = plt.normalize(-2., 2.) - >>> plt.subplot(1, 1, 1) - >>> plt.pcolor(XI, YI, ZI, cmap=cm.jet) - >>> plt.scatter(x, y, 100, z, cmap=cm.jet) - >>> plt.title('RBF interpolation - multiquadrics') - >>> plt.xlim(-2, 2) - >>> plt.ylim(-2, 2) - >>> plt.colorbar() diff --git a/scipy-0.10.1/doc/source/tutorial/io.rst b/scipy-0.10.1/doc/source/tutorial/io.rst deleted file mode 100644 index 8c9f5be2f1..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/io.rst +++ /dev/null @@ -1,387 +0,0 @@ -File IO (:mod:`scipy.io`) -========================= - -.. sectionauthor:: Matthew Brett - -.. 
currentmodule:: scipy.io - -.. seealso:: :ref:`numpy-reference.routines.io` (in numpy) - -MATLAB files ------------- - -.. autosummary:: - :toctree: generated/ - - loadmat - savemat - -Getting started: - - >>> import scipy.io as sio - -If you are using IPython, try tab completing on ``sio``. You'll find:: - - sio.loadmat - sio.savemat - -These are the high-level functions you will most likely use. You'll -also find:: - - sio.matlab - -This is the package from which ``loadmat`` and ``savemat`` are imported. -Within ``sio.matlab``, you will find the ``mio`` module - containing -the machinery that ``loadmat`` and ``savemat`` use. From time to time -you may find yourself re-using this machinery. - -How do I start? -``````````````` - -You may have a ``.mat`` file that you want to read into Scipy. Or, you -want to pass some variables from Scipy / Numpy into MATLAB. - -To save us using a MATLAB license, let's start in Octave_. Octave has -MATLAB-compatible save / load functions. Start Octave (``octave`` at -the command line for me): - -.. sourcecode:: octave - - octave:1> a = 1:12 - a = - - 1 2 3 4 5 6 7 8 9 10 11 12 - - octave:2> a = reshape(a, [1 3 4]) - a = - - ans(:,:,1) = - - 1 2 3 - - ans(:,:,2) = - - 4 5 6 - - ans(:,:,3) = - - 7 8 9 - - ans(:,:,4) = - - 10 11 12 - - - - octave:3> save -6 octave_a.mat a % MATLAB 6 compatible - octave:4> ls octave_a.mat - octave_a.mat - -Now, to Python: - - >>> mat_contents = sio.loadmat('octave_a.mat') - >>> print mat_contents - {'a': array([[[ 1., 4., 7., 10.], - [ 2., 5., 8., 11.], - [ 3., 6., 9., 12.]]]), - '__version__': '1.0', - '__header__': 'MATLAB 5.0 MAT-file, written by - Octave 3.2.3, 2010-05-30 02:13:40 UTC', - '__globals__': []} - >>> oct_a = mat_contents['a'] - >>> print oct_a - [[[ 1. 4. 7. 10.] - [ 2. 5. 8. 11.] - [ 3. 6. 9. 
12.]]] - >>> print oct_a.shape - (1, 3, 4) - -Now let's try the other way round: - - >>> import numpy as np - >>> vect = np.arange(10) - >>> print vect.shape - (10,) - >>> sio.savemat('np_vector.mat', {'vect':vect}) - /Users/mb312/usr/local/lib/python2.6/site-packages/scipy/io/matlab/mio.py:196: FutureWarning: Using oned_as default value ('column') This will change to 'row' in future versions - oned_as=oned_as) - -Then back to Octave: - -.. sourcecode:: octave - - octave:5> load np_vector.mat - octave:6> vect - vect = - - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - - octave:7> size(vect) - ans = - - 10 1 - -Note the deprecation warning. The ``oned_as`` keyword determines the way in -which one-dimensional vectors are stored. In the future, this will default -to ``row`` instead of ``column``: - - >>> sio.savemat('np_vector.mat', {'vect':vect}, oned_as='row') - -We can load this in Octave or MATLAB: - -.. sourcecode:: octave - - octave:8> load np_vector.mat - octave:9> vect - vect = - - 0 1 2 3 4 5 6 7 8 9 - - octave:10> size(vect) - ans = - - 1 10 - - -MATLAB structs -`````````````` - -MATLAB structs are a little bit like Python dicts, except the field -names must be strings. Any MATLAB object can be a value of a field. As -for all objects in MATLAB, structs are in fact arrays of structs, where -a single struct is an array of shape (1, 1). - -.. 
sourcecode:: octave - - octave:11> my_struct = struct('field1', 1, 'field2', 2) - my_struct = - { - field1 = 1 - field2 = 2 - } - - octave:12> save -6 octave_struct.mat my_struct - -We can load this in Python: - - >>> mat_contents = sio.loadmat('octave_struct.mat') - >>> print mat_contents - {'my_struct': array([[([[1.0]], [[2.0]])]], - dtype=[('field1', '|O8'), ('field2', '|O8')]), '__version__': '1.0', '__header__': 'MATLAB 5.0 MAT-file, written by Octave 3.2.3, 2010-05-30 02:00:26 UTC', '__globals__': []} - >>> oct_struct = mat_contents['my_struct'] - >>> print oct_struct.shape - (1, 1) - >>> val = oct_struct[0,0] - >>> print val - ([[1.0]], [[2.0]]) - >>> print val['field1'] - [[ 1.]] - >>> print val['field2'] - [[ 2.]] - >>> print val.dtype - [('field1', '|O8'), ('field2', '|O8')] - -In this version of Scipy (0.8.0), MATLAB structs come back as numpy -structured arrays, with fields named for the struct fields. You can see -the field names in the ``dtype`` output above. Note also: - - >>> val = oct_struct[0,0] - -and: - -.. sourcecode:: octave - - octave:13> size(my_struct) - ans = - - 1 1 - -So, in MATLAB, the struct array must be at least 2D, and we replicate -that when we read into Scipy. If you want all length 1 dimensions -squeezed out, try this: - - >>> mat_contents = sio.loadmat('octave_struct.mat', squeeze_me=True) - >>> oct_struct = mat_contents['my_struct'] - >>> oct_struct.shape - () - -Sometimes, it's more convenient to load the MATLAB structs as python -objects rather than numpy structured arrarys - it can make the access -syntax in python a bit more similar to that in MATLAB. In order to do -this, use the ``struct_as_record=False`` parameter to ``loadmat``. 
- - >>> mat_contents = sio.loadmat('octave_struct.mat', struct_as_record=False) - >>> oct_struct = mat_contents['my_struct'] - >>> oct_struct[0,0].field1 - array([[ 1.]]) - -``struct_as_record=False`` works nicely with ``squeeze_me``: - - >>> mat_contents = sio.loadmat('octave_struct.mat', struct_as_record=False, squeeze_me=True) - >>> oct_struct = mat_contents['my_struct'] - >>> oct_struct.shape # but no - it's a scalar - Traceback (most recent call last): - File "", line 1, in - AttributeError: 'mat_struct' object has no attribute 'shape' - >>> print type(oct_struct) - - >>> print oct_struct.field1 - 1.0 - -Saving struct arrays can be done in various ways. One simple method is -to use dicts: - - >>> a_dict = {'field1': 0.5, 'field2': 'a string'} - >>> sio.savemat('saved_struct.mat', {'a_dict': a_dict}) - -loaded as: - -.. sourcecode:: octave - - octave:21> load saved_struct - octave:22> a_dict - a_dict = - { - field2 = a string - field1 = 0.50000 - } - -You can also save structs back again to MATLAB (or Octave in our case) -like this: - - >>> dt = [('f1', 'f8'), ('f2', 'S10')] - >>> arr = np.zeros((2,), dtype=dt) - >>> print arr - [(0.0, '') (0.0, '')] - >>> arr[0]['f1'] = 0.5 - >>> arr[0]['f2'] = 'python' - >>> arr[1]['f1'] = 99 - >>> arr[1]['f2'] = 'not perl' - >>> sio.savemat('np_struct_arr.mat', {'arr': arr}) - -MATLAB cell arrays -`````````````````` - -Cell arrays in MATLAB are rather like python lists, in the sense that -the elements in the arrays can contain any type of MATLAB object. In -fact they are most similar to numpy object arrays, and that is how we -load them into numpy. - -.. 
sourcecode:: octave - - octave:14> my_cells = {1, [2, 3]} - my_cells = - - { - [1,1] = 1 - [1,2] = - - 2 3 - - } - - octave:15> save -6 octave_cells.mat my_cells - -Back to Python: - - >>> mat_contents = sio.loadmat('octave_cells.mat') - >>> oct_cells = mat_contents['my_cells'] - >>> print oct_cells.dtype - object - >>> val = oct_cells[0,0] - >>> print val - [[ 1.]] - >>> print val.dtype - float64 - -Saving to a MATLAB cell array just involves making a numpy object array: - - >>> obj_arr = np.zeros((2,), dtype=np.object) - >>> obj_arr[0] = 1 - >>> obj_arr[1] = 'a string' - >>> print obj_arr - [1 a string] - >>> sio.savemat('np_cells.mat', {'obj_arr':obj_arr}) - -.. sourcecode:: octave - - octave:16> load np_cells.mat - octave:17> obj_arr - obj_arr = - - { - [1,1] = 1 - [2,1] = a string - } - -IDL files ---------- - -.. autosummary:: - :toctree: generated/ - - readsav - -Matrix Market files -------------------- - -.. autosummary:: - :toctree: generated/ - - mminfo - mmread - mmwrite - -Other ------ - -.. autosummary:: - :toctree: generated/ - - save_as_module - -Wav sound files (:mod:`scipy.io.wavfile`) ------------------------------------------ - -.. module:: scipy.io.wavfile - -.. autosummary:: - :toctree: generated/ - - read - write - -Arff files (:mod:`scipy.io.arff`) ---------------------------------- - -.. automodule:: scipy.io.arff - -.. autosummary:: - :toctree: generated/ - - loadarff - -Netcdf (:mod:`scipy.io.netcdf`) -------------------------------- - -.. module:: scipy.io.netcdf - -.. autosummary:: - :toctree: generated/ - - netcdf_file - -Allows reading of NetCDF files (version of pupynere_ package) - -.. _pupynere: http://pypi.python.org/pypi/pupynere/ -.. _octave: http://www.gnu.org/software/octave -.. 
_matlab: http://www.mathworks.com/ diff --git a/scipy-0.10.1/doc/source/tutorial/linalg.rst b/scipy-0.10.1/doc/source/tutorial/linalg.rst deleted file mode 100644 index 182d85ef38..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/linalg.rst +++ /dev/null @@ -1,858 +0,0 @@ -Linear Algebra (`scipy.linalg`) -=============================== - -.. sectionauthor:: Travis E. Oliphant - -.. currentmodule: scipy - -When SciPy is built using the optimized ATLAS LAPACK and BLAS -libraries, it has very fast linear algebra capabilities. If you dig -deep enough, all of the raw lapack and blas libraries are available -for your use for even more speed. In this section, some easier-to-use -interfaces to these routines are described. - -All of these linear algebra routines expect an object that can be -converted into a 2-dimensional array. The output of these routines is -also a two-dimensional array. There is a matrix class defined in -Numpy, which you can initialize with an appropriate Numpy array in -order to get objects for which multiplication is matrix-multiplication -instead of the default, element-by-element multiplication. - - -Matrix Class ------------- - -The matrix class is initialized with the SciPy command :obj:`mat` -which is just convenient short-hand for :class:`matrix -`. If you are going to be doing a lot of matrix-math, it -is convenient to convert arrays into matrices using this command. One -advantage of using the :func:`mat` command is that you can enter -two-dimensional matrices using MATLAB-like syntax with commas or -spaces separating columns and semicolons separting rows as long as the -matrix is placed in a string passed to :obj:`mat` . - - -Basic routines --------------- - - -Finding Inverse -^^^^^^^^^^^^^^^ - -The inverse of a matrix :math:`\mathbf{A}` is the matrix -:math:`\mathbf{B}` such that :math:`\mathbf{AB}=\mathbf{I}` where -:math:`\mathbf{I}` is the identity matrix consisting of ones down the -main diagonal. 
Usually :math:`\mathbf{B}` is denoted -:math:`\mathbf{B}=\mathbf{A}^{-1}` . In SciPy, the matrix inverse of -the Numpy array, A, is obtained using :obj:`linalg.inv` ``(A)`` , or -using ``A.I`` if ``A`` is a Matrix. For example, let - -.. math:: - :nowrap: - - \[ \mathbf{A=}\left[\begin{array}{ccc} 1 & 3 & 5\\ 2 & 5 & 1\\ 2 & 3 & 8\end{array}\right]\] - -then - -.. math:: - :nowrap: - - \[ \mathbf{A^{-1}=\frac{1}{25}\left[\begin{array}{ccc} -37 & 9 & 22\\ 14 & 2 & -9\\ 4 & -3 & 1\end{array}\right]=\left[\begin{array}{ccc} -1.48 & 0.36 & 0.88\\ 0.56 & 0.08 & -0.36\\ 0.16 & -0.12 & 0.04\end{array}\right].}\] - -The following example demonstrates this computation in SciPy - - >>> A = mat('[1 3 5; 2 5 1; 2 3 8]') - >>> A - matrix([[1, 3, 5], - [2, 5, 1], - [2, 3, 8]]) - >>> A.I - matrix([[-1.48, 0.36, 0.88], - [ 0.56, 0.08, -0.36], - [ 0.16, -0.12, 0.04]]) - >>> from scipy import linalg - >>> linalg.inv(A) - array([[-1.48, 0.36, 0.88], - [ 0.56, 0.08, -0.36], - [ 0.16, -0.12, 0.04]]) - -Solving linear system -^^^^^^^^^^^^^^^^^^^^^ - -Solving linear systems of equations is straightforward using the scipy -command :obj:`linalg.solve`. This command expects an input matrix and -a right-hand-side vector. The solution vector is then computed. An -option for entering a symmetrix matrix is offered which can speed up -the processing when applicable. As an example, suppose it is desired -to solve the following simultaneous equations: - -.. math:: - :nowrap: - - \begin{eqnarray*} x+3y+5z & = & 10\\ 2x+5y+z & = & 8\\ 2x+3y+8z & = & 3\end{eqnarray*} - -We could find the solution vector using a matrix inverse: - -.. 
math:: - :nowrap: - - \[ \left[\begin{array}{c} x\\ y\\ z\end{array}\right]=\left[\begin{array}{ccc} 1 & 3 & 5\\ 2 & 5 & 1\\ 2 & 3 & 8\end{array}\right]^{-1}\left[\begin{array}{c} 10\\ 8\\ 3\end{array}\right]=\frac{1}{25}\left[\begin{array}{c} -232\\ 129\\ 19\end{array}\right]=\left[\begin{array}{c} -9.28\\ 5.16\\ 0.76\end{array}\right].\] - -However, it is better to use the linalg.solve command which can be -faster and more numerically stable. In this case it however gives the -same answer as shown in the following example: - - >>> A = mat('[1 3 5; 2 5 1; 2 3 8]') - >>> b = mat('[10;8;3]') - >>> A.I*b - matrix([[-9.28], - [ 5.16], - [ 0.76]]) - >>> linalg.solve(A,b) - array([[-9.28], - [ 5.16], - [ 0.76]]) - - -Finding Determinant -^^^^^^^^^^^^^^^^^^^ - -The determinant of a square matrix :math:`\mathbf{A}` is often denoted -:math:`\left|\mathbf{A}\right|` and is a quantity often used in linear -algebra. Suppose :math:`a_{ij}` are the elements of the matrix -:math:`\mathbf{A}` and let :math:`M_{ij}=\left|\mathbf{A}_{ij}\right|` -be the determinant of the matrix left by removing the -:math:`i^{\textrm{th}}` row and :math:`j^{\textrm{th}}` column from -:math:`\mathbf{A}` . Then for any row :math:`i,` - -.. math:: - :nowrap: - - \[ \left|\mathbf{A}\right|=\sum_{j}\left(-1\right)^{i+j}a_{ij}M_{ij}.\] - -This is a recursive way to define the determinant where the base case -is defined by accepting that the determinant of a :math:`1\times1` matrix is the only matrix element. In SciPy the determinant can be -calculated with :obj:`linalg.det` . For example, the determinant of - -.. math:: - :nowrap: - - \[ \mathbf{A=}\left[\begin{array}{ccc} 1 & 3 & 5\\ 2 & 5 & 1\\ 2 & 3 & 8\end{array}\right]\] - -is - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \left|\mathbf{A}\right| & = & 1\left|\begin{array}{cc} 5 & 1\\ 3 & 8\end{array}\right|-3\left|\begin{array}{cc} 2 & 1\\ 2 & 8\end{array}\right|+5\left|\begin{array}{cc} 2 & 5\\ 2 & 3\end{array}\right|\\ & = & 1\left(5\cdot8-3\cdot1\right)-3\left(2\cdot8-2\cdot1\right)+5\left(2\cdot3-2\cdot5\right)=-25.\end{eqnarray*} - -In SciPy this is computed as shown in this example: - - >>> A = mat('[1 3 5; 2 5 1; 2 3 8]') - >>> linalg.det(A) - -25.000000000000004 - - -Computing norms -^^^^^^^^^^^^^^^ - -Matrix and vector norms can also be computed with SciPy. A wide range -of norm definitions are available using different parameters to the -order argument of :obj:`linalg.norm` . This function takes a rank-1 -(vectors) or a rank-2 (matrices) array and an optional order argument -(default is 2). Based on these inputs a vector or matrix norm of the -requested order is computed. - -For vector *x* , the order parameter can be any real number including -``inf`` or ``-inf``. The computed norm is - -.. math:: - :nowrap: - - \[ \left\Vert \mathbf{x}\right\Vert =\left\{ \begin{array}{cc} \max\left|x_{i}\right| & \textrm{ord}=\textrm{inf}\\ \min\left|x_{i}\right| & \textrm{ord}=-\textrm{inf}\\ \left(\sum_{i}\left|x_{i}\right|^{\textrm{ord}}\right)^{1/\textrm{ord}} & \left|\textrm{ord}\right|<\infty.\end{array}\right.\] - - - -For matrix :math:`\mathbf{A}` the only valid values for norm are :math:`\pm2,\pm1,` :math:`\pm` inf, and 'fro' (or 'f') Thus, - -.. 
math:: - :nowrap: - - \[ \left\Vert \mathbf{A}\right\Vert =\left\{ \begin{array}{cc} \max_{i}\sum_{j}\left|a_{ij}\right| & \textrm{ord}=\textrm{inf}\\ \min_{i}\sum_{j}\left|a_{ij}\right| & \textrm{ord}=-\textrm{inf}\\ \max_{j}\sum_{i}\left|a_{ij}\right| & \textrm{ord}=1\\ \min_{j}\sum_{i}\left|a_{ij}\right| & \textrm{ord}=-1\\ \max\sigma_{i} & \textrm{ord}=2\\ \min\sigma_{i} & \textrm{ord}=-2\\ \sqrt{\textrm{trace}\left(\mathbf{A}^{H}\mathbf{A}\right)} & \textrm{ord}=\textrm{'fro'}\end{array}\right.\] - -where :math:`\sigma_{i}` are the singular values of :math:`\mathbf{A}` . - - -Solving linear least-squares problems and pseudo-inverses -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Linear least-squares problems occur in many branches of applied -mathematics. In this problem a set of linear scaling coefficients is -sought that allow a model to fit data. In particular it is assumed -that data :math:`y_{i}` is related to data :math:`\mathbf{x}_{i}` -through a set of coefficients :math:`c_{j}` and model functions -:math:`f_{j}\left(\mathbf{x}_{i}\right)` via the model - -.. math:: - :nowrap: - - \[ y_{i}=\sum_{j}c_{j}f_{j}\left(\mathbf{x}_{i}\right)+\epsilon_{i}\] - -where :math:`\epsilon_{i}` represents uncertainty in the data. The -strategy of least squares is to pick the coefficients :math:`c_{j}` to -minimize - -.. math:: - :nowrap: - - \[ J\left(\mathbf{c}\right)=\sum_{i}\left|y_{i}-\sum_{j}c_{j}f_{j}\left(x_{i}\right)\right|^{2}.\] - - - -Theoretically, a global minimum will occur when - -.. math:: - :nowrap: - - \[ \frac{\partial J}{\partial c_{n}^{*}}=0=\sum_{i}\left(y_{i}-\sum_{j}c_{j}f_{j}\left(x_{i}\right)\right)\left(-f_{n}^{*}\left(x_{i}\right)\right)\] - -or - -.. math:: - :nowrap: - - \begin{eqnarray*} \sum_{j}c_{j}\sum_{i}f_{j}\left(x_{i}\right)f_{n}^{*}\left(x_{i}\right) & = & \sum_{i}y_{i}f_{n}^{*}\left(x_{i}\right)\\ \mathbf{A}^{H}\mathbf{Ac} & = & \mathbf{A}^{H}\mathbf{y}\end{eqnarray*} - -where - -.. 
math:: - :nowrap: - - \[ \left\{ \mathbf{A}\right\} _{ij}=f_{j}\left(x_{i}\right).\] - -When :math:`\mathbf{A^{H}A}` is invertible, then - -.. math:: - :nowrap: - - \[ \mathbf{c}=\left(\mathbf{A}^{H}\mathbf{A}\right)^{-1}\mathbf{A}^{H}\mathbf{y}=\mathbf{A}^{\dagger}\mathbf{y}\] - -where :math:`\mathbf{A}^{\dagger}` is called the pseudo-inverse of -:math:`\mathbf{A}.` Notice that using this definition of -:math:`\mathbf{A}` the model can be written - -.. math:: - :nowrap: - - \[ \mathbf{y}=\mathbf{Ac}+\boldsymbol{\epsilon}.\] - -The command :obj:`linalg.lstsq` will solve the linear least squares -problem for :math:`\mathbf{c}` given :math:`\mathbf{A}` and -:math:`\mathbf{y}` . In addition :obj:`linalg.pinv` or -:obj:`linalg.pinv2` (uses a different method based on singular value -decomposition) will find :math:`\mathbf{A}^{\dagger}` given -:math:`\mathbf{A}.` - -The following example and figure demonstrate the use of -:obj:`linalg.lstsq` and :obj:`linalg.pinv` for solving a data-fitting -problem. The data shown below were generated using the model: - -.. math:: - :nowrap: - - \[ y_{i}=c_{1}e^{-x_{i}}+c_{2}x_{i}\] - -where :math:`x_{i}=0.1i` for :math:`i=1\ldots10` , :math:`c_{1}=5` , -and :math:`c_{2}=4.` Noise is added to :math:`y_{i}` and the -coefficients :math:`c_{1}` and :math:`c_{2}` are estimated using -linear least squares. - -.. plot:: - - >>> from numpy import * - >>> from scipy import linalg - >>> import matplotlib.pyplot as plt - - >>> c1,c2= 5.0,2.0 - >>> i = r_[1:11] - >>> xi = 0.1*i - >>> yi = c1*exp(-xi)+c2*xi - >>> zi = yi + 0.05*max(yi)*random.randn(len(yi)) - - >>> A = c_[exp(-xi)[:,newaxis],xi[:,newaxis]] - >>> c,resid,rank,sigma = linalg.lstsq(A,zi) - - >>> xi2 = r_[0.1:1.0:100j] - >>> yi2 = c[0]*exp(-xi2) + c[1]*xi2 - - >>> plt.plot(xi,zi,'x',xi2,yi2) - >>> plt.axis([0,1.1,3.0,5.5]) - >>> plt.xlabel('$x_i$') - >>> plt.title('Data fitting with linalg.lstsq') - >>> plt.show() - -.. 
:caption: Example of linear least-squares fit - -Generalized inverse -^^^^^^^^^^^^^^^^^^^ - -The generalized inverse is calculated using the command -:obj:`linalg.pinv` or :obj:`linalg.pinv2`. These two commands differ -in how they compute the generalized inverse. The first uses the -linalg.lstsq algorithm while the second uses singular value -decomposition. Let :math:`\mathbf{A}` be an :math:`M\times N` matrix, -then if :math:`M>N` the generalized inverse is - -.. math:: - :nowrap: - - \[ \mathbf{A}^{\dagger}=\left(\mathbf{A}^{H}\mathbf{A}\right)^{-1}\mathbf{A}^{H}\] - -while if :math:`M>> from scipy import linalg - >>> A = mat('[1 5 2; 2 4 1; 3 6 2]') - >>> la,v = linalg.eig(A) - >>> l1,l2,l3 = la - >>> print l1, l2, l3 - (7.95791620491+0j) (-1.25766470568+0j) (0.299748500767+0j) - - >>> print v[:,0] - [-0.5297175 -0.44941741 -0.71932146] - >>> print v[:,1] - [-0.90730751 0.28662547 0.30763439] - >>> print v[:,2] - [ 0.28380519 -0.39012063 0.87593408] - >>> print sum(abs(v**2),axis=0) - [ 1. 1. 1.] - - >>> v1 = mat(v[:,0]).T - >>> print max(ravel(abs(A*v1-l1*v1))) - 8.881784197e-16 - - -Singular value decomposition -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Singular Value Decompostion (SVD) can be thought of as an extension of -the eigenvalue problem to matrices that are not square. Let -:math:`\mathbf{A}` be an :math:`M\times N` matrix with :math:`M` and -:math:`N` arbitrary. The matrices :math:`\mathbf{A}^{H}\mathbf{A}` and -:math:`\mathbf{A}\mathbf{A}^{H}` are square hermitian matrices [#]_ of -size :math:`N\times N` and :math:`M\times M` respectively. It is known -that the eigenvalues of square hermitian matrices are real and -non-negative. 
In addtion, there are at most -:math:`\min\left(M,N\right)` identical non-zero eigenvalues of -:math:`\mathbf{A}^{H}\mathbf{A}` and :math:`\mathbf{A}\mathbf{A}^{H}.` -Define these positive eigenvalues as :math:`\sigma_{i}^{2}.` The -square-root of these are called singular values of :math:`\mathbf{A}.` -The eigenvectors of :math:`\mathbf{A}^{H}\mathbf{A}` are collected by -columns into an :math:`N\times N` unitary [#]_ matrix -:math:`\mathbf{V}` while the eigenvectors of -:math:`\mathbf{A}\mathbf{A}^{H}` are collected by columns in the -unitary matrix :math:`\mathbf{U}` , the singular values are collected -in an :math:`M\times N` zero matrix -:math:`\mathbf{\boldsymbol{\Sigma}}` with main diagonal entries set to -the singular values. Then - -.. math:: - :nowrap: - - \[ \mathbf{A=U}\boldsymbol{\Sigma}\mathbf{V}^{H}\] - -is the singular-value decomposition of :math:`\mathbf{A}.` Every -matrix has a singular value decomposition. Sometimes, the singular -values are called the spectrum of :math:`\mathbf{A}.` The command -:obj:`linalg.svd` will return :math:`\mathbf{U}` , -:math:`\mathbf{V}^{H}` , and :math:`\sigma_{i}` as an array of the -singular values. To obtain the matrix :math:`\mathbf{\Sigma}` use -:obj:`linalg.diagsvd`. The following example illustrates the use of -:obj:`linalg.svd` . - - >>> A = mat('[1 3 2; 1 2 3]') - >>> M,N = A.shape - >>> U,s,Vh = linalg.svd(A) - >>> Sig = mat(linalg.diagsvd(s,M,N)) - >>> U, Vh = mat(U), mat(Vh) - >>> print U - [[-0.70710678 -0.70710678] - [-0.70710678 0.70710678]] - >>> print Sig - [[ 5.19615242 0. 0. ] - [ 0. 1. 0. ]] - >>> print Vh - [[ -2.72165527e-01 -6.80413817e-01 -6.80413817e-01] - [ -6.18652536e-16 -7.07106781e-01 7.07106781e-01] - [ -9.62250449e-01 1.92450090e-01 1.92450090e-01]] - - >>> print A - [[1 3 2] - [1 2 3]] - >>> print U*Sig*Vh - [[ 1. 3. 2.] - [ 1. 2. 3.]] - -.. [#] A hermitian matrix :math:`\mathbf{D}` satisfies :math:`\mathbf{D}^{H}=\mathbf{D}.` - -.. 
[#] A unitary matrix :math:`\mathbf{D}` satisfies :math:`\mathbf{D}^{H}\mathbf{D}=\mathbf{I}=\mathbf{D}\mathbf{D}^{H}` so that :math:`\mathbf{D}^{-1}=\mathbf{D}^{H}.` - - -LU decomposition -^^^^^^^^^^^^^^^^ - -The LU decompostion finds a representation for the :math:`M\times N` matrix :math:`\mathbf{A}` as - -.. math:: - :nowrap: - - \[ \mathbf{A}=\mathbf{PLU}\] - -where :math:`\mathbf{P}` is an :math:`M\times M` permutation matrix (a -permutation of the rows of the identity matrix), :math:`\mathbf{L}` is -in :math:`M\times K` lower triangular or trapezoidal matrix ( -:math:`K=\min\left(M,N\right)` ) with unit-diagonal, and -:math:`\mathbf{U}` is an upper triangular or trapezoidal matrix. The -SciPy command for this decomposition is :obj:`linalg.lu` . - -Such a decomposition is often useful for solving many simultaneous -equations where the left-hand-side does not change but the right hand -side does. For example, suppose we are going to solve - -.. math:: - :nowrap: - - \[ \mathbf{A}\mathbf{x}_{i}=\mathbf{b}_{i}\] - -for many different :math:`\mathbf{b}_{i}` . The LU decomposition allows this to be written as - -.. math:: - :nowrap: - - \[ \mathbf{PLUx}_{i}=\mathbf{b}_{i}.\] - -Because :math:`\mathbf{L}` is lower-triangular, the equation can be -solved for :math:`\mathbf{U}\mathbf{x}_{i}` and finally -:math:`\mathbf{x}_{i}` very rapidly using forward- and -back-substitution. An initial time spent factoring :math:`\mathbf{A}` -allows for very rapid solution of similar systems of equations in the -future. If the intent for performing LU decomposition is for solving -linear systems then the command :obj:`linalg.lu_factor` should be used -followed by repeated applications of the command -:obj:`linalg.lu_solve` to solve the system for each new -right-hand-side. - - -Cholesky decomposition -^^^^^^^^^^^^^^^^^^^^^^ - -Cholesky decomposition is a special case of LU decomposition -applicable to Hermitian positive definite matrices. 
When -:math:`\mathbf{A}=\mathbf{A}^{H}` and -:math:`\mathbf{x}^{H}\mathbf{Ax}\geq0` for all :math:`\mathbf{x}` , -then decompositions of :math:`\mathbf{A}` can be found so that - -.. math:: - :nowrap: - - \begin{eqnarray*} \mathbf{A} & = & \mathbf{U}^{H}\mathbf{U}\\ \mathbf{A} & = & \mathbf{L}\mathbf{L}^{H}\end{eqnarray*} - -where :math:`\mathbf{L}` is lower-triangular and :math:`\mathbf{U}` is -upper triangular. Notice that :math:`\mathbf{L}=\mathbf{U}^{H}.` The -command :obj:`linagl.cholesky` computes the cholesky -factorization. For using cholesky factorization to solve systems of -equations there are also :obj:`linalg.cho_factor` and -:obj:`linalg.cho_solve` routines that work similarly to their LU -decomposition counterparts. - - -QR decomposition -^^^^^^^^^^^^^^^^ - -The QR decomposition (sometimes called a polar decomposition) works -for any :math:`M\times N` array and finds an :math:`M\times M` unitary -matrix :math:`\mathbf{Q}` and an :math:`M\times N` upper-trapezoidal -matrix :math:`\mathbf{R}` such that - -.. math:: - :nowrap: - - \[ \mathbf{A=QR}.\] - -Notice that if the SVD of :math:`\mathbf{A}` is known then the QR decomposition can be found - -.. math:: - :nowrap: - - \[ \mathbf{A}=\mathbf{U}\boldsymbol{\Sigma}\mathbf{V}^{H}=\mathbf{QR}\] - -implies that :math:`\mathbf{Q}=\mathbf{U}` and -:math:`\mathbf{R}=\boldsymbol{\Sigma}\mathbf{V}^{H}.` Note, however, -that in SciPy independent algorithms are used to find QR and SVD -decompositions. The command for QR decomposition is :obj:`linalg.qr` . - - -Schur decomposition -^^^^^^^^^^^^^^^^^^^ - -For a square :math:`N\times N` matrix, :math:`\mathbf{A}` , the Schur -decomposition finds (not-necessarily unique) matrices -:math:`\mathbf{T}` and :math:`\mathbf{Z}` such that - -.. 
math:: - :nowrap: - - \[ \mathbf{A}=\mathbf{ZT}\mathbf{Z}^{H}\] - -where :math:`\mathbf{Z}` is a unitary matrix and :math:`\mathbf{T}` is -either upper-triangular or quasi-upper triangular depending on whether -or not a real schur form or complex schur form is requested. For a -real schur form both :math:`\mathbf{T}` and :math:`\mathbf{Z}` are -real-valued when :math:`\mathbf{A}` is real-valued. When -:math:`\mathbf{A}` is a real-valued matrix the real schur form is only -quasi-upper triangular because :math:`2\times2` blocks extrude from -the main diagonal corresponding to any complex- valued -eigenvalues. The command :obj:`linalg.schur` finds the Schur -decomposition while the command :obj:`linalg.rsf2csf` converts -:math:`\mathbf{T}` and :math:`\mathbf{Z}` from a real Schur form to a -complex Schur form. The Schur form is especially useful in calculating -functions of matrices. - -The following example illustrates the schur decomposition: - - >>> from scipy import linalg - >>> A = mat('[1 3 2; 1 4 5; 2 3 6]') - >>> T,Z = linalg.schur(A) - >>> T1,Z1 = linalg.schur(A,'complex') - >>> T2,Z2 = linalg.rsf2csf(T,Z) - >>> print T - [[ 9.90012467 1.78947961 -0.65498528] - [ 0. 0.54993766 -1.57754789] - [ 0. 
0.51260928 0.54993766]] - >>> print T2 - [[ 9.90012467 +0.00000000e+00j -0.32436598 +1.55463542e+00j - -0.88619748 +5.69027615e-01j] - [ 0.00000000 +0.00000000e+00j 0.54993766 +8.99258408e-01j - 1.06493862 +1.37016050e-17j] - [ 0.00000000 +0.00000000e+00j 0.00000000 +0.00000000e+00j - 0.54993766 -8.99258408e-01j]] - >>> print abs(T1-T2) # different - [[ 1.24357637e-14 2.09205364e+00 6.56028192e-01] - [ 0.00000000e+00 4.00296604e-16 1.83223097e+00] - [ 0.00000000e+00 0.00000000e+00 4.57756680e-16]] - >>> print abs(Z1-Z2) # different - [[ 0.06833781 1.10591375 0.23662249] - [ 0.11857169 0.5585604 0.29617525] - [ 0.12624999 0.75656818 0.22975038]] - >>> T,Z,T1,Z1,T2,Z2 = map(mat,(T,Z,T1,Z1,T2,Z2)) - >>> print abs(A-Z*T*Z.H) # same - [[ 1.11022302e-16 4.44089210e-16 4.44089210e-16] - [ 4.44089210e-16 1.33226763e-15 8.88178420e-16] - [ 8.88178420e-16 4.44089210e-16 2.66453526e-15]] - >>> print abs(A-Z1*T1*Z1.H) # same - [[ 1.00043248e-15 2.22301403e-15 5.55749485e-15] - [ 2.88899660e-15 8.44927041e-15 9.77322008e-15] - [ 3.11291538e-15 1.15463228e-14 1.15464861e-14]] - >>> print abs(A-Z2*T2*Z2.H) # same - [[ 3.34058710e-16 8.88611201e-16 4.18773089e-18] - [ 1.48694940e-16 8.95109973e-16 8.92966151e-16] - [ 1.33228956e-15 1.33582317e-15 3.55373104e-15]] - -Matrix Functions ----------------- - -Consider the function :math:`f\left(x\right)` with Taylor series expansion - -.. math:: - :nowrap: - - \[ f\left(x\right)=\sum_{k=0}^{\infty}\frac{f^{\left(k\right)}\left(0\right)}{k!}x^{k}.\] - -A matrix function can be defined using this Taylor series for the -square matrix :math:`\mathbf{A}` as - -.. math:: - :nowrap: - - \[ f\left(\mathbf{A}\right)=\sum_{k=0}^{\infty}\frac{f^{\left(k\right)}\left(0\right)}{k!}\mathbf{A}^{k}.\] - -While, this serves as a useful representation of a matrix function, it -is rarely the best way to calculate a matrix function. 
- - -Exponential and logarithm functions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The matrix exponential is one of the more common matrix functions. It -can be defined for square matrices as - -.. math:: - :nowrap: - - \[ e^{\mathbf{A}}=\sum_{k=0}^{\infty}\frac{1}{k!}\mathbf{A}^{k}.\] - -The command :obj:`linalg.expm3` uses this Taylor series definition to compute the matrix exponential. -Due to poor convergence properties it is not often used. - -Another method to compute the matrix exponential is to find an -eigenvalue decomposition of :math:`\mathbf{A}` : - -.. math:: - :nowrap: - - \[ \mathbf{A}=\mathbf{V}\boldsymbol{\Lambda}\mathbf{V}^{-1}\] - -and note that - -.. math:: - :nowrap: - - \[ e^{\mathbf{A}}=\mathbf{V}e^{\boldsymbol{\Lambda}}\mathbf{V}^{-1}\] - -where the matrix exponential of the diagonal matrix :math:`\boldsymbol{\Lambda}` is just the exponential of its elements. This method is implemented in :obj:`linalg.expm2` . - -The preferred method for implementing the matrix exponential is to use -scaling and a Padé approximation for :math:`e^{x}` . This algorithm is -implemented as :obj:`linalg.expm` . - -The inverse of the matrix exponential is the matrix logarithm defined -as the inverse of the matrix exponential. - -.. math:: - :nowrap: - - \[ \mathbf{A}\equiv\exp\left(\log\left(\mathbf{A}\right)\right).\] - -The matrix logarithm can be obtained with :obj:`linalg.logm` . - - -Trigonometric functions -^^^^^^^^^^^^^^^^^^^^^^^ - -The trigonometric functions :math:`\sin` , :math:`\cos` , and -:math:`\tan` are implemented for matrices in :func:`linalg.sinm`, -:func:`linalg.cosm`, and :obj:`linalg.tanm` respectively. The matrix -sin and cosine can be defined using Euler's identity as - -.. math:: - :nowrap: - - \begin{eqnarray*} \sin\left(\mathbf{A}\right) & = & \frac{e^{j\mathbf{A}}-e^{-j\mathbf{A}}}{2j}\\ \cos\left(\mathbf{A}\right) & = & \frac{e^{j\mathbf{A}}+e^{-j\mathbf{A}}}{2}.\end{eqnarray*} - -The tangent is - -.. 
math:: - :nowrap: - - \[ \tan\left(x\right)=\frac{\sin\left(x\right)}{\cos\left(x\right)}=\left[\cos\left(x\right)\right]^{-1}\sin\left(x\right)\] - -and so the matrix tangent is defined as - -.. math:: - :nowrap: - - \[ \left[\cos\left(\mathbf{A}\right)\right]^{-1}\sin\left(\mathbf{A}\right).\] - - - - -Hyperbolic trigonometric functions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The hyperbolic trigonemetric functions :math:`\sinh` , :math:`\cosh` , -and :math:`\tanh` can also be defined for matrices using the familiar -definitions: - -.. math:: - :nowrap: - - \begin{eqnarray*} \sinh\left(\mathbf{A}\right) & = & \frac{e^{\mathbf{A}}-e^{-\mathbf{A}}}{2}\\ \cosh\left(\mathbf{A}\right) & = & \frac{e^{\mathbf{A}}+e^{-\mathbf{A}}}{2}\\ \tanh\left(\mathbf{A}\right) & = & \left[\cosh\left(\mathbf{A}\right)\right]^{-1}\sinh\left(\mathbf{A}\right).\end{eqnarray*} - -These matrix functions can be found using :obj:`linalg.sinhm`, -:obj:`linalg.coshm` , and :obj:`linalg.tanhm`. - - -Arbitrary function -^^^^^^^^^^^^^^^^^^ - -Finally, any arbitrary function that takes one complex number and -returns a complex number can be called as a matrix function using the -command :obj:`linalg.funm`. This command takes the matrix and an -arbitrary Python function. It then implements an algorithm from Golub -and Van Loan's book "Matrix Computations "to compute function applied -to the matrix using a Schur decomposition. Note that *the function -needs to accept complex numbers* as input in order to work with this -algorithm. For example the following code computes the zeroth-order -Bessel function applied to a matrix. 
- - >>> from scipy import special, random, linalg - >>> A = random.rand(3,3) - >>> B = linalg.funm(A,lambda x: special.jv(0,x)) - >>> print A - [[ 0.72578091 0.34105276 0.79570345] - [ 0.65767207 0.73855618 0.541453 ] - [ 0.78397086 0.68043507 0.4837898 ]] - >>> print B - [[ 0.72599893 -0.20545711 -0.22721101] - [-0.27426769 0.77255139 -0.23422637] - [-0.27612103 -0.21754832 0.7556849 ]] - >>> print linalg.eigvals(A) - [ 1.91262611+0.j 0.21846476+0.j -0.18296399+0.j] - >>> print special.jv(0, linalg.eigvals(A)) - [ 0.27448286+0.j 0.98810383+0.j 0.99164854+0.j] - >>> print linalg.eigvals(B) - [ 0.27448286+0.j 0.98810383+0.j 0.99164854+0.j] - -Note how, by virtue of how matrix analytic functions are defined, -the Bessel function has acted on the matrix eigenvalues. - - -Special matrices ----------------- - -SciPy and NumPy provide several functions for creating special matrices -that are frequently used in engineering and science. - -==================== ========================= ========================================================= -Type Function Description -==================== ========================= ========================================================= -block diagonal `scipy.linalg.block_diag` Create a block diagonal matrix from the provided arrays. --------------------- ------------------------- --------------------------------------------------------- -circulant `scipy.linalg.circulant` Construct a circulant matrix. --------------------- ------------------------- --------------------------------------------------------- -companion `scipy.linalg.companion` Create a companion matrix. --------------------- ------------------------- --------------------------------------------------------- -Hadamard `scipy.linalg.hadamard` Construct a Hadamard matrix. --------------------- ------------------------- --------------------------------------------------------- -Hankel `scipy.linalg.hankel` Construct a Hankel matrix. 
--------------------- ------------------------- --------------------------------------------------------- -Hilbert `scipy.linalg.hilbert` Construct a Hilbert matrix. --------------------- ------------------------- --------------------------------------------------------- -Inverse Hilbert `scipy.linalg.invhilbert` Construct the inverse of a Hilbert matrix. --------------------- ------------------------- --------------------------------------------------------- -Leslie `scipy.linalg.leslie` Create a Leslie matrix. --------------------- ------------------------- --------------------------------------------------------- -Toeplitz `scipy.linalg.toeplitz` Construct a Toeplitz matrix. --------------------- ------------------------- --------------------------------------------------------- -Van der Monde `numpy.vander` Generate a Van der Monde matrix. -==================== ========================= ========================================================= - - -For examples of the use of these functions, see their respective docstrings. diff --git a/scipy-0.10.1/doc/source/tutorial/ndimage.rst b/scipy-0.10.1/doc/source/tutorial/ndimage.rst deleted file mode 100644 index add7411bd8..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/ndimage.rst +++ /dev/null @@ -1,1726 +0,0 @@ -Multi-dimensional image processing (`scipy.ndimage`) -==================================================== - -.. moduleauthor:: Peter Verveer - -.. currentmodule:: scipy.ndimage - -.. _ndimage-introduction: - -Introduction ------------- - -Image processing and analysis are generally seen as operations on -two-dimensional arrays of values. There are however a number of -fields where images of higher dimensionality must be analyzed. Good -examples of these are medical imaging and biological imaging. -:mod:`numpy` is suited very well for this type of applications due -its inherent multi-dimensional nature. 
The :mod:`scipy.ndimage` -packages provides a number of general image processing and analysis -functions that are designed to operate with arrays of arbitrary -dimensionality. The packages currently includes functions for -linear and non-linear filtering, binary morphology, B-spline -interpolation, and object measurements. - -.. _ndimage-properties-shared-by-all-functions: - -Properties shared by all functions ----------------------------------- - -All functions share some common properties. Notably, all functions -allow the specification of an output array with the *output* -argument. With this argument you can specify an array that will be -changed in-place with the result with the operation. In this case -the result is not returned. Usually, using the *output* argument is -more efficient, since an existing array is used to store the -result. - -The type of arrays returned is dependent on the type of operation, -but it is in most cases equal to the type of the input. If, -however, the *output* argument is used, the type of the result is -equal to the type of the specified output argument. If no output -argument is given, it is still possible to specify what the result -of the output should be. This is done by simply assigning the -desired `numpy` type object to the output argument. For example: - -:: - - >>> correlate(np.arange(10), [1, 2.5]) - array([ 0, 2, 6, 9, 13, 16, 20, 23, 27, 30]) - >>> correlate(np.arange(10), [1, 2.5], output=np.float64) - array([ 0. , 2.5, 6. , 9.5, 13. , 16.5, 20. , 23.5, 27. , 30.5]) - -.. _ndimage-filter-functions: - -Filter functions ----------------- - -.. currentmodule:: scipy.ndimage.filters - -The functions described in this section all perform some type of spatial -filtering of the the input array: the elements in the output are some function -of the values in the neighborhood of the corresponding input element. 
We refer -to this neighborhood of elements as the filter kernel, which is often -rectangular in shape but may also have an arbitrary footprint. Many -of the functions described below allow you to define the footprint -of the kernel, by passing a mask through the *footprint* parameter. -For example a cross shaped kernel can be defined as follows: - -:: - - >>> footprint = array([[0,1,0],[1,1,1],[0,1,0]]) - >>> footprint - array([[0, 1, 0], - [1, 1, 1], - [0, 1, 0]]) - -Usually the origin of the kernel is at the center calculated by -dividing the dimensions of the kernel shape by two. For instance, -the origin of a one-dimensional kernel of length three is at the -second element. Take for example the correlation of a -one-dimensional array with a filter of length 3 consisting of -ones: - -:: - - >>> a = [0, 0, 0, 1, 0, 0, 0] - >>> correlate1d(a, [1, 1, 1]) - array([0, 0, 1, 1, 1, 0, 0]) - -Sometimes it is convenient to choose a different origin for the -kernel. For this reason most functions support the *origin* -parameter which gives the origin of the filter relative to its -center. For example: - -:: - - >>> a = [0, 0, 0, 1, 0, 0, 0] - >>> correlate1d(a, [1, 1, 1], origin = -1) - array([0 1 1 1 0 0 0]) - -The effect is a shift of the result towards the left. This feature -will not be needed very often, but it may be useful especially for -filters that have an even size. A good example is the calculation -of backward and forward differences: - -:: - - >>> a = [0, 0, 1, 1, 1, 0, 0] - >>> correlate1d(a, [-1, 1]) # backward difference - array([ 0 0 1 0 0 -1 0]) - >>> correlate1d(a, [-1, 1], origin = -1) # forward difference - array([ 0 1 0 0 -1 0 0]) - -We could also have calculated the forward difference as follows: - -:: - - >>> correlate1d(a, [0, -1, 1]) - array([ 0 1 0 0 -1 0 0]) - -However, using the origin parameter instead of a larger kernel is -more efficient. 
For multi-dimensional kernels *origin* can be a -number, in which case the origin is assumed to be equal along all -axes, or a sequence giving the origin along each axis. - -Since the output elements are a function of elements in the -neighborhood of the input elements, the borders of the array need -to be dealt with appropriately by providing the values outside the -borders. This is done by assuming that the arrays are extended -beyond their boundaries according certain boundary conditions. In -the functions described below, the boundary conditions can be -selected using the *mode* parameter which must be a string with the -name of the boundary condition. Following boundary conditions are -currently supported: - - ========== ==================================== ==================== - "nearest" Use the value at the boundary [1 2 3]->[1 1 2 3 3] - "wrap" Periodically replicate the array [1 2 3]->[3 1 2 3 1] - "reflect" Reflect the array at the boundary [1 2 3]->[1 1 2 3 3] - "constant" Use a constant value, default is 0.0 [1 2 3]->[0 1 2 3 0] - ========== ==================================== ==================== - -The "constant" mode is special since it needs an additional -parameter to specify the constant value that should be used. - -.. note:: The easiest way to implement such boundary conditions would be to copy the data to a larger array and extend the data at the borders according to the boundary conditions. For large arrays and large filter kernels, this would be very memory consuming, and the functions described below therefore use a different approach that does not require allocating large temporary buffers. - -Correlation and convolution -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - The :func:`correlate1d` function calculates a one-dimensional correlation - along the given axis. The lines of the array along the given axis - are correlated with the given *weights*. The *weights* parameter - must be a one-dimensional sequences of numbers. 
- - - The function :func:`correlate` implements multi-dimensional correlation - of the input array with a given kernel. - - - The :func:`convolve1d` function calculates a one-dimensional convolution - along the given axis. The lines of the array along the given axis - are convoluted with the given *weights*. The *weights* parameter - must be a one-dimensional sequences of numbers. - - .. note:: A convolution is essentially a correlation after mirroring the kernel. As a result, the *origin* parameter behaves differently than in the case of a correlation: the result is shifted in the opposite directions. - - The function :func:`convolve` implements multi-dimensional convolution of - the input array with a given kernel. - - .. note:: A convolution is essentially a correlation after mirroring the kernel. As a result, the *origin* parameter behaves differently than in the case of a correlation: the results is shifted in the opposite direction. - -.. _ndimage-filter-functions-smoothing: - -Smoothing filters -^^^^^^^^^^^^^^^^^ - - - The :func:`gaussian_filter1d` function implements a one-dimensional - Gaussian filter. The standard-deviation of the Gaussian filter is - passed through the parameter *sigma*. Setting *order* = 0 corresponds - to convolution with a Gaussian kernel. An order of 1, 2, or 3 - corresponds to convolution with the first, second or third - derivatives of a Gaussian. Higher order derivatives are not - implemented. - - - The :func:`gaussian_filter` function implements a multi-dimensional - Gaussian filter. The standard-deviations of the Gaussian filter - along each axis are passed through the parameter *sigma* as a - sequence or numbers. If *sigma* is not a sequence but a single - number, the standard deviation of the filter is equal along all - directions. The order of the filter can be specified separately for - each axis. An order of 0 corresponds to convolution with a Gaussian - kernel. 
An order of 1, 2, or 3 corresponds to convolution with the - first, second or third derivatives of a Gaussian. Higher order - derivatives are not implemented. The *order* parameter must be a - number, to specify the same order for all axes, or a sequence of - numbers to specify a different order for each axis. - - .. note:: The multi-dimensional filter is implemented as a sequence of one-dimensional Gaussian filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a lower precision, the results may be imprecise because intermediate results may be stored with insufficient precision. This can be prevented by specifying a more precise output type. - - - The :func:`uniform_filter1d` function calculates a one-dimensional - uniform filter of the given *size* along the given axis. - - - The :func:`uniform_filter` implements a multi-dimensional uniform - filter. The sizes of the uniform filter are given for each axis as - a sequence of integers by the *size* parameter. If *size* is not a - sequence, but a single number, the sizes along all axis are assumed - to be equal. - - .. note:: The multi-dimensional filter is implemented as a sequence of one-dimensional uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a lower precision, the results may be imprecise because intermediate results may be stored with insufficient precision. This can be prevented by specifying a more precise output type. - - -Filters based on order statistics -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - The :func:`minimum_filter1d` function calculates a one-dimensional - minimum filter of given *size* along the given axis. - - - The :func:`maximum_filter1d` function calculates a one-dimensional - maximum filter of given *size* along the given axis. - - - The :func:`minimum_filter` function calculates a multi-dimensional - minimum filter. 
Either the sizes of a rectangular kernel or the - footprint of the kernel must be provided. The *size* parameter, if - provided, must be a sequence of sizes or a single number in which - case the size of the filter is assumed to be equal along each axis. - The *footprint*, if provided, must be an array that defines the - shape of the kernel by its non-zero elements. - - - The :func:`maximum_filter` function calculates a multi-dimensional - maximum filter. Either the sizes of a rectangular kernel or the - footprint of the kernel must be provided. The *size* parameter, if - provided, must be a sequence of sizes or a single number in which - case the size of the filter is assumed to be equal along each axis. - The *footprint*, if provided, must be an array that defines the - shape of the kernel by its non-zero elements. - - - The :func:`rank_filter` function calculates a multi-dimensional rank - filter. The *rank* may be less then zero, i.e., *rank* = -1 indicates - the largest element. Either the sizes of a rectangular kernel or - the footprint of the kernel must be provided. The *size* parameter, - if provided, must be a sequence of sizes or a single number in - which case the size of the filter is assumed to be equal along each - axis. The *footprint*, if provided, must be an array that defines - the shape of the kernel by its non-zero elements. - - - The :func:`percentile_filter` function calculates a multi-dimensional - percentile filter. The *percentile* may be less then zero, i.e., - *percentile* = -20 equals *percentile* = 80. Either the sizes of a - rectangular kernel or the footprint of the kernel must be provided. - The *size* parameter, if provided, must be a sequence of sizes or a - single number in which case the size of the filter is assumed to be - equal along each axis. The *footprint*, if provided, must be an - array that defines the shape of the kernel by its non-zero - elements. 
- - - The :func:`median_filter` function calculates a multi-dimensional median - filter. Either the sizes of a rectangular kernel or the footprint - of the kernel must be provided. The *size* parameter, if provided, - must be a sequence of sizes or a single number in which case the - size of the filter is assumed to be equal along each axis. The - *footprint* if provided, must be an array that defines the shape of - the kernel by its non-zero elements. - - -Derivatives -^^^^^^^^^^^ - -Derivative filters can be constructed in several ways. The function -:func:`gaussian_filter1d` described in -:ref:`ndimage-filter-functions-smoothing` can be used to calculate -derivatives along a given axis using the *order* parameter. Other -derivative filters are the Prewitt and Sobel filters: - - The :func:`prewitt` function calculates a derivative along the given - axis. - - - The :func:`sobel` function calculates a derivative along the given - axis. - - -The Laplace filter is calculated by the sum of the second -derivatives along all axes. Thus, different Laplace filters can be -constructed using different second derivative functions. Therefore -we provide a general function that takes a function argument to -calculate the second derivative along a given direction and to -construct the Laplace filter: - - The function :func:`generic_laplace` calculates a laplace filter using - the function passed through :func:`derivative2` to calculate second - derivatives. The function :func:`derivative2` should have the following - signature:: - - derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) - - It should calculate the second derivative along the dimension - *axis*. If *output* is not None it should use that for the output - and return None, otherwise it should return the result. *mode*, - *cval* have the usual meaning. 
- - The *extra_arguments* and *extra_keywords* arguments can be used - to pass a tuple of extra arguments and a dictionary of named - arguments that are passed to :func:`derivative2` at each call. - - For example:: - - >>> def d2(input, axis, output, mode, cval): - ... return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) - ... - >>> a = zeros((5, 5)) - >>> a[2, 2] = 1 - >>> generic_laplace(a, d2) - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 1., -4., 1., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - - To demonstrate the use of the *extra_arguments* argument we could - do:: - - >>> def d2(input, axis, output, mode, cval, weights): - ... return correlate1d(input, weights, axis, output, mode, cval, 0,) - ... - >>> a = zeros((5, 5)) - >>> a[2, 2] = 1 - >>> generic_laplace(a, d2, extra_arguments = ([1, -2, 1],)) - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 1., -4., 1., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - - or:: - - >>> generic_laplace(a, d2, extra_keywords = {'weights': [1, -2, 1]}) - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 1., -4., 1., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - - -The following two functions are implemented using -:func:`generic_laplace` by providing appropriate functions for the -second derivative function: - - The function :func:`laplace` calculates the Laplace using discrete - differentiation for the second derivative (i.e. convolution with - :obj:`[1, -2, 1]`). - - - The function :func:`gaussian_laplace` calculates the Laplace using - :func:`gaussian_filter` to calculate the second derivatives. The - standard-deviations of the Gaussian filter along each axis are - passed through the parameter *sigma* as a sequence or numbers. If - *sigma* is not a sequence but a single number, the standard - deviation of the filter is equal along all directions. 
- - -The gradient magnitude is defined as the square root of the sum of -the squares of the gradients in all directions. Similar to the -generic Laplace function there is a :func:`generic_gradient_magnitude` -function that calculates the gradient magnitude of an array: - - The function :func:`generic_gradient_magnitude` calculates a gradient - magnitude using the function passed through :func:`derivative` to - calculate first derivatives. The function :func:`derivative` should have - the following signature:: - - derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) - - - It should calculate the derivative along the dimension *axis*. If - *output* is not None it should use that for the output and return - None, otherwise it should return the result. *mode*, *cval* have - the usual meaning. - - The *extra_arguments* and *extra_keywords* arguments can be used - to pass a tuple of extra arguments and a dictionary of named - arguments that are passed to *derivative* at each call. - - For example, the :func:`sobel` function fits the required signature:: - - >>> a = zeros((5, 5)) - >>> a[2, 2] = 1 - >>> generic_gradient_magnitude(a, sobel) - array([[ 0. , 0. , 0. , 0. , 0. ], - [ 0. , 1.41421356, 2. , 1.41421356, 0. ], - [ 0. , 2. , 0. , 2. , 0. ], - [ 0. , 1.41421356, 2. , 1.41421356, 0. ], - [ 0. , 0. , 0. , 0. , 0. ]]) - - See the documentation of :func:`generic_laplace` for examples of using - the *extra_arguments* and *extra_keywords* arguments. - - -The :func:`sobel` and :func:`prewitt` functions fit the required signature and -can therefore directly be used with :func:`generic_gradient_magnitude`. -The following function implements the gradient magnitude using -Gaussian derivatives: - - The function :func:`gaussian_gradient_magnitude` calculates the - gradient magnitude using :func:`gaussian_filter` to calculate the first - derivatives. 
The standard-deviations of the Gaussian filter along - each axis are passed through the parameter *sigma* as a sequence of - numbers. If *sigma* is not a sequence but a single number, the - standard deviation of the filter is equal along all directions. - - -.. _ndimage-genericfilters: - -Generic filter functions -^^^^^^^^^^^^^^^^^^^^^^^^ - -To implement filter functions, generic functions can be used that accept a -callable object that implements the filtering operation. The iteration over the -input and output arrays is handled by these generic functions, along with such -details as the implementation of the boundary conditions. Only a -callable object implementing a callback function that does the -actual filtering work must be provided. The callback function can -also be written in C and passed using a :ctype:`PyCObject` (see -:ref:`ndimage-ccallbacks` for more information). - - The :func:`generic_filter1d` function implements a generic - one-dimensional filter function, where the actual filtering - operation must be supplied as a python function (or other callable - object). The :func:`generic_filter1d` function iterates over the lines - of an array and calls :func:`function` at each line. The arguments that - are passed to :func:`function` are one-dimensional arrays of the - :ctype:`tFloat64` type. The first contains the values of the current line. - It is extended at the beginning and the end, according to the - *filter_size* and *origin* arguments. The second array should be - modified in-place to provide the output values of the line. For - example consider a correlation along one dimension:: - - >>> a = arange(12).reshape(3,4) - >>> correlate1d(a, [1, 2, 3]) - array([[ 3, 8, 14, 17], - [27, 32, 38, 41], - [51, 56, 62, 65]]) - - The same operation can be implemented using :func:`generic_filter1d` as - follows:: - - >>> def fnc(iline, oline): - ... oline[...] = iline[:-2] + 2 * iline[1:-1] + 3 * iline[2:] - ... 
- >>> generic_filter1d(a, fnc, 3) - array([[ 3, 8, 14, 17], - [27, 32, 38, 41], - [51, 56, 62, 65]]) - - Here the origin of the kernel was (by default) assumed to be in the - middle of the filter of length 3. Therefore, each input line was - extended by one value at the beginning and at the end, before the - function was called. - - Optionally extra arguments can be defined and passed to the filter - function. The *extra_arguments* and *extra_keywords* arguments - can be used to pass a tuple of extra arguments and/or a dictionary - of named arguments that are passed to derivative at each call. For - example, we can pass the parameters of our filter as an argument:: - - >>> def fnc(iline, oline, a, b): - ... oline[...] = iline[:-2] + a * iline[1:-1] + b * iline[2:] - ... - >>> generic_filter1d(a, fnc, 3, extra_arguments = (2, 3)) - array([[ 3, 8, 14, 17], - [27, 32, 38, 41], - [51, 56, 62, 65]]) - - or:: - - >>> generic_filter1d(a, fnc, 3, extra_keywords = {'a':2, 'b':3}) - array([[ 3, 8, 14, 17], - [27, 32, 38, 41], - [51, 56, 62, 65]]) - - The :func:`generic_filter` function implements a generic filter - function, where the actual filtering operation must be supplied as - a python function (or other callable object). The :func:`generic_filter` - function iterates over the array and calls :func:`function` at each - element. The argument of :func:`function` is a one-dimensional array of - the :ctype:`tFloat64` type, that contains the values around the current - element that are within the footprint of the filter. The function - should return a single value that can be converted to a double - precision number. For example consider a correlation:: - - >>> a = arange(12).reshape(3,4) - >>> correlate(a, [[1, 0], [0, 3]]) - array([[ 0, 3, 7, 11], - [12, 15, 19, 23], - [28, 31, 35, 39]]) - - The same operation can be implemented using *generic_filter* as - follows:: - - >>> def fnc(buffer): - ... return (buffer * array([1, 3])).sum() - ... 
- >>> generic_filter(a, fnc, footprint = [[1, 0], [0, 1]]) - array([[ 0 3 7 11], - [12 15 19 23], - [28 31 35 39]]) - - Here a kernel footprint was specified that contains only two - elements. Therefore the filter function receives a buffer of length - equal to two, which was multiplied with the proper weights and the - result summed. - - When calling :func:`generic_filter`, either the sizes of a rectangular - kernel or the footprint of the kernel must be provided. The *size* - parameter, if provided, must be a sequence of sizes or a single - number in which case the size of the filter is assumed to be equal - along each axis. The *footprint*, if provided, must be an array - that defines the shape of the kernel by its non-zero elements. - - Optionally extra arguments can be defined and passed to the filter - function. The *extra_arguments* and *extra_keywords* arguments - can be used to pass a tuple of extra arguments and/or a dictionary - of named arguments that are passed to derivative at each call. For - example, we can pass the parameters of our filter as an argument:: - - >>> def fnc(buffer, weights): - ... weights = asarray(weights) - ... return (buffer * weights).sum() - ... - >>> generic_filter(a, fnc, footprint = [[1, 0], [0, 1]], extra_arguments = ([1, 3],)) - array([[ 0, 3, 7, 11], - [12, 15, 19, 23], - [28, 31, 35, 39]]) - - or:: - - >>> generic_filter(a, fnc, footprint = [[1, 0], [0, 1]], extra_keywords= {'weights': [1, 3]}) - array([[ 0, 3, 7, 11], - [12, 15, 19, 23], - [28, 31, 35, 39]]) - - -These functions iterate over the lines or elements starting at the -last axis, i.e. the last index changes the fastest. This order of -iteration is guaranteed for the case that it is important to adapt -the filter depending on spatial location. Here is an example of -using a class that implements the filter and keeps track of the -current coordinates while iterating. 
It performs the same filter -operation as described above for :func:`generic_filter`, but -additionally prints the current coordinates: - -:: - - >>> a = arange(12).reshape(3,4) - >>> - >>> class fnc_class: - ... def __init__(self, shape): - ... # store the shape: - ... self.shape = shape - ... # initialize the coordinates: - ... self.coordinates = [0] * len(shape) - ... - ... def filter(self, buffer): - ... result = (buffer * array([1, 3])).sum() - ... print self.coordinates - ... # calculate the next coordinates: - ... axes = range(len(self.shape)) - ... axes.reverse() - ... for jj in axes: - ... if self.coordinates[jj] < self.shape[jj] - 1: - ... self.coordinates[jj] += 1 - ... break - ... else: - ... self.coordinates[jj] = 0 - ... return result - ... - >>> fnc = fnc_class(shape = (3,4)) - >>> generic_filter(a, fnc.filter, footprint = [[1, 0], [0, 1]]) - [0, 0] - [0, 1] - [0, 2] - [0, 3] - [1, 0] - [1, 1] - [1, 2] - [1, 3] - [2, 0] - [2, 1] - [2, 2] - [2, 3] - array([[ 0, 3, 7, 11], - [12, 15, 19, 23], - [28, 31, 35, 39]]) - -For the :func:`generic_filter1d` function the same approach works, -except that this function does not iterate over the axis that is -being filtered. The example for :func:`generic_filter1d` then becomes -this: - -:: - - >>> a = arange(12).reshape(3,4) - >>> - >>> class fnc1d_class: - ... def __init__(self, shape, axis = -1): - ... # store the filter axis: - ... self.axis = axis - ... # store the shape: - ... self.shape = shape - ... # initialize the coordinates: - ... self.coordinates = [0] * len(shape) - ... - ... def filter(self, iline, oline): - ... oline[...] = iline[:-2] + 2 * iline[1:-1] + 3 * iline[2:] - ... print self.coordinates - ... # calculate the next coordinates: - ... axes = range(len(self.shape)) - ... # skip the filter axis: - ... del axes[self.axis] - ... axes.reverse() - ... for jj in axes: - ... if self.coordinates[jj] < self.shape[jj] - 1: - ... self.coordinates[jj] += 1 - ... break - ... else: - ... 
self.coordinates[jj] = 0 - ... - >>> fnc = fnc1d_class(shape = (3,4)) - >>> generic_filter1d(a, fnc.filter, 3) - [0, 0] - [1, 0] - [2, 0] - array([[ 3, 8, 14, 17], - [27, 32, 38, 41], - [51, 56, 62, 65]]) - -Fourier domain filters -^^^^^^^^^^^^^^^^^^^^^^ - -The functions described in this section perform filtering -operations in the Fourier domain. Thus, the input array of such a -function should be compatible with an inverse Fourier transform -function, such as the functions from the :mod:`numpy.fft` module. We -therefore have to deal with arrays that may be the result of a real -or a complex Fourier transform. In the case of a real Fourier -transform only half of the symmetric complex transform is -stored. Additionally, it needs to be known what the length of the -axis was that was transformed by the real fft. The functions -described here provide a parameter *n* that in the case of a real -transform must be equal to the length of the real transform axis -before transformation. If this parameter is less than zero, it is -assumed that the input array was the result of a complex Fourier -transform. The parameter *axis* can be used to indicate along which -axis the real transform was executed. - - The :func:`fourier_shift` function multiplies the input array with the - multi-dimensional Fourier transform of a shift operation for the - given shift. The *shift* parameter is a sequence of shifts for - each dimension, or a single value for all dimensions. - - - The :func:`fourier_gaussian` function multiplies the input array with - the multi-dimensional Fourier transform of a Gaussian filter with - given standard-deviations *sigma*. The *sigma* parameter is a - sequence of values for each dimension, or a single value for all - dimensions. - - - The :func:`fourier_uniform` function multiplies the input array with the - multi-dimensional Fourier transform of a uniform filter with given - sizes *size*. 
The *size* parameter is a sequence of values for - each dimension, or a single value for all dimensions. - - - The :func:`fourier_ellipsoid` function multiplies the input array with - the multi-dimensional Fourier transform of an elliptically shaped - filter with given sizes *size*. The *size* parameter is a sequence - of values for each dimension, or a single value for all dimensions. - This function is only implemented for dimensions 1, 2, and 3. - - -.. _ndimage-interpolation: - -Interpolation functions ------------------------ - -.. currentmodule:: scipy.ndimage.interpolation - - -This section describes various interpolation functions that are -based on B-spline theory. A good introduction to B-splines can be -found in: M. Unser, "Splines: A Perfect Fit for Signal and Image -Processing," IEEE Signal Processing Magazine, vol. 16, no. 6, pp. -22-38, November 1999. - -Spline pre-filters -^^^^^^^^^^^^^^^^^^ - -Interpolation using -splines of an order larger than 1 requires a pre- filtering step. -The interpolation functions described in section -:ref:`ndimage-interpolation` apply pre-filtering by calling -:func:`spline_filter`, but they can be instructed not to do this by -setting the *prefilter* keyword equal to False. This is useful if -more than one interpolation operation is done on the same array. In -this case it is more efficient to do the pre-filtering only once -and use a prefiltered array as the input of the interpolation -functions. The following two functions implement the -pre-filtering: - - The :func:`spline_filter1d` function calculates a one-dimensional spline - filter along the given axis. An output array can optionally be - provided. The order of the spline must be larger than 1 and less - than 6. - - - The :func:`spline_filter` function calculates a multi-dimensional spline - filter. - - .. note:: The multi-dimensional filter is implemented as a sequence of one-dimensional spline filters. 
The intermediate arrays are stored in the same data type as the output. Therefore, if an output with a limited precision is requested, the results may be imprecise because intermediate results may be stored with insufficient precision. This can be prevented by specifying an output type of high precision. - - -Interpolation functions -^^^^^^^^^^^^^^^^^^^^^^^ - -The following functions all employ spline interpolation to effect some type of -geometric transformation of the input array. This requires a mapping of the -output coordinates to the input coordinates, and therefore the possibility -arises that input values outside the boundaries are needed. This problem is -solved in the same way as described in :ref:`ndimage-filter-functions` -for the multi-dimensional filter functions. Therefore these functions all -support a *mode* parameter that determines how the boundaries are handled, and -a *cval* parameter that gives a constant value in case that the 'constant' -mode is used. - - The :func:`geometric_transform` function applies an arbitrary geometric - transform to the input. The given *mapping* function is called at - each point in the output to find the corresponding coordinates in - the input. *mapping* must be a callable object that accepts a tuple - of length equal to the output array rank and returns the - corresponding input coordinates as a tuple of length equal to the - input array rank. The output shape and output type can optionally - be provided. If not given they are equal to the input shape and - type. - - For example:: - - >>> a = arange(12).reshape(4,3).astype(np.float64) - >>> def shift_func(output_coordinates): - ... return (output_coordinates[0] - 0.5, output_coordinates[1] - 0.5) - ... - >>> geometric_transform(a, shift_func) - array([[ 0. , 0. , 0. ], - [ 0. , 1.3625, 2.7375], - [ 0. , 4.8125, 6.1875], - [ 0. , 8.2625, 9.6375]]) - - Optionally extra arguments can be defined and passed to the filter - function. 
The *extra_arguments* and *extra_keywords* arguments - can be used to pass a tuple of extra arguments and/or a dictionary - of named arguments that are passed to derivative at each call. For - example, we can pass the shifts in our example as arguments:: - - >>> def shift_func(output_coordinates, s0, s1): - ... return (output_coordinates[0] - s0, output_coordinates[1] - s1) - ... - >>> geometric_transform(a, shift_func, extra_arguments = (0.5, 0.5)) - array([[ 0. , 0. , 0. ], - [ 0. , 1.3625, 2.7375], - [ 0. , 4.8125, 6.1875], - [ 0. , 8.2625, 9.6375]]) - - or:: - - >>> geometric_transform(a, shift_func, extra_keywords = {'s0': 0.5, 's1': 0.5}) - array([[ 0. , 0. , 0. ], - [ 0. , 1.3625, 2.7375], - [ 0. , 4.8125, 6.1875], - [ 0. , 8.2625, 9.6375]]) - - .. note:: The mapping function can also be written in C and passed using a :ctype:`PyCObject`. See :ref:`ndimage-ccallbacks` for more information. - - - The function :func:`map_coordinates` applies an arbitrary coordinate - transformation using the given array of coordinates. The shape of - the output is derived from that of the coordinate array by dropping - the first axis. The parameter *coordinates* is used to find for - each point in the output the corresponding coordinates in the - input. The values of *coordinates* along the first axis are the - coordinates in the input array at which the output value is found. - (See also the numarray `coordinates` function.) Since the - coordinates may be non- integer coordinates, the value of the input - at these coordinates is determined by spline interpolation of the - requested order. Here is an example that interpolates a 2D array at - (0.5, 0.5) and (1, 2):: - - >>> a = arange(12).reshape(4,3).astype(np.float64) - >>> a - array([[ 0., 1., 2.], - [ 3., 4., 5.], - [ 6., 7., 8.], - [ 9., 10., 11.]]) - >>> map_coordinates(a, [[0.5, 2], [0.5, 1]]) - array([ 1.3625 7. ]) - - The :func:`affine_transform` function applies an affine transformation - to the input array. 
The given transformation *matrix* and *offset* - are used to find for each point in the output the corresponding - coordinates in the input. The value of the input at the calculated - coordinates is determined by spline interpolation of the requested - order. The transformation *matrix* must be two-dimensional or can - also be given as a one-dimensional sequence or array. In the latter - case, it is assumed that the matrix is diagonal. A more efficient - interpolation algorithm is then applied that exploits the - separability of the problem. The output shape and output type can - optionally be provided. If not given they are equal to the input - shape and type. - - The :func:`shift` function returns a shifted version of the input, using - spline interpolation of the requested *order*. - - The :func:`zoom` function returns a rescaled version of the input, using - spline interpolation of the requested *order*. - - The :func:`rotate` function returns the input array rotated in the plane - defined by the two axes given by the parameter *axes*, using spline - interpolation of the requested *order*. The angle must be given in - degrees. If *reshape* is true, then the size of the output array is - adapted to contain the rotated input. - - -.. _ndimage-morphology: - -Morphology ----------- - -.. _ndimage-binary-morphology: - -Binary morphology -^^^^^^^^^^^^^^^^^ - -.. currentmodule:: scipy.ndimage.morphology - -Binary morphology (need something to put here). - - The :func:`generate_binary_structure` functions generates a binary - structuring element for use in binary morphology operations. The - *rank* of the structure must be provided. The size of the structure - that is returned is equal to three in each direction. The value of - each element is equal to one if the square of the Euclidean - distance from the element to the center is less or equal to - *connectivity*. 
For instance, two dimensional 4-connected and - 8-connected structures are generated as follows:: - - >>> generate_binary_structure(2, 1) - array([[False, True, False], - [ True, True, True], - [False, True, False]], dtype=bool) - >>> generate_binary_structure(2, 2) - array([[ True, True, True], - [ True, True, True], - [ True, True, True]], dtype=bool) - -Most binary morphology functions can be expressed in terms of the -basic operations erosion and dilation: - - The :func:`binary_erosion` function implements binary erosion of arrays - of arbitrary rank with the given structuring element. The origin - parameter controls the placement of the structuring element as - described in :ref:`ndimage-filter-functions`. If no - structuring element is provided, an element with connectivity equal - to one is generated using :func:`generate_binary_structure`. The - *border_value* parameter gives the value of the array outside - boundaries. The erosion is repeated *iterations* times. If - *iterations* is less than one, the erosion is repeated until the - result does not change anymore. If a *mask* array is given, only - those elements with a true value at the corresponding mask element - are modified at each iteration. - - The :func:`binary_dilation` function implements binary dilation of - arrays of arbitrary rank with the given structuring element. The - origin parameter controls the placement of the structuring element - as described in :ref:`ndimage-filter-functions`. If no - structuring element is provided, an element with connectivity equal - to one is generated using :func:`generate_binary_structure`. The - *border_value* parameter gives the value of the array outside - boundaries. The dilation is repeated *iterations* times. If - *iterations* is less than one, the dilation is repeated until the - result does not change anymore. If a *mask* array is given, only - those elements with a true value at the corresponding mask element - are modified at each iteration. 
- - Here is an example of using :func:`binary_dilation` to find all elements - that touch the border, by repeatedly dilating an empty array from - the border using the data array as the mask:: - - >>> struct = array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - >>> a = array([[1,0,0,0,0], [1,1,0,1,0], [0,0,1,1,0], [0,0,0,0,0]]) - >>> a - array([[1, 0, 0, 0, 0], - [1, 1, 0, 1, 0], - [0, 0, 1, 1, 0], - [0, 0, 0, 0, 0]]) - >>> binary_dilation(zeros(a.shape), struct, -1, a, border_value=1) - array([[ True, False, False, False, False], - [ True, True, False, False, False], - [False, False, False, False, False], - [False, False, False, False, False]], dtype=bool) - - -The :func:`binary_erosion` and :func:`binary_dilation` functions both have an -*iterations* parameter which allows the erosion or dilation to be -repeated a number of times. Repeating an erosion or a dilation with -a given structure *n* times is equivalent to an erosion or a -dilation with a structure that is *n-1* times dilated with itself. -A function is provided that allows the calculation of a structure -that is dilated a number of times with itself: - - The :func:`iterate_structure` function returns a structure by dilation - of the input structure *iteration* - 1 times with itself. For - instance:: - - >>> struct = generate_binary_structure(2, 1) - >>> struct - array([[False, True, False], - [ True, True, True], - [False, True, False]], dtype=bool) - >>> iterate_structure(struct, 2) - array([[False, False, True, False, False], - [False, True, True, True, False], - [ True, True, True, True, True], - [False, True, True, True, False], - [False, False, True, False, False]], dtype=bool) - - If the origin of the original structure is equal to 0, then it is - also equal to 0 for the iterated structure. If not, the origin must - also be adapted if the equivalent of the *iterations* erosions or - dilations must be achieved with the iterated structure. 
The adapted - origin is simply obtained by multiplying with the number of - iterations. For convenience the :func:`iterate_structure` also returns - the adapted origin if the *origin* parameter is not None:: - - >>> iterate_structure(struct, 2, -1) - (array([[False, False, True, False, False], - [False, True, True, True, False], - [ True, True, True, True, True], - [False, True, True, True, False], - [False, False, True, False, False]], dtype=bool), [-2, -2]) - - -Other morphology operations can be defined in terms of erosion and -dilation. The following functions provide a few of these operations -for convenience: - - The :func:`binary_opening` function implements binary opening of arrays - of arbitrary rank with the given structuring element. Binary - opening is equivalent to a binary erosion followed by a binary - dilation with the same structuring element. The origin parameter - controls the placement of the structuring element as described in - :ref:`ndimage-filter-functions`. If no structuring element is - provided, an element with connectivity equal to one is generated - using :func:`generate_binary_structure`. The *iterations* parameter - gives the number of erosions that is performed followed by the same - number of dilations. - - - The :func:`binary_closing` function implements binary closing of arrays - of arbitrary rank with the given structuring element. Binary - closing is equivalent to a binary dilation followed by a binary - erosion with the same structuring element. The origin parameter - controls the placement of the structuring element as described in - :ref:`ndimage-filter-functions`. If no structuring element is - provided, an element with connectivity equal to one is generated - using :func:`generate_binary_structure`. The *iterations* parameter - gives the number of dilations that is performed followed by the - same number of erosions. 
- - - The :func:`binary_fill_holes` function is used to close holes in - objects in a binary image, where the structure defines the - connectivity of the holes. The origin parameter controls the - placement of the structuring element as described in - :ref:`ndimage-filter-functions`. If no structuring element is - provided, an element with connectivity equal to one is generated - using :func:`generate_binary_structure`. - - - The :func:`binary_hit_or_miss` function implements a binary - hit-or-miss transform of arrays of arbitrary rank with the given - structuring elements. The hit-or-miss transform is calculated by - erosion of the input with the first structure, erosion of the - logical *not* of the input with the second structure, followed by - the logical *and* of these two erosions. The origin parameters - control the placement of the structuring elements as described in - :ref:`ndimage-filter-functions`. If *origin2* equals None it - is set equal to the *origin1* parameter. If the first structuring - element is not provided, a structuring element with connectivity - equal to one is generated using :func:`generate_binary_structure`, if - *structure2* is not provided, it is set equal to the logical *not* - of *structure1*. - - -.. _ndimage-grey-morphology: - -Grey-scale morphology -^^^^^^^^^^^^^^^^^^^^^ - -.. currentmodule:: scipy.ndimage.morphology - -Grey-scale morphology operations are the equivalents of binary -morphology operations that operate on arrays with arbitrary values. -Below we describe the grey-scale equivalents of erosion, dilation, -opening and closing. These operations are implemented in a similar -fashion as the filters described in -:ref:`ndimage-filter-functions`, and we refer to this section for the -description of filter kernels and footprints, and the handling of -array borders. The grey-scale morphology operations optionally take -a *structure* parameter that gives the values of the structuring -element. 
If this parameter is not given the structuring element is -assumed to be flat with a value equal to zero. The shape of the -structure can optionally be defined by the *footprint* parameter. -If this parameter is not given, the structure is assumed to be -rectangular, with sizes equal to the dimensions of the *structure* -array, or by the *size* parameter if *structure* is not given. The -*size* parameter is only used if both *structure* and *footprint* -are not given, in which case the structuring element is assumed to -be rectangular and flat with the dimensions given by *size*. The -*size* parameter, if provided, must be a sequence of sizes or a -single number in which case the size of the filter is assumed to be -equal along each axis. The *footprint* parameter, if provided, must -be an array that defines the shape of the kernel by its non-zero -elements. - -Similar to binary erosion and dilation there are operations for -grey-scale erosion and dilation: - - The :func:`grey_erosion` function calculates a multi-dimensional grey- - scale erosion. - - - The :func:`grey_dilation` function calculates a multi-dimensional grey- - scale dilation. - - -Grey-scale opening and closing operations can be defined similar to -their binary counterparts: - - The :func:`grey_opening` function implements grey-scale opening of - arrays of arbitrary rank. Grey-scale opening is equivalent to a - grey-scale erosion followed by a grey-scale dilation. - - - The :func:`grey_closing` function implements grey-scale closing of - arrays of arbitrary rank. Grey-scale opening is equivalent to a - grey-scale dilation followed by a grey-scale erosion. - - - The :func:`morphological_gradient` function implements a grey-scale - morphological gradient of arrays of arbitrary rank. The grey-scale - morphological gradient is equal to the difference of a grey-scale - dilation and a grey-scale erosion. 
- - - The :func:`morphological_laplace` function implements a grey-scale - morphological laplace of arrays of arbitrary rank. The grey-scale - morphological laplace is equal to the sum of a grey-scale dilation - and a grey-scale erosion minus twice the input. - - - The :func:`white_tophat` function implements a white top-hat filter of - arrays of arbitrary rank. The white top-hat is equal to the - difference of the input and a grey-scale opening. - - - The :func:`black_tophat` function implements a black top-hat filter of - arrays of arbitrary rank. The black top-hat is equal to the - difference of a grey-scale closing and the input. - - -.. _ndimage-distance-transforms: - -Distance transforms -------------------- - -.. currentmodule:: scipy.ndimage.morphology - -Distance transforms are used to -calculate the minimum distance from each element of an object to -the background. The following functions implement distance -transforms for three different distance metrics: Euclidean, City -Block, and Chessboard distances. - - The function :func:`distance_transform_cdt` uses a chamfer type - algorithm to calculate the distance transform of the input, by - replacing each object element (defined by values larger than zero) - with the shortest distance to the background (all non-object - elements). The structure determines the type of chamfering that is - done. If the structure is equal to 'cityblock' a structure is - generated using :func:`generate_binary_structure` with a squared - distance equal to 1. If the structure is equal to 'chessboard', a - structure is generated using :func:`generate_binary_structure` with a - squared distance equal to the rank of the array. These choices - correspond to the common interpretations of the cityblock and the - chessboard distance metrics in two dimensions. - - In addition to the distance transform, the feature transform can be - calculated. 
In this case the index of the closest background - element is returned along the first axis of the result. The - *return_distances*, and *return_indices* flags can be used to - indicate if the distance transform, the feature transform, or both - must be returned. - - The *distances* and *indices* arguments can be used to give - optional output arrays that must be of the correct size and type - (both :ctype:`Int32`). - - The basics of the algorithm used to implement this function are - described in: G. Borgefors, "Distance transformations in arbitrary - dimensions.", Computer Vision, Graphics, and Image Processing, - 27:321-345, 1984. - - - The function :func:`distance_transform_edt` calculates the exact - euclidean distance transform of the input, by replacing each object - element (defined by values larger than zero) with the shortest - euclidean distance to the background (all non-object elements). - - In addition to the distance transform, the feature transform can be - calculated. In this case the index of the closest background - element is returned along the first axis of the result. The - *return_distances*, and *return_indices* flags can be used to - indicate if the distance transform, the feature transform, or both - must be returned. - - Optionally the sampling along each axis can be given by the - *sampling* parameter which should be a sequence of length equal to - the input rank, or a single number in which the sampling is assumed - to be equal along all axes. - - The *distances* and *indices* arguments can be used to give - optional output arrays that must be of the correct size and type - (:ctype:`Float64` and :ctype:`Int32`). - - The algorithm used to implement this function is described in: C. - R. Maurer, Jr., R. Qi, and V. Raghavan, "A linear time algorithm - for computing exact euclidean distance transforms of binary images - in arbitrary dimensions." IEEE Trans. PAMI 25, 265-270, 2003. 
- - - The function :func:`distance_transform_bf` uses a brute-force algorithm - to calculate the distance transform of the input, by replacing each - object element (defined by values larger than zero) with the - shortest distance to the background (all non-object elements). The - metric must be one of "euclidean", "cityblock", or - "chessboard". - - In addition to the distance transform, the feature transform can be - calculated. In this case the index of the closest background - element is returned along the first axis of the result. The - *return_distances*, and *return_indices* flags can be used to - indicate if the distance transform, the feature transform, or both - must be returned. - - Optionally the sampling along each axis can be given by the - *sampling* parameter which should be a sequence of length equal to - the input rank, or a single number in which the sampling is assumed - to be equal along all axes. This parameter is only used in the case - of the euclidean distance transform. - - The *distances* and *indices* arguments can be used to give - optional output arrays that must be of the correct size and type - (:ctype:`Float64` and :ctype:`Int32`). - - .. note:: This function uses a slow brute-force algorithm, the function :func:`distance_transform_cdt` can be used to more efficiently calculate cityblock and chessboard distance transforms. The function :func:`distance_transform_edt` can be used to more efficiently calculate the exact euclidean distance transform. - - -Segmentation and labeling -------------------------- - -Segmentation is the process of separating objects of interest from -the background. The most simple approach is probably intensity -thresholding, which is easily done with :mod:`numpy` functions:: - - >>> a = array([[1,2,2,1,1,0], - ... [0,2,3,1,2,0], - ... [1,1,1,3,3,2], - ... 
[1,1,1,1,2,1]]) - >>> where(a > 1, 1, 0) - array([[0, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 1, 0], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 0, 1, 0]]) - -The result is a binary image, in which the individual objects still -need to be identified and labeled. The function :func:`label` generates -an array where each object is assigned a unique number: - - The :func:`label` function generates an array where the objects in the - input are labeled with an integer index. It returns a tuple - consisting of the array of object labels and the number of objects - found, unless the *output* parameter is given, in which case only - the number of objects is returned. The connectivity of the objects - is defined by a structuring element. For instance, in two - dimensions using a four-connected structuring element gives:: - - >>> a = array([[0,1,1,0,0,0],[0,1,1,0,1,0],[0,0,0,1,1,1],[0,0,0,0,1,0]]) - >>> s = [[0, 1, 0], [1,1,1], [0,1,0]] - >>> label(a, s) - (array([[0, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 2, 0], - [0, 0, 0, 2, 2, 2], - [0, 0, 0, 0, 2, 0]]), 2) - - These two objects are not connected because there is no way in - which we can place the structuring element such that it overlaps - with both objects. However, an 8-connected structuring element - results in only a single object:: - - >>> a = array([[0,1,1,0,0,0],[0,1,1,0,1,0],[0,0,0,1,1,1],[0,0,0,0,1,0]]) - >>> s = [[1,1,1], [1,1,1], [1,1,1]] - >>> label(a, s)[0] - array([[0, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 1, 0], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 0, 1, 0]]) - - If no structuring element is provided, one is generated by calling - :func:`generate_binary_structure` (see - :ref:`ndimage-binary-morphology`) - using a connectivity of one (which in 2D is the 4-connected - structure of the first example). The input can be of any type, any - value not equal to zero is taken to be part of an object. This is - useful if you need to 're-label' an array of object indices, for - instance after removing unwanted objects. 
Just apply the label - function again to the index array. For instance:: - - >>> l, n = label([1, 0, 1, 0, 1]) - >>> l - array([1 0 2 0 3]) - >>> l = where(l != 2, l, 0) - >>> l - array([1 0 0 0 3]) - >>> label(l)[0] - array([1 0 0 0 2]) - - .. note:: The structuring element used by :func:`label` is assumed to be symmetric. - - -There is a large number of other approaches for segmentation, for -instance from an estimation of the borders of the objects that can -be obtained for instance by derivative filters. One such an -approach is watershed segmentation. The function :func:`watershed_ift` -generates an array where each object is assigned a unique label, -from an array that localizes the object borders, generated for -instance by a gradient magnitude filter. It uses an array -containing initial markers for the objects: - - The :func:`watershed_ift` function applies a watershed from markers - algorithm, using an Iterative Forest Transform, as described in: P. - Felkel, R. Wegenkittl, and M. Bruckschwaiger, "Implementation and - Complexity of the Watershed-from-Markers Algorithm Computed as a - Minimal Cost Forest.", Eurographics 2001, pp. C:26-35. - - The inputs of this function are the array to which the transform is - applied, and an array of markers that designate the objects by a - unique label, where any non-zero value is a marker. For instance:: - - >>> input = array([[0, 0, 0, 0, 0, 0, 0], - ... [0, 1, 1, 1, 1, 1, 0], - ... [0, 1, 0, 0, 0, 1, 0], - ... [0, 1, 0, 0, 0, 1, 0], - ... [0, 1, 0, 0, 0, 1, 0], - ... [0, 1, 1, 1, 1, 1, 0], - ... [0, 0, 0, 0, 0, 0, 0]], np.uint8) - >>> markers = array([[1, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 2, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... 
[0, 0, 0, 0, 0, 0, 0]], np.int8) - >>> watershed_ift(input, markers) - array([[1, 1, 1, 1, 1, 1, 1], - [1, 1, 2, 2, 2, 1, 1], - [1, 2, 2, 2, 2, 2, 1], - [1, 2, 2, 2, 2, 2, 1], - [1, 2, 2, 2, 2, 2, 1], - [1, 1, 2, 2, 2, 1, 1], - [1, 1, 1, 1, 1, 1, 1]], dtype=int8) - - Here two markers were used to designate an object (*marker* = 2) and - the background (*marker* = 1). The order in which these are processed - is arbitrary: moving the marker for the background to the lower - right corner of the array yields a different result:: - - >>> markers = array([[0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 2, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 1]], np.int8) - >>> watershed_ift(input, markers) - array([[1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1], - [1, 1, 2, 2, 2, 1, 1], - [1, 1, 2, 2, 2, 1, 1], - [1, 1, 2, 2, 2, 1, 1], - [1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1]], dtype=int8) - - The result is that the object (*marker* = 2) is smaller because the - second marker was processed earlier. This may not be the desired - effect if the first marker was supposed to designate a background - object. Therefore :func:`watershed_ift` treats markers with a negative - value explicitly as background markers and processes them after the - normal markers. For instance, replacing the first marker by a - negative marker gives a result similar to the first example:: - - >>> markers = array([[0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 2, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... [0, 0, 0, 0, 0, 0, 0], - ... 
[0, 0, 0, 0, 0, 0, -1]], np.int8) - >>> watershed_ift(input, markers) - array([[-1, -1, -1, -1, -1, -1, -1], - [-1, -1, 2, 2, 2, -1, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, -1, 2, 2, 2, -1, -1], - [-1, -1, -1, -1, -1, -1, -1]], dtype=int8) - - The connectivity of the objects is defined by a structuring - element. If no structuring element is provided, one is generated by - calling :func:`generate_binary_structure` (see - :ref:`ndimage-binary-morphology`) using a connectivity of one - (which in 2D is a 4-connected structure.) For example, using - an 8-connected structure with the last example yields a different object:: - - >>> watershed_ift(input, markers, - ... structure = [[1,1,1], [1,1,1], [1,1,1]]) - array([[-1, -1, -1, -1, -1, -1, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, 2, 2, 2, 2, 2, -1], - [-1, -1, -1, -1, -1, -1, -1]], dtype=int8) - - .. note:: The implementation of :func:`watershed_ift` limits the data types of the input to :ctype:`UInt8` and :ctype:`UInt16`. - - -.. _ndimage-object-measurements: - -Object measurements -------------------- - -.. currentmodule:: scipy.ndimage.measurements - -Given an array of labeled objects, the properties of the individual -objects can be measured. The :func:`find_objects` function can be used -to generate a list of slices that for each object, give the -smallest sub-array that fully contains the object: - - The :func:`find_objects` function finds all objects in a labeled array and - returns a list of slices that correspond to the smallest regions in - the array that contains the object. 
For instance:: - - >>> a = array([[0,1,1,0,0,0],[0,1,1,0,1,0],[0,0,0,1,1,1],[0,0,0,0,1,0]]) - >>> l, n = label(a) - >>> f = find_objects(l) - >>> a[f[0]] - array([[1 1], - [1 1]]) - >>> a[f[1]] - array([[0, 1, 0], - [1, 1, 1], - [0, 1, 0]]) - - :func:`find_objects` returns slices for all objects, unless the - *max_label* parameter is larger than zero, in which case only the - first *max_label* objects are returned. If an index is missing in - the *label* array, None is returned instead of a slice. For - example:: - - >>> find_objects([1, 0, 3, 4], max_label = 3) - [(slice(0, 1, None),), None, (slice(2, 3, None),)] - - -The list of slices generated by :func:`find_objects` is useful to find -the position and dimensions of the objects in the array, but can -also be used to perform measurements on the individual objects. Say -we want to find the sum of the intensities of an object in image:: - - >>> image = arange(4 * 6).reshape(4, 6) - >>> mask = array([[0,1,1,0,0,0],[0,1,1,0,1,0],[0,0,0,1,1,1],[0,0,0,0,1,0]]) - >>> labels = label(mask)[0] - >>> slices = find_objects(labels) - -Then we can calculate the sum of the elements in the second -object:: - - >>> where(labels[slices[1]] == 2, image[slices[1]], 0).sum() - 80 - -That is however not particularly efficient, and may also be more -complicated for other types of measurements. Therefore a few -measurements functions are defined that accept the array of object -labels and the index of the object to be measured. For instance -calculating the sum of the intensities can be done by:: - - >>> sum(image, labels, 2) - 80 - -For large arrays and small objects it is more efficient to call the -measurement functions after slicing the array:: - - >>> sum(image[slices[1]], labels[slices[1]], 2) - 80 - -Alternatively, we can do the measurements for a number of labels -with a single function call, returning a list of results. 
For -instance, to measure the sum of the values of the background and -the second object in our example we give a list of labels:: - - >>> sum(image, labels, [0, 2]) - array([178.0, 80.0]) - -The measurement functions described below all support the *index* -parameter to indicate which object(s) should be measured. The -default value of *index* is None. This indicates that all -elements where the label is larger than zero should be treated as a -single object and measured. Thus, in this case the *labels* array -is treated as a mask defined by the elements that are larger than -zero. If *index* is a number or a sequence of numbers it gives the -labels of the objects that are measured. If *index* is a sequence, -a list of the results is returned. Functions that return more than -one result, return their result as a tuple if *index* is a single -number, or as a tuple of lists, if *index* is a sequence. - - The :func:`sum` function calculates the sum of the elements of the object - with label(s) given by *index*, using the *labels* array for the - object labels. If *index* is None, all elements with a non-zero - label value are treated as a single object. If *label* is None, - all elements of *input* are used in the calculation. - - - The :func:`mean` function calculates the mean of the elements of the - object with label(s) given by *index*, using the *labels* array for - the object labels. If *index* is None, all elements with a - non-zero label value are treated as a single object. If *label* is - None, all elements of *input* are used in the calculation. - - - The :func:`variance` function calculates the variance of the elements of - the object with label(s) given by *index*, using the *labels* array - for the object labels. If *index* is None, all elements with a - non-zero label value are treated as a single object. If *label* is - None, all elements of *input* are used in the calculation. 
- - - The :func:`standard_deviation` function calculates the standard - deviation of the elements of the object with label(s) given by - *index*, using the *labels* array for the object labels. If *index* - is None, all elements with a non-zero label value are treated as - a single object. If *label* is None, all elements of *input* are - used in the calculation. - - - The :func:`minimum` function calculates the minimum of the elements of - the object with label(s) given by *index*, using the *labels* array - for the object labels. If *index* is None, all elements with a - non-zero label value are treated as a single object. If *label* is - None, all elements of *input* are used in the calculation. - - - The :func:`maximum` function calculates the maximum of the elements of - the object with label(s) given by *index*, using the *labels* array - for the object labels. If *index* is None, all elements with a - non-zero label value are treated as a single object. If *label* is - None, all elements of *input* are used in the calculation. - - - The :func:`minimum_position` function calculates the position of the - minimum of the elements of the object with label(s) given by - *index*, using the *labels* array for the object labels. If *index* - is None, all elements with a non-zero label value are treated as - a single object. If *label* is None, all elements of *input* are - used in the calculation. - - - The :func:`maximum_position` function calculates the position of the - maximum of the elements of the object with label(s) given by - *index*, using the *labels* array for the object labels. If *index* - is None, all elements with a non-zero label value are treated as - a single object. If *label* is None, all elements of *input* are - used in the calculation. - - - The :func:`extrema` function calculates the minimum, the maximum, and - their positions, of the elements of the object with label(s) given - by *index*, using the *labels* array for the object labels. 
If - *index* is None, all elements with a non-zero label value are - treated as a single object. If *label* is None, all elements of - *input* are used in the calculation. The result is a tuple giving - the minimum, the maximum, the position of the minimum and the - position of the maximum. The result is the same as a tuple formed - by the results of the functions *minimum*, *maximum*, - *minimum_position*, and *maximum_position* that are described - above. - - - The :func:`center_of_mass` function calculates the center of mass of - the object with label(s) given by *index*, using the - *labels* array for the object labels. If *index* is None, all - elements with a non-zero label value are treated as a single - object. If *label* is None, all elements of *input* are used in - the calculation. - - - The :func:`histogram` function calculates a histogram of the - object with label(s) given by *index*, using the *labels* array for - the object labels. If *index* is None, all elements with a - non-zero label value are treated as a single object. If *label* is - None, all elements of *input* are used in the calculation. - Histograms are defined by their minimum (*min*), maximum (*max*) - and the number of bins (*bins*). They are returned as - one-dimensional arrays of type :ctype:`Int32`. - - -.. _ndimage-ccallbacks: - -Extending :mod:`ndimage` in C ------------------------------ - -.. highlight:: c - -A few functions in the :mod:`scipy.ndimage` take a call-back -argument. This can be a python function, but also a :ctype:`PyCObject` -containing a pointer to a C function. To use this feature, you must -write your own C extension that defines the function, and define a Python function that returns a :ctype:`PyCObject` containing a pointer to this function. - -An example of a function that supports this is -:func:`geometric_transform` (see :ref:`ndimage-interpolation`). 
- -You can pass it a python callable object that defines a mapping -from all output coordinates to corresponding coordinates in the -input array. This mapping function can also be a C function, which -generally will be much more efficient, since the overhead of -calling a python function at each element is avoided. - -For example to implement a simple shift function we define the -following function: - -:: - - static int - _shift_function(int *output_coordinates, double* input_coordinates, - int output_rank, int input_rank, void *callback_data) - { - int ii; - /* get the shift from the callback data pointer: */ - double shift = *(double*)callback_data; - /* calculate the coordinates: */ - for(ii = 0; ii < input_rank; ii++) - input_coordinates[ii] = output_coordinates[ii] - shift; - /* return OK status: */ - return 1; - } - -This function is called at every element of the output array, -passing the current coordinates in the *output_coordinates* array. -On return, the *input_coordinates* array must contain the -coordinates at which the input is interpolated. The ranks of the -input and output array are passed through *output_rank* and -*input_rank*. The value of the shift is passed through the -*callback_data* argument, which is a pointer to void. The function -returns an error status, in this case always 1, since no error can -occur. - -A pointer to this function and a pointer to the shift value must be -passed to :func:`geometric_transform`. 
Both are passed by a single -:ctype:`PyCObject` which is created by the following python extension -function: - -:: - - static PyObject * - py_shift_function(PyObject *obj, PyObject *args) - { - double shift = 0.0; - if (!PyArg_ParseTuple(args, "d", &shift)) { - PyErr_SetString(PyExc_RuntimeError, "invalid parameters"); - return NULL; - } else { - /* assign the shift to a dynamically allocated location: */ - double *cdata = (double*)malloc(sizeof(double)); - *cdata = shift; - /* wrap function and callback_data in a CObject: */ - return PyCObject_FromVoidPtrAndDesc(_shift_function, cdata, - _destructor); - } - } - -The value of the shift is obtained and then assigned to a -dynamically allocated memory location. Both this data pointer and -the function pointer are then wrapped in a :ctype:`PyCObject`, which is -returned. Additionally, a pointer to a destructor function is -given, that will free the memory we allocated for the shift value -when the :ctype:`PyCObject` is destroyed. This destructor is very simple: - -:: - - static void - _destructor(void* cobject, void *cdata) - { - if (cdata) - free(cdata); - } - -To use these functions, an extension module is built: - -:: - - static PyMethodDef methods[] = { - {"shift_function", (PyCFunction)py_shift_function, METH_VARARGS, ""}, - {NULL, NULL, 0, NULL} - }; - - void - initexample(void) - { - Py_InitModule("example", methods); - } - -This extension can then be used in Python, for example: - -.. highlight:: python - -:: - - >>> import example - >>> array = arange(12).reshape((4, 3)).astype(np.float64) - >>> fnc = example.shift_function(0.5) - >>> geometric_transform(array, fnc) - array([[ 0. 0. 0. ], - [ 0. 1.3625 2.7375], - [ 0. 4.8125 6.1875], - [ 0. 8.2625 9.6375]]) - -C callback functions for use with :mod:`ndimage` functions must all -be written according to this scheme. The next section lists the -:mod:`ndimage` functions that accept a C callback function and -gives the prototype of the callback function. 
- -Functions that support C callback functions -------------------------------------------- - -The :mod:`ndimage` functions that support C callback functions are -described here. Obviously, the prototype of the function that is -provided to these functions must match exactly that what they -expect. Therefore we give here the prototypes of the callback -functions. All these callback functions accept a void -*callback_data* pointer that must be wrapped in a :ctype:`PyCObject` using -the Python :cfunc:`PyCObject_FromVoidPtrAndDesc` function, which can also -accept a pointer to a destructor function to free any memory -allocated for *callback_data*. If *callback_data* is not needed, -:cfunc:`PyCObject_FromVoidPtr` may be used instead. The callback -functions must return an integer error status that is equal to zero -if something went wrong, or 1 otherwise. If an error occurs, you -should normally set the python error status with an informative -message before returning, otherwise, a default error message is set -by the calling function. - -The function :func:`generic_filter` (see -:ref:`ndimage-genericfilters`) accepts a callback function with the -following prototype: - - The calling function iterates over the elements of the input and - output arrays, calling the callback function at each element. The - elements within the footprint of the filter at the current element - are passed through the *buffer* parameter, and the number of - elements within the footprint through *filter_size*. The - calculated valued should be returned in the *return_value* - argument. - -The function :func:`generic_filter1d` (see -:ref:`ndimage-genericfilters`) accepts a callback function with the -following prototype: - - The calling function iterates over the lines of the input and - output arrays, calling the callback function at each line. 
The - current line is extended according to the border conditions set by - the calling function, and the result is copied into the array that - is passed through the *input_line* array. The length of the input - line (after extension) is passed through *input_length*. The - callback function should apply the 1D filter and store the result - in the array passed through *output_line*. The length of the - output line is passed through *output_length*. - -The function :func:`geometric_transform` (see -:ref:`ndimage-interpolation`) expects a function with the following -prototype: - - The calling function iterates over the elements of the output - array, calling the callback function at each element. The - coordinates of the current output element are passed through - *output_coordinates*. The callback function must return the - coordinates at which the input must be interpolated in - *input_coordinates*. The rank of the input and output arrays are - given by *input_rank* and *output_rank* respectively. 
- - - diff --git a/scipy-0.10.1/doc/source/tutorial/octave_a.mat b/scipy-0.10.1/doc/source/tutorial/octave_a.mat deleted file mode 100644 index ead7f06870..0000000000 Binary files a/scipy-0.10.1/doc/source/tutorial/octave_a.mat and /dev/null differ diff --git a/scipy-0.10.1/doc/source/tutorial/octave_cells.mat b/scipy-0.10.1/doc/source/tutorial/octave_cells.mat deleted file mode 100644 index c49d8234bf..0000000000 Binary files a/scipy-0.10.1/doc/source/tutorial/octave_cells.mat and /dev/null differ diff --git a/scipy-0.10.1/doc/source/tutorial/octave_struct.mat b/scipy-0.10.1/doc/source/tutorial/octave_struct.mat deleted file mode 100644 index e141a998ee..0000000000 Binary files a/scipy-0.10.1/doc/source/tutorial/octave_struct.mat and /dev/null differ diff --git a/scipy-0.10.1/doc/source/tutorial/optimize.rst b/scipy-0.10.1/doc/source/tutorial/optimize.rst deleted file mode 100644 index a66a37ce25..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/optimize.rst +++ /dev/null @@ -1,810 +0,0 @@ -Optimization (:mod:`scipy.optimize`) -==================================== - -.. sectionauthor:: Travis E. Oliphant - -.. sectionauthor:: Pauli Virtanen - -.. currentmodule:: scipy.optimize - -The :mod:`scipy.optimize` package provides several commonly used -optimization algorithms. An detailed listing is available: -:mod:`scipy.optimize` (can also be found by ``help(scipy.optimize)``). - -The module contains: - -1. Unconstrained and constrained minimization and least-squares algorithms - (e.g., :func:`fmin`: Nelder-Mead simplex, :func:`fmin_bfgs`: - BFGS, :func:`fmin_ncg`: Newton Conjugate Gradient, - :func:`leastsq`: Levenberg-Marquardt, :func:`fmin_cobyla`: COBYLA). - -2. Global (brute-force) optimization routines (e.g., :func:`anneal`) - -3. Curve fitting (:func:`curve_fit`) - -4. Scalar function minimizers and root finders (e.g., Brent's method - :func:`fminbound`, and :func:`newton`) - -5. Multivariate equation system solvers (:func:`fsolve`) - -6. 
Large-scale multivariate equation system solvers (e.g. :func:`newton_krylov`) - -Below, several examples demonstrate their basic usage. - - -Nelder-Mead Simplex algorithm (:func:`fmin`) --------------------------------------------- - -The simplex algorithm is probably the simplest way to minimize a -fairly well-behaved function. The simplex algorithm requires only -function evaluations and is a good choice for simple minimization -problems. However, because it does not use any gradient evaluations, -it may take longer to find the minimum. To demonstrate the -minimization function consider the problem of minimizing the -Rosenbrock function of :math:`N` variables: - -.. math:: - :nowrap: - - \[ f\left(\mathbf{x}\right)=\sum_{i=1}^{N-1}100\left(x_{i}-x_{i-1}^{2}\right)^{2}+\left(1-x_{i-1}\right)^{2}.\] - -The minimum value of this function is 0 which is achieved when :math:`x_{i}=1.` This minimum can be found using the :obj:`fmin` routine as shown in the example below: - - >>> from scipy.optimize import fmin - >>> def rosen(x): - ... """The Rosenbrock function""" - ... return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0) - - >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] - >>> xopt = fmin(rosen, x0, xtol=1e-8) - Optimization terminated successfully. - Current function value: 0.000000 - Iterations: 339 - Function evaluations: 571 - - >>> print xopt - [ 1. 1. 1. 1. 1.] - -Another optimization algorithm that needs only function calls to find -the minimum is Powell's method available as :func:`fmin_powell`. - - -Broyden-Fletcher-Goldfarb-Shanno algorithm (:func:`fmin_bfgs`) --------------------------------------------------------------- - -In order to converge more quickly to the solution, this routine uses -the gradient of the objective function. If the gradient is not given -by the user, then it is estimated using first-differences. 
The -Broyden-Fletcher-Goldfarb-Shanno (BFGS) method typically requires -fewer function calls than the simplex algorithm even when the gradient -must be estimated. - -To demonstrate this algorithm, the Rosenbrock function is again used. -The gradient of the Rosenbrock function is the vector: - -.. math:: - :nowrap: - - \begin{eqnarray*} \frac{\partial f}{\partial x_{j}} & = & \sum_{i=1}^{N}200\left(x_{i}-x_{i-1}^{2}\right)\left(\delta_{i,j}-2x_{i-1}\delta_{i-1,j}\right)-2\left(1-x_{i-1}\right)\delta_{i-1,j}.\\ & = & 200\left(x_{j}-x_{j-1}^{2}\right)-400x_{j}\left(x_{j+1}-x_{j}^{2}\right)-2\left(1-x_{j}\right).\end{eqnarray*} - -This expression is valid for the interior derivatives. Special cases -are - -.. math:: - :nowrap: - - \begin{eqnarray*} \frac{\partial f}{\partial x_{0}} & = & -400x_{0}\left(x_{1}-x_{0}^{2}\right)-2\left(1-x_{0}\right),\\ \frac{\partial f}{\partial x_{N-1}} & = & 200\left(x_{N-1}-x_{N-2}^{2}\right).\end{eqnarray*} - -A Python function which computes this gradient is constructed by the -code-segment: - - >>> def rosen_der(x): - ... xm = x[1:-1] - ... xm_m1 = x[:-2] - ... xm_p1 = x[2:] - ... der = zeros_like(x) - ... der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm) - ... der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0]) - ... der[-1] = 200*(x[-1]-x[-2]**2) - ... return der - -The calling signature for the BFGS minimization algorithm is similar -to :obj:`fmin` with the addition of the *fprime* argument. An example -usage of :obj:`fmin_bfgs` is shown in the following example which -minimizes the Rosenbrock function. - - >>> from scipy.optimize import fmin_bfgs - - >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] - >>> xopt = fmin_bfgs(rosen, x0, fprime=rosen_der) - Optimization terminated successfully. - Current function value: 0.000000 - Iterations: 53 - Function evaluations: 65 - Gradient evaluations: 65 - >>> print xopt - [ 1. 1. 1. 1. 1.] 
- - -Newton-Conjugate-Gradient (:func:`fmin_ncg`) --------------------------------------------- - -The method which requires the fewest function calls and is therefore -often the fastest method to minimize functions of many variables is -:obj:`fmin_ncg`. This method is a modified Newton's method and uses a -conjugate gradient algorithm to (approximately) invert the local -Hessian. Newton's method is based on fitting the function locally to -a quadratic form: - -.. math:: - :nowrap: - - \[ f\left(\mathbf{x}\right)\approx f\left(\mathbf{x}_{0}\right)+\nabla f\left(\mathbf{x}_{0}\right)\cdot\left(\mathbf{x}-\mathbf{x}_{0}\right)+\frac{1}{2}\left(\mathbf{x}-\mathbf{x}_{0}\right)^{T}\mathbf{H}\left(\mathbf{x}_{0}\right)\left(\mathbf{x}-\mathbf{x}_{0}\right).\] - -where :math:`\mathbf{H}\left(\mathbf{x}_{0}\right)` is a matrix of second-derivatives (the Hessian). If the Hessian is -positive definite then the local minimum of this function can be found -by setting the gradient of the quadratic form to zero, resulting in - -.. math:: - :nowrap: - - \[ \mathbf{x}_{\textrm{opt}}=\mathbf{x}_{0}-\mathbf{H}^{-1}\nabla f.\] - -The inverse of the Hessian is evaluted using the conjugate-gradient -method. An example of employing this method to minimizing the -Rosenbrock function is given below. To take full advantage of the -NewtonCG method, a function which computes the Hessian must be -provided. The Hessian matrix itself does not need to be constructed, -only a vector which is the product of the Hessian with an arbitrary -vector needs to be available to the minimization routine. As a result, -the user can provide either a function to compute the Hessian matrix, -or a function to compute the product of the Hessian with an arbitrary -vector. - - -Full Hessian example: -^^^^^^^^^^^^^^^^^^^^^ - -The Hessian of the Rosenbrock function is - -.. 
math:: - :nowrap: - - \begin{eqnarray*} H_{ij}=\frac{\partial^{2}f}{\partial x_{i}\partial x_{j}} & = & 200\left(\delta_{i,j}-2x_{i-1}\delta_{i-1,j}\right)-400x_{i}\left(\delta_{i+1,j}-2x_{i}\delta_{i,j}\right)-400\delta_{i,j}\left(x_{i+1}-x_{i}^{2}\right)+2\delta_{i,j},\\ & = & \left(202+1200x_{i}^{2}-400x_{i+1}\right)\delta_{i,j}-400x_{i}\delta_{i+1,j}-400x_{i-1}\delta_{i-1,j},\end{eqnarray*} - -if :math:`i,j\in\left[1,N-2\right]` with :math:`i,j\in\left[0,N-1\right]` defining the :math:`N\times N` matrix. Other non-zero entries of the matrix are - -.. math:: - :nowrap: - - \begin{eqnarray*} \frac{\partial^{2}f}{\partial x_{0}^{2}} & = & 1200x_{0}^{2}-400x_{1}+2,\\ \frac{\partial^{2}f}{\partial x_{0}\partial x_{1}}=\frac{\partial^{2}f}{\partial x_{1}\partial x_{0}} & = & -400x_{0},\\ \frac{\partial^{2}f}{\partial x_{N-1}\partial x_{N-2}}=\frac{\partial^{2}f}{\partial x_{N-2}\partial x_{N-1}} & = & -400x_{N-2},\\ \frac{\partial^{2}f}{\partial x_{N-1}^{2}} & = & 200.\end{eqnarray*} - -For example, the Hessian when :math:`N=5` is - -.. math:: - :nowrap: - - \[ \mathbf{H}=\left[\begin{array}{ccccc} 1200x_{0}^{2}-400x_{1}+2 & -400x_{0} & 0 & 0 & 0\\ -400x_{0} & 202+1200x_{1}^{2}-400x_{2} & -400x_{1} & 0 & 0\\ 0 & -400x_{1} & 202+1200x_{2}^{2}-400x_{3} & -400x_{2} & 0\\ 0 & & -400x_{2} & 202+1200x_{3}^{2}-400x_{4} & -400x_{3}\\ 0 & 0 & 0 & -400x_{3} & 200\end{array}\right].\] - -The code which computes this Hessian along with the code to minimize -the function using :obj:`fmin_ncg` is shown in the following example: - - >>> from scipy.optimize import fmin_ncg - >>> def rosen_hess(x): - ... x = asarray(x) - ... H = diag(-400*x[:-1],1) - diag(400*x[:-1],-1) - ... diagonal = zeros_like(x) - ... diagonal[0] = 1200*x[0]-400*x[1]+2 - ... diagonal[-1] = 200 - ... diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:] - ... H = H + diag(diagonal) - ... 
return H - - >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] - >>> xopt = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, avextol=1e-8) - Optimization terminated successfully. - Current function value: 0.000000 - Iterations: 23 - Function evaluations: 26 - Gradient evaluations: 23 - Hessian evaluations: 23 - >>> print xopt - [ 1. 1. 1. 1. 1.] - - -Hessian product example: -^^^^^^^^^^^^^^^^^^^^^^^^ - -For larger minimization problems, storing the entire Hessian matrix -can consume considerable time and memory. The Newton-CG algorithm only -needs the product of the Hessian times an arbitrary vector. As a -result, the user can supply code to compute this product rather than -the full Hessian by setting the *fhess_p* keyword to the desired -function. The *fhess_p* function should take the minimization vector as -the first argument and the arbitrary vector as the second -argument. Any extra arguments passed to the function to be minimized -will also be passed to this function. If possible, using Newton-CG -with the hessian product option is probably the fastest way to -minimize the function. - -In this case, the product of the Rosenbrock Hessian with an arbitrary -vector is not difficult to compute. If :math:`\mathbf{p}` is the arbitrary vector, then :math:`\mathbf{H}\left(\mathbf{x}\right)\mathbf{p}` has elements: - -.. math:: - :nowrap: - - \[ \mathbf{H}\left(\mathbf{x}\right)\mathbf{p}=\left[\begin{array}{c} \left(1200x_{0}^{2}-400x_{1}+2\right)p_{0}-400x_{0}p_{1}\\ \vdots\\ -400x_{i-1}p_{i-1}+\left(202+1200x_{i}^{2}-400x_{i+1}\right)p_{i}-400x_{i}p_{i+1}\\ \vdots\\ -400x_{N-2}p_{N-2}+200p_{N-1}\end{array}\right].\] - -Code which makes use of the *fhess_p* keyword to minimize the -Rosenbrock function using :obj:`fmin_ncg` follows: - - >>> from scipy.optimize import fmin_ncg - >>> def rosen_hess_p(x,p): - ... x = asarray(x) - ... Hp = zeros_like(x) - ... Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1] - ... 
Hp[1:-1] = -400*x[:-2]*p[:-2]+(202+1200*x[1:-1]**2-400*x[2:])*p[1:-1] \ - ... -400*x[1:-1]*p[2:] - ... Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1] - ... return Hp - - >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] - >>> xopt = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_p, avextol=1e-8) - Optimization terminated successfully. - Current function value: 0.000000 - Iterations: 22 - Function evaluations: 25 - Gradient evaluations: 22 - Hessian evaluations: 54 - >>> print xopt - [ 1. 1. 1. 1. 1.] - - -Least-square fitting (:func:`leastsq`) --------------------------------------- - -All of the previously-explained minimization procedures can be used to -solve a least-squares problem provided the appropriate objective -function is constructed. For example, suppose it is desired to fit a -set of data :math:`\left\{\mathbf{x}_{i}, \mathbf{y}_{i}\right\}` -to a known model, -:math:`\mathbf{y}=\mathbf{f}\left(\mathbf{x},\mathbf{p}\right)` -where :math:`\mathbf{p}` is a vector of parameters for the model that -need to be found. A common method for determining which parameter -vector gives the best fit to the data is to minimize the sum of squares -of the residuals. The residual is usually defined for each observed -data-point as - -.. math:: - :nowrap: - - \[ e_{i}\left(\mathbf{p},\mathbf{y}_{i},\mathbf{x}_{i}\right)=\left\Vert \mathbf{y}_{i}-\mathbf{f}\left(\mathbf{x}_{i},\mathbf{p}\right)\right\Vert .\] - -An objective function to pass to any of the previous minimization -algorithms to obtain a least-squares fit is - -.. math:: - :nowrap: - - \[ J\left(\mathbf{p}\right)=\sum_{i=0}^{N-1}e_{i}^{2}\left(\mathbf{p}\right).\] - - - -The :obj:`leastsq` algorithm performs this squaring and summing of the -residuals automatically. It takes as an input argument the vector -function :math:`\mathbf{e}\left(\mathbf{p}\right)` and returns the -value of :math:`\mathbf{p}` which minimizes -:math:`J\left(\mathbf{p}\right)=\mathbf{e}^{T}\mathbf{e}` -directly. 
The user is also encouraged to provide the Jacobian matrix -of the function (with derivatives down the columns or across the -rows). If the Jacobian is not provided, it is estimated. - -An example should clarify the usage. Suppose it is believed some -measured data follow a sinusoidal pattern - -.. math:: - :nowrap: - - \[ y_{i}=A\sin\left(2\pi kx_{i}+\theta\right)\] - -where the parameters :math:`A,` :math:`k` , and :math:`\theta` are unknown. The residual vector is - -.. math:: - :nowrap: - - \[ e_{i}=\left|y_{i}-A\sin\left(2\pi kx_{i}+\theta\right)\right|.\] - -By defining a function to compute the residuals and (selecting an -appropriate starting position), the least-squares fit routine can be -used to find the best-fit parameters :math:`\hat{A},\,\hat{k},\,\hat{\theta}`. -This is shown in the following example: - -.. plot:: - - >>> from numpy import * - >>> x = arange(0,6e-2,6e-2/30) - >>> A,k,theta = 10, 1.0/3e-2, pi/6 - >>> y_true = A*sin(2*pi*k*x+theta) - >>> y_meas = y_true + 2*random.randn(len(x)) - - >>> def residuals(p, y, x): - ... A,k,theta = p - ... err = y-A*sin(2*pi*k*x+theta) - ... return err - - >>> def peval(x, p): - ... return p[0]*sin(2*pi*p[1]*x+p[2]) - - >>> p0 = [8, 1/2.3e-2, pi/3] - >>> print array(p0) - [ 8. 43.4783 1.0472] - - >>> from scipy.optimize import leastsq - >>> plsq = leastsq(residuals, p0, args=(y_meas, x)) - >>> print plsq[0] - [ 10.9437 33.3605 0.5834] - - >>> print array([A, k, theta]) - [ 10. 33.3333 0.5236] - - >>> import matplotlib.pyplot as plt - >>> plt.plot(x,peval(x,plsq[0]),x,y_meas,'o',x,y_true) - >>> plt.title('Least-squares fit to noisy data') - >>> plt.legend(['Fit', 'Noisy', 'True']) - >>> plt.show() - -.. :caption: Least-square fitting to noisy data using -.. :obj:`scipy.optimize.leastsq` - - -.. 
_tutorial-sqlsp: - -Sequential Least-square fitting with constraints (:func:`fmin_slsqp`) ---------------------------------------------------------------------- - -This module implements the Sequential Least SQuares Programming optimization algorithm (SLSQP). - -.. math:: - :nowrap: - - \begin{eqnarray*} \min F(x) \\ \text{subject to } & C_j(X) = 0 , &j = 1,...,\text{MEQ}\\ - & C_j(x) \geq 0 , &j = \text{MEQ}+1,...,M\\ - & XL \leq x \leq XU , &I = 1,...,N. \end{eqnarray*} - -The following script shows examples for how constraints can be specified. - -:: - - """ - This script tests fmin_slsqp using Example 14.4 from Numerical Methods for - Engineers by Steven Chapra and Raymond Canale. This example maximizes the - function f(x) = 2*x0*x1 + 2*x0 - x0**2 - 2*x1**2, which has a maximum - at x0=2, x1=1. - """ - - from scipy.optimize import fmin_slsqp - from numpy import array - - def testfunc(x, *args): - """ - Parameters - ---------- - d : list - A list of two elements, where d[0] represents x and - d[1] represents y in the following equation. - args : tuple - First element of args is a multiplier for f. - Since the objective function should be maximized, and the scipy - optimizers can only minimize functions, it is necessary to - multiply the objective function by -1 to achieve the desired - solution. - Returns - ------- - res : float - The result, equal to ``2*x*y + 2*x - x**2 - 2*y**2``. 
- - """ - try: - sign = args[0] - except: - sign = 1.0 - return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2) - - def testfunc_deriv(x,*args): - """ This is the derivative of testfunc, returning a numpy array - representing df/dx and df/dy """ - try: - sign = args[0] - except: - sign = 1.0 - dfdx0 = sign*(-2*x[0] + 2*x[1] + 2) - dfdx1 = sign*(2*x[0] - 4*x[1]) - return array([ dfdx0, dfdx1 ]) - - def test_eqcons(x,*args): - """ Lefthandside of the equality constraint """ - return array([ x[0]**3-x[1] ]) - - def test_ieqcons(x,*args): - """ Lefthandside of inequality constraint """ - return array([ x[1]-1 ]) - - def test_fprime_eqcons(x,*args): - """ First derivative of equality constraint """ - return array([ 3.0*(x[0]**2.0), -1.0 ]) - - def test_fprime_ieqcons(x,*args): - """ First derivative of inequality constraint """ - return array([ 0.0, 1.0 ]) - - from time import time - - print "Unbounded optimization." - print "Derivatives of objective function approximated." - t0 = time() - result = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,), iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - print "Unbounded optimization." - print "Derivatives of objective function provided." - t0 = time() - result = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,), - iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - print "Bound optimization (equality constraints)." - print "Constraints implemented via lambda function." - print "Derivatives of objective function approximated." - print "Derivatives of constraints approximated." - t0 = time() - result = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,), - eqcons=[lambda x, args: x[0]-x[1] ], iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - print "Bound optimization (equality constraints)." - print "Constraints implemented via lambda." 
- print "Derivatives of objective function provided." - print "Derivatives of constraints approximated." - t0 = time() - result = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,), - eqcons=[lambda x, args: x[0]-x[1] ], iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - print "Bound optimization (equality and inequality constraints)." - print "Constraints implemented via lambda." - print "Derivatives of objective function provided." - print "Derivatives of constraints approximated." - t0 = time() - result = fmin_slsqp(testfunc,[-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,), - eqcons=[lambda x, args: x[0]-x[1] ], - ieqcons=[lambda x, args: x[0]-.5], iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - print "Bound optimization (equality and inequality constraints)." - print "Constraints implemented via function." - print "Derivatives of objective function provided." - print "Derivatives of constraints approximated." - t0 = time() - result = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,), - f_eqcons=test_eqcons, f_ieqcons=test_ieqcons, - iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - print "Bound optimization (equality and inequality constraints)." - print "Constraints implemented via function." - print "All derivatives provided." - t0 = time() - result = fmin_slsqp(testfunc,[-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,), - f_eqcons=test_eqcons, fprime_eqcons=test_fprime_eqcons, - f_ieqcons=test_ieqcons, fprime_ieqcons=test_fprime_ieqcons, - iprint=2, full_output=1) - print "Elapsed time:", 1000*(time()-t0), "ms" - print "Results", result, "\n\n" - - -Scalar function minimizers --------------------------- - -Often only the minimum of a scalar function is needed (a scalar -function is one that takes a scalar as input and returns a scalar -output). 
In these circumstances, other optimization techniques have -been developed that can work faster. - - -Unconstrained minimization (:func:`brent`) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are actually two methods that can be used to minimize a scalar -function (:obj:`brent` and :func:`golden`), but :obj:`golden` is -included only for academic purposes and should rarely be used. The -brent method uses Brent's algorithm for locating a minimum. Optimally -a bracket should be given which contains the minimum desired. A -bracket is a triple :math:`\left(a,b,c\right)` such that -:math:`f\left(a\right)>f\left(b\right)<f\left(c\right)` and -:math:`a<b<c` . If the bracket is not given, then two starting points -can be chosen and a bracket will be found from these points using a -simple marching algorithm. - - -Bounded minimization (:func:`fminbound`) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Often it is useful to find the minimum of a scalar function over a -fixed interval. The :obj:`fminbound` function finds the minimum of the -function in the given interval. The following example finds the -minimum of the Bessel function of the first kind of order one on the -interval :math:`\left[4,7\right]`: - - >>> from scipy.special import j1 - >>> from scipy.optimize import fminbound - >>> xmin = fminbound(j1, 4, 7) - >>> print xmin - 5.33144184241 - - -Root finding ------------- - - -Sets of equations -^^^^^^^^^^^^^^^^^ - -To find the roots of a polynomial, the command :obj:`roots -<numpy.roots>` is useful. To find a root of a set of non-linear -equations, the command :obj:`fsolve` is needed. For example, the -following example finds the roots of the single-variable -transcendental equation - -.. math:: - :nowrap: - - \[ x+2\cos\left(x\right)=0,\] - -and the set of non-linear equations - -.. math:: - :nowrap: - - \begin{eqnarray*} - x_{0}\cos\left(x_{1}\right) & = & 4,\\ - x_{0}x_{1}-x_{1} & = & 5. - \end{eqnarray*} - -The results are :math:`x=-1.0299` and :math:`x_{0}=6.5041,\, x_{1}=0.9084` . - - >>> def func(x): - ... return x + 2*cos(x) - - >>> def func2(x): - ... out = [x[0]*cos(x[1]) - 4] - ... out.append(x[1]*x[0] - x[1] - 5) - ... return out - - >>> from scipy.optimize import fsolve - >>> x0 = fsolve(func, 0.3) - >>> print x0 - -1.02986652932 - - >>> x02 = fsolve(func2, [1, 1]) - >>> print x02 - [ 6.50409711 0.90841421] - - -Scalar function root finding -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If one has a single-variable equation, there are four different root -finder algorithms that can be tried. 
Each of these root finding -algorithms requires the endpoints of an interval where a root is -suspected (because the function changes signs). In general -:obj:`brentq` is the best choice, but the other methods may be useful -in certain circumstances or for academic purposes. - - -Fixed-point solving -^^^^^^^^^^^^^^^^^^^ - -A problem closely related to finding the zeros of a function is the -problem of finding a fixed-point of a function. A fixed point of a -function is the point at which evaluation of the function returns the -point: :math:`g\left(x\right)=x.` Clearly the fixed point of :math:`g` -is the root of :math:`f\left(x\right)=g\left(x\right)-x.` -Equivalently, the root of :math:`f` is the fixed_point of -:math:`g\left(x\right)=f\left(x\right)+x.` The routine -:obj:`fixed_point` provides a simple iterative method using Aitken's -sequence acceleration to estimate the fixed point of :math:`g` given a -starting point. - - -Root finding: Large problems ----------------------------- - -The :obj:`fsolve` function cannot deal with a very large number of -variables (*N*), as it needs to calculate and invert a dense *N x N* -Jacobian matrix on every Newton step. This becomes rather inefficient -when *N* grows. - -Consider for instance the following problem: we need to solve the -following integrodifferential equation on the square -:math:`[0,1]\times[0,1]`: - -.. math:: - - (\partial_x^2 + \partial_y^2) P + 5 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 = 0 - -with the boundary condition :math:`P(x,1) = 1` on the upper edge and -:math:`P=0` elsewhere on the boundary of the square. This can be done -by approximating the continuous function *P* by its values on a grid, -:math:`P_{n,m}\approx{}P(n h, m h)`, with a small grid spacing -*h*. The derivatives and integrals can then be approximated; for -instance :math:`\partial_x^2 P(x,y)\approx{}(P(x+h,y) - 2 P(x,y) + -P(x-h,y))/h^2`. 
The problem is then equivalent to finding the root of -some function *residual(P)*, where *P* is a vector of length -:math:`N_x N_y`. - -Now, because :math:`N_x N_y` can be large, :obj:`fsolve` will take a -long time to solve this problem. The solution can however be found -using one of the large-scale solvers in :mod:`scipy.optimize`, for -example :obj:`newton_krylov`, :obj:`broyden2`, or -:obj:`anderson`. These use what is known as the inexact Newton method, -which instead of computing the Jacobian matrix exactly, forms an -approximation for it. - -The problem we have can now be solved as follows: - -.. plot:: - - import numpy as np - from scipy.optimize import newton_krylov - from numpy import cosh, zeros_like, mgrid, zeros - - # parameters - nx, ny = 75, 75 - hx, hy = 1./(nx-1), 1./(ny-1) - - P_left, P_right = 0, 0 - P_top, P_bottom = 1, 0 - - def residual(P): - d2x = zeros_like(P) - d2y = zeros_like(P) - - d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx - d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx - d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx - - d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy - d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy - d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy - - return d2x + d2y + 5*cosh(P).mean()**2 - - # solve - guess = zeros((nx, ny), float) - sol = newton_krylov(residual, guess, verbose=1) - #sol = broyden2(residual, guess, max_rank=50, verbose=1) - #sol = anderson(residual, guess, M=10, verbose=1) - print 'Residual', abs(residual(sol)).max() - - # visualize - import matplotlib.pyplot as plt - x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] - plt.pcolor(x, y, sol) - plt.colorbar() - plt.show() - - -Still too slow? Preconditioning. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When looking for the zero of the functions :math:`f_i({\bf x}) = 0`, -*i = 1, 2, ..., N*, the :obj:`newton_krylov` solver spends most of its -time inverting the Jacobian matrix, - -.. math:: J_{ij} = \frac{\partial f_i}{\partial x_j} . 
- -If you have an approximation for the inverse matrix -:math:`M\approx{}J^{-1}`, you can use it for *preconditioning* the -linear inversion problem. The idea is that instead of solving -:math:`J{\bf s}={\bf y}` one solves :math:`MJ{\bf s}=M{\bf y}`: since -matrix :math:`MJ` is "closer" to the identity matrix than :math:`J` -is, the equation should be easier for the Krylov method to deal with. - -The matrix *M* can be passed to :obj:`newton_krylov` as the *inner_M* -parameter. It can be a (sparse) matrix or a -:obj:`scipy.sparse.linalg.LinearOperator` instance. - -For the problem in the previous section, we note that the function to -solve consists of two parts: the first one is application of the -Laplace operator, :math:`[\partial_x^2 + \partial_y^2] P`, and the second -is the integral. We can actually easily compute the Jacobian corresponding -to the Laplace operator part: we know that in one dimension - -.. math:: - - \partial_x^2 \approx \frac{1}{h_x^2} \begin{pmatrix} - -2 & 1 & 0 & 0 \cdots \\ - 1 & -2 & 1 & 0 \cdots \\ - 0 & 1 & -2 & 1 \cdots \\ - \ldots - \end{pmatrix} - = h_x^{-2} L - -so that the whole 2-D operator is represented by - -.. math:: - - J_1 = \partial_x^2 + \partial_y^2 - \simeq - h_x^{-2} L \otimes I + h_y^{-2} I \otimes L - -The matrix :math:`J_2` of the Jacobian corresponding to the integral -is more difficult to calculate, and since *all* of it entries are -nonzero, it will be difficult to invert. :math:`J_1` on the other hand -is a relatively simple matrix, and can be inverted by -:obj:`scipy.sparse.linalg.splu` (or the inverse can be approximated by -:obj:`scipy.sparse.linalg.spilu`). So we are content to take -:math:`M\approx{}J_1^{-1}` and hope for the best. - -In the example below, we use the preconditioner :math:`M=J_1^{-1}`. - -.. 
literalinclude:: examples/newton_krylov_preconditioning.py - -Resulting run, first without preconditioning:: - - 0: |F(x)| = 803.614; step 1; tol 0.000257947 - 1: |F(x)| = 345.912; step 1; tol 0.166755 - 2: |F(x)| = 139.159; step 1; tol 0.145657 - 3: |F(x)| = 27.3682; step 1; tol 0.0348109 - 4: |F(x)| = 1.03303; step 1; tol 0.00128227 - 5: |F(x)| = 0.0406634; step 1; tol 0.00139451 - 6: |F(x)| = 0.00344341; step 1; tol 0.00645373 - 7: |F(x)| = 0.000153671; step 1; tol 0.00179246 - 8: |F(x)| = 6.7424e-06; step 1; tol 0.00173256 - Residual 3.57078908664e-07 - Evaluations 317 - -and then with preconditioning:: - - 0: |F(x)| = 136.993; step 1; tol 7.49599e-06 - 1: |F(x)| = 4.80983; step 1; tol 0.00110945 - 2: |F(x)| = 0.195942; step 1; tol 0.00149362 - 3: |F(x)| = 0.000563597; step 1; tol 7.44604e-06 - 4: |F(x)| = 1.00698e-09; step 1; tol 2.87308e-12 - Residual 9.29603061195e-11 - Evaluations 77 - -Using a preconditioner reduced the number of evaluations of the -*residual* function by a factor of *4*. For problems where the -residual is expensive to compute, good preconditioning can be crucial ---- it can even decide whether the problem is solvable in practice or -not. - -Preconditioning is an art, science, and industry. Here, we were lucky -in making a simple choice that worked reasonably well, but there is a -lot more depth to this topic than is shown here. - -.. rubric:: References - -Some further reading and related software: - -.. [KK] D.A. Knoll and D.E. Keyes, "Jacobian-free Newton-Krylov methods", - J. Comp. Phys. 193, 357 (2003). - -.. [PP] PETSc http://www.mcs.anl.gov/petsc/ and its Python bindings - http://code.google.com/p/petsc4py/ - -.. 
[AMG] PyAMG (algebraic multigrid preconditioners/solvers) - http://code.google.com/p/pyamg/ diff --git a/scipy-0.10.1/doc/source/tutorial/signal.rst b/scipy-0.10.1/doc/source/tutorial/signal.rst deleted file mode 100644 index a9b0a4717c..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/signal.rst +++ /dev/null @@ -1,656 +0,0 @@ -Signal Processing (`scipy.signal`) -================================== - -.. sectionauthor:: Travis E. Oliphant - -.. sectionauthor:: Pim Schellart - -.. currentmodule:: scipy.signal - -The signal processing toolbox currently contains some filtering -functions, a limited set of filter design tools, and a few B-spline -interpolation algorithms for one- and two-dimensional data. While the -B-spline algorithms could technically be placed under the -interpolation category, they are included here because they only work -with equally-spaced data and make heavy use of filter-theory and -transfer-function formalism to provide a fast B-spline transform. To -understand this section you will need to understand that a signal in -SciPy is an array of real or complex numbers. - - -B-splines ---------- - -A B-spline is an approximation of a continuous function over a finite- -domain in terms of B-spline coefficients and knot points. If the knot- -points are equally spaced with spacing :math:`\Delta x` , then the B-spline -approximation to a 1-dimensional function is the finite-basis expansion. - -.. math:: - :nowrap: - - \[ y\left(x\right)\approx\sum_{j}c_{j}\beta^{o}\left(\frac{x}{\Delta x}-j\right).\] - -In two dimensions with knot-spacing :math:`\Delta x` and :math:`\Delta y` , the -function representation is - -.. math:: - :nowrap: - - \[ z\left(x,y\right)\approx\sum_{j}\sum_{k}c_{jk}\beta^{o}\left(\frac{x}{\Delta x}-j\right)\beta^{o}\left(\frac{y}{\Delta y}-k\right).\] - -In these expressions, :math:`\beta^{o}\left(\cdot\right)` is the space-limited -B-spline basis function of order, :math:`o` . 
The requirement of equally-spaced -knot-points and equally-spaced data points, allows the development of fast -(inverse-filtering) algorithms for determining the coefficients, :math:`c_{j}` -, from sample-values, :math:`y_{n}` . Unlike the general spline interpolation -algorithms, these algorithms can quickly find the spline coefficients for large -images. - -The advantage of representing a set of samples via B-spline basis -functions is that continuous-domain operators (derivatives, re- -sampling, integral, etc.) which assume that the data samples are drawn -from an underlying continuous function can be computed with relative -ease from the spline coefficients. For example, the second-derivative -of a spline is - -.. math:: - :nowrap: - - \[ y{}^{\prime\prime}\left(x\right)=\frac{1}{\Delta x^{2}}\sum_{j}c_{j}\beta^{o\prime\prime}\left(\frac{x}{\Delta x}-j\right).\] - -Using the property of B-splines that - -.. math:: - :nowrap: - - \[ \frac{d^{2}\beta^{o}\left(w\right)}{dw^{2}}=\beta^{o-2}\left(w+1\right)-2\beta^{o-2}\left(w\right)+\beta^{o-2}\left(w-1\right)\] - -it can be seen that - -.. math:: - :nowrap: - - \[ y^{\prime\prime}\left(x\right)=\frac{1}{\Delta x^{2}}\sum_{j}c_{j}\left[\beta^{o-2}\left(\frac{x}{\Delta x}-j+1\right)-2\beta^{o-2}\left(\frac{x}{\Delta x}-j\right)+\beta^{o-2}\left(\frac{x}{\Delta x}-j-1\right)\right].\] - -If :math:`o=3` , then at the sample points, - -.. math:: - :nowrap: - - \begin{eqnarray*} \Delta x^{2}\left.y^{\prime}\left(x\right)\right|_{x=n\Delta x} & = & \sum_{j}c_{j}\delta_{n-j+1}-2c_{j}\delta_{n-j}+c_{j}\delta_{n-j-1},\\ & = & c_{n+1}-2c_{n}+c_{n-1}.\end{eqnarray*} - -Thus, the second-derivative signal can be easily calculated from the -spline fit. if desired, smoothing splines can be found to make the -second-derivative less sensitive to random-errors. 
- -The savvy reader will have already noticed that the data samples are -related to the knot coefficients via a convolution operator, so that -simple convolution with the sampled B-spline function recovers the -original data from the spline coefficients. The output of convolutions -can change depending on how boundaries are handled (this becomes -increasingly more important as the number of dimensions in the data- -set increases). The algorithms relating to B-splines in the signal- -processing sub package assume mirror-symmetric boundary conditions. -Thus, spline coefficients are computed based on that assumption, and -data-samples can be recovered exactly from the spline coefficients by -assuming them to be mirror-symmetric also. - -Currently the package provides functions for determining second- and -third-order cubic spline coefficients from equally spaced samples in -one- and two-dimensions (:func:`signal.qspline1d`, -:func:`signal.qspline2d`, :func:`signal.cspline1d`, -:func:`signal.cspline2d`). The package also supplies a function ( -:obj:`signal.bspline` ) for evaluating the bspline basis function, -:math:`\beta^{o}\left(x\right)` for arbitrary order and :math:`x.` For -large :math:`o` , the B-spline basis function can be approximated well -by a zero-mean Gaussian function with standard-deviation equal to -:math:`\sigma_{o}=\left(o+1\right)/12` : - -.. math:: - :nowrap: - - \[ \beta^{o}\left(x\right)\approx\frac{1}{\sqrt{2\pi\sigma_{o}^{2}}}\exp\left(-\frac{x^{2}}{2\sigma_{o}}\right).\] - -A function to compute this Gaussian for arbitrary :math:`x` and -:math:`o` is also available ( :obj:`signal.gauss_spline` ). The -following code and Figure uses spline-filtering to compute an -edge-image (the second-derivative of a smoothed spline) of Lena's face -which is an array returned by the command :func:`lena`. 
The command -:obj:`signal.sepfir2d` was used to apply a separable two-dimensional -FIR filter with mirror- symmetric boundary conditions to the spline -coefficients. This function is ideally suited for reconstructing -samples from spline coefficients and is faster than -:obj:`signal.convolve2d` which convolves arbitrary two-dimensional -filters and allows for choosing mirror-symmetric boundary conditions. - -.. plot:: - - >>> from numpy import * - >>> from scipy import signal, misc - >>> import matplotlib.pyplot as plt - - >>> image = misc.lena().astype(float32) - >>> derfilt = array([1.0,-2,1.0],float32) - >>> ck = signal.cspline2d(image,8.0) - >>> deriv = signal.sepfir2d(ck, derfilt, [1]) + \ - >>> signal.sepfir2d(ck, [1], derfilt) - - Alternatively we could have done:: - - laplacian = array([[0,1,0],[1,-4,1],[0,1,0]],float32) - deriv2 = signal.convolve2d(ck,laplacian,mode='same',boundary='symm') - - >>> plt.figure() - >>> plt.imshow(image) - >>> plt.gray() - >>> plt.title('Original image') - >>> plt.show() - - >>> plt.figure() - >>> plt.imshow(deriv) - >>> plt.gray() - >>> plt.title('Output of spline edge filter') - >>> plt.show() - -.. :caption: Example of using smoothing splines to filter images. - - -Filtering ---------- - -Filtering is a generic name for any system that modifies an input -signal in some way. In SciPy a signal can be thought of as a Numpy -array. There are different kinds of filters for different kinds of -operations. There are two broad kinds of filtering operations: linear -and non-linear. Linear filters can always be reduced to multiplication -of the flattened Numpy array by an appropriate matrix resulting in -another flattened Numpy array. Of course, this is not usually the best -way to compute the filter as the matrices and vectors involved may be -huge. For example filtering a :math:`512 \times 512` image with this -method would require multiplication of a :math:`512^2 \times 512^2` -matrix with a :math:`512^2` vector. 
Just trying to store the -:math:`512^2 \times 512^2` matrix using a standard Numpy array would -require :math:`68,719,476,736` elements. At 4 bytes per element this -would require :math:`256\textrm{GB}` of memory. In most applications -most of the elements of this matrix are zero and a different method -for computing the output of the filter is employed. - - -Convolution/Correlation -^^^^^^^^^^^^^^^^^^^^^^^ - -Many linear filters also have the property of shift-invariance. This -means that the filtering operation is the same at different locations -in the signal and it implies that the filtering matrix can be -constructed from knowledge of one row (or column) of the matrix alone. -In this case, the matrix multiplication can be accomplished using -Fourier transforms. - -Let :math:`x\left[n\right]` define a one-dimensional signal indexed by the -integer :math:`n.` Full convolution of two one-dimensional signals can be -expressed as - -.. math:: - :nowrap: - - \[ y\left[n\right]=\sum_{k=-\infty}^{\infty}x\left[k\right]h\left[n-k\right].\] - -This equation can only be implemented directly if we limit the -sequences to finite support sequences that can be stored in a -computer, choose :math:`n=0` to be the starting point of both -sequences, let :math:`K+1` be that value for which -:math:`y\left[n\right]=0` for all :math:`n>K+1` and :math:`M+1` be -that value for which :math:`x\left[n\right]=0` for all :math:`n>M+1` , -then the discrete convolution expression is - -.. math:: - :nowrap: - - \[ y\left[n\right]=\sum_{k=\max\left(n-M,0\right)}^{\min\left(n,K\right)}x\left[k\right]h\left[n-k\right].\] - -For convenience assume :math:`K\geq M.` Then, more explicitly the output of -this operation is - -.. 
math:: - :nowrap: - - \begin{eqnarray*} y\left[0\right] & = & x\left[0\right]h\left[0\right]\\ y\left[1\right] & = & x\left[0\right]h\left[1\right]+x\left[1\right]h\left[0\right]\\ y\left[2\right] & = & x\left[0\right]h\left[2\right]+x\left[1\right]h\left[1\right]+x\left[2\right]h\left[0\right]\\ \vdots & \vdots & \vdots\\ y\left[M\right] & = & x\left[0\right]h\left[M\right]+x\left[1\right]h\left[M-1\right]+\cdots+x\left[M\right]h\left[0\right]\\ y\left[M+1\right] & = & x\left[1\right]h\left[M\right]+x\left[2\right]h\left[M-1\right]+\cdots+x\left[M+1\right]h\left[0\right]\\ \vdots & \vdots & \vdots\\ y\left[K\right] & = & x\left[K-M\right]h\left[M\right]+\cdots+x\left[K\right]h\left[0\right]\\ y\left[K+1\right] & = & x\left[K+1-M\right]h\left[M\right]+\cdots+x\left[K\right]h\left[1\right]\\ \vdots & \vdots & \vdots\\ y\left[K+M-1\right] & = & x\left[K-1\right]h\left[M\right]+x\left[K\right]h\left[M-1\right]\\ y\left[K+M\right] & = & x\left[K\right]h\left[M\right].\end{eqnarray*} - -Thus, the full discrete convolution of two finite sequences of lengths -:math:`K+1` and :math:`M+1` respectively results in a finite sequence of length -:math:`K+M+1=\left(K+1\right)+\left(M+1\right)-1.` - -One dimensional convolution is implemented in SciPy with the function -``signal.convolve`` . This function takes as inputs the signals -:math:`x,` :math:`h` , and an optional flag and returns the signal -:math:`y.` The optional flag allows for specification of which part of -the output signal to return. The default value of 'full' returns the -entire signal. If the flag has a value of 'same' then only the middle -:math:`K` values are returned starting at :math:`y\left[\left\lfloor -\frac{M-1}{2}\right\rfloor \right]` so that the output has the same -length as the largest input. 
If the flag has a value of 'valid' then -only the middle :math:`K-M+1=\left(K+1\right)-\left(M+1\right)+1` -output values are returned where :math:`z` depends on all of the -values of the smallest input from :math:`h\left[0\right]` to -:math:`h\left[M\right].` In other words only the values -:math:`y\left[M\right]` to :math:`y\left[K\right]` inclusive are -returned. - -This same function ``signal.convolve`` can actually take :math:`N` --dimensional arrays as inputs and will return the :math:`N` --dimensional convolution of the two arrays. The same input flags are -available for that case as well. - -Correlation is very similar to convolution except for the minus sign -becomes a plus sign. Thus - -.. math:: - :nowrap: - - \[ w\left[n\right]=\sum_{k=-\infty}^{\infty}y\left[k\right]x\left[n+k\right]\] - -is the (cross) correlation of the signals :math:`y` and :math:`x.` For -finite-length signals with :math:`y\left[n\right]=0` outside of the range -:math:`\left[0,K\right]` and :math:`x\left[n\right]=0` outside of the range -:math:`\left[0,M\right],` the summation can simplify to - -.. math:: - :nowrap: - - \[ w\left[n\right]=\sum_{k=\max\left(0,-n\right)}^{\min\left(K,M-n\right)}y\left[k\right]x\left[n+k\right].\] - -Assuming again that :math:`K\geq M` this is - -.. 
math:: - :nowrap: - - \begin{eqnarray*} w\left[-K\right] & = & y\left[K\right]x\left[0\right]\\ w\left[-K+1\right] & = & y\left[K-1\right]x\left[0\right]+y\left[K\right]x\left[1\right]\\ \vdots & \vdots & \vdots\\ w\left[M-K\right] & = & y\left[K-M\right]x\left[0\right]+y\left[K-M+1\right]x\left[1\right]+\cdots+y\left[K\right]x\left[M\right]\\ w\left[M-K+1\right] & = & y\left[K-M-1\right]x\left[0\right]+\cdots+y\left[K-1\right]x\left[M\right]\\ \vdots & \vdots & \vdots\\ w\left[-1\right] & = & y\left[1\right]x\left[0\right]+y\left[2\right]x\left[1\right]+\cdots+y\left[M+1\right]x\left[M\right]\\ w\left[0\right] & = & y\left[0\right]x\left[0\right]+y\left[1\right]x\left[1\right]+\cdots+y\left[M\right]x\left[M\right]\\ w\left[1\right] & = & y\left[0\right]x\left[1\right]+y\left[1\right]x\left[2\right]+\cdots+y\left[M-1\right]x\left[M\right]\\ w\left[2\right] & = & y\left[0\right]x\left[2\right]+y\left[1\right]x\left[3\right]+\cdots+y\left[M-2\right]x\left[M\right]\\ \vdots & \vdots & \vdots\\ w\left[M-1\right] & = & y\left[0\right]x\left[M-1\right]+y\left[1\right]x\left[M\right]\\ w\left[M\right] & = & y\left[0\right]x\left[M\right].\end{eqnarray*} - - - -The SciPy function ``signal.correlate`` implements this -operation. Equivalent flags are available for this operation to return -the full :math:`K+M+1` length sequence ('full') or a sequence with the -same size as the largest sequence starting at -:math:`w\left[-K+\left\lfloor \frac{M-1}{2}\right\rfloor \right]` -('same') or a sequence where the values depend on all the values of -the smallest sequence ('valid'). This final option returns the -:math:`K-M+1` values :math:`w\left[M-K\right]` to -:math:`w\left[0\right]` inclusive. - -The function :obj:`signal.correlate` can also take arbitrary :math:`N` --dimensional arrays as input and return the :math:`N` -dimensional -convolution of the two arrays on output. 
- -When :math:`N=2,` :obj:`signal.correlate` and/or -:obj:`signal.convolve` can be used to construct arbitrary image -filters to perform actions such as blurring, enhancing, and -edge-detection for an image. - -Convolution is mainly used for filtering when one of the signals is -much smaller than the other ( :math:`K\gg M` ), otherwise linear -filtering is more easily accomplished in the frequency domain (see -Fourier Transforms). - - -Difference-equation filtering -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A general class of linear one-dimensional filters (that includes -convolution filters) are filters described by the difference equation - -.. math:: - :nowrap: - - \[ \sum_{k=0}^{N}a_{k}y\left[n-k\right]=\sum_{k=0}^{M}b_{k}x\left[n-k\right]\] - -where :math:`x\left[n\right]` is the input sequence and -:math:`y\left[n\right]` is the output sequence. If we assume initial -rest so that :math:`y\left[n\right]=0` for :math:`n<0` , then this -kind of filter can be implemented using convolution. However, the -convolution filter sequence :math:`h\left[n\right]` could be infinite -if :math:`a_{k}\neq0` for :math:`k\geq1.` In addition, this general -class of linear filter allows initial conditions to be placed on -:math:`y\left[n\right]` for :math:`n<0` resulting in a filter that -cannot be expressed using convolution. - -The difference equation filter can be thought of as finding -:math:`y\left[n\right]` recursively in terms of its previous values - -.. math:: - :nowrap: - - \[ a_{0}y\left[n\right]=-a_{1}y\left[n-1\right]-\cdots-a_{N}y\left[n-N\right]+\cdots+b_{0}x\left[n\right]+\cdots+b_{M}x\left[n-M\right].\] - -Often :math:`a_{0}=1` is chosen for normalization. The implementation -in SciPy of this general difference equation filter is a little more -complicated than would be implied by the previous equation. It is -implemented so that only one signal needs to be delayed. The actual -implementation equations are (assuming :math:`a_{0}=1` ). - -.. 
math:: - :nowrap: - - \begin{eqnarray*} y\left[n\right] & = & b_{0}x\left[n\right]+z_{0}\left[n-1\right]\\ z_{0}\left[n\right] & = & b_{1}x\left[n\right]+z_{1}\left[n-1\right]-a_{1}y\left[n\right]\\ z_{1}\left[n\right] & = & b_{2}x\left[n\right]+z_{2}\left[n-1\right]-a_{2}y\left[n\right]\\ \vdots & \vdots & \vdots\\ z_{K-2}\left[n\right] & = & b_{K-1}x\left[n\right]+z_{K-1}\left[n-1\right]-a_{K-1}y\left[n\right]\\ z_{K-1}\left[n\right] & = & b_{K}x\left[n\right]-a_{K}y\left[n\right],\end{eqnarray*} - -where :math:`K=\max\left(N,M\right).` Note that :math:`b_{K}=0` if -:math:`K>M` and :math:`a_{K}=0` if :math:`K>N.` In this way, the -output at time :math:`n` depends only on the input at time :math:`n` -and the value of :math:`z_{0}` at the previous time. This can always -be calculated as long as the :math:`K` values -:math:`z_{0}\left[n-1\right]\ldots z_{K-1}\left[n-1\right]` are -computed and stored at each time step. - -The difference-equation filter is called using the command -:obj:`signal.lfilter` in SciPy. This command takes as inputs the -vector :math:`b,` the vector :math:`a,` a signal :math:`x` and -returns the vector :math:`y` (the same length as :math:`x` ) computed -using the equation given above. If :math:`x` is :math:`N` --dimensional, then the filter is computed along the axis provided. If -desired, initial conditions providing the values of -:math:`z_{0}\left[-1\right]` to :math:`z_{K-1}\left[-1\right]` can be -provided or else it will be assumed that they are all zero. If initial -conditions are provided, then the final conditions on the intermediate -variables are also returned. These could be used, for example, to -restart the calculation in the same state. 
- -Sometimes it is more convenient to express the initial conditions in -terms of the signals :math:`x\left[n\right]` and -:math:`y\left[n\right].` In other words, perhaps you have the values -of :math:`x\left[-M\right]` to :math:`x\left[-1\right]` and the values -of :math:`y\left[-N\right]` to :math:`y\left[-1\right]` and would like -to determine what values of :math:`z_{m}\left[-1\right]` should be -delivered as initial conditions to the difference-equation filter. It -is not difficult to show that for :math:`0\leq m>> help(special).`` Each function also has its own -documentation accessible using help. If you don't see a function you -need, consider writing it and contributing it to the library. You can -write the function in either C, Fortran, or Python. Look in the source -code of the library for examples of each of these kinds of functions. - -Bessel functions of real order(:func:`jn`, :func:`jn_zeros`) --------------------------------------------------------------------------- -Bessel functions are a family of solutions to Bessel's differential equation -with real or complex order alpha: - -.. math:: - x^2 \frac{d^2 y}{dx^2} + x \frac{dy}{dx} + (x^2 - \alpha^2)y = 0 - -Among other uses, these functions arise in wave propagation problems such as -the vibrational modes of a thin drum head. Here is an example of a circular -drum head anchored at the edge: - -.. plot:: - - >>> from scipy import * - >>> from scipy.special import jn, jn_zeros - >>> def drumhead_height(n, k, distance, angle, t): - ... nth_zero = jn_zeros(n, k) - ... 
return cos(t)*cos(n*angle)*jn(n, distance*nth_zero) - >>> theta = r_[0:2*pi:50j] - >>> radius = r_[0:1:50j] - >>> x = array([r*cos(theta) for r in radius]) - >>> y = array([r*sin(theta) for r in radius]) - >>> z = array([drumhead_height(1, 1, r, theta, 0.5) for r in radius]) - - >>> import pylab - >>> from mpl_toolkits.mplot3d import Axes3D - >>> from matplotlib import cm - >>> fig = pylab.figure() - >>> ax = Axes3D(fig) - >>> ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.jet) - >>> ax.set_xlabel('X') - >>> ax.set_ylabel('Y') - >>> ax.set_zlabel('Z') - >>> pylab.show() - -.. :caption: Vibrating drum head using -.. :obj:`scipy.special.jn` diff --git a/scipy-0.10.1/doc/source/tutorial/stats.rst b/scipy-0.10.1/doc/source/tutorial/stats.rst deleted file mode 100644 index a1f86e6fa5..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/stats.rst +++ /dev/null @@ -1,579 +0,0 @@ -Statistics (`scipy.stats`) -========================== - -.. sectionauthor:: Travis E. Oliphant - -Introduction ------------- - -SciPy has a tremendous number of basic statistics routines with more -easily added by the end user (if you create one please contribute it). -All of the statistics functions are located in the sub-package -:mod:`scipy.stats` and a fairly complete listing of these functions -can be had using ``info(stats)``. - -Random Variables -^^^^^^^^^^^^^^^^ - -There are two general distribution classes that have been implemented -for encapsulating -:ref:`continuous random variables ` -and -:ref:`discrete random variables ` -. Over 80 continuous random variables and 10 discrete random -variables have been implemented using these classes. The list of the -random variables available is in the docstring for the stats sub- -package. 
- - -Note: The following is work in progress - -Distributions -------------- - - -First some imports - - >>> import numpy as np - >>> from scipy import stats - >>> import warnings - >>> warnings.simplefilter('ignore', DeprecationWarning) - -We can obtain the list of available distribution through introspection: - - >>> dist_continu = [d for d in dir(stats) if - ... isinstance(getattr(stats,d), stats.rv_continuous)] - >>> dist_discrete = [d for d in dir(stats) if - ... isinstance(getattr(stats,d), stats.rv_discrete)] - >>> print 'number of continuous distributions:', len(dist_continu) - number of continuous distributions: 84 - >>> print 'number of discrete distributions: ', len(dist_discrete) - number of discrete distributions: 12 - - - - -Distributions can be used in one of two ways, either by passing all distribution -parameters to each method call or by freezing the parameters for the instance -of the distribution. As an example, we can get the median of the distribution by using -the percent point function, ppf, which is the inverse of the cdf: - - >>> print stats.nct.ppf(0.5, 10, 2.5) - 2.56880722561 - >>> my_nct = stats.nct(10, 2.5) - >>> print my_nct.ppf(0.5) - 2.56880722561 - -``help(stats.nct)`` prints the complete docstring of the distribution. Instead -we can print just some basic information:: - - >>> print stats.nct.extradoc #contains the distribution specific docs - Non-central Student T distribution - - df**(df/2) * gamma(df+1) - nct.pdf(x,df,nc) = -------------------------------------------------- - 2**df*exp(nc**2/2)*(df+x**2)**(df/2) * gamma(df/2) - for df > 0, nc > 0. - - - >>> print 'number of arguments: %d, shape parameters: %s'% (stats.nct.numargs, - ... stats.nct.shapes) - number of arguments: 2, shape parameters: df,nc - >>> print 'bounds of distribution lower: %s, upper: %s' % (stats.nct.a, - ... 
stats.nct.b) - bounds of distribution lower: -1.#INF, upper: 1.#INF - -We can list all methods and properties of the distribution with -``dir(stats.nct)``. Some of the methods are private methods, that are -not named as such, i.e. no leading underscore, for example veccdf or -xa and xb are for internal calculation. The main methods we can see -when we list the methods of the frozen distribution: - - >>> print dir(my_nct) #reformatted - ['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', - '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', - '__repr__', '__setattr__', '__str__', '__weakref__', 'args', 'cdf', 'dist', - 'entropy', 'isf', 'kwds', 'moment', 'pdf', 'pmf', 'ppf', 'rvs', 'sf', 'stats'] - - -The main public methods are: - -* rvs: Random Variates -* pdf: Probability Density Function -* cdf: Cumulative Distribution Function -* sf: Survival Function (1-CDF) -* ppf: Percent Point Function (Inverse of CDF) -* isf: Inverse Survival Function (Inverse of SF) -* stats: Return mean, variance, (Fisher's) skew, or (Fisher's) kurtosis -* moment: non-central moments of the distribution - -The main additional methods of the not frozen distribution are related to the estimation -of distrition parameters: - -* fit: maximum likelihood estimation of distribution parameters, including location - and scale -* fit_loc_scale: estimation of location and scale when shape parameters are given -* nnlf: negative log likelihood function -* expect: Calculate the expectation of a function against the pdf or pmf - -All continuous distributions take `loc` and `scale` as keyword -parameters to adjust the location and scale of the distribution, -e.g. for the standard normal distribution location is the mean and -scale is the standard deviation. The standardized distribution for a -random variable `x` is obtained through ``(x - loc) / scale``. 
- -Discrete distribution have most of the same basic methods, however -pdf is replaced the probability mass function `pmf`, no estimation -methods, such as fit, are available, and scale is not a valid -keyword parameter. The location parameter, keyword `loc` can be used -to shift the distribution. - -The basic methods, pdf, cdf, sf, ppf, and isf are vectorized with -``np.vectorize``, and the usual numpy broadcasting is applied. For -example, we can calculate the critical values for the upper tail of -the t distribution for different probabilites and degrees of freedom. - - >>> stats.t.isf([0.1, 0.05, 0.01], [[10], [11]]) - array([[ 1.37218364, 1.81246112, 2.76376946], - [ 1.36343032, 1.79588482, 2.71807918]]) - -Here, the first row are the critical values for 10 degrees of freedom and the second row -is for 11 d.o.f., i.e. this is the same as - - >>> stats.t.isf([0.1, 0.05, 0.01], 10) - array([ 1.37218364, 1.81246112, 2.76376946]) - >>> stats.t.isf([0.1, 0.05, 0.01], 11) - array([ 1.36343032, 1.79588482, 2.71807918]) - -If both, probabilities and degrees of freedom have the same array shape, then element -wise matching is used. As an example, we can obtain the 10% tail for 10 d.o.f., the 5% tail -for 11 d.o.f. and the 1% tail for 12 d.o.f. by - - >>> stats.t.isf([0.1, 0.05, 0.01], [10, 11, 12]) - array([ 1.37218364, 1.79588482, 2.68099799]) - - - -Performance and Remaining Issues -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The performance of the individual methods, in terms of speed, varies -widely by distribution and method. The results of a method are -obtained in one of two ways, either by explicit calculation or by a -generic algorithm that is independent of the specific distribution. -Explicit calculation, requires that the method is directly specified -for the given distribution, either through analytic formulas or -through special functions in scipy.special or numpy.random for -`rvs`. These are usually relatively fast calculations. 
The generic -methods are used if the distribution does not specify any explicit -calculation. To define a distribution, only one of pdf or cdf is -necessary, all other methods can be derived using numeric integration -and root finding. These indirect methods can be very slow. As an -example, ``rgh = stats.gausshyper.rvs(0.5, 2, 2, 2, size=100)`` creates -random variables in a very indirect way and takes about 19 seconds -for 100 random variables on my computer, while one million random -variables from the standard normal or from the t distribution take -just above one second. - - -The distributions in scipy.stats have recently been corrected and improved -and gained a considerable test suite, however a few issues remain: - -* skew and kurtosis, 3rd and 4th moments and entropy are not thoroughly - tested and some coarse testing indicates that there are still some - incorrect results left. -* the distributions have been tested over some range of parameters, - however in some corner ranges, a few incorrect results may remain. -* the maximum likelihood estimation in `fit` does not work with - default starting parameters for all distributions and the user - needs to supply good starting parameters. Also, for some - distribution using a maximum likelihood estimator might - inherently not be the best choice. - - -The next example shows how to build our own discrete distribution, -and more examples for the usage of the distributions are shown below -together with the statistical tests. - - - -Example: discrete distribution rv_discrete -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In the following we use stats.rv_discrete to generate a discrete distribution -that has the probabilites of the truncated normal for the intervalls -centered around the integers. 
- - - >>> npoints = 20 # number of integer support points of the distribution minus 1 - >>> npointsh = npoints / 2 - >>> npointsf = float(npoints) - >>> nbound = 4 # bounds for the truncated normal - >>> normbound = (1+1/npointsf) * nbound # actual bounds of truncated normal - >>> grid = np.arange(-npointsh, npointsh+2, 1) # integer grid - >>> gridlimitsnorm = (grid-0.5) / npointsh * nbound # bin limits for the truncnorm - >>> gridlimits = grid - 0.5 - >>> grid = grid[:-1] - >>> probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound)) - >>> gridint = grid - >>> normdiscrete = stats.rv_discrete(values = (gridint, - ... np.round(probs, decimals=7)), name='normdiscrete') - -From the docstring of rv_discrete: - "You can construct an aribtrary discrete rv where P{X=xk} = pk by - passing to the rv_discrete initialization method (through the values= - keyword) a tuple of sequences (xk, pk) which describes only those - values of X (xk) that occur with nonzero probability (pk)." - -There are some requirements for this distribution to work. The -keyword `name` is required. The support points of the distribution -xk have to be integers. Also, I needed to limit the number of -decimals. If the last two requirements are not satisfied an -exception may be raised or the resulting numbers may be incorrect. - -After defining the distribution, we obtain access to all methods of -discrete distributions. - - >>> print 'mean = %6.4f, variance = %6.4f, skew = %6.4f, kurtosis = %6.4f'% \ - ... 
normdiscrete.stats(moments = 'mvsk') - mean = -0.0000, variance = 6.3302, skew = 0.0000, kurtosis = -0.0076 - - >>> nd_std = np.sqrt(normdiscrete.stats(moments = 'v')) - -**Generate a random sample and compare observed frequencies with probabilities** - - >>> n_sample = 500 - >>> np.random.seed(87655678) # fix the seed for replicability - >>> rvs = normdiscrete.rvs(size=n_sample) - >>> rvsnd = rvs - >>> f, l = np.histogram(rvs, bins=gridlimits) - >>> sfreq = np.vstack([gridint, f, probs*n_sample]).T - >>> print sfreq - [[ -1.00000000e+01 0.00000000e+00 2.95019349e-02] - [ -9.00000000e+00 0.00000000e+00 1.32294142e-01] - [ -8.00000000e+00 0.00000000e+00 5.06497902e-01] - [ -7.00000000e+00 2.00000000e+00 1.65568919e+00] - [ -6.00000000e+00 1.00000000e+00 4.62125309e+00] - [ -5.00000000e+00 9.00000000e+00 1.10137298e+01] - [ -4.00000000e+00 2.60000000e+01 2.24137683e+01] - [ -3.00000000e+00 3.70000000e+01 3.89503370e+01] - [ -2.00000000e+00 5.10000000e+01 5.78004747e+01] - [ -1.00000000e+00 7.10000000e+01 7.32455414e+01] - [ 0.00000000e+00 7.40000000e+01 7.92618251e+01] - [ 1.00000000e+00 8.90000000e+01 7.32455414e+01] - [ 2.00000000e+00 5.50000000e+01 5.78004747e+01] - [ 3.00000000e+00 5.00000000e+01 3.89503370e+01] - [ 4.00000000e+00 1.70000000e+01 2.24137683e+01] - [ 5.00000000e+00 1.10000000e+01 1.10137298e+01] - [ 6.00000000e+00 4.00000000e+00 4.62125309e+00] - [ 7.00000000e+00 3.00000000e+00 1.65568919e+00] - [ 8.00000000e+00 0.00000000e+00 5.06497902e-01] - [ 9.00000000e+00 0.00000000e+00 1.32294142e-01] - [ 1.00000000e+01 0.00000000e+00 2.95019349e-02]] - - -.. plot:: examples/normdiscr_plot1.py - :align: center - :include-source: 0 - - -.. plot:: examples/normdiscr_plot2.py - :align: center - :include-source: 0 - - -Next, we can test, whether our sample was generated by our normdiscrete -distribution. 
This also verifies whether the random numbers are generated -correctly - -The chisquare test requires that there are a minimum number of observations -in each bin. We combine the tail bins into larger bins so that they contain -enough observations. - - >>> f2 = np.hstack([f[:5].sum(), f[5:-5], f[-5:].sum()]) - >>> p2 = np.hstack([probs[:5].sum(), probs[5:-5], probs[-5:].sum()]) - >>> ch2, pval = stats.chisquare(f2, p2*n_sample) - - >>> print 'chisquare for normdiscrete: chi2 = %6.3f pvalue = %6.4f' % (ch2, pval) - chisquare for normdiscrete: chi2 = 12.466 pvalue = 0.4090 - -The pvalue in this case is high, so we can be quite confident that -our random sample was actually generated by the distribution. - - - -Analysing One Sample --------------------- - -First, we create some random variables. We set a seed so that in each run -we get identical results to look at. As an example we take a sample from -the Student t distribution: - - >>> np.random.seed(282629734) - >>> x = stats.t.rvs(10, size=1000) - -Here, we set the required shape parameter of the t distribution, which -in statistics corresponds to the degrees of freedom, to 10. Using size=1000 means -that our sample consists of 1000 independently drawn (pseudo) random numbers. -Since we did not specify the keyword arguments `loc` and `scale`, those are -set to their default values zero and one. - -Descriptive Statistics -^^^^^^^^^^^^^^^^^^^^^^ - -`x` is a numpy array, and we have direct access to all array methods, e.g. - - >>> print x.max(), x.min() # equivalent to np.max(x), np.min(x) - 5.26327732981 -3.78975572422 - >>> print x.mean(), x.var() # equivalent to np.mean(x), np.var(x) - 0.0140610663985 1.28899386208 - - -How do some sample properties compare to their theoretical counterparts? 
- - >>> m, v, s, k = stats.t.stats(10, moments='mvsk') - >>> n, (smin, smax), sm, sv, ss, sk = stats.describe(x) - - >>> print 'distribution:', - distribution: - >>> sstr = 'mean = %6.4f, variance = %6.4f, skew = %6.4f, kurtosis = %6.4f' - >>> print sstr %(m, v, s ,k) - mean = 0.0000, variance = 1.2500, skew = 0.0000, kurtosis = 1.0000 - >>> print 'sample: ', - sample: - >>> print sstr %(sm, sv, ss, sk) - mean = 0.0141, variance = 1.2903, skew = 0.2165, kurtosis = 1.0556 - -Note: stats.describe uses the unbiased estimator for the variance, while -np.var is the biased estimator. - - -For our sample the sample statistics differ a by a small amount from -their theoretical counterparts. - - -T-test and KS-test -^^^^^^^^^^^^^^^^^^ - -We can use the t-test to test whether the mean of our sample differs -in a statistcally significant way from the theoretical expectation. - - >>> print 't-statistic = %6.3f pvalue = %6.4f' % stats.ttest_1samp(x, m) - t-statistic = 0.391 pvalue = 0.6955 - -The pvalue is 0.7, this means that with an alpha error of, for -example, 10%, we cannot reject the hypothesis that the sample mean -is equal to zero, the expectation of the standard t-distribution. - - -As an exercise, we can calculate our ttest also directly without -using the provided function, which should give us the same answer, -and so it does: - - >>> tt = (sm-m)/np.sqrt(sv/float(n)) # t-statistic for mean - >>> pval = stats.t.sf(np.abs(tt), n-1)*2 # two-sided pvalue = Prob(abs(t)>tt) - >>> print 't-statistic = %6.3f pvalue = %6.4f' % (tt, pval) - t-statistic = 0.391 pvalue = 0.6955 - -The Kolmogorov-Smirnov test can be used to test the hypothesis that -the sample comes from the standard t-distribution - - >>> print 'KS-statistic D = %6.3f pvalue = %6.4f' % stats.kstest(x, 't', (10,)) - KS-statistic D = 0.016 pvalue = 0.9606 - -Again the p-value is high enough that we cannot reject the -hypothesis that the random sample really is distributed according to the -t-distribution. 
In real applications, we don't know what the -underlying distribution is. If we perform the Kolmogorov-Smirnov -test of our sample against the standard normal distribution, then we -also cannot reject the hypothesis that our sample was generated by the -normal distribution given that in this example the p-value is almost 40%. - - >>> print 'KS-statistic D = %6.3f pvalue = %6.4f' % stats.kstest(x,'norm') - KS-statistic D = 0.028 pvalue = 0.3949 - -However, the standard normal distribution has a variance of 1, while our -sample has a variance of 1.29. If we standardize our sample and test it -against the normal distribution, then the p-value is again large enough -that we cannot reject the hypothesis that the sample came form the -normal distribution. - - >>> d, pval = stats.kstest((x-x.mean())/x.std(), 'norm') - >>> print 'KS-statistic D = %6.3f pvalue = %6.4f' % (d, pval) - KS-statistic D = 0.032 pvalue = 0.2402 - -Note: The Kolmogorov-Smirnov test assumes that we test against a -distribution with given parameters, since in the last case we -estimated mean and variance, this assumption is violated, and the -distribution of the test statistic on which the p-value is based, is -not correct. - -Tails of the distribution -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Finally, we can check the upper tail of the distribution. 
We can use -the percent point function ppf, which is the inverse of the cdf -function, to obtain the critical values, or, more directly, we can use -the inverse of the survival function - - >>> crit01, crit05, crit10 = stats.t.ppf([1-0.01, 1-0.05, 1-0.10], 10) - >>> print 'critical values from ppf at 1%%, 5%% and 10%% %8.4f %8.4f %8.4f'% (crit01, crit05, crit10) - critical values from ppf at 1%, 5% and 10% 2.7638 1.8125 1.3722 - >>> print 'critical values from isf at 1%%, 5%% and 10%% %8.4f %8.4f %8.4f'% tuple(stats.t.isf([0.01,0.05,0.10],10)) - critical values from isf at 1%, 5% and 10% 2.7638 1.8125 1.3722 - - >>> freq01 = np.sum(x>crit01) / float(n) * 100 - >>> freq05 = np.sum(x>crit05) / float(n) * 100 - >>> freq10 = np.sum(x>crit10) / float(n) * 100 - >>> print 'sample %%-frequency at 1%%, 5%% and 10%% tail %8.4f %8.4f %8.4f'% (freq01, freq05, freq10) - sample %-frequency at 1%, 5% and 10% tail 1.4000 5.8000 10.5000 - -In all three cases, our sample has more weight in the top tail than the -underlying distribution. -We can briefly check a larger sample to see if we get a closer match. In this -case the empirical frequency is quite close to the theoretical probability, -but if we repeat this several times the fluctuations are still pretty large. - - >>> freq05l = np.sum(stats.t.rvs(10, size=10000) > crit05) / 10000.0 * 100 - >>> print 'larger sample %%-frequency at 5%% tail %8.4f'% freq05l - larger sample %-frequency at 5% tail 4.8000 - -We can also compare it with the tail of the normal distribution, which -has less weight in the tails: - - >>> print 'tail prob. of normal at 1%%, 5%% and 10%% %8.4f %8.4f %8.4f'% \ - ... tuple(stats.norm.sf([crit01, crit05, crit10])*100) - tail prob. of normal at 1%, 5% and 10% 0.2857 3.4957 8.5003 - -The chisquare test can be used to test, whether for a finite number of bins, -the observed frequencies differ significantly from the probabilites of the -hypothesized distribution. 
- - >>> quantiles = [0.0, 0.01, 0.05, 0.1, 1-0.10, 1-0.05, 1-0.01, 1.0] - >>> crit = stats.t.ppf(quantiles, 10) - >>> print crit - [ -Inf -2.76376946 -1.81246112 -1.37218364 1.37218364 1.81246112 - 2.76376946 Inf] - >>> n_sample = x.size - >>> freqcount = np.histogram(x, bins=crit)[0] - >>> tprob = np.diff(quantiles) - >>> nprob = np.diff(stats.norm.cdf(crit)) - >>> tch, tpval = stats.chisquare(freqcount, tprob*n_sample) - >>> nch, npval = stats.chisquare(freqcount, nprob*n_sample) - >>> print 'chisquare for t: chi2 = %6.3f pvalue = %6.4f' % (tch, tpval) - chisquare for t: chi2 = 2.300 pvalue = 0.8901 - >>> print 'chisquare for normal: chi2 = %6.3f pvalue = %6.4f' % (nch, npval) - chisquare for normal: chi2 = 64.605 pvalue = 0.0000 - -We see that the standard normal distribution is clearly rejected while the -standard t-distribution cannot be rejected. Since the variance of our sample -differs from both standard distribution, we can again redo the test taking -the estimate for scale and location into account. - -The fit method of the distributions can be used to estimate the parameters -of the distribution, and the test is repeated using probabilites of the -estimated distribution. 
- - >>> tdof, tloc, tscale = stats.t.fit(x) - >>> nloc, nscale = stats.norm.fit(x) - >>> tprob = np.diff(stats.t.cdf(crit, tdof, loc=tloc, scale=tscale)) - >>> nprob = np.diff(stats.norm.cdf(crit, loc=nloc, scale=nscale)) - >>> tch, tpval = stats.chisquare(freqcount, tprob*n_sample) - >>> nch, npval = stats.chisquare(freqcount, nprob*n_sample) - >>> print 'chisquare for t: chi2 = %6.3f pvalue = %6.4f' % (tch, tpval) - chisquare for t: chi2 = 1.577 pvalue = 0.9542 - >>> print 'chisquare for normal: chi2 = %6.3f pvalue = %6.4f' % (nch, npval) - chisquare for normal: chi2 = 11.084 pvalue = 0.0858 - -Taking account of the estimated parameters, we can still reject the -hypothesis that our sample came from a normal distribution (at the 5% level), -but again, with a p-value of 0.95, we cannot reject the t distribution. - - - -Special tests for normal distributions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Since the normal distribution is the most common distribution in statistics, -there are several additional functions available to test whether a sample -could have been drawn from a normal distribution - -First we can test if skew and kurtosis of our sample differ significantly from -those of a normal distribution: - - >>> print 'normal skewtest teststat = %6.3f pvalue = %6.4f' % stats.skewtest(x) - normal skewtest teststat = 2.785 pvalue = 0.0054 - >>> print 'normal kurtosistest teststat = %6.3f pvalue = %6.4f' % stats.kurtosistest(x) - normal kurtosistest teststat = 4.757 pvalue = 0.0000 - -These two tests are combined in the normality test - - >>> print 'normaltest teststat = %6.3f pvalue = %6.4f' % stats.normaltest(x) - normaltest teststat = 30.379 pvalue = 0.0000 - -In all three tests the p-values are very low and we can reject the hypothesis -that the our sample has skew and kurtosis of the normal distribution. 
- -Since skew and kurtosis of our sample are based on central moments, we get -exactly the same results if we test the standardized sample: - - >>> print 'normaltest teststat = %6.3f pvalue = %6.4f' % \ - ... stats.normaltest((x-x.mean())/x.std()) - normaltest teststat = 30.379 pvalue = 0.0000 - -Because normality is rejected so strongly, we can check whether the -normaltest gives reasonable results for other cases: - - >>> print 'normaltest teststat = %6.3f pvalue = %6.4f' % stats.normaltest(stats.t.rvs(10, size=100)) - normaltest teststat = 4.698 pvalue = 0.0955 - >>> print 'normaltest teststat = %6.3f pvalue = %6.4f' % stats.normaltest(stats.norm.rvs(size=1000)) - normaltest teststat = 0.613 pvalue = 0.7361 - -When testing for normality of a small sample of t-distributed observations -and a large sample of normal distributed observation, then in neither case -can we reject the null hypothesis that the sample comes from a normal -distribution. In the first case this is because the test is not powerful -enough to distinguish a t and a normally distributed random variable in a -small sample. - - -Comparing two samples ---------------------- - -In the following, we are given two samples, which can come either from the -same or from different distribution, and we want to test whether these -samples have the same statistical properties. 
- -Comparing means -^^^^^^^^^^^^^^^ - -Test with sample with identical means: - - >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500) - >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500) - >>> stats.ttest_ind(rvs1, rvs2) - (-0.54890361750888583, 0.5831943748663857) - - -Test with sample with different means: - - >>> rvs3 = stats.norm.rvs(loc=8, scale=10, size=500) - >>> stats.ttest_ind(rvs1, rvs3) - (-4.5334142901750321, 6.507128186505895e-006) - - - -Kolmogorov-Smirnov test for two samples ks_2samp -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For the example where both samples are drawn from the same distribution, -we cannot reject the null hypothesis since the pvalue is high - - >>> stats.ks_2samp(rvs1, rvs2) - (0.025999999999999995, 0.99541195173064878) - -In the second example, with different location, i.e. means, we can -reject the null hypothesis since the pvalue is below 1% - - >>> stats.ks_2samp(rvs1, rvs3) - (0.11399999999999999, 0.0027132103661283141) diff --git a/scipy-0.10.1/doc/source/tutorial/stats/continuous.lyx b/scipy-0.10.1/doc/source/tutorial/stats/continuous.lyx deleted file mode 100644 index 261931df46..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/stats/continuous.lyx +++ /dev/null @@ -1,5065 +0,0 @@ -#LyX 1.6.6.1 created this file. 
For more info see http://www.lyx.org/ -\lyxformat 345 -\begin_document -\begin_header -\textclass article -\use_default_options false -\language english -\inputencoding auto -\font_roman default -\font_sans default -\font_typewriter default -\font_default_family default -\font_sc false -\font_osf false -\font_sf_scale 100 -\font_tt_scale 100 - -\graphics default -\paperfontsize default -\spacing single -\use_hyperref false -\papersize default -\use_geometry true -\use_amsmath 2 -\use_esint 0 -\cite_engine basic -\use_bibtopic false -\paperorientation portrait -\leftmargin 1in -\topmargin 1in -\rightmargin 1in -\bottommargin 1in -\secnumdepth 3 -\tocdepth 3 -\paragraph_separation indent -\defskip medskip -\quotes_language english -\papercolumns 1 -\papersides 1 -\paperpagestyle default -\tracking_changes false -\output_changes false -\author "" -\author "" -\end_header - -\begin_body - -\begin_layout Title -Continuous Statistical Distributions -\end_layout - -\begin_layout Section -Overview -\end_layout - -\begin_layout Standard -All distributions will have location (L) and Scale (S) parameters along - with any shape parameters needed, the names for the shape parameters will - vary. - Standard form for the distributions will be given where -\begin_inset Formula $L=0.0$ -\end_inset - - and -\begin_inset Formula $S=1.0.$ -\end_inset - - The nonstandard forms can be obtained for the various functions using (note - -\begin_inset Formula $U$ -\end_inset - - is a standard uniform random variate). 
- -\end_layout - -\begin_layout Standard -\align center - -\size small -\begin_inset Tabular - - - - - - - -\begin_inset Text - -\begin_layout Plain Layout -Function Name -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -Standard Function -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -Transformation -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Cumulative Distribution Function (CDF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $F\left(x\right)$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $F\left(x;L,S\right)=F\left(\frac{\left(x-L\right)}{S}\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Probability Density Function (PDF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $f\left(x\right)=F^{\prime}\left(x\right)$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $f\left(x;L,S\right)=\frac{1}{S}f\left(\frac{\left(x-L\right)}{S}\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Percent Point Function (PPF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $G\left(q\right)=F^{-1}\left(q\right)$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $G\left(q;L,S\right)=L+SG\left(q\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Probability Sparsity Function (PSF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $g\left(q\right)=G^{\prime}\left(q\right)$ -\end_inset - - 
-\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $g\left(q;L,S\right)=Sg\left(q\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Hazard Function (HF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $h_{a}\left(x\right)=\frac{f\left(x\right)}{1-F\left(x\right)}$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $h_{a}\left(x;L,S\right)=\frac{1}{S}h_{a}\left(\frac{\left(x-L\right)}{S}\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Cumulative Hazard Functon (CHF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $H_{a}\left(x\right)=$ -\end_inset - - -\begin_inset Formula $\log\frac{1}{1-F\left(x\right)}$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $H_{a}\left(x;L,S\right)=H_{a}\left(\frac{\left(x-L\right)}{S}\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Survival Function (SF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $S\left(x\right)=1-F\left(x\right)$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $S\left(x;L,S\right)=S\left(\frac{\left(x-L\right)}{S}\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Inverse Survival Function (ISF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $Z\left(\alpha\right)=S^{-1}\left(\alpha\right)=G\left(1-\alpha\right)$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout 
-\begin_inset Formula $Z\left(\alpha;L,S\right)=L+SZ\left(\alpha\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Moment Generating Function (MGF) -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $M_{Y}\left(t\right)=E\left[e^{Yt}\right]$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $M_{X}\left(t\right)=e^{Lt}M_{Y}\left(St\right)$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Random Variates -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $Y=G\left(U\right)$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $X=L+SY$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -(Differential) Entropy -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $h\left[Y\right]=-\int f\left(y\right)\log f\left(y\right)dy$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $h\left[X\right]=h\left[Y\right]+\log S$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -(Non-central) Moments -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $\mu_{n}^{\prime}=E\left[Y^{n}\right]$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $E\left[X^{n}\right]=L^{n}\sum_{k=0}^{N}\left(\begin{array}{c} -n\\ -k\end{array}\right)\left(\frac{S}{L}\right)^{k}\mu_{k}^{\prime}$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -Central Moments -\end_layout - -\end_inset - - 
-\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $\mu_{Y^{n}}=E\left[\left(Y-\mu_{Y}\right)^{n}\right]$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $E\left[\left(X-\mu_{X}\right)^{n}\right]=S^{n}\mu_{X^{n}}=S^{n}\mu_{n}$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -mean (mode, median), var -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $\mu_{Y},\,\mu_{Y^{2}}$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $L+S\mu,\, S^{2}\mu_{2}$ -\end_inset - - -\end_layout - -\end_inset - - - - -\begin_inset Text - -\begin_layout Plain Layout -skewness, kurtosis -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $\gamma_{1}=\frac{\mu_{3}}{\left(\mu_{2}\right)^{3/2}},\,$ -\end_inset - - -\begin_inset Formula $\gamma_{2}=\frac{\mu_{4}}{\left(\mu_{2}\right)^{2}}-3$ -\end_inset - - -\end_layout - -\end_inset - - -\begin_inset Text - -\begin_layout Plain Layout -\begin_inset Formula $\gamma_{1},\,\gamma_{2}$ -\end_inset - - -\end_layout - -\end_inset - - - - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset space ~ -\end_inset - - -\end_layout - -\begin_layout Subsection -Moments -\end_layout - -\begin_layout Standard -Non-central moments are defined using the PDF -\begin_inset Formula \[ -\mu_{n}^{\prime}=\int_{-\infty}^{\infty}x^{n}f\left(x\right)dx.\] - -\end_inset - - Note, that these can always be computed using the PPF. - Substitute -\begin_inset Formula $x=G\left(q\right)$ -\end_inset - - in the above equation and get -\begin_inset Formula \[ -\mu_{n}^{\prime}=\int_{0}^{1}G^{n}\left(q\right)dq\] - -\end_inset - - which may be easier to compute numerically. 
- Note that -\begin_inset Formula $q=F\left(x\right)$ -\end_inset - - so that -\begin_inset Formula $dq=f\left(x\right)dx.$ -\end_inset - - Central moments are computed similarly -\begin_inset Formula $\mu=\mu_{1}^{\prime}$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu_{n} & = & \int_{-\infty}^{\infty}\left(x-\mu\right)^{n}f\left(x\right)dx\\ - & = & \int_{0}^{1}\left(G\left(q\right)-\mu\right)^{n}dq\\ - & = & \sum_{k=0}^{n}\left(\begin{array}{c} -n\\ -k\end{array}\right)\left(-\mu\right)^{k}\mu_{n-k}^{\prime}\end{eqnarray*} - -\end_inset - - In particular -\begin_inset Formula \begin{eqnarray*} -\mu_{3} & = & \mu_{3}^{\prime}-3\mu\mu_{2}^{\prime}+2\mu^{3}\\ - & = & \mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}\\ -\mu_{4} & = & \mu_{4}^{\prime}-4\mu\mu_{3}^{\prime}+6\mu^{2}\mu_{2}^{\prime}-3\mu^{4}\\ - & = & \mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}\end{eqnarray*} - -\end_inset - - Skewness is defined as -\begin_inset Formula \[ -\gamma_{1}=\sqrt{\beta_{1}}=\frac{\mu_{3}}{\mu_{2}^{3/2}}\] - -\end_inset - - while (Fisher) kurtosis is -\begin_inset Formula \[ -\gamma_{2}=\frac{\mu_{4}}{\mu_{2}^{2}}-3,\] - -\end_inset - - so that a normal distribution has a kurtosis of zero. - -\end_layout - -\begin_layout Subsection -Median and mode -\end_layout - -\begin_layout Standard -The median, -\begin_inset Formula $m_{n}$ -\end_inset - - is defined as the point at which half of the density is on one side and - half on the other. 
- In other words, -\begin_inset Formula $F\left(m_{n}\right)=\frac{1}{2}$ -\end_inset - - so that -\begin_inset Formula \[ -m_{n}=G\left(\frac{1}{2}\right).\] - -\end_inset - - In addition, the mode, -\begin_inset Formula $m_{d}$ -\end_inset - -, is defined as the value for which the probability density function reaches - it's peak -\begin_inset Formula \[ -m_{d}=\arg\max_{x}f\left(x\right).\] - -\end_inset - - -\end_layout - -\begin_layout Subsection -Fitting data -\end_layout - -\begin_layout Standard -To fit data to a distribution, maximizing the likelihood function is common. - Alternatively, some distributions have well-known minimum variance unbiased - estimators. - These will be chosen by default, but the likelihood function will always - be available for minimizing. - -\end_layout - -\begin_layout Standard -If -\begin_inset Formula $f\left(x;\boldsymbol{\theta}\right)$ -\end_inset - - is the PDF of a random-variable where -\begin_inset Formula $\boldsymbol{\theta}$ -\end_inset - - is a vector of parameters ( -\emph on -e.g. 
- -\begin_inset Formula $L$ -\end_inset - - -\emph default - and -\begin_inset Formula $S$ -\end_inset - -), then for a collection of -\begin_inset Formula $N$ -\end_inset - - independent samples from this distribution, the joint distribution the - random vector -\begin_inset Formula $\mathbf{x}$ -\end_inset - - is -\begin_inset Formula \[ -f\left(\mathbf{x};\boldsymbol{\theta}\right)=\prod_{i=1}^{N}f\left(x_{i};\boldsymbol{\theta}\right).\] - -\end_inset - - The maximum likelihood estimate of the parameters -\begin_inset Formula $\boldsymbol{\theta}$ -\end_inset - - are the parameters which maximize this function with -\begin_inset Formula $\mathbf{x}$ -\end_inset - - fixed and given by the data: -\begin_inset Formula \begin{eqnarray*} -\boldsymbol{\theta}_{es} & = & \arg\max_{\boldsymbol{\theta}}f\left(\mathbf{x};\boldsymbol{\theta}\right)\\ - & = & \arg\min_{\boldsymbol{\theta}}l_{\mathbf{x}}\left(\boldsymbol{\theta}\right).\end{eqnarray*} - -\end_inset - - Where -\begin_inset Formula \begin{eqnarray*} -l_{\mathbf{x}}\left(\boldsymbol{\theta}\right) & = & -\sum_{i=1}^{N}\log f\left(x_{i};\boldsymbol{\theta}\right)\\ - & = & -N\overline{\log f\left(x_{i};\boldsymbol{\theta}\right)}\end{eqnarray*} - -\end_inset - - Note that if -\begin_inset Formula $\boldsymbol{\theta}$ -\end_inset - - includes only shape parameters, the location and scale-parameters can be - fit by replacing -\begin_inset Formula $x_{i}$ -\end_inset - - with -\begin_inset Formula $\left(x_{i}-L\right)/S$ -\end_inset - - in the log-likelihood function adding -\begin_inset Formula $N\log S$ -\end_inset - - and minimizing, thus -\begin_inset Formula \begin{eqnarray*} -l_{\mathbf{x}}\left(L,S;\boldsymbol{\theta}\right) & = & N\log S-\sum_{i=1}^{N}\log f\left(\frac{x_{i}-L}{S};\boldsymbol{\theta}\right)\\ - & = & N\log S+l_{\frac{\mathbf{x}-S}{L}}\left(\boldsymbol{\theta}\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Subsection -Method of Moments -\end_layout - -\begin_layout 
Standard -Another approach to finding parameters of a distribution that explain a - collection of data is to match the expected moments of the distribution - with the computed moments and the relevant equations solved for the parameters - of the distribution. - In particular, for a distribution with no shape parameters, the following - equations can be solved for location and scale -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{align*} -\mu_{Y} & =S\mu+L\\ -\mu_{Y^{2}} & =S^{2}\mu_{2}.\end{align*} - -\end_inset - - Estimating -\begin_inset Formula $\mu_{Y}$ -\end_inset - - as -\begin_inset Formula $\hat{\mu}$ -\end_inset - - and -\begin_inset Formula $\mu_{Y^{2}}$ -\end_inset - - as -\begin_inset Formula $\hat{\mu}_{2}$ -\end_inset - - gives the estimates -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\hat{S} & = & \sqrt{\frac{\hat{\mu}_{2}}{\mu_{2}}}\\ -\hat{L} & = & \hat{\mu}-\hat{S}\mu\end{eqnarray*} - -\end_inset - - where -\begin_inset Formula $\mu$ -\end_inset - - and -\begin_inset Formula $\mu_{2}$ -\end_inset - - are assumed known as the mean and variance of the -\series bold -untransformed -\series default - distribution (when -\begin_inset Formula $L=0$ -\end_inset - - and -\begin_inset Formula $S=1$ -\end_inset - -) and -\begin_inset Formula \begin{eqnarray*} -\hat{\mu} & = & \frac{1}{N}\sum_{i=1}^{N}x_{i}=\bar{\mathbf{x}}\\ -\hat{\mu}_{2} & = & \frac{1}{N}\sum_{i=1}^{N}\left(x_{i}-\hat{\mu}\right)^{2}=\overline{\left(\mathbf{x}-\bar{\mathbf{x}}\right)^{2}}.\end{eqnarray*} - -\end_inset - -In general, if the distribution has -\begin_inset Formula $M$ -\end_inset - - shape parameters, then to find the parameters of the distribution, we need - to add -\begin_inset Formula $M$ -\end_inset - - additional equations. 
- It is straightforward to add the central moment equations: -\begin_inset Formula \[ -\mu_{Y^{n}}=S^{n}\mu_{n}\left(\boldsymbol{\theta}\right)\] - -\end_inset - -for -\begin_inset Formula $n=2\ldots M+2$ -\end_inset - -. - If we can estimate the -\begin_inset Formula $n^{\mbox{th}}$ -\end_inset - - central moment of -\begin_inset Formula $Y$ -\end_inset - - as -\begin_inset Formula \[ -\hat{\mu}_{n}=\frac{1}{N}\sum_{i=1}^{N}\left(x_{i}-\hat{\mu}\right)^{n}=\overline{\left(\mathbf{x}-\bar{\mathbf{x}}\right)^{n}},\] - -\end_inset - -then these data-computed statistics can be used to estimate -\begin_inset Formula $S$ -\end_inset - - and the parameter vector -\begin_inset Formula $\boldsymbol{\theta}$ -\end_inset - - by solving this set of potentially non-linear equations. - Then, the location parameter can be found by solving the equation -\begin_inset Formula $\mu_{Y}=S\mu\left(\boldsymbol{\theta}\right)+L$ -\end_inset - -, for -\begin_inset Formula $L$ -\end_inset - -. - -\end_layout - -\begin_layout Subsection -Standard notation for mean -\end_layout - -\begin_layout Standard -We will use -\begin_inset Formula \[ -\overline{y\left(\mathbf{x}\right)}=\frac{1}{N}\sum_{i=1}^{N}y\left(x_{i}\right)\] - -\end_inset - - where -\begin_inset Formula $N$ -\end_inset - - should be clear from context as the number of samples -\begin_inset Formula $x_{i}$ -\end_inset - -. - -\end_layout - -\begin_layout Section -Alpha -\end_layout - -\begin_layout Standard -One shape parameters -\begin_inset Formula $\alpha>0$ -\end_inset - - (paramter -\begin_inset Formula $\beta$ -\end_inset - - in DATAPLOT is a scale-parameter). 
- Standard form is -\begin_inset Formula $x>0:$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;\alpha\right) & = & \frac{1}{x^{2}\Phi\left(\alpha\right)\sqrt{2\pi}}\exp\left(-\frac{1}{2}\left(\alpha-\frac{1}{x}\right)^{2}\right)\\ -F\left(x;\alpha\right) & = & \frac{\Phi\left(\alpha-\frac{1}{x}\right)}{\Phi\left(\alpha\right)}\\ -G\left(q;\alpha\right) & = & \left[\alpha-\Phi^{-1}\left(q\Phi\left(\alpha\right)\right)\right]^{-1}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -M\left(t\right)=\frac{1}{\Phi\left(a\right)\sqrt{2\pi}}\int_{0}^{\infty}\frac{e^{xt}}{x^{2}}\exp\left(-\frac{1}{2}\left(\alpha-\frac{1}{x}\right)^{2}\right)dx\] - -\end_inset - - -\end_layout - -\begin_layout Standard -No moments? -\begin_inset Formula \[ -l_{\mathbf{x}}\left(\alpha\right)=N\log\left[\Phi\left(\alpha\right)\sqrt{2\pi}\right]+2N\overline{\log\mathbf{x}}+\frac{N}{2}\alpha^{2}-\alpha\overline{\mathbf{x}^{-1}}+\frac{1}{2}\overline{\mathbf{x}^{-2}}\] - -\end_inset - - -\end_layout - -\begin_layout Section -Anglit -\end_layout - -\begin_layout Standard -Defined over -\begin_inset Formula $x\in\left[-\frac{\pi}{4},\frac{\pi}{4}\right]$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \sin\left(2x+\frac{\pi}{2}\right)=\cos\left(2x\right)\\ -F\left(x\right) & = & \sin^{2}\left(x+\frac{\pi}{4}\right)\\ -G\left(q\right) & = & \arcsin\left(\sqrt{q}\right)-\frac{\pi}{4}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & 0\\ -\mu_{2} & = & \frac{\pi^{2}}{16}-\frac{1}{2}\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & -2\frac{\pi^{4}-96}{\left(\pi^{2}-8\right)^{2}}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & 1-\log2\\ - & \approx & 0.30685281944005469058\end{eqnarray*} - -\end_inset - - -\end_layout - 
-\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -M\left(t\right) & = & \int_{-\frac{\pi}{4}}^{\frac{\pi}{4}}\cos\left(2x\right)e^{xt}dx\\ - & = & \frac{4\cosh\left(\frac{\pi t}{4}\right)}{t^{2}+4}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -l_{\mathbf{x}}\left(\cdot\right)=-N\overline{\log\left[\cos\left(2\mathbf{x}\right)\right]}\] - -\end_inset - - -\end_layout - -\begin_layout Section -Arcsine -\end_layout - -\begin_layout Standard -Defined over -\begin_inset Formula $x\in\left(0,1\right)$ -\end_inset - -. - To get the JKB definition put -\begin_inset Formula $x=\frac{u+1}{2}.$ -\end_inset - - i.e. - -\begin_inset Formula $L=-1$ -\end_inset - - and -\begin_inset Formula $S=2.$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{1}{\pi\sqrt{x\left(1-x\right)}}\\ -F\left(x\right) & = & \frac{2}{\pi}\arcsin\left(\sqrt{x}\right)\\ -G\left(q\right) & = & \sin^{2}\left(\frac{\pi}{2}q\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=E^{t/2}I_{0}\left(\frac{t}{2}\right)\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu_{n}^{\prime} & = & \frac{1}{\pi}\int_{0}^{1}dx\, x^{n-1/2}\left(1-x\right)^{-1/2}\\ - & = & \frac{1}{\pi}B\left(\frac{1}{2},n+\frac{1}{2}\right)=\frac{\left(2n-1\right)!!}{2^{n}n!}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{1}{2}\\ -\mu_{2} & = & \frac{1}{8}\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & -\frac{3}{2}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]\approx-0.24156447527049044468\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -l_{\mathbf{x}}\left(\cdot\right)=N\log\pi+\frac{N}{2}\overline{\log\mathbf{x}}+\frac{N}{2}\overline{\log\left(1-\mathbf{x}\right)}\] - -\end_inset - - -\end_layout - -\begin_layout Section -Beta -\end_layout - -\begin_layout 
Standard -Two shape parameters -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -a,b>0\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;a,b\right) & = & \frac{\Gamma\left(a+b\right)}{\Gamma\left(a\right)\Gamma\left(b\right)}x^{a-1}\left(1-x\right)^{b-1}I_{\left(0,1\right)}\left(x\right)\\ -F\left(x;a,b\right) & = & \int_{0}^{x}f\left(y;a,b\right)dy=I\left(x,a,b\right)\\ -G\left(\alpha;a,b\right) & = & I^{-1}\left(\alpha;a,b\right)\\ -M\left(t\right) & = & \frac{\Gamma\left(a\right)\Gamma\left(b\right)}{\Gamma\left(a+b\right)}\,_{1}F_{1}\left(a;a+b;t\right)\\ -\mu & = & \frac{a}{a+b}\\ -\mu_{2} & = & \frac{ab\left(a+b+1\right)}{\left(a+b\right)^{2}}\\ -\gamma_{1} & = & 2\frac{b-a}{a+b+2}\sqrt{\frac{a+b+1}{ab}}\\ -\gamma_{2} & = & \frac{6\left(a^{3}+a^{2}\left(1-2b\right)+b^{2}\left(b+1\right)-2ab\left(b+2\right)\right)}{ab\left(a+b+2\right)\left(a+b+3\right)}\\ -m_{d} & = & \frac{\left(a-1\right)}{\left(a+b-2\right)}\, a+b\neq2\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula $f\left(x;a,1\right)$ -\end_inset - - is also called the Power-function distribution. -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -l_{\mathbf{x}}\left(a,b\right)=-N\log\Gamma\left(a+b\right)+N\log\Gamma\left(a\right)+N\log\Gamma\left(b\right)-N\left(a-1\right)\overline{\log\mathbf{x}}-N\left(b-1\right)\overline{\log\left(1-\mathbf{x}\right)}\] - -\end_inset - - All of the -\begin_inset Formula $x_{i}\in\left[0,1\right]$ -\end_inset - - -\end_layout - -\begin_layout Section -Beta Prime -\end_layout - -\begin_layout Standard -Defined over -\begin_inset Formula $00.$ -\end_inset - - (Note the CDF evaluation uses Eq. - 3.194.1 on pg. - 313 of Gradshteyn & Ryzhik (sixth edition). 
- -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;\alpha,\beta\right) & = & \frac{\Gamma\left(\alpha+\beta\right)}{\Gamma\left(\alpha\right)\Gamma\left(\beta\right)}x^{\alpha-1}\left(1+x\right)^{-\alpha-\beta}\\ -F\left(x;\alpha,\beta\right) & = & \frac{\Gamma\left(\alpha+\beta\right)}{\alpha\Gamma\left(\alpha\right)\Gamma\left(\beta\right)}x^{\alpha}\,_{2}F_{1}\left(\alpha+\beta,\alpha;1+\alpha;-x\right)\\ -G\left(q;\alpha,\beta\right) & = & F^{-1}\left(x;\alpha,\beta\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -\mu_{n}^{\prime}=\left\{ \begin{array}{ccc} -\frac{\Gamma\left(n+\alpha\right)\Gamma\left(\beta-n\right)}{\Gamma\left(\alpha\right)\Gamma\left(\beta\right)}=\frac{\left(\alpha\right)_{n}}{\left(\beta-n\right)_{n}} & & \beta>n\\ -\infty & & \textrm{otherwise}\end{array}\right.\] - -\end_inset - - Therefore, -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{\alpha}{\beta-1}\quad\beta>1\\ -\mu_{2} & = & \frac{\alpha\left(\alpha+1\right)}{\left(\beta-2\right)\left(\beta-1\right)}-\frac{\alpha^{2}}{\left(\beta-1\right)^{2}}\quad\beta>2\\ -\gamma_{1} & = & \frac{\frac{\alpha\left(\alpha+1\right)\left(\alpha+2\right)}{\left(\beta-3\right)\left(\beta-2\right)\left(\beta-1\right)}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\quad\beta>3\\ -\gamma_{2} & = & \frac{\mu_{4}}{\mu_{2}^{2}}-3\\ -\mu_{4} & = & \frac{\alpha\left(\alpha+1\right)\left(\alpha+2\right)\left(\alpha+3\right)}{\left(\beta-4\right)\left(\beta-3\right)\left(\beta-2\right)\left(\beta-1\right)}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}\quad\beta>4\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Bradford -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -c & > & 0\\ -k & = & \log\left(1+c\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{c}{k\left(1+cx\right)}I_{\left(0,1\right)}\left(x\right)\\ 
-F\left(x;c\right) & = & \frac{\log\left(1+cx\right)}{k}\\ -G\left(\alpha\; c\right) & = & \frac{\left(1+c\right)^{\alpha}-1}{c}\\ -M\left(t\right) & = & \frac{1}{k}e^{-t/c}\left[\textrm{Ei}\left(t+\frac{t}{c}\right)-\textrm{Ei}\left(\frac{t}{c}\right)\right]\\ -\mu & = & \frac{c-k}{ck}\\ -\mu_{2} & = & \frac{\left(c+2\right)k-2c}{2ck^{2}}\\ -\gamma_{1} & = & \frac{\sqrt{2}\left(12c^{2}-9kc\left(c+2\right)+2k^{2}\left(c\left(c+3\right)+3\right)\right)}{\sqrt{c\left(c\left(k-2\right)+2k\right)}\left(3c\left(k-2\right)+6k\right)}\\ -\gamma_{2} & = & \frac{c^{3}\left(k-3\right)\left(k\left(3k-16\right)+24\right)+12kc^{2}\left(k-4\right)\left(k-3\right)+6ck^{2}\left(3k-14\right)+12k^{3}}{3c\left(c\left(k-2\right)+2k\right)^{2}}\\ -m_{d} & = & 0\\ -m_{n} & = & \sqrt{1+c}-1\end{eqnarray*} - -\end_inset - - where -\begin_inset Formula $\textrm{Ei}\left(\textrm{z}\right)$ -\end_inset - - is the exponential integral function. - Also -\begin_inset Formula \[ -h\left[X\right]=\frac{1}{2}\log\left(1+c\right)-\log\left(\frac{c}{\log\left(1+c\right)}\right)\] - -\end_inset - - -\end_layout - -\begin_layout Section -Burr -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -c & > & 0\\ -d & > & 0\\ -k & = & \Gamma\left(d\right)\Gamma\left(1-\frac{2}{c}\right)\Gamma\left(\frac{2}{c}+d\right)-\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+d\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;c,d\right) & = & \frac{cd}{x^{c+1}\left(1+x^{-c}\right)^{d+1}}I_{\left(0,\infty\right)}\left(x\right)\\ -F\left(x;c,d\right) & = & \left(1+x^{-c}\right)^{-d}\\ -G\left(\alpha;c,d\right) & = & \left(\alpha^{-1/d}-1\right)^{-1/c}\\ -\mu & = & \frac{\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+d\right)}{\Gamma\left(d\right)}\\ -\mu_{2} & = & \frac{k}{\Gamma^{2}\left(d\right)}\\ -\gamma_{1} & = & 
\frac{1}{\sqrt{k^{3}}}\left[2\Gamma^{3}\left(1-\frac{1}{c}\right)\Gamma^{3}\left(\frac{1}{c}+d\right)+\Gamma^{2}\left(d\right)\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(\frac{3}{c}+d\right)\right.\\ - & & \left.-3\Gamma\left(d\right)\Gamma\left(1-\frac{2}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+d\right)\Gamma\left(\frac{2}{c}+d\right)\right]\\ -\gamma_{2} & = & -3+\frac{1}{k^{2}}\left[6\Gamma\left(d\right)\Gamma\left(1-\frac{2}{c}\right)\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+d\right)\Gamma\left(\frac{2}{c}+d\right)\right.\\ - & & -3\Gamma^{4}\left(1-\frac{1}{c}\right)\Gamma^{4}\left(\frac{1}{c}+d\right)+\Gamma^{3}\left(d\right)\Gamma\left(1-\frac{4}{c}\right)\Gamma\left(\frac{4}{c}+d\right)\\ - & & \left.-4\Gamma^{2}\left(d\right)\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+d\right)\Gamma\left(\frac{3}{c}+d\right)\right]\\ -m_{d} & = & \left(\frac{cd-1}{c+1}\right)^{1/c}\,\textrm{if }cd>1\,\textrm{otherwise }0\\ -m_{n} & = & \left(2^{1/d}-1\right)^{-1/c}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Cauchy -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{1}{\pi\left(1+x^{2}\right)}\\ -F\left(x\right) & = & \frac{1}{2}+\frac{1}{\pi}\tan^{-1}x\\ -G\left(\alpha\right) & = & \tan\left(\pi\alpha-\frac{\pi}{2}\right)\\ -m_{d} & = & 0\\ -m_{n} & = & 0\end{eqnarray*} - -\end_inset - -No finite moments. - This is the t distribution with one degree of freedom. - -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & \log\left(4\pi\right)\\ - & \approx & 2.5310242469692907930.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Chi -\end_layout - -\begin_layout Standard -Generated by taking the (positive) square-root of chi-squared variates. 
-\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;\nu\right) & = & \frac{x^{\nu-1}e^{-x^{2}/2}}{2^{\nu/2-1}\Gamma\left(\frac{\nu}{2}\right)}I_{\left(0,\infty\right)}\left(x\right)\\ -F\left(x;\nu\right) & = & \Gamma\left(\frac{\nu}{2},\frac{x^{2}}{2}\right)\\ -G\left(\alpha;\nu\right) & = & \sqrt{2\Gamma^{-1}\left(\frac{\nu}{2},\alpha\right)}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\Gamma\left(\frac{v}{2}\right)\,_{1}F_{1}\left(\frac{v}{2};\frac{1}{2};\frac{t^{2}}{2}\right)+\frac{t}{\sqrt{2}}\Gamma\left(\frac{1+\nu}{2}\right)\,_{1}F_{1}\left(\frac{1+\nu}{2};\frac{3}{2};\frac{t^{2}}{2}\right)\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{\sqrt{2}\Gamma\left(\frac{\nu+1}{2}\right)}{\Gamma\left(\frac{\nu}{2}\right)}\\ -\mu_{2} & = & \nu-\mu^{2}\\ -\gamma_{1} & = & \frac{2\mu^{3}+\mu\left(1-2\nu\right)}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{2\nu\left(1-\nu\right)-6\mu^{4}+4\mu^{2}\left(2\nu-1\right)}{\mu_{2}^{2}}\\ -m_{d} & = & \sqrt{\nu-1}\quad\nu\geq1\\ -m_{n} & = & \sqrt{2\Gamma^{-1}\left(\frac{\nu}{2},\frac{1}{2}\right)}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Chi-squared -\end_layout - -\begin_layout Standard -This is the gamma distribution with -\begin_inset Formula $L=0.0$ -\end_inset - - and -\begin_inset Formula $S=2.0$ -\end_inset - - and -\begin_inset Formula $\alpha=\nu/2$ -\end_inset - - where -\begin_inset Formula $\nu$ -\end_inset - - is called the degrees of freedom. - If -\begin_inset Formula $Z_{1}\ldots Z_{\nu}$ -\end_inset - - are all standard normal distributions, then -\begin_inset Formula $W=\sum_{k}Z_{k}^{2}$ -\end_inset - - has (standard) chi-square distribution with -\begin_inset Formula $\nu$ -\end_inset - - degrees of freedom. 
- -\end_layout - -\begin_layout Standard -The standard form (most often used in standard form only) is -\begin_inset Formula $x>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\alpha\right) & = & \frac{1}{2\Gamma\left(\frac{\nu}{2}\right)}\left(\frac{x}{2}\right)^{\nu/2-1}e^{-x/2}\\ -F\left(x;\alpha\right) & = & \Gamma\left(\frac{\nu}{2},\frac{x}{2}\right)\\ -G\left(q;\alpha\right) & = & 2\Gamma^{-1}\left(\frac{\nu}{2},q\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\frac{\Gamma\left(\frac{\nu}{2}\right)}{\left(\frac{1}{2}-t\right)^{\nu/2}}\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \nu\\ -\mu_{2} & = & 2\nu\\ -\gamma_{1} & = & \frac{2\sqrt{2}}{\sqrt{\nu}}\\ -\gamma_{2} & = & \frac{12}{\nu}\\ -m_{d} & = & \frac{\nu}{2}-1\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Cosine -\end_layout - -\begin_layout Standard -Approximation to the normal distribution. - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{1}{2\pi}\left[1+\cos x\right]I_{\left[-\pi,\pi\right]}\left(x\right)\\ -F\left(x\right) & = & \frac{1}{2\pi}\left[\pi+x+\sin x\right]I_{\left[-\pi,\pi\right]}\left(x\right)+I_{\left(\pi,\infty\right)}\left(x\right)\\ -G\left(\alpha\right) & = & F^{-1}\left(\alpha\right)\\ -M\left(t\right) & = & \frac{\sinh\left(\pi t\right)}{\pi t\left(1+t^{2}\right)}\\ -\mu=m_{d}=m_{n} & = & 0\\ -\mu_{2} & = & \frac{\pi^{2}}{3}-2\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & \frac{-6\left(\pi^{4}-90\right)}{5\left(\pi^{2}-6\right)^{2}}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & \log\left(4\pi\right)-1\\ - & \approx & 1.5310242469692907930.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Double Gamma -\end_layout - -\begin_layout Standard -The double gamma is the signed version of the Gamma distribution. 
- For -\begin_inset Formula $\alpha>0:$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;\alpha\right) & = & \frac{1}{2\Gamma\left(\alpha\right)}\left|x\right|^{\alpha-1}e^{-\left|x\right|}\\ -F\left(x;\alpha\right) & = & \left\{ \begin{array}{ccc} -\frac{1}{2}-\frac{1}{2}\Gamma\left(\alpha,\left|x\right|\right) & & x\leq0\\ -\frac{1}{2}+\frac{1}{2}\Gamma\left(\alpha,\left|x\right|\right) & & x>0\end{array}\right.\\ -G\left(q;\alpha\right) & = & \left\{ \begin{array}{ccc} --\Gamma^{-1}\left(\alpha,\left|2q-1\right|\right) & & q\leq\frac{1}{2}\\ -\Gamma^{-1}\left(\alpha,\left|2q-1\right|\right) & & q>\frac{1}{2}\end{array}\right.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -M\left(t\right)=\frac{1}{2\left(1-t\right)^{a}}+\frac{1}{2\left(1+t\right)^{a}}\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu=m_{n} & = & 0\\ -\mu_{2} & = & \alpha\left(\alpha+1\right)\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & \frac{\left(\alpha+2\right)\left(\alpha+3\right)}{\alpha\left(\alpha+1\right)}-3\\ -m_{d} & = & \textrm{NA}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Doubly Non-central F* -\end_layout - -\begin_layout Section -Doubly Non-central t* -\end_layout - -\begin_layout Section -Double Weibull -\end_layout - -\begin_layout Standard -This is a signed form of the Weibull distribution. 
-\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{c}{2}\left|x\right|^{c-1}\exp\left(-\left|x\right|^{c}\right)\\ -F\left(x;c\right) & = & \left\{ \begin{array}{ccc} -\frac{1}{2}\exp\left(-\left|x\right|^{c}\right) & & x\leq0\\ -1-\frac{1}{2}\exp\left(-\left|x\right|^{c}\right) & & x>0\end{array}\right.\\ -G\left(q;c\right) & = & \left\{ \begin{array}{ccc} --\log^{1/c}\left(\frac{1}{2q}\right) & & q\leq\frac{1}{2}\\ -\log^{1/c}\left(\frac{1}{2q-1}\right) & & q>\frac{1}{2}\end{array}\right.\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\mu_{n}=\begin{cases} -\Gamma\left(1+\frac{n}{c}\right) & n\textrm{ even}\\ -0 & n\textrm{ odd}\end{cases}\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -m_{d}=\mu & = & 0\\ -\mu_{2} & = & \Gamma\left(\frac{c+2}{c}\right)\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & \frac{\Gamma\left(1+\frac{4}{c}\right)}{\Gamma^{2}\left(1+\frac{2}{c}\right)}\\ -m_{d} & = & \textrm{NA bimodal}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Erlang -\end_layout - -\begin_layout Standard -This is just the Gamma distribution with shape parameter -\begin_inset Formula $\alpha=n$ -\end_inset - - an integer. - -\end_layout - -\begin_layout Section -Exponential -\end_layout - -\begin_layout Standard -This is a special case of the Gamma (and Erlang) distributions with shape - parameter -\begin_inset Formula $\left(\alpha=1\right)$ -\end_inset - - and the same location and scale parameters. 
- The standard form is therefore ( -\begin_inset Formula $x\geq0$ -\end_inset - -) -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & e^{-x}\\ -F\left(x\right) & = & \Gamma\left(1,x\right)=1-e^{-x}\\ -G\left(q\right) & = & -\log\left(1-q\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -\mu_{n}^{\prime}=n!\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -M\left(t\right)=\frac{1}{1-t}\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & 1\\ -\mu_{2} & = & 1\\ -\gamma_{1} & = & 2\\ -\gamma_{2} & = & 6\\ -m_{d} & = & 0\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=1.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Exponentiated Weibull -\end_layout - -\begin_layout Standard -Two positive shape parameters -\begin_inset Formula $a$ -\end_inset - - and -\begin_inset Formula $c$ -\end_inset - - and -\begin_inset Formula $x\in\left(0,\infty\right)$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;a,c\right) & = & ac\left[1-\exp\left(-x^{c}\right)\right]^{a-1}\exp\left(-x^{c}\right)x^{c-1}\\ -F\left(x;a,c\right) & = & \left[1-\exp\left(-x^{c}\right)\right]^{a}\\ -G\left(q;a,c\right) & = & \left[-\log\left(1-q^{1/a}\right)\right]^{1/c}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Exponential Power -\end_layout - -\begin_layout Standard -One positive shape parameter -\begin_inset Formula $b$ -\end_inset - -. 
- Defined for -\begin_inset Formula $x\geq0.$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;b\right) & = & ebx^{b-1}\exp\left[x^{b}-e^{x^{b}}\right]\\ -F\left(x;b\right) & = & 1-\exp\left[1-e^{x^{b}}\right]\\ -G\left(q;b\right) & = & \log^{1/b}\left[1-\log\left(1-q\right)\right]\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Fatigue Life (Birnbaum-Sanders) -\end_layout - -\begin_layout Standard -This distribution's pdf is the average of the inverse-Gaussian -\begin_inset Formula $\left(\mu=1\right)$ -\end_inset - - and reciprocal inverse-Gaussian pdf -\begin_inset Formula $\left(\mu=1\right)$ -\end_inset - -. - We follow the notation of JKB here with -\begin_inset Formula $\beta=S.$ -\end_inset - - for -\begin_inset Formula $x>0$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{x+1}{2c\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-1\right)^{2}}{2xc^{2}}\right)\\ -F\left(x;c\right) & = & \Phi\left(\frac{1}{c}\left(\sqrt{x}-\frac{1}{\sqrt{x}}\right)\right)\\ -G\left(q;c\right) & = & \frac{1}{4}\left[c\Phi^{-1}\left(q\right)+\sqrt{c^{2}\left(\Phi^{-1}\left(q\right)\right)^{2}+4}\right]^{2}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=c\sqrt{2\pi}\exp\left[\frac{1}{c^{2}}\left(1-\sqrt{1-2c^{2}t}\right)\right]\left(1+\frac{1}{\sqrt{1-2c^{2}t}}\right)\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{c^{2}}{2}+1\\ -\mu_{2} & = & c^{2}\left(\frac{5}{4}c^{2}+1\right)\\ -\gamma_{1} & = & \frac{4c\sqrt{11c^{2}+6}}{\left(5c^{2}+4\right)^{3/2}}\\ -\gamma_{2} & = & \frac{6c^{2}\left(93c^{2}+41\right)}{\left(5c^{2}+4\right)^{2}}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Fisk (Log Logistic) -\end_layout - -\begin_layout Standard -Special case of the Burr distribution with -\begin_inset Formula $d=1$ -\end_inset - - -\end_layout - 
-\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -c & > & 0\\ -k & = & \Gamma\left(1-\frac{2}{c}\right)\Gamma\left(\frac{2}{c}+1\right)-\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+1\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;c,d\right) & = & \frac{cx^{c-1}}{\left(1+x^{c}\right)^{2}}I_{\left(0,\infty\right)}\left(x\right)\\ -F\left(x;c,d\right) & = & \left(1+x^{-c}\right)^{-1}\\ -G\left(\alpha;c,d\right) & = & \left(\alpha^{-1}-1\right)^{-1/c}\\ -\mu & = & \Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+1\right)\\ -\mu_{2} & = & k\\ -\gamma_{1} & = & \frac{1}{\sqrt{k^{3}}}\left[2\Gamma^{3}\left(1-\frac{1}{c}\right)\Gamma^{3}\left(\frac{1}{c}+1\right)+\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(\frac{3}{c}+1\right)\right.\\ - & & \left.-3\Gamma\left(1-\frac{2}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+1\right)\Gamma\left(\frac{2}{c}+1\right)\right]\\ -\gamma_{2} & = & -3+\frac{1}{k^{2}}\left[6\Gamma\left(1-\frac{2}{c}\right)\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+1\right)\Gamma\left(\frac{2}{c}+1\right)\right.\\ - & & -3\Gamma^{4}\left(1-\frac{1}{c}\right)\Gamma^{4}\left(\frac{1}{c}+1\right)+\Gamma\left(1-\frac{4}{c}\right)\Gamma\left(\frac{4}{c}+1\right)\\ - & & \left.-4\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+1\right)\Gamma\left(\frac{3}{c}+1\right)\right]\\ -m_{d} & = & \left(\frac{c-1}{c+1}\right)^{1/c}\,\textrm{if }c>1\,\textrm{otherwise }0\\ -m_{n} & = & 1\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=2-\log c.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Folded Cauchy -\end_layout - -\begin_layout Standard -This formula can be expressed in terms of the standard formulas for the - Cauchy distribution (call the cdf -\begin_inset Formula $C\left(x\right)$ 
-\end_inset - - and the pdf -\begin_inset Formula $d\left(x\right)$ -\end_inset - -). - if -\begin_inset Formula $Y$ -\end_inset - - is cauchy then -\begin_inset Formula $\left|Y\right|$ -\end_inset - - is folded cauchy. - Note that -\begin_inset Formula $x\geq0.$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{1}{\pi\left(1+\left(x-c\right)^{2}\right)}+\frac{1}{\pi\left(1+\left(x+c\right)^{2}\right)}\\ -F\left(x;c\right) & = & \frac{1}{\pi}\tan^{-1}\left(x-c\right)+\frac{1}{\pi}\tan^{-1}\left(x+c\right)\\ -G\left(q;c\right) & = & F^{-1}\left(x;c\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -No moments -\end_layout - -\begin_layout Section -Folded Normal -\end_layout - -\begin_layout Standard -If -\begin_inset Formula $Z$ -\end_inset - - is Normal with mean -\begin_inset Formula $L$ -\end_inset - - and -\begin_inset Formula $\sigma=S$ -\end_inset - -, then -\begin_inset Formula $\left|Z\right|$ -\end_inset - - is a folded normal with shape parameter -\begin_inset Formula $c=\left|L\right|/S$ -\end_inset - -, location parameter -\begin_inset Formula $0$ -\end_inset - - and scale parameter -\begin_inset Formula $S$ -\end_inset - -. - This is a special case of the non-central chi distribution with one-degree - of freedom and non-centrality parameter -\begin_inset Formula $c^{2}.$ -\end_inset - - Note that -\begin_inset Formula $c\geq0$ -\end_inset - -. 
- The standard form of the folded normal is -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \sqrt{\frac{2}{\pi}}\cosh\left(cx\right)\exp\left(-\frac{x^{2}+c^{2}}{2}\right)\\ -F\left(x;c\right) & = & \Phi\left(x-c\right)-\Phi\left(-x-c\right)=\Phi\left(x-c\right)+\Phi\left(x+c\right)-1\\ -G\left(\alpha;c\right) & = & F^{-1}\left(x;c\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\exp\left[\frac{t}{2}\left(t-2c\right)\right]\left(1+e^{2ct}\right)\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -k & = & \textrm{erf}\left(\frac{c}{\sqrt{2}}\right)\\ -p & = & \exp\left(-\frac{c^{2}}{2}\right)\\ -\mu & = & \sqrt{\frac{2}{\pi}}p+ck\\ -\mu_{2} & = & c^{2}+1-\mu^{2}\\ -\gamma_{1} & = & \frac{\sqrt{\frac{2}{\pi}}p^{3}\left(4-\frac{\pi}{p^{2}}\left(2c^{2}+1\right)\right)+2ck\left(6p^{2}+3cpk\sqrt{2\pi}+\pi c\left(k^{2}-1\right)\right)}{\pi\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{c^{4}+6c^{2}+3+6\left(c^{2}+1\right)\mu^{2}-3\mu^{4}-4p\mu\left(\sqrt{\frac{2}{\pi}}\left(c^{2}+2\right)+\frac{ck}{p}\left(c^{2}+3\right)\right)}{\mu_{2}^{2}}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Fratio (or F) -\end_layout - -\begin_layout Standard -Defined for -\begin_inset Formula $x>0$ -\end_inset - -. - The distribution of -\begin_inset Formula $\left(X_{1}/X_{2}\right)\left(\nu_{2}/\nu_{1}\right)$ -\end_inset - - if -\begin_inset Formula $X_{1}$ -\end_inset - - is chi-squared with -\begin_inset Formula $v_{1}$ -\end_inset - - degrees of freedom and -\begin_inset Formula $X_{2}$ -\end_inset - - is chi-squared with -\begin_inset Formula $v_{2}$ -\end_inset - - degrees of freedom. 
- 
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;\nu_{1},\nu_{2}\right) & = & \frac{\nu_{2}^{\nu_{2}/2}\nu_{1}^{\nu_{1}/2}x^{\nu_{1}/2-1}}{\left(\nu_{2}+\nu_{1}x\right)^{\left(\nu_{1}+\nu_{2}\right)/2}B\left(\frac{\nu_{1}}{2},\frac{\nu_{2}}{2}\right)}\\
-F\left(x;\nu_{1},\nu_{2}\right) & = & I\left(\frac{\nu_{1}}{2},\frac{\nu_{2}}{2},\frac{\nu_{1}x}{\nu_{2}+\nu_{1}x}\right)\\
-G\left(q;\nu_{1},\nu_{2}\right) & = & \left[\frac{\nu_{2}}{I^{-1}\left(\nu_{1}/2,\nu_{2}/2,q\right)}-\frac{\nu_{1}}{\nu_{2}}\right]^{-1}.\end{eqnarray*}
-
-\end_inset
-
-
-\begin_inset Formula \begin{eqnarray*}
-\mu & = & \frac{\nu_{2}}{\nu_{2}-2}\quad\nu_{2}>2\\
-\mu_{2} & = & \frac{2\nu_{2}^{2}\left(\nu_{1}+\nu_{2}-2\right)}{\nu_{1}\left(\nu_{2}-2\right)^{2}\left(\nu_{2}-4\right)}\quad\nu_{2}>4\\
-\gamma_{1} & = & \frac{2\left(2\nu_{1}+\nu_{2}-2\right)}{\nu_{2}-6}\sqrt{\frac{2\left(\nu_{2}-4\right)}{\nu_{1}\left(\nu_{1}+\nu_{2}-2\right)}}\quad\nu_{2}>6\\
-\gamma_{2} & = & \frac{3\left[8+\left(\nu_{2}-6\right)\gamma_{1}^{2}\right]}{2\nu_{2}-16}\quad\nu_{2}>8\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Fréchet (ExtremeLB, Extreme Value II, Weibull minimum)
-\end_layout
-
-\begin_layout Standard
-A type of extreme-value distribution with a lower bound. 
- Defined for
-\begin_inset Formula $x>0$
-\end_inset
-
- and
-\begin_inset Formula $c>0$
-\end_inset
-
-
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;c\right) & = & cx^{c-1}\exp\left(-x^{c}\right)\\
-F\left(x;c\right) & = & 1-\exp\left(-x^{c}\right)\\
-G\left(q;c\right) & = & \left[-\log\left(1-q\right)\right]^{1/c}\end{eqnarray*}

-\end_inset
-
-
-\begin_inset Formula \[
-\mu_{n}^{\prime}=\Gamma\left(1+\frac{n}{c}\right)\]
-
-\end_inset
-
-
-\begin_inset Formula \begin{eqnarray*}
-\mu & = & \Gamma\left(1+\frac{1}{c}\right)\\
-\mu_{2} & = & \Gamma\left(1+\frac{2}{c}\right)-\Gamma^{2}\left(1+\frac{1}{c}\right)\\
-\gamma_{1} & = & \frac{\Gamma\left(1+\frac{3}{c}\right)-3\Gamma\left(1+\frac{2}{c}\right)\Gamma\left(1+\frac{1}{c}\right)+2\Gamma^{3}\left(1+\frac{1}{c}\right)}{\mu_{2}^{3/2}}\\
-\gamma_{2} & = & \frac{\Gamma\left(1+\frac{4}{c}\right)-4\Gamma\left(1+\frac{1}{c}\right)\Gamma\left(1+\frac{3}{c}\right)+6\Gamma^{2}\left(1+\frac{1}{c}\right)\Gamma\left(1+\frac{2}{c}\right)-3\Gamma^{4}\left(1+\frac{1}{c}\right)}{\mu_{2}^{2}}-3\\
-m_{d} & = & \left(\frac{c-1}{c}\right)^{1/c}\,\textrm{if }c>1\,\textrm{otherwise }0\\
-m_{n} & = & G\left(\frac{1}{2};c\right)\end{eqnarray*}
-
-\end_inset
-
-
-\begin_inset Formula \[
-h\left[X\right]=-\frac{\gamma}{c}-\log\left(c\right)+\gamma+1\]
-
-\end_inset
-
- where
-\begin_inset Formula $\gamma$
-\end_inset
-
- is Euler's constant and equal to
-\begin_inset Formula \[
-\gamma\approx0.57721566490153286061.\]
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Fréchet (left-skewed, Extreme Value Type III, Weibull maximum)
-\end_layout
-
-\begin_layout Standard
-Defined for
-\begin_inset Formula $x<0$
-\end_inset
-
- and
-\begin_inset Formula $c>0$
-\end_inset
-
-. 
- -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & c\left(-x\right)^{c-1}\exp\left(-\left(-x\right)^{c}\right)\\ -F\left(x;c\right) & = & \exp\left(-\left(-x\right)^{c}\right)\\ -G\left(q;c\right) & = & -\left(-\log q\right)^{1/c}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -The mean is the negative of the right-skewed Frechet distribution given - above, and the other statistical parameters can be computed from -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -\mu_{n}^{\prime}=\left(-1\right)^{n}\Gamma\left(1+\frac{n}{c}\right).\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=-\frac{\gamma}{c}-\log\left(c\right)+\gamma+1\] - -\end_inset - - where -\begin_inset Formula $\gamma$ -\end_inset - - is Euler's constant and equal to -\begin_inset Formula \[ -\gamma\approx0.57721566490153286061.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Gamma -\end_layout - -\begin_layout Standard -The standard form for the gamma distribution is -\begin_inset Formula $\left(\alpha>0\right)$ -\end_inset - - valid for -\begin_inset Formula $x\geq0$ -\end_inset - -. 
-\begin_inset Formula \begin{eqnarray*} -f\left(x;\alpha\right) & = & \frac{1}{\Gamma\left(\alpha\right)}x^{\alpha-1}e^{-x}\\ -F\left(x;\alpha\right) & = & \Gamma\left(\alpha,x\right)\\ -G\left(q;\alpha\right) & = & \Gamma^{-1}\left(\alpha,q\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\frac{1}{\left(1-t\right)^{\alpha}}\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \alpha\\ -\mu_{2} & = & \alpha\\ -\gamma_{1} & = & \frac{2}{\sqrt{\alpha}}\\ -\gamma_{2} & = & \frac{6}{\alpha}\\ -m_{d} & = & \alpha-1\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=\Psi\left(a\right)\left[1-a\right]+a+\log\Gamma\left(a\right)\] - -\end_inset - - where -\begin_inset Formula \[ -\Psi\left(a\right)=\frac{\Gamma^{\prime}\left(a\right)}{\Gamma\left(a\right)}.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Generalized Logistic -\end_layout - -\begin_layout Standard -Has been used in the analysis of extreme values. 
- Has one shape parameter -\begin_inset Formula $c>0.$ -\end_inset - - And -\begin_inset Formula $x>0$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{c\exp\left(-x\right)}{\left[1+\exp\left(-x\right)\right]^{c+1}}\\ -F\left(x;c\right) & = & \frac{1}{\left[1+\exp\left(-x\right)\right]^{c}}\\ -G\left(q;c\right) & = & -\log\left(q^{-1/c}-1\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -M\left(t\right)=\frac{c}{1-t}\,_{2}F_{1}\left(1+c,\,1-t\,;\,2-t\,;-1\right)\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & \gamma+\psi_{0}\left(c\right)\\ -\mu_{2} & = & \frac{\pi^{2}}{6}+\psi_{1}\left(c\right)\\ -\gamma_{1} & = & \frac{\psi_{2}\left(c\right)+2\zeta\left(3\right)}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\left(\frac{\pi^{4}}{15}+\psi_{3}\left(c\right)\right)}{\mu_{2}^{2}}\\ -m_{d} & = & \log c\\ -m_{n} & = & -\log\left(2^{1/c}-1\right)\end{eqnarray*} - -\end_inset - - Note that the polygamma function is -\begin_inset Formula \begin{eqnarray*} -\psi_{n}\left(z\right) & = & \frac{d^{n+1}}{dz^{n+1}}\log\Gamma\left(z\right)\\ - & = & \left(-1\right)^{n+1}n!\sum_{k=0}^{\infty}\frac{1}{\left(z+k\right)^{n+1}}\\ - & = & \left(-1\right)^{n+1}n!\zeta\left(n+1,z\right)\end{eqnarray*} - -\end_inset - - where -\begin_inset Formula $\zeta\left(k,x\right)$ -\end_inset - - is a generalization of the Riemann zeta function called the Hurwitz zeta - function Note that -\begin_inset Formula $\zeta\left(n\right)\equiv\zeta\left(n,1\right)$ -\end_inset - - -\end_layout - -\begin_layout Section -Generalized Pareto -\end_layout - -\begin_layout Standard -Shape parameter -\begin_inset Formula $c\neq0$ -\end_inset - - and defined for -\begin_inset Formula $x\geq0$ -\end_inset - - for all -\begin_inset Formula $c$ -\end_inset - - and -\begin_inset Formula $x<\frac{1}{\left|c\right|}$ -\end_inset - - 
if -\begin_inset Formula $c$ -\end_inset - - is negative. - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \left(1+cx\right)^{-1-\frac{1}{c}}\\ -F\left(x;c\right) & = & 1-\frac{1}{\left(1+cx\right)^{1/c}}\\ -G\left(q;c\right) & = & \frac{1}{c}\left[\left(\frac{1}{1-q}\right)^{c}-1\right]\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -M\left(t\right)=\left\{ \begin{array}{cc} -\left(-\frac{t}{c}\right)^{\frac{1}{c}}e^{-\frac{t}{c}}\left[\Gamma\left(1-\frac{1}{c}\right)+\Gamma\left(-\frac{1}{c},-\frac{t}{c}\right)-\pi\csc\left(\frac{\pi}{c}\right)/\Gamma\left(\frac{1}{c}\right)\right] & c>0\\ -\left(\frac{\left|c\right|}{t}\right)^{1/\left|c\right|}\Gamma\left[\frac{1}{\left|c\right|},\frac{t}{\left|c\right|}\right] & c<0\end{array}\right.\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -\mu_{n}^{\prime}=\frac{\left(-1\right)^{n}}{c^{n}}\sum_{k=0}^{n}\left(\begin{array}{c} -n\\ -k\end{array}\right)\frac{\left(-1\right)^{k}}{1-ck}\quad cn<1\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu_{1}^{\prime} & = & \frac{1}{1-c}\quad c<1\\ -\mu_{2}^{\prime} & = & \frac{2}{\left(1-2c\right)\left(1-c\right)}\quad c<\frac{1}{2}\\ -\mu_{3}^{\prime} & = & \frac{6}{\left(1-c\right)\left(1-2c\right)\left(1-3c\right)}\quad c<\frac{1}{3}\\ -\mu_{4}^{\prime} & = & \frac{24}{\left(1-c\right)\left(1-2c\right)\left(1-3c\right)\left(1-4c\right)}\quad c<\frac{1}{4}\end{eqnarray*} - -\end_inset - - Thus, -\begin_inset Formula \begin{eqnarray*} -\mu & = & \mu_{1}^{\prime}\\ -\mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ -\gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=1+c\quad c>0\] - -\end_inset - - -\end_layout - -\begin_layout Section 
-Generalized Exponential
-\end_layout
-
-\begin_layout Standard
-Three positive shape parameters for
-\begin_inset Formula $x\geq0.$
-\end_inset
-
- Note that
-\begin_inset Formula $a,b,$
-\end_inset
-
- and
-\begin_inset Formula $c$
-\end_inset
-
- are all
-\begin_inset Formula $>0.$
-\end_inset
-
-
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;a,b,c\right) & = & \left(a+b\left(1-e^{-cx}\right)\right)\exp\left[-ax-bx+\frac{b}{c}\left(1-e^{-cx}\right)\right]\\
-F\left(x;a,b,c\right) & = & 1-\exp\left[-ax-bx+\frac{b}{c}\left(1-e^{-cx}\right)\right]\\
-G\left(q;a,b,c\right) & = & F^{-1}\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Generalized Extreme Value
-\end_layout
-
-\begin_layout Standard
-Extreme value distributions with shape parameter
-\begin_inset Formula $c$
-\end_inset
-
-.
-
-\end_layout
-
-\begin_layout Standard
-For
-\begin_inset Formula $c>0$
-\end_inset
-
- defined on
-\begin_inset Formula $-\infty<x\leq\frac{1}{c}$
-\end_inset
-
-.
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;c\right) & = & \exp\left[-\left(1-cx\right)^{1/c}\right]\left(1-cx\right)^{1/c-1}\\
-F\left(x;c\right) & = & \exp\left[-\left(1-cx\right)^{1/c}\right]\\
-G\left(q;c\right) & = & \frac{1}{c}\left[1-\left(-\log q\right)^{c}\right]\end{eqnarray*}
-
-\end_inset
-
-
-\begin_inset Formula \[
-\mu_{n}^{\prime}=\frac{1}{c^{n}}\sum_{k=0}^{n}\left(\begin{array}{c}
-n\\
-k\end{array}\right)\left(-1\right)^{k}\Gamma\left(ck+1\right)\quad ck>-1\]
-
-\end_inset
-
- So,
-\begin_inset Formula \begin{eqnarray*}
-\mu_{1}^{\prime} & = & \frac{1}{c}\left(1-\Gamma\left(1+c\right)\right)\quad c>-1\\
-\mu_{2}^{\prime} & = & \frac{1}{c^{2}}\left(1-2\Gamma\left(1+c\right)+\Gamma\left(1+2c\right)\right)\quad c>-\frac{1}{2}\\
-\mu_{3}^{\prime} & = & \frac{1}{c^{3}}\left(1-3\Gamma\left(1+c\right)+3\Gamma\left(1+2c\right)-\Gamma\left(1+3c\right)\right)\quad c>-\frac{1}{3}\\
-\mu_{4}^{\prime} & = & \frac{1}{c^{4}}\left(1-4\Gamma\left(1+c\right)+6\Gamma\left(1+2c\right)-4\Gamma\left(1+3c\right)+\Gamma\left(1+4c\right)\right)\quad c>-\frac{1}{4}\end{eqnarray*}
-
-\end_inset
-
- For
-\begin_inset Formula $c<0$
-\end_inset
-
- defined on
-\begin_inset Formula $\frac{1}{c}\leq x<\infty.$
-\end_inset
-
- For
-\begin_inset Formula $c=0$
-\end_inset
-
- defined over all space
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;0\right) & = & \exp\left[-e^{-x}\right]e^{-x}\\
-F\left(x;0\right) & = & \exp\left[-e^{-x}\right]\\
-G\left(q;0\right) & = & -\log\left(-\log q\right)\end{eqnarray*}
- 
-\end_inset - - This is just the (left-skewed) Gumbel distribution for c=0. - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \gamma=-\psi_{0}\left(1\right)\\ -\mu_{2} & = & \frac{\pi^{2}}{6}\\ -\gamma_{1} & = & \frac{12\sqrt{6}}{\pi^{3}}\zeta\left(3\right)\\ -\gamma_{2} & = & \frac{12}{5}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Generalized Gamma -\end_layout - -\begin_layout Standard -A general probability form that reduces to many common distributions: -\begin_inset Formula $x>0$ -\end_inset - - -\begin_inset Formula $a>0$ -\end_inset - - and -\begin_inset Formula $c\neq0.$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;a,c\right) & = & \frac{\left|c\right|x^{ca-1}}{\Gamma\left(a\right)}\exp\left(-x^{c}\right)\\ -F\left(x;a,c\right) & = & \begin{array}{cc} -\frac{\Gamma\left(a,x^{c}\right)}{\Gamma\left(a\right)} & c>0\\ -1-\frac{\Gamma\left(a,x^{c}\right)}{\Gamma\left(a\right)} & c<0\end{array}\\ -G\left(q;a,c\right) & = & \left\{ \Gamma^{-1}\left[a,\Gamma\left(a\right)q\right]\right\} ^{1/c}\quad c>0\\ - & & \left\{ \Gamma^{-1}\left[a,\Gamma\left(a\right)\left(1-q\right)\right]\right\} ^{1/c}\quad c<0\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\frac{\Gamma\left(a+\frac{n}{c}\right)}{\Gamma\left(a\right)}\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{\Gamma\left(a+\frac{1}{c}\right)}{\Gamma\left(a\right)}\\ -\mu_{2} & = & \frac{\Gamma\left(a+\frac{2}{c}\right)}{\Gamma\left(a\right)}-\mu^{2}\\ -\gamma_{1} & = & \frac{\Gamma\left(a+\frac{3}{c}\right)/\Gamma\left(a\right)-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\Gamma\left(a+\frac{4}{c}\right)/\Gamma\left(a\right)-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\\ -m_{d} & = & \left(\frac{ac-1}{c}\right)^{1/c}.\end{eqnarray*} - -\end_inset - - Special cases are Weibull -\begin_inset Formula $\left(a=1\right)$ -\end_inset - -, half-normal 
-\begin_inset Formula $\left(a=1/2,c=2\right)$ -\end_inset - - and ordinary gamma distributions -\begin_inset Formula $c=1.$ -\end_inset - - If -\begin_inset Formula $c=-1$ -\end_inset - - then it is the inverted gamma distribution. - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=a-a\Psi\left(a\right)+\frac{1}{c}\Psi\left(a\right)+\log\Gamma\left(a\right)-\log\left|c\right|.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Generalized Half-Logistic -\end_layout - -\begin_layout Standard -For -\begin_inset Formula $x\in\left[0,1/c\right]$ -\end_inset - - and -\begin_inset Formula $c>0$ -\end_inset - - we have -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{2\left(1-cx\right)^{\frac{1}{c}-1}}{\left(1+\left(1-cx\right)^{1/c}\right)^{2}}\\ -F\left(x;c\right) & = & \frac{1-\left(1-cx\right)^{1/c}}{1+\left(1-cx\right)^{1/c}}\\ -G\left(q;c\right) & = & \frac{1}{c}\left[1-\left(\frac{1-q}{1+q}\right)^{c}\right]\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & 2-\left(2c+1\right)\log2.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Gilbrat -\end_layout - -\begin_layout Standard -Special case of the log-normal with -\begin_inset Formula $\sigma=1$ -\end_inset - - and -\begin_inset Formula $S=1.0$ -\end_inset - - (typically also -\begin_inset Formula $L=0.0$ -\end_inset - -) -\begin_inset Formula \begin{eqnarray*} -f\left(x;\sigma\right) & = & \frac{1}{x\sqrt{2\pi}}\exp\left[-\frac{1}{2}\left(\log x\right)^{2}\right]\\ -F\left(x;\sigma\right) & = & \Phi\left(\log x\right)=\frac{1}{2}\left[1+\textrm{erf}\left(\frac{\log x}{\sqrt{2}}\right)\right]\\ -G\left(q;\sigma\right) & = & \exp\left\{ \Phi^{-1}\left(q\right)\right\} \end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \sqrt{e}\\ -\mu_{2} & = & e\left[e-1\right]\\ -\gamma_{1} & = & \sqrt{e-1}\left(2+e\right)\\ 
-\gamma_{2} & = & e^{4}+2e^{3}+3e^{2}-6\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \begin{eqnarray*}
-h\left[X\right] & = & \log\left(\sqrt{2\pi e}\right)\\
- & \approx & 1.4189385332046727418\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Gompertz (Truncated Gumbel)
-\end_layout
-
-\begin_layout Standard
-For
-\begin_inset Formula $x\geq0$
-\end_inset
-
- and
-\begin_inset Formula $c>0$
-\end_inset
-
-.
- In JKB the two shape parameters
-\begin_inset Formula $b,a$
-\end_inset
-
- are reduced to the single shape-parameter
-\begin_inset Formula $c=b/a$
-\end_inset
-
-.
- This is because
-\begin_inset Formula $a$
-\end_inset
-
- is just a scale parameter when
-\begin_inset Formula $a\neq0$
-\end_inset
-
-.
- If
-\begin_inset Formula $a=0,$
-\end_inset
-
- the distribution reduces to the exponential distribution scaled by
-\begin_inset Formula $1/b.$
-\end_inset
-
- Thus, the standard form is given as
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;c\right) & = & ce^{x}\exp\left[-c\left(e^{x}-1\right)\right]\\
-F\left(x;c\right) & = & 1-\exp\left[-c\left(e^{x}-1\right)\right]\\
-G\left(q;c\right) & = & \log\left[1-\frac{1}{c}\log\left(1-q\right)\right]\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \[
-h\left[X\right]=1-\log\left(c\right)-e^{c}\textrm{Ei}\left(1,c\right),\]
-
-\end_inset
-
-where
-\begin_inset Formula \[
-\textrm{Ei}\left(n,x\right)=\int_{1}^{\infty}t^{-n}\exp\left(-xt\right)dt\]
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Gumbel (LogWeibull, Fisher-Tippetts, Type I Extreme Value)
-\end_layout
-
-\begin_layout Standard
-One of a class of extreme value distributions (right-skewed). 
-\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \exp\left(-\left(x+e^{-x}\right)\right)\\ -F\left(x\right) & = & \exp\left(-e^{-x}\right)\\ -G\left(q\right) & = & -\log\left(-\log\left(q\right)\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\Gamma\left(1-t\right)\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \gamma=-\psi_{0}\left(1\right)\\ -\mu_{2} & = & \frac{\pi^{2}}{6}\\ -\gamma_{1} & = & \frac{12\sqrt{6}}{\pi^{3}}\zeta\left(3\right)\\ -\gamma_{2} & = & \frac{12}{5}\\ -m_{d} & = & 0\\ -m_{n} & = & -\log\left(\log2\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]\approx1.0608407169541684911\] - -\end_inset - - -\end_layout - -\begin_layout Section -Gumbel Left-skewed (for minimum order statistic) -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \exp\left(x-e^{x}\right)\\ -F\left(x\right) & = & 1-\exp\left(-e^{x}\right)\\ -G\left(q\right) & = & \log\left(-\log\left(1-q\right)\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\Gamma\left(1+t\right)\] - -\end_inset - - Note, that -\begin_inset Formula $\mu$ -\end_inset - - is negative the mean for the right-skewed distribution. - Similar for median and mode. - All other moments are the same. - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]\approx1.0608407169541684911.\] - -\end_inset - - -\end_layout - -\begin_layout Section -HalfCauchy -\end_layout - -\begin_layout Standard -If -\begin_inset Formula $Z$ -\end_inset - - is Hyperbolic Secant distributed then -\begin_inset Formula $e^{Z}$ -\end_inset - - is Half-Cauchy distributed. - Also, if -\begin_inset Formula $W$ -\end_inset - - is (standard) Cauchy distributed, then -\begin_inset Formula $\left|W\right|$ -\end_inset - - is Half-Cauchy distributed. 
- Special case of the Folded Cauchy distribution with -\begin_inset Formula $c=0.$ -\end_inset - - The standard form is -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{2}{\pi\left(1+x^{2}\right)}I_{[0,\infty)}\left(x\right)\\ -F\left(x\right) & = & \frac{2}{\pi}\arctan\left(x\right)I_{\left[0,\infty\right]}\left(x\right)\\ -G\left(q\right) & = & \tan\left(\frac{\pi}{2}q\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\cos t+\frac{2}{\pi}\left[\textrm{Si}\left(t\right)\cos t-\textrm{Ci}\left(\textrm{-}t\right)\sin t\right]\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -m_{d} & = & 0\\ -m_{n} & = & \tan\left(\frac{\pi}{4}\right)\end{eqnarray*} - -\end_inset - - No moments, as the integrals diverge. - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & \log\left(2\pi\right)\\ - & \approx & 1.8378770664093454836.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -HalfNormal -\end_layout - -\begin_layout Standard -This is a special case of the chi distribution with -\begin_inset Formula $L=a$ -\end_inset - - and -\begin_inset Formula $S=b$ -\end_inset - - and -\begin_inset Formula $\nu=1.$ -\end_inset - - This is also a special case of the folded normal with shape parameter -\begin_inset Formula $c=0$ -\end_inset - - and -\begin_inset Formula $S=S.$ -\end_inset - - If -\begin_inset Formula $Z$ -\end_inset - - is (standard) normally distributed then, -\begin_inset Formula $\left|Z\right|$ -\end_inset - - is half-normal. 
- The standard form is
-\begin_inset Formula \begin{eqnarray*}
-f\left(x\right) & = & \sqrt{\frac{2}{\pi}}e^{-x^{2}/2}I_{\left(0,\infty\right)}\left(x\right)\\
-F\left(x\right) & = & 2\Phi\left(x\right)-1\\
-G\left(q\right) & = & \Phi^{-1}\left(\frac{1+q}{2}\right)\end{eqnarray*}
-
-\end_inset
-
-
-\begin_inset Formula \[
-M\left(t\right)=\sqrt{2\pi}e^{t^{2}/2}\Phi\left(t\right)\]
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \begin{eqnarray*}
-\mu & = & \sqrt{\frac{2}{\pi}}\\
-\mu_{2} & = & 1-\frac{2}{\pi}\\
-\gamma_{1} & = & \frac{\sqrt{2}\left(4-\pi\right)}{\left(\pi-2\right)^{3/2}}\\
-\gamma_{2} & = & \frac{8\left(\pi-3\right)}{\left(\pi-2\right)^{2}}\\
-m_{d} & = & 0\\
-m_{n} & = & \Phi^{-1}\left(\frac{3}{4}\right)\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \begin{eqnarray*}
-h\left[X\right] & = & \log\left(\sqrt{\frac{\pi e}{2}}\right)\\
- & \approx & 0.72579135264472743239.\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Half-Logistic
-\end_layout
-
-\begin_layout Standard
-In the limit as
-\begin_inset Formula $c\rightarrow\infty$
-\end_inset
-
- for the generalized half-logistic we have the half-logistic defined over
- 
-\begin_inset Formula $x\geq0.$
-\end_inset
-
- Also, the distribution of
-\begin_inset Formula $\left|X\right|$
-\end_inset
-
- where
-\begin_inset Formula $X$
-\end_inset
-
- has logistic distribution. 
- -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{2e^{-x}}{\left(1+e^{-x}\right)^{2}}=\frac{1}{2}\textrm{sech}^{2}\left(\frac{x}{2}\right)\\ -F\left(x\right) & = & \frac{1-e^{-x}}{1+e^{-x}}=\tanh\left(\frac{x}{2}\right)\\ -G\left(q\right) & = & \log\left(\frac{1+q}{1-q}\right)=2\textrm{arctanh}\left(q\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -M\left(t\right)=1-t\psi_{0}\left(\frac{1}{2}-\frac{t}{2}\right)+t\psi_{0}\left(1-\frac{t}{2}\right)\] - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=2\left(1-2^{1-n}\right)n!\zeta\left(n\right)\quad n\neq1\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu_{1}^{\prime} & = & 2\log\left(2\right)\\ -\mu_{2}^{\prime} & = & 2\zeta\left(2\right)=\frac{\pi^{2}}{3}\\ -\mu_{3}^{\prime} & = & 9\zeta\left(3\right)\\ -\mu_{4}^{\prime} & = & 42\zeta\left(4\right)=\frac{7\pi^{4}}{15}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & 2-\log\left(2\right)\\ - & \approx & 1.3068528194400546906.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Hyperbolic Secant -\end_layout - -\begin_layout Standard -Related to the logistic distribution and used in lifetime analysis. 
- Standard form is (defined over all -\begin_inset Formula $x$ -\end_inset - -) -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{1}{\pi}\textrm{sech}\left(x\right)\\ -F\left(x\right) & = & \frac{2}{\pi}\arctan\left(e^{x}\right)\\ -G\left(q\right) & = & \log\left(\tan\left(\frac{\pi}{2}q\right)\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\sec\left(\frac{\pi}{2}t\right)\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu_{n}^{\prime} & = & \frac{1+\left(-1\right)^{n}}{2\pi2^{2n}}n!\left[\zeta\left(n+1,\frac{1}{4}\right)-\zeta\left(n+1,\frac{3}{4}\right)\right]\\ - & = & \left\{ \begin{array}{cc} -0 & n\textrm{ odd}\\ -C_{n/2}\frac{\pi^{n}}{2^{n}} & n\textrm{ even}\end{array}\right.\end{eqnarray*} - -\end_inset - - where -\begin_inset Formula $C_{m}$ -\end_inset - - is an integer given by -\begin_inset Formula \begin{eqnarray*} -C_{m} & = & \frac{\left(2m\right)!\left[\zeta\left(2m+1,\frac{1}{4}\right)-\zeta\left(2m+1,\frac{3}{4}\right)\right]}{\pi^{2m+1}2^{2m}}\\ - & = & 4\left(-1\right)^{m-1}\frac{16^{m}}{2m+1}B_{2m+1}\left(\frac{1}{4}\right)\end{eqnarray*} - -\end_inset - -where -\begin_inset Formula $B_{2m+1}\left(\frac{1}{4}\right)$ -\end_inset - - is the Bernoulli polynomial of order -\begin_inset Formula $2m+1$ -\end_inset - - evaluated at -\begin_inset Formula $1/4.$ -\end_inset - - Thus -\begin_inset Formula \[ -\mu_{n}^{\prime}=\left\{ \begin{array}{cc} -0 & n\textrm{ odd}\\ -4\left(-1\right)^{n/2-1}\frac{\left(2\pi\right)^{n}}{n+1}B_{n+1}\left(\frac{1}{4}\right) & n\textrm{ even}\end{array}\right.\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -m_{d}=m_{n}=\mu & = & 0\\ -\mu_{2} & = & \frac{\pi^{2}}{4}\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & 2\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=\log\left(2\pi\right).\] - -\end_inset - - -\end_layout - -\begin_layout Section 
-Gauss Hypergeometric -\end_layout - -\begin_layout Standard -\begin_inset Formula $x\in\left[0,1\right]$ -\end_inset - -, -\begin_inset Formula $\alpha>0,\,\beta>0$ -\end_inset - - -\begin_inset Formula \[ -C^{-1}=B\left(\alpha,\beta\right)\,_{2}F_{1}\left(\gamma,\alpha;\alpha+\beta;-z\right)\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\alpha,\beta,\gamma,z\right) & = & Cx^{\alpha-1}\frac{\left(1-x\right)^{\beta-1}}{\left(1+zx\right)^{\gamma}}\\ -\mu_{n}^{\prime} & = & \frac{B\left(n+\alpha,\beta\right)}{B\left(\alpha,\beta\right)}\frac{\,_{2}F_{1}\left(\gamma,\alpha+n;\alpha+\beta+n;-z\right)}{\,_{2}F_{1}\left(\gamma,\alpha;\alpha+\beta;-z\right)}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Inverted Gamma -\end_layout - -\begin_layout Standard -Special case of the generalized Gamma distribution with -\begin_inset Formula $c=-1$ -\end_inset - - and -\begin_inset Formula $a>0$ -\end_inset - -, -\begin_inset Formula $x>0$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;a\right) & = & \frac{x^{-a-1}}{\Gamma\left(a\right)}\exp\left(-\frac{1}{x}\right)\\ -F\left(x;a\right) & = & \frac{\Gamma\left(a,\frac{1}{x}\right)}{\Gamma\left(a\right)}\\ -G\left(q;a\right) & = & \left\{ \Gamma^{-1}\left[a,\Gamma\left(a\right)q\right]\right\} ^{-1}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\frac{\Gamma\left(a-n\right)}{\Gamma\left(a\right)}\quad a>n\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{1}{a-1}\quad a>1\\ -\mu_{2} & = & \frac{1}{\left(a-2\right)\left(a-1\right)}-\mu^{2}\quad a>2\\ -\gamma_{1} & = & \frac{\frac{1}{\left(a-3\right)\left(a-2\right)\left(a-1\right)}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\frac{1}{\left(a-4\right)\left(a-3\right)\left(a-2\right)\left(a-1\right)}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -\end_inset - - -\begin_inset Formula 
\[ -m_{d}=\frac{1}{a+1}\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=a-\left(a+1\right)\Psi\left(a\right)+\log\Gamma\left(a\right).\] - -\end_inset - - -\end_layout - -\begin_layout Section -Inverse Normal (Inverse Gaussian) -\end_layout - -\begin_layout Standard -The standard form involves the shape parameter -\begin_inset Formula $\mu$ -\end_inset - - (in most definitions, -\begin_inset Formula $L=0.0$ -\end_inset - - is used). - (In terms of the regress documentation -\begin_inset Formula $\mu=A/B$ -\end_inset - -) and -\begin_inset Formula $B=S$ -\end_inset - - and -\begin_inset Formula $L$ -\end_inset - - is not a parameter in that distribution. - A standard form is -\begin_inset Formula $x>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\mu\right) & = & \frac{1}{\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-\mu\right)^{2}}{2x\mu^{2}}\right).\\ -F\left(x;\mu\right) & = & \Phi\left(\frac{1}{\sqrt{x}}\frac{x-\mu}{\mu}\right)+\exp\left(\frac{2}{\mu}\right)\Phi\left(-\frac{1}{\sqrt{x}}\frac{x+\mu}{\mu}\right)\\ -G\left(q;\mu\right) & = & F^{-1}\left(q;\mu\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & \mu\\ -\mu_{2} & = & \mu^{3}\\ -\gamma_{1} & = & 3\sqrt{\mu}\\ -\gamma_{2} & = & 15\mu\\ -m_{d} & = & \frac{\mu}{2}\left(\sqrt{9\mu^{2}+4}-3\mu\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -This is related to the canonical form or JKB -\begin_inset Quotes eld -\end_inset - -two-parameter -\begin_inset Quotes erd -\end_inset - - inverse Gaussian when written in it's full form with scale parameter -\begin_inset Formula $S$ -\end_inset - - and location parameter -\begin_inset Formula $L$ -\end_inset - - by taking -\begin_inset Formula $L=0$ -\end_inset - - and -\begin_inset Formula $S\equiv\lambda,$ -\end_inset - - then -\begin_inset Formula $\mu S$ -\end_inset - - is equal to 
-\begin_inset Formula $\mu_{2}$
-\end_inset
-
- where
-\begin_inset Formula $\mu_{2}$
-\end_inset
-
- is the parameter used by JKB.
- We prefer this form because of its consistent use of the scale parameter.
- Notice that in JKB the skew
-\begin_inset Formula $\left(\sqrt{\beta_{1}}\right)$
-\end_inset
-
- and the kurtosis (
-\begin_inset Formula $\beta_{2}-3$
-\end_inset
-
-) are both functions only of
-\begin_inset Formula $\mu_{2}/\lambda=\mu S/S=\mu$
-\end_inset
-
- as shown here, while the variance and mean of the standard form here are
- transformed appropriately.
- 
-\end_layout
-
-\begin_layout Section
-Inverted Weibull
-\end_layout
-
-\begin_layout Standard
-Shape parameter
-\begin_inset Formula $c>0$
-\end_inset
-
- and
-\begin_inset Formula $x>0$
-\end_inset
-
-.
- Then
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;c\right) & = & cx^{-c-1}\exp\left(-x^{-c}\right)\\
-F\left(x;c\right) & = & \exp\left(-x^{-c}\right)\\
-G\left(q;c\right) & = & \left(-\log q\right)^{-1/c}\end{eqnarray*}
-
-\end_inset
-
-
-\begin_inset Formula \[
-h\left[X\right]=1+\gamma+\frac{\gamma}{c}-\log\left(c\right)\]
-
-\end_inset
-
- where
-\begin_inset Formula $\gamma$
-\end_inset
-
- is Euler's constant. 
-\end_layout
-
-\begin_layout Section
-Johnson SB
-\end_layout
-
-\begin_layout Standard
-Defined for
-\begin_inset Formula $x\in\left(0,1\right)$
-\end_inset
-
- with two shape parameters
-\begin_inset Formula $a$
-\end_inset
-
- and
-\begin_inset Formula $b>0.$
-\end_inset
-
-
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;a,b\right) & = & \frac{b}{x\left(1-x\right)}\phi\left(a+b\log\frac{x}{1-x}\right)\\
-F\left(x;a,b\right) & = & \Phi\left(a+b\log\frac{x}{1-x}\right)\\
-G\left(q;a,b\right) & = & \frac{1}{1+\exp\left[-\frac{1}{b}\left(\Phi^{-1}\left(q\right)-a\right)\right]}\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Johnson SU
-\end_layout
-
-\begin_layout Standard
-Defined for all
-\begin_inset Formula $x$
-\end_inset
-
- with two shape parameters
-\begin_inset Formula $a$
-\end_inset
-
- and
-\begin_inset Formula $b>0$
-\end_inset
-
-.
- 
-\begin_inset Formula \begin{eqnarray*}
-f\left(x;a,b\right) & = & \frac{b}{\sqrt{x^{2}+1}}\phi\left(a+b\log\left(x+\sqrt{x^{2}+1}\right)\right)\\
-F\left(x;a,b\right) & = & \Phi\left(a+b\log\left(x+\sqrt{x^{2}+1}\right)\right)\\
-G\left(q;a,b\right) & = & \sinh\left[\frac{\Phi^{-1}\left(q\right)-a}{b}\right]\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-KSone
-\end_layout
-
-\begin_layout Section
-KStwo
-\end_layout
-
-\begin_layout Section
-Laplace (Double Exponential, Bilateral Exponential)
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \begin{eqnarray*}
-f\left(x\right) & = & \frac{1}{2}e^{-\left|x\right|}\\
-F\left(x\right) & = & \left\{ \begin{array}{ccc}
-\frac{1}{2}e^{x} & & x\leq0\\
-1-\frac{1}{2}e^{-x} & & x>0\end{array}\right.\\
-G\left(q\right) & = & \left\{ \begin{array}{ccc}
-\log\left(2q\right) & & q\leq\frac{1}{2}\\
--\log\left(2-2q\right) & & q>\frac{1}{2}\end{array}\right.\end{eqnarray*}
-
-\end_inset
-
-
-\begin_inset Formula \begin{eqnarray*}
-m_{d}=m_{n}=\mu & = & 0\\
-\mu_{2} & = & 2\\
-\gamma_{1} & = & 0\\
-\gamma_{2} & = & 
3\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -The ML estimator of the location parameter is -\begin_inset Formula \[ -\hat{L}=\textrm{median}\left(X_{i}\right)\] - -\end_inset - - where -\begin_inset Formula $X_{i}$ -\end_inset - - is a sequence of -\begin_inset Formula $N$ -\end_inset - - mutually independent Laplace RV's and the median is some number between - the -\begin_inset Formula $\frac{1}{2}N\textrm{th}$ -\end_inset - - and the -\begin_inset Formula $(N/2+1)\textrm{th}$ -\end_inset - - order statistic ( -\emph on -e.g. - -\emph default - take the average of these two) when -\begin_inset Formula $N$ -\end_inset - - is even. - Also, -\begin_inset Formula \[ -\hat{S}=\frac{1}{N}\sum_{j=1}^{N}\left|X_{j}-\hat{L}\right|.\] - -\end_inset - - Replace -\begin_inset Formula $\hat{L}$ -\end_inset - - with -\begin_inset Formula $L$ -\end_inset - - if it is known. - If -\begin_inset Formula $L$ -\end_inset - - is known then this estimator is distributed as -\begin_inset Formula $\left(2N\right)^{-1}S\cdot\chi_{2N}^{2}$ -\end_inset - -. - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & \log\left(2e\right)\\ - & \approx & 1.6931471805599453094.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Left-skewed Lévy -\end_layout - -\begin_layout Standard -Special case of Lévy-stable distribution with -\begin_inset Formula $\alpha=\frac{1}{2}$ -\end_inset - - and -\begin_inset Formula $\beta=-1$ -\end_inset - - the support is -\begin_inset Formula $x<0$ -\end_inset - -. - In standard form -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{1}{\left|x\right|\sqrt{2\pi\left|x\right|}}\exp\left(-\frac{1}{2\left|x\right|}\right)\\ -F\left(x\right) & = & 2\Phi\left(\frac{1}{\sqrt{\left|x\right|}}\right)-1\\ -G\left(q\right) & = & -\left[\Phi^{-1}\left(\frac{q+1}{2}\right)\right]^{-2}.\end{eqnarray*} - -\end_inset - -No moments. 
-\end_layout
-
-\begin_layout Section
-Lévy
-\end_layout
-
-\begin_layout Standard
-A special case of Lévy-stable distributions with
-\begin_inset Formula $\alpha=\frac{1}{2}$
-\end_inset
-
- and
-\begin_inset Formula $\beta=1$
-\end_inset
-
-.
- In standard form it is defined for
-\begin_inset Formula $x>0$
-\end_inset
-
- as
-\begin_inset Formula \begin{eqnarray*}
-f\left(x\right) & = & \frac{1}{x\sqrt{2\pi x}}\exp\left(-\frac{1}{2x}\right)\\
-F\left(x\right) & = & 2\left[1-\Phi\left(\frac{1}{\sqrt{x}}\right)\right]\\
-G\left(q\right) & = & \left[\Phi^{-1}\left(1-\frac{q}{2}\right)\right]^{-2}.\end{eqnarray*}
-
-\end_inset
-
- It has no finite moments.
-\end_layout
-
-\begin_layout Section
-Logistic (Sech-squared)
-\end_layout
-
-\begin_layout Standard
-A special case of the Generalized Logistic distribution with
-\begin_inset Formula $c=1.$
-\end_inset
-
- Defined for all
-\begin_inset Formula $x$
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \begin{eqnarray*}
-f\left(x\right) & = & \frac{\exp\left(-x\right)}{\left[1+\exp\left(-x\right)\right]^{2}}\\
-F\left(x\right) & = & \frac{1}{1+\exp\left(-x\right)}\\
-G\left(q\right) & = & -\log\left(1/q-1\right)\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \begin{eqnarray*}
-\mu & = & \gamma+\psi_{0}\left(1\right)=0\\
-\mu_{2} & = & \frac{\pi^{2}}{6}+\psi_{1}\left(1\right)=\frac{\pi^{2}}{3}\\
-\gamma_{1} & = & \frac{\psi_{2}\left(c\right)+2\zeta\left(3\right)}{\mu_{2}^{3/2}}=0\\
-\gamma_{2} & = & \frac{\left(\frac{\pi^{4}}{15}+\psi_{3}\left(c\right)\right)}{\mu_{2}^{2}}=\frac{6}{5}\\
-m_{d} & = & \log1=0\\
-m_{n} & = & -\log\left(2-1\right)=0\end{eqnarray*}
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Standard
-\begin_inset Formula \[
-h\left[X\right]=1.\]
-
-\end_inset
-
-
-\end_layout
-
-\begin_layout Section
-Log Double Exponential (Log-Laplace)
-\end_layout
-
-\begin_layout Standard
-Defined over
-\begin_inset Formula $x>0$
-\end_inset
-
- with -\begin_inset Formula $c>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \left\{ \begin{array}{ccc} -\frac{c}{2}x^{c-1} & & 00$ -\end_inset - - (Defined for all -\begin_inset Formula $x$ -\end_inset - -) -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{\exp\left(cx-e^{x}\right)}{\Gamma\left(c\right)}\\ -F\left(x;c\right) & = & \frac{\Gamma\left(c,e^{x}\right)}{\Gamma\left(c\right)}\\ -G\left(q;c\right) & = & \log\left[\Gamma^{-1}\left[c,q\Gamma\left(c\right)\right]\right]\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\int_{0}^{\infty}\left[\log y\right]^{n}y^{c-1}\exp\left(-y\right)dy.\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \mu_{1}^{\prime}\\ -\mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ -\gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Log Normal (Cobb-Douglass) -\end_layout - -\begin_layout Standard -Has one shape parameter -\begin_inset Formula $\sigma$ -\end_inset - ->0. - (Notice that the -\begin_inset Quotes eld -\end_inset - -Regress -\begin_inset Quotes erd -\end_inset - - -\begin_inset Formula $A=\log S$ -\end_inset - - where -\begin_inset Formula $S$ -\end_inset - - is the scale parameter and -\begin_inset Formula $A$ -\end_inset - - is the mean of the underlying normal distribution). 
- The standard form is -\begin_inset Formula $x>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\sigma\right) & = & \frac{1}{\sigma x\sqrt{2\pi}}\exp\left[-\frac{1}{2}\left(\frac{\log x}{\sigma}\right)^{2}\right]\\ -F\left(x;\sigma\right) & = & \Phi\left(\frac{\log x}{\sigma}\right)\\ -G\left(q;\sigma\right) & = & \exp\left\{ \sigma\Phi^{-1}\left(q\right)\right\} \end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \exp\left(\sigma^{2}/2\right)\\ -\mu_{2} & = & \exp\left(\sigma^{2}\right)\left[\exp\left(\sigma^{2}\right)-1\right]\\ -\gamma_{1} & = & \sqrt{p-1}\left(2+p\right)\\ -\gamma_{2} & = & p^{4}+2p^{3}+3p^{2}-6\quad\quad p=e^{\sigma^{2}}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -Notice that using JKB notation we have -\begin_inset Formula $\theta=L,$ -\end_inset - - -\begin_inset Formula $\zeta=\log S$ -\end_inset - - and we have given the so-called antilognormal form of the distribution. - This is more consistent with the location, scale parameter description - of general probability distributions. - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=\frac{1}{2}\left[1+\log\left(2\pi\right)+2\log\left(\sigma\right)\right].\] - -\end_inset - - -\end_layout - -\begin_layout Standard -Also, note that if -\begin_inset Formula $X$ -\end_inset - - is a log-normally distributed random-variable with -\begin_inset Formula $L=0$ -\end_inset - - and -\begin_inset Formula $S$ -\end_inset - - and shape parameter -\begin_inset Formula $\sigma.$ -\end_inset - - Then, -\begin_inset Formula $\log X$ -\end_inset - - is normally distributed with variance -\begin_inset Formula $\sigma^{2}$ -\end_inset - - and mean -\begin_inset Formula $\log S.$ -\end_inset - - -\end_layout - -\begin_layout Section -Nakagami -\end_layout - -\begin_layout Standard -Generalization of the chi distribution. 
- Shape parameter is -\begin_inset Formula $\nu>0.$ -\end_inset - - Defined for -\begin_inset Formula $x>0.$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\nu\right) & = & \frac{2\nu^{\nu}}{\Gamma\left(\nu\right)}x^{2\nu-1}\exp\left(-\nu x^{2}\right)\\ -F\left(x;\nu\right) & = & \Gamma\left(\nu,\nu x^{2}\right)\\ -G\left(q;\nu\right) & = & \sqrt{\frac{1}{\nu}\Gamma^{-1}\left(v,q\right)}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{\Gamma\left(\nu+\frac{1}{2}\right)}{\sqrt{\nu}\Gamma\left(\nu\right)}\\ -\mu_{2} & = & \left[1-\mu^{2}\right]\\ -\gamma_{1} & = & \frac{\mu\left(1-4v\mu_{2}\right)}{2\nu\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{-6\mu^{4}\nu+\left(8\nu-2\right)\mu^{2}-2\nu+1}{\nu\mu_{2}^{2}}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Noncentral beta* -\end_layout - -\begin_layout Standard -Defined over -\begin_inset Formula $x\in\left[0,1\right]$ -\end_inset - - with -\begin_inset Formula $a>0$ -\end_inset - - and -\begin_inset Formula $b>0$ -\end_inset - - and -\begin_inset Formula $c\geq0$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -F\left(x;a,b,c\right)=\sum_{j=0}^{\infty}\frac{e^{-c/2}\left(\frac{c}{2}\right)^{j}}{j!}I_{B}\left(a+j,b;0\right)\] - -\end_inset - - -\end_layout - -\begin_layout Section -Noncentral chi* -\end_layout - -\begin_layout Section -Noncentral chi-squared -\end_layout - -\begin_layout Standard -The distribution of -\begin_inset Formula $\sum_{i=1}^{\nu}\left(Z_{i}+\delta_{i}\right)^{2}$ -\end_inset - - where -\begin_inset Formula $Z_{i}$ -\end_inset - - are independent standard normal variables and -\begin_inset Formula $\delta_{i}$ -\end_inset - - are constants. - -\begin_inset Formula $\lambda=\sum_{i=1}^{\nu}\delta_{i}^{2}>0.$ -\end_inset - - (In communications it is called the Marcum-Q function). - Can be thought of as a Generalized Rayleigh-Rice distribution. 
- For -\begin_inset Formula $x>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\nu,\lambda\right) & = & e^{-\left(\lambda+x\right)/2}\frac{1}{2}\left(\frac{x}{\lambda}\right)^{\left(\nu-2\right)/4}I_{\left(\nu-2\right)/2}\left(\sqrt{\lambda x}\right)\\ -F\left(x;\nu,\lambda\right) & = & \sum_{j=0}^{\infty}\left\{ \frac{\left(\lambda/2\right)^{j}}{j!}e^{-\lambda/2}\right\} \textrm{Pr}\left[\chi_{\nu+2j}^{2}\leq x\right]\\ -G\left(q;\nu,\lambda\right) & = & F^{-1}\left(x;\nu,\lambda\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \nu+\lambda\\ -\mu_{2} & = & 2\left(\nu+2\lambda\right)\\ -\gamma_{1} & = & \frac{\sqrt{8}\left(\nu+3\lambda\right)}{\left(\nu+2\lambda\right)^{3/2}}\\ -\gamma_{2} & = & \frac{12\left(\nu+4\lambda\right)}{\left(\nu+2\lambda\right)^{2}}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Noncentral F -\end_layout - -\begin_layout Standard -Let -\begin_inset Formula $\lambda>0$ -\end_inset - - and -\begin_inset Formula $\nu_{1}>0$ -\end_inset - - and -\begin_inset Formula $\nu_{2}>0.$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;\lambda,\nu_{1},\nu_{2}\right) & = & \exp\left[\frac{\lambda}{2}+\frac{\left(\lambda\nu_{1}x\right)}{2\left(\nu_{1}x+\nu_{2}\right)}\right]\nu_{1}^{\nu_{1}/2}\nu_{2}^{\nu_{2}/2}x^{\nu_{1}/2-1}\\ - & & \times\left(\nu_{2}+\nu_{1}x\right)^{-\left(\nu_{1}+\nu_{2}\right)/2}\frac{\Gamma\left(\frac{\nu_{1}}{2}\right)\Gamma\left(1+\frac{\nu_{2}}{2}\right)L_{\nu_{2}/2}^{\nu_{1}/2-1}\left(-\frac{\lambda\nu_{1}x}{2\left(\nu_{1}x+\nu_{2}\right)}\right)}{B\left(\frac{\nu_{1}}{2},\frac{\nu_{2}}{2}\right)\Gamma\left(\frac{\nu_{1}+\nu_{2}}{2}\right)}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Noncentral t -\end_layout - -\begin_layout Standard -The distribution of the ratio -\begin_inset Formula \[ -\frac{U+\lambda}{\chi_{\nu}/\sqrt{\nu}}\] - -\end_inset - - where 
-\begin_inset Formula $U$ -\end_inset - - and -\begin_inset Formula $\chi_{\nu}$ -\end_inset - - are independent and distributed as a standard normal and chi with -\begin_inset Formula $\nu$ -\end_inset - - degrees of freedom. - Note -\begin_inset Formula $\lambda>0$ -\end_inset - - and -\begin_inset Formula $\nu>0$ -\end_inset - -. - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\lambda,\nu\right) & = & \frac{\nu^{\nu/2}\Gamma\left(\nu+1\right)}{2^{\nu}e^{\lambda^{2}/2}\left(\nu+x^{2}\right)^{\nu/2}\Gamma\left(\nu/2\right)}\\ - & & \times\left\{ \frac{\sqrt{2}\lambda x\,_{1}F_{1}\left(\frac{\nu}{2}+1;\frac{3}{2};\frac{\lambda^{2}x^{2}}{2\left(\nu+x^{2}\right)}\right)}{\left(\nu+x^{2}\right)\Gamma\left(\frac{\nu+1}{2}\right)}\right.\\ - & & -\left.\frac{\,_{1}F_{1}\left(\frac{\nu+1}{2};\frac{1}{2};\frac{\lambda^{2}x^{2}}{2\left(\nu+x^{2}\right)}\right)}{\sqrt{\nu+x^{2}}\Gamma\left(\frac{\nu}{2}+1\right)}\right\} \\ - & = & \frac{\Gamma\left(\nu+1\right)}{2^{\left(\nu-1\right)/2}\sqrt{\pi\nu}\Gamma\left(\nu/2\right)}\exp\left[-\frac{\nu\lambda^{2}}{\nu+x^{2}}\right]\\ - & & \times\left(\frac{\nu}{\nu+x^{2}}\right)^{\left(\nu-1\right)/2}Hh_{\nu}\left(-\frac{\lambda x}{\sqrt{\nu+x^{2}}}\right)\\ -F\left(x;\lambda,\nu\right) & =\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Normal -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{e^{-x^{2}/2}}{\sqrt{2\pi}}\\ -F\left(x\right) & = & \Phi\left(x\right)=\frac{1}{2}+\frac{1}{2}\textrm{erf}\left(\frac{\textrm{x}}{\sqrt{2}}\right)\\ -G\left(q\right) & = & \Phi^{-1}\left(q\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\align center -\begin_inset Formula \begin{eqnarray*} -m_{d}=m_{n}=\mu & = & 0\\ -\mu_{2} & = & 1\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & 0\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -h\left[X\right] & = & 
\log\left(\sqrt{2\pi e}\right)\\ - & \approx & 1.4189385332046727418\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Maxwell -\end_layout - -\begin_layout Standard -This is a special case of the Chi distribution with -\begin_inset Formula $L=0$ -\end_inset - - and -\begin_inset Formula $S=S=\frac{1}{\sqrt{a}}$ -\end_inset - - and -\begin_inset Formula $\nu=3.$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \sqrt{\frac{2}{\pi}}x^{2}e^{-x^{2}/2}I_{\left(0,\infty\right)}\left(x\right)\\ -F\left(x\right) & = & \Gamma\left(\frac{3}{2},\frac{x^{2}}{2}\right)\\ -G\left(\alpha\right) & = & \sqrt{2\Gamma^{-1}\left(\frac{3}{2},\alpha\right)}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & 2\sqrt{\frac{2}{\pi}}\\ -\mu_{2} & = & 3-\frac{8}{\pi}\\ -\gamma_{1} & = & \sqrt{2}\frac{32-10\pi}{\left(3\pi-8\right)^{3/2}}\\ -\gamma_{2} & = & \frac{-12\pi^{2}+160\pi-384}{\left(3\pi-8\right)^{2}}\\ -m_{d} & = & \sqrt{2}\\ -m_{n} & = & \sqrt{2\Gamma^{-1}\left(\frac{3}{2},\frac{1}{2}\right)}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=\log\left(\sqrt{\frac{2\pi}{e}}\right)+\gamma.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Mielke's Beta-Kappa -\end_layout - -\begin_layout Standard -A generalized F distribution. - Two shape parameters -\begin_inset Formula $\kappa$ -\end_inset - - and -\begin_inset Formula $\theta$ -\end_inset - -, and -\begin_inset Formula $x>0$ -\end_inset - -. - The -\begin_inset Formula $\beta$ -\end_inset - - in the DATAPLOT reference is a scale parameter. 
-\begin_inset Formula \begin{eqnarray*} -f\left(x;\kappa,\theta\right) & = & \frac{\kappa x^{\kappa-1}}{\left(1+x^{\theta}\right)^{1+\frac{\kappa}{\theta}}}\\ -F\left(x;\kappa,\theta\right) & = & \frac{x^{\kappa}}{\left(1+x^{\theta}\right)^{\kappa/\theta}}\\ -G\left(q;\kappa,\theta\right) & = & \left(\frac{q^{\theta/\kappa}}{1-q^{\theta/\kappa}}\right)^{1/\theta}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Pareto -\end_layout - -\begin_layout Standard -For -\begin_inset Formula $x\geq1$ -\end_inset - - and -\begin_inset Formula $b>0$ -\end_inset - -. - Standard form is -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;b\right) & = & \frac{b}{x^{b+1}}\\ -F\left(x;b\right) & = & 1-\frac{1}{x^{b}}\\ -G\left(q;b\right) & = & \left(1-q\right)^{-1/b}\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{b}{b-1}\quad b>1\\ -\mu_{2} & = & \frac{b}{\left(b-2\right)\left(b-1\right)^{2}}\quad b>2\\ -\gamma_{1} & = & \frac{2\left(b+1\right)\sqrt{b-2}}{\left(b-3\right)\sqrt{b}}\quad b>3\\ -\gamma_{2} & = & \frac{6\left(b^{3}+b^{2}-6b-2\right)}{b\left(b^{2}-7b+12\right)}\quad b>4\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left(X\right)=\frac{1}{c}+1-\log\left(c\right)\] - -\end_inset - - -\end_layout - -\begin_layout Section -Pareto Second Kind (Lomax) -\end_layout - -\begin_layout Standard -\begin_inset Formula $c>0.$ -\end_inset - - This is Pareto of the first kind with -\begin_inset Formula $L=-1.0$ -\end_inset - - so -\begin_inset Formula $x\geq0$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{c}{\left(1+x\right)^{c+1}}\\ -F\left(x;c\right) & = & 1-\frac{1}{\left(1+x\right)^{c}}\\ -G\left(q;c\right) & = & \left(1-q\right)^{-1/c}-1\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ 
-h\left[X\right]=\frac{1}{c}+1-\log\left(c\right).\] - -\end_inset - - -\end_layout - -\begin_layout Section -Power Log Normal -\end_layout - -\begin_layout Standard -A generalization of the log-normal distribution -\begin_inset Formula $\sigma>0$ -\end_inset - - and -\begin_inset Formula $c>0$ -\end_inset - - and -\begin_inset Formula $x>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\sigma,c\right) & = & \frac{c}{x\sigma}\phi\left(\frac{\log x}{\sigma}\right)\left(\Phi\left(-\frac{\log x}{\sigma}\right)\right)^{c-1}\\ -F\left(x;\sigma,c\right) & = & 1-\left(\Phi\left(-\frac{\log x}{\sigma}\right)\right)^{c}\\ -G\left(q;\sigma,c\right) & = & \exp\left[-\sigma\Phi^{-1}\left[\left(1-q\right)^{1/c}\right]\right]\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\int_{0}^{1}\exp\left[-n\sigma\Phi^{-1}\left(y^{1/c}\right)\right]dy\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \mu_{1}^{\prime}\\ -\mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ -\gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -\end_inset - - This distribution reduces to the log-normal distribution when -\begin_inset Formula $c=1.$ -\end_inset - - -\end_layout - -\begin_layout Section -Power Normal -\end_layout - -\begin_layout Standard -A generalization of the normal distribution, -\begin_inset Formula $c>0$ -\end_inset - - for -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & c\phi\left(x\right)\left(\Phi\left(-x\right)\right)^{c-1}\\ -F\left(x;c\right) & = & 1-\left(\Phi\left(-x\right)\right)^{c}\\ -G\left(q;c\right) & = & -\Phi^{-1}\left[\left(1-q\right)^{1/c}\right]\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\left(-1\right)^{n}\int_{0}^{1}\left[\Phi^{-1}\left(y^{1/c}\right)\right]^{n}dy\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu 
& = & \mu_{1}^{\prime}\\ -\mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ -\gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ -\gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -\end_inset - -For -\begin_inset Formula $c=1$ -\end_inset - - this reduces to the normal distribution. - -\end_layout - -\begin_layout Section -Power-function -\end_layout - -\begin_layout Standard -A special case of the beta distribution with -\begin_inset Formula $b=1$ -\end_inset - -: defined for -\begin_inset Formula $x\in\left[0,1\right]$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -a>0\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;a\right) & = & ax^{a-1}\\ -F\left(x;a\right) & = & x^{a}\\ -G\left(q;a\right) & = & q^{1/a}\\ -\mu & = & \frac{a}{a+1}\\ -\mu_{2} & = & \frac{a\left(a+2\right)}{\left(a+1\right)^{2}}\\ -\gamma_{1} & = & 2\left(1-a\right)\sqrt{\frac{a+2}{a\left(a+3\right)}}\\ -\gamma_{2} & = & \frac{6\left(a^{3}-a^{2}-6a+2\right)}{a\left(a+3\right)\left(a+4\right)}\\ -m_{d} & = & 1\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=1-\frac{1}{a}-\log\left(a\right)\] - -\end_inset - - -\end_layout - -\begin_layout Section -R-distribution -\end_layout - -\begin_layout Standard -A general-purpose distribution with a variety of shapes controlled by -\begin_inset Formula $c>0.$ -\end_inset - - Range of standard distribution is -\begin_inset Formula $x\in\left[-1,1\right]$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{\left(1-x^{2}\right)^{c/2-1}}{B\left(\frac{1}{2},\frac{c}{2}\right)}\\ -F\left(x;c\right) & = & \frac{1}{2}+\frac{x}{B\left(\frac{1}{2},\frac{c}{2}\right)}\,_{2}F_{1}\left(\frac{1}{2},1-\frac{c}{2};\frac{3}{2};x^{2}\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ 
-\mu_{n}^{\prime}=\frac{\left(1+\left(-1\right)^{n}\right)}{2}B\left(\frac{n+1}{2},\frac{c}{2}\right)\] - -\end_inset - - The R-distribution with parameter -\begin_inset Formula $n$ -\end_inset - - is the distribution of the correlation coefficient of a random sample of - size -\begin_inset Formula $n$ -\end_inset - - drawn from a bivariate normal distribution with -\begin_inset Formula $\rho=0.$ -\end_inset - - The mean of the standard distribution is always zero and as the sample - size grows, the distribution's mass concentrates more closely about this - mean. - -\end_layout - -\begin_layout Section -Rayleigh -\end_layout - -\begin_layout Standard -This is Chi distribution with -\begin_inset Formula $L=0.0$ -\end_inset - - and -\begin_inset Formula $\nu=2$ -\end_inset - - and -\begin_inset Formula $S=S$ -\end_inset - - (no location parameter is generally used), the mode of the distribution - is -\begin_inset Formula $S.$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(r\right) & = & re^{-r^{2}/2}I_{[0,\infty)}\left(x\right)\\ -F\left(r\right) & = & 1-e^{-r^{2}/2}I_{[0,\infty)}\left(x\right)\\ -G\left(q\right) & = & \sqrt{-2\log\left(1-q\right)}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \sqrt{\frac{\pi}{2}}\\ -\mu_{2} & = & \frac{4-\pi}{2}\\ -\gamma_{1} & = & \frac{2\left(\pi-3\right)\sqrt{\pi}}{\left(4-\pi\right)^{3/2}}\\ -\gamma_{2} & = & \frac{24\pi-6\pi^{2}-16}{\left(4-\pi\right)^{2}}\\ -m_{d} & = & 1\\ -m_{n} & = & \sqrt{2\log\left(2\right)}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=\frac{\gamma}{2}+\log\left(\frac{e}{\sqrt{2}}\right).\] - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -\mu_{n}^{\prime}=\sqrt{2^{n}}\Gamma\left(\frac{n}{2}+1\right)\] - -\end_inset - - -\end_layout - -\begin_layout Section -Rice* -\end_layout - -\begin_layout Standard -Defined for -\begin_inset Formula $x>0$ -\end_inset - - and -\begin_inset Formula $b>0$ 
-\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;b\right) & = & x\exp\left(-\frac{x^{2}+b^{2}}{2}\right)I_{0}\left(xb\right)\\ -F\left(x;b\right) & = & \int_{0}^{x}\alpha\exp\left(-\frac{\alpha^{2}+b^{2}}{2}\right)I_{0}\left(\alpha b\right)d\alpha\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -\mu_{n}^{\prime}=\sqrt{2^{n}}\Gamma\left(1+\frac{n}{2}\right)\,_{1}F_{1}\left(-\frac{n}{2};1;-\frac{b^{2}}{2}\right)\] - -\end_inset - - -\end_layout - -\begin_layout Section -Reciprocal -\end_layout - -\begin_layout Standard -Shape parameters -\begin_inset Formula $a,b>0$ -\end_inset - - -\begin_inset Formula $x\in\left[a,b\right]$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;a,b\right) & = & \frac{1}{x\log\left(b/a\right)}\\ -F\left(x;a,b\right) & = & \frac{\log\left(x/a\right)}{\log\left(b/a\right)}\\ -G\left(q;a,b\right) & = & a\exp\left(q\log\left(b/a\right)\right)=a\left(\frac{b}{a}\right)^{q}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -d & = & \log\left(a/b\right)\\ -\mu & = & \frac{a-b}{d}\\ -\mu_{2} & = & \mu\frac{a+b}{2}-\mu^{2}=\frac{\left(a-b\right)\left[a\left(d-2\right)+b\left(d+2\right)\right]}{2d^{2}}\\ -\gamma_{1} & = & \frac{\sqrt{2}\left[12d\left(a-b\right)^{2}+d^{2}\left(a^{2}\left(2d-9\right)+2abd+b^{2}\left(2d+9\right)\right)\right]}{3d\sqrt{a-b}\left[a\left(d-2\right)+b\left(d+2\right)\right]^{3/2}}\\ -\gamma_{2} & = & \frac{-36\left(a-b\right)^{3}+36d\left(a-b\right)^{2}\left(a+b\right)-16d^{2}\left(a^{3}-b^{3}\right)+3d^{3}\left(a^{2}+b^{2}\right)\left(a+b\right)}{3\left(a-b\right)\left[a\left(d-2\right)+b\left(d+2\right)\right]^{2}}-3\\ -m_{d} & = & a\\ -m_{n} & = & \sqrt{ab}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=\frac{1}{2}\log\left(ab\right)+\log\left[\log\left(\frac{b}{a}\right)\right].\] - -\end_inset - - -\end_layout - -\begin_layout Section -Reciprocal 
Inverse Gaussian -\end_layout - -\begin_layout Standard -The pdf is found from the inverse gaussian (IG), -\begin_inset Formula $f_{RIG}\left(x;\mu\right)=\frac{1}{x^{2}}f_{IG}\left(\frac{1}{x};\mu\right)$ -\end_inset - - defined for -\begin_inset Formula $x\geq0$ -\end_inset - - as -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f_{IG}\left(x;\mu\right) & = & \frac{1}{\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-\mu\right)^{2}}{2x\mu^{2}}\right).\\ -F_{IG}\left(x;\mu\right) & = & \Phi\left(\frac{1}{\sqrt{x}}\frac{x-\mu}{\mu}\right)+\exp\left(\frac{2}{\mu}\right)\Phi\left(-\frac{1}{\sqrt{x}}\frac{x+\mu}{\mu}\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f_{RIG}\left(x;\mu\right) & = & \frac{1}{\sqrt{2\pi x}}\exp\left(-\frac{\left(1-\mu x\right)^{2}}{2x\mu^{2}}\right)\\ -F_{RIG}\left(x;\mu\right) & = & 1-F_{IG}\left(\frac{1}{x},\mu\right)\\ - & = & 1-\Phi\left(\frac{1}{\sqrt{x}}\frac{1-\mu x}{\mu}\right)-\exp\left(\frac{2}{\mu}\right)\Phi\left(-\frac{1}{\sqrt{x}}\frac{1+\mu x}{\mu}\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Semicircular -\end_layout - -\begin_layout Standard -Defined on -\begin_inset Formula $x\in\left[-1,1\right]$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{2}{\pi}\sqrt{1-x^{2}}\\ -F\left(x\right) & = & \frac{1}{2}+\frac{1}{\pi}\left[x\sqrt{1-x^{2}}+\arcsin x\right]\\ -G\left(q\right) & = & F^{-1}\left(q\right)\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -m_{d}=m_{n}=\mu & = & 0\\ -\mu_{2} & = & \frac{1}{4}\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & -1\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=0.64472988584940017414.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Studentized Range* -\end_layout - -\begin_layout Section -Student t -\end_layout - -\begin_layout Standard -Shape parameter -\begin_inset 
Formula $\nu>0.$ -\end_inset - - -\begin_inset Formula $I\left(a,b,x\right)$ -\end_inset - - is the incomplete beta integral and -\begin_inset Formula $I^{-1}\left(a,b,I\left(a,b,x\right)\right)=x$ -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x;\nu\right) & = & \frac{\Gamma\left(\frac{\nu+1}{2}\right)}{\sqrt{\pi\nu}\Gamma\left(\frac{\nu}{2}\right)\left[1+\frac{x^{2}}{\nu}\right]^{\frac{\nu+1}{2}}}\\ -F\left(x;\nu\right) & = & \left\{ \begin{array}{ccc} -\frac{1}{2}I\left(\frac{\nu}{2},\frac{1}{2},\frac{\nu}{\nu+x^{2}}\right) & & x\leq0\\ -1-\frac{1}{2}I\left(\frac{\nu}{2},\frac{1}{2},\frac{\nu}{\nu+x^{2}}\right) & & x\geq0\end{array}\right.\\ -G\left(q;\nu\right) & = & \left\{ \begin{array}{ccc} --\sqrt{\frac{\nu}{I^{-1}\left(\frac{\nu}{2},\frac{1}{2},2q\right)}-\nu} & & q\leq\frac{1}{2}\\ -\sqrt{\frac{\nu}{I^{-1}\left(\frac{\nu}{2},\frac{1}{2},2-2q\right)}-\nu} & & q\geq\frac{1}{2}\end{array}\right.\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -m_{n}=m_{d}=\mu & = & 0\\ -\mu_{2} & = & \frac{\nu}{\nu-2}\quad\nu>2\\ -\gamma_{1} & = & 0\quad\nu>3\\ -\gamma_{2} & = & \frac{6}{\nu-4}\quad\nu>4\end{eqnarray*} - -\end_inset - - As -\begin_inset Formula $\nu\rightarrow\infty,$ -\end_inset - - this distribution approaches the standard normal distribution. 
- -\end_layout - -\begin_layout Standard -\begin_inset Formula \[ -h\left[X\right]=\frac{1}{4}\log\left(\frac{\pi c\Gamma^{2}\left(\frac{c}{2}\right)}{\Gamma^{2}\left(\frac{c+1}{2}\right)}\right)-\frac{\left(c+1\right)}{4}\left[\Psi\left(\frac{c}{2}\right)-cZ\left(c\right)+\pi\tan\left(\frac{\pi c}{2}\right)+\gamma+2\log2\right]\] - -\end_inset - -where -\begin_inset Formula \[ -Z\left(c\right)=\,_{3}F_{2}\left(1,1,1+\frac{c}{2};\frac{3}{2},2;1\right)=\sum_{k=0}^{\infty}\frac{k!}{k+1}\frac{\Gamma\left(\frac{c}{2}+1+k\right)}{\Gamma\left(\frac{c}{2}+1\right)}\frac{\Gamma\left(\frac{3}{2}\right)}{\Gamma\left(\frac{3}{2}+k\right)}\] - -\end_inset - - -\end_layout - -\begin_layout Section -Student Z -\end_layout - -\begin_layout Standard -The student Z distriubtion is defined over all space with one shape parameter - -\begin_inset Formula $\nu>0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;\nu\right) & = & \frac{\Gamma\left(\frac{\nu}{2}\right)}{\sqrt{\pi}\Gamma\left(\frac{\nu-1}{2}\right)}\left(1+x^{2}\right)^{-\nu/2}\\ -F\left(x;\nu\right) & = & \left\{ \begin{array}{ccc} -Q\left(x;\nu\right) & & x\leq0\\ -1-Q\left(x;\nu\right) & & x\geq0\end{array}\right.\\ -Q\left(x;\nu\right) & = & \frac{\left|x\right|^{1-n}\Gamma\left(\frac{n}{2}\right)\,_{2}F_{1}\left(\frac{n-1}{2},\frac{n}{2};\frac{n+1}{2};-\frac{1}{x^{2}}\right)}{2\sqrt{\pi}\Gamma\left(\frac{n+1}{2}\right)}\end{eqnarray*} - -\end_inset - -Interesting moments are -\begin_inset Formula \begin{eqnarray*} -\mu & = & 0\\ -\sigma^{2} & = & \frac{1}{\nu-3}\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & \frac{6}{\nu-5}.\end{eqnarray*} - -\end_inset - - The moment generating function is -\begin_inset Formula \[ -\theta\left(t\right)=2\sqrt{\left|\frac{t}{2}\right|^{\nu-1}}\frac{K_{\left(n-1\right)/2}\left(\left|t\right|\right)}{\Gamma\left(\frac{\nu-1}{2}\right)}.\] - -\end_inset - - -\end_layout - -\begin_layout Section -Symmetric Power* -\end_layout - -\begin_layout Section -Triangular -\end_layout - 
-\begin_layout Standard -One shape parameter -\begin_inset Formula $c\in[0,1]$ -\end_inset - - giving the distance to the peak as a percentage of the total extent of - the non-zero portion. - The location parameter is the start of the non-zero portion, and the scale-para -meter is the width of the non-zero portion. - In standard form we have -\begin_inset Formula $x\in\left[0,1\right].$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \left\{ \begin{array}{ccc} -2\frac{x}{c} & & x0$ -\end_inset - -. - Note, the PDF and CDF functions are periodic and are always defined over - -\begin_inset Formula $x\in\left[-\pi,\pi\right]$ -\end_inset - - regardless of the location parameter. - Thus, if an input beyond this range is given, it is converted to the equivalent - angle in this range. - For values of -\begin_inset Formula $b<100$ -\end_inset - - the PDF and CDF formulas below are used. - Otherwise, a normal approximation with variance -\begin_inset Formula $1/b$ -\end_inset - - is used. - -\begin_inset Formula \begin{eqnarray*} -f\left(x;b\right) & = & \frac{e^{b\cos x}}{2\pi I_{0}\left(b\right)}\\ -F\left(x;b\right) & = & \frac{1}{2}+\frac{x}{2\pi}+\sum_{k=1}^{\infty}\frac{I_{k}\left(b\right)\sin\left(kx\right)}{I_{0}\left(b\right)\pi k}\\ -G\left(q;b\right) & = & F^{-1}\left(x;b\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & 0\\ -\mu_{2} & = & \int_{-\pi}^{\pi}x^{2}f\left(x;b\right)dx\\ -\gamma_{1} & = & 0\\ -\gamma_{2} & = & \frac{\int_{-\pi}^{\pi}x^{4}f\left(x;b\right)dx}{\mu_{2}^{2}}-3\end{eqnarray*} - -\end_inset - - This can be used for defining circular variance. - -\end_layout - -\begin_layout Section -Wald -\end_layout - -\begin_layout Standard -Special case of the Inverse Normal with shape parameter set to -\begin_inset Formula $1.0$ -\end_inset - -. - Defined for -\begin_inset Formula $x>0$ -\end_inset - -. 
-\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -f\left(x\right) & = & \frac{1}{\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-1\right)^{2}}{2x}\right).\\ -F\left(x\right) & = & \Phi\left(\frac{x-1}{\sqrt{x}}\right)+\exp\left(2\right)\Phi\left(-\frac{x+1}{\sqrt{x}}\right)\\ -G\left(q;\mu\right) & = & F^{-1}\left(q;\mu\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Standard -\begin_inset Formula \begin{eqnarray*} -\mu & = & 1\\ -\mu_{2} & = & 1\\ -\gamma_{1} & = & 3\\ -\gamma_{2} & = & 15\\ -m_{d} & = & \frac{1}{2}\left(\sqrt{13}-3\right)\end{eqnarray*} - -\end_inset - - -\end_layout - -\begin_layout Section -Wishart* -\end_layout - -\begin_layout Section -Wrapped Cauchy -\end_layout - -\begin_layout Standard -For -\begin_inset Formula $x\in\left[0,2\pi\right]$ -\end_inset - - -\begin_inset Formula $c\in\left(0,1\right)$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -f\left(x;c\right) & = & \frac{1-c^{2}}{2\pi\left(1+c^{2}-2c\cos x\right)}\\ -g_{c}\left(x\right) & = & \frac{1}{\pi}\arctan\left[\frac{1+c}{1-c}\tan\left(\frac{x}{2}\right)\right]\\ -r_{c}\left(q\right) & = & 2\arctan\left[\frac{1-c}{1+c}\tan\left(\pi q\right)\right]\\ -F\left(x;c\right) & = & \left\{ \begin{array}{ccc} -g_{c}\left(x\right) & & 0\leq x<\pi\\ -1-g_{c}\left(2\pi-x\right) & & \pi\leq x\leq2\pi\end{array}\right.\\ -G\left(q;c\right) & = & \left\{ \begin{array}{ccc} -r_{c}\left(q\right) & & 0\leq q<\frac{1}{2}\\ -2\pi-r_{c}\left(1-q\right) & & \frac{1}{2}\leq q\leq1\end{array}\right.\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -\] - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=\log\left(2\pi\left(1-c^{2}\right)\right).\] - -\end_inset - - -\end_layout - -\end_body -\end_document diff --git a/scipy-0.10.1/doc/source/tutorial/stats/continuous.rst b/scipy-0.10.1/doc/source/tutorial/stats/continuous.rst deleted file mode 100644 index 03618434fa..0000000000 --- 
a/scipy-0.10.1/doc/source/tutorial/stats/continuous.rst +++ /dev/null @@ -1,2564 +0,0 @@ -.. _continuous-random-variables: - -==================================== -Continuous Statistical Distributions -==================================== - -Overview -======== - -All distributions will have location (L) and Scale (S) parameters -along with any shape parameters needed, the names for the shape -parameters will vary. Standard form for the distributions will be -given where :math:`L=0.0` and :math:`S=1.0.` The nonstandard forms can be obtained for the various functions using -(note :math:`U` is a standard uniform random variate). - - -====================================== ============================================================================================================================== ========================================================================================================================================= -Function Name Standard Function Transformation -====================================== ============================================================================================================================== ========================================================================================================================================= -Cumulative Distribution Function (CDF) :math:`F\left(x\right)` :math:`F\left(x;L,S\right)=F\left(\frac{\left(x-L\right)}{S}\right)` -Probability Density Function (PDF) :math:`f\left(x\right)=F^{\prime}\left(x\right)` :math:`f\left(x;L,S\right)=\frac{1}{S}f\left(\frac{\left(x-L\right)}{S}\right)` -Percent Point Function (PPF) :math:`G\left(q\right)=F^{-1}\left(q\right)` :math:`G\left(q;L,S\right)=L+SG\left(q\right)` -Probability Sparsity Function (PSF) :math:`g\left(q\right)=G^{\prime}\left(q\right)` :math:`g\left(q;L,S\right)=Sg\left(q\right)` -Hazard Function (HF) :math:`h_{a}\left(x\right)=\frac{f\left(x\right)}{1-F\left(x\right)}` 
:math:`h_{a}\left(x;L,S\right)=\frac{1}{S}h_{a}\left(\frac{\left(x-L\right)}{S}\right)` -Cumulative Hazard Functon (CHF) :math:`H_{a}\left(x\right)=` :math:`\log\frac{1}{1-F\left(x\right)}` :math:`H_{a}\left(x;L,S\right)=H_{a}\left(\frac{\left(x-L\right)}{S}\right)` -Survival Function (SF) :math:`S\left(x\right)=1-F\left(x\right)` :math:`S\left(x;L,S\right)=S\left(\frac{\left(x-L\right)}{S}\right)` -Inverse Survival Function (ISF) :math:`Z\left(\alpha\right)=S^{-1}\left(\alpha\right)=G\left(1-\alpha\right)` :math:`Z\left(\alpha;L,S\right)=L+SZ\left(\alpha\right)` -Moment Generating Function (MGF) :math:`M_{Y}\left(t\right)=E\left[e^{Yt}\right]` :math:`M_{X}\left(t\right)=e^{Lt}M_{Y}\left(St\right)` -Random Variates :math:`Y=G\left(U\right)` :math:`X=L+SY` -(Differential) Entropy :math:`h\left[Y\right]=-\int f\left(y\right)\log f\left(y\right)dy` :math:`h\left[X\right]=h\left[Y\right]+\log S` -(Non-central) Moments :math:`\mu_{n}^{\prime}=E\left[Y^{n}\right]` :math:`E\left[X^{n}\right]=L^{n}\sum_{k=0}^{N}\left(\begin{array}{c} n\\ k\end{array}\right)\left(\frac{S}{L}\right)^{k}\mu_{k}^{\prime}` -Central Moments :math:`\mu_{n}=E\left[\left(Y-\mu\right)^{n}\right]` :math:`E\left[\left(X-\mu_{X}\right)^{n}\right]=S^{n}\mu_{n}` -mean (mode, median), var :math:`\mu,\,\mu_{2}` :math:`L+S\mu,\, S^{2}\mu_{2}` -skewness, kurtosis :math:`\gamma_{1}=\frac{\mu_{3}}{\left(\mu_{2}\right)^{3/2}},\,` :math:`\gamma_{2}=\frac{\mu_{4}}{\left(\mu_{2}\right)^{2}}-3` :math:`\gamma_{1},\,\gamma_{2}` -====================================== ============================================================================================================================== ========================================================================================================================================= - - - - - - -Moments -------- - -Non-central moments are defined using the PDF - -.. 
math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\int_{-\infty}^{\infty}x^{n}f\left(x\right)dx.\] - -Note, that these can always be computed using the PPF. Substitute :math:`x=G\left(q\right)` in the above equation and get - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\int_{0}^{1}G^{n}\left(q\right)dq\] - -which may be easier to compute numerically. Note that :math:`q=F\left(x\right)` so that :math:`dq=f\left(x\right)dx.` Central moments are computed similarly :math:`\mu=\mu_{1}^{\prime}` - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu_{n} & = & \int_{-\infty}^{\infty}\left(x-\mu\right)^{n}f\left(x\right)dx\\ & = & \int_{0}^{1}\left(G\left(q\right)-\mu\right)^{n}dq\\ & = & \sum_{k=0}^{n}\left(\begin{array}{c} n\\ k\end{array}\right)\left(-\mu\right)^{k}\mu_{n-k}^{\prime}\end{eqnarray*} - -In particular - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu_{3} & = & \mu_{3}^{\prime}-3\mu\mu_{2}^{\prime}+2\mu^{3}\\ & = & \mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}\\ \mu_{4} & = & \mu_{4}^{\prime}-4\mu\mu_{3}^{\prime}+6\mu^{2}\mu_{2}^{\prime}-3\mu^{4}\\ & = & \mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}\end{eqnarray*} - -Skewness is defined as - -.. math:: - :nowrap: - - \[ \gamma_{1}=\sqrt{\beta_{1}}=\frac{\mu_{3}}{\mu_{2}^{3/2}}\] - -while (Fisher) kurtosis is - -.. math:: - :nowrap: - - \[ \gamma_{2}=\frac{\mu_{4}}{\mu_{2}^{2}}-3,\] - -so that a normal distribution has a kurtosis of zero. - - -Median and mode ---------------- - -The median, :math:`m_{n}` is defined as the point at which half of the density is on one side -and half on the other. In other words, :math:`F\left(m_{n}\right)=\frac{1}{2}` so that - -.. math:: - :nowrap: - - \[ m_{n}=G\left(\frac{1}{2}\right).\] - -In addition, the mode, :math:`m_{d}` , is defined as the value for which the probability density function -reaches it's peak - -.. 
math:: - :nowrap: - - \[ m_{d}=\arg\max_{x}f\left(x\right).\] - - - - -Fitting data ------------- - -To fit data to a distribution, maximizing the likelihood function is -common. Alternatively, some distributions have well-known minimum -variance unbiased estimators. These will be chosen by default, but the -likelihood function will always be available for minimizing. - -If :math:`f\left(x;\boldsymbol{\theta}\right)` is the PDF of a random-variable where :math:`\boldsymbol{\theta}` is a vector of parameters ( *e.g.* :math:`L` and :math:`S` ), then for a collection of :math:`N` independent samples from this distribution, the joint distribution the -random vector :math:`\mathbf{x}` is - -.. math:: - :nowrap: - - \[ f\left(\mathbf{x};\boldsymbol{\theta}\right)=\prod_{i=1}^{N}f\left(x_{i};\boldsymbol{\theta}\right).\] - -The maximum likelihood estimate of the parameters :math:`\boldsymbol{\theta}` are the parameters which maximize this function with :math:`\mathbf{x}` fixed and given by the data: - -.. math:: - :nowrap: - - \begin{eqnarray*} \boldsymbol{\theta}_{es} & = & \arg\max_{\boldsymbol{\theta}}f\left(\mathbf{x};\boldsymbol{\theta}\right)\\ & = & \arg\min_{\boldsymbol{\theta}}l_{\mathbf{x}}\left(\boldsymbol{\theta}\right).\end{eqnarray*} - -Where - -.. math:: - :nowrap: - - \begin{eqnarray*} l_{\mathbf{x}}\left(\boldsymbol{\theta}\right) & = & -\sum_{i=1}^{N}\log f\left(x_{i};\boldsymbol{\theta}\right)\\ & = & -N\overline{\log f\left(x_{i};\boldsymbol{\theta}\right)}\end{eqnarray*} - -Note that if :math:`\boldsymbol{\theta}` includes only shape parameters, the location and scale-parameters can -be fit by replacing :math:`x_{i}` with :math:`\left(x_{i}-L\right)/S` in the log-likelihood function adding :math:`N\log S` and minimizing, thus - -.. 
math:: - :nowrap: - - \begin{eqnarray*} l_{\mathbf{x}}\left(L,S;\boldsymbol{\theta}\right) & = & N\log S-\sum_{i=1}^{N}\log f\left(\frac{x_{i}-L}{S};\boldsymbol{\theta}\right)\\ & = & N\log S+l_{\frac{\mathbf{x}-S}{L}}\left(\boldsymbol{\theta}\right)\end{eqnarray*} - -If desired, sample estimates for :math:`L` and :math:`S` (not necessarily maximum likelihood estimates) can be obtained from -samples estimates of the mean and variance using - -.. math:: - :nowrap: - - \begin{eqnarray*} \hat{S} & = & \sqrt{\frac{\hat{\mu}_{2}}{\mu_{2}}}\\ \hat{L} & = & \hat{\mu}-\hat{S}\mu\end{eqnarray*} - -where :math:`\mu` and :math:`\mu_{2}` are assumed known as the mean and variance of the **untransformed** distribution (when :math:`L=0` and :math:`S=1` ) and - -.. math:: - :nowrap: - - \begin{eqnarray*} \hat{\mu} & = & \frac{1}{N}\sum_{i=1}^{N}x_{i}=\bar{\mathbf{x}}\\ \hat{\mu}_{2} & = & \frac{1}{N-1}\sum_{i=1}^{N}\left(x_{i}-\hat{\mu}\right)^{2}=\frac{N}{N-1}\overline{\left(\mathbf{x}-\bar{\mathbf{x}}\right)^{2}}\end{eqnarray*} - - - - -Standard notation for mean --------------------------- - -We will use - -.. math:: - :nowrap: - - \[ \overline{y\left(\mathbf{x}\right)}=\frac{1}{N}\sum_{i=1}^{N}y\left(x_{i}\right)\] - -where :math:`N` should be clear from context as the number of samples :math:`x_{i}` - - -Alpha -===== - -One shape parameters :math:`\alpha>0` (paramter :math:`\beta` in DATAPLOT is a scale-parameter). Standard form is :math:`x>0:` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\alpha\right) & = & \frac{1}{x^{2}\Phi\left(\alpha\right)\sqrt{2\pi}}\exp\left(-\frac{1}{2}\left(\alpha-\frac{1}{x}\right)^{2}\right)\\ F\left(x;\alpha\right) & = & \frac{\Phi\left(\alpha-\frac{1}{x}\right)}{\Phi\left(\alpha\right)}\\ G\left(q;\alpha\right) & = & \left[\alpha-\Phi^{-1}\left(q\Phi\left(\alpha\right)\right)\right]^{-1}\end{eqnarray*} - - - - - -.. 
math:: - :nowrap: - - \[ M\left(t\right)=\frac{1}{\Phi\left(a\right)\sqrt{2\pi}}\int_{0}^{\infty}\frac{e^{xt}}{x^{2}}\exp\left(-\frac{1}{2}\left(\alpha-\frac{1}{x}\right)^{2}\right)dx\] - - - -No moments? - -.. math:: - :nowrap: - - \[ l_{\mathbf{x}}\left(\alpha\right)=N\log\left[\Phi\left(\alpha\right)\sqrt{2\pi}\right]+2N\overline{\log\mathbf{x}}+\frac{N}{2}\alpha^{2}-\alpha\overline{\mathbf{x}^{-1}}+\frac{1}{2}\overline{\mathbf{x}^{-2}}\] - - - - -Anglit -====== - -Defined over :math:`x\in\left[-\frac{\pi}{4},\frac{\pi}{4}\right]` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \sin\left(2x+\frac{\pi}{2}\right)=\cos\left(2x\right)\\ F\left(x\right) & = & \sin^{2}\left(x+\frac{\pi}{4}\right)\\ G\left(q\right) & = & \arcsin\left(\sqrt{q}\right)-\frac{\pi}{4}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 0\\ \mu_{2} & = & \frac{\pi^{2}}{16}-\frac{1}{2}\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & -2\frac{\pi^{4}-96}{\left(\pi^{2}-8\right)^{2}}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & 1-\log2\\ & \approx & 0.30685281944005469058\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} M\left(t\right) & = & \int_{-\frac{\pi}{4}}^{\frac{\pi}{4}}\cos\left(2x\right)e^{xt}dx\\ & = & \frac{4\cosh\left(\frac{\pi t}{4}\right)}{t^{2}+4}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ l_{\mathbf{x}}\left(\cdot\right)=-N\overline{\log\left[\cos\left(2\mathbf{x}\right)\right]}\] - - - - -Arcsine -======= - -Defined over :math:`x\in\left(0,1\right)` . To get the JKB definition put :math:`x=\frac{u+1}{2}.` i.e. :math:`L=-1` and :math:`S=2.` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{\pi\sqrt{x\left(1-x\right)}}\\ F\left(x\right) & = & \frac{2}{\pi}\arcsin\left(\sqrt{x}\right)\\ G\left(q\right) & = & \sin^{2}\left(\frac{\pi}{2}q\right)\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \[ M\left(t\right)=E^{t/2}I_{0}\left(\frac{t}{2}\right)\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu_{n}^{\prime} & = & \frac{1}{\pi}\int_{0}^{1}dx\, x^{n-1/2}\left(1-x\right)^{-1/2}\\ & = & \frac{1}{\pi}B\left(\frac{1}{2},n+\frac{1}{2}\right)=\frac{\left(2n-1\right)!!}{2^{n}n!}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{1}{2}\\ \mu_{2} & = & \frac{1}{8}\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & -\frac{3}{2}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]\approx-0.24156447527049044468\] - - - - - -.. math:: - :nowrap: - - \[ l_{\mathbf{x}}\left(\cdot\right)=N\log\pi+\frac{N}{2}\overline{\log\mathbf{x}}+\frac{N}{2}\overline{\log\left(1-\mathbf{x}\right)}\] - - - - -Beta -==== - -Two shape parameters - - - -.. math:: - :nowrap: - - \[ a,b>0\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,b\right) & = & \frac{\Gamma\left(a+b\right)}{\Gamma\left(a\right)\Gamma\left(b\right)}x^{a-1}\left(1-x\right)^{b-1}I_{\left(0,1\right)}\left(x\right)\\ F\left(x;a,b\right) & = & \int_{0}^{x}f\left(y;a,b\right)dy=I\left(x,a,b\right)\\ G\left(\alpha;a,b\right) & = & I^{-1}\left(\alpha;a,b\right)\\ M\left(t\right) & = & \frac{\Gamma\left(a\right)\Gamma\left(b\right)}{\Gamma\left(a+b\right)}\,_{1}F_{1}\left(a;a+b;t\right)\\ \mu & = & \frac{a}{a+b}\\ \mu_{2} & = & \frac{ab\left(a+b+1\right)}{\left(a+b\right)^{2}}\\ \gamma_{1} & = & 2\frac{b-a}{a+b+2}\sqrt{\frac{a+b+1}{ab}}\\ \gamma_{2} & = & \frac{6\left(a^{3}+a^{2}\left(1-2b\right)+b^{2}\left(b+1\right)-2ab\left(b+2\right)\right)}{ab\left(a+b+2\right)\left(a+b+3\right)}\\ m_{d} & = & \frac{\left(a-1\right)}{\left(a+b-2\right)}\, a+b\neq2\end{eqnarray*} - - - -:math:`f\left(x;a,1\right)` is also called the Power-function distribution. - - - -.. 
math:: - :nowrap: - - \[ l_{\mathbf{x}}\left(a,b\right)=-N\log\Gamma\left(a+b\right)+N\log\Gamma\left(a\right)+N\log\Gamma\left(b\right)-N\left(a-1\right)\overline{\log\mathbf{x}}-N\left(b-1\right)\overline{\log\left(1-\mathbf{x}\right)}\] - -All of the :math:`x_{i}\in\left[0,1\right]` - - -Beta Prime -========== - -Defined over :math:`00.` (Note the CDF evaluation uses Eq. 3.194.1 on pg. 313 of Gradshteyn & -Ryzhik (sixth edition). - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\alpha,\beta\right) & = & \frac{\Gamma\left(\alpha+\beta\right)}{\Gamma\left(\alpha\right)\Gamma\left(\beta\right)}x^{\alpha-1}\left(1+x\right)^{-\alpha-\beta}\\ F\left(x;\alpha,\beta\right) & = & \frac{\Gamma\left(\alpha+\beta\right)}{\alpha\Gamma\left(\alpha\right)\Gamma\left(\beta\right)}x^{\alpha}\,_{2}F_{1}\left(\alpha+\beta,\alpha;1+\alpha;-x\right)\\ G\left(q;\alpha,\beta\right) & = & F^{-1}\left(x;\alpha,\beta\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\left\{ \begin{array}{ccc} \frac{\Gamma\left(n+\alpha\right)\Gamma\left(\beta-n\right)}{\Gamma\left(\alpha\right)\Gamma\left(\beta\right)}=\frac{\left(\alpha\right)_{n}}{\left(\beta-n\right)_{n}} & & \beta>n\\ \infty & & \textrm{otherwise}\end{array}\right.\] - -Therefore, - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{\alpha}{\beta-1}\quad\beta>1\\ \mu_{2} & = & \frac{\alpha\left(\alpha+1\right)}{\left(\beta-2\right)\left(\beta-1\right)}-\frac{\alpha^{2}}{\left(\beta-1\right)^{2}}\quad\beta>2\\ \gamma_{1} & = & \frac{\frac{\alpha\left(\alpha+1\right)\left(\alpha+2\right)}{\left(\beta-3\right)\left(\beta-2\right)\left(\beta-1\right)}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\quad\beta>3\\ \gamma_{2} & = & \frac{\mu_{4}}{\mu_{2}^{2}}-3\\ \mu_{4} & = & \frac{\alpha\left(\alpha+1\right)\left(\alpha+2\right)\left(\alpha+3\right)}{\left(\beta-4\right)\left(\beta-3\right)\left(\beta-2\right)\left(\beta-1\right)}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}\quad\beta>4\end{eqnarray*} - - - - -Bradford -======== - - - -.. math:: - :nowrap: - - \begin{eqnarray*} c & > & 0\\ k & = & \log\left(1+c\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{c}{k\left(1+cx\right)}I_{\left(0,1\right)}\left(x\right)\\ F\left(x;c\right) & = & \frac{\log\left(1+cx\right)}{k}\\ G\left(\alpha\; c\right) & = & \frac{\left(1+c\right)^{\alpha}-1}{c}\\ M\left(t\right) & = & \frac{1}{k}e^{-t/c}\left[\textrm{Ei}\left(t+\frac{t}{c}\right)-\textrm{Ei}\left(\frac{t}{c}\right)\right]\\ \mu & = & \frac{c-k}{ck}\\ \mu_{2} & = & \frac{\left(c+2\right)k-2c}{2ck^{2}}\\ \gamma_{1} & = & \frac{\sqrt{2}\left(12c^{2}-9kc\left(c+2\right)+2k^{2}\left(c\left(c+3\right)+3\right)\right)}{\sqrt{c\left(c\left(k-2\right)+2k\right)}\left(3c\left(k-2\right)+6k\right)}\\ \gamma_{2} & = & \frac{c^{3}\left(k-3\right)\left(k\left(3k-16\right)+24\right)+12kc^{2}\left(k-4\right)\left(k-3\right)+6ck^{2}\left(3k-14\right)+12k^{3}}{3c\left(c\left(k-2\right)+2k\right)^{2}}\\ m_{d} & = & 0\\ m_{n} & = & \sqrt{1+c}-1\end{eqnarray*} - -where :math:`\textrm{Ei}\left(\textrm{z}\right)` is the exponential integral function. Also - -.. 
math:: - :nowrap: - - \[ h\left[X\right]=\frac{1}{2}\log\left(1+c\right)-\log\left(\frac{c}{\log\left(1+c\right)}\right)\] - - - - -Burr -==== - - - -.. math:: - :nowrap: - - \begin{eqnarray*} c & > & 0\\ d & > & 0\\ k & = & \Gamma\left(d\right)\Gamma\left(1-\frac{2}{c}\right)\Gamma\left(\frac{2}{c}+d\right)-\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+d\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c,d\right) & = & \frac{cd}{x^{c+1}\left(1+x^{-c}\right)^{d+1}}I_{\left(0,\infty\right)}\left(x\right)\\ F\left(x;c,d\right) & = & \left(1+x^{-c}\right)^{-d}\\ G\left(\alpha;c,d\right) & = & \left(\alpha^{-1/d}-1\right)^{-1/c}\\ \mu & = & \frac{\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+d\right)}{\Gamma\left(d\right)}\\ \mu_{2} & = & \frac{k}{\Gamma^{2}\left(d\right)}\\ \gamma_{1} & = & \frac{1}{\sqrt{k^{3}}}\left[2\Gamma^{3}\left(1-\frac{1}{c}\right)\Gamma^{3}\left(\frac{1}{c}+d\right)+\Gamma^{2}\left(d\right)\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(\frac{3}{c}+d\right)\right.\\ & & \left.-3\Gamma\left(d\right)\Gamma\left(1-\frac{2}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+d\right)\Gamma\left(\frac{2}{c}+d\right)\right]\\ \gamma_{2} & = & -3+\frac{1}{k^{2}}\left[6\Gamma\left(d\right)\Gamma\left(1-\frac{2}{c}\right)\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+d\right)\Gamma\left(\frac{2}{c}+d\right)\right.\\ & & -3\Gamma^{4}\left(1-\frac{1}{c}\right)\Gamma^{4}\left(\frac{1}{c}+d\right)+\Gamma^{3}\left(d\right)\Gamma\left(1-\frac{4}{c}\right)\Gamma\left(\frac{4}{c}+d\right)\\ & & \left.-4\Gamma^{2}\left(d\right)\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+d\right)\Gamma\left(\frac{3}{c}+d\right)\right]\\ m_{d} & = & \left(\frac{cd-1}{c+1}\right)^{1/c}\,\textrm{if }cd>1\,\textrm{otherwise }0\\ m_{n} & = & \left(2^{1/d}-1\right)^{-1/c}\end{eqnarray*} - - - - -Cauchy -====== - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{\pi\left(1+x^{2}\right)}\\ F\left(x\right) & = & \frac{1}{2}+\frac{1}{\pi}\tan^{-1}x\\ G\left(\alpha\right) & = & \tan\left(\pi\alpha-\frac{\pi}{2}\right)\\ m_{d} & = & 0\\ m_{n} & = & 0\end{eqnarray*} - -No finite moments. This is the t distribution with one degree of -freedom. - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(4\pi\right)\\ & \approx & 2.5310242469692907930.\end{eqnarray*} - - - - -Chi -=== - -Generated by taking the (positive) square-root of chi-squared -variates. - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\nu\right) & = & \frac{x^{\nu-1}e^{-x^{2}/2}}{2^{\nu/2-1}\Gamma\left(\frac{\nu}{2}\right)}I_{\left(0,\infty\right)}\left(x\right)\\ F\left(x;\nu\right) & = & \Gamma\left(\frac{\nu}{2},\frac{x^{2}}{2}\right)\\ G\left(\alpha;\nu\right) & = & \sqrt{2\Gamma^{-1}\left(\frac{\nu}{2},\alpha\right)}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\Gamma\left(\frac{v}{2}\right)\,_{1}F_{1}\left(\frac{v}{2};\frac{1}{2};\frac{t^{2}}{2}\right)+\frac{t}{\sqrt{2}}\Gamma\left(\frac{1+\nu}{2}\right)\,_{1}F_{1}\left(\frac{1+\nu}{2};\frac{3}{2};\frac{t^{2}}{2}\right)\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{\sqrt{2}\Gamma\left(\frac{\nu+1}{2}\right)}{\Gamma\left(\frac{\nu}{2}\right)}\\ \mu_{2} & = & \nu-\mu^{2}\\ \gamma_{1} & = & \frac{2\mu^{3}+\mu\left(1-2\nu\right)}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{2\nu\left(1-\nu\right)-6\mu^{4}+4\mu^{2}\left(2\nu-1\right)}{\mu_{2}^{2}}\\ m_{d} & = & \sqrt{\nu-1}\quad\nu\geq1\\ m_{n} & = & \sqrt{2\Gamma^{-1}\left(\frac{\nu}{2},\frac{1}{2}\right)}\end{eqnarray*} - - - - -Chi-squared -=========== - -This is the gamma distribution with :math:`L=0.0` and :math:`S=2.0` and :math:`\alpha=\nu/2` where :math:`\nu` is called the degrees of freedom. 
If :math:`Z_{1}\ldots Z_{\nu}` are all standard normal distributions, then :math:`W=\sum_{k}Z_{k}^{2}` has (standard) chi-square distribution with :math:`\nu` degrees of freedom. - -The standard form (most often used in standard form only) is :math:`x>0` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\alpha\right) & = & \frac{1}{2\Gamma\left(\frac{\nu}{2}\right)}\left(\frac{x}{2}\right)^{\nu/2-1}e^{-x/2}\\ F\left(x;\alpha\right) & = & \Gamma\left(\frac{\nu}{2},\frac{x}{2}\right)\\ G\left(q;\alpha\right) & = & 2\Gamma^{-1}\left(\frac{\nu}{2},q\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{\Gamma\left(\frac{\nu}{2}\right)}{\left(\frac{1}{2}-t\right)^{\nu/2}}\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \nu\\ \mu_{2} & = & 2\nu\\ \gamma_{1} & = & \frac{2\sqrt{2}}{\sqrt{\nu}}\\ \gamma_{2} & = & \frac{12}{\nu}\\ m_{d} & = & \frac{\nu}{2}-1\end{eqnarray*} - - - - -Cosine -====== - -Approximation to the normal distribution. - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{2\pi}\left[1+\cos x\right]I_{\left[-\pi,\pi\right]}\left(x\right)\\ F\left(x\right) & = & \frac{1}{2\pi}\left[\pi+x+\sin x\right]I_{\left[-\pi,\pi\right]}\left(x\right)+I_{\left(\pi,\infty\right)}\left(x\right)\\ G\left(\alpha\right) & = & F^{-1}\left(\alpha\right)\\ M\left(t\right) & = & \frac{\sinh\left(\pi t\right)}{\pi t\left(1+t^{2}\right)}\\ \mu=m_{d}=m_{n} & = & 0\\ \mu_{2} & = & \frac{\pi^{2}}{3}-2\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & \frac{-6\left(\pi^{4}-90\right)}{5\left(\pi^{2}-6\right)^{2}}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(4\pi\right)-1\\ & \approx & 1.5310242469692907930.\end{eqnarray*} - - - - -Double Gamma -============ - -The double gamma is the signed version of the Gamma distribution. For :math:`\alpha>0:` - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\alpha\right) & = & \frac{1}{2\Gamma\left(\alpha\right)}\left|x\right|^{\alpha-1}e^{-\left|x\right|}\\ F\left(x;\alpha\right) & = & \left\{ \begin{array}{ccc} \frac{1}{2}-\frac{1}{2}\Gamma\left(\alpha,\left|x\right|\right) & & x\leq0\\ \frac{1}{2}+\frac{1}{2}\Gamma\left(\alpha,\left|x\right|\right) & & x>0\end{array}\right.\\ G\left(q;\alpha\right) & = & \left\{ \begin{array}{ccc} -\Gamma^{-1}\left(\alpha,\left|2q-1\right|\right) & & q\leq\frac{1}{2}\\ \Gamma^{-1}\left(\alpha,\left|2q-1\right|\right) & & q>\frac{1}{2}\end{array}\right.\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{1}{2\left(1-t\right)^{a}}+\frac{1}{2\left(1+t\right)^{a}}\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu=m_{n} & = & 0\\ \mu_{2} & = & \alpha\left(\alpha+1\right)\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & \frac{\left(\alpha+2\right)\left(\alpha+3\right)}{\alpha\left(\alpha+1\right)}-3\\ m_{d} & = & \textrm{NA}\end{eqnarray*} - - - - -Doubly Non-central F* -===================== - - -Doubly Non-central t* -===================== - - -Double Weibull -============== - -This is a signed form of the Weibull distribution. - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{c}{2}\left|x\right|^{c-1}\exp\left(-\left|x\right|^{c}\right)\\ F\left(x;c\right) & = & \left\{ \begin{array}{ccc} \frac{1}{2}\exp\left(-\left|x\right|^{c}\right) & & x\leq0\\ 1-\frac{1}{2}\exp\left(-\left|x\right|^{c}\right) & & x>0\end{array}\right.\\ G\left(q;c\right) & = & \left\{ \begin{array}{ccc} -\log^{1/c}\left(\frac{1}{2q}\right) & & q\leq\frac{1}{2}\\ \log^{1/c}\left(\frac{1}{2q-1}\right) & & q>\frac{1}{2}\end{array}\right.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\mu_{n}=\begin{cases} \Gamma\left(1+\frac{n}{c}\right) & n\textrm{ even}\\ 0 & n\textrm{ odd}\end{cases}\] - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} m_{d}=\mu & = & 0\\ \mu_{2} & = & \Gamma\left(\frac{c+2}{c}\right)\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & \frac{\Gamma\left(1+\frac{4}{c}\right)}{\Gamma^{2}\left(1+\frac{2}{c}\right)}\\ m_{d} & = & \textrm{NA bimodal}\end{eqnarray*} - - - - -Erlang -====== - -This is just the Gamma distribution with shape parameter :math:`\alpha=n` an integer. - - -Exponential -=========== - -This is a special case of the Gamma (and Erlang) distributions with -shape parameter :math:`\left(\alpha=1\right)` and the same location and scale parameters. The standard form is -therefore ( :math:`x\geq0` ) - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & e^{-x}\\ F\left(x\right) & = & \Gamma\left(1,x\right)=1-e^{-x}\\ G\left(q\right) & = & -\log\left(1-q\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=n!\] - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{1}{1-t}\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 1\\ \mu_{2} & = & 1\\ \gamma_{1} & = & 2\\ \gamma_{2} & = & 6\\ m_{d} & = & 0\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=1.\] - - - - -Exponentiated Weibull -===================== - -Two positive shape parameters :math:`a` and :math:`c` and :math:`x\in\left(0,\infty\right)` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,c\right) & = & ac\left[1-\exp\left(-x^{c}\right)\right]^{a-1}\exp\left(-x^{c}\right)x^{c-1}\\ F\left(x;a,c\right) & = & \left[1-\exp\left(-x^{c}\right)\right]^{a}\\ G\left(q;a,c\right) & = & \left[-\log\left(1-q^{1/a}\right)\right]^{1/c}\end{eqnarray*} - - - - -Exponential Power -================= - -One positive shape parameter :math:`b` . Defined for :math:`x\geq0.` - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;b\right) & = & ebx^{b-1}\exp\left[x^{b}-e^{x^{b}}\right]\\ F\left(x;b\right) & = & 1-\exp\left[1-e^{x^{b}}\right]\\ G\left(q;b\right) & = & \log^{1/b}\left[1-\log\left(1-q\right)\right]\end{eqnarray*} - - - - -Fatigue Life (Birnbaum-Sanders) -=============================== - -This distribution's pdf is the average of the inverse-Gaussian :math:`\left(\mu=1\right)` and reciprocal inverse-Gaussian pdf :math:`\left(\mu=1\right)` . We follow the notation of JKB here with :math:`\beta=S.` for :math:`x>0` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{x+1}{2c\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-1\right)^{2}}{2xc^{2}}\right)\\ F\left(x;c\right) & = & \Phi\left(\frac{1}{c}\left(\sqrt{x}-\frac{1}{\sqrt{x}}\right)\right)\\ G\left(q;c\right) & = & \frac{1}{4}\left[c\Phi^{-1}\left(q\right)+\sqrt{c^{2}\left(\Phi^{-1}\left(q\right)\right)^{2}+4}\right]^{2}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=c\sqrt{2\pi}\exp\left[\frac{1}{c^{2}}\left(1-\sqrt{1-2c^{2}t}\right)\right]\left(1+\frac{1}{\sqrt{1-2c^{2}t}}\right)\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{c^{2}}{2}+1\\ \mu_{2} & = & c^{2}\left(\frac{5}{4}c^{2}+1\right)\\ \gamma_{1} & = & \frac{4c\sqrt{11c^{2}+6}}{\left(5c^{2}+4\right)^{3/2}}\\ \gamma_{2} & = & \frac{6c^{2}\left(93c^{2}+41\right)}{\left(5c^{2}+4\right)^{2}}\end{eqnarray*} - - - - -Fisk (Log Logistic) -=================== - -Special case of the Burr distribution with :math:`d=1` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} c & > & 0\\ k & = & \Gamma\left(1-\frac{2}{c}\right)\Gamma\left(\frac{2}{c}+1\right)-\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+1\right)\end{eqnarray*} - - - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c,d\right) & = & \frac{cx^{c-1}}{\left(1+x^{c}\right)^{2}}I_{\left(0,\infty\right)}\left(x\right)\\ F\left(x;c,d\right) & = & \left(1+x^{-c}\right)^{-1}\\ G\left(\alpha;c,d\right) & = & \left(\alpha^{-1}-1\right)^{-1/c}\\ \mu & = & \Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+1\right)\\ \mu_{2} & = & k\\ \gamma_{1} & = & \frac{1}{\sqrt{k^{3}}}\left[2\Gamma^{3}\left(1-\frac{1}{c}\right)\Gamma^{3}\left(\frac{1}{c}+1\right)+\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(\frac{3}{c}+1\right)\right.\\ & & \left.-3\Gamma\left(1-\frac{2}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+1\right)\Gamma\left(\frac{2}{c}+1\right)\right]\\ \gamma_{2} & = & -3+\frac{1}{k^{2}}\left[6\Gamma\left(1-\frac{2}{c}\right)\Gamma^{2}\left(1-\frac{1}{c}\right)\Gamma^{2}\left(\frac{1}{c}+1\right)\Gamma\left(\frac{2}{c}+1\right)\right.\\ & & -3\Gamma^{4}\left(1-\frac{1}{c}\right)\Gamma^{4}\left(\frac{1}{c}+1\right)+\Gamma\left(1-\frac{4}{c}\right)\Gamma\left(\frac{4}{c}+1\right)\\ & & \left.-4\Gamma\left(1-\frac{3}{c}\right)\Gamma\left(1-\frac{1}{c}\right)\Gamma\left(\frac{1}{c}+1\right)\Gamma\left(\frac{3}{c}+1\right)\right]\\ m_{d} & = & \left(\frac{c-1}{c+1}\right)^{1/c}\,\textrm{if }c>1\,\textrm{otherwise }0\\ m_{n} & = & 1\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=2-\log c.\] - - - - -Folded Cauchy -============= - -This formula can be expressed in terms of the standard formulas for -the Cauchy distribution (call the cdf :math:`C\left(x\right)` and the pdf :math:`d\left(x\right)` ). if :math:`Y` is cauchy then :math:`\left|Y\right|` is folded cauchy. Note that :math:`x\geq0.` - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{1}{\pi\left(1+\left(x-c\right)^{2}\right)}+\frac{1}{\pi\left(1+\left(x+c\right)^{2}\right)}\\ F\left(x;c\right) & = & \frac{1}{\pi}\tan^{-1}\left(x-c\right)+\frac{1}{\pi}\tan^{-1}\left(x+c\right)\\ G\left(q;c\right) & = & F^{-1}\left(x;c\right)\end{eqnarray*} - - - -No moments - - -Folded Normal -============= - -If :math:`Z` is Normal with mean :math:`L` and :math:`\sigma=S` , then :math:`\left|Z\right|` is a folded normal with shape parameter :math:`c=\left|L\right|/S` , location parameter :math:`0` and scale parameter :math:`S` . This is a special case of the non-central chi distribution with one- -degree of freedom and non-centrality parameter :math:`c^{2}.` Note that :math:`c\geq0` . The standard form of the folded normal is - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \sqrt{\frac{2}{\pi}}\cosh\left(cx\right)\exp\left(-\frac{x^{2}+c^{2}}{2}\right)\\ F\left(x;c\right) & = & \Phi\left(x-c\right)-\Phi\left(-x-c\right)=\Phi\left(x-c\right)+\Phi\left(x+c\right)-1\\ G\left(\alpha;c\right) & = & F^{-1}\left(x;c\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\exp\left[\frac{t}{2}\left(t-2c\right)\right]\left(1+e^{2ct}\right)\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} k & = & \textrm{erf}\left(\frac{c}{\sqrt{2}}\right)\\ p & = & \exp\left(-\frac{c^{2}}{2}\right)\\ \mu & = & \sqrt{\frac{2}{\pi}}p+ck\\ \mu_{2} & = & c^{2}+1-\mu^{2}\\ \gamma_{1} & = & \frac{\sqrt{\frac{2}{\pi}}p^{3}\left(4-\frac{\pi}{p^{2}}\left(2c^{2}+1\right)\right)+2ck\left(6p^{2}+3cpk\sqrt{2\pi}+\pi c\left(k^{2}-1\right)\right)}{\pi\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{c^{4}+6c^{2}+3+6\left(c^{2}+1\right)\mu^{2}-3\mu^{4}-4p\mu\left(\sqrt{\frac{2}{\pi}}\left(c^{2}+2\right)+\frac{ck}{p}\left(c^{2}+3\right)\right)}{\mu_{2}^{2}}\end{eqnarray*} - - - - -Fratio (or F) -============= - -Defined for :math:`x>0` . 
The distribution of :math:`\left(X_{1}/X_{2}\right)\left(\nu_{2}/\nu_{1}\right)` if :math:`X_{1}` is chi-squared with :math:`v_{1}` degrees of freedom and :math:`X_{2}` is chi-squared with :math:`v_{2}` degrees of freedom. - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\nu_{1},\nu_{2}\right) & = & \frac{\nu_{2}^{\nu_{2}/2}\nu_{1}^{\nu_{1}/2}x^{\nu_{1}/2-1}}{\left(\nu_{2}+\nu_{1}x\right)^{\left(\nu_{1}+\nu_{2}\right)/2}B\left(\frac{\nu_{1}}{2},\frac{\nu_{2}}{2}\right)}\\ F\left(x;v_{1},v_{2}\right) & = & I\left(\frac{\nu_{1}}{2},\frac{\nu_{2}}{2},\frac{\nu_{2}x}{\nu_{2}+\nu_{1}x}\right)\\ G\left(q;\nu_{1},\nu_{2}\right) & = & \left[\frac{\nu_{2}}{I^{-1}\left(\nu_{1}/2,\nu_{2}/2,q\right)}-\frac{\nu_{1}}{\nu_{2}}\right]^{-1}.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{\nu_{2}}{\nu_{2}-2}\quad\nu_{2}>2\\ \mu_{2} & = & \frac{2\nu_{2}^{2}\left(\nu_{1}+\nu_{2}-2\right)}{\nu_{1}\left(\nu_{2}-2\right)^{2}\left(\nu_{2}-4\right)}\quad v_{2}>4\\ \gamma_{1} & = & \frac{2\left(2\nu_{1}+\nu_{2}-2\right)}{\nu_{2}-6}\sqrt{\frac{2\left(\nu_{2}-4\right)}{\nu_{1}\left(\nu_{1}+\nu_{2}-2\right)}}\quad\nu_{2}>6\\ \gamma_{2} & = & \frac{3\left[8+\left(\nu_{2}-6\right)\gamma_{1}^{2}\right]}{2\nu-16}\quad\nu_{2}>8\end{eqnarray*} - - - - -Fréchet (ExtremeLB, Extreme Value II, Weibull minimum) -======================================================= - -A type of extreme-value distribution with a lower bound. Defined for :math:`x>0` and :math:`c>0` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & cx^{c-1}\exp\left(-x^{c}\right)\\ F\left(x;c\right) & = & 1-\exp\left(-x^{c}\right)\\ G\left(q;c\right) & = & \left[-\log\left(1-q\right)\right]^{1/c}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\Gamma\left(1+\frac{n}{c}\right)\] - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \Gamma\left(1+\frac{1}{c}\right)\\ \mu_{2} & = & \Gamma\left(1+\frac{2}{c}\right)-\Gamma^{2}\left(1-\frac{1}{c}\right)\\ \gamma_{1} & = & \frac{\Gamma\left(1+\frac{3}{c}\right)-3\Gamma\left(1+\frac{2}{c}\right)\Gamma\left(1+\frac{1}{c}\right)+2\Gamma^{3}\left(1+\frac{1}{c}\right)}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\Gamma\left(1+\frac{4}{c}\right)-4\Gamma\left(1+\frac{1}{c}\right)\Gamma\left(1+\frac{3}{c}\right)+6\Gamma^{2}\left(1+\frac{1}{c}\right)\Gamma\left(1+\frac{2}{c}\right)-\Gamma^{4}\left(1+\frac{1}{c}\right)}{\mu_{2}^{2}}-3\\ m_{d} & = & \left(\frac{c}{1+c}\right)^{1/c}\\ m_{n} & = & G\left(\frac{1}{2};c\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=-\frac{\gamma}{c}-\log\left(c\right)+\gamma+1\] - -where :math:`\gamma` is Euler's constant and equal to - -.. math:: - :nowrap: - - \[ \gamma\approx0.57721566490153286061.\] - - - - -Fréchet (left-skewed, Extreme Value Type III, Weibull maximum) -=============================================================== - -Defined for :math:`x<0` and :math:`c>0` . - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & c\left(-x\right)^{c-1}\exp\left(-\left(-x\right)^{c}\right)\\ F\left(x;c\right) & = & \exp\left(-\left(-x\right)^{c}\right)\\ G\left(q;c\right) & = & -\left(-\log q\right)^{1/c}\end{eqnarray*} - - - -The mean is the negative of the right-skewed Frechet distribution -given above, and the other statistical parameters can be computed from - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\left(-1\right)^{n}\Gamma\left(1+\frac{n}{c}\right).\] - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=-\frac{\gamma}{c}-\log\left(c\right)+\gamma+1\] - -where :math:`\gamma` is Euler's constant and equal to - -.. math:: - :nowrap: - - \[ \gamma\approx0.57721566490153286061.\] - - - - -Gamma -===== - -The standard form for the gamma distribution is :math:`\left(\alpha>0\right)` valid for :math:`x\geq0` . - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\alpha\right) & = & \frac{1}{\Gamma\left(\alpha\right)}x^{\alpha-1}e^{-x}\\ F\left(x;\alpha\right) & = & \Gamma\left(\alpha,x\right)\\ G\left(q;\alpha\right) & = & \Gamma^{-1}\left(\alpha,q\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{1}{\left(1-t\right)^{\alpha}}\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \alpha\\ \mu_{2} & = & \alpha\\ \gamma_{1} & = & \frac{2}{\sqrt{\alpha}}\\ \gamma_{2} & = & \frac{6}{\alpha}\\ m_{d} & = & \alpha-1\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\Psi\left(a\right)\left[1-a\right]+a+\log\Gamma\left(a\right)\] - -where - -.. math:: - :nowrap: - - \[ \Psi\left(a\right)=\frac{\Gamma^{\prime}\left(a\right)}{\Gamma\left(a\right)}.\] - - - - -Generalized Logistic -==================== - -Has been used in the analysis of extreme values. Has one shape -parameter :math:`c>0.` And :math:`x>0` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{c\exp\left(-x\right)}{\left[1+\exp\left(-x\right)\right]^{c+1}}\\ F\left(x;c\right) & = & \frac{1}{\left[1+\exp\left(-x\right)\right]^{c}}\\ G\left(q;c\right) & = & -\log\left(q^{-1/c}-1\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{c}{1-t}\,_{2}F_{1}\left(1+c,\,1-t\,;\,2-t\,;-1\right)\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \gamma+\psi_{0}\left(c\right)\\ \mu_{2} & = & \frac{\pi^{2}}{6}+\psi_{1}\left(c\right)\\ \gamma_{1} & = & \frac{\psi_{2}\left(c\right)+2\zeta\left(3\right)}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\left(\frac{\pi^{4}}{15}+\psi_{3}\left(c\right)\right)}{\mu_{2}^{2}}\\ m_{d} & = & \log c\\ m_{n} & = & -\log\left(2^{1/c}-1\right)\end{eqnarray*} - -Note that the polygamma function is - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \psi_{n}\left(z\right) & = & \frac{d^{n+1}}{dz^{n+1}}\log\Gamma\left(z\right)\\ & = & \left(-1\right)^{n+1}n!\sum_{k=0}^{\infty}\frac{1}{\left(z+k\right)^{n+1}}\\ & = & \left(-1\right)^{n+1}n!\zeta\left(n+1,z\right)\end{eqnarray*} - -where :math:`\zeta\left(k,x\right)` is a generalization of the Riemann zeta function called the Hurwitz -zeta function Note that :math:`\zeta\left(n\right)\equiv\zeta\left(n,1\right)` - - -Generalized Pareto -================== - -Shape parameter :math:`c\neq0` and defined for :math:`x\geq0` for all :math:`c` and :math:`x<\frac{1}{\left|c\right|}` if :math:`c` is negative. - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \left(1+cx\right)^{-1-\frac{1}{c}}\\ F\left(x;c\right) & = & 1-\frac{1}{\left(1+cx\right)^{1/c}}\\ G\left(q;c\right) & = & \frac{1}{c}\left[\left(\frac{1}{1-q}\right)^{c}-1\right]\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\left\{ \begin{array}{cc} \left(-\frac{t}{c}\right)^{\frac{1}{c}}e^{-\frac{t}{c}}\left[\Gamma\left(1-\frac{1}{c}\right)+\Gamma\left(-\frac{1}{c},-\frac{t}{c}\right)-\pi\csc\left(\frac{\pi}{c}\right)/\Gamma\left(\frac{1}{c}\right)\right] & c>0\\ \left(\frac{\left|c\right|}{t}\right)^{1/\left|c\right|}\Gamma\left[\frac{1}{\left|c\right|},\frac{t}{\left|c\right|}\right] & c<0\end{array}\right.\] - - - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\frac{\left(-1\right)^{n}}{c^{n}}\sum_{k=0}^{n}\left(\begin{array}{c} n\\ k\end{array}\right)\frac{\left(-1\right)^{k}}{1-ck}\quad cn<1\] - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu_{1}^{\prime} & = & \frac{1}{1-c}\quad c<1\\ \mu_{2}^{\prime} & = & \frac{2}{\left(1-2c\right)\left(1-c\right)}\quad c<\frac{1}{2}\\ \mu_{3}^{\prime} & = & \frac{6}{\left(1-c\right)\left(1-2c\right)\left(1-3c\right)}\quad c<\frac{1}{3}\\ \mu_{4}^{\prime} & = & \frac{24}{\left(1-c\right)\left(1-2c\right)\left(1-3c\right)\left(1-4c\right)}\quad c<\frac{1}{4}\end{eqnarray*} - -Thus, - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \mu_{1}^{\prime}\\ \mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ \gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=1+c\quad c>0\] - - - - -Generalized Exponential -======================= - -Three positive shape parameters for :math:`x\geq0.` Note that :math:`a,b,` and :math:`c` are all :math:`>0.` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,b,c\right) & = & \left(a+b\left(1-e^{-cx}\right)\right)\exp\left[ax-bx+\frac{b}{c}\left(1-e^{-cx}\right)\right]\\ F\left(x;a,b,c\right) & = & 1-\exp\left[ax-bx+\frac{b}{c}\left(1-e^{-cx}\right)\right]\\ G\left(q;a,b,c\right) & = & F^{-1}\end{eqnarray*} - - - - -Generalized Extreme Value -========================= - -Extreme value distributions with shape parameter :math:`c` . - -For :math:`c>0` defined on :math:`-\infty-1\] - -So, - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu_{1}^{\prime} & = & \frac{1}{c}\left(1-\Gamma\left(1+c\right)\right)\quad c>-1\\ \mu_{2}^{\prime} & = & \frac{1}{c^{2}}\left(1-2\Gamma\left(1+c\right)+\Gamma\left(1+2c\right)\right)\quad c>-\frac{1}{2}\\ \mu_{3}^{\prime} & = & \frac{1}{c^{3}}\left(1-3\Gamma\left(1+c\right)+3\Gamma\left(1+2c\right)-\Gamma\left(1+3c\right)\right)\quad c>-\frac{1}{3}\\ \mu_{4}^{\prime} & = & \frac{1}{c^{4}}\left(1-4\Gamma\left(1+c\right)+6\Gamma\left(1+2c\right)-4\Gamma\left(1+3c\right)+\Gamma\left(1+4c\right)\right)\quad c>-\frac{1}{4}\end{eqnarray*} - -For :math:`c<0` defined on :math:`\frac{1}{c}\leq x<\infty.` For :math:`c=0` defined over all space - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;0\right) & = & \exp\left[-e^{-x}\right]e^{-x}\\ F\left(x;0\right) & = & \exp\left[-e^{-x}\right]\\ G\left(q;0\right) & = & -\log\left(-\log q\right)\end{eqnarray*} - -This is just the (left-skewed) Gumbel distribution for c=0. - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \gamma=-\psi_{0}\left(1\right)\\ \mu_{2} & = & \frac{\pi^{2}}{6}\\ \gamma_{1} & = & \frac{12\sqrt{6}}{\pi^{3}}\zeta\left(3\right)\\ \gamma_{2} & = & \frac{12}{5}\end{eqnarray*} - - - - -Generalized Gamma -================= - -A general probability form that reduces to many common distributions: :math:`x>0` :math:`a>0` and :math:`c\neq0.` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,c\right) & = & \frac{\left|c\right|x^{ca-1}}{\Gamma\left(a\right)}\exp\left(-x^{c}\right)\\ F\left(x;a,c\right) & = & \begin{array}{cc} \frac{\Gamma\left(a,x^{c}\right)}{\Gamma\left(a\right)} & c>0\\ 1-\frac{\Gamma\left(a,x^{c}\right)}{\Gamma\left(a\right)} & c<0\end{array}\\ G\left(q;a,c\right) & = & \left\{ \Gamma^{-1}\left[a,\Gamma\left(a\right)q\right]\right\} ^{1/c}\quad c>0\\ & & \left\{ \Gamma^{-1}\left[a,\Gamma\left(a\right)\left(1-q\right)\right]\right\} ^{1/c}\quad c<0\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\frac{\Gamma\left(a+\frac{n}{c}\right)}{\Gamma\left(a\right)}\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{\Gamma\left(a+\frac{1}{c}\right)}{\Gamma\left(a\right)}\\ \mu_{2} & = & \frac{\Gamma\left(a+\frac{2}{c}\right)}{\Gamma\left(a\right)}-\mu^{2}\\ \gamma_{1} & = & \frac{\Gamma\left(a+\frac{3}{c}\right)/\Gamma\left(a\right)-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\Gamma\left(a+\frac{4}{c}\right)/\Gamma\left(a\right)-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\\ m_{d} & = & \left(\frac{ac-1}{c}\right)^{1/c}.\end{eqnarray*} - -Special cases are Weibull :math:`\left(a=1\right)` , half-normal :math:`\left(a=1/2,c=2\right)` and ordinary gamma distributions :math:`c=1.` If :math:`c=-1` then it is the inverted gamma distribution. - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=a-a\Psi\left(a\right)+\frac{1}{c}\Psi\left(a\right)+\log\Gamma\left(a\right)-\log\left|c\right|.\] - - - - -Generalized Half-Logistic -========================= - -For :math:`x\in\left[0,1/c\right]` and :math:`c>0` we have - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{2\left(1-cx\right)^{\frac{1}{c}-1}}{\left(1+\left(1-cx\right)^{1/c}\right)^{2}}\\ F\left(x;c\right) & = & \frac{1-\left(1-cx\right)^{1/c}}{1+\left(1-cx\right)^{1/c}}\\ G\left(q;c\right) & = & \frac{1}{c}\left[1-\left(\frac{1-q}{1+q}\right)^{c}\right]\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & 2-\left(2c+1\right)\log2.\end{eqnarray*} - - - - -Gilbrat -======= - -Special case of the log-normal with :math:`\sigma=1` and :math:`S=1.0` (typically also :math:`L=0.0` ) - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\sigma\right) & = & \frac{1}{x\sqrt{2\pi}}\exp\left[-\frac{1}{2}\left(\log x\right)^{2}\right]\\ F\left(x;\sigma\right) & = & \Phi\left(\log x\right)=\frac{1}{2}\left[1+\textrm{erf}\left(\frac{\log x}{\sqrt{2}}\right)\right]\\ G\left(q;\sigma\right) & = & \exp\left\{ \Phi^{-1}\left(q\right)\right\} \end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \sqrt{e}\\ \mu_{2} & = & e\left[e-1\right]\\ \gamma_{1} & = & \sqrt{e-1}\left(2+e\right)\\ \gamma_{2} & = & e^{4}+2e^{3}+3e^{2}-6\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(\sqrt{2\pi e}\right)\\ & \approx & 1.4189385332046727418\end{eqnarray*} - - - - -Gompertz (Truncated Gumbel) -=========================== - -For :math:`x\geq0` and :math:`c>0` . In JKB the two shape parameters :math:`b,a` are reduced to the single shape-parameter :math:`c=b/a` . As :math:`a` is just a scale parameter when :math:`a\neq0` . If :math:`a=0,` the distribution reduces to the exponential distribution scaled by :math:`1/b.` Thus, the standard form is given as - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & ce^{x}\exp\left[-c\left(e^{x}-1\right)\right]\\ F\left(x;c\right) & = & 1-\exp\left[-c\left(e^{x}-1\right)\right]\\ G\left(q;c\right) & = & \log\left[1-\frac{1}{c}\log\left(1-q\right)\right]\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=1-\log\left(c\right)-e^{c}\textrm{Ei}\left(1,c\right),\] - -where - -.. math:: - :nowrap: - - \[ \textrm{Ei}\left(n,x\right)=\int_{1}^{\infty}t^{-n}\exp\left(-xt\right)dt\] - - - - -Gumbel (LogWeibull, Fisher-Tippetts, Type I Extreme Value) -========================================================== - -One of a clase of extreme value distributions (right-skewed). - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \exp\left(-\left(x+e^{-x}\right)\right)\\ F\left(x\right) & = & \exp\left(-e^{-x}\right)\\ G\left(q\right) & = & -\log\left(-\log\left(q\right)\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\Gamma\left(1-t\right)\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \gamma=-\psi_{0}\left(1\right)\\ \mu_{2} & = & \frac{\pi^{2}}{6}\\ \gamma_{1} & = & \frac{12\sqrt{6}}{\pi^{3}}\zeta\left(3\right)\\ \gamma_{2} & = & \frac{12}{5}\\ m_{d} & = & 0\\ m_{n} & = & -\log\left(\log2\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]\approx1.0608407169541684911\] - - - - -Gumbel Left-skewed (for minimum order statistic) -================================================ - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \exp\left(x-e^{x}\right)\\ F\left(x\right) & = & 1-\exp\left(-e^{x}\right)\\ G\left(q\right) & = & \log\left(-\log\left(1-q\right)\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\Gamma\left(1+t\right)\] - -Note, that :math:`\mu` is negative the mean for the right-skewed distribution. Similar for -median and mode. All other moments are the same. - - - -.. math:: - :nowrap: - - \[ h\left[X\right]\approx1.0608407169541684911.\] - - - - -HalfCauchy -========== - -If :math:`Z` is Hyperbolic Secant distributed then :math:`e^{Z}` is Half-Cauchy distributed. Also, if :math:`W` is (standard) Cauchy distributed, then :math:`\left|W\right|` is Half-Cauchy distributed. Special case of the Folded Cauchy -distribution with :math:`c=0.` The standard form is - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{2}{\pi\left(1+x^{2}\right)}I_{[0,\infty)}\left(x\right)\\ F\left(x\right) & = & \frac{2}{\pi}\arctan\left(x\right)I_{\left[0,\infty\right]}\left(x\right)\\ G\left(q\right) & = & \tan\left(\frac{\pi}{2}q\right)\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \[ M\left(t\right)=\cos t+\frac{2}{\pi}\left[\textrm{Si}\left(t\right)\cos t-\textrm{Ci}\left(\textrm{-}t\right)\sin t\right]\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} m_{d} & = & 0\\ m_{n} & = & \tan\left(\frac{\pi}{4}\right)\end{eqnarray*} - -No moments, as the integrals diverge. - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(2\pi\right)\\ & \approx & 1.8378770664093454836.\end{eqnarray*} - - - - -HalfNormal -========== - -This is a special case of the chi distribution with :math:`L=a` and :math:`S=b` and :math:`\nu=1.` This is also a special case of the folded normal with shape parameter :math:`c=0` and :math:`S=S.` If :math:`Z` is (standard) normally distributed then, :math:`\left|Z\right|` is half-normal. The standard form is - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \sqrt{\frac{2}{\pi}}e^{-x^{2}/2}I_{\left(0,\infty\right)}\left(x\right)\\ F\left(x\right) & = & 2\Phi\left(x\right)-1\\ G\left(q\right) & = & \Phi^{-1}\left(\frac{1+q}{2}\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\sqrt{2\pi}e^{t^{2}/2}\Phi\left(t\right)\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \sqrt{\frac{2}{\pi}}\\ \mu_{2} & = & 1-\frac{2}{\pi}\\ \gamma_{1} & = & \frac{\sqrt{2}\left(4-\pi\right)}{\left(\pi-2\right)^{3/2}}\\ \gamma_{2} & = & \frac{8\left(\pi-3\right)}{\left(\pi-2\right)^{2}}\\ m_{d} & = & 0\\ m_{n} & = & \Phi^{-1}\left(\frac{3}{4}\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(\sqrt{\frac{\pi e}{2}}\right)\\ & \approx & 0.72579135264472743239.\end{eqnarray*} - - - - -Half-Logistic -============= - -In the limit as :math:`c\rightarrow\infty` for the generalized half-logistic we have the half-logistic defined -over :math:`x\geq0.` Also, the distribution of :math:`\left|X\right|` where :math:`X` has logistic distribtution. - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{2e^{-x}}{\left(1+e^{-x}\right)^{2}}=\frac{1}{2}\textrm{sech}^{2}\left(\frac{x}{2}\right)\\ F\left(x\right) & = & \frac{1-e^{-x}}{1+e^{-x}}=\tanh\left(\frac{x}{2}\right)\\ G\left(q\right) & = & \log\left(\frac{1+q}{1-q}\right)=2\textrm{arctanh}\left(q\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=1-t\psi_{0}\left(\frac{1}{2}-\frac{t}{2}\right)+t\psi_{0}\left(1-\frac{t}{2}\right)\] - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=2\left(1-2^{1-n}\right)n!\zeta\left(n\right)\quad n\neq1\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu_{1}^{\prime} & = & 2\log\left(2\right)\\ \mu_{2}^{\prime} & = & 2\zeta\left(2\right)=\frac{\pi^{2}}{3}\\ \mu_{3}^{\prime} & = & 9\zeta\left(3\right)\\ \mu_{4}^{\prime} & = & 42\zeta\left(4\right)=\frac{7\pi^{4}}{15}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & 2-\log\left(2\right)\\ & \approx & 1.3068528194400546906.\end{eqnarray*} - - - - -Hyperbolic Secant -================= - -Related to the logistic distribution and used in lifetime analysis. -Standard form is (defined over all :math:`x` ) - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{\pi}\textrm{sech}\left(x\right)\\ F\left(x\right) & = & \frac{2}{\pi}\arctan\left(e^{x}\right)\\ G\left(q\right) & = & \log\left(\tan\left(\frac{\pi}{2}q\right)\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\sec\left(\frac{\pi}{2}t\right)\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu_{n}^{\prime} & = & \frac{1+\left(-1\right)^{n}}{2\pi2^{2n}}n!\left[\zeta\left(n+1,\frac{1}{4}\right)-\zeta\left(n+1,\frac{3}{4}\right)\right]\\ & = & \left\{ \begin{array}{cc} 0 & n\textrm{ odd}\\ C_{n/2}\frac{\pi^{n}}{2^{n}} & n\textrm{ even}\end{array}\right.\end{eqnarray*} - -where :math:`C_{m}` is an integer given by - -.. 
math:: - :nowrap: - - \begin{eqnarray*} C_{m} & = & \frac{\left(2m\right)!\left[\zeta\left(2m+1,\frac{1}{4}\right)-\zeta\left(2m+1,\frac{3}{4}\right)\right]}{\pi^{2m+1}2^{2m}}\\ & = & 4\left(-1\right)^{m-1}\frac{16^{m}}{2m+1}B_{2m+1}\left(\frac{1}{4}\right)\end{eqnarray*} - -where :math:`B_{2m+1}\left(\frac{1}{4}\right)` is the Bernoulli polynomial of order :math:`2m+1` evaluated at :math:`1/4.` Thus - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\left\{ \begin{array}{cc} 0 & n\textrm{ odd}\\ 4\left(-1\right)^{n/2-1}\frac{\left(2\pi\right)^{n}}{n+1}B_{n+1}\left(\frac{1}{4}\right) & n\textrm{ even}\end{array}\right.\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} m_{d}=m_{n}=\mu & = & 0\\ \mu_{2} & = & \frac{\pi^{2}}{4}\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & 2\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\log\left(2\pi\right).\] - - - - -Gauss Hypergeometric -==================== - -:math:`x\in\left[0,1\right]` , :math:`\alpha>0,\,\beta>0` - -.. math:: - :nowrap: - - \[ C^{-1}=B\left(\alpha,\beta\right)\,_{2}F_{1}\left(\gamma,\alpha;\alpha+\beta;-z\right)\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\alpha,\beta,\gamma,z\right) & = & Cx^{\alpha-1}\frac{\left(1-x\right)^{\beta-1}}{\left(1+zx\right)^{\gamma}}\\ \mu_{n}^{\prime} & = & \frac{B\left(n+\alpha,\beta\right)}{B\left(\alpha,\beta\right)}\frac{\,_{2}F_{1}\left(\gamma,\alpha+n;\alpha+\beta+n;-z\right)}{\,_{2}F_{1}\left(\gamma,\alpha;\alpha+\beta;-z\right)}\end{eqnarray*} - - - - -Inverted Gamma -============== - -Special case of the generalized Gamma distribution with :math:`c=-1` and :math:`a>0` , :math:`x>0` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a\right) & = & \frac{x^{-a-1}}{\Gamma\left(a\right)}\exp\left(-\frac{1}{x}\right)\\ F\left(x;a\right) & = & \frac{\Gamma\left(a,\frac{1}{x}\right)}{\Gamma\left(a\right)}\\ G\left(q;a\right) & = & \left\{ \Gamma^{-1}\left[a,\Gamma\left(a\right)q\right]\right\} ^{-1}\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\frac{\Gamma\left(a-n\right)}{\Gamma\left(a\right)}\quad a>n\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{1}{a-1}\quad a>1\\ \mu_{2} & = & \frac{1}{\left(a-2\right)\left(a-1\right)}-\mu^{2}\quad a>2\\ \gamma_{1} & = & \frac{\frac{1}{\left(a-3\right)\left(a-2\right)\left(a-1\right)}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\frac{1}{\left(a-4\right)\left(a-3\right)\left(a-2\right)\left(a-1\right)}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ m_{d}=\frac{1}{a+1}\] - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=a-\left(a+1\right)\Psi\left(a\right)+\log\Gamma\left(a\right).\] - - - - -Inverse Normal (Inverse Gaussian) -================================= - -The standard form involves the shape parameter :math:`\mu` (in most definitions, :math:`L=0.0` is used). (In terms of the regress documentation :math:`\mu=A/B` ) and :math:`B=S` and :math:`L` is not a parameter in that distribution. A standard form is :math:`x>0` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\mu\right) & = & \frac{1}{\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-\mu\right)^{2}}{2x\mu^{2}}\right).\\ F\left(x;\mu\right) & = & \Phi\left(\frac{1}{\sqrt{x}}\frac{x-\mu}{\mu}\right)+\exp\left(\frac{2}{\mu}\right)\Phi\left(-\frac{1}{\sqrt{x}}\frac{x+\mu}{\mu}\right)\\ G\left(q;\mu\right) & = & F^{-1}\left(q;\mu\right)\end{eqnarray*} - - - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \mu\\ \mu_{2} & = & \mu^{3}\\ \gamma_{1} & = & 3\sqrt{\mu}\\ \gamma_{2} & = & 15\mu\\ m_{d} & = & \frac{\mu}{2}\left(\sqrt{9\mu^{2}+4}-3\mu\right)\end{eqnarray*} - - - -This is related to the canonical form or JKB "two-parameter" inverse Gaussian when written in its full form with scale parameter :math:`S` and location parameter :math:`L` by taking :math:`L=0` and :math:`S\equiv\lambda,` then :math:`\mu S` is equal to :math:`\mu_{2}` where :math:`\mu_{2}` is the parameter used by JKB. We prefer this form because of its -consistent use of the scale parameter. Notice that in JKB the skew :math:`\left(\sqrt{\beta_{1}}\right)` and the kurtosis ( :math:`\beta_{2}-3` ) are both functions only of :math:`\mu_{2}/\lambda=\mu S/S=\mu` as shown here, while the variance and mean of the standard form here -are transformed appropriately. - - -Inverted Weibull -================ - -Shape parameter :math:`c>0` and :math:`x>0` . Then - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & cx^{-c-1}\exp\left(-x^{-c}\right)\\ F\left(x;c\right) & = & \exp\left(-x^{-c}\right)\\ G\left(q;c\right) & = & \left(-\log q\right)^{-1/c}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=1+\gamma+\frac{\gamma}{c}-\log\left(c\right)\] - -where :math:`\gamma` is Euler's constant. - - -Johnson SB -========== - -Defined for :math:`x\in\left(0,1\right)` with two shape parameters :math:`a` and :math:`b>0.` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,b\right) & = & \frac{b}{x\left(1-x\right)}\phi\left(a+b\log\frac{x}{1-x}\right)\\ F\left(x;a,b\right) & = & \Phi\left(a+b\log\frac{x}{1-x}\right)\\ G\left(q;a,b\right) & = & \frac{1}{1+\exp\left[-\frac{1}{b}\left(\Phi^{-1}\left(q\right)-a\right)\right]}\end{eqnarray*} - - - - -Johnson SU -========== - -Defined for all :math:`x` with two shape parameters :math:`a` and :math:`b>0` . - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,b\right) & = & \frac{b}{\sqrt{x^{2}+1}}\phi\left(a+b\log\left(x+\sqrt{x^{2}+1}\right)\right)\\ F\left(x;a,b\right) & = & \Phi\left(a+b\log\left(x+\sqrt{x^{2}+1}\right)\right)\\ G\left(q;a,b\right) & = & \sinh\left[\frac{\Phi^{-1}\left(q\right)-a}{b}\right]\end{eqnarray*} - - - - -KSone -===== - - -KStwo -===== - - -Laplace (Double Exponential, Bilateral Exponential) -====================================================== - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{2}e^{-\left|x\right|}\\ F\left(x\right) & = & \left\{ \begin{array}{ccc} \frac{1}{2}e^{x} & & x\leq0\\ 1-\frac{1}{2}e^{-x} & & x>0\end{array}\right.\\ G\left(q\right) & = & \left\{ \begin{array}{ccc} \log\left(2q\right) & & q\leq\frac{1}{2}\\ -\log\left(2-2q\right) & & q>\frac{1}{2}\end{array}\right.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} m_{d}=m_{n}=\mu & = & 0\\ \mu_{2} & = & 2\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & 3\end{eqnarray*} - - - -The ML estimator of the location parameter is - -.. math:: - :nowrap: - - \[ \hat{L}=\textrm{median}\left(X_{i}\right)\] - -where :math:`X_{i}` is a sequence of :math:`N` mutually independent Laplace RV's and the median is some number -between the :math:`\frac{1}{2}N\textrm{th}` and the :math:`(N/2+1)\textrm{th}` order statistic ( *e.g.* take the average of these two) when :math:`N` is even. Also, - -.. math:: - :nowrap: - - \[ \hat{S}=\frac{1}{N}\sum_{j=1}^{N}\left|X_{j}-\hat{L}\right|.\] - -Replace :math:`\hat{L}` with :math:`L` if it is known. If :math:`L` is known then this estimator is distributed as :math:`\left(2N\right)^{-1}S\cdot\chi_{2N}^{2}` . - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(2e\right)\\ & \approx & 1.6931471805599453094.\end{eqnarray*} - - - - -Left-skewed Lévy -================= - -Special case of Lévy-stable distribution with :math:`\alpha=\frac{1}{2}` and :math:`\beta=-1` the support is :math:`x<0` . In standard form - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{\left|x\right|\sqrt{2\pi\left|x\right|}}\exp\left(-\frac{1}{2\left|x\right|}\right)\\ F\left(x\right) & = & 2\Phi\left(\frac{1}{\sqrt{\left|x\right|}}\right)-1\\ G\left(q\right) & = & -\left[\Phi^{-1}\left(\frac{q+1}{2}\right)\right]^{-2}.\end{eqnarray*} - -No moments. - - -Lévy -===== - -A special case of Lévy-stable distributions with :math:`\alpha=\frac{1}{2}` and :math:`\beta=1` . In standard form it is defined for :math:`x>0` as - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{x\sqrt{2\pi x}}\exp\left(-\frac{1}{2x}\right)\\ F\left(x\right) & = & 2\left[1-\Phi\left(\frac{1}{\sqrt{x}}\right)\right]\\ G\left(q\right) & = & \left[\Phi^{-1}\left(1-\frac{q}{2}\right)\right]^{-2}.\end{eqnarray*} - -It has no finite moments. - - -Logistic (Sech-squared) -======================= - -A special case of the Generalized Logistic distribution with :math:`c=1.` Defined for :math:`x>0` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{\exp\left(-x\right)}{\left[1+\exp\left(-x\right)\right]^{2}}\\ F\left(x\right) & = & \frac{1}{1+\exp\left(-x\right)}\\ G\left(q\right) & = & -\log\left(1/q-1\right)\end{eqnarray*} - - - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \gamma+\psi_{0}\left(1\right)=0\\ \mu_{2} & = & \frac{\pi^{2}}{6}+\psi_{1}\left(1\right)=\frac{\pi^{2}}{3}\\ \gamma_{1} & = & \frac{\psi_{2}\left(c\right)+2\zeta\left(3\right)}{\mu_{2}^{3/2}}=0\\ \gamma_{2} & = & \frac{\left(\frac{\pi^{4}}{15}+\psi_{3}\left(c\right)\right)}{\mu_{2}^{2}}=\frac{6}{5}\\ m_{d} & = & \log1=0\\ m_{n} & = & -\log\left(2-1\right)=0\end{eqnarray*} - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=1.\] - - - - -Log Double Exponential (Log-Laplace) -==================================== - -Defined over :math:`x>0` with :math:`c>0` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \left\{ \begin{array}{ccc} \frac{c}{2}x^{c-1} & & 0<x<1\\ \frac{c}{2}x^{-c-1} & & x\geq1\end{array}\right.\\ F\left(x;c\right) & = & \left\{ \begin{array}{ccc} \frac{1}{2}x^{c} & & 0<x<1\\ 1-\frac{1}{2}x^{-c} & & x\geq1\end{array}\right.\\ G\left(q;c\right) & = & \left\{ \begin{array}{ccc} \left(2q\right)^{1/c} & & 0\leq q<\frac{1}{2}\\ \left(2-2q\right)^{-1/c} & & \frac{1}{2}\leq q\leq1\end{array}\right.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\log\left(\frac{2e}{c}\right)\] - - - - -Log Gamma -========= - -A single shape parameter :math:`c>0` (Defined for all :math:`x` ) - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{\exp\left(cx-e^{x}\right)}{\Gamma\left(c\right)}\\ F\left(x;c\right) & = & \frac{\Gamma\left(c,e^{x}\right)}{\Gamma\left(c\right)}\\ G\left(q;c\right) & = & \log\left[\Gamma^{-1}\left[c,q\Gamma\left(c\right)\right]\right]\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\int_{0}^{\infty}\left[\log y\right]^{n}y^{c-1}\exp\left(-y\right)dy.\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \mu_{1}^{\prime}\\ \mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ \gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - - - - -Log Normal (Cobb-Douglass) -========================== - -Has one shape parameter :math:`\sigma` >0. (Notice that the "Regress" :math:`A=\log S` where :math:`S` is the scale parameter and :math:`A` is the mean of the underlying normal distribution). The standard form -is :math:`x>0` - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\sigma\right) & = & \frac{1}{\sigma x\sqrt{2\pi}}\exp\left[-\frac{1}{2}\left(\frac{\log x}{\sigma}\right)^{2}\right]\\ F\left(x;\sigma\right) & = & \Phi\left(\frac{\log x}{\sigma}\right)\\ G\left(q;\sigma\right) & = & \exp\left\{ \sigma\Phi^{-1}\left(q\right)\right\} \end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \exp\left(\sigma^{2}/2\right)\\ \mu_{2} & = & \exp\left(\sigma^{2}\right)\left[\exp\left(\sigma^{2}\right)-1\right]\\ \gamma_{1} & = & \sqrt{p-1}\left(2+p\right)\\ \gamma_{2} & = & p^{4}+2p^{3}+3p^{2}-6\quad\quad p=e^{\sigma^{2}}\end{eqnarray*} - - - -Notice that using JKB notation we have :math:`\theta=L,` :math:`\zeta=\log S` and we have given the so-called antilognormal form of the -distribution. This is more consistent with the location, scale -parameter description of general probability distributions. - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\frac{1}{2}\left[1+\log\left(2\pi\right)+2\log\left(\sigma\right)\right].\] - - - -Also, note that if :math:`X` is a log-normally distributed random-variable with :math:`L=0` and :math:`S` and shape parameter :math:`\sigma.` Then, :math:`\log X` is normally distributed with variance :math:`\sigma^{2}` and mean :math:`\log S.` - - -Nakagami -======== - -Generalization of the chi distribution. Shape parameter is :math:`\nu>0.` Defined for :math:`x>0.` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\nu\right) & = & \frac{2\nu^{\nu}}{\Gamma\left(\nu\right)}x^{2\nu-1}\exp\left(-\nu x^{2}\right)\\ F\left(x;\nu\right) & = & \Gamma\left(\nu,\nu x^{2}\right)\\ G\left(q;\nu\right) & = & \sqrt{\frac{1}{\nu}\Gamma^{-1}\left(v,q\right)}\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{\Gamma\left(\nu+\frac{1}{2}\right)}{\sqrt{\nu}\Gamma\left(\nu\right)}\\ \mu_{2} & = & \left[1-\mu^{2}\right]\\ \gamma_{1} & = & \frac{\mu\left(1-4v\mu_{2}\right)}{2\nu\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{-6\mu^{4}\nu+\left(8\nu-2\right)\mu^{2}-2\nu+1}{\nu\mu_{2}^{2}}\end{eqnarray*} - - - - -Noncentral beta* -================ - -Defined over :math:`x\in\left[0,1\right]` with :math:`a>0` and :math:`b>0` and :math:`c\geq0` - - - -.. math:: - :nowrap: - - \[ F\left(x;a,b,c\right)=\sum_{j=0}^{\infty}\frac{e^{-c/2}\left(\frac{c}{2}\right)^{j}}{j!}I_{B}\left(a+j,b;0\right)\] - - - - -Noncentral chi* -=============== - - -Noncentral chi-squared -====================== - -The distribution of :math:`\sum_{i=1}^{\nu}\left(Z_{i}+\delta_{i}\right)^{2}` where :math:`Z_{i}` are independent standard normal variables and :math:`\delta_{i}` are constants. :math:`\lambda=\sum_{i=1}^{\nu}\delta_{i}^{2}>0.` (In communications it is called the Marcum-Q function). Can be thought -of as a Generalized Rayleigh-Rice distribution. For :math:`x>0` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\nu,\lambda\right) & = & e^{-\left(\lambda+x\right)/2}\frac{1}{2}\left(\frac{x}{\lambda}\right)^{\left(\nu-2\right)/4}I_{\left(\nu-2\right)/2}\left(\sqrt{\lambda x}\right)\\ F\left(x;\nu,\lambda\right) & = & \sum_{j=0}^{\infty}\left\{ \frac{\left(\lambda/2\right)^{j}}{j!}e^{-\lambda/2}\right\} \textrm{Pr}\left[\chi_{\nu+2j}^{2}\leq x\right]\\ G\left(q;\nu,\lambda\right) & = & F^{-1}\left(x;\nu,\lambda\right)\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \nu+\lambda\\ \mu_{2} & = & 2\left(\nu+2\lambda\right)\\ \gamma_{1} & = & \frac{\sqrt{8}\left(\nu+3\lambda\right)}{\left(\nu+2\lambda\right)^{3/2}}\\ \gamma_{2} & = & \frac{12\left(\nu+4\lambda\right)}{\left(\nu+2\lambda\right)^{2}}\end{eqnarray*} - - - - -Noncentral F -============ - -Let :math:`\lambda>0` and :math:`\nu_{1}>0` and :math:`\nu_{2}>0.` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\lambda,\nu_{1},\nu_{2}\right) & = & \exp\left[\frac{\lambda}{2}+\frac{\left(\lambda\nu_{1}x\right)}{2\left(\nu_{1}x+\nu_{2}\right)}\right]\nu_{1}^{\nu_{1}/2}\nu_{2}^{\nu_{2}/2}x^{\nu_{1}/2-1}\\ & & \times\left(\nu_{2}+\nu_{1}x\right)^{-\left(\nu_{1}+\nu_{2}\right)/2}\frac{\Gamma\left(\frac{\nu_{1}}{2}\right)\Gamma\left(1+\frac{\nu_{2}}{2}\right)L_{\nu_{2}/2}^{\nu_{1}/2-1}\left(-\frac{\lambda\nu_{1}x}{2\left(\nu_{1}x+\nu_{2}\right)}\right)}{B\left(\frac{\nu_{1}}{2},\frac{\nu_{2}}{2}\right)\Gamma\left(\frac{\nu_{1}+\nu_{2}}{2}\right)}\end{eqnarray*} - - - - -Noncentral t -============ - -The distribution of the ratio - -.. math:: - :nowrap: - - \[ \frac{U+\lambda}{\chi_{\nu}/\sqrt{\nu}}\] - -where :math:`U` and :math:`\chi_{\nu}` are independent and distributed as a standard normal and chi with :math:`\nu` degrees of freedom. Note :math:`\lambda>0` and :math:`\nu>0` . - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\lambda,\nu\right) & = & \frac{\nu^{\nu/2}\Gamma\left(\nu+1\right)}{2^{\nu}e^{\lambda^{2}/2}\left(\nu+x^{2}\right)^{\nu/2}\Gamma\left(\nu/2\right)}\\ & & \times\left\{ \frac{\sqrt{2}\lambda x\,_{1}F_{1}\left(\frac{\nu}{2}+1;\frac{3}{2};\frac{\lambda^{2}x^{2}}{2\left(\nu+x^{2}\right)}\right)}{\left(\nu+x^{2}\right)\Gamma\left(\frac{\nu+1}{2}\right)}\right.\\ & & -\left.\frac{\,_{1}F_{1}\left(\frac{\nu+1}{2};\frac{1}{2};\frac{\lambda^{2}x^{2}}{2\left(\nu+x^{2}\right)}\right)}{\sqrt{\nu+x^{2}}\Gamma\left(\frac{\nu}{2}+1\right)}\right\} \\ & = & \frac{\Gamma\left(\nu+1\right)}{2^{\left(\nu-1\right)/2}\sqrt{\pi\nu}\Gamma\left(\nu/2\right)}\exp\left[-\frac{\nu\lambda^{2}}{\nu+x^{2}}\right]\\ & & \times\left(\frac{\nu}{\nu+x^{2}}\right)^{\left(\nu-1\right)/2}Hh_{\nu}\left(-\frac{\lambda x}{\sqrt{\nu+x^{2}}}\right)\\ F\left(x;\lambda,\nu\right) & =\end{eqnarray*} - - - - -Normal -====== - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{e^{-x^{2}/2}}{\sqrt{2\pi}}\\ F\left(x\right) & = & \Phi\left(x\right)=\frac{1}{2}+\frac{1}{2}\textrm{erf}\left(\frac{\textrm{x}}{\sqrt{2}}\right)\\ G\left(q\right) & = & \Phi^{-1}\left(q\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} m_{d}=m_{n}=\mu & = & 0\\ \mu_{2} & = & 1\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & 0\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} h\left[X\right] & = & \log\left(\sqrt{2\pi e}\right)\\ & \approx & 1.4189385332046727418\end{eqnarray*} - - - - -Maxwell -======= - -This is a special case of the Chi distribution with :math:`L=0` and :math:`S=S=\frac{1}{\sqrt{a}}` and :math:`\nu=3.` - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \sqrt{\frac{2}{\pi}}x^{2}e^{-x^{2}/2}I_{\left(0,\infty\right)}\left(x\right)\\ F\left(x\right) & = & \Gamma\left(\frac{3}{2},\frac{x^{2}}{2}\right)\\ G\left(\alpha\right) & = & \sqrt{2\Gamma^{-1}\left(\frac{3}{2},\alpha\right)}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 2\sqrt{\frac{2}{\pi}}\\ \mu_{2} & = & 3-\frac{8}{\pi}\\ \gamma_{1} & = & \sqrt{2}\frac{32-10\pi}{\left(3\pi-8\right)^{3/2}}\\ \gamma_{2} & = & \frac{-12\pi^{2}+160\pi-384}{\left(3\pi-8\right)^{2}}\\ m_{d} & = & \sqrt{2}\\ m_{n} & = & \sqrt{2\Gamma^{-1}\left(\frac{3}{2},\frac{1}{2}\right)}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\log\left(\sqrt{\frac{2\pi}{e}}\right)+\gamma.\] - - - - -Mielke's Beta-Kappa -=================== - -A generalized F distribution. Two shape parameters :math:`\kappa` and :math:`\theta` , and :math:`x>0` . The :math:`\beta` in the DATAPLOT reference is a scale parameter. - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\kappa,\theta\right) & = & \frac{\kappa x^{\kappa-1}}{\left(1+x^{\theta}\right)^{1+\frac{\kappa}{\theta}}}\\ F\left(x;\kappa,\theta\right) & = & \frac{x^{\kappa}}{\left(1+x^{\theta}\right)^{\kappa/\theta}}\\ G\left(q;\kappa,\theta\right) & = & \left(\frac{q^{\theta/\kappa}}{1-q^{\theta/\kappa}}\right)^{1/\theta}\end{eqnarray*} - - - - -Pareto -====== - -For :math:`x\geq1` and :math:`b>0` . Standard form is - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;b\right) & = & \frac{b}{x^{b+1}}\\ F\left(x;b\right) & = & 1-\frac{1}{x^{b}}\\ G\left(q;b\right) & = & \left(1-q\right)^{-1/b}\end{eqnarray*} - - - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{b}{b-1}\quad b>1\\ \mu_{2} & = & \frac{b}{\left(b-2\right)\left(b-1\right)^{2}}\quad b>2\\ \gamma_{1} & = & \frac{2\left(b+1\right)\sqrt{b-2}}{\left(b-3\right)\sqrt{b}}\quad b>3\\ \gamma_{2} & = & \frac{6\left(b^{3}+b^{2}-6b-2\right)}{b\left(b^{2}-7b+12\right)}\quad b>4\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ h\left(X\right)=\frac{1}{b}+1-\log\left(b\right)\] - - - - -Pareto Second Kind (Lomax) -========================== - -:math:`c>0.` This is Pareto of the first kind with :math:`L=-1.0` so :math:`x\geq0` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{c}{\left(1+x\right)^{c+1}}\\ F\left(x;c\right) & = & 1-\frac{1}{\left(1+x\right)^{c}}\\ G\left(q;c\right) & = & \left(1-q\right)^{-1/c}-1\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\frac{1}{c}+1-\log\left(c\right).\] - - - - -Power Log Normal -================ - -A generalization of the log-normal distribution :math:`\sigma>0` and :math:`c>0` and :math:`x>0` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\sigma,c\right) & = & \frac{c}{x\sigma}\phi\left(\frac{\log x}{\sigma}\right)\left(\Phi\left(-\frac{\log x}{\sigma}\right)\right)^{c-1}\\ F\left(x;\sigma,c\right) & = & 1-\left(\Phi\left(-\frac{\log x}{\sigma}\right)\right)^{c}\\ G\left(q;\sigma,c\right) & = & \exp\left[-\sigma\Phi^{-1}\left[\left(1-q\right)^{1/c}\right]\right]\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\int_{0}^{1}\exp\left[-n\sigma\Phi^{-1}\left(y^{1/c}\right)\right]dy\] - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \mu_{1}^{\prime}\\ \mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ \gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -This distribution reduces to the log-normal distribution when :math:`c=1.` - - -Power Normal -============ - -A generalization of the normal distribution, :math:`c>0` for - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & c\phi\left(x\right)\left(\Phi\left(-x\right)\right)^{c-1}\\ F\left(x;c\right) & = & 1-\left(\Phi\left(-x\right)\right)^{c}\\ G\left(q;c\right) & = & -\Phi^{-1}\left[\left(1-q\right)^{1/c}\right]\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\left(-1\right)^{n}\int_{0}^{1}\left[\Phi^{-1}\left(y^{1/c}\right)\right]^{n}dy\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \mu_{1}^{\prime}\\ \mu_{2} & = & \mu_{2}^{\prime}-\mu^{2}\\ \gamma_{1} & = & \frac{\mu_{3}^{\prime}-3\mu\mu_{2}-\mu^{3}}{\mu_{2}^{3/2}}\\ \gamma_{2} & = & \frac{\mu_{4}^{\prime}-4\mu\mu_{3}-6\mu^{2}\mu_{2}-\mu^{4}}{\mu_{2}^{2}}-3\end{eqnarray*} - -For :math:`c=1` this reduces to the normal distribution. - - -Power-function -============== - -A special case of the beta distribution with :math:`b=1` : defined for :math:`x\in\left[0,1\right]` - - - -.. math:: - :nowrap: - - \[ a>0\] - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a\right) & = & ax^{a-1}\\ F\left(x;a\right) & = & x^{a}\\ G\left(q;a\right) & = & q^{1/a}\\ \mu & = & \frac{a}{a+1}\\ \mu_{2} & = & \frac{a\left(a+2\right)}{\left(a+1\right)^{2}}\\ \gamma_{1} & = & 2\left(1-a\right)\sqrt{\frac{a+2}{a\left(a+3\right)}}\\ \gamma_{2} & = & \frac{6\left(a^{3}-a^{2}-6a+2\right)}{a\left(a+3\right)\left(a+4\right)}\\ m_{d} & = & 1\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \[ h\left[X\right]=1-\frac{1}{a}-\log\left(a\right)\] - - - - -R-distribution -============== - -A general-purpose distribution with a variety of shapes controlled by :math:`c>0.` Range of standard distribution is :math:`x\in\left[-1,1\right]` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{\left(1-x^{2}\right)^{c/2-1}}{B\left(\frac{1}{2},\frac{c}{2}\right)}\\ F\left(x;c\right) & = & \frac{1}{2}+\frac{x}{B\left(\frac{1}{2},\frac{c}{2}\right)}\,_{2}F_{1}\left(\frac{1}{2},1-\frac{c}{2};\frac{3}{2};x^{2}\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\frac{\left(1+\left(-1\right)^{n}\right)}{2}B\left(\frac{n+1}{2},\frac{c}{2}\right)\] - -The R-distribution with parameter :math:`n` is the distribution of the correlation coefficient of a random sample -of size :math:`n` drawn from a bivariate normal distribution with :math:`\rho=0.` The mean of the standard distribution is always zero and as the sample -size grows, the distribution's mass concentrates more closely about -this mean. - - -Rayleigh -======== - -This is Chi distribution with :math:`L=0.0` and :math:`\nu=2` and :math:`S=S` (no location parameter is generally used), the mode of the -distribution is :math:`S.` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(r\right) & = & re^{-r^{2}/2}I_{[0,\infty)}\left(x\right)\\ F\left(r\right) & = & 1-e^{-r^{2}/2}I_{[0,\infty)}\left(x\right)\\ G\left(q\right) & = & \sqrt{-2\log\left(1-q\right)}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \sqrt{\frac{\pi}{2}}\\ \mu_{2} & = & \frac{4-\pi}{2}\\ \gamma_{1} & = & \frac{2\left(\pi-3\right)\sqrt{\pi}}{\left(4-\pi\right)^{3/2}}\\ \gamma_{2} & = & \frac{24\pi-6\pi^{2}-16}{\left(4-\pi\right)^{2}}\\ m_{d} & = & 1\\ m_{n} & = & \sqrt{2\log\left(2\right)}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\frac{\gamma}{2}+\log\left(\frac{e}{\sqrt{2}}\right).\] - - - - - -.. 
math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\sqrt{2^{n}}\Gamma\left(\frac{n}{2}+1\right)\] - - - - -Rice* -===== - -Defined for :math:`x>0` and :math:`b>0` - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;b\right) & = & x\exp\left(-\frac{x^{2}+b^{2}}{2}\right)I_{0}\left(xb\right)\\ F\left(x;b\right) & = & \int_{0}^{x}\alpha\exp\left(-\frac{\alpha^{2}+b^{2}}{2}\right)I_{0}\left(\alpha b\right)d\alpha\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\sqrt{2^{n}}\Gamma\left(1+\frac{n}{2}\right)\,_{1}F_{1}\left(-\frac{n}{2};1;-\frac{b^{2}}{2}\right)\] - - - - -Reciprocal -========== - -Shape parameters :math:`a,b>0` :math:`x\in\left[a,b\right]` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;a,b\right) & = & \frac{1}{x\log\left(b/a\right)}\\ F\left(x;a,b\right) & = & \frac{\log\left(x/a\right)}{\log\left(b/a\right)}\\ G\left(q;a,b\right) & = & a\exp\left(q\log\left(b/a\right)\right)=a\left(\frac{b}{a}\right)^{q}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} d & = & \log\left(a/b\right)\\ \mu & = & \frac{a-b}{d}\\ \mu_{2} & = & \mu\frac{a+b}{2}-\mu^{2}=\frac{\left(a-b\right)\left[a\left(d-2\right)+b\left(d+2\right)\right]}{2d^{2}}\\ \gamma_{1} & = & \frac{\sqrt{2}\left[12d\left(a-b\right)^{2}+d^{2}\left(a^{2}\left(2d-9\right)+2abd+b^{2}\left(2d+9\right)\right)\right]}{3d\sqrt{a-b}\left[a\left(d-2\right)+b\left(d+2\right)\right]^{3/2}}\\ \gamma_{2} & = & \frac{-36\left(a-b\right)^{3}+36d\left(a-b\right)^{2}\left(a+b\right)-16d^{2}\left(a^{3}-b^{3}\right)+3d^{3}\left(a^{2}+b^{2}\right)\left(a+b\right)}{3\left(a-b\right)\left[a\left(d-2\right)+b\left(d+2\right)\right]^{2}}-3\\ m_{d} & = & a\\ m_{n} & = & \sqrt{ab}\end{eqnarray*} - - - -.. 
math:: - :nowrap: - - \[ h\left[X\right]=\frac{1}{2}\log\left(ab\right)+\log\left[\log\left(\frac{b}{a}\right)\right].\] - - - - -Reciprocal Inverse Gaussian -=========================== - -The pdf is found from the inverse gaussian (IG), :math:`f_{RIG}\left(x;\mu\right)=\frac{1}{x^{2}}f_{IG}\left(\frac{1}{x};\mu\right)` defined for :math:`x\geq0` as - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f_{IG}\left(x;\mu\right) & = & \frac{1}{\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-\mu\right)^{2}}{2x\mu^{2}}\right).\\ F_{IG}\left(x;\mu\right) & = & \Phi\left(\frac{1}{\sqrt{x}}\frac{x-\mu}{\mu}\right)+\exp\left(\frac{2}{\mu}\right)\Phi\left(-\frac{1}{\sqrt{x}}\frac{x+\mu}{\mu}\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f_{RIG}\left(x;\mu\right) & = & \frac{1}{\sqrt{2\pi x}}\exp\left(-\frac{\left(1-\mu x\right)^{2}}{2x\mu^{2}}\right)\\ F_{RIG}\left(x;\mu\right) & = & 1-F_{IG}\left(\frac{1}{x},\mu\right)\\ & = & 1-\Phi\left(\frac{1}{\sqrt{x}}\frac{1-\mu x}{\mu}\right)-\exp\left(\frac{2}{\mu}\right)\Phi\left(-\frac{1}{\sqrt{x}}\frac{1+\mu x}{\mu}\right)\end{eqnarray*} - - - - -Semicircular -============ - -Defined on :math:`x\in\left[-1,1\right]` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{2}{\pi}\sqrt{1-x^{2}}\\ F\left(x\right) & = & \frac{1}{2}+\frac{1}{\pi}\left[x\sqrt{1-x^{2}}+\arcsin x\right]\\ G\left(q\right) & = & F^{-1}\left(q\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} m_{d}=m_{n}=\mu & = & 0\\ \mu_{2} & = & \frac{1}{4}\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & -1\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=0.64472988584940017414.\] - - - - -Studentized Range* -================== - - -Student t -========= - -Shape parameter :math:`\nu>0.` :math:`I\left(a,b,x\right)` is the incomplete beta integral and :math:`I^{-1}\left(a,b,I\left(a,b,x\right)\right)=x` - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\nu\right) & = & \frac{\Gamma\left(\frac{\nu+1}{2}\right)}{\sqrt{\pi\nu}\Gamma\left(\frac{\nu}{2}\right)\left[1+\frac{x^{2}}{\nu}\right]^{\frac{\nu+1}{2}}}\\ F\left(x;\nu\right) & = & \left\{ \begin{array}{ccc} \frac{1}{2}I\left(\frac{\nu}{2},\frac{1}{2},\frac{\nu}{\nu+x^{2}}\right) & & x\leq0\\ 1-\frac{1}{2}I\left(\frac{\nu}{2},\frac{1}{2},\frac{\nu}{\nu+x^{2}}\right) & & x\geq0\end{array}\right.\\ G\left(q;\nu\right) & = & \left\{ \begin{array}{ccc} -\sqrt{\frac{\nu}{I^{-1}\left(\frac{\nu}{2},\frac{1}{2},2q\right)}-\nu} & & q\leq\frac{1}{2}\\ \sqrt{\frac{\nu}{I^{-1}\left(\frac{\nu}{2},\frac{1}{2},2-2q\right)}-\nu} & & q\geq\frac{1}{2}\end{array}\right.\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} m_{n}=m_{d}=\mu & = & 0\\ \mu_{2} & = & \frac{\nu}{\nu-2}\quad\nu>2\\ \gamma_{1} & = & 0\quad\nu>3\\ \gamma_{2} & = & \frac{6}{\nu-4}\quad\nu>4\end{eqnarray*} - -As :math:`\nu\rightarrow\infty,` this distribution approaches the standard normal distribution. - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\frac{1}{4}\log\left(\frac{\pi c\Gamma^{2}\left(\frac{c}{2}\right)}{\Gamma^{2}\left(\frac{c+1}{2}\right)}\right)-\frac{\left(c+1\right)}{4}\left[\Psi\left(\frac{c}{2}\right)-cZ\left(c\right)+\pi\tan\left(\frac{\pi c}{2}\right)+\gamma+2\log2\right]\] - -where - -.. math:: - :nowrap: - - \[ Z\left(c\right)=\,_{3}F_{2}\left(1,1,1+\frac{c}{2};\frac{3}{2},2;1\right)=\sum_{k=0}^{\infty}\frac{k!}{k+1}\frac{\Gamma\left(\frac{c}{2}+1+k\right)}{\Gamma\left(\frac{c}{2}+1\right)}\frac{\Gamma\left(\frac{3}{2}\right)}{\Gamma\left(\frac{3}{2}+k\right)}\] - - - - -Student Z -========= - -The student Z distriubtion is defined over all space with one shape -parameter :math:`\nu>0` - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;\nu\right) & = & \frac{\Gamma\left(\frac{\nu}{2}\right)}{\sqrt{\pi}\Gamma\left(\frac{\nu-1}{2}\right)}\left(1+x^{2}\right)^{-\nu/2}\\ F\left(x;\nu\right) & = & \left\{ \begin{array}{ccc} Q\left(x;\nu\right) & & x\leq0\\ 1-Q\left(x;\nu\right) & & x\geq0\end{array}\right.\\ Q\left(x;\nu\right) & = & \frac{\left|x\right|^{1-n}\Gamma\left(\frac{n}{2}\right)\,_{2}F_{1}\left(\frac{n-1}{2},\frac{n}{2};\frac{n+1}{2};-\frac{1}{x^{2}}\right)}{2\sqrt{\pi}\Gamma\left(\frac{n+1}{2}\right)}\end{eqnarray*} - -Interesting moments are - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 0\\ \sigma^{2} & = & \frac{1}{\nu-3}\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & \frac{6}{\nu-5}.\end{eqnarray*} - -The moment generating function is - -.. math:: - :nowrap: - - \[ \theta\left(t\right)=2\sqrt{\left|\frac{t}{2}\right|^{\nu-1}}\frac{K_{\left(n-1\right)/2}\left(\left|t\right|\right)}{\Gamma\left(\frac{\nu-1}{2}\right)}.\] - - - - -Symmetric Power* -================ - - -Triangular -========== - -One shape parameter :math:`c\in[0,1]` giving the distance to the peak as a percentage of the total extent of -the non-zero portion. The location parameter is the start of the non- -zero portion, and the scale-parameter is the width of the non-zero -portion. In standard form we have :math:`x\in\left[0,1\right].` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \left\{ \begin{array}{ccc} 2\frac{x}{c} & & x<c\\ 2\frac{1-x}{1-c} & & x\geq c\end{array}\right.\\ F\left(x;c\right) & = & \left\{ \begin{array}{ccc} \frac{x^{2}}{c} & & x<c\\ \frac{x^{2}-2x+c}{c-1} & & x\geq c\end{array}\right.\\ G\left(q;c\right) & = & \left\{ \begin{array}{ccc} \sqrt{cq} & & q<c\\ 1-\sqrt{\left(1-c\right)\left(1-q\right)} & & q\geq c\end{array}\right.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{c}{3}+\frac{1}{3}\\ \mu_{2} & = & \frac{1-c+c^{2}}{18}\\ \gamma_{1} & = & \frac{\sqrt{2}\left(2c-1\right)\left(c+1\right)\left(c-2\right)}{5\left(1-c+c^{2}\right)^{3/2}}\\ \gamma_{2} & = & -\frac{3}{5}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left(X\right)=\log\left(\frac{1}{2}\sqrt{e}\right)\approx-0.19314718055994530942.\] - - - - -Truncated Exponential -===================== - -This is an exponential distribution defined only over a certain region :math:`0<x<B` . In standard form this is - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;B\right) & = & \frac{e^{-x}}{1-e^{-B}}\\ F\left(x;B\right) & = & \frac{1-e^{-x}}{1-e^{-B}}\\ G\left(q;B\right) & = & -\log\left(1-q+qe^{-B}\right)\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\frac{\Gamma\left(n+1\right)-\Gamma\left(n+1,B\right)}{1-e^{-B}}\] - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\log\left(e^{B}-1\right)+\frac{1+e^{B}\left(B-1\right)}{1-e^{B}}.\] - - - - -Truncated Normal -================ - -A fraction of the normal distribution defined for :math:`x\in\left[A,B\right]` with both :math:`A` and :math:`B` finite. - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x;A,B\right) & = & \frac{\phi\left(x\right)}{\Phi\left(B\right)-\Phi\left(A\right)}\\ F\left(x;A,B\right) & = & \frac{\Phi\left(x\right)-\Phi\left(A\right)}{\Phi\left(B\right)-\Phi\left(A\right)}\\ G\left(q;A,B\right) & = & \Phi^{-1}\left[q\Phi\left(B\right)+\Phi\left(A\right)\left(1-q\right)\right]\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{\phi\left(A\right)-\phi\left(B\right)}{\Phi\left(B\right)-\Phi\left(A\right)}\\ \mu_{2} & = & 1+\frac{A\phi\left(A\right)-B\phi\left(B\right)}{\Phi\left(B\right)-\Phi\left(A\right)}-\mu^{2}\end{eqnarray*} - - - - -Tukey-Lambda -============ - -One shape parameter :math:`\lambda` . The quantile function and density are - -.. math:: - :nowrap: - - \begin{eqnarray*} G\left(p;\lambda\right) & = & \frac{p^{\lambda}-\left(1-p\right)^{\lambda}}{\lambda}\\ G^{\prime}\left(p;\lambda\right) & = & p^{\lambda-1}+\left(1-p\right)^{\lambda-1}\\ f\left(x;\lambda\right) & = & \frac{1}{G^{\prime}\left(F\left(x;\lambda\right);\lambda\right)}\end{eqnarray*} - -with :math:`F\left(x;\lambda\right)=G^{-1}\left(x;\lambda\right)` . - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 0\\ \mu_{2} & = & \frac{2}{\lambda^{2}}\left(\frac{1}{1+2\lambda}-\frac{\Gamma\left(\lambda+1\right)^{2}}{\Gamma\left(2\lambda+2\right)}\right)\\ \gamma_{1} & = & 0\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\int_{0}^{1}\log\left[G^{\prime}\left(p;\lambda\right)\right]dp.\] - - - - -Uniform -======= - -Standard form :math:`x\in\left(0,1\right).` In general form, the lower limit is :math:`L` and the upper limit is :math:`L+S.` - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & 1\\ F\left(x\right) & = & x\\ G\left(q\right) & = & q\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{1}{2}\\ \mu_{2} & = & \frac{1}{12}\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & -\frac{6}{5}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=0.\] - - - - -Von Mises -========= - -Defined for :math:`x\in\left[-\pi,\pi\right]` with shape parameter :math:`b>0` . Note, the PDF and CDF functions are periodic and are always defined -over :math:`x\in\left[-\pi,\pi\right]` regardless of the location parameter. Thus, if an input beyond this -range is given, it is converted to the equivalent angle in this range. -For values of :math:`b<100` the PDF and CDF formulas below are used. Otherwise, a normal -approximation with variance :math:`1/b` is used. - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;b\right) & = & \frac{e^{b\cos x}}{2\pi I_{0}\left(b\right)}\\ F\left(x;b\right) & = & \frac{1}{2}+\frac{x}{2\pi}+\sum_{k=1}^{\infty}\frac{I_{k}\left(b\right)\sin\left(kx\right)}{I_{0}\left(b\right)\pi k}\\ G\left(q;b\right) & = & F^{-1}\left(x;b\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 0\\ \mu_{2} & = & \int_{-\pi}^{\pi}x^{2}f\left(x;b\right)dx\\ \gamma_{1} & = & 0\\ \gamma_{2} & = & \frac{\int_{-\pi}^{\pi}x^{4}f\left(x;b\right)dx}{\mu_{2}^{2}}-3\end{eqnarray*} - -This can be used for defining circular variance. - - -Wald -==== - -Special case of the Inverse Normal with shape parameter set to :math:`1.0` . Defined for :math:`x>0` . - - - -.. math:: - :nowrap: - - \begin{eqnarray*} f\left(x\right) & = & \frac{1}{\sqrt{2\pi x^{3}}}\exp\left(-\frac{\left(x-1\right)^{2}}{2x}\right).\\ F\left(x\right) & = & \Phi\left(\frac{x-1}{\sqrt{x}}\right)+\exp\left(2\right)\Phi\left(-\frac{x+1}{\sqrt{x}}\right)\\ G\left(q;\mu\right) & = & F^{-1}\left(q;\mu\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & 1\\ \mu_{2} & = & 1\\ \gamma_{1} & = & 3\\ \gamma_{2} & = & 15\\ m_{d} & = & \frac{1}{2}\left(\sqrt{13}-3\right)\end{eqnarray*} - - - - -Wishart* -======== - - -Wrapped Cauchy -============== - -For :math:`x\in\left[0,2\pi\right]` :math:`c\in\left(0,1\right)` - -.. 
math:: - :nowrap: - - \begin{eqnarray*} f\left(x;c\right) & = & \frac{1-c^{2}}{2\pi\left(1+c^{2}-2c\cos x\right)}\\ g_{c}\left(x\right) & = & \frac{1}{\pi}\arctan\left[\frac{1+c}{1-c}\tan\left(\frac{x}{2}\right)\right]\\ r_{c}\left(q\right) & = & 2\arctan\left[\frac{1-c}{1+c}\tan\left(\pi q\right)\right]\\ F\left(x;c\right) & = & \left\{ \begin{array}{ccc} g_{c}\left(x\right) & & 0\leq x<\pi\\ 1-g_{c}\left(2\pi-x\right) & & \pi\leq x\leq2\pi\end{array}\right.\\ G\left(q;c\right) & = & \left\{ \begin{array}{ccc} r_{c}\left(q\right) & & 0\leq q<\frac{1}{2}\\ 2\pi-r_{c}\left(1-q\right) & & \frac{1}{2}\leq q\leq1\end{array}\right.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ \] - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\log\left(2\pi\left(1-c^{2}\right)\right).\] diff --git a/scipy-0.10.1/doc/source/tutorial/stats/discrete.lyx b/scipy-0.10.1/doc/source/tutorial/stats/discrete.lyx deleted file mode 100644 index d67a382ac0..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/stats/discrete.lyx +++ /dev/null @@ -1,1218 +0,0 @@ -#LyX 1.3 created this file. For more info see http://www.lyx.org/ -\lyxformat 221 -\textclass article -\language english -\inputencoding auto -\fontscheme default -\graphics default -\paperfontsize default -\spacing single -\papersize Default -\paperpackage a4 -\use_geometry 1 -\use_amsmath 1 -\use_natbib 0 -\use_numerical_citations 0 -\paperorientation portrait -\leftmargin 1in -\topmargin 1in -\rightmargin 1in -\bottommargin 1in -\secnumdepth 3 -\tocdepth 3 -\paragraph_separation indent -\defskip medskip -\quotes_language english -\quotes_times 2 -\papercolumns 1 -\papersides 1 -\paperpagestyle default - -\layout Title - -Discrete Statistical Distributions -\layout Standard - -Discrete random variables take on only a countable number of values. - The commonly used distributions are included in SciPy and described in - this document. 
- Each discrete distribution can take one extra integer parameter: -\begin_inset Formula $L.$ -\end_inset - - The relationship between the general distribution and the standard one - is -\begin_inset Formula \[ -p\left(x\right)=p_{0}\left(x-L\right)\] - -\end_inset - - which allows for shifting of the input. - When a distribution generator is initialized, the discrete distribution - can either specify the beginning and ending (integer) values -\begin_inset Formula $a$ -\end_inset - - and -\begin_inset Formula $b$ -\end_inset - - which must be such that -\begin_inset Formula \[ -p_{0}\left(x\right)=0\quad x<a\;\textrm{or}\; x>b\] - -\end_inset - - in which case, it is assumed that the pdf function is specified on the - integers -\begin_inset Formula $a+mk\leq b$ -\end_inset - - where -\begin_inset Formula $k$ -\end_inset - - is a non-negative integer ( -\begin_inset Formula $0,1,2,\ldots$ -\end_inset - -) and -\begin_inset Formula $m$ -\end_inset - - is a positive integer multiplier. - Alternatively, the two lists -\begin_inset Formula $x_{k}$ -\end_inset - - and -\begin_inset Formula $p\left(x_{k}\right)$ -\end_inset - - can be provided directly in which case a dictionary is set up internally - to evaluate probabilities and generate random variates. - -\layout Subsection - -Probability Mass Function (PMF) -\layout Standard - -The probability mass function of a random variable X is defined as the probabili -ty that the random variable takes on a particular value. 
- -\begin_inset Formula \[ -p\left(x_{k}\right)=P\left[X=x_{k}\right]\] - -\end_inset - - This is also sometimes called the probability density function, although - technically -\begin_inset Formula \[ -f\left(x\right)=\sum_{k}p\left(x_{k}\right)\delta\left(x-x_{k}\right)\] - -\end_inset - - is the probability density function for a discrete distribution -\begin_inset Foot -collapsed false - -\layout Standard - -Note that we will be using -\begin_inset Formula $p$ -\end_inset - - to represent the probability mass function and a parameter (a probability). - The usage should be obvious from context. - -\end_inset - -. - -\layout Subsection - -Cumulative Distribution Function (CDF) -\layout Standard - -The cumulative distribution function is -\begin_inset Formula \[ -F\left(x\right)=P\left[X\leq x\right]=\sum_{x_{k}\leq x}p\left(x_{k}\right)\] - -\end_inset - - and is also useful to be able to compute. - Note that -\begin_inset Formula \[ -F\left(x_{k}\right)-F\left(x_{k-1}\right)=p\left(x_{k}\right)\] - -\end_inset - - -\layout Subsection - -Survival Function -\layout Standard - -The survival function is just -\begin_inset Formula \[ -S\left(x\right)=1-F\left(x\right)=P\left[X>k\right]\] - -\end_inset - - the probability that the random variable is strictly larger than -\begin_inset Formula $k$ -\end_inset - -. 
- -\layout Subsection - -Percent Point Function (Inverse CDF) -\layout Standard - -The percent point function is the inverse of the cumulative distribution - function and is -\begin_inset Formula \[ -G\left(q\right)=F^{-1}\left(q\right)\] - -\end_inset - - for discrete distributions, this must be modified for cases where there - is no -\begin_inset Formula $x_{k}$ -\end_inset - - such that -\begin_inset Formula $F\left(x_{k}\right)=q.$ -\end_inset - - In these cases we choose -\begin_inset Formula $G\left(q\right)$ -\end_inset - - to be the smallest value -\begin_inset Formula $x_{k}=G\left(q\right)$ -\end_inset - - for which -\begin_inset Formula $F\left(x_{k}\right)\geq q$ -\end_inset - -. - If -\begin_inset Formula $q=0$ -\end_inset - - then we define -\begin_inset Formula $G\left(0\right)=a-1$ -\end_inset - -. - This definition allows random variates to be defined in the same way as - with continuous rv's using the inverse cdf on a uniform distribution to - generate random variates. 
- -\layout Subsection - -Inverse survival function -\layout Standard - -The inverse survival function is the inverse of the survival function -\begin_inset Formula \[ -Z\left(\alpha\right)=S^{-1}\left(\alpha\right)=G\left(1-\alpha\right)\] - -\end_inset - - and is thus the smallest non-negative integer -\begin_inset Formula $k$ -\end_inset - - for which -\begin_inset Formula $F\left(k\right)\geq1-\alpha$ -\end_inset - - or the smallest non-negative integer -\begin_inset Formula $k$ -\end_inset - - for which -\begin_inset Formula $S\left(k\right)\leq\alpha.$ -\end_inset - - -\layout Subsection - -Hazard functions -\layout Standard - -If desired, the hazard function and the cumulative hazard function could - be defined as -\begin_inset Formula \[ -h\left(x_{k}\right)=\frac{p\left(x_{k}\right)}{1-F\left(x_{k}\right)}\] - -\end_inset - - and -\begin_inset Formula \[ -H\left(x\right)=\sum_{x_{k}\leq x}h\left(x_{k}\right)=\sum_{x_{k}\leq x}\frac{F\left(x_{k}\right)-F\left(x_{k-1}\right)}{1-F\left(x_{k}\right)}.\] - -\end_inset - - -\layout Subsection - -Moments -\layout Standard - -Non-central moments are defined using the PDF -\begin_inset Formula \[ -\mu_{m}^{\prime}=E\left[X^{m}\right]=\sum_{k}x_{k}^{m}p\left(x_{k}\right).\] - -\end_inset - - Central moments are computed similarly -\begin_inset Formula $\mu=\mu_{1}^{\prime}$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu_{m}=E\left[\left(X-\mu\right)^{m}\right] & = & \sum_{k}\left(x_{k}-\mu\right)^{m}p\left(x_{k}\right)\\ - & = & \sum_{k=0}^{m}\left(-1\right)^{m-k}\left(\begin{array}{c} -m\\ -k\end{array}\right)\mu^{m-k}\mu_{k}^{\prime}\end{eqnarray*} - -\end_inset - - The mean is the first moment -\begin_inset Formula \[ -\mu=\mu_{1}^{\prime}=E\left[X\right]=\sum_{k}x_{k}p\left(x_{k}\right)\] - -\end_inset - - the variance is the second central moment -\begin_inset Formula \[ -\mu_{2}=E\left[\left(X-\mu\right)^{2}\right]=\sum_{x_{k}}x_{k}^{2}p\left(x_{k}\right)-\mu^{2}.\] - -\end_inset - -Skewness is 
defined as -\begin_inset Formula \[ -\gamma_{1}=\frac{\mu_{3}}{\mu_{2}^{3/2}}\] - -\end_inset - - while (Fisher) kurtosis is -\begin_inset Formula \[ -\gamma_{2}=\frac{\mu_{4}}{\mu_{2}^{2}}-3,\] - -\end_inset - - so that a normal distribution has a kurtosis of zero. - -\layout Subsection - -Moment generating function -\layout Standard - -The moment generating funtion is defined as -\begin_inset Formula \[ -M_{X}\left(t\right)=E\left[e^{Xt}\right]=\sum_{x_{k}}e^{x_{k}t}p\left(x_{k}\right)\] - -\end_inset - - Moments are found as the derivatives of the moment generating function - evaluated at -\begin_inset Formula $0.$ -\end_inset - - -\layout Subsection - -Fitting data -\layout Standard - -To fit data to a distribution, maximizing the likelihood function is common. - Alternatively, some distributions have well-known minimum variance unbiased - estimators. - These will be chosen by default, but the likelihood function will always - be available for minimizing. - -\layout Standard - -If -\begin_inset Formula $f_{i}\left(k;\boldsymbol{\theta}\right)$ -\end_inset - - is the PDF of a random-variable where -\begin_inset Formula $\boldsymbol{\theta}$ -\end_inset - - is a vector of parameters ( -\emph on -e.g. 
- -\begin_inset Formula $L$ -\end_inset - - -\emph default -and -\begin_inset Formula $S$ -\end_inset - -), then for a collection of -\begin_inset Formula $N$ -\end_inset - - independent samples from this distribution, the joint distribution the - random vector -\begin_inset Formula $\mathbf{k}$ -\end_inset - - is -\begin_inset Formula \[ -f\left(\mathbf{k};\boldsymbol{\theta}\right)=\prod_{i=1}^{N}f_{i}\left(k_{i};\boldsymbol{\theta}\right).\] - -\end_inset - - The maximum likelihood estimate of the parameters -\begin_inset Formula $\boldsymbol{\theta}$ -\end_inset - - are the parameters which maximize this function with -\begin_inset Formula $\mathbf{x}$ -\end_inset - - fixed and given by the data: -\begin_inset Formula \begin{eqnarray*} -\hat{\boldsymbol{\theta}} & = & \arg\max_{\boldsymbol{\theta}}f\left(\mathbf{k};\boldsymbol{\theta}\right)\\ - & = & \arg\min_{\boldsymbol{\theta}}l_{\mathbf{k}}\left(\boldsymbol{\theta}\right).\end{eqnarray*} - -\end_inset - - Where -\begin_inset Formula \begin{eqnarray*} -l_{\mathbf{k}}\left(\boldsymbol{\theta}\right) & = & -\sum_{i=1}^{N}\log f\left(k_{i};\boldsymbol{\theta}\right)\\ - & = & -N\overline{\log f\left(k_{i};\boldsymbol{\theta}\right)}\end{eqnarray*} - -\end_inset - - -\layout Subsection - -Standard notation for mean -\layout Standard - -We will use -\begin_inset Formula \[ -\overline{y\left(\mathbf{x}\right)}=\frac{1}{N}\sum_{i=1}^{N}y\left(x_{i}\right)\] - -\end_inset - - where -\begin_inset Formula $N$ -\end_inset - - should be clear from context. - -\layout Subsection - -Combinations -\layout Standard - -Note that -\begin_inset Formula \[ -k!=k\cdot\left(k-1\right)\cdot\left(k-2\right)\cdot\cdots\cdot1=\Gamma\left(k+1\right)\] - -\end_inset - - and has special cases of -\begin_inset Formula \begin{eqnarray*} -0! & \equiv & 1\\ -k! 
& \equiv & 0\quad k<0\end{eqnarray*} - -\end_inset - - and -\begin_inset Formula \[ -\left(\begin{array}{c} -n\\ -k\end{array}\right)=\frac{n!}{\left(n-k\right)!k!}.\] - -\end_inset - - If -\begin_inset Formula $n<0$ -\end_inset - - or -\begin_inset Formula $k<0$ -\end_inset - - or -\begin_inset Formula $k>n$ -\end_inset - - we define -\begin_inset Formula $\left(\begin{array}{c} -n\\ -k\end{array}\right)=0$ -\end_inset - - -\layout Section - -Bernoulli -\layout Standard - -A Bernoulli random variable of parameter -\begin_inset Formula $p$ -\end_inset - - takes one of only two values -\begin_inset Formula $X=0$ -\end_inset - - or -\begin_inset Formula $X=1$ -\end_inset - -. - The probability of success ( -\begin_inset Formula $X=1$ -\end_inset - -) is -\begin_inset Formula $p$ -\end_inset - -, and the probability of failure ( -\begin_inset Formula $X=0$ -\end_inset - -) is -\begin_inset Formula $1-p.$ -\end_inset - - It can be thought of as a binomial random variable with -\begin_inset Formula $n=1$ -\end_inset - -. 
- The PMF is -\begin_inset Formula $p\left(k\right)=0$ -\end_inset - - for -\begin_inset Formula $k\neq0,1$ -\end_inset - - and -\begin_inset Formula \begin{eqnarray*} -p\left(k;p\right) & = & \begin{cases} -1-p & k=0\\ -p & k=1\end{cases}\\ -F\left(x;p\right) & = & \begin{cases} -0 & x<0\\ -1-p & 0\le x<1\\ -1 & 1\leq x\end{cases}\\ -G\left(q;p\right) & = & \begin{cases} -0 & 0\leq q<1-p\\ -1 & 1-p\leq q\leq1\end{cases}\\ -\mu & = & p\\ -\mu_{2} & = & p\left(1-p\right)\\ -\gamma_{3} & = & \frac{1-2p}{\sqrt{p\left(1-p\right)}}\\ -\gamma_{4} & = & \frac{1-6p\left(1-p\right)}{p\left(1-p\right)}\end{eqnarray*} - -\end_inset - - -\layout Standard - - -\begin_inset Formula \[ -M\left(t\right)=1-p\left(1-e^{t}\right)\] - -\end_inset - - -\layout Standard - - -\begin_inset Formula \[ -\mu_{m}^{\prime}=p\] - -\end_inset - - -\layout Standard - - -\begin_inset Formula \[ -h\left[X\right]=p\log p+\left(1-p\right)\log\left(1-p\right)\] - -\end_inset - - -\layout Section - -Binomial -\layout Standard - -A binomial random variable with parameters -\begin_inset Formula $\left(n,p\right)$ -\end_inset - - can be described as the sum of -\begin_inset Formula $n$ -\end_inset - - independent Bernoulli random variables of parameter -\begin_inset Formula $p;$ -\end_inset - - -\begin_inset Formula \[ -Y=\sum_{i=1}^{n}X_{i}.\] - -\end_inset - - Therefore, this random variable counts the number of successes in -\begin_inset Formula $n$ -\end_inset - - independent trials of a random experiment where the probability of success - is -\begin_inset Formula $p.$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -p\left(k;n,p\right) & = & \left(\begin{array}{c} -n\\ -k\end{array}\right)p^{k}\left(1-p\right)^{n-k}\,\, k\in\left\{ 0,1,\ldots n\right\} ,\\ -F\left(x;n,p\right) & = & \sum_{k\leq x}\left(\begin{array}{c} -n\\ -k\end{array}\right)p^{k}\left(1-p\right)^{n-k}=I_{1-p}\left(n-\left\lfloor x\right\rfloor ,\left\lfloor x\right\rfloor +1\right)\quad x\geq0\end{eqnarray*} - -\end_inset 
- - where the incomplete beta integral is -\begin_inset Formula \[ -I_{x}\left(a,b\right)=\frac{\Gamma\left(a+b\right)}{\Gamma\left(a\right)\Gamma\left(b\right)}\int_{0}^{x}t^{a-1}\left(1-t\right)^{b-1}dt.\] - -\end_inset - - Now -\begin_inset Formula \begin{eqnarray*} -\mu & = & np\\ -\mu_{2} & = & np\left(1-p\right)\\ -\gamma_{1} & = & \frac{1-2p}{\sqrt{np\left(1-p\right)}}\\ -\gamma_{2} & = & \frac{1-6p\left(1-p\right)}{np\left(1-p\right)}.\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ -M\left(t\right)=\left[1-p\left(1-e^{t}\right)\right]^{n}\] - -\end_inset - - -\layout Section - -Boltzmann (truncated Planck) -\layout Standard - - -\begin_inset Formula \begin{eqnarray*} -p\left(k;N,\lambda\right) & = & \frac{1-e^{-\lambda}}{1-e^{-\lambda N}}\exp\left(-\lambda k\right)\quad k\in\left\{ 0,1,\ldots,N-1\right\} \\ -F\left(x;N,\lambda\right) & = & \left\{ \begin{array}{cc} -0 & x<0\\ -\frac{1-\exp\left[-\lambda\left(\left\lfloor x\right\rfloor +1\right)\right]}{1-\exp\left(-\lambda N\right)} & 0\leq x\leq N-1\\ -1 & x\geq N-1\end{array}\right.\\ -G\left(q,\lambda\right) & = & \left\lceil -\frac{1}{\lambda}\log\left[1-q\left(1-e^{-\lambda N}\right)\right]-1\right\rceil \end{eqnarray*} - -\end_inset - - Define -\begin_inset Formula $z=e^{-\lambda}$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{z}{1-z}-\frac{Nz^{N}}{1-z^{N}}\\ -\mu_{2} & = & \frac{z}{\left(1-z\right)^{2}}-\frac{N^{2}z^{N}}{\left(1-z^{N}\right)^{2}}\\ -\gamma_{1} & = & \frac{z\left(1+z\right)\left(\frac{1-z^{N}}{1-z}\right)^{3}-N^{3}z^{N}\left(1+z^{N}\right)}{\left[z\left(\frac{1-z^{N}}{1-z}\right)^{2}-N^{2}z^{N}\right]^{3/2}}\\ -\gamma_{2} & = & \frac{z\left(1+4z+z^{2}\right)\left(\frac{1-z^{N}}{1-z}\right)^{4}-N^{4}z^{N}\left(1+4z^{N}+z^{2N}\right)}{\left[z\left(\frac{1-z^{N}}{1-z}\right)^{2}-N^{2}z^{N}\right]^{2}}\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \[ 
-M\left(t\right)=\frac{1-e^{N\left(t-\lambda\right)}}{1-e^{t-\lambda}}\frac{1-e^{-\lambda}}{1-e^{-\lambda N}}\] - -\end_inset - - -\layout Section - -Planck (discrete exponential) -\layout Standard - -Named Planck because of its relationship to the black-body problem he solved. - -\layout Standard - - -\begin_inset Formula \begin{eqnarray*} -p\left(k;\lambda\right) & = & \left(1-e^{-\lambda}\right)e^{-\lambda k}\quad k\lambda\geq0\\ -F\left(x;\lambda\right) & = & 1-e^{-\lambda\left(\left\lfloor x\right\rfloor +1\right)}\quad x\lambda\geq0\\ -G\left(q;\lambda\right) & = & \left\lceil -\frac{1}{\lambda}\log\left[1-q\right]-1\right\rceil .\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \frac{1}{e^{\lambda}-1}\\ -\mu_{2} & = & \frac{e^{-\lambda}}{\left(1-e^{-\lambda}\right)^{2}}\\ -\gamma_{1} & = & 2\cosh\left(\frac{\lambda}{2}\right)\\ -\gamma_{2} & = & 4+2\cosh\left(\lambda\right)\end{eqnarray*} - -\end_inset - - -\layout Standard - - -\begin_inset Formula \[ -M\left(t\right)=\frac{1-e^{-\lambda}}{1-e^{t-\lambda}}\] - -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=\frac{\lambda e^{-\lambda}}{1-e^{-\lambda}}-\log\left(1-e^{-\lambda}\right)\] - -\end_inset - - -\layout Section - -Poisson -\layout Standard - -The Poisson random variable counts the number of successes in -\begin_inset Formula $n$ -\end_inset - - independent Bernoulli trials in the limit as -\begin_inset Formula $n\rightarrow\infty$ -\end_inset - - and -\begin_inset Formula $p\rightarrow0$ -\end_inset - - where the probability of success in each trial is -\begin_inset Formula $p$ -\end_inset - - and -\begin_inset Formula $np=\lambda\geq0$ -\end_inset - - is a constant. 
- It can be used to approximate the Binomial random variable or in it's own - right to count the number of events that occur in the interval -\begin_inset Formula $\left[0,t\right]$ -\end_inset - - for a process satisfying certain -\begin_inset Quotes eld -\end_inset - -sparsity -\begin_inset Quotes erd -\end_inset - - constraints. - The functions are -\begin_inset Formula \begin{eqnarray*} -p\left(k;\lambda\right) & = & e^{-\lambda}\frac{\lambda^{k}}{k!}\quad k\geq0,\\ -F\left(x;\lambda\right) & = & \sum_{n=0}^{\left\lfloor x\right\rfloor }e^{-\lambda}\frac{\lambda^{n}}{n!}=\frac{1}{\Gamma\left(\left\lfloor x\right\rfloor +1\right)}\int_{\lambda}^{\infty}t^{\left\lfloor x\right\rfloor }e^{-t}dt,\\ -\mu & = & \lambda\\ -\mu_{2} & = & \lambda\\ -\gamma_{1} & = & \frac{1}{\sqrt{\lambda}}\\ -\gamma_{2} & = & \frac{1}{\lambda}.\end{eqnarray*} - -\end_inset - - -\layout Standard - - -\begin_inset Formula \[ -M\left(t\right)=\exp\left[\lambda\left(e^{t}-1\right)\right].\] - -\end_inset - - -\layout Section - -Geometric -\layout Standard - -The geometric random variable with parameter -\begin_inset Formula $p\in\left(0,1\right)$ -\end_inset - - can be defined as the number of trials required to obtain a success where - the probability of success on each trial is -\begin_inset Formula $p$ -\end_inset - -. 
- Thus, -\begin_inset Formula \begin{eqnarray*} -p\left(k;p\right) & = & \left(1-p\right)^{k-1}p\quad k\geq1\\ -F\left(x;p\right) & = & 1-\left(1-p\right)^{\left\lfloor x\right\rfloor }\quad x\geq1\\ -G\left(q;p\right) & = & \left\lceil \frac{\log\left(1-q\right)}{\log\left(1-p\right)}\right\rceil \\ -\mu & = & \frac{1}{p}\\ -\mu_{2} & = & \frac{1-p}{p^{2}}\\ -\gamma_{1} & = & \frac{2-p}{\sqrt{1-p}}\\ -\gamma_{2} & = & \frac{p^{2}-6p+6}{1-p}.\end{eqnarray*} - -\end_inset - - -\layout Standard - - -\begin_inset Formula \begin{eqnarray*} -M\left(t\right) & = & \frac{p}{e^{-t}-\left(1-p\right)}\end{eqnarray*} - -\end_inset - - -\layout Section - -Negative Binomial -\layout Standard - -The negative binomial random variable with parameters -\begin_inset Formula $n$ -\end_inset - - and -\begin_inset Formula $p\in\left(0,1\right)$ -\end_inset - - can be defined as the number of -\emph on -extra -\emph default -independent trials (beyond -\begin_inset Formula $n$ -\end_inset - -) required to accumulate a total of -\begin_inset Formula $n$ -\end_inset - - successes where the probability of a success on each trial is -\begin_inset Formula $p.$ -\end_inset - - Equivalently, this random variable is the number of failures encoutered - while accumulating -\begin_inset Formula $n$ -\end_inset - - successes during independent trials of an experiment that succeeds with - probability -\begin_inset Formula $p.$ -\end_inset - - Thus, -\begin_inset Formula \begin{eqnarray*} -p\left(k;n,p\right) & = & \left(\begin{array}{c} -k+n-1\\ -n-1\end{array}\right)p^{n}\left(1-p\right)^{k}\quad k\geq0\\ -F\left(x;n,p\right) & = & \sum_{i=0}^{\left\lfloor x\right\rfloor }\left(\begin{array}{c} -i+n-1\\ -i\end{array}\right)p^{n}\left(1-p\right)^{i}\quad x\geq0\\ - & = & I_{p}\left(n,\left\lfloor x\right\rfloor +1\right)\quad x\geq0\\ -\mu & = & n\frac{1-p}{p}\\ -\mu_{2} & = & n\frac{1-p}{p^{2}}\\ -\gamma_{1} & = & \frac{2-p}{\sqrt{n\left(1-p\right)}}\\ -\gamma_{2} & = & 
\frac{p^{2}+6\left(1-p\right)}{n\left(1-p\right)}.\end{eqnarray*} - -\end_inset - - Recall that -\begin_inset Formula $I_{p}\left(a,b\right)$ -\end_inset - - is the incomplete beta integral. - -\layout Section - -Hypergeometric -\layout Standard - -The hypergeometric random variable with parameters -\begin_inset Formula $\left(M,n,N\right)$ -\end_inset - - counts the number of -\begin_inset Quotes eld -\end_inset - -good -\begin_inset Quotes erd -\end_inset - - objects in a sample of size -\begin_inset Formula $N$ -\end_inset - - chosen without replacement from a population of -\begin_inset Formula $M$ -\end_inset - - objects where -\begin_inset Formula $n$ -\end_inset - - is the number of -\begin_inset Quotes eld -\end_inset - -good -\begin_inset Quotes erd -\end_inset - - objects in the total population. - -\begin_inset Formula \begin{eqnarray*} -p\left(k;N,n,M\right) & = & \frac{\left(\begin{array}{c} -n\\ -k\end{array}\right)\left(\begin{array}{c} -M-n\\ -N-k\end{array}\right)}{\left(\begin{array}{c} -M\\ -N\end{array}\right)}\quad N-\left(M-n\right)\leq k\leq\min\left(n,N\right)\\ -F\left(x;N,n,M\right) & = & \sum_{k=0}^{\left\lfloor x\right\rfloor }\frac{\left(\begin{array}{c} -m\\ -k\end{array}\right)\left(\begin{array}{c} -N-m\\ -n-k\end{array}\right)}{\left(\begin{array}{c} -N\\ -n\end{array}\right)},\\ -\mu & = & \frac{nN}{M}\\ -\mu_{2} & = & \frac{nN\left(M-n\right)\left(M-N\right)}{M^{2}\left(M-1\right)}\\ -\gamma_{1} & = & \frac{\left(M-2n\right)\left(M-2N\right)}{M-2}\sqrt{\frac{M-1}{nN\left(M-m\right)\left(M-n\right)}}\\ -\gamma_{2} & = & \frac{g\left(N,n,M\right)}{nN\left(M-n\right)\left(M-3\right)\left(M-2\right)\left(N-M\right)}\end{eqnarray*} - -\end_inset - - where (defining -\begin_inset Formula $m=M-n$ -\end_inset - -) -\begin_inset Formula \begin{eqnarray*} -g\left(N,n,M\right) & = & m^{3}-m^{5}+3m^{2}n-6m^{3}n+m^{4}n+3mn^{2}\\ - & & -12m^{2}n^{2}+8m^{3}n^{2}+n^{3}-6mn^{3}+8m^{2}n^{3}\\ - & & +mn^{4}-n^{5}-6m^{3}N+6m^{4}N+18m^{2}nN\\ - & & 
-6m^{3}nN+18mn^{2}N-24m^{2}n^{2}N-6n^{3}N\\ - & & -6mn^{3}N+6n^{4}N+6m^{2}N^{2}-6m^{3}N^{2}-24mnN^{2}\\ - & & +12m^{2}nN^{2}+6n^{2}N^{2}+12mn^{2}N^{2}-6n^{3}N^{2}.\end{eqnarray*} - -\end_inset - - -\layout Section - -Zipf (Zeta) -\layout Standard - -A random variable has the zeta distribution (also called the zipf distribution) - with parameter -\begin_inset Formula $\alpha>1$ -\end_inset - - if it's probability mass function is given by -\begin_inset Formula \begin{eqnarray*} -p\left(k;\alpha\right) & = & \frac{1}{\zeta\left(\alpha\right)k^{\alpha}}\quad k\geq1\end{eqnarray*} - -\end_inset - -where -\begin_inset Formula \[ -\zeta\left(\alpha\right)=\sum_{n=1}^{\infty}\frac{1}{n^{\alpha}}\] - -\end_inset - - is the Riemann zeta function. - Other functions of this distribution are -\begin_inset Formula \begin{eqnarray*} -F\left(x;\alpha\right) & = & \frac{1}{\zeta\left(\alpha\right)}\sum_{k=1}^{\left\lfloor x\right\rfloor }\frac{1}{k^{\alpha}}\\ -\mu & = & \frac{\zeta_{1}}{\zeta_{0}}\quad\alpha>2\\ -\mu_{2} & = & \frac{\zeta_{2}\zeta_{0}-\zeta_{1}^{2}}{\zeta_{0}^{2}}\quad\alpha>3\\ -\gamma_{1} & = & \frac{\zeta_{3}\zeta_{0}^{2}-3\zeta_{0}\zeta_{1}\zeta_{2}+2\zeta_{1}^{3}}{\left[\zeta_{2}\zeta_{0}-\zeta_{1}^{2}\right]^{3/2}}\quad\alpha>4\\ -\gamma_{2} & = & \frac{\zeta_{4}\zeta_{0}^{3}-4\zeta_{3}\zeta_{1}\zeta_{0}^{2}+12\zeta_{2}\zeta_{1}^{2}\zeta_{0}-6\zeta_{1}^{4}-3\zeta_{2}^{2}\zeta_{0}^{2}}{\left(\zeta_{2}\zeta_{0}-\zeta_{1}^{2}\right)^{2}}.\end{eqnarray*} - -\end_inset - - -\layout Standard - - -\begin_inset Formula \begin{eqnarray*} -M\left(t\right) & = & \frac{\textrm{Li}_{\alpha}\left(e^{t}\right)}{\zeta\left(\alpha\right)}\end{eqnarray*} - -\end_inset - -where -\begin_inset Formula $\zeta_{i}=\zeta\left(\alpha-i\right)$ -\end_inset - - and -\begin_inset Formula $\textrm{Li}_{n}\left(z\right)$ -\end_inset - - is the -\begin_inset Formula $n^{\textrm{th}}$ -\end_inset - - polylogarithm function of -\begin_inset Formula $z$ -\end_inset - - defined as 
-\begin_inset Formula \[ -\textrm{Li}_{n}\left(z\right)\equiv\sum_{k=1}^{\infty}\frac{z^{k}}{k^{n}}\] - -\end_inset - - -\begin_inset Formula \[ -\mu_{n}^{\prime}=\left.M^{\left(n\right)}\left(t\right)\right|_{t=0}=\left.\frac{\textrm{Li}_{\alpha-n}\left(e^{t}\right)}{\zeta\left(a\right)}\right|_{t=0}=\frac{\zeta\left(\alpha-n\right)}{\zeta\left(\alpha\right)}\] - -\end_inset - - -\layout Section - -Logarithmic (Log-Series, Series) -\layout Standard - -The logarimthic distribution with parameter -\begin_inset Formula $p$ -\end_inset - - has a probability mass function with terms proportional to the Taylor series - expansion of -\begin_inset Formula $\log\left(1-p\right)$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -p\left(k;p\right) & = & -\frac{p^{k}}{k\log\left(1-p\right)}\quad k\geq1\\ -F\left(x;p\right) & = & -\frac{1}{\log\left(1-p\right)}\sum_{k=1}^{\left\lfloor x\right\rfloor }\frac{p^{k}}{k}=1+\frac{p^{1+\left\lfloor x\right\rfloor }\Phi\left(p,1,1+\left\lfloor x\right\rfloor \right)}{\log\left(1-p\right)}\end{eqnarray*} - -\end_inset - -where -\begin_inset Formula \[ -\Phi\left(z,s,a\right)=\sum_{k=0}^{\infty}\frac{z^{k}}{\left(a+k\right)^{s}}\] - -\end_inset - - is the Lerch Transcendent. 
- Also define -\begin_inset Formula $r=\log\left(1-p\right)$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & -\frac{p}{\left(1-p\right)r}\\ -\mu_{2} & = & -\frac{p\left[p+r\right]}{\left(1-p\right)^{2}r^{2}}\\ -\gamma_{1} & = & -\frac{2p^{2}+3pr+\left(1+p\right)r^{2}}{r\left(p+r\right)\sqrt{-p\left(p+r\right)}}r\\ -\gamma_{2} & = & -\frac{6p^{3}+12p^{2}r+p\left(4p+7\right)r^{2}+\left(p^{2}+4p+1\right)r^{3}}{p\left(p+r\right)^{2}}.\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -M\left(t\right) & = & -\frac{1}{\log\left(1-p\right)}\sum_{k=1}^{\infty}\frac{e^{tk}p^{k}}{k}\\ - & = & \frac{\log\left(1-pe^{t}\right)}{\log\left(1-p\right)}\end{eqnarray*} - -\end_inset - - Thus, -\begin_inset Formula \[ -\mu_{n}^{\prime}=\left.M^{\left(n\right)}\left(t\right)\right|_{t=0}=\left.\frac{\textrm{Li}_{1-n}\left(pe^{t}\right)}{\log\left(1-p\right)}\right|_{t=0}=-\frac{\textrm{Li}_{1-n}\left(p\right)}{\log\left(1-p\right)}.\] - -\end_inset - - -\layout Section - -Discrete Uniform (randint) -\layout Standard - -The discrete uniform distribution with parameters -\begin_inset Formula $\left(a,b\right)$ -\end_inset - - constructs a random variable that has an equal probability of being any - one of the integers in the half-open range -\begin_inset Formula $[a,b).$ -\end_inset - - If -\begin_inset Formula $a$ -\end_inset - - is not given it is assumed to be zero and the only parameter is -\begin_inset Formula $b.$ -\end_inset - - Therefore, -\begin_inset Formula \begin{eqnarray*} -p\left(k;a,b\right) & = & \frac{1}{b-a}\quad a\leq k0$ -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -p\left(k\right) & = & \tanh\left(\frac{a}{2}\right)e^{-a\left|k\right|},\\ -F\left(x\right) & = & \left\{ \begin{array}{cc} -\frac{e^{a\left(\left\lfloor x\right\rfloor +1\right)}}{e^{a}+1} & \left\lfloor x\right\rfloor <0,\\ -1-\frac{e^{-a\left\lfloor x\right\rfloor }}{e^{a}+1} & \left\lfloor x\right\rfloor \geq0.\end{array}\right.\\ -G\left(q\right) & 
= & \left\{ \begin{array}{cc} -\left\lceil \frac{1}{a}\log\left[q\left(e^{a}+1\right)\right]-1\right\rceil & q<\frac{1}{1+e^{-a}},\\ -\left\lceil -\frac{1}{a}\log\left[\left(1-q\right)\left(1+e^{a}\right)\right]\right\rceil & q\geq\frac{1}{1+e^{-a}}.\end{array}\right.\end{eqnarray*} - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -M\left(t\right) & = & \tanh\left(\frac{a}{2}\right)\sum_{k=-\infty}^{\infty}e^{tk}e^{-a\left|k\right|}\\ - & = & C\left(1+\sum_{k=1}^{\infty}e^{-\left(t+a\right)k}+\sum_{1}^{\infty}e^{\left(t-a\right)k}\right)\\ - & = & \tanh\left(\frac{a}{2}\right)\left(1+\frac{e^{-\left(t+a\right)}}{1-e^{-\left(t+a\right)}}+\frac{e^{t-a}}{1-e^{t-a}}\right)\\ - & = & \frac{\tanh\left(\frac{a}{2}\right)\sinh a}{\cosh a-\cosh t}.\end{eqnarray*} - -\end_inset - - Thus, -\begin_inset Formula \[ -\mu_{n}^{\prime}=M^{\left(n\right)}\left(0\right)=\left[1+\left(-1\right)^{n}\right]\textrm{Li}_{-n}\left(e^{-a}\right)\] - -\end_inset - - where -\begin_inset Formula $\textrm{Li}_{-n}\left(z\right)$ -\end_inset - - is the polylogarithm function of order -\begin_inset Formula $-n$ -\end_inset - - evaluated at -\begin_inset Formula $z.$ -\end_inset - - -\begin_inset Formula \[ -h\left[X\right]=-\log\left(\tanh\left(\frac{a}{2}\right)\right)+\frac{a}{\sinh a}\] - -\end_inset - - -\layout Section - -Discrete Gaussian* -\layout Standard - -Defined for all -\begin_inset Formula $\mu$ -\end_inset - - and -\begin_inset Formula $\lambda>0$ -\end_inset - - and -\begin_inset Formula $k$ -\end_inset - - -\begin_inset Formula \[ -p\left(k;\mu,\lambda\right)=\frac{1}{Z\left(\lambda\right)}\exp\left[-\lambda\left(k-\mu\right)^{2}\right]\] - -\end_inset - - where -\begin_inset Formula \[ -Z\left(\lambda\right)=\sum_{k=-\infty}^{\infty}\exp\left[-\lambda k^{2}\right]\] - -\end_inset - - -\begin_inset Formula \begin{eqnarray*} -\mu & = & \mu\\ -\mu_{2} & = & -\frac{\partial}{\partial\lambda}\log Z\left(\lambda\right)\\ - & = & 
G\left(\lambda\right)e^{-\lambda}\end{eqnarray*} - -\end_inset - - where -\begin_inset Formula $G\left(0\right)\rightarrow\infty$ -\end_inset - - and -\begin_inset Formula $G\left(\infty\right)\rightarrow2$ -\end_inset - - with a minimum less than 2 near -\begin_inset Formula $\lambda=1$ -\end_inset - - -\begin_inset Formula \[ -G\left(\lambda\right)=\frac{1}{Z\left(\lambda\right)}\sum_{k=-\infty}^{\infty}k^{2}\exp\left[-\lambda\left(k+1\right)\left(k-1\right)\right]\] - -\end_inset - - -\the_end diff --git a/scipy-0.10.1/doc/source/tutorial/stats/discrete.rst b/scipy-0.10.1/doc/source/tutorial/stats/discrete.rst deleted file mode 100644 index 1c7f3db8a7..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/stats/discrete.rst +++ /dev/null @@ -1,690 +0,0 @@ -.. _discrete-random-variables: - - -================================== -Discrete Statistical Distributions -================================== - -Discrete random variables take on only a countable number of values. -The commonly used distributions are included in SciPy and described in -this document. Each discrete distribution can take one extra integer -parameter: :math:`L.` The relationship between the general distribution -:math:`p` and the standard distribution :math:`p_{0}` is - -.. math:: - :nowrap: - - \[ p\left(x\right)=p_{0}\left(x-L\right)\] - -which allows for shifting of the input. When a distribution generator -is initialized, the discrete distribution can either specify the -beginning and ending (integer) values :math:`a` and :math:`b` which must be such that - -.. math:: - :nowrap: - - \[ p_{0}\left(x\right)=0\quad x<a\;\textrm{or}\; x>b\] - -in which case, it is assumed that the pdf function is specified on the -integers :math:`a+mk\leq b` where :math:`k` is a non-negative integer ( :math:`0,1,2,\ldots` ) and :math:`m` is a positive integer multiplier. 
Alternatively, the two lists :math:`x_{k}` and :math:`p\left(x_{k}\right)` can be provided directly in which case a dictionary is set up -internally to evaluate probabilities and generate random variates. - - -Probability Mass Function (PMF) -------------------------------- - -The probability mass function of a random variable X is defined as the -probability that the random variable takes on a particular value. - -.. math:: - :nowrap: - - \[ p\left(x_{k}\right)=P\left[X=x_{k}\right]\] - -This is also sometimes called the probability density function, -although technically - -.. math:: - :nowrap: - - \[ f\left(x\right)=\sum_{k}p\left(x_{k}\right)\delta\left(x-x_{k}\right)\] - -is the probability density function for a discrete distribution [#]_ . - - - -.. [#] - XXX: Unknown layout Plain Layout: Note that we will be using :math:`p` to represent the probability mass function and a parameter (a - XXX: probability). The usage should be obvious from context. - - - -Cumulative Distribution Function (CDF) --------------------------------------- - -The cumulative distribution function is - -.. math:: - :nowrap: - - \[ F\left(x\right)=P\left[X\leq x\right]=\sum_{x_{k}\leq x}p\left(x_{k}\right)\] - -and is also useful to be able to compute. Note that - -.. math:: - :nowrap: - - \[ F\left(x_{k}\right)-F\left(x_{k-1}\right)=p\left(x_{k}\right)\] - - - - -Survival Function ------------------ - -The survival function is just - -.. math:: - :nowrap: - - \[ S\left(x\right)=1-F\left(x\right)=P\left[X>k\right]\] - -the probability that the random variable is strictly larger than :math:`k` . - - -Percent Point Function (Inverse CDF) ------------------------------------- - -The percent point function is the inverse of the cumulative -distribution function and is - -.. 
math:: - :nowrap: - - \[ G\left(q\right)=F^{-1}\left(q\right)\] - -for discrete distributions, this must be modified for cases where -there is no :math:`x_{k}` such that :math:`F\left(x_{k}\right)=q.` In these cases we choose :math:`G\left(q\right)` to be the smallest value :math:`x_{k}=G\left(q\right)` for which :math:`F\left(x_{k}\right)\geq q` . If :math:`q=0` then we define :math:`G\left(0\right)=a-1` . This definition allows random variates to be defined in the same way -as with continuous rv's using the inverse cdf on a uniform -distribution to generate random variates. - - -Inverse survival function -------------------------- - -The inverse survival function is the inverse of the survival function - -.. math:: - :nowrap: - - \[ Z\left(\alpha\right)=S^{-1}\left(\alpha\right)=G\left(1-\alpha\right)\] - -and is thus the smallest non-negative integer :math:`k` for which :math:`F\left(k\right)\geq1-\alpha` or the smallest non-negative integer :math:`k` for which :math:`S\left(k\right)\leq\alpha.` - - -Hazard functions ----------------- - -If desired, the hazard function and the cumulative hazard function -could be defined as - -.. math:: - :nowrap: - - \[ h\left(x_{k}\right)=\frac{p\left(x_{k}\right)}{1-F\left(x_{k}\right)}\] - -and - -.. math:: - :nowrap: - - \[ H\left(x\right)=\sum_{x_{k}\leq x}h\left(x_{k}\right)=\sum_{x_{k}\leq x}\frac{F\left(x_{k}\right)-F\left(x_{k-1}\right)}{1-F\left(x_{k}\right)}.\] - - - - -Moments -------- - -Non-central moments are defined using the PDF - -.. math:: - :nowrap: - - \[ \mu_{m}^{\prime}=E\left[X^{m}\right]=\sum_{k}x_{k}^{m}p\left(x_{k}\right).\] - -Central moments are computed similarly :math:`\mu=\mu_{1}^{\prime}` - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu_{m}=E\left[\left(X-\mu\right)^{m}\right] & = & \sum_{k}\left(x_{k}-\mu\right)^{m}p\left(x_{k}\right)\\ & = & \sum_{k=0}^{m}\left(-1\right)^{m-k}\left(\begin{array}{c} m\\ k\end{array}\right)\mu^{m-k}\mu_{k}^{\prime}\end{eqnarray*} - -The mean is the first moment - -.. math:: - :nowrap: - - \[ \mu=\mu_{1}^{\prime}=E\left[X\right]=\sum_{k}x_{k}p\left(x_{k}\right)\] - -the variance is the second central moment - -.. math:: - :nowrap: - - \[ \mu_{2}=E\left[\left(X-\mu\right)^{2}\right]=\sum_{x_{k}}x_{k}^{2}p\left(x_{k}\right)-\mu^{2}.\] - -Skewness is defined as - -.. math:: - :nowrap: - - \[ \gamma_{1}=\frac{\mu_{3}}{\mu_{2}^{3/2}}\] - -while (Fisher) kurtosis is - -.. math:: - :nowrap: - - \[ \gamma_{2}=\frac{\mu_{4}}{\mu_{2}^{2}}-3,\] - -so that a normal distribution has a kurtosis of zero. - - -Moment generating function --------------------------- - -The moment generating funtion is defined as - -.. math:: - :nowrap: - - \[ M_{X}\left(t\right)=E\left[e^{Xt}\right]=\sum_{x_{k}}e^{x_{k}t}p\left(x_{k}\right)\] - -Moments are found as the derivatives of the moment generating function -evaluated at :math:`0.` - - -Fitting data ------------- - -To fit data to a distribution, maximizing the likelihood function is -common. Alternatively, some distributions have well-known minimum -variance unbiased estimators. These will be chosen by default, but the -likelihood function will always be available for minimizing. - -If :math:`f_{i}\left(k;\boldsymbol{\theta}\right)` is the PDF of a random-variable where :math:`\boldsymbol{\theta}` is a vector of parameters ( *e.g.* :math:`L` and :math:`S` ), then for a collection of :math:`N` independent samples from this distribution, the joint distribution the -random vector :math:`\mathbf{k}` is - -.. 
math:: - :nowrap: - - \[ f\left(\mathbf{k};\boldsymbol{\theta}\right)=\prod_{i=1}^{N}f_{i}\left(k_{i};\boldsymbol{\theta}\right).\] - -The maximum likelihood estimate of the parameters :math:`\boldsymbol{\theta}` are the parameters which maximize this function with :math:`\mathbf{x}` fixed and given by the data: - -.. math:: - :nowrap: - - \begin{eqnarray*} \hat{\boldsymbol{\theta}} & = & \arg\max_{\boldsymbol{\theta}}f\left(\mathbf{k};\boldsymbol{\theta}\right)\\ & = & \arg\min_{\boldsymbol{\theta}}l_{\mathbf{k}}\left(\boldsymbol{\theta}\right).\end{eqnarray*} - -Where - -.. math:: - :nowrap: - - \begin{eqnarray*} l_{\mathbf{k}}\left(\boldsymbol{\theta}\right) & = & -\sum_{i=1}^{N}\log f\left(k_{i};\boldsymbol{\theta}\right)\\ & = & -N\overline{\log f\left(k_{i};\boldsymbol{\theta}\right)}\end{eqnarray*} - - - - -Standard notation for mean --------------------------- - -We will use - -.. math:: - :nowrap: - - \[ \overline{y\left(\mathbf{x}\right)}=\frac{1}{N}\sum_{i=1}^{N}y\left(x_{i}\right)\] - -where :math:`N` should be clear from context. - - -Combinations ------------- - -Note that - -.. math:: - :nowrap: - - \[ k!=k\cdot\left(k-1\right)\cdot\left(k-2\right)\cdot\cdots\cdot1=\Gamma\left(k+1\right)\] - -and has special cases of - -.. math:: - :nowrap: - - \begin{eqnarray*} 0! & \equiv & 1\\ k! & \equiv & 0\quad k<0\end{eqnarray*} - -and - -.. math:: - :nowrap: - - \[ \left(\begin{array}{c} n\\ k\end{array}\right)=\frac{n!}{\left(n-k\right)!k!}.\] - -If :math:`n<0` or :math:`k<0` or :math:`k>n` we define :math:`\left(\begin{array}{c} n\\ k\end{array}\right)=0` - - -Bernoulli -========= - -A Bernoulli random variable of parameter :math:`p` takes one of only two values :math:`X=0` or :math:`X=1` . The probability of success ( :math:`X=1` ) is :math:`p` , and the probability of failure ( :math:`X=0` ) is :math:`1-p.` It can be thought of as a binomial random variable with :math:`n=1` . The PMF is :math:`p\left(k\right)=0` for :math:`k\neq0,1` and - -.. 
math:: - :nowrap: - - \begin{eqnarray*} p\left(k;p\right) & = & \begin{cases} 1-p & k=0\\ p & k=1\end{cases}\\ F\left(x;p\right) & = & \begin{cases} 0 & x<0\\ 1-p & 0\le x<1\\ 1 & 1\leq x\end{cases}\\ G\left(q;p\right) & = & \begin{cases} 0 & 0\leq q<1-p\\ 1 & 1-p\leq q\leq1\end{cases}\\ \mu & = & p\\ \mu_{2} & = & p\left(1-p\right)\\ \gamma_{3} & = & \frac{1-2p}{\sqrt{p\left(1-p\right)}}\\ \gamma_{4} & = & \frac{1-6p\left(1-p\right)}{p\left(1-p\right)}\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=1-p\left(1-e^{t}\right)\] - - - - - -.. math:: - :nowrap: - - \[ \mu_{m}^{\prime}=p\] - - - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=p\log p+\left(1-p\right)\log\left(1-p\right)\] - - - - -Binomial -======== - -A binomial random variable with parameters :math:`\left(n,p\right)` can be described as the sum of :math:`n` independent Bernoulli random variables of parameter :math:`p;` - -.. math:: - :nowrap: - - \[ Y=\sum_{i=1}^{n}X_{i}.\] - -Therefore, this random variable counts the number of successes in :math:`n` independent trials of a random experiment where the probability of -success is :math:`p.` - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;n,p\right) & = & \left(\begin{array}{c} n\\ k\end{array}\right)p^{k}\left(1-p\right)^{n-k}\,\, k\in\left\{ 0,1,\ldots n\right\} ,\\ F\left(x;n,p\right) & = & \sum_{k\leq x}\left(\begin{array}{c} n\\ k\end{array}\right)p^{k}\left(1-p\right)^{n-k}=I_{1-p}\left(n-\left\lfloor x\right\rfloor ,\left\lfloor x\right\rfloor +1\right)\quad x\geq0\end{eqnarray*} - -where the incomplete beta integral is - -.. math:: - :nowrap: - - \[ I_{x}\left(a,b\right)=\frac{\Gamma\left(a+b\right)}{\Gamma\left(a\right)\Gamma\left(b\right)}\int_{0}^{x}t^{a-1}\left(1-t\right)^{b-1}dt.\] - -Now - -.. 
math:: - :nowrap: - - \begin{eqnarray*} \mu & = & np\\ \mu_{2} & = & np\left(1-p\right)\\ \gamma_{1} & = & \frac{1-2p}{\sqrt{np\left(1-p\right)}}\\ \gamma_{2} & = & \frac{1-6p\left(1-p\right)}{np\left(1-p\right)}.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\left[1-p\left(1-e^{t}\right)\right]^{n}\] - - - - -Boltzmann (truncated Planck) -============================ - - - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;N,\lambda\right) & = & \frac{1-e^{-\lambda}}{1-e^{-\lambda N}}\exp\left(-\lambda k\right)\quad k\in\left\{ 0,1,\ldots,N-1\right\} \\ F\left(x;N,\lambda\right) & = & \left\{ \begin{array}{cc} 0 & x<0\\ \frac{1-\exp\left[-\lambda\left(\left\lfloor x\right\rfloor +1\right)\right]}{1-\exp\left(-\lambda N\right)} & 0\leq x\leq N-1\\ 1 & x\geq N-1\end{array}\right.\\ G\left(q,\lambda\right) & = & \left\lceil -\frac{1}{\lambda}\log\left[1-q\left(1-e^{-\lambda N}\right)\right]-1\right\rceil \end{eqnarray*} - -Define :math:`z=e^{-\lambda}` - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{z}{1-z}-\frac{Nz^{N}}{1-z^{N}}\\ \mu_{2} & = & \frac{z}{\left(1-z\right)^{2}}-\frac{N^{2}z^{N}}{\left(1-z^{N}\right)^{2}}\\ \gamma_{1} & = & \frac{z\left(1+z\right)\left(\frac{1-z^{N}}{1-z}\right)^{3}-N^{3}z^{N}\left(1+z^{N}\right)}{\left[z\left(\frac{1-z^{N}}{1-z}\right)^{2}-N^{2}z^{N}\right]^{3/2}}\\ \gamma_{2} & = & \frac{z\left(1+4z+z^{2}\right)\left(\frac{1-z^{N}}{1-z}\right)^{4}-N^{4}z^{N}\left(1+4z^{N}+z^{2N}\right)}{\left[z\left(\frac{1-z^{N}}{1-z}\right)^{2}-N^{2}z^{N}\right]^{2}}\end{eqnarray*} - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{1-e^{N\left(t-\lambda\right)}}{1-e^{t-\lambda}}\frac{1-e^{-\lambda}}{1-e^{-\lambda N}}\] - - - - -Planck (discrete exponential) -============================= - -Named Planck because of its relationship to the black-body problem he -solved. - - - -.. 
math:: - :nowrap: - - \begin{eqnarray*} p\left(k;\lambda\right) & = & \left(1-e^{-\lambda}\right)e^{-\lambda k}\quad k\lambda\geq0\\ F\left(x;\lambda\right) & = & 1-e^{-\lambda\left(\left\lfloor x\right\rfloor +1\right)}\quad x\lambda\geq0\\ G\left(q;\lambda\right) & = & \left\lceil -\frac{1}{\lambda}\log\left[1-q\right]-1\right\rceil .\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \frac{1}{e^{\lambda}-1}\\ \mu_{2} & = & \frac{e^{-\lambda}}{\left(1-e^{-\lambda}\right)^{2}}\\ \gamma_{1} & = & 2\cosh\left(\frac{\lambda}{2}\right)\\ \gamma_{2} & = & 4+2\cosh\left(\lambda\right)\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \[ M\left(t\right)=\frac{1-e^{-\lambda}}{1-e^{t-\lambda}}\] - - - -.. math:: - :nowrap: - - \[ h\left[X\right]=\frac{\lambda e^{-\lambda}}{1-e^{-\lambda}}-\log\left(1-e^{-\lambda}\right)\] - - - - -Poisson -======= - -The Poisson random variable counts the number of successes in :math:`n` independent Bernoulli trials in the limit as :math:`n\rightarrow\infty` and :math:`p\rightarrow0` where the probability of success in each trial is :math:`p` and :math:`np=\lambda\geq0` is a constant. It can be used to approximate the Binomial random -variable or in it's own right to count the number of events that occur -in the interval :math:`\left[0,t\right]` for a process satisfying certain "sparsity "constraints. The functions are - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;\lambda\right) & = & e^{-\lambda}\frac{\lambda^{k}}{k!}\quad k\geq0,\\ F\left(x;\lambda\right) & = & \sum_{n=0}^{\left\lfloor x\right\rfloor }e^{-\lambda}\frac{\lambda^{n}}{n!}=\frac{1}{\Gamma\left(\left\lfloor x\right\rfloor +1\right)}\int_{\lambda}^{\infty}t^{\left\lfloor x\right\rfloor }e^{-t}dt,\\ \mu & = & \lambda\\ \mu_{2} & = & \lambda\\ \gamma_{1} & = & \frac{1}{\sqrt{\lambda}}\\ \gamma_{2} & = & \frac{1}{\lambda}.\end{eqnarray*} - - - - - -.. 
math:: - :nowrap: - - \[ M\left(t\right)=\exp\left[\lambda\left(e^{t}-1\right)\right].\] - - - - -Geometric -========= - -The geometric random variable with parameter :math:`p\in\left(0,1\right)` can be defined as the number of trials required to obtain a success -where the probability of success on each trial is :math:`p` . Thus, - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;p\right) & = & \left(1-p\right)^{k-1}p\quad k\geq1\\ F\left(x;p\right) & = & 1-\left(1-p\right)^{\left\lfloor x\right\rfloor }\quad x\geq1\\ G\left(q;p\right) & = & \left\lceil \frac{\log\left(1-q\right)}{\log\left(1-p\right)}\right\rceil \\ \mu & = & \frac{1}{p}\\ \mu_{2} & = & \frac{1-p}{p^{2}}\\ \gamma_{1} & = & \frac{2-p}{\sqrt{1-p}}\\ \gamma_{2} & = & \frac{p^{2}-6p+6}{1-p}.\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} M\left(t\right) & = & \frac{p}{e^{-t}-\left(1-p\right)}\end{eqnarray*} - - - - -Negative Binomial -================= - -The negative binomial random variable with parameters :math:`n` and :math:`p\in\left(0,1\right)` can be defined as the number of *extra* independent trials (beyond :math:`n` ) required to accumulate a total of :math:`n` successes where the probability of a success on each trial is :math:`p.` Equivalently, this random variable is the number of failures -encoutered while accumulating :math:`n` successes during independent trials of an experiment that succeeds -with probability :math:`p.` Thus, - -.. 
math:: - :nowrap: - - \begin{eqnarray*} p\left(k;n,p\right) & = & \left(\begin{array}{c} k+n-1\\ n-1\end{array}\right)p^{n}\left(1-p\right)^{k}\quad k\geq0\\ F\left(x;n,p\right) & = & \sum_{i=0}^{\left\lfloor x\right\rfloor }\left(\begin{array}{c} i+n-1\\ i\end{array}\right)p^{n}\left(1-p\right)^{i}\quad x\geq0\\ & = & I_{p}\left(n,\left\lfloor x\right\rfloor +1\right)\quad x\geq0\\ \mu & = & n\frac{1-p}{p}\\ \mu_{2} & = & n\frac{1-p}{p^{2}}\\ \gamma_{1} & = & \frac{2-p}{\sqrt{n\left(1-p\right)}}\\ \gamma_{2} & = & \frac{p^{2}+6\left(1-p\right)}{n\left(1-p\right)}.\end{eqnarray*} - -Recall that :math:`I_{p}\left(a,b\right)` is the incomplete beta integral. - - -Hypergeometric -============== - -The hypergeometric random variable with parameters :math:`\left(M,n,N\right)` counts the number of "good "objects in a sample of size :math:`N` chosen without replacement from a population of :math:`M` objects where :math:`n` is the number of "good "objects in the total population. - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;N,n,M\right) & = & \frac{\left(\begin{array}{c} n\\ k\end{array}\right)\left(\begin{array}{c} M-n\\ N-k\end{array}\right)}{\left(\begin{array}{c} M\\ N\end{array}\right)}\quad N-\left(M-n\right)\leq k\leq\min\left(n,N\right)\\ F\left(x;N,n,M\right) & = & \sum_{k=0}^{\left\lfloor x\right\rfloor }\frac{\left(\begin{array}{c} m\\ k\end{array}\right)\left(\begin{array}{c} N-m\\ n-k\end{array}\right)}{\left(\begin{array}{c} N\\ n\end{array}\right)},\\ \mu & = & \frac{nN}{M}\\ \mu_{2} & = & \frac{nN\left(M-n\right)\left(M-N\right)}{M^{2}\left(M-1\right)}\\ \gamma_{1} & = & \frac{\left(M-2n\right)\left(M-2N\right)}{M-2}\sqrt{\frac{M-1}{nN\left(M-m\right)\left(M-n\right)}}\\ \gamma_{2} & = & \frac{g\left(N,n,M\right)}{nN\left(M-n\right)\left(M-3\right)\left(M-2\right)\left(N-M\right)}\end{eqnarray*} - -where (defining :math:`m=M-n` ) - -.. 
math:: - :nowrap: - - \begin{eqnarray*} g\left(N,n,M\right) & = & m^{3}-m^{5}+3m^{2}n-6m^{3}n+m^{4}n+3mn^{2}\\ & & -12m^{2}n^{2}+8m^{3}n^{2}+n^{3}-6mn^{3}+8m^{2}n^{3}\\ & & +mn^{4}-n^{5}-6m^{3}N+6m^{4}N+18m^{2}nN\\ & & -6m^{3}nN+18mn^{2}N-24m^{2}n^{2}N-6n^{3}N\\ & & -6mn^{3}N+6n^{4}N+6m^{2}N^{2}-6m^{3}N^{2}-24mnN^{2}\\ & & +12m^{2}nN^{2}+6n^{2}N^{2}+12mn^{2}N^{2}-6n^{3}N^{2}.\end{eqnarray*} - - - - -Zipf (Zeta) -=========== - -A random variable has the zeta distribution (also called the zipf -distribution) with parameter :math:`\alpha>1` if it's probability mass function is given by - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;\alpha\right) & = & \frac{1}{\zeta\left(\alpha\right)k^{\alpha}}\quad k\geq1\end{eqnarray*} - -where - -.. math:: - :nowrap: - - \[ \zeta\left(\alpha\right)=\sum_{n=1}^{\infty}\frac{1}{n^{\alpha}}\] - -is the Riemann zeta function. Other functions of this distribution are - -.. math:: - :nowrap: - - \begin{eqnarray*} F\left(x;\alpha\right) & = & \frac{1}{\zeta\left(\alpha\right)}\sum_{k=1}^{\left\lfloor x\right\rfloor }\frac{1}{k^{\alpha}}\\ \mu & = & \frac{\zeta_{1}}{\zeta_{0}}\quad\alpha>2\\ \mu_{2} & = & \frac{\zeta_{2}\zeta_{0}-\zeta_{1}^{2}}{\zeta_{0}^{2}}\quad\alpha>3\\ \gamma_{1} & = & \frac{\zeta_{3}\zeta_{0}^{2}-3\zeta_{0}\zeta_{1}\zeta_{2}+2\zeta_{1}^{3}}{\left[\zeta_{2}\zeta_{0}-\zeta_{1}^{2}\right]^{3/2}}\quad\alpha>4\\ \gamma_{2} & = & \frac{\zeta_{4}\zeta_{0}^{3}-4\zeta_{3}\zeta_{1}\zeta_{0}^{2}+12\zeta_{2}\zeta_{1}^{2}\zeta_{0}-6\zeta_{1}^{4}-3\zeta_{2}^{2}\zeta_{0}^{2}}{\left(\zeta_{2}\zeta_{0}-\zeta_{1}^{2}\right)^{2}}.\end{eqnarray*} - - - - - -.. math:: - :nowrap: - - \begin{eqnarray*} M\left(t\right) & = & \frac{\textrm{Li}_{\alpha}\left(e^{t}\right)}{\zeta\left(\alpha\right)}\end{eqnarray*} - -where :math:`\zeta_{i}=\zeta\left(\alpha-i\right)` and :math:`\textrm{Li}_{n}\left(z\right)` is the :math:`n^{\textrm{th}}` polylogarithm function of :math:`z` defined as - -.. 
math:: - :nowrap: - - \[ \textrm{Li}_{n}\left(z\right)\equiv\sum_{k=1}^{\infty}\frac{z^{k}}{k^{n}}\] - - - -.. math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\left.M^{\left(n\right)}\left(t\right)\right|_{t=0}=\left.\frac{\textrm{Li}_{\alpha-n}\left(e^{t}\right)}{\zeta\left(a\right)}\right|_{t=0}=\frac{\zeta\left(\alpha-n\right)}{\zeta\left(\alpha\right)}\] - - - - -Logarithmic (Log-Series, Series) -================================ - -The logarimthic distribution with parameter :math:`p` has a probability mass function with terms proportional to the Taylor -series expansion of :math:`\log\left(1-p\right)` - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;p\right) & = & -\frac{p^{k}}{k\log\left(1-p\right)}\quad k\geq1\\ F\left(x;p\right) & = & -\frac{1}{\log\left(1-p\right)}\sum_{k=1}^{\left\lfloor x\right\rfloor }\frac{p^{k}}{k}=1+\frac{p^{1+\left\lfloor x\right\rfloor }\Phi\left(p,1,1+\left\lfloor x\right\rfloor \right)}{\log\left(1-p\right)}\end{eqnarray*} - -where - -.. math:: - :nowrap: - - \[ \Phi\left(z,s,a\right)=\sum_{k=0}^{\infty}\frac{z^{k}}{\left(a+k\right)^{s}}\] - -is the Lerch Transcendent. Also define :math:`r=\log\left(1-p\right)` - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & -\frac{p}{\left(1-p\right)r}\\ \mu_{2} & = & -\frac{p\left[p+r\right]}{\left(1-p\right)^{2}r^{2}}\\ \gamma_{1} & = & -\frac{2p^{2}+3pr+\left(1+p\right)r^{2}}{r\left(p+r\right)\sqrt{-p\left(p+r\right)}}r\\ \gamma_{2} & = & -\frac{6p^{3}+12p^{2}r+p\left(4p+7\right)r^{2}+\left(p^{2}+4p+1\right)r^{3}}{p\left(p+r\right)^{2}}.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} M\left(t\right) & = & -\frac{1}{\log\left(1-p\right)}\sum_{k=1}^{\infty}\frac{e^{tk}p^{k}}{k}\\ & = & \frac{\log\left(1-pe^{t}\right)}{\log\left(1-p\right)}\end{eqnarray*} - -Thus, - -.. 
math:: - :nowrap: - - \[ \mu_{n}^{\prime}=\left.M^{\left(n\right)}\left(t\right)\right|_{t=0}=\left.\frac{\textrm{Li}_{1-n}\left(pe^{t}\right)}{\log\left(1-p\right)}\right|_{t=0}=-\frac{\textrm{Li}_{1-n}\left(p\right)}{\log\left(1-p\right)}.\] - - - - -Discrete Uniform (randint) -========================== - -The discrete uniform distribution with parameters :math:`\left(a,b\right)` constructs a random variable that has an equal probability of being -any one of the integers in the half-open range :math:`[a,b).` If :math:`a` is not given it is assumed to be zero and the only parameter is :math:`b.` Therefore, - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k;a,b\right) & = & \frac{1}{b-a}\quad a\leq k0` - -.. math:: - :nowrap: - - \begin{eqnarray*} p\left(k\right) & = & \tanh\left(\frac{a}{2}\right)e^{-a\left|k\right|},\\ F\left(x\right) & = & \left\{ \begin{array}{cc} \frac{e^{a\left(\left\lfloor x\right\rfloor +1\right)}}{e^{a}+1} & \left\lfloor x\right\rfloor <0,\\ 1-\frac{e^{-a\left\lfloor x\right\rfloor }}{e^{a}+1} & \left\lfloor x\right\rfloor \geq0.\end{array}\right.\\ G\left(q\right) & = & \left\{ \begin{array}{cc} \left\lceil \frac{1}{a}\log\left[q\left(e^{a}+1\right)\right]-1\right\rceil & q<\frac{1}{1+e^{-a}},\\ \left\lceil -\frac{1}{a}\log\left[\left(1-q\right)\left(1+e^{a}\right)\right]\right\rceil & q\geq\frac{1}{1+e^{-a}}.\end{array}\right.\end{eqnarray*} - - - -.. math:: - :nowrap: - - \begin{eqnarray*} M\left(t\right) & = & \tanh\left(\frac{a}{2}\right)\sum_{k=-\infty}^{\infty}e^{tk}e^{-a\left|k\right|}\\ & = & C\left(1+\sum_{k=1}^{\infty}e^{-\left(t+a\right)k}+\sum_{1}^{\infty}e^{\left(t-a\right)k}\right)\\ & = & \tanh\left(\frac{a}{2}\right)\left(1+\frac{e^{-\left(t+a\right)}}{1-e^{-\left(t+a\right)}}+\frac{e^{t-a}}{1-e^{t-a}}\right)\\ & = & \frac{\tanh\left(\frac{a}{2}\right)\sinh a}{\cosh a-\cosh t}.\end{eqnarray*} - -Thus, - -.. 
math:: - :nowrap: - - \[ \mu_{n}^{\prime}=M^{\left(n\right)}\left(0\right)=\left[1+\left(-1\right)^{n}\right]\textrm{Li}_{-n}\left(e^{-a}\right)\] - -where :math:`\textrm{Li}_{-n}\left(z\right)` is the polylogarithm function of order :math:`-n` evaluated at :math:`z.` - -.. math:: - :nowrap: - - \[ h\left[X\right]=-\log\left(\tanh\left(\frac{a}{2}\right)\right)+\frac{a}{\sinh a}\] - - - - -Discrete Gaussian* -================== - -Defined for all :math:`\mu` and :math:`\lambda>0` and :math:`k` - -.. math:: - :nowrap: - - \[ p\left(k;\mu,\lambda\right)=\frac{1}{Z\left(\lambda\right)}\exp\left[-\lambda\left(k-\mu\right)^{2}\right]\] - -where - -.. math:: - :nowrap: - - \[ Z\left(\lambda\right)=\sum_{k=-\infty}^{\infty}\exp\left[-\lambda k^{2}\right]\] - - - -.. math:: - :nowrap: - - \begin{eqnarray*} \mu & = & \mu\\ \mu_{2} & = & -\frac{\partial}{\partial\lambda}\log Z\left(\lambda\right)\\ & = & G\left(\lambda\right)e^{-\lambda}\end{eqnarray*} - -where :math:`G\left(0\right)\rightarrow\infty` and :math:`G\left(\infty\right)\rightarrow2` with a minimum less than 2 near :math:`\lambda=1` - -.. math:: - :nowrap: - - \[ G\left(\lambda\right)=\frac{1}{Z\left(\lambda\right)}\sum_{k=-\infty}^{\infty}k^{2}\exp\left[-\lambda\left(k+1\right)\left(k-1\right)\right]\] diff --git a/scipy-0.10.1/doc/source/tutorial/weave.rst b/scipy-0.10.1/doc/source/tutorial/weave.rst deleted file mode 100644 index 6ed8377fe6..0000000000 --- a/scipy-0.10.1/doc/source/tutorial/weave.rst +++ /dev/null @@ -1,2537 +0,0 @@ -********************* -Weave (`scipy.weave`) -********************* - -.. sectionauthor:: Eric Jones eric@enthought.com - -======= -Outline -======= - -.. contents:: - - -============ -Introduction -============ - -The :mod:`scipy.weave` (below just :mod:`weave`) package provides tools for -including C/C++ code within in -Python code. 
This offers both another level of optimization to those who need -it, and an easy way to modify and extend any supported extension libraries -such as wxPython and hopefully VTK soon. Inlining C/C++ code within Python -generally results in speed ups of 1.5x to 30x speed-up over algorithms -written in pure Python (However, it is also possible to slow things down...). -Generally algorithms that require a large number of calls to the Python API -don't benefit as much from the conversion to C/C++ as algorithms that have -inner loops completely convertable to C. - -There are three basic ways to use ``weave``. The ``weave.inline()`` function -executes C code directly within Python, and ``weave.blitz()`` translates -Python NumPy expressions to C++ for fast execution. ``blitz()`` was the -original reason ``weave`` was built. For those interested in building -extension libraries, the ``ext_tools`` module provides classes for building -extension modules within Python. - -Most of ``weave's`` functionality should work on Windows and Unix, although -some of its functionality requires ``gcc`` or a similarly modern C++ compiler -that handles templates well. Up to now, most testing has been done on Windows -2000 with Microsoft's C++ compiler (MSVC) and with gcc (mingw32 2.95.2 and -2.95.3-6). All tests also pass on Linux (RH 7.1 with gcc 2.96), and I've had -reports that it works on Debian also (thanks Pearu). - -The ``inline`` and ``blitz`` provide new functionality to Python (although -I've recently learned about the `PyInline`_ project which may offer similar -functionality to ``inline``). On the other hand, tools for building Python -extension modules already exists (SWIG, SIP, pycpp, CXX, and others). As of -yet, I'm not sure where ``weave`` fits in this spectrum. It is closest in -flavor to CXX in that it makes creating new C/C++ extension modules pretty -easy. 
However, if you're wrapping a gaggle of legacy functions or classes, -SWIG and friends are definitely the better choice. ``weave`` is set up so -that you can customize how Python types are converted to C types in -``weave``. This is great for ``inline()``, but, for wrapping legacy code, it -is more flexible to specify things the other way around -- that is how C -types map to Python types. This ``weave`` does not do. I guess it would be -possible to build such a tool on top of ``weave``, but with good tools like -SWIG around, I'm not sure the effort produces any new capabilities. Things -like function overloading are probably easily implemented in ``weave`` and it -might be easier to mix Python/C code in function calls, but nothing beyond -this comes to mind. So, if you're developing new extension modules or -optimizing Python functions in C, ``weave.ext_tools()`` might be the tool for -you. If you're wrapping legacy code, stick with SWIG. - -The next several sections give the basics of how to use ``weave``. We'll -discuss what's happening under the covers in more detail later on. Serious -users will need to at least look at the type conversion section to understand -how Python variables map to C/C++ types and how to customize this behavior. -One other note. If you don't know C or C++ then these docs are probably of -very little help to you. Further, it'd be helpful if you know something about -writing Python extensions. ``weave`` does quite a bit for you, but for -anything complex, you'll need to do some conversions, reference counting, -etc. - -.. note:: - - ``weave`` is actually part of the `SciPy`_ package. However, it - also works fine as a standalone package (you can install from scipy/weave - with ``python setup.py install``). The examples here are given as if it is - used as a stand alone package. If you are using from within scipy, you can - use ``from scipy import weave`` and the examples will work identically. 
- - -============== - Requirements -============== - -- Python - - I use 2.1.1. Probably 2.0 or higher should work. - -- C++ compiler - - ``weave`` uses ``distutils`` to actually build extension modules, so - it uses whatever compiler was originally used to build Python. ``weave`` - itself requires a C++ compiler. If you used a C++ compiler to build - Python, your probably fine. - - On Unix gcc is the preferred choice because I've done a little - testing with it. All testing has been done with gcc, but I expect the - majority of compilers should work for ``inline`` and ``ext_tools``. The - one issue I'm not sure about is that I've hard coded things so that - compilations are linked with the ``stdc++`` library. *Is this standard - across Unix compilers, or is this a gcc-ism?* - - For ``blitz()``, you'll need a reasonably recent version of gcc. - 2.95.2 works on windows and 2.96 looks fine on Linux. Other versions are - likely to work. Its likely that KAI's C++ compiler and maybe some others - will work, but I haven't tried. My advice is to use gcc for now unless - your willing to tinker with the code some. - - On Windows, either MSVC or gcc (`mingw32`_) should work. Again, - you'll need gcc for ``blitz()`` as the MSVC compiler doesn't handle - templates well. - - I have not tried Cygwin, so please report success if it works for - you. - -- NumPy - - The python `NumPy`_ module is required for ``blitz()`` to - work and for numpy.distutils which is used by weave. - - -============== - Installation -============== - -There are currently two ways to get ``weave``. First, ``weave`` is part of -SciPy and installed automatically (as a sub- package) whenever SciPy is -installed. Second, since ``weave`` is useful outside of the scientific -community, it has been setup so that it can be used as a stand-alone module. - -The stand-alone version can be downloaded from `here`_. Instructions for -installing should be found there as well. setup.py file to simplify -installation. 
- - -========= - Testing -========= - -Once ``weave`` is installed, fire up python and run its unit tests. - -:: - - >>> import weave - >>> weave.test() - runs long time... spews tons of output and a few warnings - . - . - . - .............................................................. - ................................................................ - .................................................. - ---------------------------------------------------------------------- - Ran 184 tests in 158.418s - OK - >>> - - -This takes a while, usually several minutes. On Unix with remote file -systems, I've had it take 15 or so minutes. In the end, it should run about -180 tests and spew some speed results along the way. If you get errors, -they'll be reported at the end of the output. Please report errors that you -find. Some tests are known to fail at this point. - - -If you only want to test a single module of the package, you can do this by -running test() for that specific module. - -:: - - >>> import weave.scalar_spec - >>> weave.scalar_spec.test() - ....... - ---------------------------------------------------------------------- - Ran 7 tests in 23.284s - - -Testing Notes: -============== - - -- Windows 1 - - I've had some test fail on windows machines where I have msvc, - gcc-2.95.2 (in c:\gcc-2.95.2), and gcc-2.95.3-6 (in c:\gcc) all - installed. My environment has c:\gcc in the path and does not have - c:\gcc-2.95.2 in the path. The test process runs very smoothly until the - end where several test using gcc fail with cpp0 not found by g++. If I - check os.system('gcc -v') before running tests, I get gcc-2.95.3-6. If I - check after running tests (and after failure), I get gcc-2.95.2. ??huh??. - The os.environ['PATH'] still has c:\gcc first in it and is not corrupted - (msvc/distutils messes with the environment variables, so we have to undo - its work in some places). 
If anyone else sees this, let me know - - it - may just be an quirk on my machine (unlikely). Testing with the gcc- - 2.95.2 installation always works. - -- Windows 2 - - If you run the tests from PythonWin or some other GUI tool, you'll - get a ton of DOS windows popping up periodically as ``weave`` spawns the - compiler multiple times. Very annoying. Anyone know how to fix this? - -- wxPython - - wxPython tests are not enabled by default because importing wxPython - on a Unix machine without access to a X-term will cause the program to - exit. Anyone know of a safe way to detect whether wxPython can be - imported and whether a display exists on a machine? - -============ - Benchmarks -============ - -This section has not been updated from old scipy weave and Numeric.... - -This section has a few benchmarks -- thats all people want to see anyway -right? These are mostly taken from running files in the ``weave/example`` -directory and also from the test scripts. Without more information about what -the test actually do, their value is limited. Still, their here for the -curious. Look at the example scripts for more specifics about what problem -was actually solved by each run. These examples are run under windows 2000 -using Microsoft Visual C++ and python2.1 on a 850 MHz PIII laptop with 320 MB -of RAM. Speed up is the improvement (degredation) factor of ``weave`` -compared to conventional Python functions. ``The blitz()`` comparisons are -shown compared to NumPy. - -.. table:: inline and ext_tools - - ====================== =========== - Algorithm Speed up - ====================== =========== - binary search 1.50 - fibonacci (recursive) 82.10 - fibonacci (loop) 9.17 - return None 0.14 - map 1.20 - dictionary sort 2.54 - vector quantization 37.40 - ====================== =========== - -.. 
table:: blitz -- double precision - - ==================================== ============= - Algorithm Speed up - ==================================== ============= - a = b + c 512x512 3.05 - a = b + c + d 512x512 4.59 - 5 pt avg. filter, 2D Image 512x512 9.01 - Electromagnetics (FDTD) 100x100x100 8.61 - ==================================== ============= - -The benchmarks shown ``blitz`` in the best possible light. NumPy (at least on -my machine) is significantly worse for double precision than it is for single -precision calculations. If your interested in single precision results, you -can pretty much divide the double precision speed up by 3 and you'll be -close. - - -======== - Inline -======== - -``inline()`` compiles and executes C/C++ code on the fly. Variables in the -local and global Python scope are also available in the C/C++ code. Values -are passed to the C/C++ code by assignment much like variables are passed -into a standard Python function. Values are returned from the C/C++ code -through a special argument called return_val. Also, the contents of mutable -objects can be changed within the C/C++ code and the changes remain after the -C code exits and returns to Python. (more on this later) - -Here's a trivial ``printf`` example using ``inline()``:: - - >>> import weave - >>> a = 1 - >>> weave.inline('printf("%d\\n",a);',['a']) - 1 - -In this, its most basic form, ``inline(c_code, var_list)`` requires two -arguments. ``c_code`` is a string of valid C/C++ code. ``var_list`` is a list -of variable names that are passed from Python into C/C++. Here we have a -simple ``printf`` statement that writes the Python variable ``a`` to the -screen. The first time you run this, there will be a pause while the code is -written to a .cpp file, compiled into an extension module, loaded into -Python, cataloged for future use, and executed. 
On windows (850 MHz PIII), -this takes about 1.5 seconds when using Microsoft's C++ compiler (MSVC) and -6-12 seconds using gcc (mingw32 2.95.2). All subsequent executions of the -code will happen very quickly because the code only needs to be compiled -once. If you kill and restart the interpreter and then execute the same code -fragment again, there will be a much shorter delay in the fractions of -seconds range. This is because ``weave`` stores a catalog of all previously -compiled functions in an on disk cache. When it sees a string that has been -compiled, it loads the already compiled module and executes the appropriate -function. - -.. note:: - If you try the ``printf`` example in a GUI shell such as IDLE, - PythonWin, PyShell, etc., you're unlikely to see the output. This is because - the C code is writing to stdout, instead of to the GUI window. This doesn't - mean that inline doesn't work in these environments -- it only means that - standard out in C is not the same as the standard out for Python in these - cases. Non input/output functions will work as expected. - -Although effort has been made to reduce the overhead associated with calling -inline, it is still less efficient for simple code snippets than using -equivalent Python code. The simple ``printf`` example is actually slower by -30% or so than using Python ``print`` statement. And, it is not difficult to -create code fragments that are 8-10 times slower using inline than equivalent -Python. However, for more complicated algorithms, the speed up can be worth -while -- anywhwere from 1.5- 30 times faster. Algorithms that have to -manipulate Python objects (sorting a list) usually only see a factor of 2 or -so improvement. Algorithms that are highly computational or manipulate NumPy -arrays can see much larger improvements. The examples/vq.py file shows a -factor of 30 or more improvement on the vector quantization algorithm that is -used heavily in information theory and classification problems. 
- - -More with printf -================ - -MSVC users will actually see a bit of compiler output that distutils does not -supress the first time the code executes:: - - >>> weave.inline(r'printf("%d\n",a);',['a']) - sc_e013937dbc8c647ac62438874e5795131.cpp - Creating library C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp - \Release\sc_e013937dbc8c647ac62438874e5795131.lib and - object C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_e013937dbc8c647ac62438874e5795131.exp - 1 - -Nothing bad is happening, its just a bit annoying. * Anyone know how to turn -this off?* - -This example also demonstrates using 'raw strings'. The ``r`` preceeding the -code string in the last example denotes that this is a 'raw string'. In raw -strings, the backslash character is not interpreted as an escape character, -and so it isn't necessary to use a double backslash to indicate that the '\n' -is meant to be interpreted in the C ``printf`` statement instead of by -Python. If your C code contains a lot of strings and control characters, raw -strings might make things easier. Most of the time, however, standard strings -work just as well. - -The ``printf`` statement in these examples is formatted to print out -integers. What happens if ``a`` is a string? ``inline`` will happily, compile -a new version of the code to accept strings as input, and execute the code. -The result? - -:: - - >>> a = 'string' - >>> weave.inline(r'printf("%d\n",a);',['a']) - 32956972 - - -In this case, the result is non-sensical, but also non-fatal. In other -situations, it might produce a compile time error because ``a`` is required -to be an integer at some point in the code, or it could produce a -segmentation fault. Its possible to protect against passing ``inline`` -arguments of the wrong data type by using asserts in Python. - -:: - - >>> a = 'string' - >>> def protected_printf(a): - ... assert(type(a) == type(1)) - ... 
weave.inline(r'printf("%d\n",a);',['a']) - >>> protected_printf(1) - 1 - >>> protected_printf('string') - AssertError... - - -For printing strings, the format statement needs to be changed. Also, weave -doesn't convert strings to char*. Instead it uses CXX Py::String type, so you -have to do a little more work. Here we convert it to a C++ std::string and -then ask cor the char* version. - -:: - - >>> a = 'string' - >>> weave.inline(r'printf("%s\n",std::string(a).c_str());',['a']) - string - -.. admonition:: XXX - - This is a little convoluted. Perhaps strings should convert to ``std::string`` - objects instead of CXX objects. Or maybe to ``char*``. - -As in this case, C/C++ code fragments often have to change to accept -different types. For the given printing task, however, C++ streams provide a -way of a single statement that works for integers and strings. By default, -the stream objects live in the std (standard) namespace and thus require the -use of ``std::``. - -:: - - >>> weave.inline('std::cout << a << std::endl;',['a']) - 1 - >>> a = 'string' - >>> weave.inline('std::cout << a << std::endl;',['a']) - string - - -Examples using ``printf`` and ``cout`` are included in -examples/print_example.py. - - -More examples -============= - -This section shows several more advanced uses of ``inline``. It includes a -few algorithms from the `Python Cookbook`_ that have been re-written in -inline C to improve speed as well as a couple examples using NumPy and -wxPython. - -Binary search -------------- - -Lets look at the example of searching a sorted list of integers for a value. -For inspiration, we'll use Kalle Svensson's `binary_search()`_ algorithm -from the Python Cookbook. His recipe follows:: - - def binary_search(seq, t): - min = 0; max = len(seq) - 1 - while 1: - if max < min: - return -1 - m = (min + max) / 2 - if seq[m] < t: - min = m + 1 - elif seq[m] > t: - max = m - 1 - else: - return m - - -This Python version works for arbitrary Python data types. 
The C version -below is specialized to handle integer values. There is a little type -checking done in Python to assure that we're working with the correct data -types before heading into C. The variables ``seq`` and ``t`` don't need to be -declared beacuse ``weave`` handles converting and declaring them in the C -code. All other temporary variables such as ``min, max``, etc. must be -declared -- it is C after all. Here's the new mixed Python/C function:: - - def c_int_binary_search(seq,t): - # do a little type checking in Python - assert(type(t) == type(1)) - assert(type(seq) == type([])) - - # now the C code - code = """ - #line 29 "binary_search.py" - int val, m, min = 0; - int max = seq.length() - 1; - PyObject *py_val; - for(;;) - { - if (max < min ) - { - return_val = Py::new_reference_to(Py::Int(-1)); - break; - } - m = (min + max) /2; - val = py_to_int(PyList_GetItem(seq.ptr(),m),"val"); - if (val < t) - min = m + 1; - else if (val > t) - max = m - 1; - else - { - return_val = Py::new_reference_to(Py::Int(m)); - break; - } - } - """ - return inline(code,['seq','t']) - -We have two variables ``seq`` and ``t`` passed in. ``t`` is guaranteed (by -the ``assert``) to be an integer. Python integers are converted to C int -types in the transition from Python to C. ``seq`` is a Python list. By -default, it is translated to a CXX list object. Full documentation for the -CXX library can be found at its `website`_. The basics are that the CXX -provides C++ class equivalents for Python objects that simplify, or at least -object orientify, working with Python objects in C/C++. For example, -``seq.length()`` returns the length of the list. A little more about CXX and -its class methods, etc. is in the ** type conversions ** section. - -.. note:: - CXX uses templates and therefore may be a little less portable than - another alternative by Gordan McMillan called SCXX which was - inspired by CXX. It doesn't use templates so it should compile - faster and be more portable. 
SCXX has a few less features, but it - appears to me that it would mesh with the needs of weave quite well. - Hopefully xxx_spec files will be written for SCXX in the future, and - we'll be able to compare on a more empirical basis. Both sets of - spec files will probably stick around, it just a question of which - becomes the default. - -Most of the algorithm above looks similar in C to the original Python code. -There are two main differences. The first is the setting of ``return_val`` -instead of directly returning from the C code with a ``return`` statement. -``return_val`` is an automatically defined variable of type ``PyObject*`` -that is returned from the C code back to Python. You'll have to handle -reference counting issues when setting this variable. In this example, CXX -classes and functions handle the dirty work. All CXX functions and classes -live in the namespace ``Py::``. The following code converts the integer ``m`` -to a CXX ``Int()`` object and then to a ``PyObject*`` with an incremented -reference count using ``Py::new_reference_to()``. - -:: - - return_val = Py::new_reference_to(Py::Int(m)); - - -The second big differences shows up in the retrieval of integer values from -the Python list. The simple Python ``seq[i]`` call balloons into a C Python -API call to grab the value out of the list and then a separate call to -``py_to_int()`` that converts the PyObject* to an integer. ``py_to_int()`` -includes both a NULL cheack and a ``PyInt_Check()`` call as well as the -conversion call. If either of the checks fail, an exception is raised. The -entire C++ code block is executed with in a ``try/catch`` block that handles -exceptions much like Python does. This removes the need for most error -checking code. - -It is worth note that CXX lists do have indexing operators that result in -code that looks much like Python. 
However, the overhead in using them appears -to be relatively high, so the standard Python API was used on the -``seq.ptr()`` which is the underlying ``PyObject*`` of the List object. - -The ``#line`` directive that is the first line of the C code block isn't -necessary, but it's nice for debugging. If the compilation fails because of -the syntax error in the code, the error will be reported as an error in the -Python file "binary_search.py" with an offset from the given line number (29 -here). - -So what was all our effort worth in terms of efficiency? Well not a lot in -this case. The examples/binary_search.py file runs both Python and C versions -of the functions As well as using the standard ``bisect`` module. If we run -it on a 1 million element list and run the search 3000 times (for 0- 2999), -here are the results we get:: - - C:\home\ej\wrk\scipy\weave\examples> python binary_search.py - Binary search for 3000 items in 1000000 length list of integers: - speed in python: 0.159999966621 - speed of bisect: 0.121000051498 - speed up: 1.32 - speed in c: 0.110000014305 - speed up: 1.45 - speed in c(no asserts): 0.0900000333786 - speed up: 1.78 - - -So, we get roughly a 50-75% improvement depending on whether we use the -Python asserts in our C version. If we move down to searching a 10000 element -list, the advantage evaporates. Even smaller lists might result in the Python -version being faster. I'd like to say that moving to NumPy lists (and getting -rid of the GetItem() call) offers a substantial speed up, but my preliminary -efforts didn't produce one. I think the log(N) algorithm is to blame. Because -the algorithm is nice, there just isn't much time spent computing things, so -moving to C isn't that big of a win. If there are ways to reduce conversion -overhead of values, this may improve the C/Python speed up. Anyone have other -explanations or faster code, please let me know. 
- - -Dictionary Sort ---------------- - -The demo in examples/dict_sort.py is another example from the Python -CookBook. `This submission`_, by Alex Martelli, demonstrates how to return -the values from a dictionary sorted by their keys: - -:: - - def sortedDictValues3(adict): - keys = adict.keys() - keys.sort() - return map(adict.get, keys) - - -Alex provides 3 algorithms and this is the 3rd and fastest of the set. The C -version of this same algorithm follows:: - - def c_sort(adict): - assert(type(adict) == type({})) - code = """ - #line 21 "dict_sort.py" - Py::List keys = adict.keys(); - Py::List items(keys.length()); keys.sort(); - PyObject* item = NULL; - for(int i = 0; i < keys.length();i++) - { - item = PyList_GET_ITEM(keys.ptr(),i); - item = PyDict_GetItem(adict.ptr(),item); - Py_XINCREF(item); - PyList_SetItem(items.ptr(),i,item); - } - return_val = Py::new_reference_to(items); - """ - return inline_tools.inline(code,['adict'],verbose=1) - - -Like the original Python function, the C++ version can handle any Python -dictionary regardless of the key/value pair types. It uses CXX objects for -the most part to declare python types in C++, but uses Python API calls to -manipulate their contents. Again, this choice is made for speed. The C++ -version, while more complicated, is about a factor of 2 faster than Python. - -:: - - C:\home\ej\wrk\scipy\weave\examples> python dict_sort.py - Dict sort of 1000 items for 300 iterations: - speed in python: 0.319999933243 - [0, 1, 2, 3, 4] - speed in c: 0.151000022888 - speed up: 2.12 - [0, 1, 2, 3, 4] - - - -NumPy -- cast/copy/transpose ----------------------------- - -CastCopyTranspose is a function called quite heavily by Linear Algebra -routines in the NumPy library. Its needed in part because of the row-major -memory layout of multi-demensional Python (and C) arrays vs. the col-major -order of the underlying Fortran algorithms. 
For small matrices (say 100x100 -or less), a significant portion of the common routines such as LU -decompisition or singular value decompostion are spent in this setup routine. -This shouldn't happen. Here is the Python version of the function using -standard NumPy operations. - -:: - - def _castCopyAndTranspose(type, array): - if a.typecode() == type: - cast_array = copy.copy(NumPy.transpose(a)) - else: - cast_array = copy.copy(NumPy.transpose(a).astype(type)) - return cast_array - - -And the following is a inline C version of the same function:: - - from weave.blitz_tools import blitz_type_factories - from weave import scalar_spec - from weave import inline - def _cast_copy_transpose(type,a_2d): - assert(len(shape(a_2d)) == 2) - new_array = zeros(shape(a_2d),type) - NumPy_type = scalar_spec.NumPy_to_blitz_type_mapping[type] - code = \ - """ - for(int i = 0;i < _Na_2d[0]; i++) - for(int j = 0; j < _Na_2d[1]; j++) - new_array(i,j) = (%s) a_2d(j,i); - """ % NumPy_type - inline(code,['new_array','a_2d'], - type_factories = blitz_type_factories,compiler='gcc') - return new_array - - -This example uses blitz++ arrays instead of the standard representation of -NumPy arrays so that indexing is simplier to write. This is accomplished by -passing in the blitz++ "type factories" to override the standard Python to -C++ type conversions. Blitz++ arrays allow you to write clean, fast code, but -they also are sloooow to compile (20 seconds or more for this snippet). This -is why they aren't the default type used for Numeric arrays (and also because -most compilers can't compile blitz arrays...). ``inline()`` is also forced to -use 'gcc' as the compiler because the default compiler on Windows (MSVC) will -not compile blitz code. ('gcc' I think will use the standard compiler on -Unix machine instead of explicitly forcing gcc (check this)) Comparisons of -the Python vs inline C++ code show a factor of 3 speed up. 
Also shown are the -results of an "inplace" transpose routine that can be used if the output of -the linear algebra routine can overwrite the original matrix (this is often -appropriate). This provides another factor of 2 improvement. - -:: - - #C:\home\ej\wrk\scipy\weave\examples> python cast_copy_transpose.py - # Cast/Copy/Transposing (150,150)array 1 times - # speed in python: 0.870999932289 - # speed in c: 0.25 - # speed up: 3.48 - # inplace transpose c: 0.129999995232 - # speed up: 6.70 - -wxPython --------- - -``inline`` knows how to handle wxPython objects. Thats nice in and of itself, -but it also demonstrates that the type conversion mechanism is reasonably -flexible. Chances are, it won't take a ton of effort to support special types -you might have. The examples/wx_example.py borrows the scrolled window -example from the wxPython demo, accept that it mixes inline C code in the -middle of the drawing function. - -:: - - def DoDrawing(self, dc): - - red = wxNamedColour("RED"); - blue = wxNamedColour("BLUE"); - grey_brush = wxLIGHT_GREY_BRUSH; - code = \ - """ - #line 108 "wx_example.py" - dc->BeginDrawing(); - dc->SetPen(wxPen(*red,4,wxSOLID)); - dc->DrawRectangle(5,5,50,50); - dc->SetBrush(*grey_brush); - dc->SetPen(wxPen(*blue,4,wxSOLID)); - dc->DrawRectangle(15, 15, 50, 50); - """ - inline(code,['dc','red','blue','grey_brush']) - - dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL)) - dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF)) - te = dc.GetTextExtent("Hello World") - dc.DrawText("Hello World", 60, 65) - - dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4)) - dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1]) - ... - -Here, some of the Python calls to wx objects were just converted to C++ -calls. There isn't any benefit, it just demonstrates the capabilities. You -might want to use this if you have a computationally intensive loop in your -drawing code that you want to speed up. 
On windows, you'll have to use the -MSVC compiler if you use the standard wxPython DLLs distributed by Robin -Dunn. Thats because MSVC and gcc, while binary compatible in C, are not -binary compatible for C++. In fact, its probably best, no matter what -platform you're on, to specify that ``inline`` use the same compiler that was -used to build wxPython to be on the safe side. There isn't currently a way to -learn this info from the library -- you just have to know. Also, at least on -the windows platform, you'll need to install the wxWindows libraries and link -to them. I think there is a way around this, but I haven't found it yet -- I -get some linking errors dealing with wxString. One final note. You'll -probably have to tweak weave/wx_spec.py or weave/wx_info.py for your -machine's configuration to point at the correct directories etc. There. That -should sufficiently scare people into not even looking at this... :) - -Keyword Option -============== - -The basic definition of the ``inline()`` function has a slew of optional -variables. It also takes keyword arguments that are passed to ``distutils`` -as compiler options. The following is a formatted cut/paste of the argument -section of ``inline's`` doc-string. It explains all of the variables. Some -examples using various options will follow. - -:: - - def inline(code,arg_names,local_dict = None, global_dict = None, - force = 0, - compiler='', - verbose = 0, - support_code = None, - customize=None, - type_factories = None, - auto_downcast=1, - **kw): - - -``inline`` has quite a few options as listed below. Also, the keyword -arguments for distutils extension modules are accepted to specify extra -information needed for compiling. - -Inline Arguments -================ - -code string. A string of valid C++ code. It should not specify a return -statement. Instead it should assign results that need to be returned to -Python in the return_val. arg_names list of strings. 
A list of Python -variable names that should be transferred from Python into the C/C++ code. -local_dict optional. dictionary. If specified, it is a dictionary of values -that should be used as the local scope for the C/C++ code. If local_dict is -not specified the local dictionary of the calling function is used. -global_dict optional. dictionary. If specified, it is a dictionary of values -that should be used as the global scope for the C/C++ code. If global_dict is -not specified the global dictionary of the calling function is used. force -optional. 0 or 1. default 0. If 1, the C++ code is compiled every time inline -is called. This is really only useful for debugging, and probably only useful -if you're editing support_code a lot. compiler optional. string. The name -of compiler to use when compiling. On windows, it understands 'msvc' and -'gcc' as well as all the compiler names understood by distutils. On Unix, -it'll only understand the values understoof by distutils. (I should add 'gcc' -though to this). - -On windows, the compiler defaults to the Microsoft C++ compiler. If this -isn't available, it looks for mingw32 (the gcc compiler). - -On Unix, it'll probably use the same compiler that was used when compiling -Python. Cygwin's behavior should be similar. - -verbose optional. 0,1, or 2. defualt 0. Speficies how much much -information is printed during the compile phase of inlining code. 0 is silent -(except on windows with msvc where it still prints some garbage). 1 informs -you when compiling starts, finishes, and how long it took. 2 prints out the -command lines for the compilation process and can be useful if you're having -problems getting code to work. Its handy for finding the name of the .cpp -file if you need to examine it. verbose has no affect if the compilation -isn't necessary. support_code optional. string. A string of valid C++ code -declaring extra code that might be needed by your compiled function. 
This -could be declarations of functions, classes, or structures. customize -optional. base_info.custom_info object. An alternative way to specifiy -support_code, headers, etc. needed by the function see the weave.base_info -module for more details. (not sure this'll be used much). type_factories -optional. list of type specification factories. These guys are what convert -Python data types to C/C++ data types. If you'd like to use a different set -of type conversions than the default, specify them here. Look in the type -conversions section of the main documentation for examples. auto_downcast -optional. 0 or 1. default 1. This only affects functions that have Numeric -arrays as input variables. Setting this to 1 will cause all floating point -values to be cast as float instead of double if all the NumPy arrays are of -type float. If even one of the arrays has type double or double complex, all -variables maintain there standard types. - - -Distutils keywords -================== - -``inline()`` also accepts a number of ``distutils`` keywords for -controlling how the code is compiled. The following descriptions have been -copied from Greg Ward's ``distutils.extension.Extension`` class doc- strings -for convenience: sources [string] list of source filenames, relative to the -distribution root (where the setup script lives), in Unix form (slash- -separated) for portability. Source files may be C, C++, SWIG (.i), platform- -specific resource files, or whatever else is recognized by the "build_ext" -command as source for a Python extension. 
Note: The module_path file is -always appended to the front of this list include_dirs [string] list of -directories to search for C/C++ header files (in Unix form for portability) -define_macros [(name : string, value : string|None)] list of macros to -define; each macro is defined using a 2-tuple, where 'value' is either the -string to define it to or None to define it without a particular value -(equivalent of "#define FOO" in source or -DFOO on Unix C compiler command -line) undef_macros [string] list of macros to undefine explicitly -library_dirs [string] list of directories to search for C/C++ libraries at -link time libraries [string] list of library names (not filenames or paths) -to link against runtime_library_dirs [string] list of directories to search -for C/C++ libraries at run time (for shared extensions, this is when the -extension is loaded) extra_objects [string] list of extra files to link -with (eg. object files not implied by 'sources', static library that must be -explicitly specified, binary resource files, etc.) extra_compile_args -[string] any extra platform- and compiler-specific information to use when -compiling the source files in 'sources'. For platforms and compilers where -"command line" makes sense, this is typically a list of command-line -arguments, but for other platforms it could be anything. extra_link_args -[string] any extra platform- and compiler-specific information to use when -linking object files together to create the extension (or to create a new -static Python interpreter). Similar interpretation as for -'extra_compile_args'. export_symbols [string] list of symbols to be -exported from a shared extension. Not used on all platforms, and not -generally necessary for Python extensions, which typically export exactly one -symbol: "init" + extension_name. 
- - -Keyword Option Examples ------------------------ - -We'll walk through several examples here to demonstrate the behavior of -``inline`` and also how the various arguments are used. In the simplest -(most) cases, ``code`` and ``arg_names`` are the only arguments that need to -be specified. Here's a simple example run on Windows machine that has -Microsoft VC++ installed. - -:: - - >>> from weave import inline - >>> a = 'string' - >>> code = """ - ... int l = a.length(); - ... return_val = Py::new_reference_to(Py::Int(l)); - ... """ - >>> inline(code,['a']) - sc_86e98826b65b047ffd2cd5f479c627f12.cpp - Creating - library C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_86e98826b65b047ffd2cd5f479c627f12.lib - and object C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_86e98826b65b047ff - d2cd5f479c627f12.exp - 6 - >>> inline(code,['a']) - 6 - - -When ``inline`` is first run, you'll notice that pause and some trash printed -to the screen. The "trash" is acutually part of the compilers output that -distutils does not supress. The name of the extension file, -``sc_bighonkingnumber.cpp``, is generated from the md5 check sum of the C/C++ -code fragment. On Unix or windows machines with only gcc installed, the trash -will not appear. On the second call, the code fragment is not compiled since -it already exists, and only the answer is returned. Now kill the interpreter -and restart, and run the same code with a different string. - -:: - - >>> from weave import inline - >>> a = 'a longer string' - >>> code = """ - ... int l = a.length(); - ... return_val = Py::new_reference_to(Py::Int(l)); - ... """ - >>> inline(code,['a']) - 15 - - -Notice this time, ``inline()`` did not recompile the code because it found -the compiled function in the persistent catalog of functions. There is a -short pause as it looks up and loads the function, but it is much shorter -than compiling would require. 
- -You can specify the local and global dictionaries if you'd like (much like -``exec`` or ``eval()`` in Python), but if they aren't specified, the -"expected" ones are used -- i.e. the ones from the function that called -``inline()``. This is accomplished through a little call frame trickery. -Here is an example where the local_dict is specified using the same code -example from above:: - - >>> a = 'a longer string' - >>> b = 'an even longer string' - >>> my_dict = {'a':b} - >>> inline(code,['a']) - 15 - >>> inline(code,['a'],my_dict) - 21 - - -Everytime, the ``code`` is changed, ``inline`` does a recompile. However, -changing any of the other options in inline does not force a recompile. The -``force`` option was added so that one could force a recompile when tinkering -with other variables. In practice, it is just as easy to change the ``code`` -by a single character (like adding a space some place) to force the -recompile. - -.. note:: - It also might be nice to add some methods for purging the - cache and on disk catalogs. - -I use ``verbose`` sometimes for debugging. When set to 2, it'll output all -the information (including the name of the .cpp file) that you'd expect from -running a make file. This is nice if you need to examine the generated code -to see where things are going haywire. Note that error messages from failed -compiles are printed to the screen even if ``verbose`` is set to 0. - -The following example demonstrates using gcc instead of the standard msvc -compiler on windows using same code fragment as above. Because the example -has already been compiled, the ``force=1`` flag is needed to make -``inline()`` ignore the previously compiled version and recompile using gcc. 
-The verbose flag is added to show what is printed out:: - - >>>inline(code,['a'],compiler='gcc',verbose=2,force=1) - running build_ext - building 'sc_86e98826b65b047ffd2cd5f479c627f13' extension - c:\gcc-2.95.2\bin\g++.exe -mno-cygwin -mdll -O2 -w -Wstrict-prototypes -IC: - \home\ej\wrk\scipy\weave -IC:\Python21\Include -c C:\DOCUME~1\eric\LOCAL - S~1\Temp\python21_compiled\sc_86e98826b65b047ffd2cd5f479c627f13.cpp - -o C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_86e98826b65b04ffd2cd5f479c627f13.o - skipping C:\home\ej\wrk\scipy\weave\CXX\cxxextensions.c - (C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\cxxextensions.o up-to-date) - skipping C:\home\ej\wrk\scipy\weave\CXX\cxxsupport.cxx - (C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\cxxsupport.o up-to-date) - skipping C:\home\ej\wrk\scipy\weave\CXX\IndirectPythonInterface.cxx - (C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\indirectpythoninterface.o up-to-date) - skipping C:\home\ej\wrk\scipy\weave\CXX\cxx_extensions.cxx - (C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\cxx_extensions.o - up-to-date) - writing C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_86e98826b65b047ffd2cd5f479c627f13.def - c:\gcc-2.95.2\bin\dllwrap.exe --driver-name g++ -mno-cygwin - -mdll -static --output-lib - C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\libsc_86e98826b65b047ffd2cd5f479c627f13.a --def - C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_86e98826b65b047ffd2cd5f479c627f13.def - -sC:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\sc_86e98826b65b047ffd2cd5f479c627f13.o - C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\cxxextensions.o - C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\cxxsupport.o - C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\indirectpythoninterface.o - 
C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\temp\Release\cxx_extensions.o -LC:\Python21\libs - -lpython21 -o - C:\DOCUME~1\eric\LOCALS~1\Temp\python21_compiled\sc_86e98826b65b047ffd2cd5f479c627f13.pyd - 15 - -That's quite a bit of output. ``verbose=1`` just prints the compile time. - -:: - - >>>inline(code,['a'],compiler='gcc',verbose=1,force=1) - Compiling code... - finished compiling (sec): 6.00800001621 - 15 - - -.. note:: - I've only used the ``compiler`` option for switching between 'msvc' - and 'gcc' on windows. It may have use on Unix also, but I don't know yet. - -The ``support_code`` argument is likely to be used a lot. It allows you to -specify extra code fragments such as function, structure or class definitions -that you want to use in the ``code`` string. Note that changes to -``support_code`` do *not* force a recompile. The catalog only relies on -``code`` (for performance reasons) to determine whether recompiling is -necessary. So, if you make a change to support_code, you'll need to alter -``code`` in some way or use the ``force`` argument to get the code to -recompile. I usually just add some inocuous whitespace to the end of one of -the lines in ``code`` somewhere. Here's an example of defining a separate -method for calculating the string length: - -:: - - >>> from weave import inline - >>> a = 'a longer string' - >>> support_code = """ - ... PyObject* length(Py::String a) - ... { - ... int l = a.length(); - ... return Py::new_reference_to(Py::Int(l)); - ... } - ... """ - >>> inline("return_val = length(a);",['a'], - ... support_code = support_code) - 15 - - -``customize`` is a left over from a previous way of specifying compiler -options. It is a ``custom_info`` object that can specify quite a bit of -information about how a file is compiled. These ``info`` objects are the -standard way of defining compile information for type conversion classes. 
-However, I don't think they are as handy here, especially since we've exposed -all the keyword arguments that distutils can handle. Between these keywords, -and the ``support_code`` option, I think ``customize`` may be obsolete. We'll -see if anyone cares to use it. If not, it'll get axed in the next version. - -The ``type_factories`` variable is important to people who want to customize -the way arguments are converted from Python to C. We'll talk about this in -the next chapter **xx** of this document when we discuss type conversions. - -``auto_downcast`` handles one of the big type conversion issues that is -common when using NumPy arrays in conjunction with Python scalar values. If -you have an array of single precision values and multiply that array by a -Python scalar, the result is upcast to a double precision array because the -scalar value is double precision. This is not usually the desired behavior -because it can double your memory usage. ``auto_downcast`` goes some distance -towards changing the casting precedence of arrays and scalars. If your only -using single precision arrays, it will automatically downcast all scalar -values from double to single precision when they are passed into the C++ -code. This is the default behavior. If you want all values to keep there -default type, set ``auto_downcast`` to 0. - - -Returning Values ----------------- - -Python variables in the local and global scope transfer seemlessly from -Python into the C++ snippets. And, if ``inline`` were to completely live up -to its name, any modifications to variables in the C++ code would be -reflected in the Python variables when control was passed back to Python. For -example, the desired behavior would be something like:: - - # THIS DOES NOT WORK - >>> a = 1 - >>> weave.inline("a++;",['a']) - >>> a - 2 - - -Instead you get:: - - >>> a = 1 - >>> weave.inline("a++;",['a']) - >>> a - 1 - - -Variables are passed into C++ as if you are calling a Python function. 
-Python's calling convention is sometimes called "pass by assignment". This -means its as if a ``c_a = a`` assignment is made right before ``inline`` call -is made and the ``c_a`` variable is used within the C++ code. Thus, any -changes made to ``c_a`` are not reflected in Python's ``a`` variable. Things -do get a little more confusing, however, when looking at variables with -mutable types. Changes made in C++ to the contents of mutable types *are* -reflected in the Python variables. - -:: - - >>> a= [1,2] - >>> weave.inline("PyList_SetItem(a.ptr(),0,PyInt_FromLong(3));",['a']) - >>> print a - [3, 2] - - -So modifications to the contents of mutable types in C++ are seen when -control is returned to Python. Modifications to immutable types such as -tuples, strings, and numbers do not alter the Python variables. If you need -to make changes to an immutable variable, you'll need to assign the new value -to the "magic" variable ``return_val`` in C++. This value is returned by the -``inline()`` function:: - - >>> a = 1 - >>> a = weave.inline("return_val = Py::new_reference_to(Py::Int(a+1));",['a']) - >>> a - 2 - - -The ``return_val`` variable can also be used to return newly created values. -This is possible by returning a tuple. The following trivial example -illustrates how this can be done:: - - # python version - def multi_return(): - return 1, '2nd' - - # C version. - def c_multi_return(): - code = """ - py::tuple results(2); - results[0] = 1; - results[1] = "2nd"; - return_val = results; - """ - return inline_tools.inline(code) - -The example is available in ``examples/tuple_return.py``. It also has the -dubious honor of demonstrating how much ``inline()`` can slow things down. -The C version here is about 7-10 times slower than the Python version. Of -course, something so trivial has no reason to be written in C anyway. 
- - -The issue with ``locals()`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``inline`` passes the ``locals()`` and ``globals()`` dictionaries from Python -into the C++ function from the calling function. It extracts the variables -that are used in the C++ code from these dictionaries, converts then to C++ -variables, and then calculates using them. It seems like it would be trivial, -then, after the calculations were finished to then insert the new values back -into the ``locals()`` and ``globals()`` dictionaries so that the modified -values were reflected in Python. Unfortunately, as pointed out by the Python -manual, the locals() dictionary is not writable. - -I suspect ``locals()`` is not writable because there are some optimizations -done to speed lookups of the local namespace. I'm guessing local lookups -don't always look at a dictionary to find values. Can someone "in the know" -confirm or correct this? Another thing I'd like to know is whether there is a -way to write to the local namespace of another stack frame from C/C++. If so, -it would be possible to have some clean up code in compiled functions that -wrote final values of variables in C++ back to the correct Python stack -frame. I think this goes a long way toward making ``inline`` truely live up -to its name. I don't think we'll get to the point of creating variables in -Python for variables created in C -- although I suppose with a C/C++ parser -you could do that also. - - -A quick look at the code ------------------------- - -``weave`` generates a C++ file holding an extension function for each -``inline`` code snippet. These file names are generated using from the md5 -signature of the code snippet and saved to a location specified by the -PYTHONCOMPILED environment variable (discussed later). The cpp files are -generally about 200-400 lines long and include quite a few functions to -support type conversions, etc. However, the actual compiled function is -pretty simple. 
Below is the familiar ``printf`` example: - -:: - - >>> import weave - >>> a = 1 - >>> weave.inline('printf("%d\\n",a);',['a']) - 1 - - -And here is the extension function generated by ``inline``:: - - static PyObject* compiled_func(PyObject*self, PyObject* args) - { - py::object return_val; - int exception_occured = 0; - PyObject *py__locals = NULL; - PyObject *py__globals = NULL; - PyObject *py_a; - py_a = NULL; - - if(!PyArg_ParseTuple(args,"OO:compiled_func",&py__locals,&py__globals)) - return NULL; - try - { - PyObject* raw_locals = py_to_raw_dict(py__locals,"_locals"); - PyObject* raw_globals = py_to_raw_dict(py__globals,"_globals"); - /* argument conversion code */ - py_a = get_variable("a",raw_locals,raw_globals); - int a = convert_to_int(py_a,"a"); - /* inline code */ - /* NDARRAY API VERSION 90907 */ - printf("%d\n",a); /*I would like to fill in changed locals and globals here...*/ - } - catch(...) - { - return_val = py::object(); - exception_occured = 1; - } - /* cleanup code */ - if(!(PyObject*)return_val && !exception_occured) - { - return_val = Py_None; - } - return return_val.disown(); - } - -Every inline function takes exactly two arguments -- the local and global -dictionaries for the current scope. All variable values are looked up out of -these dictionaries. The lookups, along with all ``inline`` code execution, -are done within a C++ ``try`` block. If the variables aren't found, or there -is an error converting a Python variable to the appropriate type in C++, an -exception is raised. The C++ exception is automatically converted to a Python -exception by SCXX and returned to Python. The ``py_to_int()`` function -illustrates how the conversions and exception handling works. py_to_int first -checks that the given PyObject* pointer is not NULL and is a Python integer. -If all is well, it calls the Python API to convert the value to an ``int``. 
-Otherwise, it calls ``handle_bad_type()`` which gathers information about -what went wrong and then raises a SCXX TypeError which returns to Python as a -TypeError. - -:: - - int py_to_int(PyObject* py_obj,char* name) - { - if (!py_obj || !PyInt_Check(py_obj)) - handle_bad_type(py_obj,"int", name); - return (int) PyInt_AsLong(py_obj); - } - - -:: - - void handle_bad_type(PyObject* py_obj, char* good_type, char* var_name) - { - char msg[500]; - sprintf(msg,"received '%s' type instead of '%s' for variable '%s'", - find_type(py_obj),good_type,var_name); - throw Py::TypeError(msg); - } - - char* find_type(PyObject* py_obj) - { - if(py_obj == NULL) return "C NULL value"; - if(PyCallable_Check(py_obj)) return "callable"; - if(PyString_Check(py_obj)) return "string"; - if(PyInt_Check(py_obj)) return "int"; - if(PyFloat_Check(py_obj)) return "float"; - if(PyDict_Check(py_obj)) return "dict"; - if(PyList_Check(py_obj)) return "list"; - if(PyTuple_Check(py_obj)) return "tuple"; - if(PyFile_Check(py_obj)) return "file"; - if(PyModule_Check(py_obj)) return "module"; - - //should probably do more interagation (and thinking) on these. - if(PyCallable_Check(py_obj) && PyInstance_Check(py_obj)) return "callable"; - if(PyInstance_Check(py_obj)) return "instance"; - if(PyCallable_Check(py_obj)) return "callable"; - return "unkown type"; - } - -Since the ``inline`` is also executed within the ``try/catch`` block, you can -use CXX exceptions within your code. It is usually a bad idea to directly -``return`` from your code, even if an error occurs. This skips the clean up -section of the extension function. In this simple example, there isn't any -clean up code, but in more complicated examples, there may be some reference -counting that needs to be taken care of here on converted variables. To avoid -this, either uses exceptions or set ``return_val`` to NULL and use -``if/then's`` to skip code after errors. 
- -Technical Details -================= - -There are several main steps to using C/C++ code withing Python: - -1. Type conversion -2. Generating C/C++ code -3. Compile the code to an extension module -4. Catalog (and cache) the function for future use - -Items 1 and 2 above are related, but most easily discussed separately. Type -conversions are customizable by the user if needed. Understanding them is -pretty important for anything beyond trivial uses of ``inline``. Generating -the C/C++ code is handled by ``ext_function`` and ``ext_module`` classes and -. For the most part, compiling the code is handled by distutils. Some -customizations were needed, but they were relatively minor and do not require -changes to distutils itself. Cataloging is pretty simple in concept, but -surprisingly required the most code to implement (and still likely needs some -work). So, this section covers items 1 and 4 from the list. Item 2 is covered -later in the chapter covering the ``ext_tools`` module, and distutils is -covered by a completely separate document xxx. - - -Passing Variables in/out of the C/C++ code -========================================== - -.. note:: - Passing variables into the C code is pretty straight forward, but - there are subtlties to how variable modifications in C are returned to - Python. see `Returning Values`_ for a more thorough discussion of this issue. - -Type Conversions -================ - -.. note:: - Maybe ``xxx_converter`` instead of ``xxx_specification`` is a more - descriptive name. Might change in future version? - -By default, ``inline()`` makes the following type conversions between Python -and C++ types. - -.. 
table:: Default Data Type Conversions - - ============= ======= - Python C++ - ============= ======= - int int - float double - complex std::complex - string py::string - list py::list - dict py::dict - tuple py::tuple - file FILE* - callable py::object - instance py::object - numpy.ndarray PyArrayObject* - wxXXX wxXXX* - ============= ======= - -The ``Py::`` namespace is defined by the SCXX library which has C++ class -equivalents for many Python types. ``std::`` is the namespace of the standard -library in C++. - - -.. note:: - - I haven't figured out how to handle ``long int`` yet (I think they - are currenlty converted to int - - check this). - - Hopefully VTK will be added to the list soon - -Python to C++ conversions fill in code in several locations in the generated -``inline`` extension function. Below is the basic template for the function. -This is actually the exact code that is generated by calling -``weave.inline("")``. - - -The ``/* inline code */`` section is filled with the code passed to the -``inline()`` function call. The ``/*argument convserion code*/`` and ``/* -cleanup code */`` sections are filled with code that handles conversion from -Python to C++ types and code that deallocates memory or manipulates reference -counts before the function returns. The following sections demostrate how -these two areas are filled in by the default conversion methods. * Note: I'm -not sure I have reference counting correct on a few of these. The only thing -I increase/decrease the ref count on is NumPy arrays. If you see an issue, -please let me know. - -NumPy Argument Conversion -------------------------- - -Integer, floating point, and complex arguments are handled in a very similar -fashion. 
Consider the following inline function that has a single integer -variable passed in:: - - >>> a = 1 - >>> inline("",['a']) - - -The argument conversion code inserted for ``a`` is:: - - /* argument conversion code */ - int a = py_to_int (get_variable("a",raw_locals,raw_globals),"a"); - -``get_variable()`` reads the variable ``a`` from the local and global -namespaces. ``py_to_int()`` has the following form:: - - static int py_to_int(PyObject* py_obj,char* name) - { - if (!py_obj || !PyInt_Check(py_obj)) - handle_bad_type(py_obj,"int", name); - return (int) PyInt_AsLong(py_obj); - } - - -Similarly, the float and complex conversion routines look like:: - - static double py_to_float(PyObject* py_obj,char* name) - { - if (!py_obj || !PyFloat_Check(py_obj)) - handle_bad_type(py_obj,"float", name); - return PyFloat_AsDouble(py_obj); - } - - static std::complex py_to_complex(PyObject* py_obj,char* name) - { - if (!py_obj || !PyComplex_Check(py_obj)) - handle_bad_type(py_obj,"complex", name); - return std::complex(PyComplex_RealAsDouble(py_obj), - PyComplex_ImagAsDouble(py_obj)); - } - -NumPy conversions do not require any clean up code. - -String, List, Tuple, and Dictionary Conversion ----------------------------------------------- - -Strings, Lists, Tuples and Dictionary conversions are all converted to SCXX -types by default. For the following code, - -:: - - >>> a = [1] - >>> inline("",['a']) - - -The argument conversion code inserted for ``a`` is:: - - /* argument conversion code */ - Py::List a = py_to_list(get_variable("a",raw_locals,raw_globals),"a"); - - -``get_variable()`` reads the variable ``a`` from the local and global -namespaces. 
``py_to_list()`` and its friends has the following form:: - - static Py::List py_to_list(PyObject* py_obj,char* name) - { - if (!py_obj || !PyList_Check(py_obj)) - handle_bad_type(py_obj,"list", name); - return Py::List(py_obj); - } - - static Py::String py_to_string(PyObject* py_obj,char* name) - { - if (!PyString_Check(py_obj)) - handle_bad_type(py_obj,"string", name); - return Py::String(py_obj); - } - - static Py::Dict py_to_dict(PyObject* py_obj,char* name) - { - if (!py_obj || !PyDict_Check(py_obj)) - handle_bad_type(py_obj,"dict", name); - return Py::Dict(py_obj); - } - - static Py::Tuple py_to_tuple(PyObject* py_obj,char* name) - { - if (!py_obj || !PyTuple_Check(py_obj)) - handle_bad_type(py_obj,"tuple", name); - return Py::Tuple(py_obj); - } - -SCXX handles reference counts on for strings, lists, tuples, and -dictionaries, so clean up code isn't necessary. - -File Conversion ---------------- - -For the following code, - -:: - - >>> a = open("bob",'w') - >>> inline("",['a']) - - -The argument conversion code is:: - - /* argument conversion code */ - PyObject* py_a = get_variable("a",raw_locals,raw_globals); - FILE* a = py_to_file(py_a,"a"); - - -``get_variable()`` reads the variable ``a`` from the local and global -namespaces. ``py_to_file()`` converts PyObject* to a FILE* and increments the -reference count of the PyObject*:: - - FILE* py_to_file(PyObject* py_obj, char* name) - { - if (!py_obj || !PyFile_Check(py_obj)) - handle_bad_type(py_obj,"file", name); - - Py_INCREF(py_obj); - return PyFile_AsFile(py_obj); - } - -Because the PyObject* was incremented, the clean up code needs to decrement -the counter - -:: - - /* cleanup code */ - Py_XDECREF(py_a); - - -Its important to understand that file conversion only works on actual files --- i.e. ones created using the ``open()`` command in Python. It does not -support converting arbitrary objects that support the file interface into C -``FILE*`` pointers. This can affect many things. 
For example, in initial -``printf()`` examples, one might be tempted to solve the problem of C and -Python IDE's (PythonWin, PyCrust, etc.) writing to different stdout and -stderr by using ``fprintf()`` and passing in ``sys.stdout`` and -``sys.stderr``. For example, instead of - -:: - - >>> weave.inline('printf("hello\\n");') - - -You might try: - -:: - - >>> buf = sys.stdout - >>> weave.inline('fprintf(buf,"hello\\n");',['buf']) - - -This will work as expected from a standard python interpreter, but in -PythonWin, the following occurs: - -:: - - >>> buf = sys.stdout - >>> weave.inline('fprintf(buf,"hello\\n");',['buf']) - Traceback (most recent call last): - File "", line 1, in ? - File "C:\Python21\weave\inline_tools.py", line 315, in inline - auto_downcast = auto_downcast, - File "C:\Python21\weave\inline_tools.py", line 386, in compile_function - type_factories = type_factories) - File "C:\Python21\weave\ext_tools.py", line 197, in __init__ - auto_downcast, type_factories) - File "C:\Python21\weave\ext_tools.py", line 390, in assign_variable_types - raise TypeError, format_error_msg(errors) - TypeError: {'buf': "Unable to convert variable 'buf' to a C++ type."} - - -The traceback tells us that ``inline()`` was unable to convert 'buf' to a C++ -type (If instance conversion was implemented, the error would have occurred -at runtime instead). Why is this? Let's look at what the ``buf`` object -really is:: - - >>> buf - pywin.framework.interact.InteractiveView instance at 00EAD014 - - -PythonWin has reassigned ``sys.stdout`` to a special object that implements -the Python file interface. This works great in Python, but since the special -object doesn't have a FILE* pointer underlying it, fprintf doesn't know what -to do with it (well this will be the problem when instance conversion is -implemented...). - -Callable, Instance, and Module Conversion ------------------------------------------ - - -.. note:: - Need to look into how ref counts should be handled. 
Also, Instance and - Module conversion are not currently implemented. - -:: - - >>> def a(): - pass - >>> inline("",['a']) - - -Callable and instance variables are converted to PyObject*. Nothing is done -to there reference counts. - -:: - - /* argument conversion code */ - PyObject* a = py_to_callable(get_variable("a",raw_locals,raw_globals),"a"); - - -``get_variable()`` reads the variable ``a`` from the local and global -namespaces. The ``py_to_callable()`` and ``py_to_instance()`` don't currently -increment the ref count. - -:: - - PyObject* py_to_callable(PyObject* py_obj, char* name) - { - if (!py_obj || !PyCallable_Check(py_obj)) - handle_bad_type(py_obj,"callable", name); - return py_obj; - } - - PyObject* py_to_instance(PyObject* py_obj, char* name) - { - if (!py_obj || !PyFile_Check(py_obj)) - handle_bad_type(py_obj,"instance", name); - return py_obj; - } - -There is no cleanup code for callables, modules, or instances. - -Customizing Conversions ------------------------ - -Converting from Python to C++ types is handled by xxx_specification classes. -A type specification class actually serve in two related but different roles. -The first is in determining whether a Python variable that needs to be -converted should be represented by the given class. The second is as a code -generator that generate C++ code needed to convert from Python to C++ types -for a specific variable. - -When - -:: - - >>> a = 1 - >>> weave.inline('printf("%d",a);',['a']) - - -is called for the first time, the code snippet has to be compiled. In this -process, the variable 'a' is tested against a list of type specifications -(the default list is stored in weave/ext_tools.py). The *first* specification -in the list is used to represent the variable. - -Examples of ``xxx_specification`` are scattered throughout numerous -"xxx_spec.py" files in the ``weave`` package. Closely related to the -``xxx_specification`` classes are ``yyy_info`` classes. 
These classes contain -compiler, header, and support code information necessary for including a -certain set of capabilities (such as blitz++ or CXX support) in a compiled -module. ``xxx_specification`` classes have one or more ``yyy_info`` classes -associated with them. If you'd like to define your own set of type -specifications, the current best route is to examine some of the existing -spec and info files. Maybe looking over sequence_spec.py and cxx_info.py are -a good place to start. After defining specification classes, you'll need to -pass them into ``inline`` using the ``type_factories`` argument. A lot of -times you may just want to change how a specific variable type is -represented. Say you'd rather have Python strings converted to -``std::string`` or maybe ``char*`` instead of using the CXX string object, -but would like all other type conversions to have default behavior. This -requires that a new specification class that handles strings is written and -then prepended to a list of the default type specifications. Since it is -closer to the front of the list, it effectively overrides the default string -specification. The following code demonstrates how this is done: ... - - -The Catalog -=========== - -``catalog.py`` has a class called ``catalog`` that helps keep track of -previously compiled functions. This prevents ``inline()`` and related -functions from having to compile functions everytime they are called. -Instead, catalog will check an in memory cache to see if the function has -already been loaded into python. If it hasn't, then it starts searching -through persisent catalogs on disk to see if it finds an entry for the given -function. By saving information about compiled functions to disk, it isn't -necessary to re-compile functions everytime you stop and restart the -interpreter. Functions are compiled once and stored for future use. - -When ``inline(cpp_code)`` is called the following things happen: - -1. 
A fast local cache of functions is checked for the last function - called for ``cpp_code``. If an entry for ``cpp_code`` doesn't exist in - the cache or the cached function call fails (perhaps because the function - doesn't have compatible types) then the next step is to check the - catalog. - -2. The catalog class also keeps an in-memory cache with a list of all - the functions compiled for ``cpp_code``. If ``cpp_code`` has ever been - called, then this cache will be present (loaded from disk). If the cache - isn't present, then it is loaded from disk. - - If the cache is present, each function in the cache is called until - one is found that was compiled for the correct argument types. If none of - the functions work, a new function is compiled with the given argument - types. This function is written to the on-disk catalog as well as into - the in-memory cache. - -3. When a lookup for ``cpp_code`` fails, the catalog looks through the - on-disk function catalogs for the entries. The PYTHONCOMPILED variable - determines where to search for these catalogs and in what order. If - PYTHONCOMPILED is not present several platform dependent locations are - searched. All functions found for ``cpp_code`` in the path are loaded - into the in-memory cache with functions found earlier in the search path - closer to the front of the call list. - - If the function isn't found in the on-disk catalog, then the function - is compiled, written to the first writable directory in the - PYTHONCOMPILED path, and also loaded into the in-memory cache. - - -Function Storage ----------------- - -Function caches are stored as dictionaries where the key is the entire C++ -code string and the value is either a single function (as in the "level 1" -cache) or a list of functions (as in the main catalog cache). On disk -catalogs are stored in the same manor using standard Python shelves. 
- -Early on, there was a question as to whether md5 check sums of the C++ code -strings should be used instead of the actual code strings. I think this is -the route inline Perl took. Some (admittedly quick) tests of the md5 vs. the -entire string showed that using the entire string was at least a factor of 3 -or 4 faster for Python. I think this is because it is more time consuming to -compute the md5 value than it is to do look-ups of long strings in the -dictionary. Look at the examples/md5_speed.py file for the test run. - - -Catalog search paths and the PYTHONCOMPILED variable ----------------------------------------------------- - -The default location for catalog files on Unix is is ~/.pythonXX_compiled -where XX is version of Python being used. If this directory doesn't exist, it -is created the first time a catalog is used. The directory must be writable. -If, for any reason it isn't, then the catalog attempts to create a directory -based on your user id in the /tmp directory. The directory permissions are -set so that only you have access to the directory. If this fails, I think -you're out of luck. I don't think either of these should ever fail though. On -Windows, a directory called pythonXX_compiled is created in the user's -temporary directory. - -The actual catalog file that lives in this directory is a Python shelve with -a platform specific name such as "nt21compiled_catalog" so that multiple OSes -can share the same file systems without trampling on each other. Along with -the catalog file, the .cpp and .so or .pyd files created by inline will live -in this directory. The catalog file simply contains keys which are the C++ -code strings with values that are lists of functions. The function lists -point at functions within these compiled modules. Each function in the lists -executes the same C++ code string, but compiled for different input -variables. 
- -You can use the PYTHONCOMPILED environment variable to specify alternative -locations for compiled functions. On Unix this is a colon (':') separated -list of directories. On windows, it is a (';') separated list of directories. -These directories will be searched prior to the default directory for a -compiled function catalog. Also, the first writable directory in the list is -where all new compiled function catalogs, .cpp and .so or .pyd files are -written. Relative directory paths ('.' and '..') should work fine in the -PYTHONCOMPILED variable as should environement variables. - -There is a "special" path variable called MODULE that can be placed in the -PYTHONCOMPILED variable. It specifies that the compiled catalog should reside -in the same directory as the module that called it. This is useful if an -admin wants to build a lot of compiled functions during the build of a -package and then install them in site-packages along with the package. User's -who specify MODULE in their PYTHONCOMPILED variable will have access to these -compiled functions. Note, however, that if they call the function with a set -of argument types that it hasn't previously been built for, the new function -will be stored in their default directory (or some other writable directory -in the PYTHONCOMPILED path) because the user will not have write access to -the site-packages directory. - -An example of using the PYTHONCOMPILED path on bash follows:: - - PYTHONCOMPILED=MODULE:/some/path;export PYTHONCOMPILED; - - -If you are using python21 on linux, and the module bob.py in site-packages -has a compiled function in it, then the catalog search order when calling -that function for the first time in a python session would be:: - - /usr/lib/python21/site-packages/linuxpython_compiled - /some/path/linuxpython_compiled - ~/.python21_compiled/linuxpython_compiled - - -The default location is always included in the search path. - -.. note:: - hmmm. see a possible problem here. 
I should probably make a sub- - directory such as /usr/lib/python21/site- - packages/python21_compiled/linuxpython_compiled so that library files - compiled with python21 are tried to link with python22 files in some strange - scenarios. Need to check this. - -The in-module cache (in ``weave.inline_tools`` reduces the overhead of -calling inline functions by about a factor of 2. It can be reduced a little -more for type loop calls where the same function is called over and over -again if the cache was a single value instead of a dictionary, but the -benefit is very small (less than 5%) and the utility is quite a bit less. So, -we'll stick with a dictionary as the cache. - - -======= - Blitz -======= - -.. note:: - most of this section is lifted from old documentation. It should be - pretty accurate, but there may be a few discrepancies. - -``weave.blitz()`` compiles NumPy Python expressions for fast execution. For -most applications, compiled expressions should provide a factor of 2-10 -speed-up over NumPy arrays. Using compiled expressions is meant to be as -unobtrusive as possible and works much like pythons exec statement. As an -example, the following code fragment takes a 5 point average of the 512x512 -2d image, b, and stores it in array, a:: - - from scipy import * # or from NumPy import * - a = ones((512,512), Float64) - b = ones((512,512), Float64) - # ...do some stuff to fill in b... - # now average - a[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1] \ - + b[1:-1,2:] + b[1:-1,:-2]) / 5. - - -To compile the expression, convert the expression to a string by putting -quotes around it and then use ``weave.blitz``:: - - import weave - expr = "a[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]" \ - "+ b[1:-1,2:] + b[1:-1,:-2]) / 5." 
- weave.blitz(expr) - - -The first time ``weave.blitz`` is run for a given expression and set of -arguements, C++ code that accomplishes the exact same task as the Python -expression is generated and compiled to an extension module. This can take up -to a couple of minutes depending on the complexity of the function. -Subsequent calls to the function are very fast. Futher, the generated module -is saved between program executions so that the compilation is only done once -for a given expression and associated set of array types. If the given -expression is executed with a new set of array types, the code most be -compiled again. This does not overwrite the previously compiled function -- -both of them are saved and available for exectution. - -The following table compares the run times for standard NumPy code and -compiled code for the 5 point averaging. - -Method Run Time (seconds) -Standard NumPy 0.46349 -blitz (1st time compiling) 78.95526 -blitz (subsequent calls) 0.05843 (factor of 8 speedup) - -These numbers are for a 512x512 double precision image run on a 400 MHz -Celeron processor under RedHat Linux 6.2. - -Because of the slow compile times, its probably most effective to develop -algorithms as you usually do using the capabilities of scipy or the NumPy -module. Once the algorithm is perfected, put quotes around it and execute it -using ``weave.blitz``. This provides the standard rapid prototyping strengths -of Python and results in algorithms that run close to that of hand coded C or -Fortran. - - -Requirements -============ - -Currently, the ``weave.blitz`` has only been tested under Linux with -gcc-2.95-3 and on Windows with Mingw32 (2.95.2). Its compiler requirements -are pretty heavy duty (see the `blitz++ home page`_), so it won't work with -just any compiler. Particularly MSVC++ isn't up to snuff. A number of other -compilers such as KAI++ will also work, but my suspicions are that gcc will -get the most use. - -Limitations -=========== - -1. 
Currently, ``weave.blitz`` handles all standard mathematic operators - except for the ** power operator. The built-in trigonmetric, log, - floor/ceil, and fabs functions might work (but haven't been tested). It - also handles all types of array indexing supported by the NumPy module. - numarray's NumPy compatible array indexing modes are likewise supported, - but numarray's enhanced (array based) indexing modes are not supported. - - ``weave.blitz`` does not currently support operations that use array - broadcasting, nor have any of the special purpose functions in NumPy such - as take, compress, etc. been implemented. Note that there are no obvious - reasons why most of this functionality cannot be added to scipy.weave, so - it will likely trickle into future versions. Using ``slice()`` objects - directly instead of ``start:stop:step`` is also not supported. - -2. Currently Python only works on expressions that include assignment - such as - - :: - - >>> result = b + c + d - - This means that the result array must exist before calling - ``weave.blitz``. Future versions will allow the following:: - - >>> result = weave.blitz_eval("b + c + d") - -3. ``weave.blitz`` works best when algorithms can be expressed in a - "vectorized" form. Algorithms that have a large number of if/thens and - other conditions are better hand written in C or Fortran. Further, the - restrictions imposed by requiring vectorized expressions sometimes - preclude the use of more efficient data structures or algorithms. For - maximum speed in these cases, hand-coded C or Fortran code is the only - way to go. - -4. ``weave.blitz`` can produce different results than NumPy in certain - situations. It can happen when the array receiving the results of a - calculation is also used during the calculation. The NumPy behavior is to - carry out the entire calculation on the right hand side of an equation - and store it in a temporary array. 
This temprorary array is assigned to - the array on the left hand side of the equation. blitz, on the other - hand, does a "running" calculation of the array elements assigning values - from the right hand side to the elements on the left hand side - immediately after they are calculated. Here is an example, provided by - Prabhu Ramachandran, where this happens:: - - # 4 point average. - >>> expr = "u[1:-1, 1:-1] = (u[0:-2, 1:-1] + u[2:, 1:-1] + \ - ... "u[1:-1,0:-2] + u[1:-1, 2:])*0.25" - >>> u = zeros((5, 5), 'd'); u[0,:] = 100 - >>> exec (expr) - >>> u - array([[ 100., 100., 100., 100., 100.], - [ 0., 25., 25., 25., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - - >>> u = zeros((5, 5), 'd'); u[0,:] = 100 - >>> weave.blitz (expr) - >>> u - array([[ 100. , 100. , 100. , 100. , 100. ], - [ 0. , 25. , 31.25 , 32.8125 , 0. ], - [ 0. , 6.25 , 9.375 , 10.546875 , 0. ], - [ 0. , 1.5625 , 2.734375 , 3.3203125, 0. ], - [ 0. , 0. , 0. , 0. , 0. ]]) - - You can prevent this behavior by using a temporary array. - - :: - - >>> u = zeros((5, 5), 'd'); u[0,:] = 100 - >>> temp = zeros((4, 4), 'd'); - >>> expr = "temp = (u[0:-2, 1:-1] + u[2:, 1:-1] + "\ - ... "u[1:-1,0:-2] + u[1:-1, 2:])*0.25;"\ - ... "u[1:-1,1:-1] = temp" - >>> weave.blitz (expr) - >>> u - array([[ 100., 100., 100., 100., 100.], - [ 0., 25., 25., 25., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - -5. One other point deserves mention lest people be confused. - ``weave.blitz`` is not a general purpose Python->C compiler. It only - works for expressions that contain NumPy arrays and/or Python scalar - values. This focused scope concentrates effort on the compuationally - intensive regions of the program and sidesteps the difficult issues - associated with a general purpose Python->C compiler. 
- - -NumPy efficiency issues: What compilation buys you -================================================== - -Some might wonder why compiling NumPy expressions to C++ is beneficial since -operations on NumPy array operations are already executed within C loops. The -problem is that anything other than the simplest expression are executed in -less than optimal fashion. Consider the following NumPy expression:: - - a = 1.2 * b + c * d - - -When NumPy calculates the value for the 2d array, ``a``, it does the -following steps:: - - temp1 = 1.2 * b - temp2 = c * d - a = temp1 + temp2 - - -Two things to note. Since ``c`` is an (perhaps large) array, a large -temporary array must be created to store the results of ``1.2 * b``. The same -is true for ``temp2``. Allocation is slow. The second thing is that we have 3 -loops executing, one to calculate ``temp1``, one for ``temp2`` and one for -adding them up. A C loop for the same problem might look like:: - - for(int i = 0; i < M; i++) - for(int j = 0; j < N; j++) - a[i,j] = 1.2 * b[i,j] + c[i,j] * d[i,j] - - -Here, the 3 loops have been fused into a single loop and there is no longer a -need for a temporary array. This provides a significant speed improvement -over the above example (write me and tell me what you get). - -So, converting NumPy expressions into C/C++ loops that fuse the loops and -eliminate temporary arrays can provide big gains. The goal then,is to convert -NumPy expression to C/C++ loops, compile them in an extension module, and -then call the compiled extension function. The good news is that there is an -obvious correspondence between the NumPy expression above and the C loop. The -bad news is that NumPy is generally much more powerful than this simple -example illustrates and handling all possible indexing possibilities results -in loops that are less than straight forward to write. (take a peak in NumPy -for confirmation). Luckily, there are several available tools that simplify -the process. 
- - -The Tools -========= - -``weave.blitz`` relies heavily on several remarkable tools. On the Python -side, the main facilitators are Jermey Hylton's parser module and Travis -Oliphant's NumPy module. On the compiled language side, Todd Veldhuizen's -blitz++ array library, written in C++ (shhhh. don't tell David Beazley), does -the heavy lifting. Don't assume that, because it's C++, it's much slower than -C or Fortran. Blitz++ uses a jaw dropping array of template techniques -(metaprogramming, template expression, etc) to convert innocent looking and -readable C++ expressions into to code that usually executes within a few -percentage points of Fortran code for the same problem. This is good. -Unfortunately all the template raz-ma-taz is very expensive to compile, so -the 200 line extension modules often take 2 or more minutes to compile. This -isn't so good. ``weave.blitz`` works to minimize this issue by remembering -where compiled modules live and reusing them instead of re-compiling every -time a program is re-run. - -Parser ------- - -Tearing NumPy expressions apart, examining the pieces, and then rebuilding -them as C++ (blitz) expressions requires a parser of some sort. I can imagine -someone attacking this problem with regular expressions, but it'd likely be -ugly and fragile. Amazingly, Python solves this problem for us. It actually -exposes its parsing engine to the world through the ``parser`` module. The -following fragment creates an Abstract Syntax Tree (AST) object for the -expression and then converts to a (rather unpleasant looking) deeply nested -list representation of the tree. 
- -:: - - >>> import parser - >>> import scipy.weave.misc - >>> ast = parser.suite("a = b * c + d") - >>> ast_list = ast.tolist() - >>> sym_list = scipy.weave.misc.translate_symbols(ast_list) - >>> pprint.pprint(sym_list) - ['file_input', - ['stmt', - ['simple_stmt', - ['small_stmt', - ['expr_stmt', - ['testlist', - ['test', - ['and_test', - ['not_test', - ['comparison', - ['expr', - ['xor_expr', - ['and_expr', - ['shift_expr', - ['arith_expr', - ['term', - ['factor', ['power', ['atom', ['NAME', 'a']]]]]]]]]]]]]]], - ['EQUAL', '='], - ['testlist', - ['test', - ['and_test', - ['not_test', - ['comparison', - ['expr', - ['xor_expr', - ['and_expr', - ['shift_expr', - ['arith_expr', - ['term', - ['factor', ['power', ['atom', ['NAME', 'b']]]], - ['STAR', '*'], - ['factor', ['power', ['atom', ['NAME', 'c']]]]], - ['PLUS', '+'], - ['term', - ['factor', ['power', ['atom', ['NAME', 'd']]]]]]]]]]]]]]]]], - ['NEWLINE', '']]], - ['ENDMARKER', '']] - - -Despite its looks, with some tools developed by Jermey H., its possible to -search these trees for specific patterns (sub-trees), extract the sub-tree, -manipulate them converting python specific code fragments to blitz code -fragments, and then re-insert it in the parse tree. The parser module -documentation has some details on how to do this. Traversing the new -blitzified tree, writing out the terminal symbols as you go, creates our new -blitz++ expression string. - -Blitz and NumPy ---------------- - -The other nice discovery in the project is that the data structure used for -NumPy arrays and blitz arrays is nearly identical. NumPy stores "strides" as -byte offsets and blitz stores them as element offsets, but other than that, -they are the same. Further, most of the concept and capabilities of the two -libraries are remarkably similar. It is satisfying that two completely -different implementations solved the problem with similar basic -architectures. It is also fortuitous. 
The work involved in converting NumPy -expressions to blitz expressions was greatly diminished. As an example, -consider the code for slicing an array in Python with a stride:: - - >>> a = b[0:4:2] + c - >>> a - [0,2,4] - - -In Blitz it is as follows:: - - Array<2,int> b(10); - Array<2,int> c(3); - // ... - Array<2,int> a = b(Range(0,3,2)) + c; - - -Here the range object works exactly like Python slice objects with the -exception that the top index (3) is inclusive where as Python's (4) is -exclusive. Other differences include the type declaraions in C++ and -parentheses instead of brackets for indexing arrays. Currently, -``weave.blitz`` handles the inclusive/exclusive issue by subtracting one from -upper indices during the translation. An alternative that is likely more -robust/maintainable in the long run, is to write a PyRange class that behaves -like Python's range. This is likely very easy. - -The stock blitz also doesn't handle negative indices in ranges. The current -implementation of the ``blitz()`` has a partial solution to this problem. It -calculates and index that starts with a '-' sign by subtracting it from the -maximum index in the array so that:: - - upper index limit - /-----\ - b[:-1] -> b(Range(0,Nb[0]-1-1)) - - -This approach fails, however, when the top index is calculated from other -values. In the following scenario, if ``i+j`` evaluates to a negative value, -the compiled code will produce incorrect results and could even core- dump. -Right now, all calculated indices are assumed to be positive. - -:: - - b[:i-j] -> b(Range(0,i+j)) - - -A solution is to calculate all indices up front using if/then to handle the -+/- cases. This is a little work and results in more code, so it hasn't been -done. I'm holding out to see if blitz++ can be modified to handle negative -indexing, but haven't looked into how much effort is involved yet. While it -needs fixin', I don't think there is a ton of code where this is an issue. 
- -The actual translation of the Python expressions to blitz expressions is -currently a two part process. First, all x:y:z slicing expression are removed -from the AST, converted to slice(x,y,z) and re-inserted into the tree. Any -math needed on these expressions (subtracting from the maximum index, etc.) -are also preformed here. _beg and _end are used as special variables that are -defined as blitz::fromBegin and blitz::toEnd. - -:: - - a[i+j:i+j+1,:] = b[2:3,:] - - -becomes a more verbose:: - - a[slice(i+j,i+j+1),slice(_beg,_end)] = b[slice(2,3),slice(_beg,_end)] - - -The second part does a simple string search/replace to convert to a blitz -expression with the following translations:: - - slice(_beg,_end) -> _all # not strictly needed, but cuts down on code. - slice -> blitz::Range - [ -> ( - ] -> ) - _stp -> 1 - - -``_all`` is defined in the compiled function as ``blitz::Range.all()``. These -translations could of course happen directly in the syntax tree. But the -string replacement is slightly easier. Note that name spaces are maintained -in the C++ code to lessen the likelyhood of name clashes. Currently no effort -is made to detect name clashes. A good rule of thumb is don't use values that -start with '_' or 'py\_' in compiled expressions and you'll be fine. - -Type definitions and coersion -============================= - -So far we've glossed over the dynamic vs. static typing issue between Python -and C++. In Python, the type of value that a variable holds can change -through the course of program execution. C/C++, on the other hand, forces you -to declare the type of value a variables will hold prior at compile time. -``weave.blitz`` handles this issue by examining the types of the variables in -the expression being executed, and compiling a function for those explicit -types. 
For example:: - - a = ones((5,5),Float32) - b = ones((5,5),Float32) - weave.blitz("a = a + b") - - -When compiling this expression to C++, ``weave.blitz`` sees that the values -for a and b in the local scope have type ``Float32``, or 'float' on a 32 bit -architecture. As a result, it compiles the function using the float type (no -attempt has been made to deal with 64 bit issues). - -What happens if you call a compiled function with array types that are -different than the ones for which it was originally compiled? No biggie, -you'll just have to wait on it to compile a new version for your new types. -This doesn't overwrite the old functions, as they are still accessible. See -the catalog section in the inline() documentation to see how this is handled. -Suffice to say, the mechanism is transparent to the user and behaves like -dynamic typing with the occasional wait for compiling newly typed functions. - -When working with combined scalar/array operations, the type of the array is -*always* used. This is similar to the savespace flag that was recently added -to NumPy. This prevents issues with the following expression perhaps -unexpectedly being calculated at a higher (more expensive) precision that can -occur in Python:: - - >>> a = array((1,2,3),typecode = Float32) - >>> b = a * 2.1 # results in b being a Float64 array. - -In this example, - -:: - - >>> a = ones((5,5),Float32) - >>> b = ones((5,5),Float32) - >>> weave.blitz("b = a * 2.1") - - -the ``2.1`` is cast down to a ``float`` before carrying out the operation. If -you really want to force the calculation to be a ``double``, define ``a`` and -``b`` as ``double`` arrays. - -One other point of note. Currently, you must include both the right hand side -and left hand side (assignment side) of your equation in the compiled -expression. Also, the array being assigned to must be created prior to -calling ``weave.blitz``. 
I'm pretty sure this is easily changed so that a -compiled_eval expression can be defined, but no effort has been made to -allocate new arrays (and decern their type) on the fly. - - -Cataloging Compiled Functions -============================= - -See `The Catalog`_ section in the ``weave.inline()`` -documentation. - -Checking Array Sizes -==================== - -Surprisingly, one of the big initial problems with compiled code was making -sure all the arrays in an operation were of compatible type. The following -case is trivially easy:: - - a = b + c - - -It only requires that arrays ``a``, ``b``, and ``c`` have the same shape. -However, expressions like:: - - a[i+j:i+j+1,:] = b[2:3,:] + c - - -are not so trivial. Since slicing is involved, the size of the slices, not -the input arrays must be checked. Broadcasting complicates things further -because arrays and slices with different dimensions and shapes may be -compatible for math operations (broadcasting isn't yet supported by -``weave.blitz``). Reductions have a similar effect as their results are -different shapes than their input operand. The binary operators in NumPy -compare the shapes of their two operands just before they operate on them. -This is possible because NumPy treats each operation independently. The -intermediate (temporary) arrays created during sub-operations in an -expression are tested for the correct shape before they are combined by -another operation. Because ``weave.blitz`` fuses all operations into a single -loop, this isn't possible. The shape comparisons must be done and guaranteed -compatible before evaluating the expression. - -The solution chosen converts input arrays to "dummy arrays" that only -represent the dimensions of the arrays, not the data. Binary operations on -dummy arrays check that input array sizes are comptible and return a dummy -array with the size correct size. 
Evaluating an expression of dummy arrays -traces the changing array sizes through all operations and fails if -incompatible array sizes are ever found. - -The machinery for this is housed in ``weave.size_check``. It basically -involves writing a new class (dummy array) and overloading it math operators -to calculate the new sizes correctly. All the code is in Python and there is -a fair amount of logic (mainly to handle indexing and slicing) so the -operation does impose some overhead. For large arrays (ie. 50x50x50), the -overhead is negligible compared to evaluating the actual expression. For -small arrays (ie. 16x16), the overhead imposed for checking the shapes with -this method can cause the ``weave.blitz`` to be slower than evaluating the -expression in Python. - -What can be done to reduce the overhead? (1) The size checking code could be -moved into C. This would likely remove most of the overhead penalty compared -to NumPy (although there is also some calling overhead), but no effort has -been made to do this. (2) You can also call ``weave.blitz`` with -``check_size=0`` and the size checking isn't done. However, if the sizes -aren't compatible, it can cause a core-dump. So, foregoing size_checking -isn't advisable until your code is well debugged. - - -Creating the Extension Module -============================= - -``weave.blitz`` uses the same machinery as ``weave.inline`` to build the -extension module. The only difference is the code included in the function is -automatically generated from the NumPy array expression instead of supplied -by the user. - -=================== - Extension Modules -=================== - -``weave.inline`` and ``weave.blitz`` are high level tools that generate -extension modules automatically. Under the covers, they use several classes -from ``weave.ext_tools`` to help generate the extension module. The main two -classes are ``ext_module`` and ``ext_function`` (I'd like to add -``ext_class`` and ``ext_method`` also). 
These classes simplify the process of -generating extension modules by handling most of the "boiler plate" code -automatically. - -.. note:: - ``inline`` actually sub-classes ``weave.ext_tools.ext_function`` to - generate slightly different code than the standard ``ext_function``. - The main difference is that the standard class converts function - arguments to C types, while inline always has two arguments, the - local and global dicts, and the grabs the variables that need to be - convereted to C from these. - -A Simple Example -================ - -The following simple example demonstrates how to build an extension module -within a Python function:: - - # examples/increment_example.py - from weave import ext_tools - - def build_increment_ext(): - """ Build a simple extension with functions that increment numbers. - The extension will be built in the local directory. - """ - mod = ext_tools.ext_module('increment_ext') - - a = 1 # effectively a type declaration for 'a' in the - # following functions. - - ext_code = "return_val = Py::new_reference_to(Py::Int(a+1));" - func = ext_tools.ext_function('increment',ext_code,['a']) - mod.add_function(func) - - ext_code = "return_val = Py::new_reference_to(Py::Int(a+2));" - func = ext_tools.ext_function('increment_by_2',ext_code,['a']) - mod.add_function(func) - - mod.compile() - -The function ``build_increment_ext()`` creates an extension module named -``increment_ext`` and compiles it to a shared library (.so or .pyd) that can -be loaded into Python.. ``increment_ext`` contains two functions, -``increment`` and ``increment_by_2``. The first line of -``build_increment_ext()``, - - mod = ext_tools.ext_module('increment_ext') - - -creates an ``ext_module`` instance that is ready to have ``ext_function`` -instances added to it. ``ext_function`` instances are created much with a -calling convention similar to ``weave.inline()``. The most common call -includes a C/C++ code snippet and a list of the arguments for the function. 
-The following - - ext_code = "return_val = Py::new_reference_to(Py::Int(a+1));" - func = ext_tools.ext_function('increment',ext_code,['a']) - - -creates a C/C++ extension function that is equivalent to the following Python -function:: - - def increment(a): - return a + 1 - - -A second method is also added to the module and then, - -:: - - mod.compile() - - -is called to build the extension module. By default, the module is created in -the current working directory. This example is available in the -``examples/increment_example.py`` file found in the ``weave`` directory. At -the bottom of the file in the module's "main" program, an attempt to import -``increment_ext`` without building it is made. If this fails (the module -doesn't exist in the PYTHONPATH), the module is built by calling -``build_increment_ext()``. This approach only takes the time consuming ( a -few seconds for this example) process of building the module if it hasn't -been built before. - -:: - - if __name__ == "__main__": - try: - import increment_ext - except ImportError: - build_increment_ext() - import increment_ext - a = 1 - print 'a, a+1:', a, increment_ext.increment(a) - print 'a, a+2:', a, increment_ext.increment_by_2(a) - -.. note:: - If we were willing to always pay the penalty of building the C++ - code for a module, we could store the md5 checksum of the C++ code - along with some information about the compiler, platform, etc. Then, - ``ext_module.compile()`` could try importing the module before it - actually compiles it, check the md5 checksum and other meta-data in - the imported module with the meta-data of the code it just produced - and only compile the code if the module didn't exist or the - meta-data didn't match. This would reduce the above code to:: - - if __name__ == "__main__": - build_increment_ext() - - a = 1 - print 'a, a+1:', a, increment_ext.increment(a) - print 'a, a+2:', a, increment_ext.increment_by_2(a) - -.. 
note:: - There would always be the overhead of building the C++ code, but it - would only actually compile the code once. You pay a little in overhead and - get cleaner "import" code. Needs some thought. - -If you run ``increment_example.py`` from the command line, you get the -following:: - - [eric@n0]$ python increment_example.py - a, a+1: 1 2 - a, a+2: 1 3 - - -If the module didn't exist before it was run, the module is created. If it -did exist, it is just imported and used. - -Fibonacci Example -================= - -``examples/fibonacci.py`` provides a little more complex example of how to -use ``ext_tools``. Fibonacci numbers are a series of numbers where each -number in the series is the sum of the previous two: 1, 1, 2, 3, 5, 8, etc. -Here, the first two numbers in the series are taken to be 1. One approach to -calculating Fibonacci numbers uses recursive function calls. In Python, it -might be written as:: - - def fib(a): - if a <= 2: - return 1 - else: - return fib(a-2) + fib(a-1) - - -In C, the same function would look something like this:: - - int fib(int a) - { - if(a <= 2) - return 1; - else - return fib(a-2) + fib(a-1); - } - - -Recursion is much faster in C than in Python, so it would be beneficial to -use the C version for fibonacci number calculations instead of the Python -version. We need an extension function that calls this C function to do this. -This is possible by including the above code snippet as "support code" and -then calling it from the extension function. Support code snippets (usually -structure definitions, helper functions and the like) are inserted into the -extension module C/C++ file before the extension function code. Here is how -to build the C version of the fibonacci number generator:: - - def build_fibonacci(): - """ Builds an extension module with fibonacci calculators. 
- """ - mod = ext_tools.ext_module('fibonacci_ext') - a = 1 # this is effectively a type declaration - - # recursive fibonacci in C - fib_code = """ - int fib1(int a) - { - if(a <= 2) - return 1; - else - return fib1(a-2) + fib1(a-1); - } - """ - ext_code = """ - int val = fib1(a); - return_val = Py::new_reference_to(Py::Int(val)); - """ - fib = ext_tools.ext_function('fib',ext_code,['a']) - fib.customize.add_support_code(fib_code) - mod.add_function(fib) - - mod.compile() - -XXX More about custom_info, and what xxx_info instances are good for. - -.. note:: - recursion is not the fastest way to calculate fibonacci numbers, but - this approach serves nicely for this example. - - -================================================ - Customizing Type Conversions -- Type Factories -================================================ - -not written - -============================= - Things I wish ``weave`` did -============================= - -It is possible to get name clashes if you uses a variable name that is -already defined in a header automatically included (such as ``stdio.h``) For -instance, if you try to pass in a variable named ``stdout``, you'll get a -cryptic error report due to the fact that ``stdio.h`` also defines the name. -``weave`` should probably try and handle this in some way. Other things... - -.. _PyInline: http://pyinline.sourceforge.net/ -.. _SciPy: http://www.scipy.org -.. _mingw32: http://www.mingw.org%3Ewww.mingw.org -.. _NumPy: http://numeric.scipy.org/ -.. _here: http://www.scipy.org/Weave -.. _Python Cookbook: http://aspn.activestate.com/ASPN/Cookbook/Python -.. _binary_search(): - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81188 -.. _website: http://cxx.sourceforge.net/ -.. _This submission: - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52306 -.. _blitz++ home page: http://www.oonumerics.org/blitz/ - -.. 
- Local Variables: - mode: rst - End: diff --git a/scipy-0.10.1/doc/source/weave.rst b/scipy-0.10.1/doc/source/weave.rst deleted file mode 100644 index f6cad5b051..0000000000 --- a/scipy-0.10.1/doc/source/weave.rst +++ /dev/null @@ -1,19 +0,0 @@ -====================================== -C/C++ integration (:mod:`scipy.weave`) -====================================== - -.. warning:: - - This documentation is work-in-progress and unorganized. - -.. automodule:: scipy.weave - :members: - - -.. autosummary:: - :toctree: generated/ - - inline - blitz - ext_tools - accelerate diff --git a/scipy-0.10.1/doc/sphinxext/LICENSE.txt b/scipy-0.10.1/doc/sphinxext/LICENSE.txt deleted file mode 100644 index e00efc31ec..0000000000 --- a/scipy-0.10.1/doc/sphinxext/LICENSE.txt +++ /dev/null @@ -1,97 +0,0 @@ -------------------------------------------------------------------------------- - The files - - numpydoc.py - - autosummary.py - - autosummary_generate.py - - docscrape.py - - docscrape_sphinx.py - - phantom_import.py - have the following license: - -Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- - The files - - compiler_unparse.py - - comment_eater.py - - traitsdoc.py - have the following license: - -This software is OSI Certified Open Source Software. -OSI Certified is a certification mark of the Open Source Initiative. - -Copyright (c) 2006, Enthought, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Enthought, Inc. nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- - The files - - only_directives.py - - plot_directive.py - originate from Matplotlib (http://matplotlib.sf.net/) which has - the following license: - -Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. - -1. This LICENSE AGREEMENT is between John D. Hunter (“JDHâ€), and the Individual or Organization (“Licenseeâ€) accessing and otherwise using matplotlib software in source or binary form and its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved†are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. - -4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS†basis. 
JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. - -5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. - diff --git a/scipy-0.10.1/doc/sphinxext/MANIFEST.in b/scipy-0.10.1/doc/sphinxext/MANIFEST.in deleted file mode 100644 index f88ed785c5..0000000000 --- a/scipy-0.10.1/doc/sphinxext/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include tests *.py -include *.txt diff --git a/scipy-0.10.1/doc/sphinxext/README.txt b/scipy-0.10.1/doc/sphinxext/README.txt deleted file mode 100644 index 6ba63e6d85..0000000000 --- a/scipy-0.10.1/doc/sphinxext/README.txt +++ /dev/null @@ -1,45 +0,0 @@ -===================================== -numpydoc -- Numpy's Sphinx extensions -===================================== - -Numpy's documentation uses several custom extensions to Sphinx. These -are shipped in this ``numpydoc`` package, in case you want to make use -of them in third-party projects. 
- -The following extensions are available: - - - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add - the code description directives ``np:function``, ``np-c:function``, etc. - that support the Numpy docstring syntax. - - - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes. - - - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::`` - directive. Note that this implementation may still undergo severe - changes or eventually be deprecated. - - -numpydoc -======== - -Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings -following the Numpy/Scipy format to a form palatable to Sphinx. - -Options -------- - -The following options can be set in conf.py: - -- numpydoc_use_plots: bool - - Whether to produce ``plot::`` directives for Examples sections that - contain ``import matplotlib``. - -- numpydoc_show_class_members: bool - - Whether to show all members of a class in the Methods and Attributes - sections automatically. - -- numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead) - - Whether to insert an edit link after docstrings. diff --git a/scipy-0.10.1/doc/sphinxext/__init__.py b/scipy-0.10.1/doc/sphinxext/__init__.py deleted file mode 100644 index ae9073bc41..0000000000 --- a/scipy-0.10.1/doc/sphinxext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from numpydoc import setup diff --git a/scipy-0.10.1/doc/sphinxext/comment_eater.py b/scipy-0.10.1/doc/sphinxext/comment_eater.py deleted file mode 100644 index e11eea9021..0000000000 --- a/scipy-0.10.1/doc/sphinxext/comment_eater.py +++ /dev/null @@ -1,158 +0,0 @@ -from cStringIO import StringIO -import compiler -import inspect -import textwrap -import tokenize - -from compiler_unparse import unparse - - -class Comment(object): - """ A comment block. - """ - is_comment = True - def __init__(self, start_lineno, end_lineno, text): - # int : The first line number in the block. 1-indexed. 
- self.start_lineno = start_lineno - # int : The last line number. Inclusive! - self.end_lineno = end_lineno - # str : The text block including '#' character but not any leading spaces. - self.text = text - - def add(self, string, start, end, line): - """ Add a new comment line. - """ - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - self.text += string - - def __repr__(self): - return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno, self.text) - - -class NonComment(object): - """ A non-comment block of code. - """ - is_comment = False - def __init__(self, start_lineno, end_lineno): - self.start_lineno = start_lineno - self.end_lineno = end_lineno - - def add(self, string, start, end, line): - """ Add lines to the block. - """ - if string.strip(): - # Only add if not entirely whitespace. - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - - def __repr__(self): - return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno) - - -class CommentBlocker(object): - """ Pull out contiguous comment blocks. - """ - def __init__(self): - # Start with a dummy. - self.current_block = NonComment(0, 0) - - # All of the blocks seen so far. - self.blocks = [] - - # The index mapping lines of code to their associated comment blocks. - self.index = {} - - def process_file(self, file): - """ Process a file object. - """ - for token in tokenize.generate_tokens(file.next): - self.process_token(*token) - self.make_index() - - def process_token(self, kind, string, start, end, line): - """ Process a single token. 
- """ - if self.current_block.is_comment: - if kind == tokenize.COMMENT: - self.current_block.add(string, start, end, line) - else: - self.new_noncomment(start[0], end[0]) - else: - if kind == tokenize.COMMENT: - self.new_comment(string, start, end, line) - else: - self.current_block.add(string, start, end, line) - - def new_noncomment(self, start_lineno, end_lineno): - """ We are transitioning from a noncomment to a comment. - """ - block = NonComment(start_lineno, end_lineno) - self.blocks.append(block) - self.current_block = block - - def new_comment(self, string, start, end, line): - """ Possibly add a new comment. - - Only adds a new comment if this comment is the only thing on the line. - Otherwise, it extends the noncomment block. - """ - prefix = line[:start[1]] - if prefix.strip(): - # Oops! Trailing comment, not a comment block. - self.current_block.add(string, start, end, line) - else: - # A comment block. - block = Comment(start[0], end[0], string) - self.blocks.append(block) - self.current_block = block - - def make_index(self): - """ Make the index mapping lines of actual code to their associated - prefix comments. - """ - for prev, block in zip(self.blocks[:-1], self.blocks[1:]): - if not block.is_comment: - self.index[block.start_lineno] = prev - - def search_for_comment(self, lineno, default=None): - """ Find the comment block just before the given line number. - - Returns None (or the specified default) if there is no such block. - """ - if not self.index: - self.make_index() - block = self.index.get(lineno, None) - text = getattr(block, 'text', default) - return text - - -def strip_comment_marker(text): - """ Strip # markers at the front of a block of comment text. - """ - lines = [] - for line in text.splitlines(): - lines.append(line.lstrip('#')) - text = textwrap.dedent('\n'.join(lines)) - return text - - -def get_class_traits(klass): - """ Yield all of the documentation for trait definitions on a class object. 
- """ - # FIXME: gracefully handle errors here or in the caller? - source = inspect.getsource(klass) - cb = CommentBlocker() - cb.process_file(StringIO(source)) - mod_ast = compiler.parse(source) - class_ast = mod_ast.node.nodes[0] - for node in class_ast.code.nodes: - # FIXME: handle other kinds of assignments? - if isinstance(node, compiler.ast.Assign): - name = node.nodes[0].name - rhs = unparse(node.expr).strip() - doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) - yield name, rhs, doc - diff --git a/scipy-0.10.1/doc/sphinxext/compiler_unparse.py b/scipy-0.10.1/doc/sphinxext/compiler_unparse.py deleted file mode 100644 index ffcf51b353..0000000000 --- a/scipy-0.10.1/doc/sphinxext/compiler_unparse.py +++ /dev/null @@ -1,860 +0,0 @@ -""" Turn compiler.ast structures back into executable python code. - - The unparse method takes a compiler.ast tree and transforms it back into - valid python code. It is incomplete and currently only works for - import statements, function calls, function definitions, assignments, and - basic expressions. - - Inspired by python-2.5-svn/Demo/parser/unparse.py - - fixme: We may want to move to using _ast trees because the compiler for - them is about 6 times faster than compiler.compile. -""" - -import sys -import cStringIO -from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add - -def unparse(ast, single_line_functions=False): - s = cStringIO.StringIO() - UnparseCompilerAst(ast, s, single_line_functions) - return s.getvalue().lstrip() - -op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, - 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } - -class UnparseCompilerAst: - """ Methods in this class recursively traverse an AST and - output source code for the abstract syntax; original formatting - is disregarged. - """ - - ######################################################################### - # object interface. 
- ######################################################################### - - def __init__(self, tree, file = sys.stdout, single_line_functions=False): - """ Unparser(tree, file=sys.stdout) -> None. - - Print the source for tree to file. - """ - self.f = file - self._single_func = single_line_functions - self._do_indent = True - self._indent = 0 - self._dispatch(tree) - self._write("\n") - self.f.flush() - - ######################################################################### - # Unparser private interface. - ######################################################################### - - ### format, output, and dispatch methods ################################ - - def _fill(self, text = ""): - "Indent a piece of text, according to the current indentation level" - if self._do_indent: - self._write("\n"+" "*self._indent + text) - else: - self._write(text) - - def _write(self, text): - "Append a piece of text to the current line." - self.f.write(text) - - def _enter(self): - "Print ':', and increase the indentation." - self._write(": ") - self._indent += 1 - - def _leave(self): - "Decrease the indentation level." - self._indent -= 1 - - def _dispatch(self, tree): - "_dispatcher function, _dispatching tree type T to method _T." - if isinstance(tree, list): - for t in tree: - self._dispatch(t) - return - meth = getattr(self, "_"+tree.__class__.__name__) - if tree.__class__.__name__ == 'NoneType' and not self._do_indent: - return - meth(tree) - - - ######################################################################### - # compiler.ast unparsing methods. - # - # There should be one method per concrete grammar type. They are - # organized in alphabetical order. 
- ######################################################################### - - def _Add(self, t): - self.__binary_op(t, '+') - - def _And(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") and (") - self._write(")") - - def _AssAttr(self, t): - """ Handle assigning an attribute of an object - """ - self._dispatch(t.expr) - self._write('.'+t.attrname) - - def _Assign(self, t): - """ Expression Assignment such as "a = 1". - - This only handles assignment in expressions. Keyword assignment - is handled separately. - """ - self._fill() - for target in t.nodes: - self._dispatch(target) - self._write(" = ") - self._dispatch(t.expr) - if not self._do_indent: - self._write('; ') - - def _AssName(self, t): - """ Name on left hand side of expression. - - Treat just like a name on the right side of an expression. - """ - self._Name(t) - - def _AssTuple(self, t): - """ Tuple on left hand side of an expression. - """ - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - def _AugAssign(self, t): - """ +=,-=,*=,/=,**=, etc. operations - """ - - self._fill() - self._dispatch(t.node) - self._write(' '+t.op+' ') - self._dispatch(t.expr) - if not self._do_indent: - self._write(';') - - def _Bitand(self, t): - """ Bit and operation. - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" & ") - - def _Bitor(self, t): - """ Bit or operation - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" | ") - - def _CallFunc(self, t): - """ Function call. 
- """ - self._dispatch(t.node) - self._write("(") - comma = False - for e in t.args: - if comma: self._write(", ") - else: comma = True - self._dispatch(e) - if t.star_args: - if comma: self._write(", ") - else: comma = True - self._write("*") - self._dispatch(t.star_args) - if t.dstar_args: - if comma: self._write(", ") - else: comma = True - self._write("**") - self._dispatch(t.dstar_args) - self._write(")") - - def _Compare(self, t): - self._dispatch(t.expr) - for op, expr in t.ops: - self._write(" " + op + " ") - self._dispatch(expr) - - def _Const(self, t): - """ A constant value such as an integer value, 3, or a string, "hello". - """ - self._dispatch(t.value) - - def _Decorators(self, t): - """ Handle function decorators (eg. @has_units) - """ - for node in t.nodes: - self._dispatch(node) - - def _Dict(self, t): - self._write("{") - for i, (k, v) in enumerate(t.items): - self._dispatch(k) - self._write(": ") - self._dispatch(v) - if i < len(t.items)-1: - self._write(", ") - self._write("}") - - def _Discard(self, t): - """ Node for when return value is ignored such as in "foo(a)". - """ - self._fill() - self._dispatch(t.expr) - - def _Div(self, t): - self.__binary_op(t, '/') - - def _Ellipsis(self, t): - self._write("...") - - def _From(self, t): - """ Handle "from xyz import foo, bar as baz". - """ - # fixme: Are From and ImportFrom handled differently? 
- self._fill("from ") - self._write(t.modname) - self._write(" import ") - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Function(self, t): - """ Handle function definitions - """ - if t.decorators is not None: - self._fill("@") - self._dispatch(t.decorators) - self._fill("def "+t.name + "(") - defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) - for i, arg in enumerate(zip(t.argnames, defaults)): - self._write(arg[0]) - if arg[1] is not None: - self._write('=') - self._dispatch(arg[1]) - if i < len(t.argnames)-1: - self._write(', ') - self._write(")") - if self._single_func: - self._do_indent = False - self._enter() - self._dispatch(t.code) - self._leave() - self._do_indent = True - - def _Getattr(self, t): - """ Handle getting an attribute of an object - """ - if isinstance(t.expr, (Div, Mul, Sub, Add)): - self._write('(') - self._dispatch(t.expr) - self._write(')') - else: - self._dispatch(t.expr) - - self._write('.'+t.attrname) - - def _If(self, t): - self._fill() - - for i, (compare,code) in enumerate(t.tests): - if i == 0: - self._write("if ") - else: - self._write("elif ") - self._dispatch(compare) - self._enter() - self._fill() - self._dispatch(code) - self._leave() - self._write("\n") - - if t.else_ is not None: - self._write("else") - self._enter() - self._fill() - self._dispatch(t.else_) - self._leave() - self._write("\n") - - def _IfExp(self, t): - self._dispatch(t.then) - self._write(" if ") - self._dispatch(t.test) - - if t.else_ is not None: - self._write(" else (") - self._dispatch(t.else_) - self._write(")") - - def _Import(self, t): - """ Handle "import xyz.foo". 
- """ - self._fill("import ") - - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Keyword(self, t): - """ Keyword value assignment within function calls and definitions. - """ - self._write(t.name) - self._write("=") - self._dispatch(t.expr) - - def _List(self, t): - self._write("[") - for i,node in enumerate(t.nodes): - self._dispatch(node) - if i < len(t.nodes)-1: - self._write(", ") - self._write("]") - - def _Module(self, t): - if t.doc is not None: - self._dispatch(t.doc) - self._dispatch(t.node) - - def _Mul(self, t): - self.__binary_op(t, '*') - - def _Name(self, t): - self._write(t.name) - - def _NoneType(self, t): - self._write("None") - - def _Not(self, t): - self._write('not (') - self._dispatch(t.expr) - self._write(')') - - def _Or(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") or (") - self._write(")") - - def _Pass(self, t): - self._write("pass\n") - - def _Printnl(self, t): - self._fill("print ") - if t.dest: - self._write(">> ") - self._dispatch(t.dest) - self._write(", ") - comma = False - for node in t.nodes: - if comma: self._write(', ') - else: comma = True - self._dispatch(node) - - def _Power(self, t): - self.__binary_op(t, '**') - - def _Return(self, t): - self._fill("return ") - if t.value: - if isinstance(t.value, Tuple): - text = ', '.join([ name.name for name in t.value.asList() ]) - self._write(text) - else: - self._dispatch(t.value) - if not self._do_indent: - self._write('; ') - - def _Slice(self, t): - self._dispatch(t.expr) - self._write("[") - if t.lower: - self._dispatch(t.lower) - self._write(":") - if t.upper: - self._dispatch(t.upper) - #if t.step: - # self._write(":") - # self._dispatch(t.step) - self._write("]") - - def _Sliceobj(self, t): - for i, node in enumerate(t.nodes): - if i != 0: - self._write(":") - if not 
(isinstance(node, Const) and node.value is None): - self._dispatch(node) - - def _Stmt(self, tree): - for node in tree.nodes: - self._dispatch(node) - - def _Sub(self, t): - self.__binary_op(t, '-') - - def _Subscript(self, t): - self._dispatch(t.expr) - self._write("[") - for i, value in enumerate(t.subs): - if i != 0: - self._write(",") - self._dispatch(value) - self._write("]") - - def _TryExcept(self, t): - self._fill("try") - self._enter() - self._dispatch(t.body) - self._leave() - - for handler in t.handlers: - self._fill('except ') - self._dispatch(handler[0]) - if handler[1] is not None: - self._write(', ') - self._dispatch(handler[1]) - self._enter() - self._dispatch(handler[2]) - self._leave() - - if t.else_: - self._fill("else") - self._enter() - self._dispatch(t.else_) - self._leave() - - def _Tuple(self, t): - - if not t.nodes: - # Empty tuple. - self._write("()") - else: - self._write("(") - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - self._write(")") - - def _UnaryAdd(self, t): - self._write("+") - self._dispatch(t.expr) - - def _UnarySub(self, t): - self._write("-") - self._dispatch(t.expr) - - def _With(self, t): - self._fill('with ') - self._dispatch(t.expr) - if t.vars: - self._write(' as ') - self._dispatch(t.vars.name) - self._enter() - self._dispatch(t.body) - self._leave() - self._write('\n') - - def _int(self, t): - self._write(repr(t)) - - def __binary_op(self, t, symbol): - # Check if parenthesis are needed on left side and then dispatch - has_paren = False - left_class = str(t.left.__class__) - if (left_class in op_precedence.keys() and - op_precedence[left_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.left) - if has_paren: - self._write(')') - # Write the appropriate symbol for 
operator - self._write(symbol) - # Check if parenthesis are needed on the right side and then dispatch - has_paren = False - right_class = str(t.right.__class__) - if (right_class in op_precedence.keys() and - op_precedence[right_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.right) - if has_paren: - self._write(')') - - def _float(self, t): - # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' - # We prefer str here. - self._write(str(t)) - - def _str(self, t): - self._write(repr(t)) - - def _tuple(self, t): - self._write(str(t)) - - ######################################################################### - # These are the methods from the _ast modules unparse. - # - # As our needs to handle more advanced code increase, we may want to - # modify some of the methods below so that they work for compiler.ast. - ######################################################################### - -# # stmt -# def _Expr(self, tree): -# self._fill() -# self._dispatch(tree.value) -# -# def _Import(self, t): -# self._fill("import ") -# first = True -# for a in t.names: -# if first: -# first = False -# else: -# self._write(", ") -# self._write(a.name) -# if a.asname: -# self._write(" as "+a.asname) -# -## def _ImportFrom(self, t): -## self._fill("from ") -## self._write(t.module) -## self._write(" import ") -## for i, a in enumerate(t.names): -## if i == 0: -## self._write(", ") -## self._write(a.name) -## if a.asname: -## self._write(" as "+a.asname) -## # XXX(jpe) what is level for? 
-## -# -# def _Break(self, t): -# self._fill("break") -# -# def _Continue(self, t): -# self._fill("continue") -# -# def _Delete(self, t): -# self._fill("del ") -# self._dispatch(t.targets) -# -# def _Assert(self, t): -# self._fill("assert ") -# self._dispatch(t.test) -# if t.msg: -# self._write(", ") -# self._dispatch(t.msg) -# -# def _Exec(self, t): -# self._fill("exec ") -# self._dispatch(t.body) -# if t.globals: -# self._write(" in ") -# self._dispatch(t.globals) -# if t.locals: -# self._write(", ") -# self._dispatch(t.locals) -# -# def _Print(self, t): -# self._fill("print ") -# do_comma = False -# if t.dest: -# self._write(">>") -# self._dispatch(t.dest) -# do_comma = True -# for e in t.values: -# if do_comma:self._write(", ") -# else:do_comma=True -# self._dispatch(e) -# if not t.nl: -# self._write(",") -# -# def _Global(self, t): -# self._fill("global") -# for i, n in enumerate(t.names): -# if i != 0: -# self._write(",") -# self._write(" " + n) -# -# def _Yield(self, t): -# self._fill("yield") -# if t.value: -# self._write(" (") -# self._dispatch(t.value) -# self._write(")") -# -# def _Raise(self, t): -# self._fill('raise ') -# if t.type: -# self._dispatch(t.type) -# if t.inst: -# self._write(", ") -# self._dispatch(t.inst) -# if t.tback: -# self._write(", ") -# self._dispatch(t.tback) -# -# -# def _TryFinally(self, t): -# self._fill("try") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# self._fill("finally") -# self._enter() -# self._dispatch(t.finalbody) -# self._leave() -# -# def _excepthandler(self, t): -# self._fill("except ") -# if t.type: -# self._dispatch(t.type) -# if t.name: -# self._write(", ") -# self._dispatch(t.name) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _ClassDef(self, t): -# self._write("\n") -# self._fill("class "+t.name) -# if t.bases: -# self._write("(") -# for a in t.bases: -# self._dispatch(a) -# self._write(", ") -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# 
self._leave() -# -# def _FunctionDef(self, t): -# self._write("\n") -# for deco in t.decorators: -# self._fill("@") -# self._dispatch(deco) -# self._fill("def "+t.name + "(") -# self._dispatch(t.args) -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _For(self, t): -# self._fill("for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# def _While(self, t): -# self._fill("while ") -# self._dispatch(t.test) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# # expr -# def _Str(self, tree): -# self._write(repr(tree.s)) -## -# def _Repr(self, t): -# self._write("`") -# self._dispatch(t.value) -# self._write("`") -# -# def _Num(self, t): -# self._write(repr(t.n)) -# -# def _ListComp(self, t): -# self._write("[") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write("]") -# -# def _GeneratorExp(self, t): -# self._write("(") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write(")") -# -# def _comprehension(self, t): -# self._write(" for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# for if_clause in t.ifs: -# self._write(" if ") -# self._dispatch(if_clause) -# -# def _IfExp(self, t): -# self._dispatch(t.body) -# self._write(" if ") -# self._dispatch(t.test) -# if t.orelse: -# self._write(" else ") -# self._dispatch(t.orelse) -# -# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} -# def _UnaryOp(self, t): -# self._write(self.unop[t.op.__class__.__name__]) -# self._write("(") -# self._dispatch(t.operand) -# self._write(")") -# -# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", -# "LShift":">>", 
"RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", -# "FloorDiv":"//", "Pow": "**"} -# def _BinOp(self, t): -# self._write("(") -# self._dispatch(t.left) -# self._write(")" + self.binop[t.op.__class__.__name__] + "(") -# self._dispatch(t.right) -# self._write(")") -# -# boolops = {_ast.And: 'and', _ast.Or: 'or'} -# def _BoolOp(self, t): -# self._write("(") -# self._dispatch(t.values[0]) -# for v in t.values[1:]: -# self._write(" %s " % self.boolops[t.op.__class__]) -# self._dispatch(v) -# self._write(")") -# -# def _Attribute(self,t): -# self._dispatch(t.value) -# self._write(".") -# self._write(t.attr) -# -## def _Call(self, t): -## self._dispatch(t.func) -## self._write("(") -## comma = False -## for e in t.args: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## for e in t.keywords: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## if t.starargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("*") -## self._dispatch(t.starargs) -## if t.kwargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("**") -## self._dispatch(t.kwargs) -## self._write(")") -# -# # slice -# def _Index(self, t): -# self._dispatch(t.value) -# -# def _ExtSlice(self, t): -# for i, d in enumerate(t.dims): -# if i != 0: -# self._write(': ') -# self._dispatch(d) -# -# # others -# def _arguments(self, t): -# first = True -# nonDef = len(t.args)-len(t.defaults) -# for a in t.args[0:nonDef]: -# if first:first = False -# else: self._write(", ") -# self._dispatch(a) -# for a,d in zip(t.args[nonDef:], t.defaults): -# if first:first = False -# else: self._write(", ") -# self._dispatch(a), -# self._write("=") -# self._dispatch(d) -# if t.vararg: -# if first:first = False -# else: self._write(", ") -# self._write("*"+t.vararg) -# if t.kwarg: -# if first:first = False -# else: self._write(", ") -# self._write("**"+t.kwarg) -# -## def _keyword(self, t): -## self._write(t.arg) -## self._write("=") 
-## self._dispatch(t.value) -# -# def _Lambda(self, t): -# self._write("lambda ") -# self._dispatch(t.args) -# self._write(": ") -# self._dispatch(t.body) - - - diff --git a/scipy-0.10.1/doc/sphinxext/docscrape.py b/scipy-0.10.1/doc/sphinxext/docscrape.py deleted file mode 100644 index bbd3fcaccc..0000000000 --- a/scipy-0.10.1/doc/sphinxext/docscrape.py +++ /dev/null @@ -1,505 +0,0 @@ -"""Extract reference documentation from the NumPy source tree. - -""" - -import inspect -import textwrap -import re -import pydoc -from StringIO import StringIO -from warnings import warn - -class Reader(object): - """A line-based string reader. - - """ - def __init__(self, data): - """ - Parameters - ---------- - data : str - String with lines separated by '\n'. - - """ - if isinstance(data,list): - self._str = data - else: - self._str = data.split('\n') # store string as list of lines - - self.reset() - - def __getitem__(self, n): - return self._str[n] - - def reset(self): - self._l = 0 # current line nr - - def read(self): - if not self.eof(): - out = self[self._l] - self._l += 1 - return out - else: - return '' - - def seek_next_non_empty_line(self): - for l in self[self._l:]: - if l.strip(): - break - else: - self._l += 1 - - def eof(self): - return self._l >= len(self._str) - - def read_to_condition(self, condition_func): - start = self._l - for line in self[start:]: - if condition_func(line): - return self[start:self._l] - self._l += 1 - if self.eof(): - return self[start:self._l+1] - return [] - - def read_to_next_empty_line(self): - self.seek_next_non_empty_line() - def is_empty(line): - return not line.strip() - return self.read_to_condition(is_empty) - - def read_to_next_unindented_line(self): - def is_unindented(line): - return (line.strip() and (len(line.lstrip()) == len(line))) - return self.read_to_condition(is_unindented) - - def peek(self,n=0): - if self._l + n < len(self._str): - return self[self._l + n] - else: - return '' - - def is_empty(self): - return not 
''.join(self._str).strip() - - -class NumpyDocString(object): - def __init__(self, docstring, config={}): - docstring = textwrap.dedent(docstring).split('\n') - - self._doc = Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': [''], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def __getitem__(self,key): - return self._parsed_data[key] - - def __setitem__(self,key,val): - if not self._parsed_data.has_key(key): - warn("Unknown section %s" % key) - else: - self._parsed_data[key] = val - - def _is_at_section(self): - self._doc.seek_next_non_empty_line() - - if self._doc.eof(): - return False - - l1 = self._doc.peek().strip() # e.g. Parameters - - if l1.startswith('.. index::'): - return True - - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - - def _strip(self,doc): - i = 0 - j = 0 - for i,line in enumerate(doc): - if line.strip(): break - - for j,line in enumerate(doc[::-1]): - if line.strip(): break - - return doc[i:len(doc)-j] - - def _read_to_next_section(self): - section = self._doc.read_to_next_empty_line() - - while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty - section += [''] - - section += self._doc.read_to_next_empty_line() - - return section - - def _read_sections(self): - while not self._doc.eof(): - data = self._read_to_next_section() - name = data[0].strip() - - if name.startswith('..'): # index section - yield name, data[1:] - elif len(data) < 2: - yield StopIteration - else: - yield name, self._strip(data[2:]) - - def _parse_param_list(self,content): - r = Reader(content) - params = [] - while not r.eof(): - header = r.read().strip() - if ' : ' in 
header: - arg_name, arg_type = header.split(' : ')[:2] - else: - arg_name, arg_type = header, '' - - desc = r.read_to_next_unindented_line() - desc = dedent_lines(desc) - - params.append((arg_name,arg_type,desc)) - - return params - - - _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" - r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) - def _parse_see_also(self, content): - """ - func_name : Descriptive text - continued text - another_func_name : Descriptive text - func_name1, func_name2, :meth:`func_name`, func_name3 - - """ - items = [] - - def parse_item_name(text): - """Match ':role:`name`' or 'name'""" - m = self._name_rgx.match(text) - if m: - g = m.groups() - if g[1] is None: - return g[3], None - else: - return g[2], g[1] - raise ValueError("%s is not a item name" % text) - - def push_item(name, rest): - if not name: - return - name, role = parse_item_name(name) - items.append((name, list(rest), role)) - del rest[:] - - current_func = None - rest = [] - - for line in content: - if not line.strip(): continue - - m = self._name_rgx.match(line) - if m and line[m.end():].strip().startswith(':'): - push_item(current_func, rest) - current_func, line = line[:m.end()], line[m.end():] - rest = [line.split(':', 1)[1].strip()] - if not rest[0]: - rest = [] - elif not line.startswith(' '): - push_item(current_func, rest) - current_func = None - if ',' in line: - for func in line.split(','): - if func.strip(): - push_item(func, []) - elif line.strip(): - current_func = line - elif current_func is not None: - rest.append(line.strip()) - push_item(current_func, rest) - return items - - def _parse_index(self, section, content): - """ - .. 
index: default - :refguide: something, else, and more - - """ - def strip_each_in(lst): - return [s.strip() for s in lst] - - out = {} - section = section.split('::') - if len(section) > 1: - out['default'] = strip_each_in(section[1].split(','))[0] - for line in content: - line = line.split(':') - if len(line) > 2: - out[line[1]] = strip_each_in(line[2].split(',')) - return out - - def _parse_summary(self): - """Grab signature (if given) and summary""" - if self._is_at_section(): - return - - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: - self['Summary'] = summary - - if not self._is_at_section(): - self['Extended Summary'] = self._read_to_next_section() - - def _parse(self): - self._doc.reset() - self._parse_summary() - - for (section,content) in self._read_sections(): - if not section.startswith('..'): - section = ' '.join([s.capitalize() for s in section.split(' ')]) - if section in ('Parameters', 'Returns', 'Raises', 'Warns', - 'Other Parameters', 'Attributes', 'Methods'): - self[section] = self._parse_param_list(content) - elif section.startswith('.. 
index::'): - self['index'] = self._parse_index(section, content) - elif section == 'See Also': - self['See Also'] = self._parse_see_also(content) - else: - self[section] = content - - # string conversion routines - - def _str_header(self, name, symbol='-'): - return [name, len(name)*symbol] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - if self['Signature']: - return [self['Signature'].replace('*','\*')] + [''] - else: - return [''] - - def _str_summary(self): - if self['Summary']: - return self['Summary'] + [''] - else: - return [] - - def _str_extended_summary(self): - if self['Extended Summary']: - return self['Extended Summary'] + [''] - else: - return [] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_header(name) - for param,param_type,desc in self[name]: - out += ['%s : %s' % (param, param_type)] - out += self._str_indent(desc) - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += self[name] - out += [''] - return out - - def _str_see_also(self, func_role): - if not self['See Also']: return [] - out = [] - out += self._str_header("See Also") - last_had_desc = True - for func, desc, role in self['See Also']: - if role: - link = ':%s:`%s`' % (role, func) - elif func_role: - link = ':%s:`%s`' % (func_role, func) - else: - link = "`%s`_" % func - if desc or last_had_desc: - out += [''] - out += [link] - else: - out[-1] += ", %s" % link - if desc: - out += self._str_indent([' '.join(desc)]) - last_had_desc = True - else: - last_had_desc = False - out += [''] - return out - - def _str_index(self): - idx = self['index'] - out = [] - out += ['.. 
index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - out += [' :%s: %s' % (section, ', '.join(references))] - return out - - def __str__(self, func_role=''): - out = [] - out += self._str_signature() - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_section('Warnings') - out += self._str_see_also(func_role) - for s in ('Notes','References','Examples'): - out += self._str_section(s) - for param_list in ('Attributes', 'Methods'): - out += self._str_param_list(param_list) - out += self._str_index() - return '\n'.join(out) - - -def indent(str,indent=4): - indent_str = ' '*indent - if str is None: - return indent_str - lines = str.split('\n') - return '\n'.join(indent_str + l for l in lines) - -def dedent_lines(lines): - """Deindent a list of lines maximally""" - return textwrap.dedent("\n".join(lines)).split("\n") - -def header(text, style='-'): - return text + '\n' + style*len(text) + '\n' - - -class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func', doc=None, config={}): - self._f = func - self._role = role # e.g. 
"func" or "meth" - - if doc is None: - if func is None: - raise ValueError("No function or docstring given") - doc = inspect.getdoc(func) or '' - NumpyDocString.__init__(self, doc) - - if not self['Signature'] and func is not None: - func, func_name = self.get_func() - try: - # try to read signature - argspec = inspect.getargspec(func) - argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*','\*') - signature = '%s%s' % (func_name, argspec) - except TypeError, e: - signature = '%s()' % func_name - self['Signature'] = signature - - def get_func(self): - func_name = getattr(self._f, '__name__', self.__class__.__name__) - if inspect.isclass(self._f): - func = getattr(self._f, '__call__', self._f.__init__) - else: - func = self._f - return func, func_name - - def __str__(self): - out = '' - - func, func_name = self.get_func() - signature = self['Signature'].replace('*', '\*') - - roles = {'func': 'function', - 'meth': 'method'} - - if self._role: - if not roles.has_key(self._role): - print "Warning: invalid role %s" % self._role - out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), - func_name) - - out += super(FunctionDoc, self).__str__(func_role=self._role) - return out - - -class ClassDoc(NumpyDocString): - - extra_public_methods = ['__call__'] - - def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config={}): - if not inspect.isclass(cls) and cls is not None: - raise ValueError("Expected a class or None, but got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - - if doc is None: - if cls is None: - raise ValueError("No class or documentation string given") - doc = pydoc.getdoc(cls) - - NumpyDocString.__init__(self, doc) - - if config.get('show_class_members', True): - if not self['Methods']: - self['Methods'] = [(name, '', '') - for name in sorted(self.methods)] - if not self['Attributes']: - self['Attributes'] = [(name, '', '') - for name in sorted(self.properties)] - - @property - def methods(self): - if self._cls is None: - return [] - return [name for name,func in inspect.getmembers(self._cls) - if ((not name.startswith('_') - or name in self.extra_public_methods) - and callable(func))] - - @property - def properties(self): - if self._cls is None: - return [] - return [name for name,func in inspect.getmembers(self._cls) - if not name.startswith('_') and func is None] diff --git a/scipy-0.10.1/doc/sphinxext/docscrape_sphinx.py b/scipy-0.10.1/doc/sphinxext/docscrape_sphinx.py deleted file mode 100644 index e44e770ef8..0000000000 --- a/scipy-0.10.1/doc/sphinxext/docscrape_sphinx.py +++ /dev/null @@ -1,227 +0,0 @@ -import re, inspect, textwrap, pydoc -import sphinx -from docscrape import NumpyDocString, FunctionDoc, ClassDoc - -class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config={}): - self.use_plots = config.get('use_plots', False) - NumpyDocString.__init__(self, docstring, config=config) - - # string conversion routines - def _str_header(self, name, symbol='`'): - return ['.. 
rubric:: ' + name, ''] - - def _str_field_list(self, name): - return [':' + name + ':'] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - return [''] - if self['Signature']: - return ['``%s``' % self['Signature']] + [''] - else: - return [''] - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Extended Summary'] + [''] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_field_list(name) - out += [''] - for param,param_type,desc in self[name]: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) - out += [''] - out += self._str_indent(desc,8) - out += [''] - return out - - @property - def _obj(self): - if hasattr(self, '_cls'): - return self._cls - elif hasattr(self, '_f'): - return self._f - return None - - def _str_member_list(self, name): - """ - Generate a member listing, autosummary:: table where possible, - and a table where not. - - """ - out = [] - if self[name]: - out += ['.. rubric:: %s' % name, ''] - prefix = getattr(self, '_name', '') - - if prefix: - prefix = '~%s.' % prefix - - autosum = [] - others = [] - for param, param_type, desc in self[name]: - param = param.strip() - if not self._obj or hasattr(self._obj, param): - autosum += [" %s%s" % (prefix, param)] - else: - others.append((param, param_type, desc)) - - if autosum: - out += ['.. 
autosummary::', ' :toctree:', ''] - out += autosum - - if others: - maxlen_0 = max([len(x[0]) for x in others]) - maxlen_1 = max([len(x[1]) for x in others]) - hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 - fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) - n_indent = maxlen_0 + maxlen_1 + 4 - out += [hdr] - for param, param_type, desc in others: - out += [fmt % (param.strip(), param_type)] - out += self._str_indent(desc, n_indent) - out += [hdr] - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += [''] - content = textwrap.dedent("\n".join(self[name])).split("\n") - out += content - out += [''] - return out - - def _str_see_also(self, func_role): - out = [] - if self['See Also']: - see_also = super(SphinxDocString, self)._str_see_also(func_role) - out = ['.. seealso::', ''] - out += self._str_indent(see_also[2:]) - return out - - def _str_warnings(self): - out = [] - if self['Warnings']: - out = ['.. warning::', ''] - out += self._str_indent(self['Warnings']) - return out - - def _str_index(self): - idx = self['index'] - out = [] - if len(idx) == 0: - return out - - out += ['.. index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - elif section == 'refguide': - out += [' single: %s' % (', '.join(references))] - else: - out += [' %s: %s' % (section, ','.join(references))] - return out - - def _str_references(self): - out = [] - if self['References']: - out += self._str_header('References') - if isinstance(self['References'], str): - self['References'] = [self['References']] - out.extend(self['References']) - out += [''] - # Latex collects all references to a separate bibliography, - # so we need to insert links to it - if sphinx.__version__ >= "0.6": - out += ['.. only:: latex',''] - else: - out += ['.. latexonly::',''] - items = [] - for line in self['References']: - m = re.match(r'.. 
\[([a-z0-9._-]+)\]', line, re.I) - if m: - items.append(m.group(1)) - out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] - return out - - def _str_examples(self): - examples_str = "\n".join(self['Examples']) - - if (self.use_plots and 'import matplotlib' in examples_str - and 'plot::' not in examples_str): - out = [] - out += self._str_header('Examples') - out += ['.. plot::', ''] - out += self._str_indent(self['Examples']) - out += [''] - return out - else: - return self._str_section('Examples') - - def __str__(self, indent=0, func_role="obj"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_warnings() - out += self._str_see_also(func_role) - out += self._str_section('Notes') - out += self._str_references() - out += self._str_examples() - for param_list in ('Attributes', 'Methods'): - out += self._str_member_list(param_list) - out = self._str_indent(out,indent) - return '\n'.join(out) - -class SphinxFunctionDoc(SphinxDocString, FunctionDoc): - def __init__(self, obj, doc=None, config={}): - self.use_plots = config.get('use_plots', False) - FunctionDoc.__init__(self, obj, doc=doc, config=config) - -class SphinxClassDoc(SphinxDocString, ClassDoc): - def __init__(self, obj, doc=None, func_doc=None, config={}): - self.use_plots = config.get('use_plots', False) - ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - -class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config={}): - self._f = obj - SphinxDocString.__init__(self, doc, config=config) - -def get_doc_object(obj, what=None, doc=None, config={}): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 
'object' - if what == 'class': - return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, - config=config) - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, doc=doc, config=config) - else: - if doc is None: - doc = pydoc.getdoc(obj) - return SphinxObjDoc(obj, doc, config=config) diff --git a/scipy-0.10.1/doc/sphinxext/numpydoc.py b/scipy-0.10.1/doc/sphinxext/numpydoc.py deleted file mode 100644 index 7679352c2e..0000000000 --- a/scipy-0.10.1/doc/sphinxext/numpydoc.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -======== -numpydoc -======== - -Sphinx extension that handles docstrings in the Numpy standard format. [1] - -It will: - -- Convert Parameters etc. sections to field lists. -- Convert See Also section to a See also entry. -- Renumber references. -- Extract the signature from the docstring, if it can't be determined otherwise. - -.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt - -""" - -import sphinx - -if sphinx.__version__ < '1.0.1': - raise RuntimeError("Sphinx 1.0.1 or newer is required") - -import os, re, pydoc -from docscrape_sphinx import get_doc_object, SphinxDocString -from sphinx.util.compat import Directive -import inspect - -def mangle_docstrings(app, what, name, obj, options, lines, - reference_offset=[0]): - - cfg = dict(use_plots=app.config.numpydoc_use_plots, - show_class_members=app.config.numpydoc_show_class_members) - - if what == 'module': - # Strip top title - title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', - re.I|re.S) - lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") - else: - doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) - lines[:] = unicode(doc).split(u"\n") - - if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ - obj.__name__: - if hasattr(obj, '__module__'): - v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) - else: - v = dict(full_name=obj.__name__) - lines += [u'', u'.. 
htmlonly::', ''] - lines += [u' %s' % x for x in - (app.config.numpydoc_edit_link % v).split("\n")] - - # replace reference numbers so that there are no duplicates - references = [] - for line in lines: - line = line.strip() - m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) - if m: - references.append(m.group(1)) - - # start renaming from the longest string, to avoid overwriting parts - references.sort(key=lambda x: -len(x)) - if references: - for i, line in enumerate(lines): - for r in references: - if re.match(ur'^\d+$', r): - new_r = u"R%d" % (reference_offset[0] + int(r)) - else: - new_r = u"%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace(u'[%s]_' % r, - u'[%s]_' % new_r) - lines[i] = lines[i].replace(u'.. [%s]' % r, - u'.. [%s]' % new_r) - - reference_offset[0] += len(references) - -def mangle_signature(app, what, name, obj, options, sig, retann): - # Do not try to inspect classes that don't define `__init__` - if (inspect.isclass(obj) and - (not hasattr(obj, '__init__') or - 'initializes x; see ' in pydoc.getdoc(obj.__init__))): - return '', '' - - if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return - if not hasattr(obj, '__doc__'): return - - doc = SphinxDocString(pydoc.getdoc(obj)) - if doc['Signature']: - sig = re.sub(u"^[^(]*", u"", doc['Signature']) - return sig, u'' - -def setup(app, get_doc_object_=get_doc_object): - global get_doc_object - get_doc_object = get_doc_object_ - - app.connect('autodoc-process-docstring', mangle_docstrings) - app.connect('autodoc-process-signature', mangle_signature) - app.add_config_value('numpydoc_edit_link', None, False) - app.add_config_value('numpydoc_use_plots', None, False) - app.add_config_value('numpydoc_show_class_members', True, True) - - # Extra mangling domains - app.add_domain(NumpyPythonDomain) - app.add_domain(NumpyCDomain) - -#------------------------------------------------------------------------------ -# Docstring-mangling domains 
-#------------------------------------------------------------------------------ - -from docutils.statemachine import ViewList -from sphinx.domains.c import CDomain -from sphinx.domains.python import PythonDomain - -class ManglingDomainBase(object): - directive_mangling_map = {} - - def __init__(self, *a, **kw): - super(ManglingDomainBase, self).__init__(*a, **kw) - self.wrap_mangling_directives() - - def wrap_mangling_directives(self): - for name, objtype in self.directive_mangling_map.items(): - self.directives[name] = wrap_mangling_directive( - self.directives[name], objtype) - -class NumpyPythonDomain(ManglingDomainBase, PythonDomain): - name = 'np' - directive_mangling_map = { - 'function': 'function', - 'class': 'class', - 'exception': 'class', - 'method': 'function', - 'classmethod': 'function', - 'staticmethod': 'function', - 'attribute': 'attribute', - } - -class NumpyCDomain(ManglingDomainBase, CDomain): - name = 'np-c' - directive_mangling_map = { - 'function': 'function', - 'member': 'attribute', - 'macro': 'function', - 'type': 'class', - 'var': 'object', - } - -def wrap_mangling_directive(base_directive, objtype): - class directive(base_directive): - def run(self): - env = self.state.document.settings.env - - name = None - if self.arguments: - m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) - name = m.group(2).strip() - - if not name: - name = self.arguments[0] - - lines = list(self.content) - mangle_docstrings(env.app, objtype, name, None, None, lines) - self.content = ViewList(lines, self.content.parent) - - return base_directive.run(self) - - return directive - diff --git a/scipy-0.10.1/doc/sphinxext/phantom_import.py b/scipy-0.10.1/doc/sphinxext/phantom_import.py deleted file mode 100644 index c77eeb544e..0000000000 --- a/scipy-0.10.1/doc/sphinxext/phantom_import.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -============== -phantom_import -============== - -Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar 
-extensions to use docstrings loaded from an XML file. - -This extension loads an XML file in the Pydocweb format [1] and -creates a dummy module that contains the specified docstrings. This -can be used to get the current docstrings from a Pydocweb instance -without needing to rebuild the documented module. - -.. [1] http://code.google.com/p/pydocweb - -""" -import imp, sys, compiler, types, os, inspect, re - -def setup(app): - app.connect('builder-inited', initialize) - app.add_config_value('phantom_import_file', None, True) - -def initialize(app): - fn = app.config.phantom_import_file - if (fn and os.path.isfile(fn)): - print "[numpydoc] Phantom importing modules from", fn, "..." - import_phantom_module(fn) - -#------------------------------------------------------------------------------ -# Creating 'phantom' modules from an XML description -#------------------------------------------------------------------------------ -def import_phantom_module(xml_file): - """ - Insert a fake Python module to sys.modules, based on a XML file. - - The XML file is expected to conform to Pydocweb DTD. The fake - module will contain dummy objects, which guarantee the following: - - - Docstrings are correct. - - Class inheritance relationships are correct (if present in XML). - - Function argspec is *NOT* correct (even if present in XML). - Instead, the function signature is prepended to the function docstring. - - Class attributes are *NOT* correct; instead, they are dummy objects. 
- - Parameters - ---------- - xml_file : str - Name of an XML file to read - - """ - import lxml.etree as etree - - object_cache = {} - - tree = etree.parse(xml_file) - root = tree.getroot() - - # Sort items so that - # - Base classes come before classes inherited from them - # - Modules come before their contents - all_nodes = dict([(n.attrib['id'], n) for n in root]) - - def _get_bases(node, recurse=False): - bases = [x.attrib['ref'] for x in node.findall('base')] - if recurse: - j = 0 - while True: - try: - b = bases[j] - except IndexError: break - if b in all_nodes: - bases.extend(_get_bases(all_nodes[b])) - j += 1 - return bases - - type_index = ['module', 'class', 'callable', 'object'] - - def base_cmp(a, b): - x = cmp(type_index.index(a.tag), type_index.index(b.tag)) - if x != 0: return x - - if a.tag == 'class' and b.tag == 'class': - a_bases = _get_bases(a, recurse=True) - b_bases = _get_bases(b, recurse=True) - x = cmp(len(a_bases), len(b_bases)) - if x != 0: return x - if a.attrib['id'] in b_bases: return -1 - if b.attrib['id'] in a_bases: return 1 - - return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) - - nodes = root.getchildren() - nodes.sort(base_cmp) - - # Create phantom items - for node in nodes: - name = node.attrib['id'] - doc = (node.text or '').decode('string-escape') + "\n" - if doc == "\n": doc = "" - - # create parent, if missing - parent = name - while True: - parent = '.'.join(parent.split('.')[:-1]) - if not parent: break - if parent in object_cache: break - obj = imp.new_module(parent) - object_cache[parent] = obj - sys.modules[parent] = obj - - # create object - if node.tag == 'module': - obj = imp.new_module(name) - obj.__doc__ = doc - sys.modules[name] = obj - elif node.tag == 'class': - bases = [object_cache[b] for b in _get_bases(node) - if b in object_cache] - bases.append(object) - init = lambda self: None - init.__doc__ = doc - obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) - obj.__name__ = 
name.split('.')[-1] - elif node.tag == 'callable': - funcname = node.attrib['id'].split('.')[-1] - argspec = node.attrib.get('argspec') - if argspec: - argspec = re.sub('^[^(]*', '', argspec) - doc = "%s%s\n\n%s" % (funcname, argspec, doc) - obj = lambda: 0 - obj.__argspec_is_invalid_ = True - obj.func_name = funcname - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__objclass__ = object_cache[parent] - else: - class Dummy(object): pass - obj = Dummy() - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__get__ = lambda: None - object_cache[name] = obj - - if parent: - if inspect.ismodule(object_cache[parent]): - obj.__module__ = parent - setattr(object_cache[parent], name.split('.')[-1], obj) - - # Populate items - for node in root: - obj = object_cache.get(node.attrib['id']) - if obj is None: continue - for ref in node.findall('ref'): - if node.tag == 'class': - if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) - else: - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) diff --git a/scipy-0.10.1/doc/sphinxext/plot_directive.py b/scipy-0.10.1/doc/sphinxext/plot_directive.py deleted file mode 100644 index 80801e7986..0000000000 --- a/scipy-0.10.1/doc/sphinxext/plot_directive.py +++ /dev/null @@ -1,636 +0,0 @@ -""" -A special directive for generating a matplotlib plot. - -.. warning:: - - This is a hacked version of plot_directive.py from Matplotlib. - It's very much subject to change! - - -Usage ------ - -Can be used like this:: - - .. plot:: examples/example.py - - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3], [4,5,6]) - - .. plot:: - - A plotting example: - - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3], [4,5,6]) - -The content is interpreted as doctest formatted if it has a line starting -with ``>>>``. 
- -The ``plot`` directive supports the options - - format : {'python', 'doctest'} - Specify the format of the input - - include-source : bool - Whether to display the source code. Default can be changed in conf.py - -and the ``image`` directive options ``alt``, ``height``, ``width``, -``scale``, ``align``, ``class``. - -Configuration options ---------------------- - -The plot directive has the following configuration options: - - plot_include_source - Default value for the include-source option - - plot_pre_code - Code that should be executed before each plot. - - plot_basedir - Base directory, to which plot:: file names are relative to. - (If None or empty, file names are relative to the directoly where - the file containing the directive is.) - - plot_formats - File formats to generate. List of tuples or strings:: - - [(suffix, dpi), suffix, ...] - - that determine the file format and the DPI. For entries whose - DPI was omitted, sensible defaults are chosen. - - plot_html_show_formats - Whether to show links to the files in HTML. - -TODO ----- - -* Refactor Latex output; now it's plain images, but it would be nice - to make them appear side-by-side, or in floats. 
- -""" - -import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback -import sphinx - -import warnings -warnings.warn("A plot_directive module is also available under " - "matplotlib.sphinxext; expect this numpydoc.plot_directive " - "module to be deprecated after relevant features have been " - "integrated there.", - FutureWarning, stacklevel=2) - - -#------------------------------------------------------------------------------ -# Registration hook -#------------------------------------------------------------------------------ - -def setup(app): - setup.app = app - setup.config = app.config - setup.confdir = app.confdir - - app.add_config_value('plot_pre_code', '', True) - app.add_config_value('plot_include_source', False, True) - app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) - app.add_config_value('plot_basedir', None, True) - app.add_config_value('plot_html_show_formats', True, True) - - app.add_directive('plot', plot_directive, True, (0, 1, False), - **plot_directive_options) - -#------------------------------------------------------------------------------ -# plot:: directive -#------------------------------------------------------------------------------ -from docutils.parsers.rst import directives -from docutils import nodes - -def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return run(arguments, content, options, state_machine, state, lineno) -plot_directive.__doc__ = __doc__ - -def _option_boolean(arg): - if not arg or not arg.strip(): - # no argument given, assume used as a flag - return True - elif arg.strip().lower() in ('no', '0', 'false'): - return False - elif arg.strip().lower() in ('yes', '1', 'true'): - return True - else: - raise ValueError('"%s" unknown boolean' % arg) - -def _option_format(arg): - return directives.choice(arg, ('python', 'lisp')) - -def _option_align(arg): - return directives.choice(arg, ("top", "middle", 
"bottom", "left", "center", - "right")) - -plot_directive_options = {'alt': directives.unchanged, - 'height': directives.length_or_unitless, - 'width': directives.length_or_percentage_or_unitless, - 'scale': directives.nonnegative_int, - 'align': _option_align, - 'class': directives.class_option, - 'include-source': _option_boolean, - 'format': _option_format, - } - -#------------------------------------------------------------------------------ -# Generating output -#------------------------------------------------------------------------------ - -from docutils import nodes, utils - -try: - # Sphinx depends on either Jinja or Jinja2 - import jinja2 - def format_template(template, **kw): - return jinja2.Template(template).render(**kw) -except ImportError: - import jinja - def format_template(template, **kw): - return jinja.from_string(template, **kw) - -TEMPLATE = """ -{{ source_code }} - -{{ only_html }} - - {% if source_link or (html_show_formats and not multi_image) %} - ( - {%- if source_link -%} - `Source code <{{ source_link }}>`__ - {%- endif -%} - {%- if html_show_formats and not multi_image -%} - {%- for img in images -%} - {%- for fmt in img.formats -%} - {%- if source_link or not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - {%- endfor -%} - {%- endif -%} - ) - {% endif %} - - {% for img in images %} - .. figure:: {{ build_dir }}/{{ img.basename }}.png - {%- for option in options %} - {{ option }} - {% endfor %} - - {% if html_show_formats and multi_image -%} - ( - {%- for fmt in img.formats -%} - {%- if not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - ) - {%- endif -%} - {% endfor %} - -{{ only_latex }} - - {% for img in images %} - .. 
image:: {{ build_dir }}/{{ img.basename }}.pdf - {% endfor %} - -""" - -class ImageFile(object): - def __init__(self, basename, dirname): - self.basename = basename - self.dirname = dirname - self.formats = [] - - def filename(self, format): - return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) - - def filenames(self): - return [self.filename(fmt) for fmt in self.formats] - -def run(arguments, content, options, state_machine, state, lineno): - if arguments and content: - raise RuntimeError("plot:: directive can't have both args and content") - - document = state_machine.document - config = document.settings.env.config - - options.setdefault('include-source', config.plot_include_source) - - # determine input - rst_file = document.attributes['source'] - rst_dir = os.path.dirname(rst_file) - - if arguments: - if not config.plot_basedir: - source_file_name = os.path.join(rst_dir, - directives.uri(arguments[0])) - else: - source_file_name = os.path.join(setup.confdir, config.plot_basedir, - directives.uri(arguments[0])) - code = open(source_file_name, 'r').read() - output_base = os.path.basename(source_file_name) - else: - source_file_name = rst_file - code = textwrap.dedent("\n".join(map(str, content))) - counter = document.attributes.get('_plot_counter', 0) + 1 - document.attributes['_plot_counter'] = counter - base, ext = os.path.splitext(os.path.basename(source_file_name)) - output_base = '%s-%d.py' % (base, counter) - - base, source_ext = os.path.splitext(output_base) - if source_ext in ('.py', '.rst', '.txt'): - output_base = base - else: - source_ext = '' - - # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace('.', '-') - - # is it in doctest format? 
- is_doctest = contains_doctest(code) - if options.has_key('format'): - if options['format'] == 'python': - is_doctest = False - else: - is_doctest = True - - # determine output directory name fragment - source_rel_name = relpath(source_file_name, setup.confdir) - source_rel_dir = os.path.dirname(source_rel_name) - while source_rel_dir.startswith(os.path.sep): - source_rel_dir = source_rel_dir[1:] - - # build_dir: where to place output files (temporarily) - build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), - 'plot_directive', - source_rel_dir) - if not os.path.exists(build_dir): - os.makedirs(build_dir) - - # output_dir: final location in the builder's directory - dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, - source_rel_dir)) - - # how to link to files from the RST file - dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), - source_rel_dir).replace(os.path.sep, '/') - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') - source_link = dest_dir_link + '/' + output_base + source_ext - - # make figures - try: - results = makefig(code, source_file_name, build_dir, output_base, - config) - errors = [] - except PlotError, err: - reporter = state.memo.reporter - sm = reporter.system_message( - 2, "Exception occurred in plotting %s: %s" % (output_base, err), - line=lineno) - results = [(code, [])] - errors = [sm] - - # generate output restructuredtext - total_lines = [] - for j, (code_piece, images) in enumerate(results): - if options['include-source']: - if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code_piece.split('\n')] - else: - lines = ['.. code-block:: python', ''] - lines += [' %s' % row.rstrip() - for row in code_piece.split('\n')] - source_code = "\n".join(lines) - else: - source_code = "" - - opts = [':%s: %s' % (key, val) for key, val in options.items() - if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] - - only_html = ".. only:: html" - only_latex = ".. 
only:: latex" - - if j == 0: - src_link = source_link - else: - src_link = None - - result = format_template( - TEMPLATE, - dest_dir=dest_dir_link, - build_dir=build_dir_link, - source_link=src_link, - multi_image=len(images) > 1, - only_html=only_html, - only_latex=only_latex, - options=opts, - images=images, - source_code=source_code, - html_show_formats=config.plot_html_show_formats) - - total_lines.extend(result.split("\n")) - total_lines.extend("\n") - - if total_lines: - state_machine.insert_input(total_lines, source=source_file_name) - - # copy image files to builder's output directory - if not os.path.exists(dest_dir): - os.makedirs(dest_dir) - - for code_piece, images in results: - for img in images: - for fn in img.filenames(): - shutil.copyfile(fn, os.path.join(dest_dir, - os.path.basename(fn))) - - # copy script (if necessary) - if source_file_name == rst_file: - target_name = os.path.join(dest_dir, output_base + source_ext) - f = open(target_name, 'w') - f.write(unescape_doctest(code)) - f.close() - - return errors - - -#------------------------------------------------------------------------------ -# Run code and capture figures -#------------------------------------------------------------------------------ - -import matplotlib -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import matplotlib.image as image -from matplotlib import _pylab_helpers - -import exceptions - -def contains_doctest(text): - try: - # check if it's valid Python as-is - compile(text, '', 'exec') - return False - except SyntaxError: - pass - r = re.compile(r'^\s*>>>', re.M) - m = r.search(text) - return bool(m) - -def unescape_doctest(text): - """ - Extract code from a piece of text, which contains either Python code - or doctests. - - """ - if not contains_doctest(text): - return text - - code = "" - for line in text.split("\n"): - m = re.match(r'^\s*(>>>|\.\.\.) 
(.*)$', line) - if m: - code += m.group(2) + "\n" - elif line.strip(): - code += "# " + line.strip() + "\n" - else: - code += "\n" - return code - -def split_code_at_show(text): - """ - Split code at plt.show() - - """ - - parts = [] - is_doctest = contains_doctest(text) - - part = [] - for line in text.split("\n"): - if (not is_doctest and line.strip() == 'plt.show()') or \ - (is_doctest and line.strip() == '>>> plt.show()'): - part.append(line) - parts.append("\n".join(part)) - part = [] - else: - part.append(line) - if "\n".join(part).strip(): - parts.append("\n".join(part)) - return parts - -class PlotError(RuntimeError): - pass - -def run_code(code, code_path, ns=None): - # Change the working directory to the directory of the example, so - # it can get at its data files, if any. - pwd = os.getcwd() - old_sys_path = list(sys.path) - if code_path is not None: - dirname = os.path.abspath(os.path.dirname(code_path)) - os.chdir(dirname) - sys.path.insert(0, dirname) - - # Redirect stdout - stdout = sys.stdout - sys.stdout = cStringIO.StringIO() - - # Reset sys.argv - old_sys_argv = sys.argv - sys.argv = [code_path] - - try: - try: - code = unescape_doctest(code) - if ns is None: - ns = {} - if not ns: - exec setup.config.plot_pre_code in ns - exec code in ns - except (Exception, SystemExit), err: - raise PlotError(traceback.format_exc()) - finally: - os.chdir(pwd) - sys.argv = old_sys_argv - sys.path[:] = old_sys_path - sys.stdout = stdout - return ns - - -#------------------------------------------------------------------------------ -# Generating figures -#------------------------------------------------------------------------------ - -def out_of_date(original, derived): - """ - Returns True if derivative is out-of-date wrt original, - both of which are full file paths. 
- """ - return (not os.path.exists(derived) - or os.stat(derived).st_mtime < os.stat(original).st_mtime) - - -def makefig(code, code_path, output_dir, output_base, config): - """ - Run a pyplot script *code* and save the images under *output_dir* - with file names derived from *output_base* - - """ - - # -- Parse format list - default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} - formats = [] - for fmt in config.plot_formats: - if isinstance(fmt, str): - formats.append((fmt, default_dpi.get(fmt, 80))) - elif type(fmt) in (tuple, list) and len(fmt)==2: - formats.append((str(fmt[0]), int(fmt[1]))) - else: - raise PlotError('invalid image format "%r" in plot_formats' % fmt) - - # -- Try to determine if all images already exist - - code_pieces = split_code_at_show(code) - - # Look for single-figure output files first - all_exists = True - img = ImageFile(output_base, output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - if all_exists: - return [(code, [img])] - - # Then look for multi-figure output files - results = [] - all_exists = True - for i, code_piece in enumerate(code_pieces): - images = [] - for j in xrange(1000): - img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - # assume that if we have one, we have them all - if not all_exists: - all_exists = (j > 0) - break - images.append(img) - if not all_exists: - break - results.append((code_piece, images)) - - if all_exists: - return results - - # -- We didn't find the files, so build them - - results = [] - ns = {} - - for i, code_piece in enumerate(code_pieces): - # Clear between runs - plt.close('all') - - # Run code - run_code(code_piece, code_path, ns) - - # Collect images - images = [] - fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() - for j, 
figman in enumerate(fig_managers): - if len(fig_managers) == 1 and len(code_pieces) == 1: - img = ImageFile(output_base, output_dir) - else: - img = ImageFile("%s_%02d_%02d" % (output_base, i, j), - output_dir) - images.append(img) - for format, dpi in formats: - try: - figman.canvas.figure.savefig(img.filename(format), dpi=dpi) - except exceptions.BaseException, err: - raise PlotError(traceback.format_exc()) - img.formats.append(format) - - # Results - results.append((code_piece, images)) - - return results - - -#------------------------------------------------------------------------------ -# Relative pathnames -#------------------------------------------------------------------------------ - -try: - from os.path import relpath -except ImportError: - # Copied from Python 2.7 - if 'posix' in sys.builtin_module_names: - def relpath(path, start=os.path.curdir): - """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir - - if not path: - raise ValueError("no path specified") - - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - - # Work out how much of the filepath is shared by start and path. 
- i = len(commonprefix([start_list, path_list])) - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) - elif 'nt' in sys.builtin_module_names: - def relpath(path, start=os.path.curdir): - """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir, splitunc - - if not path: - raise ValueError("no path specified") - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - if start_list[0].lower() != path_list[0].lower(): - unc_path, rest = splitunc(path) - unc_start, rest = splitunc(start) - if bool(unc_path) ^ bool(unc_start): - raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" - % (path, start)) - else: - raise ValueError("path is on drive %s, start on drive %s" - % (path_list[0], start_list[0])) - # Work out how much of the filepath is shared by start and path. - for i in range(min(len(start_list), len(path_list))): - if start_list[i].lower() != path_list[i].lower(): - break - else: - i += 1 - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) - else: - raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/scipy-0.10.1/doc/sphinxext/setup.py b/scipy-0.10.1/doc/sphinxext/setup.py deleted file mode 100644 index 76e3fd81bb..0000000000 --- a/scipy-0.10.1/doc/sphinxext/setup.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.core import setup -import setuptools -import sys, os - -version = "0.4" - -setup( - name="numpydoc", - packages=["numpydoc"], - package_dir={"numpydoc": ""}, - version=version, - description="Sphinx extension to support docstrings in Numpy format", - # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers - classifiers=["Development Status :: 3 - Alpha", - "Environment :: Plugins", - "License :: OSI Approved :: BSD License", - "Topic :: Documentation"], - keywords="sphinx 
numpy", - author="Pauli Virtanen and others", - author_email="pav@iki.fi", - url="http://github.com/numpy/numpy/tree/master/doc/sphinxext", - license="BSD", - zip_safe=False, - install_requires=["Sphinx >= 1.0.1"], - package_data={'numpydoc': 'tests', '': ''}, - entry_points={ - "console_scripts": [ - "autosummary_generate = numpydoc.autosummary_generate:main", - ], - }, -) diff --git a/scipy-0.10.1/doc/sphinxext/tests/test_docscrape.py b/scipy-0.10.1/doc/sphinxext/tests/test_docscrape.py deleted file mode 100644 index 6fab79832d..0000000000 --- a/scipy-0.10.1/doc/sphinxext/tests/test_docscrape.py +++ /dev/null @@ -1,615 +0,0 @@ -# -*- encoding:utf-8 -*- - -import sys, os -sys.path.append(os.path.join(os.path.dirname(__file__), '..')) - -from docscrape import NumpyDocString, FunctionDoc, ClassDoc -from docscrape_sphinx import SphinxDocString, SphinxClassDoc -from nose.tools import * - -doc_txt = '''\ - numpy.multivariate_normal(mean, cov, shape=None, spam=None) - - Draw values from a multivariate normal distribution with specified - mean and covariance. - - The multivariate normal or Gaussian distribution is a generalisation - of the one-dimensional normal distribution to higher dimensions. - - Parameters - ---------- - mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - cov : (N,N) ndarray - Covariance matrix of the distribution. - shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - - Returns - ------- - out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - - Other Parameters - ---------------- - spam : parrot - A parrot off its mortal coil. 
- - Raises - ------ - RuntimeError - Some error - - Warns - ----- - RuntimeWarning - Some warning - - Warnings - -------- - Certain warnings apply. - - Notes - ----- - - Instead of specifying the full covariance matrix, popular - approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - - This geometrical property can be seen in two dimensions by plotting - generated data-points: - - >>> mean = [0,0] - >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - - >>> x,y = multivariate_normal(mean,cov,5000).T - >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - - Note that the covariance matrix must be symmetric and non-negative - definite. - - References - ---------- - .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 - .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - - See Also - -------- - some, other, funcs - otherfunc : relationship - - Examples - -------- - >>> mean = (1,2) - >>> cov = [[1,0],[1,0]] - >>> x = multivariate_normal(mean,cov,(3,3)) - >>> print x.shape - (3, 3, 2) - - The following is probably true, given that 0.6 is roughly twice the - standard deviation: - - >>> print list( (x[0,0,:] - mean) < 0.6 ) - [True, True] - - .. 
index:: random - :refguide: random;distributions, random;gauss - - ''' -doc = NumpyDocString(doc_txt) - - -def test_signature(): - assert doc['Signature'].startswith('numpy.multivariate_normal(') - assert doc['Signature'].endswith('spam=None)') - -def test_summary(): - assert doc['Summary'][0].startswith('Draw values') - assert doc['Summary'][-1].endswith('covariance.') - -def test_extended_summary(): - assert doc['Extended Summary'][0].startswith('The multivariate normal') - -def test_parameters(): - assert_equal(len(doc['Parameters']), 3) - assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) - - arg, arg_type, desc = doc['Parameters'][1] - assert_equal(arg_type, '(N,N) ndarray') - assert desc[0].startswith('Covariance matrix') - assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' - -def test_other_parameters(): - assert_equal(len(doc['Other Parameters']), 1) - assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) - arg, arg_type, desc = doc['Other Parameters'][0] - assert_equal(arg_type, 'parrot') - assert desc[0].startswith('A parrot off its mortal coil') - -def test_returns(): - assert_equal(len(doc['Returns']), 1) - arg, arg_type, desc = doc['Returns'][0] - assert_equal(arg, 'out') - assert_equal(arg_type, 'ndarray') - assert desc[0].startswith('The drawn samples') - assert desc[-1].endswith('distribution.') - -def test_notes(): - assert doc['Notes'][0].startswith('Instead') - assert doc['Notes'][-1].endswith('definite.') - assert_equal(len(doc['Notes']), 17) - -def test_references(): - assert doc['References'][0].startswith('..') - assert doc['References'][-1].endswith('2001.') - -def test_examples(): - assert doc['Examples'][0].startswith('>>>') - assert doc['Examples'][-1].endswith('True]') - -def test_index(): - assert_equal(doc['index']['default'], 'random') - print doc['index'] - assert_equal(len(doc['index']), 2) - assert_equal(len(doc['index']['refguide']), 2) - -def non_blank_line_by_line_compare(a,b): - a = [l for l in 
a.split('\n') if l.strip()] - b = [l for l in b.split('\n') if l.strip()] - for n,line in enumerate(a): - if not line == b[n]: - raise AssertionError("Lines %s of a and b differ: " - "\n>>> %s\n<<< %s\n" % - (n,line,b[n])) -def test_str(): - non_blank_line_by_line_compare(str(doc), -"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -Parameters ----------- -mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - -cov : (N,N) ndarray - Covariance matrix of the distribution. -shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -Returns -------- -out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - -Other Parameters ----------------- -spam : parrot - A parrot off its mortal coil. - -Raises ------- -RuntimeError : - Some error - -Warns ------ -RuntimeWarning : - Some warning - -Warnings --------- -Certain warnings apply. 
- -See Also --------- -`some`_, `other`_, `funcs`_ - -`otherfunc`_ - relationship - -Notes ------ -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -References ----------- -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -Examples --------- ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] - -.. index:: random - :refguide: random;distributions, random;gauss""") - - -def test_sphinx_str(): - sphinx_doc = SphinxDocString(doc_txt) - non_blank_line_by_line_compare(str(sphinx_doc), -""" -.. index:: random - single: random;distributions, random;gauss - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -:Parameters: - - **mean** : (N,) ndarray - - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - **cov** : (N,N) ndarray - - Covariance matrix of the distribution. 
- - **shape** : tuple of ints - - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -:Returns: - - **out** : ndarray - - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - -:Other Parameters: - - **spam** : parrot - - A parrot off its mortal coil. - -:Raises: - - **RuntimeError** : - - Some error - -:Warns: - - **RuntimeWarning** : - - Some warning - -.. warning:: - - Certain warnings apply. - -.. seealso:: - - :obj:`some`, :obj:`other`, :obj:`funcs` - - :obj:`otherfunc` - relationship - -.. rubric:: Notes - -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -.. rubric:: References - -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -.. only:: latex - - [1]_, [2]_ - -.. 
rubric:: Examples - ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] -""") - - -doc2 = NumpyDocString(""" - Returns array of indices of the maximum values of along the given axis. - - Parameters - ---------- - a : {array_like} - Array to look in. - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis""") - -def test_parameters_without_extended_description(): - assert_equal(len(doc2['Parameters']), 2) - -doc3 = NumpyDocString(""" - my_signature(*params, **kwds) - - Return this and that. - """) - -def test_escape_stars(): - signature = str(doc3).split('\n')[0] - assert_equal(signature, 'my_signature(\*params, \*\*kwds)') - -doc4 = NumpyDocString( - """a.conj() - - Return an array with all complex-valued elements conjugated.""") - -def test_empty_extended_summary(): - assert_equal(doc4['Extended Summary'], []) - -doc5 = NumpyDocString( - """ - a.something() - - Raises - ------ - LinAlgException - If array is singular. 
- - Warns - ----- - SomeWarning - If needed - """) - -def test_raises(): - assert_equal(len(doc5['Raises']), 1) - name,_,desc = doc5['Raises'][0] - assert_equal(name,'LinAlgException') - assert_equal(desc,['If array is singular.']) - -def test_warns(): - assert_equal(len(doc5['Warns']), 1) - name,_,desc = doc5['Warns'][0] - assert_equal(name,'SomeWarning') - assert_equal(desc,['If needed']) - -def test_see_also(): - doc6 = NumpyDocString( - """ - z(x,theta) - - See Also - -------- - func_a, func_b, func_c - func_d : some equivalent func - foo.func_e : some other func over - multiple lines - func_f, func_g, :meth:`func_h`, func_j, - func_k - :obj:`baz.obj_q` - :class:`class_j`: fubar - foobar - """) - - assert len(doc6['See Also']) == 12 - for func, desc, role in doc6['See Also']: - if func in ('func_a', 'func_b', 'func_c', 'func_f', - 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): - assert(not desc) - else: - assert(desc) - - if func == 'func_h': - assert role == 'meth' - elif func == 'baz.obj_q': - assert role == 'obj' - elif func == 'class_j': - assert role == 'class' - else: - assert role is None - - if func == 'func_d': - assert desc == ['some equivalent func'] - elif func == 'foo.func_e': - assert desc == ['some other func over', 'multiple lines'] - elif func == 'class_j': - assert desc == ['fubar', 'foobar'] - -def test_see_also_print(): - class Dummy(object): - """ - See Also - -------- - func_a, func_b - func_c : some relationship - goes here - func_d - """ - pass - - obj = Dummy() - s = str(FunctionDoc(obj, role='func')) - assert(':func:`func_a`, :func:`func_b`' in s) - assert(' some relationship' in s) - assert(':func:`func_d`' in s) - -doc7 = NumpyDocString(""" - - Doc starts on second line. 
- - """) - -def test_empty_first_line(): - assert doc7['Summary'][0].startswith('Doc starts') - - -def test_no_summary(): - str(SphinxDocString(""" - Parameters - ----------""")) - - -def test_unicode(): - doc = SphinxDocString(""" - öäöäöäöäöåååå - - öäöäöäööäååå - - Parameters - ---------- - ååå : äää - ööö - - Returns - ------- - ååå : ööö - äää - - """) - assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8') - -def test_plot_examples(): - cfg = dict(use_plots=True) - - doc = SphinxDocString(""" - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3],[4,5,6]) - >>> plt.show() - """, config=cfg) - assert 'plot::' in str(doc), str(doc) - - doc = SphinxDocString(""" - Examples - -------- - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3],[4,5,6]) - plt.show() - """, config=cfg) - assert str(doc).count('plot::') == 1, str(doc) - -def test_class_members(): - - class Dummy(object): - """ - Dummy class. - - """ - def spam(self, a, b): - """Spam\n\nSpam spam.""" - pass - def ham(self, c, d): - """Cheese\n\nNo cheese.""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(Dummy, config=dict(show_class_members=False)) - assert 'Methods' not in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' not in str(doc), (cls, str(doc)) - - doc = cls(Dummy, config=dict(show_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. 
autosummary::' in str(doc), str(doc) - -if __name__ == "__main__": - import nose - nose.run() - diff --git a/scipy-0.10.1/doc/sphinxext/traitsdoc.py b/scipy-0.10.1/doc/sphinxext/traitsdoc.py deleted file mode 100644 index 0fcf2c1cd3..0000000000 --- a/scipy-0.10.1/doc/sphinxext/traitsdoc.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -========= -traitsdoc -========= - -Sphinx extension that handles docstrings in the Numpy standard format, [1] -and support Traits [2]. - -This extension can be used as a replacement for ``numpydoc`` when support -for Traits is required. - -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard -.. [2] http://code.enthought.com/projects/traits/ - -""" - -import inspect -import os -import pydoc - -import docscrape -import docscrape_sphinx -from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString - -import numpydoc - -import comment_eater - -class SphinxTraitsDoc(SphinxClassDoc): - def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): - if not inspect.isclass(cls): - raise ValueError("Initialise using a class. Got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - self._name = cls.__name__ - self._func_doc = func_doc - - docstring = pydoc.getdoc(cls) - docstring = docstring.split('\n') - - # De-indent paragraph - try: - indent = min(len(s) - len(s.lstrip()) for s in docstring - if s.strip()) - except ValueError: - indent = 0 - - for n,line in enumerate(docstring): - docstring[n] = docstring[n][indent:] - - self._doc = docscrape.Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': '', - 'Description': [], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Traits': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'References': '', - 'Example': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Description'] + self['Extended Summary'] + [''] - - def __str__(self, indent=0, func_role="func"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Traits', 'Methods', - 'Returns','Raises'): - out += self._str_param_list(param_list) - out += self._str_see_also("obj") - out += self._str_section('Notes') - out += self._str_references() - out += self._str_section('Example') - out += self._str_section('Examples') - out = self._str_indent(out,indent) - return '\n'.join(out) - -def looks_like_issubclass(obj, classname): - """ Return True if the object has a class or superclass with the given class - name. - - Ignores old-style classes. 
- """ - t = obj - if t.__name__ == classname: - return True - for klass in t.__mro__: - if klass.__name__ == classname: - return True - return False - -def get_doc_object(obj, what=None, config=None): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 'object' - if what == 'class': - doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) - if looks_like_issubclass(obj, 'HasTraits'): - for name, trait, comment in comment_eater.get_class_traits(obj): - # Exclude private traits. - if not name.startswith('_'): - doc['Traits'].append((name, trait, comment.splitlines())) - return doc - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '', config=config) - else: - return SphinxDocString(pydoc.getdoc(obj), config=config) - -def setup(app): - # init numpydoc - numpydoc.setup(app, get_doc_object) - diff --git a/scipy-0.10.1/f2py.py b/scipy-0.10.1/f2py.py deleted file mode 100644 index ab5f677ef6..0000000000 --- a/scipy-0.10.1/f2py.py +++ /dev/null @@ -1,273 +0,0 @@ -import os.path as op -import re - -import numpy.f2py -import numpy.distutils.misc_util - -from waflib \ - import \ - Task -from waflib.TaskGen \ - import \ - extension, feature, before_method, after_method - -from interface_gen \ - import \ - generate_interface - - -CGEN_TEMPLATE = '%smodule' -FOBJECT_FILE = 'fortranobject.c' -FHEADER_FILE = 'fortranobject.h' -FWRAP_TEMPLATE = '%s-f2pywrappers.f' - -# This path is relative to build directory -F2PY_TEMP_DIR = '.f2py' - -# Those regex are copied from build_src in numpy.distutils.command -F2PY_MODNAME_MATCH = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -F2PY_UMODNAME_MATCH = re.compile(r'\s*python\s*module\s*(?P[\w_]*?'\ - '__user__[\w_]*)',re.I).match -# End of copy -#F2PY_INCLUDE_MATCH = re.compile(r"^\s+\") -# Copied from crackfortran.py in f2py -F2PY_INCLUDE_MATCH = 
re.compile(r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")',re.I) - -def is_pyf(node): - return node.name.endswith(".pyf") - -def include_pyf(node): - includes = [] - for line in node.read().splitlines(): - m = F2PY_INCLUDE_MATCH.match(line) - if m: - includes.append(m.groupdict()["name"]) - return includes - -def f2py_modulename(node): - """This returns the name of the module from the pyf source file. - - source is expected to be one string, containing the whole source file - code.""" - name = None - for line in node.read().splitlines(): - m = F2PY_MODNAME_MATCH(line) - if m: - if F2PY_UMODNAME_MATCH(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -def generate_fake_interface(name, node): - """Generate a (fake) .pyf file from another pyf file (!).""" - content = """\ -python module %(name)s - usercode void empty_module(void) {} - interface - subroutine empty_module() - intent(c) empty_module - end subroutine empty_module - end interface -end python module%(name)s -""" - node.write(content % {"name": name}) - -@feature('f2py') -@before_method('apply_incpaths') -def apply_f2py_includes(task_gen): - includes = task_gen.env["INCLUDES"] - d = op.dirname(numpy.f2py.__file__) - includes.append(op.join(d, 'src')) - includes.extend(numpy.distutils.misc_util.get_numpy_include_dirs()) - task_gen.env["INCLUDES"] = includes - -@feature('f2py_fortran') -@before_method('process_source') -def add_fortran_tasks(task_gen): - assert "f2py" in task_gen.features - - if hasattr(task_gen, "name"): - module_name = task_gen.name - else: - raise ValueError("Please define a name for task_gen %r" % task_gen) - if len(task_gen.source) < 1: - raise ValueError("Gne ?") - sources = task_gen.to_nodes(task_gen.source) - module_node = sources[0].parent.find_or_declare("%smodule.c" % module_name) - - f2py_task = task_gen.create_task('f2py_fortran', sources, module_node) - add_f2py_tasks(task_gen, f2py_task, module_name, module_node) - 
f2py_task.env.F2PYFLAGS.extend(["--lower", "-m", module_name]) - -def fake_interface_gen_callback(task_gen, node): - return _interface_gen_callback(task_gen, node, "f2py_fake_interface") - -def interface_gen_callback(task_gen, node): - return _interface_gen_callback(task_gen, node, "f2py_interface") - -def _interface_gen_callback(task_gen, node, interface_task_name): - if not hasattr(task_gen, "name"): - module_name = f2py_modulename(node) - else: - module_name = task_gen.name - - intermediate_output = node.parent.find_or_declare("%s.pyf" % module_name) - module_node = node.parent.find_or_declare("%smodule.c" % module_name) - # Guard against overwriting existing source code by accident. Did I say I - # hate find_or_declare ? - assert intermediate_output.is_bld() - assert module_node.is_bld() - - interface_task = task_gen.create_task(interface_task_name, node, intermediate_output) - - f2py_task = task_gen.create_task('f2py', intermediate_output, module_node) - add_f2py_tasks(task_gen, f2py_task, module_name, module_node) - -def add_f2py_tasks(task_gen, f2py_task, module_name, module_node): - build_dir = module_node.parent.bldpath() - # FIXME: ask waf ML how flags sharing and co is supposed to work - f2py_task.env.F2PYFLAGS = task_gen.env.F2PYFLAGS[:] - f2py_task.env.F2PYFLAGS.extend(["--build-dir", build_dir]) - task_gen.source += f2py_task.outputs - - fobject_node = module_node.parent.find_or_declare("%s-fortranobject.c" % module_name) - fobject_task = task_gen.create_task("f2py_fortran_object", [], fobject_node) - task_gen.source += fobject_task.outputs - - fwrapper_node = module_node.parent.find_or_declare(FWRAP_TEMPLATE % module_name) - fwrapper_task = task_gen.create_task("f2py_fwrapper", [], fwrapper_node) - task_gen.source += fwrapper_task.outputs - -@extension('.pyf') -def add_f2py_files(task_gen, node): - ext = '.c' - - if not is_pyf(node): - raise ValueError("Gne ?") - - if not hasattr(task_gen, "name"): - module_name = f2py_modulename(node) - else: - 
module_name = task_gen.name - module_node = node.parent.find_or_declare("%smodule.c" % module_name) - - f2py_task = task_gen.create_task('f2py', node, module_node) - add_f2py_tasks(task_gen, f2py_task, module_name, module_node) - -@extension('.ipyf') -def add_f2py_files(task_gen, node): - ext = '.pyf' - - if not hasattr(task_gen, "name"): - module_name = f2py_modulename(node) - else: - module_name = task_gen.name - intermediate_output = node.parent.find_or_declare("%s.pyf" % module_name) - assert intermediate_output.is_bld() - - if "f2py_interface" in task_gen.features: - interface_task = task_gen.create_task("f2py_interface", node, intermediate_output) - elif "f2py_fake_interface" in task_gen.features: - interface_task = task_gen.create_task("f2py_fake_interface", node, intermediate_output) - else: - raise ValueError("You need to use f2py_interface or f2py_fake_interface for .ipyf !") - task_gen.source += interface_task.outputs - #module_node = node.parent.find_or_declare("%smodule.c" % module_name) - - #f2py_task = task_gen.create_task('f2py', node, module_node) - #add_f2py_tasks(task_gen, f2py_task, module_name, module_node) - -class _f2py_interface(Task.Task): - pass - -class f2py_fake_interface(_f2py_interface): - def run(self): - node = self.inputs[0] - output = self.outputs[0] - module_name = f2py_modulename(node) - generate_fake_interface(module_name, output) - return 0 - -class f2py_interface(_f2py_interface): - def run(self): - node = self.inputs[0] - output = self.outputs[0] - module_name = f2py_modulename(node) - generate_interface(module_name, node.abspath(), output.abspath()) - return 0 - - def scan(self): - found = [] - missing = [] - - node = self.inputs[0] - if not hasattr(self, "name"): - module_name = f2py_modulename(node) - else: - module_name = self.name - - includes = include_pyf(node) - if includes: - real_pyf = node.parent.find_or_declare("%s.pyf" % module_name) - # Guard against overwriting existing source code by accident. 
Did I - # say I hate find_or_declare ? - assert real_pyf.is_bld() - - for inc in includes: - x = node.parent.find_resource(inc) - if x: - found.append(x) - else: - missing.append(x) - user_routines = node.parent.find_resource("%s_user_routines.pyf" % module_name) - if user_routines: - found.append(user_routines) - return (found, missing) - -class f2py_fortran_object(Task.Task): - def run(self): - node = self.generator.bld.bldnode.find_node(op.join(F2PY_TEMP_DIR, FOBJECT_FILE)) - output = self.outputs[0] - output.write(node.read()) - return 0 - -class f2py_fwrapper(Task.Task): - after = ["f2py"] - def run(self): - output = self.outputs[0] - if not op.exists(output.abspath()): - output.write("") - return 0 - -# TODO: f2py from .pyf or from .f should be different tasks. -class _f2py_task(Task.Task): - run_str = '${F2PY} ${F2PYFLAGS} ${SRC}' - color = 'CYAN' - -class f2py(_f2py_task): - ext_in = [".pyf"] - ext_out = [".h", ".f", ".c"] - -class f2py_fortran(_f2py_task): - ext_in = [".pyf"] - ext_out = [".h", ".f", ".c"] - -def configure(conf): - if not conf.env.CC and not conf.env.CXX: - conf.fatal('Load a C/C++ compiler first') - if not conf.env.PYTHON: - conf.fatal('Load the python tool first!') - conf.find_program('f2py', var='F2PY') - # FIXME: this has nothing to do here - conf.env.F2PYFLAGS = ["--quiet"] - - f2py_tempdir = conf.bldnode.make_node(F2PY_TEMP_DIR) - f2py_tempdir.mkdir() - - fobject = f2py_tempdir.make_node(FOBJECT_FILE) - - d = op.dirname(numpy.f2py.__file__) - source_c = op.join(d, 'src', FOBJECT_FILE) - fobject.write(open(source_c).read()) diff --git a/scipy-0.10.1/interface_gen.py b/scipy-0.10.1/interface_gen.py deleted file mode 100755 index 5982d0cb00..0000000000 --- a/scipy-0.10.1/interface_gen.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python - -import os -import re -from distutils.dir_util import mkpath - -def all_subroutines(interface_in): - # remove comments - comment_block_exp = re.compile(r'/\*(?:\s|.)*?\*/') - subroutine_exp = 
re.compile(r'subroutine (?:\s|.)*?end subroutine.*') - function_exp = re.compile(r'function (?:\s|.)*?end function.*') - - interface = comment_block_exp.sub('',interface_in) - subroutine_list = subroutine_exp.findall(interface) - function_list = function_exp.findall(interface) - subroutine_list = subroutine_list + function_list - subroutine_list = map(lambda x: x.strip(),subroutine_list) - return subroutine_list - -def real_convert(val_string): - return val_string - -def complex_convert(val_string): - return '(' + val_string + ',0.)' - -def convert_types(interface_in,converter): - regexp = re.compile(r'') - interface = interface_in[:] - while 1: - sub = regexp.search(interface) - if sub is None: break - converted = converter(sub.group(1)) - interface = interface.replace(sub.group(),converted) - return interface - -def generic_expand(generic_interface,skip_names=[]): - generic_types ={'s' :('real', 'real', real_convert, - 'real'), - 'd' :('double precision','double precision',real_convert, - 'double precision'), - 'c' :('complex', 'complex',complex_convert, - 'real'), - 'z' :('double complex', 'double complex',complex_convert, - 'double precision'), - 'cs':('complex', 'real',complex_convert, - 'real'), - 'zd':('double complex', 'double precision',complex_convert, - 'double precision'), - 'sc':('real', 'complex',real_convert, - 'real'), - 'dz':('double precision','double complex', real_convert, - 'double precision')} - generic_c_types = {'real':'float', - 'double precision':'double', - 'complex':'complex_float', - 'double complex':'complex_double'} - # cc_types is specific in ATLAS C BLAS, in particular, for complex arguments - generic_cc_types = {'real':'float', - 'double precision':'double', - 'complex':'void', - 'double complex':'void'} - #2. 
get all subroutines - subs = all_subroutines(generic_interface) - #print len(subs) - #loop through the subs - type_exp = re.compile(r'') - TYPE_EXP = re.compile(r'') - routine_name = re.compile(r'(subroutine|function)\s*(?P\w+)\s*\(') - interface = '' - for sub in subs: - #3. Find the typecodes to use: - m = type_exp.search(sub) - if m is None: - interface = interface + '\n\n' + sub - continue - type_chars = m.group(1) - # get rid of spaces - type_chars = type_chars.replace(' ','') - # get a list of the characters (or character pairs) - type_chars = type_chars.split(',') - # Now get rid of the special tag that contained the types - sub = re.sub(type_exp,'',sub) - m = TYPE_EXP.search(sub) - if m is not None: - sub = re.sub(TYPE_EXP,'',sub) - sub_generic = sub.strip() - for char in type_chars: - type_in,type_out,converter, rtype_in = generic_types[char] - sub = convert_types(sub_generic,converter) - function_def = sub.replace('',char) - function_def = function_def.replace('',char.upper()) - function_def = function_def.replace('',type_in) - function_def = function_def.replace('', - generic_c_types[type_in]) - function_def = function_def.replace('', - generic_cc_types[type_in]) - function_def = function_def.replace('',rtype_in) - function_def = function_def.replace('', - generic_c_types[rtype_in]) - function_def = function_def.replace('',type_out) - function_def = function_def.replace('', - generic_c_types[type_out]) - m = routine_name.match(function_def) - if m: - if m.group('name') in skip_names: - print 'Skipping',m.group('name') - continue - else: - print 'Possible bug: Failed to determine routines name' - interface = interface + '\n\n' + function_def - - return interface - -#def interface_to_module(interface_in,module_name,include_list,sdir='.'): -def interface_to_module(interface_in,module_name): - pre_prefix = "!%f90 -*- f90 -*-\n" - # heading and tail of the module definition. - file_prefix = "\npython module " + module_name +" ! 
in\n" \ - "!usercode '''#include \"cblas.h\"\n"\ - "!'''\n"\ - " interface \n" - file_suffix = "\n end interface\n" \ - "end module %s" % module_name - return pre_prefix + file_prefix + interface_in + file_suffix - -def process_includes(interface_in,sdir='.'): - include_exp = re.compile(r'\n\s*[^!]\s*') - include_files = include_exp.findall(interface_in) - for filename in include_files: - f = open(os.path.join(sdir,filename)) - interface_in = interface_in.replace(''%filename, - f.read()) - f.close() - return interface_in - -def generate_interface(module_name,src_file,target_file,skip_names=[]): - #print "generating",module_name,"interface" - f = open(src_file) - generic_interface = f.read() - f.close() - sdir = os.path.dirname(src_file) - generic_interface = process_includes(generic_interface,sdir) - generic_interface = generic_expand(generic_interface,skip_names) - module_def = interface_to_module(generic_interface,module_name) - mkpath(os.path.dirname(target_file)) - f = open(target_file,'w') - user_routines = os.path.join(sdir,module_name+"_user_routines.pyf") - if os.path.exists(user_routines): - f2 = open(user_routines) - f.write(f2.read()) - f2.close() - f.write(module_def) - f.close() - -def process_all(): - # process the standard files. - for name in ['fblas','cblas','clapack','flapack']: - generate_interface(name,'generic_%s.pyf'%(name),name+'.pyf') - - -if __name__ == "__main__": - process_all() diff --git a/scipy-0.10.1/scipy/__init__.py b/scipy-0.10.1/scipy/__init__.py deleted file mode 100644 index 8717a9f6dd..0000000000 --- a/scipy-0.10.1/scipy/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -""" -SciPy: A scientific computing package for Python -================================================ - -Documentation is available in the docstrings and -online at http://docs.scipy.org. 
- -Contents --------- -SciPy imports all the functions from the NumPy namespace, and in -addition provides: - -Subpackages ------------ -:: - - odr --- Orthogonal Distance Regression [*] - misc --- Various utilities that don't have - another home. - cluster --- Vector Quantization / Kmeans [*] - fftpack --- Discrete Fourier Transform algorithms - [*] - io --- Data input and output [*] - sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned - Conjugate Gradient Method (LOBPCG) [*] - special --- Airy Functions [*] - lib.blas --- Wrappers to BLAS library [*] - sparse.linalg.eigen --- Sparse Eigenvalue Solvers [*] - stats --- Statistical Functions [*] - lib --- Python wrappers to external libraries - [*] - lib.lapack --- Wrappers to LAPACK library [*] - maxentropy --- Routines for fitting maximum entropy - models [*] - integrate --- Integration routines [*] - ndimage --- n-dimensional image package [*] - linalg --- Linear algebra routines [*] - spatial --- Spatial data structures and algorithms - [*] - interpolate --- Interpolation Tools [*] - sparse.linalg --- Sparse Linear Algebra [*] - sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library: [*] - sparse.linalg.dsolve --- Linear Solvers [*] - optimize --- Optimization Tools [*] - sparse.linalg.eigen.arpack --- Eigenvalue solver using iterative - methods. 
[*] - signal --- Signal Processing Tools [*] - sparse --- Sparse Matrices [*] - - [*] - using a package requires explicit import - -Global symbols from subpackages -------------------------------- -:: - - misc --> info, factorial, factorial2, factorialk, - comb, who, lena, central_diff_weights, - derivative, pade, source - fftpack --> fft, fftn, fft2, ifft, ifft2, ifftn, - fftshift, ifftshift, fftfreq - stats --> find_repeats - linalg.dsolve.umfpack --> UmfpackContext - -Utility tools -------------- -:: - - test --- Run scipy unittests - show_config --- Show scipy build configuration - show_numpy_config --- Show numpy build configuration - __version__ --- Scipy version string - __numpy_version__ --- Numpy version string - -""" - -__all__ = ['test'] - -from numpy import show_config as show_numpy_config -if show_numpy_config is None: - raise ImportError("Cannot import scipy when running from numpy source directory.") -from numpy import __version__ as __numpy_version__ - -# Import numpy symbols to scipy name space -import numpy as _num -from numpy import oldnumeric -from numpy import * -from numpy.random import rand, randn -from numpy.fft import fft, ifft -from numpy.lib.scimath import * - -# Emit a warning if numpy is too old -majver, minver = [float(i) for i in _num.version.version.split('.')[:2]] -if majver < 1 or (majver == 1 and minver < 5): - import warnings - warnings.warn("Numpy 1.5.0 or above is recommended for this version of " \ - "scipy (detected version %s)" % _num.version.version, - UserWarning) - -__all__ += ['oldnumeric']+_num.__all__ - -__all__ += ['randn', 'rand', 'fft', 'ifft'] - -del _num -# Remove the linalg imported from numpy so that the scipy.linalg package can be -# imported. -del linalg -__all__.remove('linalg') - -# We first need to detect if we're being called as part of the scipy -# setup procedure itself in a reliable manner. 
-try: - __SCIPY_SETUP__ -except NameError: - __SCIPY_SETUP__ = False - - -if __SCIPY_SETUP__: - import sys as _sys - _sys.stderr.write('Running from scipy source directory.\n') - del _sys -else: - try: - from scipy.__config__ import show as show_config - except ImportError: - msg = """Error importing scipy: you cannot import scipy while - being in scipy source directory; please exit the scipy source - tree first, and relaunch your python intepreter.""" - raise ImportError(msg) - from scipy.version import version as __version__ - - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench diff --git a/scipy-0.10.1/scipy/bento.info b/scipy-0.10.1/scipy/bento.info deleted file mode 100644 index 1c218c431b..0000000000 --- a/scipy-0.10.1/scipy/bento.info +++ /dev/null @@ -1,32 +0,0 @@ -Recurse: - cluster, - fftpack, - integrate, - interpolate, - io, - lib, - linalg, - optimize, - signal, - sparse, - spatial, - special, - stats - -Library: - Packages: - cluster, - constants, - fftpack, - integrate, - interpolate, - io, - lib, - linalg, - misc, - optimize, - signal, - sparse, - spatial, - special, - stats diff --git a/scipy-0.10.1/scipy/cluster/SConscript b/scipy-0.10.1/scipy/cluster/SConscript deleted file mode 100644 index 257eb10a86..0000000000 --- a/scipy-0.10.1/scipy/cluster/SConscript +++ /dev/null @@ -1,15 +0,0 @@ -# Last Change: Mon Nov 03 07:00 PM 2008 J -# vim:syntax=python -from os.path import join - -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.NumpyPythonExtension('_hierarchy_wrap', - source = [join('src', 'hierarchy_wrap.c'), - join('src', 'hierarchy.c')]) - -env.NumpyPythonExtension('_vq', - source = [join('src', 'vq_module.c'), - join('src', 'vq.c')]) diff --git a/scipy-0.10.1/scipy/cluster/SConstruct b/scipy-0.10.1/scipy/cluster/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/cluster/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import 
GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/cluster/__init__.py b/scipy-0.10.1/scipy/cluster/__init__.py deleted file mode 100644 index 0a0e82ccca..0000000000 --- a/scipy-0.10.1/scipy/cluster/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -========================================= -Clustering package (:mod:`scipy.cluster`) -========================================= - -.. currentmodule:: scipy.cluster - -:mod:`scipy.cluster.vq` - -Clustering algorithms are useful in information theory, target detection, -communications, compression, and other areas. The `vq` module only -supports vector quantization and the k-means algorithms. - -:mod:`scipy.cluster.hierarchy` - -The `hierarchy` module provides functions for hierarchical and -agglomerative clustering. Its features include generating hierarchical -clusters from distance matrices, computing distance matrices from -observation vectors, calculating statistics on clusters, cutting linkages -to generate flat clusters, and visualizing clusters with dendrograms. - -""" - -__all__ = ['vq', 'hierarchy'] - -import vq, hierarchy - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/cluster/bento.info b/scipy-0.10.1/scipy/cluster/bento.info deleted file mode 100644 index fa9dd60a12..0000000000 --- a/scipy-0.10.1/scipy/cluster/bento.info +++ /dev/null @@ -1,7 +0,0 @@ -Library: - Extension: _hierarchy_wrap - Sources: - src/hierarchy_wrap.c, src/hierarchy.c - Extension: _vq - Sources: - src/vq_module.c, src/vq.c diff --git a/scipy-0.10.1/scipy/cluster/hierarchy.py b/scipy-0.10.1/scipy/cluster/hierarchy.py deleted file mode 100644 index 33a90010c8..0000000000 --- a/scipy-0.10.1/scipy/cluster/hierarchy.py +++ /dev/null @@ -1,2760 +0,0 @@ -""" -======================================================== -Hierarchical clustering (:mod:`scipy.cluster.hierarchy`) -======================================================== - -.. 
currentmodule:: scipy.cluster.hierarchy - -These functions cut hierarchical clusterings into flat clusterings -or find the roots of the forest formed by a cut by providing the flat -cluster ids of each observation. - -.. autosummary:: - :toctree: generated/ - - fcluster - fclusterdata - leaders - -These are routines for agglomerative clustering. - -.. autosummary:: - :toctree: generated/ - - linkage - single - complete - average - weighted - centroid - median - ward - -These routines compute statistics on hierarchies. - -.. autosummary:: - :toctree: generated/ - - cophenet - from_mlab_linkage - inconsistent - maxinconsts - maxdists - maxRstat - to_mlab_linkage - -Routines for visualizing flat clusters. - -.. autosummary:: - :toctree: generated/ - - dendrogram - -These are data structures and routines for representing hierarchies as -tree objects. - -.. autosummary:: - :toctree: generated/ - - ClusterNode - leaves_list - to_tree - -These are predicates for checking the validity of linkage and -inconsistency matrices as well as for checking isomorphism of two -flat cluster assignments. - -.. autosummary:: - :toctree: generated/ - - is_valid_im - is_valid_linkage - is_isomorphic - is_monotonic - correspond - num_obs_linkage - -Utility routines for plotting: - -.. autosummary:: - :toctree: generated/ - - set_link_color_palette - -References ----------- - -.. [Sta07] "Statistics toolbox." API Reference Documentation. The MathWorks. - http://www.mathworks.com/access/helpdesk/help/toolbox/stats/. - Accessed October 1, 2007. - -.. [Mti07] "Hierarchical clustering." API Reference Documentation. - The Wolfram Research, Inc. - http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/ - HierarchicalClustering.html. - Accessed October 1, 2007. - -.. [Gow69] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage - Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969. - -.. [War63] Ward Jr, JH. 
"Hierarchical grouping to optimize an objective - function." Journal of the American Statistical Association. 58(301): - pp. 236--44. 1963. - -.. [Joh66] Johnson, SC. "Hierarchical clustering schemes." Psychometrika. - 32(2): pp. 241--54. 1966. - -.. [Sne62] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp. - 855--60. 1962. - -.. [Bat95] Batagelj, V. "Comparing resemblance measures." Journal of - Classification. 12: pp. 73--90. 1995. - -.. [Sok58] Sokal, RR and Michener, CD. "A statistical method for evaluating - systematic relationships." Scientific Bulletins. 38(22): - pp. 1409--38. 1958. - -.. [Ede79] Edelbrock, C. "Mixture model tests of hierarchical clustering - algorithms: the problem of classifying everybody." Multivariate - Behavioral Research. 14: pp. 367--84. 1979. - -.. [Jai88] Jain, A., and Dubes, R., "Algorithms for Clustering Data." - Prentice-Hall. Englewood Cliffs, NJ. 1988. - -.. [Fis36] Fisher, RA "The use of multiple measurements in taxonomic - problems." Annals of Eugenics, 7(2): 179-188. 1936 - - -* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc. - -* Mathematica is a registered trademark of The Wolfram Research, Inc. - -Copyright Notice ----------------- - -Copyright (C) Damian Eads, 2007-2008. New BSD License. - -""" - - -# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) -# -# Author: Damian Eads -# Date: September 22, 2007 -# -# Copyright (c) 2007, 2008, Damian Eads -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# - Redistributions of source code must retain the above -# copyright notice, this list of conditions and the -# following disclaimer. 
-# - Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# - Neither the name of the author nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import types -import warnings - -import numpy as np -import _hierarchy_wrap -import scipy.spatial.distance as distance - -_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, - 'weighted': 6} -_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5} -_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union( - set(_cpy_euclid_methods.keys())) - -__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet', - 'correspond', 'dendrogram', 'fcluster', 'fclusterdata', - 'from_mlab_linkage', 'inconsistent', 'is_isomorphic', - 'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders', - 'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts', - 'median', 'num_obs_linkage', 'set_link_color_palette', 'single', - 'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance'] - - -def _warning(s): - warnings.warn('scipy.cluster: %s' % s, stacklevel=3) - - -def _copy_array_if_base_present(a): - """ - Copies the array if its base points to a parent array. - """ - if a.base is not None: - return a.copy() - elif np.issubsctype(a, np.float32): - return np.array(a, dtype=np.double) - else: - return a - - -def _copy_arrays_if_base_present(T): - """ - Accepts a tuple of arrays T. Copies the array T[i] if its base array - points to an actual array. Otherwise, the reference is just copied. - This is useful if the arrays are being passed to a C function that - does not do proper striding. - """ - l = [_copy_array_if_base_present(a) for a in T] - return l - - -def _randdm(pnts): - """ Generates a random distance matrix stored in condensed form. A - pnts * (pnts - 1) / 2 sized vector is returned. - """ - if pnts >= 2: - D = np.random.rand(pnts * (pnts - 1) / 2) - else: - raise ValueError("The number of points in the distance matrix " - "must be at least 2.") - return D - - -def single(y): - """ - Performs single/min/nearest linkage on the condensed distance - matrix ``y``. 
See ``linkage`` for more information on the return - structure and algorithm. - - Parameters - ---------- - y : ndarray - The upper triangular of the distance matrix. The result of - ``pdist`` is returned in this form. - - Returns - ------- - Z : ndarray - The linkage matrix. - - See Also - -------- - linkage: for advanced creation of hierarchical clusterings. - - """ - return linkage(y, method='single', metric='euclidean') - - -def complete(y): - """ - Performs complete complete/max/farthest point linkage on the - condensed distance matrix ``y``. See ``linkage`` for more - information on the return structure and algorithm. - - Parameters - ---------- - y : ndarray - The upper triangular of the distance matrix. The result of - ``pdist`` is returned in this form. - - Returns - ------- - Z : ndarray - A linkage matrix containing the hierarchical clustering. See - the ``linkage`` function documentation for more information - on its structure. - - """ - return linkage(y, method='complete', metric='euclidean') - - -def average(y): - """ - Performs average/UPGMA linkage on the condensed distance matrix - ``y``. See ``linkage`` for more information on the return - structure and algorithm. - - Parameters - ---------- - y : ndarray - The upper triangular of the distance matrix. The result of - ``pdist`` is returned in this form. - - Returns - ------- - Z : ndarray - A linkage matrix containing the hierarchical clustering. See - the ``linkage`` function documentation for more information - on its structure. - - See Also - -------- - linkage: for advanced creation of hierarchical clusterings. - - """ - return linkage(y, method='average', metric='euclidean') - - -def weighted(y): - """ - Performs weighted/WPGMA linkage on the condensed distance matrix - ``y``. See ``linkage`` for more information on the return - structure and algorithm. - - Parameters - ---------- - y : ndarray - The upper triangular of the distance matrix. The result of - ``pdist`` is returned in this form. 
- - Returns - ------- - Z : ndarray - A linkage matrix containing the hierarchical clustering. See - the ``linkage`` function documentation for more information - on its structure. - - See Also - -------- - linkage: for advanced creation of hierarchical clusterings. - - """ - return linkage(y, method='weighted', metric='euclidean') - - -def centroid(y): - """ - Performs centroid/UPGMC linkage. See ``linkage`` for more - information on the return structure and algorithm. - - The following are common calling conventions: - - 1. ``Z = centroid(y)`` - - Performs centroid/UPGMC linkage on the condensed distance - matrix ``y``. See ``linkage`` for more information on the return - structure and algorithm. - - 2. ``Z = centroid(X)`` - - Performs centroid/UPGMC linkage on the observation matrix ``X`` - using Euclidean distance as the distance metric. See ``linkage`` - for more information on the return structure and algorithm. - - Parameters - ---------- - Q : ndarray - A condensed or redundant distance matrix. A condensed - distance matrix is a flat array containing the upper - triangular of the distance matrix. This is the form that - ``pdist`` returns. Alternatively, a collection of - m observation vectors in n dimensions may be passed as - a m by n array. - - Returns - ------- - Z : ndarray - A linkage matrix containing the hierarchical clustering. See - the ``linkage`` function documentation for more information - on its structure. - - See Also - -------- - linkage: for advanced creation of hierarchical clusterings. - - """ - return linkage(y, method='centroid', metric='euclidean') - - -def median(y): - """ - Performs median/WPGMC linkage. See ``linkage`` for more - information on the return structure and algorithm. - - The following are common calling conventions: - - 1. ``Z = median(y)`` - - Performs median/WPGMC linkage on the condensed distance matrix - ``y``. See ``linkage`` for more information on the return - structure and algorithm. - - 2. 
``Z = median(X)`` - - Performs median/WPGMC linkage on the observation matrix ``X`` - using Euclidean distance as the distance metric. See linkage - for more information on the return structure and algorithm. - - Parameters - ---------- - Q : ndarray - A condensed or redundant distance matrix. A condensed - distance matrix is a flat array containing the upper - triangular of the distance matrix. This is the form that - ``pdist`` returns. Alternatively, a collection of - m observation vectors in n dimensions may be passed as - a m by n array. - - Returns - ------- - Z : ndarray - The hierarchical clustering encoded as a linkage matrix. - - See Also - -------- - linkage: for advanced creation of hierarchical clusterings. - - """ - return linkage(y, method='median', metric='euclidean') - - -def ward(y): - """ - Performs Ward's linkage on a condensed or redundant distance - matrix. See linkage for more information on the return structure - and algorithm. - - The following are common calling conventions: - - 1. ``Z = ward(y)`` - Performs Ward's linkage on the condensed distance matrix ``Z``. See - linkage for more information on the return structure and - algorithm. - - 2. ``Z = ward(X)`` - Performs Ward's linkage on the observation matrix ``X`` using - Euclidean distance as the distance metric. See linkage for more - information on the return structure and algorithm. - - Parameters - ---------- - Q : ndarray - A condensed or redundant distance matrix. A condensed - distance matrix is a flat array containing the upper - triangular of the distance matrix. This is the form that - ``pdist`` returns. Alternatively, a collection of - m observation vectors in n dimensions may be passed as - a m by n array. - - Returns - ------- - Z : ndarray - The hierarchical clustering encoded as a linkage matrix. - - See Also - -------- - linkage: for advanced creation of hierarchical clusterings. 
- - """ - return linkage(y, method='ward', metric='euclidean') - - -def linkage(y, method='single', metric='euclidean'): - """ - Performs hierarchical/agglomerative clustering on the condensed - distance matrix y. - - y must be a :math:`{n \\choose 2}` sized - vector where n is the number of original observations paired - in the distance matrix. The behavior of this function is very - similar to the MATLAB linkage function. - - A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the - :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and - ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A - cluster with an index less than :math:`n` corresponds to one of - the :math:`n` original observations. The distance between - clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The - fourth value ``Z[i, 3]`` represents the number of original - observations in the newly formed cluster. - - The following linkage methods are used to compute the distance - :math:`d(s, t)` between two clusters :math:`s` and - :math:`t`. The algorithm begins with a forest of clusters that - have yet to be used in the hierarchy being formed. When two - clusters :math:`s` and :math:`t` from this forest are combined - into a single cluster :math:`u`, :math:`s` and :math:`t` are - removed from the forest, and :math:`u` is added to the - forest. When only one cluster remains in the forest, the algorithm - stops, and this cluster becomes the root. - - A distance matrix is maintained at each iteration. The ``d[i,j]`` - entry corresponds to the distance between cluster :math:`i` and - :math:`j` in the original forest. - - At each iteration, the algorithm must update the distance matrix - to reflect the distance of the newly formed cluster u with the - remaining clusters in the forest. - - Suppose there are :math:`|u|` original observations - :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and - :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in - cluster :math:`v`. 
Recall :math:`s` and :math:`t` are - combined to form cluster :math:`u`. Let :math:`v` be any - remaining cluster in the forest that is not :math:`u`. - - The following are methods for calculating the distance between the - newly formed cluster :math:`u` and each :math:`v`. - - * method='single' assigns - - .. math:: - d(u,v) = \\min(dist(u[i],v[j])) - - for all points :math:`i` in cluster :math:`u` and - :math:`j` in cluster :math:`v`. This is also known as the - Nearest Point Algorithm. - - * method='complete' assigns - - .. math:: - d(u, v) = \\max(dist(u[i],v[j])) - - for all points :math:`i` in cluster u and :math:`j` in - cluster :math:`v`. This is also known by the Farthest Point - Algorithm or Voor Hees Algorithm. - - * method='average' assigns - - .. math:: - d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} - {(|u|*|v|)} - - for all points :math:`i` and :math:`j` where :math:`|u|` - and :math:`|v|` are the cardinalities of clusters :math:`u` - and :math:`v`, respectively. This is also called the UPGMA - algorithm. This is called UPGMA. - - * method='weighted' assigns - - .. math:: - d(u,v) = (dist(s,v) + dist(t,v))/2 - - where cluster u was formed with cluster s and t and v - is a remaining cluster in the forest. (also called WPGMA) - - * method='centroid' assigns - - .. math:: - dist(s,t) = ||c_s-c_t||_2 - - where :math:`c_s` and :math:`c_t` are the centroids of - clusters :math:`s` and :math:`t`, respectively. When two - clusters :math:`s` and :math:`t` are combined into a new - cluster :math:`u`, the new centroid is computed over all the - original objects in clusters :math:`s` and :math:`t`. The - distance then becomes the Euclidean distance between the - centroid of :math:`u` and the centroid of a remaining cluster - :math:`v` in the forest. This is also known as the UPGMC - algorithm. - - * method='median' assigns math:`d(s,t)` like the ``centroid`` - method. 
When two clusters :math:`s` and :math:`t` are combined - into a new cluster :math:`u`, the average of centroids s and t - give the new centroid :math:`u`. This is also known as the - WPGMC algorithm. - - * method='ward' uses the Ward variance minimization algorithm. - The new entry :math:`d(u,v)` is computed as follows, - - .. math:: - - d(u,v) = \\sqrt{\\frac{|v|+|s|} - {T}d(v,s)^2 - + \\frac{|v|+|t|} - {T}d(v,t)^2 - + \\frac{|v|} - {T}d(s,t)^2} - - where :math:`u` is the newly joined cluster consisting of - clusters :math:`s` and :math:`t`, :math:`v` is an unused - cluster in the forest, :math:`T=|v|+|s|+|t|`, and - :math:`|*|` is the cardinality of its argument. This is also - known as the incremental algorithm. - - Warning: When the minimum distance pair in the forest is chosen, there - may be two or more pairs with the same minimum distance. This - implementation may chose a different minimum than the MATLAB - version. - - Parameters - ---------- - y : ndarray - A condensed or redundant distance matrix. A condensed distance matrix - is a flat array containing the upper triangular of the distance matrix. - This is the form that ``pdist`` returns. Alternatively, a collection of - :math:`m` observation vectors in n dimensions may be passed as an - :math:`m` by :math:`n` array. - method : str, optional - The linkage algorithm to use. See the ``Linkage Methods`` section below - for full descriptions. - metric : str, optional - The distance metric to use. See the ``distance.pdist`` function for a - list of valid distance metrics. - - Returns - ------- - Z : ndarray - The hierarchical clustering encoded as a linkage matrix. 
- - """ - if not isinstance(method, str): - raise TypeError("Argument 'method' must be a string.") - - y = _convert_to_double(np.asarray(y, order='c')) - - s = y.shape - if len(s) == 1: - distance.is_valid_y(y, throw=True, name='y') - d = distance.num_obs_y(y) - if method not in _cpy_non_euclid_methods.keys(): - raise ValueError("Valid methods when the raw observations are " - "omitted are 'single', 'complete', 'weighted', " - "and 'average'.") - # Since the C code does not support striding using strides. - [y] = _copy_arrays_if_base_present([y]) - - Z = np.zeros((d - 1, 4)) - _hierarchy_wrap.linkage_wrap(y, Z, int(d), \ - int(_cpy_non_euclid_methods[method])) - elif len(s) == 2: - X = y - n = s[0] - m = s[1] - if method not in _cpy_linkage_methods: - raise ValueError('Invalid method: %s' % method) - if method in _cpy_non_euclid_methods.keys(): - dm = distance.pdist(X, metric) - Z = np.zeros((n - 1, 4)) - _hierarchy_wrap.linkage_wrap(dm, Z, n, \ - int(_cpy_non_euclid_methods[method])) - elif method in _cpy_euclid_methods.keys(): - if metric != 'euclidean': - raise ValueError(('Method %s requires the distance metric to ' - 'be euclidean') % s) - dm = distance.pdist(X, metric) - Z = np.zeros((n - 1, 4)) - _hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n, - int(_cpy_euclid_methods[method])) - return Z - - -class ClusterNode: - """ - A tree node class for representing a cluster. - - Leaf nodes correspond to original observations, while non-leaf nodes - correspond to non-singleton clusters. - - The to_tree function converts a matrix returned by the linkage - function into an easy-to-use tree representation. - - See Also - -------- - to_tree: for converting a linkage matrix ``Z`` into a tree object. 
- - """ - - def __init__(self, id, left=None, right=None, dist=0, count=1): - if id < 0: - raise ValueError('The id must be non-negative.') - if dist < 0: - raise ValueError('The distance must be non-negative.') - if (left is None and right is not None) or \ - (left is not None and right is None): - raise ValueError('Only full or proper binary trees are permitted.' - ' This node has one child.') - if count < 1: - raise ValueError('A cluster must contain at least one original ' - 'observation.') - self.id = id - self.left = left - self.right = right - self.dist = dist - if self.left is None: - self.count = count - else: - self.count = left.count + right.count - - def get_id(self): - """ - The identifier of the target node. - - For ``0 <= i < n``, `i` corresponds to original observation i. - For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed - at iteration ``i-n``. - - Returns - ------- - id : int - The identifier of the target node. - - """ - return self.id - - def get_count(self): - """ - The number of leaf nodes (original observations) belonging to - the cluster node nd. If the target node is a leaf, 1 is - returned. - - Returns - ------- - c : int - The number of leaf nodes below the target node. - - """ - return self.count - - def get_left(self): - """ - Return a reference to the left child tree object. - - Returns - ------- - left : ClusterNode - The left child of the target node. If the node is a leaf, - None is returned. - - """ - return self.left - - def get_right(self): - """ - Returns a reference to the right child tree object. - - Returns - ------- - right : ClusterNode - The left child of the target node. If the node is a leaf, - None is returned. - - """ - return self.right - - def is_leaf(self): - """ - Returns True if the target node is a leaf. - - Returns - ------- - leafness : bool - True if the target node is a leaf node. 
- - """ - return self.left is None - - def pre_order(self, func=(lambda x: x.id)): - """ - Performs pre-order traversal without recursive function calls. - - When a leaf node is first encountered, ``func`` is called with - the leaf node as its argument, and its result is appended to - the list. - - For example, the statement:: - - ids = root.pre_order(lambda x: x.id) - - returns a list of the node ids corresponding to the leaf nodes - of the tree as they appear from left to right. - - Parameters - ---------- - func : function - Applied to each leaf ClusterNode object in the pre-order traversal. - Given the i'th leaf node in the pre-ordeR traversal ``n[i]``, the - result of func(n[i]) is stored in L[i]. If not provided, the index - of the original observation to which the node corresponds is used. - - Returns - ------- - L : list - The pre-order traversal. - - """ - - # Do a preorder traversal, caching the result. To avoid having to do - # recursion, we'll store the previous index we've visited in a vector. - n = self.count - - curNode = [None] * (2 * n) - lvisited = np.zeros((2 * n,), dtype=bool) - rvisited = np.zeros((2 * n,), dtype=bool) - curNode[0] = self - k = 0 - preorder = [] - while k >= 0: - nd = curNode[k] - ndid = nd.id - if nd.is_leaf(): - preorder.append(func(nd)) - k = k - 1 - else: - if not lvisited[ndid]: - curNode[k + 1] = nd.left - lvisited[ndid] = True - k = k + 1 - elif not rvisited[ndid]: - curNode[k + 1] = nd.right - rvisited[ndid] = True - k = k + 1 - # If we've visited the left and right of this non-leaf - # node already, go up in the tree. - else: - k = k - 1 - - return preorder - -_cnode_bare = ClusterNode(0) -_cnode_type = type(ClusterNode) - - -def to_tree(Z, rd=False): - """ - Converts a hierarchical clustering encoded in the matrix ``Z`` (by - linkage) into an easy-to-use tree object. The reference r to the - root ClusterNode object is returned. - - Each ClusterNode object has a left, right, dist, id, and count - attribute. 
The left and right attributes point to ClusterNode objects - that were combined to generate the cluster. If both are None then - the ClusterNode object is a leaf node, its count must be 1, and its - distance is meaningless but set to 0. - - Note: This function is provided for the convenience of the library - user. ClusterNodes are not used as input to any of the functions in this - library. - - Parameters - ---------- - Z : ndarray - The linkage matrix in proper form (see the ``linkage`` - function documentation). - - rd : bool, optional - When ``False``, a reference to the root ClusterNode object is - returned. Otherwise, a tuple (r,d) is returned. ``r`` is a - reference to the root node while ``d`` is a dictionary - mapping cluster ids to ClusterNode references. If a cluster id is - less than n, then it corresponds to a singleton cluster - (leaf node). See ``linkage`` for more information on the - assignment of cluster ids to clusters. - - Returns - ------- - L : list - The pre-order traversal. - - """ - - Z = np.asarray(Z, order='c') - - is_valid_linkage(Z, throw=True, name='Z') - - # The number of original objects is equal to the number of rows minus - # 1. - n = Z.shape[0] + 1 - - # Create a list full of None's to store the node objects - d = [None] * (n * 2 - 1) - - # Create the nodes corresponding to the n original objects. - for i in xrange(0, n): - d[i] = ClusterNode(i) - - nd = None - - for i in xrange(0, n - 1): - fi = int(Z[i, 0]) - fj = int(Z[i, 1]) - if fi > i + n: - raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' - 'is used before it is formed. See row %d, ' - 'column 0') % fi) - if fj > i + n: - raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' - 'is used before it is formed. See row %d, ' - 'column 1') % fj) - nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2]) - # ^ id ^ left ^ right ^ dist - if Z[i, 3] != nd.count: - raise ValueError(('Corrupt matrix Z. 
The count Z[%d,3] is ' - 'incorrect.') % i) - d[n + i] = nd - - if rd: - return (nd, d) - else: - return nd - - -def _convert_to_bool(X): - if X.dtype != np.bool: - X = np.bool_(X) - if not X.flags.contiguous: - X = X.copy() - return X - - -def _convert_to_double(X): - if X.dtype != np.double: - X = np.double(X) - if not X.flags.contiguous: - X = X.copy() - return X - - -def cophenet(Z, Y=None): - """ - Calculates the cophenetic distances between each observation in - the hierarchical clustering defined by the linkage ``Z``. - - Suppose ``p`` and ``q`` are original observations in - disjoint clusters ``s`` and ``t``, respectively and - ``s`` and ``t`` are joined by a direct parent cluster - ``u``. The cophenetic distance between observations - ``i`` and ``j`` is simply the distance between - clusters ``s`` and ``t``. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded as an array - (see ``linkage`` function). - - Y : ndarray (optional) - Calculates the cophenetic correlation coefficient ``c`` of a - hierarchical clustering defined by the linkage matrix ``Z`` - of a set of :math:`n` observations in :math:`m` - dimensions. ``Y`` is the condensed distance matrix from which - ``Z`` was generated. - - Returns - ------- - res : tuple - A tuple (c, {d}): - - - c : ndarray - The cophentic correlation distance (if ``y`` is passed). - - - d : ndarray - The cophenetic distance matrix in condensed form. The - :math:`ij` th entry is the cophenetic distance between - original observations :math:`i` and :math:`j`. - - """ - - Z = np.asarray(Z, order='c') - is_valid_linkage(Z, throw=True, name='Z') - Zs = Z.shape - n = Zs[0] + 1 - - zz = np.zeros((n * (n - 1) / 2,), dtype=np.double) - # Since the C code does not support striding using strides. - # The dimensions are used instead. 
- Z = _convert_to_double(Z) - - _hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n)) - if Y is None: - return zz - - Y = np.asarray(Y, order='c') - Ys = Y.shape - distance.is_valid_y(Y, throw=True, name='Y') - - z = zz.mean() - y = Y.mean() - Yy = Y - y - Zz = zz - z - #print Yy.shape, Zz.shape - numerator = (Yy * Zz) - denomA = Yy ** 2 - denomB = Zz ** 2 - c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum())) - #print c, numerator.sum() - return (c, zz) - - -def inconsistent(Z, d=2): - """ - Calculates inconsistency statistics on a linkage. - - Note: This function behaves similarly to the MATLAB(TM) - inconsistent function. - - Parameters - ---------- - d : int - The number of links up to ``d`` levels below each - non-singleton cluster. - Z : ndarray - The :math:`(n-1)` by 4 matrix encoding the linkage - (hierarchical clustering). See ``linkage`` documentation - for more information on its form. - - Returns - ------- - R : ndarray - A :math:`(n-1)` by 5 matrix where the ``i``'th row - contains the link statistics for the non-singleton cluster - ``i``. The link statistics are computed over the link - heights for links :math:`d` levels below the cluster - ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard - deviation of the link heights, respectively; ``R[i,2]`` is - the number of links included in the calculation; and - ``R[i,3]`` is the inconsistency coefficient, - - .. math:: - - \frac{\mathtt{Z[i,2]}-\mathtt{R[i,0]}} {R[i,1]}. - - """ - Z = np.asarray(Z, order='c') - - Zs = Z.shape - is_valid_linkage(Z, throw=True, name='Z') - if (not d == np.floor(d)) or d < 0: - raise ValueError('The second argument d must be a nonnegative ' - 'integer value.') -# if d == 0: -# d = 1 - - # Since the C code does not support striding using strides. - # The dimensions are used instead. 
- [Z] = _copy_arrays_if_base_present([Z]) - - n = Zs[0] + 1 - R = np.zeros((n - 1, 4), dtype=np.double) - - _hierarchy_wrap.inconsistent_wrap(Z, R, int(n), int(d)) - return R - - -def from_mlab_linkage(Z): - """ - Converts a linkage matrix generated by MATLAB(TM) to a new - linkage matrix compatible with this module. The conversion does - two things: - - * the indices are converted from ``1..N`` to ``0..(N-1)`` form, - and - - * a fourth column Z[:,3] is added where Z[i,3] is represents the - number of original observations (leaves) in the non-singleton - cluster i. - - This function is useful when loading in linkages from legacy data - files generated by MATLAB. - - Parameters - ---------- - Z : ndarray - A linkage matrix generated by MATLAB(TM). - - Returns - ------- - ZS : ndarray - A linkage matrix compatible with this library. - - """ - Z = np.asarray(Z, dtype=np.double, order='c') - Zs = Z.shape - - # If it's empty, return it. - if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): - return Z.copy() - - if len(Zs) != 2: - raise ValueError("The linkage array must be rectangular.") - - # If it contains no rows, return it. - if Zs[0] == 0: - return Z.copy() - - Zpart = Z.copy() - if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]: - raise ValueError('The format of the indices is not 1..N') - Zpart[:, 0:2] -= 1.0 - CS = np.zeros((Zs[0],), dtype=np.double) - _hierarchy_wrap.calculate_cluster_sizes_wrap(Zpart, CS, int(Zs[0]) + 1) - return np.hstack([Zpart, CS.reshape(Zs[0], 1)]) - - -def to_mlab_linkage(Z): - """ - Converts a linkage matrix ``Z`` generated by the linkage function - of this module to a MATLAB(TM) compatible one. The return linkage - matrix has the last column removed and the cluster indices are - converted to ``1..N`` indexing. - - Parameters - ---------- - Z : ndarray - A linkage matrix generated by this library. - - Returns - ------- - ZM : ndarray - A linkage matrix compatible with MATLAB(TM)'s hierarchical - clustering functions. 
- - """ - Z = np.asarray(Z, order='c', dtype=np.double) - Zs = Z.shape - if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): - return Z.copy() - is_valid_linkage(Z, throw=True, name='Z') - - ZP = Z[:, 0:3].copy() - ZP[:, 0:2] += 1.0 - - return ZP - - -def is_monotonic(Z): - """ - Returns ``True`` if the linkage passed is monotonic. The linkage - is monotonic if for every cluster :math:`s` and :math:`t` - joined, the distance between them is no less than the distance - between any previously joined clusters. - - Parameters - ---------- - Z : ndarray - The linkage matrix to check for monotonicity. - - Returns - ------- - b : bool - A boolean indicating whether the linkage is monotonic. - - """ - Z = np.asarray(Z, order='c') - is_valid_linkage(Z, throw=True, name='Z') - - # We expect the i'th value to be greater than its successor. - return (Z[1:, 2] >= Z[:-1, 2]).all() - - -def is_valid_im(R, warning=False, throw=False, name=None): - """Returns True if the inconsistency matrix passed is valid. - - It must be a :math:`n` by 4 numpy array of doubles. The standard - deviations ``R[:,1]`` must be nonnegative. The link counts - ``R[:,2]`` must be positive and no greater than :math:`n-1`. - - Parameters - ---------- - R : ndarray - The inconsistency matrix to check for validity. - warning : bool, optional - When ``True``, issues a Python warning if the linkage - matrix passed is invalid. - throw : bool, optional - When ``True``, throws a Python exception if the linkage - matrix passed is invalid. - name : str, optional - This string refers to the variable name of the invalid - linkage matrix. - - Returns - ------- - b : bool - True if the inconsistency matrix is valid. 
- - """ - R = np.asarray(R, order='c') - valid = True - try: - if type(R) != np.ndarray: - if name: - raise TypeError(('Variable \'%s\' passed as inconsistency ' - 'matrix is not a numpy array.') % name) - else: - raise TypeError('Variable passed as inconsistency matrix ' - 'is not a numpy array.') - if R.dtype != np.double: - if name: - raise TypeError(('Inconsistency matrix \'%s\' must contain ' - 'doubles (double).') % name) - else: - raise TypeError('Inconsistency matrix must contain doubles ' - '(double).') - if len(R.shape) != 2: - if name: - raise ValueError(('Inconsistency matrix \'%s\' must have ' - 'shape=2 (i.e. be two-dimensional).') % name) - else: - raise ValueError('Inconsistency matrix must have shape=2 ' - '(i.e. be two-dimensional).') - if R.shape[1] != 4: - if name: - raise ValueError(('Inconsistency matrix \'%s\' must have 4 ' - 'columns.') % name) - else: - raise ValueError('Inconsistency matrix must have 4 columns.') - if R.shape[0] < 1: - if name: - raise ValueError(('Inconsistency matrix \'%s\' must have at ' - 'least one row.') % name) - else: - raise ValueError('Inconsistency matrix must have at least ' - 'one row.') - if (R[:, 0] < 0).any(): - if name: - raise ValueError(('Inconsistency matrix \'%s\' contains ' - 'negative link height means.') % name) - else: - raise ValueError('Inconsistency matrix contains negative ' - 'link height means.') - if (R[:, 1] < 0).any(): - if name: - raise ValueError(('Inconsistency matrix \'%s\' contains ' - 'negative link height standard ' - 'deviations.') % name) - else: - raise ValueError('Inconsistency matrix contains negative ' - 'link height standard deviations.') - if (R[:, 2] < 0).any(): - if name: - raise ValueError(('Inconsistency matrix \'%s\' contains ' - 'negative link counts.') % name) - else: - raise ValueError('Inconsistency matrix contains negative ' - 'link counts.') - except Exception, e: - if throw: - raise - if warning: - _warning(str(e)) - valid = False - return valid - - -def 
is_valid_linkage(Z, warning=False, throw=False, name=None): - """ - Checks the validity of a linkage matrix. A linkage matrix is valid - if it is a two dimensional nd-array (type double) with :math:`n` - rows and 4 columns. The first two columns must contain indices - between 0 and :math:`2n-1`. For a given row ``i``, - :math:`0 \leq \mathtt{Z[i,0]} \leq i+n-1` - and :math:`0 \leq Z[i,1] \leq i+n-1` - (i.e. a cluster cannot join another cluster unless the cluster - being joined has been generated.) - - Parameters - ---------- - Z : array_like - Linkage matrix. - warning : bool, optional - When ``True``, issues a Python warning if the linkage - matrix passed is invalid. - throw : bool, optional - When ``True``, throws a Python exception if the linkage - matrix passed is invalid. - name : str, optional - This string refers to the variable name of the invalid - linkage matrix. - - Returns - ------- - b : bool - True iff the inconsistency matrix is valid. - - """ - Z = np.asarray(Z, order='c') - valid = True - try: - if type(Z) != np.ndarray: - if name: - raise TypeError(('\'%s\' passed as a linkage is not a valid ' - 'array.') % name) - else: - raise TypeError('Variable is not a valid array.') - if Z.dtype != np.double: - if name: - raise TypeError('Linkage matrix \'%s\' must contain doubles.' - % name) - else: - raise TypeError('Linkage matrix must contain doubles.') - if len(Z.shape) != 2: - if name: - raise ValueError(('Linkage matrix \'%s\' must have shape=2 ' - '(i.e. be two-dimensional).') % name) - else: - raise ValueError('Linkage matrix must have shape=2 ' - '(i.e. be two-dimensional).') - if Z.shape[1] != 4: - if name: - raise ValueError('Linkage matrix \'%s\' must have 4 columns.' 
- % name) - else: - raise ValueError('Linkage matrix must have 4 columns.') - if Z.shape[0] == 0: - raise ValueError('Linkage must be computed on at least two ' - 'observations.') - n = Z.shape[0] - if n > 1: - if ((Z[:, 0] < 0).any() or - (Z[:, 1] < 0).any()): - if name: - raise ValueError(('Linkage \'%s\' contains negative ' - 'indices.') % name) - else: - raise ValueError('Linkage contains negative indices.') - if (Z[:, 2] < 0).any(): - if name: - raise ValueError(('Linkage \'%s\' contains negative ' - 'distances.') % name) - else: - raise ValueError('Linkage contains negative distances.') - if (Z[:, 3] < 0).any(): - if name: - raise ValueError('Linkage \'%s\' contains negative counts.' - % name) - else: - raise ValueError('Linkage contains negative counts.') - if _check_hierarchy_uses_cluster_before_formed(Z): - if name: - raise ValueError(('Linkage \'%s\' uses non-singleton cluster ' - 'before its formed.') % name) - else: - raise ValueError("Linkage uses non-singleton cluster before " - "it's formed.") - if _check_hierarchy_uses_cluster_more_than_once(Z): - if name: - raise ValueError(('Linkage \'%s\' uses the same cluster more ' - 'than once.') % name) - else: - raise ValueError('Linkage uses the same cluster more than ' - 'once.') -# if _check_hierarchy_not_all_clusters_used(Z): -# if name: -# raise ValueError('Linkage \'%s\' does not use all clusters.' 
-# % name) -# else: -# raise ValueError('Linkage does not use all clusters.') - except Exception, e: - if throw: - raise - if warning: - _warning(str(e)) - valid = False - return valid - - -def _check_hierarchy_uses_cluster_before_formed(Z): - n = Z.shape[0] + 1 - for i in xrange(0, n - 1): - if Z[i, 0] >= n + i or Z[i, 1] >= n + i: - return True - return False - - -def _check_hierarchy_uses_cluster_more_than_once(Z): - n = Z.shape[0] + 1 - chosen = set([]) - for i in xrange(0, n - 1): - if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]: - return True - chosen.add(Z[i, 0]) - chosen.add(Z[i, 1]) - return False - - -def _check_hierarchy_not_all_clusters_used(Z): - n = Z.shape[0] + 1 - chosen = set([]) - for i in xrange(0, n - 1): - chosen.add(int(Z[i, 0])) - chosen.add(int(Z[i, 1])) - must_chosen = set(range(0, 2 * n - 2)) - return len(must_chosen.difference(chosen)) > 0 - - -def num_obs_linkage(Z): - """ - Returns the number of original observations of the linkage matrix - passed. - - Parameters - ---------- - Z : ndarray - The linkage matrix on which to perform the operation. - - Returns - ------- - n : int - The number of original observations in the linkage. - - """ - Z = np.asarray(Z, order='c') - is_valid_linkage(Z, throw=True, name='Z') - return (Z.shape[0] + 1) - - -def correspond(Z, Y): - """ - Checks if a linkage matrix ``Z`` and condensed distance matrix - ``Y`` could possibly correspond to one another. - - They must have the same number of original observations for - the check to succeed. - - This function is useful as a sanity check in algorithms that make - extensive use of linkage and distance matrices that must - correspond to the same set of original observations. - - Parameters - ---------- - Z : ndarray - The linkage matrix to check for correspondance. - Y : ndarray - The condensed distance matrix to check for correspondance. 
- - Returns - ------- - b : bool - A boolean indicating whether the linkage matrix and distance - matrix could possibly correspond to one another. - - """ - is_valid_linkage(Z, throw=True) - distance.is_valid_y(Y, throw=True) - Z = np.asarray(Z, order='c') - Y = np.asarray(Y, order='c') - return distance.num_obs_y(Y) == num_obs_linkage(Z) - - -def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): - """ - Forms flat clusters from the hierarchical clustering defined by - the linkage matrix ``Z``. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded with the matrix returned - by the `linkage` function. - t : float - The threshold to apply when forming flat clusters. - criterion : str, optional - The criterion to use in forming flat clusters. This can - be any of the following values: - - 'inconsistent': - If a cluster node and all its - descendants have an inconsistent value less than or equal - to ``t`` then all its leaf descendants belong to the - same flat cluster. When no non-singleton cluster meets - this criterion, every node is assigned to its own - cluster. (Default) - - 'distance': - Forms flat clusters so that the original - observations in each flat cluster have no greater a - cophenetic distance than ``t``. - - 'maxclust': - Finds a minimum threshold ``r`` so that - the cophenetic distance between any two original - observations in the same flat cluster is no more than - ``r`` and no more than ``t`` flat clusters are formed. - - 'monocrit': - Forms a flat cluster from a cluster node c - with index i when ``monocrit[j] <= t``. 
- - For example, to threshold on the maximum mean distance - as computed in the inconsistency matrix R with a - threshold of 0.8 do: - - ``MR = maxRstat(Z, R, 3)`` - - ``cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)`` - - 'maxclust_monocrit': - Forms a flat cluster from a - non-singleton cluster node ``c`` when ``monocrit[i] <= - r`` for all cluster indices ``i`` below and including - ``c``. ``r`` is minimized such that no more than ``t`` - flat clusters are formed. monocrit must be - monotonic. For example, to minimize the threshold t on - maximum inconsistency values so that no more than 3 flat - clusters are formed, do: - - ``MI = maxinconsts(Z, R)`` - - ``cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)`` - - depth : int, optional - The maximum depth to perform the inconsistency calculation. - It has no meaning for the other criteria. Default is 2. - R : ndarray, optional - The inconsistency matrix to use for the 'inconsistent' - criterion. This matrix is computed if not provided. - monocrit : ndarray, optional - An array of length n-1. ``monocrit[i]`` is the - statistics upon which non-singleton i is thresholded. The - monocrit vector must be monotonic, i.e. given a node c with - index i, for all node indices j corresponding to nodes - below c, ``monocrit[i] >= monocrit[j]``. - - Returns - ------- - fcluster : ndarray - An array of length n. T[i] is the flat cluster number to - which original observation i belongs. - - """ - Z = np.asarray(Z, order='c') - is_valid_linkage(Z, throw=True, name='Z') - - n = Z.shape[0] + 1 - T = np.zeros((n,), dtype='i') - - # Since the C code does not support striding using strides. - # The dimensions are used instead. - [Z] = _copy_arrays_if_base_present([Z]) - - if criterion == 'inconsistent': - if R is None: - R = inconsistent(Z, depth) - else: - R = np.asarray(R, order='c') - is_valid_im(R, throw=True, name='R') - # Since the C code does not support striding using strides. 
- # The dimensions are used instead. - [R] = _copy_arrays_if_base_present([R]) - _hierarchy_wrap.cluster_in_wrap(Z, R, T, float(t), int(n)) - elif criterion == 'distance': - _hierarchy_wrap.cluster_dist_wrap(Z, T, float(t), int(n)) - elif criterion == 'maxclust': - _hierarchy_wrap.cluster_maxclust_dist_wrap(Z, T, int(n), int(t)) - elif criterion == 'monocrit': - [monocrit] = _copy_arrays_if_base_present([monocrit]) - _hierarchy_wrap.cluster_monocrit_wrap(Z, monocrit, T, float(t), int(n)) - elif criterion == 'maxclust_monocrit': - [monocrit] = _copy_arrays_if_base_present([monocrit]) - _hierarchy_wrap.cluster_maxclust_monocrit_wrap(Z, monocrit, T, - int(n), int(t)) - else: - raise ValueError('Invalid cluster formation criterion: %s' - % str(criterion)) - return T - - -def fclusterdata(X, t, criterion='inconsistent', \ - metric='euclidean', depth=2, method='single', R=None): - """ - Cluster observation data using a given metric. - - Clusters the original observations in the n-by-m data - matrix X (n observations in m dimensions), using the euclidean - distance metric to calculate distances between original observations, - performs hierarchical clustering using the single linkage algorithm, - and forms flat clusters using the inconsistency method with `t` as the - cut-off threshold. - - A one-dimensional array T of length n is returned. T[i] is the index - of the flat cluster to which the original observation i belongs. - - Parameters - ---------- - X : ndarray - n by m data matrix with n observations in m dimensions. - t : float - The threshold to apply when forming flat clusters. - criterion : str, optional - Specifies the criterion for forming flat clusters. Valid - values are 'inconsistent' (default), 'distance', or 'maxclust' - cluster formation algorithms. See `fcluster` for descriptions. - method : str, optional - The linkage method to use (single, complete, average, - weighted, median centroid, ward). See `linkage` for more - information. Default is "single". 
- metric : str, optional - The distance metric for calculating pairwise distances. See - `distance.pdist` for descriptions and linkage to verify - compatibility with the linkage method. - t : double, optional - The cut-off threshold for the cluster function or the - maximum number of clusters (criterion='maxclust'). - depth : int, optional - The maximum depth for the inconsistency calculation. See - `inconsistent` for more information. - R : ndarray, optional - The inconsistency matrix. It will be computed if necessary - if it is not passed. - - Returns - ------- - T : ndarray - A vector of length n. T[i] is the flat cluster number to - which original observation i belongs. - - Notes - ----- - This function is similar to the MATLAB function clusterdata. - - """ - X = np.asarray(X, order='c', dtype=np.double) - - if type(X) != np.ndarray or len(X.shape) != 2: - raise TypeError('The observation matrix X must be an n by m numpy ' - 'array.') - - Y = distance.pdist(X, metric=metric) - Z = linkage(Y, method=method) - if R is None: - R = inconsistent(Z, d=depth) - else: - R = np.asarray(R, order='c') - T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t) - return T - - -def leaves_list(Z): - """ - Returns a list of leaf node ids (corresponding to observation - vector index) as they appear in the tree from left to right. Z is - a linkage matrix. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded as a matrix. See - ``linkage`` for more information. - - Returns - ------- - L : ndarray - The list of leaf node ids. - - """ - Z = np.asarray(Z, order='c') - is_valid_linkage(Z, throw=True, name='Z') - n = Z.shape[0] + 1 - ML = np.zeros((n,), dtype='i') - [Z] = _copy_arrays_if_base_present([Z]) - _hierarchy_wrap.prelist_wrap(Z, ML, int(n)) - return ML - - -# Let's do a conditional import. 
If matplotlib is not available, -try: - - import matplotlib - try: - import matplotlib.pylab - import matplotlib.patches - except RuntimeError, e: - # importing matplotlib.pylab can fail with a RuntimeError if installed - # but the graphic engine cannot be initialized (for example without X) - raise ImportError("Could not import matplotib (error was %s)" % str(e)) - #import matplotlib.collections - _mpl = True - - # Maps number of leaves to text size. - # - # p <= 20, size="12" - # 20 < p <= 30, size="10" - # 30 < p <= 50, size="8" - # 50 < p <= np.inf, size="6" - - _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} - _drotation = {20: 0, 40: 45, np.inf: 90} - _dtextsortedkeys = list(_dtextsizes.keys()) - _dtextsortedkeys.sort() - _drotationsortedkeys = list(_drotation.keys()) - _drotationsortedkeys.sort() - - def _remove_dups(L): - """ - Removes duplicates AND preserves the original order of the elements. - The set class is not guaranteed to do this. - """ - seen_before = set([]) - L2 = [] - for i in L: - if i not in seen_before: - seen_before.add(i) - L2.append(i) - return L2 - - def _get_tick_text_size(p): - for k in _dtextsortedkeys: - if p <= k: - return _dtextsizes[k] - - def _get_tick_rotation(p): - for k in _drotationsortedkeys: - if p <= k: - return _drotation[k] - - def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, - no_labels, color_list, leaf_font_size=None, - leaf_rotation=None, contraction_marks=None): - axis = matplotlib.pylab.gca() - # Independent variable plot width - ivw = len(ivl) * 10 - # Depenendent variable plot height - dvw = mh + mh * 0.05 - ivticks = np.arange(5, len(ivl) * 10 + 5, 10) - if orientation == 'top': - axis.set_ylim([0, dvw]) - axis.set_xlim([0, ivw]) - xlines = icoords - ylines = dcoords - if no_labels: - axis.set_xticks([]) - axis.set_xticklabels([]) - else: - axis.set_xticks(ivticks) - axis.set_xticklabels(ivl) - axis.xaxis.set_ticks_position('bottom') - lbls = axis.get_xticklabels() - if leaf_rotation: 
- matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) - else: - matplotlib.pylab.setp(lbls, 'rotation', - float(_get_tick_rotation(len(ivl)))) - if leaf_font_size: - matplotlib.pylab.setp(lbls, 'size', leaf_font_size) - else: - matplotlib.pylab.setp(lbls, 'size', - float(_get_tick_text_size(len(ivl)))) -# txt.set_fontsize() -# txt.set_rotation(45) - # Make the tick marks invisible because they cover up the links - for line in axis.get_xticklines(): - line.set_visible(False) - elif orientation == 'bottom': - axis.set_ylim([dvw, 0]) - axis.set_xlim([0, ivw]) - xlines = icoords - ylines = dcoords - if no_labels: - axis.set_xticks([]) - axis.set_xticklabels([]) - else: - axis.set_xticks(ivticks) - axis.set_xticklabels(ivl) - lbls = axis.get_xticklabels() - if leaf_rotation: - matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) - else: - matplotlib.pylab.setp(lbls, 'rotation', - float(_get_tick_rotation(p))) - if leaf_font_size: - matplotlib.pylab.setp(lbls, 'size', leaf_font_size) - else: - matplotlib.pylab.setp(lbls, 'size', - float(_get_tick_text_size(p))) - axis.xaxis.set_ticks_position('top') - # Make the tick marks invisible because they cover up the links - for line in axis.get_xticklines(): - line.set_visible(False) - elif orientation == 'left': - axis.set_xlim([0, dvw]) - axis.set_ylim([0, ivw]) - xlines = dcoords - ylines = icoords - if no_labels: - axis.set_yticks([]) - axis.set_yticklabels([]) - else: - axis.set_yticks(ivticks) - axis.set_yticklabels(ivl) - - lbls = axis.get_yticklabels() - if leaf_rotation: - matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) - if leaf_font_size: - matplotlib.pylab.setp(lbls, 'size', leaf_font_size) - axis.yaxis.set_ticks_position('left') - # Make the tick marks invisible because they cover up the - # links - for line in axis.get_yticklines(): - line.set_visible(False) - elif orientation == 'right': - axis.set_xlim([dvw, 0]) - axis.set_ylim([0, ivw]) - xlines = dcoords - ylines = icoords - if no_labels: - 
axis.set_yticks([]) - axis.set_yticklabels([]) - else: - axis.set_yticks(ivticks) - axis.set_yticklabels(ivl) - lbls = axis.get_yticklabels() - if leaf_rotation: - matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) - if leaf_font_size: - matplotlib.pylab.setp(lbls, 'size', leaf_font_size) - axis.yaxis.set_ticks_position('right') - # Make the tick marks invisible because they cover up the links - for line in axis.get_yticklines(): - line.set_visible(False) - - # Let's use collections instead. This way there is a separate legend - # item for each tree grouping, rather than stupidly one for each line - # segment. - colors_used = _remove_dups(color_list) - color_to_lines = {} - for color in colors_used: - color_to_lines[color] = [] - for (xline, yline, color) in zip(xlines, ylines, color_list): - color_to_lines[color].append(zip(xline, yline)) - - colors_to_collections = {} - # Construct the collections. - for color in colors_used: - coll = matplotlib.collections.LineCollection(color_to_lines[color], - colors=(color,)) - colors_to_collections[color] = coll - - # Add all the non-blue link groupings, i.e. those groupings below the - # color threshold. - - for color in colors_used: - if color != 'b': - axis.add_collection(colors_to_collections[color]) - # If there is a blue grouping (i.e., links above the color threshold), - # it should go last. 
- if 'b' in colors_to_collections: - axis.add_collection(colors_to_collections['b']) - - if contraction_marks is not None: - #xs=[x for (x, y) in contraction_marks] - #ys=[y for (x, y) in contraction_marks] - if orientation in ('left', 'right'): - for (x, y) in contraction_marks: - e = matplotlib.patches.Ellipse((y, x), - width=dvw / 100, height=1.0) - axis.add_artist(e) - e.set_clip_box(axis.bbox) - e.set_alpha(0.5) - e.set_facecolor('k') - if orientation in ('top', 'bottom'): - for (x, y) in contraction_marks: - e = matplotlib.patches.Ellipse((x, y), - width=1.0, height=dvw / 100) - axis.add_artist(e) - e.set_clip_box(axis.bbox) - e.set_alpha(0.5) - e.set_facecolor('k') - - #matplotlib.pylab.plot(xs, ys, 'go', markeredgecolor='k', - # markersize=3) - - #matplotlib.pylab.plot(ys, xs, 'go', markeredgecolor='k', - # markersize=3) - matplotlib.pylab.draw_if_interactive() -except ImportError: - _mpl = False - - def _plot_dendrogram(*args, **kwargs): - raise ImportError('matplotlib not available. Plot request denied.') - -_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k'] - - -def set_link_color_palette(palette): - """ - Changes the list of matplotlib color codes to use when coloring - links with the dendrogram color_threshold feature. - - Parameters - ---------- - palette : A list of matplotlib color codes. The order of - the color codes is the order in which the colors are cycled - through when color thresholding in the dendrogram. 
- - """ - - if type(palette) not in (types.ListType, types.TupleType): - raise TypeError("palette must be a list or tuple") - _ptypes = [type(p) == types.StringType for p in palette] - - if False in _ptypes: - raise TypeError("all palette list elements must be color strings") - - for i in list(_link_line_colors): - _link_line_colors.remove(i) - _link_line_colors.extend(list(palette)) - - -def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None, - get_leaves=True, orientation='top', labels=None, - count_sort=False, distance_sort=False, show_leaf_counts=True, - no_plot=False, no_labels=False, color_list=None, - leaf_font_size=None, leaf_rotation=None, leaf_label_func=None, - no_leaves=False, show_contracted=False, - link_color_func=None): - """ - Plots the hierarchical clustering as a dendrogram. - - The dendrogram illustrates how each cluster is - composed by drawing a U-shaped link between a non-singleton - cluster and its children. The height of the top of the U-link is - the distance between its children clusters. It is also the - cophenetic distance between original observations in the two - children clusters. It is expected that the distances in Z[:,2] be - monotonic, otherwise crossings appear in the dendrogram. - - Parameters - ---------- - Z : ndarray - The linkage matrix encoding the hierarchical clustering to - render as a dendrogram. See the ``linkage`` function for more - information on the format of ``Z``. - p : int, optional - The ``p`` parameter for ``truncate_mode``. - truncate_mode : str, optional - The dendrogram can be hard to read when the original - observation matrix from which the linkage is derived is - large. Truncation is used to condense the dendrogram. There - are several modes: - - * None/'none': no truncation is performed (Default) - * 'lastp': the last ``p`` non-singleton formed in the linkage - are the only non-leaf nodes in the linkage; they correspond - to to rows ``Z[n-p-2:end]`` in ``Z``. 
All other - non-singleton clusters are contracted into leaf nodes. - * 'mlab': This corresponds to MATLAB(TM) behavior. (not - implemented yet) - * 'level'/'mtica': no more than ``p`` levels of the - dendrogram tree are displayed. This corresponds to - Mathematica(TM) behavior. - - color_threshold : double, optional - For brevity, let :math:`t` be the ``color_threshold``. - Colors all the descendent links below a cluster node - :math:`k` the same color if :math:`k` is the first node below - the cut threshold :math:`t`. All links connecting nodes with - distances greater than or equal to the threshold are colored - blue. If :math:`t` is less than or equal to zero, all nodes - are colored blue. If ``color_threshold`` is ``None`` or - 'default', corresponding with MATLAB(TM) behavior, the - threshold is set to ``0.7*max(Z[:,2])``. - get_leaves : bool, optional - Includes a list ``R['leaves']=H`` in the result - dictionary. For each :math:`i`, ``H[i] == j``, cluster node - :math:`j` appears in the :math:`i` th position in the - left-to-right traversal of the leaves, where :math:`j < 2n-1` - and :math:`i < n`. - orientation : str, optional - The direction to plot the dendrogram, which can be any - of the following strings: - - * 'top' plots the root at the top, and plot descendent - links going downwards. (default). - * 'bottom'- plots the root at the bottom, and plot descendent - links going upwards. - * 'left'- plots the root at the left, and plot descendent - links going right. - * 'right'- plots the root at the right, and plot descendent - links going left. - - labels : ndarray, optional - By default ``labels`` is ``None`` so the index of the - original observation is used to label the leaf nodes. - Otherwise, this is an :math:`n` -sized list (or tuple). The - ``labels[i]`` value is the text to put under the :math:`i` th - leaf node only if it corresponds to an original observation - and not a non-singleton cluster. 
- count_sort : str or bool, optional - For each node n, the order (visually, from left-to-right) n's - two descendent links are plotted is determined by this - parameter, which can be any of the following values: - - * False: nothing is done. - * 'ascending'/True: the child with the minimum number of - original objects in its cluster is plotted first. - * 'descendent': the child with the maximum number of - original objects in its cluster is plotted first. - - Note ``distance_sort`` and ``count_sort`` cannot both be - ``True``. - - distance_sort : str or bool, optional - For each node n, the order (visually, from left-to-right) n's - two descendent links are plotted is determined by this - parameter, which can be any of the following values: - - * False: nothing is done. - * 'ascending'/True: the child with the minimum distance - between its direct descendents is plotted first. - * 'descending': the child with the maximum distance - between its direct descendents is plotted first. - - Note ``distance_sort`` and ``count_sort`` cannot both be - ``True``. - - show_leaf_counts : bool, optional - When ``True``, leaf nodes representing :math:`k>1` original - observation are labeled with the number of observations they - contain in parentheses. - no_plot : bool, optional - When ``True``, the final rendering is not performed. This is - useful if only the data structures computed for the rendering - are needed or if matplotlib is not available. - no_labels : bool, optional - When ``True``, no labels appear next to the leaf nodes in the - rendering of the dendrogram. - leaf_label_rotation : double, optional - Specifies the angle (in degrees) to rotate the leaf - labels. When unspecified, the rotation based on the number of - nodes in the dendrogram. (Default=0) - leaf_font_size : int, optional - Specifies the font size (in points) of the leaf labels. When - unspecified, the size based on the number of nodes in the - dendrogram. 
- leaf_label_func : lambda or function, optional - When leaf_label_func is a callable function, for each - leaf with cluster index :math:`k < 2n-1`. The function - is expected to return a string with the label for the - leaf. - - Indices :math:`k < n` correspond to original observations - while indices :math:`k \\geq n` correspond to non-singleton - clusters. - - For example, to label singletons with their node id and - non-singletons with their id, count, and inconsistency - coefficient, simply do:: - - # First define the leaf label function. - def llf(id): - if id < n: - return str(id) - else: - return '[%d %d %1.2f]' % (id, count, R[n-id,3]) - - # The text for the leaf nodes is going to be big so force - # a rotation of 90 degrees. - dendrogram(Z, leaf_label_func=llf, leaf_rotation=90) - - show_contracted : bool - When ``True`` the heights of non-singleton nodes contracted - into a leaf node are plotted as crosses along the link - connecting that leaf node. This really is only useful when - truncation is used (see ``truncate_mode`` parameter). - link_color_func : lambda/function - When a callable function, - link_color_function is called with each non-singleton id - corresponding to each U-shaped link it will paint. The - function is expected to return the color to paint the link, - encoded as a matplotlib color string code. - - For example: - - >>> dendrogram(Z, link_color_func=lambda k: colors[k]) - - colors the direct links below each untruncated non-singleton node - ``k`` using ``colors[k]``. - - Returns - ------- - R : dict - A dictionary of data structures computed to render the - dendrogram. Its has the following keys: - - - 'icoords': a list of lists ``[I1, I2, ..., Ip]`` where - ``Ik`` is a list of 4 independent variable coordinates - corresponding to the line that represents the k'th link - painted. 
- - - 'dcoords': a list of lists ``[I2, I2, ..., Ip]`` where - ``Ik`` is a list of 4 independent variable coordinates - corresponding to the line that represents the k'th link - painted. - - - 'ivl': a list of labels corresponding to the leaf nodes. - - - 'leaves': for each i, ``H[i] == j``, cluster node - :math:`j` appears in the :math:`i` th position in the - left-to-right traversal of the leaves, where :math:`j < 2n-1` - and :math:`i < n`. If :math:`j` is less than :math:`n`, the - :math:`i` th leaf node corresponds to an original - observation. Otherwise, it corresponds to a non-singleton - cluster. - - """ - - # Features under consideration. - # - # ... = dendrogram(..., leaves_order=None) - # - # Plots the leaves in the order specified by a vector of - # original observation indices. If the vector contains duplicates - # or results in a crossing, an exception will be thrown. Passing - # None orders leaf nodes based on the order they appear in the - # pre-order traversal. - Z = np.asarray(Z, order='c') - - is_valid_linkage(Z, throw=True, name='Z') - Zs = Z.shape - n = Zs[0] + 1 - if type(p) in (types.IntType, types.FloatType): - p = int(p) - else: - raise TypeError('The second argument must be a number') - - if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None): - raise ValueError('Invalid truncation mode.') - - if truncate_mode == 'lastp' or truncate_mode == 'mlab': - if p > n or p == 0: - p = n - - if truncate_mode == 'mtica' or truncate_mode == 'level': - if p <= 0: - p = np.inf - if get_leaves: - lvs = [] - else: - lvs = None - icoord_list = [] - dcoord_list = [] - color_list = [] - current_color = [0] - currently_below_threshold = [False] - if no_leaves: - ivl = None - else: - ivl = [] - if color_threshold is None or \ - (type(color_threshold) == types.StringType and - color_threshold == 'default'): - color_threshold = max(Z[:, 2]) * 0.7 - R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, - 'leaves': lvs, 'color_list': 
color_list} - props = {'cbt': False, 'cc': 0} - if show_contracted: - contraction_marks = [] - else: - contraction_marks = None - _dendrogram_calculate_info( - Z=Z, p=p, - truncate_mode=truncate_mode, - color_threshold=color_threshold, - get_leaves=get_leaves, - orientation=orientation, - labels=labels, - count_sort=count_sort, - distance_sort=distance_sort, - show_leaf_counts=show_leaf_counts, - i=2 * n - 2, iv=0.0, ivl=ivl, n=n, - icoord_list=icoord_list, - dcoord_list=dcoord_list, lvs=lvs, - current_color=current_color, - color_list=color_list, - currently_below_threshold=currently_below_threshold, - leaf_label_func=leaf_label_func, - contraction_marks=contraction_marks, - link_color_func=link_color_func) - if not no_plot: - mh = max(Z[:, 2]) - _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, - no_labels, color_list, leaf_font_size=leaf_font_size, - leaf_rotation=leaf_rotation, - contraction_marks=contraction_marks) - - return R - - -def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, - i, labels): - # If the leaf id structure is not None and is a list then the caller - # to dendrogram has indicated that cluster id's corresponding to the - # leaf nodes should be recorded. - - if lvs is not None: - lvs.append(int(i)) - - # If leaf node labels are to be displayed... - if ivl is not None: - # If a leaf_label_func has been provided, the label comes from the - # string returned from the leaf_label_func, which is a function - # passed to dendrogram. - if leaf_label_func: - ivl.append(leaf_label_func(int(i))) - else: - # Otherwise, if the dendrogram caller has passed a labels list - # for the leaf nodes, use it. 
- if labels is not None: - ivl.append(labels[int(i - n)]) - else: - # Otherwise, use the id as the label for the leaf.x - ivl.append(str(int(i))) - - -def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, - i, labels, show_leaf_counts): - # If the leaf id structure is not None and is a list then the caller - # to dendrogram has indicated that cluster id's corresponding to the - # leaf nodes should be recorded. - - if lvs is not None: - lvs.append(int(i)) - if ivl is not None: - if leaf_label_func: - ivl.append(leaf_label_func(int(i))) - else: - if show_leaf_counts: - ivl.append("(" + str(int(Z[i - n, 3])) + ")") - else: - ivl.append("") - - -def _append_contraction_marks(Z, iv, i, n, contraction_marks): - _append_contraction_marks_sub(Z, iv, Z[i - n, 0], n, contraction_marks) - _append_contraction_marks_sub(Z, iv, Z[i - n, 1], n, contraction_marks) - - -def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks): - if i >= n: - contraction_marks.append((iv, Z[i - n, 2])) - _append_contraction_marks_sub(Z, iv, Z[i - n, 0], n, contraction_marks) - _append_contraction_marks_sub(Z, iv, Z[i - n, 1], n, contraction_marks) - - -def _dendrogram_calculate_info(Z, p, truncate_mode, \ - color_threshold=np.inf, get_leaves=True, \ - orientation='top', labels=None, \ - count_sort=False, distance_sort=False, \ - show_leaf_counts=False, i=-1, iv=0.0, \ - ivl=[], n=0, icoord_list=[], dcoord_list=[], \ - lvs=None, mhr=False, \ - current_color=[], color_list=[], \ - currently_below_threshold=[], \ - leaf_label_func=None, level=0, - contraction_marks=None, - link_color_func=None): - """ - Calculates the endpoints of the links as well as the labels for the - the dendrogram rooted at the node with index i. iv is the independent - variable value to plot the left-most leaf node below the root node i - (if orientation='top', this would be the left-most x value where the - plotting of this root node i and its descendents should begin). 
- - ivl is a list to store the labels of the leaf nodes. The leaf_label_func - is called whenever ivl != None, labels == None, and - leaf_label_func != None. When ivl != None and labels != None, the - labels list is used only for labeling the the leaf nodes. When - ivl == None, no labels are generated for leaf nodes. - - When get_leaves==True, a list of leaves is built as they are visited - in the dendrogram. - - Returns a tuple with l being the independent variable coordinate that - corresponds to the midpoint of cluster to the left of cluster i if - i is non-singleton, otherwise the independent coordinate of the leaf - node if i is a leaf node. - - Returns - ------- - A tuple (left, w, h, md), where: - - * left is the independent variable coordinate of the center of the - the U of the subtree - - * w is the amount of space used for the subtree (in independent - variable units) - - * h is the height of the subtree in dependent variable units - - * md is the max(Z[*,2]) for all nodes * below and including - the target node. - - """ - if n == 0: - raise ValueError("Invalid singleton cluster count n.") - - if i == -1: - raise ValueError("Invalid root cluster index i.") - - if truncate_mode == 'lastp': - # If the node is a leaf node but corresponds to a non-single cluster, - # it's label is either the empty string or the number of original - # observations belonging to cluster i. 
- if i < 2 * n - p and i >= n: - d = Z[i - n, 2] - _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, - leaf_label_func, i, labels, - show_leaf_counts) - if contraction_marks is not None: - _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) - return (iv + 5.0, 10.0, 0.0, d) - elif i < n: - _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, - leaf_label_func, i, labels) - return (iv + 5.0, 10.0, 0.0, 0.0) - elif truncate_mode in ('mtica', 'level'): - if i > n and level > p: - d = Z[i - n, 2] - _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, - leaf_label_func, i, labels, - show_leaf_counts) - if contraction_marks is not None: - _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) - return (iv + 5.0, 10.0, 0.0, d) - elif i < n: - _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, - leaf_label_func, i, labels) - return (iv + 5.0, 10.0, 0.0, 0.0) - elif truncate_mode in ('mlab',): - pass - - # Otherwise, only truncate if we have a leaf node. - # - # If the truncate_mode is mlab, the linkage has been modified - # with the truncated tree. - # - # Only place leaves if they correspond to original observations. - if i < n: - _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, - leaf_label_func, i, labels) - return (iv + 5.0, 10.0, 0.0, 0.0) - - # !!! Otherwise, we don't have a leaf node, so work on plotting a - # non-leaf node. - # Actual indices of a and b - aa = Z[i - n, 0] - ab = Z[i - n, 1] - if aa > n: - # The number of singletons below cluster a - na = Z[aa - n, 3] - # The distance between a's two direct children. - da = Z[aa - n, 2] - else: - na = 1 - da = 0.0 - if ab > n: - nb = Z[ab - n, 3] - db = Z[ab - n, 2] - else: - nb = 1 - db = 0.0 - - if count_sort == 'ascending' or count_sort == True: - # If a has a count greater than b, it and its descendents should - # be drawn to the right. Otherwise, to the left. 
- if na > nb: - # The cluster index to draw to the left (ua) will be ab - # and the one to draw to the right (ub) will be aa - ua = ab - ub = aa - else: - ua = aa - ub = ab - elif count_sort == 'descending': - # If a has a count less than or equal to b, it and its - # descendents should be drawn to the left. Otherwise, to - # the right. - if na > nb: - ua = aa - ub = ab - else: - ua = ab - ub = aa - elif distance_sort == 'ascending' or distance_sort == True: - # If a has a distance greater than b, it and its descendents should - # be drawn to the right. Otherwise, to the left. - if da > db: - ua = ab - ub = aa - else: - ua = aa - ub = ab - elif distance_sort == 'descending': - # If a has a distance less than or equal to b, it and its - # descendents should be drawn to the left. Otherwise, to - # the right. - if da > db: - ua = aa - ub = ab - else: - ua = ab - ub = aa - else: - ua = aa - ub = ab - - # The distance of the cluster to draw to the left (ua) is uad - # and its count is uan. Likewise, the cluster to draw to the - # right has distance ubd and count ubn. - if ua < n: - uad = 0.0 - uan = 1 - else: - uad = Z[ua - n, 2] - uan = Z[ua - n, 3] - if ub < n: - ubd = 0.0 - ubn = 1 - else: - ubd = Z[ub - n, 2] - ubn = Z[ub - n, 3] - - # Updated iv variable and the amount of space used. 
- (uiva, uwa, uah, uamd) = \ - _dendrogram_calculate_info( - Z=Z, p=p, - truncate_mode=truncate_mode, - color_threshold=color_threshold, - get_leaves=get_leaves, - orientation=orientation, - labels=labels, - count_sort=count_sort, - distance_sort=distance_sort, - show_leaf_counts=show_leaf_counts, - i=ua, iv=iv, ivl=ivl, n=n, - icoord_list=icoord_list, - dcoord_list=dcoord_list, lvs=lvs, - current_color=current_color, - color_list=color_list, - currently_below_threshold=currently_below_threshold, - leaf_label_func=leaf_label_func, - level=level + 1, contraction_marks=contraction_marks, - link_color_func=link_color_func) - - h = Z[i - n, 2] - if h >= color_threshold or color_threshold <= 0: - c = 'b' - - if currently_below_threshold[0]: - current_color[0] = (current_color[0] + 1) % len(_link_line_colors) - currently_below_threshold[0] = False - else: - currently_below_threshold[0] = True - c = _link_line_colors[current_color[0]] - - (uivb, uwb, ubh, ubmd) = \ - _dendrogram_calculate_info( - Z=Z, p=p, - truncate_mode=truncate_mode, - color_threshold=color_threshold, - get_leaves=get_leaves, - orientation=orientation, - labels=labels, - count_sort=count_sort, - distance_sort=distance_sort, - show_leaf_counts=show_leaf_counts, - i=ub, iv=iv + uwa, ivl=ivl, n=n, - icoord_list=icoord_list, - dcoord_list=dcoord_list, lvs=lvs, - current_color=current_color, - color_list=color_list, - currently_below_threshold=currently_below_threshold, - leaf_label_func=leaf_label_func, - level=level + 1, contraction_marks=contraction_marks, - link_color_func=link_color_func) - - # The height of clusters a and b - ah = uad - bh = ubd - - max_dist = max(uamd, ubmd, h) - - icoord_list.append([uiva, uiva, uivb, uivb]) - dcoord_list.append([uah, h, h, ubh]) - if link_color_func is not None: - v = link_color_func(int(i)) - if type(v) != types.StringType: - raise TypeError("link_color_func must return a matplotlib " - "color string!") - color_list.append(v) - else: - color_list.append(c) - 
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist) - - -def is_isomorphic(T1, T2): - """ - - Determines if two different cluster assignments ``T1`` and - ``T2`` are equivalent. - - Parameters - ---------- - T1 : ndarray - An assignment of singleton cluster ids to flat cluster ids. - T2 : ndarray - An assignment of singleton cluster ids to flat cluster ids. - - Returns - ------- - b : bool - Whether the flat cluster assignments ``T1`` and ``T2`` are - equivalent. - - """ - T1 = np.asarray(T1, order='c') - T2 = np.asarray(T2, order='c') - - if type(T1) != np.ndarray: - raise TypeError('T1 must be a numpy array.') - if type(T2) != np.ndarray: - raise TypeError('T2 must be a numpy array.') - - T1S = T1.shape - T2S = T2.shape - - if len(T1S) != 1: - raise ValueError('T1 must be one-dimensional.') - if len(T2S) != 1: - raise ValueError('T2 must be one-dimensional.') - if T1S[0] != T2S[0]: - raise ValueError('T1 and T2 must have the same number of elements.') - n = T1S[0] - d = {} - for i in xrange(0, n): - if T1[i] in d.keys(): - if d[T1[i]] != T2[i]: - return False - else: - d[T1[i]] = T2[i] - return True - - -def maxdists(Z): - """ - MD = maxdists(Z) - - Returns the maximum distance between any cluster for each - non-singleton cluster. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded as a matrix. See - ``linkage`` for more information. - - Returns - ------- - MD : ndarray - A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents - the maximum distance between any cluster (including - singletons) below and including the node with index i. More - specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the - set of all node indices below and including node i. 
- - """ - Z = np.asarray(Z, order='c', dtype=np.double) - is_valid_linkage(Z, throw=True, name='Z') - - n = Z.shape[0] + 1 - MD = np.zeros((n - 1,)) - [Z] = _copy_arrays_if_base_present([Z]) - _hierarchy_wrap.get_max_dist_for_each_cluster_wrap(Z, MD, int(n)) - return MD - - -def maxinconsts(Z, R): - """ - Returns the maximum inconsistency coefficient for each - non-singleton cluster and its descendents. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded as a matrix. See - ``linkage`` for more information. - R : ndarray - The inconsistency matrix. - - Returns - ------- - MI : ndarray - A monotonic ``(n-1)``-sized numpy array of doubles. - - """ - Z = np.asarray(Z, order='c') - R = np.asarray(R, order='c') - is_valid_linkage(Z, throw=True, name='Z') - is_valid_im(R, throw=True, name='R') - - n = Z.shape[0] + 1 - if Z.shape[0] != R.shape[0]: - raise ValueError("The inconsistency matrix and linkage matrix each " - "have a different number of rows.") - MI = np.zeros((n - 1,)) - [Z, R] = _copy_arrays_if_base_present([Z, R]) - _hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MI, int(n), 3) - return MI - - -def maxRstat(Z, R, i): - """ - Returns the maximum statistic for each non-singleton cluster and - its descendents. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded as a matrix. See - ``linkage`` for more information. - R : ndarray - The inconsistency matrix. - i : int - The column of ``R`` to use as the statistic. - - Returns - ------- - MR : ndarray - Calculates the maximum statistic for the i'th column of the - inconsistency matrix ``R`` for each non-singleton cluster - node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where - ``Q(j)`` the set of all node ids corresponding to nodes below - and including ``j``. 
- - """ - Z = np.asarray(Z, order='c') - R = np.asarray(R, order='c') - is_valid_linkage(Z, throw=True, name='Z') - is_valid_im(R, throw=True, name='R') - if type(i) is not types.IntType: - raise TypeError('The third argument must be an integer.') - if i < 0 or i > 3: - raise ValueError('i must be an integer between 0 and 3 inclusive.') - - if Z.shape[0] != R.shape[0]: - raise ValueError("The inconsistency matrix and linkage matrix each " - "have a different number of rows.") - - n = Z.shape[0] + 1 - MR = np.zeros((n - 1,)) - [Z, R] = _copy_arrays_if_base_present([Z, R]) - _hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MR, int(n), i) - return MR - - -def leaders(Z, T): - """ - (L, M) = leaders(Z, T): - - Returns the root nodes in a hierarchical clustering corresponding - to a cut defined by a flat cluster assignment vector ``T``. See - the ``fcluster`` function for more information on the format of ``T``. - - For each flat cluster :math:`j` of the :math:`k` flat clusters - represented in the n-sized flat cluster assignment vector ``T``, - this function finds the lowest cluster node :math:`i` in the linkage - tree Z such that: - - * leaf descendents belong only to flat cluster j - (i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where - :math:`S(i)` is the set of leaf ids of leaf nodes descendent - with cluster node :math:`i`) - - * there does not exist a leaf that is not descendent with - :math:`i` that also belongs to cluster :math:`j` - (i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If - this condition is violated, ``T`` is not a valid cluster - assignment vector, and an exception will be thrown. - - Parameters - ---------- - Z : ndarray - The hierarchical clustering encoded as a matrix. See - ``linkage`` for more information. - T : ndarray - The flat cluster assignment vector. 
- - Returns - ------- - A tuple (L, M) with - - L : ndarray - The leader linkage node id's stored as a k-element 1D - array where :math:`k` is the number of flat clusters found - in ``T``. - - ``L[j]=i`` is the linkage cluster node id that is the - leader of flat cluster with id M[j]. If ``i < n``, ``i`` - corresponds to an original observation, otherwise it - corresponds to a non-singleton cluster. - - For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with - id 8's leader is linkage node 2. - - M : ndarray - The leader linkage node id's stored as a k-element 1D - array where :math:`k` is the number of flat clusters found - in ``T``. This allows the set of flat cluster ids to be - any arbitrary set of :math:`k` integers. - - """ - Z = np.asarray(Z, order='c') - T = np.asarray(T, order='c') - if type(T) != np.ndarray or T.dtype != 'i': - raise TypeError('T must be a one-dimensional numpy array of integers.') - is_valid_linkage(Z, throw=True, name='Z') - if len(T) != Z.shape[0] + 1: - raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') - - Cl = np.unique(T) - kk = len(Cl) - L = np.zeros((kk,), dtype='i') - M = np.zeros((kk,), dtype='i') - n = Z.shape[0] + 1 - [Z, T] = _copy_arrays_if_base_present([Z, T]) - s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n)) - if s >= 0: - raise ValueError(('T is not a valid assignment vector. Error found ' - 'when examining linkage node %d (< 2n-1).') % s) - return (L, M) - - -# These are test functions to help me test the leaders function. 
- -def _leaders_test(Z, T): - tr = to_tree(Z) - _leaders_test_recurs_mark(tr, T) - return tr - - -def _leader_identify(tr, T): - if tr.is_leaf(): - return T[tr.id] - else: - left = tr.get_left() - right = tr.get_right() - lfid = _leader_identify(left, T) - rfid = _leader_identify(right, T) - print 'ndid: %d lid: %d lfid: %d rid: %d rfid: %d' \ - % (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid) - if lfid != rfid: - if lfid != -1: - print 'leader: %d with tag %d' % (left.id, lfid) - if rfid != -1: - print 'leader: %d with tag %d' % (right.id, rfid) - return -1 - else: - return lfid - - -def _leaders_test_recurs_mark(tr, T): - if tr.is_leaf(): - tr.asgn = T[tr.id] - else: - tr.asgn = -1 - _leaders_test_recurs_mark(tr.left, T) - _leaders_test_recurs_mark(tr.right, T) diff --git a/scipy-0.10.1/scipy/cluster/setup.py b/scipy-0.10.1/scipy/cluster/setup.py deleted file mode 100755 index 715bbac5de..0000000000 --- a/scipy-0.10.1/scipy/cluster/setup.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -import sys - -from os.path import join - -if sys.version_info[0] >= 3: - DEFINE_MACROS = [("SCIPY_PY3K", None)] -else: - DEFINE_MACROS = [] - -def configuration(parent_package = '', top_path = None): - from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs - config = Configuration('cluster', parent_package, top_path) - - config.add_data_dir('tests') - - config.add_extension('_vq', - sources=[join('src', 'vq_module.c'), join('src', 'vq.c')], - include_dirs = [get_numpy_include_dirs()], - define_macros=DEFINE_MACROS) - - config.add_extension('_hierarchy_wrap', - sources=[join('src', 'hierarchy_wrap.c'), join('src', 'hierarchy.c')], - include_dirs = [get_numpy_include_dirs()], - define_macros=DEFINE_MACROS) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(maintainer = "SciPy Developers", - author = "Eric Jones", - maintainer_email = "scipy-dev@scipy.org", - description = "Clustering Algorithms 
(Information Theory)", - url = "http://www.scipy.org", - license = "SciPy License (BSD Style)", - **configuration(top_path='').todict() - ) diff --git a/scipy-0.10.1/scipy/cluster/setupscons.py b/scipy-0.10.1/scipy/cluster/setupscons.py deleted file mode 100755 index 069449d2a1..0000000000 --- a/scipy-0.10.1/scipy/cluster/setupscons.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package = '', top_path = None): - from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs - config = Configuration('cluster', parent_package, top_path) - - config.add_data_dir('tests') - - #config.add_extension('_vq', - # sources=[join('src', 'vq_module.c'), join('src', 'vq.c')], - # include_dirs = [get_numpy_include_dirs()]) - config.add_sconscript('SConstruct') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(maintainer = "SciPy Developers", - author = "Eric Jones", - maintainer_email = "scipy-dev@scipy.org", - description = "Clustering Algorithms (Information Theory)", - url = "http://www.scipy.org", - license = "SciPy License (BSD Style)", - **configuration(top_path='').todict() - ) diff --git a/scipy-0.10.1/scipy/cluster/src/common.h b/scipy-0.10.1/scipy/cluster/src/common.h deleted file mode 100644 index 3d12a7c99f..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/common.h +++ /dev/null @@ -1,69 +0,0 @@ -/** - * common.h - * - * Author: Damian Eads - * Date: September 22, 2007 (moved into new file on June 8, 2008) - * - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. - * Adapted for incorporation into Scipy, April 9, 2008. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. 
- * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - Neither the name of the author nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _CLUSTER_COMMON_H -#define _CLUSTER_COMMON_H - -#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y)) -#define CPY_MIN(_x, _y) ((_x < _y) ? 
(_x) : (_y)) - -#define NCHOOSE2(_n) ((_n)*(_n-1)/2) - -#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8) -#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \ - CPY_BITS_PER_CHAR)) -#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \ - ((CPY_BITS_PER_CHAR-1) - \ - ((i) % CPY_BITS_PER_CHAR))) & 0x1) -#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \ - ((0x1) << ((CPY_BITS_PER_CHAR-1) \ - -((i) % CPY_BITS_PER_CHAR)))) -#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \ - ~((0x1) << ((CPY_BITS_PER_CHAR-1) \ - -((i) % CPY_BITS_PER_CHAR)))) - -#ifndef CPY_CEIL_DIV -#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \ - ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1)) -#endif - -#ifdef CPY_DEBUG -#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__) -#else -#define CPY_DEBUG_MSG(...) -#endif - -#endif diff --git a/scipy-0.10.1/scipy/cluster/src/hierarchy.c b/scipy-0.10.1/scipy/cluster/src/hierarchy.c deleted file mode 100644 index 7351103772..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/hierarchy.c +++ /dev/null @@ -1,1623 +0,0 @@ -/** - * hierarchy.c - * - * Author: Damian Eads - * Date: September 22, 2007 - * - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. - * Adapted for incorporation into Scipy, April 9, 2008. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. 
- * - Neither the name of the author nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include - -#include "common.h" - -#define ISCLUSTER(_nd) ((_nd)->id >= n) -#define GETCLUSTER(_id) ((lists + _id - n)) - -/** The number of link stats (for the inconsistency computation) for each - cluster. */ - -#define CPY_NIS 4 - -/** The column offsets for the different link stats for the inconsistency - computation. */ -#define CPY_INS_MEAN 0 -#define CPY_INS_STD 1 -#define CPY_INS_N 2 -#define CPY_INS_INS 3 - -/** The number of linkage stats for each cluster. */ -#define CPY_LIS 4 - -/** The column offsets for the different link stats for the linkage matrix. 
*/ -#define CPY_LIN_LEFT 0 -#define CPY_LIN_RIGHT 1 -#define CPY_LIN_DIST 2 -#define CPY_LIN_CNT 3 - -#include -#include -#include -#include - -#include "hierarchy.h" - - -static NPY_INLINE double euclidean_distance(const double *u, const double *v, int n) { - int i = 0; - double s = 0.0, d; - for (i = 0; i < n; i++) { - d = u[i] - v[i]; - s = s + d * d; - } - return sqrt(s); -} - -void chopmins(int *ind, int mini, int minj, int np) { - int i; - for (i = mini; i < minj - 1; i++) { - ind[i] = ind[i + 1]; - } - for (i = minj - 1; i < np - 2; i++) { - ind[i] = ind[i + 2]; - } - /** CPY_DEBUG_MSG("[Remove mini=%d minj=%d]\n", mini, minj);**/ -} - -void chopmin(int *ind, int minj, int np) { - int i; - for (i = minj; i < np - 1; i++) { - ind[i] = ind[i + 1]; - } - /** CPY_DEBUG_MSG("[Remove mini=%d minj=%d]\n", mini, minj);**/ -} - -void chopmins_ns_ij(double *ind, int mini, int minj, int np) { - int i; - for (i = mini; i < minj - 1; i++) { - ind[i] = ind[i + 1]; - } - for (i = minj - 1; i < np - 2; i++) { - ind[i] = ind[i + 2]; - } -} - -void chopmins_ns_i(double *ind, int mini, int np) { - int i; - for (i = mini; i < np - 1; i++) { - ind[i] = ind[i + 1]; - } -} - -void dist_single(cinfo *info, int mini, int minj, int np, int n) { - double **rows = info->rows; - double *buf = info->buf; - double *bit; - int i; - bit = buf; - for (i = 0; i < mini; i++, bit++) { - *bit = CPY_MIN(*(rows[i] + mini - i - 1), *(rows[i] + minj - i - 1)); - } - for (i = mini + 1; i < minj; i++, bit++) { - *bit = CPY_MIN(*(rows[mini] + i - mini - 1), *(rows[i] + minj - i - 1)); - } - for (i = minj + 1; i < np; i++, bit++) { - *bit = CPY_MIN(*(rows[mini] + i - mini - 1), *(rows[minj] + i - minj - 1)); - } -} - -void dist_complete(cinfo *info, int mini, int minj, int np, int n) { - double **rows = info->rows; - double *buf = info->buf; - double *bit; - int i; - bit = buf; - for (i = 0; i < mini; i++, bit++) { - *bit = CPY_MAX(*(rows[i] + mini - i - 1), *(rows[i] + minj - i - 1)); - } - for (i = 
mini + 1; i < minj; i++, bit++) { - *bit = CPY_MAX(*(rows[mini] + i - mini - 1), *(rows[i] + minj - i - 1)); - } - for (i = minj + 1; i < np; i++, bit++) { - *bit = CPY_MAX(*(rows[mini] + i - mini - 1), *(rows[minj] + i - minj - 1)); - } -} - -void dist_average(cinfo *info, int mini, int minj, int np, int n) { - double **rows = info->rows, *buf = info->buf, *bit; - int *inds = info->ind; - double drx, dsx, mply, rscnt, rc, sc; - int i, xi, xn; - cnode *rn = info->nodes + inds[mini]; - cnode *sn = info->nodes + inds[minj]; - cnode *xnd; - bit = buf; - rc = (double)rn->n; - sc = (double)sn->n; - rscnt = rc + sc; - - for (i = 0; i < mini; i++, bit++) { - /** d(r,x) **/ - drx = *(rows[i] + mini - i - 1); - dsx = *(rows[i] + minj - i - 1); - xi = inds[i]; - xnd = info->nodes + xi; - xn = xnd->n; - mply = (double)1.0 / (((double)xn) * rscnt); - *bit = mply * ((drx * (rc * xn)) + (dsx * (sc * xn))); - } - for (i = mini + 1; i < minj; i++, bit++) { - drx = *(rows[mini] + i - mini - 1); - dsx = *(rows[i] + minj - i - 1); - xi = inds[i]; - xnd = info->nodes + xi; - xn = xnd->n; - mply = (double)1.0 / (((double)xn) * rscnt); - *bit = mply * ((drx * (rc * xn)) + (dsx * (sc * xn))); - } - for (i = minj + 1; i < np; i++, bit++) { - drx = *(rows[mini] + i - mini - 1); - dsx = *(rows[minj] + i - minj - 1); - xi = inds[i]; - xnd = info->nodes + xi; - xn = xnd->n; - mply = (double)1.0 / (((double)xn) * rscnt); - *bit = mply * ((drx * (rc * xn)) + (dsx * (sc * xn))); - } -} - -void dist_centroid(cinfo *info, int mini, int minj, int np, int n) { - double *buf = info->buf, *bit; - int *inds = info->ind; - const double *centroid_tq; - int i, m, xi; - centroid_tq = info->centroids[info->nid]; - bit = buf; - m = info->m; - for (i = 0; i < np; i++, bit++) { - /** d(r,x) **/ - if (i == mini || i == minj) { - bit--; - continue; - } - xi = inds[i]; - *bit = euclidean_distance(info->centroids[xi], centroid_tq, m); - /** CPY_DEBUG_MSG("%5.5f ", *bit);**/ - } - /** CPY_DEBUG_MSG("\n");**/ -} - 
-void combine_centroids(double *centroidResult, - const double *centroidA, const double *centroidB, - double na, double nb, int n) { - int i; - double nr = (double)na + (double)nb; - for (i = 0; i < n; i++) { - centroidResult[i] = ((centroidA[i] * na) + (centroidB[i] * nb)) / nr; - } -} - -void dist_weighted(cinfo *info, int mini, int minj, int np, int n) { - double **rows = info->rows, *buf = info->buf, *bit; - int i; - double drx, dsx; - - bit = buf; - - for (i = 0; i < mini; i++, bit++) { - /** d(r,x) **/ - drx = *(rows[i] + mini - i - 1); - dsx = *(rows[i] + minj - i - 1); - *bit = (drx + dsx) / 2; - } - for (i = mini + 1; i < minj; i++, bit++) { - drx = *(rows[mini] + i - mini - 1); - dsx = *(rows[i] + minj - i - 1); - *bit = (drx + dsx) / 2; - } - for (i = minj + 1; i < np; i++, bit++) { - drx = *(rows[mini] + i - mini - 1); - dsx = *(rows[minj] + i - minj - 1); - *bit = (drx + dsx) / 2; - } - /** CPY_DEBUG_MSG("\n");**/ -} - -void dist_ward(cinfo *info, int mini, int minj, int np, int n) { - double **rows = info->rows, *buf = info->buf, *bit; - int *inds = info->ind; - const double *centroid_tq; - int i, m, xi, rind, sind; - double drx, dsx, rf, sf, xf, xn, rn, sn, drsSq; - cnode *newNode; - cnode *xnd; - - rind = inds[mini]; - sind = inds[minj]; - rn = (double)info->nodes[rind].n; - sn = (double)info->nodes[sind].n; - newNode = info->nodes + info->nid; - drsSq = newNode->d; - drsSq = drsSq * drsSq; - centroid_tq = info->centroids[info->nid]; - bit = buf; - m = info->m; - - for (i = 0; i < mini; i++, bit++) { - /** d(r,x) **/ - drx = *(rows[i] + mini - i - 1); - dsx = *(rows[i] + minj - i - 1); - xi = inds[i]; - xnd = info->nodes + xi; - xn = xnd->n; - rf = (rn + xn) / (rn + sn + xn); - sf = (sn + xn) / (rn + sn + xn); - xf = -xn / (rn + sn + xn); - *bit = sqrt(rf * (drx * drx) + - sf * (dsx * dsx) + - xf * drsSq); - - } - for (i = mini + 1; i < minj; i++, bit++) { - drx = *(rows[mini] + i - mini - 1); - dsx = *(rows[i] + minj - i - 1); - xi = inds[i]; - xnd 
= info->nodes + xi; - xn = xnd->n; - rf = (rn + xn) / (rn + sn + xn); - sf = (sn + xn) / (rn + sn + xn); - xf = -xn / (rn + sn + xn); - *bit = sqrt(rf * (drx * drx) + - sf * (dsx * dsx) + - xf * drsSq); - } - for (i = minj + 1; i < np; i++, bit++) { - drx = *(rows[mini] + i - mini - 1); - dsx = *(rows[minj] + i - minj - 1); - xi = inds[i]; - xnd = info->nodes + xi; - xn = xnd->n; - rf = (rn + xn) / (rn + sn + xn); - sf = (sn + xn) / (rn + sn + xn); - xf = -xn / (rn + sn + xn); - *bit = sqrt(rf * (drx * drx) + - sf * (dsx * dsx) + - xf * drsSq); - } - /** CPY_DEBUG_MSG("\n");**/ -} - - -void print_dm(const double **rows, int np) { - int i, j, k; - const double *row; - CPY_DEBUG_MSG("[DM, np=%d\n", np); - for (i = 0; i < np - 1; i++) { - row = rows[i]; - for (j = 0; j <= i; j++) { - CPY_DEBUG_MSG("%5.5f ", 0.0); - } - - for (k = 0, j = i + 1; j < np; j++, k++) { - CPY_DEBUG_MSG("%5.5f ", *(row + k)); - } - CPY_DEBUG_MSG("|j=%d|\n", i + 1); - } -} - -void print_ind(const int *inds, int np) { - int i; - CPY_DEBUG_MSG("[IND, np=%d || ", np); - for (i = 0; i < np; i++) { - CPY_DEBUG_MSG("%d ", inds[i]); - } - CPY_DEBUG_MSG("]\n"); -} - -void print_vec(const double *d, int n) { - int i; - CPY_DEBUG_MSG("["); - for (i = 0; i < n; i++) { - CPY_DEBUG_MSG("%5.5f ", d[i]); - } - CPY_DEBUG_MSG("]"); -} - - -/** - * notes to self: - * dm: The distance matrix. - * Z: The result of the linkage, a (n-1) x 3 matrix. - * X: The original observations as row vectors (=NULL if not needed). - * n: The number of objects. - * ml: A boolean indicating whether a list of objects in the forest - * clusters should be maintained. - * kc: Keep track of the centroids. - * - * Return values: - * 0: success - * -1: out of memory--malloc() failed. - */ -int linkage(double *dm, double *Z, double *X, - int m, int n, int ml, int kc, distfunc dfunc, - int method) { - int i, j, k, t, np, nid, mini, minj, npc2; - double min, ln, rn, qn; - int *ind = NULL; - /** An iterator through the distance matrix. 
*/ - double *dmit, *buf = NULL; - - int *rowsize = NULL; - - /** Temporary array to store modified distance matrix. */ - double *dmt = NULL, **rows = NULL, *Zrow; - double *centroidsData = NULL; - double **centroids = NULL; - const double *centroidL, *centroidR; - double *centroid; - clist *lists = NULL, *listL, *listR, *listC; - clnode *lnodes = NULL; - cnode *nodes = NULL, *node; - - cinfo info; - - int result = -1; - - /** The next two are only necessary for euclidean distance methods. */ - if (ml) { - lists = (clist*)malloc(sizeof(clist) * (n-1)); - if (!lists) goto finished; - lnodes = (clnode*)malloc(sizeof(clnode) * n); - if (!lnodes) goto finished; - } - else { - lists = 0; - lnodes = 0; - } - if (kc) { - centroids = (double**)malloc(sizeof(double*) * (2 * n)); - if (!centroids) goto finished; - centroidsData = (double*)malloc(sizeof(double) * n * m); - if (!centroidsData) goto finished; - for (i = 0; i < n; i++) { - centroids[i] = X + i * m; - } - for (i = 0; i < n; i++) { - centroids[i+n] = centroidsData + i * m; - } - } - else { - centroids = 0; - centroidsData = 0; - } - - nodes = (cnode*)malloc(sizeof(cnode) * (n * 2) - 1); - if (!nodes) goto finished; - ind = (int*)malloc(sizeof(int) * n); - if (!ind) goto finished; - dmt = (double*)malloc(sizeof(double) * NCHOOSE2(n)); - if (!dmt) goto finished; - buf = (double*)malloc(sizeof(double) * n); - if (!buf) goto finished; - rows = (double**)malloc(sizeof(double*) * n); - if (!rows) goto finished; - rowsize = (int*)malloc(sizeof(int) * n); - if (!rowsize) goto finished; - memcpy(dmt, dm, sizeof(double) * NCHOOSE2(n)); - - info.X = X; - info.m = m; - info.n = n; - info.nodes = nodes; - info.ind = ind; - info.dmt = dmt; - info.buf = buf; - info.rows = rows; - info.rowsize = rowsize; - info.dm = dm; - info.centroids = centroids; - if (kc) { - info.centroidBuffer = centroids[2*n - 1]; - } - else { - info.centroidBuffer = 0; - } - info.lists = lists; - for (i = 0; i < n; i++) { - ind[i] = i; - node = nodes + i; 
- node->left = 0; - node->right = 0; - node->id = i; - node->n = 1; - node->d = 0.0; - rowsize[i] = n - 1 - i; - } - rows[0] = dmt; - for (i = 1; i < n; i++) { - rows[i] = rows[i-1] + n - i; - } - - if (ml) { - for (i = 0; i < n; i++) { - (lnodes + i)->val = nodes + i; - (lnodes + i)->next = 0; - } - } - - for (k = 0, nid = n; k < n - 1; k++, nid++) { - info.nid = nid; - np = n - k; - npc2 = NCHOOSE2(np); - /** CPY_DEBUG_MSG("k=%d, nid=%d, n=%d np=%d\n", k, nid, n, np);**/ - min = dmt[0]; - mini = 0; - minj = 1; - /** Note that mini < minj since j > i is always true. */ - for (i = 0; i < np - 1; i++) { - dmit = rows[i]; - for (j = i + 1; j < np; j++, dmit++) { - if (*dmit <= min) { - min = *dmit; - mini = i; - minj = j; - } - } - } - - node = nodes + nid; - node->left = nodes + ind[mini]; - node->right = nodes + ind[minj]; - ln = (double)node->left->n; - rn = (double)node->right->n; - qn = ln + rn; - node->n = node->left->n + node->right->n; - node->d = min; - node->id = nid; - - Zrow = Z + (k * CPY_LIS); - Zrow[CPY_LIN_LEFT] = node->left->id; - Zrow[CPY_LIN_RIGHT] = node->right->id; - Zrow[CPY_LIN_DIST] = min; - Zrow[CPY_LIN_CNT] = node->n; - - /** fCPY_DEBUG_MSG(stderr, - "[lid=%d, rid=%d, llid=%d, rrid=%d m=%5.8f]", - node->left->id, node->right->id, ind[mini], ind[minj], min);**/ - - if (ml) { - listC = GETCLUSTER(nid); - if (ISCLUSTER(node->left) != 0) { - listL = GETCLUSTER(node->left->id); - if (ISCLUSTER(node->right) != 0) { - listR = GETCLUSTER(node->right->id); - listL->tail->next = listR->head; - listC->tail = listR->tail; - listR->tail->next = 0; - } - else { - listC->tail = lnodes + node->right->id; - listL->tail->next = listC->tail; - listC->tail->next = 0; - } - listC->head = listL->head; - } - else { - listC->head = lnodes + node->left->id; - if (ISCLUSTER(node->right)) { - listR = GETCLUSTER(node->right->id); - listC->head->next = listR->head; - listC->tail = listR->tail; - listC->tail->next = 0; - } - else { - listC->tail = lnodes + 
node->right->id; - listC->tail->next = 0; - listC->head->next = listC->tail; - } - } - } - if (kc) { - centroidL = centroids[ind[mini]]; - centroidR = centroids[ind[minj]]; - centroid = centroids[nid]; - switch(method) { - case CPY_LINKAGE_MEDIAN: - for (t = 0; t < m; t++) { - centroid[t] = (centroidL[t] * 0.5 + centroidR[t] * 0.5); - } - break; - case CPY_LINKAGE_CENTROID: - case CPY_LINKAGE_WARD: - default: - for (t = 0; t < m; t++) { - centroid[t] = (centroidL[t] * ln + centroidR[t] * rn) / qn; - } - break; - } - /** CPY_DEBUG_MSG("L: "); - print_vec(centroidL, m); - CPY_DEBUG_MSG("\nR: "); - print_vec(centroidR, m); - CPY_DEBUG_MSG("\nT: "); - print_vec(centroid, m);**/ - } - - /** print_dm(rows, np);**/ - /** dfunc(buf, rows, mini, minj, np, dm, n, ind, nodes);**/ - dfunc(&info, mini, minj, np, n); - - /** For these rows, we must remove, i and j but leave all unused space - at the end. This reduces their size by two.*/ - for (i = 0; i < mini; i++) { - chopmins_ns_ij(rows[i], mini - i - 1, minj - i - 1, rowsize[i]); - } - - /** We skip the i'th row. For rows i+1 up to j-1, we just remove j. */ - for (i = mini + 1; i < minj; i++) { - chopmins_ns_i(rows[i], minj - i - 1, rowsize[i]); - } - - /** For rows 0 to mini - 1, we move them down the matrix, leaving the - first row free. */ - /** for (i = mini; i > 0; i--) { - memcpy(rows[i], rows[i-1], sizeof(double) * rowsize[i]-k); - }**/ - - for (i = mini; i < minj - 1; i++) { - memcpy(rows[i], rows[i+1], sizeof(double) * (rowsize[i+1])); - } - - /** For rows mini+1 to minj-1, we do nothing since they are in the - right place for the next iteration. For rows minj+1 onward, - we move them to the right. */ - - for (i = minj - 1; i < np - 2; i++) { - memcpy(rows[i], rows[i+2], sizeof(double) * (rowsize[i+2])); - } - - /** Rows i+1 to j-1 lose one unit of space, so we move them up. */ - /** Rows j to np-1 lose no space. We do nothing to them. 
*/ - - /** memcpy(rows[0], buf, sizeof(double) * rowsize[0] - k);*/ - - for (i = 0; i < np - 2; i++) { - *(rows[i] + np - 3 - i) = buf[i]; - } - - /** print_dm(rows, np - 1); - print_ind(ind, np);**/ - chopmins(ind, mini, minj, np); - ind[np - 2] = nid; - /** print_ind(ind, np - 1);**/ - } - result = 0; - -finished: - free(lists); - free(lnodes); - free(nodes); - free(ind); - free(dmt); - free(buf); - free(rows); - free(rowsize); - free(centroidsData); - free(centroids); - return result; -} - -/** Trying to reimplement so that output is consistent with MATLAB's in - cases where there are is than one correct choice to make at each - iteration of the algorithm. This implementation is not active. - - XXX linkage_alt is not used. If it ever does get used, it needs to - be updated to check the results of any calls to malloc(). -*/ - -void linkage_alt(double *dm, double *Z, double *X, - int m, int n, int ml, int kc, distfunc dfunc, - int method) { - int i, j, k, t, np, nid, mini, minj, npc2; - double min, ln, rn, qn; - int *ind; - /** An iterator through the distance matrix. */ - double *dmit, *buf; - - int *rowsize; - - /** Temporary array to store modified distance matrix. */ - double *dmt, **rows, *Zrow; - double *centroidsData; - double **centroids; - const double *centroidL, *centroidR; - double *centroid; - clist *lists, *listL, *listR, *listC; - clnode *lnodes; - cnode *nodes, *node; - - cinfo info; - - /** The next two are only necessary for euclidean distance methods. 
*/ - if (ml) { - lists = (clist*)malloc(sizeof(clist) * (n-1)); - lnodes = (clnode*)malloc(sizeof(clnode) * n); - } - else { - lists = 0; - lnodes = 0; - } - if (kc) { - centroids = (double**)malloc(sizeof(double*) * (2 * n)); - centroidsData = (double*)malloc(sizeof(double) * n * m); - for (i = 0; i < n; i++) { - centroids[i] = X + i * m; - } - for (i = 0; i < n; i++) { - centroids[i+n] = centroidsData + i * m; - } - } - else { - centroids = 0; - centroidsData = 0; - } - - nodes = (cnode*)malloc(sizeof(cnode) * (n * 2) - 1); - ind = (int*)malloc(sizeof(int) * n); - dmt = (double*)malloc(sizeof(double) * NCHOOSE2(n)); - buf = (double*)malloc(sizeof(double) * n); - rows = (double**)malloc(sizeof(double*) * n); - rowsize = (int*)malloc(sizeof(int) * n); - memcpy(dmt, dm, sizeof(double) * NCHOOSE2(n)); - - info.X = X; - info.m = m; - info.n = n; - info.nodes = nodes; - info.ind = ind; - info.dmt = dmt; - info.buf = buf; - info.rows = rows; - info.rowsize = rowsize; - info.dm = dm; - info.centroids = centroids; - if (kc) { - info.centroidBuffer = centroids[2*n - 1]; - } - else { - info.centroidBuffer = 0; - } - info.lists = lists; - for (i = 0; i < n; i++) { - ind[i] = i; - node = nodes + i; - node->left = 0; - node->right = 0; - node->id = i; - node->n = 1; - node->d = 0.0; - rowsize[i] = n - 1 - i; - } - rows[0] = dmt; - for (i = 1; i < n; i++) { - rows[i] = rows[i-1] + n - i; - } - - if (ml) { - for (i = 0; i < n; i++) { - (lnodes + i)->val = nodes + i; - (lnodes + i)->next = 0; - } - } - - for (k = 0, nid = n; k < n - 1; k++, nid++) { - info.nid = nid; - np = n - k; - npc2 = NCHOOSE2(np); - /** CPY_DEBUG_MSG("k=%d, nid=%d, n=%d np=%d\n", k, nid, n, np);**/ - min = dmt[0]; - mini = 0; - minj = 1; - /** Note that mini < minj since j > i is always true. 
*/ - /** BEGIN NEW CODE **/ - for (i = 0; i < np - 1; i++) { - dmit = rows[i]; - for (j = i + 1; j < np; j++, dmit++) { - if (*dmit < min) { - min = *dmit; - mini = i; - minj = j; - } - } - } - - node = nodes + nid; - node->left = nodes + ind[mini]; - node->right = nodes + ind[minj]; - ln = (double)node->left->n; - rn = (double)node->right->n; - qn = ln + rn; - node->n = node->left->n + node->right->n; - node->d = min; - node->id = nid; - - Zrow = Z + (k * CPY_LIS); - Zrow[CPY_LIN_LEFT] = node->left->id; - Zrow[CPY_LIN_RIGHT] = node->right->id; - Zrow[CPY_LIN_DIST] = min; - Zrow[CPY_LIN_CNT] = node->n; - - /** fprintf(stderr, - "[lid=%d, rid=%d, llid=%d, rrid=%d m=%5.8f]", - node->left->id, node->right->id, ind[mini], ind[minj], min);**/ - - if (ml) { - listC = GETCLUSTER(nid); - if (ISCLUSTER(node->left) != 0) { - listL = GETCLUSTER(node->left->id); - if (ISCLUSTER(node->right) != 0) { - listR = GETCLUSTER(node->right->id); - listL->tail->next = listR->head; - listC->tail = listR->tail; - listR->tail->next = 0; - } - else { - listC->tail = lnodes + node->right->id; - listL->tail->next = listC->tail; - listC->tail->next = 0; - } - listC->head = listL->head; - } - else { - listC->head = lnodes + node->left->id; - if (ISCLUSTER(node->right)) { - listR = GETCLUSTER(node->right->id); - listC->head->next = listR->head; - listC->tail = listR->tail; - listC->tail->next = 0; - } - else { - listC->tail = lnodes + node->right->id; - listC->tail->next = 0; - listC->head->next = listC->tail; - } - } - } - if (kc) { - centroidL = centroids[ind[mini]]; - centroidR = centroids[ind[minj]]; - centroid = centroids[nid]; - switch(method) { - case CPY_LINKAGE_MEDIAN: - for (t = 0; t < m; t++) { - centroid[t] = (centroidL[t] * 0.5 + centroidR[t] * 0.5); - } - break; - case CPY_LINKAGE_CENTROID: - case CPY_LINKAGE_WARD: - default: - for (t = 0; t < m; t++) { - centroid[t] = (centroidL[t] * ln + centroidR[t] * rn) / qn; - } - break; - } - /** CPY_DEBUG_MSG("L: "); - print_vec(centroidL, 
m); - CPY_DEBUG_MSG("\nR: "); - print_vec(centroidR, m); - CPY_DEBUG_MSG("\nT: "); - print_vec(centroid, m);**/ - } - - /** print_dm(rows, np);**/ - /** dfunc(buf, rows, mini, minj, np, dm, n, ind, nodes);**/ - dfunc(&info, mini, minj, np, n); - - /** For these rows, we must remove, i and j but leave all unused space - at the end. This reduces their size by two.*/ - for (i = 0; i < minj; i++) { - chopmins_ns_i(rows[i], minj - i - 1, rowsize[i]); - } - - /** We skip the i'th row. For rows i+1 up to j-1, we just remove j. */ - /**for (i = mini + 1; i < minj; i++) { - chopmins_ns_i(rows[i], minj - i - 1, rowsize[i]); - }**/ - - /** For rows 0 to mini - 1, we move them down the matrix, leaving the - first row free. */ - /**for (i = mini; i > 0; i--) { - memcpy(rows[i], rows[i-1], sizeof(double) * rowsize[i]-k); - } - - for (i = mini; i < minj - 1; i++) { - memcpy(rows[i], rows[i+1], sizeof(double) * (rowsize[i+1])); - }**/ - - /** For rows mini+1 to minj-1, we do nothing since they are in the - right place for the next iteration. For rows minj+1 onward, - we move them to the right. */ - - for (i = minj; i < np - 1; i++) { - memcpy(rows[i], rows[i+1], sizeof(double) * (rowsize[i+1])); - } - - /** Rows i+1 to j-1 lose one unit of space, so we move them up. */ - /** Rows j to np-1 lose no space. We do nothing to them. 
*/ - /** memcpy(rows[0], buf, sizeof(double) * rowsize[0] - k);*/ - - for (i = 0; i < mini; i++) { - *(rows[i] + mini - i - 1) = buf[i]; - } - - for (i = mini + 1; i < np - 2; i++) { - *(rows[mini] + i - mini - 1) = buf[i-1]; - } - - /** print_dm(rows, np - 1); - print_ind(ind, np);**/ - chopmin(ind, minj, np); - ind[mini] = nid; - /** print_ind(ind, np - 1);**/ - } - free(lists); - free(lnodes); - free(nodes); - free(ind); - free(dmt); - free(buf); - free(rows); - free(rowsize); - free(centroidsData); - free(centroids); -} - -void cpy_to_tree(const double *Z, cnode **tnodes, int n) { - const double *row; - cnode *node; - cnode *nodes; - int i; - nodes = (cnode*)malloc(sizeof(cnode) * (n * 2) - 1); - *tnodes = nodes; - for (i = 0; i < n; i++) { - node = nodes + i; - node->left = 0; - node->right = 0; - node->id = i; - node->n = 1; - node->d = 0.0; - } - for (i = 0; i < n - 1; i++) { - node = nodes + i + n; - row = Z + (i * CPY_LIS); - node->id = i + n; - node->left = nodes + (int)row[CPY_LIN_LEFT]; - node->right = nodes + (int)row[CPY_LIN_RIGHT]; - node->d = row[CPY_LIN_DIST]; - node->n = (int)row[CPY_LIN_CNT]; - /** CPY_DEBUG_MSG("l: %d r: %d d: %5.5f n: %d\n", (int)row[0], - (int)row[1], row[2], (int)row[3]);**/ - } -} - -NPY_INLINE void set_dist_entry(double *d, double val, int i, int j, int n) { - if (i < j) { - *(d + (NCHOOSE2(n)-NCHOOSE2(n - i)) + j) = val; - } - if (j < i) { - *(d + (NCHOOSE2(n)-NCHOOSE2(n - j)) + i) = val; - } -} - -void cophenetic_distances(const double *Z, double *d, int n) { - int *curNode, *left; - int ndid, lid, rid, i, j, k, t = 0, ln, rn, ii, jj, nc2; - unsigned char *lvisited, *rvisited; - const double *Zrow; - int *members = (int*)malloc(n * sizeof(int)); - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - k = 0; - curNode = (int*)malloc(n * sizeof(int)); - left = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - curNode[k] = (n * 2) - 2; - left[k] = 0; - nc2 = 
NCHOOSE2(n); - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - if (lid >= n) { - ln = (int)*(Z + (CPY_LIS * (lid-n)) + CPY_LIN_CNT); - } - else { - ln = 1; - } - if (rid >= n) { - rn = (int)*(Z + (CPY_LIS * (rid-n)) + CPY_LIN_CNT); - } - else { - rn = 1; - } - - /** CPY_DEBUG_MSG("[fp] ndid=%d, ndid-n=%d, k=%d, lid=%d, rid=%d\n", - ndid, ndid-n, k, lid, rid);**/ - - if (lid >= n && !CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - curNode[k+1] = lid; - left[k+1] = left[k]; - k++; - continue; - } - else if (lid < n) { - members[left[k]] = lid; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid-n)) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - left[k+1] = left[k] + ln; - k++; - continue; - } - else if (rid < n) { - members[left[k]+ln] = rid; - } - - /** If it's not a leaf node, and we've visited both children, - record the final mean in the table. 
*/ - if (ndid >= n) { - for (ii = 0; ii < ln; ii++) { - i = *(members + left[k] + ii); - for (jj = 0; jj < rn; jj++) { - j = *(members + left[k] + ln + jj); - if (i < j) { - t = nc2 - NCHOOSE2(n - i) + (j - i - 1); - } - if (j < i) { - t = nc2 - NCHOOSE2(n - j) + (i - j - 1); - } - d[t] = Zrow[CPY_LIN_DIST]; - /** CPY_DEBUG_MSG("i=%d j=%d k=%d d=%5.5f \n", i, j, k, dist);**/ - } - } - } - k--; - } - free(members); - free(left); - free(curNode); - free(lvisited); - free(rvisited); -} - -void inconsistency_calculation_alt(const double *Z, double *R, int n, int d) { - int *curNode; - int ndid, lid, rid, i, k; - unsigned char *lvisited, *rvisited; - const double *Zrow; - double *Rrow; - double levelSum, levelStdSum; - int levelCnt; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - k = 0; - curNode = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - /** for each node in the original linkage matrix. */ - for (i = 0; i < n - 1; i++) { - /** the current depth j */ - k = 0; - levelSum = 0.0; - levelCnt = 0; - levelStdSum = 0.0; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - curNode[0] = i; - for (k = 0; k >= 0;) { - ndid = curNode[k]; - Zrow = Z + ((ndid) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - /** CPY_DEBUG_MSG("[fp] ndid=%d, ndid-n=%d, k=%d, lid=%d, rid=%d\n", - ndid, ndid, k, lid, rid);**/ - if (k < d - 1) { - if (lid >= n && !CPY_GET_BIT(lvisited, ndid)) { - CPY_SET_BIT(lvisited, ndid); - k++; - curNode[k] = lid-n; - continue; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid)) { - CPY_SET_BIT(rvisited, ndid); - k++; - curNode[k] = rid-n; - continue; - } - } - levelCnt++; - levelSum += Zrow[CPY_LIN_DIST]; - levelStdSum += Zrow[CPY_LIN_DIST] * Zrow[CPY_LIN_DIST]; - /**CPY_DEBUG_MSG(" Using range %d to %d, levelCnt[k]=%d\n", lb, ub, levelCnt[k]);**/ - /** Let the count and sum slots be used for the next newly visited - node. 
*/ - k--; - } - Rrow = R + (CPY_NIS * i); - Rrow[CPY_INS_N] = (double)levelCnt; - Rrow[CPY_INS_MEAN] = levelSum / levelCnt; - if (levelCnt < 2) { - Rrow[CPY_INS_STD] = (levelStdSum - (levelSum * levelSum)) / levelCnt; - } - else { - Rrow[CPY_INS_STD] = (levelStdSum - ((levelSum * levelSum) / levelCnt)) / (levelCnt - 1); - } - Rrow[CPY_INS_STD] = sqrt(CPY_MAX(0, Rrow[CPY_INS_STD])); - if (Rrow[CPY_INS_STD] > 0) { - Rrow[CPY_INS_INS] = (Zrow[CPY_LIN_DIST] - Rrow[CPY_INS_MEAN]) / Rrow[CPY_INS_STD]; - } - } - - free(curNode); - free(lvisited); - free(rvisited); -} - -void calculate_cluster_sizes(const double *Z, double *cs, int n) { - int i, j, k, q; - const double *row; - for (k = 0; k < n - 1; k++) { - row = Z + (k * 3); - i = (int)row[CPY_LIN_LEFT]; - j = (int)row[CPY_LIN_RIGHT]; - /** If the left node is a non-singleton, add its count. */ - if (i >= n) { - q = i - n; - cs[k] += cs[q]; - } - /** Otherwise just add 1 for the leaf. */ - else { - cs[k] += 1.0; - } - /** If the right node is a non-singleton, add its count. */ - if (j >= n) { - q = j - n; - cs[k] += cs[q]; - } - /** Otherwise just add 1 for the leaf. */ - else { - cs[k] += 1.0; - } - CPY_DEBUG_MSG("i=%d, j=%d, cs[%d]=%d\n", i, j, k, (int)cs[k]); - } -} - -/** Returns an array of original observation indices (pre-order traversal). 
*/ -void form_member_list(const double *Z, int *members, int n) { - int *curNode, *left; - int ndid, lid, rid, k, ln, rn; - unsigned char *lvisited, *rvisited; - const double *Zrow; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - - k = 0; - curNode = (int*)malloc(n * sizeof(int)); - left = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - curNode[k] = (n * 2) - 2; - left[k] = 0; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - if (lid >= n) { - ln = (int)*(Z + (CPY_LIS * (lid-n)) + CPY_LIN_CNT); - } - else { - ln = 1; - } - if (rid >= n) { - rn = (int)*(Z + (CPY_LIS * (rid-n)) + CPY_LIN_CNT); - } - else { - rn = 1; - } - - /** CPY_DEBUG_MSG("[fp] ndid=%d, ndid-n=%d, k=%d, lid=%d, rid=%d\n", - ndid, ndid-n, k, lid, rid);**/ - - if (lid >= n && !CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - curNode[k+1] = lid; - left[k+1] = left[k]; - k++; - continue; - } - else if (lid < n) { - members[left[k]] = lid; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid-n)) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - left[k+1] = left[k] + ln; - k++; - continue; - } - else if (rid < n) { - members[left[k]+ln] = rid; - } - k--; - } - free(curNode); - free(left); - free(lvisited); - free(rvisited); -} - -void form_flat_clusters_from_in(const double *Z, const double *R, int *T, - double cutoff, int n) { - double *max_inconsists = (double*)malloc(sizeof(double) * n); - get_max_Rfield_for_each_cluster(Z, R, max_inconsists, n, 3); - form_flat_clusters_from_monotonic_criterion(Z, max_inconsists, T, cutoff, n); - free(max_inconsists); -} - -void form_flat_clusters_from_dist(const double *Z, int *T, - double cutoff, int n) { - double *max_dists = (double*)malloc(sizeof(double) * n); - get_max_dist_for_each_cluster(Z, max_dists, n); - 
CPY_DEBUG_MSG("cupid: n=%d cutoff=%5.5f MD[0]=%5.5f MD[n-1]=%5.5f\n", n, cutoff, max_dists[0], max_dists[n-2]); - form_flat_clusters_from_monotonic_criterion(Z, max_dists, T, cutoff, n); - free(max_dists); -} - -void form_flat_clusters_maxclust_dist(const double *Z, int *T, int n, int mc) { - - double *MD = (double*)malloc(sizeof(double) * n); - get_max_dist_for_each_cluster(Z, MD, n); - CPY_DEBUG_MSG("fumble: n=%d mc=%d MD[0]=%5.5f MD[n-1]=%5.5f\n", n, mc, MD[0], MD[n-2]); - form_flat_clusters_maxclust_monocrit(Z, MD, T, n, mc); - free(MD); -} - -/** form flat clusters by thresholding a monotonic criterion. */ -void form_flat_clusters_from_monotonic_criterion(const double *Z, - const double *mono_crit, - int *T, double cutoff, int n) { - int *curNode; - int ndid, lid, rid, k, ms, nc; - unsigned char *lvisited, *rvisited; - double max_crit; - const double *Zrow; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - - curNode = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - - /** number of clusters formed so far. */ - nc = 0; - /** are we in part of a tree below the cutoff? 
.*/ - ms = -1; - k = 0; - curNode[k] = (n * 2) - 2; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - ms = -1; - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - max_crit = mono_crit[ndid-n]; - CPY_DEBUG_MSG("cutoff: %5.5f maxc: %5.5f nc: %d\n", cutoff, max_crit, nc); - if (ms == -1 && max_crit <= cutoff) { - CPY_DEBUG_MSG("leader: i=%d\n", ndid); - ms = k; - nc++; - } - if (lid >= n && !CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - curNode[k+1] = lid; - k++; - continue; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid-n)) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - k++; - continue; - } - if (ndid >= n) { - if (lid < n) { - if (ms == -1) { - nc++; - T[lid] = nc; - } - else { - T[lid] = nc; - } - } - if (rid < n) { - if (ms == -1) { - nc++; - T[rid] = nc; - } - else { - T[rid] = nc; - } - } - if (ms == k) { - ms = -1; - } - } - k--; - } - - free(curNode); - free(lvisited); - free(rvisited); -} - -void form_flat_clusters_maxclust_monocrit(const double *Z, - const double *mono_crit, - int *T, int n, int mc) { - int *curNode; - int ndid, lid, rid, k, nc, g, ms; - unsigned char *lvisited, *rvisited; - const double *Zrow; - double thresh, maxmono_crit; - /** The maximum unsuccessful distance is initially -1.0 (hack). */ - double max_illegal = -1.0; - double min_legal = 0.0; - int min_legal_nc = 1; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - k = 0; - - min_legal = mono_crit[n-2]; - curNode = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - curNode[k] = (n * 2) - 2; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - - /** number of clusters formed so far. */ - nc = 0; - - CPY_DEBUG_MSG("[BEGIN] min legal: %5.5f nc: %d mc: %d\n", min_legal, min_legal_nc, mc); - - for (g = n - 2; g >= 0; g--) { - thresh = mono_crit[g]; - /** 1. 
If the threshold is <= the minimum threshold we've tried - unsuccessfully, skip the threshold. (or) - - 2. If the threshold is > the minimum legal threshold, it is - less optimal so skip it. */ - if (thresh > min_legal) { /** commented out : && thresh <= max_illegal **/ - continue; - } - k = 0; - curNode[k] = (n * 2) - 2; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - nc = 0; - ms = -1; - /** See if the threshold MD[g] works. **/ - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - maxmono_crit = mono_crit[ndid-n]; - /** CPY_DEBUG_MSG("cutoff: %5.5f maxi: %5.5f nc: %d\n", cutoff, max_mono_crit, nc);**/ - - /** If the current nodes maxmono_crit is <= the threshold, stop exploring - deeper in the tree. The node and its descendent leaves will be their - own cluster. */ - if (maxmono_crit <= thresh) { - nc++; - k--; - CPY_SET_BIT(lvisited, ndid-n); - CPY_SET_BIT(rvisited, ndid-n); - continue; - } - /** Otherwise, the node is above the threshold, so we need to explore - it's children. */ - if (!CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - if (lid >= n) { - curNode[k+1] = lid; - k++; - continue; - } - else if (lid < n) { - nc++; - } - } - if (!CPY_GET_BIT(rvisited, ndid-n)) { - if (rid >= n) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - k++; - continue; - } - else if (rid < n) { - nc++; - } - } - k--; - } - - if (thresh > max_illegal && nc > mc) { - CPY_DEBUG_MSG("max illegal: %5.5f mc: %d", max_illegal, mc); - max_illegal = thresh; - continue; - } - /** If the threshold is less than the current minimum legal threshold - but has a legal number of clusters, set the new legal minimum. 
*/ - if (thresh < min_legal && nc <= mc) { - min_legal = thresh; - min_legal_nc = nc; - CPY_DEBUG_MSG("min legal: %5.5f nc: %d mc: %d\n", min_legal, min_legal_nc, mc); - } - } - - form_flat_clusters_from_monotonic_criterion(Z, mono_crit, T, min_legal, n); - - free(curNode); - free(lvisited); - free(rvisited); -} - -void get_max_dist_for_each_cluster(const double *Z, double *max_dists, int n) { - int *curNode; - int ndid, lid, rid, k; - unsigned char *lvisited, *rvisited; - const double *Zrow; - double max_dist; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - - k = 0; - curNode = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - curNode[k] = (n * 2) - 2; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - if (lid >= n && !CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - curNode[k+1] = lid; - k++; - continue; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid-n)) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - k++; - continue; - } - max_dist = Zrow[CPY_LIN_DIST]; - if (lid >= n) { - max_dist = CPY_MAX(max_dist, max_dists[lid-n]); - } - if (rid >= n) { - max_dist = CPY_MAX(max_dist, max_dists[rid-n]); - } - max_dists[ndid-n] = max_dist; - CPY_DEBUG_MSG("i=%d maxdist[i]=%5.5f verif=%5.5f\n", - ndid-n, max_dist, max_dists[ndid-n]); - k--; - } - free(curNode); - free(lvisited); - free(rvisited); -} - -/** - Returns the maximum Rrow[rf] field for each cluster node where - 0 <= rf < 3. 
*/ - -void get_max_Rfield_for_each_cluster(const double *Z, const double *R, - double *max_rfs, int n, int rf) { - int *curNode; - int ndid, lid, rid, k; - unsigned char *lvisited, *rvisited; - const double *Zrow, *Rrow; - double max_rf; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - k = 0; - curNode = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - curNode[k] = (n * 2) - 2; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - Rrow = R + ((ndid-n) * CPY_NIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - if (lid >= n && !CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - curNode[k+1] = lid; - k++; - continue; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid-n)) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - k++; - continue; - } - max_rf = Rrow[rf]; - if (lid >= n) { - max_rf = CPY_MAX(max_rf, max_rfs[lid-n]); - } - if (rid >= n) { - max_rf = CPY_MAX(max_rf, max_rfs[rid-n]); - } - max_rfs[ndid-n] = max_rf; - k--; - } - free(curNode); - free(lvisited); - free(rvisited); -} - -/** find the leaders. report an error if found. */ -int leaders(const double *Z, const int *T, int *L, int *M, int kk, int n) { - int *curNode; - int ndid, lid, rid, k, nc; - unsigned char *lvisited, *rvisited; - const double *Zrow; - const int bff = CPY_FLAG_ARRAY_SIZE_BYTES(n); - int *fid; /** done vector, flat cluster ids **/ - int lfid = 0, rfid = 0, errid = -1; - - curNode = (int*)malloc(n * sizeof(int)); - lvisited = (unsigned char*)malloc(bff); - rvisited = (unsigned char*)malloc(bff); - fid = (int*)malloc((2 * n - 1) * sizeof(int)); - - for (k = 0; k < n; k++) { - fid[k] = T[k]; - } - for (k = n; k < 2 * n - 1; k++) { - fid[k] = -1; - } - - /** number of clusters formed so far. 
*/ - nc = 0; - k = 0; - curNode[k] = (n * 2) - 2; - memset(lvisited, 0, bff); - memset(rvisited, 0, bff); - while (k >= 0) { - ndid = curNode[k]; - Zrow = Z + ((ndid-n) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - CPY_DEBUG_MSG("ndid=%d lid=%d rid=%d\n", ndid, lid, rid); - if (lid >= n && !CPY_GET_BIT(lvisited, ndid-n)) { - CPY_SET_BIT(lvisited, ndid-n); - curNode[k+1] = lid; - k++; - continue; - } - if (rid >= n && !CPY_GET_BIT(rvisited, ndid-n)) { - CPY_SET_BIT(rvisited, ndid-n); - curNode[k+1] = rid; - k++; - continue; - } - lfid = fid[lid]; - rfid = fid[rid]; - CPY_DEBUG_MSG("[Q] ndid=%d lid=%d lfid=%d rid=%d rfid=%d\n", ndid, lid, lfid, rid, rfid); - - /** If the left and right have the same id, neither can be a leader, - and their parent takes on their flat cluster id. **/ - if (lfid == rfid) { - fid[ndid] = lfid; - } - /** Otherwise, they are both leaders. */ - else { - if (lfid != -1) { - /** If there isn't more room in the result vectors, - something is wrong. Condition (2) in help(hcluster.leader) - is violated. */ - if (nc >= kk) { - errid = ndid; - break; - } - CPY_DEBUG_MSG("[L] new leader i=%d nc=%d, M[nc]=%d kk=%d n=%d\n", lid, nc, lfid, kk, n); - L[nc] = lid; - M[nc] = lfid; - nc++; - } - if (rfid != -1) { - if (nc >= kk) { - errid = ndid; - break; - } - CPY_DEBUG_MSG("[R] new leader i=%d nc=%d, M[nc]=%d kk=%d n=%d\n", rid, nc, rfid, kk, n); - L[nc] = rid; - M[nc] = rfid; - nc++; - } - /** Want to make sure this guy doesn't become a leader since - it's children are both leaders. **/ - fid[ndid] = -1; - - } - k--; - } - /** For the root node, if its too children have the same flat cluster id, - neither is negative, the root becomes the leader. */ - Zrow = Z + ((n-2) * CPY_LIS); - lid = (int)Zrow[CPY_LIN_LEFT]; - rid = (int)Zrow[CPY_LIN_RIGHT]; - lfid = fid[lid]; - rfid = fid[rid]; - if (lfid == rfid && lfid != -1 && errid == -1) { - if (nc >= kk) { - errid = (n * 2) - 2; - /** I know, I know, this looks bad! 
First time in a good 10 years that I've used one of - these. Don't want to copy the free statements. I don't think this detracts from - the code's readability.*/ - goto leaders_free; - } - L[nc] = (n * 2) - 2; - M[nc] = lfid; - nc++; - } - leaders_free: - free(curNode); - free(lvisited); - free(rvisited); - free(fid); - return errid; -} diff --git a/scipy-0.10.1/scipy/cluster/src/hierarchy.h b/scipy-0.10.1/scipy/cluster/src/hierarchy.h deleted file mode 100644 index 23b196b418..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/hierarchy.h +++ /dev/null @@ -1,130 +0,0 @@ -/** - * hierarchy.h - * - * Author: Damian Eads - * Date: September 22, 2007 - * Adapted for incorporation into Scipy, April 9, 2008. - * - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - Neither the name of the author nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _CPY_HIERARCHY_H -#define _CPY_HIERARCHY_H - -#define CPY_LINKAGE_SINGLE 0 -#define CPY_LINKAGE_COMPLETE 1 -#define CPY_LINKAGE_AVERAGE 2 -#define CPY_LINKAGE_CENTROID 3 -#define CPY_LINKAGE_MEDIAN 4 -#define CPY_LINKAGE_WARD 5 -#define CPY_LINKAGE_WEIGHTED 6 - -#define CPY_CRIT_INCONSISTENT 0 -#define CPY_CRIT_DISTANCE 1 -#define CPY_CRIT_MAXCLUST 2 - -typedef struct cnode { - int n; - int id; - double d; - struct cnode *left; - struct cnode *right; -} cnode; - -typedef struct clnode { - struct clnode *next; - struct cnode *val; -} clnode; - -typedef struct clist { - struct clnode *head; - struct clnode *tail; -} clist; - -typedef struct cinfo { - struct cnode *nodes; - struct clist *lists; - int *ind; - double *dmt; - double *dm; - double *buf; - double **rows; - double **centroids; - double *centroidBuffer; - const double *X; - int *rowsize; - int m; - int n; - int nid; -} cinfo; - -typedef void (distfunc) (cinfo *info, int mini, int minj, int np, int n); - -void inconsistency_calculation(const double *Z, double *R, int n, int d); -void inconsistency_calculation_alt(const double *Z, double *R, int n, int d); - -void chopmins(int *ind, int mini, int minj, int np); -void chopmins_ns_i(double *ind, int mini, int np); -void chopmins_ns_ij(double *ind, int mini, int minj, int np); - -void dist_single(cinfo *info, int mini, int minj, int np, int n); -void dist_average(cinfo *info, int mini, int minj, int np, int n); 
-void dist_complete(cinfo *info, int mini, int minj, int np, int n); -void dist_centroid(cinfo *info, int mini, int minj, int np, int n); -void dist_ward(cinfo *info, int mini, int minj, int np, int n); -void dist_weighted(cinfo *info, int mini, int minj, int np, int n); - -int leaders(const double *Z, const int *T, int *L, int *M, int kk, int n); - -int linkage(double *dm, double *Z, double *X, int m, int n, int ml, int kc, distfunc dfunc, int method); -void linkage_alt(double *dm, double *Z, double *X, int m, int n, int ml, int kc, distfunc dfunc, int method); - -void cophenetic_distances(const double *Z, double *d, int n); -void cpy_to_tree(const double *Z, cnode **tnodes, int n); -void calculate_cluster_sizes(const double *Z, double *cs, int n); - -void form_member_list(const double *Z, int *members, int n); -void form_flat_clusters_from_in(const double *Z, const double *R, int *T, - double cutoff, int n); -void form_flat_clusters_from_dist(const double *Z, int *T, - double cutoff, int n); -void form_flat_clusters_from_monotonic_criterion(const double *Z, - const double *mono_crit, - int *T, double cutoff, int n); - -void form_flat_clusters_maxclust_dist(const double *Z, int *T, int n, int mc); - -void form_flat_clusters_maxclust_monocrit(const double *Z, - const double *mono_crit, - int *T, int n, int mc); - -void get_max_dist_for_each_cluster(const double *Z, double *max_dists, int n); -void get_max_Rfield_for_each_cluster(const double *Z, const double *R, - double *max_rfs, int n, int rf); -#endif diff --git a/scipy-0.10.1/scipy/cluster/src/hierarchy_wrap.c b/scipy-0.10.1/scipy/cluster/src/hierarchy_wrap.c deleted file mode 100644 index c958273c17..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/hierarchy_wrap.c +++ /dev/null @@ -1,412 +0,0 @@ -/** - * hierarchy_wrap.c - * - * Author: Damian Eads - * Date: September 22, 2007 - * Adapted for incorporation into Scipy, April 9, 2008. - * - * Copyright (c) 2007, Damian Eads. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - Neither the name of the author nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "hierarchy.h" -#include "Python.h" -#include -#include - -extern PyObject *linkage_wrap(PyObject *self, PyObject *args) { - int method, n; - PyArrayObject *dm, *Z; - distfunc *df; - if (!PyArg_ParseTuple(args, "O!O!ii", - &PyArray_Type, &dm, - &PyArray_Type, &Z, - &n, - &method)) { - return NULL; - } - else { - switch (method) { - case CPY_LINKAGE_SINGLE: - df = dist_single; - break; - case CPY_LINKAGE_COMPLETE: - df = dist_complete; - break; - case CPY_LINKAGE_AVERAGE: - df = dist_average; - break; - case CPY_LINKAGE_WEIGHTED: - df = dist_weighted; - break; - default: - /** Report an error. */ - df = 0; - break; - } - if (linkage((double*)dm->data, (double*)Z->data, - 0, 0, n, 0, 0, df, method) == -1) { - PyErr_SetString(PyExc_MemoryError, - "out of memory while computing linkage"); - return NULL; - } - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *linkage_euclid_wrap(PyObject *self, PyObject *args) { - int method, m, n, ml; - PyArrayObject *dm, *Z, *X; - distfunc *df; - if (!PyArg_ParseTuple(args, "O!O!O!iii", - &PyArray_Type, &dm, - &PyArray_Type, &Z, - &PyArray_Type, &X, - &m, - &n, - &method)) { - return NULL; - } - else { - ml = 0; - /** fprintf(stderr, "m: %d, n: %d\n", m, n);**/ - switch (method) { - case CPY_LINKAGE_CENTROID: - df = dist_centroid; - break; - case CPY_LINKAGE_MEDIAN: - df = dist_centroid; - break; - case CPY_LINKAGE_WARD: - df = dist_ward; - // ml = 1; - break; - default: - /** Report an error. 
*/ - df = 0; - break; - } - if (linkage((double*)dm->data, (double*)Z->data, (double*)X->data, - m, n, 1, 1, df, method) == -1) { - PyErr_SetString(PyExc_MemoryError, - "out of memory while computing linkage"); - return NULL; - } - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *calculate_cluster_sizes_wrap(PyObject *self, PyObject *args) { - int n; - PyArrayObject *Z, *cs_; - if (!PyArg_ParseTuple(args, "O!O!i", - &PyArray_Type, &Z, - &PyArray_Type, &cs_, - &n)) { - return 0; - } - calculate_cluster_sizes((const double*)Z->data, (double*)cs_->data, n); - return Py_BuildValue(""); -} - -extern PyObject *get_max_dist_for_each_cluster_wrap(PyObject *self, - PyObject *args) { - int n; - PyArrayObject *Z, *md; - if (!PyArg_ParseTuple(args, "O!O!i", - &PyArray_Type, &Z, - &PyArray_Type, &md, - &n)) { - return 0; - } - get_max_dist_for_each_cluster((const double*)Z->data, (double*)md->data, n); - return Py_BuildValue(""); -} - -extern PyObject *get_max_Rfield_for_each_cluster_wrap(PyObject *self, - PyObject *args) { - int n, rf; - PyArrayObject *Z, *R, *max_rfs; - if (!PyArg_ParseTuple(args, "O!O!O!ii", - &PyArray_Type, &Z, - &PyArray_Type, &R, - &PyArray_Type, &max_rfs, - &n, &rf)) { - return 0; - } - get_max_Rfield_for_each_cluster((const double *)Z->data, - (const double *)R->data, - (double *)max_rfs->data, n, rf); - return Py_BuildValue(""); -} - -extern PyObject *prelist_wrap(PyObject *self, PyObject *args) { - int n; - PyArrayObject *Z, *ML; - if (!PyArg_ParseTuple(args, "O!O!i", - &PyArray_Type, &Z, - &PyArray_Type, &ML, - &n)) { - return 0; - } - form_member_list((const double *)Z->data, (int *)ML->data, n); - return Py_BuildValue("d", 0.0); -} - -extern PyObject *cluster_in_wrap(PyObject *self, PyObject *args) { - int n; - double cutoff; - PyArrayObject *Z, *R, *T; - if (!PyArg_ParseTuple(args, "O!O!O!di", - &PyArray_Type, &Z, - &PyArray_Type, &R, - &PyArray_Type, &T, - &cutoff, - &n)) { - return 0; - } - form_flat_clusters_from_in((const double 
*)Z->data, (const double *)R->data, - (int *)T->data, cutoff, n); - - return Py_BuildValue("d", 0.0); -} - -extern PyObject *cluster_dist_wrap(PyObject *self, PyObject *args) { - int n; - double cutoff; - PyArrayObject *Z, *T; - if (!PyArg_ParseTuple(args, "O!O!di", - &PyArray_Type, &Z, - &PyArray_Type, &T, - &cutoff, - &n)) { - return 0; - } - form_flat_clusters_from_dist((const double *)Z->data, - (int *)T->data, cutoff, n); - - return Py_BuildValue("d", 0.0); -} - -extern PyObject *cluster_monocrit_wrap(PyObject *self, PyObject *args) { - int n; - double cutoff; - PyArrayObject *Z, *MV, *T; - if (!PyArg_ParseTuple(args, "O!O!O!di", - &PyArray_Type, &Z, - &PyArray_Type, &MV, - &PyArray_Type, &T, - &cutoff, - &n)) { - return 0; - } - form_flat_clusters_from_monotonic_criterion((const double *)Z->data, - (const double *)MV->data, - (int *)T->data, - cutoff, - n); - - form_flat_clusters_from_dist((const double *)Z->data, - (int *)T->data, cutoff, n); - - return Py_BuildValue("d", 0.0); -} - - - -extern PyObject *cluster_maxclust_dist_wrap(PyObject *self, PyObject *args) { - int n, mc; - PyArrayObject *Z, *T; - if (!PyArg_ParseTuple(args, "O!O!ii", - &PyArray_Type, &Z, - &PyArray_Type, &T, - &n, &mc)) { - return 0; - } - form_flat_clusters_maxclust_dist((const double*)Z->data, (int *)T->data, - n, mc); - - return Py_BuildValue(""); -} - - -extern PyObject *cluster_maxclust_monocrit_wrap(PyObject *self, PyObject *args) { - int n, mc; - PyArrayObject *Z, *MC, *T; - if (!PyArg_ParseTuple(args, "O!O!O!ii", - &PyArray_Type, &Z, - &PyArray_Type, &MC, - &PyArray_Type, &T, - &n, &mc)) { - return 0; - } - form_flat_clusters_maxclust_monocrit((const double *)Z->data, - (const double *)MC->data, - (int *)T->data, n, mc); - - return Py_BuildValue(""); -} - - -extern PyObject *inconsistent_wrap(PyObject *self, PyObject *args) { - int n, d; - PyArrayObject *Z, *R; - if (!PyArg_ParseTuple(args, "O!O!ii", - &PyArray_Type, &Z, - &PyArray_Type, &R, - &n, &d)) { - return 0; - } - 
inconsistency_calculation_alt((const double*)Z->data, (double*)R->data, n, d); - return Py_BuildValue("d", 0.0); -} - -extern PyObject *cophenetic_distances_wrap(PyObject *self, PyObject *args) { - int n; - PyArrayObject *Z, *d; - if (!PyArg_ParseTuple(args, "O!O!i", - &PyArray_Type, &Z, - &PyArray_Type, &d, - &n)) { - return 0; - } - cophenetic_distances((const double*)Z->data, (double*)d->data, n); - return Py_BuildValue("d", 0.0); -} - -extern PyObject *chopmin_ns_ij_wrap(PyObject *self, PyObject *args) { - int mini, minj, n; - PyArrayObject *row; - if (!PyArg_ParseTuple(args, "O!iii", - &PyArray_Type, &row, - &mini, - &minj, - &n)) { - return 0; - } - chopmins_ns_ij((double*)row->data, mini, minj, n); - return Py_BuildValue("d", 0.0); -} - -extern PyObject *chopmin_ns_i_wrap(PyObject *self, PyObject *args) { - int mini, n; - PyArrayObject *row; - if (!PyArg_ParseTuple(args, "O!ii", - &PyArray_Type, &row, - &mini, - &n)) { - return 0; - } - chopmins_ns_i((double*)row->data, mini, n); - return Py_BuildValue("d", 0.0); -} - -extern PyObject *chopmins_wrap(PyObject *self, PyObject *args) { - int mini, minj, n; - PyArrayObject *row; - if (!PyArg_ParseTuple(args, "O!iii", - &PyArray_Type, &row, - &mini, - &minj, - &n)) { - return 0; - } - chopmins((int*)row->data, mini, minj, n); - return Py_BuildValue("d", 0.0); -} - - -extern PyObject *leaders_wrap(PyObject *self, PyObject *args) { - PyArrayObject *Z_, *T_, *L_, *M_; - int kk, n, res; - if (!PyArg_ParseTuple(args, "O!O!O!O!ii", - &PyArray_Type, &Z_, - &PyArray_Type, &T_, - &PyArray_Type, &L_, - &PyArray_Type, &M_, - &kk, &n)) { - return 0; - } - else { - res = leaders((double*)Z_->data, (int*)T_->data, - (int*)L_->data, (int*)M_->data, kk, n); - } - return Py_BuildValue("i", res); -} - -static PyMethodDef _hierarchyWrapMethods[] = { - {"calculate_cluster_sizes_wrap", calculate_cluster_sizes_wrap, METH_VARARGS}, - {"chopmins", chopmins_wrap, METH_VARARGS}, - {"chopmins_ns_i", chopmin_ns_i_wrap, METH_VARARGS}, - 
{"chopmins_ns_ij", chopmin_ns_ij_wrap, METH_VARARGS}, - {"cluster_in_wrap", cluster_in_wrap, METH_VARARGS}, - {"cluster_dist_wrap", cluster_dist_wrap, METH_VARARGS}, - {"cluster_maxclust_dist_wrap", cluster_maxclust_dist_wrap, METH_VARARGS}, - {"cluster_maxclust_monocrit_wrap", cluster_maxclust_monocrit_wrap, METH_VARARGS}, - {"cluster_monocrit_wrap", cluster_monocrit_wrap, METH_VARARGS}, - {"cophenetic_distances_wrap", cophenetic_distances_wrap, METH_VARARGS}, - {"get_max_dist_for_each_cluster_wrap", - get_max_dist_for_each_cluster_wrap, METH_VARARGS}, - {"get_max_Rfield_for_each_cluster_wrap", - get_max_Rfield_for_each_cluster_wrap, METH_VARARGS}, - {"inconsistent_wrap", inconsistent_wrap, METH_VARARGS}, - {"leaders_wrap", leaders_wrap, METH_VARARGS}, - {"linkage_euclid_wrap", linkage_euclid_wrap, METH_VARARGS}, - {"linkage_wrap", linkage_wrap, METH_VARARGS}, - {"prelist_wrap", prelist_wrap, METH_VARARGS}, - {NULL, NULL} /* Sentinel - marks the end of this structure */ -}; - -#if defined(SCIPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_vq", - NULL, - -1, - _hierarchyWrapMethods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__hierarchy_wrap(void) -{ - PyObject *m; - - m = PyModule_Create(&moduledef); - import_array(); - - return m; -} -#else -PyMODINIT_FUNC init_hierarchy_wrap(void) { - (void) Py_InitModule("_hierarchy_wrap", _hierarchyWrapMethods); - import_array(); // Must be present for NumPy. Called first after above line. -} -#endif diff --git a/scipy-0.10.1/scipy/cluster/src/vq.c b/scipy-0.10.1/scipy/cluster/src/vq.c deleted file mode 100644 index f7ebbc0af2..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/vq.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - * This file implements vq for float and double in C. 
It is a direct - * translation from the swig interface which could not be generated anymore - * with recent swig - */ - -/* - * Including python.h is necessary because python header redefines some macros - * in standart C header - */ -#include - -#include -#include - -#include "vq.h" -/* - * results is put into code, which contains initially the initial code - * - * mdist and code should have at least n elements - */ -const static double rbig = 1e100; - - -#if 0 -static int float_vq_1d(const float *in, int n, - const float *init, int ncode, - npy_intp *code, float *mdist) -{ - int i, j; - float m, d; - - for (i = 0; i < n; ++i) { - m = (float)rbig; - /* Compute the minimal distance for obsvervation i */ - for (j = 0; j < ncode; ++j) { - d = (in[i] - init[j]); - d *= d; - if ( d < m) { - m = d; - } - } - mdist[i] = m; - code[i] = j; - } - return 0; -} -#endif - -static int float_vq_obs(const float *obs, - float *code_book, int Ncodes, int Nfeatures, - npy_intp* code, float *lowest_dist) -{ - int i,j,k=0; - float dist, diff; - - *lowest_dist = (float) rbig; - for(i = 0; i < Ncodes; i++) { - dist = 0; - for(j=0; j < Nfeatures; j++) { - diff = code_book[k] - obs[j]; - dist += diff*diff; - k++; - } - dist = (float)sqrt(dist); - if (dist < *lowest_dist) { - *code = i; - *lowest_dist = dist; - } - } - - return 0; -} - -int float_tvq( - float* obs, - float* code_book, - int Nobs, int Ncodes, int Nfeatures, - npy_intp* codes, float* lowest_dist) -{ - int i; - for( i = 0; i < Nobs; i++) { - float_vq_obs( - &(obs[i*Nfeatures]), - code_book,Ncodes, Nfeatures, - &(codes[i]), &(lowest_dist[i])); - } - return 0; -} - -#if 0 -static int double_vq_1d(const double *in, int n, - const double *init, int ncode, - npy_intp *code, double *mdist) -{ - int i, j; - double m, d; - - for (i = 0; i < n; ++i) { - m = (double)rbig; - /* Compute the minimal distance for obsvervation i */ - for (j = 0; j < ncode; ++j) { - d = (in[i] - init[j]); - d *= d; - if ( d < m) { - m = d; - } - } - mdist[i] 
= m; - code[i] = j; - } - return 0; -} -#endif - -static int double_vq_obs(const double *obs, - double *code_book, int Ncodes, int Nfeatures, - npy_intp* code, double *lowest_dist) -{ - int i,j,k=0; - double dist, diff; - - *lowest_dist = (double) rbig; - for(i = 0; i < Ncodes; i++) { - dist = 0; - for(j=0; j < Nfeatures; j++) { - diff = code_book[k] - obs[j]; - dist += diff*diff; - k++; - } - dist = (double)sqrt(dist); - if (dist < *lowest_dist) { - *code = i; - *lowest_dist = dist; - } - } - - return 0; -} - -int double_tvq( - double* obs, - double* code_book, - int Nobs, int Ncodes, int Nfeatures, - npy_intp* codes, double* lowest_dist) -{ - int i; - for( i = 0; i < Nobs; i++) { - double_vq_obs( - &(obs[i*Nfeatures]), - code_book,Ncodes, Nfeatures, - &(codes[i]), &(lowest_dist[i])); - } - return 0; -} diff --git a/scipy-0.10.1/scipy/cluster/src/vq.h b/scipy-0.10.1/scipy/cluster/src/vq.h deleted file mode 100644 index ef344bda34..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/vq.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _VQ_H_ -#define _VQ_H - -#include - -#include - -int double_tvq(double* obs, double* code_book, int Nobs, int Ncodes, - int Nfeatures, npy_intp* codes, double* lowest_dist); - -int float_tvq(float* obs, float* code_book, int Nobs, int Ncodes, - int Nfeatures, npy_intp* codes, float* lowest_dist); - -#endif diff --git a/scipy-0.10.1/scipy/cluster/src/vq_module.c b/scipy-0.10.1/scipy/cluster/src/vq_module.c deleted file mode 100644 index 9b07af51ca..0000000000 --- a/scipy-0.10.1/scipy/cluster/src/vq_module.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Last Change: Wed Jun 20 04:00 PM 2007 J - * - */ -#include - -#include - -#include "vq.h" - -PyObject* compute_vq(PyObject*, PyObject*); - -static PyMethodDef vqmethods [] = { - {"vq", compute_vq, METH_VARARGS, "TODO docstring"}, - {NULL, NULL, 0, NULL} -}; - -#if defined(SCIPY_PY3K) -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_vq", - NULL, - -1, - vqmethods, - NULL, - NULL, - 
NULL, - NULL -}; - -PyObject *PyInit__vq(void) -{ - PyObject *m; - - m = PyModule_Create(&moduledef); - import_array(); - - return m; -} -#else -PyMODINIT_FUNC init_vq(void) -{ - Py_InitModule("_vq", vqmethods); - import_array(); -} -#endif - -PyObject* compute_vq(PyObject* self, PyObject* args) -{ - PyObject *obs, *code, *out; - PyArrayObject *obs_a, *code_a; - PyArrayObject *index_a, *dist_a; - int typenum1, typenum2; - npy_intp nc, nd; - npy_intp n, d; - - if ( !PyArg_ParseTuple(args, "OO", &obs, &code) ) { - return NULL; - } - - /* Check that obs and code both are arrays of same type, conformant - * dimensions, etc...*/ - if (!(PyArray_Check(obs) && PyArray_Check(code))) { - PyErr_Format(PyExc_ValueError, - "observation and code should be numpy arrays"); - return NULL; - } - - typenum1 = PyArray_TYPE(obs); - typenum2 = PyArray_TYPE(code); - if (typenum1 != typenum1) { - PyErr_Format(PyExc_ValueError, - "observation and code should have same type"); - return NULL; - } - obs_a = (PyArrayObject*)PyArray_FROM_OF(obs, - NPY_CONTIGUOUS | NPY_NOTSWAPPED | NPY_ALIGNED); - if (obs_a == NULL) { - return NULL; - } - - code_a = (PyArrayObject*)PyArray_FROM_OF(code, - NPY_CONTIGUOUS | NPY_NOTSWAPPED | NPY_ALIGNED); - if (code_a == NULL) { - goto clean_obs_a; - } - - if( !(obs_a->nd == code_a->nd)) { - PyErr_Format(PyExc_ValueError, - "observation and code should have same shape"); - goto clean_code_a; - } - - switch (obs_a->nd) { - case 1: - nd = 1; - d = 1; - n = PyArray_DIM(obs, 0); - nc = PyArray_DIM(code, 0); - break; - case 2: - nd = 2; - n = PyArray_DIM(obs, 0); - d = PyArray_DIM(obs, 1); - nc = PyArray_DIM(code, 0); - if (! 
(d == PyArray_DIM(code, 1)) ) { - PyErr_Format(PyExc_ValueError, - "obs and code should have same number of " - " features (columns)"); - goto clean_code_a; - } - break; - default: - PyErr_Format(PyExc_ValueError, - "rank different than 1 or 2 are not supported"); - goto clean_code_a; - } - - switch (PyArray_TYPE(obs)) { - case NPY_FLOAT: - dist_a = (PyArrayObject*)PyArray_EMPTY(1, &n, typenum1, 0); - if (dist_a == NULL) { - goto clean_code_a; - } - index_a = (PyArrayObject*)PyArray_EMPTY(1, &n, PyArray_INTP, 0); - if (index_a == NULL) { - goto clean_dist_a; - } - float_tvq((float*)obs_a->data, (float*)code_a->data, n, nc, d, - (npy_intp*)index_a->data, (float*)dist_a->data); - break; - case NPY_DOUBLE: - dist_a = (PyArrayObject*)PyArray_EMPTY(1, &n, typenum1, 0); - if (dist_a == NULL) { - goto clean_code_a; - } - index_a = (PyArrayObject*)PyArray_EMPTY(1, &n, PyArray_INTP, 0); - if (index_a == NULL) { - goto clean_dist_a; - } - double_tvq((double*)obs_a->data, (double*)code_a->data, n, nc, d, - (npy_intp*)index_a->data, (double*)dist_a->data); - break; - default: - PyErr_Format(PyExc_ValueError, - "type other than float or double not supported"); - goto clean_code_a; - } - - /* Create output */ - out = PyTuple_New(2); - if (out == NULL) { - goto clean_index_a; - } - if (PyTuple_SetItem(out, 0, (PyObject*)index_a)) { - goto clean_out; - } - if (PyTuple_SetItem(out, 1, (PyObject*)dist_a)) { - goto clean_out; - } - - /* Clean everything */ - Py_DECREF(code_a); - Py_DECREF(obs_a); - return out; - -clean_out: - Py_DECREF(out); -clean_dist_a: - Py_DECREF(dist_a); -clean_index_a: - Py_DECREF(index_a); -clean_code_a: - Py_DECREF(code_a); -clean_obs_a: - Py_DECREF(obs_a); - return NULL; -} diff --git a/scipy-0.10.1/scipy/cluster/tests/Q-X.txt b/scipy-0.10.1/scipy/cluster/tests/Q-X.txt deleted file mode 100644 index a34aceb114..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/Q-X.txt +++ /dev/null @@ -1,30 +0,0 @@ - 5.2656366e-01 3.1416019e-01 8.0065637e-02 - 7.5020518e-01 
4.6029983e-01 8.9869646e-01 - 6.6546123e-01 6.9401142e-01 9.1046570e-01 - 9.6404759e-01 1.4308220e-03 7.3987422e-01 - 1.0815906e-01 5.5302879e-01 6.6380478e-02 - 9.3135913e-01 8.2542491e-01 9.5231544e-01 - 6.7808696e-01 3.4190397e-01 5.6148195e-01 - 9.8273094e-01 7.0460521e-01 8.7097863e-02 - 6.1469161e-01 4.6998923e-02 6.0240645e-01 - 5.8016126e-01 9.1735497e-01 5.8816385e-01 - 1.3824631e+00 1.9635816e+00 1.9443788e+00 - 2.1067586e+00 1.6714873e+00 1.3485448e+00 - 1.3988007e+00 1.6614205e+00 1.3222455e+00 - 1.7141046e+00 1.4917638e+00 1.4543217e+00 - 1.5410234e+00 1.8437495e+00 1.6465895e+00 - 2.0851248e+00 1.8452435e+00 2.1734085e+00 - 1.3074874e+00 1.5380165e+00 2.1600774e+00 - 1.4144770e+00 1.9932907e+00 1.9910742e+00 - 1.6194349e+00 1.4770328e+00 1.8978816e+00 - 1.5988060e+00 1.5498898e+00 1.5756335e+00 - 3.3724738e+00 2.6963531e+00 3.3998170e+00 - 3.1370512e+00 3.3652809e+00 3.0608907e+00 - 3.2941325e+00 3.1961950e+00 2.9070017e+00 - 2.6551051e+00 3.0678590e+00 2.9719854e+00 - 3.3094104e+00 2.5928397e+00 2.5771411e+00 - 2.5955722e+00 3.3347737e+00 3.0879319e+00 - 2.5820618e+00 3.4161567e+00 3.2644199e+00 - 2.7112700e+00 2.7703245e+00 2.6346650e+00 - 2.7961785e+00 3.2547372e+00 3.4180156e+00 - 2.6474175e+00 2.5453804e+00 3.2535411e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/data.txt b/scipy-0.10.1/scipy/cluster/tests/data.txt deleted file mode 100644 index 8554da5770..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/data.txt +++ /dev/null @@ -1 +0,0 @@ --2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, 
-2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 
0.45, -2.46, 1.71, -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, 2.11 \ No newline at end of file diff --git a/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-2.txt b/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-2.txt deleted file mode 100644 index 8ce33b53ed..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-2.txt +++ /dev/null @@ -1,30 +0,0 @@ - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-3.txt b/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-3.txt deleted file mode 100644 index b9c8063f8b..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-3.txt +++ /dev/null @@ -1,30 +0,0 @@ - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 1.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-4.txt b/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-4.txt deleted file mode 100644 index 1fe9d5ee62..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/fclusterdata-maxclusts-4.txt +++ /dev/null @@ -1,30 +0,0 @@ - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 
3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 3.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 4.0000000e+00 - 1.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 - 2.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-1.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-1.txt deleted file mode 100644 index 89c2406fd7..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-1.txt +++ /dev/null @@ -1,29 +0,0 @@ - 6.3937355e-02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.7716924e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.9481726e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4887981e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7739218e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.9703742e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.9953732e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.0440560e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.0777762e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.0902082e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.3102505e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.4153568e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.5802170e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.6459874e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.7818440e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.0129405e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.1203984e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.4459698e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.5328393e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.8198330e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 5.0546088e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 5.0591731e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 
5.9356257e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 6.0048760e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 6.2656347e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 6.5449319e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 7.0629051e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.0267612e+00 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.2085488e+00 0.0000000e+00 1.0000000e+00 0.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-2.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-2.txt deleted file mode 100644 index 05cd8037e5..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-2.txt +++ /dev/null @@ -1,29 +0,0 @@ - 6.3937355e-02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.7716924e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.9481726e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4887981e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7739218e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4592734e-01 7.2280574e-02 2.0000000e+00 7.0710678e-01 - 2.7420856e-01 3.5820263e-02 2.0000000e+00 7.0710678e-01 - 3.0440560e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4247343e-01 9.2354075e-02 2.0000000e+00 7.0710678e-01 - 3.0302912e-01 8.4735429e-03 2.0000000e+00 7.0710678e-01 - 3.1940134e-01 1.6438416e-02 2.0000000e+00 7.0710678e-01 - 3.3628037e-01 7.4321374e-03 2.0000000e+00 7.0710678e-01 - 2.5449825e-01 1.6523631e-01 3.0000000e+00 6.2651759e-01 - 3.2284722e-01 3.6239721e-02 3.0000000e+00 1.1520929e+00 - 3.7139157e-01 9.6065094e-03 2.0000000e+00 7.0710678e-01 - 3.8973922e-01 1.6340989e-02 2.0000000e+00 7.0710678e-01 - 3.8503077e-01 3.8196594e-02 2.0000000e+00 7.0710678e-01 - 4.2831841e-01 2.3021378e-02 2.0000000e+00 7.0710678e-01 - 3.8115238e-01 1.0200942e-01 2.0000000e+00 7.0710678e-01 - 4.8198330e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.6291274e-01 5.4639241e-02 3.0000000e+00 7.7871022e-01 - 4.1219781e-01 1.1967450e-01 3.0000000e+00 7.8312006e-01 - 5.4973994e-01 6.1974561e-02 
2.0000000e+00 7.0710678e-01 - 5.5297424e-01 6.7194042e-02 2.0000000e+00 7.0710678e-01 - 6.1006302e-01 2.3335155e-02 2.0000000e+00 7.0710678e-01 - 5.4954508e-01 1.4841903e-01 2.0000000e+00 7.0710678e-01 - 6.6642699e-01 5.6375531e-02 2.0000000e+00 7.0710678e-01 - 7.6058065e-01 2.3209524e-01 3.0000000e+00 1.1468590e+00 - 9.8053348e-01 2.5430019e-01 3.0000000e+00 8.9663826e-01 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-3.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-3.txt deleted file mode 100644 index 7a9316902f..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-3.txt +++ /dev/null @@ -1,29 +0,0 @@ - 6.3937355e-02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.7716924e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.9481726e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4887981e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7739218e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4592734e-01 7.2280574e-02 2.0000000e+00 7.0710678e-01 - 2.7420856e-01 3.5820263e-02 2.0000000e+00 7.0710678e-01 - 3.0440560e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4247343e-01 9.2354075e-02 2.0000000e+00 7.0710678e-01 - 2.6695850e-01 6.2762807e-02 3.0000000e+00 6.7017912e-01 - 2.7199064e-01 8.2936326e-02 3.0000000e+00 7.1180408e-01 - 3.2677945e-01 1.7274852e-02 3.0000000e+00 8.5420299e-01 - 2.7362995e-01 1.4023592e-01 4.0000000e+00 6.0178414e-01 - 3.0435537e-01 4.7363902e-02 4.0000000e+00 1.2719259e+00 - 3.3668152e-01 4.0510441e-02 4.0000000e+00 1.0244985e+00 - 3.8135906e-01 1.8552498e-02 3.0000000e+00 1.0745175e+00 - 2.9388364e-01 1.5622696e-01 4.0000000e+00 7.5631117e-01 - 4.0488617e-01 4.3728723e-02 3.0000000e+00 9.0811730e-01 - 3.5311406e-01 8.6956328e-02 3.0000000e+00 1.1519561e+00 - 4.8198330e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.4173066e-01 6.1522522e-02 4.0000000e+00 1.0358844e+00 - 3.8640356e-01 1.1049599e-01 4.0000000e+00 1.0816117e+00 - 4.5753900e-01 1.3330898e-01 4.0000000e+00 1.0203631e+00 
- 4.9730646e-01 8.1987855e-02 4.0000000e+00 1.2584931e+00 - 5.7534778e-01 6.2351485e-02 3.0000000e+00 8.2140277e-01 - 5.0371000e-01 1.3159281e-01 3.0000000e+00 1.1458315e+00 - 6.4213885e-01 5.7955510e-02 3.0000000e+00 1.1069121e+00 - 6.4635996e-01 2.2772591e-01 5.0000000e+00 1.6704345e+00 - 8.0385745e-01 2.5222314e-01 6.0000000e+00 1.6044971e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-4.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-4.txt deleted file mode 100644 index 3cfe37ef63..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-4.txt +++ /dev/null @@ -1,29 +0,0 @@ - 6.3937355e-02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.7716924e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.9481726e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4887981e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7739218e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4592734e-01 7.2280574e-02 2.0000000e+00 7.0710678e-01 - 2.7420856e-01 3.5820263e-02 2.0000000e+00 7.0710678e-01 - 3.0440560e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4247343e-01 9.2354075e-02 2.0000000e+00 7.0710678e-01 - 2.6695850e-01 6.2762807e-02 3.0000000e+00 6.7017912e-01 - 2.7199064e-01 8.2936326e-02 3.0000000e+00 7.1180408e-01 - 2.8937690e-01 7.6123263e-02 4.0000000e+00 6.8518849e-01 - 2.8045948e-01 1.2240424e-01 5.0000000e+00 6.3365630e-01 - 3.0435537e-01 4.7363902e-02 4.0000000e+00 1.2719259e+00 - 3.1912118e-01 5.2655955e-02 5.0000000e+00 1.1216818e+00 - 3.4960402e-01 4.5450828e-02 5.0000000e+00 1.1372736e+00 - 3.0131193e-01 1.3631230e-01 5.0000000e+00 8.1231048e-01 - 3.2402631e-01 1.5115571e-01 5.0000000e+00 7.9765872e-01 - 3.1353986e-01 1.0632688e-01 4.0000000e+00 1.3142874e+00 - 4.8198330e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.2630427e-01 6.3471504e-02 5.0000000e+00 1.2471203e+00 - 3.6853033e-01 1.0370286e-01 5.0000000e+00 1.3248137e+00 - 4.2783536e-01 1.3319157e-01 5.0000000e+00 1.2442770e+00 - 4.7348205e-01 
8.8766656e-02 5.0000000e+00 1.4307800e+00 - 4.9134389e-01 1.3799391e-01 5.0000000e+00 9.7989526e-01 - 4.6728793e-01 1.2981031e-01 4.0000000e+00 1.4421448e+00 - 6.0808346e-01 8.2935544e-02 4.0000000e+00 1.1841370e+00 - 5.6588963e-01 2.0619528e-01 8.0000000e+00 2.2351217e+00 - 7.0741835e-01 2.4917629e-01 9.0000000e+00 2.0111480e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-5.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-5.txt deleted file mode 100644 index c8a8534313..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-5.txt +++ /dev/null @@ -1,29 +0,0 @@ - 6.3937355e-02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.7716924e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.9481726e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4887981e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7739218e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4592734e-01 7.2280574e-02 2.0000000e+00 7.0710678e-01 - 2.7420856e-01 3.5820263e-02 2.0000000e+00 7.0710678e-01 - 3.0440560e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4247343e-01 9.2354075e-02 2.0000000e+00 7.0710678e-01 - 2.6695850e-01 6.2762807e-02 3.0000000e+00 6.7017912e-01 - 2.7199064e-01 8.2936326e-02 3.0000000e+00 7.1180408e-01 - 2.8937690e-01 7.6123263e-02 4.0000000e+00 6.8518849e-01 - 2.6324444e-01 1.1732171e-01 6.0000000e+00 8.0784074e-01 - 3.0435537e-01 4.7363902e-02 4.0000000e+00 1.2719259e+00 - 3.1912118e-01 5.2655955e-02 5.0000000e+00 1.1216818e+00 - 3.3281665e-01 5.7823149e-02 6.0000000e+00 1.1842557e+00 - 3.0238954e-01 1.2195000e-01 6.0000000e+00 8.9914142e-01 - 3.2519277e-01 1.3522797e-01 6.0000000e+00 8.8298461e-01 - 3.1353986e-01 1.0632688e-01 4.0000000e+00 1.3142874e+00 - 4.8198330e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.9078061e-01 7.9802007e-02 7.0000000e+00 1.4370599e+00 - 3.3957815e-01 1.1675958e-01 6.0000000e+00 1.4246296e+00 - 4.0603571e-01 1.3055016e-01 6.0000000e+00 1.4364353e+00 - 4.5533483e-01 9.0992001e-02 6.0000000e+00 
1.5952257e+00 - 4.6095671e-01 1.4413236e-01 6.0000000e+00 1.1489908e+00 - 3.7910412e-01 1.9099694e-01 6.0000000e+00 1.4418506e+00 - 5.2716833e-01 1.5144040e-01 6.0000000e+00 1.1827899e+00 - 5.2633231e-01 2.0011385e-01 1.0000000e+01 2.5007208e+00 - 6.2830766e-01 2.3939755e-01 1.3000000e+01 2.4237553e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-6.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-6.txt deleted file mode 100644 index b685a5ad00..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-Q-single-6.txt +++ /dev/null @@ -1,29 +0,0 @@ - 6.3937355e-02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.7716924e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 1.9481726e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4887981e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7739218e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4592734e-01 7.2280574e-02 2.0000000e+00 7.0710678e-01 - 2.7420856e-01 3.5820263e-02 2.0000000e+00 7.0710678e-01 - 3.0440560e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.4247343e-01 9.2354075e-02 2.0000000e+00 7.0710678e-01 - 2.6695850e-01 6.2762807e-02 3.0000000e+00 6.7017912e-01 - 2.7199064e-01 8.2936326e-02 3.0000000e+00 7.1180408e-01 - 2.8937690e-01 7.6123263e-02 4.0000000e+00 6.8518849e-01 - 2.6324444e-01 1.1732171e-01 6.0000000e+00 8.0784074e-01 - 3.0435537e-01 4.7363902e-02 4.0000000e+00 1.2719259e+00 - 3.1912118e-01 5.2655955e-02 5.0000000e+00 1.1216818e+00 - 3.3281665e-01 5.7823149e-02 6.0000000e+00 1.1842557e+00 - 2.8450093e-01 1.2096771e-01 7.0000000e+00 1.0543220e+00 - 3.2270489e-01 1.2362105e-01 7.0000000e+00 9.8601409e-01 - 3.1353986e-01 1.0632688e-01 4.0000000e+00 1.3142874e+00 - 4.8198330e-01 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.7304301e-01 8.9306070e-02 8.0000000e+00 1.4827420e+00 - 3.3957815e-01 1.1675958e-01 6.0000000e+00 1.4246296e+00 - 3.7586164e-01 1.4344374e-01 7.0000000e+00 1.5176747e+00 - 4.1699399e-01 1.0466959e-01 8.0000000e+00 1.7530748e+00 - 
4.3753967e-01 1.4543138e-01 7.0000000e+00 1.2997456e+00 - 3.7223569e-01 1.7529999e-01 7.0000000e+00 1.6101398e+00 - 4.9600440e-01 1.6096634e-01 7.0000000e+00 1.3063980e+00 - 4.6410730e-01 2.2064134e-01 1.3000000e+01 2.5500836e+00 - 5.6675775e-01 2.3884131e-01 1.7000000e+01 2.6871022e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-1.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-1.txt deleted file mode 100644 index 096e9577b3..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-1.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.0000000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 4.1200000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 9.9600000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-2.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-2.txt deleted file mode 100644 index 62d1ce6d15..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-2.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.6900000e+02 1.8526198e+02 2.0000000e+00 7.0710678e-01 - 3.1550000e+02 1.3647161e+02 2.0000000e+00 7.0710678e-01 - 6.0266667e+02 3.4068950e+02 3.0000000e+00 1.1545215e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-3.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-3.txt deleted file mode 100644 index a6cb8759c3..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-3.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.6900000e+02 1.8526198e+02 2.0000000e+00 
7.0710678e-01 - 3.1550000e+02 1.3647161e+02 2.0000000e+00 7.0710678e-01 - 4.3300000e+02 3.3590177e+02 5.0000000e+00 1.6760852e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-4.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-4.txt deleted file mode 100644 index a6cb8759c3..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-complete-tdist-depth-4.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.6900000e+02 1.8526198e+02 2.0000000e+00 7.0710678e-01 - 3.1550000e+02 1.3647161e+02 2.0000000e+00 7.0710678e-01 - 4.3300000e+02 3.3590177e+02 5.0000000e+00 1.6760852e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-0.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-0.txt deleted file mode 100644 index 1c20ae251e..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-0.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.5500000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.6800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.9500000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-1.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-1.txt deleted file mode 100644 index 1c20ae251e..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-1.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.5500000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.6800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.9500000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 diff --git 
a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-2.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-2.txt deleted file mode 100644 index 84ed261438..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-2.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.3700000e+02 2.5455844e+01 2.0000000e+00 7.0710678e-01 - 2.6150000e+02 9.1923882e+00 2.0000000e+00 7.0710678e-01 - 2.3366667e+02 8.3942441e+01 3.0000000e+00 7.3065940e-01 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-3.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-3.txt deleted file mode 100644 index ca06ab14e1..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-3.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.3700000e+02 2.5455844e+01 2.0000000e+00 7.0710678e-01 - 2.4733333e+02 2.5383722e+01 3.0000000e+00 8.1417007e-01 - 2.3900000e+02 6.9363775e+01 4.0000000e+00 8.0733783e-01 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-4.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-4.txt deleted file mode 100644 index 053d1d0a5f..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-4.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.3700000e+02 2.5455844e+01 2.0000000e+00 7.0710678e-01 - 2.4733333e+02 2.5383722e+01 3.0000000e+00 8.1417007e-01 - 2.3500000e+02 6.0733022e+01 5.0000000e+00 9.8793042e-01 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-5.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-5.txt deleted file mode 100644 
index 053d1d0a5f..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist-depth-5.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.3700000e+02 2.5455844e+01 2.0000000e+00 7.0710678e-01 - 2.4733333e+02 2.5383722e+01 3.0000000e+00 8.1417007e-01 - 2.3500000e+02 6.0733022e+01 5.0000000e+00 9.8793042e-01 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist.txt deleted file mode 100644 index 84ed261438..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-single-tdist.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.3700000e+02 2.5455844e+01 2.0000000e+00 7.0710678e-01 - 2.6150000e+02 9.1923882e+00 2.0000000e+00 7.0710678e-01 - 2.3366667e+02 8.3942441e+01 3.0000000e+00 7.3065940e-01 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-1.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-1.txt deleted file mode 100644 index 1ca8833d8a..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-1.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.3350000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 3.4750000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 6.7012500e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-2.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-2.txt deleted file mode 100644 index 157c07d987..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-2.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 
0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7625000e+02 8.0963726e+01 2.0000000e+00 7.0710678e-01 - 2.4275000e+02 1.4813887e+02 2.0000000e+00 7.0710678e-01 - 4.5037500e+02 1.9043778e+02 3.0000000e+00 1.1539202e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-3.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-3.txt deleted file mode 100644 index 660a81d749..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-3.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7625000e+02 8.0963726e+01 2.0000000e+00 7.0710678e-01 - 2.4275000e+02 1.4813887e+02 2.0000000e+00 7.0710678e-01 - 3.4162500e+02 2.0280090e+02 5.0000000e+00 1.6198153e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-4.txt b/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-4.txt deleted file mode 100644 index 660a81d749..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/inconsistent-weighted-tdist-depth-4.txt +++ /dev/null @@ -1,5 +0,0 @@ - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7625000e+02 8.0963726e+01 2.0000000e+00 7.0710678e-01 - 2.4275000e+02 1.4813887e+02 2.0000000e+00 7.0710678e-01 - 3.4162500e+02 2.0280090e+02 5.0000000e+00 1.6198153e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/iris.txt b/scipy-0.10.1/scipy/cluster/tests/iris.txt deleted file mode 100644 index 4d78390c25..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/iris.txt +++ /dev/null @@ -1,150 +0,0 @@ -5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01 -4.900000000000000355e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01 -4.700000000000000178e+00 3.200000000000000178e+00 1.300000000000000044e+00 
2.000000000000000111e-01 -4.599999999999999645e+00 3.100000000000000089e+00 1.500000000000000000e+00 2.000000000000000111e-01 -5.000000000000000000e+00 3.600000000000000089e+00 1.399999999999999911e+00 2.000000000000000111e-01 -5.400000000000000355e+00 3.899999999999999911e+00 1.699999999999999956e+00 4.000000000000000222e-01 -4.599999999999999645e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.999999999999999889e-01 -5.000000000000000000e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01 -4.400000000000000355e+00 2.899999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01 -4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 -5.400000000000000355e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01 -4.799999999999999822e+00 3.399999999999999911e+00 1.600000000000000089e+00 2.000000000000000111e-01 -4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 1.000000000000000056e-01 -4.299999999999999822e+00 3.000000000000000000e+00 1.100000000000000089e+00 1.000000000000000056e-01 -5.799999999999999822e+00 4.000000000000000000e+00 1.199999999999999956e+00 2.000000000000000111e-01 -5.700000000000000178e+00 4.400000000000000355e+00 1.500000000000000000e+00 4.000000000000000222e-01 -5.400000000000000355e+00 3.899999999999999911e+00 1.300000000000000044e+00 4.000000000000000222e-01 -5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01 -5.700000000000000178e+00 3.799999999999999822e+00 1.699999999999999956e+00 2.999999999999999889e-01 -5.099999999999999645e+00 3.799999999999999822e+00 1.500000000000000000e+00 2.999999999999999889e-01 -5.400000000000000355e+00 3.399999999999999911e+00 1.699999999999999956e+00 2.000000000000000111e-01 -5.099999999999999645e+00 3.700000000000000178e+00 1.500000000000000000e+00 4.000000000000000222e-01 -4.599999999999999645e+00 3.600000000000000089e+00 
1.000000000000000000e+00 2.000000000000000111e-01 -5.099999999999999645e+00 3.299999999999999822e+00 1.699999999999999956e+00 5.000000000000000000e-01 -4.799999999999999822e+00 3.399999999999999911e+00 1.899999999999999911e+00 2.000000000000000111e-01 -5.000000000000000000e+00 3.000000000000000000e+00 1.600000000000000089e+00 2.000000000000000111e-01 -5.000000000000000000e+00 3.399999999999999911e+00 1.600000000000000089e+00 4.000000000000000222e-01 -5.200000000000000178e+00 3.500000000000000000e+00 1.500000000000000000e+00 2.000000000000000111e-01 -5.200000000000000178e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01 -4.700000000000000178e+00 3.200000000000000178e+00 1.600000000000000089e+00 2.000000000000000111e-01 -4.799999999999999822e+00 3.100000000000000089e+00 1.600000000000000089e+00 2.000000000000000111e-01 -5.400000000000000355e+00 3.399999999999999911e+00 1.500000000000000000e+00 4.000000000000000222e-01 -5.200000000000000178e+00 4.099999999999999645e+00 1.500000000000000000e+00 1.000000000000000056e-01 -5.500000000000000000e+00 4.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01 -4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 -5.000000000000000000e+00 3.200000000000000178e+00 1.199999999999999956e+00 2.000000000000000111e-01 -5.500000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01 -4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 -4.400000000000000355e+00 3.000000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01 -5.099999999999999645e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01 -5.000000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.999999999999999889e-01 -4.500000000000000000e+00 2.299999999999999822e+00 1.300000000000000044e+00 2.999999999999999889e-01 -4.400000000000000355e+00 
3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01 -5.000000000000000000e+00 3.500000000000000000e+00 1.600000000000000089e+00 5.999999999999999778e-01 -5.099999999999999645e+00 3.799999999999999822e+00 1.899999999999999911e+00 4.000000000000000222e-01 -4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01 -5.099999999999999645e+00 3.799999999999999822e+00 1.600000000000000089e+00 2.000000000000000111e-01 -4.599999999999999645e+00 3.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01 -5.299999999999999822e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01 -5.000000000000000000e+00 3.299999999999999822e+00 1.399999999999999911e+00 2.000000000000000111e-01 -7.000000000000000000e+00 3.200000000000000178e+00 4.700000000000000178e+00 1.399999999999999911e+00 -6.400000000000000355e+00 3.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00 -6.900000000000000355e+00 3.100000000000000089e+00 4.900000000000000355e+00 1.500000000000000000e+00 -5.500000000000000000e+00 2.299999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00 -6.500000000000000000e+00 2.799999999999999822e+00 4.599999999999999645e+00 1.500000000000000000e+00 -5.700000000000000178e+00 2.799999999999999822e+00 4.500000000000000000e+00 1.300000000000000044e+00 -6.299999999999999822e+00 3.299999999999999822e+00 4.700000000000000178e+00 1.600000000000000089e+00 -4.900000000000000355e+00 2.399999999999999911e+00 3.299999999999999822e+00 1.000000000000000000e+00 -6.599999999999999645e+00 2.899999999999999911e+00 4.599999999999999645e+00 1.300000000000000044e+00 -5.200000000000000178e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.399999999999999911e+00 -5.000000000000000000e+00 2.000000000000000000e+00 3.500000000000000000e+00 1.000000000000000000e+00 -5.900000000000000355e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.500000000000000000e+00 
-6.000000000000000000e+00 2.200000000000000178e+00 4.000000000000000000e+00 1.000000000000000000e+00 -6.099999999999999645e+00 2.899999999999999911e+00 4.700000000000000178e+00 1.399999999999999911e+00 -5.599999999999999645e+00 2.899999999999999911e+00 3.600000000000000089e+00 1.300000000000000044e+00 -6.700000000000000178e+00 3.100000000000000089e+00 4.400000000000000355e+00 1.399999999999999911e+00 -5.599999999999999645e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00 -5.799999999999999822e+00 2.700000000000000178e+00 4.099999999999999645e+00 1.000000000000000000e+00 -6.200000000000000178e+00 2.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00 -5.599999999999999645e+00 2.500000000000000000e+00 3.899999999999999911e+00 1.100000000000000089e+00 -5.900000000000000355e+00 3.200000000000000178e+00 4.799999999999999822e+00 1.800000000000000044e+00 -6.099999999999999645e+00 2.799999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00 -6.299999999999999822e+00 2.500000000000000000e+00 4.900000000000000355e+00 1.500000000000000000e+00 -6.099999999999999645e+00 2.799999999999999822e+00 4.700000000000000178e+00 1.199999999999999956e+00 -6.400000000000000355e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00 -6.599999999999999645e+00 3.000000000000000000e+00 4.400000000000000355e+00 1.399999999999999911e+00 -6.799999999999999822e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.399999999999999911e+00 -6.700000000000000178e+00 3.000000000000000000e+00 5.000000000000000000e+00 1.699999999999999956e+00 -6.000000000000000000e+00 2.899999999999999911e+00 4.500000000000000000e+00 1.500000000000000000e+00 -5.700000000000000178e+00 2.600000000000000089e+00 3.500000000000000000e+00 1.000000000000000000e+00 -5.500000000000000000e+00 2.399999999999999911e+00 3.799999999999999822e+00 1.100000000000000089e+00 -5.500000000000000000e+00 2.399999999999999911e+00 3.700000000000000178e+00 
1.000000000000000000e+00 -5.799999999999999822e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.199999999999999956e+00 -6.000000000000000000e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.600000000000000089e+00 -5.400000000000000355e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00 -6.000000000000000000e+00 3.399999999999999911e+00 4.500000000000000000e+00 1.600000000000000089e+00 -6.700000000000000178e+00 3.100000000000000089e+00 4.700000000000000178e+00 1.500000000000000000e+00 -6.299999999999999822e+00 2.299999999999999822e+00 4.400000000000000355e+00 1.300000000000000044e+00 -5.599999999999999645e+00 3.000000000000000000e+00 4.099999999999999645e+00 1.300000000000000044e+00 -5.500000000000000000e+00 2.500000000000000000e+00 4.000000000000000000e+00 1.300000000000000044e+00 -5.500000000000000000e+00 2.600000000000000089e+00 4.400000000000000355e+00 1.199999999999999956e+00 -6.099999999999999645e+00 3.000000000000000000e+00 4.599999999999999645e+00 1.399999999999999911e+00 -5.799999999999999822e+00 2.600000000000000089e+00 4.000000000000000000e+00 1.199999999999999956e+00 -5.000000000000000000e+00 2.299999999999999822e+00 3.299999999999999822e+00 1.000000000000000000e+00 -5.599999999999999645e+00 2.700000000000000178e+00 4.200000000000000178e+00 1.300000000000000044e+00 -5.700000000000000178e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.199999999999999956e+00 -5.700000000000000178e+00 2.899999999999999911e+00 4.200000000000000178e+00 1.300000000000000044e+00 -6.200000000000000178e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00 -5.099999999999999645e+00 2.500000000000000000e+00 3.000000000000000000e+00 1.100000000000000089e+00 -5.700000000000000178e+00 2.799999999999999822e+00 4.099999999999999645e+00 1.300000000000000044e+00 -6.299999999999999822e+00 3.299999999999999822e+00 6.000000000000000000e+00 2.500000000000000000e+00 -5.799999999999999822e+00 2.700000000000000178e+00 
5.099999999999999645e+00 1.899999999999999911e+00 -7.099999999999999645e+00 3.000000000000000000e+00 5.900000000000000355e+00 2.100000000000000089e+00 -6.299999999999999822e+00 2.899999999999999911e+00 5.599999999999999645e+00 1.800000000000000044e+00 -6.500000000000000000e+00 3.000000000000000000e+00 5.799999999999999822e+00 2.200000000000000178e+00 -7.599999999999999645e+00 3.000000000000000000e+00 6.599999999999999645e+00 2.100000000000000089e+00 -4.900000000000000355e+00 2.500000000000000000e+00 4.500000000000000000e+00 1.699999999999999956e+00 -7.299999999999999822e+00 2.899999999999999911e+00 6.299999999999999822e+00 1.800000000000000044e+00 -6.700000000000000178e+00 2.500000000000000000e+00 5.799999999999999822e+00 1.800000000000000044e+00 -7.200000000000000178e+00 3.600000000000000089e+00 6.099999999999999645e+00 2.500000000000000000e+00 -6.500000000000000000e+00 3.200000000000000178e+00 5.099999999999999645e+00 2.000000000000000000e+00 -6.400000000000000355e+00 2.700000000000000178e+00 5.299999999999999822e+00 1.899999999999999911e+00 -6.799999999999999822e+00 3.000000000000000000e+00 5.500000000000000000e+00 2.100000000000000089e+00 -5.700000000000000178e+00 2.500000000000000000e+00 5.000000000000000000e+00 2.000000000000000000e+00 -5.799999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 2.399999999999999911e+00 -6.400000000000000355e+00 3.200000000000000178e+00 5.299999999999999822e+00 2.299999999999999822e+00 -6.500000000000000000e+00 3.000000000000000000e+00 5.500000000000000000e+00 1.800000000000000044e+00 -7.700000000000000178e+00 3.799999999999999822e+00 6.700000000000000178e+00 2.200000000000000178e+00 -7.700000000000000178e+00 2.600000000000000089e+00 6.900000000000000355e+00 2.299999999999999822e+00 -6.000000000000000000e+00 2.200000000000000178e+00 5.000000000000000000e+00 1.500000000000000000e+00 -6.900000000000000355e+00 3.200000000000000178e+00 5.700000000000000178e+00 2.299999999999999822e+00 -5.599999999999999645e+00 
2.799999999999999822e+00 4.900000000000000355e+00 2.000000000000000000e+00 -7.700000000000000178e+00 2.799999999999999822e+00 6.700000000000000178e+00 2.000000000000000000e+00 -6.299999999999999822e+00 2.700000000000000178e+00 4.900000000000000355e+00 1.800000000000000044e+00 -6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.100000000000000089e+00 -7.200000000000000178e+00 3.200000000000000178e+00 6.000000000000000000e+00 1.800000000000000044e+00 -6.200000000000000178e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.800000000000000044e+00 -6.099999999999999645e+00 3.000000000000000000e+00 4.900000000000000355e+00 1.800000000000000044e+00 -6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.100000000000000089e+00 -7.200000000000000178e+00 3.000000000000000000e+00 5.799999999999999822e+00 1.600000000000000089e+00 -7.400000000000000355e+00 2.799999999999999822e+00 6.099999999999999645e+00 1.899999999999999911e+00 -7.900000000000000355e+00 3.799999999999999822e+00 6.400000000000000355e+00 2.000000000000000000e+00 -6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.200000000000000178e+00 -6.299999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 1.500000000000000000e+00 -6.099999999999999645e+00 2.600000000000000089e+00 5.599999999999999645e+00 1.399999999999999911e+00 -7.700000000000000178e+00 3.000000000000000000e+00 6.099999999999999645e+00 2.299999999999999822e+00 -6.299999999999999822e+00 3.399999999999999911e+00 5.599999999999999645e+00 2.399999999999999911e+00 -6.400000000000000355e+00 3.100000000000000089e+00 5.500000000000000000e+00 1.800000000000000044e+00 -6.000000000000000000e+00 3.000000000000000000e+00 4.799999999999999822e+00 1.800000000000000044e+00 -6.900000000000000355e+00 3.100000000000000089e+00 5.400000000000000355e+00 2.100000000000000089e+00 -6.700000000000000178e+00 3.100000000000000089e+00 5.599999999999999645e+00 2.399999999999999911e+00 
-6.900000000000000355e+00 3.100000000000000089e+00 5.099999999999999645e+00 2.299999999999999822e+00 -5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00 -6.799999999999999822e+00 3.200000000000000178e+00 5.900000000000000355e+00 2.299999999999999822e+00 -6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.500000000000000000e+00 -6.700000000000000178e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.299999999999999822e+00 -6.299999999999999822e+00 2.500000000000000000e+00 5.000000000000000000e+00 1.899999999999999911e+00 -6.500000000000000000e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.000000000000000000e+00 -6.200000000000000178e+00 3.399999999999999911e+00 5.400000000000000355e+00 2.299999999999999822e+00 -5.900000000000000355e+00 3.000000000000000000e+00 5.099999999999999645e+00 1.800000000000000044e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-average.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-average.txt deleted file mode 100644 index ebc50cfce5..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-average.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 2.9000000e+01 3.3000000e+01 3.5174379e-01 - 6.0000000e+00 3.4000000e+01 3.5532162e-01 - 1.3000000e+01 3.2000000e+01 3.6158453e-01 - 1.5000000e+01 3.1000000e+01 3.7715616e-01 - 1.7000000e+01 1.9000000e+01 4.1203984e-01 - 2.4000000e+01 3.7000000e+01 4.2046824e-01 - 4.0000000e+00 3.6000000e+01 4.2863303e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 1.0000000e+01 3.8000000e+01 4.9787637e-01 - 4.0000000e+01 4.1000000e+01 5.2994677e-01 - 1.2000000e+01 3.9000000e+01 5.7421684e-01 - 2.5000000e+01 2.8000000e+01 6.2656347e-01 - 3.5000000e+01 
4.2000000e+01 6.4347240e-01 - 4.6000000e+01 4.7000000e+01 6.8297315e-01 - 4.3000000e+01 4.5000000e+01 6.9186391e-01 - 8.0000000e+00 4.4000000e+01 7.4416964e-01 - 2.1000000e+01 3.0000000e+01 7.5491453e-01 - 1.6000000e+01 5.0000000e+01 8.1859847e-01 - 4.9000000e+01 5.3000000e+01 8.5939683e-01 - 5.1000000e+01 5.2000000e+01 8.7992146e-01 - 4.8000000e+01 5.5000000e+01 8.9017230e-01 - 5.4000000e+01 5.6000000e+01 2.0198221e+00 - 5.7000000e+01 5.8000000e+01 3.2920100e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-centroid.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-centroid.txt deleted file mode 100644 index cf4396f1a3..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-centroid.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 6.0000000e+00 3.4000000e+01 3.3746118e-01 - 2.9000000e+01 3.3000000e+01 3.4067653e-01 - 1.3000000e+01 3.2000000e+01 3.5113828e-01 - 1.5000000e+01 3.9000000e+01 3.3976558e-01 - 2.4000000e+01 3.8000000e+01 3.9064620e-01 - 4.0000000e+00 3.6000000e+01 4.0386341e-01 - 1.7000000e+01 1.9000000e+01 4.1203984e-01 - 1.0000000e+01 3.7000000e+01 4.6647208e-01 - 3.1000000e+01 4.3000000e+01 4.7930518e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 4.0000000e+01 4.5000000e+01 5.2671920e-01 - 3.5000000e+01 4.1000000e+01 5.9378517e-01 - 2.5000000e+01 2.8000000e+01 6.2656347e-01 - 4.2000000e+01 4.4000000e+01 6.2815954e-01 - 8.0000000e+00 4.6000000e+01 7.1857916e-01 - 5.0000000e+01 5.1000000e+01 7.0424537e-01 - 1.2000000e+01 4.7000000e+01 7.2968219e-01 - 1.6000000e+01 5.3000000e+01 7.1788349e-01 - 3.0000000e+01 4.9000000e+01 7.5478395e-01 - 4.8000000e+01 5.5000000e+01 7.0355234e-01 - 2.1000000e+01 5.6000000e+01 7.3561818e-01 - 5.2000000e+01 5.4000000e+01 1.9510405e+00 - 5.7000000e+01 
5.8000000e+01 3.2347576e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-complete.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-complete.txt deleted file mode 100644 index 7f2624b628..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-complete.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 1.3000000e+01 3.2000000e+01 3.8163338e-01 - 2.9000000e+01 3.3000000e+01 3.9446675e-01 - 1.5000000e+01 3.1000000e+01 3.9629062e-01 - 6.0000000e+00 3.4000000e+01 4.1110592e-01 - 1.7000000e+01 1.9000000e+01 4.1203984e-01 - 2.4000000e+01 2.8000000e+01 4.5328393e-01 - 4.0000000e+00 3.6000000e+01 4.7908167e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 1.0000000e+01 4.0000000e+01 5.7813912e-01 - 3.9000000e+01 4.1000000e+01 6.4162409e-01 - 3.0000000e+01 4.2000000e+01 6.6157735e-01 - 1.2000000e+01 3.7000000e+01 7.0851770e-01 - 2.1000000e+01 3.5000000e+01 7.8597665e-01 - 1.6000000e+01 4.6000000e+01 8.3623329e-01 - 8.0000000e+00 4.5000000e+01 8.8244371e-01 - 3.8000000e+01 4.7000000e+01 9.2493420e-01 - 2.5000000e+01 4.9000000e+01 9.2757043e-01 - 4.3000000e+01 5.1000000e+01 1.0046401e+00 - 4.8000000e+01 5.0000000e+01 1.1468365e+00 - 4.4000000e+01 5.4000000e+01 1.2396527e+00 - 5.2000000e+01 5.3000000e+01 1.2958546e+00 - 5.5000000e+01 5.7000000e+01 3.0467645e+00 - 5.6000000e+01 5.8000000e+01 5.1343343e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-median.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-median.txt deleted file mode 100644 index 5ee806d635..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-median.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 
3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 6.0000000e+00 3.4000000e+01 3.3746118e-01 - 2.9000000e+01 3.3000000e+01 3.4067653e-01 - 1.3000000e+01 3.2000000e+01 3.5113828e-01 - 1.5000000e+01 3.9000000e+01 3.4054836e-01 - 4.0000000e+00 3.6000000e+01 4.0386341e-01 - 2.4000000e+01 3.8000000e+01 4.1015077e-01 - 1.7000000e+01 1.9000000e+01 4.1203984e-01 - 1.0000000e+01 3.7000000e+01 4.6883527e-01 - 3.1000000e+01 4.3000000e+01 4.7930518e-01 - 4.0000000e+01 4.5000000e+01 4.7776681e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 3.5000000e+01 4.2000000e+01 5.6937028e-01 - 2.5000000e+01 2.8000000e+01 6.2656347e-01 - 8.0000000e+00 4.7000000e+01 7.1857916e-01 - 4.8000000e+01 4.9000000e+01 7.1925426e-01 - 3.0000000e+01 5.1000000e+01 6.7611684e-01 - 2.1000000e+01 5.2000000e+01 6.6632819e-01 - 4.4000000e+01 5.0000000e+01 7.2115997e-01 - 4.1000000e+01 5.4000000e+01 6.5190047e-01 - 1.6000000e+01 4.6000000e+01 7.3662916e-01 - 1.2000000e+01 5.6000000e+01 7.0941723e-01 - 5.5000000e+01 5.7000000e+01 2.1188553e+00 - 5.3000000e+01 5.8000000e+01 3.2138035e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-single.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-single.txt deleted file mode 100644 index 75ec1ceff5..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-single.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 2.4000000e+01 3.3000000e+01 2.9703742e-01 - 6.0000000e+00 3.4000000e+01 2.9953732e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 1.5000000e+01 3.2000000e+01 3.0777762e-01 - 2.9000000e+01 3.6000000e+01 3.0902082e-01 - 1.9000000e+01 3.9000000e+01 3.3102505e-01 - 1.3000000e+01 4.1000000e+01 3.4153568e-01 - 3.1000000e+01 4.2000000e+01 3.5802170e-01 - 3.7000000e+01 3.8000000e+01 
3.6459874e-01 - 4.0000000e+00 4.4000000e+01 3.7818440e-01 - 1.0000000e+01 4.5000000e+01 4.0129405e-01 - 1.7000000e+01 4.3000000e+01 4.1203984e-01 - 1.2000000e+01 4.7000000e+01 4.4459698e-01 - 2.8000000e+01 4.0000000e+01 4.5328393e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 4.6000000e+01 5.0000000e+01 5.0546088e-01 - 3.5000000e+01 4.9000000e+01 5.0591731e-01 - 3.0000000e+01 5.2000000e+01 5.9356257e-01 - 8.0000000e+00 5.1000000e+01 6.0048760e-01 - 2.5000000e+01 5.3000000e+01 6.2656347e-01 - 1.6000000e+01 4.8000000e+01 6.5449319e-01 - 2.1000000e+01 5.5000000e+01 7.0629051e-01 - 5.4000000e+01 5.6000000e+01 1.0267612e+00 - 5.7000000e+01 5.8000000e+01 1.2085488e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-ward.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-ward.txt deleted file mode 100644 index e8bdf1aad7..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-ward.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 6.0000000e+00 3.4000000e+01 3.8966661e-01 - 2.9000000e+01 3.3000000e+01 3.9337938e-01 - 1.3000000e+01 1.5000000e+01 3.9833425e-01 - 1.7000000e+01 1.9000000e+01 4.1203984e-01 - 3.2000000e+01 3.9000000e+01 4.2295183e-01 - 2.4000000e+01 2.8000000e+01 4.5328393e-01 - 4.0000000e+00 3.6000000e+01 4.6634129e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 1.0000000e+01 3.7000000e+01 5.7130929e-01 - 3.0000000e+01 4.2000000e+01 6.7688894e-01 - 3.1000000e+01 4.0000000e+01 6.7783989e-01 - 1.2000000e+01 4.1000000e+01 7.1501676e-01 - 8.0000000e+00 4.4000000e+01 8.2974374e-01 - 2.1000000e+01 2.5000000e+01 8.3155740e-01 - 1.6000000e+01 4.7000000e+01 8.6628075e-01 - 3.5000000e+01 5.0000000e+01 9.1696168e-01 - 3.8000000e+01 4.6000000e+01 1.0741259e+00 - 4.3000000e+01 4.5000000e+01 1.1631255e+00 - 
4.8000000e+01 5.1000000e+01 1.3123400e+00 - 5.2000000e+01 5.3000000e+01 1.3876562e+00 - 4.9000000e+01 5.4000000e+01 1.4432735e+00 - 5.5000000e+01 5.7000000e+01 6.1697318e+00 - 5.6000000e+01 5.8000000e+01 1.1811665e+01 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-weighted.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-Q-weighted.txt deleted file mode 100644 index 6121d442c6..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-Q-weighted.txt +++ /dev/null @@ -1,29 +0,0 @@ - 1.1000000e+01 1.8000000e+01 6.3937355e-02 - 1.4000000e+01 2.0000000e+01 1.7716924e-01 - 2.6000000e+01 2.7000000e+01 1.9481726e-01 - 2.0000000e+00 3.0000000e+00 2.4887981e-01 - 2.2000000e+01 2.3000000e+01 2.7739218e-01 - 7.0000000e+00 9.0000000e+00 3.0440560e-01 - 2.9000000e+01 3.3000000e+01 3.5174379e-01 - 6.0000000e+00 3.4000000e+01 3.5532162e-01 - 1.3000000e+01 3.2000000e+01 3.6158453e-01 - 1.5000000e+01 3.1000000e+01 3.7715616e-01 - 1.7000000e+01 1.9000000e+01 4.1203984e-01 - 4.0000000e+00 3.6000000e+01 4.2863303e-01 - 2.4000000e+01 3.7000000e+01 4.4128967e-01 - 1.0000000e+00 5.0000000e+00 4.8198330e-01 - 1.0000000e+01 3.8000000e+01 5.0195626e-01 - 4.0000000e+01 4.1000000e+01 5.3409020e-01 - 3.9000000e+01 4.6000000e+01 6.0088461e-01 - 2.5000000e+01 2.8000000e+01 6.2656347e-01 - 3.5000000e+01 4.3000000e+01 6.2840379e-01 - 8.0000000e+00 4.4000000e+01 7.4416964e-01 - 1.2000000e+01 4.7000000e+01 7.4874549e-01 - 2.1000000e+01 3.0000000e+01 7.5491453e-01 - 4.2000000e+01 4.5000000e+01 7.8567175e-01 - 4.8000000e+01 4.9000000e+01 8.3312410e-01 - 5.2000000e+01 5.4000000e+01 8.4939549e-01 - 1.6000000e+01 5.1000000e+01 8.5308187e-01 - 5.0000000e+01 5.3000000e+01 8.6159523e-01 - 5.5000000e+01 5.6000000e+01 1.9999532e+00 - 5.7000000e+01 5.8000000e+01 3.2929087e+00 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-X.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-X.txt deleted file mode 100644 index 0ee944fcb4..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-X.txt +++ /dev/null 
@@ -1,117 +0,0 @@ - 6.6200000e+02 8.7700000e+02 2.5500000e+02 4.1200000e+02 9.9600000e+02 2.9500000e+02 4.6800000e+02 2.6800000e+02 4.0000000e+02 7.5400000e+02 5.6400000e+02 1.3800000e+02 2.1900000e+02 8.6900000e+02 6.6900000e+02 - 1.3800000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.1900000e+02 0.0000000e+00 1.0000000e+00 0.0000000e+00 - 2.7625000e+02 8.0963726e+01 2.0000000e+00 7.0710678e-01 - 2.4275000e+02 1.4813887e+02 2.0000000e+00 7.0710678e-01 - 3.4162500e+02 2.0280090e+02 5.0000000e+00 1.6198153e+00 - 9.5012929e-01 5.8279168e-01 4.3979086e-01 3.6031117e-01 - 2.3113851e-01 4.2349626e-01 3.4004795e-01 5.4851281e-01 - 6.0684258e-01 5.1551175e-01 3.1421731e-01 2.6176957e-01 - 4.8598247e-01 3.3395148e-01 3.6507839e-01 5.9734485e-01 - 8.9129897e-01 4.3290660e-01 3.9323955e-01 4.9277997e-02 - 7.6209683e-01 2.2594987e-01 5.9152520e-01 5.7105749e-01 - 4.5646767e-01 5.7980687e-01 1.1974662e-01 7.0085723e-01 - 1.8503643e-02 7.6036501e-01 3.8128797e-02 9.6228826e-01 - 8.2140716e-01 5.2982312e-01 4.5859795e-01 7.5051823e-01 - 4.4470336e-01 6.4052650e-01 8.6986735e-01 7.3999305e-01 - 6.1543235e-01 2.0906940e-01 9.3423652e-01 4.3187339e-01 - 7.9193704e-01 3.7981837e-01 2.6444917e-01 6.3426596e-01 - 9.2181297e-01 7.8332865e-01 1.6030034e-01 8.0302634e-01 - 7.3820725e-01 6.8084575e-01 8.7285526e-01 8.3881007e-02 - 1.7626614e-01 4.6109513e-01 2.3788031e-01 9.4546279e-01 - 4.0570621e-01 5.6782871e-01 6.4583125e-01 9.1594246e-01 - 9.3546970e-01 7.9421065e-01 9.6688742e-01 6.0198742e-01 - 9.1690444e-01 5.9182593e-02 6.6493121e-01 2.5356058e-01 - 4.1027021e-01 6.0286909e-01 8.7038103e-01 8.7345081e-01 - 8.9364953e-01 5.0268804e-02 9.9273048e-03 5.1340071e-01 - 5.7891305e-02 4.1537486e-01 1.3700989e-01 7.3265065e-01 - 3.5286813e-01 3.0499868e-01 8.1875583e-01 4.2222659e-01 - 8.1316650e-01 8.7436717e-01 4.3016605e-01 9.6137000e-01 - 9.8613007e-03 1.5009499e-02 8.9032172e-01 7.2059239e-02 - 1.3889088e-01 7.6795039e-01 7.3490821e-01 5.5340797e-01 - 2.0276522e-01 9.7084494e-01 
6.8732359e-01 2.9198392e-01 - 1.9872174e-01 9.9008259e-01 3.4611197e-01 8.5796351e-01 - 6.0379248e-01 7.8886169e-01 1.6603474e-01 3.3575514e-01 - 2.7218792e-01 4.3865853e-01 1.5561258e-01 6.8020385e-01 - 1.9881427e-01 4.9831130e-01 1.9111631e-01 5.3444421e-02 - 1.5273927e-02 2.1396333e-01 4.2245153e-01 3.5665554e-01 - 7.4678568e-01 6.4349229e-01 8.5597571e-01 4.9830460e-01 - 4.4509643e-01 3.2003558e-01 4.9024999e-01 4.3444054e-01 - 9.3181458e-01 9.6009860e-01 8.1593477e-01 5.6245842e-01 - 4.6599434e-01 7.2663177e-01 4.6076983e-01 6.1662113e-01 - 4.1864947e-01 4.1195321e-01 4.5735438e-01 1.1333998e-01 - 8.4622142e-01 7.4456578e-01 4.5068888e-01 8.9825174e-01 - 5.2515250e-01 2.6794725e-01 4.1221906e-01 7.5455138e-01 - 2.0264736e-01 4.3992431e-01 9.0160982e-01 7.9112320e-01 - 6.7213747e-01 9.3338011e-01 5.5839392e-03 8.1495207e-01 - 8.3811845e-01 6.8333232e-01 2.9740568e-01 6.7000386e-01 - 1.9639514e-02 2.1255986e-01 4.9162489e-02 2.0087641e-01 - 6.8127716e-01 8.3923824e-01 6.9318045e-01 2.7308816e-01 - 3.7948102e-01 6.2878460e-01 6.5010641e-01 6.2623464e-01 - 8.3179602e-01 1.3377275e-01 9.8298778e-01 5.3685169e-01 - 5.0281288e-01 2.0713273e-01 5.5267324e-01 5.9504051e-02 - 7.0947139e-01 6.0719894e-01 4.0007352e-01 8.8961759e-02 - 4.2889237e-01 6.2988785e-01 1.9878852e-01 2.7130817e-01 - 3.0461737e-01 3.7047683e-01 6.2520102e-01 4.0907232e-01 - 1.8965375e-01 5.7514778e-01 7.3336280e-01 4.7404145e-01 - 1.9343116e-01 4.5142483e-01 3.7588548e-01 9.0898935e-01 - 6.8222322e-01 4.3895325e-02 9.8764629e-03 5.9624714e-01 - 3.0276440e-01 2.7185123e-02 4.1985781e-01 3.2895530e-01 - 5.4167385e-01 3.1268505e-01 7.5366963e-01 4.7819443e-01 - 1.5087298e-01 1.2862575e-02 7.9387177e-01 5.9717078e-01 - 6.9789848e-01 3.8396729e-01 9.1995721e-01 1.6144875e-01 - 3.7837300e-01 6.8311597e-01 8.4472150e-01 8.2947425e-01 - 8.6001160e-01 9.2842462e-02 3.6775288e-01 9.5612241e-01 - 8.5365513e-01 3.5338324e-02 6.2080133e-01 5.9554800e-01 - 5.9356291e-01 6.1239548e-01 7.3127726e-01 2.8748213e-02 
- 4.9655245e-01 6.0854036e-01 1.9389318e-01 8.1211782e-01 - 8.9976918e-01 1.5759818e-02 9.0481233e-01 6.1011358e-01 - 8.2162916e-01 1.6354934e-02 5.6920575e-01 7.0149260e-01 - 6.4491038e-01 1.9007459e-01 6.3178993e-01 9.2196203e-02 - 8.1797434e-01 5.8691847e-01 2.3441296e-01 4.2488914e-01 - 6.6022756e-01 5.7581090e-02 5.4878213e-01 3.7557666e-01 - 3.4197062e-01 3.6756804e-01 9.3158335e-01 1.6615408e-01 - 2.8972590e-01 6.3145116e-01 3.3519743e-01 8.3315146e-01 - 3.4119357e-01 7.1763442e-01 6.5553106e-01 8.3863970e-01 - 5.3407902e-01 6.9266939e-01 3.9190421e-01 4.5161403e-01 - 7.2711322e-01 8.4079061e-02 6.2731479e-01 9.5660138e-01 - 3.0929016e-01 4.5435515e-01 6.9908014e-01 1.4715324e-01 - 8.3849604e-01 4.4182830e-01 3.9718395e-01 8.6993293e-01 - 5.6807246e-01 3.5325046e-01 4.1362890e-01 7.6943640e-01 - 3.7041356e-01 1.5360636e-01 6.5521295e-01 4.4416162e-01 - 7.0273991e-01 6.7564465e-01 8.3758510e-01 6.2062012e-01 - 5.4657115e-01 6.9921333e-01 3.7160803e-01 9.5168928e-01 - 4.4488020e-01 7.2750913e-01 4.2525316e-01 6.4000966e-01 - 6.9456724e-01 4.7838438e-01 5.9466337e-01 2.4732763e-01 - 6.2131013e-01 5.5484199e-01 5.6573857e-01 3.5270199e-01 - 7.9482108e-01 1.2104711e-01 7.1654240e-01 1.8786048e-01 - 9.5684345e-01 4.5075394e-01 5.1131145e-01 4.9064436e-01 - 5.2259035e-01 7.1588295e-01 7.7640121e-01 4.0927433e-01 - 8.8014221e-01 8.9284161e-01 4.8934548e-01 4.6352558e-01 - 1.7295614e-01 2.7310247e-01 1.8590445e-01 6.1094355e-01 - 9.7974690e-01 2.5476930e-01 7.0063541e-01 7.1168466e-02 - 2.7144726e-01 8.6560348e-01 9.8270880e-01 3.1428029e-01 - 2.5232935e-01 2.3235037e-01 8.0663775e-01 6.0838366e-01 - 8.7574190e-01 8.0487174e-01 7.0356766e-01 1.7502018e-01 - 7.3730599e-01 9.0839754e-01 4.8496372e-01 6.2102743e-01 - 1.3651874e-01 2.3189432e-01 1.1461282e-01 2.4595993e-01 - 1.1756687e-02 2.3931256e-01 6.6485557e-01 5.8735822e-01 - 8.9389797e-01 4.9754484e-02 3.6537389e-01 5.0605345e-01 - 1.9913807e-01 7.8384075e-02 1.4004446e-01 4.6477892e-01 - 2.9872301e-01 
6.4081541e-01 5.6677280e-01 5.4141893e-01 - 6.6144258e-01 1.9088657e-01 8.2300831e-01 9.4232657e-01 - 2.8440859e-01 8.4386950e-01 6.7394863e-01 3.4175909e-01 - 4.6922429e-01 1.7390025e-01 9.9944730e-01 4.0180434e-01 - 6.4781123e-02 1.7079281e-01 9.6163641e-01 3.0768794e-01 - 9.8833494e-01 9.9429549e-01 5.8862165e-02 4.1156796e-01 - 6.6200000e+02 8.7700000e+02 2.5500000e+02 4.1200000e+02 9.9600000e+02 2.9500000e+02 4.6800000e+02 2.6800000e+02 4.0000000e+02 7.5400000e+02 5.6400000e+02 1.3800000e+02 2.1900000e+02 8.6900000e+02 6.6900000e+02 - 3.0000000e+00 6.0000000e+00 1.3800000e+02 - 4.0000000e+00 5.0000000e+00 2.1900000e+02 - 1.0000000e+00 8.0000000e+00 3.3350000e+02 - 2.0000000e+00 7.0000000e+00 3.4750000e+02 - 9.0000000e+00 1.0000000e+01 6.7012500e+02 - 3.0000000e+00 6.0000000e+00 1.3800000e+02 - 4.0000000e+00 5.0000000e+00 2.1900000e+02 - 1.0000000e+00 8.0000000e+00 2.5500000e+02 - 2.0000000e+00 9.0000000e+00 2.6800000e+02 - 7.0000000e+00 1.0000000e+01 2.9500000e+02 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-average-tdist.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-average-tdist.txt deleted file mode 100644 index 0173ad2b43..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-average-tdist.txt +++ /dev/null @@ -1,5 +0,0 @@ - 3.0000000e+00 6.0000000e+00 1.3800000e+02 - 4.0000000e+00 5.0000000e+00 2.1900000e+02 - 1.0000000e+00 8.0000000e+00 3.3350000e+02 - 2.0000000e+00 7.0000000e+00 3.4750000e+02 - 9.0000000e+00 1.0000000e+01 6.8077778e+02 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-complete-tdist.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-complete-tdist.txt deleted file mode 100644 index ab2ecfdaf9..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-complete-tdist.txt +++ /dev/null @@ -1,5 +0,0 @@ - 3.0000000e+00 6.0000000e+00 1.3800000e+02 - 4.0000000e+00 5.0000000e+00 2.1900000e+02 - 2.0000000e+00 7.0000000e+00 4.0000000e+02 - 1.0000000e+00 8.0000000e+00 4.1200000e+02 - 9.0000000e+00 1.0000000e+01 9.9600000e+02 diff 
--git a/scipy-0.10.1/scipy/cluster/tests/linkage-single-tdist.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-single-tdist.txt deleted file mode 100644 index ed2bb2774d..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-single-tdist.txt +++ /dev/null @@ -1,5 +0,0 @@ - 3.0000000e+00 6.0000000e+00 1.3800000e+02 - 4.0000000e+00 5.0000000e+00 2.1900000e+02 - 1.0000000e+00 8.0000000e+00 2.5500000e+02 - 2.0000000e+00 9.0000000e+00 2.6800000e+02 - 7.0000000e+00 1.0000000e+01 2.9500000e+02 diff --git a/scipy-0.10.1/scipy/cluster/tests/linkage-weighted-tdist.txt b/scipy-0.10.1/scipy/cluster/tests/linkage-weighted-tdist.txt deleted file mode 100644 index 28ecdaaef7..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/linkage-weighted-tdist.txt +++ /dev/null @@ -1,5 +0,0 @@ - 3.0000000e+00 6.0000000e+00 1.3800000e+02 - 4.0000000e+00 5.0000000e+00 2.1900000e+02 - 1.0000000e+00 8.0000000e+00 3.3350000e+02 - 2.0000000e+00 7.0000000e+00 3.4750000e+02 - 9.0000000e+00 1.0000000e+01 6.7012500e+02 diff --git a/scipy-0.10.1/scipy/cluster/tests/test_hierarchy.py b/scipy-0.10.1/scipy/cluster/tests/test_hierarchy.py deleted file mode 100644 index 4f2e5aa20c..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/test_hierarchy.py +++ /dev/null @@ -1,1421 +0,0 @@ -#! /usr/bin/env python -# -# Author: Damian Eads -# Date: April 17, 2008 -# -# Copyright (C) 2008 Damian Eads -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. 
The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os.path - -import numpy as np -from numpy.testing import TestCase, run_module_suite - -from scipy.cluster.hierarchy import linkage, from_mlab_linkage, to_mlab_linkage,\ - num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, \ - is_isomorphic, single, complete, weighted, centroid, leaders, \ - correspond, is_monotonic, maxdists, maxinconsts, maxRstat, \ - is_valid_linkage, is_valid_im, to_tree, leaves_list -from scipy.spatial.distance import squareform, pdist - -_tdist = np.array([[0, 662, 877, 255, 412, 996], - [662, 0, 295, 468, 268, 400], - [877, 295, 0, 754, 564, 138], - [255, 468, 754, 0, 219, 869], - [412, 268, 564, 219, 0, 669], - [996, 400, 138, 869, 669, 0 ]], dtype='double') - -_ytdist = squareform(_tdist) - - -eo = {} - -_filenames = ["iris.txt", - "Q-X.txt", - "fclusterdata-maxclusts-2.txt", - "fclusterdata-maxclusts-3.txt", - "fclusterdata-maxclusts-4.txt", - "linkage-single-tdist.txt", - "linkage-complete-tdist.txt", - "linkage-average-tdist.txt", - "linkage-weighted-tdist.txt", - "inconsistent-Q-single-1.txt", - "inconsistent-Q-single-2.txt", - 
"inconsistent-Q-single-3.txt", - "inconsistent-Q-single-4.txt", - "inconsistent-Q-single-5.txt", - "inconsistent-Q-single-6.txt", - "inconsistent-complete-tdist-depth-1.txt", - "inconsistent-complete-tdist-depth-2.txt", - "inconsistent-complete-tdist-depth-3.txt", - "inconsistent-complete-tdist-depth-4.txt", - "inconsistent-single-tdist-depth-0.txt", - "inconsistent-single-tdist-depth-1.txt", - "inconsistent-single-tdist-depth-2.txt", - "inconsistent-single-tdist-depth-3.txt", - "inconsistent-single-tdist-depth-4.txt", - "inconsistent-single-tdist-depth-5.txt", - "inconsistent-single-tdist.txt", - "inconsistent-weighted-tdist-depth-1.txt", - "inconsistent-weighted-tdist-depth-2.txt", - "inconsistent-weighted-tdist-depth-3.txt", - "inconsistent-weighted-tdist-depth-4.txt", - "linkage-Q-average.txt", - "linkage-Q-complete.txt", - "linkage-Q-single.txt", - "linkage-Q-weighted.txt", - "linkage-Q-centroid.txt", - "linkage-Q-median.txt", - "linkage-Q-ward.txt" - ] - -def load_testing_files(): - for fn in _filenames: - name = fn.replace(".txt", "").replace("-ml", "") - fqfn = os.path.join(os.path.dirname(__file__), fn) - eo[name] = np.loadtxt(open(fqfn)) - #print "%s: %s %s" % (name, str(eo[name].shape), str(eo[name].dtype)) - #eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp']) - -load_testing_files() - -class TestLinkage(TestCase): - - def test_linkage_empty_distance_matrix(self): - "Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected." - y = np.zeros((0,)) - self.assertRaises(ValueError, linkage, y) - - ################### linkage - def test_linkage_single_tdist(self): - "Tests linkage(Y, 'single') on the tdist data set." - Z = linkage(_ytdist, 'single') - Zmlab = eo['linkage-single-tdist'] - eps = 1e-10 - expectedZ = from_mlab_linkage(Zmlab) - self.assertTrue(within_tol(Z, expectedZ, eps)) - - def test_linkage_complete_tdist(self): - "Tests linkage(Y, 'complete') on the tdist data set." 
- Z = linkage(_ytdist, 'complete') - Zmlab = eo['linkage-complete-tdist'] - eps = 1e-10 - expectedZ = from_mlab_linkage(Zmlab) - self.assertTrue(within_tol(Z, expectedZ, eps)) - - def test_linkage_average_tdist(self): - "Tests linkage(Y, 'average') on the tdist data set." - Z = linkage(_ytdist, 'average') - Zmlab = eo['linkage-average-tdist'] - eps = 1e-05 - expectedZ = from_mlab_linkage(Zmlab) - #print Z, expectedZ, np.abs(Z - expectedZ).max() - self.assertTrue(within_tol(Z, expectedZ, eps)) - - def test_linkage_weighted_tdist(self): - "Tests linkage(Y, 'weighted') on the tdist data set." - Z = linkage(_ytdist, 'weighted') - Zmlab = eo['linkage-weighted-tdist'] - eps = 1e-10 - expectedZ = from_mlab_linkage(Zmlab) - #print Z, expectedZ, np.abs(Z - expectedZ).max() - self.assertTrue(within_tol(Z, expectedZ, eps)) - - ################### linkage on Q - def test_linkage_single_q(self): - "Tests linkage(Y, 'single') on the Q data set." - X = eo['Q-X'] - Z = single(X) - Zmlab = eo['linkage-Q-single'] - eps = 1e-06 - expectedZ = from_mlab_linkage(Zmlab) - #print abs(Z-expectedZ).max() - self.assertTrue(within_tol(Z, expectedZ, eps)) - - def test_linkage_complete_q(self): - "Tests linkage(Y, 'complete') on the Q data set." - X = eo['Q-X'] - Z = complete(X) - Zmlab = eo['linkage-Q-complete'] - eps = 1e-07 - expectedZ = from_mlab_linkage(Zmlab) - #print abs(Z-expectedZ).max() - self.assertTrue(within_tol(Z, expectedZ, eps)) - - def test_linkage_centroid_q(self): - "Tests linkage(Y, 'centroid') on the Q data set." - X = eo['Q-X'] - Z = centroid(X) - Zmlab = eo['linkage-Q-centroid'] - eps = 1e-07 - expectedZ = from_mlab_linkage(Zmlab) - #print abs(Z-expectedZ).max() - self.assertTrue(within_tol(Z, expectedZ, eps)) - - def test_linkage_weighted_q(self): - "Tests linkage(Y, 'weighted') on the Q data set." 
- X = eo['Q-X'] - Z = weighted(X) - Zmlab = eo['linkage-Q-weighted'] - eps = 1e-07 - expectedZ = from_mlab_linkage(Zmlab) - #print abs(Z-expectedZ).max() - self.assertTrue(within_tol(Z, expectedZ, eps)) - -class TestInconsistent(TestCase): - - def test_single_inconsistent_tdist_1(self): - "Tests inconsistency matrix calculation (depth=1) on a single linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'single') - R = inconsistent(Z, 1) - Rright = eo['inconsistent-single-tdist-depth-1'] - eps = 1e-15 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_single_inconsistent_tdist_2(self): - "Tests inconsistency matrix calculation (depth=2) on a single linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'single') - R = inconsistent(Z, 2) - Rright = eo['inconsistent-single-tdist-depth-2'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_single_inconsistent_tdist_3(self): - "Tests inconsistency matrix calculation (depth=3) on a single linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'single') - R = inconsistent(Z, 3) - Rright = eo['inconsistent-single-tdist-depth-3'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_single_inconsistent_tdist_4(self): - "Tests inconsistency matrix calculation (depth=4) on a single linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'single') - R = inconsistent(Z, 4) - Rright = eo['inconsistent-single-tdist-depth-4'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - # with complete linkage... - - def test_complete_inconsistent_tdist_1(self): - "Tests inconsistency matrix calculation (depth=1) on a complete linkage." 
- Y = squareform(_tdist) - Z = linkage(Y, 'complete') - R = inconsistent(Z, 1) - Rright = eo['inconsistent-complete-tdist-depth-1'] - eps = 1e-15 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_complete_inconsistent_tdist_2(self): - "Tests inconsistency matrix calculation (depth=2) on a complete linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'complete') - R = inconsistent(Z, 2) - Rright = eo['inconsistent-complete-tdist-depth-2'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_complete_inconsistent_tdist_3(self): - "Tests inconsistency matrix calculation (depth=3) on a complete linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'complete') - R = inconsistent(Z, 3) - Rright = eo['inconsistent-complete-tdist-depth-3'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_complete_inconsistent_tdist_4(self): - "Tests inconsistency matrix calculation (depth=4) on a complete linkage." - Y = squareform(_tdist) - Z = linkage(Y, 'complete') - R = inconsistent(Z, 4) - Rright = eo['inconsistent-complete-tdist-depth-4'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - # with single linkage and Q data set - - def test_single_inconsistent_Q_1(self): - "Tests inconsistency matrix calculation (depth=1, dataset=Q) with single linkage." - X = eo['Q-X'] - Z = linkage(X, 'single', 'euclidean') - R = inconsistent(Z, 1) - Rright = eo['inconsistent-Q-single-1'] - eps = 1e-06 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_single_inconsistent_Q_2(self): - "Tests inconsistency matrix calculation (depth=2, dataset=Q) with single linkage." 
- X = eo['Q-X'] - Z = linkage(X, 'single', 'euclidean') - R = inconsistent(Z, 2) - Rright = eo['inconsistent-Q-single-2'] - eps = 1e-06 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_single_inconsistent_Q_3(self): - "Tests inconsistency matrix calculation (depth=3, dataset=Q) with single linkage." - X = eo['Q-X'] - Z = linkage(X, 'single', 'euclidean') - R = inconsistent(Z, 3) - Rright = eo['inconsistent-Q-single-3'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - - def test_single_inconsistent_Q_4(self): - "Tests inconsistency matrix calculation (depth=4, dataset=Q) with single linkage." - X = eo['Q-X'] - Z = linkage(X, 'single', 'euclidean') - R = inconsistent(Z, 4) - Rright = eo['inconsistent-Q-single-4'] - eps = 1e-05 - #print np.abs(R - Rright).max() - self.assertTrue(within_tol(R, Rright, eps)) - -class TestCopheneticDistance(TestCase): - - def test_linkage_cophenet_tdist_Z(self): - "Tests cophenet(Z) on tdist data set." - expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]); - Z = linkage(_ytdist, 'single') - M = cophenet(Z) - eps = 1e-10 - self.assertTrue(within_tol(M, expectedM, eps)) - - def test_linkage_cophenet_tdist_Z_Y(self): - "Tests cophenet(Z, Y) on tdist data set." - Z = linkage(_ytdist, 'single') - (c, M) = cophenet(Z, _ytdist) - expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]); - expectedc = 0.639931296433393415057366837573 - eps = 1e-10 - self.assertTrue(np.abs(c - expectedc) <= eps) - self.assertTrue(within_tol(M, expectedM, eps)) - -class TestFromMLabLinkage(TestCase): - - def test_from_mlab_linkage_empty(self): - "Tests from_mlab_linkage on empty linkage array." - X = np.asarray([]) - R = from_mlab_linkage([]) - self.assertTrue((R == X).all()) - - def test_from_mlab_linkage_single_row(self): - "Tests from_mlab_linkage on linkage array with single row." 
- expectedZP = np.asarray([[ 0., 1., 3., 2.]]) - Z = [[1,2,3]] - ZP = from_mlab_linkage(Z) - return self.assertTrue((ZP == expectedZP).all()) - - def test_from_mlab_linkage_multiple_rows(self): - "Tests from_mlab_linkage on linkage array with multiple rows." - Z = np.asarray([[3, 6, 138], [4, 5, 219], - [1, 8, 255], [2, 9, 268], [7, 10, 295]]) - expectedZS = np.array([[ 2., 5., 138., 2.], - [ 3., 4., 219., 2.], - [ 0., 7., 255., 3.], - [ 1., 8., 268., 4.], - [ 6., 9., 295., 6.]], - dtype=np.double) - ZS = from_mlab_linkage(Z) - #print expectedZS, ZS - self.assertTrue((expectedZS == ZS).all()) - - -class TestToMLabLinkage(TestCase): - - def test_to_mlab_linkage_empty(self): - "Tests to_mlab_linkage on empty linkage array." - X = np.asarray([]) - R = to_mlab_linkage([]) - self.assertTrue((R == X).all()) - - def test_to_mlab_linkage_single_row(self): - "Tests to_mlab_linkage on linkage array with single row." - Z = np.asarray([[ 0., 1., 3., 2.]]) - expectedZP = np.asarray([[1,2,3]]) - ZP = to_mlab_linkage(Z) - return self.assertTrue((ZP == expectedZP).all()) - - def test_from_mlab_linkage_multiple_rows(self): - "Tests to_mlab_linkage on linkage array with multiple rows." - expectedZM = np.asarray([[3, 6, 138], [4, 5, 219], - [1, 8, 255], [2, 9, 268], [7, 10, 295]]) - Z = np.array([[ 2., 5., 138., 2.], - [ 3., 4., 219., 2.], - [ 0., 7., 255., 3.], - [ 1., 8., 268., 4.], - [ 6., 9., 295., 6.]], - dtype=np.double) - ZM = to_mlab_linkage(Z) - #print expectedZM, ZM - self.assertTrue((expectedZM == ZM).all()) - -class TestFcluster(TestCase): - - def test_fclusterdata_maxclusts_2(self): - "Tests fclusterdata(X, criterion='maxclust', t=2) on a random 3-cluster data set." - expectedT = np.int_(eo['fclusterdata-maxclusts-2']) - X = eo['Q-X'] - T = fclusterdata(X, criterion='maxclust', t=2) - self.assertTrue(is_isomorphic(T, expectedT)) - - def test_fclusterdata_maxclusts_3(self): - "Tests fclusterdata(X, criterion='maxclust', t=3) on a random 3-cluster data set." 
- expectedT = np.int_(eo['fclusterdata-maxclusts-3']) - X = eo['Q-X'] - T = fclusterdata(X, criterion='maxclust', t=3) - self.assertTrue(is_isomorphic(T, expectedT)) - - def test_fclusterdata_maxclusts_4(self): - "Tests fclusterdata(X, criterion='maxclust', t=4) on a random 3-cluster data set." - expectedT = np.int_(eo['fclusterdata-maxclusts-4']) - X = eo['Q-X'] - T = fclusterdata(X, criterion='maxclust', t=4) - self.assertTrue(is_isomorphic(T, expectedT)) - - def test_fcluster_maxclusts_2(self): - "Tests fcluster(Z, criterion='maxclust', t=2) on a random 3-cluster data set." - expectedT = np.int_(eo['fclusterdata-maxclusts-2']) - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(Y) - T = fcluster(Z, criterion='maxclust', t=2) - self.assertTrue(is_isomorphic(T, expectedT)) - - def test_fcluster_maxclusts_3(self): - "Tests fcluster(Z, criterion='maxclust', t=3) on a random 3-cluster data set." - expectedT = np.int_(eo['fclusterdata-maxclusts-3']) - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(Y) - T = fcluster(Z, criterion='maxclust', t=3) - self.assertTrue(is_isomorphic(T, expectedT)) - - def test_fcluster_maxclusts_4(self): - "Tests fcluster(Z, criterion='maxclust', t=4) on a random 3-cluster data set." - expectedT = np.int_(eo['fclusterdata-maxclusts-4']) - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(Y) - T = fcluster(Z, criterion='maxclust', t=4) - self.assertTrue(is_isomorphic(T, expectedT)) - -class TestLeaders(TestCase): - - def test_leaders_single(self): - "Tests leaders using a flat clustering generated by single linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(Y) - T = fcluster(Z, criterion='maxclust', t=3) - Lright = (np.array([53, 55, 56]), np.array([2, 3, 1])) - L = leaders(Z, T) - #print L, Lright, T - self.assertTrue((L[0] == Lright[0]).all() and (L[1] == Lright[1]).all()) - -class TestIsIsomorphic(TestCase): - - def test_is_isomorphic_1(self): - "Tests is_isomorphic on test case #1 (one flat cluster, different labellings)" - a = [1, 1, 1] - b = [2, 2, 2] - self.assertTrue(is_isomorphic(a, b) == True) - self.assertTrue(is_isomorphic(b, a) == True) - - def test_is_isomorphic_2(self): - "Tests is_isomorphic on test case #2 (two flat clusters, different labelings)" - a = [1, 7, 1] - b = [2, 3, 2] - self.assertTrue(is_isomorphic(a, b) == True) - self.assertTrue(is_isomorphic(b, a) == True) - - def test_is_isomorphic_3(self): - "Tests is_isomorphic on test case #3 (no flat clusters)" - a = [] - b = [] - self.assertTrue(is_isomorphic(a, b) == True) - - def test_is_isomorphic_4A(self): - "Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)" - a = [1, 2, 3] - b = [1, 3, 2] - self.assertTrue(is_isomorphic(a, b) == True) - self.assertTrue(is_isomorphic(b, a) == True) - - def test_is_isomorphic_4B(self): - "Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)" - a = [1, 2, 3, 3] - b = [1, 3, 2, 3] - self.assertTrue(is_isomorphic(a, b) == False) - self.assertTrue(is_isomorphic(b, a) == False) - - def test_is_isomorphic_4C(self): - "Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)" - a = [7, 2, 3] - b = [6, 3, 2] - self.assertTrue(is_isomorphic(a, b) == True) - self.assertTrue(is_isomorphic(b, a) == True) - - def test_is_isomorphic_5A(self): - "Tests is_isomorphic on test case #5A (1000 observations, 2 random clusters, random permutation of the labeling). Run 3 times." 
- for k in xrange(0, 3): - self.help_is_isomorphic_randperm(1000, 2) - - def test_is_isomorphic_5B(self): - "Tests is_isomorphic on test case #5B (1000 observations, 3 random clusters, random permutation of the labeling). Run 3 times." - for k in xrange(0, 3): - self.help_is_isomorphic_randperm(1000, 3) - - def test_is_isomorphic_5C(self): - "Tests is_isomorphic on test case #5C (1000 observations, 5 random clusters, random permutation of the labeling). Run 3 times." - for k in xrange(0, 3): - self.help_is_isomorphic_randperm(1000, 5) - - def test_is_isomorphic_6A(self): - "Tests is_isomorphic on test case #5A (1000 observations, 2 random clusters, random permutation of the labeling, slightly nonisomorphic.) Run 3 times." - for k in xrange(0, 3): - self.help_is_isomorphic_randperm(1000, 2, True, 5) - - def test_is_isomorphic_6B(self): - "Tests is_isomorphic on test case #5B (1000 observations, 3 random clusters, random permutation of the labeling, slightly nonisomorphic.) Run 3 times." - for k in xrange(0, 3): - self.help_is_isomorphic_randperm(1000, 3, True, 5) - - def test_is_isomorphic_6C(self): - "Tests is_isomorphic on test case #5C (1000 observations, 5 random clusters, random permutation of the labeling, slightly non-isomorphic.) Run 3 times." - for k in xrange(0, 3): - self.help_is_isomorphic_randperm(1000, 5, True, 5) - - def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0): - a = np.int_(np.random.rand(nobs) * nclusters) - b = np.zeros(a.size, dtype=np.int_) - q = {} - P = np.random.permutation(nclusters) - for i in xrange(0, a.shape[0]): - b[i] = P[a[i]] - if noniso: - Q = np.random.permutation(nobs) - b[Q[0:nerrors]] += 1 - b[Q[0:nerrors]] %= nclusters - self.assertTrue(is_isomorphic(a, b) == (not noniso)) - self.assertTrue(is_isomorphic(b, a) == (not noniso)) - -class TestIsValidLinkage(TestCase): - - def test_is_valid_linkage_int_type(self): - "Tests is_valid_linkage(Z) with integer type." 
- Z = np.asarray([[0, 1, 3.0, 2], - [3, 2, 4.0, 3]], dtype=np.int) - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(TypeError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_5_columns(self): - "Tests is_valid_linkage(Z) with 5 columns." - Z = np.asarray([[0, 1, 3.0, 2, 5], - [3, 2, 4.0, 3, 3]], dtype=np.double) - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_3_columns(self): - "Tests is_valid_linkage(Z) with 3 columns." - Z = np.asarray([[0, 1, 3.0], - [3, 2, 4.0]], dtype=np.double) - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_empty(self): - "Tests is_valid_linkage(Z) with empty linkage." - Z = np.zeros((0, 4), dtype=np.double) - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_1x4(self): - "Tests is_valid_linkage(Z) on linkage over 2 observations." - Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) - self.assertTrue(is_valid_linkage(Z) == True) - - def test_is_valid_linkage_2x4(self): - "Tests is_valid_linkage(Z) on linkage over 3 observations." - Z = np.asarray([[0, 1, 3.0, 2], - [3, 2, 4.0, 3]], dtype=np.double) - self.assertTrue(is_valid_linkage(Z) == True) - - def test_is_valid_linkage_4_and_up(self): - "Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3)." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - self.assertTrue(is_valid_linkage(Z) == True) - - def test_is_valid_linkage_4_and_up_neg_index_left(self): - "Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative indices (left)." 
- for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - Z[int(i/2),0] = -2 - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_4_and_up_neg_index_right(self): - "Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative indices (right)." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - Z[int(i/2),1] = -2 - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_4_and_up_neg_dist(self): - "Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative distances." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - Z[int(i/2),2] = -0.5 - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - - def test_is_valid_linkage_4_and_up_neg_counts(self): - "Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative counts." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - Z[int(i/2),3] = -2 - self.assertTrue(is_valid_linkage(Z) == False) - self.assertRaises(ValueError, is_valid_linkage, Z, throw=True) - -class TestIsValidInconsistent(TestCase): - - def test_is_valid_im_int_type(self): - "Tests is_valid_im(R) with integer type." - R = np.asarray([[0, 1, 3.0, 2], - [3, 2, 4.0, 3]], dtype=np.int) - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(TypeError, is_valid_im, R, throw=True) - - def test_is_valid_im_5_columns(self): - "Tests is_valid_im(R) with 5 columns." 
- R = np.asarray([[0, 1, 3.0, 2, 5], - [3, 2, 4.0, 3, 3]], dtype=np.double) - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(ValueError, is_valid_im, R, throw=True) - - def test_is_valid_im_3_columns(self): - "Tests is_valid_im(R) with 3 columns." - R = np.asarray([[0, 1, 3.0], - [3, 2, 4.0]], dtype=np.double) - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(ValueError, is_valid_im, R, throw=True) - - def test_is_valid_im_empty(self): - "Tests is_valid_im(R) with empty inconsistency matrix." - R = np.zeros((0, 4), dtype=np.double) - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(ValueError, is_valid_im, R, throw=True) - - def test_is_valid_im_1x4(self): - "Tests is_valid_im(R) on im over 2 observations." - R = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) - self.assertTrue(is_valid_im(R) == True) - - def test_is_valid_im_2x4(self): - "Tests is_valid_im(R) on im over 3 observations." - R = np.asarray([[0, 1, 3.0, 2], - [3, 2, 4.0, 3]], dtype=np.double) - self.assertTrue(is_valid_im(R) == True) - - def test_is_valid_im_4_and_up(self): - "Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3)." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - R = inconsistent(Z) - self.assertTrue(is_valid_im(R) == True) - - def test_is_valid_im_4_and_up_neg_index_left(self): - "Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3) with negative link height means." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - R = inconsistent(Z) - R[int(i/2),0] = -2.0 - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(ValueError, is_valid_im, R, throw=True) - - def test_is_valid_im_4_and_up_neg_index_right(self): - "Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3) with negative link height standard deviations." 
- for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - R = inconsistent(Z) - R[int(i/2),1] = -2.0 - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(ValueError, is_valid_im, R, throw=True) - - def test_is_valid_im_4_and_up_neg_dist(self): - "Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3) with negative link counts." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - R = inconsistent(Z) - R[int(i/2),2] = -0.5 - self.assertTrue(is_valid_im(R) == False) - self.assertRaises(ValueError, is_valid_im, R, throw=True) - -class TestNumObsLinkage(TestCase): - - def test_num_obs_linkage_empty(self): - "Tests num_obs_linkage(Z) with empty linkage." - Z = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, num_obs_linkage, Z) - - def test_num_obs_linkage_1x4(self): - "Tests num_obs_linkage(Z) on linkage over 2 observations." - Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) - self.assertTrue(num_obs_linkage(Z) == 2) - - def test_num_obs_linkage_2x4(self): - "Tests num_obs_linkage(Z) on linkage over 3 observations." - Z = np.asarray([[0, 1, 3.0, 2], - [3, 2, 4.0, 3]], dtype=np.double) - self.assertTrue(num_obs_linkage(Z) == 3) - - def test_num_obs_linkage_4_and_up(self): - "Tests num_obs_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3)." - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - self.assertTrue(num_obs_linkage(Z) == i) - -class TestLeavesList(TestCase): - - def test_leaves_list_1x4(self): - "Tests leaves_list(Z) on a 1x4 linkage." - Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) - node = to_tree(Z) - self.assertTrue((leaves_list(Z) == [0, 1]).all()) - - def test_leaves_list_2x4(self): - "Tests leaves_list(Z) on a 2x4 linkage." 
- Z = np.asarray([[0, 1, 3.0, 2], - [3, 2, 4.0, 3]], dtype=np.double) - node = to_tree(Z) - self.assertTrue((leaves_list(Z) == [0, 1, 2]).all()) - - def test_leaves_list_iris_single(self): - "Tests leaves_list(Z) on the Iris data set using single linkage." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'single') - node = to_tree(Z) - self.assertTrue((node.pre_order() == leaves_list(Z)).all()) - - def test_leaves_list_iris_complete(self): - "Tests leaves_list(Z) on the Iris data set using complete linkage." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'complete') - node = to_tree(Z) - self.assertTrue((node.pre_order() == leaves_list(Z)).all()) - - def test_leaves_list_iris_centroid(self): - "Tests leaves_list(Z) on the Iris data set using centroid linkage." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'centroid') - node = to_tree(Z) - self.assertTrue((node.pre_order() == leaves_list(Z)).all()) - - def test_leaves_list_iris_median(self): - "Tests leaves_list(Z) on the Iris data set using median linkage." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'median') - node = to_tree(Z) - self.assertTrue((node.pre_order() == leaves_list(Z)).all()) - - def test_leaves_list_iris_ward(self): - "Tests leaves_list(Z) on the Iris data set using ward linkage." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'ward') - node = to_tree(Z) - self.assertTrue((node.pre_order() == leaves_list(Z)).all()) - - def test_leaves_list_iris_average(self): - "Tests leaves_list(Z) on the Iris data set using average linkage." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'average') - node = to_tree(Z) - self.assertTrue((node.pre_order() == leaves_list(Z)).all()) - -class TestCorrespond(TestCase): - - def test_correspond_empty(self): - "Tests correspond(Z, y) with empty linkage and condensed distance matrix." 
- y = np.zeros((0,)) - Z = np.zeros((0,4)) - self.assertRaises(ValueError, correspond, Z, y) - - def test_correspond_2_and_up(self): - "Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes." - for i in xrange(2, 4): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - self.assertTrue(correspond(Z, y)) - for i in xrange(4, 15, 3): - y = np.random.rand(i*(i-1)/2) - Z = linkage(y) - self.assertTrue(correspond(Z, y)) - - def test_correspond_4_and_up(self): - "Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes. Correspondance should be false." - for (i, j) in zip(range(2, 4), range(3, 5)) + zip(range(3, 5), range(2, 4)): - y = np.random.rand(i*(i-1)/2) - y2 = np.random.rand(j*(j-1)/2) - Z = linkage(y) - Z2 = linkage(y2) - self.assertTrue(correspond(Z, y2) == False) - self.assertTrue(correspond(Z2, y) == False) - - def test_correspond_4_and_up_2(self): - "Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes. Correspondance should be false." - for (i, j) in zip(range(2, 7), range(16, 21)) + zip(range(2, 7), range(16, 21)): - y = np.random.rand(i*(i-1)/2) - y2 = np.random.rand(j*(j-1)/2) - Z = linkage(y) - Z2 = linkage(y2) - self.assertTrue(correspond(Z, y2) == False) - self.assertTrue(correspond(Z2, y) == False) - - def test_num_obs_linkage_multi_matrix(self): - "Tests num_obs_linkage with observation matrices of multiple sizes." - for n in xrange(2, 10): - X = np.random.rand(n, 4) - Y = pdist(X) - Z = linkage(Y) - #print Z - #print A.shape, Y.shape, Yr.shape - self.assertTrue(num_obs_linkage(Z) == n) - -class TestIsMonotonic(TestCase): - - def test_is_monotonic_empty(self): - "Tests is_monotonic(Z) on an empty linkage." - Z = np.zeros((0, 4)) - self.assertRaises(ValueError, is_monotonic, Z) - - def test_is_monotonic_1x4(self): - "Tests is_monotonic(Z) on 1x4 linkage. Expecting True." 
- Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double); - self.assertTrue(is_monotonic(Z) == True) - - def test_is_monotonic_2x4_T(self): - "Tests is_monotonic(Z) on 2x4 linkage. Expecting True." - Z = np.asarray([[0, 1, 0.3, 2], - [2, 3, 0.4, 3]], dtype=np.double) - self.assertTrue(is_monotonic(Z) == True) - - def test_is_monotonic_2x4_F(self): - "Tests is_monotonic(Z) on 2x4 linkage. Expecting False." - Z = np.asarray([[0, 1, 0.4, 2], - [2, 3, 0.3, 3]], dtype=np.double) - self.assertTrue(is_monotonic(Z) == False) - - def test_is_monotonic_3x4_T(self): - "Tests is_monotonic(Z) on 3x4 linkage. Expecting True." - Z = np.asarray([[0, 1, 0.3, 2], - [2, 3, 0.4, 2], - [4, 5, 0.6, 4]], dtype=np.double) - self.assertTrue(is_monotonic(Z) == True) - - def test_is_monotonic_3x4_F1(self): - "Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False." - Z = np.asarray([[0, 1, 0.3, 2], - [2, 3, 0.2, 2], - [4, 5, 0.6, 4]], dtype=np.double) - self.assertTrue(is_monotonic(Z) == False) - - def test_is_monotonic_3x4_F2(self): - "Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False." - Z = np.asarray([[0, 1, 0.8, 2], - [2, 3, 0.4, 2], - [4, 5, 0.6, 4]], dtype=np.double) - self.assertTrue(is_monotonic(Z) == False) - - def test_is_monotonic_3x4_F3(self): - "Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False" - Z = np.asarray([[0, 1, 0.3, 2], - [2, 3, 0.4, 2], - [4, 5, 0.2, 4]], dtype=np.double) - self.assertTrue(is_monotonic(Z) == False) - - def test_is_monotonic_tdist_linkage1(self): - "Tests is_monotonic(Z) on clustering generated by single linkage on tdist data set. Expecting True." - Z = linkage(_ytdist, 'single') - self.assertTrue(is_monotonic(Z) == True) - - def test_is_monotonic_tdist_linkage2(self): - "Tests is_monotonic(Z) on clustering generated by single linkage on tdist data set. Perturbing. Expecting False." 
- Z = linkage(_ytdist, 'single') - Z[2,2]=0.0 - self.assertTrue(is_monotonic(Z) == False) - - def test_is_monotonic_iris_linkage(self): - "Tests is_monotonic(Z) on clustering generated by single linkage on Iris data set. Expecting True." - X = eo['iris'] - Y = pdist(X) - Z = linkage(X, 'single') - self.assertTrue(is_monotonic(Z) == True) - -class TestMaxDists(TestCase): - - def test_maxdists_empty_linkage(self): - "Tests maxdists(Z) on empty linkage. Expecting exception." - Z = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, maxdists, Z) - - def test_maxdists_one_cluster_linkage(self): - "Tests maxdists(Z) on linkage with one cluster." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - MD = maxdists(Z) - eps = 1e-15 - expectedMD = calculate_maximum_distances(Z) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxdists_Q_linkage_single(self): - "Tests maxdists(Z) on the Q data set using single linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'single') - MD = maxdists(Z) - eps = 1e-15 - expectedMD = calculate_maximum_distances(Z) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxdists_Q_linkage_complete(self): - "Tests maxdists(Z) on the Q data set using complete linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'complete') - MD = maxdists(Z) - eps = 1e-15 - expectedMD = calculate_maximum_distances(Z) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxdists_Q_linkage_ward(self): - "Tests maxdists(Z) on the Q data set using Ward linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'ward') - MD = maxdists(Z) - eps = 1e-15 - expectedMD = calculate_maximum_distances(Z) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxdists_Q_linkage_centroid(self): - "Tests maxdists(Z) on the Q data set using centroid linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'centroid') - MD = maxdists(Z) - eps = 1e-15 - expectedMD = calculate_maximum_distances(Z) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxdists_Q_linkage_median(self): - "Tests maxdists(Z) on the Q data set using median linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'median') - MD = maxdists(Z) - eps = 1e-15 - expectedMD = calculate_maximum_distances(Z) - self.assertTrue(within_tol(MD, expectedMD, eps)) - -class TestMaxInconsts(TestCase): - - def test_maxinconsts_empty_linkage(self): - "Tests maxinconsts(Z, R) on empty linkage. Expecting exception." - Z = np.zeros((0, 4), dtype=np.double) - R = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, maxinconsts, Z, R) - - def test_maxinconsts_difrow_linkage(self): - "Tests maxinconsts(Z, R) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.random.rand(2, 4) - self.assertRaises(ValueError, maxinconsts, Z, R) - - def test_maxinconsts_one_cluster_linkage(self): - "Tests maxinconsts(Z, R) on linkage with one cluster." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - MD = maxinconsts(Z, R) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxinconsts_Q_linkage_single(self): - "Tests maxinconsts(Z, R) on the Q data set using single linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'single') - R = inconsistent(Z) - MD = maxinconsts(Z, R) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxinconsts_Q_linkage_complete(self): - "Tests maxinconsts(Z, R) on the Q data set using complete linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'complete') - R = inconsistent(Z) - MD = maxinconsts(Z, R) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxinconsts_Q_linkage_ward(self): - "Tests maxinconsts(Z, R) on the Q data set using Ward linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'ward') - R = inconsistent(Z) - MD = maxinconsts(Z, R) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxinconsts_Q_linkage_centroid(self): - "Tests maxinconsts(Z, R) on the Q data set using centroid linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'centroid') - R = inconsistent(Z) - MD = maxinconsts(Z, R) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxinconsts_Q_linkage_median(self): - "Tests maxinconsts(Z, R) on the Q data set using median linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'median') - R = inconsistent(Z) - MD = maxinconsts(Z, R) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R) - self.assertTrue(within_tol(MD, expectedMD, eps)) - -class TestMaxRStat(TestCase): - - def test_maxRstat_float_index(self): - "Tests maxRstat(Z, R, 3.3). Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - self.assertRaises(TypeError, maxRstat, Z, R, 3.3) - - def test_maxRstat_neg_index(self): - "Tests maxRstat(Z, R, -1). Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - self.assertRaises(ValueError, maxRstat, Z, R, -1) - - def test_maxRstat_oob_pos_index(self): - "Tests maxRstat(Z, R, 4). Expecting exception." 
- Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - self.assertRaises(ValueError, maxRstat, Z, R, 4) - - def test_maxRstat_0_empty_linkage(self): - "Tests maxRstat(Z, R, 0) on empty linkage. Expecting exception." - Z = np.zeros((0, 4), dtype=np.double) - R = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, maxRstat, Z, R, 0) - - def test_maxRstat_0_difrow_linkage(self): - "Tests maxRstat(Z, R, 0) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.random.rand(2, 4) - self.assertRaises(ValueError, maxRstat, Z, R, 0) - - def test_maxRstat_0_one_cluster_linkage(self): - "Tests maxRstat(Z, R, 0) on linkage with one cluster." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - MD = maxRstat(Z, R, 0) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 0) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_0_Q_linkage_single(self): - "Tests maxRstat(Z, R, 0) on the Q data set using single linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'single') - R = inconsistent(Z) - MD = maxRstat(Z, R, 0) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 0) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_0_Q_linkage_complete(self): - "Tests maxRstat(Z, R, 0) on the Q data set using complete linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'complete') - R = inconsistent(Z) - MD = maxRstat(Z, R, 0) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 0) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_0_Q_linkage_ward(self): - "Tests maxRstat(Z, R, 0) on the Q data set using Ward linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'ward') - R = inconsistent(Z) - MD = maxRstat(Z, R, 0) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 0) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_0_Q_linkage_centroid(self): - "Tests maxRstat(Z, R, 0) on the Q data set using centroid linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'centroid') - R = inconsistent(Z) - MD = maxRstat(Z, R, 0) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 0) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_0_Q_linkage_median(self): - "Tests maxRstat(Z, R, 0) on the Q data set using median linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'median') - R = inconsistent(Z) - MD = maxRstat(Z, R, 0) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 0) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_1_empty_linkage(self): - "Tests maxRstat(Z, R, 1) on empty linkage. Expecting exception." - Z = np.zeros((0, 4), dtype=np.double) - R = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, maxRstat, Z, R, 0) - - def test_maxRstat_1_difrow_linkage(self): - "Tests maxRstat(Z, R, 1) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.random.rand(2, 4) - self.assertRaises(ValueError, maxRstat, Z, R, 0) - - def test_maxRstat_1_one_cluster_linkage(self): - "Tests maxRstat(Z, R, 1) on linkage with one cluster." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - MD = maxRstat(Z, R, 1) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 1) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_1_Q_linkage_single(self): - "Tests maxRstat(Z, R, 1) on the Q data set using single linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'single') - R = inconsistent(Z) - MD = maxRstat(Z, R, 1) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 1) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_1_Q_linkage_complete(self): - "Tests maxRstat(Z, R, 1) on the Q data set using complete linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'complete') - R = inconsistent(Z) - MD = maxRstat(Z, R, 1) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 1) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_1_Q_linkage_ward(self): - "Tests maxRstat(Z, R, 1) on the Q data set using Ward linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'ward') - R = inconsistent(Z) - MD = maxRstat(Z, R, 1) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 1) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_1_Q_linkage_centroid(self): - "Tests maxRstat(Z, R, 1) on the Q data set using centroid linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'centroid') - R = inconsistent(Z) - MD = maxRstat(Z, R, 1) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 1) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_1_Q_linkage_median(self): - "Tests maxRstat(Z, R, 1) on the Q data set using median linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'median') - R = inconsistent(Z) - MD = maxRstat(Z, R, 1) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 1) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_2_empty_linkage(self): - "Tests maxRstat(Z, R, 2) on empty linkage. Expecting exception." - Z = np.zeros((0, 4), dtype=np.double) - R = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, maxRstat, Z, R, 2) - - def test_maxRstat_2_difrow_linkage(self): - "Tests maxRstat(Z, R, 2) on linkage and inconsistency matrices with different numbers of clusters. 
Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.random.rand(2, 4) - self.assertRaises(ValueError, maxRstat, Z, R, 2) - - def test_maxRstat_2_one_cluster_linkage(self): - "Tests maxRstat(Z, R, 2) on linkage with one cluster." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - MD = maxRstat(Z, R, 2) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 2) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_2_Q_linkage_single(self): - "Tests maxRstat(Z, R, 2) on the Q data set using single linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'single') - R = inconsistent(Z) - MD = maxRstat(Z, R, 2) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 2) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_2_Q_linkage_complete(self): - "Tests maxRstat(Z, R, 2) on the Q data set using complete linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'complete') - R = inconsistent(Z) - MD = maxRstat(Z, R, 2) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 2) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_2_Q_linkage_ward(self): - "Tests maxRstat(Z, R, 2) on the Q data set using Ward linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'ward') - R = inconsistent(Z) - MD = maxRstat(Z, R, 2) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 2) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_2_Q_linkage_centroid(self): - "Tests maxRstat(Z, R, 2) on the Q data set using centroid linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'centroid') - R = inconsistent(Z) - MD = maxRstat(Z, R, 2) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 2) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_2_Q_linkage_median(self): - "Tests maxRstat(Z, R, 2) on the Q data set using median linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'median') - R = inconsistent(Z) - MD = maxRstat(Z, R, 2) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 2) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_3_empty_linkage(self): - "Tests maxRstat(Z, R, 3) on empty linkage. Expecting exception." - Z = np.zeros((0, 4), dtype=np.double) - R = np.zeros((0, 4), dtype=np.double) - self.assertRaises(ValueError, maxRstat, Z, R, 3) - - def test_maxRstat_3_difrow_linkage(self): - "Tests maxRstat(Z, R, 3) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.random.rand(2, 4) - self.assertRaises(ValueError, maxRstat, Z, R, 3) - - def test_maxRstat_3_one_cluster_linkage(self): - "Tests maxRstat(Z, R, 3) on linkage with one cluster." - Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) - R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) - MD = maxRstat(Z, R, 3) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 3) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_3_Q_linkage_single(self): - "Tests maxRstat(Z, R, 3) on the Q data set using single linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'single') - R = inconsistent(Z) - MD = maxRstat(Z, R, 3) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 3) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_3_Q_linkage_complete(self): - "Tests maxRstat(Z, R, 3) on the Q data set using complete linkage." 
- X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'complete') - R = inconsistent(Z) - MD = maxRstat(Z, R, 3) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 3) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_3_Q_linkage_ward(self): - "Tests maxRstat(Z, R, 3) on the Q data set using Ward linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'ward') - R = inconsistent(Z) - MD = maxRstat(Z, R, 3) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 3) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_3_Q_linkage_centroid(self): - "Tests maxRstat(Z, R, 3) on the Q data set using centroid linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'centroid') - R = inconsistent(Z) - MD = maxRstat(Z, R, 3) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 3) - self.assertTrue(within_tol(MD, expectedMD, eps)) - - def test_maxRstat_3_Q_linkage_median(self): - "Tests maxRstat(Z, R, 3) on the Q data set using median linkage." - X = eo['Q-X'] - Y = pdist(X) - Z = linkage(X, 'median') - R = inconsistent(Z) - MD = maxRstat(Z, R, 3) - eps = 1e-15 - expectedMD = calculate_maximum_inconsistencies(Z, R, 3) - self.assertTrue(within_tol(MD, expectedMD, eps)) - -def calculate_maximum_distances(Z): - "Used for testing correctness of maxdists. Very slow." - n = Z.shape[0] + 1 - B = np.zeros((n-1,)) - q = np.zeros((3,)) - for i in xrange(0, n - 1): - q[:] = 0.0 - left = Z[i, 0] - right = Z[i, 1] - if left >= n: - q[0] = B[left - n] - if right >= n: - q[1] = B[right - n] - q[2] = Z[i, 2] - B[i] = q.max() - return B - -def calculate_maximum_inconsistencies(Z, R, k=3): - "Used for testing correctness of maxinconsts. Very slow." 
- n = Z.shape[0] + 1 - B = np.zeros((n-1,)) - q = np.zeros((3,)) - #print R.shape - for i in xrange(0, n - 1): - q[:] = 0.0 - left = Z[i, 0] - right = Z[i, 1] - if left >= n: - q[0] = B[left - n] - if right >= n: - q[1] = B[right - n] - q[2] = R[i, k] - B[i] = q.max() - return B - -def within_tol(a, b, tol): - return np.abs(a - b).max() < tol - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/cluster/tests/test_vq.py b/scipy-0.10.1/scipy/cluster/tests/test_vq.py deleted file mode 100644 index 8e7d27c445..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/test_vq.py +++ /dev/null @@ -1,204 +0,0 @@ -#! /usr/bin/env python - -# David Cournapeau -# Last Change: Wed Nov 05 07:00 PM 2008 J - -import os.path -import warnings - -import numpy as np -from numpy.testing import assert_array_equal, assert_array_almost_equal, \ - TestCase, run_module_suite - -from scipy.cluster.vq import kmeans, kmeans2, py_vq, py_vq2, vq, ClusterError -try: - from scipy.cluster import _vq - TESTC=True -except ImportError: - print "== Error while importing _vq, not testing C imp of vq ==" - TESTC=False - -#Optional: -# import modules that are located in the same directory as this file. 
-DATAFILE1 = os.path.join(os.path.dirname(__file__), "data.txt") - -# Global data -X = np.array([[3.0, 3], [4, 3], [4, 2], - [9, 2], [5, 1], [6, 2], [9, 4], - [5, 2], [5, 4], [7, 4], [6, 5]]) - -CODET1 = np.array([[3.0000, 3.0000], - [6.2000, 4.0000], - [5.8000, 1.8000]]) - -CODET2 = np.array([[11.0/3, 8.0/3], - [6.7500, 4.2500], - [6.2500, 1.7500]]) - -LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) - -class TestVq(TestCase): - def test_py_vq(self): - initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - label1 = py_vq(X, initc)[0] - assert_array_equal(label1, LABEL1) - - def test_py_vq2(self): - initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - label1 = py_vq2(X, initc)[0] - assert_array_equal(label1, LABEL1) - - def test_vq(self): - initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - if TESTC: - label1, dist = _vq.vq(X, initc) - assert_array_equal(label1, LABEL1) - tlabel1, tdist = vq(X, initc) - else: - print "== not testing C imp of vq ==" - - #def test_py_vq_1d(self): - # """Test special rank 1 vq algo, python implementation.""" - # data = X[:, 0] - # initc = data[:3] - # code = initc.copy() - # a, b = _py_vq_1d(data, initc) - # ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) - # assert_array_equal(a, ta) - # assert_array_equal(b, tb) - - def test_vq_1d(self): - """Test special rank 1 vq algo, python implementation.""" - data = X[:, 0] - initc = data[:3] - code = initc.copy() - if TESTC: - a, b = _vq.vq(data, initc) - ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) - assert_array_equal(a, ta) - assert_array_equal(b, tb) - else: - print "== not testing C imp of vq (rank 1) ==" - -class TestKMean(TestCase): - def test_large_features(self): - # Generate a data set with large values, and run kmeans on it to - # (regression for 1077). 
- d = 300 - n = 1e2 - - m1 = np.random.randn(d) - m2 = np.random.randn(d) - x = 10000 * np.random.randn(n, d) - 20000 * m1 - y = 10000 * np.random.randn(n, d) + 20000 * m2 - - data = np.empty((x.shape[0] + y.shape[0], d), np.double) - data[:x.shape[0]] = x - data[x.shape[0]:] = y - - res = kmeans(data, 2) - def test_kmeans_simple(self): - initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - code1 = kmeans(X, code, iter = 1)[0] - - assert_array_almost_equal(code1, CODET2) - - def test_kmeans_lost_cluster(self): - """This will cause kmean to have a cluster with no points.""" - data = np.fromfile(open(DATAFILE1), sep = ", ") - data = data.reshape((200, 2)) - initk = np.array([[-1.8127404, -0.67128041], - [ 2.04621601, 0.07401111], - [-2.31149087,-0.05160469]]) - - res = kmeans(data, initk) - warnings.simplefilter('ignore', UserWarning) - try: - res = kmeans2(data, initk, missing = 'warn') - finally: - warnings.simplefilter('default', UserWarning) - - try : - res = kmeans2(data, initk, missing = 'raise') - raise AssertionError("Exception not raised ! 
Should not happen") - except ClusterError, e: - pass - - def test_kmeans2_simple(self): - """Testing simple call to kmeans2 and its results.""" - initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - code1 = kmeans2(X, code, iter = 1)[0] - code2 = kmeans2(X, code, iter = 2)[0] - - assert_array_almost_equal(code1, CODET1) - assert_array_almost_equal(code2, CODET2) - - def test_kmeans2_rank1(self): - """Testing simple call to kmeans2 with rank 1 data.""" - data = np.fromfile(open(DATAFILE1), sep = ", ") - data = data.reshape((200, 2)) - data1 = data[:, 0] - data2 = data[:, 1] - - initc = data1[:3] - code = initc.copy() - code1 = kmeans2(data1, code, iter = 1)[0] - code2 = kmeans2(data1, code, iter = 2)[0] - - def test_kmeans2_rank1_2(self): - """Testing simple call to kmeans2 with rank 1 data.""" - data = np.fromfile(open(DATAFILE1), sep = ", ") - data = data.reshape((200, 2)) - data1 = data[:, 0] - - code1 = kmeans2(data1, 2, iter = 1) - - def test_kmeans2_init(self): - """Testing that kmeans2 init methods work.""" - data = np.fromfile(open(DATAFILE1), sep = ", ") - data = data.reshape((200, 2)) - - kmeans2(data, 3, minit = 'random') - kmeans2(data, 3, minit = 'points') - - # Check special case 1d - data = data[:, :1] - kmeans2(data, 3, minit = 'random') - kmeans2(data, 3, minit = 'points') - - def test_kmeans2_empty(self): - """Ticket #505.""" - try: - kmeans2([], 2) - raise AssertionError("This should not succeed.") - except ValueError, e: - # OK, that's what we expect - pass - - def test_kmeans_0k(self): - """Regression test for #546: fail when k arg is 0.""" - try: - kmeans(X, 0) - raise AssertionError("kmeans with 0 clusters should fail.") - except ValueError: - pass - - try: - kmeans2(X, 0) - raise AssertionError("kmeans2 with 0 clusters should fail.") - except ValueError: - pass - - try: - kmeans2(X, np.array([])) - raise AssertionError("kmeans2 with 0 clusters should fail.") - except ValueError: - pass - -if __name__ == "__main__": - 
run_module_suite() diff --git a/scipy-0.10.1/scipy/cluster/tests/vq_test.py b/scipy-0.10.1/scipy/cluster/tests/vq_test.py deleted file mode 100755 index f215a5c101..0000000000 --- a/scipy-0.10.1/scipy/cluster/tests/vq_test.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np -from scipy.cluster import vq - -def python_vq(all_data,code_book): - import time - t1 = time.time() - codes1,dist1 = vq.vq(all_data,code_book) - t2 = time.time() - #print 'fast (double):', t2 - t1 - #print ' first codes:', codes1[:5] - #print ' first dist:', dist1[:5] - #print ' last codes:', codes1[-5:] - #print ' last dist:', dist1[-5:] - float_obs = all_data.astype(np.float32) - float_code = code_book.astype(np.float32) - t1 = time.time() - codes1,dist1 = vq.vq(float_obs,float_code) - t2 = time.time() - #print 'fast (float):', t2 - t1 - #print ' first codes:', codes1[:5] - #print ' first dist:', dist1[:5] - #print ' last codes:', codes1[-5:] - #print ' last dist:', dist1[-5:] - - return codes1,dist1 - -def read_data(name): - f = open(name,'r') - data = [] - for line in f.readlines(): - data.append(map(float,string.split(line))) - f.close() - return array(data) - -def main(): - np.random.seed((1000,1000)) - Ncodes = 40 - Nfeatures = 16 - Nobs = 4000 - code_book = np.random.normal(0,1,(Ncodes,Nfeatures)) - features = np.random.normal(0,1,(Nobs,Nfeatures)) - codes,dist = python_vq(features,code_book) - -if __name__ == '__main__': - main() diff --git a/scipy-0.10.1/scipy/cluster/vq.py b/scipy-0.10.1/scipy/cluster/vq.py deleted file mode 100644 index 189e5be04a..0000000000 --- a/scipy-0.10.1/scipy/cluster/vq.py +++ /dev/null @@ -1,731 +0,0 @@ -""" -==================================================================== -K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) -==================================================================== - -Provides routines for k-means clustering, generating code books -from k-means models, and quantizing vectors by comparing them with 
-centroids in a code book. - -.. autosummary:: - :toctree: generated/ - - whiten -- Normalize a group of observations so each feature has unit variance - vq -- Calculate code book membership of a set of observation vectors - kmeans -- Performs k-means on a set of observation vectors forming k clusters - kmeans2 -- A different implementation of k-means with more methods - -- for initializing centroids - -Background information -====================== - -The k-means algorithm takes as input the number of clusters to -generate, k, and a set of observation vectors to cluster. It -returns a set of centroids, one for each of the k clusters. An -observation vector is classified with the cluster number or -centroid index of the centroid closest to it. - -A vector v belongs to cluster i if it is closer to centroid i than -any other centroids. If v belongs to i, we say centroid i is the -dominating centroid of v. The k-means algorithm tries to -minimize distortion, which is defined as the sum of the squared distances -between each observation vector and its dominating centroid. Each -step of the k-means algorithm refines the choices of centroids to -reduce distortion. The change in distortion is used as a -stopping criterion: when the change is lower than a threshold, the -k-means algorithm is not making sufficient progress and -terminates. One can also define a maximum number of iterations. - -Since vector quantization is a natural application for k-means, -information theory terminology is often used. The centroid index -or cluster index is also referred to as a "code" and the table -mapping codes to centroids and vice versa is often referred as a -"code book". The result of k-means, a set of centroids, can be -used to quantize vectors. Quantization aims to find an encoding of -vectors that reduces the expected distortion. - -All routines expect obs to be a M by N array where the rows are -the observation vectors. 
The codebook is a k by N array where the -i'th row is the centroid of code word i. The observation vectors -and centroids have the same feature dimension. - -As an example, suppose we wish to compress a 24-bit color image -(each pixel is represented by one byte for red, one for blue, and -one for green) before sending it over the web. By using a smaller -8-bit encoding, we can reduce the amount of data by two -thirds. Ideally, the colors for each of the 256 possible 8-bit -encoding values should be chosen to minimize distortion of the -color. Running k-means with k=256 generates a code book of 256 -codes, which fills up all possible 8-bit sequences. Instead of -sending a 3-byte value for each pixel, the 8-bit centroid index -(or code word) of the dominating centroid is transmitted. The code -book is also sent over the wire so each 8-bit code can be -translated back to a 24-bit pixel value representation. If the -image of interest was of an ocean, we would expect many 24-bit -blues to be represented by 8-bit codes. If it was an image of a -human face, more flesh tone colors would be represented in the -code book. - -""" -__docformat__ = 'restructuredtext' - -__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] - -# TODO: -# - implements high level method for running several times k-means with -# different initialialization -# - warning: what happens if different number of clusters ? For now, emit a -# warning, but it is not great, because I am not sure it really make sense to -# succeed in this case (maybe an exception is better ?) -import warnings - -from numpy.random import randint -from numpy import shape, zeros, sqrt, argmin, minimum, array, \ - newaxis, arange, compress, equal, common_type, single, double, take, \ - std, mean -import numpy as np - -class ClusterError(Exception): - pass - -def whiten(obs): - """ - Normalize a group of observations on a per feature basis. 
- - Before running k-means, it is beneficial to rescale each feature - dimension of the observation set with whitening. Each feature is - divided by its standard deviation across all observations to give - it unit variance. - - Parameters - ---------- - obs : ndarray - Each row of the array is an observation. The - columns are the features seen during each observation. - - >>> # f0 f1 f2 - >>> obs = [[ 1., 1., 1.], #o0 - ... [ 2., 2., 2.], #o1 - ... [ 3., 3., 3.], #o2 - ... [ 4., 4., 4.]]) #o3 - - Returns - ------- - result : ndarray - Contains the values in `obs` scaled by the standard devation - of each column. - - Examples - -------- - >>> from numpy import array - >>> from scipy.cluster.vq import whiten - >>> features = array([[ 1.9,2.3,1.7], - ... [ 1.5,2.5,2.2], - ... [ 0.8,0.6,1.7,]]) - >>> whiten(features) - array([[ 3.41250074, 2.20300046, 5.88897275], - [ 2.69407953, 2.39456571, 7.62102355], - [ 1.43684242, 0.57469577, 5.88897275]]) - - """ - std_dev = std(obs, axis=0) - return obs / std_dev - -def vq(obs, code_book): - """ - Assign codes from a code book to observations. - - Assigns a code from a code book to each observation. Each - observation vector in the 'M' by 'N' `obs` array is compared with the - centroids in the code book and assigned the code of the closest - centroid. - - The features in `obs` should have unit variance, which can be - acheived by passing them through the whiten function. The code - book can be created with the k-means algorithm or a different - encoding algorithm. - - Parameters - ---------- - obs : ndarray - Each row of the 'N' x 'M' array is an observation. The columns are - the "features" seen during each observation. The features must be - whitened first using the whiten function or something equivalent. - code_book : ndarray - The code book is usually generated using the k-means algorithm. - Each row of the array holds a different code, and the columns are - the features of the code. 
- - >>> # f0 f1 f2 f3 - >>> code_book = [ - ... [ 1., 2., 3., 4.], #c0 - ... [ 1., 2., 3., 4.], #c1 - ... [ 1., 2., 3., 4.]]) #c2 - - Returns - ------- - code : ndarray - A length N array holding the code book index for each observation. - dist : ndarray - The distortion (distance) between the observation and its nearest - code. - - Notes - ----- - This currently forces 32-bit math precision for speed. Anyone know - of a situation where this undermines the accuracy of the algorithm? - - Examples - -------- - >>> from numpy import array - >>> from scipy.cluster.vq import vq - >>> code_book = array([[1.,1.,1.], - ... [2.,2.,2.]]) - >>> features = array([[ 1.9,2.3,1.7], - ... [ 1.5,2.5,2.2], - ... [ 0.8,0.6,1.7]]) - >>> vq(features,code_book) - (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239])) - - """ - try: - import _vq - ct = common_type(obs, code_book) - c_obs = obs.astype(ct) - c_code_book = code_book.astype(ct) - if ct is single: - results = _vq.vq(c_obs, c_code_book) - elif ct is double: - results = _vq.vq(c_obs, c_code_book) - else: - results = py_vq(obs, code_book) - except ImportError: - results = py_vq(obs, code_book) - return results - -def py_vq(obs, code_book): - """ Python version of vq algorithm. - - The algorithm computes the euclidian distance between each - observation and every frame in the code_book. - - Parameters - ---------- - obs : ndarray - Expects a rank 2 array. Each row is one observation. - code_book : ndarray - Code book to use. Same format than obs. Should have same number of - features (eg columns) than obs. - - Returns - ------- - code : ndarray - code[i] gives the label of the ith obversation, that its code is - code_book[code[i]]. - mind_dist : ndarray - min_dist[i] gives the distance between the ith observation and its - corresponding code. - - Notes - ----- - This function is slower than the C version but works for - all input types. 
If the inputs have the wrong types for the - C versions of the function, this one is called as a last resort. - - It is about 20 times slower than the C version. - - """ - # n = number of observations - # d = number of features - if np.ndim(obs) == 1: - if not np.ndim(obs) == np.ndim(code_book): - raise ValueError( - "Observation and code_book should have the same rank") - else: - return _py_vq_1d(obs, code_book) - else: - (n, d) = shape(obs) - - # code books and observations should have same number of features and same - # shape - if not np.ndim(obs) == np.ndim(code_book): - raise ValueError("Observation and code_book should have the same rank") - elif not d == code_book.shape[1]: - raise ValueError("Code book(%d) and obs(%d) should have the same " \ - "number of features (eg columns)""" % - (code_book.shape[1], d)) - - code = zeros(n, dtype=int) - min_dist = zeros(n) - for i in range(n): - dist = np.sum((obs[i] - code_book) ** 2, 1) - code[i] = argmin(dist) - min_dist[i] = dist[code[i]] - - return code, sqrt(min_dist) - -def _py_vq_1d(obs, code_book): - """ Python version of vq algorithm for rank 1 only. - - Parameters - ---------- - obs : ndarray - Expects a rank 1 array. Each item is one observation. - code_book : ndarray - Code book to use. Same format than obs. Should rank 1 too. - - Returns - ------- - code : ndarray - code[i] gives the label of the ith obversation, that its code is - code_book[code[i]]. - mind_dist : ndarray - min_dist[i] gives the distance between the ith observation and its - corresponding code. - - """ - raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now") - n = obs.size - nc = code_book.size - dist = np.zeros((n, nc)) - for i in range(nc): - dist[:, i] = np.sum(obs - code_book[i]) - print dist - code = argmin(dist) - min_dist = dist[code] - - return code, sqrt(min_dist) - -def py_vq2(obs, code_book): - """2nd Python version of vq algorithm. 
- - The algorithm simply computes the euclidian distance between each - observation and every frame in the code_book/ - - Parameters - ---------- - obs : ndarray - Expect a rank 2 array. Each row is one observation. - code_book : ndarray - Code book to use. Same format than obs. Should have same number of - features (eg columns) than obs. - - Returns - ------- - code : ndarray - code[i] gives the label of the ith obversation, that its code is - code_book[code[i]]. - mind_dist : ndarray - min_dist[i] gives the distance between the ith observation and its - corresponding code. - - Notes - ----- - This could be faster when number of codebooks is small, but it - becomes a real memory hog when codebook is large. It requires - N by M by O storage where N=number of obs, M = number of - features, and O = number of codes. - - """ - d = shape(obs)[1] - - # code books and observations should have same number of features - if not d == code_book.shape[1]: - raise ValueError(""" - code book(%d) and obs(%d) should have the same - number of features (eg columns)""" % (code_book.shape[1], d)) - - diff = obs[newaxis, :, :] - code_book[:,newaxis,:] - dist = sqrt(np.sum(diff * diff, -1)) - code = argmin(dist, 0) - min_dist = minimum.reduce(dist, 0) #the next line I think is equivalent - # - and should be faster - #min_dist = choose(code,dist) # but in practice, didn't seem to make - # much difference. - return code, min_dist - -def _kmeans(obs, guess, thresh=1e-5): - """ "raw" version of k-means. - - Returns - ------- - code_book : - the lowest distortion codebook found. - avg_dist : - the average distance a observation is from a code in the book. - Lower means the code_book matches the data better. - - See Also - -------- - kmeans : wrapper around k-means - - XXX should have an axis variable here. - - Examples - -------- - Note: not whitened in this example. - - >>> from numpy import array - >>> from scipy.cluster.vq import _kmeans - >>> features = array([[ 1.9,2.3], - ... 
[ 1.5,2.5], - ... [ 0.8,0.6], - ... [ 0.4,1.8], - ... [ 1.0,1.0]]) - >>> book = array((features[0],features[2])) - >>> _kmeans(features,book) - (array([[ 1.7 , 2.4 ], - [ 0.73333333, 1.13333333]]), 0.40563916697728591) - - """ - - code_book = array(guess, copy = True) - avg_dist = [] - diff = thresh+1. - while diff > thresh: - nc = code_book.shape[0] - #compute membership and distances between obs and code_book - obs_code, distort = vq(obs, code_book) - avg_dist.append(mean(distort, axis=-1)) - #recalc code_book as centroids of associated obs - if(diff > thresh): - has_members = [] - for i in arange(nc): - cell_members = compress(equal(obs_code, i), obs, 0) - if cell_members.shape[0] > 0: - code_book[i] = mean(cell_members, 0) - has_members.append(i) - #remove code_books that didn't have any members - code_book = take(code_book, has_members, 0) - if len(avg_dist) > 1: - diff = avg_dist[-2] - avg_dist[-1] - #print avg_dist - return code_book, avg_dist[-1] - -def kmeans(obs, k_or_guess, iter=20, thresh=1e-5): - """ - Performs k-means on a set of observation vectors forming k clusters. - - The k-means algorithm adjusts the centroids until sufficient - progress cannot be made, i.e. the change in distortion since - the last iteration is less than some threshold. This yields - a code book mapping centroids to codes and vice versa. - - Distortion is defined as the sum of the squared differences - between the observations and the corresponding centroid. - - Parameters - ---------- - obs : ndarray - Each row of the M by N array is an observation vector. The - columns are the features seen during each observation. - The features must be whitened first with the `whiten` function. - - k_or_guess : int or ndarray - The number of centroids to generate. A code is assigned to - each centroid, which is also the row index of the centroid - in the code_book matrix generated. - - The initial k centroids are chosen by randomly selecting - observations from the observation matrix. 
Alternatively, - passing a k by N array specifies the initial k centroids. - - iter : int, optional - The number of times to run k-means, returning the codebook - with the lowest distortion. This argument is ignored if - initial centroids are specified with an array for the - ``k_or_guess`` parameter. This parameter does not represent the - number of iterations of the k-means algorithm. - - thresh : float, optional - Terminates the k-means algorithm if the change in - distortion since the last k-means iteration is less than - or equal to thresh. - - Returns - ------- - codebook : ndarray - A k by N array of k centroids. The i'th centroid - codebook[i] is represented with the code i. The centroids - and codes generated represent the lowest distortion seen, - not necessarily the globally minimal distortion. - - distortion : float - The distortion between the observations passed and the - centroids generated. - - See Also - -------- - kmeans2 : a different implementation of k-means clustering - with more methods for generating initial centroids but without - using a distortion change threshold as a stopping criterion. - - whiten : must be called prior to passing an observation matrix - to kmeans. - - Examples - -------- - >>> from numpy import array - >>> from scipy.cluster.vq import vq, kmeans, whiten - >>> features = array([[ 1.9,2.3], - ... [ 1.5,2.5], - ... [ 0.8,0.6], - ... [ 0.4,1.8], - ... [ 0.1,0.1], - ... [ 0.2,1.8], - ... [ 2.0,0.5], - ... [ 0.3,1.5], - ... 
[ 1.0,1.0]]) - >>> whitened = whiten(features) - >>> book = array((whitened[0],whitened[2])) - >>> kmeans(whitened,book) - (array([[ 2.3110306 , 2.86287398], - [ 0.93218041, 1.24398691]]), 0.85684700941625547) - - >>> from numpy import random - >>> random.seed((1000,2000)) - >>> codes = 3 - >>> kmeans(whitened,codes) - (array([[ 2.3110306 , 2.86287398], - [ 1.32544402, 0.65607529], - [ 0.40782893, 2.02786907]]), 0.5196582527686241) - - """ - if int(iter) < 1: - raise ValueError('iter must be at least 1.') - if type(k_or_guess) == type(array([])): - guess = k_or_guess - if guess.size < 1: - raise ValueError("Asked for 0 cluster ? initial book was %s" % \ - guess) - result = _kmeans(obs, guess, thresh = thresh) - else: - #initialize best distance value to a large value - best_dist = np.inf - No = obs.shape[0] - k = k_or_guess - if k < 1: - raise ValueError("Asked for 0 cluster ? ") - for i in range(iter): - #the intial code book is randomly selected from observations - guess = take(obs, randint(0, No, k), 0) - book, dist = _kmeans(obs, guess, thresh = thresh) - if dist < best_dist: - best_book = book - best_dist = dist - result = best_book, best_dist - return result - -def _kpoints(data, k): - """Pick k points at random in data (one row = one observation). - - This is done by taking the k first values of a random permutation of 1..N - where N is the number of observation. - - Parameters - ---------- - data : ndarray - Expect a rank 1 or 2 array. Rank 1 are assumed to describe one - dimensional data, rank 2 multidimensional data, in which case one - row is one observation. - k : int - Number of samples to generate. - - """ - if data.ndim > 1: - n = data.shape[0] - else: - n = data.size - - p = np.random.permutation(n) - x = data[p[:k], :].copy() - - return x - -def _krandinit(data, k): - """Returns k samples of a random variable which parameters depend on data. 
- - More precisely, it returns k observations sampled from a Gaussian random - variable which mean and covariances are the one estimated from data. - - Parameters - ---------- - data : ndarray - Expect a rank 1 or 2 array. Rank 1 are assumed to describe one - dimensional data, rank 2 multidimensional data, in which case one - row is one observation. - k : int - Number of samples to generate. - - """ - def init_rank1(data): - mu = np.mean(data) - cov = np.cov(data) - x = np.random.randn(k) - x *= np.sqrt(cov) - x += mu - return x - def init_rankn(data): - mu = np.mean(data, 0) - cov = np.atleast_2d(np.cov(data, rowvar = 0)) - - # k rows, d cols (one row = one obs) - # Generate k sample of a random variable ~ Gaussian(mu, cov) - x = np.random.randn(k, mu.size) - x = np.dot(x, np.linalg.cholesky(cov).T) + mu - return x - - nd = np.ndim(data) - if nd == 1: - return init_rank1(data) - else: - return init_rankn(data) - -_valid_init_meth = {'random': _krandinit, 'points': _kpoints} - -def _missing_warn(): - """Print a warning when called.""" - warnings.warn("One of the clusters is empty. " - "Re-run kmean with a different initialization.") - -def _missing_raise(): - """raise a ClusterError when called.""" - raise ClusterError("One of the clusters is empty. " - "Re-run kmean with a different initialization.") - -_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise} - -def kmeans2(data, k, iter = 10, thresh = 1e-5, minit = 'random', - missing = 'warn'): - """ - Classify a set of observations into k clusters using the k-means algorithm. - - The algorithm attempts to minimize the Euclidian distance between - observations and centroids. Several initialization methods are - included. - - Parameters - ---------- - data : ndarray - A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length - 'M' array of 'M' one-dimensional observations. - k : int or ndarray - The number of clusters to form as well as the number of - centroids to generate. 
If `minit` initialization string is - 'matrix', or if a ndarray is given instead, it is - interpreted as initial cluster to use instead. - iter : int - Number of iterations of the k-means algrithm to run. Note - that this differs in meaning from the iters parameter to - the kmeans function. - thresh : float - (not used yet) - minit : string - Method for initialization. Available methods are 'random', - 'points', 'uniform', and 'matrix': - - 'random': generate k centroids from a Gaussian with mean and - variance estimated from the data. - - 'points': choose k observations (rows) at random from data for - the initial centroids. - - 'uniform': generate k observations from the data from a uniform - distribution defined by the data set (unsupported). - - 'matrix': interpret the k parameter as a k by M (or length k - array for one-dimensional data) array of initial centroids. - - Returns - ------- - centroid : ndarray - A 'k' by 'N' array of centroids found at the last iteration of - k-means. - label : ndarray - label[i] is the code or index of the centroid the - i'th observation is closest to. - - """ - if missing not in _valid_miss_meth.keys(): - raise ValueError("Unkown missing method: %s" % str(missing)) - # If data is rank 1, then we have 1 dimension problem. 
- nd = np.ndim(data) - if nd == 1: - d = 1 - #raise ValueError("Input of rank 1 not supported yet") - elif nd == 2: - d = data.shape[1] - else: - raise ValueError("Input of rank > 2 not supported") - - if np.size(data) < 1: - raise ValueError("Input has 0 items.") - - # If k is not a single value, then it should be compatible with data's - # shape - if np.size(k) > 1 or minit == 'matrix': - if not nd == np.ndim(k): - raise ValueError("k is not an int and has not same rank than data") - if d == 1: - nc = len(k) - else: - (nc, dc) = k.shape - if not dc == d: - raise ValueError("k is not an int and has not same rank than\ - data") - clusters = k.copy() - else: - try: - nc = int(k) - except TypeError: - raise ValueError("k (%s) could not be converted to an integer " % str(k)) - - if nc < 1: - raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k)) - - if not nc == k: - warnings.warn("k was not an integer, was converted.") - try: - init = _valid_init_meth[minit] - except KeyError: - raise ValueError("unknown init method %s" % str(minit)) - clusters = init(data, k) - - if int(iter) < 1: - raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter) - - return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing]) - -def _kmeans2(data, code, niter, nc, missing): - """ "raw" version of kmeans2. Do not use directly. - - Run k-means with a given initial codebook. 
- - """ - for i in range(niter): - # Compute the nearest neighbour for each obs - # using the current code book - label = vq(data, code)[0] - # Update the code by computing centroids using the new code book - for j in range(nc): - mbs = np.where(label==j) - if mbs[0].size > 0: - code[j] = np.mean(data[mbs], axis=0) - else: - missing() - - return code, label - -if __name__ == '__main__': - pass - #import _vq - #a = np.random.randn(4, 2) - #b = np.random.randn(2, 2) - - #print _vq.vq(a, b) - #print _vq.vq(np.array([[1], [2], [3], [4], [5], [6.]]), - # np.array([[2.], [5.]])) - #print _vq.vq(np.array([1, 2, 3, 4, 5, 6.]), np.array([2., 5.])) - #_vq.vq(a.astype(np.float32), b.astype(np.float32)) - #_vq.vq(a, b.astype(np.float32)) - #_vq.vq([0], b) diff --git a/scipy-0.10.1/scipy/constants/__init__.py b/scipy-0.10.1/scipy/constants/__init__.py deleted file mode 100644 index dcfb865156..0000000000 --- a/scipy-0.10.1/scipy/constants/__init__.py +++ /dev/null @@ -1,302 +0,0 @@ -""" -================================== -Constants (:mod:`scipy.constants`) -================================== - -.. module:: scipy.constants - -Physical and mathematical constants and units. 
- - -Mathematical constants -====================== - -============ ================================================================= -``pi`` Pi -``golden`` Golden ratio -============ ================================================================= - - -Physical constants -================== - -============= ================================================================= -``c`` speed of light in vacuum -``mu_0`` the magnetic constant :math:`\mu_0` -``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` -``h`` the Planck constant :math:`h` -``hbar`` :math:`\hbar = h/(2\pi)` -``G`` Newtonian constant of gravitation -``g`` standard acceleration of gravity -``e`` elementary charge -``R`` molar gas constant -``alpha`` fine-structure constant -``N_A`` Avogadro constant -``k`` Boltzmann constant -``sigma`` Stefan-Boltzmann constant :math:`\sigma` -``Wien`` Wien displacement law constant -``Rydberg`` Rydberg constant -``m_e`` electron mass -``m_p`` proton mass -``m_n`` neutron mass -============= ================================================================= - - -Constants database ------------------- - -In addition to the above variables, :mod:`scipy.constants` also contains the -2010 CODATA recommended values [CODATA2010]_ database containing more physical -constants. - -.. autosummary:: - :toctree: generated/ - - value -- Value in physical_constants indexed by key - unit -- Unit in physical_constants indexed by key - precision -- Relative precision in physical_constants indexed by key - find -- Return list of physical_constant keys with a given string - ConstantWarning -- Constant sought not in newest CODATA data set - -.. data:: physical_constants - - Dictionary of physical constants, of the format - ``physical_constants[name] = (value, unit, uncertainty)``. 
- -Available constants: - -====================================================================== ==== -%(constant_names)s -====================================================================== ==== - - -Units -===== - -SI prefixes ------------ - -============ ================================================================= -``yotta`` :math:`10^{24}` -``zetta`` :math:`10^{21}` -``exa`` :math:`10^{18}` -``peta`` :math:`10^{15}` -``tera`` :math:`10^{12}` -``giga`` :math:`10^{9}` -``mega`` :math:`10^{6}` -``kilo`` :math:`10^{3}` -``hecto`` :math:`10^{2}` -``deka`` :math:`10^{1}` -``deci`` :math:`10^{-1}` -``centi`` :math:`10^{-2}` -``milli`` :math:`10^{-3}` -``micro`` :math:`10^{-6}` -``nano`` :math:`10^{-9}` -``pico`` :math:`10^{-12}` -``femto`` :math:`10^{-15}` -``atto`` :math:`10^{-18}` -``zepto`` :math:`10^{-21}` -============ ================================================================= - -Binary prefixes ---------------- - -============ ================================================================= -``kibi`` :math:`2^{10}` -``mebi`` :math:`2^{20}` -``gibi`` :math:`2^{30}` -``tebi`` :math:`2^{40}` -``pebi`` :math:`2^{50}` -``exbi`` :math:`2^{60}` -``zebi`` :math:`2^{70}` -``yobi`` :math:`2^{80}` -============ ================================================================= - -Weight ------- - -================= ============================================================ -``gram`` :math:`10^{-3}` kg -``metric_ton`` :math:`10^{3}` kg -``grain`` one grain in kg -``lb`` one pound (avoirdupous) in kg -``oz`` one ounce in kg -``stone`` one stone in kg -``grain`` one grain in kg -``long_ton`` one long ton in kg -``short_ton`` one short ton in kg -``troy_ounce`` one Troy ounce in kg -``troy_pound`` one Troy pound in kg -``carat`` one carat in kg -``m_u`` atomic mass constant (in kg) -================= ============================================================ - -Angle ------ - -================= ============================================================ 
-``degree`` degree in radians -``arcmin`` arc minute in radians -``arcsec`` arc second in radians -================= ============================================================ - - -Time ----- - -================= ============================================================ -``minute`` one minute in seconds -``hour`` one hour in seconds -``day`` one day in seconds -``week`` one week in seconds -``year`` one year (365 days) in seconds -``Julian_year`` one Julian year (365.25 days) in seconds -================= ============================================================ - - -Length ------- - -================= ============================================================ -``inch`` one inch in meters -``foot`` one foot in meters -``yard`` one yard in meters -``mile`` one mile in meters -``mil`` one mil in meters -``pt`` one point in meters -``survey_foot`` one survey foot in meters -``survey_mile`` one survey mile in meters -``nautical_mile`` one nautical mile in meters -``fermi`` one Fermi in meters -``angstrom`` one Angstrom in meters -``micron`` one micron in meters -``au`` one astronomical unit in meters -``light_year`` one light year in meters -``parsec`` one parsec in meters -================= ============================================================ - -Pressure --------- - -================= ============================================================ -``atm`` standard atmosphere in pascals -``bar`` one bar in pascals -``torr`` one torr (mmHg) in pascals -``psi`` one psi in pascals -================= ============================================================ - -Area ----- - -================= ============================================================ -``hectare`` one hectare in square meters -``acre`` one acre in square meters -================= ============================================================ - - -Volume ------- - -=================== ======================================================== -``liter`` one liter in cubic meters -``gallon`` 
one gallon (US) in cubic meters -``gallon_imp`` one gallon (UK) in cubic meters -``fluid_ounce`` one fluid ounce (US) in cubic meters -``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters -``bbl`` one barrel in cubic meters -=================== ======================================================== - -Speed ------ - -================= ========================================================== -``kmh`` kilometers per hour in meters per second -``mph`` miles per hour in meters per second -``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second -``knot`` one knot in meters per second -================= ========================================================== - - -Temperature ------------ - -===================== ======================================================= -``zero_Celsius`` zero of Celsius scale in Kelvin -``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins -===================== ======================================================= - -.. autosummary:: - :toctree: generated/ - - C2K - K2C - F2C - C2F - F2K - K2F - -Energy ------- - -==================== ======================================================= -``eV`` one electron volt in Joules -``calorie`` one calorie (thermochemical) in Joules -``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules -``erg`` one erg in Joules -``Btu`` one British thermal unit (International Steam Table) in Joules -``Btu_th`` one British thermal unit (thermochemical) in Joules -``ton_TNT`` one ton of TNT in Joules -==================== ======================================================= - -Power ------ - -==================== ======================================================= -``hp`` one horsepower in watts -==================== ======================================================= - -Force ------ - -==================== ======================================================= -``dyn`` one dyne in newtons -``lbf`` one pound force in newtons -``kgf`` one 
kilogram force in newtons -==================== ======================================================= - -Optics ------- - -.. autosummary:: - :toctree: generated/ - - lambda2nu - nu2lambda - -References -========== - -.. [CODATA2010] CODATA Recommended Values of the Fundamental - Physical Constants 2010. - - http://physics.nist.gov/cuu/Constants/index.html - -""" - -# Modules contributed by BasSw (wegwerp@gmail.com) -from codata import * -from constants import * -from codata import _obsolete_constants - -_constant_names = [(_k.lower(), _k, _v) - for _k, _v in physical_constants.items() - if _k not in _obsolete_constants] -_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])), - _x[2][0], _x[2][1]) - for _x in sorted(_constant_names)]) -__doc__ = __doc__ % dict(constant_names=_constant_names) -del _constant_names - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/constants/codata.py b/scipy-0.10.1/scipy/constants/codata.py deleted file mode 100644 index af1d58e7d8..0000000000 --- a/scipy-0.10.1/scipy/constants/codata.py +++ /dev/null @@ -1,979 +0,0 @@ -# Compiled by Charles Harris, dated October 3, 2002 -# updated to 2002 values by BasSw, 2006 -# Updated to 2006 values by Vincent Davis June 2010 - -""" -Fundamental Physical Constants ------------------------------- - -These constants are taken from CODATA Recommended Values of the Fundamental -Physical Constants 2010. - -The values of the constants provided at this site are recommended for -international use by CODATA and are the latest available. Termed the "2010 -CODATA recommended values," they are generally recognized worldwide for use in -all fields of science and technology. The values became available on 2 June -2011 and replaced the 2006 CODATA set. They are based on all of the data -available through 31 December 2010. 
The 2010 adjustment was carried out under -the auspices of the CODATA Task Group on Fundamental Constants. Also available -is an Introduction to the constants for nonexperts. - -http://physics.nist.gov/cuu/Constants/ - -""" - -import warnings -from math import pi, sqrt -__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find', - 'ConstantWarning'] - -""" -Source: http://physics.nist.gov/cuu/Constants/index.html - -The values of the constants provided at the above site are recommended -for international use by CODATA and are the latest available. Termed -the "2006 CODATA recommended values", they are generally recognized -worldwide for use in all fields of science and technology. The values -became available in March 2007 and replaced the 2002 CODATA set. They -are based on all of the data available through 31 December 2006. The -2006 adjustment was carried out under the auspices of the CODATA Task -Group on Fundamental Constants. -""" - -# -# Source: http://physics.nist.gov/cuu/Constants/index.html -# - -# Quantity Value Uncertainty Unit -# ---------------------------------------------------- --------------------- -------------------- ------------- -txt2002 = """\ -Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K -atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2 -atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3 -atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m -atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1 -atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2 -atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1 -atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T -deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1 -deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3 -deuteron magn. 
moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092 -deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4 -deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045 -deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11 -electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1 -electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1 -electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1 -electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038 -electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85 -electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3 -electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071 -electron to shielded helion magn. moment ratio 864.058 255 0.000 010 -electron-deuteron magn. moment ratio -2143.923 493 0.000 023 -electron-muon magn. moment ratio 206.766 9894 0.000 0054 -electron-neutron magn. moment ratio 960.920 50 0.000 23 -electron-proton magn. moment ratio -658.210 6862 0.000 0066 -magn. constant 12.566 370 614...e-7 0 N A^-2 -magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb -muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1 -muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3 -muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23 -muon-proton magn. moment ratio -3.183 345 118 0.000 000 089 -neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1 -neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1 -neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1 -neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3 -neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45 -neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16 -neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3 -neutron-proton magn. 
moment ratio -0.684 979 34 0.000 000 16 -proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1 -proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1 -proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1 -proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3 -proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028 -proton magn. shielding correction 25.689e-6 0.015e-6 -proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34 -shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 -shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 -shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1 -shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3 -shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025 -shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012 -shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033 -shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 -shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 -shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1 -shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3 -shielded proton magn. 
moment to nuclear magneton ratio 2.792 775 604 0.000 000 030 -{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m""" - -txt2006 = """\ -lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m -alpha particle-electron mass ratio 7294.299 5365 0.000 0031 -alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg -alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J -alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV -alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u -alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1 -alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41 -Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m -atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg -atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J -atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV -atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV -atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h -atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz -atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1 -atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J -atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K -atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg -atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2 -atomic unit of 2nd hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3 -atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s -atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C -atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3 -atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A -atomic unit of electric dipole mom. 
8.478 352 81 e-30 0.000 000 21 e-30 C m -atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1 -atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2 -atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1 -atomic unit of electric potential 27.211 383 86 0.000 000 68 V -atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2 -atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J -atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N -atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m -atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1 -atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T -atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2 -atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg -atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1 -atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1 -atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s -atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1 -Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1 -Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1 -Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1 -Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1 -Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1 -Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1 -Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m -Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1 -Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1 -Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1 -Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1 -characteristic impedance of vacuum 376.730 313 461... 
(exact) ohm -classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m -Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m -Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m -conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S -conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 -conventional value of von Klitzing constant 25 812.807 (exact) ohm -Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m -deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 -deuteron-electron mass ratio 3670.482 9654 0.000 0016 -deuteron g factor 0.857 438 2308 0.000 000 0072 -deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1 -deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 -deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 -deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg -deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J -deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV -deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u -deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1 -deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 -deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 -deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22 -deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m -electric constant 8.854 187 817... e-12 (exact) F m^-1 -electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1 -electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 -electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4 -electron g factor -2.002 319 304 3622 0.000 000 000 0015 -electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1 -electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1 -electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1 -electron mag. mom. 
anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3 -electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74 -electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80 -electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg -electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J -electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV -electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u -electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1 -electron-muon mag. mom. ratio 206.766 9877 0.000 0052 -electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3 -electron-neutron mag. mom. ratio 960.920 50 0.000 23 -electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4 -electron-proton mag. mom. ratio -658.210 6848 0.000 0054 -electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4 -electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4 -electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4 -electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 -electron to shielded proton mag. mom. 
ratio -658.227 5971 0.000 0072 -electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J -electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u -electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h -electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz -electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1 -electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J -electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K -electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg -elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C -elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1 -Faraday constant 96 485.3399 0.0024 C mol^-1 -Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1 -Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2 -fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3 -first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2 -first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1 -hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u -hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV -Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J -Hartree energy in eV 27.211 383 86 0.000 000 68 eV -hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz -hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1 -hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J -hartree-kelvin relationship 3.157 7465 e5 0.000 0055 e5 K -hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg -helion-electron mass ratio 5495.885 2765 0.000 0052 -helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg -helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J -helion mass energy equivalent in MeV 2808.391 383 0.000 070 
MeV -helion mass in u 3.014 932 2473 0.000 000 0026 u -helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1 -helion-proton mass ratio 2.993 152 6713 0.000 000 0026 -hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u -hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV -hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h -hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1 -hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J -hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K -hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg -inverse fine-structure constant 137.035 999 679 0.000 000 094 -inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u -inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV -inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h -inverse meter-hertz relationship 299 792 458 (exact) Hz -inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J -inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K -inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg -inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm -Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1 -joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u -joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV -joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h -joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz -joule-inverse meter relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1 -joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K -joule-kilogram relationship 1.112 650 056... 
e-17 (exact) kg -kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u -kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV -kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h -kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz -kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1 -kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J -kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg -kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u -kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV -kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h -kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz -kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1 -kilogram-joule relationship 8.987 551 787... e16 (exact) J -kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K -lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m -Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3 -mag. constant 12.566 370 614... e-7 (exact) N A^-2 -mag. 
flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb -molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1 -molar mass constant 1 e-3 (exact) kg mol^-1 -molar mass of carbon-12 12 e-3 (exact) kg mol^-1 -molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1 -molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1 -molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1 -molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1 -molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1 -Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m -muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m -muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m -muon-electron mass ratio 206.768 2823 0.000 0052 -muon g factor -2.002 331 8414 0.000 000 0012 -muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1 -muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3 -muon mag. mom. to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3 -muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23 -muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg -muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J -muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV -muon mass in u 0.113 428 9256 0.000 000 0029 u -muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1 -muon-neutron mass ratio 0.112 454 5167 0.000 000 0029 -muon-proton mag. mom. 
ratio -3.183 345 137 0.000 000 085 -muon-proton mass ratio 0.112 609 5261 0.000 000 0029 -muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2 -natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s -natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s -natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J -natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV -natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m -natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg -natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1 -natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c -natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s -natural unit of velocity 299 792 458 (exact) m s^-1 -neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m -neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m -neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 -neutron-electron mass ratio 1838.683 6605 0.000 0011 -neutron g factor -3.826 085 45 0.000 000 90 -neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1 -neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1 -neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1 -neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 -neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 -neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg -neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J -neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV -neutron mass in u 1.008 664 915 97 0.000 000 000 43 u -neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1 -neutron-muon mass ratio 8.892 484 09 0.000 000 23 -neutron-proton mag. mom. 
ratio -0.684 979 34 0.000 000 16 -neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46 -neutron-tau mass ratio 0.528 740 0.000 086 -neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 -Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2 -Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2 -nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1 -nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1 -nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1 -nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1 -nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1 -Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s -Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s -Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s -Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s -Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm -Planck length 1.616 252 e-35 0.000 081 e-35 m -Planck mass 2.176 44 e-8 0.000 11 e-8 kg -Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV -Planck temperature 1.416 785 e32 0.000 071 e32 K -Planck time 5.391 24 e-44 0.000 27 e-44 s -proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1 -proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m -proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m -proton-electron mass ratio 1836.152 672 47 0.000 000 80 -proton g factor 5.585 694 713 0.000 000 046 -proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1 -proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1 -proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1 -proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3 -proton mag. mom. 
to nuclear magneton ratio 2.792 847 356 0.000 000 023 -proton mag. shielding correction 25.694 e-6 0.014 e-6 -proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg -proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J -proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV -proton mass in u 1.007 276 466 77 0.000 000 000 10 u -proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1 -proton-muon mass ratio 8.880 243 39 0.000 000 23 -proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 -proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46 -proton rms charge radius 0.8768 e-15 0.0069 e-15 m -proton-tau mass ratio 0.528 012 0.000 086 -quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1 -quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1 -Rydberg constant 10 973 731.568 527 0.000 073 m^-1 -Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz -Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV -Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J -Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044 -Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044 -second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K -shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1 -shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1 -shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1 -shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 -shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 -shielded helion to proton mag. mom. ratio -0.761 766 558 0.000 000 011 -shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 -shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1 -shielded proton gyromag. 
ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1 -shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1 -shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 -shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 -speed of light in vacuum 299 792 458 (exact) m s^-1 -standard acceleration of gravity 9.806 65 (exact) m s^-2 -standard atmosphere 101 325 (exact) Pa -Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4 -tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m -tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m -tau-electron mass ratio 3477.48 0.57 -tau mass 3.167 77 e-27 0.000 52 e-27 kg -tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J -tau mass energy equivalent in MeV 1776.99 0.29 MeV -tau mass in u 1.907 68 0.000 31 u -tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1 -tau-muon mass ratio 16.8183 0.0027 -tau-neutron mass ratio 1.891 29 0.000 31 -tau-proton mass ratio 1.893 90 0.000 31 -Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2 -triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3 -triton-electron mass ratio 5496.921 5269 0.000 0051 -triton g factor 5.957 924 896 0.000 000 076 -triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1 -triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 -triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 -triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg -triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J -triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV -triton mass in u 3.015 500 7134 0.000 000 0025 u -triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 -triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37 -triton-proton mag. mom. 
ratio 1.066 639 908 0.000 000 010 -triton-proton mass ratio 2.993 717 0309 0.000 000 0025 -unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg -von Klitzing constant 25 812.807 557 0.000 018 ohm -weak mixing angle 0.222 55 0.000 56 -Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1 -Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K""" - -txt2010 = """\ -{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m -alpha particle-electron mass ratio 7294.299 5361 0.000 0029 -alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg -alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J -alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV -alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u -alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1 -alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36 -Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m -atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg -atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J -atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV -atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV -atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h -atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz -atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1 -atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J -atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K -atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg -atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2 -atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3 -atomic unit of 
action 1.054 571 726 e-34 0.000 000 047 e-34 J s -atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C -atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3 -atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A -atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m -atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1 -atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2 -atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1 -atomic unit of electric potential 27.211 385 05 0.000 000 60 V -atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2 -atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J -atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N -atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m -atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1 -atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T -atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2 -atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg -atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1 -atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 -atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s -atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1 -Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1 -Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1 -Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1 -Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1 -Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1 -Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1 -Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m -Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1 -Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1 -Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1 -Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1 -characteristic impedance of vacuum 376.730 313 461... (exact) ohm -classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m -Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m -Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m -conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S -conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 -conventional value of von Klitzing constant 25 812.807 (exact) ohm -Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m -deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 -deuteron-electron mass ratio 3670.482 9652 0.000 0015 -deuteron g factor 0.857 438 2308 0.000 000 0072 -deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1 -deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 -deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 -deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg -deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J -deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV -deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u -deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1 -deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 -deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 -deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18 -deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m -electric constant 8.854 187 817... e-12 (exact) F m^-1 -electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1 -electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 -electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4 -electron g factor -2.002 319 304 361 53 0.000 000 000 000 53 -electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1 -electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1 -electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4 -electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1 -electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3 -electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27 -electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75 -electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg -electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J -electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV -electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u -electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1 -electron-muon mag. mom. ratio 206.766 9896 0.000 0052 -electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3 -electron-neutron mag. mom. 
ratio 960.920 50 0.000 23 -electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4 -electron-proton mag. mom. ratio -658.210 6848 0.000 0054 -electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4 -electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 -electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4 -electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 -electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 -electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4 -electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J -electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u -electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h -electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz -electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1 -electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J -electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K -electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg -elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C -elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1 -Faraday constant 96 485.3365 0.0021 C mol^-1 -Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1 -Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2 -fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3 -first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2 -first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1 -hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u -hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV -Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J -Hartree energy in eV 27.211 385 05 0.000 000 60 eV -hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 
e15 Hz -hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1 -hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J -hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K -hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg -helion-electron mass ratio 5495.885 2754 0.000 0050 -helion g factor -4.255 250 613 0.000 000 050 -helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1 -helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 -helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025 -helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg -helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J -helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV -helion mass in u 3.014 932 2468 0.000 000 0025 u -helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1 -helion-proton mass ratio 2.993 152 6707 0.000 000 0025 -hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u -hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV -hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h -hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 -hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J -hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K -hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg -inverse fine-structure constant 137.035 999 074 0.000 000 044 -inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u -inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV -inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h -inverse meter-hertz relationship 299 792 458 (exact) Hz -inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J -inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K -inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg -inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm -Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1 -joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u -joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV -joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h -joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz -joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1 -joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K -joule-kilogram relationship 1.112 650 056... 
e-17 (exact) kg -kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u -kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV -kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h -kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz -kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1 -kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J -kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg -kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u -kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV -kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h -kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz -kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1 -kilogram-joule relationship 8.987 551 787... e16 (exact) J -kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K -lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m -Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3 -Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3 -mag. constant 12.566 370 614... e-7 (exact) N A^-2 -mag. 
flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb -molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1 -molar mass constant 1 e-3 (exact) kg mol^-1 -molar mass of carbon-12 12 e-3 (exact) kg mol^-1 -molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1 -molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1 -molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1 -molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1 -molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1 -Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m -muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m -muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m -muon-electron mass ratio 206.768 2843 0.000 0052 -muon g factor -2.002 331 8418 0.000 000 0013 -muon mag. mom. -4.490 448 07 e-26 0.000 000 15 e-26 J T^-1 -muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3 -muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3 -muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22 -muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg -muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J -muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV -muon mass in u 0.113 428 9267 0.000 000 0029 u -muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1 -muon-neutron mass ratio 0.112 454 5177 0.000 000 0028 -muon-proton mag. mom. 
ratio -3.183 345 107 0.000 000 084 -muon-proton mass ratio 0.112 609 5272 0.000 000 0028 -muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 -natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s -natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s -natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J -natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV -natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m -natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg -natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1 -natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c -natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s -natural unit of velocity 299 792 458 (exact) m s^-1 -neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m -neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m -neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 -neutron-electron mass ratio 1838.683 6605 0.000 0011 -neutron g factor -3.826 085 45 0.000 000 90 -neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1 -neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1 -neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1 -neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 -neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45 -neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg -neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J -neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV -neutron mass in u 1.008 664 916 00 0.000 000 000 43 u -neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1 -neutron-muon mass ratio 8.892 484 00 0.000 000 22 -neutron-proton mag. mom. 
ratio -0.684 979 34 0.000 000 16 -neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30 -neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13 -neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42 -neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45 -neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45 -neutron-tau mass ratio 0.528 790 0.000 048 -neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 -Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2 -Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2 -nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1 -nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1 -nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1 -nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1 -nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1 -Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s -Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s -Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s -Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s -Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm -Planck length 1.616 199 e-35 0.000 097 e-35 m -Planck mass 2.176 51 e-8 0.000 13 e-8 kg -Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV -Planck temperature 1.416 833 e32 0.000 085 e32 K -Planck time 5.391 06 e-44 0.000 32 e-44 s -proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1 -proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m -proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m -proton-electron mass ratio 1836.152 672 45 0.000 000 75 -proton g factor 5.585 694 713 0.000 000 046 -proton gyromag. 
ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1 -proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1 -proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1 -proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3 -proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 -proton mag. shielding correction 25.694 e-6 0.014 e-6 -proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg -proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J -proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV -proton mass in u 1.007 276 466 812 0.000 000 000 090 u -proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1 -proton-muon mass ratio 8.880 243 31 0.000 000 22 -proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 -proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45 -proton rms charge radius 0.8775 e-15 0.0051 e-15 m -proton-tau mass ratio 0.528 063 0.000 048 -quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1 -quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1 -Rydberg constant 10 973 731.568 539 0.000 055 m^-1 -Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz -Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV -Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J -Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023 -Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023 -second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K -shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1 -shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1 -shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1 -shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 -shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 -shielded helion to proton mag. 
mom. ratio -0.761 766 558 0.000 000 011 -shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 -shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1 -shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1 -shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1 -shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 -shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 -speed of light in vacuum 299 792 458 (exact) m s^-1 -standard acceleration of gravity 9.806 65 (exact) m s^-2 -standard atmosphere 101 325 (exact) Pa -standard-state pressure 100 000 (exact) Pa -Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4 -tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m -tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m -tau-electron mass ratio 3477.15 0.31 -tau mass 3.167 47 e-27 0.000 29 e-27 kg -tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J -tau mass energy equivalent in MeV 1776.82 0.16 MeV -tau mass in u 1.907 49 0.000 17 u -tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 -tau-muon mass ratio 16.8167 0.0015 -tau-neutron mass ratio 1.891 11 0.000 17 -tau-proton mass ratio 1.893 72 0.000 17 -Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2 -triton-electron mass ratio 5496.921 5267 0.000 0050 -triton g factor 5.957 924 896 0.000 000 076 -triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1 -triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 -triton mag. mom. 
to nuclear magneton ratio 2.978 962 448 0.000 000 038 -triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg -triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J -triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV -triton mass in u 3.015 500 7134 0.000 000 0025 u -triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 -triton-proton mass ratio 2.993 717 0308 0.000 000 0025 -unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg -von Klitzing constant 25 812.807 4434 0.000 0084 ohm -weak mixing angle 0.2223 0.0021 -Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1 -Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K""" - -# ----------------------------------------------------------------------------- - -physical_constants = {} - -def parse_constants(d): - constants = {} - for line in d.split('\n'): - name = line[:55].rstrip() - val = line[55:77].replace(' ','').replace('...','') - val = float(val) - uncert = line[77:99].replace(' ','').replace('(exact)', '0') - uncert = float(uncert) - units = line[99:].rstrip() - constants[name] = (val, units, uncert) - return constants - -_physical_constants_2002 = parse_constants(txt2002) -_physical_constants_2006 = parse_constants(txt2006) -_physical_constants_2010 = parse_constants(txt2010) - -physical_constants.update(_physical_constants_2002) -physical_constants.update(_physical_constants_2006) -physical_constants.update(_physical_constants_2010) -_current_constants = _physical_constants_2010 -_current_codata = "CODATA 2010" - -# check obsolete values -_obsolete_constants = {} -for k in physical_constants.iterkeys(): - if k not in _current_constants: - _obsolete_constants[k] = True - -# generate some additional aliases -_aliases = {} -for k in _physical_constants_2002.iterkeys(): - if 'magn.' 
in k: - _aliases[k] = k.replace('magn.', 'mag.') -for k in _physical_constants_2006.iterkeys(): - if 'momentum' in k: - _aliases[k] = k.replace('momentum', 'mom.um') - -class ConstantWarning(DeprecationWarning): - """Accessing a constant no longer in current CODATA data set""" - pass - -def _check_obsolete(key): - if key in _obsolete_constants and key not in _aliases: - warnings.warn("Constant '%s' is not in current %s data set" % ( - key, _current_codata), ConstantWarning) - -def value(key) : - """ - Value in physical_constants indexed by key - - Parameters - ---------- - key : Python string or unicode - Key in dictionary `physical_constants` - - Returns - ------- - value : float - Value in `physical_constants` corresponding to `key` - - See Also - -------- - codata : Contains the description of `physical_constants`, which, as a - dictionary literal object, does not itself possess a docstring. - - Examples - -------- - >>> from scipy.constants import codata - >>> codata.value('elementary charge') - 1.602176487e-019 - - """ - _check_obsolete(key) - return physical_constants[key][0] - -def unit(key) : - """ - Unit in physical_constants indexed by key - - Parameters - ---------- - key : Python string or unicode - Key in dictionary `physical_constants` - - Returns - ------- - unit : Python string - Unit in `physical_constants` corresponding to `key` - - See Also - -------- - codata : Contains the description of `physical_constants`, which, as a - dictionary literal object, does not itself possess a docstring. 
- - Examples - -------- - >>> from scipy.constants import codata - >>> codata.unit(u'proton mass') - 'kg' - - """ - _check_obsolete(key) - return physical_constants[key][1] - -def precision(key) : - """ - Relative precision in physical_constants indexed by key - - Parameters - ---------- - key : Python string or unicode - Key in dictionary `physical_constants` - - Returns - ------- - prec : float - Relative precision in `physical_constants` corresponding to `key` - - See Also - -------- - codata : Contains the description of `physical_constants`, which, as a - dictionary literal object, does not itself possess a docstring. - - Examples - -------- - >>> from scipy.constants import codata - >>> codata.precision(u'proton mass') - 4.96226989798e-08 - - """ - _check_obsolete(key) - return physical_constants[key][2] / physical_constants[key][0] - -def find(sub=None, disp=False): - """ - Return list of codata.physical_constant keys containing a given string. - - Parameters - ---------- - sub : str, unicode - Sub-string to search keys for. By default, return all keys. - disp : bool - If True, print the keys that are found, and return None. - Otherwise, return the list of keys without printing anything. - - Returns - ------- - keys : list or None - If `disp` is False, the list of keys is returned. - Otherwise, None is returned. - - See Also - -------- - codata : Contains the description of `physical_constants`, which, as a - dictionary literal object, does not itself possess a docstring. - - """ - if sub is None: - result = _current_constants.keys() - else: - result = [key for key in _current_constants \ - if sub.lower() in key.lower()] - - result.sort() - if disp: - for key in result: - print key - return - else: - return result - -# Table is lacking some digits for exact values: calculate from definition -c = value('speed of light in vacuum') -mu0 = 4e-7*pi -epsilon0 = 1/(mu0*c*c) - -exact_values = { -'mag. 
constant': (mu0, 'N A^-2', 0.0), -'electric constant': (epsilon0, 'F m^-1', 0.0), -'characteristic impedance of vacuum': (sqrt(mu0/epsilon0), 'ohm', 0.0), -'atomic unit of permittivity': (4*epsilon0*pi, 'F m^-1', 0.0), #is that the definition? -'joule-kilogram relationship': (1/(c*c), 'kg', 0.0), -'kilogram-joule relationship': (c*c, 'J', 0.0), -'hertz-inverse meter relationship': (1/c, 'm^-1', 0.0) -} - -# sanity check -for key in exact_values: - val = _current_constants[key][0] - if abs(exact_values[key][0] - val) / val > 1e-9: - raise ValueError("Constants.codata: exact values too far off.") - -physical_constants.update(exact_values) - -# finally, insert aliases for values -for k, v in list(_aliases.items()): - if v in _current_constants: - physical_constants[k] = physical_constants[v] - else: - del _aliases[k] diff --git a/scipy-0.10.1/scipy/constants/constants.py b/scipy-0.10.1/scipy/constants/constants.py deleted file mode 100644 index e1cb7acab8..0000000000 --- a/scipy-0.10.1/scipy/constants/constants.py +++ /dev/null @@ -1,398 +0,0 @@ -""" -Collection of physical constants and conversion factors. - -Most constants are in SI units, so you can do -print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots' - -The list is not meant to be comprehensive, but just a convenient list for everyday use. -""" - -""" -BasSw 2006 -physical constants: imported from CODATA -unit conversion: see e.g. NIST special publication 811 -Use at own risk: double-check values before calculating your Mars orbit-insertion burn. -Some constants exist in a few variants, which are marked with suffixes. -The ones without any suffix should be the most common one. 
-""" - -import math as _math -from codata import value as _cd -import numpy as _np - -#mathematical constants -pi = _math.pi -golden = golden_ratio = (1 + _math.sqrt(5)) / 2 - -#SI prefixes -yotta = 1e24 -zetta = 1e21 -exa = 1e18 -peta = 1e15 -tera = 1e12 -giga = 1e9 -mega = 1e6 -kilo = 1e3 -hecto = 1e2 -deka = 1e1 -deci = 1e-1 -centi = 1e-2 -milli = 1e-3 -micro = 1e-6 -nano = 1e-9 -pico = 1e-12 -femto = 1e-15 -atto = 1e-18 -zepto = 1e-21 - -#binary prefixes -kibi = 2**10 -mebi = 2**20 -gibi = 2**30 -tebi = 2**40 -pebi = 2**50 -exbi = 2**60 -zebi = 2**70 -yobi = 2**80 - -#physical constants -c = speed_of_light = _cd('speed of light in vacuum') -mu_0 = 4e-7*pi -epsilon_0 = 1 / (mu_0*c*c) -h = Planck = _cd('Planck constant') -hbar = h / (2 * pi) -G = gravitational_constant = _cd('Newtonian constant of gravitation') -g = _cd('standard acceleration of gravity') -e = elementary_charge = _cd('elementary charge') -R = gas_constant = _cd('molar gas constant') -alpha = fine_structure = _cd('fine-structure constant') -N_A = Avogadro = _cd('Avogadro constant') -k = Boltzmann = _cd('Boltzmann constant') -sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant') -Wien = _cd('Wien wavelength displacement law constant') -Rydberg = _cd('Rydberg constant') - -#weight in kg -gram = 1e-3 -metric_ton = 1e3 -grain = 64.79891e-6 -lb = pound = 7000 * grain #avoirdupois -oz = ounce = pound / 16 -stone = 14 * pound -long_ton = 2240 * pound -short_ton = 2000 * pound - -troy_ounce = 480 * grain #only for metals / gems -troy_pound = 12 * troy_ounce -carat = 200e-6 - -m_e = electron_mass = _cd('electron mass') -m_p = proton_mass = _cd('proton mass') -m_n = neutron_mass = _cd('neutron mass') -m_u = u = atomic_mass = _cd('atomic mass constant') - -#angle in rad -degree = pi / 180 -arcmin = arcminute = degree / 60 -arcsec = arcsecond = arcmin / 60 - -#time in second -minute = 60.0 -hour = 60 * minute -day = 24 * hour -week = 7 * day -year = 365 * day -Julian_year = 365.25 * day - -#length in 
meter -inch = 0.0254 -foot = 12 * inch -yard = 3 * foot -mile = 1760 * yard -mil = inch / 1000 -pt = point = inch / 72 #typography -survey_foot = 1200.0 / 3937 -survey_mile = 5280 * survey_foot -nautical_mile = 1852.0 -fermi = 1e-15 -angstrom = 1e-10 -micron = 1e-6 -au = astronomical_unit = 149597870691.0 -light_year = Julian_year * c -parsec = au / arcsec - -#pressure in pascal -atm = atmosphere = _cd('standard atmosphere') -bar = 1e5 -torr = mmHg = atm / 760 -psi = pound * g / (inch * inch) - -#area in meter**2 -hectare = 1e4 -acre = 43560 * foot**2 - -#volume in meter**3 -litre = liter = 1e-3 -gallon = gallon_US = 231 * inch**3 #US -#pint = gallon_US / 8 -fluid_ounce = fluid_ounce_US = gallon_US / 128 -bbl = barrel = 42 * gallon_US #for oil - -gallon_imp = 4.54609e-3 #uk -fluid_ounce_imp = gallon_imp / 160 - -#speed in meter per second -kmh = 1e3 / hour -mph = mile / hour -mach = speed_of_sound = 340.5 #approx value at 15 degrees in 1 atm. is this a common value? -knot = nautical_mile / hour - -#temperature in kelvin -zero_Celsius = 273.15 -degree_Fahrenheit = 1/1.8 #only for differences - -#energy in joule -eV = electron_volt = elementary_charge # * 1 Volt -calorie = calorie_th = 4.184 -calorie_IT = 4.1868 -erg = 1e-7 -Btu_th = pound * degree_Fahrenheit * calorie_th / gram -Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram -ton_TNT = 1e9 * calorie_th -#Wh = watt_hour - -#power in watt -hp = horsepower = 550 * foot * pound * g - -#force in newton -dyn = dyne = 1e-5 -lbf = pound_force = pound * g -kgf = kilogram_force = g # * 1 kg - -#functions for conversions that are not linear - -def C2K(C): - """ - Convert Celsius to Kelvin - - Parameters - ---------- - C : array_like - Celsius temperature(s) to be converted. - - Returns - ------- - K : float or array of floats - Equivalent Kelvin temperature(s). 
- - Notes - ----- - Computes ``K = C + zero_Celsius`` where `zero_Celsius` = 273.15, i.e., - (the absolute value of) temperature "absolute zero" as measured in Celsius. - - Examples - -------- - >>> from scipy.constants.constants import C2K - >>> C2K(_np.array([-40, 40.0])) - array([ 233.15, 313.15]) - - """ - return _np.asanyarray(C) + zero_Celsius - -def K2C(K): - """ - Convert Kelvin to Celsius - - Parameters - ---------- - K : array_like - Kelvin temperature(s) to be converted. - - Returns - ------- - C : float or array of floats - Equivalent Celsius temperature(s). - - Notes - ----- - Computes ``C = K - zero_Celsius`` where `zero_Celsius` = 273.15, i.e., - (the absolute value of) temperature "absolute zero" as measured in Celsius. - - Examples - -------- - >>> from scipy.constants.constants import K2C - >>> K2C(_np.array([233.15, 313.15])) - array([-40., 40.]) - - """ - return _np.asanyarray(K) - zero_Celsius - -def F2C(F): - """ - Convert Fahrenheit to Celsius - - Parameters - ---------- - F : array_like - Fahrenheit temperature(s) to be converted. - - Returns - ------- - C : float or array of floats - Equivalent Celsius temperature(s). - - Notes - ----- - Computes ``C = (F - 32) / 1.8``. - - Examples - -------- - >>> from scipy.constants.constants import F2C - >>> F2C(_np.array([-40, 40.0])) - array([-40. , 4.44444444]) - - """ - return (_np.asanyarray(F) - 32) / 1.8 - -def C2F(C): - """ - Convert Celsius to Fahrenheit - - Parameters - ---------- - C : array_like - Celsius temperature(s) to be converted. - - Returns - ------- - F : float or array of floats - Equivalent Fahrenheit temperature(s). - - Notes - ----- - Computes ``F = 1.8 * C + 32``. - - Examples - -------- - >>> from scipy.constants.constants import C2F - >>> C2F(_np.array([-40, 40.0])) - array([ -40., 104.]) - - """ - return 1.8 * _np.asanyarray(C) + 32 - -def F2K(F): - """ - Convert Fahrenheit to Kelvin - - Parameters - ---------- - F : array_like - Fahrenheit temperature(s) to be converted. 
- - Returns - ------- - K : float or array of floats - Equivalent Kelvin temperature(s). - - Notes - ----- - Computes ``K = (F - 32)/1.8 + zero_Celsius`` where `zero_Celsius` = - 273.15, i.e., (the absolute value of) temperature "absolute zero" as - measured in Celsius. - - Examples - -------- - >>> from scipy.constants.constants import F2K - >>> F2K(_np.array([-40, 104])) - array([ 233.15, 313.15]) - - """ - return C2K(F2C(_np.asanyarray(F))) - -def K2F(K): - """ - Convert Kelvin to Fahrenheit - - Parameters - ---------- - K : array_like - Kelvin temperature(s) to be converted. - - Returns - ------- - F : float or array of floats - Equivalent Fahrenheit temperature(s). - - Notes - ----- - Computes ``F = 1.8 * (K - zero_Celsius) + 32`` where `zero_Celsius` = - 273.15, i.e., (the absolute value of) temperature "absolute zero" as - measured in Celsius. - - Examples - -------- - >>> from scipy.constants.constants import K2F - >>> K2F(_np.array([233.15, 313.15])) - array([ -40., 104.]) - - """ - return C2F(K2C(_np.asanyarray(K))) - -#optics - -def lambda2nu(lambda_): - """ - Convert wavelength to optical frequency - - Parameters - ---------- - lambda : array_like - Wavelength(s) to be converted. - - Returns - ------- - nu : float or array of floats - Equivalent optical frequency. - - Notes - ----- - Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the - (vacuum) speed of light in meters/second. - - Examples - -------- - >>> from scipy.constants.constants import lambda2nu - >>> lambda2nu(_np.array((1, speed_of_light))) - array([ 2.99792458e+08, 1.00000000e+00]) - - """ - return _np.asanyarray(c) / lambda_ - -def nu2lambda(nu): - """ - Convert optical frequency to wavelength. - - Parameters - ---------- - nu : array_like - Optical frequency to be converted. - - Returns - ------- - lambda : float or array of floats - Equivalent wavelength(s). 
- - Notes - ----- - Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the - (vacuum) speed of light in meters/second. - - Examples - -------- - >>> from scipy.constants.constants import nu2lambda - >>> nu2lambda(_np.array((1, speed_of_light))) - array([ 2.99792458e+08, 1.00000000e+00]) - - """ - return c / _np.asanyarray(nu) diff --git a/scipy-0.10.1/scipy/constants/setup.py b/scipy-0.10.1/scipy/constants/setup.py deleted file mode 100644 index 5c1e2d8068..0000000000 --- a/scipy-0.10.1/scipy/constants/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('constants', parent_package, top_path) - config.add_data_dir('tests') - return config - - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/constants/tests/test_codata.py b/scipy-0.10.1/scipy/constants/tests/test_codata.py deleted file mode 100644 index 4c4f100fff..0000000000 --- a/scipy-0.10.1/scipy/constants/tests/test_codata.py +++ /dev/null @@ -1,57 +0,0 @@ -import warnings - -from scipy.constants import constants, codata, find, value -from numpy.testing import assert_equal, assert_, run_module_suite, \ - assert_almost_equal - -def test_find(): - - warnings.simplefilter('ignore', DeprecationWarning) - - keys = find('weak mixing', disp=False) - assert_equal(keys, ['weak mixing angle']) - - keys = find('qwertyuiop', disp=False) - assert_equal(keys, []) - - keys = find('natural unit', disp=False) - assert_equal(keys, sorted(['natural unit of velocity', - 'natural unit of action', - 'natural unit of action in eV s', - 'natural unit of mass', - 'natural unit of energy', - 'natural unit of energy in MeV', - 'natural unit of mom.um', - 'natural unit of mom.um in MeV/c', - 'natural unit of length', - 'natural unit of time'])) - - -def test_basic_table_parse(): - c = 'speed of light in vacuum' - 
assert_equal(codata.value(c), constants.c) - assert_equal(codata.value(c), constants.speed_of_light) - -def test_basic_lookup(): - assert_equal('%d %s' % (codata.c, codata.unit('speed of light in vacuum')), - '299792458 m s^-1') - -def test_find_all(): - assert_(len(codata.find(disp=False)) > 300) - -def test_find_single(): - assert_equal(codata.find('Wien freq', disp=False)[0], - 'Wien frequency displacement law constant') - -def test_2002_vs_2006(): - assert_almost_equal(codata.value('magn. flux quantum'), - codata.value('mag. flux quantum')) - -def test_exact_values(): - """Check that updating stored values with exact ones worked.""" - for key in codata.exact_values: - assert_((codata.exact_values[key][0] - value(key)) / value(key) == 0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/constants/tests/test_constants.py b/scipy-0.10.1/scipy/constants/tests/test_constants.py deleted file mode 100644 index bd6a102271..0000000000 --- a/scipy-0.10.1/scipy/constants/tests/test_constants.py +++ /dev/null @@ -1,30 +0,0 @@ -from numpy.testing import run_module_suite, assert_equal -import scipy.constants as sc - -def test_fahrenheit_to_celcius(): - assert_equal(sc.F2C(32), 0) - assert_equal(sc.F2C([32, 32]), [0, 0]) - -def test_celcius_to_kelvin(): - assert_equal(sc.C2K([0, 0]), [273.15, 273.15]) - -def test_kelvin_to_celcius(): - assert_equal(sc.K2C([0, 0]), [-273.15, -273.15]) - -def test_fahrenheit_to_kelvin(): - assert_equal(sc.F2K([32, 32]), [273.15, 273.15]) - -def test_kelvin_to_fahrenheit(): - assert_equal(sc.K2F([273.15, 273.15]), [32, 32]) - -def test_celcius_to_fahrenheit(): - assert_equal(sc.C2F([0, 0]), [32, 32]) - -def test_lambda_to_nu(): - assert_equal(sc.lambda2nu(sc.speed_of_light), 1) - -def test_nu_to_lambda(): - assert_equal(sc.nu2lambda(1), sc.speed_of_light) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/fftpack/SConscript b/scipy-0.10.1/scipy/fftpack/SConscript deleted file 
mode 100644 index ddbda2e668..0000000000 --- a/scipy-0.10.1/scipy/fftpack/SConscript +++ /dev/null @@ -1,40 +0,0 @@ -# Last Change: Sat Jan 24 04:00 PM 2009 J -# vim:syntax=python -from os.path import join as pjoin - -from numscons import GetNumpyEnvironment -from numscons import CheckF77Mangling - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') - -config = env.NumpyConfigure(custom_tests = {'CheckF77Mangling' : CheckF77Mangling}) -config.CheckF77Mangling() -config.Finish() - -# Build dfftpack -src = [pjoin("src/dfftpack", i) for i in [ "dcosqb.f", "dcosqf.f", "dcosqi.f", -"dcost.f", "dcosti.f", "dfftb.f", "dfftb1.f", "dfftf.f", "dfftf1.f", "dffti.f", -"dffti1.f", "dsinqb.f", "dsinqf.f", "dsinqi.f", "dsint.f", "dsint1.f", -"dsinti.f", "zfftb.f", "zfftb1.f", "zfftf.f", "zfftf1.f", "zffti.f", -"zffti1.f"]] -dfftpack = env.DistutilsStaticExtLibrary('dfftpack', source = [str(s) for s in src]) - -# Build fftpack (single prec) -src = [pjoin("src/fftpack", i) for i in [ 'cfftb.f', 'cfftb1.f', 'cfftf.f', -'cfftf1.f', 'cffti.f', 'cffti1.f', 'cosqb.f', 'cosqf.f', 'cosqi.f', 'cost.f', -'costi.f', 'rfftb.f', 'rfftb1.f', 'rfftf.f', 'rfftf1.f', 'rffti.f', -'rffti1.f', 'sinqb.f', 'sinqf.f', 'sinqi.f', 'sint.f', 'sint1.f', 'sinti.f']] -fftpack = env.DistutilsStaticExtLibrary('fftpack', source = [str(s) for s in src]) - -env.PrependUnique(LIBS = ['fftpack', 'dfftpack']) -env.PrependUnique(LIBPATH = ['.']) - -# Build _fftpack -src = ['src/zfft.c','src/drfft.c','src/zrfft.c', 'src/zfftnd.c', 'fftpack.pyf'] -src += env.FromCTemplate('src/dct.c.src') -env.NumpyPythonExtension('_fftpack', src) - -# Build convolve -src = ['src/convolve.c', 'convolve.pyf'] -env.NumpyPythonExtension('convolve', src) diff --git a/scipy-0.10.1/scipy/fftpack/SConstruct b/scipy-0.10.1/scipy/fftpack/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/fftpack/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment 
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/fftpack/__init__.py b/scipy-0.10.1/scipy/fftpack/__init__.py deleted file mode 100644 index 2731bee848..0000000000 --- a/scipy-0.10.1/scipy/fftpack/__init__.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -================================================== -Discrete Fourier transforms (:mod:`scipy.fftpack`) -================================================== - -Fast Fourier Transforms (FFTs) -============================== - -.. autosummary:: - :toctree: generated/ - - fft - Fast (discrete) Fourier Transform (FFT) - ifft - Inverse FFT - fft2 - Two dimensional FFT - ifft2 - Two dimensional inverse FFT - fftn - n-dimensional FFT - ifftn - n-dimensional inverse FFT - rfft - FFT of strictly real-valued sequence - irfft - Inverse of rfft - dct - Discrete cosine transform - idct - Inverse discrete cosine transform - -Differential and pseudo-differential operators -============================================== - -.. autosummary:: - :toctree: generated/ - - diff - Differentiation and integration of periodic sequences - tilbert - Tilbert transform: cs_diff(x,h,h) - itilbert - Inverse Tilbert transform: sc_diff(x,h,h) - hilbert - Hilbert transform: cs_diff(x,inf,inf) - ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf) - cs_diff - cosh/sinh pseudo-derivative of periodic sequences - sc_diff - sinh/cosh pseudo-derivative of periodic sequences - ss_diff - sinh/sinh pseudo-derivative of periodic sequences - cc_diff - cosh/cosh pseudo-derivative of periodic sequences - shift - Shift periodic sequences - -Helper functions -================ - -.. 
autosummary:: - :toctree: generated/ - - fftshift - Shift the zero-frequency component to the center of the spectrum - ifftshift - The inverse of `fftshift` - fftfreq - Return the Discrete Fourier Transform sample frequencies - rfftfreq - DFT sample frequencies (for usage with rfft, irfft) - - -Convolutions (:mod:`scipy.fftpack.convolve`) -============================================ - -.. module:: scipy.fftpack.convolve - -.. autosummary:: - :toctree: generated/ - - convolve - convolve_z - init_convolution_kernel - destroy_convolve_cache - - -Other (:mod:`scipy.fftpack._fftpack`) -===================================== - -.. module:: scipy.fftpack._fftpack - -.. autosummary:: - :toctree: generated/ - - drfft - zfft - zrfft - zfftnd - destroy_drfft_cache - destroy_zfft_cache - destroy_zfftnd_cache - -""" - -__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', - 'fft2','ifft2', - 'diff', - 'tilbert','itilbert','hilbert','ihilbert', - 'sc_diff','cs_diff','cc_diff','ss_diff', - 'shift', - 'rfftfreq' - ] - -from fftpack_version import fftpack_version as __version__ - -from basic import * -from pseudo_diffs import * -from helper import * - -from numpy.dual import register_func -for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']: - register_func(k, eval(k)) -del k, register_func - -from realtransforms import * -__all__.extend(['dct', 'idct']) - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/fftpack/basic.py b/scipy-0.10.1/scipy/fftpack/basic.py deleted file mode 100644 index 6eaba3b8c7..0000000000 --- a/scipy-0.10.1/scipy/fftpack/basic.py +++ /dev/null @@ -1,546 +0,0 @@ -""" -Discrete Fourier Transforms - basic.py -""" -# Created by Pearu Peterson, August,September 2002 - -__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', - 'fft2','ifft2'] - -from numpy import zeros, swapaxes -import numpy -import _fftpack - -import atexit -atexit.register(_fftpack.destroy_zfft_cache) 
-atexit.register(_fftpack.destroy_zfftnd_cache) -atexit.register(_fftpack.destroy_drfft_cache) -atexit.register(_fftpack.destroy_cfft_cache) -atexit.register(_fftpack.destroy_cfftnd_cache) -atexit.register(_fftpack.destroy_rfft_cache) -del atexit - -def istype(arr, typeclass): - return issubclass(arr.dtype.type, typeclass) - -def _datacopied(arr, original): - """ - Strict check for `arr` not sharing any data with `original`, - under the assumption that arr = asarray(original) - - """ - if arr is original: - return False - if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'): - return False - return arr.base is None - -# XXX: single precision FFTs partially disabled due to accuracy issues -# for large prime-sized inputs. -# -# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834 -# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010, -# @ scipy-dev) -# -# These should be re-enabled once the problems are resolved - -def _is_safe_size(n): - """ - Is the size of FFT such that FFTPACK can handle it in single precision - with sufficient accuracy? 
- - Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those - """ - n = int(n) - for c in (2, 3, 5): - while n % c == 0: - n /= c - return (n <= 1) - -def _fake_crfft(x, n, *a, **kw): - if _is_safe_size(n): - return _fftpack.crfft(x, n, *a, **kw) - else: - return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64) - -def _fake_cfft(x, n, *a, **kw): - if _is_safe_size(n): - return _fftpack.cfft(x, n, *a, **kw) - else: - return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64) - -def _fake_rfft(x, n, *a, **kw): - if _is_safe_size(n): - return _fftpack.rfft(x, n, *a, **kw) - else: - return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32) - -def _fake_cfftnd(x, shape, *a, **kw): - if numpy.all(map(_is_safe_size, shape)): - return _fftpack.cfftnd(x, shape, *a, **kw) - else: - return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64) - -_DTYPE_TO_FFT = { -# numpy.dtype(numpy.float32): _fftpack.crfft, - numpy.dtype(numpy.float32): _fake_crfft, - numpy.dtype(numpy.float64): _fftpack.zrfft, -# numpy.dtype(numpy.complex64): _fftpack.cfft, - numpy.dtype(numpy.complex64): _fake_cfft, - numpy.dtype(numpy.complex128): _fftpack.zfft, -} - -_DTYPE_TO_RFFT = { -# numpy.dtype(numpy.float32): _fftpack.rfft, - numpy.dtype(numpy.float32): _fake_rfft, - numpy.dtype(numpy.float64): _fftpack.drfft, -} - -_DTYPE_TO_FFTN = { -# numpy.dtype(numpy.complex64): _fftpack.cfftnd, - numpy.dtype(numpy.complex64): _fake_cfftnd, - numpy.dtype(numpy.complex128): _fftpack.zfftnd, -# numpy.dtype(numpy.float32): _fftpack.cfftnd, - numpy.dtype(numpy.float32): _fake_cfftnd, - numpy.dtype(numpy.float64): _fftpack.zfftnd, -} - -def _asfarray(x): - """Like numpy asfarray, except that it does not modify x dtype if x is - already an array with a float dtype, and do not cast complex types to - real.""" - if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]: - return x - else: - # We cannot use asfarray directly because it converts sequences of - # complex to 
sequence of real - ret = numpy.asarray(x) - if not ret.dtype.char in numpy.typecodes["AllFloat"]: - return numpy.asfarray(x) - return ret - -def _fix_shape(x, n, axis): - """ Internal auxiliary function for _raw_fft, _raw_fftnd.""" - s = list(x.shape) - if s[axis] > n: - index = [slice(None)]*len(s) - index[axis] = slice(0,n) - x = x[index] - return x, False - else: - index = [slice(None)]*len(s) - index[axis] = slice(0,s[axis]) - s[axis] = n - z = zeros(s,x.dtype.char) - z[index] = x - return z, True - - -def _raw_fft(x, n, axis, direction, overwrite_x, work_function): - """ Internal auxiliary function for fft, ifft, rfft, irfft.""" - if n is None: - n = x.shape[axis] - elif n != x.shape[axis]: - x, copy_made = _fix_shape(x,n,axis) - overwrite_x = overwrite_x or copy_made - if axis == -1 or axis == len(x.shape)-1: - r = work_function(x,n,direction,overwrite_x=overwrite_x) - else: - x = swapaxes(x, axis, -1) - r = work_function(x,n,direction,overwrite_x=overwrite_x) - r = swapaxes(r, axis, -1) - return r - - -def fft(x, n=None, axis=-1, overwrite_x=0): - """ - Return discrete Fourier transform of real or complex sequence. - - The returned complex array contains ``y(0), y(1),..., y(n-1)`` where - - ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``. - - Parameters - ---------- - x : array_like - Array to Fourier transform. - n : int, optional - Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is - truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The - default results in ``n = x.shape[axis]``. - axis : int, optional - Axis along which the fft's are computed; the default is over the - last axis (i.e., ``axis=-1``). - overwrite_x : bool, optional - If True the contents of `x` can be destroyed; the default is False. 
- - Returns - ------- - z : complex ndarray - with the elements: - [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even - [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd - where - y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1 - Note that y(-j) = y(n-j).conjugate(). - - See Also - -------- - ifft : Inverse FFT - rfft : FFT of a real sequence - - Notes - ----- - The packing of the result is "standard": If A = fft(a, n), then A[0] - contains the zero-frequency term, A[1:n/2+1] contains the - positive-frequency terms, and A[n/2+1:] contains the negative-frequency - terms, in order of decreasingly negative frequency. So for an 8-point - transform, the frequencies of the result are [ 0, 1, 2, 3, 4, -3, -2, -1]. - - For n even, A[n/2] contains the sum of the positive and negative-frequency - terms. For n even and x real, A[n/2] will always be real. - - This is most efficient for n a power of two. - - Examples - -------- - >>> from scipy.fftpack import fft, ifft - >>> x = np.arange(5) - >>> np.allclose(fft(ifft(x)), x, atol=1e-15) #within numerical accuracy. - True - - """ - tmp = _asfarray(x) - - try: - work_function = _DTYPE_TO_FFT[tmp.dtype] - except KeyError: - raise ValueError("type %s is not supported" % tmp.dtype) - - if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): - overwrite_x = 1 - - overwrite_x = overwrite_x or _datacopied(tmp, x) - - if n is None: - n = tmp.shape[axis] - elif n != tmp.shape[axis]: - tmp, copy_made = _fix_shape(tmp,n,axis) - overwrite_x = overwrite_x or copy_made - - if axis == -1 or axis == len(tmp.shape) - 1: - return work_function(tmp,n,1,0,overwrite_x) - - tmp = swapaxes(tmp, axis, -1) - tmp = work_function(tmp,n,1,0,overwrite_x) - return swapaxes(tmp, axis, -1) - -def ifft(x, n=None, axis=-1, overwrite_x=0): - """ - Return discrete inverse Fourier transform of real or complex sequence. 
- - The returned complex array contains ``y(0), y(1),..., y(n-1)`` where - - ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``. - - Parameters - ---------- - x : array_like - Transformed data to invert. - n : int, optional - Length of the inverse Fourier transform. If ``n < x.shape[axis]``, - `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. - The default results in ``n = x.shape[axis]``. - axis : int, optional - Axis along which the ifft's are computed; the default is over the - last axis (i.e., ``axis=-1``). - overwrite_x : bool, optional - If True the contents of `x` can be destroyed; the default is False. - - """ - tmp = _asfarray(x) - - try: - work_function = _DTYPE_TO_FFT[tmp.dtype] - except KeyError: - raise ValueError("type %s is not supported" % tmp.dtype) - - if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): - overwrite_x = 1 - - overwrite_x = overwrite_x or _datacopied(tmp, x) - - if n is None: - n = tmp.shape[axis] - elif n != tmp.shape[axis]: - tmp, copy_made = _fix_shape(tmp,n,axis) - overwrite_x = overwrite_x or copy_made - - if axis == -1 or axis == len(tmp.shape) - 1: - return work_function(tmp,n,-1,1,overwrite_x) - - tmp = swapaxes(tmp, axis, -1) - tmp = work_function(tmp,n,-1,1,overwrite_x) - return swapaxes(tmp, axis, -1) - - -def rfft(x, n=None, axis=-1, overwrite_x=0): - """ - Discrete Fourier transform of a real sequence. - - The returned real arrays contains:: - - [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even - [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd - - where - :: - - y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n) - j = 0..n-1 - - Note that ``y(-j) == y(n-j).conjugate()``. - - Parameters - ---------- - x : array_like, real-valued - The data to tranform. - n : int, optional - Defines the length of the Fourier transform. If `n` is not specified - (the default) then ``n = x.shape[axis]``. 
If ``n < x.shape[axis]``, - `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded. - axis : int, optional - The axis along which the transform is applied. The default is the - last axis. - overwrite_x : bool, optional - If set to true, the contents of `x` can be overwritten. Default is - False. - - See also - -------- - fft, irfft, scipy.fftpack.basic - - Notes - ----- - Within numerical accuracy, ``y == rfft(irfft(y))``. - - """ - tmp = _asfarray(x) - - if not numpy.isrealobj(tmp): - raise TypeError("1st argument must be real sequence") - - try: - work_function = _DTYPE_TO_RFFT[tmp.dtype] - except KeyError: - raise ValueError("type %s is not supported" % tmp.dtype) - - overwrite_x = overwrite_x or _datacopied(tmp, x) - - return _raw_fft(tmp,n,axis,1,overwrite_x,work_function) - - -def irfft(x, n=None, axis=-1, overwrite_x=0): - """ irfft(x, n=None, axis=-1, overwrite_x=0) -> y - - Return inverse discrete Fourier transform of real sequence x. - The contents of x is interpreted as the output of rfft(..) - function. - - The returned real array contains - [y(0),y(1),...,y(n-1)] - where for n is even - y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k]) - * exp(sqrt(-1)*j*k* 2*pi/n) - + c.c. + x[0] + (-1)**(j) x[n-1]) - and for n is odd - y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k]) - * exp(sqrt(-1)*j*k* 2*pi/n) - + c.c. + x[0]) - c.c. denotes complex conjugate of preceeding expression. 
- - Optional input: see rfft.__doc__ - """ - tmp = _asfarray(x) - if not numpy.isrealobj(tmp): - raise TypeError("1st argument must be real sequence") - - try: - work_function = _DTYPE_TO_RFFT[tmp.dtype] - except KeyError: - raise ValueError("type %s is not supported" % tmp.dtype) - - overwrite_x = overwrite_x or _datacopied(tmp, x) - - return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function) - -def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function): - """ Internal auxiliary function for fftnd, ifftnd.""" - if s is None: - if axes is None: - s = x.shape - else: - s = numpy.take(x.shape, axes) - - s = tuple(s) - if axes is None: - noaxes = True - axes = range(-x.ndim, 0) - else: - noaxes = False - if len(axes) != len(s): - raise ValueError("when given, axes and shape arguments "\ - "have to be of the same length") - - # No need to swap axes, array is in C order - if noaxes: - for i in axes: - x, copy_made = _fix_shape(x, s[i], i) - overwrite_x = overwrite_x or copy_made - return work_function(x,s,direction,overwrite_x=overwrite_x) - - # We ordered axes, because the code below to push axes at the end of the - # array assumes axes argument is in ascending order. - id = numpy.argsort(axes) - axes = [axes[i] for i in id] - s = [s[i] for i in id] - - # Swap the request axes, last first (i.e. First swap the axis which ends up - # at -1, then at -2, etc...), such as the request axes on which the - # operation is carried become the last ones - for i in range(1, len(axes)+1): - x = numpy.swapaxes(x, axes[-i], -i) - - # We can now operate on the axes waxes, the p last axes (p = len(axes)), by - # fixing the shape of the input array to 1 for any axis the fft is not - # carried upon. 
- waxes = range(x.ndim - len(axes), x.ndim) - shape = numpy.ones(x.ndim) - shape[waxes] = s - - for i in range(len(waxes)): - x, copy_made = _fix_shape(x, s[i], waxes[i]) - overwrite_x = overwrite_x or copy_made - - r = work_function(x, shape, direction, overwrite_x=overwrite_x) - - # reswap in the reverse order (first axis first, etc...) to get original - # order - for i in range(len(axes), 0, -1): - r = numpy.swapaxes(r, -i, axes[-i]) - - return r - - -def fftn(x, shape=None, axes=None, overwrite_x=0): - """ - Return multi-dimensional discrete Fourier transform of x. - - The returned array contains:: - - y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] - x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i) - - where d = len(x.shape) and n = x.shape. - Note that ``y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate()``. - - Parameters - ---------- - x : array_like - The (n-dimensional) array to transform. - shape : tuple of ints, optional - The shape of the result. If both `shape` and `axes` (see below) are - None, `shape` is ``x.shape``; if `shape` is None but `axes` is - not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. - If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. - If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to - length ``shape[i]``. - axes : array_like of ints, optional - The axes of `x` (`y` if `shape` is not None) along which the - transform is applied. - overwrite_x : bool, optional - If True, the contents of `x` can be destroyed. Default is False. - - Returns - ------- - y : complex-valued n-dimensional numpy array - The (n-dimensional) DFT of the input array. 
- - See Also - -------- - ifftn - - Examples - -------- - >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) - >>> np.allclose(y, fftn(ifftn(y))) - True - - """ - return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1) - -def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction): - tmp = _asfarray(x) - - try: - work_function = _DTYPE_TO_FFTN[tmp.dtype] - except KeyError: - raise ValueError("type %s is not supported" % tmp.dtype) - - if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): - overwrite_x = 1 - - overwrite_x = overwrite_x or _datacopied(tmp, x) - return _raw_fftnd(tmp,shape,axes,direction,overwrite_x,work_function) - - -def ifftn(x, shape=None, axes=None, overwrite_x=0): - """ - Return inverse multi-dimensional discrete Fourier transform of - arbitrary type sequence x. - - The returned array contains:: - - y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] - x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i) - - where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``. - - For description of parameters see `fftn`. - - See Also - -------- - fftn : for detailed information. - - """ - return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1) - -def fft2(x, shape=None, axes=(-2,-1), overwrite_x=0): - """ - 2-D discrete Fourier transform. - - Return the two-dimensional discrete Fourier transform of the 2-D argument - `x`. - - See Also - -------- - fftn : for detailed information. - - """ - return fftn(x,shape,axes,overwrite_x) - - -def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=0): - """ - 2-D discrete inverse Fourier transform of real or complex sequence. - - Return inverse two-dimensional discrete Fourier transform of - arbitrary type sequence x. - - See `ifft` for more information. 
- - See also - -------- - fft2, ifft - - """ - return ifftn(x,shape,axes,overwrite_x) diff --git a/scipy-0.10.1/scipy/fftpack/benchmarks/bench_basic.py b/scipy-0.10.1/scipy/fftpack/benchmarks/bench_basic.py deleted file mode 100644 index 16d6d9716b..0000000000 --- a/scipy-0.10.1/scipy/fftpack/benchmarks/bench_basic.py +++ /dev/null @@ -1,218 +0,0 @@ -""" Test functions for fftpack.basic module -""" -import sys -from numpy.testing import * -from scipy.fftpack import ifft, fft, fftn, irfft, rfft - -from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble -import numpy.fft - -from numpy.random import rand -def random(size): - return rand(*size) - -def direct_dft(x): - x = asarray(x) - n = len(x) - y = zeros(n,dtype=cdouble) - w = -arange(n)*(2j*pi/n) - for i in range(n): - y[i] = dot(exp(i*w),x) - return y - -def direct_idft(x): - x = asarray(x) - n = len(x) - y = zeros(n,dtype=cdouble) - w = arange(n)*(2j*pi/n) - for i in range(n): - y[i] = dot(exp(i*w),x)/n - return y - - -class TestFft(TestCase): - - def bench_random(self): - from numpy.fft import fft as numpy_fft - print - print ' Fast Fourier Transform' - print '=================================================' - print ' | real input | complex input ' - print '-------------------------------------------------' - print ' size | scipy | numpy | scipy | numpy ' - print '-------------------------------------------------' - for size,repeat in [(100,7000),(1000,2000), - (256,10000), - (512,10000), - (1024,1000), - (2048,1000), - (2048*2,500), - (2048*4,500), - ]: - print '%5s' % size, - sys.stdout.flush() - - for x in [random([size]).astype(double), - random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j - ]: - if size > 500: y = fft(x) - else: y = direct_dft(x) - assert_array_almost_equal(fft(x),y) - print '|%8.2f' % measure('fft(x)',repeat), - sys.stdout.flush() - - assert_array_almost_equal(numpy_fft(x),y) - print '|%8.2f' % measure('numpy_fft(x)',repeat), - sys.stdout.flush() - - print ' 
(secs for %s calls)' % (repeat) - sys.stdout.flush() - -class TestIfft(TestCase): - - def bench_random(self): - from numpy.fft import ifft as numpy_ifft - print - print ' Inverse Fast Fourier Transform' - print '===============================================' - print ' | real input | complex input ' - print '-----------------------------------------------' - print ' size | scipy | numpy | scipy | numpy ' - print '-----------------------------------------------' - for size,repeat in [(100,7000),(1000,2000), - (256,10000), - (512,10000), - (1024,1000), - (2048,1000), - (2048*2,500), - (2048*4,500), - ]: - print '%5s' % size, - sys.stdout.flush() - - for x in [random([size]).astype(double), - random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j - ]: - if size > 500: y = ifft(x) - else: y = direct_idft(x) - assert_array_almost_equal(ifft(x),y) - print '|%8.2f' % measure('ifft(x)',repeat), - sys.stdout.flush() - - assert_array_almost_equal(numpy_ifft(x),y) - print '|%8.2f' % measure('numpy_ifft(x)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - sys.stdout.flush() - -class TestRfft(TestCase): - - def bench_random(self): - from numpy.fft import rfft as numpy_rfft - print - print 'Fast Fourier Transform (real data)' - print '==================================' - print ' size | scipy | numpy ' - print '----------------------------------' - for size,repeat in [(100,7000),(1000,2000), - (256,10000), - (512,10000), - (1024,1000), - (2048,1000), - (2048*2,500), - (2048*4,500), - ]: - print '%5s' % size, - sys.stdout.flush() - - x = random([size]).astype(double) - print '|%8.2f' % measure('rfft(x)',repeat), - sys.stdout.flush() - - print '|%8.2f' % measure('numpy_rfft(x)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - sys.stdout.flush() - -class TestIrfft(TestCase): - - def bench_random(self): - from numpy.fft import irfft as numpy_irfft - - print - print 'Inverse Fast Fourier Transform (real data)' - print 
'==================================' - print ' size | scipy | numpy ' - print '----------------------------------' - for size,repeat in [(100,7000),(1000,2000), - (256,10000), - (512,10000), - (1024,1000), - (2048,1000), - (2048*2,500), - (2048*4,500), - ]: - print '%5s' % size, - sys.stdout.flush() - - x = random([size]).astype(double) - x1 = zeros(size/2+1,dtype=cdouble) - x1[0] = x[0] - for i in range(1,size/2): - x1[i] = x[2*i-1] + 1j * x[2*i] - if not size%2: - x1[-1] = x[-1] - y = irfft(x) - - print '|%8.2f' % measure('irfft(x)',repeat), - sys.stdout.flush() - - assert_array_almost_equal(numpy_irfft(x1,size),y) - print '|%8.2f' % measure('numpy_irfft(x1,size)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - - sys.stdout.flush() - -class TestFftn(TestCase): - - def bench_random(self): - from numpy.fft import fftn as numpy_fftn - print - print ' Multi-dimensional Fast Fourier Transform' - print '===================================================' - print ' | real input | complex input ' - print '---------------------------------------------------' - print ' size | scipy | numpy | scipy | numpy ' - print '---------------------------------------------------' - for size,repeat in [((100,100),100),((1000,100),7), - ((256,256),10), - ((512,512),3), - ]: - print '%9s' % ('%sx%s'%size), - sys.stdout.flush() - - for x in [random(size).astype(double), - random(size).astype(cdouble)+random(size).astype(cdouble)*1j - ]: - y = fftn(x) - #if size > 500: y = fftn(x) - #else: y = direct_dft(x) - assert_array_almost_equal(fftn(x),y) - print '|%8.2f' % measure('fftn(x)',repeat), - sys.stdout.flush() - - assert_array_almost_equal(numpy_fftn(x),y) - print '|%8.2f' % measure('numpy_fftn(x)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - - sys.stdout.flush() - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/fftpack/benchmarks/bench_pseudo_diffs.py 
b/scipy-0.10.1/scipy/fftpack/benchmarks/bench_pseudo_diffs.py deleted file mode 100644 index ddb1fbc669..0000000000 --- a/scipy-0.10.1/scipy/fftpack/benchmarks/bench_pseudo_diffs.py +++ /dev/null @@ -1,184 +0,0 @@ -""" Benchmark functions for fftpack.pseudo_diffs module -""" -import sys - -from numpy import arange, sin, cos, pi, exp, tanh, sign - -from numpy.testing import * -from scipy.fftpack import diff, fft, ifft, tilbert, hilbert, shift, fftfreq - -def random(size): - return rand(*size) - -def direct_diff(x,k=1,period=None): - fx = fft(x) - n = len (fx) - if period is None: - period = 2*pi - w = fftfreq(n)*2j*pi/period*n - if k<0: - w = 1 / w**k - w[0] = 0.0 - else: - w = w**k - if n>2000: - w[250:n-250] = 0.0 - return ifft(w*fx).real - -def direct_tilbert(x,h=1,period=None): - fx = fft(x) - n = len (fx) - if period is None: - period = 2*pi - w = fftfreq(n)*h*2*pi/period*n - w[0] = 1 - w = 1j/tanh(w) - w[0] = 0j - return ifft(w*fx) - -def direct_hilbert(x): - fx = fft(x) - n = len (fx) - w = fftfreq(n)*n - w = 1j*sign(w) - return ifft(w*fx) - -def direct_shift(x,a,period=None): - n = len(x) - if period is None: - k = fftfreq(n)*1j*n - else: - k = fftfreq(n)*2j*pi/period*n - return ifft(fft(x)*exp(k*a)).real - - -class TestDiff(TestCase): - - def bench_random(self): - print - print 'Differentiation of periodic functions' - print '=====================================' - print ' size | convolve | naive' - print '-------------------------------------' - for size,repeat in [(100,1500),(1000,300), - (256,1500), - (512,1000), - (1024,500), - (2048,200), - (2048*2,100), - (2048*4,50), - ]: - print '%6s' % size, - sys.stdout.flush() - x = arange (size)*2*pi/size - if size<2000: - f = sin(x)*cos(4*x)+exp(sin(3*x)) - else: - f = sin(x)*cos(4*x) - assert_array_almost_equal(diff(f,1),direct_diff(f,1)) - assert_array_almost_equal(diff(f,2),direct_diff(f,2)) - print '| %9.2f' % measure('diff(f,3)',repeat), - sys.stdout.flush() - print '| %9.2f' % 
measure('direct_diff(f,3)',repeat), - sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) - - -class TestTilbert(TestCase): - - def bench_random(self): - print - print ' Tilbert transform of periodic functions' - print '=========================================' - print ' size | optimized | naive' - print '-----------------------------------------' - for size,repeat in [(100,1500),(1000,300), - (256,1500), - (512,1000), - (1024,500), - (2048,200), - (2048*2,100), - (2048*4,50), - ]: - print '%6s' % size, - sys.stdout.flush() - x = arange (size)*2*pi/size - if size<2000: - f = sin(x)*cos(4*x)+exp(sin(3*x)) - else: - f = sin(x)*cos(4*x) - assert_array_almost_equal(tilbert(f,1),direct_tilbert(f,1)) - print '| %9.2f' % measure('tilbert(f,1)',repeat), - sys.stdout.flush() - print '| %9.2f' % measure('direct_tilbert(f,1)',repeat), - sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) - - -class TestHilbert(TestCase): - - def bench_random(self): - print - print ' Hilbert transform of periodic functions' - print '=========================================' - print ' size | optimized | naive' - print '-----------------------------------------' - for size,repeat in [(100,1500),(1000,300), - (256,1500), - (512,1000), - (1024,500), - (2048,200), - (2048*2,100), - (2048*4,50), - ]: - print '%6s' % size, - sys.stdout.flush() - x = arange (size)*2*pi/size - if size<2000: - f = sin(x)*cos(4*x)+exp(sin(3*x)) - else: - f = sin(x)*cos(4*x) - assert_array_almost_equal(hilbert(f),direct_hilbert(f)) - print '| %9.2f' % measure('hilbert(f)',repeat), - sys.stdout.flush() - print '| %9.2f' % measure('direct_hilbert(f)',repeat), - sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) - - -class TestShift(TestCase): - - def bench_random(self): - print - print ' Shifting periodic functions' - print '==============================' - print ' size | optimized | naive' - print '------------------------------' - for size,repeat in [(100,1500),(1000,300), - (256,1500), - 
(512,1000), - (1024,500), - (2048,200), - (2048*2,100), - (2048*4,50), - ]: - print '%6s' % size, - sys.stdout.flush() - x = arange (size)*2*pi/size - a = 1 - if size<2000: - f = sin(x)*cos(4*x)+exp(sin(3*x)) - sf = sin(x+a)*cos(4*(x+a))+exp(sin(3*(x+a))) - else: - f = sin(x)*cos(4*x) - sf = sin(x+a)*cos(4*(x+a)) - assert_array_almost_equal(direct_shift(f,1),sf) - assert_array_almost_equal(shift(f,1),sf) - print '| %9.2f' % measure('shift(f,a)',repeat), - sys.stdout.flush() - print '| %9.2f' % measure('direct_shift(f,a)',repeat), - sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/fftpack/bento.info b/scipy-0.10.1/scipy/fftpack/bento.info deleted file mode 100644 index 35e1606377..0000000000 --- a/scipy-0.10.1/scipy/fftpack/bento.info +++ /dev/null @@ -1,21 +0,0 @@ -HookFile: bscript - -Library: - CompiledLibrary: fftpack - Sources: - src/fftpack/*.f - CompiledLibrary: dfftpack - Sources: - src/dfftpack/*.f - Extension: _fftpack - Sources: - src/zfft.c, - src/drfft.c, - src/zrfft.c, - src/zfftnd.c, - fftpack.pyf, - src/dct.c.src - Extension: convolve - Sources: - src/convolve.c, - convolve.pyf diff --git a/scipy-0.10.1/scipy/fftpack/bscript b/scipy-0.10.1/scipy/fftpack/bscript deleted file mode 100644 index 1569093677..0000000000 --- a/scipy-0.10.1/scipy/fftpack/bscript +++ /dev/null @@ -1,13 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pbuild(context): - bld = context.waf_context - - def builder(extension): - return context.default_builder(extension, - features="c cshlib bento f2py pyext", - includes="src", - use="dfftpack fftpack") - context.register_builder("_fftpack", builder) - context.register_builder("convolve", builder) diff --git a/scipy-0.10.1/scipy/fftpack/convolve.pyf b/scipy-0.10.1/scipy/fftpack/convolve.pyf deleted file mode 100644 index efbe2814b6..0000000000 --- a/scipy-0.10.1/scipy/fftpack/convolve.pyf +++ /dev/null @@ -1,47 +0,0 @@ 
-!%f90 -*- f90 -*- -! Author: Pearu Peterson, September 2002 - -python module convolve__user__routines - interface - real*8 function kernel_func(k) - intent(c) kernel_func - integer intent(in,c) :: k - end function kernel_func - end interface -end python module convolve__user__routines - -python module convolve - interface - - subroutine init_convolution_kernel (n,omega,d,kernel_func,zero_nyquist) - intent(c) init_convolution_kernel - use convolve__user__routines - external kernel_func - integer intent(in,c),check(n>0) :: n - integer intent(in,c),optional :: d = 0 - real*8 intent(out,c),dimension(n),depend(n) :: omega - integer intent(in,c),optional,depend(d) :: zero_nyquist = d%2 - end subroutine init_convolution_kernel - - subroutine destroy_convolve_cache() - intent(c) destroy_convolve_cache - end subroutine destroy_convolve_cache - - subroutine convolve(n,x,omega,swap_real_imag) - intent(c) convolve - integer intent(c,hide),depend (x) :: n = len(x) - real*8 intent(c,in,out,copy,out=y),dimension(n):: x - real*8 intent(c,in,cache),dimension(n),depend(n) :: omega - integer intent(c,in),optional :: swap_real_imag = 0 - end subroutine convolve - - subroutine convolve_z(n,x,omega_real,omega_imag) - intent(c) convolve_z - integer intent(c,hide),depend (x) :: n = len(x) - real*8 intent(c,in,out,copy,out=y),dimension(n):: x - real*8 intent(c,in,cache),dimension(n),depend(n) :: omega_real - real*8 intent(c,in,cache),dimension(n),depend(n) :: omega_imag - end subroutine convolve_z - - end interface -end python module convolve diff --git a/scipy-0.10.1/scipy/fftpack/fftpack.pyf b/scipy-0.10.1/scipy/fftpack/fftpack.pyf deleted file mode 100644 index 350018d6ea..0000000000 --- a/scipy-0.10.1/scipy/fftpack/fftpack.pyf +++ /dev/null @@ -1,251 +0,0 @@ -!%f90 -*- f90 -*- -! Author: Pearu Peterson, August 2002 - -python module _fftpack - interface - - subroutine zfft(x,n,direction,howmany,normalize) - ! 
y = fft(x[,n,direction,normalize,overwrite_x]) - intent(c) zfft - complex*16 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) & - :: normalize = (direction<0) - end subroutine zfft - - subroutine drfft(x,n,direction,howmany,normalize) - ! y = drfft(x[,n,direction,normalize,overwrite_x]) - intent(c) drfft - real*8 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) & - :: normalize = (direction<0) - end subroutine drfft - - subroutine zrfft(x,n,direction,howmany,normalize) - ! y = zrfft(x[,n,direction,normalize,overwrite_x]) - intent(c) zrfft - complex*16 intent(c,in,out,overwrite,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) & - :: normalize = (direction<0) - end subroutine zrfft - - subroutine zfftnd(x,r,s,direction,howmany,normalize,j) - ! 
y = zfftnd(x[,s,direction,normalize,overwrite_x]) - intent(c) zfftnd - complex*16 intent(c,in,out,copy,out=y) :: x(*) - integer intent(c,hide),depend(x) :: r=old_rank(x) - integer intent(c,hide) :: j=0 - integer optional,depend(r),dimension(r),intent(c,in) & - :: s=old_shape(x,j++) - check(r>=len(s)) s - integer intent(c,hide) :: howmany = 1 - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) :: & - normalize = (direction<0) - callprotoargument complex_double*,int,int*,int,int,int - callstatement {& - int i,sz=1,xsz=size(x); & - for (i=0;i0) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) & - :: normalize = (direction<0) - end subroutine cfft - - subroutine rfft(x,n,direction,howmany,normalize) - ! y = rfft(x[,n,direction,normalize,overwrite_x]) - intent(c) rfft - real*4 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) & - :: normalize = (direction<0) - end subroutine rfft - - subroutine crfft(x,n,direction,howmany,normalize) - ! y = crfft(x[,n,direction,normalize,overwrite_x]) - intent(c) crfft - complex*8 intent(c,in,out,overwrite,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) & - :: normalize = (direction<0) - end subroutine crfft - - subroutine cfftnd(x,r,s,direction,howmany,normalize,j) - ! 
y = cfftnd(x[,s,direction,normalize,overwrite_x]) - intent(c) cfftnd - complex*8 intent(c,in,out,copy,out=y) :: x(*) - integer intent(c,hide),depend(x) :: r=old_rank(x) - integer intent(c,hide) :: j=0 - integer optional,depend(r),dimension(r),intent(c,in) & - :: s=old_shape(x,j++) - check(r>=len(s)) s - integer intent(c,hide) :: howmany = 1 - integer optional,intent(c,in) :: direction = 1 - integer optional,intent(c,in),depend(direction) :: & - normalize = (direction<0) - callprotoargument complex_float*,int,int*,int,int,int - callstatement {& - int i,sz=1,xsz=size(x); & - for (i=0;i0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: normalize = 0 - end subroutine ddct1 - - subroutine ddct2(x,n,howmany,normalize) - ! y = ddct2(x[,n,normalize,overwrite_x]) - intent(c) ddct2 - real*8 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: normalize = 0 - end subroutine ddct2 - - subroutine ddct3(x,n,howmany,normalize) - ! y = ddct3(x[,n,normalize,overwrite_x]) - intent(c) ddct3 - real*8 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: normalize = 0 - end subroutine ddct3 - - subroutine dct1(x,n,howmany,normalize) - ! 
y = dct1(x[,n,normalize,overwrite_x]) - intent(c) dct1 - real*4 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: normalize = 0 - end subroutine dct1 - - subroutine dct2(x,n,howmany,normalize) - ! y = dct2(x[,n,normalize,overwrite_x]) - intent(c) dct2 - real*4 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: normalize = 0 - end subroutine dct2 - - subroutine dct3(x,n,howmany,normalize) - ! y = dct3(x[,n,normalize,overwrite_x]) - intent(c) dct3 - real*4 intent(c,in,out,copy,out=y) :: x(*) - integer optional,depend(x),intent(c,in) :: n=size(x) - check(n>0&&n<=size(x)) n - integer depend(x,n),intent(c,hide) :: howmany = size(x)/n - check(n*howmany==size(x)) howmany - integer optional,intent(c,in) :: normalize = 0 - end subroutine dct3 - - subroutine destroy_ddct2_cache() - intent(c) destroy_ddct2_cache - end subroutine destroy_ddct2_cache - - subroutine destroy_ddct1_cache() - intent(c) destroy_ddct1_cache - end subroutine destroy_ddct1_cache - - subroutine destroy_dct2_cache() - intent(c) destroy_dct2_cache - end subroutine destroy_dct2_cache - - subroutine destroy_dct1_cache() - intent(c) destroy_dct1_cache - end subroutine destroy_dct1_cache - - end interface -end python module _fftpack - -! 
See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/fftpack/fftpack_version.py b/scipy-0.10.1/scipy/fftpack/fftpack_version.py deleted file mode 100644 index 1c9e5ddebb..0000000000 --- a/scipy-0.10.1/scipy/fftpack/fftpack_version.py +++ /dev/null @@ -1,6 +0,0 @@ -major = 0 -minor = 4 -micro = 3 - - -fftpack_version = '%(major)d.%(minor)d.%(micro)d' % (locals ()) diff --git a/scipy-0.10.1/scipy/fftpack/helper.py b/scipy-0.10.1/scipy/fftpack/helper.py deleted file mode 100644 index 712fdbca69..0000000000 --- a/scipy-0.10.1/scipy/fftpack/helper.py +++ /dev/null @@ -1,18 +0,0 @@ -__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] - -from numpy import array -from numpy.fft.helper import fftshift, ifftshift, fftfreq - -def rfftfreq(n, d=1.0): - """DFT sample frequencies (for usage with rfft, irfft). - - The returned float array contains the frequency bins in - cycles/unit (with zero at the start) given a window length n and a - sample spacing d: - - f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even - f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd - """ - if not isinstance(n, int) or n < 0: - raise ValueError("n = %s is not valid. n must be a nonnegative integer." % n) - return (array(range(1,n+1),dtype=int)//2)/float(n*d) diff --git a/scipy-0.10.1/scipy/fftpack/pseudo_diffs.py b/scipy-0.10.1/scipy/fftpack/pseudo_diffs.py deleted file mode 100644 index 7adec0ac29..0000000000 --- a/scipy-0.10.1/scipy/fftpack/pseudo_diffs.py +++ /dev/null @@ -1,451 +0,0 @@ -""" -Differential and pseudo-differential operators. 
-""" -# Created by Pearu Peterson, September 2002 - -__all__ = ['diff', - 'tilbert','itilbert','hilbert','ihilbert', - 'cs_diff','cc_diff','sc_diff','ss_diff', - 'shift'] - -from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj -import convolve - -from scipy.fftpack.basic import _datacopied - -import atexit -atexit.register(convolve.destroy_convolve_cache) -del atexit - - -_cache = {} -def diff(x,order=1,period=None, - _cache = _cache): - """ diff(x, order=1, period=2*pi) -> y - - Return k-th derivative (or integral) of a periodic sequence x. - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j - y_0 = 0 if order is not 0. - - Optional input: - order - The order of differentiation. Default order is 1. If order is - negative, then integration is carried out under the assumption - that x_0==0. - period - The assumed period of the sequence. Default is 2*pi. - - Notes: - If sum(x,axis=0)=0 then - diff(diff(x,k),-k)==x (within numerical accuracy) - For odd order and even len(x), the Nyquist mode is taken zero. - """ - tmp = asarray(x) - if order==0: - return tmp - if iscomplexobj(tmp): - return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period) - if period is not None: - c = 2*pi/period - else: - c = 1.0 - n = len(x) - omega = _cache.get((n,order,c)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,order=order,c=c): - if k: - return pow(c*k,order) - return 0 - omega = convolve.init_convolution_kernel(n,kernel,d=order, - zero_nyquist=1) - _cache[(n,order,c)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,swap_real_imag=order%2, - overwrite_x=overwrite_x) -del _cache - - -_cache = {} -def tilbert(x,h,period=None, - _cache = _cache): - """ tilbert(x, h, period=2*pi) -> y - - Return h-Tilbert transform of a periodic sequence x. 
- - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j - y_0 = 0 - - Input: - h - Defines the parameter of the Tilbert transform. - period - The assumed period of the sequence. Default period is 2*pi. - - Notes: - If sum(x,axis=0)==0 and n=len(x) is odd then - tilbert(itilbert(x)) == x - If 2*pi*h/period is approximately 10 or larger then numerically - tilbert == hilbert - (theoretically oo-Tilbert == Hilbert). - For even len(x), the Nyquist mode of x is taken zero. - """ - tmp = asarray(x) - if iscomplexobj(tmp): - return tilbert(tmp.real,h,period)+\ - 1j*tilbert(tmp.imag,h,period) - if period is not None: - h = h*2*pi/period - n = len(x) - omega = _cache.get((n,h)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,h=h): - if k: return 1.0/tanh(h*k) - return 0 - omega = convolve.init_convolution_kernel(n,kernel,d=1) - _cache[(n,h)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) -del _cache - - -_cache = {} -def itilbert(x,h,period=None, - _cache = _cache): - """ itilbert(x, h, period=2*pi) -> y - - Return inverse h-Tilbert transform of a periodic sequence x. 
- - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j - y_0 = 0 - - Optional input: see tilbert.__doc__ - """ - tmp = asarray(x) - if iscomplexobj(tmp): - return itilbert(tmp.real,h,period)+\ - 1j*itilbert(tmp.imag,h,period) - if period is not None: - h = h*2*pi/period - n = len(x) - omega = _cache.get((n,h)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,h=h): - if k: return -tanh(h*k) - return 0 - omega = convolve.init_convolution_kernel(n,kernel,d=1) - _cache[(n,h)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) -del _cache - - -_cache = {} -def hilbert(x, - _cache=_cache): - """ hilbert(x) -> y - - Return Hilbert transform of a periodic sequence x. - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = sqrt(-1)*sign(j) * x_j - y_0 = 0 - - Parameters - ---------- - x : array_like - The input array, should be periodic. - _cache : dict, optional - Dictionary that contains the kernel used to do a convolution with. - - Returns - ------- - y : ndarray - The transformed input. - - Notes - ----- - If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``. - - For even len(x), the Nyquist mode of x is taken zero. - - The sign of the returned transform does not have a factor -1 that is more - often than not found in the definition of the Hilbert transform. Note also - that ``scipy.signal.hilbert`` does have an extra -1 factor compared to this - function. 
- - """ - tmp = asarray(x) - if iscomplexobj(tmp): - return hilbert(tmp.real)+1j*hilbert(tmp.imag) - n = len(x) - omega = _cache.get(n) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k): - if k>0: return 1.0 - elif k<0: return -1.0 - return 0.0 - omega = convolve.init_convolution_kernel(n,kernel,d=1) - _cache[n] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) -del _cache - - -def ihilbert(x): - """ ihilbert(x) -> y - - Return inverse Hilbert transform of a periodic sequence x. - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = -sqrt(-1)*sign(j) * x_j - y_0 = 0 - """ - return -hilbert(x) - - -_cache = {} -def cs_diff(x, a, b, period=None, - _cache = _cache): - """ cs_diff(x, a, b, period=2*pi) -> y - - Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence x. - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j - y_0 = 0 - - Input: - a,b - Defines the parameters of the cosh/sinh pseudo-differential - operator. - period - The period of the sequence. Default period is 2*pi. - - Notes: - For even len(x), the Nyquist mode of x is taken zero. 
- """ - tmp = asarray(x) - if iscomplexobj(tmp): - return cs_diff(tmp.real,a,b,period)+\ - 1j*cs_diff(tmp.imag,a,b,period) - if period is not None: - a = a*2*pi/period - b = b*2*pi/period - n = len(x) - omega = _cache.get((n,a,b)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,a=a,b=b): - if k: return -cosh(a*k)/sinh(b*k) - return 0 - omega = convolve.init_convolution_kernel(n,kernel,d=1) - _cache[(n,a,b)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) -del _cache - - -_cache = {} -def sc_diff(x, a, b, period=None, - _cache = _cache): - """ - Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x. - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then:: - - y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j - y_0 = 0 - - Parameters - ---------- - x : array_like - Input array. - a,b : float - Defines the parameters of the sinh/cosh pseudo-differential - operator. - period : float, optional - The period of the sequence x. Default is 2*pi. - - Notes - ----- - ``sc_diff(cs_diff(x,a,b),b,a) == x`` - For even ``len(x)``, the Nyquist mode of x is taken as zero. 
- - """ - tmp = asarray(x) - if iscomplexobj(tmp): - return sc_diff(tmp.real,a,b,period)+\ - 1j*sc_diff(tmp.imag,a,b,period) - if period is not None: - a = a*2*pi/period - b = b*2*pi/period - n = len(x) - omega = _cache.get((n,a,b)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,a=a,b=b): - if k: return sinh(a*k)/cosh(b*k) - return 0 - omega = convolve.init_convolution_kernel(n,kernel,d=1) - _cache[(n,a,b)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) -del _cache - - -_cache = {} -def ss_diff(x, a, b, period=None, - _cache = _cache): - """ ss_diff(x, a, b, period=2*pi) -> y - - Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x. - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j - y_0 = a/b * x_0 - - Input: - a,b - Defines the parameters of the sinh/sinh pseudo-differential - operator. - period - The period of the sequence x. Default is 2*pi. - - Notes: - ss_diff(ss_diff(x,a,b),b,a) == x - """ - tmp = asarray(x) - if iscomplexobj(tmp): - return ss_diff(tmp.real,a,b,period)+\ - 1j*ss_diff(tmp.imag,a,b,period) - if period is not None: - a = a*2*pi/period - b = b*2*pi/period - n = len(x) - omega = _cache.get((n,a,b)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,a=a,b=b): - if k: return sinh(a*k)/sinh(b*k) - return float(a)/b - omega = convolve.init_convolution_kernel(n,kernel) - _cache[(n,a,b)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,overwrite_x=overwrite_x) -del _cache - - -_cache = {} -def cc_diff(x, a, b, period=None, - _cache = _cache): - """ cc_diff(x, a, b, period=2*pi) -> y - - Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence x. 
- - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j - - Input: - a,b - Defines the parameters of the sinh/sinh pseudo-differential - operator. - - Optional input: - period - The period of the sequence x. Default is 2*pi. - - Notes: - cc_diff(cc_diff(x,a,b),b,a) == x - """ - tmp = asarray(x) - if iscomplexobj(tmp): - return cc_diff(tmp.real,a,b,period)+\ - 1j*cc_diff(tmp.imag,a,b,period) - if period is not None: - a = a*2*pi/period - b = b*2*pi/period - n = len(x) - omega = _cache.get((n,a,b)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel(k,a=a,b=b): - return cosh(a*k)/cosh(b*k) - omega = convolve.init_convolution_kernel(n,kernel) - _cache[(n,a,b)] = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve(tmp,omega,overwrite_x=overwrite_x) -del _cache - -_cache = {} -def shift(x, a, period=None, - _cache = _cache): - """ shift(x, a, period=2*pi) -> y - - Shift periodic sequence x by a: y(u) = x(u+a). - - If x_j and y_j are Fourier coefficients of periodic functions x - and y, respectively, then - - y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f - - Optional input: - period - The period of the sequences x and y. Default period is 2*pi. 
- """ - tmp = asarray(x) - if iscomplexobj(tmp): - return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period) - if period is not None: - a = a*2*pi/period - n = len(x) - omega = _cache.get((n,a)) - if omega is None: - if len(_cache)>20: - while _cache: _cache.popitem() - def kernel_real(k,a=a): return cos(a*k) - def kernel_imag(k,a=a): return sin(a*k) - omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0, - zero_nyquist=0) - omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1, - zero_nyquist=0) - _cache[(n,a)] = omega_real,omega_imag - else: - omega_real,omega_imag = omega - overwrite_x = _datacopied(tmp, x) - return convolve.convolve_z(tmp,omega_real,omega_imag, - overwrite_x=overwrite_x) - -del _cache diff --git a/scipy-0.10.1/scipy/fftpack/realtransforms.py b/scipy-0.10.1/scipy/fftpack/realtransforms.py deleted file mode 100644 index 7f52e0173e..0000000000 --- a/scipy-0.10.1/scipy/fftpack/realtransforms.py +++ /dev/null @@ -1,236 +0,0 @@ -""" -Real spectrum tranforms (DCT, DST, MDCT) -""" - -__all__ = ['dct', 'idct'] - -import numpy as np -from scipy.fftpack import _fftpack -from scipy.fftpack.basic import _datacopied - -import atexit -atexit.register(_fftpack.destroy_ddct1_cache) -atexit.register(_fftpack.destroy_ddct2_cache) -atexit.register(_fftpack.destroy_dct1_cache) -atexit.register(_fftpack.destroy_dct2_cache) - -def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=0): - """ - Return the Discrete Cosine Transform of arbitrary type sequence x. - - Parameters - ---------- - x : array_like - The input array. - type : {1, 2, 3}, optional - Type of the DCT (see Notes). Default type is 2. - n : int, optional - Length of the transform. - axis : int, optional - Axis over which to compute the transform. - norm : {None, 'ortho'}, optional - Normalization mode (see Notes). Default is None. - overwrite_x : bool, optional - If True the contents of x can be destroyed. 
(default=False) - - Returns - ------- - y : ndarray of real - The transformed input array. - - See Also - -------- - idct - - Notes - ----- - For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to - MATLAB ``dct(x)``. - - There are theoretically 8 types of the DCT, only the first 3 types are - implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the' - Inverse DCT generally refers to DCT type 3. - - type I - ~~~~~~ - There are several definitions of the DCT-I; we use the following - (for ``norm=None``):: - - N-2 - y[k] = x[0] + (-1)**k x[N-1] + 2 * sum x[n]*cos(pi*k*n/(N-1)) - n=1 - - Only None is supported as normalization mode for DCT-I. Note also that the - DCT-I is only supported for input size > 1 - - type II - ~~~~~~~ - There are several definitions of the DCT-II; we use the following - (for ``norm=None``):: - - - N-1 - y[k] = 2* sum x[n]*cos(pi*k*(2n+1)/(2*N)), 0 <= k < N. - n=0 - - If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`:: - - f = sqrt(1/(4*N)) if k = 0, - f = sqrt(1/(2*N)) otherwise. - - Which makes the corresponding matrix of coefficients orthonormal - (``OO' = Id``). - - type III - ~~~~~~~~ - - There are several definitions, we use the following - (for ``norm=None``):: - - N-1 - y[k] = x[0] + 2 * sum x[n]*cos(pi*(k+0.5)*n/N), 0 <= k < N. - n=1 - - or, for ``norm='ortho'`` and 0 <= k < N:: - - N-1 - y[k] = x[0] / sqrt(N) + sqrt(1/N) * sum x[n]*cos(pi*(k+0.5)*n/N) - n=1 - - The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up - to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of - the orthonormalized DCT-II. - - References - ---------- - - http://en.wikipedia.org/wiki/Discrete_cosine_transform - - 'A Fast Cosine Transform in One and Two Dimensions', by J. Makhoul, `IEEE - Transactions on acoustics, speech and signal processing` vol. 28(1), - pp. 27-34, http://dx.doi.org/10.1109/TASSP.1980.1163351 (1980). 
- - """ - if type == 1 and norm is not None: - raise NotImplementedError( - "Orthonormalization not yet supported for DCT-I") - return _dct(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x) - -def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=0): - """ - Return the Inverse Discrete Cosine Transform of an arbitrary type sequence. - - Parameters - ---------- - x : array_like - The input array. - type : {1, 2, 3}, optional - Type of the DCT (see Notes). Default type is 2. - n : int, optional - Length of the transform. - axis : int, optional - Axis over which to compute the transform. - norm : {None, 'ortho'}, optional - Normalization mode (see Notes). Default is None. - overwrite_x : bool, optional - If True the contents of x can be destroyed. (default=False) - - Returns - ------- - y : ndarray of real - The transformed input array. - - See Also - -------- - dct - - Notes - ----- - For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to - MATLAB ``idct(x)``. - - 'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3. - - IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type - 3, and IDCT of type 3 is the DCT of type 2. For the definition of these - types, see `dct`. - - """ - if type == 1 and norm is not None: - raise NotImplementedError( - "Orthonormalization not yet supported for IDCT-I") - # Inverse/forward type table - _TP = {1:1, 2:3, 3:2} - return _dct(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x) - -def _dct(x, type, n=None, axis=-1, overwrite_x=0, normalize=None): - """ - Return Discrete Cosine Transform of arbitrary type sequence x. - - Parameters - ---------- - x : array-like - input array. - n : int, optional - Length of the transform. - axis : int, optional - Axis along which the dct is computed. (default=-1) - overwrite_x : bool, optional - If True the contents of x can be destroyed. 
(default=False) - - Returns - ------- - z : real ndarray - - """ - tmp = np.asarray(x) - if not np.isrealobj(tmp): - raise TypeError("1st argument must be real sequence") - - if n is None: - n = tmp.shape[axis] - else: - raise NotImplemented("Padding/truncating not yet implemented") - - if tmp.dtype == np.double: - if type == 1: - f = _fftpack.ddct1 - elif type == 2: - f = _fftpack.ddct2 - elif type == 3: - f = _fftpack.ddct3 - else: - raise ValueError("Type %d not understood" % type) - elif tmp.dtype == np.float32: - if type == 1: - f = _fftpack.dct1 - elif type == 2: - f = _fftpack.dct2 - elif type == 3: - f = _fftpack.dct3 - else: - raise ValueError("Type %d not understood" % type) - else: - raise ValueError("dtype %s not supported" % tmp.dtype) - - if normalize: - if normalize == "ortho": - nm = 1 - else: - raise ValueError("Unknown normalize mode %s" % normalize) - else: - nm = 0 - - if type == 1 and n < 2: - raise ValueError("DCT-I is not defined for size < 2") - - overwrite_x = overwrite_x or _datacopied(tmp, x) - - if axis == -1 or axis == len(tmp.shape) - 1: - return f(tmp, n, nm, overwrite_x) - #else: - # raise NotImplementedError("Axis arg not yet implemented") - - tmp = np.swapaxes(tmp, axis, -1) - tmp = f(tmp, n, nm, overwrite_x) - return np.swapaxes(tmp, axis, -1) diff --git a/scipy-0.10.1/scipy/fftpack/setup.py b/scipy-0.10.1/scipy/fftpack/setup.py deleted file mode 100755 index e563063473..0000000000 --- a/scipy-0.10.1/scipy/fftpack/setup.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# Created by Pearu Peterson, August 2002 - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('fftpack',parent_package, top_path) - - config.add_data_dir('tests') - config.add_data_dir('benchmarks') - - config.add_library('dfftpack', - sources=[join('src/dfftpack','*.f')]) - - config.add_library('fftpack', - sources=[join('src/fftpack','*.f')]) - - 
sources = ['fftpack.pyf','src/zfft.c','src/drfft.c','src/zrfft.c', - 'src/zfftnd.c', 'src/dct.c.src'] - - config.add_extension('_fftpack', - sources=sources, - libraries=['dfftpack', 'fftpack'], - include_dirs=['src']) - - config.add_extension('convolve', - sources=['convolve.pyf','src/convolve.c'], - libraries=['dfftpack'], - ) - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - from fftpack_version import fftpack_version - setup(version=fftpack_version, - description='fftpack - Discrete Fourier Transform package', - author='Pearu Peterson', - author_email = 'pearu@cens.ioc.ee', - maintainer_email = 'scipy-dev@scipy.org', - license = 'SciPy License (BSD Style)', - **configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/fftpack/setupscons.py b/scipy-0.10.1/scipy/fftpack/setupscons.py deleted file mode 100755 index ebe3b535fa..0000000000 --- a/scipy-0.10.1/scipy/fftpack/setupscons.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# Created by Pearu Peterson, August 2002 - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('fftpack',parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - from fftpack_version import fftpack_version - setup(version=fftpack_version, - description='fftpack - Discrete Fourier Transform package', - author='Pearu Peterson', - author_email = 'pearu@cens.ioc.ee', - maintainer_email = 'scipy-dev@scipy.org', - license = 'SciPy License (BSD Style)', - **configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/fftpack/src/convolve.c b/scipy-0.10.1/scipy/fftpack/src/convolve.c deleted file mode 100644 index 1597a437eb..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/convolve.c +++ /dev/null 
@@ -1,131 +0,0 @@ -/* - Generic functions for computing 1D convolutions of periodic sequences. - - Supported FFT libraries: - DJBFFT - optional, used for power-of-two length arrays - FFTW - optional - FFTPACK - used if any of the above libraries is not available - - Author: Pearu Peterson, September 2002 - */ - -#include "fftpack.h" - -/**************** FFTPACK ZFFT **********************/ -extern void F_FUNC(dfftf, DFFTF) (int *, double *, double *); -extern void F_FUNC(dfftb, DFFTB) (int *, double *, double *); -extern void F_FUNC(dffti, DFFTI) (int *, double *); -GEN_CACHE(dfftpack, (int n) - , double *wsave;, (caches_dfftpack[i].n == n) - , caches_dfftpack[id].wsave = - (double *) malloc(sizeof(double) * (2 * n + 15)); - F_FUNC(dffti, DFFTI) (&n, caches_dfftpack[id].wsave);, - free(caches_dfftpack[id].wsave);, 20) - -extern void destroy_convolve_cache(void) -{ - destroy_dfftpack_cache(); -} - -/**************** convolve **********************/ -extern void -convolve(int n, double *inout, double *omega, int swap_real_imag) -{ - int i; - double *wsave = NULL; - - i = get_cache_id_dfftpack(n); - wsave = caches_dfftpack[i].wsave; - F_FUNC(dfftf, DFFTF) (&n, inout, wsave); - if (swap_real_imag) { - double c; - int n1 = n - 1; - inout[0] *= omega[0]; - if (!(n % 2)) - inout[n - 1] *= omega[n - 1]; - for (i = 1; i < n1; i += 2) { - c = inout[i] * omega[i]; - inout[i] = inout[i + 1] * omega[i + 1]; - inout[i + 1] = c; - } - } else - for (i = 0; i < n; ++i) - inout[i] *= omega[i]; - F_FUNC(dfftb, DFFTB) (&n, inout, wsave); -} - -/**************** convolve **********************/ -extern void -convolve_z(int n, double *inout, double *omega_real, double *omega_imag) -{ - int i; - double *wsave = NULL; - i = get_cache_id_dfftpack(n); - wsave = caches_dfftpack[i].wsave; - F_FUNC(dfftf, DFFTF) (&n, inout, wsave); - { - double c; - int n1 = n - 1; - inout[0] *= (omega_real[0] + omega_imag[0]); - if (!(n % 2)) - inout[n - 1] *= (omega_real[n - 1] + omega_imag[n - 1]); - for (i 
= 1; i < n1; i += 2) { - c = inout[i] * omega_imag[i]; - inout[i] *= omega_real[i]; - inout[i] += inout[i + 1] * omega_imag[i + 1]; - inout[i + 1] *= omega_real[i + 1]; - inout[i + 1] += c; - } - } - F_FUNC(dfftb, DFFTB) (&n, inout, wsave); -} - -extern void -init_convolution_kernel(int n, double *omega, int d, - double (*kernel_func) (int), int zero_nyquist) -{ - /* - omega[k] = pow(sqrt(-1),d) * kernel_func(k) - omega[0] = kernel_func(0) - conjugate(omega[-k]) == omega[k] - */ - int j, k, l = (n % 2 ? n : n - 1); - omega[0] = (*kernel_func) (0) / n; - switch (d % 4) { - case 0: - for (k = j = 1; j < l; j += 2, ++k) - omega[j] = omega[j + 1] = (*kernel_func) (k) / n; - if (!(n % 2)) - omega[n - 1] = - (zero_nyquist ? 0.0 : (*kernel_func) (k) / n); - break; - case 1:; - case -3: - for (k = j = 1; j < l; j += 2, ++k) { - omega[j] = (*kernel_func) (k) / n; - omega[j + 1] = -omega[j]; - } - if (!(n % 2)) - omega[n - 1] = - (zero_nyquist ? 0.0 : (*kernel_func) (k) / n); - break; - case 2:; - case -2: - for (k = j = 1; j < l; j += 2, ++k) - omega[j] = omega[j + 1] = -(*kernel_func) (k) / n; - if (!(n % 2)) - omega[n - 1] = - (zero_nyquist ? 0.0 : -(*kernel_func) (k) / n); - break; - case 3:; - case -1: - for (k = j = 1; j < l; j += 2, ++k) { - omega[j] = -(*kernel_func) (k) / n; - omega[j + 1] = -omega[j]; - } - if (!(n % 2)) - omega[n - 1] = - (zero_nyquist ? 
0.0 : -(*kernel_func) (k) / n); - break; - } -} diff --git a/scipy-0.10.1/scipy/fftpack/src/dct.c.src b/scipy-0.10.1/scipy/fftpack/src/dct.c.src deleted file mode 100644 index 6bb5dc4103..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dct.c.src +++ /dev/null @@ -1,158 +0,0 @@ -/* vim:syntax=c - * vim:sw=4 - * - * Interfaces to the DCT transforms of fftpack - */ -#include - -#include "fftpack.h" - -enum normalize { - DCT_NORMALIZE_NO = 0, - DCT_NORMALIZE_ORTHONORMAL = 1 -}; - -/**begin repeat - -#type=float,double# -#pref=,d# -#PREF=,D# -*/ -extern void F_FUNC(@pref@costi, @PREF@COSTI)(int*, @type@*); -extern void F_FUNC(@pref@cost, @PREF@COST)(int*, @type@*, @type@*); -extern void F_FUNC(@pref@cosqi, @PREF@COSQI)(int*, @type@*); -extern void F_FUNC(@pref@cosqb, @PREF@COSQB)(int*, @type@*, @type@*); -extern void F_FUNC(@pref@cosqf, @PREF@COSQF)(int*, @type@*, @type@*); - -GEN_CACHE(@pref@dct1,(int n) - ,@type@* wsave; - ,(caches_@pref@dct1[i].n==n) - ,caches_@pref@dct1[id].wsave = malloc(sizeof(@type@)*(3*n+15)); - F_FUNC(@pref@costi, @PREF@COSTI)(&n, caches_@pref@dct1[id].wsave); - ,free(caches_@pref@dct1[id].wsave); - ,10) - -GEN_CACHE(@pref@dct2,(int n) - ,@type@* wsave; - ,(caches_@pref@dct2[i].n==n) - ,caches_@pref@dct2[id].wsave = malloc(sizeof(@type@)*(3*n+15)); - F_FUNC(@pref@cosqi,@PREF@COSQI)(&n,caches_@pref@dct2[id].wsave); - ,free(caches_@pref@dct2[id].wsave); - ,10) - -void @pref@dct1(@type@ * inout, int n, int howmany, int normalize) -{ - int i, j; - @type@ *ptr = inout, n1, n2; - @type@ *wsave = NULL; - - wsave = caches_@pref@dct1[get_cache_id_@pref@dct1(n)].wsave; - - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(@pref@cost, @PREF@COST)(&n, ptr, wsave); - } - - switch (normalize) { - case DCT_NORMALIZE_NO: - break; -#if 0 - case DCT_NORMALIZE_ORTHONORMAL: - ptr = inout; - n1 = sqrt(0.5 / (n-1)); - n2 = sqrt(1. 
/ (n-1)); - for (i = 0; i < howmany; ++i, ptr+=n) { - ptr[0] *= n1; - for (j = 1; j < n-1; ++j) { - ptr[j] *= n2; - } - } - break; -#endif - default: - fprintf(stderr, "dct1: normalize not yet supported=%d\n", - normalize); - break; - } -} - -void @pref@dct2(@type@ * inout, int n, int howmany, int normalize) -{ - int i, j; - @type@ *ptr = inout; - @type@ *wsave = NULL; - @type@ n1, n2; - - wsave = caches_@pref@dct2[get_cache_id_@pref@dct2(n)].wsave; - - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(@pref@cosqb, @PREF@COSQB)(&n, ptr, wsave); - - } - - switch (normalize) { - case DCT_NORMALIZE_NO: - ptr = inout; - /* 0.5 coeff comes from fftpack defining DCT as - * 4 * sum(cos(something)), whereas most definition - * use 2 */ - for (i = 0; i < n * howmany; ++i) { - ptr[i] *= 0.5; - } - break; - case DCT_NORMALIZE_ORTHONORMAL: - ptr = inout; - /* 0.5 coeff comes from fftpack defining DCT as - * 4 * sum(cos(something)), whereas most definition - * use 2 */ - n1 = 0.25 * sqrt(1./n); - n2 = 0.25 * sqrt(2./n); - for (i = 0; i < howmany; ++i, ptr+=n) { - ptr[0] *= n1; - for (j = 1; j < n; ++j) { - ptr[j] *= n2; - } - } - break; - default: - fprintf(stderr, "dct2: normalize not yet supported=%d\n", - normalize); - break; - } -} - -void @pref@dct3(@type@ * inout, int n, int howmany, int normalize) -{ - int i, j; - @type@ *ptr = inout; - @type@ *wsave = NULL; - @type@ n1, n2; - - wsave = caches_@pref@dct2[get_cache_id_@pref@dct2(n)].wsave; - - switch (normalize) { - case DCT_NORMALIZE_NO: - break; - case DCT_NORMALIZE_ORTHONORMAL: - n1 = sqrt(1./n); - n2 = sqrt(0.5/n); - for (i = 0; i < howmany; ++i, ptr+=n) { - ptr[0] *= n1; - for (j = 1; j < n; ++j) { - ptr[j] *= n2; - } - } - break; - default: - fprintf(stderr, "dct3: normalize not yet supported=%d\n", - normalize); - break; - } - - ptr = inout; - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(@pref@cosqf, @PREF@COSQF)(&n, ptr, wsave); - - } - -} -/**end repeat**/ diff --git 
a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqb.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqb.f deleted file mode 100644 index 41f882cd44..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqb.f +++ /dev/null @@ -1,45 +0,0 @@ - SUBROUTINE DCOSQB (N,X,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(*) ,WSAVE(*) - DATA TSQRT2 /2.82842712474619009760D0/ - IF (N.lt.2) GO TO 101 - IF (N.eq.2) GO TO 102 - GO TO 103 - 101 X(1) = 4.0D0*X(1) - RETURN - 102 X1 = 4.0D0*(X(1)+X(2)) - X(2) = TSQRT2*(X(1)-X(2)) - X(1) = X1 - RETURN - 103 CALL DCOSQB1 (N,X,WSAVE,WSAVE(N+1)) - RETURN - END - - SUBROUTINE DCOSQB1 (N,X,W,XH) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(1) ,W(1) ,XH(1) - NS2 = (N+1)/2 - NP2 = N+2 - DO 101 I=3,N,2 - XIM1 = X(I-1)+X(I) - X(I) = X(I)-X(I-1) - X(I-1) = XIM1 - 101 CONTINUE - X(1) = X(1)+X(1) - MODN = MOD(N,2) - IF (MODN .EQ. 0) X(N) = X(N)+X(N) - CALL DFFTB (N,X,XH) - DO 102 K=2,NS2 - KC = NP2-K - XH(K) = W(K-1)*X(KC)+W(KC-1)*X(K) - XH(KC) = W(K-1)*X(K)-W(KC-1)*X(KC) - 102 CONTINUE - IF (MODN .EQ. 
0) X(NS2+1) = W(NS2)*(X(NS2+1)+X(NS2+1)) - DO 103 K=2,NS2 - KC = NP2-K - X(K) = XH(K)+XH(KC) - X(KC) = XH(K)-XH(KC) - 103 CONTINUE - X(1) = X(1)+X(1) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqf.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqf.f deleted file mode 100644 index 924f4cb601..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqf.f +++ /dev/null @@ -1,42 +0,0 @@ - SUBROUTINE DCOSQF (N,X,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(*) ,WSAVE(*) - DATA SQRT2 /1.41421356237309504880D0/ - IF (N.lt.2) GO TO 102 - IF (N.eq.2) GO TO 101 - GO TO 103 - 101 TSQX = SQRT2*X(2) - X(2) = X(1)-TSQX - X(1) = X(1)+TSQX - 102 RETURN - 103 CALL DCOSQF1 (N,X,WSAVE,WSAVE(N+1)) - RETURN - END - - SUBROUTINE DCOSQF1 (N,X,W,XH) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(1) ,W(1) ,XH(1) - NS2 = (N+1)/2 - NP2 = N+2 - DO 101 K=2,NS2 - KC = NP2-K - XH(K) = X(K)+X(KC) - XH(KC) = X(K)-X(KC) - 101 CONTINUE - MODN = MOD(N,2) - IF (MODN .EQ. 0) XH(NS2+1) = X(NS2+1)+X(NS2+1) - DO 102 K=2,NS2 - KC = NP2-K - X(K) = W(K-1)*XH(KC)+W(KC-1)*XH(K) - X(KC) = W(K-1)*XH(K)-W(KC-1)*XH(KC) - 102 CONTINUE - IF (MODN .EQ. 
0) X(NS2+1) = W(NS2)*XH(NS2+1) - CALL DFFTF (N,X,XH) - DO 103 I=3,N,2 - XIM1 = X(I-1)-X(I) - X(I) = X(I-1)+X(I) - X(I-1) = XIM1 - 103 CONTINUE - RETURN - END - diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqi.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqi.f deleted file mode 100644 index 215e5c1a5b..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosqi.f +++ /dev/null @@ -1,13 +0,0 @@ - SUBROUTINE DCOSQI (N,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WSAVE(1) - DATA PIH /1.57079632679489661923D0/ - DT = PIH/FLOAT(N) - FK = 0.0D0 - DO 101 K=1,N - FK = FK+1.0D0 - WSAVE(K) = COS(FK*DT) - 101 CONTINUE - CALL DFFTI (N,WSAVE(N+1)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcost.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcost.f deleted file mode 100644 index 7acb657a45..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcost.f +++ /dev/null @@ -1,45 +0,0 @@ - SUBROUTINE DCOST (N,X,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(*) ,WSAVE(*) - NM1 = N-1 - NP1 = N+1 - NS2 = N/2 - IF (N.lt.2) GO TO 106 - IF (N.eq.2) GO TO 101 - GO TO 102 - 101 X1H = X(1)+X(2) - X(2) = X(1)-X(2) - X(1) = X1H - RETURN - 102 IF (N .GT. 3) GO TO 103 - X1P3 = X(1)+X(3) - TX2 = X(2)+X(2) - X(2) = X(1)-X(3) - X(1) = X1P3+TX2 - X(3) = X1P3-TX2 - RETURN - 103 C1 = X(1)-X(N) - X(1) = X(1)+X(N) - DO 104 K=2,NS2 - KC = NP1-K - T1 = X(K)+X(KC) - T2 = X(K)-X(KC) - C1 = C1+WSAVE(KC)*T2 - T2 = WSAVE(K)*T2 - X(K) = T1-T2 - X(KC) = T1+T2 - 104 CONTINUE - MODN = MOD(N,2) - IF (MODN .NE. 0) X(NS2+1) = X(NS2+1)+X(NS2+1) - CALL DFFTF (NM1,X,WSAVE(N+1)) - XIM2 = X(2) - X(2) = C1 - DO 105 I=4,N,2 - XI = X(I) - X(I) = X(I-2)-X(I-1) - X(I-1) = XIM2 - XIM2 = XI - 105 CONTINUE - IF (MODN .NE. 
0) X(N) = XIM2 - 106 RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosti.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosti.f deleted file mode 100644 index 4c6d7bb29e..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dcosti.f +++ /dev/null @@ -1,19 +0,0 @@ - SUBROUTINE DCOSTI (N,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WSAVE(1) - DATA PI /3.14159265358979323846D0/ - IF (N .LE. 3) RETURN - NM1 = N-1 - NP1 = N+1 - NS2 = N/2 - DT = PI/FLOAT(NM1) - FK = 0.0D0 - DO 101 K=2,NS2 - KC = NP1-K - FK = FK+1.0D0 - WSAVE(K) = 2.0D0*SIN(FK*DT) - WSAVE(KC) = 2.0D0*COS(FK*DT) - 101 CONTINUE - CALL DFFTI (NM1,WSAVE(N+1)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftb.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftb.f deleted file mode 100644 index ab3a52e0af..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftb.f +++ /dev/null @@ -1,7 +0,0 @@ - SUBROUTINE DFFTB (N,R,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION R(1) ,WSAVE(1) - IF (N .EQ. 1) RETURN - CALL DFFTB1 (N,R,WSAVE,WSAVE(N+1),WSAVE(2*N+1)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftb1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftb1.f deleted file mode 100644 index 8ae38d791e..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftb1.f +++ /dev/null @@ -1,423 +0,0 @@ - SUBROUTINE DFFTB1 (N,C,CH,WA,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(*) ,C(*) ,WA(*) ,IFAC(*) - NF = IFAC(2) - NA = 0 - L1 = 1 - IW = 1 - DO 116 K1=1,NF - IP = IFAC(K1+2) - L2 = IP*L1 - IDO = N/L2 - IDL1 = IDO*L1 - IF (IP .NE. 4) GO TO 103 - IX2 = IW+IDO - IX3 = IX2+IDO - IF (NA .NE. 0) GO TO 101 - CALL DADB4 (IDO,L1,C,CH,WA(IW),WA(IX2),WA(IX3)) - GO TO 102 - 101 CALL DADB4 (IDO,L1,CH,C,WA(IW),WA(IX2),WA(IX3)) - 102 NA = 1-NA - GO TO 115 - 103 IF (IP .NE. 2) GO TO 106 - IF (NA .NE. 
0) GO TO 104 - CALL DADB2 (IDO,L1,C,CH,WA(IW)) - GO TO 105 - 104 CALL DADB2 (IDO,L1,CH,C,WA(IW)) - 105 NA = 1-NA - GO TO 115 - 106 IF (IP .NE. 3) GO TO 109 - IX2 = IW+IDO - IF (NA .NE. 0) GO TO 107 - CALL DADB3 (IDO,L1,C,CH,WA(IW),WA(IX2)) - GO TO 108 - 107 CALL DADB3 (IDO,L1,CH,C,WA(IW),WA(IX2)) - 108 NA = 1-NA - GO TO 115 - 109 IF (IP .NE. 5) GO TO 112 - IX2 = IW+IDO - IX3 = IX2+IDO - IX4 = IX3+IDO - IF (NA .NE. 0) GO TO 110 - CALL DADB5 (IDO,L1,C,CH,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - GO TO 111 - 110 CALL DADB5 (IDO,L1,CH,C,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - 111 NA = 1-NA - GO TO 115 - 112 IF (NA .NE. 0) GO TO 113 - CALL DADBG (IDO,IP,L1,IDL1,C,C,C,CH,CH,WA(IW)) - GO TO 114 - 113 CALL DADBG (IDO,IP,L1,IDL1,CH,CH,CH,C,C,WA(IW)) - 114 IF (IDO .EQ. 1) NA = 1-NA - 115 L1 = L2 - IW = IW+(IP-1)*IDO - 116 CONTINUE - IF (NA .EQ. 0) RETURN - DO 117 I=1,N - C(I) = CH(I) - 117 CONTINUE - RETURN - END - - SUBROUTINE DADBG (IDO,IP,L1,IDL1,CC,C1,C2,CH,CH2,WA) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(IDO,L1,IP) ,CC(IDO,IP,L1) , - 1 C1(IDO,L1,IP) ,C2(IDL1,IP), - 2 CH2(IDL1,IP) ,WA(1) - DATA TPI/6.28318530717958647692D0/ - ARG = TPI/FLOAT(IP) - DCP = COS(ARG) - DSP = SIN(ARG) - IDP2 = IDO+2 - NBD = (IDO-1)/2 - IPP2 = IP+2 - IPPH = (IP+1)/2 - IF (IDO .LT. L1) GO TO 103 - DO 102 K=1,L1 - DO 101 I=1,IDO - CH(I,K,1) = CC(I,1,K) - 101 CONTINUE - 102 CONTINUE - GO TO 106 - 103 DO 105 I=1,IDO - DO 104 K=1,L1 - CH(I,K,1) = CC(I,1,K) - 104 CONTINUE - 105 CONTINUE - 106 DO 108 J=2,IPPH - JC = IPP2-J - J2 = J+J - DO 107 K=1,L1 - CH(1,K,J) = CC(IDO,J2-2,K)+CC(IDO,J2-2,K) - CH(1,K,JC) = CC(1,J2-1,K)+CC(1,J2-1,K) - 107 CONTINUE - 108 CONTINUE - IF (IDO .EQ. 1) GO TO 116 - IF (NBD .LT. 
L1) GO TO 112 - DO 111 J=2,IPPH - JC = IPP2-J - DO 110 K=1,L1 - DO 109 I=3,IDO,2 - IC = IDP2-I - CH(I-1,K,J) = CC(I-1,2*J-1,K)+CC(IC-1,2*J-2,K) - CH(I-1,K,JC) = CC(I-1,2*J-1,K)-CC(IC-1,2*J-2,K) - CH(I,K,J) = CC(I,2*J-1,K)-CC(IC,2*J-2,K) - CH(I,K,JC) = CC(I,2*J-1,K)+CC(IC,2*J-2,K) - 109 CONTINUE - 110 CONTINUE - 111 CONTINUE - GO TO 116 - 112 DO 115 J=2,IPPH - JC = IPP2-J - DO 114 I=3,IDO,2 - IC = IDP2-I - DO 113 K=1,L1 - CH(I-1,K,J) = CC(I-1,2*J-1,K)+CC(IC-1,2*J-2,K) - CH(I-1,K,JC) = CC(I-1,2*J-1,K)-CC(IC-1,2*J-2,K) - CH(I,K,J) = CC(I,2*J-1,K)-CC(IC,2*J-2,K) - CH(I,K,JC) = CC(I,2*J-1,K)+CC(IC,2*J-2,K) - 113 CONTINUE - 114 CONTINUE - 115 CONTINUE - 116 AR1 = 1.0D0 - AI1 = 0.0D0 - DO 120 L=2,IPPH - LC = IPP2-L - AR1H = DCP*AR1-DSP*AI1 - AI1 = DCP*AI1+DSP*AR1 - AR1 = AR1H - DO 117 IK=1,IDL1 - C2(IK,L) = CH2(IK,1)+AR1*CH2(IK,2) - C2(IK,LC) = AI1*CH2(IK,IP) - 117 CONTINUE - DC2 = AR1 - DS2 = AI1 - AR2 = AR1 - AI2 = AI1 - DO 119 J=3,IPPH - JC = IPP2-J - AR2H = DC2*AR2-DS2*AI2 - AI2 = DC2*AI2+DS2*AR2 - AR2 = AR2H - DO 118 IK=1,IDL1 - C2(IK,L) = C2(IK,L)+AR2*CH2(IK,J) - C2(IK,LC) = C2(IK,LC)+AI2*CH2(IK,JC) - 118 CONTINUE - 119 CONTINUE - 120 CONTINUE - DO 122 J=2,IPPH - DO 121 IK=1,IDL1 - CH2(IK,1) = CH2(IK,1)+CH2(IK,J) - 121 CONTINUE - 122 CONTINUE - DO 124 J=2,IPPH - JC = IPP2-J - DO 123 K=1,L1 - CH(1,K,J) = C1(1,K,J)-C1(1,K,JC) - CH(1,K,JC) = C1(1,K,J)+C1(1,K,JC) - 123 CONTINUE - 124 CONTINUE - IF (IDO .EQ. 1) GO TO 132 - IF (NBD .LT. 
L1) GO TO 128 - DO 127 J=2,IPPH - JC = IPP2-J - DO 126 K=1,L1 - DO 125 I=3,IDO,2 - CH(I-1,K,J) = C1(I-1,K,J)-C1(I,K,JC) - CH(I-1,K,JC) = C1(I-1,K,J)+C1(I,K,JC) - CH(I,K,J) = C1(I,K,J)+C1(I-1,K,JC) - CH(I,K,JC) = C1(I,K,J)-C1(I-1,K,JC) - 125 CONTINUE - 126 CONTINUE - 127 CONTINUE - GO TO 132 - 128 DO 131 J=2,IPPH - JC = IPP2-J - DO 130 I=3,IDO,2 - DO 129 K=1,L1 - CH(I-1,K,J) = C1(I-1,K,J)-C1(I,K,JC) - CH(I-1,K,JC) = C1(I-1,K,J)+C1(I,K,JC) - CH(I,K,J) = C1(I,K,J)+C1(I-1,K,JC) - CH(I,K,JC) = C1(I,K,J)-C1(I-1,K,JC) - 129 CONTINUE - 130 CONTINUE - 131 CONTINUE - 132 CONTINUE - IF (IDO .EQ. 1) RETURN - DO 133 IK=1,IDL1 - C2(IK,1) = CH2(IK,1) - 133 CONTINUE - DO 135 J=2,IP - DO 134 K=1,L1 - C1(1,K,J) = CH(1,K,J) - 134 CONTINUE - 135 CONTINUE - IF (NBD .GT. L1) GO TO 139 - IS = -IDO - DO 138 J=2,IP - IS = IS+IDO - IDIJ = IS - DO 137 I=3,IDO,2 - IDIJ = IDIJ+2 - DO 136 K=1,L1 - C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)-WA(IDIJ)*CH(I,K,J) - C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)+WA(IDIJ)*CH(I-1,K,J) - 136 CONTINUE - 137 CONTINUE - 138 CONTINUE - GO TO 143 - 139 IS = -IDO - DO 142 J=2,IP - IS = IS+IDO - DO 141 K=1,L1 - IDIJ = IS - DO 140 I=3,IDO,2 - IDIJ = IDIJ+2 - C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)-WA(IDIJ)*CH(I,K,J) - C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)+WA(IDIJ)*CH(I-1,K,J) - 140 CONTINUE - 141 CONTINUE - 142 CONTINUE - 143 RETURN - END - - SUBROUTINE DADB2 (IDO,L1,CC,CH,WA1) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,2,L1) ,CH(IDO,L1,2) , - 1 WA1(1) - DO 101 K=1,L1 - CH(1,K,1) = CC(1,1,K)+CC(IDO,2,K) - CH(1,K,2) = CC(1,1,K)-CC(IDO,2,K) - 101 CONTINUE - IF (IDO.lt.2) GO TO 107 - IF (IDO.eq.2) GO TO 105 - GO TO 102 - 102 IDP2 = IDO+2 - DO 104 K=1,L1 - DO 103 I=3,IDO,2 - IC = IDP2-I - CH(I-1,K,1) = CC(I-1,1,K)+CC(IC-1,2,K) - TR2 = CC(I-1,1,K)-CC(IC-1,2,K) - CH(I,K,1) = CC(I,1,K)-CC(IC,2,K) - TI2 = CC(I,1,K)+CC(IC,2,K) - CH(I-1,K,2) = WA1(I-2)*TR2-WA1(I-1)*TI2 - CH(I,K,2) = WA1(I-2)*TI2+WA1(I-1)*TR2 - 103 CONTINUE - 104 CONTINUE - IF (MOD(IDO,2) .EQ. 
1) RETURN - 105 DO 106 K=1,L1 - CH(IDO,K,1) = CC(IDO,1,K)+CC(IDO,1,K) - CH(IDO,K,2) = -(CC(1,2,K)+CC(1,2,K)) - 106 CONTINUE - 107 RETURN - END - - SUBROUTINE DADB3 (IDO,L1,CC,CH,WA1,WA2) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,3,L1) ,CH(IDO,L1,3) , - 1 WA1(1) ,WA2(1) -C *** TAUI IS SQRT(3)/2 *** - DATA TAUR,TAUI /-0.5D0,0.86602540378443864676D0/ - DO 101 K=1,L1 - TR2 = CC(IDO,2,K)+CC(IDO,2,K) - CR2 = CC(1,1,K)+TAUR*TR2 - CH(1,K,1) = CC(1,1,K)+TR2 - CI3 = TAUI*(CC(1,3,K)+CC(1,3,K)) - CH(1,K,2) = CR2-CI3 - CH(1,K,3) = CR2+CI3 - 101 CONTINUE - IF (IDO .EQ. 1) RETURN - IDP2 = IDO+2 - DO 103 K=1,L1 - DO 102 I=3,IDO,2 - IC = IDP2-I - TR2 = CC(I-1,3,K)+CC(IC-1,2,K) - CR2 = CC(I-1,1,K)+TAUR*TR2 - CH(I-1,K,1) = CC(I-1,1,K)+TR2 - TI2 = CC(I,3,K)-CC(IC,2,K) - CI2 = CC(I,1,K)+TAUR*TI2 - CH(I,K,1) = CC(I,1,K)+TI2 - CR3 = TAUI*(CC(I-1,3,K)-CC(IC-1,2,K)) - CI3 = TAUI*(CC(I,3,K)+CC(IC,2,K)) - DR2 = CR2-CI3 - DR3 = CR2+CI3 - DI2 = CI2+CR3 - DI3 = CI2-CR3 - CH(I-1,K,2) = WA1(I-2)*DR2-WA1(I-1)*DI2 - CH(I,K,2) = WA1(I-2)*DI2+WA1(I-1)*DR2 - CH(I-1,K,3) = WA2(I-2)*DR3-WA2(I-1)*DI3 - CH(I,K,3) = WA2(I-2)*DI3+WA2(I-1)*DR3 - 102 CONTINUE - 103 CONTINUE - RETURN - END - - SUBROUTINE DADB4 (IDO,L1,CC,CH,WA1,WA2,WA3) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,4,L1) ,CH(IDO,L1,4) , - 1 WA1(1) ,WA2(1) ,WA3(1) - DATA SQRT2 /1.41421356237309504880D0/ - DO 101 K=1,L1 - TR1 = CC(1,1,K)-CC(IDO,4,K) - TR2 = CC(1,1,K)+CC(IDO,4,K) - TR3 = CC(IDO,2,K)+CC(IDO,2,K) - TR4 = CC(1,3,K)+CC(1,3,K) - CH(1,K,1) = TR2+TR3 - CH(1,K,2) = TR1-TR4 - CH(1,K,3) = TR2-TR3 - CH(1,K,4) = TR1+TR4 - 101 CONTINUE - IF (IDO.lt.2) GO TO 107 - IF (IDO.eq.2) GO TO 105 - GO TO 102 - 102 IDP2 = IDO+2 - DO 104 K=1,L1 - DO 103 I=3,IDO,2 - IC = IDP2-I - TI1 = CC(I,1,K)+CC(IC,4,K) - TI2 = CC(I,1,K)-CC(IC,4,K) - TI3 = CC(I,3,K)-CC(IC,2,K) - TR4 = CC(I,3,K)+CC(IC,2,K) - TR1 = CC(I-1,1,K)-CC(IC-1,4,K) - TR2 = CC(I-1,1,K)+CC(IC-1,4,K) - TI4 = CC(I-1,3,K)-CC(IC-1,2,K) - TR3 = CC(I-1,3,K)+CC(IC-1,2,K) - 
CH(I-1,K,1) = TR2+TR3 - CR3 = TR2-TR3 - CH(I,K,1) = TI2+TI3 - CI3 = TI2-TI3 - CR2 = TR1-TR4 - CR4 = TR1+TR4 - CI2 = TI1+TI4 - CI4 = TI1-TI4 - CH(I-1,K,2) = WA1(I-2)*CR2-WA1(I-1)*CI2 - CH(I,K,2) = WA1(I-2)*CI2+WA1(I-1)*CR2 - CH(I-1,K,3) = WA2(I-2)*CR3-WA2(I-1)*CI3 - CH(I,K,3) = WA2(I-2)*CI3+WA2(I-1)*CR3 - CH(I-1,K,4) = WA3(I-2)*CR4-WA3(I-1)*CI4 - CH(I,K,4) = WA3(I-2)*CI4+WA3(I-1)*CR4 - 103 CONTINUE - 104 CONTINUE - IF (MOD(IDO,2) .EQ. 1) RETURN - 105 CONTINUE - DO 106 K=1,L1 - TI1 = CC(1,2,K)+CC(1,4,K) - TI2 = CC(1,4,K)-CC(1,2,K) - TR1 = CC(IDO,1,K)-CC(IDO,3,K) - TR2 = CC(IDO,1,K)+CC(IDO,3,K) - CH(IDO,K,1) = TR2+TR2 - CH(IDO,K,2) = SQRT2*(TR1-TI1) - CH(IDO,K,3) = TI2+TI2 - CH(IDO,K,4) = -SQRT2*(TR1+TI1) - 106 CONTINUE - 107 RETURN - END - - - SUBROUTINE DADB5 (IDO,L1,CC,CH,WA1,WA2,WA3,WA4) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,5,L1) ,CH(IDO,L1,5) , - 1 WA1(1) ,WA2(1) ,WA3(1) ,WA4(1) -C *** TR11=COS(2*PI/5), TI11=SIN(2*PI/5) -C *** TR12=COS(4*PI/5), TI12=SIN(4*PI/5) - DATA TR11,TI11,TR12,TI12 /0.3090169943749474241D0, - + 0.95105651629515357212D0, - + -0.8090169943749474241D0,0.58778525229247312917D0/ - DO 101 K=1,L1 - TI5 = CC(1,3,K)+CC(1,3,K) - TI4 = CC(1,5,K)+CC(1,5,K) - TR2 = CC(IDO,2,K)+CC(IDO,2,K) - TR3 = CC(IDO,4,K)+CC(IDO,4,K) - CH(1,K,1) = CC(1,1,K)+TR2+TR3 - CR2 = CC(1,1,K)+TR11*TR2+TR12*TR3 - CR3 = CC(1,1,K)+TR12*TR2+TR11*TR3 - CI5 = TI11*TI5+TI12*TI4 - CI4 = TI12*TI5-TI11*TI4 - CH(1,K,2) = CR2-CI5 - CH(1,K,3) = CR3-CI4 - CH(1,K,4) = CR3+CI4 - CH(1,K,5) = CR2+CI5 - 101 CONTINUE - IF (IDO .EQ. 
1) RETURN - IDP2 = IDO+2 - DO 103 K=1,L1 - DO 102 I=3,IDO,2 - IC = IDP2-I - TI5 = CC(I,3,K)+CC(IC,2,K) - TI2 = CC(I,3,K)-CC(IC,2,K) - TI4 = CC(I,5,K)+CC(IC,4,K) - TI3 = CC(I,5,K)-CC(IC,4,K) - TR5 = CC(I-1,3,K)-CC(IC-1,2,K) - TR2 = CC(I-1,3,K)+CC(IC-1,2,K) - TR4 = CC(I-1,5,K)-CC(IC-1,4,K) - TR3 = CC(I-1,5,K)+CC(IC-1,4,K) - CH(I-1,K,1) = CC(I-1,1,K)+TR2+TR3 - CH(I,K,1) = CC(I,1,K)+TI2+TI3 - CR2 = CC(I-1,1,K)+TR11*TR2+TR12*TR3 - CI2 = CC(I,1,K)+TR11*TI2+TR12*TI3 - CR3 = CC(I-1,1,K)+TR12*TR2+TR11*TR3 - CI3 = CC(I,1,K)+TR12*TI2+TR11*TI3 - CR5 = TI11*TR5+TI12*TR4 - CI5 = TI11*TI5+TI12*TI4 - CR4 = TI12*TR5-TI11*TR4 - CI4 = TI12*TI5-TI11*TI4 - DR3 = CR3-CI4 - DR4 = CR3+CI4 - DI3 = CI3+CR4 - DI4 = CI3-CR4 - DR5 = CR2+CI5 - DR2 = CR2-CI5 - DI5 = CI2-CR5 - DI2 = CI2+CR5 - CH(I-1,K,2) = WA1(I-2)*DR2-WA1(I-1)*DI2 - CH(I,K,2) = WA1(I-2)*DI2+WA1(I-1)*DR2 - CH(I-1,K,3) = WA2(I-2)*DR3-WA2(I-1)*DI3 - CH(I,K,3) = WA2(I-2)*DI3+WA2(I-1)*DR3 - CH(I-1,K,4) = WA3(I-2)*DR4-WA3(I-1)*DI4 - CH(I,K,4) = WA3(I-2)*DI4+WA3(I-1)*DR4 - CH(I-1,K,5) = WA4(I-2)*DR5-WA4(I-1)*DI5 - CH(I,K,5) = WA4(I-2)*DI5+WA4(I-1)*DR5 - 102 CONTINUE - 103 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftf.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftf.f deleted file mode 100644 index 9bd758eccd..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftf.f +++ /dev/null @@ -1,7 +0,0 @@ - SUBROUTINE DFFTF (N,R,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION R(1) ,WSAVE(1) - IF (N .EQ. 
1) RETURN - CALL DFFTF1 (N,R,WSAVE,WSAVE(N+1),WSAVE(2*N+1)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftf1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftf1.f deleted file mode 100644 index e40e2594e0..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dfftf1.f +++ /dev/null @@ -1,418 +0,0 @@ - SUBROUTINE DFFTF1 (N,C,CH,WA,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(*) ,C(*) ,WA(*) ,IFAC(*) - NF = IFAC(2) - NA = 1 - L2 = N - IW = N - DO 111 K1=1,NF - KH = NF-K1 - IP = IFAC(KH+3) - L1 = L2/IP - IDO = N/L2 - IDL1 = IDO*L1 - IW = IW-(IP-1)*IDO - NA = 1-NA - IF (IP .NE. 4) GO TO 102 - IX2 = IW+IDO - IX3 = IX2+IDO - IF (NA .NE. 0) GO TO 101 - CALL DADF4 (IDO,L1,C,CH,WA(IW),WA(IX2),WA(IX3)) - GO TO 110 - 101 CALL DADF4 (IDO,L1,CH,C,WA(IW),WA(IX2),WA(IX3)) - GO TO 110 - 102 IF (IP .NE. 2) GO TO 104 - IF (NA .NE. 0) GO TO 103 - CALL DADF2 (IDO,L1,C,CH,WA(IW)) - GO TO 110 - 103 CALL DADF2 (IDO,L1,CH,C,WA(IW)) - GO TO 110 - 104 IF (IP .NE. 3) GO TO 106 - IX2 = IW+IDO - IF (NA .NE. 0) GO TO 105 - CALL DADF3 (IDO,L1,C,CH,WA(IW),WA(IX2)) - GO TO 110 - 105 CALL DADF3 (IDO,L1,CH,C,WA(IW),WA(IX2)) - GO TO 110 - 106 IF (IP .NE. 5) GO TO 108 - IX2 = IW+IDO - IX3 = IX2+IDO - IX4 = IX3+IDO - IF (NA .NE. 0) GO TO 107 - CALL DADF5 (IDO,L1,C,CH,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - GO TO 110 - 107 CALL DADF5 (IDO,L1,CH,C,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - GO TO 110 - 108 IF (IDO .EQ. 1) NA = 1-NA - IF (NA .NE. 0) GO TO 109 - CALL DADFG (IDO,IP,L1,IDL1,C,C,C,CH,CH,WA(IW)) - NA = 1 - GO TO 110 - 109 CALL DADFG (IDO,IP,L1,IDL1,CH,CH,CH,C,C,WA(IW)) - NA = 0 - 110 L2 = L1 - 111 CONTINUE - IF (NA .EQ. 
1) RETURN - DO 112 I=1,N - C(I) = CH(I) - 112 CONTINUE - RETURN - END - - SUBROUTINE DADFG (IDO,IP,L1,IDL1,CC,C1,C2,CH,CH2,WA) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(IDO,L1,IP) ,CC(IDO,IP,L1) , - 1 C1(IDO,L1,IP) ,C2(IDL1,IP), - 2 CH2(IDL1,IP) ,WA(1) - DATA TPI/6.28318530717958647692D0/ - ARG = TPI/FLOAT(IP) - DCP = COS(ARG) - DSP = SIN(ARG) - IPPH = (IP+1)/2 - IPP2 = IP+2 - IDP2 = IDO+2 - NBD = (IDO-1)/2 - IF (IDO .EQ. 1) GO TO 119 - DO 101 IK=1,IDL1 - CH2(IK,1) = C2(IK,1) - 101 CONTINUE - DO 103 J=2,IP - DO 102 K=1,L1 - CH(1,K,J) = C1(1,K,J) - 102 CONTINUE - 103 CONTINUE - IF (NBD .GT. L1) GO TO 107 - IS = -IDO - DO 106 J=2,IP - IS = IS+IDO - IDIJ = IS - DO 105 I=3,IDO,2 - IDIJ = IDIJ+2 - DO 104 K=1,L1 - CH(I-1,K,J) = WA(IDIJ-1)*C1(I-1,K,J)+WA(IDIJ)*C1(I,K,J) - CH(I,K,J) = WA(IDIJ-1)*C1(I,K,J)-WA(IDIJ)*C1(I-1,K,J) - 104 CONTINUE - 105 CONTINUE - 106 CONTINUE - GO TO 111 - 107 IS = -IDO - DO 110 J=2,IP - IS = IS+IDO - DO 109 K=1,L1 - IDIJ = IS - DO 108 I=3,IDO,2 - IDIJ = IDIJ+2 - CH(I-1,K,J) = WA(IDIJ-1)*C1(I-1,K,J)+WA(IDIJ)*C1(I,K,J) - CH(I,K,J) = WA(IDIJ-1)*C1(I,K,J)-WA(IDIJ)*C1(I-1,K,J) - 108 CONTINUE - 109 CONTINUE - 110 CONTINUE - 111 IF (NBD .LT. 
L1) GO TO 115 - DO 114 J=2,IPPH - JC = IPP2-J - DO 113 K=1,L1 - DO 112 I=3,IDO,2 - C1(I-1,K,J) = CH(I-1,K,J)+CH(I-1,K,JC) - C1(I-1,K,JC) = CH(I,K,J)-CH(I,K,JC) - C1(I,K,J) = CH(I,K,J)+CH(I,K,JC) - C1(I,K,JC) = CH(I-1,K,JC)-CH(I-1,K,J) - 112 CONTINUE - 113 CONTINUE - 114 CONTINUE - GO TO 121 - 115 DO 118 J=2,IPPH - JC = IPP2-J - DO 117 I=3,IDO,2 - DO 116 K=1,L1 - C1(I-1,K,J) = CH(I-1,K,J)+CH(I-1,K,JC) - C1(I-1,K,JC) = CH(I,K,J)-CH(I,K,JC) - C1(I,K,J) = CH(I,K,J)+CH(I,K,JC) - C1(I,K,JC) = CH(I-1,K,JC)-CH(I-1,K,J) - 116 CONTINUE - 117 CONTINUE - 118 CONTINUE - GO TO 121 - 119 DO 120 IK=1,IDL1 - C2(IK,1) = CH2(IK,1) - 120 CONTINUE - 121 DO 123 J=2,IPPH - JC = IPP2-J - DO 122 K=1,L1 - C1(1,K,J) = CH(1,K,J)+CH(1,K,JC) - C1(1,K,JC) = CH(1,K,JC)-CH(1,K,J) - 122 CONTINUE - 123 CONTINUE -C - AR1 = 1.0D0 - AI1 = 0.0D0 - DO 127 L=2,IPPH - LC = IPP2-L - AR1H = DCP*AR1-DSP*AI1 - AI1 = DCP*AI1+DSP*AR1 - AR1 = AR1H - DO 124 IK=1,IDL1 - CH2(IK,L) = C2(IK,1)+AR1*C2(IK,2) - CH2(IK,LC) = AI1*C2(IK,IP) - 124 CONTINUE - DC2 = AR1 - DS2 = AI1 - AR2 = AR1 - AI2 = AI1 - DO 126 J=3,IPPH - JC = IPP2-J - AR2H = DC2*AR2-DS2*AI2 - AI2 = DC2*AI2+DS2*AR2 - AR2 = AR2H - DO 125 IK=1,IDL1 - CH2(IK,L) = CH2(IK,L)+AR2*C2(IK,J) - CH2(IK,LC) = CH2(IK,LC)+AI2*C2(IK,JC) - 125 CONTINUE - 126 CONTINUE - 127 CONTINUE - DO 129 J=2,IPPH - DO 128 IK=1,IDL1 - CH2(IK,1) = CH2(IK,1)+C2(IK,J) - 128 CONTINUE - 129 CONTINUE -C - IF (IDO .LT. L1) GO TO 132 - DO 131 K=1,L1 - DO 130 I=1,IDO - CC(I,1,K) = CH(I,K,1) - 130 CONTINUE - 131 CONTINUE - GO TO 135 - 132 DO 134 I=1,IDO - DO 133 K=1,L1 - CC(I,1,K) = CH(I,K,1) - 133 CONTINUE - 134 CONTINUE - 135 DO 137 J=2,IPPH - JC = IPP2-J - J2 = J+J - DO 136 K=1,L1 - CC(IDO,J2-2,K) = CH(1,K,J) - CC(1,J2-1,K) = CH(1,K,JC) - 136 CONTINUE - 137 CONTINUE - IF (IDO .EQ. 1) RETURN - IF (NBD .LT. 
L1) GO TO 141 - DO 140 J=2,IPPH - JC = IPP2-J - J2 = J+J - DO 139 K=1,L1 - DO 138 I=3,IDO,2 - IC = IDP2-I - CC(I-1,J2-1,K) = CH(I-1,K,J)+CH(I-1,K,JC) - CC(IC-1,J2-2,K) = CH(I-1,K,J)-CH(I-1,K,JC) - CC(I,J2-1,K) = CH(I,K,J)+CH(I,K,JC) - CC(IC,J2-2,K) = CH(I,K,JC)-CH(I,K,J) - 138 CONTINUE - 139 CONTINUE - 140 CONTINUE - RETURN - 141 DO 144 J=2,IPPH - JC = IPP2-J - J2 = J+J - DO 143 I=3,IDO,2 - IC = IDP2-I - DO 142 K=1,L1 - CC(I-1,J2-1,K) = CH(I-1,K,J)+CH(I-1,K,JC) - CC(IC-1,J2-2,K) = CH(I-1,K,J)-CH(I-1,K,JC) - CC(I,J2-1,K) = CH(I,K,J)+CH(I,K,JC) - CC(IC,J2-2,K) = CH(I,K,JC)-CH(I,K,J) - 142 CONTINUE - 143 CONTINUE - 144 CONTINUE - RETURN - END - - - SUBROUTINE DADF2 (IDO,L1,CC,CH,WA1) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(IDO,2,L1) ,CC(IDO,L1,2) , - 1 WA1(1) - DO 101 K=1,L1 - CH(1,1,K) = CC(1,K,1)+CC(1,K,2) - CH(IDO,2,K) = CC(1,K,1)-CC(1,K,2) - 101 CONTINUE - IF (IDO.lt.2) GO TO 107 - IF (IDO.eq.2) GO TO 105 - GO TO 102 - 102 IDP2 = IDO+2 - DO 104 K=1,L1 - DO 103 I=3,IDO,2 - IC = IDP2-I - TR2 = WA1(I-2)*CC(I-1,K,2)+WA1(I-1)*CC(I,K,2) - TI2 = WA1(I-2)*CC(I,K,2)-WA1(I-1)*CC(I-1,K,2) - CH(I,1,K) = CC(I,K,1)+TI2 - CH(IC,2,K) = TI2-CC(I,K,1) - CH(I-1,1,K) = CC(I-1,K,1)+TR2 - CH(IC-1,2,K) = CC(I-1,K,1)-TR2 - 103 CONTINUE - 104 CONTINUE - IF (MOD(IDO,2) .EQ. 1) RETURN - 105 DO 106 K=1,L1 - CH(1,2,K) = -CC(IDO,K,2) - CH(IDO,1,K) = CC(IDO,K,1) - 106 CONTINUE - 107 RETURN - END - - SUBROUTINE DADF3 (IDO,L1,CC,CH,WA1,WA2) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(IDO,3,L1) ,CC(IDO,L1,3) , - 1 WA1(1) ,WA2(1) -C *** TAUI IS -SQRT(3)/2 *** - DATA TAUR,TAUI /-0.5D0,0.86602540378443864676D0/ - DO 101 K=1,L1 - CR2 = CC(1,K,2)+CC(1,K,3) - CH(1,1,K) = CC(1,K,1)+CR2 - CH(1,3,K) = TAUI*(CC(1,K,3)-CC(1,K,2)) - CH(IDO,2,K) = CC(1,K,1)+TAUR*CR2 - 101 CONTINUE - IF (IDO .EQ. 
1) RETURN - IDP2 = IDO+2 - DO 103 K=1,L1 - DO 102 I=3,IDO,2 - IC = IDP2-I - DR2 = WA1(I-2)*CC(I-1,K,2)+WA1(I-1)*CC(I,K,2) - DI2 = WA1(I-2)*CC(I,K,2)-WA1(I-1)*CC(I-1,K,2) - DR3 = WA2(I-2)*CC(I-1,K,3)+WA2(I-1)*CC(I,K,3) - DI3 = WA2(I-2)*CC(I,K,3)-WA2(I-1)*CC(I-1,K,3) - CR2 = DR2+DR3 - CI2 = DI2+DI3 - CH(I-1,1,K) = CC(I-1,K,1)+CR2 - CH(I,1,K) = CC(I,K,1)+CI2 - TR2 = CC(I-1,K,1)+TAUR*CR2 - TI2 = CC(I,K,1)+TAUR*CI2 - TR3 = TAUI*(DI2-DI3) - TI3 = TAUI*(DR3-DR2) - CH(I-1,3,K) = TR2+TR3 - CH(IC-1,2,K) = TR2-TR3 - CH(I,3,K) = TI2+TI3 - CH(IC,2,K) = TI3-TI2 - 102 CONTINUE - 103 CONTINUE - RETURN - END - - SUBROUTINE DADF4 (IDO,L1,CC,CH,WA1,WA2,WA3) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,L1,4) ,CH(IDO,4,L1) , - 1 WA1(1) ,WA2(1) ,WA3(1) - DATA HSQT2 /0.70710678118654752440D0/ - DO 101 K=1,L1 - TR1 = CC(1,K,2)+CC(1,K,4) - TR2 = CC(1,K,1)+CC(1,K,3) - CH(1,1,K) = TR1+TR2 - CH(IDO,4,K) = TR2-TR1 - CH(IDO,2,K) = CC(1,K,1)-CC(1,K,3) - CH(1,3,K) = CC(1,K,4)-CC(1,K,2) - 101 CONTINUE - IF (IDO.lt.2) GO TO 107 - IF (IDO.eq.2) GO TO 105 - GO TO 102 - 102 IDP2 = IDO+2 - DO 104 K=1,L1 - DO 103 I=3,IDO,2 - IC = IDP2-I - CR2 = WA1(I-2)*CC(I-1,K,2)+WA1(I-1)*CC(I,K,2) - CI2 = WA1(I-2)*CC(I,K,2)-WA1(I-1)*CC(I-1,K,2) - CR3 = WA2(I-2)*CC(I-1,K,3)+WA2(I-1)*CC(I,K,3) - CI3 = WA2(I-2)*CC(I,K,3)-WA2(I-1)*CC(I-1,K,3) - CR4 = WA3(I-2)*CC(I-1,K,4)+WA3(I-1)*CC(I,K,4) - CI4 = WA3(I-2)*CC(I,K,4)-WA3(I-1)*CC(I-1,K,4) - TR1 = CR2+CR4 - TR4 = CR4-CR2 - TI1 = CI2+CI4 - TI4 = CI2-CI4 - TI2 = CC(I,K,1)+CI3 - TI3 = CC(I,K,1)-CI3 - TR2 = CC(I-1,K,1)+CR3 - TR3 = CC(I-1,K,1)-CR3 - CH(I-1,1,K) = TR1+TR2 - CH(IC-1,4,K) = TR2-TR1 - CH(I,1,K) = TI1+TI2 - CH(IC,4,K) = TI1-TI2 - CH(I-1,3,K) = TI4+TR3 - CH(IC-1,2,K) = TR3-TI4 - CH(I,3,K) = TR4+TI3 - CH(IC,2,K) = TR4-TI3 - 103 CONTINUE - 104 CONTINUE - IF (MOD(IDO,2) .EQ. 
1) RETURN - 105 CONTINUE - DO 106 K=1,L1 - TI1 = -HSQT2*(CC(IDO,K,2)+CC(IDO,K,4)) - TR1 = HSQT2*(CC(IDO,K,2)-CC(IDO,K,4)) - CH(IDO,1,K) = TR1+CC(IDO,K,1) - CH(IDO,3,K) = CC(IDO,K,1)-TR1 - CH(1,2,K) = TI1-CC(IDO,K,3) - CH(1,4,K) = TI1+CC(IDO,K,3) - 106 CONTINUE - 107 RETURN - END - - - SUBROUTINE DADF5 (IDO,L1,CC,CH,WA1,WA2,WA3,WA4) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,L1,5) ,CH(IDO,5,L1) , - 1 WA1(1) ,WA2(1) ,WA3(1) ,WA4(1) - DATA TR11,TI11,TR12,TI12 /0.3090169943749474241D0, - + 0.95105651629515357212D0, - 1 -0.8090169943749474241D0, 0.58778525229247312917D0/ - DO 101 K=1,L1 - CR2 = CC(1,K,5)+CC(1,K,2) - CI5 = CC(1,K,5)-CC(1,K,2) - CR3 = CC(1,K,4)+CC(1,K,3) - CI4 = CC(1,K,4)-CC(1,K,3) - CH(1,1,K) = CC(1,K,1)+CR2+CR3 - CH(IDO,2,K) = CC(1,K,1)+TR11*CR2+TR12*CR3 - CH(1,3,K) = TI11*CI5+TI12*CI4 - CH(IDO,4,K) = CC(1,K,1)+TR12*CR2+TR11*CR3 - CH(1,5,K) = TI12*CI5-TI11*CI4 - 101 CONTINUE - IF (IDO .EQ. 1) RETURN - IDP2 = IDO+2 - DO 103 K=1,L1 - DO 102 I=3,IDO,2 - IC = IDP2-I - DR2 = WA1(I-2)*CC(I-1,K,2)+WA1(I-1)*CC(I,K,2) - DI2 = WA1(I-2)*CC(I,K,2)-WA1(I-1)*CC(I-1,K,2) - DR3 = WA2(I-2)*CC(I-1,K,3)+WA2(I-1)*CC(I,K,3) - DI3 = WA2(I-2)*CC(I,K,3)-WA2(I-1)*CC(I-1,K,3) - DR4 = WA3(I-2)*CC(I-1,K,4)+WA3(I-1)*CC(I,K,4) - DI4 = WA3(I-2)*CC(I,K,4)-WA3(I-1)*CC(I-1,K,4) - DR5 = WA4(I-2)*CC(I-1,K,5)+WA4(I-1)*CC(I,K,5) - DI5 = WA4(I-2)*CC(I,K,5)-WA4(I-1)*CC(I-1,K,5) - CR2 = DR2+DR5 - CI5 = DR5-DR2 - CR5 = DI2-DI5 - CI2 = DI2+DI5 - CR3 = DR3+DR4 - CI4 = DR4-DR3 - CR4 = DI3-DI4 - CI3 = DI3+DI4 - CH(I-1,1,K) = CC(I-1,K,1)+CR2+CR3 - CH(I,1,K) = CC(I,K,1)+CI2+CI3 - TR2 = CC(I-1,K,1)+TR11*CR2+TR12*CR3 - TI2 = CC(I,K,1)+TR11*CI2+TR12*CI3 - TR3 = CC(I-1,K,1)+TR12*CR2+TR11*CR3 - TI3 = CC(I,K,1)+TR12*CI2+TR11*CI3 - TR5 = TI11*CR5+TI12*CR4 - TI5 = TI11*CI5+TI12*CI4 - TR4 = TI12*CR5-TI11*CR4 - TI4 = TI12*CI5-TI11*CI4 - CH(I-1,3,K) = TR2+TR5 - CH(IC-1,2,K) = TR2-TR5 - CH(I,3,K) = TI2+TI5 - CH(IC,2,K) = TI5-TI2 - CH(I-1,5,K) = TR3+TR4 - CH(IC-1,4,K) = TR3-TR4 - CH(I,5,K) = TI3+TI4 
- CH(IC,4,K) = TI4-TI3 - 102 CONTINUE - 103 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dffti.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dffti.f deleted file mode 100644 index 1b53f42261..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dffti.f +++ /dev/null @@ -1,8 +0,0 @@ - SUBROUTINE DFFTI (N,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WSAVE(1) - IF (N .EQ. 1) RETURN - CALL DFFTI1 (N,WSAVE(N+1),WSAVE(2*N+1)) - RETURN - END - diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dffti1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dffti1.f deleted file mode 100644 index 16eff1c927..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dffti1.f +++ /dev/null @@ -1,60 +0,0 @@ - SUBROUTINE DFFTI1 (N,WA,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WA(*) ,IFAC(*) ,NTRYH(4) - DATA NTRYH(1),NTRYH(2),NTRYH(3),NTRYH(4)/4,2,3,5/ - NL = N - NF = 0 - J = 0 - 101 J = J+1 - IF (J.le.4) GO TO 102 - GO TO 103 - 102 NTRY = NTRYH(J) - GO TO 104 - 103 NTRY = NTRY+2 - 104 NQ = NL/NTRY - NR = NL-NTRY*NQ - IF (NR.eq.0) GO TO 105 - GO TO 101 - 105 NF = NF+1 - IFAC(NF+2) = NTRY - NL = NQ - IF (NTRY .NE. 2) GO TO 107 - IF (NF .EQ. 1) GO TO 107 - DO 106 I=2,NF - IB = NF-I+2 - IFAC(IB+2) = IFAC(IB+1) - 106 CONTINUE - IFAC(3) = 2 - 107 IF (NL .NE. 1) GO TO 104 - IFAC(1) = N - IFAC(2) = NF - TPI = 6.28318530717958647692D0 - ARGH = TPI/FLOAT(N) - IS = 0 - NFM1 = NF-1 - L1 = 1 - IF (NFM1 .EQ. 
0) RETURN - DO 110 K1=1,NFM1 - IP = IFAC(K1+2) - LD = 0 - L2 = L1*IP - IDO = N/L2 - IPM = IP-1 - DO 109 J=1,IPM - LD = LD+L1 - I = IS - ARGLD = FLOAT(LD)*ARGH - FI = 0.0D0 - DO 108 II=3,IDO,2 - I = I+2 - FI = FI+1.0D0 - ARG = FI*ARGLD - WA(I-1) = COS(ARG) - WA(I) = SIN(ARG) - 108 CONTINUE - IS = IS+IDO - 109 CONTINUE - L1 = L2 - 110 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqb.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqb.f deleted file mode 100644 index d8d78354c8..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqb.f +++ /dev/null @@ -1,19 +0,0 @@ - SUBROUTINE DSINQB (N,X,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(1) ,WSAVE(1) - IF (N .GT. 1) GO TO 101 - X(1) = 4.0D0*X(1) - RETURN - 101 NS2 = N/2 - DO 102 K=2,N,2 - X(K) = -X(K) - 102 CONTINUE - CALL DCOSQB (N,X,WSAVE) - DO 103 K=1,NS2 - KC = N-K - XHOLD = X(K) - X(K) = X(KC+1) - X(KC+1) = XHOLD - 103 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqf.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqf.f deleted file mode 100644 index 1b257ea5b0..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqf.f +++ /dev/null @@ -1,17 +0,0 @@ - SUBROUTINE DSINQF (N,X,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(1) ,WSAVE(1) - IF (N .EQ. 
1) RETURN - NS2 = N/2 - DO 101 K=1,NS2 - KC = N-K - XHOLD = X(K) - X(K) = X(KC+1) - X(KC+1) = XHOLD - 101 CONTINUE - CALL DCOSQF (N,X,WSAVE) - DO 102 K=2,N,2 - X(K) = -X(K) - 102 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqi.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqi.f deleted file mode 100644 index c4897c2b40..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinqi.f +++ /dev/null @@ -1,6 +0,0 @@ - SUBROUTINE DSINQI (N,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WSAVE(1) - CALL DCOSQI (N,WSAVE) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsint.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsint.f deleted file mode 100644 index e9942fb212..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsint.f +++ /dev/null @@ -1,10 +0,0 @@ - SUBROUTINE DSINT (N,X,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(1) ,WSAVE(1) - NP1 = N+1 - IW1 = N/2+1 - IW2 = IW1+NP1 - IW3 = IW2+NP1 - CALL DSINT1(N,X,WSAVE,WSAVE(IW1),WSAVE(IW2),WSAVE(IW3)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsint1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsint1.f deleted file mode 100644 index c8453a78d9..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsint1.f +++ /dev/null @@ -1,43 +0,0 @@ - SUBROUTINE DSINT1(N,WAR,WAS,XH,X,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WAR(*),WAS(*),X(*),XH(*),IFAC(*) - DATA SQRT3 /1.73205080756887729352D0/ - DO 100 I=1,N - XH(I) = WAR(I) - WAR(I) = X(I) - 100 CONTINUE - IF (N.lt.2) GO TO 101 - IF (N.eq.2) GO TO 102 - GO TO 103 - 101 XH(1) = XH(1)+XH(1) - GO TO 106 - 102 XHOLD = SQRT3*(XH(1)+XH(2)) - XH(2) = SQRT3*(XH(1)-XH(2)) - XH(1) = XHOLD - GO TO 106 - 103 NP1 = N+1 - NS2 = N/2 - X(1) = 0.0D0 - DO 104 K=1,NS2 - KC = NP1-K - T1 = XH(K)-XH(KC) - T2 = WAS(K)*(XH(K)+XH(KC)) - X(K+1) = T1+T2 - X(KC+1) = T2-T1 - 104 CONTINUE - MODN = MOD(N,2) - IF (MODN .NE. 
0) X(NS2+2) = 4.0D0*XH(NS2+1) - CALL DFFTF1 (NP1,X,XH,WAR,IFAC) - XH(1) = 0.5D0*X(1) - DO 105 I=3,N,2 - XH(I-1) = -X(I) - XH(I) = XH(I-2)+X(I-1) - 105 CONTINUE - IF (MODN .NE. 0) GO TO 106 - XH(N) = -X(N+1) - 106 DO 107 I=1,N - X(I) = WAR(I) - WAR(I) = XH(I) - 107 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinti.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinti.f deleted file mode 100644 index e655c0a38c..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/dsinti.f +++ /dev/null @@ -1,14 +0,0 @@ - SUBROUTINE DSINTI (N,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WSAVE(1) - DATA PI /3.14159265358979323846D0/ - IF (N .LE. 1) RETURN - NS2 = N/2 - NP1 = N+1 - DT = PI/FLOAT(NP1) - DO 101 K=1,NS2 - WSAVE(K) = 2.0D0*SIN(K*DT) - 101 CONTINUE - CALL DFFTI (NP1,WSAVE(NS2+1)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftb.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftb.f deleted file mode 100644 index 7eb2a7cd98..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftb.f +++ /dev/null @@ -1,9 +0,0 @@ - SUBROUTINE ZFFTB (N,C,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION C(1) ,WSAVE(1) - IF (N .EQ. 1) RETURN - IW1 = N+N+1 - IW2 = IW1+N+N - CALL ZFFTB1 (N,C,WSAVE,WSAVE(IW1),WSAVE(IW2)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftb1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftb1.f deleted file mode 100644 index 19fa10ac78..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftb1.f +++ /dev/null @@ -1,384 +0,0 @@ - SUBROUTINE ZFFTB1 (N,C,CH,WA,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(*) ,C(*) ,WA(*) ,IFAC(*) - NF = IFAC(2) - NA = 0 - L1 = 1 - IW = 1 - DO 116 K1=1,NF - IP = IFAC(K1+2) - L2 = IP*L1 - IDO = N/L2 - IDOT = IDO+IDO - IDL1 = IDOT*L1 - IF (IP .NE. 4) GO TO 103 - IX2 = IW+IDOT - IX3 = IX2+IDOT - IF (NA .NE. 
0) GO TO 101 - CALL DPASSB4 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3)) - GO TO 102 - 101 CALL DPASSB4 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3)) - 102 NA = 1-NA - GO TO 115 - 103 IF (IP .NE. 2) GO TO 106 - IF (NA .NE. 0) GO TO 104 - CALL DPASSB2 (IDOT,L1,C,CH,WA(IW)) - GO TO 105 - 104 CALL DPASSB2 (IDOT,L1,CH,C,WA(IW)) - 105 NA = 1-NA - GO TO 115 - 106 IF (IP .NE. 3) GO TO 109 - IX2 = IW+IDOT - IF (NA .NE. 0) GO TO 107 - CALL DPASSB3 (IDOT,L1,C,CH,WA(IW),WA(IX2)) - GO TO 108 - 107 CALL DPASSB3 (IDOT,L1,CH,C,WA(IW),WA(IX2)) - 108 NA = 1-NA - GO TO 115 - 109 IF (IP .NE. 5) GO TO 112 - IX2 = IW+IDOT - IX3 = IX2+IDOT - IX4 = IX3+IDOT - IF (NA .NE. 0) GO TO 110 - CALL DPASSB5 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - GO TO 111 - 110 CALL DPASSB5 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - 111 NA = 1-NA - GO TO 115 - 112 IF (NA .NE. 0) GO TO 113 - CALL DPASSB (NAC,IDOT,IP,L1,IDL1,C,C,C,CH,CH,WA(IW)) - GO TO 114 - 113 CALL DPASSB (NAC,IDOT,IP,L1,IDL1,CH,CH,CH,C,C,WA(IW)) - 114 IF (NAC .NE. 0) NA = 1-NA - 115 L1 = L2 - IW = IW+(IP-1)*IDOT - 116 CONTINUE - IF (NA .EQ. 0) RETURN - N2 = N+N - DO 117 I=1,N2 - C(I) = CH(I) - 117 CONTINUE - RETURN - END - - SUBROUTINE DPASSB (NAC,IDO,IP,L1,IDL1,CC,C1,C2,CH,CH2,WA) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(IDO,L1,IP) ,CC(IDO,IP,L1) , - 1 C1(IDO,L1,IP) ,WA(1) ,C2(IDL1,IP), - 2 CH2(IDL1,IP) - IDOT = IDO/2 - NT = IP*IDL1 - IPP2 = IP+2 - IPPH = (IP+1)/2 - IDP = IP*IDO -C - IF (IDO .LT. 
L1) GO TO 106 - DO 103 J=2,IPPH - JC = IPP2-J - DO 102 K=1,L1 - DO 101 I=1,IDO - CH(I,K,J) = CC(I,J,K)+CC(I,JC,K) - CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K) - 101 CONTINUE - 102 CONTINUE - 103 CONTINUE - DO 105 K=1,L1 - DO 104 I=1,IDO - CH(I,K,1) = CC(I,1,K) - 104 CONTINUE - 105 CONTINUE - GO TO 112 - 106 DO 109 J=2,IPPH - JC = IPP2-J - DO 108 I=1,IDO - DO 107 K=1,L1 - CH(I,K,J) = CC(I,J,K)+CC(I,JC,K) - CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K) - 107 CONTINUE - 108 CONTINUE - 109 CONTINUE - DO 111 I=1,IDO - DO 110 K=1,L1 - CH(I,K,1) = CC(I,1,K) - 110 CONTINUE - 111 CONTINUE - 112 IDL = 2-IDO - INC = 0 - DO 116 L=2,IPPH - LC = IPP2-L - IDL = IDL+IDO - DO 113 IK=1,IDL1 - C2(IK,L) = CH2(IK,1)+WA(IDL-1)*CH2(IK,2) - C2(IK,LC) = WA(IDL)*CH2(IK,IP) - 113 CONTINUE - IDLJ = IDL - INC = INC+IDO - DO 115 J=3,IPPH - JC = IPP2-J - IDLJ = IDLJ+INC - IF (IDLJ .GT. IDP) IDLJ = IDLJ-IDP - WAR = WA(IDLJ-1) - WAI = WA(IDLJ) - DO 114 IK=1,IDL1 - C2(IK,L) = C2(IK,L)+WAR*CH2(IK,J) - C2(IK,LC) = C2(IK,LC)+WAI*CH2(IK,JC) - 114 CONTINUE - 115 CONTINUE - 116 CONTINUE - DO 118 J=2,IPPH - DO 117 IK=1,IDL1 - CH2(IK,1) = CH2(IK,1)+CH2(IK,J) - 117 CONTINUE - 118 CONTINUE - DO 120 J=2,IPPH - JC = IPP2-J - DO 119 IK=2,IDL1,2 - CH2(IK-1,J) = C2(IK-1,J)-C2(IK,JC) - CH2(IK-1,JC) = C2(IK-1,J)+C2(IK,JC) - CH2(IK,J) = C2(IK,J)+C2(IK-1,JC) - CH2(IK,JC) = C2(IK,J)-C2(IK-1,JC) - 119 CONTINUE - 120 CONTINUE - NAC = 1 - IF (IDO .EQ. 2) RETURN - NAC = 0 - DO 121 IK=1,IDL1 - C2(IK,1) = CH2(IK,1) - 121 CONTINUE - DO 123 J=2,IP - DO 122 K=1,L1 - C1(1,K,J) = CH(1,K,J) - C1(2,K,J) = CH(2,K,J) - 122 CONTINUE - 123 CONTINUE - IF (IDOT .GT. 
L1) GO TO 127 - IDIJ = 0 - DO 126 J=2,IP - IDIJ = IDIJ+2 - DO 125 I=4,IDO,2 - IDIJ = IDIJ+2 - DO 124 K=1,L1 - C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)-WA(IDIJ)*CH(I,K,J) - C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)+WA(IDIJ)*CH(I-1,K,J) - 124 CONTINUE - 125 CONTINUE - 126 CONTINUE - RETURN - 127 IDJ = 2-IDO - DO 130 J=2,IP - IDJ = IDJ+IDO - DO 129 K=1,L1 - IDIJ = IDJ - DO 128 I=4,IDO,2 - IDIJ = IDIJ+2 - C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)-WA(IDIJ)*CH(I,K,J) - C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)+WA(IDIJ)*CH(I-1,K,J) - 128 CONTINUE - 129 CONTINUE - 130 CONTINUE - RETURN - END - - SUBROUTINE DPASSB2 (IDO,L1,CC,CH,WA1) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,2,L1) ,CH(IDO,L1,2) , - 1 WA1(1) - IF (IDO .GT. 2) GO TO 102 - DO 101 K=1,L1 - CH(1,K,1) = CC(1,1,K)+CC(1,2,K) - CH(1,K,2) = CC(1,1,K)-CC(1,2,K) - CH(2,K,1) = CC(2,1,K)+CC(2,2,K) - CH(2,K,2) = CC(2,1,K)-CC(2,2,K) - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - CH(I-1,K,1) = CC(I-1,1,K)+CC(I-1,2,K) - TR2 = CC(I-1,1,K)-CC(I-1,2,K) - CH(I,K,1) = CC(I,1,K)+CC(I,2,K) - TI2 = CC(I,1,K)-CC(I,2,K) - CH(I,K,2) = WA1(I-1)*TI2+WA1(I)*TR2 - CH(I-1,K,2) = WA1(I-1)*TR2-WA1(I)*TI2 - 103 CONTINUE - 104 CONTINUE - RETURN - END - - SUBROUTINE DPASSB3 (IDO,L1,CC,CH,WA1,WA2) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,3,L1) ,CH(IDO,L1,3) , - 1 WA1(1) ,WA2(1) -C *** TAUI IS SQRT(3)/2 *** - DATA TAUR,TAUI /-0.5D0,0.86602540378443864676D0/ - IF (IDO .NE. 
2) GO TO 102 - DO 101 K=1,L1 - TR2 = CC(1,2,K)+CC(1,3,K) - CR2 = CC(1,1,K)+TAUR*TR2 - CH(1,K,1) = CC(1,1,K)+TR2 - TI2 = CC(2,2,K)+CC(2,3,K) - CI2 = CC(2,1,K)+TAUR*TI2 - CH(2,K,1) = CC(2,1,K)+TI2 - CR3 = TAUI*(CC(1,2,K)-CC(1,3,K)) - CI3 = TAUI*(CC(2,2,K)-CC(2,3,K)) - CH(1,K,2) = CR2-CI3 - CH(1,K,3) = CR2+CI3 - CH(2,K,2) = CI2+CR3 - CH(2,K,3) = CI2-CR3 - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - TR2 = CC(I-1,2,K)+CC(I-1,3,K) - CR2 = CC(I-1,1,K)+TAUR*TR2 - CH(I-1,K,1) = CC(I-1,1,K)+TR2 - TI2 = CC(I,2,K)+CC(I,3,K) - CI2 = CC(I,1,K)+TAUR*TI2 - CH(I,K,1) = CC(I,1,K)+TI2 - CR3 = TAUI*(CC(I-1,2,K)-CC(I-1,3,K)) - CI3 = TAUI*(CC(I,2,K)-CC(I,3,K)) - DR2 = CR2-CI3 - DR3 = CR2+CI3 - DI2 = CI2+CR3 - DI3 = CI2-CR3 - CH(I,K,2) = WA1(I-1)*DI2+WA1(I)*DR2 - CH(I-1,K,2) = WA1(I-1)*DR2-WA1(I)*DI2 - CH(I,K,3) = WA2(I-1)*DI3+WA2(I)*DR3 - CH(I-1,K,3) = WA2(I-1)*DR3-WA2(I)*DI3 - 103 CONTINUE - 104 CONTINUE - RETURN - END - - - SUBROUTINE DPASSB4 (IDO,L1,CC,CH,WA1,WA2,WA3) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,4,L1) ,CH(IDO,L1,4) , - 1 WA1(1) ,WA2(1) ,WA3(1) - IF (IDO .NE. 
2) GO TO 102 - DO 101 K=1,L1 - TI1 = CC(2,1,K)-CC(2,3,K) - TI2 = CC(2,1,K)+CC(2,3,K) - TR4 = CC(2,4,K)-CC(2,2,K) - TI3 = CC(2,2,K)+CC(2,4,K) - TR1 = CC(1,1,K)-CC(1,3,K) - TR2 = CC(1,1,K)+CC(1,3,K) - TI4 = CC(1,2,K)-CC(1,4,K) - TR3 = CC(1,2,K)+CC(1,4,K) - CH(1,K,1) = TR2+TR3 - CH(1,K,3) = TR2-TR3 - CH(2,K,1) = TI2+TI3 - CH(2,K,3) = TI2-TI3 - CH(1,K,2) = TR1+TR4 - CH(1,K,4) = TR1-TR4 - CH(2,K,2) = TI1+TI4 - CH(2,K,4) = TI1-TI4 - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - TI1 = CC(I,1,K)-CC(I,3,K) - TI2 = CC(I,1,K)+CC(I,3,K) - TI3 = CC(I,2,K)+CC(I,4,K) - TR4 = CC(I,4,K)-CC(I,2,K) - TR1 = CC(I-1,1,K)-CC(I-1,3,K) - TR2 = CC(I-1,1,K)+CC(I-1,3,K) - TI4 = CC(I-1,2,K)-CC(I-1,4,K) - TR3 = CC(I-1,2,K)+CC(I-1,4,K) - CH(I-1,K,1) = TR2+TR3 - CR3 = TR2-TR3 - CH(I,K,1) = TI2+TI3 - CI3 = TI2-TI3 - CR2 = TR1+TR4 - CR4 = TR1-TR4 - CI2 = TI1+TI4 - CI4 = TI1-TI4 - CH(I-1,K,2) = WA1(I-1)*CR2-WA1(I)*CI2 - CH(I,K,2) = WA1(I-1)*CI2+WA1(I)*CR2 - CH(I-1,K,3) = WA2(I-1)*CR3-WA2(I)*CI3 - CH(I,K,3) = WA2(I-1)*CI3+WA2(I)*CR3 - CH(I-1,K,4) = WA3(I-1)*CR4-WA3(I)*CI4 - CH(I,K,4) = WA3(I-1)*CI4+WA3(I)*CR4 - 103 CONTINUE - 104 CONTINUE - RETURN - END - - SUBROUTINE DPASSB5 (IDO,L1,CC,CH,WA1,WA2,WA3,WA4) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,5,L1) ,CH(IDO,L1,5) , - 1 WA1(1) ,WA2(1) ,WA3(1) ,WA4(1) -C *** TR11=COS(2*PI/5), TI11=SIN(2*PI/5) -C *** TR12=COS(4*PI/5), TI12=SIN(4*PI/5) - DATA TR11,TI11,TR12,TI12 /0.3090169943749474241D0, - + 0.95105651629515357212D0, - + -0.8090169943749474241D0,0.58778525229247312917D0/ - IF (IDO .NE. 
2) GO TO 102 - DO 101 K=1,L1 - TI5 = CC(2,2,K)-CC(2,5,K) - TI2 = CC(2,2,K)+CC(2,5,K) - TI4 = CC(2,3,K)-CC(2,4,K) - TI3 = CC(2,3,K)+CC(2,4,K) - TR5 = CC(1,2,K)-CC(1,5,K) - TR2 = CC(1,2,K)+CC(1,5,K) - TR4 = CC(1,3,K)-CC(1,4,K) - TR3 = CC(1,3,K)+CC(1,4,K) - CH(1,K,1) = CC(1,1,K)+TR2+TR3 - CH(2,K,1) = CC(2,1,K)+TI2+TI3 - CR2 = CC(1,1,K)+TR11*TR2+TR12*TR3 - CI2 = CC(2,1,K)+TR11*TI2+TR12*TI3 - CR3 = CC(1,1,K)+TR12*TR2+TR11*TR3 - CI3 = CC(2,1,K)+TR12*TI2+TR11*TI3 - CR5 = TI11*TR5+TI12*TR4 - CI5 = TI11*TI5+TI12*TI4 - CR4 = TI12*TR5-TI11*TR4 - CI4 = TI12*TI5-TI11*TI4 - CH(1,K,2) = CR2-CI5 - CH(1,K,5) = CR2+CI5 - CH(2,K,2) = CI2+CR5 - CH(2,K,3) = CI3+CR4 - CH(1,K,3) = CR3-CI4 - CH(1,K,4) = CR3+CI4 - CH(2,K,4) = CI3-CR4 - CH(2,K,5) = CI2-CR5 - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - TI5 = CC(I,2,K)-CC(I,5,K) - TI2 = CC(I,2,K)+CC(I,5,K) - TI4 = CC(I,3,K)-CC(I,4,K) - TI3 = CC(I,3,K)+CC(I,4,K) - TR5 = CC(I-1,2,K)-CC(I-1,5,K) - TR2 = CC(I-1,2,K)+CC(I-1,5,K) - TR4 = CC(I-1,3,K)-CC(I-1,4,K) - TR3 = CC(I-1,3,K)+CC(I-1,4,K) - CH(I-1,K,1) = CC(I-1,1,K)+TR2+TR3 - CH(I,K,1) = CC(I,1,K)+TI2+TI3 - CR2 = CC(I-1,1,K)+TR11*TR2+TR12*TR3 - CI2 = CC(I,1,K)+TR11*TI2+TR12*TI3 - CR3 = CC(I-1,1,K)+TR12*TR2+TR11*TR3 - CI3 = CC(I,1,K)+TR12*TI2+TR11*TI3 - CR5 = TI11*TR5+TI12*TR4 - CI5 = TI11*TI5+TI12*TI4 - CR4 = TI12*TR5-TI11*TR4 - CI4 = TI12*TI5-TI11*TI4 - DR3 = CR3-CI4 - DR4 = CR3+CI4 - DI3 = CI3+CR4 - DI4 = CI3-CR4 - DR5 = CR2+CI5 - DR2 = CR2-CI5 - DI5 = CI2-CR5 - DI2 = CI2+CR5 - CH(I-1,K,2) = WA1(I-1)*DR2-WA1(I)*DI2 - CH(I,K,2) = WA1(I-1)*DI2+WA1(I)*DR2 - CH(I-1,K,3) = WA2(I-1)*DR3-WA2(I)*DI3 - CH(I,K,3) = WA2(I-1)*DI3+WA2(I)*DR3 - CH(I-1,K,4) = WA3(I-1)*DR4-WA3(I)*DI4 - CH(I,K,4) = WA3(I-1)*DI4+WA3(I)*DR4 - CH(I-1,K,5) = WA4(I-1)*DR5-WA4(I)*DI5 - CH(I,K,5) = WA4(I-1)*DI5+WA4(I)*DR5 - 103 CONTINUE - 104 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftf.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftf.f deleted file mode 100644 index 
16dadc2bcf..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftf.f +++ /dev/null @@ -1,9 +0,0 @@ - SUBROUTINE ZFFTF (N,C,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION C(1) ,WSAVE(1) - IF (N .EQ. 1) RETURN - IW1 = N+N+1 - IW2 = IW1+N+N - CALL ZFFTF1 (N,C,WSAVE,WSAVE(IW1),WSAVE(IW2)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftf1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftf1.f deleted file mode 100644 index 6a1dae915e..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zfftf1.f +++ /dev/null @@ -1,383 +0,0 @@ - SUBROUTINE ZFFTF1 (N,C,CH,WA,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(*) ,C(*) ,WA(*) ,IFAC(*) - NF = IFAC(2) - NA = 0 - L1 = 1 - IW = 1 - DO 116 K1=1,NF - IP = IFAC(K1+2) - L2 = IP*L1 - IDO = N/L2 - IDOT = IDO+IDO - IDL1 = IDOT*L1 - IF (IP .NE. 4) GO TO 103 - IX2 = IW+IDOT - IX3 = IX2+IDOT - IF (NA .NE. 0) GO TO 101 - CALL DPASSF4 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3)) - GO TO 102 - 101 CALL DPASSF4 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3)) - 102 NA = 1-NA - GO TO 115 - 103 IF (IP .NE. 2) GO TO 106 - IF (NA .NE. 0) GO TO 104 - CALL DPASSF2 (IDOT,L1,C,CH,WA(IW)) - GO TO 105 - 104 CALL DPASSF2 (IDOT,L1,CH,C,WA(IW)) - 105 NA = 1-NA - GO TO 115 - 106 IF (IP .NE. 3) GO TO 109 - IX2 = IW+IDOT - IF (NA .NE. 0) GO TO 107 - CALL DPASSF3 (IDOT,L1,C,CH,WA(IW),WA(IX2)) - GO TO 108 - 107 CALL DPASSF3 (IDOT,L1,CH,C,WA(IW),WA(IX2)) - 108 NA = 1-NA - GO TO 115 - 109 IF (IP .NE. 5) GO TO 112 - IX2 = IW+IDOT - IX3 = IX2+IDOT - IX4 = IX3+IDOT - IF (NA .NE. 0) GO TO 110 - CALL DPASSF5 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - GO TO 111 - 110 CALL DPASSF5 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3),WA(IX4)) - 111 NA = 1-NA - GO TO 115 - 112 IF (NA .NE. 0) GO TO 113 - CALL DPASSF (NAC,IDOT,IP,L1,IDL1,C,C,C,CH,CH,WA(IW)) - GO TO 114 - 113 CALL DPASSF (NAC,IDOT,IP,L1,IDL1,CH,CH,CH,C,C,WA(IW)) - 114 IF (NAC .NE. 0) NA = 1-NA - 115 L1 = L2 - IW = IW+(IP-1)*IDOT - 116 CONTINUE - IF (NA .EQ. 
0) RETURN - N2 = N+N - DO 117 I=1,N2 - C(I) = CH(I) - 117 CONTINUE - RETURN - END - - SUBROUTINE DPASSF (NAC,IDO,IP,L1,IDL1,CC,C1,C2,CH,CH2,WA) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CH(IDO,L1,IP) ,CC(IDO,IP,L1) , - 1 C1(IDO,L1,IP) ,WA(1) ,C2(IDL1,IP), - 2 CH2(IDL1,IP) - IDOT = IDO/2 - NT = IP*IDL1 - IPP2 = IP+2 - IPPH = (IP+1)/2 - IDP = IP*IDO -C - IF (IDO .LT. L1) GO TO 106 - DO 103 J=2,IPPH - JC = IPP2-J - DO 102 K=1,L1 - DO 101 I=1,IDO - CH(I,K,J) = CC(I,J,K)+CC(I,JC,K) - CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K) - 101 CONTINUE - 102 CONTINUE - 103 CONTINUE - DO 105 K=1,L1 - DO 104 I=1,IDO - CH(I,K,1) = CC(I,1,K) - 104 CONTINUE - 105 CONTINUE - GO TO 112 - 106 DO 109 J=2,IPPH - JC = IPP2-J - DO 108 I=1,IDO - DO 107 K=1,L1 - CH(I,K,J) = CC(I,J,K)+CC(I,JC,K) - CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K) - 107 CONTINUE - 108 CONTINUE - 109 CONTINUE - DO 111 I=1,IDO - DO 110 K=1,L1 - CH(I,K,1) = CC(I,1,K) - 110 CONTINUE - 111 CONTINUE - 112 IDL = 2-IDO - INC = 0 - DO 116 L=2,IPPH - LC = IPP2-L - IDL = IDL+IDO - DO 113 IK=1,IDL1 - C2(IK,L) = CH2(IK,1)+WA(IDL-1)*CH2(IK,2) - C2(IK,LC) = -WA(IDL)*CH2(IK,IP) - 113 CONTINUE - IDLJ = IDL - INC = INC+IDO - DO 115 J=3,IPPH - JC = IPP2-J - IDLJ = IDLJ+INC - IF (IDLJ .GT. IDP) IDLJ = IDLJ-IDP - WAR = WA(IDLJ-1) - WAI = WA(IDLJ) - DO 114 IK=1,IDL1 - C2(IK,L) = C2(IK,L)+WAR*CH2(IK,J) - C2(IK,LC) = C2(IK,LC)-WAI*CH2(IK,JC) - 114 CONTINUE - 115 CONTINUE - 116 CONTINUE - DO 118 J=2,IPPH - DO 117 IK=1,IDL1 - CH2(IK,1) = CH2(IK,1)+CH2(IK,J) - 117 CONTINUE - 118 CONTINUE - DO 120 J=2,IPPH - JC = IPP2-J - DO 119 IK=2,IDL1,2 - CH2(IK-1,J) = C2(IK-1,J)-C2(IK,JC) - CH2(IK-1,JC) = C2(IK-1,J)+C2(IK,JC) - CH2(IK,J) = C2(IK,J)+C2(IK-1,JC) - CH2(IK,JC) = C2(IK,J)-C2(IK-1,JC) - 119 CONTINUE - 120 CONTINUE - NAC = 1 - IF (IDO .EQ. 2) RETURN - NAC = 0 - DO 121 IK=1,IDL1 - C2(IK,1) = CH2(IK,1) - 121 CONTINUE - DO 123 J=2,IP - DO 122 K=1,L1 - C1(1,K,J) = CH(1,K,J) - C1(2,K,J) = CH(2,K,J) - 122 CONTINUE - 123 CONTINUE - IF (IDOT .GT. 
L1) GO TO 127 - IDIJ = 0 - DO 126 J=2,IP - IDIJ = IDIJ+2 - DO 125 I=4,IDO,2 - IDIJ = IDIJ+2 - DO 124 K=1,L1 - C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)+WA(IDIJ)*CH(I,K,J) - C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)-WA(IDIJ)*CH(I-1,K,J) - 124 CONTINUE - 125 CONTINUE - 126 CONTINUE - RETURN - 127 IDJ = 2-IDO - DO 130 J=2,IP - IDJ = IDJ+IDO - DO 129 K=1,L1 - IDIJ = IDJ - DO 128 I=4,IDO,2 - IDIJ = IDIJ+2 - C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)+WA(IDIJ)*CH(I,K,J) - C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)-WA(IDIJ)*CH(I-1,K,J) - 128 CONTINUE - 129 CONTINUE - 130 CONTINUE - RETURN - END - - SUBROUTINE DPASSF2 (IDO,L1,CC,CH,WA1) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,2,L1) ,CH(IDO,L1,2) , - 1 WA1(1) - IF (IDO .GT. 2) GO TO 102 - DO 101 K=1,L1 - CH(1,K,1) = CC(1,1,K)+CC(1,2,K) - CH(1,K,2) = CC(1,1,K)-CC(1,2,K) - CH(2,K,1) = CC(2,1,K)+CC(2,2,K) - CH(2,K,2) = CC(2,1,K)-CC(2,2,K) - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - CH(I-1,K,1) = CC(I-1,1,K)+CC(I-1,2,K) - TR2 = CC(I-1,1,K)-CC(I-1,2,K) - CH(I,K,1) = CC(I,1,K)+CC(I,2,K) - TI2 = CC(I,1,K)-CC(I,2,K) - CH(I,K,2) = WA1(I-1)*TI2-WA1(I)*TR2 - CH(I-1,K,2) = WA1(I-1)*TR2+WA1(I)*TI2 - 103 CONTINUE - 104 CONTINUE - RETURN - END - - SUBROUTINE DPASSF3 (IDO,L1,CC,CH,WA1,WA2) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,3,L1) ,CH(IDO,L1,3) , - 1 WA1(1) ,WA2(1) -C *** TAUI IS -SQRT(3)/2 *** - DATA TAUR,TAUI /-0.5D0,-0.86602540378443864676D0/ - IF (IDO .NE. 
2) GO TO 102 - DO 101 K=1,L1 - TR2 = CC(1,2,K)+CC(1,3,K) - CR2 = CC(1,1,K)+TAUR*TR2 - CH(1,K,1) = CC(1,1,K)+TR2 - TI2 = CC(2,2,K)+CC(2,3,K) - CI2 = CC(2,1,K)+TAUR*TI2 - CH(2,K,1) = CC(2,1,K)+TI2 - CR3 = TAUI*(CC(1,2,K)-CC(1,3,K)) - CI3 = TAUI*(CC(2,2,K)-CC(2,3,K)) - CH(1,K,2) = CR2-CI3 - CH(1,K,3) = CR2+CI3 - CH(2,K,2) = CI2+CR3 - CH(2,K,3) = CI2-CR3 - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - TR2 = CC(I-1,2,K)+CC(I-1,3,K) - CR2 = CC(I-1,1,K)+TAUR*TR2 - CH(I-1,K,1) = CC(I-1,1,K)+TR2 - TI2 = CC(I,2,K)+CC(I,3,K) - CI2 = CC(I,1,K)+TAUR*TI2 - CH(I,K,1) = CC(I,1,K)+TI2 - CR3 = TAUI*(CC(I-1,2,K)-CC(I-1,3,K)) - CI3 = TAUI*(CC(I,2,K)-CC(I,3,K)) - DR2 = CR2-CI3 - DR3 = CR2+CI3 - DI2 = CI2+CR3 - DI3 = CI2-CR3 - CH(I,K,2) = WA1(I-1)*DI2-WA1(I)*DR2 - CH(I-1,K,2) = WA1(I-1)*DR2+WA1(I)*DI2 - CH(I,K,3) = WA2(I-1)*DI3-WA2(I)*DR3 - CH(I-1,K,3) = WA2(I-1)*DR3+WA2(I)*DI3 - 103 CONTINUE - 104 CONTINUE - RETURN - END - - SUBROUTINE DPASSF4 (IDO,L1,CC,CH,WA1,WA2,WA3) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,4,L1) ,CH(IDO,L1,4) , - 1 WA1(1) ,WA2(1) ,WA3(1) - IF (IDO .NE. 
2) GO TO 102 - DO 101 K=1,L1 - TI1 = CC(2,1,K)-CC(2,3,K) - TI2 = CC(2,1,K)+CC(2,3,K) - TR4 = CC(2,2,K)-CC(2,4,K) - TI3 = CC(2,2,K)+CC(2,4,K) - TR1 = CC(1,1,K)-CC(1,3,K) - TR2 = CC(1,1,K)+CC(1,3,K) - TI4 = CC(1,4,K)-CC(1,2,K) - TR3 = CC(1,2,K)+CC(1,4,K) - CH(1,K,1) = TR2+TR3 - CH(1,K,3) = TR2-TR3 - CH(2,K,1) = TI2+TI3 - CH(2,K,3) = TI2-TI3 - CH(1,K,2) = TR1+TR4 - CH(1,K,4) = TR1-TR4 - CH(2,K,2) = TI1+TI4 - CH(2,K,4) = TI1-TI4 - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - TI1 = CC(I,1,K)-CC(I,3,K) - TI2 = CC(I,1,K)+CC(I,3,K) - TI3 = CC(I,2,K)+CC(I,4,K) - TR4 = CC(I,2,K)-CC(I,4,K) - TR1 = CC(I-1,1,K)-CC(I-1,3,K) - TR2 = CC(I-1,1,K)+CC(I-1,3,K) - TI4 = CC(I-1,4,K)-CC(I-1,2,K) - TR3 = CC(I-1,2,K)+CC(I-1,4,K) - CH(I-1,K,1) = TR2+TR3 - CR3 = TR2-TR3 - CH(I,K,1) = TI2+TI3 - CI3 = TI2-TI3 - CR2 = TR1+TR4 - CR4 = TR1-TR4 - CI2 = TI1+TI4 - CI4 = TI1-TI4 - CH(I-1,K,2) = WA1(I-1)*CR2+WA1(I)*CI2 - CH(I,K,2) = WA1(I-1)*CI2-WA1(I)*CR2 - CH(I-1,K,3) = WA2(I-1)*CR3+WA2(I)*CI3 - CH(I,K,3) = WA2(I-1)*CI3-WA2(I)*CR3 - CH(I-1,K,4) = WA3(I-1)*CR4+WA3(I)*CI4 - CH(I,K,4) = WA3(I-1)*CI4-WA3(I)*CR4 - 103 CONTINUE - 104 CONTINUE - RETURN - END - - SUBROUTINE DPASSF5 (IDO,L1,CC,CH,WA1,WA2,WA3,WA4) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CC(IDO,5,L1) ,CH(IDO,L1,5) , - 1 WA1(1) ,WA2(1) ,WA3(1) ,WA4(1) -C *** TR11=COS(2*PI/5), TI11=-SIN(2*PI/5) -C *** TR12=-COS(4*PI/5), TI12=-SIN(4*PI/5) - DATA TR11,TI11,TR12,TI12 /0.3090169943749474241D0, - + -0.95105651629515357212D0, - 1 -0.8090169943749474241D0, -0.58778525229247312917D0/ - IF (IDO .NE. 
2) GO TO 102 - DO 101 K=1,L1 - TI5 = CC(2,2,K)-CC(2,5,K) - TI2 = CC(2,2,K)+CC(2,5,K) - TI4 = CC(2,3,K)-CC(2,4,K) - TI3 = CC(2,3,K)+CC(2,4,K) - TR5 = CC(1,2,K)-CC(1,5,K) - TR2 = CC(1,2,K)+CC(1,5,K) - TR4 = CC(1,3,K)-CC(1,4,K) - TR3 = CC(1,3,K)+CC(1,4,K) - CH(1,K,1) = CC(1,1,K)+TR2+TR3 - CH(2,K,1) = CC(2,1,K)+TI2+TI3 - CR2 = CC(1,1,K)+TR11*TR2+TR12*TR3 - CI2 = CC(2,1,K)+TR11*TI2+TR12*TI3 - CR3 = CC(1,1,K)+TR12*TR2+TR11*TR3 - CI3 = CC(2,1,K)+TR12*TI2+TR11*TI3 - CR5 = TI11*TR5+TI12*TR4 - CI5 = TI11*TI5+TI12*TI4 - CR4 = TI12*TR5-TI11*TR4 - CI4 = TI12*TI5-TI11*TI4 - CH(1,K,2) = CR2-CI5 - CH(1,K,5) = CR2+CI5 - CH(2,K,2) = CI2+CR5 - CH(2,K,3) = CI3+CR4 - CH(1,K,3) = CR3-CI4 - CH(1,K,4) = CR3+CI4 - CH(2,K,4) = CI3-CR4 - CH(2,K,5) = CI2-CR5 - 101 CONTINUE - RETURN - 102 DO 104 K=1,L1 - DO 103 I=2,IDO,2 - TI5 = CC(I,2,K)-CC(I,5,K) - TI2 = CC(I,2,K)+CC(I,5,K) - TI4 = CC(I,3,K)-CC(I,4,K) - TI3 = CC(I,3,K)+CC(I,4,K) - TR5 = CC(I-1,2,K)-CC(I-1,5,K) - TR2 = CC(I-1,2,K)+CC(I-1,5,K) - TR4 = CC(I-1,3,K)-CC(I-1,4,K) - TR3 = CC(I-1,3,K)+CC(I-1,4,K) - CH(I-1,K,1) = CC(I-1,1,K)+TR2+TR3 - CH(I,K,1) = CC(I,1,K)+TI2+TI3 - CR2 = CC(I-1,1,K)+TR11*TR2+TR12*TR3 - CI2 = CC(I,1,K)+TR11*TI2+TR12*TI3 - CR3 = CC(I-1,1,K)+TR12*TR2+TR11*TR3 - CI3 = CC(I,1,K)+TR12*TI2+TR11*TI3 - CR5 = TI11*TR5+TI12*TR4 - CI5 = TI11*TI5+TI12*TI4 - CR4 = TI12*TR5-TI11*TR4 - CI4 = TI12*TI5-TI11*TI4 - DR3 = CR3-CI4 - DR4 = CR3+CI4 - DI3 = CI3+CR4 - DI4 = CI3-CR4 - DR5 = CR2+CI5 - DR2 = CR2-CI5 - DI5 = CI2-CR5 - DI2 = CI2+CR5 - CH(I-1,K,2) = WA1(I-1)*DR2+WA1(I)*DI2 - CH(I,K,2) = WA1(I-1)*DI2-WA1(I)*DR2 - CH(I-1,K,3) = WA2(I-1)*DR3+WA2(I)*DI3 - CH(I,K,3) = WA2(I-1)*DI3-WA2(I)*DR3 - CH(I-1,K,4) = WA3(I-1)*DR4+WA3(I)*DI4 - CH(I,K,4) = WA3(I-1)*DI4-WA3(I)*DR4 - CH(I-1,K,5) = WA4(I-1)*DR5+WA4(I)*DI5 - CH(I,K,5) = WA4(I-1)*DI5-WA4(I)*DR5 - 103 CONTINUE - 104 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zffti.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/zffti.f deleted file mode 100644 index 
abd6a18aea..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zffti.f +++ /dev/null @@ -1,9 +0,0 @@ - SUBROUTINE ZFFTI (N,WSAVE) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WSAVE(1) - IF (N .EQ. 1) RETURN - IW1 = N+N+1 - IW2 = IW1+N+N - CALL ZFFTI1 (N,WSAVE(IW1),WSAVE(IW2)) - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zffti1.f b/scipy-0.10.1/scipy/fftpack/src/dfftpack/zffti1.f deleted file mode 100644 index bee57739e4..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/dfftpack/zffti1.f +++ /dev/null @@ -1,63 +0,0 @@ - SUBROUTINE ZFFTI1 (N,WA,IFAC) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION WA(*) ,IFAC(*) ,NTRYH(4) - DATA NTRYH(1),NTRYH(2),NTRYH(3),NTRYH(4)/3,4,2,5/ - NL = N - NF = 0 - J = 0 - 101 J = J+1 - IF (J.le.4) GO TO 102 - GO TO 103 - 102 NTRY = NTRYH(J) - GO TO 104 - 103 NTRY = NTRY+2 - 104 NQ = NL/NTRY - NR = NL-NTRY*NQ - IF (NR.eq.0) GO TO 105 - GO TO 101 - 105 NF = NF+1 - IFAC(NF+2) = NTRY - NL = NQ - IF (NTRY .NE. 2) GO TO 107 - IF (NF .EQ. 1) GO TO 107 - DO 106 I=2,NF - IB = NF-I+2 - IFAC(IB+2) = IFAC(IB+1) - 106 CONTINUE - IFAC(3) = 2 - 107 IF (NL .NE. 1) GO TO 104 - IFAC(1) = N - IFAC(2) = NF - TPI = 6.28318530717958647692D0 - ARGH = TPI/FLOAT(N) - I = 2 - L1 = 1 - DO 110 K1=1,NF - IP = IFAC(K1+2) - LD = 0 - L2 = L1*IP - IDO = N/L2 - IDOT = IDO+IDO+2 - IPM = IP-1 - DO 109 J=1,IPM - I1 = I - WA(I-1) = 1.0D0 - WA(I) = 0.0D0 - LD = LD+L1 - FI = 0.0D0 - ARGLD = FLOAT(LD)*ARGH - DO 108 II=4,IDOT,2 - I = I+2 - FI = FI+1.D0 - ARG = FI*ARGLD - WA(I-1) = COS(ARG) - WA(I) = SIN(ARG) - 108 CONTINUE - IF (IP .LE. 5) GO TO 109 - WA(I1-1) = WA(I-1) - WA(I1) = WA(I) - 109 CONTINUE - L1 = L2 - 110 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/fftpack/src/drfft.c b/scipy-0.10.1/scipy/fftpack/src/drfft.c deleted file mode 100644 index 835415da09..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/drfft.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - Interface to various FFT libraries. - Double real FFT and IFFT. 
- Author: Pearu Peterson, August 2002 - */ - -#include "fftpack.h" - -extern void F_FUNC(dfftf, DFFTF) (int *, double *, double *); -extern void F_FUNC(dfftb, DFFTB) (int *, double *, double *); -extern void F_FUNC(dffti, DFFTI) (int *, double *); -extern void F_FUNC(rfftf, RFFTF) (int *, float *, float *); -extern void F_FUNC(rfftb, RFFTB) (int *, float *, float *); -extern void F_FUNC(rffti, RFFTI) (int *, float *); - - -GEN_CACHE(drfft, (int n) - , double *wsave; - , (caches_drfft[i].n == n) - , caches_drfft[id].wsave = - (double *) malloc(sizeof(double) * (2 * n + 15)); - F_FUNC(dffti, DFFTI) (&n, caches_drfft[id].wsave); - , free(caches_drfft[id].wsave); - , 10) - -GEN_CACHE(rfft, (int n) - , float *wsave; - , (caches_rfft[i].n == n) - , caches_rfft[id].wsave = - (float *) malloc(sizeof(float) * (2 * n + 15)); - F_FUNC(rffti, RFFTI) (&n, caches_rfft[id].wsave); - , free(caches_rfft[id].wsave); - , 10) - -void drfft(double *inout, int n, int direction, int howmany, - int normalize) -{ - int i; - double *ptr = inout; - double *wsave = NULL; - wsave = caches_drfft[get_cache_id_drfft(n)].wsave; - - - switch (direction) { - case 1: - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(dfftf,DFFTF)(&n, ptr, wsave); - } - break; - - case -1: - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(dfftb,DFFTB)(&n, ptr, wsave); - } - break; - - default: - fprintf(stderr, "drfft: invalid direction=%d\n", direction); - } - - if (normalize) { - double d = 1.0 / n; - ptr = inout; - for (i = n * howmany - 1; i >= 0; --i) { - (*(ptr++)) *= d; - } - } -} - -void rfft(float *inout, int n, int direction, int howmany, - int normalize) -{ - int i; - float *ptr = inout; - float *wsave = NULL; - wsave = caches_rfft[get_cache_id_rfft(n)].wsave; - - - switch (direction) { - case 1: - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(rfftf,RFFTF)(&n, ptr, wsave); - } - break; - - case -1: - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(rfftb,RFFTB)(&n, ptr, wsave); - } - break; - - 
default: - fprintf(stderr, "rfft: invalid direction=%d\n", direction); - } - - if (normalize) { - float d = 1.0 / n; - ptr = inout; - for (i = n * howmany - 1; i >= 0; --i) { - (*(ptr++)) *= d; - } - } -} diff --git a/scipy-0.10.1/scipy/fftpack/src/fftpack.h b/scipy-0.10.1/scipy/fftpack/src/fftpack.h deleted file mode 100644 index 9addbd1a8a..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/fftpack.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - Interface to various FFT libraries. - Author: Pearu Peterson, August 2002 - */ - -#ifndef FFTPACK_H -#define FFTPACK_H - -#include -#include -#include - -typedef struct {double r,i;} complex_double; -typedef struct {float r,i;} complex_float; - -extern -void init_convolution_kernel(int n,double* omega, int d, - double (*kernel_func)(int), - int zero_nyquist); -extern -void convolve(int n,double* inout,double* omega,int swap_real_imag); -extern -void convolve_z(int n,double* inout,double* omega_real,double* omega_imag); - -extern int ispow2le2e30(int n); -extern int ispow2le2e13(int n); - -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif - -/* - Simple cyclic cache. 
- */ -#define GEN_CACHE(name,CACHEARG,CACHETYPE,CHECK,MALLOC,FREE,CACHESIZE) \ -typedef struct {\ - int n;\ - CACHETYPE \ -} cache_type_##name;\ -static cache_type_##name caches_##name[CACHESIZE];\ -static int nof_in_cache_##name = 0;\ -static int last_cache_id_##name = 0;\ -static int get_cache_id_##name CACHEARG { \ - int i,id = -1; \ - for (i=0;i=0) goto exit;\ - if (nof_in_cache_##name= 0; --i) { - *((double *) (ptr)) /= n; - *((double *) (ptr++) + 1) /= n; - } - } -} - -void cfft(complex_float * inout, int n, int direction, int howmany, - int normalize) -{ - int i; - complex_float *ptr = inout; - float *wsave = NULL; - - wsave = caches_cfft[get_cache_id_cfft(n)].wsave; - - switch (direction) { - case 1: - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(cfftf, CFFTF)(&n, (float *) (ptr), wsave); - - } - break; - - case -1: - for (i = 0; i < howmany; ++i, ptr += n) { - F_FUNC(cfftb, CFFTB)(&n, (float *) (ptr), wsave); - } - break; - default: - fprintf(stderr, "cfft: invalid direction=%d\n", direction); - } - - if (normalize) { - ptr = inout; - for (i = n * howmany - 1; i >= 0; --i) { - *((float *) (ptr)) /= n; - *((float *) (ptr++) + 1) /= n; - } - } -} diff --git a/scipy-0.10.1/scipy/fftpack/src/zfftnd.c b/scipy-0.10.1/scipy/fftpack/src/zfftnd.c deleted file mode 100644 index 457a913daf..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/zfftnd.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - Interface to various FFT libraries. - Double complex FFT and IFFT, arbitrary dimensions. 
- Author: Pearu Peterson, August 2002 - */ -#include "fftpack.h" - -GEN_CACHE(zfftnd, (int n, int rank) - , complex_double * ptr; int *iptr; int rank; - , ((caches_zfftnd[i].n == n) - && (caches_zfftnd[i].rank == rank)) - , caches_zfftnd[id].n = n; - caches_zfftnd[id].ptr = - (complex_double *) malloc(2 * sizeof(double) * n); - caches_zfftnd[id].iptr = - (int *) malloc(4 * rank * sizeof(int)); - , - free(caches_zfftnd[id].ptr); - free(caches_zfftnd[id].iptr); - , 10) - -GEN_CACHE(cfftnd, (int n, int rank) - , complex_float * ptr; int *iptr; int rank; - , ((caches_cfftnd[i].n == n) - && (caches_cfftnd[i].rank == rank)) - , caches_cfftnd[id].n = n; - caches_cfftnd[id].ptr = - (complex_float *) malloc(2 * sizeof(float) * n); - caches_cfftnd[id].iptr = - (int *) malloc(4 * rank * sizeof(int)); - , - free(caches_cfftnd[id].ptr); - free(caches_cfftnd[id].iptr); - , 10) - -static -/*inline : disabled because MSVC6.0 fails to compile it. */ -int next_comb(int *ia, int *da, int m) -{ - while (m >= 0 && ia[m] == da[m]) { - ia[m--] = 0; - } - if (m < 0) { - return 0; - } - ia[m]++; - return 1; -} - -static -void flatten(complex_double * dest, complex_double * src, - int rank, int strides_axis, int dims_axis, int unflat, - int *tmp) -{ - int *new_strides = tmp + rank; - int *new_dims = tmp + 2 * rank; - int *ia = tmp + 3 * rank; - int rm1 = rank - 1, rm2 = rank - 2; - int i, j, k; - for (i = 0; i < rm2; ++i) - ia[i] = 0; - ia[rm2] = -1; - j = 0; - if (unflat) { - while (next_comb(ia, new_dims, rm2)) { - k = 0; - for (i = 0; i < rm1; ++i) { - k += ia[i] * new_strides[i]; - } - for (i = 0; i < dims_axis; ++i) { - *(dest + k + i * strides_axis) = *(src + j++); - } - } - } else { - while (next_comb(ia, new_dims, rm2)) { - k = 0; - for (i = 0; i < rm1; ++i) { - k += ia[i] * new_strides[i]; - } - for (i = 0; i < dims_axis; ++i) { - *(dest + j++) = *(src + k + i * strides_axis); - } - } - } -} - -static -void sflatten(complex_float * dest, complex_float * src, - int rank, int 
strides_axis, int dims_axis, int unflat, - int *tmp) -{ - int *new_strides = tmp + rank; - int *new_dims = tmp + 2 * rank; - int *ia = tmp + 3 * rank; - int rm1 = rank - 1, rm2 = rank - 2; - int i, j, k; - for (i = 0; i < rm2; ++i) - ia[i] = 0; - ia[rm2] = -1; - j = 0; - if (unflat) { - while (next_comb(ia, new_dims, rm2)) { - k = 0; - for (i = 0; i < rm1; ++i) { - k += ia[i] * new_strides[i]; - } - for (i = 0; i < dims_axis; ++i) { - *(dest + k + i * strides_axis) = *(src + j++); - } - } - } else { - while (next_comb(ia, new_dims, rm2)) { - k = 0; - for (i = 0; i < rm1; ++i) { - k += ia[i] * new_strides[i]; - } - for (i = 0; i < dims_axis; ++i) { - *(dest + j++) = *(src + k + i * strides_axis); - } - } - } -} - -extern void cfft(complex_float * inout, - int n, int direction, int howmany, int normalize); - -extern void zfft(complex_double * inout, - int n, int direction, int howmany, int normalize); - -extern void zfftnd(complex_double * inout, int rank, - int *dims, int direction, int howmany, - int normalize) -{ - int i, sz; - complex_double *ptr = inout; - int axis; - complex_double *tmp; - int *itmp; - int k, j; - - sz = 1; - for (i = 0; i < rank; ++i) { - sz *= dims[i]; - } - zfft(ptr, dims[rank - 1], direction, howmany * sz / dims[rank - 1], - normalize); - - i = get_cache_id_zfftnd(sz, rank); - tmp = caches_zfftnd[i].ptr; - itmp = caches_zfftnd[i].iptr; - - itmp[rank - 1] = 1; - for (i = 2; i <= rank; ++i) { - itmp[rank - i] = itmp[rank - i + 1] * dims[rank - i + 1]; - } - - for (i = 0; i < howmany; ++i, ptr += sz) { - for (axis = 0; axis < rank - 1; ++axis) { - for (k = j = 0; k < rank; ++k) { - if (k != axis) { - *(itmp + rank + j) = itmp[k]; - *(itmp + 2 * rank + j++) = dims[k] - 1; - } - } - flatten(tmp, ptr, rank, itmp[axis], dims[axis], 0, itmp); - zfft(tmp, dims[axis], direction, sz / dims[axis], normalize); - flatten(ptr, tmp, rank, itmp[axis], dims[axis], 1, itmp); - } - } - -} - -extern void cfftnd(complex_float * inout, int rank, - int *dims, int 
direction, int howmany, - int normalize) -{ - int i, sz; - complex_float *ptr = inout; - int axis; - complex_float *tmp; - int *itmp; - int k, j; - - sz = 1; - for (i = 0; i < rank; ++i) { - sz *= dims[i]; - } - cfft(ptr, dims[rank - 1], direction, howmany * sz / dims[rank - 1], - normalize); - - i = get_cache_id_cfftnd(sz, rank); - tmp = caches_cfftnd[i].ptr; - itmp = caches_cfftnd[i].iptr; - - itmp[rank - 1] = 1; - for (i = 2; i <= rank; ++i) { - itmp[rank - i] = itmp[rank - i + 1] * dims[rank - i + 1]; - } - - for (i = 0; i < howmany; ++i, ptr += sz) { - for (axis = 0; axis < rank - 1; ++axis) { - for (k = j = 0; k < rank; ++k) { - if (k != axis) { - *(itmp + rank + j) = itmp[k]; - *(itmp + 2 * rank + j++) = dims[k] - 1; - } - } - sflatten(tmp, ptr, rank, itmp[axis], dims[axis], 0, itmp); - cfft(tmp, dims[axis], direction, sz / dims[axis], normalize); - sflatten(ptr, tmp, rank, itmp[axis], dims[axis], 1, itmp); - } - } - -} diff --git a/scipy-0.10.1/scipy/fftpack/src/zrfft.c b/scipy-0.10.1/scipy/fftpack/src/zrfft.c deleted file mode 100644 index c3f54ef6bf..0000000000 --- a/scipy-0.10.1/scipy/fftpack/src/zrfft.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - Interface to various FFT libraries. - Double complex FFT and IFFT with zero imaginary input. 
- Author: Pearu Peterson, August 2002 - */ - -#include "fftpack.h" - -extern void drfft(double *inout,int n,int direction,int howmany,int normalize); -extern void rfft(float *inout,int n,int direction,int howmany,int normalize); - -extern void zrfft(complex_double *inout, - int n,int direction,int howmany,int normalize) { - int i,j,k; - double* ptr = (double *)inout; - switch (direction) { - case 1: - for (i=0;i -#include - -#include - -#ifdef DCT_TEST_USE_SINGLE -typedef float float_prec; -#define PF "%.7f" -#define FFTW_PLAN fftwf_plan -#define FFTW_MALLOC fftwf_malloc -#define FFTW_FREE fftwf_free -#define FFTW_PLAN_CREATE fftwf_plan_r2r_1d -#define FFTW_EXECUTE fftwf_execute -#define FFTW_DESTROY_PLAN fftwf_destroy_plan -#define FFTW_CLEANUP fftwf_cleanup -#else -typedef double float_prec; -#define PF "%.18f" -#define FFTW_PLAN fftw_plan -#define FFTW_MALLOC fftw_malloc -#define FFTW_FREE fftw_free -#define FFTW_PLAN_CREATE fftw_plan_r2r_1d -#define FFTW_EXECUTE fftw_execute -#define FFTW_DESTROY_PLAN fftw_destroy_plan -#define FFTW_CLEANUP fftw_cleanup -#endif - - -enum type { - DCT_I = 1, - DCT_II = 2, - DCT_III = 3, - DCT_IV = 4, -}; - -int gen(int type, int sz) -{ - float_prec *a, *b; - FFTW_PLAN p; - int i, tp; - - a = FFTW_MALLOC(sizeof(*a) * sz); - if (a == NULL) { - fprintf(stderr, "failure\n"); - exit(EXIT_FAILURE); - } - b = FFTW_MALLOC(sizeof(*b) * sz); - if (b == NULL) { - fprintf(stderr, "failure\n"); - exit(EXIT_FAILURE); - } - - for(i=0; i < sz; ++i) { - a[i] = i; - } - - switch(type) { - case DCT_I: - tp = FFTW_REDFT00; - break; - case DCT_II: - tp = FFTW_REDFT10; - break; - case DCT_III: - tp = FFTW_REDFT01; - break; - case DCT_IV: - tp = FFTW_REDFT11; - break; - default: - fprintf(stderr, "unknown type\n"); - exit(EXIT_FAILURE); - } - - p = FFTW_PLAN_CREATE(sz, a, b, tp, FFTW_ESTIMATE); - FFTW_EXECUTE(p); - FFTW_DESTROY_PLAN(p); - - for(i=0; i < sz; ++i) { - printf(PF"\n", b[i]); - } - FFTW_FREE(b); - FFTW_FREE(a); - - return 0; -} - -int 
main(int argc, char* argv[]) -{ - int n, tp; - - if (argc < 3) { - fprintf(stderr, "missing argument: program type n\n"); - exit(EXIT_FAILURE); - } - tp = atoi(argv[1]); - n = atoi(argv[2]); - - gen(tp, n); - FFTW_CLEANUP(); - - return 0; -} diff --git a/scipy-0.10.1/scipy/fftpack/tests/fftw_double_ref.npz b/scipy-0.10.1/scipy/fftpack/tests/fftw_double_ref.npz deleted file mode 100644 index 75e574415a..0000000000 Binary files a/scipy-0.10.1/scipy/fftpack/tests/fftw_double_ref.npz and /dev/null differ diff --git a/scipy-0.10.1/scipy/fftpack/tests/fftw_single_ref.npz b/scipy-0.10.1/scipy/fftpack/tests/fftw_single_ref.npz deleted file mode 100644 index ace6585555..0000000000 Binary files a/scipy-0.10.1/scipy/fftpack/tests/fftw_single_ref.npz and /dev/null differ diff --git a/scipy-0.10.1/scipy/fftpack/tests/gen_fftw_ref.py b/scipy-0.10.1/scipy/fftpack/tests/gen_fftw_ref.py deleted file mode 100644 index 8603678bee..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/gen_fftw_ref.py +++ /dev/null @@ -1,35 +0,0 @@ -from subprocess import Popen, PIPE, STDOUT - -import numpy as np - -SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024] - -def gen_data(dt): - arrays = {} - - if dt == np.double: - pg = './fftw_double' - elif dt == np.float32: - pg = './fftw_single' - else: - raise ValueError("unknown: %s" % dt) - # Generate test data using FFTW for reference - for type in [1, 2, 3, 4]: - arrays[type] = {} - for sz in SZ: - a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT) - st = [i.strip() for i in a.stdout.readlines()] - arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt) - - return arrays - -data = gen_data(np.float32) -filename = 'fftw_single_ref' - -# Save ref data into npz format -d = {} -d['sizes'] = SZ -for type in [1, 2, 3, 4]: - for sz in SZ: - d['dct_%d_%d' % (type, sz)] = data[type][sz] -np.savez(filename, **d) diff --git a/scipy-0.10.1/scipy/fftpack/tests/gendata.m b/scipy-0.10.1/scipy/fftpack/tests/gendata.m deleted file mode 
100644 index 6c231df4d7..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/gendata.m +++ /dev/null @@ -1,21 +0,0 @@ -x0 = linspace(0, 10, 11); -x1 = linspace(0, 10, 15); -x2 = linspace(0, 10, 16); -x3 = linspace(0, 10, 17); - -x4 = randn(32, 1); -x5 = randn(64, 1); -x6 = randn(128, 1); -x7 = randn(256, 1); - -y0 = dct(x0); -y1 = dct(x1); -y2 = dct(x2); -y3 = dct(x3); -y4 = dct(x4); -y5 = dct(x5); -y6 = dct(x6); -y7 = dct(x7); - -save('test.mat', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', ... - 'y0', 'y1', 'y2', 'y3', 'y4', 'y5', 'y6', 'y7'); diff --git a/scipy-0.10.1/scipy/fftpack/tests/gendata.py b/scipy-0.10.1/scipy/fftpack/tests/gendata.py deleted file mode 100644 index 981b958017..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/gendata.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np -from scipy.io import loadmat - -m = loadmat('test.mat', squeeze_me=True, struct_as_record=True, - mat_dtype=True) -np.savez('test.npz', **m) diff --git a/scipy-0.10.1/scipy/fftpack/tests/test.npz b/scipy-0.10.1/scipy/fftpack/tests/test.npz deleted file mode 100644 index f90294b41d..0000000000 Binary files a/scipy-0.10.1/scipy/fftpack/tests/test.npz and /dev/null differ diff --git a/scipy-0.10.1/scipy/fftpack/tests/test_basic.py b/scipy-0.10.1/scipy/fftpack/tests/test_basic.py deleted file mode 100644 index 4322757aac..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/test_basic.py +++ /dev/null @@ -1,800 +0,0 @@ -#!/usr/bin/env python -# Created by Pearu Peterson, September 2002 -""" Test functions for fftpack.basic module -""" -__usage__ = """ -Build fftpack: - python setup_fftpack.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.fftpack.test()' -Run tests if fftpack is not installed: - python tests/test_basic.py -""" - -from numpy.testing import assert_, assert_equal, assert_array_almost_equal, \ - assert_array_almost_equal_nulp, assert_raises, run_module_suite, \ - TestCase, dec -from scipy.fftpack import ifft,fft,fftn,ifftn,rfft,irfft, fft2 
-from scipy.fftpack import _fftpack as fftpack - -from numpy import arange, add, array, asarray, zeros, dot, exp, pi,\ - swapaxes, double, cdouble -import numpy as np -import numpy.fft - -# "large" composite numbers supported by FFTPACK -LARGE_COMPOSITE_SIZES = [ - 2**13, - 2**5 * 3**5, - 2**3 * 3**3 * 5**2, -] -SMALL_COMPOSITE_SIZES = [ - 2, - 2*3*5, - 2*2*3*3, -] -# prime -LARGE_PRIME_SIZES = [ - 2011 -] -SMALL_PRIME_SIZES = [ - 29 -] - -from numpy.random import rand -def random(size): - return rand(*size) - -def get_mat(n): - data = arange(n) - data = add.outer(data,data) - return data - -def direct_dft(x): - x = asarray(x) - n = len(x) - y = zeros(n,dtype=cdouble) - w = -arange(n)*(2j*pi/n) - for i in range(n): - y[i] = dot(exp(i*w),x) - return y - -def direct_idft(x): - x = asarray(x) - n = len(x) - y = zeros(n,dtype=cdouble) - w = arange(n)*(2j*pi/n) - for i in range(n): - y[i] = dot(exp(i*w),x)/n - return y - -def direct_dftn(x): - x = asarray(x) - for axis in range(len(x.shape)): - x = fft(x,axis=axis) - return x - -def direct_idftn(x): - x = asarray(x) - for axis in range(len(x.shape)): - x = ifft(x,axis=axis) - return x - -def direct_rdft(x): - x = asarray(x) - n = len(x) - w = -arange(n)*(2j*pi/n) - r = zeros(n,dtype=double) - for i in range(int(n/2+1)): - y = dot(exp(i*w),x) - if i: - r[2*i-1] = y.real - if 2*i 1: - # check fortran order: it never overwrites - self._check(data.T, routine, fftshape, axes, - overwrite_x=overwrite_x, - should_overwrite=False) - - def _check_nd(self, routine, dtype, overwritable): - self._check_nd_one(routine, dtype, (16,), None, overwritable) - self._check_nd_one(routine, dtype, (16,), (0,), overwritable) - self._check_nd_one(routine, dtype, (16, 2), (0,), overwritable) - self._check_nd_one(routine, dtype, (2, 16), (1,), overwritable) - self._check_nd_one(routine, dtype, (8, 16), None, overwritable) - self._check_nd_one(routine, dtype, (8, 16), (0, 1), overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), (0, 1), 
overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), (1, 2), overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), (0,), overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), (1,), overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), (2,), overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), None, overwritable) - self._check_nd_one(routine, dtype, (8, 16, 2), (0,1,2), overwritable) - - def test_fftn(self): - overwritable = (np.complex128, np.complex64) - for dtype in self.dtypes: - self._check_nd(fftn, dtype, overwritable) - - def test_ifftn(self): - overwritable = (np.complex128, np.complex64) - for dtype in self.dtypes: - self._check_nd(ifftn, dtype, overwritable) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/fftpack/tests/test_helper.py b/scipy-0.10.1/scipy/fftpack/tests/test_helper.py deleted file mode 100644 index 19b7b53d63..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/test_helper.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -# Created by Pearu Peterson, September 2002 -""" Test functions for fftpack.helper module -""" -__usage__ = """ -Build fftpack: - python setup_fftpack.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.fftpack.test()' -Run tests if fftpack is not installed: - python tests/test_helper.py [] -""" - -from numpy.testing import * -from scipy.fftpack import fftshift,ifftshift,fftfreq,rfftfreq - -from numpy import pi - -def random(size): - return rand(*size) - -class TestFFTShift(TestCase): - - def test_definition(self): - x = [0,1,2,3,4,-4,-3,-2,-1] - y = [-4,-3,-2,-1,0,1,2,3,4] - assert_array_almost_equal(fftshift(x),y) - assert_array_almost_equal(ifftshift(y),x) - x = [0,1,2,3,4,-5,-4,-3,-2,-1] - y = [-5,-4,-3,-2,-1,0,1,2,3,4] - assert_array_almost_equal(fftshift(x),y) - assert_array_almost_equal(ifftshift(y),x) - - def test_inverse(self): - for n in [1,4,9,100,211]: - x = random((n,)) - 
assert_array_almost_equal(ifftshift(fftshift(x)),x) - -class TestFFTFreq(TestCase): - - def test_definition(self): - x = [0,1,2,3,4,-4,-3,-2,-1] - assert_array_almost_equal(9*fftfreq(9),x) - assert_array_almost_equal(9*pi*fftfreq(9,pi),x) - x = [0,1,2,3,4,-5,-4,-3,-2,-1] - assert_array_almost_equal(10*fftfreq(10),x) - assert_array_almost_equal(10*pi*fftfreq(10,pi),x) - -class TestRFFTFreq(TestCase): - - def test_definition(self): - x = [0,1,1,2,2,3,3,4,4] - assert_array_almost_equal(9*rfftfreq(9),x) - assert_array_almost_equal(9*pi*rfftfreq(9,pi),x) - x = [0,1,1,2,2,3,3,4,4,5] - assert_array_almost_equal(10*rfftfreq(10),x) - assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/fftpack/tests/test_pseudo_diffs.py b/scipy-0.10.1/scipy/fftpack/tests/test_pseudo_diffs.py deleted file mode 100644 index a39d0b0392..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/test_pseudo_diffs.py +++ /dev/null @@ -1,381 +0,0 @@ -#!/usr/bin/env python -# Created by Pearu Peterson, September 2002 -""" Test functions for fftpack.pseudo_diffs module -""" -__usage__ = """ -Build fftpack: - python setup_fftpack.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.fftpack.test()' -Run tests if fftpack is not installed: - python tests/test_pseudo_diffs.py [] -""" - -from numpy.testing import * -from scipy.fftpack import diff, fft, ifft, tilbert, itilbert, hilbert, \ - ihilbert, shift, fftfreq, cs_diff, sc_diff, \ - ss_diff, cc_diff - -import numpy as np -from numpy import arange, sin, cos, pi, exp, tanh, sum, sign - -def random(size): - return rand(*size) - -def direct_diff(x,k=1,period=None): - fx = fft(x) - n = len (fx) - if period is None: - period = 2*pi - w = fftfreq(n)*2j*pi/period*n - if k<0: - w = 1 / w**k - w[0] = 0.0 - else: - w = w**k - if n>2000: - w[250:n-250] = 0.0 - return ifft(w*fx).real - -def direct_tilbert(x,h=1,period=None): - fx = fft(x) - n = len (fx) - if period is 
None: - period = 2*pi - w = fftfreq(n)*h*2*pi/period*n - w[0] = 1 - w = 1j/tanh(w) - w[0] = 0j - return ifft(w*fx) - -def direct_itilbert(x,h=1,period=None): - fx = fft(x) - n = len (fx) - if period is None: - period = 2*pi - w = fftfreq(n)*h*2*pi/period*n - w = -1j*tanh(w) - return ifft(w*fx) - -def direct_hilbert(x): - fx = fft(x) - n = len (fx) - w = fftfreq(n)*n - w = 1j*sign(w) - return ifft(w*fx) - -def direct_ihilbert(x): - return -direct_hilbert(x) - -def direct_shift(x,a,period=None): - n = len(x) - if period is None: - k = fftfreq(n)*1j*n - else: - k = fftfreq(n)*2j*pi/period*n - return ifft(fft(x)*exp(k*a)).real - - -class TestDiff(TestCase): - - def test_definition(self): - for n in [16,17,64,127,32]: - x = arange(n)*2*pi/n - assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x))) - assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2)) - assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3)) - assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4)) - assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5)) - assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3)) - assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4)) - assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x))) - assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2)) - assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3)) - assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4)) - assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x))) - assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8))) - assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8))) - for k in range(5): - assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k)) - assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k)) - - def test_period(self): - for n in [17,64]: - x = arange(n)/float(n) - assert_array_almost_equal(diff(sin(2*pi*x),period=1), - 2*pi*cos(2*pi*x)) - 
assert_array_almost_equal(diff(sin(2*pi*x),3,period=1), - -(2*pi)**3*cos(2*pi*x)) - - def test_sin(self): - for n in [32,64,77]: - x = arange(n)*2*pi/n - assert_array_almost_equal(diff(sin(x)),cos(x)) - assert_array_almost_equal(diff(cos(x)),-sin(x)) - assert_array_almost_equal(diff(sin(x),2),-sin(x)) - assert_array_almost_equal(diff(sin(x),4),sin(x)) - assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x)) - assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x))) - - def test_expr(self): - for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]: - x = arange(n)*2*pi/n - f=sin(x)*cos(4*x)+exp(sin(3*x)) - df=cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) - ddf=-17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ - -9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) - d1 = diff(f) - assert_array_almost_equal(d1,df) - assert_array_almost_equal(diff(df),ddf) - assert_array_almost_equal(diff(f,2),ddf) - assert_array_almost_equal(diff(ddf,-1),df) - #print max(abs(d1-df)) - - def test_expr_large(self): - for n in [2048,4096]: - x = arange(n)*2*pi/n - f=sin(x)*cos(4*x)+exp(sin(3*x)) - df=cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) - ddf=-17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ - -9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) - assert_array_almost_equal(diff(f),df) - assert_array_almost_equal(diff(df),ddf) - assert_array_almost_equal(diff(ddf,-1),df) - assert_array_almost_equal(diff(f,2),ddf) - - def test_int(self): - n = 64 - x = arange(n)*2*pi/n - assert_array_almost_equal(diff(sin(x),-1),-cos(x)) - assert_array_almost_equal(diff(sin(x),-2),-sin(x)) - assert_array_almost_equal(diff(sin(x),-4),sin(x)) - assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x)) - - def test_random_even(self): - for k in [0,2,4,6]: - for n in [60,32,64,56,55]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - # zeroing Nyquist mode: - f = diff(diff(f,1),-1) - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(diff(diff(f,k),-k),f) - 
assert_array_almost_equal(diff(diff(f,-k),k),f) - - def test_random_odd(self): - for k in [0,1,2,3,4,5,6]: - for n in [33,65,55]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(diff(diff(f,k),-k),f) - assert_array_almost_equal(diff(diff(f,-k),k),f) - - def test_zero_nyquist (self): - for k in [0,1,2,3,4,5,6]: - for n in [32,33,64,56,55]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - # zeroing Nyquist mode: - f = diff(diff(f,1),-1) - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(diff(diff(f,k),-k),f) - assert_array_almost_equal(diff(diff(f,-k),k),f) - - -class TestTilbert(TestCase): - - def test_definition(self): - for h in [0.1,0.5,1,5.5,10]: - for n in [16,17,64,127]: - x = arange(n)*2*pi/n - y = tilbert(sin(x),h) - y1 = direct_tilbert(sin(x),h) - assert_array_almost_equal (y,y1) - assert_array_almost_equal(tilbert(sin(x),h), - direct_tilbert(sin(x),h)) - assert_array_almost_equal(tilbert(sin(2*x),h), - direct_tilbert(sin(2*x),h)) - - def test_random_even(self): - for h in [0.1,0.5,1,5.5,10]: - for n in [32,64,56]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f) - - def test_random_odd(self): - for h in [0.1,0.5,1,5.5,10]: - for n in [33,65,55]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(itilbert(tilbert(f,h),h),f) - assert_array_almost_equal(tilbert(itilbert(f,h),h),f) - - -class TestITilbert(TestCase): - - def test_definition(self): - for h in [0.1,0.5,1,5.5,10]: - for n in [16,17,64,127]: - x = arange(n)*2*pi/n - y = itilbert(sin(x),h) - y1 = direct_itilbert(sin(x),h) - assert_array_almost_equal (y,y1) - assert_array_almost_equal(itilbert(sin(x),h), - direct_itilbert(sin(x),h)) - assert_array_almost_equal(itilbert(sin(2*x),h), - direct_itilbert(sin(2*x),h)) - -class 
TestHilbert(TestCase): - - def test_definition(self): - for n in [16,17,64,127]: - x = arange(n)*2*pi/n - y = hilbert(sin(x)) - y1 = direct_hilbert(sin(x)) - assert_array_almost_equal (y,y1) - assert_array_almost_equal(hilbert(sin(2*x)), - direct_hilbert(sin(2*x))) - - def test_tilbert_relation(self): - for n in [16,17,64,127]: - x = arange(n)*2*pi/n - f = sin (x)+cos (2*x)*sin(x) - y = hilbert(f) - y1 = direct_hilbert(f) - assert_array_almost_equal (y,y1) - y2 = tilbert(f,h=10) - assert_array_almost_equal (y,y2) - - def test_random_odd(self): - for n in [33,65,55]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(ihilbert(hilbert(f)),f) - assert_array_almost_equal(hilbert(ihilbert(f)),f) - - def test_random_even(self): - for n in [32,64,56]: - f=random ((n,)) - af=sum(f,axis=0)/n - f=f-af - # zeroing Nyquist mode: - f = diff(diff(f,1),-1) - assert_almost_equal(sum(f,axis=0),0.0) - assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f) - assert_array_almost_equal(hilbert(ihilbert(f)),f) - - -class TestIHilbert(TestCase): - - def test_definition(self): - for n in [16,17,64,127]: - x = arange(n)*2*pi/n - y = ihilbert(sin(x)) - y1 = direct_ihilbert(sin(x)) - assert_array_almost_equal (y,y1) - assert_array_almost_equal(ihilbert(sin(2*x)), - direct_ihilbert(sin(2*x))) - - def test_itilbert_relation(self): - for n in [16,17,64,127]: - x = arange(n)*2*pi/n - f = sin (x)+cos (2*x)*sin(x) - y = ihilbert(f) - y1 = direct_ihilbert(f) - assert_array_almost_equal (y,y1) - y2 = itilbert(f,h=10) - assert_array_almost_equal (y,y2) - -class TestShift(TestCase): - - def test_definition(self): - for n in [18,17,64,127,32,2048,256]: - x = arange(n)*2*pi/n - for a in [0.1,3]: - assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a)) - assert_array_almost_equal(shift(sin(x),a),sin(x+a)) - assert_array_almost_equal(shift(cos(x),a),cos(x+a)) - assert_array_almost_equal(shift(cos(2*x)+sin(x),a), - 
cos(2*(x+a))+sin(x+a)) - assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a))) - assert_array_almost_equal(shift(sin(x),2*pi),sin(x)) - assert_array_almost_equal(shift(sin(x),pi),-sin(x)) - assert_array_almost_equal(shift(sin(x),pi/2),cos(x)) - - -class TestOverwrite(object): - """ - Check input overwrite behavior - """ - - real_dtypes = [np.float32, np.float64] - dtypes = real_dtypes + [np.complex64, np.complex128] - - def _check(self, x, routine, *args, **kwargs): - x2 = x.copy() - y = routine(x2, *args, **kwargs) - sig = routine.__name__ - if args: - sig += repr(args) - if kwargs: - sig += repr(kwargs) - assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) - - def _check_1d(self, routine, dtype, shape, *args, **kwargs): - np.random.seed(1234) - if np.issubdtype(dtype, np.complexfloating): - data = np.random.randn(*shape) + 1j*np.random.randn(*shape) - else: - data = np.random.randn(*shape) - data = data.astype(dtype) - self._check(data, routine, *args, **kwargs) - - def test_diff(self): - for dtype in self.dtypes: - self._check_1d(diff, dtype, (16,)) - - def test_tilbert(self): - for dtype in self.dtypes: - self._check_1d(tilbert, dtype, (16,), 1.6) - - def test_itilbert(self): - for dtype in self.dtypes: - self._check_1d(itilbert, dtype, (16,), 1.6) - - def test_hilbert(self): - for dtype in self.dtypes: - self._check_1d(hilbert, dtype, (16,)) - - def test_cs_diff(self): - for dtype in self.dtypes: - self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0) - - def test_sc_diff(self): - for dtype in self.dtypes: - self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0) - - def test_ss_diff(self): - for dtype in self.dtypes: - self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0) - - def test_cc_diff(self): - for dtype in self.dtypes: - self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0) - - def test_shift(self): - for dtype in self.dtypes: - self._check_1d(shift, dtype, (16,), 1.0) - -if __name__ == "__main__": - run_module_suite() diff --git 
a/scipy-0.10.1/scipy/fftpack/tests/test_real_transforms.py b/scipy-0.10.1/scipy/fftpack/tests/test_real_transforms.py deleted file mode 100644 index 760739cd3d..0000000000 --- a/scipy-0.10.1/scipy/fftpack/tests/test_real_transforms.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env python -from os.path import join, dirname - -import numpy as np -from numpy.fft import fft as numfft -from numpy.testing import assert_array_almost_equal, assert_equal, TestCase - -from scipy.fftpack.realtransforms import dct, idct - -# Matlab reference data -MDATA = np.load(join(dirname(__file__), 'test.npz')) -X = [MDATA['x%d' % i] for i in range(8)] -Y = [MDATA['y%d' % i] for i in range(8)] - -# FFTW reference data: the data are organized as follows: -# * SIZES is an array containing all available sizes -# * for every type (1, 2, 3, 4) and every size, the array dct_type_size -# contains the output of the DCT applied to the input np.linspace(0, size-1, -# size) -FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz')) -FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz')) -FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes'] - -def fftw_ref(type, size, dt): - x = np.linspace(0, size-1, size).astype(dt) - if dt == np.double: - data = FFTWDATA_DOUBLE - elif dt == np.float32: - data = FFTWDATA_SINGLE - else: - raise ValueError() - y = (data['dct_%d_%d' % (type, size)]).astype(dt) - return x, y - -class _TestDCTBase(TestCase): - def setUp(self): - self.rdt = None - self.dec = 14 - self.type = None - - def test_definition(self): - for i in FFTWDATA_SIZES: - x, yr = fftw_ref(self.type, i, self.rdt) - y = dct(x, type=self.type) - self.assertTrue(y.dtype == self.rdt, - "Output dtype is %s, expected %s" % (y.dtype, self.rdt)) - # XXX: we divide by np.max(y) because the tests fail otherwise. We - # should really use something like assert_array_approx_equal. 
The - # difference is due to fftw using a better algorithm w.r.t error - # propagation compared to the ones from fftpack. - assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, - err_msg="Size %d failed" % i) - - def test_axis(self): - nt = 2 - for i in [7, 8, 9, 16, 32, 64]: - x = np.random.randn(nt, i) - y = dct(x, type=self.type) - for j in range(nt): - assert_array_almost_equal(y[j], dct(x[j], type=self.type), - decimal=self.dec) - - x = x.T - y = dct(x, axis=0, type=self.type) - for j in range(nt): - assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type), - decimal=self.dec) - -class _TestDCTIIBase(_TestDCTBase): - def test_definition_matlab(self): - """Test correspondance with matlab (orthornomal mode).""" - for i in range(len(X)): - x = np.array(X[i], dtype=self.rdt) - yr = Y[i] - y = dct(x, norm="ortho", type=2) - self.assertTrue(y.dtype == self.rdt, - "Output dtype is %s, expected %s" % (y.dtype, self.rdt)) - assert_array_almost_equal(y, yr, decimal=self.dec) - -class _TestDCTIIIBase(_TestDCTBase): - def test_definition_ortho(self): - """Test orthornomal mode.""" - for i in range(len(X)): - x = np.array(X[i], dtype=self.rdt) - y = dct(x, norm='ortho', type=2) - xi = dct(y, norm="ortho", type=3) - self.assertTrue(xi.dtype == self.rdt, - "Output dtype is %s, expected %s" % (xi.dtype, self.rdt)) - assert_array_almost_equal(xi, x, decimal=self.dec) - -class TestDCTIDouble(_TestDCTBase): - def setUp(self): - self.rdt = np.double - self.dec = 10 - self.type = 1 - -class TestDCTIFloat(_TestDCTBase): - def setUp(self): - self.rdt = np.float32 - self.dec = 5 - self.type = 1 - -class TestDCTIIDouble(_TestDCTIIBase): - def setUp(self): - self.rdt = np.double - self.dec = 10 - self.type = 2 - -class TestDCTIIFloat(_TestDCTIIBase): - def setUp(self): - self.rdt = np.float32 - self.dec = 5 - self.type = 2 - -class TestDCTIIIDouble(_TestDCTIIIBase): - def setUp(self): - self.rdt = np.double - self.dec = 14 - self.type = 3 - -class 
TestDCTIIIFloat(_TestDCTIIIBase): - def setUp(self): - self.rdt = np.float32 - self.dec = 5 - self.type = 3 - -class _TestIDCTBase(TestCase): - def setUp(self): - self.rdt = None - self.dec = 14 - self.type = None - - def test_definition(self): - for i in FFTWDATA_SIZES: - xr, yr = fftw_ref(self.type, i, self.rdt) - y = dct(xr, type=self.type) - x = idct(yr, type=self.type) - if self.type == 1: - x /= 2 * (i-1) - else: - x /= 2 * i - self.assertTrue(x.dtype == self.rdt, - "Output dtype is %s, expected %s" % (x.dtype, self.rdt)) - # XXX: we divide by np.max(y) because the tests fail otherwise. We - # should really use something like assert_array_approx_equal. The - # difference is due to fftw using a better algorithm w.r.t error - # propagation compared to the ones from fftpack. - assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, - err_msg="Size %d failed" % i) - -class TestIDCTIDouble(_TestIDCTBase): - def setUp(self): - self.rdt = np.double - self.dec = 10 - self.type = 1 - -class TestIDCTIFloat(_TestIDCTBase): - def setUp(self): - self.rdt = np.float32 - self.dec = 4 - self.type = 1 - -class TestIDCTIIDouble(_TestIDCTBase): - def setUp(self): - self.rdt = np.double - self.dec = 10 - self.type = 2 - -class TestIDCTIIFloat(_TestIDCTBase): - def setUp(self): - self.rdt = np.float32 - self.dec = 5 - self.type = 2 - -class TestIDCTIIIDouble(_TestIDCTBase): - def setUp(self): - self.rdt = np.double - self.dec = 14 - self.type = 3 - -class TestIDCTIIIFloat(_TestIDCTBase): - def setUp(self): - self.rdt = np.float32 - self.dec = 5 - self.type = 3 - -class TestOverwrite(object): - """ - Check input overwrite behavior - """ - - real_dtypes = [np.float32, np.float64] - - def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, - should_overwrite, **kw): - x2 = x.copy() - y = routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x) - - sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % ( - routine.__name__, x.dtype, x.shape, fftsize, 
axis, overwrite_x) - if not should_overwrite: - assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) - else: - if (x2 == x).all(): - raise AssertionError("no overwrite in %s" % sig) - - def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes): - np.random.seed(1234) - if np.issubdtype(dtype, np.complexfloating): - data = np.random.randn(*shape) + 1j*np.random.randn(*shape) - else: - data = np.random.randn(*shape) - data = data.astype(dtype) - - for type in [1, 2, 3]: - for overwrite_x in [True, False]: - for norm in [None, 'ortho']: - if type == 1 and norm == 'ortho': - continue - - should_overwrite = (overwrite_x - and dtype in overwritable_dtypes - and (len(shape) == 1 or - (axis % len(shape) == len(shape)-1 - ))) - self._check(data, routine, type, None, axis, norm, - overwrite_x, should_overwrite) - - def test_dct(self): - overwritable = self.real_dtypes - for dtype in self.real_dtypes: - self._check_1d(dct, dtype, (16,), -1, overwritable) - self._check_1d(dct, dtype, (16, 2), 0, overwritable) - self._check_1d(dct, dtype, (2, 16), 1, overwritable) - - def test_idct(self): - overwritable = self.real_dtypes - for dtype in self.real_dtypes: - self._check_1d(idct, dtype, (16,), -1, overwritable) - self._check_1d(idct, dtype, (16, 2), 0, overwritable) - self._check_1d(idct, dtype, (2, 16), 1, overwritable) - -if __name__ == "__main__": - np.testing.run_module_suite() diff --git a/scipy-0.10.1/scipy/integrate/SConscript b/scipy-0.10.1/scipy/integrate/SConscript deleted file mode 100644 index fbdb67cbda..0000000000 --- a/scipy-0.10.1/scipy/integrate/SConscript +++ /dev/null @@ -1,79 +0,0 @@ -# Last Change: Wed Apr 08 11:00 PM 2009 J -# vim:syntax=python -from os.path import join as pjoin -import warnings - -from numscons import GetNumpyEnvironment, CheckF77Clib, CheckF77BLAS - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') - -# Configuration -config = env.NumpyConfigure(custom_tests = {'CheckF77BLAS' : CheckF77BLAS}) - -if not 
config.CheckF77BLAS(): - raise Exception("Could not find F77 BLAS, needed for integrate package") - -config.Finish() - -# XXX: lapack integration - -# Build linpack_lite -src = [pjoin("linpack_lite", s) for s in [ "dgbfa.f", "dgbsl.f", "dgefa.f", -"dgesl.f", "dgtsl.f", "zgbfa.f", "zgbsl.f", "zgefa.f", "zgesl.f"]] -linpack_lite = env.DistutilsStaticExtLibrary('linpack_lite', source = src) - -# Build mach -# XXX: do not use optimization flags for mach -src = [pjoin("mach", s) for s in ["d1mach.f", "i1mach.f", "r1mach.f", -"xerror.f"]] -mach = env.DistutilsStaticExtLibrary('mach', source = src) - -# Build quadpack -src = [pjoin("quadpack", s) for s in ["dqag.f", "dqage.f", "dqagi.f", -"dqagie.f", "dqagp.f", "dqagpe.f", "dqags.f", "dqagse.f", "dqawc.f", -"dqawce.f", "dqawf.f", "dqawfe.f", "dqawo.f", "dqawoe.f", "dqaws.f", -"dqawse.f", "dqc25c.f", "dqc25f.f", "dqc25s.f", "dqcheb.f", "dqelg.f", -"dqk15.f", "dqk15i.f", "dqk15w.f", "dqk21.f", "dqk31.f", "dqk41.f", "dqk51.f", -"dqk61.f", "dqmomo.f", "dqng.f", "dqpsrt.f", "dqwgtc.f", "dqwgtf.f", -"dqwgts.f"]] -quadpack = env.DistutilsStaticExtLibrary('quadpack', source = src) - - -src = [pjoin('dop', f) for f in ['dop853.f', 'dopri5.f']] -env.DistutilsStaticExtLibrary('dop', source=src) - -# Build odepack -src = [pjoin("odepack", s) for s in [ "adjlr.f", "aigbt.f", "ainvg.f", -"blkdta000.f", "bnorm.f", "cdrv.f", "cfode.f", "cntnzu.f", "ddasrt.f", -"ddassl.f", "decbt.f", "ewset.f", "fnorm.f", "intdy.f", "iprep.f", "jgroup.f", -"lsoda.f", "lsodar.f", "lsode.f", "lsodes.f", "lsodi.f", "lsoibt.f", "md.f", -"mdi.f", "mdm.f", "mdp.f", "mdu.f", "nnfc.f", "nnsc.f", "nntc.f", "nroc.f", -"nsfc.f", "odrv.f", "pjibt.f", "prep.f", "prepj.f", "prepji.f", "prja.f", -"prjs.f", "rchek.f", "roots.f", "slsbt.f", "slss.f", "solbt.f", -"solsy.f", "srcar.f", "srcma.f", "srcms.f", "srcom.f", "sro.f", "stoda.f", -"stode.f", "stodi.f", "vmnorm.f", "vnorm.f", "vode.f", "xerrwv.f", "xsetf.f", -"xsetun.f", "zvode.f"]] -odepack = 
env.DistutilsStaticExtLibrary('odepack', source = src) - -env.AppendUnique(LIBPATH = '.') - -quadenv = env.Clone() -quadenv.Prepend(LIBS = ['quadpack', 'linpack_lite', 'mach']) - -odenv = env.Clone() -odenv.Prepend(LIBS = ['odepack', 'linpack_lite', 'mach']) - -# Build _quadpack -quadenv.NumpyPythonExtension('_quadpack', source = '_quadpackmodule.c') - -# Build _odepack -odenv.NumpyPythonExtension('_odepack', source = '_odepackmodule.c') - -# Build vode -odenv.NumpyPythonExtension('vode', source = 'vode.pyf') - -# Dop extension -dopenv = env.Clone() -dopenv.Prepend(LIBS=['dop']) -dopenv.NumpyPythonExtension('_dop', source='dop.pyf') diff --git a/scipy-0.10.1/scipy/integrate/SConstruct b/scipy-0.10.1/scipy/integrate/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/integrate/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/integrate/__init__.py b/scipy-0.10.1/scipy/integrate/__init__.py deleted file mode 100644 index 38e5048434..0000000000 --- a/scipy-0.10.1/scipy/integrate/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -============================================= -Integration and ODEs (:mod:`scipy.integrate`) -============================================= - -.. currentmodule:: scipy.integrate - -Integrating functions, given function object -============================================ - -.. autosummary:: - :toctree: generated/ - - quad -- General purpose integration. - dblquad -- General purpose double integration. - tplquad -- General purpose triple integration. - fixed_quad -- Integrate func(x) using Gaussian quadrature of order n. - quadrature -- Integrate with given tolerance using Gaussian quadrature. - romberg -- Integrate func using Romberg integration. - -Integrating functions, given fixed samples -========================================== - -.. 
autosummary:: - :toctree: generated/ - - trapz -- Use trapezoidal rule to compute integral from samples. - cumtrapz -- Use trapezoidal rule to cumulatively compute integral. - simps -- Use Simpson's rule to compute integral from samples. - romb -- Use Romberg Integration to compute integral from - -- (2**k + 1) evenly-spaced samples. - -.. seealso:: - - :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian - quadrature roots and weights for other weighting factors and regions. - -Integrators of ODE systems -========================== - -.. autosummary:: - :toctree: generated/ - - odeint -- General integration of ordinary differential equations. - ode -- Integrate ODE using VODE and ZVODE routines. - complex_ode -- Convert a complex-valued ODE to real-valued and integrate. - -""" - -from quadrature import * -from odepack import * -from quadpack import * -from _ode import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/integrate/__odepack.h b/scipy-0.10.1/scipy/integrate/__odepack.h deleted file mode 100644 index 0c27b46eb2..0000000000 --- a/scipy-0.10.1/scipy/integrate/__odepack.h +++ /dev/null @@ -1,406 +0,0 @@ -/* This file should be included in the multipack module */ -/* $Revision$ */ -/* module_methods: - {"odeint", (PyCFunction) odepack_odeint, METH_VARARGS|METH_KEYWORDS, doc_odeint}, - */ -/* link libraries: (should be listed in separate lines) - odepack - linpack_lite - blas - mach - */ -/* python files: (to be appended to Multipack.py) - odepack.py - */ - - -#if defined(UPPERCASE_FORTRAN) - #if defined(NO_APPEND_FORTRAN) - /* nothing to do here */ - #else - #define LSODA LSODA_ - #endif -#else - #if defined(NO_APPEND_FORTRAN) - #define LSODA lsoda - #else - #define LSODA lsoda_ - #endif -#endif - -void LSODA(); - -/* -void ode_function(int *n, double *t, double *y, double *ydot) -{ - ydot[0] = -0.04*y[0] + 1e4*y[1]*y[2]; - ydot[2] = 
3e7*y[1]*y[1]; - ydot[1] = -ydot[0] - ydot[2]; - return; -} -*/ - -void ode_function(int *n, double *t, double *y, double *ydot) -{ - /* This is the function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any - -- otherwise place result of calculation in ydot - */ - - PyArrayObject *result_array = NULL; - PyObject *arg1, *arglist; - - /* Append t to argument list */ - if ((arg1 = PyTuple_New(1)) == NULL) { - if (PyErr_Occurred()) - PyErr_Print(); - return; - } - PyTuple_SET_ITEM(arg1, 0, PyFloat_FromDouble(*t)); - /* arg1 now owns newly created reference */ - if ((arglist = PySequence_Concat( arg1, multipack_extra_arguments)) == NULL) { - if (PyErr_Occurred()) - PyErr_Print(); - Py_DECREF(arg1); - return; - } - Py_DECREF(arg1); /* arglist has reference */ - - result_array = (PyArrayObject *)call_python_function(multipack_python_function, *n, y, arglist, 1, odepack_error); - if (result_array == NULL) { - PyErr_Print(); - Py_DECREF(arglist); - return; - } - memcpy(ydot, result_array->data, (*n)*sizeof(double)); - Py_DECREF(result_array); - Py_DECREF(arglist); - return; -} - - -int ode_jacobian_function(int *n, double *t, double *y, int *ml, int *mu, double *pd, int *nrowpd) -{ - /* This is the function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any (though this is ignored - by calling program). 
- -- otherwise place result of calculation in pd - */ - - PyArrayObject *result_array; - PyObject *arglist, *arg1; - - /* Append t to argument list */ - if ((arg1 = PyTuple_New(1)) == NULL) { - if (PyErr_Occurred()) - PyErr_Print(); - return -1; - } - PyTuple_SET_ITEM(arg1, 0, PyFloat_FromDouble(*t)); - /* arg1 now owns newly created reference */ - if ((arglist = PySequence_Concat( arg1, multipack_extra_arguments)) == NULL) { - if (PyErr_Occurred()) - PyErr_Print(); - Py_DECREF(arg1); - return -1; - } - Py_DECREF(arg1); /* arglist has reference */ - - result_array = (PyArrayObject *)call_python_function(multipack_python_jacobian, *n, y, arglist, 2, odepack_error); - if (result_array == NULL) { - Py_DECREF(arglist); - return -1; - } - if (multipack_jac_transpose == 1) - MATRIXC2F(pd, result_array->data, *n, *nrowpd) - else - memcpy(pd, result_array->data, (*n)*(*nrowpd)*sizeof(double)); - - Py_DECREF(arglist); - Py_DECREF(result_array); - return 0; -} - - -int setup_extra_inputs(PyArrayObject **ap_rtol, PyObject *o_rtol, PyArrayObject **ap_atol, PyObject *o_atol, PyArrayObject **ap_tcrit, PyObject *o_tcrit, int *numcrit, int neq) -{ - int itol = 0; - double tol=1.49012e-8; - npy_intp one = 1; - - /* Setup tolerances */ - if (o_rtol == NULL) { - *ap_rtol = (PyArrayObject *)PyArray_SimpleNew(1, &one, PyArray_DOUBLE); - if (*ap_rtol == NULL) PYERR2(odepack_error,"Error constructing relative tolerance."); - *(double *)(*ap_rtol)->data = tol; /* Default */ - } - else { - *ap_rtol = (PyArrayObject *)PyArray_ContiguousFromObject(o_rtol,PyArray_DOUBLE,0,1); - if (*ap_rtol == NULL) PYERR2(odepack_error,"Error converting relative tolerance."); - if ((*ap_rtol)->nd == 0); /* rtol is scalar */ - else if ((*ap_rtol)->dimensions[0] == neq) - itol |= 2; /* Set rtol array flag */ - else - PYERR(odepack_error,"Tolerances must be an array of the same length as the\n number of equations or a scalar."); - } - - if (o_atol == NULL) { - *ap_atol = (PyArrayObject 
*)PyArray_SimpleNew(1,&one,PyArray_DOUBLE); - if (*ap_atol == NULL) PYERR2(odepack_error,"Error constructing absolute tolerance"); - *(double *)(*ap_atol)->data = tol; - } - else { - *ap_atol = (PyArrayObject *)PyArray_ContiguousFromObject(o_atol,PyArray_DOUBLE,0,1); - if (*ap_atol == NULL) PYERR2(odepack_error,"Error converting absolute tolerance."); - if ((*ap_atol)->nd == 0); /* atol is scalar */ - else if ((*ap_atol)->dimensions[0] == neq) - itol |= 1; /* Set atol array flag */ - else - PYERR(odepack_error,"Tolerances must be an array of the same length as the\n number of equations or a scalar."); - } - itol++; /* increment to get correct value */ - - - /* Setup t-critical */ - if (o_tcrit != NULL) { - *ap_tcrit = (PyArrayObject *)PyArray_ContiguousFromObject(o_tcrit,PyArray_DOUBLE,0,1); - if (*ap_tcrit == NULL) PYERR2(odepack_error,"Error constructing critical times."); - *numcrit = PyArray_Size((PyObject *)(*ap_tcrit)); - } - return itol; - - fail: /* Needed for use of PYERR */ - return -1; -} - - -int compute_lrw_liw(int *lrw, int *liw, int neq, int jt, int ml, int mu, int mxordn, int mxords) -{ - int lrn, lrs, nyh, lmat; - - if (jt == 1 || jt == 2) - lmat = neq*neq + 2; - else if (jt == 4 || jt == 5) - lmat = (2*ml + mu + 1)*neq + 2; - else PYERR(odepack_error,"Incorrect value for jt"); - - if (mxordn < 0) PYERR(odepack_error,"Incorrect value for mxordn"); - if (mxords < 0) PYERR(odepack_error,"Incorrect value for mxords"); - nyh = neq; - - lrn = 20 + nyh*(mxordn+1) + 3*neq; - lrs = 20 + nyh*(mxords+1) + 3*neq + lmat; - - *lrw = NPY_MAX(lrn,lrs); - *liw = 20 + neq; - return 0; - - fail: - return -1; -} - -static char doc_odeint[] = "[y,{infodict,}istate] = odeint(fun, y0, t, args=(), Dfun=None, col_deriv=0, ml=, mu=, full_output=0, rtol=, atol=, tcrit=, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0.0, mxstep=0.0, mxhnil=0, mxordn=0, mxords=0)\n yprime = fun(y,t,...)"; - -static PyObject *odepack_odeint(PyObject *dummy, PyObject *args, PyObject *kwdict) { - PyObject 
*fcn, *y0, *p_tout, *o_rtol=NULL, *o_atol=NULL; - PyArrayObject *ap_y = NULL, *ap_yout= NULL; - PyArrayObject *ap_rtol=NULL, *ap_atol=NULL; - PyArrayObject *ap_tout = NULL; - PyObject *extra_args = NULL; - PyObject *Dfun = Py_None; - int neq, itol=1, itask=1, istate=1, iopt=0, lrw, *iwork, liw, jt=4; - double *y, t, *tout, *rtol, *atol, *rwork; - double h0=0.0, hmax=0.0, hmin=0.0; - int ixpr=0, mxstep=0, mxhnil=0, mxordn=12, mxords=5, ml=-1, mu=-1; - PyObject *o_tcrit=NULL; - PyArrayObject *ap_tcrit=NULL; - PyArrayObject *ap_hu=NULL, *ap_tcur=NULL, *ap_tolsf=NULL, *ap_tsw=NULL; - PyArrayObject *ap_nst=NULL, *ap_nfe=NULL, *ap_nje=NULL, *ap_nqu=NULL; - PyArrayObject *ap_mused=NULL; - int imxer=0, lenrw=0, leniw=0, col_deriv = 0; - npy_intp out_sz=0,dims[2]; - int k, ntimes, crit_ind=0; - int allocated = 0, full_output = 0, numcrit=0; - double *yout, *yout_ptr, *tout_ptr, *tcrit; - double *wa; - static char *kwlist[] = {"fun","y0","t","args","Dfun","col_deriv","ml","mu","full_output","rtol","atol","tcrit","h0","hmax","hmin","ixpr","mxstep","mxhnil","mxordn","mxords",NULL}; - - STORE_VARS(); - - if (!PyArg_ParseTupleAndKeywords(args, kwdict, "OOO|OOiiiiOOOdddiiiii", kwlist, &fcn, &y0, &p_tout, &extra_args, &Dfun, &col_deriv, &ml, &mu, &full_output, &o_rtol, &o_atol, &o_tcrit, &h0, &hmax, &hmin, &ixpr, &mxstep, &mxhnil, &mxordn, &mxords)) return NULL; - - if (o_tcrit == Py_None) { - o_tcrit = NULL; - } - if (o_rtol == Py_None) { - o_rtol = NULL; - } - if (o_atol == Py_None) { - o_atol = NULL; - } - - - INIT_JAC_FUNC(fcn,Dfun,extra_args,col_deriv,odepack_error); - - /* Set up jt, ml, and mu */ - if (Dfun == Py_None) jt++; /* set jt for internally generated */ - if (ml < 0 && mu < 0) jt -= 3; /* neither ml nor mu given, - mark jt for full jacobian */ - if (ml < 0) ml = 0; /* if one but not both are given */ - if (mu < 0) mu = 0; - - /* Initial input vector */ - ap_y = (PyArrayObject *)PyArray_ContiguousFromObject(y0, PyArray_DOUBLE, 0, 1); - if (ap_y == NULL) goto fail; - 
y = (double *) ap_y->data; - neq = PyArray_Size((PyObject *)ap_y); - dims[1] = neq; - - /* Set of output times for integration */ - ap_tout = (PyArrayObject *)PyArray_ContiguousFromObject(p_tout, PyArray_DOUBLE, 0, 1); - if (ap_tout == NULL) goto fail; - tout = (double *)ap_tout->data; - ntimes = PyArray_Size((PyObject *)ap_tout); - dims[0] = ntimes; - t = tout[0]; - - /* Setup array to hold the output evaluations*/ - ap_yout= (PyArrayObject *)PyArray_SimpleNew(2,dims,PyArray_DOUBLE); - if (ap_yout== NULL) goto fail; - yout = (double *) ap_yout->data; - /* Copy initial vector into first row of output */ - memcpy(yout, y, neq*sizeof(double)); /* copy intial value to output */ - yout_ptr = yout + neq; /* set output pointer to next position */ - - itol = setup_extra_inputs(&ap_rtol, o_rtol, &ap_atol, o_atol, &ap_tcrit, o_tcrit, &numcrit, neq); - if (itol < 0 ) goto fail; /* Something didn't work */ - rtol = (double *) ap_rtol->data; - atol = (double *) ap_atol->data; - if (o_tcrit != NULL) tcrit = (double *)(ap_tcrit->data); - - /* Find size of working arrays*/ - if (compute_lrw_liw(&lrw, &liw, neq, jt, ml, mu, mxordn, mxords) < 0) goto fail; - - if ((wa = (double *)malloc(lrw*sizeof(double) + liw*sizeof(int)))==NULL) { - PyErr_NoMemory(); - goto fail; - } - allocated = 1; - rwork = wa; - iwork = (int *)(wa + lrw); - - iwork[0] = ml; iwork[1] = mu; /* ignored if not needed */ - - if (h0 != 0.0 || hmax != 0.0 || hmin != 0.0 || ixpr != 0 || mxstep != 0 || mxhnil != 0 || mxordn != 0 || mxords != 0) { - rwork[4] = h0; rwork[5] = hmax; rwork[6] = hmin; - iwork[4] = ixpr; iwork[5] = mxstep; iwork[6] = mxhnil; - iwork[7] = mxordn; iwork[8] = mxords; - iopt = 1; - } - istate = 1; - k = 1; - - /* If full output make some useful output arrays */ - if (full_output) { - out_sz = ntimes-1; - ap_hu = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_DOUBLE); - ap_tcur = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_DOUBLE); - ap_tolsf = (PyArrayObject 
*)PyArray_SimpleNew(1,&out_sz,PyArray_DOUBLE); - ap_tsw = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_DOUBLE); - ap_nst = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_INT); - ap_nfe = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_INT); - ap_nje = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_INT); - ap_nqu = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_INT); - ap_mused = (PyArrayObject *)PyArray_SimpleNew(1,&out_sz,PyArray_INT); - if (ap_hu == NULL || ap_tcur == NULL || ap_tolsf == NULL || ap_tsw == NULL || ap_nst == NULL || ap_nfe == NULL || ap_nje == NULL || ap_nqu == NULL || ap_mused == NULL) goto fail; - } - - if (o_tcrit != NULL) {itask = 4; rwork[0] = *tcrit;} /* There are critical points */ - while (k < ntimes && istate > 0) { /* loop over desired times */ - - tout_ptr = tout + k; - /* Use tcrit if relevant */ - if (itask == 4 && *tout_ptr > *(tcrit + crit_ind)) {crit_ind++; rwork[0] = *(tcrit+crit_ind);} - if (crit_ind >= numcrit) itask = 1; /* No more critical values */ - - LSODA(ode_function, &neq, y, &t, tout_ptr, &itol, rtol, atol, &itask, &istate, &iopt, rwork, &lrw, iwork, &liw, ode_jacobian_function, &jt); - if (full_output) { - *((double *)ap_hu->data + (k-1)) = rwork[10]; - *((double *)ap_tcur->data + (k-1)) = rwork[12]; - *((double *)ap_tolsf->data + (k-1)) = rwork[13]; - *((double *)ap_tsw->data + (k-1)) = rwork[14]; - *((int *)ap_nst->data + (k-1)) = iwork[10]; - *((int *)ap_nfe->data + (k-1)) = iwork[11]; - *((int *)ap_nje->data + (k-1)) = iwork[12]; - *((int *)ap_nqu->data + (k-1)) = iwork[13]; - if (istate == -5 || istate == -4) { - imxer = iwork[15]; - } else { - imxer = -1; - } - lenrw = iwork[16]; - leniw = iwork[17]; - *((int *)ap_mused->data + (k-1)) = iwork[18]; - } - if (PyErr_Occurred()) goto fail; - memcpy(yout_ptr, y, neq*sizeof(double)); /* copy integration result to output*/ - yout_ptr += neq; k++; - } - - RESTORE_JAC_FUNC(); - - Py_DECREF(extra_args); - Py_DECREF(ap_atol); - 
Py_DECREF(ap_rtol); - Py_XDECREF(ap_tcrit); - Py_DECREF(ap_y); - Py_DECREF(ap_tout); - free(wa); - - /* Do Full output */ - if (full_output) { - return Py_BuildValue("N{s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:i,s:i,s:N}i",PyArray_Return(ap_yout), - "hu",PyArray_Return(ap_hu), - "tcur",PyArray_Return(ap_tcur), - "tolsf",PyArray_Return(ap_tolsf), - "tsw",PyArray_Return(ap_tsw), - "nst",PyArray_Return(ap_nst), - "nfe",PyArray_Return(ap_nfe), - "nje",PyArray_Return(ap_nje), - "nqu",PyArray_Return(ap_nqu), - "imxer",imxer, - "lenrw",lenrw, - "leniw",leniw, - "mused",PyArray_Return(ap_mused), - istate); - } - else { - return Py_BuildValue("Ni",PyArray_Return(ap_yout),istate); - } - - fail: - RESTORE_JAC_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_y); - Py_XDECREF(ap_rtol); - Py_XDECREF(ap_atol); - Py_XDECREF(ap_tcrit); - Py_XDECREF(ap_tout); - Py_XDECREF(ap_yout); - if (allocated) free(wa); - if (full_output) { - Py_XDECREF(ap_hu); - Py_XDECREF(ap_tcur); - Py_XDECREF(ap_tolsf); - Py_XDECREF(ap_tsw); - Py_XDECREF(ap_nst); - Py_XDECREF(ap_nfe); - Py_XDECREF(ap_nje); - Py_XDECREF(ap_nqu); - Py_XDECREF(ap_mused); - } - return NULL; - -} diff --git a/scipy-0.10.1/scipy/integrate/__quadpack.h b/scipy-0.10.1/scipy/integrate/__quadpack.h deleted file mode 100644 index ccd539cc9a..0000000000 --- a/scipy-0.10.1/scipy/integrate/__quadpack.h +++ /dev/null @@ -1,763 +0,0 @@ -/* This file should be included into the _multipackmodule file */ -/* $Revision$ */ -/* module_methods: - {"_qagse", quadpack_qagse, METH_VARARGS, doc_qagse}, - {"_qagie", quadpack_qagie, METH_VARARGS, doc_qagie}, - {"_qagpe", quadpack_qagpe, METH_VARARGS, doc_qagpe}, - {"_qawoe", quadpack_qawoe, METH_VARARGS, doc_qawoe}, - {"_qawfe", quadpack_qawfe, METH_VARARGS, doc_qawfe}, - {"_qawse", quadpack_qawse, METH_VARARGS, doc_qawse}, - {"_qawce", quadpack_qawce, METH_VARARGS, doc_qawce}, - */ -/* link libraries: (should be listed in separate lines) - quadpack - linpack_lite - blas - mach - */ -/* python files: 
(to be imported to Multipack.py) - quadpack.py - */ - -#if defined(NO_APPEND_FORTRAN) - #if defined(UPPERCASE_FORTRAN) - /* nothing to do here */ - #else - #define DQAGSE dqagse - #define DQAGIE dqagie - #define DQAGPE dqagpe - #define DQAWOE dqawoe - #define DQAWFE dqawfe - #define DQAWSE dqawse - #define DQAWCE dqawce - #endif -#else - #if defined(UPPERCASE_FORTRAN) - #define DQAGSE DQAGSE_ - #define DQAGIE DQAGIE_ - #define DQAGPE DQAGPE_ - #define DQAWOE DQAWOE_ - #define DQAWFE DQAWFE_ - #define DQAWSE DQAWSE_ - #define DQAWCE DQAWCE_ - #else - #define DQAGSE dqagse_ - #define DQAGIE dqagie_ - #define DQAGPE dqagpe_ - #define DQAWOE dqawoe_ - #define DQAWFE dqawfe_ - #define DQAWSE dqawse_ - #define DQAWCE dqawce_ - #endif -#endif - -void DQAGSE(); -void DQAGIE(); -void DQAGPE(); -void DQAWOE(); -void DQAWFE(); -void DQAWSE(); -void DQAWCE(); - - -static int already_printed_python_error = 0; - -#define QUAD_INIT_FUNC(fun,arg) {\ - INIT_FUNC(fun,arg,quadpack_error); \ - already_printed_python_error = 0;\ -} - -double quad_function(double *x) { - - double d_result; - PyObject *arg1 = NULL, *arglist=NULL, *result=NULL; - PyNumberMethods *nb; - - /* Build argument list */ - if ((arg1 = PyTuple_New(1)) == NULL) goto fail; - - PyTuple_SET_ITEM(arg1, 0, PyFloat_FromDouble(*x)); - /* arg1 now owns reference to Float object*/ - if ((arglist = PySequence_Concat( arg1, quadpack_extra_arguments)) == NULL) goto fail; - - /* Call function object --- stored as a global variable. Extra - arguments are in another global variable. - */ - if ((result = PyEval_CallObject(quadpack_python_function, arglist))==NULL) goto fail; - - /* Have to do own error checking because PyFloat_AsDouble returns -1 on - error -- making that return value from the function unusable. - - No; Solution is to test for Python Error Occurrence if -1 is return of PyFloat_AsDouble. 
- */ - - d_result = PyFloat_AsDouble(result); - if (PyErr_Occurred()) - PYERR(quadpack_error, "Supplied function does not return a valid float.") - - Py_DECREF(arg1); /* arglist has the reference to Float object. */ - Py_DECREF(arglist); - Py_DECREF(result); - - return d_result; - - fail: - Py_XDECREF(arg1); - Py_XDECREF(arglist); - Py_XDECREF(result); - longjmp(quadpack_jmpbuf, 1); -} - -static char doc_qagse[] = "[result,abserr,infodict,ier] = _qagse(fun, a, b, | args, full_output, epsabs, epsrel, limit)"; - -static PyObject *quadpack_qagse(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, *ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL; - - PyObject *extra_args = NULL; - PyObject *fcn; - - int limit=50; - npy_intp limit_shape[1]; - int full_output = 0; - double a, b, epsabs=1.49e-8, epsrel=1.49e-8; - int neval=0, ier=6, last=0, *iord; - double result=0.0, abserr=0.0; - double *alist, *blist, *rlist, *elist; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "Odd|Oiddi", &fcn, &a, &b, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; - limit_shape[0] = limit; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - QUAD_INIT_FUNC(fcn,extra_args) - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; - iord = (int *)ap_iord->data; - alist = (double *)ap_alist->data; - blist = (double *)ap_blist->data; - rlist = (double 
*)ap_rlist->data; - elist = (double *)ap_elist->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAGSE(quad_function, &a, &b, &epsabs, &epsrel, &limit, &result, &abserr, &neval, &ier, alist, blist, rlist, elist, iord, &last); - } - - RESTORE_FUNC(); - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - Py_DECREF(extra_args); - - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); - } - else { - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_iord); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - return NULL; -} - -static char doc_qagie[] = "[result,abserr,infodict,ier] = _qagie(fun, bound, inf, | args, full_output, epsabs, epsrel, limit)"; - -static PyObject *quadpack_qagie(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, *ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL; - - PyObject *extra_args = NULL; - PyObject *fcn; - - int limit=50; - npy_intp limit_shape[1]; - int full_output = 0; - double bound, epsabs=1.49e-8, epsrel=1.49e-8; - int inf, neval=0, ier=6, last=0, *iord; - double result=0.0, abserr=0.0; - double *alist, *blist, *rlist, *elist; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "Odi|Oiddi", &fcn, &bound, &inf, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; - limit_shape[0] = limit; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - 
QUAD_INIT_FUNC(fcn,extra_args); - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; - iord = (int *)ap_iord->data; - alist = (double *)ap_alist->data; - blist = (double *)ap_blist->data; - rlist = (double *)ap_rlist->data; - elist = (double *)ap_elist->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAGIE(quad_function, &bound, &inf, &epsabs, &epsrel, &limit, &result, &abserr, &neval, &ier, alist, blist, rlist, elist, iord, &last); - } - - RESTORE_FUNC(); - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - - Py_DECREF(extra_args); - - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); - } - else { - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_iord); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - return NULL; -} - - -static char doc_qagpe[] = "[result,abserr,infodict,ier] = _qagpe(fun, a, b, points, | args, full_output, epsabs, epsrel, limit)"; - -static PyObject *quadpack_qagpe(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, 
*ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL, *ap_points = NULL; - PyArrayObject *ap_pts = NULL, *ap_level = NULL; - PyArrayObject *ap_ndin = NULL; - - PyObject *extra_args = NULL; - PyObject *fcn, *o_points; - - int limit=50, npts2; - npy_intp limit_shape[1], npts2_shape[1]; - int full_output = 0; - double a, b, epsabs=1.49e-8, epsrel=1.49e-8; - int neval=0, ier=6, last=0, *iord; - int *level, *ndin; - double result=0.0, abserr=0.0; - double *alist, *blist, *rlist, *elist; - double *pts, *points; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "OddO|Oiddi", &fcn, &a, &b, &o_points, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; - limit_shape[0] = limit; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - QUAD_INIT_FUNC(fcn,extra_args) - - ap_points = (PyArrayObject *)PyArray_ContiguousFromObject(o_points, PyArray_DOUBLE, 1, 1); - if (ap_points == NULL) goto fail; - npts2 = ap_points->dimensions[0]; - npts2_shape[0] = npts2; - points = (double *)ap_points->data; - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_pts = (PyArrayObject *)PyArray_SimpleNew(1,npts2_shape,PyArray_DOUBLE); - ap_level = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_ndin = (PyArrayObject *)PyArray_SimpleNew(1,npts2_shape,PyArray_DOUBLE); - if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL || ap_pts == NULL || ap_level == NULL || ap_ndin == NULL) goto fail; - iord = (int *)ap_iord->data; - alist 
= (double *)ap_alist->data; - blist = (double *)ap_blist->data; - rlist = (double *)ap_rlist->data; - elist = (double *)ap_elist->data; - pts = (double *)ap_pts->data; - level = (int *)ap_level->data; - ndin = (int *)ap_level->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAGPE(quad_function, &a, &b, &npts2, points, &epsabs, &epsrel, &limit, &result, &abserr, &neval, &ier, alist, blist, rlist, elist, pts, iord, level, ndin, &last); - } - - RESTORE_FUNC() - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - Py_DECREF(extra_args); - Py_DECREF(ap_points); - - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist), "pts", PyArray_Return(ap_pts), "level", PyArray_Return(ap_level), "ndin", PyArray_Return(ap_ndin),ier); - } - else { - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_pts); - Py_DECREF(ap_iord); - Py_DECREF(ap_ndin); - Py_DECREF(ap_level); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - Py_XDECREF(ap_pts); - Py_XDECREF(ap_points); - Py_XDECREF(ap_ndin); - Py_XDECREF(ap_level); - return NULL; -} - - -static char doc_qawoe[] = "[result,abserr,infodict,ier] = _qawoe(fun, a, b, omega, integr, | args, full_output, epsabs, epsrel, limit, maxp1, icall, momcom, chebmo)"; - -static PyObject *quadpack_qawoe(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, *ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL, *ap_nnlog = NULL; - PyArrayObject *ap_chebmo = NULL; - - 
PyObject *extra_args = NULL, *o_chebmo = NULL; - PyObject *fcn; - - int limit=50; - npy_intp limit_shape[1], sz[2]; - int full_output = 0, maxp1=50, icall=1; - double a, b, epsabs=1.49e-8, epsrel=1.49e-8; - int neval=0, ier=6, integr=1, last=0, momcom=0, *iord; - int *nnlog; - double result=0.0, abserr=0.0, omega=0.0; - double *chebmo; - double *alist, *blist, *rlist, *elist; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "Odddi|OiddiiiiO", &fcn, &a, &b, &omega, &integr, &extra_args, &full_output, &epsabs, &epsrel, &limit, &maxp1, &icall, &momcom, &o_chebmo)) return NULL; - limit_shape[0] = limit; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - QUAD_INIT_FUNC(fcn,extra_args) - - if (o_chebmo != NULL) { - ap_chebmo = (PyArrayObject *)PyArray_ContiguousFromObject(o_chebmo, PyArray_DOUBLE, 2, 2); - if (ap_chebmo == NULL) goto fail; - if (ap_chebmo->dimensions[1] != maxp1 || ap_chebmo->dimensions[0] != 25) - PYERR(quadpack_error,"Chebyshev moment array has the wrong size."); - } - else { - sz[0] = 25; - sz[1] = maxp1; - ap_chebmo = (PyArrayObject *)PyArray_SimpleNew(2,sz,PyArray_DOUBLE); - if (ap_chebmo == NULL) goto fail; - } - chebmo = (double *) ap_chebmo->data; - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_nnlog = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - if (ap_iord == NULL || ap_nnlog == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; - iord = (int *)ap_iord->data; - nnlog = (int *)ap_nnlog->data; - alist = (double *)ap_alist->data; - 
blist = (double *)ap_blist->data; - rlist = (double *)ap_rlist->data; - elist = (double *)ap_elist->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAWOE(quad_function, &a, &b, &omega, &integr, &epsabs, &epsrel, &limit, &icall, &maxp1, &result, &abserr, &neval, &ier, &last, alist, blist, rlist, elist, iord, nnlog, &momcom, chebmo); - } - - RESTORE_FUNC(); - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - Py_DECREF(extra_args); - - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist), "nnlog", PyArray_Return(ap_nnlog), "momcom", momcom, "chebmo", PyArray_Return(ap_chebmo),ier); - } - else { - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_iord); - Py_DECREF(ap_nnlog); - Py_DECREF(ap_chebmo); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - Py_XDECREF(ap_nnlog); - Py_XDECREF(ap_chebmo); - return NULL; -} - - -static char doc_qawfe[] = "[result,abserr,infodict,ier] = _qawfe(fun, a, omega, integr, | args, full_output, epsabs, limlst, limit, maxp1)"; - -static PyObject *quadpack_qawfe(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, *ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL, *ap_nnlog = NULL; - PyArrayObject *ap_chebmo = NULL, *ap_rslst = NULL; - PyArrayObject *ap_erlst = NULL, *ap_ierlst = NULL; - - PyObject *extra_args = NULL; - PyObject *fcn; - - int limlst = 50, limit=50; - npy_intp limlst_shape[1], limit_shape[1], sz[2]; - int full_output = 0, 
maxp1=50; - double a, epsabs=1.49e-8; - int neval=0, ier=6, integr=1, *iord; - int lst, *nnlog, *ierlst; - double *chebmo, *rslst, *erlst; - double result=0.0, abserr=0.0, omega=0.0; - double *alist, *blist, *rlist, *elist; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "Oddi|Oidiii", &fcn, &a, &omega, &integr, &extra_args, &full_output, &epsabs, &limlst, &limit, &maxp1)) return NULL; - limit_shape[0] = limit; - limlst_shape[0] = limlst; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - QUAD_INIT_FUNC(fcn,extra_args) - - sz[0] = 25; - sz[1] = maxp1; - ap_chebmo = (PyArrayObject *)PyArray_SimpleNew(2,sz,PyArray_DOUBLE); - if (ap_chebmo == NULL) goto fail; - chebmo = (double *) ap_chebmo->data; - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_nnlog = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rslst = (PyArrayObject *)PyArray_SimpleNew(1,limlst_shape,PyArray_DOUBLE); - ap_erlst = (PyArrayObject *)PyArray_SimpleNew(1,limlst_shape,PyArray_DOUBLE); - ap_ierlst = (PyArrayObject *)PyArray_SimpleNew(1,limlst_shape,PyArray_INT); - if (ap_iord == NULL || ap_nnlog == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL || ap_rslst == NULL || ap_erlst == NULL || ap_ierlst == NULL) goto fail; - iord = (int *)ap_iord->data; - nnlog = (int *)ap_nnlog->data; - alist = (double *)ap_alist->data; - blist = (double *)ap_blist->data; - rlist = (double *)ap_rlist->data; - elist = (double *)ap_elist->data; - rslst = (double *)ap_rslst->data; - erlst = (double *)ap_erlst->data; - 
ierlst = (int *)ap_ierlst->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAWFE(quad_function, &a, &omega, &integr, &epsabs, &limlst, &limit, &maxp1, &result, &abserr, &neval, &ier, rslst, erlst, ierlst, &lst, alist, blist, rlist, elist, iord, nnlog, chebmo); - } - - RESTORE_FUNC(); - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - Py_DECREF(extra_args); - Py_DECREF(ap_nnlog); - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_iord); - Py_DECREF(ap_chebmo); - - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N}i", result, abserr, "neval", neval, "lst", lst, "rslst", PyArray_Return(ap_rslst), "erlst", PyArray_Return(ap_erlst), "ierlst", PyArray_Return(ap_ierlst), ier); - } - else { - Py_DECREF(ap_rslst); - Py_DECREF(ap_erlst); - Py_DECREF(ap_ierlst); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - Py_XDECREF(ap_nnlog); - Py_XDECREF(ap_chebmo); - Py_XDECREF(ap_rslst); - Py_XDECREF(ap_erlst); - Py_XDECREF(ap_ierlst); - return NULL; -} - - -static char doc_qawce[] = "[result,abserr,infodict,ier] = _qawce(fun, a, b, c, | args, full_output, epsabs, epsrel, limit)"; - -static PyObject *quadpack_qawce(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, *ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL; - - PyObject *extra_args = NULL; - PyObject *fcn; - - int limit; - npy_intp limit_shape[1]; - int full_output = 0; - double a, b, c, epsabs=1.49e-8, epsrel=1.49e-8; - int neval=0, ier=6, last=0, *iord; - double result=0.0, abserr=0.0; - double *alist, *blist, *rlist, *elist; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "Oddd|Oiddi", &fcn, &a, &b, &c, &extra_args, &full_output, &epsabs, &epsrel, 
&limit)) return NULL; - limit_shape[0] = limit; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - QUAD_INIT_FUNC(fcn,extra_args) - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; - iord = (int *)ap_iord->data; - alist = (double *)ap_alist->data; - blist = (double *)ap_blist->data; - rlist = (double *)ap_rlist->data; - elist = (double *)ap_elist->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAWCE(quad_function, &a, &b, &c, &epsabs, &epsrel, &limit, &result, &abserr, &neval, &ier, alist, blist, rlist, elist, iord, &last); - } - - RESTORE_FUNC(); - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - Py_DECREF(extra_args); - - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); - } - else { - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_iord); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - return NULL; -} - - -static char doc_qawse[] = "[result,abserr,infodict,ier] = _qawse(fun, a, 
b, (alfa, beta), integr, | args, full_output, epsabs, epsrel, limit)"; - -static PyObject *quadpack_qawse(PyObject *dummy, PyObject *args) { - - PyArrayObject *ap_alist = NULL, *ap_iord = NULL; - PyArrayObject *ap_blist = NULL, *ap_elist = NULL; - PyArrayObject *ap_rlist = NULL; - - PyObject *extra_args = NULL; - PyObject *fcn; - - int full_output = 0, integr; - int limit=50; - npy_intp limit_shape[1]; - double a, b, epsabs=1.49e-8, epsrel=1.49e-8; - double alfa, beta; - int neval=0, ier=6, last=0, *iord; - double result=0.0, abserr=0.0; - double *alist, *blist, *rlist, *elist; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "Odd(dd)i|Oiddi", &fcn, &a, &b, &alfa, &beta, &integr, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; - limit_shape[0] = limit; - - /* Need to check that limit is bigger than 1 */ - if (limit < 1) - return Py_BuildValue("ddi",result,abserr,ier); - - QUAD_INIT_FUNC(fcn,extra_args) - - /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); - if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; - iord = (int *)ap_iord->data; - alist = (double *)ap_alist->data; - blist = (double *)ap_blist->data; - rlist = (double *)ap_rlist->data; - elist = (double *)ap_elist->data; - - if (setjmp(quadpack_jmpbuf)) { - goto fail; - } - else { - DQAWSE(quad_function, &a, &b, &alfa, &beta, &integr, &epsabs, &epsrel, &limit, &result, &abserr, &neval, &ier, alist, blist, rlist, elist, iord, &last); - } - - RESTORE_FUNC(); - - if (PyErr_Occurred()) { - ier = 80; /* Python error */ - PyErr_Clear(); - } - Py_DECREF(extra_args); 
- - if (full_output) { - return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); - } - else { - Py_DECREF(ap_alist); - Py_DECREF(ap_blist); - Py_DECREF(ap_rlist); - Py_DECREF(ap_elist); - Py_DECREF(ap_iord); - return Py_BuildValue("ddi",result,abserr,ier); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_alist); - Py_XDECREF(ap_blist); - Py_XDECREF(ap_rlist); - Py_XDECREF(ap_elist); - Py_XDECREF(ap_iord); - return NULL; -} - - - - diff --git a/scipy-0.10.1/scipy/integrate/_ode.py b/scipy-0.10.1/scipy/integrate/_ode.py deleted file mode 100644 index 4833673ad5..0000000000 --- a/scipy-0.10.1/scipy/integrate/_ode.py +++ /dev/null @@ -1,888 +0,0 @@ -# Authors: Pearu Peterson, Pauli Virtanen, John Travers -""" -First-order ODE integrators. - -User-friendly interface to various numerical integrators for solving a -system of first order ODEs with prescribed initial conditions:: - - d y(t)[i] - --------- = f(t,y(t))[i], - d t - - y(t=0)[i] = y0[i], - -where:: - - i = 0, ..., len(y0) - 1 - -class ode ---------- - -A generic interface class to numeric integrators. It has the following -methods:: - - integrator = ode(f,jac=None) - integrator = integrator.set_integrator(name,**params) - integrator = integrator.set_initial_value(y0,t0=0.0) - integrator = integrator.set_f_params(*args) - integrator = integrator.set_jac_params(*args) - y1 = integrator.integrate(t1,step=0,relax=0) - flag = integrator.successful() - -class complex_ode ------------------ - -This class has the same generic interface as ode, except it can handle complex -f, y and Jacobians by transparently translating them into the equivalent -real valued system. 
It supports the real valued solvers (i.e not zvode) and is -an alternative to ode with the zvode solver, sometimes performing better. -""" - -# XXX: Integrators must have: -# =========================== -# cvode - C version of vode and vodpk with many improvements. -# Get it from http://www.netlib.org/ode/cvode.tar.gz -# To wrap cvode to Python, one must write extension module by -# hand. Its interface is too much 'advanced C' that using f2py -# would be too complicated (or impossible). -# -# How to define a new integrator: -# =============================== -# -# class myodeint(IntegratorBase): -# -# runner = or None -# -# def __init__(self,...): # required -# -# -# def reset(self,n,has_jac): # optional -# # n - the size of the problem (number of equations) -# # has_jac - whether user has supplied its own routine for Jacobian -# -# -# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required -# # this method is called to integrate from t=t0 to t=t1 -# # with initial condition y0. f and jac are user-supplied functions -# # that define the problem. f_params,jac_params are additional -# # arguments -# # to these functions. -# -# if : -# self.success = 0 -# return t1,y1 -# -# # In addition, one can define step() and run_relax() methods (they -# # take the same arguments as run()) if the integrator can support -# # these features (see IntegratorBase doc strings). -# -# if myodeint.runner: -# IntegratorBase.integrator_classes.append(myodeint) - -__all__ = ['ode', 'complex_ode'] -__version__ = "$Id$" -__docformat__ = "restructuredtext en" - -import re -import warnings - -from numpy import asarray, array, zeros, int32, isscalar, real, imag - -import vode as _vode -import _dop - - -#------------------------------------------------------------------------------ -# User interface -#------------------------------------------------------------------------------ - - -class ode(object): - """\ -A generic interface class to numeric integrators. 
- -Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``. - -Parameters ----------- -f : callable f(t, y, *f_args) - Rhs of the equation. t is a scalar, y.shape == (n,). - f_args is set by calling set_f_params(*args) -jac : callable jac(t, y, *jac_args) - Jacobian of the rhs, jac[i,j] = d f[i] / d y[j] - jac_args is set by calling set_f_params(*args) - -Attributes ----------- -t : float - Current time -y : ndarray - Current variable values - -See also --------- -odeint : an integrator with a simpler interface based on lsoda from ODEPACK -quad : for finding the area under a curve - -Notes ------ - -Available integrators are listed below. They can be selected using -the `set_integrator` method. - -"vode" - - Real-valued Variable-coefficient Ordinary Differential Equation - solver, with fixed-leading-coefficient implementation. It provides - implicit Adams method (for non-stiff problems) and a method based on - backward differentiation formulas (BDF) (for stiff problems). - - Source: http://www.netlib.org/ode/vode.f - - .. warning:: - - This integrator is not re-entrant. You cannot have two `ode` - instances using the "vode" integrator at the same time. - - This integrator accepts the following parameters in `set_integrator` - method of the `ode` class: - - - atol : float or sequence - absolute tolerance for solution - - rtol : float or sequence - relative tolerance for solution - - lband : None or int - - rband : None or int - Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+rband. - Setting these requires your jac routine to return the jacobian - in packed format, jac_packed[i-j+lband, j] = jac[i,j]. - - method: 'adams' or 'bdf' - Which solver to use, Adams (non-stiff) or BDF (stiff) - - with_jacobian : bool - Whether to use the jacobian - - nsteps : int - Maximum number of (internally defined) steps allowed during one - call to the solver. 
- - first_step : float - - min_step : float - - max_step : float - Limits for the step sizes used by the integrator. - - order : int - Maximum order used by the integrator, - order <= 12 for Adams, <= 5 for BDF. - -"zvode" - - Complex-valued Variable-coefficient Ordinary Differential Equation - solver, with fixed-leading-coefficient implementation. It provides - implicit Adams method (for non-stiff problems) and a method based on - backward differentiation formulas (BDF) (for stiff problems). - - Source: http://www.netlib.org/ode/zvode.f - - .. warning:: - - This integrator is not re-entrant. You cannot have two `ode` - instances using the "zvode" integrator at the same time. - - This integrator accepts the same parameters in `set_integrator` - as the "vode" solver. - - .. note:: - - When using ZVODE for a stiff system, it should only be used for - the case in which the function f is analytic, that is, when each f(i) - is an analytic function of each y(j). Analyticity means that the - partial derivative df(i)/dy(j) is a unique complex number, and this - fact is critical in the way ZVODE solves the dense or banded linear - systems that arise in the stiff case. For a complex stiff ODE system - in which f is not analytic, ZVODE is likely to have convergence - failures, and for this problem one should instead use DVODE on the - equivalent real system (in the real and imaginary parts of y). - -"dopri5" - - This is an explicit runge-kutta method of order (4)5 due to Dormand & - Prince (with stepsize control and dense output). - - Authors: - - E. Hairer and G. Wanner - Universite de Geneve, Dept. de Mathematiques - CH-1211 Geneve 24, Switzerland - e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch - - This code is described in [HNW93]_. 
- - This integrator accepts the following parameters in set_integrator() - method of the ode class: - - - atol : float or sequence - absolute tolerance for solution - - rtol : float or sequence - relative tolerance for solution - - nsteps : int - Maximum number of (internally defined) steps allowed during one - call to the solver. - - first_step : float - - max_step : float - - safety : float - Safety factor on new step selection (default 0.9) - - ifactor : float - - dfactor : float - Maximum factor to increase/decrease step size by in one step - - beta : float - Beta parameter for stabilised step size control. - -"dop853" - - This is an explicit runge-kutta method of order 8(5,3) due to Dormand - & Prince (with stepsize control and dense output). - - Options and references the same as "dopri5". - -Examples --------- - -A problem to integrate and the corresponding jacobian: - ->>> from scipy.integrate import ode ->>> ->>> y0, t0 = [1.0j, 2.0], 0 ->>> ->>> def f(t, y, arg1): ->>> return [1j*arg1*y[0] + y[1], -arg1*y[1]**2] ->>> def jac(t, y, arg1): ->>> return [[1j*arg1, 1], [0, -arg1*2*y[1]]] - -The integration: - ->>> r = ode(f, jac).set_integrator('zvode', method='bdf', with_jacobian=True) ->>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0) ->>> t1 = 10 ->>> dt = 1 ->>> while r.successful() and r.t < t1: ->>> r.integrate(r.t+dt) ->>> print r.t, r.y - -References ----------- - -.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary - Differential Equations i. Nonstiff Problems. 2nd edition. 
- Springer Series in Computational Mathematics, - Springer-Verlag (1993) - -""" - - def __init__(self, f, jac=None): - self.stiff = 0 - self.f = f - self.jac = jac - self.f_params = () - self.jac_params = () - self.y = [] - - def set_initial_value(self, y, t=0.0): - """Set initial conditions y(t) = y.""" - if isscalar(y): - y = [y] - n_prev = len(self.y) - if not n_prev: - self.set_integrator('') # find first available integrator - self.y = asarray(y, self._integrator.scalar) - self.t = t - self._integrator.reset(len(self.y), self.jac is not None) - return self - - def set_integrator(self, name, **integrator_params): - """ - Set integrator by name. - - Parameters - ---------- - name : str - Name of the integrator. - integrator_params : - Additional parameters for the integrator. - """ - integrator = find_integrator(name) - if integrator is None: - # FIXME: this really should be raise an exception. Will that break - # any code? - warnings.warn('No integrator name match with %r or is not ' - 'available.' 
% name) - else: - self._integrator = integrator(**integrator_params) - if not len(self.y): - self.t = 0.0 - self.y = array([0.0], self._integrator.scalar) - self._integrator.reset(len(self.y), self.jac is not None) - return self - - def integrate(self, t, step=0, relax=0): - """Find y=y(t), set y as an initial condition, and return y.""" - if step and self._integrator.supports_step: - mth = self._integrator.step - elif relax and self._integrator.supports_run_relax: - mth = self._integrator.run_relax - else: - mth = self._integrator.run - self.y, self.t = mth(self.f, self.jac or (lambda: None), - self.y, self.t, t, - self.f_params, self.jac_params) - return self.y - - def successful(self): - """Check if integration was successful.""" - try: - self._integrator - except AttributeError: - self.set_integrator('') - return self._integrator.success == 1 - - def set_f_params(self, *args): - """Set extra parameters for user-supplied function f.""" - self.f_params = args - return self - - def set_jac_params(self, *args): - """Set extra parameters for user-supplied function jac.""" - self.jac_params = args - return self - - -class complex_ode(ode): - """ - A wrapper of ode for complex systems. - - This functions similarly as `ode`, but re-maps a complex-valued - equation system to a real-valued one before using the integrators. - - Parameters - ---------- - f : callable f(t, y, *f_args) - Rhs of the equation. t is a scalar, y.shape == (n,). - f_args is set by calling set_f_params(*args) - jac : jac(t, y, *jac_args) - Jacobian of the rhs, jac[i,j] = d f[i] / d y[j] - jac_args is set by calling set_f_params(*args) - - Attributes - ---------- - t : float - Current time - y : ndarray - Current variable values - - Examples - -------- - For usage examples, see `ode`. 
- - """ - - def __init__(self, f, jac=None): - self.cf = f - self.cjac = jac - if jac is not None: - ode.__init__(self, self._wrap, self._wrap_jac) - else: - ode.__init__(self, self._wrap, None) - - def _wrap(self, t, y, *f_args): - f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args)) - self.tmp[::2] = real(f) - self.tmp[1::2] = imag(f) - return self.tmp - - def _wrap_jac(self, t, y, *jac_args): - jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args)) - self.jac_tmp[1::2, 1::2] = self.jac_tmp[::2, ::2] = real(jac) - self.jac_tmp[1::2, ::2] = imag(jac) - self.jac_tmp[::2, 1::2] = -self.jac_tmp[1::2, ::2] - return self.jac_tmp - - def set_integrator(self, name, **integrator_params): - """ - Set integrator by name. - - Parameters - ---------- - name : str - Name of the integrator - integrator_params : - Additional parameters for the integrator. - """ - if name == 'zvode': - raise ValueError("zvode should be used with ode, not zode") - return ode.set_integrator(self, name, **integrator_params) - - def set_initial_value(self, y, t=0.0): - """Set initial conditions y(t) = y.""" - y = asarray(y) - self.tmp = zeros(y.size * 2, 'float') - self.tmp[::2] = real(y) - self.tmp[1::2] = imag(y) - if self.cjac is not None: - self.jac_tmp = zeros((y.size * 2, y.size * 2), 'float') - return ode.set_initial_value(self, self.tmp, t) - - def integrate(self, t, step=0, relax=0): - """Find y=y(t), set y as an initial condition, and return y.""" - y = ode.integrate(self, t, step, relax) - return y[::2] + 1j * y[1::2] - - -#------------------------------------------------------------------------------ -# ODE integrators -#------------------------------------------------------------------------------ - -def find_integrator(name): - for cl in IntegratorBase.integrator_classes: - if re.match(name, cl.__name__, re.I): - return cl - return None - -class IntegratorConcurrencyError(RuntimeError): - """ - Failure due to concurrent usage of an integrator that can be used - only for a single 
problem at a time. - - """ - def __init__(self, name): - msg = ("Integrator `%s` can be used to solve only a single problem " - "at a time. If you want to integrate multiple problems, " - "consider using a different integrator " - "(see `ode.set_integrator`)") % name - RuntimeError.__init__(self, msg) - -class IntegratorBase(object): - - runner = None # runner is None => integrator is not available - success = None # success==1 if integrator was called successfully - supports_run_relax = None - supports_step = None - integrator_classes = [] - scalar = float - - def acquire_new_handle(self): - # Some of the integrators have internal state (ancient - # Fortran...), and so only one instance can use them at a time. - # We keep track of this, and fail when concurrent usage is tried. - self.__class__.active_global_handle += 1 - self.handle = self.__class__.active_global_handle - - def check_handle(self): - if self.handle is not self.__class__.active_global_handle: - raise IntegratorConcurrencyError(self.__class__.__name__) - - def reset(self, n, has_jac): - """Prepare integrator for call: allocate memory, set flags, etc. - n - number of equations. - has_jac - if user has supplied function for evaluating Jacobian. - """ - - def run(self, f, jac, y0, t0, t1, f_params, jac_params): - """Integrate from t=t0 to t=t1 using y0 as an initial condition. - Return 2-tuple (y1,t1) where y1 is the result and t=t1 - defines the stoppage coordinate of the result. 
- """ - raise NotImplementedError('all integrators must define ' - 'run(f, jac, t0, t1, y0, f_params, jac_params)') - - def step(self, f, jac, y0, t0, t1, f_params, jac_params): - """Make one integration step and return (y1,t1).""" - raise NotImplementedError('%s does not support step() method' % - self.__class__.__name__) - - def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params): - """Integrate from t=t0 to t>=t1 and return (y1,t).""" - raise NotImplementedError('%s does not support run_relax() method' % - self.__class__.__name__) - - #XXX: __str__ method for getting visual state of the integrator - - -class vode(IntegratorBase): - - runner = getattr(_vode, 'dvode', None) - - messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)', - -2: 'Excess accuracy requested. (Tolerances too small.)', - -3: 'Illegal input detected. (See printed message.)', - -4: 'Repeated error test failures. (Check all input.)', - -5: 'Repeated convergence failures. (Perhaps bad' - ' Jacobian supplied or wrong choice of MF or tolerances.)', - -6: 'Error weight became zero during problem. 
(Solution' - ' component i vanished, and ATOL or ATOL(i) = 0.)' - } - supports_run_relax = 1 - supports_step = 1 - active_global_handle = 0 - - def __init__(self, - method='adams', - with_jacobian=0, - rtol=1e-6, atol=1e-12, - lband=None, uband=None, - order=12, - nsteps=500, - max_step=0.0, # corresponds to infinite - min_step=0.0, - first_step=0.0, # determined by solver - ): - - if re.match(method, r'adams', re.I): - self.meth = 1 - elif re.match(method, r'bdf', re.I): - self.meth = 2 - else: - raise ValueError('Unknown integration method %s' % method) - self.with_jacobian = with_jacobian - self.rtol = rtol - self.atol = atol - self.mu = uband - self.ml = lband - - self.order = order - self.nsteps = nsteps - self.max_step = max_step - self.min_step = min_step - self.first_step = first_step - self.success = 1 - - self.initialized = False - - def reset(self, n, has_jac): - # Calculate parameters for Fortran subroutine dvode. - if has_jac: - if self.mu is None and self.ml is None: - miter = 1 - else: - if self.mu is None: - self.mu = 0 - if self.ml is None: - self.ml = 0 - miter = 4 - else: - if self.mu is None and self.ml is None: - if self.with_jacobian: - miter = 2 - else: - miter = 0 - else: - if self.mu is None: - self.mu = 0 - if self.ml is None: - self.ml = 0 - if self.ml == self.mu == 0: - miter = 3 - else: - miter = 5 - mf = 10 * self.meth + miter - if mf == 10: - lrw = 20 + 16 * n - elif mf in [11, 12]: - lrw = 22 + 16 * n + 2 * n * n - elif mf == 13: - lrw = 22 + 17 * n - elif mf in [14, 15]: - lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n - elif mf == 20: - lrw = 20 + 9 * n - elif mf in [21, 22]: - lrw = 22 + 9 * n + 2 * n * n - elif mf == 23: - lrw = 22 + 10 * n - elif mf in [24, 25]: - lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n - else: - raise ValueError('Unexpected mf=%s' % mf) - if miter in [0, 3]: - liw = 30 - else: - liw = 30 + n - rwork = zeros((lrw,), float) - rwork[4] = self.first_step - rwork[5] = self.max_step - rwork[6] = 
self.min_step - self.rwork = rwork - iwork = zeros((liw,), int32) - if self.ml is not None: - iwork[0] = self.ml - if self.mu is not None: - iwork[1] = self.mu - iwork[4] = self.order - iwork[5] = self.nsteps - iwork[6] = 2 # mxhnil - self.iwork = iwork - self.call_args = [self.rtol, self.atol, 1, 1, - self.rwork, self.iwork, mf] - self.success = 1 - self.initialized = False - - def run(self, *args): - if self.initialized: - self.check_handle() - else: - self.initialized = True - self.acquire_new_handle() - y1, t, istate = self.runner(*(args[:5] + tuple(self.call_args) + - args[5:])) - if istate < 0: - warnings.warn('vode: ' + - self.messages.get(istate, - 'Unexpected istate=%s' % istate)) - self.success = 0 - else: - self.call_args[3] = 2 # upgrade istate from 1 to 2 - return y1, t - - def step(self, *args): - itask = self.call_args[2] - self.call_args[2] = 2 - r = self.run(*args) - self.call_args[2] = itask - return r - - def run_relax(self, *args): - itask = self.call_args[2] - self.call_args[2] = 3 - r = self.run(*args) - self.call_args[2] = itask - return r - - -if vode.runner is not None: - IntegratorBase.integrator_classes.append(vode) - - -class zvode(vode): - runner = getattr(_vode, 'zvode', None) - - supports_run_relax = 1 - supports_step = 1 - scalar = complex - active_global_handle = 0 - - def reset(self, n, has_jac): - # Calculate parameters for Fortran subroutine dvode. 
- if has_jac: - if self.mu is None and self.ml is None: - miter = 1 - else: - if self.mu is None: - self.mu = 0 - if self.ml is None: - self.ml = 0 - miter = 4 - else: - if self.mu is None and self.ml is None: - if self.with_jacobian: - miter = 2 - else: - miter = 0 - else: - if self.mu is None: - self.mu = 0 - if self.ml is None: - self.ml = 0 - if self.ml == self.mu == 0: - miter = 3 - else: - miter = 5 - - mf = 10 * self.meth + miter - - if mf in (10,): - lzw = 15 * n - elif mf in (11, 12): - lzw = 15 * n + 2 * n ** 2 - elif mf in (-11, -12): - lzw = 15 * n + n ** 2 - elif mf in (13,): - lzw = 16 * n - elif mf in (14, 15): - lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n - elif mf in (-14, -15): - lzw = 16 * n + (2 * self.ml + self.mu) * n - elif mf in (20,): - lzw = 8 * n - elif mf in (21, 22): - lzw = 8 * n + 2 * n ** 2 - elif mf in (-21, -22): - lzw = 8 * n + n ** 2 - elif mf in (23,): - lzw = 9 * n - elif mf in (24, 25): - lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n - elif mf in (-24, -25): - lzw = 9 * n + (2 * self.ml + self.mu) * n - - lrw = 20 + n - - if miter in (0, 3): - liw = 30 - else: - liw = 30 + n - - zwork = zeros((lzw,), complex) - self.zwork = zwork - - rwork = zeros((lrw,), float) - rwork[4] = self.first_step - rwork[5] = self.max_step - rwork[6] = self.min_step - self.rwork = rwork - - iwork = zeros((liw,), int32) - if self.ml is not None: - iwork[0] = self.ml - if self.mu is not None: - iwork[1] = self.mu - iwork[4] = self.order - iwork[5] = self.nsteps - iwork[6] = 2 # mxhnil - self.iwork = iwork - - self.call_args = [self.rtol, self.atol, 1, 1, - self.zwork, self.rwork, self.iwork, mf] - self.success = 1 - self.initialized = False - - def run(self, *args): - if self.initialized: - self.check_handle() - else: - self.initialized = True - self.acquire_new_handle() - y1, t, istate = self.runner(*(args[:5] + tuple(self.call_args) + - args[5:])) - if istate < 0: - warnings.warn('zvode: ' + - self.messages.get(istate, 'Unexpected istate=%s' % 
istate)) - self.success = 0 - else: - self.call_args[3] = 2 # upgrade istate from 1 to 2 - return y1, t - - -if zvode.runner is not None: - IntegratorBase.integrator_classes.append(zvode) - - -class dopri5(IntegratorBase): - - runner = getattr(_dop, 'dopri5', None) - name = 'dopri5' - - messages = {1: 'computation successful', - 2: 'comput. successful (interrupted by solout)', - -1: 'input is not consistent', - -2: 'larger nmax is needed', - -3: 'step size becomes too small', - -4: 'problem is probably stiff (interrupted)', - } - - def __init__(self, - rtol=1e-6, atol=1e-12, - nsteps=500, - max_step=0.0, - first_step=0.0, # determined by solver - safety=0.9, - ifactor=10.0, - dfactor=0.2, - beta=0.0, - method=None - ): - self.rtol = rtol - self.atol = atol - self.nsteps = nsteps - self.max_step = max_step - self.first_step = first_step - self.safety = safety - self.ifactor = ifactor - self.dfactor = dfactor - self.beta = beta - self.success = 1 - - def reset(self, n, has_jac): - work = zeros((8 * n + 21,), float) - work[1] = self.safety - work[2] = self.dfactor - work[3] = self.ifactor - work[4] = self.beta - work[5] = self.max_step - work[6] = self.first_step - self.work = work - iwork = zeros((21,), int32) - iwork[0] = self.nsteps - self.iwork = iwork - self.call_args = [self.rtol, self.atol, self._solout, - self.work, self.iwork] - self.success = 1 - - def run(self, f, jac, y0, t0, t1, f_params, jac_params): - x, y, iwork, idid = self.runner(*((f, t0, y0, t1) + - tuple(self.call_args) + (f_params,))) - if idid < 0: - warnings.warn(self.name + ': ' + - self.messages.get(idid, 'Unexpected idid=%s' % idid)) - self.success = 0 - return y, x - - def _solout(self, *args): - # dummy solout function - pass - - -if dopri5.runner is not None: - IntegratorBase.integrator_classes.append(dopri5) - - -class dop853(dopri5): - - runner = getattr(_dop, 'dop853', None) - name = 'dop853' - - def __init__(self, - rtol=1e-6, atol=1e-12, - nsteps=500, - max_step=0.0, - 
first_step=0.0, # determined by solver - safety=0.9, - ifactor=6.0, - dfactor=0.3, - beta=0.0, - method=None - ): - self.rtol = rtol - self.atol = atol - self.nsteps = nsteps - self.max_step = max_step - self.first_step = first_step - self.safety = safety - self.ifactor = ifactor - self.dfactor = dfactor - self.beta = beta - self.success = 1 - - def reset(self, n, has_jac): - work = zeros((11 * n + 21,), float) - work[1] = self.safety - work[2] = self.dfactor - work[3] = self.ifactor - work[4] = self.beta - work[5] = self.max_step - work[6] = self.first_step - self.work = work - iwork = zeros((21,), int32) - iwork[0] = self.nsteps - self.iwork = iwork - self.call_args = [self.rtol, self.atol, self._solout, - self.work, self.iwork] - self.success = 1 - - -if dop853.runner is not None: - IntegratorBase.integrator_classes.append(dop853) diff --git a/scipy-0.10.1/scipy/integrate/_odepackmodule.c b/scipy-0.10.1/scipy/integrate/_odepackmodule.c deleted file mode 100644 index ae0570e9f4..0000000000 --- a/scipy-0.10.1/scipy/integrate/_odepackmodule.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - Multipack project. 
- */ -#include "multipack.h" -static PyObject *odepack_error; -#include "__odepack.h" -static struct PyMethodDef odepack_module_methods[] = { -{"odeint", (PyCFunction) odepack_odeint, METH_VARARGS|METH_KEYWORDS, doc_odeint}, -{NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_odepack", - NULL, - -1, - odepack_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__odepack(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - d = PyModule_GetDict(m); - - s = PyUString_FromString(" 1.9 "); - PyDict_SetItemString(d, "__version__", s); - odepack_error = PyErr_NewException ("odpack.error", NULL, NULL); - Py_DECREF(s); - PyDict_SetItemString(d, "error", odepack_error); - if (PyErr_Occurred()) { - Py_FatalError("can't initialize module odepack"); - } - return m; -} -#else -PyMODINIT_FUNC init_odepack(void) { - PyObject *m, *d, *s; - m = Py_InitModule("_odepack", odepack_module_methods); - import_array(); - d = PyModule_GetDict(m); - - s = PyUString_FromString(" 1.9 "); - PyDict_SetItemString(d, "__version__", s); - odepack_error = PyErr_NewException ("odepack.error", NULL, NULL); - Py_DECREF(s); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module odepack"); -} -#endif diff --git a/scipy-0.10.1/scipy/integrate/_quadpackmodule.c b/scipy-0.10.1/scipy/integrate/_quadpackmodule.c deleted file mode 100644 index 09136bf51d..0000000000 --- a/scipy-0.10.1/scipy/integrate/_quadpackmodule.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - From Multipack project - */ -#include "quadpack.h" -static PyObject *quadpack_error; -#include "__quadpack.h" -static struct PyMethodDef quadpack_module_methods[] = { -{"_qagse", quadpack_qagse, METH_VARARGS, doc_qagse}, -{"_qagie", quadpack_qagie, METH_VARARGS, doc_qagie}, -{"_qagpe", quadpack_qagpe, METH_VARARGS, doc_qagpe}, -{"_qawoe", quadpack_qawoe, METH_VARARGS, doc_qawoe}, -{"_qawfe", quadpack_qawfe, 
METH_VARARGS, doc_qawfe}, -{"_qawse", quadpack_qawse, METH_VARARGS, doc_qawse}, -{"_qawce", quadpack_qawce, METH_VARARGS, doc_qawce}, -{NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_quadpack", - NULL, - -1, - quadpack_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__quadpack(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - d = PyModule_GetDict(m); - - s = PyUString_FromString(" 1.13 "); - PyDict_SetItemString(d, "__version__", s); - quadpack_error = PyErr_NewException ("quadpack.error", NULL, NULL); - Py_DECREF(s); - PyDict_SetItemString(d, "error", quadpack_error); - if (PyErr_Occurred()) { - Py_FatalError("can't initialize module quadpack"); - } - return m; -} -#else -PyMODINIT_FUNC init_quadpack(void) { - PyObject *m, *d, *s; - m = Py_InitModule("_quadpack", quadpack_module_methods); - import_array(); - d = PyModule_GetDict(m); - - s = PyUString_FromString(" 1.13 "); - PyDict_SetItemString(d, "__version__", s); - quadpack_error = PyErr_NewException ("quadpack.error", NULL, NULL); - Py_DECREF(s); - PyDict_SetItemString(d, "error", quadpack_error); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module quadpack"); -} -#endif diff --git a/scipy-0.10.1/scipy/integrate/bento.info b/scipy-0.10.1/scipy/integrate/bento.info deleted file mode 100644 index 22d6a7febe..0000000000 --- a/scipy-0.10.1/scipy/integrate/bento.info +++ /dev/null @@ -1,38 +0,0 @@ -HookFile: bscript - -Library: - CompiledLibrary: linpack_lite - Sources: - linpack_lite/dgbfa.f, - linpack_lite/dgbsl.f, - linpack_lite/dgefa.f, - linpack_lite/dgesl.f, - linpack_lite/dgtsl.f, - linpack_lite/zgbfa.f, - linpack_lite/zgbsl.f, - linpack_lite/zgefa.f, - linpack_lite/zgesl.f - CompiledLibrary: mach - Sources: - mach/d1mach.f, - mach/i1mach.f, - mach/r1mach.f, - mach/xerror.f - CompiledLibrary: quadpack - Sources: - quadpack/*.f - CompiledLibrary: dop - 
Sources: - dop/dop853.f, - dop/dopri5.f - CompiledLibrary: odepack - Sources: - odepack/*.f - Extension: _quadpack - Sources: _quadpackmodule.c - Extension: _odepack - Sources: _odepackmodule.c - Extension: vode - Sources: vode.pyf - Extension: _dop - Sources: dop.pyf diff --git a/scipy-0.10.1/scipy/integrate/bscript b/scipy-0.10.1/scipy/integrate/bscript deleted file mode 100644 index 97da79e4d6..0000000000 --- a/scipy-0.10.1/scipy/integrate/bscript +++ /dev/null @@ -1,18 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - context.register_builder("_quadpack", - lambda e: default_builder(e, use="quadpack linpack_lite mach FBLAS CLIB")) - context.register_builder("_odepack", - lambda e: default_builder(e, use="odepack linpack_lite mach FBLAS CLIB")) - context.register_builder("vode", - lambda e: default_builder(e, - features="c cshlib pyext bento f2py", - use="odepack linpack_lite mach FBLAS CLIB")) - context.register_builder("_dop", - lambda e: default_builder(e, - features="c cshlib pyext bento f2py", - use="dop FBLAS CLIB")) diff --git a/scipy-0.10.1/scipy/integrate/dop.pyf b/scipy-0.10.1/scipy/integrate/dop.pyf deleted file mode 100644 index 48813a108c..0000000000 --- a/scipy-0.10.1/scipy/integrate/dop.pyf +++ /dev/null @@ -1,80 +0,0 @@ -!%f90 -*- f90 -*- -!Author: John Travers -!Date: 22 Feb 2009 - -python module __user__routines - interface - subroutine fcn(n,x,y,f,rpar,ipar) - integer intent(hide) :: n - double precision intent(in) :: x - double precision dimension(n),intent(in,c) :: y - double precision dimension(n),intent(out,c) :: f - double precision intent(hide) :: rpar - integer intent(hide) :: ipar - end subroutine fcn - subroutine solout(nr,xold,x,y,n,con,icomp,nd,rpar,ipar,irtn) - integer intent(in) :: nr - integer intent(hide) :: n - double precision intent(in) :: xold, x - double precision dimension(n),intent(c,in) :: y - integer intent(in) :: nd - integer 
dimension(nd), intent(in) :: icomp - double precision dimension(5*nd), intent(in) :: con - double precision intent(hide) :: rpar - integer intent(hide) :: ipar - integer intent(out) :: irtn - end subroutine solout - end interface -end python module __user__routines - -python module _dop - interface - subroutine dopri5(n,fcn,x,y,xend,rtol,atol,itol,solout,iout,work,lwork,iwork,liwork,rpar,ipar,idid) - use __user__routines - external fcn - external solout - integer intent(hide),depend(y) :: n = len(y) - double precision dimension(n),intent(in,out,copy) :: y - double precision intent(in,out):: x - double precision intent(in):: xend - double precision dimension(*),intent(in),check(len(atol)<& - &=1||len(atol)>=n),depend(n) :: atol - double precision dimension(*),intent(in),check(len(rtol)==len(atol)), & - depend(atol) :: rtol - integer intent(hide), depend(atol) :: itol = (len(atol)<=1?0:1) - integer intent(hide) :: iout=0 - double precision dimension(*), intent(in), check(len(work)>=8*n+21), & - :: work - integer intent(hide), depend(work) :: lwork = len(work) - integer intent(in,out), dimension(*), check(len(iwork)>=21) :: iwork - integer intent(hide), depend(iwork) :: liwork = len(iwork) - integer intent(out) :: idid - double precision intent(hide) :: rpar = 0.0 - integer intent(hide) :: ipar = 0 - end subroutine dopri5 - subroutine dop853(n,fcn,x,y,xend,rtol,atol,itol,solout,iout,work,lwork,iwork,liwork,rpar,ipar,idid) - use __user__routines - external fcn - external solout - integer intent(hide),depend(y) :: n = len(y) - double precision dimension(n),intent(in,out,copy) :: y - double precision intent(in,out):: x - double precision intent(in):: xend - double precision dimension(*),intent(in),check(len(atol)<& - &=1||len(atol)>=n),depend(n) :: atol - double precision dimension(*),intent(in),check(len(rtol)==len(atol)), & - depend(atol) :: rtol - integer intent(hide), depend(atol) :: itol = (len(atol)<=1?0:1) - integer intent(hide) :: iout=0 - double precision 
dimension(*), intent(in), check(len(work)>=8*n+21), & - :: work - integer intent(hide), depend(work) :: lwork = len(work) - integer intent(in,out), dimension(*), check(len(iwork)>=21) :: iwork - integer intent(hide), depend(iwork) :: liwork = len(iwork) - integer intent(out) :: idid - double precision intent(hide) :: rpar = 0.0 - integer intent(hide) :: ipar = 0 - end subroutine dop853 - end interface -end python module dop - diff --git a/scipy-0.10.1/scipy/integrate/dop/dop853.f b/scipy-0.10.1/scipy/integrate/dop/dop853.f deleted file mode 100644 index 7119a11ab5..0000000000 --- a/scipy-0.10.1/scipy/integrate/dop/dop853.f +++ /dev/null @@ -1,879 +0,0 @@ - SUBROUTINE DOP853(N,FCN,X,Y,XEND, - & RTOL,ATOL,ITOL, - & SOLOUT,IOUT, - & WORK,LWORK,IWORK,LIWORK,RPAR,IPAR,IDID) -C ---------------------------------------------------------- -C NUMERICAL SOLUTION OF A SYSTEM OF FIRST 0RDER -C ORDINARY DIFFERENTIAL EQUATIONS Y'=F(X,Y). -C THIS IS AN EXPLICIT RUNGE-KUTTA METHOD OF ORDER 8(5,3) -C DUE TO DORMAND & PRINCE (WITH STEPSIZE CONTROL AND -C DENSE OUTPUT) -C -C AUTHORS: E. HAIRER AND G. WANNER -C UNIVERSITE DE GENEVE, DEPT. DE MATHEMATIQUES -C CH-1211 GENEVE 24, SWITZERLAND -C E-MAIL: Ernst.Hairer@math.unige.ch -C Gerhard.Wanner@math.unige.ch -C -C THIS CODE IS DESCRIBED IN: -C E. HAIRER, S.P. NORSETT AND G. WANNER, SOLVING ORDINARY -C DIFFERENTIAL EQUATIONS I. NONSTIFF PROBLEMS. 2ND EDITION. -C SPRINGER SERIES IN COMPUTATIONAL MATHEMATICS, -C SPRINGER-VERLAG (1993) -C -C VERSION OF APRIL 25, 1996 -C (latest correction of a small bug: August 8, 2005) -C -C Edited (22 Feb 2009) by J.C. Travers: -C renamed HINIT->HINIT853 to avoid name collision with dopri5 -C -C INPUT PARAMETERS -C ---------------- -C N DIMENSION OF THE SYSTEM -C -C FCN NAME (EXTERNAL) OF SUBROUTINE COMPUTING THE -C VALUE OF F(X,Y): -C SUBROUTINE FCN(N,X,Y,F,RPAR,IPAR) -C DOUBLE PRECISION X,Y(N),F(N) -C F(1)=... ETC. 
-C -C X INITIAL X-VALUE -C -C Y(N) INITIAL VALUES FOR Y -C -C XEND FINAL X-VALUE (XEND-X MAY BE POSITIVE OR NEGATIVE) -C -C RTOL,ATOL RELATIVE AND ABSOLUTE ERROR TOLERANCES. THEY -C CAN BE BOTH SCALARS OR ELSE BOTH VECTORS OF LENGTH N. -C ATOL SHOULD BE STRICTLY POSITIVE (POSSIBLY VERY SMALL) -C -C ITOL SWITCH FOR RTOL AND ATOL: -C ITOL=0: BOTH RTOL AND ATOL ARE SCALARS. -C THE CODE KEEPS, ROUGHLY, THE LOCAL ERROR OF -C Y(I) BELOW RTOL*ABS(Y(I))+ATOL -C ITOL=1: BOTH RTOL AND ATOL ARE VECTORS. -C THE CODE KEEPS THE LOCAL ERROR OF Y(I) BELOW -C RTOL(I)*ABS(Y(I))+ATOL(I). -C -C SOLOUT NAME (EXTERNAL) OF SUBROUTINE PROVIDING THE -C NUMERICAL SOLUTION DURING INTEGRATION. -C IF IOUT.GE.1, IT IS CALLED AFTER EVERY SUCCESSFUL STEP. -C SUPPLY A DUMMY SUBROUTINE IF IOUT=0. -C IT MUST HAVE THE FORM -C SUBROUTINE SOLOUT (NR,XOLD,X,Y,N,CON,ICOMP,ND, -C RPAR,IPAR,IRTRN) -C DIMENSION Y(N),CON(8*ND),ICOMP(ND) -C .... -C SOLOUT FURNISHES THE SOLUTION "Y" AT THE NR-TH -C GRID-POINT "X" (THEREBY THE INITIAL VALUE IS -C THE FIRST GRID-POINT). -C "XOLD" IS THE PRECEEDING GRID-POINT. -C "IRTRN" SERVES TO INTERRUPT THE INTEGRATION. IF IRTRN -C IS SET <0, DOP853 WILL RETURN TO THE CALLING PROGRAM. -C IF THE NUMERICAL SOLUTION IS ALTERED IN SOLOUT, -C SET IRTRN = 2 -C -C ----- CONTINUOUS OUTPUT: ----- -C DURING CALLS TO "SOLOUT", A CONTINUOUS SOLUTION -C FOR THE INTERVAL [XOLD,X] IS AVAILABLE THROUGH -C THE FUNCTION -C >>> CONTD8(I,S,CON,ICOMP,ND) <<< -C WHICH PROVIDES AN APPROXIMATION TO THE I-TH -C COMPONENT OF THE SOLUTION AT THE POINT S. THE VALUE -C S SHOULD LIE IN THE INTERVAL [XOLD,X]. -C -C IOUT SWITCH FOR CALLING THE SUBROUTINE SOLOUT: -C IOUT=0: SUBROUTINE IS NEVER CALLED -C IOUT=1: SUBROUTINE IS USED FOR OUTPUT -C IOUT=2: DENSE OUTPUT IS PERFORMED IN SOLOUT -C (IN THIS CASE WORK(5) MUST BE SPECIFIED) -C -C WORK ARRAY OF WORKING SPACE OF LENGTH "LWORK". -C WORK(1),...,WORK(20) SERVE AS PARAMETERS FOR THE CODE. -C FOR STANDARD USE, SET THEM TO ZERO BEFORE CALLING. 
-C "LWORK" MUST BE AT LEAST 11*N+8*NRDENS+21 -C WHERE NRDENS = IWORK(5) -C -C LWORK DECLARED LENGHT OF ARRAY "WORK". -C -C IWORK INTEGER WORKING SPACE OF LENGHT "LIWORK". -C IWORK(1),...,IWORK(20) SERVE AS PARAMETERS FOR THE CODE. -C FOR STANDARD USE, SET THEM TO ZERO BEFORE CALLING. -C "LIWORK" MUST BE AT LEAST NRDENS+21 . -C -C LIWORK DECLARED LENGHT OF ARRAY "IWORK". -C -C RPAR, IPAR REAL AND INTEGER PARAMETERS (OR PARAMETER ARRAYS) WHICH -C CAN BE USED FOR COMMUNICATION BETWEEN YOUR CALLING -C PROGRAM AND THE FCN, JAC, MAS, SOLOUT SUBROUTINES. -C -C----------------------------------------------------------------------- -C -C SOPHISTICATED SETTING OF PARAMETERS -C ----------------------------------- -C SEVERAL PARAMETERS (WORK(1),...,IWORK(1),...) ALLOW -C TO ADAPT THE CODE TO THE PROBLEM AND TO THE NEEDS OF -C THE USER. FOR ZERO INPUT, THE CODE CHOOSES DEFAULT VALUES. -C -C WORK(1) UROUND, THE ROUNDING UNIT, DEFAULT 2.3D-16. -C -C WORK(2) THE SAFETY FACTOR IN STEP SIZE PREDICTION, -C DEFAULT 0.9D0. -C -C WORK(3), WORK(4) PARAMETERS FOR STEP SIZE SELECTION -C THE NEW STEP SIZE IS CHOSEN SUBJECT TO THE RESTRICTION -C WORK(3) <= HNEW/HOLD <= WORK(4) -C DEFAULT VALUES: WORK(3)=0.333D0, WORK(4)=6.D0 -C -C WORK(5) IS THE "BETA" FOR STABILIZED STEP SIZE CONTROL -C (SEE SECTION IV.2). POSITIVE VALUES OF BETA ( <= 0.04 ) -C MAKE THE STEP SIZE CONTROL MORE STABLE. -C NEGATIVE WORK(5) PROVOKE BETA=0. -C DEFAULT 0.0D0. -C -C WORK(6) MAXIMAL STEP SIZE, DEFAULT XEND-X. -C -C WORK(7) INITIAL STEP SIZE, FOR WORK(7)=0.D0 AN INITIAL GUESS -C IS COMPUTED WITH HELP OF THE FUNCTION HINIT -C -C IWORK(1) THIS IS THE MAXIMAL NUMBER OF ALLOWED STEPS. -C THE DEFAULT VALUE (FOR IWORK(1)=0) IS 100000. -C -C IWORK(2) SWITCH FOR THE CHOICE OF THE COEFFICIENTS -C IF IWORK(2).EQ.1 METHOD DOP853 OF DORMAND AND PRINCE -C (SECTION II.6). -C THE DEFAULT VALUE (FOR IWORK(2)=0) IS IWORK(2)=1. 
-C -C IWORK(3) SWITCH FOR PRINTING ERROR MESSAGES -C IF IWORK(3).LT.0 NO MESSAGES ARE BEING PRINTED -C IF IWORK(3).GT.0 MESSAGES ARE PRINTED WITH -C WRITE (IWORK(3),*) ... -C DEFAULT VALUE (FOR IWORK(3)=0) IS IWORK(3)=6 -C -C IWORK(4) TEST FOR STIFFNESS IS ACTIVATED AFTER STEP NUMBER -C J*IWORK(4) (J INTEGER), PROVIDED IWORK(4).GT.0. -C FOR NEGATIVE IWORK(4) THE STIFFNESS TEST IS -C NEVER ACTIVATED; DEFAULT VALUE IS IWORK(4)=1000 -C -C IWORK(5) = NRDENS = NUMBER OF COMPONENTS, FOR WHICH DENSE OUTPUT -C IS REQUIRED; DEFAULT VALUE IS IWORK(5)=0; -C FOR 0 < NRDENS < N THE COMPONENTS (FOR WHICH DENSE -C OUTPUT IS REQUIRED) HAVE TO BE SPECIFIED IN -C IWORK(21),...,IWORK(NRDENS+20); -C FOR NRDENS=N THIS IS DONE BY THE CODE. -C -C---------------------------------------------------------------------- -C -C OUTPUT PARAMETERS -C ----------------- -C X X-VALUE FOR WHICH THE SOLUTION HAS BEEN COMPUTED -C (AFTER SUCCESSFUL RETURN X=XEND). -C -C Y(N) NUMERICAL SOLUTION AT X -C -C H PREDICTED STEP SIZE OF THE LAST ACCEPTED STEP -C -C IDID REPORTS ON SUCCESSFULNESS UPON RETURN: -C IDID= 1 COMPUTATION SUCCESSFUL, -C IDID= 2 COMPUT. SUCCESSFUL (INTERRUPTED BY SOLOUT) -C IDID=-1 INPUT IS NOT CONSISTENT, -C IDID=-2 LARGER NMAX IS NEEDED, -C IDID=-3 STEP SIZE BECOMES TOO SMALL. -C IDID=-4 PROBLEM IS PROBABLY STIFF (INTERRUPTED). 
-C -C IWORK(17) NFCN NUMBER OF FUNCTION EVALUATIONS -C IWORK(18) NSTEP NUMBER OF COMPUTED STEPS -C IWORK(19) NACCPT NUMBER OF ACCEPTED STEPS -C IWORK(20) NREJCT NUMBER OF REJECTED STEPS (DUE TO ERROR TEST), -C (STEP REJECTIONS IN THE FIRST STEP ARE NOT COUNTED) -C----------------------------------------------------------------------- -C *** *** *** *** *** *** *** *** *** *** *** *** *** -C DECLARATIONS -C *** *** *** *** *** *** *** *** *** *** *** *** *** - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION Y(N),ATOL(*),RTOL(*),WORK(LWORK),IWORK(LIWORK) - DIMENSION RPAR(*),IPAR(*) - LOGICAL ARRET - EXTERNAL FCN,SOLOUT -C *** *** *** *** *** *** *** -C SETTING THE PARAMETERS -C *** *** *** *** *** *** *** - NFCN=0 - NSTEP=0 - NACCPT=0 - NREJCT=0 - ARRET=.FALSE. -C -------- IPRINT FOR MONITORING THE PRINTING - IF(IWORK(3).EQ.0)THEN - IPRINT=6 - ELSE - IPRINT=IWORK(3) - END IF -C -------- NMAX , THE MAXIMAL NUMBER OF STEPS ----- - IF(IWORK(1).EQ.0)THEN - NMAX=100000 - ELSE - NMAX=IWORK(1) - IF(NMAX.LE.0)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' WRONG INPUT IWORK(1)=',IWORK(1) - ARRET=.TRUE. - END IF - END IF -C -------- METH COEFFICIENTS OF THE METHOD - IF(IWORK(2).EQ.0)THEN - METH=1 - ELSE - METH=IWORK(2) - IF(METH.LE.0.OR.METH.GE.4)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT IWORK(2)=',IWORK(2) - ARRET=.TRUE. - END IF - END IF -C -------- NSTIFF PARAMETER FOR STIFFNESS DETECTION - NSTIFF=IWORK(4) - IF (NSTIFF.EQ.0) NSTIFF=1000 - IF (NSTIFF.LT.0) NSTIFF=NMAX+10 -C -------- NRDENS NUMBER OF DENSE OUTPUT COMPONENTS - NRDENS=IWORK(5) - IF(NRDENS.LT.0.OR.NRDENS.GT.N)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT IWORK(5)=',IWORK(5) - ARRET=.TRUE. 
- ELSE - IF(NRDENS.GT.0.AND.IOUT.LT.2)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' WARNING: PUT IOUT=2 FOR DENSE OUTPUT ' - END IF - IF (NRDENS.EQ.N) THEN - DO I=1,NRDENS - IWORK(I+20)=I - END DO - END IF - END IF -C -------- UROUND SMALLEST NUMBER SATISFYING 1.D0+UROUND>1.D0 - IF(WORK(1).EQ.0.D0)THEN - UROUND=2.3D-16 - ELSE - UROUND=WORK(1) - IF(UROUND.LE.1.D-35.OR.UROUND.GE.1.D0)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' WHICH MACHINE DO YOU HAVE? YOUR UROUND WAS:',WORK(1) - ARRET=.TRUE. - END IF - END IF -C ------- SAFETY FACTOR ------------- - IF(WORK(2).EQ.0.D0)THEN - SAFE=0.9D0 - ELSE - SAFE=WORK(2) - IF(SAFE.GE.1.D0.OR.SAFE.LE.1.D-4)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT FOR SAFETY FACTOR WORK(2)=',WORK(2) - ARRET=.TRUE. - END IF - END IF -C ------- FAC1,FAC2 PARAMETERS FOR STEP SIZE SELECTION - IF(WORK(3).EQ.0.D0)THEN - FAC1=0.333D0 - ELSE - FAC1=WORK(3) - END IF - IF(WORK(4).EQ.0.D0)THEN - FAC2=6.D0 - ELSE - FAC2=WORK(4) - END IF -C --------- BETA FOR STEP CONTROL STABILIZATION ----------- - IF(WORK(5).EQ.0.D0)THEN - BETA=0.0D0 - ELSE - IF(WORK(5).LT.0.D0)THEN - BETA=0.D0 - ELSE - BETA=WORK(5) - IF(BETA.GT.0.2D0)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT FOR BETA: WORK(5)=',WORK(5) - ARRET=.TRUE. - END IF - END IF - END IF -C -------- MAXIMAL STEP SIZE - IF(WORK(6).EQ.0.D0)THEN - HMAX=XEND-X - ELSE - HMAX=WORK(6) - END IF -C -------- INITIAL STEP SIZE - H=WORK(7) -C ------- PREPARE THE ENTRY-POINTS FOR THE ARRAYS IN WORK ----- - IEK1=21 - IEK2=IEK1+N - IEK3=IEK2+N - IEK4=IEK3+N - IEK5=IEK4+N - IEK6=IEK5+N - IEK7=IEK6+N - IEK8=IEK7+N - IEK9=IEK8+N - IEK10=IEK9+N - IEY1=IEK10+N - IECO=IEY1+N -C ------ TOTAL STORAGE REQUIREMENT ----------- - ISTORE=IECO+8*NRDENS-1 - IF(ISTORE.GT.LWORK)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' INSUFFICIENT STORAGE FOR WORK, MIN. LWORK=',ISTORE - ARRET=.TRUE. 
- END IF - ICOMP=21 - ISTORE=ICOMP+NRDENS-1 - IF(ISTORE.GT.LIWORK)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' INSUFFICIENT STORAGE FOR IWORK, MIN. LIWORK=',ISTORE - ARRET=.TRUE. - END IF -C -------- WHEN A FAIL HAS OCCURED, WE RETURN WITH IDID=-1 - IF (ARRET) THEN - IDID=-1 - RETURN - END IF -C -------- CALL TO CORE INTEGRATOR ------------ - CALL DP86CO(N,FCN,X,Y,XEND,HMAX,H,RTOL,ATOL,ITOL,IPRINT, - & SOLOUT,IOUT,IDID,NMAX,UROUND,METH,NSTIFF,SAFE,BETA,FAC1,FAC2, - & WORK(IEK1),WORK(IEK2),WORK(IEK3),WORK(IEK4),WORK(IEK5), - & WORK(IEK6),WORK(IEK7),WORK(IEK8),WORK(IEK9),WORK(IEK10), - & WORK(IEY1),WORK(IECO),IWORK(ICOMP),NRDENS,RPAR,IPAR, - & NFCN,NSTEP,NACCPT,NREJCT) - WORK(7)=H - IWORK(17)=NFCN - IWORK(18)=NSTEP - IWORK(19)=NACCPT - IWORK(20)=NREJCT -C ----------- RETURN ----------- - RETURN - END -C -C -C -C ----- ... AND HERE IS THE CORE INTEGRATOR ---------- -C - SUBROUTINE DP86CO(N,FCN,X,Y,XEND,HMAX,H,RTOL,ATOL,ITOL,IPRINT, - & SOLOUT,IOUT,IDID,NMAX,UROUND,METH,NSTIFF,SAFE,BETA,FAC1,FAC2, - & K1,K2,K3,K4,K5,K6,K7,K8,K9,K10,Y1,CONT,ICOMP,NRD,RPAR,IPAR, - & NFCN,NSTEP,NACCPT,NREJCT) -C ---------------------------------------------------------- -C CORE INTEGRATOR FOR DOP853 -C PARAMETERS SAME AS IN DOP853 WITH WORKSPACE ADDED -C ---------------------------------------------------------- -C DECLARATIONS -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - parameter ( - & c2 = 0.526001519587677318785587544488D-01, - & c3 = 0.789002279381515978178381316732D-01, - & c4 = 0.118350341907227396726757197510D+00, - & c5 = 0.281649658092772603273242802490D+00, - & c6 = 0.333333333333333333333333333333D+00, - & c7 = 0.25D+00, - & c8 = 0.307692307692307692307692307692D+00, - & c9 = 0.651282051282051282051282051282D+00, - & c10 = 0.6D+00, - & c11 = 0.857142857142857142857142857142D+00, - & c14 = 0.1D+00, - & c15 = 0.2D+00, - & c16 = 0.777777777777777777777777777778D+00) - parameter ( - & b1 = 
5.42937341165687622380535766363D-2, - & b6 = 4.45031289275240888144113950566D0, - & b7 = 1.89151789931450038304281599044D0, - & b8 = -5.8012039600105847814672114227D0, - & b9 = 3.1116436695781989440891606237D-1, - & b10 = -1.52160949662516078556178806805D-1, - & b11 = 2.01365400804030348374776537501D-1, - & b12 = 4.47106157277725905176885569043D-2) - parameter ( - & bhh1 = 0.244094488188976377952755905512D+00, - & bhh2 = 0.733846688281611857341361741547D+00, - & bhh3 = 0.220588235294117647058823529412D-01) - parameter ( - & er 1 = 0.1312004499419488073250102996D-01, - & er 6 = -0.1225156446376204440720569753D+01, - & er 7 = -0.4957589496572501915214079952D+00, - & er 8 = 0.1664377182454986536961530415D+01, - & er 9 = -0.3503288487499736816886487290D+00, - & er10 = 0.3341791187130174790297318841D+00, - & er11 = 0.8192320648511571246570742613D-01, - & er12 = -0.2235530786388629525884427845D-01) - parameter ( - & a21 = 5.26001519587677318785587544488D-2, - & a31 = 1.97250569845378994544595329183D-2, - & a32 = 5.91751709536136983633785987549D-2, - & a41 = 2.95875854768068491816892993775D-2, - & a43 = 8.87627564304205475450678981324D-2, - & a51 = 2.41365134159266685502369798665D-1, - & a53 = -8.84549479328286085344864962717D-1, - & a54 = 9.24834003261792003115737966543D-1, - & a61 = 3.7037037037037037037037037037D-2, - & a64 = 1.70828608729473871279604482173D-1, - & a65 = 1.25467687566822425016691814123D-1, - & a71 = 3.7109375D-2, - & a74 = 1.70252211019544039314978060272D-1, - & a75 = 6.02165389804559606850219397283D-2, - & a76 = -1.7578125D-2) - parameter ( - & a81 = 3.70920001185047927108779319836D-2, - & a84 = 1.70383925712239993810214054705D-1, - & a85 = 1.07262030446373284651809199168D-1, - & a86 = -1.53194377486244017527936158236D-2, - & a87 = 8.27378916381402288758473766002D-3, - & a91 = 6.24110958716075717114429577812D-1, - & a94 = -3.36089262944694129406857109825D0, - & a95 = -8.68219346841726006818189891453D-1, - & a96 = 2.75920996994467083049415600797D1, - & 
a97 = 2.01540675504778934086186788979D1, - & a98 = -4.34898841810699588477366255144D1, - & a101 = 4.77662536438264365890433908527D-1, - & a104 = -2.48811461997166764192642586468D0, - & a105 = -5.90290826836842996371446475743D-1, - & a106 = 2.12300514481811942347288949897D1, - & a107 = 1.52792336328824235832596922938D1, - & a108 = -3.32882109689848629194453265587D1, - & a109 = -2.03312017085086261358222928593D-2) - parameter ( - & a111 = -9.3714243008598732571704021658D-1, - & a114 = 5.18637242884406370830023853209D0, - & a115 = 1.09143734899672957818500254654D0, - & a116 = -8.14978701074692612513997267357D0, - & a117 = -1.85200656599969598641566180701D1, - & a118 = 2.27394870993505042818970056734D1, - & a119 = 2.49360555267965238987089396762D0, - & a1110 = -3.0467644718982195003823669022D0, - & a121 = 2.27331014751653820792359768449D0, - & a124 = -1.05344954667372501984066689879D1, - & a125 = -2.00087205822486249909675718444D0, - & a126 = -1.79589318631187989172765950534D1, - & a127 = 2.79488845294199600508499808837D1, - & a128 = -2.85899827713502369474065508674D0, - & a129 = -8.87285693353062954433549289258D0, - & a1210 = 1.23605671757943030647266201528D1, - & a1211 = 6.43392746015763530355970484046D-1) - parameter ( - & a141 = 5.61675022830479523392909219681D-2, - & a147 = 2.53500210216624811088794765333D-1, - & a148 = -2.46239037470802489917441475441D-1, - & a149 = -1.24191423263816360469010140626D-1, - & a1410 = 1.5329179827876569731206322685D-1, - & a1411 = 8.20105229563468988491666602057D-3, - & a1412 = 7.56789766054569976138603589584D-3, - & a1413 = -8.298D-3) - parameter ( - & a151 = 3.18346481635021405060768473261D-2, - & a156 = 2.83009096723667755288322961402D-2, - & a157 = 5.35419883074385676223797384372D-2, - & a158 = -5.49237485713909884646569340306D-2, - & a1511 = -1.08347328697249322858509316994D-4, - & a1512 = 3.82571090835658412954920192323D-4, - & a1513 = -3.40465008687404560802977114492D-4, - & a1514 = 1.41312443674632500278074618366D-1, - & a161 
= -4.28896301583791923408573538692D-1, - & a166 = -4.69762141536116384314449447206D0, - & a167 = 7.68342119606259904184240953878D0, - & a168 = 4.06898981839711007970213554331D0, - & a169 = 3.56727187455281109270669543021D-1, - & a1613 = -1.39902416515901462129418009734D-3, - & a1614 = 2.9475147891527723389556272149D0, - & a1615 = -9.15095847217987001081870187138D0) - parameter ( - & d41 = -0.84289382761090128651353491142D+01, - & d46 = 0.56671495351937776962531783590D+00, - & d47 = -0.30689499459498916912797304727D+01, - & d48 = 0.23846676565120698287728149680D+01, - & d49 = 0.21170345824450282767155149946D+01, - & d410 = -0.87139158377797299206789907490D+00, - & d411 = 0.22404374302607882758541771650D+01, - & d412 = 0.63157877876946881815570249290D+00, - & d413 = -0.88990336451333310820698117400D-01, - & d414 = 0.18148505520854727256656404962D+02, - & d415 = -0.91946323924783554000451984436D+01, - & d416 = -0.44360363875948939664310572000D+01) - parameter ( - & d51 = 0.10427508642579134603413151009D+02, - & d56 = 0.24228349177525818288430175319D+03, - & d57 = 0.16520045171727028198505394887D+03, - & d58 = -0.37454675472269020279518312152D+03, - & d59 = -0.22113666853125306036270938578D+02, - & d510 = 0.77334326684722638389603898808D+01, - & d511 = -0.30674084731089398182061213626D+02, - & d512 = -0.93321305264302278729567221706D+01, - & d513 = 0.15697238121770843886131091075D+02, - & d514 = -0.31139403219565177677282850411D+02, - & d515 = -0.93529243588444783865713862664D+01, - & d516 = 0.35816841486394083752465898540D+02) - parameter ( - & d61 = 0.19985053242002433820987653617D+02, - & d66 = -0.38703730874935176555105901742D+03, - & d67 = -0.18917813819516756882830838328D+03, - & d68 = 0.52780815920542364900561016686D+03, - & d69 = -0.11573902539959630126141871134D+02, - & d610 = 0.68812326946963000169666922661D+01, - & d611 = -0.10006050966910838403183860980D+01, - & d612 = 0.77771377980534432092869265740D+00, - & d613 = -0.27782057523535084065932004339D+01, - & 
d614 = -0.60196695231264120758267380846D+02, - & d615 = 0.84320405506677161018159903784D+02, - & d616 = 0.11992291136182789328035130030D+02) - parameter ( - & d71 = -0.25693933462703749003312586129D+02, - & d76 = -0.15418974869023643374053993627D+03, - & d77 = -0.23152937917604549567536039109D+03, - & d78 = 0.35763911791061412378285349910D+03, - & d79 = 0.93405324183624310003907691704D+02, - & d710 = -0.37458323136451633156875139351D+02, - & d711 = 0.10409964950896230045147246184D+03, - & d712 = 0.29840293426660503123344363579D+02, - & d713 = -0.43533456590011143754432175058D+02, - & d714 = 0.96324553959188282948394950600D+02, - & d715 = -0.39177261675615439165231486172D+02, - & d716 = -0.14972683625798562581422125276D+03) - DOUBLE PRECISION Y(N),Y1(N),K1(N),K2(N),K3(N),K4(N),K5(N),K6(N) - DOUBLE PRECISION K7(N),K8(N),K9(N),K10(N),ATOL(*),RTOL(*) - DIMENSION CONT(8*NRD),ICOMP(NRD),RPAR(*),IPAR(*) - LOGICAL REJECT,LAST - EXTERNAL FCN - COMMON /CONDO8/XOLD,HOUT -C *** *** *** *** *** *** *** -C INITIALISATIONS -C *** *** *** *** *** *** *** - FACOLD=1.D-4 - EXPO1=1.d0/8.d0-BETA*0.2D0 - FACC1=1.D0/FAC1 - FACC2=1.D0/FAC2 - POSNEG=SIGN(1.D0,XEND-X) -C --- INITIAL PREPARATIONS - ATOLI=ATOL(1) - RTOLI=RTOL(1) - LAST=.FALSE. - HLAMB=0.D0 - IASTI=0 - CALL FCN(N,X,Y,K1,RPAR,IPAR) - HMAX=ABS(HMAX) - IORD=8 - IF (H.EQ.0.D0) H=HINIT853(N,FCN,X,Y,XEND,POSNEG,K1,K2,K3,IORD, - & HMAX,ATOL,RTOL,ITOL,RPAR,IPAR) - NFCN=NFCN+2 - REJECT=.FALSE. - XOLD=X - IF (IOUT.GE.1) THEN - IRTRN=1 - HOUT=1.D0 - CALL SOLOUT(NACCPT+1,XOLD,X,Y,N,CONT,ICOMP,NRD, - & RPAR,IPAR,IRTRN) - IF (IRTRN.LT.0) GOTO 79 - END IF -C --- BASIC INTEGRATION STEP - 1 CONTINUE - IF (NSTEP.GT.NMAX) GOTO 78 - IF (0.1D0*ABS(H).LE.ABS(X)*UROUND)GOTO 77 - IF ((X+1.01D0*H-XEND)*POSNEG.GT.0.D0) THEN - H=XEND-X - LAST=.TRUE. 
- END IF - NSTEP=NSTEP+1 -C --- THE TWELVE STAGES - IF (IRTRN.GE.2) THEN - CALL FCN(N,X,Y,K1,RPAR,IPAR) - END IF - DO 22 I=1,N - 22 Y1(I)=Y(I)+H*A21*K1(I) - CALL FCN(N,X+C2*H,Y1,K2,RPAR,IPAR) - DO 23 I=1,N - 23 Y1(I)=Y(I)+H*(A31*K1(I)+A32*K2(I)) - CALL FCN(N,X+C3*H,Y1,K3,RPAR,IPAR) - DO 24 I=1,N - 24 Y1(I)=Y(I)+H*(A41*K1(I)+A43*K3(I)) - CALL FCN(N,X+C4*H,Y1,K4,RPAR,IPAR) - DO 25 I=1,N - 25 Y1(I)=Y(I)+H*(A51*K1(I)+A53*K3(I)+A54*K4(I)) - CALL FCN(N,X+C5*H,Y1,K5,RPAR,IPAR) - DO 26 I=1,N - 26 Y1(I)=Y(I)+H*(A61*K1(I)+A64*K4(I)+A65*K5(I)) - CALL FCN(N,X+C6*H,Y1,K6,RPAR,IPAR) - DO 27 I=1,N - 27 Y1(I)=Y(I)+H*(A71*K1(I)+A74*K4(I)+A75*K5(I)+A76*K6(I)) - CALL FCN(N,X+C7*H,Y1,K7,RPAR,IPAR) - DO 28 I=1,N - 28 Y1(I)=Y(I)+H*(A81*K1(I)+A84*K4(I)+A85*K5(I)+A86*K6(I)+A87*K7(I)) - CALL FCN(N,X+C8*H,Y1,K8,RPAR,IPAR) - DO 29 I=1,N - 29 Y1(I)=Y(I)+H*(A91*K1(I)+A94*K4(I)+A95*K5(I)+A96*K6(I)+A97*K7(I) - & +A98*K8(I)) - CALL FCN(N,X+C9*H,Y1,K9,RPAR,IPAR) - DO 30 I=1,N - 30 Y1(I)=Y(I)+H*(A101*K1(I)+A104*K4(I)+A105*K5(I)+A106*K6(I) - & +A107*K7(I)+A108*K8(I)+A109*K9(I)) - CALL FCN(N,X+C10*H,Y1,K10,RPAR,IPAR) - DO 31 I=1,N - 31 Y1(I)=Y(I)+H*(A111*K1(I)+A114*K4(I)+A115*K5(I)+A116*K6(I) - & +A117*K7(I)+A118*K8(I)+A119*K9(I)+A1110*K10(I)) - CALL FCN(N,X+C11*H,Y1,K2,RPAR,IPAR) - XPH=X+H - DO 32 I=1,N - 32 Y1(I)=Y(I)+H*(A121*K1(I)+A124*K4(I)+A125*K5(I)+A126*K6(I) - & +A127*K7(I)+A128*K8(I)+A129*K9(I)+A1210*K10(I)+A1211*K2(I)) - CALL FCN(N,XPH,Y1,K3,RPAR,IPAR) - NFCN=NFCN+11 - DO 35 I=1,N - K4(I)=B1*K1(I)+B6*K6(I)+B7*K7(I)+B8*K8(I)+B9*K9(I) - & +B10*K10(I)+B11*K2(I)+B12*K3(I) - 35 K5(I)=Y(I)+H*K4(I) -C --- ERROR ESTIMATION - ERR=0.D0 - ERR2=0.D0 - IF (ITOL.EQ.0) THEN - DO 41 I=1,N - SK=ATOLI+RTOLI*MAX(ABS(Y(I)),ABS(K5(I))) - ERRI=K4(I)-BHH1*K1(I)-BHH2*K9(I)-BHH3*K3(I) - ERR2=ERR2+(ERRI/SK)**2 - ERRI=ER1*K1(I)+ER6*K6(I)+ER7*K7(I)+ER8*K8(I)+ER9*K9(I) - & +ER10*K10(I)+ER11*K2(I)+ER12*K3(I) - 41 ERR=ERR+(ERRI/SK)**2 - ELSE - DO 42 I=1,N - SK=ATOL(I)+RTOL(I)*MAX(ABS(Y(I)),ABS(K5(I))) - 
ERRI=K4(I)-BHH1*K1(I)-BHH2*K9(I)-BHH3*K3(I) - ERR2=ERR2+(ERRI/SK)**2 - ERRI=ER1*K1(I)+ER6*K6(I)+ER7*K7(I)+ER8*K8(I)+ER9*K9(I) - & +ER10*K10(I)+ER11*K2(I)+ER12*K3(I) - 42 ERR=ERR+(ERRI/SK)**2 - END IF - DENO=ERR+0.01D0*ERR2 - IF (DENO.LE.0.D0) DENO=1.D0 - ERR=ABS(H)*ERR*SQRT(1.D0/(N*DENO)) -C --- COMPUTATION OF HNEW - FAC11=ERR**EXPO1 -C --- LUND-STABILIZATION - FAC=FAC11/FACOLD**BETA -C --- WE REQUIRE FAC1 <= HNEW/H <= FAC2 - FAC=MAX(FACC2,MIN(FACC1,FAC/SAFE)) - HNEW=H/FAC - IF(ERR.LE.1.D0)THEN -C --- STEP IS ACCEPTED - FACOLD=MAX(ERR,1.0D-4) - NACCPT=NACCPT+1 - CALL FCN(N,XPH,K5,K4,RPAR,IPAR) - NFCN=NFCN+1 -C ------- STIFFNESS DETECTION - IF (MOD(NACCPT,NSTIFF).EQ.0.OR.IASTI.GT.0) THEN - STNUM=0.D0 - STDEN=0.D0 - DO 64 I=1,N - STNUM=STNUM+(K4(I)-K3(I))**2 - STDEN=STDEN+(K5(I)-Y1(I))**2 - 64 CONTINUE - IF (STDEN.GT.0.D0) HLAMB=ABS(H)*SQRT(STNUM/STDEN) - IF (HLAMB.GT.6.1D0) THEN - NONSTI=0 - IASTI=IASTI+1 - IF (IASTI.EQ.15) THEN - IF (IPRINT.GT.0) WRITE (IPRINT,*) - & ' THE PROBLEM SEEMS TO BECOME STIFF AT X = ',X - IF (IPRINT.LE.0) GOTO 76 - END IF - ELSE - NONSTI=NONSTI+1 - IF (NONSTI.EQ.6) IASTI=0 - END IF - END IF -C ------- FINAL PREPARATION FOR DENSE OUTPUT - IF (IOUT.GE.2) THEN -C ---- SAVE THE FIRST FUNCTION EVALUATIONS - DO 62 J=1,NRD - I=ICOMP(J) - CONT(J)=Y(I) - YDIFF=K5(I)-Y(I) - CONT(J+NRD)=YDIFF - BSPL=H*K1(I)-YDIFF - CONT(J+NRD*2)=BSPL - CONT(J+NRD*3)=YDIFF-H*K4(I)-BSPL - CONT(J+NRD*4)=D41*K1(I)+D46*K6(I)+D47*K7(I)+D48*K8(I) - & +D49*K9(I)+D410*K10(I)+D411*K2(I)+D412*K3(I) - CONT(J+NRD*5)=D51*K1(I)+D56*K6(I)+D57*K7(I)+D58*K8(I) - & +D59*K9(I)+D510*K10(I)+D511*K2(I)+D512*K3(I) - CONT(J+NRD*6)=D61*K1(I)+D66*K6(I)+D67*K7(I)+D68*K8(I) - & +D69*K9(I)+D610*K10(I)+D611*K2(I)+D612*K3(I) - CONT(J+NRD*7)=D71*K1(I)+D76*K6(I)+D77*K7(I)+D78*K8(I) - & +D79*K9(I)+D710*K10(I)+D711*K2(I)+D712*K3(I) - 62 CONTINUE -C --- THE NEXT THREE FUNCTION EVALUATIONS - DO 51 I=1,N - 51 Y1(I)=Y(I)+H*(A141*K1(I)+A147*K7(I)+A148*K8(I) - & 
+A149*K9(I)+A1410*K10(I)+A1411*K2(I)+A1412*K3(I) - & +A1413*K4(I)) - CALL FCN(N,X+C14*H,Y1,K10,RPAR,IPAR) - DO 52 I=1,N - 52 Y1(I)=Y(I)+H*(A151*K1(I)+A156*K6(I)+A157*K7(I) - & +A158*K8(I)+A1511*K2(I)+A1512*K3(I)+A1513*K4(I) - & +A1514*K10(I)) - CALL FCN(N,X+C15*H,Y1,K2,RPAR,IPAR) - DO 53 I=1,N - 53 Y1(I)=Y(I)+H*(A161*K1(I)+A166*K6(I)+A167*K7(I) - & +A168*K8(I)+A169*K9(I)+A1613*K4(I)+A1614*K10(I) - & +A1615*K2(I)) - CALL FCN(N,X+C16*H,Y1,K3,RPAR,IPAR) - NFCN=NFCN+3 -C --- FINAL PREPARATION - DO 63 J=1,NRD - I=ICOMP(J) - CONT(J+NRD*4)=H*(CONT(J+NRD*4)+D413*K4(I)+D414*K10(I) - & +D415*K2(I)+D416*K3(I)) - CONT(J+NRD*5)=H*(CONT(J+NRD*5)+D513*K4(I)+D514*K10(I) - & +D515*K2(I)+D516*K3(I)) - CONT(J+NRD*6)=H*(CONT(J+NRD*6)+D613*K4(I)+D614*K10(I) - & +D615*K2(I)+D616*K3(I)) - CONT(J+NRD*7)=H*(CONT(J+NRD*7)+D713*K4(I)+D714*K10(I) - & +D715*K2(I)+D716*K3(I)) - 63 CONTINUE - HOUT=H - END IF - DO 67 I=1,N - K1(I)=K4(I) - 67 Y(I)=K5(I) - XOLD=X - X=XPH - IF (IOUT.GE.1) THEN - CALL SOLOUT(NACCPT+1,XOLD,X,Y,N,CONT,ICOMP,NRD, - & RPAR,IPAR,IRTRN) - IF (IRTRN.LT.0) GOTO 79 - END IF -C ------- NORMAL EXIT - IF (LAST) THEN - H=HNEW - IDID=1 - RETURN - END IF - IF(ABS(HNEW).GT.HMAX)HNEW=POSNEG*HMAX - IF(REJECT)HNEW=POSNEG*MIN(ABS(HNEW),ABS(H)) - REJECT=.FALSE. - ELSE -C --- STEP IS REJECTED - HNEW=H/MIN(FACC1,FAC11/SAFE) - REJECT=.TRUE. - IF(NACCPT.GE.1)NREJCT=NREJCT+1 - LAST=.FALSE. 
- END IF - H=HNEW - GOTO 1 -C --- FAIL EXIT - 76 CONTINUE - IDID=-4 - RETURN - 77 CONTINUE - IF (IPRINT.GT.0) WRITE(IPRINT,979)X - IF (IPRINT.GT.0) WRITE(IPRINT,*)' STEP SIZE TOO SMALL, H=',H - IDID=-3 - RETURN - 78 CONTINUE - IF (IPRINT.GT.0) WRITE(IPRINT,979)X - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' MORE THAN NMAX =',NMAX,'STEPS ARE NEEDED' - IDID=-2 - RETURN - 79 CONTINUE - IF (IPRINT.GT.0) WRITE(IPRINT,979)X - 979 FORMAT(' EXIT OF DOP853 AT X=',E18.4) - IDID=2 - RETURN - END -C - FUNCTION HINIT853(N,FCN,X,Y,XEND,POSNEG,F0,F1,Y1,IORD, - & HMAX,ATOL,RTOL,ITOL,RPAR,IPAR) -C ---------------------------------------------------------- -C ---- COMPUTATION OF AN INITIAL STEP SIZE GUESS -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION Y(N),Y1(N),F0(N),F1(N),ATOL(*),RTOL(*) - DIMENSION RPAR(*),IPAR(*) -C ---- COMPUTE A FIRST GUESS FOR EXPLICIT EULER AS -C ---- H = 0.01 * NORM (Y0) / NORM (F0) -C ---- THE INCREMENT FOR EXPLICIT EULER IS SMALL -C ---- COMPARED TO THE SOLUTION - DNF=0.0D0 - DNY=0.0D0 - ATOLI=ATOL(1) - RTOLI=RTOL(1) - IF (ITOL.EQ.0) THEN - DO 10 I=1,N - SK=ATOLI+RTOLI*ABS(Y(I)) - DNF=DNF+(F0(I)/SK)**2 - 10 DNY=DNY+(Y(I)/SK)**2 - ELSE - DO 11 I=1,N - SK=ATOL(I)+RTOL(I)*ABS(Y(I)) - DNF=DNF+(F0(I)/SK)**2 - 11 DNY=DNY+(Y(I)/SK)**2 - END IF - IF (DNF.LE.1.D-10.OR.DNY.LE.1.D-10) THEN - H=1.0D-6 - ELSE - H=SQRT(DNY/DNF)*0.01D0 - END IF - H=MIN(H,HMAX) - H=SIGN(H,POSNEG) -C ---- PERFORM AN EXPLICIT EULER STEP - DO 12 I=1,N - 12 Y1(I)=Y(I)+H*F0(I) - CALL FCN(N,X+H,Y1,F1,RPAR,IPAR) -C ---- ESTIMATE THE SECOND DERIVATIVE OF THE SOLUTION - DER2=0.0D0 - IF (ITOL.EQ.0) THEN - DO 15 I=1,N - SK=ATOLI+RTOLI*ABS(Y(I)) - 15 DER2=DER2+((F1(I)-F0(I))/SK)**2 - ELSE - DO 16 I=1,N - SK=ATOL(I)+RTOL(I)*ABS(Y(I)) - 16 DER2=DER2+((F1(I)-F0(I))/SK)**2 - END IF - DER2=SQRT(DER2)/H -C ---- STEP SIZE IS COMPUTED SUCH THAT -C ---- H**IORD * MAX ( NORM (F0), NORM (DER2)) = 0.01 - DER12=MAX(ABS(DER2),SQRT(DNF)) - IF 
(DER12.LE.1.D-15) THEN - H1=MAX(1.0D-6,ABS(H)*1.0D-3) - ELSE - H1=(0.01D0/DER12)**(1.D0/IORD) - END IF - H=MIN(100*ABS(H),H1,HMAX) - HINIT853=SIGN(H,POSNEG) - RETURN - END -C - FUNCTION CONTD8(II,X,CON,ICOMP,ND) -C ---------------------------------------------------------- -C THIS FUNCTION CAN BE USED FOR CONINUOUS OUTPUT IN CONNECTION -C WITH THE OUTPUT-SUBROUTINE FOR DOP853. IT PROVIDES AN -C APPROXIMATION TO THE II-TH COMPONENT OF THE SOLUTION AT X. -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CON(8*ND),ICOMP(ND) - COMMON /CONDO8/XOLD,H -C ----- COMPUTE PLACE OF II-TH COMPONENT - I=0 - DO 5 J=1,ND - IF (ICOMP(J).EQ.II) I=J - 5 CONTINUE - IF (I.EQ.0) THEN - WRITE (6,*) ' NO DENSE OUTPUT AVAILABLE FOR COMP.',II - RETURN - END IF - S=(X-XOLD)/H - S1=1.D0-S - CONPAR=CON(I+ND*4)+S*(CON(I+ND*5)+S1*(CON(I+ND*6)+S*CON(I+ND*7))) - CONTD8=CON(I)+S*(CON(I+ND)+S1*(CON(I+ND*2)+S*(CON(I+ND*3) - & +S1*CONPAR))) - RETURN - END - diff --git a/scipy-0.10.1/scipy/integrate/dop/dopri5.f b/scipy-0.10.1/scipy/integrate/dop/dopri5.f deleted file mode 100644 index e6c2762542..0000000000 --- a/scipy-0.10.1/scipy/integrate/dop/dopri5.f +++ /dev/null @@ -1,693 +0,0 @@ - SUBROUTINE DOPRI5(N,FCN,X,Y,XEND, - & RTOL,ATOL,ITOL, - & SOLOUT,IOUT, - & WORK,LWORK,IWORK,LIWORK,RPAR,IPAR,IDID) -C ---------------------------------------------------------- -C NUMERICAL SOLUTION OF A SYSTEM OF FIRST 0RDER -C ORDINARY DIFFERENTIAL EQUATIONS Y'=F(X,Y). -C THIS IS AN EXPLICIT RUNGE-KUTTA METHOD OF ORDER (4)5 -C DUE TO DORMAND & PRINCE (WITH STEPSIZE CONTROL AND -C DENSE OUTPUT). -C -C AUTHORS: E. HAIRER AND G. WANNER -C UNIVERSITE DE GENEVE, DEPT. DE MATHEMATIQUES -C CH-1211 GENEVE 24, SWITZERLAND -C E-MAIL: Ernst.Hairer@math.unige.ch -C Gerhard.Wanner@math.unige.ch -C -C THIS CODE IS DESCRIBED IN: -C E. HAIRER, S.P. NORSETT AND G. WANNER, SOLVING ORDINARY -C DIFFERENTIAL EQUATIONS I. NONSTIFF PROBLEMS. 2ND EDITION. 
-C SPRINGER SERIES IN COMPUTATIONAL MATHEMATICS, -C SPRINGER-VERLAG (1993) -C -C VERSION OF APRIL 25, 1996 -C (latest correction of a small bug: August 8, 2005) -C -C INPUT PARAMETERS -C ---------------- -C N DIMENSION OF THE SYSTEM -C -C FCN NAME (EXTERNAL) OF SUBROUTINE COMPUTING THE -C VALUE OF F(X,Y): -C SUBROUTINE FCN(N,X,Y,F,RPAR,IPAR) -C DOUBLE PRECISION X,Y(N),F(N) -C F(1)=... ETC. -C -C X INITIAL X-VALUE -C -C Y(N) INITIAL VALUES FOR Y -C -C XEND FINAL X-VALUE (XEND-X MAY BE POSITIVE OR NEGATIVE) -C -C RTOL,ATOL RELATIVE AND ABSOLUTE ERROR TOLERANCES. THEY -C CAN BE BOTH SCALARS OR ELSE BOTH VECTORS OF LENGTH N. -C -C ITOL SWITCH FOR RTOL AND ATOL: -C ITOL=0: BOTH RTOL AND ATOL ARE SCALARS. -C THE CODE KEEPS, ROUGHLY, THE LOCAL ERROR OF -C Y(I) BELOW RTOL*ABS(Y(I))+ATOL -C ITOL=1: BOTH RTOL AND ATOL ARE VECTORS. -C THE CODE KEEPS THE LOCAL ERROR OF Y(I) BELOW -C RTOL(I)*ABS(Y(I))+ATOL(I). -C -C SOLOUT NAME (EXTERNAL) OF SUBROUTINE PROVIDING THE -C NUMERICAL SOLUTION DURING INTEGRATION. -C IF IOUT.GE.1, IT IS CALLED AFTER EVERY SUCCESSFUL STEP. -C SUPPLY A DUMMY SUBROUTINE IF IOUT=0. -C IT MUST HAVE THE FORM -C SUBROUTINE SOLOUT (NR,XOLD,X,Y,N,CON,ICOMP,ND, -C RPAR,IPAR,IRTRN) -C DIMENSION Y(N),CON(5*ND),ICOMP(ND) -C .... -C SOLOUT FURNISHES THE SOLUTION "Y" AT THE NR-TH -C GRID-POINT "X" (THEREBY THE INITIAL VALUE IS -C THE FIRST GRID-POINT). -C "XOLD" IS THE PRECEEDING GRID-POINT. -C "IRTRN" SERVES TO INTERRUPT THE INTEGRATION. IF IRTRN -C IS SET <0, DOPRI5 WILL RETURN TO THE CALLING PROGRAM. -C IF THE NUMERICAL SOLUTION IS ALTERED IN SOLOUT, -C SET IRTRN = 2 -C -C ----- CONTINUOUS OUTPUT: ----- -C DURING CALLS TO "SOLOUT", A CONTINUOUS SOLUTION -C FOR THE INTERVAL [XOLD,X] IS AVAILABLE THROUGH -C THE FUNCTION -C >>> CONTD5(I,S,CON,ICOMP,ND) <<< -C WHICH PROVIDES AN APPROXIMATION TO THE I-TH -C COMPONENT OF THE SOLUTION AT THE POINT S. THE VALUE -C S SHOULD LIE IN THE INTERVAL [XOLD,X]. 
-C -C IOUT SWITCH FOR CALLING THE SUBROUTINE SOLOUT: -C IOUT=0: SUBROUTINE IS NEVER CALLED -C IOUT=1: SUBROUTINE IS USED FOR OUTPUT. -C IOUT=2: DENSE OUTPUT IS PERFORMED IN SOLOUT -C (IN THIS CASE WORK(5) MUST BE SPECIFIED) -C -C WORK ARRAY OF WORKING SPACE OF LENGTH "LWORK". -C WORK(1),...,WORK(20) SERVE AS PARAMETERS FOR THE CODE. -C FOR STANDARD USE, SET THEM TO ZERO BEFORE CALLING. -C "LWORK" MUST BE AT LEAST 8*N+5*NRDENS+21 -C WHERE NRDENS = IWORK(5) -C -C LWORK DECLARED LENGHT OF ARRAY "WORK". -C -C IWORK INTEGER WORKING SPACE OF LENGHT "LIWORK". -C IWORK(1),...,IWORK(20) SERVE AS PARAMETERS FOR THE CODE. -C FOR STANDARD USE, SET THEM TO ZERO BEFORE CALLING. -C "LIWORK" MUST BE AT LEAST NRDENS+21 . -C -C LIWORK DECLARED LENGHT OF ARRAY "IWORK". -C -C RPAR, IPAR REAL AND INTEGER PARAMETERS (OR PARAMETER ARRAYS) WHICH -C CAN BE USED FOR COMMUNICATION BETWEEN YOUR CALLING -C PROGRAM AND THE FCN, JAC, MAS, SOLOUT SUBROUTINES. -C -C----------------------------------------------------------------------- -C -C SOPHISTICATED SETTING OF PARAMETERS -C ----------------------------------- -C SEVERAL PARAMETERS (WORK(1),...,IWORK(1),...) ALLOW -C TO ADAPT THE CODE TO THE PROBLEM AND TO THE NEEDS OF -C THE USER. FOR ZERO INPUT, THE CODE CHOOSES DEFAULT VALUES. -C -C WORK(1) UROUND, THE ROUNDING UNIT, DEFAULT 2.3D-16. -C -C WORK(2) THE SAFETY FACTOR IN STEP SIZE PREDICTION, -C DEFAULT 0.9D0. -C -C WORK(3), WORK(4) PARAMETERS FOR STEP SIZE SELECTION -C THE NEW STEP SIZE IS CHOSEN SUBJECT TO THE RESTRICTION -C WORK(3) <= HNEW/HOLD <= WORK(4) -C DEFAULT VALUES: WORK(3)=0.2D0, WORK(4)=10.D0 -C -C WORK(5) IS THE "BETA" FOR STABILIZED STEP SIZE CONTROL -C (SEE SECTION IV.2). LARGER VALUES OF BETA ( <= 0.1 ) -C MAKE THE STEP SIZE CONTROL MORE STABLE. DOPRI5 NEEDS -C A LARGER BETA THAN HIGHAM & HALL. NEGATIVE WORK(5) -C PROVOKE BETA=0. -C DEFAULT 0.04D0. -C -C WORK(6) MAXIMAL STEP SIZE, DEFAULT XEND-X. 
-C -C WORK(7) INITIAL STEP SIZE, FOR WORK(7)=0.D0 AN INITIAL GUESS -C IS COMPUTED WITH HELP OF THE FUNCTION HINIT -C -C IWORK(1) THIS IS THE MAXIMAL NUMBER OF ALLOWED STEPS. -C THE DEFAULT VALUE (FOR IWORK(1)=0) IS 100000. -C -C IWORK(2) SWITCH FOR THE CHOICE OF THE COEFFICIENTS -C IF IWORK(2).EQ.1 METHOD DOPRI5 OF DORMAND AND PRINCE -C (TABLE 5.2 OF SECTION II.5). -C AT THE MOMENT THIS IS THE ONLY POSSIBLE CHOICE. -C THE DEFAULT VALUE (FOR IWORK(2)=0) IS IWORK(2)=1. -C -C IWORK(3) SWITCH FOR PRINTING ERROR MESSAGES -C IF IWORK(3).LT.0 NO MESSAGES ARE BEING PRINTED -C IF IWORK(3).GT.0 MESSAGES ARE PRINTED WITH -C WRITE (IWORK(3),*) ... -C DEFAULT VALUE (FOR IWORK(3)=0) IS IWORK(3)=6 -C -C IWORK(4) TEST FOR STIFFNESS IS ACTIVATED AFTER STEP NUMBER -C J*IWORK(4) (J INTEGER), PROVIDED IWORK(4).GT.0. -C FOR NEGATIVE IWORK(4) THE STIFFNESS TEST IS -C NEVER ACTIVATED; DEFAULT VALUE IS IWORK(4)=1000 -C -C IWORK(5) = NRDENS = NUMBER OF COMPONENTS, FOR WHICH DENSE OUTPUT -C IS REQUIRED; DEFAULT VALUE IS IWORK(5)=0; -C FOR 0 < NRDENS < N THE COMPONENTS (FOR WHICH DENSE -C OUTPUT IS REQUIRED) HAVE TO BE SPECIFIED IN -C IWORK(21),...,IWORK(NRDENS+20); -C FOR NRDENS=N THIS IS DONE BY THE CODE. -C -C---------------------------------------------------------------------- -C -C OUTPUT PARAMETERS -C ----------------- -C X X-VALUE FOR WHICH THE SOLUTION HAS BEEN COMPUTED -C (AFTER SUCCESSFUL RETURN X=XEND). -C -C Y(N) NUMERICAL SOLUTION AT X -C -C H PREDICTED STEP SIZE OF THE LAST ACCEPTED STEP -C -C IDID REPORTS ON SUCCESSFULNESS UPON RETURN: -C IDID= 1 COMPUTATION SUCCESSFUL, -C IDID= 2 COMPUT. SUCCESSFUL (INTERRUPTED BY SOLOUT) -C IDID=-1 INPUT IS NOT CONSISTENT, -C IDID=-2 LARGER NMAX IS NEEDED, -C IDID=-3 STEP SIZE BECOMES TOO SMALL. -C IDID=-4 PROBLEM IS PROBABLY STIFF (INTERRUPTED). 
-C -C IWORK(17) NFCN NUMBER OF FUNCTION EVALUATIONS -C IWORK(18) NSTEP NUMBER OF COMPUTED STEPS -C IWORK(19) NACCPT NUMBER OF ACCEPTED STEPS -C IWORK(20) NREJCT NUMBER OF REJECTED STEPS (DUE TO ERROR TEST), -C (STEP REJECTIONS IN THE FIRST STEP ARE NOT COUNTED) -C----------------------------------------------------------------------- -C *** *** *** *** *** *** *** *** *** *** *** *** *** -C DECLARATIONS -C *** *** *** *** *** *** *** *** *** *** *** *** *** - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION Y(N),ATOL(*),RTOL(*),WORK(LWORK),IWORK(LIWORK) - DIMENSION RPAR(*),IPAR(*) - LOGICAL ARRET - EXTERNAL FCN,SOLOUT -C *** *** *** *** *** *** *** -C SETTING THE PARAMETERS -C *** *** *** *** *** *** *** - NFCN=0 - NSTEP=0 - NACCPT=0 - NREJCT=0 - ARRET=.FALSE. -C -------- IPRINT FOR MONITORING THE PRINTING - IF(IWORK(3).EQ.0)THEN - IPRINT=6 - ELSE - IPRINT=IWORK(3) - END IF -C -------- NMAX , THE MAXIMAL NUMBER OF STEPS ----- - IF(IWORK(1).EQ.0)THEN - NMAX=100000 - ELSE - NMAX=IWORK(1) - IF(NMAX.LE.0)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' WRONG INPUT IWORK(1)=',IWORK(1) - ARRET=.TRUE. - END IF - END IF -C -------- METH COEFFICIENTS OF THE METHOD - IF(IWORK(2).EQ.0)THEN - METH=1 - ELSE - METH=IWORK(2) - IF(METH.LE.0.OR.METH.GE.4)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT IWORK(2)=',IWORK(2) - ARRET=.TRUE. - END IF - END IF -C -------- NSTIFF PARAMETER FOR STIFFNESS DETECTION - NSTIFF=IWORK(4) - IF (NSTIFF.EQ.0) NSTIFF=1000 - IF (NSTIFF.LT.0) NSTIFF=NMAX+10 -C -------- NRDENS NUMBER OF DENSE OUTPUT COMPONENTS - NRDENS=IWORK(5) - IF(NRDENS.LT.0.OR.NRDENS.GT.N)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT IWORK(5)=',IWORK(5) - ARRET=.TRUE. 
- ELSE - IF(NRDENS.GT.0.AND.IOUT.LT.2)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' WARNING: PUT IOUT=2 FOR DENSE OUTPUT ' - END IF - IF (NRDENS.EQ.N) THEN - DO 16 I=1,NRDENS - 16 IWORK(20+I)=I - END IF - END IF -C -------- UROUND SMALLEST NUMBER SATISFYING 1.D0+UROUND>1.D0 - IF(WORK(1).EQ.0.D0)THEN - UROUND=2.3D-16 - ELSE - UROUND=WORK(1) - IF(UROUND.LE.1.D-35.OR.UROUND.GE.1.D0)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' WHICH MACHINE DO YOU HAVE? YOUR UROUND WAS:',WORK(1) - ARRET=.TRUE. - END IF - END IF -C ------- SAFETY FACTOR ------------- - IF(WORK(2).EQ.0.D0)THEN - SAFE=0.9D0 - ELSE - SAFE=WORK(2) - IF(SAFE.GE.1.D0.OR.SAFE.LE.1.D-4)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT FOR SAFETY FACTOR WORK(2)=',WORK(2) - ARRET=.TRUE. - END IF - END IF -C ------- FAC1,FAC2 PARAMETERS FOR STEP SIZE SELECTION - IF(WORK(3).EQ.0.D0)THEN - FAC1=0.2D0 - ELSE - FAC1=WORK(3) - END IF - IF(WORK(4).EQ.0.D0)THEN - FAC2=10.D0 - ELSE - FAC2=WORK(4) - END IF -C --------- BETA FOR STEP CONTROL STABILIZATION ----------- - IF(WORK(5).EQ.0.D0)THEN - BETA=0.04D0 - ELSE - IF(WORK(5).LT.0.D0)THEN - BETA=0.D0 - ELSE - BETA=WORK(5) - IF(BETA.GT.0.2D0)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' CURIOUS INPUT FOR BETA: WORK(5)=',WORK(5) - ARRET=.TRUE. - END IF - END IF - END IF -C -------- MAXIMAL STEP SIZE - IF(WORK(6).EQ.0.D0)THEN - HMAX=XEND-X - ELSE - HMAX=WORK(6) - END IF -C -------- INITIAL STEP SIZE - H=WORK(7) -C ------- PREPARE THE ENTRY-POINTS FOR THE ARRAYS IN WORK ----- - IEY1=21 - IEK1=IEY1+N - IEK2=IEK1+N - IEK3=IEK2+N - IEK4=IEK3+N - IEK5=IEK4+N - IEK6=IEK5+N - IEYS=IEK6+N - IECO=IEYS+N -C ------ TOTAL STORAGE REQUIREMENT ----------- - ISTORE=IEYS+5*NRDENS-1 - IF(ISTORE.GT.LWORK)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' INSUFFICIENT STORAGE FOR WORK, MIN. LWORK=',ISTORE - ARRET=.TRUE. - END IF - ICOMP=21 - ISTORE=ICOMP+NRDENS-1 - IF(ISTORE.GT.LIWORK)THEN - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' INSUFFICIENT STORAGE FOR IWORK, MIN. 
LIWORK=',ISTORE - ARRET=.TRUE. - END IF -C ------ WHEN A FAIL HAS OCCURED, WE RETURN WITH IDID=-1 - IF (ARRET) THEN - IDID=-1 - RETURN - END IF -C -------- CALL TO CORE INTEGRATOR ------------ - CALL DOPCOR(N,FCN,X,Y,XEND,HMAX,H,RTOL,ATOL,ITOL,IPRINT, - & SOLOUT,IOUT,IDID,NMAX,UROUND,METH,NSTIFF,SAFE,BETA,FAC1,FAC2, - & WORK(IEY1),WORK(IEK1),WORK(IEK2),WORK(IEK3),WORK(IEK4), - & WORK(IEK5),WORK(IEK6),WORK(IEYS),WORK(IECO),IWORK(ICOMP), - & NRDENS,RPAR,IPAR,NFCN,NSTEP,NACCPT,NREJCT) - WORK(7)=H - IWORK(17)=NFCN - IWORK(18)=NSTEP - IWORK(19)=NACCPT - IWORK(20)=NREJCT -C ----------- RETURN ----------- - RETURN - END -C -C -C -C ----- ... AND HERE IS THE CORE INTEGRATOR ---------- -C - SUBROUTINE DOPCOR(N,FCN,X,Y,XEND,HMAX,H,RTOL,ATOL,ITOL,IPRINT, - & SOLOUT,IOUT,IDID,NMAX,UROUND,METH,NSTIFF,SAFE,BETA,FAC1,FAC2, - & Y1,K1,K2,K3,K4,K5,K6,YSTI,CONT,ICOMP,NRD,RPAR,IPAR, - & NFCN,NSTEP,NACCPT,NREJCT) -C ---------------------------------------------------------- -C CORE INTEGRATOR FOR DOPRI5 -C PARAMETERS SAME AS IN DOPRI5 WITH WORKSPACE ADDED -C ---------------------------------------------------------- -C DECLARATIONS -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DOUBLE PRECISION K1(N),K2(N),K3(N),K4(N),K5(N),K6(N) - DIMENSION Y(N),Y1(N),YSTI(N),ATOL(*),RTOL(*),RPAR(*),IPAR(*) - DIMENSION CONT(5*NRD),ICOMP(NRD) - LOGICAL REJECT,LAST - EXTERNAL FCN - COMMON /CONDO5/XOLD,HOUT -C *** *** *** *** *** *** *** -C INITIALISATIONS -C *** *** *** *** *** *** *** - IF (METH.EQ.1) CALL CDOPRI(C2,C3,C4,C5,E1,E3,E4,E5,E6,E7, - & A21,A31,A32,A41,A42,A43,A51,A52,A53,A54, - & A61,A62,A63,A64,A65,A71,A73,A74,A75,A76, - & D1,D3,D4,D5,D6,D7) - FACOLD=1.D-4 - EXPO1=0.2D0-BETA*0.75D0 - FACC1=1.D0/FAC1 - FACC2=1.D0/FAC2 - POSNEG=SIGN(1.D0,XEND-X) -C --- INITIAL PREPARATIONS - ATOLI=ATOL(1) - RTOLI=RTOL(1) - LAST=.FALSE. 
- HLAMB=0.D0 - IASTI=0 - CALL FCN(N,X,Y,K1,RPAR,IPAR) - HMAX=ABS(HMAX) - IORD=5 - IF (H.EQ.0.D0) H=HINIT(N,FCN,X,Y,XEND,POSNEG,K1,K2,K3,IORD, - & HMAX,ATOL,RTOL,ITOL,RPAR,IPAR) - NFCN=NFCN+2 - REJECT=.FALSE. - XOLD=X - IF (IOUT.NE.0) THEN - IRTRN=1 - HOUT=H - CALL SOLOUT(NACCPT+1,XOLD,X,Y,N,CONT,ICOMP,NRD, - & RPAR,IPAR,IRTRN) - IF (IRTRN.LT.0) GOTO 79 - ELSE - IRTRN=0 - END IF -C --- BASIC INTEGRATION STEP - 1 CONTINUE - IF (NSTEP.GT.NMAX) GOTO 78 - IF (0.1D0*ABS(H).LE.ABS(X)*UROUND)GOTO 77 - IF ((X+1.01D0*H-XEND)*POSNEG.GT.0.D0) THEN - H=XEND-X - LAST=.TRUE. - END IF - NSTEP=NSTEP+1 -C --- THE FIRST 6 STAGES - IF (IRTRN.GE.2) THEN - CALL FCN(N,X,Y,K1,RPAR,IPAR) - END IF - DO 22 I=1,N - 22 Y1(I)=Y(I)+H*A21*K1(I) - CALL FCN(N,X+C2*H,Y1,K2,RPAR,IPAR) - DO 23 I=1,N - 23 Y1(I)=Y(I)+H*(A31*K1(I)+A32*K2(I)) - CALL FCN(N,X+C3*H,Y1,K3,RPAR,IPAR) - DO 24 I=1,N - 24 Y1(I)=Y(I)+H*(A41*K1(I)+A42*K2(I)+A43*K3(I)) - CALL FCN(N,X+C4*H,Y1,K4,RPAR,IPAR) - DO 25 I=1,N - 25 Y1(I)=Y(I)+H*(A51*K1(I)+A52*K2(I)+A53*K3(I)+A54*K4(I)) - CALL FCN(N,X+C5*H,Y1,K5,RPAR,IPAR) - DO 26 I=1,N - 26 YSTI(I)=Y(I)+H*(A61*K1(I)+A62*K2(I)+A63*K3(I)+A64*K4(I)+A65*K5(I)) - XPH=X+H - CALL FCN(N,XPH,YSTI,K6,RPAR,IPAR) - DO 27 I=1,N - 27 Y1(I)=Y(I)+H*(A71*K1(I)+A73*K3(I)+A74*K4(I)+A75*K5(I)+A76*K6(I)) - CALL FCN(N,XPH,Y1,K2,RPAR,IPAR) - IF (IOUT.GE.2) THEN - DO 40 J=1,NRD - I=ICOMP(J) - CONT(4*NRD+J)=H*(D1*K1(I)+D3*K3(I)+D4*K4(I)+D5*K5(I) - & +D6*K6(I)+D7*K2(I)) - 40 CONTINUE - END IF - DO 28 I=1,N - 28 K4(I)=(E1*K1(I)+E3*K3(I)+E4*K4(I)+E5*K5(I)+E6*K6(I)+E7*K2(I))*H - NFCN=NFCN+6 -C --- ERROR ESTIMATION - ERR=0.D0 - IF (ITOL.EQ.0) THEN - DO 41 I=1,N - SK=ATOLI+RTOLI*MAX(ABS(Y(I)),ABS(Y1(I))) - 41 ERR=ERR+(K4(I)/SK)**2 - ELSE - DO 42 I=1,N - SK=ATOL(I)+RTOL(I)*MAX(ABS(Y(I)),ABS(Y1(I))) - 42 ERR=ERR+(K4(I)/SK)**2 - END IF - ERR=SQRT(ERR/N) -C --- COMPUTATION OF HNEW - FAC11=ERR**EXPO1 -C --- LUND-STABILIZATION - FAC=FAC11/FACOLD**BETA -C --- WE REQUIRE FAC1 <= HNEW/H <= FAC2 - 
FAC=MAX(FACC2,MIN(FACC1,FAC/SAFE)) - HNEW=H/FAC - IF(ERR.LE.1.D0)THEN -C --- STEP IS ACCEPTED - FACOLD=MAX(ERR,1.0D-4) - NACCPT=NACCPT+1 -C ------- STIFFNESS DETECTION - IF (MOD(NACCPT,NSTIFF).EQ.0.OR.IASTI.GT.0) THEN - STNUM=0.D0 - STDEN=0.D0 - DO 64 I=1,N - STNUM=STNUM+(K2(I)-K6(I))**2 - STDEN=STDEN+(Y1(I)-YSTI(I))**2 - 64 CONTINUE - IF (STDEN.GT.0.D0) HLAMB=H*SQRT(STNUM/STDEN) - IF (HLAMB.GT.3.25D0) THEN - NONSTI=0 - IASTI=IASTI+1 - IF (IASTI.EQ.15) THEN - IF (IPRINT.GT.0) WRITE (IPRINT,*) - & ' THE PROBLEM SEEMS TO BECOME STIFF AT X = ',X - IF (IPRINT.LE.0) GOTO 76 - END IF - ELSE - NONSTI=NONSTI+1 - IF (NONSTI.EQ.6) IASTI=0 - END IF - END IF - IF (IOUT.GE.2) THEN - DO 43 J=1,NRD - I=ICOMP(J) - YD0=Y(I) - YDIFF=Y1(I)-YD0 - BSPL=H*K1(I)-YDIFF - CONT(J)=Y(I) - CONT(NRD+J)=YDIFF - CONT(2*NRD+J)=BSPL - CONT(3*NRD+J)=-H*K2(I)+YDIFF-BSPL - 43 CONTINUE - END IF - DO 44 I=1,N - K1(I)=K2(I) - 44 Y(I)=Y1(I) - XOLD=X - X=XPH - IF (IOUT.NE.0) THEN - HOUT=H - CALL SOLOUT(NACCPT+1,XOLD,X,Y,N,CONT,ICOMP,NRD, - & RPAR,IPAR,IRTRN) - IF (IRTRN.LT.0) GOTO 79 - END IF -C ------- NORMAL EXIT - IF (LAST) THEN - H=HNEW - IDID=1 - RETURN - END IF - IF(ABS(HNEW).GT.HMAX)HNEW=POSNEG*HMAX - IF(REJECT)HNEW=POSNEG*MIN(ABS(HNEW),ABS(H)) - REJECT=.FALSE. - ELSE -C --- STEP IS REJECTED - HNEW=H/MIN(FACC1,FAC11/SAFE) - REJECT=.TRUE. - IF(NACCPT.GE.1)NREJCT=NREJCT+1 - LAST=.FALSE. 
- END IF - H=HNEW - GOTO 1 -C --- FAIL EXIT - 76 CONTINUE - IDID=-4 - RETURN - 77 CONTINUE - IF (IPRINT.GT.0) WRITE(IPRINT,979)X - IF (IPRINT.GT.0) WRITE(IPRINT,*)' STEP SIZE T0O SMALL, H=',H - IDID=-3 - RETURN - 78 CONTINUE - IF (IPRINT.GT.0) WRITE(IPRINT,979)X - IF (IPRINT.GT.0) WRITE(IPRINT,*) - & ' MORE THAN NMAX =',NMAX,'STEPS ARE NEEDED' - IDID=-2 - RETURN - 79 CONTINUE - IF (IPRINT.GT.0) WRITE(IPRINT,979)X - 979 FORMAT(' EXIT OF DOPRI5 AT X=',E18.4) - IDID=2 - RETURN - END -C - FUNCTION HINIT(N,FCN,X,Y,XEND,POSNEG,F0,F1,Y1,IORD, - & HMAX,ATOL,RTOL,ITOL,RPAR,IPAR) -C ---------------------------------------------------------- -C ---- COMPUTATION OF AN INITIAL STEP SIZE GUESS -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION Y(N),Y1(N),F0(N),F1(N),ATOL(*),RTOL(*) - DIMENSION RPAR(*),IPAR(*) -C ---- COMPUTE A FIRST GUESS FOR EXPLICIT EULER AS -C ---- H = 0.01 * NORM (Y0) / NORM (F0) -C ---- THE INCREMENT FOR EXPLICIT EULER IS SMALL -C ---- COMPARED TO THE SOLUTION - DNF=0.0D0 - DNY=0.0D0 - ATOLI=ATOL(1) - RTOLI=RTOL(1) - IF (ITOL.EQ.0) THEN - DO 10 I=1,N - SK=ATOLI+RTOLI*ABS(Y(I)) - DNF=DNF+(F0(I)/SK)**2 - 10 DNY=DNY+(Y(I)/SK)**2 - ELSE - DO 11 I=1,N - SK=ATOL(I)+RTOL(I)*ABS(Y(I)) - DNF=DNF+(F0(I)/SK)**2 - 11 DNY=DNY+(Y(I)/SK)**2 - END IF - IF (DNF.LE.1.D-10.OR.DNY.LE.1.D-10) THEN - H=1.0D-6 - ELSE - H=SQRT(DNY/DNF)*0.01D0 - END IF - H=MIN(H,HMAX) - H=SIGN(H,POSNEG) -C ---- PERFORM AN EXPLICIT EULER STEP - DO 12 I=1,N - 12 Y1(I)=Y(I)+H*F0(I) - CALL FCN(N,X+H,Y1,F1,RPAR,IPAR) -C ---- ESTIMATE THE SECOND DERIVATIVE OF THE SOLUTION - DER2=0.0D0 - IF (ITOL.EQ.0) THEN - DO 15 I=1,N - SK=ATOLI+RTOLI*ABS(Y(I)) - 15 DER2=DER2+((F1(I)-F0(I))/SK)**2 - ELSE - DO 16 I=1,N - SK=ATOL(I)+RTOL(I)*ABS(Y(I)) - 16 DER2=DER2+((F1(I)-F0(I))/SK)**2 - END IF - DER2=SQRT(DER2)/H -C ---- STEP SIZE IS COMPUTED SUCH THAT -C ---- H**IORD * MAX ( NORM (F0), NORM (DER2)) = 0.01 - DER12=MAX(ABS(DER2),SQRT(DNF)) - IF 
(DER12.LE.1.D-15) THEN - H1=MAX(1.0D-6,ABS(H)*1.0D-3) - ELSE - H1=(0.01D0/DER12)**(1.D0/IORD) - END IF - H=MIN(100*ABS(H),H1,HMAX) - HINIT=SIGN(H,POSNEG) - RETURN - END -C - FUNCTION CONTD5(II,X,CON,ICOMP,ND) -C ---------------------------------------------------------- -C THIS FUNCTION CAN BE USED FOR CONTINUOUS OUTPUT IN CONNECTION -C WITH THE OUTPUT-SUBROUTINE FOR DOPRI5. IT PROVIDES AN -C APPROXIMATION TO THE II-TH COMPONENT OF THE SOLUTION AT X. -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CON(5*ND),ICOMP(ND) - COMMON /CONDO5/XOLD,H -C ----- COMPUTE PLACE OF II-TH COMPONENT - I=0 - DO 5 J=1,ND - IF (ICOMP(J).EQ.II) I=J - 5 CONTINUE - IF (I.EQ.0) THEN - WRITE (6,*) ' NO DENSE OUTPUT AVAILABLE FOR COMP.',II - RETURN - END IF - THETA=(X-XOLD)/H - THETA1=1.D0-THETA - CONTD5=CON(I)+THETA*(CON(ND+I)+THETA1*(CON(2*ND+I)+THETA* - & (CON(3*ND+I)+THETA1*CON(4*ND+I)))) - RETURN - END -C - SUBROUTINE CDOPRI(C2,C3,C4,C5,E1,E3,E4,E5,E6,E7, - & A21,A31,A32,A41,A42,A43,A51,A52,A53,A54, - & A61,A62,A63,A64,A65,A71,A73,A74,A75,A76, - & D1,D3,D4,D5,D6,D7) -C ---------------------------------------------------------- -C RUNGE-KUTTA COEFFICIENTS OF DORMAND AND PRINCE (1980) -C ---------------------------------------------------------- - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - C2=0.2D0 - C3=0.3D0 - C4=0.8D0 - C5=8.D0/9.D0 - A21=0.2D0 - A31=3.D0/40.D0 - A32=9.D0/40.D0 - A41=44.D0/45.D0 - A42=-56.D0/15.D0 - A43=32.D0/9.D0 - A51=19372.D0/6561.D0 - A52=-25360.D0/2187.D0 - A53=64448.D0/6561.D0 - A54=-212.D0/729.D0 - A61=9017.D0/3168.D0 - A62=-355.D0/33.D0 - A63=46732.D0/5247.D0 - A64=49.D0/176.D0 - A65=-5103.D0/18656.D0 - A71=35.D0/384.D0 - A73=500.D0/1113.D0 - A74=125.D0/192.D0 - A75=-2187.D0/6784.D0 - A76=11.D0/84.D0 - E1=71.D0/57600.D0 - E3=-71.D0/16695.D0 - E4=71.D0/1920.D0 - E5=-17253.D0/339200.D0 - E6=22.D0/525.D0 - E7=-1.D0/40.D0 -C ---- DENSE OUTPUT OF SHAMPINE (1986) - D1=-12715105075.D0/11282082432.D0 - 
D3=87487479700.D0/32700410799.D0 - D4=-10690763975.D0/1880347072.D0 - D5=701980252875.D0/199316789632.D0 - D6=-1453857185.D0/822651844.D0 - D7=69997945.D0/29380423.D0 - RETURN - END - diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/dgbfa.f b/scipy-0.10.1/scipy/integrate/linpack_lite/dgbfa.f deleted file mode 100644 index c26e6f5794..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/dgbfa.f +++ /dev/null @@ -1,174 +0,0 @@ - subroutine dgbfa(abd,lda,n,ml,mu,ipvt,info) - integer lda,n,ml,mu,ipvt(1),info - double precision abd(lda,1) -c -c dgbfa factors a double precision band matrix by elimination. -c -c dgbfa is usually called by dgbco, but it can be called -c directly with a saving in time if rcond is not needed. -c -c on entry -c -c abd double precision(lda, n) -c contains the matrix in band storage. the columns -c of the matrix are stored in the columns of abd and -c the diagonals of the matrix are stored in rows -c ml+1 through 2*ml+mu+1 of abd . -c see the comments below for details. -c -c lda integer -c the leading dimension of the array abd . -c lda must be .ge. 2*ml + mu + 1 . -c -c n integer -c the order of the original matrix. -c -c ml integer -c number of diagonals below the main diagonal. -c 0 .le. ml .lt. n . -c -c mu integer -c number of diagonals above the main diagonal. -c 0 .le. mu .lt. n . -c more efficient if ml .le. mu . -c on return -c -c abd an upper triangular matrix in band storage and -c the multipliers which were used to obtain it. -c the factorization can be written a = l*u where -c l is a product of permutation and unit lower -c triangular matrices and u is upper triangular. -c -c ipvt integer(n) -c an integer vector of pivot indices. -c -c info integer -c = 0 normal value. -c = k if u(k,k) .eq. 0.0 . this is not an error -c condition for this subroutine, but it does -c indicate that dgbsl will divide by zero if -c called. use rcond in dgbco for a reliable -c indication of singularity. 
-c -c band storage -c -c if a is a band matrix, the following program segment -c will set up the input. -c -c ml = (band width below the diagonal) -c mu = (band width above the diagonal) -c m = ml + mu + 1 -c do 20 j = 1, n -c i1 = max0(1, j-mu) -c i2 = min0(n, j+ml) -c do 10 i = i1, i2 -c k = i - j + m -c abd(k,j) = a(i,j) -c 10 continue -c 20 continue -c -c this uses rows ml+1 through 2*ml+mu+1 of abd . -c in addition, the first ml rows in abd are used for -c elements generated during the triangularization. -c the total number of rows needed in abd is 2*ml+mu+1 . -c the ml+mu by ml+mu upper left triangle and the -c ml by ml lower right triangle are not referenced. -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas daxpy,dscal,idamax -c fortran max0,min0 -c -c internal variables -c - double precision t - integer i,idamax,i0,j,ju,jz,j0,j1,k,kp1,l,lm,m,mm,nm1 -c -c - m = ml + mu + 1 - info = 0 -c -c zero initial fill-in columns -c - j0 = mu + 2 - j1 = min0(n,m) - 1 - if (j1 .lt. j0) go to 30 - do 20 jz = j0, j1 - i0 = m + 1 - jz - do 10 i = i0, ml - abd(i,jz) = 0.0d0 - 10 continue - 20 continue - 30 continue - jz = j1 - ju = 0 -c -c gaussian elimination with partial pivoting -c - nm1 = n - 1 - if (nm1 .lt. 1) go to 130 - do 120 k = 1, nm1 - kp1 = k + 1 -c -c zero next fill-in column -c - jz = jz + 1 - if (jz .gt. n) go to 50 - if (ml .lt. 1) go to 50 - do 40 i = 1, ml - abd(i,jz) = 0.0d0 - 40 continue - 50 continue -c -c find l = pivot index -c - lm = min0(ml,n-k) - l = idamax(lm+1,abd(m,k),1) + m - 1 - ipvt(k) = l + k - m -c -c zero pivot implies this column already triangularized -c - if (abd(l,k) .eq. 0.0d0) go to 100 -c -c interchange if necessary -c - if (l .eq. 
m) go to 60 - t = abd(l,k) - abd(l,k) = abd(m,k) - abd(m,k) = t - 60 continue -c -c compute multipliers -c - t = -1.0d0/abd(m,k) - call dscal(lm,t,abd(m+1,k),1) -c -c row elimination with column indexing -c - ju = min0(max0(ju,mu+ipvt(k)),n) - mm = m - if (ju .lt. kp1) go to 90 - do 80 j = kp1, ju - l = l - 1 - mm = mm - 1 - t = abd(l,j) - if (l .eq. mm) go to 70 - abd(l,j) = abd(mm,j) - abd(mm,j) = t - 70 continue - call daxpy(lm,t,abd(m+1,k),1,abd(mm+1,j),1) - 80 continue - 90 continue - go to 110 - 100 continue - info = k - 110 continue - 120 continue - 130 continue - ipvt(n) = n - if (abd(m,n) .eq. 0.0d0) info = n - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/dgbsl.f b/scipy-0.10.1/scipy/integrate/linpack_lite/dgbsl.f deleted file mode 100644 index 1b1b6ed541..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/dgbsl.f +++ /dev/null @@ -1,135 +0,0 @@ - subroutine dgbsl(abd,lda,n,ml,mu,ipvt,b,job) - integer lda,n,ml,mu,ipvt(1),job - double precision abd(lda,1),b(1) -c -c dgbsl solves the double precision band system -c a * x = b or trans(a) * x = b -c using the factors computed by dgbco or dgbfa. -c -c on entry -c -c abd double precision(lda, n) -c the output from dgbco or dgbfa. -c -c lda integer -c the leading dimension of the array abd . -c -c n integer -c the order of the original matrix. -c -c ml integer -c number of diagonals below the main diagonal. -c -c mu integer -c number of diagonals above the main diagonal. -c -c ipvt integer(n) -c the pivot vector from dgbco or dgbfa. -c -c b double precision(n) -c the right hand side vector. -c -c job integer -c = 0 to solve a*x = b , -c = nonzero to solve trans(a)*x = b , where -c trans(a) is the transpose. -c -c on return -c -c b the solution vector x . -c -c error condition -c -c a division by zero will occur if the input factor contains a -c zero on the diagonal. technically this indicates singularity -c but it is often caused by improper arguments or improper -c setting of lda . 
it will not occur if the subroutines are -c called correctly and if dgbco has set rcond .gt. 0.0 -c or dgbfa has set info .eq. 0 . -c -c to compute inverse(a) * c where c is a matrix -c with p columns -c call dgbco(abd,lda,n,ml,mu,ipvt,rcond,z) -c if (rcond is too small) go to ... -c do 10 j = 1, p -c call dgbsl(abd,lda,n,ml,mu,ipvt,c(1,j),0) -c 10 continue -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas daxpy,ddot -c fortran min0 -c -c internal variables -c - double precision ddot,t - integer k,kb,l,la,lb,lm,m,nm1 -c - m = mu + ml + 1 - nm1 = n - 1 - if (job .ne. 0) go to 50 -c -c job = 0 , solve a * x = b -c first solve l*y = b -c - if (ml .eq. 0) go to 30 - if (nm1 .lt. 1) go to 30 - do 20 k = 1, nm1 - lm = min0(ml,n-k) - l = ipvt(k) - t = b(l) - if (l .eq. k) go to 10 - b(l) = b(k) - b(k) = t - 10 continue - call daxpy(lm,t,abd(m+1,k),1,b(k+1),1) - 20 continue - 30 continue -c -c now solve u*x = y -c - do 40 kb = 1, n - k = n + 1 - kb - b(k) = b(k)/abd(m,k) - lm = min0(k,m) - 1 - la = m - lm - lb = k - lm - t = -b(k) - call daxpy(lm,t,abd(la,k),1,b(lb),1) - 40 continue - go to 100 - 50 continue -c -c job = nonzero, solve trans(a) * x = b -c first solve trans(u)*y = b -c - do 60 k = 1, n - lm = min0(k,m) - 1 - la = m - lm - lb = k - lm - t = ddot(lm,abd(la,k),1,b(lb),1) - b(k) = (b(k) - t)/abd(m,k) - 60 continue -c -c now solve trans(l)*x = y -c - if (ml .eq. 0) go to 90 - if (nm1 .lt. 1) go to 90 - do 80 kb = 1, nm1 - k = n - kb - lm = min0(ml,n-k) - b(k) = b(k) + ddot(lm,abd(m+1,k),1,b(k+1),1) - l = ipvt(k) - if (l .eq. 
k) go to 70 - t = b(l) - b(l) = b(k) - b(k) = t - 70 continue - 80 continue - 90 continue - 100 continue - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/dgefa.f b/scipy-0.10.1/scipy/integrate/linpack_lite/dgefa.f deleted file mode 100644 index 37d705f14f..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/dgefa.f +++ /dev/null @@ -1,103 +0,0 @@ - subroutine dgefa(a,lda,n,ipvt,info) - integer lda,n,ipvt(1),info - double precision a(lda,1) -c -c dgefa factors a double precision matrix by gaussian elimination. -c -c dgefa is usually called by dgeco, but it can be called -c directly with a saving in time if rcond is not needed. -c (time for dgeco) = (1 + 9/n)*(time for dgefa) . -c -c on entry -c -c a double precision(lda, n) -c the matrix to be factored. -c -c lda integer -c the leading dimension of the array a . -c -c n integer -c the order of the matrix a . -c -c on return -c -c a an upper triangular matrix and the multipliers -c which were used to obtain it. -c the factorization can be written a = l*u where -c l is a product of permutation and unit lower -c triangular matrices and u is upper triangular. -c -c ipvt integer(n) -c an integer vector of pivot indices. -c -c info integer -c = 0 normal value. -c = k if u(k,k) .eq. 0.0 . this is not an error -c condition for this subroutine, but it does -c indicate that dgesl or dgedi will divide by zero -c if called. use rcond in dgeco for a reliable -c indication of singularity. -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas daxpy,dscal,idamax -c -c internal variables -c - double precision t - integer idamax,j,k,kp1,l,nm1 -c -c -c gaussian elimination with partial pivoting -c - info = 0 - nm1 = n - 1 - if (nm1 .lt. 
1) go to 70 - do 60 k = 1, nm1 - kp1 = k + 1 -c -c find l = pivot index -c - l = idamax(n-k+1,a(k,k),1) + k - 1 - ipvt(k) = l -c -c zero pivot implies this column already triangularized -c - if (a(l,k) .eq. 0.0d0) go to 40 -c -c interchange if necessary -c - if (l .eq. k) go to 10 - t = a(l,k) - a(l,k) = a(k,k) - a(k,k) = t - 10 continue -c -c compute multipliers -c - t = -1.0d0/a(k,k) - call dscal(n-k,t,a(k+1,k),1) -c -c row elimination with column indexing -c - do 30 j = kp1, n - t = a(l,j) - if (l .eq. k) go to 20 - a(l,j) = a(k,j) - a(k,j) = t - 20 continue - call daxpy(n-k,t,a(k+1,k),1,a(k+1,j),1) - 30 continue - go to 50 - 40 continue - info = k - 50 continue - 60 continue - 70 continue - ipvt(n) = n - if (a(n,n) .eq. 0.0d0) info = n - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/dgesl.f b/scipy-0.10.1/scipy/integrate/linpack_lite/dgesl.f deleted file mode 100644 index 093fa51827..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/dgesl.f +++ /dev/null @@ -1,117 +0,0 @@ - subroutine dgesl(a,lda,n,ipvt,b,job) - integer lda,n,ipvt(1),job - double precision a(lda,1),b(1) -c -c dgesl solves the double precision system -c a * x = b or trans(a) * x = b -c using the factors computed by dgeco or dgefa. -c -c on entry -c -c a double precision(lda, n) -c the output from dgeco or dgefa. -c -c lda integer -c the leading dimension of the array a . -c -c n integer -c the order of the matrix a . -c -c ipvt integer(n) -c the pivot vector from dgeco or dgefa. -c -c b double precision(n) -c the right hand side vector. -c -c job integer -c = 0 to solve a*x = b , -c = nonzero to solve trans(a)*x = b where -c trans(a) is the transpose. -c -c on return -c -c b the solution vector x . -c -c error condition -c -c a division by zero will occur if the input factor contains a -c zero on the diagonal. technically this indicates singularity -c but it is often caused by improper arguments or improper -c setting of lda . 
it will not occur if the subroutines are -c called correctly and if dgeco has set rcond .gt. 0.0 -c or dgefa has set info .eq. 0 . -c -c to compute inverse(a) * c where c is a matrix -c with p columns -c call dgeco(a,lda,n,ipvt,rcond,z) -c if (rcond is too small) go to ... -c do 10 j = 1, p -c call dgesl(a,lda,n,ipvt,c(1,j),0) -c 10 continue -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas daxpy,ddot -c -c internal variables -c - double precision ddot,t - integer k,kb,l,nm1 -c - nm1 = n - 1 - if (job .ne. 0) go to 50 -c -c job = 0 , solve a * x = b -c first solve l*y = b -c - if (nm1 .lt. 1) go to 30 - do 20 k = 1, nm1 - l = ipvt(k) - t = b(l) - if (l .eq. k) go to 10 - b(l) = b(k) - b(k) = t - 10 continue - call daxpy(n-k,t,a(k+1,k),1,b(k+1),1) - 20 continue - 30 continue -c -c now solve u*x = y -c - do 40 kb = 1, n - k = n + 1 - kb - b(k) = b(k)/a(k,k) - t = -b(k) - call daxpy(k-1,t,a(1,k),1,b(1),1) - 40 continue - go to 100 - 50 continue -c -c job = nonzero, solve trans(a) * x = b -c first solve trans(u)*y = b -c - do 60 k = 1, n - t = ddot(k-1,a(1,k),1,b(1),1) - b(k) = (b(k) - t)/a(k,k) - 60 continue -c -c now solve trans(l)*x = y -c - if (nm1 .lt. 1) go to 90 - do 80 kb = 1, nm1 - k = n - kb - b(k) = b(k) + ddot(n-k,a(k+1,k),1,b(k+1),1) - l = ipvt(k) - if (l .eq. k) go to 70 - t = b(l) - b(l) = b(k) - b(k) = t - 70 continue - 80 continue - 90 continue - 100 continue - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/dgtsl.f b/scipy-0.10.1/scipy/integrate/linpack_lite/dgtsl.f deleted file mode 100644 index 710326f581..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/dgtsl.f +++ /dev/null @@ -1,119 +0,0 @@ - subroutine dgtsl(n,c,d,e,b,info) - integer n,info - double precision c(1),d(1),e(1),b(1) -c -c dgtsl given a general tridiagonal matrix and a right hand -c side will find the solution. 
-c -c on entry -c -c n integer -c is the order of the tridiagonal matrix. -c -c c double precision(n) -c is the subdiagonal of the tridiagonal matrix. -c c(2) through c(n) should contain the subdiagonal. -c on output c is destroyed. -c -c d double precision(n) -c is the diagonal of the tridiagonal matrix. -c on output d is destroyed. -c -c e double precision(n) -c is the superdiagonal of the tridiagonal matrix. -c e(1) through e(n-1) should contain the superdiagonal. -c on output e is destroyed. -c -c b double precision(n) -c is the right hand side vector. -c -c on return -c -c b is the solution vector. -c -c info integer -c = 0 normal value. -c = k if the k-th element of the diagonal becomes -c exactly zero. the subroutine returns when -c this is detected. -c -c linpack. this version dated 08/14/78 . -c jack dongarra, argonne national laboratory. -c -c no externals -c fortran dabs -c -c internal variables -c - integer k,kb,kp1,nm1,nm2 - double precision t -c begin block permitting ...exits to 100 -c - info = 0 - c(1) = d(1) - nm1 = n - 1 - if (nm1 .lt. 1) go to 40 - d(1) = e(1) - e(1) = 0.0d0 - e(n) = 0.0d0 -c - do 30 k = 1, nm1 - kp1 = k + 1 -c -c find the largest of the two rows -c - if (dabs(c(kp1)) .lt. dabs(c(k))) go to 10 -c -c interchange row -c - t = c(kp1) - c(kp1) = c(k) - c(k) = t - t = d(kp1) - d(kp1) = d(k) - d(k) = t - t = e(kp1) - e(kp1) = e(k) - e(k) = t - t = b(kp1) - b(kp1) = b(k) - b(k) = t - 10 continue -c -c zero elements -c - if (c(k) .ne. 0.0d0) go to 20 - info = k -c ............exit - go to 100 - 20 continue - t = -c(kp1)/c(k) - c(kp1) = d(kp1) + t*d(k) - d(kp1) = e(kp1) + t*e(k) - e(kp1) = 0.0d0 - b(kp1) = b(kp1) + t*b(k) - 30 continue - 40 continue - if (c(n) .ne. 0.0d0) go to 50 - info = n - go to 90 - 50 continue -c -c back solve -c - nm2 = n - 2 - b(n) = b(n)/c(n) - if (n .eq. 1) go to 80 - b(nm1) = (b(nm1) - d(nm1)*b(n))/c(nm1) - if (nm2 .lt. 
1) go to 70 - do 60 kb = 1, nm2 - k = nm2 - kb + 1 - b(k) = (b(k) - d(k)*b(k+1) - e(k)*b(k+2))/c(k) - 60 continue - 70 continue - 80 continue - 90 continue - 100 continue -c - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/zgbfa.f b/scipy-0.10.1/scipy/integrate/linpack_lite/zgbfa.f deleted file mode 100644 index 9bb2f9f82d..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/zgbfa.f +++ /dev/null @@ -1,181 +0,0 @@ - subroutine zgbfa(abd,lda,n,ml,mu,ipvt,info) - integer lda,n,ml,mu,ipvt(1),info - complex*16 abd(lda,1) -c -c zgbfa factors a complex*16 band matrix by elimination. -c -c zgbfa is usually called by zgbco, but it can be called -c directly with a saving in time if rcond is not needed. -c -c on entry -c -c abd complex*16(lda, n) -c contains the matrix in band storage. the columns -c of the matrix are stored in the columns of abd and -c the diagonals of the matrix are stored in rows -c ml+1 through 2*ml+mu+1 of abd . -c see the comments below for details. -c -c lda integer -c the leading dimension of the array abd . -c lda must be .ge. 2*ml + mu + 1 . -c -c n integer -c the order of the original matrix. -c -c ml integer -c number of diagonals below the main diagonal. -c 0 .le. ml .lt. n . -c -c mu integer -c number of diagonals above the main diagonal. -c 0 .le. mu .lt. n . -c more efficient if ml .le. mu . -c on return -c -c abd an upper triangular matrix in band storage and -c the multipliers which were used to obtain it. -c the factorization can be written a = l*u where -c l is a product of permutation and unit lower -c triangular matrices and u is upper triangular. -c -c ipvt integer(n) -c an integer vector of pivot indices. -c -c info integer -c = 0 normal value. -c = k if u(k,k) .eq. 0.0 . this is not an error -c condition for this subroutine, but it does -c indicate that zgbsl will divide by zero if -c called. use rcond in zgbco for a reliable -c indication of singularity. 
-c -c band storage -c -c if a is a band matrix, the following program segment -c will set up the input. -c -c ml = (band width below the diagonal) -c mu = (band width above the diagonal) -c m = ml + mu + 1 -c do 20 j = 1, n -c i1 = max0(1, j-mu) -c i2 = min0(n, j+ml) -c do 10 i = i1, i2 -c k = i - j + m -c abd(k,j) = a(i,j) -c 10 continue -c 20 continue -c -c this uses rows ml+1 through 2*ml+mu+1 of abd . -c in addition, the first ml rows in abd are used for -c elements generated during the triangularization. -c the total number of rows needed in abd is 2*ml+mu+1 . -c the ml+mu by ml+mu upper left triangle and the -c ml by ml lower right triangle are not referenced. -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas zaxpy,zscal,izamax -c fortran dabs,max0,min0 -c -c internal variables -c - complex*16 t - integer i,izamax,i0,j,ju,jz,j0,j1,k,kp1,l,lm,m,mm,nm1 -c - complex*16 zdum - double precision cabs1 - double precision dreal,dimag - complex*16 zdumr,zdumi - dreal(zdumr) = zdumr - dimag(zdumi) = (0.0d0,-1.0d0)*zdumi - cabs1(zdum) = dabs(dreal(zdum)) + dabs(dimag(zdum)) -c - m = ml + mu + 1 - info = 0 -c -c zero initial fill-in columns -c - j0 = mu + 2 - j1 = min0(n,m) - 1 - if (j1 .lt. j0) go to 30 - do 20 jz = j0, j1 - i0 = m + 1 - jz - do 10 i = i0, ml - abd(i,jz) = (0.0d0,0.0d0) - 10 continue - 20 continue - 30 continue - jz = j1 - ju = 0 -c -c gaussian elimination with partial pivoting -c - nm1 = n - 1 - if (nm1 .lt. 1) go to 130 - do 120 k = 1, nm1 - kp1 = k + 1 -c -c zero next fill-in column -c - jz = jz + 1 - if (jz .gt. n) go to 50 - if (ml .lt. 1) go to 50 - do 40 i = 1, ml - abd(i,jz) = (0.0d0,0.0d0) - 40 continue - 50 continue -c -c find l = pivot index -c - lm = min0(ml,n-k) - l = izamax(lm+1,abd(m,k),1) + m - 1 - ipvt(k) = l + k - m -c -c zero pivot implies this column already triangularized -c - if (cabs1(abd(l,k)) .eq. 
0.0d0) go to 100 -c -c interchange if necessary -c - if (l .eq. m) go to 60 - t = abd(l,k) - abd(l,k) = abd(m,k) - abd(m,k) = t - 60 continue -c -c compute multipliers -c - t = -(1.0d0,0.0d0)/abd(m,k) - call zscal(lm,t,abd(m+1,k),1) -c -c row elimination with column indexing -c - ju = min0(max0(ju,mu+ipvt(k)),n) - mm = m - if (ju .lt. kp1) go to 90 - do 80 j = kp1, ju - l = l - 1 - mm = mm - 1 - t = abd(l,j) - if (l .eq. mm) go to 70 - abd(l,j) = abd(mm,j) - abd(mm,j) = t - 70 continue - call zaxpy(lm,t,abd(m+1,k),1,abd(mm+1,j),1) - 80 continue - 90 continue - go to 110 - 100 continue - info = k - 110 continue - 120 continue - 130 continue - ipvt(n) = n - if (cabs1(abd(m,n)) .eq. 0.0d0) info = n - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/zgbsl.f b/scipy-0.10.1/scipy/integrate/linpack_lite/zgbsl.f deleted file mode 100644 index 567e9f5e42..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/zgbsl.f +++ /dev/null @@ -1,139 +0,0 @@ - subroutine zgbsl(abd,lda,n,ml,mu,ipvt,b,job) - integer lda,n,ml,mu,ipvt(1),job - complex*16 abd(lda,1),b(1) -c -c zgbsl solves the complex*16 band system -c a * x = b or ctrans(a) * x = b -c using the factors computed by zgbco or zgbfa. -c -c on entry -c -c abd complex*16(lda, n) -c the output from zgbco or zgbfa. -c -c lda integer -c the leading dimension of the array abd . -c -c n integer -c the order of the original matrix. -c -c ml integer -c number of diagonals below the main diagonal. -c -c mu integer -c number of diagonals above the main diagonal. -c -c ipvt integer(n) -c the pivot vector from zgbco or zgbfa. -c -c b complex*16(n) -c the right hand side vector. -c -c job integer -c = 0 to solve a*x = b , -c = nonzero to solve ctrans(a)*x = b , where -c ctrans(a) is the conjugate transpose. -c -c on return -c -c b the solution vector x . -c -c error condition -c -c a division by zero will occur if the input factor contains a -c zero on the diagonal. 
technically this indicates singularity -c but it is often caused by improper arguments or improper -c setting of lda . it will not occur if the subroutines are -c called correctly and if zgbco has set rcond .gt. 0.0 -c or zgbfa has set info .eq. 0 . -c -c to compute inverse(a) * c where c is a matrix -c with p columns -c call zgbco(abd,lda,n,ml,mu,ipvt,rcond,z) -c if (rcond is too small) go to ... -c do 10 j = 1, p -c call zgbsl(abd,lda,n,ml,mu,ipvt,c(1,j),0) -c 10 continue -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas zaxpy,zdotc -c fortran dconjg,min0 -c -c internal variables -c - complex*16 zdotc,t - integer k,kb,l,la,lb,lm,m,nm1 - double precision dreal,dimag - complex*16 zdumr,zdumi - dreal(zdumr) = zdumr - dimag(zdumi) = (0.0d0,-1.0d0)*zdumi -c - m = mu + ml + 1 - nm1 = n - 1 - if (job .ne. 0) go to 50 -c -c job = 0 , solve a * x = b -c first solve l*y = b -c - if (ml .eq. 0) go to 30 - if (nm1 .lt. 1) go to 30 - do 20 k = 1, nm1 - lm = min0(ml,n-k) - l = ipvt(k) - t = b(l) - if (l .eq. k) go to 10 - b(l) = b(k) - b(k) = t - 10 continue - call zaxpy(lm,t,abd(m+1,k),1,b(k+1),1) - 20 continue - 30 continue -c -c now solve u*x = y -c - do 40 kb = 1, n - k = n + 1 - kb - b(k) = b(k)/abd(m,k) - lm = min0(k,m) - 1 - la = m - lm - lb = k - lm - t = -b(k) - call zaxpy(lm,t,abd(la,k),1,b(lb),1) - 40 continue - go to 100 - 50 continue -c -c job = nonzero, solve ctrans(a) * x = b -c first solve ctrans(u)*y = b -c - do 60 k = 1, n - lm = min0(k,m) - 1 - la = m - lm - lb = k - lm - t = zdotc(lm,abd(la,k),1,b(lb),1) - b(k) = (b(k) - t)/dconjg(abd(m,k)) - 60 continue -c -c now solve ctrans(l)*x = y -c - if (ml .eq. 0) go to 90 - if (nm1 .lt. 1) go to 90 - do 80 kb = 1, nm1 - k = n - kb - lm = min0(ml,n-k) - b(k) = b(k) + zdotc(lm,abd(m+1,k),1,b(k+1),1) - l = ipvt(k) - if (l .eq. 
k) go to 70 - t = b(l) - b(l) = b(k) - b(k) = t - 70 continue - 80 continue - 90 continue - 100 continue - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/zgefa.f b/scipy-0.10.1/scipy/integrate/linpack_lite/zgefa.f deleted file mode 100644 index f5dba97390..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/zgefa.f +++ /dev/null @@ -1,111 +0,0 @@ - subroutine zgefa(a,lda,n,ipvt,info) - integer lda,n,ipvt(1),info - complex*16 a(lda,1) -c -c zgefa factors a complex*16 matrix by gaussian elimination. -c -c zgefa is usually called by zgeco, but it can be called -c directly with a saving in time if rcond is not needed. -c (time for zgeco) = (1 + 9/n)*(time for zgefa) . -c -c on entry -c -c a complex*16(lda, n) -c the matrix to be factored. -c -c lda integer -c the leading dimension of the array a . -c -c n integer -c the order of the matrix a . -c -c on return -c -c a an upper triangular matrix and the multipliers -c which were used to obtain it. -c the factorization can be written a = l*u where -c l is a product of permutation and unit lower -c triangular matrices and u is upper triangular. -c -c ipvt integer(n) -c an integer vector of pivot indices. -c -c info integer -c = 0 normal value. -c = k if u(k,k) .eq. 0.0 . this is not an error -c condition for this subroutine, but it does -c indicate that zgesl or zgedi will divide by zero -c if called. use rcond in zgeco for a reliable -c indication of singularity. -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. 
-c -c subroutines and functions -c -c blas zaxpy,zscal,izamax -c fortran dabs -c -c internal variables -c - complex*16 t - integer izamax,j,k,kp1,l,nm1 -c - complex*16 zdum - double precision cabs1 - double precision dreal,dimag - complex*16 zdumr,zdumi - dreal(zdumr) = zdumr - dimag(zdumi) = (0.0d0,-1.0d0)*zdumi - cabs1(zdum) = dabs(dreal(zdum)) + dabs(dimag(zdum)) -c -c gaussian elimination with partial pivoting -c - info = 0 - nm1 = n - 1 - if (nm1 .lt. 1) go to 70 - do 60 k = 1, nm1 - kp1 = k + 1 -c -c find l = pivot index -c - l = izamax(n-k+1,a(k,k),1) + k - 1 - ipvt(k) = l -c -c zero pivot implies this column already triangularized -c - if (cabs1(a(l,k)) .eq. 0.0d0) go to 40 -c -c interchange if necessary -c - if (l .eq. k) go to 10 - t = a(l,k) - a(l,k) = a(k,k) - a(k,k) = t - 10 continue -c -c compute multipliers -c - t = -(1.0d0,0.0d0)/a(k,k) - call zscal(n-k,t,a(k+1,k),1) -c -c row elimination with column indexing -c - do 30 j = kp1, n - t = a(l,j) - if (l .eq. k) go to 20 - a(l,j) = a(k,j) - a(k,j) = t - 20 continue - call zaxpy(n-k,t,a(k+1,k),1,a(k+1,j),1) - 30 continue - go to 50 - 40 continue - info = k - 50 continue - 60 continue - 70 continue - ipvt(n) = n - if (cabs1(a(n,n)) .eq. 0.0d0) info = n - return - end diff --git a/scipy-0.10.1/scipy/integrate/linpack_lite/zgesl.f b/scipy-0.10.1/scipy/integrate/linpack_lite/zgesl.f deleted file mode 100644 index c170fb4e23..0000000000 --- a/scipy-0.10.1/scipy/integrate/linpack_lite/zgesl.f +++ /dev/null @@ -1,122 +0,0 @@ - subroutine zgesl(a,lda,n,ipvt,b,job) - integer lda,n,ipvt(1),job - complex*16 a(lda,1),b(1) -c -c zgesl solves the complex*16 system -c a * x = b or ctrans(a) * x = b -c using the factors computed by zgeco or zgefa. -c -c on entry -c -c a complex*16(lda, n) -c the output from zgeco or zgefa. -c -c lda integer -c the leading dimension of the array a . -c -c n integer -c the order of the matrix a . -c -c ipvt integer(n) -c the pivot vector from zgeco or zgefa. 
-c -c b complex*16(n) -c the right hand side vector. -c -c job integer -c = 0 to solve a*x = b , -c = nonzero to solve ctrans(a)*x = b where -c ctrans(a) is the conjugate transpose. -c -c on return -c -c b the solution vector x . -c -c error condition -c -c a division by zero will occur if the input factor contains a -c zero on the diagonal. technically this indicates singularity -c but it is often caused by improper arguments or improper -c setting of lda . it will not occur if the subroutines are -c called correctly and if zgeco has set rcond .gt. 0.0 -c or zgefa has set info .eq. 0 . -c -c to compute inverse(a) * c where c is a matrix -c with p columns -c call zgeco(a,lda,n,ipvt,rcond,z) -c if (rcond is too small) go to ... -c do 10 j = 1, p -c call zgesl(a,lda,n,ipvt,c(1,j),0) -c 10 continue -c -c linpack. this version dated 08/14/78 . -c cleve moler, university of new mexico, argonne national lab. -c -c subroutines and functions -c -c blas zaxpy,zdotc -c fortran dconjg -c -c internal variables -c - complex*16 zdotc,t - integer k,kb,l,nm1 - double precision dreal,dimag - complex*16 zdumr,zdumi - dreal(zdumr) = zdumr - dimag(zdumi) = (0.0d0,-1.0d0)*zdumi -c - nm1 = n - 1 - if (job .ne. 0) go to 50 -c -c job = 0 , solve a * x = b -c first solve l*y = b -c - if (nm1 .lt. 1) go to 30 - do 20 k = 1, nm1 - l = ipvt(k) - t = b(l) - if (l .eq. k) go to 10 - b(l) = b(k) - b(k) = t - 10 continue - call zaxpy(n-k,t,a(k+1,k),1,b(k+1),1) - 20 continue - 30 continue -c -c now solve u*x = y -c - do 40 kb = 1, n - k = n + 1 - kb - b(k) = b(k)/a(k,k) - t = -b(k) - call zaxpy(k-1,t,a(1,k),1,b(1),1) - 40 continue - go to 100 - 50 continue -c -c job = nonzero, solve ctrans(a) * x = b -c first solve ctrans(u)*y = b -c - do 60 k = 1, n - t = zdotc(k-1,a(1,k),1,b(1),1) - b(k) = (b(k) - t)/dconjg(a(k,k)) - 60 continue -c -c now solve ctrans(l)*x = y -c - if (nm1 .lt. 1) go to 90 - do 80 kb = 1, nm1 - k = n - kb - b(k) = b(k) + zdotc(n-k,a(k+1,k),1,b(k+1),1) - l = ipvt(k) - if (l .eq. 
k) go to 70 - t = b(l) - b(l) = b(k) - b(k) = t - 70 continue - 80 continue - 90 continue - 100 continue - return - end diff --git a/scipy-0.10.1/scipy/integrate/mach/d1mach.f b/scipy-0.10.1/scipy/integrate/mach/d1mach.f deleted file mode 100644 index bda4529c9e..0000000000 --- a/scipy-0.10.1/scipy/integrate/mach/d1mach.f +++ /dev/null @@ -1,209 +0,0 @@ - DOUBLE PRECISION FUNCTION D1MACH(I) - INTEGER I -C -C DOUBLE-PRECISION MACHINE CONSTANTS -C D1MACH( 1) = B**(EMIN-1), THE SMALLEST POSITIVE MAGNITUDE. -C D1MACH( 2) = B**EMAX*(1 - B**(-T)), THE LARGEST MAGNITUDE. -C D1MACH( 3) = B**(-T), THE SMALLEST RELATIVE SPACING. -C D1MACH( 4) = B**(1-T), THE LARGEST RELATIVE SPACING. -C D1MACH( 5) = LOG10(B) -C - INTEGER SMALL(2) - INTEGER LARGE(2) - INTEGER RIGHT(2) - INTEGER DIVER(2) - INTEGER LOG10(2) - INTEGER SC, CRAY1(38), J - COMMON /D9MACH/ CRAY1 - SAVE SMALL, LARGE, RIGHT, DIVER, LOG10, SC - DOUBLE PRECISION DMACH(5) - EQUIVALENCE (DMACH(1),SMALL(1)) - EQUIVALENCE (DMACH(2),LARGE(1)) - EQUIVALENCE (DMACH(3),RIGHT(1)) - EQUIVALENCE (DMACH(4),DIVER(1)) - EQUIVALENCE (DMACH(5),LOG10(1)) -C THIS VERSION ADAPTS AUTOMATICALLY TO MOST CURRENT MACHINES. -C R1MACH CAN HANDLE AUTO-DOUBLE COMPILING, BUT THIS VERSION OF -C D1MACH DOES NOT, BECAUSE WE DO NOT HAVE QUAD CONSTANTS FOR -C MANY MACHINES YET. -C TO COMPILE ON OLDER MACHINES, ADD A C IN COLUMN 1 -C ON THE NEXT LINE - DATA SC/0/ -C AND REMOVE THE C FROM COLUMN 1 IN ONE OF THE SECTIONS BELOW. -C CONSTANTS FOR EVEN OLDER MACHINES CAN BE OBTAINED BY -C mail netlib@research.bell-labs.com -C send old1mach from blas -C PLEASE SEND CORRECTIONS TO dmg OR ehg@bell-labs.com. -C -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES. 
-C DATA SMALL(1),SMALL(2) / O402400000000, O000000000000 / -C DATA LARGE(1),LARGE(2) / O376777777777, O777777777777 / -C DATA RIGHT(1),RIGHT(2) / O604400000000, O000000000000 / -C DATA DIVER(1),DIVER(2) / O606400000000, O000000000000 / -C DATA LOG10(1),LOG10(2) / O776464202324, O117571775714 /, SC/987/ -C -C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING -C 32-BIT INTEGERS. -C DATA SMALL(1),SMALL(2) / 8388608, 0 / -C DATA LARGE(1),LARGE(2) / 2147483647, -1 / -C DATA RIGHT(1),RIGHT(2) / 612368384, 0 / -C DATA DIVER(1),DIVER(2) / 620756992, 0 / -C DATA LOG10(1),LOG10(2) / 1067065498, -2063872008 /, SC/987/ -C -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES. -C DATA SMALL(1),SMALL(2) / O000040000000, O000000000000 / -C DATA LARGE(1),LARGE(2) / O377777777777, O777777777777 / -C DATA RIGHT(1),RIGHT(2) / O170540000000, O000000000000 / -C DATA DIVER(1),DIVER(2) / O170640000000, O000000000000 / -C DATA LOG10(1),LOG10(2) / O177746420232, O411757177572 /, SC/987/ -C -C ON FIRST CALL, IF NO DATA UNCOMMENTED, TEST MACHINE TYPES. - IF (SC .NE. 987) THEN - DMACH(1) = 1.D13 - IF ( SMALL(1) .EQ. 1117925532 - * .AND. SMALL(2) .EQ. -448790528) THEN -* *** IEEE BIG ENDIAN *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2146435071 - LARGE(2) = -1 - RIGHT(1) = 1017118720 - RIGHT(2) = 0 - DIVER(1) = 1018167296 - DIVER(2) = 0 - LOG10(1) = 1070810131 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(2) .EQ. 1117925532 - * .AND. SMALL(1) .EQ. -448790528) THEN -* *** IEEE LITTLE ENDIAN *** - SMALL(2) = 1048576 - SMALL(1) = 0 - LARGE(2) = 2146435071 - LARGE(1) = -1 - RIGHT(2) = 1017118720 - RIGHT(1) = 0 - DIVER(2) = 1018167296 - DIVER(1) = 0 - LOG10(2) = 1070810131 - LOG10(1) = 1352628735 - ELSE IF ( SMALL(1) .EQ. -2065213935 - * .AND. SMALL(2) .EQ. 
10752) THEN -* *** VAX WITH D_FLOATING *** - SMALL(1) = 128 - SMALL(2) = 0 - LARGE(1) = -32769 - LARGE(2) = -1 - RIGHT(1) = 9344 - RIGHT(2) = 0 - DIVER(1) = 9472 - DIVER(2) = 0 - LOG10(1) = 546979738 - LOG10(2) = -805796613 - ELSE IF ( SMALL(1) .EQ. 1267827943 - * .AND. SMALL(2) .EQ. 704643072) THEN -* *** IBM MAINFRAME *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2147483647 - LARGE(2) = -1 - RIGHT(1) = 856686592 - RIGHT(2) = 0 - DIVER(1) = 873463808 - DIVER(2) = 0 - LOG10(1) = 1091781651 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(1) .EQ. 1120022684 - * .AND. SMALL(2) .EQ. -448790528) THEN -* *** CONVEX C-1 *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2147483647 - LARGE(2) = -1 - RIGHT(1) = 1019215872 - RIGHT(2) = 0 - DIVER(1) = 1020264448 - DIVER(2) = 0 - LOG10(1) = 1072907283 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(1) .EQ. 815547074 - * .AND. SMALL(2) .EQ. 58688) THEN -* *** VAX G-FLOATING *** - SMALL(1) = 16 - SMALL(2) = 0 - LARGE(1) = -32769 - LARGE(2) = -1 - RIGHT(1) = 15552 - RIGHT(2) = 0 - DIVER(1) = 15568 - DIVER(2) = 0 - LOG10(1) = 1142112243 - LOG10(2) = 2046775455 - ELSE - DMACH(2) = 1.D27 + 1 - DMACH(3) = 1.D27 - LARGE(2) = LARGE(2) - RIGHT(2) - IF (LARGE(2) .EQ. 64 .AND. SMALL(2) .EQ. 0) THEN - CRAY1(1) = 67291416 - DO 10 J = 1, 20 - CRAY1(J+1) = CRAY1(J) + CRAY1(J) - 10 CONTINUE - CRAY1(22) = CRAY1(21) + 321322 - DO 20 J = 22, 37 - CRAY1(J+1) = CRAY1(J) + CRAY1(J) - 20 CONTINUE - IF (CRAY1(38) .EQ. 
SMALL(1)) THEN -* *** CRAY *** - CALL I1MCRY(SMALL(1), J, 8285, 8388608, 0) - SMALL(2) = 0 - CALL I1MCRY(LARGE(1), J, 24574, 16777215, 16777215) - CALL I1MCRY(LARGE(2), J, 0, 16777215, 16777214) - CALL I1MCRY(RIGHT(1), J, 16291, 8388608, 0) - RIGHT(2) = 0 - CALL I1MCRY(DIVER(1), J, 16292, 8388608, 0) - DIVER(2) = 0 - CALL I1MCRY(LOG10(1), J, 16383, 10100890, 8715215) - CALL I1MCRY(LOG10(2), J, 0, 16226447, 9001388) - ELSE - WRITE(*,9000) - STOP 779 - END IF - ELSE - WRITE(*,9000) - STOP 779 - END IF - END IF - SC = 987 - END IF -* SANITY CHECK - IF (DMACH(4) .GE. 1.0D0) STOP 778 - IF (I .LT. 1 .OR. I .GT. 5) THEN - WRITE(*,*) 'D1MACH(I): I =',I,' is out of bounds.' - STOP - END IF - D1MACH = DMACH(I) - RETURN - 9000 FORMAT(/' Adjust D1MACH by uncommenting data statements'/ - *' appropriate for your machine.') -* /* Standard C source for D1MACH -- remove the * in column 1 */ -*#include -*#include -*#include -*double d1mach_(long *i) -*{ -* switch(*i){ -* case 1: return DBL_MIN; -* case 2: return DBL_MAX; -* case 3: return DBL_EPSILON/FLT_RADIX; -* case 4: return DBL_EPSILON; -* case 5: return log10(FLT_RADIX); -* } -* fprintf(stderr, "invalid argument: d1mach(%ld)\n", *i); -* exit(1); return 0; /* some compilers demand return values */ -*} - END - SUBROUTINE I1MCRY(A, A1, B, C, D) -**** SPECIAL COMPUTATION FOR OLD CRAY MACHINES **** - INTEGER A, A1, B, C, D - A1 = 16777216*B + C - A = 16777216*A1 + D - END diff --git a/scipy-0.10.1/scipy/integrate/mach/i1mach.f b/scipy-0.10.1/scipy/integrate/mach/i1mach.f deleted file mode 100644 index 1d6f7fc6bb..0000000000 --- a/scipy-0.10.1/scipy/integrate/mach/i1mach.f +++ /dev/null @@ -1,291 +0,0 @@ - INTEGER FUNCTION I1MACH(I) - INTEGER I -C -C I1MACH( 1) = THE STANDARD INPUT UNIT. -C I1MACH( 2) = THE STANDARD OUTPUT UNIT. -C I1MACH( 3) = THE STANDARD PUNCH UNIT. -C I1MACH( 4) = THE STANDARD ERROR MESSAGE UNIT. -C I1MACH( 5) = THE NUMBER OF BITS PER INTEGER STORAGE UNIT. 
-C I1MACH( 6) = THE NUMBER OF CHARACTERS PER CHARACTER STORAGE UNIT. -C INTEGERS HAVE FORM SIGN ( X(S-1)*A**(S-1) + ... + X(1)*A + X(0) ) -C I1MACH( 7) = A, THE BASE. -C I1MACH( 8) = S, THE NUMBER OF BASE-A DIGITS. -C I1MACH( 9) = A**S - 1, THE LARGEST MAGNITUDE. -C FLOATS HAVE FORM SIGN (B**E)*( (X(1)/B) + ... + (X(T)/B**T) ) -C WHERE EMIN .LE. E .LE. EMAX. -C I1MACH(10) = B, THE BASE. -C SINGLE-PRECISION -C I1MACH(11) = T, THE NUMBER OF BASE-B DIGITS. -C I1MACH(12) = EMIN, THE SMALLEST EXPONENT E. -C I1MACH(13) = EMAX, THE LARGEST EXPONENT E. -C DOUBLE-PRECISION -C I1MACH(14) = T, THE NUMBER OF BASE-B DIGITS. -C I1MACH(15) = EMIN, THE SMALLEST EXPONENT E. -C I1MACH(16) = EMAX, THE LARGEST EXPONENT E. -C - INTEGER IMACH(16), OUTPUT, SC, SMALL(2) - SAVE IMACH, SC - REAL RMACH - EQUIVALENCE (IMACH(4),OUTPUT), (RMACH,SMALL(1)) - INTEGER I3, J, K, T3E(3) - DATA T3E(1) / 9777664 / - DATA T3E(2) / 5323660 / - DATA T3E(3) / 46980 / -C THIS VERSION ADAPTS AUTOMATICALLY TO MOST CURRENT MACHINES, -C INCLUDING AUTO-DOUBLE COMPILERS. -C TO COMPILE ON OLDER MACHINES, ADD A C IN COLUMN 1 -C ON THE NEXT LINE - DATA SC/0/ -C AND REMOVE THE C FROM COLUMN 1 IN ONE OF THE SECTIONS BELOW. -C CONSTANTS FOR EVEN OLDER MACHINES CAN BE OBTAINED BY -C mail netlib@research.bell-labs.com -C send old1mach from blas -C PLEASE SEND CORRECTIONS TO dmg OR ehg@bell-labs.com. -C -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES. -C -C DATA IMACH( 1) / 5 / -C DATA IMACH( 2) / 6 / -C DATA IMACH( 3) / 43 / -C DATA IMACH( 4) / 6 / -C DATA IMACH( 5) / 36 / -C DATA IMACH( 6) / 4 / -C DATA IMACH( 7) / 2 / -C DATA IMACH( 8) / 35 / -C DATA IMACH( 9) / O377777777777 / -C DATA IMACH(10) / 2 / -C DATA IMACH(11) / 27 / -C DATA IMACH(12) / -127 / -C DATA IMACH(13) / 127 / -C DATA IMACH(14) / 63 / -C DATA IMACH(15) / -127 / -C DATA IMACH(16) / 127 /, SC/987/ -C -C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING -C 32-BIT INTEGER ARITHMETIC. 
-C -C DATA IMACH( 1) / 5 / -C DATA IMACH( 2) / 6 / -C DATA IMACH( 3) / 7 / -C DATA IMACH( 4) / 6 / -C DATA IMACH( 5) / 32 / -C DATA IMACH( 6) / 4 / -C DATA IMACH( 7) / 2 / -C DATA IMACH( 8) / 31 / -C DATA IMACH( 9) / 2147483647 / -C DATA IMACH(10) / 2 / -C DATA IMACH(11) / 24 / -C DATA IMACH(12) / -127 / -C DATA IMACH(13) / 127 / -C DATA IMACH(14) / 56 / -C DATA IMACH(15) / -127 / -C DATA IMACH(16) / 127 /, SC/987/ -C -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES. -C -C NOTE THAT THE PUNCH UNIT, I1MACH(3), HAS BEEN SET TO 7 -C WHICH IS APPROPRIATE FOR THE UNIVAC-FOR SYSTEM. -C IF YOU HAVE THE UNIVAC-FTN SYSTEM, SET IT TO 1. -C -C DATA IMACH( 1) / 5 / -C DATA IMACH( 2) / 6 / -C DATA IMACH( 3) / 7 / -C DATA IMACH( 4) / 6 / -C DATA IMACH( 5) / 36 / -C DATA IMACH( 6) / 6 / -C DATA IMACH( 7) / 2 / -C DATA IMACH( 8) / 35 / -C DATA IMACH( 9) / O377777777777 / -C DATA IMACH(10) / 2 / -C DATA IMACH(11) / 27 / -C DATA IMACH(12) / -128 / -C DATA IMACH(13) / 127 / -C DATA IMACH(14) / 60 / -C DATA IMACH(15) /-1024 / -C DATA IMACH(16) / 1023 /, SC/987/ -C - IF (SC .NE. 987) THEN -* *** CHECK FOR AUTODOUBLE *** - SMALL(2) = 0 - RMACH = 1E13 - IF (SMALL(2) .NE. 0) THEN -* *** AUTODOUBLED *** - IF ( (SMALL(1) .EQ. 1117925532 - * .AND. SMALL(2) .EQ. -448790528) - * .OR. (SMALL(2) .EQ. 1117925532 - * .AND. SMALL(1) .EQ. -448790528)) THEN -* *** IEEE *** - IMACH(10) = 2 - IMACH(14) = 53 - IMACH(15) = -1021 - IMACH(16) = 1024 - ELSE IF ( SMALL(1) .EQ. -2065213935 - * .AND. SMALL(2) .EQ. 10752) THEN -* *** VAX WITH D_FLOATING *** - IMACH(10) = 2 - IMACH(14) = 56 - IMACH(15) = -127 - IMACH(16) = 127 - ELSE IF ( SMALL(1) .EQ. 1267827943 - * .AND. SMALL(2) .EQ. 704643072) THEN -* *** IBM MAINFRAME *** - IMACH(10) = 16 - IMACH(14) = 14 - IMACH(15) = -64 - IMACH(16) = 63 - ELSE - WRITE(*,9010) - STOP 777 - END IF - IMACH(11) = IMACH(14) - IMACH(12) = IMACH(15) - IMACH(13) = IMACH(16) - ELSE - RMACH = 1234567. - IF (SMALL(1) .EQ. 
1234613304) THEN -* *** IEEE *** - IMACH(10) = 2 - IMACH(11) = 24 - IMACH(12) = -125 - IMACH(13) = 128 - IMACH(14) = 53 - IMACH(15) = -1021 - IMACH(16) = 1024 - SC = 987 - ELSE IF (SMALL(1) .EQ. -1271379306) THEN -* *** VAX *** - IMACH(10) = 2 - IMACH(11) = 24 - IMACH(12) = -127 - IMACH(13) = 127 - IMACH(14) = 56 - IMACH(15) = -127 - IMACH(16) = 127 - SC = 987 - ELSE IF (SMALL(1) .EQ. 1175639687) THEN -* *** IBM MAINFRAME *** - IMACH(10) = 16 - IMACH(11) = 6 - IMACH(12) = -64 - IMACH(13) = 63 - IMACH(14) = 14 - IMACH(15) = -64 - IMACH(16) = 63 - SC = 987 - ELSE IF (SMALL(1) .EQ. 1251390520) THEN -* *** CONVEX C-1 *** - IMACH(10) = 2 - IMACH(11) = 24 - IMACH(12) = -128 - IMACH(13) = 127 - IMACH(14) = 53 - IMACH(15) = -1024 - IMACH(16) = 1023 - ELSE - DO 10 I3 = 1, 3 - J = SMALL(1) / 10000000 - K = SMALL(1) - 10000000*J - IF (K .NE. T3E(I3)) GO TO 20 - SMALL(1) = J - 10 CONTINUE -* *** CRAY T3E *** - IMACH( 1) = 5 - IMACH( 2) = 6 - IMACH( 3) = 0 - IMACH( 4) = 0 - IMACH( 5) = 64 - IMACH( 6) = 8 - IMACH( 7) = 2 - IMACH( 8) = 63 - CALL I1MCR1(IMACH(9), K, 32767, 16777215, 16777215) - IMACH(10) = 2 - IMACH(11) = 53 - IMACH(12) = -1021 - IMACH(13) = 1024 - IMACH(14) = 53 - IMACH(15) = -1021 - IMACH(16) = 1024 - GO TO 35 - 20 CALL I1MCR1(J, K, 16405, 9876536, 0) - IF (SMALL(1) .NE. 
J) THEN - WRITE(*,9020) - STOP 777 - END IF -* *** CRAY 1, XMP, 2, AND 3 *** - IMACH(1) = 5 - IMACH(2) = 6 - IMACH(3) = 102 - IMACH(4) = 6 - IMACH(5) = 46 - IMACH(6) = 8 - IMACH(7) = 2 - IMACH(8) = 45 - CALL I1MCR1(IMACH(9), K, 0, 4194303, 16777215) - IMACH(10) = 2 - IMACH(11) = 47 - IMACH(12) = -8188 - IMACH(13) = 8189 - IMACH(14) = 94 - IMACH(15) = -8141 - IMACH(16) = 8189 - GO TO 35 - END IF - END IF - IMACH( 1) = 5 - IMACH( 2) = 6 - IMACH( 3) = 7 - IMACH( 4) = 6 - IMACH( 5) = 32 - IMACH( 6) = 4 - IMACH( 7) = 2 - IMACH( 8) = 31 - IMACH( 9) = 2147483647 - 35 SC = 987 - END IF - 9010 FORMAT(/' Adjust autodoubled I1MACH by uncommenting data'/ - * ' statements appropriate for your machine and setting'/ - * ' IMACH(I) = IMACH(I+3) for I = 11, 12, and 13.') - 9020 FORMAT(/' Adjust I1MACH by uncommenting data statements'/ - * ' appropriate for your machine.') - IF (I .LT. 1 .OR. I .GT. 16) GO TO 40 - I1MACH = IMACH(I) - RETURN - 40 WRITE(*,*) 'I1MACH(I): I =',I,' is out of bounds.' - STOP -* /* C source for I1MACH -- remove the * in column 1 */ -* /* Note that some values may need changing. 
*/ -*#include -*#include -*#include -*#include -* -*long i1mach_(long *i) -*{ -* switch(*i){ -* case 1: return 5; /* standard input */ -* case 2: return 6; /* standard output */ -* case 3: return 7; /* standard punch */ -* case 4: return 0; /* standard error */ -* case 5: return 32; /* bits per integer */ -* case 6: return sizeof(int); -* case 7: return 2; /* base for integers */ -* case 8: return 31; /* digits of integer base */ -* case 9: return LONG_MAX; -* case 10: return FLT_RADIX; -* case 11: return FLT_MANT_DIG; -* case 12: return FLT_MIN_EXP; -* case 13: return FLT_MAX_EXP; -* case 14: return DBL_MANT_DIG; -* case 15: return DBL_MIN_EXP; -* case 16: return DBL_MAX_EXP; -* } -* fprintf(stderr, "invalid argument: i1mach(%ld)\n", *i); -* exit(1);return 0; /* some compilers demand return values */ -*} - END - SUBROUTINE I1MCR1(A, A1, B, C, D) -**** SPECIAL COMPUTATION FOR OLD CRAY MACHINES **** - INTEGER A, A1, B, C, D - A1 = 16777216*B + C - A = 16777216*A1 + D - END diff --git a/scipy-0.10.1/scipy/integrate/mach/r1mach.f b/scipy-0.10.1/scipy/integrate/mach/r1mach.f deleted file mode 100644 index 204530e35e..0000000000 --- a/scipy-0.10.1/scipy/integrate/mach/r1mach.f +++ /dev/null @@ -1,222 +0,0 @@ - REAL FUNCTION R1MACH(I) - INTEGER I -C -C SINGLE-PRECISION MACHINE CONSTANTS -C R1MACH(1) = B**(EMIN-1), THE SMALLEST POSITIVE MAGNITUDE. -C R1MACH(2) = B**EMAX*(1 - B**(-T)), THE LARGEST MAGNITUDE. -C R1MACH(3) = B**(-T), THE SMALLEST RELATIVE SPACING. -C R1MACH(4) = B**(1-T), THE LARGEST RELATIVE SPACING. -C R1MACH(5) = LOG10(B) -C - INTEGER SMALL(2) - INTEGER LARGE(2) - INTEGER RIGHT(2) - INTEGER DIVER(2) - INTEGER LOG10(2) -C needs to be (2) for AUTODOUBLE, HARRIS SLASH 6, ... 
- INTEGER SC - SAVE SMALL, LARGE, RIGHT, DIVER, LOG10, SC - REAL RMACH(5) - EQUIVALENCE (RMACH(1),SMALL(1)) - EQUIVALENCE (RMACH(2),LARGE(1)) - EQUIVALENCE (RMACH(3),RIGHT(1)) - EQUIVALENCE (RMACH(4),DIVER(1)) - EQUIVALENCE (RMACH(5),LOG10(1)) - INTEGER J, K, L, T3E(3) - DATA T3E(1) / 9777664 / - DATA T3E(2) / 5323660 / - DATA T3E(3) / 46980 / -C THIS VERSION ADAPTS AUTOMATICALLY TO MOST CURRENT MACHINES, -C INCLUDING AUTO-DOUBLE COMPILERS. -C TO COMPILE ON OLDER MACHINES, ADD A C IN COLUMN 1 -C ON THE NEXT LINE - DATA SC/0/ -C AND REMOVE THE C FROM COLUMN 1 IN ONE OF THE SECTIONS BELOW. -C CONSTANTS FOR EVEN OLDER MACHINES CAN BE OBTAINED BY -C mail netlib@research.bell-labs.com -C send old1mach from blas -C PLEASE SEND CORRECTIONS TO dmg OR ehg@bell-labs.com. -C -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES. -C DATA RMACH(1) / O402400000000 / -C DATA RMACH(2) / O376777777777 / -C DATA RMACH(3) / O714400000000 / -C DATA RMACH(4) / O716400000000 / -C DATA RMACH(5) / O776464202324 /, SC/987/ -C -C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING -C 32-BIT INTEGERS (EXPRESSED IN INTEGER AND OCTAL). -C DATA SMALL(1) / 8388608 / -C DATA LARGE(1) / 2147483647 / -C DATA RIGHT(1) / 880803840 / -C DATA DIVER(1) / 889192448 / -C DATA LOG10(1) / 1067065499 /, SC/987/ -C DATA RMACH(1) / O00040000000 / -C DATA RMACH(2) / O17777777777 / -C DATA RMACH(3) / O06440000000 / -C DATA RMACH(4) / O06500000000 / -C DATA RMACH(5) / O07746420233 /, SC/987/ -C -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES. -C DATA RMACH(1) / O000400000000 / -C DATA RMACH(2) / O377777777777 / -C DATA RMACH(3) / O146400000000 / -C DATA RMACH(4) / O147400000000 / -C DATA RMACH(5) / O177464202324 /, SC/987/ -C - IF (SC .NE. 987) THEN -* *** CHECK FOR AUTODOUBLE *** - SMALL(2) = 0 - RMACH(1) = 1E13 - IF (SMALL(2) .NE. 0) THEN -* *** AUTODOUBLED *** - IF ( SMALL(1) .EQ. 1117925532 - * .AND. SMALL(2) .EQ. 
-448790528) THEN -* *** IEEE BIG ENDIAN *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2146435071 - LARGE(2) = -1 - RIGHT(1) = 1017118720 - RIGHT(2) = 0 - DIVER(1) = 1018167296 - DIVER(2) = 0 - LOG10(1) = 1070810131 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(2) .EQ. 1117925532 - * .AND. SMALL(1) .EQ. -448790528) THEN -* *** IEEE LITTLE ENDIAN *** - SMALL(2) = 1048576 - SMALL(1) = 0 - LARGE(2) = 2146435071 - LARGE(1) = -1 - RIGHT(2) = 1017118720 - RIGHT(1) = 0 - DIVER(2) = 1018167296 - DIVER(1) = 0 - LOG10(2) = 1070810131 - LOG10(1) = 1352628735 - ELSE IF ( SMALL(1) .EQ. -2065213935 - * .AND. SMALL(2) .EQ. 10752) THEN -* *** VAX WITH D_FLOATING *** - SMALL(1) = 128 - SMALL(2) = 0 - LARGE(1) = -32769 - LARGE(2) = -1 - RIGHT(1) = 9344 - RIGHT(2) = 0 - DIVER(1) = 9472 - DIVER(2) = 0 - LOG10(1) = 546979738 - LOG10(2) = -805796613 - ELSE IF ( SMALL(1) .EQ. 1267827943 - * .AND. SMALL(2) .EQ. 704643072) THEN -* *** IBM MAINFRAME *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2147483647 - LARGE(2) = -1 - RIGHT(1) = 856686592 - RIGHT(2) = 0 - DIVER(1) = 873463808 - DIVER(2) = 0 - LOG10(1) = 1091781651 - LOG10(2) = 1352628735 - ELSE - WRITE(*,9010) - STOP 777 - END IF - ELSE - RMACH(1) = 1234567. - IF (SMALL(1) .EQ. 1234613304) THEN -* *** IEEE *** - SMALL(1) = 8388608 - LARGE(1) = 2139095039 - RIGHT(1) = 864026624 - DIVER(1) = 872415232 - LOG10(1) = 1050288283 - ELSE IF (SMALL(1) .EQ. -1271379306) THEN -* *** VAX *** - SMALL(1) = 128 - LARGE(1) = -32769 - RIGHT(1) = 13440 - DIVER(1) = 13568 - LOG10(1) = 547045274 - ELSE IF (SMALL(1) .EQ. 1175639687) THEN -* *** IBM MAINFRAME *** - SMALL(1) = 1048576 - LARGE(1) = 2147483647 - RIGHT(1) = 990904320 - DIVER(1) = 1007681536 - LOG10(1) = 1091781651 - ELSE IF (SMALL(1) .EQ. 1251390520) THEN -* *** CONVEX C-1 *** - SMALL(1) = 8388608 - LARGE(1) = 2147483647 - RIGHT(1) = 880803840 - DIVER(1) = 889192448 - LOG10(1) = 1067065499 - ELSE - DO 10 L = 1, 3 - J = SMALL(1) / 10000000 - K = SMALL(1) - 10000000*J - IF (K .NE. 
T3E(L)) GO TO 20 - SMALL(1) = J - 10 CONTINUE -* *** CRAY T3E *** - CALL I1MCRA(SMALL, K, 16, 0, 0) - CALL I1MCRA(LARGE, K, 32751, 16777215, 16777215) - CALL I1MCRA(RIGHT, K, 15520, 0, 0) - CALL I1MCRA(DIVER, K, 15536, 0, 0) - CALL I1MCRA(LOG10, K, 16339, 4461392, 10451455) - GO TO 30 - 20 CALL I1MCRA(J, K, 16405, 9876536, 0) - IF (SMALL(1) .NE. J) THEN - WRITE(*,9020) - STOP 777 - END IF -* *** CRAY 1, XMP, 2, AND 3 *** - CALL I1MCRA(SMALL(1), K, 8195, 8388608, 1) - CALL I1MCRA(LARGE(1), K, 24574, 16777215, 16777214) - CALL I1MCRA(RIGHT(1), K, 16338, 8388608, 0) - CALL I1MCRA(DIVER(1), K, 16339, 8388608, 0) - CALL I1MCRA(LOG10(1), K, 16383, 10100890, 8715216) - END IF - END IF - 30 SC = 987 - END IF -* SANITY CHECK - IF (RMACH(4) .GE. 1.0) STOP 776 - IF (I .LT. 1 .OR. I .GT. 5) THEN - WRITE(*,*) 'R1MACH(I): I =',I,' is out of bounds.' - STOP - END IF - R1MACH = RMACH(I) - RETURN - 9010 FORMAT(/' Adjust autodoubled R1MACH by getting data'/ - *' appropriate for your machine from D1MACH.') - 9020 FORMAT(/' Adjust R1MACH by uncommenting data statements'/ - *' appropriate for your machine.') -* /* C source for R1MACH -- remove the * in column 1 */ -*#include -*#include -*#include -*float r1mach_(long *i) -*{ -* switch(*i){ -* case 1: return FLT_MIN; -* case 2: return FLT_MAX; -* case 3: return FLT_EPSILON/FLT_RADIX; -* case 4: return FLT_EPSILON; -* case 5: return log10(FLT_RADIX); -* } -* fprintf(stderr, "invalid argument: r1mach(%ld)\n", *i); -* exit(1); return 0; /* else complaint of missing return value */ -*} - END - SUBROUTINE I1MCRA(A, A1, B, C, D) -**** SPECIAL COMPUTATION FOR CRAY MACHINES **** - INTEGER A, A1, B, C, D - A1 = 16777216*B + C - A = 16777216*A1 + D - END diff --git a/scipy-0.10.1/scipy/integrate/mach/xerror.f b/scipy-0.10.1/scipy/integrate/mach/xerror.f deleted file mode 100644 index baa55067ba..0000000000 --- a/scipy-0.10.1/scipy/integrate/mach/xerror.f +++ /dev/null @@ -1,22 +0,0 @@ - SUBROUTINE XERROR(MESS,NMESS,L1,L2) -C -C THIS IS A DUMMY 
XERROR ROUTINE TO PRINT ERROR MESSAGES WITH NMESS -C CHARACTERS. L1 AND L2 ARE DUMMY PARAMETERS TO MAKE THIS CALL -C COMPATIBLE WITH THE SLATEC XERROR ROUTINE. THIS IS A FORTRAN 77 -C ROUTINE. -C - CHARACTER*(*) MESS - NN=NMESS/70 - NR=NMESS-70*NN - IF(NR.NE.0) NN=NN+1 - K=1 - PRINT 900 - 900 FORMAT(/) - DO 10 I=1,NN - KMIN=MIN0(K+69,NMESS) - PRINT *, MESS(K:KMIN) - K=K+70 - 10 CONTINUE - PRINT 900 - RETURN - END diff --git a/scipy-0.10.1/scipy/integrate/multipack.h b/scipy-0.10.1/scipy/integrate/multipack.h deleted file mode 100644 index c6f21b3cb8..0000000000 --- a/scipy-0.10.1/scipy/integrate/multipack.h +++ /dev/null @@ -1,213 +0,0 @@ -/* MULTIPACK module by Travis Oliphant - -Copyright (c) 2002 Travis Oliphant all rights reserved -oliphant.travis@ieee.org -Permission to use, modify, and distribute this software is given under the -terms of the SciPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -*/ - - -/* This extension module is a collection of wrapper functions around -common FORTRAN code in the packages MINPACK, ODEPACK, and QUADPACK plus -some differential algebraic equation solvers. - -The wrappers are meant to be nearly direct translations between the -FORTAN code and Python. Some parameters like sizes do not need to be -passed since they are available from the objects. - -It is anticipated that a pure Python module be written to call these lower -level routines and make a simpler user interface. All of the routines define -default values for little-used parameters so that even the raw routines are -quite useful without a separate wrapper. - -FORTRAN Outputs that are not either an error indicator or the sought-after -results are placed in a dictionary and returned as an optional member of -the result tuple when the full_output argument is non-zero. 
-*/ - -#include "Python.h" -#include "numpy/npy_3kcompat.h" - -#include "numpy/arrayobject.h" - -#define PYERR(errobj,message) {PyErr_SetString(errobj,message); goto fail;} -#define PYERR2(errobj,message) {PyErr_Print(); PyErr_SetString(errobj, message); goto fail;} -#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS) - -#define STORE_VARS() PyObject *store_multipack_globals[4]; int store_multipack_globals3; - -#define INIT_FUNC(fun,arg,errobj) { /* Get extra arguments or set to zero length tuple */ \ - store_multipack_globals[0] = multipack_python_function; \ - store_multipack_globals[1] = multipack_extra_arguments; \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. */ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun)) \ - PYERR(errobj,"First argument must be a callable function."); \ - multipack_python_function = fun; \ - multipack_extra_arguments = arg; } - -#define INIT_JAC_FUNC(fun,Dfun,arg,col_deriv,errobj) { \ - store_multipack_globals[0] = multipack_python_function; \ - store_multipack_globals[1] = multipack_extra_arguments; \ - store_multipack_globals[2] = multipack_python_jacobian; \ - store_multipack_globals3 = multipack_jac_transpose; \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. 
*/ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun) || (Dfun != Py_None && !PyCallable_Check(Dfun))) \ - PYERR(errobj,"The function and its Jacobian must be callable functions."); \ - multipack_python_function = fun; \ - multipack_extra_arguments = arg; \ - multipack_python_jacobian = Dfun; \ - multipack_jac_transpose = !(col_deriv);} - -#define RESTORE_JAC_FUNC() multipack_python_function = store_multipack_globals[0]; \ - multipack_extra_arguments = store_multipack_globals[1]; \ - multipack_python_jacobian = store_multipack_globals[2]; \ - multipack_jac_transpose = store_multipack_globals3; - -#define RESTORE_FUNC() multipack_python_function = store_multipack_globals[0]; \ - multipack_extra_arguments = store_multipack_globals[1]; - -#define SET_DIAG(ap_diag,o_diag,mode) { /* Set the diag vector from input */ \ - if (o_diag == NULL || o_diag == Py_None) { \ - ap_diag = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); \ - if (ap_diag == NULL) goto fail; \ - diag = (double *)ap_diag -> data; \ - mode = 1; \ - } \ - else { \ - ap_diag = (PyArrayObject *)PyArray_ContiguousFromObject(o_diag, PyArray_DOUBLE, 1, 1); \ - if (ap_diag == NULL) goto fail; \ - diag = (double *)ap_diag -> data; \ - mode = 2; } } - -#define MATRIXC2F(jac,data,n,m) {double *p1=(double *)(jac), *p2, *p3=(double *)(data);\ -int i,j;\ -for (j=0;j<(m);p3++,j++) \ - for (p2=p3,i=0;i<(n);p2+=(m),i++,p1++) \ - *p1 = *p2; } - -static PyObject *multipack_python_function=NULL; -static PyObject *multipack_python_jacobian=NULL; -static PyObject *multipack_extra_arguments=NULL; /* a tuple */ -static int multipack_jac_transpose=1; - - -static PyArrayObject * my_make_numpy_array(PyObject *y0, int type, int mindim, int maxdim) - /* This is just like PyArray_ContiguousFromObject except it handles - * single numeric datatypes as 1-element, rank-1 arrays instead of as - * scalars. 
- */ -{ - PyArrayObject *new_array; - PyObject *tmpobj; - - Py_INCREF(y0); - - if (PyInt_Check(y0) || PyFloat_Check(y0)) { - tmpobj = PyList_New(1); - PyList_SET_ITEM(tmpobj, 0, y0); /* reference now belongs to tmpobj */ - } - else - tmpobj = y0; - - new_array = (PyArrayObject *)PyArray_ContiguousFromObject(tmpobj, type, mindim, maxdim); - - Py_DECREF(tmpobj); - return new_array; -} - -static PyObject *call_python_function(PyObject *func, npy_intp n, double *x, PyObject *args, int dim, PyObject *error_obj) -{ - /* - This is a generic function to call a python function that takes a 1-D - sequence as a first argument and optional extra_arguments (should be a - zero-length tuple if none desired). The result of the function is - returned in a multiarray object. - -- build sequence object from values in x. - -- add extra arguments (if any) to an argument list. - -- call Python callable object - -- check if error occurred: - if so return NULL - -- if no error, place result of Python code into multiarray object. - */ - - PyArrayObject *sequence = NULL; - PyObject *arglist = NULL, *tmpobj = NULL; - PyObject *arg1 = NULL, *str1 = NULL; - PyObject *result = NULL; - PyArrayObject *result_array = NULL; - - /* Build sequence argument from inputs */ - sequence = (PyArrayObject *)PyArray_SimpleNewFromData(1, &n, PyArray_DOUBLE, (char *)x); - if (sequence == NULL) PYERR2(error_obj,"Internal failure to make an array of doubles out of first\n argument to function call."); - - /* Build argument list */ - if ((arg1 = PyTuple_New(1)) == NULL) { - Py_DECREF(sequence); - return NULL; - } - PyTuple_SET_ITEM(arg1, 0, (PyObject *)sequence); - /* arg1 now owns sequence reference */ - if ((arglist = PySequence_Concat( arg1, args)) == NULL) - PYERR2(error_obj,"Internal error constructing argument list."); - - Py_DECREF(arg1); /* arglist has a reference to sequence, now. */ - arg1=NULL; - - /* Call function object --- variable passed to routine. Extra - arguments are in another passed variable. 
- */ - if ((result = PyEval_CallObject(func, arglist))==NULL) { - PyErr_Print(); - tmpobj = PyObject_GetAttrString(func, "func_name"); - if (tmpobj == NULL) goto fail; - str1 = PyString_FromString("Error occurred while calling the Python function named "); - if (str1 == NULL) { Py_DECREF(tmpobj); goto fail;} - PyString_ConcatAndDel(&str1, tmpobj); - PyErr_SetString(error_obj, PyString_AsString(str1)); - Py_DECREF(str1); - goto fail; - } - - if ((result_array = (PyArrayObject *)PyArray_ContiguousFromObject(result, PyArray_DOUBLE, dim-1, dim))==NULL) - PYERR2(error_obj,"Result from function call is not a proper array of floats."); - - Py_DECREF(result); - Py_DECREF(arglist); - return (PyObject *)result_array; - - fail: - Py_XDECREF(arglist); - Py_XDECREF(result); - Py_XDECREF(arg1); - return NULL; -} - - - - - - - - - - - - - diff --git a/scipy-0.10.1/scipy/integrate/odepack.py b/scipy-0.10.1/scipy/integrate/odepack.py deleted file mode 100644 index f18ca63da8..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack.py +++ /dev/null @@ -1,158 +0,0 @@ -# Author: Travis Oliphant - -__all__ = ['odeint'] - -import _odepack -from copy import copy - -_msgs = {2: "Integration successful.", - -1: "Excess work done on this call (perhaps wrong Dfun type).", - -2: "Excess accuracy requested (tolerances too small).", - -3: "Illegal input detected (internal error).", - -4: "Repeated error test failures (internal error).", - -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", - -6: "Error weight became zero during problem.", - -7: "Internal workspace insufficient to finish (internal error)." - } - -def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, - ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, - hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, - mxords=5, printmessg=0): - """ - Integrate a system of ordinary differential equations. 
- - Solve a system of ordinary differential equations using lsoda from the - FORTRAN library odepack. - - Solves the initial value problem for stiff or non-stiff systems - of first order ode-s:: - - dy/dt = func(y,t0,...) - - where y can be a vector. - - Parameters - ---------- - func : callable(y, t0, ...) - Computes the derivative of y at t0. - y0 : array - Initial condition on y (can be a vector). - t : array - A sequence of time points for which to solve for y. The initial - value point should be the first element of this sequence. - args : tuple - Extra arguments to pass to function. - Dfun : callable(y, t0, ...) - Gradient (Jacobian) of func. - col_deriv : boolean - True if Dfun defines derivatives down columns (faster), - otherwise Dfun should define derivatives across rows. - full_output : boolean - True if to return a dictionary of optional outputs as the second output - printmessg : boolean - Whether to print the convergence message - - Returns - ------- - y : array, shape (len(t), len(y0)) - Array containing the value of y for each desired time in t, - with the initial value y0 in the first row. - - infodict : dict, only returned if full_output == True - Dictionary containing additional output information - - ======= ============================================================ - key meaning - ======= ============================================================ - 'hu' vector of step sizes successfully used for each time step. - 'tcur' vector with the value of t reached for each time step. - (will always be at least as large as the input times). - 'tolsf' vector of tolerance scale factors, greater than 1.0, - computed when a request for too much accuracy was detected. 
- 'tsw' value of t at the time of the last method switch - (given for each time step) - 'nst' cumulative number of time steps - 'nfe' cumulative number of function evaluations for each time step - 'nje' cumulative number of jacobian evaluations for each time step - 'nqu' a vector of method orders for each successful step. - 'imxer' index of the component of largest magnitude in the - weighted local error vector (e / ewt) on an error return, -1 - otherwise. - 'lenrw' the length of the double work array required. - 'leniw' the length of integer work array required. - 'mused' a vector of method indicators for each successful time step: - 1: adams (nonstiff), 2: bdf (stiff) - ======= ============================================================ - - Other Parameters - ---------------- - ml, mu : integer - If either of these are not-None or non-negative, then the - Jacobian is assumed to be banded. These give the number of - lower and upper non-zero diagonals in this banded matrix. - For the banded case, Dfun should return a matrix whose - columns contain the non-zero bands (starting with the - lowest diagonal). Thus, the return matrix from Dfun should - have shape ``len(y0) * (ml + mu + 1) when ml >=0 or mu >=0`` - rtol, atol : float - The input parameters rtol and atol determine the error - control performed by the solver. The solver will control the - vector, e, of estimated local errors in y, according to an - inequality of the form ``max-norm of (e / ewt) <= 1``, - where ewt is a vector of positive error weights computed as: - ``ewt = rtol * abs(y) + atol`` - rtol and atol can be either vectors the same length as y or scalars. - Defaults to 1.49012e-8. - tcrit : array - Vector of critical points (e.g. singularities) where integration - care should be taken. - h0 : float, (0: solver-determined) - The step size to be attempted on the first step. - hmax : float, (0: solver-determined) - The maximum absolute step size allowed. 
- hmin : float, (0: solver-determined) - The minimum absolute step size allowed. - ixpr : boolean - Whether to generate extra printing at method switches. - mxstep : integer, (0: solver-determined) - Maximum number of (internally defined) steps allowed for each - integration point in t. - mxhnil : integer, (0: solver-determined) - Maximum number of messages printed. - mxordn : integer, (0: solver-determined) - Maximum order to be allowed for the nonstiff (Adams) method. - mxords : integer, (0: solver-determined) - Maximum order to be allowed for the stiff (BDF) method. - - See Also - -------- - ode : a more object-oriented integrator based on VODE - quad : for finding the area under a curve - - """ - - if ml is None: - ml = -1 # changed to zero inside function call - if mu is None: - mu = -1 # changed to zero inside function call - t = copy(t) - y0 = copy(y0) - output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu, - full_output, rtol, atol, tcrit, h0, hmax, hmin, - ixpr, mxstep, mxhnil, mxordn, mxords) - if output[-1] < 0: - print _msgs[output[-1]] - print "Run with full_output = 1 to get quantitative information." - else: - if printmessg: - print _msgs[output[-1]] - - if full_output: - output[1]['message'] = _msgs[output[-1]] - - output = output[:-1] - if len(output) == 1: - return output[0] - else: - return output diff --git a/scipy-0.10.1/scipy/integrate/odepack/adjlr.f b/scipy-0.10.1/scipy/integrate/odepack/adjlr.f deleted file mode 100644 index f091b311b4..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/adjlr.f +++ /dev/null @@ -1,24 +0,0 @@ - subroutine adjlr (n, isp, ldif) - integer n, isp, ldif - dimension isp(1) -c----------------------------------------------------------------------- -c this routine computes an adjustment, ldif, to the required -c integer storage space in iwk (sparse matrix work space). -c it is called only if the word length ratio is lrat = 1. 
-c this is to account for the possibility that the symbolic lu phase -c may require more storage than the numerical lu and solution phases. -c----------------------------------------------------------------------- - integer ip, jlmax, jumax, lnfc, lsfc, nzlu -c - ip = 2*n + 1 -c get jlmax = ijl(n) and jumax = iju(n) (sizes of jl and ju). ---------- - jlmax = isp(ip) - jumax = isp(ip+ip) -c nzlu = (size of l) + (size of u) = (il(n+1)-il(1)) + (iu(n+1)-iu(1)). - nzlu = isp(n+1) - isp(1) + isp(ip+n+1) - isp(ip+1) - lsfc = 12*n + 3 + 2*max0(jlmax,jumax) - lnfc = 9*n + 2 + jlmax + jumax + nzlu - ldif = max0(0, lsfc - lnfc) - return -c----------------------- end of subroutine adjlr ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/aigbt.f b/scipy-0.10.1/scipy/integrate/odepack/aigbt.f deleted file mode 100644 index 8d8a90f2aa..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/aigbt.f +++ /dev/null @@ -1,38 +0,0 @@ - subroutine aigbt (res, adda, neq, t, y, ydot, - 1 mb, nb, pw, ipvt, ier ) -clll. optimize - external res, adda - integer neq, mb, nb, ipvt, ier - integer i, lenpw, lblox, lpb, lpc - double precision t, y, ydot, pw - dimension y(1), ydot(1), pw(1), ipvt(1), neq(1) -c----------------------------------------------------------------------- -c this subroutine computes the initial value -c of the vector ydot satisfying -c a * ydot = g(t,y) -c when a is nonsingular. it is called by lsoibt for -c initialization only, when istate = 0 . -c aigbt returns an error flag ier.. -c ier = 0 means aigbt was successful. -c ier .ge. 2 means res returned an error flag ires = ier. -c ier .lt. 0 means the a matrix was found to have a singular -c diagonal block (hence ydot could not be solved for). -c----------------------------------------------------------------------- - lblox = mb*mb*nb - lpb = 1 + lblox - lpc = lpb + lblox - lenpw = 3*lblox - do 10 i = 1,lenpw - 10 pw(i) = 0.0d0 - ier = 1 - call res (neq, t, y, pw, ydot, ier) - if (ier .gt. 
1) return - call adda (neq, t, y, mb, nb, pw(1), pw(lpb), pw(lpc) ) - call decbt (mb, nb, pw, pw(lpb), pw(lpc), ipvt, ier) - if (ier .eq. 0) go to 20 - ier = -ier - return - 20 call solbt (mb, nb, pw, pw(lpb), pw(lpc), ydot, ipvt) - return -c-------------------- end of subroutine aigbt -------------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/ainvg.f b/scipy-0.10.1/scipy/integrate/odepack/ainvg.f deleted file mode 100644 index a9411c69d3..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/ainvg.f +++ /dev/null @@ -1,62 +0,0 @@ - subroutine ainvg (res, adda, neq, t, y, ydot, miter, - 1 ml, mu, pw, ipvt, ier ) -clll. optimize - external res, adda - integer neq, miter, ml, mu, ipvt, ier - integer i, lenpw, mlp1, nrowpw - double precision t, y, ydot, pw - dimension y(1), ydot(1), pw(1), ipvt(1) -c----------------------------------------------------------------------- -c this subroutine computes the initial value -c of the vector ydot satisfying -c a * ydot = g(t,y) -c when a is nonsingular. it is called by lsodi for -c initialization only, when istate = 0 . -c ainvg returns an error flag ier.. -c ier = 0 means ainvg was successful. -c ier .ge. 2 means res returned an error flag ires = ier. -c ier .lt. 0 means the a-matrix was found to be singular. -c----------------------------------------------------------------------- -c - if (miter .ge. 4) go to 100 -c -c full matrix case ----------------------------------------------------- -c - lenpw = neq*neq - do 10 i = 1, lenpw - 10 pw(i) = 0.0d0 -c - ier = 1 - call res ( neq, t, y, pw, ydot, ier ) - if (ier .gt. 1) return -c - call adda ( neq, t, y, 0, 0, pw, neq ) - call dgefa ( pw, neq, neq, ipvt, ier ) - if (ier .eq. 
0) go to 20 - ier = -ier - return - 20 call dgesl ( pw, neq, neq, ipvt, ydot, 0 ) - return -c -c band matrix case ----------------------------------------------------- -c - 100 continue - nrowpw = 2*ml + mu + 1 - lenpw = neq * nrowpw - do 110 i = 1, lenpw - 110 pw(i) = 0.0d0 -c - ier = 1 - call res ( neq, t, y, pw, ydot, ier ) - if (ier .gt. 1) return -c - mlp1 = ml + 1 - call adda ( neq, t, y, ml, mu, pw(mlp1), nrowpw ) - call dgbfa ( pw, nrowpw, neq, ml, mu, ipvt, ier ) - if (ier .eq. 0) go to 120 - ier = -ier - return - 120 call dgbsl ( pw, nrowpw, neq, ml, mu, ipvt, ydot, 0 ) - return -c-------------------- end of subroutine ainvg -------------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/blkdta000.f b/scipy-0.10.1/scipy/integrate/odepack/blkdta000.f deleted file mode 100644 index 7885277768..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/blkdta000.f +++ /dev/null @@ -1,26 +0,0 @@ - block data -c----------------------------------------------------------------------- -c this data subprogram loads variables into the internal common -c blocks used by the odepack solvers. the variables are -c defined as follows.. -c illin = counter for the number of consecutive times the package -c was called with illegal input. the run is stopped when -c illin reaches 5. -c ntrep = counter for the number of consecutive times the package -c was called with istate = 1 and tout = t. the run is -c stopped when ntrep reaches 5. -c mesflg = flag to control printing of error messages. 1 means print, -c 0 means no printing. -c lunit = default value of logical unit number for printing of error -c messages. 
-c----------------------------------------------------------------------- - integer illin, iduma, ntrep, idumb, iowns, icomm, mesflg, lunit - double precision rowns, rcomm - common /ls0001/ rowns(209), rcomm(9), - 1 illin, iduma(10), ntrep, idumb(2), iowns(6), icomm(19) - common /eh0001/ mesflg, lunit - data illin/0/, ntrep/0/ - data mesflg/1/, lunit/6/ -c -c----------------------- end of block data ----------------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/bnorm.f b/scipy-0.10.1/scipy/integrate/odepack/bnorm.f deleted file mode 100644 index 16bbd4ebe9..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/bnorm.f +++ /dev/null @@ -1,30 +0,0 @@ - double precision function bnorm (n, a, nra, ml, mu, w) -clll. optimize -c----------------------------------------------------------------------- -c this function computes the norm of a banded n by n matrix, -c stored in the array a, that is consistent with the weighted max-norm -c on vectors, with weights stored in the array w. -c ml and mu are the lower and upper half-bandwidths of the matrix. -c nra is the first dimension of the a array, nra .ge. ml+mu+1. -c in terms of the matrix elements a(i,j), the norm is given by.. 
-c bnorm = max(i=1,...,n) ( w(i) * sum(j=1,...,n) abs(a(i,j))/w(j) ) -c----------------------------------------------------------------------- - integer n, nra, ml, mu - integer i, i1, jlo, jhi, j - double precision a, w - double precision an, sum - dimension a(nra,n), w(n) - an = 0.0d0 - do 20 i = 1,n - sum = 0.0d0 - i1 = i + mu + 1 - jlo = max0(i-ml,1) - jhi = min0(i+mu,n) - do 10 j = jlo,jhi - 10 sum = sum + dabs(a(i1-j,j))/w(j) - an = dmax1(an,sum*w(i)) - 20 continue - bnorm = an - return -c----------------------- end of function bnorm ------------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/cdrv.f b/scipy-0.10.1/scipy/integrate/odepack/cdrv.f deleted file mode 100644 index b2835c279b..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/cdrv.f +++ /dev/null @@ -1,267 +0,0 @@ - subroutine cdrv - * (n, r,c,ic, ia,ja,a, b, z, nsp,isp,rsp,esp, path, flag) -clll. optimize -c*** subroutine cdrv -c*** driver for subroutines for solving sparse nonsymmetric systems of -c linear equations (compressed pointer storage) -c -c -c parameters -c class abbreviations are-- -c n - integer variable -c f - real variable -c v - supplies a value to the driver -c r - returns a result from the driver -c i - used internally by the driver -c a - array -c -c class - parameter -c ------+---------- -c - -c the nonzero entries of the coefficient matrix m are stored -c row-by-row in the array a. to identify the individual nonzero -c entries in each row, we need to know in which column each entry -c lies. the column indices which correspond to the nonzero entries -c of m are stored in the array ja. i.e., if a(k) = m(i,j), then -c ja(k) = j. in addition, we need to know where each row starts and -c how long it is. the index positions in ja and a where the rows of -c m begin are stored in the array ia. i.e., if m(i,j) is the first -c nonzero entry (stored) in the i-th row and a(k) = m(i,j), then -c ia(i) = k. 
moreover, the index in ja and a of the first location -c following the last element in the last row is stored in ia(n+1). -c thus, the number of entries in the i-th row is given by -c ia(i+1) - ia(i), the nonzero entries of the i-th row are stored -c consecutively in -c a(ia(i)), a(ia(i)+1), ..., a(ia(i+1)-1), -c and the corresponding column indices are stored consecutively in -c ja(ia(i)), ja(ia(i)+1), ..., ja(ia(i+1)-1). -c for example, the 5 by 5 matrix -c ( 1. 0. 2. 0. 0.) -c ( 0. 3. 0. 0. 0.) -c m = ( 0. 4. 5. 6. 0.) -c ( 0. 0. 0. 7. 0.) -c ( 0. 0. 0. 8. 9.) -c would be stored as -c - 1 2 3 4 5 6 7 8 9 -c ---+-------------------------- -c ia - 1 3 4 7 8 10 -c ja - 1 3 2 2 3 4 4 4 5 -c a - 1. 2. 3. 4. 5. 6. 7. 8. 9. . -c -c nv - n - number of variables/equations. -c fva - a - nonzero entries of the coefficient matrix m, stored -c - by rows. -c - size = number of nonzero entries in m. -c nva - ia - pointers to delimit the rows in a. -c - size = n+1. -c nva - ja - column numbers corresponding to the elements of a. -c - size = size of a. -c fva - b - right-hand side b. b and z can the same array. -c - size = n. -c fra - z - solution x. b and z can be the same array. -c - size = n. -c -c the rows and columns of the original matrix m can be -c reordered (e.g., to reduce fillin or ensure numerical stability) -c before calling the driver. if no reordering is done, then set -c r(i) = c(i) = ic(i) = i for i=1,...,n. the solution z is returned -c in the original order. -c if the columns have been reordered (i.e., c(i).ne.i for some -c i), then the driver will call a subroutine (nroc) which rearranges -c each row of ja and a, leaving the rows in the original order, but -c placing the elements of each row in increasing order with respect -c to the new ordering. if path.ne.1, then nroc is assumed to have -c been called already. -c -c nva - r - ordering of the rows of m. -c - size = n. -c nva - c - ordering of the columns of m. -c - size = n. 
-c nva - ic - inverse of the ordering of the columns of m. i.e., -c - ic(c(i)) = i for i=1,...,n. -c - size = n. -c -c the solution of the system of linear equations is divided into -c three stages -- -c nsfc -- the matrix m is processed symbolically to determine where -c fillin will occur during the numeric factorization. -c nnfc -- the matrix m is factored numerically into the product ldu -c of a unit lower triangular matrix l, a diagonal matrix -c d, and a unit upper triangular matrix u, and the system -c mx = b is solved. -c nnsc -- the linear system mx = b is solved using the ldu -c or factorization from nnfc. -c nntc -- the transposed linear system mt x = b is solved using -c the ldu factorization from nnf. -c for several systems whose coefficient matrices have the same -c nonzero structure, nsfc need be done only once (for the first -c system). then nnfc is done once for each additional system. for -c several systems with the same coefficient matrix, nsfc and nnfc -c need be done only once (for the first system). then nnsc or nntc -c is done once for each additional right-hand side. -c -c nv - path - path specification. values and their meanings are -- -c - 1 perform nroc, nsfc, and nnfc. -c - 2 perform nnfc only (nsfc is assumed to have been -c - done in a manner compatible with the storage -c - allocation used in the driver). -c - 3 perform nnsc only (nsfc and nnfc are assumed to -c - have been done in a manner compatible with the -c - storage allocation used in the driver). -c - 4 perform nntc only (nsfc and nnfc are assumed to -c - have been done in a manner compatible with the -c - storage allocation used in the driver). -c - 5 perform nroc and nsfc. -c -c various errors are detected by the driver and the individual -c subroutines. -c -c nr - flag - error flag. 
values and their meanings are -- -c - 0 no errors detected -c - n+k null row in a -- row = k -c - 2n+k duplicate entry in a -- row = k -c - 3n+k insufficient storage in nsfc -- row = k -c - 4n+1 insufficient storage in nnfc -c - 5n+k null pivot -- row = k -c - 6n+k insufficient storage in nsfc -- row = k -c - 7n+1 insufficient storage in nnfc -c - 8n+k zero pivot -- row = k -c - 10n+1 insufficient storage in cdrv -c - 11n+1 illegal path specification -c -c working storage is needed for the factored form of the matrix -c m plus various temporary vectors. the arrays isp and rsp should be -c equivalenced. integer storage is allocated from the beginning of -c isp and real storage from the end of rsp. -c -c nv - nsp - declared dimension of rsp. nsp generally must -c - be larger than 8n+2 + 2k (where k = (number of -c - nonzero entries in m)). -c nvira - isp - integer working storage divided up into various arrays -c - needed by the subroutines. isp and rsp should be -c - equivalenced. -c - size = lratio*nsp. -c fvira - rsp - real working storage divided up into various arrays -c - needed by the subroutines. isp and rsp should be -c - equivalenced. -c - size = nsp. -c nr - esp - if sufficient storage was available to perform the -c - symbolic factorization (nsfc), then esp is set to -c - the amount of excess storage provided (negative if -c - insufficient storage was available to perform the -c - numeric factorization (nnfc)). -c -c -c conversion to double precision -c -c to convert these routines for double precision arrays.. -c (1) use the double precision declarations in place of the real -c declarations in each subprogram, as given in comment cards. -c (2) change the data-loaded value of the integer lratio -c in subroutine cdrv, as indicated below. -c (3) change e0 to d0 in the constants in statement number 10 -c in subroutine nnfc and the line following that. 
-c - integer r(1), c(1), ic(1), ia(1), ja(1), isp(1), esp, path, - * flag, d, u, q, row, tmp, ar, umax - double precision a(1), b(1), z(1), rsp(1) -c -c set lratio equal to the ratio between the length of floating point -c and integer array data. e. g., lratio = 1 for (real, integer), -c lratio = 2 for (double precision, integer) -c - data lratio/2/ -c - if (path.lt.1 .or. 5.lt.path) go to 111 -c******initialize and divide up temporary storage ******************* - il = 1 - ijl = il + (n+1) - iu = ijl + n - iju = iu + (n+1) - irl = iju + n - jrl = irl + n - jl = jrl + n -c -c ****** reorder a if necessary, call nsfc if flag is set *********** - if ((path-1) * (path-5) .ne. 0) go to 5 - max = (lratio*nsp + 1 - jl) - (n+1) - 5*n - jlmax = max/2 - q = jl + jlmax - ira = q + (n+1) - jra = ira + n - irac = jra + n - iru = irac + n - jru = iru + n - jutmp = jru + n - jumax = lratio*nsp + 1 - jutmp - esp = max/lratio - if (jlmax.le.0 .or. jumax.le.0) go to 110 -c - do 1 i=1,n - if (c(i).ne.i) go to 2 - 1 continue - go to 3 - 2 ar = nsp + 1 - n - call nroc - * (n, ic, ia,ja,a, isp(il), rsp(ar), isp(iu), flag) - if (flag.ne.0) go to 100 -c - 3 call nsfc - * (n, r, ic, ia,ja, - * jlmax, isp(il), isp(jl), isp(ijl), - * jumax, isp(iu), isp(jutmp), isp(iju), - * isp(q), isp(ira), isp(jra), isp(irac), - * isp(irl), isp(jrl), isp(iru), isp(jru), flag) - if(flag .ne. 0) go to 100 -c ****** move ju next to jl ***************************************** - jlmax = isp(ijl+n-1) - ju = jl + jlmax - jumax = isp(iju+n-1) - if (jumax.le.0) go to 5 - do 4 j=1,jumax - 4 isp(ju+j-1) = isp(jutmp+j-1) -c -c ****** call remaining subroutines ********************************* - 5 jlmax = isp(ijl+n-1) - ju = jl + jlmax - jumax = isp(iju+n-1) - l = (ju + jumax - 2 + lratio) / lratio + 1 - lmax = isp(il+n) - 1 - d = l + lmax - u = d + n - row = nsp + 1 - n - tmp = row - n - umax = tmp - u - esp = umax - (isp(iu+n) - 1) -c - if ((path-1) * (path-2) .ne. 
0) go to 6 - if (umax.lt.0) go to 110 - call nnfc - * (n, r, c, ic, ia, ja, a, z, b, - * lmax, isp(il), isp(jl), isp(ijl), rsp(l), rsp(d), - * umax, isp(iu), isp(ju), isp(iju), rsp(u), - * rsp(row), rsp(tmp), isp(irl), isp(jrl), flag) - if(flag .ne. 0) go to 100 -c - 6 if ((path-3) .ne. 0) go to 7 - call nnsc - * (n, r, c, isp(il), isp(jl), isp(ijl), rsp(l), - * rsp(d), isp(iu), isp(ju), isp(iju), rsp(u), - * z, b, rsp(tmp)) -c - 7 if ((path-4) .ne. 0) go to 8 - call nntc - * (n, r, c, isp(il), isp(jl), isp(ijl), rsp(l), - * rsp(d), isp(iu), isp(ju), isp(iju), rsp(u), - * z, b, rsp(tmp)) - 8 return -c -c ** error.. error detected in nroc, nsfc, nnfc, or nnsc - 100 return -c ** error.. insufficient storage - 110 flag = 10*n + 1 - return -c ** error.. illegal path specification - 111 flag = 11*n + 1 - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/cfode.f b/scipy-0.10.1/scipy/integrate/odepack/cfode.f deleted file mode 100644 index 3becd61724..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/cfode.f +++ /dev/null @@ -1,112 +0,0 @@ - subroutine cfode (meth, elco, tesco) -clll. optimize - integer meth - integer i, ib, nq, nqm1, nqp1 - double precision elco, tesco - double precision agamq, fnq, fnqm1, pc, pint, ragq, - 1 rqfac, rq1fac, tsign, xpin - dimension elco(13,12), tesco(3,12) -c----------------------------------------------------------------------- -c cfode is called by the integrator routine to set coefficients -c needed there. the coefficients for the current method, as -c given by the value of meth, are set for all orders and saved. -c the maximum order assumed here is 12 if meth = 1 and 5 if meth = 2. -c (a smaller value of the maximum order is also allowed.) -c cfode is called once at the beginning of the problem, -c and is not called again unless and until meth is changed. -c -c the elco array contains the basic method coefficients. -c the coefficients el(i), 1 .le. i .le. nq+1, for the method of -c order nq are stored in elco(i,nq). 
they are given by a genetrating -c polynomial, i.e., -c l(x) = el(1) + el(2)*x + ... + el(nq+1)*x**nq. -c for the implicit adams methods, l(x) is given by -c dl/dx = (x+1)*(x+2)*...*(x+nq-1)/factorial(nq-1), l(-1) = 0. -c for the bdf methods, l(x) is given by -c l(x) = (x+1)*(x+2)* ... *(x+nq)/k, -c where k = factorial(nq)*(1 + 1/2 + ... + 1/nq). -c -c the tesco array contains test constants used for the -c local error test and the selection of step size and/or order. -c at order nq, tesco(k,nq) is used for the selection of step -c size at order nq - 1 if k = 1, at order nq if k = 2, and at order -c nq + 1 if k = 3. -c----------------------------------------------------------------------- - dimension pc(12) -c - go to (100, 200), meth -c - 100 elco(1,1) = 1.0d0 - elco(2,1) = 1.0d0 - tesco(1,1) = 0.0d0 - tesco(2,1) = 2.0d0 - tesco(1,2) = 1.0d0 - tesco(3,12) = 0.0d0 - pc(1) = 1.0d0 - rqfac = 1.0d0 - do 140 nq = 2,12 -c----------------------------------------------------------------------- -c the pc array will contain the coefficients of the polynomial -c p(x) = (x+1)*(x+2)*...*(x+nq-1). -c initially, p(x) = 1. -c----------------------------------------------------------------------- - rq1fac = rqfac - rqfac = rqfac/dfloat(nq) - nqm1 = nq - 1 - fnqm1 = dfloat(nqm1) - nqp1 = nq + 1 -c form coefficients of p(x)*(x+nq-1). ---------------------------------- - pc(nq) = 0.0d0 - do 110 ib = 1,nqm1 - i = nqp1 - ib - 110 pc(i) = pc(i-1) + fnqm1*pc(i) - pc(1) = fnqm1*pc(1) -c compute integral, -1 to 0, of p(x) and x*p(x). ----------------------- - pint = pc(1) - xpin = pc(1)/2.0d0 - tsign = 1.0d0 - do 120 i = 2,nq - tsign = -tsign - pint = pint + tsign*pc(i)/dfloat(i) - 120 xpin = xpin + tsign*pc(i)/dfloat(i+1) -c store coefficients in elco and tesco. -------------------------------- - elco(1,nq) = pint*rq1fac - elco(2,nq) = 1.0d0 - do 130 i = 2,nq - 130 elco(i+1,nq) = rq1fac*pc(i)/dfloat(i) - agamq = rqfac*xpin - ragq = 1.0d0/agamq - tesco(2,nq) = ragq - if (nq .lt. 
12) tesco(1,nqp1) = ragq*rqfac/dfloat(nqp1) - tesco(3,nqm1) = ragq - 140 continue - return -c - 200 pc(1) = 1.0d0 - rq1fac = 1.0d0 - do 230 nq = 1,5 -c----------------------------------------------------------------------- -c the pc array will contain the coefficients of the polynomial -c p(x) = (x+1)*(x+2)*...*(x+nq). -c initially, p(x) = 1. -c----------------------------------------------------------------------- - fnq = dfloat(nq) - nqp1 = nq + 1 -c form coefficients of p(x)*(x+nq). ------------------------------------ - pc(nqp1) = 0.0d0 - do 210 ib = 1,nq - i = nq + 2 - ib - 210 pc(i) = pc(i-1) + fnq*pc(i) - pc(1) = fnq*pc(1) -c store coefficients in elco and tesco. -------------------------------- - do 220 i = 1,nqp1 - 220 elco(i,nq) = pc(i)/pc(2) - elco(2,nq) = 1.0d0 - tesco(1,nq) = rq1fac - tesco(2,nq) = dfloat(nqp1)/elco(1,nq) - tesco(3,nq) = dfloat(nq+2)/elco(1,nq) - rq1fac = rq1fac/fnq - 230 continue - return -c----------------------- end of subroutine cfode ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/cntnzu.f b/scipy-0.10.1/scipy/integrate/odepack/cntnzu.f deleted file mode 100644 index b672324e85..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/cntnzu.f +++ /dev/null @@ -1,35 +0,0 @@ - subroutine cntnzu (n, ia, ja, nzsut) - integer n, ia, ja, nzsut - dimension ia(1), ja(1) -c----------------------------------------------------------------------- -c this routine counts the number of nonzero elements in the strict -c upper triangle of the matrix m + m(transpose), where the sparsity -c structure of m is given by pointer arrays ia and ja. -c this is needed to compute the storage requirements for the -c sparse matrix reordering operation in odrv. -c----------------------------------------------------------------------- - integer ii, jj, j, jmin, jmax, k, kmin, kmax, num -c - num = 0 - do 50 ii = 1,n - jmin = ia(ii) - jmax = ia(ii+1) - 1 - if (jmin .gt. 
jmax) go to 50 - do 40 j = jmin,jmax - if (ja(j).lt.ii) go to 10 - if (ja(j).eq.ii) go to 40 - go to 30 - 10 jj =ja(j) - kmin = ia(jj) - kmax = ia(jj+1) - 1 - if (kmin .gt. kmax) go to 30 - do 20 k = kmin,kmax - if (ja(k) .eq. ii) go to 40 - 20 continue - 30 num = num + 1 - 40 continue - 50 continue - nzsut = num - return -c----------------------- end of subroutine cntnzu ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/ddasrt.f b/scipy-0.10.1/scipy/integrate/odepack/ddasrt.f deleted file mode 100644 index c8fa83b54e..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/ddasrt.f +++ /dev/null @@ -1,1999 +0,0 @@ - SUBROUTINE DDASRT (RES,NEQ,T,Y,YPRIME,TOUT, - * INFO,RTOL,ATOL,IDID,RWORK,LRW,IWORK,LIW,RPAR,IPAR,JAC, - * G,NG,JROOT) -C -C***BEGIN PROLOGUE DDASRT -C***DATE WRITTEN 821001 (YYMMDD) -C***REVISION DATE 910624 (YYMMDD) -C***KEYWORDS DIFFERENTIAL/ALGEBRAIC,BACKWARD DIFFERENTIATION FORMULAS -C IMPLICIT DIFFERENTIAL SYSTEMS -C***AUTHOR PETZOLD,LINDA R.,COMPUTING AND MATHEMATICS RESEARCH DIVISION -C LAWRENCE LIVERMORE NATIONAL LABORATORY -C L - 316, P.O. Box 808, -C LIVERMORE, CA. 94550 -C***PURPOSE This code solves a system of differential/algebraic -C equations of the form F(T,Y,YPRIME) = 0. -C***DESCRIPTION -C -C *Usage: -C -C IMPLICIT DOUBLE PRECISION (A-H,O-Z) -C EXTERNAL RES, JAC, G -C INTEGER NEQ, INFO(N), IDID, LRW, LIW, IWORK(LIW), IPAR, NG, -C * JROOT(NG) -C DOUBLE PRECISION T, Y(NEQ), YPRIME(NEQ), TOUT, RTOL, ATOL, -C * RWORK(LRW), RPAR -C -C CALL DDASRT (RES, NEQ, T, Y, YPRIME, TOUT, INFO, RTOL, ATOL, -C * IDID, RWORK, LRW, IWORK, LIW, RPAR, IPAR, JAC) -C -C -C -C *Arguments: -C -C RES:EXT This is a subroutine which you provide to define the -C differential/algebraic system. -C -C NEQ:IN This is the number of equations to be solved. -C -C T:INOUT This is the current value of the independent variable. -C -C Y(*):INOUT This array contains the solution components at T. 
-C -C YPRIME(*):INOUT This array contains the derivatives of the solution -C components at T. -C -C TOUT:IN This is a point at which a solution is desired. -C -C INFO(N):IN The basic task of the code is to solve the system from T -C to TOUT and return an answer at TOUT. INFO is an integer -C array which is used to communicate exactly how you want -C this task to be carried out. N must be greater than or -C equal to 15. -C -C RTOL,ATOL:INOUT These quantities represent absolute and relative -C error tolerances which you provide to indicate how -C accurately you wish the solution to be computed. -C You may choose them to be both scalars or else -C both vectors. -C -C IDID:OUT This scalar quantity is an indicator reporting what the -C code did. You must monitor this integer variable to decide -C what action to take next. -C -C RWORK:WORK A real work array of length LRW which provides the -C code with needed storage space. -C -C LRW:IN The length of RWORK. -C -C IWORK:WORK An integer work array of length LIW which probides the -C code with needed storage space. -C -C LIW:IN The length of IWORK. -C -C RPAR,IPAR:IN These are real and integer parameter arrays which -C you can use for communication between your calling -C program and the RES subroutine (and the JAC subroutine) -C -C JAC:EXT This is the name of a subroutine which you may choose to -C provide for defining a matrix of partial derivatives -C described below. -C -C G This is the name of the subroutine for defining -C constraint functions, G(T,Y), whose roots are desired -C during the integration. This name must be declared -C external in the calling program. -C -C NG This is the number of constraint functions G(I). -C If there are none, set NG=0, and pass a dummy name -C for G. -C -C JROOT This is an integer array of length NG for output -C of root information. -C -C -C *Description -C -C QUANTITIES WHICH MAY BE ALTERED BY THE CODE ARE -C T,Y(*),YPRIME(*),INFO(1),RTOL,ATOL, -C IDID,RWORK(*) AND IWORK(*). 
-C -C Subroutine DDASRT uses the backward differentiation formulas of -C orders one through five to solve a system of the above form for Y and -C YPRIME. Values for Y and YPRIME at the initial time must be given as -C input. These values must be consistent, (that is, if T,Y,YPRIME are -C the given initial values, they must satisfy F(T,Y,YPRIME) = 0.). The -C subroutine solves the system from T to TOUT. -C It is easy to continue the solution to get results at additional -C TOUT. This is the interval mode of operation. Intermediate results -C can also be obtained easily by using the intermediate-output -C capability. If DDASRT detects a sign-change in G(T,Y), then -C it will return the intermediate value of T and Y for which -C G(T,Y) = 0. -C -C ---------INPUT-WHAT TO DO ON THE FIRST CALL TO DDASRT--------------- -C -C -C The first call of the code is defined to be the start of each new -C problem. Read through the descriptions of all the following items, -C provide sufficient storage space for designated arrays, set -C appropriate variables for the initialization of the problem, and -C give information about how you want the problem to be solved. -C -C -C RES -- Provide a subroutine of the form -C SUBROUTINE RES(T,Y,YPRIME,DELTA,IRES,RPAR,IPAR) -C to define the system of differential/algebraic -C equations which is to be solved. For the given values -C of T,Y and YPRIME, the subroutine should -C return the residual of the defferential/algebraic -C system -C DELTA = F(T,Y,YPRIME) -C (DELTA(*) is a vector of length NEQ which is -C output for RES.) -C -C Subroutine RES must not alter T,Y or YPRIME. -C You must declare the name RES in an external -C statement in your program that calls DDASRT. -C You must dimension Y,YPRIME and DELTA in RES. -C -C IRES is an integer flag which is always equal to -C zero on input. Subroutine RES should alter IRES -C only if it encounters an illegal value of Y or -C a stop condition. 
Set IRES = -1 if an input value -C is illegal, and DDASRT will try to solve the problem -C without getting IRES = -1. If IRES = -2, DDASRT -C will return control to the calling program -C with IDID = -11. -C -C RPAR and IPAR are real and integer parameter arrays which -C you can use for communication between your calling program -C and subroutine RES. They are not altered by DDASRT. If you -C do not need RPAR or IPAR, ignore these parameters by treat- -C ing them as dummy arguments. If you do choose to use them, -C dimension them in your calling program and in RES as arrays -C of appropriate length. -C -C NEQ -- Set it to the number of differential equations. -C (NEQ .GE. 1) -C -C T -- Set it to the initial point of the integration. -C T must be defined as a variable. -C -C Y(*) -- Set this vector to the initial values of the NEQ solution -C components at the initial point. You must dimension Y of -C length at least NEQ in your calling program. -C -C YPRIME(*) -- Set this vector to the initial values of -C the NEQ first derivatives of the solution -C components at the initial point. You -C must dimension YPRIME at least NEQ -C in your calling program. If you do not -C know initial values of some of the solution -C components, see the explanation of INFO(11). -C -C TOUT - Set it to the first point at which a solution -C is desired. You can not take TOUT = T. -C integration either forward in T (TOUT .GT. T) or -C backward in T (TOUT .LT. T) is permitted. -C -C The code advances the solution from T to TOUT using -C step sizes which are automatically selected so as to -C achieve the desired accuracy. If you wish, the code will -C return with the solution and its derivative at -C intermediate steps (intermediate-output mode) so that -C you can monitor them, but you still must provide TOUT in -C accord with the basic aim of the code. 
-C -C the first step taken by the code is a critical one -C because it must reflect how fast the solution changes near -C the initial point. The code automatically selects an -C initial step size which is practically always suitable for -C the problem. By using the fact that the code will not step -C past TOUT in the first step, you could, if necessary, -C restrict the length of the initial step size. -C -C For some problems it may not be permissable to integrate -C past a point TSTOP because a discontinuity occurs there -C or the solution or its derivative is not defined beyond -C TSTOP. When you have declared a TSTOP point (SEE INFO(4) -C and RWORK(1)), you have told the code not to integrate -C past TSTOP. In this case any TOUT beyond TSTOP is invalid -C input. -C -C INFO(*) - Use the INFO array to give the code more details about -C how you want your problem solved. This array should be -C dimensioned of length 15, though DDASRT uses -C only the first eleven entries. You must respond to all of -C the following items which are arranged as questions. The -C simplest use of the code corresponds to answering all -C questions as yes, i.e. setting all entries of INFO to 0. -C -C INFO(1) - This parameter enables the code to initialize -C itself. You must set it to indicate the start of every -C new problem. -C -C **** Is this the first call for this problem ... -C Yes - Set INFO(1) = 0 -C No - Not applicable here. -C See below for continuation calls. **** -C -C INFO(2) - How much accuracy you want of your solution -C is specified by the error tolerances RTOL and ATOL. -C The simplest use is to take them both to be scalars. -C To obtain more flexibility, they can both be vectors. -C The code must be told your choice. -C -C **** Are both error tolerances RTOL, ATOL scalars ... 
-C Yes - Set INFO(2) = 0 -C and input scalars for both RTOL and ATOL -C No - Set INFO(2) = 1 -C and input arrays for both RTOL and ATOL **** -C -C INFO(3) - The code integrates from T in the direction -C of TOUT by steps. If you wish, it will return the -C computed solution and derivative at the next -C intermediate step (the intermediate-output mode) or -C TOUT, whichever comes first. This is a good way to -C proceed if you want to see the behavior of the solution. -C If you must have solutions at a great many specific -C TOUT points, this code will compute them efficiently. -C -C **** Do you want the solution only at -C TOUT (and not at the next intermediate step) ... -C Yes - Set INFO(3) = 0 -C No - Set INFO(3) = 1 **** -C -C INFO(4) - To handle solutions at a great many specific -C values TOUT efficiently, this code may integrate past -C TOUT and interpolate to obtain the result at TOUT. -C Sometimes it is not possible to integrate beyond some -C point TSTOP because the equation changes there or it is -C not defined past TSTOP. Then you must tell the code -C not to go past. -C -C **** Can the integration be carried out without any -C restrictions on the independent variable T ... -C Yes - Set INFO(4)=0 -C No - Set INFO(4)=1 -C and define the stopping point TSTOP by -C setting RWORK(1)=TSTOP **** -C -C INFO(5) - To solve differential/algebraic problems it is -C necessary to use a matrix of partial derivatives of the -C system of differential equations. If you do not -C provide a subroutine to evaluate it analytically (see -C description of the item JAC in the call list), it will -C be approximated by numerical differencing in this code. -C although it is less trouble for you to have the code -C compute partial derivatives by numerical differencing, -C the solution will be more reliable if you provide the -C derivatives via JAC. 
Sometimes numerical differencing -C is cheaper than evaluating derivatives in JAC and -C sometimes it is not - this depends on your problem. -C -C **** Do you want the code to evaluate the partial -C derivatives automatically by numerical differences ... -C Yes - Set INFO(5)=0 -C No - Set INFO(5)=1 -C and provide subroutine JAC for evaluating the -C matrix of partial derivatives **** -C -C INFO(6) - DDASRT will perform much better if the matrix of -C partial derivatives, DG/DY + CJ*DG/DYPRIME, -C (here CJ is a scalar determined by DDASRT) -C is banded and the code is told this. In this -C case, the storage needed will be greatly reduced, -C numerical differencing will be performed much cheaper, -C and a number of important algorithms will execute much -C faster. The differential equation is said to have -C half-bandwidths ML (lower) and MU (upper) if equation i -C involves only unknowns Y(J) with -C I-ML .LE. J .LE. I+MU -C for all I=1,2,...,NEQ. Thus, ML and MU are the widths -C of the lower and upper parts of the band, respectively, -C with the main diagonal being excluded. If you do not -C indicate that the equation has a banded matrix of partial -C derivatives, the code works with a full matrix of NEQ**2 -C elements (stored in the conventional way). Computations -C with banded matrices cost less time and storage than with -C full matrices if 2*ML+MU .LT. NEQ. If you tell the -C code that the matrix of partial derivatives has a banded -C structure and you want to provide subroutine JAC to -C compute the partial derivatives, then you must be careful -C to store the elements of the matrix in the special form -C indicated in the description of JAC. -C -C **** Do you want to solve the problem using a full -C (dense) matrix (and not a special banded -C structure) ... 
-C Yes - Set INFO(6)=0 -C No - Set INFO(6)=1 -C and provide the lower (ML) and upper (MU) -C bandwidths by setting -C IWORK(1)=ML -C IWORK(2)=MU **** -C -C -C INFO(7) -- You can specify a maximum (absolute value of) -C stepsize, so that the code -C will avoid passing over very -C large regions. -C -C **** Do you want the code to decide -C on its own maximum stepsize? -C Yes - Set INFO(7)=0 -C No - Set INFO(7)=1 -C and define HMAX by setting -C RWORK(2)=HMAX **** -C -C INFO(8) -- Differential/algebraic problems -C may occaisionally suffer from -C severe scaling difficulties on the -C first step. If you know a great deal -C about the scaling of your problem, you can -C help to alleviate this problem by -C specifying an initial stepsize H0. -C -C **** Do you want the code to define -C its own initial stepsize? -C Yes - Set INFO(8)=0 -C No - Set INFO(8)=1 -C and define H0 by setting -C RWORK(3)=H0 **** -C -C INFO(9) -- If storage is a severe problem, -C you can save some locations by -C restricting the maximum order MAXORD. -C the default value is 5. for each -C order decrease below 5, the code -C requires NEQ fewer locations, however -C it is likely to be slower. In any -C case, you must have 1 .LE. MAXORD .LE. 5 -C **** Do you want the maximum order to -C default to 5? -C Yes - Set INFO(9)=0 -C No - Set INFO(9)=1 -C and define MAXORD by setting -C IWORK(3)=MAXORD **** -C -C INFO(10) --If you know that the solutions to your equations -C will always be nonnegative, it may help to set this -C parameter. However, it is probably best to -C try the code without using this option first, -C and only to use this option if that doesn't -C work very well. -C **** Do you want the code to solve the problem without -C invoking any special nonnegativity constraints? -C Yes - Set INFO(10)=0 -C No - Set INFO(10)=1 -C -C INFO(11) --DDASRT normally requires the initial T, -C Y, and YPRIME to be consistent. That is, -C you must have F(T,Y,YPRIME) = 0 at the initial -C time. 
If you do not know the initial -C derivative precisely, you can let DDASRT try -C to compute it. -C **** Are the initial T, Y, YPRIME consistent? -C Yes - Set INFO(11) = 0 -C No - Set INFO(11) = 1, -C and set YPRIME to an initial approximation -C to YPRIME. (If you have no idea what -C YPRIME should be, set it to zero. Note -C that the initial Y should be such -C that there must exist a YPRIME so that -C F(T,Y,YPRIME) = 0.) -C -C RTOL, ATOL -- You must assign relative (RTOL) and absolute (ATOL -C error tolerances to tell the code how accurately you -C want the solution to be computed. They must be defined -C as variables because the code may change them. You -C have two choices -- -C Both RTOL and ATOL are scalars. (INFO(2)=0) -C Both RTOL and ATOL are vectors. (INFO(2)=1) -C in either case all components must be non-negative. -C -C The tolerances are used by the code in a local error -C test at each step which requires roughly that -C ABS(LOCAL ERROR) .LE. RTOL*ABS(Y)+ATOL -C for each vector component. -C (More specifically, a root-mean-square norm is used to -C measure the size of vectors, and the error test uses the -C magnitude of the solution at the beginning of the step.) -C -C The true (global) error is the difference between the -C true solution of the initial value problem and the -C computed approximation. Practically all present day -C codes, including this one, control the local error at -C each step and do not even attempt to control the global -C error directly. -C Usually, but not always, the true accuracy of the -C computed Y is comparable to the error tolerances. This -C code will usually, but not always, deliver a more -C accurate solution if you reduce the tolerances and -C integrate again. By comparing two such solutions you -C can get a fairly reliable idea of the true error in the -C solution at the bigger tolerances. -C -C Setting ATOL=0. results in a pure relative error test on -C that component. Setting RTOL=0. 
results in a pure -C absolute error test on that component. A mixed test -C with non-zero RTOL and ATOL corresponds roughly to a -C relative error test when the solution component is much -C bigger than ATOL and to an absolute error test when the -C solution component is smaller than the threshhold ATOL. -C -C The code will not attempt to compute a solution at an -C accuracy unreasonable for the machine being used. It -C will advise you if you ask for too much accuracy and -C inform you as to the maximum accuracy it believes -C possible. -C -C RWORK(*) -- Dimension this real work array of length LRW in your -C calling program. -C -C LRW -- Set it to the declared length of the RWORK array. -C You must have -C LRW .GE. 50+(MAXORD+4)*NEQ+NEQ**2 -C for the full (dense) JACOBIAN case (when INFO(6)=0), or -C LRW .GE. 50+(MAXORD+4)*NEQ+(2*ML+MU+1)*NEQ -C for the banded user-defined JACOBIAN case -C (when INFO(5)=1 and INFO(6)=1), or -C LRW .GE. 50+(MAXORD+4)*NEQ+(2*ML+MU+1)*NEQ -C +2*(NEQ/(ML+MU+1)+1) -C for the banded finite-difference-generated JACOBIAN case -C (when INFO(5)=0 and INFO(6)=1) -C -C IWORK(*) -- Dimension this integer work array of length LIW in -C your calling program. -C -C LIW -- Set it to the declared length of the IWORK array. -C you must have LIW .GE. 20+NEQ -C -C RPAR, IPAR -- These are parameter arrays, of real and integer -C type, respectively. You can use them for communication -C between your program that calls DDASRT and the -C RES subroutine (and the JAC subroutine). They are not -C altered by DDASRT. If you do not need RPAR or IPAR, -C ignore these parameters by treating them as dummy -C arguments. If you do choose to use them, dimension -C them in your calling program and in RES (and in JAC) -C as arrays of appropriate length. -C -C JAC -- If you have set INFO(5)=0, you can ignore this parameter -C by treating it as a dummy argument. 
Otherwise, you must -C provide a subroutine of the form -C JAC(T,Y,YPRIME,PD,CJ,RPAR,IPAR) -C to define the matrix of partial derivatives -C PD=DG/DY+CJ*DG/DYPRIME -C CJ is a scalar which is input to JAC. -C For the given values of T,Y,YPRIME, the -C subroutine must evaluate the non-zero partial -C derivatives for each equation and each solution -C component, and store these values in the -C matrix PD. The elements of PD are set to zero -C before each call to JAC so only non-zero elements -C need to be defined. -C -C Subroutine JAC must not alter T,Y,(*),YPRIME(*), or CJ. -C You must declare the name JAC in an -C EXTERNAL STATEMENT in your program that calls -C DDASRT. You must dimension Y, YPRIME and PD -C in JAC. -C -C The way you must store the elements into the PD matrix -C depends on the structure of the matrix which you -C indicated by INFO(6). -C *** INFO(6)=0 -- Full (dense) matrix *** -C Give PD a first dimension of NEQ. -C When you evaluate the (non-zero) partial derivative -C of equation I with respect to variable J, you must -C store it in PD according to -C PD(I,J) = * DF(I)/DY(J)+CJ*DF(I)/DYPRIME(J)* -C *** INFO(6)=1 -- Banded JACOBIAN with ML lower and MU -C upper diagonal bands (refer to INFO(6) description -C of ML and MU) *** -C Give PD a first dimension of 2*ML+MU+1. -C when you evaluate the (non-zero) partial derivative -C of equation I with respect to variable J, you must -C store it in PD according to -C IROW = I - J + ML + MU + 1 -C PD(IROW,J) = *DF(I)/DY(J)+CJ*DF(I)/DYPRIME(J)* -C RPAR and IPAR are real and integer parameter arrays -C which you can use for communication between your calling -C program and your JACOBIAN subroutine JAC. They are not -C altered by DDASRT. If you do not need RPAR or IPAR, -C ignore these parameters by treating them as dummy -C arguments. If you do choose to use them, dimension -C them in your calling program and in JAC as arrays of -C appropriate length. 
-C -C G -- This is the name of the subroutine for defining constraint -C functions, whose roots are desired during the -C integration. It is to have the form -C SUBROUTINE G(NEQ,T,Y,NG,GOUT,RPAR,IPAR) -C DIMENSION Y(NEQ),GOUT(NG), -C where NEQ, T, Y and NG are INPUT, and the array GOUT is -C output. NEQ, T, and Y have the same meaning as in the -C RES routine, and GOUT is an array of length NG. -C For I=1,...,NG, this routine is to load into GOUT(I) -C the value at (T,Y) of the I-th constraint function G(I). -C DDASRT will find roots of the G(I) of odd multiplicity -C (that is, sign changes) as they occur during -C the integration. G must be declared EXTERNAL in the -C calling program. -C -C CAUTION..because of numerical errors in the functions -C G(I) due to roundoff and integration error, DDASRT -C may return false roots, or return the same root at two -C or more nearly equal values of T. If such false roots -C are suspected, the user should consider smaller error -C tolerances and/or higher precision in the evaluation of -C the G(I). -C -C If a root of some G(I) defines the end of the problem, -C the input to DDASRT should nevertheless allow -C integration to a point slightly past that ROOT, so -C that DDASRT can locate the root by interpolation. -C -C NG -- The number of constraint functions G(I). If there are none, -C set NG = 0, and pass a dummy name for G. -C -C JROOT -- This is an integer array of length NG. It is used only for -C output. On a return where one or more roots have been -C found, JROOT(I)=1 If G(I) has a root at T, -C or JROOT(I)=0 if not. -C -C -C -C OPTIONALLY REPLACEABLE NORM ROUTINE: -C DDASRT uses a weighted norm DDANRM to measure the size -C of vectors such as the estimated error in each step. -C A FUNCTION subprogram -C DOUBLE PRECISION FUNCTION DDANRM(NEQ,V,WT,RPAR,IPAR) -C DIMENSION V(NEQ),WT(NEQ) -C is used to define this norm. Here, V is the vector -C whose norm is to be computed, and WT is a vector of -C weights. 
A DDANRM routine has been included with DDASRT -C which computes the weighted root-mean-square norm -C given by -C DDANRM=SQRT((1/NEQ)*SUM(V(I)/WT(I))**2) -C this norm is suitable for most problems. In some -C special cases, it may be more convenient and/or -C efficient to define your own norm by writing a function -C subprogram to be called instead of DDANRM. This should -C ,however, be attempted only after careful thought and -C consideration. -C -C -C------OUTPUT-AFTER ANY RETURN FROM DDASRT---- -C -C The principal aim of the code is to return a computed solution at -C TOUT, although it is also possible to obtain intermediate results -C along the way. To find out whether the code achieved its goal -C or if the integration process was interrupted before the task was -C completed, you must check the IDID parameter. -C -C -C T -- The solution was successfully advanced to the -C output value of T. -C -C Y(*) -- Contains the computed solution approximation at T. -C -C YPRIME(*) -- Contains the computed derivative -C approximation at T. -C -C IDID -- Reports what the code did. -C -C *** Task completed *** -C Reported by positive values of IDID -C -C IDID = 1 -- A step was successfully taken in the -C intermediate-output mode. The code has not -C yet reached TOUT. -C -C IDID = 2 -- The integration to TSTOP was successfully -C completed (T=TSTOP) by stepping exactly to TSTOP. -C -C IDID = 3 -- The integration to TOUT was successfully -C completed (T=TOUT) by stepping past TOUT. -C Y(*) is obtained by interpolation. -C YPRIME(*) is obtained by interpolation. -C -C IDID = 4 -- The integration was successfully completed -C by finding one or more roots of G at T. -C -C *** Task interrupted *** -C Reported by negative values of IDID -C -C IDID = -1 -- A large amount of work has been expended. -C (About 500 steps) -C -C IDID = -2 -- The error tolerances are too stringent. 
-C -C IDID = -3 -- The local error test cannot be satisfied -C because you specified a zero component in ATOL -C and the corresponding computed solution -C component is zero. Thus, a pure relative error -C test is impossible for this component. -C -C IDID = -6 -- DDASRT had repeated error test -C failures on the last attempted step. -C -C IDID = -7 -- The corrector could not converge. -C -C IDID = -8 -- The matrix of partial derivatives -C is singular. -C -C IDID = -9 -- The corrector could not converge. -C there were repeated error test failures -C in this step. -C -C IDID =-10 -- The corrector could not converge -C because IRES was equal to minus one. -C -C IDID =-11 -- IRES equal to -2 was encountered -C and control is being returned to the -C calling program. -C -C IDID =-12 -- DDASRT failed to compute the initial -C YPRIME. -C -C -C -C IDID = -13,..,-32 -- Not applicable for this code -C -C *** Task terminated *** -C Reported by the value of IDID=-33 -C -C IDID = -33 -- The code has encountered trouble from which -C it cannot recover. A message is printed -C explaining the trouble and control is returned -C to the calling program. For example, this occurs -C when invalid input is detected. -C -C RTOL, ATOL -- These quantities remain unchanged except when -C IDID = -2. In this case, the error tolerances have been -C increased by the code to values which are estimated to -C be appropriate for continuing the integration. However, -C the reported solution at T was obtained using the input -C values of RTOL and ATOL. -C -C RWORK, IWORK -- Contain information which is usually of no -C interest to the user but necessary for subsequent calls. -C However, you may find use for -C -C RWORK(3)--Which contains the step size H to be -C attempted on the next step. -C -C RWORK(4)--Which contains the current value of the -C independent variable, i.e., the farthest point -C integration has reached. 
This will be different -C from T only when interpolation has been -C performed (IDID=3). -C -C RWORK(7)--Which contains the stepsize used -C on the last successful step. -C -C IWORK(7)--Which contains the order of the method to -C be attempted on the next step. -C -C IWORK(8)--Which contains the order of the method used -C on the last step. -C -C IWORK(11)--Which contains the number of steps taken so -C far. -C -C IWORK(12)--Which contains the number of calls to RES -C so far. -C -C IWORK(13)--Which contains the number of evaluations of -C the matrix of partial derivatives needed so -C far. -C -C IWORK(14)--Which contains the total number -C of error test failures so far. -C -C IWORK(15)--Which contains the total number -C of convergence test failures so far. -C (includes singular iteration matrix -C failures.) -C -C IWORK(16)--Which contains the total number of calls -C to the constraint function g so far -C -C -C -C INPUT -- What to do to continue the integration -C (calls after the first) ** -C -C This code is organized so that subsequent calls to continue the -C integration involve little (if any) additional effort on your -C part. You must monitor the IDID parameter in order to determine -C what to do next. -C -C Recalling that the principal task of the code is to integrate -C from T to TOUT (the interval mode), usually all you will need -C to do is specify a new TOUT upon reaching the current TOUT. -C -C Do not alter any quantity not specifically permitted below, -C in particular do not alter NEQ,T,Y(*),YPRIME(*),RWORK(*),IWORK(*) -C or the differential equation in subroutine RES. Any such -C alteration constitutes a new problem and must be treated as such, -C i.e., you must start afresh. -C -C You cannot change from vector to scalar error control or vice -C versa (INFO(2)), but you can change the size of the entries of -C RTOL, ATOL. Increasing a tolerance makes the equation easier -C to integrate. 
Decreasing a tolerance will make the equation -C harder to integrate and should generally be avoided. -C -C You can switch from the intermediate-output mode to the -C interval mode (INFO(3)) or vice versa at any time. -C -C If it has been necessary to prevent the integration from going -C past a point TSTOP (INFO(4), RWORK(1)), keep in mind that the -C code will not integrate to any TOUT beyond the currently -C specified TSTOP. Once TSTOP has been reached you must change -C the value of TSTOP or set INFO(4)=0. You may change INFO(4) -C or TSTOP at any time but you must supply the value of TSTOP in -C RWORK(1) whenever you set INFO(4)=1. -C -C Do not change INFO(5), INFO(6), IWORK(1), or IWORK(2) -C unless you are going to restart the code. -C -C *** Following a completed task *** -C If -C IDID = 1, call the code again to continue the integration -C another step in the direction of TOUT. -C -C IDID = 2 or 3, define a new TOUT and call the code again. -C TOUT must be different from T. You cannot change -C the direction of integration without restarting. -C -C IDID = 4, call the code again to continue the integration -C another step in the direction of TOUT. You may -C change the functions in G after a return with IDID=4, -C but the number of constraint functions NG must remain -C the same. If you wish to change -C the functions in RES or in G, then you -C must restart the code. -C -C *** Following an interrupted task *** -C To show the code that you realize the task was -C interrupted and that you want to continue, you -C must take appropriate action and set INFO(1) = 1 -C If -C IDID = -1, The code has taken about 500 steps. -C If you want to continue, set INFO(1) = 1 and -C call the code again. An additional 500 steps -C will be allowed. -C -C IDID = -2, The error tolerances RTOL, ATOL have been -C increased to values the code estimates appropriate -C for continuing. You may want to change them -C yourself. 
If you are sure you want to continue -C with relaxed error tolerances, set INFO(1)=1 and -C call the code again. -C -C IDID = -3, A solution component is zero and you set the -C corresponding component of ATOL to zero. If you -C are sure you want to continue, you must first -C alter the error criterion to use positive values -C for those components of ATOL corresponding to zero -C solution components, then set INFO(1)=1 and call -C the code again. -C -C IDID = -4,-5 --- Cannot occur with this code. -C -C IDID = -6, Repeated error test failures occurred on the -C last attempted step in DDASRT. A singularity in the -C solution may be present. If you are absolutely -C certain you want to continue, you should restart -C the integration. (Provide initial values of Y and -C YPRIME which are consistent) -C -C IDID = -7, Repeated convergence test failures occurred -C on the last attempted step in DDASRT. An inaccurate -C or ill-conditioned JACOBIAN may be the problem. If -C you are absolutely certain you want to continue, you -C should restart the integration. -C -C IDID = -8, The matrix of partial derivatives is singular. -C Some of your equations may be redundant. -C DDASRT cannot solve the problem as stated. -C It is possible that the redundant equations -C could be removed, and then DDASRT could -C solve the problem. It is also possible -C that a solution to your problem either -C does not exist or is not unique. -C -C IDID = -9, DDASRT had multiple convergence test -C failures, preceeded by multiple error -C test failures, on the last attempted step. -C It is possible that your problem -C is ill-posed, and cannot be solved -C using this code. Or, there may be a -C discontinuity or a singularity in the -C solution. If you are absolutely certain -C you want to continue, you should restart -C the integration. -C -C IDID =-10, DDASRT had multiple convergence test failures -C because IRES was equal to minus one. 
-C If you are absolutely certain you want -C to continue, you should restart the -C integration. -C -C IDID =-11, IRES=-2 was encountered, and control is being -C returned to the calling program. -C -C IDID =-12, DDASRT failed to compute the initial YPRIME. -C This could happen because the initial -C approximation to YPRIME was not very good, or -C if a YPRIME consistent with the initial Y -C does not exist. The problem could also be caused -C by an inaccurate or singular iteration matrix. -C -C -C -C IDID = -13,..,-32 --- Cannot occur with this code. -C -C *** Following a terminated task *** -C If IDID= -33, you cannot continue the solution of this -C problem. An attempt to do so will result in your -C run being terminated. -C -C --------------------------------------------------------------------- -C -C***REFERENCE -C K. E. Brenan, S. L. Campbell, and L. R. Petzold, Numerical -C Solution of Initial-Value Problems in Differential-Algebraic -C Equations, Elsevier, New York, 1989. -C -C***ROUTINES CALLED DDASTP,DDAINI,DDANRM,DDAWTS,DDATRP,DRCHEK,DROOTS, -C XERRWV,D1MACH -C***END PROLOGUE DDASRT -C -C**End -C - IMPLICIT DOUBLE PRECISION(A-H,O-Z) - LOGICAL DONE - EXTERNAL RES, JAC, G - DIMENSION Y(*),YPRIME(*) - DIMENSION INFO(15) - DIMENSION RWORK(*),IWORK(*) - DIMENSION RTOL(*),ATOL(*) - DIMENSION RPAR(*),IPAR(*) - CHARACTER MSG*80 -C -C SET POINTERS INTO IWORK - PARAMETER (LML=1, LMU=2, LMXORD=3, LMTYPE=4, LNST=11, - * LNRE=12, LNJE=13, LETF=14, LCTF=15, LNGE=16, LNPD=17, - * LIRFND=18, LIPVT=21, LJCALC=5, LPHASE=6, LK=7, LKOLD=8, - * LNS=9, LNSTL=10, LIWM=1) -C -C SET RELATIVE OFFSET INTO RWORK - PARAMETER (NPD=1) -C -C SET POINTERS INTO RWORK - PARAMETER (LTSTOP=1, LHMAX=2, LH=3, LTN=4, - * LCJ=5, LCJOLD=6, LHOLD=7, LS=8, LROUND=9, - * LALPHA=11, LBETA=17, LGAMMA=23, - * LPSI=29, LSIGMA=35, LT0=41, LTLAST=42, LALPHR=43, LX2=44, - * LDELTA=51) -C -C***FIRST EXECUTABLE STATEMENT DDASRT - IF(INFO(1).NE.0)GO TO 100 -C 
-C----------------------------------------------------------------------- -C THIS BLOCK IS EXECUTED FOR THE INITIAL CALL ONLY. -C IT CONTAINS CHECKING OF INPUTS AND INITIALIZATIONS. -C----------------------------------------------------------------------- -C -C FIRST CHECK INFO ARRAY TO MAKE SURE ALL ELEMENTS OF INFO -C ARE EITHER ZERO OR ONE. - DO 10 I=2,11 - IF(INFO(I).NE.0.AND.INFO(I).NE.1)GO TO 701 -10 CONTINUE -C - IF(NEQ.LE.0)GO TO 702 -C -C CHECK AND COMPUTE MAXIMUM ORDER - MXORD=5 - IF(INFO(9).EQ.0)GO TO 20 - MXORD=IWORK(LMXORD) - IF(MXORD.LT.1.OR.MXORD.GT.5)GO TO 703 -20 IWORK(LMXORD)=MXORD -C -C COMPUTE MTYPE,LENPD,LENRW.CHECK ML AND MU. - IF(INFO(6).NE.0)GO TO 40 - LENPD=NEQ**2 - LENRW=50+(IWORK(LMXORD)+4)*NEQ+LENPD - IF(INFO(5).NE.0)GO TO 30 - IWORK(LMTYPE)=2 - GO TO 60 -30 IWORK(LMTYPE)=1 - GO TO 60 -40 IF(IWORK(LML).LT.0.OR.IWORK(LML).GE.NEQ)GO TO 717 - IF(IWORK(LMU).LT.0.OR.IWORK(LMU).GE.NEQ)GO TO 718 - LENPD=(2*IWORK(LML)+IWORK(LMU)+1)*NEQ - IF(INFO(5).NE.0)GO TO 50 - IWORK(LMTYPE)=5 - MBAND=IWORK(LML)+IWORK(LMU)+1 - MSAVE=(NEQ/MBAND)+1 - LENRW=50+(IWORK(LMXORD)+4)*NEQ+LENPD+2*MSAVE - GO TO 60 -50 IWORK(LMTYPE)=4 - LENRW=50+(IWORK(LMXORD)+4)*NEQ+LENPD -C -C CHECK LENGTHS OF RWORK AND IWORK -60 LENIW=20+NEQ - IWORK(LNPD)=LENPD - IF(LRW.LT.LENRW)GO TO 704 - IF(LIW.LT.LENIW)GO TO 705 -C -C CHECK TO SEE THAT TOUT IS DIFFERENT FROM T -C Also check to see that NG is larger than 0. - IF(TOUT .EQ. T)GO TO 719 - IF(NG .LT. 0) GO TO 730 -C -C CHECK HMAX - IF(INFO(7).EQ.0)GO TO 70 - HMAX=RWORK(LHMAX) - IF(HMAX.LE.0.0D0)GO TO 710 -70 CONTINUE -C -C INITIALIZE COUNTERS - IWORK(LNST)=0 - IWORK(LNRE)=0 - IWORK(LNJE)=0 - IWORK(LNGE)=0 -C - IWORK(LNSTL)=0 - IDID=1 - GO TO 200 -C -C----------------------------------------------------------------------- -C THIS BLOCK IS FOR CONTINUATION CALLS -C ONLY. HERE WE CHECK INFO(1),AND IF THE -C LAST STEP WAS INTERRUPTED WE CHECK WHETHER -C APPROPRIATE ACTION WAS TAKEN. 
-C----------------------------------------------------------------------- -C -100 CONTINUE - IF(INFO(1).EQ.1)GO TO 110 - IF(INFO(1).NE.-1)GO TO 701 -C IF WE ARE HERE, THE LAST STEP WAS INTERRUPTED -C BY AN ERROR CONDITION FROM DDASTP,AND -C APPROPRIATE ACTION WAS NOT TAKEN. THIS -C IS A FATAL ERROR. - MSG = 'DASSL-- THE LAST STEP TERMINATED WITH A NEGATIVE' - CALL XERRWV(MSG,49,201,0,0,0,0,0,0.0D0,0.0D0) - MSG = 'DASSL-- VALUE (=I1) OF IDID AND NO APPROPRIATE' - CALL XERRWV(MSG,47,202,0,1,IDID,0,0,0.0D0,0.0D0) - MSG = 'DASSL-- ACTION WAS TAKEN. RUN TERMINATED' - CALL XERRWV(MSG,41,203,1,0,0,0,0,0.0D0,0.0D0) - RETURN -110 CONTINUE - IWORK(LNSTL)=IWORK(LNST) -C -C----------------------------------------------------------------------- -C THIS BLOCK IS EXECUTED ON ALL CALLS. -C THE ERROR TOLERANCE PARAMETERS ARE -C CHECKED, AND THE WORK ARRAY POINTERS -C ARE SET. -C----------------------------------------------------------------------- -C -200 CONTINUE -C CHECK RTOL,ATOL - NZFLG=0 - RTOLI=RTOL(1) - ATOLI=ATOL(1) - DO 210 I=1,NEQ - IF(INFO(2).EQ.1)RTOLI=RTOL(I) - IF(INFO(2).EQ.1)ATOLI=ATOL(I) - IF(RTOLI.GT.0.0D0.OR.ATOLI.GT.0.0D0)NZFLG=1 - IF(RTOLI.LT.0.0D0)GO TO 706 - IF(ATOLI.LT.0.0D0)GO TO 707 -210 CONTINUE - IF(NZFLG.EQ.0)GO TO 708 -C -C SET UP RWORK STORAGE.IWORK STORAGE IS FIXED -C IN DATA STATEMENT. - LG0=LDELTA+NEQ - LG1=LG0+NG - LGX=LG1+NG - LE=LGX+NG - LWT=LE+NEQ - LPHI=LWT+NEQ - LPD=LPHI+(IWORK(LMXORD)+1)*NEQ - LWM=LPD - NTEMP=NPD+IWORK(LNPD) - IF(INFO(1).EQ.1)GO TO 400 -C -C----------------------------------------------------------------------- -C THIS BLOCK IS EXECUTED ON THE INITIAL CALL -C ONLY. SET THE INITIAL STEP SIZE, AND -C THE ERROR WEIGHT VECTOR, AND PHI. -C COMPUTE INITIAL YPRIME, IF NECESSARY. 
-C----------------------------------------------------------------------- -C -300 CONTINUE - TN=T - IDID=1 -C -C SET ERROR WEIGHT VECTOR WT - CALL DDAWTS(NEQ,INFO(2),RTOL,ATOL,Y,RWORK(LWT),RPAR,IPAR) - DO 305 I = 1,NEQ - IF(RWORK(LWT+I-1).LE.0.0D0) GO TO 713 -305 CONTINUE -C -C COMPUTE UNIT ROUNDOFF AND HMIN - UROUND = D1MACH(4) - RWORK(LROUND) = UROUND - HMIN = 4.0D0*UROUND*DMAX1(DABS(T),DABS(TOUT)) -C -C CHECK INITIAL INTERVAL TO SEE THAT IT IS LONG ENOUGH - TDIST = DABS(TOUT - T) - IF(TDIST .LT. HMIN) GO TO 714 -C -C CHECK H0, IF THIS WAS INPUT - IF (INFO(8) .EQ. 0) GO TO 310 - HO = RWORK(LH) - IF ((TOUT - T)*HO .LT. 0.0D0) GO TO 711 - IF (HO .EQ. 0.0D0) GO TO 712 - GO TO 320 -310 CONTINUE -C -C COMPUTE INITIAL STEPSIZE, TO BE USED BY EITHER -C DDASTP OR DDAINI, DEPENDING ON INFO(11) - HO = 0.001D0*TDIST - YPNORM = DDANRM(NEQ,YPRIME,RWORK(LWT),RPAR,IPAR) - IF (YPNORM .GT. 0.5D0/HO) HO = 0.5D0/YPNORM - HO = DSIGN(HO,TOUT-T) -C ADJUST HO IF NECESSARY TO MEET HMAX BOUND -320 IF (INFO(7) .EQ. 0) GO TO 330 - RH = DABS(HO)/RWORK(LHMAX) - IF (RH .GT. 1.0D0) HO = HO/RH -C COMPUTE TSTOP, IF APPLICABLE -330 IF (INFO(4) .EQ. 0) GO TO 340 - TSTOP = RWORK(LTSTOP) - IF ((TSTOP - T)*HO .LT. 0.0D0) GO TO 715 - IF ((T + HO - TSTOP)*HO .GT. 0.0D0) HO = TSTOP - T - IF ((TSTOP - TOUT)*HO .LT. 0.0D0) GO TO 709 -C -C COMPUTE INITIAL DERIVATIVE, UPDATING TN AND Y, IF APPLICABLE -340 IF (INFO(11) .EQ. 0) GO TO 350 - CALL DDAINI(TN,Y,YPRIME,NEQ, - * RES,JAC,HO,RWORK(LWT),IDID,RPAR,IPAR, - * RWORK(LPHI),RWORK(LDELTA),RWORK(LE), - * RWORK(LWM),IWORK(LIWM),HMIN,RWORK(LROUND), - * INFO(10),NTEMP) - IF (IDID .LT. 0) GO TO 390 -C -C LOAD H WITH H0. STORE H IN RWORK(LH) -350 H = HO - RWORK(LH) = H -C -C LOAD Y AND H*YPRIME INTO PHI(*,1) AND PHI(*,2) -360 ITEMP = LPHI + NEQ - DO 370 I = 1,NEQ - RWORK(LPHI + I - 1) = Y(I) -370 RWORK(ITEMP + I - 1) = H*YPRIME(I) -C -C INITIALIZE T0 IN RWORK AND CHECK FOR A ZERO OF G NEAR THE -C INITIAL T. 
-C - RWORK(LT0) = T - IWORK(LIRFND) = 0 - RWORK(LPSI)=H - RWORK(LPSI+1)=2.0D0*H - IWORK(LKOLD)=1 - IF(NG .EQ. 0) GO TO 390 - CALL DRCHEK(1,G,NG,NEQ,T,TOUT,Y,RWORK(LE),RWORK(LPHI), - * RWORK(LPSI),IWORK(LKOLD),RWORK(LG0),RWORK(LG1), - * RWORK(LGX),JROOT,IRT,RWORK(LROUND),INFO(3), - * RWORK,IWORK,RPAR,IPAR) - IF(IRT .NE. 0) GO TO 732 -C -C Check for a root in the interval (T0,TN], unless DDASRT -C did not have to initialize YPRIME. -C - IF(NG .EQ. 0 .OR. INFO(11) .EQ. 0) GO TO 390 - CALL DRCHEK(3,G,NG,NEQ,TN,TOUT,Y,RWORK(LE),RWORK(LPHI), - * RWORK(LPSI),IWORK(LKOLD),RWORK(LG0),RWORK(LG1), - * RWORK(LGX),JROOT,IRT,RWORK(LROUND),INFO(3), - * RWORK,IWORK,RPAR,IPAR) - IF(IRT .NE. 1) GO TO 390 - IWORK(LIRFND) = 1 - IDID = 4 - T = RWORK(LT0) - GO TO 580 -C -390 GO TO 500 -C -C------------------------------------------------------- -C THIS BLOCK IS FOR CONTINUATION CALLS ONLY. ITS -C PURPOSE IS TO CHECK STOP CONDITIONS BEFORE -C TAKING A STEP. -C ADJUST H IF NECESSARY TO MEET HMAX BOUND -C------------------------------------------------------- -C -400 CONTINUE - UROUND=RWORK(LROUND) - DONE = .FALSE. - TN=RWORK(LTN) - H=RWORK(LH) - IF(NG .EQ. 0) GO TO 405 -C -C Check for a zero of G near TN. -C - CALL DRCHEK(2,G,NG,NEQ,TN,TOUT,Y,RWORK(LE),RWORK(LPHI), - * RWORK(LPSI),IWORK(LKOLD),RWORK(LG0),RWORK(LG1), - * RWORK(LGX),JROOT,IRT,RWORK(LROUND),INFO(3), - * RWORK,IWORK,RPAR,IPAR) - IF(IRT .NE. 1) GO TO 405 - IWORK(LIRFND) = 1 - IDID = 4 - T = RWORK(LT0) - DONE = .TRUE. - GO TO 490 -C -405 CONTINUE - IF(INFO(7) .EQ. 0) GO TO 410 - RH = DABS(H)/RWORK(LHMAX) - IF(RH .GT. 1.0D0) H = H/RH -410 CONTINUE - IF(T .EQ. TOUT) GO TO 719 - IF((T - TOUT)*H .GT. 0.0D0) GO TO 711 - IF(INFO(4) .EQ. 1) GO TO 430 - IF(INFO(3) .EQ. 1) GO TO 420 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 490 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -420 IF((TN-T)*H .LE. 0.0D0) GO TO 490 - IF((TN - TOUT)*H .GT. 
0.0D0) GO TO 425 - CALL DDATRP(TN,TN,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TN - IDID = 1 - DONE = .TRUE. - GO TO 490 -425 CONTINUE - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -430 IF(INFO(3) .EQ. 1) GO TO 440 - TSTOP=RWORK(LTSTOP) - IF((TN-TSTOP)*H.GT.0.0D0) GO TO 715 - IF((TSTOP-TOUT)*H.LT.0.0D0)GO TO 709 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 450 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -440 TSTOP = RWORK(LTSTOP) - IF((TN-TSTOP)*H .GT. 0.0D0) GO TO 715 - IF((TSTOP-TOUT)*H .LT. 0.0D0) GO TO 709 - IF((TN-T)*H .LE. 0.0D0) GO TO 450 - IF((TN - TOUT)*H .GT. 0.0D0) GO TO 445 - CALL DDATRP(TN,TN,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TN - IDID = 1 - DONE = .TRUE. - GO TO 490 -445 CONTINUE - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -450 CONTINUE -C CHECK WHETHER WE ARE WITH IN ROUNDOFF OF TSTOP - IF(DABS(TN-TSTOP).GT.100.0D0*UROUND* - * (DABS(TN)+DABS(H)))GO TO 460 - CALL DDATRP(TN,TSTOP,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - IDID=2 - T=TSTOP - DONE = .TRUE. - GO TO 490 -460 TNEXT=TN+H - IF((TNEXT-TSTOP)*H.LE.0.0D0)GO TO 490 - H=TSTOP-TN - RWORK(LH)=H -C -490 IF (DONE) GO TO 590 -C -C------------------------------------------------------- -C THE NEXT BLOCK CONTAINS THE CALL TO THE -C ONE-STEP INTEGRATOR DDASTP. -C THIS IS A LOOPING POINT FOR THE INTEGRATION STEPS. -C CHECK FOR TOO MANY STEPS. -C UPDATE WT. -C CHECK FOR TOO MUCH ACCURACY REQUESTED. -C COMPUTE MINIMUM STEPSIZE. -C------------------------------------------------------- -C -500 CONTINUE -C CHECK FOR FAILURE TO COMPUTE INITIAL YPRIME - IF (IDID .EQ. 
-12) GO TO 527 -C -C CHECK FOR TOO MANY STEPS - IF((IWORK(LNST)-IWORK(LNSTL)).LT.500) - * GO TO 510 - IDID=-1 - GO TO 527 -C -C UPDATE WT -510 CALL DDAWTS(NEQ,INFO(2),RTOL,ATOL,RWORK(LPHI), - * RWORK(LWT),RPAR,IPAR) - DO 520 I=1,NEQ - IF(RWORK(I+LWT-1).GT.0.0D0)GO TO 520 - IDID=-3 - GO TO 527 -520 CONTINUE -C -C TEST FOR TOO MUCH ACCURACY REQUESTED. - R=DDANRM(NEQ,RWORK(LPHI),RWORK(LWT),RPAR,IPAR)* - * 100.0D0*UROUND - IF(R.LE.1.0D0)GO TO 525 -C MULTIPLY RTOL AND ATOL BY R AND RETURN - IF(INFO(2).EQ.1)GO TO 523 - RTOL(1)=R*RTOL(1) - ATOL(1)=R*ATOL(1) - IDID=-2 - GO TO 527 -523 DO 524 I=1,NEQ - RTOL(I)=R*RTOL(I) -524 ATOL(I)=R*ATOL(I) - IDID=-2 - GO TO 527 -525 CONTINUE -C -C COMPUTE MINIMUM STEPSIZE - HMIN=4.0D0*UROUND*DMAX1(DABS(TN),DABS(TOUT)) -C -C TEST H VS. HMAX - IF (INFO(7) .EQ. 0) GO TO 526 - RH = ABS(H)/RWORK(LHMAX) - IF (RH .GT. 1.0D0) H = H/RH -526 CONTINUE -C - CALL DDASTP(TN,Y,YPRIME,NEQ, - * RES,JAC,H,RWORK(LWT),INFO(1),IDID,RPAR,IPAR, - * RWORK(LPHI),RWORK(LDELTA),RWORK(LE), - * RWORK(LWM),IWORK(LIWM), - * RWORK(LALPHA),RWORK(LBETA),RWORK(LGAMMA), - * RWORK(LPSI),RWORK(LSIGMA), - * RWORK(LCJ),RWORK(LCJOLD),RWORK(LHOLD), - * RWORK(LS),HMIN,RWORK(LROUND), - * IWORK(LPHASE),IWORK(LJCALC),IWORK(LK), - * IWORK(LKOLD),IWORK(LNS),INFO(10),NTEMP) -527 IF(IDID.LT.0)GO TO 600 -C -C-------------------------------------------------------- -C THIS BLOCK HANDLES THE CASE OF A SUCCESSFUL RETURN -C FROM DDASTP (IDID=1). TEST FOR STOP CONDITIONS. -C-------------------------------------------------------- -C - IF(NG .EQ. 0) GO TO 529 -C -C Check for a zero of G near TN. -C - CALL DRCHEK(3,G,NG,NEQ,TN,TOUT,Y,RWORK(LE),RWORK(LPHI), - * RWORK(LPSI),IWORK(LKOLD),RWORK(LG0),RWORK(LG1), - * RWORK(LGX),JROOT,IRT,RWORK(LROUND),INFO(3), - * RWORK,IWORK,RPAR,IPAR) - IF(IRT .NE. 
1) GO TO 529 - IWORK(LIRFND) = 1 - IDID = 4 - T = RWORK(LT0) - GO TO 580 -C -529 CONTINUE - IF(INFO(4).NE.0)GO TO 540 - IF(INFO(3).NE.0)GO TO 530 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 500 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=3 - T=TOUT - GO TO 580 -530 IF((TN-TOUT)*H.GE.0.0D0)GO TO 535 - T=TN - IDID=1 - GO TO 580 -535 CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=3 - T=TOUT - GO TO 580 -540 IF(INFO(3).NE.0)GO TO 550 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 542 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID=3 - GO TO 580 -542 IF(DABS(TN-TSTOP).LE.100.0D0*UROUND* - * (DABS(TN)+DABS(H)))GO TO 545 - TNEXT=TN+H - IF((TNEXT-TSTOP)*H.LE.0.0D0)GO TO 500 - H=TSTOP-TN - GO TO 500 -545 CALL DDATRP(TN,TSTOP,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=2 - T=TSTOP - GO TO 580 -550 IF((TN-TOUT)*H.GE.0.0D0)GO TO 555 - IF(DABS(TN-TSTOP).LE.100.0D0*UROUND*(DABS(TN)+DABS(H)))GO TO 552 - T=TN - IDID=1 - GO TO 580 -552 CALL DDATRP(TN,TSTOP,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=2 - T=TSTOP - GO TO 580 -555 CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID=3 -580 CONTINUE -C -C-------------------------------------------------------- -C ALL SUCCESSFUL RETURNS FROM DDASRT ARE MADE FROM -C THIS BLOCK. -C-------------------------------------------------------- -C -590 CONTINUE - RWORK(LTN)=TN - RWORK(LH)=H - RWORK(LTLAST) = T - RETURN -C -C----------------------------------------------------------------------- -C THIS BLOCK HANDLES ALL UNSUCCESSFUL -C RETURNS OTHER THAN FOR ILLEGAL INPUT. 
-C----------------------------------------------------------------------- -C -600 CONTINUE - ITEMP=-IDID - GO TO (610,620,630,690,690,640,650,660,670,675, - * 680,685), ITEMP -C -C THE MAXIMUM NUMBER OF STEPS WAS TAKEN BEFORE -C REACHING TOUT -610 MSG = 'DASSL-- AT CURRENT T (=R1) 500 STEPS' - CALL XERRWV(MSG,38,610,0,0,0,0,1,TN,0.0D0) - MSG = 'DASSL-- TAKEN ON THIS CALL BEFORE REACHING TOUT' - CALL XERRWV(MSG,48,611,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C TOO MUCH ACCURACY FOR MACHINE PRECISION -620 MSG = 'DASSL-- AT T (=R1) TOO MUCH ACCURACY REQUESTED' - CALL XERRWV(MSG,47,620,0,0,0,0,1,TN,0.0D0) - MSG = 'DASSL-- FOR PRECISION OF MACHINE. RTOL AND ATOL' - CALL XERRWV(MSG,48,621,0,0,0,0,0,0.0D0,0.0D0) - MSG = 'DASSL-- WERE INCREASED TO APPROPRIATE VALUES' - CALL XERRWV(MSG,45,622,0,0,0,0,0,0.0D0,0.0D0) -C - GO TO 690 -C WT(I) .LE. 0.0D0 FOR SOME I (NOT AT START OF PROBLEM) -630 MSG = 'DASSL-- AT T (=R1) SOME ELEMENT OF WT' - CALL XERRWV(MSG,38,630,0,0,0,0,1,TN,0.0D0) - MSG = 'DASSL-- HAS BECOME .LE. 
0.0' - CALL XERRWV(MSG,28,631,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C ERROR TEST FAILED REPEATEDLY OR WITH H=HMIN -640 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2) THE' - CALL XERRWV(MSG,44,640,0,0,0,0,2,TN,H) - MSG='DASSL-- ERROR TEST FAILED REPEATEDLY OR WITH ABS(H)=HMIN' - CALL XERRWV(MSG,57,641,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C CORRECTOR CONVERGENCE FAILED REPEATEDLY OR WITH H=HMIN -650 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2) THE' - CALL XERRWV(MSG,44,650,0,0,0,0,2,TN,H) - MSG = 'DASSL-- CORRECTOR FAILED TO CONVERGE REPEATEDLY' - CALL XERRWV(MSG,48,651,0,0,0,0,0,0.0D0,0.0D0) - MSG = 'DASSL-- OR WITH ABS(H)=HMIN' - CALL XERRWV(MSG,28,652,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C THE ITERATION MATRIX IS SINGULAR -660 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2) THE' - CALL XERRWV(MSG,44,660,0,0,0,0,2,TN,H) - MSG = 'DASSL-- ITERATION MATRIX IS SINGULAR' - CALL XERRWV(MSG,37,661,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C CORRECTOR FAILURE PRECEEDED BY ERROR TEST FAILURES. -670 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2) THE' - CALL XERRWV(MSG,44,670,0,0,0,0,2,TN,H) - MSG = 'DASSL-- CORRECTOR COULD NOT CONVERGE. ALSO, THE' - CALL XERRWV(MSG,49,671,0,0,0,0,0,0.0D0,0.0D0) - MSG = 'DASSL-- ERROR TEST FAILED REPEATEDLY.' 
- CALL XERRWV(MSG,38,672,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C CORRECTOR FAILURE BECAUSE IRES = -1 -675 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2) THE' - CALL XERRWV(MSG,44,675,0,0,0,0,2,TN,H) - MSG = 'DASSL-- CORRECTOR COULD NOT CONVERGE BECAUSE' - CALL XERRWV(MSG,45,676,0,0,0,0,0,0.0D0,0.0D0) - MSG = 'DASSL-- IRES WAS EQUAL TO MINUS ONE' - CALL XERRWV(MSG,36,677,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C FAILURE BECAUSE IRES = -2 -680 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2)' - CALL XERRWV(MSG,40,680,0,0,0,0,2,TN,H) - MSG = 'DASSL-- IRES WAS EQUAL TO MINUS TWO' - CALL XERRWV(MSG,36,681,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -C -C FAILED TO COMPUTE INITIAL YPRIME -685 MSG = 'DASSL-- AT T (=R1) AND STEPSIZE H (=R2) THE' - CALL XERRWV(MSG,44,685,0,0,0,0,2,TN,HO) - MSG = 'DASSL-- INITIAL YPRIME COULD NOT BE COMPUTED' - CALL XERRWV(MSG,45,686,0,0,0,0,0,0.0D0,0.0D0) - GO TO 690 -690 CONTINUE - INFO(1)=-1 - T=TN - RWORK(LTN)=TN - RWORK(LH)=H - RETURN -C----------------------------------------------------------------------- -C THIS BLOCK HANDLES ALL ERROR RETURNS DUE -C TO ILLEGAL INPUT, AS DETECTED BEFORE CALLING -C DDASTP. FIRST THE ERROR MESSAGE ROUTINE IS -C CALLED. IF THIS HAPPENS TWICE IN -C SUCCESSION, EXECUTION IS TERMINATED -C -C----------------------------------------------------------------------- -701 MSG = 'DASSL-- SOME ELEMENT OF INFO VECTOR IS NOT ZERO OR ONE' - CALL XERRWV(MSG,55,1,0,0,0,0,0,0.0D0,0.0D0) - GO TO 750 -702 MSG = 'DASSL-- NEQ (=I1) .LE. 0' - CALL XERRWV(MSG,25,2,0,1,NEQ,0,0,0.0D0,0.0D0) - GO TO 750 -703 MSG = 'DASSL-- MAXORD (=I1) NOT IN RANGE' - CALL XERRWV(MSG,34,3,0,1,MXORD,0,0,0.0D0,0.0D0) - GO TO 750 -704 MSG='DASSL-- RWORK LENGTH NEEDED, LENRW (=I1), EXCEEDS LRW (=I2)' - CALL XERRWV(MSG,60,4,0,2,LENRW,LRW,0,0.0D0,0.0D0) - GO TO 750 -705 MSG='DASSL-- IWORK LENGTH NEEDED, LENIW (=I1), EXCEEDS LIW (=I2)' - CALL XERRWV(MSG,60,5,0,2,LENIW,LIW,0,0.0D0,0.0D0) - GO TO 750 -706 MSG = 'DASSL-- SOME ELEMENT OF RTOL IS .LT. 
0' - CALL XERRWV(MSG,39,6,0,0,0,0,0,0.0D0,0.0D0) - GO TO 750 -707 MSG = 'DASSL-- SOME ELEMENT OF ATOL IS .LT. 0' - CALL XERRWV(MSG,39,7,0,0,0,0,0,0.0D0,0.0D0) - GO TO 750 -708 MSG = 'DASSL-- ALL ELEMENTS OF RTOL AND ATOL ARE ZERO' - CALL XERRWV(MSG,47,8,0,0,0,0,0,0.0D0,0.0D0) - GO TO 750 -709 MSG='DASSL-- INFO(4) = 1 AND TSTOP (=R1) BEHIND TOUT (=R2)' - CALL XERRWV(MSG,54,9,0,0,0,0,2,TSTOP,TOUT) - GO TO 750 -710 MSG = 'DASSL-- HMAX (=R1) .LT. 0.0' - CALL XERRWV(MSG,28,10,0,0,0,0,1,HMAX,0.0D0) - GO TO 750 -711 MSG = 'DASSL-- TOUT (=R1) BEHIND T (=R2)' - CALL XERRWV(MSG,34,11,0,0,0,0,2,TOUT,T) - GO TO 750 -712 MSG = 'DASSL-- INFO(8)=1 AND H0=0.0' - CALL XERRWV(MSG,29,12,0,0,0,0,0,0.0D0,0.0D0) - GO TO 750 -713 MSG = 'DASSL-- SOME ELEMENT OF WT IS .LE. 0.0' - CALL XERRWV(MSG,39,13,0,0,0,0,0,0.0D0,0.0D0) - GO TO 750 -714 MSG='DASSL-- TOUT (=R1) TOO CLOSE TO T (=R2) TO START INTEGRATION' - CALL XERRWV(MSG,60,14,0,0,0,0,2,TOUT,T) - GO TO 750 -715 MSG = 'DASSL-- INFO(4)=1 AND TSTOP (=R1) BEHIND T (=R2)' - CALL XERRWV(MSG,49,15,0,0,0,0,2,TSTOP,T) - GO TO 750 -717 MSG = 'DASSL-- ML (=I1) ILLEGAL. EITHER .LT. 0 OR .GT. NEQ' - CALL XERRWV(MSG,52,17,0,1,IWORK(LML),0,0,0.0D0,0.0D0) - GO TO 750 -718 MSG = 'DASSL-- MU (=I1) ILLEGAL. EITHER .LT. 0 OR .GT. NEQ' - CALL XERRWV(MSG,52,18,0,1,IWORK(LMU),0,0,0.0D0,0.0D0) - GO TO 750 -719 MSG = 'DASSL-- TOUT (=R1) IS EQUAL TO T (=R2)' - CALL XERRWV(MSG,39,19,0,0,0,0,2,TOUT,T) - GO TO 750 -730 MSG = 'DASSL-- NG (=I1) .LT. 0' - CALL XERRWV(MSG,24,30,1,1,NG,0,0,0.0D0,0.0D0) - GO TO 750 -732 MSG = 'DASSL-- ONE OR MORE COMPONENTS OF G HAS A ROOT' - CALL XERRWV(MSG,47,32,1,0,0,0,0,0.0D0,0.0D0) - MSG = ' TOO NEAR TO THE INITIAL POINT' - CALL XERRWV(MSG,38,32,1,0,0,0,0,0.0D0,0.0D0) -750 IF(INFO(1).EQ.-1) GO TO 760 - INFO(1)=-1 - IDID=-33 - RETURN -760 MSG = 'DASSL-- REPEATED OCCURRENCES OF ILLEGAL INPUT' - CALL XERRWV(MSG,46,801,0,0,0,0,0,0.0D0,0.0D0) -770 MSG = 'DASSL-- RUN TERMINATED. 
APPARENT INFINITE LOOP' - CALL XERRWV(MSG,47,802,1,0,0,0,0,0.0D0,0.0D0) - RETURN -C-----------END OF SUBROUTINE DDASRT------------------------------------ - END - SUBROUTINE DRCHEK (JOB, G, NG, NEQ, TN, TOUT, Y, YP, PHI, PSI, - * KOLD, G0, G1, GX, JROOT, IRT, UROUND, INFO3, RWORK, IWORK, - * RPAR, IPAR) -C -C***BEGIN PROLOGUE DRCHEK -C***REFER TO DDASRT -C***ROUTINES CALLED DDATRP, DROOTS, DCOPY -C***DATE WRITTEN 821001 (YYMMDD) -C***REVISION DATE 900926 (YYMMDD) -C***END PROLOGUE DRCHEK -C - IMPLICIT DOUBLE PRECISION(A-H,O-Z) - PARAMETER (LNGE=16, LIRFND=18, LLAST=19, LIMAX=20, - * LT0=41, LTLAST=42, LALPHR=43, LX2=44) - EXTERNAL G - INTEGER JOB, NG, NEQ, KOLD, JROOT, IRT, INFO3, IWORK, IPAR - DOUBLE PRECISION TN, TOUT, Y, YP, PHI, PSI, G0, G1, GX, UROUND, - * RWORK, RPAR - DIMENSION Y(*), YP(*), PHI(NEQ,*), PSI(*), - 1 G0(*), G1(*), GX(*), JROOT(*), RWORK(*), IWORK(*) - INTEGER I, JFLAG - DOUBLE PRECISION H - DOUBLE PRECISION HMING, T1, TEMP1, TEMP2, X - LOGICAL ZROOT -C----------------------------------------------------------------------- -C THIS ROUTINE CHECKS FOR THE PRESENCE OF A ROOT IN THE -C VICINITY OF THE CURRENT T, IN A MANNER DEPENDING ON THE -C INPUT FLAG JOB. IT CALLS SUBROUTINE DROOTS TO LOCATE THE ROOT -C AS PRECISELY AS POSSIBLE. -C -C IN ADDITION TO VARIABLES DESCRIBED PREVIOUSLY, DRCHEK -C USES THE FOLLOWING FOR COMMUNICATION.. -C JOB = INTEGER FLAG INDICATING TYPE OF CALL.. -C JOB = 1 MEANS THE PROBLEM IS BEING INITIALIZED, AND DRCHEK -C IS TO LOOK FOR A ROOT AT OR VERY NEAR THE INITIAL T. -C JOB = 2 MEANS A CONTINUATION CALL TO THE SOLVER WAS JUST -C MADE, AND DRCHEK IS TO CHECK FOR A ROOT IN THE -C RELEVANT PART OF THE STEP LAST TAKEN. -C JOB = 3 MEANS A SUCCESSFUL STEP WAS JUST TAKEN, AND DRCHEK -C IS TO LOOK FOR A ROOT IN THE INTERVAL OF THE STEP. -C G0 = ARRAY OF LENGTH NG, CONTAINING THE VALUE OF G AT T = T0. -C G0 IS INPUT FOR JOB .GE. 2 AND ON OUTPUT IN ALL CASES. -C G1,GX = ARRAYS OF LENGTH NG FOR WORK SPACE. 
-C IRT = COMPLETION FLAG.. -C IRT = 0 MEANS NO ROOT WAS FOUND. -C IRT = -1 MEANS JOB = 1 AND A ROOT WAS FOUND TOO NEAR TO T. -C IRT = 1 MEANS A LEGITIMATE ROOT WAS FOUND (JOB = 2 OR 3). -C ON RETURN, T0 IS THE ROOT LOCATION, AND Y IS THE -C CORRESPONDING SOLUTION VECTOR. -C T0 = VALUE OF T AT ONE ENDPOINT OF INTERVAL OF INTEREST. ONLY -C ROOTS BEYOND T0 IN THE DIRECTION OF INTEGRATION ARE SOUGHT. -C T0 IS INPUT IF JOB .GE. 2, AND OUTPUT IN ALL CASES. -C T0 IS UPDATED BY DRCHEK, WHETHER A ROOT IS FOUND OR NOT. -C STORED IN THE GLOBAL ARRAY RWORK. -C TLAST = LAST VALUE OF T RETURNED BY THE SOLVER (INPUT ONLY). -C STORED IN THE GLOBAL ARRAY RWORK. -C TOUT = FINAL OUTPUT TIME FOR THE SOLVER. -C IRFND = INPUT FLAG SHOWING WHETHER THE LAST STEP TAKEN HAD A ROOT. -C IRFND = 1 IF IT DID, = 0 IF NOT. -C STORED IN THE GLOBAL ARRAY IWORK. -C INFO3 = COPY OF INFO(3) (INPUT ONLY). -C----------------------------------------------------------------------- -C - H = PSI(1) - IRT = 0 - DO 10 I = 1,NG - 10 JROOT(I) = 0 - HMING = (DABS(TN) + DABS(H))*UROUND*100.0D0 -C - GO TO (100, 200, 300), JOB -C -C EVALUATE G AT INITIAL T (STORED IN RWORK(LT0)), AND CHECK FOR -C ZERO VALUES.---------------------------------------------------------- - 100 CONTINUE - CALL DDATRP(TN,RWORK(LT0),Y,YP,NEQ,KOLD,PHI,PSI) - CALL G (NEQ, RWORK(LT0), Y, NG, G0, RPAR, IPAR) - IWORK(LNGE) = 1 - ZROOT = .FALSE. - DO 110 I = 1,NG - 110 IF (DABS(G0(I)) .LE. 0.0D0) ZROOT = .TRUE. - IF (.NOT. ZROOT) GO TO 190 -C G HAS A ZERO AT T. LOOK AT G AT T + (SMALL INCREMENT). -------------- - TEMP1 = DSIGN(HMING,H) - RWORK(LT0) = RWORK(LT0) + TEMP1 - TEMP2 = TEMP1/H - DO 120 I = 1,NEQ - 120 Y(I) = Y(I) + TEMP2*PHI(I,2) - CALL G (NEQ, RWORK(LT0), Y, NG, G0, RPAR, IPAR) - IWORK(LNGE) = IWORK(LNGE) + 1 - ZROOT = .FALSE. - DO 130 I = 1,NG - 130 IF (DABS(G0(I)) .LE. 0.0D0) ZROOT = .TRUE. - IF (.NOT. ZROOT) GO TO 190 -C G HAS A ZERO AT T AND ALSO CLOSE TO T. TAKE ERROR RETURN. 
----------- - IRT = -1 - RETURN -C - 190 CONTINUE - RETURN -C -C - 200 CONTINUE - IF (IWORK(LIRFND) .EQ. 0) GO TO 260 -C IF A ROOT WAS FOUND ON THE PREVIOUS STEP, EVALUATE G0 = G(T0). ------- - CALL DDATRP (TN, RWORK(LT0), Y, YP, NEQ, KOLD, PHI, PSI) - CALL G (NEQ, RWORK(LT0), Y, NG, G0, RPAR, IPAR) - IWORK(LNGE) = IWORK(LNGE) + 1 - ZROOT = .FALSE. - DO 210 I = 1,NG - 210 IF (DABS(G0(I)) .LE. 0.0D0) ZROOT = .TRUE. - IF (.NOT. ZROOT) GO TO 260 -C G HAS A ZERO AT T0. LOOK AT G AT T + (SMALL INCREMENT). ------------- - TEMP1 = DSIGN(HMING,H) - RWORK(LT0) = RWORK(LT0) + TEMP1 - IF ((RWORK(LT0) - TN)*H .LT. 0.0D0) GO TO 230 - TEMP2 = TEMP1/H - DO 220 I = 1,NEQ - 220 Y(I) = Y(I) + TEMP2*PHI(I,2) - GO TO 240 - 230 CALL DDATRP (TN, RWORK(LT0), Y, YP, NEQ, KOLD, PHI, PSI) - 240 CALL G (NEQ, RWORK(LT0), Y, NG, G0, RPAR, IPAR) - IWORK(LNGE) = IWORK(LNGE) + 1 - ZROOT = .FALSE. - DO 250 I = 1,NG - IF (DABS(G0(I)) .GT. 0.0D0) GO TO 250 - JROOT(I) = 1 - ZROOT = .TRUE. - 250 CONTINUE - IF (.NOT. ZROOT) GO TO 260 -C G HAS A ZERO AT T0 AND ALSO CLOSE TO T0. RETURN ROOT. --------------- - IRT = 1 - RETURN -C HERE, G0 DOES NOT HAVE A ROOT -C G0 HAS NO ZERO COMPONENTS. PROCEED TO CHECK RELEVANT INTERVAL. ------ - 260 IF (TN .EQ. RWORK(LTLAST)) GO TO 390 -C - 300 CONTINUE -C SET T1 TO TN OR TOUT, WHICHEVER COMES FIRST, AND GET G AT T1. -------- - IF (INFO3 .EQ. 1) GO TO 310 - IF ((TOUT - TN)*H .GE. 0.0D0) GO TO 310 - T1 = TOUT - IF ((T1 - RWORK(LT0))*H .LE. 0.0D0) GO TO 390 - CALL DDATRP (TN, T1, Y, YP, NEQ, KOLD, PHI, PSI) - GO TO 330 - 310 T1 = TN - DO 320 I = 1,NEQ - 320 Y(I) = PHI(I,1) - 330 CALL G (NEQ, T1, Y, NG, G1, RPAR, IPAR) - IWORK(LNGE) = IWORK(LNGE) + 1 -C CALL DROOTS TO SEARCH FOR ROOT IN INTERVAL FROM T0 TO T1. ------------ - JFLAG = 0 - 350 CONTINUE - CALL DROOTS (NG, HMING, JFLAG, RWORK(LT0), T1, G0, G1, GX, X, - * JROOT, IWORK(LIMAX), IWORK(LLAST), RWORK(LALPHR), - * RWORK(LX2)) - IF (JFLAG .GT. 
1) GO TO 360 - CALL DDATRP (TN, X, Y, YP, NEQ, KOLD, PHI, PSI) - CALL G (NEQ, X, Y, NG, GX, RPAR, IPAR) - IWORK(LNGE) = IWORK(LNGE) + 1 - GO TO 350 - 360 RWORK(LT0) = X - CALL DCOPY (NG, GX, 1, G0, 1) - IF (JFLAG .EQ. 4) GO TO 390 -C FOUND A ROOT. INTERPOLATE TO X AND RETURN. -------------------------- - CALL DDATRP (TN, X, Y, YP, NEQ, KOLD, PHI, PSI) - IRT = 1 - RETURN -C - 390 CONTINUE - RETURN -C---------------------- END OF SUBROUTINE DRCHEK ----------------------- - END - SUBROUTINE DROOTS (NG, HMIN, JFLAG, X0, X1, G0, G1, GX, X, JROOT, - * IMAX, LAST, ALPHA, X2) -C -C***BEGIN PROLOGUE DROOTS -C***REFER TO DDASRT -C***ROUTINES CALLED DCOPY -C***DATE WRITTEN 821001 (YYMMDD) -C***REVISION DATE 900926 (YYMMDD) -C***END PROLOGUE DROOTS -C - IMPLICIT DOUBLE PRECISION(A-H,O-Z) - INTEGER NG, JFLAG, JROOT, IMAX, LAST - DOUBLE PRECISION HMIN, X0, X1, G0, G1, GX, X, ALPHA, X2 - DIMENSION G0(NG), G1(NG), GX(NG), JROOT(NG) -C----------------------------------------------------------------------- -C THIS SUBROUTINE FINDS THE LEFTMOST ROOT OF A SET OF ARBITRARY -C FUNCTIONS GI(X) (I = 1,...,NG) IN AN INTERVAL (X0,X1). ONLY ROOTS -C OF ODD MULTIPLICITY (I.E. CHANGES OF SIGN OF THE GI) ARE FOUND. -C HERE THE SIGN OF X1 - X0 IS ARBITRARY, BUT IS CONSTANT FOR A GIVEN -C PROBLEM, AND -LEFTMOST- MEANS NEAREST TO X0. -C THE VALUES OF THE VECTOR-VALUED FUNCTION G(X) = (GI, I=1...NG) -C ARE COMMUNICATED THROUGH THE CALL SEQUENCE OF DROOTS. -C THE METHOD USED IS THE ILLINOIS ALGORITHM. -C -C REFERENCE.. -C KATHIE L. HIEBERT AND LAWRENCE F. SHAMPINE, IMPLICITLY DEFINED -C OUTPUT POINTS FOR SOLUTIONS OF ODE-S, SANDIA REPORT SAND80-0180, -C FEBRUARY, 1980. -C -C DESCRIPTION OF PARAMETERS. -C -C NG = NUMBER OF FUNCTIONS GI, OR THE NUMBER OF COMPONENTS OF -C THE VECTOR VALUED FUNCTION G(X). INPUT ONLY. -C -C HMIN = RESOLUTION PARAMETER IN X. INPUT ONLY. WHEN A ROOT IS -C FOUND, IT IS LOCATED ONLY TO WITHIN AN ERROR OF HMIN IN X. 
-C TYPICALLY, HMIN SHOULD BE SET TO SOMETHING ON THE ORDER OF -C 100 * UROUND * MAX(ABS(X0),ABS(X1)), -C WHERE UROUND IS THE UNIT ROUNDOFF OF THE MACHINE. -C -C JFLAG = INTEGER FLAG FOR INPUT AND OUTPUT COMMUNICATION. -C -C ON INPUT, SET JFLAG = 0 ON THE FIRST CALL FOR THE PROBLEM, -C AND LEAVE IT UNCHANGED UNTIL THE PROBLEM IS COMPLETED. -C (THE PROBLEM IS COMPLETED WHEN JFLAG .GE. 2 ON RETURN.) -C -C ON OUTPUT, JFLAG HAS THE FOLLOWING VALUES AND MEANINGS.. -C JFLAG = 1 MEANS DROOTS NEEDS A VALUE OF G(X). SET GX = G(X) -C AND CALL DROOTS AGAIN. -C JFLAG = 2 MEANS A ROOT HAS BEEN FOUND. THE ROOT IS -C AT X, AND GX CONTAINS G(X). (ACTUALLY, X IS THE -C RIGHTMOST APPROXIMATION TO THE ROOT ON AN INTERVAL -C (X0,X1) OF SIZE HMIN OR LESS.) -C JFLAG = 3 MEANS X = X1 IS A ROOT, WITH ONE OR MORE OF THE GI -C BEING ZERO AT X1 AND NO SIGN CHANGES IN (X0,X1). -C GX CONTAINS G(X) ON OUTPUT. -C JFLAG = 4 MEANS NO ROOTS (OF ODD MULTIPLICITY) WERE -C FOUND IN (X0,X1) (NO SIGN CHANGES). -C -C X0,X1 = ENDPOINTS OF THE INTERVAL WHERE ROOTS ARE SOUGHT. -C X1 AND X0 ARE INPUT WHEN JFLAG = 0 (FIRST CALL), AND -C MUST BE LEFT UNCHANGED BETWEEN CALLS UNTIL THE PROBLEM IS -C COMPLETED. X0 AND X1 MUST BE DISTINCT, BUT X1 - X0 MAY BE -C OF EITHER SIGN. HOWEVER, THE NOTION OF -LEFT- AND -RIGHT- -C WILL BE USED TO MEAN NEARER TO X0 OR X1, RESPECTIVELY. -C WHEN JFLAG .GE. 2 ON RETURN, X0 AND X1 ARE OUTPUT, AND -C ARE THE ENDPOINTS OF THE RELEVANT INTERVAL. -C -C G0,G1 = ARRAYS OF LENGTH NG CONTAINING THE VECTORS G(X0) AND G(X1), -C RESPECTIVELY. WHEN JFLAG = 0, G0 AND G1 ARE INPUT AND -C NONE OF THE G0(I) SHOULD BE BE ZERO. -C WHEN JFLAG .GE. 2 ON RETURN, G0 AND G1 ARE OUTPUT. -C -C GX = ARRAY OF LENGTH NG CONTAINING G(X). GX IS INPUT -C WHEN JFLAG = 1, AND OUTPUT WHEN JFLAG .GE. 2. -C -C X = INDEPENDENT VARIABLE VALUE. OUTPUT ONLY. -C WHEN JFLAG = 1 ON OUTPUT, X IS THE POINT AT WHICH G(X) -C IS TO BE EVALUATED AND LOADED INTO GX. -C WHEN JFLAG = 2 OR 3, X IS THE ROOT. 
-C WHEN JFLAG = 4, X IS THE RIGHT ENDPOINT OF THE INTERVAL, X1. -C -C JROOT = INTEGER ARRAY OF LENGTH NG. OUTPUT ONLY. -C WHEN JFLAG = 2 OR 3, JROOT INDICATES WHICH COMPONENTS -C OF G(X) HAVE A ROOT AT X. JROOT(I) IS 1 IF THE I-TH -C COMPONENT HAS A ROOT, AND JROOT(I) = 0 OTHERWISE. -C -C IMAX, LAST, ALPHA, X2 = -C BOOKKEEPING VARIABLES WHICH MUST BE SAVED FROM CALL -C TO CALL. THEY ARE SAVED INSIDE THE CALLING ROUTINE, -C BUT THEY ARE USED ONLY WITHIN THIS ROUTINE. -C----------------------------------------------------------------------- - INTEGER I, IMXOLD, NXLAST - DOUBLE PRECISION T2, TMAX, ZERO - LOGICAL ZROOT, SGNCHG, XROOT - DATA ZERO/0.0D0/ -C - IF (JFLAG .EQ. 1) GO TO 200 -C JFLAG .NE. 1. CHECK FOR CHANGE IN SIGN OF G OR ZERO AT X1. ---------- - IMAX = 0 - TMAX = ZERO - ZROOT = .FALSE. - DO 120 I = 1,NG - IF (DABS(G1(I)) .GT. ZERO) GO TO 110 - ZROOT = .TRUE. - GO TO 120 -C AT THIS POINT, G0(I) HAS BEEN CHECKED AND CANNOT BE ZERO. ------------ - 110 IF (DSIGN(1.0D0,G0(I)) .EQ. DSIGN(1.0D0,G1(I))) GO TO 120 - T2 = DABS(G1(I)/(G1(I)-G0(I))) - IF (T2 .LE. TMAX) GO TO 120 - TMAX = T2 - IMAX = I - 120 CONTINUE - IF (IMAX .GT. 0) GO TO 130 - SGNCHG = .FALSE. - GO TO 140 - 130 SGNCHG = .TRUE. - 140 IF (.NOT. SGNCHG) GO TO 400 -C THERE IS A SIGN CHANGE. FIND THE FIRST ROOT IN THE INTERVAL. -------- - XROOT = .FALSE. - NXLAST = 0 - LAST = 1 -C -C REPEAT UNTIL THE FIRST ROOT IN THE INTERVAL IS FOUND. LOOP POINT. --- - 150 CONTINUE - IF (XROOT) GO TO 300 - IF (NXLAST .EQ. LAST) GO TO 160 - ALPHA = 1.0D0 - GO TO 180 - 160 IF (LAST .EQ. 0) GO TO 170 - ALPHA = 0.5D0*ALPHA - GO TO 180 - 170 ALPHA = 2.0D0*ALPHA - 180 X2 = X1 - (X1-X0)*G1(IMAX)/(G1(IMAX) - ALPHA*G0(IMAX)) - IF ((DABS(X2-X0) .LT. HMIN) .AND. - 1 (DABS(X1-X0) .GT. 10.0D0*HMIN)) X2 = X0 + 0.1D0*(X1-X0) - JFLAG = 1 - X = X2 -C RETURN TO THE CALLING ROUTINE TO GET A VALUE OF GX = G(X). ----------- - RETURN -C CHECK TO SEE IN WHICH INTERVAL G CHANGES SIGN. 
----------------------- - 200 IMXOLD = IMAX - IMAX = 0 - TMAX = ZERO - ZROOT = .FALSE. - DO 220 I = 1,NG - IF (DABS(GX(I)) .GT. ZERO) GO TO 210 - ZROOT = .TRUE. - GO TO 220 -C NEITHER G0(I) NOR GX(I) CAN BE ZERO AT THIS POINT. ------------------- - 210 IF (DSIGN(1.0D0,G0(I)) .EQ. DSIGN(1.0D0,GX(I))) GO TO 220 - T2 = DABS(GX(I)/(GX(I) - G0(I))) - IF (T2 .LE. TMAX) GO TO 220 - TMAX = T2 - IMAX = I - 220 CONTINUE - IF (IMAX .GT. 0) GO TO 230 - SGNCHG = .FALSE. - IMAX = IMXOLD - GO TO 240 - 230 SGNCHG = .TRUE. - 240 NXLAST = LAST - IF (.NOT. SGNCHG) GO TO 250 -C SIGN CHANGE BETWEEN X0 AND X2, SO REPLACE X1 WITH X2. ---------------- - X1 = X2 - CALL DCOPY (NG, GX, 1, G1, 1) - LAST = 1 - XROOT = .FALSE. - GO TO 270 - 250 IF (.NOT. ZROOT) GO TO 260 -C ZERO VALUE AT X2 AND NO SIGN CHANGE IN (X0,X2), SO X2 IS A ROOT. ----- - X1 = X2 - CALL DCOPY (NG, GX, 1, G1, 1) - XROOT = .TRUE. - GO TO 270 -C NO SIGN CHANGE BETWEEN X0 AND X2. REPLACE X0 WITH X2. --------------- - 260 CONTINUE - CALL DCOPY (NG, GX, 1, G0, 1) - X0 = X2 - LAST = 0 - XROOT = .FALSE. - 270 IF (DABS(X1-X0) .LE. HMIN) XROOT = .TRUE. - GO TO 150 -C -C RETURN WITH X1 AS THE ROOT. SET JROOT. SET X = X1 AND GX = G1. ----- - 300 JFLAG = 2 - X = X1 - CALL DCOPY (NG, G1, 1, GX, 1) - DO 320 I = 1,NG - JROOT(I) = 0 - IF (DABS(G1(I)) .GT. ZERO) GO TO 310 - JROOT(I) = 1 - GO TO 320 - 310 IF (DSIGN(1.0D0,G0(I)) .NE. DSIGN(1.0D0,G1(I))) JROOT(I) = 1 - 320 CONTINUE - RETURN -C -C NO SIGN CHANGE IN THE INTERVAL. CHECK FOR ZERO AT RIGHT ENDPOINT. --- - 400 IF (.NOT. ZROOT) GO TO 420 -C -C ZERO VALUE AT X1 AND NO SIGN CHANGE IN (X0,X1). RETURN JFLAG = 3. --- - X = X1 - CALL DCOPY (NG, G1, 1, GX, 1) - DO 410 I = 1,NG - JROOT(I) = 0 - IF (DABS(G1(I)) .LE. ZERO) JROOT (I) = 1 - 410 CONTINUE - JFLAG = 3 - RETURN -C -C NO SIGN CHANGES IN THIS INTERVAL. SET X = X1, RETURN JFLAG = 4. 
----- - 420 CALL DCOPY (NG, G1, 1, GX, 1) - X = X1 - JFLAG = 4 - RETURN -C---------------------- END OF SUBROUTINE DROOTS ----------------------- - END - SUBROUTINE XERRWV (MSG, NMES, NERR, LEVEL, NI, I1, I2, NR, R1, R2) - INTEGER NMES, NERR, LEVEL, NI, I1, I2, NR - DOUBLE PRECISION R1, R2 - CHARACTER*1 MSG(NMES) -C----------------------------------------------------------------------- -C Subroutine XERRWV, as given here, constitutes a simplified version of -C the SLATEC error handling package. -C Written by A. C. Hindmarsh and P. N. Brown at LLNL. -C Modified 1/8/90 by Clement Ulrich at LLNL. -C Version of 8 January, 1990. -C This version is in double precision. -C -C All arguments are input arguments. -C -C MSG = The message (character array). -C NMES = The length of MSG (number of characters). -C NERR = The error number (not used). -C LEVEL = The error level.. -C 0 or 1 means recoverable (control returns to caller). -C 2 means fatal (run is aborted--see note below). -C NI = Number of integers (0, 1, or 2) to be printed with message. -C I1,I2 = Integers to be printed, depending on NI. -C NR = Number of reals (0, 1, or 2) to be printed with message. -C R1,R2 = Reals to be printed, depending on NR. -C -C Note.. this routine is compatible with ANSI-77; however the -C following assumptions may not be valid for some machines: -C -C 1. The argument MSG is assumed to be of type CHARACTER, and -C the message is printed with a format of (1X,80A1). -C 2. The message is assumed to take only one line. -C Multi-line messages are generated by repeated calls. -C 3. If LEVEL = 2, control passes to the statement STOP -C to abort the run. For a different run-abort command, -C change the statement following statement 100 at the end. -C 4. R1 and R2 are assumed to be in double precision and are printed -C in E21.13 format. -C 5. The logical unit number 6 is standard output. -C For a different default logical unit number, change the assignment -C statement for LUNIT below. 
-C -C----------------------------------------------------------------------- -C Subroutines called by XERRWV.. None -C Function routines called by XERRWV.. None -C----------------------------------------------------------------------- -C - INTEGER I, LUNIT, MESFLG -C -C Define message print flag and logical unit number. ------------------- - MESFLG = 1 - LUNIT = 6 - IF (MESFLG .EQ. 0) GO TO 100 -C Write the message. --------------------------------------------------- - WRITE (LUNIT,10) (MSG(I),I=1,NMES) - 10 FORMAT(1X,80A1) - IF (NI .EQ. 1) WRITE (LUNIT, 20) I1 - 20 FORMAT(6X,'In above message, I1 =',I10) - IF (NI .EQ. 2) WRITE (LUNIT, 30) I1,I2 - 30 FORMAT(6X,'In above message, I1 =',I10,3X,'I2 =',I10) - IF (NR .EQ. 1) WRITE (LUNIT, 40) R1 - 40 FORMAT(6X,'In above message, R1 =',E21.13) - IF (NR .EQ. 2) WRITE (LUNIT, 50) R1,R2 - 50 FORMAT(6X,'In above, R1 =',E21.13,3X,'R2 =',E21.13) -C Abort the run if LEVEL = 2. ------------------------------------------ - 100 IF (LEVEL .NE. 2) RETURN - STOP -C----------------------- End of Subroutine XERRWV ---------------------- - END diff --git a/scipy-0.10.1/scipy/integrate/odepack/ddassl.f b/scipy-0.10.1/scipy/integrate/odepack/ddassl.f deleted file mode 100644 index 7f91eeaa25..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/ddassl.f +++ /dev/null @@ -1,3561 +0,0 @@ - SUBROUTINE DDASSL (RES, NEQ, T, Y, YPRIME, TOUT, INFO, RTOL, ATOL, - + IDID, RWORK, LRW, IWORK, LIW, RPAR, IPAR, JAC) -C***BEGIN PROLOGUE DDASSL -C***PURPOSE This code solves a system of differential/algebraic -C equations of the form G(T,Y,YPRIME) = 0. -C***LIBRARY SLATEC (DASSL) -C***CATEGORY I1A2 -C***TYPE DOUBLE PRECISION (SDASSL-S, DDASSL-D) -C***KEYWORDS DIFFERENTIAL/ALGEBRAIC, BACKWARD DIFFERENTIATION FORMULAS, -C IMPLICIT DIFFERENTIAL SYSTEMS -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C COMPUTING AND MATHEMATICS RESEARCH DIVISION -C LAWRENCE LIVERMORE NATIONAL LABORATORY -C L - 316, P.O. BOX 808, -C LIVERMORE, CA. 
94550 -C***DESCRIPTION -C -C *Usage: -C -C EXTERNAL RES, JAC -C INTEGER NEQ, INFO(N), IDID, LRW, LIW, IWORK(LIW), IPAR -C DOUBLE PRECISION T, Y(NEQ), YPRIME(NEQ), TOUT, RTOL, ATOL, -C * RWORK(LRW), RPAR -C -C CALL DDASSL (RES, NEQ, T, Y, YPRIME, TOUT, INFO, RTOL, ATOL, -C * IDID, RWORK, LRW, IWORK, LIW, RPAR, IPAR, JAC) -C -C -C *Arguments: -C (In the following, all real arrays should be type DOUBLE PRECISION.) -C -C RES:EXT This is a subroutine which you provide to define the -C differential/algebraic system. -C -C NEQ:IN This is the number of equations to be solved. -C -C T:INOUT This is the current value of the independent variable. -C -C Y(*):INOUT This array contains the solution components at T. -C -C YPRIME(*):INOUT This array contains the derivatives of the solution -C components at T. -C -C TOUT:IN This is a point at which a solution is desired. -C -C INFO(N):IN The basic task of the code is to solve the system from T -C to TOUT and return an answer at TOUT. INFO is an integer -C array which is used to communicate exactly how you want -C this task to be carried out. (See below for details.) -C N must be greater than or equal to 15. -C -C RTOL,ATOL:INOUT These quantities represent relative and absolute -C error tolerances which you provide to indicate how -C accurately you wish the solution to be computed. You -C may choose them to be both scalars or else both vectors. -C Caution: In Fortran 77, a scalar is not the same as an -C array of length 1. Some compilers may object -C to using scalars for RTOL,ATOL. -C -C IDID:OUT This scalar quantity is an indicator reporting what the -C code did. You must monitor this integer variable to -C decide what action to take next. -C -C RWORK:WORK A real work array of length LRW which provides the -C code with needed storage space. -C -C LRW:IN The length of RWORK. (See below for required length.) -C -C IWORK:WORK An integer work array of length LIW which probides the -C code with needed storage space. 
-C -C LIW:IN The length of IWORK. (See below for required length.) -C -C RPAR,IPAR:IN These are real and integer parameter arrays which -C you can use for communication between your calling -C program and the RES subroutine (and the JAC subroutine) -C -C JAC:EXT This is the name of a subroutine which you may choose -C to provide for defining a matrix of partial derivatives -C described below. -C -C Quantities which may be altered by DDASSL are: -C T, Y(*), YPRIME(*), INFO(1), RTOL, ATOL, -C IDID, RWORK(*) AND IWORK(*) -C -C *Description -C -C Subroutine DDASSL uses the backward differentiation formulas of -C orders one through five to solve a system of the above form for Y and -C YPRIME. Values for Y and YPRIME at the initial time must be given as -C input. These values must be consistent, (that is, if T,Y,YPRIME are -C the given initial values, they must satisfy G(T,Y,YPRIME) = 0.). The -C subroutine solves the system from T to TOUT. It is easy to continue -C the solution to get results at additional TOUT. This is the interval -C mode of operation. Intermediate results can also be obtained easily -C by using the intermediate-output capability. -C -C The following detailed description is divided into subsections: -C 1. Input required for the first call to DDASSL. -C 2. Output after any return from DDASSL. -C 3. What to do to continue the integration. -C 4. Error messages. -C -C -C -------- INPUT -- WHAT TO DO ON THE FIRST CALL TO DDASSL ------------ -C -C The first call of the code is defined to be the start of each new -C problem. Read through the descriptions of all the following items, -C provide sufficient storage space for designated arrays, set -C appropriate variables for the initialization of the problem, and -C give information about how you want the problem to be solved. -C -C -C RES -- Provide a subroutine of the form -C SUBROUTINE RES(T,Y,YPRIME,DELTA,IRES,RPAR,IPAR) -C to define the system of differential/algebraic -C equations which is to be solved. 
For the given values -C of T,Y and YPRIME, the subroutine should -C return the residual of the defferential/algebraic -C system -C DELTA = G(T,Y,YPRIME) -C (DELTA(*) is a vector of length NEQ which is -C output for RES.) -C -C Subroutine RES must not alter T,Y or YPRIME. -C You must declare the name RES in an external -C statement in your program that calls DDASSL. -C You must dimension Y,YPRIME and DELTA in RES. -C -C IRES is an integer flag which is always equal to -C zero on input. Subroutine RES should alter IRES -C only if it encounters an illegal value of Y or -C a stop condition. Set IRES = -1 if an input value -C is illegal, and DDASSL will try to solve the problem -C without getting IRES = -1. If IRES = -2, DDASSL -C will return control to the calling program -C with IDID = -11. -C -C RPAR and IPAR are real and integer parameter arrays which -C you can use for communication between your calling program -C and subroutine RES. They are not altered by DDASSL. If you -C do not need RPAR or IPAR, ignore these parameters by treat- -C ing them as dummy arguments. If you do choose to use them, -C dimension them in your calling program and in RES as arrays -C of appropriate length. -C -C NEQ -- Set it to the number of differential equations. -C (NEQ .GE. 1) -C -C T -- Set it to the initial point of the integration. -C T must be defined as a variable. -C -C Y(*) -- Set this vector to the initial values of the NEQ solution -C components at the initial point. You must dimension Y of -C length at least NEQ in your calling program. -C -C YPRIME(*) -- Set this vector to the initial values of the NEQ -C first derivatives of the solution components at the initial -C point. You must dimension YPRIME at least NEQ in your -C calling program. If you do not know initial values of some -C of the solution components, see the explanation of INFO(11). -C -C TOUT -- Set it to the first point at which a solution -C is desired. You can not take TOUT = T. 
-C integration either forward in T (TOUT .GT. T) or -C backward in T (TOUT .LT. T) is permitted. -C -C The code advances the solution from T to TOUT using -C step sizes which are automatically selected so as to -C achieve the desired accuracy. If you wish, the code will -C return with the solution and its derivative at -C intermediate steps (intermediate-output mode) so that -C you can monitor them, but you still must provide TOUT in -C accord with the basic aim of the code. -C -C The first step taken by the code is a critical one -C because it must reflect how fast the solution changes near -C the initial point. The code automatically selects an -C initial step size which is practically always suitable for -C the problem. By using the fact that the code will not step -C past TOUT in the first step, you could, if necessary, -C restrict the length of the initial step size. -C -C For some problems it may not be permissible to integrate -C past a point TSTOP because a discontinuity occurs there -C or the solution or its derivative is not defined beyond -C TSTOP. When you have declared a TSTOP point (SEE INFO(4) -C and RWORK(1)), you have told the code not to integrate -C past TSTOP. In this case any TOUT beyond TSTOP is invalid -C input. -C -C INFO(*) -- Use the INFO array to give the code more details about -C how you want your problem solved. This array should be -C dimensioned of length 15, though DDASSL uses only the first -C eleven entries. You must respond to all of the following -C items, which are arranged as questions. The simplest use -C of the code corresponds to answering all questions as yes, -C i.e. setting all entries of INFO to 0. -C -C INFO(1) - This parameter enables the code to initialize -C itself. You must set it to indicate the start of every -C new problem. -C -C **** Is this the first call for this problem ... -C Yes - Set INFO(1) = 0 -C No - Not applicable here. -C See below for continuation calls. 
**** -C -C INFO(2) - How much accuracy you want of your solution -C is specified by the error tolerances RTOL and ATOL. -C The simplest use is to take them both to be scalars. -C To obtain more flexibility, they can both be vectors. -C The code must be told your choice. -C -C **** Are both error tolerances RTOL, ATOL scalars ... -C Yes - Set INFO(2) = 0 -C and input scalars for both RTOL and ATOL -C No - Set INFO(2) = 1 -C and input arrays for both RTOL and ATOL **** -C -C INFO(3) - The code integrates from T in the direction -C of TOUT by steps. If you wish, it will return the -C computed solution and derivative at the next -C intermediate step (the intermediate-output mode) or -C TOUT, whichever comes first. This is a good way to -C proceed if you want to see the behavior of the solution. -C If you must have solutions at a great many specific -C TOUT points, this code will compute them efficiently. -C -C **** Do you want the solution only at -C TOUT (and not at the next intermediate step) ... -C Yes - Set INFO(3) = 0 -C No - Set INFO(3) = 1 **** -C -C INFO(4) - To handle solutions at a great many specific -C values TOUT efficiently, this code may integrate past -C TOUT and interpolate to obtain the result at TOUT. -C Sometimes it is not possible to integrate beyond some -C point TSTOP because the equation changes there or it is -C not defined past TSTOP. Then you must tell the code -C not to go past. -C -C **** Can the integration be carried out without any -C restrictions on the independent variable T ... -C Yes - Set INFO(4)=0 -C No - Set INFO(4)=1 -C and define the stopping point TSTOP by -C setting RWORK(1)=TSTOP **** -C -C INFO(5) - To solve differential/algebraic problems it is -C necessary to use a matrix of partial derivatives of the -C system of differential equations. If you do not -C provide a subroutine to evaluate it analytically (see -C description of the item JAC in the call list), it will -C be approximated by numerical differencing in this code. 
-C although it is less trouble for you to have the code -C compute partial derivatives by numerical differencing, -C the solution will be more reliable if you provide the -C derivatives via JAC. Sometimes numerical differencing -C is cheaper than evaluating derivatives in JAC and -C sometimes it is not - this depends on your problem. -C -C **** Do you want the code to evaluate the partial -C derivatives automatically by numerical differences ... -C Yes - Set INFO(5)=0 -C No - Set INFO(5)=1 -C and provide subroutine JAC for evaluating the -C matrix of partial derivatives **** -C -C INFO(6) - DDASSL will perform much better if the matrix of -C partial derivatives, DG/DY + CJ*DG/DYPRIME, -C (here CJ is a scalar determined by DDASSL) -C is banded and the code is told this. In this -C case, the storage needed will be greatly reduced, -C numerical differencing will be performed much cheaper, -C and a number of important algorithms will execute much -C faster. The differential equation is said to have -C half-bandwidths ML (lower) and MU (upper) if equation i -C involves only unknowns Y(J) with -C I-ML .LE. J .LE. I+MU -C for all I=1,2,...,NEQ. Thus, ML and MU are the widths -C of the lower and upper parts of the band, respectively, -C with the main diagonal being excluded. If you do not -C indicate that the equation has a banded matrix of partial -C derivatives, the code works with a full matrix of NEQ**2 -C elements (stored in the conventional way). Computations -C with banded matrices cost less time and storage than with -C full matrices if 2*ML+MU .LT. NEQ. If you tell the -C code that the matrix of partial derivatives has a banded -C structure and you want to provide subroutine JAC to -C compute the partial derivatives, then you must be careful -C to store the elements of the matrix in the special form -C indicated in the description of JAC. -C -C **** Do you want to solve the problem using a full -C (dense) matrix (and not a special banded -C structure) ... 
-C Yes - Set INFO(6)=0 -C No - Set INFO(6)=1 -C and provide the lower (ML) and upper (MU) -C bandwidths by setting -C IWORK(1)=ML -C IWORK(2)=MU **** -C -C -C INFO(7) -- You can specify a maximum (absolute value of) -C stepsize, so that the code -C will avoid passing over very -C large regions. -C -C **** Do you want the code to decide -C on its own maximum stepsize? -C Yes - Set INFO(7)=0 -C No - Set INFO(7)=1 -C and define HMAX by setting -C RWORK(2)=HMAX **** -C -C INFO(8) -- Differential/algebraic problems -C may occaisionally suffer from -C severe scaling difficulties on the -C first step. If you know a great deal -C about the scaling of your problem, you can -C help to alleviate this problem by -C specifying an initial stepsize HO. -C -C **** Do you want the code to define -C its own initial stepsize? -C Yes - Set INFO(8)=0 -C No - Set INFO(8)=1 -C and define HO by setting -C RWORK(3)=HO **** -C -C INFO(9) -- If storage is a severe problem, -C you can save some locations by -C restricting the maximum order MAXORD. -C the default value is 5. for each -C order decrease below 5, the code -C requires NEQ fewer locations, however -C it is likely to be slower. In any -C case, you must have 1 .LE. MAXORD .LE. 5 -C **** Do you want the maximum order to -C default to 5? -C Yes - Set INFO(9)=0 -C No - Set INFO(9)=1 -C and define MAXORD by setting -C IWORK(3)=MAXORD **** -C -C INFO(10) --If you know that the solutions to your equations -C will always be nonnegative, it may help to set this -C parameter. However, it is probably best to -C try the code without using this option first, -C and only to use this option if that doesn't -C work very well. -C **** Do you want the code to solve the problem without -C invoking any special nonnegativity constraints? -C Yes - Set INFO(10)=0 -C No - Set INFO(10)=1 -C -C INFO(11) --DDASSL normally requires the initial T, -C Y, and YPRIME to be consistent. That is, -C you must have G(T,Y,YPRIME) = 0 at the initial -C time. 
If you do not know the initial -C derivative precisely, you can let DDASSL try -C to compute it. -C **** Are the initialHE INITIAL T, Y, YPRIME consistent? -C Yes - Set INFO(11) = 0 -C No - Set INFO(11) = 1, -C and set YPRIME to an initial approximation -C to YPRIME. (If you have no idea what -C YPRIME should be, set it to zero. Note -C that the initial Y should be such -C that there must exist a YPRIME so that -C G(T,Y,YPRIME) = 0.) -C -C RTOL, ATOL -- You must assign relative (RTOL) and absolute (ATOL -C error tolerances to tell the code how accurately you -C want the solution to be computed. They must be defined -C as variables because the code may change them. You -C have two choices -- -C Both RTOL and ATOL are scalars. (INFO(2)=0) -C Both RTOL and ATOL are vectors. (INFO(2)=1) -C in either case all components must be non-negative. -C -C The tolerances are used by the code in a local error -C test at each step which requires roughly that -C ABS(LOCAL ERROR) .LE. RTOL*ABS(Y)+ATOL -C for each vector component. -C (More specifically, a root-mean-square norm is used to -C measure the size of vectors, and the error test uses the -C magnitude of the solution at the beginning of the step.) -C -C The true (global) error is the difference between the -C true solution of the initial value problem and the -C computed approximation. Practically all present day -C codes, including this one, control the local error at -C each step and do not even attempt to control the global -C error directly. -C Usually, but not always, the true accuracy of the -C computed Y is comparable to the error tolerances. This -C code will usually, but not always, deliver a more -C accurate solution if you reduce the tolerances and -C integrate again. By comparing two such solutions you -C can get a fairly reliable idea of the true error in the -C solution at the bigger tolerances. -C -C Setting ATOL=0. results in a pure relative error test on -C that component. Setting RTOL=0. 
results in a pure -C absolute error test on that component. A mixed test -C with non-zero RTOL and ATOL corresponds roughly to a -C relative error test when the solution component is much -C bigger than ATOL and to an absolute error test when the -C solution component is smaller than the threshhold ATOL. -C -C The code will not attempt to compute a solution at an -C accuracy unreasonable for the machine being used. It will -C advise you if you ask for too much accuracy and inform -C you as to the maximum accuracy it believes possible. -C -C RWORK(*) -- Dimension this real work array of length LRW in your -C calling program. -C -C LRW -- Set it to the declared length of the RWORK array. -C You must have -C LRW .GE. 40+(MAXORD+4)*NEQ+NEQ**2 -C for the full (dense) JACOBIAN case (when INFO(6)=0), or -C LRW .GE. 40+(MAXORD+4)*NEQ+(2*ML+MU+1)*NEQ -C for the banded user-defined JACOBIAN case -C (when INFO(5)=1 and INFO(6)=1), or -C LRW .GE. 40+(MAXORD+4)*NEQ+(2*ML+MU+1)*NEQ -C +2*(NEQ/(ML+MU+1)+1) -C for the banded finite-difference-generated JACOBIAN case -C (when INFO(5)=0 and INFO(6)=1) -C -C IWORK(*) -- Dimension this integer work array of length LIW in -C your calling program. -C -C LIW -- Set it to the declared length of the IWORK array. -C You must have LIW .GE. 20+NEQ -C -C RPAR, IPAR -- These are parameter arrays, of real and integer -C type, respectively. You can use them for communication -C between your program that calls DDASSL and the -C RES subroutine (and the JAC subroutine). They are not -C altered by DDASSL. If you do not need RPAR or IPAR, -C ignore these parameters by treating them as dummy -C arguments. If you do choose to use them, dimension -C them in your calling program and in RES (and in JAC) -C as arrays of appropriate length. -C -C JAC -- If you have set INFO(5)=0, you can ignore this parameter -C by treating it as a dummy argument. 
Otherwise, you must -C provide a subroutine of the form -C SUBROUTINE JAC(T,Y,YPRIME,PD,CJ,RPAR,IPAR) -C to define the matrix of partial derivatives -C PD=DG/DY+CJ*DG/DYPRIME -C CJ is a scalar which is input to JAC. -C For the given values of T,Y,YPRIME, the -C subroutine must evaluate the non-zero partial -C derivatives for each equation and each solution -C component, and store these values in the -C matrix PD. The elements of PD are set to zero -C before each call to JAC so only non-zero elements -C need to be defined. -C -C Subroutine JAC must not alter T,Y,(*),YPRIME(*), or CJ. -C You must declare the name JAC in an EXTERNAL statement in -C your program that calls DDASSL. You must dimension Y, -C YPRIME and PD in JAC. -C -C The way you must store the elements into the PD matrix -C depends on the structure of the matrix which you -C indicated by INFO(6). -C *** INFO(6)=0 -- Full (dense) matrix *** -C Give PD a first dimension of NEQ. -C When you evaluate the (non-zero) partial derivative -C of equation I with respect to variable J, you must -C store it in PD according to -C PD(I,J) = "DG(I)/DY(J)+CJ*DG(I)/DYPRIME(J)" -C *** INFO(6)=1 -- Banded JACOBIAN with ML lower and MU -C upper diagonal bands (refer to INFO(6) description -C of ML and MU) *** -C Give PD a first dimension of 2*ML+MU+1. -C when you evaluate the (non-zero) partial derivative -C of equation I with respect to variable J, you must -C store it in PD according to -C IROW = I - J + ML + MU + 1 -C PD(IROW,J) = "DG(I)/DY(J)+CJ*DG(I)/DYPRIME(J)" -C -C RPAR and IPAR are real and integer parameter arrays -C which you can use for communication between your calling -C program and your JACOBIAN subroutine JAC. They are not -C altered by DDASSL. If you do not need RPAR or IPAR, -C ignore these parameters by treating them as dummy -C arguments. If you do choose to use them, dimension -C them in your calling program and in JAC as arrays of -C appropriate length. 
-C -C -C OPTIONALLY REPLACEABLE NORM ROUTINE: -C -C DDASSL uses a weighted norm DDANRM to measure the size -C of vectors such as the estimated error in each step. -C A FUNCTION subprogram -C DOUBLE PRECISION FUNCTION DDANRM(NEQ,V,WT,RPAR,IPAR) -C DIMENSION V(NEQ),WT(NEQ) -C is used to define this norm. Here, V is the vector -C whose norm is to be computed, and WT is a vector of -C weights. A DDANRM routine has been included with DDASSL -C which computes the weighted root-mean-square norm -C given by -C DDANRM=SQRT((1/NEQ)*SUM(V(I)/WT(I))**2) -C this norm is suitable for most problems. In some -C special cases, it may be more convenient and/or -C efficient to define your own norm by writing a function -C subprogram to be called instead of DDANRM. This should, -C however, be attempted only after careful thought and -C consideration. -C -C -C -------- OUTPUT -- AFTER ANY RETURN FROM DDASSL --------------------- -C -C The principal aim of the code is to return a computed solution at -C TOUT, although it is also possible to obtain intermediate results -C along the way. To find out whether the code achieved its goal -C or if the integration process was interrupted before the task was -C completed, you must check the IDID parameter. -C -C -C T -- The solution was successfully advanced to the -C output value of T. -C -C Y(*) -- Contains the computed solution approximation at T. -C -C YPRIME(*) -- Contains the computed derivative -C approximation at T. -C -C IDID -- Reports what the code did. -C -C *** Task completed *** -C Reported by positive values of IDID -C -C IDID = 1 -- A step was successfully taken in the -C intermediate-output mode. The code has not -C yet reached TOUT. -C -C IDID = 2 -- The integration to TSTOP was successfully -C completed (T=TSTOP) by stepping exactly to TSTOP. -C -C IDID = 3 -- The integration to TOUT was successfully -C completed (T=TOUT) by stepping past TOUT. -C Y(*) is obtained by interpolation. -C YPRIME(*) is obtained by interpolation. 
-C -C *** Task interrupted *** -C Reported by negative values of IDID -C -C IDID = -1 -- A large amount of work has been expended. -C (About 500 steps) -C -C IDID = -2 -- The error tolerances are too stringent. -C -C IDID = -3 -- The local error test cannot be satisfied -C because you specified a zero component in ATOL -C and the corresponding computed solution -C component is zero. Thus, a pure relative error -C test is impossible for this component. -C -C IDID = -6 -- DDASSL had repeated error test -C failures on the last attempted step. -C -C IDID = -7 -- The corrector could not converge. -C -C IDID = -8 -- The matrix of partial derivatives -C is singular. -C -C IDID = -9 -- The corrector could not converge. -C there were repeated error test failures -C in this step. -C -C IDID =-10 -- The corrector could not converge -C because IRES was equal to minus one. -C -C IDID =-11 -- IRES equal to -2 was encountered -C and control is being returned to the -C calling program. -C -C IDID =-12 -- DDASSL failed to compute the initial -C YPRIME. -C -C -C -C IDID = -13,..,-32 -- Not applicable for this code -C -C *** Task terminated *** -C Reported by the value of IDID=-33 -C -C IDID = -33 -- The code has encountered trouble from which -C it cannot recover. A message is printed -C explaining the trouble and control is returned -C to the calling program. For example, this occurs -C when invalid input is detected. -C -C RTOL, ATOL -- These quantities remain unchanged except when -C IDID = -2. In this case, the error tolerances have been -C increased by the code to values which are estimated to -C be appropriate for continuing the integration. However, -C the reported solution at T was obtained using the input -C values of RTOL and ATOL. -C -C RWORK, IWORK -- Contain information which is usually of no -C interest to the user but necessary for subsequent calls. -C However, you may find use for -C -C RWORK(3)--Which contains the step size H to be -C attempted on the next step. 
-C -C RWORK(4)--Which contains the current value of the -C independent variable, i.e., the farthest point -C integration has reached. This will be different -C from T only when interpolation has been -C performed (IDID=3). -C -C RWORK(7)--Which contains the stepsize used -C on the last successful step. -C -C IWORK(7)--Which contains the order of the method to -C be attempted on the next step. -C -C IWORK(8)--Which contains the order of the method used -C on the last step. -C -C IWORK(11)--Which contains the number of steps taken so -C far. -C -C IWORK(12)--Which contains the number of calls to RES -C so far. -C -C IWORK(13)--Which contains the number of evaluations of -C the matrix of partial derivatives needed so -C far. -C -C IWORK(14)--Which contains the total number -C of error test failures so far. -C -C IWORK(15)--Which contains the total number -C of convergence test failures so far. -C (includes singular iteration matrix -C failures.) -C -C -C -------- INPUT -- WHAT TO DO TO CONTINUE THE INTEGRATION ------------ -C (CALLS AFTER THE FIRST) -C -C This code is organized so that subsequent calls to continue the -C integration involve little (if any) additional effort on your -C part. You must monitor the IDID parameter in order to determine -C what to do next. -C -C Recalling that the principal task of the code is to integrate -C from T to TOUT (the interval mode), usually all you will need -C to do is specify a new TOUT upon reaching the current TOUT. -C -C Do not alter any quantity not specifically permitted below, -C in particular do not alter NEQ,T,Y(*),YPRIME(*),RWORK(*),IWORK(*) -C or the differential equation in subroutine RES. Any such -C alteration constitutes a new problem and must be treated as such, -C i.e., you must start afresh. -C -C You cannot change from vector to scalar error control or vice -C versa (INFO(2)), but you can change the size of the entries of -C RTOL, ATOL. Increasing a tolerance makes the equation easier -C to integrate. 
Decreasing a tolerance will make the equation -C harder to integrate and should generally be avoided. -C -C You can switch from the intermediate-output mode to the -C interval mode (INFO(3)) or vice versa at any time. -C -C If it has been necessary to prevent the integration from going -C past a point TSTOP (INFO(4), RWORK(1)), keep in mind that the -C code will not integrate to any TOUT beyond the currently -C specified TSTOP. Once TSTOP has been reached you must change -C the value of TSTOP or set INFO(4)=0. You may change INFO(4) -C or TSTOP at any time but you must supply the value of TSTOP in -C RWORK(1) whenever you set INFO(4)=1. -C -C Do not change INFO(5), INFO(6), IWORK(1), or IWORK(2) -C unless you are going to restart the code. -C -C *** Following a completed task *** -C If -C IDID = 1, call the code again to continue the integration -C another step in the direction of TOUT. -C -C IDID = 2 or 3, define a new TOUT and call the code again. -C TOUT must be different from T. You cannot change -C the direction of integration without restarting. -C -C *** Following an interrupted task *** -C To show the code that you realize the task was -C interrupted and that you want to continue, you -C must take appropriate action and set INFO(1) = 1 -C If -C IDID = -1, The code has taken about 500 steps. -C If you want to continue, set INFO(1) = 1 and -C call the code again. An additional 500 steps -C will be allowed. -C -C IDID = -2, The error tolerances RTOL, ATOL have been -C increased to values the code estimates appropriate -C for continuing. You may want to change them -C yourself. If you are sure you want to continue -C with relaxed error tolerances, set INFO(1)=1 and -C call the code again. -C -C IDID = -3, A solution component is zero and you set the -C corresponding component of ATOL to zero. 
If you -C are sure you want to continue, you must first -C alter the error criterion to use positive values -C for those components of ATOL corresponding to zero -C solution components, then set INFO(1)=1 and call -C the code again. -C -C IDID = -4,-5 --- Cannot occur with this code. -C -C IDID = -6, Repeated error test failures occurred on the -C last attempted step in DDASSL. A singularity in the -C solution may be present. If you are absolutely -C certain you want to continue, you should restart -C the integration. (Provide initial values of Y and -C YPRIME which are consistent) -C -C IDID = -7, Repeated convergence test failures occurred -C on the last attempted step in DDASSL. An inaccurate -C or ill-conditioned JACOBIAN may be the problem. If -C you are absolutely certain you want to continue, you -C should restart the integration. -C -C IDID = -8, The matrix of partial derivatives is singular. -C Some of your equations may be redundant. -C DDASSL cannot solve the problem as stated. -C It is possible that the redundant equations -C could be removed, and then DDASSL could -C solve the problem. It is also possible -C that a solution to your problem either -C does not exist or is not unique. -C -C IDID = -9, DDASSL had multiple convergence test -C failures, preceeded by multiple error -C test failures, on the last attempted step. -C It is possible that your problem -C is ill-posed, and cannot be solved -C using this code. Or, there may be a -C discontinuity or a singularity in the -C solution. If you are absolutely certain -C you want to continue, you should restart -C the integration. -C -C IDID =-10, DDASSL had multiple convergence test failures -C because IRES was equal to minus one. -C If you are absolutely certain you want -C to continue, you should restart the -C integration. -C -C IDID =-11, IRES=-2 was encountered, and control is being -C returned to the calling program. -C -C IDID =-12, DDASSL failed to compute the initial YPRIME. 
-C This could happen because the initial -C approximation to YPRIME was not very good, or -C if a YPRIME consistent with the initial Y -C does not exist. The problem could also be caused -C by an inaccurate or singular iteration matrix. -C -C IDID = -13,..,-32 --- Cannot occur with this code. -C -C -C *** Following a terminated task *** -C -C If IDID= -33, you cannot continue the solution of this problem. -C An attempt to do so will result in your -C run being terminated. -C -C -C -------- ERROR MESSAGES --------------------------------------------- -C -C The SLATEC error print routine XERMSG is called in the event of -C unsuccessful completion of a task. Most of these are treated as -C "recoverable errors", which means that (unless the user has directed -C otherwise) control will be returned to the calling program for -C possible action after the message has been printed. -C -C In the event of a negative value of IDID other than -33, an appro- -C priate message is printed and the "error number" printed by XERMSG -C is the value of IDID. There are quite a number of illegal input -C errors that can lead to a returned value IDID=-33. The conditions -C and their printed "error numbers" are as follows: -C -C Error number Condition -C -C 1 Some element of INFO vector is not zero or one. -C 2 NEQ .le. 0 -C 3 MAXORD not in range. -C 4 LRW is less than the required length for RWORK. -C 5 LIW is less than the required length for IWORK. -C 6 Some element of RTOL is .lt. 0 -C 7 Some element of ATOL is .lt. 0 -C 8 All elements of RTOL and ATOL are zero. -C 9 INFO(4)=1 and TSTOP is behind TOUT. -C 10 HMAX .lt. 0.0 -C 11 TOUT is behind T. -C 12 INFO(8)=1 and H0=0.0 -C 13 Some element of WT is .le. 0.0 -C 14 TOUT is too close to T to start integration. -C 15 INFO(4)=1 and TSTOP is behind T. -C 16 --( Not used in this version )-- -C 17 ML illegal. Either .lt. 0 or .gt. NEQ -C 18 MU illegal. Either .lt. 0 or .gt. NEQ -C 19 TOUT = T. 
-C -C If DDASSL is called again without any action taken to remove the -C cause of an unsuccessful return, XERMSG will be called with a fatal -C error flag, which will cause unconditional termination of the -C program. There are two such fatal errors: -C -C Error number -998: The last step was terminated with a negative -C value of IDID other than -33, and no appropriate action was -C taken. -C -C Error number -999: The previous call was terminated because of -C illegal input (IDID=-33) and there is illegal input in the -C present call, as well. (Suspect infinite loop.) -C -C --------------------------------------------------------------------- -C -C***REFERENCES A DESCRIPTION OF DASSL: A DIFFERENTIAL/ALGEBRAIC -C SYSTEM SOLVER, L. R. PETZOLD, SAND82-8637, -C SANDIA NATIONAL LABORATORIES, SEPTEMBER 1982. -C***ROUTINES CALLED D1MACH, DDAINI, DDANRM, DDASTP, DDATRP, DDAWTS, -C XERMSG -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 880387 Code changes made. All common statements have been -C replaced by a DATA statement, which defines pointers into -C RWORK, and PARAMETER statements which define pointers -C into IWORK. As well the documentation has gone through -C grammatical changes. -C 881005 The prologue has been changed to mixed case. -C The subordinate routines had revision dates changed to -C this date, although the documentation for these routines -C is all upper case. No code changes. -C 890511 Code changes made. The DATA statement in the declaration -C section of DDASSL was replaced with a PARAMETER -C statement. Also the statement S = 100.D0 was removed -C from the top of the Newton iteration in DDASTP. -C The subordinate routines had revision dates changed to -C this date. -C 890517 The revision date syntax was replaced with the revision -C history syntax. Also the "DECK" comment was added to -C the top of all subroutines. These changes are consistent -C with new SLATEC guidelines. 
-C The subordinate routines had revision dates changed to -C this date. No code changes. -C 891013 Code changes made. -C Removed all occurrances of FLOAT or DBLE. All operations -C are now performed with "mixed-mode" arithmetic. -C Also, specific function names were replaced with generic -C function names to be consistent with new SLATEC guidelines. -C In particular: -C Replaced DSQRT with SQRT everywhere. -C Replaced DABS with ABS everywhere. -C Replaced DMIN1 with MIN everywhere. -C Replaced MIN0 with MIN everywhere. -C Replaced DMAX1 with MAX everywhere. -C Replaced MAX0 with MAX everywhere. -C Replaced DSIGN with SIGN everywhere. -C Also replaced REVISION DATE with REVISION HISTORY in all -C subordinate routines. -C 901004 Miscellaneous changes to prologue to complete conversion -C to SLATEC 4.0 format. No code changes. (F.N.Fritsch) -C 901009 Corrected GAMS classification code and converted subsidiary -C routines to 4.0 format. No code changes. (F.N.Fritsch) -C 901010 Converted XERRWV calls to XERMSG calls. (R.Clemens,AFWL) -C 901019 Code changes made. -C Merged SLATEC 4.0 changes with previous changes made -C by C. Ulrich. Below is a history of the changes made by -C C. Ulrich. (Changes in subsidiary routines are implied -C by this history) -C 891228 Bug was found and repaired inside the DDASSL -C and DDAINI routines. DDAINI was incorrectly -C returning the initial T with Y and YPRIME -C computed at T+H. The routine now returns T+H -C rather than the initial T. -C Cosmetic changes made to DDASTP. -C 900904 Three modifications were made to fix a bug (inside -C DDASSL) re interpolation for continuation calls and -C cases where TN is very close to TSTOP: -C -C 1) In testing for whether H is too large, just -C compare H to (TSTOP - TN), rather than -C (TSTOP - TN) * (1-4*UROUND), and set H to -C TSTOP - TN. This will force DDASTP to step -C exactly to TSTOP under certain situations -C (i.e. when H returned from DDASTP would otherwise -C take TN beyond TSTOP). 
-C -C 2) Inside the DDASTP loop, interpolate exactly to -C TSTOP if TN is very close to TSTOP (rather than -C interpolating to within roundoff of TSTOP). -C -C 3) Modified IDID description for IDID = 2 to say that -C the solution is returned by stepping exactly to -C TSTOP, rather than TOUT. (In some cases the -C solution is actually obtained by extrapolating -C over a distance near unit roundoff to TSTOP, -C but this small distance is deemed acceptable in -C these circumstances.) -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue, removed unreferenced labels, -C and improved XERMSG calls. (FNF) -C 901030 Added ERROR MESSAGES section and reworked other sections to -C be of more uniform format. (FNF) -C 910624 Fixed minor bug related to HMAX (five lines ending in -C statement 526 in DDASSL). (LRP) -C -C***END PROLOGUE DDASSL -C -C**End -C -C Declare arguments. -C - INTEGER NEQ, INFO(15), IDID, LRW, IWORK(*), LIW, IPAR(*) - DOUBLE PRECISION - * T, Y(*), YPRIME(*), TOUT, RTOL(*), ATOL(*), RWORK(*), - * RPAR(*) - EXTERNAL RES, JAC -C -C Declare externals. -C - EXTERNAL D1MACH, DDAINI, DDANRM, DDASTP, DDATRP, DDAWTS, XERMSG - DOUBLE PRECISION D1MACH, DDANRM -C -C Declare local variables. -C - INTEGER I, ITEMP, LALPHA, LBETA, LCJ, LCJOLD, LCTF, LDELTA, - * LENIW, LENPD, LENRW, LE, LETF, LGAMMA, LH, LHMAX, LHOLD, LIPVT, - * LJCALC, LK, LKOLD, LIWM, LML, LMTYPE, LMU, LMXORD, LNJE, LNPD, - * LNRE, LNS, LNST, LNSTL, LPD, LPHASE, LPHI, LPSI, LROUND, LS, - * LSIGMA, LTN, LTSTOP, LWM, LWT, MBAND, MSAVE, MXORD, NPD, NTEMP, - * NZFLG - DOUBLE PRECISION - * ATOLI, H, HMAX, HMIN, HO, R, RH, RTOLI, TDIST, TN, TNEXT, - * TSTOP, UROUND, YPNORM - LOGICAL DONE -C Auxiliary variables for conversion of values to be included in -C error messages. 
- CHARACTER*8 XERN1, XERN2 - CHARACTER*16 XERN3, XERN4 -C -C SET POINTERS INTO IWORK - PARAMETER (LML=1, LMU=2, LMXORD=3, LMTYPE=4, LNST=11, - * LNRE=12, LNJE=13, LETF=14, LCTF=15, LNPD=16, - * LIPVT=21, LJCALC=5, LPHASE=6, LK=7, LKOLD=8, - * LNS=9, LNSTL=10, LIWM=1) -C -C SET RELATIVE OFFSET INTO RWORK - PARAMETER (NPD=1) -C -C SET POINTERS INTO RWORK - PARAMETER (LTSTOP=1, LHMAX=2, LH=3, LTN=4, - * LCJ=5, LCJOLD=6, LHOLD=7, LS=8, LROUND=9, - * LALPHA=11, LBETA=17, LGAMMA=23, - * LPSI=29, LSIGMA=35, LDELTA=41) -C -C***FIRST EXECUTABLE STATEMENT DDASSL - IF(INFO(1).NE.0)GO TO 100 -C -C----------------------------------------------------------------------- -C THIS BLOCK IS EXECUTED FOR THE INITIAL CALL ONLY. -C IT CONTAINS CHECKING OF INPUTS AND INITIALIZATIONS. -C----------------------------------------------------------------------- -C -C FIRST CHECK INFO ARRAY TO MAKE SURE ALL ELEMENTS OF INFO -C ARE EITHER ZERO OR ONE. - DO 10 I=2,11 - IF(INFO(I).NE.0.AND.INFO(I).NE.1)GO TO 701 -10 CONTINUE -C - IF(NEQ.LE.0)GO TO 702 -C -C CHECK AND COMPUTE MAXIMUM ORDER - MXORD=5 - IF(INFO(9).EQ.0)GO TO 20 - MXORD=IWORK(LMXORD) - IF(MXORD.LT.1.OR.MXORD.GT.5)GO TO 703 -20 IWORK(LMXORD)=MXORD -C -C COMPUTE MTYPE,LENPD,LENRW.CHECK ML AND MU. - IF(INFO(6).NE.0)GO TO 40 - LENPD=NEQ**2 - LENRW=40+(IWORK(LMXORD)+4)*NEQ+LENPD - IF(INFO(5).NE.0)GO TO 30 - IWORK(LMTYPE)=2 - GO TO 60 -30 IWORK(LMTYPE)=1 - GO TO 60 -40 IF(IWORK(LML).LT.0.OR.IWORK(LML).GE.NEQ)GO TO 717 - IF(IWORK(LMU).LT.0.OR.IWORK(LMU).GE.NEQ)GO TO 718 - LENPD=(2*IWORK(LML)+IWORK(LMU)+1)*NEQ - IF(INFO(5).NE.0)GO TO 50 - IWORK(LMTYPE)=5 - MBAND=IWORK(LML)+IWORK(LMU)+1 - MSAVE=(NEQ/MBAND)+1 - LENRW=40+(IWORK(LMXORD)+4)*NEQ+LENPD+2*MSAVE - GO TO 60 -50 IWORK(LMTYPE)=4 - LENRW=40+(IWORK(LMXORD)+4)*NEQ+LENPD -C -C CHECK LENGTHS OF RWORK AND IWORK -60 LENIW=20+NEQ - IWORK(LNPD)=LENPD - IF(LRW.LT.LENRW)GO TO 704 - IF(LIW.LT.LENIW)GO TO 705 -C -C CHECK TO SEE THAT TOUT IS DIFFERENT FROM T - IF(TOUT .EQ. 
T)GO TO 719 -C -C CHECK HMAX - IF(INFO(7).EQ.0)GO TO 70 - HMAX=RWORK(LHMAX) - IF(HMAX.LE.0.0D0)GO TO 710 -70 CONTINUE -C -C INITIALIZE COUNTERS - IWORK(LNST)=0 - IWORK(LNRE)=0 - IWORK(LNJE)=0 -C - IWORK(LNSTL)=0 - IDID=1 - GO TO 200 -C -C----------------------------------------------------------------------- -C THIS BLOCK IS FOR CONTINUATION CALLS -C ONLY. HERE WE CHECK INFO(1),AND IF THE -C LAST STEP WAS INTERRUPTED WE CHECK WHETHER -C APPROPRIATE ACTION WAS TAKEN. -C----------------------------------------------------------------------- -C -100 CONTINUE - IF(INFO(1).EQ.1)GO TO 110 - IF(INFO(1).NE.-1)GO TO 701 -C -C IF WE ARE HERE, THE LAST STEP WAS INTERRUPTED -C BY AN ERROR CONDITION FROM DDASTP,AND -C APPROPRIATE ACTION WAS NOT TAKEN. THIS -C IS A FATAL ERROR. - WRITE (XERN1, '(I8)') IDID - CALL XERMSG ('SLATEC', 'DDASSL', - * 'THE LAST STEP TERMINATED WITH A NEGATIVE VALUE OF IDID = ' // - * XERN1 // ' AND NO APPROPRIATE ACTION WAS TAKEN. ' // - * 'RUN TERMINATED', -998, 2) - RETURN -110 CONTINUE - IWORK(LNSTL)=IWORK(LNST) -C -C----------------------------------------------------------------------- -C THIS BLOCK IS EXECUTED ON ALL CALLS. -C THE ERROR TOLERANCE PARAMETERS ARE -C CHECKED, AND THE WORK ARRAY POINTERS -C ARE SET. -C----------------------------------------------------------------------- -C -200 CONTINUE -C CHECK RTOL,ATOL - NZFLG=0 - RTOLI=RTOL(1) - ATOLI=ATOL(1) - DO 210 I=1,NEQ - IF(INFO(2).EQ.1)RTOLI=RTOL(I) - IF(INFO(2).EQ.1)ATOLI=ATOL(I) - IF(RTOLI.GT.0.0D0.OR.ATOLI.GT.0.0D0)NZFLG=1 - IF(RTOLI.LT.0.0D0)GO TO 706 - IF(ATOLI.LT.0.0D0)GO TO 707 -210 CONTINUE - IF(NZFLG.EQ.0)GO TO 708 -C -C SET UP RWORK STORAGE.IWORK STORAGE IS FIXED -C IN DATA STATEMENT. - LE=LDELTA+NEQ - LWT=LE+NEQ - LPHI=LWT+NEQ - LPD=LPHI+(IWORK(LMXORD)+1)*NEQ - LWM=LPD - NTEMP=NPD+IWORK(LNPD) - IF(INFO(1).EQ.1)GO TO 400 -C -C----------------------------------------------------------------------- -C THIS BLOCK IS EXECUTED ON THE INITIAL CALL -C ONLY. 
SET THE INITIAL STEP SIZE, AND -C THE ERROR WEIGHT VECTOR, AND PHI. -C COMPUTE INITIAL YPRIME, IF NECESSARY. -C----------------------------------------------------------------------- -C - TN=T - IDID=1 -C -C SET ERROR WEIGHT VECTOR WT - CALL DDAWTS(NEQ,INFO(2),RTOL,ATOL,Y,RWORK(LWT),RPAR,IPAR) - DO 305 I = 1,NEQ - IF(RWORK(LWT+I-1).LE.0.0D0) GO TO 713 -305 CONTINUE -C -C COMPUTE UNIT ROUNDOFF AND HMIN - UROUND = D1MACH(4) - RWORK(LROUND) = UROUND - HMIN = 4.0D0*UROUND*MAX(ABS(T),ABS(TOUT)) -C -C CHECK INITIAL INTERVAL TO SEE THAT IT IS LONG ENOUGH - TDIST = ABS(TOUT - T) - IF(TDIST .LT. HMIN) GO TO 714 -C -C CHECK HO, IF THIS WAS INPUT - IF (INFO(8) .EQ. 0) GO TO 310 - HO = RWORK(LH) - IF ((TOUT - T)*HO .LT. 0.0D0) GO TO 711 - IF (HO .EQ. 0.0D0) GO TO 712 - GO TO 320 -310 CONTINUE -C -C COMPUTE INITIAL STEPSIZE, TO BE USED BY EITHER -C DDASTP OR DDAINI, DEPENDING ON INFO(11) - HO = 0.001D0*TDIST - YPNORM = DDANRM(NEQ,YPRIME,RWORK(LWT),RPAR,IPAR) - IF (YPNORM .GT. 0.5D0/HO) HO = 0.5D0/YPNORM - HO = SIGN(HO,TOUT-T) -C ADJUST HO IF NECESSARY TO MEET HMAX BOUND -320 IF (INFO(7) .EQ. 0) GO TO 330 - RH = ABS(HO)/RWORK(LHMAX) - IF (RH .GT. 1.0D0) HO = HO/RH -C COMPUTE TSTOP, IF APPLICABLE -330 IF (INFO(4) .EQ. 0) GO TO 340 - TSTOP = RWORK(LTSTOP) - IF ((TSTOP - T)*HO .LT. 0.0D0) GO TO 715 - IF ((T + HO - TSTOP)*HO .GT. 0.0D0) HO = TSTOP - T - IF ((TSTOP - TOUT)*HO .LT. 0.0D0) GO TO 709 -C -C COMPUTE INITIAL DERIVATIVE, UPDATING TN AND Y, IF APPLICABLE -340 IF (INFO(11) .EQ. 0) GO TO 350 - CALL DDAINI(TN,Y,YPRIME,NEQ, - * RES,JAC,HO,RWORK(LWT),IDID,RPAR,IPAR, - * RWORK(LPHI),RWORK(LDELTA),RWORK(LE), - * RWORK(LWM),IWORK(LIWM),HMIN,RWORK(LROUND), - * INFO(10),NTEMP) - IF (IDID .LT. 0) GO TO 390 -C -C LOAD H WITH HO. 
STORE H IN RWORK(LH) -350 H = HO - RWORK(LH) = H -C -C LOAD Y AND H*YPRIME INTO PHI(*,1) AND PHI(*,2) - ITEMP = LPHI + NEQ - DO 370 I = 1,NEQ - RWORK(LPHI + I - 1) = Y(I) -370 RWORK(ITEMP + I - 1) = H*YPRIME(I) -C -390 GO TO 500 -C -C------------------------------------------------------- -C THIS BLOCK IS FOR CONTINUATION CALLS ONLY. ITS -C PURPOSE IS TO CHECK STOP CONDITIONS BEFORE -C TAKING A STEP. -C ADJUST H IF NECESSARY TO MEET HMAX BOUND -C------------------------------------------------------- -C -400 CONTINUE - UROUND=RWORK(LROUND) - DONE = .FALSE. - TN=RWORK(LTN) - H=RWORK(LH) - IF(INFO(7) .EQ. 0) GO TO 410 - RH = ABS(H)/RWORK(LHMAX) - IF(RH .GT. 1.0D0) H = H/RH -410 CONTINUE - IF(T .EQ. TOUT) GO TO 719 - IF((T - TOUT)*H .GT. 0.0D0) GO TO 711 - IF(INFO(4) .EQ. 1) GO TO 430 - IF(INFO(3) .EQ. 1) GO TO 420 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 490 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -420 IF((TN-T)*H .LE. 0.0D0) GO TO 490 - IF((TN - TOUT)*H .GT. 0.0D0) GO TO 425 - CALL DDATRP(TN,TN,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TN - IDID = 1 - DONE = .TRUE. - GO TO 490 -425 CONTINUE - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -430 IF(INFO(3) .EQ. 1) GO TO 440 - TSTOP=RWORK(LTSTOP) - IF((TN-TSTOP)*H.GT.0.0D0) GO TO 715 - IF((TSTOP-TOUT)*H.LT.0.0D0)GO TO 709 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 450 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -440 TSTOP = RWORK(LTSTOP) - IF((TN-TSTOP)*H .GT. 0.0D0) GO TO 715 - IF((TSTOP-TOUT)*H .LT. 0.0D0) GO TO 709 - IF((TN-T)*H .LE. 0.0D0) GO TO 450 - IF((TN - TOUT)*H .GT. 0.0D0) GO TO 445 - CALL DDATRP(TN,TN,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TN - IDID = 1 - DONE = .TRUE. 
- GO TO 490 -445 CONTINUE - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - T = TOUT - IDID = 3 - DONE = .TRUE. - GO TO 490 -450 CONTINUE -C CHECK WHETHER WE ARE WITHIN ROUNDOFF OF TSTOP - IF(ABS(TN-TSTOP).GT.100.0D0*UROUND* - * (ABS(TN)+ABS(H)))GO TO 460 - CALL DDATRP(TN,TSTOP,Y,YPRIME,NEQ,IWORK(LKOLD), - * RWORK(LPHI),RWORK(LPSI)) - IDID=2 - T=TSTOP - DONE = .TRUE. - GO TO 490 -460 TNEXT=TN+H - IF((TNEXT-TSTOP)*H.LE.0.0D0)GO TO 490 - H=TSTOP-TN - RWORK(LH)=H -C -490 IF (DONE) GO TO 580 -C -C------------------------------------------------------- -C THE NEXT BLOCK CONTAINS THE CALL TO THE -C ONE-STEP INTEGRATOR DDASTP. -C THIS IS A LOOPING POINT FOR THE INTEGRATION STEPS. -C CHECK FOR TOO MANY STEPS. -C UPDATE WT. -C CHECK FOR TOO MUCH ACCURACY REQUESTED. -C COMPUTE MINIMUM STEPSIZE. -C------------------------------------------------------- -C -500 CONTINUE -C CHECK FOR FAILURE TO COMPUTE INITIAL YPRIME - IF (IDID .EQ. -12) GO TO 527 -C -C CHECK FOR TOO MANY STEPS - IF((IWORK(LNST)-IWORK(LNSTL)).LT.500) - * GO TO 510 - IDID=-1 - GO TO 527 -C -C UPDATE WT -510 CALL DDAWTS(NEQ,INFO(2),RTOL,ATOL,RWORK(LPHI), - * RWORK(LWT),RPAR,IPAR) - DO 520 I=1,NEQ - IF(RWORK(I+LWT-1).GT.0.0D0)GO TO 520 - IDID=-3 - GO TO 527 -520 CONTINUE -C -C TEST FOR TOO MUCH ACCURACY REQUESTED. - R=DDANRM(NEQ,RWORK(LPHI),RWORK(LWT),RPAR,IPAR)* - * 100.0D0*UROUND - IF(R.LE.1.0D0)GO TO 525 -C MULTIPLY RTOL AND ATOL BY R AND RETURN - IF(INFO(2).EQ.1)GO TO 523 - RTOL(1)=R*RTOL(1) - ATOL(1)=R*ATOL(1) - IDID=-2 - GO TO 527 -523 DO 524 I=1,NEQ - RTOL(I)=R*RTOL(I) -524 ATOL(I)=R*ATOL(I) - IDID=-2 - GO TO 527 -525 CONTINUE -C -C COMPUTE MINIMUM STEPSIZE - HMIN=4.0D0*UROUND*MAX(ABS(TN),ABS(TOUT)) -C -C TEST H VS. HMAX - IF (INFO(7) .EQ. 0) GO TO 526 - RH = ABS(H)/RWORK(LHMAX) - IF (RH .GT. 
1.0D0) H = H/RH -526 CONTINUE -C - CALL DDASTP(TN,Y,YPRIME,NEQ, - * RES,JAC,H,RWORK(LWT),INFO(1),IDID,RPAR,IPAR, - * RWORK(LPHI),RWORK(LDELTA),RWORK(LE), - * RWORK(LWM),IWORK(LIWM), - * RWORK(LALPHA),RWORK(LBETA),RWORK(LGAMMA), - * RWORK(LPSI),RWORK(LSIGMA), - * RWORK(LCJ),RWORK(LCJOLD),RWORK(LHOLD), - * RWORK(LS),HMIN,RWORK(LROUND), - * IWORK(LPHASE),IWORK(LJCALC),IWORK(LK), - * IWORK(LKOLD),IWORK(LNS),INFO(10),NTEMP) -527 IF(IDID.LT.0)GO TO 600 -C -C-------------------------------------------------------- -C THIS BLOCK HANDLES THE CASE OF A SUCCESSFUL RETURN -C FROM DDASTP (IDID=1). TEST FOR STOP CONDITIONS. -C-------------------------------------------------------- -C - IF(INFO(4).NE.0)GO TO 540 - IF(INFO(3).NE.0)GO TO 530 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 500 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=3 - T=TOUT - GO TO 580 -530 IF((TN-TOUT)*H.GE.0.0D0)GO TO 535 - T=TN - IDID=1 - GO TO 580 -535 CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=3 - T=TOUT - GO TO 580 -540 IF(INFO(3).NE.0)GO TO 550 - IF((TN-TOUT)*H.LT.0.0D0)GO TO 542 - CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID=3 - GO TO 580 -542 IF(ABS(TN-TSTOP).LE.100.0D0*UROUND* - * (ABS(TN)+ABS(H)))GO TO 545 - TNEXT=TN+H - IF((TNEXT-TSTOP)*H.LE.0.0D0)GO TO 500 - H=TSTOP-TN - GO TO 500 -545 CALL DDATRP(TN,TSTOP,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=2 - T=TSTOP - GO TO 580 -550 IF((TN-TOUT)*H.GE.0.0D0)GO TO 555 - IF(ABS(TN-TSTOP).LE.100.0D0*UROUND*(ABS(TN)+ABS(H)))GO TO 552 - T=TN - IDID=1 - GO TO 580 -552 CALL DDATRP(TN,TSTOP,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - IDID=2 - T=TSTOP - GO TO 580 -555 CALL DDATRP(TN,TOUT,Y,YPRIME,NEQ, - * IWORK(LKOLD),RWORK(LPHI),RWORK(LPSI)) - T=TOUT - IDID=3 - GO TO 580 -C -C-------------------------------------------------------- -C ALL SUCCESSFUL RETURNS FROM DDASSL ARE MADE FROM -C THIS BLOCK. 
-C-------------------------------------------------------- -C -580 CONTINUE - RWORK(LTN)=TN - RWORK(LH)=H - RETURN -C -C----------------------------------------------------------------------- -C THIS BLOCK HANDLES ALL UNSUCCESSFUL -C RETURNS OTHER THAN FOR ILLEGAL INPUT. -C----------------------------------------------------------------------- -C -600 CONTINUE - ITEMP=-IDID - GO TO (610,620,630,690,690,640,650,660,670,675, - * 680,685), ITEMP -C -C THE MAXIMUM NUMBER OF STEPS WAS TAKEN BEFORE -C REACHING TOUT -610 WRITE (XERN3, '(1P,D15.6)') TN - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT CURRENT T = ' // XERN3 // ' 500 STEPS TAKEN ON THIS ' // - * 'CALL BEFORE REACHING TOUT', IDID, 1) - GO TO 690 -C -C TOO MUCH ACCURACY FOR MACHINE PRECISION -620 WRITE (XERN3, '(1P,D15.6)') TN - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' TOO MUCH ACCURACY REQUESTED FOR ' // - * 'PRECISION OF MACHINE. RTOL AND ATOL WERE INCREASED TO ' // - * 'APPROPRIATE VALUES', IDID, 1) - GO TO 690 -C -C WT(I) .LE. 0.0 FOR SOME I (NOT AT START OF PROBLEM) -630 WRITE (XERN3, '(1P,D15.6)') TN - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' SOME ELEMENT OF WT HAS BECOME .LE. 
' // - * '0.0', IDID, 1) - GO TO 690 -C -C ERROR TEST FAILED REPEATEDLY OR WITH H=HMIN -640 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') H - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' THE ERROR TEST FAILED REPEATEDLY OR WITH ABS(H)=HMIN', - * IDID, 1) - GO TO 690 -C -C CORRECTOR CONVERGENCE FAILED REPEATEDLY OR WITH H=HMIN -650 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') H - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' THE CORRECTOR FAILED TO CONVERGE REPEATEDLY OR WITH ' // - * 'ABS(H)=HMIN', IDID, 1) - GO TO 690 -C -C THE ITERATION MATRIX IS SINGULAR -660 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') H - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' THE ITERATION MATRIX IS SINGULAR', IDID, 1) - GO TO 690 -C -C CORRECTOR FAILURE PRECEEDED BY ERROR TEST FAILURES. -670 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') H - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' THE CORRECTOR COULD NOT CONVERGE. 
ALSO, THE ERROR TEST ' // - * 'FAILED REPEATEDLY.', IDID, 1) - GO TO 690 -C -C CORRECTOR FAILURE BECAUSE IRES = -1 -675 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') H - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' THE CORRECTOR COULD NOT CONVERGE BECAUSE IRES WAS EQUAL ' // - * 'TO MINUS ONE', IDID, 1) - GO TO 690 -C -C FAILURE BECAUSE IRES = -2 -680 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') H - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' IRES WAS EQUAL TO MINUS TWO', IDID, 1) - GO TO 690 -C -C FAILED TO COMPUTE INITIAL YPRIME -685 WRITE (XERN3, '(1P,D15.6)') TN - WRITE (XERN4, '(1P,D15.6)') HO - CALL XERMSG ('SLATEC', 'DDASSL', - * 'AT T = ' // XERN3 // ' AND STEPSIZE H = ' // XERN4 // - * ' THE INITIAL YPRIME COULD NOT BE COMPUTED', IDID, 1) - GO TO 690 -C -690 CONTINUE - INFO(1)=-1 - T=TN - RWORK(LTN)=TN - RWORK(LH)=H - RETURN -C -C----------------------------------------------------------------------- -C THIS BLOCK HANDLES ALL ERROR RETURNS DUE -C TO ILLEGAL INPUT, AS DETECTED BEFORE CALLING -C DDASTP. FIRST THE ERROR MESSAGE ROUTINE IS -C CALLED. IF THIS HAPPENS TWICE IN -C SUCCESSION, EXECUTION IS TERMINATED -C -C----------------------------------------------------------------------- -701 CALL XERMSG ('SLATEC', 'DDASSL', - * 'SOME ELEMENT OF INFO VECTOR IS NOT ZERO OR ONE', 1, 1) - GO TO 750 -C -702 WRITE (XERN1, '(I8)') NEQ - CALL XERMSG ('SLATEC', 'DDASSL', - * 'NEQ = ' // XERN1 // ' .LE. 
0', 2, 1) - GO TO 750 -C -703 WRITE (XERN1, '(I8)') MXORD - CALL XERMSG ('SLATEC', 'DDASSL', - * 'MAXORD = ' // XERN1 // ' NOT IN RANGE', 3, 1) - GO TO 750 -C -704 WRITE (XERN1, '(I8)') LENRW - WRITE (XERN2, '(I8)') LRW - CALL XERMSG ('SLATEC', 'DDASSL', - * 'RWORK LENGTH NEEDED, LENRW = ' // XERN1 // - * ', EXCEEDS LRW = ' // XERN2, 4, 1) - GO TO 750 -C -705 WRITE (XERN1, '(I8)') LENIW - WRITE (XERN2, '(I8)') LIW - CALL XERMSG ('SLATEC', 'DDASSL', - * 'IWORK LENGTH NEEDED, LENIW = ' // XERN1 // - * ', EXCEEDS LIW = ' // XERN2, 5, 1) - GO TO 750 -C -706 CALL XERMSG ('SLATEC', 'DDASSL', - * 'SOME ELEMENT OF RTOL IS .LT. 0', 6, 1) - GO TO 750 -C -707 CALL XERMSG ('SLATEC', 'DDASSL', - * 'SOME ELEMENT OF ATOL IS .LT. 0', 7, 1) - GO TO 750 -C -708 CALL XERMSG ('SLATEC', 'DDASSL', - * 'ALL ELEMENTS OF RTOL AND ATOL ARE ZERO', 8, 1) - GO TO 750 -C -709 WRITE (XERN3, '(1P,D15.6)') TSTOP - WRITE (XERN4, '(1P,D15.6)') TOUT - CALL XERMSG ('SLATEC', 'DDASSL', - * 'INFO(4) = 1 AND TSTOP = ' // XERN3 // ' BEHIND TOUT = ' // - * XERN4, 9, 1) - GO TO 750 -C -710 WRITE (XERN3, '(1P,D15.6)') HMAX - CALL XERMSG ('SLATEC', 'DDASSL', - * 'HMAX = ' // XERN3 // ' .LT. 0.0', 10, 1) - GO TO 750 -C -711 WRITE (XERN3, '(1P,D15.6)') TOUT - WRITE (XERN4, '(1P,D15.6)') T - CALL XERMSG ('SLATEC', 'DDASSL', - * 'TOUT = ' // XERN3 // ' BEHIND T = ' // XERN4, 11, 1) - GO TO 750 -C -712 CALL XERMSG ('SLATEC', 'DDASSL', - * 'INFO(8)=1 AND H0=0.0', 12, 1) - GO TO 750 -C -713 CALL XERMSG ('SLATEC', 'DDASSL', - * 'SOME ELEMENT OF WT IS .LE. 
0.0', 13, 1) - GO TO 750 -C -714 WRITE (XERN3, '(1P,D15.6)') TOUT - WRITE (XERN4, '(1P,D15.6)') T - CALL XERMSG ('SLATEC', 'DDASSL', - * 'TOUT = ' // XERN3 // ' TOO CLOSE TO T = ' // XERN4 // - * ' TO START INTEGRATION', 14, 1) - GO TO 750 -C -715 WRITE (XERN3, '(1P,D15.6)') TSTOP - WRITE (XERN4, '(1P,D15.6)') T - CALL XERMSG ('SLATEC', 'DDASSL', - * 'INFO(4)=1 AND TSTOP = ' // XERN3 // ' BEHIND T = ' // XERN4, - * 15, 1) - GO TO 750 -C -717 WRITE (XERN1, '(I8)') IWORK(LML) - CALL XERMSG ('SLATEC', 'DDASSL', - * 'ML = ' // XERN1 // ' ILLEGAL. EITHER .LT. 0 OR .GT. NEQ', - * 17, 1) - GO TO 750 -C -718 WRITE (XERN1, '(I8)') IWORK(LMU) - CALL XERMSG ('SLATEC', 'DDASSL', - * 'MU = ' // XERN1 // ' ILLEGAL. EITHER .LT. 0 OR .GT. NEQ', - * 18, 1) - GO TO 750 -C -719 WRITE (XERN3, '(1P,D15.6)') TOUT - CALL XERMSG ('SLATEC', 'DDASSL', - * 'TOUT = T = ' // XERN3, 19, 1) - GO TO 750 -C -750 IDID=-33 - IF(INFO(1).EQ.-1) THEN - CALL XERMSG ('SLATEC', 'DDASSL', - * 'REPEATED OCCURRENCES OF ILLEGAL INPUT$$' // - * 'RUN TERMINATED. APPARENT INFINITE LOOP', -999, 2) - ENDIF -C - INFO(1)=-1 - RETURN -C-----------END OF SUBROUTINE DDASSL------------------------------------ - END - SUBROUTINE DDAWTS (NEQ, IWT, RTOL, ATOL, Y, WT, RPAR, IPAR) -C***BEGIN PROLOGUE DDAWTS -C***SUBSIDIARY -C***PURPOSE Set error weight vector for DDASSL. -C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDAWTS-S, DDAWTS-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------------- -C THIS SUBROUTINE SETS THE ERROR WEIGHT VECTOR -C WT ACCORDING TO WT(I)=RTOL(I)*ABS(Y(I))+ATOL(I), -C I=1,-,N. -C RTOL AND ATOL ARE SCALARS IF IWT = 0, -C AND VECTORS IF IWT = 1. -C----------------------------------------------------------------------- -C***ROUTINES CALLED (NONE) -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901019 Merged changes made by C. 
Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. (FNF) -C***END PROLOGUE DDAWTS -C - INTEGER NEQ, IWT, IPAR(*) - DOUBLE PRECISION RTOL(*), ATOL(*), Y(*), WT(*), RPAR(*) -C - INTEGER I - DOUBLE PRECISION ATOLI, RTOLI -C -C***FIRST EXECUTABLE STATEMENT DDAWTS - RTOLI=RTOL(1) - ATOLI=ATOL(1) - DO 20 I=1,NEQ - IF (IWT .EQ.0) GO TO 10 - RTOLI=RTOL(I) - ATOLI=ATOL(I) -10 WT(I)=RTOLI*ABS(Y(I))+ATOLI -20 CONTINUE - RETURN -C-----------END OF SUBROUTINE DDAWTS------------------------------------ - END - DOUBLE PRECISION FUNCTION DDANRM (NEQ, V, WT, RPAR, IPAR) -C***BEGIN PROLOGUE DDANRM -C***SUBSIDIARY -C***PURPOSE Compute vector norm for DDASSL. -C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDANRM-S, DDANRM-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------------- -C THIS FUNCTION ROUTINE COMPUTES THE WEIGHTED -C ROOT-MEAN-SQUARE NORM OF THE VECTOR OF LENGTH -C NEQ CONTAINED IN THE ARRAY V,WITH WEIGHTS -C CONTAINED IN THE ARRAY WT OF LENGTH NEQ. -C DDANRM=SQRT((1/NEQ)*SUM(V(I)/WT(I))**2) -C----------------------------------------------------------------------- -C***ROUTINES CALLED (NONE) -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901019 Merged changes made by C. Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. (FNF) -C***END PROLOGUE DDANRM -C - INTEGER NEQ, IPAR(*) - DOUBLE PRECISION V(NEQ), WT(NEQ), RPAR(*) -C - INTEGER I - DOUBLE PRECISION SUM, VMAX -C -C***FIRST EXECUTABLE STATEMENT DDANRM - DDANRM = 0.0D0 - VMAX = 0.0D0 - DO 10 I = 1,NEQ - IF(ABS(V(I)/WT(I)) .GT. VMAX) VMAX = ABS(V(I)/WT(I)) -10 CONTINUE - IF(VMAX .LE. 
0.0D0) GO TO 30 - SUM = 0.0D0 - DO 20 I = 1,NEQ -20 SUM = SUM + ((V(I)/WT(I))/VMAX)**2 - DDANRM = VMAX*SQRT(SUM/NEQ) -30 CONTINUE - RETURN -C------END OF FUNCTION DDANRM------ - END - SUBROUTINE DDAINI (X, Y, YPRIME, NEQ, RES, JAC, H, WT, IDID, RPAR, - + IPAR, PHI, DELTA, E, WM, IWM, HMIN, UROUND, NONNEG, NTEMP) -C***BEGIN PROLOGUE DDAINI -C***SUBSIDIARY -C***PURPOSE Initialization routine for DDASSL. -C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDAINI-S, DDAINI-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------- -C DDAINI TAKES ONE STEP OF SIZE H OR SMALLER -C WITH THE BACKWARD EULER METHOD, TO -C FIND YPRIME. X AND Y ARE UPDATED TO BE CONSISTENT WITH THE -C NEW STEP. A MODIFIED DAMPED NEWTON ITERATION IS USED TO -C SOLVE THE CORRECTOR ITERATION. -C -C THE INITIAL GUESS FOR YPRIME IS USED IN THE -C PREDICTION, AND IN FORMING THE ITERATION -C MATRIX, BUT IS NOT INVOLVED IN THE -C ERROR TEST. THIS MAY HAVE TROUBLE -C CONVERGING IF THE INITIAL GUESS IS NO -C GOOD, OR IF G(X,Y,YPRIME) DEPENDS -C NONLINEARLY ON YPRIME. -C -C THE PARAMETERS REPRESENT: -C X -- INDEPENDENT VARIABLE -C Y -- SOLUTION VECTOR AT X -C YPRIME -- DERIVATIVE OF SOLUTION VECTOR -C NEQ -- NUMBER OF EQUATIONS -C H -- STEPSIZE. IMDER MAY USE A STEPSIZE -C SMALLER THAN H. 
-C WT -- VECTOR OF WEIGHTS FOR ERROR -C CRITERION -C IDID -- COMPLETION CODE WITH THE FOLLOWING MEANINGS -C IDID= 1 -- YPRIME WAS FOUND SUCCESSFULLY -C IDID=-12 -- DDAINI FAILED TO FIND YPRIME -C RPAR,IPAR -- REAL AND INTEGER PARAMETER ARRAYS -C THAT ARE NOT ALTERED BY DDAINI -C PHI -- WORK SPACE FOR DDAINI -C DELTA,E -- WORK SPACE FOR DDAINI -C WM,IWM -- REAL AND INTEGER ARRAYS STORING -C MATRIX INFORMATION -C -C----------------------------------------------------------------- -C***ROUTINES CALLED DDAJAC, DDANRM, DDASLV -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901019 Merged changes made by C. Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. (FNF) -C 901030 Minor corrections to declarations. (FNF) -C***END PROLOGUE DDAINI -C - INTEGER NEQ, IDID, IPAR(*), IWM(*), NONNEG, NTEMP - DOUBLE PRECISION - * X, Y(*), YPRIME(*), H, WT(*), RPAR(*), PHI(NEQ,*), DELTA(*), - * E(*), WM(*), HMIN, UROUND - EXTERNAL RES, JAC -C - EXTERNAL DDAJAC, DDANRM, DDASLV - DOUBLE PRECISION DDANRM -C - INTEGER I, IER, IRES, JCALC, LNJE, LNRE, M, MAXIT, MJAC, NCF, - * NEF, NSF - DOUBLE PRECISION - * CJ, DAMP, DELNRM, ERR, OLDNRM, R, RATE, S, XOLD, YNORM - LOGICAL CONVGD -C - PARAMETER (LNRE=12) - PARAMETER (LNJE=13) -C - DATA MAXIT/10/,MJAC/5/ - DATA DAMP/0.75D0/ -C -C -C--------------------------------------------------- -C BLOCK 1. -C INITIALIZATIONS. -C--------------------------------------------------- -C -C***FIRST EXECUTABLE STATEMENT DDAINI - IDID=1 - NEF=0 - NCF=0 - NSF=0 - XOLD=X - YNORM=DDANRM(NEQ,Y,WT,RPAR,IPAR) -C -C SAVE Y AND YPRIME IN PHI - DO 100 I=1,NEQ - PHI(I,1)=Y(I) -100 PHI(I,2)=YPRIME(I) -C -C -C---------------------------------------------------- -C BLOCK 2. -C DO ONE BACKWARD EULER STEP. 
-C---------------------------------------------------- -C -C SET UP FOR START OF CORRECTOR ITERATION -200 CJ=1.0D0/H - X=X+H -C -C PREDICT SOLUTION AND DERIVATIVE - DO 250 I=1,NEQ -250 Y(I)=Y(I)+H*YPRIME(I) -C - JCALC=-1 - M=0 - CONVGD=.TRUE. -C -C -C CORRECTOR LOOP. -300 IWM(LNRE)=IWM(LNRE)+1 - IRES=0 -C - CALL RES(X,Y,YPRIME,DELTA,IRES,RPAR,IPAR) - IF (IRES.LT.0) GO TO 430 -C -C -C EVALUATE THE ITERATION MATRIX - IF (JCALC.NE.-1) GO TO 310 - IWM(LNJE)=IWM(LNJE)+1 - JCALC=0 - CALL DDAJAC(NEQ,X,Y,YPRIME,DELTA,CJ,H, - * IER,WT,E,WM,IWM,RES,IRES, - * UROUND,JAC,RPAR,IPAR,NTEMP) -C - S=1000000.D0 - IF (IRES.LT.0) GO TO 430 - IF (IER.NE.0) GO TO 430 - NSF=0 -C -C -C -C MULTIPLY RESIDUAL BY DAMPING FACTOR -310 CONTINUE - DO 320 I=1,NEQ -320 DELTA(I)=DELTA(I)*DAMP -C -C COMPUTE A NEW ITERATE (BACK SUBSTITUTION) -C STORE THE CORRECTION IN DELTA -C - CALL DDASLV(NEQ,DELTA,WM,IWM) -C -C UPDATE Y AND YPRIME - DO 330 I=1,NEQ - Y(I)=Y(I)-DELTA(I) -330 YPRIME(I)=YPRIME(I)-CJ*DELTA(I) -C -C TEST FOR CONVERGENCE OF THE ITERATION. -C - DELNRM=DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - IF (DELNRM.LE.100.D0*UROUND*YNORM) - * GO TO 400 -C - IF (M.GT.0) GO TO 340 - OLDNRM=DELNRM - GO TO 350 -C -340 RATE=(DELNRM/OLDNRM)**(1.0D0/M) - IF (RATE.GT.0.90D0) GO TO 430 - S=RATE/(1.0D0-RATE) -C -350 IF (S*DELNRM .LE. 0.33D0) GO TO 400 -C -C -C THE CORRECTOR HAS NOT YET CONVERGED. UPDATE -C M AND AND TEST WHETHER THE MAXIMUM -C NUMBER OF ITERATIONS HAVE BEEN TRIED. -C EVERY MJAC ITERATIONS, GET A NEW -C ITERATION MATRIX. -C - M=M+1 - IF (M.GE.MAXIT) GO TO 430 -C - IF ((M/MJAC)*MJAC.EQ.M) JCALC=-1 - GO TO 300 -C -C -C THE ITERATION HAS CONVERGED. -C CHECK NONNEGATIVITY CONSTRAINTS -400 IF (NONNEG.EQ.0) GO TO 450 - DO 410 I=1,NEQ -410 DELTA(I)=MIN(Y(I),0.0D0) -C - DELNRM=DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - IF (DELNRM.GT.0.33D0) GO TO 430 -C - DO 420 I=1,NEQ - Y(I)=Y(I)-DELTA(I) -420 YPRIME(I)=YPRIME(I)-CJ*DELTA(I) - GO TO 450 -C -C -C EXITS FROM CORRECTOR LOOP. -430 CONVGD=.FALSE. 
-450 IF (.NOT.CONVGD) GO TO 600 -C -C -C -C----------------------------------------------------- -C BLOCK 3. -C THE CORRECTOR ITERATION CONVERGED. -C DO ERROR TEST. -C----------------------------------------------------- -C - DO 510 I=1,NEQ -510 E(I)=Y(I)-PHI(I,1) - ERR=DDANRM(NEQ,E,WT,RPAR,IPAR) -C - IF (ERR.LE.1.0D0) RETURN -C -C -C -C-------------------------------------------------------- -C BLOCK 4. -C THE BACKWARD EULER STEP FAILED. RESTORE X, Y -C AND YPRIME TO THEIR ORIGINAL VALUES. -C REDUCE STEPSIZE AND TRY AGAIN, IF -C POSSIBLE. -C--------------------------------------------------------- -C -600 CONTINUE - X = XOLD - DO 610 I=1,NEQ - Y(I)=PHI(I,1) -610 YPRIME(I)=PHI(I,2) -C - IF (CONVGD) GO TO 640 - IF (IER.EQ.0) GO TO 620 - NSF=NSF+1 - H=H*0.25D0 - IF (NSF.LT.3.AND.ABS(H).GE.HMIN) GO TO 690 - IDID=-12 - RETURN -620 IF (IRES.GT.-2) GO TO 630 - IDID=-12 - RETURN -630 NCF=NCF+1 - H=H*0.25D0 - IF (NCF.LT.10.AND.ABS(H).GE.HMIN) GO TO 690 - IDID=-12 - RETURN -C -640 NEF=NEF+1 - R=0.90D0/(2.0D0*ERR+0.0001D0) - R=MAX(0.1D0,MIN(0.5D0,R)) - H=H*R - IF (ABS(H).GE.HMIN.AND.NEF.LT.10) GO TO 690 - IDID=-12 - RETURN -690 GO TO 200 -C -C-------------END OF SUBROUTINE DDAINI---------------------- - END - SUBROUTINE DDATRP (X, XOUT, YOUT, YPOUT, NEQ, KOLD, PHI, PSI) -C***BEGIN PROLOGUE DDATRP -C***SUBSIDIARY -C***PURPOSE Interpolation routine for DDASSL. -C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDATRP-S, DDATRP-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------------- -C THE METHODS IN SUBROUTINE DDASTP USE POLYNOMIALS -C TO APPROXIMATE THE SOLUTION. DDATRP APPROXIMATES THE -C SOLUTION AND ITS DERIVATIVE AT TIME XOUT BY EVALUATING -C ONE OF THESE POLYNOMIALS,AND ITS DERIVATIVE,THERE. -C INFORMATION DEFINING THIS POLYNOMIAL IS PASSED FROM -C DDASTP, SO DDATRP CANNOT BE USED ALONE. -C -C THE PARAMETERS ARE: -C X THE CURRENT TIME IN THE INTEGRATION. 
-C XOUT THE TIME AT WHICH THE SOLUTION IS DESIRED -C YOUT THE INTERPOLATED APPROXIMATION TO Y AT XOUT -C (THIS IS OUTPUT) -C YPOUT THE INTERPOLATED APPROXIMATION TO YPRIME AT XOUT -C (THIS IS OUTPUT) -C NEQ NUMBER OF EQUATIONS -C KOLD ORDER USED ON LAST SUCCESSFUL STEP -C PHI ARRAY OF SCALED DIVIDED DIFFERENCES OF Y -C PSI ARRAY OF PAST STEPSIZE HISTORY -C----------------------------------------------------------------------- -C***ROUTINES CALLED (NONE) -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901019 Merged changes made by C. Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. (FNF) -C***END PROLOGUE DDATRP -C - INTEGER NEQ, KOLD - DOUBLE PRECISION X, XOUT, YOUT(*), YPOUT(*), PHI(NEQ,*), PSI(*) -C - INTEGER I, J, KOLDP1 - DOUBLE PRECISION C, D, GAMMA, TEMP1 -C -C***FIRST EXECUTABLE STATEMENT DDATRP - KOLDP1=KOLD+1 - TEMP1=XOUT-X - DO 10 I=1,NEQ - YOUT(I)=PHI(I,1) -10 YPOUT(I)=0.0D0 - C=1.0D0 - D=0.0D0 - GAMMA=TEMP1/PSI(1) - DO 30 J=2,KOLDP1 - D=D*GAMMA+C/PSI(J-1) - C=C*GAMMA - GAMMA=(TEMP1+PSI(J-1))/PSI(J) - DO 20 I=1,NEQ - YOUT(I)=YOUT(I)+C*PHI(I,J) -20 YPOUT(I)=YPOUT(I)+D*PHI(I,J) -30 CONTINUE - RETURN -C -C------END OF SUBROUTINE DDATRP------ - END - SUBROUTINE DDASTP (X, Y, YPRIME, NEQ, RES, JAC, H, WT, JSTART, - + IDID, RPAR, IPAR, PHI, DELTA, E, WM, IWM, ALPHA, BETA, GAMMA, - + PSI, SIGMA, CJ, CJOLD, HOLD, S, HMIN, UROUND, IPHASE, JCALC, - + K, KOLD, NS, NONNEG, NTEMP) -C***BEGIN PROLOGUE DDASTP -C***SUBSIDIARY -C***PURPOSE Perform one step of the DDASSL integration. 
-C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDASTP-S, DDASTP-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------------- -C DDASTP SOLVES A SYSTEM OF DIFFERENTIAL/ -C ALGEBRAIC EQUATIONS OF THE FORM -C G(X,Y,YPRIME) = 0, FOR ONE STEP (NORMALLY -C FROM X TO X+H). -C -C THE METHODS USED ARE MODIFIED DIVIDED -C DIFFERENCE,FIXED LEADING COEFFICIENT -C FORMS OF BACKWARD DIFFERENTIATION -C FORMULAS. THE CODE ADJUSTS THE STEPSIZE -C AND ORDER TO CONTROL THE LOCAL ERROR PER -C STEP. -C -C -C THE PARAMETERS REPRESENT -C X -- INDEPENDENT VARIABLE -C Y -- SOLUTION VECTOR AT X -C YPRIME -- DERIVATIVE OF SOLUTION VECTOR -C AFTER SUCCESSFUL STEP -C NEQ -- NUMBER OF EQUATIONS TO BE INTEGRATED -C RES -- EXTERNAL USER-SUPPLIED SUBROUTINE -C TO EVALUATE THE RESIDUAL. THE CALL IS -C CALL RES(X,Y,YPRIME,DELTA,IRES,RPAR,IPAR) -C X,Y,YPRIME ARE INPUT. DELTA IS OUTPUT. -C ON INPUT, IRES=0. RES SHOULD ALTER IRES ONLY -C IF IT ENCOUNTERS AN ILLEGAL VALUE OF Y OR A -C STOP CONDITION. SET IRES=-1 IF AN INPUT VALUE -C OF Y IS ILLEGAL, AND DDASTP WILL TRY TO SOLVE -C THE PROBLEM WITHOUT GETTING IRES = -1. IF -C IRES=-2, DDASTP RETURNS CONTROL TO THE CALLING -C PROGRAM WITH IDID = -11. -C JAC -- EXTERNAL USER-SUPPLIED ROUTINE TO EVALUATE -C THE ITERATION MATRIX (THIS IS OPTIONAL) -C THE CALL IS OF THE FORM -C CALL JAC(X,Y,YPRIME,PD,CJ,RPAR,IPAR) -C PD IS THE MATRIX OF PARTIAL DERIVATIVES, -C PD=DG/DY+CJ*DG/DYPRIME -C H -- APPROPRIATE STEP SIZE FOR NEXT STEP. -C NORMALLY DETERMINED BY THE CODE -C WT -- VECTOR OF WEIGHTS FOR ERROR CRITERION. -C JSTART -- INTEGER VARIABLE SET 0 FOR -C FIRST STEP, 1 OTHERWISE. -C IDID -- COMPLETION CODE WITH THE FOLLOWING MEANINGS: -C IDID= 1 -- THE STEP WAS COMPLETED SUCCESSFULLY -C IDID=-6 -- THE ERROR TEST FAILED REPEATEDLY -C IDID=-7 -- THE CORRECTOR COULD NOT CONVERGE -C IDID=-8 -- THE ITERATION MATRIX IS SINGULAR -C IDID=-9 -- THE CORRECTOR COULD NOT CONVERGE. 
-C THERE WERE REPEATED ERROR TEST -C FAILURES ON THIS STEP. -C IDID=-10-- THE CORRECTOR COULD NOT CONVERGE -C BECAUSE IRES WAS EQUAL TO MINUS ONE -C IDID=-11-- IRES EQUAL TO -2 WAS ENCOUNTERED, -C AND CONTROL IS BEING RETURNED TO -C THE CALLING PROGRAM -C RPAR,IPAR -- REAL AND INTEGER PARAMETER ARRAYS THAT -C ARE USED FOR COMMUNICATION BETWEEN THE -C CALLING PROGRAM AND EXTERNAL USER ROUTINES -C THEY ARE NOT ALTERED BY DDASTP -C PHI -- ARRAY OF DIVIDED DIFFERENCES USED BY -C DDASTP. THE LENGTH IS NEQ*(K+1),WHERE -C K IS THE MAXIMUM ORDER -C DELTA,E -- WORK VECTORS FOR DDASTP OF LENGTH NEQ -C WM,IWM -- REAL AND INTEGER ARRAYS STORING -C MATRIX INFORMATION SUCH AS THE MATRIX -C OF PARTIAL DERIVATIVES,PERMUTATION -C VECTOR,AND VARIOUS OTHER INFORMATION. -C -C THE OTHER PARAMETERS ARE INFORMATION -C WHICH IS NEEDED INTERNALLY BY DDASTP TO -C CONTINUE FROM STEP TO STEP. -C -C----------------------------------------------------------------------- -C***ROUTINES CALLED DDAJAC, DDANRM, DDASLV, DDATRP -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901019 Merged changes made by C. Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. 
(FNF) -C***END PROLOGUE DDASTP -C - INTEGER NEQ, JSTART, IDID, IPAR(*), IWM(*), IPHASE, JCALC, K, - * KOLD, NS, NONNEG, NTEMP - DOUBLE PRECISION - * X, Y(*), YPRIME(*), H, WT(*), RPAR(*), PHI(NEQ,*), DELTA(*), - * E(*), WM(*), ALPHA(*), BETA(*), GAMMA(*), PSI(*), SIGMA(*), CJ, - * CJOLD, HOLD, S, HMIN, UROUND - EXTERNAL RES, JAC -C - EXTERNAL DDAJAC, DDANRM, DDASLV, DDATRP - DOUBLE PRECISION DDANRM -C - INTEGER I, IER, IRES, J, J1, KDIFF, KM1, KNEW, KP1, KP2, LCTF, - * LETF, LMXORD, LNJE, LNRE, LNST, M, MAXIT, NCF, NEF, NSF, NSP1 - DOUBLE PRECISION - * ALPHA0, ALPHAS, CJLAST, CK, DELNRM, ENORM, ERK, ERKM1, - * ERKM2, ERKP1, ERR, EST, HNEW, OLDNRM, PNORM, R, RATE, TEMP1, - * TEMP2, TERK, TERKM1, TERKM2, TERKP1, XOLD, XRATE - LOGICAL CONVGD -C - PARAMETER (LMXORD=3) - PARAMETER (LNST=11) - PARAMETER (LNRE=12) - PARAMETER (LNJE=13) - PARAMETER (LETF=14) - PARAMETER (LCTF=15) -C - DATA MAXIT/4/ - DATA XRATE/0.25D0/ -C -C -C -C -C -C----------------------------------------------------------------------- -C BLOCK 1. -C INITIALIZE. ON THE FIRST CALL,SET -C THE ORDER TO 1 AND INITIALIZE -C OTHER VARIABLES. -C----------------------------------------------------------------------- -C -C INITIALIZATIONS FOR ALL CALLS -C***FIRST EXECUTABLE STATEMENT DDASTP - IDID=1 - XOLD=X - NCF=0 - NSF=0 - NEF=0 - IF(JSTART .NE. 0) GO TO 120 -C -C IF THIS IS THE FIRST STEP,PERFORM -C OTHER INITIALIZATIONS - IWM(LETF) = 0 - IWM(LCTF) = 0 - K=1 - KOLD=0 - HOLD=0.0D0 - JSTART=1 - PSI(1)=H - CJOLD = 1.0D0/H - CJ = CJOLD - S = 100.D0 - JCALC = -1 - DELNRM=1.0D0 - IPHASE = 0 - NS=0 -120 CONTINUE -C -C -C -C -C -C----------------------------------------------------------------------- -C BLOCK 2 -C COMPUTE COEFFICIENTS OF FORMULAS FOR -C THIS STEP. -C----------------------------------------------------------------------- -200 CONTINUE - KP1=K+1 - KP2=K+2 - KM1=K-1 - XOLD=X - IF(H.NE.HOLD.OR.K .NE. KOLD) NS = 0 - NS=MIN(NS+1,KOLD+2) - NSP1=NS+1 - IF(KP1 .LT. 
NS)GO TO 230 -C - BETA(1)=1.0D0 - ALPHA(1)=1.0D0 - TEMP1=H - GAMMA(1)=0.0D0 - SIGMA(1)=1.0D0 - DO 210 I=2,KP1 - TEMP2=PSI(I-1) - PSI(I-1)=TEMP1 - BETA(I)=BETA(I-1)*PSI(I-1)/TEMP2 - TEMP1=TEMP2+H - ALPHA(I)=H/TEMP1 - SIGMA(I)=(I-1)*SIGMA(I-1)*ALPHA(I) - GAMMA(I)=GAMMA(I-1)+ALPHA(I-1)/H -210 CONTINUE - PSI(KP1)=TEMP1 -230 CONTINUE -C -C COMPUTE ALPHAS, ALPHA0 - ALPHAS = 0.0D0 - ALPHA0 = 0.0D0 - DO 240 I = 1,K - ALPHAS = ALPHAS - 1.0D0/I - ALPHA0 = ALPHA0 - ALPHA(I) -240 CONTINUE -C -C COMPUTE LEADING COEFFICIENT CJ - CJLAST = CJ - CJ = -ALPHAS/H -C -C COMPUTE VARIABLE STEPSIZE ERROR COEFFICIENT CK - CK = ABS(ALPHA(KP1) + ALPHAS - ALPHA0) - CK = MAX(CK,ALPHA(KP1)) -C -C DECIDE WHETHER NEW JACOBIAN IS NEEDED - TEMP1 = (1.0D0 - XRATE)/(1.0D0 + XRATE) - TEMP2 = 1.0D0/TEMP1 - IF (CJ/CJOLD .LT. TEMP1 .OR. CJ/CJOLD .GT. TEMP2) JCALC = -1 - IF (CJ .NE. CJLAST) S = 100.D0 -C -C CHANGE PHI TO PHI STAR - IF(KP1 .LT. NSP1) GO TO 280 - DO 270 J=NSP1,KP1 - DO 260 I=1,NEQ -260 PHI(I,J)=BETA(J)*PHI(I,J) -270 CONTINUE -280 CONTINUE -C -C UPDATE TIME - X=X+H -C -C -C -C -C -C----------------------------------------------------------------------- -C BLOCK 3 -C PREDICT THE SOLUTION AND DERIVATIVE, -C AND SOLVE THE CORRECTOR EQUATION -C----------------------------------------------------------------------- -C -C FIRST,PREDICT THE SOLUTION AND DERIVATIVE -300 CONTINUE - DO 310 I=1,NEQ - Y(I)=PHI(I,1) -310 YPRIME(I)=0.0D0 - DO 330 J=2,KP1 - DO 320 I=1,NEQ - Y(I)=Y(I)+PHI(I,J) -320 YPRIME(I)=YPRIME(I)+GAMMA(J)*PHI(I,J) -330 CONTINUE - PNORM = DDANRM (NEQ,Y,WT,RPAR,IPAR) -C -C -C -C SOLVE THE CORRECTOR EQUATION USING A -C MODIFIED NEWTON SCHEME. - CONVGD= .TRUE. - M=0 - IWM(LNRE)=IWM(LNRE)+1 - IRES = 0 - CALL RES(X,Y,YPRIME,DELTA,IRES,RPAR,IPAR) - IF (IRES .LT. 0) GO TO 380 -C -C -C IF INDICATED,REEVALUATE THE -C ITERATION MATRIX PD = DG/DY + CJ*DG/DYPRIME -C (WHERE G(X,Y,YPRIME)=0). SET -C JCALC TO 0 AS AN INDICATOR THAT -C THIS HAS BEEN DONE. - IF(JCALC .NE. 
-1)GO TO 340 - IWM(LNJE)=IWM(LNJE)+1 - JCALC=0 - CALL DDAJAC(NEQ,X,Y,YPRIME,DELTA,CJ,H, - * IER,WT,E,WM,IWM,RES,IRES,UROUND,JAC,RPAR, - * IPAR,NTEMP) - CJOLD=CJ - S = 100.D0 - IF (IRES .LT. 0) GO TO 380 - IF(IER .NE. 0)GO TO 380 - NSF=0 -C -C -C INITIALIZE THE ERROR ACCUMULATION VECTOR E. -340 CONTINUE - DO 345 I=1,NEQ -345 E(I)=0.0D0 -C -C -C CORRECTOR LOOP. -350 CONTINUE -C -C MULTIPLY RESIDUAL BY TEMP1 TO ACCELERATE CONVERGENCE - TEMP1 = 2.0D0/(1.0D0 + CJ/CJOLD) - DO 355 I = 1,NEQ -355 DELTA(I) = DELTA(I) * TEMP1 -C -C COMPUTE A NEW ITERATE (BACK-SUBSTITUTION). -C STORE THE CORRECTION IN DELTA. - CALL DDASLV(NEQ,DELTA,WM,IWM) -C -C UPDATE Y,E,AND YPRIME - DO 360 I=1,NEQ - Y(I)=Y(I)-DELTA(I) - E(I)=E(I)-DELTA(I) -360 YPRIME(I)=YPRIME(I)-CJ*DELTA(I) -C -C TEST FOR CONVERGENCE OF THE ITERATION - DELNRM=DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - IF (DELNRM .LE. 100.D0*UROUND*PNORM) GO TO 375 - IF (M .GT. 0) GO TO 365 - OLDNRM = DELNRM - GO TO 367 -365 RATE = (DELNRM/OLDNRM)**(1.0D0/M) - IF (RATE .GT. 0.90D0) GO TO 370 - S = RATE/(1.0D0 - RATE) -367 IF (S*DELNRM .LE. 0.33D0) GO TO 375 -C -C THE CORRECTOR HAS NOT YET CONVERGED. -C UPDATE M AND TEST WHETHER THE -C MAXIMUM NUMBER OF ITERATIONS HAVE -C BEEN TRIED. - M=M+1 - IF(M.GE.MAXIT)GO TO 370 -C -C EVALUATE THE RESIDUAL -C AND GO BACK TO DO ANOTHER ITERATION - IWM(LNRE)=IWM(LNRE)+1 - IRES = 0 - CALL RES(X,Y,YPRIME,DELTA,IRES, - * RPAR,IPAR) - IF (IRES .LT. 0) GO TO 380 - GO TO 350 -C -C -C THE CORRECTOR FAILED TO CONVERGE IN MAXIT -C ITERATIONS. IF THE ITERATION MATRIX -C IS NOT CURRENT,RE-DO THE STEP WITH -C A NEW ITERATION MATRIX. -370 CONTINUE - IF(JCALC.EQ.0)GO TO 380 - JCALC=-1 - GO TO 300 -C -C -C THE ITERATION HAS CONVERGED. IF NONNEGATIVITY OF SOLUTION IS -C REQUIRED, SET THE SOLUTION NONNEGATIVE, IF THE PERTURBATION -C TO DO IT IS SMALL ENOUGH. IF THE CHANGE IS TOO LARGE, THEN -C CONSIDER THE CORRECTOR ITERATION TO HAVE FAILED. -375 IF(NONNEG .EQ. 
0) GO TO 390 - DO 377 I = 1,NEQ -377 DELTA(I) = MIN(Y(I),0.0D0) - DELNRM = DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - IF(DELNRM .GT. 0.33D0) GO TO 380 - DO 378 I = 1,NEQ -378 E(I) = E(I) - DELTA(I) - GO TO 390 -C -C -C EXITS FROM BLOCK 3 -C NO CONVERGENCE WITH CURRENT ITERATION -C MATRIX,OR SINGULAR ITERATION MATRIX -380 CONVGD= .FALSE. -390 JCALC = 1 - IF(.NOT.CONVGD)GO TO 600 -C -C -C -C -C -C----------------------------------------------------------------------- -C BLOCK 4 -C ESTIMATE THE ERRORS AT ORDERS K,K-1,K-2 -C AS IF CONSTANT STEPSIZE WAS USED. ESTIMATE -C THE LOCAL ERROR AT ORDER K AND TEST -C WHETHER THE CURRENT STEP IS SUCCESSFUL. -C----------------------------------------------------------------------- -C -C ESTIMATE ERRORS AT ORDERS K,K-1,K-2 - ENORM = DDANRM(NEQ,E,WT,RPAR,IPAR) - ERK = SIGMA(K+1)*ENORM - TERK = (K+1)*ERK - EST = ERK - KNEW=K - IF(K .EQ. 1)GO TO 430 - DO 405 I = 1,NEQ -405 DELTA(I) = PHI(I,KP1) + E(I) - ERKM1=SIGMA(K)*DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - TERKM1 = K*ERKM1 - IF(K .GT. 2)GO TO 410 - IF(TERKM1 .LE. 0.5D0*TERK)GO TO 420 - GO TO 430 -410 CONTINUE - DO 415 I = 1,NEQ -415 DELTA(I) = PHI(I,K) + DELTA(I) - ERKM2=SIGMA(K-1)*DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - TERKM2 = (K-1)*ERKM2 - IF(MAX(TERKM1,TERKM2).GT.TERK)GO TO 430 -C LOWER THE ORDER -420 CONTINUE - KNEW=K-1 - EST = ERKM1 -C -C -C CALCULATE THE LOCAL ERROR FOR THE CURRENT STEP -C TO SEE IF THE STEP WAS SUCCESSFUL -430 CONTINUE - ERR = CK * ENORM - IF(ERR .GT. 1.0D0)GO TO 600 -C -C -C -C -C -C----------------------------------------------------------------------- -C BLOCK 5 -C THE STEP IS SUCCESSFUL. DETERMINE -C THE BEST ORDER AND STEPSIZE FOR -C THE NEXT STEP. UPDATE THE DIFFERENCES -C FOR THE NEXT STEP. 
-C----------------------------------------------------------------------- - IDID=1 - IWM(LNST)=IWM(LNST)+1 - KDIFF=K-KOLD - KOLD=K - HOLD=H -C -C -C ESTIMATE THE ERROR AT ORDER K+1 UNLESS: -C ALREADY DECIDED TO LOWER ORDER, OR -C ALREADY USING MAXIMUM ORDER, OR -C STEPSIZE NOT CONSTANT, OR -C ORDER RAISED IN PREVIOUS STEP - IF(KNEW.EQ.KM1.OR.K.EQ.IWM(LMXORD))IPHASE=1 - IF(IPHASE .EQ. 0)GO TO 545 - IF(KNEW.EQ.KM1)GO TO 540 - IF(K.EQ.IWM(LMXORD)) GO TO 550 - IF(KP1.GE.NS.OR.KDIFF.EQ.1)GO TO 550 - DO 510 I=1,NEQ -510 DELTA(I)=E(I)-PHI(I,KP2) - ERKP1 = (1.0D0/(K+2))*DDANRM(NEQ,DELTA,WT,RPAR,IPAR) - TERKP1 = (K+2)*ERKP1 - IF(K.GT.1)GO TO 520 - IF(TERKP1.GE.0.5D0*TERK)GO TO 550 - GO TO 530 -520 IF(TERKM1.LE.MIN(TERK,TERKP1))GO TO 540 - IF(TERKP1.GE.TERK.OR.K.EQ.IWM(LMXORD))GO TO 550 -C -C RAISE ORDER -530 K=KP1 - EST = ERKP1 - GO TO 550 -C -C LOWER ORDER -540 K=KM1 - EST = ERKM1 - GO TO 550 -C -C IF IPHASE = 0, INCREASE ORDER BY ONE AND MULTIPLY STEPSIZE BY -C FACTOR TWO -545 K = KP1 - HNEW = H*2.0D0 - H = HNEW - GO TO 575 -C -C -C DETERMINE THE APPROPRIATE STEPSIZE FOR -C THE NEXT STEP. -550 HNEW=H - TEMP2=K+1 - R=(2.0D0*EST+0.0001D0)**(-1.0D0/TEMP2) - IF(R .LT. 2.0D0) GO TO 555 - HNEW = 2.0D0*H - GO TO 560 -555 IF(R .GT. 1.0D0) GO TO 560 - R = MAX(0.5D0,MIN(0.9D0,R)) - HNEW = H*R -560 H=HNEW -C -C -C UPDATE DIFFERENCES FOR NEXT STEP -575 CONTINUE - IF(KOLD.EQ.IWM(LMXORD))GO TO 585 - DO 580 I=1,NEQ -580 PHI(I,KP2)=E(I) -585 CONTINUE - DO 590 I=1,NEQ -590 PHI(I,KP1)=PHI(I,KP1)+E(I) - DO 595 J1=2,KP1 - J=KP1-J1+1 - DO 595 I=1,NEQ -595 PHI(I,J)=PHI(I,J)+PHI(I,J+1) - RETURN -C -C -C -C -C -C----------------------------------------------------------------------- -C BLOCK 6 -C THE STEP IS UNSUCCESSFUL. RESTORE X,PSI,PHI -C DETERMINE APPROPRIATE STEPSIZE FOR -C CONTINUING THE INTEGRATION, OR EXIT WITH -C AN ERROR FLAG IF THERE HAVE BEEN MANY -C FAILURES. 
-C----------------------------------------------------------------------- -600 IPHASE = 1 -C -C RESTORE X,PHI,PSI - X=XOLD - IF(KP1.LT.NSP1)GO TO 630 - DO 620 J=NSP1,KP1 - TEMP1=1.0D0/BETA(J) - DO 610 I=1,NEQ -610 PHI(I,J)=TEMP1*PHI(I,J) -620 CONTINUE -630 CONTINUE - DO 640 I=2,KP1 -640 PSI(I-1)=PSI(I)-H -C -C -C TEST WHETHER FAILURE IS DUE TO CORRECTOR ITERATION -C OR ERROR TEST - IF(CONVGD)GO TO 660 - IWM(LCTF)=IWM(LCTF)+1 -C -C -C THE NEWTON ITERATION FAILED TO CONVERGE WITH -C A CURRENT ITERATION MATRIX. DETERMINE THE CAUSE -C OF THE FAILURE AND TAKE APPROPRIATE ACTION. - IF(IER.EQ.0)GO TO 650 -C -C THE ITERATION MATRIX IS SINGULAR. REDUCE -C THE STEPSIZE BY A FACTOR OF 4. IF -C THIS HAPPENS THREE TIMES IN A ROW ON -C THE SAME STEP, RETURN WITH AN ERROR FLAG - NSF=NSF+1 - R = 0.25D0 - H=H*R - IF (NSF .LT. 3 .AND. ABS(H) .GE. HMIN) GO TO 690 - IDID=-8 - GO TO 675 -C -C -C THE NEWTON ITERATION FAILED TO CONVERGE FOR A REASON -C OTHER THAN A SINGULAR ITERATION MATRIX. IF IRES = -2, THEN -C RETURN. OTHERWISE, REDUCE THE STEPSIZE AND TRY AGAIN, UNLESS -C TOO MANY FAILURES HAVE OCCURED. -650 CONTINUE - IF (IRES .GT. -2) GO TO 655 - IDID = -11 - GO TO 675 -655 NCF = NCF + 1 - R = 0.25D0 - H = H*R - IF (NCF .LT. 10 .AND. ABS(H) .GE. HMIN) GO TO 690 - IDID = -7 - IF (IRES .LT. 0) IDID = -10 - IF (NEF .GE. 3) IDID = -9 - GO TO 675 -C -C -C THE NEWTON SCHEME CONVERGED,AND THE CAUSE -C OF THE FAILURE WAS THE ERROR ESTIMATE -C EXCEEDING THE TOLERANCE. -660 NEF=NEF+1 - IWM(LETF)=IWM(LETF)+1 - IF (NEF .GT. 1) GO TO 665 -C -C ON FIRST ERROR TEST FAILURE, KEEP CURRENT ORDER OR LOWER -C ORDER BY ONE. COMPUTE NEW STEPSIZE BASED ON DIFFERENCES -C OF THE SOLUTION. - K = KNEW - TEMP2 = K + 1 - R = 0.90D0*(2.0D0*EST+0.0001D0)**(-1.0D0/TEMP2) - R = MAX(0.25D0,MIN(0.9D0,R)) - H = H*R - IF (ABS(H) .GE. HMIN) GO TO 690 - IDID = -6 - GO TO 675 -C -C ON SECOND ERROR TEST FAILURE, USE THE CURRENT ORDER OR -C DECREASE ORDER BY ONE. REDUCE THE STEPSIZE BY A FACTOR OF -C FOUR. 
-665 IF (NEF .GT. 2) GO TO 670 - K = KNEW - H = 0.25D0*H - IF (ABS(H) .GE. HMIN) GO TO 690 - IDID = -6 - GO TO 675 -C -C ON THIRD AND SUBSEQUENT ERROR TEST FAILURES, SET THE ORDER TO -C ONE AND REDUCE THE STEPSIZE BY A FACTOR OF FOUR. -670 K = 1 - H = 0.25D0*H - IF (ABS(H) .GE. HMIN) GO TO 690 - IDID = -6 - GO TO 675 -C -C -C -C -C FOR ALL CRASHES, RESTORE Y TO ITS LAST VALUE, -C INTERPOLATE TO FIND YPRIME AT LAST X, AND RETURN -675 CONTINUE - CALL DDATRP(X,X,Y,YPRIME,NEQ,K,PHI,PSI) - RETURN -C -C -C GO BACK AND TRY THIS STEP AGAIN -690 GO TO 200 -C -C------END OF SUBROUTINE DDASTP------ - END - SUBROUTINE DDAJAC (NEQ, X, Y, YPRIME, DELTA, CJ, H, - + IER, WT, E, WM, IWM, RES, IRES, UROUND, JAC, RPAR, - + IPAR, NTEMP) -C***BEGIN PROLOGUE DDAJAC -C***SUBSIDIARY -C***PURPOSE Compute the iteration matrix for DDASSL and form the -C LU-decomposition. -C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDAJAC-S, DDAJAC-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------------- -C THIS ROUTINE COMPUTES THE ITERATION MATRIX -C PD=DG/DY+CJ*DG/DYPRIME (WHERE G(X,Y,YPRIME)=0). -C HERE PD IS COMPUTED BY THE USER-SUPPLIED -C ROUTINE JAC IF IWM(MTYPE) IS 1 OR 4, AND -C IT IS COMPUTED BY NUMERICAL FINITE DIFFERENCING -C IF IWM(MTYPE)IS 2 OR 5 -C THE PARAMETERS HAVE THE FOLLOWING MEANINGS. -C Y = ARRAY CONTAINING PREDICTED VALUES -C YPRIME = ARRAY CONTAINING PREDICTED DERIVATIVES -C DELTA = RESIDUAL EVALUATED AT (X,Y,YPRIME) -C (USED ONLY IF IWM(MTYPE)=2 OR 5) -C CJ = SCALAR PARAMETER DEFINING ITERATION MATRIX -C H = CURRENT STEPSIZE IN INTEGRATION -C IER = VARIABLE WHICH IS .NE. 0 -C IF ITERATION MATRIX IS SINGULAR, -C AND 0 OTHERWISE. -C WT = VECTOR OF WEIGHTS FOR COMPUTING NORMS -C E = WORK SPACE (TEMPORARY) OF LENGTH NEQ -C WM = REAL WORK SPACE FOR MATRICES. ON -C OUTPUT IT CONTAINS THE LU DECOMPOSITION -C OF THE ITERATION MATRIX. 
-C IWM = INTEGER WORK SPACE CONTAINING -C MATRIX INFORMATION -C RES = NAME OF THE EXTERNAL USER-SUPPLIED ROUTINE -C TO EVALUATE THE RESIDUAL FUNCTION G(X,Y,YPRIME) -C IRES = FLAG WHICH IS EQUAL TO ZERO IF NO ILLEGAL VALUES -C IN RES, AND LESS THAN ZERO OTHERWISE. (IF IRES -C IS LESS THAN ZERO, THE MATRIX WAS NOT COMPLETED) -C IN THIS CASE (IF IRES .LT. 0), THEN IER = 0. -C UROUND = THE UNIT ROUNDOFF ERROR OF THE MACHINE BEING USED. -C JAC = NAME OF THE EXTERNAL USER-SUPPLIED ROUTINE -C TO EVALUATE THE ITERATION MATRIX (THIS ROUTINE -C IS ONLY USED IF IWM(MTYPE) IS 1 OR 4) -C----------------------------------------------------------------------- -C***ROUTINES CALLED DGBFA, DGEFA -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901010 Modified three MAX calls to be all on one line. (FNF) -C 901019 Merged changes made by C. Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. (FNF) -C 901101 Corrected PURPOSE. 
(FNF) -C***END PROLOGUE DDAJAC -C - INTEGER NEQ, IER, IWM(*), IRES, IPAR(*), NTEMP - DOUBLE PRECISION - * X, Y(*), YPRIME(*), DELTA(*), CJ, H, WT(*), E(*), WM(*), - * UROUND, RPAR(*) - EXTERNAL RES, JAC -C - EXTERNAL DGBFA, DGEFA -C - INTEGER I, I1, I2, II, IPSAVE, ISAVE, J, K, L, LENPD, LIPVT, - * LML, LMTYPE, LMU, MBA, MBAND, MEB1, MEBAND, MSAVE, MTYPE, N, - * NPD, NPDM1, NROW - DOUBLE PRECISION DEL, DELINV, SQUR, YPSAVE, YSAVE -C - PARAMETER (NPD=1) - PARAMETER (LML=1) - PARAMETER (LMU=2) - PARAMETER (LMTYPE=4) - PARAMETER (LIPVT=21) -C -C***FIRST EXECUTABLE STATEMENT DDAJAC - IER = 0 - NPDM1=NPD-1 - MTYPE=IWM(LMTYPE) - GO TO (100,200,300,400,500),MTYPE -C -C -C DENSE USER-SUPPLIED MATRIX -100 LENPD=NEQ*NEQ - DO 110 I=1,LENPD -110 WM(NPDM1+I)=0.0D0 - CALL JAC(X,Y,YPRIME,WM(NPD),CJ,RPAR,IPAR) - GO TO 230 -C -C -C DENSE FINITE-DIFFERENCE-GENERATED MATRIX -200 IRES=0 - NROW=NPDM1 - SQUR = SQRT(UROUND) - DO 210 I=1,NEQ - DEL=SQUR*MAX(ABS(Y(I)),ABS(H*YPRIME(I)),ABS(WT(I))) - DEL=SIGN(DEL,H*YPRIME(I)) - DEL=(Y(I)+DEL)-Y(I) - YSAVE=Y(I) - YPSAVE=YPRIME(I) - Y(I)=Y(I)+DEL - YPRIME(I)=YPRIME(I)+CJ*DEL - CALL RES(X,Y,YPRIME,E,IRES,RPAR,IPAR) - IF (IRES .LT. 
0) RETURN - DELINV=1.0D0/DEL - DO 220 L=1,NEQ -220 WM(NROW+L)=(E(L)-DELTA(L))*DELINV - NROW=NROW+NEQ - Y(I)=YSAVE - YPRIME(I)=YPSAVE -210 CONTINUE -C -C -C DO DENSE-MATRIX LU DECOMPOSITION ON PD -230 CALL DGEFA(WM(NPD),NEQ,NEQ,IWM(LIPVT),IER) - RETURN -C -C -C DUMMY SECTION FOR IWM(MTYPE)=3 -300 RETURN -C -C -C BANDED USER-SUPPLIED MATRIX -400 LENPD=(2*IWM(LML)+IWM(LMU)+1)*NEQ - DO 410 I=1,LENPD -410 WM(NPDM1+I)=0.0D0 - CALL JAC(X,Y,YPRIME,WM(NPD),CJ,RPAR,IPAR) - MEBAND=2*IWM(LML)+IWM(LMU)+1 - GO TO 550 -C -C -C BANDED FINITE-DIFFERENCE-GENERATED MATRIX -500 MBAND=IWM(LML)+IWM(LMU)+1 - MBA=MIN(MBAND,NEQ) - MEBAND=MBAND+IWM(LML) - MEB1=MEBAND-1 - MSAVE=(NEQ/MBAND)+1 - ISAVE=NTEMP-1 - IPSAVE=ISAVE+MSAVE - IRES=0 - SQUR=SQRT(UROUND) - DO 540 J=1,MBA - DO 510 N=J,NEQ,MBAND - K= (N-J)/MBAND + 1 - WM(ISAVE+K)=Y(N) - WM(IPSAVE+K)=YPRIME(N) - DEL=SQUR*MAX(ABS(Y(N)),ABS(H*YPRIME(N)),ABS(WT(N))) - DEL=SIGN(DEL,H*YPRIME(N)) - DEL=(Y(N)+DEL)-Y(N) - Y(N)=Y(N)+DEL -510 YPRIME(N)=YPRIME(N)+CJ*DEL - CALL RES(X,Y,YPRIME,E,IRES,RPAR,IPAR) - IF (IRES .LT. 0) RETURN - DO 530 N=J,NEQ,MBAND - K= (N-J)/MBAND + 1 - Y(N)=WM(ISAVE+K) - YPRIME(N)=WM(IPSAVE+K) - DEL=SQUR*MAX(ABS(Y(N)),ABS(H*YPRIME(N)),ABS(WT(N))) - DEL=SIGN(DEL,H*YPRIME(N)) - DEL=(Y(N)+DEL)-Y(N) - DELINV=1.0D0/DEL - I1=MAX(1,(N-IWM(LMU))) - I2=MIN(NEQ,(N+IWM(LML))) - II=N*MEB1-IWM(LML)+NPDM1 - DO 520 I=I1,I2 -520 WM(II+I)=(E(I)-DELTA(I))*DELINV -530 CONTINUE -540 CONTINUE -C -C -C DO LU DECOMPOSITION OF BANDED PD -550 CALL DGBFA(WM(NPD),MEBAND,NEQ, - * IWM(LML),IWM(LMU),IWM(LIPVT),IER) - RETURN -C------END OF SUBROUTINE DDAJAC------ - END - SUBROUTINE DDASLV (NEQ, DELTA, WM, IWM) -C***BEGIN PROLOGUE DDASLV -C***SUBSIDIARY -C***PURPOSE Linear system solver for DDASSL. 
-C***LIBRARY SLATEC (DASSL) -C***TYPE DOUBLE PRECISION (SDASLV-S, DDASLV-D) -C***AUTHOR PETZOLD, LINDA R., (LLNL) -C***DESCRIPTION -C----------------------------------------------------------------------- -C THIS ROUTINE MANAGES THE SOLUTION OF THE LINEAR -C SYSTEM ARISING IN THE NEWTON ITERATION. -C MATRICES AND REAL TEMPORARY STORAGE AND -C REAL INFORMATION ARE STORED IN THE ARRAY WM. -C INTEGER MATRIX INFORMATION IS STORED IN -C THE ARRAY IWM. -C FOR A DENSE MATRIX, THE LINPACK ROUTINE -C DGESL IS CALLED. -C FOR A BANDED MATRIX,THE LINPACK ROUTINE -C DGBSL IS CALLED. -C----------------------------------------------------------------------- -C***ROUTINES CALLED DGBSL, DGESL -C***REVISION HISTORY (YYMMDD) -C 830315 DATE WRITTEN -C 901009 Finished conversion to SLATEC 4.0 format (F.N.Fritsch) -C 901019 Merged changes made by C. Ulrich with SLATEC 4.0 format. -C 901026 Added explicit declarations for all variables and minor -C cosmetic changes to prologue. (FNF) -C***END PROLOGUE DDASLV -C - INTEGER NEQ, IWM(*) - DOUBLE PRECISION DELTA(*), WM(*) -C - EXTERNAL DGBSL, DGESL -C - INTEGER LIPVT, LML, LMU, LMTYPE, MEBAND, MTYPE, NPD - PARAMETER (NPD=1) - PARAMETER (LML=1) - PARAMETER (LMU=2) - PARAMETER (LMTYPE=4) - PARAMETER (LIPVT=21) -C -C***FIRST EXECUTABLE STATEMENT DDASLV - MTYPE=IWM(LMTYPE) - GO TO(100,100,300,400,400),MTYPE -C -C DENSE MATRIX -100 CALL DGESL(WM(NPD),NEQ,NEQ,IWM(LIPVT),DELTA,0) - RETURN -C -C DUMMY SECTION FOR MTYPE=3 -300 CONTINUE - RETURN -C -C BANDED MATRIX -400 MEBAND=2*IWM(LML)+IWM(LMU)+1 - CALL DGBSL(WM(NPD),MEBAND,NEQ,IWM(LML), - * IWM(LMU),IWM(LIPVT),DELTA,0) - RETURN -C------END OF SUBROUTINE DDASLV------ - END -C*DECK XERMSG - SUBROUTINE XERMSG (LIBRAR, SUBROU, MESSG, NERR, LEVEL) -C***BEGIN PROLOGUE XERMSG -C***PURPOSE Processes error messages for SLATEC and other libraries -C***LIBRARY SLATEC -C***CATEGORY R3C -C***TYPE ALL -C***KEYWORDS ERROR MESSAGE, XERROR -C***AUTHOR FONG, KIRBY, (NMFECC AT LLNL) -C Modified by -C FRITSCH, F. 
N., (LLNL) -C***DESCRIPTION -C -C XERMSG processes a diagnostic message in a manner determined by the -C value of LEVEL and the current value of the library error control -C flag, KONTRL. See subroutine XSETF for details. -C (XSETF is inoperable in this version.). -C -C LIBRAR A character constant (or character variable) with the name -C of the library. This will be 'SLATEC' for the SLATEC -C Common Math Library. The error handling package is -C general enough to be used by many libraries -C simultaneously, so it is desirable for the routine that -C detects and reports an error to identify the library name -C as well as the routine name. -C -C SUBROU A character constant (or character variable) with the name -C of the routine that detected the error. Usually it is the -C name of the routine that is calling XERMSG. There are -C some instances where a user callable library routine calls -C lower level subsidiary routines where the error is -C detected. In such cases it may be more informative to -C supply the name of the routine the user called rather than -C the name of the subsidiary routine that detected the -C error. -C -C MESSG A character constant (or character variable) with the text -C of the error or warning message. In the example below, -C the message is a character constant that contains a -C generic message. -C -C CALL XERMSG ('SLATEC', 'MMPY', -C *'THE ORDER OF THE MATRIX EXCEEDS THE ROW DIMENSION', -C *3, 1) -C -C It is possible (and is sometimes desirable) to generate a -C specific message--e.g., one that contains actual numeric -C values. Specific numeric values can be converted into -C character strings using formatted WRITE statements into -C character variables. This is called standard Fortran -C internal file I/O and is exemplified in the first three -C lines of the following example. You can also catenate -C substrings of characters to construct the error message. 
-C Here is an example showing the use of both writing to -C an internal file and catenating character strings. -C -C CHARACTER*5 CHARN, CHARL -C WRITE (CHARN,10) N -C WRITE (CHARL,10) LDA -C 10 FORMAT(I5) -C CALL XERMSG ('SLATEC', 'MMPY', 'THE ORDER'//CHARN// -C * ' OF THE MATRIX EXCEEDS ITS ROW DIMENSION OF'// -C * CHARL, 3, 1) -C -C There are two subtleties worth mentioning. One is that -C the // for character catenation is used to construct the -C error message so that no single character constant is -C continued to the next line. This avoids confusion as to -C whether there are trailing blanks at the end of the line. -C The second is that by catenating the parts of the message -C as an actual argument rather than encoding the entire -C message into one large character variable, we avoid -C having to know how long the message will be in order to -C declare an adequate length for that large character -C variable. XERMSG calls XERPRN to print the message using -C multiple lines if necessary. If the message is very long, -C XERPRN will break it into pieces of 72 characters (as -C requested by XERMSG) for printing on multiple lines. -C Also, XERMSG asks XERPRN to prefix each line with ' * ' -C so that the total line length could be 76 characters. -C Note also that XERPRN scans the error message backwards -C to ignore trailing blanks. Another feature is that -C the substring '$$' is treated as a new line sentinel -C by XERPRN. If you want to construct a multiline -C message without having to count out multiples of 72 -C characters, just use '$$' as a separator. '$$' -C obviously must occur within 72 characters of the -C start of each line to have its intended effect since -C XERPRN is asked to wrap around at 72 characters in -C addition to looking for '$$'. -C -C NERR An integer value that is chosen by the library routine's -C author. It must be in the range -9999999 to 99999999 (8 -C printable digits). Each distinct error should have its -C own error number. 
These error numbers should be described -C in the machine readable documentation for the routine. -C The error numbers need be unique only within each routine, -C so it is reasonable for each routine to start enumerating -C errors from 1 and proceeding to the next integer. -C -C LEVEL An integer value in the range 0 to 2 that indicates the -C level (severity) of the error. Their meanings are -C -C -1 A warning message. This is used if it is not clear -C that there really is an error, but the user's attention -C may be needed. An attempt is made to only print this -C message once. -C -C 0 A warning message. This is used if it is not clear -C that there really is an error, but the user's attention -C may be needed. -C -C 1 A recoverable error. This is used even if the error is -C so serious that the routine cannot return any useful -C answer. If the user has told the error package to -C return after recoverable errors, then XERMSG will -C return to the Library routine which can then return to -C the user's routine. The user may also permit the error -C package to terminate the program upon encountering a -C recoverable error. -C -C 2 A fatal error. XERMSG will not return to its caller -C after it receives a fatal error. This level should -C hardly ever be used; it is much better to allow the -C user a chance to recover. An example of one of the few -C cases in which it is permissible to declare a level 2 -C error is a reverse communication Library routine that -C is likely to be called repeatedly until it integrates -C across some interval. If there is a serious error in -C the input such that another step cannot be taken and -C the Library routine is called again without the input -C error having been corrected by the caller, the Library -C routine will probably be called forever with improper -C input. In this case, it is reasonable to declare the -C error to be fatal. -C -C Each of the arguments to XERMSG is input; none will be modified by -C XERMSG. 
A routine may make multiple calls to XERMSG with warning -C level messages; however, after a call to XERMSG with a recoverable -C error, the routine should return to the user. -C -C***REFERENCES JONES, RONDALL E. AND KAHANER, DAVID K., "XERROR, THE -C SLATEC ERROR-HANDLING PACKAGE", SOFTWARE - PRACTICE -C AND EXPERIENCE, VOLUME 13, NO. 3, PP. 251-257, -C MARCH, 1983. -C***ROUTINES CALLED XERHLT, XERPRN -C***REVISION HISTORY (YYMMDD) -C 880101 DATE WRITTEN -C 880621 REVISED AS DIRECTED AT SLATEC CML MEETING OF FEBRUARY 1988. -C THERE ARE TWO BASIC CHANGES. -C 1. A NEW ROUTINE, XERPRN, IS USED INSTEAD OF XERPRT TO -C PRINT MESSAGES. THIS ROUTINE WILL BREAK LONG MESSAGES -C INTO PIECES FOR PRINTING ON MULTIPLE LINES. '$$' IS -C ACCEPTED AS A NEW LINE SENTINEL. A PREFIX CAN BE -C ADDED TO EACH LINE TO BE PRINTED. XERMSG USES EITHER -C ' ***' OR ' * ' AND LONG MESSAGES ARE BROKEN EVERY -C 72 CHARACTERS (AT MOST) SO THAT THE MAXIMUM LINE -C LENGTH OUTPUT CAN NOW BE AS GREAT AS 76. -C 2. THE TEXT OF ALL MESSAGES IS NOW IN UPPER CASE SINCE THE -C FORTRAN STANDARD DOCUMENT DOES NOT ADMIT THE EXISTENCE -C OF LOWER CASE. -C 880708 REVISED AFTER THE SLATEC CML MEETING OF JUNE 29 AND 30. -C THE PRINCIPAL CHANGES ARE -C 1. CLARIFY COMMENTS IN THE PROLOGUES -C 2. RENAME XRPRNT TO XERPRN -C 3. REWORK HANDLING OF '$$' IN XERPRN TO HANDLE BLANK LINES -C SIMILAR TO THE WAY FORMAT STATEMENTS HANDLE THE / -C CHARACTER FOR NEW RECORDS. -C 890706 REVISED WITH THE HELP OF FRED FRITSCH AND REG CLEMENS TO -C CLEAN UP THE CODING. -C 890721 REVISED TO USE NEW FEATURE IN XERPRN TO COUNT CHARACTERS IN -C PREFIX. -C 891013 REVISED TO CORRECT COMMENTS. -C 891214 Prologue converted to Version 4.0 format. (WRB) -C 900510 Changed test on NERR to be -9999999 < NERR < 99999999, but -C NERR .ne. 0, and on LEVEL to be -2 < LEVEL < 3. Added -C LEVEL=-1 logic, changed calls to XERSAV to XERSVE, and -C XERCTL to XERCNT. 
(RWC) -C 901011 Removed error saving features to produce a simplified -C version for distribution with DASSL and other LLNL codes. -C (FNF) -C***END PROLOGUE XERMSG - CHARACTER*(*) LIBRAR, SUBROU, MESSG - CHARACTER*72 TEMP -C***FIRST EXECUTABLE STATEMENT XERMSG -C -C WE PRINT A FATAL ERROR MESSAGE AND TERMINATE FOR AN ERROR IN -C CALLING XERMSG. THE ERROR NUMBER SHOULD BE POSITIVE, -C AND THE LEVEL SHOULD BE BETWEEN 0 AND 2. -C - IF (NERR.LT.-9999999 .OR. NERR.GT.99999999 .OR. NERR.EQ.0 .OR. - * LEVEL.LT.-1 .OR. LEVEL.GT.2) THEN - CALL XERPRN (' ***', -1, 'FATAL ERROR IN...$$ ' // - * 'XERMSG -- INVALID ERROR NUMBER OR LEVEL$$ '// - * 'JOB ABORT DUE TO FATAL ERROR.', 72) - CALL XERHLT (' ***XERMSG -- INVALID INPUT') - RETURN - ENDIF -C -C SET DEFAULT VALUES FOR CONTROL PARAMETERS. -C - LKNTRL = 1 - MKNTRL = 1 -C -C ANNOUNCE THE NAMES OF THE LIBRARY AND SUBROUTINE BY BUILDING A -C MESSAGE IN CHARACTER VARIABLE TEMP (NOT EXCEEDING 66 CHARACTERS) -C AND SENDING IT OUT VIA XERPRN. PRINT ONLY IF CONTROL FLAG -C IS NOT ZERO. -C - IF (LKNTRL .NE. 0) THEN - TEMP(1:21) = 'MESSAGE FROM ROUTINE ' - I = MIN(LEN(SUBROU), 16) - TEMP(22:21+I) = SUBROU(1:I) - TEMP(22+I:33+I) = ' IN LIBRARY ' - LTEMP = 33 + I - I = MIN(LEN(LIBRAR), 16) - TEMP(LTEMP+1:LTEMP+I) = LIBRAR (1:I) - TEMP(LTEMP+I+1:LTEMP+I+1) = '.' - LTEMP = LTEMP + I + 1 - CALL XERPRN (' ***', -1, TEMP(1:LTEMP), 72) - ENDIF -C -C IF LKNTRL IS POSITIVE, PRINT AN INTRODUCTORY LINE BEFORE -C PRINTING THE MESSAGE. THE INTRODUCTORY LINE TELLS THE CHOICE -C FROM EACH OF THE FOLLOWING TWO OPTIONS. -C 1. LEVEL OF THE MESSAGE -C 'INFORMATIVE MESSAGE' -C 'POTENTIALLY RECOVERABLE ERROR' -C 'FATAL ERROR' -C 2. WHETHER CONTROL FLAG WILL ALLOW PROGRAM TO CONTINUE -C 'PROGRAM CONTINUES' -C 'PROGRAM ABORTED' -C NOTICE THAT THE LINE INCLUDING FOUR PREFIX CHARACTERS WILL NOT -C EXCEED 74 CHARACTERS. -C WE SKIP THE NEXT BLOCK IF THE INTRODUCTORY LINE IS NOT NEEDED. -C - IF (LKNTRL .GT. 
0) THEN -C -C THE FIRST PART OF THE MESSAGE TELLS ABOUT THE LEVEL. -C - IF (LEVEL .LE. 0) THEN - TEMP(1:20) = 'INFORMATIVE MESSAGE,' - LTEMP = 20 - ELSEIF (LEVEL .EQ. 1) THEN - TEMP(1:30) = 'POTENTIALLY RECOVERABLE ERROR,' - LTEMP = 30 - ELSE - TEMP(1:12) = 'FATAL ERROR,' - LTEMP = 12 - ENDIF -C -C THEN WHETHER THE PROGRAM WILL CONTINUE. -C - IF ((MKNTRL.EQ.2 .AND. LEVEL.GE.1) .OR. - * (MKNTRL.EQ.1 .AND. LEVEL.EQ.2)) THEN - TEMP(LTEMP+1:LTEMP+17) = ' PROGRAM ABORTED.' - LTEMP = LTEMP + 17 - ELSE - TEMP(LTEMP+1:LTEMP+19) = ' PROGRAM CONTINUES.' - LTEMP = LTEMP + 19 - ENDIF -C - CALL XERPRN (' ***', -1, TEMP(1:LTEMP), 72) - ENDIF -C -C NOW SEND OUT THE MESSAGE. -C - CALL XERPRN (' * ', -1, MESSG, 72) -C -C IF LKNTRL IS POSITIVE, WRITE THE ERROR NUMBER. -C - IF (LKNTRL .GT. 0) THEN - WRITE (TEMP, '(''ERROR NUMBER = '', I8)') NERR - DO 10 I=16,22 - IF (TEMP(I:I) .NE. ' ') GO TO 20 - 10 CONTINUE -C - 20 CALL XERPRN (' * ', -1, TEMP(1:15) // TEMP(I:23), 72) - ENDIF -C -C IF LKNTRL IS NOT ZERO, PRINT A BLANK LINE AND AN END OF MESSAGE. -C - IF (LKNTRL .NE. 0) THEN - CALL XERPRN (' * ', -1, ' ', 72) - CALL XERPRN (' ***', -1, 'END OF MESSAGE', 72) - CALL XERPRN (' ', 0, ' ', 72) - ENDIF -C -C IF THE ERROR IS NOT FATAL OR THE ERROR IS RECOVERABLE AND THE -C CONTROL FLAG IS SET FOR RECOVERY, THEN RETURN. -C - 30 IF (LEVEL.LE.0 .OR. (LEVEL.EQ.1 .AND. MKNTRL.LE.1)) RETURN -C -C THE PROGRAM WILL BE STOPPED DUE TO AN UNRECOVERED ERROR OR A -C FATAL ERROR. PRINT THE REASON FOR THE ABORT AND THE ERROR -C SUMMARY IF THE CONTROL FLAG AND THE MAXIMUM ERROR COUNT PERMIT. -C - IF (LKNTRL.GT.0) THEN - IF (LEVEL .EQ. 1) THEN - CALL XERPRN - * (' ***', -1, 'JOB ABORT DUE TO UNRECOVERED ERROR.', 72) - ELSE - CALL XERPRN(' ***', -1, 'JOB ABORT DUE TO FATAL ERROR.', 72) - ENDIF - CALL XERHLT (' ') - ENDIF - RETURN - END - SUBROUTINE XERHLT (MESSG) -C***BEGIN PROLOGUE XERHLT -C***SUBSIDIARY -C***PURPOSE Abort program execution and print error message. 
-C***LIBRARY SLATEC (XERROR) -C***CATEGORY R3C -C***TYPE ALL (XERHLT-A) -C***KEYWORDS ERROR, XERROR -C***AUTHOR JONES, R. E., (SNLA) -C***DESCRIPTION -C -C Abstract -C ***Note*** machine dependent routine -C XERHLT aborts the execution of the program. -C The error message causing the abort is given in the calling -C sequence, in case one needs it for printing on a dayfile, -C for example. -C -C Description of Parameters -C MESSG is as in XERROR. -C -C***REFERENCES JONES R.E., KAHANER D.K., 'XERROR, THE SLATEC ERROR- -C HANDLING PACKAGE', SAND82-0800, SANDIA LABORATORIES, -C 1982. -C***ROUTINES CALLED (NONE) -C***REVISION HISTORY (YYMMDD) -C 790801 DATE WRITTEN as XERABT -C 861211 REVISION DATE from Version 3.2 -C 891214 Prologue converted to Version 4.0 format. (BAB) -C 900206 Routine changed from user-callable to subsidiary. (WRB) -C 900510 Changed calling sequence to delete length of char string -C Changed subroutine name from XERABT to XERHLT. (RWC) -C***END PROLOGUE XERHLT - CHARACTER*(*) MESSG -C***FIRST EXECUTABLE STATEMENT XERHLT - STOP - END -C*DECK XERPRN - SUBROUTINE XERPRN (PREFIX, NPREF, MESSG, NWRAP) -C***BEGIN PROLOGUE XERPRN -C***SUBSIDIARY -C***PURPOSE This routine is called by XERMSG to print error messages -C***LIBRARY SLATEC -C***CATEGORY R3C -C***TYPE ALL -C***KEYWORDS ERROR MESSAGES, PRINTING, XERROR -C***AUTHOR FONG, KIRBY, (NMFECC AT LLNL) -C***DESCRIPTION -C -C This routine sends one or more lines to each of the (up to five) -C logical units to which error messages are to be sent. This routine -C is called several times by XERMSG, sometimes with a single line to -C print and sometimes with a (potentially very long) message that may -C wrap around into multiple lines. -C -C PREFIX Input argument of type CHARACTER. This argument contains -C characters to be put at the beginning of each line before -C the body of the message. No more than 16 characters of -C PREFIX will be used. -C -C NPREF Input argument of type INTEGER. 
This argument is the number -C of characters to use from PREFIX. If it is negative, the -C intrinsic function LEN is used to determine its length. If -C it is zero, PREFIX is not used. If it exceeds 16 or if -C LEN(PREFIX) exceeds 16, only the first 16 characters will be -C used. If NPREF is positive and the length of PREFIX is less -C than NPREF, a copy of PREFIX extended with blanks to length -C NPREF will be used. -C -C MESSG Input argument of type CHARACTER. This is the text of a -C message to be printed. If it is a long message, it will be -C broken into pieces for printing on multiple lines. Each line -C will start with the appropriate prefix and be followed by a -C piece of the message. NWRAP is the number of characters per -C piece; that is, after each NWRAP characters, we break and -C start a new line. In addition the characters '$$' embedded -C in MESSG are a sentinel for a new line. The counting of -C characters up to NWRAP starts over for each new line. The -C value of NWRAP typically used by XERMSG is 72 since many -C older error messages in the SLATEC Library are laid out to -C rely on wrap-around every 72 characters. -C -C NWRAP Input argument of type INTEGER. This gives the maximum size -C piece into which to break MESSG for printing on multiple -C lines. An embedded '$$' ends a line, and the count restarts -C at the following character. If a line break does not occur -C on a blank (it would split a word) that word is moved to the -C next line. Values of NWRAP less than 16 will be treated as -C 16. Values of NWRAP greater than 132 will be treated as 132. -C The actual line length will be NPREF + NWRAP after NPREF has -C been adjusted to fall between 0 and 16 and NWRAP has been -C adjusted to fall between 16 and 132. 
-C -C***REFERENCES (NONE) -C***ROUTINES CALLED I1MACH, XGETUA -C***REVISION HISTORY (YYMMDD) -C 880621 DATE WRITTEN -C 880708 REVISED AFTER THE SLATEC CML SUBCOMMITTEE MEETING OF -C JUNE 29 AND 30 TO CHANGE THE NAME TO XERPRN AND TO REWORK -C THE HANDLING OF THE NEW LINE SENTINEL TO BEHAVE LIKE THE -C SLASH CHARACTER IN FORMAT STATEMENTS. -C 890706 REVISED WITH THE HELP OF FRED FRITSCH AND REG CLEMMENS TO -C STREAMLINE THE CODING AND FIX A BUG THAT CAUSED EXTRA BLANK -C LINES TO BE PRINTED. -C 890721 REVISED TO ADD A NEW FEATURE. A NEGATIVE VALUE OF NPREF -C CAUSES LEN(PREFIX) TO BE USED AS THE LENGTH. -C 891013 REVISED TO CORRECT ERROR IN CALCULATING PREFIX LENGTH. -C 891214 Prologue converted to Version 4.0 format. (WRB) -C 900510 Added code to break messages between words. (RWC) -C***END PROLOGUE XERPRN - CHARACTER*(*) PREFIX, MESSG - INTEGER NPREF, NWRAP - CHARACTER*148 CBUFF - INTEGER IU(5), NUNIT - CHARACTER*2 NEWLIN - PARAMETER (NEWLIN = '$$') -C***FIRST EXECUTABLE STATEMENT XERPRN - CALL XGETUA(IU,NUNIT) -C -C A ZERO VALUE FOR A LOGICAL UNIT NUMBER MEANS TO USE THE STANDARD -C ERROR MESSAGE UNIT INSTEAD. I1MACH(4) RETRIEVES THE STANDARD -C ERROR MESSAGE UNIT. -C - N = I1MACH(4) - DO 10 I=1,NUNIT - IF (IU(I) .EQ. 0) IU(I) = N - 10 CONTINUE -C -C LPREF IS THE LENGTH OF THE PREFIX. THE PREFIX IS PLACED AT THE -C BEGINNING OF CBUFF, THE CHARACTER BUFFER, AND KEPT THERE DURING -C THE REST OF THIS ROUTINE. -C - IF ( NPREF .LT. 0 ) THEN - LPREF = LEN(PREFIX) - ELSE - LPREF = NPREF - ENDIF - LPREF = MIN(16, LPREF) - IF (LPREF .NE. 0) CBUFF(1:LPREF) = PREFIX -C -C LWRAP IS THE MAXIMUM NUMBER OF CHARACTERS WE WANT TO TAKE AT ONE -C TIME FROM MESSG TO PRINT ON ONE LINE. -C - LWRAP = MAX(16, MIN(132, NWRAP)) -C -C SET LENMSG TO THE LENGTH OF MESSG, IGNORE ANY TRAILING BLANKS. -C - LENMSG = LEN(MESSG) - N = LENMSG - DO 20 I=1,N - IF (MESSG(LENMSG:LENMSG) .NE. 
' ') GO TO 30 - LENMSG = LENMSG - 1 - 20 CONTINUE - 30 CONTINUE -C -C IF THE MESSAGE IS ALL BLANKS, THEN PRINT ONE BLANK LINE. -C - IF (LENMSG .EQ. 0) THEN - CBUFF(LPREF+1:LPREF+1) = ' ' - DO 40 I=1,NUNIT - WRITE(IU(I), '(A)') CBUFF(1:LPREF+1) - 40 CONTINUE - RETURN - ENDIF -C -C SET NEXTC TO THE POSITION IN MESSG WHERE THE NEXT SUBSTRING -C STARTS. FROM THIS POSITION WE SCAN FOR THE NEW LINE SENTINEL. -C WHEN NEXTC EXCEEDS LENMSG, THERE IS NO MORE TO PRINT. -C WE LOOP BACK TO LABEL 50 UNTIL ALL PIECES HAVE BEEN PRINTED. -C -C WE LOOK FOR THE NEXT OCCURRENCE OF THE NEW LINE SENTINEL. THE -C INDEX INTRINSIC FUNCTION RETURNS ZERO IF THERE IS NO OCCURRENCE -C OR IF THE LENGTH OF THE FIRST ARGUMENT IS LESS THAN THE LENGTH -C OF THE SECOND ARGUMENT. -C -C THERE ARE SEVERAL CASES WHICH SHOULD BE CHECKED FOR IN THE -C FOLLOWING ORDER. WE ARE ATTEMPTING TO SET LPIECE TO THE NUMBER -C OF CHARACTERS THAT SHOULD BE TAKEN FROM MESSG STARTING AT -C POSITION NEXTC. -C -C LPIECE .EQ. 0 THE NEW LINE SENTINEL DOES NOT OCCUR IN THE -C REMAINDER OF THE CHARACTER STRING. LPIECE -C SHOULD BE SET TO LWRAP OR LENMSG+1-NEXTC, -C WHICHEVER IS LESS. -C -C LPIECE .EQ. 1 THE NEW LINE SENTINEL STARTS AT MESSG(NEXTC: -C NEXTC). LPIECE IS EFFECTIVELY ZERO, AND WE -C PRINT NOTHING TO AVOID PRODUCING UNNECESSARY -C BLANK LINES. THIS TAKES CARE OF THE SITUATION -C WHERE THE LIBRARY ROUTINE HAS A MESSAGE OF -C EXACTLY 72 CHARACTERS FOLLOWED BY A NEW LINE -C SENTINEL FOLLOWED BY MORE CHARACTERS. NEXTC -C SHOULD BE INCREMENTED BY 2. -C -C LPIECE .GT. LWRAP+1 REDUCE LPIECE TO LWRAP. -C -C ELSE THIS LAST CASE MEANS 2 .LE. LPIECE .LE. LWRAP+1 -C RESET LPIECE = LPIECE-1. NOTE THAT THIS -C PROPERLY HANDLES THE END CASE WHERE LPIECE .EQ. -C LWRAP+1. THAT IS, THE SENTINEL FALLS EXACTLY -C AT THE END OF A LINE. -C - NEXTC = 1 - 50 LPIECE = INDEX(MESSG(NEXTC:LENMSG), NEWLIN) - IF (LPIECE .EQ. 0) THEN -C -C THERE WAS NO NEW LINE SENTINEL FOUND. 
-C - IDELTA = 0 - LPIECE = MIN(LWRAP, LENMSG+1-NEXTC) - IF (LPIECE .LT. LENMSG+1-NEXTC) THEN - DO 52 I=LPIECE+1,2,-1 - IF (MESSG(NEXTC+I-1:NEXTC+I-1) .EQ. ' ') THEN - LPIECE = I-1 - IDELTA = 1 - GOTO 54 - ENDIF - 52 CONTINUE - ENDIF - 54 CBUFF(LPREF+1:LPREF+LPIECE) = MESSG(NEXTC:NEXTC+LPIECE-1) - NEXTC = NEXTC + LPIECE + IDELTA - ELSEIF (LPIECE .EQ. 1) THEN -C -C WE HAVE A NEW LINE SENTINEL AT MESSG(NEXTC:NEXTC+1). -C DON'T PRINT A BLANK LINE. -C - NEXTC = NEXTC + 2 - GO TO 50 - ELSEIF (LPIECE .GT. LWRAP+1) THEN -C -C LPIECE SHOULD BE SET DOWN TO LWRAP. -C - IDELTA = 0 - LPIECE = LWRAP - DO 56 I=LPIECE+1,2,-1 - IF (MESSG(NEXTC+I-1:NEXTC+I-1) .EQ. ' ') THEN - LPIECE = I-1 - IDELTA = 1 - GOTO 58 - ENDIF - 56 CONTINUE - 58 CBUFF(LPREF+1:LPREF+LPIECE) = MESSG(NEXTC:NEXTC+LPIECE-1) - NEXTC = NEXTC + LPIECE + IDELTA - ELSE -C -C IF WE ARRIVE HERE, IT MEANS 2 .LE. LPIECE .LE. LWRAP+1. -C WE SHOULD DECREMENT LPIECE BY ONE. -C - LPIECE = LPIECE - 1 - CBUFF(LPREF+1:LPREF+LPIECE) = MESSG(NEXTC:NEXTC+LPIECE-1) - NEXTC = NEXTC + LPIECE + 2 - ENDIF -C -C PRINT -C - DO 60 I=1,NUNIT - WRITE(IU(I), '(A)') CBUFF(1:LPREF+LPIECE) - 60 CONTINUE -C - IF (NEXTC .LE. LENMSG) GO TO 50 - RETURN - END -C*DECK XGETUA - SUBROUTINE XGETUA (IUNITA, N) -C***BEGIN PROLOGUE XGETUA -C***PURPOSE Return unit number(s) to which error messages are being -C sent. -C***LIBRARY SLATEC (XERROR) -C***CATEGORY R3C -C***TYPE ALL (XGETUA-A) -C***KEYWORDS ERROR, XERROR -C***AUTHOR JONES, R. E., (SNLA) -C Modified by -C FRITSCH, F. N., (LLNL) -C***DESCRIPTION -C -C Abstract -C XGETUA may be called to determine the unit number or numbers -C to which error messages are being sent. -C These unit numbers may have been set by a call to XSETUN, -C or a call to XSETUA, or may be a default value. -C -C Description of Parameters -C --Output-- -C IUNIT - an array of one to five unit numbers, depending -C on the value of N. A value of zero refers to the -C default unit, as defined by the I1MACH machine -C constant routine. 
Only IUNIT(1),...,IUNIT(N) are -C defined by XGETUA. The values of IUNIT(N+1),..., -C IUNIT(5) are not defined (for N .LT. 5) or altered -C in any way by XGETUA. -C N - the number of units to which copies of the -C error messages are being sent. N will be in the -C range from 1 to 5. -C -C CAUTION: The use of COMMON in this version is not safe for -C multiprocessing. -C -C***REFERENCES JONES R.E., KAHANER D.K., 'XERROR, THE SLATEC ERROR- -C HANDLING PACKAGE', SAND82-0800, SANDIA LABORATORIES, -C 1982. -C***ROUTINES CALLED (NONE) -C***COMMON BLOCKS XERUNI -C***REVISION HISTORY (YYMMDD) -C 790801 DATE WRITTEN -C 861211 REVISION DATE from Version 3.2 -C 891214 Prologue converted to Version 4.0 format. (BAB) -C 901011 Rewritten to not use J4SAVE. (FNF) -C 901012 Corrected initialization problem. (FNF) -C***END PROLOGUE XGETUA - DIMENSION IUNITA(5) - INTEGER NUNIT, IUNIT(5) - COMMON /XERUNI/ NUNIT, IUNIT -C***FIRST EXECUTABLE STATEMENT XGETUA -C Initialize so XERMSG will use standard error unit number if -C block has not been set up by a CALL XSETUA. -C CAUTION: This assumes uninitialized COMMON tests .LE.0 . - IF (NUNIT.LE.0) THEN - NUNIT = 1 - IUNIT(1) = 0 - ENDIF - N = NUNIT - DO 30 I=1,N - IUNITA(I) = IUNIT(I) - 30 CONTINUE - RETURN - END -C*DECK XSETUA - SUBROUTINE XSETUA (IUNITA, N) -C***BEGIN PROLOGUE XSETUA -C***PURPOSE Set logical unit numbers (up to 5) to which error -C messages are to be sent. -C***LIBRARY SLATEC (XERROR) -C***CATEGORY R3B -C***TYPE ALL (XSETUA-A) -C***KEYWORDS ERROR, XERROR -C***AUTHOR JONES, R. E., (SNLA) -C Modified by -C FRITSCH, F. N., (LLNL) -C***DESCRIPTION -C -C Abstract -C XSETUA may be called to declare a list of up to five -C logical units, each of which is to receive a copy of -C each error message processed by this package. -C The purpose of XSETUA is to allow simultaneous printing -C of each error message on, say, a main output file, -C an interactive terminal, and other files such as graphics -C communication files. 
-C -C Description of Parameters -C --Input-- -C IUNIT - an array of up to five unit numbers. -C Normally these numbers should all be different -C (but duplicates are not prohibited.) -C N - the number of unit numbers provided in IUNIT -C must have 1 .LE. N .LE. 5. -C -C CAUTION: The use of COMMON in this version is not safe for -C multiprocessing. -C -C***REFERENCES JONES R.E., KAHANER D.K., 'XERROR, THE SLATEC ERROR- -C HANDLING PACKAGE', SAND82-0800, SANDIA LABORATORIES, -C 1982. -C***ROUTINES CALLED XERMSG -C***COMMON BLOCKS XERUNI -C***REVISION HISTORY (YYMMDD) -C 790801 DATE WRITTEN -C 861211 REVISION DATE from Version 3.2 -C 891214 Prologue converted to Version 4.0 format. (BAB) -C 900510 Change call to XERRWV to XERMSG. (RWC) -C 901011 Rewritten to not use J4SAVE. (FNF) -C***END PROLOGUE XSETUA - DIMENSION IUNITA(5) - INTEGER NUNIT, IUNIT(5) - COMMON /XERUNI/ NUNIT, IUNIT - CHARACTER *8 XERN1 -C***FIRST EXECUTABLE STATEMENT XSETUA -C - IF (N.LT.1 .OR. N.GT.5) THEN - WRITE (XERN1, '(I8)') N - CALL XERMSG ('SLATEC', 'XSETUA', - * 'INVALID NUMBER OF UNITS, N = ' // XERN1, 1, 2) - RETURN - ENDIF -C - DO 10 I=1,N - IUNIT(I) = IUNITA(I) - 10 CONTINUE - NUNIT = N - RETURN - END diff --git a/scipy-0.10.1/scipy/integrate/odepack/decbt.f b/scipy-0.10.1/scipy/integrate/odepack/decbt.f deleted file mode 100644 index d1f2ab7e16..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/decbt.f +++ /dev/null @@ -1,109 +0,0 @@ - subroutine decbt (m, n, a, b, c, ip, ier) - integer m, n, ip(m,n), ier - double precision a(m,m,n), b(m,m,n), c(m,m,n) -c----------------------------------------------------------------------- -c the following line is for optimized compilation on llnl compilers. -clll. optimize -c----------------------------------------------------------------------- -c block-tridiagonal matrix decomposition routine. -c written by a. c. hindmarsh. -c latest revision.. november 10, 1983 (ach) -c reference.. 
ucid-30150 -c solution of block-tridiagonal systems of linear -c algebraic equations -c a.c. hindmarsh -c february 1977 -c the input matrix contains three blocks of elements in each block-row, -c including blocks in the (1,3) and (n,n-2) block positions. -c decbt uses block gauss elimination and subroutines dgefa and dgesl -c for solution of blocks. partial pivoting is done within -c block-rows only. -c -c note.. this version uses linpack routines dgefa/dgesl instead of -c of dec/sol for solution of blocks, and it uses the bla routine ddot -c for dot product calculations. -c -c input.. -c m = order of each block. -c n = number of blocks in each direction of the matrix. -c n must be 4 or more. the complete matrix has order m*n. -c a = m by m by n array containing diagonal blocks. -c a(i,j,k) contains the (i,j) element of the k-th block. -c b = m by m by n array containing the super-diagonal blocks -c (in b(*,*,k) for k = 1,...,n-1) and the block in the (n,n-2) -c block position (in b(*,*,n)). -c c = m by m by n array containing the subdiagonal blocks -c (in c(*,*,k) for k = 2,3,...,n) and the block in the -c (1,3) block position (in c(*,*,1)). -c ip = integer array of length m*n for working storage. -c output.. -c a,b,c = m by m by n arrays containing the block lu decomposition -c of the input matrix. -c ip = m by n array of pivot information. ip(*,k) contains -c information for the k-th digonal block. -c ier = 0 if no trouble occurred, or -c = -1 if the input value of m or n was illegal, or -c = k if a singular matrix was found in the k-th diagonal block. -c use solbt to solve the associated linear system. -c -c external routines required.. dgefa and dgesl (from linpack) and -c ddot (from the blas, or basic linear algebra package). -c----------------------------------------------------------------------- - integer nm1, nm2, km1, i, j, k - double precision dp, ddot - if (m .lt. 1 .or. n .lt. 4) go to 210 - nm1 = n - 1 - nm2 = n - 2 -c process the first block-row. 
----------------------------------------- - call dgefa (a, m, m, ip, ier) - k = 1 - if (ier .ne. 0) go to 200 - do 10 j = 1,m - call dgesl (a, m, m, ip, b(1,j,1), 0) - call dgesl (a, m, m, ip, c(1,j,1), 0) - 10 continue -c adjust b(*,*,2). ----------------------------------------------------- - do 40 j = 1,m - do 30 i = 1,m - dp = ddot (m, c(i,1,2), m, c(1,j,1), 1) - b(i,j,2) = b(i,j,2) - dp - 30 continue - 40 continue -c main loop. process block-rows 2 to n-1. ----------------------------- - do 100 k = 2,nm1 - km1 = k - 1 - do 70 j = 1,m - do 60 i = 1,m - dp = ddot (m, c(i,1,k), m, b(1,j,km1), 1) - a(i,j,k) = a(i,j,k) - dp - 60 continue - 70 continue - call dgefa (a(1,1,k), m, m, ip(1,k), ier) - if (ier .ne. 0) go to 200 - do 80 j = 1,m - 80 call dgesl (a(1,1,k), m, m, ip(1,k), b(1,j,k), 0) - 100 continue -c process last block-row and return. ----------------------------------- - do 130 j = 1,m - do 120 i = 1,m - dp = ddot (m, b(i,1,n), m, b(1,j,nm2), 1) - c(i,j,n) = c(i,j,n) - dp - 120 continue - 130 continue - do 160 j = 1,m - do 150 i = 1,m - dp = ddot (m, c(i,1,n), m, b(1,j,nm1), 1) - a(i,j,n) = a(i,j,n) - dp - 150 continue - 160 continue - call dgefa (a(1,1,n), m, m, ip(1,n), ier) - k = n - if (ier .ne. 0) go to 200 - return -c error returns. ------------------------------------------------------- - 200 ier = k - return - 210 ier = -1 - return -c----------------------- end of subroutine decbt --------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/ewset.f b/scipy-0.10.1/scipy/integrate/odepack/ewset.f deleted file mode 100644 index d76274be59..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/ewset.f +++ /dev/null @@ -1,32 +0,0 @@ - subroutine ewset (n, itol, rtol, atol, ycur, ewt) -clll. 
optimize -c----------------------------------------------------------------------- -c this subroutine sets the error weight vector ewt according to -c ewt(i) = rtol(i)*abs(ycur(i)) + atol(i), i = 1,...,n, -c with the subscript on rtol and/or atol possibly replaced by 1 above, -c depending on the value of itol. -c----------------------------------------------------------------------- - integer n, itol - integer i - double precision rtol, atol, ycur, ewt - dimension rtol(1), atol(1), ycur(n), ewt(n) -c - go to (10, 20, 30, 40), itol - 10 continue - do 15 i = 1,n - 15 ewt(i) = rtol(1)*dabs(ycur(i)) + atol(1) - return - 20 continue - do 25 i = 1,n - 25 ewt(i) = rtol(1)*dabs(ycur(i)) + atol(i) - return - 30 continue - do 35 i = 1,n - 35 ewt(i) = rtol(i)*dabs(ycur(i)) + atol(1) - return - 40 continue - do 45 i = 1,n - 45 ewt(i) = rtol(i)*dabs(ycur(i)) + atol(i) - return -c----------------------- end of subroutine ewset ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/fnorm.f b/scipy-0.10.1/scipy/integrate/odepack/fnorm.f deleted file mode 100644 index 85a3129347..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/fnorm.f +++ /dev/null @@ -1,22 +0,0 @@ - double precision function fnorm (n, a, w) -clll. optimize -c----------------------------------------------------------------------- -c this function computes the norm of a full n by n matrix, -c stored in the array a, that is consistent with the weighted max-norm -c on vectors, with weights stored in the array w.. 
-c fnorm = max(i=1,...,n) ( w(i) * sum(j=1,...,n) abs(a(i,j))/w(j) ) -c----------------------------------------------------------------------- - integer n, i, j - double precision a, w, an, sum - dimension a(n,n), w(n) - an = 0.0d0 - do 20 i = 1,n - sum = 0.0d0 - do 10 j = 1,n - 10 sum = sum + dabs(a(i,j))/w(j) - an = dmax1(an,sum*w(i)) - 20 continue - fnorm = an - return -c----------------------- end of function fnorm ------------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/intdy.f b/scipy-0.10.1/scipy/integrate/odepack/intdy.f deleted file mode 100644 index cb47df8abe..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/intdy.f +++ /dev/null @@ -1,84 +0,0 @@ - subroutine intdy (t, k, yh, nyh, dky, iflag) -clll. optimize - integer k, nyh, iflag - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer i, ic, j, jb, jb2, jj, jj1, jp1 - double precision t, yh, dky - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision c, r, s, tp - dimension yh(nyh,1), dky(1) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu -c----------------------------------------------------------------------- -c intdy computes interpolated values of the k-th derivative of the -c dependent variable vector y, and stores it in dky. this routine -c is called within the package with k = 0 and t = tout, but may -c also be called by the user for any k up to the current order. -c (see detailed instructions in the usage documentation.) -c----------------------------------------------------------------------- -c the computed values in dky are gotten by interpolation using the -c nordsieck history array yh. 
this array corresponds uniquely to a -c vector-valued polynomial of degree nqcur or less, and dky is set -c to the k-th derivative of this polynomial at t. -c the formula for dky is.. -c q -c dky(i) = sum c(j,k) * (t - tn)**(j-k) * h**(-j) * yh(i,j+1) -c j=k -c where c(j,k) = j*(j-1)*...*(j-k+1), q = nqcur, tn = tcur, h = hcur. -c the quantities nq = nqcur, l = nq+1, n = neq, tn, and h are -c communicated by common. the above sum is done in reverse order. -c iflag is returned negative if either k or t is out of bounds. -c----------------------------------------------------------------------- - iflag = 0 - if (k .lt. 0 .or. k .gt. nq) go to 80 - tp = tn - hu - 100.0d0*uround*(tn + hu) - if ((t-tp)*(t-tn) .gt. 0.0d0) go to 90 -c - s = (t - tn)/h - ic = 1 - if (k .eq. 0) go to 15 - jj1 = l - k - do 10 jj = jj1,nq - 10 ic = ic*jj - 15 c = dfloat(ic) - do 20 i = 1,n - 20 dky(i) = c*yh(i,l) - if (k .eq. nq) go to 55 - jb2 = nq - k - do 50 jb = 1,jb2 - j = nq - jb - jp1 = j + 1 - ic = 1 - if (k .eq. 0) go to 35 - jj1 = jp1 - k - do 30 jj = jj1,j - 30 ic = ic*jj - 35 c = dfloat(ic) - do 40 i = 1,n - 40 dky(i) = c*yh(i,jp1) + s*dky(i) - 50 continue - if (k .eq. 0) return - 55 r = h**(-k) - do 60 i = 1,n - 60 dky(i) = r*dky(i) - return -c - 80 call xerrwv('intdy-- k (=i1) illegal ', - 1 30, 51, 0, 1, k, 0, 0, 0.0d0, 0.0d0) - iflag = -1 - return - 90 call xerrwv('intdy-- t (=r1) illegal ', - 1 30, 52, 0, 0, 0, 0, 1, t, 0.0d0) - call xerrwv( - 1 ' t not in interval tcur - hu (= r1) to tcur (=r2) ', - 1 60, 52, 0, 0, 0, 0, 2, tp, tn) - iflag = -2 - return -c----------------------- end of subroutine intdy ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/iprep.f b/scipy-0.10.1/scipy/integrate/odepack/iprep.f deleted file mode 100644 index b6822412d4..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/iprep.f +++ /dev/null @@ -1,70 +0,0 @@ - subroutine iprep (neq, y, rwork, ia, ja, ipflag, f, jac) -clll. 
optimize - external f, jac - integer neq, ia, ja, ipflag - integer illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 1 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 2 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 3 nslj, ngp, nlu, nnz, nsp, nzl, nzu - integer i, imax, lewtn, lyhd, lyhn - double precision y, rwork - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision rlss - dimension neq(1), y(1), rwork(1), ia(1), ja(1) - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lss001/ rlss(6), - 1 iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 2 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 3 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 4 nslj, ngp, nlu, nnz, nsp, nzl, nzu -c----------------------------------------------------------------------- -c this routine serves as an interface between the driver and -c subroutine prep. it is called only if miter is 1 or 2. -c tasks performed here are.. -c * call prep, -c * reset the required wm segment length lenwk, -c * move yh back to its final location (following wm in rwork), -c * reset pointers for yh, savf, ewt, and acor, and -c * move ewt to its new position if istate = 1. -c ipflag is an output error indication flag. ipflag = 0 if there was -c no trouble, and ipflag is the value of the prep error flag ipper -c if there was trouble in subroutine prep. 
-c----------------------------------------------------------------------- - ipflag = 0 -c call prep to do matrix preprocessing operations. --------------------- - call prep (neq, y, rwork(lyh), rwork(lsavf), rwork(lewt), - 1 rwork(lacor), ia, ja, rwork(lwm), rwork(lwm), ipflag, f, jac) - lenwk = max0(lreq,lwmin) - if (ipflag .lt. 0) return -c if prep was successful, move yh to end of required space for wm. ----- - lyhn = lwm + lenwk - if (lyhn .gt. lyh) return - lyhd = lyh - lyhn - if (lyhd .eq. 0) go to 20 - imax = lyhn - 1 + lenyhm - do 10 i = lyhn,imax - 10 rwork(i) = rwork(i+lyhd) - lyh = lyhn -c reset pointers for savf, ewt, and acor. ------------------------------ - 20 lsavf = lyh + lenyh - lewtn = lsavf + n - lacor = lewtn + n - if (istatc .eq. 3) go to 40 -c if istate = 1, move ewt (left) to its new position. ------------------ - if (lewtn .gt. lewt) return - do 30 i = 1,n - 30 rwork(i+lewtn-1) = rwork(i+lewt-1) - 40 lewt = lewtn - return -c----------------------- end of subroutine iprep ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/jgroup.f b/scipy-0.10.1/scipy/integrate/odepack/jgroup.f deleted file mode 100644 index 583c1f29e3..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/jgroup.f +++ /dev/null @@ -1,64 +0,0 @@ - subroutine jgroup (n,ia,ja,maxg,ngrp,igp,jgp,incl,jdone,ier) -clll. optimize - integer n, ia, ja, maxg, ngrp, igp, jgp, incl, jdone, ier - dimension ia(1), ja(1), igp(1), jgp(n), incl(n), jdone(n) -c----------------------------------------------------------------------- -c this subroutine constructs groupings of the column indices of -c the jacobian matrix, used in the numerical evaluation of the -c jacobian by finite differences. -c -c input.. -c n = the order of the matrix. -c ia,ja = sparse structure descriptors of the matrix by rows. -c maxg = length of available storate in the igp array. -c -c output.. -c ngrp = number of groups. -c jgp = array of length n containing the column indices by groups. 
-c igp = pointer array of length ngrp + 1 to the locations in jgp -c of the beginning of each group. -c ier = error indicator. ier = 0 if no error occurred, or 1 if -c maxg was insufficient. -c -c incl and jdone are working arrays of length n. -c----------------------------------------------------------------------- - integer i, j, k, kmin, kmax, ncol, ng -c - ier = 0 - do 10 j = 1,n - 10 jdone(j) = 0 - ncol = 1 - do 60 ng = 1,maxg - igp(ng) = ncol - do 20 i = 1,n - 20 incl(i) = 0 - do 50 j = 1,n -c reject column j if it is already in a group.-------------------------- - if (jdone(j) .eq. 1) go to 50 - kmin = ia(j) - kmax = ia(j+1) - 1 - do 30 k = kmin,kmax -c reject column j if it overlaps any column already in this group.------ - i = ja(k) - if (incl(i) .eq. 1) go to 50 - 30 continue -c accept column j into group ng.---------------------------------------- - jgp(ncol) = j - ncol = ncol + 1 - jdone(j) = 1 - do 40 k = kmin,kmax - i = ja(k) - 40 incl(i) = 1 - 50 continue -c stop if this group is empty (grouping is complete).------------------- - if (ncol .eq. igp(ng)) go to 70 - 60 continue -c error return if not all columns were chosen (maxg too small).--------- - if (ncol .le. 
n) go to 80 - ng = maxg - 70 ngrp = ng - 1 - return - 80 ier = 1 - return -c----------------------- end of subroutine jgroup ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/lsoda.f b/scipy-0.10.1/scipy/integrate/odepack/lsoda.f deleted file mode 100644 index 245fbaf70c..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/lsoda.f +++ /dev/null @@ -1,1654 +0,0 @@ - subroutine lsoda (f, neq, y, t, tout, itol, rtol, atol, itask, - 1 istate, iopt, rwork, lrw, iwork, liw, jac, jt) - external f, jac - integer neq, itol, itask, istate, iopt, lrw, iwork, liw, jt, isav - double precision y, t, tout, rtol, atol, rwork, rsav - dimension neq(1), y(1), rtol(1), atol(1), rwork(lrw), iwork(liw) - dimension rsav(240), isav(50) -c----------------------------------------------------------------------- -c this is the 24 feb 1997 version of -c lsoda.. livermore solver for ordinary differential equations, with -c automatic method switching for stiff and nonstiff problems. -c -c this version is in double precision. -c -c lsoda solves the initial value problem for stiff or nonstiff -c systems of first order ode-s, -c dy/dt = f(t,y) , or, in component form, -c dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(neq)) (i = 1,...,neq). -c -c this a variant version of the lsode package. -c it switches automatically between stiff and nonstiff methods. -c this means that the user does not have to determine whether the -c problem is stiff or not, and the solver will automatically choose the -c appropriate method. it always starts with the nonstiff method. -c -c authors.. -c linda r. petzold and alan c. hindmarsh, -c computing and mathematics research division, l-316 -c lawrence livermore national laboratory -c livermore, ca 94550. -c -c references.. -c 1. alan c. hindmarsh, odepack, a systematized collection of ode -c solvers, in scientific computing, r. s. stepleman et al. (eds.), -c north-holland, amsterdam, 1983, pp. 55-64. -c 2. linda r. 
petzold, automatic selection of methods for solving -c stiff and nonstiff systems of ordinary differential equations, -c siam j. sci. stat. comput. 4 (1983), pp. 136-148. -c----------------------------------------------------------------------- -c summary of usage. -c -c communication between the user and the lsoda package, for normal -c situations, is summarized here. this summary describes only a subset -c of the full set of options available. see the full description for -c details, including alternative treatment of the jacobian matrix, -c optional inputs and outputs, nonstandard options, and -c instructions for special situations. see also the example -c problem (with program and output) following this summary. -c -c a. first provide a subroutine of the form.. -c subroutine f (neq, t, y, ydot) -c dimension y(neq), ydot(neq) -c which supplies the vector function f by loading ydot(i) with f(i). -c -c b. write a main program which calls subroutine lsoda once for -c each point at which answers are desired. this should also provide -c for possible use of logical unit 6 for output of error messages -c by lsoda. on the first call to lsoda, supply arguments as follows.. -c f = name of subroutine for right-hand side vector f. -c this name must be declared external in calling program. -c neq = number of first order ode-s. -c y = array of initial values, of length neq. -c t = the initial value of the independent variable. -c tout = first point where output is desired (.ne. t). -c itol = 1 or 2 according as atol (below) is a scalar or array. -c rtol = relative tolerance parameter (scalar). -c atol = absolute tolerance parameter (scalar or array). -c the estimated local error in y(i) will be controlled so as -c to be less than -c ewt(i) = rtol*abs(y(i)) + atol if itol = 1, or -c ewt(i) = rtol*abs(y(i)) + atol(i) if itol = 2. 
-c thus the local error test passes if, in each component, -c either the absolute error is less than atol (or atol(i)), -c or the relative error is less than rtol. -c use rtol = 0.0 for pure absolute error control, and -c use atol = 0.0 (or atol(i) = 0.0) for pure relative error -c control. caution.. actual (global) errors may exceed these -c local tolerances, so choose them conservatively. -c itask = 1 for normal computation of output values of y at t = tout. -c istate = integer flag (input and output). set istate = 1. -c iopt = 0 to indicate no optional inputs used. -c rwork = real work array of length at least.. -c 22 + neq * max(16, neq + 9). -c see also paragraph e below. -c lrw = declared length of rwork (in user-s dimension). -c iwork = integer work array of length at least 20 + neq. -c liw = declared length of iwork (in user-s dimension). -c jac = name of subroutine for jacobian matrix. -c use a dummy name. see also paragraph e below. -c jt = jacobian type indicator. set jt = 2. -c see also paragraph e below. -c note that the main program must declare arrays y, rwork, iwork, -c and possibly atol. -c -c c. the output from the first call (or any call) is.. -c y = array of computed values of y(t) vector. -c t = corresponding value of independent variable (normally tout). -c istate = 2 if lsoda was successful, negative otherwise. -c -1 means excess work done on this call (perhaps wrong jt). -c -2 means excess accuracy requested (tolerances too small). -c -3 means illegal input detected (see printed message). -c -4 means repeated error test failures (check all inputs). -c -5 means repeated convergence failures (perhaps bad jacobian -c supplied or wrong choice of jt or tolerances). -c -6 means error weight became zero during problem. (solution -c component i vanished, and atol or atol(i) = 0.) -c -7 means work space insufficient to finish (see messages). -c -c d. to continue the integration after a successful return, simply -c reset tout and call lsoda again. 
no other parameters need be reset. -c -c e. note.. if and when lsoda regards the problem as stiff, and -c switches methods accordingly, it must make use of the neq by neq -c jacobian matrix, j = df/dy. for the sake of simplicity, the -c inputs to lsoda recommended in paragraph b above cause lsoda to -c treat j as a full matrix, and to approximate it internally by -c difference quotients. alternatively, j can be treated as a band -c matrix (with great potential reduction in the size of the rwork -c array). also, in either the full or banded case, the user can supply -c j in closed form, with a routine whose name is passed as the jac -c argument. these alternatives are described in the paragraphs on -c rwork, jac, and jt in the full description of the call sequence below. -c -c----------------------------------------------------------------------- -c example problem. -c -c the following is a simple example problem, with the coding -c needed for its solution by lsoda. the problem is from chemical -c kinetics, and consists of the following three rate equations.. -c dy1/dt = -.04*y1 + 1.e4*y2*y3 -c dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 -c dy3/dt = 3.e7*y2**2 -c on the interval from t = 0.0 to t = 4.e10, with initial conditions -c y1 = 1.0, y2 = y3 = 0. the problem is stiff. -c -c the following coding solves this problem with lsoda, -c printing results at t = .4, 4., ..., 4.e10. it uses -c itol = 2 and atol much smaller for y2 than y1 or y3 because -c y2 has much smaller values. -c at the end of the run, statistical quantities of interest are -c printed (see optional outputs in the full description below). 
-c -c external fex -c double precision atol, rtol, rwork, t, tout, y -c dimension y(3), atol(3), rwork(70), iwork(23) -c neq = 3 -c y(1) = 1.0d0 -c y(2) = 0.0d0 -c y(3) = 0.0d0 -c t = 0.0d0 -c tout = 0.4d0 -c itol = 2 -c rtol = 1.0d-4 -c atol(1) = 1.0d-6 -c atol(2) = 1.0d-10 -c atol(3) = 1.0d-6 -c itask = 1 -c istate = 1 -c iopt = 0 -c lrw = 70 -c liw = 23 -c jt = 2 -c do 40 iout = 1,12 -c call lsoda(fex,neq,y,t,tout,itol,rtol,atol,itask,istate, -c 1 iopt,rwork,lrw,iwork,liw,jdum,jt) -c write(6,20)t,y(1),y(2),y(3) -c 20 format(' at t =',e12.4,' y =',3e14.6) -c if (istate .lt. 0) go to 80 -c 40 tout = tout*10.0d0 -c write(6,60)iwork(11),iwork(12),iwork(13),iwork(19),rwork(15) -c 60 format(/' no. steps =',i4,' no. f-s =',i4,' no. j-s =',i4/ -c 1 ' method last used =',i2,' last switch was at t =',e12.4) -c stop -c 80 write(6,90)istate -c 90 format(///' error halt.. istate =',i3) -c stop -c end -c -c subroutine fex (neq, t, y, ydot) -c double precision t, y, ydot -c dimension y(3), ydot(3) -c ydot(1) = -.04d0*y(1) + 1.0d4*y(2)*y(3) -c ydot(3) = 3.0d7*y(2)*y(2) -c ydot(2) = -ydot(1) - ydot(3) -c return -c end -c -c the output of this program (on a cdc-7600 in single precision) -c is as follows.. -c -c at t = 4.0000e-01 y = 9.851712e-01 3.386380e-05 1.479493e-02 -c at t = 4.0000e+00 y = 9.055333e-01 2.240655e-05 9.444430e-02 -c at t = 4.0000e+01 y = 7.158403e-01 9.186334e-06 2.841505e-01 -c at t = 4.0000e+02 y = 4.505250e-01 3.222964e-06 5.494717e-01 -c at t = 4.0000e+03 y = 1.831975e-01 8.941774e-07 8.168016e-01 -c at t = 4.0000e+04 y = 3.898730e-02 1.621940e-07 9.610125e-01 -c at t = 4.0000e+05 y = 4.936363e-03 1.984221e-08 9.950636e-01 -c at t = 4.0000e+06 y = 5.161831e-04 2.065786e-09 9.994838e-01 -c at t = 4.0000e+07 y = 5.179817e-05 2.072032e-10 9.999482e-01 -c at t = 4.0000e+08 y = 5.283401e-06 2.113371e-11 9.999947e-01 -c at t = 4.0000e+09 y = 4.659031e-07 1.863613e-12 9.999995e-01 -c at t = 4.0000e+10 y = 1.404280e-08 5.617126e-14 1.000000e+00 -c -c no. 
steps = 361 no. f-s = 693 no. j-s = 64 -c method last used = 2 last switch was at t = 6.0092e-03 -c----------------------------------------------------------------------- -c full description of user interface to lsoda. -c -c the user interface to lsoda consists of the following parts. -c -c i. the call sequence to subroutine lsoda, which is a driver -c routine for the solver. this includes descriptions of both -c the call sequence arguments and of user-supplied routines. -c following these descriptions is a description of -c optional inputs available through the call sequence, and then -c a description of optional outputs (in the work arrays). -c -c ii. descriptions of other routines in the lsoda package that may be -c (optionally) called by the user. these provide the ability to -c alter error message handling, save and restore the internal -c common, and obtain specified derivatives of the solution y(t). -c -c iii. descriptions of common blocks to be declared in overlay -c or similar environments, or to be saved when doing an interrupt -c of the problem and continued solution later. -c -c iv. description of a subroutine in the lsoda package, -c which the user may replace with his own version, if desired. -c this relates to the measurement of errors. -c -c----------------------------------------------------------------------- -c part i. call sequence. -c -c the call sequence parameters used for input only are -c f, neq, tout, itol, rtol, atol, itask, iopt, lrw, liw, jac, jt, -c and those used for both input and output are -c y, t, istate. -c the work arrays rwork and iwork are also used for conditional and -c optional inputs and optional outputs. (the term output here refers -c to the return from subroutine lsoda to the user-s calling program.) -c -c the legality of input parameters will be thoroughly checked on the -c initial call for the problem, but not checked thereafter unless a -c change in input parameters is flagged by istate = 3 on input. 
-c -c the descriptions of the call arguments are as follows. -c -c f = the name of the user-supplied subroutine defining the -c ode system. the system must be put in the first-order -c form dy/dt = f(t,y), where f is a vector-valued function -c of the scalar t and the vector y. subroutine f is to -c compute the function f. it is to have the form -c subroutine f (neq, t, y, ydot) -c dimension y(1), ydot(1) -c where neq, t, and y are input, and the array ydot = f(t,y) -c is output. y and ydot are arrays of length neq. -c (in the dimension statement above, 1 is a dummy -c dimension.. it can be replaced by any value.) -c subroutine f should not alter y(1),...,y(neq). -c f must be declared external in the calling program. -c -c subroutine f may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in f) and/or y has length exceeding neq(1). -c see the descriptions of neq and y below. -c -c if quantities computed in the f routine are needed -c externally to lsoda, an extra call to f should be made -c for this purpose, for consistent and accurate results. -c if only the derivative dy/dt is needed, use intdy instead. -c -c neq = the size of the ode system (number of first order -c ordinary differential equations). used only for input. -c neq may be decreased, but not increased, during the problem. -c if neq is decreased (with istate = 3 on input), the -c remaining components of y should be left undisturbed, if -c these are to be accessed in f and/or jac. -c -c normally, neq is a scalar, and it is generally referred to -c as a scalar in this user interface description. however, -c neq may be an array, with neq(1) set to the system size. -c (the lsoda package accesses only neq(1).) in either case, -c this parameter is passed as the neq argument in all calls -c to f and jac. hence, if it is an array, locations -c neq(2),... may be used to store other integer data and pass -c it to f and/or jac. 
subroutines f and/or jac must include -c neq in a dimension statement in that case. -c -c y = a real array for the vector of dependent variables, of -c length neq or more. used for both input and output on the -c first call (istate = 1), and only for output on other calls. -c on the first call, y must contain the vector of initial -c values. on output, y contains the computed solution vector, -c evaluated at t. if desired, the y array may be used -c for other purposes between calls to the solver. -c -c this array is passed as the y argument in all calls to -c f and jac. hence its length may exceed neq, and locations -c y(neq+1),... may be used to store other real data and -c pass it to f and/or jac. (the lsoda package accesses only -c y(1),...,y(neq).) -c -c t = the independent variable. on input, t is used only on the -c first call, as the initial point of the integration. -c on output, after each call, t is the value at which a -c computed solution y is evaluated (usually the same as tout). -c on an error return, t is the farthest point reached. -c -c tout = the next value of t at which a computed solution is desired. -c used only for input. -c -c when starting the problem (istate = 1), tout may be equal -c to t for one call, then should .ne. t for the next call. -c for the initial t, an input value of tout .ne. t is used -c in order to determine the direction of the integration -c (i.e. the algebraic sign of the step sizes) and the rough -c scale of the problem. integration in either direction -c (forward or backward in t) is permitted. -c -c if itask = 2 or 5 (one-step modes), tout is ignored after -c the first call (i.e. the first call with tout .ne. t). -c otherwise, tout is required on every call. -c -c if itask = 1, 3, or 4, the values of tout need not be -c monotone, but a value of tout which backs up is limited -c to the current internal t interval, whose endpoints are -c tcur - hu and tcur (see optional outputs, below, for -c tcur and hu). 
-c -c itol = an indicator for the type of error control. see -c description below under atol. used only for input. -c -c rtol = a relative error tolerance parameter, either a scalar or -c an array of length neq. see description below under atol. -c input only. -c -c atol = an absolute error tolerance parameter, either a scalar or -c an array of length neq. input only. -c -c the input parameters itol, rtol, and atol determine -c the error control performed by the solver. the solver will -c control the vector e = (e(i)) of estimated local errors -c in y, according to an inequality of the form -c max-norm of ( e(i)/ewt(i) ) .le. 1, -c where ewt = (ewt(i)) is a vector of positive error weights. -c the values of rtol and atol should all be non-negative. -c the following table gives the types (scalar/array) of -c rtol and atol, and the corresponding form of ewt(i). -c -c itol rtol atol ewt(i) -c 1 scalar scalar rtol*abs(y(i)) + atol -c 2 scalar array rtol*abs(y(i)) + atol(i) -c 3 array scalar rtol(i)*abs(y(i)) + atol -c 4 array array rtol(i)*abs(y(i)) + atol(i) -c -c when either of these parameters is a scalar, it need not -c be dimensioned in the user-s calling program. -c -c if none of the above choices (with itol, rtol, and atol -c fixed throughout the problem) is suitable, more general -c error controls can be obtained by substituting a -c user-supplied routine for the setting of ewt. -c see part iv below. -c -c if global errors are to be estimated by making a repeated -c run on the same problem with smaller tolerances, then all -c components of rtol and atol (i.e. of ewt) should be scaled -c down uniformly. -c -c itask = an index specifying the task to be performed. -c input only. itask has the following values and meanings. -c 1 means normal computation of output values of y(t) at -c t = tout (by overshooting and interpolating). -c 2 means take one step only and return. -c 3 means stop at the first internal mesh point at or -c beyond t = tout and return. 
-c 4 means normal computation of output values of y(t) at -c t = tout but without overshooting t = tcrit. -c tcrit must be input as rwork(1). tcrit may be equal to -c or beyond tout, but not behind it in the direction of -c integration. this option is useful if the problem -c has a singularity at or beyond t = tcrit. -c 5 means take one step, without passing tcrit, and return. -c tcrit must be input as rwork(1). -c -c note.. if itask = 4 or 5 and the solver reaches tcrit -c (within roundoff), it will return t = tcrit (exactly) to -c indicate this (unless itask = 4 and tout comes before tcrit, -c in which case answers at t = tout are returned first). -c -c istate = an index used for input and output to specify the -c the state of the calculation. -c -c on input, the values of istate are as follows. -c 1 means this is the first call for the problem -c (initializations will be done). see note below. -c 2 means this is not the first call, and the calculation -c is to continue normally, with no change in any input -c parameters except possibly tout and itask. -c (if itol, rtol, and/or atol are changed between calls -c with istate = 2, the new values will be used but not -c tested for legality.) -c 3 means this is not the first call, and the -c calculation is to continue normally, but with -c a change in input parameters other than -c tout and itask. changes are allowed in -c neq, itol, rtol, atol, iopt, lrw, liw, jt, ml, mu, -c and any optional inputs except h0, mxordn, and mxords. -c (see iwork description for ml and mu.) -c note.. a preliminary call with tout = t is not counted -c as a first call here, as no initialization or checking of -c input is done. (such a call is sometimes useful for the -c purpose of outputting the initial conditions.) -c thus the first call for which tout .ne. t requires -c istate = 1 on input. -c -c on output, istate has the following values and meanings. -c 1 means nothing was done, as tout was equal to t with -c istate = 1 on input. 
(however, an internal counter was -c set to detect and prevent repeated calls of this type.) -c 2 means the integration was performed successfully. -c -1 means an excessive amount of work (more than mxstep -c steps) was done on this call, before completing the -c requested task, but the integration was otherwise -c successful as far as t. (mxstep is an optional input -c and is normally 500.) to continue, the user may -c simply reset istate to a value .gt. 1 and call again -c (the excess work step counter will be reset to 0). -c in addition, the user may increase mxstep to avoid -c this error return (see below on optional inputs). -c -2 means too much accuracy was requested for the precision -c of the machine being used. this was detected before -c completing the requested task, but the integration -c was successful as far as t. to continue, the tolerance -c parameters must be reset, and istate must be set -c to 3. the optional output tolsf may be used for this -c purpose. (note.. if this condition is detected before -c taking any steps, then an illegal input return -c (istate = -3) occurs instead.) -c -3 means illegal input was detected, before taking any -c integration steps. see written message for details. -c note.. if the solver detects an infinite loop of calls -c to the solver with illegal input, it will cause -c the run to stop. -c -4 means there were repeated error test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c the problem may have a singularity, or the input -c may be inappropriate. -c -5 means there were repeated convergence test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c this may be caused by an inaccurate jacobian matrix, -c if one is being used. -c -6 means ewt(i) became zero for some i during the -c integration. 
pure relative error control (atol(i)=0.0) -c was requested on a variable which has now vanished. -c the integration was successful as far as t. -c -7 means the length of rwork and/or iwork was too small to -c proceed, but the integration was successful as far as t. -c this happens when lsoda chooses to switch methods -c but lrw and/or liw is too small for the new method. -c -c note.. since the normal output value of istate is 2, -c it does not need to be reset for normal continuation. -c also, since a negative input value of istate will be -c regarded as illegal, a negative output value requires the -c user to change it, and possibly other inputs, before -c calling the solver again. -c -c iopt = an integer flag to specify whether or not any optional -c inputs are being used on this call. input only. -c the optional inputs are listed separately below. -c iopt = 0 means no optional inputs are being used. -c default values will be used in all cases. -c iopt = 1 means one or more optional inputs are being used. -c -c rwork = a real array (double precision) for work space, and (in the -c first 20 words) for conditional and optional inputs and -c optional outputs. -c as lsoda switches automatically between stiff and nonstiff -c methods, the required length of rwork can change during the -c problem. thus the rwork array passed to lsoda can either -c have a static (fixed) length large enough for both methods, -c or have a dynamic (changing) length altered by the calling -c program in response to output from lsoda. -c -c --- fixed length case --- -c if the rwork length is to be fixed, it should be at least -c max (lrn, lrs), -c where lrn and lrs are the rwork lengths required when the -c current method is nonstiff or stiff, respectively. -c -c the separate rwork length requirements lrn and lrs are -c as follows.. 
-c if neq is constant and the maximum method orders have -c their default values, then -c lrn = 20 + 16*neq, -c lrs = 22 + 9*neq + neq**2 if jt = 1 or 2, -c lrs = 22 + 10*neq + (2*ml+mu)*neq if jt = 4 or 5. -c under any other conditions, lrn and lrs are given by.. -c lrn = 20 + nyh*(mxordn+1) + 3*neq, -c lrs = 20 + nyh*(mxords+1) + 3*neq + lmat, -c where -c nyh = the initial value of neq, -c mxordn = 12, unless a smaller value is given as an -c optional input, -c mxords = 5, unless a smaller value is given as an -c optional input, -c lmat = length of matrix work space.. -c lmat = neq**2 + 2 if jt = 1 or 2, -c lmat = (2*ml + mu + 1)*neq + 2 if jt = 4 or 5. -c -c --- dynamic length case --- -c if the length of rwork is to be dynamic, then it should -c be at least lrn or lrs, as defined above, depending on the -c current method. initially, it must be at least lrn (since -c lsoda starts with the nonstiff method). on any return -c from lsoda, the optional output mcur indicates the current -c method. if mcur differs from the value it had on the -c previous return, or if there has only been one call to -c lsoda and mcur is now 2, then lsoda has switched -c methods during the last call, and the length of rwork -c should be reset (to lrn if mcur = 1, or to lrs if -c mcur = 2). (an increase in the rwork length is required -c if lsoda returned istate = -7, but not otherwise.) -c after resetting the length, call lsoda with istate = 3 -c to signal that change. -c -c lrw = the length of the array rwork, as declared by the user. -c (this will be checked by the solver.) -c -c iwork = an integer array for work space. -c as lsoda switches automatically between stiff and nonstiff -c methods, the required length of iwork can change during -c problem, between -c lis = 20 + neq and lin = 20, -c respectively. thus the iwork array passed to lsoda can -c either have a fixed length of at least 20 + neq, or have a -c dynamic length of at least lin or lis, depending on the -c current method. 
the comments on dynamic length under -c rwork above apply here. initially, this length need -c only be at least lin = 20. -c -c the first few words of iwork are used for conditional and -c optional inputs and optional outputs. -c -c the following 2 words in iwork are conditional inputs.. -c iwork(1) = ml these are the lower and upper -c iwork(2) = mu half-bandwidths, respectively, of the -c banded jacobian, excluding the main diagonal. -c the band is defined by the matrix locations -c (i,j) with i-ml .le. j .le. i+mu. ml and mu -c must satisfy 0 .le. ml,mu .le. neq-1. -c these are required if jt is 4 or 5, and -c ignored otherwise. ml and mu may in fact be -c the band parameters for a matrix to which -c df/dy is only approximately equal. -c -c liw = the length of the array iwork, as declared by the user. -c (this will be checked by the solver.) -c -c note.. the base addresses of the work arrays must not be -c altered between calls to lsoda for the same problem. -c the contents of the work arrays must not be altered -c between calls, except possibly for the conditional and -c optional inputs, and except for the last 3*neq words of rwork. -c the latter space is used for internal scratch space, and so is -c available for use by the user outside lsoda between calls, if -c desired (but not for use by f or jac). -c -c jac = the name of the user-supplied routine to compute the -c jacobian matrix, df/dy, if jt = 1 or 4. the jac routine -c is optional, but if the problem is expected to be stiff much -c of the time, you are encouraged to supply jac, for the sake -c of efficiency. (alternatively, set jt = 2 or 5 to have -c lsoda compute df/dy internally by difference quotients.) -c if and when lsoda uses df/dy, if treats this neq by neq -c matrix either as full (jt = 1 or 2), or as banded (jt = -c 4 or 5) with half-bandwidths ml and mu (discussed under -c iwork above). 
in either case, if jt = 1 or 4, the jac -c routine must compute df/dy as a function of the scalar t -c and the vector y. it is to have the form -c subroutine jac (neq, t, y, ml, mu, pd, nrowpd) -c dimension y(1), pd(nrowpd,1) -c where neq, t, y, ml, mu, and nrowpd are input and the array -c pd is to be loaded with partial derivatives (elements of -c the jacobian matrix) on output. pd must be given a first -c dimension of nrowpd. t and y have the same meaning as in -c subroutine f. (in the dimension statement above, 1 is a -c dummy dimension.. it can be replaced by any value.) -c in the full matrix case (jt = 1), ml and mu are -c ignored, and the jacobian is to be loaded into pd in -c columnwise manner, with df(i)/dy(j) loaded into pd(i,j). -c in the band matrix case (jt = 4), the elements -c within the band are to be loaded into pd in columnwise -c manner, with diagonal lines of df/dy loaded into the rows -c of pd. thus df(i)/dy(j) is to be loaded into pd(i-j+mu+1,j). -c ml and mu are the half-bandwidth parameters (see iwork). -c the locations in pd in the two triangular areas which -c correspond to nonexistent matrix elements can be ignored -c or loaded arbitrarily, as they are overwritten by lsoda. -c jac need not provide df/dy exactly. a crude -c approximation (possibly with a smaller bandwidth) will do. -c in either case, pd is preset to zero by the solver, -c so that only the nonzero elements need be loaded by jac. -c each call to jac is preceded by a call to f with the same -c arguments neq, t, and y. thus to gain some efficiency, -c intermediate quantities shared by both calculations may be -c saved in a user common block by f and not recomputed by jac, -c if desired. also, jac may alter the y array, if desired. -c jac must be declared external in the calling program. -c subroutine jac may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in jac) and/or y has length exceeding neq(1). 
-c see the descriptions of neq and y above. -c -c jt = jacobian type indicator. used only for input. -c jt specifies how the jacobian matrix df/dy will be -c treated, if and when lsoda requires this matrix. -c jt has the following values and meanings.. -c 1 means a user-supplied full (neq by neq) jacobian. -c 2 means an internally generated (difference quotient) full -c jacobian (using neq extra calls to f per df/dy value). -c 4 means a user-supplied banded jacobian. -c 5 means an internally generated banded jacobian (using -c ml+mu+1 extra calls to f per df/dy evaluation). -c if jt = 1 or 4, the user must supply a subroutine jac -c (the name is arbitrary) as described above under jac. -c if jt = 2 or 5, a dummy argument can be used. -c----------------------------------------------------------------------- -c optional inputs. -c -c the following is a list of the optional inputs provided for in the -c call sequence. (see also part ii.) for each such input variable, -c this table lists its name as used in this documentation, its -c location in the call sequence, its meaning, and the default value. -c the use of any of these inputs requires iopt = 1, and in that -c case all of these inputs are examined. a value of zero for any -c of these optional inputs will cause the default value to be used. -c thus to use a subset of the optional inputs, simply preload -c locations 5 to 10 in rwork and iwork to 0.0 and 0 respectively, and -c then set those of interest to nonzero values. -c -c name location meaning and default value -c -c h0 rwork(5) the step size to be attempted on the first step. -c the default value is determined by the solver. -c -c hmax rwork(6) the maximum absolute step size allowed. -c the default value is infinite. -c -c hmin rwork(7) the minimum absolute step size allowed. -c the default value is 0. (this lower bound is not -c enforced on the final step before reaching tcrit -c when itask = 4 or 5.) 
-c -c ixpr iwork(5) flag to generate extra printing at method switches. -c ixpr = 0 means no extra printing (the default). -c ixpr = 1 means print data on each switch. -c t, h, and nst will be printed on the same logical -c unit as used for error messages. -c -c mxstep iwork(6) maximum number of (internally defined) steps -c allowed during one call to the solver. -c the default value is 500. -c -c mxhnil iwork(7) maximum number of messages printed (per problem) -c warning that t + h = t on a step (h = step size). -c this must be positive to result in a non-default -c value. the default value is 10. -c -c mxordn iwork(8) the maximum order to be allowed for the nonstiff -c (adams) method. the default value is 12. -c if mxordn exceeds the default value, it will -c be reduced to the default value. -c mxordn is held constant during the problem. -c -c mxords iwork(9) the maximum order to be allowed for the stiff -c (bdf) method. the default value is 5. -c if mxords exceeds the default value, it will -c be reduced to the default value. -c mxords is held constant during the problem. -c----------------------------------------------------------------------- -c optional outputs. -c -c as optional additional output from lsoda, the variables listed -c below are quantities related to the performance of lsoda -c which are available to the user. these are communicated by way of -c the work arrays, but also have internal mnemonic names as shown. -c except where stated otherwise, all of these outputs are defined -c on any successful return from lsoda, and on any return with -c istate = -1, -2, -4, -5, or -6. on an illegal input return -c (istate = -3), they will be unchanged from their existing values -c (if any), except possibly for tolsf, lenrw, and leniw. -c on any error return, outputs relevant to the error will be defined, -c as noted below. -c -c name location meaning -c -c hu rwork(11) the step size in t last used (successfully). 
-c -c hcur rwork(12) the step size to be attempted on the next step. -c -c tcur rwork(13) the current value of the independent variable -c which the solver has actually reached, i.e. the -c current internal mesh point in t. on output, tcur -c will always be at least as far as the argument -c t, but may be farther (if interpolation was done). -c -c tolsf rwork(14) a tolerance scale factor, greater than 1.0, -c computed when a request for too much accuracy was -c detected (istate = -3 if detected at the start of -c the problem, istate = -2 otherwise). if itol is -c left unaltered but rtol and atol are uniformly -c scaled up by a factor of tolsf for the next call, -c then the solver is deemed likely to succeed. -c (the user may also ignore tolsf and alter the -c tolerance parameters in any other way appropriate.) -c -c tsw rwork(15) the value of t at the time of the last method -c switch, if any. -c -c nst iwork(11) the number of steps taken for the problem so far. -c -c nfe iwork(12) the number of f evaluations for the problem so far. -c -c nje iwork(13) the number of jacobian evaluations (and of matrix -c lu decompositions) for the problem so far. -c -c nqu iwork(14) the method order last used (successfully). -c -c nqcur iwork(15) the order to be attempted on the next step. -c -c imxer iwork(16) the index of the component of largest magnitude in -c the weighted local error vector ( e(i)/ewt(i) ), -c on an error return with istate = -4 or -5. -c -c lenrw iwork(17) the length of rwork actually required, assuming -c that the length of rwork is to be fixed for the -c rest of the problem, and that switching may occur. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c leniw iwork(18) the length of iwork actually required, assuming -c that the length of iwork is to be fixed for the -c rest of the problem, and that switching may occur. 
-c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c mused iwork(19) the method indicator for the last successful step.. -c 1 means adams (nonstiff), 2 means bdf (stiff). -c -c mcur iwork(20) the current method indicator.. -c 1 means adams (nonstiff), 2 means bdf (stiff). -c this is the method to be attempted -c on the next step. thus it differs from mused -c only if a method switch has just been made. -c -c the following two arrays are segments of the rwork array which -c may also be of interest to the user as optional outputs. -c for each array, the table below gives its internal name, -c its base address in rwork, and its description. -c -c name base address description -c -c yh 21 the nordsieck history array, of size nyh by -c (nqcur + 1), where nyh is the initial value -c of neq. for j = 0,1,...,nqcur, column j+1 -c of yh contains hcur**j/factorial(j) times -c the j-th derivative of the interpolating -c polynomial currently representing the solution, -c evaluated at t = tcur. -c -c acor lacor array of size neq used for the accumulated -c (from common corrections on each step, scaled on output -c as noted) to represent the estimated local error in y -c on the last step. this is the vector e in -c the description of the error control. it is -c defined only on a successful return from lsoda. -c the base address lacor is obtained by -c including in the user-s program the -c following 3 lines.. -c double precision rls -c common /ls0001/ rls(218), ils(39) -c lacor = ils(5) -c -c----------------------------------------------------------------------- -c part ii. other routines callable. -c -c the following are optional calls which the user may make to -c gain additional capabilities in conjunction with lsoda. -c (the routines xsetun and xsetf are designed to conform to the -c slatec error handling package.) 
-c -c form of call function -c call xsetun(lun) set the logical unit number, lun, for -c output of messages from lsoda, if -c the default is not desired. -c the default value of lun is 6. -c -c call xsetf(mflag) set a flag to control the printing of -c messages by lsoda. -c mflag = 0 means do not print. (danger.. -c this risks losing valuable information.) -c mflag = 1 means print (the default). -c -c either of the above calls may be made at -c any time and will take effect immediately. -c -c call srcma(rsav,isav,job) saves and restores the contents of -c the internal common blocks used by -c lsoda (see part iii below). -c rsav must be a real array of length 240 -c or more, and isav must be an integer -c array of length 50 or more. -c job=1 means save common into rsav/isav. -c job=2 means restore common from rsav/isav. -c srcma is useful if one is -c interrupting a run and restarting -c later, or alternating between two or -c more problems solved with lsoda. -c -c call intdy(,,,,,) provide derivatives of y, of various -c (see below) orders, at a specified point t, if -c desired. it may be called only after -c a successful return from lsoda. -c -c the detailed instructions for using intdy are as follows. -c the form of the call is.. -c -c call intdy (t, k, rwork(21), nyh, dky, iflag) -c -c the input parameters are.. -c -c t = value of independent variable where answers are desired -c (normally the same as the t last returned by lsoda). -c for valid results, t must lie between tcur - hu and tcur. -c (see optional outputs for tcur and hu.) -c k = integer order of the derivative desired. k must satisfy -c 0 .le. k .le. nqcur, where nqcur is the current order -c (see optional outputs). the capability corresponding -c to k = 0, i.e. computing y(t), is already provided -c by lsoda directly. since nqcur .ge. 1, the first -c derivative dy/dt is always available with intdy. -c rwork(21) = the base address of the history array yh. 
-c nyh = column length of yh, equal to the initial value of neq. -c -c the output parameters are.. -c -c dky = a real array of length neq containing the computed value -c of the k-th derivative of y(t). -c iflag = integer flag, returned as 0 if k and t were legal, -c -1 if k was illegal, and -2 if t was illegal. -c on an error return, a message is also written. -c----------------------------------------------------------------------- -c part iii. common blocks. -c -c if lsoda is to be used in an overlay situation, the user -c must declare, in the primary overlay, the variables in.. -c (1) the call sequence to lsoda, -c (2) the three internal common blocks -c /ls0001/ of length 257 (218 double precision words -c followed by 39 integer words), -c /lsa001/ of length 31 (22 double precision words -c followed by 9 integer words), -c /eh0001/ of length 2 (integer words). -c -c if lsoda is used on a system in which the contents of internal -c common blocks are not preserved between calls, the user should -c declare the above common blocks in his main program to insure -c that their contents are preserved. -c -c if the solution of a given problem by lsoda is to be interrupted -c and then later continued, such as when restarting an interrupted run -c or alternating between two or more problems, the user should save, -c following the return from the last lsoda call prior to the -c interruption, the contents of the call sequence variables and the -c internal common blocks, and later restore these values before the -c next lsoda call for that problem. to save and restore the common -c blocks, use subroutine srcma (see part ii above). -c -c----------------------------------------------------------------------- -c part iv. optionally replaceable solver routines. -c -c below is a description of a routine in the lsoda package which -c relates to the measurement of errors, and can be -c replaced by a user-supplied version, if desired. 
however, since such -c a replacement may have a major impact on performance, it should be -c done only when absolutely necessary, and only with great caution. -c (note.. the means by which the package version of a routine is -c superseded by the user-s version may be system-dependent.) -c -c (a) ewset. -c the following subroutine is called just before each internal -c integration step, and sets the array of error weights, ewt, as -c described under itol/rtol/atol above.. -c subroutine ewset (neq, itol, rtol, atol, ycur, ewt) -c where neq, itol, rtol, and atol are as in the lsoda call sequence, -c ycur contains the current dependent variable vector, and -c ewt is the array of weights set by ewset. -c -c if the user supplies this subroutine, it must return in ewt(i) -c (i = 1,...,neq) a positive quantity suitable for comparing errors -c in y(i) to. the ewt array returned by ewset is passed to the -c vmnorm routine, and also used by lsoda in the computation -c of the optional output imxer, and the increments for difference -c quotient jacobians. -c -c in the user-supplied version of ewset, it may be desirable to use -c the current values of derivatives of y. derivatives up to order nq -c are available from the history array yh, described above under -c optional outputs. in ewset, yh is identical to the ycur array, -c extended to nq + 1 columns with a column length of nyh and scale -c factors of h**j/factorial(j). on the first call for the problem, -c given by nst = 0, nq is 1 and h is temporarily set to 1.0. -c the quantities nq, nyh, h, and nst can be obtained by including -c in ewset the statements.. -c double precision h, rls -c common /ls0001/ rls(218),ils(39) -c nq = ils(35) -c nyh = ils(14) -c nst = ils(36) -c h = rls(212) -c thus, for example, the current value of dy/dt can be obtained as -c ycur(nyh+i)/h (i=1,...,neq) (and the division by h is -c unnecessary when nst = 0). 
-c----------------------------------------------------------------------- -c----------------------------------------------------------------------- -c other routines in the lsoda package. -c -c in addition to subroutine lsoda, the lsoda package includes the -c following subroutines and function routines.. -c intdy computes an interpolated value of the y vector at t = tout. -c stoda is the core integrator, which does one step of the -c integration and the associated error control. -c cfode sets all method coefficients and test constants. -c prja computes and preprocesses the jacobian matrix j = df/dy -c and the newton iteration matrix p = i - h*l0*j. -c solsy manages solution of linear system in chord iteration. -c ewset sets the error weight vector ewt before each step. -c vmnorm computes the weighted max-norm of a vector. -c fnorm computes the norm of a full matrix consistent with the -c weighted max-norm on vectors. -c bnorm computes the norm of a band matrix consistent with the -c weighted max-norm on vectors. -c srcma is a user-callable routine to save and restore -c the contents of the internal common blocks. -c dgefa and dgesl are routines from linpack for solving full -c systems of linear algebraic equations. -c dgbfa and dgbsl are routines from linpack for solving banded -c linear systems. -c daxpy, dscal, idamax, and ddot are basic linear algebra modules -c (blas) used by the above linpack routines. -c d1mach computes the unit roundoff in a machine-independent manner. -c xerrwv, xsetun, and xsetf handle the printing of all error -c messages and warnings. xerrwv is machine-dependent. -c note.. vmnorm, fnorm, bnorm, idamax, ddot, and d1mach are function -c routines. all the others are subroutines. -c -c the intrinsic and external routines used by lsoda are.. -c dabs, dmax1, dmin1, dfloat, max0, min0, mod, dsign, dsqrt, and write. -c -c a block data subprogram is also included with the package, -c for loading some of the variables in internal common. 
-c -c----------------------------------------------------------------------- -c the following card is for optimized compilation on lll compilers. -clll. optimize -c----------------------------------------------------------------------- - external prja, solsy - integer illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer insufr, insufi, ixpr, iowns2, jtyp, mused, mxordn, mxords - integer i, i1, i2, iflag, imxer, kgo, lf0, - 1 leniw, lenrw, lenwm, ml, mord, mu, mxhnl0, mxstp0 - integer len1, len1c, len1n, len1s, len2, leniwc, - 1 lenrwc, lenrwn, lenrws - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision tsw, rowns2, pdnorm - double precision atoli, ayi, big, ewti, h0, hmax, hmx, rh, rtoli, - 1 tcrit, tdist, tnext, tol, tolsf, tp, size, sum, w0, - 2 d1mach, vmnorm - dimension mord(2) - logical ihit -c----------------------------------------------------------------------- -c the following two internal common blocks contain -c (a) variables which are local to any subroutine but whose values must -c be preserved between calls to the routine (own variables), and -c (b) variables which are communicated between subroutines. -c the structure of each block is as follows.. all real variables are -c listed first, followed by all integers. within each type, the -c variables are grouped with those local to subroutine lsoda first, -c then those local to subroutine stoda, and finally those used -c for communication. the block ls0001 is declared in subroutines -c lsoda, intdy, stoda, prja, and solsy. the block lsa001 is declared -c in subroutines lsoda, stoda, and prja. groups of variables are -c replaced by dummy arrays in the common declarations in routines -c where those variables are not used. 
-c----------------------------------------------------------------------- - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lsa001/ tsw, rowns2(20), pdnorm, - 1 insufr, insufi, ixpr, iowns2(2), jtyp, mused, mxordn, mxords -c - data mord(1),mord(2)/12,5/, mxstp0/500/, mxhnl0/10/ -c----------------------------------------------------------------------- -c block a. -c this code block is executed on every call. -c it tests istate and itask for legality and branches appropriately. -c if istate .gt. 1 but the flag init shows that initialization has -c not yet been done, an error return occurs. -c if istate = 1 and tout = t, jump to block g and return immediately. -c----------------------------------------------------------------------- - if (istate .lt. 1 .or. istate .gt. 3) go to 601 - if (itask .lt. 1 .or. itask .gt. 5) go to 602 - if (istate .eq. 1) go to 10 - if (init .eq. 0) go to 603 - if (istate .eq. 2) go to 200 - go to 20 - 10 init = 0 - if (tout .eq. t) go to 430 - 20 ntrep = 0 -c----------------------------------------------------------------------- -c block b. -c the next code block is executed for the initial call (istate = 1), -c or for a continuation call with parameter changes (istate = 3). -c it contains checking of all inputs and various initializations. -c -c first check legality of the non-optional inputs neq, itol, iopt, -c jt, ml, and mu. -c----------------------------------------------------------------------- - if (neq(1) .le. 0) go to 604 - if (istate .eq. 1) go to 25 - if (neq(1) .gt. n) go to 605 - 25 n = neq(1) - if (itol .lt. 1 .or. itol .gt. 4) go to 606 - if (iopt .lt. 0 .or. iopt .gt. 1) go to 607 - if (jt .eq. 3 .or. jt .lt. 1 .or. jt .gt. 
5) go to 608 - jtyp = jt - if (jt .le. 2) go to 30 - ml = iwork(1) - mu = iwork(2) - if (ml .lt. 0 .or. ml .ge. n) go to 609 - if (mu .lt. 0 .or. mu .ge. n) go to 610 - 30 continue -c next process and check the optional inputs. -------------------------- - if (iopt .eq. 1) go to 40 - ixpr = 0 - mxstep = mxstp0 - mxhnil = mxhnl0 - hmxi = 0.0d0 - hmin = 0.0d0 - if (istate .ne. 1) go to 60 - h0 = 0.0d0 - mxordn = mord(1) - mxords = mord(2) - go to 60 - 40 ixpr = iwork(5) - if (ixpr .lt. 0 .or. ixpr .gt. 1) go to 611 - mxstep = iwork(6) - if (mxstep .lt. 0) go to 612 - if (mxstep .eq. 0) mxstep = mxstp0 - mxhnil = iwork(7) - if (mxhnil .lt. 0) go to 613 - if (mxhnil .eq. 0) mxhnil = mxhnl0 - if (istate .ne. 1) go to 50 - h0 = rwork(5) - mxordn = iwork(8) - if (mxordn .lt. 0) go to 628 - if (mxordn .eq. 0) mxordn = 100 - mxordn = min0(mxordn,mord(1)) - mxords = iwork(9) - if (mxords .lt. 0) go to 629 - if (mxords .eq. 0) mxords = 100 - mxords = min0(mxords,mord(2)) - if ((tout - t)*h0 .lt. 0.0d0) go to 614 - 50 hmax = rwork(6) - if (hmax .lt. 0.0d0) go to 615 - hmxi = 0.0d0 - if (hmax .gt. 0.0d0) hmxi = 1.0d0/hmax - hmin = rwork(7) - if (hmin .lt. 0.0d0) go to 616 -c----------------------------------------------------------------------- -c set work array pointers and check lengths lrw and liw. -c if istate = 1, meth is initialized to 1 here to facilitate the -c checking of work space lengths. -c pointers to segments of rwork and iwork are named by prefixing l to -c the name of the segment. e.g., the segment yh starts at rwork(lyh). -c segments of rwork (in order) are denoted yh, wm, ewt, savf, acor. -c if the lengths provided are insufficient for the current method, -c an error return occurs. this is treated as illegal input on the -c first call, but as a problem interruption with istate = -7 on a -c continuation call. if the lengths are sufficient for the current -c method but not for both methods, a warning message is sent. 
-c----------------------------------------------------------------------- - 60 if (istate .eq. 1) meth = 1 - if (istate .eq. 1) nyh = n - lyh = 21 - len1n = 20 + (mxordn + 1)*nyh - len1s = 20 + (mxords + 1)*nyh - lwm = len1s + 1 - if (jt .le. 2) lenwm = n*n + 2 - if (jt .ge. 4) lenwm = (2*ml + mu + 1)*n + 2 - len1s = len1s + lenwm - len1c = len1n - if (meth .eq. 2) len1c = len1s - len1 = max0(len1n,len1s) - len2 = 3*n - lenrw = len1 + len2 - lenrwn = len1n + len2 - lenrws = len1s + len2 - lenrwc = len1c + len2 - iwork(17) = lenrw - liwm = 1 - leniw = 20 + n - leniwc = 20 - if (meth .eq. 2) leniwc = leniw - iwork(18) = leniw - if (istate .eq. 1 .and. lrw .lt. lenrwc) go to 617 - if (istate .eq. 1 .and. liw .lt. leniwc) go to 618 - if (istate .eq. 3 .and. lrw .lt. lenrwc) go to 550 - if (istate .eq. 3 .and. liw .lt. leniwc) go to 555 - lewt = len1 + 1 - insufr = 0 - if (lrw .ge. lenrw) go to 65 - insufr = 2 - lewt = len1c + 1 - call xerrwv( - 1 'lsoda-- warning.. rwork length is sufficient for now, but ', - 1 60, 103, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' may not be later. integration will proceed anyway. ', - 1 60, 103, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is lenrw = i1, while lrw = i2.', - 1 50, 103, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - 65 lsavf = lewt + n - lacor = lsavf + n - insufi = 0 - if (liw .ge. leniw) go to 70 - insufi = 2 - call xerrwv( - 1 'lsoda-- warning.. iwork length is sufficient for now, but ', - 1 60, 104, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' may not be later. integration will proceed anyway. ', - 1 60, 104, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is leniw = i1, while liw = i2.', - 1 50, 104, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - 70 continue -c check rtol and atol for legality. ------------------------------------ - rtoli = rtol(1) - atoli = atol(1) - do 75 i = 1,n - if (itol .ge. 3) rtoli = rtol(i) - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - if (rtoli .lt. 
0.0d0) go to 619 - if (atoli .lt. 0.0d0) go to 620 - 75 continue - if (istate .eq. 1) go to 100 -c if istate = 3, set flag to signal parameter changes to stoda. -------- - jstart = -1 - if (n .eq. nyh) go to 200 -c neq was reduced. zero part of yh to avoid undefined references. ----- - i1 = lyh + l*nyh - i2 = lyh + (maxord + 1)*nyh - 1 - if (i1 .gt. i2) go to 200 - do 95 i = i1,i2 - 95 rwork(i) = 0.0d0 - go to 200 -c----------------------------------------------------------------------- -c block c. -c the next block is for the initial call only (istate = 1). -c it contains all remaining initializations, the initial call to f, -c and the calculation of the initial step size. -c the error weights in ewt are inverted after being loaded. -c----------------------------------------------------------------------- - 100 uround = d1mach(4) - tn = t - tsw = t - maxord = mxordn - if (itask .ne. 4 .and. itask .ne. 5) go to 110 - tcrit = rwork(1) - if ((tcrit - tout)*(tout - t) .lt. 0.0d0) go to 625 - if (h0 .ne. 0.0d0 .and. (t + h0 - tcrit)*h0 .gt. 0.0d0) - 1 h0 = tcrit - t - 110 jstart = 0 - nhnil = 0 - nst = 0 - nje = 0 - nslast = 0 - hu = 0.0d0 - nqu = 0 - mused = 0 - miter = 0 - ccmax = 0.3d0 - maxcor = 3 - msbp = 20 - mxncf = 10 -c initial call to f. (lf0 points to yh(*,2).) ------------------------- - lf0 = lyh + nyh - call srcma(rsav, isav, 1) - call f (neq, t, y, rwork(lf0)) - call srcma(rsav, isav, 2) - nfe = 1 -c load the initial value vector in yh. --------------------------------- - do 115 i = 1,n - 115 rwork(i+lyh-1) = y(i) -c load and invert the ewt array. (h is temporarily set to 1.0.) ------- - nq = 1 - h = 1.0d0 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 120 i = 1,n - if (rwork(i+lewt-1) .le. 
0.0d0) go to 621 - 120 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) -c----------------------------------------------------------------------- -c the coding below computes the step size, h0, to be attempted on the -c first step, unless the user has supplied a value for this. -c first check that tout - t differs significantly from zero. -c a scalar tolerance quantity tol is computed, as max(rtol(i)) -c if this is positive, or max(atol(i)/abs(y(i))) otherwise, adjusted -c so as to be between 100*uround and 1.0e-3. -c then the computed value h0 is given by.. -c -c h0**(-2) = 1./(tol * w0**2) + tol * (norm(f))**2 -c -c where w0 = max ( abs(t), abs(tout) ), -c f = the initial value of the vector f(t,y), and -c norm() = the weighted vector norm used throughout, given by -c the vmnorm function routine, and weighted by the -c tolerances initially loaded into the ewt array. -c the sign of h0 is inferred from the initial values of tout and t. -c abs(h0) is made .le. abs(tout-t) in any case. -c----------------------------------------------------------------------- - if (h0 .ne. 0.0d0) go to 180 - tdist = dabs(tout - t) - w0 = dmax1(dabs(t),dabs(tout)) - if (tdist .lt. 2.0d0*uround*w0) go to 622 - tol = rtol(1) - if (itol .le. 2) go to 140 - do 130 i = 1,n - 130 tol = dmax1(tol,rtol(i)) - 140 if (tol .gt. 0.0d0) go to 160 - atoli = atol(1) - do 150 i = 1,n - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - ayi = dabs(y(i)) - if (ayi .ne. 0.0d0) tol = dmax1(tol,atoli/ayi) - 150 continue - 160 tol = dmax1(tol,100.0d0*uround) - tol = dmin1(tol,0.001d0) - sum = vmnorm (n, rwork(lf0), rwork(lewt)) - sum = 1.0d0/(tol*w0*w0) + tol*sum**2 - h0 = 1.0d0/dsqrt(sum) - h0 = dmin1(h0,tdist) - h0 = dsign(h0,tout-t) -c adjust h0 if necessary to meet hmax bound. --------------------------- - 180 rh = dabs(h0)*hmxi - if (rh .gt. 1.0d0) h0 = h0/rh -c load h with h0 and scale yh(*,2) by h0. 
------------------------------ - h = h0 - do 190 i = 1,n - 190 rwork(i+lf0-1) = h0*rwork(i+lf0-1) - go to 270 -c----------------------------------------------------------------------- -c block d. -c the next code block is for continuation calls only (istate = 2 or 3) -c and is to check stop conditions before taking a step. -c----------------------------------------------------------------------- - 200 nslast = nst - go to (210, 250, 220, 230, 240), itask - 210 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 220 tp = tn - hu*(1.0d0 + 100.0d0*uround) - if ((tp - tout)*h .gt. 0.0d0) go to 623 - if ((tn - tout)*h .lt. 0.0d0) go to 250 - t = tn - go to 400 - 230 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - if ((tcrit - tout)*h .lt. 0.0d0) go to 625 - if ((tn - tout)*h .lt. 0.0d0) go to 245 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 240 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - 245 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) t = tcrit - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (istate .eq. 2 .and. jstart .ge. 0) jstart = -2 -c----------------------------------------------------------------------- -c block e. -c the next block is normally executed for all calls and contains -c the call to the one-step core integrator stoda. -c -c this is a looping point for the integration steps. -c -c first check for too many steps being taken, update ewt (if not at -c start of problem), check for too much accuracy being requested, and -c check for h below the roundoff level in t. -c----------------------------------------------------------------------- - 250 continue - if (meth .eq. mused) go to 255 - if (insufr .eq. 
1) go to 550 - if (insufi .eq. 1) go to 555 - 255 if ((nst-nslast) .ge. mxstep) go to 500 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 260 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 510 - 260 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 270 tolsf = uround*vmnorm (n, rwork(lyh), rwork(lewt)) - if (tolsf .le. 0.01d0) go to 280 - tolsf = tolsf*200.0d0 - if (nst .eq. 0) go to 626 - go to 520 - 280 if ((tn + h) .ne. tn) go to 290 - nhnil = nhnil + 1 - if (nhnil .gt. mxhnil) go to 290 - call xerrwv('lsoda-- warning..internal t (=r1) and h (=r2) are', - 1 50, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' such that in the machine, t + h = t on the next step ', - 1 60, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' (h = step size). solver will continue anyway', - 1 50, 101, 0, 0, 0, 0, 2, tn, h) - if (nhnil .lt. mxhnil) go to 290 - call xerrwv('lsoda-- above warning has been issued i1 times. ', - 1 50, 102, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' it will not be issued again for this problem', - 1 50, 102, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - 290 continue -c----------------------------------------------------------------------- -c call stoda(neq,y,yh,nyh,yh,ewt,savf,acor,wm,iwm,f,jac,prja,solsy) -c----------------------------------------------------------------------- - call stoda (neq, y, rwork(lyh), nyh, rwork(lyh), rwork(lewt), - 1 rwork(lsavf), rwork(lacor), rwork(lwm), iwork(liwm), - 2 f, jac, prja, solsy) - kgo = 1 - kflag - go to (300, 530, 540), kgo -c----------------------------------------------------------------------- -c block f. -c the following block handles the case of a successful return from the -c core integrator (kflag = 0). -c if a method switch was just made, record tsw, reset maxord, -c set jstart to -1 to signal stoda to complete the switch, -c and do extra printing of data if ixpr = 1. -c then, in any case, check for stop conditions. 
-c----------------------------------------------------------------------- - 300 init = 1 - if (meth .eq. mused) go to 310 - tsw = tn - maxord = mxordn - if (meth .eq. 2) maxord = mxords - if (meth .eq. 2) rwork(lwm) = dsqrt(uround) - insufr = min0(insufr,1) - insufi = min0(insufi,1) - jstart = -1 - if (ixpr .eq. 0) go to 310 - if (meth .eq. 2) call xerrwv( - 1 'lsoda-- a switch to the bdf (stiff) method has occurred ', - 1 60, 105, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - if (meth .eq. 1) call xerrwv( - 1 'lsoda-- a switch to the adams (nonstiff) method has occurred', - 1 60, 106, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' at t = r1, tentative step size h = r2, step nst = i1 ', - 1 60, 107, 0, 1, nst, 0, 2, tn, h) - 310 go to (320, 400, 330, 340, 350), itask -c itask = 1. if tout has been reached, interpolate. ------------------- - 320 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 -c itask = 3. jump to exit if tout was reached. ------------------------ - 330 if ((tn - tout)*h .ge. 0.0d0) go to 400 - go to 250 -c itask = 4. see if tout or tcrit was reached. adjust h if necessary. - 340 if ((tn - tout)*h .lt. 0.0d0) go to 345 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 - 345 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (jstart .ge. 0) jstart = -2 - go to 250 -c itask = 5. see if tcrit was reached and jump to exit. --------------- - 350 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx -c----------------------------------------------------------------------- -c block g. -c the following block handles all successful returns from lsoda. -c if itask .ne. 1, y is loaded from yh and t is set accordingly. 
-c istate is set to 2, the illegal input counter is zeroed, and the -c optional outputs are loaded into the work arrays before returning. -c if istate = 1 and tout = t, there is a return with no action taken, -c except that if this has happened repeatedly, the run is terminated. -c----------------------------------------------------------------------- - 400 do 410 i = 1,n - 410 y(i) = rwork(i+lyh-1) - t = tn - if (itask .ne. 4 .and. itask .ne. 5) go to 420 - if (ihit) t = tcrit - 420 istate = 2 - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - rwork(15) = tsw - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - iwork(19) = mused - iwork(20) = meth - return -c - 430 ntrep = ntrep + 1 - if (ntrep .lt. 5) return - call xerrwv( - 1 'lsoda-- repeated calls with istate = 1 and tout = t (=r1) ', - 1 60, 301, 0, 0, 0, 0, 1, t, 0.0d0) - go to 800 -c----------------------------------------------------------------------- -c block h. -c the following block handles all unsuccessful returns other than -c those for illegal input. first the error message routine is called. -c if there was an error test or convergence test failure, imxer is set. -c then y is loaded from yh, t is set to tn, and the illegal input -c counter illin is set to 0. the optional outputs are loaded into -c the work arrays before returning. -c----------------------------------------------------------------------- -c the maximum number of steps was taken before reaching tout. ---------- - 500 call xerrwv('lsoda-- at current t (=r1), mxstep (=i1) steps ', - 1 50, 201, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' taken on this call before reaching tout ', - 1 50, 201, 0, 1, mxstep, 0, 1, tn, 0.0d0) - istate = -1 - go to 580 -c ewt(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 ewti = rwork(lewt+i-1) - call xerrwv('lsoda-- at t (=r1), ewt(i1) has become r2 .le. 
0.', - 1 50, 202, 0, 1, i, 0, 2, tn, ewti) - istate = -6 - go to 580 -c too much accuracy requested for machine precision. ------------------- - 520 call xerrwv('lsoda-- at t (=r1), too much accuracy requested ', - 1 50, 203, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' for precision of machine.. see tolsf (=r2) ', - 1 50, 203, 0, 0, 0, 0, 2, tn, tolsf) - rwork(14) = tolsf - istate = -2 - go to 580 -c kflag = -1. error test failed repeatedly or with abs(h) = hmin. ----- - 530 call xerrwv('lsoda-- at t(=r1) and step size h(=r2), the error', - 1 50, 204, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' test failed repeatedly or with abs(h) = hmin', - 1 50, 204, 0, 0, 0, 0, 2, tn, h) - istate = -4 - go to 560 -c kflag = -2. convergence failed repeatedly or with abs(h) = hmin. ---- - 540 call xerrwv('lsoda-- at t (=r1) and step size h (=r2), the ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' corrector convergence failed repeatedly ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' or with abs(h) = hmin ', - 1 30, 205, 0, 0, 0, 0, 2, tn, h) - istate = -5 - go to 560 -c rwork length too small to proceed. ----------------------------------- - 550 call xerrwv('lsoda-- at current t(=r1), rwork length too small', - 1 50, 206, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' to proceed. the integration was otherwise successful.', - 1 60, 206, 0, 0, 0, 0, 1, tn, 0.0d0) - istate = -7 - go to 580 -c iwork length too small to proceed. ----------------------------------- - 555 call xerrwv('lsoda-- at current t(=r1), iwork length too small', - 1 50, 207, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' to proceed. the integration was otherwise successful.', - 1 60, 207, 0, 0, 0, 0, 1, tn, 0.0d0) - istate = -7 - go to 580 -c compute imxer if relevant. ------------------------------------------- - 560 big = 0.0d0 - imxer = 1 - do 570 i = 1,n - size = dabs(rwork(i+lacor-1)*rwork(i+lewt-1)) - if (big .ge. 
size) go to 570 - big = size - imxer = i - 570 continue - iwork(16) = imxer -c set y vector, t, illin, and optional outputs. ------------------------ - 580 do 590 i = 1,n - 590 y(i) = rwork(i+lyh-1) - t = tn - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - rwork(15) = tsw - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - iwork(19) = mused - iwork(20) = meth - return -c----------------------------------------------------------------------- -c block i. -c the following block handles all error returns due to illegal input -c (istate = -3), as detected before calling the core integrator. -c first the error message routine is called. then if there have been -c 5 consecutive such returns just before this call to the solver, -c the run is halted. -c----------------------------------------------------------------------- - 601 call xerrwv('lsoda-- istate (=i1) illegal ', - 1 30, 1, 0, 1, istate, 0, 0, 0.0d0, 0.0d0) - go to 700 - 602 call xerrwv('lsoda-- itask (=i1) illegal ', - 1 30, 2, 0, 1, itask, 0, 0, 0.0d0, 0.0d0) - go to 700 - 603 call xerrwv('lsoda-- istate .gt. 1 but lsoda not initialized ', - 1 50, 3, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - go to 700 - 604 call xerrwv('lsoda-- neq (=i1) .lt. 1 ', - 1 30, 4, 0, 1, neq(1), 0, 0, 0.0d0, 0.0d0) - go to 700 - 605 call xerrwv('lsoda-- istate = 3 and neq increased (i1 to i2) ', - 1 50, 5, 0, 2, n, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 606 call xerrwv('lsoda-- itol (=i1) illegal ', - 1 30, 6, 0, 1, itol, 0, 0, 0.0d0, 0.0d0) - go to 700 - 607 call xerrwv('lsoda-- iopt (=i1) illegal ', - 1 30, 7, 0, 1, iopt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 608 call xerrwv('lsoda-- jt (=i1) illegal ', - 1 30, 8, 0, 1, jt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 609 call xerrwv('lsoda-- ml (=i1) illegal.. .lt.0 or .ge.neq (=i2)', - 1 50, 9, 0, 2, ml, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 610 call xerrwv('lsoda-- mu (=i1) illegal.. 
.lt.0 or .ge.neq (=i2)', - 1 50, 10, 0, 2, mu, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 611 call xerrwv('lsoda-- ixpr (=i1) illegal ', - 1 30, 11, 0, 1, ixpr, 0, 0, 0.0d0, 0.0d0) - go to 700 - 612 call xerrwv('lsoda-- mxstep (=i1) .lt. 0 ', - 1 30, 12, 0, 1, mxstep, 0, 0, 0.0d0, 0.0d0) - go to 700 - 613 call xerrwv('lsoda-- mxhnil (=i1) .lt. 0 ', - 1 30, 13, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - go to 700 - 614 call xerrwv('lsoda-- tout (=r1) behind t (=r2) ', - 1 40, 14, 0, 0, 0, 0, 2, tout, t) - call xerrwv(' integration direction is given by h0 (=r1) ', - 1 50, 14, 0, 0, 0, 0, 1, h0, 0.0d0) - go to 700 - 615 call xerrwv('lsoda-- hmax (=r1) .lt. 0.0 ', - 1 30, 15, 0, 0, 0, 0, 1, hmax, 0.0d0) - go to 700 - 616 call xerrwv('lsoda-- hmin (=r1) .lt. 0.0 ', - 1 30, 16, 0, 0, 0, 0, 1, hmin, 0.0d0) - go to 700 - 617 call xerrwv( - 1 'lsoda-- rwork length needed, lenrw (=i1), exceeds lrw (=i2)', - 1 60, 17, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 618 call xerrwv( - 1 'lsoda-- iwork length needed, leniw (=i1), exceeds liw (=i2)', - 1 60, 18, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - go to 700 - 619 call xerrwv('lsoda-- rtol(i1) is r1 .lt. 0.0 ', - 1 40, 19, 0, 1, i, 0, 1, rtoli, 0.0d0) - go to 700 - 620 call xerrwv('lsoda-- atol(i1) is r1 .lt. 0.0 ', - 1 40, 20, 0, 1, i, 0, 1, atoli, 0.0d0) - go to 700 - 621 ewti = rwork(lewt+i-1) - call xerrwv('lsoda-- ewt(i1) is r1 .le. 
0.0 ', - 1 40, 21, 0, 1, i, 0, 1, ewti, 0.0d0) - go to 700 - 622 call xerrwv( - 1 'lsoda-- tout (=r1) too close to t(=r2) to start integration', - 1 60, 22, 0, 0, 0, 0, 2, tout, t) - go to 700 - 623 call xerrwv( - 1 'lsoda-- itask = i1 and tout (=r1) behind tcur - hu (= r2) ', - 1 60, 23, 0, 1, itask, 0, 2, tout, tp) - go to 700 - 624 call xerrwv( - 1 'lsoda-- itask = 4 or 5 and tcrit (=r1) behind tcur (=r2) ', - 1 60, 24, 0, 0, 0, 0, 2, tcrit, tn) - go to 700 - 625 call xerrwv( - 1 'lsoda-- itask = 4 or 5 and tcrit (=r1) behind tout (=r2) ', - 1 60, 25, 0, 0, 0, 0, 2, tcrit, tout) - go to 700 - 626 call xerrwv('lsoda-- at start of problem, too much accuracy ', - 1 50, 26, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' requested for precision of machine.. see tolsf (=r1) ', - 1 60, 26, 0, 0, 0, 0, 1, tolsf, 0.0d0) - rwork(14) = tolsf - go to 700 - 627 call xerrwv('lsoda-- trouble from intdy. itask = i1, tout = r1', - 1 50, 27, 0, 1, itask, 0, 1, tout, 0.0d0) - go to 700 - 628 call xerrwv('lsoda-- mxordn (=i1) .lt. 0 ', - 1 30, 28, 0, 1, mxordn, 0, 0, 0.0d0, 0.0d0) - go to 700 - 629 call xerrwv('lsoda-- mxords (=i1) .lt. 0 ', - 1 30, 29, 0, 1, mxords, 0, 0, 0.0d0, 0.0d0) -c - 700 if (illin .eq. 5) go to 710 - illin = illin + 1 - istate = -3 - return - 710 call xerrwv('lsoda-- repeated occurrences of illegal input ', - 1 50, 302, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 800 call xerrwv('lsoda-- run aborted.. 
apparent infinite loop ', - 1 50, 303, 2, 0, 0, 0, 0, 0.0d0, 0.0d0) - return -c----------------------- end of subroutine lsoda ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/lsodar.f b/scipy-0.10.1/scipy/integrate/odepack/lsodar.f deleted file mode 100644 index 17a80a7e19..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/lsodar.f +++ /dev/null @@ -1,1855 +0,0 @@ - subroutine lsodar (f, neq, y, t, tout, itol, rtol, atol, itask, - 1 istate, iopt, rwork, lrw, iwork, liw, jac, jt, - 2 g, ng, jroot) - external f, jac, g - integer neq, itol, itask, istate, iopt, lrw, iwork, liw, jt, - 1 ng, jroot - double precision y, t, tout, rtol, atol, rwork - dimension neq(1), y(1), rtol(1), atol(1), rwork(lrw), iwork(liw), - 1 jroot(ng) -c----------------------------------------------------------------------- -c this is the 24 feb 1997 version of -c lsodar.. livermore solver for ordinary differential equations, with -c automatic method switching for stiff and nonstiff problems, -c and with root-finding. -c -c this version is in double precision. -c -c lsodar solves the initial value problem for stiff or nonstiff -c systems of first order ode-s, -c dy/dt = f(t,y) , or, in component form, -c dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(neq)) (i = 1,...,neq). -c at the same time, it locates the roots of any of a set of functions -c g(i) = g(i,t,y(1),...,y(neq)) (i = 1,...,ng). -c -c this a variant version of the lsode package. it differs from lsode -c in two ways.. -c (a) it switches automatically between stiff and nonstiff methods. -c this means that the user does not have to determine whether the -c problem is stiff or not, and the solver will automatically choose the -c appropriate method. it always starts with the nonstiff method. -c (b) it finds the root of at least one of a set of constraint -c functions g(i) of the independent and dependent variables. 
-c it finds only those roots for which some g(i), as a function -c of t, changes sign in the interval of integration. -c it then returns the solution at the root, if that occurs -c sooner than the specified stop condition, and otherwise returns -c the solution according the specified stop condition. -c -c authors.. -c linda r. petzold and alan c. hindmarsh, -c computing and mathematics research division, l-316 -c lawrence livermore national laboratory -c livermore, ca 94550. -c -c references.. -c 1. alan c. hindmarsh, odepack, a systematized collection of ode -c solvers, in scientific computing, r. s. stepleman et al. (eds.), -c north-holland, amsterdam, 1983, pp. 55-64. -c 2. linda r. petzold, automatic selection of methods for solving -c stiff and nonstiff systems of ordinary differential equations, -c siam j. sci. stat. comput. 4 (1983), pp. 136-148. -c 3. kathie l. hiebert and lawrence f. shampine, implicitly defined -c output points for solutions of ode-s, sandia report sand80-0180, -c february, 1980. -c----------------------------------------------------------------------- -c summary of usage. -c -c communication between the user and the lsodar package, for normal -c situations, is summarized here. this summary describes only a subset -c of the full set of options available. see the full description for -c details, including alternative treatment of the jacobian matrix, -c optional inputs and outputs, nonstandard options, and -c instructions for special situations. see also the example -c problem (with program and output) following this summary. -c -c a. first provide a subroutine of the form.. -c subroutine f (neq, t, y, ydot) -c dimension y(neq), ydot(neq) -c which supplies the vector function f by loading ydot(i) with f(i). -c -c b. provide a subroutine of the form.. -c subroutine g (neq, t, y, ng, gout) -c dimension y(neq), gout(ng) -c which supplies the vector function g by loading gout(i) with -c g(i), the i-th constraint function whose root is sought. 
-c -c c. write a main program which calls subroutine lsodar once for -c each point at which answers are desired. this should also provide -c for possible use of logical unit 6 for output of error messages by -c lsodar. on the first call to lsodar, supply arguments as follows.. -c f = name of subroutine for right-hand side vector f. -c this name must be declared external in calling program. -c neq = number of first order ode-s. -c y = array of initial values, of length neq. -c t = the initial value of the independent variable. -c tout = first point where output is desired (.ne. t). -c itol = 1 or 2 according as atol (below) is a scalar or array. -c rtol = relative tolerance parameter (scalar). -c atol = absolute tolerance parameter (scalar or array). -c the estimated local error in y(i) will be controlled so as -c to be less than -c ewt(i) = rtol*abs(y(i)) + atol if itol = 1, or -c ewt(i) = rtol*abs(y(i)) + atol(i) if itol = 2. -c thus the local error test passes if, in each component, -c either the absolute error is less than atol (or atol(i)), -c or the relative error is less than rtol. -c use rtol = 0.0 for pure absolute error control, and -c use atol = 0.0 (or atol(i) = 0.0) for pure relative error -c control. caution.. actual (global) errors may exceed these -c local tolerances, so choose them conservatively. -c itask = 1 for normal computation of output values of y at t = tout. -c istate = integer flag (input and output). set istate = 1. -c iopt = 0 to indicate no optional inputs used. -c rwork = real work array of length at least.. -c 22 + neq * max(16, neq + 9) + 3*ng. -c see also paragraph f below. -c lrw = declared length of rwork (in user-s dimension). -c iwork = integer work array of length at least 20 + neq. -c liw = declared length of iwork (in user-s dimension). -c jac = name of subroutine for jacobian matrix. -c use a dummy name. see also paragraph f below. -c jt = jacobian type indicator. set jt = 2. -c see also paragraph f below. 
-c g = name of subroutine for constraint functions, whose -c roots are desired during the integration. -c this name must be declared external in calling program. -c ng = number of constraint functions g(i). if there are none, -c set ng = 0, and pass a dummy name for g. -c jroot = integer array of length ng for output of root information. -c see next paragraph. -c note that the main program must declare arrays y, rwork, iwork, -c jroot, and possibly atol. -c -c d. the output from the first call (or any call) is.. -c y = array of computed values of y(t) vector. -c t = corresponding value of independent variable. this is -c tout if istate = 2, or the root location if istate = 3, -c or the farthest point reached if lsodar was unsuccessful. -c istate = 2 or 3 if lsodar was successful, negative otherwise. -c 2 means no root was found, and tout was reached as desired. -c 3 means a root was found prior to reaching tout. -c -1 means excess work done on this call (perhaps wrong jt). -c -2 means excess accuracy requested (tolerances too small). -c -3 means illegal input detected (see printed message). -c -4 means repeated error test failures (check all inputs). -c -5 means repeated convergence failures (perhaps bad jacobian -c supplied or wrong choice of jt or tolerances). -c -6 means error weight became zero during problem. (solution -c component i vanished, and atol or atol(i) = 0.) -c -7 means work space insufficient to finish (see messages). -c jroot = array showing roots found if istate = 3 on return. -c jroot(i) = 1 if g(i) has a root at t, or 0 otherwise. -c -c e. to continue the integration after a successful return, proceed -c as follows.. -c (a) if istate = 2 on return, reset tout and call lsodar again. -c (b) if istate = 3 on return, reset istate to 2 and call lsodar again. -c in either case, no other parameters need be reset. -c -c f. note.. 
if and when lsodar regards the problem as stiff, and -c switches methods accordingly, it must make use of the neq by neq -c jacobian matrix, j = df/dy. for the sake of simplicity, the -c inputs to lsodar recommended in paragraph c above cause lsodar to -c treat j as a full matrix, and to approximate it internally by -c difference quotients. alternatively, j can be treated as a band -c matrix (with great potential reduction in the size of the rwork -c array). also, in either the full or banded case, the user can supply -c j in closed form, with a routine whose name is passed as the jac -c argument. these alternatives are described in the paragraphs on -c rwork, jac, and jt in the full description of the call sequence below. -c -c----------------------------------------------------------------------- -c example problem. -c -c the following is a simple example problem, with the coding -c needed for its solution by lsodar. the problem is from chemical -c kinetics, and consists of the following three rate equations.. -c dy1/dt = -.04*y1 + 1.e4*y2*y3 -c dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 -c dy3/dt = 3.e7*y2**2 -c on the interval from t = 0.0 to t = 4.e10, with initial conditions -c y1 = 1.0, y2 = y3 = 0. the problem is stiff. -c in addition, we want to find the values of t, y1, y2, and y3 at which -c (1) y1 reaches the value 1.e-4, and -c (2) y3 reaches the value 1.e-2. -c -c the following coding solves this problem with lsodar, -c printing results at t = .4, 4., ..., 4.e10, and at the computed -c roots. it uses itol = 2 and atol much smaller for y2 than y1 or y3 -c because y2 has much smaller values. -c at the end of the run, statistical quantities of interest are -c printed (see optional outputs in the full description below). 
-c -c external fex, gex -c double precision atol, rtol, rwork, t, tout, y -c dimension y(3), atol(3), rwork(76), iwork(23), jroot(2) -c neq = 3 -c y(1) = 1.0d0 -c y(2) = 0.0d0 -c y(3) = 0.0d0 -c t = 0.0d0 -c tout = 0.4d0 -c itol = 2 -c rtol = 1.0d-4 -c atol(1) = 1.0d-6 -c atol(2) = 1.0d-10 -c atol(3) = 1.0d-6 -c itask = 1 -c istate = 1 -c iopt = 0 -c lrw = 76 -c liw = 23 -c jt = 2 -c ng = 2 -c do 40 iout = 1,12 -c 10 call lsodar(fex,neq,y,t,tout,itol,rtol,atol,itask,istate, -c 1 iopt,rwork,lrw,iwork,liw,jdum,jt,gex,ng,jroot) -c write(6,20)t,y(1),y(2),y(3) -c 20 format(' at t =',e12.4,' y =',3e14.6) -c if (istate .lt. 0) go to 80 -c if (istate .eq. 2) go to 40 -c write(6,30)jroot(1),jroot(2) -c 30 format(5x,' the above line is a root, jroot =',2i5) -c istate = 2 -c go to 10 -c 40 tout = tout*10.0d0 -c write(6,60)iwork(11),iwork(12),iwork(13),iwork(10), -c 1 iwork(19),rwork(15) -c 60 format(/' no. steps =',i4,' no. f-s =',i4,' no. j-s =',i4, -c 1 ' no. g-s =',i4/ -c 2 ' method last used =',i2,' last switch was at t =',e12.4) -c stop -c 80 write(6,90)istate -c 90 format(///' error halt.. istate =',i3) -c stop -c end -c -c subroutine fex (neq, t, y, ydot) -c double precision t, y, ydot -c dimension y(3), ydot(3) -c ydot(1) = -0.04d0*y(1) + 1.0d4*y(2)*y(3) -c ydot(3) = 3.0d7*y(2)*y(2) -c ydot(2) = -ydot(1) - ydot(3) -c return -c end -c -c subroutine gex (neq, t, y, ng, gout) -c double precision t, y, gout -c dimension y(3), gout(2) -c gout(1) = y(1) - 1.0d-4 -c gout(2) = y(3) - 1.0d-2 -c return -c end -c -c the output of this program (on a cdc-7600 in single precision) -c is as follows.. 
-c -c at t = 2.6400e-01 y = 9.899653e-01 3.470563e-05 1.000000e-02 -c the above line is a root, jroot = 0 1 -c at t = 4.0000e-01 y = 9.851712e-01 3.386380e-05 1.479493e-02 -c at t = 4.0000e+00 y = 9.055333e-01 2.240655e-05 9.444430e-02 -c at t = 4.0000e+01 y = 7.158403e-01 9.186334e-06 2.841505e-01 -c at t = 4.0000e+02 y = 4.505250e-01 3.222964e-06 5.494717e-01 -c at t = 4.0000e+03 y = 1.831975e-01 8.941774e-07 8.168016e-01 -c at t = 4.0000e+04 y = 3.898730e-02 1.621940e-07 9.610125e-01 -c at t = 4.0000e+05 y = 4.936363e-03 1.984221e-08 9.950636e-01 -c at t = 4.0000e+06 y = 5.161831e-04 2.065786e-09 9.994838e-01 -c at t = 2.0745e+07 y = 1.000000e-04 4.000395e-10 9.999000e-01 -c the above line is a root, jroot = 1 0 -c at t = 4.0000e+07 y = 5.179817e-05 2.072032e-10 9.999482e-01 -c at t = 4.0000e+08 y = 5.283401e-06 2.113371e-11 9.999947e-01 -c at t = 4.0000e+09 y = 4.659031e-07 1.863613e-12 9.999995e-01 -c at t = 4.0000e+10 y = 1.404280e-08 5.617126e-14 1.000000e+00 -c -c no. steps = 361 no. f-s = 693 no. j-s = 64 no. g-s = 390 -c method last used = 2 last switch was at t = 6.0092e-03 -c----------------------------------------------------------------------- -c full description of user interface to lsodar. -c -c the user interface to lsodar consists of the following parts. -c -c i. the call sequence to subroutine lsodar, which is a driver -c routine for the solver. this includes descriptions of both -c the call sequence arguments and of user-supplied routines. -c following these descriptions is a description of -c optional inputs available through the call sequence, and then -c a description of optional outputs (in the work arrays). -c -c ii. descriptions of other routines in the lsodar package that may be -c (optionally) called by the user. these provide the ability to -c alter error message handling, save and restore the internal -c common, and obtain specified derivatives of the solution y(t). -c -c iii. 
descriptions of common blocks to be declared in overlay -c or similar environments, or to be saved when doing an interrupt -c of the problem and continued solution later. -c -c iv. description of a subroutine in the lsodar package, -c which the user may replace with his own version, if desired. -c this relates to the measurement of errors. -c -c----------------------------------------------------------------------- -c part i. call sequence. -c -c the call sequence parameters used for input only are -c f, neq, tout, itol, rtol, atol, itask, iopt, lrw, liw, jac, -c jt, g, and ng, -c that used only for output is jroot, -c and those used for both input and output are -c y, t, istate. -c the work arrays rwork and iwork are also used for conditional and -c optional inputs and optional outputs. (the term output here refers -c to the return from subroutine lsodar to the user-s calling program.) -c -c the legality of input parameters will be thoroughly checked on the -c initial call for the problem, but not checked thereafter unless a -c change in input parameters is flagged by istate = 3 on input. -c -c the descriptions of the call arguments are as follows. -c -c f = the name of the user-supplied subroutine defining the -c ode system. the system must be put in the first-order -c form dy/dt = f(t,y), where f is a vector-valued function -c of the scalar t and the vector y. subroutine f is to -c compute the function f. it is to have the form -c subroutine f (neq, t, y, ydot) -c dimension y(1), ydot(1) -c where neq, t, and y are input, and the array ydot = f(t,y) -c is output. y and ydot are arrays of length neq. -c (in the dimension statement above, 1 is a dummy -c dimension.. it can be replaced by any value.) -c subroutine f should not alter y(1),...,y(neq). -c f must be declared external in the calling program. -c -c subroutine f may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... 
if neq is an array -c (dimensioned in f) and/or y has length exceeding neq(1). -c see the descriptions of neq and y below. -c -c if quantities computed in the f routine are needed -c externally to lsodar, an extra call to f should be made -c for this purpose, for consistent and accurate results. -c if only the derivative dy/dt is needed, use intdy instead. -c -c neq = the size of the ode system (number of first order -c ordinary differential equations). used only for input. -c neq may be decreased, but not increased, during the problem. -c if neq is decreased (with istate = 3 on input), the -c remaining components of y should be left undisturbed, if -c these are to be accessed in f and/or jac. -c -c normally, neq is a scalar, and it is generally referred to -c as a scalar in this user interface description. however, -c neq may be an array, with neq(1) set to the system size. -c (the lsodar package accesses only neq(1).) in either case, -c this parameter is passed as the neq argument in all calls -c to f, jac, and g. hence, if it is an array, locations -c neq(2),... may be used to store other integer data and pass -c it to f, jac, and g. each such subroutine must include -c neq in a dimension statement in that case. -c -c y = a real array for the vector of dependent variables, of -c length neq or more. used for both input and output on the -c first call (istate = 1), and only for output on other calls. -c on the first call, y must contain the vector of initial -c values. on output, y contains the computed solution vector, -c evaluated at t. if desired, the y array may be used -c for other purposes between calls to the solver. -c -c this array is passed as the y argument in all calls to f, -c jac, and g. hence its length may exceed neq, and locations -c y(neq+1),... may be used to store other real data and -c pass it to f, jac, and g. (the lsodar package accesses only -c y(1),...,y(neq).) -c -c t = the independent variable. 
on input, t is used only on the -c first call, as the initial point of the integration. -c on output, after each call, t is the value at which a -c computed solution y is evaluated (usually the same as tout). -c if a root was found, t is the computed location of the -c root reached first, on output. -c on an error return, t is the farthest point reached. -c -c tout = the next value of t at which a computed solution is desired. -c used only for input. -c -c when starting the problem (istate = 1), tout may be equal -c to t for one call, then should .ne. t for the next call. -c for the initial t, an input value of tout .ne. t is used -c in order to determine the direction of the integration -c (i.e. the algebraic sign of the step sizes) and the rough -c scale of the problem. integration in either direction -c (forward or backward in t) is permitted. -c -c if itask = 2 or 5 (one-step modes), tout is ignored after -c the first call (i.e. the first call with tout .ne. t). -c otherwise, tout is required on every call. -c -c if itask = 1, 3, or 4, the values of tout need not be -c monotone, but a value of tout which backs up is limited -c to the current internal t interval, whose endpoints are -c tcur - hu and tcur (see optional outputs, below, for -c tcur and hu). -c -c itol = an indicator for the type of error control. see -c description below under atol. used only for input. -c -c rtol = a relative error tolerance parameter, either a scalar or -c an array of length neq. see description below under atol. -c input only. -c -c atol = an absolute error tolerance parameter, either a scalar or -c an array of length neq. input only. -c -c the input parameters itol, rtol, and atol determine -c the error control performed by the solver. the solver will -c control the vector e = (e(i)) of estimated local errors -c in y, according to an inequality of the form -c max-norm of ( e(i)/ewt(i) ) .le. 1, -c where ewt = (ewt(i)) is a vector of positive error weights. 
-c the values of rtol and atol should all be non-negative. -c the following table gives the types (scalar/array) of -c rtol and atol, and the corresponding form of ewt(i). -c -c itol rtol atol ewt(i) -c 1 scalar scalar rtol*abs(y(i)) + atol -c 2 scalar array rtol*abs(y(i)) + atol(i) -c 3 array scalar rtol(i)*abs(y(i)) + atol -c 4 array array rtol(i)*abs(y(i)) + atol(i) -c -c when either of these parameters is a scalar, it need not -c be dimensioned in the user-s calling program. -c -c if none of the above choices (with itol, rtol, and atol -c fixed throughout the problem) is suitable, more general -c error controls can be obtained by substituting a -c user-supplied routine for the setting of ewt. -c see part iv below. -c -c if global errors are to be estimated by making a repeated -c run on the same problem with smaller tolerances, then all -c components of rtol and atol (i.e. of ewt) should be scaled -c down uniformly. -c -c itask = an index specifying the task to be performed. -c input only. itask has the following values and meanings. -c 1 means normal computation of output values of y(t) at -c t = tout (by overshooting and interpolating). -c 2 means take one step only and return. -c 3 means stop at the first internal mesh point at or -c beyond t = tout and return. -c 4 means normal computation of output values of y(t) at -c t = tout but without overshooting t = tcrit. -c tcrit must be input as rwork(1). tcrit may be equal to -c or beyond tout, but not behind it in the direction of -c integration. this option is useful if the problem -c has a singularity at or beyond t = tcrit. -c 5 means take one step, without passing tcrit, and return. -c tcrit must be input as rwork(1). -c -c note.. if itask = 4 or 5 and the solver reaches tcrit -c (within roundoff), it will return t = tcrit (exactly) to -c indicate this (unless itask = 4 and tout comes before tcrit, -c in which case answers at t = tout are returned first). 
-c -c istate = an index used for input and output to specify the -c the state of the calculation. -c -c on input, the values of istate are as follows. -c 1 means this is the first call for the problem -c (initializations will be done). see note below. -c 2 means this is not the first call, and the calculation -c is to continue normally, with no change in any input -c parameters except possibly tout and itask. -c (if itol, rtol, and/or atol are changed between calls -c with istate = 2, the new values will be used but not -c tested for legality.) -c 3 means this is not the first call, and the -c calculation is to continue normally, but with -c a change in input parameters other than -c tout and itask. changes are allowed in -c neq, itol, rtol, atol, iopt, lrw, liw, jt, ml, mu, -c and any optional inputs except h0, mxordn, and mxords. -c (see iwork description for ml and mu.) -c in addition, immediately following a return with -c istate = 3 (root found), ng and g may be changed. -c (but changing ng from 0 to .gt. 0 is not allowed.) -c note.. a preliminary call with tout = t is not counted -c as a first call here, as no initialization or checking of -c input is done. (such a call is sometimes useful for the -c purpose of outputting the initial conditions.) -c thus the first call for which tout .ne. t requires -c istate = 1 on input. -c -c on output, istate has the following values and meanings. -c 1 means nothing was done, as tout was equal to t with -c istate = 1 on input. (however, an internal counter was -c set to detect and prevent repeated calls of this type.) -c 2 means the integration was performed successfully, and -c no roots were found. -c 3 means the integration was successful, and one or more -c roots were found before satisfying the stop condition -c specified by itask. see jroot. 
-c -1 means an excessive amount of work (more than mxstep -c steps) was done on this call, before completing the -c requested task, but the integration was otherwise -c successful as far as t. (mxstep is an optional input -c and is normally 500.) to continue, the user may -c simply reset istate to a value .gt. 1 and call again -c (the excess work step counter will be reset to 0). -c in addition, the user may increase mxstep to avoid -c this error return (see below on optional inputs). -c -2 means too much accuracy was requested for the precision -c of the machine being used. this was detected before -c completing the requested task, but the integration -c was successful as far as t. to continue, the tolerance -c parameters must be reset, and istate must be set -c to 3. the optional output tolsf may be used for this -c purpose. (note.. if this condition is detected before -c taking any steps, then an illegal input return -c (istate = -3) occurs instead.) -c -3 means illegal input was detected, before taking any -c integration steps. see written message for details. -c note.. if the solver detects an infinite loop of calls -c to the solver with illegal input, it will cause -c the run to stop. -c -4 means there were repeated error test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c the problem may have a singularity, or the input -c may be inappropriate. -c -5 means there were repeated convergence test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c this may be caused by an inaccurate jacobian matrix, -c if one is being used. -c -6 means ewt(i) became zero for some i during the -c integration. pure relative error control (atol(i)=0.0) -c was requested on a variable which has now vanished. -c the integration was successful as far as t. 
-c -7 means the length of rwork and/or iwork was too small to -c proceed, but the integration was successful as far as t. -c this happens when lsodar chooses to switch methods -c but lrw and/or liw is too small for the new method. -c -c note.. since the normal output value of istate is 2, -c it does not need to be reset for normal continuation. -c also, since a negative input value of istate will be -c regarded as illegal, a negative output value requires the -c user to change it, and possibly other inputs, before -c calling the solver again. -c -c iopt = an integer flag to specify whether or not any optional -c inputs are being used on this call. input only. -c the optional inputs are listed separately below. -c iopt = 0 means no optional inputs are being used. -c default values will be used in all cases. -c iopt = 1 means one or more optional inputs are being used. -c -c rwork = a real array (double precision) for work space, and (in the -c first 20 words) for conditional and optional inputs and -c optional outputs. -c as lsodar switches automatically between stiff and nonstiff -c methods, the required length of rwork can change during the -c problem. thus the rwork array passed to lsodar can either -c have a static (fixed) length large enough for both methods, -c or have a dynamic (changing) length altered by the calling -c program in response to output from lsodar. -c -c --- fixed length case --- -c if the rwork length is to be fixed, it should be at least -c max (lrn, lrs), -c where lrn and lrs are the rwork lengths required when the -c current method is nonstiff or stiff, respectively. -c -c the separate rwork length requirements lrn and lrs are -c as follows.. -c if neq is constant and the maximum method orders have -c their default values, then -c lrn = 20 + 16*neq + 3*ng, -c lrs = 22 + 9*neq + neq**2 + 3*ng (jt = 1 or 2), -c lrs = 22 + 10*neq + (2*ml+mu)*neq + 3*ng (jt = 4 or 5). -c under any other conditions, lrn and lrs are given by.. 
-c lrn = 20 + nyh*(mxordn+1) + 3*neq + 3*ng, -c lrs = 20 + nyh*(mxords+1) + 3*neq + lmat + 3*ng, -c where -c nyh = the initial value of neq, -c mxordn = 12, unless a smaller value is given as an -c optional input, -c mxords = 5, unless a smaller value is given as an -c optional input, -c lmat = length of matrix work space.. -c lmat = neq**2 + 2 if jt = 1 or 2, -c lmat = (2*ml + mu + 1)*neq + 2 if jt = 4 or 5. -c -c --- dynamic length case --- -c if the length of rwork is to be dynamic, then it should -c be at least lrn or lrs, as defined above, depending on the -c current method. initially, it must be at least lrn (since -c lsodar starts with the nonstiff method). on any return -c from lsodar, the optional output mcur indicates the current -c method. if mcur differs from the value it had on the -c previous return, or if there has only been one call to -c lsodar and mcur is now 2, then lsodar has switched -c methods during the last call, and the length of rwork -c should be reset (to lrn if mcur = 1, or to lrs if -c mcur = 2). (an increase in the rwork length is required -c if lsodar returned istate = -7, but not otherwise.) -c after resetting the length, call lsodar with istate = 3 -c to signal that change. -c -c lrw = the length of the array rwork, as declared by the user. -c (this will be checked by the solver.) -c -c iwork = an integer array for work space. -c as lsodar switches automatically between stiff and nonstiff -c methods, the required length of iwork can change during -c problem, between -c lis = 20 + neq and lin = 20, -c respectively. thus the iwork array passed to lsodar can -c either have a fixed length of at least 20 + neq, or have a -c dynamic length of at least lin or lis, depending on the -c current method. the comments on dynamic length under -c rwork above apply here. initially, this length need -c only be at least lin = 20. -c -c the first few words of iwork are used for conditional and -c optional inputs and optional outputs. 
-c -c the following 2 words in iwork are conditional inputs.. -c iwork(1) = ml these are the lower and upper -c iwork(2) = mu half-bandwidths, respectively, of the -c banded jacobian, excluding the main diagonal. -c the band is defined by the matrix locations -c (i,j) with i-ml .le. j .le. i+mu. ml and mu -c must satisfy 0 .le. ml,mu .le. neq-1. -c these are required if jt is 4 or 5, and -c ignored otherwise. ml and mu may in fact be -c the band parameters for a matrix to which -c df/dy is only approximately equal. -c -c liw = the length of the array iwork, as declared by the user. -c (this will be checked by the solver.) -c -c note.. the base addresses of the work arrays must not be -c altered between calls to lsodar for the same problem. -c the contents of the work arrays must not be altered -c between calls, except possibly for the conditional and -c optional inputs, and except for the last 3*neq words of rwork. -c the latter space is used for internal scratch space, and so is -c available for use by the user outside lsodar between calls, if -c desired (but not for use by f, jac, or g). -c -c jac = the name of the user-supplied routine to compute the -c jacobian matrix, df/dy, if jt = 1 or 4. the jac routine -c is optional, but if the problem is expected to be stiff much -c of the time, you are encouraged to supply jac, for the sake -c of efficiency. (alternatively, set jt = 2 or 5 to have -c lsodar compute df/dy internally by difference quotients.) -c if and when lsodar uses df/dy, if treats this neq by neq -c matrix either as full (jt = 1 or 2), or as banded (jt = -c 4 or 5) with half-bandwidths ml and mu (discussed under -c iwork above). in either case, if jt = 1 or 4, the jac -c routine must compute df/dy as a function of the scalar t -c and the vector y. 
it is to have the form -c subroutine jac (neq, t, y, ml, mu, pd, nrowpd) -c dimension y(1), pd(nrowpd,1) -c where neq, t, y, ml, mu, and nrowpd are input and the array -c pd is to be loaded with partial derivatives (elements of -c the jacobian matrix) on output. pd must be given a first -c dimension of nrowpd. t and y have the same meaning as in -c subroutine f. (in the dimension statement above, 1 is a -c dummy dimension.. it can be replaced by any value.) -c in the full matrix case (jt = 1), ml and mu are -c ignored, and the jacobian is to be loaded into pd in -c columnwise manner, with df(i)/dy(j) loaded into pd(i,j). -c in the band matrix case (jt = 4), the elements -c within the band are to be loaded into pd in columnwise -c manner, with diagonal lines of df/dy loaded into the rows -c of pd. thus df(i)/dy(j) is to be loaded into pd(i-j+mu+1,j). -c ml and mu are the half-bandwidth parameters (see iwork). -c the locations in pd in the two triangular areas which -c correspond to nonexistent matrix elements can be ignored -c or loaded arbitrarily, as they are overwritten by lsodar. -c jac need not provide df/dy exactly. a crude -c approximation (possibly with a smaller bandwidth) will do. -c in either case, pd is preset to zero by the solver, -c so that only the nonzero elements need be loaded by jac. -c each call to jac is preceded by a call to f with the same -c arguments neq, t, and y. thus to gain some efficiency, -c intermediate quantities shared by both calculations may be -c saved in a user common block by f and not recomputed by jac, -c if desired. also, jac may alter the y array, if desired. -c jac must be declared external in the calling program. -c subroutine jac may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in jac) and/or y has length exceeding neq(1). -c see the descriptions of neq and y above. -c -c jt = jacobian type indicator. used only for input. 
-c jt specifies how the jacobian matrix df/dy will be -c treated, if and when lsodar requires this matrix. -c jt has the following values and meanings.. -c 1 means a user-supplied full (neq by neq) jacobian. -c 2 means an internally generated (difference quotient) full -c jacobian (using neq extra calls to f per df/dy value). -c 4 means a user-supplied banded jacobian. -c 5 means an internally generated banded jacobian (using -c ml+mu+1 extra calls to f per df/dy evaluation). -c if jt = 1 or 4, the user must supply a subroutine jac -c (the name is arbitrary) as described above under jac. -c if jt = 2 or 5, a dummy argument can be used. -c -c g = the name of subroutine for constraint functions, whose -c roots are desired during the integration. it is to have -c the form -c subroutine g (neq, t, y, ng, gout) -c dimension y(neq), gout(ng) -c where neq, t, y, and ng are input, and the array gout -c is output. neq, t, and y have the same meaning as in -c the f routine, and gout is an array of length ng. -c for i = 1,...,ng, this routine is to load into gout(i) -c the value at (t,y) of the i-th constraint function g(i). -c lsodar will find roots of the g(i) of odd multiplicity -c (i.e. sign changes) as they occur during the integration. -c g must be declared external in the calling program. -c -c caution.. because of numerical errors in the functions -c g(i) due to roundoff and integration error, lsodar may -c return false roots, or return the same root at two or more -c nearly equal values of t. if such false roots are -c suspected, the user should consider smaller error tolerances -c and/or higher precision in the evaluation of the g(i). -c -c if a root of some g(i) defines the end of the problem, -c the input to lsodar should nevertheless allow integration -c to a point slightly past that root, so that lsodar can -c locate the root by interpolation. -c -c subroutine g may access user-defined quantities in -c neq(2),... and y(neq(1)+1),... 
if neq is an array -c (dimensioned in g) and y has length exceeding neq(1). -c see the descriptions of neq and y above. -c -c ng = number of constraint functions g(i). if there are none, -c set ng = 0, and pass a dummy name for g. -c -c jroot = integer array of length ng. used only for output. -c on a return with istate = 3 (one or more roots found), -c jroot(i) = 1 if g(i) has a root at t, or jroot(i) = 0 if not. -c----------------------------------------------------------------------- -c optional inputs. -c -c the following is a list of the optional inputs provided for in the -c call sequence. (see also part ii.) for each such input variable, -c this table lists its name as used in this documentation, its -c location in the call sequence, its meaning, and the default value. -c the use of any of these inputs requires iopt = 1, and in that -c case all of these inputs are examined. a value of zero for any -c of these optional inputs will cause the default value to be used. -c thus to use a subset of the optional inputs, simply preload -c locations 5 to 10 in rwork and iwork to 0.0 and 0 respectively, and -c then set those of interest to nonzero values. -c -c name location meaning and default value -c -c h0 rwork(5) the step size to be attempted on the first step. -c the default value is determined by the solver. -c -c hmax rwork(6) the maximum absolute step size allowed. -c the default value is infinite. -c -c hmin rwork(7) the minimum absolute step size allowed. -c the default value is 0. (this lower bound is not -c enforced on the final step before reaching tcrit -c when itask = 4 or 5.) -c -c ixpr iwork(5) flag to generate extra printing at method switches. -c ixpr = 0 means no extra printing (the default). -c ixpr = 1 means print data on each switch. -c t, h, and nst will be printed on the same logical -c unit as used for error messages. -c -c mxstep iwork(6) maximum number of (internally defined) steps -c allowed during one call to the solver. 
-c the default value is 500. -c -c mxhnil iwork(7) maximum number of messages printed (per problem) -c warning that t + h = t on a step (h = step size). -c this must be positive to result in a non-default -c value. the default value is 10. -c -c mxordn iwork(8) the maximum order to be allowed for the nonstiff -c (adams) method. the default value is 12. -c if mxordn exceeds the default value, it will -c be reduced to the default value. -c mxordn is held constant during the problem. -c -c mxords iwork(9) the maximum order to be allowed for the stiff -c (bdf) method. the default value is 5. -c if mxords exceeds the default value, it will -c be reduced to the default value. -c mxords is held constant during the problem. -c----------------------------------------------------------------------- -c optional outputs. -c -c as optional additional output from lsodar, the variables listed -c below are quantities related to the performance of lsodar -c which are available to the user. these are communicated by way of -c the work arrays, but also have internal mnemonic names as shown. -c except where stated otherwise, all of these outputs are defined -c on any successful return from lsodar, and on any return with -c istate = -1, -2, -4, -5, or -6. on an illegal input return -c (istate = -3), they will be unchanged from their existing values -c (if any), except possibly for tolsf, lenrw, and leniw. -c on any error return, outputs relevant to the error will be defined, -c as noted below. -c -c name location meaning -c -c hu rwork(11) the step size in t last used (successfully). -c -c hcur rwork(12) the step size to be attempted on the next step. -c -c tcur rwork(13) the current value of the independent variable -c which the solver has actually reached, i.e. the -c current internal mesh point in t. on output, tcur -c will always be at least as far as the argument -c t, but may be farther (if interpolation was done). 
-c -c tolsf rwork(14) a tolerance scale factor, greater than 1.0, -c computed when a request for too much accuracy was -c detected (istate = -3 if detected at the start of -c the problem, istate = -2 otherwise). if itol is -c left unaltered but rtol and atol are uniformly -c scaled up by a factor of tolsf for the next call, -c then the solver is deemed likely to succeed. -c (the user may also ignore tolsf and alter the -c tolerance parameters in any other way appropriate.) -c -c tsw rwork(15) the value of t at the time of the last method -c switch, if any. -c -c nge iwork(10) the number of g evaluations for the problem so far. -c -c nst iwork(11) the number of steps taken for the problem so far. -c -c nfe iwork(12) the number of f evaluations for the problem so far. -c -c nje iwork(13) the number of jacobian evaluations (and of matrix -c lu decompositions) for the problem so far. -c -c nqu iwork(14) the method order last used (successfully). -c -c nqcur iwork(15) the order to be attempted on the next step. -c -c imxer iwork(16) the index of the component of largest magnitude in -c the weighted local error vector ( e(i)/ewt(i) ), -c on an error return with istate = -4 or -5. -c -c lenrw iwork(17) the length of rwork actually required, assuming -c that the length of rwork is to be fixed for the -c rest of the problem, and that switching may occur. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c leniw iwork(18) the length of iwork actually required, assuming -c that the length of iwork is to be fixed for the -c rest of the problem, and that switching may occur. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c mused iwork(19) the method indicator for the last successful step.. -c 1 means adams (nonstiff), 2 means bdf (stiff). -c -c mcur iwork(20) the current method indicator.. -c 1 means adams (nonstiff), 2 means bdf (stiff). 
-c this is the method to be attempted -c on the next step. thus it differs from mused -c only if a method switch has just been made. -c -c the following two arrays are segments of the rwork array which -c may also be of interest to the user as optional outputs. -c for each array, the table below gives its internal name, -c its base address in rwork, and its description. -c -c name base address description -c -c yh 21 + 3*ng the nordsieck history array, of size nyh by -c (nqcur + 1), where nyh is the initial value -c of neq. for j = 0,1,...,nqcur, column j+1 -c of yh contains hcur**j/factorial(j) times -c the j-th derivative of the interpolating -c polynomial currently representing the solution, -c evaluated at t = tcur. -c -c acor lacor array of size neq used for the accumulated -c (from common corrections on each step, scaled on output -c as noted) to represent the estimated local error in y -c on the last step. this is the vector e in -c the description of the error control. it is -c defined only on a successful return from -c lsodar. the base address lacor is obtained by -c including in the user-s program the -c following 3 lines.. -c double precision rls -c common /ls0001/ rls(218), ils(39) -c lacor = ils(5) -c -c----------------------------------------------------------------------- -c part ii. other routines callable. -c -c the following are optional calls which the user may make to -c gain additional capabilities in conjunction with lsodar. -c (the routines xsetun and xsetf are designed to conform to the -c slatec error handling package.) -c -c form of call function -c call xsetun(lun) set the logical unit number, lun, for -c output of messages from lsodar, if -c the default is not desired. -c the default value of lun is 6. -c -c call xsetf(mflag) set a flag to control the printing of -c messages by lsodar. -c mflag = 0 means do not print. (danger.. -c this risks losing valuable information.) -c mflag = 1 means print (the default). 
-c -c either of the above calls may be made at -c any time and will take effect immediately. -c -c call srcar(rsav,isav,job) saves and restores the contents of -c the internal common blocks used by -c lsodar (see part iii below). -c rsav must be a real array of length 245 -c or more, and isav must be an integer -c array of length 59 or more. -c job=1 means save common into rsav/isav. -c job=2 means restore common from rsav/isav. -c srcar is useful if one is -c interrupting a run and restarting -c later, or alternating between two or -c more problems solved with lsodar. -c -c call intdy(,,,,,) provide derivatives of y, of various -c (see below) orders, at a specified point t, if -c desired. it may be called only after -c a successful return from lsodar. -c -c the detailed instructions for using intdy are as follows. -c the form of the call is.. -c -c call intdy (t, k, rwork(lyh), nyh, dky, iflag) -c -c the input parameters are.. -c -c t = value of independent variable where answers are desired -c (normally the same as the t last returned by lsodar). -c for valid results, t must lie between tcur - hu and tcur. -c (see optional outputs for tcur and hu.) -c k = integer order of the derivative desired. k must satisfy -c 0 .le. k .le. nqcur, where nqcur is the current order -c (see optional outputs). the capability corresponding -c to k = 0, i.e. computing y(t), is already provided -c by lsodar directly. since nqcur .ge. 1, the first -c derivative dy/dt is always available with intdy. -c lyh = 21 + 3*ng = base address in rwork of the history array yh. -c nyh = column length of yh, equal to the initial value of neq. -c -c the output parameters are.. -c -c dky = a real array of length neq containing the computed value -c of the k-th derivative of y(t). -c iflag = integer flag, returned as 0 if k and t were legal, -c -1 if k was illegal, and -2 if t was illegal. -c on an error return, a message is also written. 
-c----------------------------------------------------------------------- -c part iii. common blocks. -c -c if lsodar is to be used in an overlay situation, the user -c must declare, in the primary overlay, the variables in.. -c (1) the call sequence to lsodar, -c (2) the four internal common blocks -c /ls0001/ of length 257 (218 double precision words -c followed by 39 integer words), -c /lsa001/ of length 31 (22 double precision words -c followed by 9 integer words), -c /lsr001/ of length 14 (5 double precision words -c followed by 9 integer words), -c /eh0001/ of length 2 (integer words). -c -c if lsodar is used on a system in which the contents of internal -c common blocks are not preserved between calls, the user should -c declare the above common blocks in his main program to insure -c that their contents are preserved. -c -c if the solution of a given problem by lsodar is to be interrupted -c and then later continued, such as when restarting an interrupted run -c or alternating between two or more problems, the user should save, -c following the return from the last lsodar call prior to the -c interruption, the contents of the call sequence variables and the -c internal common blocks, and later restore these values before the -c next lsodar call for that problem. to save and restore the common -c blocks, use subroutine srcar (see part ii above). -c -c----------------------------------------------------------------------- -c part iv. optionally replaceable solver routines. -c -c below is a description of a routine in the lsodar package which -c relates to the measurement of errors, and can be -c replaced by a user-supplied version, if desired. however, since such -c a replacement may have a major impact on performance, it should be -c done only when absolutely necessary, and only with great caution. -c (note.. the means by which the package version of a routine is -c superseded by the user-s version may be system-dependent.) -c -c (a) ewset. 
-c the following subroutine is called just before each internal -c integration step, and sets the array of error weights, ewt, as -c described under itol/rtol/atol above.. -c subroutine ewset (neq, itol, rtol, atol, ycur, ewt) -c where neq, itol, rtol, and atol are as in the lsodar call sequence, -c ycur contains the current dependent variable vector, and -c ewt is the array of weights set by ewset. -c -c if the user supplies this subroutine, it must return in ewt(i) -c (i = 1,...,neq) a positive quantity suitable for comparing errors -c in y(i) to. the ewt array returned by ewset is passed to the -c vmnorm routine, and also used by lsodar in the computation -c of the optional output imxer, and the increments for difference -c quotient jacobians. -c -c in the user-supplied version of ewset, it may be desirable to use -c the current values of derivatives of y. derivatives up to order nq -c are available from the history array yh, described above under -c optional outputs. in ewset, yh is identical to the ycur array, -c extended to nq + 1 columns with a column length of nyh and scale -c factors of h**j/factorial(j). on the first call for the problem, -c given by nst = 0, nq is 1 and h is temporarily set to 1.0. -c the quantities nq, nyh, h, and nst can be obtained by including -c in ewset the statements.. -c double precision h, rls -c common /ls0001/ rls(218),ils(39) -c nq = ils(35) -c nyh = ils(14) -c nst = ils(36) -c h = rls(212) -c thus, for example, the current value of dy/dt can be obtained as -c ycur(nyh+i)/h (i=1,...,neq) (and the division by h is -c unnecessary when nst = 0). -c----------------------------------------------------------------------- -c----------------------------------------------------------------------- -c other routines in the lsodar package. -c -c in addition to subroutine lsodar, the lsodar package includes the -c following subroutines and function routines.. 
-c rchek does preliminary checking for roots, and serves as an -c interface between subroutine lsodar and subroutine roots. -c roots finds the leftmost root of a set of functions. -c intdy computes an interpolated value of the y vector at t = tout. -c stoda is the core integrator, which does one step of the -c integration and the associated error control. -c cfode sets all method coefficients and test constants. -c prja computes and preprocesses the jacobian matrix j = df/dy -c and the newton iteration matrix p = i - h*l0*j. -c solsy manages solution of linear system in chord iteration. -c ewset sets the error weight vector ewt before each step. -c vmnorm computes the weighted max-norm of a vector. -c fnorm computes the norm of a full matrix consistent with the -c weighted max-norm on vectors. -c bnorm computes the norm of a band matrix consistent with the -c weighted max-norm on vectors. -c srcar is a user-callable routine to save and restore -c the contents of the internal common blocks. -c dgefa and dgesl are routines from linpack for solving full -c systems of linear algebraic equations. -c dgbfa and dgbsl are routines from linpack for solving banded -c linear systems. -c daxpy, dscal, idamax, ddot, and dcopy are basic linear algebra -c modules (blas) used by the above linpack routines. -c d1mach computes the unit roundoff in a machine-independent manner. -c xerrwv, xsetun, and xsetf handle the printing of all error -c messages and warnings. xerrwv is machine-dependent. -c note.. vmnorm, fnorm, bnorm, idamax, ddot, and d1mach are function -c routines. all the others are subroutines. -c -c the intrinsic and external routines used by lsodar are.. -c dabs, dmax1, dmin1, dfloat, max0, min0, mod, dsign, dsqrt, and write. -c -c a block data subprogram is also included with the package, -c for loading some of the variables in internal common. 
-c -c----------------------------------------------------------------------- -c the following card is for optimized compilation on lll compilers. -clll. optimize -c----------------------------------------------------------------------- - external prja, solsy - integer illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer insufr, insufi, ixpr, iowns2, jtyp, mused, mxordn, mxords - integer lg0, lg1, lgx, iownr3, irfnd, itaskc, ngc, nge - integer i, i1, i2, iflag, imxer, kgo, lf0, - 1 leniw, lenrw, lenwm, ml, mord, mu, mxhnl0, mxstp0 - integer len1, len1c, len1n, len1s, len2, leniwc, - 1 lenrwc, lenrwn, lenrws - integer irfp, irt, lenyh, lyhnew - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision tsw, rowns2, pdnorm - double precision rownr3, t0, tlast, toutc - double precision atoli, ayi, big, ewti, h0, hmax, hmx, rh, rtoli, - 1 tcrit, tdist, tnext, tol, tolsf, tp, size, sum, w0, - 2 d1mach, vmnorm - dimension mord(2) - logical ihit -c----------------------------------------------------------------------- -c the following three internal common blocks contain -c (a) variables which are local to any subroutine but whose values must -c be preserved between calls to the routine (own variables), and -c (b) variables which are communicated between subroutines. -c the structure of each block is as follows.. all real variables are -c listed first, followed by all integers. within each type, the -c variables are grouped with those local to subroutine lsodar first, -c then those local to subroutine roots or subroutine stoda -c (no other routines have own variables), and finally those used -c for communication. the block ls0001 is declared in subroutines -c lsodar, intdy, stoda, prja, and solsy. 
the block lsa001 is declared -c in subroutines lsodar, stoda, and prja. the block lsr001 is declared -c in subroutines lsodar, rchek, and roots. groups of variables are -c replaced by dummy arrays in the common declarations in routines -c where those variables are not used. -c----------------------------------------------------------------------- - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lsa001/ tsw, rowns2(20), pdnorm, - 1 insufr, insufi, ixpr, iowns2(2), jtyp, mused, mxordn, mxords - common /lsr001/ rownr3(2), t0, tlast, toutc, - 1 lg0, lg1, lgx, iownr3(2), irfnd, itaskc, ngc, nge -c - data mord(1),mord(2)/12,5/, mxstp0/500/, mxhnl0/10/ -c----------------------------------------------------------------------- -c block a. -c this code block is executed on every call. -c it tests istate and itask for legality and branches appropriately. -c if istate .gt. 1 but the flag init shows that initialization has -c not yet been done, an error return occurs. -c if istate = 1 and tout = t, jump to block g and return immediately. -c----------------------------------------------------------------------- - if (istate .lt. 1 .or. istate .gt. 3) go to 601 - if (itask .lt. 1 .or. itask .gt. 5) go to 602 - itaskc = itask - if (istate .eq. 1) go to 10 - if (init .eq. 0) go to 603 - if (istate .eq. 2) go to 200 - go to 20 - 10 init = 0 - if (tout .eq. t) go to 430 - 20 ntrep = 0 -c----------------------------------------------------------------------- -c block b. -c the next code block is executed for the initial call (istate = 1), -c or for a continuation call with parameter changes (istate = 3). -c it contains checking of all inputs and various initializations. 
-c -c first check legality of the non-optional inputs neq, itol, iopt, -c jt, ml, mu, and ng. -c----------------------------------------------------------------------- - if (neq(1) .le. 0) go to 604 - if (istate .eq. 1) go to 25 - if (neq(1) .gt. n) go to 605 - 25 n = neq(1) - if (itol .lt. 1 .or. itol .gt. 4) go to 606 - if (iopt .lt. 0 .or. iopt .gt. 1) go to 607 - if (jt .eq. 3 .or. jt .lt. 1 .or. jt .gt. 5) go to 608 - jtyp = jt - if (jt .le. 2) go to 30 - ml = iwork(1) - mu = iwork(2) - if (ml .lt. 0 .or. ml .ge. n) go to 609 - if (mu .lt. 0 .or. mu .ge. n) go to 610 - 30 continue - if (ng .lt. 0) go to 630 - if (istate .eq. 1) go to 35 - if (irfnd .eq. 0 .and. ng .ne. ngc) go to 631 - 35 ngc = ng -c next process and check the optional inputs. -------------------------- - if (iopt .eq. 1) go to 40 - ixpr = 0 - mxstep = mxstp0 - mxhnil = mxhnl0 - hmxi = 0.0d0 - hmin = 0.0d0 - if (istate .ne. 1) go to 60 - h0 = 0.0d0 - mxordn = mord(1) - mxords = mord(2) - go to 60 - 40 ixpr = iwork(5) - if (ixpr .lt. 0 .or. ixpr .gt. 1) go to 611 - mxstep = iwork(6) - if (mxstep .lt. 0) go to 612 - if (mxstep .eq. 0) mxstep = mxstp0 - mxhnil = iwork(7) - if (mxhnil .lt. 0) go to 613 - if (mxhnil .eq. 0) mxhnil = mxhnl0 - if (istate .ne. 1) go to 50 - h0 = rwork(5) - mxordn = iwork(8) - if (mxordn .lt. 0) go to 628 - if (mxordn .eq. 0) mxordn = 100 - mxordn = min0(mxordn,mord(1)) - mxords = iwork(9) - if (mxords .lt. 0) go to 629 - if (mxords .eq. 0) mxords = 100 - mxords = min0(mxords,mord(2)) - if ((tout - t)*h0 .lt. 0.0d0) go to 614 - 50 hmax = rwork(6) - if (hmax .lt. 0.0d0) go to 615 - hmxi = 0.0d0 - if (hmax .gt. 0.0d0) hmxi = 1.0d0/hmax - hmin = rwork(7) - if (hmin .lt. 0.0d0) go to 616 -c----------------------------------------------------------------------- -c set work array pointers and check lengths lrw and liw. -c if istate = 1, meth is initialized to 1 here to facilitate the -c checking of work space lengths. 
-c pointers to segments of rwork and iwork are named by prefixing l to -c the name of the segment. e.g., the segment yh starts at rwork(lyh). -c segments of rwork (in order) are denoted g0, g1, gx, yh, wm, -c ewt, savf, acor. -c if the lengths provided are insufficient for the current method, -c an error return occurs. this is treated as illegal input on the -c first call, but as a problem interruption with istate = -7 on a -c continuation call. if the lengths are sufficient for the current -c method but not for both methods, a warning message is sent. -c----------------------------------------------------------------------- - 60 if (istate .eq. 1) meth = 1 - if (istate .eq. 1) nyh = n - lg0 = 21 - lg1 = lg0 + ng - lgx = lg1 + ng - lyhnew = lgx + ng - if (istate .eq. 1) lyh = lyhnew - if (lyhnew .eq. lyh) go to 62 -c if istate = 3 and ng was changed, shift yh to its new location. ------ - lenyh = l*nyh - if (lrw .lt. lyhnew-1+lenyh) go to 62 - i1 = 1 - if (lyhnew .gt. lyh) i1 = -1 - call dcopy (lenyh, rwork(lyh), i1, rwork(lyhnew), i1) - lyh = lyhnew - 62 continue - len1n = lyhnew - 1 + (mxordn + 1)*nyh - len1s = lyhnew - 1 + (mxords + 1)*nyh - lwm = len1s + 1 - if (jt .le. 2) lenwm = n*n + 2 - if (jt .ge. 4) lenwm = (2*ml + mu + 1)*n + 2 - len1s = len1s + lenwm - len1c = len1n - if (meth .eq. 2) len1c = len1s - len1 = max0(len1n,len1s) - len2 = 3*n - lenrw = len1 + len2 - lenrwn = len1n + len2 - lenrws = len1s + len2 - lenrwc = len1c + len2 - iwork(17) = lenrw - liwm = 1 - leniw = 20 + n - leniwc = 20 - if (meth .eq. 2) leniwc = leniw - iwork(18) = leniw - if (istate .eq. 1 .and. lrw .lt. lenrwc) go to 617 - if (istate .eq. 1 .and. liw .lt. leniwc) go to 618 - if (istate .eq. 3 .and. lrw .lt. lenrwc) go to 550 - if (istate .eq. 3 .and. liw .lt. leniwc) go to 555 - lewt = len1 + 1 - insufr = 0 - if (lrw .ge. lenrw) go to 65 - insufr = 2 - lewt = len1c + 1 - call xerrwv( - 1 'lsodar- warning.. 
rwork length is sufficient for now, but ', - 1 60, 103, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' may not be later. integration will proceed anyway. ', - 1 60, 103, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is lenrw = i1, while lrw = i2.', - 1 50, 103, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - 65 lsavf = lewt + n - lacor = lsavf + n - insufi = 0 - if (liw .ge. leniw) go to 70 - insufi = 2 - call xerrwv( - 1 'lsodar- warning.. iwork length is sufficient for now, but ', - 1 60, 104, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' may not be later. integration will proceed anyway. ', - 1 60, 104, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is leniw = i1, while liw = i2.', - 1 50, 104, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - 70 continue -c check rtol and atol for legality. ------------------------------------ - rtoli = rtol(1) - atoli = atol(1) - do 75 i = 1,n - if (itol .ge. 3) rtoli = rtol(i) - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - if (rtoli .lt. 0.0d0) go to 619 - if (atoli .lt. 0.0d0) go to 620 - 75 continue - if (istate .eq. 1) go to 100 -c if istate = 3, set flag to signal parameter changes to stoda. -------- - jstart = -1 - if (n .eq. nyh) go to 200 -c neq was reduced. zero part of yh to avoid undefined references. ----- - i1 = lyh + l*nyh - i2 = lyh + (maxord + 1)*nyh - 1 - if (i1 .gt. i2) go to 200 - do 95 i = i1,i2 - 95 rwork(i) = 0.0d0 - go to 200 -c----------------------------------------------------------------------- -c block c. -c the next block is for the initial call only (istate = 1). -c it contains all remaining initializations, the initial call to f, -c and the calculation of the initial step size. -c the error weights in ewt are inverted after being loaded. -c----------------------------------------------------------------------- - 100 uround = d1mach(4) - tn = t - tsw = t - maxord = mxordn - if (itask .ne. 4 .and. itask .ne. 
5) go to 110 - tcrit = rwork(1) - if ((tcrit - tout)*(tout - t) .lt. 0.0d0) go to 625 - if (h0 .ne. 0.0d0 .and. (t + h0 - tcrit)*h0 .gt. 0.0d0) - 1 h0 = tcrit - t - 110 jstart = 0 - nhnil = 0 - nst = 0 - nje = 0 - nslast = 0 - hu = 0.0d0 - nqu = 0 - mused = 0 - miter = 0 - ccmax = 0.3d0 - maxcor = 3 - msbp = 20 - mxncf = 10 -c initial call to f. (lf0 points to yh(*,2).) ------------------------- - lf0 = lyh + nyh - call f (neq, t, y, rwork(lf0)) - nfe = 1 -c load the initial value vector in yh. --------------------------------- - do 115 i = 1,n - 115 rwork(i+lyh-1) = y(i) -c load and invert the ewt array. (h is temporarily set to 1.0.) ------- - nq = 1 - h = 1.0d0 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 120 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 621 - 120 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) -c----------------------------------------------------------------------- -c the coding below computes the step size, h0, to be attempted on the -c first step, unless the user has supplied a value for this. -c first check that tout - t differs significantly from zero. -c a scalar tolerance quantity tol is computed, as max(rtol(i)) -c if this is positive, or max(atol(i)/abs(y(i))) otherwise, adjusted -c so as to be between 100*uround and 1.0e-3. -c then the computed value h0 is given by.. -c -c h0**(-2) = 1./(tol * w0**2) + tol * (norm(f))**2 -c -c where w0 = max ( abs(t), abs(tout) ), -c f = the initial value of the vector f(t,y), and -c norm() = the weighted vector norm used throughout, given by -c the vmnorm function routine, and weighted by the -c tolerances initially loaded into the ewt array. -c the sign of h0 is inferred from the initial values of tout and t. -c abs(h0) is made .le. abs(tout-t) in any case. -c----------------------------------------------------------------------- - if (h0 .ne. 0.0d0) go to 180 - tdist = dabs(tout - t) - w0 = dmax1(dabs(t),dabs(tout)) - if (tdist .lt. 
2.0d0*uround*w0) go to 622 - tol = rtol(1) - if (itol .le. 2) go to 140 - do 130 i = 1,n - 130 tol = dmax1(tol,rtol(i)) - 140 if (tol .gt. 0.0d0) go to 160 - atoli = atol(1) - do 150 i = 1,n - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - ayi = dabs(y(i)) - if (ayi .ne. 0.0d0) tol = dmax1(tol,atoli/ayi) - 150 continue - 160 tol = dmax1(tol,100.0d0*uround) - tol = dmin1(tol,0.001d0) - sum = vmnorm (n, rwork(lf0), rwork(lewt)) - sum = 1.0d0/(tol*w0*w0) + tol*sum**2 - h0 = 1.0d0/dsqrt(sum) - h0 = dmin1(h0,tdist) - h0 = dsign(h0,tout-t) -c adjust h0 if necessary to meet hmax bound. --------------------------- - 180 rh = dabs(h0)*hmxi - if (rh .gt. 1.0d0) h0 = h0/rh -c load h with h0 and scale yh(*,2) by h0. ------------------------------ - h = h0 - do 190 i = 1,n - 190 rwork(i+lf0-1) = h0*rwork(i+lf0-1) -c -c check for a zero of g at t. ------------------------------------------ - irfnd = 0 - toutc = tout - if (ngc .eq. 0) go to 270 - call rchek (1, g, neq, y, rwork(lyh), nyh, - 1 rwork(lg0), rwork(lg1), rwork(lgx), jroot, irt) - if (irt .eq. 0) go to 270 - go to 632 -c----------------------------------------------------------------------- -c block d. -c the next code block is for continuation calls only (istate = 2 or 3) -c and is to check stop conditions before taking a step. -c first, rchek is called to check for a root within the last step -c taken, other than the last root found there, if any. -c if itask = 2 or 5, and y(tn) has not yet been returned to the user -c because of an intervening root, return through block g. -c----------------------------------------------------------------------- - 200 nslast = nst -c - irfp = irfnd - if (ngc .eq. 0) go to 205 - if (itask .eq. 1 .or. itask .eq. 4) toutc = tout - call rchek (2, g, neq, y, rwork(lyh), nyh, - 1 rwork(lg0), rwork(lg1), rwork(lgx), jroot, irt) - if (irt .ne. 1) go to 205 - irfnd = 1 - istate = 3 - t = t0 - go to 425 - 205 continue - irfnd = 0 - if (irfp .eq. 1 .and. tlast .ne. tn .and. itask .eq. 
2) go to 400 -c - go to (210, 250, 220, 230, 240), itask - 210 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 220 tp = tn - hu*(1.0d0 + 100.0d0*uround) - if ((tp - tout)*h .gt. 0.0d0) go to 623 - if ((tn - tout)*h .lt. 0.0d0) go to 250 - t = tn - go to 400 - 230 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - if ((tcrit - tout)*h .lt. 0.0d0) go to 625 - if ((tn - tout)*h .lt. 0.0d0) go to 245 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 240 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - 245 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) t = tcrit - if (irfp .eq. 1 .and. tlast .ne. tn .and. itask .eq. 5) go to 400 - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (istate .eq. 2 .and. jstart .ge. 0) jstart = -2 -c----------------------------------------------------------------------- -c block e. -c the next block is normally executed for all calls and contains -c the call to the one-step core integrator stoda. -c -c this is a looping point for the integration steps. -c -c first check for too many steps being taken, update ewt (if not at -c start of problem), check for too much accuracy being requested, and -c check for h below the roundoff level in t. -c----------------------------------------------------------------------- - 250 continue - if (meth .eq. mused) go to 255 - if (insufr .eq. 1) go to 550 - if (insufi .eq. 1) go to 555 - 255 if ((nst-nslast) .ge. mxstep) go to 500 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 260 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 510 - 260 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 270 tolsf = uround*vmnorm (n, rwork(lyh), rwork(lewt)) - if (tolsf .le. 
0.01d0) go to 280 - tolsf = tolsf*200.0d0 - if (nst .eq. 0) go to 626 - go to 520 - 280 if ((tn + h) .ne. tn) go to 290 - nhnil = nhnil + 1 - if (nhnil .gt. mxhnil) go to 290 - call xerrwv('lsodar- warning..internal t (=r1) and h (=r2) are', - 1 50, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' such that in the machine, t + h = t on the next step ', - 1 60, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' (h = step size). solver will continue anyway', - 1 50, 101, 0, 0, 0, 0, 2, tn, h) - if (nhnil .lt. mxhnil) go to 290 - call xerrwv('lsodar- above warning has been issued i1 times. ', - 1 50, 102, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' it will not be issued again for this problem', - 1 50, 102, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - 290 continue -c----------------------------------------------------------------------- -c call stoda(neq,y,yh,nyh,yh,ewt,savf,acor,wm,iwm,f,jac,prja,solsy) -c----------------------------------------------------------------------- - call stoda (neq, y, rwork(lyh), nyh, rwork(lyh), rwork(lewt), - 1 rwork(lsavf), rwork(lacor), rwork(lwm), iwork(liwm), - 2 f, jac, prja, solsy) - kgo = 1 - kflag - go to (300, 530, 540), kgo -c----------------------------------------------------------------------- -c block f. -c the following block handles the case of a successful return from the -c core integrator (kflag = 0). -c if a method switch was just made, record tsw, reset maxord, -c set jstart to -1 to signal stoda to complete the switch, -c and do extra printing of data if ixpr = 1. -c then call rchek to check for a root within the last step. -c then, if no root was found, check for stop conditions. -c----------------------------------------------------------------------- - 300 init = 1 - if (meth .eq. mused) go to 310 - tsw = tn - maxord = mxordn - if (meth .eq. 2) maxord = mxords - if (meth .eq. 2) rwork(lwm) = dsqrt(uround) - insufr = min0(insufr,1) - insufi = min0(insufi,1) - jstart = -1 - if (ixpr .eq. 0) go to 310 - if (meth .eq. 
2) call xerrwv( - 1 'lsodar- a switch to the bdf (stiff) method has occurred ', - 1 60, 105, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - if (meth .eq. 1) call xerrwv( - 1 'lsodar- a switch to the adams (nonstiff) method has occurred', - 1 60, 106, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' at t = r1, tentative step size h = r2, step nst = i1 ', - 1 60, 107, 0, 1, nst, 0, 2, tn, h) - 310 continue -c - if (ngc .eq. 0) go to 315 - call rchek (3, g, neq, y, rwork(lyh), nyh, - 1 rwork(lg0), rwork(lg1), rwork(lgx), jroot, irt) - if (irt .ne. 1) go to 315 - irfnd = 1 - istate = 3 - t = t0 - go to 425 - 315 continue -c - go to (320, 400, 330, 340, 350), itask -c itask = 1. if tout has been reached, interpolate. ------------------- - 320 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 -c itask = 3. jump to exit if tout was reached. ------------------------ - 330 if ((tn - tout)*h .ge. 0.0d0) go to 400 - go to 250 -c itask = 4. see if tout or tcrit was reached. adjust h if necessary. - 340 if ((tn - tout)*h .lt. 0.0d0) go to 345 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 - 345 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (jstart .ge. 0) jstart = -2 - go to 250 -c itask = 5. see if tcrit was reached and jump to exit. --------------- - 350 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx -c----------------------------------------------------------------------- -c block g. -c the following block handles all successful returns from lsodar. -c if itask .ne. 1, y is loaded from yh and t is set accordingly. -c istate is set to 2, the illegal input counter is zeroed, and the -c optional outputs are loaded into the work arrays before returning. 
-c if istate = 1 and tout = t, there is a return with no action taken, -c except that if this has happened repeatedly, the run is terminated. -c----------------------------------------------------------------------- - 400 do 410 i = 1,n - 410 y(i) = rwork(i+lyh-1) - t = tn - if (itask .ne. 4 .and. itask .ne. 5) go to 420 - if (ihit) t = tcrit - 420 istate = 2 - 425 continue - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - rwork(15) = tsw - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - iwork(19) = mused - iwork(20) = meth - iwork(10) = nge - tlast = t - return -c - 430 ntrep = ntrep + 1 - if (ntrep .lt. 5) return - call xerrwv( - 1 'lsodar- repeated calls with istate = 1 and tout = t (=r1) ', - 1 60, 301, 0, 0, 0, 0, 1, t, 0.0d0) - go to 800 -c----------------------------------------------------------------------- -c block h. -c the following block handles all unsuccessful returns other than -c those for illegal input. first the error message routine is called. -c if there was an error test or convergence test failure, imxer is set. -c then y is loaded from yh, t is set to tn, and the illegal input -c counter illin is set to 0. the optional outputs are loaded into -c the work arrays before returning. -c----------------------------------------------------------------------- -c the maximum number of steps was taken before reaching tout. ---------- - 500 call xerrwv('lsodar- at current t (=r1), mxstep (=i1) steps ', - 1 50, 201, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' taken on this call before reaching tout ', - 1 50, 201, 0, 1, mxstep, 0, 1, tn, 0.0d0) - istate = -1 - go to 580 -c ewt(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 ewti = rwork(lewt+i-1) - call xerrwv('lsodar- at t (=r1), ewt(i1) has become r2 .le. 0.', - 1 50, 202, 0, 1, i, 0, 2, tn, ewti) - istate = -6 - go to 580 -c too much accuracy requested for machine precision. 
------------------- - 520 call xerrwv('lsodar- at t (=r1), too much accuracy requested ', - 1 50, 203, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' for precision of machine.. see tolsf (=r2) ', - 1 50, 203, 0, 0, 0, 0, 2, tn, tolsf) - rwork(14) = tolsf - istate = -2 - go to 580 -c kflag = -1. error test failed repeatedly or with abs(h) = hmin. ----- - 530 call xerrwv('lsodar- at t(=r1) and step size h(=r2), the error', - 1 50, 204, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' test failed repeatedly or with abs(h) = hmin', - 1 50, 204, 0, 0, 0, 0, 2, tn, h) - istate = -4 - go to 560 -c kflag = -2. convergence failed repeatedly or with abs(h) = hmin. ---- - 540 call xerrwv('lsodar- at t (=r1) and step size h (=r2), the ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' corrector convergence failed repeatedly ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' or with abs(h) = hmin ', - 1 30, 205, 0, 0, 0, 0, 2, tn, h) - istate = -5 - go to 560 -c rwork length too small to proceed. ----------------------------------- - 550 call xerrwv('lsodar- at current t(=r1), rwork length too small', - 1 50, 206, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' to proceed. the integration was otherwise successful.', - 1 60, 206, 0, 0, 0, 0, 1, tn, 0.0d0) - istate = -7 - go to 580 -c iwork length too small to proceed. ----------------------------------- - 555 call xerrwv('lsodar- at current t(=r1), iwork length too small', - 1 50, 207, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' to proceed. the integration was otherwise successful.', - 1 60, 207, 0, 0, 0, 0, 1, tn, 0.0d0) - istate = -7 - go to 580 -c compute imxer if relevant. ------------------------------------------- - 560 big = 0.0d0 - imxer = 1 - do 570 i = 1,n - size = dabs(rwork(i+lacor-1)*rwork(i+lewt-1)) - if (big .ge. size) go to 570 - big = size - imxer = i - 570 continue - iwork(16) = imxer -c set y vector, t, illin, and optional outputs. 
------------------------ - 580 do 590 i = 1,n - 590 y(i) = rwork(i+lyh-1) - t = tn - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - rwork(15) = tsw - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - iwork(19) = mused - iwork(20) = meth - iwork(10) = nge - tlast = t - return -c----------------------------------------------------------------------- -c block i. -c the following block handles all error returns due to illegal input -c (istate = -3), as detected before calling the core integrator. -c first the error message routine is called. then if there have been -c 5 consecutive such returns just before this call to the solver, -c the run is halted. -c----------------------------------------------------------------------- - 601 call xerrwv('lsodar- istate (=i1) illegal ', - 1 30, 1, 0, 1, istate, 0, 0, 0.0d0, 0.0d0) - go to 700 - 602 call xerrwv('lsodar- itask (=i1) illegal ', - 1 30, 2, 0, 1, itask, 0, 0, 0.0d0, 0.0d0) - go to 700 - 603 call xerrwv('lsodar- istate .gt. 1 but lsodar not initialized ', - 1 50, 3, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - go to 700 - 604 call xerrwv('lsodar- neq (=i1) .lt. 1 ', - 1 30, 4, 0, 1, neq(1), 0, 0, 0.0d0, 0.0d0) - go to 700 - 605 call xerrwv('lsodar- istate = 3 and neq increased (i1 to i2) ', - 1 50, 5, 0, 2, n, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 606 call xerrwv('lsodar- itol (=i1) illegal ', - 1 30, 6, 0, 1, itol, 0, 0, 0.0d0, 0.0d0) - go to 700 - 607 call xerrwv('lsodar- iopt (=i1) illegal ', - 1 30, 7, 0, 1, iopt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 608 call xerrwv('lsodar- jt (=i1) illegal ', - 1 30, 8, 0, 1, jt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 609 call xerrwv('lsodar- ml (=i1) illegal.. .lt.0 or .ge.neq (=i2)', - 1 50, 9, 0, 2, ml, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 610 call xerrwv('lsodar- mu (=i1) illegal.. 
.lt.0 or .ge.neq (=i2)', - 1 50, 10, 0, 2, mu, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 611 call xerrwv('lsodar- ixpr (=i1) illegal ', - 1 30, 11, 0, 1, ixpr, 0, 0, 0.0d0, 0.0d0) - go to 700 - 612 call xerrwv('lsodar- mxstep (=i1) .lt. 0 ', - 1 30, 12, 0, 1, mxstep, 0, 0, 0.0d0, 0.0d0) - go to 700 - 613 call xerrwv('lsodar- mxhnil (=i1) .lt. 0 ', - 1 30, 13, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - go to 700 - 614 call xerrwv('lsodar- tout (=r1) behind t (=r2) ', - 1 40, 14, 0, 0, 0, 0, 2, tout, t) - call xerrwv(' integration direction is given by h0 (=r1) ', - 1 50, 14, 0, 0, 0, 0, 1, h0, 0.0d0) - go to 700 - 615 call xerrwv('lsodar- hmax (=r1) .lt. 0.0 ', - 1 30, 15, 0, 0, 0, 0, 1, hmax, 0.0d0) - go to 700 - 616 call xerrwv('lsodar- hmin (=r1) .lt. 0.0 ', - 1 30, 16, 0, 0, 0, 0, 1, hmin, 0.0d0) - go to 700 - 617 call xerrwv( - 1 'lsodar- rwork length needed, lenrw (=i1), exceeds lrw (=i2)', - 1 60, 17, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 618 call xerrwv( - 1 'lsodar- iwork length needed, leniw (=i1), exceeds liw (=i2)', - 1 60, 18, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - go to 700 - 619 call xerrwv('lsodar- rtol(i1) is r1 .lt. 0.0 ', - 1 40, 19, 0, 1, i, 0, 1, rtoli, 0.0d0) - go to 700 - 620 call xerrwv('lsodar- atol(i1) is r1 .lt. 0.0 ', - 1 40, 20, 0, 1, i, 0, 1, atoli, 0.0d0) - go to 700 - 621 ewti = rwork(lewt+i-1) - call xerrwv('lsodar- ewt(i1) is r1 .le. 
0.0 ', - 1 40, 21, 0, 1, i, 0, 1, ewti, 0.0d0) - go to 700 - 622 call xerrwv( - 1 'lsodar- tout (=r1) too close to t(=r2) to start integration', - 1 60, 22, 0, 0, 0, 0, 2, tout, t) - go to 700 - 623 call xerrwv( - 1 'lsodar- itask = i1 and tout (=r1) behind tcur - hu (= r2) ', - 1 60, 23, 0, 1, itask, 0, 2, tout, tp) - go to 700 - 624 call xerrwv( - 1 'lsodar- itask = 4 or 5 and tcrit (=r1) behind tcur (=r2) ', - 1 60, 24, 0, 0, 0, 0, 2, tcrit, tn) - go to 700 - 625 call xerrwv( - 1 'lsodar- itask = 4 or 5 and tcrit (=r1) behind tout (=r2) ', - 1 60, 25, 0, 0, 0, 0, 2, tcrit, tout) - go to 700 - 626 call xerrwv('lsodar- at start of problem, too much accuracy ', - 1 50, 26, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' requested for precision of machine.. see tolsf (=r1) ', - 1 60, 26, 0, 0, 0, 0, 1, tolsf, 0.0d0) - rwork(14) = tolsf - go to 700 - 627 call xerrwv('lsodar- trouble from intdy. itask = i1, tout = r1', - 1 50, 27, 0, 1, itask, 0, 1, tout, 0.0d0) - go to 700 - 628 call xerrwv('lsodar- mxordn (=i1) .lt. 0 ', - 1 30, 28, 0, 1, mxordn, 0, 0, 0.0d0, 0.0d0) - go to 700 - 629 call xerrwv('lsodar- mxords (=i1) .lt. 0 ', - 1 30, 29, 0, 1, mxords, 0, 0, 0.0d0, 0.0d0) - go to 700 - 630 call xerrwv('lsodar- ng (=i1) .lt. 0 ', - 1 30, 30, 0, 1, ng, 0, 0, 0.0d0, 0.0d0) - go to 700 - 631 call xerrwv('lsodar- ng changed (from i1 to i2) illegally, ', - 1 50, 31, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' i.e. not immediately after a root was found ', - 1 50, 31, 0, 2, ngc, ng, 0, 0.0d0, 0.0d0) - go to 700 - 632 call xerrwv('lsodar- one or more components of g has a root ', - 1 50, 32, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' too near to the initial point ', - 1 40, 32, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 700 if (illin .eq. 5) go to 710 - illin = illin + 1 - tlast = t - istate = -3 - return - 710 call xerrwv('lsodar- repeated occurrences of illegal input ', - 1 50, 302, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 800 call xerrwv('lsodar- run aborted.. 
apparent infinite loop ', - 1 50, 303, 2, 0, 0, 0, 0, 0.0d0, 0.0d0) - return -c----------------------- end of subroutine lsodar ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/lsode.f b/scipy-0.10.1/scipy/integrate/odepack/lsode.f deleted file mode 100644 index ed5d657178..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/lsode.f +++ /dev/null @@ -1,1512 +0,0 @@ - subroutine lsode (f, neq, y, t, tout, itol, rtol, atol, itask, - 1 istate, iopt, rwork, lrw, iwork, liw, jac, mf) - external f, jac - integer neq, itol, itask, istate, iopt, lrw, iwork, liw, mf - double precision y, t, tout, rtol, atol, rwork - dimension neq(1), y(1), rtol(1), atol(1), rwork(lrw), iwork(liw) -c----------------------------------------------------------------------- -c this is the march 30, 1987 version of -c lsode.. livermore solver for ordinary differential equations. -c this version is in double precision. -c -c lsode solves the initial value problem for stiff or nonstiff -c systems of first order ode-s, -c dy/dt = f(t,y) , or, in component form, -c dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(neq)) (i = 1,...,neq). -c lsode is a package based on the gear and gearb packages, and on the -c october 23, 1978 version of the tentative odepack user interface -c standard, with minor modifications. -c----------------------------------------------------------------------- -c reference.. -c alan c. hindmarsh, odepack, a systematized collection of ode -c solvers, in scientific computing, r. s. stepleman et al. (eds.), -c north-holland, amsterdam, 1983, pp. 55-64. -c----------------------------------------------------------------------- -c author and contact.. alan c. hindmarsh, -c computing and mathematics research div., l-316 -c lawrence livermore national laboratory -c livermore, ca 94550. -c----------------------------------------------------------------------- -c summary of usage. 
-c -c communication between the user and the lsode package, for normal -c situations, is summarized here. this summary describes only a subset -c of the full set of options available. see the full description for -c details, including optional communication, nonstandard options, -c and instructions for special situations. see also the example -c problem (with program and output) following this summary. -c -c a. first provide a subroutine of the form.. -c subroutine f (neq, t, y, ydot) -c dimension y(neq), ydot(neq) -c which supplies the vector function f by loading ydot(i) with f(i). -c -c b. next determine (or guess) whether or not the problem is stiff. -c stiffness occurs when the jacobian matrix df/dy has an eigenvalue -c whose real part is negative and large in magnitude, compared to the -c reciprocal of the t span of interest. if the problem is nonstiff, -c use a method flag mf = 10. if it is stiff, there are four standard -c choices for mf, and lsode requires the jacobian matrix in some form. -c this matrix is regarded either as full (mf = 21 or 22), -c or banded (mf = 24 or 25). in the banded case, lsode requires two -c half-bandwidth parameters ml and mu. these are, respectively, the -c widths of the lower and upper parts of the band, excluding the main -c diagonal. thus the band consists of the locations (i,j) with -c i-ml .le. j .le. i+mu, and the full bandwidth is ml+mu+1. -c -c c. if the problem is stiff, you are encouraged to supply the jacobian -c directly (mf = 21 or 24), but if this is not feasible, lsode will -c compute it internally by difference quotients (mf = 22 or 25). -c if you are supplying the jacobian, provide a subroutine of the form.. -c subroutine jac (neq, t, y, ml, mu, pd, nrowpd) -c dimension y(neq), pd(nrowpd,neq) -c which supplies df/dy by loading pd as follows.. -c for a full jacobian (mf = 21), load pd(i,j) with df(i)/dy(j), -c the partial derivative of f(i) with respect to y(j). (ignore the -c ml and mu arguments in this case.) 
-c for a banded jacobian (mf = 24), load pd(i-j+mu+1,j) with -c df(i)/dy(j), i.e. load the diagonal lines of df/dy into the rows of -c pd from the top down. -c in either case, only nonzero elements need be loaded. -c -c d. write a main program which calls subroutine lsode once for -c each point at which answers are desired. this should also provide -c for possible use of logical unit 6 for output of error messages -c by lsode. on the first call to lsode, supply arguments as follows.. -c f = name of subroutine for right-hand side vector f. -c this name must be declared external in calling program. -c neq = number of first order ode-s. -c y = array of initial values, of length neq. -c t = the initial value of the independent variable. -c tout = first point where output is desired (.ne. t). -c itol = 1 or 2 according as atol (below) is a scalar or array. -c rtol = relative tolerance parameter (scalar). -c atol = absolute tolerance parameter (scalar or array). -c the estimated local error in y(i) will be controlled so as -c to be roughly less (in magnitude) than -c ewt(i) = rtol*abs(y(i)) + atol if itol = 1, or -c ewt(i) = rtol*abs(y(i)) + atol(i) if itol = 2. -c thus the local error test passes if, in each component, -c either the absolute error is less than atol (or atol(i)), -c or the relative error is less than rtol. -c use rtol = 0.0 for pure absolute error control, and -c use atol = 0.0 (or atol(i) = 0.0) for pure relative error -c control. caution.. actual (global) errors may exceed these -c local tolerances, so choose them conservatively. -c itask = 1 for normal computation of output values of y at t = tout. -c istate = integer flag (input and output). set istate = 1. -c iopt = 0 to indicate no optional inputs used. -c rwork = real work array of length at least.. -c 20 + 16*neq for mf = 10, -c 22 + 9*neq + neq**2 for mf = 21 or 22, -c 22 + 10*neq + (2*ml + mu)*neq for mf = 24 or 25. -c lrw = declared length of rwork (in user-s dimension). 
-c iwork = integer work array of length at least.. -c 20 for mf = 10, -c 20 + neq for mf = 21, 22, 24, or 25. -c if mf = 24 or 25, input in iwork(1),iwork(2) the lower -c and upper half-bandwidths ml,mu. -c liw = declared length of iwork (in user-s dimension). -c jac = name of subroutine for jacobian matrix (mf = 21 or 24). -c if used, this name must be declared external in calling -c program. if not used, pass a dummy name. -c mf = method flag. standard values are.. -c 10 for nonstiff (adams) method, no jacobian used. -c 21 for stiff (bdf) method, user-supplied full jacobian. -c 22 for stiff method, internally generated full jacobian. -c 24 for stiff method, user-supplied banded jacobian. -c 25 for stiff method, internally generated banded jacobian. -c note that the main program must declare arrays y, rwork, iwork, -c and possibly atol. -c -c e. the output from the first call (or any call) is.. -c y = array of computed values of y(t) vector. -c t = corresponding value of independent variable (normally tout). -c istate = 2 if lsode was successful, negative otherwise. -c -1 means excess work done on this call (perhaps wrong mf). -c -2 means excess accuracy requested (tolerances too small). -c -3 means illegal input detected (see printed message). -c -4 means repeated error test failures (check all inputs). -c -5 means repeated convergence failures (perhaps bad jacobian -c supplied or wrong choice of mf or tolerances). -c -6 means error weight became zero during problem. (solution -c component i vanished, and atol or atol(i) = 0.) -c -c f. to continue the integration after a successful return, simply -c reset tout and call lsode again. no other parameters need be reset. -c -c----------------------------------------------------------------------- -c example problem. -c -c the following is a simple example problem, with the coding -c needed for its solution by lsode. the problem is from chemical -c kinetics, and consists of the following three rate equations.. 
-c dy1/dt = -.04*y1 + 1.e4*y2*y3 -c dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 -c dy3/dt = 3.e7*y2**2 -c on the interval from t = 0.0 to t = 4.e10, with initial conditions -c y1 = 1.0, y2 = y3 = 0. the problem is stiff. -c -c the following coding solves this problem with lsode, using mf = 21 -c and printing results at t = .4, 4., ..., 4.e10. it uses -c itol = 2 and atol much smaller for y2 than y1 or y3 because -c y2 has much smaller values. -c at the end of the run, statistical quantities of interest are -c printed (see optional outputs in the full description below). -c -c external fex, jex -c double precision atol, rtol, rwork, t, tout, y -c dimension y(3), atol(3), rwork(58), iwork(23) -c neq = 3 -c y(1) = 1.d0 -c y(2) = 0.d0 -c y(3) = 0.d0 -c t = 0.d0 -c tout = .4d0 -c itol = 2 -c rtol = 1.d-4 -c atol(1) = 1.d-6 -c atol(2) = 1.d-10 -c atol(3) = 1.d-6 -c itask = 1 -c istate = 1 -c iopt = 0 -c lrw = 58 -c liw = 23 -c mf = 21 -c do 40 iout = 1,12 -c call lsode(fex,neq,y,t,tout,itol,rtol,atol,itask,istate, -c 1 iopt,rwork,lrw,iwork,liw,jex,mf) -c write(6,20)t,y(1),y(2),y(3) -c 20 format(' at t =',e12.4,' y =',3e14.6) -c if (istate .lt. 0) go to 80 -c 40 tout = tout*10.d0 -c write(6,60)iwork(11),iwork(12),iwork(13) -c 60 format(/' no. steps =',i4,' no. f-s =',i4,' no. j-s =',i4) -c stop -c 80 write(6,90)istate -c 90 format(///' error halt.. istate =',i3) -c stop -c end -c -c subroutine fex (neq, t, y, ydot) -c double precision t, y, ydot -c dimension y(3), ydot(3) -c ydot(1) = -.04d0*y(1) + 1.d4*y(2)*y(3) -c ydot(3) = 3.d7*y(2)*y(2) -c ydot(2) = -ydot(1) - ydot(3) -c return -c end -c -c subroutine jex (neq, t, y, ml, mu, pd, nrpd) -c double precision pd, t, y -c dimension y(3), pd(nrpd,3) -c pd(1,1) = -.04d0 -c pd(1,2) = 1.d4*y(3) -c pd(1,3) = 1.d4*y(2) -c pd(2,1) = .04d0 -c pd(2,3) = -pd(1,3) -c pd(3,2) = 6.d7*y(2) -c pd(2,2) = -pd(1,2) - pd(3,2) -c return -c end -c -c the output of this program (on a cdc-7600 in single precision) -c is as follows.. 
-c -c at t = 4.0000e-01 y = 9.851726e-01 3.386406e-05 1.479357e-02 -c at t = 4.0000e+00 y = 9.055142e-01 2.240418e-05 9.446344e-02 -c at t = 4.0000e+01 y = 7.158050e-01 9.184616e-06 2.841858e-01 -c at t = 4.0000e+02 y = 4.504846e-01 3.222434e-06 5.495122e-01 -c at t = 4.0000e+03 y = 1.831701e-01 8.940379e-07 8.168290e-01 -c at t = 4.0000e+04 y = 3.897016e-02 1.621193e-07 9.610297e-01 -c at t = 4.0000e+05 y = 4.935213e-03 1.983756e-08 9.950648e-01 -c at t = 4.0000e+06 y = 5.159269e-04 2.064759e-09 9.994841e-01 -c at t = 4.0000e+07 y = 5.306413e-05 2.122677e-10 9.999469e-01 -c at t = 4.0000e+08 y = 5.494529e-06 2.197824e-11 9.999945e-01 -c at t = 4.0000e+09 y = 5.129458e-07 2.051784e-12 9.999995e-01 -c at t = 4.0000e+10 y = -7.170586e-08 -2.868234e-13 1.000000e+00 -c -c no. steps = 330 no. f-s = 405 no. j-s = 69 -c----------------------------------------------------------------------- -c full description of user interface to lsode. -c -c the user interface to lsode consists of the following parts. -c -c i. the call sequence to subroutine lsode, which is a driver -c routine for the solver. this includes descriptions of both -c the call sequence arguments and of user-supplied routines. -c following these descriptions is a description of -c optional inputs available through the call sequence, and then -c a description of optional outputs (in the work arrays). -c -c ii. descriptions of other routines in the lsode package that may be -c (optionally) called by the user. these provide the ability to -c alter error message handling, save and restore the internal -c common, and obtain specified derivatives of the solution y(t). -c -c iii. descriptions of common blocks to be declared in overlay -c or similar environments, or to be saved when doing an interrupt -c of the problem and continued solution later. -c -c iv. description of two routines in the lsode package, either of -c which the user may replace with his own version, if desired. 
-c these relate to the measurement of errors. -c -c----------------------------------------------------------------------- -c part i. call sequence. -c -c the call sequence parameters used for input only are -c f, neq, tout, itol, rtol, atol, itask, iopt, lrw, liw, jac, mf, -c and those used for both input and output are -c y, t, istate. -c the work arrays rwork and iwork are also used for conditional and -c optional inputs and optional outputs. (the term output here refers -c to the return from subroutine lsode to the user-s calling program.) -c -c the legality of input parameters will be thoroughly checked on the -c initial call for the problem, but not checked thereafter unless a -c change in input parameters is flagged by istate = 3 on input. -c -c the descriptions of the call arguments are as follows. -c -c f = the name of the user-supplied subroutine defining the -c ode system. the system must be put in the first-order -c form dy/dt = f(t,y), where f is a vector-valued function -c of the scalar t and the vector y. subroutine f is to -c compute the function f. it is to have the form -c subroutine f (neq, t, y, ydot) -c dimension y(1), ydot(1) -c where neq, t, and y are input, and the array ydot = f(t,y) -c is output. y and ydot are arrays of length neq. -c (in the dimension statement above, 1 is a dummy -c dimension.. it can be replaced by any value.) -c subroutine f should not alter y(1),...,y(neq). -c f must be declared external in the calling program. -c -c subroutine f may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in f) and/or y has length exceeding neq(1). -c see the descriptions of neq and y below. -c -c if quantities computed in the f routine are needed -c externally to lsode, an extra call to f should be made -c for this purpose, for consistent and accurate results. -c if only the derivative dy/dt is needed, use intdy instead. 
-c -c neq = the size of the ode system (number of first order -c ordinary differential equations). used only for input. -c neq may be decreased, but not increased, during the problem. -c if neq is decreased (with istate = 3 on input), the -c remaining components of y should be left undisturbed, if -c these are to be accessed in f and/or jac. -c -c normally, neq is a scalar, and it is generally referred to -c as a scalar in this user interface description. however, -c neq may be an array, with neq(1) set to the system size. -c (the lsode package accesses only neq(1).) in either case, -c this parameter is passed as the neq argument in all calls -c to f and jac. hence, if it is an array, locations -c neq(2),... may be used to store other integer data and pass -c it to f and/or jac. subroutines f and/or jac must include -c neq in a dimension statement in that case. -c -c y = a real array for the vector of dependent variables, of -c length neq or more. used for both input and output on the -c first call (istate = 1), and only for output on other calls. -c on the first call, y must contain the vector of initial -c values. on output, y contains the computed solution vector, -c evaluated at t. if desired, the y array may be used -c for other purposes between calls to the solver. -c -c this array is passed as the y argument in all calls to -c f and jac. hence its length may exceed neq, and locations -c y(neq+1),... may be used to store other real data and -c pass it to f and/or jac. (the lsode package accesses only -c y(1),...,y(neq).) -c -c t = the independent variable. on input, t is used only on the -c first call, as the initial point of the integration. -c on output, after each call, t is the value at which a -c computed solution y is evaluated (usually the same as tout). -c on an error return, t is the farthest point reached. -c -c tout = the next value of t at which a computed solution is desired. -c used only for input. 
-c -c when starting the problem (istate = 1), tout may be equal -c to t for one call, then should .ne. t for the next call. -c for the initial t, an input value of tout .ne. t is used -c in order to determine the direction of the integration -c (i.e. the algebraic sign of the step sizes) and the rough -c scale of the problem. integration in either direction -c (forward or backward in t) is permitted. -c -c if itask = 2 or 5 (one-step modes), tout is ignored after -c the first call (i.e. the first call with tout .ne. t). -c otherwise, tout is required on every call. -c -c if itask = 1, 3, or 4, the values of tout need not be -c monotone, but a value of tout which backs up is limited -c to the current internal t interval, whose endpoints are -c tcur - hu and tcur (see optional outputs, below, for -c tcur and hu). -c -c itol = an indicator for the type of error control. see -c description below under atol. used only for input. -c -c rtol = a relative error tolerance parameter, either a scalar or -c an array of length neq. see description below under atol. -c input only. -c -c atol = an absolute error tolerance parameter, either a scalar or -c an array of length neq. input only. -c -c the input parameters itol, rtol, and atol determine -c the error control performed by the solver. the solver will -c control the vector e = (e(i)) of estimated local errors -c in y, according to an inequality of the form -c rms-norm of ( e(i)/ewt(i) ) .le. 1, -c where ewt(i) = rtol(i)*abs(y(i)) + atol(i), -c and the rms-norm (root-mean-square norm) here is -c rms-norm(v) = sqrt(sum v(i)**2 / neq). here ewt = (ewt(i)) -c is a vector of weights which must always be positive, and -c the values of rtol and atol should all be non-negative. -c the following table gives the types (scalar/array) of -c rtol and atol, and the corresponding form of ewt(i). 
-c -c itol rtol atol ewt(i) -c 1 scalar scalar rtol*abs(y(i)) + atol -c 2 scalar array rtol*abs(y(i)) + atol(i) -c 3 array scalar rtol(i)*abs(y(i)) + atol -c 4 array array rtol(i)*abs(y(i)) + atol(i) -c -c when either of these parameters is a scalar, it need not -c be dimensioned in the user-s calling program. -c -c if none of the above choices (with itol, rtol, and atol -c fixed throughout the problem) is suitable, more general -c error controls can be obtained by substituting -c user-supplied routines for the setting of ewt and/or for -c the norm calculation. see part iv below. -c -c if global errors are to be estimated by making a repeated -c run on the same problem with smaller tolerances, then all -c components of rtol and atol (i.e. of ewt) should be scaled -c down uniformly. -c -c itask = an index specifying the task to be performed. -c input only. itask has the following values and meanings. -c 1 means normal computation of output values of y(t) at -c t = tout (by overshooting and interpolating). -c 2 means take one step only and return. -c 3 means stop at the first internal mesh point at or -c beyond t = tout and return. -c 4 means normal computation of output values of y(t) at -c t = tout but without overshooting t = tcrit. -c tcrit must be input as rwork(1). tcrit may be equal to -c or beyond tout, but not behind it in the direction of -c integration. this option is useful if the problem -c has a singularity at or beyond t = tcrit. -c 5 means take one step, without passing tcrit, and return. -c tcrit must be input as rwork(1). -c -c note.. if itask = 4 or 5 and the solver reaches tcrit -c (within roundoff), it will return t = tcrit (exactly) to -c indicate this (unless itask = 4 and tout comes before tcrit, -c in which case answers at t = tout are returned first). -c -c istate = an index used for input and output to specify the -c the state of the calculation. -c -c on input, the values of istate are as follows. 
-c 1 means this is the first call for the problem -c (initializations will be done). see note below. -c 2 means this is not the first call, and the calculation -c is to continue normally, with no change in any input -c parameters except possibly tout and itask. -c (if itol, rtol, and/or atol are changed between calls -c with istate = 2, the new values will be used but not -c tested for legality.) -c 3 means this is not the first call, and the -c calculation is to continue normally, but with -c a change in input parameters other than -c tout and itask. changes are allowed in -c neq, itol, rtol, atol, iopt, lrw, liw, mf, ml, mu, -c and any of the optional inputs except h0. -c (see iwork description for ml and mu.) -c note.. a preliminary call with tout = t is not counted -c as a first call here, as no initialization or checking of -c input is done. (such a call is sometimes useful for the -c purpose of outputting the initial conditions.) -c thus the first call for which tout .ne. t requires -c istate = 1 on input. -c -c on output, istate has the following values and meanings. -c 1 means nothing was done, as tout was equal to t with -c istate = 1 on input. (however, an internal counter was -c set to detect and prevent repeated calls of this type.) -c 2 means the integration was performed successfully. -c -1 means an excessive amount of work (more than mxstep -c steps) was done on this call, before completing the -c requested task, but the integration was otherwise -c successful as far as t. (mxstep is an optional input -c and is normally 500.) to continue, the user may -c simply reset istate to a value .gt. 1 and call again -c (the excess work step counter will be reset to 0). -c in addition, the user may increase mxstep to avoid -c this error return (see below on optional inputs). -c -2 means too much accuracy was requested for the precision -c of the machine being used. 
this was detected before -c completing the requested task, but the integration -c was successful as far as t. to continue, the tolerance -c parameters must be reset, and istate must be set -c to 3. the optional output tolsf may be used for this -c purpose. (note.. if this condition is detected before -c taking any steps, then an illegal input return -c (istate = -3) occurs instead.) -c -3 means illegal input was detected, before taking any -c integration steps. see written message for details. -c note.. if the solver detects an infinite loop of calls -c to the solver with illegal input, it will cause -c the run to stop. -c -4 means there were repeated error test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c the problem may have a singularity, or the input -c may be inappropriate. -c -5 means there were repeated convergence test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c this may be caused by an inaccurate jacobian matrix, -c if one is being used. -c -6 means ewt(i) became zero for some i during the -c integration. pure relative error control (atol(i)=0.0) -c was requested on a variable which has now vanished. -c the integration was successful as far as t. -c -c note.. since the normal output value of istate is 2, -c it does not need to be reset for normal continuation. -c also, since a negative input value of istate will be -c regarded as illegal, a negative output value requires the -c user to change it, and possibly other inputs, before -c calling the solver again. -c -c iopt = an integer flag to specify whether or not any optional -c inputs are being used on this call. input only. -c the optional inputs are listed separately below. -c iopt = 0 means no optional inputs are being used. -c default values will be used in all cases. -c iopt = 1 means one or more optional inputs are being used. 
-c -c rwork = a real working array (double precision). -c the length of rwork must be at least -c 20 + nyh*(maxord + 1) + 3*neq + lwm where -c nyh = the initial value of neq, -c maxord = 12 (if meth = 1) or 5 (if meth = 2) (unless a -c smaller value is given as an optional input), -c lwm = 0 if miter = 0, -c lwm = neq**2 + 2 if miter is 1 or 2, -c lwm = neq + 2 if miter = 3, and -c lwm = (2*ml+mu+1)*neq + 2 if miter is 4 or 5. -c (see the mf description for meth and miter.) -c thus if maxord has its default value and neq is constant, -c this length is.. -c 20 + 16*neq for mf = 10, -c 22 + 16*neq + neq**2 for mf = 11 or 12, -c 22 + 17*neq for mf = 13, -c 22 + 17*neq + (2*ml+mu)*neq for mf = 14 or 15, -c 20 + 9*neq for mf = 20, -c 22 + 9*neq + neq**2 for mf = 21 or 22, -c 22 + 10*neq for mf = 23, -c 22 + 10*neq + (2*ml+mu)*neq for mf = 24 or 25. -c the first 20 words of rwork are reserved for conditional -c and optional inputs and optional outputs. -c -c the following word in rwork is a conditional input.. -c rwork(1) = tcrit = critical value of t which the solver -c is not to overshoot. required if itask is -c 4 or 5, and ignored otherwise. (see itask.) -c -c lrw = the length of the array rwork, as declared by the user. -c (this will be checked by the solver.) -c -c iwork = an integer work array. the length of iwork must be at least -c 20 if miter = 0 or 3 (mf = 10, 13, 20, 23), or -c 20 + neq otherwise (mf = 11, 12, 14, 15, 21, 22, 24, 25). -c the first few words of iwork are used for conditional and -c optional inputs and optional outputs. -c -c the following 2 words in iwork are conditional inputs.. -c iwork(1) = ml these are the lower and upper -c iwork(2) = mu half-bandwidths, respectively, of the -c banded jacobian, excluding the main diagonal. -c the band is defined by the matrix locations -c (i,j) with i-ml .le. j .le. i+mu. ml and mu -c must satisfy 0 .le. ml,mu .le. neq-1. -c these are required if miter is 4 or 5, and -c ignored otherwise. 
ml and mu may in fact be -c the band parameters for a matrix to which -c df/dy is only approximately equal. -c -c liw = the length of the array iwork, as declared by the user. -c (this will be checked by the solver.) -c -c note.. the work arrays must not be altered between calls to lsode -c for the same problem, except possibly for the conditional and -c optional inputs, and except for the last 3*neq words of rwork. -c the latter space is used for internal scratch space, and so is -c available for use by the user outside lsode between calls, if -c desired (but not for use by f or jac). -c -c jac = the name of the user-supplied routine (miter = 1 or 4) to -c compute the jacobian matrix, df/dy, as a function of -c the scalar t and the vector y. it is to have the form -c subroutine jac (neq, t, y, ml, mu, pd, nrowpd) -c dimension y(1), pd(nrowpd,1) -c where neq, t, y, ml, mu, and nrowpd are input and the array -c pd is to be loaded with partial derivatives (elements of -c the jacobian matrix) on output. pd must be given a first -c dimension of nrowpd. t and y have the same meaning as in -c subroutine f. (in the dimension statement above, 1 is a -c dummy dimension.. it can be replaced by any value.) -c in the full matrix case (miter = 1), ml and mu are -c ignored, and the jacobian is to be loaded into pd in -c columnwise manner, with df(i)/dy(j) loaded into pd(i,j). -c in the band matrix case (miter = 4), the elements -c within the band are to be loaded into pd in columnwise -c manner, with diagonal lines of df/dy loaded into the rows -c of pd. thus df(i)/dy(j) is to be loaded into pd(i-j+mu+1,j). -c ml and mu are the half-bandwidth parameters (see iwork). -c the locations in pd in the two triangular areas which -c correspond to nonexistent matrix elements can be ignored -c or loaded arbitrarily, as they are overwritten by lsode. -c jac need not provide df/dy exactly. a crude -c approximation (possibly with a smaller bandwidth) will do. 
-c in either case, pd is preset to zero by the solver, -c so that only the nonzero elements need be loaded by jac. -c each call to jac is preceded by a call to f with the same -c arguments neq, t, and y. thus to gain some efficiency, -c intermediate quantities shared by both calculations may be -c saved in a user common block by f and not recomputed by jac, -c if desired. also, jac may alter the y array, if desired. -c jac must be declared external in the calling program. -c subroutine jac may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in jac) and/or y has length exceeding neq(1). -c see the descriptions of neq and y above. -c -c mf = the method flag. used only for input. the legal values of -c mf are 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, and 25. -c mf has decimal digits meth and miter.. mf = 10*meth + miter. -c meth indicates the basic linear multistep method.. -c meth = 1 means the implicit adams method. -c meth = 2 means the method based on backward -c differentiation formulas (bdf-s). -c miter indicates the corrector iteration method.. -c miter = 0 means functional iteration (no jacobian matrix -c is involved). -c miter = 1 means chord iteration with a user-supplied -c full (neq by neq) jacobian. -c miter = 2 means chord iteration with an internally -c generated (difference quotient) full jacobian -c (using neq extra calls to f per df/dy value). -c miter = 3 means chord iteration with an internally -c generated diagonal jacobian approximation. -c (using 1 extra call to f per df/dy evaluation). -c miter = 4 means chord iteration with a user-supplied -c banded jacobian. -c miter = 5 means chord iteration with an internally -c generated banded jacobian (using ml+mu+1 extra -c calls to f per df/dy evaluation). -c if miter = 1 or 4, the user must supply a subroutine jac -c (the name is arbitrary) as described above under jac. -c for other values of miter, a dummy argument can be used. 
-c----------------------------------------------------------------------- -c optional inputs. -c -c the following is a list of the optional inputs provided for in the -c call sequence. (see also part ii.) for each such input variable, -c this table lists its name as used in this documentation, its -c location in the call sequence, its meaning, and the default value. -c the use of any of these inputs requires iopt = 1, and in that -c case all of these inputs are examined. a value of zero for any -c of these optional inputs will cause the default value to be used. -c thus to use a subset of the optional inputs, simply preload -c locations 5 to 10 in rwork and iwork to 0.0 and 0 respectively, and -c then set those of interest to nonzero values. -c -c name location meaning and default value -c -c h0 rwork(5) the step size to be attempted on the first step. -c the default value is determined by the solver. -c -c hmax rwork(6) the maximum absolute step size allowed. -c the default value is infinite. -c -c hmin rwork(7) the minimum absolute step size allowed. -c the default value is 0. (this lower bound is not -c enforced on the final step before reaching tcrit -c when itask = 4 or 5.) -c -c maxord iwork(5) the maximum order to be allowed. the default -c value is 12 if meth = 1, and 5 if meth = 2. -c if maxord exceeds the default value, it will -c be reduced to the default value. -c if maxord is changed during the problem, it may -c cause the current order to be reduced. -c -c mxstep iwork(6) maximum number of (internally defined) steps -c allowed during one call to the solver. -c the default value is 500. -c -c mxhnil iwork(7) maximum number of messages printed (per problem) -c warning that t + h = t on a step (h = step size). -c this must be positive to result in a non-default -c value. the default value is 10. -c----------------------------------------------------------------------- -c optional outputs. 
-c -c as optional additional output from lsode, the variables listed -c below are quantities related to the performance of lsode -c which are available to the user. these are communicated by way of -c the work arrays, but also have internal mnemonic names as shown. -c except where stated otherwise, all of these outputs are defined -c on any successful return from lsode, and on any return with -c istate = -1, -2, -4, -5, or -6. on an illegal input return -c (istate = -3), they will be unchanged from their existing values -c (if any), except possibly for tolsf, lenrw, and leniw. -c on any error return, outputs relevant to the error will be defined, -c as noted below. -c -c name location meaning -c -c hu rwork(11) the step size in t last used (successfully). -c -c hcur rwork(12) the step size to be attempted on the next step. -c -c tcur rwork(13) the current value of the independent variable -c which the solver has actually reached, i.e. the -c current internal mesh point in t. on output, tcur -c will always be at least as far as the argument -c t, but may be farther (if interpolation was done). -c -c tolsf rwork(14) a tolerance scale factor, greater than 1.0, -c computed when a request for too much accuracy was -c detected (istate = -3 if detected at the start of -c the problem, istate = -2 otherwise). if itol is -c left unaltered but rtol and atol are uniformly -c scaled up by a factor of tolsf for the next call, -c then the solver is deemed likely to succeed. -c (the user may also ignore tolsf and alter the -c tolerance parameters in any other way appropriate.) -c -c nst iwork(11) the number of steps taken for the problem so far. -c -c nfe iwork(12) the number of f evaluations for the problem so far. -c -c nje iwork(13) the number of jacobian evaluations (and of matrix -c lu decompositions) for the problem so far. -c -c nqu iwork(14) the method order last used (successfully). -c -c nqcur iwork(15) the order to be attempted on the next step. 
-c -c imxer iwork(16) the index of the component of largest magnitude in -c the weighted local error vector ( e(i)/ewt(i) ), -c on an error return with istate = -4 or -5. -c -c lenrw iwork(17) the length of rwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c leniw iwork(18) the length of iwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c the following two arrays are segments of the rwork array which -c may also be of interest to the user as optional outputs. -c for each array, the table below gives its internal name, -c its base address in rwork, and its description. -c -c name base address description -c -c yh 21 the nordsieck history array, of size nyh by -c (nqcur + 1), where nyh is the initial value -c of neq. for j = 0,1,...,nqcur, column j+1 -c of yh contains hcur**j/factorial(j) times -c the j-th derivative of the interpolating -c polynomial currently representing the solution, -c evaluated at t = tcur. -c -c acor lenrw-neq+1 array of size neq used for the accumulated -c corrections on each step, scaled on output -c to represent the estimated local error in y -c on the last step. this is the vector e in -c the description of the error control. it is -c defined only on a successful return from lsode. -c -c----------------------------------------------------------------------- -c part ii. other routines callable. -c -c the following are optional calls which the user may make to -c gain additional capabilities in conjunction with lsode. -c (the routines xsetun and xsetf are designed to conform to the -c slatec error handling package.) -c -c form of call function -c call xsetun(lun) set the logical unit number, lun, for -c output of messages from lsode, if -c the default is not desired. -c the default value of lun is 6. -c -c call xsetf(mflag) set a flag to control the printing of -c messages by lsode. 
-c mflag = 0 means do not print. (danger.. -c this risks losing valuable information.) -c mflag = 1 means print (the default). -c -c either of the above calls may be made at -c any time and will take effect immediately. -c -c call srcom(rsav,isav,job) saves and restores the contents of -c the internal common blocks used by -c lsode (see part iii below). -c rsav must be a real array of length 218 -c or more, and isav must be an integer -c array of length 41 or more. -c job=1 means save common into rsav/isav. -c job=2 means restore common from rsav/isav. -c srcom is useful if one is -c interrupting a run and restarting -c later, or alternating between two or -c more problems solved with lsode. -c -c call intdy(,,,,,) provide derivatives of y, of various -c (see below) orders, at a specified point t, if -c desired. it may be called only after -c a successful return from lsode. -c -c the detailed instructions for using intdy are as follows. -c the form of the call is.. -c -c call intdy (t, k, rwork(21), nyh, dky, iflag) -c -c the input parameters are.. -c -c t = value of independent variable where answers are desired -c (normally the same as the t last returned by lsode). -c for valid results, t must lie between tcur - hu and tcur. -c (see optional outputs for tcur and hu.) -c k = integer order of the derivative desired. k must satisfy -c 0 .le. k .le. nqcur, where nqcur is the current order -c (see optional outputs). the capability corresponding -c to k = 0, i.e. computing y(t), is already provided -c by lsode directly. since nqcur .ge. 1, the first -c derivative dy/dt is always available with intdy. -c rwork(21) = the base address of the history array yh. -c nyh = column length of yh, equal to the initial value of neq. -c -c the output parameters are.. -c -c dky = a real array of length neq containing the computed value -c of the k-th derivative of y(t). -c iflag = integer flag, returned as 0 if k and t were legal, -c -1 if k was illegal, and -2 if t was illegal. 
-c on an error return, a message is also written. -c----------------------------------------------------------------------- -c part iii. common blocks. -c -c if lsode is to be used in an overlay situation, the user -c must declare, in the primary overlay, the variables in.. -c (1) the call sequence to lsode, -c (2) the two internal common blocks -c /ls0001/ of length 257 (218 double precision words -c followed by 39 integer words), -c /eh0001/ of length 2 (integer words). -c -c if lsode is used on a system in which the contents of internal -c common blocks are not preserved between calls, the user should -c declare the above two common blocks in his main program to insure -c that their contents are preserved. -c -c if the solution of a given problem by lsode is to be interrupted -c and then later continued, such as when restarting an interrupted run -c or alternating between two or more problems, the user should save, -c following the return from the last lsode call prior to the -c interruption, the contents of the call sequence variables and the -c internal common blocks, and later restore these values before the -c next lsode call for that problem. to save and restore the common -c blocks, use subroutine srcom (see part ii above). -c -c----------------------------------------------------------------------- -c part iv. optionally replaceable solver routines. -c -c below are descriptions of two routines in the lsode package which -c relate to the measurement of errors. either routine can be -c replaced by a user-supplied version, if desired. however, since such -c a replacement may have a major impact on performance, it should be -c done only when absolutely necessary, and only with great caution. -c (note.. the means by which the package version of a routine is -c superseded by the user-s version may be system-dependent.) -c -c (a) ewset. 
-c the following subroutine is called just before each internal -c integration step, and sets the array of error weights, ewt, as -c described under itol/rtol/atol above.. -c subroutine ewset (neq, itol, rtol, atol, ycur, ewt) -c where neq, itol, rtol, and atol are as in the lsode call sequence, -c ycur contains the current dependent variable vector, and -c ewt is the array of weights set by ewset. -c -c if the user supplies this subroutine, it must return in ewt(i) -c (i = 1,...,neq) a positive quantity suitable for comparing errors -c in y(i) to. the ewt array returned by ewset is passed to the -c vnorm routine (see below), and also used by lsode in the computation -c of the optional output imxer, the diagonal jacobian approximation, -c and the increments for difference quotient jacobians. -c -c in the user-supplied version of ewset, it may be desirable to use -c the current values of derivatives of y. derivatives up to order nq -c are available from the history array yh, described above under -c optional outputs. in ewset, yh is identical to the ycur array, -c extended to nq + 1 columns with a column length of nyh and scale -c factors of h**j/factorial(j). on the first call for the problem, -c given by nst = 0, nq is 1 and h is temporarily set to 1.0. -c the quantities nq, nyh, h, and nst can be obtained by including -c in ewset the statements.. -c double precision h, rls -c common /ls0001/ rls(218),ils(39) -c nq = ils(35) -c nyh = ils(14) -c nst = ils(36) -c h = rls(212) -c thus, for example, the current value of dy/dt can be obtained as -c ycur(nyh+i)/h (i=1,...,neq) (and the division by h is -c unnecessary when nst = 0). -c -c (b) vnorm. -c the following is a real function routine which computes the weighted -c root-mean-square norm of a vector v.. -c d = vnorm (n, v, w) -c where.. -c n = the length of the vector, -c v = real array of length n containing the vector, -c w = real array of length n containing weights, -c d = sqrt( (1/n) * sum(v(i)*w(i))**2 ). 
-c vnorm is called with n = neq and with w(i) = 1.0/ewt(i), where -c ewt is as set by subroutine ewset. -c -c if the user supplies this function, it should return a non-negative -c value of vnorm suitable for use in the error control in lsode. -c none of the arguments should be altered by vnorm. -c for example, a user-supplied vnorm routine might.. -c -substitute a max-norm of (v(i)*w(i)) for the rms-norm, or -c -ignore some components of v in the norm, with the effect of -c suppressing the error control on those components of y. -c----------------------------------------------------------------------- -c----------------------------------------------------------------------- -c other routines in the lsode package. -c -c in addition to subroutine lsode, the lsode package includes the -c following subroutines and function routines.. -c intdy computes an interpolated value of the y vector at t = tout. -c stode is the core integrator, which does one step of the -c integration and the associated error control. -c cfode sets all method coefficients and test constants. -c prepj computes and preprocesses the jacobian matrix j = df/dy -c and the newton iteration matrix p = i - h*l0*j. -c solsy manages solution of linear system in chord iteration. -c ewset sets the error weight vector ewt before each step. -c vnorm computes the weighted r.m.s. norm of a vector. -c srcom is a user-callable routine to save and restore -c the contents of the internal common blocks. -c dgefa and dgesl are routines from linpack for solving full -c systems of linear algebraic equations. -c dgbfa and dgbsl are routines from linpack for solving banded -c linear systems. -c daxpy, dscal, idamax, and ddot are basic linear algebra modules -c (blas) used by the above linpack routines. -c d1mach computes the unit roundoff in a machine-independent manner. -c xerrwv, xsetun, and xsetf handle the printing of all error -c messages and warnings. xerrwv is machine-dependent. -c note.. 
vnorm, idamax, ddot, and d1mach are function routines. -c all the others are subroutines. -c -c the intrinsic and external routines used by lsode are.. -c dabs, dmax1, dmin1, dfloat, max0, min0, mod, dsign, dsqrt, and write. -c -c a block data subprogram is also included with the package, -c for loading some of the variables in internal common. -c -c----------------------------------------------------------------------- -c the following card is for optimized compilation on llnl compilers. -clll. optimize -c----------------------------------------------------------------------- - external prepj, solsy - integer illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer i, i1, i2, iflag, imxer, kgo, lf0, - 1 leniw, lenrw, lenwm, ml, mord, mu, mxhnl0, mxstp0 - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision atoli, ayi, big, ewti, h0, hmax, hmx, rh, rtoli, - 1 tcrit, tdist, tnext, tol, tolsf, tp, size, sum, w0, - 2 d1mach, vnorm - dimension mord(2) - logical ihit -c----------------------------------------------------------------------- -c the following internal common block contains -c (a) variables which are local to any subroutine but whose values must -c be preserved between calls to the routine (own variables), and -c (b) variables which are communicated between subroutines. -c the structure of the block is as follows.. all real variables are -c listed first, followed by all integers. within each type, the -c variables are grouped with those local to subroutine lsode first, -c then those local to subroutine stode, and finally those used -c for communication. the block is declared in subroutines -c lsode, intdy, stode, prepj, and solsy. 
groups of variables are -c replaced by dummy arrays in the common declarations in routines -c where those variables are not used. -c----------------------------------------------------------------------- - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu -c - data mord(1),mord(2)/12,5/, mxstp0/500/, mxhnl0/10/ -c----------------------------------------------------------------------- -c block a. -c this code block is executed on every call. -c it tests istate and itask for legality and branches appropriately. -c if istate .gt. 1 but the flag init shows that initialization has -c not yet been done, an error return occurs. -c if istate = 1 and tout = t, jump to block g and return immediately. -c----------------------------------------------------------------------- - if (istate .lt. 1 .or. istate .gt. 3) go to 601 - if (itask .lt. 1 .or. itask .gt. 5) go to 602 - if (istate .eq. 1) go to 10 - if (init .eq. 0) go to 603 - if (istate .eq. 2) go to 200 - go to 20 - 10 init = 0 - if (tout .eq. t) go to 430 - 20 ntrep = 0 -c----------------------------------------------------------------------- -c block b. -c the next code block is executed for the initial call (istate = 1), -c or for a continuation call with parameter changes (istate = 3). -c it contains checking of all inputs and various initializations. -c -c first check legality of the non-optional inputs neq, itol, iopt, -c mf, ml, and mu. -c----------------------------------------------------------------------- - if (neq(1) .le. 0) go to 604 - if (istate .eq. 1) go to 25 - if (neq(1) .gt. n) go to 605 - 25 n = neq(1) - if (itol .lt. 1 .or. itol .gt. 4) go to 606 - if (iopt .lt. 0 .or. iopt .gt. 1) go to 607 - meth = mf/10 - miter = mf - 10*meth - if (meth .lt. 
1 .or. meth .gt. 2) go to 608 - if (miter .lt. 0 .or. miter .gt. 5) go to 608 - if (miter .le. 3) go to 30 - ml = iwork(1) - mu = iwork(2) - if (ml .lt. 0 .or. ml .ge. n) go to 609 - if (mu .lt. 0 .or. mu .ge. n) go to 610 - 30 continue -c next process and check the optional inputs. -------------------------- - if (iopt .eq. 1) go to 40 - maxord = mord(meth) - mxstep = mxstp0 - mxhnil = mxhnl0 - if (istate .eq. 1) h0 = 0.0d0 - hmxi = 0.0d0 - hmin = 0.0d0 - go to 60 - 40 maxord = iwork(5) - if (maxord .lt. 0) go to 611 - if (maxord .eq. 0) maxord = 100 - maxord = min0(maxord,mord(meth)) - mxstep = iwork(6) - if (mxstep .lt. 0) go to 612 - if (mxstep .eq. 0) mxstep = mxstp0 - mxhnil = iwork(7) - if (mxhnil .lt. 0) go to 613 - if (mxhnil .eq. 0) mxhnil = mxhnl0 - if (istate .ne. 1) go to 50 - h0 = rwork(5) - if ((tout - t)*h0 .lt. 0.0d0) go to 614 - 50 hmax = rwork(6) - if (hmax .lt. 0.0d0) go to 615 - hmxi = 0.0d0 - if (hmax .gt. 0.0d0) hmxi = 1.0d0/hmax - hmin = rwork(7) - if (hmin .lt. 0.0d0) go to 616 -c----------------------------------------------------------------------- -c set work array pointers and check lengths lrw and liw. -c pointers to segments of rwork and iwork are named by prefixing l to -c the name of the segment. e.g., the segment yh starts at rwork(lyh). -c segments of rwork (in order) are denoted yh, wm, ewt, savf, acor. -c----------------------------------------------------------------------- - 60 lyh = 21 - if (istate .eq. 1) nyh = n - lwm = lyh + (maxord + 1)*nyh - if (miter .eq. 0) lenwm = 0 - if (miter .eq. 1 .or. miter .eq. 2) lenwm = n*n + 2 - if (miter .eq. 3) lenwm = n + 2 - if (miter .ge. 4) lenwm = (2*ml + mu + 1)*n + 2 - lewt = lwm + lenwm - lsavf = lewt + n - lacor = lsavf + n - lenrw = lacor + n - 1 - iwork(17) = lenrw - liwm = 1 - leniw = 20 + n - if (miter .eq. 0 .or. miter .eq. 3) leniw = 20 - iwork(18) = leniw - if (lenrw .gt. lrw) go to 617 - if (leniw .gt. liw) go to 618 -c check rtol and atol for legality. 
------------------------------------ - rtoli = rtol(1) - atoli = atol(1) - do 70 i = 1,n - if (itol .ge. 3) rtoli = rtol(i) - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - if (rtoli .lt. 0.0d0) go to 619 - if (atoli .lt. 0.0d0) go to 620 - 70 continue - if (istate .eq. 1) go to 100 -c if istate = 3, set flag to signal parameter changes to stode. -------- - jstart = -1 - if (nq .le. maxord) go to 90 -c maxord was reduced below nq. copy yh(*,maxord+2) into savf. --------- - do 80 i = 1,n - 80 rwork(i+lsavf-1) = rwork(i+lwm-1) -c reload wm(1) = rwork(lwm), since lwm may have changed. --------------- - 90 if (miter .gt. 0) rwork(lwm) = dsqrt(uround) - if (n .eq. nyh) go to 200 -c neq was reduced. zero part of yh to avoid undefined references. ----- - i1 = lyh + l*nyh - i2 = lyh + (maxord + 1)*nyh - 1 - if (i1 .gt. i2) go to 200 - do 95 i = i1,i2 - 95 rwork(i) = 0.0d0 - go to 200 -c----------------------------------------------------------------------- -c block c. -c the next block is for the initial call only (istate = 1). -c it contains all remaining initializations, the initial call to f, -c and the calculation of the initial step size. -c the error weights in ewt are inverted after being loaded. -c----------------------------------------------------------------------- - 100 uround = d1mach(4) - tn = t - if (itask .ne. 4 .and. itask .ne. 5) go to 110 - tcrit = rwork(1) - if ((tcrit - tout)*(tout - t) .lt. 0.0d0) go to 625 - if (h0 .ne. 0.0d0 .and. (t + h0 - tcrit)*h0 .gt. 0.0d0) - 1 h0 = tcrit - t - 110 jstart = 0 - if (miter .gt. 0) rwork(lwm) = dsqrt(uround) - nhnil = 0 - nst = 0 - nje = 0 - nslast = 0 - hu = 0.0d0 - nqu = 0 - ccmax = 0.3d0 - maxcor = 3 - msbp = 20 - mxncf = 10 -c initial call to f. (lf0 points to yh(*,2).) ------------------------- - lf0 = lyh + nyh - call f (neq, t, y, rwork(lf0)) - nfe = 1 -c load the initial value vector in yh. --------------------------------- - do 115 i = 1,n - 115 rwork(i+lyh-1) = y(i) -c load and invert the ewt array. 
(h is temporarily set to 1.0.) ------- - nq = 1 - h = 1.0d0 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 120 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 621 - 120 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) -c----------------------------------------------------------------------- -c the coding below computes the step size, h0, to be attempted on the -c first step, unless the user has supplied a value for this. -c first check that tout - t differs significantly from zero. -c a scalar tolerance quantity tol is computed, as max(rtol(i)) -c if this is positive, or max(atol(i)/abs(y(i))) otherwise, adjusted -c so as to be between 100*uround and 1.0e-3. -c then the computed value h0 is given by.. -c neq -c h0**2 = tol / ( w0**-2 + (1/neq) * sum ( f(i)/ywt(i) )**2 ) -c 1 -c where w0 = max ( abs(t), abs(tout) ), -c f(i) = i-th component of initial value of f, -c ywt(i) = ewt(i)/tol (a weight for y(i)). -c the sign of h0 is inferred from the initial values of tout and t. -c----------------------------------------------------------------------- - if (h0 .ne. 0.0d0) go to 180 - tdist = dabs(tout - t) - w0 = dmax1(dabs(t),dabs(tout)) - if (tdist .lt. 2.0d0*uround*w0) go to 622 - tol = rtol(1) - if (itol .le. 2) go to 140 - do 130 i = 1,n - 130 tol = dmax1(tol,rtol(i)) - 140 if (tol .gt. 0.0d0) go to 160 - atoli = atol(1) - do 150 i = 1,n - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - ayi = dabs(y(i)) - if (ayi .ne. 0.0d0) tol = dmax1(tol,atoli/ayi) - 150 continue - 160 tol = dmax1(tol,100.0d0*uround) - tol = dmin1(tol,0.001d0) - sum = vnorm (n, rwork(lf0), rwork(lewt)) - sum = 1.0d0/(tol*w0*w0) + tol*sum**2 - h0 = 1.0d0/dsqrt(sum) - h0 = dmin1(h0,tdist) - h0 = dsign(h0,tout-t) -c adjust h0 if necessary to meet hmax bound. --------------------------- - 180 rh = dabs(h0)*hmxi - if (rh .gt. 1.0d0) h0 = h0/rh -c load h with h0 and scale yh(*,2) by h0. 
------------------------------ - h = h0 - do 190 i = 1,n - 190 rwork(i+lf0-1) = h0*rwork(i+lf0-1) - go to 270 -c----------------------------------------------------------------------- -c block d. -c the next code block is for continuation calls only (istate = 2 or 3) -c and is to check stop conditions before taking a step. -c----------------------------------------------------------------------- - 200 nslast = nst - go to (210, 250, 220, 230, 240), itask - 210 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 220 tp = tn - hu*(1.0d0 + 100.0d0*uround) - if ((tp - tout)*h .gt. 0.0d0) go to 623 - if ((tn - tout)*h .lt. 0.0d0) go to 250 - go to 400 - 230 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - if ((tcrit - tout)*h .lt. 0.0d0) go to 625 - if ((tn - tout)*h .lt. 0.0d0) go to 245 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 240 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - 245 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (istate .eq. 2) jstart = -2 -c----------------------------------------------------------------------- -c block e. -c the next block is normally executed for all calls and contains -c the call to the one-step core integrator stode. -c -c this is a looping point for the integration steps. -c -c first check for too many steps being taken, update ewt (if not at -c start of problem), check for too much accuracy being requested, and -c check for h below the roundoff level in t. -c----------------------------------------------------------------------- - 250 continue - if ((nst-nslast) .ge. 
mxstep) go to 500 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 260 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 510 - 260 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 270 tolsf = uround*vnorm (n, rwork(lyh), rwork(lewt)) - if (tolsf .le. 1.0d0) go to 280 - tolsf = tolsf*2.0d0 - if (nst .eq. 0) go to 626 - go to 520 - 280 if ((tn + h) .ne. tn) go to 290 - nhnil = nhnil + 1 - if (nhnil .gt. mxhnil) go to 290 - call xerrwv('lsode-- warning..internal t (=r1) and h (=r2) are', - 1 50, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' such that in the machine, t + h = t on the next step ', - 1 60, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' (h = step size). solver will continue anyway', - 1 50, 101, 0, 0, 0, 0, 2, tn, h) - if (nhnil .lt. mxhnil) go to 290 - call xerrwv('lsode-- above warning has been issued i1 times. ', - 1 50, 102, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' it will not be issued again for this problem', - 1 50, 102, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - 290 continue -c----------------------------------------------------------------------- -c call stode(neq,y,yh,nyh,yh,ewt,savf,acor,wm,iwm,f,jac,prepj,solsy) -c----------------------------------------------------------------------- - call stode (neq, y, rwork(lyh), nyh, rwork(lyh), rwork(lewt), - 1 rwork(lsavf), rwork(lacor), rwork(lwm), iwork(liwm), - 2 f, jac, prepj, solsy) - kgo = 1 - kflag - go to (300, 530, 540), kgo -c----------------------------------------------------------------------- -c block f. -c the following block handles the case of a successful return from the -c core integrator (kflag = 0). test for stop conditions. -c----------------------------------------------------------------------- - 300 init = 1 - go to (310, 400, 330, 340, 350), itask -c itask = 1. if tout has been reached, interpolate. ------------------- - 310 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 -c itask = 3. 
jump to exit if tout was reached. ------------------------ - 330 if ((tn - tout)*h .ge. 0.0d0) go to 400 - go to 250 -c itask = 4. see if tout or tcrit was reached. adjust h if necessary. - 340 if ((tn - tout)*h .lt. 0.0d0) go to 345 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 - 345 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - jstart = -2 - go to 250 -c itask = 5. see if tcrit was reached and jump to exit. --------------- - 350 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx -c----------------------------------------------------------------------- -c block g. -c the following block handles all successful returns from lsode. -c if itask .ne. 1, y is loaded from yh and t is set accordingly. -c istate is set to 2, the illegal input counter is zeroed, and the -c optional outputs are loaded into the work arrays before returning. -c if istate = 1 and tout = t, there is a return with no action taken, -c except that if this has happened repeatedly, the run is terminated. -c----------------------------------------------------------------------- - 400 do 410 i = 1,n - 410 y(i) = rwork(i+lyh-1) - t = tn - if (itask .ne. 4 .and. itask .ne. 5) go to 420 - if (ihit) t = tcrit - 420 istate = 2 - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - return -c - 430 ntrep = ntrep + 1 - if (ntrep .lt. 5) return - call xerrwv( - 1 'lsode-- repeated calls with istate = 1 and tout = t (=r1) ', - 1 60, 301, 0, 0, 0, 0, 1, t, 0.0d0) - go to 800 -c----------------------------------------------------------------------- -c block h. -c the following block handles all unsuccessful returns other than -c those for illegal input. 
first the error message routine is called. -c if there was an error test or convergence test failure, imxer is set. -c then y is loaded from yh, t is set to tn, and the illegal input -c counter illin is set to 0. the optional outputs are loaded into -c the work arrays before returning. -c----------------------------------------------------------------------- -c the maximum number of steps was taken before reaching tout. ---------- - 500 call xerrwv('lsode-- at current t (=r1), mxstep (=i1) steps ', - 1 50, 201, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' taken on this call before reaching tout ', - 1 50, 201, 0, 1, mxstep, 0, 1, tn, 0.0d0) - istate = -1 - go to 580 -c ewt(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 ewti = rwork(lewt+i-1) - call xerrwv('lsode-- at t (=r1), ewt(i1) has become r2 .le. 0.', - 1 50, 202, 0, 1, i, 0, 2, tn, ewti) - istate = -6 - go to 580 -c too much accuracy requested for machine precision. ------------------- - 520 call xerrwv('lsode-- at t (=r1), too much accuracy requested ', - 1 50, 203, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' for precision of machine.. see tolsf (=r2) ', - 1 50, 203, 0, 0, 0, 0, 2, tn, tolsf) - rwork(14) = tolsf - istate = -2 - go to 580 -c kflag = -1. error test failed repeatedly or with abs(h) = hmin. ----- - 530 call xerrwv('lsode-- at t(=r1) and step size h(=r2), the error', - 1 50, 204, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' test failed repeatedly or with abs(h) = hmin', - 1 50, 204, 0, 0, 0, 0, 2, tn, h) - istate = -4 - go to 560 -c kflag = -2. convergence failed repeatedly or with abs(h) = hmin. ---- - 540 call xerrwv('lsode-- at t (=r1) and step size h (=r2), the ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' corrector convergence failed repeatedly ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' or with abs(h) = hmin ', - 1 30, 205, 0, 0, 0, 0, 2, tn, h) - istate = -5 -c compute imxer if relevant. 
------------------------------------------- - 560 big = 0.0d0 - imxer = 1 - do 570 i = 1,n - size = dabs(rwork(i+lacor-1)*rwork(i+lewt-1)) - if (big .ge. size) go to 570 - big = size - imxer = i - 570 continue - iwork(16) = imxer -c set y vector, t, illin, and optional outputs. ------------------------ - 580 do 590 i = 1,n - 590 y(i) = rwork(i+lyh-1) - t = tn - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - return -c----------------------------------------------------------------------- -c block i. -c the following block handles all error returns due to illegal input -c (istate = -3), as detected before calling the core integrator. -c first the error message routine is called. then if there have been -c 5 consecutive such returns just before this call to the solver, -c the run is halted. -c----------------------------------------------------------------------- - 601 call xerrwv('lsode-- istate (=i1) illegal ', - 1 30, 1, 0, 1, istate, 0, 0, 0.0d0, 0.0d0) - go to 700 - 602 call xerrwv('lsode-- itask (=i1) illegal ', - 1 30, 2, 0, 1, itask, 0, 0, 0.0d0, 0.0d0) - go to 700 - 603 call xerrwv('lsode-- istate .gt. 1 but lsode not initialized ', - 1 50, 3, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - go to 700 - 604 call xerrwv('lsode-- neq (=i1) .lt. 1 ', - 1 30, 4, 0, 1, neq(1), 0, 0, 0.0d0, 0.0d0) - go to 700 - 605 call xerrwv('lsode-- istate = 3 and neq increased (i1 to i2) ', - 1 50, 5, 0, 2, n, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 606 call xerrwv('lsode-- itol (=i1) illegal ', - 1 30, 6, 0, 1, itol, 0, 0, 0.0d0, 0.0d0) - go to 700 - 607 call xerrwv('lsode-- iopt (=i1) illegal ', - 1 30, 7, 0, 1, iopt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 608 call xerrwv('lsode-- mf (=i1) illegal ', - 1 30, 8, 0, 1, mf, 0, 0, 0.0d0, 0.0d0) - go to 700 - 609 call xerrwv('lsode-- ml (=i1) illegal.. 
.lt.0 or .ge.neq (=i2)', - 1 50, 9, 0, 2, ml, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 610 call xerrwv('lsode-- mu (=i1) illegal.. .lt.0 or .ge.neq (=i2)', - 1 50, 10, 0, 2, mu, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 611 call xerrwv('lsode-- maxord (=i1) .lt. 0 ', - 1 30, 11, 0, 1, maxord, 0, 0, 0.0d0, 0.0d0) - go to 700 - 612 call xerrwv('lsode-- mxstep (=i1) .lt. 0 ', - 1 30, 12, 0, 1, mxstep, 0, 0, 0.0d0, 0.0d0) - go to 700 - 613 call xerrwv('lsode-- mxhnil (=i1) .lt. 0 ', - 1 30, 13, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - go to 700 - 614 call xerrwv('lsode-- tout (=r1) behind t (=r2) ', - 1 40, 14, 0, 0, 0, 0, 2, tout, t) - call xerrwv(' integration direction is given by h0 (=r1) ', - 1 50, 14, 0, 0, 0, 0, 1, h0, 0.0d0) - go to 700 - 615 call xerrwv('lsode-- hmax (=r1) .lt. 0.0 ', - 1 30, 15, 0, 0, 0, 0, 1, hmax, 0.0d0) - go to 700 - 616 call xerrwv('lsode-- hmin (=r1) .lt. 0.0 ', - 1 30, 16, 0, 0, 0, 0, 1, hmin, 0.0d0) - go to 700 - 617 call xerrwv( - 1 'lsode-- rwork length needed, lenrw (=i1), exceeds lrw (=i2)', - 1 60, 17, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 618 call xerrwv( - 1 'lsode-- iwork length needed, leniw (=i1), exceeds liw (=i2)', - 1 60, 18, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - go to 700 - 619 call xerrwv('lsode-- rtol(i1) is r1 .lt. 0.0 ', - 1 40, 19, 0, 1, i, 0, 1, rtoli, 0.0d0) - go to 700 - 620 call xerrwv('lsode-- atol(i1) is r1 .lt. 0.0 ', - 1 40, 20, 0, 1, i, 0, 1, atoli, 0.0d0) - go to 700 - 621 ewti = rwork(lewt+i-1) - call xerrwv('lsode-- ewt(i1) is r1 .le. 
0.0 ', - 1 40, 21, 0, 1, i, 0, 1, ewti, 0.0d0) - go to 700 - 622 call xerrwv( - 1 'lsode-- tout (=r1) too close to t(=r2) to start integration', - 1 60, 22, 0, 0, 0, 0, 2, tout, t) - go to 700 - 623 call xerrwv( - 1 'lsode-- itask = i1 and tout (=r1) behind tcur - hu (= r2) ', - 1 60, 23, 0, 1, itask, 0, 2, tout, tp) - go to 700 - 624 call xerrwv( - 1 'lsode-- itask = 4 or 5 and tcrit (=r1) behind tcur (=r2) ', - 1 60, 24, 0, 0, 0, 0, 2, tcrit, tn) - go to 700 - 625 call xerrwv( - 1 'lsode-- itask = 4 or 5 and tcrit (=r1) behind tout (=r2) ', - 1 60, 25, 0, 0, 0, 0, 2, tcrit, tout) - go to 700 - 626 call xerrwv('lsode-- at start of problem, too much accuracy ', - 1 50, 26, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' requested for precision of machine.. see tolsf (=r1) ', - 1 60, 26, 0, 0, 0, 0, 1, tolsf, 0.0d0) - rwork(14) = tolsf - go to 700 - 627 call xerrwv('lsode-- trouble from intdy. itask = i1, tout = r1', - 1 50, 27, 0, 1, itask, 0, 1, tout, 0.0d0) -c - 700 if (illin .eq. 5) go to 710 - illin = illin + 1 - istate = -3 - return - 710 call xerrwv('lsode-- repeated occurrences of illegal input ', - 1 50, 302, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 800 call xerrwv('lsode-- run aborted.. 
apparent infinite loop ', - 1 50, 303, 2, 0, 0, 0, 0, 0.0d0, 0.0d0) - return -c----------------------- end of subroutine lsode ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/lsodes.f b/scipy-0.10.1/scipy/integrate/odepack/lsodes.f deleted file mode 100644 index dd6b0f5947..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/lsodes.f +++ /dev/null @@ -1,1990 +0,0 @@ - subroutine lsodes (f, neq, y, t, tout, itol, rtol, atol, itask, - 1 istate, iopt, rwork, lrw, iwork, liw, jac, mf) - external f, jac - integer neq, itol, itask, istate, iopt, lrw, iwork, liw, mf - double precision y, t, tout, rtol, atol, rwork - dimension neq(1), y(1), rtol(1), atol(1), rwork(lrw), iwork(liw) -c----------------------------------------------------------------------- -c this is the march 30, 1987 version of -c lsodes.. livermore solver for ordinary differential equations -c with general sparse jacobian matrices. -c this version is in double precision. -c -c lsodes solves the initial value problem for stiff or nonstiff -c systems of first order ode-s, -c dy/dt = f(t,y) , or, in component form, -c dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(neq)) (i = 1,...,neq). -c lsodes is a variant of the lsode package, and is intended for -c problems in which the jacobian matrix df/dy has an arbitrary -c sparse structure (when the problem is stiff). -c -c authors.. alan c. hindmarsh, -c computing and mathematics research division, l-316 -c lawrence livermore national laboratory -c livermore, ca 94550. -c -c and andrew h. sherman -c j. s. nolen and associates -c houston, tx 77084 -c----------------------------------------------------------------------- -c references.. -c 1. alan c. hindmarsh, odepack, a systematized collection of ode -c solvers, in scientific computing, r. s. stepleman et al. (eds.), -c north-holland, amsterdam, 1983, pp. 55-64. -c -c 2. s. c. eisenstat, m. c. gursky, m. h. schultz, and a. h. sherman, -c yale sparse matrix package.. i. 
the symmetric codes, -c int. j. num. meth. eng., 18 (1982), pp. 1145-1151. -c -c 3. s. c. eisenstat, m. c. gursky, m. h. schultz, and a. h. sherman, -c yale sparse matrix package.. ii. the nonsymmetric codes, -c research report no. 114, dept. of computer sciences, yale -c university, 1977. -c----------------------------------------------------------------------- -c summary of usage. -c -c communication between the user and the lsodes package, for normal -c situations, is summarized here. this summary describes only a subset -c of the full set of options available. see the full description for -c details, including optional communication, nonstandard options, -c and instructions for special situations. see also the example -c problem (with program and output) following this summary. -c -c a. first provide a subroutine of the form.. -c subroutine f (neq, t, y, ydot) -c dimension y(neq), ydot(neq) -c which supplies the vector function f by loading ydot(i) with f(i). -c -c b. next determine (or guess) whether or not the problem is stiff. -c stiffness occurs when the jacobian matrix df/dy has an eigenvalue -c whose real part is negative and large in magnitude, compared to the -c reciprocal of the t span of interest. if the problem is nonstiff, -c use a method flag mf = 10. if it is stiff, there are two standard -c for the method flag, mf = 121 and mf = 222. in both cases, lsodes -c requires the jacobian matrix in some form, and it treats this matrix -c in general sparse form, with sparsity structure determined internally. -c (for options where the user supplies the sparsity structure, see -c the full description of mf below.) -c -c c. if the problem is stiff, you are encouraged to supply the jacobian -c directly (mf = 121), but if this is not feasible, lsodes will -c compute it internally by difference quotients (mf = 222). -c if you are supplying the jacobian, provide a subroutine of the form.. 
-c subroutine jac (neq, t, y, j, ian, jan, pdj) -c dimension y(1), ian(1), jan(1), pdj(1) -c here neq, t, y, and j are input arguments, and the jac routine is to -c load the array pdj (of length neq) with the j-th column of df/dy. -c i.e., load pdj(i) with df(i)/dy(j) for all relevant values of i. -c the arguments ian and jan should be ignored for normal situations. -c lsodes will call the jac routine with j = 1,2,...,neq. -c only nonzero elements need be loaded. usually, a crude approximation -c to df/dy, possibly with fewer nonzero elements, will suffice. -c -c d. write a main program which calls subroutine lsodes once for -c each point at which answers are desired. this should also provide -c for possible use of logical unit 6 for output of error messages -c by lsodes. on the first call to lsodes, supply arguments as follows.. -c f = name of subroutine for right-hand side vector f. -c this name must be declared external in calling program. -c neq = number of first order ode-s. -c y = array of initial values, of length neq. -c t = the initial value of the independent variable. -c tout = first point where output is desired (.ne. t). -c itol = 1 or 2 according as atol (below) is a scalar or array. -c rtol = relative tolerance parameter (scalar). -c atol = absolute tolerance parameter (scalar or array). -c the estimated local error in y(i) will be controlled so as -c to be roughly less (in magnitude) than -c ewt(i) = rtol*abs(y(i)) + atol if itol = 1, or -c ewt(i) = rtol*abs(y(i)) + atol(i) if itol = 2. -c thus the local error test passes if, in each component, -c either the absolute error is less than atol (or atol(i)), -c or the relative error is less than rtol. -c use rtol = 0.0 for pure absolute error control, and -c use atol = 0.0 (or atol(i) = 0.0) for pure relative error -c control. caution.. actual (global) errors may exceed these -c local tolerances, so choose them conservatively. -c itask = 1 for normal computation of output values of y at t = tout. 
-c istate = integer flag (input and output). set istate = 1. -c iopt = 0 to indicate no optional inputs used. -c rwork = real work array of length at least.. -c 20 + 16*neq for mf = 10, -c 20 + (2 + 1./lenrat)*nnz + (11 + 9./lenrat)*neq -c for mf = 121 or 222, -c where.. -c nnz = the number of nonzero elements in the sparse -c jacobian (if this is unknown, use an estimate), and -c lenrat = the real to integer wordlength ratio (usually 1 in -c single precision and 2 in double precision). -c in any case, the required size of rwork cannot generally -c be predicted in advance if mf = 121 or 222, and the value -c above is a rough estimate of a crude lower bound. some -c experimentation with this size may be necessary. -c (when known, the correct required length is an optional -c output, available in iwork(17).) -c lrw = declared length of rwork (in user-s dimension). -c iwork = integer work array of length at least 30. -c liw = declared length of iwork (in user-s dimension). -c jac = name of subroutine for jacobian matrix (mf = 121). -c if used, this name must be declared external in calling -c program. if not used, pass a dummy name. -c mf = method flag. standard values are.. -c 10 for nonstiff (adams) method, no jacobian used. -c 121 for stiff (bdf) method, user-supplied sparse jacobian. -c 222 for stiff method, internally generated sparse jacobian. -c note that the main program must declare arrays y, rwork, iwork, -c and possibly atol. -c -c e. the output from the first call (or any call) is.. -c y = array of computed values of y(t) vector. -c t = corresponding value of independent variable (normally tout). -c istate = 2 if lsodes was successful, negative otherwise. -c -1 means excess work done on this call (perhaps wrong mf). -c -2 means excess accuracy requested (tolerances too small). -c -3 means illegal input detected (see printed message). -c -4 means repeated error test failures (check all inputs). 
-c -5 means repeated convergence failures (perhaps bad jacobian -c supplied or wrong choice of mf or tolerances). -c -6 means error weight became zero during problem. (solution -c component i vanished, and atol or atol(i) = 0.) -c -7 means a fatal error return flag came from the sparse -c solver cdrv by way of prjs or slss. should never happen. -c a return with istate = -1, -4, or -5 may result from using -c an inappropriate sparsity structure, one that is quite -c different from the initial structure. consider calling -c lsodes again with istate = 3 to force the structure to be -c reevaluated. see the full description of istate below. -c -c f. to continue the integration after a successful return, simply -c reset tout and call lsodes again. no other parameters need be reset. -c -c----------------------------------------------------------------------- -c example problem. -c -c the following is a simple example problem, with the coding -c needed for its solution by lsodes. the problem is from chemical -c kinetics, and consists of the following 12 rate equations.. -c dy1/dt = -rk1*y1 -c dy2/dt = rk1*y1 + rk11*rk14*y4 + rk19*rk14*y5 -c - rk3*y2*y3 - rk15*y2*y12 - rk2*y2 -c dy3/dt = rk2*y2 - rk5*y3 - rk3*y2*y3 - rk7*y10*y3 -c + rk11*rk14*y4 + rk12*rk14*y6 -c dy4/dt = rk3*y2*y3 - rk11*rk14*y4 - rk4*y4 -c dy5/dt = rk15*y2*y12 - rk19*rk14*y5 - rk16*y5 -c dy6/dt = rk7*y10*y3 - rk12*rk14*y6 - rk8*y6 -c dy7/dt = rk17*y10*y12 - rk20*rk14*y7 - rk18*y7 -c dy8/dt = rk9*y10 - rk13*rk14*y8 - rk10*y8 -c dy9/dt = rk4*y4 + rk16*y5 + rk8*y6 + rk18*y7 -c dy10/dt = rk5*y3 + rk12*rk14*y6 + rk20*rk14*y7 -c + rk13*rk14*y8 - rk7*y10*y3 - rk17*y10*y12 -c - rk6*y10 - rk9*y10 -c dy11/dt = rk10*y8 -c dy12/dt = rk6*y10 + rk19*rk14*y5 + rk20*rk14*y7 -c - rk15*y2*y12 - rk17*y10*y12 -c -c with rk1 = rk5 = 0.1, rk4 = rk8 = rk16 = rk18 = 2.5, -c rk10 = 5.0, rk2 = rk6 = 10.0, rk14 = 30.0, -c rk3 = rk7 = rk9 = rk11 = rk12 = rk13 = rk19 = rk20 = 50.0, -c rk15 = rk17 = 100.0. 
-c -c the t interval is from 0 to 1000, and the initial conditions -c are y1 = 1, y2 = y3 = ... = y12 = 0. the problem is stiff. -c -c the following coding solves this problem with lsodes, using mf = 121 -c and printing results at t = .1, 1., 10., 100., 1000. it uses -c itol = 1 and mixed relative/absolute tolerance controls. -c during the run and at the end, statistical quantities of interest -c are printed (see optional outputs in the full description below). -c -c external fex, jex -c double precision atol, rtol, rwork, t, tout, y -c dimension y(12), rwork(500), iwork(30) -c data lrw/500/, liw/30/ -c neq = 12 -c do 10 i = 1,neq -c 10 y(i) = 0.0d0 -c y(1) = 1.0d0 -c t = 0.0d0 -c tout = 0.1d0 -c itol = 1 -c rtol = 1.0d-4 -c atol = 1.0d-6 -c itask = 1 -c istate = 1 -c iopt = 0 -c mf = 121 -c do 40 iout = 1,5 -c call lsodes (fex, neq, y, t, tout, itol, rtol, atol, -c 1 itask, istate, iopt, rwork, lrw, iwork, liw, jex, mf) -c write(6,30)t,iwork(11),rwork(11),(y(i),i=1,neq) -c 30 format(//' at t =',e11.3,4x, -c 1 ' no. steps =',i5,4x,' last step =',e11.3/ -c 2 ' y array = ',4e14.5/13x,4e14.5/13x,4e14.5) -c if (istate .lt. 0) go to 80 -c tout = tout*10.0d0 -c 40 continue -c lenrw = iwork(17) -c leniw = iwork(18) -c nst = iwork(11) -c nfe = iwork(12) -c nje = iwork(13) -c nlu = iwork(21) -c nnz = iwork(19) -c nnzlu = iwork(25) + iwork(26) + neq -c write (6,70) lenrw,leniw,nst,nfe,nje,nlu,nnz,nnzlu -c 70 format(//' required rwork size =',i4,' iwork size =',i4/ -c 1 ' no. steps =',i4,' no. f-s =',i4,' no. j-s =',i4, -c 2 ' no. lu-s =',i4/' no. of nonzeros in j =',i5, -c 3 ' no. of nonzeros in lu =',i5) -c stop -c 80 write(6,90)istate -c 90 format(///' error halt.. 
istate =',i3) -c stop -c end -c -c subroutine fex (neq, t, y, ydot) -c double precision t, y, ydot -c double precision rk1, rk2, rk3, rk4, rk5, rk6, rk7, rk8, rk9, -c 1 rk10, rk11, rk12, rk13, rk14, rk15, rk16, rk17 -c dimension y(12), ydot(12) -c data rk1/0.1d0/, rk2/10.0d0/, rk3/50.0d0/, rk4/2.5d0/, rk5/0.1d0/, -c 1 rk6/10.0d0/, rk7/50.0d0/, rk8/2.5d0/, rk9/50.0d0/, rk10/5.0d0/, -c 2 rk11/50.0d0/, rk12/50.0d0/, rk13/50.0d0/, rk14/30.0d0/, -c 3 rk15/100.0d0/, rk16/2.5d0/, rk17/100.0d0/, rk18/2.5d0/, -c 4 rk19/50.0d0/, rk20/50.0d0/ -c ydot(1) = -rk1*y(1) -c ydot(2) = rk1*y(1) + rk11*rk14*y(4) + rk19*rk14*y(5) -c 1 - rk3*y(2)*y(3) - rk15*y(2)*y(12) - rk2*y(2) -c ydot(3) = rk2*y(2) - rk5*y(3) - rk3*y(2)*y(3) - rk7*y(10)*y(3) -c 1 + rk11*rk14*y(4) + rk12*rk14*y(6) -c ydot(4) = rk3*y(2)*y(3) - rk11*rk14*y(4) - rk4*y(4) -c ydot(5) = rk15*y(2)*y(12) - rk19*rk14*y(5) - rk16*y(5) -c ydot(6) = rk7*y(10)*y(3) - rk12*rk14*y(6) - rk8*y(6) -c ydot(7) = rk17*y(10)*y(12) - rk20*rk14*y(7) - rk18*y(7) -c ydot(8) = rk9*y(10) - rk13*rk14*y(8) - rk10*y(8) -c ydot(9) = rk4*y(4) + rk16*y(5) + rk8*y(6) + rk18*y(7) -c ydot(10) = rk5*y(3) + rk12*rk14*y(6) + rk20*rk14*y(7) -c 1 + rk13*rk14*y(8) - rk7*y(10)*y(3) - rk17*y(10)*y(12) -c 2 - rk6*y(10) - rk9*y(10) -c ydot(11) = rk10*y(8) -c ydot(12) = rk6*y(10) + rk19*rk14*y(5) + rk20*rk14*y(7) -c 1 - rk15*y(2)*y(12) - rk17*y(10)*y(12) -c return -c end -c -c subroutine jex (neq, t, y, j, ia, ja, pdj) -c double precision t, y, pdj -c double precision rk1, rk2, rk3, rk4, rk5, rk6, rk7, rk8, rk9, -c 1 rk10, rk11, rk12, rk13, rk14, rk15, rk16, rk17 -c dimension y(1), ia(1), ja(1), pdj(1) -c data rk1/0.1d0/, rk2/10.0d0/, rk3/50.0d0/, rk4/2.5d0/, rk5/0.1d0/, -c 1 rk6/10.0d0/, rk7/50.0d0/, rk8/2.5d0/, rk9/50.0d0/, rk10/5.0d0/, -c 2 rk11/50.0d0/, rk12/50.0d0/, rk13/50.0d0/, rk14/30.0d0/, -c 3 rk15/100.0d0/, rk16/2.5d0/, rk17/100.0d0/, rk18/2.5d0/, -c 4 rk19/50.0d0/, rk20/50.0d0/ -c go to (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), j -c 1 pdj(1) = -rk1 -c 
pdj(2) = rk1 -c return -c 2 pdj(2) = -rk3*y(3) - rk15*y(12) - rk2 -c pdj(3) = rk2 - rk3*y(3) -c pdj(4) = rk3*y(3) -c pdj(5) = rk15*y(12) -c pdj(12) = -rk15*y(12) -c return -c 3 pdj(2) = -rk3*y(2) -c pdj(3) = -rk5 - rk3*y(2) - rk7*y(10) -c pdj(4) = rk3*y(2) -c pdj(6) = rk7*y(10) -c pdj(10) = rk5 - rk7*y(10) -c return -c 4 pdj(2) = rk11*rk14 -c pdj(3) = rk11*rk14 -c pdj(4) = -rk11*rk14 - rk4 -c pdj(9) = rk4 -c return -c 5 pdj(2) = rk19*rk14 -c pdj(5) = -rk19*rk14 - rk16 -c pdj(9) = rk16 -c pdj(12) = rk19*rk14 -c return -c 6 pdj(3) = rk12*rk14 -c pdj(6) = -rk12*rk14 - rk8 -c pdj(9) = rk8 -c pdj(10) = rk12*rk14 -c return -c 7 pdj(7) = -rk20*rk14 - rk18 -c pdj(9) = rk18 -c pdj(10) = rk20*rk14 -c pdj(12) = rk20*rk14 -c return -c 8 pdj(8) = -rk13*rk14 - rk10 -c pdj(10) = rk13*rk14 -c pdj(11) = rk10 -c 9 return -c 10 pdj(3) = -rk7*y(3) -c pdj(6) = rk7*y(3) -c pdj(7) = rk17*y(12) -c pdj(8) = rk9 -c pdj(10) = -rk7*y(3) - rk17*y(12) - rk6 - rk9 -c pdj(12) = rk6 - rk17*y(12) -c 11 return -c 12 pdj(2) = -rk15*y(2) -c pdj(5) = rk15*y(2) -c pdj(7) = rk17*y(10) -c pdj(10) = -rk17*y(10) -c pdj(12) = -rk15*y(2) - rk17*y(10) -c return -c end -c -c the output of this program (on a cray-1 in single precision) -c is as follows.. -c -c -c at t = 1.000e-01 no. steps = 12 last step = 1.515e-02 -c y array = 9.90050e-01 6.28228e-03 3.65313e-03 7.51934e-07 -c 1.12167e-09 1.18458e-09 1.77291e-12 3.26476e-07 -c 5.46720e-08 9.99500e-06 4.48483e-08 2.76398e-06 -c -c -c at t = 1.000e+00 no. steps = 33 last step = 7.880e-02 -c y array = 9.04837e-01 9.13105e-03 8.20622e-02 2.49177e-05 -c 1.85055e-06 1.96797e-06 1.46157e-07 2.39557e-05 -c 3.26306e-05 7.21621e-04 5.06433e-05 3.05010e-03 -c -c -c at t = 1.000e+01 no. steps = 48 last step = 1.239e+00 -c y array = 3.67876e-01 3.68958e-03 3.65133e-01 4.48325e-05 -c 6.10798e-05 4.33148e-05 5.90211e-05 1.18449e-04 -c 3.15235e-03 3.56531e-03 4.15520e-03 2.48741e-01 -c -c -c at t = 1.000e+02 no. 
steps = 91 last step = 3.764e+00 -c y array = 4.44981e-05 4.42666e-07 4.47273e-04 -3.53257e-11 -c 2.81577e-08 -9.67741e-11 2.77615e-07 1.45322e-07 -c 1.56230e-02 4.37394e-06 1.60104e-02 9.52246e-01 -c -c -c at t = 1.000e+03 no. steps = 111 last step = 4.156e+02 -c y array = -2.65492e-13 2.60539e-14 -8.59563e-12 6.29355e-14 -c -1.78066e-13 5.71471e-13 -1.47561e-12 4.58078e-15 -c 1.56314e-02 1.37878e-13 1.60184e-02 9.52719e-01 -c -c -c required rwork size = 442 iwork size = 30 -c no. steps = 111 no. f-s = 142 no. j-s = 2 no. lu-s = 20 -c no. of nonzeros in j = 44 no. of nonzeros in lu = 50 -c----------------------------------------------------------------------- -c full description of user interface to lsodes. -c -c the user interface to lsodes consists of the following parts. -c -c i. the call sequence to subroutine lsodes, which is a driver -c routine for the solver. this includes descriptions of both -c the call sequence arguments and of user-supplied routines. -c following these descriptions is a description of -c optional inputs available through the call sequence, and then -c a description of optional outputs (in the work arrays). -c -c ii. descriptions of other routines in the lsodes package that may be -c (optionally) called by the user. these provide the ability to -c alter error message handling, save and restore the internal -c common, and obtain specified derivatives of the solution y(t). -c -c iii. descriptions of common blocks to be declared in overlay -c or similar environments, or to be saved when doing an interrupt -c of the problem and continued solution later. -c -c iv. description of two routines in the lsodes package, either of -c which the user may replace with his own version, if desired. -c these relate to the measurement of errors. -c -c----------------------------------------------------------------------- -c part i. call sequence. 
-c -c the call sequence parameters used for input only are -c f, neq, tout, itol, rtol, atol, itask, iopt, lrw, liw, jac, mf, -c and those used for both input and output are -c y, t, istate. -c the work arrays rwork and iwork are also used for conditional and -c optional inputs and optional outputs. (the term output here refers -c to the return from subroutine lsodes to the user-s calling program.) -c -c the legality of input parameters will be thoroughly checked on the -c initial call for the problem, but not checked thereafter unless a -c change in input parameters is flagged by istate = 3 on input. -c -c the descriptions of the call arguments are as follows. -c -c f = the name of the user-supplied subroutine defining the -c ode system. the system must be put in the first-order -c form dy/dt = f(t,y), where f is a vector-valued function -c of the scalar t and the vector y. subroutine f is to -c compute the function f. it is to have the form -c subroutine f (neq, t, y, ydot) -c dimension y(1), ydot(1) -c where neq, t, and y are input, and the array ydot = f(t,y) -c is output. y and ydot are arrays of length neq. -c (in the dimension statement above, 1 is a dummy -c dimension.. it can be replaced by any value.) -c subroutine f should not alter y(1),...,y(neq). -c f must be declared external in the calling program. -c -c subroutine f may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in f) and/or y has length exceeding neq(1). -c see the descriptions of neq and y below. -c -c if quantities computed in the f routine are needed -c externally to lsodes, an extra call to f should be made -c for this purpose, for consistent and accurate results. -c if only the derivative dy/dt is needed, use intdy instead. -c -c neq = the size of the ode system (number of first order -c ordinary differential equations). used only for input. -c neq may be decreased, but not increased, during the problem. 
-c if neq is decreased (with istate = 3 on input), the -c remaining components of y should be left undisturbed, if -c these are to be accessed in f and/or jac. -c -c normally, neq is a scalar, and it is generally referred to -c as a scalar in this user interface description. however, -c neq may be an array, with neq(1) set to the system size. -c (the lsodes package accesses only neq(1).) in either case, -c this parameter is passed as the neq argument in all calls -c to f and jac. hence, if it is an array, locations -c neq(2),... may be used to store other integer data and pass -c it to f and/or jac. subroutines f and/or jac must include -c neq in a dimension statement in that case. -c -c y = a real array for the vector of dependent variables, of -c length neq or more. used for both input and output on the -c first call (istate = 1), and only for output on other calls. -c on the first call, y must contain the vector of initial -c values. on output, y contains the computed solution vector, -c evaluated at t. if desired, the y array may be used -c for other purposes between calls to the solver. -c -c this array is passed as the y argument in all calls to -c f and jac. hence its length may exceed neq, and locations -c y(neq+1),... may be used to store other real data and -c pass it to f and/or jac. (the lsodes package accesses only -c y(1),...,y(neq).) -c -c t = the independent variable. on input, t is used only on the -c first call, as the initial point of the integration. -c on output, after each call, t is the value at which a -c computed solution y is evaluated (usually the same as tout). -c on an error return, t is the farthest point reached. -c -c tout = the next value of t at which a computed solution is desired. -c used only for input. -c -c when starting the problem (istate = 1), tout may be equal -c to t for one call, then should .ne. t for the next call. -c for the initial t, an input value of tout .ne. 
t is used -c in order to determine the direction of the integration -c (i.e. the algebraic sign of the step sizes) and the rough -c scale of the problem. integration in either direction -c (forward or backward in t) is permitted. -c -c if itask = 2 or 5 (one-step modes), tout is ignored after -c the first call (i.e. the first call with tout .ne. t). -c otherwise, tout is required on every call. -c -c if itask = 1, 3, or 4, the values of tout need not be -c monotone, but a value of tout which backs up is limited -c to the current internal t interval, whose endpoints are -c tcur - hu and tcur (see optional outputs, below, for -c tcur and hu). -c -c itol = an indicator for the type of error control. see -c description below under atol. used only for input. -c -c rtol = a relative error tolerance parameter, either a scalar or -c an array of length neq. see description below under atol. -c input only. -c -c atol = an absolute error tolerance parameter, either a scalar or -c an array of length neq. input only. -c -c the input parameters itol, rtol, and atol determine -c the error control performed by the solver. the solver will -c control the vector e = (e(i)) of estimated local errors -c in y, according to an inequality of the form -c rms-norm of ( e(i)/ewt(i) ) .le. 1, -c where ewt(i) = rtol(i)*abs(y(i)) + atol(i), -c and the rms-norm (root-mean-square norm) here is -c rms-norm(v) = sqrt(sum v(i)**2 / neq). here ewt = (ewt(i)) -c is a vector of weights which must always be positive, and -c the values of rtol and atol should all be non-negative. -c the following table gives the types (scalar/array) of -c rtol and atol, and the corresponding form of ewt(i). -c -c itol rtol atol ewt(i) -c 1 scalar scalar rtol*abs(y(i)) + atol -c 2 scalar array rtol*abs(y(i)) + atol(i) -c 3 array scalar rtol(i)*abs(y(i)) + atol -c 4 array array rtol(i)*abs(y(i)) + atol(i) -c -c when either of these parameters is a scalar, it need not -c be dimensioned in the user-s calling program. 
-c -c if none of the above choices (with itol, rtol, and atol -c fixed throughout the problem) is suitable, more general -c error controls can be obtained by substituting -c user-supplied routines for the setting of ewt and/or for -c the norm calculation. see part iv below. -c -c if global errors are to be estimated by making a repeated -c run on the same problem with smaller tolerances, then all -c components of rtol and atol (i.e. of ewt) should be scaled -c down uniformly. -c -c itask = an index specifying the task to be performed. -c input only. itask has the following values and meanings. -c 1 means normal computation of output values of y(t) at -c t = tout (by overshooting and interpolating). -c 2 means take one step only and return. -c 3 means stop at the first internal mesh point at or -c beyond t = tout and return. -c 4 means normal computation of output values of y(t) at -c t = tout but without overshooting t = tcrit. -c tcrit must be input as rwork(1). tcrit may be equal to -c or beyond tout, but not behind it in the direction of -c integration. this option is useful if the problem -c has a singularity at or beyond t = tcrit. -c 5 means take one step, without passing tcrit, and return. -c tcrit must be input as rwork(1). -c -c note.. if itask = 4 or 5 and the solver reaches tcrit -c (within roundoff), it will return t = tcrit (exactly) to -c indicate this (unless itask = 4 and tout comes before tcrit, -c in which case answers at t = tout are returned first). -c -c istate = an index used for input and output to specify the -c the state of the calculation. -c -c on input, the values of istate are as follows. -c 1 means this is the first call for the problem -c (initializations will be done). see note below. -c 2 means this is not the first call, and the calculation -c is to continue normally, with no change in any input -c parameters except possibly tout and itask. 
-c (if itol, rtol, and/or atol are changed between calls -c with istate = 2, the new values will be used but not -c tested for legality.) -c 3 means this is not the first call, and the -c calculation is to continue normally, but with -c a change in input parameters other than -c tout and itask. changes are allowed in -c neq, itol, rtol, atol, iopt, lrw, liw, mf, -c the conditional inputs ia and ja, -c and any of the optional inputs except h0. -c in particular, if miter = 1 or 2, a call with istate = 3 -c will cause the sparsity structure of the problem to be -c recomputed (or reread from ia and ja if moss = 0). -c note.. a preliminary call with tout = t is not counted -c as a first call here, as no initialization or checking of -c input is done. (such a call is sometimes useful for the -c purpose of outputting the initial conditions.) -c thus the first call for which tout .ne. t requires -c istate = 1 on input. -c -c on output, istate has the following values and meanings. -c 1 means nothing was done, as tout was equal to t with -c istate = 1 on input. (however, an internal counter was -c set to detect and prevent repeated calls of this type.) -c 2 means the integration was performed successfully. -c -1 means an excessive amount of work (more than mxstep -c steps) was done on this call, before completing the -c requested task, but the integration was otherwise -c successful as far as t. (mxstep is an optional input -c and is normally 500.) to continue, the user may -c simply reset istate to a value .gt. 1 and call again -c (the excess work step counter will be reset to 0). -c in addition, the user may increase mxstep to avoid -c this error return (see below on optional inputs). -c -2 means too much accuracy was requested for the precision -c of the machine being used. this was detected before -c completing the requested task, but the integration -c was successful as far as t. to continue, the tolerance -c parameters must be reset, and istate must be set -c to 3. 
the optional output tolsf may be used for this -c purpose. (note.. if this condition is detected before -c taking any steps, then an illegal input return -c (istate = -3) occurs instead.) -c -3 means illegal input was detected, before taking any -c integration steps. see written message for details. -c note.. if the solver detects an infinite loop of calls -c to the solver with illegal input, it will cause -c the run to stop. -c -4 means there were repeated error test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c the problem may have a singularity, or the input -c may be inappropriate. -c -5 means there were repeated convergence test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c this may be caused by an inaccurate jacobian matrix, -c if one is being used. -c -6 means ewt(i) became zero for some i during the -c integration. pure relative error control (atol(i)=0.0) -c was requested on a variable which has now vanished. -c the integration was successful as far as t. -c -7 means a fatal error return flag came from the sparse -c solver cdrv by way of prjs or slss (numerical -c factorization or backsolve). this should never happen. -c the integration was successful as far as t. -c -c note.. an error return with istate = -1, -4, or -5 and with -c miter = 1 or 2 may mean that the sparsity structure of the -c problem has changed significantly since it was last -c determined (or input). in that case, one can attempt to -c complete the integration by setting istate = 3 on the next -c call, so that a new structure determination is done. -c -c note.. since the normal output value of istate is 2, -c it does not need to be reset for normal continuation. 
-c also, since a negative input value of istate will be -c regarded as illegal, a negative output value requires the -c user to change it, and possibly other inputs, before -c calling the solver again. -c -c iopt = an integer flag to specify whether or not any optional -c inputs are being used on this call. input only. -c the optional inputs are listed separately below. -c iopt = 0 means no optional inputs are being used. -c default values will be used in all cases. -c iopt = 1 means one or more optional inputs are being used. -c -c rwork = a work array used for a mixture of real (double precision) -c and integer work space. -c the length of rwork (in real words) must be at least -c 20 + nyh*(maxord + 1) + 3*neq + lwm where -c nyh = the initial value of neq, -c maxord = 12 (if meth = 1) or 5 (if meth = 2) (unless a -c smaller value is given as an optional input), -c lwm = 0 if miter = 0, -c lwm = 2*nnz + 2*neq + (nnz+9*neq)/lenrat if miter = 1, -c lwm = 2*nnz + 2*neq + (nnz+10*neq)/lenrat if miter = 2, -c lwm = neq + 2 if miter = 3. -c in the above formulas, -c nnz = number of nonzero elements in the jacobian matrix. -c lenrat = the real to integer wordlength ratio (usually 1 in -c single precision and 2 in double precision). -c (see the mf description for meth and miter.) -c thus if maxord has its default value and neq is constant, -c the minimum length of rwork is.. -c 20 + 16*neq for mf = 10, -c 20 + 16*neq + lwm for mf = 11, 111, 211, 12, 112, 212, -c 22 + 17*neq for mf = 13, -c 20 + 9*neq for mf = 20, -c 20 + 9*neq + lwm for mf = 21, 121, 221, 22, 122, 222, -c 22 + 10*neq for mf = 23. -c if miter = 1 or 2, the above formula for lwm is only a -c crude lower bound. the required length of rwork cannot -c be readily predicted in general, as it depends on the -c sparsity structure of the problem. some experimentation -c may be necessary. -c -c the first 20 words of rwork are reserved for conditional -c and optional inputs and optional outputs. 
-c -c the following word in rwork is a conditional input.. -c rwork(1) = tcrit = critical value of t which the solver -c is not to overshoot. required if itask is -c 4 or 5, and ignored otherwise. (see itask.) -c -c lrw = the length of the array rwork, as declared by the user. -c (this will be checked by the solver.) -c -c iwork = an integer work array. the length of iwork must be at least -c 31 + neq + nnz if moss = 0 and miter = 1 or 2, or -c 30 otherwise. -c (nnz is the number of nonzero elements in df/dy.) -c -c in lsodes, iwork is used only for conditional and -c optional inputs and optional outputs. -c -c the following two blocks of words in iwork are conditional -c inputs, required if moss = 0 and miter = 1 or 2, but not -c otherwise (see the description of mf for moss). -c iwork(30+j) = ia(j) (j=1,...,neq+1) -c iwork(31+neq+k) = ja(k) (k=1,...,nnz) -c the two arrays ia and ja describe the sparsity structure -c to be assumed for the jacobian matrix. ja contains the row -c indices where nonzero elements occur, reading in columnwise -c order, and ia contains the starting locations in ja of the -c descriptions of columns 1,...,neq, in that order, with -c ia(1) = 1. thus, for each column index j = 1,...,neq, the -c values of the row index i in column j where a nonzero -c element may occur are given by -c i = ja(k), where ia(j) .le. k .lt. ia(j+1). -c if nnz is the total number of nonzero locations assumed, -c then the length of the ja array is nnz, and ia(neq+1) must -c be nnz + 1. duplicate entries are not allowed. -c -c liw = the length of the array iwork, as declared by the user. -c (this will be checked by the solver.) -c -c note.. the work arrays must not be altered between calls to lsodes -c for the same problem, except possibly for the conditional and -c optional inputs, and except for the last 3*neq words of rwork. 
-c the latter space is used for internal scratch space, and so is -c available for use by the user outside lsodes between calls, if -c desired (but not for use by f or jac). -c -c jac = name of user-supplied routine (miter = 1 or moss = 1) to -c compute the jacobian matrix, df/dy, as a function of -c the scalar t and the vector y. it is to have the form -c subroutine jac (neq, t, y, j, ian, jan, pdj) -c dimension y(1), ian(1), jan(1), pdj(1) -c where neq, t, y, j, ian, and jan are input, and the array -c pdj, of length neq, is to be loaded with column j -c of the jacobian on output. thus df(i)/dy(j) is to be -c loaded into pdj(i) for all relevant values of i. -c here t and y have the same meaning as in subroutine f, -c and j is a column index (1 to neq). ian and jan are -c undefined in calls to jac for structure determination -c (moss = 1). otherwise, ian and jan are structure -c descriptors, as defined under optional outputs below, and -c so can be used to determine the relevant row indices i, if -c desired. (in the dimension statement above, 1 is a -c dummy dimension.. it can be replaced by any value.) -c jac need not provide df/dy exactly. a crude -c approximation (possibly with greater sparsity) will do. -c in any case, pdj is preset to zero by the solver, -c so that only the nonzero elements need be loaded by jac. -c calls to jac are made with j = 1,...,neq, in that order, and -c each such set of calls is preceded by a call to f with the -c same arguments neq, t, and y. thus to gain some efficiency, -c intermediate quantities shared by both calculations may be -c saved in a user common block by f and not recomputed by jac, -c if desired. jac must not alter its input arguments. -c jac must be declared external in the calling program. -c subroutine jac may access user-defined quantities in -c neq(2),... and y(neq(1)+1),... if neq is an array -c (dimensioned in jac) and y has length exceeding neq(1). -c see the descriptions of neq and y above. 
-c -c mf = the method flag. used only for input. -c mf has three decimal digits-- moss, meth, miter-- -c mf = 100*moss + 10*meth + miter. -c moss indicates the method to be used to obtain the sparsity -c structure of the jacobian matrix if miter = 1 or 2.. -c moss = 0 means the user has supplied ia and ja -c (see descriptions under iwork above). -c moss = 1 means the user has supplied jac (see below) -c and the structure will be obtained from neq -c initial calls to jac. -c moss = 2 means the structure will be obtained from neq+1 -c initial calls to f. -c meth indicates the basic linear multistep method.. -c meth = 1 means the implicit adams method. -c meth = 2 means the method based on backward -c differentiation formulas (bdf-s). -c miter indicates the corrector iteration method.. -c miter = 0 means functional iteration (no jacobian matrix -c is involved). -c miter = 1 means chord iteration with a user-supplied -c sparse jacobian, given by subroutine jac. -c miter = 2 means chord iteration with an internally -c generated (difference quotient) sparse jacobian -c (using ngp extra calls to f per df/dy value, -c where ngp is an optional output described below.) -c miter = 3 means chord iteration with an internally -c generated diagonal jacobian approximation. -c (using 1 extra call to f per df/dy evaluation). -c if miter = 1 or moss = 1, the user must supply a subroutine -c jac (the name is arbitrary) as described above under jac. -c otherwise, a dummy argument can be used. -c -c the standard choices for mf are.. -c mf = 10 for a nonstiff problem, -c mf = 21 or 22 for a stiff problem with ia/ja supplied -c (21 if jac is supplied, 22 if not), -c mf = 121 for a stiff problem with jac supplied, -c but not ia/ja, -c mf = 222 for a stiff problem with neither ia/ja nor -c jac supplied. -c the sparseness structure can be changed during the -c problem by making a call to lsodes with istate = 3. 
-c----------------------------------------------------------------------- -c optional inputs. -c -c the following is a list of the optional inputs provided for in the -c call sequence. (see also part ii.) for each such input variable, -c this table lists its name as used in this documentation, its -c location in the call sequence, its meaning, and the default value. -c the use of any of these inputs requires iopt = 1, and in that -c case all of these inputs are examined. a value of zero for any -c of these optional inputs will cause the default value to be used. -c thus to use a subset of the optional inputs, simply preload -c locations 5 to 10 in rwork and iwork to 0.0 and 0 respectively, and -c then set those of interest to nonzero values. -c -c name location meaning and default value -c -c h0 rwork(5) the step size to be attempted on the first step. -c the default value is determined by the solver. -c -c hmax rwork(6) the maximum absolute step size allowed. -c the default value is infinite. -c -c hmin rwork(7) the minimum absolute step size allowed. -c the default value is 0. (this lower bound is not -c enforced on the final step before reaching tcrit -c when itask = 4 or 5.) -c -c seth rwork(8) the element threshhold for sparsity determination -c when moss = 1 or 2. if the absolute value of -c an estimated jacobian element is .le. seth, it -c will be assumed to be absent in the structure. -c the default value of seth is 0. -c -c maxord iwork(5) the maximum order to be allowed. the default -c value is 12 if meth = 1, and 5 if meth = 2. -c if maxord exceeds the default value, it will -c be reduced to the default value. -c if maxord is changed during the problem, it may -c cause the current order to be reduced. -c -c mxstep iwork(6) maximum number of (internally defined) steps -c allowed during one call to the solver. -c the default value is 500. 
-c -c mxhnil iwork(7) maximum number of messages printed (per problem) -c warning that t + h = t on a step (h = step size). -c this must be positive to result in a non-default -c value. the default value is 10. -c----------------------------------------------------------------------- -c optional outputs. -c -c as optional additional output from lsodes, the variables listed -c below are quantities related to the performance of lsodes -c which are available to the user. these are communicated by way of -c the work arrays, but also have internal mnemonic names as shown. -c except where stated otherwise, all of these outputs are defined -c on any successful return from lsodes, and on any return with -c istate = -1, -2, -4, -5, or -6. on an illegal input return -c (istate = -3), they will be unchanged from their existing values -c (if any), except possibly for tolsf, lenrw, and leniw. -c on any error return, outputs relevant to the error will be defined, -c as noted below. -c -c name location meaning -c -c hu rwork(11) the step size in t last used (successfully). -c -c hcur rwork(12) the step size to be attempted on the next step. -c -c tcur rwork(13) the current value of the independent variable -c which the solver has actually reached, i.e. the -c current internal mesh point in t. on output, tcur -c will always be at least as far as the argument -c t, but may be farther (if interpolation was done). -c -c tolsf rwork(14) a tolerance scale factor, greater than 1.0, -c computed when a request for too much accuracy was -c detected (istate = -3 if detected at the start of -c the problem, istate = -2 otherwise). if itol is -c left unaltered but rtol and atol are uniformly -c scaled up by a factor of tolsf for the next call, -c then the solver is deemed likely to succeed. -c (the user may also ignore tolsf and alter the -c tolerance parameters in any other way appropriate.) -c -c nst iwork(11) the number of steps taken for the problem so far. 
-c -c nfe iwork(12) the number of f evaluations for the problem so far, -c excluding those for structure determination -c (moss = 2). -c -c nje iwork(13) the number of jacobian evaluations for the problem -c so far, excluding those for structure determination -c (moss = 1). -c -c nqu iwork(14) the method order last used (successfully). -c -c nqcur iwork(15) the order to be attempted on the next step. -c -c imxer iwork(16) the index of the component of largest magnitude in -c the weighted local error vector ( e(i)/ewt(i) ), -c on an error return with istate = -4 or -5. -c -c lenrw iwork(17) the length of rwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c leniw iwork(18) the length of iwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c nnz iwork(19) the number of nonzero elements in the jacobian -c matrix, including the diagonal (miter = 1 or 2). -c (this may differ from that given by ia(neq+1)-1 -c if moss = 0, because of added diagonal entries.) -c -c ngp iwork(20) the number of groups of column indices, used in -c difference quotient jacobian aproximations if -c miter = 2. this is also the number of extra f -c evaluations needed for each jacobian evaluation. -c -c nlu iwork(21) the number of sparse lu decompositions for the -c problem so far. -c -c lyh iwork(22) the base address in rwork of the history array yh, -c described below in this list. -c -c ipian iwork(23) the base address of the structure descriptor array -c ian, described below in this list. -c -c ipjan iwork(24) the base address of the structure descriptor array -c jan, described below in this list. -c -c nzl iwork(25) the number of nonzero elements in the strict lower -c triangle of the lu factorization used in the chord -c iteration (miter = 1 or 2). 
-c -c nzu iwork(26) the number of nonzero elements in the strict upper -c triangle of the lu factorization used in the chord -c iteration (miter = 1 or 2). -c the total number of nonzeros in the factorization -c is therefore nzl + nzu + neq. -c -c the following four arrays are segments of the rwork array which -c may also be of interest to the user as optional outputs. -c for each array, the table below gives its internal name, -c its base address, and its description. -c for yh and acor, the base addresses are in rwork (a real array). -c the integer arrays ian and jan are to be obtained by declaring an -c integer array iwk and identifying iwk(1) with rwork(21), using either -c an equivalence statement or a subroutine call. then the base -c addresses ipian (of ian) and ipjan (of jan) in iwk are to be obtained -c as optional outputs iwork(23) and iwork(24), respectively. -c thus ian(1) is iwk(ipian), etc. -c -c name base address description -c -c ian ipian (in iwk) structure descriptor array of size neq + 1. -c jan ipjan (in iwk) structure descriptor array of size nnz. -c (see above) ian and jan together describe the sparsity -c structure of the jacobian matrix, as used by -c lsodes when miter = 1 or 2. -c jan contains the row indices of the nonzero -c locations, reading in columnwise order, and -c ian contains the starting locations in jan of -c the descriptions of columns 1,...,neq, in -c that order, with ian(1) = 1. thus for each -c j = 1,...,neq, the row indices i of the -c nonzero locations in column j are -c i = jan(k), ian(j) .le. k .lt. ian(j+1). -c note that ian(neq+1) = nnz + 1. -c (if moss = 0, ian/jan may differ from the -c input ia/ja because of a different ordering -c in each column, and added diagonal entries.) -c -c yh lyh the nordsieck history array, of size nyh by -c (optional (nqcur + 1), where nyh is the initial value -c output) of neq. 
for j = 0,1,...,nqcur, column j+1 -c of yh contains hcur**j/factorial(j) times -c the j-th derivative of the interpolating -c polynomial currently representing the solution, -c evaluated at t = tcur. the base address lyh -c is another optional output, listed above. -c -c acor lenrw-neq+1 array of size neq used for the accumulated -c corrections on each step, scaled on output -c to represent the estimated local error in y -c on the last step. this is the vector e in -c the description of the error control. it is -c defined only on a successful return from -c lsodes. -c -c----------------------------------------------------------------------- -c part ii. other routines callable. -c -c the following are optional calls which the user may make to -c gain additional capabilities in conjunction with lsodes. -c (the routines xsetun and xsetf are designed to conform to the -c slatec error handling package.) -c -c form of call function -c call xsetun(lun) set the logical unit number, lun, for -c output of messages from lsodes, if -c the default is not desired. -c the default value of lun is 6. -c -c call xsetf(mflag) set a flag to control the printing of -c messages by lsodes. -c mflag = 0 means do not print. (danger.. -c this risks losing valuable information.) -c mflag = 1 means print (the default). -c -c either of the above calls may be made at -c any time and will take effect immediately. -c -c call srcms(rsav,isav,job) saves and restores the contents of -c the internal common blocks used by -c lsodes (see part iii below). -c rsav must be a real array of length 224 -c or more, and isav must be an integer -c array of length 75 or more. -c job=1 means save common into rsav/isav. -c job=2 means restore common from rsav/isav. -c srcms is useful if one is -c interrupting a run and restarting -c later, or alternating between two or -c more problems solved with lsodes. 
-c -c call intdy(,,,,,) provide derivatives of y, of various -c (see below) orders, at a specified point t, if -c desired. it may be called only after -c a successful return from lsodes. -c -c the detailed instructions for using intdy are as follows. -c the form of the call is.. -c -c lyh = iwork(22) -c call intdy (t, k, rwork(lyh), nyh, dky, iflag) -c -c the input parameters are.. -c -c t = value of independent variable where answers are desired -c (normally the same as the t last returned by lsodes). -c for valid results, t must lie between tcur - hu and tcur. -c (see optional outputs for tcur and hu.) -c k = integer order of the derivative desired. k must satisfy -c 0 .le. k .le. nqcur, where nqcur is the current order -c (see optional outputs). the capability corresponding -c to k = 0, i.e. computing y(t), is already provided -c by lsodes directly. since nqcur .ge. 1, the first -c derivative dy/dt is always available with intdy. -c lyh = the base address of the history array yh, obtained -c as an optional output as shown above. -c nyh = column length of yh, equal to the initial value of neq. -c -c the output parameters are.. -c -c dky = a real array of length neq containing the computed value -c of the k-th derivative of y(t). -c iflag = integer flag, returned as 0 if k and t were legal, -c -1 if k was illegal, and -2 if t was illegal. -c on an error return, a message is also written. -c----------------------------------------------------------------------- -c part iii. common blocks. -c -c if lsodes is to be used in an overlay situation, the user -c must declare, in the primary overlay, the variables in.. -c (1) the call sequence to lsodes, -c (2) the three internal common blocks -c /ls0001/ of length 257 (218 double precision words -c followed by 39 integer words), -c /lss001/ of length 40 ( 6 double precision words -c followed by 34 integer words), -c /eh0001/ of length 2 (integer words). 
-c -c if lsodes is used on a system in which the contents of internal -c common blocks are not preserved between calls, the user should -c declare the above three common blocks in his main program to insure -c that their contents are preserved. -c -c if the solution of a given problem by lsodes is to be interrupted -c and then later continued, such as when restarting an interrupted run -c or alternating between two or more problems, the user should save, -c following the return from the last lsodes call prior to the -c interruption, the contents of the call sequence variables and the -c internal common blocks, and later restore these values before the -c next lsodes call for that problem. to save and restore the common -c blocks, use subroutine srcms (see part ii above). -c -c----------------------------------------------------------------------- -c part iv. optionally replaceable solver routines. -c -c below are descriptions of two routines in the lsodes package which -c relate to the measurement of errors. either routine can be -c replaced by a user-supplied version, if desired. however, since such -c a replacement may have a major impact on performance, it should be -c done only when absolutely necessary, and only with great caution. -c (note.. the means by which the package version of a routine is -c superseded by the user-s version may be system-dependent.) -c -c (a) ewset. -c the following subroutine is called just before each internal -c integration step, and sets the array of error weights, ewt, as -c described under itol/rtol/atol above.. -c subroutine ewset (neq, itol, rtol, atol, ycur, ewt) -c where neq, itol, rtol, and atol are as in the lsodes call sequence, -c ycur contains the current dependent variable vector, and -c ewt is the array of weights set by ewset. -c -c if the user supplies this subroutine, it must return in ewt(i) -c (i = 1,...,neq) a positive quantity suitable for comparing errors -c in y(i) to. 
the ewt array returned by ewset is passed to the -c vnorm routine (see below), and also used by lsodes in the computation -c of the optional output imxer, the diagonal jacobian approximation, -c and the increments for difference quotient jacobians. -c -c in the user-supplied version of ewset, it may be desirable to use -c the current values of derivatives of y. derivatives up to order nq -c are available from the history array yh, described above under -c optional outputs. in ewset, yh is identical to the ycur array, -c extended to nq + 1 columns with a column length of nyh and scale -c factors of h**j/factorial(j). on the first call for the problem, -c given by nst = 0, nq is 1 and h is temporarily set to 1.0. -c the quantities nq, nyh, h, and nst can be obtained by including -c in ewset the statements.. -c double precision h, rls -c common /ls0001/ rls(218),ils(39) -c nq = ils(35) -c nyh = ils(14) -c nst = ils(36) -c h = rls(212) -c thus, for example, the current value of dy/dt can be obtained as -c ycur(nyh+i)/h (i=1,...,neq) (and the division by h is -c unnecessary when nst = 0). -c -c (b) vnorm. -c the following is a real function routine which computes the weighted -c root-mean-square norm of a vector v.. -c d = vnorm (n, v, w) -c where.. -c n = the length of the vector, -c v = real array of length n containing the vector, -c w = real array of length n containing weights, -c d = sqrt( (1/n) * sum(v(i)*w(i))**2 ). -c vnorm is called with n = neq and with w(i) = 1.0/ewt(i), where -c ewt is as set by subroutine ewset. -c -c if the user supplies this function, it should return a non-negative -c value of vnorm suitable for use in the error control in lsodes. -c none of the arguments should be altered by vnorm. -c for example, a user-supplied vnorm routine might.. -c -substitute a max-norm of (v(i)*w(i)) for the rms-norm, or -c -ignore some components of v in the norm, with the effect of -c suppressing the error control on those components of y. 
-c----------------------------------------------------------------------- -c----------------------------------------------------------------------- -c other routines in the lsodes package. -c -c in addition to subroutine lsodes, the lsodes package includes the -c following subroutines and function routines.. -c iprep acts as an iterface between lsodes and prep, and also does -c adjusting of work space pointers and work arrays. -c prep is called by iprep to compute sparsity and do sparse matrix -c preprocessing if miter = 1 or 2. -c jgroup is called by prep to compute groups of jacobian column -c indices for use when miter = 2. -c adjlr adjusts the length of required sparse matrix work space. -c it is called by prep. -c cntnzu is called by prep and counts the nonzero elements in the -c strict upper triangle of j + j-transpose, where j = df/dy. -c intdy computes an interpolated value of the y vector at t = tout. -c stode is the core integrator, which does one step of the -c integration and the associated error control. -c cfode sets all method coefficients and test constants. -c prjs computes and preprocesses the jacobian matrix j = df/dy -c and the newton iteration matrix p = i - h*l0*j. -c slss manages solution of linear system in chord iteration. -c ewset sets the error weight vector ewt before each step. -c vnorm computes the weighted r.m.s. norm of a vector. -c srcms is a user-callable routine to save and restore -c the contents of the internal common blocks. -c odrv constructs a reordering of the rows and columns of -c a matrix by the minimum degree algorithm. odrv is a -c driver routine which calls subroutines md, mdi, mdm, -c mdp, mdu, and sro. see ref. 2 for details. (the odrv -c module has been modified since ref. 2, however.) -c cdrv performs reordering, symbolic factorization, numerical -c factorization, or linear system solution operations, -c depending on a path argument ipath. 
cdrv is a -c driver routine which calls subroutines nroc, nsfc, -c nnfc, nnsc, and nntc. see ref. 3 for details. -c lsodes uses cdrv to solve linear systems in which the -c coefficient matrix is p = i - con*j, where i is the -c identity, con is a scalar, and j is an approximation to -c the jacobian df/dy. because cdrv deals with rowwise -c sparsity descriptions, cdrv works with p-transpose, not p. -c d1mach computes the unit roundoff in a machine-independent manner. -c xerrwv, xsetun, and xsetf handle the printing of all error -c messages and warnings. xerrwv is machine-dependent. -c note.. vnorm and d1mach are function routines. -c all the others are subroutines. -c -c the intrinsic and external routines used by lsodes are.. -c dabs, dmax1, dmin1, dfloat, max0, min0, mod, dsign, dsqrt, and write. -c -c a block data subprogram is also included with the package, -c for loading some of the variables in internal common. -c -c----------------------------------------------------------------------- -c the following card is for optimized compilation on lll compilers. -clll. 
optimize -c----------------------------------------------------------------------- - external prjs, slss - integer illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 1 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 2 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 3 nslj, ngp, nlu, nnz, nsp, nzl, nzu - integer i, i1, i2, iflag, imax, imul, imxer, ipflag, ipgo, irem, - 1 j, kgo, lenrat, lenyht, leniw, lenrw, lf0, lia, lja, - 2 lrtem, lwtem, lyhd, lyhn, mf1, mord, mxhnl0, mxstp0, ncolm - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision con0, conmin, ccmxj, psmall, rbig, seth - double precision atoli, ayi, big, ewti, h0, hmax, hmx, rh, rtoli, - 1 tcrit, tdist, tnext, tol, tolsf, tp, size, sum, w0, - 2 d1mach, vnorm - dimension mord(2) - logical ihit -c----------------------------------------------------------------------- -c the following two internal common blocks contain -c (a) variables which are local to any subroutine but whose values must -c be preserved between calls to the routine (own variables), and -c (b) variables which are communicated between subroutines. -c the structure of each block is as follows.. all real variables are -c listed first, followed by all integers. within each type, the -c variables are grouped with those local to subroutine lsodes first, -c then those local to subroutine stode or subroutine prjs -c (no other routines have own variables), and finally those used -c for communication. the block ls0001 is declared in subroutines -c lsodes, iprep, prep, intdy, stode, prjs, and slss. the block lss001 -c is declared in subroutines lsodes, iprep, prep, prjs, and slss. 
-c groups of variables are replaced by dummy arrays in the common -c declarations in routines where those variables are not used. -c----------------------------------------------------------------------- - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavf, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu -c - common /lss001/ con0, conmin, ccmxj, psmall, rbig, seth, - 1 iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 2 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 3 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 4 nslj, ngp, nlu, nnz, nsp, nzl, nzu -c - data mord(1),mord(2)/12,5/, mxstp0/500/, mxhnl0/10/ -c----------------------------------------------------------------------- -c in the data statement below, set lenrat equal to the ratio of -c the wordlength for a real number to that for an integer. usually, -c lenrat = 1 for single precision and 2 for double precision. if the -c true ratio is not an integer, use the next smaller integer (.ge. 1). -c----------------------------------------------------------------------- - data lenrat/2/ -c----------------------------------------------------------------------- -c block a. -c this code block is executed on every call. -c it tests istate and itask for legality and branches appropriately. -c if istate .gt. 1 but the flag init shows that initialization has -c not yet been done, an error return occurs. -c if istate = 1 and tout = t, jump to block g and return immediately. -c----------------------------------------------------------------------- - if (istate .lt. 1 .or. istate .gt. 3) go to 601 - if (itask .lt. 1 .or. itask .gt. 5) go to 602 - if (istate .eq. 1) go to 10 - if (init .eq. 0) go to 603 - if (istate .eq. 2) go to 200 - go to 20 - 10 init = 0 - if (tout .eq. 
t) go to 430 - 20 ntrep = 0 -c----------------------------------------------------------------------- -c block b. -c the next code block is executed for the initial call (istate = 1), -c or for a continuation call with parameter changes (istate = 3). -c it contains checking of all inputs and various initializations. -c if istate = 1, the final setting of work space pointers, the matrix -c preprocessing, and other initializations are done in block c. -c -c first check legality of the non-optional inputs neq, itol, iopt, -c mf, ml, and mu. -c----------------------------------------------------------------------- - if (neq(1) .le. 0) go to 604 - if (istate .eq. 1) go to 25 - if (neq(1) .gt. n) go to 605 - 25 n = neq(1) - if (itol .lt. 1 .or. itol .gt. 4) go to 606 - if (iopt .lt. 0 .or. iopt .gt. 1) go to 607 - moss = mf/100 - mf1 = mf - 100*moss - meth = mf1/10 - miter = mf1 - 10*meth - if (moss .lt. 0 .or. moss .gt. 2) go to 608 - if (meth .lt. 1 .or. meth .gt. 2) go to 608 - if (miter .lt. 0 .or. miter .gt. 3) go to 608 - if (miter .eq. 0 .or. miter .eq. 3) moss = 0 -c next process and check the optional inputs. -------------------------- - if (iopt .eq. 1) go to 40 - maxord = mord(meth) - mxstep = mxstp0 - mxhnil = mxhnl0 - if (istate .eq. 1) h0 = 0.0d0 - hmxi = 0.0d0 - hmin = 0.0d0 - seth = 0.0d0 - go to 60 - 40 maxord = iwork(5) - if (maxord .lt. 0) go to 611 - if (maxord .eq. 0) maxord = 100 - maxord = min0(maxord,mord(meth)) - mxstep = iwork(6) - if (mxstep .lt. 0) go to 612 - if (mxstep .eq. 0) mxstep = mxstp0 - mxhnil = iwork(7) - if (mxhnil .lt. 0) go to 613 - if (mxhnil .eq. 0) mxhnil = mxhnl0 - if (istate .ne. 1) go to 50 - h0 = rwork(5) - if ((tout - t)*h0 .lt. 0.0d0) go to 614 - 50 hmax = rwork(6) - if (hmax .lt. 0.0d0) go to 615 - hmxi = 0.0d0 - if (hmax .gt. 0.0d0) hmxi = 1.0d0/hmax - hmin = rwork(7) - if (hmin .lt. 0.0d0) go to 616 - seth = rwork(8) - if (seth .lt. 0.0d0) go to 609 -c check rtol and atol for legality. 
------------------------------------ - 60 rtoli = rtol(1) - atoli = atol(1) - do 65 i = 1,n - if (itol .ge. 3) rtoli = rtol(i) - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - if (rtoli .lt. 0.0d0) go to 619 - if (atoli .lt. 0.0d0) go to 620 - 65 continue -c----------------------------------------------------------------------- -c compute required work array lengths, as far as possible, and test -c these against lrw and liw. then set tentative pointers for work -c arrays. pointers to rwork/iwork segments are named by prefixing l to -c the name of the segment. e.g., the segment yh starts at rwork(lyh). -c segments of rwork (in order) are denoted wm, yh, savf, ewt, acor. -c if miter = 1 or 2, the required length of the matrix work space wm -c is not yet known, and so a crude minimum value is used for the -c initial tests of lrw and liw, and yh is temporarily stored as far -c to the right in rwork as possible, to leave the maximum amount -c of space for wm for matrix preprocessing. thus if miter = 1 or 2 -c and moss .ne. 2, some of the segments of rwork are temporarily -c omitted, as they are not needed in the preprocessing. these -c omitted segments are.. acor if istate = 1, ewt and acor if istate = 3 -c and moss = 1, and savf, ewt, and acor if istate = 3 and moss = 0. -c----------------------------------------------------------------------- - lrat = lenrat - if (istate .eq. 1) nyh = n - lwmin = 0 - if (miter .eq. 1) lwmin = 4*n + 10*n/lrat - if (miter .eq. 2) lwmin = 4*n + 11*n/lrat - if (miter .eq. 3) lwmin = n + 2 - lenyh = (maxord+1)*nyh - lrest = lenyh + 3*n - lenrw = 20 + lwmin + lrest - iwork(17) = lenrw - leniw = 30 - if (moss .eq. 0 .and. miter .ne. 0 .and. miter .ne. 3) - 1 leniw = leniw + n + 1 - iwork(18) = leniw - if (lenrw .gt. lrw) go to 617 - if (leniw .gt. liw) go to 618 - lia = 31 - if (moss .eq. 0 .and. miter .ne. 0 .and. miter .ne. 3) - 1 leniw = leniw + iwork(lia+n) - 1 - iwork(18) = leniw - if (leniw .gt. 
liw) go to 618 - lja = lia + n + 1 - lia = min0(lia,liw) - lja = min0(lja,liw) - lwm = 21 - if (istate .eq. 1) nq = 1 - ncolm = min0(nq+1,maxord+2) - lenyhm = ncolm*nyh - lenyht = lenyh - if (miter .eq. 1 .or. miter .eq. 2) lenyht = lenyhm - imul = 2 - if (istate .eq. 3) imul = moss - if (moss .eq. 2) imul = 3 - lrtem = lenyht + imul*n - lwtem = lwmin - if (miter .eq. 1 .or. miter .eq. 2) lwtem = lrw - 20 - lrtem - lenwk = lwtem - lyhn = lwm + lwtem - lsavf = lyhn + lenyht - lewt = lsavf + n - lacor = lewt + n - istatc = istate - if (istate .eq. 1) go to 100 -c----------------------------------------------------------------------- -c istate = 3. move yh to its new location. -c note that only the part of yh needed for the next step, namely -c min(nq+1,maxord+2) columns, is actually moved. -c a temporary error weight array ewt is loaded if moss = 2. -c sparse matrix processing is done in iprep/prep if miter = 1 or 2. -c if maxord was reduced below nq, then the pointers are finally set -c so that savf is identical to yh(*,maxord+2). -c----------------------------------------------------------------------- - lyhd = lyh - lyhn - imax = lyhn - 1 + lenyhm -c move yh. branch for move right, no move, or move left. -------------- - if (lyhd.lt.0) go to 70 - if (lyhd.eq.0) go to 80 - go to 74 - 70 do 72 i = lyhn,imax - j = imax + lyhn - i - 72 rwork(j) = rwork(j+lyhd) - go to 80 - 74 do 76 i = lyhn,imax - 76 rwork(i) = rwork(i+lyhd) - 80 lyh = lyhn - iwork(22) = lyh - if (miter .eq. 0 .or. miter .eq. 3) go to 92 - if (moss .ne. 2) go to 85 -c temporarily load ewt if miter = 1 or 2 and moss = 2. ----------------- - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 82 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 621 - 82 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 85 continue -c iprep and prep do sparse matrix preprocessing if miter = 1 or 2. 
----- - lsavf = min0(lsavf,lrw) - lewt = min0(lewt,lrw) - lacor = min0(lacor,lrw) - call iprep (neq, y, rwork, iwork(lia), iwork(lja), ipflag, f, jac) - lenrw = lwm - 1 + lenwk + lrest - iwork(17) = lenrw - if (ipflag .ne. -1) iwork(23) = ipian - if (ipflag .ne. -1) iwork(24) = ipjan - ipgo = -ipflag + 1 - go to (90, 628, 629, 630, 631, 632, 633), ipgo - 90 iwork(22) = lyh - if (lenrw .gt. lrw) go to 617 -c set flag to signal parameter changes to stode. ----------------------- - 92 jstart = -1 - if (n .eq. nyh) go to 200 -c neq was reduced. zero part of yh to avoid undefined references. ----- - i1 = lyh + l*nyh - i2 = lyh + (maxord + 1)*nyh - 1 - if (i1 .gt. i2) go to 200 - do 95 i = i1,i2 - 95 rwork(i) = 0.0d0 - go to 200 -c----------------------------------------------------------------------- -c block c. -c the next block is for the initial call only (istate = 1). -c it contains all remaining initializations, the initial call to f, -c the sparse matrix preprocessing (miter = 1 or 2), and the -c calculation of the initial step size. -c the error weights in ewt are inverted after being loaded. -c----------------------------------------------------------------------- - 100 continue - lyh = lyhn - iwork(22) = lyh - tn = t - nst = 0 - h = 1.0d0 - nnz = 0 - ngp = 0 - nzl = 0 - nzu = 0 -c load the initial value vector in yh. --------------------------------- - do 105 i = 1,n - 105 rwork(i+lyh-1) = y(i) -c initial call to f. (lf0 points to yh(*,2).) ------------------------- - lf0 = lyh + nyh - call f (neq, t, y, rwork(lf0)) - nfe = 1 -c load and invert the ewt array. (h is temporarily set to 1.0.) ------- - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 110 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 621 - 110 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - if (miter .eq. 0 .or. miter .eq. 3) go to 120 -c iprep and prep do sparse matrix preprocessing if miter = 1 or 2. 
----- - lacor = min0(lacor,lrw) - call iprep (neq, y, rwork, iwork(lia), iwork(lja), ipflag, f, jac) - lenrw = lwm - 1 + lenwk + lrest - iwork(17) = lenrw - if (ipflag .ne. -1) iwork(23) = ipian - if (ipflag .ne. -1) iwork(24) = ipjan - ipgo = -ipflag + 1 - go to (115, 628, 629, 630, 631, 632, 633), ipgo - 115 iwork(22) = lyh - if (lenrw .gt. lrw) go to 617 -c check tcrit for legality (itask = 4 or 5). --------------------------- - 120 continue - if (itask .ne. 4 .and. itask .ne. 5) go to 125 - tcrit = rwork(1) - if ((tcrit - tout)*(tout - t) .lt. 0.0d0) go to 625 - if (h0 .ne. 0.0d0 .and. (t + h0 - tcrit)*h0 .gt. 0.0d0) - 1 h0 = tcrit - t -c initialize all remaining parameters. --------------------------------- - 125 uround = d1mach(4) - jstart = 0 - if (miter .ne. 0) rwork(lwm) = dsqrt(uround) - msbj = 50 - nslj = 0 - ccmxj = 0.2d0 - psmall = 1000.0d0*uround - rbig = 0.01d0/psmall - nhnil = 0 - nje = 0 - nlu = 0 - nslast = 0 - hu = 0.0d0 - nqu = 0 - ccmax = 0.3d0 - maxcor = 3 - msbp = 20 - mxncf = 10 -c----------------------------------------------------------------------- -c the coding below computes the step size, h0, to be attempted on the -c first step, unless the user has supplied a value for this. -c first check that tout - t differs significantly from zero. -c a scalar tolerance quantity tol is computed, as max(rtol(i)) -c if this is positive, or max(atol(i)/abs(y(i))) otherwise, adjusted -c so as to be between 100*uround and 1.0e-3. -c then the computed value h0 is given by.. -c neq -c h0**2 = tol / ( w0**-2 + (1/neq) * sum ( f(i)/ywt(i) )**2 ) -c 1 -c where w0 = max ( abs(t), abs(tout) ), -c f(i) = i-th component of initial value of f, -c ywt(i) = ewt(i)/tol (a weight for y(i)). -c the sign of h0 is inferred from the initial values of tout and t. -c----------------------------------------------------------------------- - lf0 = lyh + nyh - if (h0 .ne. 0.0d0) go to 180 - tdist = dabs(tout - t) - w0 = dmax1(dabs(t),dabs(tout)) - if (tdist .lt. 
2.0d0*uround*w0) go to 622 - tol = rtol(1) - if (itol .le. 2) go to 140 - do 130 i = 1,n - 130 tol = dmax1(tol,rtol(i)) - 140 if (tol .gt. 0.0d0) go to 160 - atoli = atol(1) - do 150 i = 1,n - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - ayi = dabs(y(i)) - if (ayi .ne. 0.0d0) tol = dmax1(tol,atoli/ayi) - 150 continue - 160 tol = dmax1(tol,100.0d0*uround) - tol = dmin1(tol,0.001d0) - sum = vnorm (n, rwork(lf0), rwork(lewt)) - sum = 1.0d0/(tol*w0*w0) + tol*sum**2 - h0 = 1.0d0/dsqrt(sum) - h0 = dmin1(h0,tdist) - h0 = dsign(h0,tout-t) -c adjust h0 if necessary to meet hmax bound. --------------------------- - 180 rh = dabs(h0)*hmxi - if (rh .gt. 1.0d0) h0 = h0/rh -c load h with h0 and scale yh(*,2) by h0. ------------------------------ - h = h0 - do 190 i = 1,n - 190 rwork(i+lf0-1) = h0*rwork(i+lf0-1) - go to 270 -c----------------------------------------------------------------------- -c block d. -c the next code block is for continuation calls only (istate = 2 or 3) -c and is to check stop conditions before taking a step. -c----------------------------------------------------------------------- - 200 nslast = nst - go to (210, 250, 220, 230, 240), itask - 210 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 220 tp = tn - hu*(1.0d0 + 100.0d0*uround) - if ((tp - tout)*h .gt. 0.0d0) go to 623 - if ((tn - tout)*h .lt. 0.0d0) go to 250 - go to 400 - 230 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - if ((tcrit - tout)*h .lt. 0.0d0) go to 625 - if ((tn - tout)*h .lt. 0.0d0) go to 245 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 240 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - 245 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 
0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (istate .eq. 2) jstart = -2 -c----------------------------------------------------------------------- -c block e. -c the next block is normally executed for all calls and contains -c the call to the one-step core integrator stode. -c -c this is a looping point for the integration steps. -c -c first check for too many steps being taken, update ewt (if not at -c start of problem), check for too much accuracy being requested, and -c check for h below the roundoff level in t. -c----------------------------------------------------------------------- - 250 continue - if ((nst-nslast) .ge. mxstep) go to 500 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 260 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 510 - 260 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 270 tolsf = uround*vnorm (n, rwork(lyh), rwork(lewt)) - if (tolsf .le. 1.0d0) go to 280 - tolsf = tolsf*2.0d0 - if (nst .eq. 0) go to 626 - go to 520 - 280 if ((tn + h) .ne. tn) go to 290 - nhnil = nhnil + 1 - if (nhnil .gt. mxhnil) go to 290 - call xerrwv('lsodes-- warning..internal t (=r1) and h (=r2) are', - 1 50, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' such that in the machine, t + h = t on the next step ', - 1 60, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' (h = step size). solver will continue anyway', - 1 50, 101, 0, 0, 0, 0, 2, tn, h) - if (nhnil .lt. mxhnil) go to 290 - call xerrwv('lsodes-- above warning has been issued i1 times. 
', - 1 50, 102, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' it will not be issued again for this problem', - 1 50, 102, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - 290 continue -c----------------------------------------------------------------------- -c call stode(neq,y,yh,nyh,yh,ewt,savf,acor,wm,wm,f,jac,prjs,slss) -c----------------------------------------------------------------------- - call stode (neq, y, rwork(lyh), nyh, rwork(lyh), rwork(lewt), - 1 rwork(lsavf), rwork(lacor), rwork(lwm), rwork(lwm), - 2 f, jac, prjs, slss) - kgo = 1 - kflag - go to (300, 530, 540, 550), kgo -c----------------------------------------------------------------------- -c block f. -c the following block handles the case of a successful return from the -c core integrator (kflag = 0). test for stop conditions. -c----------------------------------------------------------------------- - 300 init = 1 - go to (310, 400, 330, 340, 350), itask -c itask = 1. if tout has been reached, interpolate. ------------------- - 310 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 -c itask = 3. jump to exit if tout was reached. ------------------------ - 330 if ((tn - tout)*h .ge. 0.0d0) go to 400 - go to 250 -c itask = 4. see if tout or tcrit was reached. adjust h if necessary. - 340 if ((tn - tout)*h .lt. 0.0d0) go to 345 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 - 345 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - jstart = -2 - go to 250 -c itask = 5. see if tcrit was reached and jump to exit. --------------- - 350 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx -c----------------------------------------------------------------------- -c block g. 
-c the following block handles all successful returns from lsodes. -c if itask .ne. 1, y is loaded from yh and t is set accordingly. -c istate is set to 2, the illegal input counter is zeroed, and the -c optional outputs are loaded into the work arrays before returning. -c if istate = 1 and tout = t, there is a return with no action taken, -c except that if this has happened repeatedly, the run is terminated. -c----------------------------------------------------------------------- - 400 do 410 i = 1,n - 410 y(i) = rwork(i+lyh-1) - t = tn - if (itask .ne. 4 .and. itask .ne. 5) go to 420 - if (ihit) t = tcrit - 420 istate = 2 - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - iwork(19) = nnz - iwork(20) = ngp - iwork(21) = nlu - iwork(25) = nzl - iwork(26) = nzu - return -c - 430 ntrep = ntrep + 1 - if (ntrep .lt. 5) return - call xerrwv( - 1 'lsodes-- repeated calls with istate = 1 and tout = t (=r1) ', - 1 60, 301, 0, 0, 0, 0, 1, t, 0.0d0) - go to 800 -c----------------------------------------------------------------------- -c block h. -c the following block handles all unsuccessful returns other than -c those for illegal input. first the error message routine is called. -c if there was an error test or convergence test failure, imxer is set. -c then y is loaded from yh, t is set to tn, and the illegal input -c counter illin is set to 0. the optional outputs are loaded into -c the work arrays before returning. -c----------------------------------------------------------------------- -c the maximum number of steps was taken before reaching tout. ---------- - 500 call xerrwv('lsodes-- at current t (=r1), mxstep (=i1) steps ', - 1 50, 201, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' taken on this call before reaching tout ', - 1 50, 201, 0, 1, mxstep, 0, 1, tn, 0.0d0) - istate = -1 - go to 580 -c ewt(i) .le. 0.0 for some i (not at start of problem). 
---------------- - 510 ewti = rwork(lewt+i-1) - call xerrwv('lsodes-- at t (=r1), ewt(i1) has become r2 .le. 0.', - 1 50, 202, 0, 1, i, 0, 2, tn, ewti) - istate = -6 - go to 580 -c too much accuracy requested for machine precision. ------------------- - 520 call xerrwv('lsodes-- at t (=r1), too much accuracy requested ', - 1 50, 203, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' for precision of machine.. see tolsf (=r2) ', - 1 50, 203, 0, 0, 0, 0, 2, tn, tolsf) - rwork(14) = tolsf - istate = -2 - go to 580 -c kflag = -1. error test failed repeatedly or with abs(h) = hmin. ----- - 530 call xerrwv('lsodes-- at t(=r1) and step size h(=r2), the error', - 1 50, 204, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' test failed repeatedly or with abs(h) = hmin', - 1 50, 204, 0, 0, 0, 0, 2, tn, h) - istate = -4 - go to 560 -c kflag = -2. convergence failed repeatedly or with abs(h) = hmin. ---- - 540 call xerrwv('lsodes-- at t (=r1) and step size h (=r2), the ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' corrector convergence failed repeatedly ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' or with abs(h) = hmin ', - 1 30, 205, 0, 0, 0, 0, 2, tn, h) - istate = -5 - go to 560 -c kflag = -3. fatal error flag returned by prjs or slss (cdrv). ------- - 550 call xerrwv('lsodes-- at t (=r1) and step size h (=r2), a fatal', - 1 50, 207, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' error flag was returned by cdrv (by way of ', - 1 50, 207, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' subroutine prjs or slss)', - 1 30, 207, 0, 0, 0, 0, 2, tn, h) - istate = -7 - go to 580 -c compute imxer if relevant. ------------------------------------------- - 560 big = 0.0d0 - imxer = 1 - do 570 i = 1,n - size = dabs(rwork(i+lacor-1)*rwork(i+lewt-1)) - if (big .ge. size) go to 570 - big = size - imxer = i - 570 continue - iwork(16) = imxer -c set y vector, t, illin, and optional outputs. 
------------------------ - 580 do 590 i = 1,n - 590 y(i) = rwork(i+lyh-1) - t = tn - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nfe - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - iwork(19) = nnz - iwork(20) = ngp - iwork(21) = nlu - iwork(25) = nzl - iwork(26) = nzu - return -c----------------------------------------------------------------------- -c block i. -c the following block handles all error returns due to illegal input -c (istate = -3), as detected before calling the core integrator. -c first the error message routine is called. then if there have been -c 5 consecutive such returns just before this call to the solver, -c the run is halted. -c----------------------------------------------------------------------- - 601 call xerrwv('lsodes-- istate (=i1) illegal ', - 1 30, 1, 0, 1, istate, 0, 0, 0.0d0, 0.0d0) - go to 700 - 602 call xerrwv('lsodes-- itask (=i1) illegal ', - 1 30, 2, 0, 1, itask, 0, 0, 0.0d0, 0.0d0) - go to 700 - 603 call xerrwv('lsodes-- istate .gt. 1 but lsodes not initialized ', - 1 50, 3, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - go to 700 - 604 call xerrwv('lsodes-- neq (=i1) .lt. 1 ', - 1 30, 4, 0, 1, neq(1), 0, 0, 0.0d0, 0.0d0) - go to 700 - 605 call xerrwv('lsodes-- istate = 3 and neq increased (i1 to i2) ', - 1 50, 5, 0, 2, n, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 606 call xerrwv('lsodes-- itol (=i1) illegal ', - 1 30, 6, 0, 1, itol, 0, 0, 0.0d0, 0.0d0) - go to 700 - 607 call xerrwv('lsodes-- iopt (=i1) illegal ', - 1 30, 7, 0, 1, iopt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 608 call xerrwv('lsodes-- mf (=i1) illegal ', - 1 30, 8, 0, 1, mf, 0, 0, 0.0d0, 0.0d0) - go to 700 - 609 call xerrwv('lsodes-- seth (=r1) .lt. 0.0 ', - 1 30, 9, 0, 0, 0, 0, 1, seth, 0.0d0) - go to 700 - 611 call xerrwv('lsodes-- maxord (=i1) .lt. 0 ', - 1 30, 11, 0, 1, maxord, 0, 0, 0.0d0, 0.0d0) - go to 700 - 612 call xerrwv('lsodes-- mxstep (=i1) .lt. 
0 ', - 1 30, 12, 0, 1, mxstep, 0, 0, 0.0d0, 0.0d0) - go to 700 - 613 call xerrwv('lsodes-- mxhnil (=i1) .lt. 0 ', - 1 30, 13, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - go to 700 - 614 call xerrwv('lsodes-- tout (=r1) behind t (=r2) ', - 1 40, 14, 0, 0, 0, 0, 2, tout, t) - call xerrwv(' integration direction is given by h0 (=r1) ', - 1 50, 14, 0, 0, 0, 0, 1, h0, 0.0d0) - go to 700 - 615 call xerrwv('lsodes-- hmax (=r1) .lt. 0.0 ', - 1 30, 15, 0, 0, 0, 0, 1, hmax, 0.0d0) - go to 700 - 616 call xerrwv('lsodes-- hmin (=r1) .lt. 0.0 ', - 1 30, 16, 0, 0, 0, 0, 1, hmin, 0.0d0) - go to 700 - 617 call xerrwv('lsodes-- rwork length is insufficient to proceed. ', - 1 50, 17, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is .ge. lenrw (=i1), exceeds lrw (=i2)', - 1 60, 17, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 618 call xerrwv('lsodes-- iwork length is insufficient to proceed. ', - 1 50, 18, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is .ge. leniw (=i1), exceeds liw (=i2)', - 1 60, 18, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - go to 700 - 619 call xerrwv('lsodes-- rtol(i1) is r1 .lt. 0.0 ', - 1 40, 19, 0, 1, i, 0, 1, rtoli, 0.0d0) - go to 700 - 620 call xerrwv('lsodes-- atol(i1) is r1 .lt. 0.0 ', - 1 40, 20, 0, 1, i, 0, 1, atoli, 0.0d0) - go to 700 - 621 ewti = rwork(lewt+i-1) - call xerrwv('lsodes-- ewt(i1) is r1 .le. 
0.0 ', - 1 40, 21, 0, 1, i, 0, 1, ewti, 0.0d0) - go to 700 - 622 call xerrwv( - 1 'lsodes-- tout (=r1) too close to t(=r2) to start integration', - 1 60, 22, 0, 0, 0, 0, 2, tout, t) - go to 700 - 623 call xerrwv( - 1 'lsodes-- itask = i1 and tout (=r1) behind tcur - hu (= r2) ', - 1 60, 23, 0, 1, itask, 0, 2, tout, tp) - go to 700 - 624 call xerrwv( - 1 'lsodes-- itask = 4 or 5 and tcrit (=r1) behind tcur (=r2) ', - 1 60, 24, 0, 0, 0, 0, 2, tcrit, tn) - go to 700 - 625 call xerrwv( - 1 'lsodes-- itask = 4 or 5 and tcrit (=r1) behind tout (=r2) ', - 1 60, 25, 0, 0, 0, 0, 2, tcrit, tout) - go to 700 - 626 call xerrwv('lsodes-- at start of problem, too much accuracy ', - 1 50, 26, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' requested for precision of machine.. see tolsf (=r1) ', - 1 60, 26, 0, 0, 0, 0, 1, tolsf, 0.0d0) - rwork(14) = tolsf - go to 700 - 627 call xerrwv('lsodes-- trouble from intdy. itask = i1, tout = r1', - 1 50, 27, 0, 1, itask, 0, 1, tout, 0.0d0) - go to 700 - 628 call xerrwv( - 1 'lsodes-- rwork length insufficient (for subroutine prep). ', - 1 60, 28, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is .ge. lenrw (=i1), exceeds lrw (=i2)', - 1 60, 28, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 629 call xerrwv( - 1 'lsodes-- rwork length insufficient (for subroutine jgroup). ', - 1 60, 29, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is .ge. lenrw (=i1), exceeds lrw (=i2)', - 1 60, 29, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 630 call xerrwv( - 1 'lsodes-- rwork length insufficient (for subroutine odrv). ', - 1 60, 30, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is .ge. 
lenrw (=i1), exceeds lrw (=i2)', - 1 60, 30, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 631 call xerrwv( - 1 'lsodes-- error from odrv in yale sparse matrix package ', - 1 60, 31, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - imul = (iys - 1)/n - irem = iys - imul*n - call xerrwv( - 1 ' at t (=r1), odrv returned error flag = i1*neq + i2. ', - 1 60, 31, 0, 2, imul, irem, 1, tn, 0.0d0) - go to 700 - 632 call xerrwv( - 1 'lsodes-- rwork length insufficient (for subroutine cdrv). ', - 1 60, 32, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' length needed is .ge. lenrw (=i1), exceeds lrw (=i2)', - 1 60, 32, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 633 call xerrwv( - 1 'lsodes-- error from cdrv in yale sparse matrix package ', - 1 60, 33, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - imul = (iys - 1)/n - irem = iys - imul*n - call xerrwv( - 1 ' at t (=r1), cdrv returned error flag = i1*neq + i2. ', - 1 60, 33, 0, 2, imul, irem, 1, tn, 0.0d0) - if (imul .eq. 2) call xerrwv( - 1 ' duplicate entry in sparsity structure descriptors ', - 1 60, 33, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - if (imul .eq. 3 .or. imul .eq. 6) call xerrwv( - 1 ' insufficient storage for nsfc (called by cdrv) ', - 1 60, 33, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 700 if (illin .eq. 5) go to 710 - illin = illin + 1 - istate = -3 - return - 710 call xerrwv('lsodes-- repeated occurrences of illegal input ', - 1 50, 302, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 800 call xerrwv('lsodes-- run aborted.. 
apparent infinite loop ', - 1 50, 303, 2, 0, 0, 0, 0, 0.0d0, 0.0d0) - return -c----------------------- end of subroutine lsodes ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/lsodi.f b/scipy-0.10.1/scipy/integrate/odepack/lsodi.f deleted file mode 100644 index e7159d384d..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/lsodi.f +++ /dev/null @@ -1,1763 +0,0 @@ - subroutine lsodi (res, adda, jac, neq, y, ydoti, t, tout, itol, - 1 rtol, atol, itask, istate, iopt, rwork, lrw, iwork, liw, mf ) - external res, adda, jac - integer neq, itol, itask, istate, iopt, lrw, iwork, liw, mf - double precision y, ydoti, t, tout, rtol, atol, rwork - dimension neq(1), y(1), ydoti(1), rtol(1), atol(1), rwork(lrw), - 1 iwork(liw) -c----------------------------------------------------------------------- -c this is the march 30, 1987 version of lsodi.. -c livermore solver for ordinary differential equations (implicit form). -c this version is in double precision. -c -c lsodi solves the initial value problem for linearly implicit -c systems of first order ode-s, -c a(t,y) * dy/dt = g(t,y) , where a(t,y) is a square matrix, -c or, in component form, -c ( a * ( dy / dt )) + ... + ( a * ( dy / dt )) = -c i,1 1 i,neq neq -c -c = g ( t, y , y ,..., y ) ( i = 1,...,neq ) -c i 1 2 neq -c -c if a is singular, this is a differential-algebraic system. -c -c lsodi is a variant version of the lsode package. -c----------------------------------------------------------------------- -c reference.. -c alan c. hindmarsh, odepack, a systematized collection of ode -c solvers, in scientific computing, r. s. stepleman et al. (eds.), -c north-holland, amsterdam, 1983, pp. 55-64. -c----------------------------------------------------------------------- -c authors... jeffrey f. painter and -c alan c. hindmarsh -c computing and mathematics research division, l-316 -c lawrence livermore national laboratory -c livermore, ca 94550. 
-c -c----------------------------------------------------------------------- -c summary of usage. -c -c communication between the user and the lsodi package, for normal -c situations, is summarized here. this summary describes only a subset -c of the full set of options available. see the full description for -c details, including optional communication, nonstandard options, -c and instructions for special situations. see also the example -c problem (with program and output) following this summary. -c -c a. first, provide a subroutine of the form.. -c subroutine res (neq, t, y, s, r, ires) -c dimension y(neq), s(neq), r(neq) -c which computes the residual function -c r = g(t,y) - a(t,y) * s , -c as a function of t and the vectors y and s. (s is an internally -c generated approximation to dy/dt.) the arrays y and s are inputs -c to the res routine and should not be altered. the residual -c vector is to be stored in the array r. the argument ires should be -c ignored for casual use of lsodi. (for uses of ires, see the -c paragraph on res in the full description below.) -c -c b. next, decide whether full or banded form is more economical -c for the storage of matrices. lsodi must deal internally with the -c matrices a and dr/dy, where r is the residual function defined above. -c lsodi generates a linear combination of these two matrices, and -c this is treated in either full or banded form. -c the matrix structure is communicated by a method flag mf, -c which is 21 or 22 for the full case, and 24 or 25 in the band case. -c in the banded case, lsodi requires two half-bandwidth -c parameters ml and mu. these are, respectively, the widths of the -c lower and upper parts of the band, excluding the main diagonal. -c thus the band consists of the locations (i,j) with -c i-ml .le. j .le. i+mu, and the full bandwidth is ml+mu+1. -c note that the band must accommodate the nonzero elements of -c a(t,y), dg/dy, and d(a*s)/dy (s fixed). 
alternatively, one -c can define a band that encloses only the elements that are relatively -c large in magnitude, and gain some economy in storage and possibly -c also efficiency, although the appropriate threshhold for -c retaining matrix elements is highly problem-dependent. -c -c c. you must also provide a subroutine of the form.. -c subroutine adda (neq, t, y, ml, mu, p, nrowp) -c dimension y(neq), p(nrowp,neq) -c which adds the matrix a = a(t,y) to the contents of the array p. -c t and the y array are input and should not be altered. -c in the full matrix case, this routine should add elements of -c to p in the usual order. i.e., add a(i,j) to p(i,j). (ignore the -c ml and mu arguments in this case.) -c in the band matrix case, this routine should add element a(i,j) -c to p(i-j+mu+1,j). i.e., add the diagonal lines of a to the rows of -c p from the top down (the top line of a added to the first row of p). -c -c d. for the sake of efficiency, you are encouraged to supply the -c jacobian matrix dr/dy in closed form, where r = g(t,y) - a(t,y)*s -c (s = a fixed vector) as above. if dr/dy is being supplied, -c use mf = 21 or 24, and provide a subroutine of the form.. -c subroutine jac (neq, t, y, s, ml, mu, p, nrowp) -c dimension y(neq), s(neq), p(nrowp,neq) -c which computes dr/dy as a function of t, y, and s. here t, y, and -c s are inputs, and the routine is to load dr/dy into p as follows.. -c in the full matrix case (mf = 21), load p(i,j) with dr(i)/dy(j), -c the partial derivative of r(i) with respect to y(j). (ignore the -c ml and mu arguments in this case.) -c in the band matrix case (mf = 24), load p(i-j+mu+1,j) with -c dr(i)/dy(j), i.e. load the diagonal lines of dr/dy into the rows of -c p from the top down. -c in either case, only nonzero elements need be loaded, and the -c indexing of p is the same as in the adda routine. -c note that if a is independent of y (or this dependence -c is weak enough to be ignored) then jac is to compute dg/dy. 
-c if it is not feasible to provide a jac routine, use -c mf = 22 or 25, and lsodi will compute an approximate jacobian -c internally by difference quotients. -c -c e. next decide whether or not to provide the initial value of the -c derivative vector dy/dt. if the initial value of a(t,y) is -c nonsingular (and not too ill-conditioned), you may let lsodi compute -c this vector (istate = 0). (lsodi will solve the system a*s = g for -c s, with initial values of a and g.) if a(t,y) is initially -c singular, then the system is a differential-algebraic system, and -c you must make use of the particular form of the system to compute the -c initial values of y and dy/dt. in that case, use istate = 1 and -c load the initial value of dy/dt into the array ydoti. -c the input array ydoti and the initial y array must be consistent with -c the equations a*dy/dt = g. this implies that the initial residual -c r = g(t,y) - a(t,y)*ydoti must be approximately zero. -c -c f. write a main program which calls subroutine lsodi once for -c each point at which answers are desired. this should also provide -c for possible use of logical unit 6 for output of error messages -c by lsodi. on the first call to lsodi, supply arguments as follows.. -c res = name of user subroutine for residual function r. -c adda = name of user subroutine for computing and adding a(t,y). -c jac = name of user subroutine for jacobian matrix dr/dy -c (mf = 21 or 24). if not used, pass a dummy name. -c note.. the names for the res and adda routines and (if used) the -c jac routine must be declared external in the calling program. -c neq = number of scalar equations in the system. -c y = array of initial values, of length neq. -c ydoti = array of length neq (containing initial dy/dt if istate = 1). -c t = the initial value of the independent variable. -c tout = first point where output is desired (.ne. t). -c itol = 1 or 2 according as atol (below) is a scalar or array. 
-c rtol = relative tolerance parameter (scalar). -c atol = absolute tolerance parameter (scalar or array). -c the estimated local error in y(i) will be controlled so as -c to be roughly less (in magnitude) than -c ewt(i) = rtol*abs(y(i)) + atol if itol = 1, or -c ewt(i) = rtol*abs(y(i)) + atol(i) if itol = 2. -c thus the local error test passes if, in each component, -c either the absolute error is less than atol (or atol(i)), -c or the relative error is less than rtol. -c use rtol = 0.0 for pure absolute error control, and -c use atol = 0.0 (or atol(i) = 0.0) for pure relative error -c control. caution.. actual (global) errors may exceed these -c local tolerances, so choose them conservatively. -c itask = 1 for normal computation of output values of y at t = tout. -c istate = integer flag (input and output). set istate = 1 if the -c initial dy/dt is supplied, and 0 otherwise. -c iopt = 0 to indicate no optional inputs used. -c rwork = real work array of length at least.. -c 22 + 9*neq + neq**2 for mf = 21 or 22, -c 22 + 10*neq + (2*ml + mu)*neq for mf = 24 or 25. -c lrw = declared length of rwork (in user-s dimension). -c iwork = integer work array of length at least 20 + neq. -c if mf = 24 or 25, input in iwork(1),iwork(2) the lower -c and upper half-bandwidths ml,mu. -c liw = declared length of iwork (in user-s dimension). -c mf = method flag. standard values are.. -c 21 for a user-supplied full jacobian. -c 22 for an internally generated full jacobian. -c 24 for a user-supplied banded jacobian. -c 25 for an internally generated banded jacobian. -c for other choices of mf, see the paragraph on mf in -c the full description below. -c note that the main program must declare arrays y, ydoti, rwork, iwork, -c and possibly atol. -c -c g. the output from the first call (or any call) is.. -c y = array of computed values of y(t) vector. -c t = corresponding value of independent variable (normally tout). -c istate = 2 if lsodi was successful, negative otherwise. 
-c -1 means excess work done on this call (check all inputs). -c -2 means excess accuracy requested (tolerances too small). -c -3 means illegal input detected (see printed message). -c -4 means repeated error test failures (check all inputs). -c -5 means repeated convergence failures (perhaps bad jacobian -c supplied or wrong choice of tolerances). -c -6 means error weight became zero during problem. (solution -c component i vanished, and atol or atol(i) = 0.) -c -7 cannot occur in casual use. -c -8 means lsodi was unable to compute the initial dy/dt. -c in casual use, this means a(t,y) is initially singular. -c supply ydoti and use istate = 1 on the first call. -c -c if lsodi returns istate = -1, -4, or -5, then the output of -c lsodi also includes ydoti = array containing residual vector -c r = g - a * dy/dt evaluated at the current t, y, and dy/dt. -c -c h. to continue the integration after a successful return, simply -c reset tout and call lsodi again. no other parameters need be reset. -c -c -c----------------------------------------------------------------------- -c example problem. -c -c the following is a simple example problem, with the coding -c needed for its solution by lsodi. the problem is from chemical -c kinetics, and consists of the following three equations.. -c dy1/dt = -.04*y1 + 1.e4*y2*y3 -c dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 -c 0. = y1 + y2 + y3 - 1. -c on the interval from t = 0.0 to t = 4.e10, with initial conditions -c y1 = 1.0, y2 = y3 = 0. -c -c the following coding solves this problem with lsodi, using mf = 21 -c and printing results at t = .4, 4., ..., 4.e10. it uses -c itol = 2 and atol much smaller for y2 than y1 or y3 because -c y2 has much smaller values. dy/dt is supplied in ydoti. we had -c obtained the initial value of dy3/dt by differentiating the -c third equation and evaluating the first two at t=0. 
-c at the end of the run, statistical quantities of interest are -c printed (see optional outputs in the full description below). -c -c external resid, aplusp, dgbydy -c double precision atol, rtol, rwork, t, tout, y, ydoti -c dimension y(3), ydoti(3), atol(3), rwork(58), iwork(23) -c neq = 3 -c y(1) = 1.d0 -c y(2) = 0.d0 -c y(3) = 0.d0 -c ydoti(1) = -.04d0 -c ydoti(2) = .04d0 -c ydoti(3) = 0.d0 -c t = 0.d0 -c tout = .4d0 -c itol = 2 -c rtol = 1.d-4 -c atol(1) = 1.d-6 -c atol(2) = 1.d-10 -c atol(3) = 1.d-6 -c itask = 1 -c istate = 1 -c iopt = 0 -c lrw = 58 -c liw = 23 -c mf = 21 -c do 40 iout = 1,12 -c call lsodi(resid, aplusp, dgbydy, neq, y, ydoti, t, tout, itol, -c 1 rtol, atol, itask, istate, iopt, rwork, lrw, iwork, liw, mf) -c write (6,20) t, y(1), y(2), y(3) -c 20 format(' at t =',e12.4,' y =',3e14.6) -c if (istate .lt. 0 ) go to 80 -c 40 tout = tout*10.d0 -c write (6,60) iwork(11), iwork(12), iwork(13) -c 60 format(/' no. steps =',i4,' no. r-s =',i4, -c 1 ' no. j-s =',i4) -c stop -c 80 write (6,90) istate -c 90 format(///' error halt.. istate =',i3) -c stop -c end -c -c subroutine resid(neq, t, y, s, r, ires) -c double precision r, s, t, y -c dimension y(3), s(3), r(3) -c r(1) = -.04d0*y(1) + 1.d4*y(2)*y(3) - s(1) -c r(2) = .04d0*y(1) - 1.d4*y(2)*y(3) - 3.d7*y(2)*y(2) - s(2) -c r(3) = y(1) + y(2) + y(3) - 1.d0 -c return -c end -c -c subroutine aplusp(neq, t, y, ml, mu, p, nrowp) -c double precision p, t, y -c dimension y(3), p(nrowp,3) -c p(1,1) = p(1,1) + 1.d0 -c p(2,2) = p(2,2) + 1.d0 -c return -c end -c -c subroutine dgbydy(neq, t, y, s, ml, mu, p, nrowp) -c double precision s, t, p, y -c dimension y(3), s(3), p(nrowp,3) -c p(1,1) = -.04d0 -c p(1,2) = 1.d4*y(3) -c p(1,3) = 1.d4*y(2) -c p(2,1) = .04d0 -c p(2,2) = -1.d4*y(3) - 6.d7*y(2) -c p(2,3) = -1.d4*y(2) -c p(3,1) = 1.d0 -c p(3,2) = 1.d0 -c p(3,3) = 1.d0 -c return -c end -c -c the output of this program (on a cdc-7600 in single precision) -c is as follows.. 
-c -c at t = 4.0000e-01 y = 9.851726e-01 3.386406e-05 1.479357e-02 -c at t = 4.0000e+00 y = 9.055142e-01 2.240418e-05 9.446344e-02 -c at t = 4.0000e+01 y = 7.158050e-01 9.184616e-06 2.841858e-01 -c at t = 4.0000e+02 y = 4.504846e-01 3.222434e-06 5.495122e-01 -c at t = 4.0000e+03 y = 1.831701e-01 8.940379e-07 8.168290e-01 -c at t = 4.0000e+04 y = 3.897016e-02 1.621193e-07 9.610297e-01 -c at t = 4.0000e+05 y = 4.935213e-03 1.983756e-08 9.950648e-01 -c at t = 4.0000e+06 y = 5.159269e-04 2.064759e-09 9.994841e-01 -c at t = 4.0000e+07 y = 5.306413e-05 2.122677e-10 9.999469e-01 -c at t = 4.0000e+08 y = 5.494532e-06 2.197826e-11 9.999945e-01 -c at t = 4.0000e+09 y = 5.129457e-07 2.051784e-12 9.999995e-01 -c at t = 4.0000e+10 y = -7.170472e-08 -2.868188e-13 1.000000e+00 -c -c no. steps = 330 no. r-s = 404 no. j-s = 69 -c----------------------------------------------------------------------- -c full description of user interface to lsodi. -c -c the user interface to lsodi consists of the following parts. -c -c i. the call sequence to subroutine lsodi, which is a driver -c routine for the solver. this includes descriptions of both -c the call sequence arguments and of user-supplied routines. -c following these descriptions is a description of -c optional inputs available through the call sequence, and then -c a description of optional outputs (in the work arrays). -c -c ii. descriptions of other routines in the lsodi package that may be -c (optionally) called by the user. these provide the ability to -c alter error message handling, save and restore the internal -c common, and obtain specified derivatives of the solution y(t). -c -c iii. descriptions of common blocks to be declared in overlay -c or similar environments, or to be saved when doing an interrupt -c of the problem and continued solution later. -c -c iv. description of two routines in the lsodi package, either of -c which the user may replace with his own version, if desired. 
-c these relate to the measurement of errors. -c -c----------------------------------------------------------------------- -c part i. call sequence. -c -c the call sequence parameters used for input only are -c res, adda, jac, neq, tout, itol, rtol, atol, itask, -c iopt, lrw, liw, mf, -c and those used for both input and output are -c y, t, istate, ydoti. -c the work arrays rwork and iwork are also used for conditional and -c optional inputs and optional outputs. (the term output here refers -c to the return from subroutine lsodi to the user-s calling program.) -c -c the legality of input parameters will be thoroughly checked on the -c initial call for the problem, but not checked thereafter unless a -c change in input parameters is flagged by istate = 3 on input. -c -c the descriptions of the call arguments are as follows. -c -c res = the name of the user-supplied subroutine which supplies -c the residual vector for the ode system, defined by -c r = g(t,y) - a(t,y) * s -c as a function of the scalar t and the vectors -c s and y ( s approximates dy/dt ). this -c subroutine is to have the form -c subroutine res ( neq, t, y, s, r, ires ) -c dimension y(1), s(1), r(1) -c where neq, t, y, s, and ires are input, and r and -c ires are output. y, s, and r are arrays of length neq. -c in dimension statements such as that above, 1 is a -c dummy dimension. it can be replaced by any value. -c on input, ires indicates how lsodi will use the -c returned array r, as follows.. -c ires = 1 means that lsodi needs the full residual, -c r = g - a*s, exactly. -c ires = -1 means that lsodi is using r only to compute -c the jacobian dr/dy by difference quotients. -c the res routine can ignore ires, or it can omit some terms -c if ires = -1. if a does not depend on y, then res can -c just return r = g when ires = -1. if g - a*s contains other -c additive terms that are independent of y, these can also be -c dropped, if done consistently, when ires = -1. 
-c the subroutine should set the flag ires if it -c encounters a halt condition or illegal input. -c otherwise, it should not reset ires. on output, -c ires = 1 or -1 represents a normal return, and -c lsodi continues integrating the ode. leave ires -c unchanged from its input value. -c ires = 2 tells lsodi to immediately return control -c to the calling program, with istate = 3. this lets -c the calling program change parameters of the prob- -c lem if necessary. -c ires = 3 represents an error condition (for example, an -c illegal value of y). lsodi tries to integrate the ode without -c getting ires = 3 from res. if it cannot, lsodi returns -c with istate = -7 or -1. -c on an lsodi return with istate = 3, -1, or -7, the values -c of t and y returned correspond to the last point reached -c successfully without getting the flag ires = 2 or 3. -c the flag values ires = 2 and 3 should not be used to -c handle switches or root-stop conditions. this is better -c done by calling lsodi in a one-step mode and checking the -c stopping function for a sign change at each step. -c if quantities computed in the res routine are needed -c externally to lsodi, an extra call to res should be made -c for this purpose, for consistent and accurate results. -c to get the current dy/dt for the s argument, use intdy. -c res must be declared external in the calling -c program. see note below for more about res. -c -c adda = the name of the user-supplied subroutine which adds -c the matrix a = a(t,y) to another matrix stored in the same -c form as a. the storage form is determined by miter (see -c mf). this subroutine is to have the form -c subroutine adda ( neq, t, y, ml, mu, p, nrowp ) -c dimension y(1), p(nrowp,1) -c where neq, t, y, ml, mu, and nrowp are input and p is -c output. y is an array of length neq, and the matrix p is -c stored in an nrowp by neq array. -c in the full matrix case ( miter = 1 or 2 ) adda should -c add a to p(i,j). ml and mu are ignored. 
-c i,j -c in the band matrix case ( miter = 4 or 5 ) adda should -c add a to p(i-j+mu+1,j). -c i,j -c see jac for details on this band storage form. -c adda must be declared external in the calling program. -c see note below for more information about adda. -c -c jac = the name of the user-supplied subroutine which supplies -c the jacobian matrix, dr/dy, where r = g-a*s. the form of the -c jacobian matrix is determined by miter. jac is required -c if miter = 1 or 4 -- otherwise a dummy name can be -c passed. this subroutine is to have the form -c subroutine jac ( neq, t, y, s, ml, mu, p, nrowp ) -c dimension y(1), s(1), p(nrowp,1) -c where neq, t, y, s, ml, mu, and nrowp are input and p -c is output. y and s are arrays of length neq, and the -c matrix p is stored in an nrowp by neq array. -c p is to be loaded with partial derivatives ( elements -c of the jacobian matrix ) on output. -c in the full matrix case ( miter = 1 ), ml and mu -c are ignored and the jacobian is to be loaded into p -c by columns- i.e., dr(i)/dy(j) is loaded into p(i,j). -c in the band matrix case ( miter = 4 ), the ele- -c ments within the band are to be loaded into p by -c by columns, with diagonal lines of dr/dy loaded into -c the rows of p. thus dr(i)/dy(j) is to be loaded -c into p(i-j+mu+1,j). the locations in p in the two -c triangular areas which correspond to nonexistent matrix -c elements can be ignored or loaded arbitrarily, as they -c they are overwritten by lsodi. ml and mu are the half- -c bandwidth parameters ( see iwork ). -c in either case, p is preset to zero by the solver, -c so that only the nonzero elements need be loaded by jac. -c each call to jac is preceded by a call to res with the same -c arguments neq, t, y, and s. thus to gain some efficiency, -c intermediate quantities shared by both calculations may be -c saved in a user common block by res and not recomputed by jac -c if desired. also, jac may alter the y array, if desired. -c jac need not provide dr/dy exactly. 
a crude -c approximation (possibly with a smaller bandwidth) will do. -c jac must be declared external in the calling program. -c see note below for more about jac. -c -c note on res, adda, and jac-- these -c subroutines may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in the subroutines) and/or y has length -c exceeding neq(1). however, these routines should not alter -c neq(1), y(1),...,y(neq) or any other input variables. -c see the descriptions of neq and y below. -c -c neq = the size of the system (number of first order ordinary -c differential equations or scalar algebraic equations). -c used only for input. -c neq may be decreased, but not increased, during the problem. -c if neq is decreased (with istate = 3 on input), the -c remaining components of y should be left undisturbed, if -c these are to be accessed in res, adda, or jac. -c -c normally, neq is a scalar, and it is generally referred to -c as a scalar in this user interface description. however, -c neq may be an array, with neq(1) set to the system size. -c (the lsodi package accesses only neq(1).) in either case, -c this parameter is passed as the neq argument in all calls -c to res, adda, and jac. hence, if it is an array, -c locations neq(2),... may be used to store other integer data -c and pass it to res, adda, or jac. each such subroutine -c must include neq in a dimension statement in that case. -c -c y = a real array for the vector of dependent variables, of -c length neq or more. used for both input and output on the -c first call (istate = 0 or 1), and only for output on other -c calls. on the first call, y must contain the vector of -c initial values. on output, y contains the computed solution -c vector, evaluated at t. if desired, the y array may be used -c for other purposes between calls to the solver. -c -c this array is passed as the y argument in all calls to res, -c adda, and jac. 
hence its length may exceed neq, -c and locations y(neq+1),... may be used to store other real -c data and pass it to res, adda, or jac. (the lsodi -c package accesses only y(1),...,y(neq). ) -c -c ydoti = a real array for the initial value of the vector -c dy/dt and for work space, of dimension at least neq. -c -c on input... -c if istate = 0 then lsodi will compute the initial value -c of dy/dt, if a is nonsingular. thus ydoti will -c serve only as work space and may have any value. -c if istate = 1 then ydoti must contain the initial value -c of dy/dt. -c if istate = 2 or 3 (continuation calls) then ydoti -c may have any value. -c n.b.- if the initial value of a is singular, then -c lsodi cannot compute the initial value of dy/dt, so -c it must be provided in ydoti, with istate=1. -c -c on output, when lsodi terminates abnormally with istate = -c -1, -4, or -5, ydoti will contain the residual -c r = g(t,y) - a(t,y)*(dy/dt). if r is large, t is near -c its initial value, and ydoti is supplied with istate=1, -c there may have been an incorrect input value of -c ydoti = dy/dt or the problem ( as given to lsodi ) -c may not have a solution. -c -c if desired, the ydoti array may be used for other -c purposes between calls to the solver. -c -c t = the independent variable. on input, t is used only on the -c first call, as the initial point of the integration. -c on output, after each call, t is the value at which a -c computed solution y is evaluated (usually the same as tout). -c on an error return, t is the farthest point reached. -c -c tout = the next value of t at which a computed solution is desired. -c used only for input. -c -c when starting the problem (istate = 0 or 1), tout may be -c equal to t for one call, then should .ne. t for the next -c call. for the initial t, an input value of tout .ne. t is -c used in order to determine the direction of the integration -c (i.e. the algebraic sign of the step sizes) and the rough -c scale of the problem. 
integration in either direction -c (forward or backward in t) is permitted. -c -c if itask = 2 or 5 (one-step modes), tout is ignored after -c the first call (i.e. the first call with tout .ne. t). -c otherwise, tout is required on every call. -c -c if itask = 1, 3, or 4, the values of tout need not be -c monotone, but a value of tout which backs up is limited -c to the current internal t interval, whose endpoints are -c tcur - hu and tcur (see optional outputs, below, for -c tcur and hu). -c -c itol = an indicator for the type of error control. see -c description below under atol. used only for input. -c -c rtol = a relative error tolerance parameter, either a scalar or -c an array of length neq. see description below under atol. -c input only. -c -c atol = an absolute error tolerance parameter, either a scalar or -c an array of length neq. input only. -c -c the input parameters itol, rtol, and atol determine -c the error control performed by the solver. the solver will -c control the vector e = (e(i)) of estimated local errors -c in y, according to an inequality of the form -c rms-norm of ( e(i)/ewt(i) ) .le. 1, -c where ewt(i) = rtol(i)*abs(y(i)) + atol(i), -c and the rms-norm (root-mean-square norm) here is -c rms-norm(v) = sqrt(sum v(i)**2 / neq). here ewt = (ewt(i)) -c is a vector of weights which must always be positive, and -c the values of rtol and atol should all be non-negative. -c the following table gives the types (scalar/array) of -c rtol and atol, and the corresponding form of ewt(i). -c -c itol rtol atol ewt(i) -c 1 scalar scalar rtol*abs(y(i)) + atol -c 2 scalar array rtol*abs(y(i)) + atol(i) -c 3 array scalar rtol(i)*abs(y(i)) + atol -c 4 array scalar rtol(i)*abs(y(i)) + atol(i) -c -c when either of these parameters is a scalar, it need not -c be dimensioned in the user-s calling program. 
-c -c if none of the above choices (with itol, rtol, and atol -c fixed throughout the problem) is suitable, more general -c error controls can be obtained by substituting -c user-supplied routines for the setting of ewt and/or for -c the norm calculation. see part iv below. -c -c if global errors are to be estimated by making a repeated -c run on the same problem with smaller tolerances, then all -c components of rtol and atol (i.e. of ewt) should be scaled -c down uniformly -c -c itask = an index specifying the task to be performed. -c input only. itask has the following values and meanings. -c 1 means normal computation of output values of y(t) at -c t = tout (by overshooting and interpolating). -c 2 means take one step only and return. -c 3 means stop at the first internal mesh point at or -c beyond t = tout and return. -c 4 means normal computation of output values of y(t) at -c t = tout but without overshooting t = tcrit. -c tcrit must be input as rwork(1). tcrit may be equal to -c or beyond tout, but not behind it in the direction of -c integration. this option is useful if the problem -c has a singularity at or beyond t = tcrit. -c 5 means take one step, without passing tcrit, and return. -c tcrit must be input as rwork(1). -c -c note.. if itask = 4 or 5 and the solver reaches tcrit -c (within roundoff), it will return t = tcrit (exactly) to -c indicate this (unless itask = 4 and tout comes before tcrit, -c in which case answers at t = tout are returned first). -c -c istate = an index used for input and output to specify the -c state of the calculation. -c -c on input, the values of istate are as follows. -c 0 means this is the first call for the problem, and -c lsodi is to compute the initial value of dy/dt -c (while doing other initializations). see note below. -c 1 means this is the first call for the problem, and -c the initial value of dy/dt has been supplied in -c ydoti (lsodi will do other initializations). see note -c below. 
-c 2 means this is not the first call, and the calculation -c is to continue normally, with no change in any input -c parameters except possibly tout and itask. -c (if itol, rtol, and/or atol are changed between calls -c with istate = 2, the new values will be used but not -c tested for legality.) -c 3 means this is not the first call, and the -c calculation is to continue normally, but with -c a change in input parameters other than -c tout and itask. changes are allowed in -c neq, itol, rtol, atol, iopt, lrw, liw, mf, ml, mu, -c and any of the optional inputs except h0. -c (see iwork description for ml and mu.) -c note.. a preliminary call with tout = t is not counted -c as a first call here, as no initialization or checking of -c input is done. (such a call is sometimes useful for the -c purpose of outputting the initial conditions.) -c thus the first call for which tout .ne. t requires -c istate = 0 or 1 on input. -c -c on output, istate has the following values and meanings. -c 0 or 1 means nothing was done, as tout was equal to t with -c istate = 0 or 1 on input. (however, an internal counter -c was set to detect and prevent repeated calls of this -c type. ) -c 2 means that the integration was performed successfully. -c 3 means that the user-supplied subroutine res signalled -c lsodi to halt the integration and return (ires=2). -c integration as far as t was achieved with no occurrence -c of ires=2, but this flag was set on attempting the next -c step. -c -1 means an excessive amount of work (more than mxstep -c steps) was done on this call, before completing the -c requested task, but the integration was otherwise -c successful as far as t. (mxstep is an optional input -c and is normally 500.) to continue, the user may -c simply reset istate to a value .gt. 1 and call again -c (the excess work step counter will be reset to 0). -c in addition, the user may increase mxstep to avoid -c this error return (see below on optional inputs). 
-c -2 means too much accuracy was requested for the precision -c of the machine being used. this was detected before -c completing the requested task, but the integration -c was successful as far as t. to continue, the tolerance -c parameters must be reset, and istate must be set -c to 3. the optional output tolsf may be used for this -c purpose. (note.. if this condition is detected before -c taking any steps, then an illegal input return -c (istate = -3) occurs instead.) -c -3 means illegal input was detected, before taking any -c integration steps. see written message for details. -c note.. if the solver detects an infinite loop of calls -c to the solver with illegal input, it will cause -c the run to stop. -c -4 means there were repeated error test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c the problem may have a singularity, or the input -c may be inappropriate. -c -5 means there were repeated convergence test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c this may be caused by an inaccurate jacobian matrix. -c -6 means ewt(i) became zero for some i during the -c integration. pure relative error control (atol(i)=0.0) -c was requested on a variable which has now vanished. -c the integration was successful as far as t. -c -7 means that the user-supplied subroutine res set -c its error flag (ires = 3) despite repeated tries by -c lsodi to avoid that condition. -c -8 means that istate was 0 on input but lsodi was unable -c to compute the initial value of dy/dt. see the -c printed message for details. -c -c note.. since the normal output value of istate is 2, -c it does not need to be reset for normal continuation. -c similarly, istate need not be reset if res told lsodi -c to return because the calling program must change -c the parameters of the problem. 
-c also, since a negative input value of istate will be -c regarded as illegal, a negative output value requires the -c user to change it, and possibly other inputs, before -c calling the solver again. -c -c iopt = an integer flag to specify whether or not any optional -c inputs are being used on this call. input only. -c the optional inputs are listed separately below. -c iopt = 0 means no optional inputs are being used. -c default values will be used in all cases. -c iopt = 1 means one or more optional inputs are being used. -c -c rwork = a real working array (double precision). -c the length of rwork must be at least -c 20 + nyh*(maxord + 1) + 3*neq + lenwm where -c nyh = the initial value of neq, -c maxord = 12 (if meth = 1) or 5 (if meth = 2) (unless a -c smaller value is given as an optional input), -c lenwm = neq**2 + 2 if miter is 1 or 2, and -c lenwm = (2*ml+mu+1)*neq + 2 if miter is 4 or 5. -c (see mf description for the definition of meth and miter.) -c thus if maxord has its default value and neq is constant, -c this length is -c 22 + 16*neq + neq**2 for mf = 11 or 12, -c 22 + 17*neq + (2*ml+mu)*neq for mf = 14 or 15, -c 22 + 9*neq + neq**2 for mf = 21 or 22, -c 22 + 10*neq + (2*ml+mu)*neq for mf = 24 or 25. -c the first 20 words of rwork are reserved for conditional -c and optional inputs and optional outputs. -c -c the following word in rwork is a conditional input.. -c rwork(1) = tcrit = critical value of t which the solver -c is not to overshoot. required if itask is -c 4 or 5, and ignored otherwise. (see itask.) -c -c lrw = the length of the array rwork, as declared by the user. -c (this will be checked by the solver.) -c -c iwork = an integer work array. the length of iwork must be at least -c 20 + neq . the first few words of iwork are used for -c conditional and optional inputs and optional outputs. -c -c the following 2 words in iwork are conditional inputs.. 
-c iwork(1) = ml these are the lower and upper -c iwork(2) = mu half-bandwidths, respectively, of the -c matrices in the problem-- the jacobian dr/dy -c and the left-hand side matrix a. these half- -c bandwidths exclude the main diagonal, so -c the total bandwidth is ml + mu + 1 . -c the band is defined by the matrix locations -c (i,j) with i-ml .le. j .le. i+mu. ml and mu -c must satisfy 0 .le. ml,mu .le. neq-1. -c these are required if miter is 4 or 5, and -c ignored otherwise. -c ml and mu may in fact be the band parameters for -c matrices to which dr/dy and a are only -c approximately equal. -c -c liw = the length of the array iwork, as declared by the user. -c (this will be checked by the solver.) -c -c note.. the work arrays must not be altered between calls to lsodi -c for the same problem, except possibly for the conditional and -c optional inputs, and except for the last 3*neq words of rwork. -c the latter space is used for internal scratch space, and so is -c available for use by the user outside lsodi between calls, if -c desired (but not for use by res, adda, or jac). -c -c mf = the method flag. used only for input. the legal values of -c mf are 11, 12, 14, 15, 21, 22, 24, and 25. -c mf has decimal digits meth and miter.. mf = 10*meth + miter. -c meth indicates the basic linear multistep method.. -c meth = 1 means the implicit adams method. -c meth = 2 means the method based on backward -c differentiation formulas (bdf-s). -c the bdf method is strongly preferred for stiff prob- -c lems, while the adams method is preferred when the prob- -c lem is not stiff. if the matrix a(t,y) is nonsingular, -c stiffness here can be taken to mean that of the explicit -c ode system dy/dt = a**(-1) * g. if a is singular, the -c concept of stiffness is not well defined. -c if you do not know whether the problem is stiff, we -c recommend using meth = 2. 
if it is stiff, the advan- -c tage of meth = 2 over 1 will be great, while if it is -c not stiff, the advantage of meth = 1 will be slight. -c if maximum efficiency is important, some experimentation -c with meth may be necessary. -c miter indicates the corrector iteration method.. -c miter = 1 means chord iteration with a user-supplied -c full (neq by neq) jacobian. -c miter = 2 means chord iteration with an internally -c generated (difference quotient) full jacobian. -c this uses neq+1 extra calls to res per dr/dy -c evaluation. -c miter = 4 means chord iteration with a user-supplied -c banded jacobian. -c miter = 5 means chord iteration with an internally -c generated banded jacobian (using ml+mu+2 -c extra calls to res per dr/dy evaluation). -c if miter = 1 or 4, the user must supply a subroutine jac -c (the name is arbitrary) as described above under jac. -c for other values of miter, a dummy argument can be used. -c----------------------------------------------------------------------- -c optional inputs. -c -c the following is a list of the optional inputs provided for in the -c call sequence. (see also part ii.) for each such input variable, -c this table lists its name as used in this documentation, its -c location in the call sequence, its meaning, and the default value. -c the use of any of these inputs requires iopt = 1, and in that -c case all of these inputs are examined. a value of zero for any -c of these optional inputs will cause the default value to be used. -c thus to use a subset of the optional inputs, simply preload -c locations 5 to 10 in rwork and iwork to 0.0 and 0 respectively, and -c then set those of interest to nonzero values. -c -c name location meaning and default value -c -c h0 rwork(5) the step size to be attempted on the first step. -c the default value is determined by the solver. -c -c hmax rwork(6) the maximum absolute step size allowed. -c the default value is infinite. 
-c -c hmin rwork(7) the minimum absolute step size allowed. -c the default value is 0. (this lower bound is not -c enforced on the final step before reaching tcrit -c when itask = 4 or 5.) -c -c maxord iwork(5) the maximum order to be allowed. the default -c value is 12 if meth = 1, and 5 if meth = 2. -c if maxord exceeds the default value, it will -c be reduced to the default value. -c if maxord is changed during the problem, it may -c cause the current order to be reduced. -c -c mxstep iwork(6) maximum number of (internally defined) steps -c allowed during one call to the solver. -c the default value is 500. -c -c mxhnil iwork(7) maximum number of messages printed (per problem) -c warning that t + h = t on a step (h = step size). -c this must be positive to result in a non-default -c value. the default value is 10. -c----------------------------------------------------------------------- -c optional outputs. -c -c as optional additional output from lsodi, the variables listed -c below are quantities related to the performance of lsodi -c which are available to the user. these are communicated by way of -c the work arrays, but also have internal mnemonic names as shown. -c except where stated otherwise, all of these outputs are defined -c on any successful return from lsodi, and on any return with -c istate = -1, -2, -4, -5, -6, or -7. on a return with -3 (illegal -c input) or -8, they will be unchanged from their existing values -c (if any), except possibly for tolsf, lenrw, and leniw. -c on any error return, outputs relevant to the error will be defined, -c as noted below. -c -c name location meaning -c -c hu rwork(11) the step size in t last used (successfully). -c -c hcur rwork(12) the step size to be attempted on the next step. -c -c tcur rwork(13) the current value of the independent variable -c which the solver has actually reached, i.e. the -c current internal mesh point in t. 
on output, tcur -c will always be at least as far as the argument -c t, but may be farther (if interpolation was done). -c -c tolsf rwork(14) a tolerance scale factor, greater than 1.0, -c computed when a request for too much accuracy was -c detected (istate = -3 if detected at the start of -c the problem, istate = -2 otherwise). if itol is -c left unaltered but rtol and atol are uniformly -c scaled up by a factor of tolsf for the next call, -c then the solver is deemed likely to succeed. -c (the user may also ignore tolsf and alter the -c tolerance parameters in any other way appropriate.) -c -c nst iwork(11) the number of steps taken for the problem so far. -c -c nre iwork(12) the number of residual evaluations (res calls) -c for the problem so far. -c -c nje iwork(13) the number of jacobian evaluations (each involving -c an evaluation of a and dr/dy) for the problem so -c far. this equals the number of calls to adda and -c (if miter = 1 or 4) jac, and the number of matrix -c l-u decompositions. -c -c nqu iwork(14) the method order last used (successfully). -c -c nqcur iwork(15) the order to be attempted on the next step. -c -c imxer iwork(16) the index of the component of largest magnitude in -c the weighted local error vector ( e(i)/ewt(i) ), -c on an error return with istate = -4 or -5. -c -c lenrw iwork(17) the length of rwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c leniw iwork(18) the length of iwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c -c the following two arrays are segments of the rwork array which -c may also be of interest to the user as optional outputs. -c for each array, the table below gives its internal name, -c its base address in rwork, and its description. 
-c -c name base address description -c -c yh 21 the nordsieck history array, of size nyh by -c (nqcur + 1), where nyh is the initial value -c of neq. for j = 0,1,...,nqcur, column j+1 -c of yh contains hcur**j/factorial(j) times -c the j-th derivative of the interpolating -c polynomial currently representing the solution, -c evaluated at t = tcur. -c -c acor lenrw-neq+1 array of size neq used for the accumulated -c corrections on each step, scaled on output to -c represent the estimated local error in y on the -c last step. this is the vector e in the descrip- -c tion of the error control. it is defined only -c on a return from lsodi with istate = 2. -c -c----------------------------------------------------------------------- -c part ii. other routines callable. -c -c the following are optional calls which the user may make to -c gain additional capabilities in conjunction with lsodi. -c (the routines xsetun and xsetf are designed to conform to the -c slatec error handling package.) -c -c form of call function -c call xsetun(lun) set the logical unit number, lun, for -c output of messages from lsodi, if -c the default is not desired. -c the default value of lun is 6. -c -c call xsetf(mflag) set a flag to control the printing of -c messages by lsodi. -c mflag = 0 means do not print. (danger.. -c this risks losing valuable information.) -c mflag = 1 means print (the default). -c -c either of the above calls may be made at -c any time and will take effect immediately. -c -c call srcom(rsav,isav,job) saves and restores the contents of -c the internal common blocks used by -c lsodi (see part iii below). -c rsav must be a real array of length 218 -c or more, and isav must be an integer -c array of length 41 or more. -c job=1 means save common into rsav/isav. -c job=2 means restore common from rsav/isav. -c srcom is useful if one is -c interrupting a run and restarting -c later, or alternating between two or -c more problems solved with lsodi. 
-c -c call intdy(,,,,,) provide derivatives of y, of various -c (see below) orders, at a specified point t, if -c desired. it may be called only after -c a successful return from lsodi. -c -c the detailed instructions for using intdy are as follows. -c the form of the call is.. -c -c call intdy (t, k, rwork(21), nyh, dky, iflag) -c -c the input parameters are.. -c -c t = value of independent variable where answers are desired -c (normally the same as the t last returned by lsodi). -c for valid results, t must lie between tcur - hu and tcur. -c (see optional outputs for tcur and hu.) -c k = integer order of the derivative desired. k must satisfy -c 0 .le. k .le. nqcur, where nqcur is the current order -c (see optional outputs). the capability corresponding -c to k = 0, i.e. computing y(t), is already provided -c by lsodi directly. since nqcur .ge. 1, the first -c derivative dy/dt is always available with intdy. -c rwork(21) = the base address of the history array yh. -c nyh = column length of yh, equal to the initial value of neq. -c -c the output parameters are.. -c -c dky = a real array of length neq containing the computed value -c of the k-th derivative of y(t). -c iflag = integer flag, returned as 0 if k and t were legal, -c -1 if k was illegal, and -2 if t was illegal. -c on an error return, a message is also written. -c----------------------------------------------------------------------- -c part iii. common blocks. -c -c if lsodi is to be used in an overlay situation, the user -c must declare, in the primary overlay, the variables in.. -c (1) the call sequence to lsodi, -c (2) the two internal common blocks -c /ls0001/ of length 257 (218 double precision words -c followed by 39 integer words), -c /eh0001/ of length 2 (integer words). 
-c -c if lsodi is used on a system in which the contents of internal -c common blocks are not preserved between calls, the user should -c declare the above two common blocks in his main program to insure -c that their contents are preserved. -c -c if the solution of a given problem by lsodi is to be interrupted -c and then later continued, such as when restarting an interrupted run -c or alternating between two or more problems, the user should save, -c following the return from the last lsodi call prior to the -c interruption, the contents of the call sequence variables and the -c internal common blocks, and later restore these values before the -c next lsodi call for that problem. to save and restore the common -c blocks, use subroutine srcom (see part ii above). -c -c----------------------------------------------------------------------- -c part iv. optionally replaceable solver routines. -c -c below are descriptions of two routines in the lsodi package which -c relate to the measurement of errors. either routine can be -c replaced by a user-supplied version, if desired. however, since such -c a replacement may have a major impact on performance, it should be -c done only when absolutely necessary, and only with great caution. -c (note.. the means by which the package version of a routine is -c superseded by the user-s version may be system-dependent.) -c -c (a) ewset. -c the following subroutine is called just before each internal -c integration step, and sets the array of error weights, ewt, as -c described under itol/rtol/atol above.. -c subroutine ewset (neq, itol, rtol, atol, ycur, ewt) -c where neq, itol, rtol, and atol are as in the lsodi call sequence, -c ycur contains the current dependent variable vector, and -c ewt is the array of weights set by ewset. -c -c if the user supplies this subroutine, it must return in ewt(i) -c (i = 1,...,neq) a positive quantity suitable for comparing errors -c in y(i) to. 
the ewt array returned by ewset is passed to the -c vnorm routine (see below), and also used by lsodi in the computation -c of the optional output imxer, the diagonal jacobian approximation, -c and the increments for difference quotient jacobians. -c -c in the user-supplied version of ewset, it may be desirable to use -c the current values of derivatives of y. derivatives up to order nq -c are available from the history array yh, described above under -c optional outputs. in ewset, yh is identical to the ycur array, -c extended to nq + 1 columns with a column length of nyh and scale -c factors of h**j/factorial(j). on the first call for the problem, -c given by nst = 0, nq is 1 and h is temporarily set to 1.0. -c the quantities nq, nyh, h, and nst can be obtained by including -c in ewset the statements.. -c double precision h, rls -c common /ls0001/ rls(218),ils(39) -c nq = ils(35) -c nyh = ils(14) -c nst = ils(36) -c h = rls(212) -c thus, for example, the current value of dy/dt can be obtained as -c ycur(nyh+i)/h (i=1,...,neq) (and the division by h is -c unnecessary when nst = 0). -c -c (b) vnorm. -c the following is a real function routine which computes the weighted -c root-mean-square norm of a vector v.. -c d = vnorm (n, v, w) -c where.. -c n = the length of the vector, -c v = real array of length n containing the vector, -c w = real array of length n containing weights, -c d = sqrt( (1/n) * sum(v(i)*w(i))**2 ). -c vnorm is called with n = neq and with w(i) = 1.0/ewt(i), where -c ewt is as set by subroutine ewset. -c -c if the user supplies this function, it should return a non-negative -c value of vnorm suitable for use in the error control in lsodi. -c none of the arguments should be altered by vnorm. -c for example, a user-supplied vnorm routine might.. -c -substitute a max-norm of (v(i)*w(i)) for the rms-norm, or -c -ignore some components of v in the norm, with the effect of -c suppressing the error control on those components of y. 
-c----------------------------------------------------------------------- -c----------------------------------------------------------------------- -c other routines in the lsodi package. -c -c in addition to subroutine lsodi, the lsodi package includes the -c following subroutines and function routines.. -c ainvg computes the initial value of the vector -c dy/dt = inverse(a) * g -c intdy computes an interpolated value of the y vector at t = tout. -c stodi is the core integrator, which does one step of the -c integration and the associated error control. -c cfode sets all method coefficients and test constants. -c prepji computes and preprocesses the jacobian matrix -c and the newton iteration matrix p. -c solsy manages solution of linear system in chord iteration. -c ewset sets the error weight vector ewt before each step. -c vnorm computes the weighted r.m.s. norm of a vector. -c srcom is a user-callable routine to save and restore -c the contents of the internal common blocks. -c dgefa and dgesl are routines from linpack for solving full -c systems of linear algebraic equations. -c dgbfa and dgbsl are routines from linpack for solving banded -c linear systems. -c daxpy, dscal, idamax, and ddot are basic linear algebra modules -c (blas) used by the above linpack routines. -c d1mach computes the unit roundoff in a machine-independent manner. -c xerrwv, xsetun, and xsetf handle the printing of all error -c messages and warnings. xerrwv is machine-dependent. -c note.. vnorm, idamax, ddot, and d1mach are function routines. -c all the others are subroutines. -c -c the intrinsic and external routines used by lsodi are.. dabs, -c dmax1, dmin1, dfloat, iabs, max0, min0, mod, dsign, dsqrt, and write. -c -c a block data subprogram is also included with the package, -c for loading some of the variables in internal common. -c -c----------------------------------------------------------------------- -c the following card is for optimized compilation on llnl compilers. -clll. 
optimize -c----------------------------------------------------------------------- - external prepji, solsy - integer illin, init, lyh, lewt, lacor, lsavr, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu - integer i, i1, i2, ier, iflag, imxer, ires, kgo, - 1 leniw, lenrw, lenwm, lp, lyd0, ml, mord, mu, mxhnl0, mxstp0 - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision atoli, ayi, big, ewti, h0, hmax, hmx, rh, rtoli, - 1 tcrit, tdist, tnext, tol, tolsf, tp, size, sum, w0, - 2 d1mach, vnorm - dimension mord(2) - logical ihit -c----------------------------------------------------------------------- -c the following internal common block contains -c (a) variables which are local to any subroutine but whose values must -c be preserved between calls to the routine (own variables), and -c (b) variables which are communicated between subroutines. -c common block ls0001 is shared by the lsodi and lsode packages. -c the structure of ls0001 is as follows.. all real variables are -c listed first, followed by all integers. within each type, the -c variables are grouped with those local to subroutine lsodi first, -c then those local to subroutine stodi, and finally those used -c for communication. the block is declared in subroutines -c lsodi, intdy, stodi, prepji, and solsy. groups of variables are -c replaced by dummy arrays in the common declarations in routines -c where those variables are not used. 
-c----------------------------------------------------------------------- - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavr, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu -c - data mord(1),mord(2)/12,5/, mxstp0/500/, mxhnl0/10/ -c----------------------------------------------------------------------- -c block a. -c this code block is executed on every call. -c it tests istate and itask for legality and branches appropriately. -c if istate .gt. 1 but the flag init shows that initialization has -c not yet been done, an error return occurs. -c if istate = 0 or 1 and tout = t, jump to block g and return -c immediately. -c----------------------------------------------------------------------- - if (istate .lt. 0 .or. istate .gt. 3) go to 601 - if (itask .lt. 1 .or. itask .gt. 5) go to 602 - if (istate .le. 1) go to 10 - if (init .eq. 0) go to 603 - if (istate .eq. 2) go to 200 - go to 20 - 10 init = 0 - if (tout .eq. t) go to 430 - 20 ntrep = 0 -c----------------------------------------------------------------------- -c block b. -c the next code block is executed for the initial call (istate = 0 or 1) -c or for a continuation call with parameter changes (istate = 3). -c it contains checking of all inputs and various initializations. -c -c first check legality of the non-optional inputs neq, itol, iopt, -c mf, ml, and mu. -c----------------------------------------------------------------------- - if (neq(1) .le. 0) go to 604 - if (istate .le. 1) go to 25 - if (neq(1) .gt. n) go to 605 - 25 n = neq(1) - if (itol .lt. 1 .or. itol .gt. 4) go to 606 - if (iopt .lt. 0 .or. iopt .gt. 1) go to 607 - meth = mf/10 - miter = mf - 10*meth - if (meth .lt. 1 .or. meth .gt. 2) go to 608 - if (miter .le. 0 .or. miter .gt. 5) go to 608 - if (miter .eq. 
3) go to 608 - if (miter .lt. 3) go to 30 - ml = iwork(1) - mu = iwork(2) - if (ml .lt. 0 .or. ml .ge. n) go to 609 - if (mu .lt. 0 .or. mu .ge. n) go to 610 - 30 continue -c next process and check the optional inputs. -------------------------- - if (iopt .eq. 1) go to 40 - maxord = mord(meth) - mxstep = mxstp0 - mxhnil = mxhnl0 - if (istate .le. 1) h0 = 0.0d0 - hmxi = 0.0d0 - hmin = 0.0d0 - go to 60 - 40 maxord = iwork(5) - if (maxord .lt. 0) go to 611 - if (maxord .eq. 0) maxord = 100 - maxord = min0(maxord,mord(meth)) - mxstep = iwork(6) - if (mxstep .lt. 0) go to 612 - if (mxstep .eq. 0) mxstep = mxstp0 - mxhnil = iwork(7) - if (mxhnil .lt. 0) go to 613 - if (mxhnil .eq. 0) mxhnil = mxhnl0 - if (istate .gt. 1) go to 50 - h0 = rwork(5) - if ((tout - t)*h0 .lt. 0.0d0) go to 614 - 50 hmax = rwork(6) - if (hmax .lt. 0.0d0) go to 615 - hmxi = 0.0d0 - if (hmax .gt. 0.0d0) hmxi = 1.0d0/hmax - hmin = rwork(7) - if (hmin .lt. 0.0d0) go to 616 -c----------------------------------------------------------------------- -c set work array pointers and check lengths lrw and liw. -c pointers to segments of rwork and iwork are named by prefixing l to -c the name of the segment. e.g., the segment yh starts at rwork(lyh). -c segments of rwork (in order) are denoted yh, wm, ewt, savr, acor. -c----------------------------------------------------------------------- - 60 lyh = 21 - if (istate .le. 1) nyh = n - lwm = lyh + (maxord + 1)*nyh - if (miter .le. 2) lenwm = n*n + 2 - if (miter .ge. 4) lenwm = (2*ml + mu + 1)*n + 2 - lewt = lwm + lenwm - lsavr = lewt + n - lacor = lsavr + n - lenrw = lacor + n - 1 - iwork(17) = lenrw - liwm = 1 - leniw = 20 + n - iwork(18) = leniw - if (lenrw .gt. lrw) go to 617 - if (leniw .gt. liw) go to 618 -c check rtol and atol for legality. ------------------------------------ - rtoli = rtol(1) - atoli = atol(1) - do 70 i = 1,n - if (itol .ge. 3) rtoli = rtol(i) - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - if (rtoli .lt. 
0.0d0) go to 619 - if (atoli .lt. 0.0d0) go to 620 - 70 continue - if (istate .le. 1) go to 100 -c if istate = 3, set flag to signal parameter changes to stodi. -------- - jstart = -1 - if (nq .le. maxord) go to 90 -c maxord was reduced below nq. copy yh(*,maxord+2) into ydoti.--------- - do 80 i = 1,n - 80 ydoti(i) = rwork(i+lwm-1) -c reload wm(1) = rwork(lwm), since lwm may have changed. --------------- - 90 rwork(lwm) = dsqrt(uround) - if (n .eq. nyh) go to 200 -c neq was reduced. zero part of yh to avoid undefined references. ----- - i1 = lyh + l*nyh - i2 = lyh + (maxord + 1)*nyh - 1 - if (i1 .gt. i2) go to 200 - do 95 i = i1,i2 - 95 rwork(i) = 0.0d0 - go to 200 -c----------------------------------------------------------------------- -c block c. -c the next block is for the initial call only (istate = 0 or 1). -c it contains all remaining initializations, the call to ainvg -c (if istate = 1), and the calculation of the initial step size. -c the error weights in ewt are inverted after being loaded. -c----------------------------------------------------------------------- - 100 uround = d1mach(4) - tn = t - if (itask .ne. 4 .and. itask .ne. 5) go to 105 - tcrit = rwork(1) - if ((tcrit - tout)*(tout - t) .lt. 0.0d0) go to 625 - if (h0 .ne. 0.0d0 .and. (t + h0 - tcrit)*h0 .gt. 0.0d0) - 1 h0 = tcrit - t - 105 jstart = 0 - rwork(lwm) = dsqrt(uround) - nhnil = 0 - nst = 0 - nre = 0 - nje = 0 - nslast = 0 - hu = 0.0d0 - nqu = 0 - ccmax = 0.3d0 - maxcor = 3 - msbp = 20 - mxncf = 10 -c compute initial dy/dt, if necessary, and load it and initial y into yh - lyd0 = lyh + nyh - lp = lwm + 1 - if (istate .eq. 1) go to 120 -c lsodi must compute initial dy/dt (lyd0 points to yh(*,2)). ----------- - call ainvg( res, adda, neq, t, y, rwork(lyd0), miter, - 1 ml, mu, rwork(lp), iwork(21), ier ) - nre = nre + 1 - if (ier.lt.0) go to 560 - if (ier.eq.0) go to 110 - go to 565 - 110 continue - do 115 i = 1,n - 115 rwork(i+lyh-1) = y(i) - go to 130 -c initial dy/dt has been supplied. 
------------------------------------- - 120 do 125 i = 1,n - rwork(i+lyh-1) = y(i) - 125 rwork(i+lyd0-1) = ydoti(i) -c load and invert the ewt array. (h is temporarily set to 1.0.) ------- - 130 continue - nq = 1 - h = 1.0d0 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 135 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 621 - 135 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) -c----------------------------------------------------------------------- -c the coding below computes the step size, h0, to be attempted on the -c first step, unless the user has supplied a value for this. -c first check that tout - t differs significantly from zero. -c a scalar tolerance quantity tol is computed, as max(rtol(i)) -c if this is positive, or max(atol(i)/abs(y(i))) otherwise, adjusted -c so as to be between 100*uround and 1.0e-3. -c then the computed value h0 is given by.. -c neq -c h0**2 = tol / ( w0**-2 + (1/neq) * sum ( ydot(i)/ywt(i) )**2 ) -c 1 -c where w0 = max ( abs(t), abs(tout) ), -c ydot(i) = i-th component of initial value of dy/dt, -c ywt(i) = ewt(i)/tol (a weight for y(i)). -c the sign of h0 is inferred from the initial values of tout and t. -c----------------------------------------------------------------------- - if (h0 .ne. 0.0d0) go to 180 - tdist = dabs(tout - t) - w0 = dmax1(dabs(t),dabs(tout)) - if (tdist .lt. 2.0d0*uround*w0) go to 622 - tol = rtol(1) - if (itol .le. 2) go to 145 - do 140 i = 1,n - 140 tol = dmax1(tol,rtol(i)) - 145 if (tol .gt. 0.0d0) go to 160 - atoli = atol(1) - do 150 i = 1,n - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - ayi = dabs(y(i)) - if (ayi .ne. 0.0d0) tol = dmax1(tol,atoli/ayi) - 150 continue - 160 tol = dmax1(tol,100.0d0*uround) - tol = dmin1(tol,0.001d0) - sum = vnorm (n, rwork(lyd0), rwork(lewt)) - sum = 1.0d0/(tol*w0*w0) + tol*sum**2 - h0 = 1.0d0/dsqrt(sum) - h0 = dmin1(h0,tdist) - h0 = dsign(h0,tout-t) -c adjust h0 if necessary to meet hmax bound. 
--------------------------- - 180 rh = dabs(h0)*hmxi - if (rh .gt. 1.0d0) h0 = h0/rh -c load h with h0 and scale yh(*,2) by h0. ------------------------------ - h = h0 - do 190 i = 1,n - 190 rwork(i+lyd0-1) = h0*rwork(i+lyd0-1) - go to 270 -c----------------------------------------------------------------------- -c block d. -c the next code block is for continuation calls only (istate = 2 or 3) -c and is to check stop conditions before taking a step. -c----------------------------------------------------------------------- - 200 nslast = nst - go to (210, 250, 220, 230, 240), itask - 210 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 220 tp = tn - hu*(1.0d0 + 100.0d0*uround) - if ((tp - tout)*h .gt. 0.0d0) go to 623 - if ((tn - tout)*h .lt. 0.0d0) go to 250 - go to 400 - 230 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - if ((tcrit - tout)*h .lt. 0.0d0) go to 625 - if ((tn - tout)*h .lt. 0.0d0) go to 245 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 240 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - 245 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (istate .eq. 2) jstart = -2 -c----------------------------------------------------------------------- -c block e. -c the next block is normally executed for all calls and contains -c the call to the one-step core integrator stodi. -c -c this is a looping point for the integration steps. -c -c first check for too many steps being taken, update ewt (if not at -c start of problem), check for too much accuracy being requested, and -c check for h below the roundoff level in t. 
-c----------------------------------------------------------------------- - 250 continue - if ((nst-nslast) .ge. mxstep) go to 500 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 260 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 510 - 260 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 270 tolsf = uround*vnorm (n, rwork(lyh), rwork(lewt)) - if (tolsf .le. 1.0d0) go to 280 - tolsf = tolsf*2.0d0 - if (nst .eq. 0) go to 626 - go to 520 - 280 if ((tn + h) .ne. tn) go to 290 - nhnil = nhnil + 1 - if (nhnil .gt. mxhnil) go to 290 - call xerrwv('lsodi-- warning..internal t (=r1) and h (=r2) are', - 1 50, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' such that in the machine, t + h = t on the next step ', - 1 60, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' (h = step size). solver will continue anyway', - 1 50, 101, 0, 0, 0, 0, 2, tn, h) - if (nhnil .lt. mxhnil) go to 290 - call xerrwv('lsodi-- above warning has been issued i1 times. ', - 1 50, 102, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' it will not be issued again for this problem', - 1 50, 102, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - 290 continue -c----------------------------------------------------------------------- -c call stodi(neq,y,yh,nyh,yh1,ewt,savf,savr,acor,wm,iwm,res, -c adda,jac,prepji,solsy) -c note... savf in stodi occupies the same space as ydoti in lsodi. -c----------------------------------------------------------------------- - call stodi (neq, y, rwork(lyh), nyh, rwork(lyh), rwork(lewt), - 1 ydoti, rwork(lsavr), rwork(lacor), rwork(lwm), - 2 iwork(liwm), res, adda, jac, prepji, solsy ) - kgo = 1 - kflag - go to (300, 530, 540, 400, 550), kgo -c -c kgo = 1,success. 2,error test failure. 3,convergence failure. -c 4,res ordered return. 5,res returned error. -c----------------------------------------------------------------------- -c block f. -c the following block handles the case of a successful return from the -c core integrator (kflag = 0). test for stop conditions. 
-c----------------------------------------------------------------------- - 300 init = 1 - go to (310, 400, 330, 340, 350), itask -c itask = 1. if tout has been reached, interpolate. ------------------- - 310 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 -c itask = 3. jump to exit if tout was reached. ------------------------ - 330 if ((tn - tout)*h .ge. 0.0d0) go to 400 - go to 250 -c itask = 4. see if tout or tcrit was reached. adjust h if necessary. - 340 if ((tn - tout)*h .lt. 0.0d0) go to 345 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 - 345 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - jstart = -2 - go to 250 -c itask = 5. see if tcrit was reached and jump to exit. --------------- - 350 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx -c----------------------------------------------------------------------- -c block g. -c the following block handles all successful returns from lsodi. -c if itask .ne. 1, y is loaded from yh and t is set accordingly. -c istate is set to 2, the illegal input counter is zeroed, and the -c optional outputs are loaded into the work arrays before returning. if -c istate = 0 or 1 and tout = t, there is a return with no action taken, -c except that if this has happened repeatedly, the run is terminated. -c----------------------------------------------------------------------- - 400 do 410 i = 1,n - 410 y(i) = rwork(i+lyh-1) - t = tn - if (itask .ne. 4 .and. itask .ne. 5) go to 420 - if (ihit) t = tcrit - 420 istate = 2 - if (kflag .eq. 
-3) istate = 3 - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nre - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - return -c - 430 ntrep = ntrep + 1 - if (ntrep .lt. 5) return - call xerrwv( - 1 'lsodi-- repeated calls with istate= 0 or 1 and tout= t(=r1)', - 1 60, 301, 0, 0, 0, 0, 1, t, 0.0d0) - go to 800 -c----------------------------------------------------------------------- -c block h. -c the following block handles all unsuccessful returns other than -c those for illegal input. first the error message routine is called. -c if there was an error test or convergence test failure, imxer is set. -c then y is loaded from yh, t is set to tn, and the illegal input -c counter illin is set to 0. the optional outputs are loaded into -c the work arrays before returning. -c----------------------------------------------------------------------- -c the maximum number of steps was taken before reaching tout. ---------- - 500 call xerrwv('lsodi-- at current t (=r1), mxstep (=i1) steps ', - 1 50, 201, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' taken on this call before reaching tout ', - 1 50, 201, 0, 1, mxstep, 0, 1, tn, 0.0d0) - istate = -1 - go to 580 -c ewt(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 ewti = rwork(lewt+i-1) - call xerrwv('lsodi-- at t (=r1), ewt(i1) has become r2 .le. 0.', - 1 50, 202, 0, 1, i, 0, 2, tn, ewti) - istate = -6 - go to 590 -c too much accuracy requested for machine precision. ------------------- - 520 call xerrwv('lsodi-- at t (=r1), too much accuracy requested ', - 1 50, 203, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' for precision of machine.. see tolsf (=r2) ', - 1 50, 203, 0, 0, 0, 0, 2, tn, tolsf) - rwork(14) = tolsf - istate = -2 - go to 590 -c kflag = -1. error test failed repeatedly or with abs(h) = hmin. 
----- - 530 call xerrwv('lsodi-- at t(=r1) and step size h(=r2), the error', - 1 50, 204, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' test failed repeatedly or with abs(h) = hmin', - 1 50, 204, 0, 0, 0, 0, 2, tn, h) - istate = -4 - go to 570 -c kflag = -2. convergence failed repeatedly or with abs(h) = hmin. ---- - 540 call xerrwv('lsodi-- at t (=r1) and step size h (=r2), the ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' corrector convergence failed repeatedly ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' or with abs(h) = hmin ', - 1 30, 205, 0, 0, 0, 0, 2, tn, h) - istate = -5 - go to 570 -c ires = 3 returned by res, despite retries by stodi. ------------------ - 550 call xerrwv('lsodi-- at t (=r1) residual routine returned ', - 1 50, 206, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' error ires = 3 repeatedly ', - 1 40, 206, 0, 0, 0, 0, 1, tn, 0.0d0) - istate = -7 - go to 590 -c ainvg failed because a-matrix was singular. -------------------------- - 560 ier = -ier - call xerrwv( - 1 'lsodi-- attempt to initialize dy/dt failed.. matrix a is ', - 1 60, 207, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' singular. sgefa or sgbfa returned info=(i1)', - 2 50, 207, 0, 1, ier, 0, 0, 0.0d0, 0.0d0) - istate = -8 - return -c ainvg failed because res set ires to 2 or 3. ------------------------- - 565 call xerrwv('lsodi-- attempt to initialize dy/dt failed ', - 1 50, 208, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' because residual routine set its error flag ', - 1 50, 208, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' to ires = (i1)', - 1 20, 208, 0, 1, ier, 0, 0, 0.0d0, 0.0d0) - istate = -8 - return -c compute imxer if relevant. ------------------------------------------- - 570 big = 0.0d0 - imxer = 1 - do 575 i = 1,n - size = dabs(rwork(i+lacor-1)*rwork(i+lewt-1)) - if (big .ge. size) go to 575 - big = size - imxer = i - 575 continue - iwork(16) = imxer -c compute residual if relevant. 
---------------------------------------- - 580 lyd0 = lyh + nyh - do 585 i = 1,n - rwork(i+lsavr-1) = rwork(i+lyd0-1)/h - 585 y(i) = rwork(i+lyh-1) - ires = 1 - call res ( neq, tn, y, rwork(lsavr), ydoti, ires ) - nre = nre + 1 - if (ires .le. 1) go to 595 - call xerrwv('lsodi-- residual routine set its flag ires ', - 1 50, 210, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' to (i1) when called for final output. ', - 1 50, 210, 0, 1, ires, 0, 0, 0.0d0, 0.0d0) - go to 595 -c set y vector, t, illin, and optional outputs. ------------------------ - 590 do 592 i = 1,n - 592 y(i) = rwork(i+lyh-1) - 595 t = tn - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nre - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - return -c----------------------------------------------------------------------- -c block i. -c the following block handles all error returns due to illegal input -c (istate = -3), as detected before calling the core integrator. -c first the error message routine is called. then if there have been -c 5 consecutive such returns just before this call to the solver, -c the run is halted. -c----------------------------------------------------------------------- - 601 call xerrwv('lsodi-- istate (=i1) illegal ', - 1 30, 1, 0, 1, istate, 0, 0, 0.0d0, 0.0d0) - go to 700 - 602 call xerrwv('lsodi-- itask (=i1) illegal ', - 1 30, 2, 0, 1, itask, 0, 0, 0.0d0, 0.0d0) - go to 700 - 603 call xerrwv('lsodi-- istate .gt. 1 but lsodi not initialized ', - 1 50, 3, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - go to 700 - 604 call xerrwv('lsodi-- neq (=i1) .lt. 
1 ', - 1 30, 4, 0, 1, neq(1), 0, 0, 0.0d0, 0.0d0) - go to 700 - 605 call xerrwv('lsodi-- istate = 3 and neq increased (i1 to i2) ', - 1 50, 5, 0, 2, n, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 606 call xerrwv('lsodi-- itol (=i1) illegal ', - 1 30, 6, 0, 1, itol, 0, 0, 0.0d0, 0.0d0) - go to 700 - 607 call xerrwv('lsodi-- iopt (=i1) illegal ', - 1 30, 7, 0, 1, iopt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 608 call xerrwv('lsodi-- mf (=i1) illegal ', - 1 30, 8, 0, 1, mf, 0, 0, 0.0d0, 0.0d0) - go to 700 - 609 call xerrwv('lsodi-- ml(=i1) illegal.. .lt. 0 or .ge. neq(=i2)', - 1 50, 9, 0, 2, ml, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 610 call xerrwv('lsodi-- mu(=i1) illegal.. .lt. 0 or .ge. neq(=i2)', - 1 50, 10, 0, 2, mu, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 611 call xerrwv('lsodi-- maxord (=i1) .lt. 0 ', - 1 30, 11, 0, 1, maxord, 0, 0, 0.0d0, 0.0d0) - go to 700 - 612 call xerrwv('lsodi-- mxstep (=i1) .lt. 0 ', - 1 30, 12, 0, 1, mxstep, 0, 0, 0.0d0, 0.0d0) - go to 700 - 613 call xerrwv('lsodi-- mxhnil (=i1) .lt. 0 ', - 1 30, 13, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - go to 700 - 614 call xerrwv('lsodi-- tout (=r1) behind t (=r2) ', - 1 40, 14, 0, 0, 0, 0, 2, tout, t) - call xerrwv(' integration direction is given by h0 (=r1) ', - 1 50, 14, 0, 0, 0, 0, 1, h0, 0.0d0) - go to 700 - 615 call xerrwv('lsodi-- hmax (=r1) .lt. 0.0 ', - 1 30, 15, 0, 0, 0, 0, 1, hmax, 0.0d0) - go to 700 - 616 call xerrwv('lsodi-- hmin (=r1) .lt. 0.0 ', - 1 30, 16, 0, 0, 0, 0, 1, hmin, 0.0d0) - go to 700 - 617 call xerrwv( - 1 'lsodi-- rwork length needed, lenrw (=i1), exceeds lrw (=i2)', - 1 60, 17, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 618 call xerrwv( - 1 'lsodi-- iwork length needed, leniw (=i1), exceeds liw (=i2)', - 1 60, 18, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - go to 700 - 619 call xerrwv('lsodi-- rtol(=i1) is r1 .lt. 0.0 ', - 1 40, 19, 0, 1, i, 0, 1, rtoli, 0.0d0) - go to 700 - 620 call xerrwv('lsodi-- atol(=i1) is r1 .lt. 
0.0 ', - 1 40, 20, 0, 1, i, 0, 1, atoli, 0.0d0) - go to 700 - 621 ewti = rwork(lewt+i-1) - call xerrwv('lsodi-- ewt(=i1) is r1 .le. 0.0 ', - 1 40, 21, 0, 1, i, 0, 1, ewti, 0.0d0) - go to 700 - 622 call xerrwv( - 1 'lsodi-- tout (=r1) too close to t(=r2) to start integration', - 1 60, 22, 0, 0, 0, 0, 2, tout, t) - go to 700 - 623 call xerrwv( - 1 'lsodi-- itask = i1 and tout (=r1) behind tcur - hu (= r2) ', - 1 60, 23, 0, 1, itask, 0, 2, tout, tp) - go to 700 - 624 call xerrwv( - 1 'lsodi-- itask = 4 or 5 and tcrit (=r1) behind tcur (=r2) ', - 1 60, 24, 0, 0, 0, 0, 2, tcrit, tn) - go to 700 - 625 call xerrwv( - 1 'lsodi-- itask = 4 or 5 and tcrit (=r1) behind tout (=r2) ', - 1 60, 25, 0, 0, 0, 0, 2, tcrit, tout) - go to 700 - 626 call xerrwv('lsodi-- at start of problem, too much accuracy ', - 1 50, 26, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' requested for precision of machine.. see tolsf (=r1) ', - 1 60, 26, 0, 0, 0, 0, 1, tolsf, 0.0d0) - rwork(14) = tolsf - go to 700 - 627 call xerrwv('lsodi-- trouble from intdy. itask = i1, tout = r1', - 1 50, 27, 0, 1, itask, 0, 1, tout, 0.0d0) -c - 700 if (illin .eq. 5) go to 710 - illin = illin + 1 - istate = -3 - return - 710 call xerrwv('lsodi-- repeated occurrences of illegal input ', - 1 50, 302, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 800 call xerrwv('lsodi-- run aborted.. 
apparent infinite loop ', - 1 50, 303, 2, 0, 0, 0, 0, 0.0d0, 0.0d0) - return -c----------------------- end of subroutine lsodi ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/lsoibt.f b/scipy-0.10.1/scipy/integrate/odepack/lsoibt.f deleted file mode 100644 index 7dab333f4b..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/lsoibt.f +++ /dev/null @@ -1,1817 +0,0 @@ - subroutine lsoibt (res, adda, jac, neq, y, ydoti, t, tout, itol, - 1 rtol, atol, itask, istate, iopt, rwork, lrw, iwork, liw, mf ) - external res, adda, jac - integer neq, itol, itask, istate, iopt, lrw, iwork, liw, mf - double precision y, ydoti, t, tout, rtol, atol, rwork - dimension neq(1), y(1), ydoti(1), rtol(1), atol(1), rwork(lrw), - 1 iwork(liw) -c----------------------------------------------------------------------- -c this is the march 30, 1987 version of lsoibt.. -c livermore solver for ordinary differential equations given in -c implicit form, with block-tridiagonal jacobian treatment. -c this version is in double precision. -c -c lsoibt solves the initial value problem for linearly implicit -c systems of first order ode-s, -c a(t,y) * dy/dt = g(t,y) , where a(t,y) is a square matrix, -c or, in component form, -c ( a * ( dy / dt )) + ... + ( a * ( dy / dt )) = -c i,1 1 i,neq neq -c -c = g ( t, y , y ,..., y ) ( i = 1,...,neq ) -c i 1 2 neq -c -c if a is singular, this is a differential-algebraic system. -c -c lsoibt is a variant version of the lsodi package, for the case where -c the matrices a, dg/dy, and d(a*s)/dy are all block-tridiagonal. -c----------------------------------------------------------------------- -c reference.. -c alan c. hindmarsh, odepack, a systematized collection of ode -c solvers, in scientific computing, r. s. stepleman et al. (eds.), -c north-holland, amsterdam, 1983, pp. 55-64. -c----------------------------------------------------------------------- -c authors.. charles s. kenney -c formerly at.. 
-c naval weapons center -c china lake, ca 93555 -c and -c jeffrey f. painter and -c alan c. hindmarsh -c computing and mathematics research division, l-316 -c lawrence livermore national laboratory -c livermore, ca 94550. -c----------------------------------------------------------------------- -c summary of usage. -c -c communication between the user and the lsoibt package, for normal -c situations, is summarized here. this summary describes only a subset -c of the full set of options available. see the full description for -c details, including optional communication, nonstandard options, -c and instructions for special situations. see also the example -c problem (with program and output) following this summary. -c -c a. first, provide a subroutine of the form.. -c subroutine res (neq, t, y, s, r, ires) -c dimension y(neq), s(neq), r(neq) -c which computes the residual function -c r = g(t,y) - a(t,y) * s , -c as a function of t and the vectors y and s. (s is an internally -c generated approximation to dy/dt.) the arrays y and s are inputs -c to the res routine and should not be altered. the residual -c vector is to be stored in the array r. the argument ires should be -c ignored for casual use of lsoibt. (for uses of ires, see the -c paragraph on res in the full description below.) -c -c b. next, identify the block structure of the matrices a = a(t,y) and -c dr/dy. lsoibt must deal internally with a linear combination, p, of -c these two matrices. the matrix p (hence both a and dr/dy) must have -c a block-tridiagonal form with fixed structure parameters -c mb = block size, mb .ge. 1, and -c nb = number of blocks in each direction, nb .ge. 4, -c with mb*nb = neq. in each of the nb block-rows of the matrix p -c (each consisting of mb consecutive rows), the nonzero elements are -c to lie in three consecutive mb by mb blocks. in block-rows -c 2 through nb - 1, these are centered about the main diagonal. 
-c in block-rows 1 and nb, they are the diagonal blocks and the two -c blocks adjacent to the diagonal block. (thus block positions (1,3) -c and (nb,nb-2) can be nonzero.) -c alternatively, p (hence a and dr/dy) may be only approximately -c equal to matrices with this form, and lsoibt should still succeed. -c the block-tridiagonal matrix p is described by three arrays, -c each of size mb by mb by nb.. -c pa = array of diagonal blocks, -c pb = array of superdiagonal (and one subdiagonal) blocks, and -c pc = array of subdiagonal (and one superdiagonal) blocks. -c specifically, the three mb by mb blocks in the k-th block-row of p -c are stored in (reading across).. -c pc(*,*,k) = block to the left of the diagonal block, -c pa(*,*,k) = diagonal block, and -c pb(*,*,k) = block to the right of the diagonal block, -c except for k = 1, where the three blocks (reading across) are -c pa(*,*,1) (= diagonal block), pb(*,*,1), and pc(*,*,1), -c and k = nb, where they are -c pb(*,*,nb), pc(*,*,nb), and pa(*,*,nb) (= diagonal block). -c (each asterisk * stands for an index that ranges from 1 to mb.) -c -c c. you must also provide a subroutine of the form.. -c subroutine adda (neq, t, y, mb, nb, pa, pb, pc) -c dimension y(neq), pa(mb,mb,nb), pb(mb,mb,nb), pc(mb,mb,nb) -c which adds the nonzero blocks of the matrix a = a(t,y) to the -c contents of the arrays pa, pb, and pc, following the structure -c description in paragraph b above. -c t and the y array are input and should not be altered. -c thus the affect of adda should be the following.. -c do 30 k = 1,nb -c do 20 j = 1,mb -c do 10 i = 1,mb -c pa(i,j,k) = pa(i,j,k) + -c ( (i,j) element of k-th diagonal block of a) -c pb(i,j,k) = pb(i,j,k) + -c ( (i,j) element of block in block position (k,k+1) of a, -c or in block position (nb,nb-2) if k = nb) -c pc(i,j,k) = pc(i,j,k) + -c ( (i,j) element of block in block position (k,k-1) of a, -c or in block position (1,3) if k = 1) -c 10 continue -c 20 continue -c 30 continue -c -c d. 
for the sake of efficiency, you are encouraged to supply the -c jacobian matrix dr/dy in closed form, where r = g(t,y) - a(t,y)*s -c (s = a fixed vector) as above. if dr/dy is being supplied, -c use mf = 21, and provide a subroutine of the form.. -c subroutine jac (neq, t, y, s, mb, nb, pa, pb, pc) -c dimension y(neq), s(neq), pa(mb,mb,nb), pb(mb,mb,nb), pc(mb,mb,nb) -c which computes dr/dy as a function of t, y, and s. here t, y, and -c s are inputs, and the routine is to load dr/dy into pa, pb, pc, -c according to the structure description in paragraph b above. -c that is, load the diagonal blocks into pa, the superdiagonal blocks -c (and block (nb,nb-2) ) into pb, and the subdiagonal blocks (and -c block (1,3) ) into pc. the blocks in block-row k of dr/dy are to -c be loaded into pa(*,*,k), pb(*,*,k), and pc(*,*,k). -c only nonzero elements need be loaded, and the indexing -c of pa, pb, and pc is the same as in the adda routine. -c note that if a is independent of y (or this dependence -c is weak enough to be ignored) then jac is to compute dg/dy. -c if it is not feasible to provide a jac routine, use -c mf = 22, and lsoibt will compute an approximate jacobian -c internally by difference quotients. -c -c e. next decide whether or not to provide the initial value of the -c derivative vector dy/dt. if the initial value of a(t,y) is -c nonsingular (and not too ill-conditioned), you may let lsoibt compute -c this vector (istate = 0). (lsoibt will solve the system a*s = g for -c s, with initial values of a and g.) if a(t,y) is initially -c singular, then the system is a differential-algebraic system, and -c you must make use of the particular form of the system to compute the -c initial values of y and dy/dt. in that case, use istate = 1 and -c load the initial value of dy/dt into the array ydoti. -c the input array ydoti and the initial y array must be consistent with -c the equations a*dy/dt = g. 
this implies that the initial residual -c r = g(t,y) - a(t,y)*ydoti must be approximately zero. -c -c f. write a main program which calls subroutine lsoibt once for -c each point at which answers are desired. this should also provide -c for possible use of logical unit 6 for output of error messages -c by lsoibt. on the first call to lsoibt, supply arguments as follows.. -c res = name of user subroutine for residual function r. -c adda = name of user subroutine for computing and adding a(t,y). -c jac = name of user subroutine for jacobian matrix dr/dy -c (mf = 21). if not used, pass a dummy name. -c note.. the names for the res and adda routines and (if used) the -c jac routine must be declared external in the calling program. -c neq = number of scalar equations in the system. -c y = array of initial values, of length neq. -c ydoti = array of length neq (containing initial dy/dt if istate = 1). -c t = the initial value of the independent variable. -c tout = first point where output is desired (.ne. t). -c itol = 1 or 2 according as atol (below) is a scalar or array. -c rtol = relative tolerance parameter (scalar). -c atol = absolute tolerance parameter (scalar or array). -c the estimated local error in y(i) will be controlled so as -c to be roughly less (in magnitude) than -c ewt(i) = rtol*abs(y(i)) + atol if itol = 1, or -c ewt(i) = rtol*abs(y(i)) + atol(i) if itol = 2. -c thus the local error test passes if, in each component, -c either the absolute error is less than atol (or atol(i)), -c or the relative error is less than rtol. -c use rtol = 0.0 for pure absolute error control, and -c use atol = 0.0 (or atol(i) = 0.0) for pure relative error -c control. caution.. actual (global) errors may exceed these -c local tolerances, so choose them conservatively. -c itask = 1 for normal computation of output values of y at t = tout. -c istate = integer flag (input and output). set istate = 1 if the -c initial dy/dt is supplied, and 0 otherwise. 
-c iopt = 0 to indicate no optional inputs used. -c rwork = real work array of length at least.. -c 22 + 9*neq + 3*mb*mb*nb for mf = 21 or 22. -c lrw = declared length of rwork (in user-s dimension). -c iwork = integer work array of length at least 20 + neq. -c input in iwork(1) the block size mb and in iwork(2) the -c number nb of blocks in each direction along the matrix a. -c these must satisfy mb .ge. 1, nb .ge. 4, and mb*nb = neq. -c liw = declared length of iwork (in user-s dimension). -c mf = method flag. standard values are.. -c 21 for a user-supplied jacobian. -c 22 for an internally generated jacobian. -c for other choices of mf, see the paragraph on mf in -c the full description below. -c note that the main program must declare arrays y, ydoti, rwork, iwork, -c and possibly atol. -c -c g. the output from the first call (or any call) is.. -c y = array of computed values of y(t) vector. -c t = corresponding value of independent variable (normally tout). -c istate = 2 if lsoibt was successful, negative otherwise. -c -1 means excess work done on this call (check all inputs). -c -2 means excess accuracy requested (tolerances too small). -c -3 means illegal input detected (see printed message). -c -4 means repeated error test failures (check all inputs). -c -5 means repeated convergence failures (perhaps bad jacobian -c supplied or wrong choice of tolerances). -c -6 means error weight became zero during problem. (solution -c component i vanished, and atol or atol(i) = 0.) -c -7 cannot occur in casual use. -c -8 means lsoibt was unable to compute the initial dy/dt. -c in casual use, this means a(t,y) is initially singular. -c supply ydoti and use istate = 1 on the first call. -c -c if lsoibt returns istate = -1, -4, or -5, then the output of -c lsoibt also includes ydoti = array containing residual vector -c r = g - a * dy/dt evaluated at the current t, y, and dy/dt. -c -c h. 
to continue the integration after a successful return, simply -c reset tout and call lsoibt again. no other parameters need be reset. -c -c -c----------------------------------------------------------------------- -c example problem. -c -c the following is an example problem, with the coding needed -c for its solution by lsoibt. the problem comes from the partial -c differential equation (the burgers equation) -c du/dt = - u * du/dx + eta * d**2 u/dx**2, eta = .05, -c on -1 .le. x .le. 1. the boundary conditions are -c du/dx = 0 at x = -1 and at x = 1. -c the initial profile is a square wave, -c u = 1 in abs(x) .lt. .5, u = .5 at abs(x) = .5, u = 0 elsewhere. -c the p.d.e. is discretized in x by a simplified galerkin method, -c using piecewise linear basis functions, on a grid of 40 intervals. -c the equations at x = -1 and 1 use a 3-point difference approximation -c for the right-hand side. the result is a system a * dy/dt = g(y), -c of size neq = 41, where y(i) is the approximation to u at x = x(i), -c with x(i) = -1 + (i-1)*delx, delx = 2/(neq-1) = .05. the individual -c equations in the system are -c dy(1)/dt = ( y(3) - 2*y(2) + y(1) ) * eta / delx**2, -c dy(neq)/dt = ( y(neq-2) - 2*y(neq-1) + y(neq) ) * eta / delx**2, -c and for i = 2, 3, ..., neq-1, -c (1/6) dy(i-1)/dt + (4/6) dy(i)/dt + (1/6) dy(i+1)/dt -c = ( y(i-1)**2 - y(i+1)**2 ) / (4*delx) -c + ( y(i+1) - 2*y(i) + y(i-1) ) * eta / delx**2. -c the following coding solves the problem with mf = 21, with output -c of solution statistics at t = .1, .2, .3, and .4, and of the -c solution vector at t = .4. here the block size is just mb = 1. 
-c -c external resid, addabt, jacbt -c double precision atol, rtol, rwork, t, tout, y, ydoti -c dimension y(41), ydoti(41), rwork(514), iwork(61) -c neq = 41 -c do 10 i = 1,neq -c 10 y(i) = 0.0d0 -c y(11) = 0.5d0 -c do 20 i = 12,30 -c 20 y(i) = 1.0d0 -c y(31) = 0.5d0 -c t = 0.0d0 -c tout = 0.1d0 -c itol = 1 -c rtol = 1.0d-4 -c atol = 1.0d-5 -c itask = 1 -c istate = 0 -c iopt = 0 -c lrw = 514 -c liw = 61 -c iwork(1) = 1 -c iwork(2) = neq -c mf = 21 -c do 40 io = 1,4 -c call lsoibt (resid, addabt, jacbt, neq, y, ydoti, t, tout, -c 1 itol,rtol,atol, itask, istate, iopt, rwork,lrw,iwork,liw, mf) -c write (6,30) t, iwork(11), iwork(12), iwork(13) -c 30 format(' at t =',f5.2,' no. steps =',i4,' no. r-s =',i4, -c 1 ' no. j-s =',i3) -c if (istate .ne. 2) go to 90 -c tout = tout + 0.1d0 -c 40 continue -c write(6,50) (y(i),i=1,neq) -c 50 format(/' final solution values..'/9(5e12.4/)) -c stop -c 90 write(6,95) istate -c 95 format(///' error halt.. istate =',i3) -c stop -c end -c -c subroutine resid (n, t, y, s, r, ires) -c double precision delx, eta, eodsq, r, s, t, y -c dimension y(n), s(n), r(n) -c data eta/0.05d0/, delx/0.05d0/ -c eodsq = eta/delx**2 -c r(1) = eodsq*(y(3) - 2.0d0*y(2) + y(1)) - s(1) -c nm1 = n - 1 -c do 10 i = 2,nm1 -c r(i) = (y(i-1)**2 - y(i+1)**2)/(4.0d0*delx) -c 1 + eodsq*(y(i+1) - 2.0d0*y(i) + y(i-1)) -c 2 - (s(i-1) + 4.0d0*s(i) + s(i+1))/6.0d0 -c 10 continue -c r(n) = eodsq*(y(n-2) - 2.0d0*y(nm1) + y(n)) - s(n) -c return -c end -c -c subroutine addabt (n, t, y, mb, nb, pa, pb, pc) -c double precision pa, pb, pc, t, y -c dimension y(n), pa(mb,mb,nb), pb(mb,mb,nb), pc(mb,mb,nb) -c pa(1,1,1) = pa(1,1,1) + 1.0d0 -c nm1 = n - 1 -c do 10 k = 2,nm1 -c pa(1,1,k) = pa(1,1,k) + (4.0d0/6.0d0) -c pb(1,1,k) = pb(1,1,k) + (1.0d0/6.0d0) -c pc(1,1,k) = pc(1,1,k) + (1.0d0/6.0d0) -c 10 continue -c pa(1,1,n) = pa(1,1,n) + 1.0d0 -c return -c end -c -c subroutine jacbt (n, t, y, s, mb, nb, pa, pb, pc) -c double precision delx, eta, eodsq, pa, pb, pc, s, t, y -c dimension 
y(n), s(n), pa(mb,mb,nb),pb(mb,mb,nb),pc(mb,mb,nb) -c data eta/0.05d0/, delx/0.05d0/ -c eodsq = eta/delx**2 -c pa(1,1,1) = eodsq -c pb(1,1,1) = -2.0d0*eodsq -c pc(1,1,1) = eodsq -c do 10 k = 2,n -c pa(1,1,k) = -2.0d0*eodsq -c pb(1,1,k) = -y(k+1)*(0.5d0/delx) + eodsq -c pc(1,1,k) = y(k-1)*(0.5d0/delx) + eodsq -c 10 continue -c pb(1,1,n) = eodsq -c pc(1,1,n) = -2.0d0*eodsq -c pa(1,1,n) = eodsq -c return -c end -c -c the output of this program (on a cdc-7600 in single precision) -c is as follows.. -c -c at t = 0.10 no. steps = 35 no. r-s = 45 no. j-s = 9 -c at t = 0.20 no. steps = 43 no. r-s = 54 no. j-s = 10 -c at t = 0.30 no. steps = 48 no. r-s = 60 no. j-s = 11 -c at t = 0.40 no. steps = 51 no. r-s = 64 no. j-s = 12 -c -c final solution values.. -c 1.2747e-02 1.1997e-02 1.5560e-02 2.3767e-02 3.7224e-02 -c 5.6646e-02 8.2645e-02 1.1557e-01 1.5541e-01 2.0177e-01 -c 2.5397e-01 3.1104e-01 3.7189e-01 4.3530e-01 5.0000e-01 -c 5.6472e-01 6.2816e-01 6.8903e-01 7.4612e-01 7.9829e-01 -c 8.4460e-01 8.8438e-01 9.1727e-01 9.4330e-01 9.6281e-01 -c 9.7632e-01 9.8426e-01 9.8648e-01 9.8162e-01 9.6617e-01 -c 9.3374e-01 8.7535e-01 7.8236e-01 6.5321e-01 5.0003e-01 -c 3.4709e-01 2.1876e-01 1.2771e-01 7.3671e-02 5.0642e-02 -c 5.4496e-02 -c----------------------------------------------------------------------- -c full description of user interface to lsoibt. -c -c the user interface to lsoibt consists of the following parts. -c -c i. the call sequence to subroutine lsoibt, which is a driver -c routine for the solver. this includes descriptions of both -c the call sequence arguments and of user-supplied routines. -c following these descriptions is a description of -c optional inputs available through the call sequence, and then -c a description of optional outputs (in the work arrays). -c -c ii. descriptions of other routines in the lsoibt package that may be -c (optionally) called by the user. 
these provide the ability to -c alter error message handling, save and restore the internal -c common, and obtain specified derivatives of the solution y(t). -c -c iii. descriptions of common blocks to be declared in overlay -c or similar environments, or to be saved when doing an interrupt -c of the problem and continued solution later. -c -c iv. description of two routines in the lsoibt package, either of -c which the user may replace with his own version, if desired. -c these relate to the measurement of errors. -c -c----------------------------------------------------------------------- -c part i. call sequence. -c -c the call sequence parameters used for input only are -c res, adda, jac, neq, tout, itol, rtol, atol, itask, -c iopt, lrw, liw, mf, -c and those used for both input and output are -c y, t, istate, ydoti. -c the work arrays rwork and iwork are also used for additional and -c optional inputs and optional outputs. (the term output here refers -c to the return from subroutine lsoibt to the user-s calling program.) -c -c the legality of input parameters will be thoroughly checked on the -c initial call for the problem, but not checked thereafter unless a -c change in input parameters is flagged by istate = 3 on input. -c -c the descriptions of the call arguments are as follows. -c -c res = the name of the user-supplied subroutine which supplies -c the residual vector for the ode system, defined by -c r = g(t,y) - a(t,y) * s -c as a function of the scalar t and the vectors -c s and y ( s approximates dy/dt ). this -c subroutine is to have the form -c subroutine res ( neq, t, y, s, r, ires ) -c dimension y(1), s(1), r(1) -c where neq, t, y, s, and ires are input, and r and -c ires are output. y, s, and r are arrays of length neq. -c in dimension statements such as that above, 1 is a -c dummy dimension. it can be replaced by any value. -c on input, ires indicates how lsoibt will use the -c returned array r, as follows.. 
-c ires = 1 means that lsoibt needs the full residual, -c r = g - a*s, exactly. -c ires = -1 means that lsoibt is using r only to compute -c the jacobian dr/dy by difference quotients. -c the res routine can ignore ires, or it can omit some terms -c if ires = -1. if a does not depend on y, then res can -c just return r = g when ires = -1. if g - a*s contains other -c additive terms that are independent of y, these can also be -c dropped, if done consistently, when ires = -1. -c the subroutine should set the flag ires if it -c encounters a halt condition or illegal input. -c otherwise, it should not reset ires. on output, -c ires = 1 or -1 represents a normal return, and -c lsoibt continues integrating the ode. leave ires -c unchanged from its input value. -c ires = 2 tells lsoibt to immediately return control -c to the calling program, with istate = 3. this lets -c the calling program change parameters of the prob- -c lem if necessary. -c ires = 3 represents an error condition (for example, an -c illegal value of y). lsoibt tries to integrate the ode -c without getting ires = 3 from res. if it cannot, lsoibt -c returns with istate = -7 or -1. -c on an lsoibt return with istate = 3, -1, or -7, the values -c of t and y returned correspond to the last point reached -c successfully without getting the flag ires = 2 or 3. -c the flag values ires = 2 and 3 should not be used to -c handle switches or root-stop conditions. this is better -c done by calling lsoibt in a one-step mode and checking the -c stopping function for a sign change at each step. -c if quantities computed in the res routine are needed -c externally to lsoibt, an extra call to res should be made -c for this purpose, for consistent and accurate results. -c to get the current dy/dt for the s argument, use intdy. -c res must be declared external in the calling -c program. see note below for more about res. 
-c -c adda = the name of the user-supplied subroutine which adds the -c matrix a = a(t,y) to another matrix, p, stored in -c block-tridiagonal form. this routine is to have the form -c subroutine adda (neq, t, y, mb, nb, pa, pb, pc) -c dimension y(neq), -c 1 pa(mb,mb,nb), pb(mb,mb,nb), pc(mb,mb,nb) -c where neq, t, y, mb, nb, and the arrays pa, pb, and pc -c are input, and the arrays pa, pb, and pc are output. -c y is an array of length neq, and the arrays pa, pb, pc -c are all mb by mb by nb. -c here a block-tridiagonal structure is assumed for a(t,y), -c and also for the matrix p to which a is added here, -c as described in paragraph b of the summary of usage above. -c thus the affect of adda should be the following.. -c do 30 k = 1,nb -c do 20 j = 1,mb -c do 10 i = 1,mb -c pa(i,j,k) = pa(i,j,k) + -c ( (i,j) element of k-th diagonal block of a) -c pb(i,j,k) = pb(i,j,k) + -c ( (i,j) element of block (k,k+1) of a, -c or block (nb,nb-2) if k = nb) -c pc(i,j,k) = pc(i,j,k) + -c ( (i,j) element of block (k,k-1) of a, -c or block (1,3) if k = 1) -c 10 continue -c 20 continue -c 30 continue -c adda must be declared external in the calling program. -c see note below for more information about adda. -c -c jac = the name of the user-supplied subroutine which supplies -c the jacobian matrix, dr/dy, where r = g-a*s. jac is -c required if miter = 1 -- otherwise a dummy name can be -c passed. this subroutine is to have the form -c subroutine jac (neq, t, y, s, mb, nb, pa, pb, pc) -c dimension y(neq), s(neq), -c 1 pa(mb,mb,nb), pb(mb,mb,nb), pc(mb,mb,nb) -c where neq, t, y, s, mb, nb, and the arrays pa, pb, and pc -c are input, and the arrays pa, pb, and pc are output. -c y and s are arrays of length neq, and the arrays pa, pb, pc -c are all mb by mb by nb. 
-c pa, pb, and pc are to be loaded with partial derivatives -c (elements of the jacobian matrix) on output, in terms of the -c block-tridiagonal structure assumed, as described -c in paragraph b of the summary of usage above. -c that is, load the diagonal blocks into pa, the -c superdiagonal blocks (and block (nb,nb-2) ) into pb, and -c the subdiagonal blocks (and block (1,3) ) into pc. -c the blocks in block-row k of dr/dy are to be loaded into -c pa(*,*,k), pb(*,*,k), and pc(*,*,k). -c thus the effect of jac should be the following.. -c do 30 k = 1,nb -c do 20 j = 1,mb -c do 10 i = 1,mb -c pa(i,j,k) = ( (i,j) element of -c k-th diagonal block of dr/dy) -c pb(i,j,k) = ( (i,j) element of block (k,k+1) -c of dr/dy, or block (nb,nb-2) if k = nb) -c pc(i,j,k) = ( (i,j) element of block (k,k-1) -c of dr/dy, or block (1,3) if k = 1) -c 10 continue -c 20 continue -c 30 continue -c pa, pb, and pc are preset to zero by the solver, -c so that only the nonzero elements need be loaded by jac. -c each call to jac is preceded by a call to res with the same -c arguments neq, t, y, and s. thus to gain some efficiency, -c intermediate quantities shared by both calculations may be -c saved in a user common block by res and not recomputed by jac -c if desired. also, jac may alter the y array, if desired. -c jac need not provide dr/dy exactly. a crude -c approximation will do, so that lsoibt may be used when -c a and dr/dy are not really block-tridiagonal, but are close -c to matrices that are. -c jac must be declared external in the calling program. -c see note below for more about jac. -c -c note on res, adda, and jac-- these -c subroutines may access user-defined quantities in -c neq(2),... and/or in y(neq(1)+1),... if neq is an array -c (dimensioned in the subroutines) and/or y has length -c exceeding neq(1). however, these routines should not alter -c neq(1), y(1),...,y(neq) or any other input variables. -c see the descriptions of neq and y below. 
-c -c neq = the size of the system (number of first order ordinary -c differential equations or scalar algebraic equations). -c used only for input. -c neq may be decreased, but not increased, during the problem. -c if neq is decreased (with istate = 3 on input), the -c remaining components of y should be left undisturbed, if -c these are to be accessed in res, adda, or jac. -c -c normally, neq is a scalar, and it is generally referred to -c as a scalar in this user interface description. however, -c neq may be an array, with neq(1) set to the system size. -c (the lsoibt package accesses only neq(1).) in either case, -c this parameter is passed as the neq argument in all calls -c to res, adda, and jac. hence, if it is an array, -c locations neq(2),... may be used to store other integer data -c and pass it to res, adda, or jac. each such subroutine -c must include neq in a dimension statement in that case. -c -c y = a real array for the vector of dependent variables, of -c length neq or more. used for both input and output on the -c first call (istate = 0 or 1), and only for output on other -c calls. on the first call, y must contain the vector of -c initial values. on output, y contains the computed solution -c vector, evaluated at t. if desired, the y array may be used -c for other purposes between calls to the solver. -c -c this array is passed as the y argument in all calls to res, -c adda, and jac. hence its length may exceed neq, -c and locations y(neq+1),... may be used to store other real -c data and pass it to res, adda, or jac. (the lsoibt -c package accesses only y(1),...,y(neq). ) -c -c ydoti = a real array for the initial value of the vector -c dy/dt and for work space, of dimension at least neq. -c -c on input... -c if istate = 0 then lsoibt will compute the initial value -c of dy/dt, if a is nonsingular. thus ydoti will -c serve only as work space and may have any value. -c if istate = 1 then ydoti must contain the initial value -c of dy/dt. 
-c if istate = 2 or 3 (continuation calls) then ydoti -c may have any value. -c n.b.- if the initial value of a is singular, then -c lsoibt cannot compute the initial value of dy/dt, so -c it must be provided in ydoti, with istate=1. -c -c on output, when lsoibt terminates abnormally with istate = -c -1, -4, or -5, ydoti will contain the residual -c r = g(t,y) - a(t,y)*(dy/dt). if r is large, t is near -c its initial value, and ydoti is supplied with istate=1, -c there may have been an incorrect input value of -c ydoti = dy/dt or the problem ( as given to lsoibt ) -c may not have a solution. -c -c if desired, the ydoti array may be used for other -c purposes between calls to the solver. -c -c t = the independent variable. on input, t is used only on the -c first call, as the initial point of the integration. -c on output, after each call, t is the value at which a -c computed solution y is evaluated (usually the same as tout). -c on an error return, t is the farthest point reached. -c -c tout = the next value of t at which a computed solution is desired. -c used only for input. -c -c when starting the problem (istate = 0 or 1), tout may be -c equal to t for one call, then should .ne. t for the next -c call. for the initial t, an input value of tout .ne. t is -c used in order to determine the direction of the integration -c (i.e. the algebraic sign of the step sizes) and the rough -c scale of the problem. integration in either direction -c (forward or backward in t) is permitted. -c -c if itask = 2 or 5 (one-step modes), tout is ignored after -c the first call (i.e. the first call with tout .ne. t). -c otherwise, tout is required on every call. -c -c if itask = 1, 3, or 4, the values of tout need not be -c monotone, but a value of tout which backs up is limited -c to the current internal t interval, whose endpoints are -c tcur - hu and tcur (see optional outputs, below, for -c tcur and hu). -c -c itol = an indicator for the type of error control. 
see -c description below under atol. used only for input. -c -c rtol = a relative error tolerance parameter, either a scalar or -c an array of length neq. see description below under atol. -c input only. -c -c atol = an absolute error tolerance parameter, either a scalar or -c an array of length neq. input only. -c -c the input parameters itol, rtol, and atol determine -c the error control performed by the solver. the solver will -c control the vector e = (e(i)) of estimated local errors -c in y, according to an inequality of the form -c rms-norm of ( e(i)/ewt(i) ) .le. 1, -c where ewt(i) = rtol(i)*abs(y(i)) + atol(i), -c and the rms-norm (root-mean-square norm) here is -c rms-norm(v) = sqrt(sum v(i)**2 / neq). here ewt = (ewt(i)) -c is a vector of weights which must always be positive, and -c the values of rtol and atol should all be non-negative. -c the following table gives the types (scalar/array) of -c rtol and atol, and the corresponding form of ewt(i). -c -c itol rtol atol ewt(i) -c 1 scalar scalar rtol*abs(y(i)) + atol -c 2 scalar array rtol*abs(y(i)) + atol(i) -c 3 array scalar rtol(i)*abs(y(i)) + atol -c 4 array array rtol(i)*abs(y(i)) + atol(i) -c -c when either of these parameters is a scalar, it need not -c be dimensioned in the user-s calling program. -c -c if none of the above choices (with itol, rtol, and atol -c fixed throughout the problem) is suitable, more general -c error controls can be obtained by substituting -c user-supplied routines for the setting of ewt and/or for -c the norm calculation. see part iv below. -c -c if global errors are to be estimated by making a repeated -c run on the same problem with smaller tolerances, then all -c components of rtol and atol (i.e. of ewt) should be scaled -c down uniformly. -c -c itask = an index specifying the task to be performed. -c input only. itask has the following values and meanings. -c 1 means normal computation of output values of y(t) at -c t = tout (by overshooting and interpolating). 
-c 2 means take one step only and return. -c 3 means stop at the first internal mesh point at or -c beyond t = tout and return. -c 4 means normal computation of output values of y(t) at -c t = tout but without overshooting t = tcrit. -c tcrit must be input as rwork(1). tcrit may be equal to -c or beyond tout, but not behind it in the direction of -c integration. this option is useful if the problem -c has a singularity at or beyond t = tcrit. -c 5 means take one step, without passing tcrit, and return. -c tcrit must be input as rwork(1). -c -c note.. if itask = 4 or 5 and the solver reaches tcrit -c (within roundoff), it will return t = tcrit (exactly) to -c indicate this (unless itask = 4 and tout comes before tcrit, -c in which case answers at t = tout are returned first). -c -c istate = an index used for input and output to specify the -c state of the calculation. -c -c on input, the values of istate are as follows. -c 0 means this is the first call for the problem, and -c lsoibt is to compute the initial value of dy/dt -c (while doing other initializations). see note below. -c 1 means this is the first call for the problem, and -c the initial value of dy/dt has been supplied in -c ydoti (lsoibt will do other initializations). see note -c below. -c 2 means this is not the first call, and the calculation -c is to continue normally, with no change in any input -c parameters except possibly tout and itask. -c (if itol, rtol, and/or atol are changed between calls -c with istate = 2, the new values will be used but not -c tested for legality.) -c 3 means this is not the first call, and the -c calculation is to continue normally, but with -c a change in input parameters other than -c tout and itask. changes are allowed in -c neq, itol, rtol, atol, iopt, lrw, liw, mf, mb, nb, -c and any of the optional inputs except h0. -c (see iwork description for mb and nb.) -c note.. 
a preliminary call with tout = t is not counted -c as a first call here, as no initialization or checking of -c input is done. (such a call is sometimes useful for the -c purpose of outputting the initial conditions.) -c thus the first call for which tout .ne. t requires -c istate = 0 or 1 on input. -c -c on output, istate has the following values and meanings. -c 0 or 1 means nothing was done, as tout was equal to t with -c istate = 0 or 1 on input. (however, an internal counter -c was set to detect and prevent repeated calls of this -c type. ) -c 2 means that the integration was performed successfully. -c 3 means that the user-supplied subroutine res signalled -c lsoibt to halt the integration and return (ires=2). -c integration as far as t was achieved with no occurrence -c of ires=2, but this flag was set on attempting the next -c step. -c -1 means an excessive amount of work (more than mxstep -c steps) was done on this call, before completing the -c requested task, but the integration was otherwise -c successful as far as t. (mxstep is an optional input -c and is normally 500.) to continue, the user may -c simply reset istate to a value .gt. 1 and call again -c (the excess work step counter will be reset to 0). -c in addition, the user may increase mxstep to avoid -c this error return (see below on optional inputs). -c -2 means too much accuracy was requested for the precision -c of the machine being used. this was detected before -c completing the requested task, but the integration -c was successful as far as t. to continue, the tolerance -c parameters must be reset, and istate must be set -c to 3. the optional output tolsf may be used for this -c purpose. (note.. if this condition is detected before -c taking any steps, then an illegal input return -c (istate = -3) occurs instead.) -c -3 means illegal input was detected, before taking any -c integration steps. see written message for details. -c note.. 
if the solver detects an infinite loop of calls -c to the solver with illegal input, it will cause -c the run to stop. -c -4 means there were repeated error test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c the problem may have a singularity, or the input -c may be inappropriate. -c -5 means there were repeated convergence test failures on -c one attempted step, before completing the requested -c task, but the integration was successful as far as t. -c this may be caused by an inaccurate jacobian matrix. -c -6 means ewt(i) became zero for some i during the -c integration. pure relative error control (atol(i)=0.0) -c was requested on a variable which has now vanished. -c the integration was successful as far as t. -c -7 means that the user-supplied subroutine res set -c its error flag (ires=3) despite repeated tries by lsoibt -c to avoid that condition. -c -8 means that istate was 0 on input but lsoibt was unable -c to compute the initial value of dy/dt. see the -c printed message for details. -c -c note.. since the normal output value of istate is 2, -c it does not need to be reset for normal continuation. -c similarly, istate need not be reset if res told lsoibt -c to return because the calling program must change -c the parameters of the problem. -c also, since a negative input value of istate will be -c regarded as illegal, a negative output value requires the -c user to change it, and possibly other inputs, before -c calling the solver again. -c -c iopt = an integer flag to specify whether or not any optional -c inputs are being used on this call. input only. -c the optional inputs are listed separately below. -c iopt = 0 means no optional inputs are being used. -c default values will be used in all cases. -c iopt = 1 means one or more optional inputs are being used. -c -c rwork = a real working array (double precision). 
-c the length of rwork must be at least -c 20 + nyh*(maxord + 1) + 3*neq + lenwm where -c nyh = the initial value of neq, -c maxord = 12 (if meth = 1) or 5 (if meth = 2) (unless a -c smaller value is given as an optional input), -c lenwm = 3*mb*mb*nb + 2. -c (see mf description for the definition of meth.) -c thus if maxord has its default value and neq is constant, -c this length is -c 22 + 16*neq + 3*mb*mb*nb for mf = 11 or 12, -c 22 + 9*neq + 3*mb*mb*nb for mf = 21 or 22. -c the first 20 words of rwork are reserved for conditional -c and optional inputs and optional outputs. -c -c the following word in rwork is a conditional input.. -c rwork(1) = tcrit = critical value of t which the solver -c is not to overshoot. required if itask is -c 4 or 5, and ignored otherwise. (see itask.) -c -c lrw = the length of the array rwork, as declared by the user. -c (this will be checked by the solver.) -c -c iwork = an integer work array. the length of iwork must be at least -c 20 + neq . the first few words of iwork are used for -c additional and optional inputs and optional outputs. -c -c the following 2 words in iwork are additional required -c inputs to lsoibt.. -c iwork(1) = mb = block size -c iwork(2) = nb = number of blocks in the main diagonal -c these must satisfy mb .ge. 1, nb .ge. 4, and mb*nb = neq. -c -c liw = the length of the array iwork, as declared by the user. -c (this will be checked by the solver.) -c -c note.. the work arrays must not be altered between calls to lsoibt -c for the same problem, except possibly for the additional and -c optional inputs, and except for the last 3*neq words of rwork. -c the latter space is used for internal scratch space, and so is -c available for use by the user outside lsoibt between calls, if -c desired (but not for use by res, adda, or jac). -c -c mf = the method flag. used only for input. the legal values of -c mf are 11, 12, 21, and 22. -c mf has decimal digits meth and miter.. mf = 10*meth + miter. 
-c meth indicates the basic linear multistep method.. -c meth = 1 means the implicit adams method. -c meth = 2 means the method based on backward -c differentiation formulas (bdf-s). -c the bdf method is strongly preferred for stiff prob- -c lems, while the adams method is preferred when the prob- -c lem is not stiff. if the matrix a(t,y) is nonsingular, -c stiffness here can be taken to mean that of the explicit -c ode system dy/dt = a**(-1) * g. if a is singular, the -c concept of stiffness is not well defined. -c if you do not know whether the problem is stiff, we -c recommend using meth = 2. if it is stiff, the advan- -c tage of meth = 2 over 1 will be great, while if it is -c not stiff, the advantage of meth = 1 will be slight. -c if maximum efficiency is important, some experimentation -c with meth may be necessary. -c miter indicates the corrector iteration method.. -c miter = 1 means chord iteration with a user-supplied -c block-tridiagonal jacobian. -c miter = 2 means chord iteration with an internally -c generated (difference quotient) block- -c tridiagonal jacobian approximation, using -c 3*mb+1 extra calls to res per dr/dy evaluation. -c if miter = 1, the user must supply a subroutine -c jac (the name is arbitrary) as described above under jac. -c for miter = 2, a dummy argument can be used. -c----------------------------------------------------------------------- -c optional inputs. -c -c the following is a list of the optional inputs provided for in the -c call sequence. (see also part ii.) for each such input variable, -c this table lists its name as used in this documentation, its -c location in the call sequence, its meaning, and the default value. -c the use of any of these inputs requires iopt = 1, and in that -c case all of these inputs are examined. a value of zero for any -c of these optional inputs will cause the default value to be used. 
-c thus to use a subset of the optional inputs, simply preload -c locations 5 to 10 in rwork and iwork to 0.0 and 0 respectively, and -c then set those of interest to nonzero values. -c -c name location meaning and default value -c -c h0 rwork(5) the step size to be attempted on the first step. -c the default value is determined by the solver. -c -c hmax rwork(6) the maximum absolute step size allowed. -c the default value is infinite. -c -c hmin rwork(7) the minimum absolute step size allowed. -c the default value is 0. (this lower bound is not -c enforced on the final step before reaching tcrit -c when itask = 4 or 5.) -c -c maxord iwork(5) the maximum order to be allowed. the default -c value is 12 if meth = 1, and 5 if meth = 2. -c if maxord exceeds the default value, it will -c be reduced to the default value. -c if maxord is changed during the problem, it may -c cause the current order to be reduced. -c -c mxstep iwork(6) maximum number of (internally defined) steps -c allowed during one call to the solver. -c the default value is 500. -c -c mxhnil iwork(7) maximum number of messages printed (per problem) -c warning that t + h = t on a step (h = step size). -c this must be positive to result in a non-default -c value. the default value is 10. -c----------------------------------------------------------------------- -c optional outputs. -c -c as optional additional output from lsoibt, the variables listed -c below are quantities related to the performance of lsoibt -c which are available to the user. these are communicated by way of -c the work arrays, but also have internal mnemonic names as shown. -c except where stated otherwise, all of these outputs are defined -c on any successful return from lsoibt, and on any return with -c istate = -1, -2, -4, -5, -6, or -7. on a return with -3 (illegal -c input) or -8, they will be unchanged from their existing values -c (if any), except possibly for tolsf, lenrw, and leniw. 
-c on any error return, outputs relevant to the error will be defined, -c as noted below. -c -c name location meaning -c -c hu rwork(11) the step size in t last used (successfully). -c -c hcur rwork(12) the step size to be attempted on the next step. -c -c tcur rwork(13) the current value of the independent variable -c which the solver has actually reached, i.e. the -c current internal mesh point in t. on output, tcur -c will always be at least as far as the argument -c t, but may be farther (if interpolation was done). -c -c tolsf rwork(14) a tolerance scale factor, greater than 1.0, -c computed when a request for too much accuracy was -c detected (istate = -3 if detected at the start of -c the problem, istate = -2 otherwise). if itol is -c left unaltered but rtol and atol are uniformly -c scaled up by a factor of tolsf for the next call, -c then the solver is deemed likely to succeed. -c (the user may also ignore tolsf and alter the -c tolerance parameters in any other way appropriate.) -c -c nst iwork(11) the number of steps taken for the problem so far. -c -c nre iwork(12) the number of residual evaluations (res calls) -c for the problem so far. -c -c nje iwork(13) the number of jacobian evaluations (each involving -c an evaluation of a and dr/dy) for the problem so -c far. this equals the number of calls to adda and -c (if miter = 1) to jac, and the number of matrix -c l-u decompositions. -c -c nqu iwork(14) the method order last used (successfully). -c -c nqcur iwork(15) the order to be attempted on the next step. -c -c imxer iwork(16) the index of the component of largest magnitude in -c the weighted local error vector ( e(i)/ewt(i) ), -c on an error return with istate = -4 or -5. -c -c lenrw iwork(17) the length of rwork actually required. -c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c leniw iwork(18) the length of iwork actually required. 
-c this is defined on normal returns and on an illegal -c input return for insufficient storage. -c -c -c the following two arrays are segments of the rwork array which -c may also be of interest to the user as optional outputs. -c for each array, the table below gives its internal name, -c its base address in rwork, and its description. -c -c name base address description -c -c yh 21 the nordsieck history array, of size nyh by -c (nqcur + 1), where nyh is the initial value -c of neq. for j = 0,1,...,nqcur, column j+1 -c of yh contains hcur**j/factorial(j) times -c the j-th derivative of the interpolating -c polynomial currently representing the solution, -c evaluated at t = tcur. -c -c acor lenrw-neq+1 array of size neq used for the accumulated -c corrections on each step, scaled on output to -c represent the estimated local error in y on the -c last step. this is the vector e in the descrip- -c tion of the error control. it is defined only -c on a return from lsoibt with istate = 2. -c -c----------------------------------------------------------------------- -c part ii. other routines callable. -c -c the following are optional calls which the user may make to -c gain additional capabilities in conjunction with lsoibt. -c (the routines xsetun and xsetf are designed to conform to the -c slatec error handling package.) -c -c form of call function -c call xsetun(lun) set the logical unit number, lun, for -c output of messages from lsoibt, if -c the default is not desired. -c the default value of lun is 6. -c -c call xsetf(mflag) set a flag to control the printing of -c messages by lsoibt. -c mflag = 0 means do not print. (danger.. -c this risks losing valuable information.) -c mflag = 1 means print (the default). -c -c either of the above calls may be made at -c any time and will take effect immediately. -c -c call srcom(rsav,isav,job) saves and restores the contents of -c the internal common blocks used by -c lsoibt (see part iii below). 
-c rsav must be a real array of length 218 -c or more, and isav must be an integer -c array of length 41 or more. -c job=1 means save common into rsav/isav. -c job=2 means restore common from rsav/isav. -c srcom is useful if one is -c interrupting a run and restarting -c later, or alternating between two or -c more problems solved with lsoibt. -c -c call intdy(,,,,,) provide derivatives of y, of various -c (see below) orders, at a specified point t, if -c desired. it may be called only after -c a successful return from lsoibt. -c -c the detailed instructions for using intdy are as follows. -c the form of the call is.. -c -c call intdy (t, k, rwork(21), nyh, dky, iflag) -c -c the input parameters are.. -c -c t = value of independent variable where answers are desired -c (normally the same as the t last returned by lsoibt). -c for valid results, t must lie between tcur - hu and tcur. -c (see optional outputs for tcur and hu.) -c k = integer order of the derivative desired. k must satisfy -c 0 .le. k .le. nqcur, where nqcur is the current order -c (see optional outputs). the capability corresponding -c to k = 0, i.e. computing y(t), is already provided -c by lsoibt directly. since nqcur .ge. 1, the first -c derivative dy/dt is always available with intdy. -c rwork(21) = the base address of the history array yh. -c nyh = column length of yh, equal to the initial value of neq. -c -c the output parameters are.. -c -c dky = a real array of length neq containing the computed value -c of the k-th derivative of y(t). -c iflag = integer flag, returned as 0 if k and t were legal, -c -1 if k was illegal, and -2 if t was illegal. -c on an error return, a message is also written. -c----------------------------------------------------------------------- -c part iii. common blocks. -c -c if lsoibt is to be used in an overlay situation, the user -c must declare, in the primary overlay, the variables in.. 
-c (1) the call sequence to lsoibt, -c (2) the two internal common blocks -c /ls0001/ of length 257 (218 double precision words -c followed by 39 integer words), -c /eh0001/ of length 2 (integer words). -c -c if lsoibt is used on a system in which the contents of internal -c common blocks are not preserved between calls, the user should -c declare the above two common blocks in his main program to insure -c that their contents are preserved. -c -c if the solution of a given problem by lsoibt is to be interrupted -c and then later continued, such as when restarting an interrupted run -c or alternating between two or more problems, the user should save, -c following the return from the last lsoibt call prior to the -c interruption, the contents of the call sequence variables and the -c internal common blocks, and later restore these values before the -c next lsoibt call for that problem. to save and restore the common -c blocks, use subroutine srcom (see part ii above). -c -c----------------------------------------------------------------------- -c part iv. optionally replaceable solver routines. -c -c below are descriptions of two routines in the lsoibt package which -c relate to the measurement of errors. either routine can be -c replaced by a user-supplied version, if desired. however, since such -c a replacement may have a major impact on performance, it should be -c done only when absolutely necessary, and only with great caution. -c (note.. the means by which the package version of a routine is -c superseded by the user-s version may be system-dependent.) -c -c (a) ewset. -c the following subroutine is called just before each internal -c integration step, and sets the array of error weights, ewt, as -c described under itol/rtol/atol above.. 
-c subroutine ewset (neq, itol, rtol, atol, ycur, ewt) -c where neq, itol, rtol, and atol are as in the lsoibt call sequence, -c ycur contains the current dependent variable vector, and -c ewt is the array of weights set by ewset. -c -c if the user supplies this subroutine, it must return in ewt(i) -c (i = 1,...,neq) a positive quantity suitable for comparing errors -c in y(i) to. the ewt array returned by ewset is passed to the -c vnorm routine (see below), and also used by lsoibt in the computation -c of the optional output imxer, the diagonal jacobian approximation, -c and the increments for difference quotient jacobians. -c -c in the user-supplied version of ewset, it may be desirable to use -c the current values of derivatives of y. derivatives up to order nq -c are available from the history array yh, described above under -c optional outputs. in ewset, yh is identical to the ycur array, -c extended to nq + 1 columns with a column length of nyh and scale -c factors of h**j/factorial(j). on the first call for the problem, -c given by nst = 0, nq is 1 and h is temporarily set to 1.0. -c the quantities nq, nyh, h, and nst can be obtained by including -c in ewset the statements.. -c double precision h, rls -c common /ls0001/ rls(218),ils(39) -c nq = ils(35) -c nyh = ils(14) -c nst = ils(36) -c h = rls(212) -c thus, for example, the current value of dy/dt can be obtained as -c ycur(nyh+i)/h (i=1,...,neq) (and the division by h is -c unnecessary when nst = 0). -c -c (b) vnorm. -c the following is a real function routine which computes the weighted -c root-mean-square norm of a vector v.. -c d = vnorm (n, v, w) -c where.. -c n = the length of the vector, -c v = real array of length n containing the vector, -c w = real array of length n containing weights, -c d = sqrt( (1/n) * sum(v(i)*w(i))**2 ). -c vnorm is called with n = neq and with w(i) = 1.0/ewt(i), where -c ewt is as set by subroutine ewset. 
-c -c if the user supplies this function, it should return a non-negative -c value of vnorm suitable for use in the error control in lsoibt. -c none of the arguments should be altered by vnorm. -c for example, a user-supplied vnorm routine might.. -c -substitute a max-norm of (v(i)*w(i)) for the rms-norm, or -c -ignore some components of v in the norm, with the effect of -c suppressing the error control on those components of y. -c----------------------------------------------------------------------- -c----------------------------------------------------------------------- -c other routines in the lsoibt package. -c -c in addition to subroutine lsoibt, the lsoibt package includes the -c following subroutines and function routines.. -c aigbt computes the initial value of the vector -c dy/dt = inverse(a) * g -c intdy computes an interpolated value of the y vector at t = tout. -c stodi is the core integrator, which does one step of the -c integration and the associated error control. -c cfode sets all method coefficients and test constants. -c ewset sets the error weight vector ewt before each step. -c vnorm computes the weighted r.m.s. norm of a vector. -c srcom is a user-callable routine to save and restore -c the contents of the internal common blocks. -c pjibt computes and preprocesses the jacobian matrix -c and the newton iteration matrix p. -c slsbt manages solution of linear system in chord iteration. -c decbt and solbt are routines for solving block-tridiagonal -c systems of linear algebraic equations. -c dgefa and dgesl are routines from linpack for solving full -c systems of linear algebraic equations. -c daxpy, dscal, idamax, and ddot are basic linear algebra modules -c (blas) used by the above linpack routines. -c d1mach computes the unit roundoff in a machine-independent manner. -c xerrwv, xsetun, and xsetf handle the printing of all error -c messages and warnings. xerrwv is machine-dependent. -c note.. 
vnorm, idamax, ddot, and d1mach are function routines. -c all the others are subroutines. -c -c the intrinsic and external routines used by lsoibt are.. dabs, -c dmax1, dmin1, dfloat, iabs, max0, min0, mod, dsign, dsqrt, and write. -c -c a block data subprogram is also included with the package, -c for loading some of the variables in internal common. -c -c----------------------------------------------------------------------- -c the following card is for optimized compilation on llnl compilers. -clll. optimize -c----------------------------------------------------------------------- - external pjibt, slsbt - integer illin, init, lyh, lewt, lacor, lsavr, lwm, liwm, - 1 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns - integer icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 1 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu - integer i, i1, i2, ier, iflag, imxer, ires, kgo, - 1 leniw, lenrw, lenwm, lp, lyd0, mb, mord, mxhnl0, mxstp0, nb - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision atoli, ayi, big, ewti, h0, hmax, hmx, rh, rtoli, - 1 tcrit, tdist, tnext, tol, tolsf, tp, size, sum, w0, - 2 d1mach, vnorm - dimension mord(2) - logical ihit -c----------------------------------------------------------------------- -c the following internal common block contains -c (a) variables which are local to any subroutine but whose values must -c be preserved between calls to the routine (own variables), and -c (b) variables which are communicated between subroutines. -c block ls0001 is shared by the lsoibt, lsodi, and lsode packages. -c the structure of ls0001 is as follows.. all real variables are -c listed first, followed by all integers. within each type, the -c variables are grouped with those local to subroutine lsoibt first, -c then those local to subroutine stodi, and finally those used -c for communication. the block is declared in subroutines -c lsoibt, intdy, stodi, pjibt, and slsbt. 
groups of variables are -c replaced by dummy arrays in the common declarations in routines -c where those variables are not used. -c----------------------------------------------------------------------- - common /ls0001/ rowns(209), - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 2 illin, init, lyh, lewt, lacor, lsavr, lwm, liwm, - 3 mxstep, mxhnil, nhnil, ntrep, nslast, nyh, iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu -c - data mord(1),mord(2)/12,5/, mxstp0/500/, mxhnl0/10/ -c----------------------------------------------------------------------- -c block a. -c this code block is executed on every call. -c it tests istate and itask for legality and branches appropriately. -c if istate .gt. 1 but the flag init shows that initialization has -c not yet been done, an error return occurs. -c if istate = 0 or 1 and tout = t, jump to block g and return -c immediately. -c----------------------------------------------------------------------- - if (istate .lt. 0 .or. istate .gt. 3) go to 601 - if (itask .lt. 1 .or. itask .gt. 5) go to 602 - if (istate .le. 1) go to 10 - if (init .eq. 0) go to 603 - if (istate .eq. 2) go to 200 - go to 20 - 10 init = 0 - if (tout .eq. t) go to 430 - 20 ntrep = 0 -c----------------------------------------------------------------------- -c block b. -c the next code block is executed for the initial call (istate = 0 or 1) -c or for a continuation call with parameter changes (istate = 3). -c it contains checking of all inputs and various initializations. -c -c first check legality of the non-optional inputs neq, itol, iopt, -c mf, mb, and nb. -c----------------------------------------------------------------------- - if (neq(1) .le. 0) go to 604 - if (istate .le. 1) go to 25 - if (neq(1) .gt. n) go to 605 - 25 n = neq(1) - if (itol .lt. 1 .or. itol .gt. 4) go to 606 - if (iopt .lt. 0 .or. iopt .gt. 
1) go to 607 - meth = mf/10 - miter = mf - 10*meth - if (meth .lt. 1 .or. meth .gt. 2) go to 608 - if (miter .lt. 1 .or. miter .gt. 2) go to 608 - mb = iwork(1) - nb = iwork(2) - if (mb .lt. 1 .or. mb .gt. n) go to 609 - if (nb .lt. 4) go to 610 - if (mb*nb .ne. n) go to 609 -c next process and check the optional inputs. -------------------------- - if (iopt .eq. 1) go to 40 - maxord = mord(meth) - mxstep = mxstp0 - mxhnil = mxhnl0 - if (istate .le. 1) h0 = 0.0d0 - hmxi = 0.0d0 - hmin = 0.0d0 - go to 60 - 40 maxord = iwork(5) - if (maxord .lt. 0) go to 611 - if (maxord .eq. 0) maxord = 100 - maxord = min0(maxord,mord(meth)) - mxstep = iwork(6) - if (mxstep .lt. 0) go to 612 - if (mxstep .eq. 0) mxstep = mxstp0 - mxhnil = iwork(7) - if (mxhnil .lt. 0) go to 613 - if (mxhnil .eq. 0) mxhnil = mxhnl0 - if (istate .gt. 1) go to 50 - h0 = rwork(5) - if ((tout - t)*h0 .lt. 0.0d0) go to 614 - 50 hmax = rwork(6) - if (hmax .lt. 0.0d0) go to 615 - hmxi = 0.0d0 - if (hmax .gt. 0.0d0) hmxi = 1.0d0/hmax - hmin = rwork(7) - if (hmin .lt. 0.0d0) go to 616 -c----------------------------------------------------------------------- -c set work array pointers and check lengths lrw and liw. -c pointers to segments of rwork and iwork are named by prefixing l to -c the name of the segment. e.g., the segment yh starts at rwork(lyh). -c segments of rwork (in order) are denoted yh, wm, ewt, savr, acor. -c----------------------------------------------------------------------- - 60 lyh = 21 - if (istate .le. 1) nyh = n - lwm = lyh + (maxord + 1)*nyh - lenwm = 3*mb*mb*nb + 2 - lewt = lwm + lenwm - lsavr = lewt + n - lacor = lsavr + n - lenrw = lacor + n - 1 - iwork(17) = lenrw - liwm = 1 - leniw = 20 + n - iwork(18) = leniw - if (lenrw .gt. lrw) go to 617 - if (leniw .gt. liw) go to 618 -c check rtol and atol for legality. ------------------------------------ - rtoli = rtol(1) - atoli = atol(1) - do 70 i = 1,n - if (itol .ge. 3) rtoli = rtol(i) - if (itol .eq. 2 .or. itol .eq. 
4) atoli = atol(i) - if (rtoli .lt. 0.0d0) go to 619 - if (atoli .lt. 0.0d0) go to 620 - 70 continue - if (istate .le. 1) go to 100 -c if istate = 3, set flag to signal parameter changes to stodi. -------- - jstart = -1 - if (nq .le. maxord) go to 90 -c maxord was reduced below nq. copy yh(*,maxord+2) into ydoti.--------- - do 80 i = 1,n - 80 ydoti(i) = rwork(i+lwm-1) -c reload wm(1) = rwork(lwm), since lwm may have changed. --------------- - 90 rwork(lwm) = dsqrt(uround) - if (n .eq. nyh) go to 200 -c neq was reduced. zero part of yh to avoid undefined references. ----- - i1 = lyh + l*nyh - i2 = lyh + (maxord + 1)*nyh - 1 - if (i1 .gt. i2) go to 200 - do 95 i = i1,i2 - 95 rwork(i) = 0.0d0 - go to 200 -c----------------------------------------------------------------------- -c block c. -c the next block is for the initial call only (istate = 0 or 1). -c it contains all remaining initializations, the call to aigbt -c (if istate = 1), and the calculation of the initial step size. -c the error weights in ewt are inverted after being loaded. -c----------------------------------------------------------------------- - 100 uround = d1mach(4) - tn = t - if (itask .ne. 4 .and. itask .ne. 5) go to 105 - tcrit = rwork(1) - if ((tcrit - tout)*(tout - t) .lt. 0.0d0) go to 625 - if (h0 .ne. 0.0d0 .and. (t + h0 - tcrit)*h0 .gt. 0.0d0) - 1 h0 = tcrit - t - 105 jstart = 0 - rwork(lwm) = dsqrt(uround) - nhnil = 0 - nst = 0 - nre = 0 - nje = 0 - nslast = 0 - hu = 0.0d0 - nqu = 0 - ccmax = 0.3d0 - maxcor = 3 - msbp = 20 - mxncf = 10 -c compute initial dy/dt, if necessary, and load it and initial y into yh - lyd0 = lyh + nyh - lp = lwm + 1 - if ( istate .eq. 1 ) go to 120 -c lsoibt must compute initial dy/dt (lyd0 points to yh(*,2)). 
---------- - call aigbt( res, adda, neq, t, y, rwork(lyd0), - 1 mb, nb, rwork(lp), iwork(21), ier ) - nre = nre + 1 - if (ier.lt.0) go to 560 - if (ier.eq.0) go to 110 - go to 565 - 110 continue - do 115 i = 1,n - 115 rwork(i+lyh-1) = y(i) - go to 130 -c initial dy/dt has been supplied. ------------------------------------- - 120 do 125 i = 1,n - rwork(i+lyh-1) = y(i) - 125 rwork(i+lyd0-1) = ydoti(i) -c load and invert the ewt array. (h is temporarily set to 1.0.) ------- - 130 continue - nq = 1 - h = 1.0d0 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 135 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 621 - 135 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) -c----------------------------------------------------------------------- -c the coding below computes the step size, h0, to be attempted on the -c first step, unless the user has supplied a value for this. -c first check that tout - t differs significantly from zero. -c a scalar tolerance quantity tol is computed, as max(rtol(i)) -c if this is positive, or max(atol(i)/abs(y(i))) otherwise, adjusted -c so as to be between 100*uround and 1.0e-3. -c then the computed value h0 is given by.. -c neq -c h0**2 = tol / ( w0**-2 + (1/neq) * sum ( ydot(i)/ywt(i) )**2 ) -c 1 -c where w0 = max ( abs(t), abs(tout) ), -c ydot(i) = i-th component of initial value of dy/dt, -c ywt(i) = ewt(i)/tol (a weight for y(i)). -c the sign of h0 is inferred from the initial values of tout and t. -c----------------------------------------------------------------------- - if (h0 .ne. 0.0d0) go to 180 - tdist = dabs(tout - t) - w0 = dmax1(dabs(t),dabs(tout)) - if (tdist .lt. 2.0d0*uround*w0) go to 622 - tol = rtol(1) - if (itol .le. 2) go to 145 - do 140 i = 1,n - 140 tol = dmax1(tol,rtol(i)) - 145 if (tol .gt. 0.0d0) go to 160 - atoli = atol(1) - do 150 i = 1,n - if (itol .eq. 2 .or. itol .eq. 4) atoli = atol(i) - ayi = dabs(y(i)) - if (ayi .ne. 
0.0d0) tol = dmax1(tol,atoli/ayi) - 150 continue - 160 tol = dmax1(tol,100.0d0*uround) - tol = dmin1(tol,0.001d0) - sum = vnorm (n, rwork(lyd0), rwork(lewt)) - sum = 1.0d0/(tol*w0*w0) + tol*sum**2 - h0 = 1.0d0/dsqrt(sum) - h0 = dmin1(h0,tdist) - h0 = dsign(h0,tout-t) -c adjust h0 if necessary to meet hmax bound. --------------------------- - 180 rh = dabs(h0)*hmxi - if (rh .gt. 1.0d0) h0 = h0/rh -c load h with h0 and scale yh(*,2) by h0. ------------------------------ - h = h0 - do 190 i = 1,n - 190 rwork(i+lyd0-1) = h0*rwork(i+lyd0-1) - go to 270 -c----------------------------------------------------------------------- -c block d. -c the next code block is for continuation calls only (istate = 2 or 3) -c and is to check stop conditions before taking a step. -c----------------------------------------------------------------------- - 200 nslast = nst - go to (210, 250, 220, 230, 240), itask - 210 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 220 tp = tn - hu*(1.0d0 + 100.0d0*uround) - if ((tp - tout)*h .gt. 0.0d0) go to 623 - if ((tn - tout)*h .lt. 0.0d0) go to 250 - go to 400 - 230 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - if ((tcrit - tout)*h .lt. 0.0d0) go to 625 - if ((tn - tout)*h .lt. 0.0d0) go to 245 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - if (iflag .ne. 0) go to 627 - t = tout - go to 420 - 240 tcrit = rwork(1) - if ((tn - tcrit)*h .gt. 0.0d0) go to 624 - 245 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - if (istate .eq. 2) jstart = -2 -c----------------------------------------------------------------------- -c block e. -c the next block is normally executed for all calls and contains -c the call to the one-step core integrator stodi. 
-c -c this is a looping point for the integration steps. -c -c first check for too many steps being taken, update ewt (if not at -c start of problem), check for too much accuracy being requested, and -c check for h below the roundoff level in t. -c----------------------------------------------------------------------- - 250 continue - if ((nst-nslast) .ge. mxstep) go to 500 - call ewset (n, itol, rtol, atol, rwork(lyh), rwork(lewt)) - do 260 i = 1,n - if (rwork(i+lewt-1) .le. 0.0d0) go to 510 - 260 rwork(i+lewt-1) = 1.0d0/rwork(i+lewt-1) - 270 tolsf = uround*vnorm (n, rwork(lyh), rwork(lewt)) - if (tolsf .le. 1.0d0) go to 280 - tolsf = tolsf*2.0d0 - if (nst .eq. 0) go to 626 - go to 520 - 280 if ((tn + h) .ne. tn) go to 290 - nhnil = nhnil + 1 - if (nhnil .gt. mxhnil) go to 290 - call xerrwv('lsoibt-- warning..internal t (=r1) and h (=r2) are', - 1 50, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' such that in the machine, t + h = t on the next step ', - 1 60, 101, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' (h = step size). solver will continue anyway', - 1 50, 101, 0, 0, 0, 0, 2, tn, h) - if (nhnil .lt. mxhnil) go to 290 - call xerrwv('lsoibt-- above warning has been issued i1 times. ', - 1 50, 102, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' it will not be issued again for this problem', - 1 50, 102, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - 290 continue -c----------------------------------------------------------------------- -c call stodi(neq,y,yh,nyh,yh1,ewt,savf,savr,acor,wm,iwm,res, -c adda,jac,pjibt,slsbt) -c note... savf in stodi occupies the same space as ydoti in lsoibt. -c----------------------------------------------------------------------- - call stodi (neq, y, rwork(lyh), nyh, rwork(lyh), rwork(lewt), - 1 ydoti, rwork(lsavr), rwork(lacor), rwork(lwm), - 2 iwork(liwm), res, adda, jac, pjibt, slsbt ) - kgo = 1 - kflag - go to (300, 530, 540, 400, 550), kgo -c -c kgo = 1,success. 2,error test failure. 3,convergence failure. 
-c 4,res ordered return. 5,res returned error. -c----------------------------------------------------------------------- -c block f. -c the following block handles the case of a successful return from the -c core integrator (kflag = 0). test for stop conditions. -c----------------------------------------------------------------------- - 300 init = 1 - go to (310, 400, 330, 340, 350), itask -c itask = 1. if tout has been reached, interpolate. ------------------- - 310 if ((tn - tout)*h .lt. 0.0d0) go to 250 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 -c itask = 3. jump to exit if tout was reached. ------------------------ - 330 if ((tn - tout)*h .ge. 0.0d0) go to 400 - go to 250 -c itask = 4. see if tout or tcrit was reached. adjust h if necessary. - 340 if ((tn - tout)*h .lt. 0.0d0) go to 345 - call intdy (tout, 0, rwork(lyh), nyh, y, iflag) - t = tout - go to 420 - 345 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx - if (ihit) go to 400 - tnext = tn + h*(1.0d0 + 4.0d0*uround) - if ((tnext - tcrit)*h .le. 0.0d0) go to 250 - h = (tcrit - tn)*(1.0d0 - 4.0d0*uround) - jstart = -2 - go to 250 -c itask = 5. see if tcrit was reached and jump to exit. --------------- - 350 hmx = dabs(tn) + dabs(h) - ihit = dabs(tn - tcrit) .le. 100.0d0*uround*hmx -c----------------------------------------------------------------------- -c block g. -c the following block handles all successful returns from lsoibt. -c if itask .ne. 1, y is loaded from yh and t is set accordingly. -c istate is set to 2, the illegal input counter is zeroed, and the -c optional outputs are loaded into the work arrays before returning. if -c istate = 0 or 1 and tout = t, there is a return with no action taken, -c except that if this has happened repeatedly, the run is terminated. -c----------------------------------------------------------------------- - 400 do 410 i = 1,n - 410 y(i) = rwork(i+lyh-1) - t = tn - if (itask .ne. 4 .and. itask .ne. 
5) go to 420 - if (ihit) t = tcrit - 420 istate = 2 - if ( kflag .eq. -3 ) istate = 3 - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nre - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - return -c - 430 ntrep = ntrep + 1 - if (ntrep .lt. 5) return - call xerrwv( - 1 'lsoibt-- repeated calls with istate= 0 or 1 and tout= t(=r1)', - 1 60, 301, 0, 0, 0, 0, 1, t, 0.0d0) - go to 800 -c----------------------------------------------------------------------- -c block h. -c the following block handles all unsuccessful returns other than -c those for illegal input. first the error message routine is called. -c if there was an error test or convergence test failure, imxer is set. -c then y is loaded from yh, t is set to tn, and the illegal input -c counter illin is set to 0. the optional outputs are loaded into -c the work arrays before returning. -c----------------------------------------------------------------------- -c the maximum number of steps was taken before reaching tout. ---------- - 500 call xerrwv('lsoibt-- at current t (=r1), mxstep (=i1) steps ', - 1 50, 201, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' taken on this call before reaching tout ', - 1 50, 201, 0, 1, mxstep, 0, 1, tn, 0.0d0) - istate = -1 - go to 580 -c ewt(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 ewti = rwork(lewt+i-1) - call xerrwv('lsoibt-- at t (=r1), ewt(i1) has become r2 .le. 0.', - 1 50, 202, 0, 1, i, 0, 2, tn, ewti) - istate = -6 - go to 590 -c too much accuracy requested for machine precision. ------------------- - 520 call xerrwv('lsoibt-- at t (=r1), too much accuracy requested ', - 1 50, 203, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' for precision of machine.. see tolsf (=r2) ', - 1 50, 203, 0, 0, 0, 0, 2, tn, tolsf) - rwork(14) = tolsf - istate = -2 - go to 590 -c kflag = -1. error test failed repeatedly or with abs(h) = hmin. 
----- - 530 call xerrwv('lsoibt-- at t(=r1) and step size h(=r2), the error', - 1 50, 204, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' test failed repeatedly or with abs(h) = hmin', - 1 50, 204, 0, 0, 0, 0, 2, tn, h) - istate = -4 - go to 570 -c kflag = -2. convergence failed repeatedly or with abs(h) = hmin. ---- - 540 call xerrwv('lsoibt-- at t (=r1) and step size h (=r2), the ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' corrector convergence failed repeatedly ', - 1 50, 205, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' or with abs(h) = hmin ', - 1 30, 205, 0, 0, 0, 0, 2, tn, h) - istate = -5 - go to 570 -c ires = 3 returned by res, despite retries by stodi. ------------------ - 550 call xerrwv('lsoibt-- at t (=r1) residual routine returned ', - 1 50, 206, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' error ires = 3 repeatedly ', - 1 40, 206, 0, 0, 0, 0, 1, tn, 0.0d0) - istate = -7 - go to 590 -c aigbt failed because a diagonal block of a-matrix was singular. ------ - 560 ier = -ier - call xerrwv( - 1 'lsoibt-- attempt to initialize dy/dt failed.. matrix a has a', - 1 60, 207, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' singular diagonal block, block no. = (i1) ', - 2 50, 207, 0, 1, ier, 0, 0, 0.0d0, 0.0d0) - istate = -8 - return -c aigbt failed because res set ires to 2 or 3. ------------------------- - 565 call xerrwv('lsoibt-- attempt to initialize dy/dt failed ', - 1 50, 208, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' because residual routine set its error flag ', - 1 50, 208, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' to ires = (i1)', - 1 20, 208, 0, 1, ier, 0, 0, 0.0d0, 0.0d0) - istate = -8 - return -c compute imxer if relevant. ------------------------------------------- - 570 big = 0.0d0 - imxer = 1 - do 575 i = 1,n - size = dabs(rwork(i+lacor-1)*rwork(i+lewt-1)) - if (big .ge. size) go to 575 - big = size - imxer = i - 575 continue - iwork(16) = imxer -c compute residual if relevant. 
---------------------------------------- - 580 lyd0 = lyh + nyh - do 585 i = 1,n - rwork(i+lsavr-1) = rwork(i+lyd0-1)/h - 585 y(i) = rwork(i+lyh-1) - ires = 1 - call res ( neq, tn, y, rwork(lsavr), ydoti, ires ) - nre = nre + 1 - if ( ires .le. 1 ) go to 595 - call xerrwv('lsoibt-- residual routine set its flag ires ', - 1 50, 210, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv(' to (i1) when called for final output. ', - 1 50, 210, 0, 1, ires, 0, 0, 0.0d0, 0.0d0) - go to 595 -c set y vector, t, illin, and optional outputs. ------------------------ - 590 do 592 i = 1,n - 592 y(i) = rwork(i+lyh-1) - 595 t = tn - illin = 0 - rwork(11) = hu - rwork(12) = h - rwork(13) = tn - iwork(11) = nst - iwork(12) = nre - iwork(13) = nje - iwork(14) = nqu - iwork(15) = nq - return -c----------------------------------------------------------------------- -c block i. -c the following block handles all error returns due to illegal input -c (istate = -3), as detected before calling the core integrator. -c first the error message routine is called. then if there have been -c 5 consecutive such returns just before this call to the solver, -c the run is halted. -c----------------------------------------------------------------------- - 601 call xerrwv('lsoibt-- istate (=i1) illegal ', - 1 30, 1, 0, 1, istate, 0, 0, 0.0d0, 0.0d0) - go to 700 - 602 call xerrwv('lsoibt-- itask (=i1) illegal ', - 1 30, 2, 0, 1, itask, 0, 0, 0.0d0, 0.0d0) - go to 700 - 603 call xerrwv('lsoibt-- istate .gt. 1 but lsoibt not initialized ', - 1 50, 3, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - go to 700 - 604 call xerrwv('lsoibt-- neq (=i1) .lt. 
1 ', - 1 30, 4, 0, 1, neq(1), 0, 0, 0.0d0, 0.0d0) - go to 700 - 605 call xerrwv('lsoibt-- istate = 3 and neq increased (i1 to i2) ', - 1 50, 5, 0, 2, n, neq(1), 0, 0.0d0, 0.0d0) - go to 700 - 606 call xerrwv('lsoibt-- itol (=i1) illegal ', - 1 30, 6, 0, 1, itol, 0, 0, 0.0d0, 0.0d0) - go to 700 - 607 call xerrwv('lsoibt-- iopt (=i1) illegal ', - 1 30, 7, 0, 1, iopt, 0, 0, 0.0d0, 0.0d0) - go to 700 - 608 call xerrwv('lsoibt-- mf (=i1) illegal ', - 1 30, 8, 0, 1, mf, 0, 0, 0.0d0, 0.0d0) - go to 700 - 609 call xerrwv('lsoibt-- mb (=i1) or nb (=i2) illegal ', - 1 50, 9, 0, 2, mb, nb, 0, 0.0d0, 0.0d0) - go to 700 - 610 call xerrwv('lsoibt-- nb(=i1) illegal.. .lt. 4 ', - 1 50, 10, 0, 1, nb, 0, 0, 0.0d0, 0.0d0) - go to 700 - 611 call xerrwv('lsoibt-- maxord (=i1) .lt. 0 ', - 1 30, 11, 0, 1, maxord, 0, 0, 0.0d0, 0.0d0) - go to 700 - 612 call xerrwv('lsoibt-- mxstep (=i1) .lt. 0 ', - 1 30, 12, 0, 1, mxstep, 0, 0, 0.0d0, 0.0d0) - go to 700 - 613 call xerrwv('lsoibt-- mxhnil (=i1) .lt. 0 ', - 1 30, 13, 0, 1, mxhnil, 0, 0, 0.0d0, 0.0d0) - go to 700 - 614 call xerrwv('lsoibt-- tout (=r1) behind t (=r2) ', - 1 40, 14, 0, 0, 0, 0, 2, tout, t) - call xerrwv(' integration direction is given by h0 (=r1) ', - 1 50, 14, 0, 0, 0, 0, 1, h0, 0.0d0) - go to 700 - 615 call xerrwv('lsoibt-- hmax (=r1) .lt. 0.0 ', - 1 30, 15, 0, 0, 0, 0, 1, hmax, 0.0d0) - go to 700 - 616 call xerrwv('lsoibt-- hmin (=r1) .lt. 0.0 ', - 1 30, 16, 0, 0, 0, 0, 1, hmin, 0.0d0) - go to 700 - 617 call xerrwv( - 1 'lsoibt-- rwork length needed, lenrw (=i1), exceeds lrw (=i2)', - 1 60, 17, 0, 2, lenrw, lrw, 0, 0.0d0, 0.0d0) - go to 700 - 618 call xerrwv( - 1 'lsoibt-- iwork length needed, leniw (=i1), exceeds liw (=i2)', - 1 60, 18, 0, 2, leniw, liw, 0, 0.0d0, 0.0d0) - go to 700 - 619 call xerrwv('lsoibt-- rtol(=i1) is r1 .lt. 0.0 ', - 1 40, 19, 0, 1, i, 0, 1, rtoli, 0.0d0) - go to 700 - 620 call xerrwv('lsoibt-- atol(=i1) is r1 .lt. 
0.0 ', - 1 40, 20, 0, 1, i, 0, 1, atoli, 0.0d0) - go to 700 - 621 ewti = rwork(lewt+i-1) - call xerrwv('lsoibt-- ewt(=i1) is r1 .le. 0.0 ', - 1 40, 21, 0, 1, i, 0, 1, ewti, 0.0d0) - go to 700 - 622 call xerrwv( - 1 'lsoibt-- tout (=r1) too close to t(=r2) to start integration', - 1 60, 22, 0, 0, 0, 0, 2, tout, t) - go to 700 - 623 call xerrwv( - 1 'lsoibt-- itask = i1 and tout (=r1) behind tcur - hu (= r2) ', - 1 60, 23, 0, 1, itask, 0, 2, tout, tp) - go to 700 - 624 call xerrwv( - 1 'lsoibt-- itask = 4 or 5 and tcrit (=r1) behind tcur (=r2) ', - 1 60, 24, 0, 0, 0, 0, 2, tcrit, tn) - go to 700 - 625 call xerrwv( - 1 'lsoibt-- itask = 4 or 5 and tcrit (=r1) behind tout (=r2) ', - 1 60, 25, 0, 0, 0, 0, 2, tcrit, tout) - go to 700 - 626 call xerrwv('lsoibt-- at start of problem, too much accuracy ', - 1 50, 26, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) - call xerrwv( - 1 ' requested for precision of machine.. see tolsf (=r1) ', - 1 60, 26, 0, 0, 0, 0, 1, tolsf, 0.0d0) - rwork(14) = tolsf - go to 700 - 627 call xerrwv('lsoibt-- trouble from intdy. itask = i1, tout = r1', - 1 50, 27, 0, 1, itask, 0, 1, tout, 0.0d0) -c - 700 if (illin .eq. 5) go to 710 - illin = illin + 1 - istate = -3 - return - 710 call xerrwv('lsoibt-- repeated occurrences of illegal input ', - 1 50, 302, 0, 0, 0, 0, 0, 0.0d0, 0.0d0) -c - 800 call xerrwv('lsoibt-- run aborted.. apparent infinite loop ', - 1 50, 303, 2, 0, 0, 0, 0, 0.0d0, 0.0d0) - return -c----------------------- end of subroutine lsoibt ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/md.f b/scipy-0.10.1/scipy/integrate/odepack/md.f deleted file mode 100644 index da84e503ed..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/md.f +++ /dev/null @@ -1,140 +0,0 @@ - subroutine md - * (n, ia,ja, max, v,l, head,last,next, mark, flag) -clll. 
optimize -c*********************************************************************** -c md -- minimum degree algorithm (based on element model) -c*********************************************************************** -c -c description -c -c md finds a minimum degree ordering of the rows and columns of a -c general sparse matrix m stored in (ia,ja,a) format. -c when the structure of m is nonsymmetric, the ordering is that -c obtained for the symmetric matrix m + m-transpose. -c -c -c additional parameters -c -c max - declared dimension of the one-dimensional arrays v and l. -c max must be at least n+2k, where k is the number of -c nonzeroes in the strict upper triangle of m + m-transpose -c -c v - integer one-dimensional work array. dimension = max -c -c l - integer one-dimensional work array. dimension = max -c -c head - integer one-dimensional work array. dimension = n -c -c last - integer one-dimensional array used to return the permutation -c of the rows and columns of m corresponding to the minimum -c degree ordering. dimension = n -c -c next - integer one-dimensional array used to return the inverse of -c the permutation returned in last. dimension = n -c -c mark - integer one-dimensional work array (may be the same as v). -c dimension = n -c -c flag - integer error flag. 
values and their meanings are - -c 0 no errors detected -c 9n+k insufficient storage in md -c -c -c definitions of internal parameters -c -c ---------+--------------------------------------------------------- -c v(s) - value field of list entry -c ---------+--------------------------------------------------------- -c l(s) - link field of list entry (0 =) end of list) -c ---------+--------------------------------------------------------- -c l(vi) - pointer to element list of uneliminated vertex vi -c ---------+--------------------------------------------------------- -c l(ej) - pointer to boundary list of active element ej -c ---------+--------------------------------------------------------- -c head(d) - vj =) vj head of d-list d -c - 0 =) no vertex in d-list d -c -c -c - vi uneliminated vertex -c - vi in ek - vi not in ek -c ---------+-----------------------------+--------------------------- -c next(vi) - undefined but nonnegative - vj =) vj next in d-list -c - - 0 =) vi tail of d-list -c ---------+-----------------------------+--------------------------- -c last(vi) - (not set until mdp) - -d =) vi head of d-list d -c --vk =) compute degree - vj =) vj last in d-list -c - ej =) vi prototype of ej - 0 =) vi not in any d-list -c - 0 =) do not compute degree - -c ---------+-----------------------------+--------------------------- -c mark(vi) - mark(vk) - nonneg. tag .lt. mark(vk) -c -c -c - vi eliminated vertex -c - ei active element - otherwise -c ---------+-----------------------------+--------------------------- -c next(vi) - -j =) vi was j-th vertex - -j =) vi was j-th vertex -c - to be eliminated - to be eliminated -c ---------+-----------------------------+--------------------------- -c last(vi) - m =) size of ei = m - undefined -c ---------+-----------------------------+--------------------------- -c mark(vi) - -m =) overlap count of ei - undefined -c - with ek = m - -c - otherwise nonnegative tag - -c - .lt. 
mark(vk) - -c -c----------------------------------------------------------------------- -c - integer ia(1), ja(1), v(1), l(1), head(1), last(1), next(1), - * mark(1), flag, tag, dmin, vk,ek, tail - equivalence (vk,ek) -c -c----initialization - tag = 0 - call mdi - * (n, ia,ja, max,v,l, head,last,next, mark,tag, flag) - if (flag.ne.0) return -c - k = 0 - dmin = 1 -c -c----while k .lt. n do - 1 if (k.ge.n) go to 4 -c -c------search for vertex of minimum degree - 2 if (head(dmin).gt.0) go to 3 - dmin = dmin + 1 - go to 2 -c -c------remove vertex vk of minimum degree from degree list - 3 vk = head(dmin) - head(dmin) = next(vk) - if (head(dmin).gt.0) last(head(dmin)) = -dmin -c -c------number vertex vk, adjust tag, and tag vk - k = k+1 - next(vk) = -k - last(ek) = dmin - 1 - tag = tag + last(ek) - mark(vk) = tag -c -c------form element ek from uneliminated neighbors of vk - call mdm - * (vk,tail, v,l, last,next, mark) -c -c------purge inactive elements and do mass elimination - call mdp - * (k,ek,tail, v,l, head,last,next, mark) -c -c------update degrees of uneliminated vertices in ek - call mdu - * (ek,dmin, v,l, head,last,next, mark) -c - go to 1 -c -c----generate inverse permutation from permutation - 4 do 5 k=1,n - next(k) = -next(k) - 5 last(next(k)) = k -c - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/mdi.f b/scipy-0.10.1/scipy/integrate/odepack/mdi.f deleted file mode 100644 index d6ecd7de89..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/mdi.f +++ /dev/null @@ -1,72 +0,0 @@ - subroutine mdi - * (n, ia,ja, max,v,l, head,last,next, mark,tag, flag) -clll. 
optimize -c*********************************************************************** -c mdi -- initialization -c*********************************************************************** - integer ia(1), ja(1), v(1), l(1), head(1), last(1), next(1), - * mark(1), tag, flag, sfs, vi,dvi, vj -c -c----initialize degrees, element lists, and degree lists - do 1 vi=1,n - mark(vi) = 1 - l(vi) = 0 - 1 head(vi) = 0 - sfs = n+1 -c -c----create nonzero structure -c----for each nonzero entry a(vi,vj) - do 6 vi=1,n - jmin = ia(vi) - jmax = ia(vi+1) - 1 - if (jmin.gt.jmax) go to 6 - do 5 j=jmin,jmax - vj = ja(j) - if (vj.lt.vi) go to 2 - if (vj.eq.vi) go to 5 - go to 4 -c -c------if a(vi,vj) is in strict lower triangle -c------check for previous occurrence of a(vj,vi) - 2 lvk = vi - kmax = mark(vi) - 1 - if (kmax .eq. 0) go to 4 - do 3 k=1,kmax - lvk = l(lvk) - if (v(lvk).eq.vj) go to 5 - 3 continue -c----for unentered entries a(vi,vj) - 4 if (sfs.ge.max) go to 101 -c -c------enter vj in element list for vi - mark(vi) = mark(vi) + 1 - v(sfs) = vj - l(sfs) = l(vi) - l(vi) = sfs - sfs = sfs+1 -c -c------enter vi in element list for vj - mark(vj) = mark(vj) + 1 - v(sfs) = vi - l(sfs) = l(vj) - l(vj) = sfs - sfs = sfs+1 - 5 continue - 6 continue -c -c----create degree lists and initialize mark vector - do 7 vi=1,n - dvi = mark(vi) - next(vi) = head(dvi) - head(dvi) = vi - last(vi) = -dvi - nextvi = next(vi) - if (nextvi.gt.0) last(nextvi) = vi - 7 mark(vi) = tag -c - return -c -c ** error- insufficient storage - 101 flag = 9*n + vi - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/mdm.f b/scipy-0.10.1/scipy/integrate/odepack/mdm.f deleted file mode 100644 index b50a408f13..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/mdm.f +++ /dev/null @@ -1,56 +0,0 @@ - subroutine mdm - * (vk,tail, v,l, last,next, mark) -clll. 
optimize -c*********************************************************************** -c mdm -- form element from uneliminated neighbors of vk -c*********************************************************************** - integer vk, tail, v(1), l(1), last(1), next(1), mark(1), - * tag, s,ls,vs,es, b,lb,vb, blp,blpmax - equivalence (vs, es) -c -c----initialize tag and list of uneliminated neighbors - tag = mark(vk) - tail = vk -c -c----for each vertex/element vs/es in element list of vk - ls = l(vk) - 1 s = ls - if (s.eq.0) go to 5 - ls = l(s) - vs = v(s) - if (next(vs).lt.0) go to 2 -c -c------if vs is uneliminated vertex, then tag and append to list of -c------uneliminated neighbors - mark(vs) = tag - l(tail) = s - tail = s - go to 4 -c -c------if es is active element, then ... -c--------for each vertex vb in boundary list of element es - 2 lb = l(es) - blpmax = last(es) - do 3 blp=1,blpmax - b = lb - lb = l(b) - vb = v(b) -c -c----------if vb is untagged vertex, then tag and append to list of -c----------uneliminated neighbors - if (mark(vb).ge.tag) go to 3 - mark(vb) = tag - l(tail) = b - tail = b - 3 continue -c -c--------mark es inactive - mark(es) = tag -c - 4 go to 1 -c -c----terminate list of uneliminated neighbors - 5 l(tail) = 0 -c - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/mdp.f b/scipy-0.10.1/scipy/integrate/odepack/mdp.f deleted file mode 100644 index 084e64ccca..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/mdp.f +++ /dev/null @@ -1,90 +0,0 @@ - subroutine mdp - * (k,ek,tail, v,l, head,last,next, mark) -clll. 
optimize -c*********************************************************************** -c mdp -- purge inactive elements and do mass elimination -c*********************************************************************** - integer ek, tail, v(1), l(1), head(1), last(1), next(1), - * mark(1), tag, free, li,vi,lvi,evi, s,ls,es, ilp,ilpmax -c -c----initialize tag - tag = mark(ek) -c -c----for each vertex vi in ek - li = ek - ilpmax = last(ek) - if (ilpmax.le.0) go to 12 - do 11 ilp=1,ilpmax - i = li - li = l(i) - vi = v(li) -c -c------remove vi from degree list - if (last(vi).eq.0) go to 3 - if (last(vi).gt.0) go to 1 - head(-last(vi)) = next(vi) - go to 2 - 1 next(last(vi)) = next(vi) - 2 if (next(vi).gt.0) last(next(vi)) = last(vi) -c -c------remove inactive items from element list of vi - 3 ls = vi - 4 s = ls - ls = l(s) - if (ls.eq.0) go to 6 - es = v(ls) - if (mark(es).lt.tag) go to 5 - free = ls - l(s) = l(ls) - ls = s - 5 go to 4 -c -c------if vi is interior vertex, then remove from list and eliminate - 6 lvi = l(vi) - if (lvi.ne.0) go to 7 - l(i) = l(li) - li = i -c - k = k+1 - next(vi) = -k - last(ek) = last(ek) - 1 - go to 11 -c -c------else ... 
-c--------classify vertex vi - 7 if (l(lvi).ne.0) go to 9 - evi = v(lvi) - if (next(evi).ge.0) go to 9 - if (mark(evi).lt.0) go to 8 -c -c----------if vi is prototype vertex, then mark as such, initialize -c----------overlap count for corresponding element, and move vi to end -c----------of boundary list - last(vi) = evi - mark(evi) = -1 - l(tail) = li - tail = li - l(i) = l(li) - li = i - go to 10 -c -c----------else if vi is duplicate vertex, then mark as such and adjust -c----------overlap count for corresponding element - 8 last(vi) = 0 - mark(evi) = mark(evi) - 1 - go to 10 -c -c----------else mark vi to compute degree - 9 last(vi) = -ek -c -c--------insert ek in element list of vi - 10 v(free) = ek - l(free) = l(vi) - l(vi) = free - 11 continue -c -c----terminate boundary list - 12 l(tail) = 0 -c - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/mdu.f b/scipy-0.10.1/scipy/integrate/odepack/mdu.f deleted file mode 100644 index 3d1d620c96..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/mdu.f +++ /dev/null @@ -1,88 +0,0 @@ - subroutine mdu - * (ek,dmin, v,l, head,last,next, mark) -clll. 
optimize -c*********************************************************************** -c mdu -- update degrees of uneliminated vertices in ek -c*********************************************************************** - integer ek, dmin, v(1), l(1), head(1), last(1), next(1), - * mark(1), tag, vi,evi,dvi, s,vs,es, b,vb, ilp,ilpmax, - * blp,blpmax - equivalence (vs, es) -c -c----initialize tag - tag = mark(ek) - last(ek) -c -c----for each vertex vi in ek - i = ek - ilpmax = last(ek) - if (ilpmax.le.0) go to 11 - do 10 ilp=1,ilpmax - i = l(i) - vi = v(i) - if (last(vi).lt.0) go to 1 - if (last(vi).eq.0) go to 10 - go to 8 -c -c------if vi neither prototype nor duplicate vertex, then merge elements -c------to compute degree - 1 tag = tag + 1 - dvi = last(ek) -c -c--------for each vertex/element vs/es in element list of vi - s = l(vi) - 2 s = l(s) - if (s.eq.0) go to 9 - vs = v(s) - if (next(vs).lt.0) go to 3 -c -c----------if vs is uneliminated vertex, then tag and adjust degree - mark(vs) = tag - dvi = dvi + 1 - go to 5 -c -c----------if es is active element, then expand -c------------check for outmatched vertex - 3 if (mark(es).lt.0) go to 6 -c -c------------for each vertex vb in es - b = es - blpmax = last(es) - do 4 blp=1,blpmax - b = l(b) - vb = v(b) -c -c--------------if vb is untagged, then tag and adjust degree - if (mark(vb).ge.tag) go to 4 - mark(vb) = tag - dvi = dvi + 1 - 4 continue -c - 5 go to 2 -c -c------else if vi is outmatched vertex, then adjust overlaps but do not -c------compute degree - 6 last(vi) = 0 - mark(es) = mark(es) - 1 - 7 s = l(s) - if (s.eq.0) go to 10 - es = v(s) - if (mark(es).lt.0) mark(es) = mark(es) - 1 - go to 7 -c -c------else if vi is prototype vertex, then calculate degree by -c------inclusion/exclusion and reset overlap count - 8 evi = last(vi) - dvi = last(ek) + last(evi) + mark(evi) - mark(evi) = 0 -c -c------insert vi in appropriate degree list - 9 next(vi) = head(dvi) - head(dvi) = vi - last(vi) = -dvi - if (next(vi).gt.0) 
last(next(vi)) = vi - if (dvi.lt.dmin) dmin = dvi -c - 10 continue -c - 11 return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/nnfc.f b/scipy-0.10.1/scipy/integrate/odepack/nnfc.f deleted file mode 100644 index c503b73640..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/nnfc.f +++ /dev/null @@ -1,154 +0,0 @@ - subroutine nnfc - * (n, r,c,ic, ia,ja,a, z, b, - * lmax,il,jl,ijl,l, d, umax,iu,ju,iju,u, - * row, tmp, irl,jrl, flag) -clll. optimize -c*** subroutine nnfc -c*** numerical ldu-factorization of sparse nonsymmetric matrix and -c solution of system of linear equations (compressed pointer -c storage) -c -c -c input variables.. n, r, c, ic, ia, ja, a, b, -c il, jl, ijl, lmax, iu, ju, iju, umax -c output variables.. z, l, d, u, flag -c -c parameters used internally.. -c nia - irl, - vectors used to find the rows of l. at the kth step -c nia - jrl of the factorization, jrl(k) points to the head -c - of a linked list in jrl of column indices j -c - such j .lt. k and l(k,j) is nonzero. zero -c - indicates the end of the list. irl(j) (j.lt.k) -c - points to the smallest i such that i .ge. k and -c - l(i,j) is nonzero. -c - size of each = n. -c fia - row - holds intermediate values in calculation of u and l. -c - size = n. -c fia - tmp - holds new right-hand side b* for solution of the -c - equation ux = b*. -c - size = n. -c -c internal variables.. -c jmin, jmax - indices of the first and last positions in a row to -c be examined. -c sum - used in calculating tmp. -c - integer rk,umax - integer r(1), c(1), ic(1), ia(1), ja(1), il(1), jl(1), ijl(1) - integer iu(1), ju(1), iju(1), irl(1), jrl(1), flag - double precision a(1), l(1), d(1), u(1), z(1), b(1), row(1) - double precision tmp(1), lki, sum, dk -c -c ****** initialize pointers and test storage *********************** - if(il(n+1)-1 .gt. lmax) go to 104 - if(iu(n+1)-1 .gt. 
umax) go to 107 - do 1 k=1,n - irl(k) = il(k) - jrl(k) = 0 - 1 continue -c -c ****** for each row *********************************************** - do 19 k=1,n -c ****** reverse jrl and zero row where kth row of l will fill in *** - row(k) = 0 - i1 = 0 - if (jrl(k) .eq. 0) go to 3 - i = jrl(k) - 2 i2 = jrl(i) - jrl(i) = i1 - i1 = i - row(i) = 0 - i = i2 - if (i .ne. 0) go to 2 -c ****** set row to zero where u will fill in *********************** - 3 jmin = iju(k) - jmax = jmin + iu(k+1) - iu(k) - 1 - if (jmin .gt. jmax) go to 5 - do 4 j=jmin,jmax - 4 row(ju(j)) = 0 -c ****** place kth row of a in row ********************************** - 5 rk = r(k) - jmin = ia(rk) - jmax = ia(rk+1) - 1 - do 6 j=jmin,jmax - row(ic(ja(j))) = a(j) - 6 continue -c ****** initialize sum, and link through jrl *********************** - sum = b(rk) - i = i1 - if (i .eq. 0) go to 10 -c ****** assign the kth row of l and adjust row, sum **************** - 7 lki = -row(i) -c ****** if l is not required, then comment out the following line ** - l(irl(i)) = -lki - sum = sum + lki * tmp(i) - jmin = iu(i) - jmax = iu(i+1) - 1 - if (jmin .gt. jmax) go to 9 - mu = iju(i) - jmin - do 8 j=jmin,jmax - 8 row(ju(mu+j)) = row(ju(mu+j)) + lki * u(j) - 9 i = jrl(i) - if (i .ne. 0) go to 7 -c -c ****** assign kth row of u and diagonal d, set tmp(k) ************* - 10 if (row(k) .eq. 0.0d0) go to 108 - dk = 1.0d0 / row(k) - d(k) = dk - tmp(k) = sum * dk - if (k .eq. n) go to 19 - jmin = iu(k) - jmax = iu(k+1) - 1 - if (jmin .gt. jmax) go to 12 - mu = iju(k) - jmin - do 11 j=jmin,jmax - 11 u(j) = row(ju(mu+j)) * dk - 12 continue -c -c ****** update irl and jrl, keeping jrl in decreasing order ******** - i = i1 - if (i .eq. 0) go to 18 - 14 irl(i) = irl(i) + 1 - i1 = jrl(i) - if (irl(i) .ge. il(i+1)) go to 17 - ijlb = irl(i) - il(i) + ijl(i) - j = jl(ijlb) - 15 if (i .gt. jrl(j)) go to 16 - j = jrl(j) - go to 15 - 16 jrl(i) = jrl(j) - jrl(j) = i - 17 i = i1 - if (i .ne. 0) go to 14 - 18 if (irl(k) .ge. 
il(k+1)) go to 19 - j = jl(ijl(k)) - jrl(k) = jrl(j) - jrl(j) = k - 19 continue -c -c ****** solve ux = tmp by back substitution ********************** - k = n - do 22 i=1,n - sum = tmp(k) - jmin = iu(k) - jmax = iu(k+1) - 1 - if (jmin .gt. jmax) go to 21 - mu = iju(k) - jmin - do 20 j=jmin,jmax - 20 sum = sum - u(j) * tmp(ju(mu+j)) - 21 tmp(k) = sum - z(c(k)) = sum - 22 k = k-1 - flag = 0 - return -c -c ** error.. insufficient storage for l - 104 flag = 4*n + 1 - return -c ** error.. insufficient storage for u - 107 flag = 7*n + 1 - return -c ** error.. zero pivot - 108 flag = 8*n + k - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/nnsc.f b/scipy-0.10.1/scipy/integrate/odepack/nnsc.f deleted file mode 100644 index 47999616c8..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/nnsc.f +++ /dev/null @@ -1,52 +0,0 @@ - subroutine nnsc - * (n, r, c, il, jl, ijl, l, d, iu, ju, iju, u, z, b, tmp) -clll. optimize -c*** subroutine nnsc -c*** numerical solution of sparse nonsymmetric system of linear -c equations given ldu-factorization (compressed pointer storage) -c -c -c input variables.. n, r, c, il, jl, ijl, l, d, iu, ju, iju, u, b -c output variables.. z -c -c parameters used internally.. -c fia - tmp - temporary vector which gets result of solving ly = b. -c - size = n. -c -c internal variables.. -c jmin, jmax - indices of the first and last positions in a row of -c u or l to be used. -c - integer r(1), c(1), il(1), jl(1), ijl(1), iu(1), ju(1), iju(1) - double precision l(1), d(1), u(1), b(1), z(1), tmp(1), tmpk,sum -c -c ****** set tmp to reordered b ************************************* - do 1 k=1,n - 1 tmp(k) = b(r(k)) -c ****** solve ly = b by forward substitution ********************* - do 3 k=1,n - jmin = il(k) - jmax = il(k+1) - 1 - tmpk = -d(k) * tmp(k) - tmp(k) = -tmpk - if (jmin .gt. 
jmax) go to 3 - ml = ijl(k) - jmin - do 2 j=jmin,jmax - 2 tmp(jl(ml+j)) = tmp(jl(ml+j)) + tmpk * l(j) - 3 continue -c ****** solve ux = y by back substitution ************************ - k = n - do 6 i=1,n - sum = -tmp(k) - jmin = iu(k) - jmax = iu(k+1) - 1 - if (jmin .gt. jmax) go to 5 - mu = iju(k) - jmin - do 4 j=jmin,jmax - 4 sum = sum + u(j) * tmp(ju(mu+j)) - 5 tmp(k) = -sum - z(c(k)) = -sum - k = k - 1 - 6 continue - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/nntc.f b/scipy-0.10.1/scipy/integrate/odepack/nntc.f deleted file mode 100644 index 237912096a..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/nntc.f +++ /dev/null @@ -1,52 +0,0 @@ - subroutine nntc - * (n, r, c, il, jl, ijl, l, d, iu, ju, iju, u, z, b, tmp) -clll. optimize -c*** subroutine nntc -c*** numeric solution of the transpose of a sparse nonsymmetric system -c of linear equations given lu-factorization (compressed pointer -c storage) -c -c -c input variables.. n, r, c, il, jl, ijl, l, d, iu, ju, iju, u, b -c output variables.. z -c -c parameters used internally.. -c fia - tmp - temporary vector which gets result of solving ut y = b -c - size = n. -c -c internal variables.. -c jmin, jmax - indices of the first and last positions in a row of -c u or l to be used. -c - integer r(1), c(1), il(1), jl(1), ijl(1), iu(1), ju(1), iju(1) - double precision l(1), d(1), u(1), b(1), z(1), tmp(1), tmpk,sum -c -c ****** set tmp to reordered b ************************************* - do 1 k=1,n - 1 tmp(k) = b(c(k)) -c ****** solve ut y = b by forward substitution ******************* - do 3 k=1,n - jmin = iu(k) - jmax = iu(k+1) - 1 - tmpk = -tmp(k) - if (jmin .gt. jmax) go to 3 - mu = iju(k) - jmin - do 2 j=jmin,jmax - 2 tmp(ju(mu+j)) = tmp(ju(mu+j)) + tmpk * u(j) - 3 continue -c ****** solve lt x = y by back substitution ********************** - k = n - do 6 i=1,n - sum = -tmp(k) - jmin = il(k) - jmax = il(k+1) - 1 - if (jmin .gt. 
jmax) go to 5 - ml = ijl(k) - jmin - do 4 j=jmin,jmax - 4 sum = sum + l(j) * tmp(jl(ml+j)) - 5 tmp(k) = -sum * d(k) - z(r(k)) = tmp(k) - k = k - 1 - 6 continue - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/nroc.f b/scipy-0.10.1/scipy/integrate/odepack/nroc.f deleted file mode 100644 index b91fcf863a..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/nroc.f +++ /dev/null @@ -1,234 +0,0 @@ - subroutine nroc (n, ic, ia, ja, a, jar, ar, p, flag) -clll. optimize -c -c ---------------------------------------------------------------- -c -c yale sparse matrix package - nonsymmetric codes -c solving the system of equations mx = b -c -c i. calling sequences -c the coefficient matrix can be processed by an ordering routine -c (e.g., to reduce fillin or ensure numerical stability) before using -c the remaining subroutines. if no reordering is done, then set -c r(i) = c(i) = ic(i) = i for i=1,...,n. if an ordering subroutine -c is used, then nroc should be used to reorder the coefficient matrix -c the calling sequence is -- -c ( (matrix ordering)) -c (nroc (matrix reordering)) -c nsfc (symbolic factorization to determine where fillin will -c occur during numeric factorization) -c nnfc (numeric factorization into product ldu of unit lower -c triangular matrix l, diagonal matrix d, and unit -c upper triangular matrix u, and solution of linear -c system) -c nnsc (solution of linear system for additional right-hand -c side using ldu factorization from nnfc) -c (if only one system of equations is to be solved, then the -c subroutine trk should be used.) -c -c ii. storage of sparse matrices -c the nonzero entries of the coefficient matrix m are stored -c row-by-row in the array a. to identify the individual nonzero -c entries in each row, we need to know in which column each entry -c lies. the column indices which correspond to the nonzero entries -c of m are stored in the array ja. i.e., if a(k) = m(i,j), then -c ja(k) = j. 
in addition, we need to know where each row starts and -c how long it is. the index positions in ja and a where the rows of -c m begin are stored in the array ia. i.e., if m(i,j) is the first -c (leftmost) entry in the i-th row and a(k) = m(i,j), then -c ia(i) = k. moreover, the index in ja and a of the first location -c following the last element in the last row is stored in ia(n+1). -c thus, the number of entries in the i-th row is given by -c ia(i+1) - ia(i), the nonzero entries of the i-th row are stored -c consecutively in -c a(ia(i)), a(ia(i)+1), ..., a(ia(i+1)-1), -c and the corresponding column indices are stored consecutively in -c ja(ia(i)), ja(ia(i)+1), ..., ja(ia(i+1)-1). -c for example, the 5 by 5 matrix -c ( 1. 0. 2. 0. 0.) -c ( 0. 3. 0. 0. 0.) -c m = ( 0. 4. 5. 6. 0.) -c ( 0. 0. 0. 7. 0.) -c ( 0. 0. 0. 8. 9.) -c would be stored as -c - 1 2 3 4 5 6 7 8 9 -c ---+-------------------------- -c ia - 1 3 4 7 8 10 -c ja - 1 3 2 2 3 4 4 4 5 -c a - 1. 2. 3. 4. 5. 6. 7. 8. 9. . -c -c the strict upper (lower) triangular portion of the matrix -c u (l) is stored in a similar fashion using the arrays iu, ju, u -c (il, jl, l) except that an additional array iju (ijl) is used to -c compress storage of ju (jl) by allowing some sequences of column -c (row) indices to used for more than one row (column) (n.b., l is -c stored by columns). iju(k) (ijl(k)) points to the starting -c location in ju (jl) of entries for the kth row (column). -c compression in ju (jl) occurs in two ways. first, if a row -c (column) i was merged into the current row (column) k, and the -c number of elements merged in from (the tail portion of) row -c (column) i is the same as the final length of row (column) k, then -c the kth row (column) and the tail of row (column) i are identical -c and iju(k) (ijl(k)) points to the start of the tail. 
second, if -c some tail portion of the (k-1)st row (column) is identical to the -c head of the kth row (column), then iju(k) (ijl(k)) points to the -c start of that tail portion. for example, the nonzero structure of -c the strict upper triangular part of the matrix -c d 0 x x x -c 0 d 0 x x -c 0 0 d x 0 -c 0 0 0 d x -c 0 0 0 0 d -c would be represented as -c - 1 2 3 4 5 6 -c ----+------------ -c iu - 1 4 6 7 8 8 -c ju - 3 4 5 4 -c iju - 1 2 4 3 . -c the diagonal entries of l and u are assumed to be equal to one and -c are not stored. the array d contains the reciprocals of the -c diagonal entries of the matrix d. -c -c iii. additional storage savings -c in nsfc, r and ic can be the same array in the calling -c sequence if no reordering of the coefficient matrix has been done. -c in nnfc, r, c, and ic can all be the same array if no -c reordering has been done. if only the rows have been reordered, -c then c and ic can be the same array. if the row and column -c orderings are the same, then r and c can be the same array. z and -c row can be the same array. -c in nnsc or nntc, r and c can be the same array if no -c reordering has been done or if the row and column orderings are the -c same. z and b can be the same array. however, then b will be -c destroyed. -c -c iv. parameters -c following is a list of parameters to the programs. names are -c uniform among the various subroutines. class abbreviations are -- -c n - integer variable -c f - real variable -c v - supplies a value to a subroutine -c r - returns a result from a subroutine -c i - used internally by a subroutine -c a - array -c -c class - parameter -c ------+---------- -c fva - a - nonzero entries of the coefficient matrix m, stored -c - by rows. -c - size = number of nonzero entries in m. -c fva - b - right-hand side b. -c - size = n. -c nva - c - ordering of the columns of m. -c - size = n. -c fvra - d - reciprocals of the diagonal entries of the matrix d. -c - size = n. -c nr - flag - error flag. 
values and their meanings are -- -c - 0 no errors detected -c - n+k null row in a -- row = k -c - 2n+k duplicate entry in a -- row = k -c - 3n+k insufficient storage for jl -- row = k -c - 4n+1 insufficient storage for l -c - 5n+k null pivot -- row = k -c - 6n+k insufficient storage for ju -- row = k -c - 7n+1 insufficient storage for u -c - 8n+k zero pivot -- row = k -c nva - ia - pointers to delimit the rows of a. -c - size = n+1. -c nvra - ijl - pointers to the first element in each column in jl, -c - used to compress storage in jl. -c - size = n. -c nvra - iju - pointers to the first element in each row in ju, used -c - to compress storage in ju. -c - size = n. -c nvra - il - pointers to delimit the columns of l. -c - size = n+1. -c nvra - iu - pointers to delimit the rows of u. -c - size = n+1. -c nva - ja - column numbers corresponding to the elements of a. -c - size = size of a. -c nvra - jl - row numbers corresponding to the elements of l. -c - size = jlmax. -c nv - jlmax - declared dimension of jl. jlmax must be larger than -c - the number of nonzeros in the strict lower triangle -c - of m plus fillin minus compression. -c nvra - ju - column numbers corresponding to the elements of u. -c - size = jumax. -c nv - jumax - declared dimension of ju. jumax must be larger than -c - the number of nonzeros in the strict upper triangle -c - of m plus fillin minus compression. -c fvra - l - nonzero entries in the strict lower triangular portion -c - of the matrix l, stored by columns. -c - size = lmax. -c nv - lmax - declared dimension of l. lmax must be larger than -c - the number of nonzeros in the strict lower triangle -c - of m plus fillin (il(n+1)-1 after nsfc). -c nv - n - number of variables/equations. -c nva - r - ordering of the rows of m. -c - size = n. -c fvra - u - nonzero entries in the strict upper triangular portion -c - of the matrix u, stored by rows. -c - size = umax. -c nv - umax - declared dimension of u. 
umax must be larger than -c - the number of nonzeros in the strict upper triangle -c - of m plus fillin (iu(n+1)-1 after nsfc). -c fra - z - solution x. -c - size = n. -c -c ---------------------------------------------------------------- -c -c*** subroutine nroc -c*** reorders rows of a, leaving row order unchanged -c -c -c input parameters.. n, ic, ia, ja, a -c output parameters.. ja, a, flag -c -c parameters used internally.. -c nia - p - at the kth step, p is a linked list of the reordered -c - column indices of the kth row of a. p(n+1) points -c - to the first entry in the list. -c - size = n+1. -c nia - jar - at the kth step,jar contains the elements of the -c - reordered column indices of a. -c - size = n. -c fia - ar - at the kth step, ar contains the elements of the -c - reordered row of a. -c - size = n. -c - integer ic(1), ia(1), ja(1), jar(1), p(1), flag - double precision a(1), ar(1) -c -c ****** for each nonempty row ******************************* - do 5 k=1,n - jmin = ia(k) - jmax = ia(k+1) - 1 - if(jmin .gt. jmax) go to 5 - p(n+1) = n + 1 -c ****** insert each element in the list ********************* - do 3 j=jmin,jmax - newj = ic(ja(j)) - i = n + 1 - 1 if(p(i) .ge. newj) go to 2 - i = p(i) - go to 1 - 2 if(p(i) .eq. newj) go to 102 - p(newj) = p(i) - p(i) = newj - jar(newj) = ja(j) - ar(newj) = a(j) - 3 continue -c ****** replace old row in ja and a ************************* - i = n + 1 - do 4 j=jmin,jmax - i = p(i) - ja(j) = jar(i) - 4 a(j) = ar(i) - 5 continue - flag = 0 - return -c -c ** error.. duplicate entry in a - 102 flag = n + k - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/nsfc.f b/scipy-0.10.1/scipy/integrate/odepack/nsfc.f deleted file mode 100644 index e28ebb3d15..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/nsfc.f +++ /dev/null @@ -1,331 +0,0 @@ - subroutine nsfc - * (n, r, ic, ia,ja, jlmax,il,jl,ijl, jumax,iu,ju,iju, - * q, ira,jra, irac, irl,jrl, iru,jru, flag) -clll. 
optimize -c*** subroutine nsfc -c*** symbolic ldu-factorization of nonsymmetric sparse matrix -c (compressed pointer storage) -c -c -c input variables.. n, r, ic, ia, ja, jlmax, jumax. -c output variables.. il, jl, ijl, iu, ju, iju, flag. -c -c parameters used internally.. -c nia - q - suppose m* is the result of reordering m. if -c - processing of the ith row of m* (hence the ith -c - row of u) is being done, q(j) is initially -c - nonzero if m*(i,j) is nonzero (j.ge.i). since -c - values need not be stored, each entry points to the -c - next nonzero and q(n+1) points to the first. n+1 -c - indicates the end of the list. for example, if n=9 -c - and the 5th row of m* is -c - 0 x x 0 x 0 0 x 0 -c - then q will initially be -c - a a a a 8 a a 10 5 (a - arbitrary). -c - as the algorithm proceeds, other elements of q -c - are inserted in the list because of fillin. -c - q is used in an analogous manner to compute the -c - ith column of l. -c - size = n+1. -c nia - ira, - vectors used to find the columns of m. at the kth -c nia - jra, step of the factorization, irac(k) points to the -c nia - irac head of a linked list in jra of row indices i -c - such that i .ge. k and m(i,k) is nonzero. zero -c - indicates the end of the list. ira(i) (i.ge.k) -c - points to the smallest j such that j .ge. k and -c - m(i,j) is nonzero. -c - size of each = n. -c nia - irl, - vectors used to find the rows of l. at the kth step -c nia - jrl of the factorization, jrl(k) points to the head -c - of a linked list in jrl of column indices j -c - such j .lt. k and l(k,j) is nonzero. zero -c - indicates the end of the list. irl(j) (j.lt.k) -c - points to the smallest i such that i .ge. k and -c - l(i,j) is nonzero. -c - size of each = n. -c nia - iru, - vectors used in a manner analogous to irl and jrl -c nia - jru to find the columns of u. -c - size of each = n. -c -c internal variables.. -c jlptr - points to the last position used in jl. -c juptr - points to the last position used in ju. 
-c jmin,jmax - are the indices in a or u of the first and last -c elements to be examined in a given row. -c for example, jmin=ia(k), jmax=ia(k+1)-1. -c - integer cend, qm, rend, rk, vj - integer ia(1), ja(1), ira(1), jra(1), il(1), jl(1), ijl(1) - integer iu(1), ju(1), iju(1), irl(1), jrl(1), iru(1), jru(1) - integer r(1), ic(1), q(1), irac(1), flag -c -c ****** initialize pointers **************************************** - np1 = n + 1 - jlmin = 1 - jlptr = 0 - il(1) = 1 - jumin = 1 - juptr = 0 - iu(1) = 1 - do 1 k=1,n - irac(k) = 0 - jra(k) = 0 - jrl(k) = 0 - 1 jru(k) = 0 -c ****** initialize column pointers for a *************************** - do 2 k=1,n - rk = r(k) - iak = ia(rk) - if (iak .ge. ia(rk+1)) go to 101 - jaiak = ic(ja(iak)) - if (jaiak .gt. k) go to 105 - jra(k) = irac(jaiak) - irac(jaiak) = k - 2 ira(k) = iak -c -c ****** for each column of l and row of u ************************** - do 41 k=1,n -c -c ****** initialize q for computing kth column of l ***************** - q(np1) = np1 - luk = -1 -c ****** by filling in kth column of a ****************************** - vj = irac(k) - if (vj .eq. 0) go to 5 - 3 qm = np1 - 4 m = qm - qm = q(m) - if (qm .lt. vj) go to 4 - if (qm .eq. vj) go to 102 - luk = luk + 1 - q(m) = vj - q(vj) = qm - vj = jra(vj) - if (vj .ne. 0) go to 3 -c ****** link through jru ******************************************* - 5 lastid = 0 - lasti = 0 - ijl(k) = jlptr - i = k - 6 i = jru(i) - if (i .eq. 0) go to 10 - qm = np1 - jmin = irl(i) - jmax = ijl(i) + il(i+1) - il(i) - 1 - long = jmax - jmin - if (long .lt. 0) go to 6 - jtmp = jl(jmin) - if (jtmp .ne. k) long = long + 1 - if (jtmp .eq. k) r(i) = -r(i) - if (lastid .ge. long) go to 7 - lasti = i - lastid = long -c ****** and merge the corresponding columns into the kth column **** - 7 do 9 j=jmin,jmax - vj = jl(j) - 8 m = qm - qm = q(m) - if (qm .lt. vj) go to 8 - if (qm .eq. 
vj) go to 9 - luk = luk + 1 - q(m) = vj - q(vj) = qm - qm = vj - 9 continue - go to 6 -c ****** lasti is the longest column merged into the kth ************ -c ****** see if it equals the entire kth column ********************* - 10 qm = q(np1) - if (qm .ne. k) go to 105 - if (luk .eq. 0) go to 17 - if (lastid .ne. luk) go to 11 -c ****** if so, jl can be compressed ******************************** - irll = irl(lasti) - ijl(k) = irll + 1 - if (jl(irll) .ne. k) ijl(k) = ijl(k) - 1 - go to 17 -c ****** if not, see if kth column can overlap the previous one ***** - 11 if (jlmin .gt. jlptr) go to 15 - qm = q(qm) - do 12 j=jlmin,jlptr - if (jl(j).lt.qm) go to 12 - if (jl(j).eq.qm) go to 13 - go to 15 - 12 continue - go to 15 - 13 ijl(k) = j - do 14 i=j,jlptr - if (jl(i) .ne. qm) go to 15 - qm = q(qm) - if (qm .gt. n) go to 17 - 14 continue - jlptr = j - 1 -c ****** move column indices from q to jl, update vectors *********** - 15 jlmin = jlptr + 1 - ijl(k) = jlmin - if (luk .eq. 0) go to 17 - jlptr = jlptr + luk - if (jlptr .gt. jlmax) go to 103 - qm = q(np1) - do 16 j=jlmin,jlptr - qm = q(qm) - 16 jl(j) = qm - 17 irl(k) = ijl(k) - il(k+1) = il(k) + luk -c -c ****** initialize q for computing kth row of u ******************** - q(np1) = np1 - luk = -1 -c ****** by filling in kth row of reordered a *********************** - rk = r(k) - jmin = ira(k) - jmax = ia(rk+1) - 1 - if (jmin .gt. jmax) go to 20 - do 19 j=jmin,jmax - vj = ic(ja(j)) - qm = np1 - 18 m = qm - qm = q(m) - if (qm .lt. vj) go to 18 - if (qm .eq. vj) go to 102 - luk = luk + 1 - q(m) = vj - q(vj) = qm - 19 continue -c ****** link through jrl, ****************************************** - 20 lastid = 0 - lasti = 0 - iju(k) = juptr - i = k - i1 = jrl(k) - 21 i = i1 - if (i .eq. 0) go to 26 - i1 = jrl(i) - qm = np1 - jmin = iru(i) - jmax = iju(i) + iu(i+1) - iu(i) - 1 - long = jmax - jmin - if (long .lt. 0) go to 21 - jtmp = ju(jmin) - if (jtmp .eq. 
k) go to 22 -c ****** update irl and jrl, ***************************************** - long = long + 1 - cend = ijl(i) + il(i+1) - il(i) - irl(i) = irl(i) + 1 - if (irl(i) .ge. cend) go to 22 - j = jl(irl(i)) - jrl(i) = jrl(j) - jrl(j) = i - 22 if (lastid .ge. long) go to 23 - lasti = i - lastid = long -c ****** and merge the corresponding rows into the kth row ********** - 23 do 25 j=jmin,jmax - vj = ju(j) - 24 m = qm - qm = q(m) - if (qm .lt. vj) go to 24 - if (qm .eq. vj) go to 25 - luk = luk + 1 - q(m) = vj - q(vj) = qm - qm = vj - 25 continue - go to 21 -c ****** update jrl(k) and irl(k) *********************************** - 26 if (il(k+1) .le. il(k)) go to 27 - j = jl(irl(k)) - jrl(k) = jrl(j) - jrl(j) = k -c ****** lasti is the longest row merged into the kth *************** -c ****** see if it equals the entire kth row ************************ - 27 qm = q(np1) - if (qm .ne. k) go to 105 - if (luk .eq. 0) go to 34 - if (lastid .ne. luk) go to 28 -c ****** if so, ju can be compressed ******************************** - irul = iru(lasti) - iju(k) = irul + 1 - if (ju(irul) .ne. k) iju(k) = iju(k) - 1 - go to 34 -c ****** if not, see if kth row can overlap the previous one ******** - 28 if (jumin .gt. juptr) go to 32 - qm = q(qm) - do 29 j=jumin,juptr - if (ju(j).lt.qm) go to 29 - if (ju(j).eq.qm) go to 30 - go to 32 - 29 continue - go to 32 - 30 iju(k) = j - do 31 i=j,juptr - if (ju(i) .ne. qm) go to 32 - qm = q(qm) - if (qm .gt. n) go to 34 - 31 continue - juptr = j - 1 -c ****** move row indices from q to ju, update vectors ************** - 32 jumin = juptr + 1 - iju(k) = jumin - if (luk .eq. 0) go to 34 - juptr = juptr + luk - if (juptr .gt. jumax) go to 106 - qm = q(np1) - do 33 j=jumin,juptr - qm = q(qm) - 33 ju(j) = qm - 34 iru(k) = iju(k) - iu(k+1) = iu(k) + luk -c -c ****** update iru, jru ******************************************** - i = k - 35 i1 = jru(i) - if (r(i) .lt. 0) go to 36 - rend = iju(i) + iu(i+1) - iu(i) - if (iru(i) .ge. 
rend) go to 37 - j = ju(iru(i)) - jru(i) = jru(j) - jru(j) = i - go to 37 - 36 r(i) = -r(i) - 37 i = i1 - if (i .eq. 0) go to 38 - iru(i) = iru(i) + 1 - go to 35 -c -c ****** update ira, jra, irac ************************************** - 38 i = irac(k) - if (i .eq. 0) go to 41 - 39 i1 = jra(i) - ira(i) = ira(i) + 1 - if (ira(i) .ge. ia(r(i)+1)) go to 40 - irai = ira(i) - jairai = ic(ja(irai)) - if (jairai .gt. i) go to 40 - jra(i) = irac(jairai) - irac(jairai) = i - 40 i = i1 - if (i .ne. 0) go to 39 - 41 continue -c - ijl(n) = jlptr - iju(n) = juptr - flag = 0 - return -c -c ** error.. null row in a - 101 flag = n + rk - return -c ** error.. duplicate entry in a - 102 flag = 2*n + rk - return -c ** error.. insufficient storage for jl - 103 flag = 3*n + k - return -c ** error.. null pivot - 105 flag = 5*n + k - return -c ** error.. insufficient storage for ju - 106 flag = 6*n + k - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/odrv.f b/scipy-0.10.1/scipy/integrate/odepack/odrv.f deleted file mode 100644 index c01d32cbe5..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/odrv.f +++ /dev/null @@ -1,177 +0,0 @@ - subroutine odrv - * (n, ia,ja,a, p,ip, nsp,isp, path, flag) -clll. optimize -c 5/2/83 -c*********************************************************************** -c odrv -- driver for sparse matrix reordering routines -c*********************************************************************** -c -c description -c -c odrv finds a minimum degree ordering of the rows and columns -c of a matrix m stored in (ia,ja,a) format (see below). for the -c reordered matrix, the work and storage required to perform -c gaussian elimination is (usually) significantly less. -c -c note.. odrv and its subordinate routines have been modified to -c compute orderings for general matrices, not necessarily having any -c symmetry. the miminum degree ordering is computed for the -c structure of the symmetric matrix m + m-transpose. 
-c modifications to the original odrv module have been made in -c the coding in subroutine mdi, and in the initial comments in -c subroutines odrv and md. -c -c if only the nonzero entries in the upper triangle of m are being -c stored, then odrv symmetrically reorders (ia,ja,a), (optionally) -c with the diagonal entries placed first in each row. this is to -c ensure that if m(i,j) will be in the upper triangle of m with -c respect to the new ordering, then m(i,j) is stored in row i (and -c thus m(j,i) is not stored), whereas if m(i,j) will be in the -c strict lower triangle of m, then m(j,i) is stored in row j (and -c thus m(i,j) is not stored). -c -c -c storage of sparse matrices -c -c the nonzero entries of the matrix m are stored row-by-row in the -c array a. to identify the individual nonzero entries in each row, -c we need to know in which column each entry lies. these column -c indices are stored in the array ja. i.e., if a(k) = m(i,j), then -c ja(k) = j. to identify the individual rows, we need to know where -c each row starts. these row pointers are stored in the array ia. -c i.e., if m(i,j) is the first nonzero entry (stored) in the i-th row -c and a(k) = m(i,j), then ia(i) = k. moreover, ia(n+1) points to -c the first location following the last element in the last row. -c thus, the number of entries in the i-th row is ia(i+1) - ia(i), -c the nonzero entries in the i-th row are stored consecutively in -c -c a(ia(i)), a(ia(i)+1), ..., a(ia(i+1)-1), -c -c and the corresponding column indices are stored consecutively in -c -c ja(ia(i)), ja(ia(i)+1), ..., ja(ia(i+1)-1). -c -c since the coefficient matrix is symmetric, only the nonzero entries -c in the upper triangle need be stored. 
for example, the matrix -c -c ( 1 0 2 3 0 ) -c ( 0 4 0 0 0 ) -c m = ( 2 0 5 6 0 ) -c ( 3 0 6 7 8 ) -c ( 0 0 0 8 9 ) -c -c could be stored as -c -c - 1 2 3 4 5 6 7 8 9 10 11 12 13 -c ---+-------------------------------------- -c ia - 1 4 5 8 12 14 -c ja - 1 3 4 2 1 3 4 1 3 4 5 4 5 -c a - 1 2 3 4 2 5 6 3 6 7 8 8 9 -c -c or (symmetrically) as -c -c - 1 2 3 4 5 6 7 8 9 -c ---+-------------------------- -c ia - 1 4 5 7 9 10 -c ja - 1 3 4 2 3 4 4 5 5 -c a - 1 2 3 4 5 6 7 8 9 . -c -c -c parameters -c -c n - order of the matrix -c -c ia - integer one-dimensional array containing pointers to delimit -c rows in ja and a. dimension = n+1 -c -c ja - integer one-dimensional array containing the column indices -c corresponding to the elements of a. dimension = number of -c nonzero entries in (the upper triangle of) m -c -c a - real one-dimensional array containing the nonzero entries in -c (the upper triangle of) m, stored by rows. dimension = -c number of nonzero entries in (the upper triangle of) m -c -c p - integer one-dimensional array used to return the permutation -c of the rows and columns of m corresponding to the minimum -c degree ordering. dimension = n -c -c ip - integer one-dimensional array used to return the inverse of -c the permutation returned in p. dimension = n -c -c nsp - declared dimension of the one-dimensional array isp. nsp -c must be at least 3n+4k, where k is the number of nonzeroes -c in the strict upper triangle of m -c -c isp - integer one-dimensional array used for working storage. -c dimension = nsp -c -c path - integer path specification. 
values and their meanings are - -c 1 find minimum degree ordering only -c 2 find minimum degree ordering and reorder symmetrically -c stored matrix (used when only the nonzero entries in -c the upper triangle of m are being stored) -c 3 reorder symmetrically stored matrix as specified by -c input permutation (used when an ordering has already -c been determined and only the nonzero entries in the -c upper triangle of m are being stored) -c 4 same as 2 but put diagonal entries at start of each row -c 5 same as 3 but put diagonal entries at start of each row -c -c flag - integer error flag. values and their meanings are - -c 0 no errors detected -c 9n+k insufficient storage in md -c 10n+1 insufficient storage in odrv -c 11n+1 illegal path specification -c -c -c conversion from real to double precision -c -c change the real declarations in odrv and sro to double precision -c declarations. -c -c----------------------------------------------------------------------- -c - integer ia(1), ja(1), p(1), ip(1), isp(1), path, flag, - * v, l, head, tmp, q - double precision a(1) - logical dflag -c -c----initialize error flag and validate path specification - flag = 0 - if (path.lt.1 .or. 5.lt.path) go to 111 -c -c----allocate storage and find minimum degree ordering - if ((path-1) * (path-2) * (path-4) .ne. 0) go to 1 - max = (nsp-n)/2 - v = 1 - l = v + max - head = l + max - next = head + n - if (max.lt.n) go to 110 -c - call md - * (n, ia,ja, max,isp(v),isp(l), isp(head),p,ip, isp(v), flag) - if (flag.ne.0) go to 100 -c -c----allocate storage and symmetrically reorder matrix - 1 if ((path-2) * (path-3) * (path-4) * (path-5) .ne. 0) go to 2 - tmp = (nsp+1) - n - q = tmp - (ia(n+1)-1) - if (q.lt.1) go to 110 -c - dflag = path.eq.4 .or. 
path.eq.5 - call sro - * (n, ip, ia, ja, a, isp(tmp), isp(q), dflag) -c - 2 return -c -c ** error -- error detected in md - 100 return -c ** error -- insufficient storage - 110 flag = 10*n + 1 - return -c ** error -- illegal path specified - 111 flag = 11*n + 1 - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/pjibt.f b/scipy-0.10.1/scipy/integrate/odepack/pjibt.f deleted file mode 100644 index cb7171c560..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/pjibt.f +++ /dev/null @@ -1,161 +0,0 @@ - subroutine pjibt (neq, y, yh, nyh, ewt, rtem, savr, s, wm, iwm, - 1 res, jac, adda) -clll. optimize - external res, jac, adda - integer neq, nyh, iwm - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu - integer i, ier, iia, iib, iic, ipa, ipb, ipc, ires, j, j1, j2, - 1 k, k1, lenp, lblox, lpb, lpc, mb, mbsq, mwid, nb - double precision y, yh, ewt, rtem, savr, s, wm - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision con, fac, hl0, r, srur - dimension neq(1), y(1), yh(nyh,1), ewt(1), rtem(1), - 1 s(1), savr(1), wm(*), iwm(*) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu -c----------------------------------------------------------------------- -c pjibt is called by stodi to compute and process the matrix -c p = a - h*el(1)*j , where j is an approximation to the jacobian dr/dy, -c and r = g(t,y) - a(t,y)*s. here j is computed by the user-supplied -c routine jac if miter = 1, or by finite differencing if miter = 2. -c j is stored in wm, rescaled, and adda is called to generate p. -c p is then subjected to lu decomposition by decbt in preparation -c for later solution of linear systems with p as coefficient matrix. 
-c -c in addition to variables described previously, communication -c with pjibt uses the following.. -c y = array containing predicted values on entry. -c rtem = work array of length n (acor in stodi). -c savr = array used for output only. on output it contains the -c residual evaluated at current values of t and y. -c s = array containing predicted values of dy/dt (savf in stodi). -c wm = real work space for matrices. on output it contains the -c lu decomposition of p. -c storage of matrix elements starts at wm(3). -c wm also contains the following matrix-related data.. -c wm(1) = dsqrt(uround), used in numerical jacobian increments. -c iwm = integer work space containing pivot information, starting at -c iwm(21). iwm also contains block structure parameters -c mb = iwm(1) and nb = iwm(2). -c el0 = el(1) (input). -c ierpj = output error flag. -c = 0 if no trouble occurred, -c = 1 if the p matrix was found to be unfactorable, -c = ires (= 2 or 3) if res returned ires = 2 or 3. -c jcur = output flag = 1 to indicate that the jacobian matrix -c (or approximation) is now current. -c this routine also uses the common variables el0, h, tn, uround, -c miter, n, nre, and nje. -c----------------------------------------------------------------------- - nje = nje + 1 - hl0 = h*el0 - ierpj = 0 - jcur = 1 - mb = iwm(1) - nb = iwm(2) - mbsq = mb*mb - lblox = mbsq*nb - lpb = 3 + lblox - lpc = lpb + lblox - lenp = 3*lblox - go to (100, 200), miter -c if miter = 1, call res, then jac, and multiply by scalar. ------------ - 100 ires = 1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - do 110 i = 1,lenp - 110 wm(i+2) = 0.0d0 - call jac (neq, tn, y, s, mb, nb, wm(3), wm(lpb), wm(lpc)) - con = -hl0 - do 120 i = 1,lenp - 120 wm(i+2) = wm(i+2)*con - go to 260 -c -c if miter = 2, make 3*mb + 1 calls to res to approximate j. ----------- - 200 continue - ires = -1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 
1) go to 600 - mwid = 3*mb - srur = wm(1) - do 205 i = 1,lenp - 205 wm(2+i) = 0.0d0 - do 250 k = 1,3 - do 240 j = 1,mb -c increment y(i) for group of column indices, and call res. ---- - j1 = j+(k-1)*mb - do 210 i = j1,n,mwid - r = dmax1(srur*dabs(y(i)),0.01d0/ewt(i)) - y(i) = y(i) + r - 210 continue - call res (neq, tn, y, s, rtem, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - do 215 i = 1,n - 215 rtem(i) = rtem(i) - savr(i) - k1 = k - do 230 i = j1,n,mwid -c get jacobian elements in column i (block-column k1). ------- - y(i) = yh(i,1) - r = dmax1(srur*dabs(y(i)),0.01d0/ewt(i)) - fac = -hl0/r -c compute and load elements pa(*,j,k1). ---------------------- - iia = i - j - ipa = 2 + (j-1)*mb + (k1-1)*mbsq - do 221 j2 = 1,mb - 221 wm(ipa+j2) = rtem(iia+j2)*fac - if (k1 .le. 1) go to 223 -c compute and load elements pb(*,j,k1-1). -------------------- - iib = iia - mb - ipb = ipa + lblox - mbsq - do 222 j2 = 1,mb - 222 wm(ipb+j2) = rtem(iib+j2)*fac - 223 continue - if (k1 .ge. nb) go to 225 -c compute and load elements pc(*,j,k1+1). -------------------- - iic = iia + mb - ipc = ipa + 2*lblox + mbsq - do 224 j2 = 1,mb - 224 wm(ipc+j2) = rtem(iic+j2)*fac - 225 continue - if (k1 .ne. 3) go to 227 -c compute and load elements pc(*,j,1). ----------------------- - ipc = ipa - 2*mbsq + 2*lblox - do 226 j2 = 1,mb - 226 wm(ipc+j2) = rtem(j2)*fac - 227 continue - if (k1 .ne. nb-2) go to 229 -c compute and load elements pb(*,j,nb). ---------------------- - iib = n - mb - ipb = ipa + 2*mbsq + lblox - do 228 j2 = 1,mb - 228 wm(ipb+j2) = rtem(iib+j2)*fac - 229 k1 = k1 + 3 - 230 continue - 240 continue - 250 continue -c res call for first corrector iteration. ------------------------------ - ires = 1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 -c add matrix a. -------------------------------------------------------- - 260 continue - call adda (neq, tn, y, mb, nb, wm(3), wm(lpb), wm(lpc)) -c do lu decomposition on p. 
-------------------------------------------- - call decbt (mb, nb, wm(3), wm(lpb), wm(lpc), iwm(21), ier) - if (ier .ne. 0) ierpj = 1 - return -c error return for ires = 2 or ires = 3 return from res. --------------- - 600 ierpj = ires - return -c----------------------- end of subroutine pjibt ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/prep.f b/scipy-0.10.1/scipy/integrate/odepack/prep.f deleted file mode 100644 index 7bc39aaddd..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/prep.f +++ /dev/null @@ -1,259 +0,0 @@ - subroutine prep (neq, y, yh, savf, ewt, ftem, ia, ja, - 1 wk, iwk, ipper, f, jac) -clll. optimize - external f,jac - integer neq, ia, ja, iwk, ipper - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 1 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 2 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 3 nslj, ngp, nlu, nnz, nsp, nzl, nzu - integer i, ibr, ier, ipil, ipiu, iptt1, iptt2, j, jfound, k, - 1 knew, kmax, kmin, ldif, lenigp, liwk, maxg, np1, nzsut - double precision y, yh, savf, ewt, ftem, wk - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision con0, conmin, ccmxj, psmall, rbig, seth - double precision dq, dyj, erwt, fac, yj - dimension neq(1), y(1), yh(1), savf(1), ewt(1), ftem(1), - 1 ia(1), ja(1), wk(1), iwk(1) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lss001/ con0, conmin, ccmxj, psmall, rbig, seth, - 1 iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 2 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 3 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, 
- 4 nslj, ngp, nlu, nnz, nsp, nzl, nzu -c----------------------------------------------------------------------- -c this routine performs preprocessing related to the sparse linear -c systems that must be solved if miter = 1 or 2. -c the operations that are performed here are.. -c * compute sparseness structure of jacobian according to moss, -c * compute grouping of column indices (miter = 2), -c * compute a new ordering of rows and columns of the matrix, -c * reorder ja corresponding to the new ordering, -c * perform a symbolic lu factorization of the matrix, and -c * set pointers for segments of the iwk/wk array. -c in addition to variables described previously, prep uses the -c following for communication.. -c yh = the history array. only the first column, containing the -c current y vector, is used. used only if moss .ne. 0. -c savf = a work array of length neq, used only if moss .ne. 0. -c ewt = array of length neq containing (inverted) error weights. -c used only if moss = 2 or if istate = moss = 1. -c ftem = a work array of length neq, identical to acor in the driver, -c used only if moss = 2. -c wk = a real work array of length lenwk, identical to wm in -c the driver. -c iwk = integer work array, assumed to occupy the same space as wk. -c lenwk = the length of the work arrays wk and iwk. -c istatc = a copy of the driver input argument istate (= 1 on the -c first call, = 3 on a continuation call). -c iys = flag value from odrv or cdrv. -c ipper = output error flag with the following values and meanings.. -c 0 no error. -c -1 insufficient storage for internal structure pointers. -c -2 insufficient storage for jgroup. -c -3 insufficient storage for odrv. -c -4 other error flag from odrv (should never occur). -c -5 insufficient storage for cdrv. -c -6 other error flag from cdrv. 
-c----------------------------------------------------------------------- - ibian = lrat*2 - ipian = ibian + 1 - np1 = n + 1 - ipjan = ipian + np1 - ibjan = ipjan - 1 - liwk = lenwk*lrat - if (ipjan+n-1 .gt. liwk) go to 210 - if (moss .eq. 0) go to 30 -c - if (istatc .eq. 3) go to 20 -c istate = 1 and moss .ne. 0. perturb y for structure determination. -- - do 10 i = 1,n - erwt = 1.0d0/ewt(i) - fac = 1.0d0 + 1.0d0/(dfloat(i)+1.0d0) - y(i) = y(i) + fac*dsign(erwt,y(i)) - 10 continue - go to (70, 100), moss -c - 20 continue -c istate = 3 and moss .ne. 0. load y from yh(*,1). -------------------- - do 25 i = 1,n - 25 y(i) = yh(i) - go to (70, 100), moss -c -c moss = 0. process user-s ia,ja. add diagonal entries if necessary. - - 30 knew = ipjan - kmin = ia(1) - iwk(ipian) = 1 - do 60 j = 1,n - jfound = 0 - kmax = ia(j+1) - 1 - if (kmin .gt. kmax) go to 45 - do 40 k = kmin,kmax - i = ja(k) - if (i .eq. j) jfound = 1 - if (knew .gt. liwk) go to 210 - iwk(knew) = i - knew = knew + 1 - 40 continue - if (jfound .eq. 1) go to 50 - 45 if (knew .gt. liwk) go to 210 - iwk(knew) = j - knew = knew + 1 - 50 iwk(ipian+j) = knew + 1 - ipjan - kmin = kmax + 1 - 60 continue - go to 140 -c -c moss = 1. compute structure from user-supplied jacobian routine jac. - 70 continue -c a dummy call to f allows user to create temporaries for use in jac. -- - call f (neq, tn, y, savf) - k = ipjan - iwk(ipian) = 1 - do 90 j = 1,n - if (k .gt. liwk) go to 210 - iwk(k) = j - k = k + 1 - do 75 i = 1,n - 75 savf(i) = 0.0d0 - call jac (neq, tn, y, j, iwk(ipian), iwk(ipjan), savf) - do 80 i = 1,n - if (dabs(savf(i)) .le. seth) go to 80 - if (i .eq. j) go to 80 - if (k .gt. liwk) go to 210 - iwk(k) = i - k = k + 1 - 80 continue - iwk(ipian+j) = k + 1 - ipjan - 90 continue - go to 140 -c -c moss = 2. compute structure from results of n + 1 calls to f. ------- - 100 k = ipjan - iwk(ipian) = 1 - call f (neq, tn, y, savf) - do 120 j = 1,n - if (k .gt. 
liwk) go to 210 - iwk(k) = j - k = k + 1 - yj = y(j) - erwt = 1.0d0/ewt(j) - dyj = dsign(erwt,yj) - y(j) = yj + dyj - call f (neq, tn, y, ftem) - y(j) = yj - do 110 i = 1,n - dq = (ftem(i) - savf(i))/dyj - if (dabs(dq) .le. seth) go to 110 - if (i .eq. j) go to 110 - if (k .gt. liwk) go to 210 - iwk(k) = i - k = k + 1 - 110 continue - iwk(ipian+j) = k + 1 - ipjan - 120 continue -c - 140 continue - if (moss .eq. 0 .or. istatc .ne. 1) go to 150 -c if istate = 1 and moss .ne. 0, restore y from yh. -------------------- - do 145 i = 1,n - 145 y(i) = yh(i) - 150 nnz = iwk(ipian+n) - 1 - lenigp = 0 - ipigp = ipjan + nnz - if (miter .ne. 2) go to 160 -c -c compute grouping of column indices (miter = 2). ---------------------- - maxg = np1 - ipjgp = ipjan + nnz - ibjgp = ipjgp - 1 - ipigp = ipjgp + n - iptt1 = ipigp + np1 - iptt2 = iptt1 + n - lreq = iptt2 + n - 1 - if (lreq .gt. liwk) go to 220 - call jgroup (n, iwk(ipian), iwk(ipjan), maxg, ngp, iwk(ipigp), - 1 iwk(ipjgp), iwk(iptt1), iwk(iptt2), ier) - if (ier .ne. 0) go to 220 - lenigp = ngp + 1 -c -c compute new ordering of rows/columns of jacobian. -------------------- - 160 ipr = ipigp + lenigp - ipc = ipr - ipic = ipc + n - ipisp = ipic + n - iprsp = (ipisp - 2)/lrat + 2 - iesp = lenwk + 1 - iprsp - if (iesp .lt. 0) go to 230 - ibr = ipr - 1 - do 170 i = 1,n - 170 iwk(ibr+i) = i - nsp = liwk + 1 - ipisp - call odrv (n, iwk(ipian), iwk(ipjan), wk, iwk(ipr), iwk(ipic), - 1 nsp, iwk(ipisp), 1, iys) - if (iys .eq. 11*n+1) go to 240 - if (iys .ne. 0) go to 230 -c -c reorder jan and do symbolic lu factorization of matrix. -------------- - ipa = lenwk + 1 - nnz - nsp = ipa - iprsp - lreq = max0(12*n/lrat, 6*n/lrat+2*n+nnz) + 3 - lreq = lreq + iprsp - 1 + nnz - if (lreq .gt. 
lenwk) go to 250 - iba = ipa - 1 - do 180 i = 1,nnz - 180 wk(iba+i) = 0.0d0 - ipisp = lrat*(iprsp - 1) + 1 - call cdrv (n,iwk(ipr),iwk(ipc),iwk(ipic),iwk(ipian),iwk(ipjan), - 1 wk(ipa),wk(ipa),wk(ipa),nsp,iwk(ipisp),wk(iprsp),iesp,5,iys) - lreq = lenwk - iesp - if (iys .eq. 10*n+1) go to 250 - if (iys .ne. 0) go to 260 - ipil = ipisp - ipiu = ipil + 2*n + 1 - nzu = iwk(ipil+n) - iwk(ipil) - nzl = iwk(ipiu+n) - iwk(ipiu) - if (lrat .gt. 1) go to 190 - call adjlr (n, iwk(ipisp), ldif) - lreq = lreq + ldif - 190 continue - if (lrat .eq. 2 .and. nnz .eq. n) lreq = lreq + 1 - nsp = nsp + lreq - lenwk - ipa = lreq + 1 - nnz - iba = ipa - 1 - ipper = 0 - return -c - 210 ipper = -1 - lreq = 2 + (2*n + 1)/lrat - lreq = max0(lenwk+1,lreq) - return -c - 220 ipper = -2 - lreq = (lreq - 1)/lrat + 1 - return -c - 230 ipper = -3 - call cntnzu (n, iwk(ipian), iwk(ipjan), nzsut) - lreq = lenwk - iesp + (3*n + 4*nzsut - 1)/lrat + 1 - return -c - 240 ipper = -4 - return -c - 250 ipper = -5 - return -c - 260 ipper = -6 - lreq = lenwk - return -c----------------------- end of subroutine prep ------------------------ - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/prepj.f b/scipy-0.10.1/scipy/integrate/odepack/prepj.f deleted file mode 100644 index 396d8a3967..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/prepj.f +++ /dev/null @@ -1,171 +0,0 @@ - subroutine prepj (neq, y, yh, nyh, ewt, ftem, savf, wm, iwm, - 1 f, jac) -clll. 
optimize - external f, jac - integer neq, nyh, iwm - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer i, i1, i2, ier, ii, j, j1, jj, lenp, - 1 mba, mband, meb1, meband, ml, ml3, mu, np1 - double precision y, yh, ewt, ftem, savf, wm - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision con, di, fac, hl0, r, r0, srur, yi, yj, yjj, - 1 vnorm - dimension neq(1), y(1), yh(nyh,*), ewt(1), ftem(1), savf(1), - 1 wm(*), iwm(*) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu -c----------------------------------------------------------------------- -c prepj is called by stode to compute and process the matrix -c p = i - h*el(1)*j , where j is an approximation to the jacobian. -c here j is computed by the user-supplied routine jac if -c miter = 1 or 4, or by finite differencing if miter = 2, 3, or 5. -c if miter = 3, a diagonal approximation to j is used. -c j is stored in wm and replaced by p. if miter .ne. 3, p is then -c subjected to lu decomposition in preparation for later solution -c of linear systems with p as coefficient matrix. this is done -c by dgefa if miter = 1 or 2, and by dgbfa if miter = 4 or 5. -c -c in addition to variables described previously, communication -c with prepj uses the following.. -c y = array containing predicted values on entry. -c ftem = work array of length n (acor in stode). -c savf = array containing f evaluated at predicted y. -c wm = real work space for matrices. on output it contains the -c inverse diagonal matrix if miter = 3 and the lu decomposition -c of p if miter is 1, 2 , 4, or 5. -c storage of matrix elements starts at wm(3). -c wm also contains the following matrix-related data.. 
-c wm(1) = sqrt(uround), used in numerical jacobian increments. -c wm(2) = h*el0, saved for later use if miter = 3. -c iwm = integer work space containing pivot information, starting at -c iwm(21), if miter is 1, 2, 4, or 5. iwm also contains band -c parameters ml = iwm(1) and mu = iwm(2) if miter is 4 or 5. -c el0 = el(1) (input). -c ierpj = output error flag, = 0 if no trouble, .gt. 0 if -c p matrix found to be singular. -c jcur = output flag = 1 to indicate that the jacobian matrix -c (or approximation) is now current. -c this routine also uses the common variables el0, h, tn, uround, -c miter, n, nfe, and nje. -c----------------------------------------------------------------------- - nje = nje + 1 - ierpj = 0 - jcur = 1 - hl0 = h*el0 - go to (100, 200, 300, 400, 500), miter -c if miter = 1, call jac and multiply by scalar. ----------------------- - 100 lenp = n*n - do 110 i = 1,lenp - 110 wm(i+2) = 0.0d0 - call jac (neq, tn, y, 0, 0, wm(3), n) - con = -hl0 - do 120 i = 1,lenp - 120 wm(i+2) = wm(i+2)*con - go to 240 -c if miter = 2, make n calls to f to approximate j. -------------------- - 200 fac = vnorm (n, savf, ewt) - r0 = 1000.0d0*dabs(h)*uround*dfloat(n)*fac - if (r0 .eq. 0.0d0) r0 = 1.0d0 - srur = wm(1) - j1 = 2 - do 230 j = 1,n - yj = y(j) - r = dmax1(srur*dabs(yj),r0/ewt(j)) - y(j) = y(j) + r - fac = -hl0/r - call f (neq, tn, y, ftem) - do 220 i = 1,n - 220 wm(i+j1) = (ftem(i) - savf(i))*fac - y(j) = yj - j1 = j1 + n - 230 continue - nfe = nfe + n -c add identity matrix. ------------------------------------------------- - 240 j = 3 - np1 = n + 1 - do 250 i = 1,n - wm(j) = wm(j) + 1.0d0 - 250 j = j + np1 -c do lu decomposition on p. -------------------------------------------- - call dgefa (wm(3), n, n, iwm(21), ier) - if (ier .ne. 0) ierpj = 1 - return -c if miter = 3, construct a diagonal approximation to j and p. 
--------- - 300 wm(2) = hl0 - r = el0*0.1d0 - do 310 i = 1,n - 310 y(i) = y(i) + r*(h*savf(i) - yh(i,2)) - call f (neq, tn, y, wm(3)) - nfe = nfe + 1 - do 320 i = 1,n - r0 = h*savf(i) - yh(i,2) - di = 0.1d0*r0 - h*(wm(i+2) - savf(i)) - wm(i+2) = 1.0d0 - if (dabs(r0) .lt. uround/ewt(i)) go to 320 - if (dabs(di) .eq. 0.0d0) go to 330 - wm(i+2) = 0.1d0*r0/di - 320 continue - return - 330 ierpj = 1 - return -c if miter = 4, call jac and multiply by scalar. ----------------------- - 400 ml = iwm(1) - mu = iwm(2) - ml3 = ml + 3 - mband = ml + mu + 1 - meband = mband + ml - lenp = meband*n - do 410 i = 1,lenp - 410 wm(i+2) = 0.0d0 - call jac (neq, tn, y, ml, mu, wm(ml3), meband) - con = -hl0 - do 420 i = 1,lenp - 420 wm(i+2) = wm(i+2)*con - go to 570 -c if miter = 5, make mband calls to f to approximate j. ---------------- - 500 ml = iwm(1) - mu = iwm(2) - mband = ml + mu + 1 - mba = min0(mband,n) - meband = mband + ml - meb1 = meband - 1 - srur = wm(1) - fac = vnorm (n, savf, ewt) - r0 = 1000.0d0*dabs(h)*uround*dfloat(n)*fac - if (r0 .eq. 0.0d0) r0 = 1.0d0 - do 560 j = 1,mba - do 530 i = j,n,mband - yi = y(i) - r = dmax1(srur*dabs(yi),r0/ewt(i)) - 530 y(i) = y(i) + r - call f (neq, tn, y, ftem) - do 550 jj = j,n,mband - y(jj) = yh(jj,1) - yjj = y(jj) - r = dmax1(srur*dabs(yjj),r0/ewt(jj)) - fac = -hl0/r - i1 = max0(jj-mu,1) - i2 = min0(jj+ml,n) - ii = jj*meb1 - ml + 2 - do 540 i = i1,i2 - 540 wm(ii+i) = (ftem(i) - savf(i))*fac - 550 continue - 560 continue - nfe = nfe + mba -c add identity matrix. ------------------------------------------------- - 570 ii = mband + 2 - do 580 i = 1,n - wm(ii) = wm(ii) + 1.0d0 - 580 ii = ii + meband -c do lu decomposition of p. -------------------------------------------- - call dgbfa (wm(3), meband, n, ml, mu, iwm(21), ier) - if (ier .ne. 
0) ierpj = 1 - return -c----------------------- end of subroutine prepj ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/prepji.f b/scipy-0.10.1/scipy/integrate/odepack/prepji.f deleted file mode 100644 index f39b1ab0fe..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/prepji.f +++ /dev/null @@ -1,177 +0,0 @@ - subroutine prepji (neq, y, yh, nyh, ewt, rtem, savr, s, wm, iwm, - 1 res, jac, adda) -clll. optimize - external res, jac, adda - integer neq, nyh, iwm - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu - integer i, i1, i2, ier, ii, ires, j, j1, jj, lenp, - 1 mba, mband, meb1, meband, ml, ml3, mu - double precision y, yh, ewt, rtem, savr, s, wm - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision con, fac, hl0, r, srur, yi, yj, yjj - dimension neq(1), y(1), yh(nyh,*), ewt(1), rtem(1), - 1 s(1), savr(1), wm(*), iwm(*) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu -c----------------------------------------------------------------------- -c prepji is called by stodi to compute and process the matrix -c p = a - h*el(1)*j , where j is an approximation to the jacobian dr/dy, -c where r = g(t,y) - a(t,y)*s. here j is computed by the user-supplied -c routine jac if miter = 1 or 4, or by finite differencing if miter = -c 2 or 5. j is stored in wm, rescaled, and adda is called to generate -c p. p is then subjected to lu decomposition in preparation -c for later solution of linear systems with p as coefficient -c matrix. this is done by dgefa if miter = 1 or 2, and by -c dgbfa if miter = 4 or 5. -c -c in addition to variables described previously, communication -c with prepji uses the following.. 
-c y = array containing predicted values on entry. -c rtem = work array of length n (acor in stodi). -c savr = array used for output only. on output it contains the -c residual evaluated at current values of t and y. -c s = array containing predicted values of dy/dt (savf in stodi). -c wm = real work space for matrices. on output it contains the -c lu decomposition of p. -c storage of matrix elements starts at wm(3). -c wm also contains the following matrix-related data.. -c wm(1) = sqrt(uround), used in numerical jacobian increments. -c iwm = integer work space containing pivot information, starting at -c iwm(21). iwm also contains the band parameters -c ml = iwm(1) and mu = iwm(2) if miter is 4 or 5. -c el0 = el(1) (input). -c ierpj = output error flag. -c = 0 if no trouble occurred, -c = 1 if the p matrix was found to be singular, -c = ires (= 2 or 3) if res returned ires = 2 or 3. -c jcur = output flag = 1 to indicate that the jacobian matrix -c (or approximation) is now current. -c this routine also uses the common variables el0, h, tn, uround, -c miter, n, nre, and nje. -c----------------------------------------------------------------------- - nje = nje + 1 - hl0 = h*el0 - ierpj = 0 - jcur = 1 - go to (100, 200, 300, 400, 500), miter -c if miter = 1, call res, then jac, and multiply by scalar. ------------ - 100 ires = 1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - lenp = n*n - do 110 i = 1,lenp - 110 wm(i+2) = 0.0d0 - call jac ( neq, tn, y, s, 0, 0, wm(3), n ) - con = -hl0 - do 120 i = 1,lenp - 120 wm(i+2) = wm(i+2)*con - go to 240 -c if miter = 2, make n + 1 calls to res to approximate j. -------------- - 200 continue - ires = -1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - srur = wm(1) - j1 = 2 - do 230 j = 1,n - yj = y(j) - r = dmax1(srur*dabs(yj),0.01d0/ewt(j)) - y(j) = y(j) + r - fac = -hl0/r - call res ( neq, tn, y, s, rtem, ires ) - nre = nre + 1 - if (ires .gt. 
1) go to 600 - do 220 i = 1,n - 220 wm(i+j1) = (rtem(i) - savr(i))*fac - y(j) = yj - j1 = j1 + n - 230 continue - ires = 1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 -c add matrix a. -------------------------------------------------------- - 240 continue - call adda(neq, tn, y, 0, 0, wm(3), n) -c do lu decomposition on p. -------------------------------------------- - call dgefa (wm(3), n, n, iwm(21), ier) - if (ier .ne. 0) ierpj = 1 - return -c dummy section for miter = 3 - 300 return -c if miter = 4, call res, then jac, and multiply by scalar. ------------ - 400 ires = 1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - ml = iwm(1) - mu = iwm(2) - ml3 = ml + 3 - mband = ml + mu + 1 - meband = mband + ml - lenp = meband*n - do 410 i = 1,lenp - 410 wm(i+2) = 0.0d0 - call jac ( neq, tn, y, s, ml, mu, wm(ml3), meband) - con = -hl0 - do 420 i = 1,lenp - 420 wm(i+2) = wm(i+2)*con - go to 570 -c if miter = 5, make ml + mu + 2 calls to res to approximate j. -------- - 500 continue - ires = -1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - ml = iwm(1) - mu = iwm(2) - ml3 = ml + 3 - mband = ml + mu + 1 - mba = min0(mband,n) - meband = mband + ml - meb1 = meband - 1 - srur = wm(1) - do 560 j = 1,mba - do 530 i = j,n,mband - yi = y(i) - r = dmax1(srur*dabs(yi),0.01d0/ewt(i)) - 530 y(i) = y(i) + r - call res ( neq, tn, y, s, rtem, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 - do 550 jj = j,n,mband - y(jj) = yh(jj,1) - yjj = y(jj) - r = dmax1(srur*dabs(yjj),0.01d0/ewt(jj)) - fac = -hl0/r - i1 = max0(jj-mu,1) - i2 = min0(jj+ml,n) - ii = jj*meb1 - ml + 2 - do 540 i = i1,i2 - 540 wm(ii+i) = (rtem(i) - savr(i))*fac - 550 continue - 560 continue - ires = 1 - call res (neq, tn, y, s, savr, ires) - nre = nre + 1 - if (ires .gt. 1) go to 600 -c add matrix a. 
-------------------------------------------------------- - 570 continue - call adda(neq, tn, y, ml, mu, wm(ml3), meband) -c do lu decomposition of p. -------------------------------------------- - call dgbfa (wm(3), meband, n, ml, mu, iwm(21), ier) - if (ier .ne. 0) ierpj = 1 - return -c error return for ires = 2 or ires = 3 return from res. --------------- - 600 ierpj = ires - return -c----------------------- end of subroutine prepji ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/prja.f b/scipy-0.10.1/scipy/integrate/odepack/prja.f deleted file mode 100644 index 21ea9ab8fc..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/prja.f +++ /dev/null @@ -1,173 +0,0 @@ - subroutine prja (neq, y, yh, nyh, ewt, ftem, savf, wm, iwm, - 1 f, jac) -clll. optimize - external f, jac - integer neq, nyh, iwm - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iownd2, iowns2, jtyp, mused, mxordn, mxords, isav - integer i, i1, i2, ier, ii, j, j1, jj, lenp, - 1 mba, mband, meb1, meband, ml, ml3, mu, np1 - double precision y, yh, ewt, ftem, savf, wm, rsav - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision rownd2, rowns2, pdnorm - double precision con, fac, hl0, r, r0, srur, yi, yj, yjj, - 1 vmnorm, fnorm, bnorm - dimension neq(1), y(1), yh(nyh,*), ewt(1), ftem(1), savf(1), - 1 wm(*), iwm(*), rsav(240), isav(50) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lsa001/ rownd2, rowns2(20), pdnorm, - 1 iownd2(3), iowns2(2), jtyp, mused, mxordn, mxords -c----------------------------------------------------------------------- -c prja is called by stoda to compute and process the matrix -c p = i - h*el(1)*j , where j is an 
approximation to the jacobian. -c here j is computed by the user-supplied routine jac if -c miter = 1 or 4 or by finite differencing if miter = 2 or 5. -c j, scaled by -h*el(1), is stored in wm. then the norm of j (the -c matrix norm consistent with the weighted max-norm on vectors given -c by vmnorm) is computed, and j is overwritten by p. p is then -c subjected to lu decomposition in preparation for later solution -c of linear systems with p as coefficient matrix. this is done -c by dgefa if miter = 1 or 2, and by dgbfa if miter = 4 or 5. -c -c in addition to variables described previously, communication -c with prja uses the following.. -c y = array containing predicted values on entry. -c ftem = work array of length n (acor in stoda). -c savf = array containing f evaluated at predicted y. -c wm = real work space for matrices. on output it contains the -c lu decomposition of p. -c storage of matrix elements starts at wm(3). -c wm also contains the following matrix-related data.. -c wm(1) = sqrt(uround), used in numerical jacobian increments. -c iwm = integer work space containing pivot information, starting at -c iwm(21). iwm also contains the band parameters -c ml = iwm(1) and mu = iwm(2) if miter is 4 or 5. -c el0 = el(1) (input). -c pdnorm= norm of jacobian matrix. (output). -c ierpj = output error flag, = 0 if no trouble, .gt. 0 if -c p matrix found to be singular. -c jcur = output flag = 1 to indicate that the jacobian matrix -c (or approximation) is now current. -c this routine also uses the common variables el0, h, tn, uround, -c miter, n, nfe, and nje. -c----------------------------------------------------------------------- - nje = nje + 1 - ierpj = 0 - jcur = 1 - hl0 = h*el0 - go to (100, 200, 300, 400, 500), miter -c if miter = 1, call jac and multiply by scalar. 
----------------------- - 100 lenp = n*n - do 110 i = 1,lenp - 110 wm(i+2) = 0.0d0 - call srcma (rsav, isav, 1) - call jac (neq, tn, y, 0, 0, wm(3), n) - call srcma (rsav, isav, 2) - con = -hl0 - do 120 i = 1,lenp - 120 wm(i+2) = wm(i+2)*con - go to 240 -c if miter = 2, make n calls to f to approximate j. -------------------- - 200 fac = vmnorm (n, savf, ewt) - r0 = 1000.0d0*dabs(h)*uround*dfloat(n)*fac - if (r0 .eq. 0.0d0) r0 = 1.0d0 - srur = wm(1) - j1 = 2 - do 230 j = 1,n - yj = y(j) - r = dmax1(srur*dabs(yj),r0/ewt(j)) - y(j) = y(j) + r - fac = -hl0/r - call srcma (rsav, isav, 1) - call f (neq, tn, y, ftem) - call srcma (rsav, isav, 2) - do 220 i = 1,n - 220 wm(i+j1) = (ftem(i) - savf(i))*fac - y(j) = yj - j1 = j1 + n - 230 continue - nfe = nfe + n - 240 continue -c compute norm of jacobian. -------------------------------------------- - pdnorm = fnorm (n, wm(3), ewt)/dabs(hl0) -c add identity matrix. ------------------------------------------------- - np1 = n + 1 - j = 3 - do 250 i = 1,n - wm(j) = wm(j) + 1.0d0 - 250 j = j + np1 -c do lu decomposition on p. -------------------------------------------- - call dgefa (wm(3), n, n, iwm(21), ier) - if (ier .ne. 0) ierpj = 1 - return -c dummy block only, since miter is never 3 in this routine. ------------ - 300 return -c if miter = 4, call jac and multiply by scalar. ----------------------- - 400 ml = iwm(1) - mu = iwm(2) - ml3 = ml + 3 - mband = ml + mu + 1 - meband = mband + ml - lenp = meband*n - do 410 i = 1,lenp - 410 wm(i+2) = 0.0d0 - call srcma (rsav, isav, 1) - call jac (neq, tn, y, ml, mu, wm(ml3), meband) - call srcma (rsav, isav, 2) - con = -hl0 - do 420 i = 1,lenp - 420 wm(i+2) = wm(i+2)*con - go to 570 -c if miter = 5, make mband calls to f to approximate j. ---------------- - 500 ml = iwm(1) - mu = iwm(2) - mband = ml + mu + 1 - mba = min0(mband,n) - meband = mband + ml - meb1 = meband - 1 - srur = wm(1) - fac = vmnorm (n, savf, ewt) - r0 = 1000.0d0*dabs(h)*uround*dfloat(n)*fac - if (r0 .eq. 
0.0d0) r0 = 1.0d0 - do 560 j = 1,mba - do 530 i = j,n,mband - yi = y(i) - r = dmax1(srur*dabs(yi),r0/ewt(i)) - 530 y(i) = y(i) + r - call srcma (rsav, isav, 1) - call f (neq, tn, y, ftem) - call srcma (rsav, isav, 2) - do 550 jj = j,n,mband - y(jj) = yh(jj,1) - yjj = y(jj) - r = dmax1(srur*dabs(yjj),r0/ewt(jj)) - fac = -hl0/r - i1 = max0(jj-mu,1) - i2 = min0(jj+ml,n) - ii = jj*meb1 - ml + 2 - do 540 i = i1,i2 - 540 wm(ii+i) = (ftem(i) - savf(i))*fac - 550 continue - 560 continue - nfe = nfe + mba - 570 continue -c compute norm of jacobian. -------------------------------------------- - pdnorm = bnorm (n, wm(3), meband, ml, mu, ewt)/dabs(hl0) -c add identity matrix. ------------------------------------------------- - ii = mband + 2 - do 580 i = 1,n - wm(ii) = wm(ii) + 1.0d0 - 580 ii = ii + meband -c do lu decomposition of p. -------------------------------------------- - call dgbfa (wm(3), meband, n, ml, mu, iwm(21), ier) - if (ier .ne. 0) ierpj = 1 - return -c----------------------- end of subroutine prja ------------------------ - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/prjs.f b/scipy-0.10.1/scipy/integrate/odepack/prjs.f deleted file mode 100644 index 99776c7b21..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/prjs.f +++ /dev/null @@ -1,197 +0,0 @@ - subroutine prjs (neq,y,yh,nyh,ewt,ftem,savf,wk,iwk,f,jac) -clll. 
optimize - external f,jac - integer neq, nyh, iwk - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 1 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 2 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 3 nslj, ngp, nlu, nnz, nsp, nzl, nzu - integer i, imul, j, jj, jok, jmax, jmin, k, kmax, kmin, ng - double precision y, yh, ewt, ftem, savf, wk - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision con0, conmin, ccmxj, psmall, rbig, seth - double precision con, di, fac, hl0, pij, r, r0, rcon, rcont, - 1 srur, vnorm - dimension neq(1), y(1), yh(nyh,*), ewt(1), ftem(1), savf(1), - 1 wk(*), iwk(*) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lss001/ con0, conmin, ccmxj, psmall, rbig, seth, - 1 iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 2 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 3 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 4 nslj, ngp, nlu, nnz, nsp, nzl, nzu -c----------------------------------------------------------------------- -c prjs is called to compute and process the matrix -c p = i - h*el(1)*j , where j is an approximation to the jacobian. -c j is computed by columns, either by the user-supplied routine jac -c if miter = 1, or by finite differencing if miter = 2. -c if miter = 3, a diagonal approximation to j is used. -c if miter = 1 or 2, and if the existing value of the jacobian -c (as contained in p) is considered acceptable, then a new value of -c p is reconstructed from the old value. in any case, when miter -c is 1 or 2, the p matrix is subjected to lu decomposition in cdrv. 
-c p and its lu decomposition are stored (separately) in wk. -c -c in addition to variables described previously, communication -c with prjs uses the following.. -c y = array containing predicted values on entry. -c ftem = work array of length n (acor in stode). -c savf = array containing f evaluated at predicted y. -c wk = real work space for matrices. on output it contains the -c inverse diagonal matrix if miter = 3, and p and its sparse -c lu decomposition if miter is 1 or 2. -c storage of matrix elements starts at wk(3). -c wk also contains the following matrix-related data.. -c wk(1) = sqrt(uround), used in numerical jacobian increments. -c wk(2) = h*el0, saved for later use if miter = 3. -c iwk = integer work space for matrix-related data, assumed to -c be equivalenced to wk. in addition, wk(iprsp) and iwk(ipisp) -c are assumed to have identical locations. -c el0 = el(1) (input). -c ierpj = output error flag (in common). -c = 0 if no error. -c = 1 if zero pivot found in cdrv. -c = 2 if a singular matrix arose with miter = 3. -c = -1 if insufficient storage for cdrv (should not occur here). -c = -2 if other error found in cdrv (should not occur here). -c jcur = output flag = 1 to indicate that the jacobian matrix -c (or approximation) is now current. -c this routine also uses other variables in common. -c----------------------------------------------------------------------- - hl0 = h*el0 - con = -hl0 - if (miter .eq. 3) go to 300 -c see whether j should be reevaluated (jok = 0) or not (jok = 1). ------ - jok = 1 - if (nst .eq. 0 .or. nst .ge. nslj+msbj) jok = 0 - if (icf .eq. 1 .and. dabs(rc - 1.0d0) .lt. ccmxj) jok = 0 - if (icf .eq. 2) jok = 0 - if (jok .eq. 1) go to 250 -c -c miter = 1 or 2, and the jacobian is to be reevaluated. --------------- - 20 jcur = 1 - nje = nje + 1 - nslj = nst - iplost = 0 - conmin = dabs(con) - go to (100, 200), miter -c -c if miter = 1, call jac, multiply by scalar, and add identity. 
-------- - 100 continue - kmin = iwk(ipian) - do 130 j = 1, n - kmax = iwk(ipian+j) - 1 - do 110 i = 1,n - 110 ftem(i) = 0.0d0 - call jac (neq, tn, y, j, iwk(ipian), iwk(ipjan), ftem) - do 120 k = kmin, kmax - i = iwk(ibjan+k) - wk(iba+k) = ftem(i)*con - if (i .eq. j) wk(iba+k) = wk(iba+k) + 1.0d0 - 120 continue - kmin = kmax + 1 - 130 continue - go to 290 -c -c if miter = 2, make ngp calls to f to approximate j and p. ------------ - 200 continue - fac = vnorm(n, savf, ewt) - r0 = 1000.0d0 * dabs(h) * uround * dfloat(n) * fac - if (r0 .eq. 0.0d0) r0 = 1.0d0 - srur = wk(1) - jmin = iwk(ipigp) - do 240 ng = 1,ngp - jmax = iwk(ipigp+ng) - 1 - do 210 j = jmin,jmax - jj = iwk(ibjgp+j) - r = dmax1(srur*dabs(y(jj)),r0/ewt(jj)) - 210 y(jj) = y(jj) + r - call f (neq, tn, y, ftem) - do 230 j = jmin,jmax - jj = iwk(ibjgp+j) - y(jj) = yh(jj,1) - r = dmax1(srur*dabs(y(jj)),r0/ewt(jj)) - fac = -hl0/r - kmin =iwk(ibian+jj) - kmax =iwk(ibian+jj+1) - 1 - do 220 k = kmin,kmax - i = iwk(ibjan+k) - wk(iba+k) = (ftem(i) - savf(i))*fac - if (i .eq. jj) wk(iba+k) = wk(iba+k) + 1.0d0 - 220 continue - 230 continue - jmin = jmax + 1 - 240 continue - nfe = nfe + ngp - go to 290 -c -c if jok = 1, reconstruct new p from old p. ---------------------------- - 250 jcur = 0 - rcon = con/con0 - rcont = dabs(con)/conmin - if (rcont .gt. rbig .and. iplost .eq. 1) go to 20 - kmin = iwk(ipian) - do 275 j = 1,n - kmax = iwk(ipian+j) - 1 - do 270 k = kmin,kmax - i = iwk(ibjan+k) - pij = wk(iba+k) - if (i .ne. j) go to 260 - pij = pij - 1.0d0 - if (dabs(pij) .ge. psmall) go to 260 - iplost = 1 - conmin = dmin1(dabs(con0),conmin) - 260 pij = pij*rcon - if (i .eq. j) pij = pij + 1.0d0 - wk(iba+k) = pij - 270 continue - kmin = kmax + 1 - 275 continue -c -c do numerical factorization of p matrix. 
------------------------------ - 290 nlu = nlu + 1 - con0 = con - ierpj = 0 - do 295 i = 1,n - 295 ftem(i) = 0.0d0 - call cdrv (n,iwk(ipr),iwk(ipc),iwk(ipic),iwk(ipian),iwk(ipjan), - 1 wk(ipa),ftem,ftem,nsp,iwk(ipisp),wk(iprsp),iesp,2,iys) - if (iys .eq. 0) return - imul = (iys - 1)/n - ierpj = -2 - if (imul .eq. 8) ierpj = 1 - if (imul .eq. 10) ierpj = -1 - return -c -c if miter = 3, construct a diagonal approximation to j and p. --------- - 300 continue - jcur = 1 - nje = nje + 1 - wk(2) = hl0 - ierpj = 0 - r = el0*0.1d0 - do 310 i = 1,n - 310 y(i) = y(i) + r*(h*savf(i) - yh(i,2)) - call f (neq, tn, y, wk(3)) - nfe = nfe + 1 - do 320 i = 1,n - r0 = h*savf(i) - yh(i,2) - di = 0.1d0*r0 - h*(wk(i+2) - savf(i)) - wk(i+2) = 1.0d0 - if (dabs(r0) .lt. uround/ewt(i)) go to 320 - if (dabs(di) .eq. 0.0d0) go to 330 - wk(i+2) = 0.1d0*r0/di - 320 continue - return - 330 ierpj = 2 - return -c----------------------- end of subroutine prjs ------------------------ - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/rchek.f b/scipy-0.10.1/scipy/integrate/odepack/rchek.f deleted file mode 100644 index 6695b0f31a..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/rchek.f +++ /dev/null @@ -1,165 +0,0 @@ - subroutine rchek (job, g, neq, y, yh, nyh, g0, g1, gx, jroot, irt) -clll. 
optimize - external g - integer job, neq, nyh, jroot, irt - double precision y, yh, g0, g1, gx - dimension neq(1), y(1), yh(nyh,*), g0(1), g1(1), gx(1), jroot(1) - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iownd3, iownr3, irfnd, itaskc, ngc, nge - integer i, iflag, jflag - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision rownr3, t0, tlast, toutc - double precision hming, t1, temp1, temp2, x - logical zroot - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lsr001/ rownr3(2), t0, tlast, toutc, - 1 iownd3(3), iownr3(2), irfnd, itaskc, ngc, nge -c----------------------------------------------------------------------- -c this routine checks for the presence of a root in the -c vicinity of the current t, in a manner depending on the -c input flag job. it calls subroutine roots to locate the root -c as precisely as possible. -c -c in addition to variables described previously, rchek -c uses the following for communication.. -c job = integer flag indicating type of call.. -c job = 1 means the problem is being initialized, and rchek -c is to look for a root at or very near the initial t. -c job = 2 means a continuation call to the solver was just -c made, and rchek is to check for a root in the -c relevant part of the step last taken. -c job = 3 means a successful step was just taken, and rchek -c is to look for a root in the interval of the step. -c g0 = array of length ng, containing the value of g at t = t0. -c g0 is input for job .ge. 2 and on output in all cases. -c g1,gx = arrays of length ng for work space. -c irt = completion flag.. -c irt = 0 means no root was found. -c irt = -1 means job = 1 and a root was found too near to t. 
-c irt = 1 means a legitimate root was found (job = 2 or 3). -c on return, t0 is the root location, and y is the -c corresponding solution vector. -c t0 = value of t at one endpoint of interval of interest. only -c roots beyond t0 in the direction of integration are sought. -c t0 is input if job .ge. 2, and output in all cases. -c t0 is updated by rchek, whether a root is found or not. -c tlast = last value of t returned by the solver (input only). -c toutc = copy of tout (input only). -c irfnd = input flag showing whether the last step taken had a root. -c irfnd = 1 if it did, = 0 if not. -c itaskc = copy of itask (input only). -c ngc = copy of ng (input only). -c----------------------------------------------------------------------- -c - irt = 0 - do 10 i = 1,ngc - 10 jroot(i) = 0 - hming = (dabs(tn) + dabs(h))*uround*100.0d0 -c - go to (100, 200, 300), job -c -c evaluate g at initial t, and check for zero values. ------------------ - 100 continue - t0 = tn - call g (neq, t0, y, ngc, g0) - nge = 1 - zroot = .false. - do 110 i = 1,ngc - 110 if (dabs(g0(i)) .le. 0.0d0) zroot = .true. - if (.not. zroot) go to 190 -c g has a zero at t. look at g at t + (small increment). -------------- - temp1 = dsign(hming,h) - t0 = t0 + temp1 - temp2 = temp1/h - do 120 i = 1,n - 120 y(i) = y(i) + temp2*yh(i,2) - call g (neq, t0, y, ngc, g0) - nge = nge + 1 - zroot = .false. - do 130 i = 1,ngc - 130 if (dabs(g0(i)) .le. 0.0d0) zroot = .true. - if (.not. zroot) go to 190 -c g has a zero at t and also close to t. take error return. ----------- - irt = -1 - return -c - 190 continue - return -c -c - 200 continue - if (irfnd .eq. 0) go to 260 -c if a root was found on the previous step, evaluate g0 = g(t0). ------- - call intdy (t0, 0, yh, nyh, y, iflag) - call g (neq, t0, y, ngc, g0) - nge = nge + 1 - zroot = .false. - do 210 i = 1,ngc - 210 if (dabs(g0(i)) .le. 0.0d0) zroot = .true. - if (.not. zroot) go to 260 -c g has a zero at t0. look at g at t + (small increment). 
------------- - temp1 = dsign(hming,h) - t0 = t0 + temp1 - if ((t0 - tn)*h .lt. 0.0d0) go to 230 - temp2 = temp1/h - do 220 i = 1,n - 220 y(i) = y(i) + temp2*yh(i,2) - go to 240 - 230 call intdy (t0, 0, yh, nyh, y, iflag) - 240 call g (neq, t0, y, ngc, g0) - nge = nge + 1 - zroot = .false. - do 250 i = 1,ngc - if (dabs(g0(i)) .gt. 0.0d0) go to 250 - jroot(i) = 1 - zroot = .true. - 250 continue - if (.not. zroot) go to 260 -c g has a zero at t0 and also close to t0. return root. --------------- - irt = 1 - return -c here, g0 does not have a root -c g0 has no zero components. proceed to check relevant interval. ------ - 260 if (tn .eq. tlast) go to 390 -c - 300 continue -c set t1 to tn or toutc, whichever comes first, and get g at t1. ------- - if (itaskc.eq.2 .or. itaskc.eq.3 .or. itaskc.eq.5) go to 310 - if ((toutc - tn)*h .ge. 0.0d0) go to 310 - t1 = toutc - if ((t1 - t0)*h .le. 0.0d0) go to 390 - call intdy (t1, 0, yh, nyh, y, iflag) - go to 330 - 310 t1 = tn - do 320 i = 1,n - 320 y(i) = yh(i,1) - 330 call g (neq, t1, y, ngc, g1) - nge = nge + 1 -c call roots to search for root in interval from t0 to t1. ------------- - jflag = 0 - 350 continue - call roots (ngc, hming, jflag, t0, t1, g0, g1, gx, x, jroot) - if (jflag .gt. 1) go to 360 - call intdy (x, 0, yh, nyh, y, iflag) - call g (neq, x, y, ngc, gx) - nge = nge + 1 - go to 350 - 360 t0 = x - call dcopy (ngc, gx, 1, g0, 1) - if (jflag .eq. 4) go to 390 -c found a root. interpolate to x and return. -------------------------- - call intdy (x, 0, yh, nyh, y, iflag) - irt = 1 - return -c - 390 continue - return -c----------------------- end of subroutine rchek ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/roots.f b/scipy-0.10.1/scipy/integrate/odepack/roots.f deleted file mode 100644 index 59ba15d7fe..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/roots.f +++ /dev/null @@ -1,210 +0,0 @@ - subroutine roots (ng, hmin, jflag, x0, x1, g0, g1, gx, x, jroot) -clll. 
optimize - integer ng, jflag, jroot - double precision hmin, x0, x1, g0, g1, gx, x - dimension g0(ng), g1(ng), gx(ng), jroot(ng) - integer iownd3, imax, last, idum3 - double precision alpha, x2, rdum3 - common /lsr001/ alpha, x2, rdum3(3), - 1 iownd3(3), imax, last, idum3(4) -c----------------------------------------------------------------------- -c this subroutine finds the leftmost root of a set of arbitrary -c functions gi(x) (i = 1,...,ng) in an interval (x0,x1). only roots -c of odd multiplicity (i.e. changes of sign of the gi) are found. -c here the sign of x1 - x0 is arbitrary, but is constant for a given -c problem, and -leftmost- means nearest to x0. -c the values of the vector-valued function g(x) = (gi, i=1...ng) -c are communicated through the call sequence of roots. -c the method used is the illinois algorithm. -c -c reference.. -c kathie l. hiebert and lawrence f. shampine, implicitly defined -c output points for solutions of ode-s, sandia report sand80-0180, -c february, 1980. -c -c description of parameters. -c -c ng = number of functions gi, or the number of components of -c the vector valued function g(x). input only. -c -c hmin = resolution parameter in x. input only. when a root is -c found, it is located only to within an error of hmin in x. -c typically, hmin should be set to something on the order of -c 100 * uround * max(abs(x0),abs(x1)), -c where uround is the unit roundoff of the machine. -c -c jflag = integer flag for input and output communication. -c -c on input, set jflag = 0 on the first call for the problem, -c and leave it unchanged until the problem is completed. -c (the problem is completed when jflag .ge. 2 on return.) -c -c on output, jflag has the following values and meanings.. -c jflag = 1 means roots needs a value of g(x). set gx = g(x) -c and call roots again. -c jflag = 2 means a root has been found. the root is -c at x, and gx contains g(x). 
(actually, x is the -c rightmost approximation to the root on an interval -c (x0,x1) of size hmin or less.) -c jflag = 3 means x = x1 is a root, with one or more of the gi -c being zero at x1 and no sign changes in (x0,x1). -c gx contains g(x) on output. -c jflag = 4 means no roots (of odd multiplicity) were -c found in (x0,x1) (no sign changes). -c -c x0,x1 = endpoints of the interval where roots are sought. -c x1 and x0 are input when jflag = 0 (first call), and -c must be left unchanged between calls until the problem is -c completed. x0 and x1 must be distinct, but x1 - x0 may be -c of either sign. however, the notion of -left- and -right- -c will be used to mean nearer to x0 or x1, respectively. -c when jflag .ge. 2 on return, x0 and x1 are output, and -c are the endpoints of the relevant interval. -c -c g0,g1 = arrays of length ng containing the vectors g(x0) and g(x1), -c respectively. when jflag = 0, g0 and g1 are input and -c none of the g0(i) should be be zero. -c when jflag .ge. 2 on return, g0 and g1 are output. -c -c gx = array of length ng containing g(x). gx is input -c when jflag = 1, and output when jflag .ge. 2. -c -c x = independent variable value. output only. -c when jflag = 1 on output, x is the point at which g(x) -c is to be evaluated and loaded into gx. -c when jflag = 2 or 3, x is the root. -c when jflag = 4, x is the right endpoint of the interval, x1. -c -c jroot = integer array of length ng. output only. -c when jflag = 2 or 3, jroot indicates which components -c of g(x) have a root at x. jroot(i) is 1 if the i-th -c component has a root, and jroot(i) = 0 otherwise. -c -c note.. this routine uses the common block /lsr001/ to save -c the values of certain variables between calls (own variables). -c----------------------------------------------------------------------- - integer i, imxold, nxlast - double precision t2, tmax, zero - logical zroot, sgnchg, xroot - data zero/0.0d0/ -c - if (jflag .eq. 1) go to 200 -c jflag .ne. 1. 
check for change in sign of g or zero at x1. ---------- - imax = 0 - tmax = zero - zroot = .false. - do 120 i = 1,ng - if (dabs(g1(i)) .gt. zero) go to 110 - zroot = .true. - go to 120 -c at this point, g0(i) has been checked and cannot be zero. ------------ - 110 if (dsign(1.0d0,g0(i)) .eq. dsign(1.0d0,g1(i))) go to 120 - t2 = dabs(g1(i)/(g1(i)-g0(i))) - if (t2 .le. tmax) go to 120 - tmax = t2 - imax = i - 120 continue - if (imax .gt. 0) go to 130 - sgnchg = .false. - go to 140 - 130 sgnchg = .true. - 140 if (.not. sgnchg) go to 400 -c there is a sign change. find the first root in the interval. -------- - xroot = .false. - nxlast = 0 - last = 1 -c -c repeat until the first root in the interval is found. loop point. --- - 150 continue - if (xroot) go to 300 - if (nxlast .eq. last) go to 160 - alpha = 1.0d0 - go to 180 - 160 if (last .eq. 0) go to 170 - alpha = 0.5d0*alpha - go to 180 - 170 alpha = 2.0d0*alpha - 180 x2 = x1 - (x1-x0)*g1(imax)/(g1(imax) - alpha*g0(imax)) - if ((dabs(x2-x0) .lt. hmin) .and. - 1 (dabs(x1-x0) .gt. 10.0d0*hmin)) x2 = x0 + 0.1d0*(x1-x0) - jflag = 1 - x = x2 -c return to the calling routine to get a value of gx = g(x). ----------- - return -c check to see in which interval g changes sign. ----------------------- - 200 imxold = imax - imax = 0 - tmax = zero - zroot = .false. - do 220 i = 1,ng - if (dabs(gx(i)) .gt. zero) go to 210 - zroot = .true. - go to 220 -c neither g0(i) nor gx(i) can be zero at this point. ------------------- - 210 if (dsign(1.0d0,g0(i)) .eq. dsign(1.0d0,gx(i))) go to 220 - t2 = dabs(gx(i)/(gx(i) - g0(i))) - if (t2 .le. tmax) go to 220 - tmax = t2 - imax = i - 220 continue - if (imax .gt. 0) go to 230 - sgnchg = .false. - imax = imxold - go to 240 - 230 sgnchg = .true. - 240 nxlast = last - if (.not. sgnchg) go to 250 -c sign change between x0 and x2, so replace x1 with x2. ---------------- - x1 = x2 - call dcopy (ng, gx, 1, g1, 1) - last = 1 - xroot = .false. - go to 270 - 250 if (.not. 
zroot) go to 260 -c zero value at x2 and no sign change in (x0,x2), so x2 is a root. ----- - x1 = x2 - call dcopy (ng, gx, 1, g1, 1) - xroot = .true. - go to 270 -c no sign change between x0 and x2. replace x0 with x2. --------------- - 260 continue - call dcopy (ng, gx, 1, g0, 1) - x0 = x2 - last = 0 - xroot = .false. - 270 if (dabs(x1-x0) .le. hmin) xroot = .true. - go to 150 -c -c return with x1 as the root. set jroot. set x = x1 and gx = g1. ----- - 300 jflag = 2 - x = x1 - call dcopy (ng, g1, 1, gx, 1) - do 320 i = 1,ng - jroot(i) = 0 - if (dabs(g1(i)) .gt. zero) go to 310 - jroot(i) = 1 - go to 320 - 310 if (dsign(1.0d0,g0(i)) .ne. dsign(1.0d0,g1(i))) jroot(i) = 1 - 320 continue - return -c -c no sign change in the interval. check for zero at right endpoint. --- - 400 if (.not. zroot) go to 420 -c -c zero value at x1 and no sign change in (x0,x1). return jflag = 3. --- - x = x1 - call dcopy (ng, g1, 1, gx, 1) - do 410 i = 1,ng - jroot(i) = 0 - if (dabs(g1(i)) .le. zero) jroot (i) = 1 - 410 continue - jflag = 3 - return -c -c no sign changes in this interval. set x = x1, return jflag = 4. ----- - 420 call dcopy (ng, g1, 1, gx, 1) - x = x1 - jflag = 4 - return -c----------------------- end of subroutine roots ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/slsbt.f b/scipy-0.10.1/scipy/integrate/odepack/slsbt.f deleted file mode 100644 index 1996d63875..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/slsbt.f +++ /dev/null @@ -1,29 +0,0 @@ - subroutine slsbt (wm, iwm, x, tem) -clll. optimize - integer iwm - integer lblox, lpb, lpc, mb, nb - double precision wm, x, tem - dimension wm(*), iwm(*), x(1), tem(1) -c----------------------------------------------------------------------- -c this routine acts as an interface between the core integrator -c routine and the solbt routine for the solution of the linear system -c arising from chord iteration. -c communication with slsbt uses the following variables.. 
-c wm = real work space containing the lu decomposition, -c starting at wm(3). -c iwm = integer work space containing pivot information, starting at -c iwm(21). iwm also contains block structure parameters -c mb = iwm(1) and nb = iwm(2). -c x = the right-hand side vector on input, and the solution vector -c on output, of length n. -c tem = vector of work space of length n, not used in this version. -c----------------------------------------------------------------------- - mb = iwm(1) - nb = iwm(2) - lblox = mb*mb*nb - lpb = 3 + lblox - lpc = lpb + lblox - call solbt (mb, nb, wm(3), wm(lpb), wm(lpc), x, iwm(21)) - return -c----------------------- end of subroutine slsbt ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/slss.f b/scipy-0.10.1/scipy/integrate/odepack/slss.f deleted file mode 100644 index 65fa7bea83..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/slss.f +++ /dev/null @@ -1,77 +0,0 @@ - subroutine slss (wk, iwk, x, tem) -clll. optimize - integer iwk - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 1 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 2 lenyh, lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 3 nslj, ngp, nlu, nnz, nsp, nzl, nzu - integer i - double precision wk, x, tem - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision rlss - double precision di, hl0, phl0, r - dimension wk(*), iwk(*), x(1), tem(1) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lss001/ rlss(6), - 1 iplost, iesp, istatc, iys, iba, ibian, ibjan, ibjgp, - 2 ipian, ipjan, ipjgp, ipigp, ipr, ipc, ipic, ipisp, iprsp, ipa, - 3 lenyh, 
lenyhm, lenwk, lreq, lrat, lrest, lwmin, moss, msbj, - 4 nslj, ngp, nlu, nnz, nsp, nzl, nzu -c----------------------------------------------------------------------- -c this routine manages the solution of the linear system arising from -c a chord iteration. it is called if miter .ne. 0. -c if miter is 1 or 2, it calls cdrv to accomplish this. -c if miter = 3 it updates the coefficient h*el0 in the diagonal -c matrix, and then computes the solution. -c communication with slss uses the following variables.. -c wk = real work space containing the inverse diagonal matrix if -c miter = 3 and the lu decomposition of the matrix otherwise. -c storage of matrix elements starts at wk(3). -c wk also contains the following matrix-related data.. -c wk(1) = sqrt(uround) (not used here), -c wk(2) = hl0, the previous value of h*el0, used if miter = 3. -c iwk = integer work space for matrix-related data, assumed to -c be equivalenced to wk. in addition, wk(iprsp) and iwk(ipisp) -c are assumed to have identical locations. -c x = the right-hand side vector on input, and the solution vector -c on output, of length n. -c tem = vector of work space of length n, not used in this version. -c iersl = output flag (in common). -c iersl = 0 if no trouble occurred. -c iersl = -1 if cdrv returned an error flag (miter = 1 or 2). -c this should never occur and is considered fatal. -c iersl = 1 if a singular matrix arose with miter = 3. -c this routine also uses other variables in common. -c----------------------------------------------------------------------- - iersl = 0 - go to (100, 100, 300), miter - 100 call cdrv (n,iwk(ipr),iwk(ipc),iwk(ipic),iwk(ipian),iwk(ipjan), - 1 wk(ipa),x,x,nsp,iwk(ipisp),wk(iprsp),iesp,4,iersl) - if (iersl .ne. 0) iersl = -1 - return -c - 300 phl0 = wk(2) - hl0 = h*el0 - wk(2) = hl0 - if (hl0 .eq. phl0) go to 330 - r = hl0/phl0 - do 320 i = 1,n - di = 1.0d0 - r*(1.0d0 - 1.0d0/wk(i+2)) - if (dabs(di) .eq. 
0.0d0) go to 390 - 320 wk(i+2) = 1.0d0/di - 330 do 340 i = 1,n - 340 x(i) = wk(i+2)*x(i) - return - 390 iersl = 1 - return -c -c----------------------- end of subroutine slss ------------------------ - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/solbt.f b/scipy-0.10.1/scipy/integrate/odepack/solbt.f deleted file mode 100644 index 39b867a25f..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/solbt.f +++ /dev/null @@ -1,59 +0,0 @@ - subroutine solbt (m, n, a, b, c, y, ip) - integer m, n, ip(m,n) - double precision a(m,m,n), b(m,m,n), c(m,m,n), y(m,n) -clll. optimize -c----------------------------------------------------------------------- -c solution of block-tridiagonal linear system. -c coefficient matrix must have been previously processed by decbt. -c m, n, a, b, c, and ip must not have been changed since call to decbt. -c written by a. c. hindmarsh. -c input.. -c m = order of each block. -c n = number of blocks in each direction of matrix. -c a,b,c = m by m by n arrays containing block lu decomposition -c of coefficient matrix from decbt. -c ip = m by n integer array of pivot information from decbt. -c y = array of length m*n containg the right-hand side vector -c (treated as an m by n array here). -c output.. -c y = solution vector, of length m*n. -c -c external routines required.. dgesl (linpack) and ddot (blas). -c----------------------------------------------------------------------- -c - integer nm1, nm2, i, k, kb, km1, kp1 - double precision dp, ddot - nm1 = n - 1 - nm2 = n - 2 -c forward solution sweep. 
---------------------------------------------- - call dgesl (a, m, m, ip, y, 0) - do 30 k = 2,nm1 - km1 = k - 1 - do 20 i = 1,m - dp = ddot (m, c(i,1,k), m, y(1,km1), 1) - y(i,k) = y(i,k) - dp - 20 continue - call dgesl (a(1,1,k), m, m, ip(1,k), y(1,k), 0) - 30 continue - do 50 i = 1,m - dp = ddot (m, c(i,1,n), m, y(1,nm1), 1) - 1 + ddot (m, b(i,1,n), m, y(1,nm2), 1) - y(i,n) = y(i,n) - dp - 50 continue - call dgesl (a(1,1,n), m, m, ip(1,n), y(1,n), 0) -c backward solution sweep. --------------------------------------------- - do 80 kb = 1,nm1 - k = n - kb - kp1 = k + 1 - do 70 i = 1,m - dp = ddot (m, b(i,1,k), m, y(1,kp1), 1) - y(i,k) = y(i,k) - dp - 70 continue - 80 continue - do 100 i = 1,m - dp = ddot (m, c(i,1,1), m, y(1,3), 1) - y(i,1) = y(i,1) - dp - 100 continue - return -c----------------------- end of subroutine solbt --------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/solsy.f b/scipy-0.10.1/scipy/integrate/odepack/solsy.f deleted file mode 100644 index 5d693350fb..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/solsy.f +++ /dev/null @@ -1,68 +0,0 @@ - subroutine solsy (wm, iwm, x, tem) -clll. optimize - integer iwm - integer iownd, iowns, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer i, meband, ml, mu - double precision wm, x, tem - double precision rowns, - 1 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision di, hl0, phl0, r - dimension wm(*), iwm(*), x(1), tem(1) - common /ls0001/ rowns(209), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, - 3 iownd(14), iowns(6), - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu -c----------------------------------------------------------------------- -c this routine manages the solution of the linear system arising from -c a chord iteration. it is called if miter .ne. 0. 
-c if miter is 1 or 2, it calls dgesl to accomplish this. -c if miter = 3 it updates the coefficient h*el0 in the diagonal -c matrix, and then computes the solution. -c if miter is 4 or 5, it calls dgbsl. -c communication with solsy uses the following variables.. -c wm = real work space containing the inverse diagonal matrix if -c miter = 3 and the lu decomposition of the matrix otherwise. -c storage of matrix elements starts at wm(3). -c wm also contains the following matrix-related data.. -c wm(1) = sqrt(uround) (not used here), -c wm(2) = hl0, the previous value of h*el0, used if miter = 3. -c iwm = integer work space containing pivot information, starting at -c iwm(21), if miter is 1, 2, 4, or 5. iwm also contains band -c parameters ml = iwm(1) and mu = iwm(2) if miter is 4 or 5. -c x = the right-hand side vector on input, and the solution vector -c on output, of length n. -c tem = vector of work space of length n, not used in this version. -c iersl = output flag (in common). iersl = 0 if no trouble occurred. -c iersl = 1 if a singular matrix arose with miter = 3. -c this routine also uses the common variables el0, h, miter, and n. -c----------------------------------------------------------------------- - iersl = 0 - go to (100, 100, 300, 400, 400), miter - 100 call dgesl (wm(3), n, n, iwm(21), x, 0) - return -c - 300 phl0 = wm(2) - hl0 = h*el0 - wm(2) = hl0 - if (hl0 .eq. phl0) go to 330 - r = hl0/phl0 - do 320 i = 1,n - di = 1.0d0 - r*(1.0d0 - 1.0d0/wm(i+2)) - if (dabs(di) .eq. 
0.0d0) go to 390 - 320 wm(i+2) = 1.0d0/di - 330 do 340 i = 1,n - 340 x(i) = wm(i+2)*x(i) - return - 390 iersl = 1 - return -c - 400 ml = iwm(1) - mu = iwm(2) - meband = 2*ml + mu + 1 - call dgbsl (wm(3), meband, n, ml, mu, iwm(21), x, 0) - return -c----------------------- end of subroutine solsy ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/srcar.f b/scipy-0.10.1/scipy/integrate/odepack/srcar.f deleted file mode 100644 index 42ca4da45b..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/srcar.f +++ /dev/null @@ -1,71 +0,0 @@ - subroutine srcar (rsav, isav, job) -c----------------------------------------------------------------------- -c this routine saves or restores (depending on job) the contents of -c the common blocks ls0001, lsa001, lar001, and eh0001, which are used -c internally by one or more odepack solvers. -c -c rsav = real array of length 245 or more. -c isav = integer array of length 59 or more. -c job = flag indicating to save or restore the common blocks.. -c job = 1 if common is to be saved (written to rsav/isav) -c job = 2 if common is to be restored (read from rsav/isav) -c a call with job = 2 presumes a prior call with job = 1. -c----------------------------------------------------------------------- - integer isav, job - integer ieh, ils, ilsa, ilsr - integer i, ioff, lenrls, lenils, lenrla, lenila, lenrlr, lenilr - double precision rsav - double precision rls, rlsa, rlsr - dimension rsav(1), isav(1) - common /ls0001/ rls(218), ils(39) - common /lsa001/ rlsa(22), ilsa(9) - common /lsr001/ rlsr(5), ilsr(9) - common /eh0001/ ieh(2) - data lenrls/218/, lenils/39/, lenrla/22/, lenila/9/ - data lenrlr/5/, lenilr/9/ -c - if (job .eq. 
2) go to 100 - do 10 i = 1,lenrls - 10 rsav(i) = rls(i) - do 15 i = 1,lenrla - 15 rsav(lenrls+i) = rlsa(i) - ioff = lenrls + lenrla - do 20 i = 1,lenrlr - 20 rsav(ioff+i) = rlsr(i) -c - do 30 i = 1,lenils - 30 isav(i) = ils(i) - do 35 i = 1,lenila - 35 isav(lenils+i) = ilsa(i) - ioff = lenils + lenila - do 40 i = 1,lenilr - 40 isav(ioff+i) = ilsr(i) -c - ioff = ioff + lenilr - isav(ioff+1) = ieh(1) - isav(ioff+2) = ieh(2) - return -c - 100 continue - do 110 i = 1,lenrls - 110 rls(i) = rsav(i) - do 115 i = 1,lenrla - 115 rlsa(i) = rsav(lenrls+i) - ioff = lenrls + lenrla - do 120 i = 1,lenrlr - 120 rlsr(i) = rsav(ioff+i) -c - do 130 i = 1,lenils - 130 ils(i) = isav(i) - do 135 i = 1,lenila - 135 ilsa(i) = isav(lenils+i) - ioff = lenils + lenila - do 140 i = 1,lenilr - 140 ilsr(i) = isav(ioff+i) -c - ioff = ioff + lenilr - ieh(1) = isav(ioff+1) - ieh(2) = isav(ioff+2) - return -c----------------------- end of subroutine srcar ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/srcma.f b/scipy-0.10.1/scipy/integrate/odepack/srcma.f deleted file mode 100644 index 62c07a5717..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/srcma.f +++ /dev/null @@ -1,55 +0,0 @@ - subroutine srcma (rsav, isav, job) -c----------------------------------------------------------------------- -c this routine saves or restores (depending on job) the contents of -c the common blocks ls0001, lsa001, and eh0001, which are used -c internally by one or more odepack solvers. -c -c rsav = real array of length 240 or more. -c isav = integer array of length 50 or more. -c job = flag indicating to save or restore the common blocks.. -c job = 1 if common is to be saved (written to rsav/isav) -c job = 2 if common is to be restored (read from rsav/isav) -c a call with job = 2 presumes a prior call with job = 1. 
-c----------------------------------------------------------------------- - integer isav, job - integer ieh, ils, ilsa - integer i, lenrls, lenils, lenrla, lenila - double precision rsav - double precision rls, rlsa - dimension rsav(1), isav(1) - common /ls0001/ rls(218), ils(39) - common /lsa001/ rlsa(22), ilsa(9) - common /eh0001/ ieh(2) - data lenrls/218/, lenils/39/, lenrla/22/, lenila/9/ -c - if (job .eq. 2) go to 100 - do 10 i = 1,lenrls - 10 rsav(i) = rls(i) - do 15 i = 1,lenrla - 15 rsav(lenrls+i) = rlsa(i) -c - do 20 i = 1,lenils - 20 isav(i) = ils(i) - do 25 i = 1,lenila - 25 isav(lenils+i) = ilsa(i) -c - isav(lenils+lenila+1) = ieh(1) - isav(lenils+lenila+2) = ieh(2) - return -c - 100 continue - do 110 i = 1,lenrls - 110 rls(i) = rsav(i) - do 115 i = 1,lenrla - 115 rlsa(i) = rsav(lenrls+i) -c - do 120 i = 1,lenils - 120 ils(i) = isav(i) - do 125 i = 1,lenila - 125 ilsa(i) = isav(lenils+i) -c - ieh(1) = isav(lenils+lenila+1) - ieh(2) = isav(lenils+lenila+2) - return -c----------------------- end of subroutine srcma ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/srcms.f b/scipy-0.10.1/scipy/integrate/odepack/srcms.f deleted file mode 100644 index 63f6feb493..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/srcms.f +++ /dev/null @@ -1,54 +0,0 @@ - subroutine srcms (rsav, isav, job) -c----------------------------------------------------------------------- -c this routine saves or restores (depending on job) the contents of -c the common blocks ls0001, lss001, and eh0001, which are used -c internally by one or more odepack solvers. -c -c rsav = real array of length 224 or more. -c isav = integer array of length 75 or more. -c job = flag indicating to save or restore the common blocks.. -c job = 1 if common is to be saved (written to rsav/isav) -c job = 2 if common is to be restored (read from rsav/isav) -c a call with job = 2 presumes a prior call with job = 1. 
-c----------------------------------------------------------------------- - integer isav, job - integer ieh, ils, ilss - integer i, lenil, leniss, lenrl, lenrss - double precision rsav, rls, rlss - dimension rsav(1), isav(1) - common /ls0001/ rls(218), ils(39) - common /lss001/ rlss(6), ilss(34) - common /eh0001/ ieh(2) - data lenrl/218/, lenil/39/, lenrss/6/, leniss/34/ -c - if (job .eq. 2) go to 100 - do 10 i = 1,lenrl - 10 rsav(i) = rls(i) - do 15 i = 1,lenrss - 15 rsav(lenrl+i) = rlss(i) -c - do 20 i = 1,lenil - 20 isav(i) = ils(i) - do 25 i = 1,leniss - 25 isav(lenil+i) = ilss(i) -c - isav(lenil+leniss+1) = ieh(1) - isav(lenil+leniss+2) = ieh(2) - return -c - 100 continue - do 110 i = 1,lenrl - 110 rls(i) = rsav(i) - do 115 i = 1,lenrss - 115 rlss(i) = rsav(lenrl+i) -c - do 120 i = 1,lenil - 120 ils(i) = isav(i) - do 125 i = 1,leniss - 125 ilss(i) = isav(lenil+i) -c - ieh(1) = isav(lenil+leniss+1) - ieh(2) = isav(lenil+leniss+2) - return -c----------------------- end of subroutine srcms ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/srcom.f b/scipy-0.10.1/scipy/integrate/odepack/srcom.f deleted file mode 100644 index deb2881b9d..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/srcom.f +++ /dev/null @@ -1,42 +0,0 @@ - subroutine srcom (rsav, isav, job) -c----------------------------------------------------------------------- -c this routine saves or restores (depending on job) the contents of -c the common blocks ls0001 and eh0001, which are used internally -c by one or more odepack solvers. -c -c rsav = real array of length 218 or more. -c isav = integer array of length 41 or more. -c job = flag indicating to save or restore the common blocks.. -c job = 1 if common is to be saved (written to rsav/isav) -c job = 2 if common is to be restored (read from rsav/isav) -c a call with job = 2 presumes a prior call with job = 1. 
-c----------------------------------------------------------------------- - integer isav, job - integer ieh, ils - integer i, lenils, lenrls - double precision rsav, rls - dimension rsav(1), isav(1) - common /ls0001/ rls(218), ils(39) - common /eh0001/ ieh(2) - data lenrls/218/, lenils/39/ -c - if (job .eq. 2) go to 100 -c - do 10 i = 1,lenrls - 10 rsav(i) = rls(i) - do 20 i = 1,lenils - 20 isav(i) = ils(i) - isav(lenils+1) = ieh(1) - isav(lenils+2) = ieh(2) - return -c - 100 continue - do 110 i = 1,lenrls - 110 rls(i) = rsav(i) - do 120 i = 1,lenils - 120 ils(i) = isav(i) - ieh(1) = isav(lenils+1) - ieh(2) = isav(lenils+2) - return -c----------------------- end of subroutine srcom ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/sro.f b/scipy-0.10.1/scipy/integrate/odepack/sro.f deleted file mode 100644 index 1cc170f15f..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/sro.f +++ /dev/null @@ -1,106 +0,0 @@ - subroutine sro - * (n, ip, ia,ja,a, q, r, dflag) -clll. optimize -c*********************************************************************** -c sro -- symmetric reordering of sparse symmetric matrix -c*********************************************************************** -c -c description -c -c the nonzero entries of the matrix m are assumed to be stored -c symmetrically in (ia,ja,a) format (i.e., not both m(i,j) and m(j,i) -c are stored if i ne j). -c -c sro does not rearrange the order of the rows, but does move -c nonzeroes from one row to another to ensure that if m(i,j) will be -c in the upper triangle of m with respect to the new ordering, then -c m(i,j) is stored in row i (and thus m(j,i) is not stored), whereas -c if m(i,j) will be in the strict lower triangle of m, then m(j,i) is -c stored in row j (and thus m(i,j) is not stored). -c -c -c additional parameters -c -c q - integer one-dimensional work array. dimension = n -c -c r - integer one-dimensional work array. 
dimension = number of -c nonzero entries in the upper triangle of m -c -c dflag - logical variable. if dflag = .true., then store nonzero -c diagonal elements at the beginning of the row -c -c----------------------------------------------------------------------- -c - integer ip(1), ia(1), ja(1), q(1), r(1) - double precision a(1), ak - logical dflag -c -c -c--phase 1 -- find row in which to store each nonzero -c----initialize count of nonzeroes to be stored in each row - do 1 i=1,n - 1 q(i) = 0 -c -c----for each nonzero element a(j) - do 3 i=1,n - jmin = ia(i) - jmax = ia(i+1) - 1 - if (jmin.gt.jmax) go to 3 - do 2 j=jmin,jmax -c -c--------find row (=r(j)) and column (=ja(j)) in which to store a(j) ... - k = ja(j) - if (ip(k).lt.ip(i)) ja(j) = i - if (ip(k).ge.ip(i)) k = i - r(j) = k -c -c--------... and increment count of nonzeroes (=q(r(j)) in that row - 2 q(k) = q(k) + 1 - 3 continue -c -c -c--phase 2 -- find new ia and permutation to apply to (ja,a) -c----determine pointers to delimit rows in permuted (ja,a) - do 4 i=1,n - ia(i+1) = ia(i) + q(i) - 4 q(i) = ia(i+1) -c -c----determine where each (ja(j),a(j)) is stored in permuted (ja,a) -c----for each nonzero element (in reverse order) - ilast = 0 - jmin = ia(1) - jmax = ia(n+1) - 1 - j = jmax - do 6 jdummy=jmin,jmax - i = r(j) - if (.not.dflag .or. ja(j).ne.i .or. 
i.eq.ilast) go to 5 -c -c------if dflag, then put diagonal nonzero at beginning of row - r(j) = ia(i) - ilast = i - go to 6 -c -c------put (off-diagonal) nonzero in last unused location in row - 5 q(i) = q(i) - 1 - r(j) = q(i) -c - 6 j = j-1 -c -c -c--phase 3 -- permute (ja,a) to upper triangular form (wrt new ordering) - do 8 j=jmin,jmax - 7 if (r(j).eq.j) go to 8 - k = r(j) - r(j) = r(k) - r(k) = k - jak = ja(k) - ja(k) = ja(j) - ja(j) = jak - ak = a(k) - a(k) = a(j) - a(j) = ak - go to 7 - 8 continue -c - return - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/stoda.f b/scipy-0.10.1/scipy/integrate/odepack/stoda.f deleted file mode 100644 index 3de22e0b2d..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/stoda.f +++ /dev/null @@ -1,637 +0,0 @@ - subroutine stoda (neq, y, yh, nyh, yh1, ewt, savf, acor, - 1 wm, iwm, f, jac, pjac, slvs) -clll. optimize - external f, jac, pjac, slvs - integer neq, nyh, iwm - integer iownd, ialth, ipup, lmax, meo, nqnyh, nslp, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer iownd2, icount, irflag, jtyp, mused, mxordn, mxords - integer i, i1, iredo, iret, j, jb, m, ncf, newq - integer lm1, lm1p1, lm2, lm2p1, nqm1, nqm2, isav - double precision y, yh, yh1, ewt, savf, acor, wm, rsav - double precision conit, crate, el, elco, hold, rmax, tesco, - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision rownd2, pdest, pdlast, ratio, cm1, cm2, - 1 pdnorm - double precision dcon, ddn, del, delp, dsm, dup, exdn, exsm, exup, - 1 r, rh, rhdn, rhsm, rhup, told, vmnorm - double precision alpha, dm1, dm2, exm1, exm2, pdh, pnorm, rate, - 1 rh1, rh1it, rh2, rm, sm1 - dimension neq(1), y(1), yh(nyh,*), yh1(1), ewt(1), savf(1), - 1 acor(1), wm(*), iwm(*), rsav(240), isav(50) - dimension sm1(12) - common /ls0001/ conit, crate, el(13), elco(13,12), - 1 hold, rmax, tesco(3,12), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, iownd(14), - 3 ialth, 
ipup, lmax, meo, nqnyh, nslp, - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - common /lsa001/ rownd2, pdest, pdlast, ratio, cm1(12), cm2(5), - 1 pdnorm, - 2 iownd2(3), icount, irflag, jtyp, mused, mxordn, mxords - data sm1/0.5d0, 0.575d0, 0.55d0, 0.45d0, 0.35d0, 0.25d0, - 1 0.20d0, 0.15d0, 0.10d0, 0.075d0, 0.050d0, 0.025d0/ -c----------------------------------------------------------------------- -c stoda performs one step of the integration of an initial value -c problem for a system of ordinary differential equations. -c note.. stoda is independent of the value of the iteration method -c indicator miter, when this is .ne. 0, and hence is independent -c of the type of chord method used, or the jacobian structure. -c communication with stoda is done with the following variables.. -c -c y = an array of length .ge. n used as the y argument in -c all calls to f and jac. -c neq = integer array containing problem size in neq(1), and -c passed as the neq argument in all calls to f and jac. -c yh = an nyh by lmax array containing the dependent variables -c and their approximate scaled derivatives, where -c lmax = maxord + 1. yh(i,j+1) contains the approximate -c j-th derivative of y(i), scaled by h**j/factorial(j) -c (j = 0,1,...,nq). on entry for the first step, the first -c two columns of yh must be set from the initial values. -c nyh = a constant integer .ge. n, the first dimension of yh. -c yh1 = a one-dimensional array occupying the same space as yh. -c ewt = an array of length n containing multiplicative weights -c for local error measurements. local errors in y(i) are -c compared to 1.0/ewt(i) in various error tests. -c savf = an array of working storage, of length n. -c acor = a work array of length n, used for the accumulated -c corrections. on a successful return, acor(i) contains -c the estimated one-step local error in y(i). 
-c wm,iwm = real and integer work arrays associated with matrix -c operations in chord iteration (miter .ne. 0). -c pjac = name of routine to evaluate and preprocess jacobian matrix -c and p = i - h*el0*jac, if a chord method is being used. -c it also returns an estimate of norm(jac) in pdnorm. -c slvs = name of routine to solve linear system in chord iteration. -c ccmax = maximum relative change in h*el0 before pjac is called. -c h = the step size to be attempted on the next step. -c h is altered by the error control algorithm during the -c problem. h can be either positive or negative, but its -c sign must remain constant throughout the problem. -c hmin = the minimum absolute value of the step size h to be used. -c hmxi = inverse of the maximum absolute value of h to be used. -c hmxi = 0.0 is allowed and corresponds to an infinite hmax. -c hmin and hmxi may be changed at any time, but will not -c take effect until the next change of h is considered. -c tn = the independent variable. tn is updated on each step taken. -c jstart = an integer used for input only, with the following -c values and meanings.. -c 0 perform the first step. -c .gt.0 take a new step continuing from the last. -c -1 take the next step with a new value of h, -c n, meth, miter, and/or matrix parameters. -c -2 take the next step with a new value of h, -c but with other inputs unchanged. -c on return, jstart is set to 1 to facilitate continuation. -c kflag = a completion code with the following meanings.. -c 0 the step was succesful. -c -1 the requested error could not be achieved. -c -2 corrector convergence could not be achieved. -c -3 fatal error in pjac or slvs. -c a return with kflag = -1 or -2 means either -c abs(h) = hmin or 10 consecutive failures occurred. -c on a return with kflag negative, the values of tn and -c the yh array are as of the beginning of the last -c step, and h is the last step size attempted. -c maxord = the maximum order of integration method to be allowed. 
-c maxcor = the maximum number of corrector iterations allowed. -c msbp = maximum number of steps between pjac calls (miter .gt. 0). -c mxncf = maximum number of convergence failures allowed. -c meth = current method. -c meth = 1 means adams method (nonstiff) -c meth = 2 means bdf method (stiff) -c meth may be reset by stoda. -c miter = corrector iteration method. -c miter = 0 means functional iteration. -c miter = jt .gt. 0 means a chord iteration corresponding -c to jacobian type jt. (the lsoda argument jt is -c communicated here as jtyp, but is not used in stoda -c except to load miter following a method switch.) -c miter may be reset by stoda. -c n = the number of first-order differential equations. -c----------------------------------------------------------------------- - kflag = 0 - told = tn - ncf = 0 - ierpj = 0 - iersl = 0 - jcur = 0 - icf = 0 - delp = 0.0d0 - if (jstart .gt. 0) go to 200 - if (jstart .eq. -1) go to 100 - if (jstart .eq. -2) go to 160 -c----------------------------------------------------------------------- -c on the first call, the order is set to 1, and other variables are -c initialized. rmax is the maximum ratio by which h can be increased -c in a single step. it is initially 1.e4 to compensate for the small -c initial h, but then is normally equal to 10. if a failure -c occurs (in corrector convergence or error test), rmax is set at 2 -c for the next increase. -c cfode is called to get the needed coefficients for both methods. -c----------------------------------------------------------------------- - lmax = maxord + 1 - nq = 1 - l = 2 - ialth = 2 - rmax = 10000.0d0 - rc = 0.0d0 - el0 = 1.0d0 - crate = 0.7d0 - hold = h - nslp = 0 - ipup = miter - iret = 3 -c initialize switching parameters. meth = 1 is assumed initially. 
----- - icount = 20 - irflag = 0 - pdest = 0.0d0 - pdlast = 0.0d0 - ratio = 5.0d0 - call cfode (2, elco, tesco) - do 10 i = 1,5 - 10 cm2(i) = tesco(2,i)*elco(i+1,i) - call cfode (1, elco, tesco) - do 20 i = 1,12 - 20 cm1(i) = tesco(2,i)*elco(i+1,i) - go to 150 -c----------------------------------------------------------------------- -c the following block handles preliminaries needed when jstart = -1. -c ipup is set to miter to force a matrix update. -c if an order increase is about to be considered (ialth = 1), -c ialth is reset to 2 to postpone consideration one more step. -c if the caller has changed meth, cfode is called to reset -c the coefficients of the method. -c if h is to be changed, yh must be rescaled. -c if h or meth is being changed, ialth is reset to l = nq + 1 -c to prevent further changes in h for that many steps. -c----------------------------------------------------------------------- - 100 ipup = miter - lmax = maxord + 1 - if (ialth .eq. 1) ialth = 2 - if (meth .eq. mused) go to 160 - call cfode (meth, elco, tesco) - ialth = l - iret = 1 -c----------------------------------------------------------------------- -c the el vector and related constants are reset -c whenever the order nq is changed, or at the start of the problem. -c----------------------------------------------------------------------- - 150 do 155 i = 1,l - 155 el(i) = elco(i,nq) - nqnyh = nq*nyh - rc = rc*el(1)/el0 - el0 = el(1) - conit = 0.5d0/dfloat(nq+2) - go to (160, 170, 200), iret -c----------------------------------------------------------------------- -c if h is being changed, the h ratio rh is checked against -c rmax, hmin, and hmxi, and the yh array rescaled. ialth is set to -c l = nq + 1 to prevent a change of h for that many steps, unless -c forced by a convergence or error test failure. -c----------------------------------------------------------------------- - 160 if (h .eq. 
hold) go to 200 - rh = h/hold - h = hold - iredo = 3 - go to 175 - 170 rh = dmax1(rh,hmin/dabs(h)) - 175 rh = dmin1(rh,rmax) - rh = rh/dmax1(1.0d0,dabs(h)*hmxi*rh) -c----------------------------------------------------------------------- -c if meth = 1, also restrict the new step size by the stability region. -c if this reduces h, set irflag to 1 so that if there are roundoff -c problems later, we can assume that is the cause of the trouble. -c----------------------------------------------------------------------- - if (meth .eq. 2) go to 178 - irflag = 0 - pdh = dmax1(dabs(h)*pdlast,0.000001d0) - if (rh*pdh*1.00001d0 .lt. sm1(nq)) go to 178 - rh = sm1(nq)/pdh - irflag = 1 - 178 continue - r = 1.0d0 - do 180 j = 2,l - r = r*rh - do 180 i = 1,n - 180 yh(i,j) = yh(i,j)*r - h = h*rh - rc = rc*rh - ialth = l - if (iredo .eq. 0) go to 690 -c----------------------------------------------------------------------- -c this section computes the predicted values by effectively -c multiplying the yh array by the pascal triangle matrix. -c rc is the ratio of new to old values of the coefficient h*el(1). -c when rc differs from 1 by more than ccmax, ipup is set to miter -c to force pjac to be called, if a jacobian is involved. -c in any case, pjac is called at least every msbp steps. -c----------------------------------------------------------------------- - 200 if (dabs(rc-1.0d0) .gt. ccmax) ipup = miter - if (nst .ge. nslp+msbp) ipup = miter - tn = tn + h - i1 = nqnyh + 1 - do 215 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 210 i = i1,nqnyh - 210 yh1(i) = yh1(i) + yh1(i+nyh) - 215 continue - pnorm = vmnorm (n, yh1, ewt) -c----------------------------------------------------------------------- -c up to maxcor corrector iterations are taken. a convergence test is -c made on the r.m.s. norm of each correction, weighted by the error -c weight vector ewt. the sum of the corrections is accumulated in the -c vector acor(i). the yh array is not altered in the corrector loop. 
-c----------------------------------------------------------------------- - 220 m = 0 - rate = 0.0d0 - del = 0.0d0 - do 230 i = 1,n - 230 y(i) = yh(i,1) - call srcma (rsav, isav, 1) - call f (neq, tn, y, savf) - call srcma (rsav, isav, 2) - nfe = nfe + 1 - if (ipup .le. 0) go to 250 -c----------------------------------------------------------------------- -c if indicated, the matrix p = i - h*el(1)*j is reevaluated and -c preprocessed before starting the corrector iteration. ipup is set -c to 0 as an indicator that this has been done. -c----------------------------------------------------------------------- - call pjac (neq, y, yh, nyh, ewt, acor, savf, wm, iwm, f, jac) - ipup = 0 - rc = 1.0d0 - nslp = nst - crate = 0.7d0 - if (ierpj .ne. 0) go to 430 - 250 do 260 i = 1,n - 260 acor(i) = 0.0d0 - 270 if (miter .ne. 0) go to 350 -c----------------------------------------------------------------------- -c in the case of functional iteration, update y directly from -c the result of the last function evaluation. -c----------------------------------------------------------------------- - do 290 i = 1,n - savf(i) = h*savf(i) - yh(i,2) - 290 y(i) = savf(i) - acor(i) - del = vmnorm (n, y, ewt) - do 300 i = 1,n - y(i) = yh(i,1) + el(1)*savf(i) - 300 acor(i) = savf(i) - go to 400 -c----------------------------------------------------------------------- -c in the case of the chord method, compute the corrector error, -c and solve the linear system with that as right-hand side and -c p as coefficient matrix. -c----------------------------------------------------------------------- - 350 do 360 i = 1,n - 360 y(i) = h*savf(i) - (yh(i,2) + acor(i)) - call slvs (wm, iwm, y, savf) - if (iersl .lt. 0) go to 430 - if (iersl .gt. 0) go to 410 - del = vmnorm (n, y, ewt) - do 380 i = 1,n - acor(i) = acor(i) + y(i) - 380 y(i) = yh(i,1) + el(1)*acor(i) -c----------------------------------------------------------------------- -c test for convergence. 
if m.gt.0, an estimate of the convergence -c rate constant is stored in crate, and this is used in the test. -c -c we first check for a change of iterates that is the size of -c roundoff error. if this occurs, the iteration has converged, and a -c new rate estimate is not formed. -c in all other cases, force at least two iterations to estimate a -c local lipschitz constant estimate for adams methods. -c on convergence, form pdest = local maximum lipschitz constant -c estimate. pdlast is the most recent nonzero estimate. -c----------------------------------------------------------------------- - 400 continue - if (del .le. 100.0d0*pnorm*uround) go to 450 - if (m .eq. 0 .and. meth .eq. 1) go to 405 - if (m .eq. 0) go to 402 - rm = 1024.0d0 - if (del .le. 1024.0d0*delp) rm = del/delp - rate = dmax1(rate,rm) - crate = dmax1(0.2d0*crate,rm) - 402 dcon = del*dmin1(1.0d0,1.5d0*crate)/(tesco(2,nq)*conit) - if (dcon .gt. 1.0d0) go to 405 - pdest = dmax1(pdest,rate/dabs(h*el(1))) - if (pdest .ne. 0.0d0) pdlast = pdest - go to 450 - 405 continue - m = m + 1 - if (m .eq. maxcor) go to 410 - if (m .ge. 2 .and. del .gt. 2.0d0*delp) go to 410 - delp = del - call srcma (rsav, isav, 1) - call f (neq, tn, y, savf) - call srcma (rsav, isav, 2) - nfe = nfe + 1 - go to 270 -c----------------------------------------------------------------------- -c the corrector iteration failed to converge. -c if miter .ne. 0 and the jacobian is out of date, pjac is called for -c the next try. otherwise the yh array is retracted to its values -c before prediction, and h is reduced, if possible. if h cannot be -c reduced or mxncf failures have occurred, exit with kflag = -2. -c----------------------------------------------------------------------- - 410 if (miter .eq. 0 .or. jcur .eq. 
1) go to 430 - icf = 1 - ipup = miter - go to 220 - 430 icf = 2 - ncf = ncf + 1 - rmax = 2.0d0 - tn = told - i1 = nqnyh + 1 - do 445 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 440 i = i1,nqnyh - 440 yh1(i) = yh1(i) - yh1(i+nyh) - 445 continue - if (ierpj .lt. 0 .or. iersl .lt. 0) go to 680 - if (dabs(h) .le. hmin*1.00001d0) go to 670 - if (ncf .eq. mxncf) go to 670 - rh = 0.25d0 - ipup = miter - iredo = 1 - go to 170 -c----------------------------------------------------------------------- -c the corrector has converged. jcur is set to 0 -c to signal that the jacobian involved may need updating later. -c the local error test is made and control passes to statement 500 -c if it fails. -c----------------------------------------------------------------------- - 450 jcur = 0 - if (m .eq. 0) dsm = del/tesco(2,nq) - if (m .gt. 0) dsm = vmnorm (n, acor, ewt)/tesco(2,nq) - if (dsm .gt. 1.0d0) go to 500 -c----------------------------------------------------------------------- -c after a successful step, update the yh array. -c decrease icount by 1, and if it is -1, consider switching methods. -c if a method switch is made, reset various parameters, -c rescale the yh array, and exit. if there is no switch, -c consider changing h if ialth = 1. otherwise decrease ialth by 1. -c if ialth is then 1 and nq .lt. maxord, then acor is saved for -c use in a possible order increase on the next step. -c if a change in h is considered, an increase or decrease in order -c by one is considered also. a change in h is made only if it is by a -c factor of at least 1.1. if not, ialth is set to 3 to prevent -c testing for that many steps. -c----------------------------------------------------------------------- - kflag = 0 - iredo = 0 - nst = nst + 1 - hu = h - nqu = nq - mused = meth - do 460 j = 1,l - do 460 i = 1,n - 460 yh(i,j) = yh(i,j) + el(j)*acor(i) - icount = icount - 1 - if (icount .ge. 0) go to 488 - if (meth .eq. 
2) go to 480 -c----------------------------------------------------------------------- -c we are currently using an adams method. consider switching to bdf. -c if the current order is greater than 5, assume the problem is -c not stiff, and skip this section. -c if the lipschitz constant and error estimate are not polluted -c by roundoff, go to 470 and perform the usual test. -c otherwise, switch to the bdf methods if the last step was -c restricted to insure stability (irflag = 1), and stay with adams -c method if not. when switching to bdf with polluted error estimates, -c in the absence of other information, double the step size. -c -c when the estimates are ok, we make the usual test by computing -c the step size we could have (ideally) used on this step, -c with the current (adams) method, and also that for the bdf. -c if nq .gt. mxords, we consider changing to order mxords on switching. -c compare the two step sizes to decide whether to switch. -c the step size advantage must be at least ratio = 5 to switch. -c----------------------------------------------------------------------- - if (nq .gt. 5) go to 488 - if (dsm .gt. 100.0d0*pnorm*uround .and. pdest .ne. 0.0d0) - 1 go to 470 - if (irflag .eq. 0) go to 488 - rh2 = 2.0d0 - nqm2 = min0(nq,mxords) - go to 478 - 470 continue - exsm = 1.0d0/dfloat(l) - rh1 = 1.0d0/(1.2d0*dsm**exsm + 0.0000012d0) - rh1it = 2.0d0*rh1 - pdh = pdlast*dabs(h) - if (pdh*rh1 .gt. 0.00001d0) rh1it = sm1(nq)/pdh - rh1 = dmin1(rh1,rh1it) - if (nq .le. mxords) go to 474 - nqm2 = mxords - lm2 = mxords + 1 - exm2 = 1.0d0/dfloat(lm2) - lm2p1 = lm2 + 1 - dm2 = vmnorm (n, yh(1,lm2p1), ewt)/cm2(mxords) - rh2 = 1.0d0/(1.2d0*dm2**exm2 + 0.0000012d0) - go to 476 - 474 dm2 = dsm*(cm1(nq)/cm2(nq)) - rh2 = 1.0d0/(1.2d0*dm2**exsm + 0.0000012d0) - nqm2 = nq - 476 continue - if (rh2 .lt. ratio*rh1) go to 488 -c the switch test passed. reset relevant quantities for bdf. 
---------- - 478 rh = rh2 - icount = 20 - meth = 2 - miter = jtyp - pdlast = 0.0d0 - nq = nqm2 - l = nq + 1 - go to 170 -c----------------------------------------------------------------------- -c we are currently using a bdf method. consider switching to adams. -c compute the step size we could have (ideally) used on this step, -c with the current (bdf) method, and also that for the adams. -c if nq .gt. mxordn, we consider changing to order mxordn on switching. -c compare the two step sizes to decide whether to switch. -c the step size advantage must be at least 5/ratio = 1 to switch. -c if the step size for adams would be so small as to cause -c roundoff pollution, we stay with bdf. -c----------------------------------------------------------------------- - 480 continue - exsm = 1.0d0/dfloat(l) - if (mxordn .ge. nq) go to 484 - nqm1 = mxordn - lm1 = mxordn + 1 - exm1 = 1.0d0/dfloat(lm1) - lm1p1 = lm1 + 1 - dm1 = vmnorm (n, yh(1,lm1p1), ewt)/cm1(mxordn) - rh1 = 1.0d0/(1.2d0*dm1**exm1 + 0.0000012d0) - go to 486 - 484 dm1 = dsm*(cm2(nq)/cm1(nq)) - rh1 = 1.0d0/(1.2d0*dm1**exsm + 0.0000012d0) - nqm1 = nq - exm1 = exsm - 486 rh1it = 2.0d0*rh1 - pdh = pdnorm*dabs(h) - if (pdh*rh1 .gt. 0.00001d0) rh1it = sm1(nqm1)/pdh - rh1 = dmin1(rh1,rh1it) - rh2 = 1.0d0/(1.2d0*dsm**exsm + 0.0000012d0) - if (rh1*ratio .lt. 5.0d0*rh2) go to 488 - alpha = dmax1(0.001d0,rh1) - dm1 = (alpha**exm1)*dm1 - if (dm1 .le. 1000.0d0*uround*pnorm) go to 488 -c the switch test passed. reset relevant quantities for adams. -------- - rh = rh1 - icount = 20 - meth = 1 - miter = 0 - pdlast = 0.0d0 - nq = nqm1 - l = nq + 1 - go to 170 -c -c no method switch is being made. do the usual step/order selection. -- - 488 continue - ialth = ialth - 1 - if (ialth .eq. 0) go to 520 - if (ialth .gt. 1) go to 700 - if (l .eq. lmax) go to 700 - do 490 i = 1,n - 490 yh(i,lmax) = acor(i) - go to 700 -c----------------------------------------------------------------------- -c the error test failed. 
kflag keeps track of multiple failures. -c restore tn and the yh array to their previous values, and prepare -c to try the step again. compute the optimum step size for this or -c one lower order. after 2 or more failures, h is forced to decrease -c by a factor of 0.2 or less. -c----------------------------------------------------------------------- - 500 kflag = kflag - 1 - tn = told - i1 = nqnyh + 1 - do 515 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 510 i = i1,nqnyh - 510 yh1(i) = yh1(i) - yh1(i+nyh) - 515 continue - rmax = 2.0d0 - if (dabs(h) .le. hmin*1.00001d0) go to 660 - if (kflag .le. -3) go to 640 - iredo = 2 - rhup = 0.0d0 - go to 540 -c----------------------------------------------------------------------- -c regardless of the success or failure of the step, factors -c rhdn, rhsm, and rhup are computed, by which h could be multiplied -c at order nq - 1, order nq, or order nq + 1, respectively. -c in the case of failure, rhup = 0.0 to avoid an order increase. -c the largest of these is determined and the new order chosen -c accordingly. if the order is to be increased, we compute one -c additional scaled derivative. -c----------------------------------------------------------------------- - 520 rhup = 0.0d0 - if (l .eq. lmax) go to 540 - do 530 i = 1,n - 530 savf(i) = acor(i) - yh(i,lmax) - dup = vmnorm (n, savf, ewt)/tesco(3,nq) - exup = 1.0d0/dfloat(l+1) - rhup = 1.0d0/(1.4d0*dup**exup + 0.0000014d0) - 540 exsm = 1.0d0/dfloat(l) - rhsm = 1.0d0/(1.2d0*dsm**exsm + 0.0000012d0) - rhdn = 0.0d0 - if (nq .eq. 1) go to 550 - ddn = vmnorm (n, yh(1,l), ewt)/tesco(1,nq) - exdn = 1.0d0/dfloat(nq) - rhdn = 1.0d0/(1.3d0*ddn**exdn + 0.0000013d0) -c if meth = 1, limit rh according to the stability region also. -------- - 550 if (meth .eq. 2) go to 560 - pdh = dmax1(dabs(h)*pdlast,0.000001d0) - if (l .lt. lmax) rhup = dmin1(rhup,sm1(l)/pdh) - rhsm = dmin1(rhsm,sm1(nq)/pdh) - if (nq .gt. 1) rhdn = dmin1(rhdn,sm1(nq-1)/pdh) - pdest = 0.0d0 - 560 if (rhsm .ge. 
rhup) go to 570 - if (rhup .gt. rhdn) go to 590 - go to 580 - 570 if (rhsm .lt. rhdn) go to 580 - newq = nq - rh = rhsm - go to 620 - 580 newq = nq - 1 - rh = rhdn - if (kflag .lt. 0 .and. rh .gt. 1.0d0) rh = 1.0d0 - go to 620 - 590 newq = l - rh = rhup - if (rh .lt. 1.1d0) go to 610 - r = el(l)/dfloat(l) - do 600 i = 1,n - 600 yh(i,newq+1) = acor(i)*r - go to 630 - 610 ialth = 3 - go to 700 -c if meth = 1 and h is restricted by stability, bypass 10 percent test. - 620 if (meth .eq. 2) go to 622 - if (rh*pdh*1.00001d0 .ge. sm1(newq)) go to 625 - 622 if (kflag .eq. 0 .and. rh .lt. 1.1d0) go to 610 - 625 if (kflag .le. -2) rh = dmin1(rh,0.2d0) -c----------------------------------------------------------------------- -c if there is a change of order, reset nq, l, and the coefficients. -c in any case h is reset according to rh and the yh array is rescaled. -c then exit from 690 if the step was ok, or redo the step otherwise. -c----------------------------------------------------------------------- - if (newq .eq. nq) go to 170 - 630 nq = newq - l = nq + 1 - iret = 2 - go to 150 -c----------------------------------------------------------------------- -c control reaches this section if 3 or more failures have occured. -c if 10 failures have occurred, exit with kflag = -1. -c it is assumed that the derivatives that have accumulated in the -c yh array have errors of the wrong order. hence the first -c derivative is recomputed, and the order is set to 1. then -c h is reduced by a factor of 10, and the step is retried, -c until it succeeds or h reaches hmin. -c----------------------------------------------------------------------- - 640 if (kflag .eq. -10) go to 660 - rh = 0.1d0 - rh = dmax1(hmin/dabs(h),rh) - h = h*rh - do 645 i = 1,n - 645 y(i) = yh(i,1) - call srcma (rsav, isav, 1) - call f (neq, tn, y, savf) - call srcma (rsav, isav, 2) - nfe = nfe + 1 - do 650 i = 1,n - 650 yh(i,2) = h*savf(i) - ipup = miter - ialth = 5 - if (nq .eq. 
1) go to 200 - nq = 1 - l = 2 - iret = 3 - go to 150 -c----------------------------------------------------------------------- -c all returns are made through this section. h is saved in hold -c to allow the caller to change h on the next step. -c----------------------------------------------------------------------- - 660 kflag = -1 - go to 720 - 670 kflag = -2 - go to 720 - 680 kflag = -3 - go to 720 - 690 rmax = 10.0d0 - 700 r = 1.0d0/tesco(2,nqu) - do 710 i = 1,n - 710 acor(i) = acor(i)*r - 720 hold = h - jstart = 1 - return -c----------------------- end of subroutine stoda ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/stode.f b/scipy-0.10.1/scipy/integrate/odepack/stode.f deleted file mode 100644 index 75398819eb..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/stode.f +++ /dev/null @@ -1,473 +0,0 @@ - subroutine stode (neq, y, yh, nyh, yh1, ewt, savf, acor, - 1 wm, iwm, f, jac, pjac, slvs) -clll. optimize - external f, jac, pjac, slvs - integer neq, nyh, iwm - integer iownd, ialth, ipup, lmax, meo, nqnyh, nslp, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu - integer i, i1, iredo, iret, j, jb, m, ncf, newq - double precision y, yh, yh1, ewt, savf, acor, wm - double precision conit, crate, el, elco, hold, rmax, tesco, - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision dcon, ddn, del, delp, dsm, dup, exdn, exsm, exup, - 1 r, rh, rhdn, rhsm, rhup, told, vnorm - dimension neq(1), y(1), yh(nyh,*), yh1(1), ewt(1), savf(1), - 1 acor(1), wm(*), iwm(*) - common /ls0001/ conit, crate, el(13), elco(13,12), - 1 hold, rmax, tesco(3,12), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, iownd(14), - 3 ialth, ipup, lmax, meo, nqnyh, nslp, - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nfe, nje, nqu -c----------------------------------------------------------------------- -c stode performs 
one step of the integration of an initial value -c problem for a system of ordinary differential equations. -c note.. stode is independent of the value of the iteration method -c indicator miter, when this is .ne. 0, and hence is independent -c of the type of chord method used, or the jacobian structure. -c communication with stode is done with the following variables.. -c -c neq = integer array containing problem size in neq(1), and -c passed as the neq argument in all calls to f and jac. -c y = an array of length .ge. n used as the y argument in -c all calls to f and jac. -c yh = an nyh by lmax array containing the dependent variables -c and their approximate scaled derivatives, where -c lmax = maxord + 1. yh(i,j+1) contains the approximate -c j-th derivative of y(i), scaled by h**j/factorial(j) -c (j = 0,1,...,nq). on entry for the first step, the first -c two columns of yh must be set from the initial values. -c nyh = a constant integer .ge. n, the first dimension of yh. -c yh1 = a one-dimensional array occupying the same space as yh. -c ewt = an array of length n containing multiplicative weights -c for local error measurements. local errors in y(i) are -c compared to 1.0/ewt(i) in various error tests. -c savf = an array of working storage, of length n. -c also used for input of yh(*,maxord+2) when jstart = -1 -c and maxord .lt. the current order nq. -c acor = a work array of length n, used for the accumulated -c corrections. on a successful return, acor(i) contains -c the estimated one-step local error in y(i). -c wm,iwm = real and integer work arrays associated with matrix -c operations in chord iteration (miter .ne. 0). -c pjac = name of routine to evaluate and preprocess jacobian matrix -c and p = i - h*el0*jac, if a chord method is being used. -c slvs = name of routine to solve linear system in chord iteration. -c ccmax = maximum relative change in h*el0 before pjac is called. -c h = the step size to be attempted on the next step. 
-c h is altered by the error control algorithm during the -c problem. h can be either positive or negative, but its -c sign must remain constant throughout the problem. -c hmin = the minimum absolute value of the step size h to be used. -c hmxi = inverse of the maximum absolute value of h to be used. -c hmxi = 0.0 is allowed and corresponds to an infinite hmax. -c hmin and hmxi may be changed at any time, but will not -c take effect until the next change of h is considered. -c tn = the independent variable. tn is updated on each step taken. -c jstart = an integer used for input only, with the following -c values and meanings.. -c 0 perform the first step. -c .gt.0 take a new step continuing from the last. -c -1 take the next step with a new value of h, maxord, -c n, meth, miter, and/or matrix parameters. -c -2 take the next step with a new value of h, -c but with other inputs unchanged. -c on return, jstart is set to 1 to facilitate continuation. -c kflag = a completion code with the following meanings.. -c 0 the step was succesful. -c -1 the requested error could not be achieved. -c -2 corrector convergence could not be achieved. -c -3 fatal error in pjac or slvs. -c a return with kflag = -1 or -2 means either -c abs(h) = hmin or 10 consecutive failures occurred. -c on a return with kflag negative, the values of tn and -c the yh array are as of the beginning of the last -c step, and h is the last step size attempted. -c maxord = the maximum order of integration method to be allowed. -c maxcor = the maximum number of corrector iterations allowed. -c msbp = maximum number of steps between pjac calls (miter .gt. 0). -c mxncf = maximum number of convergence failures allowed. -c meth/miter = the method flags. see description in driver. -c n = the number of first-order differential equations. 
-c----------------------------------------------------------------------- - kflag = 0 - told = tn - ncf = 0 - ierpj = 0 - iersl = 0 - jcur = 0 - icf = 0 - delp = 0.0d0 - if (jstart .gt. 0) go to 200 - if (jstart .eq. -1) go to 100 - if (jstart .eq. -2) go to 160 -c----------------------------------------------------------------------- -c on the first call, the order is set to 1, and other variables are -c initialized. rmax is the maximum ratio by which h can be increased -c in a single step. it is initially 1.e4 to compensate for the small -c initial h, but then is normally equal to 10. if a failure -c occurs (in corrector convergence or error test), rmax is set at 2 -c for the next increase. -c----------------------------------------------------------------------- - lmax = maxord + 1 - nq = 1 - l = 2 - ialth = 2 - rmax = 10000.0d0 - rc = 0.0d0 - el0 = 1.0d0 - crate = 0.7d0 - hold = h - meo = meth - nslp = 0 - ipup = miter - iret = 3 - go to 140 -c----------------------------------------------------------------------- -c the following block handles preliminaries needed when jstart = -1. -c ipup is set to miter to force a matrix update. -c if an order increase is about to be considered (ialth = 1), -c ialth is reset to 2 to postpone consideration one more step. -c if the caller has changed meth, cfode is called to reset -c the coefficients of the method. -c if the caller has changed maxord to a value less than the current -c order nq, nq is reduced to maxord, and a new h chosen accordingly. -c if h is to be changed, yh must be rescaled. -c if h or meth is being changed, ialth is reset to l = nq + 1 -c to prevent further changes in h for that many steps. -c----------------------------------------------------------------------- - 100 ipup = miter - lmax = maxord + 1 - if (ialth .eq. 1) ialth = 2 - if (meth .eq. meo) go to 110 - call cfode (meth, elco, tesco) - meo = meth - if (nq .gt. maxord) go to 120 - ialth = l - iret = 1 - go to 150 - 110 if (nq .le. 
maxord) go to 160 - 120 nq = maxord - l = lmax - do 125 i = 1,l - 125 el(i) = elco(i,nq) - nqnyh = nq*nyh - rc = rc*el(1)/el0 - el0 = el(1) - conit = 0.5d0/dfloat(nq+2) - ddn = vnorm (n, savf, ewt)/tesco(1,l) - exdn = 1.0d0/dfloat(l) - rhdn = 1.0d0/(1.3d0*ddn**exdn + 0.0000013d0) - rh = dmin1(rhdn,1.0d0) - iredo = 3 - if (h .eq. hold) go to 170 - rh = dmin1(rh,dabs(h/hold)) - h = hold - go to 175 -c----------------------------------------------------------------------- -c cfode is called to get all the integration coefficients for the -c current meth. then the el vector and related constants are reset -c whenever the order nq is changed, or at the start of the problem. -c----------------------------------------------------------------------- - 140 call cfode (meth, elco, tesco) - 150 do 155 i = 1,l - 155 el(i) = elco(i,nq) - nqnyh = nq*nyh - rc = rc*el(1)/el0 - el0 = el(1) - conit = 0.5d0/dfloat(nq+2) - go to (160, 170, 200), iret -c----------------------------------------------------------------------- -c if h is being changed, the h ratio rh is checked against -c rmax, hmin, and hmxi, and the yh array rescaled. ialth is set to -c l = nq + 1 to prevent a change of h for that many steps, unless -c forced by a convergence or error test failure. -c----------------------------------------------------------------------- - 160 if (h .eq. hold) go to 200 - rh = h/hold - h = hold - iredo = 3 - go to 175 - 170 rh = dmax1(rh,hmin/dabs(h)) - 175 rh = dmin1(rh,rmax) - rh = rh/dmax1(1.0d0,dabs(h)*hmxi*rh) - r = 1.0d0 - do 180 j = 2,l - r = r*rh - do 180 i = 1,n - 180 yh(i,j) = yh(i,j)*r - h = h*rh - rc = rc*rh - ialth = l - if (iredo .eq. 0) go to 690 -c----------------------------------------------------------------------- -c this section computes the predicted values by effectively -c multiplying the yh array by the pascal triangle matrix. -c rc is the ratio of new to old values of the coefficient h*el(1). 
-c when rc differs from 1 by more than ccmax, ipup is set to miter -c to force pjac to be called, if a jacobian is involved. -c in any case, pjac is called at least every msbp steps. -c----------------------------------------------------------------------- - 200 if (dabs(rc-1.0d0) .gt. ccmax) ipup = miter - if (nst .ge. nslp+msbp) ipup = miter - tn = tn + h - i1 = nqnyh + 1 - do 215 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 210 i = i1,nqnyh - 210 yh1(i) = yh1(i) + yh1(i+nyh) - 215 continue -c----------------------------------------------------------------------- -c up to maxcor corrector iterations are taken. a convergence test is -c made on the r.m.s. norm of each correction, weighted by the error -c weight vector ewt. the sum of the corrections is accumulated in the -c vector acor(i). the yh array is not altered in the corrector loop. -c----------------------------------------------------------------------- - 220 m = 0 - do 230 i = 1,n - 230 y(i) = yh(i,1) - call f (neq, tn, y, savf) - nfe = nfe + 1 - if (ipup .le. 0) go to 250 -c----------------------------------------------------------------------- -c if indicated, the matrix p = i - h*el(1)*j is reevaluated and -c preprocessed before starting the corrector iteration. ipup is set -c to 0 as an indicator that this has been done. -c----------------------------------------------------------------------- - call pjac (neq, y, yh, nyh, ewt, acor, savf, wm, iwm, f, jac) - ipup = 0 - rc = 1.0d0 - nslp = nst - crate = 0.7d0 - if (ierpj .ne. 0) go to 430 - 250 do 260 i = 1,n - 260 acor(i) = 0.0d0 - 270 if (miter .ne. 0) go to 350 -c----------------------------------------------------------------------- -c in the case of functional iteration, update y directly from -c the result of the last function evaluation. 
-c----------------------------------------------------------------------- - do 290 i = 1,n - savf(i) = h*savf(i) - yh(i,2) - 290 y(i) = savf(i) - acor(i) - del = vnorm (n, y, ewt) - do 300 i = 1,n - y(i) = yh(i,1) + el(1)*savf(i) - 300 acor(i) = savf(i) - go to 400 -c----------------------------------------------------------------------- -c in the case of the chord method, compute the corrector error, -c and solve the linear system with that as right-hand side and -c p as coefficient matrix. -c----------------------------------------------------------------------- - 350 do 360 i = 1,n - 360 y(i) = h*savf(i) - (yh(i,2) + acor(i)) - call slvs (wm, iwm, y, savf) - if (iersl .lt. 0) go to 430 - if (iersl .gt. 0) go to 410 - del = vnorm (n, y, ewt) - do 380 i = 1,n - acor(i) = acor(i) + y(i) - 380 y(i) = yh(i,1) + el(1)*acor(i) -c----------------------------------------------------------------------- -c test for convergence. if m.gt.0, an estimate of the convergence -c rate constant is stored in crate, and this is used in the test. -c----------------------------------------------------------------------- - 400 if (m .ne. 0) crate = dmax1(0.2d0*crate,del/delp) - dcon = del*dmin1(1.0d0,1.5d0*crate)/(tesco(2,nq)*conit) - if (dcon .le. 1.0d0) go to 450 - m = m + 1 - if (m .eq. maxcor) go to 410 - if (m .ge. 2 .and. del .gt. 2.0d0*delp) go to 410 - delp = del - call f (neq, tn, y, savf) - nfe = nfe + 1 - go to 270 -c----------------------------------------------------------------------- -c the corrector iteration failed to converge. -c if miter .ne. 0 and the jacobian is out of date, pjac is called for -c the next try. otherwise the yh array is retracted to its values -c before prediction, and h is reduced, if possible. if h cannot be -c reduced or mxncf failures have occurred, exit with kflag = -2. -c----------------------------------------------------------------------- - 410 if (miter .eq. 0 .or. jcur .eq. 
1) go to 430 - icf = 1 - ipup = miter - go to 220 - 430 icf = 2 - ncf = ncf + 1 - rmax = 2.0d0 - tn = told - i1 = nqnyh + 1 - do 445 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 440 i = i1,nqnyh - 440 yh1(i) = yh1(i) - yh1(i+nyh) - 445 continue - if (ierpj .lt. 0 .or. iersl .lt. 0) go to 680 - if (dabs(h) .le. hmin*1.00001d0) go to 670 - if (ncf .eq. mxncf) go to 670 - rh = 0.25d0 - ipup = miter - iredo = 1 - go to 170 -c----------------------------------------------------------------------- -c the corrector has converged. jcur is set to 0 -c to signal that the jacobian involved may need updating later. -c the local error test is made and control passes to statement 500 -c if it fails. -c----------------------------------------------------------------------- - 450 jcur = 0 - if (m .eq. 0) dsm = del/tesco(2,nq) - if (m .gt. 0) dsm = vnorm (n, acor, ewt)/tesco(2,nq) - if (dsm .gt. 1.0d0) go to 500 -c----------------------------------------------------------------------- -c after a successful step, update the yh array. -c consider changing h if ialth = 1. otherwise decrease ialth by 1. -c if ialth is then 1 and nq .lt. maxord, then acor is saved for -c use in a possible order increase on the next step. -c if a change in h is considered, an increase or decrease in order -c by one is considered also. a change in h is made only if it is by a -c factor of at least 1.1. if not, ialth is set to 3 to prevent -c testing for that many steps. -c----------------------------------------------------------------------- - kflag = 0 - iredo = 0 - nst = nst + 1 - hu = h - nqu = nq - do 470 j = 1,l - do 470 i = 1,n - 470 yh(i,j) = yh(i,j) + el(j)*acor(i) - ialth = ialth - 1 - if (ialth .eq. 0) go to 520 - if (ialth .gt. 1) go to 700 - if (l .eq. lmax) go to 700 - do 490 i = 1,n - 490 yh(i,lmax) = acor(i) - go to 700 -c----------------------------------------------------------------------- -c the error test failed. kflag keeps track of multiple failures. 
-c restore tn and the yh array to their previous values, and prepare -c to try the step again. compute the optimum step size for this or -c one lower order. after 2 or more failures, h is forced to decrease -c by a factor of 0.2 or less. -c----------------------------------------------------------------------- - 500 kflag = kflag - 1 - tn = told - i1 = nqnyh + 1 - do 515 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 510 i = i1,nqnyh - 510 yh1(i) = yh1(i) - yh1(i+nyh) - 515 continue - rmax = 2.0d0 - if (dabs(h) .le. hmin*1.00001d0) go to 660 - if (kflag .le. -3) go to 640 - iredo = 2 - rhup = 0.0d0 - go to 540 -c----------------------------------------------------------------------- -c regardless of the success or failure of the step, factors -c rhdn, rhsm, and rhup are computed, by which h could be multiplied -c at order nq - 1, order nq, or order nq + 1, respectively. -c in the case of failure, rhup = 0.0 to avoid an order increase. -c the largest of these is determined and the new order chosen -c accordingly. if the order is to be increased, we compute one -c additional scaled derivative. -c----------------------------------------------------------------------- - 520 rhup = 0.0d0 - if (l .eq. lmax) go to 540 - do 530 i = 1,n - 530 savf(i) = acor(i) - yh(i,lmax) - dup = vnorm (n, savf, ewt)/tesco(3,nq) - exup = 1.0d0/dfloat(l+1) - rhup = 1.0d0/(1.4d0*dup**exup + 0.0000014d0) - 540 exsm = 1.0d0/dfloat(l) - rhsm = 1.0d0/(1.2d0*dsm**exsm + 0.0000012d0) - rhdn = 0.0d0 - if (nq .eq. 1) go to 560 - ddn = vnorm (n, yh(1,l), ewt)/tesco(1,nq) - exdn = 1.0d0/dfloat(nq) - rhdn = 1.0d0/(1.3d0*ddn**exdn + 0.0000013d0) - 560 if (rhsm .ge. rhup) go to 570 - if (rhup .gt. rhdn) go to 590 - go to 580 - 570 if (rhsm .lt. rhdn) go to 580 - newq = nq - rh = rhsm - go to 620 - 580 newq = nq - 1 - rh = rhdn - if (kflag .lt. 0 .and. rh .gt. 1.0d0) rh = 1.0d0 - go to 620 - 590 newq = l - rh = rhup - if (rh .lt. 
1.1d0) go to 610 - r = el(l)/dfloat(l) - do 600 i = 1,n - 600 yh(i,newq+1) = acor(i)*r - go to 630 - 610 ialth = 3 - go to 700 - 620 if ((kflag .eq. 0) .and. (rh .lt. 1.1d0)) go to 610 - if (kflag .le. -2) rh = dmin1(rh,0.2d0) -c----------------------------------------------------------------------- -c if there is a change of order, reset nq, l, and the coefficients. -c in any case h is reset according to rh and the yh array is rescaled. -c then exit from 690 if the step was ok, or redo the step otherwise. -c----------------------------------------------------------------------- - if (newq .eq. nq) go to 170 - 630 nq = newq - l = nq + 1 - iret = 2 - go to 150 -c----------------------------------------------------------------------- -c control reaches this section if 3 or more failures have occured. -c if 10 failures have occurred, exit with kflag = -1. -c it is assumed that the derivatives that have accumulated in the -c yh array have errors of the wrong order. hence the first -c derivative is recomputed, and the order is set to 1. then -c h is reduced by a factor of 10, and the step is retried, -c until it succeeds or h reaches hmin. -c----------------------------------------------------------------------- - 640 if (kflag .eq. -10) go to 660 - rh = 0.1d0 - rh = dmax1(hmin/dabs(h),rh) - h = h*rh - do 645 i = 1,n - 645 y(i) = yh(i,1) - call f (neq, tn, y, savf) - nfe = nfe + 1 - do 650 i = 1,n - 650 yh(i,2) = h*savf(i) - ipup = miter - ialth = 5 - if (nq .eq. 1) go to 200 - nq = 1 - l = 2 - iret = 3 - go to 150 -c----------------------------------------------------------------------- -c all returns are made through this section. h is saved in hold -c to allow the caller to change h on the next step. 
-c----------------------------------------------------------------------- - 660 kflag = -1 - go to 720 - 670 kflag = -2 - go to 720 - 680 kflag = -3 - go to 720 - 690 rmax = 10.0d0 - 700 r = 1.0d0/tesco(2,nqu) - do 710 i = 1,n - 710 acor(i) = acor(i)*r - 720 hold = h - jstart = 1 - return -c----------------------- end of subroutine stode ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/stodi.f b/scipy-0.10.1/scipy/integrate/odepack/stodi.f deleted file mode 100644 index 265c57d8f0..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/stodi.f +++ /dev/null @@ -1,459 +0,0 @@ - subroutine stodi (neq, y, yh, nyh, yh1, ewt, savf, savr, - 1 acor, wm, iwm, res, adda, jac, pjac, slvs ) -clll. optimize - external res, adda, jac, pjac, slvs - integer neq, nyh, iwm - integer iownd, ialth, ipup, lmax, meo, nqnyh, nslp, - 1 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 2 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu - integer i, i1, iredo, ires, iret, j, jb, kgo, m, ncf, newq - double precision y, yh, yh1, ewt, savf, savr, acor, wm - double precision conit, crate, el, elco, hold, rmax, tesco, - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround - double precision dcon, ddn, del, delp, dsm, dup, - 1 eljh, el1h, exdn, exsm, exup, - 2 r, rh, rhdn, rhsm, rhup, told, vnorm - dimension neq(1), y(1), yh(nyh,*), yh1(1), ewt(1), savf(1), - 1 savr(1), acor(1), wm(*), iwm(*) - common /ls0001/ conit, crate, el(13), elco(13,12), - 1 hold, rmax, tesco(3,12), - 2 ccmax, el0, h, hmin, hmxi, hu, rc, tn, uround, iownd(14), - 3 ialth, ipup, lmax, meo, nqnyh, nslp, - 4 icf, ierpj, iersl, jcur, jstart, kflag, l, meth, miter, - 5 maxord, maxcor, msbp, mxncf, n, nq, nst, nre, nje, nqu -c----------------------------------------------------------------------- -c stodi performs one step of the integration of an initial value -c problem for a system of ordinary differential equations. -c note.. 
stodi is independent of the value of the iteration method -c indicator miter, and hence is independent -c of the type of chord method used, or the jacobian structure. -c communication with stodi is done with the following variables.. -c -c neq = integer array containing problem size in neq(1), and -c passed as the neq argument in all calls to res, adda, -c and jac. -c y = an array of length .ge. n used as the y argument in -c all calls to res, jac, and adda. -c neq = integer array containing problem size in neq(1), and -c passed as the neq argument in all calls to res, g, adda, -c and jac -c yh = an nyh by lmax array containing the dependent variables -c and their approximate scaled derivatives, where -c lmax = maxord + 1. yh(i,j+1) contains the approximate -c j-th derivative of y(i), scaled by h**j/factorial(j) -c (j = 0,1,...,nq). on entry for the first step, the first -c two columns of yh must et from the initial values. -c nyh = a constant integer .ge. n, the first dimension of yh. -c yh1 = a one-dimensional array occupying the same space as yh. -c ewt = an array of length n containing multiplicative weights -c for local error measurements. local errors in y(i) are -c compared to 1.0/ewt(i) in various error tests. -c savf = an array of working storage, of length n. also used for -c input of yh(*,maxord+2) when jstart = -1 and maxord is less -c than the current order nq. -c same as ydoti in driver. -c savr = an array of working storage, of length n. -c acor = a work array of length n used for the accumulated -c corrections. on a succesful return, acor(i) contains -c the estimated one-step local error in y(i). -c wm,iwm = real and integer work arrays associated with matrix -c operations in chord iteration. -c pjac = name of routine to evaluate and preprocess jacobian matrix. -c slvs = name of routine to solve linear system in chord iteration. -c ccmax = maximum relative change in h*el0 before pjac is called. -c h = the step size to be attempted on the next step. 
-c h is altered by the error control algorithm during the -c problem. h can be either positive or negative, but its -c sign must remain constant throughout the problem. -c hmin = the minimum absolute value of the step size h to be used. -c hmxi = inverse of the maximum absolute value of h to be used. -c hmxi = 0.0 is allowed and corresponds to an infinite hmax. -c hmin and hmxi may be changed at any time, but will not -c take effect until the next change of h is considered. -c tn = the independent variable. tn is updated on each step taken. -c jstart = an integer used for input only, with the following -c values and meanings.. -c 0 perform the first step. -c .gt.0 take a new step continuing from the last. -c -1 take the next step with a new value of h, maxord, -c n, meth, miter, and/or matrix parameters. -c -2 take the next step with a new value of h, -c but with other inputs unchanged. -c on return, jstart is set to 1 to facilitate continuation. -c kflag = a completion code with the following meanings.. -c 0 the step was succesful. -c -1 the requested error could not be achieved. -c -2 corrector convergence could not be achieved. -c -3 res ordered immediate return. -c -4 error condition from res could not be avoided. -c -5 fatal error in pjac or slvs. -c a return with kflag = -1, -2, or -4 means either -c abs(h) = hmin or 10 consecutive failures occurred. -c on a return with kflag negative, the values of tn and -c the yh array are as of the beginning of the last -c step, and h is the last step size attempted. -c maxord = the maximum order of integration method to be allowed. -c maxcor = the maximum number of corrector iterations allowed. -c msbp = maximum number of steps between pjac calls. -c mxncf = maximum number of convergence failures allowed. -c meth/miter = the method flags. see description in driver. -c n = the number of first-order differential equations. 
-c----------------------------------------------------------------------- - kflag = 0 - told = tn - ncf = 0 - ierpj = 0 - iersl = 0 - jcur = 0 - icf = 0 - delp = 0.0d0 - if (jstart .gt. 0) go to 200 - if (jstart .eq. -1) go to 100 - if (jstart .eq. -2) go to 160 -c----------------------------------------------------------------------- -c on the first call, the order is set to 1, and other variables are -c initialized. rmax is the maximum ratio by which h can be increased -c in a single step. it is initially 1.e4 to compensate for the small -c initial h, but then is normally equal to 10. if a failure -c occurs (in corrector convergence or error test), rmax is set at 2 -c for the next increase. -c----------------------------------------------------------------------- - lmax = maxord + 1 - nq = 1 - l = 2 - ialth = 2 - rmax = 10000.0d0 - rc = 0.0d0 - el0 = 1.0d0 - crate = 0.7d0 - hold = h - meo = meth - nslp = 0 - ipup = miter - iret = 3 - go to 140 -c----------------------------------------------------------------------- -c the following block handles preliminaries needed when jstart = -1. -c ipup is set to miter to force a matrix update. -c if an order increase is about to be considered (ialth = 1), -c ialth is reset to 2 to postpone consideration one more step. -c if the caller has changed meth, cfode is called to reset -c the coefficients of the method. -c if the caller has changed maxord to a value less than the current -c order nq, nq is reduced to maxord, and a new h chosen accordingly. -c if h is to be changed, yh must be rescaled. -c if h or meth is being changed, ialth is reset to l = nq + 1 -c to prevent further changes in h for that many steps. -c----------------------------------------------------------------------- - 100 ipup = miter - lmax = maxord + 1 - if (ialth .eq. 1) ialth = 2 - if (meth .eq. meo) go to 110 - call cfode (meth, elco, tesco) - meo = meth - if (nq .gt. maxord) go to 120 - ialth = l - iret = 1 - go to 150 - 110 if (nq .le. 
maxord) go to 160 - 120 nq = maxord - l = lmax - do 125 i = 1,l - 125 el(i) = elco(i,nq) - nqnyh = nq*nyh - rc = rc*el(1)/el0 - el0 = el(1) - conit = 0.5d0/dfloat(nq+2) - ddn = vnorm (n, savf, ewt)/tesco(1,l) - exdn = 1.0d0/dfloat(l) - rhdn = 1.0d0/(1.3d0*ddn**exdn + 0.0000013d0) - rh = dmin1(rhdn,1.0d0) - iredo = 3 - if (h .eq. hold) go to 170 - rh = dmin1(rh,dabs(h/hold)) - h = hold - go to 175 -c----------------------------------------------------------------------- -c cfode is called to get all the integration coefficients for the -c current meth. then the el vector and related constants are reset -c whenever the order nq is changed, or at the start of the problem. -c----------------------------------------------------------------------- - 140 call cfode (meth, elco, tesco) - 150 do 155 i = 1,l - 155 el(i) = elco(i,nq) - nqnyh = nq*nyh - rc = rc*el(1)/el0 - el0 = el(1) - conit = 0.5d0/dfloat(nq+2) - go to (160, 170, 200), iret -c----------------------------------------------------------------------- -c if h is being changed, the h ratio rh is checked against -c rmax, hmin, and hmxi, and the yh array rescaled. ialth is set to -c l = nq + 1 to prevent a change of h for that many steps, unless -c forced by a convergence or error test failure. -c----------------------------------------------------------------------- - 160 if (h .eq. hold) go to 200 - rh = h/hold - h = hold - iredo = 3 - go to 175 - 170 rh = dmax1(rh,hmin/dabs(h)) - 175 rh = dmin1(rh,rmax) - rh = rh/dmax1(1.0d0,dabs(h)*hmxi*rh) - r = 1.0d0 - do 180 j = 2,l - r = r*rh - do 180 i = 1,n - 180 yh(i,j) = yh(i,j)*r - h = h*rh - rc = rc*rh - ialth = l - if (iredo .eq. 0) go to 690 -c----------------------------------------------------------------------- -c this section computes the predicted values by effectively -c multiplying the yh array by the pascal triangle matrix. -c rc is the ratio of new to old values of the coefficient h*el(1). 
-c when rc differs from 1 by more than ccmax, ipup is set to miter -c to force pjac to be called. -c in any case, pjac is called at least every msbp steps. -c----------------------------------------------------------------------- - 200 if (dabs(rc-1.0d0) .gt. ccmax) ipup = miter - if (nst .ge. nslp+msbp) ipup = miter - tn = tn + h - i1 = nqnyh + 1 - do 215 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 210 i = i1,nqnyh - 210 yh1(i) = yh1(i) + yh1(i+nyh) - 215 continue -c----------------------------------------------------------------------- -c up to maxcor corrector iterations are taken. a convergence test is -c made on the r.m.s. norm of each correction, weighted by h and the -c error weight vector ewt. the sum of the corrections is accumulated -c in acor(i). the yh array is not altered in the corrector loop. -c----------------------------------------------------------------------- - 220 m = 0 - do 230 i = 1,n - savf(i) = yh(i,2) / h - 230 y(i) = yh(i,1) - if (ipup .le. 0) go to 240 -c----------------------------------------------------------------------- -c if indicated, the matrix p = a - h*el(1)*dr/dy is reevaluated and -c preprocessed before starting the corrector iteration. ipup is set -c to 0 as an indicator that this has been done. -c----------------------------------------------------------------------- - call pjac (neq, y, yh, nyh, ewt, acor, savr, savf, wm, iwm, - 1 res, jac, adda ) - ipup = 0 - rc = 1.0d0 - nslp = nst - crate = 0.7d0 - if (ierpj .eq. 0) go to 250 - ires = ierpj - go to (430, 435, 430), ires -c get residual at predicted values, if not already done in pjac. ------- - 240 ires = 1 - call res ( neq, tn, y, savf, savr, ires ) - nre = nre + 1 - kgo = iabs(ires) - go to ( 250, 435, 430 ) , kgo - 250 do 260 i = 1,n - 260 acor(i) = 0.0d0 -c----------------------------------------------------------------------- -c solve the linear system with the current residual as -c right-hand side and p as coefficient matrix. 
-c----------------------------------------------------------------------- - 270 continue - call slvs (wm, iwm, savr, savf) - if (iersl .lt. 0) go to 430 - if (iersl .gt. 0) go to 410 - el1h = el(1) * h - del = vnorm (n, savr, ewt) * dabs(h) - do 380 i = 1,n - acor(i) = acor(i) + savr(i) - savf(i) = acor(i) + yh(i,2)/h - 380 y(i) = yh(i,1) + el1h*acor(i) -c----------------------------------------------------------------------- -c test for convergence. if m.gt.0, an estimate of the convergence -c rate constant is stored in crate, and this is used in the test. -c----------------------------------------------------------------------- - if (m .ne. 0) crate = dmax1(0.2d0*crate,del/delp) - dcon = del*dmin1(1.0d0,1.5d0*crate)/(tesco(2,nq)*conit) - if (dcon .le. 1.0d0) go to 460 - m = m + 1 - if (m .eq. maxcor) go to 410 - if (m .ge. 2 .and. del .gt. 2.0d0*delp) go to 410 - delp = del - ires = 1 - call res ( neq, tn, y, savf, savr, ires ) - nre = nre + 1 - kgo = iabs(ires) - go to ( 270, 435, 410 ) , kgo -c----------------------------------------------------------------------- -c the correctors failed to converge, or res has returned abnormally. -c on a convergence failure, if the jacobian is out of date, pjac is -c called for the next try. otherwise the yh array is retracted to its -c values before prediction, and h is reduced, if possible. -c take an error exit if ires = 2, or h cannot be reduced, or mxncf -c failures have occurred, or a fatal error occurred in pjac or slvs. -c----------------------------------------------------------------------- - 410 icf = 1 - if (jcur .eq. 1) go to 430 - ipup = miter - go to 220 - 430 icf = 2 - ncf = ncf + 1 - rmax = 2.0d0 - 435 tn = told - i1 = nqnyh + 1 - do 445 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 440 i = i1,nqnyh - 440 yh1(i) = yh1(i) - yh1(i+nyh) - 445 continue - if (ires .eq. 2) go to 680 - if (ierpj .lt. 0 .or. iersl .lt. 0) go to 685 - if (dabs(h) .le. hmin*1.00001d0) go to 450 - if (ncf .eq. 
mxncf) go to 450 - rh = 0.25d0 - ipup = miter - iredo = 1 - go to 170 - 450 if (ires .eq. 3) go to 680 - go to 670 -c----------------------------------------------------------------------- -c the corrector has converged. jcur is set to 0 -c to signal that the jacobian involved may need updating later. -c the local error test is made and control passes to statement 500 -c if it fails. -c----------------------------------------------------------------------- - 460 jcur = 0 - if (m .eq. 0) dsm = del/tesco(2,nq) - if (m .gt. 0) dsm = dabs(h) * vnorm (n, acor, ewt)/tesco(2,nq) - if (dsm .gt. 1.0d0) go to 500 -c----------------------------------------------------------------------- -c after a successful step, update the yh array. -c consider changing h if ialth = 1. otherwise decrease ialth by 1. -c if ialth is then 1 and nq .lt. maxord, then acor is saved for -c use in a possible order increase on the next step. -c if a change in h is considered, an increase or decrease in order -c by one is considered also. a change in h is made only if it is by a -c factor of at least 1.1. if not, ialth is set to 3 to prevent -c testing for that many steps. -c----------------------------------------------------------------------- - kflag = 0 - iredo = 0 - nst = nst + 1 - hu = h - nqu = nq - do 470 j = 1,l - eljh = el(j)*h - do 470 i = 1,n - 470 yh(i,j) = yh(i,j) + eljh*acor(i) - ialth = ialth - 1 - if (ialth .eq. 0) go to 520 - if (ialth .gt. 1) go to 700 - if (l .eq. lmax) go to 700 - do 490 i = 1,n - 490 yh(i,lmax) = acor(i) - go to 700 -c----------------------------------------------------------------------- -c the error test failed. kflag keeps track of multiple failures. -c restore tn and the yh array to their previous values, and prepare -c to try the step again. compute the optimum step size for this or -c one lower order. after 2 or more failures, h is forced to decrease -c by a factor of 0.1 or less. 
-c----------------------------------------------------------------------- - 500 kflag = kflag - 1 - tn = told - i1 = nqnyh + 1 - do 515 jb = 1,nq - i1 = i1 - nyh -cdir$ ivdep - do 510 i = i1,nqnyh - 510 yh1(i) = yh1(i) - yh1(i+nyh) - 515 continue - rmax = 2.0d0 - if (dabs(h) .le. hmin*1.00001d0) go to 660 - if (kflag .le. -7) go to 660 - iredo = 2 - rhup = 0.0d0 - go to 540 -c----------------------------------------------------------------------- -c regardless of the success or failure of the step, factors -c rhdn, rhsm, and rhup are computed, by which h could be multiplied -c at order nq - 1, order nq, or order nq + 1, respectively. -c in the case of failure, rhup = 0.0 to avoid an order increase. -c the largest of these is determined and the new order chosen -c accordingly. if the order is to be increased, we compute one -c additional scaled derivative. -c----------------------------------------------------------------------- - 520 rhup = 0.0d0 - if (l .eq. lmax) go to 540 - do 530 i = 1,n - 530 savf(i) = acor(i) - yh(i,lmax) - dup = dabs(h) * vnorm (n, savf, ewt)/tesco(3,nq) - exup = 1.0d0/dfloat(l+1) - rhup = 1.0d0/(1.4d0*dup**exup + 0.0000014d0) - 540 exsm = 1.0d0/dfloat(l) - rhsm = 1.0d0/(1.2d0*dsm**exsm + 0.0000012d0) - rhdn = 0.0d0 - if (nq .eq. 1) go to 560 - ddn = vnorm (n, yh(1,l), ewt)/tesco(1,nq) - exdn = 1.0d0/dfloat(nq) - rhdn = 1.0d0/(1.3d0*ddn**exdn + 0.0000013d0) - 560 if (rhsm .ge. rhup) go to 570 - if (rhup .gt. rhdn) go to 590 - go to 580 - 570 if (rhsm .lt. rhdn) go to 580 - newq = nq - rh = rhsm - go to 620 - 580 newq = nq - 1 - rh = rhdn - if (kflag .lt. 0 .and. rh .gt. 1.0d0) rh = 1.0d0 - go to 620 - 590 newq = l - rh = rhup - if (rh .lt. 1.1d0) go to 610 - r = h*el(l)/dfloat(l) - do 600 i = 1,n - 600 yh(i,newq+1) = acor(i)*r - go to 630 - 610 ialth = 3 - go to 700 - 620 if ((kflag .eq. 0) .and. (rh .lt. 1.1d0)) go to 610 - if (kflag .le. 
-2) rh = dmin1(rh,0.1d0) -c----------------------------------------------------------------------- -c if there is a change of order, reset nq, l, and the coefficients. -c in any case h is reset according to rh and the yh array is rescaled. -c then exit from 690 if the step was ok, or redo the step otherwise. -c----------------------------------------------------------------------- - if (newq .eq. nq) go to 170 - 630 nq = newq - l = nq + 1 - iret = 2 - go to 150 -c----------------------------------------------------------------------- -c all returns are made through this section. h is saved in hold -c to allow the caller to change h on the next step. -c----------------------------------------------------------------------- - 660 kflag = -1 - go to 720 - 670 kflag = -2 - go to 720 - 680 kflag = -1 - ires - go to 720 - 685 kflag = -5 - go to 720 - 690 rmax = 10.0d0 - 700 r = h/tesco(2,nqu) - do 710 i = 1,n - 710 acor(i) = acor(i)*r - 720 hold = h - jstart = 1 - return -c----------------------- end of subroutine stodi ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/vmnorm.f b/scipy-0.10.1/scipy/integrate/odepack/vmnorm.f deleted file mode 100644 index 6ee68f2851..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/vmnorm.f +++ /dev/null @@ -1,18 +0,0 @@ - double precision function vmnorm (n, v, w) -clll. optimize -c----------------------------------------------------------------------- -c this function routine computes the weighted max-norm -c of the vector of length n contained in the array v, with weights -c contained in the array w of length n.. 
-c vmnorm = max(i=1,...,n) abs(v(i))*w(i) -c----------------------------------------------------------------------- - integer n, i - double precision v, w, vm - dimension v(n), w(n) - vm = 0.0d0 - do 10 i = 1,n - 10 vm = dmax1(vm,dabs(v(i))*w(i)) - vmnorm = vm - return -c----------------------- end of function vmnorm ------------------------ - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/vnorm.f b/scipy-0.10.1/scipy/integrate/odepack/vnorm.f deleted file mode 100644 index d8461304ac..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/vnorm.f +++ /dev/null @@ -1,18 +0,0 @@ - double precision function vnorm (n, v, w) -clll. optimize -c----------------------------------------------------------------------- -c this function routine computes the weighted root-mean-square norm -c of the vector of length n contained in the array v, with weights -c contained in the array w of length n.. -c vnorm = sqrt( (1/n) * sum( v(i)*w(i) )**2 ) -c----------------------------------------------------------------------- - integer n, i - double precision v, w, sum - dimension v(n), w(n) - sum = 0.0d0 - do 10 i = 1,n - 10 sum = sum + (v(i)*w(i))**2 - vnorm = dsqrt(sum/dfloat(n)) - return -c----------------------- end of function vnorm ------------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/vode.f b/scipy-0.10.1/scipy/integrate/odepack/vode.f deleted file mode 100644 index 1b4e2a92b5..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/vode.f +++ /dev/null @@ -1,3659 +0,0 @@ - -*DECK DVODE - SUBROUTINE DVODE (F, NEQ, Y, T, TOUT, ITOL, RTOL, ATOL, ITASK, - 1 ISTATE, IOPT, RWORK, LRW, IWORK, LIW, JAC, MF, - 2 RPAR, IPAR) - EXTERNAL F, JAC - DOUBLE PRECISION Y, T, TOUT, RTOL, ATOL, RWORK, RPAR - INTEGER NEQ, ITOL, ITASK, ISTATE, IOPT, LRW, IWORK, LIW, - 1 MF, IPAR - DIMENSION Y(*), RTOL(*), ATOL(*), RWORK(LRW), IWORK(LIW), - 1 RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C DVODE.. 
Variable-coefficient Ordinary Differential Equation solver, -C with fixed-leading-coefficient implementation. -C This version is in double precision. -C -C DVODE solves the initial value problem for stiff or nonstiff -C systems of first order ODEs, -C dy/dt = f(t,y) , or, in component form, -C dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(NEQ)) (i = 1,...,NEQ). -C DVODE is a package based on the EPISODE and EPISODEB packages, and -C on the ODEPACK user interface standard, with minor modifications. -C----------------------------------------------------------------------- -C Revision History (YYMMDD) -C 890615 Date Written -C 890922 Added interrupt/restart ability, minor changes throughout. -C 910228 Minor revisions in line format, prologue, etc. -C 920227 Modifications by D. Pang: -C (1) Applied subgennam to get generic intrinsic names. -C (2) Changed intrinsic names to generic in comments. -C (3) Added *DECK lines before each routine. -C 920721 Names of routines and labeled Common blocks changed, so as -C to be unique in combined single/double precision code (ACH). -C 920722 Minor revisions to prologue (ACH). -C 920831 Conversion to double precision done (ACH). -C 921106 Fixed minor bug: ETAQ,ETAQM1 in DVSTEP SAVE statement (ACH). -C 921118 Changed LUNSAV/MFLGSV to IXSAV (ACH). -C 941222 Removed MF overwrite; attached sign to H in estimated second -C derivative in DVHIN; misc. comment corrections throughout. -C 970515 Minor corrections to comments in prologue, DVJAC. -C----------------------------------------------------------------------- -C References.. -C -C 1. P. N. Brown, G. D. Byrne, and A. C. Hindmarsh, "VODE: A Variable -C Coefficient ODE Solver," SIAM J. Sci. Stat. Comput., 10 (1989), -C pp. 1038-1051. Also, LLNL Report UCRL-98412, June 1988. -C 2. G. D. Byrne and A. C. Hindmarsh, "A Polyalgorithm for the -C Numerical Solution of Ordinary Differential Equations," -C ACM Trans. Math. Software, 1 (1975), pp. 71-96. -C 3. A. C. Hindmarsh and G. D. 
Byrne, "EPISODE: An Effective Package -C for the Integration of Systems of Ordinary Differential -C Equations," LLNL Report UCID-30112, Rev. 1, April 1977. -C 4. G. D. Byrne and A. C. Hindmarsh, "EPISODEB: An Experimental -C Package for the Integration of Systems of Ordinary Differential -C Equations with Banded Jacobians," LLNL Report UCID-30132, April -C 1976. -C 5. A. C. Hindmarsh, "ODEPACK, a Systematized Collection of ODE -C Solvers," in Scientific Computing, R. S. Stepleman et al., eds., -C North-Holland, Amsterdam, 1983, pp. 55-64. -C 6. K. R. Jackson and R. Sacks-Davis, "An Alternative Implementation -C of Variable Step-Size Multistep Formulas for Stiff ODEs," ACM -C Trans. Math. Software, 6 (1980), pp. 295-318. -C----------------------------------------------------------------------- -C Authors.. -C -C Peter N. Brown and Alan C. Hindmarsh -C Center for Applied Scientific Computing, L-561 -C Lawrence Livermore National Laboratory -C Livermore, CA 94551 -C and -C George D. Byrne -C Illinois Institute of Technology -C Chicago, IL 60616 -C----------------------------------------------------------------------- -C Summary of usage. -C -C Communication between the user and the DVODE package, for normal -C situations, is summarized here. This summary describes only a subset -C of the full set of options available. See the full description for -C details, including optional communication, nonstandard options, -C and instructions for special situations. See also the example -C problem (with program and output) following this summary. -C -C A. First provide a subroutine of the form.. -C -C SUBROUTINE F (NEQ, T, Y, YDOT, RPAR, IPAR) -C DOUBLE PRECISION T, Y, YDOT, RPAR -C DIMENSION Y(NEQ), YDOT(NEQ) -C -C which supplies the vector function f by loading YDOT(i) with f(i). -C -C B. Next determine (or guess) whether or not the problem is stiff. 
-C Stiffness occurs when the Jacobian matrix df/dy has an eigenvalue -C whose real part is negative and large in magnitude, compared to the -C reciprocal of the t span of interest. If the problem is nonstiff, -C use a method flag MF = 10. If it is stiff, there are four standard -C choices for MF (21, 22, 24, 25), and DVODE requires the Jacobian -C matrix in some form. In these cases (MF .gt. 0), DVODE will use a -C saved copy of the Jacobian matrix. If this is undesirable because of -C storage limitations, set MF to the corresponding negative value -C (-21, -22, -24, -25). (See full description of MF below.) -C The Jacobian matrix is regarded either as full (MF = 21 or 22), -C or banded (MF = 24 or 25). In the banded case, DVODE requires two -C half-bandwidth parameters ML and MU. These are, respectively, the -C widths of the lower and upper parts of the band, excluding the main -C diagonal. Thus the band consists of the locations (i,j) with -C i-ML .le. j .le. i+MU, and the full bandwidth is ML+MU+1. -C -C C. If the problem is stiff, you are encouraged to supply the Jacobian -C directly (MF = 21 or 24), but if this is not feasible, DVODE will -C compute it internally by difference quotients (MF = 22 or 25). -C If you are supplying the Jacobian, provide a subroutine of the form.. -C -C SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD, RPAR, IPAR) -C DOUBLE PRECISION T, Y, PD, RPAR -C DIMENSION Y(NEQ), PD(NROWPD,NEQ) -C -C which supplies df/dy by loading PD as follows.. -C For a full Jacobian (MF = 21), load PD(i,j) with df(i)/dy(j), -C the partial derivative of f(i) with respect to y(j). (Ignore the -C ML and MU arguments in this case.) -C For a banded Jacobian (MF = 24), load PD(i-j+MU+1,j) with -C df(i)/dy(j), i.e. load the diagonal lines of df/dy into the rows of -C PD from the top down. -C In either case, only nonzero elements need be loaded. -C -C D. Write a main program which calls subroutine DVODE once for -C each point at which answers are desired. 
This should also provide -C for possible use of logical unit 6 for output of error messages -C by DVODE. On the first call to DVODE, supply arguments as follows.. -C F = Name of subroutine for right-hand side vector f. -C This name must be declared external in calling program. -C NEQ = Number of first order ODE-s. -C Y = Array of initial values, of length NEQ. -C T = The initial value of the independent variable. -C TOUT = First point where output is desired (.ne. T). -C ITOL = 1 or 2 according as ATOL (below) is a scalar or array. -C RTOL = Relative tolerance parameter (scalar). -C ATOL = Absolute tolerance parameter (scalar or array). -C The estimated local error in Y(i) will be controlled so as -C to be roughly less (in magnitude) than -C EWT(i) = RTOL*abs(Y(i)) + ATOL if ITOL = 1, or -C EWT(i) = RTOL*abs(Y(i)) + ATOL(i) if ITOL = 2. -C Thus the local error test passes if, in each component, -C either the absolute error is less than ATOL (or ATOL(i)), -C or the relative error is less than RTOL. -C Use RTOL = 0.0 for pure absolute error control, and -C use ATOL = 0.0 (or ATOL(i) = 0.0) for pure relative error -C control. Caution.. Actual (global) errors may exceed these -C local tolerances, so choose them conservatively. -C ITASK = 1 for normal computation of output values of Y at t = TOUT. -C ISTATE = Integer flag (input and output). Set ISTATE = 1. -C IOPT = 0 to indicate no optional input used. -C RWORK = Real work array of length at least.. -C 20 + 16*NEQ for MF = 10, -C 22 + 9*NEQ + 2*NEQ**2 for MF = 21 or 22, -C 22 + 11*NEQ + (3*ML + 2*MU)*NEQ for MF = 24 or 25. -C LRW = Declared length of RWORK (in user's DIMENSION statement). -C IWORK = Integer work array of length at least.. -C 30 for MF = 10, -C 30 + NEQ for MF = 21, 22, 24, or 25. -C If MF = 24 or 25, input in IWORK(1),IWORK(2) the lower -C and upper half-bandwidths ML,MU. -C LIW = Declared length of IWORK (in user's DIMENSION statement). -C JAC = Name of subroutine for Jacobian matrix (MF = 21 or 24). 
-C If used, this name must be declared external in calling -C program. If not used, pass a dummy name. -C MF = Method flag. Standard values are.. -C 10 for nonstiff (Adams) method, no Jacobian used. -C 21 for stiff (BDF) method, user-supplied full Jacobian. -C 22 for stiff method, internally generated full Jacobian. -C 24 for stiff method, user-supplied banded Jacobian. -C 25 for stiff method, internally generated banded Jacobian. -C RPAR,IPAR = user-defined real and integer arrays passed to F and JAC. -C Note that the main program must declare arrays Y, RWORK, IWORK, -C and possibly ATOL, RPAR, and IPAR. -C -C E. The output from the first call (or any call) is.. -C Y = Array of computed values of y(t) vector. -C T = Corresponding value of independent variable (normally TOUT). -C ISTATE = 2 if DVODE was successful, negative otherwise. -C -1 means excess work done on this call. (Perhaps wrong MF.) -C -2 means excess accuracy requested. (Tolerances too small.) -C -3 means illegal input detected. (See printed message.) -C -4 means repeated error test failures. (Check all input.) -C -5 means repeated convergence failures. (Perhaps bad -C Jacobian supplied or wrong choice of MF or tolerances.) -C -6 means error weight became zero during problem. (Solution -C component i vanished, and ATOL or ATOL(i) = 0.) -C -C F. To continue the integration after a successful return, simply -C reset TOUT and call DVODE again. No other parameters need be reset. -C -C----------------------------------------------------------------------- -C EXAMPLE PROBLEM -C -C The following is a simple example problem, with the coding -C needed for its solution by DVODE. The problem is from chemical -C kinetics, and consists of the following three rate equations.. -C dy1/dt = -.04*y1 + 1.e4*y2*y3 -C dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 -C dy3/dt = 3.e7*y2**2 -C on the interval from t = 0.0 to t = 4.e10, with initial conditions -C y1 = 1.0, y2 = y3 = 0. The problem is stiff. 
-C -C The following coding solves this problem with DVODE, using MF = 21 -C and printing results at t = .4, 4., ..., 4.e10. It uses -C ITOL = 2 and ATOL much smaller for y2 than y1 or y3 because -C y2 has much smaller values. -C At the end of the run, statistical quantities of interest are -C printed. (See optional output in the full description below.) -C To generate Fortran source code, replace C in column 1 with a blank -C in the coding below. -C -C EXTERNAL FEX, JEX -C DOUBLE PRECISION ATOL, RPAR, RTOL, RWORK, T, TOUT, Y -C DIMENSION Y(3), ATOL(3), RWORK(67), IWORK(33) -C NEQ = 3 -C Y(1) = 1.0D0 -C Y(2) = 0.0D0 -C Y(3) = 0.0D0 -C T = 0.0D0 -C TOUT = 0.4D0 -C ITOL = 2 -C RTOL = 1.D-4 -C ATOL(1) = 1.D-8 -C ATOL(2) = 1.D-14 -C ATOL(3) = 1.D-6 -C ITASK = 1 -C ISTATE = 1 -C IOPT = 0 -C LRW = 67 -C LIW = 33 -C MF = 21 -C DO 40 IOUT = 1,12 -C CALL DVODE(FEX,NEQ,Y,T,TOUT,ITOL,RTOL,ATOL,ITASK,ISTATE, -C 1 IOPT,RWORK,LRW,IWORK,LIW,JEX,MF,RPAR,IPAR) -C WRITE(6,20)T,Y(1),Y(2),Y(3) -C 20 FORMAT(' At t =',D12.4,' y =',3D14.6) -C IF (ISTATE .LT. 0) GO TO 80 -C 40 TOUT = TOUT*10. -C WRITE(6,60) IWORK(11),IWORK(12),IWORK(13),IWORK(19), -C 1 IWORK(20),IWORK(21),IWORK(22) -C 60 FORMAT(/' No. steps =',I4,' No. f-s =',I4, -C 1 ' No. J-s =',I4,' No. LU-s =',I4/ -C 2 ' No. nonlinear iterations =',I4/ -C 3 ' No. nonlinear convergence failures =',I4/ -C 4 ' No. error test failures =',I4/) -C STOP -C 80 WRITE(6,90)ISTATE -C 90 FORMAT(///' Error halt.. 
ISTATE =',I3) -C STOP -C END -C -C SUBROUTINE FEX (NEQ, T, Y, YDOT, RPAR, IPAR) -C DOUBLE PRECISION RPAR, T, Y, YDOT -C DIMENSION Y(NEQ), YDOT(NEQ) -C YDOT(1) = -.04D0*Y(1) + 1.D4*Y(2)*Y(3) -C YDOT(3) = 3.D7*Y(2)*Y(2) -C YDOT(2) = -YDOT(1) - YDOT(3) -C RETURN -C END -C -C SUBROUTINE JEX (NEQ, T, Y, ML, MU, PD, NRPD, RPAR, IPAR) -C DOUBLE PRECISION PD, RPAR, T, Y -C DIMENSION Y(NEQ), PD(NRPD,NEQ) -C PD(1,1) = -.04D0 -C PD(1,2) = 1.D4*Y(3) -C PD(1,3) = 1.D4*Y(2) -C PD(2,1) = .04D0 -C PD(2,3) = -PD(1,3) -C PD(3,2) = 6.D7*Y(2) -C PD(2,2) = -PD(1,2) - PD(3,2) -C RETURN -C END -C -C The following output was obtained from the above program on a -C Cray-1 computer with the CFT compiler. -C -C At t = 4.0000e-01 y = 9.851680e-01 3.386314e-05 1.479817e-02 -C At t = 4.0000e+00 y = 9.055255e-01 2.240539e-05 9.445214e-02 -C At t = 4.0000e+01 y = 7.158108e-01 9.184883e-06 2.841800e-01 -C At t = 4.0000e+02 y = 4.505032e-01 3.222940e-06 5.494936e-01 -C At t = 4.0000e+03 y = 1.832053e-01 8.942690e-07 8.167938e-01 -C At t = 4.0000e+04 y = 3.898560e-02 1.621875e-07 9.610142e-01 -C At t = 4.0000e+05 y = 4.935882e-03 1.984013e-08 9.950641e-01 -C At t = 4.0000e+06 y = 5.166183e-04 2.067528e-09 9.994834e-01 -C At t = 4.0000e+07 y = 5.201214e-05 2.080593e-10 9.999480e-01 -C At t = 4.0000e+08 y = 5.213149e-06 2.085271e-11 9.999948e-01 -C At t = 4.0000e+09 y = 5.183495e-07 2.073399e-12 9.999995e-01 -C At t = 4.0000e+10 y = 5.450996e-08 2.180399e-13 9.999999e-01 -C -C No. steps = 595 No. f-s = 832 No. J-s = 13 No. LU-s = 112 -C No. nonlinear iterations = 831 -C No. nonlinear convergence failures = 0 -C No. error test failures = 22 -C----------------------------------------------------------------------- -C Full description of user interface to DVODE. -C -C The user interface to DVODE consists of the following parts. -C -C i. The call sequence to subroutine DVODE, which is a driver -C routine for the solver. 
This includes descriptions of both -C the call sequence arguments and of user-supplied routines. -C Following these descriptions is -C * a description of optional input available through the -C call sequence, -C * a description of optional output (in the work arrays), and -C * instructions for interrupting and restarting a solution. -C -C ii. Descriptions of other routines in the DVODE package that may be -C (optionally) called by the user. These provide the ability to -C alter error message handling, save and restore the internal -C COMMON, and obtain specified derivatives of the solution y(t). -C -C iii. Descriptions of COMMON blocks to be declared in overlay -C or similar environments. -C -C iv. Description of two routines in the DVODE package, either of -C which the user may replace with his own version, if desired. -C these relate to the measurement of errors. -C -C----------------------------------------------------------------------- -C Part i. Call Sequence. -C -C The call sequence parameters used for input only are -C F, NEQ, TOUT, ITOL, RTOL, ATOL, ITASK, IOPT, LRW, LIW, JAC, MF, -C and those used for both input and output are -C Y, T, ISTATE. -C The work arrays RWORK and IWORK are also used for conditional and -C optional input and optional output. (The term output here refers -C to the return from subroutine DVODE to the user's calling program.) -C -C The legality of input parameters will be thoroughly checked on the -C initial call for the problem, but not checked thereafter unless a -C change in input parameters is flagged by ISTATE = 3 in the input. -C -C The descriptions of the call arguments are as follows. -C -C F = The name of the user-supplied subroutine defining the -C ODE system. The system must be put in the first-order -C form dy/dt = f(t,y), where f is a vector-valued function -C of the scalar t and the vector y. Subroutine F is to -C compute the function f. 
It is to have the form -C SUBROUTINE F (NEQ, T, Y, YDOT, RPAR, IPAR) -C DOUBLE PRECISION T, Y, YDOT, RPAR -C DIMENSION Y(NEQ), YDOT(NEQ) -C where NEQ, T, and Y are input, and the array YDOT = f(t,y) -C is output. Y and YDOT are arrays of length NEQ. -C (In the DIMENSION statement above, NEQ can be replaced by -C * to make Y and YDOT assumed size arrays.) -C Subroutine F should not alter Y(1),...,Y(NEQ). -C F must be declared EXTERNAL in the calling program. -C -C Subroutine F may access user-defined real and integer -C work arrays RPAR and IPAR, which are to be dimensioned -C in the main program. -C -C If quantities computed in the F routine are needed -C externally to DVODE, an extra call to F should be made -C for this purpose, for consistent and accurate results. -C If only the derivative dy/dt is needed, use DVINDY instead. -C -C NEQ = The size of the ODE system (number of first order -C ordinary differential equations). Used only for input. -C NEQ may not be increased during the problem, but -C can be decreased (with ISTATE = 3 in the input). -C -C Y = A real array for the vector of dependent variables, of -C length NEQ or more. Used for both input and output on the -C first call (ISTATE = 1), and only for output on other calls. -C On the first call, Y must contain the vector of initial -C values. In the output, Y contains the computed solution -C evaluated at T. If desired, the Y array may be used -C for other purposes between calls to the solver. -C -C This array is passed as the Y argument in all calls to -C F and JAC. -C -C T = The independent variable. In the input, T is used only on -C the first call, as the initial point of the integration. -C In the output, after each call, T is the value at which a -C computed solution Y is evaluated (usually the same as TOUT). -C On an error return, T is the farthest point reached. -C -C TOUT = The next value of t at which a computed solution is desired. -C Used only for input. 
-C -C When starting the problem (ISTATE = 1), TOUT may be equal -C to T for one call, then should .ne. T for the next call. -C For the initial T, an input value of TOUT .ne. T is used -C in order to determine the direction of the integration -C (i.e. the algebraic sign of the step sizes) and the rough -C scale of the problem. Integration in either direction -C (forward or backward in t) is permitted. -C -C If ITASK = 2 or 5 (one-step modes), TOUT is ignored after -C the first call (i.e. the first call with TOUT .ne. T). -C Otherwise, TOUT is required on every call. -C -C If ITASK = 1, 3, or 4, the values of TOUT need not be -C monotone, but a value of TOUT which backs up is limited -C to the current internal t interval, whose endpoints are -C TCUR - HU and TCUR. (See optional output, below, for -C TCUR and HU.) -C -C ITOL = An indicator for the type of error control. See -C description below under ATOL. Used only for input. -C -C RTOL = A relative error tolerance parameter, either a scalar or -C an array of length NEQ. See description below under ATOL. -C Input only. -C -C ATOL = An absolute error tolerance parameter, either a scalar or -C an array of length NEQ. Input only. -C -C The input parameters ITOL, RTOL, and ATOL determine -C the error control performed by the solver. The solver will -C control the vector e = (e(i)) of estimated local errors -C in Y, according to an inequality of the form -C rms-norm of ( e(i)/EWT(i) ) .le. 1, -C where EWT(i) = RTOL(i)*abs(Y(i)) + ATOL(i), -C and the rms-norm (root-mean-square norm) here is -C rms-norm(v) = sqrt(sum v(i)**2 / NEQ). Here EWT = (EWT(i)) -C is a vector of weights which must always be positive, and -C the values of RTOL and ATOL should all be non-negative. -C The following table gives the types (scalar/array) of -C RTOL and ATOL, and the corresponding form of EWT(i). 
-C -C ITOL RTOL ATOL EWT(i) -C 1 scalar scalar RTOL*ABS(Y(i)) + ATOL -C 2 scalar array RTOL*ABS(Y(i)) + ATOL(i) -C 3 array scalar RTOL(i)*ABS(Y(i)) + ATOL -C 4 array array RTOL(i)*ABS(Y(i)) + ATOL(i) -C -C When either of these parameters is a scalar, it need not -C be dimensioned in the user's calling program. -C -C If none of the above choices (with ITOL, RTOL, and ATOL -C fixed throughout the problem) is suitable, more general -C error controls can be obtained by substituting -C user-supplied routines for the setting of EWT and/or for -C the norm calculation. See Part iv below. -C -C If global errors are to be estimated by making a repeated -C run on the same problem with smaller tolerances, then all -C components of RTOL and ATOL (i.e. of EWT) should be scaled -C down uniformly. -C -C ITASK = An index specifying the task to be performed. -C Input only. ITASK has the following values and meanings. -C 1 means normal computation of output values of y(t) at -C t = TOUT (by overshooting and interpolating). -C 2 means take one step only and return. -C 3 means stop at the first internal mesh point at or -C beyond t = TOUT and return. -C 4 means normal computation of output values of y(t) at -C t = TOUT but without overshooting t = TCRIT. -C TCRIT must be input as RWORK(1). TCRIT may be equal to -C or beyond TOUT, but not behind it in the direction of -C integration. This option is useful if the problem -C has a singularity at or beyond t = TCRIT. -C 5 means take one step, without passing TCRIT, and return. -C TCRIT must be input as RWORK(1). -C -C Note.. If ITASK = 4 or 5 and the solver reaches TCRIT -C (within roundoff), it will return T = TCRIT (exactly) to -C indicate this (unless ITASK = 4 and TOUT comes before TCRIT, -C in which case answers at T = TOUT are returned first). -C -C ISTATE = an index used for input and output to specify the -C the state of the calculation. -C -C In the input, the values of ISTATE are as follows. 
-C 1 means this is the first call for the problem -C (initializations will be done). See note below. -C 2 means this is not the first call, and the calculation -C is to continue normally, with no change in any input -C parameters except possibly TOUT and ITASK. -C (If ITOL, RTOL, and/or ATOL are changed between calls -C with ISTATE = 2, the new values will be used but not -C tested for legality.) -C 3 means this is not the first call, and the -C calculation is to continue normally, but with -C a change in input parameters other than -C TOUT and ITASK. Changes are allowed in -C NEQ, ITOL, RTOL, ATOL, IOPT, LRW, LIW, MF, ML, MU, -C and any of the optional input except H0. -C (See IWORK description for ML and MU.) -C Note.. A preliminary call with TOUT = T is not counted -C as a first call here, as no initialization or checking of -C input is done. (Such a call is sometimes useful to include -C the initial conditions in the output.) -C Thus the first call for which TOUT .ne. T requires -C ISTATE = 1 in the input. -C -C In the output, ISTATE has the following values and meanings. -C 1 means nothing was done, as TOUT was equal to T with -C ISTATE = 1 in the input. -C 2 means the integration was performed successfully. -C -1 means an excessive amount of work (more than MXSTEP -C steps) was done on this call, before completing the -C requested task, but the integration was otherwise -C successful as far as T. (MXSTEP is an optional input -C and is normally 500.) To continue, the user may -C simply reset ISTATE to a value .gt. 1 and call again. -C (The excess work step counter will be reset to 0.) -C In addition, the user may increase MXSTEP to avoid -C this error return. (See optional input below.) -C -2 means too much accuracy was requested for the precision -C of the machine being used. This was detected before -C completing the requested task, but the integration -C was successful as far as T. 
To continue, the tolerance -C parameters must be reset, and ISTATE must be set -C to 3. The optional output TOLSF may be used for this -C purpose. (Note.. If this condition is detected before -C taking any steps, then an illegal input return -C (ISTATE = -3) occurs instead.) -C -3 means illegal input was detected, before taking any -C integration steps. See written message for details. -C Note.. If the solver detects an infinite loop of calls -C to the solver with illegal input, it will cause -C the run to stop. -C -4 means there were repeated error test failures on -C one attempted step, before completing the requested -C task, but the integration was successful as far as T. -C The problem may have a singularity, or the input -C may be inappropriate. -C -5 means there were repeated convergence test failures on -C one attempted step, before completing the requested -C task, but the integration was successful as far as T. -C This may be caused by an inaccurate Jacobian matrix, -C if one is being used. -C -6 means EWT(i) became zero for some i during the -C integration. Pure relative error control (ATOL(i)=0.0) -C was requested on a variable which has now vanished. -C The integration was successful as far as T. -C -C Note.. Since the normal output value of ISTATE is 2, -C it does not need to be reset for normal continuation. -C Also, since a negative input value of ISTATE will be -C regarded as illegal, a negative output value requires the -C user to change it, and possibly other input, before -C calling the solver again. -C -C IOPT = An integer flag to specify whether or not any optional -C input is being used on this call. Input only. -C The optional input is listed separately below. -C IOPT = 0 means no optional input is being used. -C Default values will be used in all cases. -C IOPT = 1 means optional input is being used. -C -C RWORK = A real working array (double precision). 
-C The length of RWORK must be at least -C 20 + NYH*(MAXORD + 1) + 3*NEQ + LWM where -C NYH = the initial value of NEQ, -C MAXORD = 12 (if METH = 1) or 5 (if METH = 2) (unless a -C smaller value is given as an optional input), -C LWM = length of work space for matrix-related data.. -C LWM = 0 if MITER = 0, -C LWM = 2*NEQ**2 + 2 if MITER = 1 or 2, and MF.gt.0, -C LWM = NEQ**2 + 2 if MITER = 1 or 2, and MF.lt.0, -C LWM = NEQ + 2 if MITER = 3, -C LWM = (3*ML+2*MU+2)*NEQ + 2 if MITER = 4 or 5, and MF.gt.0, -C LWM = (2*ML+MU+1)*NEQ + 2 if MITER = 4 or 5, and MF.lt.0. -C (See the MF description for METH and MITER.) -C Thus if MAXORD has its default value and NEQ is constant, -C this length is.. -C 20 + 16*NEQ for MF = 10, -C 22 + 16*NEQ + 2*NEQ**2 for MF = 11 or 12, -C 22 + 16*NEQ + NEQ**2 for MF = -11 or -12, -C 22 + 17*NEQ for MF = 13, -C 22 + 18*NEQ + (3*ML+2*MU)*NEQ for MF = 14 or 15, -C 22 + 17*NEQ + (2*ML+MU)*NEQ for MF = -14 or -15, -C 20 + 9*NEQ for MF = 20, -C 22 + 9*NEQ + 2*NEQ**2 for MF = 21 or 22, -C 22 + 9*NEQ + NEQ**2 for MF = -21 or -22, -C 22 + 10*NEQ for MF = 23, -C 22 + 11*NEQ + (3*ML+2*MU)*NEQ for MF = 24 or 25. -C 22 + 10*NEQ + (2*ML+MU)*NEQ for MF = -24 or -25. -C The first 20 words of RWORK are reserved for conditional -C and optional input and optional output. -C -C The following word in RWORK is a conditional input.. -C RWORK(1) = TCRIT = critical value of t which the solver -C is not to overshoot. Required if ITASK is -C 4 or 5, and ignored otherwise. (See ITASK.) -C -C LRW = The length of the array RWORK, as declared by the user. -C (This will be checked by the solver.) -C -C IWORK = An integer work array. The length of IWORK must be at least -C 30 if MITER = 0 or 3 (MF = 10, 13, 20, 23), or -C 30 + NEQ otherwise (abs(MF) = 11,12,14,15,21,22,24,25). -C The first 30 words of IWORK are reserved for conditional and -C optional input and optional output. -C -C The following 2 words in IWORK are conditional input.. 
-C IWORK(1) = ML These are the lower and upper -C IWORK(2) = MU half-bandwidths, respectively, of the -C banded Jacobian, excluding the main diagonal. -C The band is defined by the matrix locations -C (i,j) with i-ML .le. j .le. i+MU. ML and MU -C must satisfy 0 .le. ML,MU .le. NEQ-1. -C These are required if MITER is 4 or 5, and -C ignored otherwise. ML and MU may in fact be -C the band parameters for a matrix to which -C df/dy is only approximately equal. -C -C LIW = the length of the array IWORK, as declared by the user. -C (This will be checked by the solver.) -C -C Note.. The work arrays must not be altered between calls to DVODE -C for the same problem, except possibly for the conditional and -C optional input, and except for the last 3*NEQ words of RWORK. -C The latter space is used for internal scratch space, and so is -C available for use by the user outside DVODE between calls, if -C desired (but not for use by F or JAC). -C -C JAC = The name of the user-supplied routine (MITER = 1 or 4) to -C compute the Jacobian matrix, df/dy, as a function of -C the scalar t and the vector y. It is to have the form -C SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD, -C RPAR, IPAR) -C DOUBLE PRECISION T, Y, PD, RPAR -C DIMENSION Y(NEQ), PD(NROWPD, NEQ) -C where NEQ, T, Y, ML, MU, and NROWPD are input and the array -C PD is to be loaded with partial derivatives (elements of the -C Jacobian matrix) in the output. PD must be given a first -C dimension of NROWPD. T and Y have the same meaning as in -C Subroutine F. (In the DIMENSION statement above, NEQ can -C be replaced by * to make Y and PD assumed size arrays.) -C In the full matrix case (MITER = 1), ML and MU are -C ignored, and the Jacobian is to be loaded into PD in -C columnwise manner, with df(i)/dy(j) loaded into PD(i,j). -C In the band matrix case (MITER = 4), the elements -C within the band are to be loaded into PD in columnwise -C manner, with diagonal lines of df/dy loaded into the rows -C of PD. 
Thus df(i)/dy(j) is to be loaded into PD(i-j+MU+1,j). -C ML and MU are the half-bandwidth parameters. (See IWORK). -C The locations in PD in the two triangular areas which -C correspond to nonexistent matrix elements can be ignored -C or loaded arbitrarily, as they are overwritten by DVODE. -C JAC need not provide df/dy exactly. A crude -C approximation (possibly with a smaller bandwidth) will do. -C In either case, PD is preset to zero by the solver, -C so that only the nonzero elements need be loaded by JAC. -C Each call to JAC is preceded by a call to F with the same -C arguments NEQ, T, and Y. Thus to gain some efficiency, -C intermediate quantities shared by both calculations may be -C saved in a user COMMON block by F and not recomputed by JAC, -C if desired. Also, JAC may alter the Y array, if desired. -C JAC must be declared external in the calling program. -C Subroutine JAC may access user-defined real and integer -C work arrays, RPAR and IPAR, whose dimensions are set by the -C user in the main program. -C -C MF = The method flag. Used only for input. The legal values of -C MF are 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, -C -11, -12, -14, -15, -21, -22, -24, -25. -C MF is a signed two-digit integer, MF = JSV*(10*METH + MITER). -C JSV = SIGN(MF) indicates the Jacobian-saving strategy.. -C JSV = 1 means a copy of the Jacobian is saved for reuse -C in the corrector iteration algorithm. -C JSV = -1 means a copy of the Jacobian is not saved -C (valid only for MITER = 1, 2, 4, or 5). -C METH indicates the basic linear multistep method.. -C METH = 1 means the implicit Adams method. -C METH = 2 means the method based on backward -C differentiation formulas (BDF-s). -C MITER indicates the corrector iteration method.. -C MITER = 0 means functional iteration (no Jacobian matrix -C is involved). -C MITER = 1 means chord iteration with a user-supplied -C full (NEQ by NEQ) Jacobian. 
-C MITER = 2 means chord iteration with an internally -C generated (difference quotient) full Jacobian -C (using NEQ extra calls to F per df/dy value). -C MITER = 3 means chord iteration with an internally -C generated diagonal Jacobian approximation -C (using 1 extra call to F per df/dy evaluation). -C MITER = 4 means chord iteration with a user-supplied -C banded Jacobian. -C MITER = 5 means chord iteration with an internally -C generated banded Jacobian (using ML+MU+1 extra -C calls to F per df/dy evaluation). -C If MITER = 1 or 4, the user must supply a subroutine JAC -C (the name is arbitrary) as described above under JAC. -C For other values of MITER, a dummy argument can be used. -C -C RPAR User-specified array used to communicate real parameters -C to user-supplied subroutines. If RPAR is a vector, then -C it must be dimensioned in the user's main program. If it -C is unused or it is a scalar, then it need not be -C dimensioned. -C -C IPAR User-specified array used to communicate integer parameter -C to user-supplied subroutines. The comments on dimensioning -C RPAR apply to IPAR. -C----------------------------------------------------------------------- -C Optional Input. -C -C The following is a list of the optional input provided for in the -C call sequence. (See also Part ii.) For each such input variable, -C this table lists its name as used in this documentation, its -C location in the call sequence, its meaning, and the default value. -C The use of any of this input requires IOPT = 1, and in that -C case all of this input is examined. A value of zero for any -C of these optional input variables will cause the default value to be -C used. Thus to use a subset of the optional input, simply preload -C locations 5 to 10 in RWORK and IWORK to 0.0 and 0 respectively, and -C then set those of interest to nonzero values. -C -C NAME LOCATION MEANING AND DEFAULT VALUE -C -C H0 RWORK(5) The step size to be attempted on the first step. 
-C The default value is determined by the solver. -C -C HMAX RWORK(6) The maximum absolute step size allowed. -C The default value is infinite. -C -C HMIN RWORK(7) The minimum absolute step size allowed. -C The default value is 0. (This lower bound is not -C enforced on the final step before reaching TCRIT -C when ITASK = 4 or 5.) -C -C MAXORD IWORK(5) The maximum order to be allowed. The default -C value is 12 if METH = 1, and 5 if METH = 2. -C If MAXORD exceeds the default value, it will -C be reduced to the default value. -C If MAXORD is changed during the problem, it may -C cause the current order to be reduced. -C -C MXSTEP IWORK(6) Maximum number of (internally defined) steps -C allowed during one call to the solver. -C The default value is 500. -C -C MXHNIL IWORK(7) Maximum number of messages printed (per problem) -C warning that T + H = T on a step (H = step size). -C This must be positive to result in a non-default -C value. The default value is 10. -C -C----------------------------------------------------------------------- -C Optional Output. -C -C As optional additional output from DVODE, the variables listed -C below are quantities related to the performance of DVODE -C which are available to the user. These are communicated by way of -C the work arrays, but also have internal mnemonic names as shown. -C Except where stated otherwise, all of this output is defined -C on any successful return from DVODE, and on any return with -C ISTATE = -1, -2, -4, -5, or -6. On an illegal input return -C (ISTATE = -3), they will be unchanged from their existing values -C (if any), except possibly for TOLSF, LENRW, and LENIW. -C On any error return, output relevant to the error will be defined, -C as noted below. -C -C NAME LOCATION MEANING -C -C HU RWORK(11) The step size in t last used (successfully). -C -C HCUR RWORK(12) The step size to be attempted on the next step. 
-C -C TCUR RWORK(13) The current value of the independent variable -C which the solver has actually reached, i.e. the -C current internal mesh point in t. In the output, -C TCUR will always be at least as far from the -C initial value of t as the current argument T, -C but may be farther (if interpolation was done). -C -C TOLSF RWORK(14) A tolerance scale factor, greater than 1.0, -C computed when a request for too much accuracy was -C detected (ISTATE = -3 if detected at the start of -C the problem, ISTATE = -2 otherwise). If ITOL is -C left unaltered but RTOL and ATOL are uniformly -C scaled up by a factor of TOLSF for the next call, -C then the solver is deemed likely to succeed. -C (The user may also ignore TOLSF and alter the -C tolerance parameters in any other way appropriate.) -C -C NST IWORK(11) The number of steps taken for the problem so far. -C -C NFE IWORK(12) The number of f evaluations for the problem so far. -C -C NJE IWORK(13) The number of Jacobian evaluations so far. -C -C NQU IWORK(14) The method order last used (successfully). -C -C NQCUR IWORK(15) The order to be attempted on the next step. -C -C IMXER IWORK(16) The index of the component of largest magnitude in -C the weighted local error vector ( e(i)/EWT(i) ), -C on an error return with ISTATE = -4 or -5. -C -C LENRW IWORK(17) The length of RWORK actually required. -C This is defined on normal returns and on an illegal -C input return for insufficient storage. -C -C LENIW IWORK(18) The length of IWORK actually required. -C This is defined on normal returns and on an illegal -C input return for insufficient storage. -C -C NLU IWORK(19) The number of matrix LU decompositions so far. -C -C NNI IWORK(20) The number of nonlinear (Newton) iterations so far. -C -C NCFN IWORK(21) The number of convergence failures of the nonlinear -C solver so far. -C -C NETF IWORK(22) The number of error test failures of the integrator -C so far. 
-C -C The following two arrays are segments of the RWORK array which -C may also be of interest to the user as optional output. -C For each array, the table below gives its internal name, -C its base address in RWORK, and its description. -C -C NAME BASE ADDRESS DESCRIPTION -C -C YH 21 The Nordsieck history array, of size NYH by -C (NQCUR + 1), where NYH is the initial value -C of NEQ. For j = 0,1,...,NQCUR, column j+1 -C of YH contains HCUR**j/factorial(j) times -C the j-th derivative of the interpolating -C polynomial currently representing the -C solution, evaluated at t = TCUR. -C -C ACOR LENRW-NEQ+1 Array of size NEQ used for the accumulated -C corrections on each step, scaled in the output -C to represent the estimated local error in Y -C on the last step. This is the vector e in -C the description of the error control. It is -C defined only on a successful return from DVODE. -C -C----------------------------------------------------------------------- -C Interrupting and Restarting -C -C If the integration of a given problem by DVODE is to be -C interrrupted and then later continued, such as when restarting -C an interrupted run or alternating between two or more ODE problems, -C the user should save, following the return from the last DVODE call -C prior to the interruption, the contents of the call sequence -C variables and internal COMMON blocks, and later restore these -C values before the next DVODE call for that problem. To save -C and restore the COMMON blocks, use subroutine DVSRCO, as -C described below in part ii. -C -C In addition, if non-default values for either LUN or MFLAG are -C desired, an extra call to XSETUN and/or XSETF should be made just -C before continuing the integration. See Part ii below for details. -C -C----------------------------------------------------------------------- -C Part ii. Other Routines Callable. 
-C -C The following are optional calls which the user may make to -C gain additional capabilities in conjunction with DVODE. -C (The routines XSETUN and XSETF are designed to conform to the -C SLATEC error handling package.) -C -C FORM OF CALL FUNCTION -C CALL XSETUN(LUN) Set the logical unit number, LUN, for -C output of messages from DVODE, if -C the default is not desired. -C The default value of LUN is 6. -C -C CALL XSETF(MFLAG) Set a flag to control the printing of -C messages by DVODE. -C MFLAG = 0 means do not print. (Danger.. -C This risks losing valuable information.) -C MFLAG = 1 means print (the default). -C -C Either of the above calls may be made at -C any time and will take effect immediately. -C -C CALL DVSRCO(RSAV,ISAV,JOB) Saves and restores the contents of -C the internal COMMON blocks used by -C DVODE. (See Part iii below.) -C RSAV must be a real array of length 49 -C or more, and ISAV must be an integer -C array of length 40 or more. -C JOB=1 means save COMMON into RSAV/ISAV. -C JOB=2 means restore COMMON from RSAV/ISAV. -C DVSRCO is useful if one is -C interrupting a run and restarting -C later, or alternating between two or -C more problems solved with DVODE. -C -C CALL DVINDY(,,,,,) Provide derivatives of y, of various -C (See below.) orders, at a specified point T, if -C desired. It may be called only after -C a successful return from DVODE. -C -C The detailed instructions for using DVINDY are as follows. -C The form of the call is.. -C -C CALL DVINDY (T, K, RWORK(21), NYH, DKY, IFLAG) -C -C The input parameters are.. -C -C T = Value of independent variable where answers are desired -C (normally the same as the T last returned by DVODE). -C For valid results, T must lie between TCUR - HU and TCUR. -C (See optional output for TCUR and HU.) -C K = Integer order of the derivative desired. K must satisfy -C 0 .le. K .le. NQCUR, where NQCUR is the current order -C (see optional output). The capability corresponding -C to K = 0, i.e. 
computing y(T), is already provided -C by DVODE directly. Since NQCUR .ge. 1, the first -C derivative dy/dt is always available with DVINDY. -C RWORK(21) = The base address of the history array YH. -C NYH = Column length of YH, equal to the initial value of NEQ. -C -C The output parameters are.. -C -C DKY = A real array of length NEQ containing the computed value -C of the K-th derivative of y(t). -C IFLAG = Integer flag, returned as 0 if K and T were legal, -C -1 if K was illegal, and -2 if T was illegal. -C On an error return, a message is also written. -C----------------------------------------------------------------------- -C Part iii. COMMON Blocks. -C If DVODE is to be used in an overlay situation, the user -C must declare, in the primary overlay, the variables in.. -C (1) the call sequence to DVODE, -C (2) the two internal COMMON blocks -C /DVOD01/ of length 81 (48 double precision words -C followed by 33 integer words), -C /DVOD02/ of length 9 (1 double precision word -C followed by 8 integer words), -C -C If DVODE is used on a system in which the contents of internal -C COMMON blocks are not preserved between calls, the user should -C declare the above two COMMON blocks in his main program to insure -C that their contents are preserved. -C -C----------------------------------------------------------------------- -C Part iv. Optionally Replaceable Solver Routines. -C -C Below are descriptions of two routines in the DVODE package which -C relate to the measurement of errors. Either routine can be -C replaced by a user-supplied version, if desired. However, since such -C a replacement may have a major impact on performance, it should be -C done only when absolutely necessary, and only with great caution. -C (Note.. The means by which the package version of a routine is -C superseded by the user's version may be system-dependent.) -C -C (a) DEWSET. 
-C The following subroutine is called just before each internal -C integration step, and sets the array of error weights, EWT, as -C described under ITOL/RTOL/ATOL above.. -C SUBROUTINE DEWSET (NEQ, ITOL, RTOL, ATOL, YCUR, EWT) -C where NEQ, ITOL, RTOL, and ATOL are as in the DVODE call sequence, -C YCUR contains the current dependent variable vector, and -C EWT is the array of weights set by DEWSET. -C -C If the user supplies this subroutine, it must return in EWT(i) -C (i = 1,...,NEQ) a positive quantity suitable for comparison with -C errors in Y(i). The EWT array returned by DEWSET is passed to the -C DVNORM routine (See below.), and also used by DVODE in the computation -C of the optional output IMXER, the diagonal Jacobian approximation, -C and the increments for difference quotient Jacobians. -C -C In the user-supplied version of DEWSET, it may be desirable to use -C the current values of derivatives of y. Derivatives up to order NQ -C are available from the history array YH, described above under -C Optional Output. In DEWSET, YH is identical to the YCUR array, -C extended to NQ + 1 columns with a column length of NYH and scale -C factors of h**j/factorial(j). On the first call for the problem, -C given by NST = 0, NQ is 1 and H is temporarily set to 1.0. -C NYH is the initial value of NEQ. The quantities NQ, H, and NST -C can be obtained by including in DEWSET the statements.. -C DOUBLE PRECISION RVOD, H, HU -C COMMON /DVOD01/ RVOD(48), IVOD(33) -C COMMON /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C NQ = IVOD(28) -C H = RVOD(21) -C Thus, for example, the current value of dy/dt can be obtained as -C YCUR(NYH+i)/H (i=1,...,NEQ) (and the division by H is -C unnecessary when NST = 0). -C -C (b) DVNORM. -C The following is a real function routine which computes the weighted -C root-mean-square norm of a vector v.. -C D = DVNORM (N, V, W) -C where.. 
-C N = the length of the vector, -C V = real array of length N containing the vector, -C W = real array of length N containing weights, -C D = sqrt( (1/N) * sum(V(i)*W(i))**2 ). -C DVNORM is called with N = NEQ and with W(i) = 1.0/EWT(i), where -C EWT is as set by subroutine DEWSET. -C -C If the user supplies this function, it should return a non-negative -C value of DVNORM suitable for use in the error control in DVODE. -C None of the arguments should be altered by DVNORM. -C For example, a user-supplied DVNORM routine might.. -C -substitute a max-norm of (V(i)*W(i)) for the rms-norm, or -C -ignore some components of V in the norm, with the effect of -C suppressing the error control on those components of Y. -C----------------------------------------------------------------------- -C Other Routines in the DVODE Package. -C -C In addition to subroutine DVODE, the DVODE package includes the -C following subroutines and function routines.. -C DVHIN computes an approximate step size for the initial step. -C DVINDY computes an interpolated value of the y vector at t = TOUT. -C DVSTEP is the core integrator, which does one step of the -C integration and the associated error control. -C DVSET sets all method coefficients and test constants. -C DVNLSD solves the underlying nonlinear system -- the corrector. -C DVJAC computes and preprocesses the Jacobian matrix J = df/dy -C and the Newton iteration matrix P = I - (h/l1)*J. -C DVSOL manages solution of linear system in chord iteration. -C DVJUST adjusts the history array on a change of order. -C DEWSET sets the error weight vector EWT before each step. -C DVNORM computes the weighted r.m.s. norm of a vector. -C DVSRCO is a user-callable routine to save and restore -C the contents of the internal COMMON blocks. -C DACOPY is a routine to copy one two-dimensional array to another. -C DGEFA and DGESL are routines from LINPACK for solving full -C systems of linear algebraic equations. 
-C DGBFA and DGBSL are routines from LINPACK for solving banded -C linear systems. -C DAXPY, DSCAL, and DCOPY are basic linear algebra modules (BLAS). -C D1MACH sets the unit roundoff of the machine. -C XERRWD, XSETUN, XSETF, and IXSAV handle the printing of all -C error messages and warnings. XERRWD is machine-dependent. -C Note.. DVNORM, D1MACH, and IXSAV are function routines. -C All the others are subroutines. -C -C The intrinsic and external routines used by the DVODE package are.. -C ABS, MAX, MIN, REAL, SIGN, SQRT, and WRITE. -C -C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block DVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - EXTERNAL DVNLSD - LOGICAL IHIT - DOUBLE PRECISION ATOLI, BIG, EWTI, FOUR, H0, HMAX, HMX, HUN, ONE, - 1 PT2, RH, RTOLI, SIZE, TCRIT, TNEXT, TOLSF, TP, TWO, ZERO - INTEGER I, IER, IFLAG, IMXER, JCO, KGO, LENIW, LENJ, LENP, LENRW, - 1 LENWM, LF0, MBAND, MFA, ML, MORD, MU, MXHNL0, MXSTP0, NITER, - 2 NSLAST - CHARACTER*80 MSG -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION D1MACH, DVNORM -C - DIMENSION MORD(2) -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to DVODE. 
-C----------------------------------------------------------------------- - SAVE MORD, MXHNL0, MXSTP0 - SAVE ZERO, ONE, TWO, FOUR, PT2, HUN -C----------------------------------------------------------------------- -C The following internal COMMON blocks contain variables which are -C communicated between subroutines in the DVODE package, or which are -C to be saved between calls to DVODE. -C In each block, real variables precede integers. -C The block /DVOD01/ appears in subroutines DVODE, DVINDY, DVSTEP, -C DVSET, DVNLSD, DVJAC, DVSOL, DVJUST and DVSRCO. -C The block /DVOD02/ appears in subroutines DVODE, DVINDY, DVSTEP, -C DVNLSD, DVJAC, and DVSRCO. -C -C The variables stored in the internal COMMON blocks are as follows.. -C -C ACNRM = Weighted r.m.s. norm of accumulated correction vectors. -C CCMXJ = Threshhold on DRC for updating the Jacobian. (See DRC.) -C CONP = The saved value of TQ(5). -C CRATE = Estimated corrector convergence rate constant. -C DRC = Relative change in H*RL1 since last DVJAC call. -C EL = Real array of integration coefficients. See DVSET. -C ETA = Saved tentative ratio of new to old H. -C ETAMAX = Saved maximum value of ETA to be allowed. -C H = The step size. -C HMIN = The minimum absolute value of the step size H to be used. -C HMXI = Inverse of the maximum absolute value of H to be used. -C HMXI = 0.0 is allowed and corresponds to an infinite HMAX. -C HNEW = The step size to be attempted on the next step. -C HSCAL = Stepsize in scaling of YH array. -C PRL1 = The saved value of RL1. -C RC = Ratio of current H*RL1 to value on last DVJAC call. -C RL1 = The reciprocal of the coefficient EL(1). -C TAU = Real vector of past NQ step sizes, length 13. -C TQ = A real vector of length 5 in which DVSET stores constants -C used for the convergence test, the error test, and the -C selection of H at a new order. -C TN = The independent variable, updated on each step taken. -C UROUND = The machine unit roundoff. 
The smallest positive real number -C such that 1.0 + UROUND .ne. 1.0 -C ICF = Integer flag for convergence failure in DVNLSD.. -C 0 means no failures. -C 1 means convergence failure with out of date Jacobian -C (recoverable error). -C 2 means convergence failure with current Jacobian or -C singular matrix (unrecoverable error). -C INIT = Saved integer flag indicating whether initialization of the -C problem has been done (INIT = 1) or not. -C IPUP = Saved flag to signal updating of Newton matrix. -C JCUR = Output flag from DVJAC showing Jacobian status.. -C JCUR = 0 means J is not current. -C JCUR = 1 means J is current. -C JSTART = Integer flag used as input to DVSTEP.. -C 0 means perform the first step. -C 1 means take a new step continuing from the last. -C -1 means take the next step with a new value of MAXORD, -C HMIN, HMXI, N, METH, MITER, and/or matrix parameters. -C On return, DVSTEP sets JSTART = 1. -C JSV = Integer flag for Jacobian saving, = sign(MF). -C KFLAG = A completion code from DVSTEP with the following meanings.. -C 0 the step was succesful. -C -1 the requested error could not be achieved. -C -2 corrector convergence could not be achieved. -C -3, -4 fatal error in VNLS (can not occur here). -C KUTH = Input flag to DVSTEP showing whether H was reduced by the -C driver. KUTH = 1 if H was reduced, = 0 otherwise. -C L = Integer variable, NQ + 1, current order plus one. -C LMAX = MAXORD + 1 (used for dimensioning). -C LOCJS = A pointer to the saved Jacobian, whose storage starts at -C WM(LOCJS), if JSV = 1. -C LYH, LEWT, LACOR, LSAVF, LWM, LIWM = Saved integer pointers -C to segments of RWORK and IWORK. -C MAXORD = The maximum order of integration method to be allowed. -C METH/MITER = The method flags. See MF. -C MSBJ = The maximum number of steps between J evaluations, = 50. -C MXHNIL = Saved value of optional input MXHNIL. -C MXSTEP = Saved value of optional input MXSTEP. -C N = The number of first-order ODEs, = NEQ. 
-C NEWH = Saved integer to flag change of H. -C NEWQ = The method order to be used on the next step. -C NHNIL = Saved counter for occurrences of T + H = T. -C NQ = Integer variable, the current integration method order. -C NQNYH = Saved value of NQ*NYH. -C NQWAIT = A counter controlling the frequency of order changes. -C An order change is about to be considered if NQWAIT = 1. -C NSLJ = The number of steps taken as of the last Jacobian update. -C NSLP = Saved value of NST as of last Newton matrix update. -C NYH = Saved value of the initial value of NEQ. -C HU = The step size in t last used. -C NCFN = Number of nonlinear convergence failures so far. -C NETF = The number of error test failures of the integrator so far. -C NFE = The number of f evaluations for the problem so far. -C NJE = The number of Jacobian evaluations so far. -C NLU = The number of matrix LU decompositions so far. -C NNI = Number of nonlinear iterations so far. -C NQU = The method order last used. -C NST = The number of steps taken for the problem so far. -C----------------------------------------------------------------------- - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA MORD(1) /12/, MORD(2) /5/, MXSTP0 /500/, MXHNL0 /10/ - DATA ZERO /0.0D0/, ONE /1.0D0/, TWO /2.0D0/, FOUR /4.0D0/, - 1 PT2 /0.2D0/, HUN /100.0D0/ -C----------------------------------------------------------------------- -C Block A. -C This code block is executed on every call. -C It tests ISTATE and ITASK for legality and branches appropriately. -C If ISTATE .gt. 
1 but the flag INIT shows that initialization has -C not yet been done, an error return occurs. -C If ISTATE = 1 and TOUT = T, return immediately. -C----------------------------------------------------------------------- - IF (ISTATE .LT. 1 .OR. ISTATE .GT. 3) GO TO 601 - IF (ITASK .LT. 1 .OR. ITASK .GT. 5) GO TO 602 - IF (ISTATE .EQ. 1) GO TO 10 - IF (INIT .NE. 1) GO TO 603 - IF (ISTATE .EQ. 2) GO TO 200 - GO TO 20 - 10 INIT = 0 - IF (TOUT .EQ. T) RETURN -C----------------------------------------------------------------------- -C Block B. -C The next code block is executed for the initial call (ISTATE = 1), -C or for a continuation call with parameter changes (ISTATE = 3). -C It contains checking of all input and various initializations. -C -C First check legality of the non-optional input NEQ, ITOL, IOPT, -C MF, ML, and MU. -C----------------------------------------------------------------------- - 20 IF (NEQ .LE. 0) GO TO 604 - IF (ISTATE .EQ. 1) GO TO 25 - IF (NEQ .GT. N) GO TO 605 - 25 N = NEQ - IF (ITOL .LT. 1 .OR. ITOL .GT. 4) GO TO 606 - IF (IOPT .LT. 0 .OR. IOPT .GT. 1) GO TO 607 - JSV = SIGN(1,MF) - MFA = ABS(MF) - METH = MFA/10 - MITER = MFA - 10*METH - IF (METH .LT. 1 .OR. METH .GT. 2) GO TO 608 - IF (MITER .LT. 0 .OR. MITER .GT. 5) GO TO 608 - IF (MITER .LE. 3) GO TO 30 - ML = IWORK(1) - MU = IWORK(2) - IF (ML .LT. 0 .OR. ML .GE. N) GO TO 609 - IF (MU .LT. 0 .OR. MU .GE. N) GO TO 610 - 30 CONTINUE -C Next process and check the optional input. --------------------------- - IF (IOPT .EQ. 1) GO TO 40 - MAXORD = MORD(METH) - MXSTEP = MXSTP0 - MXHNIL = MXHNL0 - IF (ISTATE .EQ. 1) H0 = ZERO - HMXI = ZERO - HMIN = ZERO - GO TO 60 - 40 MAXORD = IWORK(5) - IF (MAXORD .LT. 0) GO TO 611 - IF (MAXORD .EQ. 0) MAXORD = 100 - MAXORD = MIN(MAXORD,MORD(METH)) - MXSTEP = IWORK(6) - IF (MXSTEP .LT. 0) GO TO 612 - IF (MXSTEP .EQ. 0) MXSTEP = MXSTP0 - MXHNIL = IWORK(7) - IF (MXHNIL .LT. 0) GO TO 613 - IF (MXHNIL .EQ. 0) MXHNIL = MXHNL0 - IF (ISTATE .NE. 
1) GO TO 50 - H0 = RWORK(5) - IF ((TOUT - T)*H0 .LT. ZERO) GO TO 614 - 50 HMAX = RWORK(6) - IF (HMAX .LT. ZERO) GO TO 615 - HMXI = ZERO - IF (HMAX .GT. ZERO) HMXI = ONE/HMAX - HMIN = RWORK(7) - IF (HMIN .LT. ZERO) GO TO 616 -C----------------------------------------------------------------------- -C Set work array pointers and check lengths LRW and LIW. -C Pointers to segments of RWORK and IWORK are named by prefixing L to -C the name of the segment. E.g., the segment YH starts at RWORK(LYH). -C Segments of RWORK (in order) are denoted YH, WM, EWT, SAVF, ACOR. -C Within WM, LOCJS is the location of the saved Jacobian (JSV .gt. 0). -C----------------------------------------------------------------------- - 60 LYH = 21 - IF (ISTATE .EQ. 1) NYH = N - LWM = LYH + (MAXORD + 1)*NYH - JCO = MAX(0,JSV) - IF (MITER .EQ. 0) LENWM = 0 - IF (MITER .EQ. 1 .OR. MITER .EQ. 2) THEN - LENWM = 2 + (1 + JCO)*N*N - LOCJS = N*N + 3 - ENDIF - IF (MITER .EQ. 3) LENWM = 2 + N - IF (MITER .EQ. 4 .OR. MITER .EQ. 5) THEN - MBAND = ML + MU + 1 - LENP = (MBAND + ML)*N - LENJ = MBAND*N - LENWM = 2 + LENP + JCO*LENJ - LOCJS = LENP + 3 - ENDIF - LEWT = LWM + LENWM - LSAVF = LEWT + N - LACOR = LSAVF + N - LENRW = LACOR + N - 1 - IWORK(17) = LENRW - LIWM = 1 - LENIW = 30 + N - IF (MITER .EQ. 0 .OR. MITER .EQ. 3) LENIW = 30 - IWORK(18) = LENIW - IF (LENRW .GT. LRW) GO TO 617 - IF (LENIW .GT. LIW) GO TO 618 -C Check RTOL and ATOL for legality. ------------------------------------ - RTOLI = RTOL(1) - ATOLI = ATOL(1) - DO 70 I = 1,N - IF (ITOL .GE. 3) RTOLI = RTOL(I) - IF (ITOL .EQ. 2 .OR. ITOL .EQ. 4) ATOLI = ATOL(I) - IF (RTOLI .LT. ZERO) GO TO 619 - IF (ATOLI .LT. ZERO) GO TO 620 - 70 CONTINUE - IF (ISTATE .EQ. 1) GO TO 100 -C If ISTATE = 3, set flag to signal parameter changes to DVSTEP. ------- - JSTART = -1 - IF (NQ .LE. MAXORD) GO TO 90 -C MAXORD was reduced below NQ. Copy YH(*,MAXORD+2) into SAVF. 
--------- - CALL DCOPY (N, RWORK(LWM), 1, RWORK(LSAVF), 1) -C Reload WM(1) = RWORK(LWM), since LWM may have changed. --------------- - 90 IF (MITER .GT. 0) RWORK(LWM) = SQRT(UROUND) -C bug fix 12 Nov 1998 - GO TO 200 -C----------------------------------------------------------------------- -C Block C. -C The next block is for the initial call only (ISTATE = 1). -C It contains all remaining initializations, the initial call to F, -C and the calculation of the initial step size. -C The error weights in EWT are inverted after being loaded. -C----------------------------------------------------------------------- - 100 UROUND = D1MACH(4) - TN = T - IF (ITASK .NE. 4 .AND. ITASK .NE. 5) GO TO 110 - TCRIT = RWORK(1) - IF ((TCRIT - TOUT)*(TOUT - T) .LT. ZERO) GO TO 625 - IF (H0 .NE. ZERO .AND. (T + H0 - TCRIT)*H0 .GT. ZERO) - 1 H0 = TCRIT - T - 110 JSTART = 0 - IF (MITER .GT. 0) RWORK(LWM) = SQRT(UROUND) - CCMXJ = PT2 - MSBJ = 50 - NHNIL = 0 - NST = 0 - NJE = 0 - NNI = 0 - NCFN = 0 - NETF = 0 - NLU = 0 - NSLJ = 0 - NSLAST = 0 - HU = ZERO - NQU = 0 -C Initial call to F. (LF0 points to YH(*,2).) ------------------------- - LF0 = LYH + NYH - CALL F (N, T, Y, RWORK(LF0), RPAR, IPAR) - NFE = 1 -C Load the initial value vector in YH. --------------------------------- - CALL DCOPY (N, Y, 1, RWORK(LYH), 1) -C Load and invert the EWT array. (H is temporarily set to 1.0.) ------- - NQ = 1 - H = ONE - CALL DEWSET (N, ITOL, RTOL, ATOL, RWORK(LYH), RWORK(LEWT)) - DO 120 I = 1,N - IF (RWORK(I+LEWT-1) .LE. ZERO) GO TO 621 - 120 RWORK(I+LEWT-1) = ONE/RWORK(I+LEWT-1) - IF (H0 .NE. ZERO) GO TO 180 -C Call DVHIN to set initial step size H0 to be attempted. -------------- - CALL DVHIN (N, T, RWORK(LYH), RWORK(LF0), F, RPAR, IPAR, TOUT, - 1 UROUND, RWORK(LEWT), ITOL, ATOL, Y, RWORK(LACOR), H0, - 2 NITER, IER) - NFE = NFE + NITER - IF (IER .NE. 0) GO TO 622 -C Adjust H0 if necessary to meet HMAX bound. --------------------------- - 180 RH = ABS(H0)*HMXI - IF (RH .GT. 
ONE) H0 = H0/RH -C Load H with H0 and scale YH(*,2) by H0. ------------------------------ - H = H0 - CALL DSCAL (N, H0, RWORK(LF0), 1) - GO TO 270 -C----------------------------------------------------------------------- -C Block D. -C The next code block is for continuation calls only (ISTATE = 2 or 3) -C and is to check stop conditions before taking a step. -C----------------------------------------------------------------------- - 200 NSLAST = NST - KUTH = 0 - GO TO (210, 250, 220, 230, 240), ITASK - 210 IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 - CALL DVINDY (TOUT, 0, RWORK(LYH), NYH, Y, IFLAG) - IF (IFLAG .NE. 0) GO TO 627 - T = TOUT - GO TO 420 - 220 TP = TN - HU*(ONE + HUN*UROUND) - IF ((TP - TOUT)*H .GT. ZERO) GO TO 623 - IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 - GO TO 400 - 230 TCRIT = RWORK(1) - IF ((TN - TCRIT)*H .GT. ZERO) GO TO 624 - IF ((TCRIT - TOUT)*H .LT. ZERO) GO TO 625 - IF ((TN - TOUT)*H .LT. ZERO) GO TO 245 - CALL DVINDY (TOUT, 0, RWORK(LYH), NYH, Y, IFLAG) - IF (IFLAG .NE. 0) GO TO 627 - T = TOUT - GO TO 420 - 240 TCRIT = RWORK(1) - IF ((TN - TCRIT)*H .GT. ZERO) GO TO 624 - 245 HMX = ABS(TN) + ABS(H) - IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX - IF (IHIT) GO TO 400 - TNEXT = TN + HNEW*(ONE + FOUR*UROUND) - IF ((TNEXT - TCRIT)*H .LE. ZERO) GO TO 250 - H = (TCRIT - TN)*(ONE - FOUR*UROUND) - KUTH = 1 -C----------------------------------------------------------------------- -C Block E. -C The next block is normally executed for all calls and contains -C the call to the one-step core integrator DVSTEP. -C -C This is a looping point for the integration steps. -C -C First check for too many steps being taken, update EWT (if not at -C start of problem), check for too much accuracy being requested, and -C check for H below the roundoff level in T. -C----------------------------------------------------------------------- - 250 CONTINUE - IF ((NST-NSLAST) .GE. 
MXSTEP) GO TO 500 - CALL DEWSET (N, ITOL, RTOL, ATOL, RWORK(LYH), RWORK(LEWT)) - DO 260 I = 1,N - IF (RWORK(I+LEWT-1) .LE. ZERO) GO TO 510 - 260 RWORK(I+LEWT-1) = ONE/RWORK(I+LEWT-1) - 270 TOLSF = UROUND*DVNORM (N, RWORK(LYH), RWORK(LEWT)) - IF (TOLSF .LE. ONE) GO TO 280 - TOLSF = TOLSF*TWO - IF (NST .EQ. 0) GO TO 626 - GO TO 520 - 280 IF ((TN + H) .NE. TN) GO TO 290 - NHNIL = NHNIL + 1 - IF (NHNIL .GT. MXHNIL) GO TO 290 - MSG = 'DVODE-- Warning..internal T (=R1) and H (=R2) are' - CALL XERRWD (MSG, 50, 101, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG=' such that in the machine, T + H = T on the next step ' - CALL XERRWD (MSG, 60, 101, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' (H = step size). solver will continue anyway' - CALL XERRWD (MSG, 50, 101, 1, 0, 0, 0, 2, TN, H) - IF (NHNIL .LT. MXHNIL) GO TO 290 - MSG = 'DVODE-- Above warning has been issued I1 times. ' - CALL XERRWD (MSG, 50, 102, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' it will not be issued again for this problem' - CALL XERRWD (MSG, 50, 102, 1, 1, MXHNIL, 0, 0, ZERO, ZERO) - 290 CONTINUE -C----------------------------------------------------------------------- -C CALL DVSTEP (Y, YH, NYH, YH, EWT, SAVF, VSAV, ACOR, -C WM, IWM, F, JAC, F, DVNLSD, RPAR, IPAR) -C----------------------------------------------------------------------- - CALL DVSTEP (Y, RWORK(LYH), NYH, RWORK(LYH), RWORK(LEWT), - 1 RWORK(LSAVF), Y, RWORK(LACOR), RWORK(LWM), IWORK(LIWM), - 2 F, JAC, F, DVNLSD, RPAR, IPAR) - KGO = 1 - KFLAG -C Branch on KFLAG. Note..In this version, KFLAG can not be set to -3. -C KFLAG .eq. 0, -1, -2 - GO TO (300, 530, 540), KGO -C----------------------------------------------------------------------- -C Block F. -C The following block handles the case of a successful return from the -C core integrator (KFLAG = 0). Test for stop conditions. -C----------------------------------------------------------------------- - 300 INIT = 1 - KUTH = 0 - GO TO (310, 400, 330, 340, 350), ITASK -C ITASK = 1. 
If TOUT has been reached, interpolate. ------------------- - 310 IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 - CALL DVINDY (TOUT, 0, RWORK(LYH), NYH, Y, IFLAG) - T = TOUT - GO TO 420 -C ITASK = 3. Jump to exit if TOUT was reached. ------------------------ - 330 IF ((TN - TOUT)*H .GE. ZERO) GO TO 400 - GO TO 250 -C ITASK = 4. See if TOUT or TCRIT was reached. Adjust H if necessary. - 340 IF ((TN - TOUT)*H .LT. ZERO) GO TO 345 - CALL DVINDY (TOUT, 0, RWORK(LYH), NYH, Y, IFLAG) - T = TOUT - GO TO 420 - 345 HMX = ABS(TN) + ABS(H) - IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX - IF (IHIT) GO TO 400 - TNEXT = TN + HNEW*(ONE + FOUR*UROUND) - IF ((TNEXT - TCRIT)*H .LE. ZERO) GO TO 250 - H = (TCRIT - TN)*(ONE - FOUR*UROUND) - KUTH = 1 - GO TO 250 -C ITASK = 5. See if TCRIT was reached and jump to exit. --------------- - 350 HMX = ABS(TN) + ABS(H) - IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX -C----------------------------------------------------------------------- -C Block G. -C The following block handles all successful returns from DVODE. -C If ITASK .ne. 1, Y is loaded from YH and T is set accordingly. -C ISTATE is set to 2, and the optional output is loaded into the work -C arrays before returning. -C----------------------------------------------------------------------- - 400 CONTINUE - CALL DCOPY (N, RWORK(LYH), 1, Y, 1) - T = TN - IF (ITASK .NE. 4 .AND. ITASK .NE. 5) GO TO 420 - IF (IHIT) T = TCRIT - 420 ISTATE = 2 - RWORK(11) = HU - RWORK(12) = HNEW - RWORK(13) = TN - IWORK(11) = NST - IWORK(12) = NFE - IWORK(13) = NJE - IWORK(14) = NQU - IWORK(15) = NEWQ - IWORK(19) = NLU - IWORK(20) = NNI - IWORK(21) = NCFN - IWORK(22) = NETF - RETURN -C----------------------------------------------------------------------- -C Block H. -C The following block handles all unsuccessful returns other than -C those for illegal input. First the error message routine is called. -C if there was an error test or convergence test failure, IMXER is set. 
-C Then Y is loaded from YH, and T is set to TN. -C The optional output is loaded into the work arrays before returning. -C----------------------------------------------------------------------- -C The maximum number of steps was taken before reaching TOUT. ---------- - 500 MSG = 'DVODE-- At current T (=R1), MXSTEP (=I1) steps ' - CALL XERRWD (MSG, 50, 201, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' taken on this call before reaching TOUT ' - CALL XERRWD (MSG, 50, 201, 1, 1, MXSTEP, 0, 1, TN, ZERO) - ISTATE = -1 - GO TO 580 -C EWT(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 EWTI = RWORK(LEWT+I-1) - MSG = 'DVODE-- At T (=R1), EWT(I1) has become R2 .le. 0.' - CALL XERRWD (MSG, 50, 202, 1, 1, I, 0, 2, TN, EWTI) - ISTATE = -6 - GO TO 580 -C Too much accuracy requested for machine precision. ------------------- - 520 MSG = 'DVODE-- At T (=R1), too much accuracy requested ' - CALL XERRWD (MSG, 50, 203, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' for precision of machine.. see TOLSF (=R2) ' - CALL XERRWD (MSG, 50, 203, 1, 0, 0, 0, 2, TN, TOLSF) - RWORK(14) = TOLSF - ISTATE = -2 - GO TO 580 -C KFLAG = -1. Error test failed repeatedly or with ABS(H) = HMIN. ----- - 530 MSG = 'DVODE-- At T(=R1) and step size H(=R2), the error' - CALL XERRWD (MSG, 50, 204, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' test failed repeatedly or with abs(H) = HMIN' - CALL XERRWD (MSG, 50, 204, 1, 0, 0, 0, 2, TN, H) - ISTATE = -4 - GO TO 560 -C KFLAG = -2. Convergence failed repeatedly or with ABS(H) = HMIN. ---- - 540 MSG = 'DVODE-- At T (=R1) and step size H (=R2), the ' - CALL XERRWD (MSG, 50, 205, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' corrector convergence failed repeatedly ' - CALL XERRWD (MSG, 50, 205, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' or with abs(H) = HMIN ' - CALL XERRWD (MSG, 30, 205, 1, 0, 0, 0, 2, TN, H) - ISTATE = -5 -C Compute IMXER if relevant. 
------------------------------------------- - 560 BIG = ZERO - IMXER = 1 - DO 570 I = 1,N - SIZE = ABS(RWORK(I+LACOR-1)*RWORK(I+LEWT-1)) - IF (BIG .GE. SIZE) GO TO 570 - BIG = SIZE - IMXER = I - 570 CONTINUE - IWORK(16) = IMXER -C Set Y vector, T, and optional output. -------------------------------- - 580 CONTINUE - CALL DCOPY (N, RWORK(LYH), 1, Y, 1) - T = TN - RWORK(11) = HU - RWORK(12) = H - RWORK(13) = TN - IWORK(11) = NST - IWORK(12) = NFE - IWORK(13) = NJE - IWORK(14) = NQU - IWORK(15) = NQ - IWORK(19) = NLU - IWORK(20) = NNI - IWORK(21) = NCFN - IWORK(22) = NETF - RETURN -C----------------------------------------------------------------------- -C Block I. -C The following block handles all error returns due to illegal input -C (ISTATE = -3), as detected before calling the core integrator. -C First the error message routine is called. If the illegal input -C is a negative ISTATE, the run is aborted (apparent infinite loop). -C----------------------------------------------------------------------- - 601 MSG = 'DVODE-- ISTATE (=I1) illegal ' - CALL XERRWD (MSG, 30, 1, 1, 1, ISTATE, 0, 0, ZERO, ZERO) - IF (ISTATE .LT. 0) GO TO 800 - GO TO 700 - 602 MSG = 'DVODE-- ITASK (=I1) illegal ' - CALL XERRWD (MSG, 30, 2, 1, 1, ITASK, 0, 0, ZERO, ZERO) - GO TO 700 - 603 MSG='DVODE-- ISTATE (=I1) .gt. 1 but DVODE not initialized ' - CALL XERRWD (MSG, 60, 3, 1, 1, ISTATE, 0, 0, ZERO, ZERO) - GO TO 700 - 604 MSG = 'DVODE-- NEQ (=I1) .lt. 
1 ' - CALL XERRWD (MSG, 30, 4, 1, 1, NEQ, 0, 0, ZERO, ZERO) - GO TO 700 - 605 MSG = 'DVODE-- ISTATE = 3 and NEQ increased (I1 to I2) ' - CALL XERRWD (MSG, 50, 5, 1, 2, N, NEQ, 0, ZERO, ZERO) - GO TO 700 - 606 MSG = 'DVODE-- ITOL (=I1) illegal ' - CALL XERRWD (MSG, 30, 6, 1, 1, ITOL, 0, 0, ZERO, ZERO) - GO TO 700 - 607 MSG = 'DVODE-- IOPT (=I1) illegal ' - CALL XERRWD (MSG, 30, 7, 1, 1, IOPT, 0, 0, ZERO, ZERO) - GO TO 700 - 608 MSG = 'DVODE-- MF (=I1) illegal ' - CALL XERRWD (MSG, 30, 8, 1, 1, MF, 0, 0, ZERO, ZERO) - GO TO 700 - 609 MSG = 'DVODE-- ML (=I1) illegal.. .lt.0 or .ge.NEQ (=I2)' - CALL XERRWD (MSG, 50, 9, 1, 2, ML, NEQ, 0, ZERO, ZERO) - GO TO 700 - 610 MSG = 'DVODE-- MU (=I1) illegal.. .lt.0 or .ge.NEQ (=I2)' - CALL XERRWD (MSG, 50, 10, 1, 2, MU, NEQ, 0, ZERO, ZERO) - GO TO 700 - 611 MSG = 'DVODE-- MAXORD (=I1) .lt. 0 ' - CALL XERRWD (MSG, 30, 11, 1, 1, MAXORD, 0, 0, ZERO, ZERO) - GO TO 700 - 612 MSG = 'DVODE-- MXSTEP (=I1) .lt. 0 ' - CALL XERRWD (MSG, 30, 12, 1, 1, MXSTEP, 0, 0, ZERO, ZERO) - GO TO 700 - 613 MSG = 'DVODE-- MXHNIL (=I1) .lt. 0 ' - CALL XERRWD (MSG, 30, 13, 1, 1, MXHNIL, 0, 0, ZERO, ZERO) - GO TO 700 - 614 MSG = 'DVODE-- TOUT (=R1) behind T (=R2) ' - CALL XERRWD (MSG, 40, 14, 1, 0, 0, 0, 2, TOUT, T) - MSG = ' integration direction is given by H0 (=R1) ' - CALL XERRWD (MSG, 50, 14, 1, 0, 0, 0, 1, H0, ZERO) - GO TO 700 - 615 MSG = 'DVODE-- HMAX (=R1) .lt. 0.0 ' - CALL XERRWD (MSG, 30, 15, 1, 0, 0, 0, 1, HMAX, ZERO) - GO TO 700 - 616 MSG = 'DVODE-- HMIN (=R1) .lt. 0.0 ' - CALL XERRWD (MSG, 30, 16, 1, 0, 0, 0, 1, HMIN, ZERO) - GO TO 700 - 617 CONTINUE - MSG='DVODE-- RWORK length needed, LENRW (=I1), exceeds LRW (=I2)' - CALL XERRWD (MSG, 60, 17, 1, 2, LENRW, LRW, 0, ZERO, ZERO) - GO TO 700 - 618 CONTINUE - MSG='DVODE-- IWORK length needed, LENIW (=I1), exceeds LIW (=I2)' - CALL XERRWD (MSG, 60, 18, 1, 2, LENIW, LIW, 0, ZERO, ZERO) - GO TO 700 - 619 MSG = 'DVODE-- RTOL(I1) is R1 .lt. 
0.0 ' - CALL XERRWD (MSG, 40, 19, 1, 1, I, 0, 1, RTOLI, ZERO) - GO TO 700 - 620 MSG = 'DVODE-- ATOL(I1) is R1 .lt. 0.0 ' - CALL XERRWD (MSG, 40, 20, 1, 1, I, 0, 1, ATOLI, ZERO) - GO TO 700 - 621 EWTI = RWORK(LEWT+I-1) - MSG = 'DVODE-- EWT(I1) is R1 .le. 0.0 ' - CALL XERRWD (MSG, 40, 21, 1, 1, I, 0, 1, EWTI, ZERO) - GO TO 700 - 622 CONTINUE - MSG='DVODE-- TOUT (=R1) too close to T(=R2) to start integration' - CALL XERRWD (MSG, 60, 22, 1, 0, 0, 0, 2, TOUT, T) - GO TO 700 - 623 CONTINUE - MSG='DVODE-- ITASK = I1 and TOUT (=R1) behind TCUR - HU (= R2) ' - CALL XERRWD (MSG, 60, 23, 1, 1, ITASK, 0, 2, TOUT, TP) - GO TO 700 - 624 CONTINUE - MSG='DVODE-- ITASK = 4 or 5 and TCRIT (=R1) behind TCUR (=R2) ' - CALL XERRWD (MSG, 60, 24, 1, 0, 0, 0, 2, TCRIT, TN) - GO TO 700 - 625 CONTINUE - MSG='DVODE-- ITASK = 4 or 5 and TCRIT (=R1) behind TOUT (=R2) ' - CALL XERRWD (MSG, 60, 25, 1, 0, 0, 0, 2, TCRIT, TOUT) - GO TO 700 - 626 MSG = 'DVODE-- At start of problem, too much accuracy ' - CALL XERRWD (MSG, 50, 26, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG=' requested for precision of machine.. see TOLSF (=R1) ' - CALL XERRWD (MSG, 60, 26, 1, 0, 0, 0, 1, TOLSF, ZERO) - RWORK(14) = TOLSF - GO TO 700 - 627 MSG='DVODE-- Trouble from DVINDY. ITASK = I1, TOUT = R1. ' - CALL XERRWD (MSG, 60, 27, 1, 1, ITASK, 0, 1, TOUT, ZERO) -C - 700 CONTINUE - ISTATE = -3 - RETURN -C - 800 MSG = 'DVODE-- Run aborted.. 
apparent infinite loop ' - CALL XERRWD (MSG, 50, 303, 2, 0, 0, 0, 0, ZERO, ZERO) - RETURN -C----------------------- End of Subroutine DVODE ----------------------- - END -*DECK DVHIN - SUBROUTINE DVHIN (N, T0, Y0, YDOT, F, RPAR, IPAR, TOUT, UROUND, - 1 EWT, ITOL, ATOL, Y, TEMP, H0, NITER, IER) - EXTERNAL F - DOUBLE PRECISION T0, Y0, YDOT, RPAR, TOUT, UROUND, EWT, ATOL, Y, - 1 TEMP, H0 - INTEGER N, IPAR, ITOL, NITER, IER - DIMENSION Y0(*), YDOT(*), EWT(*), ATOL(*), Y(*), - 1 TEMP(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- N, T0, Y0, YDOT, F, RPAR, IPAR, TOUT, UROUND, -C EWT, ITOL, ATOL, Y, TEMP -C Call sequence output -- H0, NITER, IER -C COMMON block variables accessed -- None -C -C Subroutines called by DVHIN.. F -C Function routines called by DVHIN.. DVNORM -C----------------------------------------------------------------------- -C This routine computes the step size, H0, to be attempted on the -C first step, when the user has not supplied a value for this. -C -C First we check that TOUT - T0 differs significantly from zero. Then -C an iteration is done to approximate the initial second derivative -C and this is used to define h from w.r.m.s.norm(h**2 * yddot / 2) = 1. -C A bias factor of 1/2 is applied to the resulting h. -C The sign of H0 is inferred from the initial values of TOUT and T0. -C -C Communication with DVHIN is done with the following variables.. -C -C N = Size of ODE system, input. -C T0 = Initial value of independent variable, input. -C Y0 = Vector of initial conditions, input. -C YDOT = Vector of initial first derivatives, input. -C F = Name of subroutine for right-hand side f(t,y), input. -C RPAR, IPAR = Dummy names for user's real and integer work arrays. -C TOUT = First output value of independent variable -C UROUND = Machine unit roundoff -C EWT, ITOL, ATOL = Error weights and tolerance parameters -C as described in the driver routine, input. 
-C Y, TEMP = Work arrays of length N. -C H0 = Step size to be attempted, output. -C NITER = Number of iterations (and of f evaluations) to compute H0, -C output. -C IER = The error flag, returned with the value -C IER = 0 if no trouble occurred, or -C IER = -1 if TOUT and T0 are considered too close to proceed. -C----------------------------------------------------------------------- -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION AFI, ATOLI, DELYI, H, HALF, HG, HLB, HNEW, HRAT, - 1 HUB, HUN, PT1, T1, TDIST, TROUND, TWO, YDDNRM - INTEGER I, ITER -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION DVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE HALF, HUN, PT1, TWO - DATA HALF /0.5D0/, HUN /100.0D0/, PT1 /0.1D0/, TWO /2.0D0/ -C - NITER = 0 - TDIST = ABS(TOUT - T0) - TROUND = UROUND*MAX(ABS(T0),ABS(TOUT)) - IF (TDIST .LT. TWO*TROUND) GO TO 100 -C -C Set a lower bound on h based on the roundoff level in T0 and TOUT. --- - HLB = HUN*TROUND -C Set an upper bound on h based on TOUT-T0 and the initial Y and YDOT. - - HUB = PT1*TDIST - ATOLI = ATOL(1) - DO 10 I = 1, N - IF (ITOL .EQ. 2 .OR. ITOL .EQ. 4) ATOLI = ATOL(I) - DELYI = PT1*ABS(Y0(I)) + ATOLI - AFI = ABS(YDOT(I)) - IF (AFI*HUB .GT. DELYI) HUB = DELYI/AFI - 10 CONTINUE -C -C Set initial guess for h as geometric mean of upper and lower bounds. - - ITER = 0 - HG = SQRT(HLB*HUB) -C If the bounds have crossed, exit with the mean value. ---------------- - IF (HUB .LT. HLB) THEN - H0 = HG - GO TO 90 - ENDIF -C -C Looping point for iteration. ----------------------------------------- - 50 CONTINUE -C Estimate the second derivative as a difference quotient in f. 
-------- - H = SIGN (HG, TOUT - T0) - T1 = T0 + H - DO 60 I = 1, N - 60 Y(I) = Y0(I) + H*YDOT(I) - CALL F (N, T1, Y, TEMP, RPAR, IPAR) - DO 70 I = 1, N - 70 TEMP(I) = (TEMP(I) - YDOT(I))/H - YDDNRM = DVNORM (N, TEMP, EWT) -C Get the corresponding new value of h. -------------------------------- - IF (YDDNRM*HUB*HUB .GT. TWO) THEN - HNEW = SQRT(TWO/YDDNRM) - ELSE - HNEW = SQRT(HG*HUB) - ENDIF - ITER = ITER + 1 -C----------------------------------------------------------------------- -C Test the stopping conditions. -C Stop if the new and previous h values differ by a factor of .lt. 2. -C Stop if four iterations have been done. Also, stop with previous h -C if HNEW/HG .gt. 2 after first iteration, as this probably means that -C the second derivative value is bad because of cancellation error. -C----------------------------------------------------------------------- - IF (ITER .GE. 4) GO TO 80 - HRAT = HNEW/HG - IF ( (HRAT .GT. HALF) .AND. (HRAT .LT. TWO) ) GO TO 80 - IF ( (ITER .GE. 2) .AND. (HNEW .GT. TWO*HG) ) THEN - HNEW = HG - GO TO 80 - ENDIF - HG = HNEW - GO TO 50 -C -C Iteration done. Apply bounds, bias factor, and sign. Then exit. ---- - 80 H0 = HNEW*HALF - IF (H0 .LT. HLB) H0 = HLB - IF (H0 .GT. HUB) H0 = HUB - 90 H0 = SIGN(H0, TOUT - T0) - NITER = ITER - IER = 0 - RETURN -C Error return for TOUT - T0 too small. -------------------------------- - 100 IER = -1 - RETURN -C----------------------- End of Subroutine DVHIN ----------------------- - END -*DECK DVINDY - SUBROUTINE DVINDY (T, K, YH, LDYH, DKY, IFLAG) - DOUBLE PRECISION T, YH, DKY - INTEGER K, LDYH, IFLAG - DIMENSION YH(LDYH,*), DKY(*) -C----------------------------------------------------------------------- -C Call sequence input -- T, K, YH, LDYH -C Call sequence output -- DKY, IFLAG -C COMMON block variables accessed.. -C /DVOD01/ -- H, TN, UROUND, L, N, NQ -C /DVOD02/ -- HU -C -C Subroutines called by DVINDY.. DSCAL, XERRWD -C Function routines called by DVINDY.. 
None -C----------------------------------------------------------------------- -C DVINDY computes interpolated values of the K-th derivative of the -C dependent variable vector y, and stores it in DKY. This routine -C is called within the package with K = 0 and T = TOUT, but may -C also be called by the user for any K up to the current order. -C (See detailed instructions in the usage documentation.) -C----------------------------------------------------------------------- -C The computed values in DKY are gotten by interpolation using the -C Nordsieck history array YH. This array corresponds uniquely to a -C vector-valued polynomial of degree NQCUR or less, and DKY is set -C to the K-th derivative of this polynomial at T. -C The formula for DKY is.. -C q -C DKY(i) = sum c(j,K) * (T - TN)**(j-K) * H**(-j) * YH(i,j+1) -C j=K -C where c(j,K) = j*(j-1)*...*(j-K+1), q = NQCUR, TN = TCUR, H = HCUR. -C The quantities NQ = NQCUR, L = NQ+1, N, TN, and H are -C communicated by COMMON. The above sum is done in reverse order. -C IFLAG is returned negative if either K or T is out of bounds. -C -C Discussion above and comments in driver explain all variables. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block DVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION C, HUN, R, S, TFUZZ, TN1, TP, ZERO - INTEGER I, IC, J, JB, JB2, JJ, JJ1, JP1 - CHARACTER*80 MSG -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE HUN, ZERO -C - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA HUN /100.0D0/, ZERO /0.0D0/ -C - IFLAG = 0 - IF (K .LT. 0 .OR. K .GT. NQ) GO TO 80 - TFUZZ = HUN*UROUND*(TN + HU) - TP = TN - HU - TFUZZ - TN1 = TN + TFUZZ - IF ((T-TP)*(T-TN1) .GT. ZERO) GO TO 90 -C - S = (T - TN)/H - IC = 1 - IF (K .EQ. 0) GO TO 15 - JJ1 = L - K - DO 10 JJ = JJ1, NQ - 10 IC = IC*JJ - 15 C = REAL(IC) - DO 20 I = 1, N - 20 DKY(I) = C*YH(I,L) - IF (K .EQ. 
NQ) GO TO 55 - JB2 = NQ - K - DO 50 JB = 1, JB2 - J = NQ - JB - JP1 = J + 1 - IC = 1 - IF (K .EQ. 0) GO TO 35 - JJ1 = JP1 - K - DO 30 JJ = JJ1, J - 30 IC = IC*JJ - 35 C = REAL(IC) - DO 40 I = 1, N - 40 DKY(I) = C*YH(I,JP1) + S*DKY(I) - 50 CONTINUE - IF (K .EQ. 0) RETURN - 55 R = H**(-K) - CALL DSCAL (N, R, DKY, 1) - RETURN -C - 80 MSG = 'DVINDY-- K (=I1) illegal ' - CALL XERRWD (MSG, 30, 51, 1, 1, K, 0, 0, ZERO, ZERO) - IFLAG = -1 - RETURN - 90 MSG = 'DVINDY-- T (=R1) illegal ' - CALL XERRWD (MSG, 30, 52, 1, 0, 0, 0, 1, T, ZERO) - MSG=' T not in interval TCUR - HU (= R1) to TCUR (=R2) ' - CALL XERRWD (MSG, 60, 52, 1, 0, 0, 0, 2, TP, TN) - IFLAG = -2 - RETURN -C----------------------- End of Subroutine DVINDY ---------------------- - END -*DECK DVSTEP - SUBROUTINE DVSTEP (Y, YH, LDYH, YH1, EWT, SAVF, VSAV, ACOR, - 1 WM, IWM, F, JAC, PSOL, VNLS, RPAR, IPAR) - EXTERNAL F, JAC, PSOL, VNLS - DOUBLE PRECISION Y, YH, YH1, EWT, SAVF, VSAV, ACOR, WM, RPAR - INTEGER LDYH, IWM, IPAR - DIMENSION Y(*), YH(LDYH,*), YH1(*), EWT(*), SAVF(*), VSAV(*), - 1 ACOR(*), WM(*), IWM(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- Y, YH, LDYH, YH1, EWT, SAVF, VSAV, -C ACOR, WM, IWM, F, JAC, PSOL, VNLS, RPAR, IPAR -C Call sequence output -- YH, ACOR, WM, IWM -C COMMON block variables accessed.. -C /DVOD01/ ACNRM, EL(13), H, HMIN, HMXI, HNEW, HSCAL, RC, TAU(13), -C TQ(5), TN, JCUR, JSTART, KFLAG, KUTH, -C L, LMAX, MAXORD, N, NEWQ, NQ, NQWAIT -C /DVOD02/ HU, NCFN, NETF, NFE, NQU, NST -C -C Subroutines called by DVSTEP.. F, DAXPY, DCOPY, DSCAL, -C DVJUST, VNLS, DVSET -C Function routines called by DVSTEP.. DVNORM -C----------------------------------------------------------------------- -C DVSTEP performs one step of the integration of an initial value -C problem for a system of ordinary differential equations. -C DVSTEP calls subroutine VNLS for the solution of the nonlinear system -C arising in the time step. 
Thus it is independent of the problem -C Jacobian structure and the type of nonlinear system solution method. -C DVSTEP returns a completion flag KFLAG (in COMMON). -C A return with KFLAG = -1 or -2 means either ABS(H) = HMIN or 10 -C consecutive failures occurred. On a return with KFLAG negative, -C the values of TN and the YH array are as of the beginning of the last -C step, and H is the last step size attempted. -C -C Communication with DVSTEP is done with the following variables.. -C -C Y = An array of length N used for the dependent variable vector. -C YH = An LDYH by LMAX array containing the dependent variables -C and their approximate scaled derivatives, where -C LMAX = MAXORD + 1. YH(i,j+1) contains the approximate -C j-th derivative of y(i), scaled by H**j/factorial(j) -C (j = 0,1,...,NQ). On entry for the first step, the first -C two columns of YH must be set from the initial values. -C LDYH = A constant integer .ge. N, the first dimension of YH. -C N is the number of ODEs in the system. -C YH1 = A one-dimensional array occupying the same space as YH. -C EWT = An array of length N containing multiplicative weights -C for local error measurements. Local errors in y(i) are -C compared to 1.0/EWT(i) in various error tests. -C SAVF = An array of working storage, of length N. -C also used for input of YH(*,MAXORD+2) when JSTART = -1 -C and MAXORD .lt. the current order NQ. -C VSAV = A work array of length N passed to subroutine VNLS. -C ACOR = A work array of length N, used for the accumulated -C corrections. On a successful return, ACOR(i) contains -C the estimated one-step local error in y(i). -C WM,IWM = Real and integer work arrays associated with matrix -C operations in VNLS. -C F = Dummy name for the user supplied subroutine for f. -C JAC = Dummy name for the user supplied Jacobian subroutine. -C PSOL = Dummy name for the subroutine passed to VNLS, for -C possible use there. 
-C VNLS = Dummy name for the nonlinear system solving subroutine, -C whose real name is dependent on the method used. -C RPAR, IPAR = Dummy names for user's real and integer work arrays. -C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block DVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION ADDON, BIAS1,BIAS2,BIAS3, CNQUOT, DDN, DSM, DUP, - 1 ETACF, ETAMIN, ETAMX1, ETAMX2, ETAMX3, ETAMXF, - 2 ETAQ, ETAQM1, ETAQP1, FLOTL, ONE, ONEPSM, - 3 R, THRESH, TOLD, ZERO - INTEGER I, I1, I2, IBACK, J, JB, KFC, KFH, MXNCF, NCF, NFLAG -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION DVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. 
-C----------------------------------------------------------------------- - SAVE ADDON, BIAS1, BIAS2, BIAS3, - 1 ETACF, ETAMIN, ETAMX1, ETAMX2, ETAMX3, ETAMXF, ETAQ, ETAQM1, - 2 KFC, KFH, MXNCF, ONEPSM, THRESH, ONE, ZERO -C----------------------------------------------------------------------- - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA KFC/-3/, KFH/-7/, MXNCF/10/ - DATA ADDON /1.0D-6/, BIAS1 /6.0D0/, BIAS2 /6.0D0/, - 1 BIAS3 /10.0D0/, ETACF /0.25D0/, ETAMIN /0.1D0/, - 2 ETAMXF /0.2D0/, ETAMX1 /1.0D4/, ETAMX2 /10.0D0/, - 3 ETAMX3 /10.0D0/, ONEPSM /1.00001D0/, THRESH /1.5D0/ - DATA ONE/1.0D0/, ZERO/0.0D0/ -C - KFLAG = 0 - TOLD = TN - NCF = 0 - JCUR = 0 - NFLAG = 0 - IF (JSTART .GT. 0) GO TO 20 - IF (JSTART .EQ. -1) GO TO 100 -C----------------------------------------------------------------------- -C On the first call, the order is set to 1, and other variables are -C initialized. ETAMAX is the maximum ratio by which H can be increased -C in a single step. It is normally 10, but is larger during the -C first step to compensate for the small initial H. If a failure -C occurs (in corrector convergence or error test), ETAMAX is set to 1 -C for the next increase. -C----------------------------------------------------------------------- - LMAX = MAXORD + 1 - NQ = 1 - L = 2 - NQNYH = NQ*LDYH - TAU(1) = H - PRL1 = ONE - RC = ZERO - ETAMAX = ETAMX1 - NQWAIT = 2 - HSCAL = H - GO TO 200 -C----------------------------------------------------------------------- -C Take preliminary actions on a normal continuation step (JSTART.GT.0). 
-C If the driver changed H, then ETA must be reset and NEWH set to 1. -C If a change of order was dictated on the previous step, then -C it is done here and appropriate adjustments in the history are made. -C On an order decrease, the history array is adjusted by DVJUST. -C On an order increase, the history array is augmented by a column. -C On a change of step size H, the history array YH is rescaled. -C----------------------------------------------------------------------- - 20 CONTINUE - IF (KUTH .EQ. 1) THEN - ETA = MIN(ETA,H/HSCAL) - NEWH = 1 - ENDIF - 50 IF (NEWH .EQ. 0) GO TO 200 - IF (NEWQ .EQ. NQ) GO TO 150 - IF (NEWQ .LT. NQ) THEN - CALL DVJUST (YH, LDYH, -1) - NQ = NEWQ - L = NQ + 1 - NQWAIT = L - GO TO 150 - ENDIF - IF (NEWQ .GT. NQ) THEN - CALL DVJUST (YH, LDYH, 1) - NQ = NEWQ - L = NQ + 1 - NQWAIT = L - GO TO 150 - ENDIF -C----------------------------------------------------------------------- -C The following block handles preliminaries needed when JSTART = -1. -C If N was reduced, zero out part of YH to avoid undefined references. -C If MAXORD was reduced to a value less than the tentative order NEWQ, -C then NQ is set to MAXORD, and a new H ratio ETA is chosen. -C Otherwise, we take the same preliminary actions as for JSTART .gt. 0. -C In any case, NQWAIT is reset to L = NQ + 1 to prevent further -C changes in order for that many steps. -C The new H ratio ETA is limited by the input H if KUTH = 1, -C by HMIN if KUTH = 0, and by HMXI in any case. -C Finally, the history array YH is rescaled. -C----------------------------------------------------------------------- - 100 CONTINUE - LMAX = MAXORD + 1 - IF (N .EQ. LDYH) GO TO 120 - I1 = 1 + (NEWQ + 1)*LDYH - I2 = (MAXORD + 1)*LDYH - IF (I1 .GT. I2) GO TO 120 - DO 110 I = I1, I2 - 110 YH1(I) = ZERO - 120 IF (NEWQ .LE. MAXORD) GO TO 140 - FLOTL = REAL(LMAX) - IF (MAXORD .LT. NQ-1) THEN - DDN = DVNORM (N, SAVF, EWT)/TQ(1) - ETA = ONE/((BIAS1*DDN)**(ONE/FLOTL) + ADDON) - ENDIF - IF (MAXORD .EQ. NQ .AND. 
NEWQ .EQ. NQ+1) ETA = ETAQ - IF (MAXORD .EQ. NQ-1 .AND. NEWQ .EQ. NQ+1) THEN - ETA = ETAQM1 - CALL DVJUST (YH, LDYH, -1) - ENDIF - IF (MAXORD .EQ. NQ-1 .AND. NEWQ .EQ. NQ) THEN - DDN = DVNORM (N, SAVF, EWT)/TQ(1) - ETA = ONE/((BIAS1*DDN)**(ONE/FLOTL) + ADDON) - CALL DVJUST (YH, LDYH, -1) - ENDIF - ETA = MIN(ETA,ONE) - NQ = MAXORD - L = LMAX - 140 IF (KUTH .EQ. 1) ETA = MIN(ETA,ABS(H/HSCAL)) - IF (KUTH .EQ. 0) ETA = MAX(ETA,HMIN/ABS(HSCAL)) - ETA = ETA/MAX(ONE,ABS(HSCAL)*HMXI*ETA) - NEWH = 1 - NQWAIT = L - IF (NEWQ .LE. MAXORD) GO TO 50 -C Rescale the history array for a change in H by a factor of ETA. ------ - 150 R = ONE - DO 180 J = 2, L - R = R*ETA - CALL DSCAL (N, R, YH(1,J), 1 ) - 180 CONTINUE - H = HSCAL*ETA - HSCAL = H - RC = RC*ETA - NQNYH = NQ*LDYH -C----------------------------------------------------------------------- -C This section computes the predicted values by effectively -C multiplying the YH array by the Pascal triangle matrix. -C DVSET is called to calculate all integration coefficients. -C RC is the ratio of new to old values of the coefficient H/EL(2)=h/l1. -C----------------------------------------------------------------------- - 200 TN = TN + H - I1 = NQNYH + 1 - DO 220 JB = 1, NQ - I1 = I1 - LDYH - DO 210 I = I1, NQNYH - 210 YH1(I) = YH1(I) + YH1(I+LDYH) - 220 CONTINUE - CALL DVSET - RL1 = ONE/EL(2) - RC = RC*(RL1/PRL1) - PRL1 = RL1 -C -C Call the nonlinear system solver. ------------------------------------ -C - CALL VNLS (Y, YH, LDYH, VSAV, SAVF, EWT, ACOR, IWM, WM, - 1 F, JAC, PSOL, NFLAG, RPAR, IPAR) -C - IF (NFLAG .EQ. 0) GO TO 450 -C----------------------------------------------------------------------- -C The VNLS routine failed to achieve convergence (NFLAG .NE. 0). -C The YH array is retracted to its values before prediction. -C The step size H is reduced and the step is retried, if possible. -C Otherwise, an error exit is taken. 
-C----------------------------------------------------------------------- - NCF = NCF + 1 - NCFN = NCFN + 1 - ETAMAX = ONE - TN = TOLD - I1 = NQNYH + 1 - DO 430 JB = 1, NQ - I1 = I1 - LDYH - DO 420 I = I1, NQNYH - 420 YH1(I) = YH1(I) - YH1(I+LDYH) - 430 CONTINUE - IF (NFLAG .LT. -1) GO TO 680 - IF (ABS(H) .LE. HMIN*ONEPSM) GO TO 670 - IF (NCF .EQ. MXNCF) GO TO 670 - ETA = ETACF - ETA = MAX(ETA,HMIN/ABS(H)) - NFLAG = -1 - GO TO 150 -C----------------------------------------------------------------------- -C The corrector has converged (NFLAG = 0). The local error test is -C made and control passes to statement 500 if it fails. -C----------------------------------------------------------------------- - 450 CONTINUE - DSM = ACNRM/TQ(2) - IF (DSM .GT. ONE) GO TO 500 -C----------------------------------------------------------------------- -C After a successful step, update the YH and TAU arrays and decrement -C NQWAIT. If NQWAIT is then 1 and NQ .lt. MAXORD, then ACOR is saved -C for use in a possible order increase on the next step. -C If ETAMAX = 1 (a failure occurred this step), keep NQWAIT .ge. 2. -C----------------------------------------------------------------------- - KFLAG = 0 - NST = NST + 1 - HU = H - NQU = NQ - DO 470 IBACK = 1, NQ - I = L - IBACK - 470 TAU(I+1) = TAU(I) - TAU(1) = H - DO 480 J = 1, L - CALL DAXPY (N, EL(J), ACOR, 1, YH(1,J), 1 ) - 480 CONTINUE - NQWAIT = NQWAIT - 1 - IF ((L .EQ. LMAX) .OR. (NQWAIT .NE. 1)) GO TO 490 - CALL DCOPY (N, ACOR, 1, YH(1,LMAX), 1 ) - CONP = TQ(5) - 490 IF (ETAMAX .NE. ONE) GO TO 560 - IF (NQWAIT .LT. 2) NQWAIT = 2 - NEWQ = NQ - NEWH = 0 - ETA = ONE - HNEW = H - GO TO 690 -C----------------------------------------------------------------------- -C The error test failed. KFLAG keeps track of multiple failures. -C Restore TN and the YH array to their previous values, and prepare -C to try the step again. Compute the optimum step size for the -C same order. 
After repeated failures, H is forced to decrease -C more rapidly. -C----------------------------------------------------------------------- - 500 KFLAG = KFLAG - 1 - NETF = NETF + 1 - NFLAG = -2 - TN = TOLD - I1 = NQNYH + 1 - DO 520 JB = 1, NQ - I1 = I1 - LDYH - DO 510 I = I1, NQNYH - 510 YH1(I) = YH1(I) - YH1(I+LDYH) - 520 CONTINUE - IF (ABS(H) .LE. HMIN*ONEPSM) GO TO 660 - ETAMAX = ONE - IF (KFLAG .LE. KFC) GO TO 530 -C Compute ratio of new H to current H at the current order. ------------ - FLOTL = REAL(L) - ETA = ONE/((BIAS2*DSM)**(ONE/FLOTL) + ADDON) - ETA = MAX(ETA,HMIN/ABS(H),ETAMIN) - IF ((KFLAG .LE. -2) .AND. (ETA .GT. ETAMXF)) ETA = ETAMXF - GO TO 150 -C----------------------------------------------------------------------- -C Control reaches this section if 3 or more consecutive failures -C have occurred. It is assumed that the elements of the YH array -C have accumulated errors of the wrong order. The order is reduced -C by one, if possible. Then H is reduced by a factor of 0.1 and -C the step is retried. After a total of 7 consecutive failures, -C an exit is taken with KFLAG = -1. -C----------------------------------------------------------------------- - 530 IF (KFLAG .EQ. KFH) GO TO 660 - IF (NQ .EQ. 1) GO TO 540 - ETA = MAX(ETAMIN,HMIN/ABS(H)) - CALL DVJUST (YH, LDYH, -1) - L = NQ - NQ = NQ - 1 - NQWAIT = L - GO TO 150 - 540 ETA = MAX(ETAMIN,HMIN/ABS(H)) - H = H*ETA - HSCAL = H - TAU(1) = H - CALL F (N, TN, Y, SAVF, RPAR, IPAR) - NFE = NFE + 1 - DO 550 I = 1, N - 550 YH(I,2) = H*SAVF(I) - NQWAIT = 10 - GO TO 200 -C----------------------------------------------------------------------- -C If NQWAIT = 0, an increase or decrease in order by one is considered. -C Factors ETAQ, ETAQM1, ETAQP1 are computed by which H could -C be multiplied at order q, q-1, or q+1, respectively. -C The largest of these is determined, and the new order and -C step size set accordingly. -C A change of H or NQ is made only if H increases by at least a -C factor of THRESH. 
If an order change is considered and rejected, -C then NQWAIT is set to 2 (reconsider it after 2 steps). -C----------------------------------------------------------------------- -C Compute ratio of new H to current H at the current order. ------------ - 560 FLOTL = REAL(L) - ETAQ = ONE/((BIAS2*DSM)**(ONE/FLOTL) + ADDON) - IF (NQWAIT .NE. 0) GO TO 600 - NQWAIT = 2 - ETAQM1 = ZERO - IF (NQ .EQ. 1) GO TO 570 -C Compute ratio of new H to current H at the current order less one. --- - DDN = DVNORM (N, YH(1,L), EWT)/TQ(1) - ETAQM1 = ONE/((BIAS1*DDN)**(ONE/(FLOTL - ONE)) + ADDON) - 570 ETAQP1 = ZERO - IF (L .EQ. LMAX) GO TO 580 -C Compute ratio of new H to current H at current order plus one. ------- - CNQUOT = (TQ(5)/CONP)*(H/TAU(2))**L - DO 575 I = 1, N - 575 SAVF(I) = ACOR(I) - CNQUOT*YH(I,LMAX) - DUP = DVNORM (N, SAVF, EWT)/TQ(3) - ETAQP1 = ONE/((BIAS3*DUP)**(ONE/(FLOTL + ONE)) + ADDON) - 580 IF (ETAQ .GE. ETAQP1) GO TO 590 - IF (ETAQP1 .GT. ETAQM1) GO TO 620 - GO TO 610 - 590 IF (ETAQ .LT. ETAQM1) GO TO 610 - 600 ETA = ETAQ - NEWQ = NQ - GO TO 630 - 610 ETA = ETAQM1 - NEWQ = NQ - 1 - GO TO 630 - 620 ETA = ETAQP1 - NEWQ = NQ + 1 - CALL DCOPY (N, ACOR, 1, YH(1,LMAX), 1) -C Test tentative new H against THRESH, ETAMAX, and HMXI, then exit. ---- - 630 IF (ETA .LT. THRESH .OR. ETAMAX .EQ. ONE) GO TO 640 - ETA = MIN(ETA,ETAMAX) - ETA = ETA/MAX(ONE,ABS(H)*HMXI*ETA) - NEWH = 1 - HNEW = H*ETA - GO TO 690 - 640 NEWQ = NQ - NEWH = 0 - ETA = ONE - HNEW = H - GO TO 690 -C----------------------------------------------------------------------- -C All returns are made through this section. -C On a successful return, ETAMAX is reset and ACOR is scaled. -C----------------------------------------------------------------------- - 660 KFLAG = -1 - GO TO 720 - 670 KFLAG = -2 - GO TO 720 - 680 IF (NFLAG .EQ. -2) KFLAG = -3 - IF (NFLAG .EQ. -3) KFLAG = -4 - GO TO 720 - 690 ETAMAX = ETAMX3 - IF (NST .LE. 
10) ETAMAX = ETAMX2 - 700 R = ONE/TQ(2) - CALL DSCAL (N, R, ACOR, 1) - 720 JSTART = 1 - RETURN -C----------------------- End of Subroutine DVSTEP ---------------------- - END -*DECK DVSET - SUBROUTINE DVSET -C----------------------------------------------------------------------- -C Call sequence communication.. None -C COMMON block variables accessed.. -C /DVOD01/ -- EL(13), H, TAU(13), TQ(5), L(= NQ + 1), -C METH, NQ, NQWAIT -C -C Subroutines called by DVSET.. None -C Function routines called by DVSET.. None -C----------------------------------------------------------------------- -C DVSET is called by DVSTEP and sets coefficients for use there. -C -C For each order NQ, the coefficients in EL are calculated by use of -C the generating polynomial lambda(x), with coefficients EL(i). -C lambda(x) = EL(1) + EL(2)*x + ... + EL(NQ+1)*(x**NQ). -C For the backward differentiation formulas, -C NQ-1 -C lambda(x) = (1 + x/xi*(NQ)) * product (1 + x/xi(i) ) . -C i = 1 -C For the Adams formulas, -C NQ-1 -C (d/dx) lambda(x) = c * product (1 + x/xi(i) ) , -C i = 1 -C lambda(-1) = 0, lambda(0) = 1, -C where c is a normalization constant. -C In both cases, xi(i) is defined by -C H*xi(i) = t sub n - t sub (n-i) -C = H + TAU(1) + TAU(2) + ... TAU(i-1). -C -C -C In addition to variables described previously, communication -C with DVSET uses the following.. -C TAU = A vector of length 13 containing the past NQ values -C of H. -C EL = A vector of length 13 in which vset stores the -C coefficients for the corrector formula. -C TQ = A vector of length 5 in which vset stores constants -C used for the convergence test, the error test, and the -C selection of H at a new order. -C METH = The basic method indicator. -C NQ = The current order. -C L = NQ + 1, the length of the vector stored in EL, and -C the number of columns of the YH array being used. -C NQWAIT = A counter controlling the frequency of order changes. -C An order change is about to be considered if NQWAIT = 1. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION AHATN0, ALPH0, CNQM1, CORTES, CSUM, ELP, EM, - 1 EM0, FLOTI, FLOTL, FLOTNQ, HSUM, ONE, RXI, RXIS, S, SIX, - 2 T1, T2, T3, T4, T5, T6, TWO, XI, ZERO - INTEGER I, IBACK, J, JP1, NQM1, NQM2 -C - DIMENSION EM(13) -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE CORTES, ONE, SIX, TWO, ZERO -C - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH -C - DATA CORTES /0.1D0/ - DATA ONE /1.0D0/, SIX /6.0D0/, TWO /2.0D0/, ZERO /0.0D0/ -C - FLOTL = REAL(L) - NQM1 = NQ - 1 - NQM2 = NQ - 2 - GO TO (100, 200), METH -C -C Set coefficients for Adams methods. ---------------------------------- - 100 IF (NQ .NE. 
1) GO TO 110 - EL(1) = ONE - EL(2) = ONE - TQ(1) = ONE - TQ(2) = TWO - TQ(3) = SIX*TQ(2) - TQ(5) = ONE - GO TO 300 - 110 HSUM = H - EM(1) = ONE - FLOTNQ = FLOTL - ONE - DO 115 I = 2, L - 115 EM(I) = ZERO - DO 150 J = 1, NQM1 - IF ((J .NE. NQM1) .OR. (NQWAIT .NE. 1)) GO TO 130 - S = ONE - CSUM = ZERO - DO 120 I = 1, NQM1 - CSUM = CSUM + S*EM(I)/REAL(I+1) - 120 S = -S - TQ(1) = EM(NQM1)/(FLOTNQ*CSUM) - 130 RXI = H/HSUM - DO 140 IBACK = 1, J - I = (J + 2) - IBACK - 140 EM(I) = EM(I) + EM(I-1)*RXI - HSUM = HSUM + TAU(J) - 150 CONTINUE -C Compute integral from -1 to 0 of polynomial and of x times it. ------- - S = ONE - EM0 = ZERO - CSUM = ZERO - DO 160 I = 1, NQ - FLOTI = REAL(I) - EM0 = EM0 + S*EM(I)/FLOTI - CSUM = CSUM + S*EM(I)/(FLOTI+ONE) - 160 S = -S -C In EL, form coefficients of normalized integrated polynomial. -------- - S = ONE/EM0 - EL(1) = ONE - DO 170 I = 1, NQ - 170 EL(I+1) = S*EM(I)/REAL(I) - XI = HSUM/H - TQ(2) = XI*EM0/CSUM - TQ(5) = XI/EL(L) - IF (NQWAIT .NE. 1) GO TO 300 -C For higher order control constant, multiply polynomial by 1+x/xi(q). - - RXI = ONE/XI - DO 180 IBACK = 1, NQ - I = (L + 1) - IBACK - 180 EM(I) = EM(I) + EM(I-1)*RXI -C Compute integral of polynomial. -------------------------------------- - S = ONE - CSUM = ZERO - DO 190 I = 1, L - CSUM = CSUM + S*EM(I)/REAL(I+1) - 190 S = -S - TQ(3) = FLOTL*EM0/CSUM - GO TO 300 -C -C Set coefficients for BDF methods. ------------------------------------ - 200 DO 210 I = 3, L - 210 EL(I) = ZERO - EL(1) = ONE - EL(2) = ONE - ALPH0 = -ONE - AHATN0 = -ONE - HSUM = H - RXI = ONE - RXIS = ONE - IF (NQ .EQ. 1) GO TO 240 - DO 230 J = 1, NQM2 -C In EL, construct coefficients of (1+x/xi(1))*...*(1+x/xi(j+1)). 
------ - HSUM = HSUM + TAU(J) - RXI = H/HSUM - JP1 = J + 1 - ALPH0 = ALPH0 - ONE/REAL(JP1) - DO 220 IBACK = 1, JP1 - I = (J + 3) - IBACK - 220 EL(I) = EL(I) + EL(I-1)*RXI - 230 CONTINUE - ALPH0 = ALPH0 - ONE/REAL(NQ) - RXIS = -EL(2) - ALPH0 - HSUM = HSUM + TAU(NQM1) - RXI = H/HSUM - AHATN0 = -EL(2) - RXI - DO 235 IBACK = 1, NQ - I = (NQ + 2) - IBACK - 235 EL(I) = EL(I) + EL(I-1)*RXIS - 240 T1 = ONE - AHATN0 + ALPH0 - T2 = ONE + REAL(NQ)*T1 - TQ(2) = ABS(ALPH0*T2/T1) - TQ(5) = ABS(T2/(EL(L)*RXI/RXIS)) - IF (NQWAIT .NE. 1) GO TO 300 - CNQM1 = RXIS/EL(L) - T3 = ALPH0 + ONE/REAL(NQ) - T4 = AHATN0 + RXI - ELP = T3/(ONE - T4 + T3) - TQ(1) = ABS(ELP/CNQM1) - HSUM = HSUM + TAU(NQ) - RXI = H/HSUM - T5 = ALPH0 - ONE/REAL(NQ+1) - T6 = AHATN0 - RXI - ELP = T2/(ONE - T6 + T5) - TQ(3) = ABS(ELP*RXI*(FLOTL + ONE)*T5) - 300 TQ(4) = CORTES*TQ(2) - RETURN -C----------------------- End of Subroutine DVSET ----------------------- - END -*DECK DVJUST - SUBROUTINE DVJUST (YH, LDYH, IORD) - DOUBLE PRECISION YH - INTEGER LDYH, IORD - DIMENSION YH(LDYH,*) -C----------------------------------------------------------------------- -C Call sequence input -- YH, LDYH, IORD -C Call sequence output -- YH -C COMMON block input -- NQ, METH, LMAX, HSCAL, TAU(13), N -C COMMON block variables accessed.. -C /DVOD01/ -- HSCAL, TAU(13), LMAX, METH, N, NQ, -C -C Subroutines called by DVJUST.. DAXPY -C Function routines called by DVJUST.. None -C----------------------------------------------------------------------- -C This subroutine adjusts the YH array on reduction of order, -C and also when the order is increased for the stiff option (METH = 2). -C Communication with DVJUST uses the following.. -C IORD = An integer flag used when METH = 2 to indicate an order -C increase (IORD = +1) or an order decrease (IORD = -1). -C HSCAL = Step size H used in scaling of Nordsieck array YH. -C (If IORD = +1, DVJUST assumes that HSCAL = TAU(1).) -C See References 1 and 2 for details. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION ALPH0, ALPH1, HSUM, ONE, PROD, T1, XI,XIOLD, ZERO - INTEGER I, IBACK, J, JP1, LP1, NQM1, NQM2, NQP1 -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE ONE, ZERO -C - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH -C - DATA ONE /1.0D0/, ZERO /0.0D0/ -C - IF ((NQ .EQ. 2) .AND. (IORD .NE. 1)) RETURN - NQM1 = NQ - 1 - NQM2 = NQ - 2 - GO TO (100, 200), METH -C----------------------------------------------------------------------- -C Nonstiff option... -C Check to see if the order is being increased or decreased. -C----------------------------------------------------------------------- - 100 CONTINUE - IF (IORD .EQ. 1) GO TO 180 -C Order decrease. 
------------------------------------------------------ - DO 110 J = 1, LMAX - 110 EL(J) = ZERO - EL(2) = ONE - HSUM = ZERO - DO 130 J = 1, NQM2 -C Construct coefficients of x*(x+xi(1))*...*(x+xi(j)). ----------------- - HSUM = HSUM + TAU(J) - XI = HSUM/HSCAL - JP1 = J + 1 - DO 120 IBACK = 1, JP1 - I = (J + 3) - IBACK - 120 EL(I) = EL(I)*XI + EL(I-1) - 130 CONTINUE -C Construct coefficients of integrated polynomial. --------------------- - DO 140 J = 2, NQM1 - 140 EL(J+1) = REAL(NQ)*EL(J)/REAL(J) -C Subtract correction terms from YH array. ----------------------------- - DO 170 J = 3, NQ - DO 160 I = 1, N - 160 YH(I,J) = YH(I,J) - YH(I,L)*EL(J) - 170 CONTINUE - RETURN -C Order increase. ------------------------------------------------------ -C Zero out next column in YH array. ------------------------------------ - 180 CONTINUE - LP1 = L + 1 - DO 190 I = 1, N - 190 YH(I,LP1) = ZERO - RETURN -C----------------------------------------------------------------------- -C Stiff option... -C Check to see if the order is being increased or decreased. -C----------------------------------------------------------------------- - 200 CONTINUE - IF (IORD .EQ. 1) GO TO 300 -C Order decrease. ------------------------------------------------------ - DO 210 J = 1, LMAX - 210 EL(J) = ZERO - EL(3) = ONE - HSUM = ZERO - DO 230 J = 1,NQM2 -C Construct coefficients of x*x*(x+xi(1))*...*(x+xi(j)). --------------- - HSUM = HSUM + TAU(J) - XI = HSUM/HSCAL - JP1 = J + 1 - DO 220 IBACK = 1, JP1 - I = (J + 4) - IBACK - 220 EL(I) = EL(I)*XI + EL(I-1) - 230 CONTINUE -C Subtract correction terms from YH array. ----------------------------- - DO 250 J = 3,NQ - DO 240 I = 1, N - 240 YH(I,J) = YH(I,J) - YH(I,L)*EL(J) - 250 CONTINUE - RETURN -C Order increase. ------------------------------------------------------ - 300 DO 310 J = 1, LMAX - 310 EL(J) = ZERO - EL(3) = ONE - ALPH0 = -ONE - ALPH1 = ONE - PROD = ONE - XIOLD = ONE - HSUM = HSCAL - IF (NQ .EQ. 
1) GO TO 340 - DO 330 J = 1, NQM1 -C Construct coefficients of x*x*(x+xi(1))*...*(x+xi(j)). --------------- - JP1 = J + 1 - HSUM = HSUM + TAU(JP1) - XI = HSUM/HSCAL - PROD = PROD*XI - ALPH0 = ALPH0 - ONE/REAL(JP1) - ALPH1 = ALPH1 + ONE/XI - DO 320 IBACK = 1, JP1 - I = (J + 4) - IBACK - 320 EL(I) = EL(I)*XIOLD + EL(I-1) - XIOLD = XI - 330 CONTINUE - 340 CONTINUE - T1 = (-ALPH0 - ALPH1)/PROD -C Load column L + 1 in YH array. --------------------------------------- - LP1 = L + 1 - DO 350 I = 1, N - 350 YH(I,LP1) = T1*YH(I,LMAX) -C Add correction terms to YH array. ------------------------------------ - NQP1 = NQ + 1 - DO 370 J = 3, NQP1 - CALL DAXPY (N, EL(J), YH(1,LP1), 1, YH(1,J), 1 ) - 370 CONTINUE - RETURN -C----------------------- End of Subroutine DVJUST ---------------------- - END -*DECK DVNLSD - SUBROUTINE DVNLSD (Y, YH, LDYH, VSAV, SAVF, EWT, ACOR, IWM, WM, - 1 F, JAC, PDUM, NFLAG, RPAR, IPAR) - EXTERNAL F, JAC, PDUM - DOUBLE PRECISION Y, YH, VSAV, SAVF, EWT, ACOR, WM, RPAR - INTEGER LDYH, IWM, NFLAG, IPAR - DIMENSION Y(*), YH(LDYH,*), VSAV(*), SAVF(*), EWT(*), ACOR(*), - 1 IWM(*), WM(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- Y, YH, LDYH, SAVF, EWT, ACOR, IWM, WM, -C F, JAC, NFLAG, RPAR, IPAR -C Call sequence output -- YH, ACOR, WM, IWM, NFLAG -C COMMON block variables accessed.. -C /DVOD01/ ACNRM, CRATE, DRC, H, RC, RL1, TQ(5), TN, ICF, -C JCUR, METH, MITER, N, NSLP -C /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Subroutines called by DVNLSD.. F, DAXPY, DCOPY, DSCAL, DVJAC, DVSOL -C Function routines called by DVNLSD.. DVNORM -C----------------------------------------------------------------------- -C Subroutine DVNLSD is a nonlinear system solver, which uses functional -C iteration or a chord (modified Newton) method. For the chord method -C direct linear algebraic system solvers are used. 
Subroutine DVNLSD -C then handles the corrector phase of this integration package. -C -C Communication with DVNLSD is done with the following variables. (For -C more details, please see the comments in the driver subroutine.) -C -C Y = The dependent variable, a vector of length N, input. -C YH = The Nordsieck (Taylor) array, LDYH by LMAX, input -C and output. On input, it contains predicted values. -C LDYH = A constant .ge. N, the first dimension of YH, input. -C VSAV = Unused work array. -C SAVF = A work array of length N. -C EWT = An error weight vector of length N, input. -C ACOR = A work array of length N, used for the accumulated -C corrections to the predicted y vector. -C WM,IWM = Real and integer work arrays associated with matrix -C operations in chord iteration (MITER .ne. 0). -C F = Dummy name for user supplied routine for f. -C JAC = Dummy name for user supplied Jacobian routine. -C PDUM = Unused dummy subroutine name. Included for uniformity -C over collection of integrators. -C NFLAG = Input/output flag, with values and meanings as follows.. -C INPUT -C 0 first call for this time step. -C -1 convergence failure in previous call to DVNLSD. -C -2 error test failure in DVSTEP. -C OUTPUT -C 0 successful completion of nonlinear solver. -C -1 convergence failure or singular matrix. -C -2 unrecoverable error in matrix preprocessing -C (cannot occur here). -C -3 unrecoverable error in solution (cannot occur -C here). -C RPAR, IPAR = Dummy names for user's real and integer work arrays. -C -C IPUP = Own variable flag with values and meanings as follows.. -C 0, do not update the Newton matrix. -C MITER .ne. 0, update Newton matrix, because it is the -C initial step, order was changed, the error -C test failed, or an update is indicated by -C the scalar RC or step counter NST. -C -C For more details, see comments in driver subroutine. 
-C----------------------------------------------------------------------- -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block DVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION CCMAX, CRDOWN, CSCALE, DCON, DEL, DELP, ONE, - 1 RDIV, TWO, ZERO - INTEGER I, IERPJ, IERSL, M, MAXCOR, MSBP -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION DVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. 
-C----------------------------------------------------------------------- - SAVE CCMAX, CRDOWN, MAXCOR, MSBP, RDIV, ONE, TWO, ZERO -C - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA CCMAX /0.3D0/, CRDOWN /0.3D0/, MAXCOR /3/, MSBP /20/, - 1 RDIV /2.0D0/ - DATA ONE /1.0D0/, TWO /2.0D0/, ZERO /0.0D0/ -C----------------------------------------------------------------------- -C On the first step, on a change of method order, or after a -C nonlinear convergence failure with NFLAG = -2, set IPUP = MITER -C to force a Jacobian update when MITER .ne. 0. -C----------------------------------------------------------------------- - IF (JSTART .EQ. 0) NSLP = 0 - IF (NFLAG .EQ. 0) ICF = 0 - IF (NFLAG .EQ. -2) IPUP = MITER - IF ( (JSTART .EQ. 0) .OR. (JSTART .EQ. -1) ) IPUP = MITER -C If this is functional iteration, set CRATE .eq. 1 and drop to 220 - IF (MITER .EQ. 0) THEN - CRATE = ONE - GO TO 220 - ENDIF -C----------------------------------------------------------------------- -C RC is the ratio of new to old values of the coefficient H/EL(2)=h/l1. -C When RC differs from 1 by more than CCMAX, IPUP is set to MITER -C to force DVJAC to be called, if a Jacobian is involved. -C In any case, DVJAC is called at least every MSBP steps. -C----------------------------------------------------------------------- - DRC = ABS(RC-ONE) - IF (DRC .GT. CCMAX .OR. NST .GE. NSLP+MSBP) IPUP = MITER -C----------------------------------------------------------------------- -C Up to MAXCOR corrector iterations are taken. A convergence test is -C made on the r.m.s. 
norm of each correction, weighted by the error -C weight vector EWT. The sum of the corrections is accumulated in the -C vector ACOR(i). The YH array is not altered in the corrector loop. -C----------------------------------------------------------------------- - 220 M = 0 - DELP = ZERO - CALL DCOPY (N, YH(1,1), 1, Y, 1 ) - CALL F (N, TN, Y, SAVF, RPAR, IPAR) - NFE = NFE + 1 - IF (IPUP .LE. 0) GO TO 250 -C----------------------------------------------------------------------- -C If indicated, the matrix P = I - h*rl1*J is reevaluated and -C preprocessed before starting the corrector iteration. IPUP is set -C to 0 as an indicator that this has been done. -C----------------------------------------------------------------------- - CALL DVJAC (Y, YH, LDYH, EWT, ACOR, SAVF, WM, IWM, F, JAC, IERPJ, - 1 RPAR, IPAR) - IPUP = 0 - RC = ONE - DRC = ZERO - CRATE = ONE - NSLP = NST -C If matrix is singular, take error return to force cut in step size. -- - IF (IERPJ .NE. 0) GO TO 430 - 250 DO 260 I = 1,N - 260 ACOR(I) = ZERO -C This is a looping point for the corrector iteration. ----------------- - 270 IF (MITER .NE. 0) GO TO 350 -C----------------------------------------------------------------------- -C In the case of functional iteration, update Y directly from -C the result of the last function evaluation. -C----------------------------------------------------------------------- - DO 280 I = 1,N - 280 SAVF(I) = RL1*(H*SAVF(I) - YH(I,2)) - DO 290 I = 1,N - 290 Y(I) = SAVF(I) - ACOR(I) - DEL = DVNORM (N, Y, EWT) - DO 300 I = 1,N - 300 Y(I) = YH(I,1) + SAVF(I) - CALL DCOPY (N, SAVF, 1, ACOR, 1) - GO TO 400 -C----------------------------------------------------------------------- -C In the case of the chord method, compute the corrector error, -C and solve the linear system with that as right-hand side and -C P as coefficient matrix. The correction is scaled by the factor -C 2/(1+RC) to account for changes in h*rl1 since the last DVJAC call. 
-C----------------------------------------------------------------------- - 350 DO 360 I = 1,N - 360 Y(I) = (RL1*H)*SAVF(I) - (RL1*YH(I,2) + ACOR(I)) - CALL DVSOL (WM, IWM, Y, IERSL) - NNI = NNI + 1 - IF (IERSL .GT. 0) GO TO 410 - IF (METH .EQ. 2 .AND. RC .NE. ONE) THEN - CSCALE = TWO/(ONE + RC) - CALL DSCAL (N, CSCALE, Y, 1) - ENDIF - DEL = DVNORM (N, Y, EWT) - CALL DAXPY (N, ONE, Y, 1, ACOR, 1) - DO 380 I = 1,N - 380 Y(I) = YH(I,1) + ACOR(I) -C----------------------------------------------------------------------- -C Test for convergence. If M .gt. 0, an estimate of the convergence -C rate constant is stored in CRATE, and this is used in the test. -C----------------------------------------------------------------------- - 400 IF (M .NE. 0) CRATE = MAX(CRDOWN*CRATE,DEL/DELP) - DCON = DEL*MIN(ONE,CRATE)/TQ(4) - IF (DCON .LE. ONE) GO TO 450 - M = M + 1 - IF (M .EQ. MAXCOR) GO TO 410 - IF (M .GE. 2 .AND. DEL .GT. RDIV*DELP) GO TO 410 - DELP = DEL - CALL F (N, TN, Y, SAVF, RPAR, IPAR) - NFE = NFE + 1 - GO TO 270 -C - 410 IF (MITER .EQ. 0 .OR. JCUR .EQ. 1) GO TO 430 - ICF = 1 - IPUP = MITER - GO TO 220 -C - 430 CONTINUE - NFLAG = -1 - ICF = 2 - IPUP = MITER - RETURN -C -C Return for successful step. ------------------------------------------ - 450 NFLAG = 0 - JCUR = 0 - ICF = 0 - IF (M .EQ. 0) ACNRM = DEL - IF (M .GT. 0) ACNRM = DVNORM (N, ACOR, EWT) - RETURN -C----------------------- End of Subroutine DVNLSD ---------------------- - END -*DECK DVJAC - SUBROUTINE DVJAC (Y, YH, LDYH, EWT, FTEM, SAVF, WM, IWM, F, JAC, - 1 IERPJ, RPAR, IPAR) - EXTERNAL F, JAC - DOUBLE PRECISION Y, YH, EWT, FTEM, SAVF, WM, RPAR - INTEGER LDYH, IWM, IERPJ, IPAR - DIMENSION Y(*), YH(LDYH,*), EWT(*), FTEM(*), SAVF(*), - 1 WM(*), IWM(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- Y, YH, LDYH, EWT, FTEM, SAVF, WM, IWM, -C F, JAC, RPAR, IPAR -C Call sequence output -- WM, IWM, IERPJ -C COMMON block variables accessed.. 
-C /DVOD01/ CCMXJ, DRC, H, RL1, TN, UROUND, ICF, JCUR, LOCJS, -C MITER, MSBJ, N, NSLJ -C /DVOD02/ NFE, NST, NJE, NLU -C -C Subroutines called by DVJAC.. F, JAC, DACOPY, DCOPY, DGBFA, DGEFA, -C DSCAL -C Function routines called by DVJAC.. DVNORM -C----------------------------------------------------------------------- -C DVJAC is called by DVNLSD to compute and process the matrix -C P = I - h*rl1*J , where J is an approximation to the Jacobian. -C Here J is computed by the user-supplied routine JAC if -C MITER = 1 or 4, or by finite differencing if MITER = 2, 3, or 5. -C If MITER = 3, a diagonal approximation to J is used. -C If JSV = -1, J is computed from scratch in all cases. -C If JSV = 1 and MITER = 1, 2, 4, or 5, and if the saved value of J is -C considered acceptable, then P is constructed from the saved J. -C J is stored in wm and replaced by P. If MITER .ne. 3, P is then -C subjected to LU decomposition in preparation for later solution -C of linear systems with P as coefficient matrix. This is done -C by DGEFA if MITER = 1 or 2, and by DGBFA if MITER = 4 or 5. -C -C Communication with DVJAC is done with the following variables. (For -C more details, please see the comments in the driver subroutine.) -C Y = Vector containing predicted values on entry. -C YH = The Nordsieck array, an LDYH by LMAX array, input. -C LDYH = A constant .ge. N, the first dimension of YH, input. -C EWT = An error weight vector of length N. -C SAVF = Array containing f evaluated at predicted y, input. -C WM = Real work space for matrices. In the output, it containS -C the inverse diagonal matrix if MITER = 3 and the LU -C decomposition of P if MITER is 1, 2 , 4, or 5. -C Storage of matrix elements starts at WM(3). -C Storage of the saved Jacobian starts at WM(LOCJS). -C WM also contains the following matrix-related data.. -C WM(1) = SQRT(UROUND), used in numerical Jacobian step. -C WM(2) = H*RL1, saved for later use if MITER = 3. 
-C IWM = Integer work space containing pivot information, -C starting at IWM(31), if MITER is 1, 2, 4, or 5. -C IWM also contains band parameters ML = IWM(1) and -C MU = IWM(2) if MITER is 4 or 5. -C F = Dummy name for the user supplied subroutine for f. -C JAC = Dummy name for the user supplied Jacobian subroutine. -C RPAR, IPAR = Dummy names for user's real and integer work arrays. -C RL1 = 1/EL(2) (input). -C IERPJ = Output error flag, = 0 if no trouble, 1 if the P -C matrix is found to be singular. -C JCUR = Output flag to indicate whether the Jacobian matrix -C (or approximation) is now current. -C JCUR = 0 means J is not current. -C JCUR = 1 means J is current. -C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block DVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION CON, DI, FAC, HRL1, ONE, PT1, R, R0, SRUR, THOU, - 1 YI, YJ, YJJ, ZERO - INTEGER I, I1, I2, IER, II, J, J1, JJ, JOK, LENP, MBA, MBAND, - 1 MEB1, MEBAND, ML, ML3, MU, NP1 -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION DVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this subroutine. 
-C----------------------------------------------------------------------- - SAVE ONE, PT1, THOU, ZERO -C----------------------------------------------------------------------- - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /DVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA ONE /1.0D0/, THOU /1000.0D0/, ZERO /0.0D0/, PT1 /0.1D0/ -C - IERPJ = 0 - HRL1 = H*RL1 -C See whether J should be evaluated (JOK = -1) or not (JOK = 1). ------- - JOK = JSV - IF (JSV .EQ. 1) THEN - IF (NST .EQ. 0 .OR. NST .GT. NSLJ+MSBJ) JOK = -1 - IF (ICF .EQ. 1 .AND. DRC .LT. CCMXJ) JOK = -1 - IF (ICF .EQ. 2) JOK = -1 - ENDIF -C End of setting JOK. -------------------------------------------------- -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 1) THEN -C If JOK = -1 and MITER = 1, call JAC to evaluate Jacobian. ------------ - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - LENP = N*N - DO 110 I = 1,LENP - 110 WM(I+2) = ZERO - CALL JAC (N, TN, Y, 0, 0, WM(3), N, RPAR, IPAR) - IF (JSV .EQ. 1) CALL DCOPY (LENP, WM(3), 1, WM(LOCJS), 1) - ENDIF -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 2) THEN -C If MITER = 2, make N calls to F to approximate the Jacobian. --------- - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - FAC = DVNORM (N, SAVF, EWT) - R0 = THOU*ABS(H)*UROUND*REAL(N)*FAC - IF (R0 .EQ. ZERO) R0 = ONE - SRUR = WM(1) - J1 = 2 - DO 230 J = 1,N - YJ = Y(J) - R = MAX(SRUR*ABS(YJ),R0/EWT(J)) - Y(J) = Y(J) + R - FAC = ONE/R - CALL F (N, TN, Y, FTEM, RPAR, IPAR) - DO 220 I = 1,N - 220 WM(I+J1) = (FTEM(I) - SAVF(I))*FAC - Y(J) = YJ - J1 = J1 + N - 230 CONTINUE - NFE = NFE + N - LENP = N*N - IF (JSV .EQ. 1) CALL DCOPY (LENP, WM(3), 1, WM(LOCJS), 1) - ENDIF -C - IF (JOK .EQ. 1 .AND. 
(MITER .EQ. 1 .OR. MITER .EQ. 2)) THEN - JCUR = 0 - LENP = N*N - CALL DCOPY (LENP, WM(LOCJS), 1, WM(3), 1) - ENDIF -C - IF (MITER .EQ. 1 .OR. MITER .EQ. 2) THEN -C Multiply Jacobian by scalar, add identity, and do LU decomposition. -- - CON = -HRL1 - CALL DSCAL (LENP, CON, WM(3), 1) - J = 3 - NP1 = N + 1 - DO 250 I = 1,N - WM(J) = WM(J) + ONE - 250 J = J + NP1 - NLU = NLU + 1 - CALL DGEFA (WM(3), N, N, IWM(31), IER) - IF (IER .NE. 0) IERPJ = 1 - RETURN - ENDIF -C End of code block for MITER = 1 or 2. -------------------------------- -C - IF (MITER .EQ. 3) THEN -C If MITER = 3, construct a diagonal approximation to J and P. --------- - NJE = NJE + 1 - JCUR = 1 - WM(2) = HRL1 - R = RL1*PT1 - DO 310 I = 1,N - 310 Y(I) = Y(I) + R*(H*SAVF(I) - YH(I,2)) - CALL F (N, TN, Y, WM(3), RPAR, IPAR) - NFE = NFE + 1 - DO 320 I = 1,N - R0 = H*SAVF(I) - YH(I,2) - DI = PT1*R0 - H*(WM(I+2) - SAVF(I)) - WM(I+2) = ONE - IF (ABS(R0) .LT. UROUND/EWT(I)) GO TO 320 - IF (ABS(DI) .EQ. ZERO) GO TO 330 - WM(I+2) = PT1*R0/DI - 320 CONTINUE - RETURN - 330 IERPJ = 1 - RETURN - ENDIF -C End of code block for MITER = 3. ------------------------------------- -C -C Set constants for MITER = 4 or 5. ------------------------------------ - ML = IWM(1) - MU = IWM(2) - ML3 = ML + 3 - MBAND = ML + MU + 1 - MEBAND = MBAND + ML - LENP = MEBAND*N -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 4) THEN -C If JOK = -1 and MITER = 4, call JAC to evaluate Jacobian. ------------ - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - DO 410 I = 1,LENP - 410 WM(I+2) = ZERO - CALL JAC (N, TN, Y, ML, MU, WM(ML3), MEBAND, RPAR, IPAR) - IF (JSV .EQ. 1) - 1 CALL DACOPY (MBAND, N, WM(ML3), MEBAND, WM(LOCJS), MBAND) - ENDIF -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 5) THEN -C If MITER = 5, make ML+MU+1 calls to F to approximate the Jacobian. --- - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - MBA = MIN(MBAND,N) - MEB1 = MEBAND - 1 - SRUR = WM(1) - FAC = DVNORM (N, SAVF, EWT) - R0 = THOU*ABS(H)*UROUND*REAL(N)*FAC - IF (R0 .EQ. 
ZERO) R0 = ONE - DO 560 J = 1,MBA - DO 530 I = J,N,MBAND - YI = Y(I) - R = MAX(SRUR*ABS(YI),R0/EWT(I)) - 530 Y(I) = Y(I) + R - CALL F (N, TN, Y, FTEM, RPAR, IPAR) - DO 550 JJ = J,N,MBAND - Y(JJ) = YH(JJ,1) - YJJ = Y(JJ) - R = MAX(SRUR*ABS(YJJ),R0/EWT(JJ)) - FAC = ONE/R - I1 = MAX(JJ-MU,1) - I2 = MIN(JJ+ML,N) - II = JJ*MEB1 - ML + 2 - DO 540 I = I1,I2 - 540 WM(II+I) = (FTEM(I) - SAVF(I))*FAC - 550 CONTINUE - 560 CONTINUE - NFE = NFE + MBA - IF (JSV .EQ. 1) - 1 CALL DACOPY (MBAND, N, WM(ML3), MEBAND, WM(LOCJS), MBAND) - ENDIF -C - IF (JOK .EQ. 1) THEN - JCUR = 0 - CALL DACOPY (MBAND, N, WM(LOCJS), MBAND, WM(ML3), MEBAND) - ENDIF -C -C Multiply Jacobian by scalar, add identity, and do LU decomposition. - CON = -HRL1 - CALL DSCAL (LENP, CON, WM(3), 1 ) - II = MBAND + 2 - DO 580 I = 1,N - WM(II) = WM(II) + ONE - 580 II = II + MEBAND - NLU = NLU + 1 - CALL DGBFA (WM(3), MEBAND, N, ML, MU, IWM(31), IER) - IF (IER .NE. 0) IERPJ = 1 - RETURN -C End of code block for MITER = 4 or 5. -------------------------------- -C -C----------------------- End of Subroutine DVJAC ----------------------- - END -*DECK DACOPY - SUBROUTINE DACOPY (NROW, NCOL, A, NROWA, B, NROWB) - DOUBLE PRECISION A, B - INTEGER NROW, NCOL, NROWA, NROWB - DIMENSION A(NROWA,NCOL), B(NROWB,NCOL) -C----------------------------------------------------------------------- -C Call sequence input -- NROW, NCOL, A, NROWA, NROWB -C Call sequence output -- B -C COMMON block variables accessed -- None -C -C Subroutines called by DACOPY.. DCOPY -C Function routines called by DACOPY.. None -C----------------------------------------------------------------------- -C This routine copies one rectangular array, A, to another, B, -C where A and B may have different row dimensions, NROWA and NROWB. -C The data copied consists of NROW rows and NCOL columns. 
-C----------------------------------------------------------------------- - INTEGER IC -C - DO 20 IC = 1,NCOL - CALL DCOPY (NROW, A(1,IC), 1, B(1,IC), 1) - 20 CONTINUE -C - RETURN -C----------------------- End of Subroutine DACOPY ---------------------- - END -*DECK DVSOL - SUBROUTINE DVSOL (WM, IWM, X, IERSL) - DOUBLE PRECISION WM, X - INTEGER IWM, IERSL - DIMENSION WM(*), IWM(*), X(*) -C----------------------------------------------------------------------- -C Call sequence input -- WM, IWM, X -C Call sequence output -- X, IERSL -C COMMON block variables accessed.. -C /DVOD01/ -- H, RL1, MITER, N -C -C Subroutines called by DVSOL.. DGESL, DGBSL -C Function routines called by DVSOL.. None -C----------------------------------------------------------------------- -C This routine manages the solution of the linear system arising from -C a chord iteration. It is called if MITER .ne. 0. -C If MITER is 1 or 2, it calls DGESL to accomplish this. -C If MITER = 3 it updates the coefficient H*RL1 in the diagonal -C matrix, and then computes the solution. -C If MITER is 4 or 5, it calls DGBSL. -C Communication with DVSOL uses the following variables.. -C WM = Real work space containing the inverse diagonal matrix if -C MITER = 3 and the LU decomposition of the matrix otherwise. -C Storage of matrix elements starts at WM(3). -C WM also contains the following matrix-related data.. -C WM(1) = SQRT(UROUND) (not used here), -C WM(2) = HRL1, the previous value of H*RL1, used if MITER = 3. -C IWM = Integer work space containing pivot information, starting at -C IWM(31), if MITER is 1, 2, 4, or 5. IWM also contains band -C parameters ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. -C X = The right-hand side vector on input, and the solution vector -C on output, of length N. -C IERSL = Output flag. IERSL = 0 if no trouble occurred. -C IERSL = 1 if a singular matrix arose with MITER = 3. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block DVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for local variables -------------------------------- -C - INTEGER I, MEBAND, ML, MU - DOUBLE PRECISION DI, HRL1, ONE, PHRL1, R, ZERO -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE ONE, ZERO -C - COMMON /DVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HSCAL, PRL1, - 2 RC, RL1, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH -C - DATA ONE /1.0D0/, ZERO /0.0D0/ -C - IERSL = 0 - GO TO (100, 100, 300, 400, 400), MITER - 100 CALL DGESL (WM(3), N, N, IWM(31), X, 0) - RETURN -C - 300 PHRL1 = WM(2) - HRL1 = H*RL1 - WM(2) = HRL1 - IF (HRL1 .EQ. PHRL1) GO TO 330 - R = HRL1/PHRL1 - DO 320 I = 1,N - DI = ONE - R*(ONE - ONE/WM(I+2)) - IF (ABS(DI) .EQ. 
ZERO) GO TO 390 - 320 WM(I+2) = ONE/DI -C - 330 DO 340 I = 1,N - 340 X(I) = WM(I+2)*X(I) - RETURN - 390 IERSL = 1 - RETURN -C - 400 ML = IWM(1) - MU = IWM(2) - MEBAND = 2*ML + MU + 1 - CALL DGBSL (WM(3), MEBAND, N, ML, MU, IWM(31), X, 0) - RETURN -C----------------------- End of Subroutine DVSOL ----------------------- - END -*DECK DVSRCO - SUBROUTINE DVSRCO (RSAV, ISAV, JOB) - DOUBLE PRECISION RSAV - INTEGER ISAV, JOB - DIMENSION RSAV(*), ISAV(*) -C----------------------------------------------------------------------- -C Call sequence input -- RSAV, ISAV, JOB -C Call sequence output -- RSAV, ISAV -C COMMON block variables accessed -- All of /DVOD01/ and /DVOD02/ -C -C Subroutines/functions called by DVSRCO.. None -C----------------------------------------------------------------------- -C This routine saves or restores (depending on JOB) the contents of the -C COMMON blocks DVOD01 and DVOD02, which are used internally by DVODE. -C -C RSAV = real array of length 49 or more. -C ISAV = integer array of length 41 or more. -C JOB = flag indicating to save or restore the COMMON blocks.. -C JOB = 1 if COMMON is to be saved (written to RSAV/ISAV). -C JOB = 2 if COMMON is to be restored (read from RSAV/ISAV). -C A call with JOB = 2 presumes a prior call with JOB = 1. -C----------------------------------------------------------------------- - DOUBLE PRECISION RVOD1, RVOD2 - INTEGER IVOD1, IVOD2 - INTEGER I, LENIV1, LENIV2, LENRV1, LENRV2 -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE LENRV1, LENIV1, LENRV2, LENIV2 -C - COMMON /DVOD01/ RVOD1(48), IVOD1(33) - COMMON /DVOD02/ RVOD2(1), IVOD2(8) - DATA LENRV1/48/, LENIV1/33/, LENRV2/1/, LENIV2/8/ -C - IF (JOB .EQ. 
2) GO TO 100 - DO 10 I = 1,LENRV1 - 10 RSAV(I) = RVOD1(I) - DO 15 I = 1,LENRV2 - 15 RSAV(LENRV1+I) = RVOD2(I) -C - DO 20 I = 1,LENIV1 - 20 ISAV(I) = IVOD1(I) - DO 25 I = 1,LENIV2 - 25 ISAV(LENIV1+I) = IVOD2(I) -C - RETURN -C - 100 CONTINUE - DO 110 I = 1,LENRV1 - 110 RVOD1(I) = RSAV(I) - DO 115 I = 1,LENRV2 - 115 RVOD2(I) = RSAV(LENRV1+I) -C - DO 120 I = 1,LENIV1 - 120 IVOD1(I) = ISAV(I) - DO 125 I = 1,LENIV2 - 125 IVOD2(I) = ISAV(LENIV1+I) -C - RETURN -C----------------------- End of Subroutine DVSRCO ---------------------- - END -*DECK DEWSET - SUBROUTINE DEWSET (N, ITOL, RTOL, ATOL, YCUR, EWT) - DOUBLE PRECISION RTOL, ATOL, YCUR, EWT - INTEGER N, ITOL - DIMENSION RTOL(*), ATOL(*), YCUR(N), EWT(N) -C----------------------------------------------------------------------- -C Call sequence input -- N, ITOL, RTOL, ATOL, YCUR -C Call sequence output -- EWT -C COMMON block variables accessed -- None -C -C Subroutines/functions called by DEWSET.. None -C----------------------------------------------------------------------- -C This subroutine sets the error weight vector EWT according to -C EWT(i) = RTOL(i)*abs(YCUR(i)) + ATOL(i), i = 1,...,N, -C with the subscript on RTOL and/or ATOL possibly replaced by 1 above, -C depending on the value of ITOL. 
-C----------------------------------------------------------------------- - INTEGER I -C - GO TO (10, 20, 30, 40), ITOL - 10 CONTINUE - DO 15 I = 1, N - 15 EWT(I) = RTOL(1)*ABS(YCUR(I)) + ATOL(1) - RETURN - 20 CONTINUE - DO 25 I = 1, N - 25 EWT(I) = RTOL(1)*ABS(YCUR(I)) + ATOL(I) - RETURN - 30 CONTINUE - DO 35 I = 1, N - 35 EWT(I) = RTOL(I)*ABS(YCUR(I)) + ATOL(1) - RETURN - 40 CONTINUE - DO 45 I = 1, N - 45 EWT(I) = RTOL(I)*ABS(YCUR(I)) + ATOL(I) - RETURN -C----------------------- End of Subroutine DEWSET ---------------------- - END -*DECK DVNORM - DOUBLE PRECISION FUNCTION DVNORM (N, V, W) - DOUBLE PRECISION V, W - INTEGER N - DIMENSION V(N), W(N) -C----------------------------------------------------------------------- -C Call sequence input -- N, V, W -C Call sequence output -- None -C COMMON block variables accessed -- None -C -C Subroutines/functions called by DVNORM.. None -C----------------------------------------------------------------------- -C This function routine computes the weighted root-mean-square norm -C of the vector of length N contained in the array V, with weights -C contained in the array W of length N.. -C DVNORM = sqrt( (1/N) * sum( V(i)*W(i) )**2 ) -C----------------------------------------------------------------------- - DOUBLE PRECISION SUM - INTEGER I -C - SUM = 0.0D0 - DO 10 I = 1, N - 10 SUM = SUM + (V(I)*W(I))**2 - DVNORM = SQRT(SUM/REAL(N)) - RETURN -C----------------------- End of Function DVNORM ------------------------ - END -*DECK D1MACH - DOUBLE PRECISION FUNCTION D1MACH (IDUM) - INTEGER IDUM -C----------------------------------------------------------------------- -C This routine computes the unit roundoff of the machine. -C This is defined as the smallest positive machine number -C u such that 1.0 + u .ne. 1.0 -C -C Subroutines/functions called by D1MACH.. None -C----------------------------------------------------------------------- - DOUBLE PRECISION U, COMP - U = 1.0D0 - 10 U = U*0.5D0 - COMP = 1.0D0 + U - IF (COMP .NE. 
1.0D0) GO TO 10 - D1MACH = U*2.0D0 - RETURN -C----------------------- End of Function D1MACH ------------------------ - END -*DECK XERRWD - SUBROUTINE XERRWD (MSG, NMES, NERR, LEVEL, NI, I1, I2, NR, R1, R2) - DOUBLE PRECISION R1, R2 - INTEGER NMES, NERR, LEVEL, NI, I1, I2, NR - CHARACTER*1 MSG(NMES) -C----------------------------------------------------------------------- -C Subroutines XERRWD, XSETF, XSETUN, and the function routine IXSAV, -C as given here, constitute a simplified version of the SLATEC error -C handling package. -C Written by A. C. Hindmarsh and P. N. Brown at LLNL. -C Version of 18 November, 1992. -C This version is in double precision. -C -C All arguments are input arguments. -C -C MSG = The message (character array). -C NMES = The length of MSG (number of characters). -C NERR = The error number (not used). -C LEVEL = The error level.. -C 0 or 1 means recoverable (control returns to caller). -C 2 means fatal (run is aborted--see note below). -C NI = Number of integers (0, 1, or 2) to be printed with message. -C I1,I2 = Integers to be printed, depending on NI. -C NR = Number of reals (0, 1, or 2) to be printed with message. -C R1,R2 = Reals to be printed, depending on NR. -C -C Note.. this routine is machine-dependent and specialized for use -C in limited context, in the following ways.. -C 1. The argument MSG is assumed to be of type CHARACTER, and -C the message is printed with a format of (1X,80A1). -C 2. The message is assumed to take only one line. -C Multi-line messages are generated by repeated calls. -C 3. If LEVEL = 2, control passes to the statement STOP -C to abort the run. This statement may be machine-dependent. -C 4. R1 and R2 are assumed to be in double precision and are printed -C in D21.13 format. -C -C For a different default logical unit number, change the data -C statement in function routine IXSAV. -C For a different run-abort command, change the statement following -C statement 100 at the end. 
-C----------------------------------------------------------------------- -C Subroutines called by XERRWD.. None -C Function routine called by XERRWD.. IXSAV -C----------------------------------------------------------------------- -C - INTEGER I, LUNIT, IXSAV, MESFLG -C -C Get logical unit number and message print flag. ---------------------- - LUNIT = IXSAV (1, 0, .FALSE.) - MESFLG = IXSAV (2, 0, .FALSE.) - IF (MESFLG .EQ. 0) GO TO 100 -C Write the message. --------------------------------------------------- - WRITE (LUNIT,10) (MSG(I),I=1,NMES) - 10 FORMAT(1X,80A1) - IF (NI .EQ. 1) WRITE (LUNIT, 20) I1 - 20 FORMAT(6X,'In above message, I1 =',I10) - IF (NI .EQ. 2) WRITE (LUNIT, 30) I1,I2 - 30 FORMAT(6X,'In above message, I1 =',I10,3X,'I2 =',I10) - IF (NR .EQ. 1) WRITE (LUNIT, 40) R1 - 40 FORMAT(6X,'In above message, R1 =',D21.13) - IF (NR .EQ. 2) WRITE (LUNIT, 50) R1,R2 - 50 FORMAT(6X,'In above, R1 =',D21.13,3X,'R2 =',D21.13) -C Abort the run if LEVEL = 2. ------------------------------------------ - 100 IF (LEVEL .NE. 2) RETURN - STOP -C----------------------- End of Subroutine XERRWD ---------------------- - END -*DECK XSETUN - SUBROUTINE XSETUN (LUN) -C----------------------------------------------------------------------- -C This routine resets the logical unit number for messages. -C -C Subroutines called by XSETUN.. None -C Function routine called by XSETUN.. IXSAV -C----------------------------------------------------------------------- - INTEGER LUN, JUNK, IXSAV -C - IF (LUN .GT. 0) JUNK = IXSAV (1,LUN,.TRUE.) - RETURN -C----------------------- End of Subroutine XSETUN ---------------------- - END -*DECK XSETF - SUBROUTINE XSETF (MFLAG) -C----------------------------------------------------------------------- -C This routine resets the print control flag MFLAG. -C -C Subroutines called by XSETF.. None -C Function routine called by XSETF.. 
IXSAV -C----------------------------------------------------------------------- - INTEGER MFLAG, JUNK, IXSAV -C - IF (MFLAG .EQ. 0 .OR. MFLAG .EQ. 1) JUNK = IXSAV (2,MFLAG,.TRUE.) - RETURN -C----------------------- End of Subroutine XSETF ----------------------- - END -*DECK IXSAV - INTEGER FUNCTION IXSAV (IPAR, IVALUE, ISET) - LOGICAL ISET - INTEGER IPAR, IVALUE -C----------------------------------------------------------------------- -C IXSAV saves and recalls one of two error message parameters: -C LUNIT, the logical unit number to which messages are printed, and -C MESFLG, the message print flag. -C This is a modification of the SLATEC library routine J4SAVE. -C -C Saved local variables.. -C LUNIT = Logical unit number for messages. -C The default is 6 (machine-dependent). -C MESFLG = Print control flag.. -C 1 means print all messages (the default). -C 0 means no printing. -C -C On input.. -C IPAR = Parameter indicator (1 for LUNIT, 2 for MESFLG). -C IVALUE = The value to be set for the parameter, if ISET = .TRUE. -C ISET = Logical flag to indicate whether to read or write. -C If ISET = .TRUE., the parameter will be given -C the value IVALUE. If ISET = .FALSE., the parameter -C will be unchanged, and IVALUE is a dummy argument. -C -C On return.. -C IXSAV = The (old) value of the parameter. -C -C Subroutines/functions called by IXSAV.. None -C----------------------------------------------------------------------- - INTEGER LUNIT, MESFLG -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this routine. -C----------------------------------------------------------------------- - SAVE LUNIT, MESFLG - DATA LUNIT/6/, MESFLG/1/ -C - IF (IPAR .EQ. 1) THEN - IXSAV = LUNIT - IF (ISET) LUNIT = IVALUE - ENDIF -C - IF (IPAR .EQ. 
2) THEN - IXSAV = MESFLG - IF (ISET) MESFLG = IVALUE - ENDIF -C - RETURN -C----------------------- End of Function IXSAV ------------------------- - END diff --git a/scipy-0.10.1/scipy/integrate/odepack/xerrwv.f b/scipy-0.10.1/scipy/integrate/odepack/xerrwv.f deleted file mode 100644 index 7e180e4f88..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/xerrwv.f +++ /dev/null @@ -1,114 +0,0 @@ - subroutine xerrwv (msg, nmes, nerr, level, ni, i1, i2, nr, r1, r2) - integer msg, nmes, nerr, level, ni, i1, i2, nr, - 1 i, lun, lunit, mesflg, ncpw, nch, nwds - double precision r1, r2 - dimension msg(nmes) -c----------------------------------------------------------------------- -c subroutines xerrwv, xsetf, and xsetun, as given here, constitute -c a simplified version of the slatec error handling package. -c written by a. c. hindmarsh at llnl. version of march 30, 1987. -c this version is in double precision. -c -c all arguments are input arguments. -c -c msg = the message (hollerith literal or integer array). -c nmes = the length of msg (number of characters). -c nerr = the error number (not used). -c level = the error level.. -c 0 or 1 means recoverable (control returns to caller). -c 2 means fatal (run is aborted--see note below). -c ni = number of integers (0, 1, or 2) to be printed with message. -c i1,i2 = integers to be printed, depending on ni. -c nr = number of reals (0, 1, or 2) to be printed with message. -c r1,r2 = reals to be printed, depending on nr. -c -c note.. this routine is machine-dependent and specialized for use -c in limited context, in the following ways.. -c 1. the number of hollerith characters stored per word, denoted -c by ncpw below, is a data-loaded constant. -c 2. the value of nmes is assumed to be at most 60. -c (multi-line messages are generated by repeated calls.) -c 3. if level = 2, control passes to the statement stop -c to abort the run. this statement may be machine-dependent. -c 4. 
r1 and r2 are assumed to be in double precision and are printed -c in d21.13 format. -c 5. the common block /eh0001/ below is data-loaded (a machine- -c dependent feature) with default values. -c this block is needed for proper retention of parameters used by -c this routine which the user can reset by calling xsetf or xsetun. -c the variables in this block are as follows.. -c mesflg = print control flag.. -c 1 means print all messages (the default). -c 0 means no printing. -c lunit = logical unit number for messages. -c the default is 6 (machine-dependent). -c----------------------------------------------------------------------- -c the following are instructions for installing this routine -c in different machine environments. -c -c to change the default output unit, change the data statement -c in the block data subprogram below. -c -c for a different number of characters per word, change the -c data statement setting ncpw below, and format 10. alternatives for -c various computers are shown in comment cards. -c -c for a different run-abort command, change the statement following -c statement 100 at the end. -c----------------------------------------------------------------------- - common /eh0001/ mesflg, lunit -c----------------------------------------------------------------------- -c the following data-loaded value of ncpw is valid for the cdc-6600 -c and cdc-7600 computers. -c data ncpw/10/ -c the following is valid for the cray-1 computer. -c data ncpw/8/ -c the following is valid for the burroughs 6700 and 7800 computers. -c data ncpw/6/ -c the following is valid for the pdp-10 computer. -c data ncpw/5/ -c the following is valid for the vax computer with 4 bytes per integer, -c and for the ibm-360, ibm-370, ibm-303x, and ibm-43xx computers. - data ncpw/4/ -c the following is valid for the pdp-11, or vax with 2-byte integers. -c data ncpw/2/ -c----------------------------------------------------------------------- - if (mesflg .eq. 
0) go to 100 -c get logical unit number. --------------------------------------------- - lun = lunit -c get number of words in message. -------------------------------------- - nch = min0(nmes,60) - nwds = nch/ncpw - if (nch .ne. nwds*ncpw) nwds = nwds + 1 -c write the message. --------------------------------------------------- - write (lun, 10) (msg(i),i=1,nwds) -c----------------------------------------------------------------------- -c the following format statement is to have the form -c 10 format(1x,mmann) -c where nn = ncpw and mm is the smallest integer .ge. 60/ncpw. -c the following is valid for ncpw = 10. -c 10 format(1x,6a10) -c the following is valid for ncpw = 8. -c 10 format(1x,8a8) -c the following is valid for ncpw = 6. -c 10 format(1x,10a6) -c the following is valid for ncpw = 5. -c 10 format(1x,12a5) -c the following is valid for ncpw = 4. - 10 format(1x,15a4) -c the following is valid for ncpw = 2. -c 10 format(1x,30a2) -c----------------------------------------------------------------------- - if (ni .eq. 1) write (lun, 20) i1 - 20 format(6x,'in above message, i1 =',i10) - if (ni .eq. 2) write (lun, 30) i1,i2 - 30 format(6x,'in above message, i1 =',i10,3x,'i2 =',i10) - if (nr .eq. 1) write (lun, 40) r1 - 40 format(6x,'in above message, r1 =',d21.13) - if (nr .eq. 2) write (lun, 50) r1,r2 - 50 format(6x,'in above, r1 =',d21.13,3x,'r2 =',d21.13) -c abort the run if level = 2. ------------------------------------------ - 100 if (level .ne. 2) return - stop -c----------------------- end of subroutine xerrwv ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/xsetf.f b/scipy-0.10.1/scipy/integrate/odepack/xsetf.f deleted file mode 100644 index edf4f09ed0..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/xsetf.f +++ /dev/null @@ -1,11 +0,0 @@ - subroutine xsetf (mflag) -c -c this routine resets the print control flag mflag. -c - integer mflag, mesflg, lunit - common /eh0001/ mesflg, lunit -c - if (mflag .eq. 0 .or. 
mflag .eq. 1) mesflg = mflag - return -c----------------------- end of subroutine xsetf ----------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/xsetun.f b/scipy-0.10.1/scipy/integrate/odepack/xsetun.f deleted file mode 100644 index 6d7ddba16f..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/xsetun.f +++ /dev/null @@ -1,11 +0,0 @@ - subroutine xsetun (lun) -c -c this routine resets the logical unit number for messages. -c - integer lun, mesflg, lunit - common /eh0001/ mesflg, lunit -c - if (lun .gt. 0) lunit = lun - return -c----------------------- end of subroutine xsetun ---------------------- - end diff --git a/scipy-0.10.1/scipy/integrate/odepack/zvode.f b/scipy-0.10.1/scipy/integrate/odepack/zvode.f deleted file mode 100644 index be009fe668..0000000000 --- a/scipy-0.10.1/scipy/integrate/odepack/zvode.f +++ /dev/null @@ -1,3650 +0,0 @@ -*DECK ZVODE - SUBROUTINE ZVODE (F, NEQ, Y, T, TOUT, ITOL, RTOL, ATOL, ITASK, - 1 ISTATE, IOPT, ZWORK, LZW, RWORK, LRW, IWORK, LIW, - 2 JAC, MF, RPAR, IPAR) - EXTERNAL F, JAC - DOUBLE COMPLEX Y, ZWORK - DOUBLE PRECISION T, TOUT, RTOL, ATOL, RWORK - INTEGER NEQ, ITOL, ITASK, ISTATE, IOPT, LZW, LRW, IWORK, LIW, - 1 MF, IPAR - DIMENSION Y(*), RTOL(*), ATOL(*), ZWORK(LZW), RWORK(LRW), - 1 IWORK(LIW), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C ZVODE: Variable-coefficient Ordinary Differential Equation solver, -C with fixed-leading-coefficient implementation. -C This version is in complex double precision. -C -C ZVODE solves the initial value problem for stiff or nonstiff -C systems of first order ODEs, -C dy/dt = f(t,y) , or, in component form, -C dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(NEQ)) (i = 1,...,NEQ). -C Here the y vector is treated as complex. -C ZVODE is a package based on the EPISODE and EPISODEB packages, and -C on the ODEPACK user interface standard, with minor modifications. 
-C -C NOTE: When using ZVODE for a stiff system, it should only be used for -C the case in which the function f is analytic, that is, when each f(i) -C is an analytic function of each y(j). Analyticity means that the -C partial derivative df(i)/dy(j) is a unique complex number, and this -C fact is critical in the way ZVODE solves the dense or banded linear -C systems that arise in the stiff case. For a complex stiff ODE system -C in which f is not analytic, ZVODE is likely to have convergence -C failures, and for this problem one should instead use DVODE on the -C equivalent real system (in the real and imaginary parts of y). -C----------------------------------------------------------------------- -C Authors: -C Peter N. Brown and Alan C. Hindmarsh -C Center for Applied Scientific Computing -C Lawrence Livermore National Laboratory -C Livermore, CA 94551 -C and -C George D. Byrne (Prof. Emeritus) -C Illinois Institute of Technology -C Chicago, IL 60616 -C----------------------------------------------------------------------- -C For references, see DVODE. -C----------------------------------------------------------------------- -C Summary of usage. -C -C Communication between the user and the ZVODE package, for normal -C situations, is summarized here. This summary describes only a subset -C of the full set of options available. See the full description for -C details, including optional communication, nonstandard options, -C and instructions for special situations. See also the example -C problem (with program and output) following this summary. -C -C A. First provide a subroutine of the form: -C SUBROUTINE F (NEQ, T, Y, YDOT, RPAR, IPAR) -C DOUBLE COMPLEX Y(NEQ), YDOT(NEQ) -C DOUBLE PRECISION T -C which supplies the vector function f by loading YDOT(i) with f(i). -C -C B. Next determine (or guess) whether or not the problem is stiff. 
-C Stiffness occurs when the Jacobian matrix df/dy has an eigenvalue -C whose real part is negative and large in magnitude, compared to the -C reciprocal of the t span of interest. If the problem is nonstiff, -C use a method flag MF = 10. If it is stiff, there are four standard -C choices for MF (21, 22, 24, 25), and ZVODE requires the Jacobian -C matrix in some form. In these cases (MF .gt. 0), ZVODE will use a -C saved copy of the Jacobian matrix. If this is undesirable because of -C storage limitations, set MF to the corresponding negative value -C (-21, -22, -24, -25). (See full description of MF below.) -C The Jacobian matrix is regarded either as full (MF = 21 or 22), -C or banded (MF = 24 or 25). In the banded case, ZVODE requires two -C half-bandwidth parameters ML and MU. These are, respectively, the -C widths of the lower and upper parts of the band, excluding the main -C diagonal. Thus the band consists of the locations (i,j) with -C i-ML .le. j .le. i+MU, and the full bandwidth is ML+MU+1. -C -C C. If the problem is stiff, you are encouraged to supply the Jacobian -C directly (MF = 21 or 24), but if this is not feasible, ZVODE will -C compute it internally by difference quotients (MF = 22 or 25). -C If you are supplying the Jacobian, provide a subroutine of the form: -C SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD, RPAR, IPAR) -C DOUBLE COMPLEX Y(NEQ), PD(NROWPD,NEQ) -C DOUBLE PRECISION T -C which supplies df/dy by loading PD as follows: -C For a full Jacobian (MF = 21), load PD(i,j) with df(i)/dy(j), -C the partial derivative of f(i) with respect to y(j). (Ignore the -C ML and MU arguments in this case.) -C For a banded Jacobian (MF = 24), load PD(i-j+MU+1,j) with -C df(i)/dy(j), i.e. load the diagonal lines of df/dy into the rows of -C PD from the top down. -C In either case, only nonzero elements need be loaded. -C -C D. Write a main program which calls subroutine ZVODE once for -C each point at which answers are desired. 
This should also provide -C for possible use of logical unit 6 for output of error messages -C by ZVODE. On the first call to ZVODE, supply arguments as follows: -C F = Name of subroutine for right-hand side vector f. -C This name must be declared external in calling program. -C NEQ = Number of first order ODEs. -C Y = Double complex array of initial values, of length NEQ. -C T = The initial value of the independent variable. -C TOUT = First point where output is desired (.ne. T). -C ITOL = 1 or 2 according as ATOL (below) is a scalar or array. -C RTOL = Relative tolerance parameter (scalar). -C ATOL = Absolute tolerance parameter (scalar or array). -C The estimated local error in Y(i) will be controlled so as -C to be roughly less (in magnitude) than -C EWT(i) = RTOL*abs(Y(i)) + ATOL if ITOL = 1, or -C EWT(i) = RTOL*abs(Y(i)) + ATOL(i) if ITOL = 2. -C Thus the local error test passes if, in each component, -C either the absolute error is less than ATOL (or ATOL(i)), -C or the relative error is less than RTOL. -C Use RTOL = 0.0 for pure absolute error control, and -C use ATOL = 0.0 (or ATOL(i) = 0.0) for pure relative error -C control. Caution: Actual (global) errors may exceed these -C local tolerances, so choose them conservatively. -C ITASK = 1 for normal computation of output values of Y at t = TOUT. -C ISTATE = Integer flag (input and output). Set ISTATE = 1. -C IOPT = 0 to indicate no optional input used. -C ZWORK = Double precision complex work array of length at least: -C 15*NEQ for MF = 10, -C 8*NEQ + 2*NEQ**2 for MF = 21 or 22, -C 10*NEQ + (3*ML + 2*MU)*NEQ for MF = 24 or 25. -C LZW = Declared length of ZWORK (in user's DIMENSION statement). -C RWORK = Real work array of length at least 20 + NEQ. -C LRW = Declared length of RWORK (in user's DIMENSION statement). -C IWORK = Integer work array of length at least: -C 30 for MF = 10, -C 30 + NEQ for MF = 21, 22, 24, or 25. 
-C If MF = 24 or 25, input in IWORK(1),IWORK(2) the lower -C and upper half-bandwidths ML,MU. -C LIW = Declared length of IWORK (in user's DIMENSION statement). -C JAC = Name of subroutine for Jacobian matrix (MF = 21 or 24). -C If used, this name must be declared external in calling -C program. If not used, pass a dummy name. -C MF = Method flag. Standard values are: -C 10 for nonstiff (Adams) method, no Jacobian used. -C 21 for stiff (BDF) method, user-supplied full Jacobian. -C 22 for stiff method, internally generated full Jacobian. -C 24 for stiff method, user-supplied banded Jacobian. -C 25 for stiff method, internally generated banded Jacobian. -C RPAR = user-defined real or complex array passed to F and JAC. -C IPAR = user-defined integer array passed to F and JAC. -C Note that the main program must declare arrays Y, ZWORK, RWORK, IWORK, -C and possibly ATOL, RPAR, and IPAR. RPAR may be declared REAL, DOUBLE, -C COMPLEX, or DOUBLE COMPLEX, depending on the user's needs. -C -C E. The output from the first call (or any call) is: -C Y = Array of computed values of y(t) vector. -C T = Corresponding value of independent variable (normally TOUT). -C ISTATE = 2 if ZVODE was successful, negative otherwise. -C -1 means excess work done on this call. (Perhaps wrong MF.) -C -2 means excess accuracy requested. (Tolerances too small.) -C -3 means illegal input detected. (See printed message.) -C -4 means repeated error test failures. (Check all input.) -C -5 means repeated convergence failures. (Perhaps bad -C Jacobian supplied or wrong choice of MF or tolerances.) -C -6 means error weight became zero during problem. (Solution -C component i vanished, and ATOL or ATOL(i) = 0.) -C -C F. To continue the integration after a successful return, simply -C reset TOUT and call ZVODE again. No other parameters need be reset. 
-C -C----------------------------------------------------------------------- -C EXAMPLE PROBLEM -C -C The program below uses ZVODE to solve the following system of 2 ODEs: -C dw/dt = -i*w*w*z, dz/dt = i*z; w(0) = 1/2.1, z(0) = 1; t = 0 to 2*pi. -C Solution: w = 1/(z + 1.1), z = exp(it). As z traces the unit circle, -C w traces a circle of radius 10/2.1 with center at 11/2.1. -C For convenience, Main passes RPAR = (imaginary unit i) to FEX and JEX. -C -C EXTERNAL FEX, JEX -C DOUBLE COMPLEX Y(2), ZWORK(24), RPAR, WTRU, ERR -C DOUBLE PRECISION ABERR, AEMAX, ATOL, RTOL, RWORK(22), T, TOUT -C DIMENSION IWORK(32) -C NEQ = 2 -C Y(1) = 1.0D0/2.1D0 -C Y(2) = 1.0D0 -C T = 0.0D0 -C DTOUT = 0.1570796326794896D0 -C TOUT = DTOUT -C ITOL = 1 -C RTOL = 1.D-9 -C ATOL = 1.D-8 -C ITASK = 1 -C ISTATE = 1 -C IOPT = 0 -C LZW = 24 -C LRW = 22 -C LIW = 32 -C MF = 21 -C RPAR = DCMPLX(0.0D0,1.0D0) -C AEMAX = 0.0D0 -C WRITE(6,10) -C 10 FORMAT(' t',11X,'w',26X,'z') -C DO 40 IOUT = 1,40 -C CALL ZVODE(FEX,NEQ,Y,T,TOUT,ITOL,RTOL,ATOL,ITASK,ISTATE,IOPT, -C 1 ZWORK,LZW,RWORK,LRW,IWORK,LIW,JEX,MF,RPAR,IPAR) -C WTRU = 1.0D0/DCMPLX(COS(T) + 1.1D0, SIN(T)) -C ERR = Y(1) - WTRU -C ABERR = ABS(DREAL(ERR)) + ABS(DIMAG(ERR)) -C AEMAX = MAX(AEMAX,ABERR) -C WRITE(6,20) T, DREAL(Y(1)),DIMAG(Y(1)), DREAL(Y(2)),DIMAG(Y(2)) -C 20 FORMAT(F9.5,2X,2F12.7,3X,2F12.7) -C IF (ISTATE .LT. 0) THEN -C WRITE(6,30) ISTATE -C 30 FORMAT(//'***** Error halt. ISTATE =',I3) -C STOP -C ENDIF -C 40 TOUT = TOUT + DTOUT -C WRITE(6,50) IWORK(11), IWORK(12), IWORK(13), IWORK(20), -C 1 IWORK(21), IWORK(22), IWORK(23), AEMAX -C 50 FORMAT(/' No. steps =',I4,' No. f-s =',I5, -C 1 ' No. J-s =',I4,' No. LU-s =',I4/ -C 2 ' No. nonlinear iterations =',I4/ -C 3 ' No. nonlinear convergence failures =',I4/ -C 4 ' No. error test failures =',I4/ -C 5 ' Max. abs. 
error in w =',D10.2) -C STOP -C END -C -C SUBROUTINE FEX (NEQ, T, Y, YDOT, RPAR, IPAR) -C DOUBLE COMPLEX Y(NEQ), YDOT(NEQ), RPAR -C DOUBLE PRECISION T -C YDOT(1) = -RPAR*Y(1)*Y(1)*Y(2) -C YDOT(2) = RPAR*Y(2) -C RETURN -C END -C -C SUBROUTINE JEX (NEQ, T, Y, ML, MU, PD, NRPD, RPAR, IPAR) -C DOUBLE COMPLEX Y(NEQ), PD(NRPD,NEQ), RPAR -C DOUBLE PRECISION T -C PD(1,1) = -2.0D0*RPAR*Y(1)*Y(2) -C PD(1,2) = -RPAR*Y(1)*Y(1) -C PD(2,2) = RPAR -C RETURN -C END -C -C The output of this example program is as follows: -C -C t w z -C 0.15708 0.4763242 -0.0356919 0.9876884 0.1564345 -C 0.31416 0.4767322 -0.0718256 0.9510565 0.3090170 -C 0.47124 0.4774351 -0.1088651 0.8910065 0.4539906 -C 0.62832 0.4784699 -0.1473206 0.8090170 0.5877853 -C 0.78540 0.4798943 -0.1877789 0.7071067 0.7071069 -C 0.94248 0.4817938 -0.2309414 0.5877852 0.8090171 -C 1.09956 0.4842934 -0.2776778 0.4539904 0.8910066 -C 1.25664 0.4875766 -0.3291039 0.3090169 0.9510566 -C 1.41372 0.4919177 -0.3866987 0.1564343 0.9876884 -C 1.57080 0.4977376 -0.4524889 -0.0000001 1.0000000 -C 1.72788 0.5057044 -0.5293524 -0.1564346 0.9876883 -C 1.88496 0.5169274 -0.6215400 -0.3090171 0.9510565 -C 2.04204 0.5333540 -0.7356275 -0.4539906 0.8910065 -C 2.19911 0.5586542 -0.8823669 -0.5877854 0.8090169 -C 2.35619 0.6004188 -1.0806013 -0.7071069 0.7071067 -C 2.51327 0.6764486 -1.3664281 -0.8090171 0.5877851 -C 2.67035 0.8366909 -1.8175245 -0.8910066 0.4539904 -C 2.82743 1.2657121 -2.6260146 -0.9510566 0.3090168 -C 2.98451 3.0284506 -4.2182180 -0.9876884 0.1564343 -C 3.14159 10.0000699 0.0000663 -1.0000000 -0.0000002 -C 3.29867 3.0284170 4.2182053 -0.9876883 -0.1564346 -C 3.45575 1.2657041 2.6260067 -0.9510565 -0.3090172 -C 3.61283 0.8366878 1.8175205 -0.8910064 -0.4539907 -C 3.76991 0.6764469 1.3664259 -0.8090169 -0.5877854 -C 3.92699 0.6004178 1.0806000 -0.7071066 -0.7071069 -C 4.08407 0.5586535 0.8823662 -0.5877851 -0.8090171 -C 4.24115 0.5333535 0.7356271 -0.4539903 -0.8910066 -C 4.39823 0.5169271 0.6215398 -0.3090168 -0.9510566 
-C 4.55531 0.5057041 0.5293523 -0.1564343 -0.9876884 -C 4.71239 0.4977374 0.4524890 0.0000002 -1.0000000 -C 4.86947 0.4919176 0.3866988 0.1564347 -0.9876883 -C 5.02655 0.4875765 0.3291040 0.3090172 -0.9510564 -C 5.18363 0.4842934 0.2776780 0.4539907 -0.8910064 -C 5.34071 0.4817939 0.2309415 0.5877854 -0.8090169 -C 5.49779 0.4798944 0.1877791 0.7071069 -0.7071066 -C 5.65487 0.4784700 0.1473208 0.8090171 -0.5877850 -C 5.81195 0.4774352 0.1088652 0.8910066 -0.4539903 -C 5.96903 0.4767324 0.0718257 0.9510566 -0.3090168 -C 6.12611 0.4763244 0.0356920 0.9876884 -0.1564342 -C 6.28319 0.4761907 0.0000000 1.0000000 0.0000003 -C -C No. steps = 542 No. f-s = 610 No. J-s = 10 No. LU-s = 47 -C No. nonlinear iterations = 607 -C No. nonlinear convergence failures = 0 -C No. error test failures = 13 -C Max. abs. error in w = 0.13E-03 -C -C----------------------------------------------------------------------- -C Full description of user interface to ZVODE. -C -C The user interface to ZVODE consists of the following parts. -C -C i. The call sequence to subroutine ZVODE, which is a driver -C routine for the solver. This includes descriptions of both -C the call sequence arguments and of user-supplied routines. -C Following these descriptions is -C * a description of optional input available through the -C call sequence, -C * a description of optional output (in the work arrays), and -C * instructions for interrupting and restarting a solution. -C -C ii. Descriptions of other routines in the ZVODE package that may be -C (optionally) called by the user. These provide the ability to -C alter error message handling, save and restore the internal -C COMMON, and obtain specified derivatives of the solution y(t). -C -C iii. Descriptions of COMMON blocks to be declared in overlay -C or similar environments. -C -C iv. Description of two routines in the ZVODE package, either of -C which the user may replace with his own version, if desired. -C these relate to the measurement of errors. 
-C -C----------------------------------------------------------------------- -C Part i. Call Sequence. -C -C The call sequence parameters used for input only are -C F, NEQ, TOUT, ITOL, RTOL, ATOL, ITASK, IOPT, LRW, LIW, JAC, MF, -C and those used for both input and output are -C Y, T, ISTATE. -C The work arrays ZWORK, RWORK, and IWORK are also used for conditional -C and optional input and optional output. (The term output here refers -C to the return from subroutine ZVODE to the user's calling program.) -C -C The legality of input parameters will be thoroughly checked on the -C initial call for the problem, but not checked thereafter unless a -C change in input parameters is flagged by ISTATE = 3 in the input. -C -C The descriptions of the call arguments are as follows. -C -C F = The name of the user-supplied subroutine defining the -C ODE system. The system must be put in the first-order -C form dy/dt = f(t,y), where f is a vector-valued function -C of the scalar t and the vector y. Subroutine F is to -C compute the function f. It is to have the form -C SUBROUTINE F (NEQ, T, Y, YDOT, RPAR, IPAR) -C DOUBLE COMPLEX Y(NEQ), YDOT(NEQ) -C DOUBLE PRECISION T -C where NEQ, T, and Y are input, and the array YDOT = f(t,y) -C is output. Y and YDOT are double complex arrays of length -C NEQ. Subroutine F should not alter Y(1),...,Y(NEQ). -C F must be declared EXTERNAL in the calling program. -C -C Subroutine F may access user-defined real/complex and -C integer work arrays RPAR and IPAR, which are to be -C dimensioned in the calling program. -C -C If quantities computed in the F routine are needed -C externally to ZVODE, an extra call to F should be made -C for this purpose, for consistent and accurate results. -C If only the derivative dy/dt is needed, use ZVINDY instead. -C -C NEQ = The size of the ODE system (number of first order -C ordinary differential equations). Used only for input. 
-C NEQ may not be increased during the problem, but -C can be decreased (with ISTATE = 3 in the input). -C -C Y = A double precision complex array for the vector of dependent -C variables, of length NEQ or more. Used for both input and -C output on the first call (ISTATE = 1), and only for output -C on other calls. On the first call, Y must contain the -C vector of initial values. In the output, Y contains the -C computed solution evaluated at T. If desired, the Y array -C may be used for other purposes between calls to the solver. -C -C This array is passed as the Y argument in all calls to -C F and JAC. -C -C T = The independent variable. In the input, T is used only on -C the first call, as the initial point of the integration. -C In the output, after each call, T is the value at which a -C computed solution Y is evaluated (usually the same as TOUT). -C On an error return, T is the farthest point reached. -C -C TOUT = The next value of t at which a computed solution is desired. -C Used only for input. -C -C When starting the problem (ISTATE = 1), TOUT may be equal -C to T for one call, then should .ne. T for the next call. -C For the initial T, an input value of TOUT .ne. T is used -C in order to determine the direction of the integration -C (i.e. the algebraic sign of the step sizes) and the rough -C scale of the problem. Integration in either direction -C (forward or backward in t) is permitted. -C -C If ITASK = 2 or 5 (one-step modes), TOUT is ignored after -C the first call (i.e. the first call with TOUT .ne. T). -C Otherwise, TOUT is required on every call. -C -C If ITASK = 1, 3, or 4, the values of TOUT need not be -C monotone, but a value of TOUT which backs up is limited -C to the current internal t interval, whose endpoints are -C TCUR - HU and TCUR. (See optional output, below, for -C TCUR and HU.) -C -C ITOL = An indicator for the type of error control. See -C description below under ATOL. Used only for input. 
-C -C RTOL = A relative error tolerance parameter, either a scalar or -C an array of length NEQ. See description below under ATOL. -C Input only. -C -C ATOL = An absolute error tolerance parameter, either a scalar or -C an array of length NEQ. Input only. -C -C The input parameters ITOL, RTOL, and ATOL determine -C the error control performed by the solver. The solver will -C control the vector e = (e(i)) of estimated local errors -C in Y, according to an inequality of the form -C rms-norm of ( e(i)/EWT(i) ) .le. 1, -C where EWT(i) = RTOL(i)*abs(Y(i)) + ATOL(i), -C and the rms-norm (root-mean-square norm) here is -C rms-norm(v) = sqrt(sum v(i)**2 / NEQ). Here EWT = (EWT(i)) -C is a vector of weights which must always be positive, and -C the values of RTOL and ATOL should all be non-negative. -C The following table gives the types (scalar/array) of -C RTOL and ATOL, and the corresponding form of EWT(i). -C -C ITOL RTOL ATOL EWT(i) -C 1 scalar scalar RTOL*ABS(Y(i)) + ATOL -C 2 scalar array RTOL*ABS(Y(i)) + ATOL(i) -C 3 array scalar RTOL(i)*ABS(Y(i)) + ATOL -C 4 array array RTOL(i)*ABS(Y(i)) + ATOL(i) -C -C When either of these parameters is a scalar, it need not -C be dimensioned in the user's calling program. -C -C If none of the above choices (with ITOL, RTOL, and ATOL -C fixed throughout the problem) is suitable, more general -C error controls can be obtained by substituting -C user-supplied routines for the setting of EWT and/or for -C the norm calculation. See Part iv below. -C -C If global errors are to be estimated by making a repeated -C run on the same problem with smaller tolerances, then all -C components of RTOL and ATOL (i.e. of EWT) should be scaled -C down uniformly. -C -C ITASK = An index specifying the task to be performed. -C Input only. ITASK has the following values and meanings. -C 1 means normal computation of output values of y(t) at -C t = TOUT (by overshooting and interpolating). -C 2 means take one step only and return. 
-C 3 means stop at the first internal mesh point at or
-C beyond t = TOUT and return.
-C 4 means normal computation of output values of y(t) at
-C t = TOUT but without overshooting t = TCRIT.
-C TCRIT must be input as RWORK(1). TCRIT may be equal to
-C or beyond TOUT, but not behind it in the direction of
-C integration. This option is useful if the problem
-C has a singularity at or beyond t = TCRIT.
-C 5 means take one step, without passing TCRIT, and return.
-C TCRIT must be input as RWORK(1).
-C
-C Note: If ITASK = 4 or 5 and the solver reaches TCRIT
-C (within roundoff), it will return T = TCRIT (exactly) to
-C indicate this (unless ITASK = 4 and TOUT comes before TCRIT,
-C in which case answers at T = TOUT are returned first).
-C
-C ISTATE = an index used for input and output to specify the
-C state of the calculation.
-C
-C In the input, the values of ISTATE are as follows.
-C 1 means this is the first call for the problem
-C (initializations will be done). See note below.
-C 2 means this is not the first call, and the calculation
-C is to continue normally, with no change in any input
-C parameters except possibly TOUT and ITASK.
-C (If ITOL, RTOL, and/or ATOL are changed between calls
-C with ISTATE = 2, the new values will be used but not
-C tested for legality.)
-C 3 means this is not the first call, and the
-C calculation is to continue normally, but with
-C a change in input parameters other than
-C TOUT and ITASK. Changes are allowed in
-C NEQ, ITOL, RTOL, ATOL, IOPT, LRW, LIW, MF, ML, MU,
-C and any of the optional input except H0.
-C (See IWORK description for ML and MU.)
-C Note: A preliminary call with TOUT = T is not counted
-C as a first call here, as no initialization or checking of
-C input is done. (Such a call is sometimes useful to include
-C the initial conditions in the output.)
-C Thus the first call for which TOUT .ne. T requires
-C ISTATE = 1 in the input.
-C
-C In the output, ISTATE has the following values and meanings.
-C 1 means nothing was done, as TOUT was equal to T with -C ISTATE = 1 in the input. -C 2 means the integration was performed successfully. -C -1 means an excessive amount of work (more than MXSTEP -C steps) was done on this call, before completing the -C requested task, but the integration was otherwise -C successful as far as T. (MXSTEP is an optional input -C and is normally 500.) To continue, the user may -C simply reset ISTATE to a value .gt. 1 and call again. -C (The excess work step counter will be reset to 0.) -C In addition, the user may increase MXSTEP to avoid -C this error return. (See optional input below.) -C -2 means too much accuracy was requested for the precision -C of the machine being used. This was detected before -C completing the requested task, but the integration -C was successful as far as T. To continue, the tolerance -C parameters must be reset, and ISTATE must be set -C to 3. The optional output TOLSF may be used for this -C purpose. (Note: If this condition is detected before -C taking any steps, then an illegal input return -C (ISTATE = -3) occurs instead.) -C -3 means illegal input was detected, before taking any -C integration steps. See written message for details. -C Note: If the solver detects an infinite loop of calls -C to the solver with illegal input, it will cause -C the run to stop. -C -4 means there were repeated error test failures on -C one attempted step, before completing the requested -C task, but the integration was successful as far as T. -C The problem may have a singularity, or the input -C may be inappropriate. -C -5 means there were repeated convergence test failures on -C one attempted step, before completing the requested -C task, but the integration was successful as far as T. -C This may be caused by an inaccurate Jacobian matrix, -C if one is being used. -C -6 means EWT(i) became zero for some i during the -C integration. 
Pure relative error control (ATOL(i)=0.0) -C was requested on a variable which has now vanished. -C The integration was successful as far as T. -C -C Note: Since the normal output value of ISTATE is 2, -C it does not need to be reset for normal continuation. -C Also, since a negative input value of ISTATE will be -C regarded as illegal, a negative output value requires the -C user to change it, and possibly other input, before -C calling the solver again. -C -C IOPT = An integer flag to specify whether or not any optional -C input is being used on this call. Input only. -C The optional input is listed separately below. -C IOPT = 0 means no optional input is being used. -C Default values will be used in all cases. -C IOPT = 1 means optional input is being used. -C -C ZWORK = A double precision complex working array. -C The length of ZWORK must be at least -C NYH*(MAXORD + 1) + 2*NEQ + LWM where -C NYH = the initial value of NEQ, -C MAXORD = 12 (if METH = 1) or 5 (if METH = 2) (unless a -C smaller value is given as an optional input), -C LWM = length of work space for matrix-related data: -C LWM = 0 if MITER = 0, -C LWM = 2*NEQ**2 if MITER = 1 or 2, and MF.gt.0, -C LWM = NEQ**2 if MITER = 1 or 2, and MF.lt.0, -C LWM = NEQ if MITER = 3, -C LWM = (3*ML+2*MU+2)*NEQ if MITER = 4 or 5, and MF.gt.0, -C LWM = (2*ML+MU+1)*NEQ if MITER = 4 or 5, and MF.lt.0. -C (See the MF description for METH and MITER.) -C Thus if MAXORD has its default value and NEQ is constant, -C this length is: -C 15*NEQ for MF = 10, -C 15*NEQ + 2*NEQ**2 for MF = 11 or 12, -C 15*NEQ + NEQ**2 for MF = -11 or -12, -C 16*NEQ for MF = 13, -C 17*NEQ + (3*ML+2*MU)*NEQ for MF = 14 or 15, -C 16*NEQ + (2*ML+MU)*NEQ for MF = -14 or -15, -C 8*NEQ for MF = 20, -C 8*NEQ + 2*NEQ**2 for MF = 21 or 22, -C 8*NEQ + NEQ**2 for MF = -21 or -22, -C 9*NEQ for MF = 23, -C 10*NEQ + (3*ML+2*MU)*NEQ for MF = 24 or 25. -C 9*NEQ + (2*ML+MU)*NEQ for MF = -24 or -25. 
-C -C LZW = The length of the array ZWORK, as declared by the user. -C (This will be checked by the solver.) -C -C RWORK = A real working array (double precision). -C The length of RWORK must be at least 20 + NEQ. -C The first 20 words of RWORK are reserved for conditional -C and optional input and optional output. -C -C The following word in RWORK is a conditional input: -C RWORK(1) = TCRIT = critical value of t which the solver -C is not to overshoot. Required if ITASK is -C 4 or 5, and ignored otherwise. (See ITASK.) -C -C LRW = The length of the array RWORK, as declared by the user. -C (This will be checked by the solver.) -C -C IWORK = An integer work array. The length of IWORK must be at least -C 30 if MITER = 0 or 3 (MF = 10, 13, 20, 23), or -C 30 + NEQ otherwise (abs(MF) = 11,12,14,15,21,22,24,25). -C The first 30 words of IWORK are reserved for conditional and -C optional input and optional output. -C -C The following 2 words in IWORK are conditional input: -C IWORK(1) = ML These are the lower and upper -C IWORK(2) = MU half-bandwidths, respectively, of the -C banded Jacobian, excluding the main diagonal. -C The band is defined by the matrix locations -C (i,j) with i-ML .le. j .le. i+MU. ML and MU -C must satisfy 0 .le. ML,MU .le. NEQ-1. -C These are required if MITER is 4 or 5, and -C ignored otherwise. ML and MU may in fact be -C the band parameters for a matrix to which -C df/dy is only approximately equal. -C -C LIW = the length of the array IWORK, as declared by the user. -C (This will be checked by the solver.) -C -C Note: The work arrays must not be altered between calls to ZVODE -C for the same problem, except possibly for the conditional and -C optional input, and except for the last 2*NEQ words of ZWORK and -C the last NEQ words of RWORK. The latter space is used for internal -C scratch space, and so is available for use by the user outside ZVODE -C between calls, if desired (but not for use by F or JAC). 
-C -C JAC = The name of the user-supplied routine (MITER = 1 or 4) to -C compute the Jacobian matrix, df/dy, as a function of -C the scalar t and the vector y. It is to have the form -C SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD, -C RPAR, IPAR) -C DOUBLE COMPLEX Y(NEQ), PD(NROWPD,NEQ) -C DOUBLE PRECISION T -C where NEQ, T, Y, ML, MU, and NROWPD are input and the array -C PD is to be loaded with partial derivatives (elements of the -C Jacobian matrix) in the output. PD must be given a first -C dimension of NROWPD. T and Y have the same meaning as in -C Subroutine F. -C In the full matrix case (MITER = 1), ML and MU are -C ignored, and the Jacobian is to be loaded into PD in -C columnwise manner, with df(i)/dy(j) loaded into PD(i,j). -C In the band matrix case (MITER = 4), the elements -C within the band are to be loaded into PD in columnwise -C manner, with diagonal lines of df/dy loaded into the rows -C of PD. Thus df(i)/dy(j) is to be loaded into PD(i-j+MU+1,j). -C ML and MU are the half-bandwidth parameters. (See IWORK). -C The locations in PD in the two triangular areas which -C correspond to nonexistent matrix elements can be ignored -C or loaded arbitrarily, as they are overwritten by ZVODE. -C JAC need not provide df/dy exactly. A crude -C approximation (possibly with a smaller bandwidth) will do. -C In either case, PD is preset to zero by the solver, -C so that only the nonzero elements need be loaded by JAC. -C Each call to JAC is preceded by a call to F with the same -C arguments NEQ, T, and Y. Thus to gain some efficiency, -C intermediate quantities shared by both calculations may be -C saved in a user COMMON block by F and not recomputed by JAC, -C if desired. Also, JAC may alter the Y array, if desired. -C JAC must be declared external in the calling program. -C Subroutine JAC may access user-defined real/complex and -C integer work arrays, RPAR and IPAR, whose dimensions are set -C by the user in the calling program. -C -C MF = The method flag. 
Used only for input. The legal values of -C MF are 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, -C -11, -12, -14, -15, -21, -22, -24, -25. -C MF is a signed two-digit integer, MF = JSV*(10*METH + MITER). -C JSV = SIGN(MF) indicates the Jacobian-saving strategy: -C JSV = 1 means a copy of the Jacobian is saved for reuse -C in the corrector iteration algorithm. -C JSV = -1 means a copy of the Jacobian is not saved -C (valid only for MITER = 1, 2, 4, or 5). -C METH indicates the basic linear multistep method: -C METH = 1 means the implicit Adams method. -C METH = 2 means the method based on backward -C differentiation formulas (BDF-s). -C MITER indicates the corrector iteration method: -C MITER = 0 means functional iteration (no Jacobian matrix -C is involved). -C MITER = 1 means chord iteration with a user-supplied -C full (NEQ by NEQ) Jacobian. -C MITER = 2 means chord iteration with an internally -C generated (difference quotient) full Jacobian -C (using NEQ extra calls to F per df/dy value). -C MITER = 3 means chord iteration with an internally -C generated diagonal Jacobian approximation -C (using 1 extra call to F per df/dy evaluation). -C MITER = 4 means chord iteration with a user-supplied -C banded Jacobian. -C MITER = 5 means chord iteration with an internally -C generated banded Jacobian (using ML+MU+1 extra -C calls to F per df/dy evaluation). -C If MITER = 1 or 4, the user must supply a subroutine JAC -C (the name is arbitrary) as described above under JAC. -C For other values of MITER, a dummy argument can be used. -C -C RPAR User-specified array used to communicate real or complex -C parameters to user-supplied subroutines. If RPAR is an -C array, it must be dimensioned in the user's calling program; -C if it is unused or it is a scalar, then it need not be -C dimensioned. The type of RPAR may be REAL, DOUBLE, COMPLEX, -C or DOUBLE COMPLEX, depending on the user program's needs. 
-C RPAR is not type-declared within ZVODE, but simply passed -C (by address) to the user's F and JAC routines. -C -C IPAR User-specified array used to communicate integer parameter -C to user-supplied subroutines. If IPAR is an array, it must -C be dimensioned in the user's calling program. -C----------------------------------------------------------------------- -C Optional Input. -C -C The following is a list of the optional input provided for in the -C call sequence. (See also Part ii.) For each such input variable, -C this table lists its name as used in this documentation, its -C location in the call sequence, its meaning, and the default value. -C The use of any of this input requires IOPT = 1, and in that -C case all of this input is examined. A value of zero for any -C of these optional input variables will cause the default value to be -C used. Thus to use a subset of the optional input, simply preload -C locations 5 to 10 in RWORK and IWORK to 0.0 and 0 respectively, and -C then set those of interest to nonzero values. -C -C NAME LOCATION MEANING AND DEFAULT VALUE -C -C H0 RWORK(5) The step size to be attempted on the first step. -C The default value is determined by the solver. -C -C HMAX RWORK(6) The maximum absolute step size allowed. -C The default value is infinite. -C -C HMIN RWORK(7) The minimum absolute step size allowed. -C The default value is 0. (This lower bound is not -C enforced on the final step before reaching TCRIT -C when ITASK = 4 or 5.) -C -C MAXORD IWORK(5) The maximum order to be allowed. The default -C value is 12 if METH = 1, and 5 if METH = 2. -C If MAXORD exceeds the default value, it will -C be reduced to the default value. -C If MAXORD is changed during the problem, it may -C cause the current order to be reduced. -C -C MXSTEP IWORK(6) Maximum number of (internally defined) steps -C allowed during one call to the solver. -C The default value is 500. 
-C -C MXHNIL IWORK(7) Maximum number of messages printed (per problem) -C warning that T + H = T on a step (H = step size). -C This must be positive to result in a non-default -C value. The default value is 10. -C -C----------------------------------------------------------------------- -C Optional Output. -C -C As optional additional output from ZVODE, the variables listed -C below are quantities related to the performance of ZVODE -C which are available to the user. These are communicated by way of -C the work arrays, but also have internal mnemonic names as shown. -C Except where stated otherwise, all of this output is defined -C on any successful return from ZVODE, and on any return with -C ISTATE = -1, -2, -4, -5, or -6. On an illegal input return -C (ISTATE = -3), they will be unchanged from their existing values -C (if any), except possibly for TOLSF, LENZW, LENRW, and LENIW. -C On any error return, output relevant to the error will be defined, -C as noted below. -C -C NAME LOCATION MEANING -C -C HU RWORK(11) The step size in t last used (successfully). -C -C HCUR RWORK(12) The step size to be attempted on the next step. -C -C TCUR RWORK(13) The current value of the independent variable -C which the solver has actually reached, i.e. the -C current internal mesh point in t. In the output, -C TCUR will always be at least as far from the -C initial value of t as the current argument T, -C but may be farther (if interpolation was done). -C -C TOLSF RWORK(14) A tolerance scale factor, greater than 1.0, -C computed when a request for too much accuracy was -C detected (ISTATE = -3 if detected at the start of -C the problem, ISTATE = -2 otherwise). If ITOL is -C left unaltered but RTOL and ATOL are uniformly -C scaled up by a factor of TOLSF for the next call, -C then the solver is deemed likely to succeed. -C (The user may also ignore TOLSF and alter the -C tolerance parameters in any other way appropriate.) 
-C -C NST IWORK(11) The number of steps taken for the problem so far. -C -C NFE IWORK(12) The number of f evaluations for the problem so far. -C -C NJE IWORK(13) The number of Jacobian evaluations so far. -C -C NQU IWORK(14) The method order last used (successfully). -C -C NQCUR IWORK(15) The order to be attempted on the next step. -C -C IMXER IWORK(16) The index of the component of largest magnitude in -C the weighted local error vector ( e(i)/EWT(i) ), -C on an error return with ISTATE = -4 or -5. -C -C LENZW IWORK(17) The length of ZWORK actually required. -C This is defined on normal returns and on an illegal -C input return for insufficient storage. -C -C LENRW IWORK(18) The length of RWORK actually required. -C This is defined on normal returns and on an illegal -C input return for insufficient storage. -C -C LENIW IWORK(19) The length of IWORK actually required. -C This is defined on normal returns and on an illegal -C input return for insufficient storage. -C -C NLU IWORK(20) The number of matrix LU decompositions so far. -C -C NNI IWORK(21) The number of nonlinear (Newton) iterations so far. -C -C NCFN IWORK(22) The number of convergence failures of the nonlinear -C solver so far. -C -C NETF IWORK(23) The number of error test failures of the integrator -C so far. -C -C The following two arrays are segments of the ZWORK array which -C may also be of interest to the user as optional output. -C For each array, the table below gives its internal name, -C its base address in ZWORK, and its description. -C -C NAME BASE ADDRESS DESCRIPTION -C -C YH 1 The Nordsieck history array, of size NYH by -C (NQCUR + 1), where NYH is the initial value -C of NEQ. For j = 0,1,...,NQCUR, column j+1 -C of YH contains HCUR**j/factorial(j) times -C the j-th derivative of the interpolating -C polynomial currently representing the -C solution, evaluated at t = TCUR. 
-C
-C ACOR LENZW-NEQ+1 Array of size NEQ used for the accumulated
-C corrections on each step, scaled in the output
-C to represent the estimated local error in Y
-C on the last step. This is the vector e in
-C the description of the error control. It is
-C defined only on a successful return from ZVODE.
-C
-C-----------------------------------------------------------------------
-C Interrupting and Restarting
-C
-C If the integration of a given problem by ZVODE is to be
-C interrupted and then later continued, such as when restarting
-C an interrupted run or alternating between two or more ODE problems,
-C the user should save, following the return from the last ZVODE call
-C prior to the interruption, the contents of the call sequence
-C variables and internal COMMON blocks, and later restore these
-C values before the next ZVODE call for that problem. To save
-C and restore the COMMON blocks, use subroutine ZVSRCO, as
-C described below in part ii.
-C
-C In addition, if non-default values for either LUN or MFLAG are
-C desired, an extra call to XSETUN and/or XSETF should be made just
-C before continuing the integration. See Part ii below for details.
-C
-C-----------------------------------------------------------------------
-C Part ii. Other Routines Callable.
-C
-C The following are optional calls which the user may make to
-C gain additional capabilities in conjunction with ZVODE.
-C (The routines XSETUN and XSETF are designed to conform to the
-C SLATEC error handling package.)
-C
-C FORM OF CALL FUNCTION
-C CALL XSETUN(LUN) Set the logical unit number, LUN, for
-C output of messages from ZVODE, if
-C the default is not desired.
-C The default value of LUN is 6.
-C
-C CALL XSETF(MFLAG) Set a flag to control the printing of
-C messages by ZVODE.
-C MFLAG = 0 means do not print. (Danger:
-C This risks losing valuable information.)
-C MFLAG = 1 means print (the default).
-C -C Either of the above calls may be made at -C any time and will take effect immediately. -C -C CALL ZVSRCO(RSAV,ISAV,JOB) Saves and restores the contents of -C the internal COMMON blocks used by -C ZVODE. (See Part iii below.) -C RSAV must be a real array of length 51 -C or more, and ISAV must be an integer -C array of length 40 or more. -C JOB=1 means save COMMON into RSAV/ISAV. -C JOB=2 means restore COMMON from RSAV/ISAV. -C ZVSRCO is useful if one is -C interrupting a run and restarting -C later, or alternating between two or -C more problems solved with ZVODE. -C -C CALL ZVINDY(,,,,,) Provide derivatives of y, of various -C (See below.) orders, at a specified point T, if -C desired. It may be called only after -C a successful return from ZVODE. -C -C The detailed instructions for using ZVINDY are as follows. -C The form of the call is: -C -C CALL ZVINDY (T, K, ZWORK, NYH, DKY, IFLAG) -C -C The input parameters are: -C -C T = Value of independent variable where answers are desired -C (normally the same as the T last returned by ZVODE). -C For valid results, T must lie between TCUR - HU and TCUR. -C (See optional output for TCUR and HU.) -C K = Integer order of the derivative desired. K must satisfy -C 0 .le. K .le. NQCUR, where NQCUR is the current order -C (see optional output). The capability corresponding -C to K = 0, i.e. computing y(T), is already provided -C by ZVODE directly. Since NQCUR .ge. 1, the first -C derivative dy/dt is always available with ZVINDY. -C ZWORK = The history array YH. -C NYH = Column length of YH, equal to the initial value of NEQ. -C -C The output parameters are: -C -C DKY = A double complex array of length NEQ containing the -C computed value of the K-th derivative of y(t). -C IFLAG = Integer flag, returned as 0 if K and T were legal, -C -1 if K was illegal, and -2 if T was illegal. -C On an error return, a message is also written. -C----------------------------------------------------------------------- -C Part iii. 
COMMON Blocks. -C If ZVODE is to be used in an overlay situation, the user -C must declare, in the primary overlay, the variables in: -C (1) the call sequence to ZVODE, -C (2) the two internal COMMON blocks -C /ZVOD01/ of length 83 (50 double precision words -C followed by 33 integer words), -C /ZVOD02/ of length 9 (1 double precision word -C followed by 8 integer words), -C -C If ZVODE is used on a system in which the contents of internal -C COMMON blocks are not preserved between calls, the user should -C declare the above two COMMON blocks in his calling program to insure -C that their contents are preserved. -C -C----------------------------------------------------------------------- -C Part iv. Optionally Replaceable Solver Routines. -C -C Below are descriptions of two routines in the ZVODE package which -C relate to the measurement of errors. Either routine can be -C replaced by a user-supplied version, if desired. However, since such -C a replacement may have a major impact on performance, it should be -C done only when absolutely necessary, and only with great caution. -C (Note: The means by which the package version of a routine is -C superseded by the user's version may be system-dependent.) -C -C (a) ZEWSET. -C The following subroutine is called just before each internal -C integration step, and sets the array of error weights, EWT, as -C described under ITOL/RTOL/ATOL above: -C SUBROUTINE ZEWSET (NEQ, ITOL, RTOL, ATOL, YCUR, EWT) -C where NEQ, ITOL, RTOL, and ATOL are as in the ZVODE call sequence, -C YCUR contains the current (double complex) dependent variable vector, -C and EWT is the array of weights set by ZEWSET. -C -C If the user supplies this subroutine, it must return in EWT(i) -C (i = 1,...,NEQ) a positive quantity suitable for comparison with -C errors in Y(i). 
The EWT array returned by ZEWSET is passed to the -C ZVNORM routine (See below.), and also used by ZVODE in the computation -C of the optional output IMXER, the diagonal Jacobian approximation, -C and the increments for difference quotient Jacobians. -C -C In the user-supplied version of ZEWSET, it may be desirable to use -C the current values of derivatives of y. Derivatives up to order NQ -C are available from the history array YH, described above under -C Optional Output. In ZEWSET, YH is identical to the YCUR array, -C extended to NQ + 1 columns with a column length of NYH and scale -C factors of h**j/factorial(j). On the first call for the problem, -C given by NST = 0, NQ is 1 and H is temporarily set to 1.0. -C NYH is the initial value of NEQ. The quantities NQ, H, and NST -C can be obtained by including in ZEWSET the statements: -C DOUBLE PRECISION RVOD, H, HU -C COMMON /ZVOD01/ RVOD(50), IVOD(33) -C COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C NQ = IVOD(28) -C H = RVOD(21) -C Thus, for example, the current value of dy/dt can be obtained as -C YCUR(NYH+i)/H (i=1,...,NEQ) (and the division by H is -C unnecessary when NST = 0). -C -C (b) ZVNORM. -C The following is a real function routine which computes the weighted -C root-mean-square norm of a vector v: -C D = ZVNORM (N, V, W) -C where: -C N = the length of the vector, -C V = double complex array of length N containing the vector, -C W = real array of length N containing weights, -C D = sqrt( (1/N) * sum(abs(V(i))*W(i))**2 ). -C ZVNORM is called with N = NEQ and with W(i) = 1.0/EWT(i), where -C EWT is as set by subroutine ZEWSET. -C -C If the user supplies this function, it should return a non-negative -C value of ZVNORM suitable for use in the error control in ZVODE. -C None of the arguments should be altered by ZVNORM. 
-C For example, a user-supplied ZVNORM routine might: -C -substitute a max-norm of (V(i)*W(i)) for the rms-norm, or -C -ignore some components of V in the norm, with the effect of -C suppressing the error control on those components of Y. -C----------------------------------------------------------------------- -C REVISION HISTORY (YYYYMMDD) -C 20060517 DATE WRITTEN, modified from DVODE of 20020430. -C 20061227 Added note on use for analytic f. -C----------------------------------------------------------------------- -C Other Routines in the ZVODE Package. -C -C In addition to Subroutine ZVODE, the ZVODE package includes the -C following subroutines and function routines: -C ZVHIN computes an approximate step size for the initial step. -C ZVINDY computes an interpolated value of the y vector at t = TOUT. -C ZVSTEP is the core integrator, which does one step of the -C integration and the associated error control. -C ZVSET sets all method coefficients and test constants. -C ZVNLSD solves the underlying nonlinear system -- the corrector. -C ZVJAC computes and preprocesses the Jacobian matrix J = df/dy -C and the Newton iteration matrix P = I - (h/l1)*J. -C ZVSOL manages solution of linear system in chord iteration. -C ZVJUST adjusts the history array on a change of order. -C ZEWSET sets the error weight vector EWT before each step. -C ZVNORM computes the weighted r.m.s. norm of a vector. -C ZABSSQ computes the squared absolute value of a double complex z. -C ZVSRCO is a user-callable routine to save and restore -C the contents of the internal COMMON blocks. -C ZACOPY is a routine to copy one two-dimensional array to another. -C ZGEFA and ZGESL are routines from LINPACK for solving full -C systems of linear algebraic equations. -C ZGBFA and ZGBSL are routines from LINPACK for solving banded -C linear systems. -C DZSCAL scales a double complex array by a double prec. scalar. -C DZAXPY adds a D.P. scalar times one complex vector to another. 
-C ZCOPY is a basic linear algebra module from the BLAS. -C DUMACH sets the unit roundoff of the machine. -C XERRWD, XSETUN, XSETF, IXSAV, and IUMACH handle the printing of all -C error messages and warnings. XERRWD is machine-dependent. -C Note: ZVNORM, ZABSSQ, DUMACH, IXSAV, and IUMACH are function routines. -C All the others are subroutines. -C The intrinsic functions called with double precision complex arguments -C are: ABS, DREAL, and DIMAG. All of these are expected to return -C double precision real values. -C -C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block ZVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - EXTERNAL ZVNLSD - LOGICAL IHIT - DOUBLE PRECISION ATOLI, BIG, EWTI, FOUR, H0, HMAX, HMX, HUN, ONE, - 1 PT2, RH, RTOLI, SIZE, TCRIT, TNEXT, TOLSF, TP, TWO, ZERO - INTEGER I, IER, IFLAG, IMXER, JCO, KGO, LENIW, LENJ, LENP, LENZW, - 1 LENRW, LENWM, LF0, MBAND, MFA, ML, MORD, MU, MXHNL0, MXSTP0, - 2 NITER, NSLAST - CHARACTER*80 MSG -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION DUMACH, ZVNORM -C - DIMENSION MORD(2) -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to ZVODE. 
-C----------------------------------------------------------------------- - SAVE MORD, MXHNL0, MXSTP0 - SAVE ZERO, ONE, TWO, FOUR, PT2, HUN -C----------------------------------------------------------------------- -C The following internal COMMON blocks contain variables which are -C communicated between subroutines in the ZVODE package, or which are -C to be saved between calls to ZVODE. -C In each block, real variables precede integers. -C The block /ZVOD01/ appears in subroutines ZVODE, ZVINDY, ZVSTEP, -C ZVSET, ZVNLSD, ZVJAC, ZVSOL, ZVJUST and ZVSRCO. -C The block /ZVOD02/ appears in subroutines ZVODE, ZVINDY, ZVSTEP, -C ZVNLSD, ZVJAC, and ZVSRCO. -C -C The variables stored in the internal COMMON blocks are as follows: -C -C ACNRM = Weighted r.m.s. norm of accumulated correction vectors. -C CCMXJ = Threshhold on DRC for updating the Jacobian. (See DRC.) -C CONP = The saved value of TQ(5). -C CRATE = Estimated corrector convergence rate constant. -C DRC = Relative change in H*RL1 since last ZVJAC call. -C EL = Real array of integration coefficients. See ZVSET. -C ETA = Saved tentative ratio of new to old H. -C ETAMAX = Saved maximum value of ETA to be allowed. -C H = The step size. -C HMIN = The minimum absolute value of the step size H to be used. -C HMXI = Inverse of the maximum absolute value of H to be used. -C HMXI = 0.0 is allowed and corresponds to an infinite HMAX. -C HNEW = The step size to be attempted on the next step. -C HRL1 = Saved value of H*RL1. -C HSCAL = Stepsize in scaling of YH array. -C PRL1 = The saved value of RL1. -C RC = Ratio of current H*RL1 to value on last ZVJAC call. -C RL1 = The reciprocal of the coefficient EL(1). -C SRUR = Sqrt(UROUND), used in difference quotient algorithms. -C TAU = Real vector of past NQ step sizes, length 13. -C TQ = A real vector of length 5 in which ZVSET stores constants -C used for the convergence test, the error test, and the -C selection of H at a new order. 
-C TN = The independent variable, updated on each step taken. -C UROUND = The machine unit roundoff. The smallest positive real number -C such that 1.0 + UROUND .ne. 1.0 -C ICF = Integer flag for convergence failure in ZVNLSD: -C 0 means no failures. -C 1 means convergence failure with out of date Jacobian -C (recoverable error). -C 2 means convergence failure with current Jacobian or -C singular matrix (unrecoverable error). -C INIT = Saved integer flag indicating whether initialization of the -C problem has been done (INIT = 1) or not. -C IPUP = Saved flag to signal updating of Newton matrix. -C JCUR = Output flag from ZVJAC showing Jacobian status: -C JCUR = 0 means J is not current. -C JCUR = 1 means J is current. -C JSTART = Integer flag used as input to ZVSTEP: -C 0 means perform the first step. -C 1 means take a new step continuing from the last. -C -1 means take the next step with a new value of MAXORD, -C HMIN, HMXI, N, METH, MITER, and/or matrix parameters. -C On return, ZVSTEP sets JSTART = 1. -C JSV = Integer flag for Jacobian saving, = sign(MF). -C KFLAG = A completion code from ZVSTEP with the following meanings: -C 0 the step was succesful. -C -1 the requested error could not be achieved. -C -2 corrector convergence could not be achieved. -C -3, -4 fatal error in VNLS (can not occur here). -C KUTH = Input flag to ZVSTEP showing whether H was reduced by the -C driver. KUTH = 1 if H was reduced, = 0 otherwise. -C L = Integer variable, NQ + 1, current order plus one. -C LMAX = MAXORD + 1 (used for dimensioning). -C LOCJS = A pointer to the saved Jacobian, whose storage starts at -C WM(LOCJS), if JSV = 1. -C LYH, LEWT, LACOR, LSAVF, LWM, LIWM = Saved integer pointers -C to segments of ZWORK, RWORK, and IWORK. -C MAXORD = The maximum order of integration method to be allowed. -C METH/MITER = The method flags. See MF. -C MSBJ = The maximum number of steps between J evaluations, = 50. -C MXHNIL = Saved value of optional input MXHNIL. 
-C MXSTEP = Saved value of optional input MXSTEP. -C N = The number of first-order ODEs, = NEQ. -C NEWH = Saved integer to flag change of H. -C NEWQ = The method order to be used on the next step. -C NHNIL = Saved counter for occurrences of T + H = T. -C NQ = Integer variable, the current integration method order. -C NQNYH = Saved value of NQ*NYH. -C NQWAIT = A counter controlling the frequency of order changes. -C An order change is about to be considered if NQWAIT = 1. -C NSLJ = The number of steps taken as of the last Jacobian update. -C NSLP = Saved value of NST as of last Newton matrix update. -C NYH = Saved value of the initial value of NEQ. -C HU = The step size in t last used. -C NCFN = Number of nonlinear convergence failures so far. -C NETF = The number of error test failures of the integrator so far. -C NFE = The number of f evaluations for the problem so far. -C NJE = The number of Jacobian evaluations so far. -C NLU = The number of matrix LU decompositions so far. -C NNI = Number of nonlinear iterations so far. -C NQU = The method order last used. -C NST = The number of steps taken for the problem so far. -C----------------------------------------------------------------------- - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA MORD(1) /12/, MORD(2) /5/, MXSTP0 /500/, MXHNL0 /10/ - DATA ZERO /0.0D0/, ONE /1.0D0/, TWO /2.0D0/, FOUR /4.0D0/, - 1 PT2 /0.2D0/, HUN /100.0D0/ -C----------------------------------------------------------------------- -C Block A. -C This code block is executed on every call. 
-C It tests ISTATE and ITASK for legality and branches appropriately. -C If ISTATE .gt. 1 but the flag INIT shows that initialization has -C not yet been done, an error return occurs. -C If ISTATE = 1 and TOUT = T, return immediately. -C----------------------------------------------------------------------- - IF (ISTATE .LT. 1 .OR. ISTATE .GT. 3) GO TO 601 - IF (ITASK .LT. 1 .OR. ITASK .GT. 5) GO TO 602 - IF (ISTATE .EQ. 1) GO TO 10 - IF (INIT .NE. 1) GO TO 603 - IF (ISTATE .EQ. 2) GO TO 200 - GO TO 20 - 10 INIT = 0 - IF (TOUT .EQ. T) RETURN -C----------------------------------------------------------------------- -C Block B. -C The next code block is executed for the initial call (ISTATE = 1), -C or for a continuation call with parameter changes (ISTATE = 3). -C It contains checking of all input and various initializations. -C -C First check legality of the non-optional input NEQ, ITOL, IOPT, -C MF, ML, and MU. -C----------------------------------------------------------------------- - 20 IF (NEQ .LE. 0) GO TO 604 - IF (ISTATE .EQ. 1) GO TO 25 - IF (NEQ .GT. N) GO TO 605 - 25 N = NEQ - IF (ITOL .LT. 1 .OR. ITOL .GT. 4) GO TO 606 - IF (IOPT .LT. 0 .OR. IOPT .GT. 1) GO TO 607 - JSV = SIGN(1,MF) - MFA = ABS(MF) - METH = MFA/10 - MITER = MFA - 10*METH - IF (METH .LT. 1 .OR. METH .GT. 2) GO TO 608 - IF (MITER .LT. 0 .OR. MITER .GT. 5) GO TO 608 - IF (MITER .LE. 3) GO TO 30 - ML = IWORK(1) - MU = IWORK(2) - IF (ML .LT. 0 .OR. ML .GE. N) GO TO 609 - IF (MU .LT. 0 .OR. MU .GE. N) GO TO 610 - 30 CONTINUE -C Next process and check the optional input. --------------------------- - IF (IOPT .EQ. 1) GO TO 40 - MAXORD = MORD(METH) - MXSTEP = MXSTP0 - MXHNIL = MXHNL0 - IF (ISTATE .EQ. 1) H0 = ZERO - HMXI = ZERO - HMIN = ZERO - GO TO 60 - 40 MAXORD = IWORK(5) - IF (MAXORD .LT. 0) GO TO 611 - IF (MAXORD .EQ. 0) MAXORD = 100 - MAXORD = MIN(MAXORD,MORD(METH)) - MXSTEP = IWORK(6) - IF (MXSTEP .LT. 0) GO TO 612 - IF (MXSTEP .EQ. 0) MXSTEP = MXSTP0 - MXHNIL = IWORK(7) - IF (MXHNIL .LT. 
0) GO TO 613 - IF (MXHNIL .EQ. 0) MXHNIL = MXHNL0 - IF (ISTATE .NE. 1) GO TO 50 - H0 = RWORK(5) - IF ((TOUT - T)*H0 .LT. ZERO) GO TO 614 - 50 HMAX = RWORK(6) - IF (HMAX .LT. ZERO) GO TO 615 - HMXI = ZERO - IF (HMAX .GT. ZERO) HMXI = ONE/HMAX - HMIN = RWORK(7) - IF (HMIN .LT. ZERO) GO TO 616 -C----------------------------------------------------------------------- -C Set work array pointers and check lengths LZW, LRW, and LIW. -C Pointers to segments of ZWORK, RWORK, and IWORK are named by prefixing -C L to the name of the segment. E.g., segment YH starts at ZWORK(LYH). -C Segments of ZWORK (in order) are denoted YH, WM, SAVF, ACOR. -C Besides optional inputs/outputs, RWORK has only the segment EWT. -C Within WM, LOCJS is the location of the saved Jacobian (JSV .gt. 0). -C----------------------------------------------------------------------- - 60 LYH = 1 - IF (ISTATE .EQ. 1) NYH = N - LWM = LYH + (MAXORD + 1)*NYH - JCO = MAX(0,JSV) - IF (MITER .EQ. 0) LENWM = 0 - IF (MITER .EQ. 1 .OR. MITER .EQ. 2) THEN - LENWM = (1 + JCO)*N*N - LOCJS = N*N + 1 - ENDIF - IF (MITER .EQ. 3) LENWM = N - IF (MITER .EQ. 4 .OR. MITER .EQ. 5) THEN - MBAND = ML + MU + 1 - LENP = (MBAND + ML)*N - LENJ = MBAND*N - LENWM = LENP + JCO*LENJ - LOCJS = LENP + 1 - ENDIF - LSAVF = LWM + LENWM - LACOR = LSAVF + N - LENZW = LACOR + N - 1 - IWORK(17) = LENZW - LEWT = 21 - LENRW = 20 + N - IWORK(18) = LENRW - LIWM = 1 - LENIW = 30 + N - IF (MITER .EQ. 0 .OR. MITER .EQ. 3) LENIW = 30 - IWORK(19) = LENIW - IF (LENZW .GT. LZW) GO TO 628 - IF (LENRW .GT. LRW) GO TO 617 - IF (LENIW .GT. LIW) GO TO 618 -C Check RTOL and ATOL for legality. ------------------------------------ - RTOLI = RTOL(1) - ATOLI = ATOL(1) - DO 70 I = 1,N - IF (ITOL .GE. 3) RTOLI = RTOL(I) - IF (ITOL .EQ. 2 .OR. ITOL .EQ. 4) ATOLI = ATOL(I) - IF (RTOLI .LT. ZERO) GO TO 619 - IF (ATOLI .LT. ZERO) GO TO 620 - 70 CONTINUE - IF (ISTATE .EQ. 1) GO TO 100 -C If ISTATE = 3, set flag to signal parameter changes to ZVSTEP. 
------- - JSTART = -1 - IF (NQ .LE. MAXORD) GO TO 200 -C MAXORD was reduced below NQ. Copy YH(*,MAXORD+2) into SAVF. --------- - CALL ZCOPY (N, ZWORK(LWM), 1, ZWORK(LSAVF), 1) - GO TO 200 -C----------------------------------------------------------------------- -C Block C. -C The next block is for the initial call only (ISTATE = 1). -C It contains all remaining initializations, the initial call to F, -C and the calculation of the initial step size. -C The error weights in EWT are inverted after being loaded. -C----------------------------------------------------------------------- - 100 UROUND = DUMACH() - TN = T - IF (ITASK .NE. 4 .AND. ITASK .NE. 5) GO TO 110 - TCRIT = RWORK(1) - IF ((TCRIT - TOUT)*(TOUT - T) .LT. ZERO) GO TO 625 - IF (H0 .NE. ZERO .AND. (T + H0 - TCRIT)*H0 .GT. ZERO) - 1 H0 = TCRIT - T - 110 JSTART = 0 - IF (MITER .GT. 0) SRUR = SQRT(UROUND) - CCMXJ = PT2 - MSBJ = 50 - NHNIL = 0 - NST = 0 - NJE = 0 - NNI = 0 - NCFN = 0 - NETF = 0 - NLU = 0 - NSLJ = 0 - NSLAST = 0 - HU = ZERO - NQU = 0 -C Initial call to F. (LF0 points to YH(*,2).) ------------------------- - LF0 = LYH + NYH - CALL F (N, T, Y, ZWORK(LF0), RPAR, IPAR) - NFE = 1 -C Load the initial value vector in YH. --------------------------------- - CALL ZCOPY (N, Y, 1, ZWORK(LYH), 1) -C Load and invert the EWT array. (H is temporarily set to 1.0.) ------- - NQ = 1 - H = ONE - CALL ZEWSET (N, ITOL, RTOL, ATOL, ZWORK(LYH), RWORK(LEWT)) - DO 120 I = 1,N - IF (RWORK(I+LEWT-1) .LE. ZERO) GO TO 621 - 120 RWORK(I+LEWT-1) = ONE/RWORK(I+LEWT-1) - IF (H0 .NE. ZERO) GO TO 180 -C Call ZVHIN to set initial step size H0 to be attempted. -------------- - CALL ZVHIN (N, T, ZWORK(LYH), ZWORK(LF0), F, RPAR, IPAR, TOUT, - 1 UROUND, RWORK(LEWT), ITOL, ATOL, Y, ZWORK(LACOR), H0, - 2 NITER, IER) - NFE = NFE + NITER - IF (IER .NE. 0) GO TO 622 -C Adjust H0 if necessary to meet HMAX bound. --------------------------- - 180 RH = ABS(H0)*HMXI - IF (RH .GT. ONE) H0 = H0/RH -C Load H with H0 and scale YH(*,2) by H0. 
------------------------------ - H = H0 - CALL DZSCAL (N, H0, ZWORK(LF0), 1) - GO TO 270 -C----------------------------------------------------------------------- -C Block D. -C The next code block is for continuation calls only (ISTATE = 2 or 3) -C and is to check stop conditions before taking a step. -C----------------------------------------------------------------------- - 200 NSLAST = NST - KUTH = 0 - GO TO (210, 250, 220, 230, 240), ITASK - 210 IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 - CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) - IF (IFLAG .NE. 0) GO TO 627 - T = TOUT - GO TO 420 - 220 TP = TN - HU*(ONE + HUN*UROUND) - IF ((TP - TOUT)*H .GT. ZERO) GO TO 623 - IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 - GO TO 400 - 230 TCRIT = RWORK(1) - IF ((TN - TCRIT)*H .GT. ZERO) GO TO 624 - IF ((TCRIT - TOUT)*H .LT. ZERO) GO TO 625 - IF ((TN - TOUT)*H .LT. ZERO) GO TO 245 - CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) - IF (IFLAG .NE. 0) GO TO 627 - T = TOUT - GO TO 420 - 240 TCRIT = RWORK(1) - IF ((TN - TCRIT)*H .GT. ZERO) GO TO 624 - 245 HMX = ABS(TN) + ABS(H) - IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX - IF (IHIT) GO TO 400 - TNEXT = TN + HNEW*(ONE + FOUR*UROUND) - IF ((TNEXT - TCRIT)*H .LE. ZERO) GO TO 250 - H = (TCRIT - TN)*(ONE - FOUR*UROUND) - KUTH = 1 -C----------------------------------------------------------------------- -C Block E. -C The next block is normally executed for all calls and contains -C the call to the one-step core integrator ZVSTEP. -C -C This is a looping point for the integration steps. -C -C First check for too many steps being taken, update EWT (if not at -C start of problem), check for too much accuracy being requested, and -C check for H below the roundoff level in T. -C----------------------------------------------------------------------- - 250 CONTINUE - IF ((NST-NSLAST) .GE. MXSTEP) GO TO 500 - CALL ZEWSET (N, ITOL, RTOL, ATOL, ZWORK(LYH), RWORK(LEWT)) - DO 260 I = 1,N - IF (RWORK(I+LEWT-1) .LE. 
ZERO) GO TO 510 - 260 RWORK(I+LEWT-1) = ONE/RWORK(I+LEWT-1) - 270 TOLSF = UROUND*ZVNORM (N, ZWORK(LYH), RWORK(LEWT)) - IF (TOLSF .LE. ONE) GO TO 280 - TOLSF = TOLSF*TWO - IF (NST .EQ. 0) GO TO 626 - GO TO 520 - 280 IF ((TN + H) .NE. TN) GO TO 290 - NHNIL = NHNIL + 1 - IF (NHNIL .GT. MXHNIL) GO TO 290 - MSG = 'ZVODE-- Warning: internal T (=R1) and H (=R2) are' - CALL XERRWD (MSG, 50, 101, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG=' such that in the machine, T + H = T on the next step ' - CALL XERRWD (MSG, 60, 101, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' (H = step size). solver will continue anyway' - CALL XERRWD (MSG, 50, 101, 1, 0, 0, 0, 2, TN, H) - IF (NHNIL .LT. MXHNIL) GO TO 290 - MSG = 'ZVODE-- Above warning has been issued I1 times. ' - CALL XERRWD (MSG, 50, 102, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' it will not be issued again for this problem' - CALL XERRWD (MSG, 50, 102, 1, 1, MXHNIL, 0, 0, ZERO, ZERO) - 290 CONTINUE -C----------------------------------------------------------------------- -C CALL ZVSTEP (Y, YH, NYH, YH, EWT, SAVF, VSAV, ACOR, -C WM, IWM, F, JAC, F, ZVNLSD, RPAR, IPAR) -C----------------------------------------------------------------------- - CALL ZVSTEP (Y, ZWORK(LYH), NYH, ZWORK(LYH), RWORK(LEWT), - 1 ZWORK(LSAVF), Y, ZWORK(LACOR), ZWORK(LWM), IWORK(LIWM), - 2 F, JAC, F, ZVNLSD, RPAR, IPAR) - KGO = 1 - KFLAG -C Branch on KFLAG. Note: In this version, KFLAG can not be set to -3. -C KFLAG .eq. 0, -1, -2 - GO TO (300, 530, 540), KGO -C----------------------------------------------------------------------- -C Block F. -C The following block handles the case of a successful return from the -C core integrator (KFLAG = 0). Test for stop conditions. -C----------------------------------------------------------------------- - 300 INIT = 1 - KUTH = 0 - GO TO (310, 400, 330, 340, 350), ITASK -C ITASK = 1. If TOUT has been reached, interpolate. ------------------- - 310 IF ((TN - TOUT)*H .LT. 
ZERO) GO TO 250 - CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) - T = TOUT - GO TO 420 -C ITASK = 3. Jump to exit if TOUT was reached. ------------------------ - 330 IF ((TN - TOUT)*H .GE. ZERO) GO TO 400 - GO TO 250 -C ITASK = 4. See if TOUT or TCRIT was reached. Adjust H if necessary. - 340 IF ((TN - TOUT)*H .LT. ZERO) GO TO 345 - CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) - T = TOUT - GO TO 420 - 345 HMX = ABS(TN) + ABS(H) - IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX - IF (IHIT) GO TO 400 - TNEXT = TN + HNEW*(ONE + FOUR*UROUND) - IF ((TNEXT - TCRIT)*H .LE. ZERO) GO TO 250 - H = (TCRIT - TN)*(ONE - FOUR*UROUND) - KUTH = 1 - GO TO 250 -C ITASK = 5. See if TCRIT was reached and jump to exit. --------------- - 350 HMX = ABS(TN) + ABS(H) - IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX -C----------------------------------------------------------------------- -C Block G. -C The following block handles all successful returns from ZVODE. -C If ITASK .ne. 1, Y is loaded from YH and T is set accordingly. -C ISTATE is set to 2, and the optional output is loaded into the work -C arrays before returning. -C----------------------------------------------------------------------- - 400 CONTINUE - CALL ZCOPY (N, ZWORK(LYH), 1, Y, 1) - T = TN - IF (ITASK .NE. 4 .AND. ITASK .NE. 5) GO TO 420 - IF (IHIT) T = TCRIT - 420 ISTATE = 2 - RWORK(11) = HU - RWORK(12) = HNEW - RWORK(13) = TN - IWORK(11) = NST - IWORK(12) = NFE - IWORK(13) = NJE - IWORK(14) = NQU - IWORK(15) = NEWQ - IWORK(20) = NLU - IWORK(21) = NNI - IWORK(22) = NCFN - IWORK(23) = NETF - RETURN -C----------------------------------------------------------------------- -C Block H. -C The following block handles all unsuccessful returns other than -C those for illegal input. First the error message routine is called. -C if there was an error test or convergence test failure, IMXER is set. -C Then Y is loaded from YH, and T is set to TN. -C The optional output is loaded into the work arrays before returning. 
-C----------------------------------------------------------------------- -C The maximum number of steps was taken before reaching TOUT. ---------- - 500 MSG = 'ZVODE-- At current T (=R1), MXSTEP (=I1) steps ' - CALL XERRWD (MSG, 50, 201, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' taken on this call before reaching TOUT ' - CALL XERRWD (MSG, 50, 201, 1, 1, MXSTEP, 0, 1, TN, ZERO) - ISTATE = -1 - GO TO 580 -C EWT(i) .le. 0.0 for some i (not at start of problem). ---------------- - 510 EWTI = RWORK(LEWT+I-1) - MSG = 'ZVODE-- At T (=R1), EWT(I1) has become R2 .le. 0.' - CALL XERRWD (MSG, 50, 202, 1, 1, I, 0, 2, TN, EWTI) - ISTATE = -6 - GO TO 580 -C Too much accuracy requested for machine precision. ------------------- - 520 MSG = 'ZVODE-- At T (=R1), too much accuracy requested ' - CALL XERRWD (MSG, 50, 203, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' for precision of machine: see TOLSF (=R2) ' - CALL XERRWD (MSG, 50, 203, 1, 0, 0, 0, 2, TN, TOLSF) - RWORK(14) = TOLSF - ISTATE = -2 - GO TO 580 -C KFLAG = -1. Error test failed repeatedly or with ABS(H) = HMIN. ----- - 530 MSG = 'ZVODE-- At T(=R1) and step size H(=R2), the error' - CALL XERRWD (MSG, 50, 204, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' test failed repeatedly or with abs(H) = HMIN' - CALL XERRWD (MSG, 50, 204, 1, 0, 0, 0, 2, TN, H) - ISTATE = -4 - GO TO 560 -C KFLAG = -2. Convergence failed repeatedly or with ABS(H) = HMIN. ---- - 540 MSG = 'ZVODE-- At T (=R1) and step size H (=R2), the ' - CALL XERRWD (MSG, 50, 205, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' corrector convergence failed repeatedly ' - CALL XERRWD (MSG, 50, 205, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG = ' or with abs(H) = HMIN ' - CALL XERRWD (MSG, 30, 205, 1, 0, 0, 0, 2, TN, H) - ISTATE = -5 -C Compute IMXER if relevant. ------------------------------------------- - 560 BIG = ZERO - IMXER = 1 - DO 570 I = 1,N - SIZE = ABS(ZWORK(I+LACOR-1))*RWORK(I+LEWT-1) - IF (BIG .GE. 
SIZE) GO TO 570 - BIG = SIZE - IMXER = I - 570 CONTINUE - IWORK(16) = IMXER -C Set Y vector, T, and optional output. -------------------------------- - 580 CONTINUE - CALL ZCOPY (N, ZWORK(LYH), 1, Y, 1) - T = TN - RWORK(11) = HU - RWORK(12) = H - RWORK(13) = TN - IWORK(11) = NST - IWORK(12) = NFE - IWORK(13) = NJE - IWORK(14) = NQU - IWORK(15) = NQ - IWORK(20) = NLU - IWORK(21) = NNI - IWORK(22) = NCFN - IWORK(23) = NETF - RETURN -C----------------------------------------------------------------------- -C Block I. -C The following block handles all error returns due to illegal input -C (ISTATE = -3), as detected before calling the core integrator. -C First the error message routine is called. If the illegal input -C is a negative ISTATE, the run is aborted (apparent infinite loop). -C----------------------------------------------------------------------- - 601 MSG = 'ZVODE-- ISTATE (=I1) illegal ' - CALL XERRWD (MSG, 30, 1, 1, 1, ISTATE, 0, 0, ZERO, ZERO) - IF (ISTATE .LT. 0) GO TO 800 - GO TO 700 - 602 MSG = 'ZVODE-- ITASK (=I1) illegal ' - CALL XERRWD (MSG, 30, 2, 1, 1, ITASK, 0, 0, ZERO, ZERO) - GO TO 700 - 603 MSG='ZVODE-- ISTATE (=I1) .gt. 1 but ZVODE not initialized ' - CALL XERRWD (MSG, 60, 3, 1, 1, ISTATE, 0, 0, ZERO, ZERO) - GO TO 700 - 604 MSG = 'ZVODE-- NEQ (=I1) .lt. 
1 ' - CALL XERRWD (MSG, 30, 4, 1, 1, NEQ, 0, 0, ZERO, ZERO) - GO TO 700 - 605 MSG = 'ZVODE-- ISTATE = 3 and NEQ increased (I1 to I2) ' - CALL XERRWD (MSG, 50, 5, 1, 2, N, NEQ, 0, ZERO, ZERO) - GO TO 700 - 606 MSG = 'ZVODE-- ITOL (=I1) illegal ' - CALL XERRWD (MSG, 30, 6, 1, 1, ITOL, 0, 0, ZERO, ZERO) - GO TO 700 - 607 MSG = 'ZVODE-- IOPT (=I1) illegal ' - CALL XERRWD (MSG, 30, 7, 1, 1, IOPT, 0, 0, ZERO, ZERO) - GO TO 700 - 608 MSG = 'ZVODE-- MF (=I1) illegal ' - CALL XERRWD (MSG, 30, 8, 1, 1, MF, 0, 0, ZERO, ZERO) - GO TO 700 - 609 MSG = 'ZVODE-- ML (=I1) illegal: .lt.0 or .ge.NEQ (=I2)' - CALL XERRWD (MSG, 50, 9, 1, 2, ML, NEQ, 0, ZERO, ZERO) - GO TO 700 - 610 MSG = 'ZVODE-- MU (=I1) illegal: .lt.0 or .ge.NEQ (=I2)' - CALL XERRWD (MSG, 50, 10, 1, 2, MU, NEQ, 0, ZERO, ZERO) - GO TO 700 - 611 MSG = 'ZVODE-- MAXORD (=I1) .lt. 0 ' - CALL XERRWD (MSG, 30, 11, 1, 1, MAXORD, 0, 0, ZERO, ZERO) - GO TO 700 - 612 MSG = 'ZVODE-- MXSTEP (=I1) .lt. 0 ' - CALL XERRWD (MSG, 30, 12, 1, 1, MXSTEP, 0, 0, ZERO, ZERO) - GO TO 700 - 613 MSG = 'ZVODE-- MXHNIL (=I1) .lt. 0 ' - CALL XERRWD (MSG, 30, 13, 1, 1, MXHNIL, 0, 0, ZERO, ZERO) - GO TO 700 - 614 MSG = 'ZVODE-- TOUT (=R1) behind T (=R2) ' - CALL XERRWD (MSG, 40, 14, 1, 0, 0, 0, 2, TOUT, T) - MSG = ' integration direction is given by H0 (=R1) ' - CALL XERRWD (MSG, 50, 14, 1, 0, 0, 0, 1, H0, ZERO) - GO TO 700 - 615 MSG = 'ZVODE-- HMAX (=R1) .lt. 0.0 ' - CALL XERRWD (MSG, 30, 15, 1, 0, 0, 0, 1, HMAX, ZERO) - GO TO 700 - 616 MSG = 'ZVODE-- HMIN (=R1) .lt. 0.0 ' - CALL XERRWD (MSG, 30, 16, 1, 0, 0, 0, 1, HMIN, ZERO) - GO TO 700 - 617 CONTINUE - MSG='ZVODE-- RWORK length needed, LENRW (=I1), exceeds LRW (=I2)' - CALL XERRWD (MSG, 60, 17, 1, 2, LENRW, LRW, 0, ZERO, ZERO) - GO TO 700 - 618 CONTINUE - MSG='ZVODE-- IWORK length needed, LENIW (=I1), exceeds LIW (=I2)' - CALL XERRWD (MSG, 60, 18, 1, 2, LENIW, LIW, 0, ZERO, ZERO) - GO TO 700 - 619 MSG = 'ZVODE-- RTOL(I1) is R1 .lt. 
0.0 ' - CALL XERRWD (MSG, 40, 19, 1, 1, I, 0, 1, RTOLI, ZERO) - GO TO 700 - 620 MSG = 'ZVODE-- ATOL(I1) is R1 .lt. 0.0 ' - CALL XERRWD (MSG, 40, 20, 1, 1, I, 0, 1, ATOLI, ZERO) - GO TO 700 - 621 EWTI = RWORK(LEWT+I-1) - MSG = 'ZVODE-- EWT(I1) is R1 .le. 0.0 ' - CALL XERRWD (MSG, 40, 21, 1, 1, I, 0, 1, EWTI, ZERO) - GO TO 700 - 622 CONTINUE - MSG='ZVODE-- TOUT (=R1) too close to T(=R2) to start integration' - CALL XERRWD (MSG, 60, 22, 1, 0, 0, 0, 2, TOUT, T) - GO TO 700 - 623 CONTINUE - MSG='ZVODE-- ITASK = I1 and TOUT (=R1) behind TCUR - HU (= R2) ' - CALL XERRWD (MSG, 60, 23, 1, 1, ITASK, 0, 2, TOUT, TP) - GO TO 700 - 624 CONTINUE - MSG='ZVODE-- ITASK = 4 or 5 and TCRIT (=R1) behind TCUR (=R2) ' - CALL XERRWD (MSG, 60, 24, 1, 0, 0, 0, 2, TCRIT, TN) - GO TO 700 - 625 CONTINUE - MSG='ZVODE-- ITASK = 4 or 5 and TCRIT (=R1) behind TOUT (=R2) ' - CALL XERRWD (MSG, 60, 25, 1, 0, 0, 0, 2, TCRIT, TOUT) - GO TO 700 - 626 MSG = 'ZVODE-- At start of problem, too much accuracy ' - CALL XERRWD (MSG, 50, 26, 1, 0, 0, 0, 0, ZERO, ZERO) - MSG=' requested for precision of machine: see TOLSF (=R1) ' - CALL XERRWD (MSG, 60, 26, 1, 0, 0, 0, 1, TOLSF, ZERO) - RWORK(14) = TOLSF - GO TO 700 - 627 MSG='ZVODE-- Trouble from ZVINDY. ITASK = I1, TOUT = R1. 
' - CALL XERRWD (MSG, 60, 27, 1, 1, ITASK, 0, 1, TOUT, ZERO) - GO TO 700 - 628 CONTINUE - MSG='ZVODE-- ZWORK length needed, LENZW (=I1), exceeds LZW (=I2)' - CALL XERRWD (MSG, 60, 17, 1, 2, LENZW, LZW, 0, ZERO, ZERO) -C - 700 CONTINUE - ISTATE = -3 - RETURN -C - 800 MSG = 'ZVODE-- Run aborted: apparent infinite loop ' - CALL XERRWD (MSG, 50, 303, 2, 0, 0, 0, 0, ZERO, ZERO) - RETURN -C----------------------- End of Subroutine ZVODE ----------------------- - END -*DECK ZVHIN - SUBROUTINE ZVHIN (N, T0, Y0, YDOT, F, RPAR, IPAR, TOUT, UROUND, - 1 EWT, ITOL, ATOL, Y, TEMP, H0, NITER, IER) - EXTERNAL F - DOUBLE COMPLEX Y0, YDOT, Y, TEMP - DOUBLE PRECISION T0, TOUT, UROUND, EWT, ATOL, H0 - INTEGER N, IPAR, ITOL, NITER, IER - DIMENSION Y0(*), YDOT(*), EWT(*), ATOL(*), Y(*), - 1 TEMP(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- N, T0, Y0, YDOT, F, RPAR, IPAR, TOUT, UROUND, -C EWT, ITOL, ATOL, Y, TEMP -C Call sequence output -- H0, NITER, IER -C COMMON block variables accessed -- None -C -C Subroutines called by ZVHIN: F -C Function routines called by ZVHIN: ZVNORM -C----------------------------------------------------------------------- -C This routine computes the step size, H0, to be attempted on the -C first step, when the user has not supplied a value for this. -C -C First we check that TOUT - T0 differs significantly from zero. Then -C an iteration is done to approximate the initial second derivative -C and this is used to define h from w.r.m.s.norm(h**2 * yddot / 2) = 1. -C A bias factor of 1/2 is applied to the resulting h. -C The sign of H0 is inferred from the initial values of TOUT and T0. -C -C Communication with ZVHIN is done with the following variables: -C -C N = Size of ODE system, input. -C T0 = Initial value of independent variable, input. -C Y0 = Vector of initial conditions, input. -C YDOT = Vector of initial first derivatives, input. 
-C F = Name of subroutine for right-hand side f(t,y), input. -C RPAR, IPAR = User's real/complex and integer work arrays. -C TOUT = First output value of independent variable -C UROUND = Machine unit roundoff -C EWT, ITOL, ATOL = Error weights and tolerance parameters -C as described in the driver routine, input. -C Y, TEMP = Work arrays of length N. -C H0 = Step size to be attempted, output. -C NITER = Number of iterations (and of f evaluations) to compute H0, -C output. -C IER = The error flag, returned with the value -C IER = 0 if no trouble occurred, or -C IER = -1 if TOUT and T0 are considered too close to proceed. -C----------------------------------------------------------------------- -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION AFI, ATOLI, DELYI, H, HALF, HG, HLB, HNEW, HRAT, - 1 HUB, HUN, PT1, T1, TDIST, TROUND, TWO, YDDNRM - INTEGER I, ITER -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION ZVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE HALF, HUN, PT1, TWO - DATA HALF /0.5D0/, HUN /100.0D0/, PT1 /0.1D0/, TWO /2.0D0/ -C - NITER = 0 - TDIST = ABS(TOUT - T0) - TROUND = UROUND*MAX(ABS(T0),ABS(TOUT)) - IF (TDIST .LT. TWO*TROUND) GO TO 100 -C -C Set a lower bound on h based on the roundoff level in T0 and TOUT. --- - HLB = HUN*TROUND -C Set an upper bound on h based on TOUT-T0 and the initial Y and YDOT. - - HUB = PT1*TDIST - ATOLI = ATOL(1) - DO 10 I = 1, N - IF (ITOL .EQ. 2 .OR. ITOL .EQ. 4) ATOLI = ATOL(I) - DELYI = PT1*ABS(Y0(I)) + ATOLI - AFI = ABS(YDOT(I)) - IF (AFI*HUB .GT. DELYI) HUB = DELYI/AFI - 10 CONTINUE -C -C Set initial guess for h as geometric mean of upper and lower bounds. 
- - ITER = 0 - HG = SQRT(HLB*HUB) -C If the bounds have crossed, exit with the mean value. ---------------- - IF (HUB .LT. HLB) THEN - H0 = HG - GO TO 90 - ENDIF -C -C Looping point for iteration. ----------------------------------------- - 50 CONTINUE -C Estimate the second derivative as a difference quotient in f. -------- - H = SIGN (HG, TOUT - T0) - T1 = T0 + H - DO 60 I = 1, N - 60 Y(I) = Y0(I) + H*YDOT(I) - CALL F (N, T1, Y, TEMP, RPAR, IPAR) - DO 70 I = 1, N - 70 TEMP(I) = (TEMP(I) - YDOT(I))/H - YDDNRM = ZVNORM (N, TEMP, EWT) -C Get the corresponding new value of h. -------------------------------- - IF (YDDNRM*HUB*HUB .GT. TWO) THEN - HNEW = SQRT(TWO/YDDNRM) - ELSE - HNEW = SQRT(HG*HUB) - ENDIF - ITER = ITER + 1 -C----------------------------------------------------------------------- -C Test the stopping conditions. -C Stop if the new and previous h values differ by a factor of .lt. 2. -C Stop if four iterations have been done. Also, stop with previous h -C if HNEW/HG .gt. 2 after first iteration, as this probably means that -C the second derivative value is bad because of cancellation error. -C----------------------------------------------------------------------- - IF (ITER .GE. 4) GO TO 80 - HRAT = HNEW/HG - IF ( (HRAT .GT. HALF) .AND. (HRAT .LT. TWO) ) GO TO 80 - IF ( (ITER .GE. 2) .AND. (HNEW .GT. TWO*HG) ) THEN - HNEW = HG - GO TO 80 - ENDIF - HG = HNEW - GO TO 50 -C -C Iteration done. Apply bounds, bias factor, and sign. Then exit. ---- - 80 H0 = HNEW*HALF - IF (H0 .LT. HLB) H0 = HLB - IF (H0 .GT. HUB) H0 = HUB - 90 H0 = SIGN(H0, TOUT - T0) - NITER = ITER - IER = 0 - RETURN -C Error return for TOUT - T0 too small. 
-------------------------------- - 100 IER = -1 - RETURN -C----------------------- End of Subroutine ZVHIN ----------------------- - END -*DECK ZVINDY - SUBROUTINE ZVINDY (T, K, YH, LDYH, DKY, IFLAG) - DOUBLE COMPLEX YH, DKY - DOUBLE PRECISION T - INTEGER K, LDYH, IFLAG - DIMENSION YH(LDYH,*), DKY(*) -C----------------------------------------------------------------------- -C Call sequence input -- T, K, YH, LDYH -C Call sequence output -- DKY, IFLAG -C COMMON block variables accessed: -C /ZVOD01/ -- H, TN, UROUND, L, N, NQ -C /ZVOD02/ -- HU -C -C Subroutines called by ZVINDY: DZSCAL, XERRWD -C Function routines called by ZVINDY: None -C----------------------------------------------------------------------- -C ZVINDY computes interpolated values of the K-th derivative of the -C dependent variable vector y, and stores it in DKY. This routine -C is called within the package with K = 0 and T = TOUT, but may -C also be called by the user for any K up to the current order. -C (See detailed instructions in the usage documentation.) -C----------------------------------------------------------------------- -C The computed values in DKY are gotten by interpolation using the -C Nordsieck history array YH. This array corresponds uniquely to a -C vector-valued polynomial of degree NQCUR or less, and DKY is set -C to the K-th derivative of this polynomial at T. -C The formula for DKY is: -C q -C DKY(i) = sum c(j,K) * (T - TN)**(j-K) * H**(-j) * YH(i,j+1) -C j=K -C where c(j,K) = j*(j-1)*...*(j-K+1), q = NQCUR, TN = TCUR, H = HCUR. -C The quantities NQ = NQCUR, L = NQ+1, N, TN, and H are -C communicated by COMMON. The above sum is done in reverse order. -C IFLAG is returned negative if either K or T is out of bounds. -C -C Discussion above and comments in driver explain all variables. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block ZVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION C, HUN, R, S, TFUZZ, TN1, TP, ZERO - INTEGER I, IC, J, JB, JB2, JJ, JJ1, JP1 - CHARACTER*80 MSG -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE HUN, ZERO -C - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA HUN /100.0D0/, ZERO /0.0D0/ -C - IFLAG = 0 - IF (K .LT. 0 .OR. K .GT. NQ) GO TO 80 - TFUZZ = HUN*UROUND*SIGN(ABS(TN) + ABS(HU), HU) - TP = TN - HU - TFUZZ - TN1 = TN + TFUZZ - IF ((T-TP)*(T-TN1) .GT. ZERO) GO TO 90 -C - S = (T - TN)/H - IC = 1 - IF (K .EQ. 
0) GO TO 15 - JJ1 = L - K - DO 10 JJ = JJ1, NQ - 10 IC = IC*JJ - 15 C = REAL(IC) - DO 20 I = 1, N - 20 DKY(I) = C*YH(I,L) - IF (K .EQ. NQ) GO TO 55 - JB2 = NQ - K - DO 50 JB = 1, JB2 - J = NQ - JB - JP1 = J + 1 - IC = 1 - IF (K .EQ. 0) GO TO 35 - JJ1 = JP1 - K - DO 30 JJ = JJ1, J - 30 IC = IC*JJ - 35 C = REAL(IC) - DO 40 I = 1, N - 40 DKY(I) = C*YH(I,JP1) + S*DKY(I) - 50 CONTINUE - IF (K .EQ. 0) RETURN - 55 R = H**(-K) - CALL DZSCAL (N, R, DKY, 1) - RETURN -C - 80 MSG = 'ZVINDY-- K (=I1) illegal ' - CALL XERRWD (MSG, 30, 51, 1, 1, K, 0, 0, ZERO, ZERO) - IFLAG = -1 - RETURN - 90 MSG = 'ZVINDY-- T (=R1) illegal ' - CALL XERRWD (MSG, 30, 52, 1, 0, 0, 0, 1, T, ZERO) - MSG=' T not in interval TCUR - HU (= R1) to TCUR (=R2) ' - CALL XERRWD (MSG, 60, 52, 1, 0, 0, 0, 2, TP, TN) - IFLAG = -2 - RETURN -C----------------------- End of Subroutine ZVINDY ---------------------- - END -*DECK ZVSTEP - SUBROUTINE ZVSTEP (Y, YH, LDYH, YH1, EWT, SAVF, VSAV, ACOR, - 1 WM, IWM, F, JAC, PSOL, VNLS, RPAR, IPAR) - EXTERNAL F, JAC, PSOL, VNLS - DOUBLE COMPLEX Y, YH, YH1, SAVF, VSAV, ACOR, WM - DOUBLE PRECISION EWT - INTEGER LDYH, IWM, IPAR - DIMENSION Y(*), YH(LDYH,*), YH1(*), EWT(*), SAVF(*), VSAV(*), - 1 ACOR(*), WM(*), IWM(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- Y, YH, LDYH, YH1, EWT, SAVF, VSAV, -C ACOR, WM, IWM, F, JAC, PSOL, VNLS, RPAR, IPAR -C Call sequence output -- YH, ACOR, WM, IWM -C COMMON block variables accessed: -C /ZVOD01/ ACNRM, EL(13), H, HMIN, HMXI, HNEW, HSCAL, RC, TAU(13), -C TQ(5), TN, JCUR, JSTART, KFLAG, KUTH, -C L, LMAX, MAXORD, N, NEWQ, NQ, NQWAIT -C /ZVOD02/ HU, NCFN, NETF, NFE, NQU, NST -C -C Subroutines called by ZVSTEP: F, DZAXPY, ZCOPY, DZSCAL, -C ZVJUST, VNLS, ZVSET -C Function routines called by ZVSTEP: ZVNORM -C----------------------------------------------------------------------- -C ZVSTEP performs one step of the integration of an initial value -C problem for a system of 
ordinary differential equations. -C ZVSTEP calls subroutine VNLS for the solution of the nonlinear system -C arising in the time step. Thus it is independent of the problem -C Jacobian structure and the type of nonlinear system solution method. -C ZVSTEP returns a completion flag KFLAG (in COMMON). -C A return with KFLAG = -1 or -2 means either ABS(H) = HMIN or 10 -C consecutive failures occurred. On a return with KFLAG negative, -C the values of TN and the YH array are as of the beginning of the last -C step, and H is the last step size attempted. -C -C Communication with ZVSTEP is done with the following variables: -C -C Y = An array of length N used for the dependent variable vector. -C YH = An LDYH by LMAX array containing the dependent variables -C and their approximate scaled derivatives, where -C LMAX = MAXORD + 1. YH(i,j+1) contains the approximate -C j-th derivative of y(i), scaled by H**j/factorial(j) -C (j = 0,1,...,NQ). On entry for the first step, the first -C two columns of YH must be set from the initial values. -C LDYH = A constant integer .ge. N, the first dimension of YH. -C N is the number of ODEs in the system. -C YH1 = A one-dimensional array occupying the same space as YH. -C EWT = An array of length N containing multiplicative weights -C for local error measurements. Local errors in y(i) are -C compared to 1.0/EWT(i) in various error tests. -C SAVF = An array of working storage, of length N. -C also used for input of YH(*,MAXORD+2) when JSTART = -1 -C and MAXORD .lt. the current order NQ. -C VSAV = A work array of length N passed to subroutine VNLS. -C ACOR = A work array of length N, used for the accumulated -C corrections. On a successful return, ACOR(i) contains -C the estimated one-step local error in y(i). -C WM,IWM = Complex and integer work arrays associated with matrix -C operations in VNLS. -C F = Dummy name for the user-supplied subroutine for f. -C JAC = Dummy name for the user-supplied Jacobian subroutine. 
-C PSOL = Dummy name for the subroutine passed to VNLS, for -C possible use there. -C VNLS = Dummy name for the nonlinear system solving subroutine, -C whose real name is dependent on the method used. -C RPAR, IPAR = User's real/complex and integer work arrays. -C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block ZVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION ADDON, BIAS1,BIAS2,BIAS3, CNQUOT, DDN, DSM, DUP, - 1 ETACF, ETAMIN, ETAMX1, ETAMX2, ETAMX3, ETAMXF, - 2 ETAQ, ETAQM1, ETAQP1, FLOTL, ONE, ONEPSM, - 3 R, THRESH, TOLD, ZERO - INTEGER I, I1, I2, IBACK, J, JB, KFC, KFH, MXNCF, NCF, NFLAG -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION ZVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. 
-C----------------------------------------------------------------------- - SAVE ADDON, BIAS1, BIAS2, BIAS3, - 1 ETACF, ETAMIN, ETAMX1, ETAMX2, ETAMX3, ETAMXF, ETAQ, ETAQM1, - 2 KFC, KFH, MXNCF, ONEPSM, THRESH, ONE, ZERO -C----------------------------------------------------------------------- - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA KFC/-3/, KFH/-7/, MXNCF/10/ - DATA ADDON /1.0D-6/, BIAS1 /6.0D0/, BIAS2 /6.0D0/, - 1 BIAS3 /10.0D0/, ETACF /0.25D0/, ETAMIN /0.1D0/, - 2 ETAMXF /0.2D0/, ETAMX1 /1.0D4/, ETAMX2 /10.0D0/, - 3 ETAMX3 /10.0D0/, ONEPSM /1.00001D0/, THRESH /1.5D0/ - DATA ONE/1.0D0/, ZERO/0.0D0/ -C - KFLAG = 0 - TOLD = TN - NCF = 0 - JCUR = 0 - NFLAG = 0 - IF (JSTART .GT. 0) GO TO 20 - IF (JSTART .EQ. -1) GO TO 100 -C----------------------------------------------------------------------- -C On the first call, the order is set to 1, and other variables are -C initialized. ETAMAX is the maximum ratio by which H can be increased -C in a single step. It is normally 10, but is larger during the -C first step to compensate for the small initial H. If a failure -C occurs (in corrector convergence or error test), ETAMAX is set to 1 -C for the next increase. -C----------------------------------------------------------------------- - LMAX = MAXORD + 1 - NQ = 1 - L = 2 - NQNYH = NQ*LDYH - TAU(1) = H - PRL1 = ONE - RC = ZERO - ETAMAX = ETAMX1 - NQWAIT = 2 - HSCAL = H - GO TO 200 -C----------------------------------------------------------------------- -C Take preliminary actions on a normal continuation step (JSTART.GT.0). 
-C If the driver changed H, then ETA must be reset and NEWH set to 1. -C If a change of order was dictated on the previous step, then -C it is done here and appropriate adjustments in the history are made. -C On an order decrease, the history array is adjusted by ZVJUST. -C On an order increase, the history array is augmented by a column. -C On a change of step size H, the history array YH is rescaled. -C----------------------------------------------------------------------- - 20 CONTINUE - IF (KUTH .EQ. 1) THEN - ETA = MIN(ETA,H/HSCAL) - NEWH = 1 - ENDIF - 50 IF (NEWH .EQ. 0) GO TO 200 - IF (NEWQ .EQ. NQ) GO TO 150 - IF (NEWQ .LT. NQ) THEN - CALL ZVJUST (YH, LDYH, -1) - NQ = NEWQ - L = NQ + 1 - NQWAIT = L - GO TO 150 - ENDIF - IF (NEWQ .GT. NQ) THEN - CALL ZVJUST (YH, LDYH, 1) - NQ = NEWQ - L = NQ + 1 - NQWAIT = L - GO TO 150 - ENDIF -C----------------------------------------------------------------------- -C The following block handles preliminaries needed when JSTART = -1. -C If N was reduced, zero out part of YH to avoid undefined references. -C If MAXORD was reduced to a value less than the tentative order NEWQ, -C then NQ is set to MAXORD, and a new H ratio ETA is chosen. -C Otherwise, we take the same preliminary actions as for JSTART .gt. 0. -C In any case, NQWAIT is reset to L = NQ + 1 to prevent further -C changes in order for that many steps. -C The new H ratio ETA is limited by the input H if KUTH = 1, -C by HMIN if KUTH = 0, and by HMXI in any case. -C Finally, the history array YH is rescaled. -C----------------------------------------------------------------------- - 100 CONTINUE - LMAX = MAXORD + 1 - IF (N .EQ. LDYH) GO TO 120 - I1 = 1 + (NEWQ + 1)*LDYH - I2 = (MAXORD + 1)*LDYH - IF (I1 .GT. I2) GO TO 120 - DO 110 I = I1, I2 - 110 YH1(I) = ZERO - 120 IF (NEWQ .LE. MAXORD) GO TO 140 - FLOTL = REAL(LMAX) - IF (MAXORD .LT. NQ-1) THEN - DDN = ZVNORM (N, SAVF, EWT)/TQ(1) - ETA = ONE/((BIAS1*DDN)**(ONE/FLOTL) + ADDON) - ENDIF - IF (MAXORD .EQ. NQ .AND. 
NEWQ .EQ. NQ+1) ETA = ETAQ - IF (MAXORD .EQ. NQ-1 .AND. NEWQ .EQ. NQ+1) THEN - ETA = ETAQM1 - CALL ZVJUST (YH, LDYH, -1) - ENDIF - IF (MAXORD .EQ. NQ-1 .AND. NEWQ .EQ. NQ) THEN - DDN = ZVNORM (N, SAVF, EWT)/TQ(1) - ETA = ONE/((BIAS1*DDN)**(ONE/FLOTL) + ADDON) - CALL ZVJUST (YH, LDYH, -1) - ENDIF - ETA = MIN(ETA,ONE) - NQ = MAXORD - L = LMAX - 140 IF (KUTH .EQ. 1) ETA = MIN(ETA,ABS(H/HSCAL)) - IF (KUTH .EQ. 0) ETA = MAX(ETA,HMIN/ABS(HSCAL)) - ETA = ETA/MAX(ONE,ABS(HSCAL)*HMXI*ETA) - NEWH = 1 - NQWAIT = L - IF (NEWQ .LE. MAXORD) GO TO 50 -C Rescale the history array for a change in H by a factor of ETA. ------ - 150 R = ONE - DO 180 J = 2, L - R = R*ETA - CALL DZSCAL (N, R, YH(1,J), 1 ) - 180 CONTINUE - H = HSCAL*ETA - HSCAL = H - RC = RC*ETA - NQNYH = NQ*LDYH -C----------------------------------------------------------------------- -C This section computes the predicted values by effectively -C multiplying the YH array by the Pascal triangle matrix. -C ZVSET is called to calculate all integration coefficients. -C RC is the ratio of new to old values of the coefficient H/EL(2)=h/l1. -C----------------------------------------------------------------------- - 200 TN = TN + H - I1 = NQNYH + 1 - DO 220 JB = 1, NQ - I1 = I1 - LDYH - DO 210 I = I1, NQNYH - 210 YH1(I) = YH1(I) + YH1(I+LDYH) - 220 CONTINUE - CALL ZVSET - RL1 = ONE/EL(2) - RC = RC*(RL1/PRL1) - PRL1 = RL1 -C -C Call the nonlinear system solver. ------------------------------------ -C - CALL VNLS (Y, YH, LDYH, VSAV, SAVF, EWT, ACOR, IWM, WM, - 1 F, JAC, PSOL, NFLAG, RPAR, IPAR) -C - IF (NFLAG .EQ. 0) GO TO 450 -C----------------------------------------------------------------------- -C The VNLS routine failed to achieve convergence (NFLAG .NE. 0). -C The YH array is retracted to its values before prediction. -C The step size H is reduced and the step is retried, if possible. -C Otherwise, an error exit is taken. 
-C----------------------------------------------------------------------- - NCF = NCF + 1 - NCFN = NCFN + 1 - ETAMAX = ONE - TN = TOLD - I1 = NQNYH + 1 - DO 430 JB = 1, NQ - I1 = I1 - LDYH - DO 420 I = I1, NQNYH - 420 YH1(I) = YH1(I) - YH1(I+LDYH) - 430 CONTINUE - IF (NFLAG .LT. -1) GO TO 680 - IF (ABS(H) .LE. HMIN*ONEPSM) GO TO 670 - IF (NCF .EQ. MXNCF) GO TO 670 - ETA = ETACF - ETA = MAX(ETA,HMIN/ABS(H)) - NFLAG = -1 - GO TO 150 -C----------------------------------------------------------------------- -C The corrector has converged (NFLAG = 0). The local error test is -C made and control passes to statement 500 if it fails. -C----------------------------------------------------------------------- - 450 CONTINUE - DSM = ACNRM/TQ(2) - IF (DSM .GT. ONE) GO TO 500 -C----------------------------------------------------------------------- -C After a successful step, update the YH and TAU arrays and decrement -C NQWAIT. If NQWAIT is then 1 and NQ .lt. MAXORD, then ACOR is saved -C for use in a possible order increase on the next step. -C If ETAMAX = 1 (a failure occurred this step), keep NQWAIT .ge. 2. -C----------------------------------------------------------------------- - KFLAG = 0 - NST = NST + 1 - HU = H - NQU = NQ - DO 470 IBACK = 1, NQ - I = L - IBACK - 470 TAU(I+1) = TAU(I) - TAU(1) = H - DO 480 J = 1, L - CALL DZAXPY (N, EL(J), ACOR, 1, YH(1,J), 1 ) - 480 CONTINUE - NQWAIT = NQWAIT - 1 - IF ((L .EQ. LMAX) .OR. (NQWAIT .NE. 1)) GO TO 490 - CALL ZCOPY (N, ACOR, 1, YH(1,LMAX), 1 ) - CONP = TQ(5) - 490 IF (ETAMAX .NE. ONE) GO TO 560 - IF (NQWAIT .LT. 2) NQWAIT = 2 - NEWQ = NQ - NEWH = 0 - ETA = ONE - HNEW = H - GO TO 690 -C----------------------------------------------------------------------- -C The error test failed. KFLAG keeps track of multiple failures. -C Restore TN and the YH array to their previous values, and prepare -C to try the step again. Compute the optimum step size for the -C same order. 
After repeated failures, H is forced to decrease -C more rapidly. -C----------------------------------------------------------------------- - 500 KFLAG = KFLAG - 1 - NETF = NETF + 1 - NFLAG = -2 - TN = TOLD - I1 = NQNYH + 1 - DO 520 JB = 1, NQ - I1 = I1 - LDYH - DO 510 I = I1, NQNYH - 510 YH1(I) = YH1(I) - YH1(I+LDYH) - 520 CONTINUE - IF (ABS(H) .LE. HMIN*ONEPSM) GO TO 660 - ETAMAX = ONE - IF (KFLAG .LE. KFC) GO TO 530 -C Compute ratio of new H to current H at the current order. ------------ - FLOTL = REAL(L) - ETA = ONE/((BIAS2*DSM)**(ONE/FLOTL) + ADDON) - ETA = MAX(ETA,HMIN/ABS(H),ETAMIN) - IF ((KFLAG .LE. -2) .AND. (ETA .GT. ETAMXF)) ETA = ETAMXF - GO TO 150 -C----------------------------------------------------------------------- -C Control reaches this section if 3 or more consecutive failures -C have occurred. It is assumed that the elements of the YH array -C have accumulated errors of the wrong order. The order is reduced -C by one, if possible. Then H is reduced by a factor of 0.1 and -C the step is retried. After a total of 7 consecutive failures, -C an exit is taken with KFLAG = -1. -C----------------------------------------------------------------------- - 530 IF (KFLAG .EQ. KFH) GO TO 660 - IF (NQ .EQ. 1) GO TO 540 - ETA = MAX(ETAMIN,HMIN/ABS(H)) - CALL ZVJUST (YH, LDYH, -1) - L = NQ - NQ = NQ - 1 - NQWAIT = L - GO TO 150 - 540 ETA = MAX(ETAMIN,HMIN/ABS(H)) - H = H*ETA - HSCAL = H - TAU(1) = H - CALL F (N, TN, Y, SAVF, RPAR, IPAR) - NFE = NFE + 1 - DO 550 I = 1, N - 550 YH(I,2) = H*SAVF(I) - NQWAIT = 10 - GO TO 200 -C----------------------------------------------------------------------- -C If NQWAIT = 0, an increase or decrease in order by one is considered. -C Factors ETAQ, ETAQM1, ETAQP1 are computed by which H could -C be multiplied at order q, q-1, or q+1, respectively. -C The largest of these is determined, and the new order and -C step size set accordingly. -C A change of H or NQ is made only if H increases by at least a -C factor of THRESH. 
If an order change is considered and rejected, -C then NQWAIT is set to 2 (reconsider it after 2 steps). -C----------------------------------------------------------------------- -C Compute ratio of new H to current H at the current order. ------------ - 560 FLOTL = REAL(L) - ETAQ = ONE/((BIAS2*DSM)**(ONE/FLOTL) + ADDON) - IF (NQWAIT .NE. 0) GO TO 600 - NQWAIT = 2 - ETAQM1 = ZERO - IF (NQ .EQ. 1) GO TO 570 -C Compute ratio of new H to current H at the current order less one. --- - DDN = ZVNORM (N, YH(1,L), EWT)/TQ(1) - ETAQM1 = ONE/((BIAS1*DDN)**(ONE/(FLOTL - ONE)) + ADDON) - 570 ETAQP1 = ZERO - IF (L .EQ. LMAX) GO TO 580 -C Compute ratio of new H to current H at current order plus one. ------- - CNQUOT = (TQ(5)/CONP)*(H/TAU(2))**L - DO 575 I = 1, N - 575 SAVF(I) = ACOR(I) - CNQUOT*YH(I,LMAX) - DUP = ZVNORM (N, SAVF, EWT)/TQ(3) - ETAQP1 = ONE/((BIAS3*DUP)**(ONE/(FLOTL + ONE)) + ADDON) - 580 IF (ETAQ .GE. ETAQP1) GO TO 590 - IF (ETAQP1 .GT. ETAQM1) GO TO 620 - GO TO 610 - 590 IF (ETAQ .LT. ETAQM1) GO TO 610 - 600 ETA = ETAQ - NEWQ = NQ - GO TO 630 - 610 ETA = ETAQM1 - NEWQ = NQ - 1 - GO TO 630 - 620 ETA = ETAQP1 - NEWQ = NQ + 1 - CALL ZCOPY (N, ACOR, 1, YH(1,LMAX), 1) -C Test tentative new H against THRESH, ETAMAX, and HMXI, then exit. ---- - 630 IF (ETA .LT. THRESH .OR. ETAMAX .EQ. ONE) GO TO 640 - ETA = MIN(ETA,ETAMAX) - ETA = ETA/MAX(ONE,ABS(H)*HMXI*ETA) - NEWH = 1 - HNEW = H*ETA - GO TO 690 - 640 NEWQ = NQ - NEWH = 0 - ETA = ONE - HNEW = H - GO TO 690 -C----------------------------------------------------------------------- -C All returns are made through this section. -C On a successful return, ETAMAX is reset and ACOR is scaled. -C----------------------------------------------------------------------- - 660 KFLAG = -1 - GO TO 720 - 670 KFLAG = -2 - GO TO 720 - 680 IF (NFLAG .EQ. -2) KFLAG = -3 - IF (NFLAG .EQ. -3) KFLAG = -4 - GO TO 720 - 690 ETAMAX = ETAMX3 - IF (NST .LE. 
10) ETAMAX = ETAMX2 - 700 R = ONE/TQ(2) - CALL DZSCAL (N, R, ACOR, 1) - 720 JSTART = 1 - RETURN -C----------------------- End of Subroutine ZVSTEP ---------------------- - END -*DECK ZVSET - SUBROUTINE ZVSET -C----------------------------------------------------------------------- -C Call sequence communication: None -C COMMON block variables accessed: -C /ZVOD01/ -- EL(13), H, TAU(13), TQ(5), L(= NQ + 1), -C METH, NQ, NQWAIT -C -C Subroutines called by ZVSET: None -C Function routines called by ZVSET: None -C----------------------------------------------------------------------- -C ZVSET is called by ZVSTEP and sets coefficients for use there. -C -C For each order NQ, the coefficients in EL are calculated by use of -C the generating polynomial lambda(x), with coefficients EL(i). -C lambda(x) = EL(1) + EL(2)*x + ... + EL(NQ+1)*(x**NQ). -C For the backward differentiation formulas, -C NQ-1 -C lambda(x) = (1 + x/xi*(NQ)) * product (1 + x/xi(i) ) . -C i = 1 -C For the Adams formulas, -C NQ-1 -C (d/dx) lambda(x) = c * product (1 + x/xi(i) ) , -C i = 1 -C lambda(-1) = 0, lambda(0) = 1, -C where c is a normalization constant. -C In both cases, xi(i) is defined by -C H*xi(i) = t sub n - t sub (n-i) -C = H + TAU(1) + TAU(2) + ... TAU(i-1). -C -C -C In addition to variables described previously, communication -C with ZVSET uses the following: -C TAU = A vector of length 13 containing the past NQ values -C of H. -C EL = A vector of length 13 in which vset stores the -C coefficients for the corrector formula. -C TQ = A vector of length 5 in which vset stores constants -C used for the convergence test, the error test, and the -C selection of H at a new order. -C METH = The basic method indicator. -C NQ = The current order. -C L = NQ + 1, the length of the vector stored in EL, and -C the number of columns of the YH array being used. -C NQWAIT = A counter controlling the frequency of order changes. -C An order change is about to be considered if NQWAIT = 1. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION AHATN0, ALPH0, CNQM1, CORTES, CSUM, ELP, EM, - 1 EM0, FLOTI, FLOTL, FLOTNQ, HSUM, ONE, RXI, RXIS, S, SIX, - 2 T1, T2, T3, T4, T5, T6, TWO, XI, ZERO - INTEGER I, IBACK, J, JP1, NQM1, NQM2 -C - DIMENSION EM(13) -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE CORTES, ONE, SIX, TWO, ZERO -C - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH -C - DATA CORTES /0.1D0/ - DATA ONE /1.0D0/, SIX /6.0D0/, TWO /2.0D0/, ZERO /0.0D0/ -C - FLOTL = REAL(L) - NQM1 = NQ - 1 - NQM2 = NQ - 2 - GO TO (100, 200), METH -C -C Set coefficients for Adams methods. ---------------------------------- - 100 IF (NQ .NE. 
1) GO TO 110 - EL(1) = ONE - EL(2) = ONE - TQ(1) = ONE - TQ(2) = TWO - TQ(3) = SIX*TQ(2) - TQ(5) = ONE - GO TO 300 - 110 HSUM = H - EM(1) = ONE - FLOTNQ = FLOTL - ONE - DO 115 I = 2, L - 115 EM(I) = ZERO - DO 150 J = 1, NQM1 - IF ((J .NE. NQM1) .OR. (NQWAIT .NE. 1)) GO TO 130 - S = ONE - CSUM = ZERO - DO 120 I = 1, NQM1 - CSUM = CSUM + S*EM(I)/REAL(I+1) - 120 S = -S - TQ(1) = EM(NQM1)/(FLOTNQ*CSUM) - 130 RXI = H/HSUM - DO 140 IBACK = 1, J - I = (J + 2) - IBACK - 140 EM(I) = EM(I) + EM(I-1)*RXI - HSUM = HSUM + TAU(J) - 150 CONTINUE -C Compute integral from -1 to 0 of polynomial and of x times it. ------- - S = ONE - EM0 = ZERO - CSUM = ZERO - DO 160 I = 1, NQ - FLOTI = REAL(I) - EM0 = EM0 + S*EM(I)/FLOTI - CSUM = CSUM + S*EM(I)/(FLOTI+ONE) - 160 S = -S -C In EL, form coefficients of normalized integrated polynomial. -------- - S = ONE/EM0 - EL(1) = ONE - DO 170 I = 1, NQ - 170 EL(I+1) = S*EM(I)/REAL(I) - XI = HSUM/H - TQ(2) = XI*EM0/CSUM - TQ(5) = XI/EL(L) - IF (NQWAIT .NE. 1) GO TO 300 -C For higher order control constant, multiply polynomial by 1+x/xi(q). - - RXI = ONE/XI - DO 180 IBACK = 1, NQ - I = (L + 1) - IBACK - 180 EM(I) = EM(I) + EM(I-1)*RXI -C Compute integral of polynomial. -------------------------------------- - S = ONE - CSUM = ZERO - DO 190 I = 1, L - CSUM = CSUM + S*EM(I)/REAL(I+1) - 190 S = -S - TQ(3) = FLOTL*EM0/CSUM - GO TO 300 -C -C Set coefficients for BDF methods. ------------------------------------ - 200 DO 210 I = 3, L - 210 EL(I) = ZERO - EL(1) = ONE - EL(2) = ONE - ALPH0 = -ONE - AHATN0 = -ONE - HSUM = H - RXI = ONE - RXIS = ONE - IF (NQ .EQ. 1) GO TO 240 - DO 230 J = 1, NQM2 -C In EL, construct coefficients of (1+x/xi(1))*...*(1+x/xi(j+1)). 
------ - HSUM = HSUM + TAU(J) - RXI = H/HSUM - JP1 = J + 1 - ALPH0 = ALPH0 - ONE/REAL(JP1) - DO 220 IBACK = 1, JP1 - I = (J + 3) - IBACK - 220 EL(I) = EL(I) + EL(I-1)*RXI - 230 CONTINUE - ALPH0 = ALPH0 - ONE/REAL(NQ) - RXIS = -EL(2) - ALPH0 - HSUM = HSUM + TAU(NQM1) - RXI = H/HSUM - AHATN0 = -EL(2) - RXI - DO 235 IBACK = 1, NQ - I = (NQ + 2) - IBACK - 235 EL(I) = EL(I) + EL(I-1)*RXIS - 240 T1 = ONE - AHATN0 + ALPH0 - T2 = ONE + REAL(NQ)*T1 - TQ(2) = ABS(ALPH0*T2/T1) - TQ(5) = ABS(T2/(EL(L)*RXI/RXIS)) - IF (NQWAIT .NE. 1) GO TO 300 - CNQM1 = RXIS/EL(L) - T3 = ALPH0 + ONE/REAL(NQ) - T4 = AHATN0 + RXI - ELP = T3/(ONE - T4 + T3) - TQ(1) = ABS(ELP/CNQM1) - HSUM = HSUM + TAU(NQ) - RXI = H/HSUM - T5 = ALPH0 - ONE/REAL(NQ+1) - T6 = AHATN0 - RXI - ELP = T2/(ONE - T6 + T5) - TQ(3) = ABS(ELP*RXI*(FLOTL + ONE)*T5) - 300 TQ(4) = CORTES*TQ(2) - RETURN -C----------------------- End of Subroutine ZVSET ----------------------- - END -*DECK ZVJUST - SUBROUTINE ZVJUST (YH, LDYH, IORD) - DOUBLE COMPLEX YH - INTEGER LDYH, IORD - DIMENSION YH(LDYH,*) -C----------------------------------------------------------------------- -C Call sequence input -- YH, LDYH, IORD -C Call sequence output -- YH -C COMMON block input -- NQ, METH, LMAX, HSCAL, TAU(13), N -C COMMON block variables accessed: -C /ZVOD01/ -- HSCAL, TAU(13), LMAX, METH, N, NQ, -C -C Subroutines called by ZVJUST: DZAXPY -C Function routines called by ZVJUST: None -C----------------------------------------------------------------------- -C This subroutine adjusts the YH array on reduction of order, -C and also when the order is increased for the stiff option (METH = 2). -C Communication with ZVJUST uses the following: -C IORD = An integer flag used when METH = 2 to indicate an order -C increase (IORD = +1) or an order decrease (IORD = -1). -C HSCAL = Step size H used in scaling of Nordsieck array YH. -C (If IORD = +1, ZVJUST assumes that HSCAL = TAU(1).) -C See References 1 and 2 for details. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION ALPH0, ALPH1, HSUM, ONE, PROD, T1, XI,XIOLD, ZERO - INTEGER I, IBACK, J, JP1, LP1, NQM1, NQM2, NQP1 -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE ONE, ZERO -C - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH -C - DATA ONE /1.0D0/, ZERO /0.0D0/ -C - IF ((NQ .EQ. 2) .AND. (IORD .NE. 1)) RETURN - NQM1 = NQ - 1 - NQM2 = NQ - 2 - GO TO (100, 200), METH -C----------------------------------------------------------------------- -C Nonstiff option... -C Check to see if the order is being increased or decreased. -C----------------------------------------------------------------------- - 100 CONTINUE - IF (IORD .EQ. 1) GO TO 180 -C Order decrease. 
------------------------------------------------------ - DO 110 J = 1, LMAX - 110 EL(J) = ZERO - EL(2) = ONE - HSUM = ZERO - DO 130 J = 1, NQM2 -C Construct coefficients of x*(x+xi(1))*...*(x+xi(j)). ----------------- - HSUM = HSUM + TAU(J) - XI = HSUM/HSCAL - JP1 = J + 1 - DO 120 IBACK = 1, JP1 - I = (J + 3) - IBACK - 120 EL(I) = EL(I)*XI + EL(I-1) - 130 CONTINUE -C Construct coefficients of integrated polynomial. --------------------- - DO 140 J = 2, NQM1 - 140 EL(J+1) = REAL(NQ)*EL(J)/REAL(J) -C Subtract correction terms from YH array. ----------------------------- - DO 170 J = 3, NQ - DO 160 I = 1, N - 160 YH(I,J) = YH(I,J) - YH(I,L)*EL(J) - 170 CONTINUE - RETURN -C Order increase. ------------------------------------------------------ -C Zero out next column in YH array. ------------------------------------ - 180 CONTINUE - LP1 = L + 1 - DO 190 I = 1, N - 190 YH(I,LP1) = ZERO - RETURN -C----------------------------------------------------------------------- -C Stiff option... -C Check to see if the order is being increased or decreased. -C----------------------------------------------------------------------- - 200 CONTINUE - IF (IORD .EQ. 1) GO TO 300 -C Order decrease. ------------------------------------------------------ - DO 210 J = 1, LMAX - 210 EL(J) = ZERO - EL(3) = ONE - HSUM = ZERO - DO 230 J = 1,NQM2 -C Construct coefficients of x*x*(x+xi(1))*...*(x+xi(j)). --------------- - HSUM = HSUM + TAU(J) - XI = HSUM/HSCAL - JP1 = J + 1 - DO 220 IBACK = 1, JP1 - I = (J + 4) - IBACK - 220 EL(I) = EL(I)*XI + EL(I-1) - 230 CONTINUE -C Subtract correction terms from YH array. ----------------------------- - DO 250 J = 3,NQ - DO 240 I = 1, N - 240 YH(I,J) = YH(I,J) - YH(I,L)*EL(J) - 250 CONTINUE - RETURN -C Order increase. ------------------------------------------------------ - 300 DO 310 J = 1, LMAX - 310 EL(J) = ZERO - EL(3) = ONE - ALPH0 = -ONE - ALPH1 = ONE - PROD = ONE - XIOLD = ONE - HSUM = HSCAL - IF (NQ .EQ. 
1) GO TO 340 - DO 330 J = 1, NQM1 -C Construct coefficients of x*x*(x+xi(1))*...*(x+xi(j)). --------------- - JP1 = J + 1 - HSUM = HSUM + TAU(JP1) - XI = HSUM/HSCAL - PROD = PROD*XI - ALPH0 = ALPH0 - ONE/REAL(JP1) - ALPH1 = ALPH1 + ONE/XI - DO 320 IBACK = 1, JP1 - I = (J + 4) - IBACK - 320 EL(I) = EL(I)*XIOLD + EL(I-1) - XIOLD = XI - 330 CONTINUE - 340 CONTINUE - T1 = (-ALPH0 - ALPH1)/PROD -C Load column L + 1 in YH array. --------------------------------------- - LP1 = L + 1 - DO 350 I = 1, N - 350 YH(I,LP1) = T1*YH(I,LMAX) -C Add correction terms to YH array. ------------------------------------ - NQP1 = NQ + 1 - DO 370 J = 3, NQP1 - CALL DZAXPY (N, EL(J), YH(1,LP1), 1, YH(1,J), 1 ) - 370 CONTINUE - RETURN -C----------------------- End of Subroutine ZVJUST ---------------------- - END -*DECK ZVNLSD - SUBROUTINE ZVNLSD (Y, YH, LDYH, VSAV, SAVF, EWT, ACOR, IWM, WM, - 1 F, JAC, PDUM, NFLAG, RPAR, IPAR) - EXTERNAL F, JAC, PDUM - DOUBLE COMPLEX Y, YH, VSAV, SAVF, ACOR, WM - DOUBLE PRECISION EWT - INTEGER LDYH, IWM, NFLAG, IPAR - DIMENSION Y(*), YH(LDYH,*), VSAV(*), SAVF(*), EWT(*), ACOR(*), - 1 IWM(*), WM(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- Y, YH, LDYH, SAVF, EWT, ACOR, IWM, WM, -C F, JAC, NFLAG, RPAR, IPAR -C Call sequence output -- YH, ACOR, WM, IWM, NFLAG -C COMMON block variables accessed: -C /ZVOD01/ ACNRM, CRATE, DRC, H, RC, RL1, TQ(5), TN, ICF, -C JCUR, METH, MITER, N, NSLP -C /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Subroutines called by ZVNLSD: F, DZAXPY, ZCOPY, DZSCAL, ZVJAC, ZVSOL -C Function routines called by ZVNLSD: ZVNORM -C----------------------------------------------------------------------- -C Subroutine ZVNLSD is a nonlinear system solver, which uses functional -C iteration or a chord (modified Newton) method. For the chord method -C direct linear algebraic system solvers are used. 
Subroutine ZVNLSD -C then handles the corrector phase of this integration package. -C -C Communication with ZVNLSD is done with the following variables. (For -C more details, please see the comments in the driver subroutine.) -C -C Y = The dependent variable, a vector of length N, input. -C YH = The Nordsieck (Taylor) array, LDYH by LMAX, input -C and output. On input, it contains predicted values. -C LDYH = A constant .ge. N, the first dimension of YH, input. -C VSAV = Unused work array. -C SAVF = A work array of length N. -C EWT = An error weight vector of length N, input. -C ACOR = A work array of length N, used for the accumulated -C corrections to the predicted y vector. -C WM,IWM = Complex and integer work arrays associated with matrix -C operations in chord iteration (MITER .ne. 0). -C F = Dummy name for user-supplied routine for f. -C JAC = Dummy name for user-supplied Jacobian routine. -C PDUM = Unused dummy subroutine name. Included for uniformity -C over collection of integrators. -C NFLAG = Input/output flag, with values and meanings as follows: -C INPUT -C 0 first call for this time step. -C -1 convergence failure in previous call to ZVNLSD. -C -2 error test failure in ZVSTEP. -C OUTPUT -C 0 successful completion of nonlinear solver. -C -1 convergence failure or singular matrix. -C -2 unrecoverable error in matrix preprocessing -C (cannot occur here). -C -3 unrecoverable error in solution (cannot occur -C here). -C RPAR, IPAR = User's real/complex and integer work arrays. -C -C IPUP = Own variable flag with values and meanings as follows: -C 0, do not update the Newton matrix. -C MITER .ne. 0, update Newton matrix, because it is the -C initial step, order was changed, the error -C test failed, or an update is indicated by -C the scalar RC or step counter NST. -C -C For more details, see comments in driver subroutine. 
-C----------------------------------------------------------------------- -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block ZVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE PRECISION CCMAX, CRDOWN, CSCALE, DCON, DEL, DELP, ONE, - 1 RDIV, TWO, ZERO - INTEGER I, IERPJ, IERSL, M, MAXCOR, MSBP -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION ZVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. 
-C----------------------------------------------------------------------- - SAVE CCMAX, CRDOWN, MAXCOR, MSBP, RDIV, ONE, TWO, ZERO -C - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA CCMAX /0.3D0/, CRDOWN /0.3D0/, MAXCOR /3/, MSBP /20/, - 1 RDIV /2.0D0/ - DATA ONE /1.0D0/, TWO /2.0D0/, ZERO /0.0D0/ -C----------------------------------------------------------------------- -C On the first step, on a change of method order, or after a -C nonlinear convergence failure with NFLAG = -2, set IPUP = MITER -C to force a Jacobian update when MITER .ne. 0. -C----------------------------------------------------------------------- - IF (JSTART .EQ. 0) NSLP = 0 - IF (NFLAG .EQ. 0) ICF = 0 - IF (NFLAG .EQ. -2) IPUP = MITER - IF ( (JSTART .EQ. 0) .OR. (JSTART .EQ. -1) ) IPUP = MITER -C If this is functional iteration, set CRATE .eq. 1 and drop to 220 - IF (MITER .EQ. 0) THEN - CRATE = ONE - GO TO 220 - ENDIF -C----------------------------------------------------------------------- -C RC is the ratio of new to old values of the coefficient H/EL(2)=h/l1. -C When RC differs from 1 by more than CCMAX, IPUP is set to MITER -C to force ZVJAC to be called, if a Jacobian is involved. -C In any case, ZVJAC is called at least every MSBP steps. -C----------------------------------------------------------------------- - DRC = ABS(RC-ONE) - IF (DRC .GT. CCMAX .OR. NST .GE. NSLP+MSBP) IPUP = MITER -C----------------------------------------------------------------------- -C Up to MAXCOR corrector iterations are taken. A convergence test is -C made on the r.m.s. 
norm of each correction, weighted by the error -C weight vector EWT. The sum of the corrections is accumulated in the -C vector ACOR(i). The YH array is not altered in the corrector loop. -C----------------------------------------------------------------------- - 220 M = 0 - DELP = ZERO - CALL ZCOPY (N, YH(1,1), 1, Y, 1 ) - CALL F (N, TN, Y, SAVF, RPAR, IPAR) - NFE = NFE + 1 - IF (IPUP .LE. 0) GO TO 250 -C----------------------------------------------------------------------- -C If indicated, the matrix P = I - h*rl1*J is reevaluated and -C preprocessed before starting the corrector iteration. IPUP is set -C to 0 as an indicator that this has been done. -C----------------------------------------------------------------------- - CALL ZVJAC (Y, YH, LDYH, EWT, ACOR, SAVF, WM, IWM, F, JAC, IERPJ, - 1 RPAR, IPAR) - IPUP = 0 - RC = ONE - DRC = ZERO - CRATE = ONE - NSLP = NST -C If matrix is singular, take error return to force cut in step size. -- - IF (IERPJ .NE. 0) GO TO 430 - 250 DO 260 I = 1,N - 260 ACOR(I) = ZERO -C This is a looping point for the corrector iteration. ----------------- - 270 IF (MITER .NE. 0) GO TO 350 -C----------------------------------------------------------------------- -C In the case of functional iteration, update Y directly from -C the result of the last function evaluation. -C----------------------------------------------------------------------- - DO 280 I = 1,N - 280 SAVF(I) = RL1*(H*SAVF(I) - YH(I,2)) - DO 290 I = 1,N - 290 Y(I) = SAVF(I) - ACOR(I) - DEL = ZVNORM (N, Y, EWT) - DO 300 I = 1,N - 300 Y(I) = YH(I,1) + SAVF(I) - CALL ZCOPY (N, SAVF, 1, ACOR, 1) - GO TO 400 -C----------------------------------------------------------------------- -C In the case of the chord method, compute the corrector error, -C and solve the linear system with that as right-hand side and -C P as coefficient matrix. The correction is scaled by the factor -C 2/(1+RC) to account for changes in h*rl1 since the last ZVJAC call. 
-C----------------------------------------------------------------------- - 350 DO 360 I = 1,N - 360 Y(I) = (RL1*H)*SAVF(I) - (RL1*YH(I,2) + ACOR(I)) - CALL ZVSOL (WM, IWM, Y, IERSL) - NNI = NNI + 1 - IF (IERSL .GT. 0) GO TO 410 - IF (METH .EQ. 2 .AND. RC .NE. ONE) THEN - CSCALE = TWO/(ONE + RC) - CALL DZSCAL (N, CSCALE, Y, 1) - ENDIF - DEL = ZVNORM (N, Y, EWT) - CALL DZAXPY (N, ONE, Y, 1, ACOR, 1) - DO 380 I = 1,N - 380 Y(I) = YH(I,1) + ACOR(I) -C----------------------------------------------------------------------- -C Test for convergence. If M .gt. 0, an estimate of the convergence -C rate constant is stored in CRATE, and this is used in the test. -C----------------------------------------------------------------------- - 400 IF (M .NE. 0) CRATE = MAX(CRDOWN*CRATE,DEL/DELP) - DCON = DEL*MIN(ONE,CRATE)/TQ(4) - IF (DCON .LE. ONE) GO TO 450 - M = M + 1 - IF (M .EQ. MAXCOR) GO TO 410 - IF (M .GE. 2 .AND. DEL .GT. RDIV*DELP) GO TO 410 - DELP = DEL - CALL F (N, TN, Y, SAVF, RPAR, IPAR) - NFE = NFE + 1 - GO TO 270 -C - 410 IF (MITER .EQ. 0 .OR. JCUR .EQ. 1) GO TO 430 - ICF = 1 - IPUP = MITER - GO TO 220 -C - 430 CONTINUE - NFLAG = -1 - ICF = 2 - IPUP = MITER - RETURN -C -C Return for successful step. ------------------------------------------ - 450 NFLAG = 0 - JCUR = 0 - ICF = 0 - IF (M .EQ. 0) ACNRM = DEL - IF (M .GT. 
0) ACNRM = ZVNORM (N, ACOR, EWT) - RETURN -C----------------------- End of Subroutine ZVNLSD ---------------------- - END -*DECK ZVJAC - SUBROUTINE ZVJAC (Y, YH, LDYH, EWT, FTEM, SAVF, WM, IWM, F, JAC, - 1 IERPJ, RPAR, IPAR) - EXTERNAL F, JAC - DOUBLE COMPLEX Y, YH, FTEM, SAVF, WM - DOUBLE PRECISION EWT - INTEGER LDYH, IWM, IERPJ, IPAR - DIMENSION Y(*), YH(LDYH,*), EWT(*), FTEM(*), SAVF(*), - 1 WM(*), IWM(*), RPAR(*), IPAR(*) -C----------------------------------------------------------------------- -C Call sequence input -- Y, YH, LDYH, EWT, FTEM, SAVF, WM, IWM, -C F, JAC, RPAR, IPAR -C Call sequence output -- WM, IWM, IERPJ -C COMMON block variables accessed: -C /ZVOD01/ CCMXJ, DRC, H, HRL1, RL1, SRUR, TN, UROUND, ICF, JCUR, -C LOCJS, MITER, MSBJ, N, NSLJ -C /ZVOD02/ NFE, NST, NJE, NLU -C -C Subroutines called by ZVJAC: F, JAC, ZACOPY, ZCOPY, ZGBFA, ZGEFA, -C DZSCAL -C Function routines called by ZVJAC: ZVNORM -C----------------------------------------------------------------------- -C ZVJAC is called by ZVNLSD to compute and process the matrix -C P = I - h*rl1*J , where J is an approximation to the Jacobian. -C Here J is computed by the user-supplied routine JAC if -C MITER = 1 or 4, or by finite differencing if MITER = 2, 3, or 5. -C If MITER = 3, a diagonal approximation to J is used. -C If JSV = -1, J is computed from scratch in all cases. -C If JSV = 1 and MITER = 1, 2, 4, or 5, and if the saved value of J is -C considered acceptable, then P is constructed from the saved J. -C J is stored in wm and replaced by P. If MITER .ne. 3, P is then -C subjected to LU decomposition in preparation for later solution -C of linear systems with P as coefficient matrix. This is done -C by ZGEFA if MITER = 1 or 2, and by ZGBFA if MITER = 4 or 5. -C -C Communication with ZVJAC is done with the following variables. (For -C more details, please see the comments in the driver subroutine.) -C Y = Vector containing predicted values on entry. 
-C YH = The Nordsieck array, an LDYH by LMAX array, input. -C LDYH = A constant .ge. N, the first dimension of YH, input. -C EWT = An error weight vector of length N. -C SAVF = Array containing f evaluated at predicted y, input. -C WM = Complex work space for matrices. In the output, it -C contains the inverse diagonal matrix if MITER = 3 and -C the LU decomposition of P if MITER is 1, 2 , 4, or 5. -C Storage of the saved Jacobian starts at WM(LOCJS). -C IWM = Integer work space containing pivot information, -C starting at IWM(31), if MITER is 1, 2, 4, or 5. -C IWM also contains band parameters ML = IWM(1) and -C MU = IWM(2) if MITER is 4 or 5. -C F = Dummy name for the user-supplied subroutine for f. -C JAC = Dummy name for the user-supplied Jacobian subroutine. -C RPAR, IPAR = User's real/complex and integer work arrays. -C RL1 = 1/EL(2) (input). -C IERPJ = Output error flag, = 0 if no trouble, 1 if the P -C matrix is found to be singular. -C JCUR = Output flag to indicate whether the Jacobian matrix -C (or approximation) is now current. -C JCUR = 0 means J is not current. -C JCUR = 1 means J is current. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for labeled COMMON block ZVOD02 -------------------- -C - DOUBLE PRECISION HU - INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C -C Type declarations for local variables -------------------------------- -C - DOUBLE COMPLEX DI, R1, YI, YJ, YJJ - DOUBLE PRECISION CON, FAC, ONE, PT1, R, R0, THOU, ZERO - INTEGER I, I1, I2, IER, II, J, J1, JJ, JOK, LENP, MBA, MBAND, - 1 MEB1, MEBAND, ML, ML1, MU, NP1 -C -C Type declaration for function subroutines called --------------------- -C - DOUBLE PRECISION ZVNORM -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this subroutine. 
-C----------------------------------------------------------------------- - SAVE ONE, PT1, THOU, ZERO -C----------------------------------------------------------------------- - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH - COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST -C - DATA ONE /1.0D0/, THOU /1000.0D0/, ZERO /0.0D0/, PT1 /0.1D0/ -C - IERPJ = 0 - HRL1 = H*RL1 -C See whether J should be evaluated (JOK = -1) or not (JOK = 1). ------- - JOK = JSV - IF (JSV .EQ. 1) THEN - IF (NST .EQ. 0 .OR. NST .GT. NSLJ+MSBJ) JOK = -1 - IF (ICF .EQ. 1 .AND. DRC .LT. CCMXJ) JOK = -1 - IF (ICF .EQ. 2) JOK = -1 - ENDIF -C End of setting JOK. -------------------------------------------------- -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 1) THEN -C If JOK = -1 and MITER = 1, call JAC to evaluate Jacobian. ------------ - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - LENP = N*N - DO 110 I = 1,LENP - 110 WM(I) = ZERO - CALL JAC (N, TN, Y, 0, 0, WM, N, RPAR, IPAR) - IF (JSV .EQ. 1) CALL ZCOPY (LENP, WM, 1, WM(LOCJS), 1) - ENDIF -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 2) THEN -C If MITER = 2, make N calls to F to approximate the Jacobian. --------- - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - FAC = ZVNORM (N, SAVF, EWT) - R0 = THOU*ABS(H)*UROUND*REAL(N)*FAC - IF (R0 .EQ. ZERO) R0 = ONE - J1 = 0 - DO 230 J = 1,N - YJ = Y(J) - R = MAX(SRUR*ABS(YJ),R0/EWT(J)) - Y(J) = Y(J) + R - FAC = ONE/R - CALL F (N, TN, Y, FTEM, RPAR, IPAR) - DO 220 I = 1,N - 220 WM(I+J1) = (FTEM(I) - SAVF(I))*FAC - Y(J) = YJ - J1 = J1 + N - 230 CONTINUE - NFE = NFE + N - LENP = N*N - IF (JSV .EQ. 1) CALL ZCOPY (LENP, WM, 1, WM(LOCJS), 1) - ENDIF -C - IF (JOK .EQ. 1 .AND. (MITER .EQ. 1 .OR. 
MITER .EQ. 2)) THEN - JCUR = 0 - LENP = N*N - CALL ZCOPY (LENP, WM(LOCJS), 1, WM, 1) - ENDIF -C - IF (MITER .EQ. 1 .OR. MITER .EQ. 2) THEN -C Multiply Jacobian by scalar, add identity, and do LU decomposition. -- - CON = -HRL1 - CALL DZSCAL (LENP, CON, WM, 1) - J = 1 - NP1 = N + 1 - DO 250 I = 1,N - WM(J) = WM(J) + ONE - 250 J = J + NP1 - NLU = NLU + 1 - CALL ZGEFA (WM, N, N, IWM(31), IER) - IF (IER .NE. 0) IERPJ = 1 - RETURN - ENDIF -C End of code block for MITER = 1 or 2. -------------------------------- -C - IF (MITER .EQ. 3) THEN -C If MITER = 3, construct a diagonal approximation to J and P. --------- - NJE = NJE + 1 - JCUR = 1 - R = RL1*PT1 - DO 310 I = 1,N - 310 Y(I) = Y(I) + R*(H*SAVF(I) - YH(I,2)) - CALL F (N, TN, Y, WM, RPAR, IPAR) - NFE = NFE + 1 - DO 320 I = 1,N - R1 = H*SAVF(I) - YH(I,2) - DI = PT1*R1 - H*(WM(I) - SAVF(I)) - WM(I) = ONE - IF (ABS(R1) .LT. UROUND/EWT(I)) GO TO 320 - IF (ABS(DI) .EQ. ZERO) GO TO 330 - WM(I) = PT1*R1/DI - 320 CONTINUE - RETURN - 330 IERPJ = 1 - RETURN - ENDIF -C End of code block for MITER = 3. ------------------------------------- -C -C Set constants for MITER = 4 or 5. ------------------------------------ - ML = IWM(1) - MU = IWM(2) - ML1 = ML + 1 - MBAND = ML + MU + 1 - MEBAND = MBAND + ML - LENP = MEBAND*N -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 4) THEN -C If JOK = -1 and MITER = 4, call JAC to evaluate Jacobian. ------------ - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - DO 410 I = 1,LENP - 410 WM(I) = ZERO - CALL JAC (N, TN, Y, ML, MU, WM(ML1), MEBAND, RPAR, IPAR) - IF (JSV .EQ. 1) - 1 CALL ZACOPY (MBAND, N, WM(ML1), MEBAND, WM(LOCJS), MBAND) - ENDIF -C - IF (JOK .EQ. -1 .AND. MITER .EQ. 5) THEN -C If MITER = 5, make ML+MU+1 calls to F to approximate the Jacobian. --- - NJE = NJE + 1 - NSLJ = NST - JCUR = 1 - MBA = MIN(MBAND,N) - MEB1 = MEBAND - 1 - FAC = ZVNORM (N, SAVF, EWT) - R0 = THOU*ABS(H)*UROUND*REAL(N)*FAC - IF (R0 .EQ. 
ZERO) R0 = ONE - DO 560 J = 1,MBA - DO 530 I = J,N,MBAND - YI = Y(I) - R = MAX(SRUR*ABS(YI),R0/EWT(I)) - 530 Y(I) = Y(I) + R - CALL F (N, TN, Y, FTEM, RPAR, IPAR) - DO 550 JJ = J,N,MBAND - Y(JJ) = YH(JJ,1) - YJJ = Y(JJ) - R = MAX(SRUR*ABS(YJJ),R0/EWT(JJ)) - FAC = ONE/R - I1 = MAX(JJ-MU,1) - I2 = MIN(JJ+ML,N) - II = JJ*MEB1 - ML - DO 540 I = I1,I2 - 540 WM(II+I) = (FTEM(I) - SAVF(I))*FAC - 550 CONTINUE - 560 CONTINUE - NFE = NFE + MBA - IF (JSV .EQ. 1) - 1 CALL ZACOPY (MBAND, N, WM(ML1), MEBAND, WM(LOCJS), MBAND) - ENDIF -C - IF (JOK .EQ. 1) THEN - JCUR = 0 - CALL ZACOPY (MBAND, N, WM(LOCJS), MBAND, WM(ML1), MEBAND) - ENDIF -C -C Multiply Jacobian by scalar, add identity, and do LU decomposition. - CON = -HRL1 - CALL DZSCAL (LENP, CON, WM, 1 ) - II = MBAND - DO 580 I = 1,N - WM(II) = WM(II) + ONE - 580 II = II + MEBAND - NLU = NLU + 1 - CALL ZGBFA (WM, MEBAND, N, ML, MU, IWM(31), IER) - IF (IER .NE. 0) IERPJ = 1 - RETURN -C End of code block for MITER = 4 or 5. -------------------------------- -C -C----------------------- End of Subroutine ZVJAC ----------------------- - END -*DECK ZACOPY - SUBROUTINE ZACOPY (NROW, NCOL, A, NROWA, B, NROWB) - DOUBLE COMPLEX A, B - INTEGER NROW, NCOL, NROWA, NROWB - DIMENSION A(NROWA,NCOL), B(NROWB,NCOL) -C----------------------------------------------------------------------- -C Call sequence input -- NROW, NCOL, A, NROWA, NROWB -C Call sequence output -- B -C COMMON block variables accessed -- None -C -C Subroutines called by ZACOPY: ZCOPY -C Function routines called by ZACOPY: None -C----------------------------------------------------------------------- -C This routine copies one rectangular array, A, to another, B, -C where A and B may have different row dimensions, NROWA and NROWB. -C The data copied consists of NROW rows and NCOL columns. 
-C----------------------------------------------------------------------- - INTEGER IC -C - DO 20 IC = 1,NCOL - CALL ZCOPY (NROW, A(1,IC), 1, B(1,IC), 1) - 20 CONTINUE -C - RETURN -C----------------------- End of Subroutine ZACOPY ---------------------- - END -*DECK ZVSOL - SUBROUTINE ZVSOL (WM, IWM, X, IERSL) - DOUBLE COMPLEX WM, X - INTEGER IWM, IERSL - DIMENSION WM(*), IWM(*), X(*) -C----------------------------------------------------------------------- -C Call sequence input -- WM, IWM, X -C Call sequence output -- X, IERSL -C COMMON block variables accessed: -C /ZVOD01/ -- H, HRL1, RL1, MITER, N -C -C Subroutines called by ZVSOL: ZGESL, ZGBSL -C Function routines called by ZVSOL: None -C----------------------------------------------------------------------- -C This routine manages the solution of the linear system arising from -C a chord iteration. It is called if MITER .ne. 0. -C If MITER is 1 or 2, it calls ZGESL to accomplish this. -C If MITER = 3 it updates the coefficient H*RL1 in the diagonal -C matrix, and then computes the solution. -C If MITER is 4 or 5, it calls ZGBSL. -C Communication with ZVSOL uses the following variables: -C WM = Real work space containing the inverse diagonal matrix if -C MITER = 3 and the LU decomposition of the matrix otherwise. -C IWM = Integer work space containing pivot information, starting at -C IWM(31), if MITER is 1, 2, 4, or 5. IWM also contains band -C parameters ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. -C X = The right-hand side vector on input, and the solution vector -C on output, of length N. -C IERSL = Output flag. IERSL = 0 if no trouble occurred. -C IERSL = 1 if a singular matrix arose with MITER = 3. 
-C----------------------------------------------------------------------- -C -C Type declarations for labeled COMMON block ZVOD01 -------------------- -C - DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, - 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND - INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 4 NSLP, NYH -C -C Type declarations for local variables -------------------------------- -C - DOUBLE COMPLEX DI - DOUBLE PRECISION ONE, PHRL1, R, ZERO - INTEGER I, MEBAND, ML, MU -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE ONE, ZERO -C - COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, - 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, - 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, - 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, - 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, - 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, - 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, - 7 NSLP, NYH -C - DATA ONE /1.0D0/, ZERO /0.0D0/ -C - IERSL = 0 - GO TO (100, 100, 300, 400, 400), MITER - 100 CALL ZGESL (WM, N, N, IWM(31), X, 0) - RETURN -C - 300 PHRL1 = HRL1 - HRL1 = H*RL1 - IF (HRL1 .EQ. PHRL1) GO TO 330 - R = HRL1/PHRL1 - DO 320 I = 1,N - DI = ONE - R*(ONE - ONE/WM(I)) - IF (ABS(DI) .EQ. 
ZERO) GO TO 390 - 320 WM(I) = ONE/DI -C - 330 DO 340 I = 1,N - 340 X(I) = WM(I)*X(I) - RETURN - 390 IERSL = 1 - RETURN -C - 400 ML = IWM(1) - MU = IWM(2) - MEBAND = 2*ML + MU + 1 - CALL ZGBSL (WM, MEBAND, N, ML, MU, IWM(31), X, 0) - RETURN -C----------------------- End of Subroutine ZVSOL ----------------------- - END -*DECK ZVSRCO - SUBROUTINE ZVSRCO (RSAV, ISAV, JOB) - DOUBLE PRECISION RSAV - INTEGER ISAV, JOB - DIMENSION RSAV(*), ISAV(*) -C----------------------------------------------------------------------- -C Call sequence input -- RSAV, ISAV, JOB -C Call sequence output -- RSAV, ISAV -C COMMON block variables accessed -- All of /ZVOD01/ and /ZVOD02/ -C -C Subroutines/functions called by ZVSRCO: None -C----------------------------------------------------------------------- -C This routine saves or restores (depending on JOB) the contents of the -C COMMON blocks ZVOD01 and ZVOD02, which are used internally by ZVODE. -C -C RSAV = real array of length 51 or more. -C ISAV = integer array of length 41 or more. -C JOB = flag indicating to save or restore the COMMON blocks: -C JOB = 1 if COMMON is to be saved (written to RSAV/ISAV). -C JOB = 2 if COMMON is to be restored (read from RSAV/ISAV). -C A call with JOB = 2 presumes a prior call with JOB = 1. -C----------------------------------------------------------------------- - DOUBLE PRECISION RVOD1, RVOD2 - INTEGER IVOD1, IVOD2 - INTEGER I, LENIV1, LENIV2, LENRV1, LENRV2 -C----------------------------------------------------------------------- -C The following Fortran-77 declaration is to cause the values of the -C listed (local) variables to be saved between calls to this integrator. -C----------------------------------------------------------------------- - SAVE LENRV1, LENIV1, LENRV2, LENIV2 -C - COMMON /ZVOD01/ RVOD1(50), IVOD1(33) - COMMON /ZVOD02/ RVOD2(1), IVOD2(8) - DATA LENRV1/50/, LENIV1/33/, LENRV2/1/, LENIV2/8/ -C - IF (JOB .EQ. 
2) GO TO 100 - DO 10 I = 1,LENRV1 - 10 RSAV(I) = RVOD1(I) - DO 15 I = 1,LENRV2 - 15 RSAV(LENRV1+I) = RVOD2(I) -C - DO 20 I = 1,LENIV1 - 20 ISAV(I) = IVOD1(I) - DO 25 I = 1,LENIV2 - 25 ISAV(LENIV1+I) = IVOD2(I) -C - RETURN -C - 100 CONTINUE - DO 110 I = 1,LENRV1 - 110 RVOD1(I) = RSAV(I) - DO 115 I = 1,LENRV2 - 115 RVOD2(I) = RSAV(LENRV1+I) -C - DO 120 I = 1,LENIV1 - 120 IVOD1(I) = ISAV(I) - DO 125 I = 1,LENIV2 - 125 IVOD2(I) = ISAV(LENIV1+I) -C - RETURN -C----------------------- End of Subroutine ZVSRCO ---------------------- - END -*DECK ZEWSET - SUBROUTINE ZEWSET (N, ITOL, RTOL, ATOL, YCUR, EWT) -C***BEGIN PROLOGUE ZEWSET -C***SUBSIDIARY -C***PURPOSE Set error weight vector. -C***TYPE DOUBLE PRECISION (SEWSET-S, DEWSET-D, ZEWSET-Z) -C***AUTHOR Hindmarsh, Alan C., (LLNL) -C***DESCRIPTION -C -C This subroutine sets the error weight vector EWT according to -C EWT(i) = RTOL(i)*ABS(YCUR(i)) + ATOL(i), i = 1,...,N, -C with the subscript on RTOL and/or ATOL possibly replaced by 1 above, -C depending on the value of ITOL. -C -C***SEE ALSO DLSODE -C***ROUTINES CALLED (NONE) -C***REVISION HISTORY (YYMMDD) -C 060502 DATE WRITTEN, modified from DEWSET of 930809. -C***END PROLOGUE ZEWSET - DOUBLE COMPLEX YCUR - DOUBLE PRECISION RTOL, ATOL, EWT - INTEGER N, ITOL - INTEGER I - DIMENSION RTOL(*), ATOL(*), YCUR(N), EWT(N) -C -C***FIRST EXECUTABLE STATEMENT ZEWSET - GO TO (10, 20, 30, 40), ITOL - 10 CONTINUE - DO 15 I = 1,N - 15 EWT(I) = RTOL(1)*ABS(YCUR(I)) + ATOL(1) - RETURN - 20 CONTINUE - DO 25 I = 1,N - 25 EWT(I) = RTOL(1)*ABS(YCUR(I)) + ATOL(I) - RETURN - 30 CONTINUE - DO 35 I = 1,N - 35 EWT(I) = RTOL(I)*ABS(YCUR(I)) + ATOL(1) - RETURN - 40 CONTINUE - DO 45 I = 1,N - 45 EWT(I) = RTOL(I)*ABS(YCUR(I)) + ATOL(I) - RETURN -C----------------------- END OF SUBROUTINE ZEWSET ---------------------- - END -*DECK ZVNORM - DOUBLE PRECISION FUNCTION ZVNORM (N, V, W) -C***BEGIN PROLOGUE ZVNORM -C***SUBSIDIARY -C***PURPOSE Weighted root-mean-square vector norm. 
-C***TYPE DOUBLE COMPLEX (SVNORM-S, DVNORM-D, ZVNORM-Z) -C***AUTHOR Hindmarsh, Alan C., (LLNL) -C***DESCRIPTION -C -C This function routine computes the weighted root-mean-square norm -C of the vector of length N contained in the double complex array V, -C with weights contained in the array W of length N: -C ZVNORM = SQRT( (1/N) * SUM( abs(V(i))**2 * W(i)**2 ) -C The squared absolute value abs(v)**2 is computed by ZABSSQ. -C -C***SEE ALSO DLSODE -C***ROUTINES CALLED ZABSSQ -C***REVISION HISTORY (YYMMDD) -C 060502 DATE WRITTEN, modified from DVNORM of 930809. -C***END PROLOGUE ZVNORM - DOUBLE COMPLEX V - DOUBLE PRECISION W, SUM, ZABSSQ - INTEGER N, I - DIMENSION V(N), W(N) -C -C***FIRST EXECUTABLE STATEMENT ZVNORM - SUM = 0.0D0 - DO 10 I = 1,N - 10 SUM = SUM + ZABSSQ(V(I)) * W(I)**2 - ZVNORM = SQRT(SUM/N) - RETURN -C----------------------- END OF FUNCTION ZVNORM ------------------------ - END -*DECK ZABSSQ - DOUBLE PRECISION FUNCTION ZABSSQ(Z) -C***BEGIN PROLOGUE ZABSSQ -C***SUBSIDIARY -C***PURPOSE Squared absolute value of a double complex number. -C***TYPE DOUBLE PRECISION (ZABSSQ-Z) -C***AUTHOR Hindmarsh, Alan C., (LLNL) -C***DESCRIPTION -C -C This function routine computes the square of the absolute value of -C a double precision complex number Z, -C ZABSSQ = DREAL(Z)**2 * DIMAG(Z)**2 -C***REVISION HISTORY (YYMMDD) -C 060502 DATE WRITTEN. -C***END PROLOGUE ZABSSQ - DOUBLE COMPLEX Z - ZABSSQ = DREAL(Z)**2 + DIMAG(Z)**2 - RETURN -C----------------------- END OF FUNCTION ZABSSQ ------------------------ - END -*DECK DZSCAL - SUBROUTINE DZSCAL(N, DA, ZX, INCX) -C***BEGIN PROLOGUE DZSCAL -C***SUBSIDIARY -C***PURPOSE Scale a double complex vector by a double prec. constant. -C***TYPE DOUBLE PRECISION (DZSCAL-Z) -C***AUTHOR Hindmarsh, Alan C., (LLNL) -C***DESCRIPTION -C Scales a double complex vector by a double precision constant. -C Minor modification of BLAS routine ZSCAL. -C***REVISION HISTORY (YYMMDD) -C 060530 DATE WRITTEN. 
-C***END PROLOGUE DZSCAL - DOUBLE COMPLEX ZX(*) - DOUBLE PRECISION DA - INTEGER I,INCX,IX,N -C - IF( N.LE.0 .OR. INCX.LE.0 )RETURN - IF(INCX.EQ.1)GO TO 20 -C Code for increment not equal to 1 - IX = 1 - DO 10 I = 1,N - ZX(IX) = DA*ZX(IX) - IX = IX + INCX - 10 CONTINUE - RETURN -C Code for increment equal to 1 - 20 DO 30 I = 1,N - ZX(I) = DA*ZX(I) - 30 CONTINUE - RETURN - END -*DECK DZAXPY - SUBROUTINE DZAXPY(N, DA, ZX, INCX, ZY, INCY) -C***BEGIN PROLOGUE DZAXPY -C***PURPOSE Real constant times a complex vector plus a complex vector. -C***TYPE DOUBLE PRECISION (DZAXPY-Z) -C***AUTHOR Hindmarsh, Alan C., (LLNL) -C***DESCRIPTION -C Add a D.P. real constant times a complex vector to a complex vector. -C Minor modification of BLAS routine ZAXPY. -C***REVISION HISTORY (YYMMDD) -C 060530 DATE WRITTEN. -C***END PROLOGUE DZAXPY - DOUBLE COMPLEX ZX(*),ZY(*) - DOUBLE PRECISION DA - INTEGER I,INCX,INCY,IX,IY,N - IF(N.LE.0)RETURN - IF (ABS(DA) .EQ. 0.0D0) RETURN - IF (INCX.EQ.1.AND.INCY.EQ.1)GO TO 20 -C Code for unequal increments or equal increments not equal to 1 - IX = 1 - IY = 1 - IF(INCX.LT.0)IX = (-N+1)*INCX + 1 - IF(INCY.LT.0)IY = (-N+1)*INCY + 1 - DO 10 I = 1,N - ZY(IY) = ZY(IY) + DA*ZX(IX) - IX = IX + INCX - IY = IY + INCY - 10 CONTINUE - RETURN -C Code for both increments equal to 1 - 20 DO 30 I = 1,N - ZY(I) = ZY(I) + DA*ZX(I) - 30 CONTINUE - RETURN - END -*DECK DUMACH - DOUBLE PRECISION FUNCTION DUMACH () -C***BEGIN PROLOGUE DUMACH -C***PURPOSE Compute the unit roundoff of the machine. -C***CATEGORY R1 -C***TYPE DOUBLE PRECISION (RUMACH-S, DUMACH-D) -C***KEYWORDS MACHINE CONSTANTS -C***AUTHOR Hindmarsh, Alan C., (LLNL) -C***DESCRIPTION -C *Usage: -C DOUBLE PRECISION A, DUMACH -C A = DUMACH() -C -C *Function Return Values: -C A : the unit roundoff of the machine. -C -C *Description: -C The unit roundoff is defined as the smallest positive machine -C number u such that 1.0 + u .ne. 1.0. This is computed by DUMACH -C in a machine-independent manner. 
-C -C***REFERENCES (NONE) -C***ROUTINES CALLED DUMSUM -C***REVISION HISTORY (YYYYMMDD) -C 19930216 DATE WRITTEN -C 19930818 Added SLATEC-format prologue. (FNF) -C 20030707 Added DUMSUM to force normal storage of COMP. (ACH) -C***END PROLOGUE DUMACH -C - DOUBLE PRECISION U, COMP -C***FIRST EXECUTABLE STATEMENT DUMACH - U = 1.0D0 - 10 U = U*0.5D0 - CALL DUMSUM(1.0D0, U, COMP) - IF (COMP .NE. 1.0D0) GO TO 10 - DUMACH = U*2.0D0 - RETURN -C----------------------- End of Function DUMACH ------------------------ - END - SUBROUTINE DUMSUM(A,B,C) -C Routine to force normal storing of A + B, for DUMACH. - DOUBLE PRECISION A, B, C - C = A + B - RETURN - END diff --git a/scipy-0.10.1/scipy/integrate/quadpack.h b/scipy-0.10.1/scipy/integrate/quadpack.h deleted file mode 100644 index 0ac6fd8330..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack.h +++ /dev/null @@ -1,79 +0,0 @@ -/* MULTIPACK module by Travis Oliphant - -Copyright (c) 1999 Travis Oliphant all rights reserved -oliphant.travis@ieee.org -Permission to use, modify, and distribute this software is given under the -terms of the Scipy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -*/ - - -/* This extension module is a collection of wrapper functions around -common FORTRAN code in the packages MINPACK, ODEPACK, and QUADPACK plus -some differential algebraic equation solvers. - -The wrappers are meant to be nearly direct translations between the -FORTAN code and Python. Some parameters like sizes do not need to be -passed since they are available from the objects. - -It is anticipated that a pure Python module be written to call these lower -level routines and make a simpler user interface. All of the routines define -default values for little-used parameters so that even the raw routines are -quite useful without a separate wrapper. 
- -FORTRAN Outputs that are not either an error indicator or the sought-after -results are placed in a dictionary and returned as an optional member of -the result tuple when the full_output argument is non-zero. -*/ - -#include "Python.h" - -#include "numpy/npy_3kcompat.h" -#include "numpy/arrayobject.h" -#include - -#define PYERR(errobj,message) {PyErr_SetString(errobj,message); goto fail;} -#define PYERR2(errobj,message) {PyErr_Print(); PyErr_SetString(errobj, message); goto fail;} -#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS) - -#define STORE_VARS() PyObject *store_quadpack_globals[2]; jmp_buf store_jmp; - -#define INIT_FUNC(fun,arg,errobj) { /* Get extra arguments or set to zero length tuple */ \ - store_quadpack_globals[0] = quadpack_python_function; \ - store_quadpack_globals[1] = quadpack_extra_arguments; \ - memcpy(&store_jmp,&quadpack_jmpbuf,sizeof(jmp_buf)); \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. 
*/ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun)) \ - PYERR(errobj,"First argument must be a callable function."); \ - quadpack_python_function = fun; \ - quadpack_extra_arguments = arg;} - -#define RESTORE_FUNC() quadpack_python_function = store_quadpack_globals[0]; \ - quadpack_extra_arguments = store_quadpack_globals[1]; \ - memcpy(&quadpack_jmpbuf, &store_jmp, sizeof(jmp_buf)); - -static PyObject *quadpack_python_function=NULL; -static PyObject *quadpack_extra_arguments=NULL; /* a tuple */ -static jmp_buf quadpack_jmpbuf; - - - - - - - - - - - - - - diff --git a/scipy-0.10.1/scipy/integrate/quadpack.py b/scipy-0.10.1/scipy/integrate/quadpack.py deleted file mode 100644 index 234f3360fb..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack.py +++ /dev/null @@ -1,486 +0,0 @@ -# Author: Travis Oliphant 2001 - -import _quadpack -import sys -import numpy -from numpy import inf, Inf - -__all__ = ['quad', 'dblquad', 'tplquad', 'quad_explain'] - - -error = _quadpack.error - -def quad_explain(output=sys.stdout): - """ - Print extra information about integrate.quad() parameters and returns. - - Parameters - ---------- - output : instance with "write" method - Information about `quad` is passed to ``output.write()``. - Default is ``sys.stdout``. - - Returns - ------- - None - - """ - output.write(""" -Extra information for quad() inputs and outputs: - - If full_output is non-zero, then the third output argument (infodict) - is a dictionary with entries as tabulated below. For infinite limits, the - range is transformed to (0,1) and the optional outputs are given with - respect to this transformed range. Let M be the input argument limit and - let K be infodict['last']. The entries are: - - 'neval' : The number of function evaluations. - 'last' : The number, K, of subintervals produced in the subdivision process. 
- 'alist' : A rank-1 array of length M, the first K elements of which are the - left end points of the subintervals in the partition of the - integration range. - 'blist' : A rank-1 array of length M, the first K elements of which are the - right end points of the subintervals. - 'rlist' : A rank-1 array of length M, the first K elements of which are the - integral approximations on the subintervals. - 'elist' : A rank-1 array of length M, the first K elements of which are the - moduli of the absolute error estimates on the subintervals. - 'iord' : A rank-1 integer array of length M, the first L elements of - which are pointers to the error estimates over the subintervals - with L=K if K<=M/2+2 or L=M+1-K otherwise. Let I be the sequence - infodict['iord'] and let E be the sequence infodict['elist']. - Then E[I[1]], ..., E[I[L]] forms a decreasing sequence. - - If the input argument points is provided (i.e. it is not None), the - following additional outputs are placed in the output dictionary. Assume the - points sequence is of length P. - - 'pts' : A rank-1 array of length P+2 containing the integration limits - and the break points of the intervals in ascending order. - This is an array giving the subintervals over which integration - will occur. - 'level' : A rank-1 integer array of length M (=limit), containing the - subdivision levels of the subintervals, i.e., if (aa,bb) is a - subinterval of (pts[1], pts[2]) where pts[0] and pts[2] are - adjacent elements of infodict['pts'], then (aa,bb) has level l if - |bb-aa|=|pts[2]-pts[1]| * 2**(-l). - 'ndin' : A rank-1 integer array of length P+2. After the first integration - over the intervals (pts[1], pts[2]), the error estimates over some - of the intervals may have been increased artificially in order to - put their subdivision forward. This array has ones in slots - corresponding to the subintervals for which this happens. 
- -Weighting the integrand: - - The input variables, weight and wvar, are used to weight the integrand by - a select list of functions. Different integration methods are used - to compute the integral with these weighting functions. The possible values - of weight and the corresponding weighting functions are. - - 'cos' : cos(w*x) : wvar = w - 'sin' : sin(w*x) : wvar = w - 'alg' : g(x) = ((x-a)**alpha)*((b-x)**beta) : wvar = (alpha, beta) - 'alg-loga': g(x)*log(x-a) : wvar = (alpha, beta) - 'alg-logb': g(x)*log(b-x) : wvar = (alpha, beta) - 'alg-log' : g(x)*log(x-a)*log(b-x) : wvar = (alpha, beta) - 'cauchy' : 1/(x-c) : wvar = c - - wvar holds the parameter w, (alpha, beta), or c depending on the weight - selected. In these expressions, a and b are the integration limits. - - For the 'cos' and 'sin' weighting, additional inputs and outputs are - available. - - For finite integration limits, the integration is performed using a - Clenshaw-Curtis method which uses Chebyshev moments. For repeated - calculations, these moments are saved in the output dictionary: - - 'momcom' : The maximum level of Chebyshev moments that have been computed, - i.e., if M_c is infodict['momcom'] then the moments have been - computed for intervals of length |b-a|* 2**(-l), l=0,1,...,M_c. - 'nnlog' : A rank-1 integer array of length M(=limit), containing the - subdivision levels of the subintervals, i.e., an element of this - array is equal to l if the corresponding subinterval is - |b-a|* 2**(-l). - 'chebmo' : A rank-2 array of shape (25, maxp1) containing the computed - Chebyshev moments. These can be passed on to an integration - over the same interval by passing this array as the second - element of the sequence wopts and passing infodict['momcom'] as - the first element. - - If one of the integration limits is infinite, then a Fourier integral is - computed (assuming w neq 0). 
If full_output is 1 and a numerical error - is encountered, besides the error message attached to the output tuple, - a dictionary is also appended to the output tuple which translates the - error codes in the array info['ierlst'] to English messages. The output - information dictionary contains the following entries instead of 'last', - 'alist', 'blist', 'rlist', and 'elist': - - 'lst' : The number of subintervals needed for the integration (call it K_f). - 'rslst' : A rank-1 array of length M_f=limlst, whose first K_f elements - contain the integral contribution over the interval (a+(k-1)c, - a+kc) where c = (2*floor(|w|) + 1) * pi / |w| and k=1,2,...,K_f. - 'erlst' : A rank-1 array of length M_f containing the error estimate - corresponding to the interval in the same position in - infodict['rslist']. - 'ierlst' : A rank-1 integer array of length M_f containing an error flag - corresponding to the interval in the same position in - infodict['rslist']. See the explanation dictionary (last entry - in the output tuple) for the meaning of the codes. -""") - return - - -def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, - limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, - limlst=50): - """ - Compute a definite integral. - - Integrate func from a to b (possibly infinite interval) using a technique - from the Fortran library QUADPACK. - - If func takes many arguments, it is integrated along the axis corresponding - to the first argument. Use the keyword argument `args` to pass the other - arguments. - - Run scipy.integrate.quad_explain() for more information on the - more esoteric inputs and outputs. - - Parameters - ---------- - - func : function - A Python function or method to integrate. - a : float - Lower limit of integration (use -scipy.integrate.Inf for -infinity). - b : float - Upper limit of integration (use scipy.integrate.Inf for +infinity). 
- args : tuple, optional - extra arguments to pass to func - full_output : int - Non-zero to return a dictionary of integration information. - If non-zero, warning messages are also suppressed and the - message is appended to the output tuple. - - Returns - ------- - - y : float - The integral of func from a to b. - abserr : float - an estimate of the absolute error in the result. - - infodict : dict - a dictionary containing additional information. - Run scipy.integrate.quad_explain() for more information. - message : - a convergence message. - explain : - appended only with 'cos' or 'sin' weighting and infinite - integration limits, it contains an explanation of the codes in - infodict['ierlst'] - - Other Parameters - ---------------- - epsabs : - absolute error tolerance. - epsrel : - relative error tolerance. - limit : - an upper bound on the number of subintervals used in the adaptive - algorithm. - points : - a sequence of break points in the bounded integration interval - where local difficulties of the integrand may occur (e.g., - singularities, discontinuities). The sequence does not have - to be sorted. - weight : - string indicating weighting function. - wvar : - variables for use with weighting functions. - limlst : - Upper bound on the number of cylces (>=3) for use with a sinusoidal - weighting and an infinite end-point. - wopts : - Optional input for reusing Chebyshev moments. - maxp1 : - An upper bound on the number of Chebyshev moments. - - See Also - -------- - dblquad, tplquad : double and triple integrals - fixed_quad : fixed-order Gaussian quadrature - quadrature : adaptive Gaussian quadrature - odeint, ode : ODE integrators - simps, trapz, romb : integrators for sampled data - scipy.special : for coefficients and roots of orthogonal polynomials - - Examples - -------- - Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result - - >>> from scipy import integrate - >>> x2 = lambda x: x**2 - >>> integrate.quad(x2,0.,4.) 
- (21.333333333333332, 2.3684757858670003e-13) - >> print 4.**3/3 - 21.3333333333 - - Calculate :math:`\\int^\\infty_0 e^{-x} dx` - - >>> invexp = lambda x: exp(-x) - >>> integrate.quad(invexp,0,inf) - (0.99999999999999989, 5.8426061711142159e-11) - - >>> f = lambda x,a : a*x - >>> y, err = integrate.quad(f, 0, 1, args=(1,)) - >>> y - 0.5 - >>> y, err = integrate.quad(f, 0, 1, args=(3,)) - >>> y - 1.5 - - """ - if type(args) != type(()): args = (args,) - if (weight is None): - retval = _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points) - else: - retval = _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts) - - ier = retval[-1] - if ier == 0: - return retval[:-1] - - msgs = {80: "A Python error occurred possibly while calling the function.", - 1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit, - 2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.", - 3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.", - 4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.", - 5: "The integral is probably divergent, or slowly convergent.", - 6: "The input is invalid.", - 7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. 
It is assumed that the requested accuracy\n has not been achieved.", - 'unknown': "Unknown error."} - - if weight in ['cos','sin'] and (b == Inf or a == -Inf): - msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1." - msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1." - msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1." - explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.", - 2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.", - 3: "Extremely bad integrand behavior occurs at some points of\n this cycle.", - 4: "The integral over this cycle does not converge (to within the required accuracy) due ot roundoff in the extrapolation procedure invoked on this cycle. 
It is assumed that the result on this interval is the best which can be obtained.", - 5: "The integral over this cycle is probably divergent or slowly convergent."} - - try: - msg = msgs[ier] - except KeyError: - msg = msgs['unknown'] - - if ier in [1,2,3,4,5,7]: - if full_output: - if weight in ['cos','sin'] and (b == Inf or a == Inf): - return retval[:-1] + (msg, explain) - else: - return retval[:-1] + (msg,) - else: - import warnings - warnings.warn(msg) - return retval[:-1] - else: - raise ValueError(msg) - - -def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points): - infbounds = 0 - if (b != Inf and a != -Inf): - pass # standard integration - elif (b == Inf and a != -Inf): - infbounds = 1 - bound = a - elif (b == Inf and a == -Inf): - infbounds = 2 - bound = 0 # ignored - elif (b != Inf and a == -Inf): - infbounds = -1 - bound = b - else: - raise RuntimeError("Infinity comparisons don't work for you.") - - if points is None: - if infbounds == 0: - return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit) - else: - return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit) - else: - if infbounds !=0: - raise ValueError("Infinity inputs cannot be used with break points.") - else: - nl = len(points) - the_points = numpy.zeros((nl+2,), float) - the_points[:nl] = points - return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit) - - -def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts): - - if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']: - raise ValueError("%s not a recognized weighting function." 
% weight) - - strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4} - - if weight in ['cos','sin']: - integr = strdict[weight] - if (b != Inf and a != -Inf): # finite limits - if wopts is None: # no precomputed chebyshev moments - return _quadpack._qawoe(func,a,b,wvar,integr,args,full_output,epsabs,epsrel,limit,maxp1,1) - else: # precomputed chebyshev moments - momcom = wopts[0] - chebcom = wopts[1] - return _quadpack._qawoe(func,a,b,wvar,integr,args,full_output,epsabs,epsrel,limit,maxp1,2,momcom,chebcom) - - elif (b == Inf and a != -Inf): - return _quadpack._qawfe(func,a,wvar,integr,args,full_output,epsabs,limlst,limit,maxp1) - elif (b != Inf and a == -Inf): # remap function and interval - if weight == 'cos': - def thefunc(x,*myargs): - y = -x - func = myargs[0] - myargs = (y,) + myargs[1:] - return apply(func,myargs) - else: - def thefunc(x,*myargs): - y = -x - func = myargs[0] - myargs = (y,) + myargs[1:] - return -apply(func,myargs) - args = (func,) + args - return _quadpack._qawfe(thefunc,-b,wvar,integr,args,full_output,epsabs,limlst,limit,maxp1) - else: - raise ValueError("Cannot integrate with this weight from -Inf to +Inf.") - else: - if a in [-Inf,Inf] or b in [-Inf,Inf]: - raise ValueError("Cannot integrate with this weight over an infinite interval.") - - if weight[:3] == 'alg': - integr = strdict[weight] - return _quadpack._qawse(func,a,b,wvar,integr,args,full_output,epsabs,epsrel,limit) - else: # weight == 'cauchy' - return _quadpack._qawce(func,a,b,wvar,args,full_output,epsabs,epsrel,limit) - -def _infunc(x,func,gfun,hfun,more_args): - a = gfun(x) - b = hfun(x) - myargs = (x,) + more_args - return quad(func,a,b,args=myargs)[0] - -def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8): - """ - Compute a double integral. - - Return the double (definite) integral of func(y,x) from x=a..b and - y=gfun(x)..hfun(x). 
- - Parameters - ----------- - func : callable - A Python function or method of at least two variables: y must be the - first argument and x the second argument. - (a,b) : tuple - The limits of integration in x: a < b - gfun : callable - The lower boundary curve in y which is a function taking a single - floating point argument (x) and returning a floating point result: a - lambda function can be useful here. - hfun : callable - The upper boundary curve in y (same requirements as `gfun`). - args : sequence, optional - Extra arguments to pass to `func2d`. - epsabs : float, optional - Absolute tolerance passed directly to the inner 1-D quadrature - integration. Default is 1.49e-8. - epsrel : float - Relative tolerance of the inner 1-D integrals. Default is 1.49e-8. - - Returns - ------- - y : float - The resultant integral. - abserr : float - An estimate of the error. - - See also - -------- - quad : single integral - tplquad : triple integral - fixed_quad : fixed-order Gaussian quadrature - quadrature : adaptive Gaussian quadrature - odeint, ode : ODE integrators - simps, trapz, romb : integrators for sampled data - scipy.special : for coefficients and roots of orthogonal polynomials - - """ - return quad(_infunc,a,b,(func,gfun,hfun,args),epsabs=epsabs,epsrel=epsrel) - -def _infunc2(y,x,func,qfun,rfun,more_args): - a2 = qfun(x,y) - b2 = rfun(x,y) - myargs = (y,x) + more_args - return quad(func,a2,b2,args=myargs)[0] - -def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8, - epsrel=1.49e-8): - """ - Compute a triple (definite) integral. - - Return the triple integral of func(z, y, x) from - x=a..b, y=gfun(x)..hfun(x), and z=qfun(x,y)..rfun(x,y) - - Parameters - ---------- - func : function - A Python function or method of at least three variables in the - order (z, y, x). 
- (a,b) : tuple - The limits of integration in x: a < b - gfun : function - The lower boundary curve in y which is a function taking a single - floating point argument (x) and returning a floating point result: - a lambda function can be useful here. - hfun : function - The upper boundary curve in y (same requirements as gfun). - qfun : function - The lower boundary surface in z. It must be a function that takes - two floats in the order (x, y) and returns a float. - rfun : function - The upper boundary surface in z. (Same requirements as qfun.) - args : Arguments - Extra arguments to pass to func3d. - epsabs : float, optional - Absolute tolerance passed directly to the innermost 1-D quadrature - integration. Default is 1.49e-8. - epsrel : float, optional - Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8. - - Returns - ------- - y : float - The resultant integral. - abserr : float - An estimate of the error. - - See Also - -------- - quad: Adaptive quadrature using QUADPACK - quadrature: Adaptive Gaussian quadrature - fixed_quad: Fixed-order Gaussian quadrature - dblquad: Double integrals - romb: Integrators for sampled data - trapz: Integrators for sampled data - simps: Integrators for sampled data - ode: ODE integrators - odeint: ODE integrators - scipy.special: For coefficients and roots of orthogonal polynomials - - """ - return dblquad(_infunc2,a,b,gfun,hfun,(func,qfun,rfun,args),epsabs=epsabs,epsrel=epsrel) diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqag.f b/scipy-0.10.1/scipy/integrate/quadpack/dqag.f deleted file mode 100644 index fb97cc7238..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqag.f +++ /dev/null @@ -1,182 +0,0 @@ - subroutine dqag(f,a,b,epsabs,epsrel,key,result,abserr,neval,ier, - * limit,lenw,last,iwork,work) -c***begin prologue dqag -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. 
h2a1a1 -c***keywords automatic integrator, general-purpose, -c integrand examinator, globally adaptive, -c gauss-kronrod -c***author piessens,robert,appl. math. & progr. div - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f over (a,b), -c hopefully satisfying following claim for accuracy -c abs(i-result)le.max(epsabs,epsrel*abs(i)). -c***description -c -c computation of a definite integral -c standard fortran subroutine -c double precision version -c -c f - double precision -c function subprogam defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c epsabs - double precision -c absolute accoracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c key - integer -c key for choice of local integration rule -c a gauss-kronrod pair is used with -c 7 - 15 points if key.lt.2, -c 10 - 21 points if key = 2, -c 15 - 31 points if key = 3, -c 20 - 41 points if key = 4, -c 25 - 51 points if key = 5, -c 30 - 61 points if key.gt.5. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for result and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. 
-c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit (and taking the according dimension -c adjustments into account). however, if -c this yield no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulaties. -c if the position of a local difficulty can -c be determined (i.e.singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling the -c integrator on the subranges. if possible, -c an appropriate special-purpose integrator -c should be used which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or limit.lt.1 or lenw.lt.limit*4. -c result, abserr, neval, last are set -c to zero. -c except when lenw is invalid, iwork(1), -c work(limit*2+1) and work(limit*3+1) are -c set to zero, work(1) is set to a and -c work(limit+1) to b. -c -c dimensioning parameters -c limit - integer -c dimensioning parameter for iwork -c limit determines the maximum number of subintervals -c in the partition of the given integration interval -c (a,b), limit.ge.1. -c if limit.lt.1, the routine will end with ier = 6. -c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least limit*4. -c if lenw.lt.limit*4, the routine will end with -c ier = 6. -c -c last - integer -c on return, last equals the number of subintervals -c produced in the subdiviosion process, which -c determines the number of significant elements -c actually in the work arrays. 
-c -c work arrays -c iwork - integer -c vector of dimension at least limit, the first k -c elements of which contain pointers to the error -c estimates over the subintervals, such that -c work(limit*3+iwork(1)),... , work(limit*3+iwork(k)) -c form a decreasing sequence with k = last if -c last.le.(limit/2+2), and k = limit+1-last otherwise -c -c work - double precision -c vector of dimension at least lenw -c on return -c work(1), ..., work(last) contain the left end -c points of the subintervals in the partition of -c (a,b), -c work(limit+1), ..., work(limit+last) contain the -c right end points, -c work(limit*2+1), ..., work(limit*2+last) contain -c the integral approximations over the subintervals, -c work(limit*3+1), ..., work(limit*3+last) contain -c the error estimates. -c -c***references (none) -c***routines called dqage,xerror -c***end prologue dqag - double precision a,abserr,b,epsabs,epsrel,f,result,work - integer ier,iwork,key,last,lenw,limit,lvl,l1,l2,l3,neval -c - dimension iwork(limit),work(lenw) -c - external f -c -c check validity of lenw. -c -c***first executable statement dqag - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(limit.lt.1.or.lenw.lt.limit*4) go to 10 -c -c prepare call for dqage. -c - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 -c - call dqage(f,a,b,epsabs,epsrel,key,limit,result,abserr,neval, - * ier,work(1),work(l1),work(l2),work(l3),iwork,last) -c -c call error handler if necessary. 
-c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqag' ,26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqage.f b/scipy-0.10.1/scipy/integrate/quadpack/dqage.f deleted file mode 100644 index 2fb5c0d05e..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqage.f +++ /dev/null @@ -1,340 +0,0 @@ - subroutine dqage(f,a,b,epsabs,epsrel,key,limit,result,abserr, - * neval,ier,alist,blist,rlist,elist,iord,last) -c***begin prologue dqage -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a1 -c***keywords automatic integrator, general-purpose, -c integrand examinator, globally adaptive, -c gauss-kronrod -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f over (a,b), -c hopefully satisfying following claim for accuracy -c abs(i-reslt).le.max(epsabs,epsrel*abs(i)). -c***description -c -c computation of a definite integral -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. 
-c -c key - integer -c key for choice of local integration rule -c a gauss-kronrod pair is used with -c 7 - 15 points if key.lt.2, -c 10 - 21 points if key = 2, -c 15 - 31 points if key = 3, -c 20 - 41 points if key = 4, -c 25 - 51 points if key = 5, -c 30 - 61 points if key.gt.5. -c -c limit - integer -c gives an upperbound on the number of subintervals -c in the partition of (a,b), limit.ge.1. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for result and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value -c of limit. -c however, if this yields no improvement it -c is rather advised to analyze the integrand -c in order to determine the integration -c difficulties. if the position of a local -c difficulty can be determined(e.g. -c singularity, discontinuity within the -c interval) one will probably gain from -c splitting up the interval at this point -c and calling the integrator on the -c subranges. if possible, an appropriate -c special-purpose integrator should be used -c which is designed for handling the type of -c difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. 
-c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c result, abserr, neval, last, rlist(1) , -c elist(1) and iord(1) are set to zero. -c alist(1) and blist(1) are set to a and b -c respectively. -c -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c rlist - double precision -c vector of dimension at least limit, the first -c last elements of which are the -c integral approximations on the subintervals -c -c elist - double precision -c vector of dimension at least limit, the first -c last elements of which are the moduli of the -c absolute error estimates on the subintervals -c -c iord - integer -c vector of dimension at least limit, the first k -c elements of which are pointers to the -c error estimates over the subintervals, -c such that elist(iord(1)), ..., -c elist(iord(k)) form a decreasing sequence, -c with k = last if last.le.(limit/2+2), and -c k = limit+1-last otherwise -c -c last - integer -c number of subintervals actually produced in the -c subdivision process -c -c***references (none) -c***routines called d1mach,dqk15,dqk21,dqk31, -c dqk41,dqk51,dqk61,dqpsrt -c***end prologue dqage -c - double precision a,abserr,alist,area,area1,area12,area2,a1,a2,b, - * blist,b1,b2,dabs,defabs,defab1,defab2,dmax1,d1mach,elist,epmach, - * epsabs,epsrel,errbnd,errmax,error1,error2,erro12,errsum,f, - * resabs,result,rlist,uflow - integer ier,iord,iroff1,iroff2,k,key,keyf,last,limit,maxerr,neval, - * nrmax -c - dimension alist(limit),blist(limit),elist(limit),iord(limit), - * rlist(limit) -c - external f -c -c list of major variables -c 
----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the interval with largest -c error estimate -c errmax - elist(maxerr) -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left subinterval -c *****2 - variable for the right subinterval -c last - index for subdivision -c -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqage - epmach = d1mach(4) - uflow = d1mach(1) -c -c test on validity of parameters -c ------------------------------ -c - ier = 0 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - alist(1) = a - blist(1) = b - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - iord(1) = 0 - if(epsabs.le.0.0d+00.and. - * epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28)) ier = 6 - if(ier.eq.6) go to 999 -c -c first approximation to the integral -c ----------------------------------- -c - keyf = key - if(key.le.0) keyf = 1 - if(key.ge.7) keyf = 6 - neval = 0 - if(keyf.eq.1) call dqk15(f,a,b,result,abserr,defabs,resabs) - if(keyf.eq.2) call dqk21(f,a,b,result,abserr,defabs,resabs) - if(keyf.eq.3) call dqk31(f,a,b,result,abserr,defabs,resabs) - if(keyf.eq.4) call dqk41(f,a,b,result,abserr,defabs,resabs) - if(keyf.eq.5) call dqk51(f,a,b,result,abserr,defabs,resabs) - if(keyf.eq.6) call dqk61(f,a,b,result,abserr,defabs,resabs) - last = 1 - rlist(1) = result - elist(1) = abserr - iord(1) = 1 -c -c test on accuracy. 
-c - errbnd = dmax1(epsabs,epsrel*dabs(result)) - if(abserr.le.0.5d+02*epmach*defabs.and.abserr.gt.errbnd) ier = 2 - if(limit.eq.1) ier = 1 - if(ier.ne.0.or.(abserr.le.errbnd.and.abserr.ne.resabs) - * .or.abserr.eq.0.0d+00) go to 60 -c -c initialization -c -------------- -c -c - errmax = abserr - maxerr = 1 - area = result - errsum = abserr - nrmax = 1 - iroff1 = 0 - iroff2 = 0 -c -c main do-loop -c ------------ -c - do 30 last = 2,limit -c -c bisect the subinterval with the largest error estimate. -c - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - a2 = b1 - b2 = blist(maxerr) - if(keyf.eq.1) call dqk15(f,a1,b1,area1,error1,resabs,defab1) - if(keyf.eq.2) call dqk21(f,a1,b1,area1,error1,resabs,defab1) - if(keyf.eq.3) call dqk31(f,a1,b1,area1,error1,resabs,defab1) - if(keyf.eq.4) call dqk41(f,a1,b1,area1,error1,resabs,defab1) - if(keyf.eq.5) call dqk51(f,a1,b1,area1,error1,resabs,defab1) - if(keyf.eq.6) call dqk61(f,a1,b1,area1,error1,resabs,defab1) - if(keyf.eq.1) call dqk15(f,a2,b2,area2,error2,resabs,defab2) - if(keyf.eq.2) call dqk21(f,a2,b2,area2,error2,resabs,defab2) - if(keyf.eq.3) call dqk31(f,a2,b2,area2,error2,resabs,defab2) - if(keyf.eq.4) call dqk41(f,a2,b2,area2,error2,resabs,defab2) - if(keyf.eq.5) call dqk51(f,a2,b2,area2,error2,resabs,defab2) - if(keyf.eq.6) call dqk61(f,a2,b2,area2,error2,resabs,defab2) -c -c improve previous approximations to integral -c and error and test for accuracy. -c - neval = neval+1 - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(defab1.eq.error1.or.defab2.eq.error2) go to 5 - if(dabs(rlist(maxerr)-area12).le.0.1d-04*dabs(area12) - * .and.erro12.ge.0.99d+00*errmax) iroff1 = iroff1+1 - if(last.gt.10.and.erro12.gt.errmax) iroff2 = iroff2+1 - 5 rlist(maxerr) = area1 - rlist(last) = area2 - errbnd = dmax1(epsabs,epsrel*dabs(area)) - if(errsum.le.errbnd) go to 8 -c -c test for roundoff error and eventually set error flag. 
-c - if(iroff1.ge.6.or.iroff2.ge.20) ier = 2 -c -c set error flag in the case that the number of subintervals -c equals limit. -c - if(last.eq.limit) ier = 1 -c -c set error flag in the case of bad integrand behaviour -c at a point of the integration range. -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03* - * epmach)*(dabs(a2)+0.1d+04*uflow)) ier = 3 -c -c append the newly-created intervals to the list. -c - 8 if(error2.gt.error1) go to 10 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 20 - 10 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with the largest error estimate (to be bisected next). -c - 20 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) -c ***jump out of do-loop - if(ier.ne.0.or.errsum.le.errbnd) go to 40 - 30 continue -c -c compute final result. -c --------------------- -c - 40 result = 0.0d+00 - do 50 k=1,last - result = result+rlist(k) - 50 continue - abserr = errsum - 60 if(keyf.ne.1) neval = (10*keyf+1)*(2*neval+1) - if(keyf.eq.1) neval = 30*neval+15 - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqagi.f b/scipy-0.10.1/scipy/integrate/quadpack/dqagi.f deleted file mode 100644 index 58266c6c18..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqagi.f +++ /dev/null @@ -1,191 +0,0 @@ - subroutine dqagi(f,bound,inf,epsabs,epsrel,result,abserr,neval, - * ier,limit,lenw,last,iwork,work) -c***begin prologue dqagi -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a3a1,h2a4a1 -c***keywords automatic integrator, infinite intervals, -c general-purpose, transformation, extrapolation, -c globally adaptive -c***author piessens,robert,appl. math. & progr. div. 
- k.u.leuven -c de doncker,elise,appl. math. & progr. div. -k.u.leuven -c***purpose the routine calculates an approximation result to a given -c integral i = integral of f over (bound,+infinity) -c or i = integral of f over (-infinity,bound) -c or i = integral of f over (-infinity,+infinity) -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c integration over infinite intervals -c standard fortran subroutine -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c bound - double precision -c finite bound of integration range -c (has no meaning if interval is doubly-infinite) -c -c inf - integer -c indicating the kind of integration range involved -c inf = 1 corresponds to (bound,+infinity), -c inf = -1 to (-infinity,bound), -c inf = 2 to (-infinity,+infinity). -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c - ier.gt.0 abnormal termination of the routine. the -c estimates for result and error are less -c reliable. it is assumed that the requested -c accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. 
one can allow more -c subdivisions by increasing the value of -c limit (and taking the according dimension -c adjustments into account). however, if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. if -c the position of a local difficulty can be -c determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling the -c integrator on the subranges. if possible, -c an appropriate special-purpose integrator -c should be used, which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. -c it is assumed that the requested tolerance -c cannot be achieved, and that the returned -c result is the best which can be obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier. -c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or limit.lt.1 or leniw.lt.limit*4. -c result, abserr, neval, last are set to -c zero. exept when limit or leniw is -c invalid, iwork(1), work(limit*2+1) and -c work(limit*3+1) are set to zero, work(1) -c is set to a and work(limit+1) to b. -c -c dimensioning parameters -c limit - integer -c dimensioning parameter for iwork -c limit determines the maximum number of subintervals -c in the partition of the given integration interval -c (a,b), limit.ge.1. -c if limit.lt.1, the routine will end with ier = 6. 
-c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least limit*4. -c if lenw.lt.limit*4, the routine will end -c with ier = 6. -c -c last - integer -c on return, last equals the number of subintervals -c produced in the subdivision process, which -c determines the number of significant elements -c actually in the work arrays. -c -c work arrays -c iwork - integer -c vector of dimension at least limit, the first -c k elements of which contain pointers -c to the error estimates over the subintervals, -c such that work(limit*3+iwork(1)),... , -c work(limit*3+iwork(k)) form a decreasing -c sequence, with k = last if last.le.(limit/2+2), and -c k = limit+1-last otherwise -c -c work - double precision -c vector of dimension at least lenw -c on return -c work(1), ..., work(last) contain the left -c end points of the subintervals in the -c partition of (a,b), -c work(limit+1), ..., work(limit+last) contain -c the right end points, -c work(limit*2+1), ...,work(limit*2+last) contain the -c integral approximations over the subintervals, -c work(limit*3+1), ..., work(limit*3+last) -c contain the error estimates. -c***references (none) -c***routines called dqagie,xerror -c***end prologue dqagi -c - double precision abserr,bound,epsabs,epsrel,f,result,work - integer ier,inf,iwork,last,lenw,limit,lvl,l1,l2,l3,neval -c - dimension iwork(limit),work(lenw) -c - external f -c -c check validity of limit and lenw. -c -c***first executable statement dqagi - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(limit.lt.1.or.lenw.lt.limit*4) go to 10 -c -c prepare call for dqagie. -c - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 -c - call dqagie(f,bound,inf,epsabs,epsrel,limit,result,abserr, - * neval,ier,work(1),work(l1),work(l2),work(l3),iwork,last) -c -c call error handler if necessary. 
-c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqagi',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqagie.f b/scipy-0.10.1/scipy/integrate/quadpack/dqagie.f deleted file mode 100644 index e61a4cd5a1..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqagie.f +++ /dev/null @@ -1,452 +0,0 @@ - subroutine dqagie(f,bound,inf,epsabs,epsrel,limit,result,abserr, - * neval,ier,alist,blist,rlist,elist,iord,last) -c***begin prologue dqagie -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a3a1,h2a4a1 -c***keywords automatic integrator, infinite intervals, -c general-purpose, transformation, extrapolation, -c globally adaptive -c***author piessens,robert,appl. math & progr. div - k.u.leuven -c de doncker,elise,appl. math & progr. div - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c integral i = integral of f over (bound,+infinity) -c or i = integral of f over (-infinity,bound) -c or i = integral of f over (-infinity,+infinity), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)) -c***description -c -c integration over infinite intervals -c standard fortran subroutine -c -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c bound - double precision -c finite bound of integration range -c (has no meaning if interval is doubly-infinite) -c -c inf - double precision -c indicating the kind of integration range involved -c inf = 1 corresponds to (bound,+infinity), -c inf = -1 to (-infinity,bound), -c inf = 2 to (-infinity,+infinity). -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. 
-c -c limit - integer -c gives an upper bound on the number of subintervals -c in the partition of (a,b), limit.ge.1 -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c - ier.gt.0 abnormal termination of the routine. the -c estimates for result and error are less -c reliable. it is assumed that the requested -c accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit (and taking the according dimension -c adjustments into account). however,if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. -c if the position of a local difficulty can -c be determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling the -c integrator on the subranges. if possible, -c an appropriate special-purpose integrator -c should be used, which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. -c it is assumed that the requested tolerance -c cannot be achieved, and that the returned -c result is the best which can be obtained. 
-c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier. -c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c result, abserr, neval, last, rlist(1), -c elist(1) and iord(1) are set to zero. -c alist(1) and blist(1) are set to 0 -c and 1 respectively. -c -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left -c end points of the subintervals in the partition -c of the transformed integration range (0,1). -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right -c end points of the subintervals in the partition -c of the transformed integration range (0,1). -c -c rlist - double precision -c vector of dimension at least limit, the first -c last elements of which are the integral -c approximations on the subintervals -c -c elist - double precision -c vector of dimension at least limit, the first -c last elements of which are the moduli of the -c absolute error estimates on the subintervals -c -c iord - integer -c vector of dimension limit, the first k -c elements of which are pointers to the -c error estimates over the subintervals, -c such that elist(iord(1)), ..., elist(iord(k)) -c form a decreasing sequence, with k = last -c if last.le.(limit/2+2), and k = limit+1-last -c otherwise -c -c last - integer -c number of subintervals actually produced -c in the subdivision process -c -c***references (none) -c***routines called d1mach,dqelg,dqk15i,dqpsrt -c***end prologue dqagie - double precision abseps,abserr,alist,area,area1,area12,area2,a1, - * a2,blist,boun,bound,b1,b2,correc,dabs,defabs,defab1,defab2, - * dmax1,dres,d1mach,elist,epmach,epsabs,epsrel,erlarg,erlast, - * errbnd,errmax,error1,error2,erro12,errsum,ertest,f,oflow,resabs, - * reseps,result,res3la,rlist,rlist2,small,uflow - integer 
id,ier,ierro,inf,iord,iroff1,iroff2,iroff3,jupbnd,k,ksgn, - * ktmin,last,limit,maxerr,neval,nres,nrmax,numrl2 - logical extrap,noext -c - dimension alist(limit),blist(limit),elist(limit),iord(limit), - * res3la(3),rlist(limit),rlist2(52) -c - external f -c -c the dimension of rlist2 is determined by the value of -c limexp in subroutine dqelg. -c -c -c list of major variables -c ----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c rlist2 - array of dimension at least (limexp+2), -c containing the part of the epsilon table -c wich is still needed for further computations -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the interval with largest error -c estimate -c errmax - elist(maxerr) -c erlast - error on the interval currently subdivided -c (before that subdivision has taken place) -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left subinterval -c *****2 - variable for the right subinterval -c last - index for subdivision -c nres - number of calls to the extrapolation routine -c numrl2 - number of elements currently in rlist2. if an -c appropriate approximation to the compounded -c integral has been obtained, it is put in -c rlist2(numrl2) after numrl2 has been increased -c by one. -c small - length of the smallest interval considered up -c to now, multiplied by 1.5 -c erlarg - sum of the errors over the intervals larger -c than the smallest interval considered up to now -c extrap - logical variable denoting that the routine -c is attempting to perform extrapolation. i.e. -c before subdividing the smallest interval we -c try to decrease the value of erlarg. 
-c noext - logical variable denoting that extrapolation -c is no longer allowed (true-value) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c oflow is the largest positive magnitude. -c -c***first executable statement dqagie - epmach = d1mach(4) -c -c test on validity of parameters -c ----------------------------- -c - ier = 0 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - alist(1) = 0.0d+00 - blist(1) = 0.1d+01 - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - iord(1) = 0 - if(epsabs.le.0.0d+00.and.epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28)) - * ier = 6 - if(ier.eq.6) go to 999 -c -c -c first approximation to the integral -c ----------------------------------- -c -c determine the interval to be mapped onto (0,1). -c if inf = 2 the integral is computed as i = i1+i2, where -c i1 = integral of f over (-infinity,0), -c i2 = integral of f over (0,+infinity). -c - boun = bound - if(inf.eq.2) boun = 0.0d+00 - call dqk15i(f,boun,inf,0.0d+00,0.1d+01,result,abserr, - * defabs,resabs) -c -c test on accuracy -c - last = 1 - rlist(1) = result - elist(1) = abserr - iord(1) = 1 - dres = dabs(result) - errbnd = dmax1(epsabs,epsrel*dres) - if(abserr.le.1.0d+02*epmach*defabs.and.abserr.gt.errbnd) ier = 2 - if(limit.eq.1) ier = 1 - if(ier.ne.0.or.(abserr.le.errbnd.and.abserr.ne.resabs).or. - * abserr.eq.0.0d+00) go to 130 -c -c initialization -c -------------- -c - uflow = d1mach(1) - oflow = d1mach(2) - rlist2(1) = result - errmax = abserr - maxerr = 1 - area = result - errsum = abserr - abserr = oflow - nrmax = 1 - nres = 0 - ktmin = 0 - numrl2 = 2 - extrap = .false. - noext = .false. - ierro = 0 - iroff1 = 0 - iroff2 = 0 - iroff3 = 0 - ksgn = -1 - if(dres.ge.(0.1d+01-0.5d+02*epmach)*defabs) ksgn = 1 -c -c main do-loop -c ------------ -c - do 90 last = 2,limit -c -c bisect the subinterval with nrmax-th largest error estimate. 
-c - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - a2 = b1 - b2 = blist(maxerr) - erlast = errmax - call dqk15i(f,boun,inf,a1,b1,area1,error1,resabs,defab1) - call dqk15i(f,boun,inf,a2,b2,area2,error2,resabs,defab2) -c -c improve previous approximations to integral -c and error and test for accuracy. -c - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(defab1.eq.error1.or.defab2.eq.error2)go to 15 - if(dabs(rlist(maxerr)-area12).gt.0.1d-04*dabs(area12) - * .or.erro12.lt.0.99d+00*errmax) go to 10 - if(extrap) iroff2 = iroff2+1 - if(.not.extrap) iroff1 = iroff1+1 - 10 if(last.gt.10.and.erro12.gt.errmax) iroff3 = iroff3+1 - 15 rlist(maxerr) = area1 - rlist(last) = area2 - errbnd = dmax1(epsabs,epsrel*dabs(area)) -c -c test for roundoff error and eventually set error flag. -c - if(iroff1+iroff2.ge.10.or.iroff3.ge.20) ier = 2 - if(iroff2.ge.5) ierro = 3 -c -c set error flag in the case that the number of -c subintervals equals limit. -c - if(last.eq.limit) ier = 1 -c -c set error flag in the case of bad integrand behaviour -c at some points of the integration range. -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03*epmach)* - * (dabs(a2)+0.1d+04*uflow)) ier = 4 -c -c append the newly-created intervals to the list. -c - if(error2.gt.error1) go to 20 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 30 - 20 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with nrmax-th largest error estimate (to be bisected next). 
-c - 30 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) - if(errsum.le.errbnd) go to 115 - if(ier.ne.0) go to 100 - if(last.eq.2) go to 80 - if(noext) go to 90 - erlarg = erlarg-erlast - if(dabs(b1-a1).gt.small) erlarg = erlarg+erro12 - if(extrap) go to 40 -c -c test whether the interval to be bisected next is the -c smallest interval. -c - if(dabs(blist(maxerr)-alist(maxerr)).gt.small) go to 90 - extrap = .true. - nrmax = 2 - 40 if(ierro.eq.3.or.erlarg.le.ertest) go to 60 -c -c the smallest interval has the largest error. -c before bisecting decrease the sum of the errors over the -c larger intervals (erlarg) and perform extrapolation. -c - id = nrmax - jupbnd = last - if(last.gt.(2+limit/2)) jupbnd = limit+3-last - do 50 k = id,jupbnd - maxerr = iord(nrmax) - errmax = elist(maxerr) - if(dabs(blist(maxerr)-alist(maxerr)).gt.small) go to 90 - nrmax = nrmax+1 - 50 continue -c -c perform extrapolation. -c - 60 numrl2 = numrl2+1 - rlist2(numrl2) = area - call dqelg(numrl2,rlist2,reseps,abseps,res3la,nres) - ktmin = ktmin+1 - if(ktmin.gt.5.and.abserr.lt.0.1d-02*errsum) ier = 5 - if(abseps.ge.abserr) go to 70 - ktmin = 0 - abserr = abseps - result = reseps - correc = erlarg - ertest = dmax1(epsabs,epsrel*dabs(reseps)) - if(abserr.le.ertest) go to 100 -c -c prepare bisection of the smallest interval. -c - 70 if(numrl2.eq.1) noext = .true. - if(ier.eq.5) go to 100 - maxerr = iord(1) - errmax = elist(maxerr) - nrmax = 1 - extrap = .false. - small = small*0.5d+00 - erlarg = errsum - go to 90 - 80 small = 0.375d+00 - erlarg = errsum - ertest = errbnd - rlist2(2) = area - 90 continue -c -c set final result and error estimate. 
-c ------------------------------------ -c - 100 if(abserr.eq.oflow) go to 115 - if((ier+ierro).eq.0) go to 110 - if(ierro.eq.3) abserr = abserr+correc - if(ier.eq.0) ier = 3 - if(result.ne.0.0d+00.and.area.ne.0.0d+00)go to 105 - if(abserr.gt.errsum)go to 115 - if(area.eq.0.0d+00) go to 130 - go to 110 - 105 if(abserr/dabs(result).gt.errsum/dabs(area))go to 115 -c -c test on divergence -c - 110 if(ksgn.eq.(-1).and.dmax1(dabs(result),dabs(area)).le. - * defabs*0.1d-01) go to 130 - if(0.1d-01.gt.(result/area).or.(result/area).gt.0.1d+03. - *or.errsum.gt.dabs(area)) ier = 6 - go to 130 -c -c compute global integral sum. -c - 115 result = 0.0d+00 - do 120 k = 1,last - result = result+rlist(k) - 120 continue - abserr = errsum - 130 neval = 30*last-15 - if(inf.eq.2) neval = 2*neval - if(ier.gt.2) ier=ier-1 - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqagp.f b/scipy-0.10.1/scipy/integrate/quadpack/dqagp.f deleted file mode 100644 index 9918ad036d..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqagp.f +++ /dev/null @@ -1,225 +0,0 @@ - subroutine dqagp(f,a,b,npts2,points,epsabs,epsrel,result,abserr, - * neval,ier,leniw,lenw,last,iwork,work) -c***begin prologue dqagp -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1 -c***keywords automatic integrator, general-purpose, -c singularities at user specified points, -c extrapolation, globally adaptive -c***author piessens,robert,appl. math. & progr. div - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f over (a,b), -c hopefully satisfying following claim for accuracy -c break points of the integration interval, where local -c difficulties of the integrand may occur (e.g. -c singularities, discontinuities), are provided by the user. 
-c***description -c -c computation of a definite integral -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c npts2 - integer -c number equal to two more than the number of -c user-supplied break points within the integration -c range, npts.ge.2. -c if npts2.lt.2, the routine will end with ier = 6. -c -c points - double precision -c vector of dimension npts2, the first (npts2-2) -c elements of which are the user provided break -c points. if these points do not constitute an -c ascending sequence there will be an automatic -c sorting. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine. -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit (and taking the according dimension -c adjustments into account). 
however, if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. if -c the position of a local difficulty can be -c determined (i.e. singularity, -c discontinuity within the interval), it -c should be supplied to the routine as an -c element of the vector points. if necessary -c an appropriate special-purpose integrator -c must be used, which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. -c it is presumed that the requested -c tolerance cannot be achieved, and that -c the returned result is the best which -c can be obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier.gt.0. -c = 6 the input is invalid because -c npts2.lt.2 or -c break points are specified outside -c the integration range or -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c result, abserr, neval, last are set to -c zero. exept when leniw or lenw or npts2 is -c invalid, iwork(1), iwork(limit+1), -c work(limit*2+1) and work(limit*3+1) -c are set to zero. -c work(1) is set to a and work(limit+1) -c to b (where limit = (leniw-npts2)/2). -c -c dimensioning parameters -c leniw - integer -c dimensioning parameter for iwork -c leniw determines limit = (leniw-npts2)/2, -c which is the maximum number of subintervals in the -c partition of the given integration interval (a,b), -c leniw.ge.(3*npts2-2). -c if leniw.lt.(3*npts2-2), the routine will end with -c ier = 6. 
-c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least leniw*2-npts2. -c if lenw.lt.leniw*2-npts2, the routine will end -c with ier = 6. -c -c last - integer -c on return, last equals the number of subintervals -c produced in the subdivision process, which -c determines the number of significant elements -c actually in the work arrays. -c -c work arrays -c iwork - integer -c vector of dimension at least leniw. on return, -c the first k elements of which contain -c pointers to the error estimates over the -c subintervals, such that work(limit*3+iwork(1)),..., -c work(limit*3+iwork(k)) form a decreasing -c sequence, with k = last if last.le.(limit/2+2), and -c k = limit+1-last otherwise -c iwork(limit+1), ...,iwork(limit+last) contain the -c subdivision levels of the subintervals, i.e. -c if (aa,bb) is a subinterval of (p1,p2) -c where p1 as well as p2 is a user-provided -c break point or integration limit, then (aa,bb) has -c level l if abs(bb-aa) = abs(p2-p1)*2**(-l), -c iwork(limit*2+1), ..., iwork(limit*2+npts2) have -c no significance for the user, -c note that limit = (leniw-npts2)/2. -c -c work - double precision -c vector of dimension at least lenw -c on return -c work(1), ..., work(last) contain the left -c end points of the subintervals in the -c partition of (a,b), -c work(limit+1), ..., work(limit+last) contain -c the right end points, -c work(limit*2+1), ..., work(limit*2+last) contain -c the integral approximations over the subintervals, -c work(limit*3+1), ..., work(limit*3+last) -c contain the corresponding error estimates, -c work(limit*4+1), ..., work(limit*4+npts2) -c contain the integration limits and the -c break points sorted in an ascending sequence. -c note that limit = (leniw-npts2)/2. 
-c -c***references (none) -c***routines called dqagpe,xerror -c***end prologue dqagp -c - double precision a,abserr,b,epsabs,epsrel,f,points,result,work - integer ier,iwork,last,leniw,lenw,limit,lvl,l1,l2,l3,l4,neval, - * npts2 -c - dimension iwork(leniw),points(npts2),work(lenw) -c - external f -c -c check validity of limit and lenw. -c -c***first executable statement dqagp - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(leniw.lt.(3*npts2-2).or.lenw.lt.(leniw*2-npts2).or.npts2.lt.2) - * go to 10 -c -c prepare call for dqagpe. -c - limit = (leniw-npts2)/2 - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 - l4 = limit+l3 -c - call dqagpe(f,a,b,npts2,points,epsabs,epsrel,limit,result,abserr, - * neval,ier,work(1),work(l1),work(l2),work(l3),work(l4), - * iwork(1),iwork(l1),iwork(l2),last) -c -c call error handler if necessary. -c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqagp',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqagpe.f b/scipy-0.10.1/scipy/integrate/quadpack/dqagpe.f deleted file mode 100644 index eb5b071bcc..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqagpe.f +++ /dev/null @@ -1,550 +0,0 @@ - subroutine dqagpe(f,a,b,npts2,points,epsabs,epsrel,limit,result, - * abserr,neval,ier,alist,blist,rlist,elist,pts,iord,level,ndin, - * last) -c***begin prologue dqagpe -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1 -c***keywords automatic integrator, general-purpose, -c singularities at user specified points, -c extrapolation, globally adaptive. -c***author piessens,robert ,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f over (a,b), hopefully -c satisfying following claim for accuracy abs(i-result).le. -c max(epsabs,epsrel*abs(i)). 
break points of the integration -c interval, where local difficulties of the integrand may -c occur(e.g. singularities,discontinuities),provided by user. -c***description -c -c computation of a definite integral -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c npts2 - integer -c number equal to two more than the number of -c user-supplied break points within the integration -c range, npts2.ge.2. -c if npts2.lt.2, the routine will end with ier = 6. -c -c points - double precision -c vector of dimension npts2, the first (npts2-2) -c elements of which are the user provided break -c points. if these points do not constitute an -c ascending sequence there will be an automatic -c sorting. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c limit - integer -c gives an upper bound on the number of subintervals -c in the partition of (a,b), limit.ge.npts2 -c if limit.lt.npts2, the routine will end with -c ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine. -c the estimates for integral and error are -c less reliable. 
it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit (and taking the according dimension -c adjustments into account). however, if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. if -c the position of a local difficulty can be -c determined (i.e. singularity, -c discontinuity within the interval), it -c should be supplied to the routine as an -c element of the vector points. if necessary -c an appropriate special-purpose integrator -c must be used, which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. it is presumed that -c the requested tolerance cannot be -c achieved, and that the returned result is -c the best which can be obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier.gt.0. -c = 6 the input is invalid because -c npts2.lt.2 or -c break points are specified outside -c the integration range or -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or limit.lt.npts2. -c result, abserr, neval, last, rlist(1), -c and elist(1) are set to zero. alist(1) and -c blist(1) are set to a and b respectively. 
-c -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left end points -c of the subintervals in the partition of the given -c integration range (a,b) -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right end points -c of the subintervals in the partition of the given -c integration range (a,b) -c -c rlist - double precision -c vector of dimension at least limit, the first -c last elements of which are the integral -c approximations on the subintervals -c -c elist - double precision -c vector of dimension at least limit, the first -c last elements of which are the moduli of the -c absolute error estimates on the subintervals -c -c pts - double precision -c vector of dimension at least npts2, containing the -c integration limits and the break points of the -c interval in ascending sequence. -c -c level - integer -c vector of dimension at least limit, containing the -c subdivision levels of the subinterval, i.e. if -c (aa,bb) is a subinterval of (p1,p2) where p1 as -c well as p2 is a user-provided break point or -c integration limit, then (aa,bb) has level l if -c abs(bb-aa) = abs(p2-p1)*2**(-l). -c -c ndin - integer -c vector of dimension at least npts2, after first -c integration over the intervals (pts(i)),pts(i+1), -c i = 0,1, ..., npts2-2, the error estimates over -c some of the intervals may have been increased -c artificially, in order to put their subdivision -c forward. if this happens for the subinterval -c numbered k, ndin(k) is put to 1, otherwise -c ndin(k) = 0. 
-c -c iord - integer -c vector of dimension at least limit, the first k -c elements of which are pointers to the -c error estimates over the subintervals, -c such that elist(iord(1)), ..., elist(iord(k)) -c form a decreasing sequence, with k = last -c if last.le.(limit/2+2), and k = limit+1-last -c otherwise -c -c last - integer -c number of subintervals actually produced in the -c subdivisions process -c -c***references (none) -c***routines called d1mach,dqelg,dqk21,dqpsrt -c***end prologue dqagpe - double precision a,abseps,abserr,alist,area,area1,area12,area2,a1, - * a2,b,blist,b1,b2,correc,dabs,defabs,defab1,defab2,dmax1,dmin1, - * dres,d1mach,elist,epmach,epsabs,epsrel,erlarg,erlast,errbnd, - * errmax,error1,erro12,error2,errsum,ertest,f,oflow,points,pts, - * resa,resabs,reseps,result,res3la,rlist,rlist2,sign,temp,uflow - integer i,id,ier,ierro,ind1,ind2,iord,ip1,iroff1,iroff2,iroff3,j, - * jlow,jupbnd,k,ksgn,ktmin,last,levcur,level,levmax,limit,maxerr, - * ndin,neval,nint,nintp1,npts,npts2,nres,nrmax,numrl2 - logical extrap,noext -c -c - dimension alist(limit),blist(limit),elist(limit),iord(limit), - * level(limit),ndin(npts2),points(npts2),pts(npts2),res3la(3), - * rlist(limit),rlist2(52) -c - external f -c -c the dimension of rlist2 is determined by the value of -c limexp in subroutine epsalg (rlist2 should be of dimension -c (limexp+2) at least). 
-c -c -c list of major variables -c ----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c rlist2 - array of dimension at least limexp+2 -c containing the part of the epsilon table which -c is still needed for further computations -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the interval with largest error -c estimate -c errmax - elist(maxerr) -c erlast - error on the interval currently subdivided -c (before that subdivision has taken place) -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left subinterval -c *****2 - variable for the right subinterval -c last - index for subdivision -c nres - number of calls to the extrapolation routine -c numrl2 - number of elements in rlist2. if an appropriate -c approximation to the compounded integral has -c been obtained, it is put in rlist2(numrl2) after -c numrl2 has been increased by one. -c erlarg - sum of the errors over the intervals larger -c than the smallest interval considered up to now -c extrap - logical variable denoting that the routine -c is attempting to perform extrapolation. i.e. -c before subdividing the smallest interval we -c try to decrease the value of erlarg. -c noext - logical variable denoting that extrapolation is -c no longer allowed (true-value) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c oflow is the largest positive magnitude. 
-c -c***first executable statement dqagpe - epmach = d1mach(4) -c -c test on validity of parameters -c ----------------------------- -c - ier = 0 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - alist(1) = a - blist(1) = b - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - iord(1) = 0 - level(1) = 0 - npts = npts2-2 - if(npts2.lt.2.or.limit.le.npts.or.(epsabs.le.0.0d+00.and. - * epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28))) ier = 6 - if(ier.eq.6) go to 999 -c -c if any break points are provided, sort them into an -c ascending sequence. -c - sign = 1.0d+00 - if(a.gt.b) sign = -1.0d+00 - pts(1) = dmin1(a,b) - if(npts.eq.0) go to 15 - do 10 i = 1,npts - pts(i+1) = points(i) - 10 continue - 15 pts(npts+2) = dmax1(a,b) - nint = npts+1 - a1 = pts(1) - if(npts.eq.0) go to 40 - nintp1 = nint+1 - do 20 i = 1,nint - ip1 = i+1 - do 20 j = ip1,nintp1 - if(pts(i).le.pts(j)) go to 20 - temp = pts(i) - pts(i) = pts(j) - pts(j) = temp - 20 continue - if(pts(1).ne.dmin1(a,b).or.pts(nintp1).ne.dmax1(a,b)) ier = 6 - if(ier.eq.6) go to 999 -c -c compute first integral and error approximations. -c ------------------------------------------------ -c - 40 resabs = 0.0d+00 - do 50 i = 1,nint - b1 = pts(i+1) - call dqk21(f,a1,b1,area1,error1,defabs,resa) - abserr = abserr+error1 - result = result+area1 - ndin(i) = 0 - if(error1.eq.resa.and.error1.ne.0.0d+00) ndin(i) = 1 - resabs = resabs+defabs - level(i) = 0 - elist(i) = error1 - alist(i) = a1 - blist(i) = b1 - rlist(i) = area1 - iord(i) = i - a1 = b1 - 50 continue - errsum = 0.0d+00 - do 55 i = 1,nint - if(ndin(i).eq.1) elist(i) = abserr - errsum = errsum+elist(i) - 55 continue -c -c test on accuracy. 
-c - last = nint - neval = 21*nint - dres = dabs(result) - errbnd = dmax1(epsabs,epsrel*dres) - if(abserr.le.0.1d+03*epmach*resabs.and.abserr.gt.errbnd) ier = 2 - if(nint.eq.1) go to 80 - do 70 i = 1,npts - jlow = i+1 - ind1 = iord(i) - do 60 j = jlow,nint - ind2 = iord(j) - if(elist(ind1).gt.elist(ind2)) go to 60 - ind1 = ind2 - k = j - 60 continue - if(ind1.eq.iord(i)) go to 70 - iord(k) = iord(i) - iord(i) = ind1 - 70 continue - if(limit.lt.npts2) ier = 1 - 80 if(ier.ne.0.or.abserr.le.errbnd) go to 210 -c -c initialization -c -------------- -c - rlist2(1) = result - maxerr = iord(1) - errmax = elist(maxerr) - area = result - nrmax = 1 - nres = 0 - numrl2 = 1 - ktmin = 0 - extrap = .false. - noext = .false. - erlarg = errsum - ertest = errbnd - levmax = 1 - iroff1 = 0 - iroff2 = 0 - iroff3 = 0 - ierro = 0 - uflow = d1mach(1) - oflow = d1mach(2) - abserr = oflow - ksgn = -1 - if(dres.ge.(0.1d+01-0.5d+02*epmach)*resabs) ksgn = 1 -c -c main do-loop -c ------------ -c - do 160 last = npts2,limit -c -c bisect the subinterval with the nrmax-th largest error -c estimate. -c - levcur = level(maxerr)+1 - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - a2 = b1 - b2 = blist(maxerr) - erlast = errmax - call dqk21(f,a1,b1,area1,error1,resa,defab1) - call dqk21(f,a2,b2,area2,error2,resa,defab2) -c -c improve previous approximations to integral -c and error and test for accuracy. 
-c - neval = neval+42 - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(defab1.eq.error1.or.defab2.eq.error2) go to 95 - if(dabs(rlist(maxerr)-area12).gt.0.1d-04*dabs(area12) - * .or.erro12.lt.0.99d+00*errmax) go to 90 - if(extrap) iroff2 = iroff2+1 - if(.not.extrap) iroff1 = iroff1+1 - 90 if(last.gt.10.and.erro12.gt.errmax) iroff3 = iroff3+1 - 95 level(maxerr) = levcur - level(last) = levcur - rlist(maxerr) = area1 - rlist(last) = area2 - errbnd = dmax1(epsabs,epsrel*dabs(area)) -c -c test for roundoff error and eventually set error flag. -c - if(iroff1+iroff2.ge.10.or.iroff3.ge.20) ier = 2 - if(iroff2.ge.5) ierro = 3 -c -c set error flag in the case that the number of -c subintervals equals limit. -c - if(last.eq.limit) ier = 1 -c -c set error flag in the case of bad integrand behaviour -c at a point of the integration range -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03*epmach)* - * (dabs(a2)+0.1d+04*uflow)) ier = 4 -c -c append the newly-created intervals to the list. -c - if(error2.gt.error1) go to 100 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 110 - 100 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with nrmax-th largest error estimate (to be bisected next). -c - 110 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) -c ***jump out of do-loop - if(errsum.le.errbnd) go to 190 -c ***jump out of do-loop - if(ier.ne.0) go to 170 - if(noext) go to 160 - erlarg = erlarg-erlast - if(levcur+1.le.levmax) erlarg = erlarg+erro12 - if(extrap) go to 120 -c -c test whether the interval to be bisected next is the -c smallest interval. -c - if(level(maxerr)+1.le.levmax) go to 160 - extrap = .true. 
- nrmax = 2 - 120 if(ierro.eq.3.or.erlarg.le.ertest) go to 140 -c -c the smallest interval has the largest error. -c before bisecting decrease the sum of the errors over -c the larger intervals (erlarg) and perform extrapolation. -c - id = nrmax - jupbnd = last - if(last.gt.(2+limit/2)) jupbnd = limit+3-last - do 130 k = id,jupbnd - maxerr = iord(nrmax) - errmax = elist(maxerr) -c ***jump out of do-loop - if(level(maxerr)+1.le.levmax) go to 160 - nrmax = nrmax+1 - 130 continue -c -c perform extrapolation. -c - 140 numrl2 = numrl2+1 - rlist2(numrl2) = area - if(numrl2.le.2) go to 155 - call dqelg(numrl2,rlist2,reseps,abseps,res3la,nres) - ktmin = ktmin+1 - if(ktmin.gt.5.and.abserr.lt.0.1d-02*errsum) ier = 5 - if(abseps.ge.abserr) go to 150 - ktmin = 0 - abserr = abseps - result = reseps - correc = erlarg - ertest = dmax1(epsabs,epsrel*dabs(reseps)) -c ***jump out of do-loop - if(abserr.lt.ertest) go to 170 -c -c prepare bisection of the smallest interval. -c - 150 if(numrl2.eq.1) noext = .true. - if(ier.ge.5) go to 170 - 155 maxerr = iord(1) - errmax = elist(maxerr) - nrmax = 1 - extrap = .false. - levmax = levmax+1 - erlarg = errsum - 160 continue -c -c set the final result. -c --------------------- -c -c - 170 if(abserr.eq.oflow) go to 190 - if((ier+ierro).eq.0) go to 180 - if(ierro.eq.3) abserr = abserr+correc - if(ier.eq.0) ier = 3 - if(result.ne.0.0d+00.and.area.ne.0.0d+00)go to 175 - if(abserr.gt.errsum)go to 190 - if(area.eq.0.0d+00) go to 210 - go to 180 - 175 if(abserr/dabs(result).gt.errsum/dabs(area))go to 190 -c -c test on divergence. -c - 180 if(ksgn.eq.(-1).and.dmax1(dabs(result),dabs(area)).le. - * resabs*0.1d-01) go to 210 - if(0.1d-01.gt.(result/area).or.(result/area).gt.0.1d+03.or. - * errsum.gt.dabs(area)) ier = 6 - go to 210 -c -c compute global integral sum. 
-c - 190 result = 0.0d+00 - do 200 k = 1,last - result = result+rlist(k) - 200 continue - abserr = errsum - 210 if(ier.gt.2) ier = ier-1 - result = result*sign - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqags.f b/scipy-0.10.1/scipy/integrate/quadpack/dqags.f deleted file mode 100644 index 9de499962d..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqags.f +++ /dev/null @@ -1,188 +0,0 @@ - subroutine dqags(f,a,b,epsabs,epsrel,result,abserr,neval,ier, - * limit,lenw,last,iwork,work) -c***begin prologue dqags -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a1 -c***keywords automatic integrator, general-purpose, -c (end-point) singularities, extrapolation, -c globally adaptive -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & prog. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f over (a,b), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c computation of a definite integral -c standard fortran subroutine -c double precision version -c -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. 
-c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more sub- -c divisions by increasing the value of limit -c (and taking the according dimension -c adjustments into account. however, if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. if -c the position of a local difficulty can be -c determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling the -c integrator on the subranges. if possible, -c an appropriate special-purpose integrator -c should be used, which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is detec- -c ted, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour -c occurs at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. it is presumed that -c the requested tolerance cannot be -c achieved, and that the returned result is -c the best which can be obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier. 
-c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28) -c or limit.lt.1 or lenw.lt.limit*4. -c result, abserr, neval, last are set to -c zero.except when limit or lenw is invalid, -c iwork(1), work(limit*2+1) and -c work(limit*3+1) are set to zero, work(1) -c is set to a and work(limit+1) to b. -c -c dimensioning parameters -c limit - integer -c dimensioning parameter for iwork -c limit determines the maximum number of subintervals -c in the partition of the given integration interval -c (a,b), limit.ge.1. -c if limit.lt.1, the routine will end with ier = 6. -c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least limit*4. -c if lenw.lt.limit*4, the routine will end -c with ier = 6. -c -c last - integer -c on return, last equals the number of subintervals -c produced in the subdivision process, detemines the -c number of significant elements actually in the work -c arrays. -c -c work arrays -c iwork - integer -c vector of dimension at least limit, the first k -c elements of which contain pointers -c to the error estimates over the subintervals -c such that work(limit*3+iwork(1)),... , -c work(limit*3+iwork(k)) form a decreasing -c sequence, with k = last if last.le.(limit/2+2), -c and k = limit+1-last otherwise -c -c work - double precision -c vector of dimension at least lenw -c on return -c work(1), ..., work(last) contain the left -c end-points of the subintervals in the -c partition of (a,b), -c work(limit+1), ..., work(limit+last) contain -c the right end-points, -c work(limit*2+1), ..., work(limit*2+last) contain -c the integral approximations over the subintervals, -c work(limit*3+1), ..., work(limit*3+last) -c contain the error estimates. 
-c -c***references (none) -c***routines called dqagse,xerror -c***end prologue dqags -c -c - double precision a,abserr,b,epsabs,epsrel,f,result,work - integer ier,iwork,last,lenw,limit,lvl,l1,l2,l3,neval -c - dimension iwork(limit),work(lenw) -c - external f -c -c check validity of limit and lenw. -c -c***first executable statement dqags - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(limit.lt.1.or.lenw.lt.limit*4) go to 10 -c -c prepare call for dqagse. -c - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 -c - call dqagse(f,a,b,epsabs,epsrel,limit,result,abserr,neval, - * ier,work(1),work(l1),work(l2),work(l3),iwork,last) -c -c call error handler if necessary. -c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqags',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqagse.f b/scipy-0.10.1/scipy/integrate/quadpack/dqagse.f deleted file mode 100644 index 7b845fa345..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqagse.f +++ /dev/null @@ -1,444 +0,0 @@ - subroutine dqagse(f,a,b,epsabs,epsrel,limit,result,abserr,neval, - * ier,alist,blist,rlist,elist,iord,last) -c***begin prologue dqagse -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a1 -c***keywords automatic integrator, general-purpose, -c (end point) singularities, extrapolation, -c globally adaptive -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f over (a,b), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c computation of a definite integral -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). 
the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c limit - integer -c gives an upperbound on the number of subintervals -c in the partition of (a,b) -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more sub- -c divisions by increasing the value of limit -c (and taking the according dimension -c adjustments into account). however, if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. if -c the position of a local difficulty can be -c determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling the -c integrator on the subranges. if possible, -c an appropriate special-purpose integrator -c should be used, which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is detec- -c ted, which prevents the requested -c tolerance from being achieved. 
-c the error may be under-estimated. -c = 3 extremely bad integrand behaviour -c occurs at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. -c it is presumed that the requested -c tolerance cannot be achieved, and that the -c returned result is the best which can be -c obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier. -c = 6 the input is invalid, because -c epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28). -c result, abserr, neval, last, rlist(1), -c iord(1) and elist(1) are set to zero. -c alist(1) and blist(1) are set to a and b -c respectively. -c -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left end points -c of the subintervals in the partition of the -c given integration range (a,b) -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right end points -c of the subintervals in the partition of the given -c integration range (a,b) -c -c rlist - double precision -c vector of dimension at least limit, the first -c last elements of which are the integral -c approximations on the subintervals -c -c elist - double precision -c vector of dimension at least limit, the first -c last elements of which are the moduli of the -c absolute error estimates on the subintervals -c -c iord - integer -c vector of dimension at least limit, the first k -c elements of which are pointers to the -c error estimates over the subintervals, -c such that elist(iord(1)), ..., elist(iord(k)) -c form a decreasing sequence, with k = last -c if last.le.(limit/2+2), and k = limit+1-last -c otherwise -c -c last - integer -c number of subintervals actually produced in the -c subdivision process -c -c***references (none) -c***routines called 
d1mach,dqelg,dqk21,dqpsrt -c***end prologue dqagse -c - double precision a,abseps,abserr,alist,area,area1,area12,area2,a1, - * a2,b,blist,b1,b2,correc,dabs,defabs,defab1,defab2,d1mach,dmax1, - * dres,elist,epmach,epsabs,epsrel,erlarg,erlast,errbnd,errmax, - * error1,error2,erro12,errsum,ertest,f,oflow,resabs,reseps,result, - * res3la,rlist,rlist2,small,uflow - integer id,ier,ierro,iord,iroff1,iroff2,iroff3,jupbnd,k,ksgn, - * ktmin,last,limit,maxerr,neval,nres,nrmax,numrl2 - logical extrap,noext -c - dimension alist(limit),blist(limit),elist(limit),iord(limit), - * res3la(3),rlist(limit),rlist2(52) -c - external f -c -c the dimension of rlist2 is determined by the value of -c limexp in subroutine dqelg (rlist2 should be of dimension -c (limexp+2) at least). -c -c list of major variables -c ----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c rlist2 - array of dimension at least limexp+2 containing -c the part of the epsilon table which is still -c needed for further computations -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the interval with largest error -c estimate -c errmax - elist(maxerr) -c erlast - error on the interval currently subdivided -c (before that subdivision has taken place) -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left interval -c *****2 - variable for the right interval -c last - index for subdivision -c nres - number of calls to the extrapolation routine -c numrl2 - number of elements currently in rlist2. if an -c appropriate approximation to the compounded -c integral has been obtained it is put in -c rlist2(numrl2) after numrl2 has been increased -c by one. 
-c small - length of the smallest interval considered up -c to now, multiplied by 1.5 -c erlarg - sum of the errors over the intervals larger -c than the smallest interval considered up to now -c extrap - logical variable denoting that the routine is -c attempting to perform extrapolation i.e. before -c subdividing the smallest interval we try to -c decrease the value of erlarg. -c noext - logical variable denoting that extrapolation -c is no longer allowed (true value) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c oflow is the largest positive magnitude. -c -c***first executable statement dqagse - epmach = d1mach(4) -c -c test on validity of parameters -c ------------------------------ - ier = 0 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - alist(1) = a - blist(1) = b - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - if(epsabs.le.0.0d+00.and.epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28)) - * ier = 6 - if(ier.eq.6) go to 999 -c -c first approximation to the integral -c ----------------------------------- -c - uflow = d1mach(1) - oflow = d1mach(2) - ierro = 0 - call dqk21(f,a,b,result,abserr,defabs,resabs) -c -c test on accuracy. -c - dres = dabs(result) - errbnd = dmax1(epsabs,epsrel*dres) - last = 1 - rlist(1) = result - elist(1) = abserr - iord(1) = 1 - if(abserr.le.1.0d+02*epmach*defabs.and.abserr.gt.errbnd) ier = 2 - if(limit.eq.1) ier = 1 - if(ier.ne.0.or.(abserr.le.errbnd.and.abserr.ne.resabs).or. - * abserr.eq.0.0d+00) go to 140 -c -c initialization -c -------------- -c - rlist2(1) = result - errmax = abserr - maxerr = 1 - area = result - errsum = abserr - abserr = oflow - nrmax = 1 - nres = 0 - numrl2 = 2 - ktmin = 0 - extrap = .false. - noext = .false. 
- iroff1 = 0 - iroff2 = 0 - iroff3 = 0 - ksgn = -1 - if(dres.ge.(0.1d+01-0.5d+02*epmach)*defabs) ksgn = 1 -c -c main do-loop -c ------------ -c - do 90 last = 2,limit -c -c bisect the subinterval with the nrmax-th largest error -c estimate. -c - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - a2 = b1 - b2 = blist(maxerr) - erlast = errmax - call dqk21(f,a1,b1,area1,error1,resabs,defab1) - call dqk21(f,a2,b2,area2,error2,resabs,defab2) -c -c improve previous approximations to integral -c and error and test for accuracy. -c - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(defab1.eq.error1.or.defab2.eq.error2) go to 15 - if(dabs(rlist(maxerr)-area12).gt.0.1d-04*dabs(area12) - * .or.erro12.lt.0.99d+00*errmax) go to 10 - if(extrap) iroff2 = iroff2+1 - if(.not.extrap) iroff1 = iroff1+1 - 10 if(last.gt.10.and.erro12.gt.errmax) iroff3 = iroff3+1 - 15 rlist(maxerr) = area1 - rlist(last) = area2 - errbnd = dmax1(epsabs,epsrel*dabs(area)) -c -c test for roundoff error and eventually set error flag. -c - if(iroff1+iroff2.ge.10.or.iroff3.ge.20) ier = 2 - if(iroff2.ge.5) ierro = 3 -c -c set error flag in the case that the number of subintervals -c equals limit. -c - if(last.eq.limit) ier = 1 -c -c set error flag in the case of bad integrand behaviour -c at a point of the integration range. -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03*epmach)* - * (dabs(a2)+0.1d+04*uflow)) ier = 4 -c -c append the newly-created intervals to the list. 
-c - if(error2.gt.error1) go to 20 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 30 - 20 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with nrmax-th largest error estimate (to be bisected next). -c - 30 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) -c ***jump out of do-loop - if(errsum.le.errbnd) go to 115 -c ***jump out of do-loop - if(ier.ne.0) go to 100 - if(last.eq.2) go to 80 - if(noext) go to 90 - erlarg = erlarg-erlast - if(dabs(b1-a1).gt.small) erlarg = erlarg+erro12 - if(extrap) go to 40 -c -c test whether the interval to be bisected next is the -c smallest interval. -c - if(dabs(blist(maxerr)-alist(maxerr)).gt.small) go to 90 - extrap = .true. - nrmax = 2 - 40 if(ierro.eq.3.or.erlarg.le.ertest) go to 60 -c -c the smallest interval has the largest error. -c before bisecting decrease the sum of the errors over the -c larger intervals (erlarg) and perform extrapolation. -c - id = nrmax - jupbnd = last - if(last.gt.(2+limit/2)) jupbnd = limit+3-last - do 50 k = id,jupbnd - maxerr = iord(nrmax) - errmax = elist(maxerr) -c ***jump out of do-loop - if(dabs(blist(maxerr)-alist(maxerr)).gt.small) go to 90 - nrmax = nrmax+1 - 50 continue -c -c perform extrapolation. -c - 60 numrl2 = numrl2+1 - rlist2(numrl2) = area - call dqelg(numrl2,rlist2,reseps,abseps,res3la,nres) - ktmin = ktmin+1 - if(ktmin.gt.5.and.abserr.lt.0.1d-02*errsum) ier = 5 - if(abseps.ge.abserr) go to 70 - ktmin = 0 - abserr = abseps - result = reseps - correc = erlarg - ertest = dmax1(epsabs,epsrel*dabs(reseps)) -c ***jump out of do-loop - if(abserr.le.ertest) go to 100 -c -c prepare bisection of the smallest interval. -c - 70 if(numrl2.eq.1) noext = .true. 
- if(ier.eq.5) go to 100 - maxerr = iord(1) - errmax = elist(maxerr) - nrmax = 1 - extrap = .false. - small = small*0.5d+00 - erlarg = errsum - go to 90 - 80 small = dabs(b-a)*0.375d+00 - erlarg = errsum - ertest = errbnd - rlist2(2) = area - 90 continue -c -c set final result and error estimate. -c ------------------------------------ -c - 100 if(abserr.eq.oflow) go to 115 - if(ier+ierro.eq.0) go to 110 - if(ierro.eq.3) abserr = abserr+correc - if(ier.eq.0) ier = 3 - if(result.ne.0.0d+00.and.area.ne.0.0d+00) go to 105 - if(abserr.gt.errsum) go to 115 - if(area.eq.0.0d+00) go to 130 - go to 110 - 105 if(abserr/dabs(result).gt.errsum/dabs(area)) go to 115 -c -c test on divergence. -c - 110 if(ksgn.eq.(-1).and.dmax1(dabs(result),dabs(area)).le. - * defabs*0.1d-01) go to 130 - if(0.1d-01.gt.(result/area).or.(result/area).gt.0.1d+03 - * .or.errsum.gt.dabs(area)) ier = 6 - go to 130 -c -c compute global integral sum. -c - 115 result = 0.0d+00 - do 120 k = 1,last - result = result+rlist(k) - 120 continue - abserr = errsum - 130 if(ier.gt.2) ier = ier-1 - 140 neval = 42*last-21 - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawc.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawc.f deleted file mode 100644 index 18f270937c..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawc.f +++ /dev/null @@ -1,178 +0,0 @@ - subroutine dqawc(f,a,b,c,epsabs,epsrel,result,abserr,neval,ier, - * limit,lenw,last,iwork,work) -c***begin prologue dqawc -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1,j4 -c***keywords automatic integrator, special-purpose, -c cauchy principal value, -c clenshaw-curtis, globally adaptive -c***author piessens,robert ,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. 
- k.u.leuven -c***purpose the routine calculates an approximation result to a -c cauchy principal value i = integral of f*w over (a,b) -c (w(x) = 1/((x-c), c.ne.a, c.ne.b), hopefully satisfying -c following claim for accuracy -c abs(i-result).le.max(epsabe,epsrel*abs(i)). -c***description -c -c computation of a cauchy principal value -c standard fortran subroutine -c double precision version -c -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c under limit of integration -c -c b - double precision -c upper limit of integration -c -c c - parameter in the weight function, c.ne.a, c.ne.b. -c if c = a or c = b, the routine will end with -c ier = 6 . -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate or the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more sub- -c divisions by increasing the value of limit -c (and taking the according dimension -c adjustments into account). 
however, if -c this yields no improvement it is advised -c to analyze the integrand in order to -c determine the integration difficulties. -c if the position of a local difficulty -c can be determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling -c appropriate integrators on the subranges. -c = 2 the occurrence of roundoff error is detec- -c ted, which prevents the requested -c tolerance from being achieved. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 6 the input is invalid, because -c c = a or c = b or -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or limit.lt.1 or lenw.lt.limit*4. -c result, abserr, neval, last are set to -c zero. exept when lenw or limit is invalid, -c iwork(1), work(limit*2+1) and -c work(limit*3+1) are set to zero, work(1) -c is set to a and work(limit+1) to b. -c -c dimensioning parameters -c limit - integer -c dimensioning parameter for iwork -c limit determines the maximum number of subintervals -c in the partition of the given integration interval -c (a,b), limit.ge.1. -c if limit.lt.1, the routine will end with ier = 6. -c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least limit*4. -c if lenw.lt.limit*4, the routine will end with -c ier = 6. -c -c last - integer -c on return, last equals the number of subintervals -c produced in the subdivision process, which -c determines the number of significant elements -c actually in the work arrays. -c -c work arrays -c iwork - integer -c vector of dimension at least limit, the first k -c elements of which contain pointers -c to the error estimates over the subintervals, -c such that work(limit*3+iwork(1)), ... 
, -c work(limit*3+iwork(k)) form a decreasing -c sequence, with k = last if last.le.(limit/2+2), -c and k = limit+1-last otherwise -c -c work - double precision -c vector of dimension at least lenw -c on return -c work(1), ..., work(last) contain the left -c end points of the subintervals in the -c partition of (a,b), -c work(limit+1), ..., work(limit+last) contain -c the right end points, -c work(limit*2+1), ..., work(limit*2+last) contain -c the integral approximations over the subintervals, -c work(limit*3+1), ..., work(limit*3+last) -c contain the error estimates. -c -c***references (none) -c***routines called dqawce,xerror -c***end prologue dqawc -c - double precision a,abserr,b,c,epsabs,epsrel,f,result,work - integer ier,iwork,last,lenw,limit,lvl,l1,l2,l3,neval -c - dimension iwork(limit),work(lenw) -c - external f -c -c check validity of limit and lenw. -c -c***first executable statement dqawc - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(limit.lt.1.or.lenw.lt.limit*4) go to 10 -c -c prepare call for dqawce. -c - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 - call dqawce(f,a,b,c,epsabs,epsrel,limit,result,abserr,neval,ier, - * work(1),work(l1),work(l2),work(l3),iwork,last) -c -c call error handler if necessary. -c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqawc',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawce.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawce.f deleted file mode 100644 index 90418b2759..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawce.f +++ /dev/null @@ -1,326 +0,0 @@ - subroutine dqawce(f,a,b,c,epsabs,epsrel,limit,result,abserr,neval, - * ier,alist,blist,rlist,elist,iord,last) -c***begin prologue dqawce -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. 
h2a2a1,j4 -c***keywords automatic integrator, special-purpose, -c cauchy principal value, clenshaw-curtis method -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c*** purpose the routine calculates an approximation result to a -c cauchy principal value i = integral of f*w over (a,b) -c (w(x) = 1/(x-c), (c.ne.a, c.ne.b), hopefully satisfying -c following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)) -c***description -c -c computation of a cauchy principal value -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c c - double precision -c parameter in the weight function, c.ne.a, c.ne.b -c if c = a or c = b, the routine will end with -c ier = 6. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c limit - integer -c gives an upper bound on the number of subintervals -c in the partition of (a,b), limit.ge.1 -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for integral and error are -c less reliable. 
it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more sub- -c divisions by increasing the value of -c limit. however, if this yields no -c improvement it is advised to analyze the -c the integrand, in order to determine the -c the integration difficulties. if the -c position of a local difficulty can be -c determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling -c appropriate integrators on the subranges. -c = 2 the occurrence of roundoff error is detec- -c ted, which prevents the requested -c tolerance from being achieved. -c = 3 extremely bad integrand behaviour -c occurs at some interior points of -c the integration interval. -c = 6 the input is invalid, because -c c = a or c = b or -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or limit.lt.1. -c result, abserr, neval, rlist(1), elist(1), -c iord(1) and last are set to zero. alist(1) -c and blist(1) are set to a and b -c respectively. 
-c -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c rlist - double precision -c vector of dimension at least limit, the first -c last elements of which are the integral -c approximations on the subintervals -c -c elist - double precision -c vector of dimension limit, the first last -c elements of which are the moduli of the absolute -c error estimates on the subintervals -c -c iord - integer -c vector of dimension at least limit, the first k -c elements of which are pointers to the error -c estimates over the subintervals, so that -c elist(iord(1)), ..., elist(iord(k)) with k = last -c if last.le.(limit/2+2), and k = limit+1-last -c otherwise, form a decreasing sequence -c -c last - integer -c number of subintervals actually produced in -c the subdivision process -c -c***references (none) -c***routines called d1mach,dqc25c,dqpsrt -c***end prologue dqawce -c - double precision a,aa,abserr,alist,area,area1,area12,area2,a1,a2, - * b,bb,blist,b1,b2,c,dabs,dmax1,d1mach,elist,epmach,epsabs,epsrel, - * errbnd,errmax,error1,erro12,error2,errsum,f,result,rlist,uflow - integer ier,iord,iroff1,iroff2,k,krule,last,limit,maxerr,nev, - * neval,nrmax -c - dimension alist(limit),blist(limit),rlist(limit),elist(limit), - * iord(limit) -c - external f -c -c list of major variables -c ----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the 
interval with largest -c error estimate -c errmax - elist(maxerr) -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left subinterval -c *****2 - variable for the right subinterval -c last - index for subdivision -c -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqawce - epmach = d1mach(4) - uflow = d1mach(1) -c -c -c test on validity of parameters -c ------------------------------ -c - ier = 6 - neval = 0 - last = 0 - alist(1) = a - blist(1) = b - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - iord(1) = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(c.eq.a.or.c.eq.b.or.(epsabs.le.0.0d+00.and - * .epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28))) go to 999 -c -c first approximation to the integral -c ----------------------------------- -c - aa=a - bb=b - if (a.le.b) go to 10 - aa=b - bb=a -10 ier=0 - krule = 1 - call dqc25c(f,aa,bb,c,result,abserr,krule,neval) - last = 1 - rlist(1) = result - elist(1) = abserr - iord(1) = 1 - alist(1) = a - blist(1) = b -c -c test on accuracy -c - errbnd = dmax1(epsabs,epsrel*dabs(result)) - if(limit.eq.1) ier = 1 - if(abserr.lt.dmin1(0.1d-01*dabs(result),errbnd) - * .or.ier.eq.1) go to 70 -c -c initialization -c -------------- -c - alist(1) = aa - blist(1) = bb - rlist(1) = result - errmax = abserr - maxerr = 1 - area = result - errsum = abserr - nrmax = 1 - iroff1 = 0 - iroff2 = 0 -c -c main do-loop -c ------------ -c - do 40 last = 2,limit -c -c bisect the subinterval with nrmax-th largest -c error estimate. 
-c - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - b2 = blist(maxerr) - if(c.le.b1.and.c.gt.a1) b1 = 0.5d+00*(c+b2) - if(c.gt.b1.and.c.lt.b2) b1 = 0.5d+00*(a1+c) - a2 = b1 - krule = 2 - call dqc25c(f,a1,b1,c,area1,error1,krule,nev) - neval = neval+nev - call dqc25c(f,a2,b2,c,area2,error2,krule,nev) - neval = neval+nev -c -c improve previous approximations to integral -c and error and test for accuracy. -c - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(dabs(rlist(maxerr)-area12).lt.0.1d-04*dabs(area12) - * .and.erro12.ge.0.99d+00*errmax.and.krule.eq.0) - * iroff1 = iroff1+1 - if(last.gt.10.and.erro12.gt.errmax.and.krule.eq.0) - * iroff2 = iroff2+1 - rlist(maxerr) = area1 - rlist(last) = area2 - errbnd = dmax1(epsabs,epsrel*dabs(area)) - if(errsum.le.errbnd) go to 15 -c -c test for roundoff error and eventually set error flag. -c - if(iroff1.ge.6.and.iroff2.gt.20) ier = 2 -c -c set error flag in the case that number of interval -c bisections exceeds limit. -c - if(last.eq.limit) ier = 1 -c -c set error flag in the case of bad integrand behaviour -c at a point of the integration range. -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03*epmach) - * *(dabs(a2)+0.1d+04*uflow)) ier = 3 -c -c append the newly-created intervals to the list. -c - 15 if(error2.gt.error1) go to 20 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 30 - 20 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with nrmax-th largest error estimate (to be bisected next). 
-c - 30 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) -c ***jump out of do-loop - if(ier.ne.0.or.errsum.le.errbnd) go to 50 - 40 continue -c -c compute final result. -c --------------------- -c - 50 result = 0.0d+00 - do 60 k=1,last - result = result+rlist(k) - 60 continue - abserr = errsum - 70 if (aa.eq.b) result=-result - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawf.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawf.f deleted file mode 100644 index 4b77839f8d..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawf.f +++ /dev/null @@ -1,231 +0,0 @@ - subroutine dqawf(f,a,omega,integr,epsabs,result,abserr,neval,ier, - * limlst,lst,leniw,maxp1,lenw,iwork,work) -c***begin prologue dqawf -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a3a1 -c***keywords automatic integrator, special-purpose,fourier -c integral, integration between zeros with dqawoe, -c convergence acceleration with dqelg -c***author piessens,robert ,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c fourier integral i=integral of f(x)*w(x) over (a,infinity) -c where w(x) = cos(omega*x) or w(x) = sin(omega*x). -c hopefully satisfying following claim for accuracy -c abs(i-result).le.epsabs. -c***description -c -c computation of fourier integrals -c standard fortran subroutine -c double precision version -c -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. 
-c -c a - double precision -c lower limit of integration -c -c omega - double precision -c parameter in the integrand weight function -c -c integr - integer -c indicates which of the weight functions is used -c integr = 1 w(x) = cos(omega*x) -c integr = 2 w(x) = sin(omega*x) -c if integr.ne.1.and.integr.ne.2, the routine -c will end with ier = 6. -c -c epsabs - double precision -c absolute accuracy requested, epsabs.gt.0. -c if epsabs.le.0, the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine. -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c if omega.ne.0 -c ier = 1 maximum number of cycles allowed -c has been achieved, i.e. of subintervals -c (a+(k-1)c,a+kc) where -c c = (2*int(abs(omega))+1)*pi/abs(omega), -c for k = 1, 2, ..., lst. -c one can allow more cycles by increasing -c the value of limlst (and taking the -c according dimension adjustments into -c account). examine the array iwork which -c contains the error flags on the cycles, in -c order to look for eventual local -c integration difficulties. -c if the position of a local difficulty -c can be determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling -c appropriate integrators on the subranges. 
-c = 4 the extrapolation table constructed for -c convergence accelaration of the series -c formed by the integral contributions over -c the cycles, does not converge to within -c the requested accuracy. -c as in the case of ier = 1, it is advised -c to examine the array iwork which contains -c the error flags on the cycles. -c = 6 the input is invalid because -c (integr.ne.1 and integr.ne.2) or -c epsabs.le.0 or limlst.lt.1 or -c leniw.lt.(limlst+2) or maxp1.lt.1 or -c lenw.lt.(leniw*2+maxp1*25). -c result, abserr, neval, lst are set to -c zero. -c = 7 bad integrand behaviour occurs within -c one or more of the cycles. location and -c type of the difficulty involved can be -c determined from the first lst elements of -c vector iwork. here lst is the number of -c cycles actually needed (see below). -c iwork(k) = 1 the maximum number of -c subdivisions (=(leniw-limlst) -c /2) has been achieved on the -c k th cycle. -c = 2 occurrence of roundoff error -c is detected and prevents the -c tolerance imposed on the k th -c cycle, from being achieved -c on this cycle. -c = 3 extremely bad integrand -c behaviour occurs at some -c points of the k th cycle. -c = 4 the integration procedure -c over the k th cycle does -c not converge (to within the -c required accuracy) due to -c roundoff in the extrapolation -c procedure invoked on this -c cycle. it is assumed that the -c result on this interval is -c the best which can be -c obtained. -c = 5 the integral over the k th -c cycle is probably divergent -c or slowly convergent. it must -c be noted that divergence can -c occur with any other value of -c iwork(k). -c if omega = 0 and integr = 1, -c the integral is calculated by means of dqagie, -c and ier = iwork(1) (with meaning as described -c for iwork(k),k = 1). -c -c dimensioning parameters -c limlst - integer -c limlst gives an upper bound on the number of -c cycles, limlst.ge.3. -c if limlst.lt.3, the routine will end with ier = 6. 
-c -c lst - integer -c on return, lst indicates the number of cycles -c actually needed for the integration. -c if omega = 0, then lst is set to 1. -c -c leniw - integer -c dimensioning parameter for iwork. on entry, -c (leniw-limlst)/2 equals the maximum number of -c subintervals allowed in the partition of each -c cycle, leniw.ge.(limlst+2). -c if leniw.lt.(limlst+2), the routine will end with -c ier = 6. -c -c maxp1 - integer -c maxp1 gives an upper bound on the number of -c chebyshev moments which can be stored, i.e. for -c the intervals of lengths abs(b-a)*2**(-l), -c l = 0,1, ..., maxp1-2, maxp1.ge.1. -c if maxp1.lt.1, the routine will end with ier = 6. -c lenw - integer -c dimensioning parameter for work -c lenw must be at least leniw*2+maxp1*25. -c if lenw.lt.(leniw*2+maxp1*25), the routine will -c end with ier = 6. -c -c work arrays -c iwork - integer -c vector of dimension at least leniw -c on return, iwork(k) for k = 1, 2, ..., lst -c contain the error flags on the cycles. -c -c work - double precision -c vector of dimension at least -c on return, -c work(1), ..., work(lst) contain the integral -c approximations over the cycles, -c work(limlst+1), ..., work(limlst+lst) contain -c the error extimates over the cycles. -c further elements of work have no specific -c meaning for the user. -c -c***references (none) -c***routines called dqawfe,xerror -c***end prologue dqawf -c - double precision a,abserr,epsabs,f,omega,result,work - integer ier,integr,iwork,last,leniw,lenw,limit,limlst,ll2,lvl, - * lst,l1,l2,l3,l4,l5,l6,maxp1,neval -c - dimension iwork(leniw),work(lenw) -c - external f -c -c check validity of limlst, leniw, maxp1 and lenw. -c -c***first executable statement dqawf - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(limlst.lt.3.or.leniw.lt.(limlst+2).or.maxp1.lt.1.or.lenw.lt. 
- * (leniw*2+maxp1*25)) go to 10 -c -c prepare call for dqawfe -c - limit = (leniw-limlst)/2 - l1 = limlst+1 - l2 = limlst+l1 - l3 = limit+l2 - l4 = limit+l3 - l5 = limit+l4 - l6 = limit+l5 - ll2 = limit+l1 - call dqawfe(f,a,omega,integr,epsabs,limlst,limit,maxp1,result, - * abserr,neval,ier,work(1),work(l1),iwork(1),lst,work(l2), - * work(l3),work(l4),work(l5),iwork(l1),iwork(ll2),work(l6)) -c -c call error handler if necessary -c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqawf',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawfe.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawfe.f deleted file mode 100644 index f702e85198..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawfe.f +++ /dev/null @@ -1,363 +0,0 @@ - subroutine dqawfe(f,a,omega,integr,epsabs,limlst,limit,maxp1, - * result,abserr,neval,ier,rslst,erlst,ierlst,lst,alist,blist, - * rlist,elist,iord,nnlog,chebmo) -c***begin prologue dqawfe -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a3a1 -c***keywords automatic integrator, special-purpose, -c fourier integrals, -c integration between zeros with dqawoe, -c convergence acceleration with dqelg -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c dedoncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a -c given fourier integal -c i = integral of f(x)*w(x) over (a,infinity) -c where w(x)=cos(omega*x) or w(x)=sin(omega*x), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.epsabs. -c***description -c -c computation of fourier integrals -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to -c be declared e x t e r n a l in the driver program. 
-c -c a - double precision -c lower limit of integration -c -c omega - double precision -c parameter in the weight function -c -c integr - integer -c indicates which weight function is used -c integr = 1 w(x) = cos(omega*x) -c integr = 2 w(x) = sin(omega*x) -c if integr.ne.1.and.integr.ne.2, the routine will -c end with ier = 6. -c -c epsabs - double precision -c absolute accuracy requested, epsabs.gt.0 -c if epsabs.le.0, the routine will end with ier = 6. -c -c limlst - integer -c limlst gives an upper bound on the number of -c cycles, limlst.ge.1. -c if limlst.lt.3, the routine will end with ier = 6. -c -c limit - integer -c gives an upper bound on the number of subintervals -c allowed in the partition of each cycle, limit.ge.1 -c each cycle, limit.ge.1. -c -c maxp1 - integer -c gives an upper bound on the number of -c chebyshev moments which can be stored, i.e. -c for the intervals of lengths abs(b-a)*2**(-l), -c l=0,1, ..., maxp1-2, maxp1.ge.1 -c -c on return -c result - double precision -c approximation to the integral x -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - ier = 0 normal and reliable termination of -c the routine. it is assumed that the -c requested accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine. the -c estimates for integral and error are less -c reliable. it is assumed that the requested -c accuracy has not been achieved. -c error messages -c if omega.ne.0 -c ier = 1 maximum number of cycles allowed -c has been achieved., i.e. of subintervals -c (a+(k-1)c,a+kc) where -c c = (2*int(abs(omega))+1)*pi/abs(omega), -c for k = 1, 2, ..., lst. -c one can allow more cycles by increasing -c the value of limlst (and taking the -c according dimension adjustments into -c account). 
-c examine the array iwork which contains -c the error flags on the cycles, in order to -c look for eventual local integration -c difficulties. if the position of a local -c difficulty can be determined (e.g. -c singularity, discontinuity within the -c interval) one will probably gain from -c splitting up the interval at this point -c and calling appropriate integrators on -c the subranges. -c = 4 the extrapolation table constructed for -c convergence acceleration of the series -c formed by the integral contributions over -c the cycles, does not converge to within -c the requested accuracy. as in the case of -c ier = 1, it is advised to examine the -c array iwork which contains the error -c flags on the cycles. -c = 6 the input is invalid because -c (integr.ne.1 and integr.ne.2) or -c epsabs.le.0 or limlst.lt.3. -c result, abserr, neval, lst are set -c to zero. -c = 7 bad integrand behaviour occurs within one -c or more of the cycles. location and type -c of the difficulty involved can be -c determined from the vector ierlst. here -c lst is the number of cycles actually -c needed (see below). -c ierlst(k) = 1 the maximum number of -c subdivisions (= limit) has -c been achieved on the k th -c cycle. -c = 2 occurrence of roundoff error -c is detected and prevents the -c tolerance imposed on the -c k th cycle, from being -c achieved. -c = 3 extremely bad integrand -c behaviour occurs at some -c points of the k th cycle. -c = 4 the integration procedure -c over the k th cycle does -c not converge (to within the -c required accuracy) due to -c roundoff in the -c extrapolation procedure -c invoked on this cycle. it -c is assumed that the result -c on this interval is the -c best which can be obtained. -c = 5 the integral over the k th -c cycle is probably divergent -c or slowly convergent. it -c must be noted that -c divergence can occur with -c any other value of -c ierlst(k). 
-c if omega = 0 and integr = 1, -c the integral is calculated by means of dqagie -c and ier = ierlst(1) (with meaning as described -c for ierlst(k), k = 1). -c -c rslst - double precision -c vector of dimension at least limlst -c rslst(k) contains the integral contribution -c over the interval (a+(k-1)c,a+kc) where -c c = (2*int(abs(omega))+1)*pi/abs(omega), -c k = 1, 2, ..., lst. -c note that, if omega = 0, rslst(1) contains -c the value of the integral over (a,infinity). -c -c erlst - double precision -c vector of dimension at least limlst -c erlst(k) contains the error estimate corresponding -c with rslst(k). -c -c ierlst - integer -c vector of dimension at least limlst -c ierlst(k) contains the error flag corresponding -c with rslst(k). for the meaning of the local error -c flags see description of output parameter ier. -c -c lst - integer -c number of subintervals needed for the integration -c if omega = 0 then lst is set to 1. -c -c alist, blist, rlist, elist - double precision -c vector of dimension at least limit, -c -c iord, nnlog - integer -c vector of dimension at least limit, providing -c space for the quantities needed in the subdivision -c process of each cycle -c -c chebmo - double precision -c array of dimension at least (maxp1,25), providing -c space for the chebyshev moments needed within the -c cycles -c -c***references (none) -c***routines called d1mach,dqagie,dqawoe,dqelg -c***end prologue dqawfe -c - double precision a,abseps,abserr,alist,blist,chebmo,correc,cycle, - * c1,c2,dabs,dl,dla,dmax1,drl,d1mach,elist,erlst,ep,eps,epsa, - * epsabs,errsum,f,fact,omega,p,pi,p1,psum,reseps,result,res3la, - * rlist,rslst,uflow - integer ier,ierlst,integr,iord,ktmin,l,last,lst,limit,limlst,ll, - * maxp1,momcom,nev,neval,nnlog,nres,numrl2 -c - dimension alist(limit),blist(limit),chebmo(maxp1,25),elist(limit), - * erlst(limlst),ierlst(limlst),iord(limit),nnlog(limit),psum(52), - * res3la(3),rlist(limit),rslst(limlst) -c - external f -c -c -c the dimension of 
psum is determined by the value of -c limexp in subroutine dqelg (psum must be of dimension -c (limexp+2) at least). -c -c list of major variables -c ----------------------- -c -c c1, c2 - end points of subinterval (of length cycle) -c cycle - (2*int(abs(omega))+1)*pi/abs(omega) -c psum - vector of dimension at least (limexp+2) -c (see routine dqelg) -c psum contains the part of the epsilon table -c which is still needed for further computations. -c each element of psum is a partial sum of the -c series which should sum to the value of the -c integral. -c errsum - sum of error estimates over the subintervals, -c calculated cumulatively -c epsa - absolute tolerance requested over current -c subinterval -c chebmo - array containing the modified chebyshev -c moments (see also routine dqc25f) -c - data p/0.9d+00/ - data pi / 3.1415926535 8979323846 2643383279 50 d0 / -c -c test on validity of parameters -c ------------------------------ -c -c***first executable statement dqawfe - result = 0.0d+00 - abserr = 0.0d+00 - neval = 0 - lst = 0 - ier = 0 - if((integr.ne.1.and.integr.ne.2).or.epsabs.le.0.0d+00.or. - * limlst.lt.3) ier = 6 - if(ier.eq.6) go to 999 - if(omega.ne.0.0d+00) go to 10 -c -c integration by dqagie if omega is zero -c -------------------------------------- -c - if(integr.eq.1) call dqagie(f,0.0d+00,1,epsabs,0.0d+00,limit, - * result,abserr,neval,ier,alist,blist,rlist,elist,iord,last) - rslst(1) = result - erlst(1) = abserr - ierlst(1) = ier - lst = 1 - go to 999 -c -c initializations -c --------------- -c - 10 l = dabs(omega) - dl = 2*l+1 - cycle = dl*pi/dabs(omega) - ier = 0 - ktmin = 0 - neval = 0 - numrl2 = 0 - nres = 0 - c1 = a - c2 = cycle+a - p1 = 0.1d+01-p - uflow = d1mach(1) - eps = epsabs - if(epsabs.gt.uflow/p1) eps = epsabs*p1 - ep = eps - fact = 0.1d+01 - correc = 0.0d+00 - abserr = 0.0d+00 - errsum = 0.0d+00 -c -c main do-loop -c ------------ -c - do 50 lst = 1,limlst -c -c integrate over current subinterval. 
-c - dla = lst - epsa = eps*fact - call dqawoe(f,c1,c2,omega,integr,epsa,0.0d+00,limit,lst,maxp1, - * rslst(lst),erlst(lst),nev,ierlst(lst),last,alist,blist,rlist, - * elist,iord,nnlog,momcom,chebmo) - neval = neval+nev - fact = fact*p - errsum = errsum+erlst(lst) - drl = 0.5d+02*dabs(rslst(lst)) -c -c test on accuracy with partial sum -c - if((errsum+drl).le.epsabs.and.lst.ge.6) go to 80 - correc = dmax1(correc,erlst(lst)) - if(ierlst(lst).ne.0) eps = dmax1(ep,correc*p1) - if(ierlst(lst).ne.0) ier = 7 - if(ier.eq.7.and.(errsum+drl).le.correc*0.1d+02.and. - * lst.gt.5) go to 80 - numrl2 = numrl2+1 - if(lst.gt.1) go to 20 - psum(1) = rslst(1) - go to 40 - 20 psum(numrl2) = psum(ll)+rslst(lst) - if(lst.eq.2) go to 40 -c -c test on maximum number of subintervals -c - if(lst.eq.limlst) ier = 1 -c -c perform new extrapolation -c - call dqelg(numrl2,psum,reseps,abseps,res3la,nres) -c -c test whether extrapolated result is influenced by roundoff -c - ktmin = ktmin+1 - if(ktmin.ge.15.and.abserr.le.0.1d-02*(errsum+drl)) ier = 4 - if(abseps.gt.abserr.and.lst.ne.3) go to 30 - abserr = abseps - result = reseps - ktmin = 0 -c -c if ier is not 0, check whether direct result (partial sum) -c or extrapolated result yields the best integral -c approximation -c - if((abserr+0.1d+02*correc).le.epsabs.or. 
- * (abserr.le.epsabs.and.0.1d+02*correc.ge.epsabs)) go to 60 - 30 if(ier.ne.0.and.ier.ne.7) go to 60 - 40 ll = numrl2 - c1 = c2 - c2 = c2+cycle - 50 continue -c -c set final result and error estimate -c ----------------------------------- -c - 60 abserr = abserr+0.1d+02*correc - if(ier.eq.0) go to 999 - if(result.ne.0.0d+00.and.psum(numrl2).ne.0.0d+00) go to 70 - if(abserr.gt.errsum) go to 80 - if(psum(numrl2).eq.0.0d+00) go to 999 - 70 if(abserr/dabs(result).gt.(errsum+drl)/dabs(psum(numrl2))) - * go to 80 - if(ier.ge.1.and.ier.ne.7) abserr = abserr+drl - go to 999 - 80 result = psum(numrl2) - abserr = errsum+drl - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawo.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawo.f deleted file mode 100644 index 451be197c4..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawo.f +++ /dev/null @@ -1,225 +0,0 @@ - subroutine dqawo(f,a,b,omega,integr,epsabs,epsrel,result,abserr, - * neval,ier,leniw,maxp1,lenw,last,iwork,work) -c***begin prologue dqawo -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1 -c***keywords automatic integrator, special-purpose, -c integrand with oscillatory cos or sin factor, -c clenshaw-curtis method, (end point) singularities, -c extrapolation, globally adaptive -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i=integral of f(x)*w(x) over (a,b) -c where w(x) = cos(omega*x) -c or w(x) = sin(omega*x), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c computation of oscillatory integrals -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the function -c f(x). 
the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c omega - double precision -c parameter in the integrand weight function -c -c integr - integer -c indicates which of the weight functions is used -c integr = 1 w(x) = cos(omega*x) -c integr = 2 w(x) = sin(omega*x) -c if integr.ne.1.and.integr.ne.2, the routine will -c end with ier = 6. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c - ier.gt.0 abnormal termination of the routine. -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c (= leniw/2) has been achieved. one can -c allow more subdivisions by increasing the -c value of leniw (and taking the according -c dimension adjustments into account). -c however, if this yields no improvement it -c is advised to analyze the integrand in -c order to determine the integration -c difficulties. if the position of a local -c difficulty can be determined (e.g. -c singularity, discontinuity within the -c interval) one will probably gain from -c splitting up the interval at this point -c and calling the integrator on the -c subranges. 
if possible, an appropriate -c special-purpose integrator should be used -c which is designed for handling the type of -c difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour occurs -c at some interior points of the -c integration interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. it is presumed that -c the requested tolerance cannot be achieved -c due to roundoff in the extrapolation -c table, and that the returned result is -c the best which can be obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier. -c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or (integr.ne.1 and integr.ne.2), -c or leniw.lt.2 or maxp1.lt.1 or -c lenw.lt.leniw*2+maxp1*25. -c result, abserr, neval, last are set to -c zero. except when leniw, maxp1 or lenw are -c invalid, work(limit*2+1), work(limit*3+1), -c iwork(1), iwork(limit+1) are set to zero, -c work(1) is set to a and work(limit+1) to -c b. -c -c dimensioning parameters -c leniw - integer -c dimensioning parameter for iwork. -c leniw/2 equals the maximum number of subintervals -c allowed in the partition of the given integration -c interval (a,b), leniw.ge.2. -c if leniw.lt.2, the routine will end with ier = 6. -c -c maxp1 - integer -c gives an upper bound on the number of chebyshev -c moments which can be stored, i.e. for the -c intervals of lengths abs(b-a)*2**(-l), -c l=0,1, ..., maxp1-2, maxp1.ge.1 -c if maxp1.lt.1, the routine will end with ier = 6. -c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least leniw*2+maxp1*25. -c if lenw.lt.(leniw*2+maxp1*25), the routine will -c end with ier = 6. 
-c -c last - integer -c on return, last equals the number of subintervals -c produced in the subdivision process, which -c determines the number of significant elements -c actually in the work arrays. -c -c work arrays -c iwork - integer -c vector of dimension at least leniw -c on return, the first k elements of which contain -c pointers to the error estimates over the -c subintervals, such that work(limit*3+iwork(1)), .. -c work(limit*3+iwork(k)) form a decreasing -c sequence, with limit = lenw/2 , and k = last -c if last.le.(limit/2+2), and k = limit+1-last -c otherwise. -c furthermore, iwork(limit+1), ..., iwork(limit+ -c last) indicate the subdivision levels of the -c subintervals, such that iwork(limit+i) = l means -c that the subinterval numbered i is of length -c abs(b-a)*2**(1-l). -c -c work - double precision -c vector of dimension at least lenw -c on return -c work(1), ..., work(last) contain the left -c end points of the subintervals in the -c partition of (a,b), -c work(limit+1), ..., work(limit+last) contain -c the right end points, -c work(limit*2+1), ..., work(limit*2+last) contain -c the integral approximations over the -c subintervals, -c work(limit*3+1), ..., work(limit*3+last) -c contain the error estimates. -c work(limit*4+1), ..., work(limit*4+maxp1*25) -c provide space for storing the chebyshev moments. -c note that limit = lenw/2. -c -c***references (none) -c***routines called dqawoe,xerror -c***end prologue dqawo -c - double precision a,abserr,b,epsabs,epsrel,f,omega,result,work - integer ier,integr,iwork,last,limit,lenw,leniw,lvl,l1,l2,l3,l4, - * maxp1,momcom,neval -c - dimension iwork(leniw),work(lenw) -c - external f -c -c check validity of leniw, maxp1 and lenw. 
-c -c***first executable statement dqawo - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(leniw.lt.2.or.maxp1.lt.1.or.lenw.lt.(leniw*2+maxp1*25)) - * go to 10 -c -c prepare call for dqawoe -c - limit = leniw/2 - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 - l4 = limit+l3 - call dqawoe(f,a,b,omega,integr,epsabs,epsrel,limit,1,maxp1,result, - * abserr,neval,ier,last,work(1),work(l1),work(l2),work(l3), - * iwork(1),iwork(l1),momcom,work(l4)) -c -c call error handler if necessary -c - lvl = 0 -10 if(ier.eq.6) lvl = 0 - if(ier.ne.0) call xerror('abnormal return from dqawo',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawoe.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawoe.f deleted file mode 100644 index 6a8ad1d5df..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawoe.f +++ /dev/null @@ -1,531 +0,0 @@ - subroutine dqawoe (f,a,b,omega,integr,epsabs,epsrel,limit,icall, - * maxp1,result,abserr,neval,ier,last,alist,blist,rlist,elist,iord, - * nnlog,momcom,chebmo) -c***begin prologue dqawoe -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1 -c***keywords automatic integrator, special-purpose, -c integrand with oscillatory cos or sin factor, -c clenshaw-curtis method, (end point) singularities, -c extrapolation, globally adaptive -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral -c i = integral of f(x)*w(x) over (a,b) -c where w(x) = cos(omega*x) or w(x)=sin(omega*x), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c computation of oscillatory integrals -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). 
the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c omega - double precision -c parameter in the integrand weight function -c -c integr - integer -c indicates which of the weight functions is to be -c used -c integr = 1 w(x) = cos(omega*x) -c integr = 2 w(x) = sin(omega*x) -c if integr.ne.1 and integr.ne.2, the routine -c will end with ier = 6. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c limit - integer -c gives an upper bound on the number of subdivisions -c in the partition of (a,b), limit.ge.1. -c -c icall - integer -c if dqawoe is to be used only once, icall must -c be set to 1. assume that during this call, the -c chebyshev moments (for clenshaw-curtis integration -c of degree 24) have been computed for intervals of -c lenghts (abs(b-a))*2**(-l), l=0,1,2,...momcom-1. -c if icall.gt.1 this means that dqawoe has been -c called twice or more on intervals of the same -c length abs(b-a). the chebyshev moments already -c computed are then re-used in subsequent calls. -c if icall.lt.1, the routine will end with ier = 6. -c -c maxp1 - integer -c gives an upper bound on the number of chebyshev -c moments which can be stored, i.e. for the -c intervals of lenghts abs(b-a)*2**(-l), -c l=0,1, ..., maxp1-2, maxp1.ge.1. -c if maxp1.lt.1, the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. 
it is assumed that the -c requested accuracy has been achieved. -c - ier.gt.0 abnormal termination of the routine. -c the estimates for integral and error are -c less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit (and taking according dimension -c adjustments into account). however, if -c this yields no improvement it is advised -c to analyze the integrand, in order to -c determine the integration difficulties. -c if the position of a local difficulty can -c be determined (e.g. singularity, -c discontinuity within the interval) one -c will probably gain from splitting up the -c interval at this point and calling the -c integrator on the subranges. if possible, -c an appropriate special-purpose integrator -c should be used which is designed for -c handling the type of difficulty involved. -c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c the error may be under-estimated. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 4 the algorithm does not converge. -c roundoff error is detected in the -c extrapolation table. -c it is presumed that the requested -c tolerance cannot be achieved due to -c roundoff in the extrapolation table, -c and that the returned result is the -c best which can be obtained. -c = 5 the integral is probably divergent, or -c slowly convergent. it must be noted that -c divergence can occur with any other value -c of ier.gt.0. -c = 6 the input is invalid, because -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or (integr.ne.1 and integr.ne.2) or -c icall.lt.1 or maxp1.lt.1. -c result, abserr, neval, last, rlist(1), -c elist(1), iord(1) and nnlog(1) are set -c to zero. 
alist(1) and blist(1) are set -c to a and b respectively. -c -c last - integer -c on return, last equals the number of -c subintervals produces in the subdivision -c process, which determines the number of -c significant elements actually in the -c work arrays. -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c rlist - double precision -c vector of dimension at least limit, the first -c last elements of which are the integral -c approximations on the subintervals -c -c elist - double precision -c vector of dimension at least limit, the first -c last elements of which are the moduli of the -c absolute error estimates on the subintervals -c -c iord - integer -c vector of dimension at least limit, the first k -c elements of which are pointers to the error -c estimates over the subintervals, -c such that elist(iord(1)), ..., -c elist(iord(k)) form a decreasing sequence, with -c k = last if last.le.(limit/2+2), and -c k = limit+1-last otherwise. -c -c nnlog - integer -c vector of dimension at least limit, containing the -c subdivision levels of the subintervals, i.e. 
-c iwork(i) = l means that the subinterval -c numbered i is of length abs(b-a)*2**(1-l) -c -c on entry and return -c momcom - integer -c indicating that the chebyshev moments -c have been computed for intervals of lengths -c (abs(b-a))*2**(-l), l=0,1,2, ..., momcom-1, -c momcom.lt.maxp1 -c -c chebmo - double precision -c array of dimension (maxp1,25) containing the -c chebyshev moments -c -c***references (none) -c***routines called d1mach,dqc25f,dqelg,dqpsrt -c***end prologue dqawoe -c - double precision a,abseps,abserr,alist,area,area1,area12,area2,a1, - * a2,b,blist,b1,b2,chebmo,correc,dabs,defab1,defab2,defabs,dmax1, - * domega,d1mach,dres,elist,epmach,epsabs,epsrel,erlarg,erlast, - * errbnd,errmax,error1,erro12,error2,errsum,ertest,f,oflow, - * omega,resabs,reseps,result,res3la,rlist,rlist2,small,uflow,width - integer icall,id,ier,ierro,integr,iord,iroff1,iroff2,iroff3, - * jupbnd,k,ksgn,ktmin,last,limit,maxerr,maxp1,momcom,nev,neval, - * nnlog,nres,nrmax,nrmom,numrl2 - logical extrap,noext,extall -c - dimension alist(limit),blist(limit),rlist(limit),elist(limit), - * iord(limit),rlist2(52),res3la(3),chebmo(maxp1,25),nnlog(limit) -c - external f -c -c the dimension of rlist2 is determined by the value of -c limexp in subroutine dqelg (rlist2 should be of -c dimension (limexp+2) at least). 
-c -c list of major variables -c ----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c rlist2 - array of dimension at least limexp+2 -c containing the part of the epsilon table -c which is still needed for further computations -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the interval with largest -c error estimate -c errmax - elist(maxerr) -c erlast - error on the interval currently subdivided -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left subinterval -c *****2 - variable for the right subinterval -c last - index for subdivision -c nres - number of calls to the extrapolation routine -c numrl2 - number of elements in rlist2. if an appropriate -c approximation to the compounded integral has -c been obtained it is put in rlist2(numrl2) after -c numrl2 has been increased by one -c small - length of the smallest interval considered -c up to now, multiplied by 1.5 -c erlarg - sum of the errors over the intervals larger -c than the smallest interval considered up to now -c extrap - logical variable denoting that the routine is -c attempting to perform extrapolation, i.e. before -c subdividing the smallest interval we try to -c decrease the value of erlarg -c noext - logical variable denoting that extrapolation -c is no longer allowed (true value) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c oflow is the largest positive magnitude. 
-c -c***first executable statement dqawoe - epmach = d1mach(4) -c -c test on validity of parameters -c ------------------------------ -c - ier = 0 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - alist(1) = a - blist(1) = b - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - iord(1) = 0 - nnlog(1) = 0 - if((integr.ne.1.and.integr.ne.2).or.(epsabs.le.0.0d+00.and. - * epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28)).or.icall.lt.1.or. - * maxp1.lt.1) ier = 6 - if(ier.eq.6) go to 999 -c -c first approximation to the integral -c ----------------------------------- -c - domega = dabs(omega) - nrmom = 0 - if (icall.gt.1) go to 5 - momcom = 0 - 5 call dqc25f(f,a,b,domega,integr,nrmom,maxp1,0,result,abserr, - * neval,defabs,resabs,momcom,chebmo) -c -c test on accuracy. -c - dres = dabs(result) - errbnd = dmax1(epsabs,epsrel*dres) - rlist(1) = result - elist(1) = abserr - iord(1) = 1 - if(abserr.le.0.1d+03*epmach*defabs.and.abserr.gt.errbnd) ier = 2 - if(limit.eq.1) ier = 1 - if(ier.ne.0.or.abserr.le.errbnd) go to 200 -c -c initializations -c --------------- -c - uflow = d1mach(1) - oflow = d1mach(2) - errmax = abserr - maxerr = 1 - area = result - errsum = abserr - abserr = oflow - nrmax = 1 - extrap = .false. - noext = .false. - ierro = 0 - iroff1 = 0 - iroff2 = 0 - iroff3 = 0 - ktmin = 0 - small = dabs(b-a)*0.75d+00 - nres = 0 - numrl2 = 0 - extall = .false. - if(0.5d+00*dabs(b-a)*domega.gt.0.2d+01) go to 10 - numrl2 = 1 - extall = .true. - rlist2(1) = result - 10 if(0.25d+00*dabs(b-a)*domega.le.0.2d+01) extall = .true. - ksgn = -1 - if(dres.ge.(0.1d+01-0.5d+02*epmach)*defabs) ksgn = 1 -c -c main do-loop -c ------------ -c - do 140 last = 2,limit -c -c bisect the subinterval with the nrmax-th largest -c error estimate. 
-c - nrmom = nnlog(maxerr)+1 - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - a2 = b1 - b2 = blist(maxerr) - erlast = errmax - call dqc25f(f,a1,b1,domega,integr,nrmom,maxp1,0, - * area1,error1,nev,resabs,defab1,momcom,chebmo) - neval = neval+nev - call dqc25f(f,a2,b2,domega,integr,nrmom,maxp1,1, - * area2,error2,nev,resabs,defab2,momcom,chebmo) - neval = neval+nev -c -c improve previous approximations to integral -c and error and test for accuracy. -c - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(defab1.eq.error1.or.defab2.eq.error2) go to 25 - if(dabs(rlist(maxerr)-area12).gt.0.1d-04*dabs(area12) - * .or.erro12.lt.0.99d+00*errmax) go to 20 - if(extrap) iroff2 = iroff2+1 - if(.not.extrap) iroff1 = iroff1+1 - 20 if(last.gt.10.and.erro12.gt.errmax) iroff3 = iroff3+1 - 25 rlist(maxerr) = area1 - rlist(last) = area2 - nnlog(maxerr) = nrmom - nnlog(last) = nrmom - errbnd = dmax1(epsabs,epsrel*dabs(area)) -c -c test for roundoff error and eventually set error flag. -c - if(iroff1+iroff2.ge.10.or.iroff3.ge.20) ier = 2 - if(iroff2.ge.5) ierro = 3 -c -c set error flag in the case that the number of -c subintervals equals limit. -c - if(last.eq.limit) ier = 1 -c -c set error flag in the case of bad integrand behaviour -c at a point of the integration range. -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03*epmach) - * *(dabs(a2)+0.1d+04*uflow)) ier = 4 -c -c append the newly-created intervals to the list. 
-c - if(error2.gt.error1) go to 30 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 40 - 30 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with nrmax-th largest error estimate (to bisected next). -c - 40 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) -c ***jump out of do-loop - if(errsum.le.errbnd) go to 170 - if(ier.ne.0) go to 150 - if(last.eq.2.and.extall) go to 120 - if(noext) go to 140 - if(.not.extall) go to 50 - erlarg = erlarg-erlast - if(dabs(b1-a1).gt.small) erlarg = erlarg+erro12 - if(extrap) go to 70 -c -c test whether the interval to be bisected next is the -c smallest interval. -c - 50 width = dabs(blist(maxerr)-alist(maxerr)) - if(width.gt.small) go to 140 - if(extall) go to 60 -c -c test whether we can start with the extrapolation procedure -c (we do this if we integrate over the next interval with -c use of a gauss-kronrod rule - see subroutine dqc25f). -c - small = small*0.5d+00 - if(0.25d+00*width*domega.gt.0.2d+01) go to 140 - extall = .true. - go to 130 - 60 extrap = .true. - nrmax = 2 - 70 if(ierro.eq.3.or.erlarg.le.ertest) go to 90 -c -c the smallest interval has the largest error. -c before bisecting decrease the sum of the errors over -c the larger intervals (erlarg) and perform extrapolation. -c - jupbnd = last - if (last.gt.(limit/2+2)) jupbnd = limit+3-last - id = nrmax - do 80 k = id,jupbnd - maxerr = iord(nrmax) - errmax = elist(maxerr) - if(dabs(blist(maxerr)-alist(maxerr)).gt.small) go to 140 - nrmax = nrmax+1 - 80 continue -c -c perform extrapolation. 
-c - 90 numrl2 = numrl2+1 - rlist2(numrl2) = area - if(numrl2.lt.3) go to 110 - call dqelg(numrl2,rlist2,reseps,abseps,res3la,nres) - ktmin = ktmin+1 - if(ktmin.gt.5.and.abserr.lt.0.1d-02*errsum) ier = 5 - if(abseps.ge.abserr) go to 100 - ktmin = 0 - abserr = abseps - result = reseps - correc = erlarg - ertest = dmax1(epsabs,epsrel*dabs(reseps)) -c ***jump out of do-loop - if(abserr.le.ertest) go to 150 -c -c prepare bisection of the smallest interval. -c - 100 if(numrl2.eq.1) noext = .true. - if(ier.eq.5) go to 150 - 110 maxerr = iord(1) - errmax = elist(maxerr) - nrmax = 1 - extrap = .false. - small = small*0.5d+00 - erlarg = errsum - go to 140 - 120 small = small*0.5d+00 - numrl2 = numrl2+1 - rlist2(numrl2) = area - 130 ertest = errbnd - erlarg = errsum - 140 continue -c -c set the final result. -c --------------------- -c - 150 if(abserr.eq.oflow.or.nres.eq.0) go to 170 - if(ier+ierro.eq.0) go to 165 - if(ierro.eq.3) abserr = abserr+correc - if(ier.eq.0) ier = 3 - if(result.ne.0.0d+00.and.area.ne.0.0d+00) go to 160 - if(abserr.gt.errsum) go to 170 - if(area.eq.0.0d+00) go to 190 - go to 165 - 160 if(abserr/dabs(result).gt.errsum/dabs(area)) go to 170 -c -c test on divergence. -c - 165 if(ksgn.eq.(-1).and.dmax1(dabs(result),dabs(area)).le. - * defabs*0.1d-01) go to 190 - if(0.1d-01.gt.(result/area).or.(result/area).gt.0.1d+03 - * .or.errsum.ge.dabs(area)) ier = 6 - go to 190 -c -c compute global integral sum. 
-c - 170 result = 0.0d+00 - do 180 k=1,last - result = result+rlist(k) - 180 continue - abserr = errsum - 190 if (ier.gt.2) ier=ier-1 - 200 if (integr.eq.2.and.omega.lt.0.0d+00) result=-result - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqaws.f b/scipy-0.10.1/scipy/integrate/quadpack/dqaws.f deleted file mode 100644 index bbcb1f4046..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqaws.f +++ /dev/null @@ -1,200 +0,0 @@ - subroutine dqaws(f,a,b,alfa,beta,integr,epsabs,epsrel,result, - * abserr,neval,ier,limit,lenw,last,iwork,work) -c***begin prologue dqaws -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1 -c***keywords automatic integrator, special-purpose, -c algebraico-logarithmic end-point singularities, -c clenshaw-curtis, globally adaptive -c***author piessens,robert,appl. math. & progr. div. -k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f*w over (a,b), -c (where w shows a singular behaviour at the end points -c see parameter integr). -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c integration of functions having algebraico-logarithmic -c end point singularities -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration, b.gt.a -c if b.le.a, the routine will end with ier = 6. -c -c alfa - double precision -c parameter in the integrand function, alfa.gt.(-1) -c if alfa.le.(-1), the routine will end with -c ier = 6. 
-c -c beta - double precision -c parameter in the integrand function, beta.gt.(-1) -c if beta.le.(-1), the routine will end with -c ier = 6. -c -c integr - integer -c indicates which weight function is to be used -c = 1 (x-a)**alfa*(b-x)**beta -c = 2 (x-a)**alfa*(b-x)**beta*log(x-a) -c = 3 (x-a)**alfa*(b-x)**beta*log(b-x) -c = 4 (x-a)**alfa*(b-x)**beta*log(x-a)*log(b-x) -c if integr.lt.1 or integr.gt.4, the routine -c will end with ier = 6. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for the integral and error -c are less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c ier = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit (and taking the according dimension -c adjustments into account). however, if -c this yields no improvement it is advised -c to analyze the integrand, in order to -c determine the integration difficulties -c which prevent the requested tolerance from -c being achieved. in case of a jump -c discontinuity or a local singularity -c of algebraico-logarithmic type at one or -c more interior points of the integration -c range, one should proceed by splitting up -c the interval at these points and calling -c the integrator on the subranges. 
-c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 6 the input is invalid, because -c b.le.a or alfa.le.(-1) or beta.le.(-1) or -c or integr.lt.1 or integr.gt.4 or -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28)) -c or limit.lt.2 or lenw.lt.limit*4. -c result, abserr, neval, last are set to -c zero. except when lenw or limit is invalid -c iwork(1), work(limit*2+1) and -c work(limit*3+1) are set to zero, work(1) -c is set to a and work(limit+1) to b. -c -c dimensioning parameters -c limit - integer -c dimensioning parameter for iwork -c limit determines the maximum number of -c subintervals in the partition of the given -c integration interval (a,b), limit.ge.2. -c if limit.lt.2, the routine will end with ier = 6. -c -c lenw - integer -c dimensioning parameter for work -c lenw must be at least limit*4. -c if lenw.lt.limit*4, the routine will end -c with ier = 6. -c -c last - integer -c on return, last equals the number of -c subintervals produced in the subdivision process, -c which determines the significant number of -c elements actually in the work arrays. 
-c -c work arrays -c iwork - integer -c vector of dimension limit, the first k -c elements of which contain pointers -c to the error estimates over the subintervals, -c such that work(limit*3+iwork(1)), ..., -c work(limit*3+iwork(k)) form a decreasing -c sequence with k = last if last.le.(limit/2+2), -c and k = limit+1-last otherwise -c -c work - double precision -c vector of dimension lenw -c on return -c work(1), ..., work(last) contain the left -c end points of the subintervals in the -c partition of (a,b), -c work(limit+1), ..., work(limit+last) contain -c the right end points, -c work(limit*2+1), ..., work(limit*2+last) -c contain the integral approximations over -c the subintervals, -c work(limit*3+1), ..., work(limit*3+last) -c contain the error estimates. -c -c***references (none) -c***routines called dqawse,xerror -c***end prologue dqaws -c - double precision a,abserr,alfa,b,beta,epsabs,epsrel,f,result,work - integer ier,integr,iwork,last,lenw,limit,lvl,l1,l2,l3,neval -c - dimension iwork(limit),work(lenw) -c - external f -c -c check validity of limit and lenw. -c -c***first executable statement dqaws - ier = 6 - neval = 0 - last = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(limit.lt.2.or.lenw.lt.limit*4) go to 10 -c -c prepare call for dqawse. -c - l1 = limit+1 - l2 = limit+l1 - l3 = limit+l2 -c - call dqawse(f,a,b,alfa,beta,integr,epsabs,epsrel,limit,result, - * abserr,neval,ier,work(1),work(l1),work(l2),work(l3),iwork,last) -c -c call error handler if necessary. 
-c - lvl = 0 -10 if(ier.eq.6) lvl = 1 - if(ier.ne.0) call xerror('abnormal return from dqaws',26,ier,lvl) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqawse.f b/scipy-0.10.1/scipy/integrate/quadpack/dqawse.f deleted file mode 100644 index e1da633e76..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqawse.f +++ /dev/null @@ -1,369 +0,0 @@ - subroutine dqawse(f,a,b,alfa,beta,integr,epsabs,epsrel,limit, - * result,abserr,neval,ier,alist,blist,rlist,elist,iord,last) -c***begin prologue dqawse -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1 -c***keywords automatic integrator, special-purpose, -c algebraico-logarithmic end point singularities, -c clenshaw-curtis method -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose the routine calculates an approximation result to a given -c definite integral i = integral of f*w over (a,b), -c (where w shows a singular behaviour at the end points, -c see parameter integr). -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c integration of functions having algebraico-logarithmic -c end point singularities -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration, b.gt.a -c if b.le.a, the routine will end with ier = 6. -c -c alfa - double precision -c parameter in the weight function, alfa.gt.(-1) -c if alfa.le.(-1), the routine will end with -c ier = 6. -c -c beta - double precision -c parameter in the weight function, beta.gt.(-1) -c if beta.le.(-1), the routine will end with -c ier = 6. 
-c -c integr - integer -c indicates which weight function is to be used -c = 1 (x-a)**alfa*(b-x)**beta -c = 2 (x-a)**alfa*(b-x)**beta*log(x-a) -c = 3 (x-a)**alfa*(b-x)**beta*log(b-x) -c = 4 (x-a)**alfa*(b-x)**beta*log(x-a)*log(b-x) -c if integr.lt.1 or integr.gt.4, the routine -c will end with ier = 6. -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c limit - integer -c gives an upper bound on the number of subintervals -c in the partition of (a,b), limit.ge.2 -c if limit.lt.2, the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - integer -c ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine -c the estimates for the integral and error -c are less reliable. it is assumed that the -c requested accuracy has not been achieved. -c error messages -c = 1 maximum number of subdivisions allowed -c has been achieved. one can allow more -c subdivisions by increasing the value of -c limit. however, if this yields no -c improvement, it is advised to analyze the -c integrand in order to determine the -c integration difficulties which prevent the -c requested tolerance from being achieved. -c in case of a jump discontinuity or a local -c singularity of algebraico-logarithmic type -c at one or more interior points of the -c integration range, one should proceed by -c splitting up the interval at these -c points and calling the integrator on the -c subranges. 
-c = 2 the occurrence of roundoff error is -c detected, which prevents the requested -c tolerance from being achieved. -c = 3 extremely bad integrand behaviour occurs -c at some points of the integration -c interval. -c = 6 the input is invalid, because -c b.le.a or alfa.le.(-1) or beta.le.(-1), or -c integr.lt.1 or integr.gt.4, or -c (epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c or limit.lt.2. -c result, abserr, neval, rlist(1), elist(1), -c iord(1) and last are set to zero. alist(1) -c and blist(1) are set to a and b -c respectively. -c -c alist - double precision -c vector of dimension at least limit, the first -c last elements of which are the left -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c blist - double precision -c vector of dimension at least limit, the first -c last elements of which are the right -c end points of the subintervals in the partition -c of the given integration range (a,b) -c -c rlist - double precision -c vector of dimension at least limit,the first -c last elements of which are the integral -c approximations on the subintervals -c -c elist - double precision -c vector of dimension at least limit, the first -c last elements of which are the moduli of the -c absolute error estimates on the subintervals -c -c iord - integer -c vector of dimension at least limit, the first k -c of which are pointers to the error -c estimates over the subintervals, so that -c elist(iord(1)), ..., elist(iord(k)) with k = last -c if last.le.(limit/2+2), and k = limit+1-last -c otherwise form a decreasing sequence -c -c last - integer -c number of subintervals actually produced in -c the subdivision process -c -c***references (none) -c***routines called d1mach,dqc25s,dqmomo,dqpsrt -c***end prologue dqawse -c - double precision a,abserr,alfa,alist,area,area1,area12,area2,a1, - * a2,b,beta,blist,b1,b2,centre,dabs,dmax1,d1mach,elist,epmach, - * 
epsabs,epsrel,errbnd,errmax,error1,erro12,error2,errsum,f, - * resas1,resas2,result,rg,rh,ri,rj,rlist,uflow - integer ier,integr,iord,iroff1,iroff2,k,last,limit,maxerr,nev, - * neval,nrmax -c - external f -c - dimension alist(limit),blist(limit),rlist(limit),elist(limit), - * iord(limit),ri(25),rj(25),rh(25),rg(25) -c -c list of major variables -c ----------------------- -c -c alist - list of left end points of all subintervals -c considered up to now -c blist - list of right end points of all subintervals -c considered up to now -c rlist(i) - approximation to the integral over -c (alist(i),blist(i)) -c elist(i) - error estimate applying to rlist(i) -c maxerr - pointer to the interval with largest -c error estimate -c errmax - elist(maxerr) -c area - sum of the integrals over the subintervals -c errsum - sum of the errors over the subintervals -c errbnd - requested accuracy max(epsabs,epsrel* -c abs(result)) -c *****1 - variable for the left subinterval -c *****2 - variable for the right subinterval -c last - index for subdivision -c -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqawse - epmach = d1mach(4) - uflow = d1mach(1) -c -c test on validity of parameters -c ------------------------------ -c - ier = 6 - neval = 0 - last = 0 - rlist(1) = 0.0d+00 - elist(1) = 0.0d+00 - iord(1) = 0 - result = 0.0d+00 - abserr = 0.0d+00 - if(b.le.a.or.(epsabs.eq.0.0d+00.and. - * epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28)).or.alfa.le.(-0.1d+01). - * or.beta.le.(-0.1d+01).or.integr.lt.1.or.integr.gt.4.or. - * limit.lt.2) go to 999 - ier = 0 -c -c compute the modified chebyshev moments. -c - call dqmomo(alfa,beta,ri,rj,rg,rh,integr) -c -c integrate over the intervals (a,(a+b)/2) and ((a+b)/2,b). 
-c - centre = 0.5d+00*(b+a) - call dqc25s(f,a,b,a,centre,alfa,beta,ri,rj,rg,rh,area1, - * error1,resas1,integr,nev) - neval = nev - call dqc25s(f,a,b,centre,b,alfa,beta,ri,rj,rg,rh,area2, - * error2,resas2,integr,nev) - last = 2 - neval = neval+nev - result = area1+area2 - abserr = error1+error2 -c -c test on accuracy. -c - errbnd = dmax1(epsabs,epsrel*dabs(result)) -c -c initialization -c -------------- -c - if(error2.gt.error1) go to 10 - alist(1) = a - alist(2) = centre - blist(1) = centre - blist(2) = b - rlist(1) = area1 - rlist(2) = area2 - elist(1) = error1 - elist(2) = error2 - go to 20 - 10 alist(1) = centre - alist(2) = a - blist(1) = b - blist(2) = centre - rlist(1) = area2 - rlist(2) = area1 - elist(1) = error2 - elist(2) = error1 - 20 iord(1) = 1 - iord(2) = 2 - if(limit.eq.2) ier = 1 - if(abserr.le.errbnd.or.ier.eq.1) go to 999 - errmax = elist(1) - maxerr = 1 - nrmax = 1 - area = result - errsum = abserr - iroff1 = 0 - iroff2 = 0 -c -c main do-loop -c ------------ -c - do 60 last = 3,limit -c -c bisect the subinterval with largest error estimate. -c - a1 = alist(maxerr) - b1 = 0.5d+00*(alist(maxerr)+blist(maxerr)) - a2 = b1 - b2 = blist(maxerr) -c - call dqc25s(f,a,b,a1,b1,alfa,beta,ri,rj,rg,rh,area1, - * error1,resas1,integr,nev) - neval = neval+nev - call dqc25s(f,a,b,a2,b2,alfa,beta,ri,rj,rg,rh,area2, - * error2,resas2,integr,nev) - neval = neval+nev -c -c improve previous approximations integral and error -c and test for accuracy. -c - area12 = area1+area2 - erro12 = error1+error2 - errsum = errsum+erro12-errmax - area = area+area12-rlist(maxerr) - if(a.eq.a1.or.b.eq.b2) go to 30 - if(resas1.eq.error1.or.resas2.eq.error2) go to 30 -c -c test for roundoff error. -c - if(dabs(rlist(maxerr)-area12).lt.0.1d-04*dabs(area12) - * .and.erro12.ge.0.99d+00*errmax) iroff1 = iroff1+1 - if(last.gt.10.and.erro12.gt.errmax) iroff2 = iroff2+1 - 30 rlist(maxerr) = area1 - rlist(last) = area2 -c -c test on accuracy. 
-c - errbnd = dmax1(epsabs,epsrel*dabs(area)) - if(errsum.le.errbnd) go to 35 -c -c set error flag in the case that the number of interval -c bisections exceeds limit. -c - if(last.eq.limit) ier = 1 -c -c -c set error flag in the case of roundoff error. -c - if(iroff1.ge.6.or.iroff2.ge.20) ier = 2 -c -c set error flag in the case of bad integrand behaviour -c at interior points of integration range. -c - if(dmax1(dabs(a1),dabs(b2)).le.(0.1d+01+0.1d+03*epmach)* - * (dabs(a2)+0.1d+04*uflow)) ier = 3 -c -c append the newly-created intervals to the list. -c - 35 if(error2.gt.error1) go to 40 - alist(last) = a2 - blist(maxerr) = b1 - blist(last) = b2 - elist(maxerr) = error1 - elist(last) = error2 - go to 50 - 40 alist(maxerr) = a2 - alist(last) = a1 - blist(last) = b1 - rlist(maxerr) = area2 - rlist(last) = area1 - elist(maxerr) = error2 - elist(last) = error1 -c -c call subroutine dqpsrt to maintain the descending ordering -c in the list of error estimates and select the subinterval -c with largest error estimate (to be bisected next). -c - 50 call dqpsrt(limit,last,maxerr,errmax,elist,iord,nrmax) -c ***jump out of do-loop - if (ier.ne.0.or.errsum.le.errbnd) go to 70 - 60 continue -c -c compute final result. -c --------------------- -c - 70 result = 0.0d+00 - do 80 k=1,last - result = result+rlist(k) - 80 continue - abserr = errsum - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqc25c.f b/scipy-0.10.1/scipy/integrate/quadpack/dqc25c.f deleted file mode 100644 index a89f5ccb81..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqc25c.f +++ /dev/null @@ -1,161 +0,0 @@ - subroutine dqc25c(f,a,b,c,result,abserr,krul,neval) -c***begin prologue dqc25c -c***date written 810101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a2,j4 -c***keywords 25-point clenshaw-curtis integration -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. 
- k.u.leuven -c***purpose to compute i = integral of f*w over (a,b) with -c error estimate, where w(x) = 1/(x-c) -c***description -c -c integration rules for the computation of cauchy -c principal value integrals -c standard fortran subroutine -c double precision version -c -c parameters -c f - double precision -c function subprogram defining the integrand function -c f(x). the actual name for f needs to be declared -c e x t e r n a l in the driver program. -c -c a - double precision -c left end point of the integration interval -c -c b - double precision -c right end point of the integration interval, b.gt.a -c -c c - double precision -c parameter in the weight function -c -c result - double precision -c approximation to the integral -c result is computed by using a generalized -c clenshaw-curtis method if c lies within ten percent -c of the integration interval. in the other case the -c 15-point kronrod rule obtained by optimal addition -c of abscissae to the 7-point gauss rule, is applied. -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c krul - integer -c key which is decreased by 1 if the 15-point -c gauss-kronrod scheme has been used -c -c neval - integer -c number of integrand evaluations -c -c....................................................................... 
-c***references (none) -c***routines called dqcheb,dqk15w,dqwgtc -c***end prologue dqc25c -c - double precision a,abserr,ak22,amom0,amom1,amom2,b,c,cc,centr, - * cheb12,cheb24,dabs,dlog,dqwgtc,f,fval,hlgth,p2,p3,p4,resabs, - * resasc,result,res12,res24,u,x - integer i,isym,k,kp,krul,neval -c - dimension x(11),fval(25),cheb12(13),cheb24(25) -c - external f,dqwgtc -c -c the vector x contains the values cos(k*pi/24), -c k = 1, ..., 11, to be used for the chebyshev series -c expansion of f -c - data x(1) / 0.9914448613 7381041114 4557526928 563d0 / - data x(2) / 0.9659258262 8906828674 9743199728 897d0 / - data x(3) / 0.9238795325 1128675612 8183189396 788d0 / - data x(4) / 0.8660254037 8443864676 3723170752 936d0 / - data x(5) / 0.7933533402 9123516457 9776961501 299d0 / - data x(6) / 0.7071067811 8654752440 0844362104 849d0 / - data x(7) / 0.6087614290 0872063941 6097542898 164d0 / - data x(8) / 0.5000000000 0000000000 0000000000 000d0 / - data x(9) / 0.3826834323 6508977172 8459984030 399d0 / - data x(10) / 0.2588190451 0252076234 8898837624 048d0 / - data x(11) / 0.1305261922 2005159154 8406227895 489d0 / -c -c list of major variables -c ---------------------- -c fval - value of the function f at the points -c cos(k*pi/24), k = 0, ..., 24 -c cheb12 - chebyshev series expansion coefficients, -c for the function f, of degree 12 -c cheb24 - chebyshev series expansion coefficients, -c for the function f, of degree 24 -c res12 - approximation to the integral corresponding -c to the use of cheb12 -c res24 - approximation to the integral corresponding -c to the use of cheb24 -c dqwgtc - external function subprogram defining -c the weight function -c hlgth - half-length of the interval -c centr - mid point of the interval -c -c -c check the position of c. -c -c***first executable statement dqc25c - cc = (0.2d+01*c-b-a)/(b-a) - if(dabs(cc).lt.0.11d+01) go to 10 -c -c apply the 15-point gauss-kronrod scheme. 
-c - krul = krul-1 - call dqk15w(f,dqwgtc,c,p2,p3,p4,kp,a,b,result,abserr, - * resabs,resasc) - neval = 15 - if (resasc.eq.abserr) krul = krul+1 - go to 50 -c -c use the generalized clenshaw-curtis method. -c - 10 hlgth = 0.5d+00*(b-a) - centr = 0.5d+00*(b+a) - neval = 25 - fval(1) = 0.5d+00*f(hlgth+centr) - fval(13) = f(centr) - fval(25) = 0.5d+00*f(centr-hlgth) - do 20 i=2,12 - u = hlgth*x(i-1) - isym = 26-i - fval(i) = f(u+centr) - fval(isym) = f(centr-u) - 20 continue -c -c compute the chebyshev series expansion. -c - call dqcheb(x,fval,cheb12,cheb24) -c -c the modified chebyshev moments are computed by forward -c recursion, using amom0 and amom1 as starting values. -c - amom0 = dlog(dabs((0.1d+01-cc)/(0.1d+01+cc))) - amom1 = 0.2d+01+cc*amom0 - res12 = cheb12(1)*amom0+cheb12(2)*amom1 - res24 = cheb24(1)*amom0+cheb24(2)*amom1 - do 30 k=3,13 - amom2 = 0.2d+01*cc*amom1-amom0 - ak22 = (k-2)*(k-2) - if((k/2)*2.eq.k) amom2 = amom2-0.4d+01/(ak22-0.1d+01) - res12 = res12+cheb12(k)*amom2 - res24 = res24+cheb24(k)*amom2 - amom0 = amom1 - amom1 = amom2 - 30 continue - do 40 k=14,25 - amom2 = 0.2d+01*cc*amom1-amom0 - ak22 = (k-2)*(k-2) - if((k/2)*2.eq.k) amom2 = amom2-0.4d+01/(ak22-0.1d+01) - res24 = res24+cheb24(k)*amom2 - amom0 = amom1 - amom1 = amom2 - 40 continue - result = res24 - abserr = dabs(res24-res12) - 50 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqc25f.f b/scipy-0.10.1/scipy/integrate/quadpack/dqc25f.f deleted file mode 100644 index afca5b8c97..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqc25f.f +++ /dev/null @@ -1,353 +0,0 @@ - subroutine dqc25f(f,a,b,omega,integr,nrmom,maxp1,ksave,result, - * abserr,neval,resabs,resasc,momcom,chebmo) -c***begin prologue dqc25f -c***date written 810101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a2 -c***keywords integration rules for functions with cos or sin -c factor, clenshaw-curtis, gauss-kronrod -c***author piessens,robert,appl. math. & progr. div. 
- k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose to compute the integral i=integral of f(x) over (a,b) -c where w(x) = cos(omega*x) or w(x)=sin(omega*x) and to -c compute j = integral of abs(f) over (a,b). for small value -c of omega or small intervals (a,b) the 15-point gauss-kronro -c rule is used. otherwise a generalized clenshaw-curtis -c method is used. -c***description -c -c integration rules for functions with cos or sin factor -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to -c be declared e x t e r n a l in the calling program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c omega - double precision -c parameter in the weight function -c -c integr - integer -c indicates which weight function is to be used -c integr = 1 w(x) = cos(omega*x) -c integr = 2 w(x) = sin(omega*x) -c -c nrmom - integer -c the length of interval (a,b) is equal to the length -c of the original integration interval divided by -c 2**nrmom (we suppose that the routine is used in an -c adaptive integration process, otherwise set -c nrmom = 0). nrmom must be zero at the first call. -c -c maxp1 - integer -c gives an upper bound on the number of chebyshev -c moments which can be stored, i.e. for the -c intervals of lengths abs(bb-aa)*2**(-l), -c l = 0,1,2, ..., maxp1-2. 
-c -c ksave - integer -c key which is one when the moments for the -c current interval have been computed -c -c on return -c result - double precision -c approximation to the integral i -c -c abserr - double precision -c estimate of the modulus of the absolute -c error, which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of abs(f-i/(b-a)) -c -c on entry and return -c momcom - integer -c for each interval length we need to compute the -c chebyshev moments. momcom counts the number of -c intervals for which these moments have already been -c computed. if nrmom.lt.momcom or ksave = 1, the -c chebyshev moments for the interval (a,b) have -c already been computed and stored, otherwise we -c compute them and we increase momcom. -c -c chebmo - double precision -c array of dimension at least (maxp1,25) containing -c the modified chebyshev moments for the first momcom -c momcom interval lengths -c -c ...................................................................... 
-c***references (none) -c***routines called d1mach,dgtsl,dqcheb,dqk15w,dqwgtf -c***end prologue dqc25f -c - double precision a,abserr,ac,an,an2,as,asap,ass,b,centr,chebmo, - * cheb12,cheb24,conc,cons,cospar,d,dabs,dcos,dsin,dqwgtf,d1, - * d1mach,d2,estc,ests,f,fval,hlgth,oflow,omega,parint,par2,par22, - * p2,p3,p4,resabs,resasc,resc12,resc24,ress12,ress24,result, - * sinpar,v,x - integer i,iers,integr,isym,j,k,ksave,m,momcom,neval,maxp1, - * noequ,noeq1,nrmom -c - dimension chebmo(maxp1,25),cheb12(13),cheb24(25),d(25),d1(25), - * d2(25),fval(25),v(28),x(11) -c - external f,dqwgtf -c -c the vector x contains the values cos(k*pi/24) -c k = 1, ...,11, to be used for the chebyshev expansion of f -c - data x(1) / 0.9914448613 7381041114 4557526928 563d0 / - data x(2) / 0.9659258262 8906828674 9743199728 897d0 / - data x(3) / 0.9238795325 1128675612 8183189396 788d0 / - data x(4) / 0.8660254037 8443864676 3723170752 936d0 / - data x(5) / 0.7933533402 9123516457 9776961501 299d0 / - data x(6) / 0.7071067811 8654752440 0844362104 849d0 / - data x(7) / 0.6087614290 0872063941 6097542898 164d0 / - data x(8) / 0.5000000000 0000000000 0000000000 000d0 / - data x(9) / 0.3826834323 6508977172 8459984030 399d0 / - data x(10) / 0.2588190451 0252076234 8898837624 048d0 / - data x(11) / 0.1305261922 2005159154 8406227895 489d0 / -c -c list of major variables -c ----------------------- -c -c centr - mid point of the integration interval -c hlgth - half-length of the integration interval -c fval - value of the function f at the points -c (b-a)*0.5*cos(k*pi/12) + (b+a)*0.5, k = 0, ..., 24 -c cheb12 - coefficients of the chebyshev series expansion -c of degree 12, for the function f, in the -c interval (a,b) -c cheb24 - coefficients of the chebyshev series expansion -c of degree 24, for the function f, in the -c interval (a,b) -c resc12 - approximation to the integral of -c cos(0.5*(b-a)*omega*x)*f(0.5*(b-a)*x+0.5*(b+a)) -c over (-1,+1), using the chebyshev series -c expansion of degree 
12 -c resc24 - approximation to the same integral, using the -c chebyshev series expansion of degree 24 -c ress12 - the analogue of resc12 for the sine -c ress24 - the analogue of resc24 for the sine -c -c -c machine dependent constant -c -------------------------- -c -c oflow is the largest positive magnitude. -c -c***first executable statement dqc25f - oflow = d1mach(2) -c - centr = 0.5d+00*(b+a) - hlgth = 0.5d+00*(b-a) - parint = omega*hlgth -c -c compute the integral using the 15-point gauss-kronrod -c formula if the value of the parameter in the integrand -c is small. -c - if(dabs(parint).gt.0.2d+01) go to 10 - call dqk15w(f,dqwgtf,omega,p2,p3,p4,integr,a,b,result, - * abserr,resabs,resasc) - neval = 15 - go to 170 -c -c compute the integral using the generalized clenshaw- -c curtis method. -c - 10 conc = hlgth*dcos(centr*omega) - cons = hlgth*dsin(centr*omega) - resasc = oflow - neval = 25 -c -c check whether the chebyshev moments for this interval -c have already been computed. -c - if(nrmom.lt.momcom.or.ksave.eq.1) go to 120 -c -c compute a new set of chebyshev moments. -c - m = momcom+1 - par2 = parint*parint - par22 = par2+0.2d+01 - sinpar = dsin(parint) - cospar = dcos(parint) -c -c compute the chebyshev moments with respect to cosine. -c - v(1) = 0.2d+01*sinpar/parint - v(2) = (0.8d+01*cospar+(par2+par2-0.8d+01)*sinpar/parint)/par2 - v(3) = (0.32d+02*(par2-0.12d+02)*cospar+(0.2d+01* - * ((par2-0.80d+02)*par2+0.192d+03)*sinpar)/parint)/(par2*par2) - ac = 0.8d+01*cospar - as = 0.24d+02*parint*sinpar - if(dabs(parint).gt.0.24d+02) go to 30 -c -c compute the chebyshev moments as the solutions of a -c boundary value problem with 1 initial value (v(3)) and 1 -c end value (computed using an asymptotic formula). 
-c - noequ = 25 - noeq1 = noequ-1 - an = 0.6d+01 - do 20 k = 1,noeq1 - an2 = an*an - d(k) = -0.2d+01*(an2-0.4d+01)*(par22-an2-an2) - d2(k) = (an-0.1d+01)*(an-0.2d+01)*par2 - d1(k+1) = (an+0.3d+01)*(an+0.4d+01)*par2 - v(k+3) = as-(an2-0.4d+01)*ac - an = an+0.2d+01 - 20 continue - an2 = an*an - d(noequ) = -0.2d+01*(an2-0.4d+01)*(par22-an2-an2) - v(noequ+3) = as-(an2-0.4d+01)*ac - v(4) = v(4)-0.56d+02*par2*v(3) - ass = parint*sinpar - asap = (((((0.210d+03*par2-0.1d+01)*cospar-(0.105d+03*par2 - * -0.63d+02)*ass)/an2-(0.1d+01-0.15d+02*par2)*cospar - * +0.15d+02*ass)/an2-cospar+0.3d+01*ass)/an2-cospar)/an2 - v(noequ+3) = v(noequ+3)-0.2d+01*asap*par2*(an-0.1d+01)* - * (an-0.2d+01) -c -c solve the tridiagonal system by means of gaussian -c elimination with partial pivoting. -c -c*** call to dgtsl must be replaced by call to -c*** double precision version of linpack routine sgtsl -c - call dgtsl(noequ,d1,d,d2,v(4),iers) - go to 50 -c -c compute the chebyshev moments by means of forward -c recursion. -c - 30 an = 0.4d+01 - do 40 i = 4,13 - an2 = an*an - v(i) = ((an2-0.4d+01)*(0.2d+01*(par22-an2-an2)*v(i-1)-ac) - * +as-par2*(an+0.1d+01)*(an+0.2d+01)*v(i-2))/ - * (par2*(an-0.1d+01)*(an-0.2d+01)) - an = an+0.2d+01 - 40 continue - 50 do 60 j = 1,13 - chebmo(m,2*j-1) = v(j) - 60 continue -c -c compute the chebyshev moments with respect to sine. -c - v(1) = 0.2d+01*(sinpar-parint*cospar)/par2 - v(2) = (0.18d+02-0.48d+02/par2)*sinpar/par2 - * +(-0.2d+01+0.48d+02/par2)*cospar/parint - ac = -0.24d+02*parint*cospar - as = -0.8d+01*sinpar - if(dabs(parint).gt.0.24d+02) go to 80 -c -c compute the chebyshev moments as the solutions of a boundary -c value problem with 1 initial value (v(2)) and 1 end value -c (computed using an asymptotic formula). 
-c - an = 0.5d+01 - do 70 k = 1,noeq1 - an2 = an*an - d(k) = -0.2d+01*(an2-0.4d+01)*(par22-an2-an2) - d2(k) = (an-0.1d+01)*(an-0.2d+01)*par2 - d1(k+1) = (an+0.3d+01)*(an+0.4d+01)*par2 - v(k+2) = ac+(an2-0.4d+01)*as - an = an+0.2d+01 - 70 continue - an2 = an*an - d(noequ) = -0.2d+01*(an2-0.4d+01)*(par22-an2-an2) - v(noequ+2) = ac+(an2-0.4d+01)*as - v(3) = v(3)-0.42d+02*par2*v(2) - ass = parint*cospar - asap = (((((0.105d+03*par2-0.63d+02)*ass+(0.210d+03*par2 - * -0.1d+01)*sinpar)/an2+(0.15d+02*par2-0.1d+01)*sinpar- - * 0.15d+02*ass)/an2-0.3d+01*ass-sinpar)/an2-sinpar)/an2 - v(noequ+2) = v(noequ+2)-0.2d+01*asap*par2*(an-0.1d+01) - * *(an-0.2d+01) -c -c solve the tridiagonal system by means of gaussian -c elimination with partial pivoting. -c -c*** call to dgtsl must be replaced by call to -c*** double precision version of linpack routine sgtsl -c - call dgtsl(noequ,d1,d,d2,v(3),iers) - go to 100 -c -c compute the chebyshev moments by means of forward recursion. -c - 80 an = 0.3d+01 - do 90 i = 3,12 - an2 = an*an - v(i) = ((an2-0.4d+01)*(0.2d+01*(par22-an2-an2)*v(i-1)+as) - * +ac-par2*(an+0.1d+01)*(an+0.2d+01)*v(i-2)) - * /(par2*(an-0.1d+01)*(an-0.2d+01)) - an = an+0.2d+01 - 90 continue - 100 do 110 j = 1,12 - chebmo(m,2*j) = v(j) - 110 continue - 120 if (nrmom.lt.momcom) m = nrmom+1 - if (momcom.lt.(maxp1-1).and.nrmom.ge.momcom) momcom = momcom+1 -c -c compute the coefficients of the chebyshev expansions -c of degrees 12 and 24 of the function f. -c - fval(1) = 0.5d+00*f(centr+hlgth) - fval(13) = f(centr) - fval(25) = 0.5d+00*f(centr-hlgth) - do 130 i = 2,12 - isym = 26-i - fval(i) = f(hlgth*x(i-1)+centr) - fval(isym) = f(centr-hlgth*x(i-1)) - 130 continue - call dqcheb(x,fval,cheb12,cheb24) -c -c compute the integral and error estimates. 
-c - resc12 = cheb12(13)*chebmo(m,13) - ress12 = 0.0d+00 - k = 11 - do 140 j = 1,6 - resc12 = resc12+cheb12(k)*chebmo(m,k) - ress12 = ress12+cheb12(k+1)*chebmo(m,k+1) - k = k-2 - 140 continue - resc24 = cheb24(25)*chebmo(m,25) - ress24 = 0.0d+00 - resabs = dabs(cheb24(25)) - k = 23 - do 150 j = 1,12 - resc24 = resc24+cheb24(k)*chebmo(m,k) - ress24 = ress24+cheb24(k+1)*chebmo(m,k+1) - resabs = dabs(cheb24(k))+dabs(cheb24(k+1)) - k = k-2 - 150 continue - estc = dabs(resc24-resc12) - ests = dabs(ress24-ress12) - resabs = resabs*dabs(hlgth) - if(integr.eq.2) go to 160 - result = conc*resc24-cons*ress24 - abserr = dabs(conc*estc)+dabs(cons*ests) - go to 170 - 160 result = conc*ress24+cons*resc24 - abserr = dabs(conc*ests)+dabs(cons*estc) - 170 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqc25s.f b/scipy-0.10.1/scipy/integrate/quadpack/dqc25s.f deleted file mode 100644 index 0ca7b47f57..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqc25s.f +++ /dev/null @@ -1,337 +0,0 @@ - subroutine dqc25s(f,a,b,bl,br,alfa,beta,ri,rj,rg,rh,result, - * abserr,resasc,integr,nev) -c***begin prologue dqc25s -c***date written 810101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a2 -c***keywords 25-point clenshaw-curtis integration -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose to compute i = integral of f*w over (bl,br), with error -c estimate, where the weight function w has a singular -c behaviour of algebraico-logarithmic type at the points -c a and/or b. (bl,br) is a part of (a,b). -c***description -c -c integration rules for integrands having algebraico-logarithmic -c end point singularities -c standard fortran subroutine -c double precision version -c -c parameters -c f - double precision -c function subprogram defining the integrand -c f(x). the actual name for f needs to be declared -c e x t e r n a l in the driver program. 
-c -c a - double precision -c left end point of the original interval -c -c b - double precision -c right end point of the original interval, b.gt.a -c -c bl - double precision -c lower limit of integration, bl.ge.a -c -c br - double precision -c upper limit of integration, br.le.b -c -c alfa - double precision -c parameter in the weight function -c -c beta - double precision -c parameter in the weight function -c -c ri,rj,rg,rh - double precision -c modified chebyshev moments for the application -c of the generalized clenshaw-curtis -c method (computed in subroutine dqmomo) -c -c result - double precision -c approximation to the integral -c result is computed by using a generalized -c clenshaw-curtis method if b1 = a or br = b. -c in all other cases the 15-point kronrod -c rule is applied, obtained by optimal addition of -c abscissae to the 7-point gauss rule. -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c resasc - double precision -c approximation to the integral of abs(f*w-i/(b-a)) -c -c integr - integer -c which determines the weight function -c = 1 w(x) = (x-a)**alfa*(b-x)**beta -c = 2 w(x) = (x-a)**alfa*(b-x)**beta*log(x-a) -c = 3 w(x) = (x-a)**alfa*(b-x)**beta*log(b-x) -c = 4 w(x) = (x-a)**alfa*(b-x)**beta*log(x-a)* -c log(b-x) -c -c nev - integer -c number of integrand evaluations -c***references (none) -c***routines called dqcheb,dqk15w -c***end prologue dqc25s -c - double precision a,abserr,alfa,b,beta,bl,br,centr,cheb12,cheb24, - * dabs,dc,dlog,f,factor,fix,fval,hlgth,resabs,resasc,result,res12, - * res24,rg,rh,ri,rj,u,dqwgts,x - integer i,integr,isym,nev -c - dimension cheb12(13),cheb24(25),fval(25),rg(25),rh(25),ri(25), - * rj(25),x(11) -c - external f,dqwgts -c -c the vector x contains the values cos(k*pi/24) -c k = 1, ..., 11, to be used for the computation of the -c chebyshev series expansion of f. 
-c - data x(1) / 0.9914448613 7381041114 4557526928 563d0 / - data x(2) / 0.9659258262 8906828674 9743199728 897d0 / - data x(3) / 0.9238795325 1128675612 8183189396 788d0 / - data x(4) / 0.8660254037 8443864676 3723170752 936d0 / - data x(5) / 0.7933533402 9123516457 9776961501 299d0 / - data x(6) / 0.7071067811 8654752440 0844362104 849d0 / - data x(7) / 0.6087614290 0872063941 6097542898 164d0 / - data x(8) / 0.5000000000 0000000000 0000000000 000d0 / - data x(9) / 0.3826834323 6508977172 8459984030 399d0 / - data x(10) / 0.2588190451 0252076234 8898837624 048d0 / - data x(11) / 0.1305261922 2005159154 8406227895 489d0 / -c -c list of major variables -c ----------------------- -c -c fval - value of the function f at the points -c (br-bl)*0.5*cos(k*pi/24)+(br+bl)*0.5 -c k = 0, ..., 24 -c cheb12 - coefficients of the chebyshev series expansion -c of degree 12, for the function f, in the -c interval (bl,br) -c cheb24 - coefficients of the chebyshev series expansion -c of degree 24, for the function f, in the -c interval (bl,br) -c res12 - approximation to the integral obtained from cheb12 -c res24 - approximation to the integral obtained from cheb24 -c dqwgts - external function subprogram defining -c the four possible weight functions -c hlgth - half-length of the interval (bl,br) -c centr - mid point of the interval (bl,br) -c -c***first executable statement dqc25s - nev = 25 - if(bl.eq.a.and.(alfa.ne.0.0d+00.or.integr.eq.2.or.integr.eq.4)) - * go to 10 - if(br.eq.b.and.(beta.ne.0.0d+00.or.integr.eq.3.or.integr.eq.4)) - * go to 140 -c -c if a.gt.bl and b.lt.br, apply the 15-point gauss-kronrod -c scheme. -c -c - call dqk15w(f,dqwgts,a,b,alfa,beta,integr,bl,br, - * result,abserr,resabs,resasc) - nev = 15 - go to 270 -c -c this part of the program is executed only if a = bl. 
-c ---------------------------------------------------- -c -c compute the chebyshev series expansion of the -c following function -c f1 = (0.5*(b+b-br-a)-0.5*(br-a)*x)**beta -c *f(0.5*(br-a)*x+0.5*(br+a)) -c - 10 hlgth = 0.5d+00*(br-bl) - centr = 0.5d+00*(br+bl) - fix = b-centr - fval(1) = 0.5d+00*f(hlgth+centr)*(fix-hlgth)**beta - fval(13) = f(centr)*(fix**beta) - fval(25) = 0.5d+00*f(centr-hlgth)*(fix+hlgth)**beta - do 20 i=2,12 - u = hlgth*x(i-1) - isym = 26-i - fval(i) = f(u+centr)*(fix-u)**beta - fval(isym) = f(centr-u)*(fix+u)**beta - 20 continue - factor = hlgth**(alfa+0.1d+01) - result = 0.0d+00 - abserr = 0.0d+00 - res12 = 0.0d+00 - res24 = 0.0d+00 - if(integr.gt.2) go to 70 - call dqcheb(x,fval,cheb12,cheb24) -c -c integr = 1 (or 2) -c - do 30 i=1,13 - res12 = res12+cheb12(i)*ri(i) - res24 = res24+cheb24(i)*ri(i) - 30 continue - do 40 i=14,25 - res24 = res24+cheb24(i)*ri(i) - 40 continue - if(integr.eq.1) go to 130 -c -c integr = 2 -c - dc = dlog(br-bl) - result = res24*dc - abserr = dabs((res24-res12)*dc) - res12 = 0.0d+00 - res24 = 0.0d+00 - do 50 i=1,13 - res12 = res12+cheb12(i)*rg(i) - res24 = res12+cheb24(i)*rg(i) - 50 continue - do 60 i=14,25 - res24 = res24+cheb24(i)*rg(i) - 60 continue - go to 130 -c -c compute the chebyshev series expansion of the -c following function -c f4 = f1*log(0.5*(b+b-br-a)-0.5*(br-a)*x) -c - 70 fval(1) = fval(1)*dlog(fix-hlgth) - fval(13) = fval(13)*dlog(fix) - fval(25) = fval(25)*dlog(fix+hlgth) - do 80 i=2,12 - u = hlgth*x(i-1) - isym = 26-i - fval(i) = fval(i)*dlog(fix-u) - fval(isym) = fval(isym)*dlog(fix+u) - 80 continue - call dqcheb(x,fval,cheb12,cheb24) -c -c integr = 3 (or 4) -c - do 90 i=1,13 - res12 = res12+cheb12(i)*ri(i) - res24 = res24+cheb24(i)*ri(i) - 90 continue - do 100 i=14,25 - res24 = res24+cheb24(i)*ri(i) - 100 continue - if(integr.eq.3) go to 130 -c -c integr = 4 -c - dc = dlog(br-bl) - result = res24*dc - abserr = dabs((res24-res12)*dc) - res12 = 0.0d+00 - res24 = 0.0d+00 - do 110 i=1,13 - res12 = 
res12+cheb12(i)*rg(i) - res24 = res24+cheb24(i)*rg(i) - 110 continue - do 120 i=14,25 - res24 = res24+cheb24(i)*rg(i) - 120 continue - 130 result = (result+res24)*factor - abserr = (abserr+dabs(res24-res12))*factor - go to 270 -c -c this part of the program is executed only if b = br. -c ---------------------------------------------------- -c -c compute the chebyshev series expansion of the -c following function -c f2 = (0.5*(b+bl-a-a)+0.5*(b-bl)*x)**alfa -c *f(0.5*(b-bl)*x+0.5*(b+bl)) -c - 140 hlgth = 0.5d+00*(br-bl) - centr = 0.5d+00*(br+bl) - fix = centr-a - fval(1) = 0.5d+00*f(hlgth+centr)*(fix+hlgth)**alfa - fval(13) = f(centr)*(fix**alfa) - fval(25) = 0.5d+00*f(centr-hlgth)*(fix-hlgth)**alfa - do 150 i=2,12 - u = hlgth*x(i-1) - isym = 26-i - fval(i) = f(u+centr)*(fix+u)**alfa - fval(isym) = f(centr-u)*(fix-u)**alfa - 150 continue - factor = hlgth**(beta+0.1d+01) - result = 0.0d+00 - abserr = 0.0d+00 - res12 = 0.0d+00 - res24 = 0.0d+00 - if(integr.eq.2.or.integr.eq.4) go to 200 -c -c integr = 1 (or 3) -c - call dqcheb(x,fval,cheb12,cheb24) - do 160 i=1,13 - res12 = res12+cheb12(i)*rj(i) - res24 = res24+cheb24(i)*rj(i) - 160 continue - do 170 i=14,25 - res24 = res24+cheb24(i)*rj(i) - 170 continue - if(integr.eq.1) go to 260 -c -c integr = 3 -c - dc = dlog(br-bl) - result = res24*dc - abserr = dabs((res24-res12)*dc) - res12 = 0.0d+00 - res24 = 0.0d+00 - do 180 i=1,13 - res12 = res12+cheb12(i)*rh(i) - res24 = res24+cheb24(i)*rh(i) - 180 continue - do 190 i=14,25 - res24 = res24+cheb24(i)*rh(i) - 190 continue - go to 260 -c -c compute the chebyshev series expansion of the -c following function -c f3 = f2*log(0.5*(b-bl)*x+0.5*(b+bl-a-a)) -c - 200 fval(1) = fval(1)*dlog(hlgth+fix) - fval(13) = fval(13)*dlog(fix) - fval(25) = fval(25)*dlog(fix-hlgth) - do 210 i=2,12 - u = hlgth*x(i-1) - isym = 26-i - fval(i) = fval(i)*dlog(u+fix) - fval(isym) = fval(isym)*dlog(fix-u) - 210 continue - call dqcheb(x,fval,cheb12,cheb24) -c -c integr = 2 (or 4) -c - do 220 i=1,13 - res12 
= res12+cheb12(i)*rj(i) - res24 = res24+cheb24(i)*rj(i) - 220 continue - do 230 i=14,25 - res24 = res24+cheb24(i)*rj(i) - 230 continue - if(integr.eq.2) go to 260 - dc = dlog(br-bl) - result = res24*dc - abserr = dabs((res24-res12)*dc) - res12 = 0.0d+00 - res24 = 0.0d+00 -c -c integr = 4 -c - do 240 i=1,13 - res12 = res12+cheb12(i)*rh(i) - res24 = res24+cheb24(i)*rh(i) - 240 continue - do 250 i=14,25 - res24 = res24+cheb24(i)*rh(i) - 250 continue - 260 result = (result+res24)*factor - abserr = (abserr+dabs(res24-res12))*factor - 270 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqcheb.f b/scipy-0.10.1/scipy/integrate/quadpack/dqcheb.f deleted file mode 100644 index ec85a104e9..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqcheb.f +++ /dev/null @@ -1,148 +0,0 @@ - subroutine dqcheb(x,fval,cheb12,cheb24) -c***begin prologue dqcheb -c***refer to dqc25c,dqc25f,dqc25s -c***routines called (none) -c***revision date 830518 (yymmdd) -c***keywords chebyshev series expansion, fast fourier transform -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose this routine computes the chebyshev series expansion -c of degrees 12 and 24 of a function using a -c fast fourier transform method -c f(x) = sum(k=1,..,13) (cheb12(k)*t(k-1,x)), -c f(x) = sum(k=1,..,25) (cheb24(k)*t(k-1,x)), -c where t(k,x) is the chebyshev polynomial of degree k. -c***description -c -c chebyshev series expansion -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c x - double precision -c vector of dimension 11 containing the -c values cos(k*pi/24), k = 1, ..., 11 -c -c fval - double precision -c vector of dimension 25 containing the -c function values at the points -c (b+a+(b-a)*cos(k*pi/24))/2, k = 0, ...,24, -c where (a,b) is the approximation interval. -c fval(1) and fval(25) are divided by two -c (these values are destroyed at output). 
-c -c on return -c cheb12 - double precision -c vector of dimension 13 containing the -c chebyshev coefficients for degree 12 -c -c cheb24 - double precision -c vector of dimension 25 containing the -c chebyshev coefficients for degree 24 -c -c***end prologue dqcheb -c - double precision alam,alam1,alam2,cheb12,cheb24,fval,part1,part2, - * part3,v,x - integer i,j -c - dimension cheb12(13),cheb24(25),fval(25),v(12),x(11) -c -c***first executable statement dqcheb - do 10 i=1,12 - j = 26-i - v(i) = fval(i)-fval(j) - fval(i) = fval(i)+fval(j) - 10 continue - alam1 = v(1)-v(9) - alam2 = x(6)*(v(3)-v(7)-v(11)) - cheb12(4) = alam1+alam2 - cheb12(10) = alam1-alam2 - alam1 = v(2)-v(8)-v(10) - alam2 = v(4)-v(6)-v(12) - alam = x(3)*alam1+x(9)*alam2 - cheb24(4) = cheb12(4)+alam - cheb24(22) = cheb12(4)-alam - alam = x(9)*alam1-x(3)*alam2 - cheb24(10) = cheb12(10)+alam - cheb24(16) = cheb12(10)-alam - part1 = x(4)*v(5) - part2 = x(8)*v(9) - part3 = x(6)*v(7) - alam1 = v(1)+part1+part2 - alam2 = x(2)*v(3)+part3+x(10)*v(11) - cheb12(2) = alam1+alam2 - cheb12(12) = alam1-alam2 - alam = x(1)*v(2)+x(3)*v(4)+x(5)*v(6)+x(7)*v(8) - * +x(9)*v(10)+x(11)*v(12) - cheb24(2) = cheb12(2)+alam - cheb24(24) = cheb12(2)-alam - alam = x(11)*v(2)-x(9)*v(4)+x(7)*v(6)-x(5)*v(8) - * +x(3)*v(10)-x(1)*v(12) - cheb24(12) = cheb12(12)+alam - cheb24(14) = cheb12(12)-alam - alam1 = v(1)-part1+part2 - alam2 = x(10)*v(3)-part3+x(2)*v(11) - cheb12(6) = alam1+alam2 - cheb12(8) = alam1-alam2 - alam = x(5)*v(2)-x(9)*v(4)-x(1)*v(6) - * -x(11)*v(8)+x(3)*v(10)+x(7)*v(12) - cheb24(6) = cheb12(6)+alam - cheb24(20) = cheb12(6)-alam - alam = x(7)*v(2)-x(3)*v(4)-x(11)*v(6)+x(1)*v(8) - * -x(9)*v(10)-x(5)*v(12) - cheb24(8) = cheb12(8)+alam - cheb24(18) = cheb12(8)-alam - do 20 i=1,6 - j = 14-i - v(i) = fval(i)-fval(j) - fval(i) = fval(i)+fval(j) - 20 continue - alam1 = v(1)+x(8)*v(5) - alam2 = x(4)*v(3) - cheb12(3) = alam1+alam2 - cheb12(11) = alam1-alam2 - cheb12(7) = v(1)-v(5) - alam = x(2)*v(2)+x(6)*v(4)+x(10)*v(6) - 
cheb24(3) = cheb12(3)+alam - cheb24(23) = cheb12(3)-alam - alam = x(6)*(v(2)-v(4)-v(6)) - cheb24(7) = cheb12(7)+alam - cheb24(19) = cheb12(7)-alam - alam = x(10)*v(2)-x(6)*v(4)+x(2)*v(6) - cheb24(11) = cheb12(11)+alam - cheb24(15) = cheb12(11)-alam - do 30 i=1,3 - j = 8-i - v(i) = fval(i)-fval(j) - fval(i) = fval(i)+fval(j) - 30 continue - cheb12(5) = v(1)+x(8)*v(3) - cheb12(9) = fval(1)-x(8)*fval(3) - alam = x(4)*v(2) - cheb24(5) = cheb12(5)+alam - cheb24(21) = cheb12(5)-alam - alam = x(8)*fval(2)-fval(4) - cheb24(9) = cheb12(9)+alam - cheb24(17) = cheb12(9)-alam - cheb12(1) = fval(1)+fval(3) - alam = fval(2)+fval(4) - cheb24(1) = cheb12(1)+alam - cheb24(25) = cheb12(1)-alam - cheb12(13) = v(1)-v(3) - cheb24(13) = cheb12(13) - alam = 0.1d+01/0.6d+01 - do 40 i=2,12 - cheb12(i) = cheb12(i)*alam - 40 continue - alam = 0.5d+00*alam - cheb12(1) = cheb12(1)*alam - cheb12(13) = cheb12(13)*alam - do 50 i=2,24 - cheb24(i) = cheb24(i)*alam - 50 continue - cheb24(1) = 0.5d+00*alam*cheb24(1) - cheb24(25) = 0.5d+00*alam*cheb24(25) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqelg.f b/scipy-0.10.1/scipy/integrate/quadpack/dqelg.f deleted file mode 100644 index 6266682abb..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqelg.f +++ /dev/null @@ -1,184 +0,0 @@ - subroutine dqelg(n,epstab,result,abserr,res3la,nres) -c***begin prologue dqelg -c***refer to dqagie,dqagoe,dqagpe,dqagse -c***routines called d1mach -c***revision date 830518 (yymmdd) -c***keywords epsilon algorithm, convergence acceleration, -c extrapolation -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math & progr. div. - k.u.leuven -c***purpose the routine determines the limit of a given sequence of -c approximations, by means of the epsilon algorithm of -c p.wynn. an estimate of the absolute error is also given. -c the condensed epsilon table is computed. only those -c elements needed for the computation of the next diagonal -c are preserved. 
-c***description -c -c epsilon algorithm -c standard fortran subroutine -c double precision version -c -c parameters -c n - integer -c epstab(n) contains the new element in the -c first column of the epsilon table. -c -c epstab - double precision -c vector of dimension 52 containing the elements -c of the two lower diagonals of the triangular -c epsilon table. the elements are numbered -c starting at the right-hand corner of the -c triangle. -c -c result - double precision -c resulting approximation to the integral -c -c abserr - double precision -c estimate of the absolute error computed from -c result and the 3 previous results -c -c res3la - double precision -c vector of dimension 3 containing the last 3 -c results -c -c nres - integer -c number of calls to the routine -c (should be zero at first call) -c -c***end prologue dqelg -c - double precision abserr,dabs,delta1,delta2,delta3,dmax1,d1mach, - * epmach,epsinf,epstab,error,err1,err2,err3,e0,e1,e1abs,e2,e3, - * oflow,res,result,res3la,ss,tol1,tol2,tol3 - integer i,ib,ib2,ie,indx,k1,k2,k3,limexp,n,newelm,nres,num - dimension epstab(52),res3la(3) -c -c list of major variables -c ----------------------- -c -c e0 - the 4 elements on which the computation of a new -c e1 element in the epsilon table is based -c e2 -c e3 e0 -c e3 e1 new -c e2 -c newelm - number of elements to be computed in the new -c diagonal -c error - error = abs(e1-e0)+abs(e2-e1)+abs(new-e2) -c result - the element in the new diagonal with least value -c of error -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c oflow is the largest positive magnitude. -c limexp is the maximum number of elements the epsilon -c table can contain. if this number is reached, the upper -c diagonal of the epsilon table is deleted. 
-c -c***first executable statement dqelg - epmach = d1mach(4) - oflow = d1mach(2) - nres = nres+1 - abserr = oflow - result = epstab(n) - if(n.lt.3) go to 100 - limexp = 50 - epstab(n+2) = epstab(n) - newelm = (n-1)/2 - epstab(n) = oflow - num = n - k1 = n - do 40 i = 1,newelm - k2 = k1-1 - k3 = k1-2 - res = epstab(k1+2) - e0 = epstab(k3) - e1 = epstab(k2) - e2 = res - e1abs = dabs(e1) - delta2 = e2-e1 - err2 = dabs(delta2) - tol2 = dmax1(dabs(e2),e1abs)*epmach - delta3 = e1-e0 - err3 = dabs(delta3) - tol3 = dmax1(e1abs,dabs(e0))*epmach - if(err2.gt.tol2.or.err3.gt.tol3) go to 10 -c -c if e0, e1 and e2 are equal to within machine -c accuracy, convergence is assumed. -c result = e2 -c abserr = abs(e1-e0)+abs(e2-e1) -c - result = res - abserr = err2+err3 -c ***jump out of do-loop - go to 100 - 10 e3 = epstab(k1) - epstab(k1) = e1 - delta1 = e1-e3 - err1 = dabs(delta1) - tol1 = dmax1(e1abs,dabs(e3))*epmach -c -c if two elements are very close to each other, omit -c a part of the table by adjusting the value of n -c - if(err1.le.tol1.or.err2.le.tol2.or.err3.le.tol3) go to 20 - ss = 0.1d+01/delta1+0.1d+01/delta2-0.1d+01/delta3 - epsinf = dabs(ss*e1) -c -c test to detect irregular behaviour in the table, and -c eventually omit a part of the table adjusting the value -c of n. -c - if(epsinf.gt.0.1d-03) go to 30 - 20 n = i+i-1 -c ***jump out of do-loop - go to 50 -c -c compute a new element and eventually adjust -c the value of result. -c - 30 res = e1+0.1d+01/ss - epstab(k1) = res - k1 = k1-2 - error = err2+dabs(res-e2)+err3 - if(error.gt.abserr) go to 40 - abserr = error - result = res - 40 continue -c -c shift the table. 
-c - 50 if(n.eq.limexp) n = 2*(limexp/2)-1 - ib = 1 - if((num/2)*2.eq.num) ib = 2 - ie = newelm+1 - do 60 i=1,ie - ib2 = ib+2 - epstab(ib) = epstab(ib2) - ib = ib2 - 60 continue - if(num.eq.n) go to 80 - indx = num-n+1 - do 70 i = 1,n - epstab(i)= epstab(indx) - indx = indx+1 - 70 continue - 80 if(nres.ge.4) go to 90 - res3la(nres) = result - abserr = oflow - go to 100 -c -c compute error estimate -c - 90 abserr = dabs(result-res3la(3))+dabs(result-res3la(2)) - * +dabs(result-res3la(1)) - res3la(1) = res3la(2) - res3la(2) = res3la(3) - res3la(3) = result - 100 abserr = dmax1(abserr,0.5d+01*epmach*dabs(result)) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk15.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk15.f deleted file mode 100644 index 1a7ed6c866..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk15.f +++ /dev/null @@ -1,174 +0,0 @@ - subroutine dqk15(f,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk15 -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a2 -c***keywords 15-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div - k.u.leuven -c***purpose to compute i = integral of f over (a,b), with error -c estimate -c j = integral of abs(f) over (a,b) -c***description -c -c integration rules -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the calling program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 15-point -c kronrod rule (resk) obtained by optimal addition -c of abscissae to the7-point gauss rule(resg). 
-c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should not exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of abs(f-i/(b-a)) -c over (a,b) -c -c***references (none) -c***routines called d1mach -c***end prologue dqk15 -c - double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc, - * resg,resk,reskh,result,uflow,wg,wgk,xgk - integer j,jtw,jtwm1 - external f -c - dimension fv1(7),fv2(7),wg(4),wgk(8),xgk(8) -c -c the abscissae and weights are given for the interval (-1,1). -c because of symmetry only the positive abscissae and their -c corresponding weights are given. -c -c xgk - abscissae of the 15-point kronrod rule -c xgk(2), xgk(4), ... abscissae of the 7-point -c gauss rule -c xgk(1), xgk(3), ... abscissae which are optimally -c added to the 7-point gauss rule -c -c wgk - weights of the 15-point kronrod rule -c -c wg - weights of the 7-point gauss rule -c -c -c gauss quadrature weights and kronron quadrature abscissae and weights -c as evaluated with 80 decimal digit arithmetic by l. w. fullerton, -c bell labs, nov. 1981. 
-c - data wg ( 1) / 0.1294849661 6886969327 0611432679 082 d0 / - data wg ( 2) / 0.2797053914 8927666790 1467771423 780 d0 / - data wg ( 3) / 0.3818300505 0511894495 0369775488 975 d0 / - data wg ( 4) / 0.4179591836 7346938775 5102040816 327 d0 / -c - data xgk ( 1) / 0.9914553711 2081263920 6854697526 329 d0 / - data xgk ( 2) / 0.9491079123 4275852452 6189684047 851 d0 / - data xgk ( 3) / 0.8648644233 5976907278 9712788640 926 d0 / - data xgk ( 4) / 0.7415311855 9939443986 3864773280 788 d0 / - data xgk ( 5) / 0.5860872354 6769113029 4144838258 730 d0 / - data xgk ( 6) / 0.4058451513 7739716690 6606412076 961 d0 / - data xgk ( 7) / 0.2077849550 0789846760 0689403773 245 d0 / - data xgk ( 8) / 0.0000000000 0000000000 0000000000 000 d0 / -c - data wgk ( 1) / 0.0229353220 1052922496 3732008058 970 d0 / - data wgk ( 2) / 0.0630920926 2997855329 0700663189 204 d0 / - data wgk ( 3) / 0.1047900103 2225018383 9876322541 518 d0 / - data wgk ( 4) / 0.1406532597 1552591874 5189590510 238 d0 / - data wgk ( 5) / 0.1690047266 3926790282 6583426598 550 d0 / - data wgk ( 6) / 0.1903505780 6478540991 3256402421 014 d0 / - data wgk ( 7) / 0.2044329400 7529889241 4161999234 649 d0 / - data wgk ( 8) / 0.2094821410 8472782801 2999174891 714 d0 / -c -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc - abscissa -c fval* - function value -c resg - result of the 7-point gauss formula -c resk - result of the 15-point kronrod formula -c reskh - approximation to the mean value of f over (a,b), -c i.e. to i/(b-a) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. 
-c -c***first executable statement dqk15 - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 15-point kronrod approximation to -c the integral, and estimate the absolute error. -c - fc = f(centr) - resg = fc*wg(4) - resk = fc*wgk(8) - resabs = dabs(resk) - do 10 j=1,3 - jtw = j*2 - absc = hlgth*xgk(jtw) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j = 1,4 - jtwm1 = j*2-1 - absc = hlgth*xgk(jtwm1) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(8)*dabs(fc-reskh) - do 20 j=1,7 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk15i.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk15i.f deleted file mode 100644 index 22f8be2a25..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk15i.f +++ /dev/null @@ -1,195 +0,0 @@ - subroutine dqk15i(f,boun,inf,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk15i -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a3a2,h2a4a2 -c***keywords 15-point transformed gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. 
- k.u.leuven -c***purpose the original (infinite integration range is mapped -c onto the interval (0,1) and (a,b) is a part of (0,1). -c it is the purpose to compute -c i = integral of transformed integrand over (a,b), -c j = integral of abs(transformed integrand) over (a,b). -c***description -c -c integration rule -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c fuction subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the calling program. -c -c boun - double precision -c finite bound of original integration -c range (set to zero if inf = +2) -c -c inf - integer -c if inf = -1, the original interval is -c (-infinity,bound), -c if inf = +1, the original interval is -c (bound,+infinity), -c if inf = +2, the original interval is -c (-infinity,+infinity) and -c the integral is computed as the sum of two -c integrals, one over (-infinity,0) and one over -c (0,+infinity). -c -c a - double precision -c lower limit for integration over subrange -c of (0,1) -c -c b - double precision -c upper limit for integration over subrange -c of (0,1) -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 15-point -c kronrod rule(resk) obtained by optimal addition -c of abscissae to the 7-point gauss rule(resg). 
-c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of -c abs((transformed integrand)-i/(b-a)) over (a,b) -c -c***references (none) -c***routines called d1mach -c***end prologue dqk15i -c - double precision a,absc,absc1,absc2,abserr,b,boun,centr,dabs,dinf, - * dmax1,dmin1,d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth, - * resabs,resasc,resg,resk,reskh,result,tabsc1,tabsc2,uflow,wg,wgk, - * xgk - integer inf,j - external f -c - dimension fv1(7),fv2(7),xgk(8),wgk(8),wg(8) -c -c the abscissae and weights are supplied for the interval -c (-1,1). because of symmetry only the positive abscissae and -c their corresponding weights are given. -c -c xgk - abscissae of the 15-point kronrod rule -c xgk(2), xgk(4), ... abscissae of the 7-point -c gauss rule -c xgk(1), xgk(3), ... abscissae which are optimally -c added to the 7-point gauss rule -c -c wgk - weights of the 15-point kronrod rule -c -c wg - weights of the 7-point gauss rule, corresponding -c to the abscissae xgk(2), xgk(4), ... -c wg(1), wg(3), ... are set to zero. 
-c - data wg(1) / 0.0d0 / - data wg(2) / 0.1294849661 6886969327 0611432679 082d0 / - data wg(3) / 0.0d0 / - data wg(4) / 0.2797053914 8927666790 1467771423 780d0 / - data wg(5) / 0.0d0 / - data wg(6) / 0.3818300505 0511894495 0369775488 975d0 / - data wg(7) / 0.0d0 / - data wg(8) / 0.4179591836 7346938775 5102040816 327d0 / -c - data xgk(1) / 0.9914553711 2081263920 6854697526 329d0 / - data xgk(2) / 0.9491079123 4275852452 6189684047 851d0 / - data xgk(3) / 0.8648644233 5976907278 9712788640 926d0 / - data xgk(4) / 0.7415311855 9939443986 3864773280 788d0 / - data xgk(5) / 0.5860872354 6769113029 4144838258 730d0 / - data xgk(6) / 0.4058451513 7739716690 6606412076 961d0 / - data xgk(7) / 0.2077849550 0789846760 0689403773 245d0 / - data xgk(8) / 0.0000000000 0000000000 0000000000 000d0 / -c - data wgk(1) / 0.0229353220 1052922496 3732008058 970d0 / - data wgk(2) / 0.0630920926 2997855329 0700663189 204d0 / - data wgk(3) / 0.1047900103 2225018383 9876322541 518d0 / - data wgk(4) / 0.1406532597 1552591874 5189590510 238d0 / - data wgk(5) / 0.1690047266 3926790282 6583426598 550d0 / - data wgk(6) / 0.1903505780 6478540991 3256402421 014d0 / - data wgk(7) / 0.2044329400 7529889241 4161999234 649d0 / - data wgk(8) / 0.2094821410 8472782801 2999174891 714d0 / -c -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc* - abscissa -c tabsc* - transformed abscissa -c fval* - function value -c resg - result of the 7-point gauss formula -c resk - result of the 15-point kronrod formula -c reskh - approximation to the mean value of the transformed -c integrand over (a,b), i.e. to i/(b-a) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. 
-c -c***first executable statement dqk15i - epmach = d1mach(4) - uflow = d1mach(1) - dinf = min0(1,inf) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - tabsc1 = boun+dinf*(0.1d+01-centr)/centr - fval1 = f(tabsc1) - if(inf.eq.2) fval1 = fval1+f(-tabsc1) - fc = (fval1/centr)/centr -c -c compute the 15-point kronrod approximation to -c the integral, and estimate the error. -c - resg = wg(8)*fc - resk = wgk(8)*fc - resabs = dabs(resk) - do 10 j=1,7 - absc = hlgth*xgk(j) - absc1 = centr-absc - absc2 = centr+absc - tabsc1 = boun+dinf*(0.1d+01-absc1)/absc1 - tabsc2 = boun+dinf*(0.1d+01-absc2)/absc2 - fval1 = f(tabsc1) - fval2 = f(tabsc2) - if(inf.eq.2) fval1 = fval1+f(-tabsc1) - if(inf.eq.2) fval2 = fval2+f(-tabsc2) - fval1 = (fval1/absc1)/absc1 - fval2 = (fval2/absc2)/absc2 - fv1(j) = fval1 - fv2(j) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(j)*fsum - resabs = resabs+wgk(j)*(dabs(fval1)+dabs(fval2)) - 10 continue - reskh = resk*0.5d+00 - resasc = wgk(8)*dabs(fc-reskh) - do 20 j=1,7 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resasc = resasc*hlgth - resabs = resabs*hlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.d0) abserr = resasc* - * dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk15w.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk15w.f deleted file mode 100644 index d21044e73d..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk15w.f +++ /dev/null @@ -1,180 +0,0 @@ - subroutine dqk15w(f,w,p1,p2,p3,p4,kp,a,b,result,abserr, - * resabs,resasc) -c***begin prologue dqk15w -c***date written 810101 (yymmdd) -c***revision date 830518 (mmddyy) -c***category no. h2a2a2 -c***keywords 15-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. 
- k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose to compute i = integral of f*w over (a,b), with error -c estimate -c j = integral of abs(f*w) over (a,b) -c***description -c -c integration rules -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c w - double precision -c function subprogram defining the integrand -c weight function w(x). the actual name for w -c needs to be declared e x t e r n a l in the -c calling program. -c -c p1, p2, p3, p4 - double precision -c parameters in the weight function -c -c kp - integer -c key for indicating the type of weight function -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 15-point -c kronrod rule (resk) obtained by optimal addition -c of abscissae to the 7-point gauss rule (resg). -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral of abs(f) -c -c resasc - double precision -c approximation to the integral of abs(f-i/(b-a)) -c -c -c***references (none) -c***routines called d1mach -c***end prologue dqk15w -c - double precision a,absc,absc1,absc2,abserr,b,centr,dabs,dhlgth, - * dmax1,dmin1,d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth, - * p1,p2,p3,p4,resabs,resasc,resg,resk,reskh,result,uflow,w,wg,wgk, - * xgk - integer j,jtw,jtwm1,kp - external f,w -c - dimension fv1(7),fv2(7),xgk(8),wgk(8),wg(4) -c -c the abscissae and weights are given for the interval (-1,1). 
-c because of symmetry only the positive abscissae and their -c corresponding weights are given. -c -c xgk - abscissae of the 15-point gauss-kronrod rule -c xgk(2), xgk(4), ... abscissae of the 7-point -c gauss rule -c xgk(1), xgk(3), ... abscissae which are optimally -c added to the 7-point gauss rule -c -c wgk - weights of the 15-point gauss-kronrod rule -c -c wg - weights of the 7-point gauss rule -c - data xgk(1),xgk(2),xgk(3),xgk(4),xgk(5),xgk(6),xgk(7),xgk(8)/ - * 0.9914553711208126d+00, 0.9491079123427585d+00, - * 0.8648644233597691d+00, 0.7415311855993944d+00, - * 0.5860872354676911d+00, 0.4058451513773972d+00, - * 0.2077849550078985d+00, 0.0000000000000000d+00/ -c - data wgk(1),wgk(2),wgk(3),wgk(4),wgk(5),wgk(6),wgk(7),wgk(8)/ - * 0.2293532201052922d-01, 0.6309209262997855d-01, - * 0.1047900103222502d+00, 0.1406532597155259d+00, - * 0.1690047266392679d+00, 0.1903505780647854d+00, - * 0.2044329400752989d+00, 0.2094821410847278d+00/ -c - data wg(1),wg(2),wg(3),wg(4)/ - * 0.1294849661688697d+00, 0.2797053914892767d+00, - * 0.3818300505051889d+00, 0.4179591836734694d+00/ -c -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc* - abscissa -c fval* - function value -c resg - result of the 7-point gauss formula -c resk - result of the 15-point kronrod formula -c reskh - approximation to the mean value of f*w over (a,b), -c i.e. to i/(b-a) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqk15w - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 15-point kronrod approximation to the -c integral, and estimate the error. 
-c - fc = f(centr)*w(centr,p1,p2,p3,p4,kp) - resg = wg(4)*fc - resk = wgk(8)*fc - resabs = dabs(resk) - do 10 j=1,3 - jtw = j*2 - absc = hlgth*xgk(jtw) - absc1 = centr-absc - absc2 = centr+absc - fval1 = f(absc1)*w(absc1,p1,p2,p3,p4,kp) - fval2 = f(absc2)*w(absc2,p1,p2,p3,p4,kp) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j=1,4 - jtwm1 = j*2-1 - absc = hlgth*xgk(jtwm1) - absc1 = centr-absc - absc2 = centr+absc - fval1 = f(absc1)*w(absc1,p1,p2,p3,p4,kp) - fval2 = f(absc2)*w(absc2,p1,p2,p3,p4,kp) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(8)*dabs(fc-reskh) - do 20 j=1,7 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1((epmach* - * 0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk21.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk21.f deleted file mode 100644 index 9ea1458a55..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk21.f +++ /dev/null @@ -1,182 +0,0 @@ - subroutine dqk21(f,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk21 -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a2 -c***keywords 21-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. 
- k.u.leuven -c***purpose to compute i = integral of f over (a,b), with error -c estimate -c j = integral of abs(f) over (a,b) -c***description -c -c integration rules -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the driver program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 21-point -c kronrod rule (resk) obtained by optimal addition -c of abscissae to the 10-point gauss rule (resg). -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should not exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of abs(f-i/(b-a)) -c over (a,b) -c -c***references (none) -c***routines called d1mach -c***end prologue dqk21 -c - double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc, - * resg,resk,reskh,result,uflow,wg,wgk,xgk - integer j,jtw,jtwm1 - external f -c - dimension fv1(10),fv2(10),wg(5),wgk(11),xgk(11) -c -c the abscissae and weights are given for the interval (-1,1). -c because of symmetry only the positive abscissae and their -c corresponding weights are given. -c -c xgk - abscissae of the 21-point kronrod rule -c xgk(2), xgk(4), ... abscissae of the 10-point -c gauss rule -c xgk(1), xgk(3), ... 
abscissae which are optimally -c added to the 10-point gauss rule -c -c wgk - weights of the 21-point kronrod rule -c -c wg - weights of the 10-point gauss rule -c -c -c gauss quadrature weights and kronron quadrature abscissae and weights -c as evaluated with 80 decimal digit arithmetic by l. w. fullerton, -c bell labs, nov. 1981. -c - data wg ( 1) / 0.0666713443 0868813759 3568809893 332 d0 / - data wg ( 2) / 0.1494513491 5058059314 5776339657 697 d0 / - data wg ( 3) / 0.2190863625 1598204399 5534934228 163 d0 / - data wg ( 4) / 0.2692667193 0999635509 1226921569 469 d0 / - data wg ( 5) / 0.2955242247 1475287017 3892994651 338 d0 / -c - data xgk ( 1) / 0.9956571630 2580808073 5527280689 003 d0 / - data xgk ( 2) / 0.9739065285 1717172007 7964012084 452 d0 / - data xgk ( 3) / 0.9301574913 5570822600 1207180059 508 d0 / - data xgk ( 4) / 0.8650633666 8898451073 2096688423 493 d0 / - data xgk ( 5) / 0.7808177265 8641689706 3717578345 042 d0 / - data xgk ( 6) / 0.6794095682 9902440623 4327365114 874 d0 / - data xgk ( 7) / 0.5627571346 6860468333 9000099272 694 d0 / - data xgk ( 8) / 0.4333953941 2924719079 9265943165 784 d0 / - data xgk ( 9) / 0.2943928627 0146019813 1126603103 866 d0 / - data xgk ( 10) / 0.1488743389 8163121088 4826001129 720 d0 / - data xgk ( 11) / 0.0000000000 0000000000 0000000000 000 d0 / -c - data wgk ( 1) / 0.0116946388 6737187427 8064396062 192 d0 / - data wgk ( 2) / 0.0325581623 0796472747 8818972459 390 d0 / - data wgk ( 3) / 0.0547558965 7435199603 1381300244 580 d0 / - data wgk ( 4) / 0.0750396748 1091995276 7043140916 190 d0 / - data wgk ( 5) / 0.0931254545 8369760553 5065465083 366 d0 / - data wgk ( 6) / 0.1093871588 0229764189 9210590325 805 d0 / - data wgk ( 7) / 0.1234919762 6206585107 7958109831 074 d0 / - data wgk ( 8) / 0.1347092173 1147332592 8054001771 707 d0 / - data wgk ( 9) / 0.1427759385 7706008079 7094273138 717 d0 / - data wgk ( 10) / 0.1477391049 0133849137 4841515972 068 d0 / - data wgk ( 11) / 0.1494455540 0291690566 
4936468389 821 d0 / -c -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc - abscissa -c fval* - function value -c resg - result of the 10-point gauss formula -c resk - result of the 21-point kronrod formula -c reskh - approximation to the mean value of f over (a,b), -c i.e. to i/(b-a) -c -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqk21 - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 21-point kronrod approximation to -c the integral, and estimate the absolute error. -c - resg = 0.0d+00 - fc = f(centr) - resk = wgk(11)*fc - resabs = dabs(resk) - do 10 j=1,5 - jtw = 2*j - absc = hlgth*xgk(jtw) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j = 1,5 - jtwm1 = 2*j-1 - absc = hlgth*xgk(jtwm1) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(11)*dabs(fc-reskh) - do 20 j=1,10 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk31.f 
b/scipy-0.10.1/scipy/integrate/quadpack/dqk31.f deleted file mode 100644 index d5ec411ae5..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk31.f +++ /dev/null @@ -1,191 +0,0 @@ - subroutine dqk31(f,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk31 -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a2 -c***keywords 31-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose to compute i = integral of f over (a,b) with error -c estimate -c j = integral of abs(f) over (a,b) -c***description -c -c integration rules -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the calling program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 31-point -c gauss-kronrod rule (resk), obtained by optimal -c addition of abscissae to the 15-point gauss -c rule (resg). -c -c abserr - double precison -c estimate of the modulus of the modulus, -c which should not exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of abs(f-i/(b-a)) -c over (a,b) -c -c***references (none) -c***routines called d1mach -c***end prologue dqk31 - double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc, - * resg,resk,reskh,result,uflow,wg,wgk,xgk - integer j,jtw,jtwm1 - external f -c - dimension fv1(15),fv2(15),xgk(16),wgk(16),wg(8) -c -c the abscissae and weights are given for the interval (-1,1). 
-c because of symmetry only the positive abscissae and their -c corresponding weights are given. -c -c xgk - abscissae of the 31-point kronrod rule -c xgk(2), xgk(4), ... abscissae of the 15-point -c gauss rule -c xgk(1), xgk(3), ... abscissae which are optimally -c added to the 15-point gauss rule -c -c wgk - weights of the 31-point kronrod rule -c -c wg - weights of the 15-point gauss rule -c -c -c gauss quadrature weights and kronron quadrature abscissae and weights -c as evaluated with 80 decimal digit arithmetic by l. w. fullerton, -c bell labs, nov. 1981. -c - data wg ( 1) / 0.0307532419 9611726835 4628393577 204 d0 / - data wg ( 2) / 0.0703660474 8810812470 9267416450 667 d0 / - data wg ( 3) / 0.1071592204 6717193501 1869546685 869 d0 / - data wg ( 4) / 0.1395706779 2615431444 7804794511 028 d0 / - data wg ( 5) / 0.1662692058 1699393355 3200860481 209 d0 / - data wg ( 6) / 0.1861610000 1556221102 6800561866 423 d0 / - data wg ( 7) / 0.1984314853 2711157645 6118326443 839 d0 / - data wg ( 8) / 0.2025782419 2556127288 0620199967 519 d0 / -c - data xgk ( 1) / 0.9980022986 9339706028 5172840152 271 d0 / - data xgk ( 2) / 0.9879925180 2048542848 9565718586 613 d0 / - data xgk ( 3) / 0.9677390756 7913913425 7347978784 337 d0 / - data xgk ( 4) / 0.9372733924 0070590430 7758947710 209 d0 / - data xgk ( 5) / 0.8972645323 4408190088 2509656454 496 d0 / - data xgk ( 6) / 0.8482065834 1042721620 0648320774 217 d0 / - data xgk ( 7) / 0.7904185014 4246593296 7649294817 947 d0 / - data xgk ( 8) / 0.7244177313 6017004741 6186054613 938 d0 / - data xgk ( 9) / 0.6509967412 9741697053 3735895313 275 d0 / - data xgk ( 10) / 0.5709721726 0853884753 7226737253 911 d0 / - data xgk ( 11) / 0.4850818636 4023968069 3655740232 351 d0 / - data xgk ( 12) / 0.3941513470 7756336989 7207370981 045 d0 / - data xgk ( 13) / 0.2991800071 5316881216 6780024266 389 d0 / - data xgk ( 14) / 0.2011940939 9743452230 0628303394 596 d0 / - data xgk ( 15) / 0.1011420669 1871749902 7074231447 392 d0 / - 
data xgk ( 16) / 0.0000000000 0000000000 0000000000 000 d0 / -c - data wgk ( 1) / 0.0053774798 7292334898 7792051430 128 d0 / - data wgk ( 2) / 0.0150079473 2931612253 8374763075 807 d0 / - data wgk ( 3) / 0.0254608473 2671532018 6874001019 653 d0 / - data wgk ( 4) / 0.0353463607 9137584622 2037948478 360 d0 / - data wgk ( 5) / 0.0445897513 2476487660 8227299373 280 d0 / - data wgk ( 6) / 0.0534815246 9092808726 5343147239 430 d0 / - data wgk ( 7) / 0.0620095678 0067064028 5139230960 803 d0 / - data wgk ( 8) / 0.0698541213 1872825870 9520077099 147 d0 / - data wgk ( 9) / 0.0768496807 5772037889 4432777482 659 d0 / - data wgk ( 10) / 0.0830805028 2313302103 8289247286 104 d0 / - data wgk ( 11) / 0.0885644430 5621177064 7275443693 774 d0 / - data wgk ( 12) / 0.0931265981 7082532122 5486872747 346 d0 / - data wgk ( 13) / 0.0966427269 8362367850 5179907627 589 d0 / - data wgk ( 14) / 0.0991735987 2179195933 2393173484 603 d0 / - data wgk ( 15) / 0.1007698455 2387559504 4946662617 570 d0 / - data wgk ( 16) / 0.1013300070 1479154901 7374792767 493 d0 / -c -c -c list of major variables -c ----------------------- -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc - abscissa -c fval* - function value -c resg - result of the 15-point gauss formula -c resk - result of the 31-point kronrod formula -c reskh - approximation to the mean value of f over (a,b), -c i.e. to i/(b-a) -c -c machine dependent constants -c --------------------------- -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c***first executable statement dqk31 - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 31-point kronrod approximation to -c the integral, and estimate the absolute error. 
-c - fc = f(centr) - resg = wg(8)*fc - resk = wgk(16)*fc - resabs = dabs(resk) - do 10 j=1,7 - jtw = j*2 - absc = hlgth*xgk(jtw) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j = 1,8 - jtwm1 = j*2-1 - absc = hlgth*xgk(jtwm1) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(16)*dabs(fc-reskh) - do 20 j=1,15 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk41.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk41.f deleted file mode 100644 index 247d07f394..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk41.f +++ /dev/null @@ -1,207 +0,0 @@ - subroutine dqk41(f,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk41 -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a2 -c***keywords 41-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. 
- k.u.leuven -c***purpose to compute i = integral of f over (a,b), with error -c estimate -c j = integral of abs(f) over (a,b) -c***description -c -c integration rules -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the calling program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 41-point -c gauss-kronrod rule (resk) obtained by optimal -c addition of abscissae to the 20-point gauss -c rule (resg). -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should not exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integal of abs(f-i/(b-a)) -c over (a,b) -c -c***references (none) -c***routines called d1mach -c***end prologue dqk41 -c - double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc, - * resg,resk,reskh,result,uflow,wg,wgk,xgk - integer j,jtw,jtwm1 - external f -c - dimension fv1(20),fv2(20),xgk(21),wgk(21),wg(10) -c -c the abscissae and weights are given for the interval (-1,1). -c because of symmetry only the positive abscissae and their -c corresponding weights are given. -c -c xgk - abscissae of the 41-point gauss-kronrod rule -c xgk(2), xgk(4), ... abscissae of the 20-point -c gauss rule -c xgk(1), xgk(3), ... 
abscissae which are optimally -c added to the 20-point gauss rule -c -c wgk - weights of the 41-point gauss-kronrod rule -c -c wg - weights of the 20-point gauss rule -c -c -c gauss quadrature weights and kronron quadrature abscissae and weights -c as evaluated with 80 decimal digit arithmetic by l. w. fullerton, -c bell labs, nov. 1981. -c - data wg ( 1) / 0.0176140071 3915211831 1861962351 853 d0 / - data wg ( 2) / 0.0406014298 0038694133 1039952274 932 d0 / - data wg ( 3) / 0.0626720483 3410906356 9506535187 042 d0 / - data wg ( 4) / 0.0832767415 7670474872 4758143222 046 d0 / - data wg ( 5) / 0.1019301198 1724043503 6750135480 350 d0 / - data wg ( 6) / 0.1181945319 6151841731 2377377711 382 d0 / - data wg ( 7) / 0.1316886384 4917662689 8494499748 163 d0 / - data wg ( 8) / 0.1420961093 1838205132 9298325067 165 d0 / - data wg ( 9) / 0.1491729864 7260374678 7828737001 969 d0 / - data wg ( 10) / 0.1527533871 3072585069 8084331955 098 d0 / -c - data xgk ( 1) / 0.9988590315 8827766383 8315576545 863 d0 / - data xgk ( 2) / 0.9931285991 8509492478 6122388471 320 d0 / - data xgk ( 3) / 0.9815078774 5025025919 3342994720 217 d0 / - data xgk ( 4) / 0.9639719272 7791379126 7666131197 277 d0 / - data xgk ( 5) / 0.9408226338 3175475351 9982722212 443 d0 / - data xgk ( 6) / 0.9122344282 5132590586 7752441203 298 d0 / - data xgk ( 7) / 0.8782768112 5228197607 7442995113 078 d0 / - data xgk ( 8) / 0.8391169718 2221882339 4529061701 521 d0 / - data xgk ( 9) / 0.7950414288 3755119835 0638833272 788 d0 / - data xgk ( 10) / 0.7463319064 6015079261 4305070355 642 d0 / - data xgk ( 11) / 0.6932376563 3475138480 5490711845 932 d0 / - data xgk ( 12) / 0.6360536807 2651502545 2836696226 286 d0 / - data xgk ( 13) / 0.5751404468 1971031534 2946036586 425 d0 / - data xgk ( 14) / 0.5108670019 5082709800 4364050955 251 d0 / - data xgk ( 15) / 0.4435931752 3872510319 9992213492 640 d0 / - data xgk ( 16) / 0.3737060887 1541956067 2548177024 927 d0 / - data xgk ( 17) / 0.3016278681 1491300432 
0555356858 592 d0 / - data xgk ( 18) / 0.2277858511 4164507808 0496195368 575 d0 / - data xgk ( 19) / 0.1526054652 4092267550 5220241022 678 d0 / - data xgk ( 20) / 0.0765265211 3349733375 4640409398 838 d0 / - data xgk ( 21) / 0.0000000000 0000000000 0000000000 000 d0 / -c - data wgk ( 1) / 0.0030735837 1852053150 1218293246 031 d0 / - data wgk ( 2) / 0.0086002698 5564294219 8661787950 102 d0 / - data wgk ( 3) / 0.0146261692 5697125298 3787960308 868 d0 / - data wgk ( 4) / 0.0203883734 6126652359 8010231432 755 d0 / - data wgk ( 5) / 0.0258821336 0495115883 4505067096 153 d0 / - data wgk ( 6) / 0.0312873067 7703279895 8543119323 801 d0 / - data wgk ( 7) / 0.0366001697 5820079803 0557240707 211 d0 / - data wgk ( 8) / 0.0416688733 2797368626 3788305936 895 d0 / - data wgk ( 9) / 0.0464348218 6749767472 0231880926 108 d0 / - data wgk ( 10) / 0.0509445739 2372869193 2707670050 345 d0 / - data wgk ( 11) / 0.0551951053 4828599474 4832372419 777 d0 / - data wgk ( 12) / 0.0591114008 8063957237 4967220648 594 d0 / - data wgk ( 13) / 0.0626532375 5478116802 5870122174 255 d0 / - data wgk ( 14) / 0.0658345971 3361842211 1563556969 398 d0 / - data wgk ( 15) / 0.0686486729 2852161934 5623411885 368 d0 / - data wgk ( 16) / 0.0710544235 5344406830 5790361723 210 d0 / - data wgk ( 17) / 0.0730306903 3278666749 5189417658 913 d0 / - data wgk ( 18) / 0.0745828754 0049918898 6581418362 488 d0 / - data wgk ( 19) / 0.0757044976 8455667465 9542775376 617 d0 / - data wgk ( 20) / 0.0763778676 7208073670 5502835038 061 d0 / - data wgk ( 21) / 0.0766007119 1799965644 5049901530 102 d0 / -c -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc - abscissa -c fval* - function value -c resg - result of the 20-point gauss formula -c resk - result of the 41-point kronrod formula -c reskh - approximation to mean value of f over (a,b), i.e. 
-c to i/(b-a) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqk41 - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 41-point gauss-kronrod approximation to -c the integral, and estimate the absolute error. -c - resg = 0.0d+00 - fc = f(centr) - resk = wgk(21)*fc - resabs = dabs(resk) - do 10 j=1,10 - jtw = j*2 - absc = hlgth*xgk(jtw) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j = 1,10 - jtwm1 = j*2-1 - absc = hlgth*xgk(jtwm1) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(21)*dabs(fc-reskh) - do 20 j=1,20 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk51.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk51.f deleted file mode 100644 index 691722de84..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk51.f +++ /dev/null @@ -1,220 +0,0 @@ - subroutine dqk51(f,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk51 -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. 
h2a1a2 -c***keywords 51-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math & progr. div. - k.u.leuven -c***purpose to compute i = integral of f over (a,b) with error -c estimate -c j = integral of abs(f) over (a,b) -c***description -c -c integration rules -c standard fortran subroutine -c double precision version -c -c parameters -c on entry -c f - double precision -c function subroutine defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the calling program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 51-point -c kronrod rule (resk) obtained by optimal addition -c of abscissae to the 25-point gauss rule (resg). -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should not exceed abs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of abs(f-i/(b-a)) -c over (a,b) -c -c***references (none) -c***routines called d1mach -c***end prologue dqk51 -c - double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc, - * resg,resk,reskh,result,uflow,wg,wgk,xgk - integer j,jtw,jtwm1 - external f -c - dimension fv1(25),fv2(25),xgk(26),wgk(26),wg(13) -c -c the abscissae and weights are given for the interval (-1,1). -c because of symmetry only the positive abscissae and their -c corresponding weights are given. -c -c xgk - abscissae of the 51-point kronrod rule -c xgk(2), xgk(4), ... abscissae of the 25-point -c gauss rule -c xgk(1), xgk(3), ... 
abscissae which are optimally -c added to the 25-point gauss rule -c -c wgk - weights of the 51-point kronrod rule -c -c wg - weights of the 25-point gauss rule -c -c -c gauss quadrature weights and kronron quadrature abscissae and weights -c as evaluated with 80 decimal digit arithmetic by l. w. fullerton, -c bell labs, nov. 1981. -c - data wg ( 1) / 0.0113937985 0102628794 7902964113 235 d0 / - data wg ( 2) / 0.0263549866 1503213726 1901815295 299 d0 / - data wg ( 3) / 0.0409391567 0130631265 5623487711 646 d0 / - data wg ( 4) / 0.0549046959 7583519192 5936891540 473 d0 / - data wg ( 5) / 0.0680383338 1235691720 7187185656 708 d0 / - data wg ( 6) / 0.0801407003 3500101801 3234959669 111 d0 / - data wg ( 7) / 0.0910282619 8296364981 1497220702 892 d0 / - data wg ( 8) / 0.1005359490 6705064420 2206890392 686 d0 / - data wg ( 9) / 0.1085196244 7426365311 6093957050 117 d0 / - data wg ( 10) / 0.1148582591 4571164833 9325545869 556 d0 / - data wg ( 11) / 0.1194557635 3578477222 8178126512 901 d0 / - data wg ( 12) / 0.1222424429 9031004168 8959518945 852 d0 / - data wg ( 13) / 0.1231760537 2671545120 3902873079 050 d0 / -c - data xgk ( 1) / 0.9992621049 9260983419 3457486540 341 d0 / - data xgk ( 2) / 0.9955569697 9049809790 8784946893 902 d0 / - data xgk ( 3) / 0.9880357945 3407724763 7331014577 406 d0 / - data xgk ( 4) / 0.9766639214 5951751149 8315386479 594 d0 / - data xgk ( 5) / 0.9616149864 2584251241 8130033660 167 d0 / - data xgk ( 6) / 0.9429745712 2897433941 4011169658 471 d0 / - data xgk ( 7) / 0.9207471152 8170156174 6346084546 331 d0 / - data xgk ( 8) / 0.8949919978 7827536885 1042006782 805 d0 / - data xgk ( 9) / 0.8658470652 9327559544 8996969588 340 d0 / - data xgk ( 10) / 0.8334426287 6083400142 1021108693 570 d0 / - data xgk ( 11) / 0.7978737979 9850005941 0410904994 307 d0 / - data xgk ( 12) / 0.7592592630 3735763057 7282865204 361 d0 / - data xgk ( 13) / 0.7177664068 1308438818 6654079773 298 d0 / - data xgk ( 14) / 0.6735663684 7346836448 
5120633247 622 d0 / - data xgk ( 15) / 0.6268100990 1031741278 8122681624 518 d0 / - data xgk ( 16) / 0.5776629302 4122296772 3689841612 654 d0 / - data xgk ( 17) / 0.5263252843 3471918259 9623778158 010 d0 / - data xgk ( 18) / 0.4730027314 4571496052 2182115009 192 d0 / - data xgk ( 19) / 0.4178853821 9303774885 1814394594 572 d0 / - data xgk ( 20) / 0.3611723058 0938783773 5821730127 641 d0 / - data xgk ( 21) / 0.3030895389 3110783016 7478909980 339 d0 / - data xgk ( 22) / 0.2438668837 2098843204 5190362797 452 d0 / - data xgk ( 23) / 0.1837189394 2104889201 5969888759 528 d0 / - data xgk ( 24) / 0.1228646926 1071039638 7359818808 037 d0 / - data xgk ( 25) / 0.0615444830 0568507888 6546392366 797 d0 / - data xgk ( 26) / 0.0000000000 0000000000 0000000000 000 d0 / -c - data wgk ( 1) / 0.0019873838 9233031592 6507851882 843 d0 / - data wgk ( 2) / 0.0055619321 3535671375 8040236901 066 d0 / - data wgk ( 3) / 0.0094739733 8617415160 7207710523 655 d0 / - data wgk ( 4) / 0.0132362291 9557167481 3656405846 976 d0 / - data wgk ( 5) / 0.0168478177 0912829823 1516667536 336 d0 / - data wgk ( 6) / 0.0204353711 4588283545 6568292235 939 d0 / - data wgk ( 7) / 0.0240099456 0695321622 0092489164 881 d0 / - data wgk ( 8) / 0.0274753175 8785173780 2948455517 811 d0 / - data wgk ( 9) / 0.0307923001 6738748889 1109020215 229 d0 / - data wgk ( 10) / 0.0340021302 7432933783 6748795229 551 d0 / - data wgk ( 11) / 0.0371162714 8341554356 0330625367 620 d0 / - data wgk ( 12) / 0.0400838255 0403238207 4839284467 076 d0 / - data wgk ( 13) / 0.0428728450 2017004947 6895792439 495 d0 / - data wgk ( 14) / 0.0455029130 4992178890 9870584752 660 d0 / - data wgk ( 15) / 0.0479825371 3883671390 6392255756 915 d0 / - data wgk ( 16) / 0.0502776790 8071567196 3325259433 440 d0 / - data wgk ( 17) / 0.0523628858 0640747586 4366712137 873 d0 / - data wgk ( 18) / 0.0542511298 8854549014 4543370459 876 d0 / - data wgk ( 19) / 0.0559508112 2041231730 8240686382 747 d0 / - data wgk ( 20) / 0.0574371163 
6156783285 3582693939 506 d0 / - data wgk ( 21) / 0.0586896800 2239420796 1974175856 788 d0 / - data wgk ( 22) / 0.0597203403 2417405997 9099291932 562 d0 / - data wgk ( 23) / 0.0605394553 7604586294 5360267517 565 d0 / - data wgk ( 24) / 0.0611285097 1705304830 5859030416 293 d0 / - data wgk ( 25) / 0.0614711898 7142531666 1544131965 264 d0 / -c note: wgk (26) was calculated from the values of wgk(1..25) - data wgk ( 26) / 0.0615808180 6783293507 8759824240 066 d0 / -c -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c absc - abscissa -c fval* - function value -c resg - result of the 25-point gauss formula -c resk - result of the 51-point kronrod formula -c reskh - approximation to the mean value of f over (a,b), -c i.e. to i/(b-a) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqk51 - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(a+b) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 51-point kronrod approximation to -c the integral, and estimate the absolute error. 
-c - fc = f(centr) - resg = wg(13)*fc - resk = wgk(26)*fc - resabs = dabs(resk) - do 10 j=1,12 - jtw = j*2 - absc = hlgth*xgk(jtw) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j = 1,13 - jtwm1 = j*2-1 - absc = hlgth*xgk(jtwm1) - fval1 = f(centr-absc) - fval2 = f(centr+absc) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(26)*dabs(fc-reskh) - do 20 j=1,25 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqk61.f b/scipy-0.10.1/scipy/integrate/quadpack/dqk61.f deleted file mode 100644 index b590f4c372..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqk61.f +++ /dev/null @@ -1,231 +0,0 @@ - subroutine dqk61(f,a,b,result,abserr,resabs,resasc) -c***begin prologue dqk61 -c***date written 800101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a1a2 -c***keywords 61-point gauss-kronrod rules -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. 
- k.u.leuven -c***purpose to compute i = integral of f over (a,b) with error -c estimate -c j = integral of dabs(f) over (a,b) -c***description -c -c integration rule -c standard fortran subroutine -c double precision version -c -c -c parameters -c on entry -c f - double precision -c function subprogram defining the integrand -c function f(x). the actual name for f needs to be -c declared e x t e r n a l in the calling program. -c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c on return -c result - double precision -c approximation to the integral i -c result is computed by applying the 61-point -c kronrod rule (resk) obtained by optimal addition of -c abscissae to the 30-point gauss rule (resg). -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed dabs(i-result) -c -c resabs - double precision -c approximation to the integral j -c -c resasc - double precision -c approximation to the integral of dabs(f-i/(b-a)) -c -c -c***references (none) -c***routines called d1mach -c***end prologue dqk61 -c - double precision a,dabsc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc, - * resg,resk,reskh,result,uflow,wg,wgk,xgk - integer j,jtw,jtwm1 - external f -c - dimension fv1(30),fv2(30),xgk(31),wgk(31),wg(15) -c -c the abscissae and weights are given for the -c interval (-1,1). because of symmetry only the positive -c abscissae and their corresponding weights are given. -c -c xgk - abscissae of the 61-point kronrod rule -c xgk(2), xgk(4) ... abscissae of the 30-point -c gauss rule -c xgk(1), xgk(3) ... optimally added abscissae -c to the 30-point gauss rule -c -c wgk - weights of the 61-point kronrod rule -c -c wg - weigths of the 30-point gauss rule -c -c -c gauss quadrature weights and kronron quadrature abscissae and weights -c as evaluated with 80 decimal digit arithmetic by l. w. 
fullerton, -c bell labs, nov. 1981. -c - data wg ( 1) / 0.0079681924 9616660561 5465883474 674 d0 / - data wg ( 2) / 0.0184664683 1109095914 2302131912 047 d0 / - data wg ( 3) / 0.0287847078 8332336934 9719179611 292 d0 / - data wg ( 4) / 0.0387991925 6962704959 6801936446 348 d0 / - data wg ( 5) / 0.0484026728 3059405290 2938140422 808 d0 / - data wg ( 6) / 0.0574931562 1761906648 1721689402 056 d0 / - data wg ( 7) / 0.0659742298 8218049512 8128515115 962 d0 / - data wg ( 8) / 0.0737559747 3770520626 8243850022 191 d0 / - data wg ( 9) / 0.0807558952 2942021535 4694938460 530 d0 / - data wg ( 10) / 0.0868997872 0108297980 2387530715 126 d0 / - data wg ( 11) / 0.0921225222 3778612871 7632707087 619 d0 / - data wg ( 12) / 0.0963687371 7464425963 9468626351 810 d0 / - data wg ( 13) / 0.0995934205 8679526706 2780282103 569 d0 / - data wg ( 14) / 0.1017623897 4840550459 6428952168 554 d0 / - data wg ( 15) / 0.1028526528 9355884034 1285636705 415 d0 / -c - data xgk ( 1) / 0.9994844100 5049063757 1325895705 811 d0 / - data xgk ( 2) / 0.9968934840 7464954027 1630050918 695 d0 / - data xgk ( 3) / 0.9916309968 7040459485 8628366109 486 d0 / - data xgk ( 4) / 0.9836681232 7974720997 0032581605 663 d0 / - data xgk ( 5) / 0.9731163225 0112626837 4693868423 707 d0 / - data xgk ( 6) / 0.9600218649 6830751221 6871025581 798 d0 / - data xgk ( 7) / 0.9443744447 4855997941 5831324037 439 d0 / - data xgk ( 8) / 0.9262000474 2927432587 9324277080 474 d0 / - data xgk ( 9) / 0.9055733076 9990779854 6522558925 958 d0 / - data xgk ( 10) / 0.8825605357 9205268154 3116462530 226 d0 / - data xgk ( 11) / 0.8572052335 4606109895 8658510658 944 d0 / - data xgk ( 12) / 0.8295657623 8276839744 2898119732 502 d0 / - data xgk ( 13) / 0.7997278358 2183908301 3668942322 683 d0 / - data xgk ( 14) / 0.7677774321 0482619491 7977340974 503 d0 / - data xgk ( 15) / 0.7337900624 5322680472 6171131369 528 d0 / - data xgk ( 16) / 0.6978504947 9331579693 2292388026 640 d0 / - data xgk ( 17) / 0.6600610641 
2662696137 0053668149 271 d0 / - data xgk ( 18) / 0.6205261829 8924286114 0477556431 189 d0 / - data xgk ( 19) / 0.5793452358 2636169175 6024932172 540 d0 / - data xgk ( 20) / 0.5366241481 4201989926 4169793311 073 d0 / - data xgk ( 21) / 0.4924804678 6177857499 3693061207 709 d0 / - data xgk ( 22) / 0.4470337695 3808917678 0609900322 854 d0 / - data xgk ( 23) / 0.4004012548 3039439253 5476211542 661 d0 / - data xgk ( 24) / 0.3527047255 3087811347 1037207089 374 d0 / - data xgk ( 25) / 0.3040732022 7362507737 2677107199 257 d0 / - data xgk ( 26) / 0.2546369261 6788984643 9805129817 805 d0 / - data xgk ( 27) / 0.2045251166 8230989143 8957671002 025 d0 / - data xgk ( 28) / 0.1538699136 0858354696 3794672743 256 d0 / - data xgk ( 29) / 0.1028069379 6673703014 7096751318 001 d0 / - data xgk ( 30) / 0.0514718425 5531769583 3025213166 723 d0 / - data xgk ( 31) / 0.0000000000 0000000000 0000000000 000 d0 / -c - data wgk ( 1) / 0.0013890136 9867700762 4551591226 760 d0 / - data wgk ( 2) / 0.0038904611 2709988405 1267201844 516 d0 / - data wgk ( 3) / 0.0066307039 1593129217 3319826369 750 d0 / - data wgk ( 4) / 0.0092732796 5951776342 8441146892 024 d0 / - data wgk ( 5) / 0.0118230152 5349634174 2232898853 251 d0 / - data wgk ( 6) / 0.0143697295 0704580481 2451432443 580 d0 / - data wgk ( 7) / 0.0169208891 8905327262 7572289420 322 d0 / - data wgk ( 8) / 0.0194141411 9394238117 3408951050 128 d0 / - data wgk ( 9) / 0.0218280358 2160919229 7167485738 339 d0 / - data wgk ( 10) / 0.0241911620 7808060136 5686370725 232 d0 / - data wgk ( 11) / 0.0265099548 8233310161 0601709335 075 d0 / - data wgk ( 12) / 0.0287540487 6504129284 3978785354 334 d0 / - data wgk ( 13) / 0.0309072575 6238776247 2884252943 092 d0 / - data wgk ( 14) / 0.0329814470 5748372603 1814191016 854 d0 / - data wgk ( 15) / 0.0349793380 2806002413 7499670731 468 d0 / - data wgk ( 16) / 0.0368823646 5182122922 3911065617 136 d0 / - data wgk ( 17) / 0.0386789456 2472759295 0348651532 281 d0 / - data wgk ( 18) / 
0.0403745389 5153595911 1995279752 468 d0 / - data wgk ( 19) / 0.0419698102 1516424614 7147541285 970 d0 / - data wgk ( 20) / 0.0434525397 0135606931 6831728117 073 d0 / - data wgk ( 21) / 0.0448148001 3316266319 2355551616 723 d0 / - data wgk ( 22) / 0.0460592382 7100698811 6271735559 374 d0 / - data wgk ( 23) / 0.0471855465 6929915394 5261478181 099 d0 / - data wgk ( 24) / 0.0481858617 5708712914 0779492298 305 d0 / - data wgk ( 25) / 0.0490554345 5502977888 7528165367 238 d0 / - data wgk ( 26) / 0.0497956834 2707420635 7811569379 942 d0 / - data wgk ( 27) / 0.0504059214 0278234684 0893085653 585 d0 / - data wgk ( 28) / 0.0508817958 9874960649 2297473049 805 d0 / - data wgk ( 29) / 0.0512215478 4925877217 0656282604 944 d0 / - data wgk ( 30) / 0.0514261285 3745902593 3862879215 781 d0 / - data wgk ( 31) / 0.0514947294 2945156755 8340433647 099 d0 / -c -c list of major variables -c ----------------------- -c -c centr - mid point of the interval -c hlgth - half-length of the interval -c dabsc - abscissa -c fval* - function value -c resg - result of the 30-point gauss rule -c resk - result of the 61-point kronrod rule -c reskh - approximation to the mean value of f -c over (a,b), i.e. to i/(b-a) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c - epmach = d1mach(4) - uflow = d1mach(1) -c - centr = 0.5d+00*(b+a) - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) -c -c compute the 61-point kronrod approximation to the -c integral, and estimate the absolute error. 
-c -c***first executable statement dqk61 - resg = 0.0d+00 - fc = f(centr) - resk = wgk(31)*fc - resabs = dabs(resk) - do 10 j=1,15 - jtw = j*2 - dabsc = hlgth*xgk(jtw) - fval1 = f(centr-dabsc) - fval2 = f(centr+dabsc) - fv1(jtw) = fval1 - fv2(jtw) = fval2 - fsum = fval1+fval2 - resg = resg+wg(j)*fsum - resk = resk+wgk(jtw)*fsum - resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2)) - 10 continue - do 15 j=1,15 - jtwm1 = j*2-1 - dabsc = hlgth*xgk(jtwm1) - fval1 = f(centr-dabsc) - fval2 = f(centr+dabsc) - fv1(jtwm1) = fval1 - fv2(jtwm1) = fval2 - fsum = fval1+fval2 - resk = resk+wgk(jtwm1)*fsum - resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2)) - 15 continue - reskh = resk*0.5d+00 - resasc = wgk(31)*dabs(fc-reskh) - do 20 j=1,30 - resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh)) - 20 continue - result = resk*hlgth - resabs = resabs*dhlgth - resasc = resasc*dhlgth - abserr = dabs((resk-resg)*hlgth) - if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqmomo.f b/scipy-0.10.1/scipy/integrate/quadpack/dqmomo.f deleted file mode 100644 index 4bcfffd4cd..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqmomo.f +++ /dev/null @@ -1,127 +0,0 @@ - subroutine dqmomo(alfa,beta,ri,rj,rg,rh,integr) -c***begin prologue dqmomo -c***date written 820101 (yymmdd) -c***revision date 830518 (yymmdd) -c***category no. h2a2a1,c3a2 -c***keywords modified chebyshev moments -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose this routine computes modified chebsyshev moments. the k-th -c modified chebyshev moment is defined as the integral over -c (-1,1) of w(x)*t(k,x), where t(k,x) is the chebyshev -c polynomial of degree k. 
-c***description -c -c modified chebyshev moments -c standard fortran subroutine -c double precision version -c -c parameters -c alfa - double precision -c parameter in the weight function w(x), alfa.gt.(-1) -c -c beta - double precision -c parameter in the weight function w(x), beta.gt.(-1) -c -c ri - double precision -c vector of dimension 25 -c ri(k) is the integral over (-1,1) of -c (1+x)**alfa*t(k-1,x), k = 1, ..., 25. -c -c rj - double precision -c vector of dimension 25 -c rj(k) is the integral over (-1,1) of -c (1-x)**beta*t(k-1,x), k = 1, ..., 25. -c -c rg - double precision -c vector of dimension 25 -c rg(k) is the integral over (-1,1) of -c (1+x)**alfa*log((1+x)/2)*t(k-1,x), k = 1, ..., 25. -c -c rh - double precision -c vector of dimension 25 -c rh(k) is the integral over (-1,1) of -c (1-x)**beta*log((1-x)/2)*t(k-1,x), k = 1, ..., 25. -c -c integr - integer -c input parameter indicating the modified -c moments to be computed -c integr = 1 compute ri, rj -c = 2 compute ri, rj, rg -c = 3 compute ri, rj, rh -c = 4 compute ri, rj, rg, rh -c -c***references (none) -c***routines called (none) -c***end prologue dqmomo -c - double precision alfa,alfp1,alfp2,an,anm1,beta,betp1,betp2,ralf, - * rbet,rg,rh,ri,rj - integer i,im1,integr -c - dimension rg(25),rh(25),ri(25),rj(25) -c -c -c***first executable statement dqmomo - alfp1 = alfa+0.1d+01 - betp1 = beta+0.1d+01 - alfp2 = alfa+0.2d+01 - betp2 = beta+0.2d+01 - ralf = 0.2d+01**alfp1 - rbet = 0.2d+01**betp1 -c -c compute ri, rj using a forward recurrence relation. -c - ri(1) = ralf/alfp1 - rj(1) = rbet/betp1 - ri(2) = ri(1)*alfa/alfp2 - rj(2) = rj(1)*beta/betp2 - an = 0.2d+01 - anm1 = 0.1d+01 - do 20 i=3,25 - ri(i) = -(ralf+an*(an-alfp2)*ri(i-1))/(anm1*(an+alfp1)) - rj(i) = -(rbet+an*(an-betp2)*rj(i-1))/(anm1*(an+betp1)) - anm1 = an - an = an+0.1d+01 - 20 continue - if(integr.eq.1) go to 70 - if(integr.eq.3) go to 40 -c -c compute rg using a forward recurrence relation. 
-c - rg(1) = -ri(1)/alfp1 - rg(2) = -(ralf+ralf)/(alfp2*alfp2)-rg(1) - an = 0.2d+01 - anm1 = 0.1d+01 - im1 = 2 - do 30 i=3,25 - rg(i) = -(an*(an-alfp2)*rg(im1)-an*ri(im1)+anm1*ri(i))/ - * (anm1*(an+alfp1)) - anm1 = an - an = an+0.1d+01 - im1 = i - 30 continue - if(integr.eq.2) go to 70 -c -c compute rh using a forward recurrence relation. -c - 40 rh(1) = -rj(1)/betp1 - rh(2) = -(rbet+rbet)/(betp2*betp2)-rh(1) - an = 0.2d+01 - anm1 = 0.1d+01 - im1 = 2 - do 50 i=3,25 - rh(i) = -(an*(an-betp2)*rh(im1)-an*rj(im1)+ - * anm1*rj(i))/(anm1*(an+betp1)) - anm1 = an - an = an+0.1d+01 - im1 = i - 50 continue - do 60 i=2,25,2 - rh(i) = -rh(i) - 60 continue - 70 do 80 i=2,25,2 - rj(i) = -rj(i) - 80 continue - 90 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqng.f b/scipy-0.10.1/scipy/integrate/quadpack/dqng.f deleted file mode 100644 index 4ec2abab50..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqng.f +++ /dev/null @@ -1,374 +0,0 @@ - subroutine dqng(f,a,b,epsabs,epsrel,result,abserr,neval,ier) -c***begin prologue dqng -c***date written 800101 (yymmdd) -c***revision date 810101 (yymmdd) -c***category no. h2a1a1 -c***keywords automatic integrator, smooth integrand, -c non-adaptive, gauss-kronrod(patterson) -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl math & progr. div. - k.u.leuven -c kahaner,david,nbs - modified (2/82) -c***purpose the routine calculates an approximation result to a -c given definite integral i = integral of f over (a,b), -c hopefully satisfying following claim for accuracy -c abs(i-result).le.max(epsabs,epsrel*abs(i)). -c***description -c -c non-adaptive integration -c standard fortran subroutine -c double precision version -c -c f - double precision -c function subprogram defining the integrand function -c f(x). the actual name for f needs to be declared -c e x t e r n a l in the driver program. 
-c -c a - double precision -c lower limit of integration -c -c b - double precision -c upper limit of integration -c -c epsabs - double precision -c absolute accuracy requested -c epsrel - double precision -c relative accuracy requested -c if epsabs.le.0 -c and epsrel.lt.max(50*rel.mach.acc.,0.5d-28), -c the routine will end with ier = 6. -c -c on return -c result - double precision -c approximation to the integral i -c result is obtained by applying the 21-point -c gauss-kronrod rule (res21) obtained by optimal -c addition of abscissae to the 10-point gauss rule -c (res10), or by applying the 43-point rule (res43) -c obtained by optimal addition of abscissae to the -c 21-point gauss-kronrod rule, or by applying the -c 87-point rule (res87) obtained by optimal addition -c of abscissae to the 43-point rule. -c -c abserr - double precision -c estimate of the modulus of the absolute error, -c which should equal or exceed abs(i-result) -c -c neval - integer -c number of integrand evaluations -c -c ier - ier = 0 normal and reliable termination of the -c routine. it is assumed that the requested -c accuracy has been achieved. -c ier.gt.0 abnormal termination of the routine. it is -c assumed that the requested accuracy has -c not been achieved. -c error messages -c ier = 1 the maximum number of steps has been -c executed. the integral is probably too -c difficult to be calculated by dqng. -c = 6 the input is invalid, because -c epsabs.le.0 and -c epsrel.lt.max(50*rel.mach.acc.,0.5d-28). -c result, abserr and neval are set to zero. 
-c -c***references (none) -c***routines called d1mach,xerror -c***end prologue dqng -c - double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1, - * d1mach,epmach,epsabs,epsrel,f,fcentr,fval,fval1,fval2,fv1,fv2, - * fv3,fv4,hlgth,result,res10,res21,res43,res87,resabs,resasc, - * reskh,savfun,uflow,w10,w21a,w21b,w43a,w43b,w87a,w87b,x1,x2,x3,x4 - integer ier,ipx,k,l,neval - external f -c - dimension fv1(5),fv2(5),fv3(5),fv4(5),x1(5),x2(5),x3(11),x4(22), - * w10(5),w21a(5),w21b(6),w43a(10),w43b(12),w87a(21),w87b(23), - * savfun(21) -c -c the following data statements contain the -c abscissae and weights of the integration rules used. -c -c x1 abscissae common to the 10-, 21-, 43- and 87- -c point rule -c x2 abscissae common to the 21-, 43- and 87-point rule -c x3 abscissae common to the 43- and 87-point rule -c x4 abscissae of the 87-point rule -c w10 weights of the 10-point formula -c w21a weights of the 21-point formula for abscissae x1 -c w21b weights of the 21-point formula for abscissae x2 -c w43a weights of the 43-point formula for abscissae x1, x3 -c w43b weights of the 43-point formula for abscissae x3 -c w87a weights of the 87-point formula for abscissae x1, -c x2, x3 -c w87b weights of the 87-point formula for abscissae x4 -c -c -c gauss-kronrod-patterson quadrature coefficients for use in -c quadpack routine qng. these coefficients were calculated with -c 101 decimal digit arithmetic by l. w. fullerton, bell labs, nov 1981. 
-c - data x1 ( 1) / 0.9739065285 1717172007 7964012084 452 d0 / - data x1 ( 2) / 0.8650633666 8898451073 2096688423 493 d0 / - data x1 ( 3) / 0.6794095682 9902440623 4327365114 874 d0 / - data x1 ( 4) / 0.4333953941 2924719079 9265943165 784 d0 / - data x1 ( 5) / 0.1488743389 8163121088 4826001129 720 d0 / - data w10 ( 1) / 0.0666713443 0868813759 3568809893 332 d0 / - data w10 ( 2) / 0.1494513491 5058059314 5776339657 697 d0 / - data w10 ( 3) / 0.2190863625 1598204399 5534934228 163 d0 / - data w10 ( 4) / 0.2692667193 0999635509 1226921569 469 d0 / - data w10 ( 5) / 0.2955242247 1475287017 3892994651 338 d0 / -c - data x2 ( 1) / 0.9956571630 2580808073 5527280689 003 d0 / - data x2 ( 2) / 0.9301574913 5570822600 1207180059 508 d0 / - data x2 ( 3) / 0.7808177265 8641689706 3717578345 042 d0 / - data x2 ( 4) / 0.5627571346 6860468333 9000099272 694 d0 / - data x2 ( 5) / 0.2943928627 0146019813 1126603103 866 d0 / - data w21a ( 1) / 0.0325581623 0796472747 8818972459 390 d0 / - data w21a ( 2) / 0.0750396748 1091995276 7043140916 190 d0 / - data w21a ( 3) / 0.1093871588 0229764189 9210590325 805 d0 / - data w21a ( 4) / 0.1347092173 1147332592 8054001771 707 d0 / - data w21a ( 5) / 0.1477391049 0133849137 4841515972 068 d0 / - data w21b ( 1) / 0.0116946388 6737187427 8064396062 192 d0 / - data w21b ( 2) / 0.0547558965 7435199603 1381300244 580 d0 / - data w21b ( 3) / 0.0931254545 8369760553 5065465083 366 d0 / - data w21b ( 4) / 0.1234919762 6206585107 7958109831 074 d0 / - data w21b ( 5) / 0.1427759385 7706008079 7094273138 717 d0 / - data w21b ( 6) / 0.1494455540 0291690566 4936468389 821 d0 / -c - data x3 ( 1) / 0.9993333609 0193208139 4099323919 911 d0 / - data x3 ( 2) / 0.9874334029 0808886979 5961478381 209 d0 / - data x3 ( 3) / 0.9548079348 1426629925 7919200290 473 d0 / - data x3 ( 4) / 0.9001486957 4832829362 5099494069 092 d0 / - data x3 ( 5) / 0.8251983149 8311415084 7066732588 520 d0 / - data x3 ( 6) / 0.7321483889 8930498261 2354848755 461 d0 / - data x3 ( 
7) / 0.6228479705 3772523864 1159120344 323 d0 / - data x3 ( 8) / 0.4994795740 7105649995 2214885499 755 d0 / - data x3 ( 9) / 0.3649016613 4658076804 3989548502 644 d0 / - data x3 ( 10) / 0.2222549197 7660129649 8260928066 212 d0 / - data x3 ( 11) / 0.0746506174 6138332204 3914435796 506 d0 / - data w43a ( 1) / 0.0162967342 8966656492 4281974617 663 d0 / - data w43a ( 2) / 0.0375228761 2086950146 1613795898 115 d0 / - data w43a ( 3) / 0.0546949020 5825544214 7212685465 005 d0 / - data w43a ( 4) / 0.0673554146 0947808607 5553166302 174 d0 / - data w43a ( 5) / 0.0738701996 3239395343 2140695251 367 d0 / - data w43a ( 6) / 0.0057685560 5976979618 4184327908 655 d0 / - data w43a ( 7) / 0.0273718905 9324884208 1276069289 151 d0 / - data w43a ( 8) / 0.0465608269 1042883074 3339154433 824 d0 / - data w43a ( 9) / 0.0617449952 0144256449 6240336030 883 d0 / - data w43a ( 10) / 0.0713872672 6869339776 8559114425 516 d0 / - data w43b ( 1) / 0.0018444776 4021241410 0389106552 965 d0 / - data w43b ( 2) / 0.0107986895 8589165174 0465406741 293 d0 / - data w43b ( 3) / 0.0218953638 6779542810 2523123075 149 d0 / - data w43b ( 4) / 0.0325974639 7534568944 3882222526 137 d0 / - data w43b ( 5) / 0.0421631379 3519181184 7627924327 955 d0 / - data w43b ( 6) / 0.0507419396 0018457778 0189020092 084 d0 / - data w43b ( 7) / 0.0583793955 4261924837 5475369330 206 d0 / - data w43b ( 8) / 0.0647464049 5144588554 4689259517 511 d0 / - data w43b ( 9) / 0.0695661979 1235648452 8633315038 405 d0 / - data w43b ( 10) / 0.0728244414 7183320815 0939535192 842 d0 / - data w43b ( 11) / 0.0745077510 1417511827 3571813842 889 d0 / - data w43b ( 12) / 0.0747221475 1740300559 4425168280 423 d0 / -c - data x4 ( 1) / 0.9999029772 6272923449 0529830591 582 d0 / - data x4 ( 2) / 0.9979898959 8667874542 7496322365 960 d0 / - data x4 ( 3) / 0.9921754978 6068722280 8523352251 425 d0 / - data x4 ( 4) / 0.9813581635 7271277357 1916941623 894 d0 / - data x4 ( 5) / 0.9650576238 5838461912 8284110607 926 d0 / - data 
x4 ( 6) / 0.9431676131 3367059681 6416634507 426 d0 / - data x4 ( 7) / 0.9158064146 8550720959 1826430720 050 d0 / - data x4 ( 8) / 0.8832216577 7131650137 2117548744 163 d0 / - data x4 ( 9) / 0.8457107484 6241566660 5902011504 855 d0 / - data x4 ( 10) / 0.8035576580 3523098278 8739474980 964 d0 / - data x4 ( 11) / 0.7570057306 8549555832 8942793432 020 d0 / - data x4 ( 12) / 0.7062732097 8732181982 4094274740 840 d0 / - data x4 ( 13) / 0.6515894665 0117792253 4422205016 736 d0 / - data x4 ( 14) / 0.5932233740 5796108887 5273770349 144 d0 / - data x4 ( 15) / 0.5314936059 7083193228 5268948562 671 d0 / - data x4 ( 16) / 0.4667636230 4202284487 1966781659 270 d0 / - data x4 ( 17) / 0.3994248478 5921880473 2101665817 923 d0 / - data x4 ( 18) / 0.3298748771 0618828826 5053371824 597 d0 / - data x4 ( 19) / 0.2585035592 0216155180 2280975429 025 d0 / - data x4 ( 20) / 0.1856953965 6834665201 5917141167 606 d0 / - data x4 ( 21) / 0.1118422131 7990746817 2398359241 362 d0 / - data x4 ( 22) / 0.0373521233 9461987081 4998165437 704 d0 / - data w87a ( 1) / 0.0081483773 8414917290 0002878448 190 d0 / - data w87a ( 2) / 0.0187614382 0156282224 3935059003 794 d0 / - data w87a ( 3) / 0.0273474510 5005228616 1582829741 283 d0 / - data w87a ( 4) / 0.0336777073 1163793004 6581056957 588 d0 / - data w87a ( 5) / 0.0369350998 2042790761 4589586742 499 d0 / - data w87a ( 6) / 0.0028848724 3021153050 1334156248 695 d0 / - data w87a ( 7) / 0.0136859460 2271270188 8950035273 128 d0 / - data w87a ( 8) / 0.0232804135 0288831112 3409291030 404 d0 / - data w87a ( 9) / 0.0308724976 1171335867 5466394126 442 d0 / - data w87a ( 10) / 0.0356936336 3941877071 9351355457 044 d0 / - data w87a ( 11) / 0.0009152833 4520224136 0843392549 948 d0 / - data w87a ( 12) / 0.0053992802 1930047136 7738743391 053 d0 / - data w87a ( 13) / 0.0109476796 0111893113 4327826856 808 d0 / - data w87a ( 14) / 0.0162987316 9678733526 2665703223 280 d0 / - data w87a ( 15) / 0.0210815688 8920383511 2433060188 190 d0 / - 
data w87a ( 16) / 0.0253709697 6925382724 3467999831 710 d0 / - data w87a ( 17) / 0.0291896977 5647575250 1446154084 920 d0 / - data w87a ( 18) / 0.0323732024 6720278968 5788194889 595 d0 / - data w87a ( 19) / 0.0347830989 5036514275 0781997949 596 d0 / - data w87a ( 20) / 0.0364122207 3135178756 2801163687 577 d0 / - data w87a ( 21) / 0.0372538755 0304770853 9592001191 226 d0 / - data w87b ( 1) / 0.0002741455 6376207235 0016527092 881 d0 / - data w87b ( 2) / 0.0018071241 5505794294 8341311753 254 d0 / - data w87b ( 3) / 0.0040968692 8275916486 4458070683 480 d0 / - data w87b ( 4) / 0.0067582900 5184737869 9816577897 424 d0 / - data w87b ( 5) / 0.0095499576 7220164653 6053581325 377 d0 / - data w87b ( 6) / 0.0123294476 5224485369 4626639963 780 d0 / - data w87b ( 7) / 0.0150104473 4638895237 6697286041 943 d0 / - data w87b ( 8) / 0.0175489679 8624319109 9665352925 900 d0 / - data w87b ( 9) / 0.0199380377 8644088820 2278192730 714 d0 / - data w87b ( 10) / 0.0221949359 6101228679 6332102959 499 d0 / - data w87b ( 11) / 0.0243391471 2600080547 0360647041 454 d0 / - data w87b ( 12) / 0.0263745054 1483920724 1503786552 615 d0 / - data w87b ( 13) / 0.0282869107 8877120065 9968002987 960 d0 / - data w87b ( 14) / 0.0300525811 2809269532 2521110347 341 d0 / - data w87b ( 15) / 0.0316467513 7143992940 4586051078 883 d0 / - data w87b ( 16) / 0.0330504134 1997850329 0785944862 689 d0 / - data w87b ( 17) / 0.0342550997 0422606178 7082821046 821 d0 / - data w87b ( 18) / 0.0352624126 6015668103 3782717998 428 d0 / - data w87b ( 19) / 0.0360769896 2288870118 5500318003 895 d0 / - data w87b ( 20) / 0.0366986044 9845609449 8018047441 094 d0 / - data w87b ( 21) / 0.0371205492 6983257611 4119958413 599 d0 / - data w87b ( 22) / 0.0373342287 5193504032 1235449094 698 d0 / - data w87b ( 23) / 0.0373610737 6267902341 0321241766 599 d0 / -c -c list of major variables -c ----------------------- -c -c centr - mid point of the integration interval -c hlgth - half-length of the integration 
interval -c fcentr - function value at mid point -c absc - abscissa -c fval - function value -c savfun - array of function values which have already been -c computed -c res10 - 10-point gauss result -c res21 - 21-point kronrod result -c res43 - 43-point result -c res87 - 87-point result -c resabs - approximation to the integral of abs(f) -c resasc - approximation to the integral of abs(f-i/(b-a)) -c -c machine dependent constants -c --------------------------- -c -c epmach is the largest relative spacing. -c uflow is the smallest positive magnitude. -c -c***first executable statement dqng - epmach = d1mach(4) - uflow = d1mach(1) -c -c test on validity of parameters -c ------------------------------ -c - result = 0.0d+00 - abserr = 0.0d+00 - neval = 0 - ier = 6 - if(epsabs.le.0.0d+00.and.epsrel.lt.dmax1(0.5d+02*epmach,0.5d-28)) - * go to 80 - hlgth = 0.5d+00*(b-a) - dhlgth = dabs(hlgth) - centr = 0.5d+00*(b+a) - fcentr = f(centr) - neval = 21 - ier = 1 -c -c compute the integral using the 10- and 21-point formula. -c - do 70 l = 1,3 - go to (5,25,45),l - 5 res10 = 0.0d+00 - res21 = w21b(6)*fcentr - resabs = w21b(6)*dabs(fcentr) - do 10 k=1,5 - absc = hlgth*x1(k) - fval1 = f(centr+absc) - fval2 = f(centr-absc) - fval = fval1+fval2 - res10 = res10+w10(k)*fval - res21 = res21+w21a(k)*fval - resabs = resabs+w21a(k)*(dabs(fval1)+dabs(fval2)) - savfun(k) = fval - fv1(k) = fval1 - fv2(k) = fval2 - 10 continue - ipx = 5 - do 15 k=1,5 - ipx = ipx+1 - absc = hlgth*x2(k) - fval1 = f(centr+absc) - fval2 = f(centr-absc) - fval = fval1+fval2 - res21 = res21+w21b(k)*fval - resabs = resabs+w21b(k)*(dabs(fval1)+dabs(fval2)) - savfun(ipx) = fval - fv3(k) = fval1 - fv4(k) = fval2 - 15 continue -c -c test for convergence. 
-c - result = res21*hlgth - resabs = resabs*dhlgth - reskh = 0.5d+00*res21 - resasc = w21b(6)*dabs(fcentr-reskh) - do 20 k = 1,5 - resasc = resasc+w21a(k)*(dabs(fv1(k)-reskh)+dabs(fv2(k)-reskh)) - * +w21b(k)*(dabs(fv3(k)-reskh)+dabs(fv4(k)-reskh)) - 20 continue - abserr = dabs((res21-res10)*hlgth) - resasc = resasc*dhlgth - go to 65 -c -c compute the integral using the 43-point formula. -c - 25 res43 = w43b(12)*fcentr - neval = 43 - do 30 k=1,10 - res43 = res43+savfun(k)*w43a(k) - 30 continue - do 40 k=1,11 - ipx = ipx+1 - absc = hlgth*x3(k) - fval = f(absc+centr)+f(centr-absc) - res43 = res43+fval*w43b(k) - savfun(ipx) = fval - 40 continue -c -c test for convergence. -c - result = res43*hlgth - abserr = dabs((res43-res21)*hlgth) - go to 65 -c -c compute the integral using the 87-point formula. -c - 45 res87 = w87b(23)*fcentr - neval = 87 - do 50 k=1,21 - res87 = res87+savfun(k)*w87a(k) - 50 continue - do 60 k=1,22 - absc = hlgth*x4(k) - res87 = res87+w87b(k)*(f(absc+centr)+f(centr-absc)) - 60 continue - result = res87*hlgth - abserr = dabs((res87-res43)*hlgth) - 65 if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00) - * abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00) - if (resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1 - * ((epmach*0.5d+02)*resabs,abserr) - if (abserr.le.dmax1(epsabs,epsrel*dabs(result))) ier = 0 -c ***jump out of do-loop - if (ier.eq.0) go to 999 - 70 continue - 80 call xerror('abnormal return from dqng ',26,ier,0) - 999 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqpsrt.f b/scipy-0.10.1/scipy/integrate/quadpack/dqpsrt.f deleted file mode 100644 index 2f8bf8e824..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqpsrt.f +++ /dev/null @@ -1,129 +0,0 @@ - subroutine dqpsrt(limit,last,maxerr,ermax,elist,iord,nrmax) -c***begin prologue dqpsrt -c***refer to dqage,dqagie,dqagpe,dqawse -c***routines called (none) -c***revision date 810101 (yymmdd) -c***keywords sequential sorting -c***author piessens,robert,appl. 
math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose this routine maintains the descending ordering in the -c list of the local error estimated resulting from the -c interval subdivision process. at each call two error -c estimates are inserted using the sequential search -c method, top-down for the largest error estimate and -c bottom-up for the smallest error estimate. -c***description -c -c ordering routine -c standard fortran subroutine -c double precision version -c -c parameters (meaning at output) -c limit - integer -c maximum number of error estimates the list -c can contain -c -c last - integer -c number of error estimates currently in the list -c -c maxerr - integer -c maxerr points to the nrmax-th largest error -c estimate currently in the list -c -c ermax - double precision -c nrmax-th largest error estimate -c ermax = elist(maxerr) -c -c elist - double precision -c vector of dimension last containing -c the error estimates -c -c iord - integer -c vector of dimension last, the first k elements -c of which contain pointers to the error -c estimates, such that -c elist(iord(1)),..., elist(iord(k)) -c form a decreasing sequence, with -c k = last if last.le.(limit/2+2), and -c k = limit+1-last otherwise -c -c nrmax - integer -c maxerr = iord(nrmax) -c -c***end prologue dqpsrt -c - double precision elist,ermax,errmax,errmin - integer i,ibeg,ido,iord,isucc,j,jbnd,jupbn,k,last,limit,maxerr, - * nrmax - dimension elist(last),iord(last) -c -c check whether the list contains more than -c two error estimates. -c -c***first executable statement dqpsrt - if(last.gt.2) go to 10 - iord(1) = 1 - iord(2) = 2 - go to 90 -c -c this part of the routine is only executed if, due to a -c difficult integrand, subdivision increased the error -c estimate. in the normal case the insert procedure should -c start after the nrmax-th largest error estimate. 
-c - 10 errmax = elist(maxerr) - if(nrmax.eq.1) go to 30 - ido = nrmax-1 - do 20 i = 1,ido - isucc = iord(nrmax-1) -c ***jump out of do-loop - if(errmax.le.elist(isucc)) go to 30 - iord(nrmax) = isucc - nrmax = nrmax-1 - 20 continue -c -c compute the number of elements in the list to be maintained -c in descending order. this number depends on the number of -c subdivisions still allowed. -c - 30 jupbn = last - if(last.gt.(limit/2+2)) jupbn = limit+3-last - errmin = elist(last) -c -c insert errmax by traversing the list top-down, -c starting comparison from the element elist(iord(nrmax+1)). -c - jbnd = jupbn-1 - ibeg = nrmax+1 - if(ibeg.gt.jbnd) go to 50 - do 40 i=ibeg,jbnd - isucc = iord(i) -c ***jump out of do-loop - if(errmax.ge.elist(isucc)) go to 60 - iord(i-1) = isucc - 40 continue - 50 iord(jbnd) = maxerr - iord(jupbn) = last - go to 90 -c -c insert errmin by traversing the list bottom-up. -c - 60 iord(i-1) = maxerr - k = jbnd - do 70 j=i,jbnd - isucc = iord(k) -c ***jump out of do-loop - if(errmin.lt.elist(isucc)) go to 80 - iord(k+1) = isucc - k = k-1 - 70 continue - iord(i) = last - go to 90 - 80 iord(k+1) = last -c -c set maxerr and ermax. -c - 90 maxerr = iord(nrmax) - ermax = elist(maxerr) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqwgtc.f b/scipy-0.10.1/scipy/integrate/quadpack/dqwgtc.f deleted file mode 100644 index 73447863c7..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqwgtc.f +++ /dev/null @@ -1,18 +0,0 @@ - double precision function dqwgtc(x,c,p2,p3,p4,kp) -c***begin prologue dqwgtc -c***refer to dqk15w -c***routines called (none) -c***revision date 810101 (yymmdd) -c***keywords weight function, cauchy principal value -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose this function subprogram is used together with the -c routine qawc and defines the weight function. 
-c***end prologue dqwgtc -c - double precision c,p2,p3,p4,x - integer kp -c***first executable statement dqwgtc - dqwgtc = 0.1d+01/(x-c) - return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqwgtf.f b/scipy-0.10.1/scipy/integrate/quadpack/dqwgtf.f deleted file mode 100644 index 2dc44125db..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqwgtf.f +++ /dev/null @@ -1,20 +0,0 @@ - double precision function dqwgtf(x,omega,p2,p3,p4,integr) -c***begin prologue dqwgtf -c***refer to dqk15w -c***routines called (none) -c***revision date 810101 (yymmdd) -c***keywords cos or sin in weight function -c***author piessens,robert, appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. * progr. div. - k.u.leuven -c***end prologue dqwgtf -c - double precision dcos,dsin,omega,omx,p2,p3,p4,x - integer integr -c***first executable statement dqwgtf - omx = omega*x - go to(10,20),integr - 10 dqwgtf = dcos(omx) - go to 30 - 20 dqwgtf = dsin(omx) - 30 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadpack/dqwgts.f b/scipy-0.10.1/scipy/integrate/quadpack/dqwgts.f deleted file mode 100644 index 3b4cee5e85..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadpack/dqwgts.f +++ /dev/null @@ -1,27 +0,0 @@ - double precision function dqwgts(x,a,b,alfa,beta,integr) -c***begin prologue dqwgts -c***refer to dqk15w -c***routines called (none) -c***revision date 810101 (yymmdd) -c***keywords weight function, algebraico-logarithmic -c end-point singularities -c***author piessens,robert,appl. math. & progr. div. - k.u.leuven -c de doncker,elise,appl. math. & progr. div. - k.u.leuven -c***purpose this function subprogram is used together with the -c routine dqaws and defines the weight function. 
-c***end prologue dqwgts -c - double precision a,alfa,b,beta,bmx,dlog,x,xma - integer integr -c***first executable statement dqwgts - xma = x-a - bmx = b-x - dqwgts = xma**alfa*bmx**beta - go to (40,10,20,30),integr - 10 dqwgts = dqwgts*dlog(xma) - go to 40 - 20 dqwgts = dqwgts*dlog(bmx) - go to 40 - 30 dqwgts = dqwgts*dlog(xma)*dlog(bmx) - 40 return - end diff --git a/scipy-0.10.1/scipy/integrate/quadrature.py b/scipy-0.10.1/scipy/integrate/quadrature.py deleted file mode 100644 index 4118f9ec66..0000000000 --- a/scipy-0.10.1/scipy/integrate/quadrature.py +++ /dev/null @@ -1,769 +0,0 @@ - -__all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb', - 'cumtrapz','newton_cotes','composite'] - -from scipy.special.orthogonal import p_roots -from scipy.special import gammaln -from numpy import sum, ones, add, diff, isinf, isscalar, \ - asarray, real, trapz, arange, empty -import numpy as np -import math -import warnings - -class AccuracyWarning(Warning): - pass - -def fixed_quad(func,a,b,args=(),n=5): - """ - Compute a definite integral using fixed-order Gaussian quadrature. - - Integrate `func` from a to b using Gaussian quadrature of order n. - - Parameters - ---------- - func : callable - A Python function or method to integrate (must accept vector inputs). - a : float - Lower limit of integration. - b : float - Upper limit of integration. - args : tuple, optional - Extra arguments to pass to function, if any. - n : int, optional - Order of quadrature integration. Default is 5. 
- - Returns - ------- - val : float - Gaussian quadrature approximation to the integral - - See Also - -------- - quad : adaptive quadrature using QUADPACK - dblquad, tplquad : double and triple integrals - romberg : adaptive Romberg quadrature - quadrature : adaptive Gaussian quadrature - romb, simps, trapz : integrators for sampled data - cumtrapz : cumulative integration for sampled data - ode, odeint - ODE integrators - - """ - [x,w] = p_roots(n) - x = real(x) - ainf, binf = map(isinf,(a,b)) - if ainf or binf: - raise ValueError("Gaussian quadrature is only available for " - "finite limits.") - y = (b-a)*(x+1)/2.0 + a - return (b-a)/2.0*sum(w*func(y,*args),0), None - -def vectorize1(func, args=(), vec_func=False): - """Vectorize the call to a function. - - This is an internal utility function used by `romberg` and - `quadrature` to create a vectorized version of a function. - - If `vec_func` is True, the function `func` is assumed to take vector - arguments. - - Parameters - ---------- - func : callable - User defined function. - args : tuple - Extra arguments for the function. - vec_func : bool - True if the function func takes vector arguments. - - Returns - ------- - vfunc : callable - A function that will take a vector argument and return the - result. - - """ - if vec_func: - def vfunc(x): - return func(x, *args) - else: - def vfunc(x): - if isscalar(x): - return func(x, *args) - x = asarray(x) - # call with first point to get output type - y0 = func(x[0], *args) - n = len(x) - if hasattr(y0, 'dtype'): - output = empty((n,), dtype=y0.dtype) - else: - output = empty((n,), dtype=type(y0)) - output[0] = y0 - for i in xrange(1, n): - output[i] = func(x[i], *args) - return output - return vfunc - -def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50, - vec_func=True): - """ - Compute a definite integral using fixed-tolerance Gaussian quadrature. - - Integrate func from a to b using Gaussian quadrature - with absolute tolerance `tol`. 
- - Parameters - ---------- - func : function - A Python function or method to integrate. - a : float - Lower limit of integration. - b : float - Upper limit of integration. - args : tuple, optional - Extra arguments to pass to function. - tol, rol : float, optional - Iteration stops when error between last two iterates is less than - `tol` OR the relative change is less than `rtol`. - maxiter : int, optional - Maximum number of iterations. - vec_func : bool, optional - True or False if func handles arrays as arguments (is - a "vector" function). Default is True. - - Returns - ------- - val : float - Gaussian quadrature approximation (within tolerance) to integral. - err : float - Difference between last two estimates of the integral. - - See also - -------- - romberg: adaptive Romberg quadrature - fixed_quad: fixed-order Gaussian quadrature - quad: adaptive quadrature using QUADPACK - dblquad: double integrals - tplquad: triple integrals - romb: integrator for sampled data - simps: integrator for sampled data - trapz: integrator for sampled data - cumtrapz: cumulative integration for sampled data - ode: ODE integrator - odeint: ODE integrator - - """ - vfunc = vectorize1(func, args, vec_func=vec_func) - val = np.inf - err = np.inf - for n in xrange(1, maxiter+1): - newval = fixed_quad(vfunc, a, b, (), n)[0] - err = abs(newval-val) - val = newval - - if err < tol or err < rtol*abs(val): - break - else: - warnings.warn( - "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err), - AccuracyWarning) - return val, err - -def tupleset(t, i, value): - l = list(t) - l[i] = value - return tuple(l) - -def cumtrapz(y, x=None, dx=1.0, axis=-1): - """ - Cumulatively integrate y(x) using samples along the given axis - and the composite trapezoidal rule. If x is None, spacing given by `dx` - is assumed. 
- - Parameters - ---------- - y : array - - x : array, optional - - dx : int, optional - - axis : int, optional - Specifies the axis to cumulate: - - - -1 --> X axis - - 0 --> Z axis - - 1 --> Y axis - - See Also - -------- - quad: adaptive quadrature using QUADPACK - romberg: adaptive Romberg quadrature - quadrature: adaptive Gaussian quadrature - fixed_quad: fixed-order Gaussian quadrature - dblquad: double integrals - tplquad: triple integrals - romb: integrators for sampled data - trapz: integrators for sampled data - cumtrapz: cumulative integration for sampled data - ode: ODE integrators - odeint: ODE integrators - - """ - y = asarray(y) - if x is None: - d = dx - else: - d = diff(x,axis=axis) - nd = len(y.shape) - slice1 = tupleset((slice(None),)*nd, axis, slice(1, None)) - slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1)) - return add.accumulate(d * (y[slice1]+y[slice2])/2.0,axis) - -def _basic_simps(y,start,stop,x,dx,axis): - nd = len(y.shape) - if start is None: - start = 0 - step = 2 - all = (slice(None),)*nd - slice0 = tupleset(all, axis, slice(start, stop, step)) - slice1 = tupleset(all, axis, slice(start+1, stop+1, step)) - slice2 = tupleset(all, axis, slice(start+2, stop+2, step)) - - if x is None: # Even spaced Simpson's rule. - result = add.reduce(dx/3.0* (y[slice0]+4*y[slice1]+y[slice2]), - axis) - else: - # Account for possibly different spacings. - # Simpson's rule changes a bit. - h = diff(x,axis=axis) - sl0 = tupleset(all, axis, slice(start, stop, step)) - sl1 = tupleset(all, axis, slice(start+1, stop+1, step)) - h0 = h[sl0] - h1 = h[sl1] - hsum = h0 + h1 - hprod = h0 * h1 - h0divh1 = h0 / h1 - result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) + \ - y[slice1]*hsum*hsum/hprod + \ - y[slice2]*(2-h0divh1)),axis) - return result - - -def simps(y, x=None, dx=1, axis=-1, even='avg'): - """ - Integrate y(x) using samples along the given axis and the composite - Simpson's rule. If x is None, spacing of dx is assumed. 
- - If there are an even number of samples, N, then there are an odd - number of intervals (N-1), but Simpson's rule requires an even number - of intervals. The parameter 'even' controls how this is handled. - - Parameters - ---------- - y : array_like - Array to be integrated. - x : array_like, optional - If given, the points at which `y` is sampled. - dx : int, optional - Spacing of integration points along axis of `y`. Only used when - `x` is None. Default is 1. - axis : int, optional - Axis along which to integrate. Default is the last axis. - even : {'avg', 'first', 'str'}, optional - 'avg' : Average two results:1) use the first N-2 intervals with - a trapezoidal rule on the last interval and 2) use the last - N-2 intervals with a trapezoidal rule on the first interval. - - 'first' : Use Simpson's rule for the first N-2 intervals with - a trapezoidal rule on the last interval. - - 'last' : Use Simpson's rule for the last N-2 intervals with a - trapezoidal rule on the first interval. - - See Also - -------- - quad: adaptive quadrature using QUADPACK - romberg: adaptive Romberg quadrature - quadrature: adaptive Gaussian quadrature - fixed_quad: fixed-order Gaussian quadrature - dblquad: double integrals - tplquad: triple integrals - romb: integrators for sampled data - trapz: integrators for sampled data - cumtrapz: cumulative integration for sampled data - ode: ODE integrators - odeint: ODE integrators - - Notes - ----- - For an odd number of samples that are equally spaced the result is - exact if the function is a polynomial of order 3 or less. If - the samples are not equally spaced, then the result is exact only - if the function is a polynomial of order 2 or less. 
- - """ - y = asarray(y) - nd = len(y.shape) - N = y.shape[axis] - last_dx = dx - first_dx = dx - returnshape = 0 - if not x is None: - x = asarray(x) - if len(x.shape) == 1: - shapex = ones(nd) - shapex[axis] = x.shape[0] - saveshape = x.shape - returnshape = 1 - x=x.reshape(tuple(shapex)) - elif len(x.shape) != len(y.shape): - raise ValueError("If given, shape of x must be 1-d or the " - "same as y.") - if x.shape[axis] != N: - raise ValueError("If given, length of x along axis must be the " - "same as y.") - if N % 2 == 0: - val = 0.0 - result = 0.0 - slice1 = (slice(None),)*nd - slice2 = (slice(None),)*nd - if not even in ['avg', 'last', 'first']: - raise ValueError("Parameter 'even' must be 'avg', 'last', or 'first'.") - # Compute using Simpson's rule on first intervals - if even in ['avg', 'first']: - slice1 = tupleset(slice1, axis, -1) - slice2 = tupleset(slice2, axis, -2) - if not x is None: - last_dx = x[slice1] - x[slice2] - val += 0.5*last_dx*(y[slice1]+y[slice2]) - result = _basic_simps(y,0,N-3,x,dx,axis) - # Compute using Simpson's rule on last set of intervals - if even in ['avg', 'last']: - slice1 = tupleset(slice1, axis, 0) - slice2 = tupleset(slice2, axis, 1) - if not x is None: - first_dx = x[tuple(slice2)] - x[tuple(slice1)] - val += 0.5*first_dx*(y[slice2]+y[slice1]) - result += _basic_simps(y,1,N-2,x,dx,axis) - if even == 'avg': - val /= 2.0 - result /= 2.0 - result = result + val - else: - result = _basic_simps(y,0,N-2,x,dx,axis) - if returnshape: - x = x.reshape(saveshape) - return result - -def romb(y, dx=1.0, axis=-1, show=False): - """ - Romberg integration using samples of a function. - - Parameters - ----------- - y : array_like - A vector of ``2**k + 1`` equally-spaced samples of a function. - dx : array_like, optional - The sample spacing. Default is 1. - axis : array_like?, optional - The axis along which to integrate. Default is -1 (last axis). 
- show : bool, optional - When y is a single 1-D array, then if this argument is True - print the table showing Richardson extrapolation from the - samples. Default is False. - - Returns - ------- - ret : array_like? - The integrated result for each axis. - - See also - -------- - quad - adaptive quadrature using QUADPACK - romberg - adaptive Romberg quadrature - quadrature - adaptive Gaussian quadrature - fixed_quad - fixed-order Gaussian quadrature - dblquad, tplquad - double and triple integrals - simps, trapz - integrators for sampled data - cumtrapz - cumulative integration for sampled data - ode, odeint - ODE integrators - - """ - y = asarray(y) - nd = len(y.shape) - Nsamps = y.shape[axis] - Ninterv = Nsamps-1 - n = 1 - k = 0 - while n < Ninterv: - n <<= 1 - k += 1 - if n != Ninterv: - raise ValueError("Number of samples must be one plus a " - "non-negative power of 2.") - - R = {} - all = (slice(None),) * nd - slice0 = tupleset(all, axis, 0) - slicem1 = tupleset(all, axis, -1) - h = Ninterv*asarray(dx)*1.0 - R[(1,1)] = (y[slice0] + y[slicem1])/2.0*h - slice_R = all - start = stop = step = Ninterv - for i in range(2,k+1): - start >>= 1 - slice_R = tupleset(slice_R, axis, slice(start,stop,step)) - step >>= 1 - R[(i,1)] = 0.5*(R[(i-1,1)] + h*add.reduce(y[slice_R],axis)) - for j in range(2,i+1): - R[(i,j)] = R[(i,j-1)] + \ - (R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*(j-1)))-1) - h = h / 2.0 - - if show: - if not isscalar(R[(1,1)]): - print "*** Printing table only supported for integrals" + \ - " of a single data set." - else: - try: - precis = show[0] - except (TypeError, IndexError): - precis = 5 - try: - width = show[1] - except (TypeError, IndexError): - width = 8 - formstr = "%" + str(width) + '.' 
+ str(precis)+'f' - - print "\n Richardson Extrapolation Table for Romberg Integration " - print "====================================================================" - for i in range(1,k+1): - for j in range(1,i+1): - print formstr % R[(i,j)], - print - print "====================================================================\n" - - return R[(k,k)] - - - -# Romberg quadratures for numeric integration. -# -# Written by Scott M. Ransom -# last revision: 14 Nov 98 -# -# Cosmetic changes by Konrad Hinsen -# last revision: 1999-7-21 -# -# Adapted to scipy by Travis Oliphant -# last revision: Dec 2001 - -def _difftrap(function, interval, numtraps): - """ - Perform part of the trapezoidal rule to integrate a function. - Assume that we had called difftrap with all lower powers-of-2 - starting with 1. Calling difftrap only returns the summation - of the new ordinates. It does _not_ multiply by the width - of the trapezoids. This must be performed by the caller. - 'function' is the function to evaluate (must accept vector arguments). - 'interval' is a sequence with lower and upper limits - of integration. - 'numtraps' is the number of trapezoids to use (must be a - power-of-2). - """ - if numtraps <= 0: - raise ValueError("numtraps must be > 0 in difftrap().") - elif numtraps == 1: - return 0.5*(function(interval[0])+function(interval[1])) - else: - numtosum = numtraps/2 - h = float(interval[1]-interval[0])/numtosum - lox = interval[0] + 0.5 * h; - points = lox + h * arange(0, numtosum) - s = sum(function(points),0) - return s - -def _romberg_diff(b, c, k): - """ - Compute the differences for the Romberg quadrature corrections. - See Forman Acton's "Real Computing Made Real," p 143. - """ - tmp = 4.0**k - return (tmp * c - b)/(tmp - 1.0) - -def _printresmat(function, interval, resmat): - # Print the Romberg result matrix. 
- i = j = 0 - print 'Romberg integration of', `function`, - print 'from', interval - print '' - print '%6s %9s %9s' % ('Steps', 'StepSize', 'Results') - for i in range(len(resmat)): - print '%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), - for j in range(i+1): - print '%9f' % (resmat[i][j]), - print '' - print '' - print 'The final result is', resmat[i][j], - print 'after', 2**(len(resmat)-1)+1, 'function evaluations.' - -def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False, - divmax=10, vec_func=False): - """ - Romberg integration of a callable function or method. - - Returns the integral of `function` (a function of one variable) - over the interval (`a`, `b`). - - If `show` is 1, the triangular array of the intermediate results - will be printed. If `vec_func` is True (default is False), then `function` is - assumed to support vector arguments. - - Parameters - ---------- - function : callable - Function to be integrated. - a : float - Lower limit of integration. - b : float - Upper limit of integration. - - Returns - -------- - results : float - Result of the integration. - - Other Parameters - ---------------- - args : tuple, optional - Extra arguments to pass to function. Each element of `args` will - be passed as a single argument to `func`. Default is to pass no - extra arguments. - tol, rtol : float, optional - The desired absolute and relative tolerances. Defaults are 1.48e-8. - show : bool, optional - Whether to print the results. Default is False. - divmax : int, optional - Maximum order of extrapolation. Default is 10. - vec_func : bool, optional - Whether `func` handles arrays as arguments (i.e whether it is a - "vector" function). Default is False. - - See Also - -------- - fixed_quad : Fixed-order Gaussian quadrature. - quad : Adaptive quadrature using QUADPACK. - dblquad, tplquad : Double and triple integrals. - romb, simps, trapz : Integrators for sampled data. - cumtrapz : Cumulative integration for sampled data. 
- ode, odeint : ODE integrators. - - References - ---------- - .. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method - - Examples - -------- - Integrate a gaussian from 0 to 1 and compare to the error function. - - >>> from scipy.special import erf - >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2) - >>> result = romberg(gaussian, 0, 1, show=True) - Romberg integration of from [0, 1] - - :: - - Steps StepSize Results - 1 1.000000 0.385872 - 2 0.500000 0.412631 0.421551 - 4 0.250000 0.419184 0.421368 0.421356 - 8 0.125000 0.420810 0.421352 0.421350 0.421350 - 16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350 - 32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350 - - The final result is 0.421350396475 after 33 function evaluations. - - >>> print 2*result,erf(1) - 0.84270079295 0.84270079295 - - """ - if isinf(a) or isinf(b): - raise ValueError("Romberg integration only available for finite limits.") - vfunc = vectorize1(function, args, vec_func=vec_func) - n = 1 - interval = [a,b] - intrange = b-a - ordsum = _difftrap(vfunc, interval, n) - result = intrange * ordsum - resmat = [[result]] - err = np.inf - for i in xrange(1, divmax+1): - n = n * 2 - ordsum = ordsum + _difftrap(vfunc, interval, n) - resmat.append([]) - resmat[i].append(intrange * ordsum / n) - for k in range(i): - resmat[i].append(_romberg_diff(resmat[i-1][k], resmat[i][k], k+1)) - result = resmat[i][i] - lastresult = resmat[i-1][i-1] - - err = abs(result - lastresult) - if err < tol or err < rtol*abs(result): - break - else: - warnings.warn( - "divmax (%d) exceeded. Latest difference = %e" % (divmax, err), - AccuracyWarning) - - if show: - _printresmat(vfunc, interval, resmat) - return result - - -# Coefficients for Netwon-Cotes quadrature -# -# These are the points being used -# to construct the local interpolating polynomial -# a are the weights for Newton-Cotes integration -# B is the error coefficient. 
-# error in these coefficients grows as N gets larger. -# or as samples are closer and closer together - -# You can use maxima to find these rational coefficients -# for equally spaced data using the commands -# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i); -# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N)); -# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N)); -# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N)); -# -# pre-computed for equally-spaced weights -# -# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N] -# -# a = num_a*array(int_a)/den_a -# B = num_B*1.0 / den_B -# -# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*) -# where k = N // 2 -# -_builtincoeffs = { - 1:(1,2,[1,1],-1,12), - 2:(1,3,[1,4,1],-1,90), - 3:(3,8,[1,3,3,1],-3,80), - 4:(2,45,[7,32,12,32,7],-8,945), - 5:(5,288,[19,75,50,50,75,19],-275,12096), - 6:(1,140,[41,216,27,272,27,216,41],-9,1400), - 7:(7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400), - 8:(4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989], - -2368,467775), - 9:(9,89600,[2857,15741,1080,19344,5778,5778,19344,1080, - 15741,2857], -4671, 394240), - 10:(5,299376,[16067,106300,-48525,272400,-260550,427368, - -260550,272400,-48525,106300,16067], - -673175, 163459296), - 11:(11,87091200,[2171465,13486539,-3237113, 25226685,-9595542, - 15493566,15493566,-9595542,25226685,-3237113, - 13486539,2171465], -2224234463, 237758976000), - 12:(1, 5255250, [1364651,9903168,-7587864,35725120,-51491295, - 87516288,-87797136,87516288,-51491295,35725120, - -7587864,9903168,1364651], -3012, 875875), - 13:(13, 402361344000,[8181904909, 56280729661, -31268252574, - 156074417954,-151659573325,206683437987, - -43111992612,-43111992612,206683437987, - -151659573325,156074417954,-31268252574, - 56280729661,8181904909], -2639651053, - 344881152000), - 14:(7, 2501928000, [90241897,710986864,-770720657,3501442784, - 
-6625093363,12630121616,-16802270373,19534438464, - -16802270373,12630121616,-6625093363,3501442784, - -770720657,710986864,90241897], -3740727473, - 1275983280000) - } - -def newton_cotes(rn, equal=0): - """ - Return weights and error coefficient for Newton-Cotes integration. - - Suppose we have (N+1) samples of f at the positions - x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the - integral between x_0 and x_N is: - - :math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i) - + B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)` - - where :math:`\\xi \\in [x_0,x_N]` and :math:`\\Delta x = \\frac{x_N-x_0}{N}` - is the averages samples spacing. - - If the samples are equally-spaced and N is even, then the error - term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`. - - Parameters - ---------- - rn : int - The integer order for equally-spaced data or the relative positions of - the samples with the first sample at 0 and the last at N, where N+1 is - the length of `rn`. N is the order of the Newton-Cotes integration. - equal: int, optional - Set to 1 to enforce equally spaced data. - - Returns - ------- - an : ndarray - 1-D array of weights to apply to the function at the provided sample - positions. - B : float - Error coefficient. - - Notes - ----- - Normally, the Newton-Cotes rules are used on smaller integration - regions and a composite rule is used to return the total integral. 
- - """ - try: - N = len(rn)-1 - if equal: - rn = np.arange(N+1) - elif np.all(np.diff(rn)==1): - equal = 1 - except: - N = rn - rn = np.arange(N+1) - equal = 1 - - if equal and N in _builtincoeffs: - na, da, vi, nb, db = _builtincoeffs[N] - return na*np.array(vi,float)/da, float(nb)/db - - if (rn[0] != 0) or (rn[-1] != N): - raise ValueError("The sample positions must start at 0" - " and end at N") - yi = rn / float(N) - ti = 2.0*yi - 1 - nvec = np.arange(0,N+1) - C = np.mat(ti**nvec[:,np.newaxis]) - Cinv = C.I - # improve precision of result - Cinv = 2*Cinv - Cinv*C*Cinv - Cinv = 2*Cinv - Cinv*C*Cinv - Cinv = Cinv.A - vec = 2.0/ (nvec[::2]+1) - ai = np.dot(Cinv[:,::2],vec) * N/2 - - if (N%2 == 0) and equal: - BN = N/(N+3.) - power = N+2 - else: - BN = N/(N+2.) - power = N+1 - - BN = BN - np.dot(yi**power, ai) - p1 = power+1 - fac = power*math.log(N) - gammaln(p1) - fac = math.exp(fac) - return ai, BN*fac - - -# Should only use if samples are forced on you -def composite(f,x=None,dx=1,axis=-1,n=5): - pass diff --git a/scipy-0.10.1/scipy/integrate/setup.py b/scipy-0.10.1/scipy/integrate/setup.py deleted file mode 100755 index 7b7577268d..0000000000 --- a/scipy-0.10.1/scipy/integrate/setup.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('integrate', parent_package, top_path) - - blas_opt = get_info('blas_opt',notfound_action=2) - - config.add_library('linpack_lite', - sources=[join('linpack_lite','*.f')]) - config.add_library('mach', - sources=[join('mach','*.f')], - config_fc={'noopt':(__file__,1)}) - config.add_library('quadpack', - sources=[join('quadpack','*.f')]) - config.add_library('odepack', - sources=[join('odepack','*.f')]) - config.add_library('dop', - sources=[join('dop','*.f')]) - # should we try to weed through files and replace with 
calls to - # LAPACK routines? - # Yes, someday... - - - - # Extensions - # quadpack: - - config.add_extension('_quadpack', - sources=['_quadpackmodule.c'], - libraries=['quadpack', 'linpack_lite', 'mach'], - depends=['quadpack.h','__quadpack.h']) - # odepack - libs = ['odepack','linpack_lite','mach'] - - - # Remove libraries key from blas_opt - if 'libraries' in blas_opt: # key doesn't exist on OS X ... - libs.extend(blas_opt['libraries']) - newblas = {} - for key in blas_opt.keys(): - if key == 'libraries': - continue - newblas[key] = blas_opt[key] - config.add_extension('_odepack', - sources=['_odepackmodule.c'], - libraries=libs, - depends=['__odepack.h','multipack.h'], - **newblas) - - # vode - config.add_extension('vode', - sources=['vode.pyf'], - libraries=libs, - **newblas) - - # dop - config.add_extension('_dop', - sources=['dop.pyf'], - libraries=['dop']) - - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/integrate/setupscons.py b/scipy-0.10.1/scipy/integrate/setupscons.py deleted file mode 100755 index 085c7affc3..0000000000 --- a/scipy-0.10.1/scipy/integrate/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('integrate', parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/integrate/tests/test_integrate.py b/scipy-0.10.1/scipy/integrate/tests/test_integrate.py deleted file mode 100644 index f9d9ccfe23..0000000000 --- a/scipy-0.10.1/scipy/integrate/tests/test_integrate.py +++ /dev/null @@ -1,375 +0,0 @@ -# Authors: Nils 
Wagner, Ed Schofield, Pauli Virtanen, John Travers -""" -Tests for numerical integration. -""" - -import numpy -from numpy import arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, \ - allclose - -from numpy.testing import assert_, TestCase, run_module_suite, \ - assert_array_almost_equal, assert_raises, assert_allclose -from scipy.integrate import odeint, ode, complex_ode - -#------------------------------------------------------------------------------ -# Test ODE integrators -#------------------------------------------------------------------------------ - -class TestOdeint(TestCase): - """ - Check integrate.odeint - """ - def _do_problem(self, problem): - t = arange(0.0, problem.stop_t, 0.05) - z, infodict = odeint(problem.f, problem.z0, t, full_output=True) - assert_(problem.verify(z, t)) - - def test_odeint(self): - for problem_cls in PROBLEMS: - problem = problem_cls() - if problem.cmplx: continue - self._do_problem(problem) - -class TestOde(TestCase): - """ - Check integrate.ode - """ - def _do_problem(self, problem, integrator, method='adams'): - - # ode has callback arguments in different order than odeint - f = lambda t, z: problem.f(z, t) - jac = None - if hasattr(problem, 'jac'): - jac = lambda t, z: problem.jac(z, t) - - ig = ode(f, jac) - ig.set_integrator(integrator, - atol=problem.atol/10, - rtol=problem.rtol/10, - method=method) - ig.set_initial_value(problem.z0, t=0.0) - z = ig.integrate(problem.stop_t) - - assert_(ig.successful(), (problem, method)) - assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) - - def test_vode(self): - """Check the vode solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if problem.cmplx: continue - if not problem.stiff: - self._do_problem(problem, 'vode', 'adams') - self._do_problem(problem, 'vode', 'bdf') - - def test_zvode(self): - """Check the zvode solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if not problem.stiff: - self._do_problem(problem, 
'zvode', 'adams') - self._do_problem(problem, 'zvode', 'bdf') - - def test_dopri5(self): - """Check the dopri5 solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if problem.cmplx: continue - if problem.stiff: continue - if hasattr(problem, 'jac'): continue - self._do_problem(problem, 'dopri5') - - def test_dop853(self): - """Check the dop853 solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if problem.cmplx: continue - if problem.stiff: continue - if hasattr(problem, 'jac'): continue - self._do_problem(problem, 'dop853') - - def test_concurrent_fail(self): - for sol in ('vode', 'zvode'): - f = lambda t, y: 1.0 - - r = ode(f).set_integrator(sol) - r.set_initial_value(0, 0) - - r2 = ode(f).set_integrator(sol) - r2.set_initial_value(0, 0) - - r.integrate(r.t + 0.1) - r2.integrate(r2.t + 0.1) - - assert_raises(RuntimeError, r.integrate, r.t + 0.1) - - def test_concurrent_ok(self): - f = lambda t, y: 1.0 - - for k in xrange(3): - for sol in ('vode', 'zvode', 'dopri5', 'dop853'): - r = ode(f).set_integrator(sol) - r.set_initial_value(0, 0) - - r2 = ode(f).set_integrator(sol) - r2.set_initial_value(0, 0) - - r.integrate(r.t + 0.1) - r2.integrate(r2.t + 0.1) - r2.integrate(r2.t + 0.1) - - assert_allclose(r.y, 0.1) - assert_allclose(r2.y, 0.2) - - for sol in ('dopri5', 'dop853'): - r = ode(f).set_integrator(sol) - r.set_initial_value(0, 0) - - r2 = ode(f).set_integrator(sol) - r2.set_initial_value(0, 0) - - r.integrate(r.t + 0.1) - r.integrate(r.t + 0.1) - r2.integrate(r2.t + 0.1) - r.integrate(r.t + 0.1) - r2.integrate(r2.t + 0.1) - - assert_allclose(r.y, 0.3) - assert_allclose(r2.y, 0.2) - -class TestComplexOde(TestCase): - """ - Check integrate.complex_ode - """ - def _do_problem(self, problem, integrator, method='adams'): - - # ode has callback arguments in different order than odeint - f = lambda t, z: problem.f(z, t) - jac = None - if hasattr(problem, 'jac'): - jac = lambda t, z: problem.jac(z, t) - ig = complex_ode(f, jac) - 
ig.set_integrator(integrator, - atol=problem.atol/10, - rtol=problem.rtol/10, - method=method) - ig.set_initial_value(problem.z0, t=0.0) - z = ig.integrate(problem.stop_t) - - assert_(ig.successful(), (problem, method)) - assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) - - def test_vode(self): - """Check the vode solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if not problem.stiff: - self._do_problem(problem, 'vode', 'adams') - else: - self._do_problem(problem, 'vode', 'bdf') - - def test_dopri5(self): - """Check the dopri5 solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if problem.stiff: continue - if hasattr(problem, 'jac'): continue - self._do_problem(problem, 'dopri5') - - def test_dop853(self): - """Check the dop853 solver""" - for problem_cls in PROBLEMS: - problem = problem_cls() - if problem.stiff: continue - if hasattr(problem, 'jac'): continue - self._do_problem(problem, 'dop853') - -#------------------------------------------------------------------------------ -# Test problems -#------------------------------------------------------------------------------ - -class ODE: - """ - ODE problem - """ - stiff = False - cmplx = False - stop_t = 1 - z0 = [] - - atol = 1e-6 - rtol = 1e-5 - -class SimpleOscillator(ODE): - r""" - Free vibration of a simple oscillator:: - m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 - Solution:: - u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) - """ - stop_t = 1 + 0.09 - z0 = array([1.0, 0.1], float) - - k = 4.0 - m = 1.0 - - def f(self, z, t): - tmp = zeros((2,2), float) - tmp[0,1] = 1.0 - tmp[1,0] = -self.k / self.m - return dot(tmp, z) - - def verify(self, zs, t): - omega = sqrt(self.k / self.m) - u = self.z0[0]*cos(omega*t)+self.z0[1]*sin(omega*t)/omega - return allclose(u, zs[:,0], atol=self.atol, rtol=self.rtol) - -class ComplexExp(ODE): - r"""The equation :lm:`\dot u = i u`""" - stop_t = 1.23*pi - z0 = exp([1j,2j,3j,4j,5j]) - cmplx = 
True - - def f(self, z, t): - return 1j*z - - def jac(self, z, t): - return 1j*eye(5) - - def verify(self, zs, t): - u = self.z0 * exp(1j*t) - return allclose(u, zs, atol=self.atol, rtol=self.rtol) - -class Pi(ODE): - r"""Integrate 1/(t + 1j) from t=-10 to t=10""" - stop_t = 20 - z0 = [0] - cmplx = True - - def f(self, z, t): - return array([1./(t - 10 + 1j)]) - def verify(self, zs, t): - u = -2j*numpy.arctan(10) - return allclose(u, zs[-1,:], atol=self.atol, rtol=self.rtol) - -PROBLEMS = [SimpleOscillator, ComplexExp, Pi] - -#------------------------------------------------------------------------------ - -def f(t, x): - dxdt = [x[1], -x[0]] - return dxdt - -def jac(t, x): - j = array([[ 0.0, 1.0], - [-1.0, 0.0]]) - return j - -def f1(t, x, omega): - dxdt = [omega*x[1], -omega*x[0]] - return dxdt - -def jac1(t, x, omega): - j = array([[ 0.0, omega], - [-omega, 0.0]]) - return j - -def f2(t, x, omega1, omega2): - dxdt = [omega1*x[1], -omega2*x[0]] - return dxdt - -def jac2(t, x, omega1, omega2): - j = array([[ 0.0, omega1], - [-omega2, 0.0]]) - return j - -def fv(t, x, omega): - dxdt = [omega[0]*x[1], -omega[1]*x[0]] - return dxdt - -def jacv(t, x, omega): - j = array([[ 0.0, omega[0]], - [-omega[1], 0.0]]) - return j - - -class ODECheckParameterUse(object): - """Call an ode-class solver with several cases of parameter use.""" - - # This class is intentionally not a TestCase subclass. - # solver_name must be set before tests can be run with this class. - - # Set these in subclasses. - solver_name = '' - solver_uses_jac = False - - def _get_solver(self, f, jac): - solver = ode(f, jac) - if self.solver_uses_jac: - solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, - with_jacobian=self.solver_uses_jac) - else: - # XXX Shouldn't set_integrator *always* accept the keyword arg - # 'with_jacobian', and perhaps raise an exception if it is set - # to True if the solver can't actually use it? 
- solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) - return solver - - def _check_solver(self, solver): - ic = [1.0, 0.0] - solver.set_initial_value(ic, 0.0) - solver.integrate(pi) - assert_array_almost_equal(solver.y, [-1.0, 0.0]) - - def test_no_params(self): - solver = self._get_solver(f, jac) - self._check_solver(solver) - - def test_one_scalar_param(self): - solver = self._get_solver(f1, jac1) - omega = 1.0 - solver.set_f_params(omega) - if self.solver_uses_jac: - solver.set_jac_params(omega) - self._check_solver(solver) - - def test_two_scalar_params(self): - solver = self._get_solver(f2, jac2) - omega1 = 1.0 - omega2 = 1.0 - solver.set_f_params(omega1, omega2) - if self.solver_uses_jac: - solver.set_jac_params(omega1, omega2) - self._check_solver(solver) - - def test_vector_param(self): - solver = self._get_solver(fv, jacv) - omega = [1.0, 1.0] - solver.set_f_params(omega) - if self.solver_uses_jac: - solver.set_jac_params(omega) - self._check_solver(solver) - - -class DOPRI5CheckParameterUse(ODECheckParameterUse, TestCase): - solver_name = 'dopri5' - solver_uses_jac = False - - -class DOP853CheckParameterUse(ODECheckParameterUse, TestCase): - solver_name = 'dop853' - solver_uses_jac = False - - -class VODECheckParameterUse(ODECheckParameterUse, TestCase): - solver_name = 'vode' - solver_uses_jac = True - - -class ZVODECheckParameterUse(ODECheckParameterUse, TestCase): - solver_name = 'zvode' - solver_uses_jac = True - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/integrate/tests/test_quadpack.py b/scipy-0.10.1/scipy/integrate/tests/test_quadpack.py deleted file mode 100644 index e5d59ac069..0000000000 --- a/scipy-0.10.1/scipy/integrate/tests/test_quadpack.py +++ /dev/null @@ -1,106 +0,0 @@ -from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf -from numpy.testing import assert_, TestCase, run_module_suite -from scipy.integrate import quad, dblquad, tplquad - -def assert_quad((value, err), tabledValue, 
errTol=1.5e-8): - assert_(abs(value-tabledValue) < err, (value, tabledValue, err)) - if errTol is not None: - assert_(err < errTol, (err, errTol)) - -class TestQuad(TestCase): - def test_typical(self): - # 1) Typical function with two extra arguments: - def myfunc(x,n,z): # Bessel function integrand - return cos(n*x-z*sin(x))/pi - assert_quad(quad(myfunc,0,pi,(2,1.8)), 0.30614353532540296487) - - def test_indefinite(self): - # 2) Infinite integration limits --- Euler's constant - def myfunc(x): # Euler's constant integrand - return -exp(-x)*log(x) - assert_quad(quad(myfunc,0,Inf), 0.577215664901532860606512) - - def test_singular(self): - # 3) Singular points in region of integration. - def myfunc(x): - if x > 0 and x < 2.5: - return sin(x) - elif x>= 2.5 and x <= 5.0: - return exp(-x) - else: - return 0.0 - - assert_quad(quad(myfunc,0,10,points=[2.5,5.0]), - 1 - cos(2.5) + exp(-2.5) - exp(-5.0)) - - def test_sine_weighted_finite(self): - # 4) Sine weighted integral (finite limits) - def myfunc(x,a): - return exp(a*(x-1)) - - ome = 2.0**3.4 - assert_quad(quad(myfunc,0,1,args=20,weight='sin',wvar=ome), - (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2)) - - def test_sine_weighted_infinite(self): - # 5) Sine weighted integral (infinite limits) - def myfunc(x,a): - return exp(-x*a) - - a = 4.0 - ome = 3.0 - assert_quad(quad(myfunc,0,Inf,args=a,weight='sin',wvar=ome), - ome/(a**2 + ome**2)) - - def test_cosine_weighted_infinite(self): - # 6) Cosine weighted integral (negative infinite limits) - def myfunc(x,a): - return exp(x*a) - - a = 2.5 - ome = 2.3 - assert_quad(quad(myfunc,-Inf,0,args=a,weight='cos',wvar=ome), - a/(a**2 + ome**2)) - - def test_algebraic_log_weight(self): - # 6) Algebraic-logarithmic weight. 
- def myfunc(x,a): - return 1/(1+x+2**(-a)) - - a = 1.5 - assert_quad(quad(myfunc,-1,1,args=a,weight='alg',wvar=(-0.5,-0.5)), - pi/sqrt((1+2**(-a))**2 - 1)) - - def test_cauchypv_weight(self): - # 7) Cauchy prinicpal value weighting w(x) = 1/(x-c) - def myfunc(x,a): - return 2.0**(-a)/((x-1)**2+4.0**(-a)) - - a = 0.4 - tabledValue = (2.0**(-0.4)*log(1.5)-2.0**(-1.4)*log((4.0**(-a)+16)/(4.0**(-a)+1)) - - arctan(2.0**(a+2)) - arctan(2.0**a))/(4.0**(-a) + 1) - assert_quad(quad(myfunc,0,5,args=0.4,weight='cauchy',wvar=2.0), - tabledValue, errTol=1.9e-8) - - def test_double_integral(self): - # 8) Double Integral test - def simpfunc(y,x): # Note order of arguments. - return x+y - - a, b = 1.0, 2.0 - assert_quad(dblquad(simpfunc,a,b,lambda x: x, lambda x: 2*x), - 5/6.0 * (b**3.0-a**3.0)) - - def test_triple_integral(self): - # 9) Triple Integral test - def simpfunc(z,y,x): # Note order of arguments. - return x+y+z - - a, b = 1.0, 2.0 - assert_quad(tplquad(simpfunc,a,b, - lambda x: x, lambda x: 2*x, - lambda x,y: x-y, lambda x,y: x+y), - 8/3.0 * (b**4.0 - a**4.0)) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/integrate/tests/test_quadrature.py b/scipy-0.10.1/scipy/integrate/tests/test_quadrature.py deleted file mode 100644 index c3eef9f13c..0000000000 --- a/scipy-0.10.1/scipy/integrate/tests/test_quadrature.py +++ /dev/null @@ -1,94 +0,0 @@ -import numpy -from numpy import cos, sin, pi -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_almost_equal, assert_allclose - -from scipy.integrate import quadrature, romberg, romb, newton_cotes - -class TestQuadrature(TestCase): - def quad(self, x, a, b, args): - raise NotImplementedError - - def test_quadrature(self): - # Typical function with two extra arguments: - def myfunc(x,n,z): # Bessel function integrand - return cos(n*x-z*sin(x))/pi - val, err = quadrature(myfunc,0,pi,(2,1.8)) - table_val = 0.30614353532540296487 - assert_almost_equal(val, table_val, 
decimal=7) - - def test_quadrature_rtol(self): - def myfunc(x,n,z): # Bessel function integrand - return 1e90 * cos(n*x-z*sin(x))/pi - val, err = quadrature(myfunc,0,pi,(2,1.8),rtol=1e-10) - table_val = 1e90 * 0.30614353532540296487 - assert_allclose(val, table_val, rtol=1e-10) - - def test_romberg(self): - # Typical function with two extra arguments: - def myfunc(x, n, z): # Bessel function integrand - return cos(n*x-z*sin(x))/pi - val = romberg(myfunc,0,pi, args=(2, 1.8)) - table_val = 0.30614353532540296487 - assert_almost_equal(val, table_val, decimal=7) - - def test_romberg_rtol(self): - # Typical function with two extra arguments: - def myfunc(x, n, z): # Bessel function integrand - return 1e19*cos(n*x-z*sin(x))/pi - val = romberg(myfunc,0,pi, args=(2, 1.8), rtol=1e-10) - table_val = 1e19*0.30614353532540296487 - assert_allclose(val, table_val, rtol=1e-10) - - def test_romb(self): - assert_equal(romb(numpy.arange(17)),128) - - def test_non_dtype(self): - # Check that we work fine with functions returning float - import math - valmath = romberg(math.sin, 0, 1) - expected_val = 0.45969769413185085 - assert_almost_equal(valmath, expected_val, decimal=7) - - def test_newton_cotes(self): - """Test the first few degrees, for evenly spaced points.""" - n = 1 - wts, errcoff = newton_cotes(n, 1) - assert_equal(wts, n*numpy.array([0.5, 0.5])) - assert_almost_equal(errcoff, -n**3/12.0) - - n = 2 - wts, errcoff = newton_cotes(n, 1) - assert_almost_equal(wts, n*numpy.array([1.0, 4.0, 1.0])/6.0) - assert_almost_equal(errcoff, -n**5/2880.0) - - n = 3 - wts, errcoff = newton_cotes(n, 1) - assert_almost_equal(wts, n*numpy.array([1.0, 3.0, 3.0, 1.0])/8.0) - assert_almost_equal(errcoff, -n**5/6480.0) - - n = 4 - wts, errcoff = newton_cotes(n, 1) - assert_almost_equal(wts, n*numpy.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0) - assert_almost_equal(errcoff, -n**7/1935360.0) - - def test_newton_cotes2(self): - """Test newton_cotes with points that are not evenly spaced.""" - - x = 
numpy.array([0.0, 1.5, 2.0]) - y = x**2 - wts, errcoff = newton_cotes(x) - exact_integral = 8.0/3 - numeric_integral = numpy.dot(wts, y) - assert_almost_equal(numeric_integral, exact_integral) - - x = numpy.array([0.0, 1.4, 2.1, 3.0]) - y = x**2 - wts, errcoff = newton_cotes(x) - exact_integral = 9.0 - numeric_integral = numpy.dot(wts, y) - assert_almost_equal(numeric_integral, exact_integral) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/integrate/vode.pyf b/scipy-0.10.1/scipy/integrate/vode.pyf deleted file mode 100644 index 39f2babe66..0000000000 --- a/scipy-0.10.1/scipy/integrate/vode.pyf +++ /dev/null @@ -1,114 +0,0 @@ -!%f90 -*- f90 -*- -!Author: Pearu Peterson -!Date: 3 Feb 2002 -!$Revision$ - -python module dvode__user__routines - interface dvode_user_interface - subroutine f(n,t,y,ydot,rpar,ipar) - integer intent(hide) :: n - double precision intent(in) :: t - double precision dimension(n),intent(in,c) :: y - double precision dimension(n),intent(out,c) :: ydot - double precision intent(hide) :: rpar - integer intent(hide) :: ipar - end subroutine f - subroutine jac(n,t,y,ml,mu,jac,nrowpd,rpar,ipar) - integer intent(hide) :: n - double precision :: t - double precision dimension(n),intent(c,in) :: y - integer intent(hide) :: ml,mu - integer intent(hide):: nrowpd - double precision intent(out) :: jac(nrowpd, n) - double precision intent(hide) :: rpar - integer intent(hide) :: ipar - end subroutine jac - end interface -end python module dvode__user__routines - -python module zvode__user__routines - interface zvode_user_interface - subroutine f(n,t,y,ydot,rpar,ipar) - integer intent(hide) :: n - double precision intent(in) :: t - double complex dimension(n),intent(in,c) :: y - double complex dimension(n),intent(out,c) :: ydot - double precision intent(hide) :: rpar - integer intent(hide) :: ipar - end subroutine f - subroutine jac(n,t,y,ml,mu,jac,nrowpd,rpar,ipar) - integer intent(hide) :: n - double precision :: t - 
double complex dimension(n),intent(c,in) :: y - integer intent(hide) :: ml,mu - integer intent(hide):: nrowpd - double complex intent(out) :: jac(nrowpd, n) - double precision intent(hide) :: rpar - integer intent(hide) :: ipar - end subroutine jac - end interface -end python module zvode__user__routines - -python module vode - interface - subroutine dvode(f,jac,neq,y,t,tout,itol,rtol,atol,itask,istate,iopt,rwork,lrw,iwork,liw,mf,rpar,ipar) - ! y1,t,istate = dvode(f,jac,y0,t0,t1,rtol,atol,itask,istate,rwork,iwork,mf) - callstatement (*f2py_func)(cb_f_in_dvode__user__routines,&neq,y,&t,&tout,&itol,rtol,atol,&itask,&istate,&iopt,rwork,&lrw,iwork,&liw,cb_jac_in_dvode__user__routines,&mf,&rpar,&ipar) - use dvode__user__routines - external f - external jac - - integer intent(hide),depend(y) :: neq = len(y) - double precision dimension(neq),intent(in,out,copy) :: y - double precision intent(in,out):: t - double precision intent(in):: tout - integer intent(hide),depend(atol) :: itol = (len(atol)<=1 && len(rtol)<=1?1:(len(rtol)<=1?2:(len(atol)<=1?3:4))) - double precision dimension(*),intent(in),check(len(atol)<& - &=1||len(atol)>=neq),depend(neq) :: atol - double precision dimension(*),intent(in),check(len(rtol)<& - &=1||len(rtol)>=neq),depend(neq) :: rtol - integer intent(in),check(itask>0 && itask<6) :: itask - integer intent(in,out),check(istate>0 && istate<4) :: istate - integer intent(hide) :: iopt = 1 - double precision dimension(lrw),intent(in,cache) :: rwork - integer intent(hide),check(len(rwork)>=lrw),depend(rwork) :: lrw=len(rwork) - integer dimension(liw),intent(in,cache) :: iwork - integer intent(hide),check(len(iwork)>=liw),depend(iwork) :: liw=len(iwork) - integer intent(in) :: mf - double precision intent(hide) :: rpar = 0.0 - integer intent(hide) :: ipar = 0 - end subroutine dvode - end interface - - interface - subroutine zvode(f,jac,neq,y,t,tout,itol,rtol,atol,itask,istate,iopt,zwork,lzw,rwork,lrw,iwork,liw,mf,rpar,ipar) - ! 
y1,t,istate = zvode(f,jac,y0,t0,t1,rtol,atol,itask,istate,rwork,iwork,mf) - callstatement (*f2py_func)(cb_f_in_zvode__user__routines,&neq,y,&t,&tout,&itol,rtol,atol,&itask,&istate,&iopt,zwork,&lzw,rwork,&lrw,iwork,&liw,cb_jac_in_zvode__user__routines,&mf,&rpar,&ipar) - use zvode__user__routines - external f - external jac - - integer intent(hide),depend(y) :: neq = len(y) - double complex dimension(neq),intent(in,out,copy) :: y - double precision intent(in,out):: t - double precision intent(in):: tout - integer intent(hide),depend(atol) :: itol = (len(atol)<=1 && len(rtol)<=1?1:(len(rtol)<=1?2:(len(atol)<=1?3:4))) - double precision dimension(*),intent(in),check(len(atol)<& - &=1||len(atol)>=neq),depend(neq) :: atol - double precision dimension(*),intent(in),check(len(rtol)<& - &=1||len(rtol)>=neq),depend(neq) :: rtol - integer intent(in),check(itask>0 && itask<6) :: itask - integer intent(in,out),check(istate>0 && istate<4) :: istate - integer intent(hide) :: iopt = 1 - double complex dimension(lzw),intent(in,cache) :: zwork - integer intent(hide),check(len(zwork)>=lzw),depend(zwork) :: lzw=len(zwork) - double precision dimension(lrw),intent(in,cache) :: rwork - integer intent(hide),check(len(rwork)>=lrw),depend(rwork) :: lrw=len(rwork) - integer dimension(liw),intent(in,cache) :: iwork - integer intent(hide),check(len(iwork)>=liw),depend(iwork) :: liw=len(iwork) - integer intent(in) :: mf - double precision intent(hide) :: rpar = 0.0 - integer intent(hide) :: ipar = 0 - end subroutine zvode - end interface -end python module vode diff --git a/scipy-0.10.1/scipy/interpolate/SConscript b/scipy-0.10.1/scipy/interpolate/SConscript deleted file mode 100644 index 802b1d99e2..0000000000 --- a/scipy-0.10.1/scipy/interpolate/SConscript +++ /dev/null @@ -1,49 +0,0 @@ -# Last Change: Fri Oct 10 03:00 PM 2008 J -# vim:syntax=python -from os.path import join as pjoin - -from numscons import GetNumpyEnvironment, CheckF77Clib, CheckF77Mangling - -env = 
GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') - -config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib, 'CheckF77Mangling': CheckF77Mangling}) -if not config.CheckF77Clib(): - raise Exception("Could not check F77 runtime, needed for interpolate") -config.CheckF77Mangling() -config.Finish() - -# Build fitpack -src = [pjoin("fitpack", s) for s in ["bispev.f", "bispeu.f", "clocur.f", -"cocosp.f", "concon.f", "concur.f", "cualde.f", "curev.f", "curfit.f", -"dblint.f", "evapol.f", "fourco.f", "fpader.f", "fpadno.f", "fpadpo.f", -"fpback.f", "fpbacp.f", "fpbfout.f", "fpbisp.f", "fpbspl.f", "fpchec.f", -"fpched.f", "fpchep.f", "fpclos.f", "fpcoco.f", "fpcons.f", "fpcosp.f", -"fpcsin.f", "fpcurf.f", "fpcuro.f", "fpcyt1.f", "fpcyt2.f", "fpdeno.f", -"fpdisc.f", "fpfrno.f", "fpgivs.f", "fpgrdi.f", "fpgrpa.f", "fpgrre.f", -"fpgrsp.f", "fpinst.f", "fpintb.f", "fpknot.f", "fpopdi.f", "fpopsp.f", -"fporde.f", "fppara.f", "fppasu.f", "fpperi.f", "fppocu.f", "fppogr.f", -"fppola.f", "fprank.f", "fprati.f", "fpregr.f", "fprota.f", "fprppo.f", -"fprpsp.f", "fpseno.f", "fpspgr.f", "fpsphe.f", "fpsuev.f", "fpsurf.f", -"fpsysy.f", "fptrnp.f", "fptrpe.f", "insert.f", "parcur.f", "parder.f", -"parsur.f", "percur.f", "pogrid.f", "polar.f", "profil.f", "regrid.f", -"spalde.f", "spgrid.f", "sphere.f", "splder.f", "splev.f", "splint.f", -"sproot.f", "surev.f", "surfit.f"]] -fitpack = env.DistutilsStaticExtLibrary('fitpack', source = src) - -env.PrependUnique(LIBPATH = ['.']) - -# Build _fitpack -env.NumpyPythonExtension('_fitpack', source = 'src/_fitpackmodule.c', - LIBS="fitpack") - -# Build dfitpack -env.NumpyPythonExtension('dfitpack', source = 'src/fitpack.pyf', - LIBS="fitpack") - -# Build _interpolate -env.NumpyPythonExtension('_interpolate', source = 'src/_interpolate.cpp', - CXXFILESUFFIX = ".cpp") - -# Build interpnd -env.NumpyPythonExtension('interpnd', source = 'interpnd.c') diff --git a/scipy-0.10.1/scipy/interpolate/SConstruct 
b/scipy-0.10.1/scipy/interpolate/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/interpolate/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/interpolate/__init__.py b/scipy-0.10.1/scipy/interpolate/__init__.py deleted file mode 100644 index 6f9a75b959..0000000000 --- a/scipy-0.10.1/scipy/interpolate/__init__.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -======================================== -Interpolation (:mod:`scipy.interpolate`) -======================================== - -.. currentmodule:: scipy.interpolate - -Sub-package for objects used in interpolation. - -As listed below, this sub-package contains spline functions and classes, -one-dimensional and multi-dimensional (univariate and multivariate) -interpolation classes, Lagrange and Taylor polynomial interpolators, and -wrappers for `FITPACK `_ -and DFITPACK functions. - -Univariate interpolation -======================== - -.. autosummary:: - :toctree: generated/ - - interp1d - BarycentricInterpolator - KroghInterpolator - PiecewisePolynomial - barycentric_interpolate - krogh_interpolate - piecewise_polynomial_interpolate - - -Multivariate interpolation -========================== - -Unstructured data: - -.. autosummary:: - :toctree: generated/ - - griddata - LinearNDInterpolator - NearestNDInterpolator - CloughTocher2DInterpolator - Rbf - interp2d - -For data on a grid: - -.. autosummary:: - - RectBivariateSpline - -.. seealso:: `scipy.ndimage.map_coordinates` - - -1-D Splines -=========== - -.. autosummary:: - :toctree: generated/ - - UnivariateSpline - InterpolatedUnivariateSpline - LSQUnivariateSpline - -The above univariate spline classes have the following methods: - -.. 
autosummary:: - - UnivariateSpline.__call__ - UnivariateSpline.derivatives - UnivariateSpline.integral - UnivariateSpline.roots - UnivariateSpline.get_coeffs - UnivariateSpline.get_knots - UnivariateSpline.get_residual - UnivariateSpline.set_smoothing_factor - - -Low-level interface to FITPACK functions: - -.. autosummary:: - :toctree: generated/ - - splrep - splprep - splev - splint - sproot - spalde - bisplrep - bisplev - - -2-D Splines -=========== - -For data on a grid: - -.. autosummary:: - :toctree: generated/ - - RectBivariateSpline - -For unstructured data: - -.. autosummary:: - :toctree: generated/ - - BivariateSpline - SmoothBivariateSpline - LSQBivariateSpline - -Low-level interface to FITPACK functions: - -.. autosummary:: - :toctree: generated/ - - bisplrep - bisplev - -Additional tools -================ - -.. autosummary:: - :toctree: generated/ - - lagrange - approximate_taylor_polynomial - -.. seealso:: - - `scipy.ndimage.map_coordinates`, - `scipy.ndimage.spline_filter`, - `scipy.signal.resample`, - `scipy.signal.bspline`, - `scipy.signal.gauss_spline`, - `scipy.signal.qspline1d`, - `scipy.signal.cspline1d`, - `scipy.signal.qspline1d_eval`, - `scipy.signal.cspline1d_eval`, - `scipy.signal.qspline2d`, - `scipy.signal.cspline2d`. 
- -""" - -from interpolate import * -from fitpack import * - -# New interface to fitpack library: -from fitpack2 import * - -from rbf import Rbf - -from polyint import * - -from ndgriddata import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/interpolate/bento.info b/scipy-0.10.1/scipy/interpolate/bento.info deleted file mode 100644 index e69b54ee33..0000000000 --- a/scipy-0.10.1/scipy/interpolate/bento.info +++ /dev/null @@ -1,14 +0,0 @@ -HookFile: bscript - -Library: - CompiledLibrary: fitpack - Sources: - fitpack/*.f - Extension: _fitpack - Sources: src/_fitpackmodule.c - Extension: dfitpack - Sources: src/fitpack.pyf - Extension: _interpolate - Sources: src/_interpolate.cpp - Extension: interpnd - Sources: interpnd.c diff --git a/scipy-0.10.1/scipy/interpolate/bscript b/scipy-0.10.1/scipy/interpolate/bscript deleted file mode 100644 index 20627bf8ad..0000000000 --- a/scipy-0.10.1/scipy/interpolate/bscript +++ /dev/null @@ -1,20 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - def builder_factory(use): - use = "%s FBLAS CLIB" % use - return lambda e: default_builder(e, use=use) - - context.register_builder("_fitpack", builder_factory("fitpack")) - context.register_builder("interpnd", builder_factory("fitpack")) - context.register_builder("dfitpack", - lambda e: default_builder(e, - use="fitpack FBLAS CLIB", - features="c cshlib pyext bento f2py")) - context.register_builder("_interpolate", - lambda e: default_builder(e, - use="FBLAS CLIB", - features="cxx cxxshlib pyext bento")) diff --git a/scipy-0.10.1/scipy/interpolate/fitpack.py b/scipy-0.10.1/scipy/interpolate/fitpack.py deleted file mode 100644 index 45495ffdca..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack.py +++ /dev/null @@ -1,1228 +0,0 @@ -#!/usr/bin/env python -""" -fitpack (dierckx in netlib) --- A Python-C 
wrapper to FITPACK (by P. Dierckx). - FITPACK is a collection of FORTRAN programs for curve and surface - fitting with splines and tensor product splines. - -See - http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html -or - http://www.netlib.org/dierckx/index.html - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the SciPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -Pearu Peterson - -Running test programs: - $ python fitpack.py 1 3 # run test programs 1, and 3 - $ python fitpack.py # run all available test programs - -TODO: Make interfaces to the following fitpack functions: - For univariate splines: cocosp, concon, fourco, insert - For bivariate splines: profil, regrid, parsur, surev -""" - -__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', - 'bisplrep', 'bisplev', 'insert'] -__version__ = "$Revision$"[10:-1] -import _fitpack -from numpy import atleast_1d, array, ones, zeros, sqrt, ravel, transpose, \ - dot, sin, cos, pi, arange, empty, int32 -myasarray = atleast_1d - -# Try to replace _fitpack interface with -# f2py-generated version -import dfitpack - -_iermess = {0:["""\ - The spline has a residual sum of squares fp such that abs(fp-s)/s<=0.001""",None], - -1:["""\ - The spline is an interpolating spline (fp=0)""",None], - -2:["""\ - The spline is weighted least-squares polynomial of degree k. - fp gives the upper bound fp0 for the smoothing factor s""",None], - 1:["""\ - The required storage space exceeds the available storage space. - Probable causes: data (x,y) size is too small or smoothing parameter s is too small (fp>s).""",ValueError], - 2:["""\ - A theoretically impossible results when finding a smoothin spline - with fp = s. Probably causes: s too small. 
(abs(fp-s)/s>0.001)""",ValueError], - 3:["""\ - The maximal number of iterations (20) allowed for finding smoothing - spline with fp=s has been reached. Probably causes: s too small. - (abs(fp-s)/s>0.001)""",ValueError], - 10:["""\ - Error on input data""",ValueError], - 'unknown':["""\ - An error occurred""",TypeError]} - -_iermess2 = {0:["""\ - The spline has a residual sum of squares fp such that abs(fp-s)/s<=0.001""",None], - -1:["""\ - The spline is an interpolating spline (fp=0)""",None], - -2:["""\ - The spline is weighted least-squares polynomial of degree kx and ky. - fp gives the upper bound fp0 for the smoothing factor s""",None], - -3:["""\ - Warning. The coefficients of the spline have been computed as the minimal - norm least-squares solution of a rank deficient system.""",None], - 1:["""\ - The required storage space exceeds the available storage space. - Probably causes: nxest or nyest too small or s is too small. (fp>s)""",ValueError], - 2:["""\ - A theoretically impossible results when finding a smoothin spline - with fp = s. Probably causes: s too small or badly chosen eps. - (abs(fp-s)/s>0.001)""",ValueError], - 3:["""\ - The maximal number of iterations (20) allowed for finding smoothing - spline with fp=s has been reached. Probably causes: s too small. - (abs(fp-s)/s>0.001)""",ValueError], - 4:["""\ - No more knots can be added because the number of B-spline coefficients - already exceeds the number of data points m. Probably causes: either - s or m too small. (fp>s)""",ValueError], - 5:["""\ - No more knots can be added because the additional knot would coincide - with an old one. Probably cause: s too small or too large a weight - to an inaccurate data point. (fp>s)""",ValueError], - 10:["""\ - Error on input data""",ValueError], - 11:["""\ - rwrk2 too small, i.e. 
there is not enough workspace for computing - the minimal least-squares solution of a rank deficient system of linear - equations.""",ValueError], - 'unknown':["""\ - An error occurred""",TypeError]} - -_parcur_cache = {'t': array([],float), 'wrk': array([],float), - 'iwrk':array([],int32), 'u': array([],float),'ub':0,'ue':1} - -def splprep(x,w=None,u=None,ub=None,ue=None,k=3,task=0,s=None,t=None, - full_output=0,nest=None,per=0,quiet=1): - """ - Find the B-spline representation of an N-dimensional curve. - - Given a list of N rank-1 arrays, x, which represent a curve in - N-dimensional space parametrized by u, find a smooth approximating - spline curve g(u). Uses the FORTRAN routine parcur from FITPACK. - - Parameters - ---------- - x : array_like - A list of sample vector arrays representing the curve. - u : array_like, optional - An array of parameter values. If not given, these values are - calculated automatically as ``M = len(x[0])``: - v[0] = 0 - v[i] = v[i-1] + distance(x[i],x[i-1]) - u[i] = v[i] / v[M-1] - ub, ue : int, optional - The end-points of the parameters interval. Defaults to - u[0] and u[-1]. - k : int, optional - Degree of the spline. Cubic splines are recommended. - Even values of `k` should be avoided especially with a small s-value. - ``1 <= k <= 5``, default is 3. - task : int, optional - If task==0 (default), find t and c for a given smoothing factor, s. - If task==1, find t and c for another value of the smoothing factor, s. - There must have been a previous call with task=0 or task=1 - for the same set of data. - If task=-1 find the weighted least square spline for a given set of - knots, t. - s : float, optional - A smoothing condition. - The amount of smoothness is determined by - satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``, - where g(x) is the smoothed interpolation of (x,y). The user can - use `s` to control the trade-off between closeness and smoothness - of fit. 
Larger `s` means more smoothing while smaller values of `s` - indicate less smoothing. Recommended values of `s` depend on the - weights, w. If the weights represent the inverse of the - standard-deviation of y, then a good `s` value should be found in - the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of - data points in x, y, and w. - t : int, optional - The knots needed for task=-1. - full_output : int, optional - If non-zero, then return optional outputs. - nest : int, optional - An over-estimate of the total number of knots of the spline to - help in determining the storage space. By default nest=m/2. - Always large enough is nest=m+k+1. - per : int, optional - If non-zero, data points are considered periodic with period - x[m-1] - x[0] and a smooth periodic spline approximation is returned. - Values of y[m-1] and w[m-1] are not used. - quiet : int, optional - Non-zero to suppress messages. - - Returns - ------- - tck : tuple - A tuple (t,c,k) containing the vector of knots, the B-spline - coefficients, and the degree of the spline. - u : array - An array of the values of the parameter. - fp : float - The weighted sum of squared residuals of the spline approximation. - ier : int - An integer flag about splrep success. Success is indicated - if ier<=0. If ier in [1,2,3] an error occurred but was not raised. - Otherwise an error is raised. - msg : str - A message corresponding to the integer flag, ier. - - See Also - -------- - splrep, splev, sproot, spalde, splint, - bisplrep, bisplev - UnivariateSpline, BivariateSpline - - Notes - ----- - See `splev` for evaluation of the spline and its derivatives. - - References - ---------- - .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and - parametric splines, Computer Graphics and Image Processing", - 20 (1982) 171-184. - .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and - parametric splines", report tw55, Dept. Computer Science, - K.U.Leuven, 1981. - .. [3] P. 
Dierckx, "Curve and surface fitting with splines", Monographs on - Numerical Analysis, Oxford University Press, 1993. - - """ - if task<=0: - _parcur_cache = {'t': array([],float), 'wrk': array([],float), - 'iwrk':array([],int32),'u': array([],float), - 'ub':0,'ue':1} - x=myasarray(x) - idim,m=x.shape - if per: - for i in range(idim): - if x[i][0]!=x[i][-1]: - if quiet<2:print 'Warning: Setting x[%d][%d]=x[%d][0]'%(i,m,i) - x[i][-1]=x[i][0] - if not 0 < idim < 11: - raise TypeError('0 < idim < 11 must hold') - if w is None: - w = ones(m, float) - else: - w = myasarray(w) - ipar = (u is not None) - if ipar: - _parcur_cache['u']=u - if ub is None: _parcur_cache['ub']=u[0] - else: _parcur_cache['ub']=ub - if ue is None: _parcur_cache['ue']=u[-1] - else: _parcur_cache['ue']=ue - else: _parcur_cache['u']=zeros(m,float) - if not (1 <= k <= 5): - raise TypeError('1 <= k= %d <=5 must hold' % k) - if not (-1 <= task <=1): - raise TypeError('task must be -1, 0 or 1') - if (not len(w)==m) or (ipar==1 and (not len(u)==m)): - raise TypeError('Mismatch of input dimensions') - if s is None: s=m-sqrt(2*m) - if t is None and task == -1: - raise TypeError('Knots must be given for task=-1') - if t is not None: - _parcur_cache['t']=myasarray(t) - n=len(_parcur_cache['t']) - if task==-1 and n<2*k+2: - raise TypeError('There must be at least 2*k+2 knots for task=-1') - if m <= k: - raise TypeError('m > k must hold') - if nest is None: nest=m+2*k - - if (task>=0 and s==0) or (nest<0): - if per: nest=m+2*k - else: nest=m+k+1 - nest=max(nest,2*k+3) - u=_parcur_cache['u'] - ub=_parcur_cache['ub'] - ue=_parcur_cache['ue'] - t=_parcur_cache['t'] - wrk=_parcur_cache['wrk'] - iwrk=_parcur_cache['iwrk'] - t,c,o=_fitpack._parcur(ravel(transpose(x)),w,u,ub,ue,k,task,ipar,s,t, - nest,wrk,iwrk,per) - _parcur_cache['u']=o['u'] - _parcur_cache['ub']=o['ub'] - _parcur_cache['ue']=o['ue'] - _parcur_cache['t']=t - _parcur_cache['wrk']=o['wrk'] - _parcur_cache['iwrk']=o['iwrk'] - 
ier,fp,n=o['ier'],o['fp'],len(t) - u=o['u'] - c.shape=idim,n-k-1 - tcku = [t,list(c),k],u - if ier<=0 and not quiet: - print _iermess[ier][0] - print "\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s) - if ier>0 and not full_output: - if ier in [1,2,3]: - print "Warning: "+_iermess[ier][0] - else: - try: - raise _iermess[ier][1](_iermess[ier][0]) - except KeyError: - raise _iermess['unknown'][1](_iermess['unknown'][0]) - if full_output: - try: - return tcku,fp,ier,_iermess[ier][0] - except KeyError: - return tcku,fp,ier,_iermess['unknown'][0] - else: - return tcku - -_curfit_cache = {'t': array([],float), 'wrk': array([],float), - 'iwrk':array([],int32)} -def splrep(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None, - full_output=0,per=0,quiet=1): - """ - Find the B-spline representation of 1-D curve. - - Given the set of data points (x[i], y[i]) determine a smooth spline - approximation of degree k on the interval xb <= x <= xe. The coefficients, - c, and the knot points, t, are returned. Uses the FORTRAN routine - curfit from FITPACK. - - Parameters - ---------- - x, y : array_like - The data points defining a curve y = f(x). - w : array_like - Strictly positive rank-1 array of weights the same length as x and y. - The weights are used in computing the weighted least-squares spline - fit. If the errors in the y values have standard-deviation given by the - vector d, then w should be 1/d. Default is ones(len(x)). - xb, xe : float - The interval to fit. If None, these default to x[0] and x[-1] - respectively. - k : int - The order of the spline fit. It is recommended to use cubic splines. - Even order splines should be avoided especially with small s values. - 1 <= k <= 5 - task : {1, 0, -1} - If task==0 find t and c for a given smoothing factor, s. - - If task==1 find t and c for another value of the smoothing factor, s. 
- There must have been a previous call with task=0 or task=1 for the same - set of data (t will be stored an used internally) - - If task=-1 find the weighted least square spline for a given set of - knots, t. These should be interior knots as knots on the ends will be - added automatically. - s : float - A smoothing condition. The amount of smoothness is determined by - satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x) - is the smoothed interpolation of (x,y). The user can use s to control - the tradeoff between closeness and smoothness of fit. Larger s means - more smoothing while smaller values of s indicate less smoothing. - Recommended values of s depend on the weights, w. If the weights - represent the inverse of the standard-deviation of y, then a good s - value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is - the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if - weights are supplied. s = 0.0 (interpolating) if no weights are - supplied. - t : int - The knots needed for task=-1. If given then task is automatically set - to -1. - full_output : bool - If non-zero, then return optional outputs. - per : bool - If non-zero, data points are considered periodic with period x[m-1] - - x[0] and a smooth periodic spline approximation is returned. Values of - y[m-1] and w[m-1] are not used. - quiet : bool - Non-zero to suppress messages. - - Returns - ------- - tck : tuple - (t,c,k) a tuple containing the vector of knots, the B-spline - coefficients, and the degree of the spline. - fp : array, optional - The weighted sum of squared residuals of the spline approximation. - ier : int, optional - An integer flag about splrep success. Success is indicated if ier<=0. - If ier in [1,2,3] an error occurred but was not raised. Otherwise an - error is raised. - msg : str, optional - A message corresponding to the integer flag, ier. - - Notes - ----- - - See splev for evaluation of the spline and its derivatives. 
- - See Also - -------- - - UnivariateSpline, BivariateSpline - splprep, splev, sproot, spalde, splint - bisplrep, bisplev - - References - ---------- - - Based on algorithms described in [1], [2], [3], and [4]: - - .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and - integration of experimental data using spline functions", - J.Comp.Appl.Maths 1 (1975) 165-184. - .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular - grid while using spline functions", SIAM J.Numer.Anal. 19 (1982) - 1286-1304. - .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline - functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981. - .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on - Numerical Analysis, Oxford University Press, 1993. - - Examples - -------- - - >>> x = linspace(0, 10, 10) - >>> y = sin(x) - >>> tck = splrep(x, y) - >>> x2 = linspace(0, 10, 200) - >>> y2 = splev(x2, tck) - >>> plot(x, y, 'o', x2, y2) - - """ - if task<=0: - _curfit_cache = {} - x,y=map(myasarray,[x,y]) - m=len(x) - if w is None: - w=ones(m,float) - if s is None: s = 0.0 - else: - w=myasarray(w) - if s is None: s = m-sqrt(2*m) - if not len(w) == m: - raise TypeError('len(w)=%d is not equal to m=%d' % (len(w),m)) - if (m != len(y)) or (m != len(w)): - raise TypeError('Lengths of the first three arguments (x,y,w) must be equal') - if not (1 <= k <= 5): - raise TypeError('Given degree of the spline (k=%d) is not supported. 
(1<=k<=5)' % k) - if m <= k: - raise TypeError('m > k must hold') - if xb is None: xb=x[0] - if xe is None: xe=x[-1] - if not (-1 <= task <= 1): - raise TypeError('task must be -1, 0 or 1') - if t is not None: - task = -1 - if task == -1: - if t is None: - raise TypeError('Knots must be given for task=-1') - numknots = len(t) - _curfit_cache['t'] = empty((numknots + 2*k+2,),float) - _curfit_cache['t'][k+1:-k-1] = t - nest = len(_curfit_cache['t']) - elif task == 0: - if per: - nest = max(m+2*k,2*k+3) - else: - nest = max(m+k+1,2*k+3) - t = empty((nest,),float) - _curfit_cache['t'] = t - if task <= 0: - if per: _curfit_cache['wrk'] = empty((m*(k+1)+nest*(8+5*k),),float) - else: _curfit_cache['wrk'] = empty((m*(k+1)+nest*(7+3*k),),float) - _curfit_cache['iwrk'] = empty((nest,),int32) - try: - t=_curfit_cache['t'] - wrk=_curfit_cache['wrk'] - iwrk=_curfit_cache['iwrk'] - except KeyError: - raise TypeError("must call with task=1 only after" - " call with task=0,-1") - if not per: - n,c,fp,ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk, xb, xe, k, s) - else: - n,c,fp,ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s) - tck = (t[:n],c[:n],k) - if ier<=0 and not quiet: - print _iermess[ier][0] - print "\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s) - if ier>0 and not full_output: - if ier in [1,2,3]: - print "Warning: "+_iermess[ier][0] - else: - try: - raise _iermess[ier][1](_iermess[ier][0]) - except KeyError: - raise _iermess['unknown'][1](_iermess['unknown'][0]) - if full_output: - try: - return tck,fp,ier,_iermess[ier][0] - except KeyError: - return tck,fp,ier,_iermess['unknown'][0] - else: - return tck - -def _ntlist(l): # return non-trivial list - return l - #if len(l)>1: return l - #return l[0] - -def splev(x, tck, der=0, ext=0): - """ - Evaluate a B-spline or its derivatives. - - Given the knots and coefficients of a B-spline representation, evaluate - the value of the smoothing polynomial and its derivatives. 
This is a - wrapper around the FORTRAN routines splev and splder of FITPACK. - - Parameters - ---------- - x : array_like - A 1-D array of points at which to return the value of the smoothed - spline or its derivatives. If `tck` was returned from `splprep`, - then the parameter values, u should be given. - tck : tuple - A sequence of length 3 returned by `splrep` or `splprep` containing - the knots, coefficients, and degree of the spline. - der : int - The order of derivative of the spline to compute (must be less than - or equal to k). - ext : int - Controls the value returned for elements of ``x`` not in the - interval defined by the knot sequence. - - * if ext=0, return the extrapolated value. - * if ext=1, return 0 - * if ext=2, raise a ValueError - - The default value is 0. - - Returns - ------- - y : ndarray or list of ndarrays - An array of values representing the spline function evaluated at - the points in ``x``. If `tck` was returned from splrep, then this - is a list of arrays representing the curve in N-dimensional space. - - See Also - -------- - splprep, splrep, sproot, spalde, splint - bisplrep, bisplev - - References - ---------- - .. [1] C. de Boor, "On calculating with b-splines", J. Approximation - Theory, 6, p.50-62, 1972. - .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths - Applics, 10, p.134-149, 1972. - .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs - on Numerical Analysis, Oxford University Press, 1993. 
- - """ - t,c,k = tck - try: - c[0][0] - parametric = True - except: - parametric = False - if parametric: - return map(lambda c, x=x, t=t, k=k, der=der : splev(x, [t,c,k], der), c) - else: - if not (0 <= der <= k): - raise ValueError("0<=der=%d<=k=%d must hold"%(der,k)) - if not ext in (0,1,2): - raise ValueError("ext not in (0, 1, 2)") - - x = myasarray(x) - y, ier =_fitpack._spl_(x, der, t, c, k, ext) - if ier == 10: - raise ValueError("Invalid input data") - if ier == 1: - raise ValueError("Found x value not in the domain") - if ier: - raise TypeError("An error occurred") - if len(y) > 1: - return y - return y[0] - -def splint(a,b,tck,full_output=0): - """ - Evaluate the definite integral of a B-spline. - - Given the knots and coefficients of a B-spline, evaluate the definite - integral of the smoothing polynomial between two given points. - - Parameters - ---------- - a, b : float - The end-points of the integration interval. - tck : tuple - A tuple (t,c,k) containing the vector of knots, the B-spline - coefficients, and the degree of the spline (see `splev`). - full_output : int, optional - Non-zero to return optional output. - - Returns - ------- - integral : float - The resulting integral. - wrk : ndarray - An array containing the integrals of the normalized B-splines - defined on the set of knots. - - See Also - -------- - splprep, splrep, sproot, spalde, splev - bisplrep, bisplev - UnivariateSpline, BivariateSpline - - References - ---------- - .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines", - J. Inst. Maths Applics, 17, p.37-41, 1976. - .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs - on Numerical Analysis, Oxford University Press, 1993. 
- - """ - t,c,k=tck - try: - c[0][0] - parametric = True - except: - parametric = False - if parametric: - return _ntlist(map(lambda c,a=a,b=b,t=t,k=k:splint(a,b,[t,c,k]),c)) - else: - aint,wrk=_fitpack._splint(t,c,k,a,b) - if full_output: return aint,wrk - else: return aint - -def sproot(tck,mest=10): - """ - Find the roots of a cubic B-spline. - - Given the knots (>=8) and coefficients of a cubic B-spline return the - roots of the spline. - - Parameters - ---------- - tck : tuple - A tuple (t,c,k) containing the vector of knots, - the B-spline coefficients, and the degree of the spline. - The number of knots must be >= 8. - The knots must be a montonically increasing sequence. - mest : int - An estimate of the number of zeros (Default is 10). - - Returns - ------- - zeros : ndarray - An array giving the roots of the spline. - - See also - -------- - splprep, splrep, splint, spalde, splev - bisplrep, bisplev - UnivariateSpline, BivariateSpline - - - References - ---------- - .. [1] C. de Boor, "On calculating with b-splines", J. Approximation - Theory, 6, p.50-62, 1972. - .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths - Applics, 10, p.134-149, 1972. - .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs - on Numerical Analysis, Oxford University Press, 1993. - - """ - t,c,k=tck - if k==4: t=t[1:-1] - if k==5: t=t[2:-2] - try: - c[0][0] - parametric = True - except: - parametric = False - if parametric: - return _ntlist(map(lambda c,t=t,k=k,mest=mest:sproot([t,c,k],mest),c)) - else: - if len(t)<8: - raise TypeError("The number of knots %d>=8" % len(t)) - z,ier=_fitpack._sproot(t,c,k,mest) - if ier==10: - raise TypeError("Invalid input data. t1<=..<=t41: - return map(lambda x,tck=tck:spalde(x,tck),x) - d,ier=_fitpack._spalde(t,c,k,x[0]) - if ier==0: return d - if ier==10: - raise TypeError("Invalid input data. 
t(k)<=x<=t(n-k+1) must hold.") - raise TypeError("Unknown error") - -#def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None, -# full_output=0,nest=None,per=0,quiet=1): - -_surfit_cache = {'tx': array([],float),'ty': array([],float), - 'wrk': array([],float), 'iwrk':array([],int32)} -def bisplrep(x,y,z,w=None,xb=None,xe=None,yb=None,ye=None,kx=3,ky=3,task=0, - s=None,eps=1e-16,tx=None,ty=None,full_output=0, - nxest=None,nyest=None,quiet=1): - """ - Find a bivariate B-spline representation of a surface. - - Given a set of data points (x[i], y[i], z[i]) representing a surface - z=f(x,y), compute a B-spline representation of the surface. Based on - the routine SURFIT from FITPACK. - - Parameters - ---------- - x, y, z : ndarray - Rank-1 arrays of data points. - w : ndarray, optional - Rank-1 array of weights. By default ``w=np.ones(len(x))``. - xb, xe : float, optional - End points of approximation interval in `x`. - By default ``xb = x.min(), xe=x.max()``. - yb, ye : float, optional - End points of approximation interval in `y`. - By default ``yb=y.min(), ye = y.max()``. - kx, ky : int, optional - The degrees of the spline (1 <= kx, ky <= 5). - Third order (kx=ky=3) is recommended. - task : int, optional - If task=0, find knots in x and y and coefficients for a given - smoothing factor, s. - If task=1, find knots and coefficients for another value of the - smoothing factor, s. bisplrep must have been previously called - with task=0 or task=1. - If task=-1, find coefficients for a given set of knots tx, ty. - s : float, optional - A non-negative smoothing factor. If weights correspond - to the inverse of the standard-deviation of the errors in z, - then a good s-value should be found in the range - ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x). - eps : float, optional - A threshold for determining the effective rank of an - over-determined linear system of equations (0 < eps < 1). - `eps` is not likely to need changing. 
- tx, ty : ndarray, optional - Rank-1 arrays of the knots of the spline for task=-1 - full_output : int, optional - Non-zero to return optional outputs. - nxest, nyest : int, optional - Over-estimates of the total number of knots. If None then - ``nxest = max(kx+sqrt(m/2),2*kx+3)``, - ``nyest = max(ky+sqrt(m/2),2*ky+3)``. - quiet : int, optional - Non-zero to suppress printing of messages. - - Returns - ------- - tck : array_like - A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and - coefficients (c) of the bivariate B-spline representation of the - surface along with the degree of the spline. - fp : ndarray - The weighted sum of squared residuals of the spline approximation. - ier : int - An integer flag about splrep success. Success is indicated if - ier<=0. If ier in [1,2,3] an error occurred but was not raised. - Otherwise an error is raised. - msg : str - A message corresponding to the integer flag, ier. - - See Also - -------- - splprep, splrep, splint, sproot, splev - UnivariateSpline, BivariateSpline - - Notes - ----- - See `bisplev` to evaluate the value of the B-spline given its tck - representation. - - References - ---------- - .. [1] Dierckx P.:An algorithm for surface fitting with spline functions - Ima J. Numer. Anal. 1 (1981) 267-283. - .. [2] Dierckx P.:An algorithm for surface fitting with spline functions - report tw50, Dept. Computer Science,K.U.Leuven, 1980. - .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on - Numerical Analysis, Oxford University Press, 1993. - - """ - x,y,z=map(myasarray,[x,y,z]) - x,y,z=map(ravel,[x,y,z]) # ensure 1-d arrays. 
- m=len(x) - if not (m==len(y)==len(z)): - raise TypeError('len(x)==len(y)==len(z) must hold.') - if w is None: w=ones(m,float) - else: w=myasarray(w) - if not len(w) == m: - raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m)) - if xb is None: xb=x.min() - if xe is None: xe=x.max() - if yb is None: yb=y.min() - if ye is None: ye=y.max() - if not (-1<=task<=1): - raise TypeError('task must be -1, 0 or 1') - if s is None: s=m-sqrt(2*m) - if tx is None and task==-1: - raise TypeError('Knots_x must be given for task=-1') - if tx is not None: _surfit_cache['tx']=myasarray(tx) - nx=len(_surfit_cache['tx']) - if ty is None and task==-1: - raise TypeError('Knots_y must be given for task=-1') - if ty is not None: _surfit_cache['ty']=myasarray(ty) - ny=len(_surfit_cache['ty']) - if task==-1 and nx<2*kx+2: - raise TypeError('There must be at least 2*kx+2 knots_x for task=-1') - if task==-1 and ny<2*ky+2: - raise TypeError('There must be at least 2*ky+2 knots_x for task=-1') - if not ((1<=kx<=5) and (1<=ky<=5)): - raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not supported. 
(1<=k<=5)' % (kx,ky)) - if m<(kx+1)*(ky+1): - raise TypeError('m >= (kx+1)(ky+1) must hold') - if nxest is None: nxest=int(kx+sqrt(m/2)) - if nyest is None: nyest=int(ky+sqrt(m/2)) - nxest,nyest=max(nxest,2*kx+3),max(nyest,2*ky+3) - if task>=0 and s==0: - nxest=int(kx+sqrt(3*m)) - nyest=int(ky+sqrt(3*m)) - if task==-1: - _surfit_cache['tx']=myasarray(tx) - _surfit_cache['ty']=myasarray(ty) - tx,ty=_surfit_cache['tx'],_surfit_cache['ty'] - wrk=_surfit_cache['wrk'] - iwrk=_surfit_cache['iwrk'] - u,v,km,ne=nxest-kx-1,nyest-ky-1,max(kx,ky)+1,max(nxest,nyest) - bx,by=kx*v+ky+1,ky*u+kx+1 - b1,b2=bx,bx+v-ky - if bx>by: b1,b2=by,by+u-kx - try: - lwrk1=int32(u*v*(2+b1+b2)+2*(u+v+km*(m+ne)+ne-kx-ky)+b2+1) - lwrk2=int32(u*v*(b2+1)+b2) - except OverflowError: - raise OverflowError("Too many data points to interpolate") - tx,ty,c,o = _fitpack._surfit(x,y,z,w,xb,xe,yb,ye,kx,ky,task,s,eps, - tx,ty,nxest,nyest,wrk,lwrk1,lwrk2) - _curfit_cache['tx']=tx - _curfit_cache['ty']=ty - _curfit_cache['wrk']=o['wrk'] - ier,fp=o['ier'],o['fp'] - tck=[tx,ty,c,kx,ky] - - ierm=min(11,max(-3,ier)) - if ierm<=0 and not quiet: - print _iermess2[ierm][0] - print "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), - len(ty),m,fp,s) - if ierm>0 and not full_output: - if ier in [1,2,3,4,5]: - print "Warning: "+_iermess2[ierm][0] - print "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), - len(ty),m,fp,s) - else: - try: - raise _iermess2[ierm][1](_iermess2[ierm][0]) - except KeyError: - raise _iermess2['unknown'][1](_iermess2['unknown'][0]) - if full_output: - try: - return tck,fp,ier,_iermess2[ierm][0] - except KeyError: - return tck,fp,ier,_iermess2['unknown'][0] - else: - return tck - -def bisplev(x,y,tck,dx=0,dy=0): - """ - Evaluate a bivariate B-spline and its derivatives. - - Return a rank-2 array of spline function values (or spline derivative - values) at points given by the cross-product of the rank-1 arrays x and - y. 
In special cases, return an array or just a float if either x or y or - both are floats. Based on BISPEV from FITPACK. - - Parameters - ---------- - x, y : ndarray - Rank-1 arrays specifying the domain over which to evaluate the - spline or its derivative. - tck : tuple - A sequence of length 5 returned by `bisplrep` containing the knot - locations, the coefficients, and the degree of the spline: - [tx, ty, c, kx, ky]. - dx, dy : int, optional - The orders of the partial derivatives in `x` and `y` respectively. - - Returns - ------- - vals : ndarray - The B-spline or its derivative evaluated over the set formed by - the cross-product of `x` and `y`. - - See Also - -------- - splprep, splrep, splint, sproot, splev - UnivariateSpline, BivariateSpline - - Notes - ----- - See `bisplrep` to generate the `tck` representation. - - References - ---------- - .. [1] Dierckx P. : An algorithm for surface fitting - with spline functions - Ima J. Numer. Anal. 1 (1981) 267-283. - .. [2] Dierckx P. : An algorithm for surface fitting - with spline functions - report tw50, Dept. Computer Science,K.U.Leuven, 1980. - .. [3] Dierckx P. : Curve and surface fitting with splines, - Monographs on Numerical Analysis, Oxford University Press, 1993. - - """ - tx,ty,c,kx,ky=tck - if not (0<=dx1: return z - if len(z[0])>1: return z[0] - return z[0][0] - -def dblint(xa,xb,ya,yb,tck): - """Evaluate the integral of a spline over area [xa,xb] x [ya,yb]. - - Parameters - ---------- - xa, xb : float - The end-points of the x integration interval. - ya, yb : float - The end-points of the y integration interval. - tck : list [tx, ty, c, kx, ky] - A sequence of length 5 returned by bisplrep containing the knot - locations tx, ty, the coefficients c, and the degrees kx, ky - of the spline. - - Returns - ------- - integ : float - The value of the resulting integral. 
- """ - tx,ty,c,kx,ky=tck - return dfitpack.dblint(tx,ty,c,kx,ky,xb,xe,yb,ye) - -def insert(x,tck,m=1,per=0): - """ - Insert knots into a B-spline. - - Given the knots and coefficients of a B-spline representation, create a - new B-spline with a knot inserted m times at point x. - This is a wrapper around the FORTRAN routine insert of FITPACK. - - Parameters - ---------- - x (u) : array_like - A 1-D point at which to insert a new knot(s). If `tck` was returned - from `splprep`, then the parameter values, u should be given. - tck : tuple - A tuple (t,c,k) returned by `splrep` or `splprep` containing - the vector of knots, the B-spline coefficients, - and the degree of the spline. - m : int, optional - The number of times to insert the given knot (its multiplicity). - Default is 1. - per : int, optional - If non-zero, input spline is considered periodic. - - Returns - ------- - tck : tuple - A tuple (t,c,k) containing the vector of knots, the B-spline - coefficients, and the degree of the new spline. - ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline. 
- In case of a periodic spline (`per` != 0) there must be - either at least k interior knots t(j) satisfying ``t(k+1)0: - runtest=map(int,sys.argv[1:]) - put=sys.stdout.write - def norm2(x): - return dot(transpose(x),x) - def f1(x,d=0): - if d is None: return "sin" - if x is None: return "sin(x)" - if d%4 == 0: return sin(x) - if d%4 == 1: return cos(x) - if d%4 == 2: return -sin(x) - if d%4 == 3: return -cos(x) - def f2(x,y=0,dx=0,dy=0): - if x is None: return "sin(x+y)" - d=dx+dy - if d%4 == 0: return sin(x+y) - if d%4 == 1: return cos(x+y) - if d%4 == 2: return -sin(x+y) - if d%4 == 3: return -cos(x+y) - def test1(f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None): - if xb is None: xb=a - if xe is None: xe=b - x=a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes - x1=a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes - v,v1=f(x),f(x1) - nk=[] - for k in range(1,6): - tck=splrep(x,v,s=s,per=per,k=k,xe=xe) - if at:t=tck[0][k:-k] - else: t=x1 - nd=[] - for d in range(k+1): - nd.append(norm2(f(t,d)-splev(t,tck,d))) - nk.append(nd) - print "\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]"%(f(None), - `round(xb,3)`,`round(xe,3)`, - `round(a,3)`,`round(b,3)`) - if at: str="at knots" - else: str="at the middle of nodes" - print " per=%d s=%s Evaluation %s"%(per,`s`,str) - print " k : |f-s|^2 |f'-s'| |f''-.. |f'''-. 
|f''''- |f'''''" - k=1 - for l in nk: - put(' %d : '%k) - for r in l: - put(' %.1e'%r) - put('\n') - k=k+1 - def test2(f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, - ia=0,ib=2*pi,dx=0.2*pi): - if xb is None: xb=a - if xe is None: xe=b - x=a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes - v=f(x) - nk=[] - for k in range(1,6): - tck=splrep(x,v,s=s,per=per,k=k,xe=xe) - nk.append([splint(ia,ib,tck),spalde(dx,tck)]) - print "\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]"%(f(None), - `round(xb,3)`,`round(xe,3)`, - `round(a,3)`,`round(b,3)`) - print " per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s"%(per,`s`,N,`round(ia,3)`,`round(ib,3)`,`round(dx,3)`) - print " k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k" - k=1 - for r in nk: - if r[0]<0: sr='-' - else: sr=' ' - put(" %d %s%.8f %.1e "%(k,sr,abs(r[0]), - abs(r[0]-(f(ib,-1)-f(ia,-1))))) - d=0 - for dr in r[1]: - put(" %.1e "%(abs(1-dr/f(dx,d)))) - d=d+1 - put("\n") - k=k+1 - def test3(f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, - ia=0,ib=2*pi,dx=0.2*pi): - if xb is None: xb=a - if xe is None: xe=b - x=a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes - v=f(x) - nk=[] - print " k : Roots of s(x) approx %s x in [%s,%s]:"%\ - (f(None),`round(a,3)`,`round(b,3)`) - for k in range(1,6): - tck=splrep(x,v,s=s,per=per,k=k,xe=xe) - print ' %d : %s'%(k,`sproot(tck).tolist()`) - def test4(f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, - ia=0,ib=2*pi,dx=0.2*pi): - if xb is None: xb=a - if xe is None: xe=b - x=a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes - x1=a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes - v,v1=f(x),f(x1) - nk=[] - print " u = %s N = %d"%(`round(dx,3)`,N) - print " k : [x(u), %s(x(u))] Error of splprep Error of splrep "%(f(0,None)) - for k in range(1,6): - tckp,u=splprep([x,v],s=s,per=per,k=k,nest=-1) - tck=splrep(x,v,s=s,per=per,k=k) - uv=splev(dx,tckp) - print " %d : %s %.1e %.1e"%\ - (k,`map(lambda x:round(x,3),uv)`, - abs(uv[1]-f(uv[0])), - 
abs(splev(uv[0],tck)-f(uv[0]))) - print "Derivatives of parametric cubic spline at u (first function):" - k=3 - tckp,u=splprep([x,v],s=s,per=per,k=k,nest=-1) - for d in range(1,k+1): - uv=splev(dx,tckp,d) - put(" %s "%(`uv[0]`)) - print - def makepairs(x,y): - x,y=map(myasarray,[x,y]) - xy=array(map(lambda x,y:map(None,len(y)*[x],y),x,len(x)*[y])) - sh=xy.shape - xy.shape=sh[0]*sh[1],sh[2] - return transpose(xy) - def test5(f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0): - x=xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx) - y=yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny) - xy=makepairs(x,y) - tck=bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky) - tt=[tck[0][kx:-kx],tck[1][ky:-ky]] - t2=makepairs(tt[0],tt[1]) - v1=bisplev(tt[0],tt[1],tck) - v2=f2(t2[0],t2[1]) - v2.shape=len(tt[0]),len(tt[1]) - print norm2(ravel(v1-v2)) - if 1 in runtest: - print """\ -****************************************** -\tTests of splrep and splev -******************************************""" - test1(s=1e-6) - test1() - test1(at=1) - test1(per=1) - test1(per=1,at=1) - test1(b=1.5*pi) - test1(b=1.5*pi,xe=2*pi,per=1,s=1e-1) - if 2 in runtest: - print """\ -****************************************** -\tTests of splint and spalde -******************************************""" - test2() - test2(per=1) - test2(ia=0.2*pi,ib=pi) - test2(ia=0.2*pi,ib=pi,N=50) - if 3 in runtest: - print """\ -****************************************** -\tTests of sproot -******************************************""" - test3(a=0,b=15) - print "Note that if k is not 3, some roots are missed or incorrect" - if 4 in runtest: - print """\ -****************************************** -\tTests of splprep, splrep, and splev -******************************************""" - test4() - test4(N=50) - if 5 in runtest: - print """\ -****************************************** -\tTests of bisplrep, bisplev -******************************************""" - test5() diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/bispeu.f 
b/scipy-0.10.1/scipy/interpolate/fitpack/bispeu.f deleted file mode 100644 index 29d2d9b04c..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/bispeu.f +++ /dev/null @@ -1,64 +0,0 @@ - subroutine bispeu(tx,nx,ty,ny,c,kx,ky,x,y,z,m,wrk,lwrk, ier) -c subroutine bispeu evaluates on a set of points (x(i),y(i)),i=1,...,m -c a bivariate spline s(x,y) of degrees kx and ky, given in the -c b-spline representation. -c -c calling sequence: -c call bispeu(tx,nx,ty,ny,c,kx,ky,x,y,z,m,wrk,lwrk, -c * iwrk,kwrk,ier) -c -c input parameters: -c tx : real array, length nx, which contains the position of the -c knots in the x-direction. -c nx : integer, giving the total number of knots in the x-direction -c ty : real array, length ny, which contains the position of the -c knots in the y-direction. -c ny : integer, giving the total number of knots in the y-direction -c c : real array, length (nx-kx-1)*(ny-ky-1), which contains the -c b-spline coefficients. -c kx,ky : integer values, giving the degrees of the spline. -c x : real array of dimension (mx). -c y : real array of dimension (my). -c m : on entry m must specify the number points. m >= 1. -c wrk : real array of dimension lwrk. used as workspace. -c lwrk : integer, specifying the dimension of wrk. -c lwrk >= kx+ky+2 -c -c output parameters: -c z : real array of dimension m. -c on succesful exit z(i) contains the value of s(x,y) -c at the point (x(i),y(i)), i=1,...,m. -c ier : integer error flag -c ier=0 : normal return -c ier=10: invalid input data (see restrictions) -c -c restrictions: -c m >=1, lwrk>=mx*(kx+1)+my*(ky+1), kwrk>=mx+my -c tx(kx+1) <= x(i-1) <= x(i) <= tx(nx-kx), i=2,...,mx -c ty(ky+1) <= y(j-1) <= y(j) <= ty(ny-ky), j=2,...,my -c -c other subroutines required: -c fpbisp,fpbspl -c -c ..scalar arguments.. - integer nx,ny,kx,ky,m,lwrk,kwrk,ier -c ..array arguments.. - real*8 tx(nx),ty(ny),c((nx-kx-1)*(ny-ky-1)),x(m),y(m),z(m), - * wrk(lwrk) -c ..local scalars.. - integer iwrk(2) - integer i,iw,lwest -c .. 
-c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 - lwest = kx+ky+2 - if (lwrk.lt.lwest) go to 100 - if (m.lt.1) go to 100 - ier = 0 - do 10 i=1,m - call fpbisp(tx,nx,ty,ny,c,kx,ky,x(i),1,y(i),1,z(i),wrk(1), - * wrk(kx+2),iwrk(1),iwrk(2)) - 10 continue - 100 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/bispev.f b/scipy-0.10.1/scipy/interpolate/fitpack/bispev.f deleted file mode 100644 index 9204b55b51..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/bispev.f +++ /dev/null @@ -1,103 +0,0 @@ - subroutine bispev(tx,nx,ty,ny,c,kx,ky,x,mx,y,my,z,wrk,lwrk, - * iwrk,kwrk,ier) -c subroutine bispev evaluates on a grid (x(i),y(j)),i=1,...,mx; j=1,... -c ,my a bivariate spline s(x,y) of degrees kx and ky, given in the -c b-spline representation. -c -c calling sequence: -c call bispev(tx,nx,ty,ny,c,kx,ky,x,mx,y,my,z,wrk,lwrk, -c * iwrk,kwrk,ier) -c -c input parameters: -c tx : real array, length nx, which contains the position of the -c knots in the x-direction. -c nx : integer, giving the total number of knots in the x-direction -c ty : real array, length ny, which contains the position of the -c knots in the y-direction. -c ny : integer, giving the total number of knots in the y-direction -c c : real array, length (nx-kx-1)*(ny-ky-1), which contains the -c b-spline coefficients. -c kx,ky : integer values, giving the degrees of the spline. -c x : real array of dimension (mx). -c before entry x(i) must be set to the x co-ordinate of the -c i-th grid point along the x-axis. -c tx(kx+1)<=x(i-1)<=x(i)<=tx(nx-kx), i=2,...,mx. -c mx : on entry mx must specify the number of grid points along -c the x-axis. mx >=1. -c y : real array of dimension (my). -c before entry y(j) must be set to the y co-ordinate of the -c j-th grid point along the y-axis. -c ty(ky+1)<=y(j-1)<=y(j)<=ty(ny-ky), j=2,...,my. 
-c my : on entry my must specify the number of grid points along -c the y-axis. my >=1. -c wrk : real array of dimension lwrk. used as workspace. -c lwrk : integer, specifying the dimension of wrk. -c lwrk >= mx*(kx+1)+my*(ky+1) -c iwrk : integer array of dimension kwrk. used as workspace. -c kwrk : integer, specifying the dimension of iwrk. kwrk >= mx+my. -c -c output parameters: -c z : real array of dimension (mx*my). -c on succesful exit z(my*(i-1)+j) contains the value of s(x,y) -c at the point (x(i),y(j)),i=1,...,mx;j=1,...,my. -c ier : integer error flag -c ier=0 : normal return -c ier=10: invalid input data (see restrictions) -c -c restrictions: -c mx >=1, my >=1, lwrk>=mx*(kx+1)+my*(ky+1), kwrk>=mx+my -c tx(kx+1) <= x(i-1) <= x(i) <= tx(nx-kx), i=2,...,mx -c ty(ky+1) <= y(j-1) <= y(j) <= ty(ny-ky), j=2,...,my -c -c other subroutines required: -c fpbisp,fpbspl -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer nx,ny,kx,ky,mx,my,lwrk,kwrk,ier -c ..array arguments.. - integer iwrk(kwrk) - real*8 tx(nx),ty(ny),c((nx-kx-1)*(ny-ky-1)),x(mx),y(my),z(mx*my), - * wrk(lwrk) -c ..local scalars.. - integer i,iw,lwest -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. 
- ier = 10 - lwest = (kx+1)*mx+(ky+1)*my - if(lwrk.lt.lwest) go to 100 - if(kwrk.lt.(mx+my)) go to 100 - if (mx.lt.1) go to 100 - if (mx.eq.1) go to 30 - go to 10 - 10 do 20 i=2,mx - if(x(i).lt.x(i-1)) go to 100 - 20 continue - 30 if (my.lt.1) go to 100 - if (my.eq.1) go to 60 - go to 40 - 40 do 50 i=2,my - if(y(i).lt.y(i-1)) go to 100 - 50 continue - 60 ier = 0 - iw = mx*(kx+1)+1 - call fpbisp(tx,nx,ty,ny,c,kx,ky,x,mx,y,my,z,wrk(1),wrk(iw), - * iwrk(1),iwrk(mx+1)) - 100 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/clocur.f b/scipy-0.10.1/scipy/interpolate/fitpack/clocur.f deleted file mode 100644 index 544ce078e4..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/clocur.f +++ /dev/null @@ -1,352 +0,0 @@ - subroutine clocur(iopt,ipar,idim,m,u,mx,x,w,k,s,nest,n,t,nc,c,fp, - * wrk,lwrk,iwrk,ier) -c given the ordered set of m points x(i) in the idim-dimensional space -c with x(1)=x(m), and given also a corresponding set of strictly in- -c creasing values u(i) and the set of positive numbers w(i),i=1,2,...,m -c subroutine clocur determines a smooth approximating closed spline -c curve s(u), i.e. -c x1 = s1(u) -c x2 = s2(u) u(1) <= u <= u(m) -c ......... -c xidim = sidim(u) -c with sj(u),j=1,2,...,idim periodic spline functions of degree k with -c common knots t(j),j=1,2,...,n. -c if ipar=1 the values u(i),i=1,2,...,m must be supplied by the user. -c if ipar=0 these values are chosen automatically by clocur as -c v(1) = 0 -c v(i) = v(i-1) + dist(x(i),x(i-1)) ,i=2,3,...,m -c u(i) = v(i)/v(m) ,i=1,2,...,m -c if iopt=-1 clocur calculates the weighted least-squares closed spline -c curve according to a given set of knots. -c if iopt>=0 the number of knots of the splines sj(u) and the position -c t(j),j=1,2,...,n is chosen automatically by the routine. the smooth- -c ness of s(u) is then achieved by minimalizing the discontinuity -c jumps of the k-th derivative of s(u) at the knots t(j),j=k+2,k+3,..., -c n-k-1. 
the amount of smoothness is determined by the condition that -c f(p)=sum((w(i)*dist(x(i),s(u(i))))**2) be <= s, with s a given non- -c negative constant, called the smoothing factor. -c the fit s(u) is given in the b-spline representation and can be -c evaluated by means of subroutine curev. -c -c calling sequence: -c call clocur(iopt,ipar,idim,m,u,mx,x,w,k,s,nest,n,t,nc,c, -c * fp,wrk,lwrk,iwrk,ier) -c -c parameters: -c iopt : integer flag. on entry iopt must specify whether a weighted -c least-squares closed spline curve (iopt=-1) or a smoothing -c closed spline curve (iopt=0 or 1) must be determined. if -c iopt=0 the routine will start with an initial set of knots -c t(i)=u(1)+(u(m)-u(1))*(i-k-1),i=1,2,...,2*k+2. if iopt=1 the -c routine will continue with the knots found at the last call. -c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c ipar : integer flag. on entry ipar must specify whether (ipar=1) -c the user will supply the parameter values u(i),or whether -c (ipar=0) these values are to be calculated by clocur. -c unchanged on exit. -c idim : integer. on entry idim must specify the dimension of the -c curve. 0 < idim < 11. -c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m > 1. unchanged on exit. -c u : real array of dimension at least (m). in case ipar=1,before -c entry, u(i) must be set to the i-th value of the parameter -c variable u for i=1,2,...,m. these values must then be -c supplied in strictly ascending order and will be unchanged -c on exit. in case ipar=0, on exit,the array will contain the -c values u(i) as determined by clocur. -c mx : integer. on entry mx must specify the actual dimension of -c the array x as declared in the calling (sub)program. mx must -c not be too small (see x). unchanged on exit. -c x : real array of dimension at least idim*m. 
-c before entry, x(idim*(i-1)+j) must contain the j-th coord- -c inate of the i-th data point for i=1,2,...,m and j=1,2,..., -c idim. since first and last data point must coincide it -c means that x(j)=x(idim*(m-1)+j),j=1,2,...,idim. -c unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. w(m) is not used. -c unchanged on exit. see also further comments. -c k : integer. on entry k must specify the degree of the splines. -c 1<=k<=5. it is recommended to use cubic splines (k=3). -c the user is strongly dissuaded from choosing k even,together -c with a small s-value. unchanged on exit. -c s : real.on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments. -c nest : integer. on entry nest must contain an over-estimate of the -c total number of knots of the splines returned, to indicate -c the storage space available to the routine. nest >=2*k+2. -c in most practical situation nest=m/2 will be sufficient. -c always large enough is nest=m+2*k, the number of knots -c needed for interpolation (s=0). unchanged on exit. -c n : integer. -c unless ier = 10 (in case iopt >=0), n will contain the -c total number of knots of the smoothing spline curve returned -c if the computation mode iopt=1 is used this value of n -c should be left unchanged between subsequent calls. -c in case iopt=-1, the value of n must be specified on entry. -c t : real array of dimension at least (nest). -c on succesful exit, this array will contain the knots of the -c spline curve,i.e. the position of the interior knots t(k+2), -c t(k+3),..,t(n-k-1) as well as the position of the additional -c t(1),t(2),..,t(k+1)=u(1) and u(m)=t(n-k),...,t(n) needed for -c the b-spline representation. 
-c if the computation mode iopt=1 is used, the values of t(1), -c t(2),...,t(n) should be left unchanged between subsequent -c calls. if the computation mode iopt=-1 is used, the values -c t(k+2),...,t(n-k-1) must be supplied by the user, before -c entry. see also the restrictions (ier=10). -c nc : integer. on entry nc must specify the actual dimension of -c the array c as declared in the calling (sub)program. nc -c must not be too small (see c). unchanged on exit. -c c : real array of dimension at least (nest*idim). -c on succesful exit, this array will contain the coefficients -c in the b-spline representation of the spline curve s(u),i.e. -c the b-spline coefficients of the spline sj(u) will be given -c in c(n*(j-1)+i),i=1,2,...,n-k-1 for j=1,2,...,idim. -c fp : real. unless ier = 10, fp contains the weighted sum of -c squared residuals of the spline curve returned. -c wrk : real array of dimension at least m*(k+1)+nest*(7+idim+5*k). -c used as working space. if the computation mode iopt=1 is -c used, the values wrk(1),...,wrk(n) should be left unchanged -c between subsequent calls. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. lwrk -c must not be too small (see wrk). unchanged on exit. -c iwrk : integer array of dimension at least (nest). -c used as working space. if the computation mode iopt=1 is -c used,the values iwrk(1),...,iwrk(n) should be left unchanged -c between subsequent calls. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the close curve returned has a residual -c sum of squares fp such that abs(fp-s)/s <= tol with tol a -c relative tolerance set to 0.001 by the program. -c ier=-1 : normal return. the curve returned is an interpolating -c spline curve (fp=0). -c ier=-2 : normal return. the curve returned is the weighted least- -c squares point,i.e. each spline sj(u) is a constant. 
in -c this extreme case fp gives the upper bound fp0 for the -c smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameter nest. -c probably causes : nest too small. if nest is already -c large (say nest > m/2), it may also indicate that s is -c too small -c the approximation returned is the least-squares closed -c curve according to the knots t(1),t(2),...,t(n). (n=nest) -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing curve with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing curve -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, 1<=k<=5, m>1, nest>2*k+2, w(i)>0,i=1,2,...,m -c 0<=ipar<=1, 0=(k+1)*m+nest*(7+idim+5*k), -c nc>=nest*idim, x(j)=x(idim*(m-1)+j), j=1,2,...,idim -c if ipar=0: sum j=1,idim (x(i*idim+j)-x((i-1)*idim+j))**2>0 -c i=1,2,...,m-1. -c if ipar=1: u(1)=0: s>=0 -c if s=0 : nest >= m+2*k -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. 
-c if s is too large, the curve will be too smooth and signal will be -c lost ; if s is too small the curve will pick up too much noise. in -c the extreme cases the program will return an interpolating curve if -c s=0 and the weighted least-squares point if s is very large. -c between these extremes, a properly chosen s will result in a good -c compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c x(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). if nothing is known about the statistical error in x(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the weighted -c least-squares point and the upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximating curve shows more detail) to obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if clocur is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c curve underlying the data. 
but, if the computation mode iopt=1 is -c used, the knots returned may also depend on the s-values at previous -c calls (if these were smaller). therefore, if after a number of -c trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c clocur once more with the selected value for s but now with iopt=0. -c indeed, clocur may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c -c the form of the approximating curve can strongly be affected by -c the choice of the parameter values u(i). if there is no physical -c reason for choosing a particular parameter u, often good results -c will be obtained with the choice of clocur(in case ipar=0), i.e. -c v(1)=0, v(i)=v(i-1)+q(i), i=2,...,m, u(i)=v(i)/v(m), i=1,..,m -c where -c q(i)= sqrt(sum j=1,idim (xj(i)-xj(i-1))**2 ) -c other possibilities for q(i) are -c q(i)= sum j=1,idim (xj(i)-xj(i-1))**2 -c q(i)= sum j=1,idim abs(xj(i)-xj(i-1)) -c q(i)= max j=1,idim abs(xj(i)-xj(i-1)) -c q(i)= 1 -c -c -c other subroutines required: -c fpbacp,fpbspl,fpchep,fpclos,fpdisc,fpgivs,fpknot,fprati,fprota -c -c references: -c dierckx p. : algorithms for smoothing data with periodic and -c parametric splines, computer graphics and image -c processing 20 (1982) 171-184. -c dierckx p. : algorithms for smoothing data with periodic and param- -c etric splines, report tw55, dept. computer science, -c k.u.leuven, 1981. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1987 -c -c .. -c ..scalar arguments.. 
- real*8 s,fp - integer iopt,ipar,idim,m,mx,k,nest,n,nc,lwrk,ier -c ..array arguments.. - real*8 u(m),x(mx),w(m),t(nest),c(nc),wrk(lwrk) - integer iwrk(nest) -c ..local scalars.. - real*8 per,tol,dist - integer i,ia1,ia2,ib,ifp,ig1,ig2,iq,iz,i1,i2,j1,j2,k1,k2,lwest, - * maxit,m1,nmin,ncc,j -c ..function references.. - real*8 sqrt -c we set up the parameters tol and maxit - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 90 - if(ipar.lt.0 .or. ipar.gt.1) go to 90 - if(idim.le.0 .or. idim.gt.10) go to 90 - if(k.le.0 .or. k.gt.5) go to 90 - k1 = k+1 - k2 = k1+1 - nmin = 2*k1 - if(m.lt.2 .or. nest.lt.nmin) go to 90 - ncc = nest*idim - if(mx.lt.m*idim .or. nc.lt.ncc) go to 90 - lwest = m*k1+nest*(7+idim+5*k) - if(lwrk.lt.lwest) go to 90 - i1 = idim - i2 = m*idim - do 5 j=1,idim - if(x(i1).ne.x(i2)) go to 90 - i1 = i1-1 - i2 = i2-1 - 5 continue - if(ipar.ne.0 .or. iopt.gt.0) go to 40 - i1 = 0 - i2 = idim - u(1) = 0. - do 20 i=2,m - dist = 0. - do 10 j1=1,idim - i1 = i1+1 - i2 = i2+1 - dist = dist+(x(i2)-x(i1))**2 - 10 continue - u(i) = u(i-1)+sqrt(dist) - 20 continue - if(u(m).le.0.) go to 90 - do 30 i=2,m - u(i) = u(i)/u(m) - 30 continue - u(m) = 0.1e+01 - 40 if(w(1).le.0.) go to 90 - m1 = m-1 - do 50 i=1,m1 - if(u(i).ge.u(i+1) .or. w(i).le.0.) go to 90 - 50 continue - if(iopt.ge.0) go to 70 - if(n.le.nmin .or. n.gt.nest) go to 90 - per = u(m)-u(1) - j1 = k1 - t(j1) = u(1) - i1 = n-k - t(i1) = u(m) - j2 = j1 - i2 = i1 - do 60 i=1,k - i1 = i1+1 - i2 = i2-1 - j1 = j1+1 - j2 = j2-1 - t(j2) = t(i2)-per - t(i1) = t(j1)+per - 60 continue - call fpchep(u,m,t,n,k,ier) - if (ier.eq.0) go to 80 - go to 90 - 70 if(s.lt.0.) go to 90 - if(s.eq.0. .and. nest.lt.(m+2*k)) go to 90 - ier = 0 -c we partition the working space and determine the spline approximation. 
- 80 ifp = 1 - iz = ifp+nest - ia1 = iz+ncc - ia2 = ia1+nest*k1 - ib = ia2+nest*k - ig1 = ib+nest*k2 - ig2 = ig1+nest*k2 - iq = ig2+nest*k1 - call fpclos(iopt,idim,m,u,mx,x,w,k,s,nest,tol,maxit,k1,k2,n,t, - * ncc,c,fp,wrk(ifp),wrk(iz),wrk(ia1),wrk(ia2),wrk(ib),wrk(ig1), - * wrk(ig2),wrk(iq),iwrk,ier) - 90 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/cocosp.f b/scipy-0.10.1/scipy/interpolate/fitpack/cocosp.f deleted file mode 100644 index 054adf0008..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/cocosp.f +++ /dev/null @@ -1,180 +0,0 @@ - subroutine cocosp(m,x,y,w,n,t,e,maxtr,maxbin,c,sq,sx,bind,wrk, - * lwrk,iwrk,kwrk,ier) -c given the set of data points (x(i),y(i)) and the set of positive -c numbers w(i),i=1,2,...,m, subroutine cocosp determines the weighted -c least-squares cubic spline s(x) with given knots t(j),j=1,2,...,n -c which satisfies the following concavity/convexity conditions -c s''(t(j+3))*e(j) <= 0, j=1,2,...n-6 -c the fit is given in the b-spline representation( b-spline coef- -c ficients c(j),j=1,2,...n-4) and can be evaluated by means of -c subroutine splev. -c -c calling sequence: -c call cocosp(m,x,y,w,n,t,e,maxtr,maxbin,c,sq,sx,bind,wrk, -c * lwrk,iwrk,kwrk,ier) -c -c parameters: -c m : integer. on entry m must specify the number of data points. -c m > 3. unchanged on exit. -c x : real array of dimension at least (m). before entry, x(i) -c must be set to the i-th value of the independent variable x, -c for i=1,2,...,m. these values must be supplied in strictly -c ascending order. unchanged on exit. -c y : real array of dimension at least (m). before entry, y(i) -c must be set to the i-th value of the dependent variable y, -c for i=1,2,...,m. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. unchanged on exit. -c n : integer. 
on entry n must contain the total number of knots -c of the cubic spline. m+4>=n>=8. unchanged on exit. -c t : real array of dimension at least (n). before entry, this -c array must contain the knots of the spline, i.e. the position -c of the interior knots t(5),t(6),...,t(n-4) as well as the -c position of the boundary knots t(1),t(2),t(3),t(4) and t(n-3) -c t(n-2),t(n-1),t(n) needed for the b-spline representation. -c unchanged on exit. see also the restrictions (ier=10). -c e : real array of dimension at least (n). before entry, e(j) -c must be set to 1 if s(x) must be locally concave at t(j+3), -c to (-1) if s(x) must be locally convex at t(j+3) and to 0 -c if no convexity constraint is imposed at t(j+3),j=1,2,..,n-6. -c e(n-5),...,e(n) are not used. unchanged on exit. -c maxtr : integer. on entry maxtr must contain an over-estimate of the -c total number of records in the used tree structure, to indic- -c ate the storage space available to the routine. maxtr >=1 -c in most practical situation maxtr=100 will be sufficient. -c always large enough is -c n-5 n-6 -c maxtr = ( ) + ( ) with l the greatest -c l l+1 -c integer <= (n-6)/2 . unchanged on exit. -c maxbin: integer. on entry maxbin must contain an over-estimate of the -c number of knots where s(x) will have a zero second derivative -c maxbin >=1. in most practical situation maxbin = 10 will be -c sufficient. always large enough is maxbin=n-6. -c unchanged on exit. -c c : real array of dimension at least (n). -c on succesful exit, this array will contain the coefficients -c c(1),c(2),..,c(n-4) in the b-spline representation of s(x) -c sq : real. on succesful exit, sq contains the weighted sum of -c squared residuals of the spline approximation returned. -c sx : real array of dimension at least m. on succesful exit -c this array will contain the spline values s(x(i)),i=1,...,m -c bind : logical array of dimension at least (n). on succesful exit -c this array will indicate the knots where s''(x)=0, i.e. 
-c s''(t(j+3)) .eq. 0 if bind(j) = .true. -c s''(t(j+3)) .ne. 0 if bind(j) = .false., j=1,2,...,n-6 -c wrk : real array of dimension at least m*4+n*7+maxbin*(maxbin+n+1) -c used as working space. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program.lwrk -c must not be too small (see wrk). unchanged on exit. -c iwrk : integer array of dimension at least (maxtr*4+2*(maxbin+1)) -c used as working space. -c kwrk : integer. on entry,kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. kwrk -c must not be too small (see iwrk). unchanged on exit. -c ier : integer. error flag -c ier=0 : succesful exit. -c ier>0 : abnormal termination: no approximation is returned -c ier=1 : the number of knots where s''(x)=0 exceeds maxbin. -c probably causes : maxbin too small. -c ier=2 : the number of records in the tree structure exceeds -c maxtr. -c probably causes : maxtr too small. -c ier=3 : the algoritm finds no solution to the posed quadratic -c programming problem. -c probably causes : rounding errors. -c ier=10 : on entry, the input data are controlled on validity. -c the following restrictions must be satisfied -c m>3, maxtr>=1, maxbin>=1, 8<=n<=m+4,w(i) > 0, -c x(1)=maxtr*4+2*(maxbin+1), -c lwrk>=m*4+n*7+maxbin*(maxbin+n+1), -c the schoenberg-whitney conditions, i.e. there must -c be a subset of data points xx(j) such that -c t(j) < xx(j) < t(j+4), j=1,2,...,n-4 -c if one of these restrictions is found to be violated -c control is immediately repassed to the calling program -c -c -c other subroutines required: -c fpcosp,fpbspl,fpadno,fpdeno,fpseno,fpfrno,fpchec -c -c references: -c dierckx p. : an algorithm for cubic spline fitting with convexity -c constraints, computing 24 (1980) 349-371. -c dierckx p. : an algorithm for least-squares cubic spline fitting -c with convexity and concavity constraints, report tw39, -c dept. 
computer science, k.u.leuven, 1978. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p. dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : march 1978 -c latest update : march 1987. -c -c .. -c ..scalar arguments.. - real*8 sq - integer m,n,maxtr,maxbin,lwrk,kwrk,ier -c ..array arguments.. - real*8 x(m),y(m),w(m),t(n),e(n),c(n),sx(m),wrk(lwrk) - integer iwrk(kwrk) - logical bind(n) -c ..local scalars.. - integer i,ia,ib,ic,iq,iu,iz,izz,ji,jib,jjb,jl,jr,ju,kwest, - * lwest,mb,nm,n6 - real*8 one -c .. -c set constant - one = 0.1e+01 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(m.lt.4 .or. n.lt.8) go to 40 - if(maxtr.lt.1 .or. maxbin.lt.1) go to 40 - lwest = 7*n+m*4+maxbin*(1+n+maxbin) - kwest = 4*maxtr+2*(maxbin+1) - if(lwrk.lt.lwest .or. kwrk.lt.kwest) go to 40 - if(w(1).le.0.) go to 40 - do 10 i=2,m - if(x(i-1).ge.x(i) .or. w(i).le.0.) go to 40 - 10 continue - call fpchec(x,m,t,n,3,ier) - if (ier.eq.0) go to 20 - go to 40 -c set numbers e(i) - 20 n6 = n-6 - do 30 i=1,n6 - if(e(i).gt.0.) e(i) = one - if(e(i).lt.0.) 
e(i) = -one - 30 continue -c we partition the working space and determine the spline approximation - nm = n+maxbin - mb = maxbin+1 - ia = 1 - ib = ia+4*n - ic = ib+nm*maxbin - iz = ic+n - izz = iz+n - iu = izz+n - iq = iu+maxbin - ji = 1 - ju = ji+maxtr - jl = ju+maxtr - jr = jl+maxtr - jjb = jr+maxtr - jib = jjb+mb - call fpcosp(m,x,y,w,n,t,e,maxtr,maxbin,c,sq,sx,bind,nm,mb,wrk(ia), - * - * wrk(ib),wrk(ic),wrk(iz),wrk(izz),wrk(iu),wrk(iq),iwrk(ji), - * iwrk(ju),iwrk(jl),iwrk(jr),iwrk(jjb),iwrk(jib),ier) - 40 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/concon.f b/scipy-0.10.1/scipy/interpolate/fitpack/concon.f deleted file mode 100644 index 1dca38d76d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/concon.f +++ /dev/null @@ -1,233 +0,0 @@ - subroutine concon(iopt,m,x,y,w,v,s,nest,maxtr,maxbin,n,t,c,sq, - * sx,bind,wrk,lwrk,iwrk,kwrk,ier) -c given the set of data points (x(i),y(i)) and the set of positive -c numbers w(i), i=1,2,...,m,subroutine concon determines a cubic spline -c approximation s(x) which satisfies the following local convexity -c constraints s''(x(i))*v(i) <= 0, i=1,2,...,m. -c the number of knots n and the position t(j),j=1,2,...n is chosen -c automatically by the routine in a way that -c sq = sum((w(i)*(y(i)-s(x(i))))**2) be <= s. -c the fit is given in the b-spline representation (b-spline coef- -c ficients c(j),j=1,2,...n-4) and can be evaluated by means of -c subroutine splev. -c -c calling sequence: -c -c call concon(iopt,m,x,y,w,v,s,nest,maxtr,maxbin,n,t,c,sq, -c * sx,bind,wrk,lwrk,iwrk,kwrk,ier) -c -c parameters: -c iopt: integer flag. -c if iopt=0, the routine will start with the minimal number of -c knots to guarantee that the convexity conditions will be -c satisfied. if iopt=1, the routine will continue with the set -c of knots found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt=1 or iopt=0. -c unchanged on exit. 
-c m : integer. on entry m must specify the number of data points. -c m > 3. unchanged on exit. -c x : real array of dimension at least (m). before entry, x(i) -c must be set to the i-th value of the independent variable x, -c for i=1,2,...,m. these values must be supplied in strictly -c ascending order. unchanged on exit. -c y : real array of dimension at least (m). before entry, y(i) -c must be set to the i-th value of the dependent variable y, -c for i=1,2,...,m. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. unchanged on exit. -c v : real array of dimension at least (m). before entry, v(i) -c must be set to 1 if s(x) must be locally concave at x(i), -c to (-1) if s(x) must be locally convex at x(i) and to 0 -c if no convexity constraint is imposed at x(i). -c s : real. on entry s must specify an over-estimate for the -c the weighted sum of squared residuals sq of the requested -c spline. s >=0. unchanged on exit. -c nest : integer. on entry nest must contain an over-estimate of the -c total number of knots of the spline returned, to indicate -c the storage space available to the routine. nest >=8. -c in most practical situation nest=m/2 will be sufficient. -c always large enough is nest=m+4. unchanged on exit. -c maxtr : integer. on entry maxtr must contain an over-estimate of the -c total number of records in the used tree structure, to indic- -c ate the storage space available to the routine. maxtr >=1 -c in most practical situation maxtr=100 will be sufficient. -c always large enough is -c nest-5 nest-6 -c maxtr = ( ) + ( ) with l the greatest -c l l+1 -c integer <= (nest-6)/2 . unchanged on exit. -c maxbin: integer. on entry maxbin must contain an over-estimate of the -c number of knots where s(x) will have a zero second derivative -c maxbin >=1. in most practical situation maxbin = 10 will be -c sufficient. 
always large enough is maxbin=nest-6. -c unchanged on exit. -c n : integer. -c on exit with ier <=0, n will contain the total number of -c knots of the spline approximation returned. if the comput- -c ation mode iopt=1 is used this value of n should be left -c unchanged between subsequent calls. -c t : real array of dimension at least (nest). -c on exit with ier<=0, this array will contain the knots of the -c spline,i.e. the position of the interior knots t(5),t(6),..., -c t(n-4) as well as the position of the additional knots -c t(1)=t(2)=t(3)=t(4)=x(1) and t(n-3)=t(n-2)=t(n-1)=t(n)=x(m) -c needed for the the b-spline representation. -c if the computation mode iopt=1 is used, the values of t(1), -c t(2),...,t(n) should be left unchanged between subsequent -c calls. -c c : real array of dimension at least (nest). -c on succesful exit, this array will contain the coefficients -c c(1),c(2),..,c(n-4) in the b-spline representation of s(x) -c sq : real. unless ier>0 , sq contains the weighted sum of -c squared residuals of the spline approximation returned. -c sx : real array of dimension at least m. on exit with ier<=0 -c this array will contain the spline values s(x(i)),i=1,...,m -c if the computation mode iopt=1 is used, the values of sx(1), -c sx(2),...,sx(m) should be left unchanged between subsequent -c calls. -c bind: logical array of dimension at least nest. on exit with ier<=0 -c this array will indicate the knots where s''(x)=0, i.e. -c s''(t(j+3)) .eq. 0 if bind(j) = .true. -c s''(t(j+3)) .ne. 0 if bind(j) = .false., j=1,2,...,n-6 -c if the computation mode iopt=1 is used, the values of bind(1) -c ,...,bind(n-6) should be left unchanged between subsequent -c calls. -c wrk : real array of dimension at least (m*4+nest*8+maxbin*(maxbin+ -c nest+1)). used as working space. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program.lwrk -c must not be too small (see wrk). unchanged on exit. 
-c iwrk : integer array of dimension at least (maxtr*4+2*(maxbin+1)) -c used as working space. -c kwrk : integer. on entry,kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. kwrk -c must not be too small (see iwrk). unchanged on exit. -c ier : integer. error flag -c ier=0 : normal return, s(x) satisfies the concavity/convexity -c constraints and sq <= s. -c ier<0 : abnormal termination: s(x) satisfies the concavity/ -c convexity constraints but sq > s. -c ier=-3 : the requested storage space exceeds the available -c storage space as specified by the parameter nest. -c probably causes: nest too small. if nest is already -c large (say nest > m/2), it may also indicate that s -c is too small. -c the approximation returned is the least-squares cubic -c spline according to the knots t(1),...,t(n) (n=nest) -c which satisfies the convexity constraints. -c ier=-2 : the maximal number of knots n=m+4 has been reached. -c probably causes: s too small. -c ier=-1 : the number of knots n is less than the maximal number -c m+4 but concon finds that adding one or more knots -c will not further reduce the value of sq. -c probably causes : s too small. -c ier>0 : abnormal termination: no approximation is returned -c ier=1 : the number of knots where s''(x)=0 exceeds maxbin. -c probably causes : maxbin too small. -c ier=2 : the number of records in the tree structure exceeds -c maxtr. -c probably causes : maxtr too small. -c ier=3 : the algoritm finds no solution to the posed quadratic -c programming problem. -c probably causes : rounding errors. -c ier=4 : the minimum number of knots (given by n) to guarantee -c that the concavity/convexity conditions will be -c satisfied is greater than nest. -c probably causes: nest too small. -c ier=5 : the minimum number of knots (given by n) to guarantee -c that the concavity/convexity conditions will be -c satisfied is greater than m+4. 
-c probably causes: strongly alternating convexity and -c concavity conditions. normally the situation can be -c coped with by adding n-m-4 extra data points (found -c by linear interpolation e.g.) with a small weight w(i) -c and a v(i) number equal to zero. -c ier=10 : on entry, the input data are controlled on validity. -c the following restrictions must be satisfied -c 0<=iopt<=1, m>3, nest>=8, s>=0, maxtr>=1, maxbin>=1, -c kwrk>=maxtr*4+2*(maxbin+1), w(i)>0, x(i) < x(i+1), -c lwrk>=m*4+nest*8+maxbin*(maxbin+nest+1) -c if one of these restrictions is found to be violated -c control is immediately repassed to the calling program -c -c further comments: -c as an example of the use of the computation mode iopt=1, the -c following program segment will cause concon to return control -c each time a spline with a new set of knots has been computed. -c ............. -c iopt = 0 -c s = 0.1e+60 (s very large) -c do 10 i=1,m -c call concon(iopt,m,x,y,w,v,s,nest,maxtr,maxbin,n,t,c,sq,sx, -c * bind,wrk,lwrk,iwrk,kwrk,ier) -c ...... -c s = sq -c iopt=1 -c 10 continue -c ............. -c -c other subroutines required: -c fpcoco,fpcosp,fpbspl,fpadno,fpdeno,fpseno,fpfrno -c -c references: -c dierckx p. : an algorithm for cubic spline fitting with convexity -c constraints, computing 24 (1980) 349-371. -c dierckx p. : an algorithm for least-squares cubic spline fitting -c with convexity and concavity constraints, report tw39, -c dept. computer science, k.u.leuven, 1978. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p. dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : march 1978 -c latest update : march 1987. -c -c .. -c ..scalar arguments.. - real*8 s,sq - integer iopt,m,nest,maxtr,maxbin,n,lwrk,kwrk,ier -c ..array arguments.. 
- real*8 x(m),y(m),w(m),v(m),t(nest),c(nest),sx(m),wrk(lwrk) - integer iwrk(kwrk) - logical bind(nest) -c ..local scalars.. - integer i,lwest,kwest,ie,iw,lww - real*8 one -c .. -c set constant - one = 0.1e+01 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt.lt.0 .or. iopt.gt.1) go to 30 - if(m.lt.4 .or. nest.lt.8) go to 30 - if(s.lt.0.) go to 30 - if(maxtr.lt.1 .or. maxbin.lt.1) go to 30 - lwest = 8*nest+m*4+maxbin*(1+nest+maxbin) - kwest = 4*maxtr+2*(maxbin+1) - if(lwrk.lt.lwest .or. kwrk.lt.kwest) go to 30 - if(iopt.gt.0) go to 20 - if(w(1).le.0.) go to 30 - if(v(1).gt.0.) v(1) = one - if(v(1).lt.0.) v(1) = -one - do 10 i=2,m - if(x(i-1).ge.x(i) .or. w(i).le.0.) go to 30 - if(v(i).gt.0.) v(i) = one - if(v(i).lt.0.) v(i) = -one - 10 continue - 20 ier = 0 -c we partition the working space and determine the spline approximation - ie = 1 - iw = ie+nest - lww = lwrk-nest - call fpcoco(iopt,m,x,y,w,v,s,nest,maxtr,maxbin,n,t,c,sq,sx, - * bind,wrk(ie),wrk(iw),lww,iwrk,kwrk,ier) - 30 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/concur.f b/scipy-0.10.1/scipy/interpolate/fitpack/concur.f deleted file mode 100644 index 14c64db12a..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/concur.f +++ /dev/null @@ -1,370 +0,0 @@ - subroutine concur(iopt,idim,m,u,mx,x,xx,w,ib,db,nb,ie,de,ne,k,s, - * nest,n,t,nc,c,np,cp,fp,wrk,lwrk,iwrk,ier) -c given the ordered set of m points x(i) in the idim-dimensional space -c and given also a corresponding set of strictly increasing values u(i) -c and the set of positive numbers w(i),i=1,2,...,m, subroutine concur -c determines a smooth approximating spline curve s(u), i.e. -c x1 = s1(u) -c x2 = s2(u) ub = u(1) <= u <= u(m) = ue -c ......... -c xidim = sidim(u) -c with sj(u),j=1,2,...,idim spline functions of odd degree k with -c common knots t(j),j=1,2,...,n. 
-c in addition these splines will satisfy the following boundary -c constraints (l) -c if ib > 0 : sj (u(1)) = db(idim*l+j) ,l=0,1,...,ib-1 -c and (l) -c if ie > 0 : sj (u(m)) = de(idim*l+j) ,l=0,1,...,ie-1. -c if iopt=-1 concur calculates the weighted least-squares spline curve -c according to a given set of knots. -c if iopt>=0 the number of knots of the splines sj(u) and the position -c t(j),j=1,2,...,n is chosen automatically by the routine. the smooth- -c ness of s(u) is then achieved by minimalizing the discontinuity -c jumps of the k-th derivative of s(u) at the knots t(j),j=k+2,k+3,..., -c n-k-1. the amount of smoothness is determined by the condition that -c f(p)=sum((w(i)*dist(x(i),s(u(i))))**2) be <= s, with s a given non- -c negative constant, called the smoothing factor. -c the fit s(u) is given in the b-spline representation and can be -c evaluated by means of subroutine curev. -c -c calling sequence: -c call concur(iopt,idim,m,u,mx,x,xx,w,ib,db,nb,ie,de,ne,k,s,nest,n, -c * t,nc,c,np,cp,fp,wrk,lwrk,iwrk,ier) -c -c parameters: -c iopt : integer flag. on entry iopt must specify whether a weighted -c least-squares spline curve (iopt=-1) or a smoothing spline -c curve (iopt=0 or 1) must be determined.if iopt=0 the routine -c will start with an initial set of knots t(i)=ub,t(i+k+1)=ue, -c i=1,2,...,k+1. if iopt=1 the routine will continue with the -c knots found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c idim : integer. on entry idim must specify the dimension of the -c curve. 0 < idim < 11. -c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m > k-max(ib-1,0)-max(ie-1,0). unchanged on exit. -c u : real array of dimension at least (m). before entry, -c u(i) must be set to the i-th value of the parameter variable -c u for i=1,2,...,m. 
these values must be supplied in -c strictly ascending order and will be unchanged on exit. -c mx : integer. on entry mx must specify the actual dimension of -c the arrays x and xx as declared in the calling (sub)program -c mx must not be too small (see x). unchanged on exit. -c x : real array of dimension at least idim*m. -c before entry, x(idim*(i-1)+j) must contain the j-th coord- -c inate of the i-th data point for i=1,2,...,m and j=1,2,..., -c idim. unchanged on exit. -c xx : real array of dimension at least idim*m. -c used as working space. on exit xx contains the coordinates -c of the data points to which a spline curve with zero deriv- -c ative constraints has been determined. -c if the computation mode iopt =1 is used xx should be left -c unchanged between calls. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. unchanged on exit. -c see also further comments. -c ib : integer. on entry ib must specify the number of derivative -c constraints for the curve at the begin point. 0<=ib<=(k+1)/2 -c unchanged on exit. -c db : real array of dimension nb. before entry db(idim*l+j) must -c contain the l-th order derivative of sj(u) at u=u(1) for -c j=1,2,...,idim and l=0,1,...,ib-1 (if ib>0). -c unchanged on exit. -c nb : integer, specifying the dimension of db. nb>=max(1,idim*ib) -c unchanged on exit. -c ie : integer. on entry ie must specify the number of derivative -c constraints for the curve at the end point. 0<=ie<=(k+1)/2 -c unchanged on exit. -c de : real array of dimension ne. before entry de(idim*l+j) must -c contain the l-th order derivative of sj(u) at u=u(m) for -c j=1,2,...,idim and l=0,1,...,ie-1 (if ie>0). -c unchanged on exit. -c ne : integer, specifying the dimension of de. ne>=max(1,idim*ie) -c unchanged on exit. -c k : integer. on entry k must specify the degree of the splines. -c k=1,3 or 5. -c unchanged on exit. 
-c s : real.on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments. -c nest : integer. on entry nest must contain an over-estimate of the -c total number of knots of the splines returned, to indicate -c the storage space available to the routine. nest >=2*k+2. -c in most practical situation nest=m/2 will be sufficient. -c always large enough is nest=m+k+1+max(0,ib-1)+max(0,ie-1), -c the number of knots needed for interpolation (s=0). -c unchanged on exit. -c n : integer. -c unless ier = 10 (in case iopt >=0), n will contain the -c total number of knots of the smoothing spline curve returned -c if the computation mode iopt=1 is used this value of n -c should be left unchanged between subsequent calls. -c in case iopt=-1, the value of n must be specified on entry. -c t : real array of dimension at least (nest). -c on succesful exit, this array will contain the knots of the -c spline curve,i.e. the position of the interior knots t(k+2), -c t(k+3),..,t(n-k-1) as well as the position of the additional -c t(1)=t(2)=...=t(k+1)=ub and t(n-k)=...=t(n)=ue needed for -c the b-spline representation. -c if the computation mode iopt=1 is used, the values of t(1), -c t(2),...,t(n) should be left unchanged between subsequent -c calls. if the computation mode iopt=-1 is used, the values -c t(k+2),...,t(n-k-1) must be supplied by the user, before -c entry. see also the restrictions (ier=10). -c nc : integer. on entry nc must specify the actual dimension of -c the array c as declared in the calling (sub)program. nc -c must not be too small (see c). unchanged on exit. -c c : real array of dimension at least (nest*idim). -c on succesful exit, this array will contain the coefficients -c in the b-spline representation of the spline curve s(u),i.e. -c the b-spline coefficients of the spline sj(u) will be given -c in c(n*(j-1)+i),i=1,2,...,n-k-1 for j=1,2,...,idim. 
-c cp : real array of dimension at least 2*(k+1)*idim. -c on exit cp will contain the b-spline coefficients of a -c polynomial curve which satisfies the boundary constraints. -c if the computation mode iopt =1 is used cp should be left -c unchanged between calls. -c np : integer. on entry np must specify the actual dimension of -c the array cp as declared in the calling (sub)program. np -c must not be too small (see cp). unchanged on exit. -c fp : real. unless ier = 10, fp contains the weighted sum of -c squared residuals of the spline curve returned. -c wrk : real array of dimension at least m*(k+1)+nest*(6+idim+3*k). -c used as working space. if the computation mode iopt=1 is -c used, the values wrk(1),...,wrk(n) should be left unchanged -c between subsequent calls. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. lwrk -c must not be too small (see wrk). unchanged on exit. -c iwrk : integer array of dimension at least (nest). -c used as working space. if the computation mode iopt=1 is -c used,the values iwrk(1),...,iwrk(n) should be left unchanged -c between subsequent calls. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the curve returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the curve returned is an interpolating -c spline curve, satisfying the constraints (fp=0). -c ier=-2 : normal return. the curve returned is the weighted least- -c squares polynomial curve of degree k, satisfying the -c constraints. in this extreme case fp gives the upper -c bound fp0 for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameter nest. -c probably causes : nest too small. 
if nest is already -c large (say nest > m/2), it may also indicate that s is -c too small -c the approximation returned is the least-squares spline -c curve according to the knots t(1),t(2),...,t(n). (n=nest) -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline curve -c with fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing curve -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, k = 1,3 or 5, m>k-max(0,ib-1)-max(0,ie-1), -c nest>=2k+2, 0=(k+1)*m+nest*(6+idim+3*k), -c nc >=nest*idim ,u(1)0 i=1,2,...,m, -c mx>=idim*m,0<=ib<=(k+1)/2,0<=ie<=(k+1)/2,nb>=1,ne>=1, -c nb>=ib*idim,ne>=ib*idim,np>=2*(k+1)*idim, -c if iopt=-1:2*k+2<=n<=min(nest,mmax) with mmax = m+k+1+ -c max(0,ib-1)+max(0,ie-1) -c u(1)=0: s>=0 -c if s=0 : nest >=mmax (see above) -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the curve will be too smooth and signal will be -c lost ; if s is too small the curve will pick up too much noise. 
in -c the extreme cases the program will return an interpolating curve if -c s=0 and the least-squares polynomial curve of degree k if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c x(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). if nothing is known about the statistical error in x(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c polynomial curve and the upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximating curve shows more detail) to obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if concur is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c curve underlying the data. but, if the computation mode iopt=1 is -c used, the knots returned may also depend on the s-values at previous -c calls (if these were smaller). 
therefore, if after a number of -c trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c concur once more with the selected value for s but now with iopt=0. -c indeed, concur may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c -c the form of the approximating curve can strongly be affected by -c the choice of the parameter values u(i). if there is no physical -c reason for choosing a particular parameter u, often good results -c will be obtained with the choice -c v(1)=0, v(i)=v(i-1)+q(i), i=2,...,m, u(i)=v(i)/v(m), i=1,..,m -c where -c q(i)= sqrt(sum j=1,idim (xj(i)-xj(i-1))**2 ) -c other possibilities for q(i) are -c q(i)= sum j=1,idim (xj(i)-xj(i-1))**2 -c q(i)= sum j=1,idim abs(xj(i)-xj(i-1)) -c q(i)= max j=1,idim abs(xj(i)-xj(i-1)) -c q(i)= 1 -c -c other subroutines required: -c fpback,fpbspl,fpched,fpcons,fpdisc,fpgivs,fpknot,fprati,fprota -c curev,fppocu,fpadpo,fpinst -c -c references: -c dierckx p. : algorithms for smoothing data with periodic and -c parametric splines, computer graphics and image -c processing 20 (1982) 171-184. -c dierckx p. : algorithms for smoothing data with periodic and param- -c etric splines, report tw55, dept. computer science, -c k.u.leuven, 1981. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1987 -c -c .. -c ..scalar arguments.. - real*8 s,fp - integer iopt,idim,m,mx,ib,nb,ie,ne,k,nest,n,nc,np,lwrk,ier -c ..array arguments.. 
- real*8 u(m),x(mx),xx(mx),db(nb),de(ne),w(m),t(nest),c(nc),wrk(lwrk - *) - real*8 cp(np) - integer iwrk(nest) -c ..local scalars.. - real*8 tol,dist - integer i,ib1,ie1,ja,jb,jfp,jg,jq,jz,j,k1,k2,lwest,maxit,nmin, - * ncc,kk,mmin,nmax,mxx -c ..function references - integer max0 -c .. -c we set up the parameters tol and maxit - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 90 - if(idim.le.0 .or. idim.gt.10) go to 90 - if(k.le.0 .or. k.gt.5) go to 90 - k1 = k+1 - kk = k1/2 - if(kk*2.ne.k1) go to 90 - k2 = k1+1 - if(ib.lt.0 .or. ib.gt.kk) go to 90 - if(ie.lt.0 .or. ie.gt.kk) go to 90 - nmin = 2*k1 - ib1 = max0(0,ib-1) - ie1 = max0(0,ie-1) - mmin = k1-ib1-ie1 - if(m.lt.mmin .or. nest.lt.nmin) go to 90 - if(nb.lt.(idim*ib) .or. ne.lt.(idim*ie)) go to 90 - if(np.lt.(2*k1*idim)) go to 90 - mxx = m*idim - ncc = nest*idim - if(mx.lt.mxx .or. nc.lt.ncc) go to 90 - lwest = m*k1+nest*(6+idim+3*k) - if(lwrk.lt.lwest) go to 90 - if(w(1).le.0.) go to 90 - do 10 i=2,m - if(u(i-1).ge.u(i) .or. w(i).le.0.) go to 90 - 10 continue - if(iopt.ge.0) go to 30 - if(n.lt.nmin .or. n.gt.nest) go to 90 - j = n - do 20 i=1,k1 - t(i) = u(1) - t(j) = u(m) - j = j-1 - 20 continue - call fpched(u,m,t,n,k,ib,ie,ier) - if (ier.eq.0) go to 40 - go to 90 - 30 if(s.lt.0.) go to 90 - nmax = m+k1+ib1+ie1 - if(s.eq.0. .and. nest.lt.nmax) go to 90 - ier = 0 - if(iopt.gt.0) go to 70 -c we determine a polynomial curve satisfying the boundary constraints. - 40 call fppocu(idim,k,u(1),u(m),ib,db,nb,ie,de,ne,cp,np) -c we generate new data points which will be approximated by a spline -c with zero derivative constraints. 
- j = nmin - do 50 i=1,k1 - wrk(i) = u(1) - wrk(j) = u(m) - j = j-1 - 50 continue -c evaluate the polynomial curve - call curev(idim,wrk,nmin,cp,np,k,u,m,xx,mxx,ier) -c substract from the old data, the values of the polynomial curve - do 60 i=1,mxx - xx(i) = x(i)-xx(i) - 60 continue -c we partition the working space and determine the spline curve. - 70 jfp = 1 - jz = jfp+nest - ja = jz+ncc - jb = ja+nest*k1 - jg = jb+nest*k2 - jq = jg+nest*k2 - call fpcons(iopt,idim,m,u,mxx,xx,w,ib,ie,k,s,nest,tol,maxit,k1, - * k2,n,t,ncc,c,fp,wrk(jfp),wrk(jz),wrk(ja),wrk(jb),wrk(jg),wrk(jq), - * - * iwrk,ier) -c add the polynomial curve to the calculated spline. - call fpadpo(idim,t,n,c,ncc,k,cp,np,wrk(jz),wrk(ja),wrk(jb)) - 90 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/cualde.f b/scipy-0.10.1/scipy/interpolate/fitpack/cualde.f deleted file mode 100644 index d84acfc714..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/cualde.f +++ /dev/null @@ -1,91 +0,0 @@ - subroutine cualde(idim,t,n,c,nc,k1,u,d,nd,ier) -c subroutine cualde evaluates at the point u all the derivatives -c (l) -c d(idim*l+j) = sj (u) ,l=0,1,...,k, j=1,2,...,idim -c of a spline curve s(u) of order k1 (degree k=k1-1) and dimension idim -c given in its b-spline representation. -c -c calling sequence: -c call cualde(idim,t,n,c,nc,k1,u,d,nd,ier) -c -c input parameters: -c idim : integer, giving the dimension of the spline curve. -c t : array,length n, which contains the position of the knots. -c n : integer, giving the total number of knots of s(u). -c c : array,length nc, which contains the b-spline coefficients. -c nc : integer, giving the total number of coefficients of s(u). -c k1 : integer, giving the order of s(u) (order=degree+1). -c u : real, which contains the point where the derivatives must -c be evaluated. -c nd : integer, giving the dimension of the array d. nd >= k1*idim -c -c output parameters: -c d : array,length nd,giving the different curve derivatives. 
-c d(idim*l+j) will contain the j-th coordinate of the l-th -c derivative of the curve at the point u. -c ier : error flag -c ier = 0 : normal return -c ier =10 : invalid input data (see restrictions) -c -c restrictions: -c nd >= k1*idim -c t(k1) <= u <= t(n-k1+1) -c -c further comments: -c if u coincides with a knot, right derivatives are computed -c ( left derivatives if u = t(n-k1+1) ). -c -c other subroutines required: fpader. -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer idim,n,nc,k1,nd,ier - real*8 u -c ..array arguments.. - real*8 t(n),c(nc),d(nd) -c ..local scalars.. - integer i,j,kk,l,m,nk1 -c ..local array.. - real*8 h(6) -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 - if(nd.lt.(k1*idim)) go to 500 - nk1 = n-k1 - if(u.lt.t(k1) .or. u.gt.t(nk1+1)) go to 500 -c search for knot interval t(l) <= u < t(l+1) - l = k1 - 100 if(u.lt.t(l+1) .or. l.eq.nk1) go to 200 - l = l+1 - go to 100 - 200 if(t(l).ge.t(l+1)) go to 500 - ier = 0 -c calculate the derivatives. 
- j = 1 - do 400 i=1,idim - call fpader(t,n,c(j),k1,u,l,h) - m = i - do 300 kk=1,k1 - d(m) = h(kk) - m = m+idim - 300 continue - j = j+n - 400 continue - 500 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/curev.f b/scipy-0.10.1/scipy/interpolate/fitpack/curev.f deleted file mode 100644 index 00aaba6020..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/curev.f +++ /dev/null @@ -1,110 +0,0 @@ - subroutine curev(idim,t,n,c,nc,k,u,m,x,mx,ier) -c subroutine curev evaluates in a number of points u(i),i=1,2,...,m -c a spline curve s(u) of degree k and dimension idim, given in its -c b-spline representation. -c -c calling sequence: -c call curev(idim,t,n,c,nc,k,u,m,x,mx,ier) -c -c input parameters: -c idim : integer, giving the dimension of the spline curve. -c t : array,length n, which contains the position of the knots. -c n : integer, giving the total number of knots of s(u). -c c : array,length nc, which contains the b-spline coefficients. -c nc : integer, giving the total number of coefficients of s(u). -c k : integer, giving the degree of s(u). -c u : array,length m, which contains the points where s(u) must -c be evaluated. -c m : integer, giving the number of points where s(u) must be -c evaluated. -c mx : integer, giving the dimension of the array x. mx >= m*idim -c -c output parameters: -c x : array,length mx,giving the value of s(u) at the different -c points. x(idim*(i-1)+j) will contain the j-th coordinate -c of the i-th point on the curve. -c ier : error flag -c ier = 0 : normal return -c ier =10 : invalid input data (see restrictions) -c -c restrictions: -c m >= 1 -c mx >= m*idim -c t(k+1) <= u(i) <= u(i+1) <= t(n-k) , i=1,2,...,m-1. -c -c other subroutines required: fpbspl. -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. 
: curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer idim,n,nc,k,m,mx,ier -c ..array arguments.. - real*8 t(n),c(nc),u(m),x(mx) -c ..local scalars.. - integer i,j,jj,j1,k1,l,ll,l1,mm,nk1 - real*8 arg,sp,tb,te -c ..local array.. - real*8 h(6) -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 - if (m.lt.1) go to 100 - if (m.eq.1) go to 30 - go to 10 - 10 do 20 i=2,m - if(u(i).lt.u(i-1)) go to 100 - 20 continue - 30 if(mx.lt.(m*idim)) go to 100 - ier = 0 -c fetch tb and te, the boundaries of the approximation interval. - k1 = k+1 - nk1 = n-k1 - tb = t(k1) - te = t(nk1+1) - l = k1 - l1 = l+1 -c main loop for the different points. - mm = 0 - do 80 i=1,m -c fetch a new u-value arg. - arg = u(i) - if(arg.lt.tb) arg = tb - if(arg.gt.te) arg = te -c search for knot interval t(l) <= arg < t(l+1) - 40 if(arg.lt.t(l1) .or. l.eq.nk1) go to 50 - l = l1 - l1 = l+1 - go to 40 -c evaluate the non-zero b-splines at arg. - 50 call fpbspl(t,n,k,arg,l,h) -c find the value of s(u) at u=arg. - ll = l-k1 - do 70 j1=1,idim - jj = ll - sp = 0. 
- do 60 j=1,k1 - jj = jj+1 - sp = sp+c(jj)*h(j) - 60 continue - mm = mm+1 - x(mm) = sp - ll = ll+n - 70 continue - 80 continue - 100 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/curfit.f b/scipy-0.10.1/scipy/interpolate/fitpack/curfit.f deleted file mode 100644 index f6a6acf141..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/curfit.f +++ /dev/null @@ -1,260 +0,0 @@ - subroutine curfit(iopt,m,x,y,w,xb,xe,k,s,nest,n,t,c,fp, - * wrk,lwrk,iwrk,ier) -c given the set of data points (x(i),y(i)) and the set of positive -c numbers w(i),i=1,2,...,m,subroutine curfit determines a smooth spline -c approximation of degree k on the interval xb <= x <= xe. -c if iopt=-1 curfit calculates the weighted least-squares spline -c according to a given set of knots. -c if iopt>=0 the number of knots of the spline s(x) and the position -c t(j),j=1,2,...,n is chosen automatically by the routine. the smooth- -c ness of s(x) is then achieved by minimalizing the discontinuity -c jumps of the k-th derivative of s(x) at the knots t(j),j=k+2,k+3,..., -c n-k-1. the amount of smoothness is determined by the condition that -c f(p)=sum((w(i)*(y(i)-s(x(i))))**2) be <= s, with s a given non- -c negative constant, called the smoothing factor. -c the fit s(x) is given in the b-spline representation (b-spline coef- -c ficients c(j),j=1,2,...,n-k-1) and can be evaluated by means of -c subroutine splev. -c -c calling sequence: -c call curfit(iopt,m,x,y,w,xb,xe,k,s,nest,n,t,c,fp,wrk, -c * lwrk,iwrk,ier) -c -c parameters: -c iopt : integer flag. on entry iopt must specify whether a weighted -c least-squares spline (iopt=-1) or a smoothing spline (iopt= -c 0 or 1) must be determined. if iopt=0 the routine will start -c with an initial set of knots t(i)=xb, t(i+k+1)=xe, i=1,2,... -c k+1. if iopt=1 the routine will continue with the knots -c found at the last call of the routine. 
-c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m > k. unchanged on exit. -c x : real array of dimension at least (m). before entry, x(i) -c must be set to the i-th value of the independent variable x, -c for i=1,2,...,m. these values must be supplied in strictly -c ascending order. unchanged on exit. -c y : real array of dimension at least (m). before entry, y(i) -c must be set to the i-th value of the dependent variable y, -c for i=1,2,...,m. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. unchanged on exit. -c see also further comments. -c xb,xe : real values. on entry xb and xe must specify the boundaries -c of the approximation interval. xb<=x(1), xe>=x(m). -c unchanged on exit. -c k : integer. on entry k must specify the degree of the spline. -c 1<=k<=5. it is recommended to use cubic splines (k=3). -c the user is strongly dissuaded from choosing k even,together -c with a small s-value. unchanged on exit. -c s : real.on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments. -c nest : integer. on entry nest must contain an over-estimate of the -c total number of knots of the spline returned, to indicate -c the storage space available to the routine. nest >=2*k+2. -c in most practical situation nest=m/2 will be sufficient. -c always large enough is nest=m+k+1, the number of knots -c needed for interpolation (s=0). unchanged on exit. -c n : integer. -c unless ier =10 (in case iopt >=0), n will contain the -c total number of knots of the spline approximation returned. -c if the computation mode iopt=1 is used this value of n -c should be left unchanged between subsequent calls. 
-c in case iopt=-1, the value of n must be specified on entry. -c t : real array of dimension at least (nest). -c on succesful exit, this array will contain the knots of the -c spline,i.e. the position of the interior knots t(k+2),t(k+3) -c ...,t(n-k-1) as well as the position of the additional knots -c t(1)=t(2)=...=t(k+1)=xb and t(n-k)=...=t(n)=xe needed for -c the b-spline representation. -c if the computation mode iopt=1 is used, the values of t(1), -c t(2),...,t(n) should be left unchanged between subsequent -c calls. if the computation mode iopt=-1 is used, the values -c t(k+2),...,t(n-k-1) must be supplied by the user, before -c entry. see also the restrictions (ier=10). -c c : real array of dimension at least (nest). -c on succesful exit, this array will contain the coefficients -c c(1),c(2),..,c(n-k-1) in the b-spline representation of s(x) -c fp : real. unless ier=10, fp contains the weighted sum of -c squared residuals of the spline approximation returned. -c wrk : real array of dimension at least (m*(k+1)+nest*(7+3*k)). -c used as working space. if the computation mode iopt=1 is -c used, the values wrk(1),...,wrk(n) should be left unchanged -c between subsequent calls. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program.lwrk -c must not be too small (see wrk). unchanged on exit. -c iwrk : integer array of dimension at least (nest). -c used as working space. if the computation mode iopt=1 is -c used,the values iwrk(1),...,iwrk(n) should be left unchanged -c between subsequent calls. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is an interpolating -c spline (fp=0). -c ier=-2 : normal return. 
the spline returned is the weighted least- -c squares polynomial of degree k. in this extreme case fp -c gives the upper bound fp0 for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameter nest. -c probably causes : nest too small. if nest is already -c large (say nest > m/2), it may also indicate that s is -c too small -c the approximation returned is the weighted least-squares -c spline according to the knots t(1),t(2),...,t(n). (n=nest) -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, 1<=k<=5, m>k, nest>2*k+2, w(i)>0,i=1,2,...,m -c xb<=x(1)=(k+1)*m+nest*(7+3*k) -c if iopt=-1: 2*k+2<=n<=min(nest,m+k+1) -c xb=0: s>=0 -c if s=0 : nest >= m+k+1 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. 
-c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the weighted least-squares polynomial of degree k if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c y(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). if nothing is known about the statistical error in y(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c polynomial and the corresponding upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximation shows more detail) to obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if curfit is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. 
but, if the computation mode iopt=1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c curfit once more with the selected value for s but now with iopt=0. -c indeed, curfit may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c -c other subroutines required: -c fpback,fpbspl,fpchec,fpcurf,fpdisc,fpgivs,fpknot,fprati,fprota -c -c references: -c dierckx p. : an algorithm for smoothing, differentiation and integ- -c ration of experimental data using spline functions, -c j.comp.appl.maths 1 (1975) 165-184. -c dierckx p. : a fast algorithm for smoothing data on a rectangular -c grid while using spline functions, siam j.numer.anal. -c 19 (1982) 1286-1304. -c dierckx p. : an improved algorithm for curve fitting with spline -c functions, report tw54, dept. computer science,k.u. -c leuven, 1981. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1987 -c -c .. -c ..scalar arguments.. - real*8 xb,xe,s,fp - integer iopt,m,k,nest,n,lwrk,ier -c ..array arguments.. - real*8 x(m),y(m),w(m),t(nest),c(nest),wrk(lwrk) - integer iwrk(nest) -c ..local scalars.. - real*8 tol - integer i,ia,ib,ifp,ig,iq,iz,j,k1,k2,lwest,maxit,nmin -c .. -c we set up the parameters tol and maxit - maxit = 20 - tol = 0.1d-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. 
- ier = 10 - if(k.le.0 .or. k.gt.5) go to 50 - k1 = k+1 - k2 = k1+1 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 50 - nmin = 2*k1 - if(m.lt.k1 .or. nest.lt.nmin) go to 50 - lwest = m*k1+nest*(7+3*k) - if(lwrk.lt.lwest) go to 50 - if(xb.gt.x(1) .or. xe.lt.x(m)) go to 50 - do 10 i=2,m - if(x(i-1).gt.x(i)) go to 50 - 10 continue - if(iopt.ge.0) go to 30 - if(n.lt.nmin .or. n.gt.nest) go to 50 - j = n - do 20 i=1,k1 - t(i) = xb - t(j) = xe - j = j-1 - 20 continue - call fpchec(x,m,t,n,k,ier) - if (ier.eq.0) go to 40 - go to 50 - 30 if(s.lt.0.) go to 50 - if(s.eq.0. .and. nest.lt.(m+k1)) go to 50 -c we partition the working space and determine the spline approximation. - 40 ifp = 1 - iz = ifp+nest - ia = iz+nest - ib = ia+nest*k1 - ig = ib+nest*k2 - iq = ig+nest*k2 - call fpcurf(iopt,x,y,w,m,xb,xe,k,s,nest,tol,maxit,k1,k2,n,t,c,fp, - * wrk(ifp),wrk(iz),wrk(ia),wrk(ib),wrk(ig),wrk(iq),iwrk,ier) - 50 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/dblint.f b/scipy-0.10.1/scipy/interpolate/fitpack/dblint.f deleted file mode 100644 index f7f87c0f12..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/dblint.f +++ /dev/null @@ -1,88 +0,0 @@ - real*8 function dblint(tx,nx,ty,ny,c,kx,ky,xb,xe,yb,ye,wrk) -c function dblint calculates the double integral -c / xe / ye -c | | s(x,y) dx dy -c xb / yb / -c with s(x,y) a bivariate spline of degrees kx and ky, given in the -c b-spline representation. -c -c calling sequence: -c aint = dblint(tx,nx,ty,ny,c,kx,ky,xb,xe,yb,ye,wrk) -c -c input parameters: -c tx : real array, length nx, which contains the position of the -c knots in the x-direction. -c nx : integer, giving the total number of knots in the x-direction -c ty : real array, length ny, which contains the position of the -c knots in the y-direction. -c ny : integer, giving the total number of knots in the y-direction -c c : real array, length (nx-kx-1)*(ny-ky-1), which contains the -c b-spline coefficients. 
-c kx,ky : integer values, giving the degrees of the spline. -c xb,xe : real values, containing the boundaries of the integration -c yb,ye domain. s(x,y) is considered to be identically zero out- -c side the rectangle (tx(kx+1),tx(nx-kx))*(ty(ky+1),ty(ny-ky)) -c -c output parameters: -c aint : real , containing the double integral of s(x,y). -c wrk : real array of dimension at least (nx+ny-kx-ky-2). -c used as working space. -c on exit, wrk(i) will contain the integral -c / xe -c | ni,kx+1(x) dx , i=1,2,...,nx-kx-1 -c xb / -c with ni,kx+1(x) the normalized b-spline defined on -c the knots tx(i),...,tx(i+kx+1) -c wrk(j+nx-kx-1) will contain the integral -c / ye -c | nj,ky+1(y) dy , j=1,2,...,ny-ky-1 -c yb / -c with nj,ky+1(y) the normalized b-spline defined on -c the knots ty(j),...,ty(j+ky+1) -c -c other subroutines required: fpintb -c -c references : -c gaffney p.w. : the calculation of indefinite integrals of b-splines -c j. inst. maths applics 17 (1976) 37-41. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1989 -c -c ..scalar arguments.. - integer nx,ny,kx,ky - real*8 xb,xe,yb,ye -c ..array arguments.. - real*8 tx(nx),ty(ny),c((nx-kx-1)*(ny-ky-1)),wrk(nx+ny-kx-ky-2) -c ..local scalars.. - integer i,j,l,m,nkx1,nky1 - real*8 res -c .. - nkx1 = nx-kx-1 - nky1 = ny-ky-1 -c we calculate the integrals of the normalized b-splines ni,kx+1(x) - call fpintb(tx,nx,wrk,nkx1,xb,xe) -c we calculate the integrals of the normalized b-splines nj,ky+1(y) - call fpintb(ty,ny,wrk(nkx1+1),nky1,yb,ye) -c calculate the integral of s(x,y) - dblint = 0. - do 200 i=1,nkx1 - res = wrk(i) - if(res.eq.0.) 
go to 200 - m = (i-1)*nky1 - l = nkx1 - do 100 j=1,nky1 - m = m+1 - l = l+1 - dblint = dblint+res*wrk(l)*c(m) - 100 continue - 200 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/evapol.f b/scipy-0.10.1/scipy/interpolate/fitpack/evapol.f deleted file mode 100644 index f6381acf4a..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/evapol.f +++ /dev/null @@ -1,82 +0,0 @@ - real*8 function evapol(tu,nu,tv,nv,c,rad,x,y) -c function program evacir evaluates the function f(x,y) = s(u,v), -c defined through the transformation -c x = u*rad(v)*cos(v) y = u*rad(v)*sin(v) -c and where s(u,v) is a bicubic spline ( 0<=u<=1 , -pi<=v<=pi ), given -c in its standard b-spline representation. -c -c calling sequence: -c f = evapol(tu,nu,tv,nv,c,rad,x,y) -c -c input parameters: -c tu : real array, length nu, which contains the position of the -c knots in the u-direction. -c nu : integer, giving the total number of knots in the u-direction -c tv : real array, length nv, which contains the position of the -c knots in the v-direction. -c nv : integer, giving the total number of knots in the v-direction -c c : real array, length (nu-4)*(nv-4), which contains the -c b-spline coefficients. -c rad : real function subprogram, defining the boundary of the -c approximation domain. must be declared external in the -c calling (sub)-program -c x,y : real values. -c before entry x and y must be set to the co-ordinates of -c the point where f(x,y) must be evaluated. -c -c output parameter: -c f : real -c on exit f contains the value of f(x,y) -c -c other subroutines required: -c bispev,fpbisp,fpbspl -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. 
computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1989 -c -c ..scalar arguments.. - integer nu,nv - real*8 x,y -c ..array arguments.. - real*8 tu(nu),tv(nv),c((nu-4)*(nv-4)) -c ..user specified function - real*8 rad -c ..local scalars.. - integer ier - real*8 u,v,r,f,one,dist -c ..local arrays - real*8 wrk(8) - integer iwrk(2) -c ..function references - real*8 atan2,sqrt -c .. -c calculate the (u,v)-coordinates of the given point. - one = 1 - u = 0. - v = 0. - dist = x**2+y**2 - if(dist.le.0.) go to 10 - v = atan2(y,x) - r = rad(v) - if(r.le.0.) go to 10 - u = sqrt(dist)/r - if(u.gt.one) u = one -c evaluate s(u,v) - 10 call bispev(tu,nu,tv,nv,c,3,3,u,1,v,1,f,wrk,8,iwrk,2,ier) - evapol = f - return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fourco.f b/scipy-0.10.1/scipy/interpolate/fitpack/fourco.f deleted file mode 100644 index 7372d00ac7..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fourco.f +++ /dev/null @@ -1,96 +0,0 @@ - subroutine fourco(t,n,c,alfa,m,ress,resc,wrk1,wrk2,ier) -c subroutine fourco calculates the integrals -c /t(n-3) -c ress(i) = ! s(x)*sin(alfa(i)*x) dx and -c t(4)/ -c /t(n-3) -c resc(i) = ! s(x)*cos(alfa(i)*x) dx, i=1,...,m, -c t(4)/ -c where s(x) denotes a cubic spline which is given in its -c b-spline representation. -c -c calling sequence: -c call fourco(t,n,c,alfa,m,ress,resc,wrk1,wrk2,ier) -c -c input parameters: -c t : real array,length n, containing the knots of s(x). -c n : integer, containing the total number of knots. n>=10. -c c : real array,length n, containing the b-spline coefficients. -c alfa : real array,length m, containing the parameters alfa(i). -c m : integer, specifying the number of integrals to be computed. -c wrk1 : real array,length n. used as working space -c wrk2 : real array,length n. 
used as working space -c -c output parameters: -c ress : real array,length m, containing the integrals ress(i). -c resc : real array,length m, containing the integrals resc(i). -c ier : error flag: -c ier=0 : normal return. -c ier=10: invalid input data (see restrictions). -c -c restrictions: -c n >= 10 -c t(4) < t(5) < ... < t(n-4) < t(n-3). -c t(1) <= t(2) <= t(3) <= t(4). -c t(n-3) <= t(n-2) <= t(n-1) <= t(n). -c -c other subroutines required: fpbfou,fpcsin -c -c references : -c dierckx p. : calculation of fouriercoefficients of discrete -c functions using cubic splines. j. computational -c and applied mathematics 3 (1977) 207-209. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer n,m,ier -c ..array arguments.. - real*8 t(n),c(n),wrk1(n),wrk2(n),alfa(m),ress(m),resc(m) -c ..local scalars.. - integer i,j,n4 - real*8 rs,rc -c .. - n4 = n-4 -c before starting computations a data check is made. in the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(n.lt.10) go to 50 - j = n - do 10 i=1,3 - if(t(i).gt.t(i+1)) go to 50 - if(t(j).lt.t(j-1)) go to 50 - j = j-1 - 10 continue - do 20 i=4,n4 - if(t(i).ge.t(i+1)) go to 50 - 20 continue - ier = 0 -c main loop for the different alfa(i). - do 40 i=1,m -c calculate the integrals -c wrk1(j) = integral(nj,4(x)*sin(alfa*x)) and -c wrk2(j) = integral(nj,4(x)*cos(alfa*x)), j=1,2,...,n-4, -c where nj,4(x) denotes the normalised cubic b-spline defined on the -c knots t(j),t(j+1),...,t(j+4). - call fpbfou(t,n,alfa(i),wrk1,wrk2) -c calculate the integrals ress(i) and resc(i). - rs = 0. - rc = 0. 
- do 30 j=1,n4 - rs = rs+c(j)*wrk1(j) - rc = rc+c(j)*wrk2(j) - 30 continue - ress(i) = rs - resc(i) = rc - 40 continue - 50 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpader.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpader.f deleted file mode 100644 index 7a649c346a..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpader.f +++ /dev/null @@ -1,57 +0,0 @@ - subroutine fpader(t,n,c,k1,x,l,d) -c subroutine fpader calculates the derivatives -c (j-1) -c d(j) = s (x) , j=1,2,...,k1 -c of a spline of order k1 at the point t(l)<=x= 10, t(4) < t(5) < ... < t(n-4) < t(n-3). -c .. -c ..scalar arguments.. - integer n - real*8 par -c ..array arguments.. - real*8 t(n),ress(n),resc(n) -c ..local scalars.. - integer i,ic,ipj,is,j,jj,jp1,jp4,k,li,lj,ll,nmj,nm3,nm7 - real*8 ak,beta,con1,con2,c1,c2,delta,eps,fac,f1,f2,f3,one,quart, - * sign,six,s1,s2,term -c ..local arrays.. - real*8 co(5),si(5),hs(5),hc(5),rs(3),rc(3) -c ..function references.. - real*8 cos,sin,abs -c .. -c initialization. - one = 0.1e+01 - six = 0.6e+01 - eps = 0.1e-07 - quart = 0.25e0 - con1 = 0.5e-01 - con2 = 0.12e+03 - nm3 = n-3 - nm7 = n-7 - if(par.ne.0.) term = six/par - beta = par*t(4) - co(1) = cos(beta) - si(1) = sin(beta) -c calculate the integrals ress(j) and resc(j), j=1,2,3 by setting up -c a divided difference table. - do 30 j=1,3 - jp1 = j+1 - jp4 = j+4 - beta = par*t(jp4) - co(jp1) = cos(beta) - si(jp1) = sin(beta) - call fpcsin(t(4),t(jp4),par,si(1),co(1),si(jp1),co(jp1), - * rs(j),rc(j)) - i = 5-j - hs(i) = 0. - hc(i) = 0. - do 10 jj=1,j - ipj = i+jj - hs(ipj) = rs(jj) - hc(ipj) = rc(jj) - 10 continue - do 20 jj=1,3 - if(i.lt.jj) i = jj - k = 5 - li = jp4 - do 20 ll=i,4 - lj = li-jj - fac = t(li)-t(lj) - hs(k) = (hs(k)-hs(k-1))/fac - hc(k) = (hc(k)-hc(k-1))/fac - k = k-1 - li = li-1 - 20 continue - ress(j) = hs(5)-hs(4) - resc(j) = hc(5)-hc(4) - 30 continue - if(nm7.lt.4) go to 160 -c calculate the integrals ress(j) and resc(j),j=4,5,...,n-7. 
- do 150 j=4,nm7 - jp4 = j+4 - beta = par*t(jp4) - co(5) = cos(beta) - si(5) = sin(beta) - delta = t(jp4)-t(j) -c the way of computing ress(j) and resc(j) depends on the value of -c beta = par*(t(j+4)-t(j)). - beta = delta*par - if(abs(beta).le.one) go to 60 -c if !beta! > 1 the integrals are calculated by setting up a divided -c difference table. - do 40 k=1,5 - hs(k) = si(k) - hc(k) = co(k) - 40 continue - do 50 jj=1,3 - k = 5 - li = jp4 - do 50 ll=jj,4 - lj = li-jj - fac = par*(t(li)-t(lj)) - hs(k) = (hs(k)-hs(k-1))/fac - hc(k) = (hc(k)-hc(k-1))/fac - k = k-1 - li = li-1 - 50 continue - s2 = (hs(5)-hs(4))*term - c2 = (hc(5)-hc(4))*term - go to 130 -c if !beta! <= 1 the integrals are calculated by evaluating a series -c expansion. - 60 f3 = 0. - do 70 i=1,4 - ipj = i+j - hs(i) = par*(t(ipj)-t(j)) - hc(i) = hs(i) - f3 = f3+hs(i) - 70 continue - f3 = f3*con1 - c1 = quart - s1 = f3 - if(abs(f3).le.eps) go to 120 - sign = one - fac = con2 - k = 5 - is = 0 - do 110 ic=1,20 - k = k+1 - ak = k - fac = fac*ak - f1 = 0. - f3 = 0. - do 80 i=1,4 - f1 = f1+hc(i) - f2 = f1*hs(i) - hc(i) = f2 - f3 = f3+f2 - 80 continue - f3 = f3*six/fac - if(is.eq.0) go to 90 - is = 0 - s1 = s1+f3*sign - go to 100 - 90 sign = -sign - is = 1 - c1 = c1+f3*sign - 100 if(abs(f3).le.eps) go to 120 - 110 continue - 120 s2 = delta*(co(1)*s1+si(1)*c1) - c2 = delta*(co(1)*c1-si(1)*s1) - 130 ress(j) = s2 - resc(j) = c2 - do 140 i=1,4 - co(i) = co(i+1) - si(i) = si(i+1) - 140 continue - 150 continue -c calculate the integrals ress(j) and resc(j),j=n-6,n-5,n-4 by setting -c up a divided difference table. - 160 do 190 j=1,3 - nmj = nm3-j - i = 5-j - call fpcsin(t(nm3),t(nmj),par,si(4),co(4),si(i-1),co(i-1), - * rs(j),rc(j)) - hs(i) = 0. - hc(i) = 0. 
- do 170 jj=1,j - ipj = i+jj - hc(ipj) = rc(jj) - hs(ipj) = rs(jj) - 170 continue - do 180 jj=1,3 - if(i.lt.jj) i = jj - k = 5 - li = nmj - do 180 ll=i,4 - lj = li+jj - fac = t(lj)-t(li) - hs(k) = (hs(k-1)-hs(k))/fac - hc(k) = (hc(k-1)-hc(k))/fac - k = k-1 - li = li+1 - 180 continue - ress(nmj) = hs(4)-hs(5) - resc(nmj) = hc(4)-hc(5) - 190 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpbisp.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpbisp.f deleted file mode 100644 index 269a31894c..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpbisp.f +++ /dev/null @@ -1,79 +0,0 @@ - subroutine fpbisp(tx,nx,ty,ny,c,kx,ky,x,mx,y,my,z,wx,wy,lx,ly) -c ..scalar arguments.. - integer nx,ny,kx,ky,mx,my -c ..array arguments.. - integer lx(mx),ly(my) - real*8 tx(nx),ty(ny),c((nx-kx-1)*(ny-ky-1)),x(mx),y(my),z(mx*my), - * wx(mx,kx+1),wy(my,ky+1) -c ..local scalars.. - integer kx1,ky1,l,l1,l2,m,nkx1,nky1 - real*8 arg,sp,tb,te -c ..local arrays.. - real*8 h(6) -c ..subroutine references.. -c fpbspl -c .. - kx1 = kx+1 - nkx1 = nx-kx1 - tb = tx(kx1) - te = tx(nkx1+1) - l = kx1 - l1 = l+1 - do 40 i=1,mx - arg = x(i) - if(arg.lt.tb) arg = tb - if(arg.gt.te) arg = te - 10 if(arg.lt.tx(l1) .or. l.eq.nkx1) go to 20 - l = l1 - l1 = l+1 - go to 10 - 20 call fpbspl(tx,nx,kx,arg,l,h) - lx(i) = l-kx1 - do 30 j=1,kx1 - wx(i,j) = h(j) - 30 continue - 40 continue - ky1 = ky+1 - nky1 = ny-ky1 - tb = ty(ky1) - te = ty(nky1+1) - l = ky1 - l1 = l+1 - do 80 i=1,my - arg = y(i) - if(arg.lt.tb) arg = tb - if(arg.gt.te) arg = te - 50 if(arg.lt.ty(l1) .or. l.eq.nky1) go to 60 - l = l1 - l1 = l+1 - go to 50 - 60 call fpbspl(ty,ny,ky,arg,l,h) - ly(i) = l-ky1 - do 70 j=1,ky1 - wy(i,j) = h(j) - 70 continue - 80 continue - m = 0 - do 130 i=1,mx - l = lx(i)*nky1 - do 90 i1=1,kx1 - h(i1) = wx(i,i1) - 90 continue - do 120 j=1,my - l1 = l+ly(j) - sp = 0. 
- do 110 i1=1,kx1 - l2 = l1 - do 100 j1=1,ky1 - l2 = l2+1 - sp = sp+c(l2)*h(i1)*wy(j,j1) - 100 continue - l1 = l1+nky1 - 110 continue - m = m+1 - z(m) = sp - 120 continue - 130 continue - return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpbspl.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpbspl.f deleted file mode 100644 index 6d155f2f48..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpbspl.f +++ /dev/null @@ -1,42 +0,0 @@ - subroutine fpbspl(t,n,k,x,l,h) -c subroutine fpbspl evaluates the (k+1) non-zero b-splines of -c degree k at t(l) <= x < t(l+1) using the stable recurrence -c relation of de boor and cox. -c Travis Oliphant 2007 -c changed so that weighting of 0 is used when knots with -c multiplicity are present. -c Also, notice that l+k <= n and 1 <= l+1-k -c or else the routine will be accessing memory outside t -c Thus it is imperative that that k <= l <= n-k but this -c is not checked. -c .. -c ..scalar arguments.. - real*8 x - integer n,k,l -c ..array arguments.. - real*8 t(n),h(20) -c ..local scalars.. - real*8 f,one - integer i,j,li,lj -c ..local arrays.. - real*8 hh(19) -c .. - one = 0.1d+01 - h(1) = one - do 20 j=1,k - do 10 i=1,j - hh(i) = h(i) - 10 continue - h(1) = 0.0d0 - do 20 i=1,j - li = l+i - lj = li-j - if (t(li).ne.t(lj)) goto 15 - h(i+1) = 0.0d0 - goto 20 - 15 f = hh(i)/(t(li)-t(lj)) - h(i) = h(i)+f*(t(li)-x) - h(i+1) = f*(x-t(lj)) - 20 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpchec.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpchec.f deleted file mode 100644 index ed60cc9ac9..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpchec.f +++ /dev/null @@ -1,62 +0,0 @@ - subroutine fpchec(x,m,t,n,k,ier) -c subroutine fpchec verifies the number and the position of the knots -c t(j),j=1,2,...,n of a spline of degree k, in relation to the number -c and the position of the data points x(i),i=1,2,...,m. 
if all of the -c following conditions are fulfilled, the error parameter ier is set -c to zero. if one of the conditions is violated ier is set to ten. -c 1) k+1 <= n-k-1 <= m -c 2) t(1) <= t(2) <= ... <= t(k+1) -c t(n-k) <= t(n-k+1) <= ... <= t(n) -c 3) t(k+1) < t(k+2) < ... < t(n-k) -c 4) t(k+1) <= x(i) <= t(n-k) -c 5) the conditions specified by schoenberg and whitney must hold -c for at least one subset of data points, i.e. there must be a -c subset of data points y(j) such that -c t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1 -c .. -c ..scalar arguments.. - integer m,n,k,ier -c ..array arguments.. - real*8 x(m),t(n) -c ..local scalars.. - integer i,j,k1,k2,l,nk1,nk2,nk3 - real*8 tj,tl -c .. - k1 = k+1 - k2 = k1+1 - nk1 = n-k1 - nk2 = nk1+1 - ier = 10 -c check condition no 1 - if(nk1.lt.k1 .or. nk1.gt.m) go to 80 -c check condition no 2 - j = n - do 20 i=1,k - if(t(i).gt.t(i+1)) go to 80 - if(t(j).lt.t(j-1)) go to 80 - j = j-1 - 20 continue -c check condition no 3 - do 30 i=k2,nk2 - if(t(i).le.t(i-1)) go to 80 - 30 continue -c check condition no 4 - if(x(1).lt.t(k1) .or. x(m).gt.t(nk2)) go to 80 -c check condition no 5 - if(x(1).ge.t(k2) .or. x(m).le.t(nk1)) go to 80 - i = 1 - l = k2 - nk3 = nk1-1 - if(nk3.lt.2) go to 70 - do 60 j=2,nk3 - tj = t(j) - l = l+1 - tl = t(l) - 40 i = i+1 - if(i.ge.m) go to 80 - if(x(i).le.tj) go to 40 - if(x(i).ge.tl) go to 80 - 60 continue - 70 ier = 0 - 80 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpched.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpched.f deleted file mode 100644 index 61c41aee40..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpched.f +++ /dev/null @@ -1,69 +0,0 @@ - subroutine fpched(x,m,t,n,k,ib,ie,ier) -c subroutine fpched verifies the number and the position of the knots -c t(j),j=1,2,...,n of a spline of degree k,with ib derative constraints -c at x(1) and ie constraints at x(m), in relation to the number and -c the position of the data points x(i),i=1,2,...,m. 
if all of the -c following conditions are fulfilled, the error parameter ier is set -c to zero. if one of the conditions is violated ier is set to ten. -c 1) k+1 <= n-k-1 <= m + max(0,ib-1) + max(0,ie-1) -c 2) t(1) <= t(2) <= ... <= t(k+1) -c t(n-k) <= t(n-k+1) <= ... <= t(n) -c 3) t(k+1) < t(k+2) < ... < t(n-k) -c 4) t(k+1) <= x(i) <= t(n-k) -c 5) the conditions specified by schoenberg and whitney must hold -c for at least one subset of data points, i.e. there must be a -c subset of data points y(j) such that -c t(j) < y(j) < t(j+k+1), j=1+ib1,2+ib1,...,n-k-1-ie1 -c with ib1 = max(0,ib-1), ie1 = max(0,ie-1) -c .. -c ..scalar arguments.. - integer m,n,k,ib,ie,ier -c ..array arguments.. - real*8 x(m),t(n) -c ..local scalars.. - integer i,ib1,ie1,j,jj,k1,k2,l,nk1,nk2,nk3 - real*8 tj,tl -c .. - k1 = k+1 - k2 = k1+1 - nk1 = n-k1 - nk2 = nk1+1 - ib1 = ib-1 - if(ib1.lt.0) ib1 = 0 - ie1 = ie-1 - if(ie1.lt.0) ie1 = 0 - ier = 10 -c check condition no 1 - if(nk1.lt.k1 .or. nk1.gt.(m+ib1+ie1)) go to 80 -c check condition no 2 - j = n - do 20 i=1,k - if(t(i).gt.t(i+1)) go to 80 - if(t(j).lt.t(j-1)) go to 80 - j = j-1 - 20 continue -c check condition no 3 - do 30 i=k2,nk2 - if(t(i).le.t(i-1)) go to 80 - 30 continue -c check condition no 4 - if(x(1).lt.t(k1) .or. x(m).gt.t(nk2)) go to 80 -c check condition no 5 - if(x(1).ge.t(k2) .or. 
x(m).le.t(nk1)) go to 80 - i = 1 - jj = 2+ib1 - l = jj+k - nk3 = nk1-1-ie1 - if(nk3.lt.jj) go to 70 - do 60 j=jj,nk3 - tj = t(j) - l = l+1 - tl = t(l) - 40 i = i+1 - if(i.ge.m) go to 80 - if(x(i).le.tj) go to 40 - if(x(i).ge.tl) go to 80 - 60 continue - 70 ier = 0 - 80 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpchep.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpchep.f deleted file mode 100644 index 3f75f1cc05..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpchep.f +++ /dev/null @@ -1,81 +0,0 @@ - subroutine fpchep(x,m,t,n,k,ier) -c subroutine fpchep verifies the number and the position of the knots -c t(j),j=1,2,...,n of a periodic spline of degree k, in relation to -c the number and the position of the data points x(i),i=1,2,...,m. -c if all of the following conditions are fulfilled, ier is set -c to zero. if one of the conditions is violated ier is set to ten. -c 1) k+1 <= n-k-1 <= m+k-1 -c 2) t(1) <= t(2) <= ... <= t(k+1) -c t(n-k) <= t(n-k+1) <= ... <= t(n) -c 3) t(k+1) < t(k+2) < ... < t(n-k) -c 4) t(k+1) <= x(i) <= t(n-k) -c 5) the conditions specified by schoenberg and whitney must hold -c for at least one subset of data points, i.e. there must be a -c subset of data points y(j) such that -c t(j) < y(j) < t(j+k+1), j=k+1,...,n-k-1 -c .. -c ..scalar arguments.. - integer m,n,k,ier -c ..array arguments.. - real*8 x(m),t(n) -c ..local scalars.. - integer i,i1,i2,j,j1,k1,k2,l,l1,l2,mm,m1,nk1,nk2 - real*8 per,tj,tl,xi -c .. - k1 = k+1 - k2 = k1+1 - nk1 = n-k1 - nk2 = nk1+1 - m1 = m-1 - ier = 10 -c check condition no 1 - if(nk1.lt.k1 .or. n.gt.m+2*k) go to 130 -c check condition no 2 - j = n - do 20 i=1,k - if(t(i).gt.t(i+1)) go to 130 - if(t(j).lt.t(j-1)) go to 130 - j = j-1 - 20 continue -c check condition no 3 - do 30 i=k2,nk2 - if(t(i).le.t(i-1)) go to 130 - 30 continue -c check condition no 4 - if(x(1).lt.t(k1) .or. 
x(m).gt.t(nk2)) go to 130 -c check condition no 5 - l1 = k1 - l2 = 1 - do 50 l=1,m - xi = x(l) - 40 if(xi.lt.t(l1+1) .or. l.eq.nk1) go to 50 - l1 = l1+1 - l2 = l2+1 - if(l2.gt.k1) go to 60 - go to 40 - 50 continue - l = m - 60 per = t(nk2)-t(k1) - do 120 i1=2,l - i = i1-1 - mm = i+m1 - do 110 j=k1,nk1 - tj = t(j) - j1 = j+k1 - tl = t(j1) - 70 i = i+1 - if(i.gt.mm) go to 120 - i2 = i-m1 - if (i2.le.0) go to 80 - go to 90 - 80 xi = x(i) - go to 100 - 90 xi = x(i2)+per - 100 if(xi.le.tj) go to 70 - if(xi.ge.tl) go to 120 - 110 continue - ier = 0 - go to 130 - 120 continue - 130 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpclos.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpclos.f deleted file mode 100644 index 0cfb875f01..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpclos.f +++ /dev/null @@ -1,714 +0,0 @@ - subroutine fpclos(iopt,idim,m,u,mx,x,w,k,s,nest,tol,maxit,k1,k2, - * n,t,nc,c,fp,fpint,z,a1,a2,b,g1,g2,q,nrdata,ier) -c .. -c ..scalar arguments.. - real*8 s,tol,fp - integer iopt,idim,m,mx,k,nest,maxit,k1,k2,n,nc,ier -c ..array arguments.. - real*8 u(m),x(mx),w(m),t(nest),c(nc),fpint(nest),z(nc),a1(nest,k1) - *, - * a2(nest,k),b(nest,k2),g1(nest,k2),g2(nest,k1),q(m,k1) - integer nrdata(nest) -c ..local scalars.. - real*8 acc,cos,d1,fac,fpart,fpms,fpold,fp0,f1,f2,f3,p,per,pinv,piv - *, - * p1,p2,p3,sin,store,term,ui,wi,rn,one,con1,con4,con9,half - integer i,ich1,ich3,ij,ik,it,iter,i1,i2,i3,j,jj,jk,jper,j1,j2,kk, - * kk1,k3,l,l0,l1,l5,mm,m1,new,nk1,nk2,nmax,nmin,nplus,npl1, - * nrint,n10,n11,n7,n8 -c ..local arrays.. - real*8 h(6),h1(7),h2(6),xi(10) -c ..function references.. - real*8 abs,fprati - integer max0,min0 -c ..subroutine references.. -c fpbacp,fpbspl,fpgivs,fpdisc,fpknot,fprota -c .. 
-c set constants - one = 0.1e+01 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 - half = 0.5e0 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position c -c ************************************************************** c -c given a set of knots we compute the least-squares closed curve c -c sinf(u). if the sum f(p=inf) <= s we accept the choice of knots. c -c if iopt=-1 sinf(u) is the requested curve c -c if iopt=0 or iopt=1 we check whether we can accept the knots: c -c if fp <=s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares curve until finally fp<=s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmax = m+2*k. c -c if s > 0 and c -c iopt=0 we first compute the least-squares polynomial curve of c -c degree k; n = nmin = 2*k+2. since s(u) must be periodic we c -c find that s(u) reduces to a fixed point. c -c iopt=1 we start with the set of knots found at the last c -c call of the routine, except for the case that s > fp0; then c -c we compute directly the least-squares polynomial curve. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - m1 = m-1 - kk = k - kk1 = k1 - k3 = 3*k+1 - nmin = 2*k1 -c determine the length of the period of the splines. - per = u(m)-u(1) - if(iopt.lt.0) go to 50 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s -c determine nmax, the number of knots for periodic spline interpolation - nmax = m+2*k - if(s.gt.0. .or. nmax.eq.nmin) go to 30 -c if s=0, s(u) is an interpolating curve. - n = nmax -c test whether the required storage space exceeds the available one. - if(n.gt.nest) go to 620 -c find the position of the interior knots in case of interpolation. 
- 5 if((k/2)*2 .eq.k) go to 20 - do 10 i=2,m1 - j = i+k - t(j) = u(i) - 10 continue - if(s.gt.0.) go to 50 - kk = k-1 - kk1 = k - if(kk.gt.0) go to 50 - t(1) = t(m)-per - t(2) = u(1) - t(m+1) = u(m) - t(m+2) = t(3)+per - jj = 0 - do 15 i=1,m1 - j = i - do 12 j1=1,idim - jj = jj+1 - c(j) = x(jj) - j = j+n - 12 continue - 15 continue - jj = 1 - j = m - do 17 j1=1,idim - c(j) = c(jj) - j = j+n - jj = jj+n - 17 continue - fp = 0. - fpint(n) = fp0 - fpint(n-1) = 0. - nrdata(n) = 0 - go to 630 - 20 do 25 i=2,m1 - j = i+k - t(j) = (u(i)+u(i-1))*half - 25 continue - go to 50 -c if s > 0 our initial choice depends on the value of iopt. -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c polynomial curve. (i.e. a constant point). -c if iopt=1 and fp0>s we start computing the least-squares closed -c curve according the set of knots found at the last call of the -c routine. - 30 if(iopt.eq.0) go to 35 - if(n.eq.nmin) go to 35 - fp0 = fpint(n) - fpold = fpint(n-1) - nplus = nrdata(n) - if(fp0.gt.s) go to 50 -c the case that s(u) is a fixed point is treated separetely. -c fp0 denotes the corresponding sum of squared residuals. - 35 fp0 = 0. - d1 = 0. - do 37 j=1,idim - z(j) = 0. - 37 continue - jj = 0 - do 45 it=1,m1 - wi = w(it) - call fpgivs(wi,d1,cos,sin) - do 40 j=1,idim - jj = jj+1 - fac = wi*x(jj) - call fprota(cos,sin,fac,z(j)) - fp0 = fp0+fac**2 - 40 continue - 45 continue - do 47 j=1,idim - z(j) = z(j)/d1 - 47 continue -c test whether that fixed point is a solution of our problem. - fpms = fp0-s - if(fpms.lt.acc .or. nmax.eq.nmin) go to 640 - fpold = fp0 -c test whether the required storage space exceeds the available one. - if(n.ge.nest) go to 620 -c start computing the least-squares closed curve with one -c interior knot. - nplus = 1 - n = nmin+1 - mm = (m+1)/2 - t(k2) = u(mm) - nrdata(1) = mm-2 - nrdata(2) = m1-mm -c main loop for the different sets of knots. m is a save upper -c bound for the number of trials. 
- 50 do 340 iter=1,m -c find nrint, the number of knot intervals. - nrint = n-nmin+1 -c find the position of the additional knots which are needed for -c the b-spline representation of s(u). if we take -c t(k+1) = u(1), t(n-k) = u(m) -c t(k+1-j) = t(n-k-j) - per, j=1,2,...k -c t(n-k+j) = t(k+1+j) + per, j=1,2,...k -c then s(u) will be a smooth closed curve if the b-spline -c coefficients satisfy the following conditions -c c((i-1)*n+n7+j) = c((i-1)*n+j), j=1,...k,i=1,2,...,idim (**) -c with n7=n-2*k-1. - t(k1) = u(1) - nk1 = n-k1 - nk2 = nk1+1 - t(nk2) = u(m) - do 60 j=1,k - i1 = nk2+j - i2 = nk2-j - j1 = k1+j - j2 = k1-j - t(i1) = t(j1)+per - t(j2) = t(i2)-per - 60 continue -c compute the b-spline coefficients of the least-squares closed curve -c sinf(u). the observation matrix a is built up row by row while -c taking into account condition (**) and is reduced to triangular -c form by givens transformations . -c at the same time fp=f(p=inf) is computed. -c the n7 x n7 triangularised upper matrix a has the form -c ! a1 ' ! -c a = ! ' a2 ! -c ! 0 ' ! -c with a2 a n7 x k matrix and a1 a n10 x n10 upper triangular -c matrix of bandwith k+1 ( n10 = n7-k). -c initialization. - do 65 i=1,nc - z(i) = 0. - 65 continue - do 70 i=1,nk1 - do 70 j=1,kk1 - a1(i,j) = 0. - 70 continue - n7 = nk1-k - n10 = n7-kk - jper = 0 - fp = 0. - l = k1 - jj = 0 - do 290 it=1,m1 -c fetch the current data point u(it),x(it) - ui = u(it) - wi = w(it) - do 75 j=1,idim - jj = jj+1 - xi(j) = x(jj)*wi - 75 continue -c search for knot interval t(l) <= ui < t(l+1). - 80 if(ui.lt.t(l+1)) go to 85 - l = l+1 - go to 80 -c evaluate the (k+1) non-zero b-splines at ui and store them in q. - 85 call fpbspl(t,n,k,ui,l,h) - do 90 i=1,k1 - q(it,i) = h(i) - h(i) = h(i)*wi - 90 continue - l5 = l-k1 -c test whether the b-splines nj,k+1(u),j=1+n7,...nk1 are all zero at ui - if(l5.lt.n10) go to 285 - if(jper.ne.0) go to 160 -c initialize the matrix a2. - do 95 i=1,n7 - do 95 j=1,kk - a2(i,j) = 0. 
- 95 continue - jk = n10+1 - do 110 i=1,kk - ik = jk - do 100 j=1,kk1 - if(ik.le.0) go to 105 - a2(ik,i) = a1(ik,j) - ik = ik-1 - 100 continue - 105 jk = jk+1 - 110 continue - jper = 1 -c if one of the b-splines nj,k+1(u),j=n7+1,...nk1 is not zero at ui -c we take account of condition (**) for setting up the new row -c of the observation matrix a. this row is stored in the arrays h1 -c (the part with respect to a1) and h2 (the part with -c respect to a2). - 160 do 170 i=1,kk - h1(i) = 0. - h2(i) = 0. - 170 continue - h1(kk1) = 0. - j = l5-n10 - do 210 i=1,kk1 - j = j+1 - l0 = j - 180 l1 = l0-kk - if(l1.le.0) go to 200 - if(l1.le.n10) go to 190 - l0 = l1-n10 - go to 180 - 190 h1(l1) = h(i) - go to 210 - 200 h2(l0) = h2(l0)+h(i) - 210 continue -c rotate the new row of the observation matrix into triangle -c by givens transformations. - if(n10.le.0) go to 250 -c rotation with the rows 1,2,...n10 of matrix a. - do 240 j=1,n10 - piv = h1(1) - if(piv.ne.0.) go to 214 - do 212 i=1,kk - h1(i) = h1(i+1) - 212 continue - h1(kk1) = 0. - go to 240 -c calculate the parameters of the givens transformation. - 214 call fpgivs(piv,a1(j,1),cos,sin) -c transformation to the right hand side. - j1 = j - do 217 j2=1,idim - call fprota(cos,sin,xi(j2),z(j1)) - j1 = j1+n - 217 continue -c transformations to the left hand side with respect to a2. - do 220 i=1,kk - call fprota(cos,sin,h2(i),a2(j,i)) - 220 continue - if(j.eq.n10) go to 250 - i2 = min0(n10-j,kk) -c transformations to the left hand side with respect to a1. - do 230 i=1,i2 - i1 = i+1 - call fprota(cos,sin,h1(i1),a1(j,i1)) - h1(i) = h1(i1) - 230 continue - h1(i1) = 0. - 240 continue -c rotation with the rows n10+1,...n7 of matrix a. - 250 do 270 j=1,kk - ij = n10+j - if(ij.le.0) go to 270 - piv = h2(j) - if(piv.eq.0.) go to 270 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a2(ij,j),cos,sin) -c transformations to right hand side. 
- j1 = ij - do 255 j2=1,idim - call fprota(cos,sin,xi(j2),z(j1)) - j1 = j1+n - 255 continue - if(j.eq.kk) go to 280 - j1 = j+1 -c transformations to left hand side. - do 260 i=j1,kk - call fprota(cos,sin,h2(i),a2(ij,i)) - 260 continue - 270 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 280 do 282 j2=1,idim - fp = fp+xi(j2)**2 - 282 continue - go to 290 -c rotation of the new row of the observation matrix into -c triangle in case the b-splines nj,k+1(u),j=n7+1,...n-k-1 are all zero -c at ui. - 285 j = l5 - do 140 i=1,kk1 - j = j+1 - piv = h(i) - if(piv.eq.0.) go to 140 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a1(j,1),cos,sin) -c transformations to right hand side. - j1 = j - do 125 j2=1,idim - call fprota(cos,sin,xi(j2),z(j1)) - j1 = j1+n - 125 continue - if(i.eq.kk1) go to 150 - i2 = 1 - i3 = i+1 -c transformations to left hand side. - do 130 i1=i3,kk1 - i2 = i2+1 - call fprota(cos,sin,h(i1),a1(j,i2)) - 130 continue - 140 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 150 do 155 j2=1,idim - fp = fp+xi(j2)**2 - 155 continue - 290 continue - fpint(n) = fp0 - fpint(n-1) = fpold - nrdata(n) = nplus -c backward substitution to obtain the b-spline coefficients . - j1 = 1 - do 292 j2=1,idim - call fpbacp(a1,a2,z(j1),n7,kk,c(j1),kk1,nest) - j1 = j1+n - 292 continue -c calculate from condition (**) the remaining coefficients. - do 297 i=1,k - j1 = i - do 295 j=1,idim - j2 = j1+n7 - c(j2) = c(j1) - j1 = j1+n - 295 continue - 297 continue - if(iopt.lt.0) go to 660 -c test whether the approximation sinf(u) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 660 -c if f(p=inf) < s accept the choice of knots. - if(fpms.lt.0.) go to 350 -c if n=nmax, sinf(u) is an interpolating curve. - if(n.eq.nmax) go to 630 -c increase the number of knots. 
-c if n=nest we cannot increase the number of knots because of the -c storage capacity limitation. - if(n.eq.nest) go to 620 -c determine the number of knots nplus we are going to add. - npl1 = nplus*2 - rn = nplus - if(fpold-fp.gt.acc) npl1 = rn*fpms/(fpold-fp) - nplus = min0(nplus*2,max0(npl1,nplus/2,1)) - fpold = fp -c compute the sum of squared residuals for each knot interval -c t(j+k) <= ui <= t(j+k+1) and store it in fpint(j),j=1,2,...nrint. - fpart = 0. - i = 1 - l = k1 - jj = 0 - do 320 it=1,m1 - if(u(it).lt.t(l)) go to 300 - new = 1 - l = l+1 - 300 term = 0. - l0 = l-k2 - do 310 j2=1,idim - fac = 0. - j1 = l0 - do 305 j=1,k1 - j1 = j1+1 - fac = fac+c(j1)*q(it,j) - 305 continue - jj = jj+1 - term = term+(w(it)*(fac-x(jj)))**2 - l0 = l0+n - 310 continue - fpart = fpart+term - if(new.eq.0) go to 320 - if(l.gt.k2) go to 315 - fpint(nrint) = term - new = 0 - go to 320 - 315 store = term*half - fpint(i) = fpart-store - i = i+1 - fpart = store - new = 0 - 320 continue - fpint(nrint) = fpint(nrint)+fpart - do 330 l=1,nplus -c add a new knot - call fpknot(u,m,t,n,fpint,nrdata,nrint,nest,1) -c if n=nmax we locate the knots as for interpolation - if(n.eq.nmax) go to 5 -c test whether we cannot further increase the number of knots. - if(n.eq.nest) go to 340 - 330 continue -c restart the computations with the new set of knots. - 340 continue -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing closed curve sp(u). c -c ********************************************************** c -c we have determined the number of knots and their position. c -c we now compute the b-spline coefficients of the smoothing curve c -c sp(u). the observation matrix a is extended by the rows of matrix c -c b expressing that the kth derivative discontinuities of sp(u) at c -c the interior knots t(k+2),...t(n-k-1) must be zero. the corres- c -c ponding weights of these additional rows are set to 1/p. 
c -c iteratively we then have to determine the value of p such that f(p),c -c the sum of squared residuals be = s. we already know that the least-c -c squares polynomial curve corresponds to p=0, and that the least- c -c squares periodic spline curve corresponds to p=infinity. the c -c iteration process which is proposed here, makes use of rational c -c interpolation. since f(p) is a convex and strictly decreasing c -c function of p, it can be approximated by a rational function c -c r(p) = (u*p+v)/(p+w). three values of p(p1,p2,p3) with correspond- c -c ing values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used c -c to calculate the new value of p such that r(p)=s. convergence is c -c guaranteed by taking f1>0 and f3<0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jump of the kth derivative of the -c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b. - 350 call fpdisc(t,n,k2,b,nest) -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - n11 = n10-1 - n8 = n7-1 - p = 0. - l = n7 - do 352 i=1,k - j = k+1-i - p = p+a2(l,j) - l = l-1 - if(l.eq.0) go to 356 - 352 continue - do 354 i=1,n10 - p = p+a1(i,1) - 354 continue - 356 rn = n7 - p = rn/p - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p) = s. - do 595 iter=1,maxit -c form the matrix g as the matrix a extended by the rows of matrix b. -c the rows of matrix b with weight 1/p are rotated into -c the triangularised observation matrix a. -c after triangularisation our n7 x n7 matrix g takes the form -c ! g1 ' ! -c g = ! ' g2 ! -c ! 0 ' ! -c with g2 a n7 x (k+1) matrix and g1 a n11 x n11 upper triangular -c matrix of bandwidth k+2. ( n11 = n7-k-1) - pinv = one/p -c store matrix a into g - do 358 i=1,nc - c(i) = z(i) - 358 continue - do 360 i=1,n7 - g1(i,k1) = a1(i,k1) - g1(i,k2) = 0. - g2(i,1) = 0. 
- do 360 j=1,k - g1(i,j) = a1(i,j) - g2(i,j+1) = a2(i,j) - 360 continue - l = n10 - do 370 j=1,k1 - if(l.le.0) go to 375 - g2(l,1) = a1(l,j) - l = l-1 - 370 continue - 375 do 540 it=1,n8 -c fetch a new row of matrix b and store it in the arrays h1 (the part -c with respect to g1) and h2 (the part with respect to g2). - do 380 j=1,idim - xi(j) = 0. - 380 continue - do 385 i=1,k1 - h1(i) = 0. - h2(i) = 0. - 385 continue - h1(k2) = 0. - if(it.gt.n11) go to 420 - l = it - l0 = it - do 390 j=1,k2 - if(l0.eq.n10) go to 400 - h1(j) = b(it,j)*pinv - l0 = l0+1 - 390 continue - go to 470 - 400 l0 = 1 - do 410 l1=j,k2 - h2(l0) = b(it,l1)*pinv - l0 = l0+1 - 410 continue - go to 470 - 420 l = 1 - i = it-n10 - do 460 j=1,k2 - i = i+1 - l0 = i - 430 l1 = l0-k1 - if(l1.le.0) go to 450 - if(l1.le.n11) go to 440 - l0 = l1-n11 - go to 430 - 440 h1(l1) = b(it,j)*pinv - go to 460 - 450 h2(l0) = h2(l0)+b(it,j)*pinv - 460 continue - if(n11.le.0) go to 510 -c rotate this row into triangle by givens transformations -c rotation with the rows l,l+1,...n11. - 470 do 500 j=l,n11 - piv = h1(1) -c calculate the parameters of the givens transformation. - call fpgivs(piv,g1(j,1),cos,sin) -c transformation to right hand side. - j1 = j - do 475 j2=1,idim - call fprota(cos,sin,xi(j2),c(j1)) - j1 = j1+n - 475 continue -c transformation to the left hand side with respect to g2. - do 480 i=1,k1 - call fprota(cos,sin,h2(i),g2(j,i)) - 480 continue - if(j.eq.n11) go to 510 - i2 = min0(n11-j,k1) -c transformation to the left hand side with respect to g1. - do 490 i=1,i2 - i1 = i+1 - call fprota(cos,sin,h1(i1),g1(j,i1)) - h1(i) = h1(i1) - 490 continue - h1(i1) = 0. - 500 continue -c rotation with the rows n11+1,...n7 - 510 do 530 j=1,k1 - ij = n11+j - if(ij.le.0) go to 530 - piv = h2(j) -c calculate the parameters of the givens transformation - call fpgivs(piv,g2(ij,j),cos,sin) -c transformation to the right hand side. 
- j1 = ij - do 515 j2=1,idim - call fprota(cos,sin,xi(j2),c(j1)) - j1 = j1+n - 515 continue - if(j.eq.k1) go to 540 - j1 = j+1 -c transformation to the left hand side. - do 520 i=j1,k1 - call fprota(cos,sin,h2(i),g2(ij,i)) - 520 continue - 530 continue - 540 continue -c backward substitution to obtain the b-spline coefficients - j1 = 1 - do 542 j2=1,idim - call fpbacp(g1,g2,c(j1),n7,k1,c(j1),k2,nest) - j1 = j1+n - 542 continue -c calculate from condition (**) the remaining b-spline coefficients. - do 547 i=1,k - j1 = i - do 545 j=1,idim - j2 = j1+n7 - c(j2) = c(j1) - j1 = j1+n - 545 continue - 547 continue -c computation of f(p). - fp = 0. - l = k1 - jj = 0 - do 570 it=1,m1 - if(u(it).lt.t(l)) go to 550 - l = l+1 - 550 l0 = l-k2 - term = 0. - do 565 j2=1,idim - fac = 0. - j1 = l0 - do 560 j=1,k1 - j1 = j1+1 - fac = fac+c(j1)*q(it,j) - 560 continue - jj = jj+1 - term = term+(fac-x(jj))**2 - l0 = l0+n - 565 continue - fp = fp+term*w(it)**2 - 570 continue -c test whether the approximation sp(u) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 660 -c test whether the maximal number of iterations is reached. - if(iter.eq.maxit) go to 600 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 580 - if((f2-f3) .gt. acc) go to 575 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 +p2*con1 - go to 595 - 575 if(f2.lt.0.) ich3 = 1 - 580 if(ich1.ne.0) go to 590 - if((f1-f2) .gt. acc) go to 585 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 595 - if(p.ge.p3) p = p2*con1 +p3*con9 - go to 595 - 585 if(f2.gt.0.) ich1 = 1 -c test whether the iteration process proceeds as theoretically -c expected. - 590 if(f2.ge.f1 .or. f2.le.f3) go to 610 -c find the new value for p. - p = fprati(p1,f1,p2,f2,p3,f3) - 595 continue -c error codes and messages. 
- 600 ier = 3 - go to 660 - 610 ier = 2 - go to 660 - 620 ier = 1 - go to 660 - 630 ier = -1 - go to 660 - 640 ier = -2 -c the point (z(1),z(2),...,z(idim)) is a solution of our problem. -c a constant function is a spline of degree k with all b-spline -c coefficients equal to that constant. - do 650 i=1,k1 - rn = k1-i - t(i) = u(1)-rn*per - j = i+k1 - rn = i-1 - t(j) = u(m)+rn*per - 650 continue - n = nmin - j1 = 0 - do 658 j=1,idim - fac = z(j) - j2 = j1 - do 654 i=1,k1 - j2 = j2+1 - c(j2) = fac - 654 continue - j1 = j1+n - 658 continue - fp = fp0 - fpint(n) = fp0 - fpint(n-1) = 0. - nrdata(n) = 0 - 660 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcoco.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcoco.f deleted file mode 100644 index ed14c422ba..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcoco.f +++ /dev/null @@ -1,168 +0,0 @@ - subroutine fpcoco(iopt,m,x,y,w,v,s,nest,maxtr,maxbin,n,t,c,sq,sx, - * bind,e,wrk,lwrk,iwrk,kwrk,ier) -c ..scalar arguments.. - real*8 s,sq - integer iopt,m,nest,maxtr,maxbin,n,lwrk,kwrk,ier -c ..array arguments.. - integer iwrk(kwrk) - real*8 x(m),y(m),w(m),v(m),t(nest),c(nest),sx(m),e(nest),wrk(lwrk) - * - logical bind(nest) -c ..local scalars.. - integer i,ia,ib,ic,iq,iu,iz,izz,i1,j,k,l,l1,m1,nmax,nr,n4,n6,n8, - * ji,jib,jjb,jl,jr,ju,mb,nm - real*8 sql,sqmax,term,tj,xi,half -c ..subroutine references.. -c fpcosp,fpbspl,fpadno,fpdeno,fpseno,fpfrno -c .. -c set constant - half = 0.5e0 -c determine the maximal admissible number of knots. - nmax = m+4 -c the initial choice of knots depends on the value of iopt. -c if iopt=0 the program starts with the minimal number of knots -c so that can be guarantied that the concavity/convexity constraints -c will be satisfied. -c if iopt = 1 the program will continue from the point on where she -c left at the foregoing call. - if(iopt.gt.0) go to 80 -c find the minimal number of knots. 
-c a knot is located at the data point x(i), i=2,3,...m-1 if -c 1) v(i) ^= 0 and -c 2) v(i)*v(i-1) <= 0 or v(i)*v(i+1) <= 0. - m1 = m-1 - n = 4 - do 20 i=2,m1 - if(v(i).eq.0. .or. (v(i)*v(i-1).gt.0. .and. - * v(i)*v(i+1).gt.0.)) go to 20 - n = n+1 -c test whether the required storage space exceeds the available one. - if(n+4.gt.nest) go to 200 - t(n) = x(i) - 20 continue -c find the position of the knots t(1),...t(4) and t(n-3),...t(n) which -c are needed for the b-spline representation of s(x). - do 30 i=1,4 - t(i) = x(1) - n = n+1 - t(n) = x(m) - 30 continue -c test whether the minimum number of knots exceeds the maximum number. - if(n.gt.nmax) go to 210 -c main loop for the different sets of knots. -c find corresponding values e(j) to the knots t(j+3),j=1,2,...n-6 -c e(j) will take the value -1,1, or 0 according to the requirement -c that s(x) must be locally convex or concave at t(j+3) or that the -c sign of s''(x) is unrestricted at that point. - 40 i= 1 - xi = x(1) - j = 4 - tj = t(4) - n6 = n-6 - do 70 l=1,n6 - 50 if(xi.eq.tj) go to 60 - i = i+1 - xi = x(i) - go to 50 - 60 e(l) = v(i) - j = j+1 - tj = t(j) - 70 continue -c we partition the working space - nm = n+maxbin - mb = maxbin+1 - ia = 1 - ib = ia+4*n - ic = ib+nm*maxbin - iz = ic+n - izz = iz+n - iu = izz+n - iq = iu+maxbin - ji = 1 - ju = ji+maxtr - jl = ju+maxtr - jr = jl+maxtr - jjb = jr+maxtr - jib = jjb+mb -c given the set of knots t(j),j=1,2,...n, find the least-squares cubic -c spline which satisfies the imposed concavity/convexity constraints. - call fpcosp(m,x,y,w,n,t,e,maxtr,maxbin,c,sq,sx,bind,nm,mb,wrk(ia), - * - * wrk(ib),wrk(ic),wrk(iz),wrk(izz),wrk(iu),wrk(iq),iwrk(ji), - * iwrk(ju),iwrk(jl),iwrk(jr),iwrk(jjb),iwrk(jib),ier) -c if sq <= s or in case of abnormal exit from fpcosp, control is -c repassed to the driver program. - if(sq.le.s .or. ier.gt.0) go to 300 -c calculate for each knot interval t(l-1) <= xi <= t(l) the -c sum((wi*(yi-s(xi)))**2). 
-c find the interval t(k-1) <= x <= t(k) for which this sum is maximal -c on the condition that this interval contains at least one interior -c data point x(nr) and that s(x) is not given there by a straight line. - 80 sqmax = 0. - sql = 0. - l = 5 - nr = 0 - i1 = 1 - n4 = n-4 - do 110 i=1,m - term = (w(i)*(sx(i)-y(i)))**2 - if(x(i).lt.t(l) .or. l.gt.n4) go to 100 - term = term*half - sql = sql+term - if(i-i1.le.1 .or. (bind(l-4).and.bind(l-3))) go to 90 - if(sql.le.sqmax) go to 90 - k = l - sqmax = sql - nr = i1+(i-i1)/2 - 90 l = l+1 - i1 = i - sql = 0. - 100 sql = sql+term - 110 continue - if(m-i1.le.1 .or. (bind(l-4).and.bind(l-3))) go to 120 - if(sql.le.sqmax) go to 120 - k = l - nr = i1+(m-i1)/2 -c if no such interval is found, control is repassed to the driver -c program (ier = -1). - 120 if(nr.eq.0) go to 190 -c if s(x) is given by the same straight line in two succeeding knot -c intervals t(l-1) <= x <= t(l) and t(l) <= x <= t(l+1),delete t(l) - n8 = n-8 - l1 = 0 - if(n8.le.0) go to 150 - do 140 i=1,n8 - if(.not. (bind(i).and.bind(i+1).and.bind(i+2))) go to 140 - l = i+4-l1 - if(k.gt.l) k = k-1 - n = n-1 - l1 = l1+1 - do 130 j=l,n - t(j) = t(j+1) - 130 continue - 140 continue -c test whether we cannot further increase the number of knots. - 150 if(n.eq.nmax) go to 180 - if(n.eq.nest) go to 170 -c locate an additional knot at the point x(nr). - j = n - do 160 i=k,n - t(j+1) = t(j) - j = j-1 - 160 continue - t(k) = x(nr) - n = n+1 -c restart the computations with the new set of knots. - go to 40 -c error codes and messages. 
- 170 ier = -3 - go to 300 - 180 ier = -2 - go to 300 - 190 ier = -1 - go to 300 - 200 ier = 4 - go to 300 - 210 ier = 5 - 300 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcons.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcons.f deleted file mode 100644 index 2fcf0d213a..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcons.f +++ /dev/null @@ -1,442 +0,0 @@ - subroutine fpcons(iopt,idim,m,u,mx,x,w,ib,ie,k,s,nest,tol,maxit, - * k1,k2,n,t,nc,c,fp,fpint,z,a,b,g,q,nrdata,ier) -c .. -c ..scalar arguments.. - real*8 s,tol,fp - integer iopt,idim,m,mx,ib,ie,k,nest,maxit,k1,k2,n,nc,ier -c ..array arguments.. - real*8 u(m),x(mx),w(m),t(nest),c(nc),fpint(nest), - * z(nc),a(nest,k1),b(nest,k2),g(nest,k2),q(m,k1) - integer nrdata(nest) -c ..local scalars.. - real*8 acc,con1,con4,con9,cos,fac,fpart,fpms,fpold,fp0,f1,f2,f3, - * half,one,p,pinv,piv,p1,p2,p3,rn,sin,store,term,ui,wi - integer i,ich1,ich3,it,iter,i1,i2,i3,j,jb,je,jj,j1,j2,j3,kbe, - * l,li,lj,l0,mb,me,mm,new,nk1,nmax,nmin,nn,nplus,npl1,nrint,n8 -c ..local arrays.. - real*8 h(7),xi(10) -c ..function references - real*8 abs,fprati - integer max0,min0 -c ..subroutine references.. -c fpbacp,fpbspl,fpgivs,fpdisc,fpknot,fprota -c .. -c set constants - one = 0.1e+01 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 - half = 0.5e0 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position c -c ************************************************************** c -c given a set of knots we compute the least-squares curve sinf(u), c -c and the corresponding sum of squared residuals fp=f(p=inf). c -c if iopt=-1 sinf(u) is the requested curve. c -c if iopt=0 or iopt=1 we check whether we can accept the knots: c -c if fp <=s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares curve until finally fp<=s. 
c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmax = m+k+1-max(0,ib-1)-max(0,ie-1) c -c if s > 0 and c -c iopt=0 we first compute the least-squares polynomial curve of c -c degree k; n = nmin = 2*k+2 c -c iopt=1 we start with the set of knots found at the last c -c call of the routine, except for the case that s > fp0; then c -c we compute directly the polynomial curve of degree k. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c determine nmin, the number of knots for polynomial approximation. - nmin = 2*k1 -c find which data points are to be concidered. - mb = 2 - jb = ib - if(ib.gt.0) go to 10 - mb = 1 - jb = 1 - 10 me = m-1 - je = ie - if(ie.gt.0) go to 20 - me = m - je = 1 - 20 if(iopt.lt.0) go to 60 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s -c determine nmax, the number of knots for spline interpolation. - kbe = k1-jb-je - mmin = kbe+2 - mm = m-mmin - nmax = nmin+mm - if(s.gt.0.) go to 40 -c if s=0, s(u) is an interpolating curve. -c test whether the required storage space exceeds the available one. - n = nmax - if(nmax.gt.nest) go to 420 -c find the position of the interior knots in case of interpolation. - if(mm.eq.0) go to 60 - 25 i = k2 - j = 3-jb+k/2 - do 30 l=1,mm - t(i) = u(j) - i = i+1 - j = j+1 - 30 continue - go to 60 -c if s>0 our initial choice of knots depends on the value of iopt. -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c polynomial curve which is a spline curve without interior knots. -c if iopt=1 and fp0>s we start computing the least squares spline curve -c according to the set of knots found at the last call of the routine. - 40 if(iopt.eq.0) go to 50 - if(n.eq.nmin) go to 50 - fp0 = fpint(n) - fpold = fpint(n-1) - nplus = nrdata(n) - if(fp0.gt.s) go to 60 - 50 n = nmin - fpold = 0. 
- nplus = 0 - nrdata(1) = m-2 -c main loop for the different sets of knots. m is a save upper bound -c for the number of trials. - 60 do 200 iter = 1,m - if(n.eq.nmin) ier = -2 -c find nrint, tne number of knot intervals. - nrint = n-nmin+1 -c find the position of the additional knots which are needed for -c the b-spline representation of s(u). - nk1 = n-k1 - i = n - do 70 j=1,k1 - t(j) = u(1) - t(i) = u(m) - i = i-1 - 70 continue -c compute the b-spline coefficients of the least-squares spline curve -c sinf(u). the observation matrix a is built up row by row and -c reduced to upper triangular form by givens transformations. -c at the same time fp=f(p=inf) is computed. - fp = 0. -c nn denotes the dimension of the splines - nn = nk1-ib-ie -c initialize the b-spline coefficients and the observation matrix a. - do 75 i=1,nc - z(i) = 0. - c(i) = 0. - 75 continue - if(me.lt.mb) go to 134 - if(nn.eq.0) go to 82 - do 80 i=1,nn - do 80 j=1,k1 - a(i,j) = 0. - 80 continue - 82 l = k1 - jj = (mb-1)*idim - do 130 it=mb,me -c fetch the current data point u(it),x(it). - ui = u(it) - wi = w(it) - do 84 j=1,idim - jj = jj+1 - xi(j) = x(jj)*wi - 84 continue -c search for knot interval t(l) <= ui < t(l+1). - 86 if(ui.lt.t(l+1) .or. l.eq.nk1) go to 90 - l = l+1 - go to 86 -c evaluate the (k+1) non-zero b-splines at ui and store them in q. - 90 call fpbspl(t,n,k,ui,l,h) - do 92 i=1,k1 - q(it,i) = h(i) - h(i) = h(i)*wi - 92 continue -c take into account that certain b-spline coefficients must be zero. - lj = k1 - j = nk1-l-ie - if(j.ge.0) go to 94 - lj = lj+j - 94 li = 1 - j = l-k1-ib - if(j.ge.0) go to 96 - li = li-j - j = 0 - 96 if(li.gt.lj) go to 120 -c rotate the new row of the observation matrix into triangle. - do 110 i=li,lj - j = j+1 - piv = h(i) - if(piv.eq.0.) go to 110 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(j,1),cos,sin) -c transformations to right hand side. 
- j1 = j - do 98 j2 =1,idim - call fprota(cos,sin,xi(j2),z(j1)) - j1 = j1+n - 98 continue - if(i.eq.lj) go to 120 - i2 = 1 - i3 = i+1 - do 100 i1 = i3,lj - i2 = i2+1 -c transformations to left hand side. - call fprota(cos,sin,h(i1),a(j,i2)) - 100 continue - 110 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 120 do 125 j2=1,idim - fp = fp+xi(j2)**2 - 125 continue - 130 continue - if(ier.eq.(-2)) fp0 = fp - fpint(n) = fp0 - fpint(n-1) = fpold - nrdata(n) = nplus -c backward substitution to obtain the b-spline coefficients. - if(nn.eq.0) go to 134 - j1 = 1 - do 132 j2=1,idim - j3 = j1+ib - call fpback(a,z(j1),nn,k1,c(j3),nest) - j1 = j1+n - 132 continue -c test whether the approximation sinf(u) is an acceptable solution. - 134 if(iopt.lt.0) go to 440 - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c if f(p=inf) < s accept the choice of knots. - if(fpms.lt.0.) go to 250 -c if n = nmax, sinf(u) is an interpolating spline curve. - if(n.eq.nmax) go to 430 -c increase the number of knots. -c if n=nest we cannot increase the number of knots because of -c the storage capacity limitation. - if(n.eq.nest) go to 420 -c determine the number of knots nplus we are going to add. - if(ier.eq.0) go to 140 - nplus = 1 - ier = 0 - go to 150 - 140 npl1 = nplus*2 - rn = nplus - if(fpold-fp.gt.acc) npl1 = rn*fpms/(fpold-fp) - nplus = min0(nplus*2,max0(npl1,nplus/2,1)) - 150 fpold = fp -c compute the sum of squared residuals for each knot interval -c t(j+k) <= u(i) <= t(j+k+1) and store it in fpint(j),j=1,2,...nrint. - fpart = 0. - i = 1 - l = k2 - new = 0 - jj = (mb-1)*idim - do 180 it=mb,me - if(u(it).lt.t(l) .or. l.gt.nk1) go to 160 - new = 1 - l = l+1 - 160 term = 0. - l0 = l-k2 - do 175 j2=1,idim - fac = 0. 
- j1 = l0 - do 170 j=1,k1 - j1 = j1+1 - fac = fac+c(j1)*q(it,j) - 170 continue - jj = jj+1 - term = term+(w(it)*(fac-x(jj)))**2 - l0 = l0+n - 175 continue - fpart = fpart+term - if(new.eq.0) go to 180 - store = term*half - fpint(i) = fpart-store - i = i+1 - fpart = store - new = 0 - 180 continue - fpint(nrint) = fpart - do 190 l=1,nplus -c add a new knot. - call fpknot(u,m,t,n,fpint,nrdata,nrint,nest,1) -c if n=nmax we locate the knots as for interpolation - if(n.eq.nmax) go to 25 -c test whether we cannot further increase the number of knots. - if(n.eq.nest) go to 200 - 190 continue -c restart the computations with the new set of knots. - 200 continue -c test whether the least-squares kth degree polynomial curve is a -c solution of our approximation problem. - 250 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline curve sp(u). c -c ********************************************************** c -c we have determined the number of knots and their position. c -c we now compute the b-spline coefficients of the smoothing curve c -c sp(u). the observation matrix a is extended by the rows of matrix c -c b expressing that the kth derivative discontinuities of sp(u) at c -c the interior knots t(k+2),...t(n-k-1) must be zero. the corres- c -c ponding weights of these additional rows are set to 1/p. c -c iteratively we then have to determine the value of p such that f(p),c -c the sum of squared residuals be = s. we already know that the least c -c squares kth degree polynomial curve corresponds to p=0, and that c -c the least-squares spline curve corresponds to p=infinity. the c -c iteration process which is proposed here, makes use of rational c -c interpolation. since f(p) is a convex and strictly decreasing c -c function of p, it can be approximated by a rational function c -c r(p) = (u*p+v)/(p+w). 
three values of p(p1,p2,p3) with correspond- c -c ing values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used c -c to calculate the new value of p such that r(p)=s. convergence is c -c guaranteed by taking f1>0 and f3<0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jump of the kth derivative of the -c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b. - call fpdisc(t,n,k2,b,nest) -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = 0. - do 252 i=1,nn - p = p+a(i,1) - 252 continue - rn = nn - p = rn/p - ich1 = 0 - ich3 = 0 - n8 = n-nmin -c iteration process to find the root of f(p) = s. - do 360 iter=1,maxit -c the rows of matrix b with weight 1/p are rotated into the -c triangularised observation matrix a which is stored in g. - pinv = one/p - do 255 i=1,nc - c(i) = z(i) - 255 continue - do 260 i=1,nn - g(i,k2) = 0. - do 260 j=1,k1 - g(i,j) = a(i,j) - 260 continue - do 300 it=1,n8 -c the row of matrix b is rotated into triangle by givens transformation - do 264 i=1,k2 - h(i) = b(it,i)*pinv - 264 continue - do 268 j=1,idim - xi(j) = 0. - 268 continue -c take into account that certain b-spline coefficients must be zero. - if(it.gt.ib) go to 274 - j1 = ib-it+2 - j2 = 1 - do 270 i=j1,k2 - h(j2) = h(i) - j2 = j2+1 - 270 continue - do 272 i=j2,k2 - h(i) = 0. - 272 continue - 274 jj = max0(1,it-ib) - do 290 j=jj,nn - piv = h(1) -c calculate the parameters of the givens transformation. - call fpgivs(piv,g(j,1),cos,sin) -c transformations to right hand side. - j1 = j - do 277 j2=1,idim - call fprota(cos,sin,xi(j2),c(j1)) - j1 = j1+n - 277 continue - if(j.eq.nn) go to 300 - i2 = min0(nn-j,k1) - do 280 i=1,i2 -c transformations to left hand side. - i1 = i+1 - call fprota(cos,sin,h(i1),g(j,i1)) - h(i) = h(i1) - 280 continue - h(i2+1) = 0. - 290 continue - 300 continue -c backward substitution to obtain the b-spline coefficients. 
- j1 = 1 - do 308 j2=1,idim - j3 = j1+ib - call fpback(g,c(j1),nn,k2,c(j3),nest) - if(ib.eq.0) go to 306 - j3 = j1 - do 304 i=1,ib - c(j3) = 0. - j3 = j3+1 - 304 continue - 306 j1 =j1+n - 308 continue -c computation of f(p). - fp = 0. - l = k2 - jj = (mb-1)*idim - do 330 it=mb,me - if(u(it).lt.t(l) .or. l.gt.nk1) go to 310 - l = l+1 - 310 l0 = l-k2 - term = 0. - do 325 j2=1,idim - fac = 0. - j1 = l0 - do 320 j=1,k1 - j1 = j1+1 - fac = fac+c(j1)*q(it,j) - 320 continue - jj = jj+1 - term = term+(fac-x(jj))**2 - l0 = l0+n - 325 continue - fp = fp+term*w(it)**2 - 330 continue -c test whether the approximation sp(u) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximal number of iterations is reached. - if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 340 - if((f2-f3).gt.acc) go to 335 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p=p1*con9 + p2*con1 - go to 360 - 335 if(f2.lt.0.) ich3=1 - 340 if(ich1.ne.0) go to 350 - if((f1-f2).gt.acc) go to 345 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 360 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 360 - 345 if(f2.gt.0.) ich1=1 -c test whether the iteration process proceeds as theoretically -c expected. - 350 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value for p. - p = fprati(p1,f1,p2,f2,p3,f3) - 360 continue -c error codes and messages. 
- 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - 440 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcosp.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcosp.f deleted file mode 100644 index 7a65ed6a8f..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcosp.f +++ /dev/null @@ -1,362 +0,0 @@ - subroutine fpcosp(m,x,y,w,n,t,e,maxtr,maxbin,c,sq,sx,bind,nm,mb,a, - * - * b,const,z,zz,u,q,info,up,left,right,jbind,ibind,ier) -c .. -c ..scalar arguments.. - real*8 sq - integer m,n,maxtr,maxbin,nm,mb,ier -c ..array arguments.. - real*8 x(m),y(m),w(m),t(n),e(n),c(n),sx(m),a(n,4),b(nm,maxbin), - * const(n),z(n),zz(n),u(maxbin),q(m,4) - integer info(maxtr),up(maxtr),left(maxtr),right(maxtr),jbind(mb), - * ibind(mb) - logical bind(n) -c ..local scalars.. - integer count,i,i1,j,j1,j2,j3,k,kdim,k1,k2,k3,k4,k5,k6, - * l,lp1,l1,l2,l3,merk,nbind,number,n1,n4,n6 - real*8 f,wi,xi -c ..local array.. - real*8 h(4) -c ..subroutine references.. -c fpbspl,fpadno,fpdeno,fpfrno,fpseno -c .. -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c if we use the b-spline representation of s(x) our approximation c -c problem results in a quadratic programming problem: c -c find the b-spline coefficients c(j),j=1,2,...n-4 such that c -c (1) sumi((wi*(yi-sumj(cj*nj(xi))))**2),i=1,2,...m is minimal c -c (2) sumj(cj*n''j(t(l+3)))*e(l) <= 0, l=1,2,...n-6. c -c to solve this problem we use the theil-van de panne procedure. c -c if the inequality constraints (2) are numbered from 1 to n-6, c -c this algorithm finds a subset of constraints ibind(1)..ibind(nbind) c -c such that the solution of the minimization problem (1) with these c -c constraints in equality form, satisfies all constraints. such a c -c feasible solution is optimal if the lagrange parameters associated c -c with that problem with equality constraints, are all positive. 
c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c determine n6, the number of inequality constraints. - n6 = n-6 -c fix the parameters which determine these constraints. - do 10 i=1,n6 - const(i) = e(i)*(t(i+4)-t(i+1))/(t(i+5)-t(i+2)) - 10 continue -c initialize the triply linked tree which is used to find the subset -c of constraints ibind(1),...ibind(nbind). - count = 1 - info(1) = 0 - left(1) = 0 - right(1) = 0 - up(1) = 1 - merk = 1 -c set up the normal equations n'nc=n'y where n denotes the m x (n-4) -c observation matrix with elements ni,j = wi*nj(xi) and y is the -c column vector with elements yi*wi. -c from the properties of the b-splines nj(x),j=1,2,...n-4, it follows -c that n'n is a (n-4) x (n-4) positive definit bandmatrix of -c bandwidth 7. the matrices n'n and n'y are built up in a and z. - n4 = n-4 -c initialization - do 20 i=1,n4 - z(i) = 0. - do 20 j=1,4 - a(i,j) = 0. - 20 continue - l = 4 - lp1 = l+1 - do 70 i=1,m -c fetch the current row of the observation matrix. - xi = x(i) - wi = w(i)**2 -c search for knot interval t(l) <= xi < t(l+1) - 30 if(xi.lt.t(lp1) .or. l.eq.n4) go to 40 - l = lp1 - lp1 = l+1 - go to 30 -c evaluate the four non-zero cubic b-splines nj(xi),j=l-3,...l. - 40 call fpbspl(t,n,3,xi,l,h) -c store in q these values h(1),h(2),...h(4). - do 50 j=1,4 - q(i,j) = h(j) - 50 continue -c add the contribution of the current row of the observation matrix -c n to the normal equations. - l3 = l-3 - k1 = 0 - do 60 j1 = l3,l - k1 = k1+1 - f = h(k1) - z(j1) = z(j1)+f*wi*y(i) - k2 = k1 - j2 = 4 - do 60 j3 = j1,l - a(j3,j2) = a(j3,j2)+f*wi*h(k2) - k2 = k2+1 - j2 = j2-1 - 60 continue - 70 continue -c since n'n is a symmetric matrix it can be factorized as -c (3) n'n = (r1)'(d1)(r1) -c with d1 a diagonal matrix and r1 an (n-4) x (n-4) unit upper -c triangular matrix of bandwidth 4. the matrices r1 and d1 are built -c up in a. 
at the same time we solve the systems of equations -c (4) (r1)'(z2) = n'y -c (5) (d1) (z1) = (z2) -c the vectors z2 and z1 are kept in zz and z. - do 140 i=1,n4 - k1 = 1 - if(i.lt.4) k1 = 5-i - k2 = i-4+k1 - k3 = k2 - do 100 j=k1,4 - k4 = j-1 - k5 = 4-j+k1 - f = a(i,j) - if(k1.gt.k4) go to 90 - k6 = k2 - do 80 k=k1,k4 - f = f-a(i,k)*a(k3,k5)*a(k6,4) - k5 = k5+1 - k6 = k6+1 - 80 continue - 90 if(j.eq.4) go to 110 - a(i,j) = f/a(k3,4) - k3 = k3+1 - 100 continue - 110 a(i,4) = f - f = z(i) - if(i.eq.1) go to 130 - k4 = i - do 120 j=k1,3 - k = k1+3-j - k4 = k4-1 - f = f-a(i,k)*z(k4)*a(k4,4) - 120 continue - 130 z(i) = f/a(i,4) - zz(i) = f - 140 continue -c start computing the least-squares cubic spline without taking account -c of any constraint. - nbind = 0 - n1 = 1 - ibind(1) = 0 -c main loop for the least-squares problems with different subsets of -c the constraints (2) in equality form. the resulting b-spline coeff. -c c and lagrange parameters u are the solution of the system -c ! n'n b' ! ! c ! ! n'y ! -c (6) ! ! ! ! = ! ! -c ! b 0 ! ! u ! ! 0 ! -c z1 is stored into array c. - 150 do 160 i=1,n4 - c(i) = z(i) - 160 continue -c if there are no equality constraints, compute the coeff. c directly. - if(nbind.eq.0) go to 370 -c initialization - kdim = n4+nbind - do 170 i=1,nbind - do 170 j=1,kdim - b(j,i) = 0. - 170 continue -c matrix b is built up,expressing that the constraints nrs ibind(1),... -c ibind(nbind) must be satisfied in equality form. - do 180 i=1,nbind - l = ibind(i) - b(l,i) = e(l) - b(l+1,i) = -(e(l)+const(l)) - b(l+2,i) = const(l) - 180 continue -c find the matrix (b1) as the solution of the system of equations -c (7) (r1)'(d1)(b1) = b' -c (b1) is built up in the upper part of the array b(rows 1,...n-4). 
- do 220 k1=1,nbind - l = ibind(k1) - do 210 i=l,n4 - f = b(i,k1) - if(i.eq.1) go to 200 - k2 = 3 - if(i.lt.4) k2 = i-1 - do 190 k3=1,k2 - l1 = i-k3 - l2 = 4-k3 - f = f-b(l1,k1)*a(i,l2)*a(l1,4) - 190 continue - 200 b(i,k1) = f/a(i,4) - 210 continue - 220 continue -c factorization of the symmetric matrix -(b1)'(d1)(b1) -c (8) -(b1)'(d1)(b1) = (r2)'(d2)(r2) -c with (d2) a diagonal matrix and (r2) an nbind x nbind unit upper -c triangular matrix. the matrices r2 and d2 are built up in the lower -c part of the array b (rows n-3,n-2,...n-4+nbind). - do 270 i=1,nbind - i1 = i-1 - do 260 j=i,nbind - f = 0. - do 230 k=1,n4 - f = f+b(k,i)*b(k,j)*a(k,4) - 230 continue - k1 = n4+1 - if(i1.eq.0) go to 250 - do 240 k=1,i1 - f = f+b(k1,i)*b(k1,j)*b(k1,k) - k1 = k1+1 - 240 continue - 250 b(k1,j) = -f - if(j.eq.i) go to 260 - b(k1,j) = b(k1,j)/b(k1,i) - 260 continue - 270 continue -c according to (3),(7) and (8) the system of equations (6) becomes -c ! (r1)' 0 ! ! (d1) 0 ! ! (r1) (b1) ! ! c ! ! n'y ! -c (9) ! ! ! ! ! ! ! ! = ! ! -c ! (b1)' (r2)'! ! 0 (d2) ! ! 0 (r2) ! ! u ! ! 0 ! -c backward substitution to obtain the b-spline coefficients c(j),j=1,.. -c n-4 and the lagrange parameters u(j),j=1,2,...nbind. -c first step of the backward substitution: solve the system -c ! (r1)'(d1) 0 ! ! (c1) ! ! n'y ! -c (10) ! ! ! ! = ! ! -c ! (b1)'(d1) (r2)'(d2) ! ! (u1) ! ! 0 ! -c from (4) and (5) we know that this is equivalent to -c (11) (c1) = (z1) -c (12) (r2)'(d2)(u1) = -(b1)'(z2) - do 310 i=1,nbind - f = 0. - do 280 j=1,n4 - f = f+b(j,i)*zz(j) - 280 continue - i1 = i-1 - k1 = n4+1 - if(i1.eq.0) go to 300 - do 290 j=1,i1 - f = f+u(j)*b(k1,i)*b(k1,j) - k1 = k1+1 - 290 continue - 300 u(i) = -f/b(k1,i) - 310 continue -c second step of the backward substitution: solve the system -c ! (r1) (b1) ! ! c ! ! c1 ! -c (13) ! ! ! ! = ! ! -c ! 0 (r2) ! ! u ! ! u1 ! - k1 = nbind - k2 = kdim -c find the lagrange parameters u. 
- do 340 i=1,nbind - f = u(k1) - if(i.eq.1) go to 330 - k3 = k1+1 - do 320 j=k3,nbind - f = f-u(j)*b(k2,j) - 320 continue - 330 u(k1) = f - k1 = k1-1 - k2 = k2-1 - 340 continue -c find the b-spline coefficients c. - do 360 i=1,n4 - f = c(i) - do 350 j=1,nbind - f = f-u(j)*b(i,j) - 350 continue - c(i) = f - 360 continue - 370 k1 = n4 - do 390 i=2,n4 - k1 = k1-1 - f = c(k1) - k2 = 1 - if(i.lt.5) k2 = 5-i - k3 = k1 - l = 3 - do 380 j=k2,3 - k3 = k3+1 - f = f-a(k3,l)*c(k3) - l = l-1 - 380 continue - c(k1) = f - 390 continue -c test whether the solution of the least-squares problem with the -c constraints ibind(1),...ibind(nbind) in equality form, satisfies -c all of the constraints (2). - k = 1 -c number counts the number of violated inequality constraints. - number = 0 - do 440 j=1,n6 - l = ibind(k) - k = k+1 - if(j.eq.l) go to 440 - k = k-1 -c test whether constraint j is satisfied - f = e(j)*(c(j)-c(j+1))+const(j)*(c(j+2)-c(j+1)) - if(f.le.0.) go to 440 -c if constraint j is not satisfied, add a branch of length nbind+1 -c to the tree. the nodes of this branch contain in their information -c field the number of the constraints ibind(1),...ibind(nbind) and j, -c arranged in increasing order. - number = number+1 - k1 = k-1 - if(k1.eq.0) go to 410 - do 400 i=1,k1 - jbind(i) = ibind(i) - 400 continue - 410 jbind(k) = j - if(l.eq.0) go to 430 - do 420 i=k,nbind - jbind(i+1) = ibind(i) - 420 continue - 430 call fpadno(maxtr,up,left,right,info,count,merk,jbind,n1,ier) -c test whether the storage space which is required for the tree,exceeds -c the available storage space. - if(ier.ne.0) go to 560 - 440 continue -c test whether the solution of the least-squares problem with equality -c constraints is a feasible solution. - if(number.eq.0) go to 470 -c test whether there are still cases with nbind constraints in -c equality form to be considered. - 450 if(merk.gt.1) go to 460 - nbind = n1 -c test whether the number of knots where s''(x)=0 exceeds maxbin. 
- if(nbind.gt.maxbin) go to 550 - n1 = n1+1 - ibind(n1) = 0 -c search which cases with nbind constraints in equality form -c are going to be considered. - call fpdeno(maxtr,up,left,right,nbind,merk) -c test whether the quadratic programming problem has a solution. - if(merk.eq.1) go to 570 -c find a new case with nbind constraints in equality form. - 460 call fpseno(maxtr,up,left,right,info,merk,ibind,nbind) - go to 150 -c test whether the feasible solution is optimal. - 470 ier = 0 - do 480 i=1,n6 - bind(i) = .false. - 480 continue - if(nbind.eq.0) go to 500 - do 490 i=1,nbind - if(u(i).le.0.) go to 450 - j = ibind(i) - bind(j) = .true. - 490 continue -c evaluate s(x) at the data points x(i) and calculate the weighted -c sum of squared residual right hand sides sq. - 500 sq = 0. - l = 4 - lp1 = 5 - do 530 i=1,m - 510 if(x(i).lt.t(lp1) .or. l.eq.n4) go to 520 - l = lp1 - lp1 = l+1 - go to 510 - 520 sx(i) = c(l-3)*q(i,1)+c(l-2)*q(i,2)+c(l-1)*q(i,3)+c(l)*q(i,4) - sq = sq+(w(i)*(y(i)-sx(i)))**2 - 530 continue - go to 600 -c error codes and messages. - 550 ier = 1 - go to 600 - 560 ier = 2 - go to 600 - 570 ier = 3 - 600 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcsin.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcsin.f deleted file mode 100644 index 3b931cc8db..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcsin.f +++ /dev/null @@ -1,56 +0,0 @@ - subroutine fpcsin(a,b,par,sia,coa,sib,cob,ress,resc) -c fpcsin calculates the integrals ress=integral((b-x)**3*sin(par*x)) -c and resc=integral((b-x)**3*cos(par*x)) over the interval (a,b), -c given sia=sin(par*a),coa=cos(par*a),sib=sin(par*b) and cob=cos(par*b) -c .. -c ..scalar arguments.. - real*8 a,b,par,sia,coa,sib,cob,ress,resc -c ..local scalars.. - integer i,j - real*8 ab,ab4,ai,alfa,beta,b2,b4,eps,fac,f1,f2,one,quart,six, - * three,two -c ..function references.. - real*8 abs -c .. 
- one = 0.1e+01 - two = 0.2e+01 - three = 0.3e+01 - six = 0.6e+01 - quart = 0.25e+0 - eps = 0.1e-09 - ab = b-a - ab4 = ab**4 - alfa = ab*par -c the way of calculating the integrals ress and resc depends on -c the value of alfa = (b-a)*par. - if(abs(alfa).le.one) go to 100 -c integration by parts. - beta = one/alfa - b2 = beta**2 - b4 = six*b2**2 - f1 = three*b2*(one-two*b2) - f2 = beta*(one-six*b2) - ress = ab4*(coa*f2+sia*f1+sib*b4) - resc = ab4*(coa*f1-sia*f2+cob*b4) - go to 400 -c ress and resc are found by evaluating a series expansion. - 100 fac = quart - f1 = fac - f2 = 0. - i = 4 - do 200 j=1,5 - i = i+1 - ai = i - fac = fac*alfa/ai - f2 = f2+fac - if(abs(fac).le.eps) go to 300 - i = i+1 - ai = i - fac = -fac*alfa/ai - f1 = f1+fac - if(abs(fac).le.eps) go to 300 - 200 continue - 300 ress = ab4*(coa*f2+sia*f1) - resc = ab4*(coa*f1-sia*f2) - 400 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcurf.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcurf.f deleted file mode 100644 index 7347d45325..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcurf.f +++ /dev/null @@ -1,359 +0,0 @@ - subroutine fpcurf(iopt,x,y,w,m,xb,xe,k,s,nest,tol,maxit,k1,k2, - * n,t,c,fp,fpint,z,a,b,g,q,nrdata,ier) -c .. -c ..scalar arguments.. - real*8 xb,xe,s,tol,fp - integer iopt,m,k,nest,maxit,k1,k2,n,ier -c ..array arguments.. - real*8 x(m),y(m),w(m),t(nest),c(nest),fpint(nest), - * z(nest),a(nest,k1),b(nest,k2),g(nest,k2),q(m,k1) - integer nrdata(nest) -c ..local scalars.. - real*8 acc,con1,con4,con9,cos,half,fpart,fpms,fpold,fp0,f1,f2,f3, - * one,p,pinv,piv,p1,p2,p3,rn,sin,store,term,wi,xi,yi - integer i,ich1,ich3,it,iter,i1,i2,i3,j,k3,l,l0, - * mk1,new,nk1,nmax,nmin,nplus,npl1,nrint,n8 -c ..local arrays.. - real*8 h(7) -c ..function references - real*8 abs,fprati - integer max0,min0 -c ..subroutine references.. -c fpback,fpbspl,fpgivs,fpdisc,fpknot,fprota -c .. 
-c set constants - one = 0.1d+01 - con1 = 0.1d0 - con9 = 0.9d0 - con4 = 0.4d-01 - half = 0.5d0 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position c -c ************************************************************** c -c given a set of knots we compute the least-squares spline sinf(x), c -c and the corresponding sum of squared residuals fp=f(p=inf). c -c if iopt=-1 sinf(x) is the requested approximation. c -c if iopt=0 or iopt=1 we check whether we can accept the knots: c -c if fp <=s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares spline until finally fp<=s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmax = m+k+1. c -c if s > 0 and c -c iopt=0 we first compute the least-squares polynomial of c -c degree k; n = nmin = 2*k+2 c -c iopt=1 we start with the set of knots found at the last c -c call of the routine, except for the case that s > fp0; then c -c we compute directly the least-squares polynomial of degree k. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c determine nmin, the number of knots for polynomial approximation. - nmin = 2*k1 - if(iopt.lt.0) go to 60 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s -c determine nmax, the number of knots for spline interpolation. - nmax = m+k1 - if(s.gt.0.0d0) go to 45 -c if s=0, s(x) is an interpolating spline. -c test whether the required storage space exceeds the available one. - n = nmax - if(nmax.gt.nest) go to 420 -c find the position of the interior knots in case of interpolation. 
- 10 mk1 = m-k1 - if(mk1.eq.0) go to 60 - k3 = k/2 - i = k2 - j = k3+2 - if(k3*2.eq.k) go to 30 - do 20 l=1,mk1 - t(i) = x(j) - i = i+1 - j = j+1 - 20 continue - go to 60 - 30 do 40 l=1,mk1 - t(i) = (x(j)+x(j-1))*half - i = i+1 - j = j+1 - 40 continue - go to 60 -c if s>0 our initial choice of knots depends on the value of iopt. -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c polynomial of degree k which is a spline without interior knots. -c if iopt=1 and fp0>s we start computing the least squares spline -c according to the set of knots found at the last call of the routine. - 45 if(iopt.eq.0) go to 50 - if(n.eq.nmin) go to 50 - fp0 = fpint(n) - fpold = fpint(n-1) - nplus = nrdata(n) - if(fp0.gt.s) go to 60 - 50 n = nmin - fpold = 0.0d0 - nplus = 0 - nrdata(1) = m-2 -c main loop for the different sets of knots. m is a save upper bound -c for the number of trials. - 60 do 200 iter = 1,m - if(n.eq.nmin) ier = -2 -c find nrint, tne number of knot intervals. - nrint = n-nmin+1 -c find the position of the additional knots which are needed for -c the b-spline representation of s(x). - nk1 = n-k1 - i = n - do 70 j=1,k1 - t(j) = xb - t(i) = xe - i = i-1 - 70 continue -c compute the b-spline coefficients of the least-squares spline -c sinf(x). the observation matrix a is built up row by row and -c reduced to upper triangular form by givens transformations. -c at the same time fp=f(p=inf) is computed. - fp = 0.0d0 -c initialize the observation matrix a. - do 80 i=1,nk1 - z(i) = 0.0d0 - do 80 j=1,k1 - a(i,j) = 0.0d0 - 80 continue - l = k1 - do 130 it=1,m -c fetch the current data point x(it),y(it). - xi = x(it) - wi = w(it) - yi = y(it)*wi -c search for knot interval t(l) <= xi < t(l+1). - 85 if(xi.lt.t(l+1) .or. l.eq.nk1) go to 90 - l = l+1 - go to 85 -c evaluate the (k+1) non-zero b-splines at xi and store them in q. 
- 90 call fpbspl(t,n,k,xi,l,h) - do 95 i=1,k1 - q(it,i) = h(i) - h(i) = h(i)*wi - 95 continue -c rotate the new row of the observation matrix into triangle. - j = l-k1 - do 110 i=1,k1 - j = j+1 - piv = h(i) - if(piv.eq.0.0d0) go to 110 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(j,1),cos,sin) -c transformations to right hand side. - call fprota(cos,sin,yi,z(j)) - if(i.eq.k1) go to 120 - i2 = 1 - i3 = i+1 - do 100 i1 = i3,k1 - i2 = i2+1 -c transformations to left hand side. - call fprota(cos,sin,h(i1),a(j,i2)) - 100 continue - 110 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 120 fp = fp+yi*yi - 130 continue - if(ier.eq.(-2)) fp0 = fp - fpint(n) = fp0 - fpint(n-1) = fpold - nrdata(n) = nplus -c backward substitution to obtain the b-spline coefficients. - call fpback(a,z,nk1,k1,c,nest) -c test whether the approximation sinf(x) is an acceptable solution. - if(iopt.lt.0) go to 440 - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c if f(p=inf) < s accept the choice of knots. - if(fpms.lt.0.0d0) go to 250 -c if n = nmax, sinf(x) is an interpolating spline. - if(n.eq.nmax) go to 430 -c increase the number of knots. -c if n=nest we cannot increase the number of knots because of -c the storage capacity limitation. - if(n.eq.nest) go to 420 -c determine the number of knots nplus we are going to add. - if(ier.eq.0) go to 140 - nplus = 1 - ier = 0 - go to 150 - 140 npl1 = nplus*2 - rn = nplus - if(fpold-fp.gt.acc) npl1 = rn*fpms/(fpold-fp) - nplus = min0(nplus*2,max0(npl1,nplus/2,1)) - 150 fpold = fp -c compute the sum((w(i)*(y(i)-s(x(i))))**2) for each knot interval -c t(j+k) <= x(i) <= t(j+k+1) and store it in fpint(j),j=1,2,...nrint. - fpart = 0.0d0 - i = 1 - l = k2 - new = 0 - do 180 it=1,m - if(x(it).lt.t(l) .or. 
l.gt.nk1) go to 160 - new = 1 - l = l+1 - 160 term = 0.0d0 - l0 = l-k2 - do 170 j=1,k1 - l0 = l0+1 - term = term+c(l0)*q(it,j) - 170 continue - term = (w(it)*(term-y(it)))**2 - fpart = fpart+term - if(new.eq.0) go to 180 - store = term*half - fpint(i) = fpart-store - i = i+1 - fpart = store - new = 0 - 180 continue - fpint(nrint) = fpart - do 190 l=1,nplus -c add a new knot. - call fpknot(x,m,t,n,fpint,nrdata,nrint,nest,1) -c if n=nmax we locate the knots as for interpolation. - if(n.eq.nmax) go to 10 -c test whether we cannot further increase the number of knots. - if(n.eq.nest) go to 200 - 190 continue -c restart the computations with the new set of knots. - 200 continue -c test whether the least-squares kth degree polynomial is a solution -c of our approximation problem. - 250 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline sp(x). c -c *************************************************** c -c we have determined the number of knots and their position. c -c we now compute the b-spline coefficients of the smoothing spline c -c sp(x). the observation matrix a is extended by the rows of matrix c -c b expressing that the kth derivative discontinuities of sp(x) at c -c the interior knots t(k+2),...t(n-k-1) must be zero. the corres- c -c ponding weights of these additional rows are set to 1/p. c -c iteratively we then have to determine the value of p such that c -c f(p)=sum((w(i)*(y(i)-sp(x(i))))**2) be = s. we already know that c -c the least-squares kth degree polynomial corresponds to p=0, and c -c that the least-squares spline corresponds to p=infinity. the c -c iteration process which is proposed here, makes use of rational c -c interpolation. since f(p) is a convex and strictly decreasing c -c function of p, it can be approximated by a rational function c -c r(p) = (u*p+v)/(p+w). 
three values of p(p1,p2,p3) with correspond- c -c ing values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used c -c to calculate the new value of p such that r(p)=s. convergence is c -c guaranteed by taking f1>0 and f3<0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jump of the kth derivative of the -c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b. - call fpdisc(t,n,k2,b,nest) -c initial value for p. - p1 = 0.0d0 - f1 = fp0-s - p3 = -one - f3 = fpms - p = 0. - do 255 i=1,nk1 - p = p+a(i,1) - 255 continue - rn = nk1 - p = rn/p - ich1 = 0 - ich3 = 0 - n8 = n-nmin -c iteration process to find the root of f(p) = s. - do 360 iter=1,maxit -c the rows of matrix b with weight 1/p are rotated into the -c triangularised observation matrix a which is stored in g. - pinv = one/p - do 260 i=1,nk1 - c(i) = z(i) - g(i,k2) = 0.0d0 - do 260 j=1,k1 - g(i,j) = a(i,j) - 260 continue - do 300 it=1,n8 -c the row of matrix b is rotated into triangle by givens transformation - do 270 i=1,k2 - h(i) = b(it,i)*pinv - 270 continue - yi = 0.0d0 - do 290 j=it,nk1 - piv = h(1) -c calculate the parameters of the givens transformation. - call fpgivs(piv,g(j,1),cos,sin) -c transformations to right hand side. - call fprota(cos,sin,yi,c(j)) - if(j.eq.nk1) go to 300 - i2 = k1 - if(j.gt.n8) i2 = nk1-j - do 280 i=1,i2 -c transformations to left hand side. - i1 = i+1 - call fprota(cos,sin,h(i1),g(j,i1)) - h(i) = h(i1) - 280 continue - h(i2+1) = 0.0d0 - 290 continue - 300 continue -c backward substitution to obtain the b-spline coefficients. - call fpback(g,c,nk1,k2,c,nest) -c computation of f(p). - fp = 0.0d0 - l = k2 - do 330 it=1,m - if(x(it).lt.t(l) .or. l.gt.nk1) go to 310 - l = l+1 - 310 l0 = l-k2 - term = 0.0d0 - do 320 j=1,k1 - l0 = l0+1 - term = term+c(l0)*q(it,j) - 320 continue - fp = fp+(w(it)*(term-y(it)))**2 - 330 continue -c test whether the approximation sp(x) is an acceptable solution. 
- fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximal number of iterations is reached. - if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 340 - if((f2-f3).gt.acc) go to 335 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p=p1*con9 + p2*con1 - go to 360 - 335 if(f2.lt.0.0d0) ich3=1 - 340 if(ich1.ne.0) go to 350 - if((f1-f2).gt.acc) go to 345 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 360 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 360 - 345 if(f2.gt.0.0d0) ich1=1 -c test whether the iteration process proceeds as theoretically -c expected. - 350 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value for p. - p = fprati(p1,f1,p2,f2,p3,f3) - 360 continue -c error codes and messages. - 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - 440 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcuro.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcuro.f deleted file mode 100644 index 2fb871a703..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcuro.f +++ /dev/null @@ -1,94 +0,0 @@ - subroutine fpcuro(a,b,c,d,x,n) -c subroutine fpcuro finds the real zeros of a cubic polynomial -c p(x) = a*x**3+b*x**2+c*x+d. -c -c calling sequence: -c call fpcuro(a,b,c,d,x,n) -c -c input parameters: -c a,b,c,d: real values, containing the coefficients of p(x). -c -c output parameters: -c x : real array,length 3, which contains the real zeros of p(x) -c n : integer, giving the number of real zeros of p(x). -c .. -c ..scalar arguments.. - real*8 a,b,c,d - integer n -c ..array argument.. - real*8 x(3) -c ..local scalars.. - integer i - real*8 a1,b1,c1,df,disc,d1,e3,f,four,half,ovfl,pi3,p3,q,r, - * step,tent,three,two,u,u1,u2,y -c ..function references.. 
- real*8 abs,max,datan,atan2,cos,sign,sqrt -c set constants - two = 0.2d+01 - three = 0.3d+01 - four = 0.4d+01 - ovfl =0.1d+05 - half = 0.5d+0 - tent = 0.1d+0 - e3 = tent/0.3d0 - pi3 = datan(0.1d+01)/0.75d0 - a1 = abs(a) - b1 = abs(b) - c1 = abs(c) - d1 = abs(d) -c test whether p(x) is a third degree polynomial. - if(max(b1,c1,d1).lt.a1*ovfl) go to 300 -c test whether p(x) is a second degree polynomial. - if(max(c1,d1).lt.b1*ovfl) go to 200 -c test whether p(x) is a first degree polynomial. - if(d1.lt.c1*ovfl) go to 100 -c p(x) is a constant function. - n = 0 - go to 800 -c p(x) is a first degree polynomial. - 100 n = 1 - x(1) = -d/c - go to 500 -c p(x) is a second degree polynomial. - 200 disc = c*c-four*b*d - n = 0 - if(disc.lt.0.) go to 800 - n = 2 - u = sqrt(disc) - b1 = b+b - x(1) = (-c+u)/b1 - x(2) = (-c-u)/b1 - go to 500 -c p(x) is a third degree polynomial. - 300 b1 = b/a*e3 - c1 = c/a - d1 = d/a - q = c1*e3-b1*b1 - r = b1*b1*b1+(d1-b1*c1)*half - disc = q*q*q+r*r - if(disc.gt.0.) go to 400 - u = sqrt(abs(q)) - if(r.lt.0.) u = -u - p3 = atan2(sqrt(-disc),abs(r))*e3 - u2 = u+u - n = 3 - x(1) = -u2*cos(p3)-b1 - x(2) = u2*cos(pi3-p3)-b1 - x(3) = u2*cos(pi3+p3)-b1 - go to 500 - 400 u = sqrt(disc) - u1 = -r+u - u2 = -r-u - n = 1 - x(1) = sign(abs(u1)**e3,u1)+sign(abs(u2)**e3,u2)-b1 -c apply a newton iteration to improve the accuracy of the roots. - 500 do 700 i=1,n - y = x(i) - f = ((a*y+b)*y+c)*y+d - df = (three*a*y+two*b)*y+c - step = 0. 
- if(abs(f).lt.abs(df)*tent) step = f/df - x(i) = y-step - 700 continue - 800 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcyt1.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcyt1.f deleted file mode 100644 index 391a07d8ff..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcyt1.f +++ /dev/null @@ -1,53 +0,0 @@ - subroutine fpcyt1(a,n,nn) -c (l u)-decomposition of a cyclic tridiagonal matrix with the non-zero -c elements stored as follows -c -c | a(1,2) a(1,3) a(1,1) | -c | a(2,1) a(2,2) a(2,3) | -c | a(3,1) a(3,2) a(3,3) | -c | ............... | -c | a(n-1,1) a(n-1,2) a(n-1,3) | -c | a(n,3) a(n,1) a(n,2) | -c -c .. -c ..scalar arguments.. - integer n,nn -c ..array arguments.. - real*8 a(nn,6) -c ..local scalars.. - real*8 aa,beta,gamma,sum,teta,v,one - integer i,n1,n2 -c .. -c set constant - one = 1 - n2 = n-2 - beta = one/a(1,2) - gamma = a(n,3) - teta = a(1,1)*beta - a(1,4) = beta - a(1,5) = gamma - a(1,6) = teta - sum = gamma*teta - do 10 i=2,n2 - v = a(i-1,3)*beta - aa = a(i,1) - beta = one/(a(i,2)-aa*v) - gamma = -gamma*v - teta = -teta*aa*beta - a(i,4) = beta - a(i,5) = gamma - a(i,6) = teta - sum = sum+gamma*teta - 10 continue - n1 = n-1 - v = a(n2,3)*beta - aa = a(n1,1) - beta = one/(a(n1,2)-aa*v) - gamma = a(n,1)-gamma*v - teta = (a(n1,3)-teta*aa)*beta - a(n1,4) = beta - a(n1,5) = gamma - a(n1,6) = teta - a(n,4) = one/(a(n,2)-(sum+gamma*teta)) - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpcyt2.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpcyt2.f deleted file mode 100644 index 08a3f7b69b..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpcyt2.f +++ /dev/null @@ -1,32 +0,0 @@ - subroutine fpcyt2(a,n,b,c,nn) -c subroutine fpcyt2 solves a linear n x n system -c a * c = b -c where matrix a is a cyclic tridiagonal matrix, decomposed -c using subroutine fpsyt1. -c .. -c ..scalar arguments.. - integer n,nn -c ..array arguments.. - real*8 a(nn,6),b(n),c(n) -c ..local scalars.. 
- real*8 cc,sum - integer i,j,j1,n1 -c .. - c(1) = b(1)*a(1,4) - sum = c(1)*a(1,5) - n1 = n-1 - do 10 i=2,n1 - c(i) = (b(i)-a(i,1)*c(i-1))*a(i,4) - sum = sum+c(i)*a(i,5) - 10 continue - cc = (b(n)-sum)*a(n,4) - c(n) = cc - c(n1) = c(n1)-cc*a(n1,6) - j = n1 - do 20 i=3,n - j1 = j-1 - c(j1) = c(j1)-c(j)*a(j1,3)*a(j1,4)-cc*a(j1,6) - j = j1 - 20 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpdeno.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpdeno.f deleted file mode 100644 index 122803830b..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpdeno.f +++ /dev/null @@ -1,55 +0,0 @@ - subroutine fpdeno(maxtr,up,left,right,nbind,merk) -c subroutine fpdeno frees the nodes of all branches of a triply linked -c tree with length < nbind by putting to zero their up field. -c on exit the parameter merk points to the terminal node of the -c most left branch of length nbind or takes the value 1 if there -c is no such branch. -c .. -c ..scalar arguments.. - integer maxtr,nbind,merk -c ..array arguments.. - integer up(maxtr),left(maxtr),right(maxtr) -c ..local scalars .. - integer i,j,k,l,niveau,point -c .. 
- i = 1 - niveau = 0 - 10 point = i - i = left(point) - if(i.eq.0) go to 20 - niveau = niveau+1 - go to 10 - 20 if(niveau.eq.nbind) go to 70 - 30 i = right(point) - j = up(point) - up(point) = 0 - k = left(j) - if(point.ne.k) go to 50 - if(i.ne.0) go to 40 - niveau = niveau-1 - if(niveau.eq.0) go to 80 - point = j - go to 30 - 40 left(j) = i - go to 10 - 50 l = right(k) - if(point.eq.l) go to 60 - k = l - go to 50 - 60 right(k) = i - point = k - 70 i = right(point) - if(i.ne.0) go to 10 - i = up(point) - niveau = niveau-1 - if(niveau.eq.0) go to 80 - point = i - go to 70 - 80 k = 1 - l = left(k) - if(up(l).eq.0) return - 90 merk = k - k = left(k) - if(k.ne.0) go to 90 - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpdisc.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpdisc.f deleted file mode 100644 index 655efb783d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpdisc.f +++ /dev/null @@ -1,43 +0,0 @@ - subroutine fpdisc(t,n,k2,b,nest) -c subroutine fpdisc calculates the discontinuity jumps of the kth -c derivative of the b-splines of degree k at the knots t(k+2)..t(n-k-1) -c ..scalar arguments.. - integer n,k2,nest -c ..array arguments.. - real*8 t(n),b(nest,k2) -c ..local scalars.. - real*8 an,fac,prod - integer i,ik,j,jk,k,k1,l,lj,lk,lmk,lp,nk1,nrint -c ..local array.. - real*8 h(12) -c .. 
- k1 = k2-1 - k = k1-1 - nk1 = n-k1 - nrint = nk1-k - an = nrint - fac = an/(t(nk1+1)-t(k1)) - do 40 l=k2,nk1 - lmk = l-k1 - do 10 j=1,k1 - ik = j+k1 - lj = l+j - lk = lj-k2 - h(j) = t(l)-t(lk) - h(ik) = t(l)-t(lj) - 10 continue - lp = lmk - do 30 j=1,k2 - jk = j - prod = h(j) - do 20 i=1,k - jk = jk+1 - prod = prod*h(jk)*fac - 20 continue - lk = lp+k1 - b(lmk,j) = (t(lk)-t(lp))/prod - lp = lp+1 - 30 continue - 40 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpfrno.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpfrno.f deleted file mode 100644 index 259966cdd1..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpfrno.f +++ /dev/null @@ -1,69 +0,0 @@ - subroutine fpfrno(maxtr,up,left,right,info,point,merk,n1, - * count,ier) -c subroutine fpfrno collects the free nodes (up field zero) of the -c triply linked tree the information of which is kept in the arrays -c up,left,right and info. the maximal length of the branches of the -c tree is given by n1. if no free nodes are found, the error flag -c ier is set to 1. -c .. -c ..scalar arguments.. - integer maxtr,point,merk,n1,count,ier -c ..array arguments.. - integer up(maxtr),left(maxtr),right(maxtr),info(maxtr) -c ..local scalars - integer i,j,k,l,n,niveau -c .. 
- ier = 1 - if(n1.eq.2) go to 140 - niveau = 1 - count = 2 - 10 j = 0 - i = 1 - 20 if(j.eq.niveau) go to 30 - k = 0 - l = left(i) - if(l.eq.0) go to 110 - i = l - j = j+1 - go to 20 - 30 if (i.lt.count) go to 110 - if (i.eq.count) go to 100 - go to 40 - 40 if(up(count).eq.0) go to 50 - count = count+1 - go to 30 - 50 up(count) = up(i) - left(count) = left(i) - right(count) = right(i) - info(count) = info(i) - if(merk.eq.i) merk = count - if(point.eq.i) point = count - if(k.eq.0) go to 60 - right(k) = count - go to 70 - 60 n = up(i) - left(n) = count - 70 l = left(i) - 80 if(l.eq.0) go to 90 - up(l) = count - l = right(l) - go to 80 - 90 up(i) = 0 - i = count - 100 count = count+1 - 110 l = right(i) - k = i - if(l.eq.0) go to 120 - i = l - go to 20 - 120 l = up(i) - j = j-1 - if(j.eq.0) go to 130 - i = l - go to 110 - 130 niveau = niveau+1 - if(niveau.le.n1) go to 10 - if(count.gt.maxtr) go to 140 - ier = 0 - 140 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpgivs.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpgivs.f deleted file mode 100644 index 388851446a..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpgivs.f +++ /dev/null @@ -1,20 +0,0 @@ - subroutine fpgivs(piv,ww,cos,sin) -c subroutine fpgivs calculates the parameters of a givens -c transformation . -c .. -c ..scalar arguments.. - real*8 piv,ww,cos,sin -c ..local scalars.. - real*8 dd,one,store -c ..function references.. - real*8 abs,sqrt -c .. 
- one = 0.1e+01 - store = abs(piv) - if(store.ge.ww) dd = store*sqrt(one+(ww/piv)**2) - if(store.lt.ww) dd = ww*sqrt(one+(piv/ww)**2) - cos = ww/dd - sin = piv/dd - ww = dd - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrdi.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpgrdi.f deleted file mode 100644 index 04ea251a30..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrdi.f +++ /dev/null @@ -1,600 +0,0 @@ - subroutine fpgrdi(ifsu,ifsv,ifbu,ifbv,iback,u,mu,v,mv,z,mz,dz, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sq,fp,fpu,fpv,mm,mvnu,spu,spv, - * right,q,au,av1,av2,bu,bv,aa,bb,cc,cosi,nru,nrv) -c .. -c ..scalar arguments.. - real*8 p,sq,fp - integer ifsu,ifsv,ifbu,ifbv,iback,mu,mv,mz,iop0,iop1,nu,nv,nc, - * mm,mvnu -c ..array arguments.. - real*8 u(mu),v(mv),z(mz),dz(3),tu(nu),tv(nv),c(nc),fpu(nu),fpv(nv) - *, - * spu(mu,4),spv(mv,4),right(mm),q(mvnu),au(nu,5),av1(nv,6), - * av2(nv,4),aa(2,mv),bb(2,nv),cc(nv),cosi(2,nv),bu(nu,5),bv(nv,5) - integer nru(mu),nrv(mv) -c ..local scalars.. - real*8 arg,co,dz1,dz2,dz3,fac,fac0,pinv,piv,si,term,one,three,half - * - integer i,ic,ii,ij,ik,iq,irot,it,iz,i0,i1,i2,i3,j,jj,jk,jper, - * j0,j1,k,k1,k2,l,l0,l1,l2,mvv,ncof,nrold,nroldu,nroldv,number, - * numu,numu1,numv,numv1,nuu,nu4,nu7,nu8,nu9,nv11,nv4,nv7,nv8,n1 -c ..local arrays.. - real*8 h(5),h1(5),h2(4) -c ..function references.. - integer min0 - real*8 cos,sin -c ..subroutine references.. -c fpback,fpbspl,fpgivs,fpcyt1,fpcyt2,fpdisc,fpbacp,fprota -c .. -c let -c | (spu) | | (spv) | -c (au) = | ---------- | (av) = | ---------- | -c | (1/p) (bu) | | (1/p) (bv) | -c -c | z ' 0 | -c q = | ------ | -c | 0 ' 0 | -c -c with c : the (nu-4) x (nv-4) matrix which contains the b-spline -c coefficients. -c z : the mu x mv matrix which contains the function values. -c spu,spv: the mu x (nu-4), resp. mv x (nv-4) observation matrices -c according to the least-squares problems in the u-,resp. -c v-direction. -c bu,bv : the (nu-7) x (nu-4),resp. 
(nv-7) x (nv-4) matrices -c containing the discontinuity jumps of the derivatives -c of the b-splines in the u-,resp.v-variable at the knots -c the b-spline coefficients of the smoothing spline are then calculated -c as the least-squares solution of the following over-determined linear -c system of equations -c -c (1) (av) c (au)' = q -c -c subject to the constraints -c -c (2) c(i,nv-3+j) = c(i,j), j=1,2,3 ; i=1,2,...,nu-4 -c -c (3) if iop0 = 0 c(1,j) = dz(1) -c iop0 = 1 c(1,j) = dz(1) -c c(2,j) = dz(1)+(dz(2)*cosi(1,j)+dz(3)*cosi(2,j))* -c tu(5)/3. = cc(j) , j=1,2,...nv-4 -c -c (4) if iop1 = 1 c(nu-4,j) = 0, j=1,2,...,nv-4. -c -c set constants - one = 1 - three = 3 - half = 0.5 -c initialization - nu4 = nu-4 - nu7 = nu-7 - nu8 = nu-8 - nu9 = nu-9 - nv4 = nv-4 - nv7 = nv-7 - nv8 = nv-8 - nv11 = nv-11 - nuu = nu4-iop0-iop1-1 - if(p.gt.0.) pinv = one/p -c it depends on the value of the flags ifsu,ifsv,ifbu,ifbv and iop0 and -c on the value of p whether the matrices (spu), (spv), (bu), (bv) and -c (cosi) still must be determined. - if(ifsu.ne.0) go to 30 -c calculate the non-zero elements of the matrix (spu) which is the ob- -c servation matrix according to the least-squares spline approximation -c problem in the u-direction. - l = 4 - l1 = 5 - number = 0 - do 25 it=1,mu - arg = u(it) - 10 if(arg.lt.tu(l1) .or. l.eq.nu4) go to 15 - l = l1 - l1 = l+1 - number = number+1 - go to 10 - 15 call fpbspl(tu,nu,3,arg,l,h) - do 20 i=1,4 - spu(it,i) = h(i) - 20 continue - nru(it) = number - 25 continue - ifsu = 1 -c calculate the non-zero elements of the matrix (spv) which is the ob- -c servation matrix according to the least-squares spline approximation -c problem in the v-direction. - 30 if(ifsv.ne.0) go to 85 - l = 4 - l1 = 5 - number = 0 - do 50 it=1,mv - arg = v(it) - 35 if(arg.lt.tv(l1) .or. 
l.eq.nv4) go to 40 - l = l1 - l1 = l+1 - number = number+1 - go to 35 - 40 call fpbspl(tv,nv,3,arg,l,h) - do 45 i=1,4 - spv(it,i) = h(i) - 45 continue - nrv(it) = number - 50 continue - ifsv = 1 - if(iop0.eq.0) go to 85 -c calculate the coefficients of the interpolating splines for cos(v) -c and sin(v). - do 55 i=1,nv4 - cosi(1,i) = 0. - cosi(2,i) = 0. - 55 continue - if(nv7.lt.4) go to 85 - do 65 i=1,nv7 - l = i+3 - arg = tv(l) - call fpbspl(tv,nv,3,arg,l,h) - do 60 j=1,3 - av1(i,j) = h(j) - 60 continue - cosi(1,i) = cos(arg) - cosi(2,i) = sin(arg) - 65 continue - call fpcyt1(av1,nv7,nv) - do 80 j=1,2 - do 70 i=1,nv7 - right(i) = cosi(j,i) - 70 continue - call fpcyt2(av1,nv7,right,right,nv) - do 75 i=1,nv7 - cosi(j,i+1) = right(i) - 75 continue - cosi(j,1) = cosi(j,nv7+1) - cosi(j,nv7+2) = cosi(j,2) - cosi(j,nv4) = cosi(j,3) - 80 continue - 85 if(p.le.0.) go to 150 -c calculate the non-zero elements of the matrix (bu). - if(ifbu.ne.0 .or. nu8.eq.0) go to 90 - call fpdisc(tu,nu,5,bu,nu) - ifbu = 1 -c calculate the non-zero elements of the matrix (bv). - 90 if(ifbv.ne.0 .or. nv8.eq.0) go to 150 - call fpdisc(tv,nv,5,bv,nv) - ifbv = 1 -c substituting (2),(3) and (4) into (1), we obtain the overdetermined -c system -c (5) (avv) (cr) (auu)' = (qq) -c from which the nuu*nv7 remaining coefficients -c c(i,j) , i=2+iop0,3+iop0,...,nu-4-iop1 ; j=1,2,...,nv-7 , -c the elements of (cr), are then determined in the least-squares sense. -c simultaneously, we compute the resulting sum of squared residuals sq. - 150 dz1 = dz(1) - do 155 i=1,mv - aa(1,i) = dz1 - 155 continue - if(nv8.eq.0 .or. p.le.0.) go to 165 - do 160 i=1,nv8 - bb(1,i) = 0. - 160 continue - 165 mvv = mv - if(iop0.eq.0) go to 220 - fac = tu(5)/three - dz2 = dz(2)*fac - dz3 = dz(3)*fac - do 170 i=1,nv4 - cc(i) = dz1+dz2*cosi(1,i)+dz3*cosi(2,i) - 170 continue - do 190 i=1,mv - number = nrv(i) - fac = 0. 
- do 180 j=1,4 - number = number+1 - fac = fac+cc(number)*spv(i,j) - 180 continue - aa(2,i) = fac - 190 continue - if(nv8.eq.0 .or. p.le.0.) go to 220 - do 210 i=1,nv8 - number = i - fac = 0. - do 200 j=1,5 - fac = fac+cc(number)*bv(i,j) - number = number+1 - 200 continue - bb(2,i) = fac*pinv - 210 continue - mvv = mvv+nv8 -c we first determine the matrices (auu) and (qq). then we reduce the -c matrix (auu) to upper triangular form (ru) using givens rotations. -c we apply the same transformations to the rows of matrix qq to obtain -c the (mv+nv8) x nuu matrix g. -c we store matrix (ru) into au and g into q. - 220 l = mvv*nuu -c initialization. - sq = 0. - do 230 i=1,l - q(i) = 0. - 230 continue - do 240 i=1,nuu - do 240 j=1,5 - au(i,j) = 0. - 240 continue - l = 0 - nrold = 0 - n1 = nrold+1 - do 420 it=1,mu - number = nru(it) -c find the appropriate column of q. - 250 do 260 j=1,mvv - right(j) = 0. - 260 continue - if(nrold.eq.number) go to 280 - if(p.le.0.) go to 410 -c fetch a new row of matrix (bu). - do 270 j=1,5 - h(j) = bu(n1,j)*pinv - 270 continue - i0 = 1 - i1 = 5 - go to 310 -c fetch a new row of matrix (spu). - 280 do 290 j=1,4 - h(j) = spu(it,j) - 290 continue -c find the appropriate column of q. - do 300 j=1,mv - l = l+1 - right(j) = z(l) - 300 continue - i0 = 1 - i1 = 4 - 310 if(nu7-number .eq. iop1) i1 = i1-1 - j0 = n1 -c take into account that we eliminate the constraints (3) - 320 if(j0-1.gt.iop0) go to 360 - fac0 = h(i0) - do 330 j=1,mv - right(j) = right(j)-fac0*aa(j0,j) - 330 continue - if(mv.eq.mvv) go to 350 - j = mv - do 340 jj=1,nv8 - j = j+1 - right(j) = right(j)-fac0*bb(j0,jj) - 340 continue - 350 j0 = j0+1 - i0 = i0+1 - go to 320 - 360 irot = nrold-iop0-1 - if(irot.lt.0) irot = 0 -c rotate the new row of matrix (auu) into triangle. - do 390 i=i0,i1 - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 390 -c calculate the parameters of the givens transformation. 
- call fpgivs(piv,au(irot,1),co,si) -c apply that transformation to the rows of matrix (qq). - iq = (irot-1)*mvv - do 370 j=1,mvv - iq = iq+1 - call fprota(co,si,right(j),q(iq)) - 370 continue -c apply that transformation to the columns of (auu). - if(i.eq.i1) go to 390 - i2 = 1 - i3 = i+1 - do 380 j=i3,i1 - i2 = i2+1 - call fprota(co,si,h(j),au(irot,i2)) - 380 continue - 390 continue -c we update the sum of squared residuals - do 395 j=1,mvv - sq = sq+right(j)**2 - 395 continue - 400 if(nrold.eq.number) go to 420 - 410 nrold = n1 - n1 = n1+1 - go to 250 - 420 continue -c we determine the matrix (avv) and then we reduce her to -c upper triangular form (rv) using givens rotations. -c we apply the same transformations to the columns of matrix -c g to obtain the (nv-7) x (nu-5-iop0-iop1) matrix h. -c we store matrix (rv) into av1 and av2, h into c. -c the nv7 x nv7 upper triangular matrix (rv) has the form -c | av1 ' | -c (rv) = | ' av2 | -c | 0 ' | -c with (av2) a nv7 x 4 matrix and (av1) a nv11 x nv11 upper -c triangular matrix of bandwidth 5. - ncof = nuu*nv7 -c initialization. - do 430 i=1,ncof - c(i) = 0. - 430 continue - do 440 i=1,nv4 - av1(i,5) = 0. - do 440 j=1,4 - av1(i,j) = 0. - av2(i,j) = 0. - 440 continue - jper = 0 - nrold = 0 - do 770 it=1,mv - number = nrv(it) - 450 if(nrold.eq.number) go to 480 - if(p.le.0.) go to 760 -c fetch a new row of matrix (bv). - n1 = nrold+1 - do 460 j=1,5 - h(j) = bv(n1,j)*pinv - 460 continue -c find the appropiate row of g. - do 465 j=1,nuu - right(j) = 0. - 465 continue - if(mv.eq.mvv) go to 510 - l = mv+n1 - do 470 j=1,nuu - right(j) = q(l) - l = l+mvv - 470 continue - go to 510 -c fetch a new row of matrix (spv) - 480 h(5) = 0. - do 490 j=1,4 - h(j) = spv(it,j) - 490 continue -c find the appropiate row of g. - l = it - do 500 j=1,nuu - right(j) = q(l) - l = l+mvv - 500 continue -c test whether there are non-zero values in the new row of (avv) -c corresponding to the b-splines n(j,v),j=nv7+1,...,nv4. 
- 510 if(nrold.lt.nv11) go to 710 - if(jper.ne.0) go to 550 -c initialize the matrix (av2). - jk = nv11+1 - do 540 i=1,4 - ik = jk - do 520 j=1,5 - if(ik.le.0) go to 530 - av2(ik,i) = av1(ik,j) - ik = ik-1 - 520 continue - 530 jk = jk+1 - 540 continue - jper = 1 -c if one of the non-zero elements of the new row corresponds to one of -c the b-splines n(j;v),j=nv7+1,...,nv4, we take account of condition -c (2) for setting up this row of (avv). the row is stored in h1( the -c part with respect to av1) and h2 (the part with respect to av2). - 550 do 560 i=1,4 - h1(i) = 0. - h2(i) = 0. - 560 continue - h1(5) = 0. - j = nrold-nv11 - do 600 i=1,5 - j = j+1 - l0 = j - 570 l1 = l0-4 - if(l1.le.0) go to 590 - if(l1.le.nv11) go to 580 - l0 = l1-nv11 - go to 570 - 580 h1(l1) = h(i) - go to 600 - 590 h2(l0) = h2(l0) + h(i) - 600 continue -c rotate the new row of (avv) into triangle. - if(nv11.le.0) go to 670 -c rotations with the rows 1,2,...,nv11 of (avv). - do 660 j=1,nv11 - piv = h1(1) - i2 = min0(nv11-j,4) - if(piv.eq.0.) go to 640 -c calculate the parameters of the givens transformation. - call fpgivs(piv,av1(j,1),co,si) -c apply that transformation to the columns of matrix g. - ic = j - do 610 i=1,nuu - call fprota(co,si,right(i),c(ic)) - ic = ic+nv7 - 610 continue -c apply that transformation to the rows of (avv) with respect to av2. - do 620 i=1,4 - call fprota(co,si,h2(i),av2(j,i)) - 620 continue -c apply that transformation to the rows of (avv) with respect to av1. - if(i2.eq.0) go to 670 - do 630 i=1,i2 - i1 = i+1 - call fprota(co,si,h1(i1),av1(j,i1)) - 630 continue - 640 do 650 i=1,i2 - h1(i) = h1(i+1) - 650 continue - h1(i2+1) = 0. - 660 continue -c rotations with the rows nv11+1,...,nv7 of avv. - 670 do 700 j=1,4 - ij = nv11+j - if(ij.le.0) go to 700 - piv = h2(j) - if(piv.eq.0.) go to 700 -c calculate the parameters of the givens transformation. - call fpgivs(piv,av2(ij,j),co,si) -c apply that transformation to the columns of matrix g. 
- ic = ij - do 680 i=1,nuu - call fprota(co,si,right(i),c(ic)) - ic = ic+nv7 - 680 continue - if(j.eq.4) go to 700 -c apply that transformation to the rows of (avv) with respect to av2. - j1 = j+1 - do 690 i=j1,4 - call fprota(co,si,h2(i),av2(ij,i)) - 690 continue - 700 continue -c we update the sum of squared residuals - do 705 i=1,nuu - sq = sq+right(i)**2 - 705 continue - go to 750 -c rotation into triangle of the new row of (avv), in case the elements -c corresponding to the b-splines n(j;v),j=nv7+1,...,nv4 are all zero. - 710 irot =nrold - do 740 i=1,5 - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 740 -c calculate the parameters of the givens transformation. - call fpgivs(piv,av1(irot,1),co,si) -c apply that transformation to the columns of matrix g. - ic = irot - do 720 j=1,nuu - call fprota(co,si,right(j),c(ic)) - ic = ic+nv7 - 720 continue -c apply that transformation to the rows of (avv). - if(i.eq.5) go to 740 - i2 = 1 - i3 = i+1 - do 730 j=i3,5 - i2 = i2+1 - call fprota(co,si,h(j),av1(irot,i2)) - 730 continue - 740 continue -c we update the sum of squared residuals - do 745 i=1,nuu - sq = sq+right(i)**2 - 745 continue - 750 if(nrold.eq.number) go to 770 - 760 nrold = nrold+1 - go to 450 - 770 continue -c test whether the b-spline coefficients must be determined. - if(iback.ne.0) return -c backward substitution to obtain the b-spline coefficients as the -c solution of the linear system (rv) (cr) (ru)' = h. -c first step: solve the system (rv) (c1) = h. - k = 1 - do 780 i=1,nuu - call fpbacp(av1,av2,c(k),nv7,4,c(k),5,nv) - k = k+nv7 - 780 continue -c second step: solve the system (cr) (ru)' = (c1). - k = 0 - do 800 j=1,nv7 - k = k+1 - l = k - do 790 i=1,nuu - right(i) = c(l) - l = l+nv7 - 790 continue - call fpback(au,right,nuu,5,right,nu) - l = k - do 795 i=1,nuu - c(l) = right(i) - l = l+nv7 - 795 continue - 800 continue -c calculate from the conditions (2)-(3)-(4), the remaining b-spline -c coefficients. 
- ncof = nu4*nv4 - i = nv4 - j = 0 - do 805 l=1,nv4 - q(l) = dz1 - 805 continue - if(iop0.eq.0) go to 815 - do 810 l=1,nv4 - i = i+1 - q(i) = cc(l) - 810 continue - 815 if(nuu.eq.0) go to 850 - do 840 l=1,nuu - ii = i - do 820 k=1,nv7 - i = i+1 - j = j+1 - q(i) = c(j) - 820 continue - do 830 k=1,3 - ii = ii+1 - i = i+1 - q(i) = q(ii) - 830 continue - 840 continue - 850 if(iop1.eq.0) go to 870 - do 860 l=1,nv4 - i = i+1 - q(i) = 0. - 860 continue - 870 do 880 i=1,ncof - c(i) = q(i) - 880 continue -c calculate the quantities -c res(i,j) = (z(i,j) - s(u(i),v(j)))**2 , i=1,2,..,mu;j=1,2,..,mv -c fp = sumi=1,mu(sumj=1,mv(res(i,j))) -c fpu(r) = sum''i(sumj=1,mv(res(i,j))) , r=1,2,...,nu-7 -c tu(r+3) <= u(i) <= tu(r+4) -c fpv(r) = sumi=1,mu(sum''j(res(i,j))) , r=1,2,...,nv-7 -c tv(r+3) <= v(j) <= tv(r+4) - fp = 0. - do 890 i=1,nu - fpu(i) = 0. - 890 continue - do 900 i=1,nv - fpv(i) = 0. - 900 continue - iz = 0 - nroldu = 0 -c main loop for the different grid points. - do 950 i1=1,mu - numu = nru(i1) - numu1 = numu+1 - nroldv = 0 - do 940 i2=1,mv - numv = nrv(i2) - numv1 = numv+1 - iz = iz+1 -c evaluate s(u,v) at the current grid point by making the sum of the -c cross products of the non-zero b-splines at (u,v), multiplied with -c the appropiate b-spline coefficients. - term = 0. - k1 = numu*nv4+numv - do 920 l1=1,4 - k2 = k1 - fac = spu(i1,l1) - do 910 l2=1,4 - k2 = k2+1 - term = term+fac*spv(i2,l2)*c(k2) - 910 continue - k1 = k1+nv4 - 920 continue -c calculate the squared residual at the current grid point. - term = (z(iz)-term)**2 -c adjust the different parameters. 
- fp = fp+term - fpu(numu1) = fpu(numu1)+term - fpv(numv1) = fpv(numv1)+term - fac = term*half - if(numv.eq.nroldv) go to 930 - fpv(numv1) = fpv(numv1)-fac - fpv(numv) = fpv(numv)+fac - 930 nroldv = numv - if(numu.eq.nroldu) go to 940 - fpu(numu1) = fpu(numu1)-fac - fpu(numu) = fpu(numu)+fac - 940 continue - nroldu = numu - 950 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrpa.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpgrpa.f deleted file mode 100644 index 63afbab243..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrpa.f +++ /dev/null @@ -1,313 +0,0 @@ - subroutine fpgrpa(ifsu,ifsv,ifbu,ifbv,idim,ipar,u,mu,v,mv,z,mz, - * tu,nu,tv,nv,p,c,nc,fp,fpu,fpv,mm,mvnu,spu,spv,right,q,au,au1, - * av,av1,bu,bv,nru,nrv) -c .. -c ..scalar arguments.. - real*8 p,fp - integer ifsu,ifsv,ifbu,ifbv,idim,mu,mv,mz,nu,nv,nc,mm,mvnu -c ..array arguments.. - real*8 u(mu),v(mv),z(mz*idim),tu(nu),tv(nv),c(nc*idim),fpu(nu), - * fpv(nv),spu(mu,4),spv(mv,4),right(mm*idim),q(mvnu),au(nu,5), - * au1(nu,4),av(nv,5),av1(nv,4),bu(nu,5),bv(nv,5) - integer ipar(2),nru(mu),nrv(mv) -c ..local scalars.. - real*8 arg,fac,term,one,half,value - integer i,id,ii,it,iz,i1,i2,j,jz,k,k1,k2,l,l1,l2,mvv,k0,muu, - * ncof,nroldu,nroldv,number,nmd,numu,numu1,numv,numv1,nuu,nvv, - * nu4,nu7,nu8,nv4,nv7,nv8 -c ..local arrays.. - real*8 h(5) -c ..subroutine references.. -c fpback,fpbspl,fpdisc,fpbacp,fptrnp,fptrpe -c .. -c let -c | (spu) | | (spv) | -c (au) = | ---------- | (av) = | ---------- | -c | (1/p) (bu) | | (1/p) (bv) | -c -c | z ' 0 | -c q = | ------ | -c | 0 ' 0 | -c -c with c : the (nu-4) x (nv-4) matrix which contains the b-spline -c coefficients. -c z : the mu x mv matrix which contains the function values. -c spu,spv: the mu x (nu-4), resp. mv x (nv-4) observation matrices -c according to the least-squares problems in the u-,resp. -c v-direction. -c bu,bv : the (nu-7) x (nu-4),resp. 
(nv-7) x (nv-4) matrices -c containing the discontinuity jumps of the derivatives -c of the b-splines in the u-,resp.v-variable at the knots -c the b-spline coefficients of the smoothing spline are then calculated -c as the least-squares solution of the following over-determined linear -c system of equations -c -c (1) (av) c (au)' = q -c -c subject to the constraints -c -c (2) c(nu-3+i,j) = c(i,j), i=1,2,3 ; j=1,2,...,nv-4 -c if(ipar(1).ne.0) -c -c (3) c(i,nv-3+j) = c(i,j), j=1,2,3 ; i=1,2,...,nu-4 -c if(ipar(2).ne.0) -c -c set constants - one = 1 - half = 0.5 -c initialization - nu4 = nu-4 - nu7 = nu-7 - nu8 = nu-8 - nv4 = nv-4 - nv7 = nv-7 - nv8 = nv-8 - muu = mu - if(ipar(1).ne.0) muu = mu-1 - mvv = mv - if(ipar(2).ne.0) mvv = mv-1 -c it depends on the value of the flags ifsu,ifsv,ifbu and ibvand -c on the value of p whether the matrices (spu), (spv), (bu) and (bv) -c still must be determined. - if(ifsu.ne.0) go to 50 -c calculate the non-zero elements of the matrix (spu) which is the ob- -c servation matrix according to the least-squares spline approximation -c problem in the u-direction. - l = 4 - l1 = 5 - number = 0 - do 40 it=1,muu - arg = u(it) - 10 if(arg.lt.tu(l1) .or. l.eq.nu4) go to 20 - l = l1 - l1 = l+1 - number = number+1 - go to 10 - 20 call fpbspl(tu,nu,3,arg,l,h) - do 30 i=1,4 - spu(it,i) = h(i) - 30 continue - nru(it) = number - 40 continue - ifsu = 1 -c calculate the non-zero elements of the matrix (spv) which is the ob- -c servation matrix according to the least-squares spline approximation -c problem in the v-direction. - 50 if(ifsv.ne.0) go to 100 - l = 4 - l1 = 5 - number = 0 - do 90 it=1,mvv - arg = v(it) - 60 if(arg.lt.tv(l1) .or. l.eq.nv4) go to 70 - l = l1 - l1 = l+1 - number = number+1 - go to 60 - 70 call fpbspl(tv,nv,3,arg,l,h) - do 80 i=1,4 - spv(it,i) = h(i) - 80 continue - nrv(it) = number - 90 continue - ifsv = 1 - 100 if(p.le.0.) go to 150 -c calculate the non-zero elements of the matrix (bu). - if(ifbu.ne.0 .or. 
nu8.eq.0) go to 110 - call fpdisc(tu,nu,5,bu,nu) - ifbu = 1 -c calculate the non-zero elements of the matrix (bv). - 110 if(ifbv.ne.0 .or. nv8.eq.0) go to 150 - call fpdisc(tv,nv,5,bv,nv) - ifbv = 1 -c substituting (2) and (3) into (1), we obtain the overdetermined -c system -c (4) (avv) (cr) (auu)' = (qq) -c from which the nuu*nvv remaining coefficients -c c(i,j) , i=1,...,nu-4-3*ipar(1) ; j=1,...,nv-4-3*ipar(2) , -c the elements of (cr), are then determined in the least-squares sense. -c we first determine the matrices (auu) and (qq). then we reduce the -c matrix (auu) to upper triangular form (ru) using givens rotations. -c we apply the same transformations to the rows of matrix qq to obtain -c the (mv) x nuu matrix g. -c we store matrix (ru) into au (and au1 if ipar(1)=1) and g into q. - 150 if(ipar(1).ne.0) go to 160 - nuu = nu4 - call fptrnp(mu,mv,idim,nu,nru,spu,p,bu,z,au,q,right) - go to 180 - 160 nuu = nu7 - call fptrpe(mu,mv,idim,nu,nru,spu,p,bu,z,au,au1,q,right) -c we determine the matrix (avv) and then we reduce this matrix to -c upper triangular form (rv) using givens rotations. -c we apply the same transformations to the columns of matrix -c g to obtain the (nvv) x (nuu) matrix h. -c we store matrix (rv) into av (and av1 if ipar(2)=1) and h into c. - 180 if(ipar(2).ne.0) go to 190 - nvv = nv4 - call fptrnp(mv,nuu,idim,nv,nrv,spv,p,bv,q,av,c,right) - go to 200 - 190 nvv = nv7 - call fptrpe(mv,nuu,idim,nv,nrv,spv,p,bv,q,av,av1,c,right) -c backward substitution to obtain the b-spline coefficients as the -c solution of the linear system (rv) (cr) (ru)' = h. -c first step: solve the system (rv) (c1) = h. - 200 ncof = nuu*nvv - k = 1 - if(ipar(2).ne.0) go to 240 - do 220 ii=1,idim - do 220 i=1,nuu - call fpback(av,c(k),nvv,5,c(k),nv) - k = k+nvv - 220 continue - go to 300 - 240 do 260 ii=1,idim - do 260 i=1,nuu - call fpbacp(av,av1,c(k),nvv,4,c(k),5,nv) - k = k+nvv - 260 continue -c second step: solve the system (cr) (ru)' = (c1). 
- 300 if(ipar(1).ne.0) go to 400 - do 360 ii=1,idim - k = (ii-1)*ncof - do 360 j=1,nvv - k = k+1 - l = k - do 320 i=1,nuu - right(i) = c(l) - l = l+nvv - 320 continue - call fpback(au,right,nuu,5,right,nu) - l = k - do 340 i=1,nuu - c(l) = right(i) - l = l+nvv - 340 continue - 360 continue - go to 500 - 400 do 460 ii=1,idim - k = (ii-1)*ncof - do 460 j=1,nvv - k = k+1 - l = k - do 420 i=1,nuu - right(i) = c(l) - l = l+nvv - 420 continue - call fpbacp(au,au1,right,nuu,4,right,5,nu) - l = k - do 440 i=1,nuu - c(l) = right(i) - l = l+nvv - 440 continue - 460 continue -c calculate from the conditions (2)-(3), the remaining b-spline -c coefficients. - 500 if(ipar(2).eq.0) go to 600 - i = 0 - j = 0 - do 560 id=1,idim - do 560 l=1,nuu - ii = i - do 520 k=1,nvv - i = i+1 - j = j+1 - q(i) = c(j) - 520 continue - do 540 k=1,3 - ii = ii+1 - i = i+1 - q(i) = q(ii) - 540 continue - 560 continue - ncof = nv4*nuu - nmd = ncof*idim - do 580 i=1,nmd - c(i) = q(i) - 580 continue - 600 if(ipar(1).eq.0) go to 700 - i = 0 - j = 0 - n33 = 3*nv4 - do 660 id=1,idim - ii = i - do 620 k=1,ncof - i = i+1 - j = j+1 - q(i) = c(j) - 620 continue - do 640 k=1,n33 - ii = ii+1 - i = i+1 - q(i) = q(ii) - 640 continue - 660 continue - ncof = nv4*nu4 - nmd = ncof*idim - do 680 i=1,nmd - c(i) = q(i) - 680 continue -c calculate the quantities -c res(i,j) = (z(i,j) - s(u(i),v(j)))**2 , i=1,2,..,mu;j=1,2,..,mv -c fp = sumi=1,mu(sumj=1,mv(res(i,j))) -c fpu(r) = sum''i(sumj=1,mv(res(i,j))) , r=1,2,...,nu-7 -c tu(r+3) <= u(i) <= tu(r+4) -c fpv(r) = sumi=1,mu(sum''j(res(i,j))) , r=1,2,...,nv-7 -c tv(r+3) <= v(j) <= tv(r+4) - 700 fp = 0. - do 720 i=1,nu - fpu(i) = 0. - 720 continue - do 740 i=1,nv - fpv(i) = 0. - 740 continue - nroldu = 0 -c main loop for the different grid points. 
- do 860 i1=1,muu - numu = nru(i1) - numu1 = numu+1 - nroldv = 0 - iz = (i1-1)*mv - do 840 i2=1,mvv - numv = nrv(i2) - numv1 = numv+1 - iz = iz+1 -c evaluate s(u,v) at the current grid point by making the sum of the -c cross products of the non-zero b-splines at (u,v), multiplied with -c the appropiate b-spline coefficients. - term = 0. - k0 = numu*nv4+numv - jz = iz - do 800 id=1,idim - k1 = k0 - value = 0. - do 780 l1=1,4 - k2 = k1 - fac = spu(i1,l1) - do 760 l2=1,4 - k2 = k2+1 - value = value+fac*spv(i2,l2)*c(k2) - 760 continue - k1 = k1+nv4 - 780 continue -c calculate the squared residual at the current grid point. - term = term+(z(jz)-value)**2 - jz = jz+mz - k0 = k0+ncof - 800 continue -c adjust the different parameters. - fp = fp+term - fpu(numu1) = fpu(numu1)+term - fpv(numv1) = fpv(numv1)+term - fac = term*half - if(numv.eq.nroldv) go to 820 - fpv(numv1) = fpv(numv1)-fac - fpv(numv) = fpv(numv)+fac - 820 nroldv = numv - if(numu.eq.nroldu) go to 840 - fpu(numu1) = fpu(numu1)-fac - fpu(numu) = fpu(numu)+fac - 840 continue - nroldu = numu - 860 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrre.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpgrre.f deleted file mode 100644 index de89117015..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrre.f +++ /dev/null @@ -1,328 +0,0 @@ - subroutine fpgrre(ifsx,ifsy,ifbx,ifby,x,mx,y,my,z,mz,kx,ky,tx,nx, - * ty,ny,p,c,nc,fp,fpx,fpy,mm,mynx,kx1,kx2,ky1,ky2,spx,spy,right,q, - * ax,ay,bx,by,nrx,nry) -c .. -c ..scalar arguments.. - real*8 p,fp - integer ifsx,ifsy,ifbx,ifby,mx,my,mz,kx,ky,nx,ny,nc,mm,mynx, - * kx1,kx2,ky1,ky2 -c ..array arguments.. - real*8 x(mx),y(my),z(mz),tx(nx),ty(ny),c(nc),spx(mx,kx1),spy(my,ky - *1) - * ,right(mm),q(mynx),ax(nx,kx2),bx(nx,kx2),ay(ny,ky2),by(ny,ky2), - * fpx(nx),fpy(ny) - integer nrx(mx),nry(my) -c ..local scalars.. 
- real*8 arg,cos,fac,pinv,piv,sin,term,one,half - integer i,ibandx,ibandy,ic,iq,irot,it,iz,i1,i2,i3,j,k,k1,k2,l, - * l1,l2,ncof,nk1x,nk1y,nrold,nroldx,nroldy,number,numx,numx1, - * numy,numy1,n1 -c ..local arrays.. - real*8 h(7) -c ..subroutine references.. -c fpback,fpbspl,fpgivs,fpdisc,fprota -c .. -c the b-spline coefficients of the smoothing spline are calculated as -c the least-squares solution of the over-determined linear system of -c equations (ay) c (ax)' = q where -c -c | (spx) | | (spy) | -c (ax) = | ---------- | (ay) = | ---------- | -c | (1/p) (bx) | | (1/p) (by) | -c -c | z ' 0 | -c q = | ------ | -c | 0 ' 0 | -c -c with c : the (ny-ky-1) x (nx-kx-1) matrix which contains the -c b-spline coefficients. -c z : the my x mx matrix which contains the function values. -c spx,spy: the mx x (nx-kx-1) and my x (ny-ky-1) observation -c matrices according to the least-squares problems in -c the x- and y-direction. -c bx,by : the (nx-2*kx-1) x (nx-kx-1) and (ny-2*ky-1) x (ny-ky-1) -c matrices which contain the discontinuity jumps of the -c derivatives of the b-splines in the x- and y-direction. - one = 1 - half = 0.5 - nk1x = nx-kx1 - nk1y = ny-ky1 - if(p.gt.0.) pinv = one/p -c it depends on the value of the flags ifsx,ifsy,ifbx and ifby and on -c the value of p whether the matrices (spx),(spy),(bx) and (by) still -c must be determined. - if(ifsx.ne.0) go to 50 -c calculate the non-zero elements of the matrix (spx) which is the -c observation matrix according to the least-squares spline approximat- -c ion problem in the x-direction. - l = kx1 - l1 = kx2 - number = 0 - do 40 it=1,mx - arg = x(it) - 10 if(arg.lt.tx(l1) .or. 
l.eq.nk1x) go to 20 - l = l1 - l1 = l+1 - number = number+1 - go to 10 - 20 call fpbspl(tx,nx,kx,arg,l,h) - do 30 i=1,kx1 - spx(it,i) = h(i) - 30 continue - nrx(it) = number - 40 continue - ifsx = 1 - 50 if(ifsy.ne.0) go to 100 -c calculate the non-zero elements of the matrix (spy) which is the -c observation matrix according to the least-squares spline approximat- -c ion problem in the y-direction. - l = ky1 - l1 = ky2 - number = 0 - do 90 it=1,my - arg = y(it) - 60 if(arg.lt.ty(l1) .or. l.eq.nk1y) go to 70 - l = l1 - l1 = l+1 - number = number+1 - go to 60 - 70 call fpbspl(ty,ny,ky,arg,l,h) - do 80 i=1,ky1 - spy(it,i) = h(i) - 80 continue - nry(it) = number - 90 continue - ifsy = 1 - 100 if(p.le.0.) go to 120 -c calculate the non-zero elements of the matrix (bx). - if(ifbx.ne.0 .or. nx.eq.2*kx1) go to 110 - call fpdisc(tx,nx,kx2,bx,nx) - ifbx = 1 -c calculate the non-zero elements of the matrix (by). - 110 if(ifby.ne.0 .or. ny.eq.2*ky1) go to 120 - call fpdisc(ty,ny,ky2,by,ny) - ifby = 1 -c reduce the matrix (ax) to upper triangular form (rx) using givens -c rotations. apply the same transformations to the rows of matrix q -c to obtain the my x (nx-kx-1) matrix g. -c store matrix (rx) into (ax) and g into q. - 120 l = my*nk1x -c initialization. - do 130 i=1,l - q(i) = 0. - 130 continue - do 140 i=1,nk1x - do 140 j=1,kx2 - ax(i,j) = 0. - 140 continue - l = 0 - nrold = 0 -c ibandx denotes the bandwidth of the matrices (ax) and (rx). - ibandx = kx1 - do 270 it=1,mx - number = nrx(it) - 150 if(nrold.eq.number) go to 180 - if(p.le.0.) go to 260 - ibandx = kx2 -c fetch a new row of matrix (bx). - n1 = nrold+1 - do 160 j=1,kx2 - h(j) = bx(n1,j)*pinv - 160 continue -c find the appropriate column of q. - do 170 j=1,my - right(j) = 0. - 170 continue - irot = nrold - go to 210 -c fetch a new row of matrix (spx). - 180 h(ibandx) = 0. - do 190 j=1,kx1 - h(j) = spx(it,j) - 190 continue -c find the appropriate column of q. 
- do 200 j=1,my - l = l+1 - right(j) = z(l) - 200 continue - irot = number -c rotate the new row of matrix (ax) into triangle. - 210 do 240 i=1,ibandx - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 240 -c calculate the parameters of the givens transformation. - call fpgivs(piv,ax(irot,1),cos,sin) -c apply that transformation to the rows of matrix q. - iq = (irot-1)*my - do 220 j=1,my - iq = iq+1 - call fprota(cos,sin,right(j),q(iq)) - 220 continue -c apply that transformation to the columns of (ax). - if(i.eq.ibandx) go to 250 - i2 = 1 - i3 = i+1 - do 230 j=i3,ibandx - i2 = i2+1 - call fprota(cos,sin,h(j),ax(irot,i2)) - 230 continue - 240 continue - 250 if(nrold.eq.number) go to 270 - 260 nrold = nrold+1 - go to 150 - 270 continue -c reduce the matrix (ay) to upper triangular form (ry) using givens -c rotations. apply the same transformations to the columns of matrix g -c to obtain the (ny-ky-1) x (nx-kx-1) matrix h. -c store matrix (ry) into (ay) and h into c. - ncof = nk1x*nk1y -c initialization. - do 280 i=1,ncof - c(i) = 0. - 280 continue - do 290 i=1,nk1y - do 290 j=1,ky2 - ay(i,j) = 0. - 290 continue - nrold = 0 -c ibandy denotes the bandwidth of the matrices (ay) and (ry). - ibandy = ky1 - do 420 it=1,my - number = nry(it) - 300 if(nrold.eq.number) go to 330 - if(p.le.0.) go to 410 - ibandy = ky2 -c fetch a new row of matrix (by). - n1 = nrold+1 - do 310 j=1,ky2 - h(j) = by(n1,j)*pinv - 310 continue -c find the appropiate row of g. - do 320 j=1,nk1x - right(j) = 0. - 320 continue - irot = nrold - go to 360 -c fetch a new row of matrix (spy) - 330 h(ibandy) = 0. - do 340 j=1,ky1 - h(j) = spy(it,j) - 340 continue -c find the appropiate row of g. - l = it - do 350 j=1,nk1x - right(j) = q(l) - l = l+my - 350 continue - irot = number -c rotate the new row of matrix (ay) into triangle. - 360 do 390 i=1,ibandy - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 390 -c calculate the parameters of the givens transformation. 
- call fpgivs(piv,ay(irot,1),cos,sin) -c apply that transformation to the colums of matrix g. - ic = irot - do 370 j=1,nk1x - call fprota(cos,sin,right(j),c(ic)) - ic = ic+nk1y - 370 continue -c apply that transformation to the columns of matrix (ay). - if(i.eq.ibandy) go to 400 - i2 = 1 - i3 = i+1 - do 380 j=i3,ibandy - i2 = i2+1 - call fprota(cos,sin,h(j),ay(irot,i2)) - 380 continue - 390 continue - 400 if(nrold.eq.number) go to 420 - 410 nrold = nrold+1 - go to 300 - 420 continue -c backward substitution to obtain the b-spline coefficients as the -c solution of the linear system (ry) c (rx)' = h. -c first step: solve the system (ry) (c1) = h. - k = 1 - do 450 i=1,nk1x - call fpback(ay,c(k),nk1y,ibandy,c(k),ny) - k = k+nk1y - 450 continue -c second step: solve the system c (rx)' = (c1). - k = 0 - do 480 j=1,nk1y - k = k+1 - l = k - do 460 i=1,nk1x - right(i) = c(l) - l = l+nk1y - 460 continue - call fpback(ax,right,nk1x,ibandx,right,nx) - l = k - do 470 i=1,nk1x - c(l) = right(i) - l = l+nk1y - 470 continue - 480 continue -c calculate the quantities -c res(i,j) = (z(i,j) - s(x(i),y(j)))**2 , i=1,2,..,mx;j=1,2,..,my -c fp = sumi=1,mx(sumj=1,my(res(i,j))) -c fpx(r) = sum''i(sumj=1,my(res(i,j))) , r=1,2,...,nx-2*kx-1 -c tx(r+kx) <= x(i) <= tx(r+kx+1) -c fpy(r) = sumi=1,mx(sum''j(res(i,j))) , r=1,2,...,ny-2*ky-1 -c ty(r+ky) <= y(j) <= ty(r+ky+1) - fp = 0. - do 490 i=1,nx - fpx(i) = 0. - 490 continue - do 500 i=1,ny - fpy(i) = 0. - 500 continue - nk1y = ny-ky1 - iz = 0 - nroldx = 0 -c main loop for the different grid points. - do 550 i1=1,mx - numx = nrx(i1) - numx1 = numx+1 - nroldy = 0 - do 540 i2=1,my - numy = nry(i2) - numy1 = numy+1 - iz = iz+1 -c evaluate s(x,y) at the current grid point by making the sum of the -c cross products of the non-zero b-splines at (x,y), multiplied with -c the appropiate b-spline coefficients. - term = 0. 
- k1 = numx*nk1y+numy - do 520 l1=1,kx1 - k2 = k1 - fac = spx(i1,l1) - do 510 l2=1,ky1 - k2 = k2+1 - term = term+fac*spy(i2,l2)*c(k2) - 510 continue - k1 = k1+nk1y - 520 continue -c calculate the squared residual at the current grid point. - term = (z(iz)-term)**2 -c adjust the different parameters. - fp = fp+term - fpx(numx1) = fpx(numx1)+term - fpy(numy1) = fpy(numy1)+term - fac = term*half - if(numy.eq.nroldy) go to 530 - fpy(numy1) = fpy(numy1)-fac - fpy(numy) = fpy(numy)+fac - 530 nroldy = numy - if(numx.eq.nroldx) go to 540 - fpx(numx1) = fpx(numx1)-fac - fpx(numx) = fpx(numx)+fac - 540 continue - nroldx = numx - 550 continue - return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrsp.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpgrsp.f deleted file mode 100644 index 57467765b6..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpgrsp.f +++ /dev/null @@ -1,656 +0,0 @@ - subroutine fpgrsp(ifsu,ifsv,ifbu,ifbv,iback,u,mu,v,mv,r,mr,dr, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sq,fp,fpu,fpv,mm,mvnu,spu,spv, - * right,q,au,av1,av2,bu,bv,a0,a1,b0,b1,c0,c1,cosi,nru,nrv) -c .. -c ..scalar arguments.. - real*8 p,sq,fp - integer ifsu,ifsv,ifbu,ifbv,iback,mu,mv,mr,iop0,iop1,nu,nv,nc, - * mm,mvnu -c ..array arguments.. - real*8 u(mu),v(mv),r(mr),dr(6),tu(nu),tv(nv),c(nc),fpu(nu),fpv(nv) - *, - * spu(mu,4),spv(mv,4),right(mm),q(mvnu),au(nu,5),av1(nv,6),c0(nv), - * av2(nv,4),a0(2,mv),b0(2,nv),cosi(2,nv),bu(nu,5),bv(nv,5),c1(nv), - * a1(2,mv),b1(2,nv) - integer nru(mu),nrv(mv) -c ..local scalars.. - real*8 arg,co,dr01,dr02,dr03,dr11,dr12,dr13,fac,fac0,fac1,pinv,piv - *, - * si,term,one,three,half - integer i,ic,ii,ij,ik,iq,irot,it,ir,i0,i1,i2,i3,j,jj,jk,jper, - * j0,j1,k,k1,k2,l,l0,l1,l2,mvv,ncof,nrold,nroldu,nroldv,number, - * numu,numu1,numv,numv1,nuu,nu4,nu7,nu8,nu9,nv11,nv4,nv7,nv8,n1 -c ..local arrays.. - real*8 h(5),h1(5),h2(4) -c ..function references.. - integer min0 - real*8 cos,sin -c ..subroutine references.. 
-c fpback,fpbspl,fpgivs,fpcyt1,fpcyt2,fpdisc,fpbacp,fprota -c .. -c let -c | (spu) | | (spv) | -c (au) = | -------------- | (av) = | -------------- | -c | sqrt(1/p) (bu) | | sqrt(1/p) (bv) | -c -c | r ' 0 | -c q = | ------ | -c | 0 ' 0 | -c -c with c : the (nu-4) x (nv-4) matrix which contains the b-spline -c coefficients. -c r : the mu x mv matrix which contains the function values. -c spu,spv: the mu x (nu-4), resp. mv x (nv-4) observation matrices -c according to the least-squares problems in the u-,resp. -c v-direction. -c bu,bv : the (nu-7) x (nu-4),resp. (nv-7) x (nv-4) matrices -c containing the discontinuity jumps of the derivatives -c of the b-splines in the u-,resp.v-variable at the knots -c the b-spline coefficients of the smoothing spline are then calculated -c as the least-squares solution of the following over-determined linear -c system of equations -c -c (1) (av) c (au)' = q -c -c subject to the constraints -c -c (2) c(i,nv-3+j) = c(i,j), j=1,2,3 ; i=1,2,...,nu-4 -c -c (3) if iop0 = 0 c(1,j) = dr(1) -c iop0 = 1 c(1,j) = dr(1) -c c(2,j) = dr(1)+(dr(2)*cosi(1,j)+dr(3)*cosi(2,j))* -c tu(5)/3. = c0(j) , j=1,2,...nv-4 -c -c (4) if iop1 = 0 c(nu-4,j) = dr(4) -c iop1 = 1 c(nu-4,j) = dr(4) -c c(nu-5,j) = dr(4)+(dr(5)*cosi(1,j)+dr(6)*cosi(2,j)) -c *(tu(nu-4)-tu(nu-3))/3. = c1(j) -c -c set constants - one = 1 - three = 3 - half = 0.5 -c initialization - nu4 = nu-4 - nu7 = nu-7 - nu8 = nu-8 - nu9 = nu-9 - nv4 = nv-4 - nv7 = nv-7 - nv8 = nv-8 - nv11 = nv-11 - nuu = nu4-iop0-iop1-2 - if(p.gt.0.) pinv = one/p -c it depends on the value of the flags ifsu,ifsv,ifbu,ifbv,iop0,iop1 -c and on the value of p whether the matrices (spu), (spv), (bu), (bv), -c (cosi) still must be determined. - if(ifsu.ne.0) go to 30 -c calculate the non-zero elements of the matrix (spu) which is the ob- -c servation matrix according to the least-squares spline approximation -c problem in the u-direction. 
- l = 4 - l1 = 5 - number = 0 - do 25 it=1,mu - arg = u(it) - 10 if(arg.lt.tu(l1) .or. l.eq.nu4) go to 15 - l = l1 - l1 = l+1 - number = number+1 - go to 10 - 15 call fpbspl(tu,nu,3,arg,l,h) - do 20 i=1,4 - spu(it,i) = h(i) - 20 continue - nru(it) = number - 25 continue - ifsu = 1 -c calculate the non-zero elements of the matrix (spv) which is the ob- -c servation matrix according to the least-squares spline approximation -c problem in the v-direction. - 30 if(ifsv.ne.0) go to 85 - l = 4 - l1 = 5 - number = 0 - do 50 it=1,mv - arg = v(it) - 35 if(arg.lt.tv(l1) .or. l.eq.nv4) go to 40 - l = l1 - l1 = l+1 - number = number+1 - go to 35 - 40 call fpbspl(tv,nv,3,arg,l,h) - do 45 i=1,4 - spv(it,i) = h(i) - 45 continue - nrv(it) = number - 50 continue - ifsv = 1 - if(iop0.eq.0 .and. iop1.eq.0) go to 85 -c calculate the coefficients of the interpolating splines for cos(v) -c and sin(v). - do 55 i=1,nv4 - cosi(1,i) = 0. - cosi(2,i) = 0. - 55 continue - if(nv7.lt.4) go to 85 - do 65 i=1,nv7 - l = i+3 - arg = tv(l) - call fpbspl(tv,nv,3,arg,l,h) - do 60 j=1,3 - av1(i,j) = h(j) - 60 continue - cosi(1,i) = cos(arg) - cosi(2,i) = sin(arg) - 65 continue - call fpcyt1(av1,nv7,nv) - do 80 j=1,2 - do 70 i=1,nv7 - right(i) = cosi(j,i) - 70 continue - call fpcyt2(av1,nv7,right,right,nv) - do 75 i=1,nv7 - cosi(j,i+1) = right(i) - 75 continue - cosi(j,1) = cosi(j,nv7+1) - cosi(j,nv7+2) = cosi(j,2) - cosi(j,nv4) = cosi(j,3) - 80 continue - 85 if(p.le.0.) go to 150 -c calculate the non-zero elements of the matrix (bu). - if(ifbu.ne.0 .or. nu8.eq.0) go to 90 - call fpdisc(tu,nu,5,bu,nu) - ifbu = 1 -c calculate the non-zero elements of the matrix (bv). - 90 if(ifbv.ne.0 .or. nv8.eq.0) go to 150 - call fpdisc(tv,nv,5,bv,nv) - ifbv = 1 -c substituting (2),(3) and (4) into (1), we obtain the overdetermined -c system -c (5) (avv) (cc) (auu)' = (qq) -c from which the nuu*nv7 remaining coefficients -c c(i,j) , i=2+iop0,3+iop0,...,nu-5-iop1,j=1,2,...,nv-7. 
-c the elements of (cc), are then determined in the least-squares sense. -c simultaneously, we compute the resulting sum of squared residuals sq. - 150 dr01 = dr(1) - dr11 = dr(4) - do 155 i=1,mv - a0(1,i) = dr01 - a1(1,i) = dr11 - 155 continue - if(nv8.eq.0 .or. p.le.0.) go to 165 - do 160 i=1,nv8 - b0(1,i) = 0. - b1(1,i) = 0. - 160 continue - 165 mvv = mv - if(iop0.eq.0) go to 195 - fac = (tu(5)-tu(4))/three - dr02 = dr(2)*fac - dr03 = dr(3)*fac - do 170 i=1,nv4 - c0(i) = dr01+dr02*cosi(1,i)+dr03*cosi(2,i) - 170 continue - do 180 i=1,mv - number = nrv(i) - fac = 0. - do 175 j=1,4 - number = number+1 - fac = fac+c0(number)*spv(i,j) - 175 continue - a0(2,i) = fac - 180 continue - if(nv8.eq.0 .or. p.le.0.) go to 195 - do 190 i=1,nv8 - number = i - fac = 0. - do 185 j=1,5 - fac = fac+c0(number)*bv(i,j) - number = number+1 - 185 continue - b0(2,i) = fac*pinv - 190 continue - mvv = mv+nv8 - 195 if(iop1.eq.0) go to 225 - fac = (tu(nu4)-tu(nu4+1))/three - dr12 = dr(5)*fac - dr13 = dr(6)*fac - do 200 i=1,nv4 - c1(i) = dr11+dr12*cosi(1,i)+dr13*cosi(2,i) - 200 continue - do 210 i=1,mv - number = nrv(i) - fac = 0. - do 205 j=1,4 - number = number+1 - fac = fac+c1(number)*spv(i,j) - 205 continue - a1(2,i) = fac - 210 continue - if(nv8.eq.0 .or. p.le.0.) go to 225 - do 220 i=1,nv8 - number = i - fac = 0. - do 215 j=1,5 - fac = fac+c1(number)*bv(i,j) - number = number+1 - 215 continue - b1(2,i) = fac*pinv - 220 continue - mvv = mv+nv8 -c we first determine the matrices (auu) and (qq). then we reduce the -c matrix (auu) to an unit upper triangular form (ru) using givens -c rotations without square roots. we apply the same transformations to -c the rows of matrix qq to obtain the mv x nuu matrix g. -c we store matrix (ru) into au and g into q. - 225 l = mvv*nuu -c initialization. - sq = 0. - if(l.eq.0) go to 245 - do 230 i=1,l - q(i) = 0. - 230 continue - do 240 i=1,nuu - do 240 j=1,5 - au(i,j) = 0. 
- 240 continue - l = 0 - 245 nrold = 0 - n1 = nrold+1 - do 420 it=1,mu - number = nru(it) -c find the appropriate column of q. - 250 do 260 j=1,mvv - right(j) = 0. - 260 continue - if(nrold.eq.number) go to 280 - if(p.le.0.) go to 410 -c fetch a new row of matrix (bu). - do 270 j=1,5 - h(j) = bu(n1,j)*pinv - 270 continue - i0 = 1 - i1 = 5 - go to 310 -c fetch a new row of matrix (spu). - 280 do 290 j=1,4 - h(j) = spu(it,j) - 290 continue -c find the appropriate column of q. - do 300 j=1,mv - l = l+1 - right(j) = r(l) - 300 continue - i0 = 1 - i1 = 4 - 310 j0 = n1 - j1 = nu7-number -c take into account that we eliminate the constraints (3) - 315 if(j0-1.gt.iop0) go to 335 - fac0 = h(i0) - do 320 j=1,mv - right(j) = right(j)-fac0*a0(j0,j) - 320 continue - if(mv.eq.mvv) go to 330 - j = mv - do 325 jj=1,nv8 - j = j+1 - right(j) = right(j)-fac0*b0(j0,jj) - 325 continue - 330 j0 = j0+1 - i0 = i0+1 - go to 315 -c take into account that we eliminate the constraints (4) - 335 if(j1-1.gt.iop1) go to 360 - fac1 = h(i1) - do 340 j=1,mv - right(j) = right(j)-fac1*a1(j1,j) - 340 continue - if(mv.eq.mvv) go to 350 - j = mv - do 345 jj=1,nv8 - j = j+1 - right(j) = right(j)-fac1*b1(j1,jj) - 345 continue - 350 j1 = j1+1 - i1 = i1-1 - go to 335 - 360 irot = nrold-iop0-1 - if(irot.lt.0) irot = 0 -c rotate the new row of matrix (auu) into triangle. - if(i0.gt.i1) go to 390 - do 385 i=i0,i1 - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 385 -c calculate the parameters of the givens transformation. - call fpgivs(piv,au(irot,1),co,si) -c apply that transformation to the rows of matrix (qq). - iq = (irot-1)*mvv - do 370 j=1,mvv - iq = iq+1 - call fprota(co,si,right(j),q(iq)) - 370 continue -c apply that transformation to the columns of (auu). - if(i.eq.i1) go to 385 - i2 = 1 - i3 = i+1 - do 380 j=i3,i1 - i2 = i2+1 - call fprota(co,si,h(j),au(irot,i2)) - 380 continue - 385 continue -c we update the sum of squared residuals. 
- 390 do 395 j=1,mvv - sq = sq+right(j)**2 - 395 continue - 400 if(nrold.eq.number) go to 420 - 410 nrold = n1 - n1 = n1+1 - go to 250 - 420 continue - if(nuu.eq.0) go to 800 -c we determine the matrix (avv) and then we reduce her to an unit -c upper triangular form (rv) using givens rotations without square -c roots. we apply the same transformations to the columns of matrix -c g to obtain the (nv-7) x (nu-6-iop0-iop1) matrix h. -c we store matrix (rv) into av1 and av2, h into c. -c the nv7 x nv7 triangular unit upper matrix (rv) has the form -c | av1 ' | -c (rv) = | ' av2 | -c | 0 ' | -c with (av2) a nv7 x 4 matrix and (av1) a nv11 x nv11 unit upper -c triangular matrix of bandwidth 5. - ncof = nuu*nv7 -c initialization. - do 430 i=1,ncof - c(i) = 0. - 430 continue - do 440 i=1,nv4 - av1(i,5) = 0. - do 440 j=1,4 - av1(i,j) = 0. - av2(i,j) = 0. - 440 continue - jper = 0 - nrold = 0 - do 770 it=1,mv - number = nrv(it) - 450 if(nrold.eq.number) go to 480 - if(p.le.0.) go to 760 -c fetch a new row of matrix (bv). - n1 = nrold+1 - do 460 j=1,5 - h(j) = bv(n1,j)*pinv - 460 continue -c find the appropiate row of g. - do 465 j=1,nuu - right(j) = 0. - 465 continue - if(mv.eq.mvv) go to 510 - l = mv+n1 - do 470 j=1,nuu - right(j) = q(l) - l = l+mvv - 470 continue - go to 510 -c fetch a new row of matrix (spv) - 480 h(5) = 0. - do 490 j=1,4 - h(j) = spv(it,j) - 490 continue -c find the appropiate row of g. - l = it - do 500 j=1,nuu - right(j) = q(l) - l = l+mvv - 500 continue -c test whether there are non-zero values in the new row of (avv) -c corresponding to the b-splines n(j;v),j=nv7+1,...,nv4. - 510 if(nrold.lt.nv11) go to 710 - if(jper.ne.0) go to 550 -c initialize the matrix (av2). 
- jk = nv11+1 - do 540 i=1,4 - ik = jk - do 520 j=1,5 - if(ik.le.0) go to 530 - av2(ik,i) = av1(ik,j) - ik = ik-1 - 520 continue - 530 jk = jk+1 - 540 continue - jper = 1 -c if one of the non-zero elements of the new row corresponds to one of -c the b-splines n(j;v),j=nv7+1,...,nv4, we take account of condition -c (2) for setting up this row of (avv). the row is stored in h1( the -c part with respect to av1) and h2 (the part with respect to av2). - 550 do 560 i=1,4 - h1(i) = 0. - h2(i) = 0. - 560 continue - h1(5) = 0. - j = nrold-nv11 - do 600 i=1,5 - j = j+1 - l0 = j - 570 l1 = l0-4 - if(l1.le.0) go to 590 - if(l1.le.nv11) go to 580 - l0 = l1-nv11 - go to 570 - 580 h1(l1) = h(i) - go to 600 - 590 h2(l0) = h2(l0) + h(i) - 600 continue -c rotate the new row of (avv) into triangle. - if(nv11.le.0) go to 670 -c rotations with the rows 1,2,...,nv11 of (avv). - do 660 j=1,nv11 - piv = h1(1) - i2 = min0(nv11-j,4) - if(piv.eq.0.) go to 640 -c calculate the parameters of the givens transformation. - call fpgivs(piv,av1(j,1),co,si) -c apply that transformation to the columns of matrix g. - ic = j - do 610 i=1,nuu - call fprota(co,si,right(i),c(ic)) - ic = ic+nv7 - 610 continue -c apply that transformation to the rows of (avv) with respect to av2. - do 620 i=1,4 - call fprota(co,si,h2(i),av2(j,i)) - 620 continue -c apply that transformation to the rows of (avv) with respect to av1. - if(i2.eq.0) go to 670 - do 630 i=1,i2 - i1 = i+1 - call fprota(co,si,h1(i1),av1(j,i1)) - 630 continue - 640 do 650 i=1,i2 - h1(i) = h1(i+1) - 650 continue - h1(i2+1) = 0. - 660 continue -c rotations with the rows nv11+1,...,nv7 of avv. - 670 do 700 j=1,4 - ij = nv11+j - if(ij.le.0) go to 700 - piv = h2(j) - if(piv.eq.0.) go to 700 -c calculate the parameters of the givens transformation. - call fpgivs(piv,av2(ij,j),co,si) -c apply that transformation to the columns of matrix g. 
- ic = ij - do 680 i=1,nuu - call fprota(co,si,right(i),c(ic)) - ic = ic+nv7 - 680 continue - if(j.eq.4) go to 700 -c apply that transformation to the rows of (avv) with respect to av2. - j1 = j+1 - do 690 i=j1,4 - call fprota(co,si,h2(i),av2(ij,i)) - 690 continue - 700 continue -c we update the sum of squared residuals. - do 705 i=1,nuu - sq = sq+right(i)**2 - 705 continue - go to 750 -c rotation into triangle of the new row of (avv), in case the elements -c corresponding to the b-splines n(j;v),j=nv7+1,...,nv4 are all zero. - 710 irot =nrold - do 740 i=1,5 - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 740 -c calculate the parameters of the givens transformation. - call fpgivs(piv,av1(irot,1),co,si) -c apply that transformation to the columns of matrix g. - ic = irot - do 720 j=1,nuu - call fprota(co,si,right(j),c(ic)) - ic = ic+nv7 - 720 continue -c apply that transformation to the rows of (avv). - if(i.eq.5) go to 740 - i2 = 1 - i3 = i+1 - do 730 j=i3,5 - i2 = i2+1 - call fprota(co,si,h(j),av1(irot,i2)) - 730 continue - 740 continue -c we update the sum of squared residuals. - do 745 i=1,nuu - sq = sq+right(i)**2 - 745 continue - 750 if(nrold.eq.number) go to 770 - 760 nrold = nrold+1 - go to 450 - 770 continue -c test whether the b-spline coefficients must be determined. - if(iback.ne.0) return -c backward substitution to obtain the b-spline coefficients as the -c solution of the linear system (rv) (cr) (ru)' = h. -c first step: solve the system (rv) (c1) = h. - k = 1 - do 780 i=1,nuu - call fpbacp(av1,av2,c(k),nv7,4,c(k),5,nv) - k = k+nv7 - 780 continue -c second step: solve the system (cr) (ru)' = (c1). - k = 0 - do 795 j=1,nv7 - k = k+1 - l = k - do 785 i=1,nuu - right(i) = c(l) - l = l+nv7 - 785 continue - call fpback(au,right,nuu,5,right,nu) - l = k - do 790 i=1,nuu - c(l) = right(i) - l = l+nv7 - 790 continue - 795 continue -c calculate from the conditions (2)-(3)-(4), the remaining b-spline -c coefficients. 
- 800 ncof = nu4*nv4 - j = ncof - do 805 l=1,nv4 - q(l) = dr01 - q(j) = dr11 - j = j-1 - 805 continue - i = nv4 - j = 0 - if(iop0.eq.0) go to 815 - do 810 l=1,nv4 - i = i+1 - q(i) = c0(l) - 810 continue - 815 if(nuu.eq.0) go to 835 - do 830 l=1,nuu - ii = i - do 820 k=1,nv7 - i = i+1 - j = j+1 - q(i) = c(j) - 820 continue - do 825 k=1,3 - ii = ii+1 - i = i+1 - q(i) = q(ii) - 825 continue - 830 continue - 835 if(iop1.eq.0) go to 845 - do 840 l=1,nv4 - i = i+1 - q(i) = c1(l) - 840 continue - 845 do 850 i=1,ncof - c(i) = q(i) - 850 continue -c calculate the quantities -c res(i,j) = (r(i,j) - s(u(i),v(j)))**2 , i=1,2,..,mu;j=1,2,..,mv -c fp = sumi=1,mu(sumj=1,mv(res(i,j))) -c fpu(r) = sum''i(sumj=1,mv(res(i,j))) , r=1,2,...,nu-7 -c tu(r+3) <= u(i) <= tu(r+4) -c fpv(r) = sumi=1,mu(sum''j(res(i,j))) , r=1,2,...,nv-7 -c tv(r+3) <= v(j) <= tv(r+4) - fp = 0. - do 890 i=1,nu - fpu(i) = 0. - 890 continue - do 900 i=1,nv - fpv(i) = 0. - 900 continue - ir = 0 - nroldu = 0 -c main loop for the different grid points. - do 950 i1=1,mu - numu = nru(i1) - numu1 = numu+1 - nroldv = 0 - do 940 i2=1,mv - numv = nrv(i2) - numv1 = numv+1 - ir = ir+1 -c evaluate s(u,v) at the current grid point by making the sum of the -c cross products of the non-zero b-splines at (u,v), multiplied with -c the appropiate b-spline coefficients. - term = 0. - k1 = numu*nv4+numv - do 920 l1=1,4 - k2 = k1 - fac = spu(i1,l1) - do 910 l2=1,4 - k2 = k2+1 - term = term+fac*spv(i2,l2)*c(k2) - 910 continue - k1 = k1+nv4 - 920 continue -c calculate the squared residual at the current grid point. - term = (r(ir)-term)**2 -c adjust the different parameters. 
- fp = fp+term - fpu(numu1) = fpu(numu1)+term - fpv(numv1) = fpv(numv1)+term - fac = term*half - if(numv.eq.nroldv) go to 930 - fpv(numv1) = fpv(numv1)-fac - fpv(numv) = fpv(numv)+fac - 930 nroldv = numv - if(numu.eq.nroldu) go to 940 - fpu(numu1) = fpu(numu1)-fac - fpu(numu) = fpu(numu)+fac - 940 continue - nroldu = numu - 950 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpinst.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpinst.f deleted file mode 100644 index 965fa53625..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpinst.f +++ /dev/null @@ -1,77 +0,0 @@ - subroutine fpinst(iopt,t,n,c,k,x,l,tt,nn,cc,nest) -c given the b-spline representation (knots t(j),j=1,2,...,n, b-spline -c coefficients c(j),j=1,2,...,n-k-1) of a spline of degree k, fpinst -c calculates the b-spline representation (knots tt(j),j=1,2,...,nn, -c b-spline coefficients cc(j),j=1,2,...,nn-k-1) of the same spline if -c an additional knot is inserted at the point x situated in the inter- -c val t(l)<=x2*k or l0) in such a way that -c - if p tends to infinity, sp(u,v) becomes the least-squares spline -c with given knots, satisfying the constraints. -c - if p tends to zero, sp(u,v) becomes the least-squares polynomial, -c satisfying the constraints. -c - the function f(p)=sumi=1,mu(sumj=1,mv((z(i,j)-sp(u(i),v(j)))**2) -c is continuous and strictly decreasing for p>0. -c -c ..scalar arguments.. - integer ifsu,ifsv,ifbu,ifbv,mu,mv,mz,nu,nv,nuest,nvest, - * nc,lwrk - real*8 z0,p,step,fp -c ..array arguments.. - integer ider(2),nru(mu),nrv(mv),iopt(3) - real*8 u(mu),v(mv),z(mz),dz(3),tu(nu),tv(nv),c(nc),fpu(nu),fpv(nv) - *, - * wrk(lwrk) -c ..local scalars.. - real*8 res,sq,sqq,step1,step2,three - integer i,id0,iop0,iop1,i1,j,l,laa,lau,lav1,lav2,lbb,lbu,lbv, - * lcc,lcs,lq,lri,lsu,lsv,l1,l2,mm,mvnu,number -c ..local arrays.. - integer nr(3) - real*8 delta(3),dzz(3),sum(3),a(6,6),g(6) -c ..function references.. - integer max0 -c ..subroutine references.. 
-c fpgrdi,fpsysy -c .. -c set constant - three = 3 -c we partition the working space - lsu = 1 - lsv = lsu+4*mu - lri = lsv+4*mv - mm = max0(nuest,mv+nvest) - lq = lri+mm - mvnu = nuest*(mv+nvest-8) - lau = lq+mvnu - lav1 = lau+5*nuest - lav2 = lav1+6*nvest - lbu = lav2+4*nvest - lbv = lbu+5*nuest - laa = lbv+5*nvest - lbb = laa+2*mv - lcc = lbb+2*nvest - lcs = lcc+nvest -c we calculate the smoothing spline sp(u,v) according to the input -c values dz(i),i=1,2,3. - iop0 = iopt(2) - iop1 = iopt(3) - call fpgrdi(ifsu,ifsv,ifbu,ifbv,0,u,mu,v,mv,z,mz,dz, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(laa),wrk(lbb), - * wrk(lcc),wrk(lcs),nru,nrv) - id0 = ider(1) - if(id0.ne.0) go to 5 - res = (z0-dz(1))**2 - fp = fp+res - sq = sq+res -c in case all derivative values dz(i) are given (step<=0) or in case -c we have spline interpolation, we accept this spline as a solution. - 5 if(step.le.0. .or. sq.le.0.) return - dzz(1) = dz(1) - dzz(2) = dz(2) - dzz(3) = dz(3) -c number denotes the number of derivative values dz(i) that still must -c be optimized. let us denote these parameters by g(j),j=1,...,number. - number = 0 - if(id0.gt.0) go to 10 - number = 1 - nr(1) = 1 - delta(1) = step - 10 if(iop0.eq.0) go to 20 - if(ider(2).ne.0) go to 20 - step2 = step*three/tu(5) - nr(number+1) = 2 - nr(number+2) = 3 - delta(number+1) = step2 - delta(number+2) = step2 - number = number+2 - 20 if(number.eq.0) return -c the sum of squared residuals sq is a quadratic polynomial in the -c parameters g(j). we determine the unknown coefficients of this -c polymomial by calculating (number+1)*(number+2)/2 different splines -c according to specific values for g(j). 
- do 30 i=1,number - l = nr(i) - step1 = delta(i) - dzz(l) = dz(l)+step1 - call fpgrdi(ifsu,ifsv,ifbu,ifbv,1,u,mu,v,mv,z,mz,dzz, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sum(i),fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(laa),wrk(lbb), - * wrk(lcc),wrk(lcs),nru,nrv) - if(id0.eq.0) sum(i) = sum(i)+(z0-dzz(1))**2 - dzz(l) = dz(l)-step1 - call fpgrdi(ifsu,ifsv,ifbu,ifbv,1,u,mu,v,mv,z,mz,dzz, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sqq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(laa),wrk(lbb), - * wrk(lcc),wrk(lcs),nru,nrv) - if(id0.eq.0) sqq = sqq+(z0-dzz(1))**2 - a(i,i) = (sum(i)+sqq-sq-sq)/step1**2 - if(a(i,i).le.0.) go to 80 - g(i) = (sqq-sum(i))/(step1+step1) - dzz(l) = dz(l) - 30 continue - if(number.eq.1) go to 60 - do 50 i=2,number - l1 = nr(i) - step1 = delta(i) - dzz(l1) = dz(l1)+step1 - i1 = i-1 - do 40 j=1,i1 - l2 = nr(j) - step2 = delta(j) - dzz(l2) = dz(l2)+step2 - call fpgrdi(ifsu,ifsv,ifbu,ifbv,1,u,mu,v,mv,z,mz,dzz, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sqq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(laa),wrk(lbb), - * wrk(lcc),wrk(lcs),nru,nrv) - if(id0.eq.0) sqq = sqq+(z0-dzz(1))**2 - a(i,j) = (sq+sqq-sum(i)-sum(j))/(step1*step2) - dzz(l2) = dz(l2) - 40 continue - dzz(l1) = dz(l1) - 50 continue -c the optimal values g(j) are found as the solution of the system -c d (sq) / d (g(j)) = 0 , j=1,...,number. - 60 call fpsysy(a,number,g) - do 70 i=1,number - l = nr(i) - dz(l) = dz(l)+g(i) - 70 continue -c we determine the spline sp(u,v) according to the optimal values g(j). 
- 80 call fpgrdi(ifsu,ifsv,ifbu,ifbv,0,u,mu,v,mv,z,mz,dz, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(laa),wrk(lbb), - * wrk(lcc),wrk(lcs),nru,nrv) - if(id0.eq.0) fp = fp+(z0-dz(1))**2 - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpopsp.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpopsp.f deleted file mode 100644 index 2f11bb8eb7..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpopsp.f +++ /dev/null @@ -1,211 +0,0 @@ - subroutine fpopsp(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,r,mr,r0,r1,dr, - * iopt,ider,tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpu,fpv, - * nru,nrv,wrk,lwrk) -c given the set of function values r(i,j) defined on the rectangular -c grid (u(i),v(j)),i=1,2,...,mu;j=1,2,...,mv, fpopsp determines a -c smooth bicubic spline approximation with given knots tu(i),i=1,..,nu -c in the u-direction and tv(j),j=1,2,...,nv in the v-direction. this -c spline sp(u,v) will be periodic in the variable v and will satisfy -c the following constraints -c -c s(tu(1),v) = dr(1) , tv(4) <=v<= tv(nv-3) -c -c s(tu(nu),v) = dr(4) , tv(4) <=v<= tv(nv-3) -c -c and (if iopt(2) = 1) -c -c d s(tu(1),v) -c ------------ = dr(2)*cos(v)+dr(3)*sin(v) , tv(4) <=v<= tv(nv-3) -c d u -c -c and (if iopt(3) = 1) -c -c d s(tu(nu),v) -c ------------- = dr(5)*cos(v)+dr(6)*sin(v) , tv(4) <=v<= tv(nv-3) -c d u -c -c where the parameters dr(i) correspond to the derivative values at the -c poles as defined in subroutine spgrid. -c -c the b-spline coefficients of sp(u,v) are determined as the least- -c squares solution of an overdetermined linear system which depends -c on the value of p and on the values dr(i),i=1,...,6. the correspond- -c ing sum of squared residuals sq is a simple quadratic function in -c the variables dr(i). these may or may not be provided. 
the values -c dr(i) which are not given will be determined so as to minimize the -c resulting sum of squared residuals sq. in that case the user must -c provide some initial guess dr(i) and some estimate (dr(i)-step, -c dr(i)+step) of the range of possible values for these latter. -c -c sp(u,v) also depends on the parameter p (p>0) in such a way that -c - if p tends to infinity, sp(u,v) becomes the least-squares spline -c with given knots, satisfying the constraints. -c - if p tends to zero, sp(u,v) becomes the least-squares polynomial, -c satisfying the constraints. -c - the function f(p)=sumi=1,mu(sumj=1,mv((r(i,j)-sp(u(i),v(j)))**2) -c is continuous and strictly decreasing for p>0. -c -c ..scalar arguments.. - integer ifsu,ifsv,ifbu,ifbv,mu,mv,mr,nu,nv,nuest,nvest, - * nc,lwrk - real*8 r0,r1,p,fp -c ..array arguments.. - integer ider(4),nru(mu),nrv(mv),iopt(3) - real*8 u(mu),v(mv),r(mr),dr(6),tu(nu),tv(nv),c(nc),fpu(nu),fpv(nv) - *, - * wrk(lwrk),step(2) -c ..local scalars.. - real*8 res,sq,sqq,sq0,sq1,step1,step2,three - integer i,id0,iop0,iop1,i1,j,l,lau,lav1,lav2,la0,la1,lbu,lbv,lb0, - * lb1,lc0,lc1,lcs,lq,lri,lsu,lsv,l1,l2,mm,mvnu,number -c ..local arrays.. - integer nr(6) - real*8 delta(6),drr(6),sum(6),a(6,6),g(6) -c ..function references.. - integer max0 -c ..subroutine references.. -c fpgrsp,fpsysy -c .. -c set constant - three = 3 -c we partition the working space - lsu = 1 - lsv = lsu+4*mu - lri = lsv+4*mv - mm = max0(nuest,mv+nvest) - lq = lri+mm - mvnu = nuest*(mv+nvest-8) - lau = lq+mvnu - lav1 = lau+5*nuest - lav2 = lav1+6*nvest - lbu = lav2+4*nvest - lbv = lbu+5*nuest - la0 = lbv+5*nvest - la1 = la0+2*mv - lb0 = la1+2*mv - lb1 = lb0+2*nvest - lc0 = lb1+2*nvest - lc1 = lc0+nvest - lcs = lc1+nvest -c we calculate the smoothing spline sp(u,v) according to the input -c values dr(i),i=1,...,6. 
- iop0 = iopt(2) - iop1 = iopt(3) - id0 = ider(1) - id1 = ider(3) - call fpgrsp(ifsu,ifsv,ifbu,ifbv,0,u,mu,v,mv,r,mr,dr, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(la0),wrk(la1),wrk(lb0), - * wrk(lb1),wrk(lc0),wrk(lc1),wrk(lcs),nru,nrv) - sq0 = 0. - sq1 = 0. - if(id0.eq.0) sq0 = (r0-dr(1))**2 - if(id1.eq.0) sq1 = (r1-dr(4))**2 - sq = sq+sq0+sq1 -c in case all derivative values dr(i) are given (step<=0) or in case -c we have spline interpolation, we accept this spline as a solution. - if(sq.le.0.) return - if(step(1).le.0. .and. step(2).le.0.) return - do 10 i=1,6 - drr(i) = dr(i) - 10 continue -c number denotes the number of derivative values dr(i) that still must -c be optimized. let us denote these parameters by g(j),j=1,...,number. - number = 0 - if(id0.gt.0) go to 20 - number = 1 - nr(1) = 1 - delta(1) = step(1) - 20 if(iop0.eq.0) go to 30 - if(ider(2).ne.0) go to 30 - step2 = step(1)*three/(tu(5)-tu(4)) - nr(number+1) = 2 - nr(number+2) = 3 - delta(number+1) = step2 - delta(number+2) = step2 - number = number+2 - 30 if(id1.gt.0) go to 40 - number = number+1 - nr(number) = 4 - delta(number) = step(2) - 40 if(iop1.eq.0) go to 50 - if(ider(4).ne.0) go to 50 - step2 = step(2)*three/(tu(nu)-tu(nu-4)) - nr(number+1) = 5 - nr(number+2) = 6 - delta(number+1) = step2 - delta(number+2) = step2 - number = number+2 - 50 if(number.eq.0) return -c the sum of squared residulas sq is a quadratic polynomial in the -c parameters g(j). we determine the unknown coefficients of this -c polymomial by calculating (number+1)*(number+2)/2 different splines -c according to specific values for g(j). 
- do 60 i=1,number - l = nr(i) - step1 = delta(i) - drr(l) = dr(l)+step1 - call fpgrsp(ifsu,ifsv,ifbu,ifbv,1,u,mu,v,mv,r,mr,drr, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sum(i),fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(la0),wrk(la1),wrk(lb0), - * wrk(lb1),wrk(lc0),wrk(lc1),wrk(lcs),nru,nrv) - if(id0.eq.0) sq0 = (r0-drr(1))**2 - if(id1.eq.0) sq1 = (r1-drr(4))**2 - sum(i) = sum(i)+sq0+sq1 - drr(l) = dr(l)-step1 - call fpgrsp(ifsu,ifsv,ifbu,ifbv,1,u,mu,v,mv,r,mr,drr, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sqq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(la0),wrk(la1),wrk(lb0), - * wrk(lb1),wrk(lc0),wrk(lc1),wrk(lcs),nru,nrv) - if(id0.eq.0) sq0 = (r0-drr(1))**2 - if(id1.eq.0) sq1 = (r1-drr(4))**2 - sqq = sqq+sq0+sq1 - drr(l) = dr(l) - a(i,i) = (sum(i)+sqq-sq-sq)/step1**2 - if(a(i,i).le.0.) go to 110 - g(i) = (sqq-sum(i))/(step1+step1) - 60 continue - if(number.eq.1) go to 90 - do 80 i=2,number - l1 = nr(i) - step1 = delta(i) - drr(l1) = dr(l1)+step1 - i1 = i-1 - do 70 j=1,i1 - l2 = nr(j) - step2 = delta(j) - drr(l2) = dr(l2)+step2 - call fpgrsp(ifsu,ifsv,ifbu,ifbv,1,u,mu,v,mv,r,mr,drr, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sqq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(la0),wrk(la1),wrk(lb0), - * wrk(lb1),wrk(lc0),wrk(lc1),wrk(lcs),nru,nrv) - if(id0.eq.0) sq0 = (r0-drr(1))**2 - if(id1.eq.0) sq1 = (r1-drr(4))**2 - sqq = sqq+sq0+sq1 - a(i,j) = (sq+sqq-sum(i)-sum(j))/(step1*step2) - drr(l2) = dr(l2) - 70 continue - drr(l1) = dr(l1) - 80 continue -c the optimal values g(j) are found as the solution of the system -c d (sq) / d (g(j)) = 0 , j=1,...,number. - 90 call fpsysy(a,number,g) - do 100 i=1,number - l = nr(i) - dr(l) = dr(l)+g(i) - 100 continue -c we determine the spline sp(u,v) according to the optimal values g(j). 
- 110 call fpgrsp(ifsu,ifsv,ifbu,ifbv,0,u,mu,v,mv,r,mr,dr, - * iop0,iop1,tu,nu,tv,nv,p,c,nc,sq,fp,fpu,fpv,mm,mvnu, - * wrk(lsu),wrk(lsv),wrk(lri),wrk(lq),wrk(lau),wrk(lav1), - * wrk(lav2),wrk(lbu),wrk(lbv),wrk(la0),wrk(la1),wrk(lb0), - * wrk(lb1),wrk(lc0),wrk(lc1),wrk(lcs),nru,nrv) - if(id0.eq.0) sq0 = (r0-dr(1))**2 - if(id1.eq.0) sq1 = (r1-dr(4))**2 - sq = sq+sq0+sq1 - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fporde.f b/scipy-0.10.1/scipy/interpolate/fitpack/fporde.f deleted file mode 100644 index e2fc34780d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fporde.f +++ /dev/null @@ -1,47 +0,0 @@ - subroutine fporde(x,y,m,kx,ky,tx,nx,ty,ny,nummer,index,nreg) -c subroutine fporde sorts the data points (x(i),y(i)),i=1,2,...,m -c according to the panel tx(l)<=x s we will increase the number of knots and compute the c -c corresponding least-squares curve until finally fp<=s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmax = m+k+1. c -c if s > 0 and c -c iopt=0 we first compute the least-squares polynomial curve of c -c degree k; n = nmin = 2*k+2 c -c iopt=1 we start with the set of knots found at the last c -c call of the routine, except for the case that s > fp0; then c -c we compute directly the polynomial curve of degree k. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c determine nmin, the number of knots for polynomial approximation. - nmin = 2*k1 - if(iopt.lt.0) go to 60 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s -c determine nmax, the number of knots for spline interpolation. - nmax = m+k1 - if(s.gt.0.) go to 45 -c if s=0, s(u) is an interpolating curve. -c test whether the required storage space exceeds the available one. - n = nmax - if(nmax.gt.nest) go to 420 -c find the position of the interior knots in case of interpolation. 
- 10 mk1 = m-k1 - if(mk1.eq.0) go to 60 - k3 = k/2 - i = k2 - j = k3+2 - if(k3*2.eq.k) go to 30 - do 20 l=1,mk1 - t(i) = u(j) - i = i+1 - j = j+1 - 20 continue - go to 60 - 30 do 40 l=1,mk1 - t(i) = (u(j)+u(j-1))*half - i = i+1 - j = j+1 - 40 continue - go to 60 -c if s>0 our initial choice of knots depends on the value of iopt. -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c polynomial curve which is a spline curve without interior knots. -c if iopt=1 and fp0>s we start computing the least squares spline curve -c according to the set of knots found at the last call of the routine. - 45 if(iopt.eq.0) go to 50 - if(n.eq.nmin) go to 50 - fp0 = fpint(n) - fpold = fpint(n-1) - nplus = nrdata(n) - if(fp0.gt.s) go to 60 - 50 n = nmin - fpold = 0. - nplus = 0 - nrdata(1) = m-2 -c main loop for the different sets of knots. m is a save upper bound -c for the number of trials. - 60 do 200 iter = 1,m - if(n.eq.nmin) ier = -2 -c find nrint, tne number of knot intervals. - nrint = n-nmin+1 -c find the position of the additional knots which are needed for -c the b-spline representation of s(u). - nk1 = n-k1 - i = n - do 70 j=1,k1 - t(j) = ub - t(i) = ue - i = i-1 - 70 continue -c compute the b-spline coefficients of the least-squares spline curve -c sinf(u). the observation matrix a is built up row by row and -c reduced to upper triangular form by givens transformations. -c at the same time fp=f(p=inf) is computed. - fp = 0. -c initialize the b-spline coefficients and the observation matrix a. - do 75 i=1,nc - z(i) = 0. - 75 continue - do 80 i=1,nk1 - do 80 j=1,k1 - a(i,j) = 0. - 80 continue - l = k1 - jj = 0 - do 130 it=1,m -c fetch the current data point u(it),x(it). - ui = u(it) - wi = w(it) - do 83 j=1,idim - jj = jj+1 - xi(j) = x(jj)*wi - 83 continue -c search for knot interval t(l) <= ui < t(l+1). - 85 if(ui.lt.t(l+1) .or. l.eq.nk1) go to 90 - l = l+1 - go to 85 -c evaluate the (k+1) non-zero b-splines at ui and store them in q. 
- 90 call fpbspl(t,n,k,ui,l,h) - do 95 i=1,k1 - q(it,i) = h(i) - h(i) = h(i)*wi - 95 continue -c rotate the new row of the observation matrix into triangle. - j = l-k1 - do 110 i=1,k1 - j = j+1 - piv = h(i) - if(piv.eq.0.) go to 110 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(j,1),cos,sin) -c transformations to right hand side. - j1 = j - do 97 j2 =1,idim - call fprota(cos,sin,xi(j2),z(j1)) - j1 = j1+n - 97 continue - if(i.eq.k1) go to 120 - i2 = 1 - i3 = i+1 - do 100 i1 = i3,k1 - i2 = i2+1 -c transformations to left hand side. - call fprota(cos,sin,h(i1),a(j,i2)) - 100 continue - 110 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 120 do 125 j2=1,idim - fp = fp+xi(j2)**2 - 125 continue - 130 continue - if(ier.eq.(-2)) fp0 = fp - fpint(n) = fp0 - fpint(n-1) = fpold - nrdata(n) = nplus -c backward substitution to obtain the b-spline coefficients. - j1 = 1 - do 135 j2=1,idim - call fpback(a,z(j1),nk1,k1,c(j1),nest) - j1 = j1+n - 135 continue -c test whether the approximation sinf(u) is an acceptable solution. - if(iopt.lt.0) go to 440 - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c if f(p=inf) < s accept the choice of knots. - if(fpms.lt.0.) go to 250 -c if n = nmax, sinf(u) is an interpolating spline curve. - if(n.eq.nmax) go to 430 -c increase the number of knots. -c if n=nest we cannot increase the number of knots because of -c the storage capacity limitation. - if(n.eq.nest) go to 420 -c determine the number of knots nplus we are going to add. - if(ier.eq.0) go to 140 - nplus = 1 - ier = 0 - go to 150 - 140 npl1 = nplus*2 - rn = nplus - if(fpold-fp.gt.acc) npl1 = rn*fpms/(fpold-fp) - nplus = min0(nplus*2,max0(npl1,nplus/2,1)) - 150 fpold = fp -c compute the sum of squared residuals for each knot interval -c t(j+k) <= u(i) <= t(j+k+1) and store it in fpint(j),j=1,2,...nrint. - fpart = 0. - i = 1 - l = k2 - new = 0 - jj = 0 - do 180 it=1,m - if(u(it).lt.t(l) .or. 
l.gt.nk1) go to 160 - new = 1 - l = l+1 - 160 term = 0. - l0 = l-k2 - do 175 j2=1,idim - fac = 0. - j1 = l0 - do 170 j=1,k1 - j1 = j1+1 - fac = fac+c(j1)*q(it,j) - 170 continue - jj = jj+1 - term = term+(w(it)*(fac-x(jj)))**2 - l0 = l0+n - 175 continue - fpart = fpart+term - if(new.eq.0) go to 180 - store = term*half - fpint(i) = fpart-store - i = i+1 - fpart = store - new = 0 - 180 continue - fpint(nrint) = fpart - do 190 l=1,nplus -c add a new knot. - call fpknot(u,m,t,n,fpint,nrdata,nrint,nest,1) -c if n=nmax we locate the knots as for interpolation - if(n.eq.nmax) go to 10 -c test whether we cannot further increase the number of knots. - if(n.eq.nest) go to 200 - 190 continue -c restart the computations with the new set of knots. - 200 continue -c test whether the least-squares kth degree polynomial curve is a -c solution of our approximation problem. - 250 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline curve sp(u). c -c ********************************************************** c -c we have determined the number of knots and their position. c -c we now compute the b-spline coefficients of the smoothing curve c -c sp(u). the observation matrix a is extended by the rows of matrix c -c b expressing that the kth derivative discontinuities of sp(u) at c -c the interior knots t(k+2),...t(n-k-1) must be zero. the corres- c -c ponding weights of these additional rows are set to 1/p. c -c iteratively we then have to determine the value of p such that f(p),c -c the sum of squared residuals be = s. we already know that the least c -c squares kth degree polynomial curve corresponds to p=0, and that c -c the least-squares spline curve corresponds to p=infinity. the c -c iteration process which is proposed here, makes use of rational c -c interpolation. 
since f(p) is a convex and strictly decreasing c -c function of p, it can be approximated by a rational function c -c r(p) = (u*p+v)/(p+w). three values of p(p1,p2,p3) with correspond- c -c ing values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used c -c to calculate the new value of p such that r(p)=s. convergence is c -c guaranteed by taking f1>0 and f3<0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jump of the kth derivative of the -c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b. - call fpdisc(t,n,k2,b,nest) -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = 0. - do 252 i=1,nk1 - p = p+a(i,1) - 252 continue - rn = nk1 - p = rn/p - ich1 = 0 - ich3 = 0 - n8 = n-nmin -c iteration process to find the root of f(p) = s. - do 360 iter=1,maxit -c the rows of matrix b with weight 1/p are rotated into the -c triangularised observation matrix a which is stored in g. - pinv = one/p - do 255 i=1,nc - c(i) = z(i) - 255 continue - do 260 i=1,nk1 - g(i,k2) = 0. - do 260 j=1,k1 - g(i,j) = a(i,j) - 260 continue - do 300 it=1,n8 -c the row of matrix b is rotated into triangle by givens transformation - do 270 i=1,k2 - h(i) = b(it,i)*pinv - 270 continue - do 275 j=1,idim - xi(j) = 0. - 275 continue - do 290 j=it,nk1 - piv = h(1) -c calculate the parameters of the givens transformation. - call fpgivs(piv,g(j,1),cos,sin) -c transformations to right hand side. - j1 = j - do 277 j2=1,idim - call fprota(cos,sin,xi(j2),c(j1)) - j1 = j1+n - 277 continue - if(j.eq.nk1) go to 300 - i2 = k1 - if(j.gt.n8) i2 = nk1-j - do 280 i=1,i2 -c transformations to left hand side. - i1 = i+1 - call fprota(cos,sin,h(i1),g(j,i1)) - h(i) = h(i1) - 280 continue - h(i2+1) = 0. - 290 continue - 300 continue -c backward substitution to obtain the b-spline coefficients. - j1 = 1 - do 305 j2=1,idim - call fpback(g,c(j1),nk1,k2,c(j1),nest) - j1 =j1+n - 305 continue -c computation of f(p). - fp = 0. 
- l = k2 - jj = 0 - do 330 it=1,m - if(u(it).lt.t(l) .or. l.gt.nk1) go to 310 - l = l+1 - 310 l0 = l-k2 - term = 0. - do 325 j2=1,idim - fac = 0. - j1 = l0 - do 320 j=1,k1 - j1 = j1+1 - fac = fac+c(j1)*q(it,j) - 320 continue - jj = jj+1 - term = term+(fac-x(jj))**2 - l0 = l0+n - 325 continue - fp = fp+term*w(it)**2 - 330 continue -c test whether the approximation sp(u) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximal number of iterations is reached. - if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 340 - if((f2-f3).gt.acc) go to 335 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p=p1*con9 + p2*con1 - go to 360 - 335 if(f2.lt.0.) ich3=1 - 340 if(ich1.ne.0) go to 350 - if((f1-f2).gt.acc) go to 345 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 360 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 360 - 345 if(f2.gt.0.) ich1=1 -c test whether the iteration process proceeds as theoretically -c expected. - 350 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value for p. - p = fprati(p1,f1,p2,f2,p3,f3) - 360 continue -c error codes and messages. - 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - 440 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fppasu.f b/scipy-0.10.1/scipy/interpolate/fitpack/fppasu.f deleted file mode 100644 index 7778cf52a0..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fppasu.f +++ /dev/null @@ -1,392 +0,0 @@ - subroutine fppasu(iopt,ipar,idim,u,mu,v,mv,z,mz,s,nuest,nvest, - * tol,maxit,nc,nu,tu,nv,tv,c,fp,fp0,fpold,reducu,reducv,fpintu, - * fpintv,lastdi,nplusu,nplusv,nru,nrv,nrdatu,nrdatv,wrk,lwrk,ier) -c .. -c ..scalar arguments.. 
- real*8 s,tol,fp,fp0,fpold,reducu,reducv - integer iopt,idim,mu,mv,mz,nuest,nvest,maxit,nc,nu,nv,lastdi, - * nplusu,nplusv,lwrk,ier -c ..array arguments.. - real*8 u(mu),v(mv),z(mz*idim),tu(nuest),tv(nvest),c(nc*idim), - * fpintu(nuest),fpintv(nvest),wrk(lwrk) - integer ipar(2),nrdatu(nuest),nrdatv(nvest),nru(mu),nrv(mv) -c ..local scalars - real*8 acc,fpms,f1,f2,f3,p,p1,p2,p3,rn,one,con1,con9,con4, - * peru,perv,ub,ue,vb,ve - integer i,ich1,ich3,ifbu,ifbv,ifsu,ifsv,iter,j,lau1,lav1,laa, - * l,lau,lav,lbu,lbv,lq,lri,lsu,lsv,l1,l2,l3,l4,mm,mpm,mvnu,ncof, - * nk1u,nk1v,nmaxu,nmaxv,nminu,nminv,nplu,nplv,npl1,nrintu, - * nrintv,nue,nuk,nve,nuu,nvv -c ..function references.. - real*8 abs,fprati - integer max0,min0 -c ..subroutine references.. -c fpgrpa,fpknot -c .. -c set constants - one = 1 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 -c set boundaries of the approximation domain - ub = u(1) - ue = u(mu) - vb = v(1) - ve = v(mv) -c we partition the working space. - lsu = 1 - lsv = lsu+mu*4 - lri = lsv+mv*4 - mm = max0(nuest,mv) - lq = lri+mm*idim - mvnu = nuest*mv*idim - lau = lq+mvnu - nuk = nuest*5 - lbu = lau+nuk - lav = lbu+nuk - nuk = nvest*5 - lbv = lav+nuk - laa = lbv+nuk - lau1 = lau - if(ipar(1).eq.0) go to 10 - peru = ue-ub - lau1 = laa - laa = laa+4*nuest - 10 lav1 = lav - if(ipar(2).eq.0) go to 20 - perv = ve-vb - lav1 = laa -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position. c -c **************************************************************** c -c given a set of knots we compute the least-squares spline sinf(u,v), c -c and the corresponding sum of squared residuals fp=f(p=inf). c -c if iopt=-1 sinf(u,v) is the requested approximation. c -c if iopt=0 or iopt=1 we check whether we can accept the knots: c -c if fp <=s we will continue with the current set of knots. 
c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares spline until finally fp<=s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmaxu = mu+4+2*ipar(1) and nmaxv = mv+4+2*ipar(2) c -c if s>0 and c -c *iopt=0 we first compute the least-squares polynomial c -c nu=nminu=8 and nv=nminv=8 c -c *iopt=1 we start with the knots found at the last call of the c -c routine, except for the case that s > fp0; then we can compute c -c the least-squares polynomial directly. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c determine the number of knots for polynomial approximation. - 20 nminu = 8 - nminv = 8 - if(iopt.lt.0) go to 100 -c acc denotes the absolute tolerance for the root of f(p)=s. - acc = tol*s -c find nmaxu and nmaxv which denote the number of knots in u- and v- -c direction in case of spline interpolation. - nmaxu = mu+4+2*ipar(1) - nmaxv = mv+4+2*ipar(2) -c find nue and nve which denote the maximum number of knots -c allowed in each direction - nue = min0(nmaxu,nuest) - nve = min0(nmaxv,nvest) - if(s.gt.0.) go to 60 -c if s = 0, s(u,v) is an interpolating spline. - nu = nmaxu - nv = nmaxv -c test whether the required storage space exceeds the available one. - if(nv.gt.nvest .or. nu.gt.nuest) go to 420 -c find the position of the interior knots in case of interpolation. -c the knots in the u-direction. - nuu = nu-8 - if(nuu.eq.0) go to 40 - i = 5 - j = 3-ipar(1) - do 30 l=1,nuu - tu(i) = u(j) - i = i+1 - j = j+1 - 30 continue -c the knots in the v-direction. - 40 nvv = nv-8 - if(nvv.eq.0) go to 60 - i = 5 - j = 3-ipar(2) - do 50 l=1,nvv - tv(i) = v(j) - i = i+1 - j = j+1 - 50 continue - go to 100 -c if s > 0 our initial choice of knots depends on the value of iopt. 
- 60 if(iopt.eq.0) go to 90 - if(fp0.le.s) go to 90 -c if iopt=1 and fp0 > s we start computing the least- squares spline -c according to the set of knots found at the last call of the routine. -c we determine the number of grid coordinates u(i) inside each knot -c interval (tu(l),tu(l+1)). - l = 5 - j = 1 - nrdatu(1) = 0 - mpm = mu-1 - do 70 i=2,mpm - nrdatu(j) = nrdatu(j)+1 - if(u(i).lt.tu(l)) go to 70 - nrdatu(j) = nrdatu(j)-1 - l = l+1 - j = j+1 - nrdatu(j) = 0 - 70 continue -c we determine the number of grid coordinates v(i) inside each knot -c interval (tv(l),tv(l+1)). - l = 5 - j = 1 - nrdatv(1) = 0 - mpm = mv-1 - do 80 i=2,mpm - nrdatv(j) = nrdatv(j)+1 - if(v(i).lt.tv(l)) go to 80 - nrdatv(j) = nrdatv(j)-1 - l = l+1 - j = j+1 - nrdatv(j) = 0 - 80 continue - go to 100 -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c polynomial (which is a spline without interior knots). - 90 nu = nminu - nv = nminv - nrdatu(1) = mu-2 - nrdatv(1) = mv-2 - lastdi = 0 - nplusu = 0 - nplusv = 0 - fp0 = 0. - fpold = 0. - reducu = 0. - reducv = 0. - 100 mpm = mu+mv - ifsu = 0 - ifsv = 0 - ifbu = 0 - ifbv = 0 - p = -one -c main loop for the different sets of knots.mpm=mu+mv is a save upper -c bound for the number of trials. - do 250 iter=1,mpm - if(nu.eq.nminu .and. nv.eq.nminv) ier = -2 -c find nrintu (nrintv) which is the number of knot intervals in the -c u-direction (v-direction). - nrintu = nu-nminu+1 - nrintv = nv-nminv+1 -c find ncof, the number of b-spline coefficients for the current set -c of knots. - nk1u = nu-4 - nk1v = nv-4 - ncof = nk1u*nk1v -c find the position of the additional knots which are needed for the -c b-spline representation of s(u,v). 
- if(ipar(1).ne.0) go to 110 - i = nu - do 105 j=1,4 - tu(j) = ub - tu(i) = ue - i = i-1 - 105 continue - go to 120 - 110 l1 = 4 - l2 = l1 - l3 = nu-3 - l4 = l3 - tu(l2) = ub - tu(l3) = ue - do 115 j=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tu(l2) = tu(l4)-peru - tu(l3) = tu(l1)+peru - 115 continue - 120 if(ipar(2).ne.0) go to 130 - i = nv - do 125 j=1,4 - tv(j) = vb - tv(i) = ve - i = i-1 - 125 continue - go to 140 - 130 l1 = 4 - l2 = l1 - l3 = nv-3 - l4 = l3 - tv(l2) = vb - tv(l3) = ve - do 135 j=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tv(l2) = tv(l4)-perv - tv(l3) = tv(l1)+perv - 135 continue -c find the least-squares spline sinf(u,v) and calculate for each knot -c interval tu(j+3)<=u<=tu(j+4) (tv(j+3)<=v<=tv(j+4)) the sum -c of squared residuals fpintu(j),j=1,2,...,nu-7 (fpintv(j),j=1,2,... -c ,nv-7) for the data points having their absciss (ordinate)-value -c belonging to that interval. -c fp gives the total sum of squared residuals. - 140 call fpgrpa(ifsu,ifsv,ifbu,ifbv,idim,ipar,u,mu,v,mv,z,mz,tu, - * nu,tv,nv,p,c,nc,fp,fpintu,fpintv,mm,mvnu,wrk(lsu),wrk(lsv), - * wrk(lri),wrk(lq),wrk(lau),wrk(lau1),wrk(lav),wrk(lav1), - * wrk(lbu),wrk(lbv),nru,nrv) - if(ier.eq.(-2)) fp0 = fp -c test whether the least-squares spline is an acceptable solution. - if(iopt.lt.0) go to 440 - fpms = fp-s - if(abs(fpms) .lt. acc) go to 440 -c if f(p=inf) < s, we accept the choice of knots. - if(fpms.lt.0.) go to 300 -c if nu=nmaxu and nv=nmaxv, sinf(u,v) is an interpolating spline. - if(nu.eq.nmaxu .and. nv.eq.nmaxv) go to 430 -c increase the number of knots. -c if nu=nue and nv=nve we cannot further increase the number of knots -c because of the storage capacity limitation. - if(nu.eq.nue .and. nv.eq.nve) go to 420 - ier = 0 -c adjust the parameter reducu or reducv according to the direction -c in which the last added knots were located. 
- if (lastdi.lt.0) go to 150 - if (lastdi.eq.0) go to 170 - go to 160 - 150 reducu = fpold-fp - go to 170 - 160 reducv = fpold-fp -c store the sum of squared residuals for the current set of knots. - 170 fpold = fp -c find nplu, the number of knots we should add in the u-direction. - nplu = 1 - if(nu.eq.nminu) go to 180 - npl1 = nplusu*2 - rn = nplusu - if(reducu.gt.acc) npl1 = rn*fpms/reducu - nplu = min0(nplusu*2,max0(npl1,nplusu/2,1)) -c find nplv, the number of knots we should add in the v-direction. - 180 nplv = 1 - if(nv.eq.nminv) go to 190 - npl1 = nplusv*2 - rn = nplusv - if(reducv.gt.acc) npl1 = rn*fpms/reducv - nplv = min0(nplusv*2,max0(npl1,nplusv/2,1)) - 190 if (nplu.lt.nplv) go to 210 - if (nplu.eq.nplv) go to 200 - go to 230 - 200 if(lastdi.lt.0) go to 230 - 210 if(nu.eq.nue) go to 230 -c addition in the u-direction. - lastdi = -1 - nplusu = nplu - ifsu = 0 - do 220 l=1,nplusu -c add a new knot in the u-direction - call fpknot(u,mu,tu,nu,fpintu,nrdatu,nrintu,nuest,1) -c test whether we cannot further increase the number of knots in the -c u-direction. - if(nu.eq.nue) go to 250 - 220 continue - go to 250 - 230 if(nv.eq.nve) go to 210 -c addition in the v-direction. - lastdi = 1 - nplusv = nplv - ifsv = 0 - do 240 l=1,nplusv -c add a new knot in the v-direction. - call fpknot(v,mv,tv,nv,fpintv,nrdatv,nrintv,nvest,1) -c test whether we cannot further increase the number of knots in the -c v-direction. - if(nv.eq.nve) go to 250 - 240 continue -c restart the computations with the new set of knots. - 250 continue -c test whether the least-squares polynomial is a solution of our -c approximation problem. - 300 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline sp(u,v) c -c ***************************************************** c -c we have determined the number of knots and their position. 
we now c -c compute the b-spline coefficients of the smoothing spline sp(u,v). c -c this smoothing spline varies with the parameter p in such a way thatc -c f(p)=suml=1,idim(sumi=1,mu(sumj=1,mv((z(i,j,l)-sp(u(i),v(j),l))**2) c -c is a continuous, strictly decreasing function of p. moreover the c -c least-squares polynomial corresponds to p=0 and the least-squares c -c spline to p=infinity. iteratively we then have to determine the c -c positive value of p such that f(p)=s. the process which is proposed c -c here makes use of rational interpolation. f(p) is approximated by a c -c rational function r(p)=(u*p+v)/(p+w); three values of p (p1,p2,p3) c -c with corresponding values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s)c -c are used to calculate the new value of p such that r(p)=s. c -c convergence is guaranteed by taking f1 > 0 and f3 < 0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = one - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p)=s. - do 350 iter = 1,maxit -c find the smoothing spline sp(u,v) and the corresponding sum of -c squared residuals fp. - call fpgrpa(ifsu,ifsv,ifbu,ifbv,idim,ipar,u,mu,v,mv,z,mz,tu, - * nu,tv,nv,p,c,nc,fp,fpintu,fpintv,mm,mvnu,wrk(lsu),wrk(lsv), - * wrk(lri),wrk(lq),wrk(lau),wrk(lau1),wrk(lav),wrk(lav1), - * wrk(lbu),wrk(lbv),nru,nrv) -c test whether the approximation sp(u,v) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximum allowable number of iterations has been -c reached. - if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 320 - if((f2-f3).gt.acc) go to 310 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 350 - 310 if(f2.lt.0.) 
ich3 = 1 - 320 if(ich1.ne.0) go to 340 - if((f1-f2).gt.acc) go to 330 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 350 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 350 -c test whether the iteration process proceeds as theoretically -c expected. - 330 if(f2.gt.0.) ich1 = 1 - 340 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 350 continue -c error codes and messages. - 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - fp = 0. - 440 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpperi.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpperi.f deleted file mode 100644 index ccc91451fc..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpperi.f +++ /dev/null @@ -1,616 +0,0 @@ - subroutine fpperi(iopt,x,y,w,m,k,s,nest,tol,maxit,k1,k2,n,t,c, - * fp,fpint,z,a1,a2,b,g1,g2,q,nrdata,ier) -c .. -c ..scalar arguments.. - real*8 s,tol,fp - integer iopt,m,k,nest,maxit,k1,k2,n,ier -c ..array arguments.. - real*8 x(m),y(m),w(m),t(nest),c(nest),fpint(nest),z(nest), - * a1(nest,k1),a2(nest,k),b(nest,k2),g1(nest,k2),g2(nest,k1), - * q(m,k1) - integer nrdata(nest) -c ..local scalars.. - real*8 acc,cos,c1,d1,fpart,fpms,fpold,fp0,f1,f2,f3,p,per,pinv,piv, - * - * p1,p2,p3,sin,store,term,wi,xi,yi,rn,one,con1,con4,con9,half - integer i,ich1,ich3,ij,ik,it,iter,i1,i2,i3,j,jk,jper,j1,j2,kk, - * kk1,k3,l,l0,l1,l5,mm,m1,new,nk1,nk2,nmax,nmin,nplus,npl1, - * nrint,n10,n11,n7,n8 -c ..local arrays.. - real*8 h(6),h1(7),h2(6) -c ..function references.. - real*8 abs,fprati - integer max0,min0 -c ..subroutine references.. -c fpbacp,fpbspl,fpgivs,fpdisc,fpknot,fprota -c .. 
-c set constants - one = 0.1e+01 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 - half = 0.5e0 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position c -c ************************************************************** c -c given a set of knots we compute the least-squares periodic spline c -c sinf(x). if the sum f(p=inf) <= s we accept the choice of knots. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmax = m+2*k. c -c if s > 0 and c -c iopt=0 we first compute the least-squares polynomial of c -c degree k; n = nmin = 2*k+2. since s(x) must be periodic we c -c find that s(x) is a constant function. c -c iopt=1 we start with the set of knots found at the last c -c call of the routine, except for the case that s > fp0; then c -c we compute directly the least-squares periodic polynomial. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - m1 = m-1 - kk = k - kk1 = k1 - k3 = 3*k+1 - nmin = 2*k1 -c determine the length of the period of s(x). - per = x(m)-x(1) - if(iopt.lt.0) go to 50 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s -c determine nmax, the number of knots for periodic spline interpolation - nmax = m+2*k - if(s.gt.0. .or. nmax.eq.nmin) go to 30 -c if s=0, s(x) is an interpolating spline. - n = nmax -c test whether the required storage space exceeds the available one. - if(n.gt.nest) go to 620 -c find the position of the interior knots in case of interpolation. - 5 if((k/2)*2 .eq. k) go to 20 - do 10 i=2,m1 - j = i+k - t(j) = x(i) - 10 continue - if(s.gt.0.) go to 50 - kk = k-1 - kk1 = k - if(kk.gt.0) go to 50 - t(1) = t(m)-per - t(2) = x(1) - t(m+1) = x(m) - t(m+2) = t(3)+per - do 15 i=1,m1 - c(i) = y(i) - 15 continue - c(m) = c(1) - fp = 0. - fpint(n) = fp0 - fpint(n-1) = 0. 
- nrdata(n) = 0 - go to 630 - 20 do 25 i=2,m1 - j = i+k - t(j) = (x(i)+x(i-1))*half - 25 continue - go to 50 -c if s > 0 our initial choice depends on the value of iopt. -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c periodic polynomial. (i.e. a constant function). -c if iopt=1 and fp0>s we start computing the least-squares periodic -c spline according the set of knots found at the last call of the -c routine. - 30 if(iopt.eq.0) go to 35 - if(n.eq.nmin) go to 35 - fp0 = fpint(n) - fpold = fpint(n-1) - nplus = nrdata(n) - if(fp0.gt.s) go to 50 -c the case that s(x) is a constant function is treated separetely. -c find the least-squares constant c1 and compute fp0 at the same time. - 35 fp0 = 0. - d1 = 0. - c1 = 0. - do 40 it=1,m1 - wi = w(it) - yi = y(it)*wi - call fpgivs(wi,d1,cos,sin) - call fprota(cos,sin,yi,c1) - fp0 = fp0+yi**2 - 40 continue - c1 = c1/d1 -c test whether that constant function is a solution of our problem. - fpms = fp0-s - if(fpms.lt.acc .or. nmax.eq.nmin) go to 640 - fpold = fp0 -c test whether the required storage space exceeds the available one. - if(nmin.ge.nest) go to 620 -c start computing the least-squares periodic spline with one -c interior knot. - nplus = 1 - n = nmin+1 - mm = (m+1)/2 - t(k2) = x(mm) - nrdata(1) = mm-2 - nrdata(2) = m1-mm -c main loop for the different sets of knots. m is a save upper -c bound for the number of trials. - 50 do 340 iter=1,m -c find nrint, the number of knot intervals. - nrint = n-nmin+1 -c find the position of the additional knots which are needed for -c the b-spline representation of s(x). if we take -c t(k+1) = x(1), t(n-k) = x(m) -c t(k+1-j) = t(n-k-j) - per, j=1,2,...k -c t(n-k+j) = t(k+1+j) + per, j=1,2,...k -c then s(x) is a periodic spline with period per if the b-spline -c coefficients satisfy the following conditions -c c(n7+j) = c(j), j=1,...k (**) with n7=n-2*k-1. 
- t(k1) = x(1) - nk1 = n-k1 - nk2 = nk1+1 - t(nk2) = x(m) - do 60 j=1,k - i1 = nk2+j - i2 = nk2-j - j1 = k1+j - j2 = k1-j - t(i1) = t(j1)+per - t(j2) = t(i2)-per - 60 continue -c compute the b-spline coefficients c(j),j=1,...n7 of the least-squares -c periodic spline sinf(x). the observation matrix a is built up row -c by row while taking into account condition (**) and is reduced to -c triangular form by givens transformations . -c at the same time fp=f(p=inf) is computed. -c the n7 x n7 triangularised upper matrix a has the form -c ! a1 ' ! -c a = ! ' a2 ! -c ! 0 ' ! -c with a2 a n7 x k matrix and a1 a n10 x n10 upper triangular -c matrix of bandwith k+1 ( n10 = n7-k). -c initialization. - do 70 i=1,nk1 - z(i) = 0. - do 70 j=1,kk1 - a1(i,j) = 0. - 70 continue - n7 = nk1-k - n10 = n7-kk - jper = 0 - fp = 0. - l = k1 - do 290 it=1,m1 -c fetch the current data point x(it),y(it) - xi = x(it) - wi = w(it) - yi = y(it)*wi -c search for knot interval t(l) <= xi < t(l+1). - 80 if(xi.lt.t(l+1)) go to 85 - l = l+1 - go to 80 -c evaluate the (k+1) non-zero b-splines at xi and store them in q. - 85 call fpbspl(t,n,k,xi,l,h) - do 90 i=1,k1 - q(it,i) = h(i) - h(i) = h(i)*wi - 90 continue - l5 = l-k1 -c test whether the b-splines nj,k+1(x),j=1+n7,...nk1 are all zero at xi - if(l5.lt.n10) go to 285 - if(jper.ne.0) go to 160 -c initialize the matrix a2. - do 95 i=1,n7 - do 95 j=1,kk - a2(i,j) = 0. - 95 continue - jk = n10+1 - do 110 i=1,kk - ik = jk - do 100 j=1,kk1 - if(ik.le.0) go to 105 - a2(ik,i) = a1(ik,j) - ik = ik-1 - 100 continue - 105 jk = jk+1 - 110 continue - jper = 1 -c if one of the b-splines nj,k+1(x),j=n7+1,...nk1 is not zero at xi -c we take account of condition (**) for setting up the new row -c of the observation matrix a. this row is stored in the arrays h1 -c (the part with respect to a1) and h2 (the part with -c respect to a2). - 160 do 170 i=1,kk - h1(i) = 0. - h2(i) = 0. - 170 continue - h1(kk1) = 0. 
- j = l5-n10 - do 210 i=1,kk1 - j = j+1 - l0 = j - 180 l1 = l0-kk - if(l1.le.0) go to 200 - if(l1.le.n10) go to 190 - l0 = l1-n10 - go to 180 - 190 h1(l1) = h(i) - go to 210 - 200 h2(l0) = h2(l0)+h(i) - 210 continue -c rotate the new row of the observation matrix into triangle -c by givens transformations. - if(n10.le.0) go to 250 -c rotation with the rows 1,2,...n10 of matrix a. - do 240 j=1,n10 - piv = h1(1) - if(piv.ne.0.) go to 214 - do 212 i=1,kk - h1(i) = h1(i+1) - 212 continue - h1(kk1) = 0. - go to 240 -c calculate the parameters of the givens transformation. - 214 call fpgivs(piv,a1(j,1),cos,sin) -c transformation to the right hand side. - call fprota(cos,sin,yi,z(j)) -c transformations to the left hand side with respect to a2. - do 220 i=1,kk - call fprota(cos,sin,h2(i),a2(j,i)) - 220 continue - if(j.eq.n10) go to 250 - i2 = min0(n10-j,kk) -c transformations to the left hand side with respect to a1. - do 230 i=1,i2 - i1 = i+1 - call fprota(cos,sin,h1(i1),a1(j,i1)) - h1(i) = h1(i1) - 230 continue - h1(i1) = 0. - 240 continue -c rotation with the rows n10+1,...n7 of matrix a. - 250 do 270 j=1,kk - ij = n10+j - if(ij.le.0) go to 270 - piv = h2(j) - if(piv.eq.0.) go to 270 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a2(ij,j),cos,sin) -c transformations to right hand side. - call fprota(cos,sin,yi,z(ij)) - if(j.eq.kk) go to 280 - j1 = j+1 -c transformations to left hand side. - do 260 i=j1,kk - call fprota(cos,sin,h2(i),a2(ij,i)) - 260 continue - 270 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 280 fp = fp+yi**2 - go to 290 -c rotation of the new row of the observation matrix into -c triangle in case the b-splines nj,k+1(x),j=n7+1,...n-k-1 are all zero -c at xi. - 285 j = l5 - do 140 i=1,kk1 - j = j+1 - piv = h(i) - if(piv.eq.0.) go to 140 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a1(j,1),cos,sin) -c transformations to right hand side. 
- call fprota(cos,sin,yi,z(j)) - if(i.eq.kk1) go to 150 - i2 = 1 - i3 = i+1 -c transformations to left hand side. - do 130 i1=i3,kk1 - i2 = i2+1 - call fprota(cos,sin,h(i1),a1(j,i2)) - 130 continue - 140 continue -c add contribution of this row to the sum of squares of residual -c right hand sides. - 150 fp = fp+yi**2 - 290 continue - fpint(n) = fp0 - fpint(n-1) = fpold - nrdata(n) = nplus -c backward substitution to obtain the b-spline coefficients c(j),j=1,.n - call fpbacp(a1,a2,z,n7,kk,c,kk1,nest) -c calculate from condition (**) the coefficients c(j+n7),j=1,2,...k. - do 295 i=1,k - j = i+n7 - c(j) = c(i) - 295 continue - if(iopt.lt.0) go to 660 -c test whether the approximation sinf(x) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 660 -c if f(p=inf) < s accept the choice of knots. - if(fpms.lt.0.) go to 350 -c if n=nmax, sinf(x) is an interpolating spline. - if(n.eq.nmax) go to 630 -c increase the number of knots. -c if n=nest we cannot increase the number of knots because of the -c storage capacity limitation. - if(n.eq.nest) go to 620 -c determine the number of knots nplus we are going to add. - npl1 = nplus*2 - rn = nplus - if(fpold-fp.gt.acc) npl1 = rn*fpms/(fpold-fp) - nplus = min0(nplus*2,max0(npl1,nplus/2,1)) - fpold = fp -c compute the sum(wi*(yi-s(xi))**2) for each knot interval -c t(j+k) <= xi <= t(j+k+1) and store it in fpint(j),j=1,2,...nrint. - fpart = 0. - i = 1 - l = k1 - do 320 it=1,m1 - if(x(it).lt.t(l)) go to 300 - new = 1 - l = l+1 - 300 term = 0. 
- l0 = l-k2 - do 310 j=1,k1 - l0 = l0+1 - term = term+c(l0)*q(it,j) - 310 continue - term = (w(it)*(term-y(it)))**2 - fpart = fpart+term - if(new.eq.0) go to 320 - if(l.gt.k2) go to 315 - fpint(nrint) = term - new = 0 - go to 320 - 315 store = term*half - fpint(i) = fpart-store - i = i+1 - fpart = store - new = 0 - 320 continue - fpint(nrint) = fpint(nrint)+fpart - do 330 l=1,nplus -c add a new knot - call fpknot(x,m,t,n,fpint,nrdata,nrint,nest,1) -c if n=nmax we locate the knots as for interpolation. - if(n.eq.nmax) go to 5 -c test whether we cannot further increase the number of knots. - if(n.eq.nest) go to 340 - 330 continue -c restart the computations with the new set of knots. - 340 continue -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing periodic spline sp(x). c -c ************************************************************* c -c we have determined the number of knots and their position. c -c we now compute the b-spline coefficients of the smoothing spline c -c sp(x). the observation matrix a is extended by the rows of matrix c -c b expressing that the kth derivative discontinuities of sp(x) at c -c the interior knots t(k+2),...t(n-k-1) must be zero. the corres- c -c ponding weights of these additional rows are set to 1/sqrt(p). c -c iteratively we then have to determine the value of p such that c -c f(p)=sum(w(i)*(y(i)-sp(x(i)))**2) be = s. we already know that c -c the least-squares constant function corresponds to p=0, and that c -c the least-squares periodic spline corresponds to p=infinity. the c -c iteration process which is proposed here, makes use of rational c -c interpolation. since f(p) is a convex and strictly decreasing c -c function of p, it can be approximated by a rational function c -c r(p) = (u*p+v)/(p+w). three values of p(p1,p2,p3) with correspond- c -c ing values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used c -c to calculate the new value of p such that r(p)=s. 
convergence is c -c guaranteed by taking f1>0 and f3<0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jump of the kth derivative of the -c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b. - 350 call fpdisc(t,n,k2,b,nest) -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - n11 = n10-1 - n8 = n7-1 - p = 0. - l = n7 - do 352 i=1,k - j = k+1-i - p = p+a2(l,j) - l = l-1 - if(l.eq.0) go to 356 - 352 continue - do 354 i=1,n10 - p = p+a1(i,1) - 354 continue - 356 rn = n7 - p = rn/p - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p) = s. - do 595 iter=1,maxit -c form the matrix g as the matrix a extended by the rows of matrix b. -c the rows of matrix b with weight 1/p are rotated into -c the triangularised observation matrix a. -c after triangularisation our n7 x n7 matrix g takes the form -c ! g1 ' ! -c g = ! ' g2 ! -c ! 0 ' ! -c with g2 a n7 x (k+1) matrix and g1 a n11 x n11 upper triangular -c matrix of bandwidth k+2. ( n11 = n7-k-1) - pinv = one/p -c store matrix a into g - do 360 i=1,n7 - c(i) = z(i) - g1(i,k1) = a1(i,k1) - g1(i,k2) = 0. - g2(i,1) = 0. - do 360 j=1,k - g1(i,j) = a1(i,j) - g2(i,j+1) = a2(i,j) - 360 continue - l = n10 - do 370 j=1,k1 - if(l.le.0) go to 375 - g2(l,1) = a1(l,j) - l = l-1 - 370 continue - 375 do 540 it=1,n8 -c fetch a new row of matrix b and store it in the arrays h1 (the part -c with respect to g1) and h2 (the part with respect to g2). - yi = 0. - do 380 i=1,k1 - h1(i) = 0. - h2(i) = 0. - 380 continue - h1(k2) = 0. 
- if(it.gt.n11) go to 420 - l = it - l0 = it - do 390 j=1,k2 - if(l0.eq.n10) go to 400 - h1(j) = b(it,j)*pinv - l0 = l0+1 - 390 continue - go to 470 - 400 l0 = 1 - do 410 l1=j,k2 - h2(l0) = b(it,l1)*pinv - l0 = l0+1 - 410 continue - go to 470 - 420 l = 1 - i = it-n10 - do 460 j=1,k2 - i = i+1 - l0 = i - 430 l1 = l0-k1 - if(l1.le.0) go to 450 - if(l1.le.n11) go to 440 - l0 = l1-n11 - go to 430 - 440 h1(l1) = b(it,j)*pinv - go to 460 - 450 h2(l0) = h2(l0)+b(it,j)*pinv - 460 continue - if(n11.le.0) go to 510 -c rotate this row into triangle by givens transformations without -c square roots. -c rotation with the rows l,l+1,...n11. - 470 do 500 j=l,n11 - piv = h1(1) -c calculate the parameters of the givens transformation. - call fpgivs(piv,g1(j,1),cos,sin) -c transformation to right hand side. - call fprota(cos,sin,yi,c(j)) -c transformation to the left hand side with respect to g2. - do 480 i=1,k1 - call fprota(cos,sin,h2(i),g2(j,i)) - 480 continue - if(j.eq.n11) go to 510 - i2 = min0(n11-j,k1) -c transformation to the left hand side with respect to g1. - do 490 i=1,i2 - i1 = i+1 - call fprota(cos,sin,h1(i1),g1(j,i1)) - h1(i) = h1(i1) - 490 continue - h1(i1) = 0. - 500 continue -c rotation with the rows n11+1,...n7 - 510 do 530 j=1,k1 - ij = n11+j - if(ij.le.0) go to 530 - piv = h2(j) -c calculate the parameters of the givens transformation - call fpgivs(piv,g2(ij,j),cos,sin) -c transformation to the right hand side. - call fprota(cos,sin,yi,c(ij)) - if(j.eq.k1) go to 540 - j1 = j+1 -c transformation to the left hand side. - do 520 i=j1,k1 - call fprota(cos,sin,h2(i),g2(ij,i)) - 520 continue - 530 continue - 540 continue -c backward substitution to obtain the b-spline coefficients -c c(j),j=1,2,...n7 of sp(x). - call fpbacp(g1,g2,c,n7,k1,c,k2,nest) -c calculate from condition (**) the b-spline coefficients c(n7+j),j=1,. - do 545 i=1,k - j = i+n7 - c(j) = c(i) - 545 continue -c computation of f(p). - fp = 0. 
- l = k1 - do 570 it=1,m1 - if(x(it).lt.t(l)) go to 550 - l = l+1 - 550 l0 = l-k2 - term = 0. - do 560 j=1,k1 - l0 = l0+1 - term = term+c(l0)*q(it,j) - 560 continue - fp = fp+(w(it)*(term-y(it)))**2 - 570 continue -c test whether the approximation sp(x) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 660 -c test whether the maximal number of iterations is reached. - if(iter.eq.maxit) go to 600 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 580 - if((f2-f3) .gt. acc) go to 575 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 +p2*con1 - go to 595 - 575 if(f2.lt.0.) ich3 = 1 - 580 if(ich1.ne.0) go to 590 - if((f1-f2) .gt. acc) go to 585 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 595 - if(p.ge.p3) p = p2*con1 +p3*con9 - go to 595 - 585 if(f2.gt.0.) ich1 = 1 -c test whether the iteration process proceeds as theoretically -c expected. - 590 if(f2.ge.f1 .or. f2.le.f3) go to 610 -c find the new value for p. - p = fprati(p1,f1,p2,f2,p3,f3) - 595 continue -c error codes and messages. - 600 ier = 3 - go to 660 - 610 ier = 2 - go to 660 - 620 ier = 1 - go to 660 - 630 ier = -1 - go to 660 - 640 ier = -2 -c the least-squares constant function c1 is a solution of our problem. -c a constant function is a spline of degree k with all b-spline -c coefficients equal to that constant c1. - do 650 i=1,k1 - rn = k1-i - t(i) = x(1)-rn*per - c(i) = c1 - j = i+k1 - rn = i-1 - t(j) = x(m)+rn*per - 650 continue - n = nmin - fp = fp0 - fpint(n) = fp0 - fpint(n-1) = 0. 
- nrdata(n) = 0 - 660 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fppocu.f b/scipy-0.10.1/scipy/interpolate/fitpack/fppocu.f deleted file mode 100644 index 740addb5f6..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fppocu.f +++ /dev/null @@ -1,72 +0,0 @@ - subroutine fppocu(idim,k,a,b,ib,db,nb,ie,de,ne,cp,np) -c subroutine fppocu finds a idim-dimensional polynomial curve p(u) = -c (p1(u),p2(u),...,pidim(u)) of degree k, satisfying certain derivative -c constraints at the end points a and b, i.e. -c (l) -c if ib > 0 : pj (a) = db(idim*l+j), l=0,1,...,ib-1 -c (l) -c if ie > 0 : pj (b) = de(idim*l+j), l=0,1,...,ie-1 -c -c the polynomial curve is returned in its b-spline representation -c ( coefficients cp(j), j=1,2,...,np ) -c .. -c ..scalar arguments.. - integer idim,k,ib,nb,ie,ne,np - real*8 a,b -c ..array arguments.. - real*8 db(nb),de(ne),cp(np) -c ..local scalars.. - real*8 ab,aki - integer i,id,j,jj,l,ll,k1,k2 -c ..local array.. - real*8 work(6,6) -c .. - k1 = k+1 - k2 = 2*k1 - ab = b-a - do 110 id=1,idim - do 10 j=1,k1 - work(j,1) = 0. 
- 10 continue - if(ib.eq.0) go to 50 - l = id - do 20 i=1,ib - work(1,i) = db(l) - l = l+idim - 20 continue - if(ib.eq.1) go to 50 - ll = ib - do 40 j=2,ib - ll = ll-1 - do 30 i=1,ll - aki = k1-i - work(j,i) = ab*work(j-1,i+1)/aki + work(j-1,i) - 30 continue - 40 continue - 50 if(ie.eq.0) go to 90 - l = id - j = k1 - do 60 i=1,ie - work(j,i) = de(l) - l = l+idim - j = j-1 - 60 continue - if(ie.eq.1) go to 90 - ll = ie - do 80 jj=2,ie - ll = ll-1 - j = k1+1-jj - do 70 i=1,ll - aki = k1-i - work(j,i) = work(j+1,i) - ab*work(j,i+1)/aki - j = j-1 - 70 continue - 80 continue - 90 l = (id-1)*k2 - do 100 j=1,k1 - l = l+1 - cp(l) = work(j,1) - 100 continue - 110 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fppogr.f b/scipy-0.10.1/scipy/interpolate/fitpack/fppogr.f deleted file mode 100644 index 0468aee010..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fppogr.f +++ /dev/null @@ -1,410 +0,0 @@ - subroutine fppogr(iopt,ider,u,mu,v,mv,z,mz,z0,r,s,nuest,nvest, - * tol,maxit,nc,nu,tu,nv,tv,c,fp,fp0,fpold,reducu,reducv,fpintu, - * fpintv,dz,step,lastdi,nplusu,nplusv,lasttu,nru,nrv,nrdatu, - * nrdatv,wrk,lwrk,ier) -c .. -c ..scalar arguments.. - integer mu,mv,mz,nuest,nvest,maxit,nc,nu,nv,lastdi,nplusu,nplusv, - * lasttu,lwrk,ier - real*8 z0,r,s,tol,fp,fp0,fpold,reducu,reducv,step -c ..array arguments.. - integer iopt(3),ider(2),nrdatu(nuest),nrdatv(nvest),nru(mu), - * nrv(mv) - real*8 u(mu),v(mv),z(mz),tu(nuest),tv(nvest),c(nc),fpintu(nuest), - * fpintv(nvest),dz(3),wrk(lwrk) -c ..local scalars.. - real*8 acc,fpms,f1,f2,f3,p,per,pi,p1,p2,p3,vb,ve,zmax,zmin,rn,one, - * - * con1,con4,con9 - integer i,ich1,ich3,ifbu,ifbv,ifsu,ifsv,istart,iter,i1,i2,j,ju, - * ktu,l,l1,l2,l3,l4,mpm,mumin,mu0,mu1,nn,nplu,nplv,npl1,nrintu, - * nrintv,nue,numax,nve,nvmax -c ..local arrays.. - integer idd(2) - real*8 dzz(3) -c ..function references.. - real*8 abs,datan2,fprati - integer max0,min0 -c ..subroutine references.. -c fpknot,fpopdi -c .. 
-c set constants - one = 1d0 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 -c initialization - ifsu = 0 - ifsv = 0 - ifbu = 0 - ifbv = 0 - p = -one - mumin = 4-iopt(3) - if(ider(1).ge.0) mumin = mumin-1 - if(iopt(2).eq.1 .and. ider(2).eq.1) mumin = mumin-1 - pi = datan2(0d0,-one) - per = pi+pi - vb = v(1) - ve = vb+per -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position. c -c **************************************************************** c -c given a set of knots we compute the least-squares spline sinf(u,v) c -c and the corresponding sum of squared residuals fp = f(p=inf). c -c if iopt(1)=-1 sinf(u,v) is the requested approximation. c -c if iopt(1)>=0 we check whether we can accept the knots: c -c if fp <= s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares spline until finally fp <= s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots in the u-direction equals nu=numax=mu+5+iopt(2)+iopt(3) c -c and in the v-direction nv=nvmax=mv+7. c -c if s>0 and c -c iopt(1)=0 we first compute the least-squares polynomial,i.e. a c -c spline without interior knots : nu=8 ; nv=8. c -c iopt(1)=1 we start with the set of knots found at the last call c -c of the routine, except for the case that s > fp0; then we c -c compute the least-squares polynomial directly. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - if(iopt(1).lt.0) go to 120 -c acc denotes the absolute tolerance for the root of f(p)=s. - acc = tol*s -c numax and nvmax denote the number of knots needed for interpolation. - numax = mu+5+iopt(2)+iopt(3) - nvmax = mv+7 - nue = min0(numax,nuest) - nve = min0(nvmax,nvest) - if(s.gt.0.) go to 100 -c if s = 0, s(u,v) is an interpolating spline. 
- nu = numax - nv = nvmax -c test whether the required storage space exceeds the available one. - if(nu.gt.nuest .or. nv.gt.nvest) go to 420 -c find the position of the knots in the v-direction. - do 10 l=1,mv - tv(l+3) = v(l) - 10 continue - tv(mv+4) = ve - l1 = mv-2 - l2 = mv+5 - do 20 i=1,3 - tv(i) = v(l1)-per - tv(l2) = v(i+1)+per - l1 = l1+1 - l2 = l2+1 - 20 continue -c if not all the derivative values g(i,j) are given, we will first -c estimate these values by computing a least-squares spline - idd(1) = ider(1) - if(idd(1).eq.0) idd(1) = 1 - if(idd(1).gt.0) dz(1) = z0 - idd(2) = ider(2) - if(ider(1).lt.0) go to 30 - if(iopt(2).eq.0 .or. ider(2).ne.0) go to 70 -c we set up the knots in the u-direction for computing the least-squares -c spline. - 30 i1 = 3 - i2 = mu-2 - nu = 4 - do 40 i=1,mu - if(i1.gt.i2) go to 50 - nu = nu+1 - tu(nu) = u(i1) - i1 = i1+2 - 40 continue - 50 do 60 i=1,4 - tu(i) = 0. - nu = nu+1 - tu(nu) = r - 60 continue -c we compute the least-squares spline for estimating the derivatives. - call fpopdi(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,z,mz,z0,dz,iopt,idd, - * tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru,nrv, - * wrk,lwrk) - ifsu = 0 -c if all the derivatives at the origin are known, we compute the -c interpolating spline. -c we set up the knots in the u-direction, needed for interpolation. - 70 nn = numax-8 - if(nn.eq.0) go to 95 - ju = 2-iopt(2) - do 80 l=1,nn - tu(l+4) = u(ju) - ju = ju+1 - 80 continue - nu = numax - l = nu - do 90 i=1,4 - tu(i) = 0. - tu(l) = r - l = l-1 - 90 continue -c we compute the interpolating spline. - 95 call fpopdi(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,z,mz,z0,dz,iopt,idd, - * tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru,nrv, - * wrk,lwrk) - go to 430 -c if s>0 our initial choice of knots depends on the value of iopt(1). 
- 100 ier = 0 - if(iopt(1).eq.0) go to 115 - step = -step - if(fp0.le.s) go to 115 -c if iopt(1)=1 and fp0 > s we start computing the least-squares spline -c according to the set of knots found at the last call of the routine. -c we determine the number of grid coordinates u(i) inside each knot -c interval (tu(l),tu(l+1)). - l = 5 - j = 1 - nrdatu(1) = 0 - mu0 = 2-iopt(2) - mu1 = mu-2+iopt(3) - do 105 i=mu0,mu1 - nrdatu(j) = nrdatu(j)+1 - if(u(i).lt.tu(l)) go to 105 - nrdatu(j) = nrdatu(j)-1 - l = l+1 - j = j+1 - nrdatu(j) = 0 - 105 continue -c we determine the number of grid coordinates v(i) inside each knot -c interval (tv(l),tv(l+1)). - l = 5 - j = 1 - nrdatv(1) = 0 - do 110 i=2,mv - nrdatv(j) = nrdatv(j)+1 - if(v(i).lt.tv(l)) go to 110 - nrdatv(j) = nrdatv(j)-1 - l = l+1 - j = j+1 - nrdatv(j) = 0 - 110 continue - idd(1) = ider(1) - idd(2) = ider(2) - go to 120 -c if iopt(1)=0 or iopt(1)=1 and s >= fp0,we start computing the least- -c squares polynomial (which is a spline without interior knots). - 115 ier = -2 - idd(1) = ider(1) - idd(2) = 1 - nu = 8 - nv = 8 - nrdatu(1) = mu-3+iopt(2)+iopt(3) - nrdatv(1) = mv-1 - lastdi = 0 - nplusu = 0 - nplusv = 0 - fp0 = 0. - fpold = 0. - reducu = 0. - reducv = 0. -c main loop for the different sets of knots.mpm=mu+mv is a save upper -c bound for the number of trials. - 120 mpm = mu+mv - do 270 iter=1,mpm -c find nrintu (nrintv) which is the number of knot intervals in the -c u-direction (v-direction). - nrintu = nu-7 - nrintv = nv-7 -c find the position of the additional knots which are needed for the -c b-spline representation of s(u,v). - i = nu - do 130 j=1,4 - tu(j) = 0. - tu(i) = r - i = i-1 - 130 continue - l1 = 4 - l2 = l1 - l3 = nv-3 - l4 = l3 - tv(l2) = vb - tv(l3) = ve - do 140 j=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tv(l2) = tv(l4)-per - tv(l3) = tv(l1)+per - 140 continue -c find an estimate of the range of possible values for the optimal -c derivatives at the origin. 
- ktu = nrdatu(1)+2-iopt(2) - if(nrintu.eq.1) ktu = mu - if(ktu.lt.mumin) ktu = mumin - if(ktu.eq.lasttu) go to 150 - zmin = z0 - zmax = z0 - l = mv*ktu - do 145 i=1,l - if(z(i).lt.zmin) zmin = z(i) - if(z(i).gt.zmax) zmax = z(i) - 145 continue - step = zmax-zmin - lasttu = ktu -c find the least-squares spline sinf(u,v). - 150 call fpopdi(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,z,mz,z0,dz,iopt,idd, - * tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru,nrv, - * wrk,lwrk) - if(step.lt.0.) step = -step - if(ier.eq.(-2)) fp0 = fp -c test whether the least-squares spline is an acceptable solution. - if(iopt(1).lt.0) go to 440 - fpms = fp-s - if(abs(fpms) .lt. acc) go to 440 -c if f(p=inf) < s, we accept the choice of knots. - if(fpms.lt.0.) go to 300 -c if nu=numax and nv=nvmax, sinf(u,v) is an interpolating spline - if(nu.eq.numax .and. nv.eq.nvmax) go to 430 -c increase the number of knots. -c if nu=nue and nv=nve we cannot further increase the number of knots -c because of the storage capacity limitation. - if(nu.eq.nue .and. nv.eq.nve) go to 420 - if(ider(1).eq.0) fpintu(1) = fpintu(1)+(z0-c(1))**2 - ier = 0 -c adjust the parameter reducu or reducv according to the direction -c in which the last added knots were located. - if (lastdi.lt.0) go to 160 - if (lastdi.eq.0) go to 155 - go to 170 - 155 nplv = 3 - idd(2) = ider(2) - fpold = fp - go to 230 - 160 reducu = fpold-fp - go to 175 - 170 reducv = fpold-fp -c store the sum of squared residuals for the current set of knots. - 175 fpold = fp -c find nplu, the number of knots we should add in the u-direction. - nplu = 1 - if(nu.eq.8) go to 180 - npl1 = nplusu*2 - rn = nplusu - if(reducu.gt.acc) npl1 = rn*fpms/reducu - nplu = min0(nplusu*2,max0(npl1,nplusu/2,1)) -c find nplv, the number of knots we should add in the v-direction. 
- 180 nplv = 3 - if(nv.eq.8) go to 190 - npl1 = nplusv*2 - rn = nplusv - if(reducv.gt.acc) npl1 = rn*fpms/reducv - nplv = min0(nplusv*2,max0(npl1,nplusv/2,1)) -c test whether we are going to add knots in the u- or v-direction. - 190 if (nplu.lt.nplv) go to 210 - if (nplu.eq.nplv) go to 200 - go to 230 - 200 if(lastdi.lt.0) go to 230 - 210 if(nu.eq.nue) go to 230 -c addition in the u-direction. - lastdi = -1 - nplusu = nplu - ifsu = 0 - istart = 0 - if(iopt(2).eq.0) istart = 1 - do 220 l=1,nplusu -c add a new knot in the u-direction - call fpknot(u,mu,tu,nu,fpintu,nrdatu,nrintu,nuest,istart) -c test whether we cannot further increase the number of knots in the -c u-direction. - if(nu.eq.nue) go to 270 - 220 continue - go to 270 - 230 if(nv.eq.nve) go to 210 -c addition in the v-direction. - lastdi = 1 - nplusv = nplv - ifsv = 0 - do 240 l=1,nplusv -c add a new knot in the v-direction. - call fpknot(v,mv,tv,nv,fpintv,nrdatv,nrintv,nvest,1) -c test whether we cannot further increase the number of knots in the -c v-direction. - if(nv.eq.nve) go to 270 - 240 continue -c restart the computations with the new set of knots. - 270 continue -c test whether the least-squares polynomial is a solution of our -c approximation problem. - 300 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline sp(u,v) c -c ***************************************************** c -c we have determined the number of knots and their position. we now c -c compute the b-spline coefficients of the smoothing spline sp(u,v). c -c this smoothing spline depends on the parameter p in such a way that c -c f(p) = sumi=1,mu(sumj=1,mv((z(i,j)-sp(u(i),v(j)))**2) c -c is a continuous, strictly decreasing function of p. moreover the c -c least-squares polynomial corresponds to p=0 and the least-squares c -c spline to p=infinity. then iteratively we have to determine the c -c positive value of p such that f(p)=s. 
the process which is proposed c -c here makes use of rational interpolation. f(p) is approximated by a c -c rational function r(p)=(u*p+v)/(p+w); three values of p (p1,p2,p3) c -c with corresponding values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s)c -c are used to calculate the new value of p such that r(p)=s. c -c convergence is guaranteed by taking f1 > 0 and f3 < 0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = one - dzz(1) = dz(1) - dzz(2) = dz(2) - dzz(3) = dz(3) - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p)=s. - do 350 iter = 1,maxit -c find the smoothing spline sp(u,v) and the corresponding sum f(p). - call fpopdi(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,z,mz,z0,dzz,iopt,idd, - * tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru,nrv, - * wrk,lwrk) -c test whether the approximation sp(u,v) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximum allowable number of iterations has been -c reached. - if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 320 - if((f2-f3).gt.acc) go to 310 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 350 - 310 if(f2.lt.0.) ich3 = 1 - 320 if(ich1.ne.0) go to 340 - if((f1-f2).gt.acc) go to 330 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 350 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 350 -c test whether the iteration process proceeds as theoretically -c expected. - 330 if(f2.gt.0.) ich1 = 1 - 340 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 350 continue -c error codes and messages. - 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - fp = 0. 
- 440 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fppola.f b/scipy-0.10.1/scipy/interpolate/fitpack/fppola.f deleted file mode 100644 index 2c65e1e622..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fppola.f +++ /dev/null @@ -1,840 +0,0 @@ - subroutine fppola(iopt1,iopt2,iopt3,m,u,v,z,w,rad,s,nuest,nvest, - * eta,tol,maxit,ib1,ib3,nc,ncc,intest,nrest,nu,tu,nv,tv,c,fp,sup, - * fpint,coord,f,ff,row,cs,cosi,a,q,bu,bv,spu,spv,h,index,nummer, - * wrk,lwrk,ier) -c ..scalar arguments.. - integer iopt1,iopt2,iopt3,m,nuest,nvest,maxit,ib1,ib3,nc,ncc, - * intest,nrest,nu,nv,lwrk,ier - real*8 s,eta,tol,fp,sup -c ..array arguments.. - integer index(nrest),nummer(m) - real*8 u(m),v(m),z(m),w(m),tu(nuest),tv(nvest),c(nc),fpint(intest) - *, - * coord(intest),f(ncc),ff(nc),row(nvest),cs(nvest),cosi(5,nvest), - * a(ncc,ib1),q(ncc,ib3),bu(nuest,5),bv(nvest,5),spu(m,4),spv(m,4), - * h(ib3),wrk(lwrk) -c ..user supplied function.. - real*8 rad -c ..local scalars.. - real*8 acc,arg,co,c1,c2,c3,c4,dmax,eps,fac,fac1,fac2,fpmax,fpms, - * f1,f2,f3,hui,huj,p,pi,pinv,piv,pi2,p1,p2,p3,r,ratio,si,sigma, - * sq,store,uu,u2,u3,wi,zi,rn,one,two,three,con1,con4,con9,half,ten - integer i,iband,iband3,iband4,ich1,ich3,ii,il,in,ipar,ipar1,irot, - * iter,i1,i2,i3,j,jl,jrot,j1,j2,k,l,la,lf,lh,ll,lu,lv,lwest,l1,l2, - * l3,l4,ncof,ncoff,nvv,nv4,nreg,nrint,nrr,nr1,nuu,nu4,num,num1, - * numin,nvmin,rank,iband1 -c ..local arrays.. - real*8 hu(4),hv(4) -c ..function references.. - real*8 abs,atan,cos,fprati,sin,sqrt - integer min0 -c ..subroutine references.. -c fporde,fpbspl,fpback,fpgivs,fprota,fprank,fpdisc,fprppo -c .. -c set constants - one = 1 - two = 2 - three = 3 - ten = 10 - half = 0.5e0 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 - pi = atan(one)*4 - pi2 = pi+pi - ipar = iopt2*(iopt2+3)/2 - ipar1 = ipar+1 - eps = sqrt(eta) - if(iopt1.lt.0) go to 90 - numin = 9 - nvmin = 9+iopt2*(iopt2+1) -c calculation of acc, the absolute tolerance for the root of f(p)=s. 
- acc = tol*s - if(iopt1.eq.0) go to 10 - if(s.lt.sup) then - if (nv.lt.nvmin) go to 70 - go to 90 - endif -c if iopt1 = 0 we begin by computing the weighted least-squares -c polymomial of the form -c s(u,v) = f(1)*(1-u**3)+f(2)*u**3+f(3)*(u**2-u**3)+f(4)*(u-u**3) -c where f(4) = 0 if iopt2> 0 , f(3) = 0 if iopt2 > 1 and -c f(2) = 0 if iopt3> 0. -c the corresponding weighted sum of squared residuals gives the upper -c bound sup for the smoothing factor s. - 10 sup = 0. - do 20 i=1,4 - f(i) = 0. - do 20 j=1,4 - a(i,j) = 0. - 20 continue - do 50 i=1,m - wi = w(i) - zi = z(i)*wi - uu = u(i) - u2 = uu*uu - u3 = uu*u2 - h(1) = (one-u3)*wi - h(2) = u3*wi - h(3) = u2*(one-uu)*wi - h(4) = uu*(one-u2)*wi - if(iopt3.ne.0) h(2) = 0. - if(iopt2.gt.1) h(3) = 0. - if(iopt2.gt.0) h(4) = 0. - do 40 j=1,4 - piv = h(j) - if(piv.eq.0.) go to 40 - call fpgivs(piv,a(j,1),co,si) - call fprota(co,si,zi,f(j)) - if(j.eq.4) go to 40 - j1 = j+1 - j2 = 1 - do 30 l=j1,4 - j2 = j2+1 - call fprota(co,si,h(l),a(j,j2)) - 30 continue - 40 continue - sup = sup+zi*zi - 50 continue - if(a(4,1).ne.0.) f(4) = f(4)/a(4,1) - if(a(3,1).ne.0.) f(3) = (f(3)-a(3,2)*f(4))/a(3,1) - if(a(2,1).ne.0.) f(2) = (f(2)-a(2,2)*f(3)-a(2,3)*f(4))/a(2,1) - if(a(1,1).ne.0.) - * f(1) = (f(1)-a(1,2)*f(2)-a(1,3)*f(3)-a(1,4)*f(4))/a(1,1) -c find the b-spline representation of this least-squares polynomial - c1 = f(1) - c4 = f(2) - c2 = f(4)/three+c1 - c3 = (f(3)+two*f(4))/three+c1 - nu = 8 - nv = 8 - do 60 i=1,4 - c(i) = c1 - c(i+4) = c2 - c(i+8) = c3 - c(i+12) = c4 - tu(i) = 0. - tu(i+4) = one - rn = 2*i-9 - tv(i) = rn*pi - rn = 2*i-1 - tv(i+4) = rn*pi - 60 continue - fp = sup -c test whether the least-squares polynomial is an acceptable solution - fpms = sup-s - if(fpms.lt.acc) go to 960 -c test whether we cannot further increase the number of knots. - 70 if(nuest.lt.numin .or. nvest.lt.nvmin) go to 950 -c find the initial set of interior knots of the spline in case iopt1=0. 
- nu = numin - nv = nvmin - tu(5) = half - nvv = nv-8 - rn = nvv+1 - fac = pi2/rn - do 80 i=1,nvv - rn = i - tv(i+4) = rn*fac-pi - 80 continue -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1 : computation of least-squares bicubic splines. c -c ****************************************************** c -c if iopt1<0 we compute the least-squares bicubic spline according c -c to the given set of knots. c -c if iopt1>=0 we compute least-squares bicubic splines with in- c -c creasing numbers of knots until the corresponding sum f(p=inf)<=s. c -c the initial set of knots then depends on the value of iopt1 c -c if iopt1=0 we start with one interior knot in the u-direction c -c (0.5) and 1+iopt2*(iopt2+1) in the v-direction. c -c if iopt1>0 we start with the set of knots found at the last c -c call of the routine. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c main loop for the different sets of knots. m is a save upper bound -c for the number of trials. - 90 do 570 iter=1,m -c find the position of the additional knots which are needed for the -c b-spline representation of s(u,v). - l1 = 4 - l2 = l1 - l3 = nv-3 - l4 = l3 - tv(l2) = -pi - tv(l3) = pi - do 120 i=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tv(l2) = tv(l4)-pi2 - tv(l3) = tv(l1)+pi2 - 120 continue - l = nu - do 130 i=1,4 - tu(i) = 0. - tu(l) = one - l = l-1 - 130 continue -c find nrint, the total number of knot intervals and nreg, the number -c of panels in which the approximation domain is subdivided by the -c intersection of knots. - nuu = nu-7 - nvv = nv-7 - nrr = nvv/2 - nr1 = nrr+1 - nrint = nuu+nvv - nreg = nuu*nvv -c arrange the data points according to the panel they belong to. 
- call fporde(u,v,m,3,3,tu,nu,tv,nv,nummer,index,nreg) - if(iopt2.eq.0) go to 195 -c find the b-spline coefficients cosi of the cubic spline -c approximations for cr(v)=rad(v)*cos(v) and sr(v) = rad(v)*sin(v) -c if iopt2=1, and additionally also for cr(v)**2,sr(v)**2 and -c 2*cr(v)*sr(v) if iopt2=2 - do 140 i=1,nvv - do 135 j=1,ipar - cosi(j,i) = 0. - 135 continue - do 140 j=1,nvv - a(i,j) = 0. - 140 continue -c the coefficients cosi are obtained from interpolation conditions -c at the knots tv(i),i=4,5,...nv-4. - do 175 i=1,nvv - l2 = i+3 - arg = tv(l2) - call fpbspl(tv,nv,3,arg,l2,hv) - do 145 j=1,nvv - row(j) = 0. - 145 continue - ll = i - do 150 j=1,3 - if(ll.gt.nvv) ll= 1 - row(ll) = row(ll)+hv(j) - ll = ll+1 - 150 continue - co = cos(arg) - si = sin(arg) - r = rad(arg) - cs(1) = co*r - cs(2) = si*r - if(iopt2.eq.1) go to 155 - cs(3) = cs(1)*cs(1) - cs(4) = cs(2)*cs(2) - cs(5) = cs(1)*cs(2) - 155 do 170 j=1,nvv - piv = row(j) - if(piv.eq.0.) go to 170 - call fpgivs(piv,a(j,1),co,si) - do 160 l=1,ipar - call fprota(co,si,cs(l),cosi(l,j)) - 160 continue - if(j.eq.nvv) go to 175 - j1 = j+1 - j2 = 1 - do 165 l=j1,nvv - j2 = j2+1 - call fprota(co,si,row(l),a(j,j2)) - 165 continue - 170 continue - 175 continue - do 190 l=1,ipar - do 180 j=1,nvv - cs(j) = cosi(l,j) - 180 continue - call fpback(a,cs,nvv,nvv,cs,ncc) - do 185 j=1,nvv - cosi(l,j) = cs(j) - 185 continue - 190 continue -c find ncof, the dimension of the spline and ncoff, the number -c of coefficients in the standard b-spline representation. - 195 nu4 = nu-4 - nv4 = nv-4 - ncoff = nu4*nv4 - ncof = ipar1+nvv*(nu4-1-iopt2-iopt3) -c find the bandwidth of the observation matrix a. - iband = 4*nvv - if(nuu-iopt2-iopt3.le.1) iband = ncof - iband1 = iband-1 -c initialize the observation matrix a. - do 200 i=1,ncof - f(i) = 0. - do 200 j=1,iband - a(i,j) = 0. - 200 continue -c initialize the sum of squared residuals. - fp = 0. - ratio = one+tu(6)/tu(5) -c fetch the data points in the new order. 
main loop for the -c different panels. - do 380 num=1,nreg -c fix certain constants for the current panel; jrot records the column -c number of the first non-zero element in a row of the observation -c matrix according to a data point of the panel. - num1 = num-1 - lu = num1/nvv - l1 = lu+4 - lv = num1-lu*nvv+1 - l2 = lv+3 - jrot = 0 - if(lu.gt.iopt2) jrot = ipar1+(lu-iopt2-1)*nvv - lu = lu+1 -c test whether there are still data points in the current panel. - in = index(num) - 210 if(in.eq.0) go to 380 -c fetch a new data point. - wi = w(in) - zi = z(in)*wi -c evaluate for the u-direction, the 4 non-zero b-splines at u(in) - call fpbspl(tu,nu,3,u(in),l1,hu) -c evaluate for the v-direction, the 4 non-zero b-splines at v(in) - call fpbspl(tv,nv,3,v(in),l2,hv) -c store the value of these b-splines in spu and spv resp. - do 220 i=1,4 - spu(in,i) = hu(i) - spv(in,i) = hv(i) - 220 continue -c initialize the new row of observation matrix. - do 240 i=1,iband - h(i) = 0. - 240 continue -c calculate the non-zero elements of the new row by making the cross -c products of the non-zero b-splines in u- and v-direction and -c by taking into account the conditions of the splines. - do 250 i=1,nvv - row(i) = 0. - 250 continue -c take into account the periodicity condition of the bicubic splines. - ll = lv - do 260 i=1,4 - if(ll.gt.nvv) ll=1 - row(ll) = row(ll)+hv(i) - ll = ll+1 - 260 continue -c take into account the other conditions of the splines. - if(iopt2.eq.0 .or. lu.gt.iopt2+1) go to 280 - do 270 l=1,ipar - cs(l) = 0. - do 270 i=1,nvv - cs(l) = cs(l)+row(i)*cosi(l,i) - 270 continue -c fill in the non-zero elements of the new row. 
- 280 j1 = 0 - do 330 j =1,4 - jlu = j+lu - huj = hu(j) - if(jlu.gt.iopt2+2) go to 320 - go to (290,290,300,310),jlu - 290 h(1) = huj - j1 = 1 - go to 330 - 300 h(1) = h(1)+huj - h(2) = huj*cs(1) - h(3) = huj*cs(2) - j1 = 3 - go to 330 - 310 h(1) = h(1)+huj - h(2) = h(2)+huj*ratio*cs(1) - h(3) = h(3)+huj*ratio*cs(2) - h(4) = huj*cs(3) - h(5) = huj*cs(4) - h(6) = huj*cs(5) - j1 = 6 - go to 330 - 320 if(jlu.gt.nu4 .and. iopt3.ne.0) go to 330 - do 325 i=1,nvv - j1 = j1+1 - h(j1) = row(i)*huj - 325 continue - 330 continue - do 335 i=1,iband - h(i) = h(i)*wi - 335 continue -c rotate the row into triangle by givens transformations. - irot = jrot - do 350 i=1,iband - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 350 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(irot,1),co,si) -c apply that transformation to the right hand side. - call fprota(co,si,zi,f(irot)) - if(i.eq.iband) go to 360 -c apply that transformation to the left hand side. - i2 = 1 - i3 = i+1 - do 340 j=i3,iband - i2 = i2+1 - call fprota(co,si,h(j),a(irot,i2)) - 340 continue - 350 continue -c add the contribution of the row to the sum of squares of residual -c right hand sides. - 360 fp = fp+zi**2 -c find the number of the next data point in the panel. - 370 in = nummer(in) - go to 210 - 380 continue -c find dmax, the maximum value for the diagonal elements in the reduced -c triangle. - dmax = 0. - do 390 i=1,ncof - if(a(i,1).le.dmax) go to 390 - dmax = a(i,1) - 390 continue -c check whether the observation matrix is rank deficient. - sigma = eps*dmax - do 400 i=1,ncof - if(a(i,1).le.sigma) go to 410 - 400 continue -c backward substitution in case of full rank. - call fpback(a,f,ncof,iband,c,ncc) - rank = ncof - do 405 i=1,ncof - q(i,1) = a(i,1)/dmax - 405 continue - go to 430 -c in case of rank deficiency, find the minimum norm solution. 
- 410 lwest = ncof*iband+ncof+iband - if(lwrk.lt.lwest) go to 925 - lf = 1 - lh = lf+ncof - la = lh+iband - do 420 i=1,ncof - ff(i) = f(i) - do 420 j=1,iband - q(i,j) = a(i,j) - 420 continue - call fprank(q,ff,ncof,iband,ncc,sigma,c,sq,rank,wrk(la), - * wrk(lf),wrk(lh)) - do 425 i=1,ncof - q(i,1) = q(i,1)/dmax - 425 continue -c add to the sum of squared residuals, the contribution of reducing -c the rank. - fp = fp+sq -c find the coefficients in the standard b-spline representation of -c the spline. - 430 call fprppo(nu,nv,iopt2,iopt3,cosi,ratio,c,ff,ncoff) -c test whether the least-squares spline is an acceptable solution. - if(iopt1.lt.0) then - if (fp.le.0) go to 970 - go to 980 - endif - fpms = fp-s - if(abs(fpms).le.acc) then - if (fp.le.0) go to 970 - go to 980 - endif -c if f(p=inf) < s, accept the choice of knots. - if(fpms.lt.0.) go to 580 -c test whether we cannot further increase the number of knots - if(m.lt.ncof) go to 935 -c search where to add a new knot. -c find for each interval the sum of squared residuals fpint for the -c data points having the coordinate belonging to that knot interval. -c calculate also coord which is the same sum, weighted by the position -c of the data points considered. - 440 do 450 i=1,nrint - fpint(i) = 0. - coord(i) = 0. - 450 continue - do 490 num=1,nreg - num1 = num-1 - lu = num1/nvv - l1 = lu+1 - lv = num1-lu*nvv - l2 = lv+1+nuu - jrot = lu*nv4+lv - in = index(num) - 460 if(in.eq.0) go to 490 - store = 0. - i1 = jrot - do 480 i=1,4 - hui = spu(in,i) - j1 = i1 - do 470 j=1,4 - j1 = j1+1 - store = store+hui*spv(in,j)*c(j1) - 470 continue - i1 = i1+nv4 - 480 continue - store = (w(in)*(z(in)-store))**2 - fpint(l1) = fpint(l1)+store - coord(l1) = coord(l1)+store*u(in) - fpint(l2) = fpint(l2)+store - coord(l2) = coord(l2)+store*v(in) - in = nummer(in) - go to 460 - 490 continue -c bring together the information concerning knot panels which are -c symmetric with respect to the origin. 
- do 495 i=1,nrr - l1 = nuu+i - l2 = l1+nrr - fpint(l1) = fpint(l1)+fpint(l2) - coord(l1) = coord(l1)+coord(l2)-pi*fpint(l2) - 495 continue -c find the interval for which fpint is maximal on the condition that -c there still can be added a knot. - l1 = 1 - l2 = nuu+nrr - if(nuest.lt.nu+1) l1=nuu+1 - if(nvest.lt.nv+2) l2=nuu -c test whether we cannot further increase the number of knots. - if(l1.gt.l2) go to 950 - 500 fpmax = 0. - l = 0 - do 510 i=l1,l2 - if(fpmax.ge.fpint(i)) go to 510 - l = i - fpmax = fpint(i) - 510 continue - if(l.eq.0) go to 930 -c calculate the position of the new knot. - arg = coord(l)/fpint(l) -c test in what direction the new knot is going to be added. - if(l.gt.nuu) go to 530 -c addition in the u-direction - l4 = l+4 - fpint(l) = 0. - fac1 = tu(l4)-arg - fac2 = arg-tu(l4-1) - if(fac1.gt.(ten*fac2) .or. fac2.gt.(ten*fac1)) go to 500 - j = nu - do 520 i=l4,nu - tu(j+1) = tu(j) - j = j-1 - 520 continue - tu(l4) = arg - nu = nu+1 - go to 570 -c addition in the v-direction - 530 l4 = l+4-nuu - fpint(l) = 0. - fac1 = tv(l4)-arg - fac2 = arg-tv(l4-1) - if(fac1.gt.(ten*fac2) .or. fac2.gt.(ten*fac1)) go to 500 - ll = nrr+4 - j = ll - do 550 i=l4,ll - tv(j+1) = tv(j) - j = j-1 - 550 continue - tv(l4) = arg - nv = nv+2 - nrr = nrr+1 - do 560 i=5,ll - j = i+nrr - tv(j) = tv(i)+pi - 560 continue -c restart the computations with the new set of knots. - 570 continue -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing bicubic spline. c -c ****************************************************** c -c we have determined the number of knots and their position. we now c -c compute the coefficients of the smoothing spline sp(u,v). c -c the observation matrix a is extended by the rows of a matrix, expres-c -c sing that sp(u,v) must be a constant function in the variable c -c v and a cubic polynomial in the variable u. the corresponding c -c weights of these additional rows are set to 1/(p). 
iteratively c -c we than have to determine the value of p such that f(p) = sum((w(i)* c -c (z(i)-sp(u(i),v(i))))**2) be = s. c -c we already know that the least-squares polynomial corresponds to p=0,c -c and that the least-squares bicubic spline corresponds to p=infin. c -c the iteration process makes use of rational interpolation. since f(p)c -c is a convex and strictly decreasing function of p, it can be approx- c -c imated by a rational function of the form r(p) = (u*p+v)/(p+w). c -c three values of p (p1,p2,p3) with corresponding values of f(p) (f1= c -c f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used to calculate the new value c -c of p such that r(p)=s. convergence is guaranteed by taking f1>0,f3<0.c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jumps of the 3-th order derivative of -c the b-splines at the knots tu(l),l=5,...,nu-4. - 580 call fpdisc(tu,nu,5,bu,nuest) -c evaluate the discontinuity jumps of the 3-th order derivative of -c the b-splines at the knots tv(l),l=5,...,nv-4. - call fpdisc(tv,nv,5,bv,nvest) -c initial value for p. - p1 = 0. - f1 = sup-s - p3 = -one - f3 = fpms - p = 0. - do 590 i=1,ncof - p = p+a(i,1) - 590 continue - rn = ncof - p = rn/p -c find the bandwidth of the extended observation matrix. - iband4 = iband+ipar1 - if(iband4.gt.ncof) iband4 = ncof - iband3 = iband4 -1 - ich1 = 0 - ich3 = 0 - nuu = nu4-iopt3-1 -c iteration process to find the root of f(p)=s. - do 920 iter=1,maxit - pinv = one/p -c store the triangularized observation matrix into q. - do 630 i=1,ncof - ff(i) = f(i) - do 620 j=1,iband4 - q(i,j) = 0. - 620 continue - do 630 j=1,iband - q(i,j) = a(i,j) - 630 continue -c extend the observation matrix with the rows of a matrix, expressing -c that for u=constant sp(u,v) must be a constant function. - do 720 i=5,nv4 - ii = i-4 - do 635 l=1,nvv - row(l) = 0. 
- 635 continue - ll = ii - do 640 l=1,5 - if(ll.gt.nvv) ll=1 - row(ll) = row(ll)+bv(ii,l) - ll = ll+1 - 640 continue - do 720 j=1,nuu -c initialize the new row. - do 645 l=1,iband - h(l) = 0. - 645 continue -c fill in the non-zero elements of the row. jrot records the column -c number of the first non-zero element in the row. - if(j.gt.iopt2) go to 665 - if(j.eq.2) go to 655 - do 650 k=1,2 - cs(k) = 0. - do 650 l=1,nvv - cs(k) = cs(k)+cosi(k,l)*row(l) - 650 continue - h(1) = cs(1) - h(2) = cs(2) - jrot = 2 - go to 675 - 655 do 660 k=3,5 - cs(k) = 0. - do 660 l=1,nvv - cs(k) = cs(k)+cosi(k,l)*row(l) - 660 continue - h(1) = cs(1)*ratio - h(2) = cs(2)*ratio - h(3) = cs(3) - h(4) = cs(4) - h(5) = cs(5) - jrot = 2 - go to 675 - 665 do 670 l=1,nvv - h(l) = row(l) - 670 continue - jrot = ipar1+1+(j-iopt2-1)*nvv - 675 do 677 l=1,iband - h(l) = h(l)*pinv - 677 continue - zi = 0. -c rotate the new row into triangle by givens transformations. - do 710 irot=jrot,ncof - piv = h(1) - i2 = min0(iband1,ncof-irot) - if(piv.eq.0.) then - if (i2.le.0) go to 720 - go to 690 - endif -c calculate the parameters of the givens transformation. - call fpgivs(piv,q(irot,1),co,si) -c apply that givens transformation to the right hand side. - call fprota(co,si,zi,ff(irot)) - if(i2.eq.0) go to 720 -c apply that givens transformation to the left hand side. - do 680 l=1,i2 - l1 = l+1 - call fprota(co,si,h(l1),q(irot,l1)) - 680 continue - 690 do 700 l=1,i2 - h(l) = h(l+1) - 700 continue - h(i2+1) = 0. - 710 continue - 720 continue -c extend the observation matrix with the rows of a matrix expressing -c that for v=constant. sp(u,v) must be a cubic polynomial. - do 810 i=5,nu4 - ii = i-4 - do 810 j=1,nvv -c initialize the new row - do 730 l=1,iband4 - h(l) = 0. - 730 continue -c fill in the non-zero elements of the row. jrot records the column -c number of the first non-zero element in the row. - j1 = 1 - do 760 l=1,5 - il = ii+l-1 - if(il.eq.nu4 .and. 
iopt3.ne.0) go to 760 - if(il.gt.iopt2+1) go to 750 - go to (735,740,745),il - 735 h(1) = bu(ii,l) - j1 = j+1 - go to 760 - 740 h(1) = h(1)+bu(ii,l) - h(2) = bu(ii,l)*cosi(1,j) - h(3) = bu(ii,l)*cosi(2,j) - j1 = j+3 - go to 760 - 745 h(1) = h(1)+bu(ii,l) - h(2) = bu(ii,l)*cosi(1,j)*ratio - h(3) = bu(ii,l)*cosi(2,j)*ratio - h(4) = bu(ii,l)*cosi(3,j) - h(5) = bu(ii,l)*cosi(4,j) - h(6) = bu(ii,l)*cosi(5,j) - j1 = j+6 - go to 760 - 750 h(j1) = bu(ii,l) - j1 = j1+nvv - 760 continue - do 765 l=1,iband4 - h(l) = h(l)*pinv - 765 continue - zi = 0. - jrot = 1 - if(ii.gt.iopt2+1) jrot = ipar1+(ii-iopt2-2)*nvv+j -c rotate the new row into triangle by givens transformations. - do 800 irot=jrot,ncof - piv = h(1) - i2 = min0(iband3,ncof-irot) - if(piv.eq.0.) then - if (i2.le.0) go to 810 - go to 780 - endif -c calculate the parameters of the givens transformation. - call fpgivs(piv,q(irot,1),co,si) -c apply that givens transformation to the right hand side. - call fprota(co,si,zi,ff(irot)) - if(i2.eq.0) go to 810 -c apply that givens transformation to the left hand side. - do 770 l=1,i2 - l1 = l+1 - call fprota(co,si,h(l1),q(irot,l1)) - 770 continue - 780 do 790 l=1,i2 - h(l) = h(l+1) - 790 continue - h(i2+1) = 0. - 800 continue - 810 continue -c find dmax, the maximum value for the diagonal elements in the -c reduced triangle. - dmax = 0. - do 820 i=1,ncof - if(q(i,1).le.dmax) go to 820 - dmax = q(i,1) - 820 continue -c check whether the matrix is rank deficient. - sigma = eps*dmax - do 830 i=1,ncof - if(q(i,1).le.sigma) go to 840 - 830 continue -c backward substitution in case of full rank. - call fpback(q,ff,ncof,iband4,c,ncc) - rank = ncof - go to 845 -c in case of rank deficiency, find the minimum norm solution. 
- 840 lwest = ncof*iband4+ncof+iband4 - if(lwrk.lt.lwest) go to 925 - lf = 1 - lh = lf+ncof - la = lh+iband4 - call fprank(q,ff,ncof,iband4,ncc,sigma,c,sq,rank,wrk(la), - * wrk(lf),wrk(lh)) - 845 do 850 i=1,ncof - q(i,1) = q(i,1)/dmax - 850 continue -c find the coefficients in the standard b-spline representation of -c the polar spline. - call fprppo(nu,nv,iopt2,iopt3,cosi,ratio,c,ff,ncoff) -c compute f(p). - fp = 0. - do 890 num = 1,nreg - num1 = num-1 - lu = num1/nvv - lv = num1-lu*nvv - jrot = lu*nv4+lv - in = index(num) - 860 if(in.eq.0) go to 890 - store = 0. - i1 = jrot - do 880 i=1,4 - hui = spu(in,i) - j1 = i1 - do 870 j=1,4 - j1 = j1+1 - store = store+hui*spv(in,j)*c(j1) - 870 continue - i1 = i1+nv4 - 880 continue - fp = fp+(w(in)*(z(in)-store))**2 - in = nummer(in) - go to 860 - 890 continue -c test whether the approximation sp(u,v) is an acceptable solution - fpms = fp-s - if(abs(fpms).le.acc) go to 980 -c test whether the maximum allowable number of iterations has been -c reached. - if(iter.eq.maxit) go to 940 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 900 - if((f2-f3).gt.acc) go to 895 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 920 - 895 if(f2.lt.0.) ich3 = 1 - 900 if(ich1.ne.0) go to 910 - if((f1-f2).gt.acc) go to 905 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 920 - if(p.ge.p3) p = p2*con1 +p3*con9 - go to 920 - 905 if(f2.gt.0.) ich1 = 1 -c test whether the iteration process proceeds as theoretically -c expected. - 910 if(f2.ge.f1 .or. f2.le.f3) go to 945 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 920 continue -c error codes and messages. - 925 ier = lwest - go to 990 - 930 ier = 5 - go to 990 - 935 ier = 4 - go to 990 - 940 ier = 3 - go to 990 - 945 ier = 2 - go to 990 - 950 ier = 1 - go to 990 - 960 ier = -2 - go to 990 - 970 ier = -1 - fp = 0. 
- 980 if(ncof.ne.rank) ier = -rank - 990 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fprank.f b/scipy-0.10.1/scipy/interpolate/fitpack/fprank.f deleted file mode 100644 index 22627d1b0a..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fprank.f +++ /dev/null @@ -1,236 +0,0 @@ - subroutine fprank(a,f,n,m,na,tol,c,sq,rank,aa,ff,h) -c subroutine fprank finds the minimum norm solution of a least- -c squares problem in case of rank deficiency. -c -c input parameters: -c a : array, which contains the non-zero elements of the observation -c matrix after triangularization by givens transformations. -c f : array, which contains the transformed right hand side. -c n : integer,wich contains the dimension of a. -c m : integer, which denotes the bandwidth of a. -c tol : real value, giving a threshold to determine the rank of a. -c -c output parameters: -c c : array, which contains the minimum norm solution. -c sq : real value, giving the contribution of reducing the rank -c to the sum of squared residuals. -c rank : integer, which contains the rank of matrix a. -c -c ..scalar arguments.. - integer n,m,na,rank - real*8 tol,sq -c ..array arguments.. - real*8 a(na,m),f(n),c(n),aa(n,m),ff(n),h(m) -c ..local scalars.. - integer i,ii,ij,i1,i2,j,jj,j1,j2,j3,k,kk,m1,nl - real*8 cos,fac,piv,sin,yi - double precision store,stor1,stor2,stor3 -c ..function references.. - integer min0 -c ..subroutine references.. -c fpgivs,fprota -c .. - m1 = m-1 -c the rank deficiency nl is considered to be the number of sufficient -c small diagonal elements of a. - nl = 0 - sq = 0. - do 90 i=1,n - if(a(i,1).gt.tol) go to 90 -c if a sufficient small diagonal element is found, we put it to -c zero. the remainder of the row corresponding to that zero diagonal -c element is then rotated into triangle by givens rotations . -c the rank deficiency is increased by one. - nl = nl+1 - if(i.eq.n) go to 90 - yi = f(i) - do 10 j=1,m1 - h(j) = a(i,j+1) - 10 continue - h(m) = 0. 
- i1 = i+1 - do 60 ii=i1,n - i2 = min0(n-ii,m1) - piv = h(1) - if(piv.eq.0.) go to 30 - call fpgivs(piv,a(ii,1),cos,sin) - call fprota(cos,sin,yi,f(ii)) - if(i2.eq.0) go to 70 - do 20 j=1,i2 - j1 = j+1 - call fprota(cos,sin,h(j1),a(ii,j1)) - h(j) = h(j1) - 20 continue - go to 50 - 30 if(i2.eq.0) go to 70 - do 40 j=1,i2 - h(j) = h(j+1) - 40 continue - 50 h(i2+1) = 0. - 60 continue -c add to the sum of squared residuals the contribution of deleting -c the row with small diagonal element. - 70 sq = sq+yi**2 - 90 continue -c rank denotes the rank of a. - rank = n-nl -c let b denote the (rank*n) upper trapezoidal matrix which can be -c obtained from the (n*n) upper triangular matrix a by deleting -c the rows and interchanging the columns corresponding to a zero -c diagonal element. if this matrix is factorized using givens -c transformations as b = (r) (u) where -c r is a (rank*rank) upper triangular matrix, -c u is a (rank*n) orthonormal matrix -c then the minimal least-squares solution c is given by c = b' v, -c where v is the solution of the system (r) (r)' v = g and -c g denotes the vector obtained from the old right hand side f, by -c removing the elements corresponding to a zero diagonal element of a. -c initialization. - do 100 i=1,rank - do 100 j=1,m - aa(i,j) = 0. - 100 continue -c form in aa the upper triangular matrix obtained from a by -c removing rows and columns with zero diagonal elements. form in ff -c the new right hand side by removing the elements of the old right -c hand side corresponding to a deleted row. - ii = 0 - do 120 i=1,n - if(a(i,1).le.tol) go to 120 - ii = ii+1 - ff(ii) = f(i) - aa(ii,1) = a(i,1) - jj = ii - kk = 1 - j = i - j1 = min0(j-1,m1) - if(j1.eq.0) go to 120 - do 110 k=1,j1 - j = j-1 - if(a(j,1).le.tol) go to 110 - kk = kk+1 - jj = jj-1 - aa(jj,kk) = a(j,k+1) - 110 continue - 120 continue -c form successively in h the columns of a with a zero diagonal element. 
- ii = 0 - do 200 i=1,n - ii = ii+1 - if(a(i,1).gt.tol) go to 200 - ii = ii-1 - if(ii.eq.0) go to 200 - jj = 1 - j = i - j1 = min0(j-1,m1) - do 130 k=1,j1 - j = j-1 - if(a(j,1).le.tol) go to 130 - h(jj) = a(j,k+1) - jj = jj+1 - 130 continue - do 140 kk=jj,m - h(kk) = 0. - 140 continue -c rotate this column into aa by givens transformations. - jj = ii - do 190 i1=1,ii - j1 = min0(jj-1,m1) - piv = h(1) - if(piv.ne.0.) go to 160 - if(j1.eq.0) go to 200 - do 150 j2=1,j1 - j3 = j2+1 - h(j2) = h(j3) - 150 continue - go to 180 - 160 call fpgivs(piv,aa(jj,1),cos,sin) - if(j1.eq.0) go to 200 - kk = jj - do 170 j2=1,j1 - j3 = j2+1 - kk = kk-1 - call fprota(cos,sin,h(j3),aa(kk,j3)) - h(j2) = h(j3) - 170 continue - 180 jj = jj-1 - h(j3) = 0. - 190 continue - 200 continue -c solve the system (aa) (f1) = ff - ff(rank) = ff(rank)/aa(rank,1) - i = rank-1 - if(i.eq.0) go to 230 - do 220 j=2,rank - store = ff(i) - i1 = min0(j-1,m1) - k = i - do 210 ii=1,i1 - k = k+1 - stor1 = ff(k) - stor2 = aa(i,ii+1) - store = store-stor1*stor2 - 210 continue - stor1 = aa(i,1) - ff(i) = store/stor1 - i = i-1 - 220 continue -c solve the system (aa)' (f2) = f1 - 230 ff(1) = ff(1)/aa(1,1) - if(rank.eq.1) go to 260 - do 250 j=2,rank - store = ff(j) - i1 = min0(j-1,m1) - k = j - do 240 ii=1,i1 - k = k-1 - stor1 = ff(k) - stor2 = aa(k,ii+1) - store = store-stor1*stor2 - 240 continue - stor1 = aa(j,1) - ff(j) = store/stor1 - 250 continue -c premultiply f2 by the transpoze of a. - 260 k = 0 - do 280 i=1,n - store = 0. - if(a(i,1).gt.tol) k = k+1 - j1 = min0(i,m) - kk = k - ij = i+1 - do 270 j=1,j1 - ij = ij-1 - if(a(ij,1).le.tol) go to 270 - stor1 = a(ij,j) - stor2 = ff(kk) - store = store+stor1*stor2 - kk = kk-1 - 270 continue - c(i) = store - 280 continue -c add to the sum of squared residuals the contribution of putting -c to zero the small diagonal elements of matrix (a). - stor3 = 0. 
- do 310 i=1,n - if(a(i,1).gt.tol) go to 310 - store = f(i) - i1 = min0(n-i,m1) - if(i1.eq.0) go to 300 - do 290 j=1,i1 - ij = i+j - stor1 = c(ij) - stor2 = a(i,j+1) - store = store-stor1*stor2 - 290 continue - 300 fac = a(i,1)*c(i) - stor1 = a(i,1) - stor2 = c(i) - stor1 = stor1*stor2 - stor3 = stor3+stor1*(stor1-store-store) - 310 continue - fac = stor3 - sq = sq+fac - return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fprati.f b/scipy-0.10.1/scipy/interpolate/fitpack/fprati.f deleted file mode 100644 index 4b59716454..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fprati.f +++ /dev/null @@ -1,29 +0,0 @@ - real*8 function fprati(p1,f1,p2,f2,p3,f3) -c given three points (p1,f1),(p2,f2) and (p3,f3), function fprati -c gives the value of p such that the rational interpolating function -c of the form r(p) = (u*p+v)/(p+w) equals zero at p. -c .. -c ..scalar arguments.. - real*8 p1,f1,p2,f2,p3,f3 -c ..local scalars.. - real*8 h1,h2,h3,p -c .. - if(p3.gt.0.) go to 10 -c value of p in case p3 = infinity. - p = (p1*(f1-f3)*f2-p2*(f2-f3)*f1)/((f1-f2)*f3) - go to 20 -c value of p in case p3 ^= infinity. - 10 h1 = f1*(f2-f3) - h2 = f2*(f3-f1) - h3 = f3*(f1-f2) - p = -(p1*p2*h3+p2*p3*h1+p3*p1*h2)/(p1*h1+p2*h2+p3*h3) -c adjust the value of p1,f1,p3 and f3 such that f1 > 0 and f3 < 0. - 20 if(f2.lt.0.) go to 30 - p1 = p2 - f1 = f2 - go to 40 - 30 p3 = p2 - f3 = f2 - 40 fprati = p - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpregr.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpregr.f deleted file mode 100644 index 7d5e97b8be..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpregr.f +++ /dev/null @@ -1,367 +0,0 @@ - subroutine fpregr(iopt,x,mx,y,my,z,mz,xb,xe,yb,ye,kx,ky,s, - * nxest,nyest,tol,maxit,nc,nx,tx,ny,ty,c,fp,fp0,fpold,reducx, - * reducy,fpintx,fpinty,lastdi,nplusx,nplusy,nrx,nry,nrdatx,nrdaty, - * wrk,lwrk,ier) -c .. -c ..scalar arguments.. 
- real*8 xb,xe,yb,ye,s,tol,fp,fp0,fpold,reducx,reducy - integer iopt,mx,my,mz,kx,ky,nxest,nyest,maxit,nc,nx,ny,lastdi, - * nplusx,nplusy,lwrk,ier -c ..array arguments.. - real*8 x(mx),y(my),z(mz),tx(nxest),ty(nyest),c(nc),fpintx(nxest), - * fpinty(nyest),wrk(lwrk) - integer nrdatx(nxest),nrdaty(nyest),nrx(mx),nry(my) -c ..local scalars - real*8 acc,fpms,f1,f2,f3,p,p1,p2,p3,rn,one,half,con1,con9,con4 - integer i,ich1,ich3,ifbx,ifby,ifsx,ifsy,iter,j,kx1,kx2,ky1,ky2, - * k3,l,lax,lay,lbx,lby,lq,lri,lsx,lsy,mk1,mm,mpm,mynx,ncof, - * nk1x,nk1y,nmaxx,nmaxy,nminx,nminy,nplx,nply,npl1,nrintx, - * nrinty,nxe,nxk,nye -c ..function references.. - real*8 abs,fprati - integer max0,min0 -c ..subroutine references.. -c fpgrre,fpknot -c .. -c set constants - one = 1 - half = 0.5e0 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 -c we partition the working space. - kx1 = kx+1 - ky1 = ky+1 - kx2 = kx1+1 - ky2 = ky1+1 - lsx = 1 - lsy = lsx+mx*kx1 - lri = lsy+my*ky1 - mm = max0(nxest,my) - lq = lri+mm - mynx = nxest*my - lax = lq+mynx - nxk = nxest*kx2 - lbx = lax+nxk - lay = lbx+nxk - lby = lay+nyest*ky2 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position. c -c **************************************************************** c -c given a set of knots we compute the least-squares spline sinf(x,y), c -c and the corresponding sum of squared residuals fp=f(p=inf). c -c if iopt=-1 sinf(x,y) is the requested approximation. c -c if iopt=0 or iopt=1 we check whether we can accept the knots: c -c if fp <=s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares spline until finally fp<=s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots equals nmaxx = mx+kx+1 and nmaxy = my+ky+1. 
c -c if s>0 and c -c *iopt=0 we first compute the least-squares polynomial of degree c -c kx in x and ky in y; nx=nminx=2*kx+2 and ny=nymin=2*ky+2. c -c *iopt=1 we start with the knots found at the last call of the c -c routine, except for the case that s > fp0; then we can compute c -c the least-squares polynomial directly. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c determine the number of knots for polynomial approximation. - nminx = 2*kx1 - nminy = 2*ky1 - if(iopt.lt.0) go to 120 -c acc denotes the absolute tolerance for the root of f(p)=s. - acc = tol*s -c find nmaxx and nmaxy which denote the number of knots in x- and y- -c direction in case of spline interpolation. - nmaxx = mx+kx1 - nmaxy = my+ky1 -c find nxe and nye which denote the maximum number of knots -c allowed in each direction - nxe = min0(nmaxx,nxest) - nye = min0(nmaxy,nyest) - if(s.gt.0.) go to 100 -c if s = 0, s(x,y) is an interpolating spline. - nx = nmaxx - ny = nmaxy -c test whether the required storage space exceeds the available one. - if(ny.gt.nyest .or. nx.gt.nxest) go to 420 -c find the position of the interior knots in case of interpolation. -c the knots in the x-direction. - mk1 = mx-kx1 - if(mk1.eq.0) go to 60 - k3 = kx/2 - i = kx1+1 - j = k3+2 - if(k3*2.eq.kx) go to 40 - do 30 l=1,mk1 - tx(i) = x(j) - i = i+1 - j = j+1 - 30 continue - go to 60 - 40 do 50 l=1,mk1 - tx(i) = (x(j)+x(j-1))*half - i = i+1 - j = j+1 - 50 continue -c the knots in the y-direction. - 60 mk1 = my-ky1 - if(mk1.eq.0) go to 120 - k3 = ky/2 - i = ky1+1 - j = k3+2 - if(k3*2.eq.ky) go to 80 - do 70 l=1,mk1 - ty(i) = y(j) - i = i+1 - j = j+1 - 70 continue - go to 120 - 80 do 90 l=1,mk1 - ty(i) = (y(j)+y(j-1))*half - i = i+1 - j = j+1 - 90 continue - go to 120 -c if s > 0 our initial choice of knots depends on the value of iopt. 
- 100 if(iopt.eq.0) go to 115 - if(fp0.le.s) go to 115 -c if iopt=1 and fp0 > s we start computing the least- squares spline -c according to the set of knots found at the last call of the routine. -c we determine the number of grid coordinates x(i) inside each knot -c interval (tx(l),tx(l+1)). - l = kx2 - j = 1 - nrdatx(1) = 0 - mpm = mx-1 - do 105 i=2,mpm - nrdatx(j) = nrdatx(j)+1 - if(x(i).lt.tx(l)) go to 105 - nrdatx(j) = nrdatx(j)-1 - l = l+1 - j = j+1 - nrdatx(j) = 0 - 105 continue -c we determine the number of grid coordinates y(i) inside each knot -c interval (ty(l),ty(l+1)). - l = ky2 - j = 1 - nrdaty(1) = 0 - mpm = my-1 - do 110 i=2,mpm - nrdaty(j) = nrdaty(j)+1 - if(y(i).lt.ty(l)) go to 110 - nrdaty(j) = nrdaty(j)-1 - l = l+1 - j = j+1 - nrdaty(j) = 0 - 110 continue - go to 120 -c if iopt=0 or iopt=1 and s>=fp0, we start computing the least-squares -c polynomial of degree kx in x and ky in y (which is a spline without -c interior knots). - 115 nx = nminx - ny = nminy - nrdatx(1) = mx-2 - nrdaty(1) = my-2 - lastdi = 0 - nplusx = 0 - nplusy = 0 - fp0 = 0. - fpold = 0. - reducx = 0. - reducy = 0. - 120 mpm = mx+my - ifsx = 0 - ifsy = 0 - ifbx = 0 - ifby = 0 - p = -one -c main loop for the different sets of knots.mpm=mx+my is a save upper -c bound for the number of trials. - do 250 iter=1,mpm - if(nx.eq.nminx .and. ny.eq.nminy) ier = -2 -c find nrintx (nrinty) which is the number of knot intervals in the -c x-direction (y-direction). - nrintx = nx-nminx+1 - nrinty = ny-nminy+1 -c find ncof, the number of b-spline coefficients for the current set -c of knots. - nk1x = nx-kx1 - nk1y = ny-ky1 - ncof = nk1x*nk1y -c find the position of the additional knots which are needed for the -c b-spline representation of s(x,y). 
- i = nx - do 130 j=1,kx1 - tx(j) = xb - tx(i) = xe - i = i-1 - 130 continue - i = ny - do 140 j=1,ky1 - ty(j) = yb - ty(i) = ye - i = i-1 - 140 continue -c find the least-squares spline sinf(x,y) and calculate for each knot -c interval tx(j+kx)<=x<=tx(j+kx+1) (ty(j+ky)<=y<=ty(j+ky+1)) the sum -c of squared residuals fpintx(j),j=1,2,...,nx-2*kx-1 (fpinty(j),j=1,2, -c ...,ny-2*ky-1) for the data points having their absciss (ordinate)- -c value belonging to that interval. -c fp gives the total sum of squared residuals. - call fpgrre(ifsx,ifsy,ifbx,ifby,x,mx,y,my,z,mz,kx,ky,tx,nx,ty, - * ny,p,c,nc,fp,fpintx,fpinty,mm,mynx,kx1,kx2,ky1,ky2,wrk(lsx), - * wrk(lsy),wrk(lri),wrk(lq),wrk(lax),wrk(lay),wrk(lbx),wrk(lby), - * nrx,nry) - if(ier.eq.(-2)) fp0 = fp -c test whether the least-squares spline is an acceptable solution. - if(iopt.lt.0) go to 440 - fpms = fp-s - if(abs(fpms) .lt. acc) go to 440 -c if f(p=inf) < s, we accept the choice of knots. - if(fpms.lt.0.) go to 300 -c if nx=nmaxx and ny=nmaxy, sinf(x,y) is an interpolating spline. - if(nx.eq.nmaxx .and. ny.eq.nmaxy) go to 430 -c increase the number of knots. -c if nx=nxe and ny=nye we cannot further increase the number of knots -c because of the storage capacity limitation. - if(nx.eq.nxe .and. ny.eq.nye) go to 420 - ier = 0 -c adjust the parameter reducx or reducy according to the direction -c in which the last added knots were located. - if (lastdi.lt.0) go to 150 - if (lastdi.eq.0) go to 170 - go to 160 - 150 reducx = fpold-fp - go to 170 - 160 reducy = fpold-fp -c store the sum of squared residuals for the current set of knots. - 170 fpold = fp -c find nplx, the number of knots we should add in the x-direction. - nplx = 1 - if(nx.eq.nminx) go to 180 - npl1 = nplusx*2 - rn = nplusx - if(reducx.gt.acc) npl1 = rn*fpms/reducx - nplx = min0(nplusx*2,max0(npl1,nplusx/2,1)) -c find nply, the number of knots we should add in the y-direction. 
- 180 nply = 1 - if(ny.eq.nminy) go to 190 - npl1 = nplusy*2 - rn = nplusy - if(reducy.gt.acc) npl1 = rn*fpms/reducy - nply = min0(nplusy*2,max0(npl1,nplusy/2,1)) - 190 if (nplx.lt.nply) go to 210 - if (nplx.eq.nply) go to 200 - go to 230 - 200 if(lastdi.lt.0) go to 230 - 210 if(nx.eq.nxe) go to 230 -c addition in the x-direction. - lastdi = -1 - nplusx = nplx - ifsx = 0 - do 220 l=1,nplusx -c add a new knot in the x-direction - call fpknot(x,mx,tx,nx,fpintx,nrdatx,nrintx,nxest,1) -c test whether we cannot further increase the number of knots in the -c x-direction. - if(nx.eq.nxe) go to 250 - 220 continue - go to 250 - 230 if(ny.eq.nye) go to 210 -c addition in the y-direction. - lastdi = 1 - nplusy = nply - ifsy = 0 - do 240 l=1,nplusy -c add a new knot in the y-direction. - call fpknot(y,my,ty,ny,fpinty,nrdaty,nrinty,nyest,1) -c test whether we cannot further increase the number of knots in the -c y-direction. - if(ny.eq.nye) go to 250 - 240 continue -c restart the computations with the new set of knots. - 250 continue -c test whether the least-squares polynomial is a solution of our -c approximation problem. - 300 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline sp(x,y) c -c ***************************************************** c -c we have determined the number of knots and their position. we now c -c compute the b-spline coefficients of the smoothing spline sp(x,y). c -c this smoothing spline varies with the parameter p in such a way thatc -c f(p) = sumi=1,mx(sumj=1,my((z(i,j)-sp(x(i),y(j)))**2) c -c is a continuous, strictly decreasing function of p. moreover the c -c least-squares polynomial corresponds to p=0 and the least-squares c -c spline to p=infinity. iteratively we then have to determine the c -c positive value of p such that f(p)=s. the process which is proposed c -c here makes use of rational interpolation. 
f(p) is approximated by a c -c rational function r(p)=(u*p+v)/(p+w); three values of p (p1,p2,p3) c -c with corresponding values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s)c -c are used to calculate the new value of p such that r(p)=s. c -c convergence is guaranteed by taking f1 > 0 and f3 < 0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = one - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p)=s. - do 350 iter = 1,maxit -c find the smoothing spline sp(x,y) and the corresponding sum of -c squared residuals fp. - call fpgrre(ifsx,ifsy,ifbx,ifby,x,mx,y,my,z,mz,kx,ky,tx,nx,ty, - * ny,p,c,nc,fp,fpintx,fpinty,mm,mynx,kx1,kx2,ky1,ky2,wrk(lsx), - * wrk(lsy),wrk(lri),wrk(lq),wrk(lax),wrk(lay),wrk(lbx),wrk(lby), - * nrx,nry) -c test whether the approximation sp(x,y) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximum allowable number of iterations has been -c reached. - if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 320 - if((f2-f3).gt.acc) go to 310 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 350 - 310 if(f2.lt.0.) ich3 = 1 - 320 if(ich1.ne.0) go to 340 - if((f1-f2).gt.acc) go to 330 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 350 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 350 -c test whether the iteration process proceeds as theoretically -c expected. - 330 if(f2.gt.0.) ich1 = 1 - 340 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 350 continue -c error codes and messages. - 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - fp = 0. 
- 440 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fprota.f b/scipy-0.10.1/scipy/interpolate/fitpack/fprota.f deleted file mode 100644 index e45bf36482..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fprota.f +++ /dev/null @@ -1,14 +0,0 @@ - subroutine fprota(cos,sin,a,b) -c subroutine fprota applies a givens rotation to a and b. -c .. -c ..scalar arguments.. - real*8 cos,sin,a,b -c ..local scalars.. - real*8 stor1,stor2 -c .. - stor1 = a - stor2 = b - b = cos*stor2+sin*stor1 - a = cos*stor1-sin*stor2 - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fprppo.f b/scipy-0.10.1/scipy/interpolate/fitpack/fprppo.f deleted file mode 100644 index d84a839f97..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fprppo.f +++ /dev/null @@ -1,61 +0,0 @@ - subroutine fprppo(nu,nv,if1,if2,cosi,ratio,c,f,ncoff) -c given the coefficients of a constrained bicubic spline, as determined -c in subroutine fppola, subroutine fprppo calculates the coefficients -c in the standard b-spline representation of bicubic splines. -c .. -c ..scalar arguments.. - real*8 ratio - integer nu,nv,if1,if2,ncoff -c ..array arguments - real*8 c(ncoff),f(ncoff),cosi(5,nv) -c ..local scalars.. - integer i,iopt,ii,j,k,l,nu4,nvv -c .. - nu4 = nu-4 - nvv = nv-7 - iopt = if1+1 - do 10 i=1,ncoff - f(i) = 0. - 10 continue - i = 0 - do 120 l=1,nu4 - ii = i - if(l.gt.iopt) go to 80 - go to (20,40,60),l - 20 do 30 k=1,nvv - i = i+1 - f(i) = c(1) - 30 continue - j = 1 - go to 100 - 40 do 50 k=1,nvv - i = i+1 - f(i) = c(1)+c(2)*cosi(1,k)+c(3)*cosi(2,k) - 50 continue - j = 3 - go to 100 - 60 do 70 k=1,nvv - i = i+1 - f(i) = c(1)+ratio*(c(2)*cosi(1,k)+c(3)*cosi(2,k))+ - * c(4)*cosi(3,k)+c(5)*cosi(4,k)+c(6)*cosi(5,k) - 70 continue - j = 6 - go to 100 - 80 if(l.eq.nu4 .and. 
if2.ne.0) go to 120 - do 90 k=1,nvv - i = i+1 - j = j+1 - f(i) = c(j) - 90 continue - 100 do 110 k=1,3 - ii = ii+1 - i = i+1 - f(i) = f(ii) - 110 continue - 120 continue - do 130 i=1,ncoff - c(i) = f(i) - 130 continue - return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fprpsp.f b/scipy-0.10.1/scipy/interpolate/fitpack/fprpsp.f deleted file mode 100644 index 7a1a267259..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fprpsp.f +++ /dev/null @@ -1,55 +0,0 @@ - subroutine fprpsp(nt,np,co,si,c,f,ncoff) -c given the coefficients of a spherical spline function, subroutine -c fprpsp calculates the coefficients in the standard b-spline re- -c presentation of this bicubic spline. -c .. -c ..scalar arguments - integer nt,np,ncoff -c ..array arguments - real*8 co(np),si(np),c(ncoff),f(ncoff) -c ..local scalars - real*8 cn,c1,c2,c3 - integer i,ii,j,k,l,ncof,npp,np4,nt4 -c .. - nt4 = nt-4 - np4 = np-4 - npp = np4-3 - ncof = 6+npp*(nt4-4) - c1 = c(1) - cn = c(ncof) - j = ncoff - do 10 i=1,np4 - f(i) = c1 - f(j) = cn - j = j-1 - 10 continue - i = np4 - j=1 - do 70 l=3,nt4 - ii = i - if(l.eq.3 .or. l.eq.nt4) go to 30 - do 20 k=1,npp - i = i+1 - j = j+1 - f(i) = c(j) - 20 continue - go to 50 - 30 if(l.eq.nt4) c1 = cn - c2 = c(j+1) - c3 = c(j+2) - j = j+2 - do 40 k=1,npp - i = i+1 - f(i) = c1+c2*co(k)+c3*si(k) - 40 continue - 50 do 60 k=1,3 - ii = ii+1 - i = i+1 - f(i) = f(ii) - 60 continue - 70 continue - do 80 i=1,ncoff - c(i) = f(i) - 80 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpseno.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpseno.f deleted file mode 100644 index 97587c5a4f..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpseno.f +++ /dev/null @@ -1,34 +0,0 @@ - subroutine fpseno(maxtr,up,left,right,info,merk,ibind,nbind) -c subroutine fpseno fetches a branch of a triply linked tree the -c information of which is kept in the arrays up,left,right and info. 
-c the branch has a specified length nbind and is determined by the -c parameter merk which points to its terminal node. the information -c field of the nodes of this branch is stored in the array ibind. on -c exit merk points to a new branch of length nbind or takes the value -c 1 if no such branch was found. -c .. -c ..scalar arguments.. - integer maxtr,merk,nbind -c ..array arguments.. - integer up(maxtr),left(maxtr),right(maxtr),info(maxtr), - * ibind(nbind) -c ..scalar arguments.. - integer i,j,k -c .. - k = merk - j = nbind - do 10 i=1,nbind - ibind(j) = info(k) - k = up(k) - j = j-1 - 10 continue - 20 k = right(merk) - if(k.ne.0) go to 30 - merk = up(merk) - if (merk.le.1) go to 40 - go to 20 - 30 merk = k - k = left(merk) - if(k.ne.0) go to 30 - 40 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpspgr.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpspgr.f deleted file mode 100644 index 070757e05f..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpspgr.f +++ /dev/null @@ -1,439 +0,0 @@ - subroutine fpspgr(iopt,ider,u,mu,v,mv,r,mr,r0,r1,s,nuest,nvest, - * tol,maxit,nc,nu,tu,nv,tv,c,fp,fp0,fpold,reducu,reducv,fpintu, - * fpintv,dr,step,lastdi,nplusu,nplusv,lastu0,lastu1,nru,nrv, - * nrdatu,nrdatv,wrk,lwrk,ier) -c .. -c ..scalar arguments.. - integer mu,mv,mr,nuest,nvest,maxit,nc,nu,nv,lastdi,nplusu,nplusv, - * lastu0,lastu1,lwrk,ier - real*8 r0,r1,s,tol,fp,fp0,fpold,reducu,reducv -c ..array arguments.. - integer iopt(3),ider(4),nrdatu(nuest),nrdatv(nvest),nru(mu), - * nrv(mv) - real*8 u(mu),v(mv),r(mr),tu(nuest),tv(nvest),c(nc),fpintu(nuest), - * fpintv(nvest),dr(6),wrk(lwrk),step(2) -c ..local scalars.. - real*8 acc,fpms,f1,f2,f3,p,per,pi,p1,p2,p3,vb,ve,rmax,rmin,rn,one, - * - * con1,con4,con9 - integer i,ich1,ich3,ifbu,ifbv,ifsu,ifsv,istart,iter,i1,i2,j,ju, - * ktu,l,l1,l2,l3,l4,mpm,mumin,mu0,mu1,nn,nplu,nplv,npl1,nrintu, - * nrintv,nue,numax,nve,nvmax -c ..local arrays.. - integer idd(4) - real*8 drr(6) -c ..function references.. 
- real*8 abs,datan2,fprati - integer max0,min0 -c ..subroutine references.. -c fpknot,fpopsp -c .. -c set constants - one = 1d0 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 -c initialization - ifsu = 0 - ifsv = 0 - ifbu = 0 - ifbv = 0 - p = -one - mumin = 4 - if(ider(1).ge.0) mumin = mumin-1 - if(iopt(2).eq.1 .and. ider(2).eq.1) mumin = mumin-1 - if(ider(3).ge.0) mumin = mumin-1 - if(iopt(3).eq.1 .and. ider(4).eq.1) mumin = mumin-1 - if(mumin.eq.0) mumin = 1 - pi = datan2(0d0,-one) - per = pi+pi - vb = v(1) - ve = vb+per -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position. c -c **************************************************************** c -c given a set of knots we compute the least-squares spline sinf(u,v) c -c and the corresponding sum of squared residuals fp = f(p=inf). c -c if iopt(1)=-1 sinf(u,v) is the requested approximation. c -c if iopt(1)>=0 we check whether we can accept the knots: c -c if fp <= s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares spline until finally fp <= s. c -c the initial choice of knots depends on the value of s and iopt. c -c if s=0 we have spline interpolation; in that case the number of c -c knots in the u-direction equals nu=numax=mu+6+iopt(2)+iopt(3) c -c and in the v-direction nv=nvmax=mv+7. c -c if s>0 and c -c iopt(1)=0 we first compute the least-squares polynomial,i.e. a c -c spline without interior knots : nu=8 ; nv=8. c -c iopt(1)=1 we start with the set of knots found at the last call c -c of the routine, except for the case that s > fp0; then we c -c compute the least-squares polynomial directly. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - if(iopt(1).lt.0) go to 120 -c acc denotes the absolute tolerance for the root of f(p)=s. 
- acc = tol*s -c numax and nvmax denote the number of knots needed for interpolation. - numax = mu+6+iopt(2)+iopt(3) - nvmax = mv+7 - nue = min0(numax,nuest) - nve = min0(nvmax,nvest) - if(s.gt.0.) go to 100 -c if s = 0, s(u,v) is an interpolating spline. - nu = numax - nv = nvmax -c test whether the required storage space exceeds the available one. - if(nu.gt.nuest .or. nv.gt.nvest) go to 420 -c find the position of the knots in the v-direction. - do 10 l=1,mv - tv(l+3) = v(l) - 10 continue - tv(mv+4) = ve - l1 = mv-2 - l2 = mv+5 - do 20 i=1,3 - tv(i) = v(l1)-per - tv(l2) = v(i+1)+per - l1 = l1+1 - l2 = l2+1 - 20 continue -c if not all the derivative values g(i,j) are given, we will first -c estimate these values by computing a least-squares spline - idd(1) = ider(1) - if(idd(1).eq.0) idd(1) = 1 - if(idd(1).gt.0) dr(1) = r0 - idd(2) = ider(2) - idd(3) = ider(3) - if(idd(3).eq.0) idd(3) = 1 - if(idd(3).gt.0) dr(4) = r1 - idd(4) = ider(4) - if(ider(1).lt.0 .or. ider(3).lt.0) go to 30 - if(iopt(2).ne.0 .and. ider(2).eq.0) go to 30 - if(iopt(3).eq.0 .or. ider(4).ne.0) go to 70 -c we set up the knots in the u-direction for computing the least-squares -c spline. - 30 i1 = 3 - i2 = mu-2 - nu = 4 - do 40 i=1,mu - if(i1.gt.i2) go to 50 - nu = nu+1 - tu(nu) = u(i1) - i1 = i1+2 - 40 continue - 50 do 60 i=1,4 - tu(i) = 0. - nu = nu+1 - tu(nu) = pi - 60 continue -c we compute the least-squares spline for estimating the derivatives. - call fpopsp(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,r,mr,r0,r1,dr,iopt,idd, - * tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru,nrv, - * wrk,lwrk) - ifsu = 0 -c if all the derivatives at the origin are known, we compute the -c interpolating spline. -c we set up the knots in the u-direction, needed for interpolation. - 70 nn = numax-8 - if(nn.eq.0) go to 95 - ju = 2-iopt(2) - do 80 l=1,nn - tu(l+4) = u(ju) - ju = ju+1 - 80 continue - nu = numax - l = nu - do 90 i=1,4 - tu(i) = 0. 
- tu(l) = pi - l = l-1 - 90 continue -c we compute the interpolating spline. - 95 call fpopsp(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,r,mr,r0,r1,dr,iopt,idd, - * tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru,nrv, - * wrk,lwrk) - go to 430 -c if s>0 our initial choice of knots depends on the value of iopt(1). - 100 ier = 0 - if(iopt(1).eq.0) go to 115 - step(1) = -step(1) - step(2) = -step(2) - if(fp0.le.s) go to 115 -c if iopt(1)=1 and fp0 > s we start computing the least-squares spline -c according to the set of knots found at the last call of the routine. -c we determine the number of grid coordinates u(i) inside each knot -c interval (tu(l),tu(l+1)). - l = 5 - j = 1 - nrdatu(1) = 0 - mu0 = 2-iopt(2) - mu1 = mu-1+iopt(3) - do 105 i=mu0,mu1 - nrdatu(j) = nrdatu(j)+1 - if(u(i).lt.tu(l)) go to 105 - nrdatu(j) = nrdatu(j)-1 - l = l+1 - j = j+1 - nrdatu(j) = 0 - 105 continue -c we determine the number of grid coordinates v(i) inside each knot -c interval (tv(l),tv(l+1)). - l = 5 - j = 1 - nrdatv(1) = 0 - do 110 i=2,mv - nrdatv(j) = nrdatv(j)+1 - if(v(i).lt.tv(l)) go to 110 - nrdatv(j) = nrdatv(j)-1 - l = l+1 - j = j+1 - nrdatv(j) = 0 - 110 continue - idd(1) = ider(1) - idd(2) = ider(2) - idd(3) = ider(3) - idd(4) = ider(4) - go to 120 -c if iopt(1)=0 or iopt(1)=1 and s >= fp0,we start computing the least- -c squares polynomial (which is a spline without interior knots). - 115 ier = -2 - idd(1) = ider(1) - idd(2) = 1 - idd(3) = ider(3) - idd(4) = 1 - nu = 8 - nv = 8 - nrdatu(1) = mu-2+iopt(2)+iopt(3) - nrdatv(1) = mv-1 - lastdi = 0 - nplusu = 0 - nplusv = 0 - fp0 = 0. - fpold = 0. - reducu = 0. - reducv = 0. -c main loop for the different sets of knots.mpm=mu+mv is a save upper -c bound for the number of trials. - 120 mpm = mu+mv - do 270 iter=1,mpm -c find nrintu (nrintv) which is the number of knot intervals in the -c u-direction (v-direction). 
- nrintu = nu-7 - nrintv = nv-7 -c find the position of the additional knots which are needed for the -c b-spline representation of s(u,v). - i = nu - do 125 j=1,4 - tu(j) = 0. - tu(i) = pi - i = i-1 - 125 continue - l1 = 4 - l2 = l1 - l3 = nv-3 - l4 = l3 - tv(l2) = vb - tv(l3) = ve - do 130 j=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tv(l2) = tv(l4)-per - tv(l3) = tv(l1)+per - 130 continue -c find an estimate of the range of possible values for the optimal -c derivatives at the origin. - ktu = nrdatu(1)+2-iopt(2) - if(ktu.lt.mumin) ktu = mumin - if(ktu.eq.lastu0) go to 140 - rmin = r0 - rmax = r0 - l = mv*ktu - do 135 i=1,l - if(r(i).lt.rmin) rmin = r(i) - if(r(i).gt.rmax) rmax = r(i) - 135 continue - step(1) = rmax-rmin - lastu0 = ktu - 140 ktu = nrdatu(nrintu)+2-iopt(3) - if(ktu.lt.mumin) ktu = mumin - if(ktu.eq.lastu1) go to 150 - rmin = r1 - rmax = r1 - l = mv*ktu - j = mr - do 145 i=1,l - if(r(j).lt.rmin) rmin = r(j) - if(r(j).gt.rmax) rmax = r(j) - j = j-1 - 145 continue - step(2) = rmax-rmin - lastu1 = ktu -c find the least-squares spline sinf(u,v). - 150 call fpopsp(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,r,mr,r0,r1,dr,iopt, - * idd,tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru, - * nrv,wrk,lwrk) - if(step(1).lt.0.) step(1) = -step(1) - if(step(2).lt.0.) step(2) = -step(2) - if(ier.eq.(-2)) fp0 = fp -c test whether the least-squares spline is an acceptable solution. - if(iopt(1).lt.0) go to 440 - fpms = fp-s - if(abs(fpms) .lt. acc) go to 440 -c if f(p=inf) < s, we accept the choice of knots. - if(fpms.lt.0.) go to 300 -c if nu=numax and nv=nvmax, sinf(u,v) is an interpolating spline - if(nu.eq.numax .and. nv.eq.nvmax) go to 430 -c increase the number of knots. -c if nu=nue and nv=nve we cannot further increase the number of knots -c because of the storage capacity limitation. - if(nu.eq.nue .and. 
nv.eq.nve) go to 420 - if(ider(1).eq.0) fpintu(1) = fpintu(1)+(r0-dr(1))**2 - if(ider(3).eq.0) fpintu(nrintu) = fpintu(nrintu)+(r1-dr(4))**2 - ier = 0 -c adjust the parameter reducu or reducv according to the direction -c in which the last added knots were located. - if (lastdi.lt.0) go to 160 - if (lastdi.eq.0) go to 155 - go to 170 - 155 nplv = 3 - idd(2) = ider(2) - idd(4) = ider(4) - fpold = fp - go to 230 - 160 reducu = fpold-fp - go to 175 - 170 reducv = fpold-fp -c store the sum of squared residuals for the current set of knots. - 175 fpold = fp -c find nplu, the number of knots we should add in the u-direction. - nplu = 1 - if(nu.eq.8) go to 180 - npl1 = nplusu*2 - rn = nplusu - if(reducu.gt.acc) npl1 = rn*fpms/reducu - nplu = min0(nplusu*2,max0(npl1,nplusu/2,1)) -c find nplv, the number of knots we should add in the v-direction. - 180 nplv = 3 - if(nv.eq.8) go to 190 - npl1 = nplusv*2 - rn = nplusv - if(reducv.gt.acc) npl1 = rn*fpms/reducv - nplv = min0(nplusv*2,max0(npl1,nplusv/2,1)) -c test whether we are going to add knots in the u- or v-direction. - 190 if (nplu.lt.nplv) go to 210 - if (nplu.eq.nplv) go to 200 - go to 230 - 200 if(lastdi.lt.0) go to 230 - 210 if(nu.eq.nue) go to 230 -c addition in the u-direction. - lastdi = -1 - nplusu = nplu - ifsu = 0 - istart = 0 - if(iopt(2).eq.0) istart = 1 - do 220 l=1,nplusu -c add a new knot in the u-direction - call fpknot(u,mu,tu,nu,fpintu,nrdatu,nrintu,nuest,istart) -c test whether we cannot further increase the number of knots in the -c u-direction. - if(nu.eq.nue) go to 270 - 220 continue - go to 270 - 230 if(nv.eq.nve) go to 210 -c addition in the v-direction. - lastdi = 1 - nplusv = nplv - ifsv = 0 - do 240 l=1,nplusv -c add a new knot in the v-direction. - call fpknot(v,mv,tv,nv,fpintv,nrdatv,nrintv,nvest,1) -c test whether we cannot further increase the number of knots in the -c v-direction. - if(nv.eq.nve) go to 270 - 240 continue -c restart the computations with the new set of knots. 
- 270 continue -c test whether the least-squares polynomial is a solution of our -c approximation problem. - 300 if(ier.eq.(-2)) go to 440 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline sp(u,v) c -c ***************************************************** c -c we have determined the number of knots and their position. we now c -c compute the b-spline coefficients of the smoothing spline sp(u,v). c -c this smoothing spline depends on the parameter p in such a way that c -c f(p) = sumi=1,mu(sumj=1,mv((z(i,j)-sp(u(i),v(j)))**2) c -c is a continuous, strictly decreasing function of p. moreover the c -c least-squares polynomial corresponds to p=0 and the least-squares c -c spline to p=infinity. then iteratively we have to determine the c -c positive value of p such that f(p)=s. the process which is proposed c -c here makes use of rational interpolation. f(p) is approximated by a c -c rational function r(p)=(u*p+v)/(p+w); three values of p (p1,p2,p3) c -c with corresponding values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s)c -c are used to calculate the new value of p such that r(p)=s. c -c convergence is guaranteed by taking f1 > 0 and f3 < 0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c initial value for p. - p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = one - do 305 i=1,6 - drr(i) = dr(i) - 305 continue - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p)=s. - do 350 iter = 1,maxit -c find the smoothing spline sp(u,v) and the corresponding sum f(p). - call fpopsp(ifsu,ifsv,ifbu,ifbv,u,mu,v,mv,r,mr,r0,r1,drr,iopt, - * idd,tu,nu,tv,nv,nuest,nvest,p,step,c,nc,fp,fpintu,fpintv,nru, - * nrv,wrk,lwrk) -c test whether the approximation sp(u,v) is an acceptable solution. - fpms = fp-s - if(abs(fpms).lt.acc) go to 440 -c test whether the maximum allowable number of iterations has been -c reached. 
- if(iter.eq.maxit) go to 400 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 320 - if((f2-f3).gt.acc) go to 310 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 350 - 310 if(f2.lt.0.) ich3 = 1 - 320 if(ich1.ne.0) go to 340 - if((f1-f2).gt.acc) go to 330 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 350 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 350 -c test whether the iteration process proceeds as theoretically -c expected. - 330 if(f2.gt.0.) ich1 = 1 - 340 if(f2.ge.f1 .or. f2.le.f3) go to 410 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 350 continue -c error codes and messages. - 400 ier = 3 - go to 440 - 410 ier = 2 - go to 440 - 420 ier = 1 - go to 440 - 430 ier = -1 - fp = 0. - 440 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpsphe.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpsphe.f deleted file mode 100644 index 4d51ca1e6e..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpsphe.f +++ /dev/null @@ -1,764 +0,0 @@ - subroutine fpsphe(iopt,m,teta,phi,r,w,s,ntest,npest,eta,tol,maxit, - * - * ib1,ib3,nc,ncc,intest,nrest,nt,tt,np,tp,c,fp,sup,fpint,coord,f, - * ff,row,coco,cosi,a,q,bt,bp,spt,spp,h,index,nummer,wrk,lwrk,ier) -c .. -c ..scalar arguments.. - integer iopt,m,ntest,npest,maxit,ib1,ib3,nc,ncc,intest,nrest, - * nt,np,lwrk,ier - real*8 s,eta,tol,fp,sup -c ..array arguments.. - real*8 teta(m),phi(m),r(m),w(m),tt(ntest),tp(npest),c(nc), - * fpint(intest),coord(intest),f(ncc),ff(nc),row(npest),coco(npest), - * - * cosi(npest),a(ncc,ib1),q(ncc,ib3),bt(ntest,5),bp(npest,5), - * spt(m,4),spp(m,4),h(ib3),wrk(lwrk) - integer index(nrest),nummer(m) -c ..local scalars.. 
- real*8 aa,acc,arg,cn,co,c1,dmax,d1,d2,eps,facc,facs,fac1,fac2,fn, - * fpmax,fpms,f1,f2,f3,hti,htj,p,pi,pinv,piv,pi2,p1,p2,p3,ri,si, - * sigma,sq,store,wi,rn,one,con1,con9,con4,half,ten - integer i,iband,iband1,iband3,iband4,ich1,ich3,ii,ij,il,in,irot, - * iter,i1,i2,i3,j,jlt,jrot,j1,j2,l,la,lf,lh,ll,lp,lt,lwest,l1,l2, - * l3,l4,ncof,ncoff,npp,np4,nreg,nrint,nrr,nr1,ntt,nt4,nt6,num, - * num1,rank -c ..local arrays.. - real*8 ht(4),hp(4) -c ..function references.. - real*8 abs,atan,fprati,sqrt,cos,sin - integer min0 -c ..subroutine references.. -c fpback,fpbspl,fpgivs,fpdisc,fporde,fprank,fprota,fprpsp -c .. -c set constants - one = 0.1e+01 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 - half = 0.5e0 - ten = 0.1e+02 - pi = atan(one)*4 - pi2 = pi+pi - eps = sqrt(eta) - if(iopt.lt.0) go to 70 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s - if(iopt.eq.0) go to 10 - if(s.lt.sup) then - if (np.lt.11) go to 60 - go to 70 - endif -c if iopt=0 we begin by computing the weighted least-squares polynomial -c of the form -c s(teta,phi) = c1*f1(teta) + cn*fn(teta) -c where f1(teta) and fn(teta) are the cubic polynomials satisfying -c f1(0) = 1, f1(pi) = f1'(0) = f1'(pi) = 0 ; fn(teta) = 1-f1(teta). -c the corresponding weighted sum of squared residuals gives the upper -c bound sup for the smoothing factor s. - 10 sup = 0. - d1 = 0. - d2 = 0. - c1 = 0. - cn = 0. - fac1 = pi*(one + half) - fac2 = (one + one)/pi**3 - aa = 0. - do 40 i=1,m - wi = w(i) - ri = r(i)*wi - arg = teta(i) - fn = fac2*arg*arg*(fac1-arg) - f1 = (one-fn)*wi - fn = fn*wi - if(fn.eq.0.) go to 20 - call fpgivs(fn,d1,co,si) - call fprota(co,si,f1,aa) - call fprota(co,si,ri,cn) - 20 if(f1.eq.0.) go to 30 - call fpgivs(f1,d2,co,si) - call fprota(co,si,ri,c1) - 30 sup = sup+ri*ri - 40 continue - if(d2.ne.0.) c1 = c1/d2 - if(d1.ne.0.) 
cn = (cn-aa*c1)/d1 -c find the b-spline representation of this least-squares polynomial - nt = 8 - np = 8 - do 50 i=1,4 - c(i) = c1 - c(i+4) = c1 - c(i+8) = cn - c(i+12) = cn - tt(i) = 0. - tt(i+4) = pi - tp(i) = 0. - tp(i+4) = pi2 - 50 continue - fp = sup -c test whether the least-squares polynomial is an acceptable solution - fpms = sup-s - if(fpms.lt.acc) go to 960 -c test whether we cannot further increase the number of knots. - 60 if(npest.lt.11 .or. ntest.lt.9) go to 950 -c find the initial set of interior knots of the spherical spline in -c case iopt = 0. - np = 11 - tp(5) = pi*half - tp(6) = pi - tp(7) = tp(5)+pi - nt = 9 - tt(5) = tp(5) -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1 : computation of least-squares spherical splines. c -c ******************************************************** c -c if iopt < 0 we compute the least-squares spherical spline according c -c to the given set of knots. c -c if iopt >=0 we compute least-squares spherical splines with increas-c -c ing numbers of knots until the corresponding sum f(p=inf)<=s. c -c the initial set of knots then depends on the value of iopt: c -c if iopt=0 we start with one interior knot in the teta-direction c -c (pi/2) and three in the phi-direction (pi/2,pi,3*pi/2). c -c if iopt>0 we start with the set of knots found at the last call c -c of the routine. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c main loop for the different sets of knots. m is a save upper bound -c for the number of trials. - 70 do 570 iter=1,m -c find the position of the additional knots which are needed for the -c b-spline representation of s(teta,phi). - l1 = 4 - l2 = l1 - l3 = np-3 - l4 = l3 - tp(l2) = 0. - tp(l3) = pi2 - do 80 i=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tp(l2) = tp(l4)-pi2 - tp(l3) = tp(l1)+pi2 - 80 continue - l = nt - do 90 i=1,4 - tt(i) = 0. 
- tt(l) = pi - l = l-1 - 90 continue -c find nrint, the total number of knot intervals and nreg, the number -c of panels in which the approximation domain is subdivided by the -c intersection of knots. - ntt = nt-7 - npp = np-7 - nrr = npp/2 - nr1 = nrr+1 - nrint = ntt+npp - nreg = ntt*npp -c arrange the data points according to the panel they belong to. - call fporde(teta,phi,m,3,3,tt,nt,tp,np,nummer,index,nreg) -c find the b-spline coefficients coco and cosi of the cubic spline -c approximations sc(phi) and ss(phi) for cos(phi) and sin(phi). - do 100 i=1,npp - coco(i) = 0. - cosi(i) = 0. - do 100 j=1,npp - a(i,j) = 0. - 100 continue -c the coefficients coco and cosi are obtained from the conditions -c sc(tp(i))=cos(tp(i)),resp. ss(tp(i))=sin(tp(i)),i=4,5,...np-4. - do 150 i=1,npp - l2 = i+3 - arg = tp(l2) - call fpbspl(tp,np,3,arg,l2,hp) - do 110 j=1,npp - row(j) = 0. - 110 continue - ll = i - do 120 j=1,3 - if(ll.gt.npp) ll= 1 - row(ll) = row(ll)+hp(j) - ll = ll+1 - 120 continue - facc = cos(arg) - facs = sin(arg) - do 140 j=1,npp - piv = row(j) - if(piv.eq.0.) go to 140 - call fpgivs(piv,a(j,1),co,si) - call fprota(co,si,facc,coco(j)) - call fprota(co,si,facs,cosi(j)) - if(j.eq.npp) go to 150 - j1 = j+1 - i2 = 1 - do 130 l=j1,npp - i2 = i2+1 - call fprota(co,si,row(l),a(j,i2)) - 130 continue - 140 continue - 150 continue - call fpback(a,coco,npp,npp,coco,ncc) - call fpback(a,cosi,npp,npp,cosi,ncc) -c find ncof, the dimension of the spherical spline and ncoff, the -c number of coefficients in the standard b-spline representation. - nt4 = nt-4 - np4 = np-4 - ncoff = nt4*np4 - ncof = 6+npp*(ntt-1) -c find the bandwidth of the observation matrix a. - iband = 4*npp - if(ntt.eq.4) iband = 3*(npp+1) - if(ntt.lt.4) iband = ncof - iband1 = iband-1 -c initialize the observation matrix a. - do 160 i=1,ncof - f(i) = 0. - do 160 j=1,iband - a(i,j) = 0. - 160 continue -c initialize the sum of squared residuals. - fp = 0. -c fetch the data points in the new order. 
main loop for the -c different panels. - do 340 num=1,nreg -c fix certain constants for the current panel; jrot records the column -c number of the first non-zero element in a row of the observation -c matrix according to a data point of the panel. - num1 = num-1 - lt = num1/npp - l1 = lt+4 - lp = num1-lt*npp+1 - l2 = lp+3 - lt = lt+1 - jrot = 0 - if(lt.gt.2) jrot = 3+(lt-3)*npp -c test whether there are still data points in the current panel. - in = index(num) - 170 if(in.eq.0) go to 340 -c fetch a new data point. - wi = w(in) - ri = r(in)*wi -c evaluate for the teta-direction, the 4 non-zero b-splines at teta(in) - call fpbspl(tt,nt,3,teta(in),l1,ht) -c evaluate for the phi-direction, the 4 non-zero b-splines at phi(in) - call fpbspl(tp,np,3,phi(in),l2,hp) -c store the value of these b-splines in spt and spp resp. - do 180 i=1,4 - spp(in,i) = hp(i) - spt(in,i) = ht(i) - 180 continue -c initialize the new row of observation matrix. - do 190 i=1,iband - h(i) = 0. - 190 continue -c calculate the non-zero elements of the new row by making the cross -c products of the non-zero b-splines in teta- and phi-direction and -c by taking into account the conditions of the spherical splines. - do 200 i=1,npp - row(i) = 0. - 200 continue -c take into account the condition (3) of the spherical splines. - ll = lp - do 210 i=1,4 - if(ll.gt.npp) ll=1 - row(ll) = row(ll)+hp(i) - ll = ll+1 - 210 continue -c take into account the other conditions of the spherical splines. - if(lt.gt.2 .and. lt.lt.(ntt-1)) go to 230 - facc = 0. - facs = 0. - do 220 i=1,npp - facc = facc+row(i)*coco(i) - facs = facs+row(i)*cosi(i) - 220 continue -c fill in the non-zero elements of the new row. - 230 j1 = 0 - do 280 j =1,4 - jlt = j+lt - htj = ht(j) - if(jlt.gt.2 .and. jlt.le.nt4) go to 240 - j1 = j1+1 - h(j1) = h(j1)+htj - go to 280 - 240 if(jlt.eq.3 .or. 
jlt.eq.nt4) go to 260 - do 250 i=1,npp - j1 = j1+1 - h(j1) = row(i)*htj - 250 continue - go to 280 - 260 if(jlt.eq.3) go to 270 - h(j1+1) = facc*htj - h(j1+2) = facs*htj - h(j1+3) = htj - j1 = j1+2 - go to 280 - 270 h(1) = h(1)+htj - h(2) = facc*htj - h(3) = facs*htj - j1 = 3 - 280 continue - do 290 i=1,iband - h(i) = h(i)*wi - 290 continue -c rotate the row into triangle by givens transformations. - irot = jrot - do 310 i=1,iband - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 310 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(irot,1),co,si) -c apply that transformation to the right hand side. - call fprota(co,si,ri,f(irot)) - if(i.eq.iband) go to 320 -c apply that transformation to the left hand side. - i2 = 1 - i3 = i+1 - do 300 j=i3,iband - i2 = i2+1 - call fprota(co,si,h(j),a(irot,i2)) - 300 continue - 310 continue -c add the contribution of the row to the sum of squares of residual -c right hand sides. - 320 fp = fp+ri**2 -c find the number of the next data point in the panel. - 330 in = nummer(in) - go to 170 - 340 continue -c find dmax, the maximum value for the diagonal elements in the reduced -c triangle. - dmax = 0. - do 350 i=1,ncof - if(a(i,1).le.dmax) go to 350 - dmax = a(i,1) - 350 continue -c check whether the observation matrix is rank deficient. - sigma = eps*dmax - do 360 i=1,ncof - if(a(i,1).le.sigma) go to 370 - 360 continue -c backward substitution in case of full rank. - call fpback(a,f,ncof,iband,c,ncc) - rank = ncof - do 365 i=1,ncof - q(i,1) = a(i,1)/dmax - 365 continue - go to 390 -c in case of rank deficiency, find the minimum norm solution. 
- 370 lwest = ncof*iband+ncof+iband - if(lwrk.lt.lwest) go to 925 - lf = 1 - lh = lf+ncof - la = lh+iband - do 380 i=1,ncof - ff(i) = f(i) - do 380 j=1,iband - q(i,j) = a(i,j) - 380 continue - call fprank(q,ff,ncof,iband,ncc,sigma,c,sq,rank,wrk(la), - * wrk(lf),wrk(lh)) - do 385 i=1,ncof - q(i,1) = q(i,1)/dmax - 385 continue -c add to the sum of squared residuals, the contribution of reducing -c the rank. - fp = fp+sq -c find the coefficients in the standard b-spline representation of -c the spherical spline. - 390 call fprpsp(nt,np,coco,cosi,c,ff,ncoff) -c test whether the least-squares spline is an acceptable solution. - if(iopt.lt.0) then - if (fp.le.0) go to 970 - go to 980 - endif - fpms = fp-s - if(abs(fpms).le.acc) then - if (fp.le.0) go to 970 - go to 980 - endif -c if f(p=inf) < s, accept the choice of knots. - if(fpms.lt.0.) go to 580 -c test whether we cannot further increase the number of knots. - if(ncof.gt.m) go to 935 -c search where to add a new knot. -c find for each interval the sum of squared residuals fpint for the -c data points having the coordinate belonging to that knot interval. -c calculate also coord which is the same sum, weighted by the position -c of the data points considered. - 440 do 450 i=1,nrint - fpint(i) = 0. - coord(i) = 0. - 450 continue - do 490 num=1,nreg - num1 = num-1 - lt = num1/npp - l1 = lt+1 - lp = num1-lt*npp - l2 = lp+1+ntt - jrot = lt*np4+lp - in = index(num) - 460 if(in.eq.0) go to 490 - store = 0. - i1 = jrot - do 480 i=1,4 - hti = spt(in,i) - j1 = i1 - do 470 j=1,4 - j1 = j1+1 - store = store+hti*spp(in,j)*c(j1) - 470 continue - i1 = i1+np4 - 480 continue - store = (w(in)*(r(in)-store))**2 - fpint(l1) = fpint(l1)+store - coord(l1) = coord(l1)+store*teta(in) - fpint(l2) = fpint(l2)+store - coord(l2) = coord(l2)+store*phi(in) - in = nummer(in) - go to 460 - 490 continue -c find the interval for which fpint is maximal on the condition that -c there still can be added a knot. 
- l1 = 1 - l2 = nrint - if(ntest.lt.nt+1) l1=ntt+1 - if(npest.lt.np+2) l2=ntt -c test whether we cannot further increase the number of knots. - if(l1.gt.l2) go to 950 - 500 fpmax = 0. - l = 0 - do 510 i=l1,l2 - if(fpmax.ge.fpint(i)) go to 510 - l = i - fpmax = fpint(i) - 510 continue - if(l.eq.0) go to 930 -c calculate the position of the new knot. - arg = coord(l)/fpint(l) -c test in what direction the new knot is going to be added. - if(l.gt.ntt) go to 530 -c addition in the teta-direction - l4 = l+4 - fpint(l) = 0. - fac1 = tt(l4)-arg - fac2 = arg-tt(l4-1) - if(fac1.gt.(ten*fac2) .or. fac2.gt.(ten*fac1)) go to 500 - j = nt - do 520 i=l4,nt - tt(j+1) = tt(j) - j = j-1 - 520 continue - tt(l4) = arg - nt = nt+1 - go to 570 -c addition in the phi-direction - 530 l4 = l+4-ntt - if(arg.lt.pi) go to 540 - arg = arg-pi - l4 = l4-nrr - 540 fpint(l) = 0. - fac1 = tp(l4)-arg - fac2 = arg-tp(l4-1) - if(fac1.gt.(ten*fac2) .or. fac2.gt.(ten*fac1)) go to 500 - ll = nrr+4 - j = ll - do 550 i=l4,ll - tp(j+1) = tp(j) - j = j-1 - 550 continue - tp(l4) = arg - np = np+2 - nrr = nrr+1 - do 560 i=5,ll - j = i+nrr - tp(j) = tp(i)+pi - 560 continue -c restart the computations with the new set of knots. - 570 continue -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spherical spline. c -c ******************************************************** c -c we have determined the number of knots and their position. we now c -c compute the coefficients of the smoothing spline sp(teta,phi). c -c the observation matrix a is extended by the rows of a matrix, expres-c -c sing that sp(teta,phi) must be a constant function in the variable c -c phi and a cubic polynomial in the variable teta. the corresponding c -c weights of these additional rows are set to 1/(p). iteratively c -c we than have to determine the value of p such that f(p) = sum((w(i)* c -c (r(i)-sp(teta(i),phi(i))))**2) be = s. 
c -c we already know that the least-squares polynomial corresponds to p=0,c -c and that the least-squares spherical spline corresponds to p=infin. c -c the iteration process makes use of rational interpolation. since f(p)c -c is a convex and strictly decreasing function of p, it can be approx- c -c imated by a rational function of the form r(p) = (u*p+v)/(p+w). c -c three values of p (p1,p2,p3) with corresponding values of f(p) (f1= c -c f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used to calculate the new value c -c of p such that r(p)=s. convergence is guaranteed by taking f1>0,f3<0.c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c evaluate the discontinuity jumps of the 3-th order derivative of -c the b-splines at the knots tt(l),l=5,...,nt-4. - 580 call fpdisc(tt,nt,5,bt,ntest) -c evaluate the discontinuity jumps of the 3-th order derivative of -c the b-splines at the knots tp(l),l=5,...,np-4. - call fpdisc(tp,np,5,bp,npest) -c initial value for p. - p1 = 0. - f1 = sup-s - p3 = -one - f3 = fpms - p = 0. - do 585 i=1,ncof - p = p+a(i,1) - 585 continue - rn = ncof - p = rn/p -c find the bandwidth of the extended observation matrix. - iband4 = iband+3 - if(ntt.le.4) iband4 = ncof - iband3 = iband4 -1 - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p)=s. - do 920 iter=1,maxit - pinv = one/p -c store the triangularized observation matrix into q. - do 600 i=1,ncof - ff(i) = f(i) - do 590 j=1,iband4 - q(i,j) = 0. - 590 continue - do 600 j=1,iband - q(i,j) = a(i,j) - 600 continue -c extend the observation matrix with the rows of a matrix, expressing -c that for teta=cst. sp(teta,phi) must be a constant function. - nt6 = nt-6 - do 720 i=5,np4 - ii = i-4 - do 610 l=1,npp - row(l) = 0. - 610 continue - ll = ii - do 620 l=1,5 - if(ll.gt.npp) ll=1 - row(ll) = row(ll)+bp(ii,l) - ll = ll+1 - 620 continue - facc = 0. - facs = 0. 
- do 630 l=1,npp - facc = facc+row(l)*coco(l) - facs = facs+row(l)*cosi(l) - 630 continue - do 720 j=1,nt6 -c initialize the new row. - do 640 l=1,iband - h(l) = 0. - 640 continue -c fill in the non-zero elements of the row. jrot records the column -c number of the first non-zero element in the row. - jrot = 4+(j-2)*npp - if(j.gt.1 .and. j.lt.nt6) go to 650 - h(1) = facc - h(2) = facs - if(j.eq.1) jrot = 2 - go to 670 - 650 do 660 l=1,npp - h(l)=row(l) - 660 continue - 670 do 675 l=1,iband - h(l) = h(l)*pinv - 675 continue - ri = 0. -c rotate the new row into triangle by givens transformations. - do 710 irot=jrot,ncof - piv = h(1) - i2 = min0(iband1,ncof-irot) - if(piv.eq.0.) then - if (i2.le.0) go to 720 - go to 690 - endif -c calculate the parameters of the givens transformation. - call fpgivs(piv,q(irot,1),co,si) -c apply that givens transformation to the right hand side. - call fprota(co,si,ri,ff(irot)) - if(i2.eq.0) go to 720 -c apply that givens transformation to the left hand side. - do 680 l=1,i2 - l1 = l+1 - call fprota(co,si,h(l1),q(irot,l1)) - 680 continue - 690 do 700 l=1,i2 - h(l) = h(l+1) - 700 continue - h(i2+1) = 0. - 710 continue - 720 continue -c extend the observation matrix with the rows of a matrix expressing -c that for phi=cst. sp(teta,phi) must be a cubic polynomial. - do 810 i=5,nt4 - ii = i-4 - do 810 j=1,npp -c initialize the new row - do 730 l=1,iband4 - h(l) = 0. - 730 continue -c fill in the non-zero elements of the row. jrot records the column -c number of the first non-zero element in the row. - j1 = 1 - do 760 l=1,5 - il = ii+l - ij = npp - if(il.ne.3 .and. il.ne.nt4) go to 750 - j1 = j1+3-j - j2 = j1-2 - ij = 0 - if(il.ne.3) go to 740 - j1 = 1 - j2 = 2 - ij = j+2 - 740 h(j2) = bt(ii,l)*coco(j) - h(j2+1) = bt(ii,l)*cosi(j) - 750 h(j1) = h(j1)+bt(ii,l) - j1 = j1+ij - 760 continue - do 765 l=1,iband4 - h(l) = h(l)*pinv - 765 continue - ri = 0. 
- jrot = 1 - if(ii.gt.2) jrot = 3+j+(ii-3)*npp -c rotate the new row into triangle by givens transformations. - do 800 irot=jrot,ncof - piv = h(1) - i2 = min0(iband3,ncof-irot) - if(piv.eq.0.) then - if (i2.le.0) go to 810 - go to 780 - endif -c calculate the parameters of the givens transformation. - call fpgivs(piv,q(irot,1),co,si) -c apply that givens transformation to the right hand side. - call fprota(co,si,ri,ff(irot)) - if(i2.eq.0) go to 810 -c apply that givens transformation to the left hand side. - do 770 l=1,i2 - l1 = l+1 - call fprota(co,si,h(l1),q(irot,l1)) - 770 continue - 780 do 790 l=1,i2 - h(l) = h(l+1) - 790 continue - h(i2+1) = 0. - 800 continue - 810 continue -c find dmax, the maximum value for the diagonal elements in the -c reduced triangle. - dmax = 0. - do 820 i=1,ncof - if(q(i,1).le.dmax) go to 820 - dmax = q(i,1) - 820 continue -c check whether the matrix is rank deficient. - sigma = eps*dmax - do 830 i=1,ncof - if(q(i,1).le.sigma) go to 840 - 830 continue -c backward substitution in case of full rank. - call fpback(q,ff,ncof,iband4,c,ncc) - rank = ncof - go to 845 -c in case of rank deficiency, find the minimum norm solution. - 840 lwest = ncof*iband4+ncof+iband4 - if(lwrk.lt.lwest) go to 925 - lf = 1 - lh = lf+ncof - la = lh+iband4 - call fprank(q,ff,ncof,iband4,ncc,sigma,c,sq,rank,wrk(la), - * wrk(lf),wrk(lh)) - 845 do 850 i=1,ncof - q(i,1) = q(i,1)/dmax - 850 continue -c find the coefficients in the standard b-spline representation of -c the spherical spline. - call fprpsp(nt,np,coco,cosi,c,ff,ncoff) -c compute f(p). - fp = 0. - do 890 num = 1,nreg - num1 = num-1 - lt = num1/npp - lp = num1-lt*npp - jrot = lt*np4+lp - in = index(num) - 860 if(in.eq.0) go to 890 - store = 0. 
- i1 = jrot - do 880 i=1,4 - hti = spt(in,i) - j1 = i1 - do 870 j=1,4 - j1 = j1+1 - store = store+hti*spp(in,j)*c(j1) - 870 continue - i1 = i1+np4 - 880 continue - fp = fp+(w(in)*(r(in)-store))**2 - in = nummer(in) - go to 860 - 890 continue -c test whether the approximation sp(teta,phi) is an acceptable solution - fpms = fp-s - if(abs(fpms).le.acc) go to 980 -c test whether the maximum allowable number of iterations has been -c reached. - if(iter.eq.maxit) go to 940 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 900 - if((f2-f3).gt.acc) go to 895 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 920 - 895 if(f2.lt.0.) ich3 = 1 - 900 if(ich1.ne.0) go to 910 - if((f1-f2).gt.acc) go to 905 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 920 - if(p.ge.p3) p = p2*con1 +p3*con9 - go to 920 - 905 if(f2.gt.0.) ich1 = 1 -c test whether the iteration process proceeds as theoretically -c expected. - 910 if(f2.ge.f1 .or. f2.le.f3) go to 945 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 920 continue -c error codes and messages. - 925 ier = lwest - go to 990 - 930 ier = 5 - go to 990 - 935 ier = 4 - go to 990 - 940 ier = 3 - go to 990 - 945 ier = 2 - go to 990 - 950 ier = 1 - go to 990 - 960 ier = -2 - go to 990 - 970 ier = -1 - fp = 0. - 980 if(ncof.ne.rank) ier = -rank - 990 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpsuev.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpsuev.f deleted file mode 100644 index d91187d495..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpsuev.f +++ /dev/null @@ -1,80 +0,0 @@ - subroutine fpsuev(idim,tu,nu,tv,nv,c,u,mu,v,mv,f,wu,wv,lu,lv) -c ..scalar arguments.. - integer idim,nu,nv,mu,mv -c ..array arguments.. 
- integer lu(mu),lv(mv) - real*8 tu(nu),tv(nv),c((nu-4)*(nv-4)*idim),u(mu),v(mv), - * f(mu*mv*idim),wu(mu,4),wv(mv,4) -c ..local scalars.. - integer i,i1,j,j1,k,l,l1,l2,l3,m,nuv,nu4,nv4 - real*8 arg,sp,tb,te -c ..local arrays.. - real*8 h(4) -c ..subroutine references.. -c fpbspl -c .. - nu4 = nu-4 - tb = tu(4) - te = tu(nu4+1) - l = 4 - l1 = l+1 - do 40 i=1,mu - arg = u(i) - if(arg.lt.tb) arg = tb - if(arg.gt.te) arg = te - 10 if(arg.lt.tu(l1) .or. l.eq.nu4) go to 20 - l = l1 - l1 = l+1 - go to 10 - 20 call fpbspl(tu,nu,3,arg,l,h) - lu(i) = l-4 - do 30 j=1,4 - wu(i,j) = h(j) - 30 continue - 40 continue - nv4 = nv-4 - tb = tv(4) - te = tv(nv4+1) - l = 4 - l1 = l+1 - do 80 i=1,mv - arg = v(i) - if(arg.lt.tb) arg = tb - if(arg.gt.te) arg = te - 50 if(arg.lt.tv(l1) .or. l.eq.nv4) go to 60 - l = l1 - l1 = l+1 - go to 50 - 60 call fpbspl(tv,nv,3,arg,l,h) - lv(i) = l-4 - do 70 j=1,4 - wv(i,j) = h(j) - 70 continue - 80 continue - m = 0 - nuv = nu4*nv4 - do 140 k=1,idim - l3 = (k-1)*nuv - do 130 i=1,mu - l = lu(i)*nv4+l3 - do 90 i1=1,4 - h(i1) = wu(i,i1) - 90 continue - do 120 j=1,mv - l1 = l+lv(j) - sp = 0. - do 110 i1=1,4 - l2 = l1 - do 100 j1=1,4 - l2 = l2+1 - sp = sp+c(l2)*h(i1)*wv(j,j1) - 100 continue - l1 = l1+nv4 - 110 continue - m = m+1 - f(m) = sp - 120 continue - 130 continue - 140 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpsurf.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpsurf.f deleted file mode 100644 index 2e1a0e71e1..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpsurf.f +++ /dev/null @@ -1,680 +0,0 @@ - subroutine fpsurf(iopt,m,x,y,z,w,xb,xe,yb,ye,kxx,kyy,s,nxest, - * nyest,eta,tol,maxit,nmax,km1,km2,ib1,ib3,nc,intest,nrest, - * nx0,tx,ny0,ty,c,fp,fp0,fpint,coord,f,ff,a,q,bx,by,spx,spy,h, - * index,nummer,wrk,lwrk,ier) -c .. -c ..scalar arguments.. - real*8 xb,xe,yb,ye,s,eta,tol,fp,fp0 - integer iopt,m,kxx,kyy,nxest,nyest,maxit,nmax,km1,km2,ib1,ib3, - * nc,intest,nrest,nx0,ny0,lwrk,ier -c ..array arguments.. 
- real*8 x(m),y(m),z(m),w(m),tx(nmax),ty(nmax),c(nc),fpint(intest), - * coord(intest),f(nc),ff(nc),a(nc,ib1),q(nc,ib3),bx(nmax,km2), - * by(nmax,km2),spx(m,km1),spy(m,km1),h(ib3),wrk(lwrk) - integer index(nrest),nummer(m) -c ..local scalars.. - real*8 acc,arg,cos,dmax,fac1,fac2,fpmax,fpms,f1,f2,f3,hxi,p,pinv, - * piv,p1,p2,p3,sigma,sin,sq,store,wi,x0,x1,y0,y1,zi,eps, - * rn,one,con1,con9,con4,half,ten - integer i,iband,iband1,iband3,iband4,ibb,ichang,ich1,ich3,ii, - * in,irot,iter,i1,i2,i3,j,jrot,jxy,j1,kx,kx1,kx2,ky,ky1,ky2,l, - * la,lf,lh,lwest,lx,ly,l1,l2,n,ncof,nk1x,nk1y,nminx,nminy,nreg, - * nrint,num,num1,nx,nxe,nxx,ny,nye,nyy,n1,rank -c ..local arrays.. - real*8 hx(6),hy(6) -c ..function references.. - real*8 abs,fprati,sqrt - integer min0 -c ..subroutine references.. -c fpback,fpbspl,fpgivs,fpdisc,fporde,fprank,fprota -c .. -c set constants - one = 0.1e+01 - con1 = 0.1e0 - con9 = 0.9e0 - con4 = 0.4e-01 - half = 0.5e0 - ten = 0.1e+02 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 1: determination of the number of knots and their position. c -c **************************************************************** c -c given a set of knots we compute the least-squares spline sinf(x,y), c -c and the corresponding weighted sum of squared residuals fp=f(p=inf). c -c if iopt=-1 sinf(x,y) is the requested approximation. c -c if iopt=0 or iopt=1 we check whether we can accept the knots: c -c if fp <=s we will continue with the current set of knots. c -c if fp > s we will increase the number of knots and compute the c -c corresponding least-squares spline until finally fp<=s. c -c the initial choice of knots depends on the value of s and iopt. c -c if iopt=0 we first compute the least-squares polynomial of degree c -c kx in x and ky in y; nx=nminx=2*kx+2 and ny=nminy=2*ky+2. 
c -c fp0=f(0) denotes the corresponding weighted sum of squared c -c residuals c -c if iopt=1 we start with the knots found at the last call of the c -c routine, except for the case that s>=fp0; then we can compute c -c the least-squares polynomial directly. c -c eventually the independent variables x and y (and the corresponding c -c parameters) will be switched if this can reduce the bandwidth of the c -c system to be solved. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c ichang denotes whether(1) or not(-1) the directions have been inter- -c changed. - ichang = -1 - x0 = xb - x1 = xe - y0 = yb - y1 = ye - kx = kxx - ky = kyy - kx1 = kx+1 - ky1 = ky+1 - nxe = nxest - nye = nyest - eps = sqrt(eta) - if(iopt.lt.0) go to 20 -c calculation of acc, the absolute tolerance for the root of f(p)=s. - acc = tol*s - if(iopt.eq.0) go to 10 - if(fp0.gt.s) go to 20 -c initialization for the least-squares polynomial. - 10 nminx = 2*kx1 - nminy = 2*ky1 - nx = nminx - ny = nminy - ier = -2 - go to 30 - 20 nx = nx0 - ny = ny0 -c main loop for the different sets of knots. m is a save upper bound -c for the number of trials. - 30 do 420 iter=1,m -c find the position of the additional knots which are needed for the -c b-spline representation of s(x,y). - l = nx - do 40 i=1,kx1 - tx(i) = x0 - tx(l) = x1 - l = l-1 - 40 continue - l = ny - do 50 i=1,ky1 - ty(i) = y0 - ty(l) = y1 - l = l-1 - 50 continue -c find nrint, the total number of knot intervals and nreg, the number -c of panels in which the approximation domain is subdivided by the -c intersection of knots. - nxx = nx-2*kx1+1 - nyy = ny-2*ky1+1 - nrint = nxx+nyy - nreg = nxx*nyy -c find the bandwidth of the observation matrix a. -c if necessary, interchange the variables x and y, in order to obtain -c a minimal bandwidth. 
- iband1 = kx*(ny-ky1)+ky - l = ky*(nx-kx1)+kx - if(iband1.le.l) go to 130 - iband1 = l - ichang = -ichang - do 60 i=1,m - store = x(i) - x(i) = y(i) - y(i) = store - 60 continue - store = x0 - x0 = y0 - y0 = store - store = x1 - x1 = y1 - y1 = store - n = min0(nx,ny) - do 70 i=1,n - store = tx(i) - tx(i) = ty(i) - ty(i) = store - 70 continue - n1 = n+1 - if (nx.lt.ny) go to 80 - if (nx.eq.ny) go to 120 - go to 100 - 80 do 90 i=n1,ny - tx(i) = ty(i) - 90 continue - go to 120 - 100 do 110 i=n1,nx - ty(i) = tx(i) - 110 continue - 120 l = nx - nx = ny - ny = l - l = nxe - nxe = nye - nye = l - l = nxx - nxx = nyy - nyy = l - l = kx - kx = ky - ky = l - kx1 = kx+1 - ky1 = ky+1 - 130 iband = iband1+1 -c arrange the data points according to the panel they belong to. - call fporde(x,y,m,kx,ky,tx,nx,ty,ny,nummer,index,nreg) -c find ncof, the number of b-spline coefficients. - nk1x = nx-kx1 - nk1y = ny-ky1 - ncof = nk1x*nk1y -c initialize the observation matrix a. - do 140 i=1,ncof - f(i) = 0. - do 140 j=1,iband - a(i,j) = 0. - 140 continue -c initialize the sum of squared residuals. - fp = 0. -c fetch the data points in the new order. main loop for the -c different panels. - do 250 num=1,nreg -c fix certain constants for the current panel; jrot records the column -c number of the first non-zero element in a row of the observation -c matrix according to a data point of the panel. - num1 = num-1 - lx = num1/nyy - l1 = lx+kx1 - ly = num1-lx*nyy - l2 = ly+ky1 - jrot = lx*nk1y+ly -c test whether there are still data points in the panel. - in = index(num) - 150 if(in.eq.0) go to 250 -c fetch a new data point. - wi = w(in) - zi = z(in)*wi -c evaluate for the x-direction, the (kx+1) non-zero b-splines at x(in). - call fpbspl(tx,nx,kx,x(in),l1,hx) -c evaluate for the y-direction, the (ky+1) non-zero b-splines at y(in). - call fpbspl(ty,ny,ky,y(in),l2,hy) -c store the value of these b-splines in spx and spy respectively. 
- do 160 i=1,kx1 - spx(in,i) = hx(i) - 160 continue - do 170 i=1,ky1 - spy(in,i) = hy(i) - 170 continue -c initialize the new row of observation matrix. - do 180 i=1,iband - h(i) = 0. - 180 continue -c calculate the non-zero elements of the new row by making the cross -c products of the non-zero b-splines in x- and y-direction. - i1 = 0 - do 200 i=1,kx1 - hxi = hx(i) - j1 = i1 - do 190 j=1,ky1 - j1 = j1+1 - h(j1) = hxi*hy(j)*wi - 190 continue - i1 = i1+nk1y - 200 continue -c rotate the row into triangle by givens transformations . - irot = jrot - do 220 i=1,iband - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 220 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(irot,1),cos,sin) -c apply that transformation to the right hand side. - call fprota(cos,sin,zi,f(irot)) - if(i.eq.iband) go to 230 -c apply that transformation to the left hand side. - i2 = 1 - i3 = i+1 - do 210 j=i3,iband - i2 = i2+1 - call fprota(cos,sin,h(j),a(irot,i2)) - 210 continue - 220 continue -c add the contribution of the row to the sum of squares of residual -c right hand sides. - 230 fp = fp+zi**2 -c find the number of the next data point in the panel. - 240 in = nummer(in) - go to 150 - 250 continue -c find dmax, the maximum value for the diagonal elements in the reduced -c triangle. - dmax = 0. - do 260 i=1,ncof - if(a(i,1).le.dmax) go to 260 - dmax = a(i,1) - 260 continue -c check whether the observation matrix is rank deficient. - sigma = eps*dmax - do 270 i=1,ncof - if(a(i,1).le.sigma) go to 280 - 270 continue -c backward substitution in case of full rank. - call fpback(a,f,ncof,iband,c,nc) - rank = ncof - do 275 i=1,ncof - q(i,1) = a(i,1)/dmax - 275 continue - go to 300 -c in case of rank deficiency, find the minimum norm solution. 
-c check whether there is sufficient working space - 280 lwest = ncof*iband+ncof+iband - if(lwrk.lt.lwest) go to 780 - do 290 i=1,ncof - ff(i) = f(i) - do 290 j=1,iband - q(i,j) = a(i,j) - 290 continue - lf =1 - lh = lf+ncof - la = lh+iband - call fprank(q,ff,ncof,iband,nc,sigma,c,sq,rank,wrk(la), - * wrk(lf),wrk(lh)) - do 295 i=1,ncof - q(i,1) = q(i,1)/dmax - 295 continue -c add to the sum of squared residuals, the contribution of reducing -c the rank. - fp = fp+sq - 300 if(ier.eq.(-2)) fp0 = fp -c test whether the least-squares spline is an acceptable solution. - if(iopt.lt.0) go to 820 - fpms = fp-s - if(abs(fpms).le.acc) then - if (fp.le.0) go to 815 - go to 820 - endif -c test whether we can accept the choice of knots. - if(fpms.lt.0.) go to 430 -c test whether we cannot further increase the number of knots. - if(ncof.gt.m) go to 790 - ier = 0 -c search where to add a new knot. -c find for each interval the sum of squared residuals fpint for the -c data points having the coordinate belonging to that knot interval. -c calculate also coord which is the same sum, weighted by the position -c of the data points considered. - 310 do 320 i=1,nrint - fpint(i) = 0. - coord(i) = 0. - 320 continue - do 360 num=1,nreg - num1 = num-1 - lx = num1/nyy - l1 = lx+1 - ly = num1-lx*nyy - l2 = ly+1+nxx - jrot = lx*nk1y+ly - in = index(num) - 330 if(in.eq.0) go to 360 - store = 0. - i1 = jrot - do 350 i=1,kx1 - hxi = spx(in,i) - j1 = i1 - do 340 j=1,ky1 - j1 = j1+1 - store = store+hxi*spy(in,j)*c(j1) - 340 continue - i1 = i1+nk1y - 350 continue - store = (w(in)*(z(in)-store))**2 - fpint(l1) = fpint(l1)+store - coord(l1) = coord(l1)+store*x(in) - fpint(l2) = fpint(l2)+store - coord(l2) = coord(l2)+store*y(in) - in = nummer(in) - go to 330 - 360 continue -c find the interval for which fpint is maximal on the condition that -c there still can be added a knot. - 370 l = 0 - fpmax = 0. 
- l1 = 1 - l2 = nrint - if(nx.eq.nxe) l1 = nxx+1 - if(ny.eq.nye) l2 = nxx - if(l1.gt.l2) go to 810 - do 380 i=l1,l2 - if(fpmax.ge.fpint(i)) go to 380 - l = i - fpmax = fpint(i) - 380 continue -c test whether we cannot further increase the number of knots. - if(l.eq.0) go to 785 -c calculate the position of the new knot. - arg = coord(l)/fpint(l) -c test in what direction the new knot is going to be added. - if(l.gt.nxx) go to 400 -c addition in the x-direction. - jxy = l+kx1 - fpint(l) = 0. - fac1 = tx(jxy)-arg - fac2 = arg-tx(jxy-1) - if(fac1.gt.(ten*fac2) .or. fac2.gt.(ten*fac1)) go to 370 - j = nx - do 390 i=jxy,nx - tx(j+1) = tx(j) - j = j-1 - 390 continue - tx(jxy) = arg - nx = nx+1 - go to 420 -c addition in the y-direction. - 400 jxy = l+ky1-nxx - fpint(l) = 0. - fac1 = ty(jxy)-arg - fac2 = arg-ty(jxy-1) - if(fac1.gt.(ten*fac2) .or. fac2.gt.(ten*fac1)) go to 370 - j = ny - do 410 i=jxy,ny - ty(j+1) = ty(j) - j = j-1 - 410 continue - ty(jxy) = arg - ny = ny+1 -c restart the computations with the new set of knots. - 420 continue -c test whether the least-squares polynomial is a solution of our -c approximation problem. - 430 if(ier.eq.(-2)) go to 830 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c part 2: determination of the smoothing spline sp(x,y) c -c ***************************************************** c -c we have determined the number of knots and their position. we now c -c compute the b-spline coefficients of the smoothing spline sp(x,y). c -c the observation matrix a is extended by the rows of a matrix, c -c expressing that sp(x,y) must be a polynomial of degree kx in x and c -c ky in y. the corresponding weights of these additional rows are set c -c to 1./p. iteratively we than have to determine the value of p c -c such that f(p)=sum((w(i)*(z(i)-sp(x(i),y(i))))**2) be = s. c -c we already know that the least-squares polynomial corresponds to c -c p=0 and that the least-squares spline corresponds to p=infinity. 
c -c the iteration process which is proposed here makes use of rational c -c interpolation. since f(p) is a convex and strictly decreasing c -c function of p, it can be approximated by a rational function r(p)= c -c (u*p+v)/(p+w). three values of p(p1,p2,p3) with corresponding values c -c of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used to calculate the c -c new value of p such that r(p)=s. convergence is guaranteed by taking c -c f1 > 0 and f3 < 0. c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - kx2 = kx1+1 -c test whether there are interior knots in the x-direction. - if(nk1x.eq.kx1) go to 440 -c evaluate the discotinuity jumps of the kx-th order derivative of -c the b-splines at the knots tx(l),l=kx+2,...,nx-kx-1. - call fpdisc(tx,nx,kx2,bx,nmax) - 440 ky2 = ky1 + 1 -c test whether there are interior knots in the y-direction. - if(nk1y.eq.ky1) go to 450 -c evaluate the discontinuity jumps of the ky-th order derivative of -c the b-splines at the knots ty(l),l=ky+2,...,ny-ky-1. - call fpdisc(ty,ny,ky2,by,nmax) -c initial value for p. - 450 p1 = 0. - f1 = fp0-s - p3 = -one - f3 = fpms - p = 0. - do 460 i=1,ncof - p = p+a(i,1) - 460 continue - rn = ncof - p = rn/p -c find the bandwidth of the extended observation matrix. - iband3 = kx1*nk1y - iband4 = iband3 +1 - ich1 = 0 - ich3 = 0 -c iteration process to find the root of f(p)=s. - do 770 iter=1,maxit - pinv = one/p -c store the triangularized observation matrix into q. - do 480 i=1,ncof - ff(i) = f(i) - do 470 j=1,iband - q(i,j) = a(i,j) - 470 continue - ibb = iband+1 - do 480 j=ibb,iband4 - q(i,j) = 0. - 480 continue - if(nk1y.eq.ky1) go to 560 -c extend the observation matrix with the rows of a matrix, expressing -c that for x=cst. sp(x,y) must be a polynomial in y of degree ky. - do 550 i=ky2,nk1y - ii = i-ky1 - do 550 j=1,nk1x -c initialize the new row. - do 490 l=1,iband - h(l) = 0. - 490 continue -c fill in the non-zero elements of the row. 
jrot records the column -c number of the first non-zero element in the row. - do 500 l=1,ky2 - h(l) = by(ii,l)*pinv - 500 continue - zi = 0. - jrot = (j-1)*nk1y+ii -c rotate the new row into triangle by givens transformations without -c square roots. - do 540 irot=jrot,ncof - piv = h(1) - i2 = min0(iband1,ncof-irot) - if(piv.eq.0.) then - if (i2.le.0) go to 550 - go to 520 - endif -c calculate the parameters of the givens transformation. - call fpgivs(piv,q(irot,1),cos,sin) -c apply that givens transformation to the right hand side. - call fprota(cos,sin,zi,ff(irot)) - if(i2.eq.0) go to 550 -c apply that givens transformation to the left hand side. - do 510 l=1,i2 - l1 = l+1 - call fprota(cos,sin,h(l1),q(irot,l1)) - 510 continue - 520 do 530 l=1,i2 - h(l) = h(l+1) - 530 continue - h(i2+1) = 0. - 540 continue - 550 continue - 560 if(nk1x.eq.kx1) go to 640 -c extend the observation matrix with the rows of a matrix expressing -c that for y=cst. sp(x,y) must be a polynomial in x of degree kx. - do 630 i=kx2,nk1x - ii = i-kx1 - do 630 j=1,nk1y -c initialize the new row - do 570 l=1,iband4 - h(l) = 0. - 570 continue -c fill in the non-zero elements of the row. jrot records the column -c number of the first non-zero element in the row. - j1 = 1 - do 580 l=1,kx2 - h(j1) = bx(ii,l)*pinv - j1 = j1+nk1y - 580 continue - zi = 0. - jrot = (i-kx2)*nk1y+j -c rotate the new row into triangle by givens transformations . - do 620 irot=jrot,ncof - piv = h(1) - i2 = min0(iband3,ncof-irot) - if(piv.eq.0.) then - if (i2.le.0) go to 630 - go to 600 - endif -c calculate the parameters of the givens transformation. - call fpgivs(piv,q(irot,1),cos,sin) -c apply that givens transformation to the right hand side. - call fprota(cos,sin,zi,ff(irot)) - if(i2.eq.0) go to 630 -c apply that givens transformation to the left hand side. - do 590 l=1,i2 - l1 = l+1 - call fprota(cos,sin,h(l1),q(irot,l1)) - 590 continue - 600 do 610 l=1,i2 - h(l) = h(l+1) - 610 continue - h(i2+1) = 0. 
- 620 continue - 630 continue -c find dmax, the maximum value for the diagonal elements in the -c reduced triangle. - 640 dmax = 0. - do 650 i=1,ncof - if(q(i,1).le.dmax) go to 650 - dmax = q(i,1) - 650 continue -c check whether the matrix is rank deficient. - sigma = eps*dmax - do 660 i=1,ncof - if(q(i,1).le.sigma) go to 670 - 660 continue -c backward substitution in case of full rank. - call fpback(q,ff,ncof,iband4,c,nc) - rank = ncof - go to 675 -c in case of rank deficiency, find the minimum norm solution. - 670 lwest = ncof*iband4+ncof+iband4 - if(lwrk.lt.lwest) go to 780 - lf = 1 - lh = lf+ncof - la = lh+iband4 - call fprank(q,ff,ncof,iband4,nc,sigma,c,sq,rank,wrk(la), - * wrk(lf),wrk(lh)) - 675 do 680 i=1,ncof - q(i,1) = q(i,1)/dmax - 680 continue -c compute f(p). - fp = 0. - do 720 num = 1,nreg - num1 = num-1 - lx = num1/nyy - ly = num1-lx*nyy - jrot = lx*nk1y+ly - in = index(num) - 690 if(in.eq.0) go to 720 - store = 0. - i1 = jrot - do 710 i=1,kx1 - hxi = spx(in,i) - j1 = i1 - do 700 j=1,ky1 - j1 = j1+1 - store = store+hxi*spy(in,j)*c(j1) - 700 continue - i1 = i1+nk1y - 710 continue - fp = fp+(w(in)*(z(in)-store))**2 - in = nummer(in) - go to 690 - 720 continue -c test whether the approximation sp(x,y) is an acceptable solution. - fpms = fp-s - if(abs(fpms).le.acc) go to 820 -c test whether the maximum allowable number of iterations has been -c reached. - if(iter.eq.maxit) go to 795 -c carry out one more step of the iteration process. - p2 = p - f2 = fpms - if(ich3.ne.0) go to 740 - if((f2-f3).gt.acc) go to 730 -c our initial choice of p is too large. - p3 = p2 - f3 = f2 - p = p*con4 - if(p.le.p1) p = p1*con9 + p2*con1 - go to 770 - 730 if(f2.lt.0.) ich3 = 1 - 740 if(ich1.ne.0) go to 760 - if((f1-f2).gt.acc) go to 750 -c our initial choice of p is too small - p1 = p2 - f1 = f2 - p = p/con4 - if(p3.lt.0.) go to 770 - if(p.ge.p3) p = p2*con1 + p3*con9 - go to 770 - 750 if(f2.gt.0.) 
ich1 = 1 -c test whether the iteration process proceeds as theoretically -c expected. - 760 if(f2.ge.f1 .or. f2.le.f3) go to 800 -c find the new value of p. - p = fprati(p1,f1,p2,f2,p3,f3) - 770 continue -c error codes and messages. - 780 ier = lwest - go to 830 - 785 ier = 5 - go to 830 - 790 ier = 4 - go to 830 - 795 ier = 3 - go to 830 - 800 ier = 2 - go to 830 - 810 ier = 1 - go to 830 - 815 ier = -1 - fp = 0. - 820 if(ncof.ne.rank) ier = -rank -c test whether x and y are in the original order. - 830 if(ichang.lt.0) go to 930 -c if not, interchange x and y once more. - l1 = 1 - do 840 i=1,nk1x - l2 = i - do 840 j=1,nk1y - f(l2) = c(l1) - l1 = l1+1 - l2 = l2+nk1x - 840 continue - do 850 i=1,ncof - c(i) = f(i) - 850 continue - do 860 i=1,m - store = x(i) - x(i) = y(i) - y(i) = store - 860 continue - n = min0(nx,ny) - do 870 i=1,n - store = tx(i) - tx(i) = ty(i) - ty(i) = store - 870 continue - n1 = n+1 - if (nx.lt.ny) go to 880 - if (nx.eq.ny) go to 920 - go to 900 - 880 do 890 i=n1,ny - tx(i) = ty(i) - 890 continue - go to 920 - 900 do 910 i=n1,nx - ty(i) = tx(i) - 910 continue - 920 l = nx - nx = ny - ny = l - 930 if(iopt.lt.0) go to 940 - nx0 = nx - ny0 = ny - 940 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fpsysy.f b/scipy-0.10.1/scipy/interpolate/fitpack/fpsysy.f deleted file mode 100644 index 2226c859cd..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fpsysy.f +++ /dev/null @@ -1,56 +0,0 @@ - subroutine fpsysy(a,n,g) -c subroutine fpsysy solves a linear n x n symmetric system -c (a) * (b) = (g) -c on input, vector g contains the right hand side ; on output it will -c contain the solution (b). -c .. -c ..scalar arguments.. - integer n -c ..array arguments.. - real*8 a(6,6),g(6) -c ..local scalars.. - real*8 fac - integer i,i1,j,k -c .. 
- g(1) = g(1)/a(1,1) - if(n.eq.1) return -c decomposition of the symmetric matrix (a) = (l) * (d) *(l)' -c with (l) a unit lower triangular matrix and (d) a diagonal -c matrix - do 10 k=2,n - a(k,1) = a(k,1)/a(1,1) - 10 continue - do 40 i=2,n - i1 = i-1 - do 30 k=i,n - fac = a(k,i) - do 20 j=1,i1 - fac = fac-a(j,j)*a(k,j)*a(i,j) - 20 continue - a(k,i) = fac - if(k.gt.i) a(k,i) = fac/a(i,i) - 30 continue - 40 continue -c solve the system (l)*(d)*(l)'*(b) = (g). -c first step : solve (l)*(d)*(c) = (g). - do 60 i=2,n - i1 = i-1 - fac = g(i) - do 50 j=1,i1 - fac = fac-g(j)*a(j,j)*a(i,j) - 50 continue - g(i) = fac/a(i,i) - 60 continue -c second step : solve (l)'*(b) = (c) - i = n - do 80 j=2,n - i1 = i - i = i-1 - fac = g(i) - do 70 k=i1,n - fac = fac-g(k)*a(k,i) - 70 continue - g(i) = fac - 80 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fptrnp.f b/scipy-0.10.1/scipy/interpolate/fitpack/fptrnp.f deleted file mode 100644 index a315ed7b20..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fptrnp.f +++ /dev/null @@ -1,106 +0,0 @@ - subroutine fptrnp(m,mm,idim,n,nr,sp,p,b,z,a,q,right) -c subroutine fptrnp reduces the (m+n-7) x (n-4) matrix a to upper -c triangular form and applies the same givens transformations to -c the (m) x (mm) x (idim) matrix z to obtain the (n-4) x (mm) x -c (idim) matrix q -c .. -c ..scalar arguments.. - real*8 p - integer m,mm,idim,n -c ..array arguments.. - real*8 sp(m,4),b(n,5),z(m*mm*idim),a(n,5),q((n-4)*mm*idim), - * right(mm*idim) - integer nr(m) -c ..local scalars.. - real*8 cos,pinv,piv,sin,one - integer i,iband,irot,it,ii,i2,i3,j,jj,l,mid,nmd,m2,m3, - * nrold,n4,number,n1 -c ..local arrays.. - real*8 h(7) -c ..subroutine references.. -c fpgivs,fprota -c .. - one = 1 - if(p.gt.0.) pinv = one/p - n4 = n-4 - mid = mm*idim - m2 = m*mm - m3 = n4*mm -c reduce the matrix (a) to upper triangular form (r) using givens -c rotations. 
apply the same transformations to the rows of matrix z -c to obtain the mm x (n-4) matrix g. -c store matrix (r) into (a) and g into q. -c initialization. - nmd = n4*mid - do 50 i=1,nmd - q(i) = 0. - 50 continue - do 100 i=1,n4 - do 100 j=1,5 - a(i,j) = 0. - 100 continue - nrold = 0 -c iband denotes the bandwidth of the matrices (a) and (r). - iband = 4 - do 750 it=1,m - number = nr(it) - 150 if(nrold.eq.number) go to 300 - if(p.le.0.) go to 700 - iband = 5 -c fetch a new row of matrix (b). - n1 = nrold+1 - do 200 j=1,5 - h(j) = b(n1,j)*pinv - 200 continue -c find the appropriate column of q. - do 250 j=1,mid - right(j) = 0. - 250 continue - irot = nrold - go to 450 -c fetch a new row of matrix (sp). - 300 h(iband) = 0. - do 350 j=1,4 - h(j) = sp(it,j) - 350 continue -c find the appropriate column of q. - j = 0 - do 400 ii=1,idim - l = (ii-1)*m2+(it-1)*mm - do 400 jj=1,mm - j = j+1 - l = l+1 - right(j) = z(l) - 400 continue - irot = number -c rotate the new row of matrix (a) into triangle. - 450 do 600 i=1,iband - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 600 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(irot,1),cos,sin) -c apply that transformation to the rows of matrix q. - j = 0 - do 500 ii=1,idim - l = (ii-1)*m3+irot - do 500 jj=1,mm - j = j+1 - call fprota(cos,sin,right(j),q(l)) - l = l+n4 - 500 continue -c apply that transformation to the columns of (a). 
- if(i.eq.iband) go to 650 - i2 = 1 - i3 = i+1 - do 550 j=i3,iband - i2 = i2+1 - call fprota(cos,sin,h(j),a(irot,i2)) - 550 continue - 600 continue - 650 if(nrold.eq.number) go to 750 - 700 nrold = nrold+1 - go to 150 - 750 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/fptrpe.f b/scipy-0.10.1/scipy/interpolate/fitpack/fptrpe.f deleted file mode 100644 index c413d4bb9c..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/fptrpe.f +++ /dev/null @@ -1,212 +0,0 @@ - subroutine fptrpe(m,mm,idim,n,nr,sp,p,b,z,a,aa,q,right) -c subroutine fptrpe reduces the (m+n-7) x (n-7) cyclic bandmatrix a -c to upper triangular form and applies the same givens transformations -c to the (m) x (mm) x (idim) matrix z to obtain the (n-7) x (mm) x -c (idim) matrix q. -c .. -c ..scalar arguments.. - real*8 p - integer m,mm,idim,n -c ..array arguments.. - real*8 sp(m,4),b(n,5),z(m*mm*idim),a(n,5),aa(n,4),q((n-7)*mm*idim) - *, - * right(mm*idim) - integer nr(m) -c ..local scalars.. - real*8 co,pinv,piv,si,one - integer i,iband,irot,it,ii,i2,i3,j,jj,l,mid,nmd,m2,m3, - * nrold,n4,number,n1,n7,n11,m1 -c ..local arrays.. - real*8 h(5),h1(5),h2(4) -c ..subroutine references.. -c fpgivs,fprota -c .. - one = 1 - if(p.gt.0.) pinv = one/p - n4 = n-4 - n7 = n-7 - n11 = n-11 - mid = mm*idim - m2 = m*mm - m3 = n7*mm - m1 = m-1 -c we determine the matrix (a) and then we reduce her to -c upper triangular form (r) using givens rotations. -c we apply the same transformations to the rows of matrix -c z to obtain the (mm) x (n-7) matrix g. -c we store matrix (r) into a and aa, g into q. -c the n7 x n7 upper triangular matrix (r) has the form -c | a1 ' | -c (r) = | ' a2 | -c | 0 ' | -c with (a2) a n7 x 4 matrix and (a1) a n11 x n11 upper -c triangular matrix of bandwidth 5. -c initialization. - nmd = n7*mid - do 50 i=1,nmd - q(i) = 0. - 50 continue - do 100 i=1,n4 - a(i,5) = 0. - do 100 j=1,4 - a(i,j) = 0. - aa(i,j) = 0. 
- 100 continue - jper = 0 - nrold = 0 - do 760 it=1,m1 - number = nr(it) - 120 if(nrold.eq.number) go to 180 - if(p.le.0.) go to 740 -c fetch a new row of matrix (b). - n1 = nrold+1 - do 140 j=1,5 - h(j) = b(n1,j)*pinv - 140 continue -c find the appropiate row of q. - do 160 j=1,mid - right(j) = 0. - 160 continue - go to 240 -c fetch a new row of matrix (sp) - 180 h(5) = 0. - do 200 j=1,4 - h(j) = sp(it,j) - 200 continue -c find the appropiate row of q. - j = 0 - do 220 ii=1,idim - l = (ii-1)*m2+(it-1)*mm - do 220 jj=1,mm - j = j+1 - l = l+1 - right(j) = z(l) - 220 continue -c test whether there are non-zero values in the new row of (a) -c corresponding to the b-splines n(j,*),j=n7+1,...,n4. - 240 if(nrold.lt.n11) go to 640 - if(jper.ne.0) go to 320 -c initialize the matrix (aa). - jk = n11+1 - do 300 i=1,4 - ik = jk - do 260 j=1,5 - if(ik.le.0) go to 280 - aa(ik,i) = a(ik,j) - ik = ik-1 - 260 continue - 280 jk = jk+1 - 300 continue - jper = 1 -c if one of the non-zero elements of the new row corresponds to one of -c the b-splines n(j;*),j=n7+1,...,n4,we take account of the periodicity -c conditions for setting up this row of (a). - 320 do 340 i=1,4 - h1(i) = 0. - h2(i) = 0. - 340 continue - h1(5) = 0. - j = nrold-n11 - do 420 i=1,5 - j = j+1 - l0 = j - 360 l1 = l0-4 - if(l1.le.0) go to 400 - if(l1.le.n11) go to 380 - l0 = l1-n11 - go to 360 - 380 h1(l1) = h(i) - go to 420 - 400 h2(l0) = h2(l0) + h(i) - 420 continue -c rotate the new row of (a) into triangle. - if(n11.le.0) go to 560 -c rotations with the rows 1,2,...,n11 of (a). - do 540 irot=1,n11 - piv = h1(1) - i2 = min0(n11-irot,4) - if(piv.eq.0.) go to 500 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(irot,1),co,si) -c apply that transformation to the columns of matrix q. - j = 0 - do 440 ii=1,idim - l = (ii-1)*m3+irot - do 440 jj=1,mm - j = j+1 - call fprota(co,si,right(j),q(l)) - l = l+n7 - 440 continue -c apply that transformation to the rows of (a) with respect to aa. 
- do 460 i=1,4 - call fprota(co,si,h2(i),aa(irot,i)) - 460 continue -c apply that transformation to the rows of (a) with respect to a. - if(i2.eq.0) go to 560 - do 480 i=1,i2 - i1 = i+1 - call fprota(co,si,h1(i1),a(irot,i1)) - 480 continue - 500 do 520 i=1,i2 - h1(i) = h1(i+1) - 520 continue - h1(i2+1) = 0. - 540 continue -c rotations with the rows n11+1,...,n7 of a. - 560 do 620 irot=1,4 - ij = n11+irot - if(ij.le.0) go to 620 - piv = h2(irot) - if(piv.eq.0.) go to 620 -c calculate the parameters of the givens transformation. - call fpgivs(piv,aa(ij,irot),co,si) -c apply that transformation to the columns of matrix q. - j = 0 - do 580 ii=1,idim - l = (ii-1)*m3+ij - do 580 jj=1,mm - j = j+1 - call fprota(co,si,right(j),q(l)) - l = l+n7 - 580 continue - if(irot.eq.4) go to 620 -c apply that transformation to the rows of (a) with respect to aa. - j1 = irot+1 - do 600 i=j1,4 - call fprota(co,si,h2(i),aa(ij,i)) - 600 continue - 620 continue - go to 720 -c rotation into triangle of the new row of (a), in case the elements -c corresponding to the b-splines n(j;*),j=n7+1,...,n4 are all zero. - 640 irot =nrold - do 700 i=1,5 - irot = irot+1 - piv = h(i) - if(piv.eq.0.) go to 700 -c calculate the parameters of the givens transformation. - call fpgivs(piv,a(irot,1),co,si) -c apply that transformation to the columns of matrix g. - j = 0 - do 660 ii=1,idim - l = (ii-1)*m3+irot - do 660 jj=1,mm - j = j+1 - call fprota(co,si,right(j),q(l)) - l = l+n7 - 660 continue -c apply that transformation to the rows of (a). 
- if(i.eq.5) go to 700 - i2 = 1 - i3 = i+1 - do 680 j=i3,5 - i2 = i2+1 - call fprota(co,si,h(j),a(irot,i2)) - 680 continue - 700 continue - 720 if(nrold.eq.number) go to 760 - 740 nrold = nrold+1 - go to 120 - 760 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/insert.f b/scipy-0.10.1/scipy/interpolate/fitpack/insert.f deleted file mode 100644 index b323bff53c..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/insert.f +++ /dev/null @@ -1,102 +0,0 @@ - subroutine insert(iopt,t,n,c,k,x,tt,nn,cc,nest,ier) -c subroutine insert inserts a new knot x into a spline function s(x) -c of degree k and calculates the b-spline representation of s(x) with -c respect to the new set of knots. in addition, if iopt.ne.0, s(x) -c will be considered as a periodic spline with period per=t(n-k)-t(k+1) -c satisfying the boundary constraints -c t(i+n-2*k-1) = t(i)+per ,i=1,2,...,2*k+1 -c c(i+n-2*k-1) = c(i) ,i=1,2,...,k -c in that case, the knots and b-spline coefficients returned will also -c satisfy these boundary constraints, i.e. -c tt(i+nn-2*k-1) = tt(i)+per ,i=1,2,...,2*k+1 -c cc(i+nn-2*k-1) = cc(i) ,i=1,2,...,k -c -c calling sequence: -c call insert(iopt,t,n,c,k,x,tt,nn,cc,nest,ier) -c -c input parameters: -c iopt : integer flag, specifying whether (iopt.ne.0) or not (iopt=0) -c the given spline must be considered as being periodic. -c t : array,length nest, which contains the position of the knots. -c n : integer, giving the total number of knots of s(x). -c c : array,length nest, which contains the b-spline coefficients. -c k : integer, giving the degree of s(x). -c x : real, which gives the location of the knot to be inserted. -c nest : integer specifying the dimension of the arrays t,c,tt and cc -c nest > n. -c -c output parameters: -c tt : array,length nest, which contains the position of the knots -c after insertion. 
-c nn : integer, giving the total number of knots after insertion -c cc : array,length nest, which contains the b-spline coefficients -c of s(x) with respect to the new set of knots. -c ier : error flag -c ier = 0 : normal return -c ier =10 : invalid input data (see restrictions) -c -c restrictions: -c nest > n -c t(k+1) <= x <= t(n-k) -c in case of a periodic spline (iopt.ne.0) there must be -c either at least k interior knots t(j) satisfying t(k+1)=0 the number of knots of the splines sj(u) and the position -c t(j),j=1,2,...,n is chosen automatically by the routine. the smooth- -c ness of s(u) is then achieved by minimalizing the discontinuity -c jumps of the k-th derivative of s(u) at the knots t(j),j=k+2,k+3,..., -c n-k-1. the amount of smoothness is determined by the condition that -c f(p)=sum((w(i)*dist(x(i),s(u(i))))**2) be <= s, with s a given non- -c negative constant, called the smoothing factor. -c the fit s(u) is given in the b-spline representation and can be -c evaluated by means of subroutine curev. -c -c calling sequence: -c call parcur(iopt,ipar,idim,m,u,mx,x,w,ub,ue,k,s,nest,n,t,nc,c, -c * fp,wrk,lwrk,iwrk,ier) -c -c parameters: -c iopt : integer flag. on entry iopt must specify whether a weighted -c least-squares spline curve (iopt=-1) or a smoothing spline -c curve (iopt=0 or 1) must be determined.if iopt=0 the routine -c will start with an initial set of knots t(i)=ub,t(i+k+1)=ue, -c i=1,2,...,k+1. if iopt=1 the routine will continue with the -c knots found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c ipar : integer flag. on entry ipar must specify whether (ipar=1) -c the user will supply the parameter values u(i),ub and ue -c or whether (ipar=0) these values are to be calculated by -c parcur. unchanged on exit. -c idim : integer. on entry idim must specify the dimension of the -c curve. 0 < idim < 11. 
-c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m > k. unchanged on exit. -c u : real array of dimension at least (m). in case ipar=1,before -c entry, u(i) must be set to the i-th value of the parameter -c variable u for i=1,2,...,m. these values must then be -c supplied in strictly ascending order and will be unchanged -c on exit. in case ipar=0, on exit,array u will contain the -c values u(i) as determined by parcur. -c mx : integer. on entry mx must specify the actual dimension of -c the array x as declared in the calling (sub)program. mx must -c not be too small (see x). unchanged on exit. -c x : real array of dimension at least idim*m. -c before entry, x(idim*(i-1)+j) must contain the j-th coord- -c inate of the i-th data point for i=1,2,...,m and j=1,2,..., -c idim. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. unchanged on exit. -c see also further comments. -c ub,ue : real values. on entry (in case ipar=1) ub and ue must -c contain the lower and upper bound for the parameter u. -c ub <=u(1), ue>= u(m). if ipar = 0 these values will -c automatically be set to 0 and 1 by parcur. -c k : integer. on entry k must specify the degree of the splines. -c 1<=k<=5. it is recommended to use cubic splines (k=3). -c the user is strongly dissuaded from choosing k even,together -c with a small s-value. unchanged on exit. -c s : real.on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments. -c nest : integer. on entry nest must contain an over-estimate of the -c total number of knots of the splines returned, to indicate -c the storage space available to the routine. nest >=2*k+2. -c in most practical situation nest=m/2 will be sufficient. 
-c always large enough is nest=m+k+1, the number of knots -c needed for interpolation (s=0). unchanged on exit. -c n : integer. -c unless ier = 10 (in case iopt >=0), n will contain the -c total number of knots of the smoothing spline curve returned -c if the computation mode iopt=1 is used this value of n -c should be left unchanged between subsequent calls. -c in case iopt=-1, the value of n must be specified on entry. -c t : real array of dimension at least (nest). -c on succesful exit, this array will contain the knots of the -c spline curve,i.e. the position of the interior knots t(k+2), -c t(k+3),..,t(n-k-1) as well as the position of the additional -c t(1)=t(2)=...=t(k+1)=ub and t(n-k)=...=t(n)=ue needed for -c the b-spline representation. -c if the computation mode iopt=1 is used, the values of t(1), -c t(2),...,t(n) should be left unchanged between subsequent -c calls. if the computation mode iopt=-1 is used, the values -c t(k+2),...,t(n-k-1) must be supplied by the user, before -c entry. see also the restrictions (ier=10). -c nc : integer. on entry nc must specify the actual dimension of -c the array c as declared in the calling (sub)program. nc -c must not be too small (see c). unchanged on exit. -c c : real array of dimension at least (nest*idim). -c on succesful exit, this array will contain the coefficients -c in the b-spline representation of the spline curve s(u),i.e. -c the b-spline coefficients of the spline sj(u) will be given -c in c(n*(j-1)+i),i=1,2,...,n-k-1 for j=1,2,...,idim. -c fp : real. unless ier = 10, fp contains the weighted sum of -c squared residuals of the spline curve returned. -c wrk : real array of dimension at least m*(k+1)+nest*(6+idim+3*k). -c used as working space. if the computation mode iopt=1 is -c used, the values wrk(1),...,wrk(n) should be left unchanged -c between subsequent calls. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. 
lwrk -c must not be too small (see wrk). unchanged on exit. -c iwrk : integer array of dimension at least (nest). -c used as working space. if the computation mode iopt=1 is -c used,the values iwrk(1),...,iwrk(n) should be left unchanged -c between subsequent calls. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the curve returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the curve returned is an interpolating -c spline curve (fp=0). -c ier=-2 : normal return. the curve returned is the weighted least- -c squares polynomial curve of degree k.in this extreme case -c fp gives the upper bound fp0 for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameter nest. -c probably causes : nest too small. if nest is already -c large (say nest > m/2), it may also indicate that s is -c too small -c the approximation returned is the least-squares spline -c curve according to the knots t(1),t(2),...,t(n). (n=nest) -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline curve -c with fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing curve -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=10 : error. 
on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, 1<=k<=5, m>k, nest>2*k+2, w(i)>0,i=1,2,...,m -c 0<=ipar<=1, 0=(k+1)*m+nest*(6+idim+3*k), -c nc>=nest*idim -c if ipar=0: sum j=1,idim (x(idim*i+j)-x(idim*(i-1)+j))**2>0 -c i=1,2,...,m-1. -c if ipar=1: ub<=u(1)=0: s>=0 -c if s=0 : nest >= m+k+1 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the curve will be too smooth and signal will be -c lost ; if s is too small the curve will pick up too much noise. in -c the extreme cases the program will return an interpolating curve if -c s=0 and the least-squares polynomial curve of degree k if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c x(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). if nothing is known about the statistical error in x(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c polynomial curve and the upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. 
s=fp0/10, fp0/100,...and more carefully as the -c approximating curve shows more detail) to obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if parcur is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c curve underlying the data. but, if the computation mode iopt=1 is -c used, the knots returned may also depend on the s-values at previous -c calls (if these were smaller). therefore, if after a number of -c trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c parcur once more with the selected value for s but now with iopt=0. -c indeed, parcur may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c -c the form of the approximating curve can strongly be affected by -c the choice of the parameter values u(i). if there is no physical -c reason for choosing a particular parameter u, often good results -c will be obtained with the choice of parcur (in case ipar=0), i.e. -c v(1)=0, v(i)=v(i-1)+q(i), i=2,...,m, u(i)=v(i)/v(m), i=1,..,m -c where -c q(i)= sqrt(sum j=1,idim (xj(i)-xj(i-1))**2 ) -c other possibilities for q(i) are -c q(i)= sum j=1,idim (xj(i)-xj(i-1))**2 -c q(i)= sum j=1,idim abs(xj(i)-xj(i-1)) -c q(i)= max j=1,idim abs(xj(i)-xj(i-1)) -c q(i)= 1 -c -c other subroutines required: -c fpback,fpbspl,fpchec,fppara,fpdisc,fpgivs,fpknot,fprati,fprota -c -c references: -c dierckx p. 
: algorithms for smoothing data with periodic and -c parametric splines, computer graphics and image -c processing 20 (1982) 171-184. -c dierckx p. : algorithms for smoothing data with periodic and param- -c etric splines, report tw55, dept. computer science, -c k.u.leuven, 1981. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1987 -c -c .. -c ..scalar arguments.. - real*8 ub,ue,s,fp - integer iopt,ipar,idim,m,mx,k,nest,n,nc,lwrk,ier -c ..array arguments.. - real*8 u(m),x(mx),w(m),t(nest),c(nc),wrk(lwrk) - integer iwrk(nest) -c ..local scalars.. - real*8 tol,dist - integer i,ia,ib,ifp,ig,iq,iz,i1,i2,j,k1,k2,lwest,maxit,nmin,ncc -c ..function references - real*8 sqrt -c .. -c we set up the parameters tol and maxit - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 90 - if(ipar.lt.0 .or. ipar.gt.1) go to 90 - if(idim.le.0 .or. idim.gt.10) go to 90 - if(k.le.0 .or. k.gt.5) go to 90 - k1 = k+1 - k2 = k1+1 - nmin = 2*k1 - if(m.lt.k1 .or. nest.lt.nmin) go to 90 - ncc = nest*idim - if(mx.lt.m*idim .or. nc.lt.ncc) go to 90 - lwest = m*k1+nest*(6+idim+3*k) - if(lwrk.lt.lwest) go to 90 - if(ipar.ne.0 .or. iopt.gt.0) go to 40 - i1 = 0 - i2 = idim - u(1) = 0. - do 20 i=2,m - dist = 0. - do 10 j=1,idim - i1 = i1+1 - i2 = i2+1 - dist = dist+(x(i2)-x(i1))**2 - 10 continue - u(i) = u(i-1)+sqrt(dist) - 20 continue - if(u(m).le.0.) go to 90 - do 30 i=2,m - u(i) = u(i)/u(m) - 30 continue - ub = 0. - ue = 1. - u(m) = ue - 40 if(ub.gt.u(1) .or. ue.lt.u(m) .or. w(1).le.0.) go to 90 - do 50 i=2,m - if(u(i-1).ge.u(i) .or. 
w(i).le.0.) go to 90 - 50 continue - if(iopt.ge.0) go to 70 - if(n.lt.nmin .or. n.gt.nest) go to 90 - j = n - do 60 i=1,k1 - t(i) = ub - t(j) = ue - j = j-1 - 60 continue - call fpchec(u,m,t,n,k,ier) - if (ier.eq.0) go to 80 - go to 90 - 70 if(s.lt.0.) go to 90 - if(s.eq.0. .and. nest.lt.(m+k1)) go to 90 - ier = 0 -c we partition the working space and determine the spline curve. - 80 ifp = 1 - iz = ifp+nest - ia = iz+ncc - ib = ia+nest*k1 - ig = ib+nest*k2 - iq = ig+nest*k2 - call fppara(iopt,idim,m,u,mx,x,w,ub,ue,k,s,nest,tol,maxit,k1,k2, - * n,t,ncc,c,fp,wrk(ifp),wrk(iz),wrk(ia),wrk(ib),wrk(ig),wrk(iq), - * iwrk,ier) - 90 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/parder.f b/scipy-0.10.1/scipy/interpolate/fitpack/parder.f deleted file mode 100644 index 38092ced20..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/parder.f +++ /dev/null @@ -1,179 +0,0 @@ - subroutine parder(tx,nx,ty,ny,c,kx,ky,nux,nuy,x,mx,y,my,z, - * wrk,lwrk,iwrk,kwrk,ier) -c subroutine parder evaluates on a grid (x(i),y(j)),i=1,...,mx; j=1,... -c ,my the partial derivative ( order nux,nuy) of a bivariate spline -c s(x,y) of degrees kx and ky, given in the b-spline representation. -c -c calling sequence: -c call parder(tx,nx,ty,ny,c,kx,ky,nux,nuy,x,mx,y,my,z,wrk,lwrk, -c * iwrk,kwrk,ier) -c -c input parameters: -c tx : real array, length nx, which contains the position of the -c knots in the x-direction. -c nx : integer, giving the total number of knots in the x-direction -c ty : real array, length ny, which contains the position of the -c knots in the y-direction. -c ny : integer, giving the total number of knots in the y-direction -c c : real array, length (nx-kx-1)*(ny-ky-1), which contains the -c b-spline coefficients. -c kx,ky : integer values, giving the degrees of the spline. -c nux : integer values, specifying the order of the partial -c nuy derivative. 0<=nux=1. -c y : real array of dimension (my). 
-c before entry y(j) must be set to the y co-ordinate of the -c j-th grid point along the y-axis. -c ty(ky+1)<=y(j-1)<=y(j)<=ty(ny-ky), j=2,...,my. -c my : on entry my must specify the number of grid points along -c the y-axis. my >=1. -c wrk : real array of dimension lwrk. used as workspace. -c lwrk : integer, specifying the dimension of wrk. -c lwrk >= mx*(kx+1-nux)+my*(ky+1-nuy)+(nx-kx-1)*(ny-ky-1) -c iwrk : integer array of dimension kwrk. used as workspace. -c kwrk : integer, specifying the dimension of iwrk. kwrk >= mx+my. -c -c output parameters: -c z : real array of dimension (mx*my). -c on succesful exit z(my*(i-1)+j) contains the value of the -c specified partial derivative of s(x,y) at the point -c (x(i),y(j)),i=1,...,mx;j=1,...,my. -c ier : integer error flag -c ier=0 : normal return -c ier=10: invalid input data (see restrictions) -c -c restrictions: -c mx >=1, my >=1, 0 <= nux < kx, 0 <= nuy < ky, kwrk>=mx+my -c lwrk>=mx*(kx+1-nux)+my*(ky+1-nuy)+(nx-kx-1)*(ny-ky-1), -c tx(kx+1) <= x(i-1) <= x(i) <= tx(nx-kx), i=2,...,mx -c ty(ky+1) <= y(j-1) <= y(j) <= ty(ny-ky), j=2,...,my -c -c other subroutines required: -c fpbisp,fpbspl -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1989 -c -c ..scalar arguments.. - integer nx,ny,kx,ky,nux,nuy,mx,my,lwrk,kwrk,ier -c ..array arguments.. - integer iwrk(kwrk) - real*8 tx(nx),ty(ny),c((nx-kx-1)*(ny-ky-1)),x(mx),y(my),z(mx*my), - * wrk(lwrk) -c ..local scalars.. - integer i,iwx,iwy,j,kkx,kky,kx1,ky1,lx,ly,lwest,l1,l2,m,m0,m1, - * nc,nkx1,nky1,nxx,nyy - real*8 ak,fac -c .. -c before starting computations a data check is made. 
if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 - kx1 = kx+1 - ky1 = ky+1 - nkx1 = nx-kx1 - nky1 = ny-ky1 - nc = nkx1*nky1 - if(nux.lt.0 .or. nux.ge.kx) go to 400 - if(nuy.lt.0 .or. nuy.ge.ky) go to 400 - lwest = nc +(kx1-nux)*mx+(ky1-nuy)*my - if(lwrk.lt.lwest) go to 400 - if(kwrk.lt.(mx+my)) go to 400 - if (mx.lt.1) go to 400 - if (mx.eq.1) go to 30 - go to 10 - 10 do 20 i=2,mx - if(x(i).lt.x(i-1)) go to 400 - 20 continue - 30 if (my.lt.1) go to 400 - if (my.eq.1) go to 60 - go to 40 - 40 do 50 i=2,my - if(y(i).lt.y(i-1)) go to 400 - 50 continue - 60 ier = 0 - nxx = nkx1 - nyy = nky1 - kkx = kx - kky = ky -c the partial derivative of order (nux,nuy) of a bivariate spline of -c degrees kx,ky is a bivariate spline of degrees kx-nux,ky-nuy. -c we calculate the b-spline coefficients of this spline - do 70 i=1,nc - wrk(i) = c(i) - 70 continue - if(nux.eq.0) go to 200 - lx = 1 - do 100 j=1,nux - ak = kkx - nxx = nxx-1 - l1 = lx - m0 = 1 - do 90 i=1,nxx - l1 = l1+1 - l2 = l1+kkx - fac = tx(l2)-tx(l1) - if(fac.le.0.) go to 90 - do 80 m=1,nyy - m1 = m0+nyy - wrk(m0) = (wrk(m1)-wrk(m0))*ak/fac - m0 = m0+1 - 80 continue - 90 continue - lx = lx+1 - kkx = kkx-1 - 100 continue - 200 if(nuy.eq.0) go to 300 - ly = 1 - do 230 j=1,nuy - ak = kky - nyy = nyy-1 - l1 = ly - do 220 i=1,nyy - l1 = l1+1 - l2 = l1+kky - fac = ty(l2)-ty(l1) - if(fac.le.0.) 
go to 220 - m0 = i - do 210 m=1,nxx - m1 = m0+1 - wrk(m0) = (wrk(m1)-wrk(m0))*ak/fac - m0 = m0+nky1 - 210 continue - 220 continue - ly = ly+1 - kky = kky-1 - 230 continue - m0 = nyy - m1 = nky1 - do 250 m=2,nxx - do 240 i=1,nyy - m0 = m0+1 - m1 = m1+1 - wrk(m0) = wrk(m1) - 240 continue - m1 = m1+nuy - 250 continue -c we partition the working space and evaluate the partial derivative - 300 iwx = 1+nxx*nyy - iwy = iwx+mx*(kx1-nux) - call fpbisp(tx(nux+1),nx-2*nux,ty(nuy+1),ny-2*nuy,wrk,kkx,kky, - * x,mx,y,my,z,wrk(iwx),wrk(iwy),iwrk(1),iwrk(mx+1)) - 400 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/parsur.f b/scipy-0.10.1/scipy/interpolate/fitpack/parsur.f deleted file mode 100644 index 6d283b039d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/parsur.f +++ /dev/null @@ -1,391 +0,0 @@ - subroutine parsur(iopt,ipar,idim,mu,u,mv,v,f,s,nuest,nvest, - * nu,tu,nv,tv,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c given the set of ordered points f(i,j) in the idim-dimensional space, -c corresponding to grid values (u(i),v(j)) ,i=1,...,mu ; j=1,...,mv, -c parsur determines a smooth approximating spline surface s(u,v) , i.e. -c f1 = s1(u,v) -c ... u(1) <= u <= u(mu) ; v(1) <= v <= v(mv) -c fidim = sidim(u,v) -c with sl(u,v), l=1,2,...,idim bicubic spline functions with common -c knots tu(i),i=1,...,nu in the u-variable and tv(j),j=1,...,nv in the -c v-variable. -c in addition, these splines will be periodic in the variable u if -c ipar(1) = 1 and periodic in the variable v if ipar(2) = 1. -c if iopt=-1, parsur determines the least-squares bicubic spline -c surface according to a given set of knots. -c if iopt>=0, the number of knots of s(u,v) and their position -c is chosen automatically by the routine. the smoothness of s(u,v) is -c achieved by minimalizing the discontinuity jumps of the derivatives -c of the splines at the knots. 
the amount of smoothness of s(u,v) is -c determined by the condition that -c fp=sumi=1,mu(sumj=1,mv(dist(f(i,j)-s(u(i),v(j)))**2))<=s, -c with s a given non-negative constant. -c the fit s(u,v) is given in its b-spline representation and can be -c evaluated by means of routine surev. -c -c calling sequence: -c call parsur(iopt,ipar,idim,mu,u,mv,v,f,s,nuest,nvest,nu,tu, -c * nv,tv,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer flag. unchanged on exit. -c on entry iopt must specify whether a least-squares surface -c (iopt=-1) or a smoothing surface (iopt=0 or 1)must be -c determined. -c if iopt=0 the routine will start with the initial set of -c knots needed for determining the least-squares polynomial -c surface. -c if iopt=1 the routine will continue with the set of knots -c found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt = 1 or iopt = 0. -c ipar : integer array of dimension 2. unchanged on exit. -c on entry ipar(1) must specify whether (ipar(1)=1) or not -c (ipar(1)=0) the splines must be periodic in the variable u. -c on entry ipar(2) must specify whether (ipar(2)=1) or not -c (ipar(2)=0) the splines must be periodic in the variable v. -c idim : integer. on entry idim must specify the dimension of the -c surface. 1 <= idim <= 3. unchanged on exit. -c mu : integer. on entry mu must specify the number of grid points -c along the u-axis. unchanged on exit. -c mu >= mumin where mumin=4-2*ipar(1) -c u : real array of dimension at least (mu). before entry, u(i) -c must be set to the u-co-ordinate of the i-th grid point -c along the u-axis, for i=1,2,...,mu. these values must be -c supplied in strictly ascending order. unchanged on exit. -c mv : integer. on entry mv must specify the number of grid points -c along the v-axis. unchanged on exit. -c mv >= mvmin where mvmin=4-2*ipar(2) -c v : real array of dimension at least (mv). 
before entry, v(j) -c must be set to the v-co-ordinate of the j-th grid point -c along the v-axis, for j=1,2,...,mv. these values must be -c supplied in strictly ascending order. unchanged on exit. -c f : real array of dimension at least (mu*mv*idim). -c before entry, f(mu*mv*(l-1)+mv*(i-1)+j) must be set to the -c l-th co-ordinate of the data point corresponding to the -c the grid point (u(i),v(j)) for l=1,...,idim ,i=1,...,mu -c and j=1,...,mv. unchanged on exit. -c if ipar(1)=1 it is expected that f(mu*mv*(l-1)+mv*(mu-1)+j) -c = f(mu*mv*(l-1)+j), l=1,...,idim ; j=1,...,mv -c if ipar(2)=1 it is expected that f(mu*mv*(l-1)+mv*(i-1)+mv) -c = f(mu*mv*(l-1)+mv*(i-1)+1), l=1,...,idim ; i=1,...,mu -c s : real. on entry (if iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments -c nuest : integer. unchanged on exit. -c nvest : integer. unchanged on exit. -c on entry, nuest and nvest must specify an upper bound for the -c number of knots required in the u- and v-directions respect. -c these numbers will also determine the storage space needed by -c the routine. nuest >= 8, nvest >= 8. -c in most practical situation nuest = mu/2, nvest=mv/2, will -c be sufficient. always large enough are nuest=mu+4+2*ipar(1), -c nvest = mv+4+2*ipar(2), the number of knots needed for -c interpolation (s=0). see also further comments. -c nu : integer. -c unless ier=10 (in case iopt>=0), nu will contain the total -c number of knots with respect to the u-variable, of the spline -c surface returned. if the computation mode iopt=1 is used, -c the value of nu should be left unchanged between subsequent -c calls. in case iopt=-1, the value of nu should be specified -c on entry. -c tu : real array of dimension at least (nuest). -c on succesful exit, this array will contain the knots of the -c splines with respect to the u-variable, i.e. 
the position of -c the interior knots tu(5),...,tu(nu-4) as well as the position -c of the additional knots tu(1),...,tu(4) and tu(nu-3),..., -c tu(nu) needed for the b-spline representation. -c if the computation mode iopt=1 is used,the values of tu(1) -c ...,tu(nu) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values tu(5), -c ...tu(nu-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c nv : integer. -c unless ier=10 (in case iopt>=0), nv will contain the total -c number of knots with respect to the v-variable, of the spline -c surface returned. if the computation mode iopt=1 is used, -c the value of nv should be left unchanged between subsequent -c calls. in case iopt=-1, the value of nv should be specified -c on entry. -c tv : real array of dimension at least (nvest). -c on succesful exit, this array will contain the knots of the -c splines with respect to the v-variable, i.e. the position of -c the interior knots tv(5),...,tv(nv-4) as well as the position -c of the additional knots tv(1),...,tv(4) and tv(nv-3),..., -c tv(nv) needed for the b-spline representation. -c if the computation mode iopt=1 is used,the values of tv(1) -c ...,tv(nv) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values tv(5), -c ...tv(nv-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c c : real array of dimension at least (nuest-4)*(nvest-4)*idim. -c on succesful exit, c contains the coefficients of the spline -c approximation s(u,v) -c fp : real. unless ier=10, fp contains the sum of squared -c residuals of the spline surface returned. -c wrk : real array of dimension (lwrk). used as workspace. -c if the computation mode iopt=1 is used the values of -c wrk(1),...,wrk(4) should be left unchanged between subsequent -c calls. -c lwrk : integer. 
on entry lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. -c lwrk must not be too small. -c lwrk >= 4+nuest*(mv*idim+11+4*ipar(1))+nvest*(11+4*ipar(2))+ -c 4*(mu+mv)+q*idim where q is the larger of mv and nuest. -c iwrk : integer array of dimension (kwrk). used as workspace. -c if the computation mode iopt=1 is used the values of -c iwrk(1),.,iwrk(3) should be left unchanged between subsequent -c calls. -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= 3+mu+mv+nuest+nvest. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the surface returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline surface returned is an -c interpolating surface (fp=0). -c ier=-2 : normal return. the surface returned is the least-squares -c polynomial surface. in this extreme case fp gives the -c upper bound for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameters nuest and -c nvest. -c probably causes : nuest or nvest too small. if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the least-squares surface -c according to the current set of knots. the parameter fp -c gives the corresponding sum of squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing surface with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=3 : error. 
the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing surface -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, 0<=ipar(1)<=1, 0<=ipar(2)<=1, 1 <=idim<=3 -c mu >= 4-2*ipar(1),mv >= 4-2*ipar(2), nuest >=8, nvest >= 8, -c kwrk>=3+mu+mv+nuest+nvest, -c lwrk >= 4+nuest*(mv*idim+11+4*ipar(1))+nvest*(11+4*ipar(2)) -c +4*(mu+mv)+max(nuest,mv)*idim -c u(i-1)=0: s>=0 -c if s=0: nuest>=mu+4+2*ipar(1) -c nvest>=mv+4+2*ipar(2) -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the surface will be too smooth and signal will be -c lost ; if s is too small the surface will pick up too much noise. in -c the extreme cases the program will return an interpolating surface -c if s=0 and the constrained least-squares polynomial surface if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the accuracy of the data values. -c if the user has an idea of the statistical errors on the data, he -c can also find a proper estimate for s. 
for, by assuming that, if he -c specifies the right s, parsur will return a surface s(u,v) which -c exactly reproduces the surface underlying the data he can evaluate -c the sum(dist(f(i,j)-s(u(i),v(j)))**2) to find a good estimate for s. -c for example, if he knows that the statistical errors on his f(i,j)- -c values is not greater than 0.1, he may expect that a good s should -c have a value not larger than mu*mv*(0.1)**2. -c if nothing is known about the statistical error in f(i,j), s must -c be determined by trial and error, taking account of the comments -c above. the best is then to start with a very large value of s (to -c determine the le-sq polynomial surface and the corresponding upper -c bound fp0 for s) and then to progressively decrease the value of s -c ( say by a factor 10 in the beginning, i.e. s=fp0/10,fp0/100,... -c and more carefully as the approximation shows more detail) to -c obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt = 1 the program will continue with the knots found at -c the last call of the routine. this will save a lot of computation -c time if parsur is called repeatedly for different values of s. -c the number of knots of the surface returned and their location will -c depend on the value of s and on the complexity of the shape of the -c surface underlying the data. if the computation mode iopt = 1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt=1,the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c parsur once more with the chosen value for s but now with iopt=0. 
-c indeed, parsur may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c the number of knots may also depend on the upper bounds nuest and -c nvest. indeed, if at a certain stage in parsur the number of knots -c in one direction (say nu) has reached the value of its upper bound -c (nuest), then from that moment on all subsequent knots are added -c in the other (v) direction. this may indicate that the value of -c nuest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c for example, by setting nuest=8 (the lowest allowable value for -c nuest), the user can indicate that he wants an approximation with -c splines which are simple cubic polynomials in the variable u. -c -c other subroutines required: -c fppasu,fpchec,fpchep,fpknot,fprati,fpgrpa,fptrnp,fpback, -c fpbacp,fpbspl,fptrpe,fpdisc,fpgivs,fprota -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1989 -c -c .. -c ..scalar arguments.. - real*8 s,fp - integer iopt,idim,mu,mv,nuest,nvest,nu,nv,lwrk,kwrk,ier -c ..array arguments.. - real*8 u(mu),v(mv),f(mu*mv*idim),tu(nuest),tv(nvest), - * c((nuest-4)*(nvest-4)*idim),wrk(lwrk) - integer ipar(2),iwrk(kwrk) -c ..local scalars.. - real*8 tol,ub,ue,vb,ve,peru,perv - integer i,j,jwrk,kndu,kndv,knru,knrv,kwest,l1,l2,l3,l4, - * lfpu,lfpv,lwest,lww,maxit,nc,mf,mumin,mvmin -c ..function references.. - integer max0 -c ..subroutine references.. -c fppasu,fpchec,fpchep -c .. -c we set up the parameters tol and maxit. - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt.lt.(-1) .or. 
iopt.gt.1) go to 200 - if(ipar(1).lt.0 .or. ipar(1).gt.1) go to 200 - if(ipar(2).lt.0 .or. ipar(2).gt.1) go to 200 - if(idim.le.0 .or. idim.gt.3) go to 200 - mumin = 4-2*ipar(1) - if(mu.lt.mumin .or. nuest.lt.8) go to 200 - mvmin = 4-2*ipar(2) - if(mv.lt.mvmin .or. nvest.lt.8) go to 200 - mf = mu*mv - nc = (nuest-4)*(nvest-4) - lwest = 4+nuest*(mv*idim+11+4*ipar(1))+nvest*(11+4*ipar(2))+ - * 4*(mu+mv)+max0(nuest,mv)*idim - kwest = 3+mu+mv+nuest+nvest - if(lwrk.lt.lwest .or. kwrk.lt.kwest) go to 200 - do 10 i=2,mu - if(u(i-1).ge.u(i)) go to 200 - 10 continue - do 20 i=2,mv - if(v(i-1).ge.v(i)) go to 200 - 20 continue - if(iopt.ge.0) go to 100 - if(nu.lt.8 .or. nu.gt.nuest) go to 200 - ub = u(1) - ue = u(mu) - if (ipar(1).ne.0) go to 40 - j = nu - do 30 i=1,4 - tu(i) = ub - tu(j) = ue - j = j-1 - 30 continue - call fpchec(u,mu,tu,nu,3,ier) - if(ier.ne.0) go to 200 - go to 60 - 40 l1 = 4 - l2 = l1 - l3 = nu-3 - l4 = l3 - peru = ue-ub - tu(l2) = ub - tu(l3) = ue - do 50 j=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tu(l2) = tu(l4)-peru - tu(l3) = tu(l1)+peru - 50 continue - call fpchep(u,mu,tu,nu,3,ier) - if(ier.ne.0) go to 200 - 60 if(nv.lt.8 .or. nv.gt.nvest) go to 200 - vb = v(1) - ve = v(mv) - if (ipar(2).ne.0) go to 80 - j = nv - do 70 i=1,4 - tv(i) = vb - tv(j) = ve - j = j-1 - 70 continue - call fpchec(v,mv,tv,nv,3,ier) - if(ier.ne.0) go to 200 - go to 150 - 80 l1 = 4 - l2 = l1 - l3 = nv-3 - l4 = l3 - perv = ve-vb - tv(l2) = vb - tv(l3) = ve - do 90 j=1,3 - l1 = l1+1 - l2 = l2-1 - l3 = l3+1 - l4 = l4-1 - tv(l2) = tv(l4)-perv - tv(l3) = tv(l1)+perv - 90 continue - call fpchep(v,mv,tv,nv,3,ier) - if (ier.eq.0) go to 150 - go to 200 - 100 if(s.lt.0.) go to 200 - if(s.eq.0. .and. (nuest.lt.(mu+4+2*ipar(1)) .or. 
- * nvest.lt.(mv+4+2*ipar(2))) )go to 200 - ier = 0 -c we partition the working space and determine the spline approximation - 150 lfpu = 5 - lfpv = lfpu+nuest - lww = lfpv+nvest - jwrk = lwrk-4-nuest-nvest - knru = 4 - knrv = knru+mu - kndu = knrv+mv - kndv = kndu+nuest - call fppasu(iopt,ipar,idim,u,mu,v,mv,f,mf,s,nuest,nvest, - * tol,maxit,nc,nu,tu,nv,tv,c,fp,wrk(1),wrk(2),wrk(3),wrk(4), - * wrk(lfpu),wrk(lfpv),iwrk(1),iwrk(2),iwrk(3),iwrk(knru), - * iwrk(knrv),iwrk(kndu),iwrk(kndv),wrk(lww),jwrk,ier) - 200 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/percur.f b/scipy-0.10.1/scipy/interpolate/fitpack/percur.f deleted file mode 100644 index 2d51e1d89c..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/percur.f +++ /dev/null @@ -1,274 +0,0 @@ - subroutine percur(iopt,m,x,y,w,k,s,nest,n,t,c,fp, - * wrk,lwrk,iwrk,ier) -c given the set of data points (x(i),y(i)) and the set of positive -c numbers w(i),i=1,2,...,m-1, subroutine percur determines a smooth -c periodic spline approximation of degree k with period per=x(m)-x(1). -c if iopt=-1 percur calculates the weighted least-squares periodic -c spline according to a given set of knots. -c if iopt>=0 the number of knots of the spline s(x) and the position -c t(j),j=1,2,...,n is chosen automatically by the routine. the smooth- -c ness of s(x) is then achieved by minimalizing the discontinuity -c jumps of the k-th derivative of s(x) at the knots t(j),j=k+2,k+3,..., -c n-k-1. the amount of smoothness is determined by the condition that -c f(p)=sum((w(i)*(y(i)-s(x(i))))**2) be <= s, with s a given non- -c negative constant, called the smoothing factor. -c the fit s(x) is given in the b-spline representation (b-spline coef- -c ficients c(j),j=1,2,...,n-k-1) and can be evaluated by means of -c subroutine splev. -c -c calling sequence: -c call percur(iopt,m,x,y,w,k,s,nest,n,t,c,fp,wrk, -c * lwrk,iwrk,ier) -c -c parameters: -c iopt : integer flag. 
on entry iopt must specify whether a weighted -c least-squares spline (iopt=-1) or a smoothing spline (iopt= -c 0 or 1) must be determined. if iopt=0 the routine will start -c with an initial set of knots t(i)=x(1)+(x(m)-x(1))*(i-k-1), -c i=1,2,...,2*k+2. if iopt=1 the routine will continue with -c the knots found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately -c preceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m > 1. unchanged on exit. -c x : real array of dimension at least (m). before entry, x(i) -c must be set to the i-th value of the independent variable x, -c for i=1,2,...,m. these values must be supplied in strictly -c ascending order. x(m) only indicates the length of the -c period of the spline, i.e per=x(m)-x(1). -c unchanged on exit. -c y : real array of dimension at least (m). before entry, y(i) -c must be set to the i-th value of the dependent variable y, -c for i=1,2,...,m-1. the element y(m) is not used. -c unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) -c must be set to the i-th value in the set of weights. the -c w(i) must be strictly positive. w(m) is not used. -c see also further comments. unchanged on exit. -c k : integer. on entry k must specify the degree of the spline. -c 1<=k<=5. it is recommended to use cubic splines (k=3). -c the user is strongly dissuaded from choosing k even,together -c with a small s-value. unchanged on exit. -c s : real.on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments. -c nest : integer. on entry nest must contain an over-estimate of the -c total number of knots of the spline returned, to indicate -c the storage space available to the routine. nest >=2*k+2. -c in most practical situation nest=m/2 will be sufficient. 
-c always large enough is nest=m+2*k,the number of knots needed -c for interpolation (s=0). unchanged on exit. -c n : integer. -c unless ier = 10 (in case iopt >=0), n will contain the -c total number of knots of the spline approximation returned. -c if the computation mode iopt=1 is used this value of n -c should be left unchanged between subsequent calls. -c in case iopt=-1, the value of n must be specified on entry. -c t : real array of dimension at least (nest). -c on succesful exit, this array will contain the knots of the -c spline,i.e. the position of the interior knots t(k+2),t(k+3) -c ...,t(n-k-1) as well as the position of the additional knots -c t(1),t(2),...,t(k+1)=x(1) and t(n-k)=x(m),..,t(n) needed for -c the b-spline representation. -c if the computation mode iopt=1 is used, the values of t(1), -c t(2),...,t(n) should be left unchanged between subsequent -c calls. if the computation mode iopt=-1 is used, the values -c t(k+2),...,t(n-k-1) must be supplied by the user, before -c entry. see also the restrictions (ier=10). -c c : real array of dimension at least (nest). -c on succesful exit, this array will contain the coefficients -c c(1),c(2),..,c(n-k-1) in the b-spline representation of s(x) -c fp : real. unless ier = 10, fp contains the weighted sum of -c squared residuals of the spline approximation returned. -c wrk : real array of dimension at least (m*(k+1)+nest*(8+5*k)). -c used as working space. if the computation mode iopt=1 is -c used, the values wrk(1),...,wrk(n) should be left unchanged -c between subsequent calls. -c lwrk : integer. on entry,lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. lwrk -c must not be too small (see wrk). unchanged on exit. -c iwrk : integer array of dimension at least (nest). -c used as working space. if the computation mode iopt=1 is -c used,the values iwrk(1),...,iwrk(n) should be left unchanged -c between subsequent calls. -c ier : integer. 
unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is an interpolating -c periodic spline (fp=0). -c ier=-2 : normal return. the spline returned is the weighted least- -c squares constant. in this extreme case fp gives the upper -c bound fp0 for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameter nest. -c probably causes : nest too small. if nest is already -c large (say nest > m/2), it may also indicate that s is -c too small -c the approximation returned is the least-squares periodic -c spline according to the knots t(1),t(2),...,t(n). (n=nest) -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. 
-c -1<=iopt<=1, 1<=k<=5, m>1, nest>2*k+2, w(i)>0,i=1,...,m-1 -c x(1)=(k+1)*m+nest*(8+5*k) -c if iopt=-1: 2*k+2<=n<=min(nest,m+2*k) -c x(1)=0: s>=0 -c if s=0 : nest >= m+2*k -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating periodic -c spline if s=0 and the weighted least-squares constant if s is very -c large. between these extremes, a properly chosen s will result in -c a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c y(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). if nothing is known about the statistical error in y(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c constant and the corresponding upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximation shows more detail) to obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. 
at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if percur is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. but, if the computation mode iopt=1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c percur once more with the selected value for s but now with iopt=0. -c indeed, percur may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c -c other subroutines required: -c fpbacp,fpbspl,fpchep,fpperi,fpdisc,fpgivs,fpknot,fprati,fprota -c -c references: -c dierckx p. : algorithms for smoothing data with periodic and -c parametric splines, computer graphics and image -c processing 20 (1982) 171-184. -c dierckx p. : algorithms for smoothing data with periodic and param- -c etric splines, report tw55, dept. computer science, -c k.u.leuven, 1981. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1987 -c -c .. -c ..scalar arguments.. - real*8 s,fp - integer iopt,m,k,nest,n,lwrk,ier -c ..array arguments.. 
- real*8 x(m),y(m),w(m),t(nest),c(nest),wrk(lwrk) - integer iwrk(nest) -c ..local scalars.. - real*8 per,tol - integer i,ia1,ia2,ib,ifp,ig1,ig2,iq,iz,i1,i2,j1,j2,k1,k2,lwest, - * maxit,m1,nmin -c ..subroutine references.. -c perper,pcheck -c .. -c we set up the parameters tol and maxit - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(k.le.0 .or. k.gt.5) go to 50 - k1 = k+1 - k2 = k1+1 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 50 - nmin = 2*k1 - if(m.lt.2 .or. nest.lt.nmin) go to 50 - lwest = m*k1+nest*(8+5*k) - if(lwrk.lt.lwest) go to 50 - m1 = m-1 - do 10 i=1,m1 - if(x(i).ge.x(i+1) .or. w(i).le.0.) go to 50 - 10 continue - if(iopt.ge.0) go to 30 - if(n.le.nmin .or. n.gt.nest) go to 50 - per = x(m)-x(1) - j1 = k1 - t(j1) = x(1) - i1 = n-k - t(i1) = x(m) - j2 = j1 - i2 = i1 - do 20 i=1,k - i1 = i1+1 - i2 = i2-1 - j1 = j1+1 - j2 = j2-1 - t(j2) = t(i2)-per - t(i1) = t(j1)+per - 20 continue - call fpchep(x,m,t,n,k,ier) - if (ier.eq.0) go to 40 - go to 50 - 30 if(s.lt.0.) go to 50 - if(s.eq.0. .and. nest.lt.(m+2*k)) go to 50 - ier = 0 -c we partition the working space and determine the spline approximation. 
- 40 ifp = 1 - iz = ifp+nest - ia1 = iz+nest - ia2 = ia1+nest*k1 - ib = ia2+nest*k - ig1 = ib+nest*k2 - ig2 = ig1+nest*k2 - iq = ig2+nest*k1 - call fpperi(iopt,x,y,w,m,k,s,nest,tol,maxit,k1,k2,n,t,c,fp, - * wrk(ifp),wrk(iz),wrk(ia1),wrk(ia2),wrk(ib),wrk(ig1),wrk(ig2), - * wrk(iq),iwrk,ier) - 50 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/pogrid.f b/scipy-0.10.1/scipy/interpolate/fitpack/pogrid.f deleted file mode 100644 index e13edcbaad..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/pogrid.f +++ /dev/null @@ -1,466 +0,0 @@ - subroutine pogrid(iopt,ider,mu,u,mv,v,z,z0,r,s,nuest,nvest, - * nu,tu,nv,tv,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c subroutine pogrid fits a function f(x,y) to a set of data points -c z(i,j) given at the nodes (x,y)=(u(i)*cos(v(j)),u(i)*sin(v(j))), -c i=1,...,mu ; j=1,...,mv , of a radius-angle grid over a disc -c x ** 2 + y ** 2 <= r ** 2 . -c -c this approximation problem is reduced to the determination of a -c bicubic spline s(u,v) smoothing the data (u(i),v(j),z(i,j)) on the -c rectangle 0<=u<=r, v(1)<=v<=v(1)+2*pi -c in order to have continuous partial derivatives -c i+j -c d f(0,0) -c g(i,j) = ---------- -c i j -c dx dy -c -c s(u,v)=f(x,y) must satisfy the following conditions -c -c (1) s(0,v) = g(0,0) v(1)<=v<= v(1)+2*pi -c -c d s(0,v) -c (2) -------- = cos(v)*g(1,0)+sin(v)*g(0,1) v(1)<=v<= v(1)+2*pi -c d u -c -c moreover, s(u,v) must be periodic in the variable v, i.e. -c -c j j -c d s(u,vb) d s(u,ve) -c (3) ---------- = --------- 0 <=u<= r, j=0,1,2 , vb=v(1), -c j j ve=vb+2*pi -c d v d v -c -c the number of knots of s(u,v) and their position tu(i),i=1,2,...,nu; -c tv(j),j=1,2,...,nv, is chosen automatically by the routine. the -c smoothness of s(u,v) is achieved by minimalizing the discontinuity -c jumps of the derivatives of the spline at the knots. 
the amount of -c smoothness of s(u,v) is determined by the condition that -c fp=sumi=1,mu(sumj=1,mv((z(i,j)-s(u(i),v(j)))**2))+(z0-g(0,0))**2<=s, -c with s a given non-negative constant. -c the fit s(u,v) is given in its b-spline representation and can be -c evaluated by means of routine bispev. f(x,y) = s(u,v) can also be -c evaluated by means of function program evapol. -c -c calling sequence: -c call pogrid(iopt,ider,mu,u,mv,v,z,z0,r,s,nuest,nvest,nu,tu, -c * ,nv,tv,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer array of dimension 3, specifying different options. -c unchanged on exit. -c iopt(1):on entry iopt(1) must specify whether a least-squares spline -c (iopt(1)=-1) or a smoothing spline (iopt(1)=0 or 1) must be -c determined. -c if iopt(1)=0 the routine will start with an initial set of -c knots tu(i)=0,tu(i+4)=r,i=1,...,4;tv(i)=v(1)+(i-4)*2*pi,i=1,. -c ...,8. -c if iopt(1)=1 the routine will continue with the set of knots -c found at the last call of the routine. -c attention: a call with iopt(1)=1 must always be immediately -c preceded by another call with iopt(1) = 1 or iopt(1) = 0. -c iopt(2):on entry iopt(2) must specify the requested order of conti- -c nuity for f(x,y) at the origin. -c if iopt(2)=0 only condition (1) must be fulfilled and -c if iopt(2)=1 conditions (1)+(2) must be fulfilled. -c iopt(3):on entry iopt(3) must specify whether (iopt(3)=1) or not -c (iopt(3)=0) the approximation f(x,y) must vanish at the -c boundary of the approximation domain. -c ider : integer array of dimension 2, specifying different options. -c unchanged on exit. -c ider(1):on entry ider(1) must specify whether (ider(1)=0 or 1) or not -c (ider(1)=-1) there is a data value z0 at the origin. -c if ider(1)=1, z0 will be considered to be the right function -c value, and it will be fitted exactly (g(0,0)=z0=c(1)). -c if ider(1)=0, z0 will be considered to be a data value just -c like the other data values z(i,j). 
-c ider(2):on entry ider(2) must specify whether (ider(2)=1) or not -c (ider(2)=0) f(x,y) must have vanishing partial derivatives -c g(1,0) and g(0,1) at the origin. (in case iopt(2)=1) -c mu : integer. on entry mu must specify the number of grid points -c along the u-axis. unchanged on exit. -c mu >= mumin where mumin=4-iopt(3)-ider(2) if ider(1)<0 -c =3-iopt(3)-ider(2) if ider(1)>=0 -c u : real array of dimension at least (mu). before entry, u(i) -c must be set to the u-co-ordinate of the i-th grid point -c along the u-axis, for i=1,2,...,mu. these values must be -c positive and supplied in strictly ascending order. -c unchanged on exit. -c mv : integer. on entry mv must specify the number of grid points -c along the v-axis. mv > 3 . unchanged on exit. -c v : real array of dimension at least (mv). before entry, v(j) -c must be set to the v-co-ordinate of the j-th grid point -c along the v-axis, for j=1,2,...,mv. these values must be -c supplied in strictly ascending order. unchanged on exit. -c -pi <= v(1) < pi , v(mv) < v(1)+2*pi. -c z : real array of dimension at least (mu*mv). -c before entry, z(mv*(i-1)+j) must be set to the data value at -c the grid point (u(i),v(j)) for i=1,...,mu and j=1,...,mv. -c unchanged on exit. -c z0 : real value. on entry (if ider(1) >=0 ) z0 must specify the -c data value at the origin. unchanged on exit. -c r : real value. on entry r must specify the radius of the disk. -c r>=u(mu) (>u(mu) if iopt(3)=1). unchanged on exit. -c s : real. on entry (if iopt(1)>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments -c nuest : integer. unchanged on exit. -c nvest : integer. unchanged on exit. -c on entry, nuest and nvest must specify an upper bound for the -c number of knots required in the u- and v-directions respect. -c these numbers will also determine the storage space needed by -c the routine. nuest >= 8, nvest >= 8. 
-c in most practical situation nuest = mu/2, nvest=mv/2, will -c be sufficient. always large enough are nuest=mu+5+iopt(2)+ -c iopt(3), nvest = mv+7, the number of knots needed for -c interpolation (s=0). see also further comments. -c nu : integer. -c unless ier=10 (in case iopt(1)>=0), nu will contain the total -c number of knots with respect to the u-variable, of the spline -c approximation returned. if the computation mode iopt(1)=1 is -c used, the value of nu should be left unchanged between sub- -c sequent calls. in case iopt(1)=-1, the value of nu should be -c specified on entry. -c tu : real array of dimension at least (nuest). -c on succesful exit, this array will contain the knots of the -c spline with respect to the u-variable, i.e. the position of -c the interior knots tu(5),...,tu(nu-4) as well as the position -c of the additional knots tu(1)=...=tu(4)=0 and tu(nu-3)=...= -c tu(nu)=r needed for the b-spline representation. -c if the computation mode iopt(1)=1 is used,the values of tu(1) -c ...,tu(nu) should be left unchanged between subsequent calls. -c if the computation mode iopt(1)=-1 is used, the values tu(5), -c ...tu(nu-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c nv : integer. -c unless ier=10 (in case iopt(1)>=0), nv will contain the total -c number of knots with respect to the v-variable, of the spline -c approximation returned. if the computation mode iopt(1)=1 is -c used, the value of nv should be left unchanged between sub- -c sequent calls. in case iopt(1) = -1, the value of nv should -c be specified on entry. -c tv : real array of dimension at least (nvest). -c on succesful exit, this array will contain the knots of the -c spline with respect to the v-variable, i.e. the position of -c the interior knots tv(5),...,tv(nv-4) as well as the position -c of the additional knots tv(1),...,tv(4) and tv(nv-3),..., -c tv(nv) needed for the b-spline representation. 
-c if the computation mode iopt(1)=1 is used,the values of tv(1) -c ...,tv(nv) should be left unchanged between subsequent calls. -c if the computation mode iopt(1)=-1 is used, the values tv(5), -c ...tv(nv-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c c : real array of dimension at least (nuest-4)*(nvest-4). -c on succesful exit, c contains the coefficients of the spline -c approximation s(u,v) -c fp : real. unless ier=10, fp contains the sum of squared -c residuals of the spline approximation returned. -c wrk : real array of dimension (lwrk). used as workspace. -c if the computation mode iopt(1)=1 is used the values of -c wrk(1),...,wrk(8) should be left unchanged between subsequent -c calls. -c lwrk : integer. on entry lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. -c lwrk must not be too small. -c lwrk >= 8+nuest*(mv+nvest+3)+nvest*21+4*mu+6*mv+q -c where q is the larger of (mv+nvest) and nuest. -c iwrk : integer array of dimension (kwrk). used as workspace. -c if the computation mode iopt(1)=1 is used the values of -c iwrk(1),.,iwrk(4) should be left unchanged between subsequent -c calls. -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= 4+mu+mv+nuest+nvest. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is an interpolating -c spline (fp=0). -c ier=-2 : normal return. the spline returned is the least-squares -c constrained polynomial. in this extreme case fp gives the -c upper bound for the smoothing factor s. -c ier=1 : error. 
the required storage space exceeds the available -c storage space, as specified by the parameters nuest and -c nvest. -c probably causes : nuest or nvest too small. if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the least-squares spline -c according to the current set of knots. the parameter fp -c gives the corresponding sum of squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1, -c -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0. -c mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8, -c kwrk>=4+mu+mv+nuest+nvest, -c lwrk >= 8+nuest*(mv+nvest+3)+nvest*21+4*mu+6*mv+ -c max(nuest,mv+nvest) -c 0< u(i-1)=0: s>=0 -c if s=0: nuest>=mu+5+iopt(2)+iopt(3), nvest>=mv+7 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c pogrid does not allow individual weighting of the data-values. -c so, if these were determined to widely different accuracies, then -c perhaps the general data set routine polar should rather be used -c in spite of efficiency. 
-c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the constrained least-squares polynomial(degrees 3,0)if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the accuracy of the data values. -c if the user has an idea of the statistical errors on the data, he -c can also find a proper estimate for s. for, by assuming that, if he -c specifies the right s, pogrid will return a spline s(u,v) which -c exactly reproduces the function underlying the data he can evaluate -c the sum((z(i,j)-s(u(i),v(j)))**2) to find a good estimate for this s -c for example, if he knows that the statistical errors on his z(i,j)- -c values is not greater than 0.1, he may expect that a good s should -c have a value not larger than mu*mv*(0.1)**2. -c if nothing is known about the statistical error in z(i,j), s must -c be determined by trial and error, taking account of the comments -c above. the best is then to start with a very large value of s (to -c determine the least-squares polynomial and the corresponding upper -c bound fp0 for s) and then to progressively decrease the value of s -c ( say by a factor 10 in the beginning, i.e. s=fp0/10,fp0/100,... -c and more carefully as the approximation shows more detail) to -c obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. 
at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt(1)=0. -c if iopt(1) = 1 the program will continue with the knots found at -c the last call of the routine. this will save a lot of computation -c time if pogrid is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. if the computation mode iopt(1) = 1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt(1)=1,the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c pogrid once more with the chosen value for s but now with iopt(1)=0. -c indeed, pogrid may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c the number of knots may also depend on the upper bounds nuest and -c nvest. indeed, if at a certain stage in pogrid the number of knots -c in one direction (say nu) has reached the value of its upper bound -c (nuest), then from that moment on all subsequent knots are added -c in the other (v) direction. this may indicate that the value of -c nuest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c for example, by setting nuest=8 (the lowest allowable value for -c nuest), the user can indicate that he wants an approximation which -c is a simple cubic polynomial in the variable u. -c -c other subroutines required: -c fppogr,fpchec,fpchep,fpknot,fpopdi,fprati,fpgrdi,fpsysy,fpback, -c fpbacp,fpbspl,fpcyt1,fpcyt2,fpdisc,fpgivs,fprota -c -c references: -c dierckx p. 
: fast algorithms for smoothing data over a disc or a -c sphere using tensor product splines, in "algorithms -c for approximation", ed. j.c.mason and m.g.cox, -c clarendon press oxford, 1987, pp. 51-65 -c dierckx p. : fast algorithms for smoothing data over a disc or a -c sphere using tensor product splines, report tw73, dept. -c computer science,k.u.leuven, 1985. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : july 1985 -c latest update : march 1989 -c -c .. -c ..scalar arguments.. - real*8 z0,r,s,fp - integer mu,mv,nuest,nvest,nu,nv,lwrk,kwrk,ier -c ..array arguments.. - integer iopt(3),ider(2),iwrk(kwrk) - real*8 u(mu),v(mv),z(mu*mv),c((nuest-4)*(nvest-4)),tu(nuest), - * tv(nvest),wrk(lwrk) -c ..local scalars.. - real*8 per,pi,tol,uu,ve,zmax,zmin,one,half,rn,zb - integer i,i1,i2,j,jwrk,j1,j2,kndu,kndv,knru,knrv,kwest,l, - * ldz,lfpu,lfpv,lwest,lww,m,maxit,mumin,muu,nc -c ..function references.. - real*8 datan2 - integer max0 -c ..subroutine references.. -c fpchec,fpchep,fppogr -c .. -c set constants - one = 1d0 - half = 0.5e0 - pi = datan2(0d0,-one) - per = pi+pi - ve = v(1)+per -c we set up the parameters tol and maxit. - maxit = 20 - tol = 0.1e-02 -c before starting computations, a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt(1).lt.(-1) .or. iopt(1).gt.1) go to 200 - if(iopt(2).lt.0 .or. iopt(2).gt.1) go to 200 - if(iopt(3).lt.0 .or. iopt(3).gt.1) go to 200 - if(ider(1).lt.(-1) .or. ider(1).gt.1) go to 200 - if(ider(2).lt.0 .or. ider(2).gt.1) go to 200 - if(ider(2).eq.1 .and. iopt(2).eq.0) go to 200 - mumin = 4-iopt(3)-ider(2) - if(ider(1).ge.0) mumin = mumin-1 - if(mu.lt.mumin .or. mv.lt.4) go to 200 - if(nuest.lt.8 .or. 
nvest.lt.8) go to 200 - m = mu*mv - nc = (nuest-4)*(nvest-4) - lwest = 8+nuest*(mv+nvest+3)+21*nvest+4*mu+6*mv+ - * max0(nuest,mv+nvest) - kwest = 4+mu+mv+nuest+nvest - if(lwrk.lt.lwest .or. kwrk.lt.kwest) go to 200 - if(u(1).le.0. .or. u(mu).gt.r) go to 200 - if(iopt(3).eq.0) go to 10 - if(u(mu).eq.r) go to 200 - 10 if(mu.eq.1) go to 30 - do 20 i=2,mu - if(u(i-1).ge.u(i)) go to 200 - 20 continue - 30 if(v(1).lt. (-pi) .or. v(1).ge.pi ) go to 200 - if(v(mv).ge.v(1)+per) go to 200 - do 40 i=2,mv - if(v(i-1).ge.v(i)) go to 200 - 40 continue - if(iopt(1).gt.0) go to 140 -c if not given, we compute an estimate for z0. - if(ider(1).lt.0) go to 50 - zb = z0 - go to 70 - 50 zb = 0. - do 60 i=1,mv - zb = zb+z(i) - 60 continue - rn = mv - zb = zb/rn -c we determine the range of z-values. - 70 zmin = zb - zmax = zb - do 80 i=1,m - if(z(i).lt.zmin) zmin = z(i) - if(z(i).gt.zmax) zmax = z(i) - 80 continue - wrk(5) = zb - wrk(6) = 0. - wrk(7) = 0. - wrk(8) = zmax -zmin - iwrk(4) = mu - if(iopt(1).eq.0) go to 140 - if(nu.lt.8 .or. nu.gt.nuest) go to 200 - if(nv.lt.11 .or. nv.gt.nvest) go to 200 - j = nu - do 90 i=1,4 - tu(i) = 0. - tu(j) = r - j = j-1 - 90 continue - l = 9 - wrk(l) = 0. - if(iopt(2).eq.0) go to 100 - l = l+1 - uu = u(1) - if(uu.gt.tu(5)) uu = tu(5) - wrk(l) = uu*half - 100 do 110 i=1,mu - l = l+1 - wrk(l) = u(i) - 110 continue - if(iopt(3).eq.0) go to 120 - l = l+1 - wrk(l) = r - 120 muu = l-8 - call fpchec(wrk(9),muu,tu,nu,3,ier) - if(ier.ne.0) go to 200 - j1 = 4 - tv(j1) = v(1) - i1 = nv-3 - tv(i1) = ve - j2 = j1 - i2 = i1 - do 130 i=1,3 - i1 = i1+1 - i2 = i2-1 - j1 = j1+1 - j2 = j2-1 - tv(j2) = tv(i2)-per - tv(i1) = tv(j1)+per - 130 continue - l = 9 - do 135 i=1,mv - wrk(l) = v(i) - l = l+1 - 135 continue - wrk(l) = ve - call fpchep(wrk(9),mv+1,tv,nv,3,ier) - if (ier.eq.0) go to 150 - go to 200 - 140 if(s.lt.0.) go to 200 - if(s.eq.0. .and. (nuest.lt.(mu+5+iopt(2)+iopt(3)) .or. 
- * nvest.lt.(mv+7)) ) go to 200 -c we partition the working space and determine the spline approximation - 150 ldz = 5 - lfpu = 9 - lfpv = lfpu+nuest - lww = lfpv+nvest - jwrk = lwrk-8-nuest-nvest - knru = 5 - knrv = knru+mu - kndu = knrv+mv - kndv = kndu+nuest - call fppogr(iopt,ider,u,mu,v,mv,z,m,zb,r,s,nuest,nvest,tol,maxit, - * nc,nu,tu,nv,tv,c,fp,wrk(1),wrk(2),wrk(3),wrk(4),wrk(lfpu), - * wrk(lfpv),wrk(ldz),wrk(8),iwrk(1),iwrk(2),iwrk(3),iwrk(4), - * iwrk(knru),iwrk(knrv),iwrk(kndu),iwrk(kndv),wrk(lww),jwrk,ier) - 200 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/polar.f b/scipy-0.10.1/scipy/interpolate/fitpack/polar.f deleted file mode 100644 index 8c5115c8d1..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/polar.f +++ /dev/null @@ -1,450 +0,0 @@ - subroutine polar(iopt,m,x,y,z,w,rad,s,nuest,nvest,eps,nu,tu, - * nv,tv,u,v,c,fp,wrk1,lwrk1,wrk2,lwrk2,iwrk,kwrk,ier) -c subroutine polar fits a smooth function f(x,y) to a set of data -c points (x(i),y(i),z(i)) scattered arbitrarily over an approximation -c domain x**2+y**2 <= rad(atan(y/x))**2. through the transformation -c x = u*rad(v)*cos(v) , y = u*rad(v)*sin(v) -c the approximation problem is reduced to the determination of a bi- -c cubic spline s(u,v) fitting a corresponding set of data points -c (u(i),v(i),z(i)) on the rectangle 0<=u<=1,-pi<=v<=pi. -c in order to have continuous partial derivatives -c i+j -c d f(0,0) -c g(i,j) = ---------- -c i j -c dx dy -c -c s(u,v)=f(x,y) must satisfy the following conditions -c -c (1) s(0,v) = g(0,0) -pi <=v<= pi. -c -c d s(0,v) -c (2) -------- = rad(v)*(cos(v)*g(1,0)+sin(v)*g(0,1)) -c d u -c -pi <=v<= pi -c 2 -c d s(0,v) 2 2 2 -c (3) -------- = rad(v)*(cos(v)*g(2,0)+sin(v)*g(0,2)+sin(2*v)*g(1,1)) -c 2 -c d u -pi <=v<= pi -c -c moreover, s(u,v) must be periodic in the variable v, i.e. 
-c -c j j -c d s(u,-pi) d s(u,pi) -c (4) ---------- = --------- 0 <=u<= 1, j=0,1,2 -c j j -c d v d v -c -c if iopt(1) < 0 circle calculates a weighted least-squares spline -c according to a given set of knots in u- and v- direction. -c if iopt(1) >=0, the number of knots in each direction and their pos- -c ition tu(j),j=1,2,...,nu ; tv(j),j=1,2,...,nv are chosen automatical- -c ly by the routine. the smoothness of s(u,v) is then achieved by mini- -c malizing the discontinuity jumps of the derivatives of the spline -c at the knots. the amount of smoothness of s(u,v) is determined by -c the condition that fp = sum((w(i)*(z(i)-s(u(i),v(i))))**2) be <= s, -c with s a given non-negative constant. -c the bicubic spline is given in its standard b-spline representation -c and the corresponding function f(x,y) can be evaluated by means of -c function program evapol. -c -c calling sequence: -c call polar(iopt,m,x,y,z,w,rad,s,nuest,nvest,eps,nu,tu, -c * nv,tv,u,v,wrk1,lwrk1,wrk2,lwrk2,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer array of dimension 3, specifying different options. -c unchanged on exit. -c iopt(1):on entry iopt(1) must specify whether a weighted -c least-squares polar spline (iopt(1)=-1) or a smoothing -c polar spline (iopt(1)=0 or 1) must be determined. -c if iopt(1)=0 the routine will start with an initial set of -c knots tu(i)=0,tu(i+4)=1,i=1,...,4;tv(i)=(2*i-9)*pi,i=1,...,8. -c if iopt(1)=1 the routine will continue with the set of knots -c found at the last call of the routine. -c attention: a call with iopt(1)=1 must always be immediately -c preceded by another call with iopt(1) = 1 or iopt(1) = 0. -c iopt(2):on entry iopt(2) must specify the requested order of conti- -c nuity for f(x,y) at the origin. -c if iopt(2)=0 only condition (1) must be fulfilled, -c if iopt(2)=1 conditions (1)+(2) must be fulfilled and -c if iopt(2)=2 conditions (1)+(2)+(3) must be fulfilled. 
-c iopt(3):on entry iopt(3) must specify whether (iopt(3)=1) or not -c (iopt(3)=0) the approximation f(x,y) must vanish at the -c boundary of the approximation domain. -c m : integer. on entry m must specify the number of data points. -c m >= 4-iopt(2)-iopt(3) unchanged on exit. -c x : real array of dimension at least (m). -c y : real array of dimension at least (m). -c z : real array of dimension at least (m). -c before entry, x(i),y(i),z(i) must be set to the co-ordinates -c of the i-th data point, for i=1,...,m. the order of the data -c points is immaterial. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) must -c be set to the i-th value in the set of weights. the w(i) must -c be strictly positive. unchanged on exit. -c rad : real function subprogram defining the boundary of the approx- -c imation domain, i.e x = rad(v)*cos(v) , y = rad(v)*sin(v), -c -pi <= v <= pi. -c must be declared external in the calling (sub)program. -c s : real. on entry (in case iopt(1) >=0) s must specify the -c smoothing factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments -c nuest : integer. unchanged on exit. -c nvest : integer. unchanged on exit. -c on entry, nuest and nvest must specify an upper bound for the -c number of knots required in the u- and v-directions resp. -c these numbers will also determine the storage space needed by -c the routine. nuest >= 8, nvest >= 8. -c in most practical situation nuest = nvest = 8+sqrt(m/2) will -c be sufficient. see also further comments. -c eps : real. -c on entry, eps must specify a threshold for determining the -c effective rank of an over-determined linear system of equat- -c ions. 0 < eps < 1. if the number of decimal digits in the -c computer representation of a real number is q, then 10**(-q) -c is a suitable value for eps in most practical applications. -c unchanged on exit. -c nu : integer. 
-c unless ier=10 (in case iopt(1) >=0),nu will contain the total -c number of knots with respect to the u-variable, of the spline -c approximation returned. if the computation mode iopt(1)=1 -c is used, the value of nu should be left unchanged between -c subsequent calls. -c in case iopt(1)=-1,the value of nu must be specified on entry -c tu : real array of dimension at least nuest. -c on succesful exit, this array will contain the knots of the -c spline with respect to the u-variable, i.e. the position -c of the interior knots tu(5),...,tu(nu-4) as well as the -c position of the additional knots tu(1)=...=tu(4)=0 and -c tu(nu-3)=...=tu(nu)=1 needed for the b-spline representation -c if the computation mode iopt(1)=1 is used,the values of -c tu(1),...,tu(nu) should be left unchanged between subsequent -c calls. if the computation mode iopt(1)=-1 is used,the values -c tu(5),...tu(nu-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c nv : integer. -c unless ier=10 (in case iopt(1)>=0), nv will contain the total -c number of knots with respect to the v-variable, of the spline -c approximation returned. if the computation mode iopt(1)=1 -c is used, the value of nv should be left unchanged between -c subsequent calls. in case iopt(1)=-1, the value of nv should -c be specified on entry. -c tv : real array of dimension at least nvest. -c on succesful exit, this array will contain the knots of the -c spline with respect to the v-variable, i.e. the position of -c the interior knots tv(5),...,tv(nv-4) as well as the position -c of the additional knots tv(1),...,tv(4) and tv(nv-3),..., -c tv(nv) needed for the b-spline representation. -c if the computation mode iopt(1)=1 is used, the values of -c tv(1),...,tv(nv) should be left unchanged between subsequent -c calls. if the computation mode iopt(1)=-1 is used,the values -c tv(5),...tv(nv-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). 
-c u : real array of dimension at least (m). -c v : real array of dimension at least (m). -c on succesful exit, u(i),v(i) contains the co-ordinates of -c the i-th data point with respect to the transformed rectan- -c gular approximation domain, for i=1,2,...,m. -c if the computation mode iopt(1)=1 is used the values of -c u(i),v(i) should be left unchanged between subsequent calls. -c c : real array of dimension at least (nuest-4)*(nvest-4). -c on succesful exit, c contains the coefficients of the spline -c approximation s(u,v). -c fp : real. unless ier=10, fp contains the weighted sum of -c squared residuals of the spline approximation returned. -c wrk1 : real array of dimension (lwrk1). used as workspace. -c if the computation mode iopt(1)=1 is used the value of -c wrk1(1) should be left unchanged between subsequent calls. -c on exit wrk1(2),wrk1(3),...,wrk1(1+ncof) will contain the -c values d(i)/max(d(i)),i=1,...,ncof=1+iopt(2)*(iopt(2)+3)/2+ -c (nv-7)*(nu-5-iopt(2)-iopt(3)) with d(i) the i-th diagonal el- -c ement of the triangular matrix for calculating the b-spline -c coefficients.it includes those elements whose square is < eps -c which are treated as 0 in the case of rank deficiency(ier=-2) -c lwrk1 : integer. on entry lwrk1 must specify the actual dimension of -c the array wrk1 as declared in the calling (sub)program. -c lwrk1 must not be too small. let -c k = nuest-7, l = nvest-7, p = 1+iopt(2)*(iopt(2)+3)/2, -c q = k+2-iopt(2)-iopt(3) then -c lwrk1 >= 129+10*k+21*l+k*l+(p+l*q)*(1+8*l+p)+8*m -c wrk2 : real array of dimension (lwrk2). used as workspace, but -c only in the case a rank deficient system is encountered. -c lwrk2 : integer. on entry lwrk2 must specify the actual dimension of -c the array wrk2 as declared in the calling (sub)program. -c lwrk2 > 0 . a save upper bound for lwrk2 = (p+l*q+1)*(4*l+p) -c +p+l*q where p,l,q are as above. 
if there are enough data -c points, scattered uniformly over the approximation domain -c and if the smoothing factor s is not too small, there is a -c good chance that this extra workspace is not needed. a lot -c of memory might therefore be saved by setting lwrk2=1. -c (see also ier > 10) -c iwrk : integer array of dimension (kwrk). used as workspace. -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= m+(nuest-7)*(nvest-7). -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is an interpolating -c spline (fp=0). -c ier=-2 : normal return. the spline returned is the weighted least- -c squares constrained polynomial . in this extreme case -c fp gives the upper bound for the smoothing factor s. -c ier<-2 : warning. the coefficients of the spline returned have been -c computed as the minimal norm least-squares solution of a -c (numerically) rank deficient system. (-ier) gives the rank. -c especially if the rank deficiency which can be computed as -c 1+iopt(2)*(iopt(2)+3)/2+(nv-7)*(nu-5-iopt(2)-iopt(3))+ier -c is large the results may be inaccurate. -c they could also seriously depend on the value of eps. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameters nuest and -c nvest. -c probably causes : nuest or nvest too small. if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the weighted least-squares -c polar spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. 
a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small or badly chosen eps. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=4 : error. no more knots can be added because the dimension -c of the spline 1+iopt(2)*(iopt(2)+3)/2+(nv-7)*(nu-5-iopt(2) -c -iopt(3)) already exceeds the number of data points m. -c probably causes : either s or m too small. -c the approximation returned is the weighted least-squares -c polar spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=5 : error. no more knots can be added because the additional -c knot would (quasi) coincide with an old one. -c probably causes : s too small or too large a weight to an -c inaccurate data point. -c the approximation returned is the weighted least-squares -c polar spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. 
-c -1<=iopt(1)<=1 , 0<=iopt(2)<=2 , 0<=iopt(3)<=1 , -c m>=4-iopt(2)-iopt(3) , nuest>=8 ,nvest >=8, 00, i=1,...,m -c lwrk1 >= 129+10*k+21*l+k*l+(p+l*q)*(1+8*l+p)+8*m -c kwrk >= m+(nuest-7)*(nvest-7) -c if iopt(1)=-1:9<=nu<=nuest,9+iopt(2)*(iopt(2)+1)<=nv<=nvest -c 0=0: s>=0 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c ier>10 : error. lwrk2 is too small, i.e. there is not enough work- -c space for computing the minimal least-squares solution of -c a rank deficient system of linear equations. ier gives the -c requested value for lwrk2. there is no approximation re- -c turned but, having saved the information contained in nu, -c nv,tu,tv,wrk1,u,v and having adjusted the value of lwrk2 -c and the dimension of the array wrk2 accordingly, the user -c can continue at the point the program was left, by calling -c polar with iopt(1)=1. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the constrained weighted least-squares polynomial if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c z(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). 
if nothing is known about the statistical error in z(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c polynomial and the corresponding upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximation shows more detail) to obtain closer fits. -c to choose s very small is strongly discouraged. this considerably -c increases computation time and memory requirements. it may also -c cause rank-deficiency (ier<-2) and endager numerical stability. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt(1)=0. -c if iopt(1)=1 the program will continue with the set of knots found -c at the last call of the routine. this will save a lot of computation -c time if polar is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. if the computation mode iopt(1)=1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt(1)=1,the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c polar once more with the selected value for s but now with iopt(1)=0 -c indeed, polar may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. 
-c the number of knots may also depend on the upper bounds nuest and -c nvest. indeed, if at a certain stage in polar the number of knots -c in one direction (say nu) has reached the value of its upper bound -c (nuest), then from that moment on all subsequent knots are added -c in the other (v) direction. this may indicate that the value of -c nuest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c -c other subroutines required: -c fpback,fpbspl,fppola,fpdisc,fpgivs,fprank,fprati,fprota,fporde, -c fprppo -c -c references: -c dierckx p.: an algorithm for fitting data over a circle using tensor -c product splines,j.comp.appl.maths 15 (1986) 161-173. -c dierckx p.: an algorithm for fitting data on a circle using tensor -c product splines, report tw68, dept. computer science, -c k.u.leuven, 1984. -c dierckx p.: curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : june 1984 -c latest update : march 1989 -c -c .. -c ..scalar arguments.. - real*8 s,eps,fp - integer m,nuest,nvest,nu,nv,lwrk1,lwrk2,kwrk,ier -c ..array arguments.. - real*8 x(m),y(m),z(m),w(m),tu(nuest),tv(nvest),u(m),v(m), - * c((nuest-4)*(nvest-4)),wrk1(lwrk1),wrk2(lwrk2) - integer iopt(3),iwrk(kwrk) -c ..user specified function - real*8 rad -c ..local scalars.. - real*8 tol,pi,dist,r,one - integer i,ib1,ib3,ki,kn,kwest,la,lbu,lcc,lcs,lro,j, - * lbv,lco,lf,lff,lfp,lh,lq,lsu,lsv,lwest,maxit,ncest,ncc,nuu, - * nvv,nreg,nrint,nu4,nv4,iopt1,iopt2,iopt3,ipar,nvmin -c ..function references.. - real*8 datan2,sqrt - external rad -c ..subroutine references.. -c fppola -c .. -c set up constants - one = 1d0 -c we set up the parameters tol and maxit.
- maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid,control is immediately repassed to the calling program. - ier = 10 - if(eps.le.0. .or. eps.ge.1.) go to 60 - iopt1 = iopt(1) - if(iopt1.lt.(-1) .or. iopt1.gt.1) go to 60 - iopt2 = iopt(2) - if(iopt2.lt.0 .or. iopt2.gt.2) go to 60 - iopt3 = iopt(3) - if(iopt3.lt.0 .or. iopt3.gt.1) go to 60 - if(m.lt.(4-iopt2-iopt3)) go to 60 - if(nuest.lt.8 .or. nvest.lt.8) go to 60 - nu4 = nuest-4 - nv4 = nvest-4 - ncest = nu4*nv4 - nuu = nuest-7 - nvv = nvest-7 - ipar = 1+iopt2*(iopt2+3)/2 - ncc = ipar+nvv*(nuest-5-iopt2-iopt3) - nrint = nuu+nvv - nreg = nuu*nvv - ib1 = 4*nvv - ib3 = ib1+ipar - lwest = ncc*(1+ib1+ib3)+2*nrint+ncest+m*8+ib3+5*nuest+12*nvest - kwest = m+nreg - if(lwrk1.lt.lwest .or. kwrk.lt.kwest) go to 60 - if(iopt1.gt.0) go to 40 - do 10 i=1,m - if(w(i).le.0.) go to 60 - dist = x(i)**2+y(i)**2 - u(i) = 0. - v(i) = 0. - if(dist.le.0.) go to 10 - v(i) = datan2(y(i),x(i)) - r = rad(v(i)) - if(r.le.0.) go to 60 - u(i) = sqrt(dist)/r - if(u(i).gt.one) go to 60 - 10 continue - if(iopt1.eq.0) go to 40 - nuu = nu-8 - if(nuu.lt.1 .or. nu.gt.nuest) go to 60 - tu(4) = 0. - do 20 i=1,nuu - j = i+4 - if(tu(j).le.tu(j-1) .or. tu(j).ge.one) go to 60 - 20 continue - nvv = nv-8 - nvmin = 9+iopt2*(iopt2+1) - if(nv.lt.nvmin .or. nv.gt.nvest) go to 60 - pi = datan2(0d0,-one) - tv(4) = -pi - do 30 i=1,nvv - j = i+4 - if(tv(j).le.tv(j-1) .or. tv(j).ge.pi) go to 60 - 30 continue - go to 50 - 40 if(s.lt.0.) 
go to 60 - 50 ier = 0 -c we partition the working space and determine the spline approximation - kn = 1 - ki = kn+m - lq = 2 - la = lq+ncc*ib3 - lf = la+ncc*ib1 - lff = lf+ncc - lfp = lff+ncest - lco = lfp+nrint - lh = lco+nrint - lbu = lh+ib3 - lbv = lbu+5*nuest - lro = lbv+5*nvest - lcc = lro+nvest - lcs = lcc+nvest - lsu = lcs+nvest*5 - lsv = lsu+m*4 - call fppola(iopt1,iopt2,iopt3,m,u,v,z,w,rad,s,nuest,nvest,eps,tol, - * - * maxit,ib1,ib3,ncest,ncc,nrint,nreg,nu,tu,nv,tv,c,fp,wrk1(1), - * wrk1(lfp),wrk1(lco),wrk1(lf),wrk1(lff),wrk1(lro),wrk1(lcc), - * wrk1(lcs),wrk1(la),wrk1(lq),wrk1(lbu),wrk1(lbv),wrk1(lsu), - * wrk1(lsv),wrk1(lh),iwrk(ki),iwrk(kn),wrk2,lwrk2,ier) - 60 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/profil.f b/scipy-0.10.1/scipy/interpolate/fitpack/profil.f deleted file mode 100644 index 856923a0fc..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/profil.f +++ /dev/null @@ -1,117 +0,0 @@ - subroutine profil(iopt,tx,nx,ty,ny,c,kx,ky,u,nu,cu,ier) -c if iopt=0 subroutine profil calculates the b-spline coefficients of -c the univariate spline f(y) = s(u,y) with s(x,y) a bivariate spline of -c degrees kx and ky, given in the b-spline representation. -c if iopt = 1 it calculates the b-spline coefficients of the univariate -c spline g(x) = s(x,u) -c -c calling sequence: -c call profil(iopt,tx,nx,ty,ny,c,kx,ky,u,nu,cu,ier) -c -c input parameters: -c iopt : integer flag, specifying whether the profile f(y) (iopt=0) -c or the profile g(x) (iopt=1) must be determined. -c tx : real array, length nx, which contains the position of the -c knots in the x-direction. -c nx : integer, giving the total number of knots in the x-direction -c ty : real array, length ny, which contains the position of the -c knots in the y-direction. -c ny : integer, giving the total number of knots in the y-direction -c c : real array, length (nx-kx-1)*(ny-ky-1), which contains the -c b-spline coefficients. 
-c kx,ky : integer values, giving the degrees of the spline. -c u : real value, specifying the requested profile. -c tx(kx+1)<=u<=tx(nx-kx), if iopt=0. -c ty(ky+1)<=u<=ty(ny-ky), if iopt=1. -c nu : on entry nu must specify the dimension of the array cu. -c nu >= ny if iopt=0, nu >= nx if iopt=1. -c -c output parameters: -c cu : real array of dimension (nu). -c on succesful exit this array contains the b-spline -c ier : integer error flag -c ier=0 : normal return -c ier=10: invalid input data (see restrictions) -c -c restrictions: -c if iopt=0 : tx(kx+1) <= u <= tx(nx-kx), nu >=ny. -c if iopt=1 : ty(ky+1) <= u <= ty(ny-ky), nu >=nx. -c -c other subroutines required: -c fpbspl -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer iopt,nx,ny,kx,ky,nu,ier - real*8 u -c ..array arguments.. - real*8 tx(nx),ty(ny),c((nx-kx-1)*(ny-ky-1)),cu(nu) -c ..local scalars.. - integer i,j,kx1,ky1,l,l1,m,m0,nkx1,nky1 - real*8 sum -c ..local array - real*8 h(6) -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - kx1 = kx+1 - ky1 = ky+1 - nkx1 = nx-kx1 - nky1 = ny-ky1 - ier = 10 - if(iopt.ne.0) go to 200 - if(nu.lt.ny) go to 300 - if(u.lt.tx(kx1) .or. u.gt.tx(nkx1+1)) go to 300 -c the b-splinecoefficients of f(y) = s(u,y). - ier = 0 - l = kx1 - l1 = l+1 - 110 if(u.lt.tx(l1) .or. l.eq.nkx1) go to 120 - l = l1 - l1 = l+1 - go to 110 - 120 call fpbspl(tx,nx,kx,u,l,h) - m0 = (l-kx1)*nky1+1 - do 140 i=1,nky1 - m = m0 - sum = 0. - do 130 j=1,kx1 - sum = sum+h(j)*c(m) - m = m+nky1 - 130 continue - cu(i) = sum - m0 = m0+1 - 140 continue - go to 300 - 200 if(nu.lt.nx) go to 300 - if(u.lt.ty(ky1) .or. u.gt.ty(nky1+1)) go to 300 -c the b-splinecoefficients of g(x) = s(x,u). 
- ier = 0 - l = ky1 - l1 = l+1 - 210 if(u.lt.ty(l1) .or. l.eq.nky1) go to 220 - l = l1 - l1 = l+1 - go to 210 - 220 call fpbspl(ty,ny,ky,u,l,h) - m0 = l-ky - do 240 i=1,nkx1 - m = m0 - sum = 0. - do 230 j=1,ky1 - sum = sum+h(j)*c(m) - m = m+1 - 230 continue - cu(i) = sum - m0 = m0+nky1 - 240 continue - 300 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/regrid.f b/scipy-0.10.1/scipy/interpolate/fitpack/regrid.f deleted file mode 100644 index dae2ee94f7..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/regrid.f +++ /dev/null @@ -1,353 +0,0 @@ - subroutine regrid(iopt,mx,x,my,y,z,xb,xe,yb,ye,kx,ky,s, - * nxest,nyest,nx,tx,ny,ty,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c given the set of values z(i,j) on the rectangular grid (x(i),y(j)), -c i=1,...,mx;j=1,...,my, subroutine regrid determines a smooth bivar- -c iate spline approximation s(x,y) of degrees kx and ky on the rect- -c angle xb <= x <= xe, yb <= y <= ye. -c if iopt = -1 regrid calculates the least-squares spline according -c to a given set of knots. -c if iopt >= 0 the total numbers nx and ny of these knots and their -c position tx(j),j=1,...,nx and ty(j),j=1,...,ny are chosen automatic- -c ally by the routine. the smoothness of s(x,y) is then achieved by -c minimalizing the discontinuity jumps in the derivatives of s(x,y) -c across the boundaries of the subpanels (tx(i),tx(i+1))*(ty(j),ty(j+1). -c the amounth of smoothness is determined by the condition that f(p) = -c sum ((z(i,j)-s(x(i),y(j))))**2) be <= s, with s a given non-negative -c constant, called the smoothing factor. -c the fit is given in the b-spline representation (b-spline coefficients -c c((ny-ky-1)*(i-1)+j),i=1,...,nx-kx-1;j=1,...,ny-ky-1) and can be eval- -c uated by means of subroutine bispev. -c -c calling sequence: -c call regrid(iopt,mx,x,my,y,z,xb,xe,yb,ye,kx,ky,s,nxest,nyest, -c * nx,tx,ny,ty,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer flag. 
on entry iopt must specify whether a least- -c squares spline (iopt=-1) or a smoothing spline (iopt=0 or 1) -c must be determined. -c if iopt=0 the routine will start with an initial set of knots -c tx(i)=xb,tx(i+kx+1)=xe,i=1,...,kx+1;ty(i)=yb,ty(i+ky+1)=ye,i= -c 1,...,ky+1. if iopt=1 the routine will continue with the set -c of knots found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately pre- -c ceded by another call with iopt=1 or iopt=0 and -c s.ne.0. -c unchanged on exit. -c mx : integer. on entry mx must specify the number of grid points -c along the x-axis. mx > kx . unchanged on exit. -c x : real array of dimension at least (mx). before entry, x(i) -c must be set to the x-co-ordinate of the i-th grid point -c along the x-axis, for i=1,2,...,mx. these values must be -c supplied in strictly ascending order. unchanged on exit. -c my : integer. on entry my must specify the number of grid points -c along the y-axis. my > ky . unchanged on exit. -c y : real array of dimension at least (my). before entry, y(j) -c must be set to the y-co-ordinate of the j-th grid point -c along the y-axis, for j=1,2,...,my. these values must be -c supplied in strictly ascending order. unchanged on exit. -c z : real array of dimension at least (mx*my). -c before entry, z(my*(i-1)+j) must be set to the data value at -c the grid point (x(i),y(j)) for i=1,...,mx and j=1,...,my. -c unchanged on exit. -c xb,xe : real values. on entry xb,xe,yb and ye must specify the bound- -c yb,ye aries of the rectangular approximation domain. -c xb<=x(i)<=xe,i=1,...,mx; yb<=y(j)<=ye,j=1,...,my. -c unchanged on exit. -c kx,ky : integer values. on entry kx and ky must specify the degrees -c of the spline. 1<=kx,ky<=5. it is recommended to use bicubic -c (kx=ky=3) splines. unchanged on exit. -c s : real. on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. 
-c for advice on the choice of s see further comments -c nxest : integer. unchanged on exit. -c nyest : integer. unchanged on exit. -c on entry, nxest and nyest must specify an upper bound for the -c number of knots required in the x- and y-directions respect. -c these numbers will also determine the storage space needed by -c the routine. nxest >= 2*(kx+1), nyest >= 2*(ky+1). -c in most practical situation nxest = mx/2, nyest=my/2, will -c be sufficient. always large enough are nxest=mx+kx+1, nyest= -c my+ky+1, the number of knots needed for interpolation (s=0). -c see also further comments. -c nx : integer. -c unless ier=10 (in case iopt >=0), nx will contain the total -c number of knots with respect to the x-variable, of the spline -c approximation returned. if the computation mode iopt=1 is -c used, the value of nx should be left unchanged between sub- -c sequent calls. -c in case iopt=-1, the value of nx should be specified on entry -c tx : real array of dimension nmax. -c on succesful exit, this array will contain the knots of the -c spline with respect to the x-variable, i.e. the position of -c the interior knots tx(kx+2),...,tx(nx-kx-1) as well as the -c position of the additional knots tx(1)=...=tx(kx+1)=xb and -c tx(nx-kx)=...=tx(nx)=xe needed for the b-spline representat. -c if the computation mode iopt=1 is used, the values of tx(1), -c ...,tx(nx) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values tx(kx+2), -c ...tx(nx-kx-1) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c ny : integer. -c unless ier=10 (in case iopt >=0), ny will contain the total -c number of knots with respect to the y-variable, of the spline -c approximation returned. if the computation mode iopt=1 is -c used, the value of ny should be left unchanged between sub- -c sequent calls. -c in case iopt=-1, the value of ny should be specified on entry -c ty : real array of dimension nmax. 
-c on succesful exit, this array will contain the knots of the -c spline with respect to the y-variable, i.e. the position of -c the interior knots ty(ky+2),...,ty(ny-ky-1) as well as the -c position of the additional knots ty(1)=...=ty(ky+1)=yb and -c ty(ny-ky)=...=ty(ny)=ye needed for the b-spline representat. -c if the computation mode iopt=1 is used, the values of ty(1), -c ...,ty(ny) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values ty(ky+2), -c ...ty(ny-ky-1) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c c : real array of dimension at least (nxest-kx-1)*(nyest-ky-1). -c on succesful exit, c contains the coefficients of the spline -c approximation s(x,y) -c fp : real. unless ier=10, fp contains the sum of squared -c residuals of the spline approximation returned. -c wrk : real array of dimension (lwrk). used as workspace. -c if the computation mode iopt=1 is used the values of wrk(1), -c ...,wrk(4) should be left unchanged between subsequent calls. -c lwrk : integer. on entry lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. -c lwrk must not be too small. -c lwrk >= 4+nxest*(my+2*kx+5)+nyest*(2*ky+5)+mx*(kx+1)+ -c my*(ky+1) +u -c where u is the larger of my and nxest. -c iwrk : integer array of dimension (kwrk). used as workspace. -c if the computation mode iopt=1 is used the values of iwrk(1), -c ...,iwrk(3) should be left unchanged between subsequent calls -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= 3+mx+my+nxest+nyest. -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. 
-c ier=-1 : normal return. the spline returned is an interpolating -c spline (fp=0). -c ier=-2 : normal return. the spline returned is the least-squares -c polynomial of degrees kx and ky. in this extreme case fp -c gives the upper bound for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameters nxest and -c nyest. -c probably causes : nxest or nyest too small. if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the least-squares spline -c according to the current set of knots. the parameter fp -c gives the corresponding sum of squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, 1<=kx,ky<=5, mx>kx, my>ky, nxest>=2*kx+2, -c nyest>=2*ky+2, kwrk>=3+mx+my+nxest+nyest, -c lwrk >= 4+nxest*(my+2*kx+5)+nyest*(2*ky+5)+mx*(kx+1)+ -c my*(ky+1) +max(my,nxest), -c xb<=x(i-1)<x(i)<=xe,i=2,...,mx -c yb<=y(j-1)<y(j)<=ye,j=2,...,my -c if iopt=-1: 2*kx+2<=nx<=min(nxest,mx+kx+1) -c xb<tx(kx+2)<tx(kx+3)<...<tx(nx-kx-1)<xe -c 2*ky+2<=ny<=min(nyest,my+ky+1) -c yb<ty(ky+2)<ty(ky+3)<...<ty(ny-ky-1)<ye -c if iopt>=0: s>=0 -c if s=0 : nxest>=mx+kx+1, nyest>=my+ky+1 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c regrid does not allow individual weighting of the data-values.
-c so, if these were determined to widely different accuracies, then -c perhaps the general data set routine surfit should rather be used -c in spite of efficiency. -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the least-squares polynomial (degrees kx,ky) if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the accuracy of the data values. -c if the user has an idea of the statistical errors on the data, he -c can also find a proper estimate for s. for, by assuming that, if he -c specifies the right s, regrid will return a spline s(x,y) which -c exactly reproduces the function underlying the data he can evaluate -c the sum((z(i,j)-s(x(i),y(j)))**2) to find a good estimate for this s -c for example, if he knows that the statistical errors on his z(i,j)- -c values is not greater than 0.1, he may expect that a good s should -c have a value not larger than mx*my*(0.1)**2. -c if nothing is known about the statistical error in z(i,j), s must -c be determined by trial and error, taking account of the comments -c above. the best is then to start with a very large value of s (to -c determine the least-squares polynomial and the corresponding upper -c bound fp0 for s) and then to progressively decrease the value of s -c ( say by a factor 10 in the beginning, i.e. s=fp0/10,fp0/100,... -c and more carefully as the approximation shows more detail) to -c obtain closer fits. 
-c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if regrid is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. if the computation mode iopt=1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c regrid once more with the selected value for s but now with iopt=0. -c indeed, regrid may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c the number of knots may also depend on the upper bounds nxest and -c nyest. indeed, if at a certain stage in regrid the number of knots -c in one direction (say nx) has reached the value of its upper bound -c (nxest), then from that moment on all subsequent knots are added -c in the other (y) direction. this may indicate that the value of -c nxest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c for example, by setting nxest=2*kx+2 (the lowest allowable value for -c nxest), the user can indicate that he wants an approximation which -c is a simple polynomial of degree kx in the variable x. 
-c -c other subroutines required: -c fpback,fpbspl,fpregr,fpdisc,fpgivs,fpgrre,fprati,fprota,fpchec, -c fpknot -c -c references: -c dierckx p. : a fast algorithm for smoothing data on a rectangular -c grid while using spline functions, siam j.numer.anal. -c 19 (1982) 1286-1304. -c dierckx p. : a fast algorithm for smoothing data on a rectangular -c grid while using spline functions, report tw53, dept. -c computer science,k.u.leuven, 1980. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1989 -c -c .. -c ..scalar arguments.. - real*8 xb,xe,yb,ye,s,fp - integer iopt,mx,my,kx,ky,nxest,nyest,nx,ny,lwrk,kwrk,ier -c ..array arguments.. - real*8 x(mx),y(my),z(mx*my),tx(nxest),ty(nyest), - * c((nxest-kx-1)*(nyest-ky-1)),wrk(lwrk) - integer iwrk(kwrk) -c ..local scalars.. - real*8 tol - integer i,j,jwrk,kndx,kndy,knrx,knry,kwest,kx1,kx2,ky1,ky2, - * lfpx,lfpy,lwest,lww,maxit,nc,nminx,nminy,mz -c ..function references.. - integer max0 -c ..subroutine references.. -c fpregr,fpchec -c .. -c we set up the parameters tol and maxit. - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(kx.le.0 .or. kx.gt.5) go to 70 - kx1 = kx+1 - kx2 = kx1+1 - if(ky.le.0 .or. ky.gt.5) go to 70 - ky1 = ky+1 - ky2 = ky1+1 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 70 - nminx = 2*kx1 - if(mx.lt.kx1 .or. nxest.lt.nminx) go to 70 - nminy = 2*ky1 - if(my.lt.ky1 .or. nyest.lt.nminy) go to 70 - mz = mx*my - nc = (nxest-kx1)*(nyest-ky1) - lwest = 4+nxest*(my+2*kx2+1)+nyest*(2*ky2+1)+mx*kx1+ - * my*ky1+max0(nxest,my) - kwest = 3+mx+my+nxest+nyest - if(lwrk.lt.lwest .or. 
kwrk.lt.kwest) go to 70 - if(xb.gt.x(1) .or. xe.lt.x(mx)) go to 70 - do 10 i=2,mx - if(x(i-1).ge.x(i)) go to 70 - 10 continue - if(yb.gt.y(1) .or. ye.lt.y(my)) go to 70 - do 20 i=2,my - if(y(i-1).ge.y(i)) go to 70 - 20 continue - if(iopt.ge.0) go to 50 - if(nx.lt.nminx .or. nx.gt.nxest) go to 70 - j = nx - do 30 i=1,kx1 - tx(i) = xb - tx(j) = xe - j = j-1 - 30 continue - call fpchec(x,mx,tx,nx,kx,ier) - if(ier.ne.0) go to 70 - if(ny.lt.nminy .or. ny.gt.nyest) go to 70 - j = ny - do 40 i=1,ky1 - ty(i) = yb - ty(j) = ye - j = j-1 - 40 continue - call fpchec(y,my,ty,ny,ky,ier) - if (ier.eq.0) go to 60 - go to 70 - 50 if(s.lt.0.) go to 70 - if(s.eq.0. .and. (nxest.lt.(mx+kx1) .or. nyest.lt.(my+ky1)) ) - * go to 70 - ier = 0 -c we partition the working space and determine the spline approximation - 60 lfpx = 5 - lfpy = lfpx+nxest - lww = lfpy+nyest - jwrk = lwrk-4-nxest-nyest - knrx = 4 - knry = knrx+mx - kndx = knry+my - kndy = kndx+nxest - call fpregr(iopt,x,mx,y,my,z,mz,xb,xe,yb,ye,kx,ky,s,nxest,nyest, - * tol,maxit,nc,nx,tx,ny,ty,c,fp,wrk(1),wrk(2),wrk(3),wrk(4), - * wrk(lfpx),wrk(lfpy),iwrk(1),iwrk(2),iwrk(3),iwrk(knrx), - * iwrk(knry),iwrk(kndx),iwrk(kndy),wrk(lww),jwrk,ier) - 70 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/spalde.f b/scipy-0.10.1/scipy/interpolate/fitpack/spalde.f deleted file mode 100644 index b6f69b08a5..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/spalde.f +++ /dev/null @@ -1,73 +0,0 @@ - subroutine spalde(t,n,c,k1,x,d,ier) -c subroutine spalde evaluates at a point x all the derivatives -c (j-1) -c d(j) = s (x) , j=1,2,...,k1 -c of a spline s(x) of order k1 (degree k=k1-1), given in its b-spline -c representation. -c -c calling sequence: -c call spalde(t,n,c,k1,x,d,ier) -c -c input parameters: -c t : array,length n, which contains the position of the knots. -c n : integer, giving the total number of knots of s(x). -c c : array,length n, which contains the b-spline coefficients. 
-c k1 : integer, giving the order of s(x) (order=degree+1) -c x : real, which contains the point where the derivatives must -c be evaluated. -c -c output parameters: -c d : array,length k1, containing the derivative values of s(x). -c ier : error flag -c ier = 0 : normal return -c ier =10 : invalid input data (see restrictions) -c -c restrictions: -c t(k1) <= x <= t(n-k1+1) -c -c further comments: -c if x coincides with a knot, right derivatives are computed -c ( left derivatives if x = t(n-k1+1) ). -c -c other subroutines required: fpader. -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer n,k1,ier - real*8 x -c ..array arguments.. - real*8 t(n),c(n),d(k1) -c ..local scalars.. - integer l,nk1 -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 - nk1 = n-k1 - if(x.lt.t(k1) .or. x.gt.t(nk1+1)) go to 300 -c search for knot interval t(l) <= x < t(l+1) - l = k1 - 100 if(x.lt.t(l+1) .or. l.eq.nk1) go to 200 - l = l+1 - go to 100 - 200 if(t(l).ge.t(l+1)) go to 300 - ier = 0 -c calculate the derivatives. 
- call fpader(t,n,c,k1,x,l,d) - 300 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/spgrid.f b/scipy-0.10.1/scipy/interpolate/fitpack/spgrid.f deleted file mode 100644 index 4738d07397..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/spgrid.f +++ /dev/null @@ -1,501 +0,0 @@ - subroutine spgrid(iopt,ider,mu,u,mv,v,r,r0,r1,s,nuest,nvest, - * nu,tu,nv,tv,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c given the function values r(i,j) on the latitude-longitude grid -c (u(i),v(j)), i=1,...,mu ; j=1,...,mv , spgrid determines a smooth -c bicubic spline approximation on the rectangular domain 0<=u<=pi, -c vb<=v<=ve (vb = v(1), ve=vb+2*pi). -c this approximation s(u,v) will satisfy the properties -c -c (1) s(0,v) = s(0,0) = dr(1) -c -c d s(0,v) d s(0,0) d s(0,pi/2) -c (2) -------- = cos(v)* -------- + sin(v)* ----------- -c d u d u d u -c -c = cos(v)*dr(2)+sin(v)*dr(3) -c vb <= v <= ve -c (3) s(pi,v) = s(pi,0) = dr(4) -c -c d s(pi,v) d s(pi,0) d s(pi,pi/2) -c (4) -------- = cos(v)* --------- + sin(v)* ------------ -c d u d u d u -c -c = cos(v)*dr(5)+sin(v)*dr(6) -c -c and will be periodic in the variable v, i.e. -c -c j j -c d s(u,vb) d s(u,ve) -c (5) --------- = --------- 0 <=u<= pi , j=0,1,2 -c j j -c d v d v -c -c the number of knots of s(u,v) and their position tu(i),i=1,2,...,nu; -c tv(j),j=1,2,...,nv, is chosen automatically by the routine. the -c smoothness of s(u,v) is achieved by minimalizing the discontinuity -c jumps of the derivatives of the spline at the knots. the amount of -c smoothness of s(u,v) is determined by the condition that -c fp=sumi=1,mu(sumj=1,mv((r(i,j)-s(u(i),v(j)))**2))+(r0-s(0,v))**2 -c + (r1-s(pi,v))**2 <= s, with s a given non-negative constant. 
-c the fit s(u,v) is given in its b-spline representation and can be -c evaluated by means of routine bispev -c -c calling sequence: -c call spgrid(iopt,ider,mu,u,mv,v,r,r0,r1,s,nuest,nvest,nu,tu, -c * ,nv,tv,c,fp,wrk,lwrk,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer array of dimension 3, specifying different options. -c unchanged on exit. -c iopt(1):on entry iopt(1) must specify whether a least-squares spline -c (iopt(1)=-1) or a smoothing spline (iopt(1)=0 or 1) must be -c determined. -c if iopt(1)=0 the routine will start with an initial set of -c knots tu(i)=0,tu(i+4)=pi,i=1,...,4;tv(i)=v(1)+(i-4)*2*pi, -c i=1,...,8. -c if iopt(1)=1 the routine will continue with the set of knots -c found at the last call of the routine. -c attention: a call with iopt(1)=1 must always be immediately -c preceded by another call with iopt(1) = 1 or iopt(1) = 0. -c iopt(2):on entry iopt(2) must specify the requested order of conti- -c nuity at the pole u=0. -c if iopt(2)=0 only condition (1) must be fulfilled and -c if iopt(2)=1 conditions (1)+(2) must be fulfilled. -c iopt(3):on entry iopt(3) must specify the requested order of conti- -c nuity at the pole u=pi. -c if iopt(3)=0 only condition (3) must be fulfilled and -c if iopt(3)=1 conditions (3)+(4) must be fulfilled. -c ider : integer array of dimension 4, specifying different options. -c unchanged on exit. -c ider(1):on entry ider(1) must specify whether (ider(1)=0 or 1) or not -c (ider(1)=-1) there is a data value r0 at the pole u=0. -c if ider(1)=1, r0 will be considered to be the right function -c value, and it will be fitted exactly (s(0,v)=r0). -c if ider(1)=0, r0 will be considered to be a data value just -c like the other data values r(i,j). 
-c ider(2):on entry ider(2) must specify whether (ider(2)=1) or not -c (ider(2)=0) the approximation has vanishing derivatives -c dr(2) and dr(3) at the pole u=0 (in case iopt(2)=1) -c ider(3):on entry ider(3) must specify whether (ider(3)=0 or 1) or not -c (ider(3)=-1) there is a data value r1 at the pole u=pi. -c if ider(3)=1, r1 will be considered to be the right function -c value, and it will be fitted exactly (s(pi,v)=r1). -c if ider(3)=0, r1 will be considered to be a data value just -c like the other data values r(i,j). -c ider(4):on entry ider(4) must specify whether (ider(4)=1) or not -c (ider(4)=0) the approximation has vanishing derivatives -c dr(5) and dr(6) at the pole u=pi (in case iopt(3)=1) -c mu : integer. on entry mu must specify the number of grid points -c along the u-axis. unchanged on exit. -c mu >= 1, mu >=mumin=4-i0-i1-ider(2)-ider(4) with -c i0=min(1,ider(1)+1), i1=min(1,ider(3)+1) -c u : real array of dimension at least (mu). before entry, u(i) -c must be set to the u-co-ordinate of the i-th grid point -c along the u-axis, for i=1,2,...,mu. these values must be -c supplied in strictly ascending order. unchanged on exit. -c 0 < u(i) < pi. -c mv : integer. on entry mv must specify the number of grid points -c along the v-axis. mv > 3 . unchanged on exit. -c v : real array of dimension at least (mv). before entry, v(j) -c must be set to the v-co-ordinate of the j-th grid point -c along the v-axis, for j=1,2,...,mv. these values must be -c supplied in strictly ascending order. unchanged on exit. -c -pi <= v(1) < pi , v(mv) < v(1)+2*pi. -c r : real array of dimension at least (mu*mv). -c before entry, r(mv*(i-1)+j) must be set to the data value at -c the grid point (u(i),v(j)) for i=1,...,mu and j=1,...,mv. -c unchanged on exit. -c r0 : real value. on entry (if ider(1) >=0 ) r0 must specify the -c data value at the pole u=0. unchanged on exit. -c r1 : real value. on entry (if ider(1) >=0 ) r1 must specify the -c data value at the pole u=pi. 
unchanged on exit. -c s : real. on entry (if iopt(1)>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments -c nuest : integer. unchanged on exit. -c nvest : integer. unchanged on exit. -c on entry, nuest and nvest must specify an upper bound for the -c number of knots required in the u- and v-directions respect. -c these numbers will also determine the storage space needed by -c the routine. nuest >= 8, nvest >= 8. -c in most practical situation nuest = mu/2, nvest=mv/2, will -c be sufficient. always large enough are nuest=mu+6+iopt(2)+ -c iopt(3), nvest = mv+7, the number of knots needed for -c interpolation (s=0). see also further comments. -c nu : integer. -c unless ier=10 (in case iopt(1)>=0), nu will contain the total -c number of knots with respect to the u-variable, of the spline -c approximation returned. if the computation mode iopt(1)=1 is -c used, the value of nu should be left unchanged between sub- -c sequent calls. in case iopt(1)=-1, the value of nu should be -c specified on entry. -c tu : real array of dimension at least (nuest). -c on succesful exit, this array will contain the knots of the -c spline with respect to the u-variable, i.e. the position of -c the interior knots tu(5),...,tu(nu-4) as well as the position -c of the additional knots tu(1)=...=tu(4)=0 and tu(nu-3)=...= -c tu(nu)=pi needed for the b-spline representation. -c if the computation mode iopt(1)=1 is used,the values of tu(1) -c ...,tu(nu) should be left unchanged between subsequent calls. -c if the computation mode iopt(1)=-1 is used, the values tu(5), -c ...tu(nu-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c nv : integer. -c unless ier=10 (in case iopt(1)>=0), nv will contain the total -c number of knots with respect to the v-variable, of the spline -c approximation returned. 
if the computation mode iopt(1)=1 is -c used, the value of nv should be left unchanged between sub- -c sequent calls. in case iopt(1) = -1, the value of nv should -c be specified on entry. -c tv : real array of dimension at least (nvest). -c on succesful exit, this array will contain the knots of the -c spline with respect to the v-variable, i.e. the position of -c the interior knots tv(5),...,tv(nv-4) as well as the position -c of the additional knots tv(1),...,tv(4) and tv(nv-3),..., -c tv(nv) needed for the b-spline representation. -c if the computation mode iopt(1)=1 is used,the values of tv(1) -c ...,tv(nv) should be left unchanged between subsequent calls. -c if the computation mode iopt(1)=-1 is used, the values tv(5), -c ...tv(nv-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c c : real array of dimension at least (nuest-4)*(nvest-4). -c on succesful exit, c contains the coefficients of the spline -c approximation s(u,v) -c fp : real. unless ier=10, fp contains the sum of squared -c residuals of the spline approximation returned. -c wrk : real array of dimension (lwrk). used as workspace. -c if the computation mode iopt(1)=1 is used the values of -c wrk(1),..,wrk(12) should be left unchanged between subsequent -c calls. -c lwrk : integer. on entry lwrk must specify the actual dimension of -c the array wrk as declared in the calling (sub)program. -c lwrk must not be too small. -c lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+q -c where q is the larger of (mv+nvest) and nuest. -c iwrk : integer array of dimension (kwrk). used as workspace. -c if the computation mode iopt(1)=1 is used the values of -c iwrk(1),.,iwrk(5) should be left unchanged between subsequent -c calls. -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= 5+mu+mv+nuest+nvest. -c ier : integer. 
unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is an interpolating -c spline (fp=0). -c ier=-2 : normal return. the spline returned is the least-squares -c constrained polynomial. in this extreme case fp gives the -c upper bound for the smoothing factor s. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameters nuest and -c nvest. -c probably causes : nuest or nvest too small. if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the least-squares spline -c according to the current set of knots. the parameter fp -c gives the corresponding sum of squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small. -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c sum of squared residuals does not satisfy the condition -c abs(fp-s)/s < tol. -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1, -c -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0. -c -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0. 
-c mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8, -c kwrk>=5+mu+mv+nuest+nvest, -c lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+ -c max(nuest,mv+nvest) -c 0< u(i-1)=0: s>=0 -c if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c -c further comments: -c spgrid does not allow individual weighting of the data-values. -c so, if these were determined to widely different accuracies, then -c perhaps the general data set routine sphere should rather be used -c in spite of efficiency. -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the constrained least-squares polynomial(degrees 3,0)if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the accuracy of the data values. -c if the user has an idea of the statistical errors on the data, he -c can also find a proper estimate for s. for, by assuming that, if he -c specifies the right s, spgrid will return a spline s(u,v) which -c exactly reproduces the function underlying the data he can evaluate -c the sum((r(i,j)-s(u(i),v(j)))**2) to find a good estimate for this s -c for example, if he knows that the statistical errors on his r(i,j)- -c values is not greater than 0.1, he may expect that a good s should -c have a value not larger than mu*mv*(0.1)**2. 
-c if nothing is known about the statistical error in r(i,j), s must -c be determined by trial and error, taking account of the comments -c above. the best is then to start with a very large value of s (to -c determine the least-squares polynomial and the corresponding upper -c bound fp0 for s) and then to progressively decrease the value of s -c ( say by a factor 10 in the beginning, i.e. s=fp0/10,fp0/100,... -c and more carefully as the approximation shows more detail) to -c obtain closer fits. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt(1)=0. -c if iopt(1) = 1 the program will continue with the knots found at -c the last call of the routine. this will save a lot of computation -c time if spgrid is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. if the computation mode iopt(1) = 1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt(1)=1,the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c spgrid once more with the chosen value for s but now with iopt(1)=0. -c indeed, spgrid may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c the number of knots may also depend on the upper bounds nuest and -c nvest. 
indeed, if at a certain stage in spgrid the number of knots -c in one direction (say nu) has reached the value of its upper bound -c (nuest), then from that moment on all subsequent knots are added -c in the other (v) direction. this may indicate that the value of -c nuest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c for example, by setting nuest=8 (the lowest allowable value for -c nuest), the user can indicate that he wants an approximation which -c is a simple cubic polynomial in the variable u. -c -c other subroutines required: -c fpspgr,fpchec,fpchep,fpknot,fpopsp,fprati,fpgrsp,fpsysy,fpback, -c fpbacp,fpbspl,fpcyt1,fpcyt2,fpdisc,fpgivs,fprota -c -c references: -c dierckx p. : fast algorithms for smoothing data over a disc or a -c sphere using tensor product splines, in "algorithms -c for approximation", ed. j.c.mason and m.g.cox, -c clarendon press oxford, 1987, pp. 51-65 -c dierckx p. : fast algorithms for smoothing data over a disc or a -c sphere using tensor product splines, report tw73, dept. -c computer science,k.u.leuven, 1985. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : july 1985 -c latest update : march 1989 -c -c .. -c ..scalar arguments.. - real*8 r0,r1,s,fp - integer mu,mv,nuest,nvest,nu,nv,lwrk,kwrk,ier -c ..array arguments.. - integer iopt(3),ider(4),iwrk(kwrk) - real*8 u(mu),v(mv),r(mu*mv),c((nuest-4)*(nvest-4)),tu(nuest), - * tv(nvest),wrk(lwrk) -c ..local scalars.. - real*8 per,pi,tol,uu,ve,rmax,rmin,one,half,rn,rb,re - integer i,i1,i2,j,jwrk,j1,j2,kndu,kndv,knru,knrv,kwest,l, - * ldr,lfpu,lfpv,lwest,lww,m,maxit,mumin,muu,nc -c ..function references.. 
- real*8 datan2 - integer max0 -c ..subroutine references.. -c fpchec,fpchep,fpspgr -c .. -c set constants - one = 1d0 - half = 0.5e0 - pi = datan2(0d0,-one) - per = pi+pi - ve = v(1)+per -c we set up the parameters tol and maxit. - maxit = 20 - tol = 0.1e-02 -c before starting computations, a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - ier = 10 - if(iopt(1).lt.(-1) .or. iopt(1).gt.1) go to 200 - if(iopt(2).lt.0 .or. iopt(2).gt.1) go to 200 - if(iopt(3).lt.0 .or. iopt(3).gt.1) go to 200 - if(ider(1).lt.(-1) .or. ider(1).gt.1) go to 200 - if(ider(2).lt.0 .or. ider(2).gt.1) go to 200 - if(ider(2).eq.1 .and. iopt(2).eq.0) go to 200 - if(ider(3).lt.(-1) .or. ider(3).gt.1) go to 200 - if(ider(4).lt.0 .or. ider(4).gt.1) go to 200 - if(ider(4).eq.1 .and. iopt(3).eq.0) go to 200 - mumin = 4 - if(ider(1).ge.0) mumin = mumin-1 - if(iopt(2).eq.1 .and. ider(2).eq.1) mumin = mumin-1 - if(ider(3).ge.0) mumin = mumin-1 - if(iopt(3).eq.1 .and. ider(4).eq.1) mumin = mumin-1 - if(mumin.eq.0) mumin = 1 - if(mu.lt.mumin .or. mv.lt.4) go to 200 - if(nuest.lt.8 .or. nvest.lt.8) go to 200 - m = mu*mv - nc = (nuest-4)*(nvest-4) - lwest = 12+nuest*(mv+nvest+3)+24*nvest+4*mu+8*mv+ - * max0(nuest,mv+nvest) - kwest = 5+mu+mv+nuest+nvest - if(lwrk.lt.lwest .or. kwrk.lt.kwest) go to 200 - if(u(1).le.0. .or. u(mu).ge.pi) go to 200 - if(mu.eq.1) go to 30 - do 20 i=2,mu - if(u(i-1).ge.u(i)) go to 200 - 20 continue - 30 if(v(1).lt. (-pi) .or. v(1).ge.pi ) go to 200 - if(v(mv).ge.v(1)+per) go to 200 - do 40 i=2,mv - if(v(i-1).ge.v(i)) go to 200 - 40 continue - if(iopt(1).gt.0) go to 140 -c if not given, we compute an estimate for r0. - rn = mv - if(ider(1).lt.0) go to 45 - rb = r0 - go to 55 - 45 rb = 0. - do 50 i=1,mv - rb = rb+r(i) - 50 continue - rb = rb/rn -c if not given, we compute an estimate for r1. - 55 if(ider(3).lt.0) go to 60 - re = r1 - go to 70 - 60 re = 0. 
- j = m - do 65 i=1,mv - re = re+r(j) - j = j-1 - 65 continue - re = re/rn -c we determine the range of r-values. - 70 rmin = rb - rmax = re - do 80 i=1,m - if(r(i).lt.rmin) rmin = r(i) - if(r(i).gt.rmax) rmax = r(i) - 80 continue - wrk(5) = rb - wrk(6) = 0. - wrk(7) = 0. - wrk(8) = re - wrk(9) = 0. - wrk(10) = 0. - wrk(11) = rmax -rmin - wrk(12) = wrk(11) - iwrk(4) = mu - iwrk(5) = mu - if(iopt(1).eq.0) go to 140 - if(nu.lt.8 .or. nu.gt.nuest) go to 200 - if(nv.lt.11 .or. nv.gt.nvest) go to 200 - j = nu - do 90 i=1,4 - tu(i) = 0. - tu(j) = pi - j = j-1 - 90 continue - l = 13 - wrk(l) = 0. - if(iopt(2).eq.0) go to 100 - l = l+1 - uu = u(1) - if(uu.gt.tu(5)) uu = tu(5) - wrk(l) = uu*half - 100 do 110 i=1,mu - l = l+1 - wrk(l) = u(i) - 110 continue - if(iopt(3).eq.0) go to 120 - l = l+1 - uu = u(mu) - if(uu.lt.tu(nu-4)) uu = tu(nu-4) - wrk(l) = uu+(pi-uu)*half - 120 l = l+1 - wrk(l) = pi - muu = l-12 - call fpchec(wrk(13),muu,tu,nu,3,ier) - if(ier.ne.0) go to 200 - j1 = 4 - tv(j1) = v(1) - i1 = nv-3 - tv(i1) = ve - j2 = j1 - i2 = i1 - do 130 i=1,3 - i1 = i1+1 - i2 = i2-1 - j1 = j1+1 - j2 = j2-1 - tv(j2) = tv(i2)-per - tv(i1) = tv(j1)+per - 130 continue - l = 13 - do 135 i=1,mv - wrk(l) = v(i) - l = l+1 - 135 continue - wrk(l) = ve - call fpchep(wrk(13),mv+1,tv,nv,3,ier) - if (ier.eq.0) go to 150 - go to 200 - 140 if(s.lt.0.) go to 200 - if(s.eq.0. .and. (nuest.lt.(mu+6+iopt(2)+iopt(3)) .or. 
- * nvest.lt.(mv+7)) ) go to 200 -c we partition the working space and determine the spline approximation - 150 ldr = 5 - lfpu = 13 - lfpv = lfpu+nuest - lww = lfpv+nvest - jwrk = lwrk-12-nuest-nvest - knru = 6 - knrv = knru+mu - kndu = knrv+mv - kndv = kndu+nuest - call fpspgr(iopt,ider,u,mu,v,mv,r,m,rb,re,s,nuest,nvest,tol,maxit, - * - * nc,nu,tu,nv,tv,c,fp,wrk(1),wrk(2),wrk(3),wrk(4),wrk(lfpu), - * wrk(lfpv),wrk(ldr),wrk(11),iwrk(1),iwrk(2),iwrk(3),iwrk(4), - * iwrk(5),iwrk(knru),iwrk(knrv),iwrk(kndu),iwrk(kndv),wrk(lww), - * jwrk,ier) - 200 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/sphere.f b/scipy-0.10.1/scipy/interpolate/fitpack/sphere.f deleted file mode 100644 index cea3fe5242..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/sphere.f +++ /dev/null @@ -1,404 +0,0 @@ - subroutine sphere(iopt,m,teta,phi,r,w,s,ntest,npest,eps, - * nt,tt,np,tp,c,fp,wrk1,lwrk1,wrk2,lwrk2,iwrk,kwrk,ier) -c subroutine sphere determines a smooth bicubic spherical spline -c approximation s(teta,phi), 0 <= teta <= pi ; 0 <= phi <= 2*pi -c to a given set of data points (teta(i),phi(i),r(i)),i=1,2,...,m. -c such a spline has the following specific properties -c -c (1) s(0,phi) = constant 0 <=phi<= 2*pi. -c -c (2) s(pi,phi) = constant 0 <=phi<= 2*pi -c -c j j -c d s(teta,0) d s(teta,2*pi) -c (3) ----------- = ------------ 0 <=teta<=pi, j=0,1,2 -c j j -c d phi d phi -c -c d s(0,phi) d s(0,0) d s(0,pi/2) -c (4) ---------- = -------- *cos(phi) + ----------- *sin(phi) -c d teta d teta d teta -c -c d s(pi,phi) d s(pi,0) d s(pi,pi/2) -c (5) ----------- = ---------*cos(phi) + ------------*sin(phi) -c d teta d teta d teta -c -c if iopt =-1 sphere calculates a weighted least-squares spherical -c spline according to a given set of knots in teta- and phi- direction. -c if iopt >=0, the number of knots in each direction and their position -c tt(j),j=1,2,...,nt ; tp(j),j=1,2,...,np are chosen automatically by -c the routine. 
the smoothness of s(teta,phi) is then achieved by mini- -c malizing the discontinuity jumps of the derivatives of the spline -c at the knots. the amount of smoothness of s(teta,phi) is determined -c by the condition that fp = sum((w(i)*(r(i)-s(teta(i),phi(i))))**2) -c be <= s, with s a given non-negative constant. -c the spherical spline is given in the standard b-spline representation -c of bicubic splines and can be evaluated by means of subroutine bispev -c -c calling sequence: -c call sphere(iopt,m,teta,phi,r,w,s,ntest,npest,eps, -c * nt,tt,np,tp,c,fp,wrk1,lwrk1,wrk2,lwrk2,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer flag. on entry iopt must specify whether a weighted -c least-squares spherical spline (iopt=-1) or a smoothing -c spherical spline (iopt=0 or 1) must be determined. -c if iopt=0 the routine will start with an initial set of knots -c tt(i)=0,tt(i+4)=pi,i=1,...,4;tp(i)=0,tp(i+4)=2*pi,i=1,...,4. -c if iopt=1 the routine will continue with the set of knots -c found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately pre- -c ceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m >= 2. unchanged on exit. -c teta : real array of dimension at least (m). -c phi : real array of dimension at least (m). -c r : real array of dimension at least (m). -c before entry,teta(i),phi(i),r(i) must be set to the spherical -c co-ordinates of the i-th data point, for i=1,...,m.the order -c of the data points is immaterial. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) must -c be set to the i-th value in the set of weights. the w(i) must -c be strictly positive. unchanged on exit. -c s : real. on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments -c ntest : integer. unchanged on exit. -c npest : integer. 
unchanged on exit. -c on entry, ntest and npest must specify an upper bound for the -c number of knots required in the teta- and phi-directions. -c these numbers will also determine the storage space needed by -c the routine. ntest >= 8, npest >= 8. -c in most practical situation ntest = npest = 8+sqrt(m/2) will -c be sufficient. see also further comments. -c eps : real. -c on entry, eps must specify a threshold for determining the -c effective rank of an over-determined linear system of equat- -c ions. 0 < eps < 1. if the number of decimal digits in the -c computer representation of a real number is q, then 10**(-q) -c is a suitable value for eps in most practical applications. -c unchanged on exit. -c nt : integer. -c unless ier=10 (in case iopt >=0), nt will contain the total -c number of knots with respect to the teta-variable, of the -c spline approximation returned. if the computation mode iopt=1 -c is used, the value of nt should be left unchanged between -c subsequent calls. -c in case iopt=-1, the value of nt should be specified on entry -c tt : real array of dimension at least ntest. -c on succesful exit, this array will contain the knots of the -c spline with respect to the teta-variable, i.e. the position -c of the interior knots tt(5),...,tt(nt-4) as well as the -c position of the additional knots tt(1)=...=tt(4)=0 and -c tt(nt-3)=...=tt(nt)=pi needed for the b-spline representation -c if the computation mode iopt=1 is used, the values of tt(1), -c ...,tt(nt) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values tt(5), -c ...tt(nt-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c np : integer. -c unless ier=10 (in case iopt >=0), np will contain the total -c number of knots with respect to the phi-variable, of the -c spline approximation returned. if the computation mode iopt=1 -c is used, the value of np should be left unchanged between -c subsequent calls. 
-c in case iopt=-1, the value of np (>=9) should be specified -c on entry. -c tp : real array of dimension at least npest. -c on succesful exit, this array will contain the knots of the -c spline with respect to the phi-variable, i.e. the position of -c the interior knots tp(5),...,tp(np-4) as well as the position -c of the additional knots tp(1),...,tp(4) and tp(np-3),..., -c tp(np) needed for the b-spline representation. -c if the computation mode iopt=1 is used, the values of tp(1), -c ...,tp(np) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values tp(5), -c ...tp(np-4) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c c : real array of dimension at least (ntest-4)*(npest-4). -c on succesful exit, c contains the coefficients of the spline -c approximation s(teta,phi). -c fp : real. unless ier=10, fp contains the weighted sum of -c squared residuals of the spline approximation returned. -c wrk1 : real array of dimension (lwrk1). used as workspace. -c if the computation mode iopt=1 is used the value of wrk1(1) -c should be left unchanged between subsequent calls. -c on exit wrk1(2),wrk1(3),...,wrk1(1+ncof) will contain the -c values d(i)/max(d(i)),i=1,...,ncof=6+(np-7)*(nt-8) -c with d(i) the i-th diagonal element of the reduced triangular -c matrix for calculating the b-spline coefficients. it includes -c those elements whose square is less than eps,which are treat- -c ed as 0 in the case of presumed rank deficiency (ier<-2). -c lwrk1 : integer. on entry lwrk1 must specify the actual dimension of -c the array wrk1 as declared in the calling (sub)program. -c lwrk1 must not be too small. let -c u = ntest-7, v = npest-7, then -c lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m -c wrk2 : real array of dimension (lwrk2). used as workspace, but -c only in the case a rank deficient system is encountered. -c lwrk2 : integer. 
on entry lwrk2 must specify the actual dimension of -c the array wrk2 as declared in the calling (sub)program. -c lwrk2 > 0 . a save upper bound for lwrk2 = 48+21*v+7*u*v+ -c 4*(u-1)*v**2 where u,v are as above. if there are enough data -c points, scattered uniformly over the approximation domain -c and if the smoothing factor s is not too small, there is a -c good chance that this extra workspace is not needed. a lot -c of memory might therefore be saved by setting lwrk2=1. -c (see also ier > 10) -c iwrk : integer array of dimension (kwrk). used as workspace. -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= m+(ntest-7)*(npest-7). -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is a spherical -c interpolating spline (fp=0). -c ier=-2 : normal return. the spline returned is the weighted least- -c squares constrained polynomial . in this extreme case -c fp gives the upper bound for the smoothing factor s. -c ier<-2 : warning. the coefficients of the spline returned have been -c computed as the minimal norm least-squares solution of a -c (numerically) rank deficient system. (-ier) gives the rank. -c especially if the rank deficiency which can be computed as -c 6+(nt-8)*(np-7)+ier, is large the results may be inaccurate -c they could also seriously depend on the value of eps. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameters ntest and -c npest. -c probably causes : ntest or npest too small. 
if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the weighted least-squares -c spherical spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small or badly chosen eps. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=4 : error. no more knots can be added because the dimension -c of the spherical spline 6+(nt-8)*(np-7) already exceeds -c the number of data points m. -c probably causes : either s or m too small. -c the approximation returned is the weighted least-squares -c spherical spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=5 : error. no more knots can be added because the additional -c knot would (quasi) coincide with an old one. -c probably causes : s too small or too large a weight to an -c inaccurate data point. -c the approximation returned is the weighted least-squares -c spherical spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. 
-c -1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 00, i=1,...,m -c lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m -c kwrk >= m+(ntest-7)*(npest-7) -c if iopt=-1: 8<=nt<=ntest , 9<=np<=npest -c 0=0: s>=0 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c ier>10 : error. lwrk2 is too small, i.e. there is not enough work- -c space for computing the minimal least-squares solution of -c a rank deficient system of linear equations. ier gives the -c requested value for lwrk2. there is no approximation re- -c turned but, having saved the information contained in nt, -c np,tt,tp,wrk1, and having adjusted the value of lwrk2 and -c the dimension of the array wrk2 accordingly, the user can -c continue at the point the program was left, by calling -c sphere with iopt=1. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the constrained weighted least-squares polynomial if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. -c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c r(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). 
if nothing is known about the statistical error in r(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c polynomial and the corresponding upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximation shows more detail) to obtain closer fits. -c to choose s very small is strongly discouraged. this considerably -c increases computation time and memory requirements. it may also -c cause rank-deficiency (ier<-2) and endager numerical stability. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if sphere is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. if the computation mode iopt=1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). therefore, if after a number -c of trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c sphere once more with the selected value for s but now with iopt=0. -c indeed, sphere may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c the number of knots may also depend on the upper bounds ntest and -c npest. 
indeed, if at a certain stage in sphere the number of knots -c in one direction (say nt) has reached the value of its upper bound -c (ntest), then from that moment on all subsequent knots are added -c in the other (phi) direction. this may indicate that the value of -c ntest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c for example, by setting ntest=8 (the lowest allowable value for -c ntest), the user can indicate that he wants an approximation which -c is a cubic polynomial in the variable teta. -c -c other subroutines required: -c fpback,fpbspl,fpsphe,fpdisc,fpgivs,fprank,fprati,fprota,fporde, -c fprpsp -c -c references: -c dierckx p. : algorithms for smoothing data on the sphere with tensor -c product splines, computing 32 (1984) 319-342. -c dierckx p. : algorithms for smoothing data on the sphere with tensor -c product splines, report tw62, dept. computer science, -c k.u.leuven, 1983. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : july 1983 -c latest update : march 1989 -c -c .. -c ..scalar arguments.. - real*8 s,eps,fp - integer iopt,m,ntest,npest,nt,np,lwrk1,lwrk2,kwrk,ier -c ..array arguments.. - real*8 teta(m),phi(m),r(m),w(m),tt(ntest),tp(npest), - * c((ntest-4)*(npest-4)),wrk1(lwrk1),wrk2(lwrk2) - integer iwrk(kwrk) -c ..local scalars.. - real*8 tol,pi,pi2,one - integer i,ib1,ib3,ki,kn,kwest,la,lbt,lcc,lcs,lro,j - * lbp,lco,lf,lff,lfp,lh,lq,lst,lsp,lwest,maxit,ncest,ncc,ntt, - * npp,nreg,nrint,ncof,nt4,np4 -c ..function references.. - real*8 atan -c ..subroutine references.. -c fpsphe -c .. -c set constants - one = 0.1e+01 -c we set up the parameters tol and maxit. 
- maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid,control is immediately repassed to the calling program. - ier = 10 - if(eps.le.0. .or. eps.ge.1.) go to 80 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 80 - if(m.lt.2) go to 80 - if(ntest.lt.8 .or. npest.lt.8) go to 80 - nt4 = ntest-4 - np4 = npest-4 - ncest = nt4*np4 - ntt = ntest-7 - npp = npest-7 - ncc = 6+npp*(ntt-1) - nrint = ntt+npp - nreg = ntt*npp - ncof = 6+3*npp - ib1 = 4*npp - ib3 = ib1+3 - if(ncof.gt.ib1) ib1 = ncof - if(ncof.gt.ib3) ib3 = ncof - lwest = 185+52*npp+10*ntt+14*ntt*npp+8*(m+(ntt-1)*npp**2) - kwest = m+nreg - if(lwrk1.lt.lwest .or. kwrk.lt.kwest) go to 80 - if(iopt.gt.0) go to 60 - pi = atan(one)*4 - pi2 = pi+pi - do 20 i=1,m - if(w(i).le.0.) go to 80 - if(teta(i).lt.0. .or. teta(i).gt.pi) go to 80 - if(phi(i) .lt.0. .or. phi(i).gt.pi2) go to 80 - 20 continue - if(iopt.eq.0) go to 60 - ntt = nt-8 - if(ntt.lt.0 .or. nt.gt.ntest) go to 80 - if(ntt.eq.0) go to 40 - tt(4) = 0. - do 30 i=1,ntt - j = i+4 - if(tt(j).le.tt(j-1) .or. tt(j).ge.pi) go to 80 - 30 continue - 40 npp = np-8 - if(npp.lt.1 .or. np.gt.npest) go to 80 - tp(4) = 0. - do 50 i=1,npp - j = i+4 - if(tp(j).le.tp(j-1) .or. tp(j).ge.pi2) go to 80 - 50 continue - go to 70 - 60 if(s.lt.0.) 
go to 80 - 70 ier = 0 -c we partition the working space and determine the spline approximation - kn = 1 - ki = kn+m - lq = 2 - la = lq+ncc*ib3 - lf = la+ncc*ib1 - lff = lf+ncc - lfp = lff+ncest - lco = lfp+nrint - lh = lco+nrint - lbt = lh+ib3 - lbp = lbt+5*ntest - lro = lbp+5*npest - lcc = lro+npest - lcs = lcc+npest - lst = lcs+npest - lsp = lst+m*4 - call fpsphe(iopt,m,teta,phi,r,w,s,ntest,npest,eps,tol,maxit, - * ib1,ib3,ncest,ncc,nrint,nreg,nt,tt,np,tp,c,fp,wrk1(1),wrk1(lfp), - * wrk1(lco),wrk1(lf),wrk1(lff),wrk1(lro),wrk1(lcc),wrk1(lcs), - * wrk1(la),wrk1(lq),wrk1(lbt),wrk1(lbp),wrk1(lst),wrk1(lsp), - * wrk1(lh),iwrk(ki),iwrk(kn),wrk2,lwrk2,ier) - 80 return - end - diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/splder.f b/scipy-0.10.1/scipy/interpolate/fitpack/splder.f deleted file mode 100644 index 8e89e9f27c..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/splder.f +++ /dev/null @@ -1,190 +0,0 @@ - subroutine splder(t,n,c,k,nu,x,y,m,e,wrk,ier) -c subroutine splder evaluates in a number of points x(i),i=1,2,...,m -c the derivative of order nu of a spline s(x) of degree k,given in -c its b-spline representation. -c -c calling sequence: -c call splder(t,n,c,k,nu,x,y,m,e,wrk,ier) -c -c input parameters: -c t : array,length n, which contains the position of the knots. -c n : integer, giving the total number of knots of s(x). -c c : array,length n, which contains the b-spline coefficients. -c k : integer, giving the degree of s(x). -c nu : integer, specifying the order of the derivative. 0<=nu<=k -c x : array,length m, which contains the points where the deriv- -c ative of s(x) must be evaluated. -c m : integer, giving the number of points where the derivative -c of s(x) must be evaluated -c e : integer, if 0 the spline is extrapolated from the end -c spans for points not in the support, if 1 the spline -c evaluates to zero for those points, and if 2 ier is set to -c 1 and the subroutine returns. -c wrk : real array of dimension n. 
used as working space. -c -c output parameters: -c y : array,length m, giving the value of the derivative of s(x) -c at the different points. -c ier : error flag -c ier = 0 : normal return -c ier = 1 : argument out of bounds and e == 2 -c ier =10 : invalid input data (see restrictions) -c -c restrictions: -c 0 <= nu <= k -c m >= 1 -c t(k+1) <= x(i) <= x(i+1) <= t(n-k) , i=1,2,...,m-1. -c -c other subroutines required: fpbspl -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c++ pearu: 13 aug 20003 -c++ - disabled cliping x values to interval [min(t),max(t)] -c++ - removed the restriction of the orderness of x values -c++ - fixed initialization of sp to double precision value -c -c ..scalar arguments.. - integer n,k,nu,m,e,ier -c ..array arguments.. - real*8 t(n),c(n),x(m),y(m),wrk(n) -c ..local scalars.. - integer i,j,kk,k1,k2,l,ll,l1,l2,nk1,nk2,nn - real*8 ak,arg,fac,sp,tb,te -c++.. - integer k3 -c..++ -c ..local arrays .. - real*8 h(6) -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 - if(nu.lt.0 .or. nu.gt.k) go to 200 -c-- if(m-1) 200,30,10 -c++.. - if(m.lt.1) go to 200 -c..++ -c-- 10 do 20 i=2,m -c-- if(x(i).lt.x(i-1)) go to 200 -c-- 20 continue - 30 ier = 0 -c fetch tb and te, the boundaries of the approximation interval. 
- k1 = k+1 - k3 = k1+1 - nk1 = n-k1 - tb = t(k1) - te = t(nk1+1) -c the derivative of order nu of a spline of degree k is a spline of -c degree k-nu,the b-spline coefficients wrk(i) of which can be found -c using the recurrence scheme of de boor. - l = 1 - kk = k - nn = n - do 40 i=1,nk1 - wrk(i) = c(i) - 40 continue - if(nu.eq.0) go to 100 - nk2 = nk1 - do 60 j=1,nu - ak = kk - nk2 = nk2-1 - l1 = l - do 50 i=1,nk2 - l1 = l1+1 - l2 = l1+kk - fac = t(l2)-t(l1) - if(fac.le.0.) go to 50 - wrk(i) = ak*(wrk(i+1)-wrk(i))/fac - 50 continue - l = l+1 - kk = kk-1 - 60 continue - if(kk.ne.0) go to 100 -c if nu=k the derivative is a piecewise constant function - j = 1 - do 90 i=1,m - arg = x(i) -c++.. -c check if arg is in the support - if (arg .lt. tb .or. arg .gt. te) then - if (e .eq. 0) then - goto 65 - else if (e .eq. 1) then - y(i) = 0 - goto 90 - else if (e .eq. 2) then - ier = 1 - goto 200 - endif - endif -c search for knot interval t(l) <= arg < t(l+1) - 65 if(arg.ge.t(l) .or. l+1.eq.k2) go to 70 - l1 = l - l = l-1 - j = j-1 - go to 65 -c..++ - 70 if(arg.lt.t(l+1) .or. l.eq.nk1) go to 80 - l = l+1 - j = j+1 - go to 70 - 80 y(i) = wrk(j) - 90 continue - go to 200 - - 100 l = k1 - l1 = l+1 - k2 = k1-nu -c main loop for the different points. - do 180 i=1,m -c fetch a new x-value arg. - arg = x(i) -c check if arg is in the support - if (arg .lt. tb .or. arg .gt. te) then - if (e .eq. 0) then - goto 135 - else if (e .eq. 1) then - y(i) = 0 - goto 180 - else if (e .eq. 2) then - ier = 1 - goto 200 - endif - endif -c search for knot interval t(l) <= arg < t(l+1) - 135 if(arg.ge.t(l) .or. l1.eq.k3) go to 140 - l1 = l - l = l-1 - go to 135 -c..++ - 140 if(arg.lt.t(l1) .or. l.eq.nk1) go to 150 - l = l1 - l1 = l+1 - go to 140 -c evaluate the non-zero b-splines of degree k-nu at arg. - 150 call fpbspl(t,n,kk,arg,l,h) -c find the value of the derivative at x=arg. 
- sp = 0.0d0 - ll = l-k1 - do 160 j=1,k2 - ll = ll+1 - sp = sp+wrk(ll)*h(j) - 160 continue - y(i) = sp - 180 continue - 200 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/splev.f b/scipy-0.10.1/scipy/interpolate/fitpack/splev.f deleted file mode 100644 index ef7733b5fc..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/splev.f +++ /dev/null @@ -1,130 +0,0 @@ - subroutine splev(t,n,c,k,x,y,m,e,ier) -c subroutine splev evaluates in a number of points x(i),i=1,2,...,m -c a spline s(x) of degree k, given in its b-spline representation. -c -c calling sequence: -c call splev(t,n,c,k,x,y,m,e,ier) -c -c input parameters: -c t : array,length n, which contains the position of the knots. -c n : integer, giving the total number of knots of s(x). -c c : array,length n, which contains the b-spline coefficients. -c k : integer, giving the degree of s(x). -c x : array,length m, which contains the points where s(x) must -c be evaluated. -c m : integer, giving the number of points where s(x) must be -c evaluated. -c e : integer, if 0 the spline is extrapolated from the end -c spans for points not in the support, if 1 the spline -c evaluates to zero for those points, and if 2 ier is set to -c 1 and the subroutine returns. -c -c output parameter: -c y : array,length m, giving the value of s(x) at the different -c points. -c ier : error flag -c ier = 0 : normal return -c ier = 1 : argument out of bounds and e == 2 -c ier =10 : invalid input data (see restrictions) -c -c restrictions: -c m >= 1 -c-- t(k+1) <= x(i) <= x(i+1) <= t(n-k) , i=1,2,...,m-1. -c -c other subroutines required: fpbspl. -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. 
computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c++ pearu: 11 aug 2003 -c++ - disabled cliping x values to interval [min(t),max(t)] -c++ - removed the restriction of the orderness of x values -c++ - fixed initialization of sp to double precision value -c -c ..scalar arguments.. - integer n, k, m, e, ier -c ..array arguments.. - real*8 t(n), c(n), x(m), y(m) -c ..local scalars.. - integer i, j, k1, l, ll, l1, nk1 -c++.. - integer k2 -c..++ - real*8 arg, sp, tb, te -c ..local array.. - real*8 h(20) -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. - ier = 10 -c-- if(m-1) 100,30,10 -c++.. - if (m .lt. 1) go to 100 -c..++ -c-- 10 do 20 i=2,m -c-- if(x(i).lt.x(i-1)) go to 100 -c-- 20 continue - 30 ier = 0 -c fetch tb and te, the boundaries of the approximation interval. - k1 = k + 1 -c++.. - k2 = k1 + 1 -c..++ - nk1 = n - k1 - tb = t(k1) - te = t(nk1 + 1) - l = k1 - l1 = l + 1 -c main loop for the different points. - do 80 i = 1, m -c fetch a new x-value arg. - arg = x(i) -c check if arg is in the support - if (arg .lt. tb .or. arg .gt. te) then - if (e .eq. 0) then - goto 35 - else if (e .eq. 1) then - y(i) = 0 - goto 80 - else if (e .eq. 2) then - ier = 1 - goto 100 - endif - endif -c search for knot interval t(l) <= arg < t(l+1) -c++.. - 35 if (arg .ge. t(l) .or. l1 .eq. k2) go to 40 - l1 = l - l = l - 1 - go to 35 -c..++ - 40 if(arg .lt. t(l1) .or. l .eq. nk1) go to 50 - l = l1 - l1 = l + 1 - go to 40 -c evaluate the non-zero b-splines at arg. - 50 call fpbspl(t, n, k, arg, l, h) -c find the value of s(x) at x=arg. 
- sp = 0.0d0 - ll = l - k1 - do 60 j = 1, k1 - ll = ll + 1 - sp = sp + c(ll)*h(j) - 60 continue - y(i) = sp - 80 continue - 100 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/splint.f b/scipy-0.10.1/scipy/interpolate/fitpack/splint.f deleted file mode 100644 index be58142654..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/splint.f +++ /dev/null @@ -1,58 +0,0 @@ - real*8 function splint(t,n,c,k,a,b,wrk) -c function splint calculates the integral of a spline function s(x) -c of degree k, which is given in its normalized b-spline representation -c -c calling sequence: -c aint = splint(t,n,c,k,a,b,wrk) -c -c input parameters: -c t : array,length n,which contains the position of the knots -c of s(x). -c n : integer, giving the total number of knots of s(x). -c c : array,length n, containing the b-spline coefficients. -c k : integer, giving the degree of s(x). -c a,b : real values, containing the end points of the integration -c interval. s(x) is considered to be identically zero outside -c the interval (t(k+1),t(n-k)). -c -c output parameter: -c aint : real, containing the integral of s(x) between a and b. -c wrk : real array, length n. used as working space -c on output, wrk will contain the integrals of the normalized -c b-splines defined on the set of knots. -c -c other subroutines required: fpintb. -c -c references : -c gaffney p.w. : the calculation of indefinite integrals of b-splines -c j. inst. maths applics 17 (1976) 37-41. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - real*8 a,b - integer n,k -c ..array arguments.. - real*8 t(n),c(n),wrk(n) -c ..local scalars.. - integer i,nk1 -c .. 
- nk1 = n-k-1 -c calculate the integrals wrk(i) of the normalized b-splines -c ni,k+1(x), i=1,2,...nk1. - call fpintb(t,n,wrk,nk1,a,b) -c calculate the integral of s(x). - splint = 0.0d0 - do 10 i=1,nk1 - splint = splint+c(i)*wrk(i) - 10 continue - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/sproot.f b/scipy-0.10.1/scipy/interpolate/fitpack/sproot.f deleted file mode 100644 index da520cfe35..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/sproot.f +++ /dev/null @@ -1,183 +0,0 @@ - subroutine sproot(t,n,c,zero,mest,m,ier) -c subroutine sproot finds the zeros of a cubic spline s(x),which is -c given in its normalized b-spline representation. -c -c calling sequence: -c call sproot(t,n,c,zero,mest,m,ier) -c -c input parameters: -c t : real array,length n, containing the knots of s(x). -c n : integer, containing the number of knots. n>=8 -c c : real array,length n, containing the b-spline coefficients. -c mest : integer, specifying the dimension of array zero. -c -c output parameters: -c zero : real array,lenth mest, containing the zeros of s(x). -c m : integer,giving the number of zeros. -c ier : error flag: -c ier = 0: normal return. -c ier = 1: the number of zeros exceeds mest. -c ier =10: invalid input data (see restrictions). -c -c other subroutines required: fpcuro -c -c restrictions: -c 1) n>= 8. -c 2) t(4) < t(5) < ... < t(n-4) < t(n-3). -c t(1) <= t(2) <= t(3) <= t(4) -c t(n-3) <= t(n-2) <= t(n-1) <= t(n) -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c .. -c ..scalar arguments.. - integer n,mest,m,ier -c ..array arguments.. - real*8 t(n),c(n),zero(mest) -c ..local scalars.. - integer i,j,j1,l,n4 - real*8 ah,a0,a1,a2,a3,bh,b0,b1,c1,c2,c3,c4,c5,d4,d5,h1,h2, - * three,two,t1,t2,t3,t4,t5,zz - logical z0,z1,z2,z3,z4,nz0,nz1,nz2,nz3,nz4 -c ..local array.. - real*8 y(3) -c .. 
-c set some constants - two = 0.2d+01 - three = 0.3d+01 -c before starting computations a data check is made. if the input data -c are invalid, control is immediately repassed to the calling program. - n4 = n-4 - ier = 10 - if(n.lt.8) go to 800 - j = n - do 10 i=1,3 - if(t(i).gt.t(i+1)) go to 800 - if(t(j).lt.t(j-1)) go to 800 - j = j-1 - 10 continue - do 20 i=4,n4 - if(t(i).ge.t(i+1)) go to 800 - 20 continue -c the problem considered reduces to finding the zeros of the cubic -c polynomials pl(x) which define the cubic spline in each knot -c interval t(l)<=x<=t(l+1). a zero of pl(x) is also a zero of s(x) on -c the condition that it belongs to the knot interval. -c the cubic polynomial pl(x) is determined by computing s(t(l)), -c s'(t(l)),s(t(l+1)) and s'(t(l+1)). in fact we only have to compute -c s(t(l+1)) and s'(t(l+1)); because of the continuity conditions of -c splines and their derivatives, the value of s(t(l)) and s'(t(l)) -c is already known from the foregoing knot interval. - ier = 0 -c evaluate some constants for the first knot interval - h1 = t(4)-t(3) - h2 = t(5)-t(4) - t1 = t(4)-t(2) - t2 = t(5)-t(3) - t3 = t(6)-t(4) - t4 = t(5)-t(2) - t5 = t(6)-t(3) -c calculate a0 = s(t(4)) and ah = s'(t(4)). - c1 = c(1) - c2 = c(2) - c3 = c(3) - c4 = (c2-c1)/t4 - c5 = (c3-c2)/t5 - d4 = (h2*c1+t1*c2)/t4 - d5 = (t3*c2+h1*c3)/t5 - a0 = (h2*d4+h1*d5)/t2 - ah = three*(h2*c4+h1*c5)/t2 - z1 = .true. - if(ah.lt.0.0d0) z1 = .false. - nz1 = .not.z1 - m = 0 -c main loop for the different knot intervals. - do 300 l=4,n4 -c evaluate some constants for the knot interval t(l) <= x <= t(l+1). - h1 = h2 - h2 = t(l+2)-t(l+1) - t1 = t2 - t2 = t3 - t3 = t(l+3)-t(l+1) - t4 = t5 - t5 = t(l+3)-t(l) -c find a0 = s(t(l)), ah = s'(t(l)), b0 = s(t(l+1)) and bh = s'(t(l+1)). 
- c1 = c2 - c2 = c3 - c3 = c(l) - c4 = c5 - c5 = (c3-c2)/t5 - d4 = (h2*c1+t1*c2)/t4 - d5 = (h1*c3+t3*c2)/t5 - b0 = (h2*d4+h1*d5)/t2 - bh = three*(h2*c4+h1*c5)/t2 -c calculate the coefficients a0,a1,a2 and a3 of the cubic polynomial -c pl(x) = ql(y) = a0+a1*y+a2*y**2+a3*y**3 ; y = (x-t(l))/(t(l+1)-t(l)). - a1 = ah*h1 - b1 = bh*h1 - a2 = three*(b0-a0)-b1-two*a1 - a3 = two*(a0-b0)+b1+a1 -c test whether or not pl(x) could have a zero in the range -c t(l) <= x <= t(l+1). - z3 = .true. - if(b1.lt.0.0d0) z3 = .false. - nz3 = .not.z3 - if(a0*b0.le.0.0d0) go to 100 - z0 = .true. - if(a0.lt.0.0d0) z0 = .false. - nz0 = .not.z0 - z2 = .true. - if(a2.lt.0.) z2 = .false. - nz2 = .not.z2 - z4 = .true. - if(3.0d0*a3+a2.lt.0.0d0) z4 = .false. - nz4 = .not.z4 - if(.not.((z0.and.(nz1.and.(z3.or.z2.and.nz4).or.nz2.and. - * z3.and.z4).or.nz0.and.(z1.and.(nz3.or.nz2.and.z4).or.z2.and. - * nz3.and.nz4))))go to 200 -c find the zeros of ql(y). - 100 call fpcuro(a3,a2,a1,a0,y,j) - if(j.eq.0) go to 200 -c find which zeros of pl(x) are zeros of s(x). - do 150 i=1,j - if(y(i).lt.0.0d0 .or. y(i).gt.1.0d0) go to 150 -c test whether the number of zeros of s(x) exceeds mest. - if(m.ge.mest) go to 700 - m = m+1 - zero(m) = t(l)+h1*y(i) - 150 continue - 200 a0 = b0 - ah = bh - z1 = z3 - nz1 = nz3 - 300 continue -c the zeros of s(x) are arranged in increasing order. 
- if(m.lt.2) go to 800 - do 400 i=2,m - j = i - 350 j1 = j-1 - if(j1.eq.0) go to 400 - if(zero(j).ge.zero(j1)) go to 400 - zz = zero(j) - zero(j) = zero(j1) - zero(j1) = zz - j = j1 - go to 350 - 400 continue - j = m - m = 1 - do 500 i=2,j - if(zero(i).eq.zero(m)) go to 500 - m = m+1 - zero(m) = zero(i) - 500 continue - go to 800 - 700 ier = 1 - 800 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/surev.f b/scipy-0.10.1/scipy/interpolate/fitpack/surev.f deleted file mode 100644 index ac3543221d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/surev.f +++ /dev/null @@ -1,106 +0,0 @@ - subroutine surev(idim,tu,nu,tv,nv,c,u,mu,v,mv,f,mf,wrk,lwrk, - * iwrk,kwrk,ier) -c subroutine surev evaluates on a grid (u(i),v(j)),i=1,...,mu; j=1,... -c ,mv a bicubic spline surface of dimension idim, given in the -c b-spline representation. -c -c calling sequence: -c call surev(idim,tu,nu,tv,nv,c,u,mu,v,mv,f,mf,wrk,lwrk, -c * iwrk,kwrk,ier) -c -c input parameters: -c idim : integer, specifying the dimension of the spline surface. -c tu : real array, length nu, which contains the position of the -c knots in the u-direction. -c nu : integer, giving the total number of knots in the u-direction -c tv : real array, length nv, which contains the position of the -c knots in the v-direction. -c nv : integer, giving the total number of knots in the v-direction -c c : real array, length (nu-4)*(nv-4)*idim, which contains the -c b-spline coefficients. -c u : real array of dimension (mu). -c before entry u(i) must be set to the u co-ordinate of the -c i-th grid point along the u-axis. -c tu(4)<=u(i-1)<=u(i)<=tu(nu-3), i=2,...,mu. -c mu : on entry mu must specify the number of grid points along -c the u-axis. mu >=1. -c v : real array of dimension (mv). -c before entry v(j) must be set to the v co-ordinate of the -c j-th grid point along the v-axis. -c tv(4)<=v(j-1)<=v(j)<=tv(nv-3), j=2,...,mv. 
-c mv : on entry mv must specify the number of grid points along -c the v-axis. mv >=1. -c mf : on entry, mf must specify the dimension of the array f. -c mf >= mu*mv*idim -c wrk : real array of dimension lwrk. used as workspace. -c lwrk : integer, specifying the dimension of wrk. -c lwrk >= 4*(mu+mv) -c iwrk : integer array of dimension kwrk. used as workspace. -c kwrk : integer, specifying the dimension of iwrk. kwrk >= mu+mv. -c -c output parameters: -c f : real array of dimension (mf). -c on succesful exit f(mu*mv*(l-1)+mv*(i-1)+j) contains the -c l-th co-ordinate of the bicubic spline surface at the -c point (u(i),v(j)),l=1,...,idim,i=1,...,mu;j=1,...,mv. -c ier : integer error flag -c ier=0 : normal return -c ier=10: invalid input data (see restrictions) -c -c restrictions: -c mu >=1, mv >=1, lwrk>=4*(mu+mv), kwrk>=mu+mv , mf>=mu*mv*idim -c tu(4) <= u(i-1) <= u(i) <= tu(nu-3), i=2,...,mu -c tv(4) <= v(j-1) <= v(j) <= tv(nv-3), j=2,...,mv -c -c other subroutines required: -c fpsuev,fpbspl -c -c references : -c de boor c : on calculating with b-splines, j. approximation theory -c 6 (1972) 50-62. -c cox m.g. : the numerical evaluation of b-splines, j. inst. maths -c applics 10 (1972) 134-149. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author : -c p.dierckx -c dept. computer science, k.u.leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c latest update : march 1987 -c -c ..scalar arguments.. - integer idim,nu,nv,mu,mv,mf,lwrk,kwrk,ier -c ..array arguments.. - integer iwrk(kwrk) - real*8 tu(nu),tv(nv),c((nu-4)*(nv-4)*idim),u(mu),v(mv),f(mf), - * wrk(lwrk) -c ..local scalars.. - integer i,muv -c .. -c before starting computations a data check is made. if the input data -c are invalid control is immediately repassed to the calling program. 
- ier = 10 - if(mf.lt.mu*mv*idim) go to 100 - muv = mu+mv - if(lwrk.lt.4*muv) go to 100 - if(kwrk.lt.muv) go to 100 - if (mu.lt.1) go to 100 - if (mu.eq.1) go to 30 - go to 10 - 10 do 20 i=2,mu - if(u(i).lt.u(i-1)) go to 100 - 20 continue - 30 if (mv.lt.1) go to 100 - if (mv.eq.1) go to 60 - go to 40 - 40 do 50 i=2,mv - if(v(i).lt.v(i-1)) go to 100 - 50 continue - 60 ier = 0 - call fpsuev(idim,tu,nu,tv,nv,c,u,mu,v,mv,f,wrk(1),wrk(4*mu+1), - * iwrk(1),iwrk(mu+1)) - 100 return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack/surfit.f b/scipy-0.10.1/scipy/interpolate/fitpack/surfit.f deleted file mode 100644 index 73d3f18806..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack/surfit.f +++ /dev/null @@ -1,412 +0,0 @@ - subroutine surfit(iopt,m,x,y,z,w,xb,xe,yb,ye,kx,ky,s,nxest,nyest, - * nmax,eps,nx,tx,ny,ty,c,fp,wrk1,lwrk1,wrk2,lwrk2,iwrk,kwrk,ier) -c given the set of data points (x(i),y(i),z(i)) and the set of positive -c numbers w(i),i=1,...,m, subroutine surfit determines a smooth bivar- -c iate spline approximation s(x,y) of degrees kx and ky on the rect- -c angle xb <= x <= xe, yb <= y <= ye. -c if iopt = -1 surfit calculates the weighted least-squares spline -c according to a given set of knots. -c if iopt >= 0 the total numbers nx and ny of these knots and their -c position tx(j),j=1,...,nx and ty(j),j=1,...,ny are chosen automatic- -c ally by the routine. the smoothness of s(x,y) is then achieved by -c minimalizing the discontinuity jumps in the derivatives of s(x,y) -c across the boundaries of the subpanels (tx(i),tx(i+1))*(ty(j),ty(j+1). -c the amounth of smoothness is determined by the condition that f(p) = -c sum ((w(i)*(z(i)-s(x(i),y(i))))**2) be <= s, with s a given non-neg- -c ative constant, called the smoothing factor. -c the fit is given in the b-spline representation (b-spline coefficients -c c((ny-ky-1)*(i-1)+j),i=1,...,nx-kx-1;j=1,...,ny-ky-1) and can be eval- -c uated by means of subroutine bispev. 
-c -c calling sequence: -c call surfit(iopt,m,x,y,z,w,xb,xe,yb,ye,kx,ky,s,nxest,nyest, -c * nmax,eps,nx,tx,ny,ty,c,fp,wrk1,lwrk1,wrk2,lwrk2,iwrk,kwrk,ier) -c -c parameters: -c iopt : integer flag. on entry iopt must specify whether a weighted -c least-squares spline (iopt=-1) or a smoothing spline (iopt=0 -c or 1) must be determined. -c if iopt=0 the routine will start with an initial set of knots -c tx(i)=xb,tx(i+kx+1)=xe,i=1,...,kx+1;ty(i)=yb,ty(i+ky+1)=ye,i= -c 1,...,ky+1. if iopt=1 the routine will continue with the set -c of knots found at the last call of the routine. -c attention: a call with iopt=1 must always be immediately pre- -c ceded by another call with iopt=1 or iopt=0. -c unchanged on exit. -c m : integer. on entry m must specify the number of data points. -c m >= (kx+1)*(ky+1). unchanged on exit. -c x : real array of dimension at least (m). -c y : real array of dimension at least (m). -c z : real array of dimension at least (m). -c before entry, x(i),y(i),z(i) must be set to the co-ordinates -c of the i-th data point, for i=1,...,m. the order of the data -c points is immaterial. unchanged on exit. -c w : real array of dimension at least (m). before entry, w(i) must -c be set to the i-th value in the set of weights. the w(i) must -c be strictly positive. unchanged on exit. -c xb,xe : real values. on entry xb,xe,yb and ye must specify the bound- -c yb,ye aries of the rectangular approximation domain. -c xb<=x(i)<=xe,yb<=y(i)<=ye,i=1,...,m. unchanged on exit. -c kx,ky : integer values. on entry kx and ky must specify the degrees -c of the spline. 1<=kx,ky<=5. it is recommended to use bicubic -c (kx=ky=3) splines. unchanged on exit. -c s : real. on entry (in case iopt>=0) s must specify the smoothing -c factor. s >=0. unchanged on exit. -c for advice on the choice of s see further comments -c nxest : integer. unchanged on exit. -c nyest : integer. unchanged on exit. 
-c on entry, nxest and nyest must specify an upper bound for the -c number of knots required in the x- and y-directions respect. -c these numbers will also determine the storage space needed by -c the routine. nxest >= 2*(kx+1), nyest >= 2*(ky+1). -c in most practical situation nxest = kx+1+sqrt(m/2), nyest = -c ky+1+sqrt(m/2) will be sufficient. see also further comments. -c nmax : integer. on entry nmax must specify the actual dimension of -c the arrays tx and ty. nmax >= nxest, nmax >=nyest. -c unchanged on exit. -c eps : real. -c on entry, eps must specify a threshold for determining the -c effective rank of an over-determined linear system of equat- -c ions. 0 < eps < 1. if the number of decimal digits in the -c computer representation of a real number is q, then 10**(-q) -c is a suitable value for eps in most practical applications. -c unchanged on exit. -c nx : integer. -c unless ier=10 (in case iopt >=0), nx will contain the total -c number of knots with respect to the x-variable, of the spline -c approximation returned. if the computation mode iopt=1 is -c used, the value of nx should be left unchanged between sub- -c sequent calls. -c in case iopt=-1, the value of nx should be specified on entry -c tx : real array of dimension nmax. -c on succesful exit, this array will contain the knots of the -c spline with respect to the x-variable, i.e. the position of -c the interior knots tx(kx+2),...,tx(nx-kx-1) as well as the -c position of the additional knots tx(1)=...=tx(kx+1)=xb and -c tx(nx-kx)=...=tx(nx)=xe needed for the b-spline representat. -c if the computation mode iopt=1 is used, the values of tx(1), -c ...,tx(nx) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values tx(kx+2), -c ...tx(nx-kx-1) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c ny : integer. 
-c unless ier=10 (in case iopt >=0), ny will contain the total -c number of knots with respect to the y-variable, of the spline -c approximation returned. if the computation mode iopt=1 is -c used, the value of ny should be left unchanged between sub- -c sequent calls. -c in case iopt=-1, the value of ny should be specified on entry -c ty : real array of dimension nmax. -c on succesful exit, this array will contain the knots of the -c spline with respect to the y-variable, i.e. the position of -c the interior knots ty(ky+2),...,ty(ny-ky-1) as well as the -c position of the additional knots ty(1)=...=ty(ky+1)=yb and -c ty(ny-ky)=...=ty(ny)=ye needed for the b-spline representat. -c if the computation mode iopt=1 is used, the values of ty(1), -c ...,ty(ny) should be left unchanged between subsequent calls. -c if the computation mode iopt=-1 is used, the values ty(ky+2), -c ...ty(ny-ky-1) must be supplied by the user, before entry. -c see also the restrictions (ier=10). -c c : real array of dimension at least (nxest-kx-1)*(nyest-ky-1). -c on succesful exit, c contains the coefficients of the spline -c approximation s(x,y) -c fp : real. unless ier=10, fp contains the weighted sum of -c squared residuals of the spline approximation returned. -c wrk1 : real array of dimension (lwrk1). used as workspace. -c if the computation mode iopt=1 is used the value of wrk1(1) -c should be left unchanged between subsequent calls. -c on exit wrk1(2),wrk1(3),...,wrk1(1+(nx-kx-1)*(ny-ky-1)) will -c contain the values d(i)/max(d(i)),i=1,...,(nx-kx-1)*(ny-ky-1) -c with d(i) the i-th diagonal element of the reduced triangular -c matrix for calculating the b-spline coefficients. it includes -c those elements whose square is less than eps,which are treat- -c ed as 0 in the case of presumed rank deficiency (ier<-2). -c lwrk1 : integer. on entry lwrk1 must specify the actual dimension of -c the array wrk1 as declared in the calling (sub)program. -c lwrk1 must not be too small. 
let -c u = nxest-kx-1, v = nyest-ky-1, km = max(kx,ky)+1, -c ne = max(nxest,nyest), bx = kx*v+ky+1, by = ky*u+kx+1, -c if(bx.le.by) b1 = bx, b2 = b1+v-ky -c if(bx.gt.by) b1 = by, b2 = b1+u-kx then -c lwrk1 >= u*v*(2+b1+b2)+2*(u+v+km*(m+ne)+ne-kx-ky)+b2+1 -c wrk2 : real array of dimension (lwrk2). used as workspace, but -c only in the case a rank deficient system is encountered. -c lwrk2 : integer. on entry lwrk2 must specify the actual dimension of -c the array wrk2 as declared in the calling (sub)program. -c lwrk2 > 0 . a save upper boundfor lwrk2 = u*v*(b2+1)+b2 -c where u,v and b2 are as above. if there are enough data -c points, scattered uniformly over the approximation domain -c and if the smoothing factor s is not too small, there is a -c good chance that this extra workspace is not needed. a lot -c of memory might therefore be saved by setting lwrk2=1. -c (see also ier > 10) -c iwrk : integer array of dimension (kwrk). used as workspace. -c kwrk : integer. on entry kwrk must specify the actual dimension of -c the array iwrk as declared in the calling (sub)program. -c kwrk >= m+(nxest-2*kx-1)*(nyest-2*ky-1). -c ier : integer. unless the routine detects an error, ier contains a -c non-positive value on exit, i.e. -c ier=0 : normal return. the spline returned has a residual sum of -c squares fp such that abs(fp-s)/s <= tol with tol a relat- -c ive tolerance set to 0.001 by the program. -c ier=-1 : normal return. the spline returned is an interpolating -c spline (fp=0). -c ier=-2 : normal return. the spline returned is the weighted least- -c squares polynomial of degrees kx and ky. in this extreme -c case fp gives the upper bound for the smoothing factor s. -c ier<-2 : warning. the coefficients of the spline returned have been -c computed as the minimal norm least-squares solution of a -c (numerically) rank deficient system. (-ier) gives the rank. 
-c especially if the rank deficiency which can be computed as -c (nx-kx-1)*(ny-ky-1)+ier, is large the results may be inac- -c curate. they could also seriously depend on the value of -c eps. -c ier=1 : error. the required storage space exceeds the available -c storage space, as specified by the parameters nxest and -c nyest. -c probably causes : nxest or nyest too small. if these param- -c eters are already large, it may also indicate that s is -c too small -c the approximation returned is the weighted least-squares -c spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=2 : error. a theoretically impossible result was found during -c the iteration proces for finding a smoothing spline with -c fp = s. probably causes : s too small or badly chosen eps. -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=3 : error. the maximal number of iterations maxit (set to 20 -c by the program) allowed for finding a smoothing spline -c with fp=s has been reached. probably causes : s too small -c there is an approximation returned but the corresponding -c weighted sum of squared residuals does not satisfy the -c condition abs(fp-s)/s < tol. -c ier=4 : error. no more knots can be added because the number of -c b-spline coefficients (nx-kx-1)*(ny-ky-1) already exceeds -c the number of data points m. -c probably causes : either s or m too small. -c the approximation returned is the weighted least-squares -c spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=5 : error. no more knots can be added because the additional -c knot would (quasi) coincide with an old one. -c probably causes : s too small or too large a weight to an -c inaccurate data point. 
-c the approximation returned is the weighted least-squares -c spline according to the current set of knots. -c the parameter fp gives the corresponding weighted sum of -c squared residuals (fp>s). -c ier=10 : error. on entry, the input data are controlled on validity -c the following restrictions must be satisfied. -c -1<=iopt<=1, 1<=kx,ky<=5, m>=(kx+1)*(ky+1), nxest>=2*kx+2, -c nyest>=2*ky+2, 0=nxest, nmax>=nyest, -c xb<=x(i)<=xe, yb<=y(i)<=ye, w(i)>0, i=1,...,m -c lwrk1 >= u*v*(2+b1+b2)+2*(u+v+km*(m+ne)+ne-kx-ky)+b2+1 -c kwrk >= m+(nxest-2*kx-1)*(nyest-2*ky-1) -c if iopt=-1: 2*kx+2<=nx<=nxest -c xb=0: s>=0 -c if one of these conditions is found to be violated,control -c is immediately repassed to the calling program. in that -c case there is no approximation returned. -c ier>10 : error. lwrk2 is too small, i.e. there is not enough work- -c space for computing the minimal least-squares solution of -c a rank deficient system of linear equations. ier gives the -c requested value for lwrk2. there is no approximation re- -c turned but, having saved the information contained in nx, -c ny,tx,ty,wrk1, and having adjusted the value of lwrk2 and -c the dimension of the array wrk2 accordingly, the user can -c continue at the point the program was left, by calling -c surfit with iopt=1. -c -c further comments: -c by means of the parameter s, the user can control the tradeoff -c between closeness of fit and smoothness of fit of the approximation. -c if s is too large, the spline will be too smooth and signal will be -c lost ; if s is too small the spline will pick up too much noise. in -c the extreme cases the program will return an interpolating spline if -c s=0 and the weighted least-squares polynomial (degrees kx,ky)if s is -c very large. between these extremes, a properly chosen s will result -c in a good compromise between closeness of fit and smoothness of fit. 
-c to decide whether an approximation, corresponding to a certain s is -c satisfactory the user is highly recommended to inspect the fits -c graphically. -c recommended values for s depend on the weights w(i). if these are -c taken as 1/d(i) with d(i) an estimate of the standard deviation of -c z(i), a good s-value should be found in the range (m-sqrt(2*m),m+ -c sqrt(2*m)). if nothing is known about the statistical error in z(i) -c each w(i) can be set equal to one and s determined by trial and -c error, taking account of the comments above. the best is then to -c start with a very large value of s ( to determine the least-squares -c polynomial and the corresponding upper bound fp0 for s) and then to -c progressively decrease the value of s ( say by a factor 10 in the -c beginning, i.e. s=fp0/10, fp0/100,...and more carefully as the -c approximation shows more detail) to obtain closer fits. -c to choose s very small is strongly discouraged. this considerably -c increases computation time and memory requirements. it may also -c cause rank-deficiency (ier<-2) and endager numerical stability. -c to economize the search for a good s-value the program provides with -c different modes of computation. at the first call of the routine, or -c whenever he wants to restart with the initial set of knots the user -c must set iopt=0. -c if iopt=1 the program will continue with the set of knots found at -c the last call of the routine. this will save a lot of computation -c time if surfit is called repeatedly for different values of s. -c the number of knots of the spline returned and their location will -c depend on the value of s and on the complexity of the shape of the -c function underlying the data. if the computation mode iopt=1 -c is used, the knots returned may also depend on the s-values at -c previous calls (if these were smaller). 
therefore, if after a number -c of trials with different s-values and iopt=1, the user can finally -c accept a fit as satisfactory, it may be worthwhile for him to call -c surfit once more with the selected value for s but now with iopt=0. -c indeed, surfit may then return an approximation of the same quality -c of fit but with fewer knots and therefore better if data reduction -c is also an important objective for the user. -c the number of knots may also depend on the upper bounds nxest and -c nyest. indeed, if at a certain stage in surfit the number of knots -c in one direction (say nx) has reached the value of its upper bound -c (nxest), then from that moment on all subsequent knots are added -c in the other (y) direction. this may indicate that the value of -c nxest is too small. on the other hand, it gives the user the option -c of limiting the number of knots the routine locates in any direction -c for example, by setting nxest=2*kx+2 (the lowest allowable value for -c nxest), the user can indicate that he wants an approximation which -c is a simple polynomial of degree kx in the variable x. -c -c other subroutines required: -c fpback,fpbspl,fpsurf,fpdisc,fpgivs,fprank,fprati,fprota,fporde -c -c references: -c dierckx p. : an algorithm for surface fitting with spline functions -c ima j. numer. anal. 1 (1981) 267-283. -c dierckx p. : an algorithm for surface fitting with spline functions -c report tw50, dept. computer science,k.u.leuven, 1980. -c dierckx p. : curve and surface fitting with splines, monographs on -c numerical analysis, oxford university press, 1993. -c -c author: -c p.dierckx -c dept. computer science, k.u. leuven -c celestijnenlaan 200a, b-3001 heverlee, belgium. -c e-mail : Paul.Dierckx@cs.kuleuven.ac.be -c -c creation date : may 1979 -c latest update : march 1987 -c -c .. -c ..scalar arguments.. - real*8 xb,xe,yb,ye,s,eps,fp - integer iopt,m,kx,ky,nxest,nyest,nmax,nx,ny,lwrk1,lwrk2,kwrk,ier -c ..array arguments.. 
- real*8 x(m),y(m),z(m),w(m),tx(nmax),ty(nmax), - * c((nxest-kx-1)*(nyest-ky-1)),wrk1(lwrk1),wrk2(lwrk2) - integer iwrk(kwrk) -c ..local scalars.. - real*8 tol - integer i,ib1,ib3,jb1,ki,kmax,km1,km2,kn,kwest,kx1,ky1,la,lbx, - * lby,lco,lf,lff,lfp,lh,lq,lsx,lsy,lwest,maxit,ncest,nest,nek, - * nminx,nminy,nmx,nmy,nreg,nrint,nxk,nyk -c ..function references.. - integer max0 -c ..subroutine references.. -c fpsurf -c .. -c we set up the parameters tol and maxit. - maxit = 20 - tol = 0.1e-02 -c before starting computations a data check is made. if the input data -c are invalid,control is immediately repassed to the calling program. - ier = 10 - if(eps.le.0. .or. eps.ge.1.) go to 71 - if(kx.le.0 .or. kx.gt.5) go to 71 - kx1 = kx+1 - if(ky.le.0 .or. ky.gt.5) go to 71 - ky1 = ky+1 - kmax = max0(kx,ky) - km1 = kmax+1 - km2 = km1+1 - if(iopt.lt.(-1) .or. iopt.gt.1) go to 71 - if(m.lt.(kx1*ky1)) go to 71 - nminx = 2*kx1 - if(nxest.lt.nminx .or. nxest.gt.nmax) go to 71 - nminy = 2*ky1 - if(nyest.lt.nminy .or. nyest.gt.nmax) go to 71 - nest = max0(nxest,nyest) - nxk = nxest-kx1 - nyk = nyest-ky1 - ncest = nxk*nyk - nmx = nxest-nminx+1 - nmy = nyest-nminy+1 - nrint = nmx+nmy - nreg = nmx*nmy - ib1 = kx*nyk+ky1 - jb1 = ky*nxk+kx1 - ib3 = kx1*nyk+1 - if(ib1.le.jb1) go to 10 - ib1 = jb1 - ib3 = ky1*nxk+1 - 10 lwest = ncest*(2+ib1+ib3)+2*(nrint+nest*km2+m*km1)+ib3 - kwest = m+nreg - if(lwrk1.lt.lwest .or. kwrk.lt.kwest) go to 71 - if(xb.ge.xe .or. yb.ge.ye) go to 71 - do 20 i=1,m - if(w(i).le.0.) go to 70 - if(x(i).lt.xb .or. x(i).gt.xe) go to 71 - if(y(i).lt.yb .or. y(i).gt.ye) go to 71 - 20 continue - if(iopt.ge.0) go to 50 - if(nx.lt.nminx .or. nx.gt.nxest) go to 71 - nxk = nx-kx1 - tx(kx1) = xb - tx(nxk+1) = xe - do 30 i=kx1,nxk - if(tx(i+1).le.tx(i)) go to 72 - 30 continue - if(ny.lt.nminy .or. ny.gt.nyest) go to 71 - nyk = ny-ky1 - ty(ky1) = yb - ty(nyk+1) = ye - do 40 i=ky1,nyk - if(ty(i+1).le.ty(i)) go to 73 - 40 continue - go to 60 - 50 if(s.lt.0.) 
go to 71 - 60 ier = 0 -c we partition the working space and determine the spline approximation - kn = 1 - ki = kn+m - lq = 2 - la = lq+ncest*ib3 - lf = la+ncest*ib1 - lff = lf+ncest - lfp = lff+ncest - lco = lfp+nrint - lh = lco+nrint - lbx = lh+ib3 - nek = nest*km2 - lby = lbx+nek - lsx = lby+nek - lsy = lsx+m*km1 - call fpsurf(iopt,m,x,y,z,w,xb,xe,yb,ye,kx,ky,s,nxest,nyest, - * eps,tol,maxit,nest,km1,km2,ib1,ib3,ncest,nrint,nreg,nx,tx, - * ny,ty,c,fp,wrk1(1),wrk1(lfp),wrk1(lco),wrk1(lf),wrk1(lff), - * wrk1(la),wrk1(lq),wrk1(lbx),wrk1(lby),wrk1(lsx),wrk1(lsy), - * wrk1(lh),iwrk(ki),iwrk(kn),wrk2,lwrk2,ier) - 70 return - 71 print*,"iopt,kx,ky,m=",iopt,kx,ky,m - print*,"nxest,nyest,nmax=",nxest,nyest,nmax - print*,"lwrk1,lwrk2,kwrk=",lwrk1,lwrk2,kwrk - print*,"xb,xe,yb,ye=",xb,xe,yb,ye - print*,"eps,s",eps,s - return - 72 print*,"tx=",tx - return - 73 print*,"ty=",ty - return - end diff --git a/scipy-0.10.1/scipy/interpolate/fitpack2.py b/scipy-0.10.1/scipy/interpolate/fitpack2.py deleted file mode 100644 index 9f23ea78e2..0000000000 --- a/scipy-0.10.1/scipy/interpolate/fitpack2.py +++ /dev/null @@ -1,737 +0,0 @@ -""" -fitpack --- curve and surface fitting with splines - -fitpack is based on a collection of Fortran routines DIERCKX -by P. Dierckx (see http://www.netlib.org/dierckx/) transformed -to double routines by Pearu Peterson. -""" -# Created by Pearu Peterson, June,August 2003 - -__all__ = [ - 'UnivariateSpline', - 'InterpolatedUnivariateSpline', - 'LSQUnivariateSpline', - - 'BivariateSpline', - 'LSQBivariateSpline', - 'SmoothBivariateSpline', - 'RectBivariateSpline'] - -import warnings -from numpy import zeros, concatenate, alltrue, ravel, all, diff, array -import numpy as np - -import fitpack -import dfitpack - -################ Univariate spline #################### - -_curfit_messages = {1:""" -The required storage space exceeds the available storage space, as -specified by the parameter nest: nest too small. 
If nest is already -large (say nest > m/2), it may also indicate that s is too small. -The approximation returned is the weighted least-squares spline -according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp -gives the corresponding weighted sum of squared residuals (fp>s). -""", - 2:""" -A theoretically impossible result was found during the iteration -proces for finding a smoothing spline with fp = s: s too small. -There is an approximation returned but the corresponding weighted sum -of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""", - 3:""" -The maximal number of iterations maxit (set to 20 by the program) -allowed for finding a smoothing spline with fp=s has been reached: s -too small. -There is an approximation returned but the corresponding weighted sum -of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""", - 10:""" -Error on entry, no approximation returned. The following conditions -must hold: -xb<=x[0]0, i=0..m-1 -if iopt=-1: - xb>> from numpy import linspace,exp - >>> from numpy.random import randn - >>> from scipy.interpolate import UnivariateSpline - >>> x = linspace(-3, 3, 100) - >>> y = exp(-x**2) + randn(100)/10 - >>> s = UnivariateSpline(x, y, s=1) - >>> xs = linspace(-3, 3, 1000) - >>> ys = s(xs) - - xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y. - - """ - - def __init__(self, x, y, w=None, bbox = [None]*2, k=3, s=None): - """ - Input: - x,y - 1-d sequences of data points (x must be - in strictly ascending order) - - Optional input: - w - positive 1-d sequence of weights - bbox - 2-sequence specifying the boundary of - the approximation interval. - By default, bbox=[x[0],x[-1]] - k=3 - degree of the univariate spline. - s - positive smoothing factor defined for - estimation condition: - sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s - Default s=len(w) which should be a good value - if 1/w[i] is an estimate of the standard - deviation of y[i]. 
- """ - #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier - data = dfitpack.fpcurf0(x,y,k,w=w, - xb=bbox[0],xe=bbox[1],s=s) - if data[-1]==1: - # nest too small, setting to maximum bound - data = self._reset_nest(data) - self._data = data - self._reset_class() - - def _reset_class(self): - data = self._data - n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1] - self._eval_args = t[:n],c[:n],k - if ier==0: - # the spline returned has a residual sum of squares fp - # such that abs(fp-s)/s <= tol with tol a relative - # tolerance set to 0.001 by the program - pass - elif ier==-1: - # the spline returned is an interpolating spline - self._set_class(InterpolatedUnivariateSpline) - elif ier==-2: - # the spline returned is the weighted least-squares - # polynomial of degree k. In this extreme case fp gives - # the upper bound fp0 for the smoothing factor s. - self._set_class(LSQUnivariateSpline) - else: - # error - if ier==1: - self._set_class(LSQUnivariateSpline) - message = _curfit_messages.get(ier,'ier=%s' % (ier)) - warnings.warn(message) - - def _set_class(self, cls): - self._spline_class = cls - if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline, - LSQUnivariateSpline): - self.__class__ = cls - else: - # It's an unknown subclass -- don't change class. cf. #731 - pass - - def _reset_nest(self, data, nest=None): - n = data[10] - if nest is None: - k,m = data[5],len(data[0]) - nest = m+k+1 # this is the maximum bound for nest - else: - if not n <= nest: - raise ValueError("`nest` can only be increased") - t, c, fpint, nrdata = [np.resize(data[n], nest) for n in [8,9,11,12]] - - args = data[:8] + (t,c,n,fpint,nrdata,data[13]) - data = dfitpack.fpcurf1(*args) - return data - - def set_smoothing_factor(self, s): - """ Continue spline computation with the given smoothing - factor s and with the knots found at the last call. 
- - """ - data = self._data - if data[6]==-1: - warnings.warn('smoothing factor unchanged for' - 'LSQ spline with fixed knots') - return - args = data[:6] + (s,) + data[7:] - data = dfitpack.fpcurf1(*args) - if data[-1]==1: - # nest too small, setting to maximum bound - data = self._reset_nest(data) - self._data = data - self._reset_class() - - def __call__(self, x, nu=0): - """ Evaluate spline (or its nu-th derivative) at positions x. - Note: x can be unordered but the evaluation is more efficient - if x is (partially) ordered. - - """ - x = np.asarray(x) - # empty input yields empty output - if x.size == 0: - return array([]) -# if nu is None: -# return dfitpack.splev(*(self._eval_args+(x,))) -# return dfitpack.splder(nu=nu,*(self._eval_args+(x,))) - return fitpack.splev(x, self._eval_args, der=nu) - - def get_knots(self): - """ Return the positions of (boundary and interior) - knots of the spline. - """ - data = self._data - k,n = data[5],data[7] - return data[8][k:n-k] - - def get_coeffs(self): - """Return spline coefficients.""" - data = self._data - k,n = data[5],data[7] - return data[9][:n-k-1] - - def get_residual(self): - """Return weighted sum of squared residuals of the spline - approximation: sum ((w[i]*(y[i]-s(x[i])))**2,axis=0) - - """ - return self._data[10] - - def integral(self, a, b): - """ Return definite integral of the spline between two - given points. - """ - return dfitpack.splint(*(self._eval_args+(a,b))) - - def derivatives(self, x): - """ Return all derivatives of the spline at the point x.""" - d,ier = dfitpack.spalde(*(self._eval_args+(x,))) - if not ier == 0: - raise ValueError("Error code returned by spalde: %s" % ier) - return d - - def roots(self): - """ Return the zeros of the spline. - - Restriction: only cubic splines are supported by fitpack. 
- """ - k = self._data[5] - if k==3: - z,m,ier = dfitpack.sproot(*self._eval_args[:2]) - if not ier == 0: - raise ValueError("Error code returned by spalde: %s" % ier) - return z[:m] - raise NotImplementedError('finding roots unsupported for ' - 'non-cubic splines') - -class InterpolatedUnivariateSpline(UnivariateSpline): - """ - One-dimensional interpolating spline for a given set of data points. - - Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. Spline - function passes through all provided points. Equivalent to - `UnivariateSpline` with s=0. - - Parameters - ---------- - x : array_like - input dimension of data points -- must be increasing - y : array_like - input dimension of data points - w : array_like, optional - Weights for spline fitting. Must be positive. If None (default), - weights are all equal. - bbox : array_like, optional - 2-sequence specifying the boundary of the approximation interval. If - None (default), bbox=[x[0],x[-1]]. - k : int, optional - Degree of the smoothing spline. Must be <= 5. - - See Also - -------- - UnivariateSpline : Superclass -- allows knots to be selected by a - smoothing condition - LSQUnivariateSpline : spline for which knots are user-selected - splrep : An older, non object-oriented wrapping of FITPACK - splev, sproot, splint, spalde - BivariateSpline : A similar class for two-dimensional spline interpolation - - Notes - ----- - The number of data points must be larger than the spline degree `k`. 
- - Examples - -------- - >>> from numpy import linspace,exp - >>> from numpy.random import randn - >>> from scipy.interpolate import UnivariateSpline - >>> x = linspace(-3, 3, 100) - >>> y = exp(-x**2) + randn(100)/10 - >>> s = UnivariateSpline(x, y, s=1) - >>> xs = linspace(-3, 3, 1000) - >>> ys = s(xs) - - xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y - - """ - - def __init__(self, x, y, w=None, bbox = [None]*2, k=3): - """ - Input: - x,y - 1-d sequences of data points (x must be - in strictly ascending order) - - Optional input: - w - positive 1-d sequence of weights - bbox - 2-sequence specifying the boundary of - the approximation interval. - By default, bbox=[x[0],x[-1]] - k=3 - degree of the univariate spline. - """ - #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier - self._data = dfitpack.fpcurf0(x,y,k,w=w, - xb=bbox[0],xe=bbox[1],s=0) - self._reset_class() - -class LSQUnivariateSpline(UnivariateSpline): - """ - One-dimensional spline with explicit internal knots. - - Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `t` - specifies the internal knots of the spline - - Parameters - ---------- - x : array_like - input dimension of data points -- must be increasing - y : array_like - input dimension of data points - t: array_like - interior knots of the spline. 
Must be in ascending order - and bbox[0]>> from numpy import linspace,exp - >>> from numpy.random import randn - >>> from scipy.interpolate import LSQUnivariateSpline - >>> x = linspace(-3,3,100) - >>> y = exp(-x**2) + randn(100)/10 - >>> t = [-1,0,1] - >>> s = LSQUnivariateSpline(x,y,t) - >>> xs = linspace(-3,3,1000) - >>> ys = s(xs) - - xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y - with knots [-3,-1,0,1,3] - - """ - - def __init__(self, x, y, t, w=None, bbox = [None]*2, k=3): - """ - Input: - x,y - 1-d sequences of data points (x must be - in strictly ascending order) - t - 1-d sequence of the positions of user-defined - interior knots of the spline (t must be in strictly - ascending order and bbox[0] 0,axis=0): - raise ValueError('Interior knots t must satisfy ' - 'Schoenberg-Whitney conditions') - data = dfitpack.fpcurfm1(x,y,k,t,w=w,xb=xb,xe=xe) - self._data = data[:-3] + (None,None,data[-1]) - self._reset_class() - - -################ Bivariate spline #################### - -_surfit_messages = {1:""" -The required storage space exceeds the available storage space: nxest -or nyest too small, or s too small. -The weighted least-squares spline corresponds to the current set of -knots.""", - 2:""" -A theoretically impossible result was found during the iteration -process for finding a smoothing spline with fp = s: s too small or -badly chosen eps. -Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""", - 3:""" -the maximal number of iterations maxit (set to 20 by the program) -allowed for finding a smoothing spline with fp=s has been reached: -s too small. -Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""", - 4:""" -No more knots can be added because the number of b-spline coefficients -(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m: -either s or m too small. 
-The weighted least-squares spline corresponds to the current set of -knots.""", - 5:""" -No more knots can be added because the additional knot would (quasi) -coincide with an old one: s too small or too large a weight to an -inaccurate data point. -The weighted least-squares spline corresponds to the current set of -knots.""", - 10:""" -Error on entry, no approximation returned. The following conditions -must hold: -xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1 -If iopt==-1, then - xb10: - tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\ - xb,xe,yb,ye,\ - kx,ky,eps,lwrk2=ier) - if ier in [0,-1,-2]: # normal return - pass - else: - if ier<-2: - deficiency = (nx-kx-1)*(ny-ky-1)+ier - message = _surfit_messages.get(-3) % (deficiency) - else: - message = _surfit_messages.get(ier,'ier=%s' % (ier)) - warnings.warn(message) - self.fp = fp - self.tck = tx1,ty1,c - self.degrees = kx,ky - -class RectBivariateSpline(BivariateSpline): - """ Bivariate spline approximation over a rectangular mesh. - - Can be used for both smoothing and interpolating data. - - Parameters - ---------- - x,y : array_like - 1-D arrays of coordinates in strictly ascending order. - z : array_like - 2-D array of data with shape (x.size,y.size). - bbox : array_like, optional - Sequence of length 4 specifying the boundary of the rectangular - approximation domain. By default, - ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``. - kx, ky : ints, optional - Degrees of the bivariate spline. Default is 3. - s : float, optional - Positive smoothing factor defined for estimation condition: - ``sum((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0) <= s`` - Default is s=0, which is for interpolation. 
- - See Also - -------- - SmoothBivariateSpline : a smoothing bivariate spline for scattered data - bisplrep, bisplev : an older wrapping of FITPACK - UnivariateSpline : a similar class for univariate spline interpolation - """ - - def __init__(self, x, y, z, bbox = [None]*4, kx=3, ky=3, s=0): - x,y = ravel(x),ravel(y) - if not all(diff(x) > 0.0): - raise TypeError('x must be strictly increasing') - if not all(diff(y) > 0.0): - raise TypeError('y must be strictly increasing') - if not ((x.min() == x[0]) and (x.max() == x[-1])): - raise TypeError('x must be strictly ascending') - if not ((y.min() == y[0]) and (y.max() == y[-1])): - raise TypeError('y must be strictly ascending') - if not x.size == z.shape[0]: - raise TypeError('x dimension of z must have same number of ' - 'elements as x') - if not y.size == z.shape[1]: - raise TypeError('y dimension of z must have same number of ' - 'elements as y') - z = ravel(z) - xb,xe,yb,ye = bbox - nx,tx,ny,ty,c,fp,ier = dfitpack.regrid_smth(x,y,z, - xb,xe,yb,ye, - kx,ky,s) - if ier in [0,-1,-2]: # normal return - pass - else: - message = _surfit_messages.get(ier,'ier=%s' % (ier)) - warnings.warn(message) - - self.fp = fp - self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)] - self.degrees = kx,ky diff --git a/scipy-0.10.1/scipy/interpolate/generate_interpnd.py b/scipy-0.10.1/scipy/interpolate/generate_interpnd.py deleted file mode 100755 index 39e40218d5..0000000000 --- a/scipy-0.10.1/scipy/interpolate/generate_interpnd.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -import tempfile -import subprocess -import os -import sys -import re -import shutil - -from mako.template import Template - -f = open('interpnd.pyx', 'r') -template = f.read() -f.close() - -tmp_dir = tempfile.mkdtemp() -try: - # Run templating engine - fn = os.path.join(tmp_dir, 'interpnd.pyx') - f = open(fn, 'w') - f.write(Template(template).render()) - f.close() - - # Run Cython - dst_fn = os.path.join(tmp_dir, 'interpnd.c') - ret = 
subprocess.call(['cython', '-I', '../..', '-o', dst_fn, fn]) - if ret != 0: - sys.exit(ret) - - # Strip comments - f = open(dst_fn, 'r') - text = f.read() - f.close() - - r = re.compile(r'/\*(.*?)\*/', re.S) - - text = r.sub('', text) - f = open('interpnd.c', 'w') - f.write(text) - f.close() -finally: - shutil.rmtree(tmp_dir) diff --git a/scipy-0.10.1/scipy/interpolate/interpnd.c b/scipy-0.10.1/scipy/interpolate/interpnd.c deleted file mode 100644 index 8c8756c08d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/interpnd.c +++ /dev/null @@ -1,10048 +0,0 @@ - - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#else - -#include -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define 
Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define 
PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) 
PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__interpnd -#define __PYX_HAVE_API__interpnd -#include "stdio.h" -#include "stdlib.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#include "numpy/ndarrayobject.h" -#include "math.h" -#ifdef _OPENMP -#include -#endif - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - - -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - - -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define 
CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - - - - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else - #define likely(x) (x) - #define unlikely(x) (x) - #endif -#else - #define likely(x) (x) - #define unlikely(x) (x) -#endif - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - 
"interpnd.pyx", - "numpy.pxd", -}; - - -typedef npy_int8 __pyx_t_5numpy_int8_t; - - -typedef npy_int16 __pyx_t_5numpy_int16_t; - - -typedef npy_int32 __pyx_t_5numpy_int32_t; - - -typedef npy_int64 __pyx_t_5numpy_int64_t; - - -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - - -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - - -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - - -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - - -typedef npy_float32 __pyx_t_5numpy_float32_t; - - -typedef npy_float64 __pyx_t_5numpy_float64_t; - - -typedef npy_long __pyx_t_5numpy_int_t; - - -typedef npy_longlong __pyx_t_5numpy_long_t; - - -typedef npy_longlong __pyx_t_5numpy_longlong_t; - - -typedef npy_ulong __pyx_t_5numpy_uint_t; - - -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - - -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - - -typedef npy_intp __pyx_t_5numpy_intp_t; - - -typedef npy_uintp __pyx_t_5numpy_uintp_t; - - -typedef npy_double __pyx_t_5numpy_float_t; - - -typedef npy_double __pyx_t_5numpy_double_t; - - -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - - - - -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - - -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - - -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - - -typedef npy_cdouble __pyx_t_5numpy_complex_t; -struct __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t; -typedef struct __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t; -struct __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t; -typedef 
struct __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t; - - -struct __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t { - int ndim; - int npoints; - int nsimplex; - double *points; - int *vertices; - int *neighbors; - double *equations; - double *transform; - int *vertex_to_simplex; - double paraboloid_scale; - double paraboloid_shift; - double *max_bound; - double *min_bound; -}; - - -struct __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t { - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *info; - int index; - int vertex; - int vertex2; - int triangle; - int start_triangle; - int start_index; - int restart; -}; - - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define 
__Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} - -static double __Pyx__PyObject_AsDouble(PyObject* obj); - -#define __Pyx_PyObject_AsDouble(obj) \ - ((likely(PyFloat_CheckExact(obj))) ? 
\ - PyFloat_AS_DOUBLE(obj) : __Pyx__PyObject_AsDouble(obj)) - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact); - - -struct __Pyx_StructField_; - -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - char typegroup; -} __Pyx_TypeInfo; - -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; - -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; - - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -static void __Pyx_RaiseBufferFallbackError(void); -#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) -#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define 
__Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - 
#endif -#endif -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); -static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else -#define __Pyx_GetBuffer PyObject_GetBuffer -#define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - -Py_ssize_t __Pyx_zeros[] = {0, 0, 0}; -Py_ssize_t __Pyx_minusones[] = {-1, -1, -1}; - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); - -static PyObject *__Pyx_FindPy2Metaclass(PyObject *bases); - -static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, - PyObject *modname); - -#define __pyx_binding_PyCFunctionType_USED 1 - -typedef struct { - PyCFunctionObject func; -} __pyx_binding_PyCFunctionType_object; - -static PyTypeObject __pyx_binding_PyCFunctionType_type; -static PyTypeObject *__pyx_binding_PyCFunctionType = NULL; - -static PyObject *__pyx_binding_PyCFunctionType_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module); -#define __pyx_binding_PyCFunctionType_New(ml, self) __pyx_binding_PyCFunctionType_NewEx(ml, self, NULL) - -static int __pyx_binding_PyCFunctionType_init(void); - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); - -#ifndef __PYX_FORCE_INIT_THREADS - #if PY_VERSION_HEX < 0x02040200 - #define __PYX_FORCE_INIT_THREADS 1 - #else - #define __PYX_FORCE_INIT_THREADS 0 - #endif -#endif - -static __pyx_t_double_complex __Pyx_PyComplex_As___pyx_t_double_complex(PyObject*); - -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define 
__Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned 
PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static int __Pyx_check_binary_version(void); - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); - -static PyObject *__Pyx_ImportModule(const char *name); - -static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - - - - - - - - - - - - - - -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); - - -static void 
(*__pyx_f_5scipy_7spatial_5qhull__get_delaunay_info)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, PyObject *, int, int); -static int (*__pyx_f_5scipy_7spatial_5qhull__barycentric_inside)(int, double *, double *, double *, double); -static void (*__pyx_f_5scipy_7spatial_5qhull__barycentric_coordinate_single)(int, double *, double *, double *, int); -static void (*__pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates)(int, double *, double *, double *); -static void (*__pyx_f_5scipy_7spatial_5qhull__lift_point)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *); -static double (*__pyx_f_5scipy_7spatial_5qhull__distplane)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int, double *); -static int (*__pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double); -static int (*__pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, double); -static int (*__pyx_f_5scipy_7spatial_5qhull__find_simplex_directed)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double); -static int (*__pyx_f_5scipy_7spatial_5qhull__find_simplex)(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double); -static void (*__pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init)(__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *, __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int); -static void (*__pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next)(__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *); - - - - - - -static int __pyx_f_8interpnd__estimate_gradients_2d_global(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, int, double, double *); -static double __pyx_f_8interpnd__clough_tocher_2d_single_double(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int, double *, double *, double *); -static __pyx_t_double_complex 
__pyx_f_8interpnd__clough_tocher_2d_single_complex(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int, double *, __pyx_t_double_complex *, __pyx_t_double_complex *); -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_double_t = { "double_t", NULL, sizeof(__pyx_t_5numpy_double_t), 'R' }; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn_npy_int = { "npy_int", NULL, sizeof(npy_int), 'I' }; -static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), 'R' }; -static __Pyx_StructField __Pyx_StructFields_nn___pyx_t_5numpy_complex_t[] = { - {&__Pyx_TypeInfo_double, "real", offsetof(__pyx_t_5numpy_complex_t, real)}, - {&__Pyx_TypeInfo_double, "imag", offsetof(__pyx_t_5numpy_complex_t, imag)}, - {NULL, NULL, 0} -}; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t = { "complex_t", __Pyx_StructFields_nn___pyx_t_5numpy_complex_t, sizeof(__pyx_t_5numpy_complex_t), 'C' }; -#define __Pyx_MODULE_NAME "interpnd" -int __pyx_module_is_main_interpnd = 0; - - -static PyObject *__pyx_builtin_object; -static PyObject *__pyx_builtin_Warning; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_xrange; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_RuntimeError; -static char __pyx_k_2[] = "_ndim_coords_from_arrays"; -static char __pyx_k_5[] = "different number of values and points"; -static char __pyx_k_7[] = "invalid shape for input data points"; -static char __pyx_k_9[] = "input data must be at least 2-D"; -static char __pyx_k_11[] = "this mode of interpolation available only for %d-D data"; -static char __pyx_k_12[] = "number of dimensions in xi does not match x"; -static char __pyx_k_14[] = "coordinate arrays do not have the same shape"; -static char __pyx_k_19[] = "'y' has a wrong number of items"; -static char __pyx_k_21[] = "estimate_gradients_2d_global"; -static char __pyx_k_24[] = "Gradient estimation did not converge, the results may be inaccurate"; -static 
char __pyx_k_25[] = "GradientEstimationWarning"; -static char __pyx_k_30[] = "ndarray is not C contiguous"; -static char __pyx_k_32[] = "ndarray is not Fortran contiguous"; -static char __pyx_k_34[] = "Non-native byte order not supported"; -static char __pyx_k_36[] = "unknown dtype code in numpy.pxd (%d)"; -static char __pyx_k_37[] = "Format string allocated too short, see comment in numpy.pxd"; -static char __pyx_k_40[] = "Format string allocated too short."; -static char __pyx_k_42[] = "\nSimple N-D interpolation\n\n.. versionadded:: 0.9\n\n"; -static char __pyx_k_43[] = "scipy.spatial.qhull"; -static char __pyx_k_44[] = "*"; -static char __pyx_k_45[] = "\n Common routines for interpolators.\n\n .. versionadded:: 0.9\n\n "; -static char __pyx_k_46[] = "\n LinearNDInterpolator(points, values)\n\n Piecewise linear interpolant in N dimensions.\n\n .. versionadded:: 0.9\n\n Parameters\n ----------\n points : ndarray of floats, shape (npoints, ndims)\n Data point coordinates.\n values : ndarray of float or complex, shape (npoints, ...)\n Data values.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then\n the default is ``nan``.\n\n Notes\n -----\n The interpolant is constructed by triangulating the input data\n with Qhull [Qhull]_, and on each triangle performing linear\n barycentric interpolation.\n\n References\n ----------\n .. [Qhull] http://www.qhull.org/\n\n "; -static char __pyx_k_47[] = "LinearNDInterpolator"; -static char __pyx_k_48[] = "\n CloughTocher2DInterpolator(points, values, tol=1e-6)\n\n Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.\n\n .. 
versionadded:: 0.9\n\n Parameters\n ----------\n points : ndarray of floats, shape (npoints, ndims)\n Data point coordinates.\n values : ndarray of float or complex, shape (npoints, ...)\n Data values.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then\n the default is ``nan``.\n tol : float, optional\n Absolute/relative tolerance for gradient estimation.\n maxiter : int, optional\n Maximum number of iterations in gradient estimation.\n\n Notes\n -----\n The interpolant is constructed by triangulating the input data\n with Qhull [Qhull]_, and constructing a piecewise cubic\n interpolating Bezier polynomial on each triangle, using a\n Clough-Tocher scheme [CT]_. The interpolant is guaranteed to be\n continuously differentiable.\n\n The gradients of the interpolant are chosen so that the curvature\n of the interpolating surface is approximatively minimized. The\n gradients necessary for this are estimated using the global\n algorithm described in [Nielson83,Renka84]_.\n\n References\n ----------\n\n .. [Qhull] http://www.qhull.org/\n\n .. [CT] See, for example,\n P. Alfeld,\n ''A trivariate Clough-Tocher scheme for tetrahedral data''.\n Computer Aided Geometric Design, 1, 169 (1984);\n G. Farin,\n ''Triangular Bernstein-Bezier patches''.\n Computer Aided Geometric Design, 3, 83 (1986).\n\n .. [Nielson83] G. Nielson,\n ''A method for interpolating scattered data based upon a minimum norm\n network''.\n Math. Comp., 40, 253 (1983).\n\n .. [Renka84] R. J. Renka and A. K. Cline.\n ''A Triangle-based C1 interpolation method.'',\n Rocky Mountain J. 
Mat""h., 14, 223 (1984).\n\n "; -static char __pyx_k_49[] = "CloughTocher2DInterpolator"; -static char __pyx_k__B[] = "B"; -static char __pyx_k__H[] = "H"; -static char __pyx_k__I[] = "I"; -static char __pyx_k__L[] = "L"; -static char __pyx_k__O[] = "O"; -static char __pyx_k__Q[] = "Q"; -static char __pyx_k__T[] = "T"; -static char __pyx_k__b[] = "b"; -static char __pyx_k__d[] = "d"; -static char __pyx_k__f[] = "f"; -static char __pyx_k__g[] = "g"; -static char __pyx_k__h[] = "h"; -static char __pyx_k__i[] = "i"; -static char __pyx_k__l[] = "l"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__y[] = "y"; -static char __pyx_k__Zd[] = "Zd"; -static char __pyx_k__Zf[] = "Zf"; -static char __pyx_k__Zg[] = "Zg"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__xi[] = "xi"; -static char __pyx_k__eps[] = "eps"; -static char __pyx_k__nan[] = "nan"; -static char __pyx_k__tol[] = "tol"; -static char __pyx_k__tri[] = "tri"; -static char __pyx_k__grad[] = "grad"; -static char __pyx_k__imag[] = "imag"; -static char __pyx_k__ndim[] = "ndim"; -static char __pyx_k__prod[] = "prod"; -static char __pyx_k__real[] = "real"; -static char __pyx_k__self[] = "self"; -static char __pyx_k__warn[] = "warn"; -static char __pyx_k__dtype[] = "dtype"; -static char __pyx_k__empty[] = "empty"; -static char __pyx_k__finfo[] = "finfo"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__qhull[] = "qhull"; -static char __pyx_k__range[] = "range"; -static char __pyx_k__shape[] = "shape"; -static char __pyx_k__zeros[] = "zeros"; -static char __pyx_k__astype[] = "astype"; -static char __pyx_k__double[] = "double"; -static char __pyx_k__object[] = "object"; -static char __pyx_k__points[] = "points"; -static char __pyx_k__values[] = "values"; -static char __pyx_k__xrange[] = "xrange"; -static char __pyx_k__Warning[] = "Warning"; -static char __pyx_k__complex[] = "complex"; -static char __pyx_k__maxiter[] = "maxiter"; -static char __pyx_k__npoints[] = "npoints"; -static char 
__pyx_k__reshape[] = "reshape"; -static char __pyx_k__Delaunay[] = "Delaunay"; -static char __pyx_k____call__[] = "__call__"; -static char __pyx_k____init__[] = "__init__"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__interpnd[] = "interpnd"; -static char __pyx_k__vertices[] = "vertices"; -static char __pyx_k__warnings[] = "warnings"; -static char __pyx_k__enumerate[] = "enumerate"; -static char __pyx_k__transpose[] = "transpose"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k__asanyarray[] = "asanyarray"; -static char __pyx_k__fill_value[] = "fill_value"; -static char __pyx_k__is_complex[] = "is_complex"; -static char __pyx_k__issubdtype[] = "issubdtype"; -static char __pyx_k__RuntimeError[] = "RuntimeError"; -static char __pyx_k__values_shape[] = "values_shape"; -static char __pyx_k__complexfloating[] = "complexfloating"; -static char __pyx_k___evaluate_double[] = "_evaluate_double"; -static char __pyx_k__broadcast_arrays[] = "broadcast_arrays"; -static char __pyx_k___check_call_shape[] = "_check_call_shape"; -static char __pyx_k___check_init_shape[] = "_check_init_shape"; -static char __pyx_k___evaluate_complex[] = "_evaluate_complex"; -static char __pyx_k__ascontiguousarray[] = "ascontiguousarray"; -static char __pyx_k__NDInterpolatorBase[] = "NDInterpolatorBase"; -static PyObject *__pyx_kp_s_11; -static PyObject *__pyx_kp_s_12; -static PyObject *__pyx_kp_s_14; -static PyObject *__pyx_kp_s_19; -static PyObject *__pyx_n_s_2; -static PyObject *__pyx_n_s_21; -static PyObject *__pyx_kp_s_24; -static PyObject *__pyx_n_s_25; -static PyObject *__pyx_kp_u_30; -static PyObject *__pyx_kp_u_32; -static PyObject *__pyx_kp_u_34; -static PyObject *__pyx_kp_u_36; -static PyObject *__pyx_kp_u_37; -static PyObject *__pyx_kp_u_40; -static PyObject *__pyx_n_s_43; -static PyObject *__pyx_n_s_44; -static PyObject *__pyx_kp_s_45; -static PyObject *__pyx_kp_s_46; -static PyObject 
*__pyx_n_s_47; -static PyObject *__pyx_kp_s_48; -static PyObject *__pyx_n_s_49; -static PyObject *__pyx_kp_s_5; -static PyObject *__pyx_kp_s_7; -static PyObject *__pyx_kp_s_9; -static PyObject *__pyx_n_s__Delaunay; -static PyObject *__pyx_n_s__NDInterpolatorBase; -static PyObject *__pyx_n_s__RuntimeError; -static PyObject *__pyx_n_s__T; -static PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s__Warning; -static PyObject *__pyx_n_s____call__; -static PyObject *__pyx_n_s____init__; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s___check_call_shape; -static PyObject *__pyx_n_s___check_init_shape; -static PyObject *__pyx_n_s___evaluate_complex; -static PyObject *__pyx_n_s___evaluate_double; -static PyObject *__pyx_n_s__asanyarray; -static PyObject *__pyx_n_s__ascontiguousarray; -static PyObject *__pyx_n_s__astype; -static PyObject *__pyx_n_s__broadcast_arrays; -static PyObject *__pyx_n_s__complex; -static PyObject *__pyx_n_s__complexfloating; -static PyObject *__pyx_n_s__double; -static PyObject *__pyx_n_s__dtype; -static PyObject *__pyx_n_s__empty; -static PyObject *__pyx_n_s__enumerate; -static PyObject *__pyx_n_s__eps; -static PyObject *__pyx_n_s__fill_value; -static PyObject *__pyx_n_s__finfo; -static PyObject *__pyx_n_s__grad; -static PyObject *__pyx_n_s__imag; -static PyObject *__pyx_n_s__interpnd; -static PyObject *__pyx_n_s__is_complex; -static PyObject *__pyx_n_s__issubdtype; -static PyObject *__pyx_n_s__maxiter; -static PyObject *__pyx_n_s__nan; -static PyObject *__pyx_n_s__ndim; -static PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__npoints; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__object; -static PyObject *__pyx_n_s__points; -static PyObject *__pyx_n_s__prod; -static PyObject *__pyx_n_s__qhull; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__real; -static PyObject *__pyx_n_s__reshape; -static PyObject *__pyx_n_s__self; -static PyObject 
*__pyx_n_s__shape; -static PyObject *__pyx_n_s__tol; -static PyObject *__pyx_n_s__transpose; -static PyObject *__pyx_n_s__tri; -static PyObject *__pyx_n_s__values; -static PyObject *__pyx_n_s__values_shape; -static PyObject *__pyx_n_s__vertices; -static PyObject *__pyx_n_s__warn; -static PyObject *__pyx_n_s__warnings; -static PyObject *__pyx_n_s__xi; -static PyObject *__pyx_n_s__xrange; -static PyObject *__pyx_n_s__y; -static PyObject *__pyx_n_s__zeros; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_neg_1; -static PyObject *__pyx_int_15; -static PyObject *__pyx_int_100; -static PyObject *__pyx_int_400; -static PyObject *__pyx_k_1; -static PyObject *__pyx_k_17; -static PyObject *__pyx_k_18; -static PyObject *__pyx_k_28; -static PyObject *__pyx_k_29; -static PyObject *__pyx_k_slice_3; -static PyObject *__pyx_k_tuple_4; -static PyObject *__pyx_k_tuple_6; -static PyObject *__pyx_k_tuple_8; -static PyObject *__pyx_k_slice_22; -static PyObject *__pyx_k_tuple_10; -static PyObject *__pyx_k_tuple_13; -static PyObject *__pyx_k_tuple_15; -static PyObject *__pyx_k_tuple_16; -static PyObject *__pyx_k_tuple_20; -static PyObject *__pyx_k_tuple_23; -static PyObject *__pyx_k_tuple_26; -static PyObject *__pyx_k_tuple_27; -static PyObject *__pyx_k_tuple_31; -static PyObject *__pyx_k_tuple_33; -static PyObject *__pyx_k_tuple_35; -static PyObject *__pyx_k_tuple_38; -static PyObject *__pyx_k_tuple_39; -static PyObject *__pyx_k_tuple_41; - - - -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_8interpnd_18NDInterpolatorBase___init__[] = "\n Check shape of points and values arrays, and reshape values to\n (npoints, nvalues). 
Ensure the `points` and values arrays are\n C-contiguous, and of correct type.\n "; -static PyMethodDef __pyx_mdef_8interpnd_18NDInterpolatorBase___init__ = {__Pyx_NAMESTR("__init__"), (PyCFunction)__pyx_pf_8interpnd_18NDInterpolatorBase___init__, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8interpnd_18NDInterpolatorBase___init__)}; -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_points = 0; - PyObject *__pyx_v_values = 0; - PyObject *__pyx_v_fill_value = 0; - PyObject *__pyx_v_ndim = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - double __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__points,&__pyx_n_s__values,&__pyx_n_s__fill_value,&__pyx_n_s__ndim,0}; - __Pyx_RefNannySetupContext("__init__"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[5] = {0,0,0,0,0}; - values[3] = __pyx_k_1; - values[4] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__points); - if 
(likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__values); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fill_value); - if (value) { values[3] = value; kw_args--; } - } - case 4: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ndim); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_points = values[1]; - __pyx_v_values = values[2]; - __pyx_v_fill_value = values[3]; - __pyx_v_ndim = values[4]; - } else { - __pyx_v_fill_value = __pyx_k_1; - __pyx_v_ndim = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: - __pyx_v_ndim = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: - __pyx_v_fill_value = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_values = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_points = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.NDInterpolatorBase.__init__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_points); - __Pyx_INCREF(__pyx_v_values); - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_values); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_values); - __Pyx_GIVEREF(__pyx_v_values); - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_values); - __pyx_v_values = __pyx_t_1; - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___check_init_shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __Pyx_INCREF(__pyx_v_values); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_values); - __Pyx_GIVEREF(__pyx_v_values); - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__ndim), __pyx_v_ndim) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = 
PyObject_GetAttr(__pyx_t_4, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__astype); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_1; - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_t_1, 1, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__values_shape, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_3, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { - - - __pyx_t_1 = PyObject_GetItem(__pyx_v_values, ((PyObject *)__pyx_k_tuple_4)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__values, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L6; - } - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_int_2, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_5) { - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__values, __pyx_v_values) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - { - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__prod); if (unlikely(!__pyx_t_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = __Pyx_PySequence_GetSlice(__pyx_t_1, 1, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - __pyx_t_2 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - - if (PyObject_SetAttr(__pyx_v_self, 
__pyx_n_s__values, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_L6:; - - - __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__issubdtype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__dtype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__complexfloating); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_3 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = 
PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__is_complex, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__is_complex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_5) { - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__astype); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__complex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__values, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_fill_value); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_fill_value); - __Pyx_GIVEREF(__pyx_v_fill_value); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyComplex_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__fill_value, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - goto __pyx_L7; - } - { - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__double); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__values, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - - __pyx_t_7 = __Pyx_PyObject_AsDouble(__pyx_v_fill_value); if (unlikely(__pyx_t_7 == ((double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = PyFloat_FromDouble(__pyx_t_7); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__fill_value, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - 
__pyx_L7:; - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__points, __pyx_v_points) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("interpnd.NDInterpolatorBase.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_points); - __Pyx_XDECREF(__pyx_v_values); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase_1_check_init_shape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_8interpnd_18NDInterpolatorBase_1_check_init_shape[] = "\n Check shape of points and values arrays\n\n "; -static PyMethodDef __pyx_mdef_8interpnd_18NDInterpolatorBase_1_check_init_shape = {__Pyx_NAMESTR("_check_init_shape"), (PyCFunction)__pyx_pf_8interpnd_18NDInterpolatorBase_1_check_init_shape, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8interpnd_18NDInterpolatorBase_1_check_init_shape)}; -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase_1_check_init_shape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_points = 0; - PyObject *__pyx_v_values = 0; - PyObject *__pyx_v_ndim = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__points,&__pyx_n_s__values,&__pyx_n_s__ndim,0}; - __Pyx_RefNannySetupContext("_check_init_shape"); - 
__pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[4] = {0,0,0,0}; - values[3] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__points); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_check_init_shape", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__values); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_check_init_shape", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ndim); - if (value) { values[3] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_check_init_shape") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_points = values[1]; - __pyx_v_values = values[2]; - __pyx_v_ndim = values[3]; - } else { - __pyx_v_ndim = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: - __pyx_v_ndim = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_values = PyTuple_GET_ITEM(__pyx_args, 2); - 
__pyx_v_points = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_check_init_shape", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.NDInterpolatorBase._check_init_shape", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_values, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_NE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { - - - __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_int_2, Py_NE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = 
__Pyx_GetItemInt(__pyx_t_3, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_int_2, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_t_4 = (__pyx_v_ndim != Py_None); - if (__pyx_t_4) { - __pyx_t_3 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_3, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_v_ndim, Py_NE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - 
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __pyx_t_5; - } else { - __pyx_t_6 = __pyx_t_4; - } - if (__pyx_t_6) { - - - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_11), __pyx_v_ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L9; - } - __pyx_L9:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("interpnd.NDInterpolatorBase._check_init_shape", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase_2_check_call_shape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_18NDInterpolatorBase_2_check_call_shape = 
{__Pyx_NAMESTR("_check_call_shape"), (PyCFunction)__pyx_pf_8interpnd_18NDInterpolatorBase_2_check_call_shape, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase_2_check_call_shape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_xi = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("_check_call_shape"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_check_call_shape", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_check_call_shape") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_xi = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = 
PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_check_call_shape", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.NDInterpolatorBase._check_call_shape", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_xi); - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__asanyarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_xi); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_xi); - __Pyx_GIVEREF(__pyx_v_xi); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = 
__Pyx_GetItemInt(__pyx_t_3, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { - - - __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_13), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __Pyx_XDECREF(__pyx_r); 
- __Pyx_INCREF(__pyx_v_xi); - __pyx_r = __pyx_v_xi; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("interpnd.NDInterpolatorBase._check_call_shape", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_xi); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase_3__call__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_8interpnd_18NDInterpolatorBase_3__call__[] = "\n interpolator(xi)\n\n Evaluate interpolator at given points.\n\n Parameters\n ----------\n xi : ndarray of float, shape (..., ndim)\n Points where to interpolate data at.\n\n "; -static PyMethodDef __pyx_mdef_8interpnd_18NDInterpolatorBase_3__call__ = {__Pyx_NAMESTR("__call__"), (PyCFunction)__pyx_pf_8interpnd_18NDInterpolatorBase_3__call__, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8interpnd_18NDInterpolatorBase_3__call__)}; -static PyObject *__pyx_pf_8interpnd_18NDInterpolatorBase_3__call__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_args = 0; - PyObject *__pyx_v_xi = NULL; - PyObject *__pyx_v_shape = NULL; - PyObject *__pyx_v_r = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,0}; - __Pyx_RefNannySetupContext("__call__"); - __pyx_self = __pyx_self; - if (PyTuple_GET_SIZE(__pyx_args) > 1) { - __pyx_v_args = PyTuple_GetSlice(__pyx_args, 1, PyTuple_GET_SIZE(__pyx_args)); - if (unlikely(!__pyx_v_args)) 
{ - __Pyx_RefNannyFinishContext(); - return NULL; - } - __Pyx_GOTREF(__pyx_v_args); - } else { - __pyx_v_args = __pyx_empty_tuple; __Pyx_INCREF(__pyx_empty_tuple); - } - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[1] = {0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - default: - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t used_pos_args = (PyTuple_GET_SIZE(__pyx_args) < 1) ? PyTuple_GET_SIZE(__pyx_args) : 1; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, used_pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - } else if (PyTuple_GET_SIZE(__pyx_args) < 1) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__call__", 0, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_DECREF(__pyx_v_args); __pyx_v_args = 0; - __Pyx_AddTraceback("interpnd.NDInterpolatorBase.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_args)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_args)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_args)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_v_xi = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___check_call_shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_xi); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_xi); - __Pyx_GIVEREF(__pyx_v_xi); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_1; - __pyx_t_1 = 0; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_xi, 
__pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_4; - __pyx_t_4 = 0; - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if 
(unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_v_shape = __pyx_t_4; - __pyx_t_4 = 0; - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__reshape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__prod); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_v_shape, 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_1; - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__is_complex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___evaluate_complex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_xi); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_xi); - __Pyx_GIVEREF(__pyx_v_xi); - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - 
__pyx_v_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L6; - } - { - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___evaluate_double); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_xi); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_xi); - __Pyx_GIVEREF(__pyx_v_xi); - __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_v_r = __pyx_t_1; - __pyx_t_1 = 0; - } - __pyx_L6:; - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_r, __pyx_n_s__reshape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PySequence_GetSlice(__pyx_v_shape, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values_shape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("interpnd.NDInterpolatorBase.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_args); - __Pyx_XDECREF(__pyx_v_xi); - __Pyx_XDECREF(__pyx_v_shape); - __Pyx_XDECREF(__pyx_v_r); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd__ndim_coords_from_arrays(PyObject *__pyx_self, PyObject *__pyx_v_points); -static char __pyx_doc_8interpnd__ndim_coords_from_arrays[] = "\n Convert a tuple of coordinate arrays to a (..., ndim)-shaped array.\n\n "; -static PyMethodDef __pyx_mdef_8interpnd__ndim_coords_from_arrays = {__Pyx_NAMESTR("_ndim_coords_from_arrays"), (PyCFunction)__pyx_pf_8interpnd__ndim_coords_from_arrays, METH_O, __Pyx_DOCSTR(__pyx_doc_8interpnd__ndim_coords_from_arrays)}; -static PyObject *__pyx_pf_8interpnd__ndim_coords_from_arrays(PyObject *__pyx_self, PyObject *__pyx_v_points) { - PyObject *__pyx_v_p = NULL; - PyObject *__pyx_v_j = NULL; - PyObject *__pyx_v_item = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - int 
__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *(*__pyx_t_8)(PyObject *); - PyObject *__pyx_t_9 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_ndim_coords_from_arrays"); - __pyx_self = __pyx_self; - __Pyx_INCREF(__pyx_v_points); - - - __pyx_t_1 = ((PyObject *)((PyObject*)(&PyTuple_Type))); - __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_points, __pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_2) { - __pyx_t_3 = PyObject_Length(__pyx_v_points); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = (__pyx_t_3 == 1); - __pyx_t_5 = __pyx_t_4; - } else { - __pyx_t_5 = __pyx_t_2; - } - if (__pyx_t_5) { - - - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_points, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_1 = ((PyObject *)((PyObject*)(&PyTuple_Type))); - __Pyx_INCREF(__pyx_t_1); - __pyx_t_5 = __Pyx_TypeCheck(__pyx_v_points, __pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__broadcast_arrays); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PySequence_Tuple(__pyx_v_points); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_7 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_p = __pyx_t_7; - __pyx_t_7 = 0; - - - __pyx_t_3 = PyObject_Length(__pyx_v_p); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = PyObject_Call(__pyx_builtin_xrange, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (PyList_CheckExact(__pyx_t_7) || PyTuple_CheckExact(__pyx_t_7)) { - __pyx_t_1 = __pyx_t_7; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_7); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = Py_TYPE(__pyx_t_1)->tp_iternext; - } - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - for (;;) { - if 
(PyList_CheckExact(__pyx_t_1)) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; - __pyx_t_7 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_7); __pyx_t_3++; - } else if (PyTuple_CheckExact(__pyx_t_1)) { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_7); __pyx_t_3++; - } else { - __pyx_t_7 = __pyx_t_8(__pyx_t_1); - if (unlikely(!__pyx_t_7)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_7); - } - __Pyx_XDECREF(__pyx_v_j); - __pyx_v_j = __pyx_t_7; - __pyx_t_7 = 0; - - - __pyx_t_7 = PyObject_GetItem(__pyx_v_p, __pyx_v_j); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__shape); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_p, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__shape); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyObject_RichCompare(__pyx_t_6, __pyx_t_9, Py_NE); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - 
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (__pyx_t_5) { - - - __pyx_t_7 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_15), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_Raise(__pyx_t_7, 0, 0, 0); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L9; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_p, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__shape); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = PyObject_Length(__pyx_v_points); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_9, ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)((PyObject*)(&PyFloat_Type)))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_9 = PyEval_CallObjectWithKeywords(__pyx_t_7, ((PyObject *)__pyx_t_6), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_9; - __pyx_t_9 = 0; - - - __Pyx_INCREF(__pyx_int_0); - __pyx_t_9 = __pyx_int_0; - if (PyList_CheckExact(__pyx_v_p) || PyTuple_CheckExact(__pyx_v_p)) { - __pyx_t_1 = __pyx_v_p; 
__Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_p); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = Py_TYPE(__pyx_t_1)->tp_iternext; - } - for (;;) { - if (PyList_CheckExact(__pyx_t_1)) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; - __pyx_t_6 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; - } else if (PyTuple_CheckExact(__pyx_t_1)) { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; - } else { - __pyx_t_6 = __pyx_t_8(__pyx_t_1); - if (unlikely(!__pyx_t_6)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_6); - } - __Pyx_XDECREF(__pyx_v_item); - __pyx_v_item = __pyx_t_6; - __pyx_t_6 = 0; - __Pyx_INCREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_v_j); - __pyx_v_j = __pyx_t_9; - __pyx_t_6 = PyNumber_Add(__pyx_t_9, __pyx_int_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_9); - __pyx_t_9 = __pyx_t_6; - __pyx_t_6 = 0; - - - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_t_6, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_v_j); - PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_j); - __Pyx_GIVEREF(__pyx_v_j); - if (PyObject_SetItem(__pyx_v_points, ((PyObject *)__pyx_t_6), __pyx_v_item) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L6; - } - { - - - __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__asanyarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_6; - __pyx_t_6 = 0; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__ndim); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 = PyObject_RichCompare(__pyx_t_6, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_5) { - - - __pyx_t_9 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__reshape); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_6 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L12; - } - __pyx_L12:; - } - __pyx_L6:; - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_points); - __pyx_r = __pyx_v_points; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("interpnd._ndim_coords_from_arrays", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_p); - __Pyx_XDECREF(__pyx_v_j); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XDECREF(__pyx_v_points); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_20LinearNDInterpolator___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_20LinearNDInterpolator___init__ = {__Pyx_NAMESTR("__init__"), (PyCFunction)__pyx_pf_8interpnd_20LinearNDInterpolator___init__, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_20LinearNDInterpolator___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_points = 0; - PyObject *__pyx_v_values = 0; - PyObject 
*__pyx_v_fill_value = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__points,&__pyx_n_s__values,&__pyx_n_s__fill_value,0}; - __Pyx_RefNannySetupContext("__init__"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[4] = {0,0,0,0}; - values[3] = __pyx_k_17; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__points); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__values); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fill_value); - if (value) { values[3] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_points = values[1]; - __pyx_v_values = values[2]; - __pyx_v_fill_value = values[3]; - } else { - __pyx_v_fill_value = __pyx_k_17; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: - __pyx_v_fill_value = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_values = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_points = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.LinearNDInterpolator.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__NDInterpolatorBase); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s____init__); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_self); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __Pyx_INCREF(__pyx_v_values); - 
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_values); - __Pyx_GIVEREF(__pyx_v_values); - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__fill_value), __pyx_v_fill_value) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__qhull); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__Delaunay); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__tri, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("interpnd.LinearNDInterpolator.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_20LinearNDInterpolator_1_evaluate_double(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_20LinearNDInterpolator_1_evaluate_double = {__Pyx_NAMESTR("_evaluate_double"), (PyCFunction)__pyx_pf_8interpnd_20LinearNDInterpolator_1_evaluate_double, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_20LinearNDInterpolator_1_evaluate_double(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyArrayObject *__pyx_v_xi = 0; - PyArrayObject *__pyx_v_values = 0; - PyArrayObject *__pyx_v_out = 0; - PyArrayObject *__pyx_v_points = 0; - PyArrayObject *__pyx_v_vertices = 0; - double __pyx_v_c[NPY_MAXDIMS]; - double __pyx_v_fill_value; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_k; - int __pyx_v_m; - int __pyx_v_ndim; - int __pyx_v_isimplex; - int __pyx_v_start; - int __pyx_v_nvalues; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - PyObject *__pyx_v_eps = NULL; - Py_buffer __pyx_bstruct_xi; - Py_ssize_t __pyx_bstride_0_xi = 0; - Py_ssize_t 
__pyx_bstride_1_xi = 0; - Py_ssize_t __pyx_bshape_0_xi = 0; - Py_ssize_t __pyx_bshape_1_xi = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - Py_buffer __pyx_bstruct_values; - Py_ssize_t __pyx_bstride_0_values = 0; - Py_ssize_t __pyx_bstride_1_values = 0; - Py_ssize_t __pyx_bshape_0_values = 0; - Py_ssize_t __pyx_bshape_1_values = 0; - Py_buffer __pyx_bstruct_out; - Py_ssize_t __pyx_bstride_0_out = 0; - Py_ssize_t __pyx_bstride_1_out = 0; - Py_ssize_t __pyx_bshape_0_out = 0; - Py_ssize_t __pyx_bshape_1_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - PyArrayObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyArrayObject *__pyx_t_5 = NULL; - double __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyArrayObject *__pyx_t_10 = NULL; - int __pyx_t_11; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - PyObject *__pyx_t_14 = NULL; - npy_intp __pyx_t_15; - int __pyx_t_16; - int __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - long __pyx_t_23; - int __pyx_t_24; - int __pyx_t_25; - int __pyx_t_26; - int __pyx_t_27; - int __pyx_t_28; - int __pyx_t_29; - int __pyx_t_30; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("_evaluate_double"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - 
switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_evaluate_double", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_evaluate_double") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_xi = ((PyArrayObject *)values[1]); - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 1)); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_evaluate_double", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.LinearNDInterpolator._evaluate_double", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_values.buf = NULL; - __pyx_bstruct_out.buf = NULL; - __pyx_bstruct_points.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_xi.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xi), __pyx_ptype_5numpy_ndarray, 1, "xi", 0))) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_xi, (PyObject*)__pyx_v_xi, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_xi = __pyx_bstruct_xi.strides[0]; __pyx_bstride_1_xi = __pyx_bstruct_xi.strides[1]; - __pyx_bshape_0_xi = __pyx_bstruct_xi.shape[0]; __pyx_bshape_1_xi = __pyx_bstruct_xi.shape[1]; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_values, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_values.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_values = __pyx_bstruct_values.strides[0]; __pyx_bstride_1_values = __pyx_bstruct_values.strides[1]; - __pyx_bshape_0_values = __pyx_bstruct_values.shape[0]; __pyx_bshape_1_values = __pyx_bstruct_values.shape[1]; - } - } - __pyx_t_2 = 0; - __pyx_v_values = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_points = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_points.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - } - } - __pyx_t_3 = 0; - __pyx_v_points = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__vertices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn_npy_int, 
PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_vertices = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_vertices.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - } - } - __pyx_t_5 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_4); - __pyx_t_4 = 0; - - - __pyx_v_ndim = (__pyx_v_xi->dimensions[1]); - - - __pyx_v_start = 0; - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__fill_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_6 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_fill_value = __pyx_t_6; - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_t_4, 1, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 
0; - __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_xi->dimensions[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__shape); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_GetItemInt(__pyx_t_8, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_4 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_7)); - PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_t_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyDict_New(); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__double); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_8, ((PyObject *)__pyx_n_s__dtype), __pyx_t_9) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_7), ((PyObject *)__pyx_t_8)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_10 = ((PyArrayObject *)__pyx_t_9); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_11 < 0)) { - PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_v_out, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14); - } - } - __pyx_bstride_0_out = __pyx_bstruct_out.strides[0]; 
__pyx_bstride_1_out = __pyx_bstruct_out.strides[1]; - __pyx_bshape_0_out = __pyx_bstruct_out.shape[0]; __pyx_bshape_1_out = __pyx_bstruct_out.shape[1]; - if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_10 = 0; - __pyx_v_out = ((PyArrayObject *)__pyx_t_9); - __pyx_t_9 = 0; - - - __pyx_v_nvalues = (__pyx_v_out->dimensions[1]); - - - __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__finfo); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__double); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = PyObject_Call(__pyx_t_8, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - __pyx_t_9 = PyObject_GetAttr(__pyx_t_7, 
__pyx_n_s__eps); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyNumber_Multiply(__pyx_t_9, __pyx_int_100); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_eps = __pyx_t_7; - __pyx_t_7 = 0; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_15 = (__pyx_v_xi->dimensions[0]); - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_15; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - - __pyx_t_6 = __pyx_PyFloat_AsDouble(__pyx_v_eps); if (unlikely((__pyx_t_6 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L7;} - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex((&__pyx_v_info), __pyx_v_c, (((double *)__pyx_v_xi->data) + (__pyx_v_i * __pyx_v_ndim)), (&__pyx_v_start), __pyx_t_6); - - - __pyx_t_16 = (__pyx_v_isimplex == -1); - if (__pyx_t_16) { - - - __pyx_t_17 = __pyx_v_nvalues; - for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { - __pyx_v_k = __pyx_t_18; - - - __pyx_t_19 = __pyx_v_i; - __pyx_t_20 = __pyx_v_k; - if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_out; - if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_1_out; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_out.buf, __pyx_t_19, __pyx_bstride_0_out, __pyx_t_20, __pyx_bstride_1_out) = __pyx_v_fill_value; - } - - - goto __pyx_L9_continue; - goto __pyx_L11; - } - __pyx_L11:; - - - __pyx_t_17 = __pyx_v_nvalues; - for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { - __pyx_v_k = __pyx_t_18; - - - __pyx_t_21 = __pyx_v_i; - __pyx_t_22 = __pyx_v_k; - if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_bshape_0_out; - if (__pyx_t_22 < 0) __pyx_t_22 += 
__pyx_bshape_1_out; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_out.buf, __pyx_t_21, __pyx_bstride_0_out, __pyx_t_22, __pyx_bstride_1_out) = 0.0; - } - - - __pyx_t_23 = (__pyx_v_ndim + 1); - for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_23; __pyx_t_17+=1) { - __pyx_v_j = __pyx_t_17; - - - __pyx_t_18 = __pyx_v_nvalues; - for (__pyx_t_24 = 0; __pyx_t_24 < __pyx_t_18; __pyx_t_24+=1) { - __pyx_v_k = __pyx_t_24; - - - __pyx_t_25 = __pyx_v_isimplex; - __pyx_t_26 = __pyx_v_j; - if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_bshape_0_vertices; - if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_bshape_1_vertices; - __pyx_v_m = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_25, __pyx_bstride_0_vertices, __pyx_t_26, __pyx_bstride_1_vertices)); - - - __pyx_t_27 = __pyx_v_m; - __pyx_t_28 = __pyx_v_k; - if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_bshape_0_values; - if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_bshape_1_values; - __pyx_t_29 = __pyx_v_i; - __pyx_t_30 = __pyx_v_k; - if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_bshape_0_out; - if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_bshape_1_out; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_out.buf, __pyx_t_29, __pyx_bstride_0_out, __pyx_t_30, __pyx_bstride_1_out) += ((__pyx_v_c[__pyx_v_j]) * (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_values.buf, __pyx_t_27, __pyx_bstride_0_values, __pyx_t_28, __pyx_bstride_1_values))); - } - } - __pyx_L9_continue:; - } - } - - - { - int __pyx_why; - __pyx_why = 0; goto __pyx_L8; - __pyx_L7: __pyx_why = 4; goto __pyx_L8; - __pyx_L8:; - Py_BLOCK_THREADS - switch (__pyx_why) { - case 4: goto __pyx_L1_error; - } - } - } - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_out)); - __pyx_r = ((PyObject *)__pyx_v_out); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - 
__Pyx_XDECREF(__pyx_t_9); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("interpnd.LinearNDInterpolator._evaluate_double", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_values); - __Pyx_XDECREF((PyObject *)__pyx_v_out); - __Pyx_XDECREF((PyObject *)__pyx_v_points); - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_eps); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_20LinearNDInterpolator_2_evaluate_complex(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_20LinearNDInterpolator_2_evaluate_complex = {__Pyx_NAMESTR("_evaluate_complex"), (PyCFunction)__pyx_pf_8interpnd_20LinearNDInterpolator_2_evaluate_complex, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_20LinearNDInterpolator_2_evaluate_complex(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyArrayObject *__pyx_v_xi = 0; - PyArrayObject *__pyx_v_values = 0; - PyArrayObject *__pyx_v_out = 0; - PyArrayObject *__pyx_v_points = 0; - PyArrayObject *__pyx_v_vertices = 0; - double __pyx_v_c[NPY_MAXDIMS]; - __pyx_t_double_complex __pyx_v_fill_value; - int __pyx_v_i; - int 
__pyx_v_j; - int __pyx_v_k; - int __pyx_v_m; - int __pyx_v_ndim; - int __pyx_v_isimplex; - int __pyx_v_start; - int __pyx_v_nvalues; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - PyObject *__pyx_v_eps = NULL; - Py_buffer __pyx_bstruct_xi; - Py_ssize_t __pyx_bstride_0_xi = 0; - Py_ssize_t __pyx_bstride_1_xi = 0; - Py_ssize_t __pyx_bshape_0_xi = 0; - Py_ssize_t __pyx_bshape_1_xi = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - Py_buffer __pyx_bstruct_values; - Py_ssize_t __pyx_bstride_0_values = 0; - Py_ssize_t __pyx_bstride_1_values = 0; - Py_ssize_t __pyx_bshape_0_values = 0; - Py_ssize_t __pyx_bshape_1_values = 0; - Py_buffer __pyx_bstruct_out; - Py_ssize_t __pyx_bstride_0_out = 0; - Py_ssize_t __pyx_bstride_1_out = 0; - Py_ssize_t __pyx_bshape_0_out = 0; - Py_ssize_t __pyx_bshape_1_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - PyArrayObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyArrayObject *__pyx_t_5 = NULL; - __pyx_t_double_complex __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyArrayObject *__pyx_t_10 = NULL; - int __pyx_t_11; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - PyObject *__pyx_t_14 = NULL; - npy_intp __pyx_t_15; - double __pyx_t_16; - int __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - int __pyx_t_23; - int __pyx_t_24; - int __pyx_t_25; - int __pyx_t_26; - int __pyx_t_27; - long __pyx_t_28; - int __pyx_t_29; - int __pyx_t_30; - int __pyx_t_31; - int 
__pyx_t_32; - int __pyx_t_33; - int __pyx_t_34; - int __pyx_t_35; - int __pyx_t_36; - int __pyx_t_37; - int __pyx_t_38; - int __pyx_t_39; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("_evaluate_complex"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_evaluate_complex", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_evaluate_complex") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_xi = ((PyArrayObject *)values[1]); - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 1)); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_evaluate_complex", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - 
__pyx_L3_error:; - __Pyx_AddTraceback("interpnd.LinearNDInterpolator._evaluate_complex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_values.buf = NULL; - __pyx_bstruct_out.buf = NULL; - __pyx_bstruct_points.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_xi.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xi), __pyx_ptype_5numpy_ndarray, 1, "xi", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_xi, (PyObject*)__pyx_v_xi, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_xi = __pyx_bstruct_xi.strides[0]; __pyx_bstride_1_xi = __pyx_bstruct_xi.strides[1]; - __pyx_bshape_0_xi = __pyx_bstruct_xi.shape[0]; __pyx_bshape_1_xi = __pyx_bstruct_xi.shape[1]; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[2]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_values, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_values.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_values = __pyx_bstruct_values.strides[0]; __pyx_bstride_1_values = __pyx_bstruct_values.strides[1]; - __pyx_bshape_0_values = __pyx_bstruct_values.shape[0]; __pyx_bshape_1_values = __pyx_bstruct_values.shape[1]; - } - } - __pyx_t_2 = 0; - __pyx_v_values = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_points = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_points.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - } - } - __pyx_t_3 = 0; - __pyx_v_points = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__vertices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_vertices = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_vertices.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - } - } - __pyx_t_5 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_4); - __pyx_t_4 = 0; - - - __pyx_v_ndim = (__pyx_v_xi->dimensions[1]); - - - __pyx_v_start = 0; - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__fill_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_fill_value = __pyx_t_6; - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_t_4, 1, 0); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_xi->dimensions[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__shape); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_GetItemInt(__pyx_t_8, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_4 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(((PyObject *)__pyx_t_7)); - PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_t_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyDict_New(); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__complex); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_8, ((PyObject *)__pyx_n_s__dtype), __pyx_t_9) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_7), ((PyObject *)__pyx_t_8)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_10 = ((PyArrayObject *)__pyx_t_9); - { - __Pyx_BufFmt_StackElem __pyx_stack[2]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_11 < 0)) { - 
PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_v_out, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14); - } - } - __pyx_bstride_0_out = __pyx_bstruct_out.strides[0]; __pyx_bstride_1_out = __pyx_bstruct_out.strides[1]; - __pyx_bshape_0_out = __pyx_bstruct_out.shape[0]; __pyx_bshape_1_out = __pyx_bstruct_out.shape[1]; - if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_10 = 0; - __pyx_v_out = ((PyArrayObject *)__pyx_t_9); - __pyx_t_9 = 0; - - - __pyx_v_nvalues = (__pyx_v_out->dimensions[1]); - - - __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__finfo); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__double); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject 
*)__pyx_t_9)); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = PyObject_Call(__pyx_t_8, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - __pyx_t_9 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__eps); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyNumber_Multiply(__pyx_t_9, __pyx_int_100); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_eps = __pyx_t_7; - __pyx_t_7 = 0; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_15 = (__pyx_v_xi->dimensions[0]); - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_15; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - - __pyx_t_16 = __pyx_PyFloat_AsDouble(__pyx_v_eps); if (unlikely((__pyx_t_16 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L7;} - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex((&__pyx_v_info), __pyx_v_c, (((double *)__pyx_v_xi->data) + (__pyx_v_i * __pyx_v_ndim)), (&__pyx_v_start), __pyx_t_16); - - - __pyx_t_17 = (__pyx_v_isimplex == -1); - if (__pyx_t_17) { - - - __pyx_t_18 = __pyx_v_nvalues; - for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) { - __pyx_v_k = __pyx_t_19; - - - __pyx_t_20 = __pyx_v_i; - __pyx_t_21 = __pyx_v_k; - if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_0_out; - if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_bshape_1_out; - 
(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_20, __pyx_bstride_0_out, __pyx_t_21, __pyx_bstride_1_out)).real = __Pyx_CREAL(__pyx_v_fill_value); - - - __pyx_t_22 = __pyx_v_i; - __pyx_t_23 = __pyx_v_k; - if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_bshape_0_out; - if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_22, __pyx_bstride_0_out, __pyx_t_23, __pyx_bstride_1_out)).imag = __Pyx_CIMAG(__pyx_v_fill_value); - } - - - goto __pyx_L9_continue; - goto __pyx_L11; - } - __pyx_L11:; - - - __pyx_t_18 = __pyx_v_nvalues; - for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) { - __pyx_v_k = __pyx_t_19; - - - __pyx_t_24 = __pyx_v_i; - __pyx_t_25 = __pyx_v_k; - if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_bshape_0_out; - if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_24, __pyx_bstride_0_out, __pyx_t_25, __pyx_bstride_1_out)).real = 0.0; - - - __pyx_t_26 = __pyx_v_i; - __pyx_t_27 = __pyx_v_k; - if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_bshape_0_out; - if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_26, __pyx_bstride_0_out, __pyx_t_27, __pyx_bstride_1_out)).imag = 0.0; - } - - - __pyx_t_28 = (__pyx_v_ndim + 1); - for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_28; __pyx_t_18+=1) { - __pyx_v_j = __pyx_t_18; - - - __pyx_t_19 = __pyx_v_nvalues; - for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_19; __pyx_t_29+=1) { - __pyx_v_k = __pyx_t_29; - - - __pyx_t_30 = __pyx_v_isimplex; - __pyx_t_31 = __pyx_v_j; - if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_bshape_0_vertices; - if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_bshape_1_vertices; - __pyx_v_m = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_30, __pyx_bstride_0_vertices, __pyx_t_31, __pyx_bstride_1_vertices)); - - - __pyx_t_32 = 
__pyx_v_m; - __pyx_t_33 = __pyx_v_k; - if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_bshape_0_values; - if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_bshape_1_values; - __pyx_t_34 = __pyx_v_i; - __pyx_t_35 = __pyx_v_k; - if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_bshape_0_out; - if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_34, __pyx_bstride_0_out, __pyx_t_35, __pyx_bstride_1_out)).real += ((__pyx_v_c[__pyx_v_j]) * (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_values.buf, __pyx_t_32, __pyx_bstride_0_values, __pyx_t_33, __pyx_bstride_1_values)).real); - - - __pyx_t_36 = __pyx_v_m; - __pyx_t_37 = __pyx_v_k; - if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_bshape_0_values; - if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_bshape_1_values; - __pyx_t_38 = __pyx_v_i; - __pyx_t_39 = __pyx_v_k; - if (__pyx_t_38 < 0) __pyx_t_38 += __pyx_bshape_0_out; - if (__pyx_t_39 < 0) __pyx_t_39 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_38, __pyx_bstride_0_out, __pyx_t_39, __pyx_bstride_1_out)).imag += ((__pyx_v_c[__pyx_v_j]) * (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_values.buf, __pyx_t_36, __pyx_bstride_0_values, __pyx_t_37, __pyx_bstride_1_values)).imag); - } - } - __pyx_L9_continue:; - } - } - - - { - int __pyx_why; - __pyx_why = 0; goto __pyx_L8; - __pyx_L7: __pyx_why = 4; goto __pyx_L8; - __pyx_L8:; - Py_BLOCK_THREADS - switch (__pyx_why) { - case 4: goto __pyx_L1_error; - } - } - } - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_out)); - __pyx_r = ((PyObject *)__pyx_v_out); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, 
&__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("interpnd.LinearNDInterpolator._evaluate_complex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_values); - __Pyx_XDECREF((PyObject *)__pyx_v_out); - __Pyx_XDECREF((PyObject *)__pyx_v_points); - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_eps); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static int __pyx_f_8interpnd__estimate_gradients_2d_global(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, double *__pyx_v_data, int __pyx_v_maxiter, double __pyx_v_tol, double *__pyx_v_y) { - double __pyx_v_Q[(2 * 2)]; - double __pyx_v_s[2]; - double __pyx_v_r[2]; - int __pyx_v_ipoint; - int __pyx_v_iiter; - int __pyx_v_k; - __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t __pyx_v_it; - double __pyx_v_f1; - double __pyx_v_f2; - double __pyx_v_df2; - double __pyx_v_ex; - double __pyx_v_ey; - double __pyx_v_L; - double __pyx_v_L3; - double __pyx_v_det; - double __pyx_v_err; - double __pyx_v_change; - int __pyx_r; - long __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - double __pyx_t_8; - double __pyx_t_9; - double __pyx_t_10; - - - __pyx_t_1 = (2 * __pyx_v_d->npoints); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_ipoint = __pyx_t_2; - - - 
(__pyx_v_y[__pyx_v_ipoint]) = 0.0; - } - - - __pyx_t_2 = __pyx_v_maxiter; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_iiter = __pyx_t_3; - - - __pyx_v_err = 0.0; - - - __pyx_t_4 = __pyx_v_d->npoints; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_ipoint = __pyx_t_5; - - - for (__pyx_t_6 = 0; __pyx_t_6 < 4; __pyx_t_6+=1) { - __pyx_v_k = __pyx_t_6; - - - (__pyx_v_Q[__pyx_v_k]) = 0.0; - } - - - for (__pyx_t_6 = 0; __pyx_t_6 < 2; __pyx_t_6+=1) { - __pyx_v_k = __pyx_t_6; - - - (__pyx_v_s[__pyx_v_k]) = 0.0; - } - - - __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init((&__pyx_v_it), __pyx_v_d, __pyx_v_ipoint); - - - while (1) { - __pyx_t_7 = (__pyx_v_it.index != -1); - if (!__pyx_t_7) break; - - - __pyx_v_ex = ((__pyx_v_d->points[((2 * __pyx_v_it.vertex2) + 0)]) - (__pyx_v_d->points[((2 * __pyx_v_it.vertex) + 0)])); - - - __pyx_v_ey = ((__pyx_v_d->points[((2 * __pyx_v_it.vertex2) + 1)]) - (__pyx_v_d->points[((2 * __pyx_v_it.vertex) + 1)])); - - - __pyx_v_L = sqrt((pow(__pyx_v_ex, 2.0) + pow(__pyx_v_ey, 2.0))); - - - __pyx_v_L3 = ((__pyx_v_L * __pyx_v_L) * __pyx_v_L); - - - __pyx_v_f1 = (__pyx_v_data[__pyx_v_it.vertex]); - - - __pyx_v_f2 = (__pyx_v_data[__pyx_v_it.vertex2]); - - - __pyx_v_df2 = (((-__pyx_v_ex) * (__pyx_v_y[((__pyx_v_it.vertex2 * 2) + 0)])) - (__pyx_v_ey * (__pyx_v_y[((__pyx_v_it.vertex2 * 2) + 1)]))); - - - __pyx_t_1 = 0; - (__pyx_v_Q[__pyx_t_1]) = ((__pyx_v_Q[__pyx_t_1]) + (((4.0 * __pyx_v_ex) * __pyx_v_ex) / __pyx_v_L3)); - - - __pyx_t_1 = 1; - (__pyx_v_Q[__pyx_t_1]) = ((__pyx_v_Q[__pyx_t_1]) + (((4.0 * __pyx_v_ex) * __pyx_v_ey) / __pyx_v_L3)); - - - __pyx_t_1 = 3; - (__pyx_v_Q[__pyx_t_1]) = ((__pyx_v_Q[__pyx_t_1]) + (((4.0 * __pyx_v_ey) * __pyx_v_ey) / __pyx_v_L3)); - - - __pyx_t_1 = 0; - (__pyx_v_s[__pyx_t_1]) = ((__pyx_v_s[__pyx_t_1]) + ((((6.0 * (__pyx_v_f1 - __pyx_v_f2)) - (2.0 * __pyx_v_df2)) * __pyx_v_ex) / __pyx_v_L3)); - - - __pyx_t_1 = 1; - (__pyx_v_s[__pyx_t_1]) = ((__pyx_v_s[__pyx_t_1]) + 
((((6.0 * (__pyx_v_f1 - __pyx_v_f2)) - (2.0 * __pyx_v_df2)) * __pyx_v_ey) / __pyx_v_L3)); - - - __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next((&__pyx_v_it)); - } - - - (__pyx_v_Q[2]) = (__pyx_v_Q[1]); - - - __pyx_v_det = (((__pyx_v_Q[0]) * (__pyx_v_Q[3])) - ((__pyx_v_Q[1]) * (__pyx_v_Q[2]))); - - - (__pyx_v_r[0]) = ((((__pyx_v_Q[3]) * (__pyx_v_s[0])) - ((__pyx_v_Q[1]) * (__pyx_v_s[1]))) / __pyx_v_det); - - - (__pyx_v_r[1]) = ((((-(__pyx_v_Q[2])) * (__pyx_v_s[0])) + ((__pyx_v_Q[0]) * (__pyx_v_s[1]))) / __pyx_v_det); - - - __pyx_t_8 = fabs(((__pyx_v_y[((__pyx_v_it.vertex * 2) + 1)]) + (__pyx_v_r[1]))); - - - __pyx_t_9 = fabs(((__pyx_v_y[((__pyx_v_it.vertex * 2) + 0)]) + (__pyx_v_r[0]))); - - - if ((__pyx_t_8 > __pyx_t_9)) { - __pyx_t_10 = __pyx_t_8; - } else { - __pyx_t_10 = __pyx_t_9; - } - __pyx_v_change = __pyx_t_10; - - - (__pyx_v_y[((__pyx_v_it.vertex * 2) + 0)]) = (-(__pyx_v_r[0])); - - - (__pyx_v_y[((__pyx_v_it.vertex * 2) + 1)]) = (-(__pyx_v_r[1])); - - - __pyx_t_10 = fabs((__pyx_v_r[1])); - __pyx_t_8 = fabs((__pyx_v_r[0])); - if ((__pyx_t_10 > __pyx_t_8)) { - __pyx_t_9 = __pyx_t_10; - } else { - __pyx_t_9 = __pyx_t_8; - } - __pyx_t_10 = __pyx_t_9; - __pyx_t_9 = 1.0; - if ((__pyx_t_10 > __pyx_t_9)) { - __pyx_t_8 = __pyx_t_10; - } else { - __pyx_t_8 = __pyx_t_9; - } - __pyx_v_change = (__pyx_v_change / __pyx_t_8); - - - __pyx_t_8 = __pyx_v_change; - __pyx_t_10 = __pyx_v_err; - if ((__pyx_t_8 > __pyx_t_10)) { - __pyx_t_9 = __pyx_t_8; - } else { - __pyx_t_9 = __pyx_t_10; - } - __pyx_v_err = __pyx_t_9; - } - - - __pyx_t_7 = (__pyx_v_err < __pyx_v_tol); - if (__pyx_t_7) { - - - __pyx_r = (__pyx_v_iiter + 1); - goto __pyx_L0; - goto __pyx_L15; - } - __pyx_L15:; - } - - - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_1estimate_gradients_2d_global(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_1estimate_gradients_2d_global = 
{__Pyx_NAMESTR("estimate_gradients_2d_global"), (PyCFunction)__pyx_pf_8interpnd_1estimate_gradients_2d_global, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_1estimate_gradients_2d_global(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_tri = 0; - PyObject *__pyx_v_y = 0; - PyObject *__pyx_v_maxiter = 0; - PyObject *__pyx_v_tol = 0; - PyArrayObject *__pyx_v_data = 0; - PyArrayObject *__pyx_v_grad = 0; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - int __pyx_v_k; - int __pyx_v_ret; - int __pyx_v_nvalues; - PyObject *__pyx_v_rg = NULL; - PyObject *__pyx_v_ig = NULL; - PyObject *__pyx_v_r = NULL; - PyObject *__pyx_v_y_shape = NULL; - PyObject *__pyx_v_yi = NULL; - Py_buffer __pyx_bstruct_grad; - Py_ssize_t __pyx_bstride_0_grad = 0; - Py_ssize_t __pyx_bstride_1_grad = 0; - Py_ssize_t __pyx_bstride_2_grad = 0; - Py_ssize_t __pyx_bshape_0_grad = 0; - Py_ssize_t __pyx_bshape_1_grad = 0; - Py_ssize_t __pyx_bshape_2_grad = 0; - Py_buffer __pyx_bstruct_data; - Py_ssize_t __pyx_bstride_0_data = 0; - Py_ssize_t __pyx_bstride_1_data = 0; - Py_ssize_t __pyx_bshape_0_data = 0; - Py_ssize_t __pyx_bshape_1_data = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyArrayObject *__pyx_t_11 = NULL; - int __pyx_t_12; - int __pyx_t_13; - double __pyx_t_14; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__tri,&__pyx_n_s__y,&__pyx_n_s__maxiter,&__pyx_n_s__tol,0}; - __Pyx_RefNannySetupContext("estimate_gradients_2d_global"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = 
PyDict_Size(__pyx_kwds); - PyObject* values[4] = {0,0,0,0}; - values[2] = ((PyObject *)__pyx_int_400); - values[3] = __pyx_k_18; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tri); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__y); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("estimate_gradients_2d_global", 0, 2, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 477; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__maxiter); - if (value) { values[2] = value; kw_args--; } - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tol); - if (value) { values[3] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "estimate_gradients_2d_global") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 477; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_tri = values[0]; - __pyx_v_y = values[1]; - __pyx_v_maxiter = values[2]; - __pyx_v_tol = values[3]; - } else { - __pyx_v_maxiter = ((PyObject *)__pyx_int_400); - __pyx_v_tol = __pyx_k_18; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: - __pyx_v_tol = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_maxiter = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_y = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_tri = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: 
goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("estimate_gradients_2d_global", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 477; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.estimate_gradients_2d_global", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_y); - __pyx_bstruct_data.buf = NULL; - __pyx_bstruct_grad.buf = NULL; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__asanyarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_y); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_y); - __Pyx_GIVEREF(__pyx_v_y); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_y); - __pyx_v_y = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = 
__Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_v_tri, __pyx_n_s__npoints); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { - - - __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_20), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__issubdtype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__dtype); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__complexfloating); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_2 = 0; - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_4) { - - - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s_21); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__real); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(2); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_tri); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_tri); - __Pyx_GIVEREF(__pyx_v_tri); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__maxiter), __pyx_v_maxiter) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__tol), __pyx_v_tol) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_rg = __pyx_t_2; - __pyx_t_2 = 0; - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_21); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__imag); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - 
__Pyx_INCREF(__pyx_v_tri); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_tri); - __Pyx_GIVEREF(__pyx_v_tri); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__maxiter), __pyx_v_maxiter) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__tol), __pyx_v_tol) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_ig = __pyx_t_5; - __pyx_t_5 = 0; - - - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_GetAttr(__pyx_v_rg, __pyx_n_s__shape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)((PyObject*)(&PyComplex_Type)))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_v_r = __pyx_t_2; - __pyx_t_2 = 0; - - - if (PyObject_SetAttr(__pyx_v_r, __pyx_n_s__real, __pyx_v_rg) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_r, __pyx_n_s__imag, __pyx_v_ig) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 493; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_r); - __pyx_r = __pyx_v_r; - goto __pyx_L0; - goto __pyx_L7; - } - __pyx_L7:; - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_y_shape = __pyx_t_2; - __pyx_t_2 = 0; - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = 
PyObject_RichCompare(__pyx_t_2, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_4) { - - - __pyx_t_5 = PyObject_GetItem(__pyx_v_y, ((PyObject *)__pyx_k_tuple_23)); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 499; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_v_y); - __pyx_v_y = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_t_5 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__reshape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyObject_GetAttr(__pyx_v_tri, __pyx_n_s__npoints); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = 
PyObject_GetAttr(__pyx_t_2, __pyx_n_s__T); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_y); - __pyx_v_y = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_y); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_y); - __Pyx_GIVEREF(__pyx_v_y); - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__astype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_v_y); - __pyx_v_y = __pyx_t_2; - __pyx_t_2 = 0; - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_v_y, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, sizeof(long), 
PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_INCREF(__pyx_int_2); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_yi = __pyx_t_2; - __pyx_t_2 = 0; - - - if (!(likely(((__pyx_v_y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_y, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_v_y); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_data); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_data, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); - if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_data, (PyObject*)__pyx_v_data, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); - } - } - __pyx_bstride_0_data = __pyx_bstruct_data.strides[0]; __pyx_bstride_1_data = __pyx_bstruct_data.strides[1]; - __pyx_bshape_0_data = __pyx_bstruct_data.shape[0]; __pyx_bshape_1_data = __pyx_bstruct_data.shape[1]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_6 = 0; - __Pyx_INCREF(__pyx_v_y); - __pyx_v_data = ((PyArrayObject *)__pyx_v_y); - - - if (!(likely(((__pyx_v_yi) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_yi, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_11 = ((PyArrayObject *)__pyx_v_yi); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_grad, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_grad, (PyObject*)__pyx_v_grad, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); - } - } - __pyx_bstride_0_grad = __pyx_bstruct_grad.strides[0]; __pyx_bstride_1_grad = __pyx_bstruct_grad.strides[1]; __pyx_bstride_2_grad = __pyx_bstruct_grad.strides[2]; - __pyx_bshape_0_grad = __pyx_bstruct_grad.shape[0]; 
__pyx_bshape_1_grad = __pyx_bstruct_grad.shape[1]; __pyx_bshape_2_grad = __pyx_bstruct_grad.shape[2]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_11 = 0; - __Pyx_INCREF(__pyx_v_yi); - __pyx_v_grad = ((PyArrayObject *)__pyx_v_yi); - - - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_v_tri, 0, 1); - - - __pyx_v_nvalues = (__pyx_v_data->dimensions[0]); - - - __pyx_t_7 = __pyx_v_nvalues; - for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_7; __pyx_t_12+=1) { - __pyx_v_k = __pyx_t_12; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_13 = __Pyx_PyInt_AsInt(__pyx_v_maxiter); if (unlikely((__pyx_t_13 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L14;} - - - __pyx_t_14 = __pyx_PyFloat_AsDouble(__pyx_v_tol); if (unlikely((__pyx_t_14 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L14;} - - - __pyx_v_ret = __pyx_f_8interpnd__estimate_gradients_2d_global((&__pyx_v_info), (((double *)__pyx_v_data->data) + (__pyx_v_info.npoints * __pyx_v_k)), __pyx_t_13, __pyx_t_14, (((double *)__pyx_v_grad->data) + ((2 * __pyx_v_info.npoints) * __pyx_v_k))); - } - - - { - int __pyx_why; - __pyx_why = 0; goto __pyx_L15; - __pyx_L14: __pyx_why = 4; goto __pyx_L15; - __pyx_L15:; - Py_BLOCK_THREADS - switch (__pyx_why) { - case 4: goto __pyx_L1_error; - } - } - } - - - __pyx_t_4 = (__pyx_v_ret == 0); - if (__pyx_t_4) { - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__warnings); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__warn); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_25); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_24)); - PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_kp_s_24)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_24)); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - goto __pyx_L17; - } - __pyx_L17:; - } - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyObject_GetAttr(__pyx_v_yi, __pyx_n_s__transpose); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_26), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Add(__pyx_v_y_shape, ((PyObject *)__pyx_k_tuple_27)); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_data); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("interpnd.estimate_gradients_2d_global", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_data); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_data); - __Pyx_XDECREF((PyObject *)__pyx_v_grad); - __Pyx_XDECREF(__pyx_v_rg); - __Pyx_XDECREF(__pyx_v_ig); - __Pyx_XDECREF(__pyx_v_r); - __Pyx_XDECREF(__pyx_v_y_shape); - __Pyx_XDECREF(__pyx_v_yi); - __Pyx_XDECREF(__pyx_v_y); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static double __pyx_f_8interpnd__clough_tocher_2d_single_double(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t 
*__pyx_v_d, int __pyx_v_isimplex, double *__pyx_v_b, double *__pyx_v_f, double *__pyx_v_df) { - double __pyx_v_c3000; - double __pyx_v_c0300; - double __pyx_v_c0030; - double __pyx_v_c0003; - double __pyx_v_c2100; - double __pyx_v_c2010; - double __pyx_v_c2001; - double __pyx_v_c0210; - double __pyx_v_c0201; - double __pyx_v_c0021; - double __pyx_v_c1200; - double __pyx_v_c1020; - double __pyx_v_c1002; - double __pyx_v_c0120; - double __pyx_v_c0102; - double __pyx_v_c0012; - double __pyx_v_c1101; - double __pyx_v_c1011; - double __pyx_v_c0111; - double __pyx_v_f1; - double __pyx_v_f2; - double __pyx_v_f3; - double __pyx_v_df12; - double __pyx_v_df13; - double __pyx_v_df21; - double __pyx_v_df23; - double __pyx_v_df31; - double __pyx_v_df32; - double __pyx_v_g1; - double __pyx_v_g2; - double __pyx_v_g3; - double __pyx_v_e12x; - double __pyx_v_e12y; - double __pyx_v_e23x; - double __pyx_v_e23y; - double __pyx_v_e31x; - double __pyx_v_e31y; - double __pyx_v_e14x; - double __pyx_v_e14y; - double __pyx_v_e24x; - double __pyx_v_e24y; - double __pyx_v_e34x; - double __pyx_v_e34y; - double __pyx_v_w; - double __pyx_v_minval; - double __pyx_v_b1; - double __pyx_v_b2; - double __pyx_v_b3; - double __pyx_v_b4; - int __pyx_v_k; - int __pyx_v_itri; - double __pyx_v_c[3]; - double __pyx_v_y[2]; - double __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - - - __pyx_v_e12x = ((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))]) - (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))])); - - - __pyx_v_e12y = ((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))]) - (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))])); - - - __pyx_v_e23x = ((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))]) - (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))])); - - - __pyx_v_e23y = ((__pyx_v_d->points[(1 + (2 * 
(__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))]) - (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))])); - - - __pyx_v_e31x = ((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))]) - (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))])); - - - __pyx_v_e31y = ((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))]) - (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))])); - - - __pyx_v_e14x = ((__pyx_v_e12x - __pyx_v_e31x) / 3.0); - - - __pyx_v_e14y = ((__pyx_v_e12y - __pyx_v_e31y) / 3.0); - - - __pyx_v_e24x = (((-__pyx_v_e12x) + __pyx_v_e23x) / 3.0); - - - __pyx_v_e24y = (((-__pyx_v_e12y) + __pyx_v_e23y) / 3.0); - - - __pyx_v_e34x = ((__pyx_v_e31x - __pyx_v_e23x) / 3.0); - - - __pyx_v_e34y = ((__pyx_v_e31y - __pyx_v_e23y) / 3.0); - - - __pyx_v_f1 = (__pyx_v_f[0]); - - - __pyx_v_f2 = (__pyx_v_f[1]); - - - __pyx_v_f3 = (__pyx_v_f[2]); - - - __pyx_v_df12 = (((__pyx_v_df[0]) * __pyx_v_e12x) + ((__pyx_v_df[1]) * __pyx_v_e12y)); - - - __pyx_v_df21 = (-(((__pyx_v_df[2]) * __pyx_v_e12x) + ((__pyx_v_df[3]) * __pyx_v_e12y))); - - - __pyx_v_df23 = (((__pyx_v_df[2]) * __pyx_v_e23x) + ((__pyx_v_df[3]) * __pyx_v_e23y)); - - - __pyx_v_df32 = (-(((__pyx_v_df[4]) * __pyx_v_e23x) + ((__pyx_v_df[5]) * __pyx_v_e23y))); - - - __pyx_v_df31 = (((__pyx_v_df[4]) * __pyx_v_e31x) + ((__pyx_v_df[5]) * __pyx_v_e31y)); - - - __pyx_v_df13 = (-(((__pyx_v_df[0]) * __pyx_v_e31x) + ((__pyx_v_df[1]) * __pyx_v_e31y))); - - - __pyx_v_c3000 = __pyx_v_f1; - - - __pyx_v_c2100 = ((__pyx_v_df12 + (3.0 * __pyx_v_c3000)) / 3.0); - - - __pyx_v_c2010 = ((__pyx_v_df13 + (3.0 * __pyx_v_c3000)) / 3.0); - - - __pyx_v_c0300 = __pyx_v_f2; - - - __pyx_v_c1200 = ((__pyx_v_df21 + (3.0 * __pyx_v_c0300)) / 3.0); - - - __pyx_v_c0210 = ((__pyx_v_df23 + (3.0 * __pyx_v_c0300)) / 3.0); - - - __pyx_v_c0030 = __pyx_v_f3; - - - __pyx_v_c1020 = ((__pyx_v_df31 + (3.0 * 
__pyx_v_c0030)) / 3.0); - - - __pyx_v_c0120 = ((__pyx_v_df32 + (3.0 * __pyx_v_c0030)) / 3.0); - - - __pyx_v_c2001 = (((__pyx_v_c2100 + __pyx_v_c2010) + __pyx_v_c3000) / 3.0); - - - __pyx_v_c0201 = (((__pyx_v_c1200 + __pyx_v_c0300) + __pyx_v_c0210) / 3.0); - - - __pyx_v_c0021 = (((__pyx_v_c1020 + __pyx_v_c0120) + __pyx_v_c0030) / 3.0); - - - for (__pyx_t_1 = 0; __pyx_t_1 < 3; __pyx_t_1+=1) { - __pyx_v_k = __pyx_t_1; - - - __pyx_v_itri = (__pyx_v_d->neighbors[((3 * __pyx_v_isimplex) + __pyx_v_k)]); - - - __pyx_t_2 = (__pyx_v_itri == -1); - if (__pyx_t_2) { - - - switch (__pyx_v_k) { - - - case 0: - - - __pyx_v_g1 = (-2. / 3.0); - break; - - - case 1: - - - __pyx_v_g2 = (-2. / 3.0); - break; - - - case 2: - - - __pyx_v_g3 = (-2. / 3.0); - break; - } - - - goto __pyx_L3_continue; - goto __pyx_L5; - } - __pyx_L5:; - - - (__pyx_v_y[0]) = ((((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 0)])))]) + (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 1)])))])) + (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 2)])))])) / 3.0); - - - (__pyx_v_y[1]) = ((((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 0)])))]) + (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 1)])))])) + (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 2)])))])) / 3.0); - - - __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates(2, (__pyx_v_d->transform + ((__pyx_v_isimplex * 2) * 3)), __pyx_v_y, __pyx_v_c); - - - switch (__pyx_v_k) { - - - case 0: - - - __pyx_v_g1 = ((((2.0 * (__pyx_v_c[2])) + (__pyx_v_c[1])) - 1.0) / ((2.0 - (3.0 * (__pyx_v_c[2]))) - (3.0 * (__pyx_v_c[1])))); - break; - - - case 1: - - - __pyx_v_g2 = ((((2.0 * (__pyx_v_c[0])) + (__pyx_v_c[2])) - 1.0) / ((2.0 - (3.0 * (__pyx_v_c[0]))) - (3.0 * (__pyx_v_c[2])))); - break; - - - case 2: - - - __pyx_v_g3 = ((((2.0 * (__pyx_v_c[1])) + (__pyx_v_c[0])) - 1.0) / ((2.0 - (3.0 * (__pyx_v_c[1]))) - (3.0 * 
(__pyx_v_c[0])))); - break; - } - __pyx_L3_continue:; - } - - - __pyx_v_c0111 = (((__pyx_v_g1 * ((((-__pyx_v_c0300) + (3.0 * __pyx_v_c0210)) - (3.0 * __pyx_v_c0120)) + __pyx_v_c0030)) + (((((-__pyx_v_c0300) + (2.0 * __pyx_v_c0210)) - __pyx_v_c0120) + __pyx_v_c0021) + __pyx_v_c0201)) / 2.0); - - - __pyx_v_c1011 = (((__pyx_v_g2 * ((((-__pyx_v_c0030) + (3.0 * __pyx_v_c1020)) - (3.0 * __pyx_v_c2010)) + __pyx_v_c3000)) + (((((-__pyx_v_c0030) + (2.0 * __pyx_v_c1020)) - __pyx_v_c2010) + __pyx_v_c2001) + __pyx_v_c0021)) / 2.0); - - - __pyx_v_c1101 = (((__pyx_v_g3 * ((((-__pyx_v_c3000) + (3.0 * __pyx_v_c2100)) - (3.0 * __pyx_v_c1200)) + __pyx_v_c0300)) + (((((-__pyx_v_c3000) + (2.0 * __pyx_v_c2100)) - __pyx_v_c1200) + __pyx_v_c2001) + __pyx_v_c0201)) / 2.0); - - - __pyx_v_c1002 = (((__pyx_v_c1101 + __pyx_v_c1011) + __pyx_v_c2001) / 3.0); - - - __pyx_v_c0102 = (((__pyx_v_c1101 + __pyx_v_c0111) + __pyx_v_c0201) / 3.0); - - - __pyx_v_c0012 = (((__pyx_v_c1011 + __pyx_v_c0111) + __pyx_v_c0021) / 3.0); - - - __pyx_v_c0003 = (((__pyx_v_c1002 + __pyx_v_c0102) + __pyx_v_c0012) / 3.0); - - - __pyx_v_minval = (__pyx_v_b[0]); - - - for (__pyx_t_1 = 0; __pyx_t_1 < 3; __pyx_t_1+=1) { - __pyx_v_k = __pyx_t_1; - - - __pyx_t_2 = ((__pyx_v_b[__pyx_v_k]) < __pyx_v_minval); - if (__pyx_t_2) { - - - __pyx_v_minval = (__pyx_v_b[__pyx_v_k]); - goto __pyx_L8; - } - __pyx_L8:; - } - - - __pyx_v_b1 = ((__pyx_v_b[0]) - __pyx_v_minval); - - - __pyx_v_b2 = ((__pyx_v_b[1]) - __pyx_v_minval); - - - __pyx_v_b3 = ((__pyx_v_b[2]) - __pyx_v_minval); - - - __pyx_v_b4 = (3.0 * __pyx_v_minval); - - - __pyx_v_w = (((((((((((((((((((pow(__pyx_v_b1, 3.0) * __pyx_v_c3000) + (((3.0 * pow(__pyx_v_b1, 2.0)) * __pyx_v_b2) * __pyx_v_c2100)) + (((3.0 * pow(__pyx_v_b1, 2.0)) * __pyx_v_b3) * __pyx_v_c2010)) + (((3.0 * pow(__pyx_v_b1, 2.0)) * __pyx_v_b4) * __pyx_v_c2001)) + (((3.0 * __pyx_v_b1) * pow(__pyx_v_b2, 2.0)) * __pyx_v_c1200)) + ((((6.0 * __pyx_v_b1) * __pyx_v_b2) * __pyx_v_b4) * __pyx_v_c1101)) + (((3.0 * 
__pyx_v_b1) * pow(__pyx_v_b3, 2.0)) * __pyx_v_c1020)) + ((((6.0 * __pyx_v_b1) * __pyx_v_b3) * __pyx_v_b4) * __pyx_v_c1011)) + (((3.0 * __pyx_v_b1) * pow(__pyx_v_b4, 2.0)) * __pyx_v_c1002)) + (pow(__pyx_v_b2, 3.0) * __pyx_v_c0300)) + (((3.0 * pow(__pyx_v_b2, 2.0)) * __pyx_v_b3) * __pyx_v_c0210)) + (((3.0 * pow(__pyx_v_b2, 2.0)) * __pyx_v_b4) * __pyx_v_c0201)) + (((3.0 * __pyx_v_b2) * pow(__pyx_v_b3, 2.0)) * __pyx_v_c0120)) + ((((6.0 * __pyx_v_b2) * __pyx_v_b3) * __pyx_v_b4) * __pyx_v_c0111)) + (((3.0 * __pyx_v_b2) * pow(__pyx_v_b4, 2.0)) * __pyx_v_c0102)) + (pow(__pyx_v_b3, 3.0) * __pyx_v_c0030)) + (((3.0 * pow(__pyx_v_b3, 2.0)) * __pyx_v_b4) * __pyx_v_c0021)) + (((3.0 * __pyx_v_b3) * pow(__pyx_v_b4, 2.0)) * __pyx_v_c0012)) + (pow(__pyx_v_b4, 3.0) * __pyx_v_c0003)); - - - __pyx_r = __pyx_v_w; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static __pyx_t_double_complex __pyx_f_8interpnd__clough_tocher_2d_single_complex(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, int __pyx_v_isimplex, double *__pyx_v_b, __pyx_t_double_complex *__pyx_v_f, __pyx_t_double_complex *__pyx_v_df) { - __pyx_t_double_complex __pyx_v_c3000; - __pyx_t_double_complex __pyx_v_c0300; - __pyx_t_double_complex __pyx_v_c0030; - __pyx_t_double_complex __pyx_v_c0003; - __pyx_t_double_complex __pyx_v_c2100; - __pyx_t_double_complex __pyx_v_c2010; - __pyx_t_double_complex __pyx_v_c2001; - __pyx_t_double_complex __pyx_v_c0210; - __pyx_t_double_complex __pyx_v_c0201; - __pyx_t_double_complex __pyx_v_c0021; - __pyx_t_double_complex __pyx_v_c1200; - __pyx_t_double_complex __pyx_v_c1020; - __pyx_t_double_complex __pyx_v_c1002; - __pyx_t_double_complex __pyx_v_c0120; - __pyx_t_double_complex __pyx_v_c0102; - __pyx_t_double_complex __pyx_v_c0012; - __pyx_t_double_complex __pyx_v_c1101; - __pyx_t_double_complex __pyx_v_c1011; - __pyx_t_double_complex __pyx_v_c0111; - __pyx_t_double_complex __pyx_v_f1; - __pyx_t_double_complex __pyx_v_f2; - __pyx_t_double_complex 
__pyx_v_f3; - __pyx_t_double_complex __pyx_v_df12; - __pyx_t_double_complex __pyx_v_df13; - __pyx_t_double_complex __pyx_v_df21; - __pyx_t_double_complex __pyx_v_df23; - __pyx_t_double_complex __pyx_v_df31; - __pyx_t_double_complex __pyx_v_df32; - double __pyx_v_g1; - double __pyx_v_g2; - double __pyx_v_g3; - double __pyx_v_e12x; - double __pyx_v_e12y; - double __pyx_v_e23x; - double __pyx_v_e23y; - double __pyx_v_e31x; - double __pyx_v_e31y; - double __pyx_v_e14x; - double __pyx_v_e14y; - double __pyx_v_e24x; - double __pyx_v_e24y; - double __pyx_v_e34x; - double __pyx_v_e34y; - __pyx_t_double_complex __pyx_v_w; - double __pyx_v_minval; - double __pyx_v_b1; - double __pyx_v_b2; - double __pyx_v_b3; - double __pyx_v_b4; - int __pyx_v_k; - int __pyx_v_itri; - double __pyx_v_c[3]; - double __pyx_v_y[2]; - __pyx_t_double_complex __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - - - __pyx_v_e12x = ((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))]) - (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))])); - - - __pyx_v_e12y = ((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))]) - (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))])); - - - __pyx_v_e23x = ((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))]) - (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))])); - - - __pyx_v_e23y = ((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))]) - (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 1)])))])); - - - __pyx_v_e31x = ((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))]) - (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))])); - - - __pyx_v_e31y = ((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 0)])))]) - (__pyx_v_d->points[(1 + (2 * 
(__pyx_v_d->vertices[((3 * __pyx_v_isimplex) + 2)])))])); - - - __pyx_v_e14x = ((__pyx_v_e12x - __pyx_v_e31x) / 3.0); - - - __pyx_v_e14y = ((__pyx_v_e12y - __pyx_v_e31y) / 3.0); - - - __pyx_v_e24x = (((-__pyx_v_e12x) + __pyx_v_e23x) / 3.0); - - - __pyx_v_e24y = (((-__pyx_v_e12y) + __pyx_v_e23y) / 3.0); - - - __pyx_v_e34x = ((__pyx_v_e31x - __pyx_v_e23x) / 3.0); - - - __pyx_v_e34y = ((__pyx_v_e31y - __pyx_v_e23y) / 3.0); - - - __pyx_v_f1 = (__pyx_v_f[0]); - - - __pyx_v_f2 = (__pyx_v_f[1]); - - - __pyx_v_f3 = (__pyx_v_f[2]); - - - __pyx_v_df12 = __Pyx_c_sum(__Pyx_c_prod((__pyx_v_df[0]), __pyx_t_double_complex_from_parts(__pyx_v_e12x, 0)), __Pyx_c_prod((__pyx_v_df[1]), __pyx_t_double_complex_from_parts(__pyx_v_e12y, 0))); - - - __pyx_v_df21 = __Pyx_c_neg(__Pyx_c_sum(__Pyx_c_prod((__pyx_v_df[2]), __pyx_t_double_complex_from_parts(__pyx_v_e12x, 0)), __Pyx_c_prod((__pyx_v_df[3]), __pyx_t_double_complex_from_parts(__pyx_v_e12y, 0)))); - - - __pyx_v_df23 = __Pyx_c_sum(__Pyx_c_prod((__pyx_v_df[2]), __pyx_t_double_complex_from_parts(__pyx_v_e23x, 0)), __Pyx_c_prod((__pyx_v_df[3]), __pyx_t_double_complex_from_parts(__pyx_v_e23y, 0))); - - - __pyx_v_df32 = __Pyx_c_neg(__Pyx_c_sum(__Pyx_c_prod((__pyx_v_df[4]), __pyx_t_double_complex_from_parts(__pyx_v_e23x, 0)), __Pyx_c_prod((__pyx_v_df[5]), __pyx_t_double_complex_from_parts(__pyx_v_e23y, 0)))); - - - __pyx_v_df31 = __Pyx_c_sum(__Pyx_c_prod((__pyx_v_df[4]), __pyx_t_double_complex_from_parts(__pyx_v_e31x, 0)), __Pyx_c_prod((__pyx_v_df[5]), __pyx_t_double_complex_from_parts(__pyx_v_e31y, 0))); - - - __pyx_v_df13 = __Pyx_c_neg(__Pyx_c_sum(__Pyx_c_prod((__pyx_v_df[0]), __pyx_t_double_complex_from_parts(__pyx_v_e31x, 0)), __Pyx_c_prod((__pyx_v_df[1]), __pyx_t_double_complex_from_parts(__pyx_v_e31y, 0)))); - - - __pyx_v_c3000 = __pyx_v_f1; - - - __pyx_v_c2100 = __Pyx_c_quot(__Pyx_c_sum(__pyx_v_df12, __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c3000)), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c2010 = 
__Pyx_c_quot(__Pyx_c_sum(__pyx_v_df13, __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c3000)), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0300 = __pyx_v_f2; - - - __pyx_v_c1200 = __Pyx_c_quot(__Pyx_c_sum(__pyx_v_df21, __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c0300)), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0210 = __Pyx_c_quot(__Pyx_c_sum(__pyx_v_df23, __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c0300)), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0030 = __pyx_v_f3; - - - __pyx_v_c1020 = __Pyx_c_quot(__Pyx_c_sum(__pyx_v_df31, __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c0030)), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0120 = __Pyx_c_quot(__Pyx_c_sum(__pyx_v_df32, __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c0030)), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c2001 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c2100, __pyx_v_c2010), __pyx_v_c3000), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0201 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c1200, __pyx_v_c0300), __pyx_v_c0210), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0021 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c1020, __pyx_v_c0120), __pyx_v_c0030), __pyx_t_double_complex_from_parts(3, 0)); - - - for (__pyx_t_1 = 0; __pyx_t_1 < 3; __pyx_t_1+=1) { - __pyx_v_k = __pyx_t_1; - - - __pyx_v_itri = (__pyx_v_d->neighbors[((3 * __pyx_v_isimplex) + __pyx_v_k)]); - - - __pyx_t_2 = (__pyx_v_itri == -1); - if (__pyx_t_2) { - - - switch (__pyx_v_k) { - - - case 0: - - - __pyx_v_g1 = (-2. / 3.0); - break; - - - case 1: - - - __pyx_v_g2 = (-2. / 3.0); - break; - - - case 2: - - - __pyx_v_g3 = (-2. 
/ 3.0); - break; - } - - - goto __pyx_L3_continue; - goto __pyx_L5; - } - __pyx_L5:; - - - (__pyx_v_y[0]) = ((((__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 0)])))]) + (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 1)])))])) + (__pyx_v_d->points[(0 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 2)])))])) / 3.0); - - - (__pyx_v_y[1]) = ((((__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 0)])))]) + (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 1)])))])) + (__pyx_v_d->points[(1 + (2 * (__pyx_v_d->vertices[((3 * __pyx_v_itri) + 2)])))])) / 3.0); - - - __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates(2, (__pyx_v_d->transform + ((__pyx_v_isimplex * 2) * 3)), __pyx_v_y, __pyx_v_c); - - - switch (__pyx_v_k) { - - - case 0: - - - __pyx_v_g1 = ((((2.0 * (__pyx_v_c[2])) + (__pyx_v_c[1])) - 1.0) / ((2.0 - (3.0 * (__pyx_v_c[2]))) - (3.0 * (__pyx_v_c[1])))); - break; - - - case 1: - - - __pyx_v_g2 = ((((2.0 * (__pyx_v_c[0])) + (__pyx_v_c[2])) - 1.0) / ((2.0 - (3.0 * (__pyx_v_c[0]))) - (3.0 * (__pyx_v_c[2])))); - break; - - - case 2: - - - __pyx_v_g3 = ((((2.0 * (__pyx_v_c[1])) + (__pyx_v_c[0])) - 1.0) / ((2.0 - (3.0 * (__pyx_v_c[1]))) - (3.0 * (__pyx_v_c[0])))); - break; - } - __pyx_L3_continue:; - } - - - __pyx_v_c0111 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_prod(__pyx_t_double_complex_from_parts(__pyx_v_g1, 0), __Pyx_c_sum(__Pyx_c_diff(__Pyx_c_sum(__Pyx_c_neg(__pyx_v_c0300), __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c0210)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c0120)), __pyx_v_c0030)), __Pyx_c_sum(__Pyx_c_sum(__Pyx_c_diff(__Pyx_c_sum(__Pyx_c_neg(__pyx_v_c0300), __Pyx_c_prod(__pyx_t_double_complex_from_parts(2, 0), __pyx_v_c0210)), __pyx_v_c0120), __pyx_v_c0021), __pyx_v_c0201)), __pyx_t_double_complex_from_parts(2, 0)); - - - __pyx_v_c1011 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_prod(__pyx_t_double_complex_from_parts(__pyx_v_g2, 0), 
__Pyx_c_sum(__Pyx_c_diff(__Pyx_c_sum(__Pyx_c_neg(__pyx_v_c0030), __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c1020)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c2010)), __pyx_v_c3000)), __Pyx_c_sum(__Pyx_c_sum(__Pyx_c_diff(__Pyx_c_sum(__Pyx_c_neg(__pyx_v_c0030), __Pyx_c_prod(__pyx_t_double_complex_from_parts(2, 0), __pyx_v_c1020)), __pyx_v_c2010), __pyx_v_c2001), __pyx_v_c0021)), __pyx_t_double_complex_from_parts(2, 0)); - - - __pyx_v_c1101 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_prod(__pyx_t_double_complex_from_parts(__pyx_v_g3, 0), __Pyx_c_sum(__Pyx_c_diff(__Pyx_c_sum(__Pyx_c_neg(__pyx_v_c3000), __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c2100)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_c1200)), __pyx_v_c0300)), __Pyx_c_sum(__Pyx_c_sum(__Pyx_c_diff(__Pyx_c_sum(__Pyx_c_neg(__pyx_v_c3000), __Pyx_c_prod(__pyx_t_double_complex_from_parts(2, 0), __pyx_v_c2100)), __pyx_v_c1200), __pyx_v_c2001), __pyx_v_c0201)), __pyx_t_double_complex_from_parts(2, 0)); - - - __pyx_v_c1002 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c1101, __pyx_v_c1011), __pyx_v_c2001), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0102 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c1101, __pyx_v_c0111), __pyx_v_c0201), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0012 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c1011, __pyx_v_c0111), __pyx_v_c0021), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_c0003 = __Pyx_c_quot(__Pyx_c_sum(__Pyx_c_sum(__pyx_v_c1002, __pyx_v_c0102), __pyx_v_c0012), __pyx_t_double_complex_from_parts(3, 0)); - - - __pyx_v_minval = (__pyx_v_b[0]); - - - for (__pyx_t_1 = 0; __pyx_t_1 < 3; __pyx_t_1+=1) { - __pyx_v_k = __pyx_t_1; - - - __pyx_t_2 = ((__pyx_v_b[__pyx_v_k]) < __pyx_v_minval); - if (__pyx_t_2) { - - - __pyx_v_minval = (__pyx_v_b[__pyx_v_k]); - goto __pyx_L8; - } - __pyx_L8:; - } - - - __pyx_v_b1 = ((__pyx_v_b[0]) - __pyx_v_minval); - - - __pyx_v_b2 = 
((__pyx_v_b[1]) - __pyx_v_minval); - - - __pyx_v_b3 = ((__pyx_v_b[2]) - __pyx_v_minval); - - - __pyx_v_b4 = (3.0 * __pyx_v_minval); - - - __pyx_v_w = __Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_sum(__Pyx_c_prod(__pyx_t_double_complex_from_parts(pow(__pyx_v_b1, 3.0), 0), __pyx_v_c3000), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * pow(__pyx_v_b1, 2.0)) * __pyx_v_b2), 0), __pyx_v_c2100)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * pow(__pyx_v_b1, 2.0)) * __pyx_v_b3), 0), __pyx_v_c2010)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * pow(__pyx_v_b1, 2.0)) * __pyx_v_b4), 0), __pyx_v_c2001)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * __pyx_v_b1) * pow(__pyx_v_b2, 2.0)), 0), __pyx_v_c1200)), __Pyx_c_prod(__pyx_t_double_complex_from_parts((((6.0 * __pyx_v_b1) * __pyx_v_b2) * __pyx_v_b4), 0), __pyx_v_c1101)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * __pyx_v_b1) * pow(__pyx_v_b3, 2.0)), 0), __pyx_v_c1020)), __Pyx_c_prod(__pyx_t_double_complex_from_parts((((6.0 * __pyx_v_b1) * __pyx_v_b3) * __pyx_v_b4), 0), __pyx_v_c1011)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * __pyx_v_b1) * pow(__pyx_v_b4, 2.0)), 0), __pyx_v_c1002)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(pow(__pyx_v_b2, 3.0), 0), __pyx_v_c0300)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * pow(__pyx_v_b2, 2.0)) * __pyx_v_b3), 0), __pyx_v_c0210)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * pow(__pyx_v_b2, 2.0)) * __pyx_v_b4), 0), __pyx_v_c0201)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * __pyx_v_b2) * pow(__pyx_v_b3, 2.0)), 0), __pyx_v_c0120)), __Pyx_c_prod(__pyx_t_double_complex_from_parts((((6.0 * __pyx_v_b2) * __pyx_v_b3) * __pyx_v_b4), 0), __pyx_v_c0111)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * __pyx_v_b2) * 
pow(__pyx_v_b4, 2.0)), 0), __pyx_v_c0102)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(pow(__pyx_v_b3, 3.0), 0), __pyx_v_c0030)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * pow(__pyx_v_b3, 2.0)) * __pyx_v_b4), 0), __pyx_v_c0021)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(((3.0 * __pyx_v_b3) * pow(__pyx_v_b4, 2.0)), 0), __pyx_v_c0012)), __Pyx_c_prod(__pyx_t_double_complex_from_parts(pow(__pyx_v_b4, 3.0), 0), __pyx_v_c0003)); - - - __pyx_r = __pyx_v_w; - goto __pyx_L0; - - __pyx_r = __pyx_t_double_complex_from_parts(0, 0); - __pyx_L0:; - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_26CloughTocher2DInterpolator___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_26CloughTocher2DInterpolator___init__ = {__Pyx_NAMESTR("__init__"), (PyCFunction)__pyx_pf_8interpnd_26CloughTocher2DInterpolator___init__, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_26CloughTocher2DInterpolator___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_points = 0; - PyObject *__pyx_v_values = 0; - PyObject *__pyx_v_fill_value = 0; - PyObject *__pyx_v_tol = 0; - PyObject *__pyx_v_maxiter = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__points,&__pyx_n_s__values,&__pyx_n_s__fill_value,&__pyx_n_s__tol,&__pyx_n_s__maxiter,0}; - __Pyx_RefNannySetupContext("__init__"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[6] = {0,0,0,0,0,0}; - values[3] = __pyx_k_28; - values[4] = __pyx_k_29; - values[5] = ((PyObject *)__pyx_int_400); - 
switch (PyTuple_GET_SIZE(__pyx_args)) { - case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__points); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__values); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fill_value); - if (value) { values[3] = value; kw_args--; } - } - case 4: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tol); - if (value) { values[4] = value; kw_args--; } - } - case 5: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__maxiter); - if (value) { values[5] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_points = values[1]; - __pyx_v_values = values[2]; - __pyx_v_fill_value = 
values[3]; - __pyx_v_tol = values[4]; - __pyx_v_maxiter = values[5]; - } else { - __pyx_v_fill_value = __pyx_k_28; - __pyx_v_tol = __pyx_k_29; - __pyx_v_maxiter = ((PyObject *)__pyx_int_400); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 6: - __pyx_v_maxiter = PyTuple_GET_ITEM(__pyx_args, 5); - case 5: - __pyx_v_tol = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: - __pyx_v_fill_value = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_values = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_points = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.CloughTocher2DInterpolator.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__NDInterpolatorBase); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s____init__); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_self); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_points); - 
__Pyx_GIVEREF(__pyx_v_points); - __Pyx_INCREF(__pyx_v_values); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_values); - __Pyx_GIVEREF(__pyx_v_values); - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__ndim), __pyx_int_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__fill_value), __pyx_v_fill_value) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__qhull); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__Delaunay); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__tri, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s_21); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_1 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - - - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__tol), __pyx_v_tol) < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__maxiter), __pyx_v_maxiter) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_1 = PyEval_CallObjectWithKeywords(__pyx_t_4, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__grad, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("interpnd.CloughTocher2DInterpolator.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_26CloughTocher2DInterpolator_1_evaluate_double(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_26CloughTocher2DInterpolator_1_evaluate_double = {__Pyx_NAMESTR("_evaluate_double"), (PyCFunction)__pyx_pf_8interpnd_26CloughTocher2DInterpolator_1_evaluate_double, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_26CloughTocher2DInterpolator_1_evaluate_double(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyArrayObject *__pyx_v_xi = 0; - PyArrayObject *__pyx_v_values = 0; - PyArrayObject 
*__pyx_v_grad = 0; - PyArrayObject *__pyx_v_out = 0; - PyArrayObject *__pyx_v_points = 0; - PyArrayObject *__pyx_v_vertices = 0; - double __pyx_v_c[NPY_MAXDIMS]; - double __pyx_v_f[(NPY_MAXDIMS + 1)]; - double __pyx_v_df[((2 * NPY_MAXDIMS) + 2)]; - double __pyx_v_w; - double __pyx_v_fill_value; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_k; - int __pyx_v_ndim; - int __pyx_v_isimplex; - int __pyx_v_start; - int __pyx_v_nvalues; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - PyObject *__pyx_v_eps = NULL; - Py_buffer __pyx_bstruct_xi; - Py_ssize_t __pyx_bstride_0_xi = 0; - Py_ssize_t __pyx_bstride_1_xi = 0; - Py_ssize_t __pyx_bshape_0_xi = 0; - Py_ssize_t __pyx_bshape_1_xi = 0; - Py_buffer __pyx_bstruct_out; - Py_ssize_t __pyx_bstride_0_out = 0; - Py_ssize_t __pyx_bstride_1_out = 0; - Py_ssize_t __pyx_bshape_0_out = 0; - Py_ssize_t __pyx_bshape_1_out = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - Py_buffer __pyx_bstruct_values; - Py_ssize_t __pyx_bstride_0_values = 0; - Py_ssize_t __pyx_bstride_1_values = 0; - Py_ssize_t __pyx_bshape_0_values = 0; - Py_ssize_t __pyx_bshape_1_values = 0; - Py_buffer __pyx_bstruct_grad; - Py_ssize_t __pyx_bstride_0_grad = 0; - Py_ssize_t __pyx_bstride_1_grad = 0; - Py_ssize_t __pyx_bstride_2_grad = 0; - Py_ssize_t __pyx_bshape_0_grad = 0; - Py_ssize_t __pyx_bshape_1_grad = 0; - Py_ssize_t __pyx_bshape_2_grad = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - PyArrayObject *__pyx_t_3 = NULL; - PyArrayObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = 
NULL; - double __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyArrayObject *__pyx_t_11 = NULL; - int __pyx_t_12; - PyObject *__pyx_t_13 = NULL; - PyObject *__pyx_t_14 = NULL; - PyObject *__pyx_t_15 = NULL; - npy_intp __pyx_t_16; - int __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - long __pyx_t_22; - int __pyx_t_23; - int __pyx_t_24; - int __pyx_t_25; - npy_int __pyx_t_26; - int __pyx_t_27; - int __pyx_t_28; - int __pyx_t_29; - npy_int __pyx_t_30; - int __pyx_t_31; - long __pyx_t_32; - int __pyx_t_33; - int __pyx_t_34; - npy_int __pyx_t_35; - int __pyx_t_36; - long __pyx_t_37; - int __pyx_t_38; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("_evaluate_double"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_evaluate_double", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_evaluate_double") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - 
} - __pyx_v_self = values[0]; - __pyx_v_xi = ((PyArrayObject *)values[1]); - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 1)); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_evaluate_double", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.CloughTocher2DInterpolator._evaluate_double", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_values.buf = NULL; - __pyx_bstruct_grad.buf = NULL; - __pyx_bstruct_out.buf = NULL; - __pyx_bstruct_points.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_xi.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xi), __pyx_ptype_5numpy_ndarray, 1, "xi", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_xi, (PyObject*)__pyx_v_xi, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_xi = __pyx_bstruct_xi.strides[0]; __pyx_bstride_1_xi = __pyx_bstruct_xi.strides[1]; - __pyx_bshape_0_xi = __pyx_bstruct_xi.shape[0]; __pyx_bshape_1_xi = __pyx_bstruct_xi.shape[1]; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || 
likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_values, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_values.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_values = __pyx_bstruct_values.strides[0]; __pyx_bstride_1_values = __pyx_bstruct_values.strides[1]; - __pyx_bshape_0_values = __pyx_bstruct_values.shape[0]; __pyx_bshape_1_values = __pyx_bstruct_values.shape[1]; - } - } - __pyx_t_2 = 0; - __pyx_v_values = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__grad); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1056; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1056; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_grad, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { - __pyx_v_grad = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_grad.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1056; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_grad = __pyx_bstruct_grad.strides[0]; __pyx_bstride_1_grad = __pyx_bstruct_grad.strides[1]; 
__pyx_bstride_2_grad = __pyx_bstruct_grad.strides[2]; - __pyx_bshape_0_grad = __pyx_bstruct_grad.shape[0]; __pyx_bshape_1_grad = __pyx_bstruct_grad.shape[1]; __pyx_bshape_2_grad = __pyx_bstruct_grad.shape[2]; - } - } - __pyx_t_3 = 0; - __pyx_v_grad = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_points = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_points.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - } - } - __pyx_t_4 = 0; - __pyx_v_points = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__vertices); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_vertices = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_vertices.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - } - } - __pyx_t_6 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - - __pyx_v_ndim = (__pyx_v_xi->dimensions[1]); - - - __pyx_v_start = 0; - - - __pyx_t_5 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__fill_value); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1070; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_7 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1070; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_fill_value = __pyx_t_7; - - - __pyx_t_5 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1072; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_t_5, 1, 1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - - __pyx_t_5 = 
__Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_xi->dimensions[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_8 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__shape); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_9, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_8); - __pyx_t_5 = 0; - __pyx_t_8 = 0; - __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - 
PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_9)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_9)); - __pyx_t_9 = 0; - __pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_10 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_9, ((PyObject *)__pyx_n_s__dtype), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_8), ((PyObject *)__pyx_t_9)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - if (!(likely(((__pyx_t_10) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_10, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_11 = ((PyArrayObject *)__pyx_t_10); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __pyx_t_12 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_12 < 0)) { - 
PyErr_Fetch(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_v_out, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_15); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_13, __pyx_t_14, __pyx_t_15); - } - } - __pyx_bstride_0_out = __pyx_bstruct_out.strides[0]; __pyx_bstride_1_out = __pyx_bstruct_out.strides[1]; - __pyx_bshape_0_out = __pyx_bstruct_out.shape[0]; __pyx_bshape_1_out = __pyx_bstruct_out.shape[1]; - if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_11 = 0; - __pyx_v_out = ((PyArrayObject *)__pyx_t_10); - __pyx_t_10 = 0; - - - __pyx_v_nvalues = (__pyx_v_out->dimensions[1]); - - - __pyx_t_10 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_10, __pyx_n_s__finfo); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_10, __pyx_n_s__double); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_10)); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_8); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_10), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0; - __pyx_t_10 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__eps); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyNumber_Multiply(__pyx_t_10, __pyx_int_100); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1077; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_v_eps = __pyx_t_8; - __pyx_t_8 = 0; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_16 = (__pyx_v_xi->dimensions[0]); - for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_16; __pyx_t_12+=1) { - __pyx_v_i = __pyx_t_12; - - - __pyx_t_7 = __pyx_PyFloat_AsDouble(__pyx_v_eps); if (unlikely((__pyx_t_7 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L7;} - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex((&__pyx_v_info), __pyx_v_c, (((double *)__pyx_v_xi->data) + (__pyx_v_i * __pyx_v_ndim)), (&__pyx_v_start), __pyx_t_7); - - - __pyx_t_17 = (__pyx_v_isimplex == -1); - if (__pyx_t_17) { - - - __pyx_t_18 = __pyx_v_nvalues; - for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) { - __pyx_v_k = __pyx_t_19; - - - __pyx_t_20 = __pyx_v_i; - __pyx_t_21 = __pyx_v_k; - if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_0_out; - if (__pyx_t_21 < 0) __pyx_t_21 += 
__pyx_bshape_1_out; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_out.buf, __pyx_t_20, __pyx_bstride_0_out, __pyx_t_21, __pyx_bstride_1_out) = __pyx_v_fill_value; - } - - - goto __pyx_L9_continue; - goto __pyx_L11; - } - __pyx_L11:; - - - __pyx_t_18 = __pyx_v_nvalues; - for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) { - __pyx_v_k = __pyx_t_19; - - - __pyx_t_22 = (__pyx_v_ndim + 1); - for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) { - __pyx_v_j = __pyx_t_23; - - - __pyx_t_24 = __pyx_v_isimplex; - __pyx_t_25 = __pyx_v_j; - if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_bshape_0_vertices; - if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_bshape_1_vertices; - __pyx_t_26 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_24, __pyx_bstride_0_vertices, __pyx_t_25, __pyx_bstride_1_vertices)); - __pyx_t_27 = __pyx_v_k; - if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_bshape_0_values; - if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_bshape_1_values; - (__pyx_v_f[__pyx_v_j]) = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_values.buf, __pyx_t_26, __pyx_bstride_0_values, __pyx_t_27, __pyx_bstride_1_values)); - - - __pyx_t_28 = __pyx_v_isimplex; - __pyx_t_29 = __pyx_v_j; - if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_bshape_0_vertices; - if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_bshape_1_vertices; - __pyx_t_30 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_28, __pyx_bstride_0_vertices, __pyx_t_29, __pyx_bstride_1_vertices)); - __pyx_t_31 = __pyx_v_k; - __pyx_t_32 = 0; - if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_bshape_0_grad; - if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_bshape_1_grad; - if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_bshape_2_grad; - (__pyx_v_df[(2 * __pyx_v_j)]) = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_grad.buf, __pyx_t_30, __pyx_bstride_0_grad, __pyx_t_31, __pyx_bstride_1_grad, __pyx_t_32, __pyx_bstride_2_grad)); - - - __pyx_t_33 = __pyx_v_isimplex; - __pyx_t_34 = 
__pyx_v_j; - if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_bshape_0_vertices; - if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_bshape_1_vertices; - __pyx_t_35 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_33, __pyx_bstride_0_vertices, __pyx_t_34, __pyx_bstride_1_vertices)); - __pyx_t_36 = __pyx_v_k; - __pyx_t_37 = 1; - if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_bshape_0_grad; - if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_bshape_1_grad; - if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_bshape_2_grad; - (__pyx_v_df[((2 * __pyx_v_j) + 1)]) = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_grad.buf, __pyx_t_35, __pyx_bstride_0_grad, __pyx_t_36, __pyx_bstride_1_grad, __pyx_t_37, __pyx_bstride_2_grad)); - } - - - __pyx_v_w = __pyx_f_8interpnd__clough_tocher_2d_single_double((&__pyx_v_info), __pyx_v_isimplex, __pyx_v_c, __pyx_v_f, __pyx_v_df); - - - __pyx_t_23 = __pyx_v_i; - __pyx_t_38 = __pyx_v_k; - if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_bshape_0_out; - if (__pyx_t_38 < 0) __pyx_t_38 += __pyx_bshape_1_out; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_out.buf, __pyx_t_23, __pyx_bstride_0_out, __pyx_t_38, __pyx_bstride_1_out) = __pyx_v_w; - } - __pyx_L9_continue:; - } - } - - - { - int __pyx_why; - __pyx_why = 0; goto __pyx_L8; - __pyx_L7: __pyx_why = 4; goto __pyx_L8; - __pyx_L8:; - Py_BLOCK_THREADS - switch (__pyx_why) { - case 4: goto __pyx_L1_error; - } - } - } - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_out)); - __pyx_r = ((PyObject *)__pyx_v_out); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - 
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("interpnd.CloughTocher2DInterpolator._evaluate_double", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_values); - __Pyx_XDECREF((PyObject *)__pyx_v_grad); - __Pyx_XDECREF((PyObject *)__pyx_v_out); - __Pyx_XDECREF((PyObject *)__pyx_v_points); - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_eps); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_8interpnd_26CloughTocher2DInterpolator_2_evaluate_complex(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_8interpnd_26CloughTocher2DInterpolator_2_evaluate_complex = {__Pyx_NAMESTR("_evaluate_complex"), (PyCFunction)__pyx_pf_8interpnd_26CloughTocher2DInterpolator_2_evaluate_complex, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_8interpnd_26CloughTocher2DInterpolator_2_evaluate_complex(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyArrayObject *__pyx_v_xi = 0; - PyArrayObject *__pyx_v_values = 0; - PyArrayObject *__pyx_v_grad = 0; - PyArrayObject *__pyx_v_out = 0; - PyArrayObject *__pyx_v_points = 0; - PyArrayObject *__pyx_v_vertices = 0; - double __pyx_v_c[NPY_MAXDIMS]; - __pyx_t_double_complex __pyx_v_f[(NPY_MAXDIMS + 1)]; - __pyx_t_double_complex 
__pyx_v_df[((2 * NPY_MAXDIMS) + 2)]; - __pyx_t_double_complex __pyx_v_w; - __pyx_t_double_complex __pyx_v_fill_value; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_k; - int __pyx_v_ndim; - int __pyx_v_isimplex; - int __pyx_v_start; - int __pyx_v_nvalues; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - PyObject *__pyx_v_eps = NULL; - Py_buffer __pyx_bstruct_xi; - Py_ssize_t __pyx_bstride_0_xi = 0; - Py_ssize_t __pyx_bstride_1_xi = 0; - Py_ssize_t __pyx_bshape_0_xi = 0; - Py_ssize_t __pyx_bshape_1_xi = 0; - Py_buffer __pyx_bstruct_out; - Py_ssize_t __pyx_bstride_0_out = 0; - Py_ssize_t __pyx_bstride_1_out = 0; - Py_ssize_t __pyx_bshape_0_out = 0; - Py_ssize_t __pyx_bshape_1_out = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - Py_buffer __pyx_bstruct_values; - Py_ssize_t __pyx_bstride_0_values = 0; - Py_ssize_t __pyx_bstride_1_values = 0; - Py_ssize_t __pyx_bshape_0_values = 0; - Py_ssize_t __pyx_bshape_1_values = 0; - Py_buffer __pyx_bstruct_grad; - Py_ssize_t __pyx_bstride_0_grad = 0; - Py_ssize_t __pyx_bstride_1_grad = 0; - Py_ssize_t __pyx_bstride_2_grad = 0; - Py_ssize_t __pyx_bshape_0_grad = 0; - Py_ssize_t __pyx_bshape_1_grad = 0; - Py_ssize_t __pyx_bshape_2_grad = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - PyArrayObject *__pyx_t_3 = NULL; - PyArrayObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - __pyx_t_double_complex __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyArrayObject *__pyx_t_11 = NULL; - int 
__pyx_t_12; - PyObject *__pyx_t_13 = NULL; - PyObject *__pyx_t_14 = NULL; - PyObject *__pyx_t_15 = NULL; - npy_intp __pyx_t_16; - double __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - int __pyx_t_23; - int __pyx_t_24; - long __pyx_t_25; - int __pyx_t_26; - int __pyx_t_27; - int __pyx_t_28; - npy_int __pyx_t_29; - int __pyx_t_30; - int __pyx_t_31; - int __pyx_t_32; - npy_int __pyx_t_33; - int __pyx_t_34; - int __pyx_t_35; - int __pyx_t_36; - npy_int __pyx_t_37; - int __pyx_t_38; - long __pyx_t_39; - int __pyx_t_40; - int __pyx_t_41; - npy_int __pyx_t_42; - int __pyx_t_43; - long __pyx_t_44; - int __pyx_t_45; - int __pyx_t_46; - npy_int __pyx_t_47; - int __pyx_t_48; - long __pyx_t_49; - int __pyx_t_50; - int __pyx_t_51; - npy_int __pyx_t_52; - int __pyx_t_53; - long __pyx_t_54; - int __pyx_t_55; - int __pyx_t_56; - int __pyx_t_57; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("_evaluate_complex"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_evaluate_complex", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_evaluate_complex") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_xi = ((PyArrayObject *)values[1]); - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 1)); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_evaluate_complex", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("interpnd.CloughTocher2DInterpolator._evaluate_complex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_values.buf = NULL; - __pyx_bstruct_grad.buf = NULL; - __pyx_bstruct_out.buf = NULL; - __pyx_bstruct_points.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_xi.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xi), __pyx_ptype_5numpy_ndarray, 1, "xi", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_xi, (PyObject*)__pyx_v_xi, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_xi = __pyx_bstruct_xi.strides[0]; __pyx_bstride_1_xi = __pyx_bstruct_xi.strides[1]; - __pyx_bshape_0_xi = __pyx_bstruct_xi.shape[0]; __pyx_bshape_1_xi = __pyx_bstruct_xi.shape[1]; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, 
__pyx_n_s__values); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[2]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_values, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_values.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_values = __pyx_bstruct_values.strides[0]; __pyx_bstride_1_values = __pyx_bstruct_values.strides[1]; - __pyx_bshape_0_values = __pyx_bstruct_values.shape[0]; __pyx_bshape_1_values = __pyx_bstruct_values.shape[1]; - } - } - __pyx_t_2 = 0; - __pyx_v_values = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__grad); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[2]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_grad, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { - __pyx_v_grad = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_grad.buf = NULL; - 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_grad = __pyx_bstruct_grad.strides[0]; __pyx_bstride_1_grad = __pyx_bstruct_grad.strides[1]; __pyx_bstride_2_grad = __pyx_bstruct_grad.strides[2]; - __pyx_bshape_0_grad = __pyx_bstruct_grad.shape[0]; __pyx_bshape_1_grad = __pyx_bstruct_grad.shape[1]; __pyx_bshape_2_grad = __pyx_bstruct_grad.shape[2]; - } - } - __pyx_t_3 = 0; - __pyx_v_grad = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_points = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_points.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - } - } - __pyx_t_4 = 0; - __pyx_v_points = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = 
PyObject_GetAttr(__pyx_t_1, __pyx_n_s__vertices); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_vertices = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_vertices.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - } - } - __pyx_t_6 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - - __pyx_v_ndim = (__pyx_v_xi->dimensions[1]); - - - __pyx_v_start = 0; - - - __pyx_t_5 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__fill_value); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_5); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_fill_value = __pyx_t_7; - - - __pyx_t_5 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__tri); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1127; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_t_5, 1, 1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_xi->dimensions[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_8 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__values); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__shape); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_9, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_8); - __pyx_t_5 = 0; - __pyx_t_8 = 
0; - __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_9)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_9)); - __pyx_t_9 = 0; - __pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_10 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__complex); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_9, ((PyObject *)__pyx_n_s__dtype), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_8), ((PyObject *)__pyx_t_9)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - if (!(likely(((__pyx_t_10) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_10, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_11 = ((PyArrayObject *)__pyx_t_10); - { - __Pyx_BufFmt_StackElem __pyx_stack[2]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __pyx_t_12 = 
__Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_12 < 0)) { - PyErr_Fetch(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_out, (PyObject*)__pyx_v_out, &__Pyx_TypeInfo_nn___pyx_t_5numpy_complex_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_15); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_13, __pyx_t_14, __pyx_t_15); - } - } - __pyx_bstride_0_out = __pyx_bstruct_out.strides[0]; __pyx_bstride_1_out = __pyx_bstruct_out.strides[1]; - __pyx_bshape_0_out = __pyx_bstruct_out.shape[0]; __pyx_bshape_1_out = __pyx_bstruct_out.shape[1]; - if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_11 = 0; - __pyx_v_out = ((PyArrayObject *)__pyx_t_10); - __pyx_t_10 = 0; - - - __pyx_v_nvalues = (__pyx_v_out->dimensions[1]); - - - __pyx_t_10 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_9 = PyObject_GetAttr(__pyx_t_10, __pyx_n_s__finfo); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_10, __pyx_n_s__double); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - 
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_10)); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_8); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_10), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0; - __pyx_t_10 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__eps); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyNumber_Multiply(__pyx_t_10, __pyx_int_100); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_v_eps = __pyx_t_8; - __pyx_t_8 = 0; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_16 = (__pyx_v_xi->dimensions[0]); - for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_16; __pyx_t_12+=1) { - __pyx_v_i = __pyx_t_12; - - - __pyx_t_17 = __pyx_PyFloat_AsDouble(__pyx_v_eps); if (unlikely((__pyx_t_17 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1140; __pyx_clineno = __LINE__; goto __pyx_L7;} - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex((&__pyx_v_info), __pyx_v_c, (((double *)__pyx_v_xi->data) + (__pyx_v_i * __pyx_v_ndim)), (&__pyx_v_start), __pyx_t_17); - - - __pyx_t_18 = (__pyx_v_isimplex == -1); - if (__pyx_t_18) { - - - __pyx_t_19 = __pyx_v_nvalues; - for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; 
__pyx_t_20+=1) { - __pyx_v_k = __pyx_t_20; - - - __pyx_t_21 = __pyx_v_i; - __pyx_t_22 = __pyx_v_k; - if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_bshape_0_out; - if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_21, __pyx_bstride_0_out, __pyx_t_22, __pyx_bstride_1_out)).real = __Pyx_CREAL(__pyx_v_fill_value); - - - __pyx_t_23 = __pyx_v_i; - __pyx_t_24 = __pyx_v_k; - if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_bshape_0_out; - if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_23, __pyx_bstride_0_out, __pyx_t_24, __pyx_bstride_1_out)).imag = __Pyx_CIMAG(__pyx_v_fill_value); - } - - - goto __pyx_L9_continue; - goto __pyx_L11; - } - __pyx_L11:; - - - __pyx_t_19 = __pyx_v_nvalues; - for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { - __pyx_v_k = __pyx_t_20; - - - __pyx_t_25 = (__pyx_v_ndim + 1); - for (__pyx_t_26 = 0; __pyx_t_26 < __pyx_t_25; __pyx_t_26+=1) { - __pyx_v_j = __pyx_t_26; - - - __pyx_t_27 = __pyx_v_isimplex; - __pyx_t_28 = __pyx_v_j; - if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_bshape_0_vertices; - if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_bshape_1_vertices; - __pyx_t_29 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_27, __pyx_bstride_0_vertices, __pyx_t_28, __pyx_bstride_1_vertices)); - __pyx_t_30 = __pyx_v_k; - if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_bshape_0_values; - if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_bshape_1_values; - __Pyx_SET_CREAL((__pyx_v_f[__pyx_v_j]), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_values.buf, __pyx_t_29, __pyx_bstride_0_values, __pyx_t_30, __pyx_bstride_1_values)).real); - - - __pyx_t_31 = __pyx_v_isimplex; - __pyx_t_32 = __pyx_v_j; - if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_bshape_0_vertices; - if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_bshape_1_vertices; - __pyx_t_33 = (*__Pyx_BufPtrStrided2d(npy_int *, 
__pyx_bstruct_vertices.buf, __pyx_t_31, __pyx_bstride_0_vertices, __pyx_t_32, __pyx_bstride_1_vertices)); - __pyx_t_34 = __pyx_v_k; - if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_bshape_0_values; - if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_bshape_1_values; - __Pyx_SET_CIMAG((__pyx_v_f[__pyx_v_j]), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_values.buf, __pyx_t_33, __pyx_bstride_0_values, __pyx_t_34, __pyx_bstride_1_values)).imag); - - - __pyx_t_35 = __pyx_v_isimplex; - __pyx_t_36 = __pyx_v_j; - if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_bshape_0_vertices; - if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_bshape_1_vertices; - __pyx_t_37 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_35, __pyx_bstride_0_vertices, __pyx_t_36, __pyx_bstride_1_vertices)); - __pyx_t_38 = __pyx_v_k; - __pyx_t_39 = 0; - if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_bshape_0_grad; - if (__pyx_t_38 < 0) __pyx_t_38 += __pyx_bshape_1_grad; - if (__pyx_t_39 < 0) __pyx_t_39 += __pyx_bshape_2_grad; - __Pyx_SET_CREAL((__pyx_v_df[(2 * __pyx_v_j)]), (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_grad.buf, __pyx_t_37, __pyx_bstride_0_grad, __pyx_t_38, __pyx_bstride_1_grad, __pyx_t_39, __pyx_bstride_2_grad)).real); - - - __pyx_t_40 = __pyx_v_isimplex; - __pyx_t_41 = __pyx_v_j; - if (__pyx_t_40 < 0) __pyx_t_40 += __pyx_bshape_0_vertices; - if (__pyx_t_41 < 0) __pyx_t_41 += __pyx_bshape_1_vertices; - __pyx_t_42 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_40, __pyx_bstride_0_vertices, __pyx_t_41, __pyx_bstride_1_vertices)); - __pyx_t_43 = __pyx_v_k; - __pyx_t_44 = 0; - if (__pyx_t_42 < 0) __pyx_t_42 += __pyx_bshape_0_grad; - if (__pyx_t_43 < 0) __pyx_t_43 += __pyx_bshape_1_grad; - if (__pyx_t_44 < 0) __pyx_t_44 += __pyx_bshape_2_grad; - __Pyx_SET_CIMAG((__pyx_v_df[(2 * __pyx_v_j)]), (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_grad.buf, __pyx_t_42, __pyx_bstride_0_grad, __pyx_t_43, __pyx_bstride_1_grad, 
__pyx_t_44, __pyx_bstride_2_grad)).imag); - - - __pyx_t_45 = __pyx_v_isimplex; - __pyx_t_46 = __pyx_v_j; - if (__pyx_t_45 < 0) __pyx_t_45 += __pyx_bshape_0_vertices; - if (__pyx_t_46 < 0) __pyx_t_46 += __pyx_bshape_1_vertices; - __pyx_t_47 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_45, __pyx_bstride_0_vertices, __pyx_t_46, __pyx_bstride_1_vertices)); - __pyx_t_48 = __pyx_v_k; - __pyx_t_49 = 1; - if (__pyx_t_47 < 0) __pyx_t_47 += __pyx_bshape_0_grad; - if (__pyx_t_48 < 0) __pyx_t_48 += __pyx_bshape_1_grad; - if (__pyx_t_49 < 0) __pyx_t_49 += __pyx_bshape_2_grad; - __Pyx_SET_CREAL((__pyx_v_df[((2 * __pyx_v_j) + 1)]), (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_grad.buf, __pyx_t_47, __pyx_bstride_0_grad, __pyx_t_48, __pyx_bstride_1_grad, __pyx_t_49, __pyx_bstride_2_grad)).real); - - - __pyx_t_50 = __pyx_v_isimplex; - __pyx_t_51 = __pyx_v_j; - if (__pyx_t_50 < 0) __pyx_t_50 += __pyx_bshape_0_vertices; - if (__pyx_t_51 < 0) __pyx_t_51 += __pyx_bshape_1_vertices; - __pyx_t_52 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_50, __pyx_bstride_0_vertices, __pyx_t_51, __pyx_bstride_1_vertices)); - __pyx_t_53 = __pyx_v_k; - __pyx_t_54 = 1; - if (__pyx_t_52 < 0) __pyx_t_52 += __pyx_bshape_0_grad; - if (__pyx_t_53 < 0) __pyx_t_53 += __pyx_bshape_1_grad; - if (__pyx_t_54 < 0) __pyx_t_54 += __pyx_bshape_2_grad; - __Pyx_SET_CIMAG((__pyx_v_df[((2 * __pyx_v_j) + 1)]), (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_grad.buf, __pyx_t_52, __pyx_bstride_0_grad, __pyx_t_53, __pyx_bstride_1_grad, __pyx_t_54, __pyx_bstride_2_grad)).imag); - } - - - __pyx_v_w = __pyx_f_8interpnd__clough_tocher_2d_single_complex((&__pyx_v_info), __pyx_v_isimplex, __pyx_v_c, __pyx_v_f, __pyx_v_df); - - - __pyx_t_26 = __pyx_v_i; - __pyx_t_55 = __pyx_v_k; - if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_bshape_0_out; - if (__pyx_t_55 < 0) __pyx_t_55 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t 
*, __pyx_bstruct_out.buf, __pyx_t_26, __pyx_bstride_0_out, __pyx_t_55, __pyx_bstride_1_out)).real = __Pyx_CREAL(__pyx_v_w); - - - __pyx_t_56 = __pyx_v_i; - __pyx_t_57 = __pyx_v_k; - if (__pyx_t_56 < 0) __pyx_t_56 += __pyx_bshape_0_out; - if (__pyx_t_57 < 0) __pyx_t_57 += __pyx_bshape_1_out; - (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_complex_t *, __pyx_bstruct_out.buf, __pyx_t_56, __pyx_bstride_0_out, __pyx_t_57, __pyx_bstride_1_out)).imag = __Pyx_CIMAG(__pyx_v_w); - } - __pyx_L9_continue:; - } - } - - - { - int __pyx_why; - __pyx_why = 0; goto __pyx_L8; - __pyx_L7: __pyx_why = 4; goto __pyx_L8; - __pyx_L8:; - Py_BLOCK_THREADS - switch (__pyx_why) { - case 4: goto __pyx_L1_error; - } - } - } - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_out)); - __pyx_r = ((PyObject *)__pyx_v_out); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("interpnd.CloughTocher2DInterpolator._evaluate_complex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xi); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_values); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_grad); - __pyx_L2:; - 
__Pyx_XDECREF((PyObject *)__pyx_v_values); - __Pyx_XDECREF((PyObject *)__pyx_v_grad); - __Pyx_XDECREF((PyObject *)__pyx_v_out); - __Pyx_XDECREF((PyObject *)__pyx_v_points); - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_eps); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int __pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_v_endian_detector = 1; - - - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self)); - - - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - { - - - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), 
NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_31), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (__pyx_t_2) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_33), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - - __pyx_v_info->ndim = __pyx_v_ndim; - - - if (__pyx_v_copy_shape) { - - - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - { - - - __pyx_v_info->strides 
= ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - - __pyx_v_info->suboffsets = NULL; - - - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - - __pyx_v_f = NULL; - - - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr; - - - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - { - - - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - - __pyx_v_t = __pyx_v_descr->type_num; - - - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_35), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); 
__pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if 
(__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - { - - - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_36), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - - __pyx_v_info->format = __pyx_v_f; - - - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - { - - - __pyx_v_info->format = ((char *)malloc(255)); - - - (__pyx_v_info->format[0]) = '^'; - - - __pyx_v_offset = 0; - - - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - 
__Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - 
__pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - - __pyx_v_endian_detector = 1; - - - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - __Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = 
PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_38), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - __pyx_t_9 = __pyx_t_6; - } 
- __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_39), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - - (__pyx_v_f[0]) = 120; - - - __pyx_v_f = (__pyx_v_f + 1); - - - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - - __pyx_t_3 = 
PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_41), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - - __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - 
(__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - { - - - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_36), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject 
*)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - { - - - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - { - - - Py_INCREF(__pyx_v_base); - - - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - - Py_XDECREF(__pyx_v_arr->base); - - - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - - __pyx_t_1 = 
(__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - { - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("interpnd"), - __Pyx_DOCSTR(__pyx_k_42), - -1, - __pyx_methods , - NULL, - NULL, - NULL, - NULL -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0}, - {&__pyx_kp_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 0}, - {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, - {&__pyx_kp_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 0}, - {&__pyx_n_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 1}, - {&__pyx_n_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 1}, - {&__pyx_kp_s_24, __pyx_k_24, sizeof(__pyx_k_24), 0, 0, 1, 0}, - {&__pyx_n_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 1}, - {&__pyx_kp_u_30, __pyx_k_30, sizeof(__pyx_k_30), 0, 1, 0, 0}, - {&__pyx_kp_u_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 1, 0, 0}, - {&__pyx_kp_u_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 1, 0, 0}, - {&__pyx_kp_u_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 1, 0, 0}, - {&__pyx_kp_u_37, __pyx_k_37, sizeof(__pyx_k_37), 0, 1, 0, 0}, - {&__pyx_kp_u_40, __pyx_k_40, sizeof(__pyx_k_40), 0, 1, 0, 0}, - {&__pyx_n_s_43, __pyx_k_43, sizeof(__pyx_k_43), 0, 0, 1, 1}, - {&__pyx_n_s_44, __pyx_k_44, sizeof(__pyx_k_44), 0, 0, 1, 1}, - {&__pyx_kp_s_45, __pyx_k_45, sizeof(__pyx_k_45), 0, 0, 1, 0}, - {&__pyx_kp_s_46, __pyx_k_46, sizeof(__pyx_k_46), 0, 0, 1, 0}, - {&__pyx_n_s_47, __pyx_k_47, sizeof(__pyx_k_47), 
0, 0, 1, 1}, - {&__pyx_kp_s_48, __pyx_k_48, sizeof(__pyx_k_48), 0, 0, 1, 0}, - {&__pyx_n_s_49, __pyx_k_49, sizeof(__pyx_k_49), 0, 0, 1, 1}, - {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0}, - {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0}, - {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0}, - {&__pyx_n_s__Delaunay, __pyx_k__Delaunay, sizeof(__pyx_k__Delaunay), 0, 0, 1, 1}, - {&__pyx_n_s__NDInterpolatorBase, __pyx_k__NDInterpolatorBase, sizeof(__pyx_k__NDInterpolatorBase), 0, 0, 1, 1}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__T, __pyx_k__T, sizeof(__pyx_k__T), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s__Warning, __pyx_k__Warning, sizeof(__pyx_k__Warning), 0, 0, 1, 1}, - {&__pyx_n_s____call__, __pyx_k____call__, sizeof(__pyx_k____call__), 0, 0, 1, 1}, - {&__pyx_n_s____init__, __pyx_k____init__, sizeof(__pyx_k____init__), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s___check_call_shape, __pyx_k___check_call_shape, sizeof(__pyx_k___check_call_shape), 0, 0, 1, 1}, - {&__pyx_n_s___check_init_shape, __pyx_k___check_init_shape, sizeof(__pyx_k___check_init_shape), 0, 0, 1, 1}, - {&__pyx_n_s___evaluate_complex, __pyx_k___evaluate_complex, sizeof(__pyx_k___evaluate_complex), 0, 0, 1, 1}, - {&__pyx_n_s___evaluate_double, __pyx_k___evaluate_double, sizeof(__pyx_k___evaluate_double), 0, 0, 1, 1}, - {&__pyx_n_s__asanyarray, __pyx_k__asanyarray, sizeof(__pyx_k__asanyarray), 0, 0, 1, 1}, - {&__pyx_n_s__ascontiguousarray, __pyx_k__ascontiguousarray, sizeof(__pyx_k__ascontiguousarray), 0, 0, 1, 1}, - {&__pyx_n_s__astype, __pyx_k__astype, sizeof(__pyx_k__astype), 0, 0, 1, 1}, - {&__pyx_n_s__broadcast_arrays, __pyx_k__broadcast_arrays, 
sizeof(__pyx_k__broadcast_arrays), 0, 0, 1, 1}, - {&__pyx_n_s__complex, __pyx_k__complex, sizeof(__pyx_k__complex), 0, 0, 1, 1}, - {&__pyx_n_s__complexfloating, __pyx_k__complexfloating, sizeof(__pyx_k__complexfloating), 0, 0, 1, 1}, - {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1}, - {&__pyx_n_s__enumerate, __pyx_k__enumerate, sizeof(__pyx_k__enumerate), 0, 0, 1, 1}, - {&__pyx_n_s__eps, __pyx_k__eps, sizeof(__pyx_k__eps), 0, 0, 1, 1}, - {&__pyx_n_s__fill_value, __pyx_k__fill_value, sizeof(__pyx_k__fill_value), 0, 0, 1, 1}, - {&__pyx_n_s__finfo, __pyx_k__finfo, sizeof(__pyx_k__finfo), 0, 0, 1, 1}, - {&__pyx_n_s__grad, __pyx_k__grad, sizeof(__pyx_k__grad), 0, 0, 1, 1}, - {&__pyx_n_s__imag, __pyx_k__imag, sizeof(__pyx_k__imag), 0, 0, 1, 1}, - {&__pyx_n_s__interpnd, __pyx_k__interpnd, sizeof(__pyx_k__interpnd), 0, 0, 1, 1}, - {&__pyx_n_s__is_complex, __pyx_k__is_complex, sizeof(__pyx_k__is_complex), 0, 0, 1, 1}, - {&__pyx_n_s__issubdtype, __pyx_k__issubdtype, sizeof(__pyx_k__issubdtype), 0, 0, 1, 1}, - {&__pyx_n_s__maxiter, __pyx_k__maxiter, sizeof(__pyx_k__maxiter), 0, 0, 1, 1}, - {&__pyx_n_s__nan, __pyx_k__nan, sizeof(__pyx_k__nan), 0, 0, 1, 1}, - {&__pyx_n_s__ndim, __pyx_k__ndim, sizeof(__pyx_k__ndim), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__npoints, __pyx_k__npoints, sizeof(__pyx_k__npoints), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__object, __pyx_k__object, sizeof(__pyx_k__object), 0, 0, 1, 1}, - {&__pyx_n_s__points, __pyx_k__points, sizeof(__pyx_k__points), 0, 0, 1, 1}, - {&__pyx_n_s__prod, __pyx_k__prod, sizeof(__pyx_k__prod), 0, 0, 1, 1}, - {&__pyx_n_s__qhull, __pyx_k__qhull, sizeof(__pyx_k__qhull), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 
0, 0, 1, 1}, - {&__pyx_n_s__real, __pyx_k__real, sizeof(__pyx_k__real), 0, 0, 1, 1}, - {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, - {&__pyx_n_s__self, __pyx_k__self, sizeof(__pyx_k__self), 0, 0, 1, 1}, - {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, - {&__pyx_n_s__tol, __pyx_k__tol, sizeof(__pyx_k__tol), 0, 0, 1, 1}, - {&__pyx_n_s__transpose, __pyx_k__transpose, sizeof(__pyx_k__transpose), 0, 0, 1, 1}, - {&__pyx_n_s__tri, __pyx_k__tri, sizeof(__pyx_k__tri), 0, 0, 1, 1}, - {&__pyx_n_s__values, __pyx_k__values, sizeof(__pyx_k__values), 0, 0, 1, 1}, - {&__pyx_n_s__values_shape, __pyx_k__values_shape, sizeof(__pyx_k__values_shape), 0, 0, 1, 1}, - {&__pyx_n_s__vertices, __pyx_k__vertices, sizeof(__pyx_k__vertices), 0, 0, 1, 1}, - {&__pyx_n_s__warn, __pyx_k__warn, sizeof(__pyx_k__warn), 0, 0, 1, 1}, - {&__pyx_n_s__warnings, __pyx_k__warnings, sizeof(__pyx_k__warnings), 0, 0, 1, 1}, - {&__pyx_n_s__xi, __pyx_k__xi, sizeof(__pyx_k__xi), 0, 0, 1, 1}, - {&__pyx_n_s__xrange, __pyx_k__xrange, sizeof(__pyx_k__xrange), 0, 0, 1, 1}, - {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, - {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_object = __Pyx_GetName(__pyx_b, __pyx_n_s__object); if (!__pyx_builtin_object) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_Warning = __Pyx_GetName(__pyx_b, __pyx_n_s__Warning); if (!__pyx_builtin_Warning) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #if PY_MAJOR_VERSION >= 3 - __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if 
(!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #else - __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - __pyx_builtin_enumerate = __Pyx_GetName(__pyx_b, __pyx_n_s__enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - - __pyx_k_slice_3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_k_slice_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_3); - __Pyx_GIVEREF(__pyx_k_slice_3); - __pyx_k_tuple_4 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4)); - __Pyx_INCREF(__pyx_k_slice_3); - PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, __pyx_k_slice_3); - __Pyx_GIVEREF(__pyx_k_slice_3); - __Pyx_INCREF(Py_None); - PyTuple_SET_ITEM(__pyx_k_tuple_4, 1, Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); - - - __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_5)); - PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_s_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); - - - __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_8)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_7)); - PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_s_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); - - - __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_9)); - PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_s_9)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); - - - __pyx_k_tuple_13 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_13)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_12)); - PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, ((PyObject *)__pyx_kp_s_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13)); - - - __pyx_k_tuple_15 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_15)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_14)); - PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_kp_s_14)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_14)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); - - - 
__pyx_k_tuple_16 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_16)); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - __Pyx_INCREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_k_tuple_16, 1, __pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); - - - __pyx_k_tuple_20 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_20)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_19)); - PyTuple_SET_ITEM(__pyx_k_tuple_20, 0, ((PyObject *)__pyx_kp_s_19)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_19)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_20)); - - - __pyx_k_slice_22 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_k_slice_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 499; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_22); - __Pyx_GIVEREF(__pyx_k_slice_22); - __pyx_k_tuple_23 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 499; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_23)); - __Pyx_INCREF(__pyx_k_slice_22); - PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, __pyx_k_slice_22); - __Pyx_GIVEREF(__pyx_k_slice_22); - __Pyx_INCREF(Py_None); - PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); - - - __pyx_k_tuple_26 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_26)); - __Pyx_INCREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, __pyx_int_1); - 
__Pyx_GIVEREF(__pyx_int_1); - __Pyx_INCREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_k_tuple_26, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - __Pyx_INCREF(__pyx_int_2); - PyTuple_SET_ITEM(__pyx_k_tuple_26, 2, __pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26)); - __pyx_k_tuple_27 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_27)); - __Pyx_INCREF(__pyx_int_2); - PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, __pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); - - - __pyx_k_tuple_31 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_31)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_30)); - PyTuple_SET_ITEM(__pyx_k_tuple_31, 0, ((PyObject *)__pyx_kp_u_30)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_30)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31)); - - - __pyx_k_tuple_33 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_33)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_32)); - PyTuple_SET_ITEM(__pyx_k_tuple_33, 0, ((PyObject *)__pyx_kp_u_32)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_32)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33)); - - - __pyx_k_tuple_35 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_35)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_34)); - PyTuple_SET_ITEM(__pyx_k_tuple_35, 0, ((PyObject *)__pyx_kp_u_34)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_34)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); - - - __pyx_k_tuple_38 = 
PyTuple_New(1); if (unlikely(!__pyx_k_tuple_38)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_38)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_37)); - PyTuple_SET_ITEM(__pyx_k_tuple_38, 0, ((PyObject *)__pyx_kp_u_37)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_37)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_38)); - - - __pyx_k_tuple_39 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_39)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_34)); - PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_kp_u_34)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_34)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); - - - __pyx_k_tuple_41 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_41)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_40)); - PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_kp_u_40)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_40)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_100 = PyInt_FromLong(100); if (unlikely(!__pyx_int_100)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_400 = PyInt_FromLong(400); if (unlikely(!__pyx_int_400)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initinterpnd(void); -PyMODINIT_FUNC initinterpnd(void) -#else -PyMODINIT_FUNC PyInit_interpnd(void); -PyMODINIT_FUNC PyInit_interpnd(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_interpnd(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if 
(__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - - - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD - PyEval_InitThreads(); - #endif - #endif - - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("interpnd"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_42), 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_interpnd) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - - - - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 
0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - __pyx_t_1 = __Pyx_ImportModule("scipy.spatial.qhull"); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_get_delaunay_info", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__get_delaunay_info, "void (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, PyObject *, int, int)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_barycentric_inside", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__barycentric_inside, "int (int, double *, double *, double *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_barycentric_coordinate_single", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__barycentric_coordinate_single, "void (int, double *, double *, double *, int)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_barycentric_coordinates", 
(void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates, "void (int, double *, double *, double *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_lift_point", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__lift_point, "void (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_distplane", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__distplane, "double (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int, double *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_is_point_fully_outside", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_find_simplex_bruteforce", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_find_simplex_directed", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__find_simplex_directed, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_find_simplex", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__find_simplex, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double)") < 0) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_RidgeIter2D_init", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init, "void (__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *, __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "_RidgeIter2D_next", (void (**)(void))&__pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next, "void (__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_n_s_44)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s_44)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s_44)); - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s_43), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__qhull, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s__warnings), 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__warnings, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__nan); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_k_1 = __pyx_t_4; - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_18NDInterpolatorBase___init__, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s____init__, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_18NDInterpolatorBase_1_check_init_shape, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if 
(PyObject_SetItem(__pyx_t_3, __pyx_n_s___check_init_shape, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_18NDInterpolatorBase_2_check_call_shape, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s___check_call_shape, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_18NDInterpolatorBase_3__call__, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s____call__, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __Pyx_INCREF(__pyx_builtin_object); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_builtin_object); - __Pyx_GIVEREF(__pyx_builtin_object); - if (PyDict_SetItemString(((PyObject *)__pyx_t_3), "__doc__", ((PyObject *)__pyx_kp_s_45)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = __Pyx_CreateClass(((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_3), __pyx_n_s__NDInterpolatorBase, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__NDInterpolatorBase, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - - __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_8interpnd__ndim_coords_from_arrays, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_2, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__nan); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_k_17 = __pyx_t_4; - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_20LinearNDInterpolator___init__, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s____init__, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_20LinearNDInterpolator_1_evaluate_double, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s___evaluate_double, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_20LinearNDInterpolator_2_evaluate_complex, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s___evaluate_complex, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__NDInterpolatorBase); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - if (PyDict_SetItemString(((PyObject *)__pyx_t_3), "__doc__", ((PyObject *)__pyx_kp_s_46)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = __Pyx_CreateClass(((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3), __pyx_n_s_47, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_47, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __Pyx_INCREF(__pyx_builtin_Warning); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_builtin_Warning); - __Pyx_GIVEREF(__pyx_builtin_Warning); - __pyx_t_2 = __Pyx_CreateClass(((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_3), __pyx_n_s_25, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_25, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - - __pyx_t_3 = PyFloat_FromDouble(1e-6); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 477; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_k_18 = __pyx_t_3; - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_8interpnd_1estimate_gradients_2d_global, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 477; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_21, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 477; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__nan); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_k_28 = __pyx_t_4; - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - - - __pyx_t_4 = PyFloat_FromDouble(1e-6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_k_29 = __pyx_t_4; - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_26CloughTocher2DInterpolator___init__, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s____init__, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_26CloughTocher2DInterpolator_1_evaluate_double, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s___evaluate_double, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_8interpnd_26CloughTocher2DInterpolator_2_evaluate_complex, NULL, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - if (PyObject_SetItem(__pyx_t_3, __pyx_n_s___evaluate_complex, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__NDInterpolatorBase); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - if (PyDict_SetItemString(((PyObject *)__pyx_t_3), "__doc__", ((PyObject *)__pyx_kp_s_48)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = __Pyx_CreateClass(((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3), __pyx_n_s_49, __pyx_n_s__interpnd); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_49, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_3)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - if (__pyx_m) { - __Pyx_AddTraceback("init interpnd", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init interpnd"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - - - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < 
num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } else { - - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if 
(PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - - -static double __Pyx__PyObject_AsDouble(PyObject* obj) { - PyObject* float_value; - if (Py_TYPE(obj)->tp_as_number && Py_TYPE(obj)->tp_as_number->nb_float) { - return PyFloat_AsDouble(obj); - } else if (PyUnicode_CheckExact(obj) || PyBytes_CheckExact(obj)) { -#if PY_MAJOR_VERSION >= 3 - float_value = PyFloat_FromString(obj); -#else - float_value = PyFloat_FromString(obj, 0); -#endif - } else { - PyObject* args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - PyTuple_SET_ITEM(args, 0, obj); - float_value = PyObject_Call((PyObject*)&PyFloat_Type, args, 0); - PyTuple_SET_ITEM(args, 0, 0); - Py_DECREF(args); - } - if (likely(float_value)) { - double value = PyFloat_AS_DOUBLE(float_value); - Py_DECREF(float_value); - return value; - } -bad: - return (double)-1; -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - 
tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else - -static void __Pyx_Raise(PyObject *type, PyObject 
*value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact) -{ - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (none_allowed && obj == Py_None) return 1; - else if (exact) { - if (Py_TYPE(obj) == type) return 1; - } - else { - if (PyObject_TypeCheck(obj, type)) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { 
- unsigned int n = 1; - return *(unsigned char*)(&n) != 0; -} - -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; -} __Pyx_BufFmt_Context; - -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} - -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t < '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} - -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} - -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case 'b': return "'char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 0: return "end"; - default: return "unparseable format string"; - } -} - -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif - -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; - case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; - case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); - case 'O': return 'O'; - case 'P': return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} - -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset; - if (ctx->enc_type == 0) return 0; - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - } - - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - - size_t parent_offset = ctx->head->parent_offset + field->offset; - 
++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - - ctx->fmt_offset += size; - - --ctx->enc_count; - - - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} - -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case 10: - case 13: - ++ts; - break; - case '<': - if (!__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian 
compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - } - break; - case '}': - ++ts; - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': - if (ctx->enc_type == *ts && got_Z == ctx->is_complex && - ctx->enc_packmode == ctx->new_packmode) { - - ctx->enc_count += ctx->new_count; - } else { - - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - } - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - default: - { - int number = __Pyx_BufFmt_ParseNumber(&ts); - if (number == -1) { - PyErr_Format(PyExc_ValueError, - "Does not understand character buffer dtype format string ('%c')", *ts); - return NULL; - } - ctx->new_count = (size_t)number; - } - } - } -} - -static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - 
buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { - if (obj == Py_None || obj == NULL) { - __Pyx_ZeroBuffer(buf); - return 0; - } - buf->buf = NULL; - if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; - if (buf->ndim != nd) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if ((unsigned)buf->itemsize != dtype->size) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_ZeroBuffer(buf); - return -1; -} - -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (info->buf == NULL) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -static void __Pyx_RaiseBufferFallbackError(void) { - PyErr_Format(PyExc_ValueError, - "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); -} - - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real 
* b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * 
cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); - else { - PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; - } -} - -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject* obj = view->obj; - if (obj) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); - Py_DECREF(obj); - view->obj = NULL; - } -} - -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, 
NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static PyObject *__Pyx_FindPy2Metaclass(PyObject *bases) { - PyObject *metaclass; - -#if PY_MAJOR_VERSION < 3 - if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { - PyObject *base = PyTuple_GET_ITEM(bases, 0); - metaclass = PyObject_GetAttrString(base, (char *)"__class__"); - if (!metaclass) { - PyErr_Clear(); - metaclass = (PyObject*) Py_TYPE(base); - } - } else { - metaclass = (PyObject *) &PyClass_Type; - } -#else - if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { - PyObject *base = PyTuple_GET_ITEM(bases, 0); - metaclass = (PyObject*) Py_TYPE(base); - } else { - metaclass = (PyObject *) &PyType_Type; - } -#endif - Py_INCREF(metaclass); - return metaclass; -} - -static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, - PyObject *modname) { - PyObject *result; - PyObject *metaclass; - - if (PyDict_SetItemString(dict, "__module__", modname) < 0) - return NULL; - - - metaclass = PyDict_GetItemString(dict, "__metaclass__"); - if (metaclass) { - Py_INCREF(metaclass); - } else { - metaclass = __Pyx_FindPy2Metaclass(bases); - } - result = PyObject_CallFunctionObjArgs(metaclass, name, bases, dict, NULL); - Py_DECREF(metaclass); - return result; -} - - -static PyObject *__pyx_binding_PyCFunctionType_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module) { - __pyx_binding_PyCFunctionType_object *op = PyObject_GC_New(__pyx_binding_PyCFunctionType_object, __pyx_binding_PyCFunctionType); - if (op == NULL) - return NULL; - op->func.m_ml = ml; - Py_XINCREF(self); - op->func.m_self = self; - Py_XINCREF(module); - op->func.m_module = module; - PyObject_GC_Track(op); - return (PyObject *)op; -} - -static void __pyx_binding_PyCFunctionType_dealloc(__pyx_binding_PyCFunctionType_object *m) { - PyObject_GC_UnTrack(m); - Py_XDECREF(m->func.m_self); - Py_XDECREF(m->func.m_module); - PyObject_GC_Del(m); -} - -static 
PyObject *__pyx_binding_PyCFunctionType_descr_get(PyObject *func, PyObject *obj, PyObject *type) { - if (obj == Py_None) - obj = NULL; - return PyMethod_New(func, obj, type); -} - -static int __pyx_binding_PyCFunctionType_init(void) { - __pyx_binding_PyCFunctionType_type = PyCFunction_Type; - __pyx_binding_PyCFunctionType_type.tp_name = __Pyx_NAMESTR("cython_binding_builtin_function_or_method"); - __pyx_binding_PyCFunctionType_type.tp_dealloc = (destructor)__pyx_binding_PyCFunctionType_dealloc; - __pyx_binding_PyCFunctionType_type.tp_descr_get = __pyx_binding_PyCFunctionType_descr_get; - if (PyType_Ready(&__pyx_binding_PyCFunctionType_type) < 0) { - return -1; - } - __pyx_binding_PyCFunctionType = &__pyx_binding_PyCFunctionType_type; - return 0; - -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { - const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(Py_intptr_t) == sizeof(char)) || - (sizeof(Py_intptr_t) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(Py_intptr_t) == sizeof(int)) || - (sizeof(Py_intptr_t) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), - little, !is_unsigned); - } -} - -static __pyx_t_double_complex __Pyx_PyComplex_As___pyx_t_double_complex(PyObject* o) { - Py_complex cval; - if (PyComplex_CheckExact(o)) - cval = ((PyComplexObject *)o)->cval; - else - cval = PyComplex_AsCComplex(o); - return __pyx_t_double_complex_from_parts( - (double)cval.real, 
- (double)cval.imag); -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 
0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = 
neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; 
-#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > 
const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = 
PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); - Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportFunction -#define __PYX_HAVE_RT_ImportFunction -static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { - PyObject *d = 0; - PyObject *cobj = 0; - union { - void (*fp)(void); - void *p; - } tmp; - - d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); - if (!d) - goto bad; - cobj = PyDict_GetItemString(d, funcname); - if (!cobj) { - PyErr_Format(PyExc_ImportError, - "%s does not export expected C 
function %s", - PyModule_GetName(module), funcname); - goto bad; - } -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - if (!PyCapsule_IsValid(cobj, sig)) { - PyErr_Format(PyExc_TypeError, - "C function %s.%s has wrong signature (expected %s, got %s)", - PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); - goto bad; - } - tmp.p = PyCapsule_GetPointer(cobj, sig); -#else - {const char *desc, *s1, *s2; - desc = (const char *)PyCObject_GetDesc(cobj); - if (!desc) - goto bad; - s1 = desc; s2 = sig; - while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } - if (*s1 != *s2) { - PyErr_Format(PyExc_TypeError, - "C function %s.%s has wrong signature (expected %s, got %s)", - PyModule_GetName(module), funcname, sig, desc); - goto bad; - } - tmp.p = PyCObject_AsVoidPtr(cobj);} -#endif - *f = tmp.fp; - if (!(*f)) - goto bad; - Py_DECREF(d); - return 0; -bad: - Py_XDECREF(d); - return -1; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( 
- 0, - #if PY_MAJOR_VERSION >= 3 - 0, - #endif - 0, - 0, - 0, - __pyx_empty_bytes, - __pyx_empty_tuple, - __pyx_empty_tuple, - __pyx_empty_tuple, - __pyx_empty_tuple, - __pyx_empty_tuple, - py_srcfile, - py_funcname, - __pyx_lineno, - __pyx_empty_bytes - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), - py_code, - py_globals, - 0 - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - - - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && 
m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif diff --git a/scipy-0.10.1/scipy/interpolate/interpnd_info.py b/scipy-0.10.1/scipy/interpolate/interpnd_info.py deleted file mode 100644 index 864d227f39..0000000000 --- a/scipy-0.10.1/scipy/interpolate/interpnd_info.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Here we perform some symbolic computations required for the N-D -interpolation routines in `interpnd.pyx`. 
- -""" -from sympy import * - -def _estimate_gradients_2d_global(): - - # - # Compute - # - # - - f1, f2, df1, df2, x = symbols(['f1', 'f2', 'df1', 'df2', 'x']) - c = [f1, (df1 + 3*f1)/3, (df2 + 3*f2)/3, f2] - - w = 0 - for k in range(4): - w += binomial(3, k) * c[k] * x**k*(1-x)**(3-k) - - wpp = w.diff(x, 2).expand() - intwpp2 = (wpp**2).integrate((x, 0, 1)).expand() - - A = Matrix([[intwpp2.coeff(df1**2), intwpp2.coeff(df1*df2)/2], - [intwpp2.coeff(df1*df2)/2, intwpp2.coeff(df2**2)]]) - - B = Matrix([[intwpp2.coeff(df1).subs(df2, 0)], - [intwpp2.coeff(df2).subs(df1, 0)]]) / 2 - - print "A" - print A - print "B" - print B - print "solution" - print A.inv() * B diff --git a/scipy-0.10.1/scipy/interpolate/interpolate.py b/scipy-0.10.1/scipy/interpolate/interpolate.py deleted file mode 100644 index 0b0b76b703..0000000000 --- a/scipy-0.10.1/scipy/interpolate/interpolate.py +++ /dev/null @@ -1,844 +0,0 @@ - - -""" Classes for interpolating values. -""" - -__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp', - 'ppform', 'lagrange'] - -from numpy import shape, sometrue, array, transpose, searchsorted, \ - ones, logical_or, atleast_1d, atleast_2d, meshgrid, ravel, \ - dot, poly1d, asarray, intp -import numpy as np -import scipy.special as spec -import math - -import fitpack -import _fitpack - -def reduce_sometrue(a): - all = a - while len(shape(all)) > 1: - all = sometrue(all,axis=0) - return all - -def lagrange(x, w): - """ - Return a Lagrange interpolating polynomial. - - Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating - polynomial through the points ``(x, w)``. - - Warning: This implementation is numerically unstable. Do not expect to - be able to use more than about 20 points even if they are chosen optimally. - - Parameters - ---------- - x : array_like - `x` represents the x-coordinates of a set of datapoints. - w : array_like - `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`). 
- - """ - M = len(x) - p = poly1d(0.0) - for j in xrange(M): - pt = poly1d(w[j]) - for k in xrange(M): - if k == j: continue - fac = x[j]-x[k] - pt *= poly1d([1.0,-x[k]])/fac - p += pt - return p - - -# !! Need to find argument for keeping initialize. If it isn't -# !! found, get rid of it! - -class interp2d(object): - """ - interp2d(x, y, z, kind='linear', copy=True, bounds_error=False, - fill_value=nan) - - Interpolate over a 2-D grid. - - `x`, `y` and `z` are arrays of values used to approximate some function - f: ``z = f(x, y)``. This class returns a function whose call method uses - spline interpolation to find the value of new points. - - Methods - ------- - __call__ - - Parameters - ---------- - x, y : 1-D ndarrays - Arrays defining the data point coordinates. - - If the points lie on a regular grid, `x` can specify the column - coordinates and `y` the row coordinates, for example:: - - >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]] - - Otherwise, x and y must specify the full coordinates for each point, - for example:: - - >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6] - - If `x` and `y` are multi-dimensional, they are flattened before use. - - z : 1-D ndarray - The values of the function to interpolate at the data points. If - `z` is a multi-dimensional array, it is flattened before use. - kind : {'linear', 'cubic', 'quintic'}, optional - The kind of spline interpolation to use. Default is 'linear'. - copy : bool, optional - If True, then data is copied, otherwise only a reference is held. - bounds_error : bool, optional - If True, when interpolated values are requested outside of the - domain of the input data, an error is raised. - If False, then `fill_value` is used. - fill_value : number, optional - If provided, the value to use for points outside of the - interpolation domain. Defaults to NaN. 
- - See Also - -------- - bisplrep, bisplev - Spline interpolation based on FITPACK - BivariateSpline : a more recent wrapper of the FITPACK routines - interp1d - - Notes - ----- - The minimum number of data points required along the interpolation - axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for - quintic interpolation. - - The interpolator is constructed by `bisplrep`, with a smoothing factor - of 0. If more control over smoothing is needed, `bisplrep` should be - used directly. - - Examples - -------- - Construct a 2-D grid and interpolate on it: - - >>> x = np.arange(-5.01, 5.01, 0.25) - >>> y = np.arange(-5.01, 5.01, 0.25) - >>> xx, yy = np.meshgrid(x, y) - >>> z = np.sin(xx**2+yy**2) - >>> f = sp.interpolate.interp2d(x, y, z, kind='cubic') - - Now use the obtained interpolation function and plot the result: - - >>> xnew = np.arange(-5.01, 5.01, 1e-2) - >>> ynew = np.arange(-5.01, 5.01, 1e-2) - >>> znew = f(xnew, ynew) - >>> plt.plot(x, z[:, 0], 'ro-', xnew, znew[:, 0], 'b-') - - """ - - def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, - fill_value=np.nan): - self.x, self.y, self.z = map(ravel, map(asarray, [x, y, z])) - - if len(self.z) == len(self.x) * len(self.y): - self.x, self.y = meshgrid(x,y) - self.x, self.y = map(ravel, [self.x, self.y]) - if len(self.x) != len(self.y): - raise ValueError("x and y must have equal lengths") - if len(self.z) != len(self.x): - raise ValueError("Invalid length for input z") - - try: - kx = ky = {'linear' : 1, - 'cubic' : 3, - 'quintic' : 5}[kind] - except KeyError: - raise ValueError("Unsupported interpolation type.") - - self.tck = fitpack.bisplrep(self.x, self.y, self.z, kx=kx, ky=ky, s=0.) - - def __call__(self,x,y,dx=0,dy=0): - """Interpolate the function. - - Parameters - ---------- - x : 1D array - x-coordinates of the mesh on which to interpolate. - y : 1D array - y-coordinates of the mesh on which to interpolate. 
- dx : int >= 0, < kx - Order of partial derivatives in x. - dy : int >= 0, < ky - Order of partial derivatives in y. - - Returns - ------- - z : 2D array with shape (len(y), len(x)) - The interpolated values. - - """ - - x = atleast_1d(x) - y = atleast_1d(y) - z = fitpack.bisplev(x, y, self.tck, dx, dy) - z = atleast_2d(z) - z = transpose(z) - if len(z)==1: - z = z[0] - return array(z) - - -class interp1d(object): - """ - interp1d(x, y, kind='linear', axis=-1, copy=True, bounds_error=True, - fill_value=np.nan) - - Interpolate a 1-D function. - - `x` and `y` are arrays of values used to approximate some function f: - ``y = f(x)``. This class returns a function whose call method uses - interpolation to find the value of new points. - - Parameters - ---------- - x : array_like - A 1-D array of monotonically increasing real values. - y : array_like - A N-D array of real values. The length of `y` along the interpolation - axis must be equal to the length of `x`. - kind : str or int, optional - Specifies the kind of interpolation as a string - ('linear','nearest', 'zero', 'slinear', 'quadratic, 'cubic') - or as an integer specifying the order of the spline interpolator - to use. Default is 'linear'. - axis : int, optional - Specifies the axis of `y` along which to interpolate. - Interpolation defaults to the last axis of `y`. - copy : bool, optional - If True, the class makes internal copies of x and y. - If False, references to `x` and `y` are used. The default is to copy. - bounds_error : bool, optional - If True, an error is thrown any time interpolation is attempted on - a value outside of the range of x (where extrapolation is - necessary). If False, out of bounds values are assigned `fill_value`. - By default, an error is raised. - fill_value : float, optional - If provided, then this value will be used to fill in for requested - points outside of the data range. If not provided, then the default - is NaN. 
- - See Also - -------- - UnivariateSpline : A more recent wrapper of the FITPACK routines. - splrep, splev - Spline interpolation based on FITPACK. - interp2d - - Examples - -------- - >>> import scipy.interpolate - >>> x = np.arange(0, 10) - >>> y = np.exp(-x/3.0) - >>> f = sp.interpolate.interp1d(x, y) - - >>> xnew = np.arange(0,9, 0.1) - >>> ynew = f(xnew) # use interpolation function returned by `interp1d` - >>> plt.plot(x, y, 'o', xnew, ynew, '-') - - """ - - def __init__(self, x, y, kind='linear', axis=-1, - copy=True, bounds_error=True, fill_value=np.nan): - """ Initialize a 1D linear interpolation class.""" - - self.copy = copy - self.bounds_error = bounds_error - self.fill_value = fill_value - - if kind in ['zero', 'slinear', 'quadratic', 'cubic']: - order = {'nearest':0, 'zero':0,'slinear':1, - 'quadratic':2, 'cubic':3}[kind] - kind = 'spline' - elif isinstance(kind, int): - order = kind - kind = 'spline' - elif kind not in ('linear', 'nearest'): - raise NotImplementedError("%s is unsupported: Use fitpack "\ - "routines for other types." % kind) - x = array(x, copy=self.copy) - y = array(y, copy=self.copy) - - if x.ndim != 1: - raise ValueError("the x array must have exactly one dimension.") - if y.ndim == 0: - raise ValueError("the y array must have at least one dimension.") - - # Force-cast y to a floating-point type, if it's not yet one - if not issubclass(y.dtype.type, np.inexact): - y = y.astype(np.float_) - - # Normalize the axis to ensure that it is positive. - self.axis = axis % len(y.shape) - self._kind = kind - - if kind in ('linear', 'nearest'): - # Make a "view" of the y array that is rotated to the interpolation - # axis. 
- axes = range(y.ndim) - del axes[self.axis] - axes.append(self.axis) - oriented_y = y.transpose(axes) - minval = 2 - len_y = oriented_y.shape[-1] - if kind == 'linear': - self._call = self._call_linear - elif kind == 'nearest': - self.x_bds = (x[1:] + x[:-1]) / 2.0 - self._call = self._call_nearest - else: - axes = range(y.ndim) - del axes[self.axis] - axes.insert(0, self.axis) - oriented_y = y.transpose(axes) - minval = order + 1 - len_y = oriented_y.shape[0] - self._call = self._call_spline - self._spline = splmake(x,oriented_y,order=order) - - len_x = len(x) - if len_x != len_y: - raise ValueError("x and y arrays must be equal in length along " - "interpolation axis.") - if len_x < minval: - raise ValueError("x and y arrays must have at " - "least %d entries" % minval) - self.x = x - self.y = oriented_y - - def _call_linear(self, x_new): - - # 2. Find where in the orignal data, the values to interpolate - # would be inserted. - # Note: If x_new[n] == x[m], then m is returned by searchsorted. - x_new_indices = searchsorted(self.x, x_new) - - # 3. Clip x_new_indices so that they are within the range of - # self.x indices and at least 1. Removes mis-interpolation - # of x_new[n] = x[0] - x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int) - - # 4. Calculate the slope of regions that each x_new value falls in. - lo = x_new_indices - 1 - hi = x_new_indices - - x_lo = self.x[lo] - x_hi = self.x[hi] - y_lo = self.y[..., lo] - y_hi = self.y[..., hi] - - # Note that the following two expressions rely on the specifics of the - # broadcasting semantics. - slope = (y_hi-y_lo) / (x_hi-x_lo) - - # 5. Calculate the actual value for each entry in x_new. - y_new = slope*(x_new-x_lo) + y_lo - - return y_new - - def _call_nearest(self, x_new): - """ Find nearest neighbour interpolated y_new = f(x_new).""" - - # 2. Find where in the averaged data the values to interpolate - # would be inserted. 
- # Note: use side='left' (right) to searchsorted() to define the - # halfway point to be nearest to the left (right) neighbour - x_new_indices = searchsorted(self.x_bds, x_new, side='left') - - # 3. Clip x_new_indices so that they are within the range of x indices. - x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp) - - # 4. Calculate the actual value for each entry in x_new. - y_new = self.y[..., x_new_indices] - - return y_new - - def _call_spline(self, x_new): - x_new =np.asarray(x_new) - result = spleval(self._spline,x_new.ravel()) - return result.reshape(x_new.shape+result.shape[1:]) - - def __call__(self, x_new): - """Find interpolated y_new = f(x_new). - - Parameters - ---------- - x_new : number or array - New independent variable(s). - - Returns - ------- - y_new : ndarray - Interpolated value(s) corresponding to x_new. - - """ - - # 1. Handle values in x_new that are outside of x. Throw error, - # or return a list of mask array indicating the outofbounds values. - # The behavior is set by the bounds_error variable. - x_new = asarray(x_new) - out_of_bounds = self._check_bounds(x_new) - - y_new = self._call(x_new) - - # Rotate the values of y_new back so that they correspond to the - # correct x_new values. For N-D x_new, take the last (for linear) - # or first (for other splines) N axes - # from y_new and insert them where self.axis was in the list of axes. - nx = x_new.ndim - ny = y_new.ndim - - # 6. Fill any values that were out of bounds with fill_value. - # and - # 7. Rotate the values back to their proper place. - - if nx == 0: - # special case: x is a scalar - if out_of_bounds: - if ny == 0: - return asarray(self.fill_value) - else: - y_new[...] 
= self.fill_value - return asarray(y_new) - elif self._kind in ('linear', 'nearest'): - y_new[..., out_of_bounds] = self.fill_value - axes = range(ny - nx) - axes[self.axis:self.axis] = range(ny - nx, ny) - return y_new.transpose(axes) - else: - y_new[out_of_bounds] = self.fill_value - axes = range(nx, ny) - axes[self.axis:self.axis] = range(nx) - return y_new.transpose(axes) - - def _check_bounds(self, x_new): - """Check the inputs for being in the bounds of the interpolated data. - - Parameters - ---------- - x_new : array - - Returns - ------- - out_of_bounds : bool array - The mask on x_new of values that are out of the bounds. - """ - - # If self.bounds_error is True, we raise an error if any x_new values - # fall outside the range of x. Otherwise, we return an array indicating - # which values are outside the boundary region. - below_bounds = x_new < self.x[0] - above_bounds = x_new > self.x[-1] - - # !! Could provide more information about which values are out of bounds - if self.bounds_error and below_bounds.any(): - raise ValueError("A value in x_new is below the interpolation " - "range.") - if self.bounds_error and above_bounds.any(): - raise ValueError("A value in x_new is above the interpolation " - "range.") - - # !! Should we emit a warning if some values are out of bounds? - # !! matlab does not. - out_of_bounds = logical_or(below_bounds, above_bounds) - return out_of_bounds - -class ppform(object): - """The ppform of the piecewise polynomials is given in terms of coefficients - and breaks. The polynomial in the ith interval is - x_{i} <= x < x_{i+1} - - S_i = sum(coefs[m,i]*(x-breaks[i])^(k-m), m=0..k) - where k is the degree of the polynomial. 
- """ - def __init__(self, coeffs, breaks, fill=0.0, sort=False): - self.coeffs = np.asarray(coeffs) - if sort: - self.breaks = np.sort(breaks) - else: - self.breaks = np.asarray(breaks) - self.K = self.coeffs.shape[0] - self.fill = fill - self.a = self.breaks[0] - self.b = self.breaks[-1] - - def __call__(self, xnew): - saveshape = np.shape(xnew) - xnew = np.ravel(xnew) - res = np.empty_like(xnew) - mask = (xnew >= self.a) & (xnew <= self.b) - res[~mask] = self.fill - xx = xnew.compress(mask) - indxs = np.searchsorted(self.breaks, xx)-1 - indxs = indxs.clip(0,len(self.breaks)) - pp = self.coeffs - diff = xx - self.breaks.take(indxs) - V = np.vander(diff,N=self.K) - # values = np.diag(dot(V,pp[:,indxs])) - values = array([dot(V[k,:],pp[:,indxs[k]]) for k in xrange(len(xx))]) - res[mask] = values - res.shape = saveshape - return res - - def fromspline(cls, xk, cvals, order, fill=0.0): - N = len(xk)-1 - sivals = np.empty((order+1,N), dtype=float) - for m in xrange(order,-1,-1): - fact = spec.gamma(m+1) - res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m) - res /= fact - sivals[order-m,:] = res - return cls(sivals, xk, fill=fill) - fromspline = classmethod(fromspline) - - -def _dot0(a, b): - """Similar to numpy.dot, but sum over last axis of a and 1st axis of b""" - if b.ndim <= 2: - return dot(a, b) - else: - axes = range(b.ndim) - axes.insert(-1, 0) - axes.pop(0) - return dot(a, b.transpose(axes)) - -def _find_smoothest(xk, yk, order, conds=None, B=None): - # construct Bmatrix, and Jmatrix - # e = J*c - # minimize norm(e,2) given B*c=yk - # if desired B can be given - # conds is ignored - N = len(xk)-1 - K = order - if B is None: - B = _fitpack._bsplmat(order, xk) - J = _fitpack._bspldismat(order, xk) - u,s,vh = np.dual.svd(B) - ind = K-1 - V2 = vh[-ind:,:].T - V1 = vh[:-ind,:].T - A = dot(J.T,J) - tmp = dot(V2.T,A) - Q = dot(tmp,V2) - p = np.dual.solve(Q,tmp) - tmp = dot(V2,p) - tmp = np.eye(N+K) - tmp - tmp = dot(tmp,V1) - tmp = dot(tmp,np.diag(1.0/s)) - tmp = 
dot(tmp,u.T) - return _dot0(tmp, yk) - -def _setdiag(a, k, v): - if not a.ndim == 2: - raise ValueError("Input array should be 2-D.") - M,N = a.shape - if k > 0: - start = k - num = N-k - else: - num = M+k - start = abs(k)*N - end = start + num*(N+1)-1 - a.flat[start:end:(N+1)] = v - -# Return the spline that minimizes the dis-continuity of the -# "order-th" derivative; for order >= 2. - -def _find_smoothest2(xk, yk): - N = len(xk)-1 - Np1 = N+1 - # find pseudo-inverse of B directly. - Bd = np.empty((Np1,N)) - for k in range(-N,N): - if (k<0): - l = np.arange(-k,Np1) - v = (l+k+1) - if ((k+1) % 2): - v = -v - else: - l = np.arange(k,N) - v = N-l - if ((k % 2)): - v = -v - _setdiag(Bd,k,v) - Bd /= (Np1) - V2 = np.ones((Np1,)) - V2[1::2] = -1 - V2 /= math.sqrt(Np1) - dk = np.diff(xk) - b = 2*np.diff(yk, axis=0)/dk - J = np.zeros((N-1,N+1)) - idk = 1.0/dk - _setdiag(J,0,idk[:-1]) - _setdiag(J,1,-idk[1:]-idk[:-1]) - _setdiag(J,2,idk[1:]) - A = dot(J.T,J) - val = dot(V2,dot(A,V2)) - res1 = dot(np.outer(V2,V2)/val,A) - mk = dot(np.eye(Np1)-res1, _dot0(Bd,b)) - return mk - -def _get_spline2_Bb(xk, yk, kind, conds): - Np1 = len(xk) - dk = xk[1:]-xk[:-1] - if kind == 'not-a-knot': - # use banded-solver - nlu = (1,1) - B = ones((3,Np1)) - alpha = 2*(yk[1:]-yk[:-1])/dk - zrs = np.zeros((1,)+yk.shape[1:]) - row = (Np1-1)//2 - b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0) - B[0,row+2:] = 0 - B[2,:(row-1)] = 0 - B[0,row+1] = dk[row-1] - B[1,row] = -dk[row]-dk[row-1] - B[2,row-1] = dk[row] - return B, b, None, nlu - else: - raise NotImplementedError("quadratic %s is not available" % kind) - -def _get_spline3_Bb(xk, yk, kind, conds): - # internal function to compute different tri-diagonal system - # depending on the kind of spline requested. 
- # conds is only used for 'second' and 'first' - Np1 = len(xk) - if kind in ['natural', 'second']: - if kind == 'natural': - m0, mN = 0.0, 0.0 - else: - m0, mN = conds - - # the matrix to invert is (N-1,N-1) - # use banded solver - beta = 2*(xk[2:]-xk[:-2]) - alpha = xk[1:]-xk[:-1] - nlu = (1,1) - B = np.empty((3,Np1-2)) - B[0,1:] = alpha[2:] - B[1,:] = beta - B[2,:-1] = alpha[1:-1] - dyk = yk[1:]-yk[:-1] - b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1]) - b *= 6 - b[0] -= m0 - b[-1] -= mN - - def append_func(mk): - # put m0 and mN into the correct shape for - # concatenation - ma = array(m0,copy=0,ndmin=yk.ndim) - mb = array(mN,copy=0,ndmin=yk.ndim) - if ma.shape[1:] != yk.shape[1:]: - ma = ma*(ones(yk.shape[1:])[np.newaxis,...]) - if mb.shape[1:] != yk.shape[1:]: - mb = mb*(ones(yk.shape[1:])[np.newaxis,...]) - mk = np.concatenate((ma,mk),axis=0) - mk = np.concatenate((mk,mb),axis=0) - return mk - - return B, b, append_func, nlu - - - elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout', - 'parabolic']: - if kind == 'endslope': - # match slope of lagrange interpolating polynomial of - # order 3 at end-points. 
- x0,x1,x2,x3 = xk[:4] - sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0] - sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1] - sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2] - sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3] - - xN3,xN2,xN1,xN0 = xk[-4:] - sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1] - sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2] - sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3] - sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4] - elif kind == 'clamped': - sl_0, sl_N = 0.0, 0.0 - elif kind == 'first': - sl_0, sl_N = conds - - # Now set up the (N+1)x(N+1) system of equations - beta = np.r_[0,2*(xk[2:]-xk[:-2]),0] - alpha = xk[1:]-xk[:-1] - gamma = np.r_[0,alpha[1:]] - B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1) - d1 = alpha[0] - dN = alpha[-1] - if kind == 'not-a-knot': - d2 = alpha[1] - dN1 = alpha[-2] - B[0,:3] = [d2,-d1-d2,d1] - B[-1,-3:] = [dN,-dN1-dN,dN1] - elif kind == 'runout': - B[0,:3] = [1,-2,1] - B[-1,-3:] = [1,-2,1] - elif kind == 'parabolic': - B[0,:2] = [1,-1] - B[-1,-2:] = [-1,1] - elif kind == 'periodic': - raise NotImplementedError - elif kind == 'symmetric': - raise NotImplementedError - else: - B[0,:2] = [2*d1,d1] - B[-1,-2:] = [dN,2*dN] - - # Set up RHS (b) - b = np.empty((Np1,)+yk.shape[1:]) - dyk = (yk[1:]-yk[:-1])*1.0 - if kind in ['not-a-knot', 'runout', 'parabolic']: - b[0] = b[-1] = 0.0 - elif kind == 'periodic': - raise NotImplementedError - elif kind == 'symmetric': - raise NotImplementedError - else: - b[0] = (dyk[0]/d1 - sl_0) - b[-1] = -(dyk[-1]/dN - sl_N) - b[1:-1,...] 
= (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1]) - b *= 6.0 - return B, b, None, None - else: - raise ValueError("%s not supported" % kind) - -# conds is a tuple of an array and a vector -# giving the left-hand and the right-hand side -# of the additional equations to add to B -def _find_user(xk, yk, order, conds, B): - lh = conds[0] - rh = conds[1] - B = np.concatenate((B,lh),axis=0) - w = np.concatenate((yk,rh),axis=0) - M,N = B.shape - if (M>N): - raise ValueError("over-specification of conditions") - elif (M 1) and/or xnew is - N-d, then the result is xnew.shape + cvals.shape[1:] providing the - interpolation of multiple curves. - """ - oldshape = np.shape(xnew) - xx = np.ravel(xnew) - sh = cvals.shape[1:] - res = np.empty(xx.shape + sh, dtype=cvals.dtype) - for index in np.ndindex(*sh): - sl = (slice(None),)+index - if issubclass(cvals.dtype.type, np.complexfloating): - res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv) - res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv) - else: - res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv) - res.shape = oldshape + sh - return res - -def spltopp(xk,cvals,k): - """Return a piece-wise polynomial object from a fixed-spline tuple. - """ - return ppform.fromspline(xk, cvals, k) - -def spline(xk,yk,xnew,order=3,kind='smoothest',conds=None): - """Interpolate a curve (xk,yk) at points xnew using a spline fit. - """ - return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew) diff --git a/scipy-0.10.1/scipy/interpolate/interpolate_wrapper.py b/scipy-0.10.1/scipy/interpolate/interpolate_wrapper.py deleted file mode 100644 index 39253e7a55..0000000000 --- a/scipy-0.10.1/scipy/interpolate/interpolate_wrapper.py +++ /dev/null @@ -1,140 +0,0 @@ -""" helper_funcs.py. - scavenged from enthought,interpolate -""" - -import numpy as np -import _interpolate # C extension. Does all the real work. 
- -def atleast_1d_and_contiguous(ary, dtype = np.float64): - return np.atleast_1d( np.ascontiguousarray(ary, dtype) ) - -def nearest(x, y, new_x): - """ Rounds each new_x[i] to the closest value in x - and returns corresponding y. - """ - shifted_x = np.concatenate(( np.array([x[0]-1]) , x[0:-1] )) - - midpoints_of_x = atleast_1d_and_contiguous( .5*(x + shifted_x) ) - new_x = atleast_1d_and_contiguous(new_x) - - TINY = 1e-10 - indices = np.searchsorted(midpoints_of_x, new_x+TINY)-1 - indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(np.int)) - new_y = np.take(y, indices, axis=-1) - - return new_y - - - -def linear(x, y, new_x): - """ Linearly interpolates values in new_x based on the values in x and y - - Parameters - ---------- - x - 1-D array - y - 1-D or 2-D array - new_x - 1-D array - """ - x = atleast_1d_and_contiguous(x, np.float64) - y = atleast_1d_and_contiguous(y, np.float64) - new_x = atleast_1d_and_contiguous(new_x, np.float64) - - if y.ndim > 2: - raise ValueError("`linear` only works with 1-D or 2-D arrays.") - if len(y.shape) == 2: - new_y = np.zeros((y.shape[0], len(new_x)), np.float64) - for i in range(len(new_y)): # for each row - _interpolate.linear_dddd(x, y[i], new_x, new_y[i]) - else: - new_y = np.zeros(len(new_x), np.float64) - _interpolate.linear_dddd(x, y, new_x, new_y) - - return new_y - -def logarithmic(x, y, new_x): - """ Linearly interpolates values in new_x based in the log space of y. 
- - Parameters - ---------- - x - 1-D array - y - 1-D or 2-D array - new_x - 1-D array - """ - x = atleast_1d_and_contiguous(x, np.float64) - y = atleast_1d_and_contiguous(y, np.float64) - new_x = atleast_1d_and_contiguous(new_x, np.float64) - - if y.ndim > 2: - raise ValueError("`linear` only works with 1-D or 2-D arrays.") - if len(y.shape) == 2: - new_y = np.zeros((y.shape[0], len(new_x)), np.float64) - for i in range(len(new_y)): - _interpolate.loginterp_dddd(x, y[i], new_x, new_y[i]) - else: - new_y = np.zeros(len(new_x), np.float64) - _interpolate.loginterp_dddd(x, y, new_x, new_y) - - return new_y - -def block_average_above(x, y, new_x): - """ Linearly interpolates values in new_x based on the values in x and y - - Parameters - ---------- - x - 1-D array - y - 1-D or 2-D array - new_x - 1-D array - """ - bad_index = None - x = atleast_1d_and_contiguous(x, np.float64) - y = atleast_1d_and_contiguous(y, np.float64) - new_x = atleast_1d_and_contiguous(new_x, np.float64) - - if y.ndim > 2: - raise ValueError("`linear` only works with 1-D or 2-D arrays.") - if len(y.shape) == 2: - new_y = np.zeros((y.shape[0], len(new_x)), np.float64) - for i in range(len(new_y)): - bad_index = _interpolate.block_averave_above_dddd(x, y[i], - new_x, new_y[i]) - if bad_index is not None: - break - else: - new_y = np.zeros(len(new_x), np.float64) - bad_index = _interpolate.block_average_above_dddd(x, y, new_x, new_y) - - if bad_index is not None: - msg = "block_average_above cannot extrapolate and new_x[%d]=%f "\ - "is out of the x range (%f, %f)" % \ - (bad_index, new_x[bad_index], x[0], x[-1]) - raise ValueError(msg) - - return new_y - -def block(x, y, new_x): - """ Essentially a step function. - - For each new_x[i], finds largest j such that - x[j] < new_x[j], and returns y[j]. 
- """ - # find index of values in x that preceed values in x - # This code is a little strange -- we really want a routine that - # returns the index of values where x[j] < x[index] - TINY = 1e-10 - indices = np.searchsorted(x, new_x+TINY)-1 - - # If the value is at the front of the list, it'll have -1. - # In this case, we will use the first (0), element in the array. - # take requires the index array to be an Int - indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(np.int)) - new_y = np.take(y, indices, axis=-1) - return new_y diff --git a/scipy-0.10.1/scipy/interpolate/ndgriddata.py b/scipy-0.10.1/scipy/interpolate/ndgriddata.py deleted file mode 100644 index d0e8e3b2fa..0000000000 --- a/scipy-0.10.1/scipy/interpolate/ndgriddata.py +++ /dev/null @@ -1,190 +0,0 @@ -""" -Convenience interface to N-D interpolation - -.. versionadded:: 0.9 - -""" - -import numpy as np -from interpnd import LinearNDInterpolator, NDInterpolatorBase, \ - CloughTocher2DInterpolator, _ndim_coords_from_arrays -from scipy.spatial import cKDTree - -__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator', - 'CloughTocher2DInterpolator'] - -#------------------------------------------------------------------------------ -# Nearest-neighbour interpolation -#------------------------------------------------------------------------------ - -class NearestNDInterpolator(NDInterpolatorBase): - """ - NearestNDInterpolator(points, values) - - Nearest-neighbour interpolation in N dimensions. - - .. versionadded:: 0.9 - - Parameters - ---------- - points : ndarray of floats, shape (npoints, ndims) - Data point coordinates. - values : ndarray of float or complex, shape (npoints, ...) - Data values. 
- - Notes - ----- - Uses ``scipy.spatial.cKDTree`` - - """ - - def __init__(self, x, y): - x = _ndim_coords_from_arrays(x) - self._check_init_shape(x, y) - self.tree = cKDTree(x) - self.points = x - self.values = y - - def __call__(self, *args): - """ - Evaluate interpolator at given points. - - Parameters - ---------- - xi : ndarray of float, shape (..., ndim) - Points where to interpolate data at. - - """ - xi = _ndim_coords_from_arrays(args) - xi = self._check_call_shape(xi) - dist, i = self.tree.query(xi) - return self.values[i] - - -#------------------------------------------------------------------------------ -# Convenience interface function -#------------------------------------------------------------------------------ - -def griddata(points, values, xi, method='linear', fill_value=np.nan): - """ - Interpolate unstructured N-dimensional data. - - .. versionadded:: 0.9 - - Parameters - ---------- - points : ndarray of floats, shape (npoints, ndims) - Data point coordinates. Can either be a ndarray of - size (npoints, ndim), or a tuple of `ndim` arrays. - values : ndarray of float or complex, shape (npoints, ...) - Data values. - xi : ndarray of float, shape (..., ndim) - Points where to interpolate data at. - - method : {'linear', 'nearest', 'cubic'}, optional - Method of interpolation. One of - - - ``nearest``: return the value at the data point closest to - the point of interpolation. See `NearestNDInterpolator` for - more details. - - - ``linear``: tesselate the input point set to n-dimensional - simplices, and interpolate linearly on each simplex. See - `LinearNDInterpolator` for more details. - - - ``cubic`` (1-D): return the value detemined from a cubic - spline. - - - ``cubic`` (2-D): return the value determined from a - piecewise cubic, continuously differentiable (C1), and - approximately curvature-minimizing polynomial surface. See - `CloughTocher2DInterpolator` for more details. 
- - fill_value : float, optional - Value used to fill in for requested points outside of the - convex hull of the input points. If not provided, then the - default is ``nan``. This option has no effect for the - 'nearest' method. - - - Examples - -------- - - Suppose we want to interpolate the 2-D function - - >>> def func(x, y): - >>> return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 - - on a grid in [0, 1]x[0, 1] - - >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] - - but we only know its values at 1000 data points: - - >>> points = np.random.rand(1000, 2) - >>> values = func(points[:,0], points[:,1]) - - This can be done with `griddata` -- below we try out all of the - interpolation methods: - - >>> from scipy.interpolate import griddata - >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest') - >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear') - >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic') - - One can see that the exact result is reproduced by all of the - methods to some degree, but for this smooth function the piecewise - cubic interpolant gives the best results: - - >>> import matplotlib.pyplot as plt - >>> plt.subplot(221) - >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') - >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1) - >>> plt.title('Original') - >>> plt.subplot(222) - >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') - >>> plt.title('Nearest') - >>> plt.subplot(223) - >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') - >>> plt.title('Linear') - >>> plt.subplot(224) - >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') - >>> plt.title('Cubic') - >>> plt.gcf().set_size_inches(6, 6) - >>> plt.show() - - """ - - points = _ndim_coords_from_arrays(points) - - if points.ndim < 2: - ndim = points.ndim - else: - ndim = points.shape[-1] - - if ndim == 1 and method in ('nearest', 'linear', 'cubic'): - from interpolate import 
interp1d - points = points.ravel() - if isinstance(xi, tuple): - if len(xi) != 1: - raise ValueError("invalid number of dimensions in xi") - xi, = xi - # Sort points/values together, necessary as input for interp1d - idx = np.argsort(points) - points = points[idx] - values = values[idx] - ip = interp1d(points, values, kind=method, axis=0, bounds_error=False, - fill_value=fill_value) - return ip(xi) - elif method == 'nearest': - ip = NearestNDInterpolator(points, values) - return ip(xi) - elif method == 'linear': - ip = LinearNDInterpolator(points, values, fill_value=fill_value) - return ip(xi) - elif method == 'cubic' and ndim == 2: - ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value) - return ip(xi) - else: - raise ValueError("Unknown interpolation method %r for " - "%d dimensional data" % (method, ndim)) diff --git a/scipy-0.10.1/scipy/interpolate/polyint.py b/scipy-0.10.1/scipy/interpolate/polyint.py deleted file mode 100644 index 0eb2106f0f..0000000000 --- a/scipy-0.10.1/scipy/interpolate/polyint.py +++ /dev/null @@ -1,967 +0,0 @@ -import numpy as np -from scipy.misc import factorial - -__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"] - -class KroghInterpolator(object): - """ - The interpolating polynomial for a set of points - - Constructs a polynomial that passes through a given set of points, - optionally with specified derivatives at those points. - Allows evaluation of the polynomial and all its derivatives. - For reasons of numerical stability, this function does not compute - the coefficients of the polynomial, although they can be obtained - by evaluating all the derivatives. - - Be aware that the algorithms implemented here are not necessarily - the most numerically stable known. 
Moreover, even in a world of - exact computation, unless the x coordinates are chosen very - carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - - polynomial interpolation itself is a very ill-conditioned process - due to the Runge phenomenon. In general, even with well-chosen - x values, degrees higher than about thirty cause problems with - numerical instability in this code. - - Based on [1]_. - - Parameters - ---------- - xi : array-like, length N - Known x-coordinates - yi : array-like, N by R - Known y-coordinates, interpreted as vectors of length R, - or scalars if R=1. When an xi occurs two or more times in - a row, the corresponding yi's represent derivative values. - - References - ---------- - .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation - and Numerical Differentiation", 1970. - - """ - def __init__(self, xi, yi): - """Construct an interpolator passing through the specified points - - The polynomial passes through all the pairs (xi,yi). One may additionally - specify a number of derivatives at each point xi; this is done by - repeating the value xi and specifying the derivatives as successive - yi values. - - Parameters - ---------- - xi : array-like, length N - known x-coordinates - yi : array-like, N by R - known y-coordinates, interpreted as vectors of length R, - or scalars if R=1. When an xi occurs two or more times in - a row, the corresponding yi's represent derivative values. - - Examples - -------- - To produce a polynomial that is zero at 0 and 1 and has - derivative 2 at 0, call - - >>> KroghInterpolator([0,0,1],[0,2,0]) - - This constructs the quadratic 2*X**2-2*X. The derivative condition - is indicated by the repeated zero in the xi array; the corresponding - yi values are 0, the function value, and 2, the derivative value. 
- - For another example, given xi, yi, and a derivative ypi for each - point, appropriate arrays can be constructed as: - - >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi))) - >>> KroghInterpolator(xi_k, yi_k) - - To produce a vector-valued polynomial, supply a higher-dimensional - array for yi: - - >>> KroghInterpolator([0,1],[[2,3],[4,5]]) - - This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1. - - """ - self.xi = np.asarray(xi) - self.yi = np.asarray(yi) - if len(self.yi.shape)==1: - self.vector_valued = False - self.yi = self.yi[:,np.newaxis] - elif len(self.yi.shape)>2: - raise ValueError("y coordinates must be either scalars or vectors") - else: - self.vector_valued = True - - n = len(xi) - self.n = n - nn, r = self.yi.shape - if nn!=n: - raise ValueError("%d x values provided and %d y values; must be equal" % (n, nn)) - self.r = r - - c = np.zeros((n+1,r)) - c[0] = yi[0] - Vk = np.zeros((n,r)) - for k in xrange(1,n): - s = 0 - while s<=k and xi[k-s]==xi[k]: - s += 1 - s -= 1 - Vk[0] = yi[k]/float(factorial(s)) - for i in xrange(k-s): - if xi[i] == xi[k]: - raise ValueError("Elements if `xi` can't be equal.") - if s==0: - Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k]) - else: - Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k]) - c[k] = Vk[k-s] - self.c = c - - def __call__(self,x): - """Evaluate the polynomial at the point x - - Parameters - ---------- - x : scalar or array-like of length N - - Returns - ------- - y : scalar, array of length R, array of length N, or array of length N by R - If x is a scalar, returns either a vector or a scalar depending on - whether the interpolator is vector-valued or scalar-valued. - If x is a vector, returns a vector of values. 
- """ - if _isscalar(x): - scalar = True - m = 1 - else: - scalar = False - m = len(x) - x = np.asarray(x) - - n = self.n - pi = 1 - p = np.zeros((m,self.r)) - p += self.c[0,np.newaxis,:] - for k in xrange(1,n): - w = x - self.xi[k-1] - pi = w*pi - p = p + np.multiply.outer(pi,self.c[k]) - if not self.vector_valued: - if scalar: - return p[0,0] - else: - return p[:,0] - else: - if scalar: - return p[0] - else: - return p - - def derivatives(self,x,der=None): - """ - Evaluate many derivatives of the polynomial at the point x - - Produce an array of all derivative values at the point x. - - Parameters - ---------- - x : scalar or array_like of length N - Point or points at which to evaluate the derivatives - - der : None or integer - How many derivatives to extract; None for all potentially - nonzero derivatives (that is a number equal to the number - of points). This number includes the function value as 0th - derivative. - - Returns - ------- - d : ndarray - If the interpolator's values are R-dimensional then the - returned array will be der by N by R. If x is a scalar, - the middle dimension will be dropped; if R is 1 then the - last dimension will be dropped. - - Examples - -------- - >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0) - array([1.0,2.0,3.0]) - >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0]) - array([[1.0,1.0], - [2.0,2.0], - [3.0,3.0]]) - - """ - if _isscalar(x): - scalar = True - m = 1 - else: - scalar = False - m = len(x) - x = np.asarray(x) - - n = self.n - r = self.r - - if der is None: - der = self.n - dern = min(self.n,der) - pi = np.zeros((n,m)) - w = np.zeros((n,m)) - pi[0] = 1 - p = np.zeros((m,self.r)) - p += self.c[0,np.newaxis,:] - - for k in xrange(1,n): - w[k-1] = x - self.xi[k-1] - pi[k] = w[k-1]*pi[k-1] - p += np.multiply.outer(pi[k],self.c[k]) - - cn = np.zeros((max(der,n+1),m,r)) - cn[:n+1,...] 
+= self.c[:n+1,np.newaxis,:] - cn[0] = p - for k in xrange(1,n): - for i in xrange(1,n-k+1): - pi[i] = w[k+i-1]*pi[i-1]+pi[i] - cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i] - cn[k]*=factorial(k) - - cn[n,...] = 0 - if not self.vector_valued: - if scalar: - return cn[:der,0,0] - else: - return cn[:der,:,0] - else: - if scalar: - return cn[:der,0] - else: - return cn[:der] - def derivative(self,x,der): - """ - Evaluate one derivative of the polynomial at the point x - - Parameters - ---------- - x : scalar or array_like of length N - Point or points at which to evaluate the derivatives - - der : None or integer - Which derivative to extract. This number includes the - function value as 0th derivative. - - Returns - ------- - d : ndarray - If the interpolator's values are R-dimensional then the - returned array will be N by R. If x is a scalar, - the middle dimension will be dropped; if R is 1 then the - last dimension will be dropped. - - Notes - ----- - This is computed by evaluating all derivatives up to the desired - one (using self.derivatives()) and then discarding the rest. - - """ - return self.derivatives(x,der=der+1)[der] - -def krogh_interpolate(xi,yi,x,der=0): - """ - Convenience function for polynomial interpolation. - - Constructs a polynomial that passes through a given set of points, - optionally with specified derivatives at those points. - Evaluates the polynomial or some of its derivatives. - For reasons of numerical stability, this function does not compute - the coefficients of the polynomial, although they can be obtained - by evaluating all the derivatives. - - Be aware that the algorithms implemented here are not necessarily - the most numerically stable known. Moreover, even in a world of - exact computation, unless the x coordinates are chosen very - carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - - polynomial interpolation itself is a very ill-conditioned process - due to the Runge phenomenon. 
In general, even with well-chosen - x values, degrees higher than about thirty cause problems with - numerical instability in this code. - - Based on Krogh 1970, "Efficient Algorithms for Polynomial Interpolation - and Numerical Differentiation" - - The polynomial passes through all the pairs (xi,yi). One may additionally - specify a number of derivatives at each point xi; this is done by - repeating the value xi and specifying the derivatives as successive - yi values. - - Parameters - ---------- - xi : array_like, length N - known x-coordinates - yi : array_like, N by R - known y-coordinates, interpreted as vectors of length R, - or scalars if R=1 - x : scalar or array_like of length N - Point or points at which to evaluate the derivatives - der : integer or list - How many derivatives to extract; None for all potentially - nonzero derivatives (that is a number equal to the number - of points), or a list of derivatives to extract. This number - includes the function value as 0th derivative. - - Returns - ------- - d : ndarray - If the interpolator's values are R-dimensional then the - returned array will be the number of derivatives by N by R. - If x is a scalar, the middle dimension will be dropped; if - the yi are scalars then the last dimension will be dropped. - - Notes - ----- - Construction of the interpolating polynomial is a relatively expensive - process. If you want to evaluate it repeatedly consider using the class - KroghInterpolator (which is what this function uses). - - """ - P = KroghInterpolator(xi, yi) - if der==0: - return P(x) - elif _isscalar(der): - return P.derivative(x,der=der) - else: - return P.derivatives(x,der=np.amax(der)+1)[der] - - - - -def approximate_taylor_polynomial(f,x,degree,scale,order=None): - """ - Estimate the Taylor polynomial of f at x by polynomial fitting. - - Parameters - ---------- - f : callable - The function whose Taylor polynomial is sought. Should accept - a vector of x values. 
- x : scalar - The point at which the polynomial is to be evaluated. - degree : int - The degree of the Taylor polynomial - scale : scalar - The width of the interval to use to evaluate the Taylor polynomial. - Function values spread over a range this wide are used to fit the - polynomial. Must be chosen carefully. - order : int or None - The order of the polynomial to be used in the fitting; f will be - evaluated ``order+1`` times. If None, use `degree`. - - Returns - ------- - p : poly1d instance - The Taylor polynomial (translated to the origin, so that - for example p(0)=f(x)). - - Notes - ----- - The appropriate choice of "scale" is a trade-off; too large and the - function differs from its Taylor polynomial too much to get a good - answer, too small and round-off errors overwhelm the higher-order terms. - The algorithm used becomes numerically unstable around order 30 even - under ideal circumstances. - - Choosing order somewhat larger than degree may improve the higher-order - terms. - - """ - if order is None: - order=degree - - n = order+1 - # Choose n points that cluster near the endpoints of the interval in - # a way that avoids the Runge phenomenon. Ensure, by including the - # endpoint or not as appropriate, that one point always falls at x - # exactly. - xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x - - P = KroghInterpolator(xs, f(xs)) - d = P.derivatives(x,der=degree+1) - - return np.poly1d((d/factorial(np.arange(degree+1)))[::-1]) - - -class BarycentricInterpolator(object): - """The interpolating polynomial for a set of points - - Constructs a polynomial that passes through a given set of points. - Allows evaluation of the polynomial, efficient changing of the y - values to be interpolated, and updating by adding more x values. - For reasons of numerical stability, this function does not compute - the coefficients of the polynomial. 
- - This class uses a "barycentric interpolation" method that treats - the problem as a special case of rational function interpolation. - This algorithm is quite stable, numerically, but even in a world of - exact computation, unless the x coordinates are chosen very - carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - - polynomial interpolation itself is a very ill-conditioned process - due to the Runge phenomenon. - - Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation". - """ - def __init__(self, xi, yi=None): - """Construct an object capable of interpolating functions sampled at xi - - The values yi need to be provided before the function is evaluated, - but none of the preprocessing depends on them, so rapid updates - are possible. - - Parameters - ---------- - xi : array-like of length N - The x coordinates of the points the polynomial should pass through - yi : array-like N by R or None - The y coordinates of the points the polynomial should pass through; - if R>1 the polynomial is vector-valued. If None the y values - will be supplied later. - """ - self.n = len(xi) - self.xi = np.asarray(xi) - if yi is not None and len(yi)!=len(self.xi): - raise ValueError("yi dimensions do not match xi dimensions") - self.set_yi(yi) - self.wi = np.zeros(self.n) - self.wi[0] = 1 - for j in xrange(1,self.n): - self.wi[:j]*=(self.xi[j]-self.xi[:j]) - self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j]) - self.wi**=-1 - - def set_yi(self, yi): - """ - Update the y values to be interpolated - - The barycentric interpolation algorithm requires the calculation - of weights, but these depend only on the xi. The yi can be changed - at any time. - - Parameters - ---------- - yi : array_like N by R - The y coordinates of the points the polynomial should pass through; - if R>1 the polynomial is vector-valued. If None the y values - will be supplied later. 
- - """ - if yi is None: - self.yi = None - return - yi = np.asarray(yi) - if len(yi.shape)==1: - self.vector_valued = False - yi = yi[:,np.newaxis] - elif len(yi.shape)>2: - raise ValueError("y coordinates must be either scalars or vectors") - else: - self.vector_valued = True - - n, r = yi.shape - if n!=len(self.xi): - raise ValueError("yi dimensions do not match xi dimensions") - self.yi = yi - self.r = r - - - def add_xi(self, xi, yi=None): - """ - Add more x values to the set to be interpolated - - The barycentric interpolation algorithm allows easy updating by - adding more points for the polynomial to pass through. - - Parameters - ---------- - xi : array_like of length N1 - The x coordinates of the points the polynomial should pass through - yi : array_like N1 by R or None - The y coordinates of the points the polynomial should pass through; - if R>1 the polynomial is vector-valued. If None the y values - will be supplied later. The yi should be specified if and only if - the interpolator has y values specified. 
- - """ - if yi is not None: - if self.yi is None: - raise ValueError("No previous yi value to update!") - yi = np.asarray(yi) - if len(yi.shape)==1: - if self.vector_valued: - raise ValueError("Cannot extend dimension %d y vectors with scalars" % self.r) - yi = yi[:,np.newaxis] - elif len(yi.shape)>2: - raise ValueError("y coordinates must be either scalars or vectors") - else: - n, r = yi.shape - if r!=self.r: - raise ValueError("Cannot extend dimension %d y vectors with dimension %d y vectors" % (self.r, r)) - - self.yi = np.vstack((self.yi,yi)) - else: - if self.yi is not None: - raise ValueError("No update to yi provided!") - old_n = self.n - self.xi = np.concatenate((self.xi,xi)) - self.n = len(self.xi) - self.wi**=-1 - old_wi = self.wi - self.wi = np.zeros(self.n) - self.wi[:old_n] = old_wi - for j in xrange(old_n,self.n): - self.wi[:j]*=(self.xi[j]-self.xi[:j]) - self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j]) - self.wi**=-1 - - def __call__(self, x): - """Evaluate the interpolating polynomial at the points x - - Parameters - ---------- - x : scalar or array-like of length M - - Returns - ------- - y : scalar or array-like of length R or length M or M by R - The shape of y depends on the shape of x and whether the - interpolator is vector-valued or scalar-valued. - - Notes - ----- - Currently the code computes an outer product between x and the - weights, that is, it constructs an intermediate array of size - N by M, where N is the degree of the polynomial. 
- """ - scalar = _isscalar(x) - x = np.atleast_1d(x) - c = np.subtract.outer(x,self.xi) - z = c==0 - c[z] = 1 - c = self.wi/c - p = np.dot(c,self.yi)/np.sum(c,axis=-1)[:,np.newaxis] - i, j = np.nonzero(z) - p[i] = self.yi[j] - if not self.vector_valued: - if scalar: - return p[0,0] - else: - return p[:,0] - else: - if scalar: - return p[0] - else: - return p -def barycentric_interpolate(xi, yi, x): - """ - Convenience function for polynomial interpolation - - Constructs a polynomial that passes through a given set of points, - then evaluates the polynomial. For reasons of numerical stability, - this function does not compute the coefficients of the polynomial. - - This function uses a "barycentric interpolation" method that treats - the problem as a special case of rational function interpolation. - This algorithm is quite stable, numerically, but even in a world of - exact computation, unless the x coordinates are chosen very - carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - - polynomial interpolation itself is a very ill-conditioned process - due to the Runge phenomenon. - - Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation". - - - Parameters - ---------- - xi : array_like of length N - The x coordinates of the points the polynomial should pass through - yi : array_like N by R - The y coordinates of the points the polynomial should pass through; - if R>1 the polynomial is vector-valued. - x : scalar or array_like of length M - - - Returns - ------- - y : scalar or array_like of length R or length M or M by R - The shape of y depends on the shape of x and whether the - interpolator is vector-valued or scalar-valued. - - - Notes - ----- - - Construction of the interpolation weights is a relatively slow process. - If you want to call this many times with the same xi (but possibly - varying yi or x) you should use the class BarycentricInterpolator. - This is what this function uses internally. 
- - """ - return BarycentricInterpolator(xi, yi)(x) - - -class PiecewisePolynomial(object): - """Piecewise polynomial curve specified by points and derivatives - - This class represents a curve that is a piecewise polynomial. It - passes through a list of points and has specified derivatives at - each point. The degree of the polynomial may very from segment to - segment, as may the number of derivatives available. The degree - should not exceed about thirty. - - Appending points to the end of the curve is efficient. - """ - def __init__(self, xi, yi, orders=None, direction=None): - """Construct a piecewise polynomial - - Parameters - ---------- - xi : array-like of length N - a sorted list of x-coordinates - yi : list of lists of length N - yi[i] is the list of derivatives known at xi[i] - orders : list of integers, or integer - a list of polynomial orders, or a single universal order - direction : {None, 1, -1} - indicates whether the xi are increasing or decreasing - +1 indicates increasing - -1 indicates decreasing - None indicates that it should be deduced from the first two xi - - Notes - ----- - If orders is None, or orders[i] is None, then the degree of the - polynomial segment is exactly the degree required to match all i - available derivatives at both endpoints. If orders[i] is not None, - then some derivatives will be ignored. The code will try to use an - equal number of derivatives from each end; if the total number of - derivatives needed is odd, it will prefer the rightmost endpoint. If - not enough derivatives are available, an exception is raised. 
- """ - yi0 = np.asarray(yi[0]) - if len(yi0.shape)==2: - self.vector_valued = True - self.r = yi0.shape[1] - elif len(yi0.shape)==1: - self.vector_valued = False - self.r = 1 - else: - raise ValueError("Each derivative must be a vector, not a higher-rank array") - - self.xi = [xi[0]] - self.yi = [yi0] - self.n = 1 - - self.direction = direction - self.orders = [] - self.polynomials = [] - self.extend(xi[1:],yi[1:],orders) - - def _make_polynomial(self,x1,y1,x2,y2,order,direction): - """Construct the interpolating polynomial object - - Deduces the number of derivatives to match at each end - from order and the number of derivatives available. If - possible it uses the same number of derivatives from - each end; if the number is odd it tries to take the - extra one from y2. In any case if not enough derivatives - are available at one end or another it draws enough to - make up the total from the other end. - """ - n = order+1 - n1 = min(n//2,len(y1)) - n2 = min(n-n1,len(y2)) - n1 = min(n-n2,len(y1)) - if n1+n2!=n: - raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order)) - if not (n1 <= len(y1) and n2 <= len(y2)): - raise ValueError("`order` input incompatible with length y1 or y2.") - - xi = np.zeros(n) - if self.vector_valued: - yi = np.zeros((n,self.r)) - else: - yi = np.zeros((n,)) - - xi[:n1] = x1 - yi[:n1] = y1[:n1] - xi[n1:] = x2 - yi[n1:] = y2[:n2] - - return KroghInterpolator(xi,yi) - - def append(self, xi, yi, order=None): - """ - Append a single point with derivatives to the PiecewisePolynomial - - Parameters - ---------- - xi : float - - yi : array_like - yi is the list of derivatives known at xi - - order : integer or None - a polynomial order, or instructions to use the highest - possible order - - """ - - yi = np.asarray(yi) - if self.vector_valued: - if (len(yi.shape)!=2 or yi.shape[1]!=self.r): - raise ValueError("Each derivative must be a vector of length %d" % self.r) 
- else: - if len(yi.shape)!=1: - raise ValueError("Each derivative must be a scalar") - - if self.direction is None: - self.direction = np.sign(xi-self.xi[-1]) - elif (xi-self.xi[-1])*self.direction < 0: - raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi)) - - self.xi.append(xi) - self.yi.append(yi) - - - if order is None: - n1 = len(self.yi[-2]) - n2 = len(self.yi[-1]) - n = n1+n2 - order = n-1 - - self.orders.append(order) - self.polynomials.append(self._make_polynomial( - self.xi[-2], self.yi[-2], - self.xi[-1], self.yi[-1], - order, self.direction)) - self.n += 1 - - - def extend(self, xi, yi, orders=None): - """ - Extend the PiecewisePolynomial by a list of points - - Parameters - ---------- - xi : array_like of length N1 - a sorted list of x-coordinates - yi : list of lists of length N1 - yi[i] is the list of derivatives known at xi[i] - orders : list of integers, or integer - a list of polynomial orders, or a single universal order - direction : {None, 1, -1} - indicates whether the xi are increasing or decreasing - +1 indicates increasing - -1 indicates decreasing - None indicates that it should be deduced from the first two xi - - """ - - for i in xrange(len(xi)): - if orders is None or _isscalar(orders): - self.append(xi[i],yi[i],orders) - else: - self.append(xi[i],yi[i],orders[i]) - - def __call__(self, x): - """Evaluate the piecewise polynomial - - Parameters - ---------- - x : scalar or array-like of length N - - Returns - ------- - y : scalar or array-like of length R or length N or N by R - """ - if _isscalar(x): - pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2) - y = self.polynomials[pos](x) - else: - x = np.asarray(x) - m = len(x) - pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2) - if self.vector_valued: - y = np.zeros((m,self.r)) - else: - y = np.zeros(m) - for i in xrange(self.n-1): - c = pos==i - y[c] = self.polynomials[i](x[c]) - return y - - def derivative(self, x, der): - """ 
- Evaluate a derivative of the piecewise polynomial - - Parameters - ---------- - x : scalar or array_like of length N - - der : integer - which single derivative to extract - - Returns - ------- - y : scalar or array_like of length R or length N or N by R - - Notes - ----- - This currently computes (using self.derivatives()) all derivatives - of the curve segment containing each x but returns only one. - - """ - return self.derivatives(x,der=der+1)[der] - - def derivatives(self, x, der): - """ - Evaluate a derivative of the piecewise polynomial - - Parameters - ---------- - x : scalar or array_like of length N - - der : integer - how many derivatives (including the function value as - 0th derivative) to extract - - Returns - ------- - y : array_like of shape der by R or der by N or der by N by R - - """ - if _isscalar(x): - pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2) - y = self.polynomials[pos].derivatives(x,der=der) - else: - x = np.asarray(x) - m = len(x) - pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2) - if self.vector_valued: - y = np.zeros((der,m,self.r)) - else: - y = np.zeros((der,m)) - for i in xrange(self.n-1): - c = pos==i - y[:,c] = self.polynomials[i].derivatives(x[c],der=der) - return y - - -def piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0): - """ - Convenience function for piecewise polynomial interpolation - - Parameters - ---------- - xi : array_like - A sorted list of x-coordinates, of length N. - yi : list of lists - yi[i] is the list of derivatives known at xi[i]. Of length N. - x : scalar or array_like - Of length M. - orders : int or list of ints - a list of polynomial orders, or a single universal order - der : int - Which single derivative to extract. 
- - Returns - ------- - y : scalar or array_like - The result, of length R or length M or M by R, - - Notes - ----- - If orders is None, or orders[i] is None, then the degree of the - polynomial segment is exactly the degree required to match all i - available derivatives at both endpoints. If orders[i] is not None, - then some derivatives will be ignored. The code will try to use an - equal number of derivatives from each end; if the total number of - derivatives needed is odd, it will prefer the rightmost endpoint. If - not enough derivatives are available, an exception is raised. - - Construction of these piecewise polynomials can be an expensive process; - if you repeatedly evaluate the same polynomial, consider using the class - PiecewisePolynomial (which is what this function does). - - """ - - P = PiecewisePolynomial(xi, yi, orders) - if der==0: - return P(x) - elif _isscalar(der): - return P.derivative(x,der=der) - else: - return P.derivatives(x,der=np.amax(der)+1)[der] - -def _isscalar(x): - """Check whether x is if a scalar type, or 0-dim""" - return np.isscalar(x) or hasattr(x, 'shape') and x.shape == () - -def _edge_case(m0, d1): - return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1)) - -def _find_derivatives(x, y): - # Determine the derivatives at the points y_k, d_k, by using - # PCHIP algorithm is: - # We choose the derivatives at the point x_k by - # Let m_k be the slope of the kth segment (between k and k+1) - # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0 - # else use weighted harmonic mean: - # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1} - # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1}) - # where h_k is the spacing between x_k and x_{k+1} - - hk = x[1:] - x[:-1] - mk = (y[1:] - y[:-1]) / hk - smk = np.sign(mk) - condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0)) - - w1 = 2*hk[1:] + hk[:-1] - w2 = hk[1:] + 2*hk[:-1] - whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1]) - - dk = np.zeros_like(y) - 
dk[1:-1][condition] = 0.0 - dk[1:-1][~condition] = 1.0/whmean[~condition] - - # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless - # one of d_1 or m_0 is 0, then choose d_0 = 0 - - dk[0] = _edge_case(mk[0],dk[1]) - dk[-1] = _edge_case(mk[-1],dk[-2]) - return dk - - -def pchip(x, y): - """PCHIP 1-d monotonic cubic interpolation - - Description - ----------- - x and y are arrays of values used to approximate some function f: - y = f(x) - This class factory function returns a callable class whose __call__ method - uses monotonic cubic, interpolation to find the value of new points. - - Parameters - ---------- - x : array - A 1D array of monotonically increasing real values. x cannot - include duplicate values (otherwise f is overspecified) - y : array - A 1-D array of real values. y's length along the interpolation - axis must be equal to the length of x. - - Assumes x is sorted in monotonic order (e.g. x[1] > x[0]) - """ - derivs = _find_derivatives(x,y) - return PiecewisePolynomial(x, zip(y, derivs), orders=3, direction=None) diff --git a/scipy-0.10.1/scipy/interpolate/rbf.py b/scipy-0.10.1/scipy/interpolate/rbf.py deleted file mode 100644 index ab41548e5d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/rbf.py +++ /dev/null @@ -1,219 +0,0 @@ -"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data. - -Written by John Travers , February 2007 -Based closely on Matlab code by Alex Chirokov -Additional, large, improvements by Robert Hetland -Some additional alterations by Travis Oliphant - -Permission to use, modify, and distribute this software is given under the -terms of the SciPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
- -Copyright (c) 2006-2007, Robert Hetland -Copyright (c) 2007, John Travers - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of Robert Hetland nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" -import sys - -from numpy import (sqrt, log, asarray, newaxis, all, dot, exp, eye, - float_) -from scipy import linalg - -__all__ = ['Rbf'] - - -class Rbf(object): - """ - Rbf(*args) - - A class for radial basis function approximation/interpolation of - n-dimensional scattered data. - - Parameters - ---------- - *args : arrays - x, y, z, ..., d, where x, y, z, ... 
are the coordinates of the nodes - and d is the array of values at the nodes - function : str or callable, optional - The radial basis function, based on the radius, r, given by the norm - (defult is Euclidean distance); the default is 'multiquadric':: - - 'multiquadric': sqrt((r/self.epsilon)**2 + 1) - 'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1) - 'gaussian': exp(-(r/self.epsilon)**2) - 'linear': r - 'cubic': r**3 - 'quintic': r**5 - 'thin_plate': r**2 * log(r) - - If callable, then it must take 2 arguments (self, r). The epsilon - parameter will be available as self.epsilon. Other keyword - arguments passed in will be available as well. - - epsilon : float, optional - Adjustable constant for gaussian or multiquadrics functions - - defaults to approximate average distance between nodes (which is - a good start). - smooth : float, optional - Values greater than zero increase the smoothness of the - approximation. 0 is for interpolation (default), the function will - always go through the nodal points in this case. - norm : callable, optional - A function that returns the 'distance' between two points, with - inputs as arrays of positions (x, y, z, ...), and an output as an - array of distance. E.g, the default:: - - def euclidean_norm(x1, x2): - return sqrt( ((x1 - x2)**2).sum(axis=0) ) - - which is called with x1=x1[ndims,newaxis,:] and - x2=x2[ndims,:,newaxis] such that the result is a matrix of the - distances from each point in x1 to each point in x2. 
- - Examples - -------- - >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance - >>> di = rbfi(xi, yi, zi) # interpolated values - - """ - - def _euclidean_norm(self, x1, x2): - return sqrt( ((x1 - x2)**2).sum(axis=0) ) - - def _h_multiquadric(self, r): - return sqrt((1.0/self.epsilon*r)**2 + 1) - def _h_inverse_multiquadric(self, r): - return 1.0/sqrt((1.0/self.epsilon*r)**2 + 1) - def _h_gaussian(self, r): - return exp(-(1.0/self.epsilon*r)**2) - def _h_linear(self, r): - return r - def _h_cubic(self, r): - return r**3 - def _h_quintic(self, r): - return r**5 - def _h_thin_plate(self, r): - result = r**2 * log(r) - result[r == 0] = 0 # the spline is zero at zero - return result - - # Setup self._function and do smoke test on initial r - def _init_function(self, r): - if isinstance(self.function, str): - self.function = self.function.lower() - _mapped = {'inverse': 'inverse_multiquadric', - 'inverse multiquadric': 'inverse_multiquadric', - 'thin-plate': 'thin_plate'} - if self.function in _mapped: - self.function = _mapped[self.function] - - func_name = "_h_" + self.function - if hasattr(self, func_name): - self._function = getattr(self, func_name) - else: - functionlist = [x[3:] for x in dir(self) if x.startswith('_h_')] - raise ValueError("function must be a callable or one of " + - ", ".join(functionlist)) - self._function = getattr(self, "_h_"+self.function) - elif callable(self.function): - allow_one = False - if hasattr(self.function, 'func_code') or \ - hasattr(self.function, '__code__'): - val = self.function - allow_one = True - elif hasattr(self.function, "im_func"): - val = self.function.im_func - elif hasattr(self.function, "__call__"): - val = self.function.__call__.im_func - else: - raise ValueError("Cannot determine number of arguments to function") - - argcount = val.func_code.co_argcount - if allow_one and argcount == 1: - self._function = self.function - elif argcount == 2: - if sys.version_info[0] >= 3: - self._function = 
self.function.__get__(self, Rbf) - else: - import new - self._function = new.instancemethod(self.function, self, - Rbf) - else: - raise ValueError("Function argument must take 1 or 2 arguments.") - - a0 = self._function(r) - if a0.shape != r.shape: - raise ValueError("Callable must take array and return array of the same shape") - return a0 - - def __init__(self, *args, **kwargs): - self.xi = asarray([asarray(a, dtype=float_).flatten() - for a in args[:-1]]) - self.N = self.xi.shape[-1] - self.di = asarray(args[-1]).flatten() - - if not all([x.size==self.di.size for x in self.xi]): - raise ValueError("All arrays must be equal length.") - - self.norm = kwargs.pop('norm', self._euclidean_norm) - r = self._call_norm(self.xi, self.xi) - self.epsilon = kwargs.pop('epsilon', r.mean()) - self.smooth = kwargs.pop('smooth', 0.0) - - self.function = kwargs.pop('function', 'multiquadric') - - # attach anything left in kwargs to self - # for use by any user-callable function or - # to save on the object returned. 
- for item, value in kwargs.items(): - setattr(self, item, value) - - self.A = self._init_function(r) - eye(self.N)*self.smooth - self.nodes = linalg.solve(self.A, self.di) - - def _call_norm(self, x1, x2): - if len(x1.shape) == 1: - x1 = x1[newaxis, :] - if len(x2.shape) == 1: - x2 = x2[newaxis, :] - x1 = x1[..., :, newaxis] - x2 = x2[..., newaxis, :] - return self.norm(x1, x2) - - def __call__(self, *args): - args = [asarray(x) for x in args] - if not all([x.shape == y.shape for x in args for y in args]): - raise ValueError("Array lengths must be equal") - shp = args[0].shape - self.xa = asarray([a.flatten() for a in args], dtype=float_) - r = self._call_norm(self.xa, self.xi) - return dot(self._function(r), self.nodes).reshape(shp) diff --git a/scipy-0.10.1/scipy/interpolate/setup.py b/scipy-0.10.1/scipy/interpolate/setup.py deleted file mode 100755 index 8b36042073..0000000000 --- a/scipy-0.10.1/scipy/interpolate/setup.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('interpolate', parent_package, top_path) - - config.add_library('fitpack', - sources=[join('fitpack', '*.f')], - ) - - config.add_extension('interpnd', - sources=['interpnd.c']) - - config.add_extension('_fitpack', - sources=['src/_fitpackmodule.c'], - libraries=['fitpack'], - depends = ['src/__fitpack.h','src/multipack.h'] - ) - - config.add_extension('dfitpack', - sources=['src/fitpack.pyf'], - libraries=['fitpack'], - ) - - config.add_extension('_interpolate', - sources=['src/_interpolate.cpp'], - include_dirs = ['src'], - depends = ['src/interpolate.h']) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/interpolate/setupscons.py b/scipy-0.10.1/scipy/interpolate/setupscons.py 
deleted file mode 100755 index e3d5e1a6a3..0000000000 --- a/scipy-0.10.1/scipy/interpolate/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('interpolate', parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/interpolate/src/__fitpack.h b/scipy-0.10.1/scipy/interpolate/src/__fitpack.h deleted file mode 100644 index fd9c06a6b4..0000000000 --- a/scipy-0.10.1/scipy/interpolate/src/__fitpack.h +++ /dev/null @@ -1,1444 +0,0 @@ -/* - * Python-C wrapper of FITPACK (by P. Dierckx) (in netlib known as dierckx) - * Author: Pearu Peterson - * June 1.-4., 1999 - * June 7. 1999 - * $Revision$ - * $Date$ - */ - -/* module_methods: - * {"_curfit", fitpack_curfit, METH_VARARGS, doc_curfit}, - * {"_spl_", fitpack_spl_, METH_VARARGS, doc_spl_}, - * {"_splint", fitpack_splint, METH_VARARGS, doc_splint}, - * {"_sproot", fitpack_sproot, METH_VARARGS, doc_sproot}, - * {"_spalde", fitpack_spalde, METH_VARARGS, doc_spalde}, - * {"_parcur", fitpack_parcur, METH_VARARGS, doc_parcur}, - * {"_surfit", fitpack_surfit, METH_VARARGS, doc_surfit}, - * {"_bispev", fitpack_bispev, METH_VARARGS, doc_bispev}, - * {"_insert", fitpack_insert, METH_VARARGS, doc_insert}, - */ - -/* link libraries: (one item per line) - ddierckx - */ -/* python files: (to be imported to Multipack.py) - fitpack.py - */ - -#if defined(UPPERCASE_FORTRAN) - #if defined(NO_APPEND_FORTRAN) - /* nothing to do */ - #else - #define CURFIT CURFIT_ - #define PERCUR PERCUR_ - #define SPALDE SPALDE_ - #define SPLDER SPLDER_ - #define SPLEV SPLEV_ - #define SPLINT SPLINT_ - #define SPROOT SPROOT_ - #define PARCUR PARCUR_ - #define CLOCUR CLOCUR_ - 
#define SURFIT SURFIT_ - #define BISPEV BISPEV_ - #define PARDER PARDER_ - #define INSERT INSERT_ - #endif -#else - #if defined(NO_APPEND_FORTRAN) - #define CURFIT curfit - #define PERCUR percur - #define SPALDE spalde - #define SPLDER splder - #define SPLEV splev - #define SPLINT splint - #define SPROOT sproot - #define PARCUR parcur - #define CLOCUR clocur - #define SURFIT surfit - #define BISPEV bispev - #define PARDER parder - #define INSERT insert - #else - #define CURFIT curfit_ - #define PERCUR percur_ - #define SPALDE spalde_ - #define SPLDER splder_ - #define SPLEV splev_ - #define SPLINT splint_ - #define SPROOT sproot_ - #define PARCUR parcur_ - #define CLOCUR clocur_ - #define SURFIT surfit_ - #define BISPEV bispev_ - #define PARDER parder_ - #define INSERT insert_ - #endif -#endif - -void CURFIT(int*,int*,double*,double*,double*,double*, - double*,int*,double*,int*,int*,double*,double*, - double*,double*,int*,int*,int*); -void PERCUR(int*,int*,double*,double*,double*,int*, - double*,int*,int*,double*,double*,double*, - double*,int*,int*,int*); -void SPALDE(double*,int*,double*,int*,double*,double*,int*); -void SPLDER(double*,int*,double*,int*,int*,double*, - double*,int*,int*,double*,int*); -void SPLEV(double*,int*,double*,int*,double*,double*,int*,int*,int*); -double SPLINT(double*,int*,double*,int*,double*,double*,double*); -void SPROOT(double*,int*,double*,double*,int*,int*,int*); -void PARCUR(int*,int*,int*,int*,double*,int*,double*, - double*,double*,double*,int*,double*,int*,int*, - double*,int*,double*,double*,double*,int*,int*,int*); -void CLOCUR(int*,int*,int*,int*,double*,int*,double*, - double*,int*,double*,int*,int*,double*,int*, - double*,double*,double*,int*,int*,int*); -void SURFIT(int*,int*,double*,double*,double*,double*, - double*,double*,double*,double*,int*,int*,double*, - int*,int*,int*,double*,int*,double*,int*,double*, - double*,double*,double*,int*,double*,int*,int*,int*,int*); -void 
BISPEV(double*,int*,double*,int*,double*,int*,int*, - double*,int*,double*,int*,double*,double*,int*, - int*,int*,int*); -void PARDER(double*,int*,double*,int*,double*,int*,int*, - int*,int*,double*,int*,double*,int*,double*, - double*,int*,int*,int*,int*); -void INSERT(int*,double*,int*,double*,int*,double*,double*, - int*,double*,int*,int*); - -/* Note that curev, cualde need no interface. */ - -static char doc_bispev[] = " [z,ier] = _bispev(tx,ty,c,kx,ky,x,y,nux,nuy)"; -static PyObject * -fitpack_bispev(PyObject *dummy, PyObject *args) -{ - int nx, ny, kx, ky, mx, my, lwrk, *iwrk, kwrk, ier, lwa, nux, nuy; - npy_intp mxy; - double *tx, *ty, *c, *x, *y, *z, *wrk, *wa = NULL; - PyArrayObject *ap_x = NULL, *ap_y = NULL, *ap_z = NULL, *ap_tx = NULL; - PyArrayObject *ap_ty = NULL, *ap_c = NULL; - PyObject *x_py = NULL, *y_py = NULL, *c_py = NULL, *tx_py = NULL, *ty_py = NULL; - - if (!PyArg_ParseTuple(args, "OOOiiOOii",&tx_py,&ty_py,&c_py,&kx,&ky, - &x_py,&y_py,&nux,&nuy)) { - return NULL; - } - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x_py, PyArray_DOUBLE, 0, 1); - ap_y = (PyArrayObject *)PyArray_ContiguousFromObject(y_py, PyArray_DOUBLE, 0, 1); - ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, PyArray_DOUBLE, 0, 1); - ap_tx = (PyArrayObject *)PyArray_ContiguousFromObject(tx_py, PyArray_DOUBLE, 0, 1); - ap_ty = (PyArrayObject *)PyArray_ContiguousFromObject(ty_py, PyArray_DOUBLE, 0, 1); - if (ap_x == NULL - || ap_y == NULL - || ap_c == NULL - || ap_tx == NULL - || ap_ty == NULL) { - goto fail; - } - x = (double *) ap_x->data; - y = (double *) ap_y->data; - c = (double *) ap_c->data; - tx = (double *) ap_tx->data; - ty = (double *) ap_ty->data; - nx = ap_tx->dimensions[0]; - ny = ap_ty->dimensions[0]; - mx = ap_x->dimensions[0]; - my = ap_y->dimensions[0]; - mxy = mx*my; - ap_z = (PyArrayObject *)PyArray_SimpleNew(1,&mxy,PyArray_DOUBLE); - z = (double *) ap_z->data; - if (nux || nuy) { - lwrk = mx*(kx + 1 - nux) + my*(ky + 1 - nuy) + (nx - kx - 
1)*(ny - ky - 1); - } - else { - lwrk = mx*(kx + 1) + my*(ky + 1); - } - kwrk = mx + my; - lwa = lwrk + kwrk; - if ((wa = (double *)malloc(lwa*sizeof(double))) == NULL) { - PyErr_NoMemory(); - goto fail; - } - wrk = wa; - iwrk = (int *)(wrk + lwrk); - if (nux || nuy) { - PARDER(tx, &nx, ty, &ny, c, &kx, &ky, &nux, &nuy, x, &mx, y, &my, z, - wrk, &lwrk, iwrk, &kwrk, &ier); - } - else { - BISPEV(tx, &nx, ty, &ny, c, &kx, &ky, x, &mx, y, &my, z, wrk, &lwrk, - iwrk, &kwrk, &ier); - } - - if (wa) { - free(wa); - } - Py_DECREF(ap_x); - Py_DECREF(ap_y); - Py_DECREF(ap_c); - Py_DECREF(ap_tx); - Py_DECREF(ap_ty); - return Py_BuildValue("Ni",PyArray_Return(ap_z),ier); - -fail: - if (wa) { - free(wa); - } - Py_XDECREF(ap_x); - Py_XDECREF(ap_y); - Py_XDECREF(ap_z); - Py_XDECREF(ap_c); - Py_XDECREF(ap_tx); - Py_XDECREF(ap_ty); - return NULL; -} - -static char doc_surfit[] = " [tx,ty,c,o] = _surfit(x, y, z, w, xb, xe, yb, ye,"\ - " kx,ky,iopt,s,eps,tx,ty,nxest,nyest,wrk,lwrk1,lwrk2)"; -static PyObject * -fitpack_surfit(PyObject *dummy, PyObject *args) -{ - int iopt, m, kx, ky, nxest, nyest, lwrk1, lwrk2, *iwrk, kwrk, ier; - int lwa, nxo, nyo, i, lcest, nmax; - npy_intp nx, ny, lc; - double *x, *y, *z, *w, xb, xe, yb, ye, s, *tx, *ty, *c, fp; - double *wrk1, *wrk2, *wa = NULL, eps; - PyArrayObject *ap_x = NULL, *ap_y = NULL, *ap_z, *ap_w = NULL; - PyArrayObject *ap_tx = NULL,*ap_ty = NULL,*ap_c = NULL, *ap_wrk = NULL; - PyObject *x_py = NULL, *y_py = NULL, *z_py = NULL, *w_py = NULL; - PyObject *tx_py = NULL, *ty_py = NULL, *wrk_py = NULL; - - nx = ny = ier = nxo = nyo = 0; - if (!PyArg_ParseTuple(args, "OOOOddddiiiddOOiiOii", - &x_py, &y_py, &z_py, &w_py, &xb, &xe, &yb, &ye, - &kx, &ky, &iopt, &s, &eps, &tx_py, &ty_py, &nxest, - &nyest, &wrk_py, &lwrk1, &lwrk2)) { - return NULL; - } - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x_py, PyArray_DOUBLE, 0, 1); - ap_y = (PyArrayObject *)PyArray_ContiguousFromObject(y_py, PyArray_DOUBLE, 0, 1); - ap_z = (PyArrayObject 
*)PyArray_ContiguousFromObject(z_py, PyArray_DOUBLE, 0, 1); - ap_w = (PyArrayObject *)PyArray_ContiguousFromObject(w_py, PyArray_DOUBLE, 0, 1); - ap_wrk=(PyArrayObject *)PyArray_ContiguousFromObject(wrk_py, PyArray_DOUBLE, 0, 1); - /*ap_iwrk=(PyArrayObject *)PyArray_ContiguousFromObject(iwrk_py, PyArray_INT, 0, 1);*/ - if (ap_x == NULL - || ap_y == NULL - || ap_z == NULL - || ap_w == NULL - || ap_wrk == NULL) { - goto fail; - } - x = (double *) ap_x->data; - y = (double *) ap_y->data; - z = (double *) ap_z->data; - w = (double *) ap_w->data; - m = ap_x->dimensions[0]; - nmax = nxest; - if (nmax < nyest) { - nmax = nyest; - } - lcest = (nxest - kx - 1)*(nyest - ky - 1); - kwrk = m + (nxest - 2*kx - 1)*(nyest - 2*ky - 1); - lwa = 2*nmax + lcest + lwrk1 + lwrk2 + kwrk; - if ((wa = (double *)malloc(lwa*sizeof(double))) == NULL) { - PyErr_NoMemory(); - goto fail; - } - tx = wa; - ty = tx + nmax; - c = ty + nmax; - wrk1 = c + lcest; - iwrk = (int *)(wrk1 + lwrk1); - wrk2 = (double *)(iwrk + kwrk); - if (iopt) { - ap_tx = (PyArrayObject *)PyArray_ContiguousFromObject(tx_py, PyArray_DOUBLE, 0, 1); - ap_ty = (PyArrayObject *)PyArray_ContiguousFromObject(ty_py, PyArray_DOUBLE, 0, 1); - if (ap_tx == NULL || ap_ty == NULL) { - goto fail; - } - nx = nxo = ap_tx->dimensions[0]; - ny = nyo = ap_ty->dimensions[0]; - memcpy(tx,ap_tx->data,nx*sizeof(double)); - memcpy(ty,ap_ty->data,ny*sizeof(double)); - } - if (iopt==1) { - lc = (nx - kx - 1)*(ny - ky - 1); - memcpy(wrk1, ap_wrk->data, lc*sizeof(double)); - /*memcpy(iwrk,ap_iwrk->data,n*sizeof(int));*/ - } - SURFIT(&iopt, &m, x, y, z, w, &xb, &xe, &yb, &ye, &kx, &ky, - &s, &nxest, &nyest, &nmax, &eps, &nx, tx, &ny, ty, - c, &fp, wrk1, &lwrk1, wrk2, &lwrk2, iwrk, &kwrk, &ier); - i = 0; - while ((ier > 10) && (i++ < 5)) { - lwrk2 = ier; - if ((wrk2 = (double *)malloc(lwrk2*sizeof(double))) == NULL) { - PyErr_NoMemory(); - goto fail; - } - SURFIT(&iopt, &m, x, y, z, w, &xb, &xe, &yb, &ye, &kx, &ky, - &s, &nxest, &nyest, &nmax, &eps, 
&nx, tx, &ny, ty, - c, &fp, wrk1, &lwrk1, wrk2, &lwrk2, iwrk, &kwrk, &ier); - if (wrk2) { - free(wrk2); - } - } - if (ier == 10) { - PyErr_SetString(PyExc_ValueError, "Invalid inputs."); - goto fail; - } - lc = (nx - kx - 1)*(ny - ky - 1); - Py_XDECREF(ap_tx); - Py_XDECREF(ap_ty); - ap_tx = (PyArrayObject *)PyArray_SimpleNew(1, &nx, PyArray_DOUBLE); - ap_ty = (PyArrayObject *)PyArray_SimpleNew(1, &ny, PyArray_DOUBLE); - ap_c = (PyArrayObject *)PyArray_SimpleNew(1, &lc, PyArray_DOUBLE); - if (ap_tx == NULL - || ap_ty == NULL - || ap_c == NULL) { - goto fail; - } - if ((iopt == 0)||(nx > nxo)||(ny > nyo)) { - Py_XDECREF(ap_wrk); - ap_wrk = (PyArrayObject *)PyArray_SimpleNew(1, &lc, PyArray_DOUBLE); - if (ap_wrk == NULL) { - goto fail; - } - /*ap_iwrk = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_INT);*/ - } - if (ap_wrk->dimensions[0] < lc) { - Py_XDECREF(ap_wrk); - ap_wrk = (PyArrayObject *)PyArray_SimpleNew(1, &lc, PyArray_DOUBLE); - if (ap_wrk == NULL) { - goto fail; - } - } - memcpy(ap_tx->data, tx, nx*sizeof(double)); - memcpy(ap_ty->data, ty, ny*sizeof(double)); - memcpy(ap_c->data, c, lc*sizeof(double)); - memcpy(ap_wrk->data, wrk1, lc*sizeof(double)); - /*memcpy(ap_iwrk->data,iwrk,n*sizeof(int));*/ - if (wa) { - free(wa); - } - Py_DECREF(ap_x); - Py_DECREF(ap_y); - Py_DECREF(ap_z); - Py_DECREF(ap_w); - return Py_BuildValue("NNN{s:N,s:i,s:d}",PyArray_Return(ap_tx), - PyArray_Return(ap_ty),PyArray_Return(ap_c), - "wrk",PyArray_Return(ap_wrk), - "ier",ier,"fp",fp); - -fail: - if (wa) { - free(wa); - } - Py_XDECREF(ap_x); - Py_XDECREF(ap_y); - Py_XDECREF(ap_z); - Py_XDECREF(ap_w); - Py_XDECREF(ap_tx); - Py_XDECREF(ap_ty); - Py_XDECREF(ap_wrk); - /*Py_XDECREF(ap_iwrk);*/ - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "An error occurred."); - } - return NULL; -} - - -static char doc_parcur[] = " [t,c,o] = _parcur(x,w,u,ub,ue,k,iopt,ipar,s,t,nest,wrk,iwrk,per)"; -static PyObject * -fitpack_parcur(PyObject *dummy, PyObject *args) -{ - int k, 
iopt, ipar, nest, *iwrk, idim, m, mx, no=0, nc, ier, lwa, lwrk, i, per; - npy_intp n=0, lc; - double *x, *w, *u, *c, *t, *wrk, *wa=NULL, ub, ue, fp, s; - PyObject *x_py = NULL, *u_py = NULL, *w_py = NULL, *t_py = NULL; - PyObject *wrk_py=NULL, *iwrk_py=NULL; - PyArrayObject *ap_x = NULL, *ap_u = NULL, *ap_w = NULL, *ap_t = NULL, *ap_c = NULL; - PyArrayObject *ap_wrk = NULL, *ap_iwrk = NULL; - - if (!PyArg_ParseTuple(args, "OOOddiiidOiOOi", &x_py, &w_py, &u_py, &ub, &ue, - &k, &iopt, &ipar, &s, &t_py, &nest, &wrk_py, &iwrk_py, &per)) { - return NULL; - } - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x_py, PyArray_DOUBLE, 0, 1); - ap_u = (PyArrayObject *)PyArray_ContiguousFromObject(u_py, PyArray_DOUBLE, 0, 1); - ap_w = (PyArrayObject *)PyArray_ContiguousFromObject(w_py, PyArray_DOUBLE, 0, 1); - ap_wrk=(PyArrayObject *)PyArray_ContiguousFromObject(wrk_py, PyArray_DOUBLE, 0, 1); - ap_iwrk=(PyArrayObject *)PyArray_ContiguousFromObject(iwrk_py, PyArray_INT, 0, 1); - if (ap_x == NULL - || ap_u == NULL - || ap_w == NULL - || ap_wrk == NULL - || ap_iwrk == NULL) { - goto fail; - } - x = (double *) ap_x->data; - u = (double *) ap_u->data; - w = (double *) ap_w->data; - m = ap_w->dimensions[0]; - mx = ap_x->dimensions[0]; - idim = mx/m; - if (per) { - lwrk = m*(k + 1) + nest*(7 + idim + 5*k); - } - else { - lwrk = m*(k + 1) + nest*(6 + idim + 3*k); - } - nc = idim*nest; - lwa = nc + 2*nest + lwrk; - if ((wa = (double *)malloc(lwa*sizeof(double))) == NULL) { - PyErr_NoMemory(); - goto fail; - } - t = wa; - c = t + nest; - wrk = c + nc; - iwrk = (int *)(wrk + lwrk); - if (iopt) { - ap_t=(PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - if (ap_t == NULL) { - goto fail; - } - n = no = ap_t->dimensions[0]; - memcpy(t, ap_t->data, n*sizeof(double)); - } - if (iopt == 1) { - memcpy(wrk, ap_wrk->data, n*sizeof(double)); - memcpy(iwrk, ap_iwrk->data, n*sizeof(int)); - } - if (per) { - CLOCUR(&iopt, &ipar, &idim, &m, u, &mx, x, w, &k, &s, &nest, - 
&n, t, &nc, c, &fp, wrk, &lwrk, iwrk, &ier); - } - else { - PARCUR(&iopt, &ipar, &idim, &m, u, &mx, x, w, &ub, &ue, &k, - &s, &nest, &n, t, &nc, c, &fp, wrk, &lwrk, iwrk, &ier); - } - if (ier == 10) { - goto fail; - } - if (ier > 0 && n == 0) { - n = 1; - } - lc = (n - k - 1)*idim; - ap_t = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_DOUBLE); - ap_c = (PyArrayObject *)PyArray_SimpleNew(1, &lc, PyArray_DOUBLE); - if (ap_t == NULL || ap_c == NULL) { - goto fail; - } - if (iopt == 0|| n > no) { - ap_wrk = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_DOUBLE); - ap_iwrk = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_INT); - if (ap_wrk == NULL || ap_iwrk == NULL) { - goto fail; - } - } - memcpy(ap_t->data, t, n*sizeof(double)); - for (i = 0; i < idim; i++) - memcpy((double *)ap_c->data + i*(n - k - 1), c + i*n, (n - k - 1)*sizeof(double)); - memcpy(ap_wrk->data, wrk, n*sizeof(double)); - memcpy(ap_iwrk->data, iwrk, n*sizeof(int)); - if (wa) { - free(wa); - } - Py_DECREF(ap_x); - Py_DECREF(ap_w); - return Py_BuildValue("NN{s:N,s:d,s:d,s:N,s:N,s:i,s:d}", PyArray_Return(ap_t), - PyArray_Return(ap_c), "u", PyArray_Return(ap_u), "ub", ub, "ue", ue, - "wrk", PyArray_Return(ap_wrk), "iwrk", PyArray_Return(ap_iwrk), - "ier", ier, "fp",fp); -fail: - if (wa) { - free(wa); - } - Py_XDECREF(ap_x); - Py_XDECREF(ap_u); - Py_XDECREF(ap_w); - Py_XDECREF(ap_t); - Py_XDECREF(ap_wrk); - Py_XDECREF(ap_iwrk); - return NULL; -} - -static char doc_curfit[] = " [t,c,o] = _curfit(x,y,w,xb,xe,k,iopt,s,t,nest,wrk,iwrk,per)"; -static PyObject * -fitpack_curfit(PyObject *dummy, PyObject *args) -{ - int iopt, m, k, nest, lwrk, *iwrk, ier, lwa, no=0, per; - npy_intp n, lc; - double *x, *y, *w, xb, xe, s, *t, *c, fp, *wrk, *wa = NULL; - PyArrayObject *ap_x = NULL, *ap_y = NULL, *ap_w = NULL, *ap_t = NULL, *ap_c = NULL; - PyArrayObject *ap_wrk = NULL, *ap_iwrk = NULL; - PyObject *x_py = NULL, *y_py = NULL, *w_py = NULL, *t_py = NULL; - PyObject *wrk_py=NULL, *iwrk_py=NULL; - - if 
(!PyArg_ParseTuple(args, "OOOddiidOiOOi", &x_py, &y_py, &w_py, &xb, &xe, - &k, &iopt, &s, &t_py, &nest, &wrk_py, &iwrk_py, &per)) { - return NULL; - } - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x_py, PyArray_DOUBLE, 0, 1); - ap_y = (PyArrayObject *)PyArray_ContiguousFromObject(y_py, PyArray_DOUBLE, 0, 1); - ap_w = (PyArrayObject *)PyArray_ContiguousFromObject(w_py, PyArray_DOUBLE, 0, 1); - ap_wrk = (PyArrayObject *)PyArray_ContiguousFromObject(wrk_py, PyArray_DOUBLE, 0, 1); - ap_iwrk = (PyArrayObject *)PyArray_ContiguousFromObject(iwrk_py, PyArray_INT, 0, 1); - if (ap_x == NULL - || ap_y == NULL - || ap_w == NULL - || ap_wrk == NULL - || ap_iwrk == NULL) { - goto fail; - } - x = (double *) ap_x->data; - y = (double *) ap_y->data; - w = (double *) ap_w->data; - m = ap_x->dimensions[0]; - if (per) { - lwrk = m*(k + 1) + nest*(8 + 5*k); - } - else { - lwrk = m*(k + 1) + nest*(7 + 3*k); - } - lwa = 3*nest + lwrk; - if ((wa = (double *)malloc(lwa*sizeof(double))) == NULL) { - PyErr_NoMemory(); - goto fail; - } - t = wa; - c = t + nest; - wrk = c + nest; - iwrk = (int *)(wrk + lwrk); - if (iopt) { - ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - if (ap_t == NULL) { - goto fail; - } - n = no = ap_t->dimensions[0]; - memcpy(t, ap_t->data, n*sizeof(double)); - } - if (iopt == 1) { - memcpy(wrk, ap_wrk->data, n*sizeof(double)); - memcpy(iwrk, ap_iwrk->data, n*sizeof(int)); - } - if (per) - PERCUR(&iopt, &m, x, y, w, &k, &s, &nest, &n, t, c, &fp, wrk, - &lwrk, iwrk, &ier); - else { - CURFIT(&iopt, &m, x, y, w, &xb, &xe, &k, &s, &nest, &n, t, c, - &fp, wrk, &lwrk, iwrk, &ier); - } - if (ier == 10) { - PyErr_SetString(PyExc_ValueError, "Invalid inputs."); - goto fail; - } - lc = n - k - 1; - if (!iopt) { - ap_t = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_DOUBLE); - if (ap_t == NULL) { - goto fail; - } - } - ap_c = (PyArrayObject *)PyArray_SimpleNew(1, &lc, PyArray_DOUBLE); - if (ap_c == NULL) { - goto fail; - } - if (iopt 
== 0 || n > no) { - Py_XDECREF(ap_wrk); - Py_XDECREF(ap_iwrk); - ap_wrk = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_DOUBLE); - ap_iwrk = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_INT); - if (ap_wrk == NULL || ap_iwrk == NULL) { - goto fail; - } - } - memcpy(ap_t->data, t, n*sizeof(double)); - memcpy(ap_c->data, c, lc*sizeof(double)); - memcpy(ap_wrk->data, wrk, n*sizeof(double)); - memcpy(ap_iwrk->data, iwrk, n*sizeof(int)); - if (wa) { - free(wa); - } - Py_DECREF(ap_x); - Py_DECREF(ap_y); - Py_DECREF(ap_w); - return Py_BuildValue("NN{s:N,s:N,s:i,s:d}", PyArray_Return(ap_t), - PyArray_Return(ap_c), "wrk", PyArray_Return(ap_wrk), - "iwrk", PyArray_Return(ap_iwrk), "ier", ier, "fp", fp); - -fail: - if (wa) { - free(wa); - } - Py_XDECREF(ap_x); - Py_XDECREF(ap_y); - Py_XDECREF(ap_w); - Py_XDECREF(ap_t); - Py_XDECREF(ap_wrk); - Py_XDECREF(ap_iwrk); - return NULL; -} - -static char doc_spl_[] = " [y,ier] = _spl_(x,nu,t,c,k,e)"; -static PyObject * -fitpack_spl_(PyObject *dummy, PyObject *args) -{ - int n, nu, ier, k, e=0; - npy_intp m; - double *x, *y, *t, *c, *wrk = NULL; - PyArrayObject *ap_x = NULL, *ap_y = NULL, *ap_t = NULL, *ap_c = NULL; - PyObject *x_py = NULL, *t_py = NULL, *c_py = NULL; - - if (!PyArg_ParseTuple(args, "OiOOii", &x_py, &nu, &t_py, &c_py, &k, &e)) { - return NULL; - } - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x_py, PyArray_DOUBLE, 0, 1); - ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, PyArray_DOUBLE, 0, 1); - if ((ap_x == NULL || ap_t == NULL || ap_c == NULL)) { - goto fail; - } - x = (double *)ap_x->data; - m = ap_x->dimensions[0]; - t = (double *)ap_t->data; - c = (double *)ap_c->data; - n = ap_t->dimensions[0]; - ap_y = (PyArrayObject *)PyArray_SimpleNew(1,&m,PyArray_DOUBLE); - if (ap_y == NULL) { - goto fail; - } - y = (double *)ap_y->data; - if ((wrk = (double *)malloc(n*sizeof(double))) == NULL) { - PyErr_NoMemory(); 
- goto fail; - } - if (nu) { - SPLDER(t, &n, c, &k, &nu, x, y, &m, &e, wrk, &ier); - } - else { - SPLEV(t, &n, c, &k, x, y, &m, &e, &ier); - } - if (wrk) { - free(wrk); - } - Py_DECREF(ap_x); - Py_DECREF(ap_c); - Py_DECREF(ap_t); - return Py_BuildValue("Ni", PyArray_Return(ap_y), ier); - -fail: - if (wrk) { - free(wrk); - } - Py_XDECREF(ap_x); - Py_XDECREF(ap_c); - Py_XDECREF(ap_t); - return NULL; -} - -static char doc_splint[] = " [aint,wrk] = _splint(t,c,k,a,b)"; -static PyObject * -fitpack_splint(PyObject *dummy, PyObject *args) -{ - int k; - npy_intp n; - double *t, *c, *wrk = NULL, a, b, aint; - PyArrayObject *ap_t = NULL, *ap_c = NULL; - PyArrayObject *ap_wrk = NULL; - PyObject *t_py = NULL, *c_py = NULL; - - if (!PyArg_ParseTuple(args, "OOidd",&t_py,&c_py,&k,&a,&b)) { - return NULL; - } - ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, PyArray_DOUBLE, 0, 1); - if ((ap_t == NULL || ap_c == NULL)) { - goto fail; - } - t = (double *)ap_t->data; - c = (double *)ap_c->data; - n = ap_t->dimensions[0]; - ap_wrk = (PyArrayObject *)PyArray_SimpleNew(1, &n, PyArray_DOUBLE); - if (ap_wrk == NULL) { - goto fail; - } - wrk = (double *)ap_wrk->data; - aint = SPLINT(t,&n,c,&k,&a,&b,wrk); - Py_DECREF(ap_c); - Py_DECREF(ap_t); - return Py_BuildValue("dN", aint, PyArray_Return(ap_wrk)); - -fail: - Py_XDECREF(ap_c); - Py_XDECREF(ap_t); - return NULL; -} - -static char doc_sproot[] = " [z,ier] = _sproot(t,c,k,mest)"; -static PyObject * -fitpack_sproot(PyObject *dummy, PyObject *args) -{ - int n, k, mest, ier; - npy_intp m; - double *t, *c, *z = NULL; - PyArrayObject *ap_t = NULL, *ap_c = NULL; - PyArrayObject *ap_z = NULL; - PyObject *t_py = NULL, *c_py = NULL; - - if (!PyArg_ParseTuple(args, "OOii",&t_py,&c_py,&k,&mest)) { - return NULL; - } - ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, 
PyArray_DOUBLE, 0, 1); - if ((ap_t == NULL || ap_c == NULL)) { - goto fail; - } - t = (double *)ap_t->data; - c = (double *)ap_c->data; - n = ap_t->dimensions[0]; - if ((z = (double *)malloc(mest*sizeof(double))) == NULL) { - PyErr_NoMemory(); - goto fail; - } - SPROOT(t,&n,c,z,&mest,&m,&ier); - if (ier==10) { - m = 0; - } - ap_z = (PyArrayObject *)PyArray_SimpleNew(1, &m, PyArray_DOUBLE); - if (ap_z == NULL) { - goto fail; - } - memcpy(ap_z->data,z,m*sizeof(double)); - if (z) { - free(z); - } - Py_DECREF(ap_c); - Py_DECREF(ap_t); - return Py_BuildValue("Ni", PyArray_Return(ap_z), ier); - -fail: - if (z) { - free(z); - } - Py_XDECREF(ap_c); - Py_XDECREF(ap_t); - return NULL; -} - -static char doc_spalde[] = " [d,ier] = _spalde(t,c,k,x)"; -static PyObject * -fitpack_spalde(PyObject *dummy, PyObject *args) -{ - int n, k, ier; - npy_intp k1; - double *t, *c, *d = NULL, x; - PyArrayObject *ap_t = NULL, *ap_c = NULL, *ap_d = NULL; - PyObject *t_py = NULL, *c_py = NULL; - - if (!PyArg_ParseTuple(args, "OOid",&t_py,&c_py,&k,&x)) { - return NULL; - } - ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, PyArray_DOUBLE, 0, 1); - if ((ap_t == NULL || ap_c == NULL)) { - goto fail; - } - t = (double *)ap_t->data; - c = (double *)ap_c->data; - n = ap_t->dimensions[0]; - k1 = k + 1; - ap_d = (PyArrayObject *)PyArray_SimpleNew(1, &k1, PyArray_DOUBLE); - if (ap_d == NULL) { - goto fail; - } - d = (double *)ap_d->data; - SPALDE(t, &n, c, &k1, &x, d, &ier); - Py_DECREF(ap_c); - Py_DECREF(ap_t); - return Py_BuildValue("Ni", PyArray_Return(ap_d), ier); - -fail: - Py_XDECREF(ap_c); - Py_XDECREF(ap_t); - return NULL; -} - -static char doc_insert[] = " [tt,cc,ier] = _insert(iopt,t,c,k,x,m)"; -static PyObject * -fitpack_insert(PyObject *dummy, PyObject*args) -{ - int iopt, n, nn, k, ier, m; - npy_intp nest; - double x; - double *t, *c, *tt, *cc; - PyArrayObject *ap_t = NULL, *ap_c = NULL, *ap_tt = 
NULL, *ap_cc = NULL; - PyObject *t_py = NULL, *c_py = NULL; - PyObject *ret = NULL; - - if (!PyArg_ParseTuple(args, "iOOidi",&iopt,&t_py,&c_py,&k, &x, &m)) { - return NULL; - } - ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); - ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, PyArray_DOUBLE, 0, 1); - if (ap_t == NULL || ap_c == NULL) { - goto fail; - } - t = (double *)ap_t->data; - c = (double *)ap_c->data; - n = ap_t->dimensions[0]; - nest = n + m; - ap_tt = (PyArrayObject *)PyArray_SimpleNew(1, &nest, PyArray_DOUBLE); - ap_cc = (PyArrayObject *)PyArray_SimpleNew(1, &nest, PyArray_DOUBLE); - if (ap_tt == NULL || ap_cc == NULL) { - goto fail; - } - tt = (double *)ap_tt->data; - cc = (double *)ap_cc->data; - for ( ; n < nest; n++) { - INSERT(&iopt, t, &n, c, &k, &x, tt, &nn, cc, &nest, &ier); - if (ier) { - break; - } - t = tt; - c = cc; - } - Py_DECREF(ap_c); - Py_DECREF(ap_t); - ret = Py_BuildValue("NNi", PyArray_Return(ap_tt), PyArray_Return(ap_cc), ier); - return ret; - -fail: - Py_XDECREF(ap_c); - Py_XDECREF(ap_t); - return NULL; -} - - -static void -_deBoor_D(double *t, double x, int k, int ell, int m, double *result) { - /* - * On completion the result array stores - * the k+1 non-zero values of beta^(m)_i,k(x): for i=ell, ell-1, ell-2, ell-k. - * Where t[ell] <= x < t[ell+1]. - */ - /* - * Implements a recursive algorithm similar to the original algorithm of - * deBoor. - */ - double *hh = result + k + 1; - double *h = result; - double xb, xa, w; - int ind, j, n; - - /* - * Perform k-m "standard" deBoor iterations - * so that h contains the k+1 non-zero values of beta_{ell,k-m}(x) - * needed to calculate the remaining derivatives. 
- */ - result[0] = 1.0; - for (j = 1; j <= k - m; j++) { - memcpy(hh, h, j*sizeof(double)); - h[0] = 0.0; - for (n = 1; n <= j; n++) { - ind = ell + n; - xb = t[ind]; - xa = t[ind - j]; - if (xb == xa) { - h[n] = 0.0; - continue; - } - w = hh[n - 1]/(xb - xa); - h[n - 1] += w*(xb - x); - h[n] = w*(x - xa); - } - } - - /* - * Now do m "derivative" recursions - * to convert the values of beta into the mth derivative - */ - for (j = k - m + 1; j <= k; j++) { - memcpy(hh, h, j*sizeof(double)); - h[0] = 0.0; - for (n = 1; n <= j; n++) { - ind = ell + n; - xb = t[ind]; - xa = t[ind - j]; - if (xb == xa) { - h[m] = 0.0; - continue; - } - w = j*hh[n - 1]/(xb - xa); - h[n - 1] -= w; - h[n] = w; - } - } -} - - -/* - * Given a set of (N+1) samples: A default set of knots is constructed - * using the samples xk plus 2*(K-1) additional knots where - * K = max(order,1) and the knots are chosen so that distances - * are symmetric around the first and last samples: x_0 and x_N. - * - * There should be a vector of N+K coefficients for the spline - * curve in coef. These coefficients form the curve as - * - * s(x) = sum(c_j B_{j,K}(x), j=-K..N-1) - * - * The spline function is evaluated at all points xx. - * The approximation interval is from xk[0] to xk[-1] - * Any xx outside that interval is set automatically to 0.0 - */ -static char doc_bspleval[] = "y = _bspleval(xx,xk,coef,k,{deriv (0)})\n" -"\n" -"The spline is defined by the approximation interval xk[0] to xk[-1],\n" -"the length of xk (N+1), the order of the spline, k, and \n" -"the number of coeficients N+k. 
The coefficients range from xk_{-K}\n" -"to xk_{N-1} inclusive and are all the coefficients needed to define\n" -"an arbitrary spline of order k, on the given approximation interval\n" -"\n" -"Extra knot points are internally added using knot-point symmetry \n" -"around xk[0] and xk[-1]"; - -static PyObject * -_bspleval(PyObject *dummy, PyObject *args) -{ - int k, kk, N, i, ell, dk, deriv = 0; - PyObject *xx_py = NULL, *coef_py = NULL, *x_i_py = NULL; - PyArrayObject *xx = NULL, *coef = NULL, *x_i = NULL, *yy = NULL; - PyArrayIterObject *xx_iter; - double *t = NULL, *h = NULL, *ptr; - double x0, xN, xN1, arg, sp, cval; - - if (!PyArg_ParseTuple(args, "OOOi|i", &xx_py, &x_i_py, &coef_py, &k, &deriv)) { - return NULL; - } - if (k < 0) { - PyErr_Format(PyExc_ValueError, - "order (%d) must be >=0", k); - return NULL; - } - if (deriv > k) { - PyErr_Format(PyExc_ValueError, - "derivative (%d) must be <= order (%d)", deriv, k); - return NULL; - } - kk = k; - if (k == 0) { - kk = 1; - } - dk = (k == 0 ? 
0 : 1); - x_i = (PyArrayObject *)PyArray_FROMANY(x_i_py, NPY_DOUBLE, 1, 1, NPY_ALIGNED); - coef = (PyArrayObject *)PyArray_FROMANY(coef_py, NPY_DOUBLE, 1, 1, NPY_ALIGNED); - xx = (PyArrayObject *)PyArray_FROMANY(xx_py, NPY_DOUBLE, 0, 0, NPY_ALIGNED); - if (x_i == NULL || coef == NULL || xx == NULL) { - goto fail; - } - - N = PyArray_DIM(x_i, 0) - 1; - if (PyArray_DIM(coef, 0) < N + k) { - PyErr_Format(PyExc_ValueError, - "too few coefficients (have %d need at least %d)", - PyArray_DIM(coef, 0), N + k); - goto fail; - } - - /* create output values */ - yy = (PyArrayObject *)PyArray_EMPTY(xx->nd, xx->dimensions, NPY_DOUBLE, 0); - if (yy == NULL) { - goto fail; - } - /* - * create dummy knot array with new knots inserted at the end - * selected as mirror symmetric versions of the old knots - */ - t = (double *)malloc(sizeof(double)*(N + 2*kk - 1)); - if (t == NULL) { - PyErr_NoMemory(); - goto fail; - } - x0 = *((double *)PyArray_DATA(x_i)); - xN = *((double *)PyArray_DATA(x_i) + N); - for (i = 0; i < kk - 1; i++) { /* fill in ends if kk > 1*/ - t[i] = 2*x0 - *((double *)(PyArray_GETPTR1(x_i, kk - 1 - i))); - t[kk+N+i] = 2*xN - *((double *)(PyArray_GETPTR1(x_i, N - 1 - i))); - } - ptr = t + (kk - 1); - for (i = 0; i <= N; i++) { - *ptr++ = *((double *)(PyArray_GETPTR1(x_i, i))); - } - - /* - * Create work array to hold computed non-zero values for - * the spline for a value of x. 
- */ - h = (double *)malloc(sizeof(double)*(2*kk+1)); - if (h == NULL) { - PyErr_NoMemory(); - goto fail; - } - - /* Determine the spline for each value of x */ - xx_iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)xx); - if (xx_iter == NULL) { - goto fail; - } - ptr = PyArray_DATA(yy); - - while(PyArray_ITER_NOTDONE(xx_iter)) { - arg = *((double *)PyArray_ITER_DATA(xx_iter)); - if ((arg < x0) || (arg > xN)) { - /* - * If we are outside the interpolation region, - * fill with zeros - */ - *ptr++ = 0.0; - } - else { - /* - * Find the interval that arg lies between in the set of knots - * t[ell] <= arg < t[ell+1] (last-knot use the previous interval) - */ - xN1 = *((double *)PyArray_DATA(x_i) + N-1); - if (arg >= xN1) { - ell = N + kk - 2; - } - else { - ell = kk - 1; - while ((arg > t[ell])) { - ell++; - } - if (arg != t[ell]) { - ell--; - } - } - - _deBoor_D(t, arg, k, ell, deriv, h); - - sp = 0.0; - for (i = 0; i <= k; i++) { - cval = *((double *)(PyArray_GETPTR1(coef, ell - i + dk))); - sp += cval * h[k - i]; - } - *ptr++ = sp; - } - PyArray_ITER_NEXT(xx_iter); - } - Py_DECREF(xx_iter); - Py_DECREF(x_i); - Py_DECREF(coef); - Py_DECREF(xx); - free(t); - free(h); - return PyArray_Return(yy); - -fail: - Py_XDECREF(xx); - Py_XDECREF(coef); - Py_XDECREF(x_i); - Py_XDECREF(yy); - if (t != NULL) { - free(t); - } - if (h != NULL) { - free(h); - } - return NULL; -} - - -/* - * Given a set of (N+1) sample positions: - * Construct the diagonals of the (N+1) x (N+K) matrix that is needed to find - * the coefficients of a spline fit of order K. - * Note that K>=2 because for K=0,1, the coefficients are just the - * sample values themselves. 
- * - * The equation that expresses the constraints is - * - * s(x_i) = sum(c_j B_{j,K}(x_i), j=-K..N-1) = w_i for i=0..N - * - * This is equivalent to - * - * w = B*c where c.T = [c_{-K}, c{-K+1}, ..., c_{N-1}] and - * w.T = [w_{0}, w_{1}, ..., w_{N}] - * - * Therefore B is an (N+1) times (N+K) matrix with entries - * - * B_{j,K}(x_i) for column j=-K..N-1 - * and row i=0..N - * - * This routine takes the N+1 sample positions and the order k and - * constructs the banded constraint matrix B (with k non-zero diagonals) - * - * The returned array is (N+1) times (N+K) ready to be either used - * to compute a minimally Kth-order derivative discontinuous spline - * or to be expanded with an additional K-1 constraints to be used in - * an exact spline specification. - */ -static char doc_bsplmat[] = "B = _bsplmat(order,xk)\n" -"Construct the constraint matrix for spline fitting of order k\n" -"given sample positions in xk.\n" -"\n" -"If xk is an integer (N+1), then the result is equivalent to\n" -"xk=arange(N+1)+x0 for any value of x0. 
This produces the\n" -"integer-spaced, or cardinal spline matrix a bit faster."; -static PyObject * -_bsplmat(PyObject *dummy, PyObject *args) { - int k, N, i, numbytes, j, equal; - npy_intp dims[2]; - PyObject *x_i_py = NULL; - PyArrayObject *x_i = NULL, *BB = NULL; - double *t = NULL, *h = NULL, *ptr; - double x0, xN, arg; - - if (!PyArg_ParseTuple(args, "iO", &k, &x_i_py)) { - return NULL; - } - if (k < 2) { - PyErr_Format(PyExc_ValueError, "order (%d) must be >=2", k); - return NULL; - } - - equal = 0; - N = PySequence_Length(x_i_py); - if (N == -1 && PyErr_Occurred()) { - PyErr_Clear(); - N = PyInt_AsLong(x_i_py); - if (N == -1 && PyErr_Occurred()) { - goto fail; - } - equal = 1; - } - N -= 1; - - /* create output matrix */ - dims[0] = N + 1; - dims[1] = N + k; - BB = (PyArrayObject *)PyArray_ZEROS(2, dims, NPY_DOUBLE, 0); - if (BB == NULL) { - goto fail; - } - - t = (double *)malloc(sizeof(double)*(N + 2*k - 1)); - if (t == NULL) { - PyErr_NoMemory(); - goto fail; - } - - /* - * Create work array to hold computed non-zero values for - * the spline for a value of x. 
- */ - h = (double *)malloc(sizeof(double)*(2*k + 1)); - if (h == NULL) { - PyErr_NoMemory(); - goto fail; - } - - numbytes = k*sizeof(double); - - if (equal) { - /* - * points equally spaced by 1 - * we run deBoor's algorithm one time with artificially created knots - * Then, we keep copying the result to every row - */ - - /* Create knots at equally-spaced locations from -(K-1) to N+K-1 */ - ptr = t; - for (i = -k + 1; i < N + k; i++) { - *ptr++ = i; - } - j = k - 1; - _deBoor_D(t, 0, k, k-1, 0, h); - ptr = PyArray_DATA(BB); - N = N+1; - for (i = 0; i < N; i++) { - memcpy(ptr, h, numbytes); - ptr += N + k; - } - goto finish; - } - - /* Not-equally spaced */ - x_i = (PyArrayObject *)PyArray_FROMANY(x_i_py, NPY_DOUBLE, 1, 1, NPY_ALIGNED); - if (x_i == NULL) { - return NULL; - } - /* - * create dummy knot array with new knots inserted at the end - * selected as mirror symmetric versions of the old knots - */ - x0 = *((double *)PyArray_DATA(x_i)); - xN = *((double *)PyArray_DATA(x_i) + N); - for (i = 0; i < k - 1; i++) { - /* fill in ends if k > 1*/ - t[i] = 2*x0 - *((double *)(PyArray_GETPTR1(x_i, k - 1 - i))); - t[k+N+i] = 2*xN - *((double *)(PyArray_GETPTR1(x_i, N - 1 - i))); - } - ptr = t + (k - 1); - for (i = 0; i <= N; i++) { - *ptr++ = *((double *)(PyArray_GETPTR1(x_i, i))); - } - - - /* - * Determine the K+1 non-zero values of the spline and place them in the - * correct location in the matrix for each row (along the diagonals). - * In fact, the last member is always zero so only K non-zero values - * are present. 
- */ - ptr = PyArray_DATA(BB); - for (i = 0, j = k - 1; i < N; i++, j++) { - arg = *((double *)PyArray_DATA(x_i) + i); - _deBoor_D(t, arg, k, j, 0, h); - memcpy(ptr, h, numbytes); - /* advance to next row shifted over one */ - ptr += (N + k + 1); - } - /* Last row is different the first coefficient is zero.*/ - _deBoor_D(t, xN, k, j - 1, 0, h); - memcpy(ptr, h + 1, numbytes); - -finish: - Py_XDECREF(x_i); - free(t); - free(h); - return (PyObject *)BB; - -fail: - Py_XDECREF(x_i); - Py_XDECREF(BB); - if (t != NULL) { - free(t); - } - if (h != NULL) { - free(h); - } - return NULL; -} - - - -/* - * Given a set of (N+1) sample positions: - * Construct the (N-1) x (N+K) error matrix J_{ij} such that - * - * for i=1..N-1, - * - * e_i = sum(J_{ij}c_{j},j=-K..N-1) - * - * is the discontinuity of the Kth derivative at the point i in the spline. - * - * This routine takes the N+1 sample positions and the order k and - * constructs the banded matrix J - * - * The returned array is (N+1) times (N+K) ready to be either used - * to compute a minimally Kth-order derivative discontinuous spline - * or to be expanded with an additional K-1 constraints to be used in - * an exact reconstruction approach. - */ -static char doc_bspldismat[] = "B = _bspldismat(order,xk)\n" -"Construct the kth derivative discontinuity jump constraint matrix \n" -"for spline fitting of order k given sample positions in xk.\n" -"\n" -"If xk is an integer (N+1), then the result is equivalent to\n" -"xk=arange(N+1)+x0 for any value of x0. This produces the\n" -"integer-spaced matrix a bit faster. 
If xk is a 2-tuple (N+1,dx)\n" -"then it produces the result as if the sample distance were dx"; -static PyObject * -_bspldismat(PyObject *dummy, PyObject *args) -{ - int k, N, i, j, equal, m; - npy_intp dims[2]; - PyObject *x_i_py = NULL; - PyArrayObject *x_i = NULL, *BB = NULL; - double *t = NULL, *h = NULL, *ptr, *dptr; - double x0, xN, dx; - - if (!PyArg_ParseTuple(args, "iO", &k, &x_i_py)) { - return NULL; - } - if (k < 2) { - PyErr_Format(PyExc_ValueError, "order (%d) must be >=2", k); - return NULL; - } - - equal = 0; - N = PySequence_Length(x_i_py); - if (N == 2 || (N == -1 && PyErr_Occurred())) { - PyErr_Clear(); - if (PyTuple_Check(x_i_py)) { - /* x_i_py = (N+1, dx) */ - N = PyInt_AsLong(PyTuple_GET_ITEM(x_i_py, 0)); - dx = PyFloat_AsDouble(PyTuple_GET_ITEM(x_i_py, 1)); - } - else { - N = PyInt_AsLong(x_i_py); - if (N == -1 && PyErr_Occurred()) { - goto fail; - } - dx = 1.0; - } - equal = 1; - } - N -= 1; - - if (N < 2) { - PyErr_Format(PyExc_ValueError, "too few samples (%d)", N); - return NULL; - } - /* create output matrix */ - dims[0] = N - 1; - dims[1] = N + k; - BB = (PyArrayObject *)PyArray_ZEROS(2, dims, NPY_DOUBLE, 0); - if (BB == NULL) { - goto fail; - } - t = (double *)malloc(sizeof(double)*(N+2*k-1)); - if (t == NULL) { - PyErr_NoMemory(); - goto fail; - } - - /* - * Create work array to hold computed non-zero values for - * the spline for a value of x. 
- */ - h = (double *)malloc(sizeof(double)*(2*k + 1)); - if (h == NULL) { - PyErr_NoMemory(); - goto fail; - } - - if (equal) { - /* - * points equally spaced by 1 - * we run deBoor's full derivative algorithm twice, subtract the results - * offset by one and then copy the result one time with artificially created knots - * Then, we keep copying the result to every row - */ - - /* Create knots at equally-spaced locations from -(K-1) to N+K-1 */ - double *tmp, factor; - int numbytes; - numbytes = (k + 2)*sizeof(double); - tmp = malloc(numbytes); - if (tmp == NULL) { - PyErr_NoMemory(); - goto fail; - } - ptr = t; - for (i = -k + 1; i < N + k; i++) { - *ptr++ = i; - } - j = k - 1; - _deBoor_D(t, 0, k, k-1, k, h); - ptr = tmp; - for (m = 0; m <= k; m++) { - *ptr++ = -h[m]; - } - _deBoor_D(t, 0, k, k, k, h); - ptr = tmp + 1; - for (m = 0; m <= k; m++) { - *ptr++ += h[m]; - } - if (dx != 1.0) { - factor = pow(dx, (double)k); - for (m = 0; m < k + 2; m++) { - tmp[m] /= factor; - } - } - ptr = PyArray_DATA(BB); - for (i = 0; i < N - 1; i++) { - memcpy(ptr, tmp, numbytes); - ptr += N + k + 1; - } - free(tmp); - goto finish; - } - - /* Not-equally spaced */ - x_i = (PyArrayObject *)PyArray_FROMANY(x_i_py, NPY_DOUBLE, 1, 1, NPY_ALIGNED); - if (x_i == NULL) { - return NULL; - } - /* - * create dummy knot array with new knots inserted at the end - * selected as mirror symmetric versions of the old knots - */ - x0 = *((double *)PyArray_DATA(x_i)); - xN = *((double *)PyArray_DATA(x_i) + N); - for (i = 0; i < k - 1; i++) { - /* fill in ends if k > 1*/ - t[i] = 2*x0 - *((double *)(PyArray_GETPTR1(x_i, k - 1 - i))); - t[k+N+i] = 2*xN - *((double *)(PyArray_GETPTR1(x_i, N - 1 - i))); - } - ptr = t + (k - 1); - for (i = 0; i <= N; i++) { - *ptr++ = *((double *)(PyArray_GETPTR1(x_i, i))); - } - - - /* - * Determine the K+1 non-zero values of the discontinuity jump matrix - * and place them in the correct location in the matrix for each row - * (along the diagonals). 
- * - * The matrix is - * - * J_{ij} = b^{(k)}_{j,k}(x^{+}_i) - b^{(k)}_{j,k}(x^{-}_i) - */ - ptr = PyArray_DATA(BB); - dptr = ptr; - for (i = 0, j = k - 1; i < N - 1; i++, j++) { - _deBoor_D(t, 0, k, j, k, h); - /* We need to copy over but negate the terms */ - for (m = 0; m <= k; m++) { - *ptr++ = -h[m]; - } - /* - * If we are past the first row, then we need to also add the current - * values result to the previous row - */ - if (i > 0) { - for (m = 0; m <= k; m++) { - *dptr++ += h[m]; - } - } - /* store location of last start position plus one.*/ - dptr = ptr - k; - /* advance to next row shifted over one */ - ptr += N; - } - /* We need to finish the result for the last row. */ - _deBoor_D(t, 0, k, j, k, h); - for (m = 0; m <= k; m++) { - *dptr++ += h[m]; - } - -finish: - Py_XDECREF(x_i); - free(t); - free(h); - return (PyObject *)BB; - -fail: - Py_XDECREF(x_i); - Py_XDECREF(BB); - if (t != NULL) { - free(t); - } - if (h != NULL) { - free(h); - } - return NULL; -} diff --git a/scipy-0.10.1/scipy/interpolate/src/_fitpackmodule.c b/scipy-0.10.1/scipy/interpolate/src/_fitpackmodule.c deleted file mode 100644 index 3a9e33c672..0000000000 --- a/scipy-0.10.1/scipy/interpolate/src/_fitpackmodule.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - Multipack project. - This file is generated by setmodules.py. Do not modify it. 
- */ -#include "multipack.h" -static PyObject *fitpack_error; -#include "__fitpack.h" - -static struct PyMethodDef fitpack_module_methods[] = { -{"_curfit", - fitpack_curfit, - METH_VARARGS, doc_curfit}, -{"_spl_", - fitpack_spl_, - METH_VARARGS, doc_spl_}, -{"_splint", - fitpack_splint, - METH_VARARGS, doc_splint}, -{"_sproot", - fitpack_sproot, - METH_VARARGS, doc_sproot}, -{"_spalde", - fitpack_spalde, - METH_VARARGS, doc_spalde}, -{"_parcur", - fitpack_parcur, - METH_VARARGS, doc_parcur}, -{"_surfit", - fitpack_surfit, - METH_VARARGS, doc_surfit}, -{"_bispev", - fitpack_bispev, - METH_VARARGS, doc_bispev}, -{"_insert", - fitpack_insert, - METH_VARARGS, doc_insert}, -{"_bspleval", - _bspleval, - METH_VARARGS, doc_bspleval}, -{"_bsplmat", - _bsplmat, - METH_VARARGS, doc_bsplmat}, -{"_bspldismat", - _bspldismat, - METH_VARARGS, doc_bspldismat}, -{NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_fitpack", - NULL, - -1, - fitpack_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__fitpack(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - - d = PyModule_GetDict(m); - - s = PyUnicode_FromString(" 1.7 "); - PyDict_SetItemString(d, "__version__", s); - fitpack_error = PyErr_NewException ("fitpack.error", NULL, NULL); - Py_DECREF(s); - if (PyErr_Occurred()) { - Py_FatalError("can't initialize module fitpack"); - } - - return m; -} -#else -PyMODINIT_FUNC init_fitpack(void) { - PyObject *m, *d, *s; - m = Py_InitModule("_fitpack", fitpack_module_methods); - import_array(); - d = PyModule_GetDict(m); - - s = PyString_FromString(" 1.7 "); - PyDict_SetItemString(d, "__version__", s); - fitpack_error = PyErr_NewException ("fitpack.error", NULL, NULL); - Py_DECREF(s); - if (PyErr_Occurred()) { - Py_FatalError("can't initialize module fitpack"); - } -} -#endif diff --git a/scipy-0.10.1/scipy/interpolate/src/_interpolate.cpp 
b/scipy-0.10.1/scipy/interpolate/src/_interpolate.cpp deleted file mode 100644 index 0ed515d857..0000000000 --- a/scipy-0.10.1/scipy/interpolate/src/_interpolate.cpp +++ /dev/null @@ -1,258 +0,0 @@ -#include "Python.h" -#include - -#include "interpolate.h" -#include "numpy/arrayobject.h" - -using namespace std; - -extern "C" { - -static PyObject* linear_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - - if(!PyArg_ParseTupleAndKeywords(args,kywds,"OOOO:linear_dddd",kwlist,&py_x, &py_y, &py_new_x, &py_new_y)) - return NULL; - arr_x = PyArray_FROMANY(py_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, PyArray_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - linear((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0)); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyObject* loginterp_method(PyObject*self, 
PyObject* args, PyObject* kywds) -{ - static char *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - - if(!PyArg_ParseTupleAndKeywords(args,kywds,"OOOO:loginterp_dddd",kwlist,&py_x, &py_y, &py_new_x, &py_new_y)) - return NULL; - arr_x = PyArray_FROMANY(py_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, PyArray_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - loginterp((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0)); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyObject* window_average_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - double width; - - 
if(!PyArg_ParseTupleAndKeywords(args,kywds,"OOOOd:loginterp_dddd",kwlist,&py_x, &py_y, &py_new_x, &py_new_y, &width)) - return NULL; - arr_x = PyArray_FROMANY(py_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, PyArray_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - window_average((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0), width); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyObject* block_average_above_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - - if(!PyArg_ParseTupleAndKeywords(args,kywds,"OOOO:loginterp_dddd",kwlist,&py_x, &py_y, &py_new_x, &py_new_y)) - return NULL; - arr_x = PyArray_FROMANY(py_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, PyArray_DOUBLE, 1, 
1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, PyArray_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, PyArray_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - block_average_above((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0)); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyMethodDef interpolate_methods[] = { - {"linear_dddd", (PyCFunction)linear_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {"loginterp_dddd", (PyCFunction)loginterp_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {"window_average_ddddd", (PyCFunction)window_average_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {"block_average_above_dddd", (PyCFunction)block_average_above_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_interpolate", - NULL, - -1, - interpolate_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__interpolate(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - - return m; -} -#else -PyMODINIT_FUNC init_interpolate(void) -{ - PyObject* m; - m = Py_InitModule3("_interpolate", interpolate_methods, - "A few interpolation routines.\n" - ); - if (m == NULL) - return; - import_array(); -} -#endif -} // extern "C" diff --git 
a/scipy-0.10.1/scipy/interpolate/src/fitpack.pyf b/scipy-0.10.1/scipy/interpolate/src/fitpack.pyf deleted file mode 100644 index 46fe91c96d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/src/fitpack.pyf +++ /dev/null @@ -1,500 +0,0 @@ -! -*- f90 -*- -! Author: Pearu Peterson -! -python module dfitpack ! in - - usercode ''' - -static double dmax(double* seq,int len) { - double val; - int i; - if (len<1) - return -1e308; - val = seq[0]; - for(i=1;ival) val = seq[i]; - return val; -} -static double dmin(double* seq,int len) { - double val; - int i; - if (len<1) - return 1e308; - val = seq[0]; - for(i=1;ival1) return val1; - val1 = dmax(tx,nx); - return val2 - (val1-val2)/nx; -} -static double calc_e(double* x,int m,double* tx,int nx) { - double val1 = dmax(x,m); - double val2 = dmax(tx,nx); - if (val2=8) :: n=len(t) - real*8 dimension(n),depend(n),check(len(c)==n) :: c - real*8 dimension(mest),intent(out),depend(mest) :: zero - integer optional,intent(in),depend(n) :: mest=3*(n-7) - integer intent(out) :: m - integer intent(out) :: ier - end subroutine sproot - - subroutine spalde(t,n,c,k,x,d,ier) - ! d,ier = spalde(t,c,k,x) - - callprotoargument double*,int*,double*,int*,double*,double*,int* - callstatement {int k1=k+1; (*f2py_func)(t,&n,c,&k1,&x,d,&ier); } - - real*8 dimension(n) :: t - integer intent(hide),depend(t) :: n=len(t) - real*8 dimension(n),depend(n),check(len(c)==n) :: c - integer intent(in) :: k - real*8 intent(in) :: x - real*8 dimension(k+1),intent(out),depend(k) :: d - integer intent(out) :: ier - end subroutine spalde - - subroutine curfit(iopt,m,x,y,w,xb,xe,k,s,nest,n,t,c,fp,wrk,lwrk,iwrk,ier) - ! 
in curfit.f - integer :: iopt - integer intent(hide),depend(x),check(m>k),depend(k) :: m=len(x) - real*8 dimension(m) :: x - real*8 dimension(m),depend(m),check(len(y)==m) :: y - real*8 dimension(m),depend(m),check(len(w)==m) :: w - real*8 optional,depend(x),check(xb<=x[0]) :: xb = x[0] - real*8 optional,depend(x,m),check(xe>=x[m-1]) :: xe = x[m-1] - integer optional,check(1<=k && k <=5),intent(in) :: k=3 - real*8 optional,check(s>=0.0) :: s = 0.0 - integer intent(hide),depend(t) :: nest=len(t) - integer intent(out), depend(nest) :: n=nest - real*8 dimension(nest),intent(inout) :: t - real*8 dimension(n),intent(out) :: c - real*8 intent(out) :: fp - real*8 dimension(lwrk),intent(inout) :: wrk - integer intent(hide),depend(wrk) :: lwrk=len(wrk) - integer dimension(nest),intent(inout) :: iwrk - integer intent(out) :: ier - end subroutine curfit - - subroutine percur(iopt,m,x,y,w,k,s,nest,n,t,c,fp,wrk,lwrk,iwrk,ier) - ! in percur.f - integer :: iopt - integer intent(hide),depend(x),check(m>k),depend(k) :: m=len(x) - real*8 dimension(m) :: x - real*8 dimension(m),depend(m),check(len(y)==m) :: y - real*8 dimension(m),depend(m),check(len(w)==m) :: w - integer optional,check(1<=k && k <=5),intent(in) :: k=3 - real*8 optional,check(s>=0.0) :: s = 0.0 - integer intent(hide),depend(t) :: nest=len(t) - integer intent(out), depend(nest) :: n=nest - real*8 dimension(nest),intent(inout) :: t - real*8 dimension(n),intent(out) :: c - real*8 intent(out) :: fp - real*8 dimension(lwrk),intent(inout) :: wrk - integer intent(hide),depend(wrk) :: lwrk=len(wrk) - integer dimension(nest),intent(inout) :: iwrk - integer intent(out) :: ier - end subroutine percur - - - subroutine parcur(iopt,ipar,idim,m,u,mx,x,w,ub,ue,k,s,nest,n,t,nc,c,fp,wrk,lwrk,iwrk,ier) - ! 
in parcur.f - integer check(iopt>=-1 && iopt <= 1):: iopt - integer check(ipar == 1 || ipar == 0) :: ipar - integer check(idim > 0 && idim < 11) :: idim - integer intent(hide),depend(u,k),check(m>k) :: m=len(u) - real*8 dimension(m), intent(inout) :: u - integer intent(hide),depend(x,idim,m),check(mx>=idim*m) :: mx=len(x) - real*8 dimension(mx) :: x - real*8 dimension(m) :: w - real*8 :: ub - real*8 :: ue - integer optional, check(1<=k && k<=5) :: k=3.0 - real*8 optional, check(s>=0.0) :: s = 0.0 - integer intent(hide), depend(t) :: nest=len(t) - integer intent(out), depend(nest) :: n=nest - real*8 dimension(nest), intent(inout) :: t - integer intent(hide), depend(c,nest,idim), check(nc>=idim*nest) :: nc=len(c) - real*8 dimension(nc), intent(out) :: c - real*8 intent(out) :: fp - real*8 dimension(lwrk), intent(inout) :: wrk - integer intent(hide),depend(wrk) :: lwrk=len(wrk) - integer dimension(nest), intent(inout) :: iwrk - integer intent(out) :: ier - end subroutine parcur - - - subroutine fpcurf0(iopt,x,y,w,m,xb,xe,k,s,nest,tol,maxit,k1,k2,n,t,c,fp,fpint,wrk,nrdata,ier) - ! x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier = \ - ! 
fpcurf0(x,y,k,[w,xb,xe,s,nest]) - - fortranname fpcurf - callprotoargument int*,double*,double*,double*,int*,double*,double*,int*,double*,int*,double*,int*,int*,int*,int*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int*,int* - callstatement (*f2py_func)(&iopt,x,y,w,&m,&xb,&xe,&k,&s,&nest,&tol,&maxit,&k1,&k2,&n,t,c,&fp,fpint,wrk,wrk+nest,wrk+nest*k2,wrk+nest*2*k2,wrk+nest*3*k2,nrdata,&ier) - - integer intent(hide) :: iopt = 0 - real*8 dimension(m),intent(in,out) :: x - real*8 dimension(m),depend(m),check(len(y)==m),intent(in,out) :: y - real*8 dimension(m),depend(m),check(len(w)==m),intent(in,out) :: w = 1.0 - integer intent(hide),depend(x),check(m>k),depend(k) :: m=len(x) - real*8 intent(in,out),depend(x),check(xb<=x[0]) :: xb = x[0] - real*8 intent(in,out),depend(x,m),check(xe>=x[m-1]) :: xe = x[m-1] - integer check(1<=k && k<=5),intent(in,out) :: k - real*8 check(s>=0.0),depend(m),intent(in,out) :: s = m - integer intent(in),depend(m,s,k,k1),check(nest>=2*k1) :: nest = (s==0.0?m+k+1:MAX(m/2,2*k1)) - real*8 intent(hide) :: tol = 0.001 - integer intent(hide) :: maxit = 20 - integer intent(hide),depend(k) :: k1=k+1 - integer intent(hide),depend(k) :: k2=k+2 - integer intent(out) :: n - real*8 dimension(nest),intent(out),depend(nest) :: t - real*8 dimension(nest),depend(nest),intent(out) :: c - real*8 intent(out) :: fp - real*8 dimension(nest),depend(nest),intent(out,cache) :: fpint - real*8 dimension(nest*3*k2+m*k1),intent(cache,hide),depend(nest,k1,k2,m) :: wrk - integer dimension(nest),depend(nest),intent(out,cache) :: nrdata - integer intent(out) :: ier - end subroutine fpcurf0 - - subroutine fpcurf1(iopt,x,y,w,m,xb,xe,k,s,nest,tol,maxit,k1,k2,n,t,c,fp,fpint,wrk,nrdata,ier) - ! x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier = \ - ! 
fpcurf1(x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier) - - fortranname fpcurf - callprotoargument int*,double*,double*,double*,int*,double*,double*,int*,double*,int*,double*,int*,int*,int*,int*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int*,int* - callstatement (*f2py_func)(&iopt,x,y,w,&m,&xb,&xe,&k,&s,&nest,&tol,&maxit,&k1,&k2,&n,t,c,&fp,fpint,wrk,wrk+nest,wrk+nest*k2,wrk+nest*2*k2,wrk+nest*3*k2,nrdata,&ier) - - integer intent(hide) :: iopt = 1 - real*8 dimension(m),intent(in,out,overwrite) :: x - real*8 dimension(m),depend(m),check(len(y)==m),intent(in,out,overwrite) :: y - real*8 dimension(m),depend(m),check(len(w)==m),intent(in,out,overwrite) :: w - integer intent(hide),depend(x),check(m>k),depend(k) :: m=len(x) - real*8 intent(in,out) :: xb - real*8 intent(in,out) :: xe - integer check(1<=k && k<=5),intent(in,out) :: k - real*8 check(s>=0.0),intent(in,out) :: s - integer intent(hide),depend(t) :: nest = len(t) - real*8 intent(hide) :: tol = 0.001 - integer intent(hide) :: maxit = 20 - integer intent(hide),depend(k) :: k1=k+1 - integer intent(hide),depend(k) :: k2=k+2 - integer intent(in,out) :: n - real*8 dimension(nest),intent(in,out,overwrite) :: t - real*8 dimension(nest),depend(nest),check(len(c)==nest),intent(in,out,overwrite) :: c - real*8 intent(in,out) :: fp - real*8 dimension(nest),depend(nest),check(len(fpint)==nest),intent(in,out,cache,overwrite) :: fpint - real*8 dimension(nest*3*k2+m*k1),intent(cache,hide),depend(nest,k1,k2,m) :: wrk - integer dimension(nest),depend(nest),check(len(nrdata)==nest),intent(in,out,cache,overwrite) :: nrdata - integer intent(in,out) :: ier - end subroutine fpcurf1 - - subroutine fpcurfm1(iopt,x,y,w,m,xb,xe,k,s,nest,tol,maxit,k1,k2,n,t,c,fp,fpint,wrk,nrdata,ier) - ! x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier = \ - ! 
fpcurfm1(x,y,k,t,[w,xb,xe]) - - fortranname fpcurf - callprotoargument int*,double*,double*,double*,int*,double*,double*,int*,double*,int*,double*,int*,int*,int*,int*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int*,int* - callstatement (*f2py_func)(&iopt,x,y,w,&m,&xb,&xe,&k,&s,&nest,&tol,&maxit,&k1,&k2,&n,t,c,&fp,fpint,wrk,wrk+nest,wrk+nest*k2,wrk+nest*2*k2,wrk+nest*3*k2,nrdata,&ier) - - integer intent(hide) :: iopt = -1 - real*8 dimension(m),intent(in,out) :: x - real*8 dimension(m),depend(m),check(len(y)==m),intent(in,out) :: y - real*8 dimension(m),depend(m),check(len(w)==m),intent(in,out) :: w = 1.0 - integer intent(hide),depend(x),check(m>k),depend(k) :: m=len(x) - real*8 intent(in,out),depend(x),check(xb<=x[0]) :: xb = x[0] - real*8 intent(in,out),depend(x,m),check(xe>=x[m-1]) :: xe = x[m-1] - integer check(1<=k && k<=5),intent(in,out) :: k - real*8 intent(out) :: s = -1 - integer intent(hide),depend(n) :: nest = n - real*8 intent(hide) :: tol = 0.001 - integer intent(hide) :: maxit = 20 - integer intent(hide),depend(k) :: k1=k+1 - integer intent(hide),depend(k) :: k2=k+2 - integer intent(out),depend(t) :: n = len(t) - real*8 dimension(n),intent(in,out,overwrite) :: t - real*8 dimension(nest),depend(nest),intent(out) :: c - real*8 intent(out) :: fp - real*8 dimension(nest),depend(nest),intent(out,cache) :: fpint - real*8 dimension(nest*3*k2+m*k1),intent(cache,hide),depend(nest,k1,k2,m) :: wrk - integer dimension(nest),depend(nest),intent(out,cache) :: nrdata - integer intent(out) :: ier - end subroutine fpcurfm1 - - !!!!!!!!!! Bivariate spline !!!!!!!!!!! - - subroutine bispev(tx,nx,ty,ny,c,kx,ky,x,mx,y,my,z,wrk,lwrk,iwrk,kwrk,ier) - ! 
z,ier = bispev(tx,ty,c,kx,ky,x,y) - real*8 dimension(nx),intent(in) :: tx - integer intent(hide),depend(tx) :: nx=len(tx) - real*8 dimension(ny),intent(in) :: ty - integer intent(hide),depend(ty) :: ny=len(ty) - real*8 intent(in),dimension((nx-kx-1)*(ny-ky-1)),depend(nx,ny,kx,ky),& - check(len(c)==(nx-kx-1)*(ny-ky-1)):: c - integer :: kx - integer :: ky - real*8 intent(in),dimension(mx) :: x - integer intent(hide),depend(x) :: mx=len(x) - real*8 intent(in),dimension(my) :: y - integer intent(hide),depend(y) :: my=len(y) - real*8 dimension(mx,my),depend(mx,my),intent(out,c) :: z - real*8 dimension(lwrk),depend(lwrk),intent(hide,cache) :: wrk - integer intent(hide),depend(mx,kx,my,ky) :: lwrk=mx*(kx+1)+my*(ky+1) - integer dimension(kwrk),depend(kwrk),intent(hide,cache) :: iwrk - integer intent(hide),depend(mx,my) :: kwrk=mx+my - integer intent(out) :: ier - end subroutine bispev - - subroutine bispeu(tx,nx,ty,ny,c,kx,ky,x,y,z,m,wrk,lwrk,ier) - ! z,ier = bispeu(tx,ty,c,kx,ky,x,y) - real*8 dimension(nx),intent(in) :: tx - integer intent(hide),depend(tx) :: nx=len(tx) - real*8 dimension(ny),intent(in) :: ty - integer intent(hide),depend(ty) :: ny=len(ty) - real*8 intent(in),dimension((nx-kx-1)*(ny-ky-1)),depend(nx,ny,kx,ky),& - check(len(c)==(nx-kx-1)*(ny-ky-1)):: c - integer :: kx - integer :: ky - real*8 intent(in),dimension(m) :: x - real*8 intent(in),dimension(m) :: y - integer intent(hide),depend(x) :: m=len(x) - real*8 dimension(m),depend(m),intent(out,c) :: z - real*8 dimension(lwrk),depend(lwrk),intent(hide,cache) :: wrk - integer intent(hide),depend(kx,ky) :: lwrk=kx+ky+2 - integer intent(out) :: ier - end subroutine bispeu - - subroutine surfit_smth(iopt,m,x,y,z,w,xb,xe,yb,ye,kx,ky,s,nxest,nyest,& - nmax,eps,nx,tx,ny,ty,c,fp,wrk1,lwrk1,wrk2,lwrk2,& - iwrk,kwrk,ier) - ! 
nx,tx,ny,ty,c,fp,ier = surfit_smth(x,y,z,[w,xb,xe,yb,ye,kx,ky,s,eps,lwrk2]) - - fortranname surfit - - integer intent(hide) :: iopt=0 - integer intent(hide),depend(x,kx,ky),check(m>=(kx+1)*(ky+1)) & - :: m=len(x) - real*8 dimension(m) :: x - real*8 dimension(m),depend(m),check(len(y)==m) :: y - real*8 dimension(m),depend(m),check(len(z)==m) :: z - real*8 optional,dimension(m),depend(m),check(len(w)==m) :: w = 1.0 - real*8 optional,depend(x,m) :: xb=dmin(x,m) - real*8 optional,depend(x,m) :: xe=dmax(x,m) - real*8 optional,depend(y,m) :: yb=dmin(y,m) - real*8 optional,depend(y,m) :: ye=dmax(y,m) - integer check(1<=kx && kx<=5) :: kx = 3 - integer check(1<=ky && ky<=5) :: ky = 3 - real*8 optional,check(0.0<=s) :: s = m - integer optional,depend(kx,m),check(nxest>=2*(kx+1)) & - :: nxest = imax(kx+1+sqrt(m/2),2*(kx+1)) - integer optional,depend(ky,m),check(nyest>=2*(ky+1)) & - :: nyest = imax(ky+1+sqrt(m/2),2*(ky+1)) - integer intent(hide),depend(nxest,nyest) :: nmax=MAX(nxest,nyest) - real*8 optional,check(0.0=(kx+1)*(ky+1)) & - :: m=len(x) - real*8 dimension(m) :: x - real*8 dimension(m),depend(m),check(len(y)==m) :: y - real*8 dimension(m),depend(m),check(len(z)==m) :: z - real*8 optional,dimension(m),depend(m),check(len(w)==m) :: w = 1.0 - real*8 optional,depend(x,tx,m,nx) :: xb=calc_b(x,m,tx,nx) - real*8 optional,depend(x,tx,m,nx) :: xe=calc_e(x,m,tx,nx) - real*8 optional,depend(y,ty,m,ny) :: yb=calc_b(y,m,ty,ny) - real*8 optional,depend(y,ty,m,ny) :: ye=calc_e(y,m,ty,ny) - integer check(1<=kx && kx<=5) :: kx = 3 - integer check(1<=ky && ky<=5) :: ky = 3 - real*8 intent(hide) :: s = 0.0 - integer intent(hide),depend(nx) :: nxest = nx - integer intent(hide),depend(ny) :: nyest = ny - integer intent(hide),depend(nx,ny) :: nmax=MAX(nx,ny) - real*8 optional,check(0.0kx) :: mx=len(x) - real*8 dimension(mx) :: x - integer intent(hide),depend(y,ky),check(my>ky) :: my=len(y) - real*8 dimension(my) :: y - real*8 dimension(mx*my),depend(mx,my),check(len(z)==mx*my) :: z - 
real*8 optional,depend(x,mx) :: xb=dmin(x,mx) - real*8 optional,depend(x,mx) :: xe=dmax(x,mx) - real*8 optional,depend(y,my) :: yb=dmin(y,my) - real*8 optional,depend(y,my) :: ye=dmax(y,my) - integer optional,check(1<=kx && kx<=5) :: kx = 3 - integer optional,check(1<=ky && ky<=5) :: ky = 3 - real*8 optional,check(0.0<=s) :: s = 0.0 - integer intent(hide),depend(kx,mx),check(nxest>=2*(kx+1)) & - :: nxest = mx+kx+1 - integer intent(hide),depend(ky,my),check(nyest>=2*(ky+1)) & - :: nyest = my+ky+1 - integer intent(out) :: nx - real*8 dimension(nxest),intent(out),depend(nxest) :: tx - integer intent(out) :: ny - real*8 dimension(nyest),intent(out),depend(nyest) :: ty - real*8 dimension((nxest-kx-1)*(nyest-ky-1)), & - depend(kx,ky,nxest,nyest),intent(out) :: c - real*8 intent(out) :: fp - real*8 dimension(lwrk),intent(cache,hide),depend(lwrk) :: wrk - integer intent(hide),depend(mx,my,kx,ky,nxest,nyest) & - :: lwrk=calc_regrid_lwrk(mx,my,kx,ky,nxest,nyest) - integer dimension(kwrk),depend(kwrk),intent(cache,hide) :: iwrk - integer intent(hide),depend(mx,my,nxest,nyest) & - :: kwrk=3+mx+my+nxest+nyest - integer intent(out) :: ier - end subroutine regrid_smth - - function dblint(tx,nx,ty,ny,c,kx,ky,xb,xe,yb,ye,wrk) - ! 
iy = dblint(tx,ty,c,kx,ky,xb,xe,yb,ye) - real*8 dimension(nx),intent(in) :: tx - integer intent(hide),depend(tx) :: nx=len(tx) - real*8 dimension(ny),intent(in) :: ty - integer intent(hide),depend(ty) :: ny=len(ty) - real*8 intent(in),dimension((nx-kx-1)*(ny-ky-1)),depend(nx,ny,kx,ky),& - check(len(c)==(nx-kx-1)*(ny-ky-1)):: c - integer :: kx - integer :: ky - real*8 intent(in) :: xb - real*8 intent(in) :: xe - real*8 intent(in) :: yb - real*8 intent(in) :: ye - real*8 dimension(nx+ny-kx-ky-2),depend(nx,ny,kx,ky),intent(cache,hide) :: wrk - real*8 :: dblint - end function dblint - end interface -end python module dfitpack - diff --git a/scipy-0.10.1/scipy/interpolate/src/interpolate.h b/scipy-0.10.1/scipy/interpolate/src/interpolate.h deleted file mode 100644 index 0de37d1b27..0000000000 --- a/scipy-0.10.1/scipy/interpolate/src/interpolate.h +++ /dev/null @@ -1,205 +0,0 @@ -#include -#include -#include -#include - -template -void linear(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len) -{ - for (int i=0;i=x_vec[len-1]) - index = len-2; - else - { - T* which = std::lower_bound(x_vec, x_vec+len, new_x); - index = which - x_vec-1; - } - - if(new_x == x_vec[index]) - { - // exact value - new_y_vec[i] = y_vec[index]; - } - else - { - //interpolate - double x_lo = x_vec[index]; - double x_hi = x_vec[index+1]; - double y_lo = y_vec[index]; - double y_hi = y_vec[index+1]; - double slope = (y_hi-y_lo)/(x_hi-x_lo); - new_y_vec[i] = slope * (new_x-x_lo) + y_lo; - } - } -} - -template -void loginterp(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len) -{ - for (int i=0;i=x_vec[len-1]) - index = len-2; - else - { - T* which = std::lower_bound(x_vec, x_vec+len, new_x); - index = which - x_vec-1; - } - - if(new_x == x_vec[index]) - { - // exact value - new_y_vec[i] = y_vec[index]; - } - else - { - //interpolate - double x_lo = x_vec[index]; - double x_hi = x_vec[index+1]; - double y_lo = log10(y_vec[index]); - double y_hi = 
log10(y_vec[index+1]); - double slope = (y_hi-y_lo)/(x_hi-x_lo); - new_y_vec[i] = pow(10.0, (slope * (new_x-x_lo) + y_lo)); - } - } -} - -template -int block_average_above(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len) -{ - int bad_index = -1; - int start_index = 0; - T last_y = 0.0; - T thickness = 0.0; - - for(int i=0;i x_vec[len-1])) - { - bad_index = i; - break; - } - else if (new_x == x_vec[0]) - { - // for the first sample, just return the cooresponding y value - new_y_vec[i] = y_vec[0]; - } - else - { - T* which = std::lower_bound(x_vec, x_vec+len, new_x); - int index = which - x_vec-1; - - // calculate weighted average - - // Start off with "residue" from last interval in case last x - // was between to samples. - T weighted_y_sum = last_y * thickness; - T thickness_sum = thickness; - for(int j=start_index; j<=index; j++) - { - if (x_vec[j+1] < new_x) - thickness = x_vec[j+1] - x_vec[j]; - else - thickness = new_x -x_vec[j]; - weighted_y_sum += y_vec[j] * thickness; - thickness_sum += thickness; - } - new_y_vec[i] = weighted_y_sum/thickness_sum; - - // Store the thickness between the x value and the next sample - // to add to the next weighted average. 
- last_y = y_vec[index]; - thickness = x_vec[index+1] - new_x; - - // start next weighted average at next sample - start_index =index+1; - } - } - return bad_index; -} - -template -int window_average(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len, - T width) -{ - for(int i=0;i= len) - { - //top = x_vec[len-1]; - top_index = len-1; - } - //std::cout << std::endl; - //std::cout << bottom_index << " " << top_index << std::endl; - //std::cout << bottom << " " << top << std::endl; - // calculate weighted average - T thickness =0.0; - T thickness_sum =0.0; - T weighted_y_sum =0.0; - for(int j=bottom_index; j < top_index; j++) - { - thickness = x_vec[j+1] - bottom; - weighted_y_sum += y_vec[j] * thickness; - thickness_sum += thickness; - bottom = x_vec[j+1]; - /* - std::cout << "iter: " << j - bottom_index << " " << - "index: " << j << " " << - "bottom: " << bottom << " " << - "x+1: " << x_vec[j+1] << " " << - "x: " << x_vec[j] << " " << - "y: " << y_vec[j] << " " << - "weighted_sum: " << weighted_y_sum << - "thickness: " << thickness << " " << - "thickness_sum: " << thickness_sum << std::endl; - */ - //std::cout << x_vec[j] << " "; - //std::cout << thickness << " "; - } - - // last element - thickness = top - bottom; - weighted_y_sum += y_vec[top_index] * thickness; - thickness_sum += thickness; - /* - std::cout << "iter: last" << " " << - "index: " << top_index << " " << - "x: " << x_vec[top_index] << " " << - "y: " << y_vec[top_index] << " " << - "weighted_sum: " << weighted_y_sum << - "thickness: " << thickness << " " << - "thickness_sum: " << thickness_sum << std::endl; - */ - //std::cout << x_vec[top_index] << " " << thickness_sum << std::endl; - new_y_vec[i] = weighted_y_sum/thickness_sum; - } - return -1; -} diff --git a/scipy-0.10.1/scipy/interpolate/src/multipack.h b/scipy-0.10.1/scipy/interpolate/src/multipack.h deleted file mode 100644 index 10b36c9814..0000000000 --- a/scipy-0.10.1/scipy/interpolate/src/multipack.h +++ /dev/null @@ 
-1,229 +0,0 @@ -/* MULTIPACK module by Travis Oliphant - -Copyright (c) 2002 Travis Oliphant all rights reserved -Oliphant.Travis@altavista.net -Permission to use, modify, and distribute this software is given under the -terms of the SciPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -*/ - - -/* This extension module is a collection of wrapper functions around -common FORTRAN code in the packages MINPACK, ODEPACK, and QUADPACK plus -some differential algebraic equation solvers. - -The wrappers are meant to be nearly direct translations between the -FORTAN code and Python. Some parameters like sizes do not need to be -passed since they are available from the objects. - -It is anticipated that a pure Python module be written to call these lower -level routines and make a simpler user interface. All of the routines define -default values for little-used parameters so that even the raw routines are -quite useful without a separate wrapper. - -FORTRAN Outputs that are not either an error indicator or the sought-after -results are placed in a dictionary and returned as an optional member of -the result tuple when the full_output argument is non-zero. 
-*/ - -#include "Python.h" -#include "numpy/arrayobject.h" - -#if PY_VERSION_HEX >= 0x03000000 - #define PyString_AsString PyBytes_AsString - #define PyString_FromString PyBytes_FromString - #define PyString_ConcatAndDel PyBytes_ConcatAndDel - - #define PyInt_AsLong PyLong_AsLong - - /* Return True only if the long fits in a C long */ - static NPY_INLINE int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); - } -#endif - -#define PYERR(errobj,message) {PyErr_SetString(errobj,message); goto fail;} -#define PYERR2(errobj,message) {PyErr_Print(); PyErr_SetString(errobj, message); goto fail;} -#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS) - -#define STORE_VARS() PyObject *store_multipack_globals[4]; int store_multipack_globals3; - -#define INIT_FUNC(fun,arg,errobj) { /* Get extra arguments or set to zero length tuple */ \ - store_multipack_globals[0] = multipack_python_function; \ - store_multipack_globals[1] = multipack_extra_arguments; \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. */ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun)) \ - PYERR(errobj,"First argument must be a callable function."); \ - multipack_python_function = fun; \ - multipack_extra_arguments = arg; } - -#define INIT_JAC_FUNC(fun,Dfun,arg,col_deriv,errobj) { \ - store_multipack_globals[0] = multipack_python_function; \ - store_multipack_globals[1] = multipack_extra_arguments; \ - store_multipack_globals[2] = multipack_python_jacobian; \ - store_multipack_globals3 = multipack_jac_transpose; \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. 
*/ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun) || (Dfun != Py_None && !PyCallable_Check(Dfun))) \ - PYERR(errobj,"The function and its Jacobian must be callable functions."); \ - multipack_python_function = fun; \ - multipack_extra_arguments = arg; \ - multipack_python_jacobian = Dfun; \ - multipack_jac_transpose = !(col_deriv);} - -#define RESTORE_JAC_FUNC() multipack_python_function = store_multipack_globals[0]; \ - multipack_extra_arguments = store_multipack_globals[1]; \ - multipack_python_jacobian = store_multipack_globals[2]; \ - multipack_jac_transpose = store_multipack_globals3; - -#define RESTORE_FUNC() multipack_python_function = store_multipack_globals[0]; \ - multipack_extra_arguments = store_multipack_globals[1]; - -#define SET_DIAG(ap_diag,o_diag,mode) { /* Set the diag vector from input */ \ - if (o_diag == NULL || o_diag == Py_None) { \ - ap_diag = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); \ - if (ap_diag == NULL) goto fail; \ - diag = (double *)ap_diag -> data; \ - mode = 1; \ - } \ - else { \ - ap_diag = (PyArrayObject *)PyArray_ContiguousFromObject(o_diag, PyArray_DOUBLE, 1, 1); \ - if (ap_diag == NULL) goto fail; \ - diag = (double *)ap_diag -> data; \ - mode = 2; } } - -#define MATRIXC2F(jac,data,n,m) {double *p1=(double *)(jac), *p2, *p3=(double *)(data);\ -int i,j;\ -for (j=0;j<(m);p3++,j++) \ - for (p2=p3,i=0;i<(n);p2+=(m),i++,p1++) \ - *p1 = *p2; } -/* -static PyObject *multipack_python_function=NULL; -static PyObject *multipack_python_jacobian=NULL; -static PyObject *multipack_extra_arguments=NULL; -static int multipack_jac_transpose=1; -*/ - -static PyArrayObject * my_make_numpy_array(PyObject *y0, int type, int mindim, int maxdim) - /* This is just like PyArray_ContiguousFromObject except it handles - * single numeric datatypes as 1-element, rank-1 arrays instead of as - * scalars. 
- */ -{ - PyArrayObject *new_array; - PyObject *tmpobj; - - Py_INCREF(y0); - - if (PyInt_Check(y0) || PyFloat_Check(y0)) { - tmpobj = PyList_New(1); - PyList_SET_ITEM(tmpobj, 0, y0); /* reference now belongs to tmpobj */ - } - else - tmpobj = y0; - - new_array = (PyArrayObject *)PyArray_ContiguousFromObject(tmpobj, type, mindim, maxdim); - - Py_DECREF(tmpobj); - return new_array; -} - -static PyObject *call_python_function(PyObject *func, npy_intp n, double *x, PyObject *args, int dim, PyObject *error_obj) -{ - /* - This is a generic function to call a python function that takes a 1-D - sequence as a first argument and optional extra_arguments (should be a - zero-length tuple if none desired). The result of the function is - returned in a multiarray object. - -- build sequence object from values in x. - -- add extra arguments (if any) to an argument list. - -- call Python callable object - -- check if error occurred: - if so return NULL - -- if no error, place result of Python code into multiarray object. - */ - - PyArrayObject *sequence = NULL; - PyObject *arglist = NULL, *tmpobj = NULL; - PyObject *arg1 = NULL, *str1 = NULL; - PyObject *result = NULL; - PyArrayObject *result_array = NULL; - - /* Build sequence argument from inputs */ - sequence = (PyArrayObject *)PyArray_SimpleNewFromData(1, &n, PyArray_DOUBLE, (char *)x); - if (sequence == NULL) PYERR2(error_obj,"Internal failure to make an array of doubles out of first\n argument to function call."); - - /* Build argument list */ - if ((arg1 = PyTuple_New(1)) == NULL) { - Py_DECREF(sequence); - return NULL; - } - PyTuple_SET_ITEM(arg1, 0, (PyObject *)sequence); - /* arg1 now owns sequence reference */ - if ((arglist = PySequence_Concat( arg1, args)) == NULL) - PYERR2(error_obj,"Internal error constructing argument list."); - - Py_DECREF(arg1); /* arglist has a reference to sequence, now. */ - - - /* Call function object --- variable passed to routine. Extra - arguments are in another passed variable. 
- */ - if ((result = PyEval_CallObject(func, arglist))==NULL) { - PyErr_Print(); - tmpobj = PyObject_GetAttrString(func, "func_name"); - if (tmpobj == NULL) goto fail; - str1 = PyString_FromString("Error occurred while calling the Python function named "); - if (str1 == NULL) { Py_DECREF(tmpobj); goto fail;} - PyString_ConcatAndDel(&str1, tmpobj); - PyErr_SetString(error_obj, PyString_AsString(str1)); - Py_DECREF(str1); - goto fail; - } - - if ((result_array = (PyArrayObject *)PyArray_ContiguousFromObject(result, PyArray_DOUBLE, dim-1, dim))==NULL) - PYERR2(error_obj,"Result from function call is not a proper array of floats."); - - Py_DECREF(result); - Py_DECREF(arglist); - return (PyObject *)result_array; - - fail: - Py_XDECREF(arglist); - Py_XDECREF(result); - Py_XDECREF(arg1); - return NULL; -} - - - - - - - - - - - - - diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_fitpack.py b/scipy-0.10.1/scipy/interpolate/tests/test_fitpack.py deleted file mode 100644 index 6d6676d3bf..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_fitpack.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python -# Created by Pearu Peterson, June 2003 -""" Test functions for interpolate.fitpack2 module -""" -__usage__ = """ -Build interpolate: - python setup_interpolate.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.interpolate.test()' -Run tests if interpolate is not installed: - python tests/test_fitpack.py [] -""" -#import libwadpy - -from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, \ - assert_array_almost_equal, assert_allclose, TestCase, run_module_suite -from numpy import array, diff, shape -from scipy.interpolate.fitpack2 import UnivariateSpline, LSQBivariateSpline, \ - SmoothBivariateSpline, RectBivariateSpline - -class TestUnivariateSpline(TestCase): - def test_linear_constant(self): - x = [1,2,3] - y = [3,3,3] - lut = UnivariateSpline(x,y,k=1) - assert_array_almost_equal(lut.get_knots(),[1,3]) - 
assert_array_almost_equal(lut.get_coeffs(),[3,3]) - assert_almost_equal(lut.get_residual(),0.0) - assert_array_almost_equal(lut([1,1.5,2]),[3,3,3]) - - def test_preserve_shape(self): - x = [1, 2, 3] - y = [0, 2, 4] - lut = UnivariateSpline(x, y, k=1) - arg = 2 - assert_equal(shape(arg), shape(lut(arg))) - assert_equal(shape(arg), shape(lut(arg, nu=1))) - arg = [1.5, 2, 2.5] - assert_equal(shape(arg), shape(lut(arg))) - assert_equal(shape(arg), shape(lut(arg, nu=1))) - - def test_linear_1d(self): - x = [1,2,3] - y = [0,2,4] - lut = UnivariateSpline(x,y,k=1) - assert_array_almost_equal(lut.get_knots(),[1,3]) - assert_array_almost_equal(lut.get_coeffs(),[0,4]) - assert_almost_equal(lut.get_residual(),0.0) - assert_array_almost_equal(lut([1,1.5,2]),[0,1,2]) - - def test_subclassing(self): - # See #731 - - class ZeroSpline(UnivariateSpline): - def __call__(self, x): - return 0*array(x) - - sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2) - assert_array_equal(sp([1.5, 2.5]), [0., 0.]) - - def test_empty_input(self): - """Test whether empty input returns an empty output. Ticket 1014""" - x = [1,3,5,7,9] - y = [0,4,9,12,21] - spl = UnivariateSpline(x, y, k=3) - assert_array_equal(spl([]), array([])) - - def test_resize_regression(self): - """Regression test for #1375.""" - x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892, - -0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235, - 0.65016502, 1.] - y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061, - 0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223, - 0.62928599, 1.] 
- w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02, - 6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02, - 6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02, - 1.00000000e+12] - spl = UnivariateSpline(x=x, y=y, w=w, s=None) - desired = array([ 0.35100374, 0.51715855, 0.87789547, 0.98719344]) - assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4) - - -class TestLSQBivariateSpline(TestCase): - def test_linear_constant(self): - x = [1,1,1,2,2,2,3,3,3] - y = [1,2,3,1,2,3,1,2,3] - z = [3,3,3,3,3,3,3,3,3] - s = 0.1 - tx = [1+s,3-s] - ty = [1+s,3-s] - lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) - - assert_almost_equal(lut(2,2), 3.) - - def test_bilinearity(self): - x = [1,1,1,2,2,2,3,3,3] - y = [1,2,3,1,2,3,1,2,3] - z = [0,7,8,3,4,7,1,3,4] - s = 0.1 - tx = [1+s,3-s] - ty = [1+s,3-s] - lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) - - tx, ty = lut.get_knots() - - for xa, xb in zip(tx[:-1], tx[1:]): - for ya, yb in zip(ty[:-1], ty[1:]): - for t in [0.1, 0.5, 0.9]: - for s in [0.3, 0.4, 0.7]: - xp = xa*(1-t) + xb*t - yp = ya*(1-s) + yb*s - zp = (+ lut(xa, ya)*(1-t)*(1-s) - + lut(xb, ya)*t*(1-s) - + lut(xa, yb)*(1-t)*s - + lut(xb, yb)*t*s) - assert_almost_equal(lut(xp,yp), zp) - - def test_integral(self): - x = [1,1,1,2,2,2,8,8,8] - y = [1,2,3,1,2,3,1,2,3] - z = array([0,7,8,3,4,7,1,3,4]) - - s = 0.1 - tx = [1+s,3-s] - ty = [1+s,3-s] - lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) - tx, ty = lut.get_knots() - - tz = lut(tx, ty) - trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:] - *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() - - assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz) - - def test_empty_input(self): - """Test whether empty inputs returns an empty output. 
Ticket 1014""" - x = [1,1,1,2,2,2,3,3,3] - y = [1,2,3,1,2,3,1,2,3] - z = [3,3,3,3,3,3,3,3,3] - s = 0.1 - tx = [1+s,3-s] - ty = [1+s,3-s] - lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) - - assert_array_equal(lut([], []), array([])) - -class TestSmoothBivariateSpline(TestCase): - def test_linear_constant(self): - x = [1,1,1,2,2,2,3,3,3] - y = [1,2,3,1,2,3,1,2,3] - z = [3,3,3,3,3,3,3,3,3] - lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1) - assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3])) - assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3]) - assert_almost_equal(lut.get_residual(),0.0) - assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]]) - - def test_linear_1d(self): - x = [1,1,1,2,2,2,3,3,3] - y = [1,2,3,1,2,3,1,2,3] - z = [0,0,0,2,2,2,4,4,4] - lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1) - assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3])) - assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4]) - assert_almost_equal(lut.get_residual(),0.0) - assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]]) - - def test_integral(self): - x = [1,1,1,2,2,2,4,4,4] - y = [1,2,3,1,2,3,1,2,3] - z = array([0,7,8,3,4,7,1,3,4]) - - lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1,s=0) - tx = [1,2,4] - ty = [1,2,3] - - tz = lut(tx, ty) - trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:] - *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() - assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz) - - lut2 = SmoothBivariateSpline(x,y,z,kx=2,ky=2,s=0) - assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz, - decimal=0) # the quadratures give 23.75 and 23.85 - - tz = lut(tx[:-1], ty[:-1]) - trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:] - *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() - assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz) - -class TestRectBivariateSpline(TestCase): - def test_defaults(self): - x = array([1,2,3,4,5]) - y = array([1,2,3,4,5]) - z = 
array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) - lut = RectBivariateSpline(x,y,z) - assert_array_almost_equal(lut(x,y),z) - - def test_evaluate(self): - x = array([1,2,3,4,5]) - y = array([1,2,3,4,5]) - z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) - lut = RectBivariateSpline(x,y,z) - - xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3] - yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3] - zi = lut.ev(xi, yi) - zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)]) - - assert_almost_equal(zi, zi2) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_interpnd.py b/scipy-0.10.1/scipy/interpolate/tests/test_interpnd.py deleted file mode 100644 index fae88dc8c4..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_interpnd.py +++ /dev/null @@ -1,191 +0,0 @@ -import numpy as np -from numpy.testing import assert_equal, assert_allclose, assert_almost_equal, \ - run_module_suite, assert_raises - -import scipy.interpolate.interpnd as interpnd -import scipy.spatial.qhull as qhull - -class TestLinearNDInterpolation(object): - def test_smoketest(self): - # Test at single points - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = np.arange(x.shape[0], dtype=np.double) - - yi = interpnd.LinearNDInterpolator(x, y)(x) - assert_almost_equal(y, yi) - - def test_smoketest_alternate(self): - # Test at single points, alternate calling convention - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = np.arange(x.shape[0], dtype=np.double) - - yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1]) - assert_almost_equal(y, yi) - - def test_complex_smoketest(self): - # Test at single points - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = np.arange(x.shape[0], dtype=np.double) - y = y - 3j*y - - yi = interpnd.LinearNDInterpolator(x, y)(x) - 
assert_almost_equal(y, yi) - - def test_square(self): - # Test barycentric interpolation on a square against a manual - # implementation - - points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double) - values = np.array([1., 2., -3., 5.], dtype=np.double) - - # NB: assume triangles (0, 1, 3) and (1, 2, 3) - # - # 1----2 - # | \ | - # | \ | - # 0----3 - - def ip(x, y): - t1 = (x + y <= 1) - t2 = ~t1 - - x1 = x[t1] - y1 = y[t1] - - x2 = x[t2] - y2 = y[t2] - - z = 0*x - - z[t1] = (values[0]*(1 - x1 - y1) - + values[1]*y1 - + values[3]*x1) - - z[t2] = (values[2]*(x2 + y2 - 1) - + values[1]*(1 - x2) - + values[3]*(1 - y2)) - return z - - xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None], - np.linspace(0, 1, 14)[None,:]) - xx = xx.ravel() - yy = yy.ravel() - - xi = np.array([xx, yy]).T.copy() - zi = interpnd.LinearNDInterpolator(points, values)(xi) - - assert_almost_equal(zi, ip(xx, yy)) - -class TestEstimateGradients2DGlobal(object): - def test_smoketest(self): - x = np.array([(0, 0), (0, 2), - (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float) - tri = qhull.Delaunay(x) - - # Should be exact for linear functions, independent of triangulation - - funcs = [ - (lambda x, y: 0*x + 1, (0, 0)), - (lambda x, y: 0 + x, (1, 0)), - (lambda x, y: -2 + y, (0, 1)), - (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15)) - ] - - for j, (func, grad) in enumerate(funcs): - z = func(x[:,0], x[:,1]) - dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6) - - assert_equal(dz.shape, (6, 2)) - assert_allclose(dz, np.array(grad)[None,:] + 0*dz, - rtol=1e-5, atol=1e-5, err_msg="item %d" % j) - -class TestCloughTocher2DInterpolator(object): - - def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, **kw): - np.random.seed(1234) - if x is None: - x = np.array([(0, 0), (0, 1), - (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8), - (0.5, 0.2)], - dtype=float) - - if not alternate: - ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]), - tol=1e-6) - else: - ip = 
interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]), - func(x[:,0], x[:,1]), - tol=1e-6) - - p = np.random.rand(50, 2) - - if not alternate: - a = ip(p) - else: - a = ip(p[:,0], p[:,1]) - b = func(p[:,0], p[:,1]) - - try: - assert_allclose(a, b, **kw) - except AssertionError: - print abs(a - b) - print ip.grad - raise - - def test_linear_smoketest(self): - # Should be exact for linear functions, independent of triangulation - funcs = [ - lambda x, y: 0*x + 1, - lambda x, y: 0 + x, - lambda x, y: -2 + y, - lambda x, y: 3 + 3*x + 14.15*y, - ] - - for j, func in enumerate(funcs): - self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7, - err_msg="Function %d" % j) - self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7, - alternate=True, - err_msg="Function (alternate) %d" % j) - - def test_quadratic_smoketest(self): - # Should be reasonably accurate for quadratic functions - funcs = [ - lambda x, y: x**2, - lambda x, y: y**2, - lambda x, y: x**2 - y**2, - lambda x, y: x*y, - ] - - for j, func in enumerate(funcs): - self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0, - err_msg="Function %d" % j) - - def test_dense(self): - # Should be more accurate for dense meshes - funcs = [ - lambda x, y: x**2, - lambda x, y: y**2, - lambda x, y: x**2 - y**2, - lambda x, y: x*y, - lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y) - ] - - np.random.seed(4321) # use a different seed than the check! 
- grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float), - np.random.rand(30*30, 2)] - - for j, func in enumerate(funcs): - self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, - err_msg="Function %d" % j) - - def test_wrong_ndim(self): - x = np.random.randn(30, 3) - y = np.random.randn(30) - assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_interpolate.py b/scipy-0.10.1/scipy/interpolate/tests/test_interpolate.py deleted file mode 100644 index df6971d207..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_interpolate.py +++ /dev/null @@ -1,337 +0,0 @@ -from numpy.testing import assert_, assert_equal, assert_almost_equal, \ - assert_array_almost_equal, assert_raises, assert_array_equal, \ - dec, TestCase, run_module_suite -from numpy import mgrid, pi, sin, ogrid, poly1d, linspace -import numpy as np - -from scipy.interpolate import interp1d, interp2d, lagrange - - -class TestInterp2D(TestCase): - def test_interp2d(self): - y, x = mgrid[0:2:20j, 0:pi:21j] - z = sin(x+0.5*y) - I = interp2d(x, y, z) - assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2) - - v,u = ogrid[0:2:24j, 0:pi:25j] - assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2) - - def test_interp2d_meshgrid_input(self): - # Ticket #703 - x = linspace(0, 2, 16) - y = linspace(0, pi, 21) - z = sin(x[None,:] + y[:,None]/2.) - I = interp2d(x, y, z) - assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2) - -class TestInterp1D(object): - - def setUp(self): - self.x10 = np.arange(10.) - self.y10 = np.arange(10.) - self.x25 = self.x10.reshape((2,5)) - self.x2 = np.arange(2.) - self.y2 = np.arange(2.) 
- self.x1 = np.array([0.]) - self.y1 = np.array([0.]) - - self.y210 = np.arange(20.).reshape((2, 10)) - self.y102 = np.arange(20.).reshape((10, 2)) - - self.fill_value = -100.0 - - def test_validation(self): - """ Make sure that appropriate exceptions are raised when invalid values - are given to the constructor. - """ - - # These should all work. - interp1d(self.x10, self.y10, kind='linear') - interp1d(self.x10, self.y10, kind='cubic') - interp1d(self.x10, self.y10, kind='slinear') - interp1d(self.x10, self.y10, kind='quadratic') - interp1d(self.x10, self.y10, kind='zero') - interp1d(self.x10, self.y10, kind='nearest') - interp1d(self.x10, self.y10, kind=0) - interp1d(self.x10, self.y10, kind=1) - interp1d(self.x10, self.y10, kind=2) - interp1d(self.x10, self.y10, kind=3) - - # x array must be 1D. - assert_raises(ValueError, interp1d, self.x25, self.y10) - - # y array cannot be a scalar. - assert_raises(ValueError, interp1d, self.x10, np.array(0)) - - # Check for x and y arrays having the same length. - assert_raises(ValueError, interp1d, self.x10, self.y2) - assert_raises(ValueError, interp1d, self.x2, self.y10) - assert_raises(ValueError, interp1d, self.x10, self.y102) - interp1d(self.x10, self.y210) - interp1d(self.x10, self.y102, axis=0) - - # Check for x and y having at least 1 element. - assert_raises(ValueError, interp1d, self.x1, self.y10) - assert_raises(ValueError, interp1d, self.x10, self.y1) - assert_raises(ValueError, interp1d, self.x1, self.y1) - - - def test_init(self): - """ Check that the attributes are initialized appropriately by the - constructor. 
- """ - - assert_(interp1d(self.x10, self.y10).copy) - assert_(not interp1d(self.x10, self.y10, copy=False).copy) - assert_(interp1d(self.x10, self.y10).bounds_error) - assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error) - assert_(np.isnan(interp1d(self.x10, self.y10).fill_value)) - assert_equal( - interp1d(self.x10, self.y10, fill_value=3.0).fill_value, - 3.0, - ) - assert_equal( - interp1d(self.x10, self.y10).axis, - 0, - ) - assert_equal( - interp1d(self.x10, self.y210).axis, - 1, - ) - assert_equal( - interp1d(self.x10, self.y102, axis=0).axis, - 0, - ) - assert_array_equal( - interp1d(self.x10, self.y10).x, - self.x10, - ) - assert_array_equal( - interp1d(self.x10, self.y10).y, - self.y10, - ) - assert_array_equal( - interp1d(self.x10, self.y210).y, - self.y210, - ) - - - def test_linear(self): - """ Check the actual implementation of linear interpolation. - """ - - interp10 = interp1d(self.x10, self.y10) - assert_array_almost_equal( - interp10(self.x10), - self.y10, - ) - assert_array_almost_equal( - interp10(1.2), - np.array([1.2]), - ) - assert_array_almost_equal( - interp10([2.4, 5.6, 6.0]), - np.array([2.4, 5.6, 6.0]), - ) - - def test_cubic(self): - """ Check the actual implementation of spline interpolation. - """ - - interp10 = interp1d(self.x10, self.y10, kind='cubic') - assert_array_almost_equal( - interp10(self.x10), - self.y10, - ) - assert_array_almost_equal( - interp10(1.2), - np.array([1.2]), - ) - assert_array_almost_equal( - interp10([2.4, 5.6, 6.0]), - np.array([2.4, 5.6, 6.0]), - ) - - def test_nearest(self): - """Check the actual implementation of nearest-neighbour interpolation. 
- """ - - interp10 = interp1d(self.x10, self.y10, kind='nearest') - assert_array_almost_equal( - interp10(self.x10), - self.y10, - ) - assert_array_almost_equal( - interp10(1.2), - np.array(1.), - ) - assert_array_almost_equal( - interp10([2.4, 5.6, 6.0]), - np.array([2., 6., 6.]), - ) - - @dec.knownfailureif(True, "zero-order splines fail for the last point") - def test_zero(self): - """Check the actual implementation of zero-order spline interpolation. - """ - interp10 = interp1d(self.x10, self.y10, kind='zero') - assert_array_almost_equal(interp10(self.x10), self.y10) - assert_array_almost_equal(interp10(1.2), np.array(1.)) - assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), - np.array([2., 6., 6.])) - - def _bounds_check(self, kind='linear'): - """ Test that our handling of out-of-bounds input is correct. - """ - - extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value, - bounds_error=False, kind=kind) - assert_array_equal( - extrap10(11.2), - np.array(self.fill_value), - ) - assert_array_equal( - extrap10(-3.4), - np.array(self.fill_value), - ) - assert_array_equal( - extrap10([[[11.2], [-3.4], [12.6], [19.3]]]), - np.array(self.fill_value), - ) - assert_array_equal( - extrap10._check_bounds(np.array([-1.0, 0.0, 5.0, 9.0, 11.0])), - np.array([True, False, False, False, True]), - ) - - raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True, - kind=kind) - assert_raises(ValueError, raises_bounds_error, -1.0) - assert_raises(ValueError, raises_bounds_error, 11.0) - raises_bounds_error([0.0, 5.0, 9.0]) - - def _bounds_check_int_nan_fill(self, kind='linear'): - x = np.arange(10).astype(np.int_) - y = np.arange(10).astype(np.int_) - c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False) - yi = c(x - 1) - assert_(np.isnan(yi[0])) - assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]]) - - def test_bounds(self): - for kind in ('linear', 'cubic', 'nearest', - 'slinear', 'zero', 'quadratic'): - self._bounds_check(kind) - 
self._bounds_check_int_nan_fill(kind) - - def _nd_check_interp(self, kind='linear'): - """Check the behavior when the inputs and outputs are multidimensional. - """ - - # Multidimensional input. - interp10 = interp1d(self.x10, self.y10, kind=kind) - assert_array_almost_equal( - interp10(np.array([[3., 5.], [2., 7.]])), - np.array([[3., 5.], [2., 7.]]), - ) - - # Scalar input -> 0-dim scalar array output - assert_(isinstance(interp10(1.2), np.ndarray)) - assert_equal(interp10(1.2).shape, ()) - - # Multidimensional outputs. - interp210 = interp1d(self.x10, self.y210, kind=kind) - assert_array_almost_equal( - interp210(1.), - np.array([1., 11.]), - ) - assert_array_almost_equal( - interp210(np.array([1., 2.])), - np.array([[1., 2.], - [11., 12.]]), - ) - - interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind) - assert_array_almost_equal( - interp102(1.), - np.array([2.0, 3.0]), - ) - assert_array_almost_equal( - interp102(np.array([1., 3.])), - np.array([[2., 3.], - [6., 7.]]), - ) - - # Both at the same time! - x_new = np.array([[3., 5.], [2., 7.]]) - assert_array_almost_equal( - interp210(x_new), - np.array([[[3., 5.], [2., 7.]], - [[13., 15.], [12., 17.]]]), - ) - assert_array_almost_equal( - interp102(x_new), - np.array([[[6., 7.], [10., 11.]], - [[4., 5.], [14., 15.]]]), - ) - - def _nd_check_shape(self, kind='linear'): - # Check large ndim output shape - a = [4, 5, 6, 7] - y = np.arange(np.prod(a)).reshape(*a) - for n, s in enumerate(a): - x = np.arange(s) - z = interp1d(x, y, axis=n, kind=kind) - assert_array_almost_equal(z(x), y, err_msg=kind) - - x2 = np.arange(2*3*1).reshape((2,3,1)) / 12. 
- b = list(a) - b[n:n+1] = [2,3,1] - assert_array_almost_equal(z(x2).shape, b, err_msg=kind) - - def test_nd(self): - for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest'): - self._nd_check_interp(kind) - self._nd_check_shape(kind) - - def _check_complex(self, dtype=np.complex_, kind='linear'): - x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10]) - y = x * x ** (1 + 2j) - y = y.astype(dtype) - - # simple test - c = interp1d(x, y, kind=kind) - assert_array_almost_equal(y[:-1], c(x)[:-1]) - - # check against interpolating real+imag separately - xi = np.linspace(1, 10, 31) - cr = interp1d(x, y.real, kind=kind) - ci = interp1d(x, y.imag, kind=kind) - assert_array_almost_equal(c(xi).real, cr(xi)) - assert_array_almost_equal(c(xi).imag, ci(xi)) - - def test_complex(self): - for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic', - 'zero'): - self._check_complex(np.complex64, kind) - self._check_complex(np.complex128, kind) - - @dec.knownfailureif(True, "zero-order splines fail for the last point") - def test_nd_zero_spline(self): - # zero-order splines don't get the last point right, - # see test_zero above - #yield self._nd_check_interp, 'zero' - #yield self._nd_check_interp, 'zero' - pass - -class TestLagrange(TestCase): - - def test_lagrange(self): - p = poly1d([5,2,1,4,3]) - xs = np.arange(len(p.coeffs)) - ys = p(xs) - pl = lagrange(xs,ys) - assert_array_almost_equal(p.coeffs,pl.coeffs) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_interpolate_wrapper.py b/scipy-0.10.1/scipy/interpolate/tests/test_interpolate_wrapper.py deleted file mode 100644 index 2675f6da2d..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_interpolate_wrapper.py +++ /dev/null @@ -1,85 +0,0 @@ -""" module to test interpolate_wrapper.py -""" - -# Unit Test -import unittest -import time -from numpy import arange, allclose, ones, NaN, isnan -import numpy as np - -# functionality to be tested -from 
scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, \ - linear, logarithmic, block_average_above, block, nearest - -class Test(unittest.TestCase): - - def assertAllclose(self, x, y, rtol=1.0e-5): - for i, xi in enumerate(x): - self.assertTrue(allclose(xi, y[i], rtol) or (isnan(xi) and isnan(y[i]))) - - def test_nearest(self): - N = 5 - x = arange(N) - y = arange(N) - self.assertAllclose(y, nearest(x, y, x+.1)) - self.assertAllclose(y, nearest(x, y, x-.1)) - - def test_linear(self): - N = 3000. - x = arange(N) - y = arange(N) - new_x = arange(N)+0.5 - t1 = time.clock() - new_y = linear(x, y, new_x) - t2 = time.clock() - #print "time for linear interpolation with N = %i:" % N, t2 - t1 - - self.assertAllclose(new_y[:5], [0.5, 1.5, 2.5, 3.5, 4.5]) - - def test_block_average_above(self): - N = 3000. - x = arange(N) - y = arange(N) - - new_x = arange(N/2)*2 - t1 = time.clock() - new_y = block_average_above(x, y, new_x) - t2 = time.clock() - #print "time for block_avg_above interpolation with N = %i:" % N, t2 - t1 - self.assertAllclose(new_y[:5], [0.0, 0.5, 2.5, 4.5, 6.5]) - - def test_linear2(self): - N = 3000. - x = arange(N) - y = ones((100,N)) * arange(N) - new_x = arange(N)+0.5 - t1 = time.clock() - new_y = linear(x, y, new_x) - t2 = time.clock() - #print "time for 2D linear interpolation with N = %i:" % N, t2 - t1 - self.assertAllclose(new_y[:5,:5], - [[ 0.5, 1.5, 2.5, 3.5, 4.5], - [ 0.5, 1.5, 2.5, 3.5, 4.5], - [ 0.5, 1.5, 2.5, 3.5, 4.5], - [ 0.5, 1.5, 2.5, 3.5, 4.5], - [ 0.5, 1.5, 2.5, 3.5, 4.5]]) - - def test_logarithmic(self): - N = 4000. 
- x = arange(N) - y = arange(N) - new_x = arange(N)+0.5 - t1 = time.clock() - new_y = logarithmic(x, y, new_x) - t2 = time.clock() - #print "time for logarithmic interpolation with N = %i:" % N, t2 - t1 - correct_y = [np.NaN, 1.41421356, 2.44948974, 3.46410162, 4.47213595] - self.assertAllclose(new_y[:5], correct_y) - - def runTest(self): - test_list = [name for name in dir(self) if name.find('test_')==0] - for test_name in test_list: - exec("self.%s()" % test_name) - -if __name__ == '__main__': - unittest.main() diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_ndgriddata.py b/scipy-0.10.1/scipy/interpolate/tests/test_ndgriddata.py deleted file mode 100644 index ada640f0db..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_ndgriddata.py +++ /dev/null @@ -1,94 +0,0 @@ -import numpy as np -from numpy.testing import assert_equal, assert_array_equal, assert_allclose, \ - run_module_suite - -from scipy.interpolate import griddata - - -class TestGriddata(object): - def test_fill_value(self): - x = [(0,0), (0,1), (1,0)] - y = [1, 2, 3] - - yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1) - assert_array_equal(yi, [-1., -1, 1]) - - yi = griddata(x, y, [(1,1), (1,2), (0,0)]) - assert_array_equal(yi, [np.nan, np.nan, 1]) - - def test_alternative_call(self): - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = (np.arange(x.shape[0], dtype=np.double)[:,None] - + np.array([0,1])[None,:]) - - for method in ('nearest', 'linear', 'cubic'): - yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method) - assert_allclose(y, yi, atol=1e-14, err_msg=method) - - def test_multivalue_2d(self): - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = (np.arange(x.shape[0], dtype=np.double)[:,None] - + np.array([0,1])[None,:]) - - for method in ('nearest', 'linear', 'cubic'): - yi = griddata(x, y, x, method=method) - assert_allclose(y, yi, atol=1e-14, err_msg=method) - - 
def test_multipoint_2d(self): - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = np.arange(x.shape[0], dtype=np.double) - - xi = x[:,None,:] + np.array([0,0,0])[None,:,None] - - for method in ('nearest', 'linear', 'cubic'): - yi = griddata(x, y, xi, method=method) - - assert_equal(yi.shape, (5, 3), err_msg=method) - assert_allclose(yi, np.tile(y[:,None], (1, 3)), - atol=1e-14, err_msg=method) - - def test_complex_2d(self): - x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], - dtype=np.double) - y = np.arange(x.shape[0], dtype=np.double) - y = y - 2j*y[::-1] - - xi = x[:,None,:] + np.array([0,0,0])[None,:,None] - - for method in ('nearest', 'linear', 'cubic'): - yi = griddata(x, y, xi, method=method) - - assert_equal(yi.shape, (5, 3), err_msg=method) - assert_allclose(yi, np.tile(y[:,None], (1, 3)), - atol=1e-14, err_msg=method) - - def test_1d(self): - x = np.array([1, 2.5, 3, 4.5, 5, 6]) - y = np.array([1, 2, 0, 3.9, 2, 1]) - - for method in ('nearest', 'linear', 'cubic'): - assert_allclose(griddata(x, y, x, method=method), y, - err_msg=method, atol=1e-14) - assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y, - err_msg=method, atol=1e-14) - assert_allclose(griddata((x,), y, (x,), method=method), y, - err_msg=method, atol=1e-14) - - def test_1d_unsorted(self): - x = np.array([2.5, 1, 4.5, 5, 6, 3]) - y = np.array([1, 2, 0, 3.9, 2, 1]) - - for method in ('nearest', 'linear', 'cubic'): - assert_allclose(griddata(x, y, x, method=method), y, - err_msg=method, atol=1e-14) - assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y, - err_msg=method, atol=1e-14) - assert_allclose(griddata((x,), y, (x,), method=method), y, - err_msg=method, atol=1e-14) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_polyint.py b/scipy-0.10.1/scipy/interpolate/tests/test_polyint.py deleted file mode 100644 index 74db358a9f..0000000000 --- 
a/scipy-0.10.1/scipy/interpolate/tests/test_polyint.py +++ /dev/null @@ -1,286 +0,0 @@ - -from numpy.testing import assert_almost_equal, assert_array_equal, \ - TestCase, run_module_suite -from scipy.interpolate import KroghInterpolator, krogh_interpolate, \ - BarycentricInterpolator, barycentric_interpolate, \ - PiecewisePolynomial, piecewise_polynomial_interpolate, \ - approximate_taylor_polynomial -import scipy -import numpy as np -from scipy.interpolate import splrep, splev - -class CheckKrogh(TestCase): - def setUp(self): - self.true_poly = scipy.poly1d([-2,3,1,5,-4]) - self.test_xs = np.linspace(-1,1,100) - self.xs = np.linspace(-1,1,5) - self.ys = self.true_poly(self.xs) - - def test_lagrange(self): - P = KroghInterpolator(self.xs,self.ys) - assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) - - def test_scalar(self): - P = KroghInterpolator(self.xs,self.ys) - assert_almost_equal(self.true_poly(7),P(7)) - assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7))) - - def test_derivatives(self): - P = KroghInterpolator(self.xs,self.ys) - D = P.derivatives(self.test_xs) - for i in xrange(D.shape[0]): - assert_almost_equal(self.true_poly.deriv(i)(self.test_xs), - D[i]) - - def test_low_derivatives(self): - P = KroghInterpolator(self.xs,self.ys) - D = P.derivatives(self.test_xs,len(self.xs)+2) - for i in xrange(D.shape[0]): - assert_almost_equal(self.true_poly.deriv(i)(self.test_xs), - D[i]) - - def test_derivative(self): - P = KroghInterpolator(self.xs,self.ys) - m = 10 - r = P.derivatives(self.test_xs,m) - for i in xrange(m): - assert_almost_equal(P.derivative(self.test_xs,i),r[i]) - - def test_high_derivative(self): - P = KroghInterpolator(self.xs,self.ys) - for i in xrange(len(self.xs),2*len(self.xs)): - assert_almost_equal(P.derivative(self.test_xs,i), - np.zeros(len(self.test_xs))) - - def test_hermite(self): - xs = [0,0,0,1,1,1,2] - ys = [self.true_poly(0), - self.true_poly.deriv(1)(0), - self.true_poly.deriv(2)(0), - 
self.true_poly(1), - self.true_poly.deriv(1)(1), - self.true_poly.deriv(2)(1), - self.true_poly(2)] - P = KroghInterpolator(self.xs,self.ys) - assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) - - def test_vector(self): - xs = [0, 1, 2] - ys = np.array([[0,1],[1,0],[2,1]]) - P = KroghInterpolator(xs,ys) - Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])] - test_xs = np.linspace(-1,3,100) - assert_almost_equal(P(test_xs), - np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1)) - assert_almost_equal(P.derivatives(test_xs), - np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]), - (1,2,0))) - - def test_empty(self): - P = KroghInterpolator(self.xs,self.ys) - assert_array_equal(P([]), []) - - def test_shapes_scalarvalue(self): - P = KroghInterpolator(self.xs,self.ys) - assert_array_equal(np.shape(P(0)), ()) - assert_array_equal(np.shape(P(np.array(0))), ()) - assert_array_equal(np.shape(P([0])), (1,)) - assert_array_equal(np.shape(P([0,1])), (2,)) - - def test_shapes_scalarvalue_derivative(self): - P = KroghInterpolator(self.xs,self.ys) - n = P.n - assert_array_equal(np.shape(P.derivatives(0)), (n,)) - assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,)) - assert_array_equal(np.shape(P.derivatives([0])), (n,1)) - assert_array_equal(np.shape(P.derivatives([0,1])), (n,2)) - - def test_shapes_vectorvalue(self): - P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3))) - assert_array_equal(np.shape(P(0)), (3,)) - assert_array_equal(np.shape(P([0])), (1,3)) - assert_array_equal(np.shape(P([0,1])), (2,3)) - - def test_shapes_1d_vectorvalue(self): - P = KroghInterpolator(self.xs,np.outer(self.ys,[1])) - assert_array_equal(np.shape(P(0)), (1,)) - assert_array_equal(np.shape(P([0])), (1,1)) - assert_array_equal(np.shape(P([0,1])), (2,1)) - - def test_shapes_vectorvalue_derivative(self): - P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3))) - n = P.n - assert_array_equal(np.shape(P.derivatives(0)), (n,3)) - 
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3)) - assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3)) - - def test_wrapper(self): - P = KroghInterpolator(self.xs,self.ys) - assert_almost_equal(P(self.test_xs),krogh_interpolate(self.xs,self.ys,self.test_xs)) - assert_almost_equal(P.derivative(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=2)) - assert_almost_equal(P.derivatives(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=[0,1])) - -class CheckTaylor(TestCase): - def test_exponential(self): - degree = 5 - p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15) - for i in xrange(degree+1): - assert_almost_equal(p(0),1) - p = p.deriv() - assert_almost_equal(p(0),0) - -class CheckBarycentric(TestCase): - def setUp(self): - self.true_poly = scipy.poly1d([-2,3,1,5,-4]) - self.test_xs = np.linspace(-1,1,100) - self.xs = np.linspace(-1,1,5) - self.ys = self.true_poly(self.xs) - - def test_lagrange(self): - P = BarycentricInterpolator(self.xs,self.ys) - assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) - - def test_scalar(self): - P = BarycentricInterpolator(self.xs,self.ys) - assert_almost_equal(self.true_poly(7),P(7)) - assert_almost_equal(self.true_poly(np.array(7)),P(np.array(7))) - - def test_delayed(self): - P = BarycentricInterpolator(self.xs) - P.set_yi(self.ys) - assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) - - def test_append(self): - P = BarycentricInterpolator(self.xs[:3],self.ys[:3]) - P.add_xi(self.xs[3:],self.ys[3:]) - assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) - - def test_vector(self): - xs = [0, 1, 2] - ys = np.array([[0,1],[1,0],[2,1]]) - P = BarycentricInterpolator(xs,ys) - Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])] - test_xs = np.linspace(-1,3,100) - assert_almost_equal(P(test_xs), - np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1)) - - def test_shapes_scalarvalue(self): - P = 
BarycentricInterpolator(self.xs,self.ys) - assert_array_equal(np.shape(P(0)), ()) - assert_array_equal(np.shape(P(np.array(0))), ()) - assert_array_equal(np.shape(P([0])), (1,)) - assert_array_equal(np.shape(P([0,1])), (2,)) - - def test_shapes_vectorvalue(self): - P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3))) - assert_array_equal(np.shape(P(0)), (3,)) - assert_array_equal(np.shape(P([0])), (1,3)) - assert_array_equal(np.shape(P([0,1])), (2,3)) - - def test_shapes_1d_vectorvalue(self): - P = BarycentricInterpolator(self.xs,np.outer(self.ys,[1])) - assert_array_equal(np.shape(P(0)), (1,)) - assert_array_equal(np.shape(P([0])), (1,1)) - assert_array_equal(np.shape(P([0,1])), (2,1)) - - def test_wrapper(self): - P = BarycentricInterpolator(self.xs,self.ys) - assert_almost_equal(P(self.test_xs),barycentric_interpolate(self.xs,self.ys,self.test_xs)) - -class CheckPiecewise(TestCase): - def setUp(self): - self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0) - self.test_xs = np.linspace(-1,6,100) - self.spline_ys = splev(self.test_xs, self.tck) - self.spline_yps = splev(self.test_xs, self.tck, der=1) - self.xi = np.unique(self.tck[0]) - self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi] - - def test_construction(self): - P = PiecewisePolynomial(self.xi, self.yi, 3) - assert_almost_equal(P(self.test_xs), self.spline_ys) - - def test_scalar(self): - P = PiecewisePolynomial(self.xi,self.yi,3) - assert_almost_equal(P(self.test_xs[0]),self.spline_ys[0]) - assert_almost_equal(P.derivative(self.test_xs[0],1),self.spline_yps[0]) - assert_almost_equal(P(np.array(self.test_xs[0])),self.spline_ys[0]) - assert_almost_equal(P.derivative(np.array(self.test_xs[0]),1), - self.spline_yps[0]) - - def test_derivative(self): - P = PiecewisePolynomial(self.xi,self.yi,3) - assert_almost_equal(P.derivative(self.test_xs,1),self.spline_yps) - - def test_derivatives(self): - P = PiecewisePolynomial(self.xi,self.yi,3) - m = 4 - r = 
P.derivatives(self.test_xs,m) - #print r.shape, r - for i in xrange(m): - assert_almost_equal(P.derivative(self.test_xs,i),r[i]) - - def test_vector(self): - xs = [0, 1, 2] - ys = [[[0,1]],[[1,0],[-1,-1]],[[2,1]]] - P = PiecewisePolynomial(xs,ys) - Pi = [PiecewisePolynomial(xs,[[yd[i] for yd in y] for y in ys]) - for i in xrange(len(ys[0][0]))] - test_xs = np.linspace(-1,3,100) - assert_almost_equal(P(test_xs), - np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1)) - assert_almost_equal(P.derivative(test_xs,1), - np.transpose(np.asarray([p.derivative(test_xs,1) for p in Pi]), - (1,0))) - - def test_incremental(self): - P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3) - for i in xrange(1,len(self.xi)): - P.append(self.xi[i],self.yi[i],3) - assert_almost_equal(P(self.test_xs),self.spline_ys) - - def test_shapes_scalarvalue(self): - P = PiecewisePolynomial(self.xi,self.yi,4) - assert_array_equal(np.shape(P(0)), ()) - assert_array_equal(np.shape(P(np.array(0))), ()) - assert_array_equal(np.shape(P([0])), (1,)) - assert_array_equal(np.shape(P([0,1])), (2,)) - - def test_shapes_scalarvalue_derivative(self): - P = PiecewisePolynomial(self.xi,self.yi,4) - n = 4 - assert_array_equal(np.shape(P.derivative(0,1)), ()) - assert_array_equal(np.shape(P.derivative(np.array(0),1)), ()) - assert_array_equal(np.shape(P.derivative([0],1)), (1,)) - assert_array_equal(np.shape(P.derivative([0,1],1)), (2,)) - - def test_shapes_vectorvalue(self): - yi = np.multiply.outer(np.asarray(self.yi),np.arange(3)) - P = PiecewisePolynomial(self.xi,yi,4) - assert_array_equal(np.shape(P(0)), (3,)) - assert_array_equal(np.shape(P([0])), (1,3)) - assert_array_equal(np.shape(P([0,1])), (2,3)) - - def test_shapes_vectorvalue_1d(self): - yi = np.multiply.outer(np.asarray(self.yi),np.arange(1)) - P = PiecewisePolynomial(self.xi,yi,4) - assert_array_equal(np.shape(P(0)), (1,)) - assert_array_equal(np.shape(P([0])), (1,1)) - assert_array_equal(np.shape(P([0,1])), (2,1)) - - def 
test_shapes_vectorvalue_derivative(self): - P = PiecewisePolynomial(self.xi,np.multiply.outer(self.yi,np.arange(3)),4) - n = 4 - assert_array_equal(np.shape(P.derivative(0,1)), (3,)) - assert_array_equal(np.shape(P.derivative([0],1)), (1,3)) - assert_array_equal(np.shape(P.derivative([0,1],1)), (2,3)) - - def test_wrapper(self): - P = PiecewisePolynomial(self.xi,self.yi) - assert_almost_equal(P(self.test_xs),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs)) - assert_almost_equal(P.derivative(self.test_xs,2),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs,der=2)) - assert_almost_equal(P.derivatives(self.test_xs,2),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs,der=[0,1])) - - -if __name__=='__main__': - run_module_suite() diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_rbf.py b/scipy-0.10.1/scipy/interpolate/tests/test_rbf.py deleted file mode 100644 index 15835e41b9..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_rbf.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -# Created by John Travers, Robert Hetland, 2007 -""" Test functions for rbf module """ - -import numpy as np -from numpy.testing import assert_, assert_array_almost_equal, assert_almost_equal -from numpy import linspace, sin, random, exp, allclose -from scipy.interpolate.rbf import Rbf - -FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian', - 'cubic', 'quintic', 'thin-plate', 'linear') - -def check_rbf1d_interpolation(function): - """Check that the Rbf function interpolates throught the nodes (1D)""" - olderr = np.seterr(all="ignore") - try: - x = linspace(0,10,9) - y = sin(x) - rbf = Rbf(x, y, function=function) - yi = rbf(x) - assert_array_almost_equal(y, yi) - assert_almost_equal(rbf(float(x[0])), y[0]) - finally: - np.seterr(**olderr) - -def check_rbf2d_interpolation(function): - """Check that the Rbf function interpolates throught the nodes (2D)""" - olderr = np.seterr(all="ignore") - try: - x = random.rand(50,1)*4-2 
- y = random.rand(50,1)*4-2 - z = x*exp(-x**2-1j*y**2) - rbf = Rbf(x, y, z, epsilon=2, function=function) - zi = rbf(x, y) - zi.shape = x.shape - assert_array_almost_equal(z, zi) - finally: - np.seterr(**olderr) - -def check_rbf3d_interpolation(function): - """Check that the Rbf function interpolates throught the nodes (3D)""" - olderr = np.seterr(all="ignore") - try: - x = random.rand(50,1)*4-2 - y = random.rand(50,1)*4-2 - z = random.rand(50,1)*4-2 - d = x*exp(-x**2-y**2) - rbf = Rbf(x, y, z, d, epsilon=2, function=function) - di = rbf(x, y, z) - di.shape = x.shape - assert_array_almost_equal(di, d) - finally: - np.seterr(**olderr) - -def test_rbf_interpolation(): - for function in FUNCTIONS: - yield check_rbf1d_interpolation, function - yield check_rbf2d_interpolation, function - yield check_rbf3d_interpolation, function - -def check_rbf1d_regularity(function, atol): - """Check that the Rbf function approximates a smooth function well away - from the nodes.""" - olderr = np.seterr(all="ignore") - try: - x = linspace(0, 10, 9) - y = sin(x) - rbf = Rbf(x, y, function=function) - xi = linspace(0, 10, 100) - yi = rbf(xi) - #import matplotlib.pyplot as plt - #plt.figure() - #plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-') - #plt.title(function) - #plt.show() - msg = "abs-diff: %f" % abs(yi - sin(xi)).max() - assert_(allclose(yi, sin(xi), atol=atol), msg) - finally: - np.seterr(**olderr) - -def test_rbf_regularity(): - tolerances = { - 'multiquadric': 0.05, - 'inverse multiquadric': 0.02, - 'gaussian': 0.01, - 'cubic': 0.15, - 'quintic': 0.1, - 'thin-plate': 0.1, - 'linear': 0.2 - } - for function in FUNCTIONS: - yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2) - -def test_default_construction(): - """Check that the Rbf class can be constructed with the default - multiquadric basis function. 
Regression test for ticket #1228.""" - x = linspace(0,10,9) - y = sin(x) - rbf = Rbf(x, y) - yi = rbf(x) - assert_array_almost_equal(y, yi) - -def test_function_is_callable(): - """Check that the Rbf class can be constructed with function=callable.""" - x = linspace(0,10,9) - y = sin(x) - linfunc = lambda x:x - rbf = Rbf(x, y, function=linfunc) - yi = rbf(x) - assert_array_almost_equal(y, yi) - -def test_two_arg_function_is_callable(): - """Check that the Rbf class can be constructed with a two argument - function=callable.""" - def _func(self, r): - return self.epsilon + r - - x = linspace(0,10,9) - y = sin(x) - rbf = Rbf(x, y, function=_func) - yi = rbf(x) - assert_array_almost_equal(y, yi) diff --git a/scipy-0.10.1/scipy/interpolate/tests/test_regression.py b/scipy-0.10.1/scipy/interpolate/tests/test_regression.py deleted file mode 100644 index 08ad96f796..0000000000 --- a/scipy-0.10.1/scipy/interpolate/tests/test_regression.py +++ /dev/null @@ -1,13 +0,0 @@ -import numpy as np -import scipy.interpolate as interp -from numpy.testing import assert_almost_equal, TestCase - -class TestRegression(TestCase): - def test_spalde_scalar_input(self): - """Ticket #629""" - x = np.linspace(0,10) - y = x**3 - tck = interp.splrep(x, y, k=3, t=[5]) - res = interp.spalde(np.float64(1), tck) - des = np.array([ 1., 3., 6., 6.]) - assert_almost_equal(res, des) diff --git a/scipy-0.10.1/scipy/io/SConscript b/scipy-0.10.1/scipy/io/SConscript deleted file mode 100644 index c5b7b82725..0000000000 --- a/scipy-0.10.1/scipy/io/SConscript +++ /dev/null @@ -1,5 +0,0 @@ -# Last Change: Wed Mar 05 03:00 PM 2008 J -# vim:syntax=python -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) diff --git a/scipy-0.10.1/scipy/io/SConstruct b/scipy-0.10.1/scipy/io/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/io/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment 
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/io/__init__.py b/scipy-0.10.1/scipy/io/__init__.py deleted file mode 100644 index e5abef03a5..0000000000 --- a/scipy-0.10.1/scipy/io/__init__.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- encoding:utf-8 -*- -""" -================================== -Input and output (:mod:`scipy.io`) -================================== - -.. currentmodule:: scipy.io - -SciPy has many modules, classes, and functions available to read data -from and write data to a variety of file formats. - -.. seealso:: :ref:`numpy-reference.routines.io` (in Numpy) - -MATLAB® files -============= - -.. autosummary:: - :toctree: generated/ - - loadmat - Read a MATLAB style mat file (version 4 through 7.1) - savemat - Write a MATLAB style mat file (version 4 through 7.1) - -IDL® files -========== - -.. autosummary:: - :toctree: generated/ - - readsav - Read an IDL 'save' file - -Matrix Market files -=================== - -.. autosummary:: - :toctree: generated/ - - mminfo - Query matrix info from Matrix Market formatted file - mmread - Read matrix from Matrix Market formatted file - mmwrite - Write matrix to Matrix Market formatted file - -Other -===== - -.. autosummary:: - :toctree: generated/ - - save_as_module - Data saved as module, accessed on load as attirbutes - -Wav sound files (:mod:`scipy.io.wavfile`) -========================================= - -.. module:: scipy.io.wavfile - -.. autosummary:: - :toctree: generated/ - - read - write - -Arff files (:mod:`scipy.io.arff`) -================================= - -.. module:: scipy.io.arff - -.. autosummary:: - :toctree: generated/ - - loadarff - -Netcdf (:mod:`scipy.io.netcdf`) -=============================== - -.. module:: scipy.io.netcdf - -.. 
autosummary:: - :toctree: generated/ - - netcdf_file - A file object for NetCDF data - netcdf_variable - A data object for the netcdf module - -""" -# matfile read and write -from matlab import loadmat, savemat, byteordercodes - -# netCDF file support -from netcdf import netcdf_file, netcdf_variable - -from data_store import save_as_module -from mmio import mminfo, mmread, mmwrite -from idl import readsav -from harwell_boeing import hb_read, hb_write - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test - diff --git a/scipy-0.10.1/scipy/io/arff/__init__.py b/scipy-0.10.1/scipy/io/arff/__init__.py deleted file mode 100644 index e8e34e419f..0000000000 --- a/scipy-0.10.1/scipy/io/arff/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Module to read ARFF files, which are the standard data format for WEKA. - -ARFF is a text file format which support numerical, string and data values. -The format can also represent missing data and sparse data. - -See the `WEKA website -`_ -for more details about arff format and available datasets. - -Examples --------- - ->>> from scipy.io import arff ->>> from cStringIO import StringIO ->>> content = \"\"\" -... @relation foo -... @attribute width numeric -... @attribute height numeric -... @attribute color {red,green,blue,yellow,black} -... @data -... 5.0,3.25,blue -... 4.5,3.75,green -... 3.0,4.00,red -... 
\"\"\" ->>> f = StringIO(content) ->>> data, meta = arff.loadarff(f) ->>> data -array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')], - dtype=[('width', '>> meta -Dataset: foo -\twidth's type is numeric -\theight's type is numeric -\tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black') - -""" - -from arffread import * -import arffread - -__all__ = arffread.__all__ - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/io/arff/arffread.py b/scipy-0.10.1/scipy/io/arff/arffread.py deleted file mode 100644 index cf1e10dc63..0000000000 --- a/scipy-0.10.1/scipy/io/arff/arffread.py +++ /dev/null @@ -1,709 +0,0 @@ -#! /usr/bin/env python -# Last Change: Mon Aug 20 08:00 PM 2007 J -import re -import itertools - -import numpy as np - -from scipy.io.arff.utils import partial - -"""A module to read arff files.""" - -__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError'] - -# An Arff file is basically two parts: -# - header -# - data -# -# A header has each of its components starting by @META where META is one of -# the keyword (attribute of relation, for now). - -# TODO: -# - both integer and reals are treated as numeric -> the integer info is lost ! 
-# - Replace ValueError by ParseError or something - -# We know can handle the following: -# - numeric and nominal attributes -# - missing values for numeric attributes - -r_meta = re.compile('^\s*@') -# Match a comment -r_comment = re.compile(r'^%') -# Match an empty line -r_empty = re.compile(r'^\s+$') -# Match a header line, that is a line which starts by @ + a word -r_headerline = re.compile(r'^@\S*') -r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]') -r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)') -r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)') - -# To get attributes name enclosed with '' -r_comattrval = re.compile(r"'(..+)'\s+(..+$)") -# To get attributes name enclosed with '', possibly spread across multilines -r_mcomattrval = re.compile(r"'([..\n]+)'\s+(..+$)") -# To get normal attributes -r_wcomattrval = re.compile(r"(\S+)\s+(..+$)") - -#------------------------- -# Module defined exception -#------------------------- -class ArffError(IOError): - pass - -class ParseArffError(ArffError): - pass - -#------------------ -# Various utilities -#------------------ - -# An attribute is defined as @attribute name value -def parse_type(attrtype): - """Given an arff attribute value (meta data), returns its type. 
- - Expect the value to be a name.""" - uattribute = attrtype.lower().strip() - if uattribute[0] == '{': - return 'nominal' - elif uattribute[:len('real')] == 'real': - return 'numeric' - elif uattribute[:len('integer')] == 'integer': - return 'numeric' - elif uattribute[:len('numeric')] == 'numeric': - return 'numeric' - elif uattribute[:len('string')] == 'string': - return 'string' - elif uattribute[:len('relational')] == 'relational': - return 'relational' - else: - raise ParseArffError("unknown attribute %s" % uattribute) - - -def get_nominal(attribute): - """If attribute is nominal, returns a list of the values""" - return attribute.split(',') - - -def read_data_list(ofile): - """Read each line of the iterable and put it in a list.""" - data = [ofile.next()] - if data[0].strip()[0] == '{': - raise ValueError("This looks like a sparse ARFF: not supported yet") - data.extend([i for i in ofile]) - return data - - -def get_ndata(ofile): - """Read the whole file to get number of data attributes.""" - data = [ofile.next()] - loc = 1 - if data[0].strip()[0] == '{': - raise ValueError("This looks like a sparse ARFF: not supported yet") - for i in ofile: - loc += 1 - return loc - - -def maxnomlen(atrv): - """Given a string containing a nominal type definition, returns the - string len of the biggest component. - - A nominal type is defined as seomthing framed between brace ({}). - - Parameters - ---------- - atrv : str - Nominal type definition - - Returns - ------- - slen : int - length of longest component - - Examples - -------- - maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of - ratata, the longest nominal value). - - >>> maxnomlen("{floup, bouga, fl, ratata}") - 6 - """ - nomtp = get_nom_val(atrv) - return max(len(i) for i in nomtp) - - -def get_nom_val(atrv): - """Given a string containing a nominal type, returns a tuple of the - possible values. - - A nominal type is defined as something framed between braces ({}). 
- - Parameters - ---------- - atrv : str - Nominal type definition - - Returns - ------- - poss_vals : tuple - possible values - - Examples - -------- - >>> get_nom_val("{floup, bouga, fl, ratata}") - ('floup', 'bouga', 'fl', 'ratata') - """ - r_nominal = re.compile('{(..+)}') - m = r_nominal.match(atrv) - if m: - return tuple(i.strip() for i in m.group(1).split(',')) - else: - raise ValueError("This does not look like a nominal string") - - -def go_data(ofile): - """Skip header. - - the first next() call of the returned iterator will be the @data line""" - return itertools.dropwhile(lambda x : not r_datameta.match(x), ofile) - - -#---------------- -# Parsing header -#---------------- -def tokenize_attribute(iterable, attribute): - """Parse a raw string in header (eg starts by @attribute). - - Given a raw string attribute, try to get the name and type of the - attribute. Constraints: - - * The first line must start with @attribute (case insensitive, and - space like characters before @attribute are allowed) - * Works also if the attribute is spread on multilines. - * Works if empty lines or comments are in between - - Parameters - ---------- - attribute : str - the attribute string. - - Returns - ------- - name : str - name of the attribute - value : str - value of the attribute - next : str - next line to be parsed - - Examples - -------- - If attribute is a string defined in python as r"floupi real", will - return floupi as name, and real as value. - - >>> iterable = iter([0] * 10) # dummy iterator - >>> tokenize_attribute(iterable, r"@attribute floupi real") - ('floupi', 'real', 0) - - If attribute is r"'floupi 2' real", will return 'floupi 2' as name, - and real as value. 
- - >>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ") - ('floupi 2', 'real', 0) - - """ - sattr = attribute.strip() - mattr = r_attribute.match(sattr) - if mattr: - # atrv is everything after @attribute - atrv = mattr.group(1) - if r_comattrval.match(atrv): - name, type = tokenize_single_comma(atrv) - next_item = iterable.next() - elif r_wcomattrval.match(atrv): - name, type = tokenize_single_wcomma(atrv) - next_item = iterable.next() - else: - # Not sure we should support this, as it does not seem supported by - # weka. - raise ValueError("multi line not supported yet") - #name, type, next_item = tokenize_multilines(iterable, atrv) - else: - raise ValueError("First line unparsable: %s" % sattr) - - if type == 'relational': - raise ValueError("relational attributes not supported yet") - return name, type, next_item - - -def tokenize_multilines(iterable, val): - """Can tokenize an attribute spread over several lines.""" - # If one line does not match, read all the following lines up to next - # line with meta character, and try to parse everything up to there. - if not r_mcomattrval.match(val): - all = [val] - i = iterable.next() - while not r_meta.match(i): - all.append(i) - i = iterable.next() - if r_mend.search(i): - raise ValueError("relational attribute not supported yet") - print "".join(all[:-1]) - m = r_comattrval.match("".join(all[:-1])) - return m.group(1), m.group(2), i - else: - raise ValueError("Cannot parse attribute names spread over multi "\ - "lines yet") - - -def tokenize_single_comma(val): - # XXX we match twice the same string (here and at the caller level). It is - # stupid, but it is easier for now... 
- m = r_comattrval.match(val) - if m: - try: - name = m.group(1).strip() - type = m.group(2).strip() - except IndexError: - raise ValueError("Error while tokenizing attribute") - else: - raise ValueError("Error while tokenizing single %s" % val) - return name, type - - -def tokenize_single_wcomma(val): - # XXX we match twice the same string (here and at the caller level). It is - # stupid, but it is easier for now... - m = r_wcomattrval.match(val) - if m: - try: - name = m.group(1).strip() - type = m.group(2).strip() - except IndexError: - raise ValueError("Error while tokenizing attribute") - else: - raise ValueError("Error while tokenizing single %s" % val) - return name, type - - -def read_header(ofile): - """Read the header of the iterable ofile.""" - i = ofile.next() - - # Pass first comments - while r_comment.match(i): - i = ofile.next() - - # Header is everything up to DATA attribute ? - relation = None - attributes = [] - while not r_datameta.match(i): - m = r_headerline.match(i) - if m: - isattr = r_attribute.match(i) - if isattr: - name, type, i = tokenize_attribute(ofile, i) - attributes.append((name, type)) - else: - isrel = r_relation.match(i) - if isrel: - relation = isrel.group(1) - else: - raise ValueError("Error parsing line %s" % i) - i = ofile.next() - else: - i = ofile.next() - - return relation, attributes - - -#-------------------- -# Parsing actual data -#-------------------- -def safe_float(x): - """given a string x, convert it to a float. If the stripped string is a ?, - return a Nan (missing value). - - Parameters - ---------- - x : str - string to convert - - Returns - ------- - f : float - where float can be nan - - Examples - -------- - >>> safe_float('1') - 1.0 - >>> safe_float('1\\n') - 1.0 - >>> safe_float('?\\n') - nan - """ - if '?' 
in x: - return np.nan - else: - return np.float(x) - - -def safe_nominal(value, pvalue): - svalue = value.strip() - if svalue in pvalue: - return svalue - elif svalue == '?': - return svalue - else: - raise ValueError("%s value not in %s" % (str(svalue), str(pvalue))) - - -def get_delim(line): - """Given a string representing a line of data, check whether the - delimiter is ',' or space. - - Parameters - ---------- - line : str - line of data - - Returns - ------- - delim : {',', ' '} - - Examples - -------- - >>> get_delim(',') - ',' - >>> get_delim(' ') - ' ' - >>> get_delim(', ') - ',' - >>> get_delim('x') - Traceback (most recent call last): - ... - ValueError: delimiter not understood: x - """ - if ',' in line: - return ',' - if ' ' in line: - return ' ' - raise ValueError("delimiter not understood: " + line) - - -class MetaData(object): - """Small container to keep useful informations on a ARFF dataset. - - Knows about attributes names and types. - - Examples - -------- - data, meta = loadarff('iris.arff') - # This will print the attributes names of the iris.arff dataset - for i in meta: - print i - # This works too - meta.names() - # Getting attribute type - types = meta.types() - - Notes - ----- - Also maintains the list of attributes in order, i.e. doing for i in - meta, where meta is an instance of MetaData, will return the - different attribute names in the order they were defined. 
- """ - def __init__(self, rel, attr): - self.name = rel - # We need the dictionary to be ordered - # XXX: may be better to implement an ordered dictionary - self._attributes = {} - self._attrnames = [] - for name, value in attr: - tp = parse_type(value) - self._attrnames.append(name) - if tp == 'nominal': - self._attributes[name] = (tp, get_nom_val(value)) - else: - self._attributes[name] = (tp, None) - - def __repr__(self): - msg = "" - msg += "Dataset: %s\n" % self.name - for i in self._attrnames: - msg += "\t%s's type is %s" % (i, self._attributes[i][0]) - if self._attributes[i][1]: - msg += ", range is %s" % str(self._attributes[i][1]) - msg += '\n' - return msg - - def __iter__(self): - return iter(self._attrnames) - - def __getitem__(self, key): - return self._attributes[key] - - def names(self): - """Return the list of attribute names.""" - return self._attrnames - - def types(self): - """Return the list of attribute types.""" - attr_types = [self._attributes[name][0] for name in self._attrnames] - return attr_types - - -def loadarff(f): - """ - Read an arff file. - - The data is returned as a record array, which can be accessed much like - a dictionary of numpy arrays. For example, if one of the attributes is - called 'pressure', then its first 10 data points can be accessed from the - ``data`` record array like so: ``data['pressure'][0:10]`` - - - Parameters - ---------- - f : file-like or str - File-like object to read from, or filename to open. - - Returns - ------- - data : record array - The data of the arff file, accessible by attribute names. - meta : `MetaData` - Contains information about the arff file such as name and - type of attributes, the relation (name of the dataset), etc... - - Raises - ------ - `ParseArffError` - This is raised if the given file is not ARFF-formatted. - NotImplementedError - The ARFF file has an attribute which is not supported yet. - - Notes - ----- - - This function should be able to read most arff files. 
Not - implemented functionality include: - - * date type attributes - * string type attributes - - It can read files with numeric and nominal attributes. It cannot read - files with sparse data ({} in the file). However, this function can - read files with missing data (? in the file), representing the data - points as NaNs. - - """ - if hasattr(f, 'read'): - ofile = f - else: - ofile = open(f, 'rt') - try: - return _loadarff(ofile) - finally: - if ofile is not f: # only close what we opened - ofile.close() - -def _loadarff(ofile): - # Parse the header file - try: - rel, attr = read_header(ofile) - except ValueError, e: - msg = "Error while parsing header, error was: " + str(e) - raise ParseArffError(msg) - - # Check whether we have a string attribute (not supported yet) - hasstr = False - for name, value in attr: - type = parse_type(value) - if type == 'string': - hasstr = True - - meta = MetaData(rel, attr) - - # XXX The following code is not great - # Build the type descriptor descr and the list of convertors to convert - # each attribute to the suitable type (which should match the one in - # descr). - - # This can be used once we want to support integer as integer values and - # not as numeric anymore (using masked arrays ?). - acls2dtype = {'real' : np.float, 'integer' : np.float, 'numeric' : np.float} - acls2conv = {'real' : safe_float, 'integer' : safe_float, 'numeric' : safe_float} - descr = [] - convertors = [] - if not hasstr: - for name, value in attr: - type = parse_type(value) - if type == 'date': - raise ValueError("date type not supported yet, sorry") - elif type == 'nominal': - n = maxnomlen(value) - descr.append((name, 'S%d' % n)) - pvalue = get_nom_val(value) - convertors.append(partial(safe_nominal, pvalue = pvalue)) - else: - descr.append((name, acls2dtype[type])) - convertors.append(safe_float) - #dc.append(acls2conv[type]) - #sdescr.append((name, acls2sdtype[type])) - else: - # How to support string efficiently ? 
Ideally, we should know the max - # size of the string before allocating the numpy array. - raise NotImplementedError("String attributes not supported yet, sorry") - - ni = len(convertors) - - # Get the delimiter from the first line of data: - def next_data_line(row_iter): - """Assumes we are already in the data part (eg after @data).""" - raw = row_iter.next() - while r_empty.match(raw): - raw = row_iter.next() - while r_comment.match(raw): - raw = row_iter.next() - return raw - - try: - try: - dtline = next_data_line(ofile) - delim = get_delim(dtline) - except ValueError, e: - raise ParseArffError("Error while parsing delimiter: " + str(e)) - finally: - ofile.seek(0, 0) - ofile = go_data(ofile) - # skip the @data line - ofile.next() - - def generator(row_iter, delim = ','): - # TODO: this is where we are spending times (~80%). I think things - # could be made more efficiently: - # - We could for example "compile" the function, because some values - # do not change here. - # - The function to convert a line to dtyped values could also be - # generated on the fly from a string and be executed instead of - # looping. - # - The regex are overkill: for comments, checking that a line starts - # by % should be enough and faster, and for empty lines, same thing - # --> this does not seem to change anything. - - # We do not abstract skipping comments and empty lines for performances - # reason. - raw = row_iter.next() - while r_empty.match(raw): - raw = row_iter.next() - while r_comment.match(raw): - raw = row_iter.next() - - # 'compiling' the range since it does not change - # Note, I have already tried zipping the converters and - # row elements and got slightly worse performance. 
- elems = range(ni) - - row = raw.split(delim) - yield tuple([convertors[i](row[i]) for i in elems]) - for raw in row_iter: - while r_comment.match(raw): - raw = row_iter.next() - while r_empty.match(raw): - raw = row_iter.next() - row = raw.split(delim) - yield tuple([convertors[i](row[i]) for i in elems]) - - a = generator(ofile, delim = delim) - # No error should happen here: it is a bug otherwise - data = np.fromiter(a, descr) - return data, meta - - -#----- -# Misc -#----- -def basic_stats(data): - nbfac = data.size * 1. / (data.size - 1) - return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac - - -def print_attribute(name, tp, data): - type = tp[0] - if type == 'numeric' or type == 'real' or type == 'integer': - min, max, mean, std = basic_stats(data) - print "%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std) - else: - msg = name + ",{" - for i in range(len(tp[1])-1): - msg += tp[1][i] + "," - msg += tp[1][-1] - msg += "}" - print msg - - -def test_weka(filename): - data, meta = loadarff(filename) - print len(data.dtype) - print data.size - for i in meta: - print_attribute(i,meta[i],data[i]) - -# make sure nose does not find this as a test -test_weka.__test__ = False - - -def floupi(filename): - data, meta = loadarff(filename) - from attrselect import print_dataset_info - print_dataset_info(data) - print "relation %s, has %d instances" % (meta.name, data.size) - itp = iter(types) - for i in data.dtype.names: - print_attribute(i,itp.next(),data[i]) - #tp = itp.next() - #if tp == 'numeric' or tp == 'real' or tp == 'integer': - # min, max, mean, std = basic_stats(data[i]) - # print "\tinstance %s: min %f, max %f, mean %f, std %f" % \ - # (i, min, max, mean, std) - #else: - # print "\tinstance %s is non numeric" % i - - -if __name__ == '__main__': - #import glob - #for i in glob.glob('arff.bak/data/*'): - # relation, attributes = read_header(open(i)) - # print "Parsing header of %s: relation %s, %d attributes" % (i, - # relation, 
len(attributes)) - - import sys - filename = sys.argv[1] - #filename = 'arff.bak/data/pharynx.arff' - #floupi(filename) - test_weka(filename) - - #gf = [] - #wf = [] - #for i in glob.glob('arff.bak/data/*'): - # try: - # print "=============== reading %s ======================" % i - # floupi(i) - # gf.append(i) - # except ValueError, e: - # print "!!!! Error parsing the file !!!!!" - # print e - # wf.append(i) - # except IndexError, e: - # print "!!!! Error parsing the file !!!!!" - # print e - # wf.append(i) - # except ArffError, e: - # print "!!!! Error parsing the file !!!!!" - # print e - # wf.append(i) - - #print "%d good files" % len(gf) - #print "%d bad files" % len(wf) diff --git a/scipy-0.10.1/scipy/io/arff/myfunctools.py b/scipy-0.10.1/scipy/io/arff/myfunctools.py deleted file mode 100644 index 445ed9ab6c..0000000000 --- a/scipy-0.10.1/scipy/io/arff/myfunctools.py +++ /dev/null @@ -1,18 +0,0 @@ -# Last Change: Mon Aug 20 01:00 PM 2007 J -# Implement partial application (should only be used if functools is not -# available (eg python < 2.5) - -class partial: - def __init__(self, fun, *args, **kwargs): - self.fun = fun - self.pending = args[:] - self.kwargs = kwargs.copy() - - def __call__(self, *args, **kwargs): - if kwargs and self.kwargs: - kw = self.kwargs.copy() - kw.update(kwargs) - else: - kw = kwargs or self.kwargs - - return self.fun(*(self.pending + args), **kw) diff --git a/scipy-0.10.1/scipy/io/arff/setup.py b/scipy-0.10.1/scipy/io/arff/setup.py deleted file mode 100755 index 824108517e..0000000000 --- a/scipy-0.10.1/scipy/io/arff/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='io',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('arff', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git 
a/scipy-0.10.1/scipy/io/arff/tests/data/iris.arff b/scipy-0.10.1/scipy/io/arff/tests/data/iris.arff deleted file mode 100644 index 780480c7c6..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/iris.arff +++ /dev/null @@ -1,225 +0,0 @@ -% 1. Title: Iris Plants Database -% -% 2. Sources: -% (a) Creator: R.A. Fisher -% (b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov) -% (c) Date: July, 1988 -% -% 3. Past Usage: -% - Publications: too many to mention!!! Here are a few. -% 1. Fisher,R.A. "The use of multiple measurements in taxonomic problems" -% Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions -% to Mathematical Statistics" (John Wiley, NY, 1950). -% 2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis. -% (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218. -% 3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System -% Structure and Classification Rule for Recognition in Partially Exposed -% Environments". IEEE Transactions on Pattern Analysis and Machine -% Intelligence, Vol. PAMI-2, No. 1, 67-71. -% -- Results: -% -- very low misclassification rates (0% for the setosa class) -% 4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE -% Transactions on Information Theory, May 1972, 431-433. -% -- Results: -% -- very low misclassification rates again -% 5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II -% conceptual clustering system finds 3 classes in the data. -% -% 4. Relevant Information: -% --- This is perhaps the best known database to be found in the pattern -% recognition literature. Fisher's paper is a classic in the field -% and is referenced frequently to this day. (See Duda & Hart, for -% example.) The data set contains 3 classes of 50 instances each, -% where each class refers to a type of iris plant. One class is -% linearly separable from the other 2; the latter are NOT linearly -% separable from each other. 
-% --- Predicted attribute: class of iris plant. -% --- This is an exceedingly simple domain. -% -% 5. Number of Instances: 150 (50 in each of three classes) -% -% 6. Number of Attributes: 4 numeric, predictive attributes and the class -% -% 7. Attribute Information: -% 1. sepal length in cm -% 2. sepal width in cm -% 3. petal length in cm -% 4. petal width in cm -% 5. class: -% -- Iris Setosa -% -- Iris Versicolour -% -- Iris Virginica -% -% 8. Missing Attribute Values: None -% -% Summary Statistics: -% Min Max Mean SD Class Correlation -% sepal length: 4.3 7.9 5.84 0.83 0.7826 -% sepal width: 2.0 4.4 3.05 0.43 -0.4194 -% petal length: 1.0 6.9 3.76 1.76 0.9490 (high!) -% petal width: 0.1 2.5 1.20 0.76 0.9565 (high!) -% -% 9. Class Distribution: 33.3% for each of 3 classes. - -@RELATION iris - -@ATTRIBUTE sepallength REAL -@ATTRIBUTE sepalwidth REAL -@ATTRIBUTE petallength REAL -@ATTRIBUTE petalwidth REAL -@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} - -@DATA -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa -4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa -5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa 
-4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa -5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor -4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor -6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor -6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor -5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor 
-5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor -6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica -6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica -7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica -6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica -% -% -% diff --git a/scipy-0.10.1/scipy/io/arff/tests/data/missing.arff b/scipy-0.10.1/scipy/io/arff/tests/data/missing.arff deleted file mode 100644 index dedc64c8fa..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/missing.arff +++ /dev/null @@ -1,8 +0,0 @@ 
-% This arff file contains some missing data -@relation missing -@attribute yop real -@attribute yap real -@data -1,5 -2,4 -?,? diff --git a/scipy-0.10.1/scipy/io/arff/tests/data/test1.arff b/scipy-0.10.1/scipy/io/arff/tests/data/test1.arff deleted file mode 100644 index ccc8e0cc7c..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/test1.arff +++ /dev/null @@ -1,10 +0,0 @@ -@RELATION test1 - -@ATTRIBUTE attr0 REAL -@ATTRIBUTE attr1 REAL -@ATTRIBUTE attr2 REAL -@ATTRIBUTE attr3 REAL -@ATTRIBUTE class {class0, class1, class2, class3} - -@DATA -0.1, 0.2, 0.3, 0.4,class1 diff --git a/scipy-0.10.1/scipy/io/arff/tests/data/test2.arff b/scipy-0.10.1/scipy/io/arff/tests/data/test2.arff deleted file mode 100644 index 30f0dbf91b..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/test2.arff +++ /dev/null @@ -1,15 +0,0 @@ -@RELATION test2 - -@ATTRIBUTE attr0 REAL -@ATTRIBUTE attr1 real -@ATTRIBUTE attr2 integer -@ATTRIBUTE attr3 Integer -@ATTRIBUTE attr4 Numeric -@ATTRIBUTE attr5 numeric -@ATTRIBUTE attr6 string -@ATTRIBUTE attr7 STRING -@ATTRIBUTE attr8 {bla} -@ATTRIBUTE attr9 {bla, bla} - -@DATA -0.1, 0.2, 0.3, 0.4,class1 diff --git a/scipy-0.10.1/scipy/io/arff/tests/data/test3.arff b/scipy-0.10.1/scipy/io/arff/tests/data/test3.arff deleted file mode 100644 index 23da3b3096..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/test3.arff +++ /dev/null @@ -1,6 +0,0 @@ -@RELATION test3 - -@ATTRIBUTE attr0 crap - -@DATA -0.1, 0.2, 0.3, 0.4,class1 diff --git a/scipy-0.10.1/scipy/io/arff/tests/data/test4.arff b/scipy-0.10.1/scipy/io/arff/tests/data/test4.arff deleted file mode 100644 index c56fd930b9..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/test4.arff +++ /dev/null @@ -1,14 +0,0 @@ -@RELATION test5 - -@ATTRIBUTE attr0 REAL -@ATTRIBUTE attr1 REAL -@ATTRIBUTE attr2 REAL -@ATTRIBUTE attr3 REAL -@ATTRIBUTE class {class0, class1, class2, class3} - -@DATA -0.1, 0.2, 0.3, 0.4,class1 -% laksjdhf --0.1, -0.2, -0.3, -0.4,class2 -% lsdflkjhaksjdhf -1, 2, 3, 
4,class3 diff --git a/scipy-0.10.1/scipy/io/arff/tests/data/test5.arff b/scipy-0.10.1/scipy/io/arff/tests/data/test5.arff deleted file mode 100644 index 41816356aa..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/data/test5.arff +++ /dev/null @@ -1,12 +0,0 @@ -@RELATION test4 - -@ATTRIBUTE attr0 REAL -@ATTRIBUTE attr1 REAL -@ATTRIBUTE attr2 REAL -@ATTRIBUTE attr3 REAL -@ATTRIBUTE class {class0, class1, class2, class3} - -@DATA -0.1, 0.2, 0.3, 0.4,class1 --0.1, -0.2, -0.3, -0.4,class2 -1, 2, 3, 4,class3 diff --git a/scipy-0.10.1/scipy/io/arff/tests/test_arffread.py b/scipy-0.10.1/scipy/io/arff/tests/test_arffread.py deleted file mode 100644 index b9555c8b4d..0000000000 --- a/scipy-0.10.1/scipy/io/arff/tests/test_arffread.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python -import os -from os.path import join as pjoin -from cStringIO import StringIO - -import numpy as np - -from numpy.testing import TestCase, assert_array_almost_equal, assert_equal, \ - assert_, assert_raises - -from scipy.io.arff.arffread import loadarff -from scipy.io.arff.arffread import read_header, parse_type, ParseArffError - -data_path = pjoin(os.path.dirname(__file__), 'data') - -test1 = os.path.join(data_path, 'test1.arff') -test2 = os.path.join(data_path, 'test2.arff') -test3 = os.path.join(data_path, 'test3.arff') - -test4 = pjoin(data_path, 'test4.arff') -test5 = pjoin(data_path, 'test5.arff') -expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'), - (-0.1, -0.2, -0.3, -0.4, 'class2'), - (1, 2, 3, 4, 'class3')] -expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal'] - -missing = pjoin(data_path, 'missing.arff') -expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) -expect_missing = np.empty(3, [('yop', np.float), ('yap', np.float)]) -expect_missing['yop'] = expect_missing_raw[:, 0] -expect_missing['yap'] = expect_missing_raw[:, 1] - -class DataTest(TestCase): - def test1(self): - """Parsing trivial file with nothing.""" - self._test(test4) - - def 
test2(self): - """Parsing trivial file with some comments in the data section.""" - self._test(test5) - - def _test(self, test_file): - data, meta = loadarff(test_file) - for i in range(len(data)): - for j in range(4): - assert_array_almost_equal(expect4_data[i][j], data[i][j]) - assert_equal(meta.types(), expected_types) - - def test_filelike(self): - """Test reading from file-like object (StringIO)""" - f1 = open(test1) - f2 = StringIO(open(test1).read()) - data1, meta1 = loadarff(f1) - data2, meta2 = loadarff(f2) - assert_(data1 == data2) - assert_(repr(meta1) == repr(meta2)) - - -class MissingDataTest(TestCase): - def test_missing(self): - data, meta = loadarff(missing) - for i in ['yop', 'yap']: - assert_array_almost_equal(data[i], expect_missing[i]) - -class HeaderTest(TestCase): - def test_type_parsing(self): - """Test parsing type of attribute from their value.""" - ofile = open(test2) - rel, attrs = read_header(ofile) - - expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric', - 'numeric', 'string', 'string', 'nominal', 'nominal'] - - for i in range(len(attrs)): - assert_(parse_type(attrs[i][1]) == expected[i]) - - def test_badtype_parsing(self): - """Test parsing wrong type of attribute from their value.""" - ofile = open(test3) - rel, attrs = read_header(ofile) - - for name, value in attrs: - assert_raises(ParseArffError, parse_type, value) - - def test_fullheader1(self): - """Parsing trivial header with nothing.""" - ofile = open(test1) - rel, attrs = read_header(ofile) - - # Test relation - assert_(rel == 'test1') - - # Test numerical attributes - assert_(len(attrs) == 5) - for i in range(4): - assert_(attrs[i][0] == 'attr%d' % i) - assert_(attrs[i][1] == 'REAL') - classes = attrs[4][1] - - # Test nominal attribute - assert_(attrs[4][0] == 'class') - assert_(attrs[4][1] == '{class0, class1, class2, class3}') - -if __name__ == "__main__": - import nose - nose.run(argv=['', __file__]) diff --git a/scipy-0.10.1/scipy/io/arff/utils.py 
b/scipy-0.10.1/scipy/io/arff/utils.py deleted file mode 100644 index 3c7f4418c0..0000000000 --- a/scipy-0.10.1/scipy/io/arff/utils.py +++ /dev/null @@ -1,7 +0,0 @@ -#! /usr/bin/env python -# Last Change: Mon Aug 20 02:00 PM 2007 J - -try: - from functools import partial -except ImportError: - from myfunctools import partial diff --git a/scipy-0.10.1/scipy/io/bento.info b/scipy-0.10.1/scipy/io/bento.info deleted file mode 100644 index d0d8b5f1f9..0000000000 --- a/scipy-0.10.1/scipy/io/bento.info +++ /dev/null @@ -1,4 +0,0 @@ -Recurse: matlab - -Library: - Packages: arff, matlab diff --git a/scipy-0.10.1/scipy/io/data_store.py b/scipy-0.10.1/scipy/io/data_store.py deleted file mode 100644 index 538541a7f7..0000000000 --- a/scipy-0.10.1/scipy/io/data_store.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -Load or save values to a file. - -Shelves work well for storing data, but they are slow to access -repeatedly - especially for large data sets. This module allows -you to store data to a file and then load it back into the workspace. -When the data is stored, a python module is also created as the -"namespace for the data" - - -Examples --------- - -Saving the data to a data store: - ->>> import scipy.io ->>> import os ->>> a = 1 ->>> scipy.io.save_as_module('junker', {'a':a}) - -Loading the data saved to a data store in the same directory: - ->>> import junker ->>> print junker.a -1 - -""" - -__all__ = ['save_as_module'] - -import dumb_shelve -import os - - -def _create_module(file_name): - """ Create the module file. 
- """ - if not os.path.exists(file_name+'.py'): # don't clobber existing files - module_name = os.path.split(file_name)[-1] - f = open(file_name+'.py','w') - f.write('import scipy.io.data_store as data_store\n') - f.write('import %s\n' % module_name) - f.write('data_store._load(%s)' % module_name) - f.close() - - -def _create_shelf(file_name,data): - """Use this to write the data to a new file - """ - shelf_name = file_name.split('.')[0] - f = dumb_shelve.open(shelf_name,'w') - for i in data.keys(): -# print 'saving...',i - f[i] = data[i] -# print 'done' - f.close() - - -def save_as_module(file_name=None,data=None): - """ - Save the dictionary "data" into a module and shelf named save. - - Parameters - ---------- - file_name : str, optional - File name of the module to save. - data : dict, optional - The dictionary to store in the module. - - """ - _create_module(file_name) - _create_shelf(file_name,data) diff --git a/scipy-0.10.1/scipy/io/dumb_shelve.py b/scipy-0.10.1/scipy/io/dumb_shelve.py deleted file mode 100644 index e22e02ca6b..0000000000 --- a/scipy-0.10.1/scipy/io/dumb_shelve.py +++ /dev/null @@ -1,47 +0,0 @@ -from shelve import Shelf -try: - import zlib -except ImportError: - # Some python installations don't have zlib. - pass - -import cPickle - -class DbfilenameShelf(Shelf): - """Shelf implementation using the "anydbm" generic dbm interface. - - This is initialized with the filename for the dbm database. - See the module's __doc__ string for an overview of the interface. 
- """ - - def __init__(self, filename, flag='c'): - import dumbdbm_patched - Shelf.__init__(self, dumbdbm_patched.open(filename, flag)) - - def __getitem__(self, key): - compressed = self.dict[key] - try: - r = zlib.decompress(compressed) - except zlib.error: - r = compressed - except NameError: - r = compressed - - return cPickle.loads(r) - - def __setitem__(self, key, value): - s = cPickle.dumps(value,1) - try: - self.dict[key] = zlib.compress(s) - except NameError: - #zlib doesn't exist, leave it uncompressed. - self.dict[key] = s - -def open(filename, flag='c'): - """Open a persistent dictionary for reading and writing. - - Argument is the filename for the dbm database. - See the module's __doc__ string for an overview of the interface. - """ - - return DbfilenameShelf(filename, flag) diff --git a/scipy-0.10.1/scipy/io/dumbdbm_patched.py b/scipy-0.10.1/scipy/io/dumbdbm_patched.py deleted file mode 100644 index 5cc440ee5a..0000000000 --- a/scipy-0.10.1/scipy/io/dumbdbm_patched.py +++ /dev/null @@ -1,149 +0,0 @@ -"""A dumb and slow but simple dbm clone. - -For database spam, spam.dir contains the index (a text file), -spam.bak *may* contain a backup of the index (also a text file), -while spam.dat contains the data (a binary file). - -XXX TO DO: - -- seems to contain a bug when updating... 
- -- reclaim free space (currently, space once occupied by deleted or expanded -items is never reused) - -- support concurrent access (currently, if two processes take turns making -updates, they can mess up the index) - -- support efficient access to large databases (currently, the whole index -is read when the database is opened, and some updates rewrite the whole index) - -- support opening for read-only (flag = 'm') - -""" - -_os = __import__('os') -import __builtin__ - -_open = __builtin__.open - -_BLOCKSIZE = 512 - -error = IOError # For anydbm - -class _Database(object): - - def __init__(self, file): - self._dirfile = file + '.dir' - self._datfile = file + '.dat' - self._bakfile = file + '.bak' - # Mod by Jack: create data file if needed - try: - f = _open(self._datfile, 'r') - except IOError: - f = _open(self._datfile, 'w') - f.close() - self._update() - - def _update(self): - import string - self._index = {} - try: - f = _open(self._dirfile) - except IOError: - pass - else: - while 1: - line = string.rstrip(f.readline()) - if not line: break - key, (pos, siz) = eval(line) - self._index[key] = (pos, siz) - f.close() - - def _commit(self): - try: _os.unlink(self._bakfile) - except _os.error: pass - try: _os.rename(self._dirfile, self._bakfile) - except _os.error: pass - f = _open(self._dirfile, 'w') - for key, (pos, siz) in self._index.items(): - f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`)) - f.close() - - def __getitem__(self, key): - pos, siz = self._index[key] # may raise KeyError - f = _open(self._datfile, 'rb') - f.seek(pos) - dat = f.read(siz) - f.close() - return dat - - def __contains__(self, key): - return key in self._index - - def _addval(self, val): - f = _open(self._datfile, 'rb+') - f.seek(0, 2) - pos = f.tell() -## Does not work under MW compiler -## pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE -## f.seek(pos) - npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE - f.write('\0'*(npos-pos)) - pos = npos - - f.write(val) - 
f.close() - return (pos, len(val)) - - def _setval(self, pos, val): - f = _open(self._datfile, 'rb+') - f.seek(pos) - f.write(val) - f.close() - return (pos, len(val)) - - def _addkey(self, key, (pos, siz)): - self._index[key] = (pos, siz) - f = _open(self._dirfile, 'a') - f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`)) - f.close() - - def __setitem__(self, key, val): - if not isinstance(key, str) or not isinstance(val, str): - raise TypeError("keys and values must be strings") - if not self._index.has_key(key): - (pos, siz) = self._addval(val) - self._addkey(key, (pos, siz)) - else: - pos, siz = self._index[key] - oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE - newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE - if newblocks <= oldblocks: - pos, siz = self._setval(pos, val) - self._index[key] = pos, siz - else: - pos, siz = self._addval(val) - self._index[key] = pos, siz - self._addkey(key, (pos, siz)) - - def __delitem__(self, key): - del self._index[key] - self._commit() - - def keys(self): - return self._index.keys() - - def has_key(self, key): - return self._index.has_key(key) - - def __len__(self): - return len(self._index) - - def close(self): - self._index = None - self._datfile = self._dirfile = self._bakfile = None - - -def open(file, flag = None, mode = None): - # flag, mode arguments are currently ignored - return _Database(file) diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/__init__.py b/scipy-0.10.1/scipy/io/harwell_boeing/__init__.py deleted file mode 100644 index ba4641bc3f..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from scipy.io.harwell_boeing.hb import MalformedHeader, HBInfo, HBFile, \ - HBMatrixType, hb_read, hb_write - diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/_fortran_format_parser.py b/scipy-0.10.1/scipy/io/harwell_boeing/_fortran_format_parser.py deleted file mode 100644 index d5dbe7ac06..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/_fortran_format_parser.py 
+++ /dev/null @@ -1,312 +0,0 @@ -""" -Preliminary module to handle fortran formats for IO. Does not use this outside -scipy.sparse io for now, until the API is deemed reasonable. - -The *Format classes handle conversion between fortran and python format, and -FortranFormatParser can create *Format instances from raw fortran format -strings (e.g. '(3I4)', '(10I3)', etc...) -""" - -import re -import warnings - -import numpy as np - - -__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"] - - -TOKENS = { - "LPAR": r"\(", - "RPAR": r"\)", - "INT_ID": r"I", - "EXP_ID": r"E", - "INT": r"\d+", - "DOT": r"\.", -} - - -class BadFortranFormat(SyntaxError): - pass - - -def number_digits(n): - return int(np.floor(np.log10(np.abs(n))) + 1) - - -class IntFormat(object): - @classmethod - def from_number(cls, n, min=None): - """Given an integer, returns a "reasonable" IntFormat instance to represent - any number between 0 and n if n > 0, -n and n if n < 0 - - Parameters - ---------- - n: int - max number one wants to be able to represent - min: int - minimum number of characters to use for the format - - Returns - ------- - res: IntFormat - IntFormat instance with reasonable (see Note) computed width - - Note - ---- - Reasonable should be understood as the minimal string length necessary - without losing precision. 
For example, IntFormat.from_number(1) will - return an IntFormat instance of width 2, so that any 0 and 1 may be - represented as 1-character strings without loss of information.""" - width = number_digits(n) + 1 - if n < 0: - width += 1 - repeat = 80 // width - return cls(width, min, repeat=repeat) - - def __init__(self, width, min=None, repeat=None): - self.width = width - self.repeat = repeat - self.min = min - - def __repr__(self): - r = "IntFormat(" - if self.repeat: - r += "%d" % self.repeat - r += "I%d" % self.width - if self.min: - r += ".%d" % self.min - return r + ")" - - @property - def fortran_format(self): - r = "(" - if self.repeat: - r += "%d" % self.repeat - r += "I%d" % self.width - if self.min: - r += ".%d" % self.min - return r + ")" - - @property - def python_format(self): - return "%" + str(self.width) + "d" - - -class ExpFormat(object): - @classmethod - def from_number(cls, n, min=None): - """Given a float number, returns a "reasonable" ExpFormat instance to - represent any number between -n and n. - - Parameters - ---------- - n: float - max number one wants to be able to represent - min: int - minimum number of characters to use for the format - - Returns - ------- - res: ExpFormat - ExpFormat instance with reasonable (see Note) computed width - - Note - ---- - Reasonable should be understood as the minimal string length necessary - to avoid losing precision.""" - # len of one number in exp format: sign + 1|0 + "." 
+ - # number of digit for fractional part + 'E' + sign of exponent + - # len of exponent - finfo = np.finfo(n.dtype) - # Number of digits for fractional part - n_prec = finfo.precision + 1 - # Number of digits for exponential part - n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp]))) - width = 1 + 1 + n_prec + 1 + n_exp + 1 - if n < 0: - width += 1 - repeat = int(np.floor(80 / width)) - return cls(width, n_prec, min, repeat=repeat) - - def __init__(self, width, significand, min=None, repeat=None): - """\ - Parameters - ---------- - width: int - number of characters taken by the string (includes space). - """ - self.width = width - self.significand = significand - self.repeat = repeat - self.min = min - - def __repr__(self): - r = "ExpFormat(" - if self.repeat: - r += "%d" % self.repeat - r += "E%d.%d" % (self.width, self.significand) - if self.min: - r += "E%d" % self.min - return r + ")" - - @property - def fortran_format(self): - r = "(" - if self.repeat: - r += "%d" % self.repeat - r += "E%d.%d" % (self.width, self.significand) - if self.min: - r += "E%d" % self.min - return r + ")" - - @property - def python_format(self): - return "%" + str(self.width-1) + "." 
+ str(self.significand) + "E" - - -class Token(object): - def __init__(self, type, value, pos): - self.type = type - self.value = value - self.pos = pos - - def __str__(self): - return """Token('%s', "%s")""" % (self.type, self.value) - - def __repr__(self): - return self.__str__() - - -class Tokenizer(object): - def __init__(self): - self.tokens = TOKENS.keys() - self.res = [re.compile(TOKENS[i]) for i in self.tokens] - - def input(self, s): - self.data = s - self.curpos = 0 - self.len = len(s) - - def next_token(self): - curpos = self.curpos - tokens = self.tokens - - while curpos < self.len: - for i, r in enumerate(self.res): - m = r.match(self.data, curpos) - if m is None: - continue - else: - self.curpos = m.end() - return Token(self.tokens[i], m.group(), self.curpos) - else: - raise SyntaxError("Unknown character at position %d (%s)" \ - % (self.curpos, self.data[curpos])) - - -# Grammar for fortran format: -# format : LPAR format_string RPAR -# format_string : repeated | simple -# repeated : repeat simple -# simple : int_fmt | exp_fmt -# int_fmt : INT_ID width -# exp_fmt : simple_exp_fmt -# simple_exp_fmt : EXP_ID width DOT significand -# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits -# repeat : INT -# width : INT -# significand : INT -# ndigits : INT - -# Naive fortran formatter - parser is hand-made -class FortranFormatParser(object): - """Parser for fortran format strings. The parse method returns a *Format - instance. - - Note - ---- - Only ExpFormat (exponential format for floating values) and IntFormat - (integer format) for now. 
- """ - def __init__(self): - self.tokenizer = Tokenizer() - - def parse(self, s): - self.tokenizer.input(s) - - tokens = [] - - try: - while True: - t = self.tokenizer.next_token() - if t is None: - break - else: - tokens.append(t) - return self._parse_format(tokens) - except SyntaxError, e: - raise BadFortranFormat(str(e)) - - def _get_min(self, tokens): - next = tokens.pop(0) - if not next.type == "DOT": - raise SyntaxError() - next = tokens.pop(0) - return next.value - - def _expect(self, token, tp): - if not token.type == tp: - raise SyntaxError() - - def _parse_format(self, tokens): - if not tokens[0].type == "LPAR": - raise SyntaxError("Expected left parenthesis at position "\ - "%d (got '%s')" % (0, tokens[0].value)) - elif not tokens[-1].type == "RPAR": - raise SyntaxError("Expected right parenthesis at position "\ - "%d (got '%s')" % (len(tokens), tokens[-1].value)) - - tokens = tokens[1:-1] - types = [t.type for t in tokens] - if types[0] == "INT": - repeat = int(tokens.pop(0).value) - else: - repeat = None - - next = tokens.pop(0) - if next.type == "INT_ID": - next = self._next(tokens, "INT") - width = int(next.value) - if tokens: - min = int(self._get_min(tokens)) - else: - min = None - return IntFormat(width, min, repeat) - elif next.type == "EXP_ID": - next = self._next(tokens, "INT") - width = int(next.value) - - next = self._next(tokens, "DOT") - - next = self._next(tokens, "INT") - significand = int(next.value) - - if tokens: - next = self._next(tokens, "EXP_ID") - - next = self._next(tokens, "INT") - min = int(next.value) - else: - min = None - return ExpFormat(width, significand, min, repeat) - else: - raise SyntaxError("Invalid formater type %s" % next.value) - - def _next(self, tokens, tp): - if not len(tokens) > 0: - raise SyntaxError() - next = tokens.pop(0) - self._expect(next, tp) - return next - diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/hb.py b/scipy-0.10.1/scipy/io/harwell_boeing/hb.py deleted file mode 100644 index 
33d20afdf2..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/hb.py +++ /dev/null @@ -1,548 +0,0 @@ -""" -Implementation of Harwell-Boeing read/write. - -At the moment not the full Harwell-Boeing format is supported. Supported -features are: - - - assembled, non-symmetric, real matrices - - integer for pointer/indices - - exponential format for float values, and int format - -""" - -# TODO: -# - Add more support (symmetric/complex matrices, non-assembled matrices ?) - -# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but -# takes a lot of memory. Being faster would require compiled code. -# write is not efficient. Although not a terribly exciting task, -# having reusable facilities to efficiently read/write fortran-formatted files -# would be useful outside this module. - -import warnings - -import numpy as np -from scipy.sparse import csc_matrix -from scipy.io.harwell_boeing._fortran_format_parser import \ - FortranFormatParser, IntFormat, ExpFormat - - -__all__ = ["MalformedHeader", "read_hb", "write", "HBInfo", "HBFile", - "HBMatrixType"] - - -class MalformedHeader(Exception): - pass - -class LineOverflow(Warning): - pass - - -def _nbytes_full(fmt, nlines): - """Return the number of bytes to read to get every full lines for the - given parsed fortran format.""" - return (fmt.repeat * fmt.width + 1) * (nlines - 1) - - -class HBInfo(object): - @classmethod - def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None): - """Create a HBInfo instance from an existing sparse matrix. 
- - Parameters - ---------- - m: sparse matrix - the HBInfo instance will derive its parameters from m - title: str - Title to put in the HB header - key: str - Key - mxtype: HBMatrixType - type of the input matrix - fmt: dict - not implemented - - Returns - ------- - hb_info: HBInfo instance - """ - pointer = m.indptr - indices = m.indices - values = m.data - - nrows, ncols = m.shape - nnon_zeros = m.nnz - - if fmt is None: - # +1 because HB use one-based indexing (Fortran), and we will write - # the indices /pointer as such - pointer_fmt = IntFormat.from_number(np.max(pointer+1)) - indices_fmt = IntFormat.from_number(np.max(indices+1)) - - if values.dtype.kind in np.typecodes["AllFloat"]: - values_fmt = ExpFormat.from_number(-np.max(np.abs(values))) - elif values.dtype.kind in np.typecodes["AllInteger"]: - values_fmt = IntFormat.from_number(-np.max(np.abs(values))) - else: - raise NotImplementedError("type %s not implemented yet" % values.dtype.kind) - else: - raise NotImplementedError("fmt argument not supported yet.") - - if mxtype is None: - if not np.isrealobj(values): - raise ValueError("Complex values not supported yet") - if values.dtype.kind in np.typecodes["AllInteger"]: - tp = "integer" - elif values.dtype.kind in np.typecodes["AllFloat"]: - tp = "real" - else: - raise NotImplementedError("type %s for values not implemented" \ - % values.dtype) - mxtype = HBMatrixType(tp, "unsymmetric", "assembled") - else: - raise ValueError("mxtype argument not handled yet.") - - def _nlines(fmt, size): - nlines = size // fmt.repeat - if nlines * fmt.repeat != size: - nlines += 1 - return nlines - - pointer_nlines = _nlines(pointer_fmt, pointer.size) - indices_nlines = _nlines(indices_fmt, indices.size) - values_nlines = _nlines(values_fmt, values.size) - - total_nlines = pointer_nlines + indices_nlines + values_nlines - - return cls(title, key, - total_nlines, pointer_nlines, indices_nlines, values_nlines, - mxtype, nrows, ncols, nnon_zeros, - 
pointer_fmt.fortran_format, indices_fmt.fortran_format, - values_fmt.fortran_format) - - @classmethod - def from_file(cls, fid): - """Create a HBInfo instance from a file object containg a matrix in the - HB format. - - Parameters - ---------- - fid: file-like matrix - File or file-like object containing a matrix in the HB format. - - Returns - ------- - hb_info: HBInfo instance - """ - # First line - line = fid.readline().strip("\n") - if not len(line) > 72: - raise ValueError("Expected at least 72 characters for first line, " - "got: \n%s" % line) - title = line[:72] - key = line[72:] - - # Second line - line = fid.readline().strip("\n") - if not len(line.rstrip()) >= 56: - raise ValueError("Expected at least 56 characters for second line, " - "got: \n%s" % line) - total_nlines = _expect_int(line[:14]) - pointer_nlines = _expect_int(line[14:28]) - indices_nlines = _expect_int(line[28:42]) - values_nlines = _expect_int(line[42:56]) - - rhs_nlines = line[56:72].strip() - if rhs_nlines == '': - rhs_nlines = 0 - else: - rhs_nlines = _expect_int(rhs_nlines) - if not rhs_nlines == 0: - raise ValueError("Only files without right hand side supported for " \ - "now.") - - # Third line - line = fid.readline().strip("\n") - if not len(line) >= 70: - raise ValueError("Expected at least 72 character for third line, got:\n" - "%s" % line) - - mxtype_s = line[:3].upper() - if not len(mxtype_s) == 3: - raise ValueError("mxtype expected to be 3 characters long") - - mxtype = HBMatrixType.from_fortran(mxtype_s) - if not mxtype.value_type in ["real", "integer"]: - raise ValueError("Only real or integer matrices supported for " - "now (detected %s)" % mxtype) - if not mxtype.structure == "unsymmetric": - raise ValueError("Only unsymmetric matrices supported for " - "now (detected %s)" % mxtype) - if not mxtype.storage == "assembled": - raise ValueError("Only assembled matrices supported for now") - - if not line[3:14] == " " * 11: - raise ValueError("Malformed data for third line: 
%s" % line) - - nrows = _expect_int(line[14:28]) - ncols = _expect_int(line[28:42]) - nnon_zeros = _expect_int(line[42:56]) - nelementals = _expect_int(line[56:70]) - if not nelementals == 0: - raise ValueError("Unexpected value %d for nltvl (last entry of line 3)" - % nelementals) - - # Fourth line - line = fid.readline().strip("\n") - - ct = line.split() - if not len(ct) == 3: - raise ValueError("Expected 3 formats, got %s" % ct) - - return cls(title, key, - total_nlines, pointer_nlines, indices_nlines, values_nlines, - mxtype, nrows, ncols, nnon_zeros, - ct[0], ct[1], ct[2], - rhs_nlines, nelementals) - - def __init__(self, title, key, - total_nlines, pointer_nlines, indices_nlines, values_nlines, - mxtype, nrows, ncols, nnon_zeros, - pointer_format_str, indices_format_str, values_format_str, - right_hand_sides_nlines=0, nelementals=0): - """Do not use this directly, but the class ctrs (from_* functions).""" - self.title = title - self.key = key - if title is None: - title = "No Title" - if len(title) > 72: - raise ValueError("title cannot be > 72 characters") - - if key is None: - key = "|No Key" - if len(key) > 8: - warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow) - - - self.total_nlines = total_nlines - self.pointer_nlines = pointer_nlines - self.indices_nlines = indices_nlines - self.values_nlines = values_nlines - - parser = FortranFormatParser() - pointer_format = parser.parse(pointer_format_str) - if not isinstance(pointer_format, IntFormat): - raise ValueError("Expected int format for pointer format, got %s" - % pointer_format) - - indices_format = parser.parse(indices_format_str) - if not isinstance(indices_format, IntFormat): - raise ValueError("Expected int format for indices format, got %s" % - indices_format) - - values_format = parser.parse(values_format_str) - if isinstance(values_format, ExpFormat): - if not mxtype.value_type in ["real", "complex"]: - raise ValueError("Inconsistency between matrix type %s and " \ - "value 
type %s" % (mxtype, values_format)) - values_dtype = np.float64 - elif isinstance(values_format, IntFormat): - if not mxtype.value_type in ["integer"]: - raise ValueError("Inconsistency between matrix type %s and " \ - "value type %s" % (mxtype, values_format)) - # XXX: fortran int -> dtype association ? - values_dtype = np.int - else: - raise ValueError("Unsupported format for values %s" % ct[2]) - - self.pointer_format = pointer_format - self.indices_format = indices_format - self.values_format = values_format - - self.pointer_dtype = np.int32 - self.indices_dtype = np.int32 - self.values_dtype = values_dtype - - self.pointer_nlines = pointer_nlines - self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines) - - self.indices_nlines = indices_nlines - self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines) - - self.values_nlines = values_nlines - self.values_nbytes_full = _nbytes_full(values_format, values_nlines) - - self.nrows = nrows - self.ncols = ncols - self.nnon_zeros = nnon_zeros - self.nelementals = nelementals - self.mxtype = mxtype - - def dump(self): - """Gives the header corresponding to this instance as a string.""" - header = [self.title.ljust(72) + self.key.ljust(8)] - - header.append("%14d%14d%14d%14d" % - (self.total_nlines, self.pointer_nlines, - self.indices_nlines, self.values_nlines)) - header.append("%14s%14d%14d%14d%14d" % - (self.mxtype.fortran_format.ljust(14), self.nrows, - self.ncols, self.nnon_zeros, 0)) - - pffmt = self.pointer_format.fortran_format - iffmt = self.indices_format.fortran_format - vffmt = self.values_format.fortran_format - header.append("%16s%16s%20s" % - (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20))) - return "\n".join(header) - - -def _expect_int(value, msg=None): - try: - return int(value) - except ValueError: - if msg is None: - msg = "Expected an int, got %s" - raise ValueError(msg % value) - - -def _read_hb_data(content, header): - # XXX: look at a way to reduce memory here 
(big string creation) - ptr_string = "".join([content.read(header.pointer_nbytes_full), - content.readline()]) - ptr = np.fromstring(ptr_string, - dtype=np.int, sep=' ') - - ind_string = "".join([content.read(header.indices_nbytes_full), - content.readline()]) - ind = np.fromstring(ind_string, - dtype=np.int, sep=' ') - - val_string = "".join([content.read(header.values_nbytes_full), - content.readline()]) - val = np.fromstring(val_string, - dtype=header.values_dtype, sep=' ') - - try: - return csc_matrix((val, ind-1, ptr-1), - shape=(header.nrows, header.ncols)) - except ValueError, e: - raise e - - -def _write_data(m, fid, header): - def write_array(f, ar, nlines, fmt): - # ar_nlines is the number of full lines, n is the number of items per - # line, ffmt the fortran format - pyfmt = fmt.python_format - pyfmt_full = pyfmt * fmt.repeat - - # for each array to write, we first write the full lines, and special - # case for partial line - full = ar[:(nlines - 1) * fmt.repeat] - for row in full.reshape((nlines-1, fmt.repeat)): - f.write(pyfmt_full % tuple(row) + "\n") - nremain = ar.size - full.size - if nremain > 0: - f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n") - - fid.write(header.dump()) - fid.write("\n") - # +1 is for fortran one-based indexing - write_array(fid, m.indptr+1, header.pointer_nlines, - header.pointer_format) - write_array(fid, m.indices+1, header.indices_nlines, - header.indices_format) - write_array(fid, m.data, header.values_nlines, - header.values_format) - - -class HBMatrixType(object): - """Class to hold the matrix type.""" - # q2f* translates qualified names to fortran character - _q2f_type = { - "real": "R", - "complex": "C", - "pattern": "P", - "integer": "I", - } - _q2f_structure = { - "symmetric": "S", - "unsymmetric": "U", - "hermitian": "H", - "skewsymmetric": "Z", - "rectangular": "R" - } - _q2f_storage = { - "assembled": "A", - "elemental": "E", - } - - _f2q_type = dict([(j, i) for i, j in _q2f_type.items()]) - 
_f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()]) - _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()]) - - @classmethod - def from_fortran(cls, fmt): - if not len(fmt) == 3: - raise ValueError("Fortran format for matrix type should be 3 " \ - "characters long") - try: - value_type = cls._f2q_type[fmt[0]] - structure = cls._f2q_structure[fmt[1]] - storage = cls._f2q_storage[fmt[2]] - return cls(value_type, structure, storage) - except KeyError: - raise ValueError("Unrecognized format %s" % fmt) - - def __init__(self, value_type, structure, storage="assembled"): - self.value_type = value_type - self.structure = structure - self.storage = storage - - if not value_type in self._q2f_type.keys(): - raise ValueError("Unrecognized type %s" % value_type) - if not structure in self._q2f_structure.keys(): - raise ValueError("Unrecognized structure %s" % structure) - if not storage in self._q2f_storage.keys(): - raise ValueError("Unrecognized storage %s" % storage) - - @property - def fortran_format(self): - return self._q2f_type[self.value_type] + \ - self._q2f_structure[self.structure] + \ - self._q2f_storage[self.storage] - - def __repr__(self): - return "HBMatrixType(%s, %s, %s)" % \ - (self.value_type, self.structure, self.storage) - - -class HBFile(object): - def __init__(self, file, hb_info=None): - """Create a HBFile instance. - - Parameters - ---------- - file: file-object - StringIO work as well - hb_info: HBInfo - Should be given as an argument for writing, in which case the file - should be writable. - """ - self._fid = file - if hb_info is None: - self._hb_info = HBInfo.from_file(file) - else: - #raise IOError("file %s is not writable, and hb_info " - # "was given." 
% file) - self._hb_info = hb_info - - @property - def title(self): - return self._hb_info.title - - @property - def key(self): - return self._hb_info.key - - @property - def type(self): - return self._hb_info.mxtype.value_type - - @property - def structure(self): - return self._hb_info.mxtype.structure - - @property - def storage(self): - return self._hb_info.mxtype.storage - - def read_matrix(self): - return _read_hb_data(self._fid, self._hb_info) - - def write_matrix(self, m): - return _write_data(m, self._fid, self._hb_info) - - -def hb_read(file): - """Read HB-format file. - - Parameters - ---------- - file: str-like or file-like - If a string-like object, file is the name of the file to read. If a - file-like object, the data are read from it. - - Returns - ------- - data : scipy.sparse.csc_matrix instance - The data read from the HB file as a sparse matrix. - - Notes - ----- - At the moment not the full Harwell-Boeing format is supported. Supported - features are: - - - assembled, non-symmetric, real matrices - - integer for pointer/indices - - exponential format for float values, and int format - - """ - def _get_matrix(fid): - hb = HBFile(fid) - return hb.read_matrix() - - if isinstance(file, basestring): - fid = open(file) - try: - return _get_matrix(fid) - finally: - fid.close() - else: - return _get_matrix(file) - - -def hb_write(file, m, hb_info=None): - """Write HB-format file. - - Parameters - ---------- - file: str-like or file-like - if a string-like object, file is the name of the file to read. If a - file-like object, the data are read from it. - m: sparse-matrix - the sparse matrix to write - hb_info: HBInfo - contains the meta-data for write - - Returns - ------- - None - - Notes - ----- - At the moment not the full Harwell-Boeing format is supported. 
Supported - features are: - - - assembled, non-symmetric, real matrices - - integer for pointer/indices - - exponential format for float values, and int format - - """ - if hb_info is None: - hb_info = HBInfo.from_data(m) - - def _set_matrix(fid): - hb = HBFile(fid, hb_info) - return hb.write_matrix(m) - - if isinstance(file, basestring): - fid = open(file, "w") - try: - return _set_matrix(fid) - finally: - fid.close() - else: - return _set_matrix(file) - diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/setup.py b/scipy-0.10.1/scipy/io/harwell_boeing/setup.py deleted file mode 100755 index 36447ec369..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('harwell_boeing',parent_package,top_path) - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/setupscons.py b/scipy-0.10.1/scipy/io/harwell_boeing/setupscons.py deleted file mode 100755 index 1c7ce65cd3..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/setupscons.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('harwell_boeing',parent_package,top_path) - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/tests/test_fortran_format.py b/scipy-0.10.1/scipy/io/harwell_boeing/tests/test_fortran_format.py deleted file mode 100644 index 1d887fac6d..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/tests/test_fortran_format.py +++ /dev/null @@ -1,73 +0,0 
@@ -import numpy as np - -from numpy.testing import TestCase, assert_equal, assert_raises - -from scipy.io.harwell_boeing._fortran_format_parser import \ - FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat, \ - number_digits - - -class TestFortranFormatParser(TestCase): - def setUp(self): - self.parser = FortranFormatParser() - - def _test_equal(self, format, ref): - ret = self.parser.parse(format) - assert_equal(ret.__dict__, ref.__dict__) - - def test_simple_int(self): - self._test_equal("(I4)", IntFormat(4)) - - def test_simple_repeated_int(self): - self._test_equal("(3I4)", IntFormat(4, repeat=3)) - - def test_simple_exp(self): - self._test_equal("(E4.3)", ExpFormat(4, 3)) - - def test_exp_exp(self): - self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3)) - - def test_repeat_exp(self): - self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2)) - - def test_repeat_exp_exp(self): - self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2)) - - def test_wrong_formats(self): - def _test_invalid(bad_format): - assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format)) - _test_invalid("I4") - _test_invalid("(E4)") - _test_invalid("(E4.)") - _test_invalid("(E4.E3)") - -class TestIntFormat(TestCase): - def test_to_fortran(self): - f = [IntFormat(10), IntFormat(12, 10), IntFormat(12, 10, 3)] - res = ["(I10)", "(I12.10)", "(3I12.10)"] - - for i, j in zip(f, res): - assert_equal(i.fortran_format, j) - - def test_from_number(self): - f = [10, -12, 123456789] - r_f = [IntFormat(3, repeat=26), IntFormat(4, repeat=20), - IntFormat(10, repeat=8)] - for i, j in zip(f, r_f): - assert_equal(IntFormat.from_number(i).__dict__, j.__dict__) - -class TestExpFormat(TestCase): - def test_to_fortran(self): - f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3), - ExpFormat(10, 5, repeat=3)] - res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"] - - for i, j in zip(f, res): - assert_equal(i.fortran_format, j) - - def test_from_number(self): - f = 
np.array([1.0, -1.2]) - r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)] - for i, j in zip(f, r_f): - assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__) - diff --git a/scipy-0.10.1/scipy/io/harwell_boeing/tests/test_hb.py b/scipy-0.10.1/scipy/io/harwell_boeing/tests/test_hb.py deleted file mode 100644 index e69b8008da..0000000000 --- a/scipy-0.10.1/scipy/io/harwell_boeing/tests/test_hb.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -from cStringIO import StringIO -import tempfile - -import numpy as np - -from numpy.testing import TestCase, assert_equal, \ - assert_array_almost_equal_nulp - -from scipy.sparse import coo_matrix, csc_matrix, rand - -from scipy.io import hb_read, hb_write -from scipy.io.harwell_boeing import HBFile, HBInfo - - -SIMPLE = """\ -No Title |No Key - 9 4 1 4 -RUA 100 100 10 0 -(26I3) (26I3) (3E23.15) -1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 -3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 -3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9 -9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11 -37 71 89 18 30 45 70 19 25 52 -2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01 -6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01 -4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01 -6.912334991524289e-01 -""" - -SIMPLE_MATRIX = coo_matrix( - ( - (0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799, - 0.0661749042483, 0.887037034319, 0.419647859016, - 0.564960307211, 0.993442388709, 0.691233499152,), - (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51], - [ 0, 4, 58, 61, 61, 72, 72, 73, 99, 99]])))) - -def assert_csc_almost_equal(r, l): - r = csc_matrix(r) - l = csc_matrix(l) - assert_equal(r.indptr, l.indptr) - assert_equal(r.indices, l.indices) - assert_array_almost_equal_nulp(r.data, l.data, 10000) - -class TestHBReader(TestCase): - def test_simple(self): - m = hb_read(StringIO(SIMPLE)) - assert_csc_almost_equal(m, SIMPLE_MATRIX) - -class 
TestRBRoundtrip(TestCase): - def test_simple(self): - rm = rand(100, 1000, 0.05).tocsc() - fd, filename = tempfile.mkstemp(suffix="rb") - try: - hb_write(filename, rm, HBInfo.from_data(rm)) - m = hb_read(filename) - finally: - os.close(fd) - os.remove(filename) - - assert_csc_almost_equal(m, rm) - diff --git a/scipy-0.10.1/scipy/io/idl.py b/scipy-0.10.1/scipy/io/idl.py deleted file mode 100644 index 5d7edfaf10..0000000000 --- a/scipy-0.10.1/scipy/io/idl.py +++ /dev/null @@ -1,869 +0,0 @@ -# IDLSave - a python module to read IDL 'save' files -# Copyright (c) 2010 Thomas P. Robitaille - -# Many thanks to Craig Markwardt for publishing the Unofficial Format -# Specification for IDL .sav files, without which this Python module would not -# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt). - -# This code was developed by with permission from ITT Visual Information -# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems, -# Inc. for their Interactive Data Language software. - -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import struct -import numpy as np -from numpy.compat import asbytes, asstr -import tempfile -import zlib -import warnings - -# Define the different data types that can be found in an IDL save file -DTYPE_DICT = {} -DTYPE_DICT[1] = '>u1' -DTYPE_DICT[2] = '>i2' -DTYPE_DICT[3] = '>i4' -DTYPE_DICT[4] = '>f4' -DTYPE_DICT[5] = '>f8' -DTYPE_DICT[6] = '>c8' -DTYPE_DICT[7] = '|O' -DTYPE_DICT[8] = '|O' -DTYPE_DICT[9] = '>c16' -DTYPE_DICT[10] = '|O' -DTYPE_DICT[11] = '|O' -DTYPE_DICT[12] = '>u2' -DTYPE_DICT[13] = '>u4' -DTYPE_DICT[14] = '>i8' -DTYPE_DICT[15] = '>u8' - -# Define the different record types that can be found in an IDL save file -RECTYPE_DICT = {} -RECTYPE_DICT[0] = "START_MARKER" -RECTYPE_DICT[1] = "COMMON_VARIABLE" -RECTYPE_DICT[2] = "VARIABLE" -RECTYPE_DICT[3] = "SYSTEM_VARIABLE" -RECTYPE_DICT[6] = "END_MARKER" -RECTYPE_DICT[10] = "TIMESTAMP" -RECTYPE_DICT[12] = "COMPILED" -RECTYPE_DICT[13] = "IDENTIFICATION" -RECTYPE_DICT[14] = "VERSION" -RECTYPE_DICT[15] = "HEAP_HEADER" -RECTYPE_DICT[16] = "HEAP_DATA" -RECTYPE_DICT[17] = "PROMOTE64" -RECTYPE_DICT[19] = "NOTICE" - -# Define a dictionary to contain structure definitions -STRUCT_DICT = {} - - -def _align_32(f): - '''Align to the next 32-bit position in a file''' - - pos = f.tell() - if pos % 4 != 0: - f.seek(pos + 4 - pos % 4) - return - - -def _skip_bytes(f, n): - '''Skip `n` bytes''' - f.read(n) - return - - -def _read_bytes(f, n): - '''Read the next `n` bytes''' - return f.read(n) - - -def _read_byte(f): - '''Read a single byte''' - return np.uint8(struct.unpack('>B', f.read(4)[:1])[0]) - - -def _read_long(f): - '''Read a signed 32-bit integer''' - return np.int32(struct.unpack('>l', f.read(4))[0]) - - -def _read_int16(f): - '''Read a 
signed 16-bit integer''' - return np.int16(struct.unpack('>h', f.read(4)[2:4])[0]) - - -def _read_int32(f): - '''Read a signed 32-bit integer''' - return np.int32(struct.unpack('>i', f.read(4))[0]) - - -def _read_int64(f): - '''Read a signed 64-bit integer''' - return np.int64(struct.unpack('>q', f.read(8))[0]) - - -def _read_uint16(f): - '''Read an unsigned 16-bit integer''' - return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0]) - - -def _read_uint32(f): - '''Read an unsigned 32-bit integer''' - return np.uint32(struct.unpack('>I', f.read(4))[0]) - - -def _read_uint64(f): - '''Read an unsigned 64-bit integer''' - return np.uint64(struct.unpack('>Q', f.read(8))[0]) - - -def _read_float32(f): - '''Read a 32-bit float''' - return np.float32(struct.unpack('>f', f.read(4))[0]) - - -def _read_float64(f): - '''Read a 64-bit float''' - return np.float64(struct.unpack('>d', f.read(8))[0]) - - -class Pointer(object): - '''Class used to define pointers''' - - def __init__(self, index): - self.index = index - return - - -class ObjectPointer(Pointer): - '''Class used to define object pointers''' - pass - - -def _read_string(f): - '''Read a string''' - length = _read_long(f) - if length > 0: - chars = _read_bytes(f, length) - _align_32(f) - chars = asstr(chars) - else: - warnings.warn("warning: empty strings are now set to '' instead of None") - chars = '' - return chars - - -def _read_string_data(f): - '''Read a data string (length is specified twice)''' - length = _read_long(f) - if length > 0: - length = _read_long(f) - string_data = _read_bytes(f, length) - _align_32(f) - else: - warnings.warn("warning: empty strings are now set to '' instead of None") - string_data = '' - return string_data - - -def _read_data(f, dtype): - '''Read a variable with a specified data type''' - if dtype==1: - if _read_int32(f) != 1: - raise Exception("Error occurred while reading byte variable") - return _read_byte(f) - elif dtype==2: - return _read_int16(f) - elif dtype==3: - return 
_read_int32(f) - elif dtype==4: - return _read_float32(f) - elif dtype==5: - return _read_float64(f) - elif dtype==6: - real = _read_float32(f) - imag = _read_float32(f) - return np.complex64(real + imag * 1j) - elif dtype==7: - return _read_string_data(f) - elif dtype==8: - raise Exception("Should not be here - please report this") - elif dtype==9: - real = _read_float64(f) - imag = _read_float64(f) - return np.complex128(real + imag * 1j) - elif dtype==10: - return Pointer(_read_int32(f)) - elif dtype==11: - return ObjectPointer(_read_int32(f)) - elif dtype==12: - return _read_uint16(f) - elif dtype==13: - return _read_uint32(f) - elif dtype==14: - return _read_int64(f) - elif dtype==15: - return _read_uint64(f) - else: - raise Exception("Unknown IDL type: %i - please report this" % dtype) - - -def _read_structure(f, array_desc, struct_desc): - ''' - Read a structure, with the array and structure descriptors given as - `array_desc` and `structure_desc` respectively. - ''' - - nrows = array_desc['nelements'] - ncols = struct_desc['ntags'] - columns = struct_desc['tagtable'] - - dtype = [] - for col in columns: - if col['structure'] or col['array']: - dtype.append(((col['name'].lower(), col['name']), np.object_)) - else: - if col['typecode'] in DTYPE_DICT: - dtype.append(((col['name'].lower(), col['name']), - DTYPE_DICT[col['typecode']])) - else: - raise Exception("Variable type %i not implemented" % - col['typecode']) - - structure = np.recarray((nrows, ), dtype=dtype) - - for i in range(nrows): - for col in columns: - dtype = col['typecode'] - if col['structure']: - structure[col['name']][i] = _read_structure(f, \ - struct_desc['arrtable'][col['name']], \ - struct_desc['structtable'][col['name']]) - elif col['array']: - structure[col['name']][i] = _read_array(f, dtype, \ - struct_desc['arrtable'][col['name']]) - else: - structure[col['name']][i] = _read_data(f, dtype) - - # Reshape structure if needed - if array_desc['ndims'] > 1: - warnings.warn("warning: 
multi-dimensional structures are now correctly reshaped") - dims = array_desc['dims'][:int(array_desc['ndims'])] - dims.reverse() - structure = structure.reshape(dims) - - return structure - - -def _read_array(f, typecode, array_desc): - ''' - Read an array of type `typecode`, with the array descriptor given as - `array_desc`. - ''' - - if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]: - - if typecode == 1: - nbytes = _read_int32(f) - if nbytes != array_desc['nbytes']: - raise Exception("Error occurred while reading byte array") - - # Read bytes as numpy array - array = np.fromstring(f.read(array_desc['nbytes']), \ - dtype=DTYPE_DICT[typecode]) - - elif typecode in [2, 12]: - - # These are 2 byte types, need to skip every two as they are not packed - - array = np.fromstring(f.read(array_desc['nbytes']*2), \ - dtype=DTYPE_DICT[typecode])[1::2] - - else: - - # Read bytes into list - array = [] - for i in range(array_desc['nelements']): - dtype = typecode - data = _read_data(f, dtype) - array.append(data) - - array = np.array(array, dtype=np.object_) - - # Reshape array if needed - if array_desc['ndims'] > 1: - dims = array_desc['dims'][:int(array_desc['ndims'])] - dims.reverse() - array = array.reshape(dims) - - # Go to next alignment position - _align_32(f) - - return array - - -def _read_record(f): - '''Function to read in a full record''' - - record = {} - - recpos = f.tell() - record['rectype'] = _read_long(f) - - nextrec = _read_uint32(f) - nextrec += _read_uint32(f) * 2**32 - - _skip_bytes(f, 4) - - if not record['rectype'] in RECTYPE_DICT: - raise Exception("Unknown RECTYPE: %i" % record['rectype']) - - record['rectype'] = RECTYPE_DICT[record['rectype']] - - if record['rectype'] in ["VARIABLE", "HEAP_DATA"]: - - if record['rectype'] == "VARIABLE": - record['varname'] = _read_string(f) - else: - record['heap_index'] = _read_long(f) - _skip_bytes(f, 4) - - rectypedesc = _read_typedesc(f) - - varstart = _read_long(f) - if varstart != 7: - raise Exception("VARSTART 
is not 7") - - if rectypedesc['structure']: - record['data'] = _read_structure(f, rectypedesc['array_desc'], \ - rectypedesc['struct_desc']) - elif rectypedesc['array']: - record['data'] = _read_array(f, rectypedesc['typecode'], \ - rectypedesc['array_desc']) - else: - dtype = rectypedesc['typecode'] - record['data'] = _read_data(f, dtype) - - elif record['rectype'] == "TIMESTAMP": - - _skip_bytes(f, 4*256) - record['date'] = _read_string(f) - record['user'] = _read_string(f) - record['host'] = _read_string(f) - - elif record['rectype'] == "VERSION": - - record['format'] = _read_long(f) - record['arch'] = _read_string(f) - record['os'] = _read_string(f) - record['release'] = _read_string(f) - - elif record['rectype'] == "IDENTIFICATON": - - record['author'] = _read_string(f) - record['title'] = _read_string(f) - record['idcode'] = _read_string(f) - - elif record['rectype'] == "NOTICE": - - record['notice'] = _read_string(f) - - elif record['rectype'] == "HEAP_HEADER": - - record['nvalues'] = _read_long(f) - record['indices'] = [] - for i in range(record['nvalues']): - record['indices'].append(_read_long(f)) - - elif record['rectype'] == "COMMONBLOCK": - - record['nvars'] = _read_long(f) - record['name'] = _read_string(f) - record['varnames'] = [] - for i in range(record['nvars']): - record['varnames'].append(_read_string(f)) - - elif record['rectype'] == "END_MARKER": - - record['end'] = True - - elif record['rectype'] == "UNKNOWN": - - warnings.warn("Skipping UNKNOWN record") - - elif record['rectype'] == "SYSTEM_VARIABLE": - - warnings.warn("Skipping SYSTEM_VARIABLE record") - - else: - - raise Exception("record['rectype']=%s not implemented" % \ - record['rectype']) - - f.seek(nextrec) - - return record - - -def _read_typedesc(f): - '''Function to read in a type descriptor''' - - typedesc = {} - - typedesc['typecode'] = _read_long(f) - typedesc['varflags'] = _read_long(f) - - if typedesc['varflags'] & 2 == 2: - raise Exception("System variables not implemented") 
- - typedesc['array'] = typedesc['varflags'] & 4 == 4 - typedesc['structure'] = typedesc['varflags'] & 32 == 32 - - if typedesc['structure']: - typedesc['array_desc'] = _read_arraydesc(f) - typedesc['struct_desc'] = _read_structdesc(f) - elif typedesc['array']: - typedesc['array_desc'] = _read_arraydesc(f) - - return typedesc - - -def _read_arraydesc(f): - '''Function to read in an array descriptor''' - - arraydesc = {} - - arraydesc['arrstart'] = _read_long(f) - - if arraydesc['arrstart'] == 8: - - _skip_bytes(f, 4) - - arraydesc['nbytes'] = _read_long(f) - arraydesc['nelements'] = _read_long(f) - arraydesc['ndims'] = _read_long(f) - - _skip_bytes(f, 8) - - arraydesc['nmax'] = _read_long(f) - - arraydesc['dims'] = [] - for d in range(arraydesc['nmax']): - arraydesc['dims'].append(_read_long(f)) - - elif arraydesc['arrstart'] == 18: - - warnings.warn("Using experimental 64-bit array read") - - _skip_bytes(f, 8) - - arraydesc['nbytes'] = _read_uint64(f) - arraydesc['nelements'] = _read_uint64(f) - arraydesc['ndims'] = _read_long(f) - - _skip_bytes(f, 8) - - arraydesc['nmax'] = 8 - - arraydesc['dims'] = [] - for d in range(arraydesc['nmax']): - v = _read_long(f) - if v != 0: - raise Exception("Expected a zero in ARRAY_DESC") - arraydesc['dims'].append(_read_long(f)) - - else: - - raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart']) - - return arraydesc - - -def _read_structdesc(f): - '''Function to read in a structure descriptor''' - - structdesc = {} - - structstart = _read_long(f) - if structstart != 9: - raise Exception("STRUCTSTART should be 9") - - structdesc['name'] = _read_string(f) - predef = _read_long(f) - structdesc['ntags'] = _read_long(f) - structdesc['nbytes'] = _read_long(f) - - structdesc['predef'] = predef & 1 - structdesc['inherits'] = predef & 2 - structdesc['is_super'] = predef & 4 - - if not structdesc['predef']: - - structdesc['tagtable'] = [] - for t in range(structdesc['ntags']): - structdesc['tagtable'].append(_read_tagdesc(f)) - - 
for tag in structdesc['tagtable']: - tag['name'] = _read_string(f) - - structdesc['arrtable'] = {} - for tag in structdesc['tagtable']: - if tag['array']: - structdesc['arrtable'][tag['name']] = _read_arraydesc(f) - - structdesc['structtable'] = {} - for tag in structdesc['tagtable']: - if tag['structure']: - structdesc['structtable'][tag['name']] = _read_structdesc(f) - - if structdesc['inherits'] or structdesc['is_super']: - structdesc['classname'] = _read_string(f) - structdesc['nsupclasses'] = _read_long(f) - structdesc['supclassnames'] = [] - for s in range(structdesc['nsupclasses']): - structdesc['supclassnames'].append(_read_string(f)) - structdesc['supclasstable'] = [] - for s in range(structdesc['nsupclasses']): - structdesc['supclasstable'].append(_read_structdesc(f)) - - STRUCT_DICT[structdesc['name']] = structdesc - - else: - - if not structdesc['name'] in STRUCT_DICT: - raise Exception("PREDEF=1 but can't find definition") - - structdesc = STRUCT_DICT[structdesc['name']] - - return structdesc - - -def _read_tagdesc(f): - '''Function to read in a tag descriptor''' - - tagdesc = {} - - tagdesc['offset'] = _read_long(f) - - if tagdesc['offset'] == -1: - tagdesc['offset'] = _read_uint64(f) - - tagdesc['typecode'] = _read_long(f) - tagflags = _read_long(f) - - tagdesc['array'] = tagflags & 4 == 4 - tagdesc['structure'] = tagflags & 32 == 32 - tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT - # Assume '10'x is scalar - - return tagdesc - - -def _replace_heap(variable, heap): - - if isinstance(variable, Pointer): - - while isinstance(variable, Pointer): - - if variable.index == 0: - variable = None - else: - variable = heap[variable.index] - - replace, new = _replace_heap(variable, heap) - - if replace: - variable = new - - return True, variable - - elif isinstance(variable, np.core.records.recarray): - - # Loop over records - for ir, record in enumerate(variable): - - replace, new = _replace_heap(record, heap) - - if replace: - variable[ir] = new - - 
return False, variable - - elif isinstance(variable, np.core.records.record): - - # Loop over values - for iv, value in enumerate(variable): - - replace, new = _replace_heap(value, heap) - - if replace: - variable[iv] = new - - return False, variable - - elif isinstance(variable, np.ndarray): - - # Loop over values if type is np.object_ - if variable.dtype.type is np.object_: - - for iv in range(variable.size): - - replace, new = _replace_heap(variable.item(iv), heap) - - if replace: - variable.itemset(iv, new) - - return False, variable - - else: - - return False, variable - - -class AttrDict(dict): - ''' - A case-insensitive dictionary with access via item, attribute, and call - notations: - - >>> d = AttrDict() - >>> d['Variable'] = 123 - >>> d['Variable'] - 123 - >>> d.Variable - 123 - >>> d.variable - 123 - >>> d('VARIABLE') - 123 - ''' - - def __init__(self, init={}): - dict.__init__(self, init) - - def __getitem__(self, name): - return super(AttrDict, self).__getitem__(name.lower()) - - def __setitem__(self, key, value): - return super(AttrDict, self).__setitem__(key.lower(), value) - - __getattr__ = __getitem__ - __setattr__ = __setitem__ - __call__ = __getitem__ - - -def readsav(file_name, idict=None, python_dict=False, - uncompressed_file_name=None, verbose=False): - ''' - Read an IDL .sav file - - Parameters - ---------- - file_name : str - Name of the IDL save file. - idict : dict, optional - Dictionary in which to insert .sav file variables - python_dict: bool, optional - By default, the object return is not a Python dictionary, but a - case-insensitive dictionary with item, attribute, and call access - to variables. To get a standard Python dictionary, set this option - to True. - uncompressed_file_name : str, optional - This option only has an effect for .sav files written with the - /compress option. If a file name is specified, compressed .sav - files are uncompressed to this file. 
Otherwise, readsav will use - the `tempfile` module to determine a temporary filename - automatically, and will remove the temporary file upon successfully - reading it in. - verbose : bool, optional - Whether to print out information about the save file, including - the records read, and available variables. - - Returns - ---------- - idl_dict : AttrDict or dict - If `python_dict` is set to False (default), this function returns a - case-insensitive dictionary with item, attribute, and call access - to variables. If `python_dict` is set to True, this function - returns a Python dictionary with all variable names in lowercase. - If `idict` was specified, then variables are written to the - dictionary specified, and the updated dictionary is returned. - ''' - - # Initialize record and variable holders - records = [] - if python_dict or idict: - variables = {} - else: - variables = AttrDict() - - # Open the IDL file - f = open(file_name, 'rb') - - # Read the signature, which should be 'SR' - signature = _read_bytes(f, 2) - if signature != asbytes('SR'): - raise Exception("Invalid SIGNATURE: %s" % signature) - - # Next, the record format, which is '\x00\x04' for normal .sav - # files, and '\x00\x06' for compressed .sav files. 
- recfmt = _read_bytes(f, 2) - - if recfmt == asbytes('\x00\x04'): - pass - - elif recfmt == asbytes('\x00\x06'): - - if verbose: - print "IDL Save file is compressed" - - if uncompressed_file_name: - fout = open(uncompressed_file_name, 'w+b') - else: - fout = tempfile.NamedTemporaryFile(suffix='.sav') - - if verbose: - print " -> expanding to %s" % fout.name - - # Write header - fout.write(asbytes('SR\x00\x04')) - - # Cycle through records - while True: - - # Read record type - rectype = _read_long(f) - fout.write(struct.pack('>l', int(rectype))) - - # Read position of next record and return as int - nextrec = _read_uint32(f) - nextrec += _read_uint32(f) * 2**32 - - # Read the unknown 4 bytes - unknown = f.read(4) - - # Check if the end of the file has been reached - if RECTYPE_DICT[rectype] == 'END_MARKER': - fout.write(struct.pack('>I', int(nextrec) % 2**32)) - fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) - fout.write(unknown) - break - - # Find current position - pos = f.tell() - - # Decompress record - rec_string = zlib.decompress(f.read(nextrec-pos)) - - # Find new position of next record - nextrec = fout.tell() + len(rec_string) + 12 - - # Write out record - fout.write(struct.pack('>I', int(nextrec % 2**32))) - fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) - fout.write(unknown) - fout.write(rec_string) - - # Close the original compressed file - f.close() - - # Set f to be the decompressed file, and skip the first four bytes - f = fout - f.seek(4) - - else: - raise Exception("Invalid RECFMT: %s" % recfmt) - - # Loop through records, and add them to the list - while True: - r = _read_record(f) - records.append(r) - if 'end' in r: - if r['end']: - break - - # Close the file - f.close() - - # Find heap data variables - heap = {} - for r in records: - if r['rectype'] == "HEAP_DATA": - heap[r['heap_index']] = r['data'] - - # Find all variables - for r in records: - if r['rectype'] == "VARIABLE": - replace, 
new = _replace_heap(r['data'], heap) - if replace: - r['data'] = new - variables[r['varname'].lower()] = r['data'] - - if verbose: - - # Print out timestamp info about the file - for record in records: - if record['rectype'] == "TIMESTAMP": - print "-"*50 - print "Date: %s" % record['date'] - print "User: %s" % record['user'] - print "Host: %s" % record['host'] - break - - # Print out version info about the file - for record in records: - if record['rectype'] == "VERSION": - print "-"*50 - print "Format: %s" % record['format'] - print "Architecture: %s" % record['arch'] - print "Operating System: %s" % record['os'] - print "IDL Version: %s" % record['release'] - break - - # Print out identification info about the file - for record in records: - if record['rectype'] == "IDENTIFICATON": - print "-"*50 - print "Author: %s" % record['author'] - print "Title: %s" % record['title'] - print "ID Code: %s" % record['idcode'] - break - - print "-"*50 - print "Successfully read %i records of which:" % \ - (len(records)) - - # Create convenience list of record types - rectypes = [r['rectype'] for r in records] - - for rt in set(rectypes): - if rt != 'END_MARKER': - print " - %i are of type %s" % (rectypes.count(rt), rt) - print "-"*50 - - if 'VARIABLE' in rectypes: - print "Available variables:" - for var in variables: - print " - %s [%s]" % (var, type(variables[var])) - print "-"*50 - - if idict: - for var in variables: - idict[var] = variables[var] - return idict - else: - return variables diff --git a/scipy-0.10.1/scipy/io/matlab/SConscript b/scipy-0.10.1/scipy/io/matlab/SConscript deleted file mode 100644 index 0ae6ae2c5f..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/SConscript +++ /dev/null @@ -1,6 +0,0 @@ -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) -env.NumpyPythonExtension('streams', source='streams.c') -env.NumpyPythonExtension('mio_utils', source='mio_utils.c') -env.NumpyPythonExtension('mio5_utils', source='mio5_utils.c') diff 
--git a/scipy-0.10.1/scipy/io/matlab/SConstruct b/scipy-0.10.1/scipy/io/matlab/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/io/matlab/__init__.py b/scipy-0.10.1/scipy/io/matlab/__init__.py deleted file mode 100644 index a6b09750da..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -Utilities for dealing with MATLAB(R) files - -Notes ------ -MATLAB(R) is a registered trademark of The MathWorks, Inc., 3 Apple Hill -Drive, Natick, MA 01760-2098, USA. - -""" -# Matlab file read and write utilities -from mio import loadmat, savemat -import byteordercodes - -__all__ = ['loadmat', 'savemat', 'byteordercodes'] - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/io/matlab/benchmarks/bench_structarr.py b/scipy-0.10.1/scipy/io/matlab/benchmarks/bench_structarr.py deleted file mode 100644 index c3a86c1f22..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/benchmarks/bench_structarr.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import division -from numpy.testing import * - -from cStringIO import StringIO - -import numpy as np -import scipy.io as sio - - -def make_structarr(n_vars, n_fields, n_structs): - var_dict = {} - for vno in range(n_vars): - vname = 'var%00d' % vno - end_dtype = [('f%d' % d, 'i4', 10) for d in range(n_fields)] - s_arrs = np.zeros((n_structs,), dtype=end_dtype) - var_dict[vname] = s_arrs - return var_dict - - -def bench_run(): - str_io = StringIO() - print - print 'Read / writing matlab structs' - print '='*60 - print ' write | read | vars | fields | structs ' - print '-'*60 - print - for n_vars, n_fields, n_structs in ( - (10, 10, 20),): - var_dict = make_structarr(n_vars, n_fields, n_structs) - str_io = StringIO() - 
write_time = measure('sio.savemat(str_io, var_dict)') - read_time = measure('sio.loadmat(str_io)') - print '%.5f | %.5f | %5d | %5d | %5d ' % ( - write_time, - read_time, - n_vars, - n_fields, - n_structs) - - -if __name__ == '__main__' : - bench_run() diff --git a/scipy-0.10.1/scipy/io/matlab/bento.info b/scipy-0.10.1/scipy/io/matlab/bento.info deleted file mode 100644 index 7a339b06c3..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/bento.info +++ /dev/null @@ -1,7 +0,0 @@ -Library: - Extension: streams - Sources: streams.c - Extension: mio_utils - Sources: mio_utils.c - Extension: mio5_utils - Sources: mio5_utils.c diff --git a/scipy-0.10.1/scipy/io/matlab/byteordercodes.py b/scipy-0.10.1/scipy/io/matlab/byteordercodes.py deleted file mode 100644 index 40ab4aa50e..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/byteordercodes.py +++ /dev/null @@ -1,68 +0,0 @@ -''' Byteorder utilities for system - numpy byteorder encoding - -Converts a variety of string codes for little endian, big endian, -native byte order and swapped byte order to explicit numpy endian -codes - one of '<' (little endian) or '>' (big endian) - -''' - -import sys - -sys_is_le = sys.byteorder == 'little' -native_code = sys_is_le and '<' or '>' -swapped_code = sys_is_le and '>' or '<' - -aliases = {'little': ('little', '<', 'l', 'le'), - 'big': ('big', '>', 'b', 'be'), - 'native': ('native', '='), - 'swapped': ('swapped', 'S')} - -def to_numpy_code(code): - """ - Convert various order codings to numpy format. - - Parameters - ---------- - code : str - The code to convert. It is converted to lower case before parsing. - Legal values are: - 'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=', - 'swapped', 's'. - - Returns - ------- - out_code : {'<', '>'} - Here '<' is the numpy dtype code for little endian, - and '>' is the code for big endian. 
- - Examples - -------- - >>> import sys - >>> sys_is_le == (sys.byteorder == 'little') - True - >>> to_numpy_code('big') - '>' - >>> to_numpy_code('little') - '<' - >>> nc = to_numpy_code('native') - >>> nc == '<' if sys_is_le else nc == '>' - True - >>> sc = to_numpy_code('swapped') - >>> sc == '>' if sys_is_le else sc == '<' - True - - """ - code = code.lower() - if code is None: - return native_code - if code in aliases['little']: - return '<' - elif code in aliases['big']: - return '>' - elif code in aliases['native']: - return native_code - elif code in aliases['swapped']: - return swapped_code - else: - raise ValueError( - 'We cannot handle byte order %s' % code) diff --git a/scipy-0.10.1/scipy/io/matlab/mio.py b/scipy-0.10.1/scipy/io/matlab/mio.py deleted file mode 100644 index ae2a8f9e2a..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/mio.py +++ /dev/null @@ -1,271 +0,0 @@ -"""Module for reading and writing MATLAB .mat files""" -# Authors: Travis Oliphant, Matthew Brett - -""" -Module for reading and writing matlab (TM) .mat files -""" - -import os -import sys -import warnings - -from numpy.compat import asbytes - -from miobase import get_matfile_version, docfiller -from mio4 import MatFile4Reader, MatFile4Writer -from mio5 import MatFile5Reader, MatFile5Writer - -__all__ = ['find_mat_file', 'mat_reader_factory', 'loadmat', 'savemat'] - -@docfiller -def find_mat_file(file_name, appendmat=True): - ''' Try to find .mat file on system path - - Parameters - ---------- - file_name : str - file name for mat file - %(append_arg)s - - Returns - ------- - full_name : string - possibly modified name after path search - ''' - warnings.warn('Searching for mat files on python system path will be ' + - 'removed in next version of scipy', - DeprecationWarning, stacklevel=2) - if appendmat and file_name.endswith(".mat"): - file_name = file_name[:-4] - if os.sep in file_name: - full_name = file_name - if appendmat: - full_name = file_name + ".mat" - else: - full_name = None 
- junk, file_name = os.path.split(file_name) - for path in [os.curdir] + list(sys.path): - test_name = os.path.join(path, file_name) - if appendmat: - test_name += ".mat" - try: - fid = open(test_name,'rb') - fid.close() - full_name = test_name - break - except IOError: - pass - return full_name - - -def _open_file(file_like, appendmat): - ''' Open `file_like` and return as file-like object ''' - if isinstance(file_like, basestring): - try: - return open(file_like, 'rb') - except IOError: - pass - if appendmat and not file_like.endswith('.mat'): - try: - return open(file_like + '.mat', 'rb') - except IOError: - pass - # search the python path - we'll remove this soon - full_name = find_mat_file(file_like, appendmat) - if full_name is None: - raise IOError("%s not found on the path." - % file_like) - return open(full_name, 'rb') - # not a string - maybe file-like object - try: - file_like.read(0) - except AttributeError: - raise IOError('Reader needs file name or open file-like object') - return file_like - - -@docfiller -def mat_reader_factory(file_name, appendmat=True, **kwargs): - """Create reader for matlab .mat format files - - Parameters - ---------- - %(file_arg)s - %(append_arg)s - %(load_args)s - %(struct_arg)s - - Returns - ------- - matreader : MatFileReader object - Initialized instance of MatFileReader class matching the mat file - type detected in `filename`. 
- """ - byte_stream = _open_file(file_name, appendmat) - mjv, mnv = get_matfile_version(byte_stream) - if mjv == 0: - return MatFile4Reader(byte_stream, **kwargs) - elif mjv == 1: - return MatFile5Reader(byte_stream, **kwargs) - elif mjv == 2: - raise NotImplementedError('Please use HDF reader for matlab v7.3 files') - else: - raise TypeError('Did not recognize version %s' % mjv) - -@docfiller -def loadmat(file_name, mdict=None, appendmat=True, **kwargs): - """ - Load MATLAB file - - Parameters - ---------- - file_name : str - Name of the mat file (do not need .mat extension if - appendmat==True) Can also pass open file-like object. - m_dict : dict, optional - Dictionary in which to insert matfile variables. - appendmat : bool, optional - True to append the .mat extension to the end of the given - filename, if not already present. - byte_order : str or None, optional - None by default, implying byte order guessed from mat - file. Otherwise can be one of ('native', '=', 'little', '<', - 'BIG', '>'). - mat_dtype : bool, optional - If True, return arrays in same dtype as would be loaded into - MATLAB (instead of the dtype with which they are saved). - squeeze_me : bool, optional - Whether to squeeze unit matrix dimensions or not. - chars_as_strings : bool, optional - Whether to convert char arrays to string arrays. - matlab_compatible : bool, optional - Returns matrices as would be loaded by MATLAB (implies - squeeze_me=False, chars_as_strings=False, mat_dtype=True, - struct_as_record=True). - struct_as_record : bool, optional - Whether to load MATLAB structs as numpy record arrays, or as - old-style numpy arrays with dtype=object. Setting this flag to - False replicates the behavior of scipy version 0.7.x (returning - numpy object arrays). The default setting is True, because it - allows easier round-trip load and save of MATLAB files. - variable_names : None or sequence - If None (the default) - read all variables in file. 
Otherwise - `variable_names` should be a sequence of strings, giving names of the - matlab variables to read from the file. The reader will skip any - variable with a name not in this sequence, possibly saving some read - processing. - - Returns - ------- - mat_dict : dict - dictionary with variable names as keys, and loaded matrices as - values - - Notes - ----- - v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. - - You will need an HDF5 python library to read matlab 7.3 format mat - files. Because scipy does not supply one, we do not implement the - HDF5 / 7.3 interface here. - - """ - variable_names = kwargs.pop('variable_names', None) - MR = mat_reader_factory(file_name, appendmat, **kwargs) - matfile_dict = MR.get_variables(variable_names) - if mdict is not None: - mdict.update(matfile_dict) - else: - mdict = matfile_dict - if isinstance(file_name, basestring): - MR.mat_stream.close() - return mdict - -@docfiller -def savemat(file_name, mdict, - appendmat=True, - format='5', - long_field_names=False, - do_compression=False, - oned_as=None): - """ - Save a dictionary of names and arrays into a MATLAB-style .mat file. - - This saves the array objects in the given dictionary to a MATLAB- - style .mat file. - - Parameters - ---------- - file_name : str or file-like object - Name of the .mat file (.mat extension not needed if ``appendmat == - True``). - Can also pass open file_like object. - mdict : dict - Dictionary from which to save matfile variables. - appendmat : bool, optional - True (the default) to append the .mat extension to the end of the - given filename, if not already present. - format : {'5', '4'}, string, optional - '5' (the default) for MATLAB 5 and up (to 7.2), - '4' for MATLAB 4 .mat files - long_field_names : bool, optional - False (the default) - maximum field name length in a structure is - 31 characters which is the documented maximum length. 
- True - maximum field name length in a structure is 63 characters - which works for MATLAB 7.6+ - do_compression : bool, optional - Whether or not to compress matrices on write. Default is False. - oned_as : {'column', 'row', None}, optional - If 'column', write 1-D numpy arrays as column vectors. - If 'row', write 1-D numpy arrays as row vectors. - If None (the default), the behavior depends on the value of `format` - (see Notes below). - - See also - -------- - mio4.MatFile4Writer - mio5.MatFile5Writer - - Notes - ----- - If ``format == '4'``, `mio4.MatFile4Writer` is called, which sets - `oned_as` to 'row' if it had been None. If ``format == '5'``, - `mio5.MatFile5Writer` is called, which sets `oned_as` to 'column' if - it had been None, but first it executes: - - ``warnings.warn("Using oned_as default value ('column')" +`` - ``" This will change to 'row' in future versions",`` - ``FutureWarning, stacklevel=2)`` - - without being more specific as to precisely when the change will take - place. 
- - """ - file_is_string = isinstance(file_name, basestring) - if file_is_string: - if appendmat and file_name[-4:] != ".mat": - file_name = file_name + ".mat" - file_stream = open(file_name, 'wb') - else: - try: - file_name.write(asbytes('')) - except AttributeError: - raise IOError('Writer needs file name or writeable ' - 'file-like object') - file_stream = file_name - - if format == '4': - if long_field_names: - raise ValueError("Long field names are not available for version 4 files") - MW = MatFile4Writer(file_stream, oned_as) - elif format == '5': - MW = MatFile5Writer(file_stream, - do_compression=do_compression, - unicode_strings=True, - long_field_names=long_field_names, - oned_as=oned_as) - else: - raise ValueError("Format should be '4' or '5'") - MW.put_variables(mdict) - if file_is_string: - file_stream.close() diff --git a/scipy-0.10.1/scipy/io/matlab/mio4.py b/scipy-0.10.1/scipy/io/matlab/mio4.py deleted file mode 100644 index dea3d44589..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/mio4.py +++ /dev/null @@ -1,508 +0,0 @@ -''' Classes for read / write of matlab (TM) 4 files -''' -import sys -import warnings - -import numpy as np -from numpy.compat import asbytes, asstr - -import scipy.sparse - -from miobase import MatFileReader, docfiller, matdims, \ - read_dtype, convert_dtypes, arr_to_chars, arr_dtype_number, \ - MatWriteError - -from mio_utils import squeeze_element, chars_to_strings - - -SYS_LITTLE_ENDIAN = sys.byteorder == 'little' - -miDOUBLE = 0 -miSINGLE = 1 -miINT32 = 2 -miINT16 = 3 -miUINT16 = 4 -miUINT8 = 5 - -mdtypes_template = { - miDOUBLE: 'f8', - miSINGLE: 'f4', - miINT32: 'i4', - miINT16: 'i2', - miUINT16: 'u2', - miUINT8: 'u1', - 'header': [('mopt', 'i4'), - ('mrows', 'i4'), - ('ncols', 'i4'), - ('imagf', 'i4'), - ('namlen', 'i4')], - 'U1': 'U1', - } - -np_to_mtypes = { - 'f8': miDOUBLE, - 'c32': miDOUBLE, - 'c24': miDOUBLE, - 'c16': miDOUBLE, - 'f4': miSINGLE, - 'c8': miSINGLE, - 'i4': miINT32, - 'i2': miINT16, - 'u2': miUINT16, - 
'u1': miUINT8, - 'S1': miUINT8, - } - -# matrix classes -mxFULL_CLASS = 0 -mxCHAR_CLASS = 1 -mxSPARSE_CLASS = 2 - -order_codes = { - 0: '<', - 1: '>', - 2: 'VAX D-float', #! - 3: 'VAX G-float', - 4: 'Cray', #!! - } - -class VarHeader4(object): - # Mat4 variables never logical or global - is_logical = False - is_global = False - - def __init__(self, - name, - dtype, - mclass, - dims, - is_complex): - self.name = name - self.dtype = dtype - self.mclass = mclass - self.dims = dims - self.is_complex = is_complex - - -class VarReader4(object): - ''' Class to read matlab 4 variables ''' - - def __init__(self, file_reader): - self.file_reader = file_reader - self.mat_stream = file_reader.mat_stream - self.dtypes = file_reader.dtypes - self.chars_as_strings = file_reader.chars_as_strings - self.squeeze_me = file_reader.squeeze_me - - def read_header(self): - ''' Reads and return header for variable ''' - data = read_dtype(self.mat_stream, self.dtypes['header']) - name = self.mat_stream.read(int(data['namlen'])).strip(asbytes('\x00')) - if data['mopt'] < 0 or data['mopt'] > 5000: - ValueError, 'Mat 4 mopt wrong format, byteswapping problem?' 
- M,rest = divmod(data['mopt'], 1000) - O,rest = divmod(rest,100) - P,rest = divmod(rest,10) - T = rest - if O != 0: - raise ValueError('O in MOPT integer should be 0, wrong format?') - dims = (data['mrows'], data['ncols']) - is_complex = data['imagf'] == 1 - dtype = self.dtypes[P] - return VarHeader4( - name, - dtype, - T, - dims, - is_complex) - - def array_from_header(self, hdr, process=True): - mclass = hdr.mclass - if mclass == mxFULL_CLASS: - arr = self.read_full_array(hdr) - elif mclass == mxCHAR_CLASS: - arr = self.read_char_array(hdr) - if process and self.chars_as_strings: - arr = chars_to_strings(arr) - elif mclass == mxSPARSE_CLASS: - # no current processing (below) makes sense for sparse - return self.read_sparse_array(hdr) - else: - raise TypeError('No reader for class code %s' % mclass) - if process and self.squeeze_me: - return squeeze_element(arr) - return arr - - def read_sub_array(self, hdr, copy=True): - ''' Mat4 read always uses header dtype and dims - hdr : object - object with attributes 'dtype', 'dims' - copy : bool - copies array if True (default True) - (buffer is usually read only) - - self.dtype is assumed to be correct endianness - ''' - dt = hdr.dtype - dims = hdr.dims - num_bytes = dt.itemsize - for d in dims: - num_bytes *= d - arr = np.ndarray(shape=dims, - dtype=dt, - buffer=self.mat_stream.read(int(num_bytes)), - order='F') - if copy: - arr = arr.copy() - return arr - - def read_full_array(self, hdr): - ''' Full (rather than sparse matrix) getter - ''' - if hdr.is_complex: - # avoid array copy to save memory - res = self.read_sub_array(hdr, copy=False) - res_j = self.read_sub_array(hdr, copy=False) - return res + (res_j * 1j) - return self.read_sub_array(hdr) - - def read_char_array(self, hdr): - ''' Ascii text matrix (char matrix) reader - - ''' - arr = self.read_sub_array(hdr).astype(np.uint8) - # ascii to unicode - S = arr.tostring().decode('ascii') - return np.ndarray(shape=hdr.dims, - dtype=np.dtype('U1'), - buffer = 
np.array(S)).copy() - - def read_sparse_array(self, hdr): - ''' Read sparse matrix type - - Matlab (TM) 4 real sparse arrays are saved in a N+1 by 3 array - format, where N is the number of non-zero values. Column 1 values - [0:N] are the (1-based) row indices of the each non-zero value, - column 2 [0:N] are the column indices, column 3 [0:N] are the - (real) values. The last values [-1,0:2] of the rows, column - indices are shape[0] and shape[1] respectively of the output - matrix. The last value for the values column is a padding 0. mrows - and ncols values from the header give the shape of the stored - matrix, here [N+1, 3]. Complex data is saved as a 4 column - matrix, where the fourth column contains the imaginary component; - the last value is again 0. Complex sparse data do _not_ have the - header imagf field set to True; the fact that the data are complex - is only detectable because there are 4 storage columns - ''' - res = self.read_sub_array(hdr) - tmp = res[:-1,:] - dims = res[-1,0:2] - I = np.ascontiguousarray(tmp[:,0],dtype='intc') #fixes byte order also - J = np.ascontiguousarray(tmp[:,1],dtype='intc') - I -= 1 # for 1-based indexing - J -= 1 - if res.shape[1] == 3: - V = np.ascontiguousarray(tmp[:,2],dtype='float') - else: - V = np.ascontiguousarray(tmp[:,2],dtype='complex') - V.imag = tmp[:,3] - return scipy.sparse.coo_matrix((V,(I,J)), dims) - - -class MatFile4Reader(MatFileReader): - ''' Reader for Mat4 files ''' - @docfiller - def __init__(self, mat_stream, *args, **kwargs): - ''' Initialize matlab 4 file reader - - %(matstream_arg)s - %(load_args)s - ''' - super(MatFile4Reader, self).__init__(mat_stream, *args, **kwargs) - self._matrix_reader = None - - def guess_byte_order(self): - self.mat_stream.seek(0) - mopt = read_dtype(self.mat_stream, np.dtype('i4')) - self.mat_stream.seek(0) - if mopt < 0 or mopt > 5000: - return SYS_LITTLE_ENDIAN and '>' or '<' - return SYS_LITTLE_ENDIAN and '<' or '>' - - def initialize_read(self): - ''' Run when 
beginning read of variables - - Sets up readers from parameters in `self` - ''' - self.dtypes = convert_dtypes(mdtypes_template, self.byte_order) - self._matrix_reader = VarReader4(self) - - def read_var_header(self): - ''' Read header, return header, next position - - Header has to define at least .name and .is_global - - Parameters - ---------- - None - - Returns - ------- - header : object - object that can be passed to self.read_var_array, and that - has attributes .name and .is_global - next_position : int - position in stream of next variable - ''' - hdr = self._matrix_reader.read_header() - n = reduce(lambda x, y: x*y, hdr.dims, 1) # fast product - remaining_bytes = hdr.dtype.itemsize * n - if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS: - remaining_bytes *= 2 - next_position = self.mat_stream.tell() + remaining_bytes - return hdr, next_position - - def read_var_array(self, header, process=True): - ''' Read array, given `header` - - Parameters - ---------- - header : header object - object with fields defining variable header - process : {True, False} bool, optional - If True, apply recursive post-processing during loading of - array. - - Returns - ------- - arr : array - array with post-processing applied or not according to - `process`. 
- ''' - return self._matrix_reader.array_from_header(header, process) - - def get_variables(self, variable_names=None): - ''' get variables from stream as dictionary - - variable_names - optional list of variable names to get - - If variable_names is None, then get all variables in file - ''' - if isinstance(variable_names, basestring): - variable_names = [variable_names] - self.mat_stream.seek(0) - # set up variable reader - self.initialize_read() - mdict = {} - while not self.end_of_stream(): - hdr, next_position = self.read_var_header() - name = asstr(hdr.name) - if variable_names and name not in variable_names: - self.mat_stream.seek(next_position) - continue - mdict[name] = self.read_var_array(hdr) - self.mat_stream.seek(next_position) - if variable_names: - variable_names.remove(name) - if len(variable_names) == 0: - break - return mdict - - -def arr_to_2d(arr, oned_as='row'): - ''' Make ``arr`` exactly two dimensional - - If `arr` has more than 2 dimensions, then, for the sake of - compatibility with previous versions of scipy, we reshape to 2D - preserving the last dimension and increasing the first dimension. - In future versions we will raise an error, as this is at best a very - counterinituitive thing to do. - - Parameters - ---------- - arr : array - oned_as : {'row', 'column'} - Whether to reshape 1D vectors as row vectors or column vectors. 
- See documentation for ``matdims`` for more detail - - Returns - ------- - arr2d : array - 2D version of the array - ''' - dims = matdims(arr, oned_as) - if len(dims) > 2: - warnings.warn('Matlab 4 files only support <=2 ' - 'dimensions; the next version of scipy will ' - 'raise an error when trying to write >2D arrays ' - 'to matlab 4 format files', - DeprecationWarning, - ) - return arr.reshape((-1,dims[-1])) - return arr.reshape(dims) - - -class VarWriter4(object): - def __init__(self, file_writer): - self.file_stream = file_writer.file_stream - self.oned_as = file_writer.oned_as - - def write_bytes(self, arr): - self.file_stream.write(arr.tostring(order='F')) - - def write_string(self, s): - self.file_stream.write(s) - - def write_header(self, name, shape, P=0, T=0, imagf=0): - ''' Write header for given data options - - Parameters - ---------- - name : str - shape : sequence - Shape of array as it will be read in matlab - P - mat4 data type - T - mat4 matrix class - imagf - complex flag - ''' - header = np.empty((), mdtypes_template['header']) - M = not SYS_LITTLE_ENDIAN - O = 0 - header['mopt'] = (M * 1000 + - O * 100 + - P * 10 + - T) - header['mrows'] = shape[0] - header['ncols'] = shape[1] - header['imagf'] = imagf - header['namlen'] = len(name) + 1 - self.write_bytes(header) - self.write_string(asbytes(name + '\0')) - - def write(self, arr, name): - ''' Write matrix `arr`, with name `name` - - Parameters - ---------- - arr : array-like - array to write - name : str - name in matlab workspace - ''' - # we need to catch sparse first, because np.asarray returns an - # an object array for scipy.sparse - if scipy.sparse.issparse(arr): - self.write_sparse(arr, name) - return - arr = np.asarray(arr) - dt = arr.dtype - if not dt.isnative: - arr = arr.astype(dt.newbyteorder('=')) - dtt = dt.type - if dtt is np.object_: - raise TypeError('Cannot save object arrays in Mat4') - elif dtt is np.void: - raise TypeError('Cannot save void type arrays') - elif dtt in 
(np.unicode_, np.string_): - self.write_char(arr, name) - return - self.write_numeric(arr, name) - - def write_numeric(self, arr, name): - arr = arr_to_2d(arr, self.oned_as) - imagf = arr.dtype.kind == 'c' - try: - P = np_to_mtypes[arr.dtype.str[1:]] - except KeyError: - if imagf: - arr = arr.astype('c128') - else: - arr = arr.astype('f8') - P = miDOUBLE - self.write_header(name, - arr.shape, - P=P, - T=mxFULL_CLASS, - imagf=imagf) - if imagf: - self.write_bytes(arr.real) - self.write_bytes(arr.imag) - else: - self.write_bytes(arr) - - def write_char(self, arr, name): - arr = arr_to_chars(arr) - arr = arr_to_2d(arr, self.oned_as) - dims = arr.shape - self.write_header( - name, - dims, - P=miUINT8, - T=mxCHAR_CLASS) - if arr.dtype.kind == 'U': - # Recode unicode to ascii - n_chars = np.product(dims) - st_arr = np.ndarray(shape=(), - dtype=arr_dtype_number(arr, n_chars), - buffer=arr) - st = st_arr.item().encode('ascii') - arr = np.ndarray(shape=dims, dtype='S1', buffer=st) - self.write_bytes(arr) - - def write_sparse(self, arr, name): - ''' Sparse matrices are 2D - - See docstring for VarReader4.read_sparse_array - ''' - A = arr.tocoo() #convert to sparse COO format (ijv) - imagf = A.dtype.kind == 'c' - ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8') - ijv[:-1,0] = A.row - ijv[:-1,1] = A.col - ijv[:-1,0:2] += 1 # 1 based indexing - if imagf: - ijv[:-1,2] = A.data.real - ijv[:-1,3] = A.data.imag - else: - ijv[:-1,2] = A.data - ijv[-1,0:2] = A.shape - self.write_header( - name, - ijv.shape, - P=miDOUBLE, - T=mxSPARSE_CLASS) - self.write_bytes(ijv) - - -class MatFile4Writer(object): - ''' Class for writing matlab 4 format files ''' - def __init__(self, file_stream, oned_as=None): - self.file_stream = file_stream - if oned_as is None: - oned_as = 'row' - self.oned_as = oned_as - self._matrix_writer = None - - def put_variables(self, mdict, write_header=None): - ''' Write variables in `mdict` to stream - - Parameters - ---------- - mdict : mapping - mapping with method 
``items`` return name, contents pairs - where ``name`` which will appeak in the matlab workspace in - file load, and ``contents`` is something writeable to a - matlab file, such as a numpy array. - write_header : {None, True, False} - If True, then write the matlab file header before writing the - variables. If None (the default) then write the file header - if we are at position 0 in the stream. By setting False - here, and setting the stream position to the end of the file, - you can append variables to a matlab file - ''' - # there is no header for a matlab 4 mat file, so we ignore the - # ``write_header`` input argument. It's there for compatibility - # with the matlab 5 version of this method - self._matrix_writer = VarWriter4(self) - for name, var in mdict.items(): - self._matrix_writer.write(var, name) diff --git a/scipy-0.10.1/scipy/io/matlab/mio5.py b/scipy-0.10.1/scipy/io/matlab/mio5.py deleted file mode 100644 index 8152d9b016..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/mio5.py +++ /dev/null @@ -1,841 +0,0 @@ -''' Classes for read / write of matlab (TM) 5 files - -The matfile specification last found here: - -http://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf - -(as of December 5 2008) -''' - -''' -================================= - Note on functions and mat files -================================= - -The document above does not give any hints as to the storage of matlab -function handles, or anonymous function handles. I had therefore to -guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and -``mxOPAQUE_CLASS`` by looking at example mat files. - -``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to -contain a struct matrix with a set pattern of fields. For anonymous -functions, a sub-fields of one of these fields seems to contain the -well-named ``mxOPAQUE_CLASS``. 
This seems to cotain: - -* array flags as for any matlab matrix -* 3 int8 strings -* a matrix - -It seems that, whenever the mat file contains a ``mxOPAQUE_CLASS`` -instance, there is also an un-named matrix (name == '') at the end of -the mat file. I'll call this the ``__function_workspace__`` matrix. - -When I saved two anonymous functions in a mat file, or appended another -anonymous function to the mat file, there was still only one -``__function_workspace__`` un-named matrix at the end, but larger than -that for a mat file with a single anonymous function, suggesting that -the workspaces for the two functions had been merged. - -The ``__function_workspace__`` matrix appears to be of double class -(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in -the format of a mini .mat file, without the first 124 bytes of the file -header (the description and the subsystem_offset), but with the version -U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes, -presumably for 8 byte padding, and then a series of ``miMATRIX`` -entries, as in a standard mat file. The ``miMATRIX`` entries appear to -be series of un-named (name == '') matrices, and may also contain arrays -of this same mini-mat format. - -I guess that: - -* saving an anonymous function back to a mat file will need the - associated ``__function_workspace__`` matrix saved as well for the - anonymous function to work correctly. -* appending to a mat file that has a ``__function_workspace__`` would - involve first pulling off this workspace, appending, checking whether - there were any more anonymous functions appended, and then somehow - merging the relevant workspaces, and saving at the end of the mat - file. - -The mat files I was playing with are in ``tests/data``: - -* sqr.mat -* parabola.mat -* some_functions.mat - -See ``tests/test_mio.py:test_mio_funcs.py`` for a debugging -script I was working with. 
- -''' - -# Small fragments of current code adapted from matfile.py by Heiko -# Henkelmann - -import os -import time -import sys -import zlib -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO -import warnings - -import numpy as np -from numpy.compat import asbytes, asstr - -import scipy.sparse - -import byteordercodes as boc - -from miobase import MatFileReader, docfiller, matdims, \ - read_dtype, arr_to_chars, arr_dtype_number, \ - MatWriteError, MatReadError, MatReadWarning - -# Reader object for matlab 5 format variables -from mio5_utils import VarReader5 - -# Constants and helper objects -from mio5_params import MatlabObject, MatlabFunction, \ - MDTYPES, NP_TO_MTYPES, NP_TO_MXTYPES, \ - miCOMPRESSED, miMATRIX, miINT8, miUTF8, miUINT32, \ - mxCELL_CLASS, mxSTRUCT_CLASS, mxOBJECT_CLASS, mxCHAR_CLASS, \ - mxSPARSE_CLASS, mxDOUBLE_CLASS - - -class MatFile5Reader(MatFileReader): - ''' Reader for Mat 5 mat files - Adds the following attribute to base class - - uint16_codec - char codec to use for uint16 char arrays - (defaults to system default codec) - - Uses variable reader that has the following stardard interface (see - abstract class in ``miobase``:: - - __init__(self, file_reader) - read_header(self) - array_from_header(self) - - and added interface:: - - set_stream(self, stream) - read_full_tag(self) - - ''' - @docfiller - def __init__(self, - mat_stream, - byte_order=None, - mat_dtype=False, - squeeze_me=False, - chars_as_strings=True, - matlab_compatible=False, - struct_as_record=True, - uint16_codec=None - ): - '''Initializer for matlab 5 file format reader - - %(matstream_arg)s - %(load_args)s - %(struct_arg)s - uint16_codec : {None, string} - Set codec to use for uint16 char arrays (e.g. 'utf-8'). 
- Use system default codec if None - ''' - super(MatFile5Reader, self).__init__( - mat_stream, - byte_order, - mat_dtype, - squeeze_me, - chars_as_strings, - matlab_compatible, - struct_as_record - ) - # Set uint16 codec - if not uint16_codec: - uint16_codec = sys.getdefaultencoding() - self.uint16_codec = uint16_codec - # placeholders for readers - see initialize_read method - self._file_reader = None - self._matrix_reader = None - - def guess_byte_order(self): - ''' Guess byte order. - Sets stream pointer to 0 ''' - self.mat_stream.seek(126) - mi = self.mat_stream.read(2) - self.mat_stream.seek(0) - return mi == asbytes('IM') and '<' or '>' - - def read_file_header(self): - ''' Read in mat 5 file header ''' - hdict = {} - hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header'] - hdr = read_dtype(self.mat_stream, hdr_dtype) - hdict['__header__'] = hdr['description'].item().strip(asbytes(' \t\n\000')) - v_major = hdr['version'] >> 8 - v_minor = hdr['version'] & 0xFF - hdict['__version__'] = '%d.%d' % (v_major, v_minor) - return hdict - - def initialize_read(self): - ''' Run when beginning read of variables - - Sets up readers from parameters in `self` - ''' - # reader for top level stream. 
We need this extra top-level - # reader because we use the matrix_reader object to contain - # compressed matrices (so they have their own stream) - self._file_reader = VarReader5(self) - # reader for matrix streams - self._matrix_reader = VarReader5(self) - - def read_var_header(self): - ''' Read header, return header, next position - - Header has to define at least .name and .is_global - - Parameters - ---------- - None - - Returns - ------- - header : object - object that can be passed to self.read_var_array, and that - has attributes .name and .is_global - next_position : int - position in stream of next variable - ''' - mdtype, byte_count = self._file_reader.read_full_tag() - if not byte_count > 0: - raise ValueError("Did not read any bytes") - next_pos = self.mat_stream.tell() + byte_count - if mdtype == miCOMPRESSED: - # make new stream from compressed data - data = self.mat_stream.read(byte_count) - # Some matlab files contain zlib streams without valid - # Z_STREAM_END termination. To get round this, we use the - # decompressobj object, that allows you to decode an - # incomplete stream. 
See discussion at - # http://bugs.python.org/issue8672 - dcor = zlib.decompressobj() - stream = BytesIO(dcor.decompress(data)) - # Check the stream is not so broken as to leave cruft behind - if not dcor.flush() == asbytes(''): - raise ValueError("Something wrong with byte stream.") - del data - self._matrix_reader.set_stream(stream) - mdtype, byte_count = self._matrix_reader.read_full_tag() - else: - self._matrix_reader.set_stream(self.mat_stream) - if not mdtype == miMATRIX: - raise TypeError('Expecting miMATRIX type here, got %d' % mdtype) - header = self._matrix_reader.read_header() - return header, next_pos - - def read_var_array(self, header, process=True): - ''' Read array, given `header` - - Parameters - ---------- - header : header object - object with fields defining variable header - process : {True, False} bool, optional - If True, apply recursive post-processing during loading of - array. - - Returns - ------- - arr : array - array with post-processing applied or not according to - `process`. 
- ''' - return self._matrix_reader.array_from_header(header, process) - - def get_variables(self, variable_names=None): - ''' get variables from stream as dictionary - - variable_names - optional list of variable names to get - - If variable_names is None, then get all variables in file - ''' - if isinstance(variable_names, basestring): - variable_names = [variable_names] - self.mat_stream.seek(0) - # Here we pass all the parameters in self to the reading objects - self.initialize_read() - mdict = self.read_file_header() - mdict['__globals__'] = [] - while not self.end_of_stream(): - hdr, next_position = self.read_var_header() - name = asstr(hdr.name) - if name in mdict: - warnings.warn('Duplicate variable name "%s" in stream' - ' - replacing previous with new\n' - 'Consider mio5.varmats_from_mat to split ' - 'file into single variable files' % name, - MatReadWarning, stacklevel=2) - if name == '': - # can only be a matlab 7 function workspace - name = '__function_workspace__' - # We want to keep this raw because mat_dtype processing - # will break the format (uint8 as mxDOUBLE_CLASS) - process = False - else: - process = True - if variable_names and name not in variable_names: - self.mat_stream.seek(next_position) - continue - try: - res = self.read_var_array(hdr, process) - except MatReadError, err: - warnings.warn( - 'Unreadable variable "%s", because "%s"' % \ - (name, err), - Warning, stacklevel=2) - res = "Read error: %s" % err - self.mat_stream.seek(next_position) - mdict[name] = res - if hdr.is_global: - mdict['__globals__'].append(name) - if variable_names: - variable_names.remove(name) - if len(variable_names) == 0: - break - return mdict - - -def varmats_from_mat(file_obj): - """ Pull variables out of mat 5 file as a sequence of mat file objects - - This can be useful with a difficult mat file, containing unreadable - variables. This routine pulls the variables out in raw form and puts them, - unread, back into a file stream for saving or reading. 
Another use is the - pathological case where there is more than one variable of the same name in - the file; this routine returns the duplicates, whereas the standard reader - will overwrite duplicates in the returned dictionary. - - The file pointer in `file_obj` will be undefined. File pointers for the - returned file-like objects are set at 0. - - Parameters - ---------- - file_obj : file-like - file object containing mat file - - Returns - ------- - named_mats : list - list contains tuples of (name, BytesIO) where BytesIO is a file-like - object containing mat file contents as for a single variable. The - BytesIO contains a string with the original header and a single var. If - ``var_file_obj`` is an individual BytesIO instance, then save as a mat - file with something like ``open('test.mat', - 'wb').write(var_file_obj.read())`` - - Examples - -------- - >>> import scipy.io - - BytesIO is from the ``io`` module in python 3, and is ``cStringIO`` for - python < 3. - - >>> mat_fileobj = BytesIO() - >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'}) - >>> varmats = varmats_from_mat(mat_fileobj) - >>> sorted([name for name, str_obj in varmats]) - ['a', 'b'] - """ - rdr = MatFile5Reader(file_obj) - file_obj.seek(0) - # Raw read of top-level file header - hdr_len = MDTYPES[boc.native_code]['dtypes']['file_header'].itemsize - raw_hdr = file_obj.read(hdr_len) - # Initialize variable reading - file_obj.seek(0) - rdr.initialize_read() - mdict = rdr.read_file_header() - next_position = file_obj.tell() - named_mats = [] - while not rdr.end_of_stream(): - start_position = next_position - hdr, next_position = rdr.read_var_header() - name = asstr(hdr.name) - # Read raw variable string - file_obj.seek(start_position) - byte_count = next_position - start_position - var_str = file_obj.read(byte_count) - # write to stringio object - out_obj = BytesIO() - out_obj.write(raw_hdr) - out_obj.write(var_str) - out_obj.seek(0) - named_mats.append((name, out_obj)) - 
return named_mats - - -def to_writeable(source): - ''' Convert input object ``source`` to something we can write - - Parameters - ---------- - source : object - - Returns - ------- - arr : ndarray - - Examples - -------- - >>> to_writeable(np.array([1])) # pass through ndarrays - array([1]) - >>> expected = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')]) - >>> np.all(to_writeable({'a':1,'b':2}) == expected) - True - >>> np.all(to_writeable({'a':1,'b':2, '_c':3}) == expected) - True - >>> np.all(to_writeable({'a':1,'b':2, 100:3}) == expected) - True - >>> np.all(to_writeable({'a':1,'b':2, '99':3}) == expected) - True - >>> class klass(object): pass - >>> c = klass - >>> c.a = 1 - >>> c.b = 2 - >>> np.all(to_writeable({'a':1,'b':2}) == expected) - True - >>> to_writeable([]) - array([], dtype=float64) - >>> to_writeable(()) - array([], dtype=float64) - >>> to_writeable(None) - - >>> to_writeable('a string').dtype.type == np.str_ - True - >>> to_writeable(1) - array(1) - >>> to_writeable([1]) - array([1]) - >>> to_writeable([1]) - array([1]) - >>> to_writeable(object()) # not convertable - - dict keys with legal characters are convertible - - >>> to_writeable({'a':1})['a'] - array([1], dtype=object) - - but not with illegal characters - - >>> to_writeable({'1':1}) is None - True - >>> to_writeable({'_a':1}) is None - True - ''' - if isinstance(source, np.ndarray): - return source - if source is None: - return None - # Objects that have dicts - if hasattr(source, '__dict__'): - source = dict((key, value) for key, value in source.__dict__.items() - if not key.startswith('_')) - # Mappings or object dicts - if hasattr(source, 'keys'): - dtype = [] - values = [] - for field, value in source.items(): - if (isinstance(field, basestring) and - not field[0] in '_0123456789'): - dtype.append((field,object)) - values.append(value) - if dtype: - return np.array( [tuple(values)] ,dtype) - else: - return None - # Next try and convert to an array - narr = 
np.asanyarray(source) - if narr.dtype.type in (np.object, np.object_) and \ - narr.shape == () and narr == source: - # No interesting conversion possible - return None - return narr - - -# Native byte ordered dtypes for convenience for writers -NDT_FILE_HDR = MDTYPES[boc.native_code]['dtypes']['file_header'] -NDT_TAG_FULL = MDTYPES[boc.native_code]['dtypes']['tag_full'] -NDT_TAG_SMALL = MDTYPES[boc.native_code]['dtypes']['tag_smalldata'] -NDT_ARRAY_FLAGS = MDTYPES[boc.native_code]['dtypes']['array_flags'] - - -class VarWriter5(object): - ''' Generic matlab matrix writing class ''' - mat_tag = np.zeros((), NDT_TAG_FULL) - mat_tag['mdtype'] = miMATRIX - - def __init__(self, file_writer): - self.file_stream = file_writer.file_stream - self.unicode_strings=file_writer.unicode_strings - self.long_field_names=file_writer.long_field_names - self.oned_as = file_writer.oned_as - # These are used for top level writes, and unset after - self._var_name = None - self._var_is_global = False - - def write_bytes(self, arr): - self.file_stream.write(arr.tostring(order='F')) - - def write_string(self, s): - self.file_stream.write(s) - - def write_element(self, arr, mdtype=None): - ''' write tag and data ''' - if mdtype is None: - mdtype = NP_TO_MTYPES[arr.dtype.str[1:]] - byte_count = arr.size*arr.itemsize - if byte_count <= 4: - self.write_smalldata_element(arr, mdtype, byte_count) - else: - self.write_regular_element(arr, mdtype, byte_count) - - def write_smalldata_element(self, arr, mdtype, byte_count): - # write tag with embedded data - tag = np.zeros((), NDT_TAG_SMALL) - tag['byte_count_mdtype'] = (byte_count << 16) + mdtype - # if arr.tostring is < 4, the element will be zero-padded as needed. 
- tag['data'] = arr.tostring(order='F') - self.write_bytes(tag) - - def write_regular_element(self, arr, mdtype, byte_count): - # write tag, data - tag = np.zeros((), NDT_TAG_FULL) - tag['mdtype'] = mdtype - tag['byte_count'] = byte_count - self.write_bytes(tag) - self.write_bytes(arr) - # pad to next 64-bit boundary - bc_mod_8 = byte_count % 8 - if bc_mod_8: - self.file_stream.write(asbytes('\x00') * (8-bc_mod_8)) - - def write_header(self, - shape, - mclass, - is_complex=False, - is_logical=False, - nzmax=0): - ''' Write header for given data options - shape : sequence - array shape - mclass - mat5 matrix class - is_complex - True if matrix is complex - is_logical - True if matrix is logical - nzmax - max non zero elements for sparse arrays - - We get the name and the global flag from the object, and reset - them to defaults after we've used them - ''' - # get name and is_global from one-shot object store - name = self._var_name - is_global = self._var_is_global - # initialize the top-level matrix tag, store position - self._mat_tag_pos = self.file_stream.tell() - self.write_bytes(self.mat_tag) - # write array flags (complex, global, logical, class, nzmax) - af = np.zeros((), NDT_ARRAY_FLAGS) - af['data_type'] = miUINT32 - af['byte_count'] = 8 - flags = is_complex << 3 | is_global << 2 | is_logical << 1 - af['flags_class'] = mclass | flags << 8 - af['nzmax'] = nzmax - self.write_bytes(af) - # shape - self.write_element(np.array(shape, dtype='i4')) - # write name - name = np.asarray(name) - if name == '': # empty string zero-terminated - self.write_smalldata_element(name, miINT8, 0) - else: - self.write_element(name, miINT8) - # reset the one-shot store to defaults - self._var_name = '' - self._var_is_global = False - - def update_matrix_tag(self, start_pos): - curr_pos = self.file_stream.tell() - self.file_stream.seek(start_pos) - self.mat_tag['byte_count'] = curr_pos - start_pos - 8 - self.write_bytes(self.mat_tag) - self.file_stream.seek(curr_pos) - - def 
write_top(self, arr, name, is_global): - """ Write variable at top level of mat file - - Parameters - ---------- - arr : array-like - array-like object to create writer for - name : str, optional - name as it will appear in matlab workspace - default is empty string - is_global : {False, True} optional - whether variable will be global on load into matlab - """ - # these are set before the top-level header write, and unset at - # the end of the same write, because they do not apply for lower levels - self._var_is_global = is_global - self._var_name = name - # write the header and data - self.write(arr) - - def write(self, arr): - ''' Write `arr` to stream at top and sub levels - - Parameters - ---------- - arr : array-like - array-like object to create writer for - ''' - # store position, so we can update the matrix tag - mat_tag_pos = self.file_stream.tell() - # First check if these are sparse - if scipy.sparse.issparse(arr): - self.write_sparse(arr) - self.update_matrix_tag(mat_tag_pos) - return - # Try to convert things that aren't arrays - narr = to_writeable(arr) - if narr is None: - raise TypeError('Could not convert %s (type %s) to array' - % (arr, type(arr))) - if isinstance(narr, MatlabObject): - self.write_object(narr) - elif isinstance(narr, MatlabFunction): - raise MatWriteError('Cannot write matlab functions') - elif narr.dtype.fields: # struct array - self.write_struct(narr) - elif narr.dtype.hasobject: # cell array - self.write_cells(narr) - elif narr.dtype.kind in ('U', 'S'): - if self.unicode_strings: - codec='UTF8' - else: - codec = 'ascii' - self.write_char(narr, codec) - else: - self.write_numeric(narr) - self.update_matrix_tag(mat_tag_pos) - - def write_numeric(self, arr): - imagf = arr.dtype.kind == 'c' - try: - mclass = NP_TO_MXTYPES[arr.dtype.str[1:]] - except KeyError: - # No matching matlab type, probably complex256 / float128 / float96 - # Cast data to complex128 / float64. 
- if imagf: - arr = arr.astype('c128') - else: - arr = arr.astype('f8') - mclass = mxDOUBLE_CLASS - self.write_header(matdims(arr, self.oned_as), - mclass, - is_complex=imagf) - if imagf: - self.write_element(arr.real) - self.write_element(arr.imag) - else: - self.write_element(arr) - - def write_char(self, arr, codec='ascii'): - ''' Write string array `arr` with given `codec` - ''' - if arr.size == 0 or np.all(arr == ''): - # This an empty string array or a string array containing - # only empty strings. Matlab cannot distiguish between a - # string array that is empty, and a string array containing - # only empty strings, because it stores strings as arrays of - # char. There is no way of having an array of char that is - # not empty, but contains an empty string. We have to - # special-case the array-with-empty-strings because even - # empty strings have zero padding, which would otherwise - # appear in matlab as a string with a space. - shape = (0,) * np.max([arr.ndim, 2]) - self.write_header(shape, mxCHAR_CLASS) - self.write_smalldata_element(arr, miUTF8, 0) - return - # non-empty string. - # - # Convert to char array - arr = arr_to_chars(arr) - # We have to write the shape directly, because we are going - # recode the characters, and the resulting stream of chars - # may have a different length - shape = arr.shape - self.write_header(shape, mxCHAR_CLASS) - if arr.dtype.kind == 'U' and arr.size: - # Make one long string from all the characters. We need to - # transpose here, because we're flattening the array, before - # we write the bytes. The bytes have to be written in - # Fortran order. 
- n_chars = np.product(shape) - st_arr = np.ndarray(shape=(), - dtype=arr_dtype_number(arr, n_chars), - buffer=arr.T.copy()) # Fortran order - # Recode with codec to give byte string - st = st_arr.item().encode(codec) - # Reconstruct as one-dimensional byte array - arr = np.ndarray(shape=(len(st),), - dtype='S1', - buffer=st) - self.write_element(arr, mdtype=miUTF8) - - def write_sparse(self, arr): - ''' Sparse matrices are 2D - ''' - A = arr.tocsc() # convert to sparse CSC format - A.sort_indices() # MATLAB expects sorted row indices - is_complex = (A.dtype.kind == 'c') - nz = A.nnz - self.write_header(matdims(arr, self.oned_as), - mxSPARSE_CLASS, - is_complex=is_complex, - nzmax=nz) - self.write_element(A.indices.astype('i4')) - self.write_element(A.indptr.astype('i4')) - self.write_element(A.data.real) - if is_complex: - self.write_element(A.data.imag) - - def write_cells(self, arr): - self.write_header(matdims(arr, self.oned_as), - mxCELL_CLASS) - # loop over data, column major - A = np.atleast_2d(arr).flatten('F') - for el in A: - self.write(el) - - def write_struct(self, arr): - self.write_header(matdims(arr, self.oned_as), - mxSTRUCT_CLASS) - self._write_items(arr) - - def _write_items(self, arr): - # write fieldnames - fieldnames = [f[0] for f in arr.dtype.descr] - length = max([len(fieldname) for fieldname in fieldnames])+1 - max_length = (self.long_field_names and 64) or 32 - if length > max_length: - raise ValueError( - "Field names are restricted to %d characters" - % (max_length-1)) - self.write_element(np.array([length], dtype='i4')) - self.write_element( - np.array(fieldnames, dtype='S%d'%(length)), - mdtype=miINT8) - A = np.atleast_2d(arr).flatten('F') - for el in A: - for f in fieldnames: - self.write(el[f]) - - def write_object(self, arr): - '''Same as writing structs, except different mx class, and extra - classname element after header - ''' - self.write_header(matdims(arr, self.oned_as), - mxOBJECT_CLASS) - 
self.write_element(np.array(arr.classname, dtype='S'), - mdtype=miINT8) - self._write_items(arr) - - -class MatFile5Writer(object): - ''' Class for writing mat5 files ''' - - @docfiller - def __init__(self, file_stream, - do_compression=False, - unicode_strings=False, - global_vars=None, - long_field_names=False, - oned_as=None): - ''' Initialize writer for matlab 5 format files - - Parameters - ---------- - %(do_compression)s - %(unicode_strings)s - global_vars : None or sequence of strings, optional - Names of variables to be marked as global for matlab - %(long_fields)s - %(oned_as)s - ''' - self.file_stream = file_stream - self.do_compression = do_compression - self.unicode_strings = unicode_strings - if global_vars: - self.global_vars = global_vars - else: - self.global_vars = [] - self.long_field_names = long_field_names - # deal with deprecations - if oned_as is None: - warnings.warn("Using oned_as default value ('column')" + - " This will change to 'row' in future versions", - FutureWarning, stacklevel=2) - oned_as = 'column' - self.oned_as = oned_as - self._matrix_writer = None - - def write_file_header(self): - # write header - hdr = np.zeros((), NDT_FILE_HDR) - hdr['description']='MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \ - % (os.name,time.asctime()) - hdr['version']= 0x0100 - hdr['endian_test']=np.ndarray(shape=(), - dtype='S2', - buffer=np.uint16(0x4d49)) - self.file_stream.write(hdr.tostring()) - - def put_variables(self, mdict, write_header=None): - ''' Write variables in `mdict` to stream - - Parameters - ---------- - mdict : mapping - mapping with method ``items`` returns name, contents pairs where - ``name`` which will appear in the matlab workspace in file load, and - ``contents`` is something writeable to a matlab file, such as a numpy - array. - write_header : {None, True, False} - If True, then write the matlab file header before writing the - variables. 
If None (the default) then write the file header - if we are at position 0 in the stream. By setting False - here, and setting the stream position to the end of the file, - you can append variables to a matlab file - ''' - # write header if requested, or None and start of file - if write_header is None: - write_header = self.file_stream.tell() == 0 - if write_header: - self.write_file_header() - self._matrix_writer = VarWriter5(self) - for name, var in mdict.items(): - if name[0] == '_': - continue - is_global = name in self.global_vars - if self.do_compression: - stream = BytesIO() - self._matrix_writer.file_stream = stream - self._matrix_writer.write_top(var, asbytes(name), is_global) - out_str = zlib.compress(stream.getvalue()) - tag = np.empty((), NDT_TAG_FULL) - tag['mdtype'] = miCOMPRESSED - tag['byte_count'] = len(out_str) - self.file_stream.write(tag.tostring() + out_str) - else: # not compressing - self._matrix_writer.write_top(var, asbytes(name), is_global) diff --git a/scipy-0.10.1/scipy/io/matlab/mio5_params.py b/scipy-0.10.1/scipy/io/matlab/mio5_params.py deleted file mode 100644 index 94653d70a5..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/mio5_params.py +++ /dev/null @@ -1,230 +0,0 @@ -''' Constants and classes for matlab 5 read and write - -See also mio5_utils.pyx where these same constants arise as c enums. - -If you make changes in this file, don't forget to change mio5_utils.pyx -''' - -import numpy as np - -from miobase import convert_dtypes - -miINT8 = 1 -miUINT8 = 2 -miINT16 = 3 -miUINT16 = 4 -miINT32 = 5 -miUINT32 = 6 -miSINGLE = 7 -miDOUBLE = 9 -miINT64 = 12 -miUINT64 = 13 -miMATRIX = 14 -miCOMPRESSED = 15 -miUTF8 = 16 -miUTF16 = 17 -miUTF32 = 18 - -mxCELL_CLASS = 1 -mxSTRUCT_CLASS = 2 -# The March 2008 edition of "Matlab 7 MAT-File Format" says that -# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3. -# Matlab 2008a appears to save logicals as type 9, so we assume that -# the document is correct. See type 18, below. 
-mxOBJECT_CLASS = 3 -mxCHAR_CLASS = 4 -mxSPARSE_CLASS = 5 -mxDOUBLE_CLASS = 6 -mxSINGLE_CLASS = 7 -mxINT8_CLASS = 8 -mxUINT8_CLASS = 9 -mxINT16_CLASS = 10 -mxUINT16_CLASS = 11 -mxINT32_CLASS = 12 -mxUINT32_CLASS = 13 -# The following are not in the March 2008 edition of "Matlab 7 -# MAT-File Format," but were guessed from matrix.h. -mxINT64_CLASS = 14 -mxUINT64_CLASS = 15 -mxFUNCTION_CLASS = 16 -# Not doing anything with these at the moment. -mxOPAQUE_CLASS = 17 # This appears to be a function workspace -# https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html -mxOBJECT_CLASS_FROM_MATRIX_H = 18 - -mdtypes_template = { - miINT8: 'i1', - miUINT8: 'u1', - miINT16: 'i2', - miUINT16: 'u2', - miINT32: 'i4', - miUINT32: 'u4', - miSINGLE: 'f4', - miDOUBLE: 'f8', - miINT64: 'i8', - miUINT64: 'u8', - miUTF8: 'u1', - miUTF16: 'u2', - miUTF32: 'u4', - 'file_header': [('description', 'S116'), - ('subsystem_offset', 'i8'), - ('version', 'u2'), - ('endian_test', 'S2')], - 'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')], - 'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')], - 'array_flags': [('data_type', 'u4'), - ('byte_count', 'u4'), - ('flags_class','u4'), - ('nzmax', 'u4')], - 'U1': 'U1', - } - -mclass_dtypes_template = { - mxINT8_CLASS: 'i1', - mxUINT8_CLASS: 'u1', - mxINT16_CLASS: 'i2', - mxUINT16_CLASS: 'u2', - mxINT32_CLASS: 'i4', - mxUINT32_CLASS: 'u4', - mxINT64_CLASS: 'i8', - mxUINT64_CLASS: 'u8', - mxSINGLE_CLASS: 'f4', - mxDOUBLE_CLASS: 'f8', - } - - -NP_TO_MTYPES = { - 'f8': miDOUBLE, - 'c32': miDOUBLE, - 'c24': miDOUBLE, - 'c16': miDOUBLE, - 'f4': miSINGLE, - 'c8': miSINGLE, - 'i8': miINT64, - 'i4': miINT32, - 'i2': miINT16, - 'i1': miINT8, - 'u8': miUINT64, - 'u4': miUINT32, - 'u2': miUINT16, - 'u1': miUINT8, - 'S1': miUINT8, - 'U1': miUTF16, - } - - -NP_TO_MXTYPES = { - 'f8': mxDOUBLE_CLASS, - 'c32': mxDOUBLE_CLASS, - 'c24': mxDOUBLE_CLASS, - 'c16': mxDOUBLE_CLASS, - 'f4': mxSINGLE_CLASS, - 'c8': mxSINGLE_CLASS, - 'i8': 
mxINT64_CLASS, - 'i4': mxINT32_CLASS, - 'i2': mxINT16_CLASS, - 'i1': mxINT8_CLASS, - 'u8': mxUINT64_CLASS, - 'u4': mxUINT32_CLASS, - 'u2': mxUINT16_CLASS, - 'u1': mxUINT8_CLASS, - 'S1': mxUINT8_CLASS, - } - -''' Before release v7.1 (release 14) matlab (TM) used the system -default character encoding scheme padded out to 16-bits. Release 14 -and later use Unicode. When saving character data, R14 checks if it -can be encoded in 7-bit ascii, and saves in that format if so.''' - -codecs_template = { - miUTF8: {'codec': 'utf_8', 'width': 1}, - miUTF16: {'codec': 'utf_16', 'width': 2}, - miUTF32: {'codec': 'utf_32','width': 4}, - } - - -def _convert_codecs(template, byte_order): - ''' Convert codec template mapping to byte order - - Set codecs not on this system to None - - Parameters - ---------- - template : mapping - key, value are respectively codec name, and root name for codec - (without byte order suffix) - byte_order : {'<', '>'} - code for little or big endian - - Returns - ------- - codecs : dict - key, value are name, codec (as in .encode(codec)) - ''' - codecs = {} - postfix = byte_order == '<' and '_le' or '_be' - for k, v in template.items(): - codec = v['codec'] - try: - " ".encode(codec) - except LookupError: - codecs[k] = None - continue - if v['width'] > 1: - codec += postfix - codecs[k] = codec - return codecs.copy() - - -MDTYPES = {} -for _bytecode in '<>': - _def = {} - _def['dtypes'] = convert_dtypes(mdtypes_template, _bytecode) - _def['classes'] = convert_dtypes(mclass_dtypes_template, _bytecode) - _def['codecs'] = _convert_codecs(codecs_template, _bytecode) - MDTYPES[_bytecode] = _def - - -class mat_struct(object): - ''' Placeholder for holding read data from structs - - We deprecate this method of holding struct information, and will - soon remove it, in favor of the recarray method (see loadmat - docstring) - ''' - pass - - -class MatlabObject(np.ndarray): - ''' ndarray Subclass to contain matlab object ''' - def __new__(cls, input_array, 
classname=None): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = np.asarray(input_array).view(cls) - # add the new attribute to the created instance - obj.classname = classname - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self,obj): - # reset the attribute from passed original object - self.classname = getattr(obj, 'classname', None) - # We do not need to return anything - - -class MatlabFunction(np.ndarray): - ''' Subclass to signal this is a matlab function ''' - def __new__(cls, input_array): - obj = np.asarray(input_array).view(cls) - return obj - - -class MatlabOpaque(np.ndarray): - ''' Subclass to signal this is a matlab opaque matrix ''' - def __new__(cls, input_array): - obj = np.asarray(input_array).view(cls) - return obj - - -OPAQUE_DTYPE = np.dtype( - [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')]) diff --git a/scipy-0.10.1/scipy/io/matlab/mio5_utils.c b/scipy-0.10.1/scipy/io/matlab/mio5_utils.c deleted file mode 100644 index a8dfd1a51d..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/mio5_utils.c +++ /dev/null @@ -1,13657 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:18:28 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. 
-#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - 
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) 
PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__io__matlab__mio5_utils -#define __PYX_HAVE_API__scipy__io__matlab__mio5_utils -#include "stdlib.h" -#include "string.h" -#include "stdio.h" -#include "pythread.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#include "numpy_rephrasing.h" -#ifdef 
_OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... */ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... */ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "mio5_utils.pyx", - "numpy.pxd", - "bool.pxd", - "complex.pxd", - "streams.pxd", -}; - -/* "numpy.pxd":719 - * # in Cython to enable them only on the right systems. 
- * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "numpy.pxd":720 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "numpy.pxd":721 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "numpy.pxd":722 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "numpy.pxd":726 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "numpy.pxd":727 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "numpy.pxd":728 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* "numpy.pxd":729 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "numpy.pxd":733 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t 
- */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "numpy.pxd":734 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "numpy.pxd":743 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "numpy.pxd":744 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "numpy.pxd":745 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong __pyx_t_5numpy_longlong_t; - -/* "numpy.pxd":747 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "numpy.pxd":748 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "numpy.pxd":749 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "numpy.pxd":751 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "numpy.pxd":752 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # 
<<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "numpy.pxd":754 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "numpy.pxd":755 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* "numpy.pxd":756 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - -/*--- Type declarations ---*/ -struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream; -struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5; -struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5; - -/* "numpy.pxd":758 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "numpy.pxd":759 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "numpy.pxd":760 - * ctypedef npy_cfloat cfloat_t - * 
ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* "numpy.pxd":762 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string; - -/* "streams.pxd":6 - * cdef object fobj - * - * cpdef int seek(self, long int offset, int whence=*) except -1 # <<<<<<<<<<<<<< - * cpdef long int tell(self) except -1 - * cdef int read_into(self, void *buf, size_t n) except -1 - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek { - int __pyx_n; - int whence; -}; - -/* "streams.pxd":9 - * cpdef long int tell(self) except -1 - * cdef int read_into(self, void *buf, size_t n) except -1 - * cdef object read_string(self, size_t n, void **pp, int copy=*) # <<<<<<<<<<<<<< - * - * cpdef GenericStream make_stream(object fobj) - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string { - int __pyx_n; - int copy; -}; -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element; -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric; -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix; -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header; - -/* "scipy/io/matlab/mio5_utils.pyx":68 - * - * - * cdef enum: # <<<<<<<<<<<<<< - * miINT8 = 1 - * miUINT8 = 2 - */ -enum { - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT8 = 1, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUINT8 = 2, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT16 = 3, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUINT16 = 4, - 
__pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT32 = 5, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUINT32 = 6, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miSINGLE = 7, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miDOUBLE = 9, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT64 = 12, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUINT64 = 13, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miMATRIX = 14, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miCOMPRESSED = 15, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUTF8 = 16, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUTF16 = 17, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUTF32 = 18 -}; - -/* "scipy/io/matlab/mio5_utils.pyx":85 - * miUTF32 = 18 - * - * cdef enum: # see comments in mio5_params # <<<<<<<<<<<<<< - * mxCELL_CLASS = 1 - * mxSTRUCT_CLASS = 2 - */ -enum { - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxCELL_CLASS = 1, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxSTRUCT_CLASS = 2, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxOBJECT_CLASS = 3, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxCHAR_CLASS = 4, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxSPARSE_CLASS = 5, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxDOUBLE_CLASS = 6, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxSINGLE_CLASS = 7, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT8_CLASS = 8, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT8_CLASS = 9, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT16_CLASS = 10, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT16_CLASS = 11, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT32_CLASS = 12, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT32_CLASS = 13, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT64_CLASS = 14, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT64_CLASS = 15, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxFUNCTION_CLASS = 16, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxOPAQUE_CLASS = 17, - __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxOBJECT_CLASS_FROM_MATRIX_H = 18 -}; - -/* "scipy/io/matlab/mio5_utils.pyx":316 - * return 1 - * - 
* cdef object read_element(self, # <<<<<<<<<<<<<< - * cnp.uint32_t *mdtype_ptr, - * cnp.uint32_t *byte_count_ptr, - */ -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element { - int __pyx_n; - int copy; -}; - -/* "scipy/io/matlab/mio5_utils.pyx":412 - * return 0 - * - * cpdef inline cnp.ndarray read_numeric(self, int copy=True): # <<<<<<<<<<<<<< - * ''' Read numeric data element into ndarray - * - */ -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric { - int __pyx_n; - int copy; -}; - -/* "scipy/io/matlab/mio5_utils.pyx":592 - * return size - * - * cdef read_mi_matrix(self, int process=1): # <<<<<<<<<<<<<< - * ''' Read header with matrix at sub-levels - * - */ -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix { - int __pyx_n; - int process; -}; - -/* "scipy/io/matlab/mio5_utils.pyx":624 - * return self.array_from_header(header, process) - * - * cpdef array_from_header(self, VarHeader5 header, int process=1): # <<<<<<<<<<<<<< - * ''' Read array of any class, given matrix `header` - * - */ -struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header { - int __pyx_n; - int process; -}; - -/* "streams.pxd":3 - * # -*- python -*- or rather like - * - * cdef class GenericStream: # <<<<<<<<<<<<<< - * cdef object fobj - * - */ -struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream { - PyObject_HEAD - struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *__pyx_vtab; - PyObject *fobj; -}; - - -/* "scipy/io/matlab/mio5_utils.pyx":119 - * - * - * cdef class VarHeader5: # <<<<<<<<<<<<<< - * cdef readonly object name - * cdef readonly int mclass - */ -struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 { - PyObject_HEAD - PyObject *name; - int mclass; - PyObject *dims; - __pyx_t_5numpy_int32_t dims_ptr[32]; - int n_dims; - int is_complex; - int is_logical; - int is_global; - size_t nzmax; -}; - - -/* "scipy/io/matlab/mio5_utils.pyx":141 - * 
- * - * cdef class VarReader5: # <<<<<<<<<<<<<< - * cdef public int is_swapped, little_endian - * cdef int struct_as_record - */ -struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 { - PyObject_HEAD - struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_vtab; - int is_swapped; - int little_endian; - int struct_as_record; - PyObject *codecs; - PyObject *uint16_codec; - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *cstream; - PyObject *dtypes[20]; - PyObject *class_dtypes[20]; - PyArray_Descr *U1_dtype; - PyArray_Descr *bool_dtype; - int mat_dtype; - int squeeze_me; - int chars_as_strings; -}; - - - -/* "streams.pxd":3 - * # -*- python -*- or rather like - * - * cdef class GenericStream: # <<<<<<<<<<<<<< - * cdef object fobj - * - */ - -struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream { - int (*seek)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, long, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek *__pyx_optional_args); - long (*tell)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, int __pyx_skip_dispatch); - int (*read_into)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, void *, size_t); - PyObject *(*read_string)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, size_t, void **, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string *__pyx_optional_args); -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *__pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream; - - -/* "scipy/io/matlab/mio5_utils.pyx":141 - * - * - * cdef class VarReader5: # <<<<<<<<<<<<<< - * cdef public int is_swapped, little_endian - * cdef int struct_as_record - */ - -struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 { - int (*cread_tag)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *, char *); - 
PyObject *(*read_element)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *, void **, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element *__pyx_optional_args); - int (*read_element_into)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *, void *); - PyArrayObject *(*read_numeric)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric *__pyx_optional_args); - PyObject *(*read_int8_string)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *); - int (*read_into_int32s)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_int32_t *); - int (*cread_full_tag)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *); - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *(*read_header)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, int __pyx_skip_dispatch); - size_t (*size_from_header)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *); - PyObject *(*read_mi_matrix)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix *__pyx_optional_args); - PyObject *(*array_from_header)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header *__pyx_optional_args); - PyArrayObject *(*read_real_complex)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch); - PyObject 
*(*read_sparse)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *); - PyArrayObject *(*read_char)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch); - PyArrayObject *(*read_cells)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch); - PyObject *(*cread_fieldnames)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, int *); - PyArrayObject *(*read_struct)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch); - PyArrayObject *(*read_opaque)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch); -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_vtabptr_5scipy_2io_6matlab_10mio5_utils_VarReader5; - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject 
*)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static 
CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ - -static CYTHON_INLINE long __Pyx_mod_long(long, long); /* proto */ - -static CYTHON_INLINE long __Pyx_div_long(long, long); /* proto */ - -static CYTHON_INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) { - if (likely(PyList_CheckExact(L))) { - if (PyList_Append(L, x) < 0) return NULL; - Py_INCREF(Py_None); - return Py_None; /* this is just to have an accurate signature */ - } - else { - PyObject *r, *m; - m = __Pyx_GetAttrString(L, "append"); - if (!m) return NULL; - r = PyObject_CallFunctionObjArgs(m, x, NULL); - Py_DECREF(m); - return r; - } -} - -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact); /*proto*/ - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} - -static CYTHON_INLINE int __Pyx_NegateNonNeg(int b) { - return unlikely(b < 0) ? b : !b; -} -static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) { - return unlikely(b < 0) ? 
NULL : __Pyx_PyBool_FromLong(b); -} - -/* Run-time type information about structs used with buffers */ -struct __Pyx_StructField_; - -typedef struct { - const char* name; /* for error messages only */ - struct __Pyx_StructField_* fields; - size_t size; /* sizeof(type) */ - char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */ -} __Pyx_TypeInfo; - -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; - -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; - - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); - -static void __Pyx_RaiseBufferFallbackError(void); /*proto*/ -static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/ -#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) - -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */ - -#define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); -static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else -#define __Pyx_GetBuffer PyObject_GetBuffer -#define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - -Py_ssize_t __Pyx_zeros[] = {0}; -Py_ssize_t __Pyx_minusones[] = {-1}; - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -#include - -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ - -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, 
PyObject* s2, int equals); /*proto*/ - -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals -#else -#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals -#endif - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_uint32(npy_uint32); - -static CYTHON_INLINE npy_int32 __Pyx_PyInt_from_py_npy_int32(PyObject *); - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_int32(npy_int32); - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define __Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); - static 
CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static CYTHON_INLINE npy_uint32 __Pyx_PyInt_from_py_npy_uint32(PyObject *); - -static int __Pyx_check_binary_version(void); - -static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - -static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ - -static void* 
__Pyx_GetVtable(PyObject *dict); /*proto*/ - -static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /*proto*/ - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'libc.stdlib' */ - -/* Module declarations from 'libc.string' */ - -/* Module declarations from 'cpython.version' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'cpython.exc' */ - -/* Module declarations from 'cpython.module' */ - -/* Module declarations from 'cpython.mem' */ - -/* Module declarations from 'cpython.tuple' */ - -/* Module declarations from 'cpython.list' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'cpython.sequence' */ - -/* Module declarations from 'cpython.mapping' */ - -/* Module declarations from 'cpython.iterator' */ - -/* Module declarations from 'cpython.type' */ - -/* Module declarations from 'cpython.number' */ - -/* Module declarations from 'cpython.int' */ - -/* Module declarations from '__builtin__' */ - -/* Module declarations from 'cpython.bool' */ -static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; - -/* Module declarations from 'cpython.long' */ - -/* Module declarations from 'cpython.float' */ - -/* Module declarations from '__builtin__' */ - -/* Module declarations from 'cpython.complex' */ -static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; - -/* Module declarations from 'cpython.string' */ - -/* Module declarations from 'cpython.unicode' */ - -/* Module declarations from 'cpython.dict' */ - -/* Module declarations from 'cpython.instance' */ - -/* Module declarations from 'cpython.function' */ - -/* Module declarations from 'cpython.method' */ - -/* Module declarations from 'cpython.weakref' */ - -/* 
Module declarations from 'cpython.getargs' */ - -/* Module declarations from 'cpython.pythread' */ - -/* Module declarations from 'cpython.pystate' */ - -/* Module declarations from 'cpython.cobject' */ - -/* Module declarations from 'cpython.oldbuffer' */ - -/* Module declarations from 'cpython.set' */ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'cpython.bytes' */ - -/* Module declarations from 'cpython.pycapsule' */ - -/* Module declarations from 'cpython' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ - -/* Module declarations from 'scipy.io.matlab.streams' */ -static PyTypeObject *__pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream = 0; -static struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream 
*(*__pyx_f_5scipy_2io_6matlab_7streams_make_stream)(PyObject *, int __pyx_skip_dispatch); /*proto*/ - -/* Module declarations from 'scipy.io.matlab.mio5_utils' */ -static PyTypeObject *__pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5 = 0; -static PyTypeObject *__pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarReader5 = 0; -static PyArray_Descr *__pyx_v_5scipy_2io_6matlab_10mio5_utils_OPAQUE_DTYPE = 0; -static __pyx_t_5numpy_uint32_t __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4(__pyx_t_5numpy_uint32_t, int __pyx_skip_dispatch); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_object = { "Python object", NULL, sizeof(PyObject *), 'O' }; -#define __Pyx_MODULE_NAME "scipy.io.matlab.mio5_utils" -int __pyx_module_is_main_scipy__io__matlab__mio5_utils = 0; - -/* Implementation of 'scipy.io.matlab.mio5_utils' */ -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_TypeError; -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_object; -static PyObject *__pyx_builtin_RuntimeError; -static char __pyx_k_1[] = " "; -static char __pyx_k_2[] = " "; -static char __pyx_k_3[] = "> 8 & 0xff00u)) | - * (u4 >> 24)) # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((((__pyx_v_u4 << 24) | ((__pyx_v_u4 << 8) & 0xff0000U)) | ((__pyx_v_u4 >> 8) & 0xff00U)) | (__pyx_v_u4 >> 24)); - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":112 - * - * - * cpdef cnp.uint32_t byteswap_u4(cnp.uint32_t u4): # <<<<<<<<<<<<<< - * return ((u4 << 24) | - * ((u4 << 8) & 0xff0000U) | - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_byteswap_u4(PyObject *__pyx_self, PyObject *__pyx_arg_u4); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_byteswap_u4(PyObject *__pyx_self, PyObject *__pyx_arg_u4) { - __pyx_t_5numpy_uint32_t __pyx_v_u4; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject 
*__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("byteswap_u4"); - __pyx_self = __pyx_self; - assert(__pyx_arg_u4); { - __pyx_v_u4 = __Pyx_PyInt_from_py_npy_uint32(__pyx_arg_u4); if (unlikely((__pyx_v_u4 == (npy_uint32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.byteswap_u4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_to_py_npy_uint32(__pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4(__pyx_v_u4, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.byteswap_u4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":130 - * cdef size_t nzmax - * - * def set_dims(self, dims): # <<<<<<<<<<<<<< - * """ Allow setting of dimensions from python - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_set_dims(PyObject *__pyx_v_self, PyObject *__pyx_v_dims); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_set_dims[] = " Allow setting of dimensions from python\n\n This is for constructing headers for tests\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_set_dims(PyObject *__pyx_v_self, PyObject *__pyx_v_dims) { - PyObject 
*__pyx_v_i = NULL; - PyObject *__pyx_v_dim = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - __pyx_t_5numpy_int32_t __pyx_t_7; - Py_ssize_t __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("set_dims"); - - /* "scipy/io/matlab/mio5_utils.pyx":135 - * This is for constructing headers for tests - * """ - * self.dims = dims # <<<<<<<<<<<<<< - * self.n_dims = len(dims) - * for i, dim in enumerate(dims): - */ - __Pyx_INCREF(__pyx_v_dims); - __Pyx_GIVEREF(__pyx_v_dims); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->dims); - __Pyx_DECREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->dims); - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->dims = __pyx_v_dims; - - /* "scipy/io/matlab/mio5_utils.pyx":136 - * """ - * self.dims = dims - * self.n_dims = len(dims) # <<<<<<<<<<<<<< - * for i, dim in enumerate(dims): - * self.dims_ptr[i] = int(dim) - */ - __pyx_t_1 = PyObject_Length(__pyx_v_dims); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->n_dims = __pyx_t_1; - - /* "scipy/io/matlab/mio5_utils.pyx":137 - * self.dims = dims - * self.n_dims = len(dims) - * for i, dim in enumerate(dims): # <<<<<<<<<<<<<< - * self.dims_ptr[i] = int(dim) - * - */ - __Pyx_INCREF(__pyx_int_0); - __pyx_t_2 = __pyx_int_0; - if (PyList_CheckExact(__pyx_v_dims) || PyTuple_CheckExact(__pyx_v_dims)) { - __pyx_t_3 = __pyx_v_dims; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_1 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_dims); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = Py_TYPE(__pyx_t_3)->tp_iternext; - } - for (;;) { - if (PyList_CheckExact(__pyx_t_3)) { - if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_3)) break; - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; - } else if (PyTuple_CheckExact(__pyx_t_3)) { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; - } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_3); - if (unlikely(!__pyx_t_5)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - __Pyx_XDECREF(__pyx_v_dim); - __pyx_v_dim = __pyx_t_5; - __pyx_t_5 = 0; - __Pyx_INCREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_v_i); - __pyx_v_i = __pyx_t_2; - __pyx_t_5 = PyNumber_Add(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); - __pyx_t_2 = __pyx_t_5; - __pyx_t_5 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":138 - * self.n_dims = len(dims) - * for i, dim in enumerate(dims): - * self.dims_ptr[i] = int(dim) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_INCREF(__pyx_v_dim); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_dim); - __Pyx_GIVEREF(__pyx_v_dim); - __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyInt_Type))), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_7 = __Pyx_PyInt_from_py_npy_int32(__pyx_t_6); if (unlikely((__pyx_t_7 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - (((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->dims_ptr[__pyx_t_8]) = ((__pyx_t_5numpy_int32_t)__pyx_t_7); - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarHeader5.set_dims", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_i); - __Pyx_XDECREF(__pyx_v_dim); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":120 - * - * cdef class VarHeader5: - * cdef readonly object name # <<<<<<<<<<<<<< - * cdef readonly int mclass - * cdef readonly object dims - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_4name___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_4name___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->name); - __pyx_r = ((struct 
__pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->name; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":121 - * cdef class VarHeader5: - * cdef readonly object name - * cdef readonly int mclass # <<<<<<<<<<<<<< - * cdef readonly object dims - * cdef cnp.int32_t dims_ptr[_MAT_MAXDIMS] - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_6mclass___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_6mclass___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->mclass); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarHeader5.mclass.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":122 - * cdef readonly object name - * cdef readonly int mclass - * cdef readonly object dims # <<<<<<<<<<<<<< - * cdef cnp.int32_t dims_ptr[_MAT_MAXDIMS] - * cdef int n_dims - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_4dims___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject 
*__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_4dims___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->dims); - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->dims; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":127 - * cdef int is_complex - * cdef int is_logical - * cdef public int is_global # <<<<<<<<<<<<<< - * cdef size_t nzmax - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_9is_global___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_9is_global___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->is_global); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarHeader5.is_global.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int 
__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_9is_global_1__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_9is_global_1__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__set__"); - __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_self)->is_global = __pyx_t_1; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarHeader5.is_global.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":172 - * * squeeze_me (bool) - * """ - * def __cinit__(self, preader): # <<<<<<<<<<<<<< - * byte_order = preader.byte_order - * self.is_swapped = byte_order == swapped_code - */ - -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_preader = 0; - PyObject *__pyx_v_byte_order = NULL; - PyObject *__pyx_v_uint16_codec = NULL; - PyObject *__pyx_v_key = NULL; - PyObject *__pyx_v_dt = NULL; - PyObject *__pyx_v_bool_dtype = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - Py_ssize_t __pyx_t_7; - PyObject 
*(*__pyx_t_8)(PyObject *); - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *(*__pyx_t_11)(PyObject *); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__preader,0}; - __Pyx_RefNannySetupContext("__cinit__"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[1] = {0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__preader); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_preader = values[0]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_preader = PyTuple_GET_ITEM(__pyx_args, 0); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/io/matlab/mio5_utils.pyx":173 - * """ - * def __cinit__(self, preader): - * byte_order = preader.byte_order # <<<<<<<<<<<<<< - * self.is_swapped = byte_order == swapped_code - * if self.is_swapped: - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__byte_order); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_byte_order = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":174 - * def __cinit__(self, preader): - * byte_order = preader.byte_order - * self.is_swapped = byte_order == swapped_code # <<<<<<<<<<<<<< - * if self.is_swapped: - * self.little_endian = not sys_is_le - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__swapped_code); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_v_byte_order, __pyx_t_1, Py_EQ); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->is_swapped = __pyx_t_3; - - /* "scipy/io/matlab/mio5_utils.pyx":175 - * byte_order = preader.byte_order - * self.is_swapped = byte_order == swapped_code - * if self.is_swapped: # <<<<<<<<<<<<<< - * self.little_endian = not sys_is_le - * else: - */ - if (((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->is_swapped) { - - /* "scipy/io/matlab/mio5_utils.pyx":176 - * self.is_swapped = byte_order == swapped_code - * if self.is_swapped: - * self.little_endian = not sys_is_le # <<<<<<<<<<<<<< - * else: - * self.little_endian = sys_is_le - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys_is_le); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->little_endian = (!__pyx_t_4); - goto __pyx_L6; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":178 - * self.little_endian = not sys_is_le - * else: - * self.little_endian = sys_is_le # <<<<<<<<<<<<<< - * # option affecting reading of matlab struct arrays - * self.struct_as_record = preader.struct_as_record - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys_is_le); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->little_endian = __pyx_t_3; - } - __pyx_L6:; - - /* "scipy/io/matlab/mio5_utils.pyx":180 - * self.little_endian = sys_is_le - * # option affecting reading of matlab struct arrays - * self.struct_as_record = preader.struct_as_record # <<<<<<<<<<<<<< - * # store codecs for text matrix reading - * self.codecs = mio5p.MDTYPES[byte_order]['codecs'].copy() - */ - __pyx_t_2 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__struct_as_record); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->struct_as_record = __pyx_t_3; - - /* "scipy/io/matlab/mio5_utils.pyx":182 - * self.struct_as_record = preader.struct_as_record - * # store codecs for text matrix reading - * self.codecs = mio5p.MDTYPES[byte_order]['codecs'].copy() # <<<<<<<<<<<<<< - * self.uint16_codec = preader.uint16_codec - * uint16_codec = self.uint16_codec - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__MDTYPES); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_byte_order); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__codecs)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__copy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_GIVEREF(__pyx_t_1); - 
__Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->codecs); - __Pyx_DECREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->codecs); - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->codecs = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":183 - * # store codecs for text matrix reading - * self.codecs = mio5p.MDTYPES[byte_order]['codecs'].copy() - * self.uint16_codec = preader.uint16_codec # <<<<<<<<<<<<<< - * uint16_codec = self.uint16_codec - * # Set length of miUINT16 char encoding - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__uint16_codec); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->uint16_codec); - __Pyx_DECREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->uint16_codec); - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->uint16_codec = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":184 - * self.codecs = mio5p.MDTYPES[byte_order]['codecs'].copy() - * self.uint16_codec = preader.uint16_codec - * uint16_codec = self.uint16_codec # <<<<<<<<<<<<<< - * # Set length of miUINT16 char encoding - * self.codecs['uint16_len'] = len(" ".encode(uint16_codec)) \ - */ - __Pyx_INCREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->uint16_codec); - __pyx_v_uint16_codec = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->uint16_codec; - - /* "scipy/io/matlab/mio5_utils.pyx":186 - * uint16_codec = self.uint16_codec - * # Set length of miUINT16 char encoding - * self.codecs['uint16_len'] = len(" ".encode(uint16_codec)) \ # <<<<<<<<<<<<<< - * - len(" ".encode(uint16_codec)) - * 
self.codecs['uint16_codec'] = uint16_codec - */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_1), __pyx_n_s__encode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_uint16_codec); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_uint16_codec); - __Pyx_GIVEREF(__pyx_v_uint16_codec); - __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_6 = PyObject_Length(__pyx_t_5); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":187 - * # Set length of miUINT16 char encoding - * self.codecs['uint16_len'] = len(" ".encode(uint16_codec)) \ - * - len(" ".encode(uint16_codec)) # <<<<<<<<<<<<<< - * self.codecs['uint16_codec'] = uint16_codec - * # set c-optimized stream object from python file-like object - */ - __pyx_t_5 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_2), __pyx_n_s__encode); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_uint16_codec); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_uint16_codec); - 
__Pyx_GIVEREF(__pyx_v_uint16_codec); - __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_7 = PyObject_Length(__pyx_t_1); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyInt_FromSsize_t((__pyx_t_6 - __pyx_t_7)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - - /* "scipy/io/matlab/mio5_utils.pyx":186 - * uint16_codec = self.uint16_codec - * # Set length of miUINT16 char encoding - * self.codecs['uint16_len'] = len(" ".encode(uint16_codec)) \ # <<<<<<<<<<<<<< - * - len(" ".encode(uint16_codec)) - * self.codecs['uint16_codec'] = uint16_codec - */ - if (PyObject_SetItem(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->codecs, ((PyObject *)__pyx_n_s__uint16_len), __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":188 - * self.codecs['uint16_len'] = len(" ".encode(uint16_codec)) \ - * - len(" ".encode(uint16_codec)) - * self.codecs['uint16_codec'] = uint16_codec # <<<<<<<<<<<<<< - * # set c-optimized stream object from python file-like object - * self.set_stream(preader.mat_stream) - */ - if (PyObject_SetItem(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->codecs, ((PyObject *)__pyx_n_s__uint16_codec), __pyx_v_uint16_codec) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* 
"scipy/io/matlab/mio5_utils.pyx":190 - * self.codecs['uint16_codec'] = uint16_codec - * # set c-optimized stream object from python file-like object - * self.set_stream(preader.mat_stream) # <<<<<<<<<<<<<< - * # options for element processing - * self.mat_dtype = preader.mat_dtype - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__set_stream); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__mat_stream); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":192 - * self.set_stream(preader.mat_stream) - * # options for element processing - * self.mat_dtype = preader.mat_dtype # <<<<<<<<<<<<<< - * self.chars_as_strings = preader.chars_as_strings - * self.squeeze_me = preader.squeeze_me - */ - __pyx_t_2 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__mat_dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->mat_dtype = __pyx_t_3; - - /* "scipy/io/matlab/mio5_utils.pyx":193 - * # options for element processing - * self.mat_dtype = preader.mat_dtype - * self.chars_as_strings = preader.chars_as_strings # <<<<<<<<<<<<<< - * self.squeeze_me = preader.squeeze_me - * # copy refs to dtypes into object pointer array. We only need the - */ - __pyx_t_2 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__chars_as_strings); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->chars_as_strings = __pyx_t_3; - - /* "scipy/io/matlab/mio5_utils.pyx":194 - * self.mat_dtype = preader.mat_dtype - * self.chars_as_strings = preader.chars_as_strings - * self.squeeze_me = preader.squeeze_me # <<<<<<<<<<<<<< - * # copy refs to dtypes into object pointer array. 
We only need the - * # integer-keyed dtypes - */ - __pyx_t_2 = PyObject_GetAttr(__pyx_v_preader, __pyx_n_s__squeeze_me); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->squeeze_me = __pyx_t_3; - - /* "scipy/io/matlab/mio5_utils.pyx":197 - * # copy refs to dtypes into object pointer array. We only need the - * # integer-keyed dtypes - * for key, dt in mio5p.MDTYPES[byte_order]['dtypes'].items(): # <<<<<<<<<<<<<< - * if isinstance(key, str): - * continue - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__MDTYPES); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetItem(__pyx_t_5, __pyx_v_byte_order); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_GetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtypes)); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__items); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyList_CheckExact(__pyx_t_5) || PyTuple_CheckExact(__pyx_t_5)) { - __pyx_t_2 = __pyx_t_5; __Pyx_INCREF(__pyx_t_2); __pyx_t_7 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_7 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = Py_TYPE(__pyx_t_2)->tp_iternext; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - for (;;) { - if (PyList_CheckExact(__pyx_t_2)) { - if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_2)) break; - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_5); __pyx_t_7++; - } else if (PyTuple_CheckExact(__pyx_t_2)) { - if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_5); __pyx_t_7++; - } else { - __pyx_t_5 = __pyx_t_8(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { - PyObject* sequence = __pyx_t_5; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_9 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_9 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_10 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_11 = Py_TYPE(__pyx_t_10)->tp_iternext; - index = 0; __pyx_t_1 = __pyx_t_11(__pyx_t_10); if (unlikely(!__pyx_t_1)) goto __pyx_L9_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - index = 1; __pyx_t_9 = __pyx_t_11(__pyx_t_10); if (unlikely(!__pyx_t_9)) goto __pyx_L9_unpacking_failed; - __Pyx_GOTREF(__pyx_t_9); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_11(__pyx_t_10), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - goto __pyx_L10_unpacking_done; - __pyx_L9_unpacking_failed:; - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L10_unpacking_done:; - } - __Pyx_XDECREF(__pyx_v_key); - __pyx_v_key = __pyx_t_1; - __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_v_dt); - __pyx_v_dt = __pyx_t_9; - __pyx_t_9 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":198 - 
* # integer-keyed dtypes - * for key, dt in mio5p.MDTYPES[byte_order]['dtypes'].items(): - * if isinstance(key, str): # <<<<<<<<<<<<<< - * continue - * self.dtypes[key] = dt - */ - __pyx_t_5 = ((PyObject *)((PyObject*)(&PyString_Type))); - __Pyx_INCREF(__pyx_t_5); - __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_key, __pyx_t_5); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":199 - * for key, dt in mio5p.MDTYPES[byte_order]['dtypes'].items(): - * if isinstance(key, str): - * continue # <<<<<<<<<<<<<< - * self.dtypes[key] = dt - * # copy refs to class_dtypes into object pointer array - */ - goto __pyx_L7_continue; - goto __pyx_L11; - } - __pyx_L11:; - - /* "scipy/io/matlab/mio5_utils.pyx":200 - * if isinstance(key, str): - * continue - * self.dtypes[key] = dt # <<<<<<<<<<<<<< - * # copy refs to class_dtypes into object pointer array - * for key, dt in mio5p.MDTYPES[byte_order]['classes'].items(): - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - (((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->dtypes[__pyx_t_6]) = ((PyObject *)__pyx_v_dt); - __pyx_L7_continue:; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":202 - * self.dtypes[key] = dt - * # copy refs to class_dtypes into object pointer array - * for key, dt in mio5p.MDTYPES[byte_order]['classes'].items(): # <<<<<<<<<<<<<< - * if isinstance(key, str): - * continue - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__MDTYPES); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetItem(__pyx_t_5, __pyx_v_byte_order); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_GetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__classes)); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__items); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyList_CheckExact(__pyx_t_5) || PyTuple_CheckExact(__pyx_t_5)) { - __pyx_t_2 = __pyx_t_5; __Pyx_INCREF(__pyx_t_2); __pyx_t_7 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_7 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = Py_TYPE(__pyx_t_2)->tp_iternext; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - for (;;) { - if (PyList_CheckExact(__pyx_t_2)) { - if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_2)) break; - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_5); __pyx_t_7++; - } else if (PyTuple_CheckExact(__pyx_t_2)) { - if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_5); __pyx_t_7++; - } else { - 
__pyx_t_5 = __pyx_t_8(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { - PyObject* sequence = __pyx_t_5; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_9 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_9 = PyList_GET_ITEM(sequence, 0); - __pyx_t_1 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_9); - __Pyx_INCREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_10 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_11 = Py_TYPE(__pyx_t_10)->tp_iternext; - index = 0; __pyx_t_9 = __pyx_t_11(__pyx_t_10); if (unlikely(!__pyx_t_9)) goto __pyx_L14_unpacking_failed; - __Pyx_GOTREF(__pyx_t_9); - index = 1; __pyx_t_1 = __pyx_t_11(__pyx_t_10); if (unlikely(!__pyx_t_1)) goto __pyx_L14_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - if 
(__Pyx_IternextUnpackEndCheck(__pyx_t_11(__pyx_t_10), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - goto __pyx_L15_unpacking_done; - __pyx_L14_unpacking_failed:; - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L15_unpacking_done:; - } - __Pyx_XDECREF(__pyx_v_key); - __pyx_v_key = __pyx_t_9; - __pyx_t_9 = 0; - __Pyx_XDECREF(__pyx_v_dt); - __pyx_v_dt = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":203 - * # copy refs to class_dtypes into object pointer array - * for key, dt in mio5p.MDTYPES[byte_order]['classes'].items(): - * if isinstance(key, str): # <<<<<<<<<<<<<< - * continue - * self.class_dtypes[key] = dt - */ - __pyx_t_5 = ((PyObject *)((PyObject*)(&PyString_Type))); - __Pyx_INCREF(__pyx_t_5); - __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_key, __pyx_t_5); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":204 - * for key, dt in mio5p.MDTYPES[byte_order]['classes'].items(): - * if isinstance(key, str): - * continue # <<<<<<<<<<<<<< - * self.class_dtypes[key] = dt - * # cache correctly byte ordered dtypes - */ - goto __pyx_L12_continue; - goto __pyx_L16; - } - __pyx_L16:; - - /* "scipy/io/matlab/mio5_utils.pyx":205 - * if isinstance(key, str): - * continue - * self.class_dtypes[key] = dt # <<<<<<<<<<<<<< - * # cache correctly byte ordered dtypes - * if self.little_endian: - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_key); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - (((struct 
__pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->class_dtypes[__pyx_t_6]) = ((PyObject *)__pyx_v_dt); - __pyx_L12_continue:; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":207 - * self.class_dtypes[key] = dt - * # cache correctly byte ordered dtypes - * if self.little_endian: # <<<<<<<<<<<<<< - * self.U1_dtype = np.dtype('little_endian) { - - /* "scipy/io/matlab/mio5_utils.pyx":208 - * # cache correctly byte ordered dtypes - * if self.little_endian: - * self.U1_dtype = np.dtype('U1') - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__dtype); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->U1_dtype); - __Pyx_DECREF(((PyObject *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->U1_dtype)); - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->U1_dtype = ((PyArray_Descr *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L17; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":210 - * self.U1_dtype = np.dtype('U1') # 
<<<<<<<<<<<<<< - * bool_dtype = np.dtype('bool') - * - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__dtype); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->U1_dtype); - __Pyx_DECREF(((PyObject *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->U1_dtype)); - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->U1_dtype = ((PyArray_Descr *)__pyx_t_2); - __pyx_t_2 = 0; - } - __pyx_L17:; - - /* "scipy/io/matlab/mio5_utils.pyx":211 - * else: - * self.U1_dtype = np.dtype('>U1') - * bool_dtype = np.dtype('bool') # <<<<<<<<<<<<<< - * - * def set_stream(self, fobj): - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__dtype); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_bool_dtype = __pyx_t_2; - __pyx_t_2 = 0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_byte_order); - __Pyx_XDECREF(__pyx_v_uint16_codec); - __Pyx_XDECREF(__pyx_v_key); - __Pyx_XDECREF(__pyx_v_dt); - __Pyx_XDECREF(__pyx_v_bool_dtype); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":213 - * bool_dtype = np.dtype('bool') - * - * def set_stream(self, fobj): # <<<<<<<<<<<<<< - * ''' Set stream of best type from file-like `fobj` - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_1set_stream(PyObject *__pyx_v_self, PyObject *__pyx_v_fobj); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_1set_stream[] = " Set stream of best type from file-like `fobj`\n\n Called from Python when initiating a variable read\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_1set_stream(PyObject *__pyx_v_self, PyObject *__pyx_v_fobj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("set_stream"); - - /* "scipy/io/matlab/mio5_utils.pyx":218 - * Called from Python when initiating a variable read - * ''' - * self.cstream = streams.make_stream(fobj) # <<<<<<<<<<<<<< - * - * def 
read_tag(self): - */ - __pyx_t_1 = ((PyObject *)__pyx_f_5scipy_2io_6matlab_7streams_make_stream(__pyx_v_fobj, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->cstream); - __Pyx_DECREF(((PyObject *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->cstream)); - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->cstream = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_t_1); - __pyx_t_1 = 0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.set_stream", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":220 - * self.cstream = streams.make_stream(fobj) - * - * def read_tag(self): # <<<<<<<<<<<<<< - * ''' Read tag mdtype and byte_count - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_2read_tag(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_2read_tag[] = " Read tag mdtype and byte_count\n\n Does necessary swapping and takes account of SDE formats.\n\n See also ``read_full_tag`` method.\n \n Returns\n -------\n mdtype : int\n matlab data type code\n byte_count : int\n number of bytes following that comprise the data\n tag_data : None or str\n Any data from the tag itself. 
This is None for a full tag,\n and string length `byte_count` if this is a small data\n element.\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_2read_tag(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - char __pyx_v_tag_ptr[4]; - int __pyx_v_tag_res; - PyObject *__pyx_v_tag_data = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_tag"); - - /* "scipy/io/matlab/mio5_utils.pyx":241 - * cdef char tag_ptr[4] - * cdef int tag_res - * cdef object tag_data = None # <<<<<<<<<<<<<< - * tag_res = self.cread_tag(&mdtype, &byte_count, tag_ptr) - * if tag_res == 2: # sde format - */ - __Pyx_INCREF(Py_None); - __pyx_v_tag_data = Py_None; - - /* "scipy/io/matlab/mio5_utils.pyx":242 - * cdef int tag_res - * cdef object tag_data = None - * tag_res = self.cread_tag(&mdtype, &byte_count, tag_ptr) # <<<<<<<<<<<<<< - * if tag_res == 2: # sde format - * tag_data = tag_ptr[:byte_count] - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->cread_tag(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), (&__pyx_v_mdtype), (&__pyx_v_byte_count), __pyx_v_tag_ptr); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_tag_res = __pyx_t_1; - - /* "scipy/io/matlab/mio5_utils.pyx":243 - * cdef object tag_data = None - * tag_res = self.cread_tag(&mdtype, &byte_count, tag_ptr) - * if tag_res == 2: # sde format # <<<<<<<<<<<<<< - * tag_data = tag_ptr[:byte_count] - * return (mdtype, 
byte_count, tag_data) - */ - __pyx_t_2 = (__pyx_v_tag_res == 2); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":244 - * tag_res = self.cread_tag(&mdtype, &byte_count, tag_ptr) - * if tag_res == 2: # sde format - * tag_data = tag_ptr[:byte_count] # <<<<<<<<<<<<<< - * return (mdtype, byte_count, tag_data) - * - */ - __pyx_t_3 = PyBytes_FromStringAndSize(__pyx_v_tag_ptr + 0, __pyx_v_byte_count - 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_DECREF(__pyx_v_tag_data); - __pyx_v_tag_data = ((PyObject *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "scipy/io/matlab/mio5_utils.pyx":245 - * if tag_res == 2: # sde format - * tag_data = tag_ptr[:byte_count] - * return (mdtype, byte_count, tag_data) # <<<<<<<<<<<<<< - * - * cdef int cread_tag(self, - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_mdtype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_byte_count); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __Pyx_INCREF(__pyx_v_tag_data); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_tag_data); - __Pyx_GIVEREF(__pyx_v_tag_data); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_r = ((PyObject *)__pyx_t_5); - __pyx_t_5 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto 
__pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_tag", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_tag_data); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":247 - * return (mdtype, byte_count, tag_data) - * - * cdef int cread_tag(self, # <<<<<<<<<<<<<< - * cnp.uint32_t *mdtype_ptr, - * cnp.uint32_t *byte_count_ptr, - */ - -static int __pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_cread_tag(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, __pyx_t_5numpy_uint32_t *__pyx_v_mdtype_ptr, __pyx_t_5numpy_uint32_t *__pyx_v_byte_count_ptr, char *__pyx_v_data_ptr) { - __pyx_t_5numpy_uint16_t __pyx_v_mdtype_sde; - __pyx_t_5numpy_uint16_t __pyx_v_byte_count_sde; - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t *__pyx_v_u4_ptr; - __pyx_t_5numpy_uint32_t __pyx_v_u4s[2]; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cread_tag"); - - /* "scipy/io/matlab/mio5_utils.pyx":262 - * cdef cnp.uint16_t mdtype_sde, byte_count_sde - * cdef cnp.uint32_t mdtype - * cdef cnp.uint32_t* u4_ptr = data_ptr # <<<<<<<<<<<<<< - * cdef cnp.uint32_t u4s[2] - * # First read 8 bytes. The 8 bytes can be in one of two formats. - */ - __pyx_v_u4_ptr = ((__pyx_t_5numpy_uint32_t *)__pyx_v_data_ptr); - - /* "scipy/io/matlab/mio5_utils.pyx":290 - * # first four bytes are two little-endian uint16 values, first - * # ``mdtype`` and second ``byte_count``. 
- * self.cstream.read_into(u4s, 8) # <<<<<<<<<<<<<< - * if self.is_swapped: - * mdtype = byteswap_u4(u4s[0]) - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->read_into(__pyx_v_self->cstream, ((void *)__pyx_v_u4s), 8); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":291 - * # ``mdtype`` and second ``byte_count``. - * self.cstream.read_into(u4s, 8) - * if self.is_swapped: # <<<<<<<<<<<<<< - * mdtype = byteswap_u4(u4s[0]) - * else: - */ - if (__pyx_v_self->is_swapped) { - - /* "scipy/io/matlab/mio5_utils.pyx":292 - * self.cstream.read_into(u4s, 8) - * if self.is_swapped: - * mdtype = byteswap_u4(u4s[0]) # <<<<<<<<<<<<<< - * else: - * mdtype = u4s[0] - */ - __pyx_v_mdtype = __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_u4s[0]), 0); - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":294 - * mdtype = byteswap_u4(u4s[0]) - * else: - * mdtype = u4s[0] # <<<<<<<<<<<<<< - * # The most significant two bytes of a U4 *mdtype* will always be - * # 0, if they are not, this must be SDE format - */ - __pyx_v_mdtype = (__pyx_v_u4s[0]); - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":297 - * # The most significant two bytes of a U4 *mdtype* will always be - * # 0, if they are not, this must be SDE format - * byte_count_sde = mdtype >> 16 # <<<<<<<<<<<<<< - * if byte_count_sde: # small data element format - * mdtype_sde = mdtype & 0xffff - */ - __pyx_v_byte_count_sde = (__pyx_v_mdtype >> 16); - - /* "scipy/io/matlab/mio5_utils.pyx":298 - * # 0, if they are not, this must be SDE format - * byte_count_sde = mdtype >> 16 - * if byte_count_sde: # small data element format # <<<<<<<<<<<<<< - * mdtype_sde = mdtype & 0xffff - * if byte_count_sde > 4: - */ - if (__pyx_v_byte_count_sde) { - - /* "scipy/io/matlab/mio5_utils.pyx":299 - * byte_count_sde = 
mdtype >> 16 - * if byte_count_sde: # small data element format - * mdtype_sde = mdtype & 0xffff # <<<<<<<<<<<<<< - * if byte_count_sde > 4: - * raise ValueError('Error in SDE format data') - */ - __pyx_v_mdtype_sde = (__pyx_v_mdtype & 0xffff); - - /* "scipy/io/matlab/mio5_utils.pyx":300 - * if byte_count_sde: # small data element format - * mdtype_sde = mdtype & 0xffff - * if byte_count_sde > 4: # <<<<<<<<<<<<<< - * raise ValueError('Error in SDE format data') - * return -1 - */ - __pyx_t_2 = (__pyx_v_byte_count_sde > 4); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":301 - * mdtype_sde = mdtype & 0xffff - * if byte_count_sde > 4: - * raise ValueError('Error in SDE format data') # <<<<<<<<<<<<<< - * return -1 - * u4_ptr[0] = u4s[1] - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "scipy/io/matlab/mio5_utils.pyx":303 - * raise ValueError('Error in SDE format data') - * return -1 - * u4_ptr[0] = u4s[1] # <<<<<<<<<<<<<< - * mdtype_ptr[0] = mdtype_sde - * byte_count_ptr[0] = byte_count_sde - */ - (__pyx_v_u4_ptr[0]) = (__pyx_v_u4s[1]); - - /* "scipy/io/matlab/mio5_utils.pyx":304 - * return -1 - * u4_ptr[0] = u4s[1] - * mdtype_ptr[0] = mdtype_sde # <<<<<<<<<<<<<< - * byte_count_ptr[0] = byte_count_sde - * return 2 - */ - (__pyx_v_mdtype_ptr[0]) = __pyx_v_mdtype_sde; - - /* "scipy/io/matlab/mio5_utils.pyx":305 - * u4_ptr[0] = u4s[1] - * mdtype_ptr[0] = mdtype_sde - * byte_count_ptr[0] = byte_count_sde # <<<<<<<<<<<<<< - * return 2 - * # regular element - */ - (__pyx_v_byte_count_ptr[0]) = __pyx_v_byte_count_sde; - - /* 
"scipy/io/matlab/mio5_utils.pyx":306 - * mdtype_ptr[0] = mdtype_sde - * byte_count_ptr[0] = byte_count_sde - * return 2 # <<<<<<<<<<<<<< - * # regular element - * if self.is_swapped: - */ - __pyx_r = 2; - goto __pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":308 - * return 2 - * # regular element - * if self.is_swapped: # <<<<<<<<<<<<<< - * byte_count_ptr[0] = byteswap_u4(u4s[1]) - * else: - */ - if (__pyx_v_self->is_swapped) { - - /* "scipy/io/matlab/mio5_utils.pyx":309 - * # regular element - * if self.is_swapped: - * byte_count_ptr[0] = byteswap_u4(u4s[1]) # <<<<<<<<<<<<<< - * else: - * byte_count_ptr[0] = u4s[1] - */ - (__pyx_v_byte_count_ptr[0]) = __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_u4s[1]), 0); - goto __pyx_L6; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":311 - * byte_count_ptr[0] = byteswap_u4(u4s[1]) - * else: - * byte_count_ptr[0] = u4s[1] # <<<<<<<<<<<<<< - * mdtype_ptr[0] = mdtype - * u4_ptr[0] = 0 - */ - (__pyx_v_byte_count_ptr[0]) = (__pyx_v_u4s[1]); - } - __pyx_L6:; - - /* "scipy/io/matlab/mio5_utils.pyx":312 - * else: - * byte_count_ptr[0] = u4s[1] - * mdtype_ptr[0] = mdtype # <<<<<<<<<<<<<< - * u4_ptr[0] = 0 - * return 1 - */ - (__pyx_v_mdtype_ptr[0]) = __pyx_v_mdtype; - - /* "scipy/io/matlab/mio5_utils.pyx":313 - * byte_count_ptr[0] = u4s[1] - * mdtype_ptr[0] = mdtype - * u4_ptr[0] = 0 # <<<<<<<<<<<<<< - * return 1 - * - */ - (__pyx_v_u4_ptr[0]) = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":314 - * mdtype_ptr[0] = mdtype - * u4_ptr[0] = 0 - * return 1 # <<<<<<<<<<<<<< - * - * cdef object read_element(self, - */ - __pyx_r = 1; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.cread_tag", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":316 - * return 1 - * 
- * cdef object read_element(self, # <<<<<<<<<<<<<< - * cnp.uint32_t *mdtype_ptr, - * cnp.uint32_t *byte_count_ptr, - */ - -static PyObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, __pyx_t_5numpy_uint32_t *__pyx_v_mdtype_ptr, __pyx_t_5numpy_uint32_t *__pyx_v_byte_count_ptr, void **__pyx_v_pp, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element *__pyx_optional_args) { - - /* "scipy/io/matlab/mio5_utils.pyx":320 - * cnp.uint32_t *byte_count_ptr, - * void **pp, - * int copy=True): # <<<<<<<<<<<<<< - * ''' Read data element into string buffer, return buffer - * - */ - int __pyx_v_copy = ((int)1); - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - char __pyx_v_tag_data[4]; - PyObject *__pyx_v_data = 0; - int __pyx_v_mod8; - int __pyx_v_tag_res; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string __pyx_t_4; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek __pyx_t_5; - char *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_element"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_copy = __pyx_optional_args->copy; - } - } - - /* "scipy/io/matlab/mio5_utils.pyx":356 - * cdef int tag_res = self.cread_tag(mdtype_ptr, - * byte_count_ptr, - * tag_data) # <<<<<<<<<<<<<< - * mdtype = mdtype_ptr[0] - * byte_count = byte_count_ptr[0] - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->cread_tag(__pyx_v_self, __pyx_v_mdtype_ptr, __pyx_v_byte_count_ptr, __pyx_v_tag_data); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __pyx_v_tag_res = __pyx_t_1; - - /* "scipy/io/matlab/mio5_utils.pyx":357 - * byte_count_ptr, - * tag_data) - * mdtype = mdtype_ptr[0] # <<<<<<<<<<<<<< - * byte_count = byte_count_ptr[0] - * if tag_res == 1: # full format - */ - __pyx_v_mdtype = (__pyx_v_mdtype_ptr[0]); - - /* "scipy/io/matlab/mio5_utils.pyx":358 - * tag_data) - * mdtype = mdtype_ptr[0] - * byte_count = byte_count_ptr[0] # <<<<<<<<<<<<<< - * if tag_res == 1: # full format - * data = self.cstream.read_string( - */ - __pyx_v_byte_count = (__pyx_v_byte_count_ptr[0]); - - /* "scipy/io/matlab/mio5_utils.pyx":359 - * mdtype = mdtype_ptr[0] - * byte_count = byte_count_ptr[0] - * if tag_res == 1: # full format # <<<<<<<<<<<<<< - * data = self.cstream.read_string( - * byte_count, - */ - __pyx_t_2 = (__pyx_v_tag_res == 1); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":363 - * byte_count, - * pp, - * copy) # <<<<<<<<<<<<<< - * # Seek to next 64-bit boundary - * mod8 = byte_count % 8 - */ - __pyx_t_4.__pyx_n = 1; - __pyx_t_4.copy = __pyx_v_copy; - __pyx_t_3 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->read_string(__pyx_v_self->cstream, __pyx_v_byte_count, __pyx_v_pp, &__pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_data = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":365 - * copy) - * # Seek to next 64-bit boundary - * mod8 = byte_count % 8 # <<<<<<<<<<<<<< - * if mod8: - * self.cstream.seek(8 - mod8, 1) - */ - __pyx_v_mod8 = __Pyx_mod_long(__pyx_v_byte_count, 8); - - /* "scipy/io/matlab/mio5_utils.pyx":366 - * # Seek to next 64-bit boundary - * mod8 = byte_count % 8 - * if mod8: # <<<<<<<<<<<<<< - * self.cstream.seek(8 - mod8, 1) - * else: # SDE format, make safer home for data - */ - if (__pyx_v_mod8) { - - /* "scipy/io/matlab/mio5_utils.pyx":367 - * mod8 = 
byte_count % 8 - * if mod8: - * self.cstream.seek(8 - mod8, 1) # <<<<<<<<<<<<<< - * else: # SDE format, make safer home for data - * data = PyBytes_FromStringAndSize(tag_data, byte_count) - */ - __pyx_t_5.__pyx_n = 1; - __pyx_t_5.whence = 1; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->seek(__pyx_v_self->cstream, (8 - __pyx_v_mod8), 0, &__pyx_t_5); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L4; - } - __pyx_L4:; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":369 - * self.cstream.seek(8 - mod8, 1) - * else: # SDE format, make safer home for data - * data = PyBytes_FromStringAndSize(tag_data, byte_count) # <<<<<<<<<<<<<< - * pp[0] = data - * return data - */ - __pyx_t_3 = ((PyObject *)PyBytes_FromStringAndSize(__pyx_v_tag_data, __pyx_v_byte_count)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_data = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":370 - * else: # SDE format, make safer home for data - * data = PyBytes_FromStringAndSize(tag_data, byte_count) - * pp[0] = data # <<<<<<<<<<<<<< - * return data - * - */ - __pyx_t_6 = PyBytes_AsString(__pyx_v_data); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - (__pyx_v_pp[0]) = ((char *)__pyx_t_6); - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":371 - * data = PyBytes_FromStringAndSize(tag_data, byte_count) - * pp[0] = data - * return data # <<<<<<<<<<<<<< - * - * cdef int read_element_into(self, - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_data); - __pyx_r = __pyx_v_data; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_element", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_data); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":373 - * return data - * - * cdef int read_element_into(self, # <<<<<<<<<<<<<< - * cnp.uint32_t *mdtype_ptr, - * cnp.uint32_t *byte_count_ptr, - */ - -static int __pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element_into(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, __pyx_t_5numpy_uint32_t *__pyx_v_mdtype_ptr, __pyx_t_5numpy_uint32_t *__pyx_v_byte_count_ptr, void *__pyx_v_ptr) { - int __pyx_v_mod8; - int __pyx_v_res; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_element_into"); - - /* "scipy/io/matlab/mio5_utils.pyx":402 - * mdtype_ptr, - * byte_count_ptr, - * ptr) # <<<<<<<<<<<<<< - * cdef cnp.uint32_t byte_count = byte_count_ptr[0] - * if res == 1: # full format - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->cread_tag(__pyx_v_self, __pyx_v_mdtype_ptr, __pyx_v_byte_count_ptr, ((char *)__pyx_v_ptr)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_res = __pyx_t_1; - - /* "scipy/io/matlab/mio5_utils.pyx":403 - * byte_count_ptr, - * ptr) - * cdef cnp.uint32_t byte_count = byte_count_ptr[0] # <<<<<<<<<<<<<< - * if res == 1: # full format - * res = self.cstream.read_into(ptr, byte_count) - */ - __pyx_v_byte_count = (__pyx_v_byte_count_ptr[0]); - - /* 
"scipy/io/matlab/mio5_utils.pyx":404 - * ptr) - * cdef cnp.uint32_t byte_count = byte_count_ptr[0] - * if res == 1: # full format # <<<<<<<<<<<<<< - * res = self.cstream.read_into(ptr, byte_count) - * # Seek to next 64-bit boundary - */ - __pyx_t_2 = (__pyx_v_res == 1); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":405 - * cdef cnp.uint32_t byte_count = byte_count_ptr[0] - * if res == 1: # full format - * res = self.cstream.read_into(ptr, byte_count) # <<<<<<<<<<<<<< - * # Seek to next 64-bit boundary - * mod8 = byte_count % 8 - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->read_into(__pyx_v_self->cstream, __pyx_v_ptr, __pyx_v_byte_count); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_res = __pyx_t_1; - - /* "scipy/io/matlab/mio5_utils.pyx":407 - * res = self.cstream.read_into(ptr, byte_count) - * # Seek to next 64-bit boundary - * mod8 = byte_count % 8 # <<<<<<<<<<<<<< - * if mod8: - * self.cstream.seek(8 - mod8, 1) - */ - __pyx_v_mod8 = __Pyx_mod_long(__pyx_v_byte_count, 8); - - /* "scipy/io/matlab/mio5_utils.pyx":408 - * # Seek to next 64-bit boundary - * mod8 = byte_count % 8 - * if mod8: # <<<<<<<<<<<<<< - * self.cstream.seek(8 - mod8, 1) - * return 0 - */ - if (__pyx_v_mod8) { - - /* "scipy/io/matlab/mio5_utils.pyx":409 - * mod8 = byte_count % 8 - * if mod8: - * self.cstream.seek(8 - mod8, 1) # <<<<<<<<<<<<<< - * return 0 - * - */ - __pyx_t_3.__pyx_n = 1; - __pyx_t_3.whence = 1; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->seek(__pyx_v_self->cstream, (8 - __pyx_v_mod8), 0, &__pyx_t_3); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L4; - } - __pyx_L4:; - goto __pyx_L3; - } - __pyx_L3:; - - /* 
"scipy/io/matlab/mio5_utils.pyx":410 - * if mod8: - * self.cstream.seek(8 - mod8, 1) - * return 0 # <<<<<<<<<<<<<< - * - * cpdef inline cnp.ndarray read_numeric(self, int copy=True): - */ - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_element_into", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":412 - * return 0 - * - * cpdef inline cnp.ndarray read_numeric(self, int copy=True): # <<<<<<<<<<<<<< - * ''' Read numeric data element into ndarray - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static CYTHON_INLINE PyArrayObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric *__pyx_optional_args) { - int __pyx_v_copy = ((int)1); - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - void *__pyx_v_data_ptr; - npy_intp __pyx_v_el_count; - PyArrayObject *__pyx_v_el = 0; - PyObject *__pyx_v_data = 0; - PyArray_Descr *__pyx_v_dt = 0; - int __pyx_v_flags; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_numeric"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_copy = __pyx_optional_args->copy; - } - } - /* Check if called by wrapper */ - if 
(unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_numeric); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric)) { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyInt_FromLong(__pyx_v_copy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":425 - * cdef cnp.ndarray el - * cdef object data = self.read_element( - * &mdtype, &byte_count, &data_ptr, copy) # <<<<<<<<<<<<<< - * cdef cnp.dtype dt = self.dtypes[mdtype] - * el_count = byte_count // dt.itemsize - */ - __pyx_t_4.__pyx_n 
= 1; - __pyx_t_4.copy = __pyx_v_copy; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_element(__pyx_v_self, (&__pyx_v_mdtype), (&__pyx_v_byte_count), ((void **)(&__pyx_v_data_ptr)), &__pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_data = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":426 - * cdef object data = self.read_element( - * &mdtype, &byte_count, &data_ptr, copy) - * cdef cnp.dtype dt = self.dtypes[mdtype] # <<<<<<<<<<<<<< - * el_count = byte_count // dt.itemsize - * cdef int flags = 0 - */ - __Pyx_INCREF(((PyObject *)((PyArray_Descr *)(__pyx_v_self->dtypes[__pyx_v_mdtype])))); - __pyx_v_dt = ((PyArray_Descr *)(__pyx_v_self->dtypes[__pyx_v_mdtype])); - - /* "scipy/io/matlab/mio5_utils.pyx":427 - * &mdtype, &byte_count, &data_ptr, copy) - * cdef cnp.dtype dt = self.dtypes[mdtype] - * el_count = byte_count // dt.itemsize # <<<<<<<<<<<<<< - * cdef int flags = 0 - * if copy: - */ - if (unlikely(__pyx_v_dt->elsize == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_el_count = (__pyx_v_byte_count / __pyx_v_dt->elsize); - - /* "scipy/io/matlab/mio5_utils.pyx":428 - * cdef cnp.dtype dt = self.dtypes[mdtype] - * el_count = byte_count // dt.itemsize - * cdef int flags = 0 # <<<<<<<<<<<<<< - * if copy: - * flags = cnp.NPY_WRITEABLE - */ - __pyx_v_flags = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":429 - * el_count = byte_count // dt.itemsize - * cdef int flags = 0 - * if copy: # <<<<<<<<<<<<<< - * flags = cnp.NPY_WRITEABLE - * Py_INCREF( dt) - */ - if (__pyx_v_copy) { - - /* "scipy/io/matlab/mio5_utils.pyx":430 - * cdef int flags = 0 - * if copy: - * flags = cnp.NPY_WRITEABLE # <<<<<<<<<<<<<< - * Py_INCREF( dt) 
- * el = PyArray_NewFromDescr(&PyArray_Type, - */ - __pyx_v_flags = NPY_WRITEABLE; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":431 - * if copy: - * flags = cnp.NPY_WRITEABLE - * Py_INCREF( dt) # <<<<<<<<<<<<<< - * el = PyArray_NewFromDescr(&PyArray_Type, - * dt, - */ - Py_INCREF(((PyObject *)__pyx_v_dt)); - - /* "scipy/io/matlab/mio5_utils.pyx":439 - * data_ptr, - * flags, - * NULL) # <<<<<<<<<<<<<< - * Py_INCREF( data) - * PyArray_Set_BASE(el, data) - */ - __pyx_t_1 = ((PyObject *)PyArray_NewFromDescr((&PyArray_Type), __pyx_v_dt, 1, (&__pyx_v_el_count), NULL, ((void *)__pyx_v_data_ptr), __pyx_v_flags, ((PyObject *)NULL))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 432; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_el = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":440 - * flags, - * NULL) - * Py_INCREF( data) # <<<<<<<<<<<<<< - * PyArray_Set_BASE(el, data) - * return el - */ - Py_INCREF(__pyx_v_data); - - /* "scipy/io/matlab/mio5_utils.pyx":441 - * NULL) - * Py_INCREF( data) - * PyArray_Set_BASE(el, data) # <<<<<<<<<<<<<< - * return el - * - */ - PyArray_Set_BASE(__pyx_v_el, __pyx_v_data); - - /* "scipy/io/matlab/mio5_utils.pyx":442 - * Py_INCREF( data) - * PyArray_Set_BASE(el, data) - * return el # <<<<<<<<<<<<<< - * - * cdef inline object read_int8_string(self): - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_el)); - __pyx_r = __pyx_v_el; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_numeric", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_el); - __Pyx_XDECREF(__pyx_v_data); - __Pyx_XDECREF((PyObject *)__pyx_v_dt); 
- __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":412 - * return 0 - * - * cpdef inline cnp.ndarray read_numeric(self, int copy=True): # <<<<<<<<<<<<<< - * ''' Read numeric data element into ndarray - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric[] = " Read numeric data element into ndarray\n\n Reads element, then casts to ndarray. \n\n The type of the array is given by the ``mdtype`` returned via\n ``read_element``. \n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - int __pyx_v_copy; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__copy,0}; - __Pyx_RefNannySetupContext("read_numeric"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[1] = {0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__copy); - if (value) { values[0] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "read_numeric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - 
if (values[0]) { - __pyx_v_copy = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_copy == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_copy = ((int)1); - } - } else { - __pyx_v_copy = ((int)1); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 1: __pyx_v_copy = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_copy == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("read_numeric", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_numeric", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_XDECREF(__pyx_r); - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.copy = __pyx_v_copy; - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_numeric(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), 1, &__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_numeric", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - 
__Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":444 - * return el - * - * cdef inline object read_int8_string(self): # <<<<<<<<<<<<<< - * ''' Read, return int8 type string - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_int8_string(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self) { - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - void *__pyx_v_ptr; - PyObject *__pyx_v_data = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_int8_string"); - - /* "scipy/io/matlab/mio5_utils.pyx":456 - * void *ptr - * object data - * data = self.read_element(&mdtype, &byte_count, &ptr) # <<<<<<<<<<<<<< - * if mdtype != miINT8: - * raise TypeError('Expecting miINT8 as data type') - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_element(__pyx_v_self, (&__pyx_v_mdtype), (&__pyx_v_byte_count), (&__pyx_v_ptr), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_data = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":457 - * object data - * data = self.read_element(&mdtype, &byte_count, &ptr) - * if mdtype != miINT8: # <<<<<<<<<<<<<< - * raise TypeError('Expecting miINT8 as data type') - * return data - */ - __pyx_t_2 = (__pyx_v_mdtype != __pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT8); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":458 - * data = self.read_element(&mdtype, &byte_count, &ptr) - * if mdtype != miINT8: - * raise TypeError('Expecting miINT8 as data type') # <<<<<<<<<<<<<< - * return data - * - */ - 
__pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":459 - * if mdtype != miINT8: - * raise TypeError('Expecting miINT8 as data type') - * return data # <<<<<<<<<<<<<< - * - * cdef int read_into_int32s(self, cnp.int32_t *int32p) except -1: - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_data); - __pyx_r = __pyx_v_data; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_int8_string", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_data); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":461 - * return data - * - * cdef int read_into_int32s(self, cnp.int32_t *int32p) except -1: # <<<<<<<<<<<<<< - * ''' Read int32 values into pre-allocated memory - * - */ - -static int __pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_into_int32s(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, __pyx_t_5numpy_int32_t *__pyx_v_int32p) { - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - int __pyx_v_i; - int __pyx_v_n_ints; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_into_int32s"); - - /* "scipy/io/matlab/mio5_utils.pyx":478 
- * cnp.uint32_t mdtype, byte_count - * int i - * self.read_element_into(&mdtype, &byte_count, int32p) # <<<<<<<<<<<<<< - * if mdtype != miINT32: - * raise TypeError('Expecting miINT32 as data type') - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_element_into(__pyx_v_self, (&__pyx_v_mdtype), (&__pyx_v_byte_count), ((void *)__pyx_v_int32p)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 478; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":479 - * int i - * self.read_element_into(&mdtype, &byte_count, int32p) - * if mdtype != miINT32: # <<<<<<<<<<<<<< - * raise TypeError('Expecting miINT32 as data type') - * return -1 - */ - __pyx_t_2 = (__pyx_v_mdtype != __pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT32); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":480 - * self.read_element_into(&mdtype, &byte_count, int32p) - * if mdtype != miINT32: - * raise TypeError('Expecting miINT32 as data type') # <<<<<<<<<<<<<< - * return -1 - * cdef int n_ints = byte_count // 4 - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_13), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 480; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 480; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":482 - * raise TypeError('Expecting miINT32 as data type') - * return -1 - * cdef int n_ints = byte_count // 4 # <<<<<<<<<<<<<< - * if self.is_swapped: - * for i in range(n_ints): - */ - __pyx_v_n_ints = __Pyx_div_long(__pyx_v_byte_count, 4); - - /* "scipy/io/matlab/mio5_utils.pyx":483 - * return -1 - * cdef int n_ints = byte_count // 4 - * if self.is_swapped: # 
<<<<<<<<<<<<<< - * for i in range(n_ints): - * int32p[i] = byteswap_u4(int32p[i]) - */ - if (__pyx_v_self->is_swapped) { - - /* "scipy/io/matlab/mio5_utils.pyx":484 - * cdef int n_ints = byte_count // 4 - * if self.is_swapped: - * for i in range(n_ints): # <<<<<<<<<<<<<< - * int32p[i] = byteswap_u4(int32p[i]) - * return n_ints - */ - __pyx_t_1 = __pyx_v_n_ints; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_1; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "scipy/io/matlab/mio5_utils.pyx":485 - * if self.is_swapped: - * for i in range(n_ints): - * int32p[i] = byteswap_u4(int32p[i]) # <<<<<<<<<<<<<< - * return n_ints - * - */ - (__pyx_v_int32p[__pyx_v_i]) = __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_int32p[__pyx_v_i]), 0); - } - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":486 - * for i in range(n_ints): - * int32p[i] = byteswap_u4(int32p[i]) - * return n_ints # <<<<<<<<<<<<<< - * - * def read_full_tag(self): - */ - __pyx_r = __pyx_v_n_ints; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_into_int32s", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":488 - * return n_ints - * - * def read_full_tag(self): # <<<<<<<<<<<<<< - * ''' Python method for reading full u4, u4 tag from stream - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_4read_full_tag(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_4read_full_tag[] = " Python method for reading full u4, u4 tag from stream\n\n Returns\n -------\n mdtype : int32\n matlab data type code\n byte_count : int32\n number of data bytes following\n\n Notes\n -----\n Assumes tag is in fact full, that is, is not a small data\n element. 
This means it can skip some checks and makes it\n slightly faster than ``read_tag``\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_4read_full_tag(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_full_tag"); - - /* "scipy/io/matlab/mio5_utils.pyx":505 - * ''' - * cdef cnp.uint32_t mdtype, byte_count - * self.cread_full_tag(&mdtype, &byte_count) # <<<<<<<<<<<<<< - * return mdtype, byte_count - * - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->cread_full_tag(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), (&__pyx_v_mdtype), (&__pyx_v_byte_count)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":506 - * cdef cnp.uint32_t mdtype, byte_count - * self.cread_full_tag(&mdtype, &byte_count) - * return mdtype, byte_count # <<<<<<<<<<<<<< - * - * cdef int cread_full_tag(self, - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_mdtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_byte_count); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_r = ((PyObject *)__pyx_t_4); - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_full_tag", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":508 - * return mdtype, byte_count - * - * cdef int cread_full_tag(self, # <<<<<<<<<<<<<< - * cnp.uint32_t* mdtype, - * cnp.uint32_t* byte_count) except -1: - */ - -static int __pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_cread_full_tag(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, __pyx_t_5numpy_uint32_t *__pyx_v_mdtype, __pyx_t_5numpy_uint32_t *__pyx_v_byte_count) { - __pyx_t_5numpy_uint32_t __pyx_v_u4s[2]; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cread_full_tag"); - - /* "scipy/io/matlab/mio5_utils.pyx":513 - * ''' C method for reading full u4, u4 tag from stream''' - * cdef cnp.uint32_t u4s[2] - * self.cstream.read_into(u4s, 8) # <<<<<<<<<<<<<< - * if self.is_swapped: - * mdtype[0] = byteswap_u4(u4s[0]) - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->read_into(__pyx_v_self->cstream, ((void *)__pyx_v_u4s), 8); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 513; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":514 - * cdef cnp.uint32_t u4s[2] - * self.cstream.read_into(u4s, 8) - * if self.is_swapped: # <<<<<<<<<<<<<< - * mdtype[0] = byteswap_u4(u4s[0]) - * byte_count[0] = byteswap_u4(u4s[1]) - */ - if (__pyx_v_self->is_swapped) { - - /* "scipy/io/matlab/mio5_utils.pyx":515 - * self.cstream.read_into(u4s, 8) - * if self.is_swapped: - * mdtype[0] = byteswap_u4(u4s[0]) # <<<<<<<<<<<<<< - * byte_count[0] = byteswap_u4(u4s[1]) - * else: - */ - (__pyx_v_mdtype[0]) = __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_u4s[0]), 0); - - /* "scipy/io/matlab/mio5_utils.pyx":516 - * if self.is_swapped: - * mdtype[0] = byteswap_u4(u4s[0]) - * byte_count[0] = byteswap_u4(u4s[1]) # <<<<<<<<<<<<<< - * else: - * mdtype[0] = u4s[0] - */ - (__pyx_v_byte_count[0]) = __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_u4s[1]), 0); - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":518 - * byte_count[0] = byteswap_u4(u4s[1]) - * else: - * mdtype[0] = u4s[0] # <<<<<<<<<<<<<< - * byte_count[0] = u4s[1] - * return 0 - */ - (__pyx_v_mdtype[0]) = (__pyx_v_u4s[0]); - - /* "scipy/io/matlab/mio5_utils.pyx":519 - * else: - * mdtype[0] = u4s[0] - * byte_count[0] = u4s[1] # <<<<<<<<<<<<<< - * return 0 - * - */ - (__pyx_v_byte_count[0]) = (__pyx_v_u4s[1]); - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":520 - * mdtype[0] = u4s[0] - * byte_count[0] = u4s[1] - * return 0 # <<<<<<<<<<<<<< - * - * cpdef VarHeader5 read_header(self): - */ - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.cread_full_tag", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":522 - * return 0 - * - * cpdef VarHeader5 read_header(self): # <<<<<<<<<<<<<< - * ''' Return matrix header for current stream 
position - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_header(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, int __pyx_skip_dispatch) { - __pyx_t_5numpy_uint32_t __pyx_v_u4s[2]; - __pyx_t_5numpy_uint32_t __pyx_v_flags_class; - __pyx_t_5numpy_uint32_t __pyx_v_nzmax; - __pyx_t_5numpy_uint16_t __pyx_v_mc; - int __pyx_v_i; - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header = 0; - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_header"); - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_header); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header)) { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, 
__pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_t_2); - __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":536 - * VarHeader5 header - * # Read and discard mdtype and byte_count - * self.cstream.read_into(u4s, 8) # <<<<<<<<<<<<<< - * # get array flags and nzmax - * self.cstream.read_into(u4s, 8) - */ - __pyx_t_3 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->read_into(__pyx_v_self->cstream, ((void *)__pyx_v_u4s), 8); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":538 - * self.cstream.read_into(u4s, 8) - * # get array flags and nzmax - * self.cstream.read_into(u4s, 8) # <<<<<<<<<<<<<< - * if self.is_swapped: - * flags_class = byteswap_u4(u4s[0]) - */ - __pyx_t_3 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self->cstream->__pyx_vtab)->read_into(__pyx_v_self->cstream, ((void *)__pyx_v_u4s), 8); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 538; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":539 - * # get array flags and nzmax - * self.cstream.read_into(u4s, 8) - * if self.is_swapped: # <<<<<<<<<<<<<< - * flags_class = byteswap_u4(u4s[0]) - * nzmax = byteswap_u4(u4s[1]) - */ - if (__pyx_v_self->is_swapped) { - - /* "scipy/io/matlab/mio5_utils.pyx":540 - * self.cstream.read_into(u4s, 8) - * if self.is_swapped: - * flags_class = byteswap_u4(u4s[0]) # <<<<<<<<<<<<<< - * nzmax = byteswap_u4(u4s[1]) - * else: - */ - __pyx_v_flags_class = 
__pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_u4s[0]), 0); - - /* "scipy/io/matlab/mio5_utils.pyx":541 - * if self.is_swapped: - * flags_class = byteswap_u4(u4s[0]) - * nzmax = byteswap_u4(u4s[1]) # <<<<<<<<<<<<<< - * else: - * flags_class = u4s[0] - */ - __pyx_v_nzmax = __pyx_f_5scipy_2io_6matlab_10mio5_utils_byteswap_u4((__pyx_v_u4s[1]), 0); - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":543 - * nzmax = byteswap_u4(u4s[1]) - * else: - * flags_class = u4s[0] # <<<<<<<<<<<<<< - * nzmax = u4s[1] - * header = VarHeader5() - */ - __pyx_v_flags_class = (__pyx_v_u4s[0]); - - /* "scipy/io/matlab/mio5_utils.pyx":544 - * else: - * flags_class = u4s[0] - * nzmax = u4s[1] # <<<<<<<<<<<<<< - * header = VarHeader5() - * mc = flags_class & 0xFF - */ - __pyx_v_nzmax = (__pyx_v_u4s[1]); - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":545 - * flags_class = u4s[0] - * nzmax = u4s[1] - * header = VarHeader5() # <<<<<<<<<<<<<< - * mc = flags_class & 0xFF - * header.mclass = mc - */ - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 545; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_header = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":546 - * nzmax = u4s[1] - * header = VarHeader5() - * mc = flags_class & 0xFF # <<<<<<<<<<<<<< - * header.mclass = mc - * header.is_logical = flags_class >> 9 & 1 - */ - __pyx_v_mc = (__pyx_v_flags_class & 0xFF); - - /* "scipy/io/matlab/mio5_utils.pyx":547 - * header = VarHeader5() - * mc = flags_class & 0xFF - * header.mclass = mc # <<<<<<<<<<<<<< - * header.is_logical = flags_class >> 9 & 1 - * header.is_global = flags_class >> 10 & 1 - */ - __pyx_v_header->mclass = __pyx_v_mc; - - /* 
"scipy/io/matlab/mio5_utils.pyx":548 - * mc = flags_class & 0xFF - * header.mclass = mc - * header.is_logical = flags_class >> 9 & 1 # <<<<<<<<<<<<<< - * header.is_global = flags_class >> 10 & 1 - * header.is_complex = flags_class >> 11 & 1 - */ - __pyx_v_header->is_logical = ((__pyx_v_flags_class >> 9) & 1); - - /* "scipy/io/matlab/mio5_utils.pyx":549 - * header.mclass = mc - * header.is_logical = flags_class >> 9 & 1 - * header.is_global = flags_class >> 10 & 1 # <<<<<<<<<<<<<< - * header.is_complex = flags_class >> 11 & 1 - * header.nzmax = nzmax - */ - __pyx_v_header->is_global = ((__pyx_v_flags_class >> 10) & 1); - - /* "scipy/io/matlab/mio5_utils.pyx":550 - * header.is_logical = flags_class >> 9 & 1 - * header.is_global = flags_class >> 10 & 1 - * header.is_complex = flags_class >> 11 & 1 # <<<<<<<<<<<<<< - * header.nzmax = nzmax - * # all miMATRIX types except the mxOPAQUE_CLASS have dims and a - */ - __pyx_v_header->is_complex = ((__pyx_v_flags_class >> 11) & 1); - - /* "scipy/io/matlab/mio5_utils.pyx":551 - * header.is_global = flags_class >> 10 & 1 - * header.is_complex = flags_class >> 11 & 1 - * header.nzmax = nzmax # <<<<<<<<<<<<<< - * # all miMATRIX types except the mxOPAQUE_CLASS have dims and a - * # name. - */ - __pyx_v_header->nzmax = __pyx_v_nzmax; - - /* "scipy/io/matlab/mio5_utils.pyx":554 - * # all miMATRIX types except the mxOPAQUE_CLASS have dims and a - * # name. - * if mc == mxOPAQUE_CLASS: # <<<<<<<<<<<<<< - * header.name = None - * header.dims = None - */ - __pyx_t_4 = (__pyx_v_mc == __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxOPAQUE_CLASS); - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":555 - * # name. 
- * if mc == mxOPAQUE_CLASS: - * header.name = None # <<<<<<<<<<<<<< - * header.dims = None - * return header - */ - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_header->name); - __Pyx_DECREF(__pyx_v_header->name); - __pyx_v_header->name = Py_None; - - /* "scipy/io/matlab/mio5_utils.pyx":556 - * if mc == mxOPAQUE_CLASS: - * header.name = None - * header.dims = None # <<<<<<<<<<<<<< - * return header - * header.n_dims = self.read_into_int32s(header.dims_ptr) - */ - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_header->dims); - __Pyx_DECREF(__pyx_v_header->dims); - __pyx_v_header->dims = Py_None; - - /* "scipy/io/matlab/mio5_utils.pyx":557 - * header.name = None - * header.dims = None - * return header # <<<<<<<<<<<<<< - * header.n_dims = self.read_into_int32s(header.dims_ptr) - * if header.n_dims > _MAT_MAXDIMS: - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_header)); - __pyx_r = __pyx_v_header; - goto __pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":558 - * header.dims = None - * return header - * header.n_dims = self.read_into_int32s(header.dims_ptr) # <<<<<<<<<<<<<< - * if header.n_dims > _MAT_MAXDIMS: - * raise ValueError('Too many dimensions (%d) for numpy arrays' - */ - __pyx_t_3 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_into_int32s(__pyx_v_self, __pyx_v_header->dims_ptr); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_header->n_dims = __pyx_t_3; - - /* "scipy/io/matlab/mio5_utils.pyx":559 - * return header - * header.n_dims = self.read_into_int32s(header.dims_ptr) - * if header.n_dims > _MAT_MAXDIMS: # <<<<<<<<<<<<<< - * raise ValueError('Too many dimensions (%d) for numpy arrays' - * % header.n_dims) - */ - __pyx_t_4 = (__pyx_v_header->n_dims > 32); - if (__pyx_t_4) { - - /* 
"scipy/io/matlab/mio5_utils.pyx":561 - * if header.n_dims > _MAT_MAXDIMS: - * raise ValueError('Too many dimensions (%d) for numpy arrays' - * % header.n_dims) # <<<<<<<<<<<<<< - * # convert dims to list - * header.dims = [] - */ - __pyx_t_1 = PyInt_FromLong(__pyx_v_header->n_dims); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_14), __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "scipy/io/matlab/mio5_utils.pyx":563 - * % header.n_dims) - * # convert dims to list - * header.dims = [] # <<<<<<<<<<<<<< - * for i in range(header.n_dims): - * header.dims.append(header.dims_ptr[i]) - */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - 
__Pyx_GOTREF(__pyx_v_header->dims); - __Pyx_DECREF(__pyx_v_header->dims); - __pyx_v_header->dims = ((PyObject *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":564 - * # convert dims to list - * header.dims = [] - * for i in range(header.n_dims): # <<<<<<<<<<<<<< - * header.dims.append(header.dims_ptr[i]) - * header.name = self.read_int8_string() - */ - __pyx_t_3 = __pyx_v_header->n_dims; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "scipy/io/matlab/mio5_utils.pyx":565 - * header.dims = [] - * for i in range(header.n_dims): - * header.dims.append(header.dims_ptr[i]) # <<<<<<<<<<<<<< - * header.name = self.read_int8_string() - * return header - */ - __pyx_t_2 = __Pyx_PyInt_to_py_npy_int32((__pyx_v_header->dims_ptr[__pyx_v_i])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_Append(__pyx_v_header->dims, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":566 - * for i in range(header.n_dims): - * header.dims.append(header.dims_ptr[i]) - * header.name = self.read_int8_string() # <<<<<<<<<<<<<< - * return header - * - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_int8_string(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_v_header->name); - __Pyx_DECREF(__pyx_v_header->name); - __pyx_v_header->name = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":567 - * 
header.dims.append(header.dims_ptr[i]) - * header.name = self.read_int8_string() - * return header # <<<<<<<<<<<<<< - * - * cdef inline size_t size_from_header(self, VarHeader5 header): - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_header)); - __pyx_r = __pyx_v_header; - goto __pyx_L0; - - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_header", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_header); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":522 - * return 0 - * - * cpdef VarHeader5 read_header(self): # <<<<<<<<<<<<<< - * ''' Return matrix header for current stream position - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header[] = " Return matrix header for current stream position\n\n Returns matrix headers at top level and sub levels\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_header"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_header(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 
*)__pyx_v_self), 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_header", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":569 - * return header - * - * cdef inline size_t size_from_header(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Supporting routine for calculating array sizes from header - * - */ - -static CYTHON_INLINE size_t __pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_size_from_header(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header) { - size_t __pyx_v_size; - int __pyx_v_i; - size_t __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - __Pyx_RefNannySetupContext("size_from_header"); - - /* "scipy/io/matlab/mio5_utils.pyx":586 - * ''' - * # calculate number of items in array from dims product - * cdef size_t size = 1 # <<<<<<<<<<<<<< - * cdef int i - * for i in range(header.n_dims): - */ - __pyx_v_size = 1; - - /* "scipy/io/matlab/mio5_utils.pyx":588 - * cdef size_t size = 1 - * cdef int i - * for i in range(header.n_dims): # <<<<<<<<<<<<<< - * size *= header.dims_ptr[i] - * return size - */ - __pyx_t_1 = __pyx_v_header->n_dims; - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - /* "scipy/io/matlab/mio5_utils.pyx":589 - * cdef int i - * for i in range(header.n_dims): - * size *= header.dims_ptr[i] # <<<<<<<<<<<<<< - * return size - * - */ - __pyx_v_size = (__pyx_v_size * 
(__pyx_v_header->dims_ptr[__pyx_v_i])); - } - - /* "scipy/io/matlab/mio5_utils.pyx":590 - * for i in range(header.n_dims): - * size *= header.dims_ptr[i] - * return size # <<<<<<<<<<<<<< - * - * cdef read_mi_matrix(self, int process=1): - */ - __pyx_r = __pyx_v_size; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":592 - * return size - * - * cdef read_mi_matrix(self, int process=1): # <<<<<<<<<<<<<< - * ''' Read header with matrix at sub-levels - * - */ - -static PyObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix *__pyx_optional_args) { - int __pyx_v_process = ((int)1); - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header = 0; - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_mi_matrix"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_process = __pyx_optional_args->process; - } - } - - /* "scipy/io/matlab/mio5_utils.pyx":613 - * object arr - * # read full tag - * self.cread_full_tag(&mdtype, &byte_count) # <<<<<<<<<<<<<< - * if mdtype != miMATRIX: - * raise TypeError('Expecting matrix here') - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->cread_full_tag(__pyx_v_self, (&__pyx_v_mdtype), (&__pyx_v_byte_count)); if (unlikely(__pyx_t_1 == -1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 613; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":614 - * # read full tag - * self.cread_full_tag(&mdtype, &byte_count) - * if mdtype != miMATRIX: # <<<<<<<<<<<<<< - * raise TypeError('Expecting matrix here') - * if byte_count == 0: # empty matrix - */ - __pyx_t_2 = (__pyx_v_mdtype != __pyx_e_5scipy_2io_6matlab_10mio5_utils_miMATRIX); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":615 - * self.cread_full_tag(&mdtype, &byte_count) - * if mdtype != miMATRIX: - * raise TypeError('Expecting matrix here') # <<<<<<<<<<<<<< - * if byte_count == 0: # empty matrix - * if process and self.squeeze_me: - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 615; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 615; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":616 - * if mdtype != miMATRIX: - * raise TypeError('Expecting matrix here') - * if byte_count == 0: # empty matrix # <<<<<<<<<<<<<< - * if process and self.squeeze_me: - * return np.array([]) - */ - __pyx_t_2 = (__pyx_v_byte_count == 0); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":617 - * raise TypeError('Expecting matrix here') - * if byte_count == 0: # empty matrix - * if process and self.squeeze_me: # <<<<<<<<<<<<<< - * return np.array([]) - * else: - */ - if (__pyx_v_process) { - __pyx_t_2 = __pyx_v_self->squeeze_me; - } else { - __pyx_t_2 = __pyx_v_process; - } - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":618 - * if byte_count == 0: # empty matrix - * if process and self.squeeze_me: - * return np.array([]) # <<<<<<<<<<<<<< - * else: - * return np.array([[]]) 
- */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - goto __pyx_L5; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":620 - * return np.array([]) - * else: - * return np.array([[]]) # <<<<<<<<<<<<<< - * header = self.read_header() - * return self.array_from_header(header, process) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 620; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyList_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - } - __pyx_L5:; - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":621 - * else: - * return np.array([[]]) - * header = self.read_header() # <<<<<<<<<<<<<< - * return self.array_from_header(header, process) - * - */ - __pyx_t_4 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_header(__pyx_v_self, 0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 621; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_v_header = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_t_4); - __pyx_t_4 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":622 
- * return np.array([[]]) - * header = self.read_header() - * return self.array_from_header(header, process) # <<<<<<<<<<<<<< - * - * cpdef array_from_header(self, VarHeader5 header, int process=1): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_6.__pyx_n = 1; - __pyx_t_6.process = __pyx_v_process; - __pyx_t_4 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->array_from_header(__pyx_v_self, __pyx_v_header, 0, &__pyx_t_6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_mi_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_header); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":624 - * return self.array_from_header(header, process) - * - * cpdef array_from_header(self, VarHeader5 header, int process=1): # <<<<<<<<<<<<<< - * ''' Read array of any class, given matrix `header` - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header *__pyx_optional_args) { - int __pyx_v_process = ((int)1); - PyObject *__pyx_v_arr = 0; - PyArray_Descr 
*__pyx_v_mat_dtype = 0; - int __pyx_v_mc; - PyObject *__pyx_v_classname = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("array_from_header"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_process = __pyx_optional_args->process; - } - } - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__array_from_header); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header)) { - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyInt_FromLong(__pyx_v_process); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(((PyObject *)__pyx_v_header)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_header)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_header)); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":642 - * object arr - * cnp.dtype mat_dtype - * cdef int mc = header.mclass # <<<<<<<<<<<<<< - * if (mc == mxDOUBLE_CLASS - * or mc == mxSINGLE_CLASS - */ - __pyx_v_mc = __pyx_v_header->mclass; - - /* "scipy/io/matlab/mio5_utils.pyx":681 - * # to make them more re-writeable - don't squeeze - * return arr - * elif mc == mxOPAQUE_CLASS: # <<<<<<<<<<<<<< - * arr = self.read_opaque(header) - * arr = mio5p.MatlabOpaque(arr) - */ - switch (__pyx_v_mc) { - - /* "scipy/io/matlab/mio5_utils.pyx":644 - * cdef int mc = header.mclass - * if (mc == mxDOUBLE_CLASS - * or mc == mxSINGLE_CLASS # <<<<<<<<<<<<<< - * or mc == mxINT8_CLASS - * or mc == mxUINT8_CLASS - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxDOUBLE_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":645 - * if (mc == mxDOUBLE_CLASS - * or mc == mxSINGLE_CLASS - * or mc == mxINT8_CLASS # <<<<<<<<<<<<<< - * or mc == mxUINT8_CLASS - * or mc == mxINT16_CLASS - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxSINGLE_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":646 - * or mc == mxSINGLE_CLASS - * or mc == mxINT8_CLASS - * or mc == mxUINT8_CLASS # <<<<<<<<<<<<<< - * or mc == mxINT16_CLASS - * or mc == mxUINT16_CLASS - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT8_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":647 - * or mc == mxINT8_CLASS - * or mc == mxUINT8_CLASS - * or mc == mxINT16_CLASS # <<<<<<<<<<<<<< - * or mc == mxUINT16_CLASS - * or mc == mxINT32_CLASS - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT8_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":648 - * or mc == mxUINT8_CLASS - * or mc == mxINT16_CLASS - * or mc == mxUINT16_CLASS # <<<<<<<<<<<<<< - * or mc == mxINT32_CLASS - * or mc == mxUINT32_CLASS - */ - case 
__pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT16_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":649 - * or mc == mxINT16_CLASS - * or mc == mxUINT16_CLASS - * or mc == mxINT32_CLASS # <<<<<<<<<<<<<< - * or mc == mxUINT32_CLASS - * or mc == mxINT64_CLASS - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT16_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":650 - * or mc == mxUINT16_CLASS - * or mc == mxINT32_CLASS - * or mc == mxUINT32_CLASS # <<<<<<<<<<<<<< - * or mc == mxINT64_CLASS - * or mc == mxUINT64_CLASS): # numeric matrix - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT32_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":651 - * or mc == mxINT32_CLASS - * or mc == mxUINT32_CLASS - * or mc == mxINT64_CLASS # <<<<<<<<<<<<<< - * or mc == mxUINT64_CLASS): # numeric matrix - * arr = self.read_real_complex(header) - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT32_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":652 - * or mc == mxUINT32_CLASS - * or mc == mxINT64_CLASS - * or mc == mxUINT64_CLASS): # numeric matrix # <<<<<<<<<<<<<< - * arr = self.read_real_complex(header) - * if process and self.mat_dtype: # might need to recast - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxINT64_CLASS: - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxUINT64_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":653 - * or mc == mxINT64_CLASS - * or mc == mxUINT64_CLASS): # numeric matrix - * arr = self.read_real_complex(header) # <<<<<<<<<<<<<< - * if process and self.mat_dtype: # might need to recast - * if header.is_logical: - */ - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_real_complex(__pyx_v_self, __pyx_v_header, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_arr = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":654 - * or mc == 
mxUINT64_CLASS): # numeric matrix - * arr = self.read_real_complex(header) - * if process and self.mat_dtype: # might need to recast # <<<<<<<<<<<<<< - * if header.is_logical: - * mat_dtype = self.bool_dtype - */ - if (__pyx_v_process) { - __pyx_t_4 = __pyx_v_self->mat_dtype; - } else { - __pyx_t_4 = __pyx_v_process; - } - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":655 - * arr = self.read_real_complex(header) - * if process and self.mat_dtype: # might need to recast - * if header.is_logical: # <<<<<<<<<<<<<< - * mat_dtype = self.bool_dtype - * else: - */ - if (__pyx_v_header->is_logical) { - - /* "scipy/io/matlab/mio5_utils.pyx":656 - * if process and self.mat_dtype: # might need to recast - * if header.is_logical: - * mat_dtype = self.bool_dtype # <<<<<<<<<<<<<< - * else: - * mat_dtype = self.class_dtypes[mc] - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self->bool_dtype)); - __pyx_v_mat_dtype = __pyx_v_self->bool_dtype; - goto __pyx_L4; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":658 - * mat_dtype = self.bool_dtype - * else: - * mat_dtype = self.class_dtypes[mc] # <<<<<<<<<<<<<< - * arr = arr.astype(mat_dtype) - * elif mc == mxSPARSE_CLASS: - */ - if (!(likely(((((PyObject *)(__pyx_v_self->class_dtypes[__pyx_v_mc]))) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)(__pyx_v_self->class_dtypes[__pyx_v_mc])), __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_INCREF(((PyObject *)(__pyx_v_self->class_dtypes[__pyx_v_mc]))); - __pyx_v_mat_dtype = ((PyArray_Descr *)((PyObject *)(__pyx_v_self->class_dtypes[__pyx_v_mc]))); - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":659 - * else: - * mat_dtype = self.class_dtypes[mc] - * arr = arr.astype(mat_dtype) # <<<<<<<<<<<<<< - * elif mc == mxSPARSE_CLASS: - * arr = self.read_sparse(header) - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_arr, __pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_mat_dtype)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_mat_dtype)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_mat_dtype)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L3; - } - __pyx_L3:; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":660 - * mat_dtype = self.class_dtypes[mc] - * arr = arr.astype(mat_dtype) - * elif mc == mxSPARSE_CLASS: # <<<<<<<<<<<<<< - * arr = self.read_sparse(header) - * # no current processing makes sense for sparse - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxSPARSE_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":661 - * arr = arr.astype(mat_dtype) - * elif mc == mxSPARSE_CLASS: - * arr = self.read_sparse(header) # <<<<<<<<<<<<<< - * # no current processing makes sense for sparse - * return arr - */ - __pyx_t_3 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_sparse(__pyx_v_self, __pyx_v_header); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_arr = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":663 - * arr = self.read_sparse(header) - * # no current processing makes sense for sparse - * return arr # <<<<<<<<<<<<<< - * elif mc == mxCHAR_CLASS: - * arr = 
self.read_char(header) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_arr); - __pyx_r = __pyx_v_arr; - goto __pyx_L0; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":664 - * # no current processing makes sense for sparse - * return arr - * elif mc == mxCHAR_CLASS: # <<<<<<<<<<<<<< - * arr = self.read_char(header) - * if process and self.chars_as_strings: - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxCHAR_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":665 - * return arr - * elif mc == mxCHAR_CLASS: - * arr = self.read_char(header) # <<<<<<<<<<<<<< - * if process and self.chars_as_strings: - * arr = chars_to_strings(arr) - */ - __pyx_t_3 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_char(__pyx_v_self, __pyx_v_header, 0)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_arr = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":666 - * elif mc == mxCHAR_CLASS: - * arr = self.read_char(header) - * if process and self.chars_as_strings: # <<<<<<<<<<<<<< - * arr = chars_to_strings(arr) - * elif mc == mxCELL_CLASS: - */ - if (__pyx_v_process) { - __pyx_t_4 = __pyx_v_self->chars_as_strings; - } else { - __pyx_t_4 = __pyx_v_process; - } - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":667 - * arr = self.read_char(header) - * if process and self.chars_as_strings: - * arr = chars_to_strings(arr) # <<<<<<<<<<<<<< - * elif mc == mxCELL_CLASS: - * arr = self.read_cells(header) - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__chars_to_strings); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_arr); - __Pyx_GIVEREF(__pyx_v_arr); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L5; - } - __pyx_L5:; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":668 - * if process and self.chars_as_strings: - * arr = chars_to_strings(arr) - * elif mc == mxCELL_CLASS: # <<<<<<<<<<<<<< - * arr = self.read_cells(header) - * elif mc == mxSTRUCT_CLASS: - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxCELL_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":669 - * arr = chars_to_strings(arr) - * elif mc == mxCELL_CLASS: - * arr = self.read_cells(header) # <<<<<<<<<<<<<< - * elif mc == mxSTRUCT_CLASS: - * arr = self.read_struct(header) - */ - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_cells(__pyx_v_self, __pyx_v_header, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 669; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_arr = __pyx_t_1; - __pyx_t_1 = 0; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":670 - * elif mc == mxCELL_CLASS: - * arr = self.read_cells(header) - * elif mc == mxSTRUCT_CLASS: # <<<<<<<<<<<<<< - * arr = self.read_struct(header) - * elif mc == mxOBJECT_CLASS: # like structs, but with classname - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxSTRUCT_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":671 - * arr = self.read_cells(header) - * elif mc == mxSTRUCT_CLASS: - * arr = self.read_struct(header) # <<<<<<<<<<<<<< - * elif mc == mxOBJECT_CLASS: # 
like structs, but with classname - * classname = asstr(self.read_int8_string()) - */ - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_struct(__pyx_v_self, __pyx_v_header, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 671; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_arr = __pyx_t_1; - __pyx_t_1 = 0; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":672 - * elif mc == mxSTRUCT_CLASS: - * arr = self.read_struct(header) - * elif mc == mxOBJECT_CLASS: # like structs, but with classname # <<<<<<<<<<<<<< - * classname = asstr(self.read_int8_string()) - * arr = self.read_struct(header) - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxOBJECT_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":673 - * arr = self.read_struct(header) - * elif mc == mxOBJECT_CLASS: # like structs, but with classname - * classname = asstr(self.read_int8_string()) # <<<<<<<<<<<<<< - * arr = self.read_struct(header) - * arr = mio5p.MatlabObject(arr, classname) - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__asstr); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_int8_string(__pyx_v_self); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_classname = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":674 - * elif mc == mxOBJECT_CLASS: # like structs, but with classname - * classname = asstr(self.read_int8_string()) - * arr = self.read_struct(header) # <<<<<<<<<<<<<< - * arr = mio5p.MatlabObject(arr, classname) - * elif mc == mxFUNCTION_CLASS: # just a matrix of struct type - */ - __pyx_t_2 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_struct(__pyx_v_self, __pyx_v_header, 0)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 674; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_arr = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":675 - * classname = asstr(self.read_int8_string()) - * arr = self.read_struct(header) - * arr = mio5p.MatlabObject(arr, classname) # <<<<<<<<<<<<<< - * elif mc == mxFUNCTION_CLASS: # just a matrix of struct type - * arr = self.read_mi_matrix() - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__MatlabObject); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_arr); - 
__Pyx_GIVEREF(__pyx_v_arr); - __Pyx_INCREF(__pyx_v_classname); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_classname); - __Pyx_GIVEREF(__pyx_v_classname); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_t_1; - __pyx_t_1 = 0; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":676 - * arr = self.read_struct(header) - * arr = mio5p.MatlabObject(arr, classname) - * elif mc == mxFUNCTION_CLASS: # just a matrix of struct type # <<<<<<<<<<<<<< - * arr = self.read_mi_matrix() - * arr = mio5p.MatlabFunction(arr) - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxFUNCTION_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":677 - * arr = mio5p.MatlabObject(arr, classname) - * elif mc == mxFUNCTION_CLASS: # just a matrix of struct type - * arr = self.read_mi_matrix() # <<<<<<<<<<<<<< - * arr = mio5p.MatlabFunction(arr) - * # to make them more re-writeable - don't squeeze - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_mi_matrix(__pyx_v_self, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_arr = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":678 - * elif mc == mxFUNCTION_CLASS: # just a matrix of struct type - * arr = self.read_mi_matrix() - * arr = mio5p.MatlabFunction(arr) # <<<<<<<<<<<<<< - * # to make them more re-writeable - don't squeeze - * return arr - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__MatlabFunction); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_arr); - __Pyx_GIVEREF(__pyx_v_arr); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":680 - * arr = mio5p.MatlabFunction(arr) - * # to make them more re-writeable - don't squeeze - * return arr # <<<<<<<<<<<<<< - * elif mc == mxOPAQUE_CLASS: - * arr = self.read_opaque(header) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_arr); - __pyx_r = __pyx_v_arr; - goto __pyx_L0; - break; - - /* "scipy/io/matlab/mio5_utils.pyx":681 - * # to make them more re-writeable - don't squeeze - * return arr - * elif mc == mxOPAQUE_CLASS: # <<<<<<<<<<<<<< - * arr = self.read_opaque(header) - * arr = mio5p.MatlabOpaque(arr) - */ - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_mxOPAQUE_CLASS: - - /* "scipy/io/matlab/mio5_utils.pyx":682 - * return arr - * elif mc == mxOPAQUE_CLASS: - * arr = self.read_opaque(header) # <<<<<<<<<<<<<< - * arr = mio5p.MatlabOpaque(arr) - * # to make them more re-writeable - don't squeeze - */ - __pyx_t_3 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 
*)__pyx_v_self->__pyx_vtab)->read_opaque(__pyx_v_self, __pyx_v_header, 0)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_arr = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":683 - * elif mc == mxOPAQUE_CLASS: - * arr = self.read_opaque(header) - * arr = mio5p.MatlabOpaque(arr) # <<<<<<<<<<<<<< - * # to make them more re-writeable - don't squeeze - * return arr - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 683; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__MatlabOpaque); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 683; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 683; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_arr); - __Pyx_GIVEREF(__pyx_v_arr); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 683; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_arr); - __pyx_v_arr = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":685 - * arr = mio5p.MatlabOpaque(arr) - * # to make them more re-writeable - don't squeeze - * return arr # <<<<<<<<<<<<<< - * if process and self.squeeze_me: - * return squeeze_element(arr) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_arr); - __pyx_r = __pyx_v_arr; - goto __pyx_L0; 
- break; - } - - /* "scipy/io/matlab/mio5_utils.pyx":686 - * # to make them more re-writeable - don't squeeze - * return arr - * if process and self.squeeze_me: # <<<<<<<<<<<<<< - * return squeeze_element(arr) - * return arr - */ - if (__pyx_v_process) { - __pyx_t_4 = __pyx_v_self->squeeze_me; - } else { - __pyx_t_4 = __pyx_v_process; - } - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":687 - * return arr - * if process and self.squeeze_me: - * return squeeze_element(arr) # <<<<<<<<<<<<<< - * return arr - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__squeeze_element); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (unlikely(!__pyx_v_arr)) { __Pyx_RaiseUnboundLocalError("arr"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_arr); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_arr); - __Pyx_GIVEREF(__pyx_v_arr); - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - goto __pyx_L6; - } - __pyx_L6:; - - /* "scipy/io/matlab/mio5_utils.pyx":688 - * if process and self.squeeze_me: - * return squeeze_element(arr) - * return arr # <<<<<<<<<<<<<< - * - * cpdef cnp.ndarray read_real_complex(self, VarHeader5 header): - */ - __Pyx_XDECREF(__pyx_r); - if (unlikely(!__pyx_v_arr)) { __Pyx_RaiseUnboundLocalError("arr"); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }__Pyx_INCREF(__pyx_v_arr); - __pyx_r = __pyx_v_arr; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.array_from_header", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_arr); - __Pyx_XDECREF((PyObject *)__pyx_v_mat_dtype); - __Pyx_XDECREF(__pyx_v_classname); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":624 - * return self.array_from_header(header, process) - * - * cpdef array_from_header(self, VarHeader5 header, int process=1): # <<<<<<<<<<<<<< - * ''' Read array of any class, given matrix `header` - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header[] = " Read array of any class, given matrix `header`\n\n Parameters\n ----------\n header : VarHeader5\n array header object\n process : int, optional\n If not zero, apply post-processing on returned array\n \n Returns\n -------\n arr : array or sparse array\n read array\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header = 0; - int __pyx_v_process; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject 
**__pyx_pyargnames[] = {&__pyx_n_s__header,&__pyx_n_s__process,0}; - __Pyx_RefNannySetupContext("array_from_header"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__header); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__process); - if (value) { values[1] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "array_from_header") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_header = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)values[0]); - if (values[1]) { - __pyx_v_process = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_process == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_process = ((int)1); - } - } else { - __pyx_v_process = ((int)1); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: __pyx_v_process = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_process == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 1: __pyx_v_header = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)PyTuple_GET_ITEM(__pyx_args, 0)); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - 
__pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("array_from_header", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.array_from_header", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_header), __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5, 1, "header", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.process = __pyx_v_process; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->array_from_header(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), __pyx_v_header, 1, &__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.array_from_header", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":690 - * return arr - * - * cpdef cnp.ndarray read_real_complex(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read real / complex matrices from stream ''' - * cdef: - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ 
-static PyArrayObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_real_complex(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header, int __pyx_skip_dispatch) { - PyArrayObject *__pyx_v_res = 0; - PyArrayObject *__pyx_v_res_j = 0; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric __pyx_t_4; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_real_complex"); - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_real_complex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex)) { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_header)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_header)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_header)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); 
__pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":694 - * cdef: - * cnp.ndarray res, res_j - * if header.is_complex: # <<<<<<<<<<<<<< - * # avoid array copy to save memory - * res = self.read_numeric(False) - */ - if (__pyx_v_header->is_complex) { - - /* "scipy/io/matlab/mio5_utils.pyx":696 - * if header.is_complex: - * # avoid array copy to save memory - * res = self.read_numeric(False) # <<<<<<<<<<<<<< - * res_j = self.read_numeric(False) - * # Use c8 for f4s and c16 for f8 input. Just ``res = res + res_j * - */ - __pyx_t_4.__pyx_n = 1; - __pyx_t_4.copy = 0; - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, &__pyx_t_4)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_res = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":697 - * # avoid array copy to save memory - * res = self.read_numeric(False) - * res_j = self.read_numeric(False) # <<<<<<<<<<<<<< - * # Use c8 for f4s and c16 for f8 input. Just ``res = res + res_j * - * # 1j`` upcasts to c16 regardless of input type. 
- */ - __pyx_t_4.__pyx_n = 1; - __pyx_t_4.copy = 0; - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, &__pyx_t_4)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_res_j = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":700 - * # Use c8 for f4s and c16 for f8 input. Just ``res = res + res_j * - * # 1j`` upcasts to c16 regardless of input type. - * if res.itemsize == 4: # <<<<<<<<<<<<<< - * res = res.astype('c8') - * else: - */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_res), __pyx_n_s__itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_int_4, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_5) { - - /* "scipy/io/matlab/mio5_utils.pyx":701 - * # 1j`` upcasts to c16 regardless of input type. 
- * if res.itemsize == 4: - * res = res.astype('c8') # <<<<<<<<<<<<<< - * else: - * res = res.astype('c16') - */ - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_res), __pyx_n_s__astype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_17), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_res)); - __pyx_v_res = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - goto __pyx_L4; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":703 - * res = res.astype('c8') - * else: - * res = res.astype('c16') # <<<<<<<<<<<<<< - * res.imag = res_j - * else: - */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_res), __pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_k_tuple_18), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_res)); - __pyx_v_res = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":704 - * 
else: - * res = res.astype('c16') - * res.imag = res_j # <<<<<<<<<<<<<< - * else: - * res = self.read_numeric() - */ - if (PyObject_SetAttr(((PyObject *)__pyx_v_res), __pyx_n_s__imag, ((PyObject *)__pyx_v_res_j)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":706 - * res.imag = res_j - * else: - * res = self.read_numeric() # <<<<<<<<<<<<<< - * return res.reshape(header.dims[::-1]).T - * - */ - __pyx_t_3 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, NULL)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_res = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":707 - * else: - * res = self.read_numeric() - * return res.reshape(header.dims[::-1]).T # <<<<<<<<<<<<<< - * - * cdef object read_sparse(self, VarHeader5 header): - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_res), __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetItem(__pyx_v_header->dims, __pyx_k_slice_19); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__T); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_real_complex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_res); - __Pyx_XDECREF((PyObject *)__pyx_v_res_j); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":690 - * return arr - * - * cpdef cnp.ndarray read_real_complex(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read real / complex matrices from stream ''' - * cdef: - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex[] = " Read real / complex matrices from stream "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex(PyObject *__pyx_v_self, PyObject *__pyx_v_header) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 
= NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_real_complex"); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_header), __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5, 1, "header", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_real_complex(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_header), 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_real_complex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":709 - * return res.reshape(header.dims[::-1]).T - * - * cdef object read_sparse(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read sparse matrices from stream ''' - * cdef cnp.ndarray rowind, indptr, data, data_j - */ - -static PyObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_sparse(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header) { - PyArrayObject *__pyx_v_rowind = 0; - PyArrayObject *__pyx_v_indptr = 0; - PyArrayObject *__pyx_v_data = 0; - PyArrayObject *__pyx_v_data_j = 0; - size_t 
__pyx_v_M; - size_t __pyx_v_N; - size_t __pyx_v_nnz; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *(*__pyx_t_5)(PyObject *); - size_t __pyx_t_6; - size_t __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_sparse"); - - /* "scipy/io/matlab/mio5_utils.pyx":713 - * cdef cnp.ndarray rowind, indptr, data, data_j - * cdef size_t M, N, nnz - * rowind = self.read_numeric() # <<<<<<<<<<<<<< - * indptr = self.read_numeric() - * if header.is_complex: - */ - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 713; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_rowind = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":714 - * cdef size_t M, N, nnz - * rowind = self.read_numeric() - * indptr = self.read_numeric() # <<<<<<<<<<<<<< - * if header.is_complex: - * # avoid array copy to save memory - */ - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_indptr = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":715 - * rowind = self.read_numeric() - * indptr = self.read_numeric() - * if header.is_complex: # <<<<<<<<<<<<<< - * # avoid array copy to save memory 
- * data = self.read_numeric(False) - */ - if (__pyx_v_header->is_complex) { - - /* "scipy/io/matlab/mio5_utils.pyx":717 - * if header.is_complex: - * # avoid array copy to save memory - * data = self.read_numeric(False) # <<<<<<<<<<<<<< - * data_j = self.read_numeric(False) - * data = data + (data_j * 1j) - */ - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.copy = 0; - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, &__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_data = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":718 - * # avoid array copy to save memory - * data = self.read_numeric(False) - * data_j = self.read_numeric(False) # <<<<<<<<<<<<<< - * data = data + (data_j * 1j) - * else: - */ - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.copy = 0; - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, &__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_data_j = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":719 - * data = self.read_numeric(False) - * data_j = self.read_numeric(False) - * data = data + (data_j * 1j) # <<<<<<<<<<<<<< - * else: - * data = self.read_numeric() - */ - __pyx_t_1 = PyComplex_FromDoubles(0.0, 1.0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(((PyObject *)__pyx_v_data_j), __pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(((PyObject *)__pyx_v_data), __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_data)); - __pyx_v_data = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":721 - * data = data + (data_j * 1j) - * else: - * data = self.read_numeric() # <<<<<<<<<<<<<< - * ''' From the matlab (TM) API documentation, last found here: - * http://www.mathworks.com/access/helpdesk/help/techdoc/matlab_external/ - */ - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_numeric(__pyx_v_self, 0, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_data = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":734 - * to each rowind - * ''' - * M,N = header.dims # <<<<<<<<<<<<<< - * indptr = indptr[:N+1] - * nnz = indptr[-1] - */ - if ((likely(PyTuple_CheckExact(__pyx_v_header->dims))) || (PyList_CheckExact(__pyx_v_header->dims))) { - PyObject* sequence = __pyx_v_header->dims; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - } - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - } else { - Py_ssize_t index = -1; - __pyx_t_4 = PyObject_GetIter(__pyx_v_header->dims); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; - index = 0; __pyx_t_1 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_1)) goto __pyx_L4_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - index = 1; __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) goto __pyx_L4_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L5_unpacking_done; - __pyx_L4_unpacking_failed:; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L5_unpacking_done:; - } - __pyx_t_6 = __Pyx_PyInt_AsSize_t(__pyx_t_1); if (unlikely((__pyx_t_6 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyInt_AsSize_t(__pyx_t_3); if 
(unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_M = __pyx_t_6; - __pyx_v_N = __pyx_t_7; - - /* "scipy/io/matlab/mio5_utils.pyx":735 - * ''' - * M,N = header.dims - * indptr = indptr[:N+1] # <<<<<<<<<<<<<< - * nnz = indptr[-1] - * rowind = rowind[:nnz] - */ - __pyx_t_3 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_indptr), 0, (__pyx_v_N + 1)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_indptr)); - __pyx_v_indptr = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":736 - * M,N = header.dims - * indptr = indptr[:N+1] - * nnz = indptr[-1] # <<<<<<<<<<<<<< - * rowind = rowind[:nnz] - * data = data[:nnz] - */ - __pyx_t_3 = __Pyx_GetItemInt(((PyObject *)__pyx_v_indptr), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 736; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyInt_AsSize_t(__pyx_t_3); if (unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 736; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_nnz = __pyx_t_7; - - /* "scipy/io/matlab/mio5_utils.pyx":737 - * indptr = indptr[:N+1] - * nnz = indptr[-1] - * rowind = rowind[:nnz] # <<<<<<<<<<<<<< - * data = data[:nnz] - * return scipy.sparse.csc_matrix( - */ - __pyx_t_3 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_rowind), 0, __pyx_v_nnz); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 737; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 737; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_rowind)); - __pyx_v_rowind = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":738 - * nnz = indptr[-1] - * rowind = rowind[:nnz] - * data = data[:nnz] # <<<<<<<<<<<<<< - * return scipy.sparse.csc_matrix( - * (data,rowind,indptr), - */ - __pyx_t_3 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_data), 0, __pyx_v_nnz); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_data)); - __pyx_v_data = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":739 - * rowind = rowind[:nnz] - * data = data[:nnz] - * return scipy.sparse.csc_matrix( # <<<<<<<<<<<<<< - * (data,rowind,indptr), - * shape=(M,N)) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__scipy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__sparse); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__csc_matrix); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":740 - * data = data[:nnz] - * return scipy.sparse.csc_matrix( - * (data,rowind,indptr), # <<<<<<<<<<<<<< - * shape=(M,N)) - * - */ - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 740; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((PyObject *)__pyx_v_data)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_data)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_data)); - __Pyx_INCREF(((PyObject *)__pyx_v_rowind)); - PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_rowind)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_rowind)); - __Pyx_INCREF(((PyObject *)__pyx_v_indptr)); - PyTuple_SET_ITEM(__pyx_t_1, 2, ((PyObject *)__pyx_v_indptr)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_indptr)); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":739 - * rowind = rowind[:nnz] - * data = data[:nnz] - * return scipy.sparse.csc_matrix( # <<<<<<<<<<<<<< - * (data,rowind,indptr), - * shape=(M,N)) - */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - - /* "scipy/io/matlab/mio5_utils.pyx":741 - * return scipy.sparse.csc_matrix( - * (data,rowind,indptr), - * shape=(M,N)) # <<<<<<<<<<<<<< - * - * cpdef cnp.ndarray read_char(self, VarHeader5 header): - */ - __pyx_t_8 = __Pyx_PyInt_FromSize_t(__pyx_v_M); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = __Pyx_PyInt_FromSize_t(__pyx_v_N); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_10)); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); - __Pyx_GIVEREF(__pyx_t_9); - __pyx_t_8 = 0; - __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__shape), ((PyObject *)__pyx_t_10)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0; - __pyx_t_10 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_r = __pyx_t_10; - __pyx_t_10 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_sparse", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_rowind); - __Pyx_XDECREF((PyObject *)__pyx_v_indptr); - __Pyx_XDECREF((PyObject *)__pyx_v_data); - __Pyx_XDECREF((PyObject *)__pyx_v_data_j); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); 
- return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":743 - * shape=(M,N)) - * - * cpdef cnp.ndarray read_char(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read char matrices from stream as arrays - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static PyArrayObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_char(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header, int __pyx_skip_dispatch) { - __pyx_t_5numpy_uint32_t __pyx_v_mdtype; - __pyx_t_5numpy_uint32_t __pyx_v_byte_count; - char *__pyx_v_data_ptr; - PyObject *__pyx_v_data = 0; - PyObject *__pyx_v_codec = 0; - PyArrayObject *__pyx_v_arr = 0; - PyArray_Descr *__pyx_v_dt = 0; - size_t __pyx_v_length; - PyObject *__pyx_v_uc_str = NULL; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element __pyx_t_4; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_char"); - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_char); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char)) { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - 
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_header)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_header)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_header)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":770 - * cnp.ndarray arr - * cnp.dtype dt - * cdef size_t length = self.size_from_header(header) # <<<<<<<<<<<<<< - * data = self.read_element( - * &mdtype, &byte_count, &data_ptr, True) - */ - __pyx_v_length = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->size_from_header(__pyx_v_self, __pyx_v_header); - - /* "scipy/io/matlab/mio5_utils.pyx":772 - * cdef size_t length = self.size_from_header(header) - * data = self.read_element( - * &mdtype, &byte_count, &data_ptr, True) # <<<<<<<<<<<<<< - * # There are mat files in the wild that have 0 byte count strings, but - * # maybe with non-zero length. 
- */ - __pyx_t_4.__pyx_n = 1; - __pyx_t_4.copy = 1; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_element(__pyx_v_self, (&__pyx_v_mdtype), (&__pyx_v_byte_count), ((void **)(&__pyx_v_data_ptr)), &__pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_data = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":775 - * # There are mat files in the wild that have 0 byte count strings, but - * # maybe with non-zero length. - * if byte_count == 0: # <<<<<<<<<<<<<< - * arr = np.array(' ' * length, dtype='U') - * return np.ndarray(shape=header.dims, - */ - __pyx_t_5 = (__pyx_v_byte_count == 0); - if (__pyx_t_5) { - - /* "scipy/io/matlab/mio5_utils.pyx":776 - * # maybe with non-zero length. - * if byte_count == 0: - * arr = np.array(' ' * length, dtype='U') # <<<<<<<<<<<<<< - * return np.ndarray(shape=header.dims, - * dtype=self.U1_dtype, - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Multiply(((PyObject *)__pyx_kp_s_2), __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - 
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_n_s__U)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_arr = ((PyArrayObject *)__pyx_t_6); - __pyx_t_6 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":777 - * if byte_count == 0: - * arr = np.array(' ' * length, dtype='U') - * return np.ndarray(shape=header.dims, # <<<<<<<<<<<<<< - * dtype=self.U1_dtype, - * buffer=arr, - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__ndarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__shape), __pyx_v_header->dims) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":778 - * arr = np.array(' ' * length, dtype='U') - * return np.ndarray(shape=header.dims, - * dtype=self.U1_dtype, # <<<<<<<<<<<<<< - * buffer=arr, - * order='F') - */ - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_v_self->U1_dtype)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":779 - * return np.ndarray(shape=header.dims, - * dtype=self.U1_dtype, - * buffer=arr, # <<<<<<<<<<<<<< - * order='F') - * # Character data can be of apparently numerical types, - */ - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__buffer), ((PyObject *)__pyx_v_arr)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__order), ((PyObject *)__pyx_n_s__F)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_1 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":785 - * # a length 1 type encoding, like ascii, or length 2 type - * # encoding - * dt = self.dtypes[mdtype] # <<<<<<<<<<<<<< - * if mdtype == miUINT16: - * codec = self.uint16_codec - */ - __Pyx_INCREF(((PyObject *)((PyArray_Descr *)(__pyx_v_self->dtypes[__pyx_v_mdtype])))); - __pyx_v_dt = ((PyArray_Descr *)(__pyx_v_self->dtypes[__pyx_v_mdtype])); - - /* "scipy/io/matlab/mio5_utils.pyx":786 - * # encoding - * dt = self.dtypes[mdtype] - * if mdtype == miUINT16: # <<<<<<<<<<<<<< - * codec = self.uint16_codec - * if self.codecs['uint16_len'] == 1: # need LSBs only - */ - __pyx_t_5 = (__pyx_v_mdtype == __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUINT16); - if (__pyx_t_5) { - - /* "scipy/io/matlab/mio5_utils.pyx":787 - * dt = self.dtypes[mdtype] - * if mdtype == miUINT16: - * codec = self.uint16_codec # <<<<<<<<<<<<<< - * if self.codecs['uint16_len'] == 1: # need LSBs only - * arr = np.ndarray(shape=(length,), - */ - __Pyx_INCREF(__pyx_v_self->uint16_codec); - __pyx_v_codec = __pyx_v_self->uint16_codec; - - /* "scipy/io/matlab/mio5_utils.pyx":788 - * if mdtype == miUINT16: - * codec = self.uint16_codec - * if self.codecs['uint16_len'] == 1: # need LSBs only # <<<<<<<<<<<<<< - * arr = np.ndarray(shape=(length,), - * dtype=dt, - */ - __pyx_t_1 = PyObject_GetItem(__pyx_v_self->codecs, ((PyObject *)__pyx_n_s__uint16_len)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = PyObject_RichCompare(__pyx_t_1, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = 
__Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (__pyx_t_5) { - - /* "scipy/io/matlab/mio5_utils.pyx":789 - * codec = self.uint16_codec - * if self.codecs['uint16_len'] == 1: # need LSBs only - * arr = np.ndarray(shape=(length,), # <<<<<<<<<<<<<< - * dtype=dt, - * buffer=data) - */ - __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__ndarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__shape), ((PyObject *)__pyx_t_3)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":790 - * if self.codecs['uint16_len'] == 1: # need LSBs only - * arr = np.ndarray(shape=(length,), - * dtype=dt, # 
<<<<<<<<<<<<<< - * buffer=data) - * data = arr.astype(np.uint8).tostring() - */ - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_v_dt)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":791 - * arr = np.ndarray(shape=(length,), - * dtype=dt, - * buffer=data) # <<<<<<<<<<<<<< - * data = arr.astype(np.uint8).tostring() - * elif mdtype == miINT8 or mdtype == miUINT8: - */ - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__buffer), __pyx_v_data) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_arr = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":792 - * dtype=dt, - * buffer=data) - * data = arr.astype(np.uint8).tostring() # <<<<<<<<<<<<<< - * elif mdtype == miINT8 or mdtype == miUINT8: - * codec = 'ascii' - */ - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_s__astype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, 
__pyx_n_s__uint8); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__tostring); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_v_data); - __pyx_v_data = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L5; - } - __pyx_L5:; - goto __pyx_L4; - } - - /* "scipy/io/matlab/mio5_utils.pyx":793 - * buffer=data) - * data = arr.astype(np.uint8).tostring() - * elif mdtype == miINT8 or mdtype == miUINT8: # <<<<<<<<<<<<<< - * codec = 'ascii' - * elif mdtype in self.codecs: # encoded char data - */ - switch (__pyx_v_mdtype) { - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_miINT8: - case __pyx_e_5scipy_2io_6matlab_10mio5_utils_miUINT8: - __pyx_t_5 = 1; - break; - default: - __pyx_t_5 = 0; - break; - } - if (__pyx_t_5) { - - /* "scipy/io/matlab/mio5_utils.pyx":794 - * data 
= arr.astype(np.uint8).tostring() - * elif mdtype == miINT8 or mdtype == miUINT8: - * codec = 'ascii' # <<<<<<<<<<<<<< - * elif mdtype in self.codecs: # encoded char data - * codec = self.codecs[mdtype] - */ - __Pyx_INCREF(((PyObject *)__pyx_n_s__ascii)); - __pyx_v_codec = ((PyObject *)__pyx_n_s__ascii); - goto __pyx_L4; - } - - /* "scipy/io/matlab/mio5_utils.pyx":795 - * elif mdtype == miINT8 or mdtype == miUINT8: - * codec = 'ascii' - * elif mdtype in self.codecs: # encoded char data # <<<<<<<<<<<<<< - * codec = self.codecs[mdtype] - * if not codec: - */ - __pyx_t_1 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_mdtype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = ((PySequence_Contains(__pyx_v_self->codecs, __pyx_t_1))); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { - - /* "scipy/io/matlab/mio5_utils.pyx":796 - * codec = 'ascii' - * elif mdtype in self.codecs: # encoded char data - * codec = self.codecs[mdtype] # <<<<<<<<<<<<<< - * if not codec: - * raise TypeError('Do not support encoding %d' % mdtype) - */ - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_self->codecs, __pyx_v_mdtype, sizeof(__pyx_t_5numpy_uint32_t)+1, __Pyx_PyInt_to_py_npy_uint32); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_codec = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":797 - * elif mdtype in self.codecs: # encoded char data - * codec = self.codecs[mdtype] - * if not codec: # <<<<<<<<<<<<<< - * raise TypeError('Do not support encoding %d' % mdtype) - * else: - */ - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_codec); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 797; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __pyx_t_7 = (!__pyx_t_5); - if (__pyx_t_7) { - - /* "scipy/io/matlab/mio5_utils.pyx":798 - * codec = self.codecs[mdtype] - * if not codec: - * raise TypeError('Do not support encoding %d' % mdtype) # <<<<<<<<<<<<<< - * else: - * raise ValueError('Type %d does not appear to be char type' - */ - __pyx_t_1 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_mdtype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_20), __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_6)); - __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_t_6, 0, 0, 0); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - goto __pyx_L4; - } - /*else*/ { - - /* "scipy/io/matlab/mio5_utils.pyx":801 - * else: - * raise ValueError('Type %d does not appear to be char type' - * % mdtype) # <<<<<<<<<<<<<< - * uc_str = data.decode(codec) - * # cast to array to deal with 2, 4 byte width characters - */ - __pyx_t_6 = __Pyx_PyInt_to_py_npy_uint32(__pyx_v_mdtype); if (unlikely(!__pyx_t_6)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_21), __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":802 - * raise ValueError('Type %d does not appear to be char type' - * % mdtype) - * uc_str = data.decode(codec) # <<<<<<<<<<<<<< - * # cast to array to deal with 2, 4 byte width characters - * arr = np.array(uc_str, dtype='U') - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_data, __pyx_n_s__decode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __Pyx_INCREF(__pyx_v_codec); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_codec); - 
__Pyx_GIVEREF(__pyx_v_codec); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_v_uc_str = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":804 - * uc_str = data.decode(codec) - * # cast to array to deal with 2, 4 byte width characters - * arr = np.array(uc_str, dtype='U') # <<<<<<<<<<<<<< - * # could take this to numpy C-API level, but probably not worth - * # it - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_uc_str); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_uc_str); - __Pyx_GIVEREF(__pyx_v_uc_str); - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_n_s__U)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = PyEval_CallObjectWithKeywords(__pyx_t_6, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno 
= 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_arr)); - __pyx_v_arr = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":807 - * # could take this to numpy C-API level, but probably not worth - * # it - * return np.ndarray(shape=header.dims, # <<<<<<<<<<<<<< - * dtype=self.U1_dtype, - * buffer=arr, - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__ndarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__shape), __pyx_v_header->dims) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":808 - * # it - * return np.ndarray(shape=header.dims, - * dtype=self.U1_dtype, # <<<<<<<<<<<<<< - * buffer=arr, - * order='F') - */ - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_v_self->U1_dtype)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":809 - * return np.ndarray(shape=header.dims, - * dtype=self.U1_dtype, - * buffer=arr, # <<<<<<<<<<<<<< - * order='F') - * - */ - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__buffer), ((PyObject *)__pyx_v_arr)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__order), ((PyObject *)__pyx_n_s__F)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_char", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_data); - __Pyx_XDECREF(__pyx_v_codec); - __Pyx_XDECREF((PyObject *)__pyx_v_arr); - __Pyx_XDECREF((PyObject *)__pyx_v_dt); - __Pyx_XDECREF(__pyx_v_uc_str); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":743 - * shape=(M,N)) - * - * cpdef cnp.ndarray read_char(self, VarHeader5 header): # 
<<<<<<<<<<<<<< - * ''' Read char matrices from stream as arrays - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char[] = " Read char matrices from stream as arrays\n\n Matrices of char are likely to be converted to matrices of\n string by later processing in ``array_from_header``\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char(PyObject *__pyx_v_self, PyObject *__pyx_v_header) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_char"); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_header), __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5, 1, "header", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_char(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_header), 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_char", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* 
"scipy/io/matlab/mio5_utils.pyx":812 - * order='F') - * - * cpdef cnp.ndarray read_cells(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read cell array from stream ''' - * cdef: - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static PyArrayObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_cells(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header, int __pyx_skip_dispatch) { - size_t __pyx_v_i; - PyArrayObject *__pyx_v_result = 0; - PyObject *__pyx_v_tupdims = NULL; - size_t __pyx_v_length; - Py_buffer __pyx_bstruct_result; - Py_ssize_t __pyx_bstride_0_result = 0; - Py_ssize_t __pyx_bshape_0_result = 0; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyArrayObject *__pyx_t_5 = NULL; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - size_t __pyx_t_10; - size_t __pyx_t_11; - size_t __pyx_t_12; - PyObject **__pyx_t_13; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_cells"); - __pyx_bstruct_result.buf = NULL; - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_cells); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells)) { - 
__Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_header)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_header)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_header)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":818 - * cnp.ndarray[object, ndim=1] result - * # Account for fortran indexing of cells - * tupdims = tuple(header.dims[::-1]) # <<<<<<<<<<<<<< - * cdef size_t length = self.size_from_header(header) - * result = np.empty(length, dtype=object) - */ - __pyx_t_1 = PyObject_GetItem(__pyx_v_header->dims, __pyx_k_slice_22); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 818; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 818; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyTuple_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno 
= 818; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_tupdims = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":819 - * # Account for fortran indexing of cells - * tupdims = tuple(header.dims[::-1]) - * cdef size_t length = self.size_from_header(header) # <<<<<<<<<<<<<< - * result = np.empty(length, dtype=object) - * for i in range(length): - */ - __pyx_v_length = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->size_from_header(__pyx_v_self, __pyx_v_header); - - /* "scipy/io/matlab/mio5_utils.pyx":820 - * tupdims = tuple(header.dims[::-1]) - * cdef size_t length = self.size_from_header(header) - * result = np.empty(length, dtype=object) # <<<<<<<<<<<<<< - * for i in range(length): - * result[i] = self.read_mi_matrix() - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_builtin_object) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_result); - __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_result, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_object, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_6 < 0)) { - PyErr_Fetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_result, (PyObject*)__pyx_v_result, &__Pyx_TypeInfo_object, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_7); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_7, __pyx_t_8, __pyx_t_9); - } - } - __pyx_bstride_0_result = __pyx_bstruct_result.strides[0]; - __pyx_bshape_0_result = __pyx_bstruct_result.shape[0]; - if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_5 = 0; - __pyx_v_result = ((PyArrayObject *)__pyx_t_4); - __pyx_t_4 = 0; - - /* 
"scipy/io/matlab/mio5_utils.pyx":821 - * cdef size_t length = self.size_from_header(header) - * result = np.empty(length, dtype=object) - * for i in range(length): # <<<<<<<<<<<<<< - * result[i] = self.read_mi_matrix() - * return result.reshape(tupdims).T - */ - __pyx_t_10 = __pyx_v_length; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "scipy/io/matlab/mio5_utils.pyx":822 - * result = np.empty(length, dtype=object) - * for i in range(length): - * result[i] = self.read_mi_matrix() # <<<<<<<<<<<<<< - * return result.reshape(tupdims).T - * - */ - __pyx_t_4 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_mi_matrix(__pyx_v_self, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_12 = __pyx_v_i; - __pyx_t_6 = -1; - if (unlikely(__pyx_t_12 >= (size_t)__pyx_bshape_0_result)) __pyx_t_6 = 0; - if (unlikely(__pyx_t_6 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_6); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_13 = __Pyx_BufPtrStrided1d(PyObject **, __pyx_bstruct_result.buf, __pyx_t_12, __pyx_bstride_0_result); - __Pyx_GOTREF(*__pyx_t_13); - __Pyx_DECREF(*__pyx_t_13); __Pyx_INCREF(__pyx_t_4); - *__pyx_t_13 = __pyx_t_4; - __Pyx_GIVEREF(*__pyx_t_13); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":823 - * for i in range(length): - * result[i] = self.read_mi_matrix() - * return result.reshape(tupdims).T # <<<<<<<<<<<<<< - * - * def read_fieldnames(self): - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_v_result), __pyx_n_s__reshape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = 
PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((PyObject *)__pyx_v_tupdims)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_tupdims)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_tupdims)); - __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__T); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_result); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_cells", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_result); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_tupdims); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - 
return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":812 - * order='F') - * - * cpdef cnp.ndarray read_cells(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read cell array from stream ''' - * cdef: - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells[] = " Read cell array from stream "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells(PyObject *__pyx_v_self, PyObject *__pyx_v_header) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_cells"); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_header), __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5, 1, "header", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_cells(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_header), 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_cells", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return 
__pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":825 - * return result.reshape(tupdims).T - * - * def read_fieldnames(self): # <<<<<<<<<<<<<< - * ''' Read fieldnames for struct-like matrix ' - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10read_fieldnames(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10read_fieldnames[] = " Read fieldnames for struct-like matrix '\n\n Python wrapper for cdef'ed method\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10read_fieldnames(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - int __pyx_v_n_names; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_fieldnames"); - - /* "scipy/io/matlab/mio5_utils.pyx":831 - * ''' - * cdef int n_names - * return self.cread_fieldnames(&n_names) # <<<<<<<<<<<<<< - * - * cdef inline object cread_fieldnames(self, int *n_names_ptr): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->cread_fieldnames(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), (&__pyx_v_n_names)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_fieldnames", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return 
__pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":833 - * return self.cread_fieldnames(&n_names) - * - * cdef inline object cread_fieldnames(self, int *n_names_ptr): # <<<<<<<<<<<<<< - * cdef: - * cnp.int32_t namelength - */ - -static CYTHON_INLINE PyObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_cread_fieldnames(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, int *__pyx_v_n_names_ptr) { - __pyx_t_5numpy_int32_t __pyx_v_namelength; - int __pyx_v_i; - int __pyx_v_n_names; - PyObject *__pyx_v_name = 0; - PyObject *__pyx_v_field_names = 0; - int __pyx_v_res; - PyObject *__pyx_v_names = 0; - int *__pyx_v_n_duplicates; - char **__pyx_v_name_ptrs; - char *__pyx_v_n_ptr; - int __pyx_v_j; - int __pyx_v_dup_no; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t __pyx_t_4; - char *__pyx_t_5; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - int __pyx_t_10; - int __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cread_fieldnames"); - - /* "scipy/io/matlab/mio5_utils.pyx":839 - * object name, field_names - * # Read field names into list - * cdef int res = self.read_into_int32s(&namelength) # <<<<<<<<<<<<<< - * if res != 1: - * raise ValueError('Only one value for namelength') - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_into_int32s(__pyx_v_self, (&__pyx_v_namelength)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_res = __pyx_t_1; - - /* "scipy/io/matlab/mio5_utils.pyx":840 - * # Read field names into list - * cdef int res = self.read_into_int32s(&namelength) - * if res != 1: # <<<<<<<<<<<<<< - * raise ValueError('Only one value for namelength') - * cdef object 
names = self.read_int8_string() - */ - __pyx_t_2 = (__pyx_v_res != 1); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":841 - * cdef int res = self.read_into_int32s(&namelength) - * if res != 1: - * raise ValueError('Only one value for namelength') # <<<<<<<<<<<<<< - * cdef object names = self.read_int8_string() - * field_names = [] - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_24), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":842 - * if res != 1: - * raise ValueError('Only one value for namelength') - * cdef object names = self.read_int8_string() # <<<<<<<<<<<<<< - * field_names = [] - * n_names = PyBytes_Size(names) // namelength - */ - __pyx_t_3 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_int8_string(__pyx_v_self); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_names = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":843 - * raise ValueError('Only one value for namelength') - * cdef object names = self.read_int8_string() - * field_names = [] # <<<<<<<<<<<<<< - * n_names = PyBytes_Size(names) // namelength - * # Make n_duplicates and pointer arrays - */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_v_field_names = ((PyObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":844 - * cdef object 
names = self.read_int8_string() - * field_names = [] - * n_names = PyBytes_Size(names) // namelength # <<<<<<<<<<<<<< - * # Make n_duplicates and pointer arrays - * cdef: - */ - __pyx_t_4 = PyBytes_Size(__pyx_v_names); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(__pyx_v_namelength == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_namelength == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_t_4))) { - PyErr_Format(PyExc_OverflowError, "value too large to perform division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_n_names = __Pyx_div_Py_ssize_t(__pyx_t_4, __pyx_v_namelength); - - /* "scipy/io/matlab/mio5_utils.pyx":849 - * int *n_duplicates - * char **name_ptrs - * n_duplicates = calloc(n_names, sizeof(int)) # <<<<<<<<<<<<<< - * name_ptrs = calloc(n_names, sizeof(char *)) - * cdef: - */ - __pyx_v_n_duplicates = ((int *)calloc(__pyx_v_n_names, (sizeof(int)))); - - /* "scipy/io/matlab/mio5_utils.pyx":850 - * char **name_ptrs - * n_duplicates = calloc(n_names, sizeof(int)) - * name_ptrs = calloc(n_names, sizeof(char *)) # <<<<<<<<<<<<<< - * cdef: - * char *n_ptr = names - */ - __pyx_v_name_ptrs = ((char **)calloc(__pyx_v_n_names, (sizeof(char *)))); - - /* "scipy/io/matlab/mio5_utils.pyx":852 - * name_ptrs = calloc(n_names, sizeof(char *)) - * cdef: - * char *n_ptr = names # <<<<<<<<<<<<<< - * int j, dup_no - * for i in range(n_names): - */ - __pyx_t_5 = PyBytes_AsString(__pyx_v_names); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_n_ptr = __pyx_t_5; - - /* 
"scipy/io/matlab/mio5_utils.pyx":854 - * char *n_ptr = names - * int j, dup_no - * for i in range(n_names): # <<<<<<<<<<<<<< - * name = asstr(PyBytes_FromString(n_ptr)) - * # Check if this is a duplicate field, rename if so - */ - __pyx_t_1 = __pyx_v_n_names; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_1; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "scipy/io/matlab/mio5_utils.pyx":855 - * int j, dup_no - * for i in range(n_names): - * name = asstr(PyBytes_FromString(n_ptr)) # <<<<<<<<<<<<<< - * # Check if this is a duplicate field, rename if so - * name_ptrs[i] = n_ptr - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__asstr); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = ((PyObject *)PyBytes_FromString(__pyx_v_n_ptr)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_v_name); - __pyx_v_name = __pyx_t_7; - __pyx_t_7 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":857 - * name = asstr(PyBytes_FromString(n_ptr)) - * # Check if this is a duplicate field, rename if so - * name_ptrs[i] = n_ptr # <<<<<<<<<<<<<< - * dup_no = 0 - * for j in range(i): - */ - (__pyx_v_name_ptrs[__pyx_v_i]) = __pyx_v_n_ptr; - - /* 
"scipy/io/matlab/mio5_utils.pyx":858 - * # Check if this is a duplicate field, rename if so - * name_ptrs[i] = n_ptr - * dup_no = 0 # <<<<<<<<<<<<<< - * for j in range(i): - * if strcmp(n_ptr, name_ptrs[j]) == 0: # the same - */ - __pyx_v_dup_no = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":859 - * name_ptrs[i] = n_ptr - * dup_no = 0 - * for j in range(i): # <<<<<<<<<<<<<< - * if strcmp(n_ptr, name_ptrs[j]) == 0: # the same - * n_duplicates[j] += 1 - */ - __pyx_t_9 = __pyx_v_i; - for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { - __pyx_v_j = __pyx_t_10; - - /* "scipy/io/matlab/mio5_utils.pyx":860 - * dup_no = 0 - * for j in range(i): - * if strcmp(n_ptr, name_ptrs[j]) == 0: # the same # <<<<<<<<<<<<<< - * n_duplicates[j] += 1 - * dup_no = n_duplicates[j] - */ - __pyx_t_2 = (strcmp(__pyx_v_n_ptr, (__pyx_v_name_ptrs[__pyx_v_j])) == 0); - if (__pyx_t_2) { - - /* "scipy/io/matlab/mio5_utils.pyx":861 - * for j in range(i): - * if strcmp(n_ptr, name_ptrs[j]) == 0: # the same - * n_duplicates[j] += 1 # <<<<<<<<<<<<<< - * dup_no = n_duplicates[j] - * break - */ - __pyx_t_11 = __pyx_v_j; - (__pyx_v_n_duplicates[__pyx_t_11]) = ((__pyx_v_n_duplicates[__pyx_t_11]) + 1); - - /* "scipy/io/matlab/mio5_utils.pyx":862 - * if strcmp(n_ptr, name_ptrs[j]) == 0: # the same - * n_duplicates[j] += 1 - * dup_no = n_duplicates[j] # <<<<<<<<<<<<<< - * break - * if dup_no != 0: - */ - __pyx_v_dup_no = (__pyx_v_n_duplicates[__pyx_v_j]); - - /* "scipy/io/matlab/mio5_utils.pyx":863 - * n_duplicates[j] += 1 - * dup_no = n_duplicates[j] - * break # <<<<<<<<<<<<<< - * if dup_no != 0: - * name = '_%d_%s' % (dup_no, name) - */ - goto __pyx_L7_break; - goto __pyx_L8; - } - __pyx_L8:; - } - __pyx_L7_break:; - - /* "scipy/io/matlab/mio5_utils.pyx":864 - * dup_no = n_duplicates[j] - * break - * if dup_no != 0: # <<<<<<<<<<<<<< - * name = '_%d_%s' % (dup_no, name) - * field_names.append(name) - */ - __pyx_t_2 = (__pyx_v_dup_no != 0); - if (__pyx_t_2) { - - /* 
"scipy/io/matlab/mio5_utils.pyx":865 - * break - * if dup_no != 0: - * name = '_%d_%s' % (dup_no, name) # <<<<<<<<<<<<<< - * field_names.append(name) - * n_ptr += namelength - */ - __pyx_t_7 = PyInt_FromLong(__pyx_v_dup_no); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 865; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 865; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __Pyx_INCREF(__pyx_v_name); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_name); - __Pyx_GIVEREF(__pyx_v_name); - __pyx_t_7 = 0; - __pyx_t_7 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_25), ((PyObject *)__pyx_t_8)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 865; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_7)); - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_v_name); - __pyx_v_name = ((PyObject *)__pyx_t_7); - __pyx_t_7 = 0; - goto __pyx_L9; - } - __pyx_L9:; - - /* "scipy/io/matlab/mio5_utils.pyx":866 - * if dup_no != 0: - * name = '_%d_%s' % (dup_no, name) - * field_names.append(name) # <<<<<<<<<<<<<< - * n_ptr += namelength - * free(n_duplicates) - */ - __pyx_t_7 = __Pyx_PyObject_Append(__pyx_v_field_names, __pyx_v_name); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 866; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":867 - * name = '_%d_%s' % (dup_no, name) - * field_names.append(name) - * n_ptr += namelength # <<<<<<<<<<<<<< - * free(n_duplicates) - * free(name_ptrs) - */ - __pyx_v_n_ptr = (__pyx_v_n_ptr + __pyx_v_namelength); - } - - /* "scipy/io/matlab/mio5_utils.pyx":868 - * field_names.append(name) - 
* n_ptr += namelength - * free(n_duplicates) # <<<<<<<<<<<<<< - * free(name_ptrs) - * n_names_ptr[0] = n_names - */ - free(__pyx_v_n_duplicates); - - /* "scipy/io/matlab/mio5_utils.pyx":869 - * n_ptr += namelength - * free(n_duplicates) - * free(name_ptrs) # <<<<<<<<<<<<<< - * n_names_ptr[0] = n_names - * return field_names - */ - free(__pyx_v_name_ptrs); - - /* "scipy/io/matlab/mio5_utils.pyx":870 - * free(n_duplicates) - * free(name_ptrs) - * n_names_ptr[0] = n_names # <<<<<<<<<<<<<< - * return field_names - * - */ - (__pyx_v_n_names_ptr[0]) = __pyx_v_n_names; - - /* "scipy/io/matlab/mio5_utils.pyx":871 - * free(name_ptrs) - * n_names_ptr[0] = n_names - * return field_names # <<<<<<<<<<<<<< - * - * cpdef cnp.ndarray read_struct(self, VarHeader5 header): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_field_names); - __pyx_r = __pyx_v_field_names; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.cread_fieldnames", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_name); - __Pyx_XDECREF(__pyx_v_field_names); - __Pyx_XDECREF(__pyx_v_names); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":873 - * return field_names - * - * cpdef cnp.ndarray read_struct(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read struct or object array from stream - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static PyArrayObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_struct(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_header, int 
__pyx_skip_dispatch) { - int __pyx_v_i; - int __pyx_v_n_names; - PyArrayObject *__pyx_v_rec_res = 0; - PyArrayObject *__pyx_v_result = 0; - PyObject *__pyx_v_dt = 0; - PyObject *__pyx_v_tupdims = 0; - PyObject *__pyx_v_field_names = 0; - size_t __pyx_v_length; - PyObject *__pyx_v_field_name = NULL; - PyObject *__pyx_v_obj_template = NULL; - PyObject *__pyx_v_item = NULL; - PyObject *__pyx_v_name = NULL; - Py_buffer __pyx_bstruct_result; - Py_ssize_t __pyx_bstride_0_result = 0; - Py_ssize_t __pyx_bshape_0_result = 0; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - PyObject *(*__pyx_t_7)(PyObject *); - size_t __pyx_t_8; - int __pyx_t_9; - PyArrayObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - int __pyx_t_14; - int __pyx_t_15; - PyObject **__pyx_t_16; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_struct"); - __pyx_bstruct_result.buf = NULL; - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_struct); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct)) { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - 
__Pyx_INCREF(((PyObject *)__pyx_v_header)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_header)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_header)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":886 - * object dt, tupdims - * # Read field names into list - * cdef object field_names = self.cread_fieldnames(&n_names) # <<<<<<<<<<<<<< - * # Prepare struct array - * tupdims = tuple(header.dims[::-1]) - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->cread_fieldnames(__pyx_v_self, (&__pyx_v_n_names)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_field_names = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":888 - * cdef object field_names = self.cread_fieldnames(&n_names) - * # Prepare struct array - * tupdims = tuple(header.dims[::-1]) # <<<<<<<<<<<<<< - * cdef size_t length = self.size_from_header(header) - * if self.struct_as_record: # to record arrays - */ - __pyx_t_1 = PyObject_GetItem(__pyx_v_header->dims, __pyx_k_slice_26); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyTuple_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_tupdims = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":889 - * # Prepare struct array - * tupdims = tuple(header.dims[::-1]) - * cdef size_t length = self.size_from_header(header) # <<<<<<<<<<<<<< - * if self.struct_as_record: # to record arrays - * if not n_names: - */ - __pyx_v_length = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->size_from_header(__pyx_v_self, __pyx_v_header); - - /* "scipy/io/matlab/mio5_utils.pyx":890 - * tupdims = tuple(header.dims[::-1]) - * cdef size_t length = self.size_from_header(header) - * if self.struct_as_record: # to record arrays # <<<<<<<<<<<<<< - * if not n_names: - * # If there are no field names, there is no dtype - */ - if (__pyx_v_self->struct_as_record) { - - /* "scipy/io/matlab/mio5_utils.pyx":891 - * cdef size_t length = self.size_from_header(header) - * if self.struct_as_record: # to record arrays - * if not n_names: # <<<<<<<<<<<<<< - * # If there are no field names, there is no dtype - * # representation we can use, falling back to empty - */ - __pyx_t_4 = (!__pyx_v_n_names); - if (__pyx_t_4) { - - /* "scipy/io/matlab/mio5_utils.pyx":895 - * # representation we can use, falling back to empty - * # object - * return np.empty(tupdims, dtype=object).T # <<<<<<<<<<<<<< - * dt = [(field_name, object) for field_name in field_names] - * rec_res = np.empty(length, dtype=dt) - */ - __Pyx_XDECREF(((PyObject 
*)__pyx_r)); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_tupdims); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_tupdims); - __Pyx_GIVEREF(__pyx_v_tupdims); - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_builtin_object) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__T); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio5_utils.pyx":896 - * # object - * return np.empty(tupdims, dtype=object).T - * dt = [(field_name, object) for field_name in field_names] # <<<<<<<<<<<<<< - * rec_res = np.empty(length, dtype=dt) - * for i in range(length): - */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyList_CheckExact(__pyx_v_field_names) || PyTuple_CheckExact(__pyx_v_field_names)) { - __pyx_t_5 = __pyx_v_field_names; __Pyx_INCREF(__pyx_t_5); __pyx_t_6 = 0; - __pyx_t_7 = NULL; - } else { - __pyx_t_6 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_field_names); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = Py_TYPE(__pyx_t_5)->tp_iternext; - } - for (;;) { - if (PyList_CheckExact(__pyx_t_5)) { - if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_5)) break; - __pyx_t_1 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; - } else if (PyTuple_CheckExact(__pyx_t_5)) { - if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_5)) break; - __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; - } else { - __pyx_t_1 = __pyx_t_7(__pyx_t_5); - if (unlikely(!__pyx_t_1)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_1); - } - __Pyx_XDECREF(__pyx_v_field_name); - __pyx_v_field_name = __pyx_t_1; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 896; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_field_name); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_field_name); - __Pyx_GIVEREF(__pyx_v_field_name); - __Pyx_INCREF(__pyx_builtin_object); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_builtin_object); - __Pyx_GIVEREF(__pyx_builtin_object); - if (unlikely(PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_INCREF(((PyObject *)__pyx_t_2)); - __pyx_v_dt = ((PyObject *)__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":897 - * return np.empty(tupdims, dtype=object).T - * dt = [(field_name, object) for field_name in field_names] - * rec_res = np.empty(length, dtype=dt) # <<<<<<<<<<<<<< - * for i in range(length): - * for field_name in field_names: - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_v_dt) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_rec_res = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":898 - * dt = [(field_name, object) for field_name in field_names] - * rec_res = np.empty(length, dtype=dt) - * for i in range(length): # <<<<<<<<<<<<<< - * for field_name in field_names: - * rec_res[i][field_name] = self.read_mi_matrix() - */ - __pyx_t_8 = __pyx_v_length; - for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { - __pyx_v_i = __pyx_t_9; - - /* "scipy/io/matlab/mio5_utils.pyx":899 - * rec_res = np.empty(length, dtype=dt) - * for i in range(length): - * for field_name in field_names: # <<<<<<<<<<<<<< - * rec_res[i][field_name] = self.read_mi_matrix() - * return rec_res.reshape(tupdims).T - */ - if (PyList_CheckExact(__pyx_v_field_names) || PyTuple_CheckExact(__pyx_v_field_names)) { - __pyx_t_3 = __pyx_v_field_names; __Pyx_INCREF(__pyx_t_3); __pyx_t_6 = 0; - __pyx_t_7 = NULL; - } else { - __pyx_t_6 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_field_names); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 899; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = Py_TYPE(__pyx_t_3)->tp_iternext; - } - for (;;) { - if (PyList_CheckExact(__pyx_t_3)) { - if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_3)) break; - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_6); __Pyx_INCREF(__pyx_t_2); __pyx_t_6++; - } else if (PyTuple_CheckExact(__pyx_t_3)) { - if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_6); __Pyx_INCREF(__pyx_t_2); __pyx_t_6++; - } else { - __pyx_t_2 = __pyx_t_7(__pyx_t_3); - if (unlikely(!__pyx_t_2)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 899; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF(__pyx_v_field_name); - __pyx_v_field_name = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":900 - * for i in range(length): - * for field_name in field_names: - * rec_res[i][field_name] = self.read_mi_matrix() # <<<<<<<<<<<<<< - * return rec_res.reshape(tupdims).T - * # Backward compatibility with previous format - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_mi_matrix(__pyx_v_self, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(((PyObject *)__pyx_v_rec_res), __pyx_v_i, sizeof(int), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetItem(__pyx_t_1, __pyx_v_field_name, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":901 - * for field_name in field_names: - * rec_res[i][field_name] = self.read_mi_matrix() - * return rec_res.reshape(tupdims).T # <<<<<<<<<<<<<< - * # Backward compatibility with previous format - * obj_template = mio5p.mat_struct() - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_rec_res), __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 901; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 901; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_tupdims); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_tupdims); - __Pyx_GIVEREF(__pyx_v_tupdims); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 901; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__T); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 901; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 901; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio5_utils.pyx":903 - * return rec_res.reshape(tupdims).T - * # Backward compatibility with previous format - * obj_template = 
mio5p.mat_struct() # <<<<<<<<<<<<<< - * obj_template._fieldnames = field_names - * result = np.empty(length, dtype=object) - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__mat_struct); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_obj_template = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":904 - * # Backward compatibility with previous format - * obj_template = mio5p.mat_struct() - * obj_template._fieldnames = field_names # <<<<<<<<<<<<<< - * result = np.empty(length, dtype=object) - * for i in range(length): - */ - if (PyObject_SetAttr(__pyx_v_obj_template, __pyx_n_s___fieldnames, __pyx_v_field_names) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/mio5_utils.pyx":905 - * obj_template = mio5p.mat_struct() - * obj_template._fieldnames = field_names - * result = np.empty(length, dtype=object) # <<<<<<<<<<<<<< - * for i in range(length): - * item = pycopy(obj_template) - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_builtin_object) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_10 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_result); - __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_result, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_object, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_9 < 0)) { - PyErr_Fetch(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); - if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_result, (PyObject*)__pyx_v_result, &__Pyx_TypeInfo_object, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_11, __pyx_t_12, __pyx_t_13); - } - } - __pyx_bstride_0_result = __pyx_bstruct_result.strides[0]; - __pyx_bshape_0_result = __pyx_bstruct_result.shape[0]; - if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 905; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_10 = 0; - __pyx_v_result = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":906 - * obj_template._fieldnames = field_names - * result = np.empty(length, dtype=object) - * for i in range(length): # <<<<<<<<<<<<<< - * item = pycopy(obj_template) - * for name in field_names: - */ - __pyx_t_8 = __pyx_v_length; - for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { - __pyx_v_i = __pyx_t_9; - - /* "scipy/io/matlab/mio5_utils.pyx":907 - * result = np.empty(length, dtype=object) - * for i in range(length): - * item = pycopy(obj_template) # <<<<<<<<<<<<<< - * for name in field_names: - * item.__dict__[name] = self.read_mi_matrix() - */ - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__pycopy); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_obj_template); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_obj_template); - __Pyx_GIVEREF(__pyx_v_obj_template); - __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_v_item); - __pyx_v_item = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":908 - * for i in range(length): - * item = pycopy(obj_template) - * for name in field_names: # <<<<<<<<<<<<<< - * item.__dict__[name] = self.read_mi_matrix() - * result[i] = item - */ - if (PyList_CheckExact(__pyx_v_field_names) || PyTuple_CheckExact(__pyx_v_field_names)) { - __pyx_t_3 = __pyx_v_field_names; __Pyx_INCREF(__pyx_t_3); __pyx_t_6 = 0; - __pyx_t_7 = NULL; - } else { - __pyx_t_6 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_field_names); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = Py_TYPE(__pyx_t_3)->tp_iternext; - } - for (;;) { - if (PyList_CheckExact(__pyx_t_3)) { - if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_3)) break; - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_6); __Pyx_INCREF(__pyx_t_2); __pyx_t_6++; - } else if (PyTuple_CheckExact(__pyx_t_3)) { - if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_6); __Pyx_INCREF(__pyx_t_2); __pyx_t_6++; - } else { - __pyx_t_2 = __pyx_t_7(__pyx_t_3); - if (unlikely(!__pyx_t_2)) { - if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF(__pyx_v_name); - __pyx_v_name = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":909 - * item = pycopy(obj_template) - * for name in field_names: - * item.__dict__[name] = self.read_mi_matrix() # <<<<<<<<<<<<<< - * result[i] = item - * return result.reshape(tupdims).T - */ - __pyx_t_2 = ((struct 
__pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_mi_matrix(__pyx_v_self, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 909; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_v_item, __pyx_n_s____dict__); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 909; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - if (PyObject_SetItem(__pyx_t_5, __pyx_v_name, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 909; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":910 - * for name in field_names: - * item.__dict__[name] = self.read_mi_matrix() - * result[i] = item # <<<<<<<<<<<<<< - * return result.reshape(tupdims).T - * - */ - __pyx_t_14 = __pyx_v_i; - __pyx_t_15 = -1; - if (__pyx_t_14 < 0) { - __pyx_t_14 += __pyx_bshape_0_result; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_15 = 0; - } else if (unlikely(__pyx_t_14 >= __pyx_bshape_0_result)) __pyx_t_15 = 0; - if (unlikely(__pyx_t_15 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_15); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 910; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_16 = __Pyx_BufPtrStrided1d(PyObject **, __pyx_bstruct_result.buf, __pyx_t_14, __pyx_bstride_0_result); - __Pyx_GOTREF(*__pyx_t_16); - __Pyx_DECREF(*__pyx_t_16); __Pyx_INCREF(__pyx_v_item); - *__pyx_t_16 = __pyx_v_item; - __Pyx_GIVEREF(*__pyx_t_16); - } - - /* "scipy/io/matlab/mio5_utils.pyx":911 - * item.__dict__[name] = self.read_mi_matrix() - * result[i] = item - * return result.reshape(tupdims).T # <<<<<<<<<<<<<< - * - * cpdef cnp.ndarray read_opaque(self, VarHeader5 hdr): - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_3 = PyObject_GetAttr(((PyObject 
*)__pyx_v_result), __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 911; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 911; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_tupdims); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_tupdims); - __Pyx_GIVEREF(__pyx_v_tupdims); - __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 911; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__T); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 911; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 911; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_result); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_struct", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_result); - 
__pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_rec_res); - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_dt); - __Pyx_XDECREF(__pyx_v_tupdims); - __Pyx_XDECREF(__pyx_v_field_names); - __Pyx_XDECREF(__pyx_v_field_name); - __Pyx_XDECREF(__pyx_v_obj_template); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XDECREF(__pyx_v_name); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":873 - * return field_names - * - * cpdef cnp.ndarray read_struct(self, VarHeader5 header): # <<<<<<<<<<<<<< - * ''' Read struct or object array from stream - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct(PyObject *__pyx_v_self, PyObject *__pyx_v_header); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct[] = " Read struct or object array from stream\n\n Objects are just structs with an extra field *classname*,\n defined before (this here) struct format structure\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct(PyObject *__pyx_v_self, PyObject *__pyx_v_header) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_struct"); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_header), __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5, 1, "header", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_struct(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 
*)__pyx_v_header), 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_struct", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":913 - * return result.reshape(tupdims).T - * - * cpdef cnp.ndarray read_opaque(self, VarHeader5 hdr): # <<<<<<<<<<<<<< - * ''' Read opaque (function workspace) type - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque(PyObject *__pyx_v_self, PyObject *__pyx_v_hdr); /*proto*/ -static PyArrayObject *__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_opaque(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *__pyx_v_self, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *__pyx_v_hdr, int __pyx_skip_dispatch) { - PyArrayObject *__pyx_v_res = 0; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_opaque"); - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__read_opaque); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || 
(PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque)) { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_hdr)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_hdr)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_hdr)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/mio5_utils.pyx":929 - * See the comments at the beginning of ``mio5.py`` - * ''' - * cdef cnp.ndarray res = np.empty((1,), dtype=OPAQUE_DTYPE) # <<<<<<<<<<<<<< - * res[0]['s0'] = self.read_int8_string() - * res[0]['s1'] = self.read_int8_string() - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_v_5scipy_2io_6matlab_10mio5_utils_OPAQUE_DTYPE)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_k_tuple_28), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_res = ((PyArrayObject *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":930 - * ''' - * cdef cnp.ndarray res = np.empty((1,), dtype=OPAQUE_DTYPE) - * res[0]['s0'] = self.read_int8_string() # <<<<<<<<<<<<<< - * res[0]['s1'] = self.read_int8_string() - * res[0]['s2'] = self.read_int8_string() - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_int8_string(__pyx_v_self); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(((PyObject *)__pyx_v_res), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__s0), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); 
__pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":931 - * cdef cnp.ndarray res = np.empty((1,), dtype=OPAQUE_DTYPE) - * res[0]['s0'] = self.read_int8_string() - * res[0]['s1'] = self.read_int8_string() # <<<<<<<<<<<<<< - * res[0]['s2'] = self.read_int8_string() - * res[0]['arr'] = self.read_mi_matrix() - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_int8_string(__pyx_v_self); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 931; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(((PyObject *)__pyx_v_res), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 931; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__s1), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 931; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":932 - * res[0]['s0'] = self.read_int8_string() - * res[0]['s1'] = self.read_int8_string() - * res[0]['s2'] = self.read_int8_string() # <<<<<<<<<<<<<< - * res[0]['arr'] = self.read_mi_matrix() - * return res - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_int8_string(__pyx_v_self); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(((PyObject *)__pyx_v_res), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__s2), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":933 - * res[0]['s1'] = self.read_int8_string() - * res[0]['s2'] = self.read_int8_string() - * res[0]['arr'] = self.read_mi_matrix() # <<<<<<<<<<<<<< - * return res - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self->__pyx_vtab)->read_mi_matrix(__pyx_v_self, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 933; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(((PyObject *)__pyx_v_res), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 933; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__arr), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 933; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":934 - * res[0]['s2'] = self.read_int8_string() - * res[0]['arr'] = self.read_mi_matrix() - * return res # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_res)); - __pyx_r = __pyx_v_res; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_opaque", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_res); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":913 - * return result.reshape(tupdims).T - * - 
* cpdef cnp.ndarray read_opaque(self, VarHeader5 hdr): # <<<<<<<<<<<<<< - * ''' Read opaque (function workspace) type - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque(PyObject *__pyx_v_self, PyObject *__pyx_v_hdr); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque[] = " Read opaque (function workspace) type\n\n Looking at some mat files, the structure of this type seems to\n be:\n\n * array flags as usual (already read into `hdr`)\n * 3 int8 strings\n * a matrix\n\n Then there's a matrix at the end of the mat file that seems have\n the anonymous founction workspaces - we load it as\n ``__function_workspace__``\n\n See the comments at the beginning of ``mio5.py``\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque(PyObject *__pyx_v_self, PyObject *__pyx_v_hdr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_opaque"); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_hdr), __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5, 1, "hdr", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->__pyx_vtab)->read_opaque(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self), ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)__pyx_v_hdr), 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto 
__pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.read_opaque", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio5_utils.pyx":142 - * - * cdef class VarReader5: - * cdef public int is_swapped, little_endian # <<<<<<<<<<<<<< - * cdef int struct_as_record - * cdef object codecs, uint16_codec - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10is_swapped___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10is_swapped___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->is_swapped); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.is_swapped.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10is_swapped_1__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10is_swapped_1__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - 
int __pyx_t_1; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__set__"); - __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->is_swapped = __pyx_t_1; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.is_swapped.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_13little_endian___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_13little_endian___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->little_endian); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.little_endian.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_13little_endian_1__set__(PyObject 
*__pyx_v_self, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_13little_endian_1__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__set__"); - __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)__pyx_v_self)->little_endian = __pyx_t_1; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("scipy.io.matlab.mio5_utils.VarReader5.little_endian.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":190 - * # experimental exception made for __getbuffer__ and __releasebuffer__ - * # -- the details of this may change. - * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< - * # This implementation of getbuffer is geared towards Cython - * # requirements, and does not yet fullfill the PEP. 
- */ - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int __pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - /* "numpy.pxd":196 - * # of flags - * - * if info == NULL: return # <<<<<<<<<<<<<< - * - * cdef int copy_shape, i, ndim - */ - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":199 - * - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":200 - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * - * ndim = PyArray_NDIM(self) - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":202 - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject 
*)__pyx_v_self)); - - /* "numpy.pxd":204 - * ndim = PyArray_NDIM(self) - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * copy_shape = 1 - * else: - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":205 - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * copy_shape = 1 # <<<<<<<<<<<<<< - * else: - * copy_shape = 0 - */ - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - /*else*/ { - - /* "numpy.pxd":207 - * copy_shape = 1 - * else: - * copy_shape = 0 # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - */ - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - /* "numpy.pxd":209 - * copy_shape = 0 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - /* "numpy.pxd":210 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not C contiguous") - * - */ - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_30), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "numpy.pxd":213 - * raise ValueError(u"ndarray is not C contiguous") - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") - */ - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - /* "numpy.pxd":214 - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not Fortran contiguous") - * - */ - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (__pyx_t_2) { - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_32), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - /* "numpy.pxd":217 - * raise ValueError(u"ndarray is not Fortran contiguous") - * - * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< - * info.ndim = ndim - * if copy_shape: - */ - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":218 - * - * info.buf = PyArray_DATA(self) 
- * info.ndim = ndim # <<<<<<<<<<<<<< - * if copy_shape: - * # Allocate new buffer for strides and shape info. - */ - __pyx_v_info->ndim = __pyx_v_ndim; - - /* "numpy.pxd":219 - * info.buf = PyArray_DATA(self) - * info.ndim = ndim - * if copy_shape: # <<<<<<<<<<<<<< - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - */ - if (__pyx_v_copy_shape) { - - /* "numpy.pxd":222 - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< - * info.shape = info.strides + ndim - * for i in range(ndim): - */ - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - /* "numpy.pxd":223 - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim # <<<<<<<<<<<<<< - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - */ - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - /* "numpy.pxd":224 - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim - * for i in range(ndim): # <<<<<<<<<<<<<< - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] - */ - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "numpy.pxd":225 - * info.shape = info.strides + ndim - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - */ - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - /* "numpy.pxd":226 - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< - * else: - * info.strides = 
PyArray_STRIDES(self) - */ - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":228 - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - */ - __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":229 - * else: - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - */ - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - /* "numpy.pxd":230 - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) - */ - __pyx_v_info->suboffsets = NULL; - - /* "numpy.pxd":231 - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< - * info.readonly = not PyArray_ISWRITEABLE(self) - * - */ - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":232 - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< - * - * cdef int t - */ - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":235 - * - * cdef int t - * cdef char* f = NULL # <<<<<<<<<<<<<< - * cdef dtype descr = self.descr - * cdef list stack - */ - __pyx_v_f = NULL; - - /* "numpy.pxd":236 - * cdef int t - * cdef char* f = NULL - * cdef dtype descr = self.descr # <<<<<<<<<<<<<< - * cdef list stack - * cdef int offset - */ - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = 
((PyArrayObject *)__pyx_v_self)->descr; - - /* "numpy.pxd":240 - * cdef int offset - * - * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< - * - * if not hasfields and not copy_shape: - */ - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - /* "numpy.pxd":242 - * cdef bint hasfields = PyDataType_HASFIELDS(descr) - * - * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< - * # do not call releasebuffer - * info.obj = None - */ - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":244 - * if not hasfields and not copy_shape: - * # do not call releasebuffer - * info.obj = None # <<<<<<<<<<<<<< - * else: - * # need to call releasebuffer - */ - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - /*else*/ { - - /* "numpy.pxd":247 - * else: - * # need to call releasebuffer - * info.obj = self # <<<<<<<<<<<<<< - * - * if not hasfields: - */ - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - /* "numpy.pxd":249 - * info.obj = self - * - * if not hasfields: # <<<<<<<<<<<<<< - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - */ - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - /* "numpy.pxd":250 - * - * if not hasfields: - * t = descr.type_num # <<<<<<<<<<<<<< - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - */ - __pyx_v_t = __pyx_v_descr->type_num; - - /* "numpy.pxd":251 - * if not hasfields: - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (descr.byteorder == '<' and not little_endian)): - * raise 
ValueError(u"Non-native byte order not supported") - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - /* "numpy.pxd":252 - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_34), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - /* "numpy.pxd":254 - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - */ - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - /* "numpy.pxd":255 - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" # 
<<<<<<<<<<<<<< - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - /* "numpy.pxd":256 - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - */ - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - /* "numpy.pxd":257 - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - */ - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - /* "numpy.pxd":258 - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - */ - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - /* "numpy.pxd":259 - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - - /* "numpy.pxd":260 - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - /* "numpy.pxd":261 - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - */ - __pyx_t_1 = 
(__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - /* "numpy.pxd":262 - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - /* "numpy.pxd":263 - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - /* "numpy.pxd":264 - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - */ - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - /* "numpy.pxd":265 - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - */ - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - /* "numpy.pxd":266 - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - /* "numpy.pxd":267 - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - */ - __pyx_t_1 = (__pyx_v_t == 
NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - /* "numpy.pxd":268 - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - /* "numpy.pxd":269 - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f = "O" - * else: - */ - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - /* "numpy.pxd":270 - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - /*else*/ { - - /* "numpy.pxd":272 - * elif t == NPY_OBJECT: f = "O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * info.format = f - * return - */ - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_35), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - /* "numpy.pxd":273 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f # <<<<<<<<<<<<<< - * return - * else: - */ - __pyx_v_info->format = __pyx_v_f; - - /* "numpy.pxd":274 - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f - * return # <<<<<<<<<<<<<< - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - */ - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - /*else*/ { - - /* "numpy.pxd":276 - * return - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 - */ - __pyx_v_info->format = ((char *)malloc(255)); - - /* "numpy.pxd":277 - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< - * offset = 0 - * f = _util_dtypestring(descr, info.format + 1, - */ - (__pyx_v_info->format[0]) = '^'; - - /* "numpy.pxd":278 - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 # <<<<<<<<<<<<<< - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - */ - __pyx_v_offset = 0; - - /* "numpy.pxd":281 - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - * &offset) # <<<<<<<<<<<<<< - * 
f[0] = 0 # Terminate format string - * - */ - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - /* "numpy.pxd":282 - * info.format + _buffer_format_string_len, - * &offset) - * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - */ - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":284 - * f[0] = 0 # Terminate format string - * - * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - */ - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - /* "numpy.pxd":285 - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< - * stdlib.free(info.format) - * if sizeof(npy_intp) != 
sizeof(Py_ssize_t): - */ - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - /* "numpy.pxd":286 - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) # <<<<<<<<<<<<<< - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) - */ - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":287 - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * stdlib.free(info.strides) - * # info.shape was stored after info.strides in the same block - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":288 - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) # <<<<<<<<<<<<<< - * # info.shape was stored after info.strides in the same block - * - */ - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":764 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - /* "numpy.pxd":765 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r 
= __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":767 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - /* "numpy.pxd":768 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":770 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject 
*__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - /* "numpy.pxd":771 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":773 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - /* "numpy.pxd":774 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = 
PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":776 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - /* "numpy.pxd":777 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":779 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< - * # Recursive utility function used in __getbuffer__ to get format - * # string. The new location in the format string is returned. - */ - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - /* "numpy.pxd":786 - * cdef int delta_offset - * cdef tuple i - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * cdef tuple fields - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":787 - * cdef tuple i - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * cdef tuple fields - * - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":790 - * cdef tuple fields - * - * for childname in descr.names: # <<<<<<<<<<<<<< - * fields = 
descr.fields[childname] - * child, new_offset = fields - */ - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - __Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":791 - * - * for childname in descr.names: - * fields = descr.fields[childname] # <<<<<<<<<<<<<< - * child, new_offset = fields - * - */ - __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "numpy.pxd":792 - * for childname in descr.names: - * fields = descr.fields[childname] - * child, new_offset = fields # <<<<<<<<<<<<<< - * - * if (end - f) - (new_offset - offset[0]) < 15: - */ - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - 
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - /* "numpy.pxd":794 - * child, new_offset = fields - * - * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - */ - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_37), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":797 - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - /* "numpy.pxd":798 - * - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * # One could encode it in the format string and have Cython - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - 
__pyx_t_9 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_38), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "numpy.pxd":809 - * - * # Output padding bytes - * while offset[0] < new_offset: # <<<<<<<<<<<<<< - * f[0] = 120 # "x"; pad byte - * f += 1 - */ - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - /* "numpy.pxd":810 - * # Output padding bytes - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< - * f += 1 - * offset[0] += 1 - */ - (__pyx_v_f[0]) = 120; - - /* "numpy.pxd":811 
- * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte - * f += 1 # <<<<<<<<<<<<<< - * offset[0] += 1 - * - */ - __pyx_v_f = (__pyx_v_f + 1); - - /* "numpy.pxd":812 - * f[0] = 120 # "x"; pad byte - * f += 1 - * offset[0] += 1 # <<<<<<<<<<<<<< - * - * offset[0] += child.itemsize - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - /* "numpy.pxd":814 - * offset[0] += 1 - * - * offset[0] += child.itemsize # <<<<<<<<<<<<<< - * - * if not PyDataType_HASFIELDS(child): - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - /* "numpy.pxd":816 - * offset[0] += child.itemsize - * - * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< - * t = child.type_num - * if end - f < 5: - */ - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - /* "numpy.pxd":817 - * - * if not PyDataType_HASFIELDS(child): - * t = child.type_num # <<<<<<<<<<<<<< - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") - */ - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":818 - * if not PyDataType_HASFIELDS(child): - * t = child.type_num - * if end - f < 5: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short.") - * - */ - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_40), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - /* "numpy.pxd":822 - * - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - */ - __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - /* "numpy.pxd":823 - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - /* "numpy.pxd":824 - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - */ - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - /* "numpy.pxd":825 - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - */ - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if 
(unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - /* "numpy.pxd":826 - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - */ - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - /* "numpy.pxd":827 - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - /* "numpy.pxd":828 - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - /* "numpy.pxd":829 - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - /* "numpy.pxd":830 - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - /* "numpy.pxd":831 - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - /* "numpy.pxd":832 - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - */ - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - /* "numpy.pxd":833 - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - */ - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - /* "numpy.pxd":834 - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - /* "numpy.pxd":835 - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - */ - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":836 - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" - */ - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":837 - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - */ - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":838 - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - /*else*/ { - - /* "numpy.pxd":840 - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * f += 1 - * else: - */ - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_35), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = 
PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - /* "numpy.pxd":841 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * f += 1 # <<<<<<<<<<<<<< - * else: - * # Cython ignores struct boundary information ("T{...}"), - */ - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":845 - * # Cython ignores struct boundary information ("T{...}"), - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< - * return f - * - */ - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "numpy.pxd":846 - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) - * return f # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":961 - * - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * cdef PyObject* baseptr - * if base is None: - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - /* "numpy.pxd":963 - * cdef inline void set_array_base(ndarray arr, object base): - * cdef PyObject* baseptr - * if base is None: # <<<<<<<<<<<<<< - * baseptr = NULL - * else: - */ - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - /* "numpy.pxd":964 - * cdef PyObject* baseptr - * if base is None: - * baseptr = NULL # <<<<<<<<<<<<<< - * else: - * Py_INCREF(base) # important to do this before decref below! - */ - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":966 - * baseptr = NULL - * else: - * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< - * baseptr = base - * Py_XDECREF(arr.base) - */ - Py_INCREF(__pyx_v_base); - - /* "numpy.pxd":967 - * else: - * Py_INCREF(base) # important to do this before decref below! - * baseptr = base # <<<<<<<<<<<<<< - * Py_XDECREF(arr.base) - * arr.base = baseptr - */ - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - /* "numpy.pxd":968 - * Py_INCREF(base) # important to do this before decref below! 
- * baseptr = base - * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< - * arr.base = baseptr - * - */ - Py_XDECREF(__pyx_v_arr->base); - - /* "numpy.pxd":969 - * baseptr = base - * Py_XDECREF(arr.base) - * arr.base = baseptr # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - /* "numpy.pxd":972 - * - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: # <<<<<<<<<<<<<< - * return None - * else: - */ - __pyx_t_1 = (__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - /* "numpy.pxd":973 - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: - * return None # <<<<<<<<<<<<<< - * else: - * return arr.base - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":975 - * return None - * else: - * return arr.base # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_tp_new_5scipy_2io_6matlab_10mio5_utils_VarHeader5(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *p; - PyObject *o = (*t->tp_alloc)(t, 0); - if (!o) return 0; - p = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)o); - p->name = Py_None; 
Py_INCREF(Py_None); - p->dims = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_5scipy_2io_6matlab_10mio5_utils_VarHeader5(PyObject *o) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *p = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)o; - Py_XDECREF(p->name); - Py_XDECREF(p->dims); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_5scipy_2io_6matlab_10mio5_utils_VarHeader5(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *p = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)o; - if (p->name) { - e = (*v)(p->name, a); if (e) return e; - } - if (p->dims) { - e = (*v)(p->dims, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_5scipy_2io_6matlab_10mio5_utils_VarHeader5(PyObject *o) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *p = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *)o; - PyObject* tmp; - tmp = ((PyObject*)p->name); - p->name = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->dims); - p->dims = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyObject *__pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_name(PyObject *o, void *x) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_4name___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_mclass(PyObject *o, void *x) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_6mclass___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_dims(PyObject *o, void *x) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_4dims___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_is_global(PyObject *o, void *x) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_9is_global___get__(o); -} - -static int 
__pyx_setprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_is_global(PyObject *o, PyObject *v, void *x) { - if (v) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_9is_global_1__set__(o, v); - } - else { - PyErr_SetString(PyExc_NotImplementedError, "__del__"); - return -1; - } -} - -static PyMethodDef __pyx_methods_5scipy_2io_6matlab_10mio5_utils_VarHeader5[] = { - {__Pyx_NAMESTR("set_dims"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_set_dims, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_set_dims)}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_5scipy_2io_6matlab_10mio5_utils_VarHeader5[] = { - {(char *)"name", __pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_name, 0, 0, 0}, - {(char *)"mclass", __pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_mclass, 0, 0, 0}, - {(char *)"dims", __pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_dims, 0, 0, 0}, - {(char *)"is_global", __pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_is_global, __pyx_setprop_5scipy_2io_6matlab_10mio5_utils_10VarHeader5_is_global, 0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyNumberMethods __pyx_tp_as_number_VarHeader5 = { - 0, /*nb_add*/ - 0, /*nb_subtract*/ - 0, /*nb_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_divide*/ - #endif - 0, /*nb_remainder*/ - 0, /*nb_divmod*/ - 0, /*nb_power*/ - 0, /*nb_negative*/ - 0, /*nb_positive*/ - 0, /*nb_absolute*/ - 0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_coerce*/ - #endif - 0, /*nb_int*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_long*/ - #else - 0, /*reserved*/ - #endif - 0, /*nb_float*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_oct*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*nb_hex*/ - #endif - 0, /*nb_inplace_add*/ - 0, /*nb_inplace_subtract*/ - 0, /*nb_inplace_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_inplace_divide*/ - 
#endif - 0, /*nb_inplace_remainder*/ - 0, /*nb_inplace_power*/ - 0, /*nb_inplace_lshift*/ - 0, /*nb_inplace_rshift*/ - 0, /*nb_inplace_and*/ - 0, /*nb_inplace_xor*/ - 0, /*nb_inplace_or*/ - 0, /*nb_floor_divide*/ - 0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - #if PY_VERSION_HEX >= 0x02050000 - 0, /*nb_index*/ - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_VarHeader5 = { - 0, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_VarHeader5 = { - 0, /*mp_length*/ - 0, /*mp_subscript*/ - 0, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_VarHeader5 = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_getbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_releasebuffer*/ - #endif -}; - -static PyTypeObject __pyx_type_5scipy_2io_6matlab_10mio5_utils_VarHeader5 = { - PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.io.matlab.mio5_utils.VarHeader5"), /*tp_name*/ - sizeof(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5scipy_2io_6matlab_10mio5_utils_VarHeader5, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #else - 0, /*reserved*/ - #endif - 0, /*tp_repr*/ - &__pyx_tp_as_number_VarHeader5, /*tp_as_number*/ - &__pyx_tp_as_sequence_VarHeader5, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_VarHeader5, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ 
- &__pyx_tp_as_buffer_VarHeader5, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_5scipy_2io_6matlab_10mio5_utils_VarHeader5, /*tp_traverse*/ - __pyx_tp_clear_5scipy_2io_6matlab_10mio5_utils_VarHeader5, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_5scipy_2io_6matlab_10mio5_utils_VarHeader5, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_5scipy_2io_6matlab_10mio5_utils_VarHeader5, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_5scipy_2io_6matlab_10mio5_utils_VarHeader5, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - #if PY_VERSION_HEX >= 0x02060000 - 0, /*tp_version_tag*/ - #endif -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_10mio5_utils_VarReader5 __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5; - -static PyObject *__pyx_tp_new_5scipy_2io_6matlab_10mio5_utils_VarReader5(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *p; - PyObject *o = (*t->tp_alloc)(t, 0); - if (!o) return 0; - p = ((struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)o); - p->__pyx_vtab = __pyx_vtabptr_5scipy_2io_6matlab_10mio5_utils_VarReader5; - p->codecs = Py_None; Py_INCREF(Py_None); - p->uint16_codec = Py_None; Py_INCREF(Py_None); - p->cstream = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)Py_None); Py_INCREF(Py_None); - p->U1_dtype = ((PyArray_Descr *)Py_None); Py_INCREF(Py_None); - p->bool_dtype = ((PyArray_Descr *)Py_None); Py_INCREF(Py_None); - if (__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5___cinit__(o, a, k) < 0) { - Py_DECREF(o); o = 0; - } - 
return o; -} - -static void __pyx_tp_dealloc_5scipy_2io_6matlab_10mio5_utils_VarReader5(PyObject *o) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *p = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)o; - Py_XDECREF(p->codecs); - Py_XDECREF(p->uint16_codec); - Py_XDECREF(((PyObject *)p->cstream)); - Py_XDECREF(((PyObject *)p->U1_dtype)); - Py_XDECREF(((PyObject *)p->bool_dtype)); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_5scipy_2io_6matlab_10mio5_utils_VarReader5(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *p = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)o; - if (p->codecs) { - e = (*v)(p->codecs, a); if (e) return e; - } - if (p->uint16_codec) { - e = (*v)(p->uint16_codec, a); if (e) return e; - } - if (p->cstream) { - e = (*v)(((PyObject*)p->cstream), a); if (e) return e; - } - if (p->U1_dtype) { - e = (*v)(((PyObject*)p->U1_dtype), a); if (e) return e; - } - if (p->bool_dtype) { - e = (*v)(((PyObject*)p->bool_dtype), a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_5scipy_2io_6matlab_10mio5_utils_VarReader5(PyObject *o) { - struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *p = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *)o; - PyObject* tmp; - tmp = ((PyObject*)p->codecs); - p->codecs = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->uint16_codec); - p->uint16_codec = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->cstream); - p->cstream = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)Py_None); Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->U1_dtype); - p->U1_dtype = ((PyArray_Descr *)Py_None); Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->bool_dtype); - p->bool_dtype = ((PyArray_Descr *)Py_None); Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyObject 
*__pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_is_swapped(PyObject *o, void *x) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10is_swapped___get__(o); -} - -static int __pyx_setprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_is_swapped(PyObject *o, PyObject *v, void *x) { - if (v) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10is_swapped_1__set__(o, v); - } - else { - PyErr_SetString(PyExc_NotImplementedError, "__del__"); - return -1; - } -} - -static PyObject *__pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_little_endian(PyObject *o, void *x) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_13little_endian___get__(o); -} - -static int __pyx_setprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_little_endian(PyObject *o, PyObject *v, void *x) { - if (v) { - return __pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_13little_endian_1__set__(o, v); - } - else { - PyErr_SetString(PyExc_NotImplementedError, "__del__"); - return -1; - } -} - -static PyMethodDef __pyx_methods_5scipy_2io_6matlab_10mio5_utils_VarReader5[] = { - {__Pyx_NAMESTR("set_stream"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_1set_stream, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_1set_stream)}, - {__Pyx_NAMESTR("read_tag"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_2read_tag, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_2read_tag)}, - {__Pyx_NAMESTR("read_numeric"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_3read_numeric)}, - {__Pyx_NAMESTR("read_full_tag"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_4read_full_tag, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_4read_full_tag)}, - {__Pyx_NAMESTR("read_header"), 
(PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_5read_header)}, - {__Pyx_NAMESTR("array_from_header"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_6array_from_header)}, - {__Pyx_NAMESTR("read_real_complex"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_7read_real_complex)}, - {__Pyx_NAMESTR("read_char"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_8read_char)}, - {__Pyx_NAMESTR("read_cells"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_9read_cells)}, - {__Pyx_NAMESTR("read_fieldnames"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10read_fieldnames, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_10read_fieldnames)}, - {__Pyx_NAMESTR("read_struct"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_11read_struct)}, - {__Pyx_NAMESTR("read_opaque"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_10mio5_utils_10VarReader5_12read_opaque)}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_5scipy_2io_6matlab_10mio5_utils_VarReader5[] = { - {(char *)"is_swapped", __pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_is_swapped, __pyx_setprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_is_swapped, 0, 0}, - {(char 
*)"little_endian", __pyx_getprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_little_endian, __pyx_setprop_5scipy_2io_6matlab_10mio5_utils_10VarReader5_little_endian, 0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyNumberMethods __pyx_tp_as_number_VarReader5 = { - 0, /*nb_add*/ - 0, /*nb_subtract*/ - 0, /*nb_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_divide*/ - #endif - 0, /*nb_remainder*/ - 0, /*nb_divmod*/ - 0, /*nb_power*/ - 0, /*nb_negative*/ - 0, /*nb_positive*/ - 0, /*nb_absolute*/ - 0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_coerce*/ - #endif - 0, /*nb_int*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_long*/ - #else - 0, /*reserved*/ - #endif - 0, /*nb_float*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_oct*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*nb_hex*/ - #endif - 0, /*nb_inplace_add*/ - 0, /*nb_inplace_subtract*/ - 0, /*nb_inplace_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_inplace_divide*/ - #endif - 0, /*nb_inplace_remainder*/ - 0, /*nb_inplace_power*/ - 0, /*nb_inplace_lshift*/ - 0, /*nb_inplace_rshift*/ - 0, /*nb_inplace_and*/ - 0, /*nb_inplace_xor*/ - 0, /*nb_inplace_or*/ - 0, /*nb_floor_divide*/ - 0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - #if PY_VERSION_HEX >= 0x02050000 - 0, /*nb_index*/ - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_VarReader5 = { - 0, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_VarReader5 = { - 0, /*mp_length*/ - 0, /*mp_subscript*/ - 0, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_VarReader5 = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if 
PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_getbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_releasebuffer*/ - #endif -}; - -static PyTypeObject __pyx_type_5scipy_2io_6matlab_10mio5_utils_VarReader5 = { - PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.io.matlab.mio5_utils.VarReader5"), /*tp_name*/ - sizeof(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5scipy_2io_6matlab_10mio5_utils_VarReader5, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #else - 0, /*reserved*/ - #endif - 0, /*tp_repr*/ - &__pyx_tp_as_number_VarReader5, /*tp_as_number*/ - &__pyx_tp_as_sequence_VarReader5, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_VarReader5, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_VarReader5, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_5scipy_2io_6matlab_10mio5_utils_VarReader5, /*tp_traverse*/ - __pyx_tp_clear_5scipy_2io_6matlab_10mio5_utils_VarReader5, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_5scipy_2io_6matlab_10mio5_utils_VarReader5, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_5scipy_2io_6matlab_10mio5_utils_VarReader5, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_5scipy_2io_6matlab_10mio5_utils_VarReader5, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - #if 
PY_VERSION_HEX >= 0x02060000 - 0, /*tp_version_tag*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {__Pyx_NAMESTR("byteswap_u4"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_10mio5_utils_byteswap_u4, METH_O, __Pyx_DOCSTR(0)}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("mio5_utils"), - __Pyx_DOCSTR(__pyx_k_41), /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, - {&__pyx_kp_s_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 0, 1, 0}, - {&__pyx_kp_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 0}, - {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, - {&__pyx_kp_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 0}, - {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0}, - {&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0}, - {&__pyx_kp_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 0}, - {&__pyx_kp_s_23, __pyx_k_23, sizeof(__pyx_k_23), 0, 0, 1, 0}, - {&__pyx_kp_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 0}, - {&__pyx_kp_u_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 1, 0, 0}, - {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0}, - {&__pyx_kp_u_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 1, 0, 0}, - {&__pyx_kp_u_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 1, 0, 0}, - {&__pyx_kp_u_35, __pyx_k_35, sizeof(__pyx_k_35), 0, 1, 0, 0}, - {&__pyx_kp_u_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 1, 0, 0}, - {&__pyx_kp_u_39, __pyx_k_39, sizeof(__pyx_k_39), 0, 1, 0, 0}, - {&__pyx_n_s_42, __pyx_k_42, sizeof(__pyx_k_42), 0, 0, 1, 1}, - {&__pyx_n_s_43, __pyx_k_43, sizeof(__pyx_k_43), 0, 0, 1, 1}, - {&__pyx_n_s_44, __pyx_k_44, sizeof(__pyx_k_44), 0, 0, 1, 1}, - {&__pyx_n_s_45, __pyx_k_45, sizeof(__pyx_k_45), 0, 0, 1, 1}, - {&__pyx_n_s_46, __pyx_k_46, 
sizeof(__pyx_k_46), 0, 0, 1, 1}, - {&__pyx_n_s_47, __pyx_k_47, sizeof(__pyx_k_47), 0, 0, 1, 1}, - {&__pyx_kp_s_48, __pyx_k_48, sizeof(__pyx_k_48), 0, 0, 1, 0}, - {&__pyx_kp_s_49, __pyx_k_49, sizeof(__pyx_k_49), 0, 0, 1, 0}, - {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0}, - {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0}, - {&__pyx_n_s__F, __pyx_k__F, sizeof(__pyx_k__F), 0, 0, 1, 1}, - {&__pyx_n_s__MDTYPES, __pyx_k__MDTYPES, sizeof(__pyx_k__MDTYPES), 0, 0, 1, 1}, - {&__pyx_n_s__MatlabFunction, __pyx_k__MatlabFunction, sizeof(__pyx_k__MatlabFunction), 0, 0, 1, 1}, - {&__pyx_n_s__MatlabObject, __pyx_k__MatlabObject, sizeof(__pyx_k__MatlabObject), 0, 0, 1, 1}, - {&__pyx_n_s__MatlabOpaque, __pyx_k__MatlabOpaque, sizeof(__pyx_k__MatlabOpaque), 0, 0, 1, 1}, - {&__pyx_n_s__OPAQUE_DTYPE, __pyx_k__OPAQUE_DTYPE, sizeof(__pyx_k__OPAQUE_DTYPE), 0, 0, 1, 1}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__T, __pyx_k__T, sizeof(__pyx_k__T), 0, 0, 1, 1}, - {&__pyx_n_s__TypeError, __pyx_k__TypeError, sizeof(__pyx_k__TypeError), 0, 0, 1, 1}, - {&__pyx_n_s__U, __pyx_k__U, sizeof(__pyx_k__U), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____dict__, __pyx_k____dict__, sizeof(__pyx_k____dict__), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s___fieldnames, __pyx_k___fieldnames, sizeof(__pyx_k___fieldnames), 0, 0, 1, 1}, - {&__pyx_n_s__arr, __pyx_k__arr, sizeof(__pyx_k__arr), 0, 0, 1, 1}, - {&__pyx_n_s__array, __pyx_k__array, sizeof(__pyx_k__array), 0, 0, 1, 1}, - {&__pyx_n_s__array_from_header, __pyx_k__array_from_header, sizeof(__pyx_k__array_from_header), 0, 0, 1, 1}, - {&__pyx_n_s__asbytes, __pyx_k__asbytes, sizeof(__pyx_k__asbytes), 0, 0, 1, 1}, - {&__pyx_n_s__ascii, __pyx_k__ascii, 
sizeof(__pyx_k__ascii), 0, 0, 1, 1}, - {&__pyx_n_s__asstr, __pyx_k__asstr, sizeof(__pyx_k__asstr), 0, 0, 1, 1}, - {&__pyx_n_s__astype, __pyx_k__astype, sizeof(__pyx_k__astype), 0, 0, 1, 1}, - {&__pyx_n_s__bool, __pyx_k__bool, sizeof(__pyx_k__bool), 0, 0, 1, 1}, - {&__pyx_n_s__buffer, __pyx_k__buffer, sizeof(__pyx_k__buffer), 0, 0, 1, 1}, - {&__pyx_n_s__byte_order, __pyx_k__byte_order, sizeof(__pyx_k__byte_order), 0, 0, 1, 1}, - {&__pyx_n_s__byteorder, __pyx_k__byteorder, sizeof(__pyx_k__byteorder), 0, 0, 1, 1}, - {&__pyx_n_s__c16, __pyx_k__c16, sizeof(__pyx_k__c16), 0, 0, 1, 1}, - {&__pyx_n_s__c8, __pyx_k__c8, sizeof(__pyx_k__c8), 0, 0, 1, 1}, - {&__pyx_n_s__chars_as_strings, __pyx_k__chars_as_strings, sizeof(__pyx_k__chars_as_strings), 0, 0, 1, 1}, - {&__pyx_n_s__chars_to_strings, __pyx_k__chars_to_strings, sizeof(__pyx_k__chars_to_strings), 0, 0, 1, 1}, - {&__pyx_n_s__classes, __pyx_k__classes, sizeof(__pyx_k__classes), 0, 0, 1, 1}, - {&__pyx_n_s__codecs, __pyx_k__codecs, sizeof(__pyx_k__codecs), 0, 0, 1, 1}, - {&__pyx_n_s__copy, __pyx_k__copy, sizeof(__pyx_k__copy), 0, 0, 1, 1}, - {&__pyx_n_s__csc_matrix, __pyx_k__csc_matrix, sizeof(__pyx_k__csc_matrix), 0, 0, 1, 1}, - {&__pyx_n_s__decode, __pyx_k__decode, sizeof(__pyx_k__decode), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__dtypes, __pyx_k__dtypes, sizeof(__pyx_k__dtypes), 0, 0, 1, 1}, - {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1}, - {&__pyx_n_s__encode, __pyx_k__encode, sizeof(__pyx_k__encode), 0, 0, 1, 1}, - {&__pyx_n_s__enumerate, __pyx_k__enumerate, sizeof(__pyx_k__enumerate), 0, 0, 1, 1}, - {&__pyx_n_s__header, __pyx_k__header, sizeof(__pyx_k__header), 0, 0, 1, 1}, - {&__pyx_n_s__imag, __pyx_k__imag, sizeof(__pyx_k__imag), 0, 0, 1, 1}, - {&__pyx_n_s__items, __pyx_k__items, sizeof(__pyx_k__items), 0, 0, 1, 1}, - {&__pyx_n_s__itemsize, __pyx_k__itemsize, sizeof(__pyx_k__itemsize), 0, 0, 1, 1}, - {&__pyx_n_s__little, 
__pyx_k__little, sizeof(__pyx_k__little), 0, 0, 1, 1}, - {&__pyx_n_s__mat_dtype, __pyx_k__mat_dtype, sizeof(__pyx_k__mat_dtype), 0, 0, 1, 1}, - {&__pyx_n_s__mat_stream, __pyx_k__mat_stream, sizeof(__pyx_k__mat_stream), 0, 0, 1, 1}, - {&__pyx_n_s__mat_struct, __pyx_k__mat_struct, sizeof(__pyx_k__mat_struct), 0, 0, 1, 1}, - {&__pyx_n_s__mio5p, __pyx_k__mio5p, sizeof(__pyx_k__mio5p), 0, 0, 1, 1}, - {&__pyx_n_s__miob, __pyx_k__miob, sizeof(__pyx_k__miob), 0, 0, 1, 1}, - {&__pyx_n_s__native_code, __pyx_k__native_code, sizeof(__pyx_k__native_code), 0, 0, 1, 1}, - {&__pyx_n_s__ndarray, __pyx_k__ndarray, sizeof(__pyx_k__ndarray), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__object, __pyx_k__object, sizeof(__pyx_k__object), 0, 0, 1, 1}, - {&__pyx_n_s__order, __pyx_k__order, sizeof(__pyx_k__order), 0, 0, 1, 1}, - {&__pyx_n_s__preader, __pyx_k__preader, sizeof(__pyx_k__preader), 0, 0, 1, 1}, - {&__pyx_n_s__process, __pyx_k__process, sizeof(__pyx_k__process), 0, 0, 1, 1}, - {&__pyx_n_s__pycopy, __pyx_k__pycopy, sizeof(__pyx_k__pycopy), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__read_cells, __pyx_k__read_cells, sizeof(__pyx_k__read_cells), 0, 0, 1, 1}, - {&__pyx_n_s__read_char, __pyx_k__read_char, sizeof(__pyx_k__read_char), 0, 0, 1, 1}, - {&__pyx_n_s__read_header, __pyx_k__read_header, sizeof(__pyx_k__read_header), 0, 0, 1, 1}, - {&__pyx_n_s__read_numeric, __pyx_k__read_numeric, sizeof(__pyx_k__read_numeric), 0, 0, 1, 1}, - {&__pyx_n_s__read_opaque, __pyx_k__read_opaque, sizeof(__pyx_k__read_opaque), 0, 0, 1, 1}, - {&__pyx_n_s__read_real_complex, __pyx_k__read_real_complex, sizeof(__pyx_k__read_real_complex), 0, 0, 1, 1}, - {&__pyx_n_s__read_struct, __pyx_k__read_struct, sizeof(__pyx_k__read_struct), 0, 0, 1, 1}, - {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, - 
{&__pyx_n_s__s0, __pyx_k__s0, sizeof(__pyx_k__s0), 0, 0, 1, 1}, - {&__pyx_n_s__s1, __pyx_k__s1, sizeof(__pyx_k__s1), 0, 0, 1, 1}, - {&__pyx_n_s__s2, __pyx_k__s2, sizeof(__pyx_k__s2), 0, 0, 1, 1}, - {&__pyx_n_s__scipy, __pyx_k__scipy, sizeof(__pyx_k__scipy), 0, 0, 1, 1}, - {&__pyx_n_s__set_stream, __pyx_k__set_stream, sizeof(__pyx_k__set_stream), 0, 0, 1, 1}, - {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, - {&__pyx_n_s__sparse, __pyx_k__sparse, sizeof(__pyx_k__sparse), 0, 0, 1, 1}, - {&__pyx_n_s__squeeze_element, __pyx_k__squeeze_element, sizeof(__pyx_k__squeeze_element), 0, 0, 1, 1}, - {&__pyx_n_s__squeeze_me, __pyx_k__squeeze_me, sizeof(__pyx_k__squeeze_me), 0, 0, 1, 1}, - {&__pyx_n_s__struct_as_record, __pyx_k__struct_as_record, sizeof(__pyx_k__struct_as_record), 0, 0, 1, 1}, - {&__pyx_n_s__swapped_code, __pyx_k__swapped_code, sizeof(__pyx_k__swapped_code), 0, 0, 1, 1}, - {&__pyx_n_s__sys, __pyx_k__sys, sizeof(__pyx_k__sys), 0, 0, 1, 1}, - {&__pyx_n_s__sys_is_le, __pyx_k__sys_is_le, sizeof(__pyx_k__sys_is_le), 0, 0, 1, 1}, - {&__pyx_n_s__tostring, __pyx_k__tostring, sizeof(__pyx_k__tostring), 0, 0, 1, 1}, - {&__pyx_n_s__uint16_codec, __pyx_k__uint16_codec, sizeof(__pyx_k__uint16_codec), 0, 0, 1, 1}, - {&__pyx_n_s__uint16_len, __pyx_k__uint16_len, sizeof(__pyx_k__uint16_len), 0, 0, 1, 1}, - {&__pyx_n_s__uint8, __pyx_k__uint8, sizeof(__pyx_k__uint8), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_enumerate = __Pyx_GetName(__pyx_b, __pyx_n_s__enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_object = __Pyx_GetName(__pyx_b, __pyx_n_s__object); if (!__pyx_builtin_object) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "scipy/io/matlab/mio5_utils.pyx":208 - * # cache correctly byte ordered dtypes - * if self.little_endian: - * self.U1_dtype = np.dtype('U1') - */ - __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_3)); - PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_s_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); - - /* "scipy/io/matlab/mio5_utils.pyx":210 - * self.U1_dtype = np.dtype('U1') # <<<<<<<<<<<<<< - * bool_dtype = np.dtype('bool') - * - */ - __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_5)); - PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_s_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5)); - __Pyx_GIVEREF(((PyObject 
*)__pyx_k_tuple_6)); - - /* "scipy/io/matlab/mio5_utils.pyx":211 - * else: - * self.U1_dtype = np.dtype('>U1') - * bool_dtype = np.dtype('bool') # <<<<<<<<<<<<<< - * - * def set_stream(self, fobj): - */ - __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_7)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__bool)); - PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, ((PyObject *)__pyx_n_s__bool)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__bool)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7)); - - /* "scipy/io/matlab/mio5_utils.pyx":301 - * mdtype_sde = mdtype & 0xffff - * if byte_count_sde > 4: - * raise ValueError('Error in SDE format data') # <<<<<<<<<<<<<< - * return -1 - * u4_ptr[0] = u4s[1] - */ - __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_9)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_8)); - PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_s_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); - - /* "scipy/io/matlab/mio5_utils.pyx":458 - * data = self.read_element(&mdtype, &byte_count, &ptr) - * if mdtype != miINT8: - * raise TypeError('Expecting miINT8 as data type') # <<<<<<<<<<<<<< - * return data - * - */ - __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_10)); - PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_s_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); - - /* "scipy/io/matlab/mio5_utils.pyx":480 - * self.read_element_into(&mdtype, &byte_count, 
int32p) - * if mdtype != miINT32: - * raise TypeError('Expecting miINT32 as data type') # <<<<<<<<<<<<<< - * return -1 - * cdef int n_ints = byte_count // 4 - */ - __pyx_k_tuple_13 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 480; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_13)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_12)); - PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, ((PyObject *)__pyx_kp_s_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13)); - - /* "scipy/io/matlab/mio5_utils.pyx":615 - * self.cread_full_tag(&mdtype, &byte_count) - * if mdtype != miMATRIX: - * raise TypeError('Expecting matrix here') # <<<<<<<<<<<<<< - * if byte_count == 0: # empty matrix - * if process and self.squeeze_me: - */ - __pyx_k_tuple_16 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 615; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_16)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_15)); - PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, ((PyObject *)__pyx_kp_s_15)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_15)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); - - /* "scipy/io/matlab/mio5_utils.pyx":701 - * # 1j`` upcasts to c16 regardless of input type. 
- * if res.itemsize == 4: - * res = res.astype('c8') # <<<<<<<<<<<<<< - * else: - * res = res.astype('c16') - */ - __pyx_k_tuple_17 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_17)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__c8)); - PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_n_s__c8)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c8)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17)); - - /* "scipy/io/matlab/mio5_utils.pyx":703 - * res = res.astype('c8') - * else: - * res = res.astype('c16') # <<<<<<<<<<<<<< - * res.imag = res_j - * else: - */ - __pyx_k_tuple_18 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_18)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__c16)); - PyTuple_SET_ITEM(__pyx_k_tuple_18, 0, ((PyObject *)__pyx_n_s__c16)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c16)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_18)); - - /* "scipy/io/matlab/mio5_utils.pyx":707 - * else: - * res = self.read_numeric() - * return res.reshape(header.dims[::-1]).T # <<<<<<<<<<<<<< - * - * cdef object read_sparse(self, VarHeader5 header): - */ - __pyx_k_slice_19 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_k_slice_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_19); - __Pyx_GIVEREF(__pyx_k_slice_19); - - /* "scipy/io/matlab/mio5_utils.pyx":818 - * cnp.ndarray[object, ndim=1] result - * # Account for fortran indexing of cells - * tupdims = tuple(header.dims[::-1]) # <<<<<<<<<<<<<< - * cdef size_t length = self.size_from_header(header) - * result = np.empty(length, dtype=object) - */ - __pyx_k_slice_22 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_k_slice_22)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 818; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_22); - __Pyx_GIVEREF(__pyx_k_slice_22); - - /* "scipy/io/matlab/mio5_utils.pyx":841 - * cdef int res = self.read_into_int32s(&namelength) - * if res != 1: - * raise ValueError('Only one value for namelength') # <<<<<<<<<<<<<< - * cdef object names = self.read_int8_string() - * field_names = [] - */ - __pyx_k_tuple_24 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_24)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_23)); - PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, ((PyObject *)__pyx_kp_s_23)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_23)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24)); - - /* "scipy/io/matlab/mio5_utils.pyx":888 - * cdef object field_names = self.cread_fieldnames(&n_names) - * # Prepare struct array - * tupdims = tuple(header.dims[::-1]) # <<<<<<<<<<<<<< - * cdef size_t length = self.size_from_header(header) - * if self.struct_as_record: # to record arrays - */ - __pyx_k_slice_26 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_k_slice_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_26); - __Pyx_GIVEREF(__pyx_k_slice_26); - - /* "scipy/io/matlab/mio5_utils.pyx":929 - * See the comments at the beginning of ``mio5.py`` - * ''' - * cdef cnp.ndarray res = np.empty((1,), dtype=OPAQUE_DTYPE) # <<<<<<<<<<<<<< - * res[0]['s0'] = self.read_int8_string() - * res[0]['s1'] = self.read_int8_string() - */ - __pyx_k_tuple_27 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_27)); - __Pyx_INCREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, __pyx_int_1); - 
__Pyx_GIVEREF(__pyx_int_1); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); - __pyx_k_tuple_28 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 929; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_28)); - __Pyx_INCREF(((PyObject *)__pyx_k_tuple_27)); - PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, ((PyObject *)__pyx_k_tuple_27)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28)); - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_k_tuple_30 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_30)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_30)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_29)); - PyTuple_SET_ITEM(__pyx_k_tuple_30, 0, ((PyObject *)__pyx_kp_u_29)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_29)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30)); - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_k_tuple_32 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_32)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_32)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_31)); - PyTuple_SET_ITEM(__pyx_k_tuple_32, 0, ((PyObject *)__pyx_kp_u_31)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_31)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32)); - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and 
little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_k_tuple_34 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_34)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_34)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_33)); - PyTuple_SET_ITEM(__pyx_k_tuple_34, 0, ((PyObject *)__pyx_kp_u_33)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_33)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_34)); - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_k_tuple_37 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_37)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_36)); - PyTuple_SET_ITEM(__pyx_k_tuple_37, 0, ((PyObject *)__pyx_kp_u_36)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_36)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_k_tuple_38 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_38)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_38)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_33)); - PyTuple_SET_ITEM(__pyx_k_tuple_38, 0, ((PyObject *)__pyx_kp_u_33)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_33)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_38)); - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_k_tuple_40 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_40)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_40)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_39)); - PyTuple_SET_ITEM(__pyx_k_tuple_40, 0, ((PyObject *)__pyx_kp_u_39)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_39)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_40)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initmio5_utils(void); /*proto*/ -PyMODINIT_FUNC 
initmio5_utils(void) -#else -PyMODINIT_FUNC PyInit_mio5_utils(void); /*proto*/ -PyMODINIT_FUNC PyInit_mio5_utils(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_mio5_utils(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? 
*/ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("mio5_utils"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_41), 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. ---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__io__matlab__mio5_utils) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - __pyx_v_5scipy_2io_6matlab_10mio5_utils_OPAQUE_DTYPE = ((PyArray_Descr *)Py_None); Py_INCREF(Py_None); - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - if (PyType_Ready(&__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarHeader5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "VarHeader5", (PyObject *)&__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarHeader5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarHeader5 = &__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarHeader5; - __pyx_vtabptr_5scipy_2io_6matlab_10mio5_utils_VarReader5 = &__pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.cread_tag = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *, char *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_cread_tag; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_element = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *, void **, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_element_into = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *, void *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_element_into; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_numeric = (PyArrayObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_numeric; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_int8_string = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_int8_string; - 
__pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_into_int32s = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_int32_t *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_into_int32s; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.cread_full_tag = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, __pyx_t_5numpy_uint32_t *, __pyx_t_5numpy_uint32_t *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_cread_full_tag; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_header = (struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_header; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.size_from_header = (size_t (*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_size_from_header; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_mi_matrix = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_mi_matrix; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.array_from_header = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_array_from_header; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_real_complex = (PyArrayObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 
*, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_real_complex; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_sparse = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_sparse; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_char = (PyArrayObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_char; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_cells = (PyArrayObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_cells; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.cread_fieldnames = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, int *))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_cread_fieldnames; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_struct = (PyArrayObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_struct; - __pyx_vtable_5scipy_2io_6matlab_10mio5_utils_VarReader5.read_opaque = (PyArrayObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarReader5 *, struct __pyx_obj_5scipy_2io_6matlab_10mio5_utils_VarHeader5 *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_10mio5_utils_10VarReader5_read_opaque; - if (PyType_Ready(&__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarReader5) < 0) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarReader5.tp_dict, __pyx_vtabptr_5scipy_2io_6matlab_10mio5_utils_VarReader5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "VarReader5", (PyObject *)&__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarReader5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_2io_6matlab_10mio5_utils_VarReader5 = &__pyx_type_5scipy_2io_6matlab_10mio5_utils_VarReader5; - /*--- Type import code ---*/ - __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray = 
__Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream = __Pyx_ImportType("scipy.io.matlab.streams", "GenericStream", sizeof(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream), 1); if (unlikely(!__pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream = (struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream*)__Pyx_GetVtable(__pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream->tp_dict); if (unlikely(!__pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - __pyx_t_1 = __Pyx_ImportModule("scipy.io.matlab.streams"); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ImportFunction(__pyx_t_1, "make_stream", (void (**)(void))&__pyx_f_5scipy_2io_6matlab_7streams_make_stream, "struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *(PyObject *, int __pyx_skip_dispatch)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; - /*--- Execution code ---*/ - - /* "scipy/io/matlab/mio5_utils.pyx":15 - * ''' - * - * import sys # <<<<<<<<<<<<<< - * - * from copy import copy as pycopy - */ - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__sys), 0, -1); 
if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__sys, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":17 - * import sys - * - * from copy import copy as pycopy # <<<<<<<<<<<<<< - * - * from libc.stdlib cimport calloc, free - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__copy)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s__copy)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__copy)); - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s__copy), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__copy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pycopy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":32 - * PyBytes_FromStringAndSize - * - * import numpy as np # <<<<<<<<<<<<<< - * from numpy.compat import asbytes, asstr - * cimport numpy as cnp - */ - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":33 - * - * import numpy as np - * from numpy.compat import asbytes, asstr # <<<<<<<<<<<<<< - * cimport numpy as cnp - * - */ - __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__asbytes)); - PyList_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_n_s__asbytes)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__asbytes)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__asstr)); - PyList_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_n_s__asstr)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__asstr)); - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_42), ((PyObject *)__pyx_t_3), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__asbytes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__asbytes, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__asstr); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__asstr, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":52 - * # Numpy must be initialized before any code using the numpy C-API - * # directly - * cnp.import_array() # <<<<<<<<<<<<<< - * - * # Constant from numpy - max number of array dimensions - */ - import_array(); - - /* "scipy/io/matlab/mio5_utils.pyx":62 - * - * cimport streams - * import scipy.io.matlab.miobase as miob # <<<<<<<<<<<<<< - * from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings - * import scipy.io.matlab.mio5_params as mio5p - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_n_s_44)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s_44)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s_44)); - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s_43), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__miob, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":63 - * cimport streams - * import scipy.io.matlab.miobase as miob - * from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings # <<<<<<<<<<<<<< - * import scipy.io.matlab.mio5_params as mio5p - * import scipy.sparse - */ - __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__squeeze_element)); - 
PyList_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_n_s__squeeze_element)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__squeeze_element)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__chars_to_strings)); - PyList_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_n_s__chars_to_strings)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__chars_to_strings)); - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_45), ((PyObject *)__pyx_t_3), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__squeeze_element); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__squeeze_element, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__chars_to_strings); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__chars_to_strings, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":64 - * import scipy.io.matlab.miobase as miob - * from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings - * import scipy.io.matlab.mio5_params as mio5p # <<<<<<<<<<<<<< - * import scipy.sparse - * - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - 
__Pyx_INCREF(((PyObject *)__pyx_n_s_44)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s_44)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s_44)); - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s_46), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__mio5p, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":65 - * from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings - * import scipy.io.matlab.mio5_params as mio5p - * import scipy.sparse # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s_47), 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__scipy, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":105 - * mxOBJECT_CLASS_FROM_MATRIX_H = 18 - * - * sys_is_le = sys.byteorder == 'little' # <<<<<<<<<<<<<< - * native_code = sys_is_le and '<' or '>' - * swapped_code = sys_is_le and '>' or '<' - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__byteorder); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = 
__Pyx_PyString_Equals(__pyx_t_2, ((PyObject *)__pyx_n_s__little), Py_EQ); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__sys_is_le, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":106 - * - * sys_is_le = sys.byteorder == 'little' - * native_code = sys_is_le and '<' or '>' # <<<<<<<<<<<<<< - * swapped_code = sys_is_le and '>' or '<' - * - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys_is_le); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_t_4) { - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_INCREF(((PyObject *)__pyx_kp_s_48)); - __pyx_t_3 = __pyx_kp_s_48; - } else { - __pyx_t_3 = __pyx_t_2; - __pyx_t_2 = 0; - } - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (!__pyx_t_4) { - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_INCREF(((PyObject *)__pyx_kp_s_49)); - __pyx_t_2 = __pyx_kp_s_49; - } else { - __pyx_t_2 = __pyx_t_3; - __pyx_t_3 = 0; - } - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__native_code, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); 
__pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":107 - * sys_is_le = sys.byteorder == 'little' - * native_code = sys_is_le and '<' or '>' - * swapped_code = sys_is_le and '>' or '<' # <<<<<<<<<<<<<< - * - * cdef cnp.dtype OPAQUE_DTYPE = mio5p.OPAQUE_DTYPE - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys_is_le); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_t_4) { - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_INCREF(((PyObject *)__pyx_kp_s_49)); - __pyx_t_3 = __pyx_kp_s_49; - } else { - __pyx_t_3 = __pyx_t_2; - __pyx_t_2 = 0; - } - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (!__pyx_t_4) { - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_INCREF(((PyObject *)__pyx_kp_s_48)); - __pyx_t_2 = __pyx_kp_s_48; - } else { - __pyx_t_2 = __pyx_t_3; - __pyx_t_3 = 0; - } - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__swapped_code, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":109 - * swapped_code = sys_is_le and '>' or '<' - * - * cdef cnp.dtype OPAQUE_DTYPE = mio5p.OPAQUE_DTYPE # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__mio5p); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__OPAQUE_DTYPE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
- __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XGOTREF(((PyObject *)__pyx_v_5scipy_2io_6matlab_10mio5_utils_OPAQUE_DTYPE)); - __Pyx_DECREF(((PyObject *)__pyx_v_5scipy_2io_6matlab_10mio5_utils_OPAQUE_DTYPE)); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_v_5scipy_2io_6matlab_10mio5_utils_OPAQUE_DTYPE = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio5_utils.pyx":1 - * ''' Cython mio5 utility routines (-*- python -*- like) # <<<<<<<<<<<<<< - * - * ''' - */ - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_3)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - - /* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.io.matlab.mio5_utils", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.io.matlab.mio5_utils"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = 
PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) 
{ - values[name-argnames] = value; - } else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } else if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. 
*/ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. */ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if 
(!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { - long r = a % b; - r += ((r != 0) & ((r ^ b) < 0)) * b; - return r; -} - -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { - PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact) -{ - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (none_allowed && obj == Py_None) return 1; - else if (exact) { - if (Py_TYPE(obj) == type) return 1; - } - else { - if (PyObject_TypeCheck(obj, type)) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - - -static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { - unsigned 
int n = 1; - return *(unsigned char*)(&n) != 0; -} - -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; -} __Pyx_BufFmt_Context; - -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} - -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t < '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} - -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} - -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case 'b': return "'char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 0: return "end"; - default: return "unparseable format string"; - } -} - -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif - -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; - case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; - case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); - case 'O': return 'O'; - case 'P': return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} - -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset; - if (ctx->enc_type == 0) return 0; - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - } - - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - /* special case -- treat as struct rather than complex number */ - 
size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - - ctx->fmt_offset += size; - - --ctx->enc_count; /* Consume from buffer string */ - - /* Done checking, move to next field, pushing or popping struct stack if needed */ - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; /* breaks both loops as ctx->enc_count == 0 */ - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; /* empty struct */ - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} - -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case 10: - case 13: - ++ts; - break; - case '<': - if (!__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian 
buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': /* substruct */ - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - } - break; - case '}': /* end of substruct; either repeat or move on */ - ++ts; - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } /* fall through */ - case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': - if (ctx->enc_type == *ts && got_Z == ctx->is_complex && - ctx->enc_packmode == ctx->new_packmode) { - /* Continue pooling same type */ - ctx->enc_count += ctx->new_count; - } else { - /* New type */ - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - } - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - default: 
- { - int number = __Pyx_BufFmt_ParseNumber(&ts); - if (number == -1) { /* First char was not a digit */ - PyErr_Format(PyExc_ValueError, - "Does not understand character buffer dtype format string ('%c')", *ts); - return NULL; - } - ctx->new_count = (size_t)number; - } - } - } -} - -static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { - if (obj == Py_None || obj == NULL) { - __Pyx_ZeroBuffer(buf); - return 0; - } - buf->buf = NULL; - if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; - if (buf->ndim != nd) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if ((unsigned)buf->itemsize != dtype->size) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_ZeroBuffer(buf); - return -1; -} - -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (info->buf == NULL) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} - -static void __Pyx_RaiseBufferFallbackError(void) { - PyErr_Format(PyExc_ValueError, - "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); -} - -static void __Pyx_RaiseBufferIndexError(int axis) { - PyErr_Format(PyExc_IndexError, - "Out of bounds on buffer access (axis %d)", axis); -} - - -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { - Py_ssize_t q = a / b; - Py_ssize_t r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); - else { - PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; - } -} - -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject* obj = view->obj; - if (obj) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} - #endif - if 
(PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); - Py_DECREF(obj); - view->obj = NULL; - } -} - -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { - if (s1 == s2) { /* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */ - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - if (PyBytes_GET_SIZE(s1) != PyBytes_GET_SIZE(s2)) { - return (equals == Py_NE); - } else if (PyBytes_GET_SIZE(s1) == 1) { - if (equals == Py_EQ) - return (PyBytes_AS_STRING(s1)[0] == PyBytes_AS_STRING(s2)[0]); - else - return (PyBytes_AS_STRING(s1)[0] != PyBytes_AS_STRING(s2)[0]); - } else { - int result = memcmp(PyBytes_AS_STRING(s1), PyBytes_AS_STRING(s2), 
(size_t)PyBytes_GET_SIZE(s1)); - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -} - -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { - if (s1 == s2) { /* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */ - return (equals == Py_EQ); - } else if (PyUnicode_CheckExact(s1) & PyUnicode_CheckExact(s2)) { - if (PyUnicode_GET_SIZE(s1) != PyUnicode_GET_SIZE(s2)) { - return (equals == Py_NE); - } else if (PyUnicode_GET_SIZE(s1) == 1) { - if (equals == Py_EQ) - return (PyUnicode_AS_UNICODE(s1)[0] == PyUnicode_AS_UNICODE(s2)[0]); - else - return (PyUnicode_AS_UNICODE(s1)[0] != PyUnicode_AS_UNICODE(s2)[0]); - } else { - int result = PyUnicode_Compare(s1, s2); - if ((result == -1) && unlikely(PyErr_Occurred())) - return -1; - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyUnicode_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyUnicode_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_uint32(npy_uint32 val) { - const npy_uint32 neg_one = (npy_uint32)-1, const_zero = (npy_uint32)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(npy_uint32) == sizeof(char)) || - (sizeof(npy_uint32) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(npy_uint32) == sizeof(int)) || - (sizeof(npy_uint32) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(npy_uint32) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(npy_uint32), - little, !is_unsigned); - } -} - -static CYTHON_INLINE npy_int32 __Pyx_PyInt_from_py_npy_int32(PyObject* x) { - const npy_int32 neg_one = (npy_int32)-1, const_zero = (npy_int32)0; - const int is_unsigned = const_zero < neg_one; - if (sizeof(npy_int32) == sizeof(char)) { - if (is_unsigned) - return (npy_int32)__Pyx_PyInt_AsUnsignedChar(x); - else - return (npy_int32)__Pyx_PyInt_AsSignedChar(x); - } else if (sizeof(npy_int32) == sizeof(short)) { - if (is_unsigned) - return (npy_int32)__Pyx_PyInt_AsUnsignedShort(x); - else - return (npy_int32)__Pyx_PyInt_AsSignedShort(x); - } else if (sizeof(npy_int32) == sizeof(int)) { - if (is_unsigned) - return 
(npy_int32)__Pyx_PyInt_AsUnsignedInt(x); - else - return (npy_int32)__Pyx_PyInt_AsSignedInt(x); - } else if (sizeof(npy_int32) == sizeof(long)) { - if (is_unsigned) - return (npy_int32)__Pyx_PyInt_AsUnsignedLong(x); - else - return (npy_int32)__Pyx_PyInt_AsSignedLong(x); - } else if (sizeof(npy_int32) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return (npy_int32)__Pyx_PyInt_AsUnsignedLongLong(x); - else - return (npy_int32)__Pyx_PyInt_AsSignedLongLong(x); - } else { - npy_int32 val; - PyObject *v = __Pyx_PyNumber_Int(x); - #if PY_VERSION_HEX < 0x03000000 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (npy_int32)-1; - } -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_int32(npy_int32 val) { - const npy_int32 neg_one = (npy_int32)-1, const_zero = (npy_int32)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(npy_int32) == sizeof(char)) || - (sizeof(npy_int32) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(npy_int32) == sizeof(int)) || - (sizeof(npy_int32) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(npy_int32) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(npy_int32), - little, !is_unsigned); - } -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE 
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int 
__Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { 
- if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return 
(PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE npy_uint32 __Pyx_PyInt_from_py_npy_uint32(PyObject* x) { - const npy_uint32 neg_one = (npy_uint32)-1, const_zero = (npy_uint32)0; - const int is_unsigned = const_zero < neg_one; - if (sizeof(npy_uint32) == sizeof(char)) { - if (is_unsigned) - return (npy_uint32)__Pyx_PyInt_AsUnsignedChar(x); - else - return (npy_uint32)__Pyx_PyInt_AsSignedChar(x); - } else if (sizeof(npy_uint32) == sizeof(short)) { - if (is_unsigned) - return (npy_uint32)__Pyx_PyInt_AsUnsignedShort(x); - else - return (npy_uint32)__Pyx_PyInt_AsSignedShort(x); - } else if (sizeof(npy_uint32) == sizeof(int)) { - if (is_unsigned) - return (npy_uint32)__Pyx_PyInt_AsUnsignedInt(x); - else - return (npy_uint32)__Pyx_PyInt_AsSignedInt(x); - } else if (sizeof(npy_uint32) == sizeof(long)) { - if (is_unsigned) - return (npy_uint32)__Pyx_PyInt_AsUnsignedLong(x); - else - return (npy_uint32)__Pyx_PyInt_AsSignedLong(x); - } else if (sizeof(npy_uint32) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return (npy_uint32)__Pyx_PyInt_AsUnsignedLongLong(x); - else - return (npy_uint32)__Pyx_PyInt_AsSignedLongLong(x); - } else { - npy_uint32 val; - PyObject *v = __Pyx_PyNumber_Int(x); - #if PY_VERSION_HEX < 0x03000000 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (npy_uint32)-1; - } -} - -static int 
__Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); - Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may 
indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -static void* __Pyx_GetVtable(PyObject *dict) { - void* ptr; - PyObject *ob = PyMapping_GetItemString(dict, (char *)"__pyx_vtable__"); - if (!ob) - goto bad; -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - ptr = PyCapsule_GetPointer(ob, 0); -#else - ptr = PyCObject_AsVoidPtr(ob); -#endif - if (!ptr && !PyErr_Occurred()) - PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); - Py_DECREF(ob); - return ptr; -bad: - Py_XDECREF(ob); - return NULL; -} - -#ifndef __PYX_HAVE_RT_ImportFunction -#define __PYX_HAVE_RT_ImportFunction -static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { - PyObject *d = 0; - PyObject *cobj = 0; - union { - void (*fp)(void); - void *p; - } tmp; - - d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); - if (!d) - goto bad; - cobj = PyDict_GetItemString(d, funcname); - if (!cobj) { - PyErr_Format(PyExc_ImportError, - "%s does not export expected C 
function %s", - PyModule_GetName(module), funcname); - goto bad; - } -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - if (!PyCapsule_IsValid(cobj, sig)) { - PyErr_Format(PyExc_TypeError, - "C function %s.%s has wrong signature (expected %s, got %s)", - PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); - goto bad; - } - tmp.p = PyCapsule_GetPointer(cobj, sig); -#else - {const char *desc, *s1, *s2; - desc = (const char *)PyCObject_GetDesc(cobj); - if (!desc) - goto bad; - s1 = desc; s2 = sig; - while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } - if (*s1 != *s2) { - PyErr_Format(PyExc_TypeError, - "C function %s.%s has wrong signature (expected %s, got %s)", - PyModule_GetName(module), funcname, sig, desc); - goto bad; - } - tmp.p = PyCObject_AsVoidPtr(cobj);} -#endif - *f = tmp.fp; - if (!(*f)) - goto bad; - Py_DECREF(d); - return 0; -bad: - Py_XDECREF(d); - return -1; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( 
- 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE 
PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} 
- - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/io/matlab/mio_utils.c b/scipy-0.10.1/scipy/io/matlab/mio_utils.c deleted file mode 100644 index 920e1b2c00..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/mio_utils.c +++ /dev/null @@ -1,5146 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:18:44 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - 
Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define 
PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define 
__Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__io__matlab__mio_utils -#define __PYX_HAVE_API__scipy__io__matlab__mio_utils -#include "stdio.h" -#include "stdlib.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ 
((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... */ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... 
*/ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "mio_utils.pyx", - "numpy.pxd", -}; - -/* "numpy.pxd":719 - * # in Cython to enable them only on the right systems. - * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "numpy.pxd":720 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "numpy.pxd":721 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "numpy.pxd":722 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "numpy.pxd":726 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ 
-typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "numpy.pxd":727 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "numpy.pxd":728 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* "numpy.pxd":729 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "numpy.pxd":733 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t - */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "numpy.pxd":734 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "numpy.pxd":743 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "numpy.pxd":744 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "numpy.pxd":745 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong 
__pyx_t_5numpy_longlong_t; - -/* "numpy.pxd":747 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "numpy.pxd":748 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "numpy.pxd":749 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "numpy.pxd":751 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "numpy.pxd":752 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "numpy.pxd":754 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "numpy.pxd":755 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* "numpy.pxd":756 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - -#if 
CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -/*--- Type declarations ---*/ - -/* "numpy.pxd":758 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "numpy.pxd":759 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "numpy.pxd":760 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* "numpy.pxd":762 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() 
__Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact); /*proto*/ - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && 
defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define __Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE __pyx_t_double_complex 
__pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short 
__Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename); /*proto*/ - -static int __Pyx_check_binary_version(void); - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - -static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'libc.stdlib' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; 
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ - -/* Module declarations from 'scipy.io.matlab.mio_utils' */ -static size_t __pyx_f_5scipy_2io_6matlab_9mio_utils_cproduct(PyObject *, int __pyx_skip_dispatch); /*proto*/ -static PyObject *__pyx_f_5scipy_2io_6matlab_9mio_utils_squeeze_element(PyArrayObject *, int __pyx_skip_dispatch); /*proto*/ -static PyArrayObject *__pyx_f_5scipy_2io_6matlab_9mio_utils_chars_to_strings(PyObject *, int __pyx_skip_dispatch); /*proto*/ -#define __Pyx_MODULE_NAME "scipy.io.matlab.mio_utils" -int __pyx_module_is_main_scipy__io__matlab__mio_utils = 0; - -/* Implementation of 'scipy.io.matlab.mio_utils' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_RuntimeError; -static char __pyx_k_3[] = "ndarray is not C contiguous"; -static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; -static char __pyx_k_7[] = "Non-native byte order not supported"; -static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; -static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; -static char __pyx_k_13[] = "Format string allocated 
too short."; -static char __pyx_k_15[] = " Utilities for generic processing of return arrays from read\n"; -static char __pyx_k__B[] = "B"; -static char __pyx_k__H[] = "H"; -static char __pyx_k__I[] = "I"; -static char __pyx_k__L[] = "L"; -static char __pyx_k__O[] = "O"; -static char __pyx_k__Q[] = "Q"; -static char __pyx_k__b[] = "b"; -static char __pyx_k__d[] = "d"; -static char __pyx_k__f[] = "f"; -static char __pyx_k__g[] = "g"; -static char __pyx_k__h[] = "h"; -static char __pyx_k__i[] = "i"; -static char __pyx_k__l[] = "l"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__Zd[] = "Zd"; -static char __pyx_k__Zf[] = "Zf"; -static char __pyx_k__Zg[] = "Zg"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__str[] = "str"; -static char __pyx_k__item[] = "item"; -static char __pyx_k__size[] = "size"; -static char __pyx_k__view[] = "view"; -static char __pyx_k__array[] = "array"; -static char __pyx_k__dtype[] = "dtype"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__range[] = "range"; -static char __pyx_k__shape[] = "shape"; -static char __pyx_k__reshape[] = "reshape"; -static char __pyx_k__squeeze[] = "squeeze"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__isbuiltin[] = "isbuiltin"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k__RuntimeError[] = "RuntimeError"; -static char __pyx_k__ascontiguousarray[] = "ascontiguousarray"; -static PyObject *__pyx_kp_u_10; -static PyObject *__pyx_kp_u_13; -static PyObject *__pyx_kp_u_3; -static PyObject *__pyx_kp_u_5; -static PyObject *__pyx_kp_u_7; -static PyObject *__pyx_kp_u_9; -static PyObject *__pyx_n_s__RuntimeError; -static PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s__array; -static PyObject *__pyx_n_s__ascontiguousarray; -static PyObject *__pyx_n_s__dtype; -static PyObject *__pyx_n_s__isbuiltin; -static 
PyObject *__pyx_n_s__item; -static PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__reshape; -static PyObject *__pyx_n_s__shape; -static PyObject *__pyx_n_s__size; -static PyObject *__pyx_n_s__squeeze; -static PyObject *__pyx_n_s__str; -static PyObject *__pyx_n_s__view; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_15; -static PyObject *__pyx_k_tuple_1; -static PyObject *__pyx_k_tuple_2; -static PyObject *__pyx_k_tuple_4; -static PyObject *__pyx_k_tuple_6; -static PyObject *__pyx_k_tuple_8; -static PyObject *__pyx_k_tuple_11; -static PyObject *__pyx_k_tuple_12; -static PyObject *__pyx_k_tuple_14; - -/* "scipy/io/matlab/mio_utils.pyx":9 - * - * - * cpdef size_t cproduct(tup): # <<<<<<<<<<<<<< - * cdef size_t res = 1 - * cdef int i - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_cproduct(PyObject *__pyx_self, PyObject *__pyx_v_tup); /*proto*/ -static size_t __pyx_f_5scipy_2io_6matlab_9mio_utils_cproduct(PyObject *__pyx_v_tup, int __pyx_skip_dispatch) { - size_t __pyx_v_res; - int __pyx_v_i; - size_t __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - size_t __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cproduct"); - - /* "scipy/io/matlab/mio_utils.pyx":10 - * - * cpdef size_t cproduct(tup): - * cdef size_t res = 1 # <<<<<<<<<<<<<< - * cdef int i - * for i in range(len(tup)): - */ - __pyx_v_res = 1; - - /* "scipy/io/matlab/mio_utils.pyx":12 - * cdef size_t res = 1 - * cdef int i - * for i in range(len(tup)): # <<<<<<<<<<<<<< - * res *= tup[i] - * return res - */ - __pyx_t_1 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - for (__pyx_t_2 = 0; __pyx_t_2 < 
__pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - /* "scipy/io/matlab/mio_utils.pyx":13 - * cdef int i - * for i in range(len(tup)): - * res *= tup[i] # <<<<<<<<<<<<<< - * return res - * - */ - __pyx_t_3 = __Pyx_PyInt_FromSize_t(__pyx_v_res); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_tup, __pyx_v_i, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyNumber_InPlaceMultiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_6 = __Pyx_PyInt_AsSize_t(__pyx_t_5); if (unlikely((__pyx_t_6 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_res = __pyx_t_6; - } - - /* "scipy/io/matlab/mio_utils.pyx":14 - * for i in range(len(tup)): - * res *= tup[i] - * return res # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_res; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_WriteUnraisable("scipy.io.matlab.mio_utils.cproduct", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio_utils.pyx":9 - * - * - * cpdef size_t cproduct(tup): # <<<<<<<<<<<<<< - * cdef size_t res = 1 - * cdef int i - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_cproduct(PyObject *__pyx_self, PyObject *__pyx_v_tup); /*proto*/ -static PyObject 
*__pyx_pf_5scipy_2io_6matlab_9mio_utils_cproduct(PyObject *__pyx_self, PyObject *__pyx_v_tup) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cproduct"); - __pyx_self = __pyx_self; - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_f_5scipy_2io_6matlab_9mio_utils_cproduct(__pyx_v_tup, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio_utils.cproduct", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio_utils.pyx":17 - * - * - * cpdef object squeeze_element(cnp.ndarray arr): # <<<<<<<<<<<<<< - * ''' Return squeezed element - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_1squeeze_element(PyObject *__pyx_self, PyObject *__pyx_v_arr); /*proto*/ -static PyObject *__pyx_f_5scipy_2io_6matlab_9mio_utils_squeeze_element(PyArrayObject *__pyx_v_arr, int __pyx_skip_dispatch) { - PyObject *__pyx_v_arr2 = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("squeeze_element"); - - /* "scipy/io/matlab/mio_utils.pyx":22 - * The returned object may not be an ndarray - for example if we do - * ``arr.item`` to return a ``mat_struct`` object from a struct array ''' - * if not arr.size: # 
<<<<<<<<<<<<<< - * return np.array([]) - * arr2 = np.squeeze(arr) - */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_s__size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = (!__pyx_t_2); - if (__pyx_t_3) { - - /* "scipy/io/matlab/mio_utils.pyx":23 - * ``arr.item`` to return a ``mat_struct`` object from a struct array ''' - * if not arr.size: - * return np.array([]) # <<<<<<<<<<<<<< - * arr2 = np.squeeze(arr) - * if (not arr2.shape) and arr2.dtype.isbuiltin: # 0d coverted to scalar - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio_utils.pyx":24 - * if not arr.size: - * return np.array([]) - * arr2 = np.squeeze(arr) # <<<<<<<<<<<<<< - * if (not arr2.shape) and arr2.dtype.isbuiltin: # 0d coverted to scalar - * return arr2.item() - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__squeeze); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((PyObject *)__pyx_v_arr)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_arr)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_arr)); - __pyx_t_4 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_arr2 = __pyx_t_4; - __pyx_t_4 = 0; - - /* "scipy/io/matlab/mio_utils.pyx":25 - * return np.array([]) - * arr2 = np.squeeze(arr) - * if (not arr2.shape) and arr2.dtype.isbuiltin: # 0d coverted to scalar # <<<<<<<<<<<<<< - * return arr2.item() - * return arr2 - */ - __pyx_t_4 = PyObject_GetAttr(__pyx_v_arr2, __pyx_n_s__shape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_2 = (!__pyx_t_3); - if (__pyx_t_2) { - __pyx_t_4 = PyObject_GetAttr(__pyx_v_arr2, __pyx_n_s__dtype); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__isbuiltin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __pyx_t_3; - } else { - __pyx_t_6 = __pyx_t_2; - } - if (__pyx_t_6) { - - /* "scipy/io/matlab/mio_utils.pyx":26 - * arr2 = np.squeeze(arr) - * if (not arr2.shape) and arr2.dtype.isbuiltin: # 0d coverted to scalar - * return arr2.item() # <<<<<<<<<<<<<< - * return arr2 - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_arr2, __pyx_n_s__item); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/mio_utils.pyx":27 - * if (not 
arr2.shape) and arr2.dtype.isbuiltin: # 0d coverted to scalar - * return arr2.item() - * return arr2 # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_arr2); - __pyx_r = __pyx_v_arr2; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("scipy.io.matlab.mio_utils.squeeze_element", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_arr2); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio_utils.pyx":17 - * - * - * cpdef object squeeze_element(cnp.ndarray arr): # <<<<<<<<<<<<<< - * ''' Return squeezed element - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_1squeeze_element(PyObject *__pyx_self, PyObject *__pyx_v_arr); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_9mio_utils_1squeeze_element[] = " Return squeezed element\n\n The returned object may not be an ndarray - for example if we do\n ``arr.item`` to return a ``mat_struct`` object from a struct array "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_1squeeze_element(PyObject *__pyx_self, PyObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("squeeze_element"); - __pyx_self = __pyx_self; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_arr), __pyx_ptype_5numpy_ndarray, 1, "arr", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_f_5scipy_2io_6matlab_9mio_utils_squeeze_element(((PyArrayObject *)__pyx_v_arr), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio_utils.squeeze_element", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio_utils.pyx":30 - * - * - * cpdef cnp.ndarray chars_to_strings(in_arr): # <<<<<<<<<<<<<< - * ''' Convert final axis of char array to strings - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_2chars_to_strings(PyObject *__pyx_self, PyObject *__pyx_v_in_arr); /*proto*/ -static PyArrayObject *__pyx_f_5scipy_2io_6matlab_9mio_utils_chars_to_strings(PyObject *__pyx_v_in_arr, int __pyx_skip_dispatch) { - PyArrayObject *__pyx_v_arr = 0; - int __pyx_v_ndim; - npy_intp *__pyx_v_dims; - npy_intp __pyx_v_last_dim; - PyObject *__pyx_v_new_dt_str = 0; - PyObject *__pyx_v_out_shape = 0; - PyArrayObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("chars_to_strings"); - - /* "scipy/io/matlab/mio_utils.pyx":44 - * ``arr`` - * ''' - * cdef cnp.ndarray arr = in_arr # <<<<<<<<<<<<<< - * cdef int ndim = arr.ndim - * cdef cnp.npy_intp *dims = arr.shape - */ - if (!(likely(((__pyx_v_in_arr) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_in_arr, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_INCREF(__pyx_v_in_arr); - __pyx_v_arr = ((PyArrayObject *)__pyx_v_in_arr); - - /* "scipy/io/matlab/mio_utils.pyx":45 - * ''' - * cdef cnp.ndarray arr = in_arr - * cdef int ndim = arr.ndim # <<<<<<<<<<<<<< - * cdef cnp.npy_intp 
*dims = arr.shape - * cdef cnp.npy_intp last_dim = dims[ndim-1] - */ - __pyx_v_ndim = __pyx_v_arr->nd; - - /* "scipy/io/matlab/mio_utils.pyx":46 - * cdef cnp.ndarray arr = in_arr - * cdef int ndim = arr.ndim - * cdef cnp.npy_intp *dims = arr.shape # <<<<<<<<<<<<<< - * cdef cnp.npy_intp last_dim = dims[ndim-1] - * cdef object new_dt_str, out_shape - */ - __pyx_v_dims = __pyx_v_arr->dimensions; - - /* "scipy/io/matlab/mio_utils.pyx":47 - * cdef int ndim = arr.ndim - * cdef cnp.npy_intp *dims = arr.shape - * cdef cnp.npy_intp last_dim = dims[ndim-1] # <<<<<<<<<<<<<< - * cdef object new_dt_str, out_shape - * if last_dim == 0: # deal with empty array case - */ - __pyx_v_last_dim = (__pyx_v_dims[(__pyx_v_ndim - 1)]); - - /* "scipy/io/matlab/mio_utils.pyx":49 - * cdef cnp.npy_intp last_dim = dims[ndim-1] - * cdef object new_dt_str, out_shape - * if last_dim == 0: # deal with empty array case # <<<<<<<<<<<<<< - * # Started with U1 - which is OK for us - * new_dt_str = arr.dtype.str - */ - __pyx_t_1 = (__pyx_v_last_dim == 0); - if (__pyx_t_1) { - - /* "scipy/io/matlab/mio_utils.pyx":51 - * if last_dim == 0: # deal with empty array case - * # Started with U1 - which is OK for us - * new_dt_str = arr.dtype.str # <<<<<<<<<<<<<< - * # So far we only know this is an empty array and that the last length is - * # 0. The other dimensions could be non-zero. We set the next to last - */ - __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__str); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_new_dt_str = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio_utils.pyx":55 - * # 0. 
The other dimensions could be non-zero. We set the next to last - * # dimension to zero to signal emptiness - * if ndim == 2: # <<<<<<<<<<<<<< - * out_shape = (0,) - * else: - */ - __pyx_t_1 = (__pyx_v_ndim == 2); - if (__pyx_t_1) { - - /* "scipy/io/matlab/mio_utils.pyx":56 - * # dimension to zero to signal emptiness - * if ndim == 2: - * out_shape = (0,) # <<<<<<<<<<<<<< - * else: - * out_shape = in_arr.shape[:-2] + (0,) - */ - __Pyx_INCREF(((PyObject *)__pyx_k_tuple_1)); - __pyx_v_out_shape = ((PyObject *)__pyx_k_tuple_1); - goto __pyx_L4; - } - /*else*/ { - - /* "scipy/io/matlab/mio_utils.pyx":58 - * out_shape = (0,) - * else: - * out_shape = in_arr.shape[:-2] + (0,) # <<<<<<<<<<<<<< - * else: # make new dtype string with N appended - * new_dt_str = arr.dtype.str[:-1] + str(last_dim) - */ - __pyx_t_3 = PyObject_GetAttr(__pyx_v_in_arr, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PySequence_GetSlice(__pyx_t_3, 0, -2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Add(__pyx_t_2, ((PyObject *)__pyx_k_tuple_2)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_out_shape = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_L4:; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/mio_utils.pyx":60 - * out_shape = in_arr.shape[:-2] + (0,) - * else: # make new dtype string with N appended - * new_dt_str = arr.dtype.str[:-1] + str(last_dim) # <<<<<<<<<<<<<< - * out_shape = in_arr.shape[:-1] - * # Copy to deal with F ordered arrays - */ - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_s__dtype); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__str); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_t_2, 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_last_dim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyNumber_Add(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_new_dt_str = __pyx_t_4; - __pyx_t_4 = 0; - - /* "scipy/io/matlab/mio_utils.pyx":61 - * else: # make new dtype string with N appended - * new_dt_str = arr.dtype.str[:-1] + str(last_dim) - * out_shape = in_arr.shape[:-1] # <<<<<<<<<<<<<< - * # Copy 
to deal with F ordered arrays - * arr = np.ascontiguousarray(arr) - */ - __pyx_t_4 = PyObject_GetAttr(__pyx_v_in_arr, __pyx_n_s__shape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PySequence_GetSlice(__pyx_t_4, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_out_shape = __pyx_t_2; - __pyx_t_2 = 0; - } - __pyx_L3:; - - /* "scipy/io/matlab/mio_utils.pyx":63 - * out_shape = in_arr.shape[:-1] - * # Copy to deal with F ordered arrays - * arr = np.ascontiguousarray(arr) # <<<<<<<<<<<<<< - * arr = arr.view(new_dt_str) - * return arr.reshape(out_shape) - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_v_arr)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_arr)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_arr)); - __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if 
(!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_arr)); - __pyx_v_arr = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/io/matlab/mio_utils.pyx":64 - * # Copy to deal with F ordered arrays - * arr = np.ascontiguousarray(arr) - * arr = arr.view(new_dt_str) # <<<<<<<<<<<<<< - * return arr.reshape(out_shape) - */ - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_s__view); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_new_dt_str); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_new_dt_str); - __Pyx_GIVEREF(__pyx_v_new_dt_str); - __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_v_arr)); - __pyx_v_arr = ((PyArrayObject *)__pyx_t_4); - __pyx_t_4 = 0; - - /* "scipy/io/matlab/mio_utils.pyx":65 - * arr = np.ascontiguousarray(arr) - * arr = arr.view(new_dt_str) - * return arr.reshape(out_shape) # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_v_arr), __pyx_n_s__reshape); if (unlikely(!__pyx_t_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_out_shape); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_out_shape); - __Pyx_GIVEREF(__pyx_v_out_shape); - __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.io.matlab.mio_utils.chars_to_strings", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_arr); - __Pyx_XDECREF(__pyx_v_new_dt_str); - __Pyx_XDECREF(__pyx_v_out_shape); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/mio_utils.pyx":30 - * - * - * cpdef cnp.ndarray chars_to_strings(in_arr): # <<<<<<<<<<<<<< - * ''' Convert final axis of char array to strings - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_2chars_to_strings(PyObject *__pyx_self, PyObject *__pyx_v_in_arr); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_9mio_utils_2chars_to_strings[] = " Convert final axis of char array to 
strings\n\n Parameters\n ----------\n in_arr : array\n dtype of 'U1'\n\n Returns\n -------\n str_arr : array\n dtype of 'UN' where N is the length of the last dimension of\n ``arr``\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_9mio_utils_2chars_to_strings(PyObject *__pyx_self, PyObject *__pyx_v_in_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("chars_to_strings"); - __pyx_self = __pyx_self; - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)__pyx_f_5scipy_2io_6matlab_9mio_utils_chars_to_strings(__pyx_v_in_arr, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.mio_utils.chars_to_strings", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":190 - * # experimental exception made for __getbuffer__ and __releasebuffer__ - * # -- the details of this may change. - * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< - * # This implementation of getbuffer is geared towards Cython - * # requirements, and does not yet fullfill the PEP. 
- */ - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int __pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - /* "numpy.pxd":196 - * # of flags - * - * if info == NULL: return # <<<<<<<<<<<<<< - * - * cdef int copy_shape, i, ndim - */ - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":199 - * - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":200 - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * - * ndim = PyArray_NDIM(self) - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":202 - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject 
*)__pyx_v_self)); - - /* "numpy.pxd":204 - * ndim = PyArray_NDIM(self) - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * copy_shape = 1 - * else: - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":205 - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * copy_shape = 1 # <<<<<<<<<<<<<< - * else: - * copy_shape = 0 - */ - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - /*else*/ { - - /* "numpy.pxd":207 - * copy_shape = 1 - * else: - * copy_shape = 0 # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - */ - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - /* "numpy.pxd":209 - * copy_shape = 0 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - /* "numpy.pxd":210 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not C contiguous") - * - */ - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "numpy.pxd":213 - * raise ValueError(u"ndarray is not C contiguous") - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") - */ - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - /* "numpy.pxd":214 - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not Fortran contiguous") - * - */ - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (__pyx_t_2) { - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - /* "numpy.pxd":217 - * raise ValueError(u"ndarray is not Fortran contiguous") - * - * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< - * info.ndim = ndim - * if copy_shape: - */ - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":218 - * - * info.buf = PyArray_DATA(self) - 
* info.ndim = ndim # <<<<<<<<<<<<<< - * if copy_shape: - * # Allocate new buffer for strides and shape info. - */ - __pyx_v_info->ndim = __pyx_v_ndim; - - /* "numpy.pxd":219 - * info.buf = PyArray_DATA(self) - * info.ndim = ndim - * if copy_shape: # <<<<<<<<<<<<<< - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - */ - if (__pyx_v_copy_shape) { - - /* "numpy.pxd":222 - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< - * info.shape = info.strides + ndim - * for i in range(ndim): - */ - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - /* "numpy.pxd":223 - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim # <<<<<<<<<<<<<< - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - */ - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - /* "numpy.pxd":224 - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim - * for i in range(ndim): # <<<<<<<<<<<<<< - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] - */ - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "numpy.pxd":225 - * info.shape = info.strides + ndim - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - */ - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - /* "numpy.pxd":226 - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< - * else: - * info.strides = 
PyArray_STRIDES(self) - */ - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":228 - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - */ - __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":229 - * else: - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - */ - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - /* "numpy.pxd":230 - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) - */ - __pyx_v_info->suboffsets = NULL; - - /* "numpy.pxd":231 - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< - * info.readonly = not PyArray_ISWRITEABLE(self) - * - */ - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":232 - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< - * - * cdef int t - */ - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":235 - * - * cdef int t - * cdef char* f = NULL # <<<<<<<<<<<<<< - * cdef dtype descr = self.descr - * cdef list stack - */ - __pyx_v_f = NULL; - - /* "numpy.pxd":236 - * cdef int t - * cdef char* f = NULL - * cdef dtype descr = self.descr # <<<<<<<<<<<<<< - * cdef list stack - * cdef int offset - */ - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = 
((PyArrayObject *)__pyx_v_self)->descr; - - /* "numpy.pxd":240 - * cdef int offset - * - * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< - * - * if not hasfields and not copy_shape: - */ - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - /* "numpy.pxd":242 - * cdef bint hasfields = PyDataType_HASFIELDS(descr) - * - * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< - * # do not call releasebuffer - * info.obj = None - */ - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":244 - * if not hasfields and not copy_shape: - * # do not call releasebuffer - * info.obj = None # <<<<<<<<<<<<<< - * else: - * # need to call releasebuffer - */ - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - /*else*/ { - - /* "numpy.pxd":247 - * else: - * # need to call releasebuffer - * info.obj = self # <<<<<<<<<<<<<< - * - * if not hasfields: - */ - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - /* "numpy.pxd":249 - * info.obj = self - * - * if not hasfields: # <<<<<<<<<<<<<< - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - */ - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - /* "numpy.pxd":250 - * - * if not hasfields: - * t = descr.type_num # <<<<<<<<<<<<<< - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - */ - __pyx_v_t = __pyx_v_descr->type_num; - - /* "numpy.pxd":251 - * if not hasfields: - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (descr.byteorder == '<' and not little_endian)): - * raise 
ValueError(u"Non-native byte order not supported") - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - /* "numpy.pxd":252 - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - /* "numpy.pxd":254 - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - */ - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - /* "numpy.pxd":255 - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" # 
<<<<<<<<<<<<<< - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - /* "numpy.pxd":256 - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - */ - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - /* "numpy.pxd":257 - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - */ - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - /* "numpy.pxd":258 - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - */ - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - /* "numpy.pxd":259 - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - - /* "numpy.pxd":260 - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - /* "numpy.pxd":261 - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - */ - __pyx_t_1 = 
(__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - /* "numpy.pxd":262 - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - /* "numpy.pxd":263 - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - /* "numpy.pxd":264 - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - */ - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - /* "numpy.pxd":265 - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - */ - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - /* "numpy.pxd":266 - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - /* "numpy.pxd":267 - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - */ - __pyx_t_1 = (__pyx_v_t == 
NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - /* "numpy.pxd":268 - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - /* "numpy.pxd":269 - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f = "O" - * else: - */ - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - /* "numpy.pxd":270 - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - /*else*/ { - - /* "numpy.pxd":272 - * elif t == NPY_OBJECT: f = "O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * info.format = f - * return - */ - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - /* "numpy.pxd":273 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f # <<<<<<<<<<<<<< - * return - * else: - */ - __pyx_v_info->format = __pyx_v_f; - - /* "numpy.pxd":274 - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f - * return # <<<<<<<<<<<<<< - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - */ - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - /*else*/ { - - /* "numpy.pxd":276 - * return - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 - */ - __pyx_v_info->format = ((char *)malloc(255)); - - /* "numpy.pxd":277 - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< - * offset = 0 - * f = _util_dtypestring(descr, info.format + 1, - */ - (__pyx_v_info->format[0]) = '^'; - - /* "numpy.pxd":278 - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 # <<<<<<<<<<<<<< - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - */ - __pyx_v_offset = 0; - - /* "numpy.pxd":281 - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - * &offset) # <<<<<<<<<<<<<< - * 
f[0] = 0 # Terminate format string - * - */ - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - /* "numpy.pxd":282 - * info.format + _buffer_format_string_len, - * &offset) - * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - */ - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":284 - * f[0] = 0 # Terminate format string - * - * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - */ - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - /* "numpy.pxd":285 - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< - * stdlib.free(info.format) - * if sizeof(npy_intp) != 
sizeof(Py_ssize_t): - */ - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - /* "numpy.pxd":286 - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) # <<<<<<<<<<<<<< - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) - */ - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":287 - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * stdlib.free(info.strides) - * # info.shape was stored after info.strides in the same block - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":288 - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) # <<<<<<<<<<<<<< - * # info.shape was stored after info.strides in the same block - * - */ - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":764 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - /* "numpy.pxd":765 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r 
= __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":767 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - /* "numpy.pxd":768 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":770 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject 
*__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - /* "numpy.pxd":771 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":773 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - /* "numpy.pxd":774 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = 
PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":776 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - /* "numpy.pxd":777 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":779 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< - * # Recursive utility function used in __getbuffer__ to get format - * # string. The new location in the format string is returned. - */ - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - /* "numpy.pxd":786 - * cdef int delta_offset - * cdef tuple i - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * cdef tuple fields - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":787 - * cdef tuple i - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * cdef tuple fields - * - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":790 - * cdef tuple fields - * - * for childname in descr.names: # <<<<<<<<<<<<<< - * fields = 
descr.fields[childname] - * child, new_offset = fields - */ - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - __Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":791 - * - * for childname in descr.names: - * fields = descr.fields[childname] # <<<<<<<<<<<<<< - * child, new_offset = fields - * - */ - __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "numpy.pxd":792 - * for childname in descr.names: - * fields = descr.fields[childname] - * child, new_offset = fields # <<<<<<<<<<<<<< - * - * if (end - f) - (new_offset - offset[0]) < 15: - */ - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - 
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - /* "numpy.pxd":794 - * child, new_offset = fields - * - * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - */ - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":797 - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - /* "numpy.pxd":798 - * - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * # One could encode it in the format string and have Cython - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - 
__pyx_t_9 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "numpy.pxd":809 - * - * # Output padding bytes - * while offset[0] < new_offset: # <<<<<<<<<<<<<< - * f[0] = 120 # "x"; pad byte - * f += 1 - */ - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - /* "numpy.pxd":810 - * # Output padding bytes - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< - * f += 1 - * offset[0] += 1 - */ - (__pyx_v_f[0]) = 120; - - /* "numpy.pxd":811 
- * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte - * f += 1 # <<<<<<<<<<<<<< - * offset[0] += 1 - * - */ - __pyx_v_f = (__pyx_v_f + 1); - - /* "numpy.pxd":812 - * f[0] = 120 # "x"; pad byte - * f += 1 - * offset[0] += 1 # <<<<<<<<<<<<<< - * - * offset[0] += child.itemsize - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - /* "numpy.pxd":814 - * offset[0] += 1 - * - * offset[0] += child.itemsize # <<<<<<<<<<<<<< - * - * if not PyDataType_HASFIELDS(child): - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - /* "numpy.pxd":816 - * offset[0] += child.itemsize - * - * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< - * t = child.type_num - * if end - f < 5: - */ - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - /* "numpy.pxd":817 - * - * if not PyDataType_HASFIELDS(child): - * t = child.type_num # <<<<<<<<<<<<<< - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") - */ - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":818 - * if not PyDataType_HASFIELDS(child): - * t = child.type_num - * if end - f < 5: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short.") - * - */ - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - /* "numpy.pxd":822 - * - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - */ - __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - /* "numpy.pxd":823 - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - /* "numpy.pxd":824 - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - */ - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - /* "numpy.pxd":825 - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - */ - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if 
(unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - /* "numpy.pxd":826 - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - */ - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - /* "numpy.pxd":827 - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - /* "numpy.pxd":828 - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - /* "numpy.pxd":829 - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - /* "numpy.pxd":830 - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - /* "numpy.pxd":831 - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - /* "numpy.pxd":832 - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - */ - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - /* "numpy.pxd":833 - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - */ - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - /* "numpy.pxd":834 - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - /* "numpy.pxd":835 - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - */ - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":836 - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" - */ - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":837 - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - */ - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":838 - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - /*else*/ { - - /* "numpy.pxd":840 - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * f += 1 - * else: - */ - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = 
PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - /* "numpy.pxd":841 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * f += 1 # <<<<<<<<<<<<<< - * else: - * # Cython ignores struct boundary information ("T{...}"), - */ - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":845 - * # Cython ignores struct boundary information ("T{...}"), - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< - * return f - * - */ - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "numpy.pxd":846 - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) - * return f # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":961 - * - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * cdef PyObject* baseptr - * if base is None: - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - /* "numpy.pxd":963 - * cdef inline void set_array_base(ndarray arr, object base): - * cdef PyObject* baseptr - * if base is None: # <<<<<<<<<<<<<< - * baseptr = NULL - * else: - */ - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - /* "numpy.pxd":964 - * cdef PyObject* baseptr - * if base is None: - * baseptr = NULL # <<<<<<<<<<<<<< - * else: - * Py_INCREF(base) # important to do this before decref below! - */ - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":966 - * baseptr = NULL - * else: - * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< - * baseptr = base - * Py_XDECREF(arr.base) - */ - Py_INCREF(__pyx_v_base); - - /* "numpy.pxd":967 - * else: - * Py_INCREF(base) # important to do this before decref below! - * baseptr = base # <<<<<<<<<<<<<< - * Py_XDECREF(arr.base) - * arr.base = baseptr - */ - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - /* "numpy.pxd":968 - * Py_INCREF(base) # important to do this before decref below! 
- * baseptr = base - * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< - * arr.base = baseptr - * - */ - Py_XDECREF(__pyx_v_arr->base); - - /* "numpy.pxd":969 - * baseptr = base - * Py_XDECREF(arr.base) - * arr.base = baseptr # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - /* "numpy.pxd":972 - * - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: # <<<<<<<<<<<<<< - * return None - * else: - */ - __pyx_t_1 = (__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - /* "numpy.pxd":973 - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: - * return None # <<<<<<<<<<<<<< - * else: - * return arr.base - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":975 - * return None - * else: - * return arr.base # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {__Pyx_NAMESTR("cproduct"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_9mio_utils_cproduct, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("squeeze_element"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_9mio_utils_1squeeze_element, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_9mio_utils_1squeeze_element)}, - 
{__Pyx_NAMESTR("chars_to_strings"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_9mio_utils_2chars_to_strings, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_9mio_utils_2chars_to_strings)}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("mio_utils"), - __Pyx_DOCSTR(__pyx_k_15), /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, - {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, - {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, - {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, - {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, - {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s__array, __pyx_k__array, sizeof(__pyx_k__array), 0, 0, 1, 1}, - {&__pyx_n_s__ascontiguousarray, __pyx_k__ascontiguousarray, sizeof(__pyx_k__ascontiguousarray), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__isbuiltin, __pyx_k__isbuiltin, sizeof(__pyx_k__isbuiltin), 0, 0, 1, 1}, - {&__pyx_n_s__item, __pyx_k__item, sizeof(__pyx_k__item), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__reshape, __pyx_k__reshape, 
sizeof(__pyx_k__reshape), 0, 0, 1, 1}, - {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, - {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1}, - {&__pyx_n_s__squeeze, __pyx_k__squeeze, sizeof(__pyx_k__squeeze), 0, 0, 1, 1}, - {&__pyx_n_s__str, __pyx_k__str, sizeof(__pyx_k__str), 0, 0, 1, 1}, - {&__pyx_n_s__view, __pyx_k__view, sizeof(__pyx_k__view), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "scipy/io/matlab/mio_utils.pyx":56 - * # dimension to zero to signal emptiness - * if ndim == 2: - * out_shape = (0,) # <<<<<<<<<<<<<< - * else: - * out_shape = in_arr.shape[:-2] + (0,) - */ - __pyx_k_tuple_1 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_1)); - __Pyx_INCREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_k_tuple_1, 0, __pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_1)); - - /* "scipy/io/matlab/mio_utils.pyx":58 - * out_shape = (0,) - * else: - * out_shape = in_arr.shape[:-2] + (0,) # <<<<<<<<<<<<<< - * else: # make new dtype 
string with N appended - * new_dt_str = arr.dtype.str[:-1] + str(last_dim) - */ - __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2)); - __Pyx_INCREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, __pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); - PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); - PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise 
ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_8)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); - PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); - PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); - PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); - - /* 
"numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_14)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); - PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initmio_utils(void); /*proto*/ -PyMODINIT_FUNC initmio_utils(void) -#else -PyMODINIT_FUNC PyInit_mio_utils(void); /*proto*/ -PyMODINIT_FUNC PyInit_mio_utils(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_mio_utils(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("mio_utils"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_15), 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. 
---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__io__matlab__mio_utils) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - /*--- Type import code ---*/ - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename 
= __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/io/matlab/mio_utils.pyx":5 - * ''' - * - * import numpy as np # <<<<<<<<<<<<<< - * cimport numpy as cnp - * - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/mio_utils.pyx":1 - * # -*- python -*- like file # <<<<<<<<<<<<<< - * ''' Utilities for generic processing of return arrays from read - * ''' - */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - /* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.io.matlab.mio_utils", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.io.matlab.mio_utils"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static 
__Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact) -{ - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (none_allowed && obj == Py_None) return 1; - else if (exact) { - if (Py_TYPE(obj) == type) return 1; - } - else { - if (PyObject_TypeCheck(obj, type)) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - 
tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. */ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. 
*/ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - 
"BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? "" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = 
PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { - const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(Py_intptr_t) == sizeof(char)) || - (sizeof(Py_intptr_t) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(Py_intptr_t) == sizeof(int)) || - (sizeof(Py_intptr_t) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), - little, !is_unsigned); - } -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static 
CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real 
== (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static 
CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 
0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return 
(PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename) { - PyObject *old_exc, *old_val, *old_tb; - PyObject *ctx; - __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); - #if PY_MAJOR_VERSION < 3 - ctx = PyString_FromString(name); - #else - ctx = PyUnicode_FromString(name); - #endif - __Pyx_ErrRestore(old_exc, old_val, old_tb); - if (!ctx) { - PyErr_WriteUnraisable(Py_None); - } else { - PyErr_WriteUnraisable(ctx); - Py_DECREF(ctx); - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); 
- Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( 
"%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p 
= PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return 
PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/io/matlab/miobase.py b/scipy-0.10.1/scipy/io/matlab/miobase.py deleted file mode 100644 index 6f6071344a..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/miobase.py +++ /dev/null @@ -1,393 +0,0 @@ -# Authors: Travis Oliphant, Matthew Brett - -""" -Base classes for matlab (TM) file stream reading -""" -import sys -import numpy as np -from numpy.compat import asbytes - -if sys.version_info[0] >= 3: - byteord = int -else: - byteord = ord - -from scipy.misc import doccer - -import byteordercodes as boc - -class MatReadError(Exception): pass - -class MatWriteError(Exception): pass - -class MatReadWarning(UserWarning): pass - -doc_dict = \ - {'file_arg': - '''file_name : str - Name of the mat file (do not need .mat extension if - appendmat==True) Can also pass open file-like object.''', - 'append_arg': - '''appendmat : bool, optional - True to append the .mat extension to the end of the given - filename, if not already present.''', - 'load_args': - '''byte_order : str or None, optional - None by default, implying byte order guessed from mat - file. Otherwise can be one of ('native', '=', 'little', '<', - 'BIG', '>'). -mat_dtype : bool, optional - If True, return arrays in same dtype as would be loaded into - MATLAB (instead of the dtype with which they are saved). -squeeze_me : bool, optional - Whether to squeeze unit matrix dimensions or not. -chars_as_strings : bool, optional - Whether to convert char arrays to string arrays. 
-matlab_compatible : bool, optional - Returns matrices as would be loaded by MATLAB (implies - squeeze_me=False, chars_as_strings=False, mat_dtype=True, - struct_as_record=True).''', - 'struct_arg': - '''struct_as_record : bool, optional - Whether to load MATLAB structs as numpy record arrays, or as - old-style numpy arrays with dtype=object. Setting this flag to - False replicates the behaviour of scipy version 0.7.x (returning - numpy object arrays). The default setting is True, because it - allows easier round-trip load and save of MATLAB files.''', - 'matstream_arg': - '''mat_stream : file-like - Object with file API, open for reading.''', - 'long_fields': - '''long_field_names : bool, optional - * False - maximum field name length in a structure is 31 characters - which is the documented maximum length. This is the default. - * True - maximum field name length in a structure is 63 characters - which works for Matlab 7.6''', - 'do_compression': - '''do_compression : bool, optional - Whether to compress matrices on write. Default is False.''', - 'oned_as': - '''oned_as : {'column', 'row'}, optional - If 'column', write 1-D numpy arrays as column vectors. - If 'row', write 1D numpy arrays as row vectors.''', - 'unicode_strings': - '''unicode_strings : bool, optional - If True, write strings as Unicode, else matlab usual encoding.'''} - -docfiller = doccer.filldoc(doc_dict) - -''' - - Note on architecture -====================== - -There are three sets of parameters relevant for reading files. The -first are *file read parameters* - containing options that are common -for reading the whole file, and therefore every variable within that -file. 
At the moment these are: - -* mat_stream -* dtypes (derived from byte code) -* byte_order -* chars_as_strings -* squeeze_me -* struct_as_record (matlab 5 files) -* class_dtypes (derived from order code, matlab 5 files) -* codecs (matlab 5 files) -* uint16_codec (matlab 5 files) - -Another set of parameters are those that apply only the the current -variable being read - the header**: - -* header related variables (different for v4 and v5 mat files) -* is_complex -* mclass -* var_stream - -With the header, we need ``next_position`` to tell us where the next -variable in the stream is. - -Then, there can be, for each element in a matrix, *element read -parameters*. An element is, for example, one element in a Matlab cell -array. At the moment these are: - -* mat_dtype - -The file-reading object contains the *file read parameters*. The -*header* is passed around as a data object, or may be read and discarded -in a single function. The *element read parameters* - the mat_dtype in -this instance, is passed into a general post-processing function - see -``mio_utils`` for details. -''' - - -def convert_dtypes(dtype_template, order_code): - ''' Convert dtypes in mapping to given order - - Parameters - ---------- - dtype_template : mapping - mapping with values returning numpy dtype from ``np.dtype(val)`` - order_code : str - an order code suitable for using in ``dtype.newbyteorder()`` - - Returns - ------- - dtypes : mapping - mapping where values have been replaced by - ``np.dtype(val).newbyteorder(order_code)`` - - ''' - dtypes = dtype_template.copy() - for k in dtypes: - dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code) - return dtypes - - -def read_dtype(mat_stream, a_dtype): - """ - Generic get of byte stream data of known type - - Parameters - ---------- - mat_stream : file-like object - Matlam (TM) stream - a_dtype : dtype - dtype of array to read. 
`a_dtype` is assumed to be correct - endianness - - Returns - ------- - arr : array - Array of given datatype obtained from stream. - - """ - num_bytes = a_dtype.itemsize - arr = np.ndarray(shape=(), - dtype=a_dtype, - buffer=mat_stream.read(num_bytes), - order='F') - return arr - - -def get_matfile_version(fileobj): - ''' Return major, minor tuple depending on apparent mat file type - - Where: - - #. 0,x -> version 4 format mat files - #. 1,x -> version 5 format mat files - #. 2,x -> version 7.3 format mat files (HDF format) - - Parameters - ---------- - fileobj : {file-like} - object implementing seek() and read() - - Returns - ------- - major_version : {0, 1, 2} - major matlab file format version - minor_version : int - major matlab file format version - - Notes - ----- - Has the side effect of setting the file read pointer to 0 - ''' - # Mat4 files have a zero somewhere in first 4 bytes - fileobj.seek(0) - mopt_bytes = np.ndarray(shape=(4,), - dtype=np.uint8, - buffer = fileobj.read(4)) - if 0 in mopt_bytes: - fileobj.seek(0) - return (0,0) - - # For 5 format or 7.3 format we need to read an integer in the - # header. Bytes 124 through 128 contain a version integer and an - # endian test string - fileobj.seek(124) - tst_str = fileobj.read(4) - fileobj.seek(0) - maj_ind = int(tst_str[2] == asbytes('I')[0]) - maj_val = byteord(tst_str[maj_ind]) - min_val = byteord(tst_str[1-maj_ind]) - ret = (maj_val, min_val) - if maj_val in (1, 2): - return ret - else: - raise ValueError('Unknown mat file type, version %s, %s' - % ret) - - -def matdims(arr, oned_as='column'): - """ - Determine equivalent matlab dimensions for given array - - Parameters - ---------- - arr : ndarray - Input array. - oned_as : {'column', 'row'}, optional - Whether 1-D arrays are returned as Matlab row or column matrices. - Default is 'column'. - - Returns - ------- - dims : tuple - Shape tuple, in the form Matlab expects it. 
- - Notes - ----- - We had to decide what shape a 1 dimensional array would be by - default. ``np.atleast_2d`` thinks it is a row vector. The - default for a vector in matlab (e.g. ``>> 1:12``) is a row vector. - - Versions of scipy up to and including 0.7 resulted (accidentally) - in 1-D arrays being read as column vectors. For the moment, we - maintain the same tradition here. - - Examples - -------- - >>> matdims(np.array(1)) # numpy scalar - (1, 1) - >>> matdims(np.array([1])) # 1d array, 1 element - (1, 1) - >>> matdims(np.array([1,2])) # 1d array, 2 elements - (2, 1) - >>> matdims(np.array([[2],[3]])) # 2d array, column vector - (2, 1) - >>> matdims(np.array([[2,3]])) # 2d array, row vector - (1, 2) - >>> matdims(np.array([[[2,3]]])) # 3d array, rowish vector - (1, 1, 2) - >>> matdims(np.array([])) # empty 1d array - (0, 0) - >>> matdims(np.array([[]])) # empty 2d - (0, 0) - >>> matdims(np.array([[[]]])) # empty 3d - (0, 0, 0) - - Optional argument flips 1-D shape behavior. - - >>> matdims(np.array([1,2]), 'row') # 1d array, 2 elements - (1, 2) - - The argument has to make sense though - - >>> matdims(np.array([1,2]), 'bizarre') - Traceback (most recent call last): - ... 
- ValueError: 1D option "bizarre" is strange - - """ - if arr.size == 0: # empty - return (0,) * np.max([arr.ndim, 2]) - shape = arr.shape - if shape == (): # scalar - return (1,1) - if len(shape) == 1: # 1D - if oned_as == 'column': - return shape + (1,) - elif oned_as == 'row': - return (1,) + shape - else: - raise ValueError('1D option "%s" is strange' - % oned_as) - return shape - - -class MatVarReader(object): - ''' Abstract class defining required interface for var readers''' - def __init__(self, file_reader): - pass - - def read_header(self): - ''' Returns header ''' - pass - - def array_from_header(self, header): - ''' Reads array given header ''' - pass - - -class MatFileReader(object): - """ Base object for reading mat files - - To make this class functional, you will need to override the - following methods: - - matrix_getter_factory - gives object to fetch next matrix from stream - guess_byte_order - guesses file byte order from file - """ - - @docfiller - def __init__(self, mat_stream, - byte_order=None, - mat_dtype=False, - squeeze_me=False, - chars_as_strings=True, - matlab_compatible=False, - struct_as_record=True - ): - ''' - Initializer for mat file reader - - mat_stream : file-like - object with file API, open for reading - %(load_args)s - ''' - # Initialize stream - self.mat_stream = mat_stream - self.dtypes = {} - if not byte_order: - byte_order = self.guess_byte_order() - else: - byte_order = boc.to_numpy_code(byte_order) - self.byte_order = byte_order - self.struct_as_record = struct_as_record - if matlab_compatible: - self.set_matlab_compatible() - else: - self.squeeze_me = squeeze_me - self.chars_as_strings = chars_as_strings - self.mat_dtype = mat_dtype - - def set_matlab_compatible(self): - ''' Sets options to return arrays as matlab (tm) loads them ''' - self.mat_dtype = True - self.squeeze_me = False - self.chars_as_strings = False - - def guess_byte_order(self): - ''' As we do not know what file type we have, assume native ''' - return 
boc.native_code - - def end_of_stream(self): - b = self.mat_stream.read(1) - curpos = self.mat_stream.tell() - self.mat_stream.seek(curpos-1) - return len(b) == 0 - - -def arr_dtype_number(arr, num): - ''' Return dtype for given number of items per element''' - return np.dtype(arr.dtype.str[:2] + str(num)) - - -def arr_to_chars(arr): - ''' Convert string array to char array ''' - dims = list(arr.shape) - if not dims: - dims = [1] - dims.append(int(arr.dtype.str[2:])) - arr = np.ndarray(shape=dims, - dtype=arr_dtype_number(arr, 1), - buffer=arr) - empties = [arr == ''] - if not np.any(empties): - return arr - arr = arr.copy() - arr[empties] = ' ' - return arr diff --git a/scipy-0.10.1/scipy/io/matlab/numpy_rephrasing.h b/scipy-0.10.1/scipy/io/matlab/numpy_rephrasing.h deleted file mode 100644 index 535f984658..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/numpy_rephrasing.h +++ /dev/null @@ -1,5 +0,0 @@ -#include -#define PyArray_Set_BASE(arr, obj) PyArray_BASE(arr) = obj -#define PyArray_PyANewFromDescr(descr, nd, dims, data, parent) \ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ - NULL, data, 0, parent) diff --git a/scipy-0.10.1/scipy/io/matlab/py3k.h b/scipy-0.10.1/scipy/io/matlab/py3k.h deleted file mode 100644 index 3daf6ecb3c..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/py3k.h +++ /dev/null @@ -1,104 +0,0 @@ -#include -#include - -#if PY_VERSION_HEX < 0x03000000 - -#include "cStringIO.h" - -#define npy_PyFile_Dup(file, mode) PyFile_AsFile(file) -#define npy_PyFile_DupClose(file, handle) (0) -#define npy_PyFile_Check PyFile_Check - -#else - -/* - * No-op implementation -- always fall back to the generic one. 
- */ - -static struct PycStringIO_CAPI { - int(*cread)(PyObject *, char **, Py_ssize_t); - int(*creadline)(PyObject *, char **); - int(*cwrite)(PyObject *, const char *, Py_ssize_t); - PyObject *(*cgetvalue)(PyObject *); - PyObject *(*NewOutput)(int); - PyObject *(*NewInput)(PyObject *); - PyTypeObject *InputType, *OutputType; -} *PycStringIO; - -static void PycString_IMPORT() {} - -#define PycStringIO_InputCheck(O) 0 -#define PycStringIO_OutputCheck(O) 0 - -/* - * PyFile_* compatibility - */ - -/* - * Get a FILE* handle to the file represented by the Python object - */ -static FILE* -npy_PyFile_Dup(PyObject *file, char *mode) -{ - int fd, fd2; - PyObject *ret, *os; - /* Flush first to ensure things end up in the file in the correct order */ - ret = PyObject_CallMethod(file, "flush", ""); - if (ret == NULL) { - return NULL; - } - Py_DECREF(ret); - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return NULL; - } - os = PyImport_ImportModule("os"); - if (os == NULL) { - return NULL; - } - ret = PyObject_CallMethod(os, "dup", "i", fd); - Py_DECREF(os); - if (ret == NULL) { - return NULL; - } - fd2 = PyNumber_AsSsize_t(ret, NULL); - Py_DECREF(ret); -#ifdef _WIN32 - return _fdopen(fd2, mode); -#else - return fdopen(fd2, mode); -#endif -} - -/* - * Close the dup-ed file handle, and seek the Python one to the current position - */ -static int -npy_PyFile_DupClose(PyObject *file, FILE* handle) -{ - PyObject *ret; - long position; - position = ftell(handle); - fclose(handle); - - ret = PyObject_CallMethod(file, "seek", "li", position, 0); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - -static int -npy_PyFile_Check(PyObject *file) -{ - int fd; - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - -#endif diff --git a/scipy-0.10.1/scipy/io/matlab/setup.py b/scipy-0.10.1/scipy/io/matlab/setup.py deleted file mode 100755 index d11d5c99ce..0000000000 --- 
a/scipy-0.10.1/scipy/io/matlab/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='io',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matlab', parent_package, top_path) - config.add_extension('streams', sources=['streams.c']) - config.add_extension('mio_utils', sources=['mio_utils.c']) - config.add_extension('mio5_utils', sources=['mio5_utils.c']) - config.add_data_dir('tests') - config.add_data_dir('benchmarks') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/io/matlab/setupscons.py b/scipy-0.10.1/scipy/io/matlab/setupscons.py deleted file mode 100755 index c95ccf929c..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/setupscons.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='io',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matlab', parent_package, top_path) - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - config.add_data_dir('benchmarks') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/io/matlab/streams.c b/scipy-0.10.1/scipy/io/matlab/streams.c deleted file mode 100644 index 6a71e70e8d..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/streams.c +++ /dev/null @@ -1,5105 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:18:59 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. 
-#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - 
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) 
PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__io__matlab__streams -#define __PYX_HAVE_API__scipy__io__matlab__streams -#include "stdio.h" -#include "pythread.h" -#include "stdlib.h" -#include "py3k.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define 
CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... 
*/ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... */ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -static const char *__pyx_f[] = { - "streams.pyx", - "pyalloc.pxd", - "bool.pxd", - "complex.pxd", -}; - -/*--- Type declarations ---*/ -struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream; -struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream; -struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13cStringStream_seek; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13cStringStream_read_string; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_10FileStream_seek; -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_10FileStream_read_string; - -/* "scipy/io/matlab/streams.pxd":6 - * cdef object fobj - * - * cpdef int seek(self, long int offset, int whence=*) except -1 # <<<<<<<<<<<<<< - * cpdef long int tell(self) except -1 - * cdef int read_into(self, void *buf, size_t n) except -1 - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek { - int __pyx_n; - int whence; -}; - -/* "scipy/io/matlab/streams.pxd":9 - * cpdef long int tell(self) except -1 - * cdef int read_into(self, void *buf, size_t n) except -1 - * cdef object read_string(self, size_t n, void **pp, int copy=*) # <<<<<<<<<<<<<< - * - * cpdef GenericStream make_stream(object fobj) - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string { - int __pyx_n; - int copy; -}; - -/* 
"scipy/io/matlab/streams.pyx":89 - * cdef class cStringStream(GenericStream): - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * cdef char *ptr - * if whence == 1 and offset >=0: # forward, from here - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13cStringStream_seek { - int __pyx_n; - int whence; -}; - -/* "scipy/io/matlab/streams.pyx":109 - * return 0 - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): # <<<<<<<<<<<<<< - * """ Make new memory, wrap with object - * - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13cStringStream_read_string { - int __pyx_n; - int copy; -}; - -/* "scipy/io/matlab/streams.pyx":135 - * npy_PyFile_DupClose(self.fobj, self.file) - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * cdef int ret - * """ move `offset` bytes in stream - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_10FileStream_seek { - int __pyx_n; - int whence; -}; - -/* "scipy/io/matlab/streams.pyx":176 - * return 0 - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): # <<<<<<<<<<<<<< - * """ Make new memory, wrap with object """ - * cdef object obj = pyalloc_v(n, pp) - */ -struct __pyx_opt_args_5scipy_2io_6matlab_7streams_10FileStream_read_string { - int __pyx_n; - int copy; -}; - -/* "scipy/io/matlab/streams.pxd":3 - * # -*- python -*- or rather like - * - * cdef class GenericStream: # <<<<<<<<<<<<<< - * cdef object fobj - * - */ -struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream { - PyObject_HEAD - struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *__pyx_vtab; - PyObject *fobj; -}; - - -/* "scipy/io/matlab/streams.pyx":87 - * - * - * cdef class cStringStream(GenericStream): # <<<<<<<<<<<<<< - * - * cpdef int seek(self, long int offset, int whence=0) except -1: - */ -struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream 
__pyx_base; -}; - - -/* "scipy/io/matlab/streams.pyx":125 - * - * - * cdef class FileStream(GenericStream): # <<<<<<<<<<<<<< - * cdef FILE* file - * - */ -struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream __pyx_base; - FILE *file; -}; - - - -/* "scipy/io/matlab/streams.pyx":47 - * - * - * cdef class GenericStream: # <<<<<<<<<<<<<< - * - * def __init__(self, fobj): - */ - -struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream { - int (*seek)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, long, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek *__pyx_optional_args); - long (*tell)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, int __pyx_skip_dispatch); - int (*read_into)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, void *, size_t); - PyObject *(*read_string)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, size_t, void **, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string *__pyx_optional_args); -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *__pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream; - - -/* "scipy/io/matlab/streams.pyx":125 - * - * - * cdef class FileStream(GenericStream): # <<<<<<<<<<<<<< - * cdef FILE* file - * - */ - -struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_FileStream { - struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream __pyx_base; -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_FileStream *__pyx_vtabptr_5scipy_2io_6matlab_7streams_FileStream; - - -/* "scipy/io/matlab/streams.pyx":87 - * - * - * cdef class cStringStream(GenericStream): # <<<<<<<<<<<<<< - * - * cpdef int seek(self, long int offset, int whence=0) except -1: - */ - -struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_cStringStream { - struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream 
__pyx_base; -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_cStringStream *__pyx_vtabptr_5scipy_2io_6matlab_7streams_cStringStream; - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define 
__Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact); /*proto*/ - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int 
__Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static int __Pyx_check_binary_version(void); - -static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /*proto*/ - -static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - -static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'cpython.version' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'cpython.exc' */ - -/* Module declarations from 'cpython.module' */ - -/* Module declarations from 'cpython.mem' */ - -/* Module declarations from 'cpython.tuple' */ - -/* Module declarations from 'cpython.list' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'cpython.sequence' */ - -/* Module declarations from 'cpython.mapping' */ - -/* Module declarations from 'cpython.iterator' */ - -/* Module declarations from 'cpython.type' */ - -/* Module declarations from 'cpython.number' */ - -/* Module declarations from 'cpython.int' */ - -/* Module declarations from '__builtin__' */ - -/* Module 
declarations from 'cpython.bool' */ -static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; - -/* Module declarations from 'cpython.long' */ - -/* Module declarations from 'cpython.float' */ - -/* Module declarations from '__builtin__' */ - -/* Module declarations from 'cpython.complex' */ -static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; - -/* Module declarations from 'cpython.string' */ - -/* Module declarations from 'cpython.unicode' */ - -/* Module declarations from 'cpython.dict' */ - -/* Module declarations from 'cpython.instance' */ - -/* Module declarations from 'cpython.function' */ - -/* Module declarations from 'cpython.method' */ - -/* Module declarations from 'cpython.weakref' */ - -/* Module declarations from 'cpython.getargs' */ - -/* Module declarations from 'cpython.pythread' */ - -/* Module declarations from 'cpython.pystate' */ - -/* Module declarations from 'cpython.cobject' */ - -/* Module declarations from 'cpython.oldbuffer' */ - -/* Module declarations from 'cpython.set' */ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'cpython.bytes' */ - -/* Module declarations from 'cpython.pycapsule' */ - -/* Module declarations from 'cpython' */ - -/* Module declarations from 'scipy.io.matlab.pyalloc' */ -static CYTHON_INLINE PyObject *__pyx_f_5scipy_2io_6matlab_7pyalloc_pyalloc_v(Py_ssize_t, void **); /*proto*/ - -/* Module declarations from 'scipy.io.matlab.streams' */ -static PyTypeObject *__pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream = 0; -static PyTypeObject *__pyx_ptype_5scipy_2io_6matlab_7streams_cStringStream = 0; -static PyTypeObject *__pyx_ptype_5scipy_2io_6matlab_7streams_FileStream = 0; -static struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_f_5scipy_2io_6matlab_7streams_make_stream(PyObject *, int __pyx_skip_dispatch); /*proto*/ -#define __Pyx_MODULE_NAME "scipy.io.matlab.streams" -int __pyx_module_is_main_scipy__io__matlab__streams = 0; - -/* Implementation of 
'scipy.io.matlab.streams' */ -static PyObject *__pyx_builtin_IOError; -static char __pyx_k_1[] = "could not read bytes"; -static char __pyx_k_6[] = "Failed seek"; -static char __pyx_k_8[] = "Could not read bytes"; -static char __pyx_k_11[] = " "; -static char __pyx_k_12[] = "scipy.io.matlab.streams"; -static char __pyx_k__A[] = "A"; -static char __pyx_k__n[] = "n"; -static char __pyx_k__rb[] = "rb"; -static char __pyx_k__st[] = "st"; -static char __pyx_k__sys[] = "sys"; -static char __pyx_k__fobj[] = "fobj"; -static char __pyx_k__read[] = "read"; -static char __pyx_k__seek[] = "seek"; -static char __pyx_k__tell[] = "tell"; -static char __pyx_k__offset[] = "offset"; -static char __pyx_k__whence[] = "whence"; -static char __pyx_k__IOError[] = "IOError"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k___read_into[] = "_read_into"; -static char __pyx_k___read_string[] = "_read_string"; -static char __pyx_k__version_info[] = "version_info"; -static PyObject *__pyx_kp_s_1; -static PyObject *__pyx_kp_b_11; -static PyObject *__pyx_n_s_12; -static PyObject *__pyx_kp_s_6; -static PyObject *__pyx_kp_s_8; -static PyObject *__pyx_n_b__A; -static PyObject *__pyx_n_s__IOError; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s___read_into; -static PyObject *__pyx_n_s___read_string; -static PyObject *__pyx_n_s__fobj; -static PyObject *__pyx_n_s__n; -static PyObject *__pyx_n_s__offset; -static PyObject *__pyx_n_s__read; -static PyObject *__pyx_n_s__seek; -static PyObject *__pyx_n_s__st; -static PyObject *__pyx_n_s__sys; -static PyObject *__pyx_n_s__tell; -static PyObject *__pyx_n_s__version_info; -static PyObject *__pyx_n_s__whence; -static PyObject *__pyx_int_3; -static PyObject *__pyx_k_tuple_2; -static PyObject *__pyx_k_tuple_3; -static PyObject *__pyx_k_tuple_4; -static PyObject *__pyx_k_tuple_5; -static PyObject *__pyx_k_tuple_7; -static PyObject 
*__pyx_k_tuple_9; -static PyObject *__pyx_k_tuple_10; - -/* "scipy/io/matlab/streams.pyx":49 - * cdef class GenericStream: - * - * def __init__(self, fobj): # <<<<<<<<<<<<<< - * self.fobj = fobj - * - */ - -static int __pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_fobj = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__fobj,0}; - __Pyx_RefNannySetupContext("__init__"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[1] = {0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fobj); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_fobj = values[0]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_fobj = PyTuple_GET_ITEM(__pyx_args, 0); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.__init__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/io/matlab/streams.pyx":50 - * - * def __init__(self, fobj): - * self.fobj = fobj # <<<<<<<<<<<<<< - * - * cpdef int seek(self, long int offset, int whence=0) except -1: - */ - __Pyx_INCREF(__pyx_v_fobj); - __Pyx_GIVEREF(__pyx_v_fobj); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self)->fobj); - __Pyx_DECREF(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self)->fobj); - ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self)->fobj = __pyx_v_fobj; - - __pyx_r = 0; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":52 - * self.fobj = fobj - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * self.fobj.seek(offset, whence) - * return 0 - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_1seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_seek(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_v_self, long __pyx_v_offset, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek *__pyx_optional_args) { - int __pyx_v_whence = ((int)0); - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("seek"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_whence = __pyx_optional_args->whence; - } - } - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if 
(unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__seek); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_1seek)) { - __pyx_t_2 = PyInt_FromLong(__pyx_v_offset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromLong(__pyx_v_whence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_5; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/streams.pyx":53 - * - * cpdef int seek(self, long int offset, int whence=0) except -1: - * 
self.fobj.seek(offset, whence) # <<<<<<<<<<<<<< - * return 0 - * - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->fobj, __pyx_n_s__seek); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyInt_FromLong(__pyx_v_offset); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyInt_FromLong(__pyx_v_whence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "scipy/io/matlab/streams.pyx":54 - * cpdef int seek(self, long int offset, int whence=0) except -1: - * self.fobj.seek(offset, whence) - * return 0 # <<<<<<<<<<<<<< - * - * cpdef long int tell(self) except -1: - */ - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - 
__Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":52 - * self.fobj = fobj - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * self.fobj.seek(offset, whence) - * return 0 - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_1seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_1seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - long __pyx_v_offset; - int __pyx_v_whence; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__offset,&__pyx_n_s__whence,0}; - __Pyx_RefNannySetupContext("seek"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__offset); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__whence); - if (value) { values[1] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "seek") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_offset = __Pyx_PyInt_AsLong(values[0]); if (unlikely((__pyx_v_offset 
== (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - if (values[1]) { - __pyx_v_whence = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_whence == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_whence = ((int)0); - } - } else { - __pyx_v_whence = ((int)0); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: __pyx_v_whence = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_whence == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 1: __pyx_v_offset = __Pyx_PyInt_AsLong(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_offset == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("seek", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_XDECREF(__pyx_r); - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.whence = __pyx_v_whence; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self)->__pyx_vtab)->seek(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self), __pyx_v_offset, 1, &__pyx_t_2); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__pyx_t_3 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":56 - * return 0 - * - * cpdef long int tell(self) except -1: # <<<<<<<<<<<<<< - * return self.fobj.tell() - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_2tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static long __pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_tell(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_v_self, int __pyx_skip_dispatch) { - long __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - long __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("tell"); - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__tell); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_2tell)) { - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsLong(__pyx_t_2); if (unlikely((__pyx_t_3 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/streams.pyx":57 - * - * cpdef long int tell(self) except -1: - * return self.fobj.tell() # <<<<<<<<<<<<<< - * - * def read(self, n_bytes): - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->fobj, __pyx_n_s__tell); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyInt_AsLong(__pyx_t_2); if (unlikely((__pyx_t_3 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.tell", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":56 - * return 0 - * - * cpdef long int tell(self) except -1: # <<<<<<<<<<<<<< - * return self.fobj.tell() - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_2tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_2tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - long __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("tell"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self)->__pyx_vtab)->tell(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self), 1); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.tell", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":59 - * return self.fobj.tell() - * - * def read(self, n_bytes): # <<<<<<<<<<<<<< - * return self.fobj.read(n_bytes) - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_3read(PyObject *__pyx_v_self, PyObject *__pyx_v_n_bytes); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_3read(PyObject *__pyx_v_self, PyObject *__pyx_v_n_bytes) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char 
*__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read"); - - /* "scipy/io/matlab/streams.pyx":60 - * - * def read(self, n_bytes): - * return self.fobj.read(n_bytes) # <<<<<<<<<<<<<< - * - * cdef int read_into(self, void *buf, size_t n) except -1: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self)->fobj, __pyx_n_s__read); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n_bytes); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n_bytes); - __Pyx_GIVEREF(__pyx_v_n_bytes); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.read", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":62 - * return self.fobj.read(n_bytes) - * - * cdef int read_into(self, void *buf, size_t n) except -1: # <<<<<<<<<<<<<< - * """ Read n bytes from stream into pre-allocated buffer `buf` - * """ - */ - -static int __pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_read_into(struct 
__pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_v_self, void *__pyx_v_buf, size_t __pyx_v_n) { - char *__pyx_v_d_ptr; - PyObject *__pyx_v_data = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t __pyx_t_4; - int __pyx_t_5; - char *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_into"); - - /* "scipy/io/matlab/streams.pyx":66 - * """ - * cdef char* d_ptr - * data = self.fobj.read(n) # <<<<<<<<<<<<<< - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->fobj, __pyx_n_s__read); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_data = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/streams.pyx":67 - * cdef char* d_ptr - * data = self.fobj.read(n) - * if PyBytes_Size(data) != n: # <<<<<<<<<<<<<< - * raise IOError('could not read bytes') - * return -1 - */ - __pyx_t_4 = PyBytes_Size(__pyx_v_data); if 
(unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = (__pyx_t_4 != __pyx_v_n); - if (__pyx_t_5) { - - /* "scipy/io/matlab/streams.pyx":68 - * data = self.fobj.read(n) - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * return -1 - * d_ptr = data - */ - __pyx_t_2 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":70 - * raise IOError('could not read bytes') - * return -1 - * d_ptr = data # <<<<<<<<<<<<<< - * memcpy(buf, d_ptr, n) - * return 0 - */ - __pyx_t_6 = PyBytes_AsString(__pyx_v_data); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_d_ptr = __pyx_t_6; - - /* "scipy/io/matlab/streams.pyx":71 - * return -1 - * d_ptr = data - * memcpy(buf, d_ptr, n) # <<<<<<<<<<<<<< - * return 0 - * - */ - memcpy(__pyx_v_buf, __pyx_v_d_ptr, __pyx_v_n); - - /* "scipy/io/matlab/streams.pyx":72 - * d_ptr = data - * memcpy(buf, d_ptr, n) - * return 0 # <<<<<<<<<<<<<< - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): - */ - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.read_into", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_data); - __Pyx_RefNannyFinishContext(); - return __pyx_r; 
-} - -/* "scipy/io/matlab/streams.pyx":74 - * return 0 - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): # <<<<<<<<<<<<<< - * """ Make new memory, wrap with object """ - * data = self.fobj.read(n) - */ - -static PyObject *__pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_read_string(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_v_self, size_t __pyx_v_n, void **__pyx_v_pp, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string *__pyx_optional_args) { - int __pyx_v_copy = ((int)1); - PyObject *__pyx_v_data = NULL; - PyObject *__pyx_v_d_copy = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t __pyx_t_4; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_string"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_copy = __pyx_optional_args->copy; - } - } - - /* "scipy/io/matlab/streams.pyx":76 - * cdef object read_string(self, size_t n, void **pp, int copy=True): - * """ Make new memory, wrap with object """ - * data = self.fobj.read(n) # <<<<<<<<<<<<<< - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->fobj, __pyx_n_s__read); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, 
__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_data = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/streams.pyx":77 - * """ Make new memory, wrap with object """ - * data = self.fobj.read(n) - * if PyBytes_Size(data) != n: # <<<<<<<<<<<<<< - * raise IOError('could not read bytes') - * if copy != True: - */ - __pyx_t_4 = PyBytes_Size(__pyx_v_data); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = (__pyx_t_4 != __pyx_v_n); - if (__pyx_t_5) { - - /* "scipy/io/matlab/streams.pyx":78 - * data = self.fobj.read(n) - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * if copy != True: - * pp[0] = PyBytes_AS_STRING(data) - */ - __pyx_t_2 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":79 - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') - * if copy != True: # <<<<<<<<<<<<<< - * pp[0] = PyBytes_AS_STRING(data) - * return data - */ - __pyx_t_5 = (__pyx_v_copy != 1); - if (__pyx_t_5) { - - /* "scipy/io/matlab/streams.pyx":80 - * raise IOError('could not read bytes') - * if copy != True: - * pp[0] = PyBytes_AS_STRING(data) # <<<<<<<<<<<<<< - * return data - * cdef 
object d_copy = pyalloc_v(n, pp) - */ - (__pyx_v_pp[0]) = ((void *)PyBytes_AS_STRING(__pyx_v_data)); - - /* "scipy/io/matlab/streams.pyx":81 - * if copy != True: - * pp[0] = PyBytes_AS_STRING(data) - * return data # <<<<<<<<<<<<<< - * cdef object d_copy = pyalloc_v(n, pp) - * memcpy(pp[0], PyBytes_AS_STRING(data), n) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_data); - __pyx_r = __pyx_v_data; - goto __pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - /* "scipy/io/matlab/streams.pyx":82 - * pp[0] = PyBytes_AS_STRING(data) - * return data - * cdef object d_copy = pyalloc_v(n, pp) # <<<<<<<<<<<<<< - * memcpy(pp[0], PyBytes_AS_STRING(data), n) - * return d_copy - */ - __pyx_t_2 = __pyx_f_5scipy_2io_6matlab_7pyalloc_pyalloc_v(__pyx_v_n, __pyx_v_pp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_d_copy = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/streams.pyx":83 - * return data - * cdef object d_copy = pyalloc_v(n, pp) - * memcpy(pp[0], PyBytes_AS_STRING(data), n) # <<<<<<<<<<<<<< - * return d_copy - * - */ - memcpy((__pyx_v_pp[0]), PyBytes_AS_STRING(__pyx_v_data), __pyx_v_n); - - /* "scipy/io/matlab/streams.pyx":84 - * cdef object d_copy = pyalloc_v(n, pp) - * memcpy(pp[0], PyBytes_AS_STRING(data), n) - * return d_copy # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_d_copy); - __pyx_r = __pyx_v_d_copy; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.GenericStream.read_string", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_data); - __Pyx_XDECREF(__pyx_v_d_copy); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":89 - * 
cdef class cStringStream(GenericStream): - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * cdef char *ptr - * if whence == 1 and offset >=0: # forward, from here - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13cStringStream_seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_f_5scipy_2io_6matlab_7streams_13cStringStream_seek(struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream *__pyx_v_self, long __pyx_v_offset, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13cStringStream_seek *__pyx_optional_args) { - int __pyx_v_whence = ((int)0); - char *__pyx_v_ptr; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("seek"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_whence = __pyx_optional_args->whence; - } - } - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__seek); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_7streams_13cStringStream_seek)) { - __pyx_t_2 = PyInt_FromLong(__pyx_v_offset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromLong(__pyx_v_whence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_5; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/streams.pyx":91 - * cpdef int seek(self, long int offset, int whence=0) except -1: - * cdef char *ptr - * if whence == 1 and offset >=0: # forward, from here # <<<<<<<<<<<<<< - * StringIO_cread(self.fobj, &ptr, offset) - * return 0 - */ - __pyx_t_6 = (__pyx_v_whence == 1); - if (__pyx_t_6) { - __pyx_t_7 = (__pyx_v_offset >= 0); - __pyx_t_8 = __pyx_t_7; - } else { - __pyx_t_8 = __pyx_t_6; - } - if (__pyx_t_8) { - - /* "scipy/io/matlab/streams.pyx":92 - * cdef char *ptr - * if whence == 1 and offset >=0: # forward, from here - * StringIO_cread(self.fobj, &ptr, offset) # <<<<<<<<<<<<<< - * return 0 - * else: # use python interface - */ - __pyx_t_1 = __pyx_v_self->__pyx_base.fobj; - 
__Pyx_INCREF(__pyx_t_1); - PycStringIO->cread(__pyx_t_1, (&__pyx_v_ptr), __pyx_v_offset); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":93 - * if whence == 1 and offset >=0: # forward, from here - * StringIO_cread(self.fobj, &ptr, offset) - * return 0 # <<<<<<<<<<<<<< - * else: # use python interface - * return GenericStream.seek(self, offset, whence) - */ - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/io/matlab/streams.pyx":95 - * return 0 - * else: # use python interface - * return GenericStream.seek(self, offset, whence) # <<<<<<<<<<<<<< - * - * cdef int read_into(self, void *buf, size_t n) except -1: - */ - __pyx_t_9.__pyx_n = 1; - __pyx_t_9.whence = __pyx_v_whence; - __pyx_t_5 = __pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream->seek(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self), __pyx_v_offset, 1, &__pyx_t_9); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = __pyx_t_5; - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.io.matlab.streams.cStringStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":89 - * cdef class cStringStream(GenericStream): - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * cdef char *ptr - * if whence == 1 and offset >=0: # forward, from here - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13cStringStream_seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_13cStringStream_seek(PyObject *__pyx_v_self, PyObject 
*__pyx_args, PyObject *__pyx_kwds) { - long __pyx_v_offset; - int __pyx_v_whence; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__offset,&__pyx_n_s__whence,0}; - __Pyx_RefNannySetupContext("seek"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__offset); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__whence); - if (value) { values[1] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "seek") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_offset = __Pyx_PyInt_AsLong(values[0]); if (unlikely((__pyx_v_offset == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - if (values[1]) { - __pyx_v_whence = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_whence == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_whence = ((int)0); - } - } else { - __pyx_v_whence = ((int)0); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: __pyx_v_whence = 
__Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_whence == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 1: __pyx_v_offset = __Pyx_PyInt_AsLong(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_offset == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("seek", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams.cStringStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_XDECREF(__pyx_r); - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.whence = __pyx_v_whence; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_cStringStream *)((struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream *)__pyx_v_self)->__pyx_base.__pyx_vtab)->__pyx_base.seek(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self), __pyx_v_offset, 1, &__pyx_t_2); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.cStringStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - 
__pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":97 - * return GenericStream.seek(self, offset, whence) - * - * cdef int read_into(self, void *buf, size_t n) except -1: # <<<<<<<<<<<<<< - * """ Read n bytes from stream into pre-allocated buffer `buf` - * """ - */ - -static int __pyx_f_5scipy_2io_6matlab_7streams_13cStringStream_read_into(struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream *__pyx_v_self, void *__pyx_v_buf, size_t __pyx_v_n) { - size_t __pyx_v_n_red; - char *__pyx_v_d_ptr; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_into"); - - /* "scipy/io/matlab/streams.pyx":103 - * size_t n_red - * char* d_ptr - * n_red = StringIO_cread(self.fobj, &d_ptr, n) # <<<<<<<<<<<<<< - * if n_red != n: - * raise IOError('could not read bytes') - */ - __pyx_t_1 = __pyx_v_self->__pyx_base.fobj; - __Pyx_INCREF(__pyx_t_1); - __pyx_v_n_red = PycStringIO->cread(__pyx_t_1, (&__pyx_v_d_ptr), __pyx_v_n); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":104 - * char* d_ptr - * n_red = StringIO_cread(self.fobj, &d_ptr, n) - * if n_red != n: # <<<<<<<<<<<<<< - * raise IOError('could not read bytes') - * memcpy(buf, d_ptr, n) - */ - __pyx_t_2 = (__pyx_v_n_red != __pyx_v_n); - if (__pyx_t_2) { - - /* "scipy/io/matlab/streams.pyx":105 - * n_red = StringIO_cread(self.fobj, &d_ptr, n) - * if n_red != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * memcpy(buf, d_ptr, n) - * return 0 - */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 
= 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":106 - * if n_red != n: - * raise IOError('could not read bytes') - * memcpy(buf, d_ptr, n) # <<<<<<<<<<<<<< - * return 0 - * - */ - memcpy(__pyx_v_buf, ((void *)__pyx_v_d_ptr), __pyx_v_n); - - /* "scipy/io/matlab/streams.pyx":107 - * raise IOError('could not read bytes') - * memcpy(buf, d_ptr, n) - * return 0 # <<<<<<<<<<<<<< - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): - */ - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.streams.cStringStream.read_into", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":109 - * return 0 - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): # <<<<<<<<<<<<<< - * """ Make new memory, wrap with object - * - */ - -static PyObject *__pyx_f_5scipy_2io_6matlab_7streams_13cStringStream_read_string(struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream *__pyx_v_self, size_t __pyx_v_n, void **__pyx_v_pp, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13cStringStream_read_string *__pyx_optional_args) { - int __pyx_v_copy = ((int)1); - char *__pyx_v_d_ptr; - PyObject *__pyx_v_obj = 0; - size_t __pyx_v_n_red; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_string"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_copy = __pyx_optional_args->copy; - } - } - - /* "scipy/io/matlab/streams.pyx":117 - * char *d_ptr - * object obj - * cdef size_t n_red = StringIO_cread(self.fobj, &d_ptr, n) # 
<<<<<<<<<<<<<< - * if n_red != n: - * raise IOError('could not read bytes') - */ - __pyx_t_1 = __pyx_v_self->__pyx_base.fobj; - __Pyx_INCREF(__pyx_t_1); - __pyx_v_n_red = PycStringIO->cread(__pyx_t_1, (&__pyx_v_d_ptr), __pyx_v_n); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":118 - * object obj - * cdef size_t n_red = StringIO_cread(self.fobj, &d_ptr, n) - * if n_red != n: # <<<<<<<<<<<<<< - * raise IOError('could not read bytes') - * obj = pyalloc_v(n, pp) - */ - __pyx_t_2 = (__pyx_v_n_red != __pyx_v_n); - if (__pyx_t_2) { - - /* "scipy/io/matlab/streams.pyx":119 - * cdef size_t n_red = StringIO_cread(self.fobj, &d_ptr, n) - * if n_red != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * obj = pyalloc_v(n, pp) - * memcpy(pp[0], d_ptr, n) - */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":120 - * if n_red != n: - * raise IOError('could not read bytes') - * obj = pyalloc_v(n, pp) # <<<<<<<<<<<<<< - * memcpy(pp[0], d_ptr, n) - * return obj - */ - __pyx_t_1 = __pyx_f_5scipy_2io_6matlab_7pyalloc_pyalloc_v(__pyx_v_n, __pyx_v_pp); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_obj = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":121 - * raise IOError('could not read bytes') - * obj = pyalloc_v(n, pp) - * memcpy(pp[0], d_ptr, n) # <<<<<<<<<<<<<< - * return obj - * - */ - memcpy((__pyx_v_pp[0]), __pyx_v_d_ptr, __pyx_v_n); - - /* "scipy/io/matlab/streams.pyx":122 
- * obj = pyalloc_v(n, pp) - * memcpy(pp[0], d_ptr, n) - * return obj # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.streams.cStringStream.read_string", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":128 - * cdef FILE* file - * - * def __init__(self, fobj): # <<<<<<<<<<<<<< - * self.fobj = fobj - * self.file = npy_PyFile_Dup(fobj, "rb") - */ - -static int __pyx_pf_5scipy_2io_6matlab_7streams_10FileStream___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_pf_5scipy_2io_6matlab_7streams_10FileStream___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_fobj = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - FILE *__pyx_t_1; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__fobj,0}; - __Pyx_RefNannySetupContext("__init__"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[1] = {0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fobj); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_fobj = values[0]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_fobj = PyTuple_GET_ITEM(__pyx_args, 0); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/io/matlab/streams.pyx":129 - * - * def __init__(self, fobj): - * self.fobj = fobj # <<<<<<<<<<<<<< - * self.file = npy_PyFile_Dup(fobj, "rb") - * - */ - __Pyx_INCREF(__pyx_v_fobj); - __Pyx_GIVEREF(__pyx_v_fobj); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->__pyx_base.fobj); - __Pyx_DECREF(((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->__pyx_base.fobj); - ((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->__pyx_base.fobj = __pyx_v_fobj; - - /* "scipy/io/matlab/streams.pyx":130 - * def __init__(self, fobj): - * self.fobj = fobj - * self.file = npy_PyFile_Dup(fobj, "rb") # <<<<<<<<<<<<<< - * - * def __del__(self): - */ - __pyx_t_1 = npy_PyFile_Dup(__pyx_v_fobj, __pyx_k__rb); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - ((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->file = __pyx_t_1; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":132 
- * self.file = npy_PyFile_Dup(fobj, "rb") - * - * def __del__(self): # <<<<<<<<<<<<<< - * npy_PyFile_DupClose(self.fobj, self.file) - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_1__del__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_1__del__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__del__"); - - /* "scipy/io/matlab/streams.pyx":133 - * - * def __del__(self): - * npy_PyFile_DupClose(self.fobj, self.file) # <<<<<<<<<<<<<< - * - * cpdef int seek(self, long int offset, int whence=0) except -1: - */ - __pyx_t_1 = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->__pyx_base.fobj; - __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = npy_PyFile_DupClose(__pyx_t_1, ((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->file); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.__del__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":135 - * npy_PyFile_DupClose(self.fobj, self.file) - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * cdef int ret - * """ move `offset` bytes in stream - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_2seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject 
*__pyx_kwds); /*proto*/ -static int __pyx_f_5scipy_2io_6matlab_7streams_10FileStream_seek(struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *__pyx_v_self, long __pyx_v_offset, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_10FileStream_seek *__pyx_optional_args) { - int __pyx_v_whence = ((int)0); - int __pyx_v_ret; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("seek"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_whence = __pyx_optional_args->whence; - } - } - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__seek); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_2seek)) { - __pyx_t_2 = PyInt_FromLong(__pyx_v_offset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromLong(__pyx_v_whence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - 
__Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_5; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/streams.pyx":155 - * ret : int - * """ - * ret = fseek(self.file, offset, whence) # <<<<<<<<<<<<<< - * if ret: - * raise IOError('Failed seek') - */ - __pyx_v_ret = fseek(__pyx_v_self->file, __pyx_v_offset, __pyx_v_whence); - - /* "scipy/io/matlab/streams.pyx":156 - * """ - * ret = fseek(self.file, offset, whence) - * if ret: # <<<<<<<<<<<<<< - * raise IOError('Failed seek') - * return -1 - */ - if (__pyx_v_ret) { - - /* "scipy/io/matlab/streams.pyx":157 - * ret = fseek(self.file, offset, whence) - * if ret: - * raise IOError('Failed seek') # <<<<<<<<<<<<<< - * return -1 - * return ret - */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":159 - * raise IOError('Failed seek') - * return -1 - * return ret # <<<<<<<<<<<<<< - * - * cpdef long 
int tell(self): - */ - __pyx_r = __pyx_v_ret; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":135 - * npy_PyFile_DupClose(self.fobj, self.file) - * - * cpdef int seek(self, long int offset, int whence=0) except -1: # <<<<<<<<<<<<<< - * cdef int ret - * """ move `offset` bytes in stream - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_2seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_2seek(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - long __pyx_v_offset; - int __pyx_v_whence; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__offset,&__pyx_n_s__whence,0}; - __Pyx_RefNannySetupContext("seek"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__offset); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s__whence); - if (value) { values[1] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "seek") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_offset = __Pyx_PyInt_AsLong(values[0]); if (unlikely((__pyx_v_offset == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - if (values[1]) { - __pyx_v_whence = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_whence == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_whence = ((int)0); - } - } else { - __pyx_v_whence = ((int)0); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: __pyx_v_whence = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_whence == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 1: __pyx_v_offset = __Pyx_PyInt_AsLong(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_offset == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("seek", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_XDECREF(__pyx_r); - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.whence = __pyx_v_whence; - 
__pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_FileStream *)((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->__pyx_base.__pyx_vtab)->__pyx_base.seek(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self), __pyx_v_offset, 1, &__pyx_t_2); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.seek", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":161 - * return ret - * - * cpdef long int tell(self): # <<<<<<<<<<<<<< - * return ftell(self.file) - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_3tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static long __pyx_f_5scipy_2io_6matlab_7streams_10FileStream_tell(struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *__pyx_v_self, int __pyx_skip_dispatch) { - long __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - long __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("tell"); - /* Check if called by wrapper */ - if (unlikely(__pyx_skip_dispatch)) ; - /* Check if overriden in Python */ - else if (unlikely(Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0)) { - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__tell); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (void *)&__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_3tell)) { - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_AsLong(__pyx_t_2); if (unlikely((__pyx_t_3 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - - /* "scipy/io/matlab/streams.pyx":162 - * - * cpdef long int tell(self): - * return ftell(self.file) # <<<<<<<<<<<<<< - * - * cdef int read_into(self, void *buf, size_t n) except -1: - */ - __pyx_r = ftell(__pyx_v_self->file); - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.tell", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":161 - * return ret - * - * cpdef long int tell(self): # <<<<<<<<<<<<<< - * return ftell(self.file) - * - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_3tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_3tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - long __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; 
- const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("tell"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_FileStream *)((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)__pyx_v_self)->__pyx_base.__pyx_vtab)->__pyx_base.tell(((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_self), 1); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.tell", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":164 - * return ftell(self.file) - * - * cdef int read_into(self, void *buf, size_t n) except -1: # <<<<<<<<<<<<<< - * """ Read n bytes from stream into pre-allocated buffer `buf` - * """ - */ - -static int __pyx_f_5scipy_2io_6matlab_7streams_10FileStream_read_into(struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *__pyx_v_self, void *__pyx_v_buf, size_t __pyx_v_n) { - size_t __pyx_v_n_red; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_into"); - - /* "scipy/io/matlab/streams.pyx":170 - * size_t n_red - * char* d_ptr - * n_red = fread(buf, 1, n, self.file) # <<<<<<<<<<<<<< - * if n_red != n: - * raise IOError('Could not read bytes') - */ - __pyx_v_n_red = 
fread(__pyx_v_buf, 1, __pyx_v_n, __pyx_v_self->file); - - /* "scipy/io/matlab/streams.pyx":171 - * char* d_ptr - * n_red = fread(buf, 1, n, self.file) - * if n_red != n: # <<<<<<<<<<<<<< - * raise IOError('Could not read bytes') - * return -1 - */ - __pyx_t_1 = (__pyx_v_n_red != __pyx_v_n); - if (__pyx_t_1) { - - /* "scipy/io/matlab/streams.pyx":172 - * n_red = fread(buf, 1, n, self.file) - * if n_red != n: - * raise IOError('Could not read bytes') # <<<<<<<<<<<<<< - * return -1 - * return 0 - */ - __pyx_t_2 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":174 - * raise IOError('Could not read bytes') - * return -1 - * return 0 # <<<<<<<<<<<<<< - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): - */ - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.read_into", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":176 - * return 0 - * - * cdef object read_string(self, size_t n, void **pp, int copy=True): # <<<<<<<<<<<<<< - * """ Make new memory, wrap with object """ - * cdef object obj = pyalloc_v(n, pp) - */ - -static PyObject *__pyx_f_5scipy_2io_6matlab_7streams_10FileStream_read_string(struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *__pyx_v_self, size_t __pyx_v_n, void **__pyx_v_pp, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_10FileStream_read_string *__pyx_optional_args) { - int 
__pyx_v_copy = ((int)1); - PyObject *__pyx_v_obj = 0; - size_t __pyx_v_n_red; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("read_string"); - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_copy = __pyx_optional_args->copy; - } - } - - /* "scipy/io/matlab/streams.pyx":178 - * cdef object read_string(self, size_t n, void **pp, int copy=True): - * """ Make new memory, wrap with object """ - * cdef object obj = pyalloc_v(n, pp) # <<<<<<<<<<<<<< - * cdef size_t n_red = fread(pp[0], 1, n, self.file) - * if n_red != n: - */ - __pyx_t_1 = __pyx_f_5scipy_2io_6matlab_7pyalloc_pyalloc_v(__pyx_v_n, __pyx_v_pp); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_obj = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":179 - * """ Make new memory, wrap with object """ - * cdef object obj = pyalloc_v(n, pp) - * cdef size_t n_red = fread(pp[0], 1, n, self.file) # <<<<<<<<<<<<<< - * if n_red != n: - * raise IOError('could not read bytes') - */ - __pyx_v_n_red = fread((__pyx_v_pp[0]), 1, __pyx_v_n, __pyx_v_self->file); - - /* "scipy/io/matlab/streams.pyx":180 - * cdef object obj = pyalloc_v(n, pp) - * cdef size_t n_red = fread(pp[0], 1, n, self.file) - * if n_red != n: # <<<<<<<<<<<<<< - * raise IOError('could not read bytes') - * return obj - */ - __pyx_t_2 = (__pyx_v_n_red != __pyx_v_n); - if (__pyx_t_2) { - - /* "scipy/io/matlab/streams.pyx":181 - * cdef size_t n_red = fread(pp[0], 1, n, self.file) - * if n_red != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * return obj - * - */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":182 - * if n_red != n: - * raise IOError('could not read bytes') - * return obj # <<<<<<<<<<<<<< - * - * def _read_into(GenericStream st, size_t n): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.streams.FileStream.read_string", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":184 - * return obj - * - * def _read_into(GenericStream st, size_t n): # <<<<<<<<<<<<<< - * # for testing only. 
Use st.read instead - * cdef char * d_ptr - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams__read_into(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_5scipy_2io_6matlab_7streams__read_into = {__Pyx_NAMESTR("_read_into"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams__read_into, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams__read_into(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_v_st = 0; - size_t __pyx_v_n; - char *__pyx_v_d_ptr; - PyObject *__pyx_v_my_str = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - char *__pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__st,&__pyx_n_s__n,0}; - __Pyx_RefNannySetupContext("_read_into"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_read_into", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), 
"_read_into") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_st = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)values[0]); - __pyx_v_n = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_n == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_st = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)PyTuple_GET_ITEM(__pyx_args, 0)); - __pyx_v_n = __Pyx_PyInt_AsSize_t(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_n == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_read_into", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams._read_into", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_st), __pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream, 1, "st", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/streams.pyx":187 - * # for testing only. 
Use st.read instead - * cdef char * d_ptr - * my_str = b' ' * n # <<<<<<<<<<<<<< - * d_ptr = my_str - * st.read_into(d_ptr, n) - */ - __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Multiply(((PyObject *)__pyx_kp_b_11), __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_my_str = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/io/matlab/streams.pyx":188 - * cdef char * d_ptr - * my_str = b' ' * n - * d_ptr = my_str # <<<<<<<<<<<<<< - * st.read_into(d_ptr, n) - * return my_str - */ - __pyx_t_3 = PyBytes_AsString(((PyObject *)__pyx_v_my_str)); if (unlikely((!__pyx_t_3) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_d_ptr = __pyx_t_3; - - /* "scipy/io/matlab/streams.pyx":189 - * my_str = b' ' * n - * d_ptr = my_str - * st.read_into(d_ptr, n) # <<<<<<<<<<<<<< - * return my_str - * - */ - __pyx_t_4 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_st->__pyx_vtab)->read_into(__pyx_v_st, __pyx_v_d_ptr, __pyx_v_n); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - /* "scipy/io/matlab/streams.pyx":190 - * d_ptr = my_str - * st.read_into(d_ptr, n) - * return my_str # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_my_str)); - __pyx_r = ((PyObject *)__pyx_v_my_str); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.io.matlab.streams._read_into", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_my_str); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":193 - * - * - * def _read_string(GenericStream st, size_t n): # <<<<<<<<<<<<<< - * # for testing only. Use st.read instead - * cdef char *d_ptr - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_1_read_string(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_5scipy_2io_6matlab_7streams_1_read_string = {__Pyx_NAMESTR("_read_string"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_1_read_string, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_1_read_string(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_v_st = 0; - size_t __pyx_v_n; - char *__pyx_v_d_ptr; - PyObject *__pyx_v_obj = 0; - PyObject *__pyx_v_my_str = NULL; - char *__pyx_v_mys_ptr; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - char *__pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__st,&__pyx_n_s__n,0}; - __Pyx_RefNannySetupContext("_read_string"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st); - if (likely(values[0])) kw_args--; - else 
goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_read_string", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_read_string") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_st = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)values[0]); - __pyx_v_n = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_n == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_st = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)PyTuple_GET_ITEM(__pyx_args, 0)); - __pyx_v_n = __Pyx_PyInt_AsSize_t(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_n == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_read_string", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.io.matlab.streams._read_string", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_st), __pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream, 1, "st", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
- /* "scipy/io/matlab/streams.pyx":196 - * # for testing only. Use st.read instead - * cdef char *d_ptr - * cdef object obj = st.read_string(n, &d_ptr, True) # <<<<<<<<<<<<<< - * my_str = b'A' * n - * cdef char *mys_ptr = my_str - */ - __pyx_t_2.__pyx_n = 1; - __pyx_t_2.copy = 1; - __pyx_t_1 = ((struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_v_st->__pyx_vtab)->read_string(__pyx_v_st, __pyx_v_n, ((void **)(&__pyx_v_d_ptr)), &__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_obj = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":197 - * cdef char *d_ptr - * cdef object obj = st.read_string(n, &d_ptr, True) - * my_str = b'A' * n # <<<<<<<<<<<<<< - * cdef char *mys_ptr = my_str - * memcpy(mys_ptr, d_ptr, n) - */ - __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(((PyObject *)__pyx_n_b__A), __pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_my_str = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/io/matlab/streams.pyx":198 - * cdef object obj = st.read_string(n, &d_ptr, True) - * my_str = b'A' * n - * cdef char *mys_ptr = my_str # <<<<<<<<<<<<<< - * memcpy(mys_ptr, d_ptr, n) - * return my_str - */ - __pyx_t_4 = PyBytes_AsString(((PyObject *)__pyx_v_my_str)); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_mys_ptr = __pyx_t_4; - - /* "scipy/io/matlab/streams.pyx":199 - * my_str = b'A' * n - * cdef char *mys_ptr = my_str - * memcpy(mys_ptr, d_ptr, n) # 
<<<<<<<<<<<<<< - * return my_str - * - */ - memcpy(__pyx_v_mys_ptr, __pyx_v_d_ptr, __pyx_v_n); - - /* "scipy/io/matlab/streams.pyx":200 - * cdef char *mys_ptr = my_str - * memcpy(mys_ptr, d_ptr, n) - * return my_str # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_my_str)); - __pyx_r = ((PyObject *)__pyx_v_my_str); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams._read_string", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XDECREF(__pyx_v_my_str); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":203 - * - * - * cpdef GenericStream make_stream(object fobj): # <<<<<<<<<<<<<< - * """ Make stream of correct type for file-like `fobj` - * """ - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_2make_stream(PyObject *__pyx_self, PyObject *__pyx_v_fobj); /*proto*/ -static struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_f_5scipy_2io_6matlab_7streams_make_stream(PyObject *__pyx_v_fobj, int __pyx_skip_dispatch) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("make_stream"); - - /* "scipy/io/matlab/streams.pyx":206 - * """ Make stream of correct type for file-like `fobj` - * """ - * if npy_PyFile_Check(fobj): # <<<<<<<<<<<<<< - * if sys.version_info[0] >= 3: - * return GenericStream(fobj) - */ - __pyx_t_1 = npy_PyFile_Check(__pyx_v_fobj); - if (__pyx_t_1) { - - /* "scipy/io/matlab/streams.pyx":207 - * """ - * if 
npy_PyFile_Check(fobj): - * if sys.version_info[0] >= 3: # <<<<<<<<<<<<<< - * return GenericStream(fobj) - * else: - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__version_info); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_int_3, Py_GE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - - /* "scipy/io/matlab/streams.pyx":208 - * if npy_PyFile_Check(fobj): - * if sys.version_info[0] >= 3: - * return GenericStream(fobj) # <<<<<<<<<<<<<< - * else: - * return FileStream(fobj) - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_fobj); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_fobj); - __Pyx_GIVEREF(__pyx_v_fobj); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream)), 
((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - goto __pyx_L4; - } - /*else*/ { - - /* "scipy/io/matlab/streams.pyx":210 - * return GenericStream(fobj) - * else: - * return FileStream(fobj) # <<<<<<<<<<<<<< - * elif PycStringIO_InputCheck(fobj) or PycStringIO_OutputCheck(fobj): - * return cStringStream(fobj) - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_fobj); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_fobj); - __Pyx_GIVEREF(__pyx_v_fobj); - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5scipy_2io_6matlab_7streams_FileStream)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - } - __pyx_L4:; - goto __pyx_L3; - } - - /* "scipy/io/matlab/streams.pyx":211 - * else: - * return FileStream(fobj) - * elif PycStringIO_InputCheck(fobj) or PycStringIO_OutputCheck(fobj): # <<<<<<<<<<<<<< - * return cStringStream(fobj) - * return GenericStream(fobj) - */ - __pyx_t_4 = PycStringIO_InputCheck(__pyx_v_fobj); - if (!__pyx_t_4) { - __pyx_t_5 = PycStringIO_OutputCheck(__pyx_v_fobj); - __pyx_t_6 = __pyx_t_5; - } else { - __pyx_t_6 = __pyx_t_4; - } - if (__pyx_t_6) { - - /* "scipy/io/matlab/streams.pyx":212 - * return FileStream(fobj) - * elif 
PycStringIO_InputCheck(fobj) or PycStringIO_OutputCheck(fobj): - * return cStringStream(fobj) # <<<<<<<<<<<<<< - * return GenericStream(fobj) - * - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_fobj); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_fobj); - __Pyx_GIVEREF(__pyx_v_fobj); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5scipy_2io_6matlab_7streams_cStringStream)), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/io/matlab/streams.pyx":213 - * elif PycStringIO_InputCheck(fobj) or PycStringIO_OutputCheck(fobj): - * return cStringStream(fobj) - * return GenericStream(fobj) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_fobj); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_fobj); - __Pyx_GIVEREF(__pyx_v_fobj); - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)__pyx_t_3); - __pyx_t_3 = 
0; - goto __pyx_L0; - - __pyx_r = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)Py_None); __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.io.matlab.streams.make_stream", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/io/matlab/streams.pyx":203 - * - * - * cpdef GenericStream make_stream(object fobj): # <<<<<<<<<<<<<< - * """ Make stream of correct type for file-like `fobj` - * """ - */ - -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_2make_stream(PyObject *__pyx_self, PyObject *__pyx_v_fobj); /*proto*/ -static char __pyx_doc_5scipy_2io_6matlab_7streams_2make_stream[] = " Make stream of correct type for file-like `fobj`\n "; -static PyObject *__pyx_pf_5scipy_2io_6matlab_7streams_2make_stream(PyObject *__pyx_self, PyObject *__pyx_v_fobj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("make_stream"); - __pyx_self = __pyx_self; - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((PyObject *)__pyx_f_5scipy_2io_6matlab_7streams_make_stream(__pyx_v_fobj, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.streams.make_stream", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pyalloc.pxd":8 - * - * # Function to allocate, wrap memory via Python string creation - * 
cdef inline object pyalloc_v(Py_ssize_t n, void **pp): # <<<<<<<<<<<<<< - * cdef object ob = PyBytes_FromStringAndSize(NULL, n) - * pp[0] = PyBytes_AS_STRING(ob) - */ - -static CYTHON_INLINE PyObject *__pyx_f_5scipy_2io_6matlab_7pyalloc_pyalloc_v(Py_ssize_t __pyx_v_n, void **__pyx_v_pp) { - PyObject *__pyx_v_ob = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("pyalloc_v"); - - /* "pyalloc.pxd":9 - * # Function to allocate, wrap memory via Python string creation - * cdef inline object pyalloc_v(Py_ssize_t n, void **pp): - * cdef object ob = PyBytes_FromStringAndSize(NULL, n) # <<<<<<<<<<<<<< - * pp[0] = PyBytes_AS_STRING(ob) - * return ob - */ - __pyx_t_1 = ((PyObject *)PyBytes_FromStringAndSize(NULL, __pyx_v_n)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_ob = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pyalloc.pxd":10 - * cdef inline object pyalloc_v(Py_ssize_t n, void **pp): - * cdef object ob = PyBytes_FromStringAndSize(NULL, n) - * pp[0] = PyBytes_AS_STRING(ob) # <<<<<<<<<<<<<< - * return ob - * - */ - (__pyx_v_pp[0]) = ((void *)PyBytes_AS_STRING(__pyx_v_ob)); - - /* "pyalloc.pxd":11 - * cdef object ob = PyBytes_FromStringAndSize(NULL, n) - * pp[0] = PyBytes_AS_STRING(ob) - * return ob # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_ob); - __pyx_r = __pyx_v_ob; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.io.matlab.pyalloc.pyalloc_v", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_ob); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct 
__pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream __pyx_vtable_5scipy_2io_6matlab_7streams_GenericStream; - -static PyObject *__pyx_tp_new_5scipy_2io_6matlab_7streams_GenericStream(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *p; - PyObject *o = (*t->tp_alloc)(t, 0); - if (!o) return 0; - p = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)o); - p->__pyx_vtab = __pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream; - p->fobj = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_5scipy_2io_6matlab_7streams_GenericStream(PyObject *o) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *p = (struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)o; - Py_XDECREF(p->fobj); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_5scipy_2io_6matlab_7streams_GenericStream(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *p = (struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)o; - if (p->fobj) { - e = (*v)(p->fobj, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_5scipy_2io_6matlab_7streams_GenericStream(PyObject *o) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *p = (struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *)o; - PyObject* tmp; - tmp = ((PyObject*)p->fobj); - p->fobj = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyMethodDef __pyx_methods_5scipy_2io_6matlab_7streams_GenericStream[] = { - {__Pyx_NAMESTR("seek"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_1seek, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("tell"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_2tell, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("read"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream_3read, METH_O, __Pyx_DOCSTR(0)}, - {0, 0, 0, 
0} -}; - -static PyNumberMethods __pyx_tp_as_number_GenericStream = { - 0, /*nb_add*/ - 0, /*nb_subtract*/ - 0, /*nb_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_divide*/ - #endif - 0, /*nb_remainder*/ - 0, /*nb_divmod*/ - 0, /*nb_power*/ - 0, /*nb_negative*/ - 0, /*nb_positive*/ - 0, /*nb_absolute*/ - 0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_coerce*/ - #endif - 0, /*nb_int*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_long*/ - #else - 0, /*reserved*/ - #endif - 0, /*nb_float*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_oct*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*nb_hex*/ - #endif - 0, /*nb_inplace_add*/ - 0, /*nb_inplace_subtract*/ - 0, /*nb_inplace_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_inplace_divide*/ - #endif - 0, /*nb_inplace_remainder*/ - 0, /*nb_inplace_power*/ - 0, /*nb_inplace_lshift*/ - 0, /*nb_inplace_rshift*/ - 0, /*nb_inplace_and*/ - 0, /*nb_inplace_xor*/ - 0, /*nb_inplace_or*/ - 0, /*nb_floor_divide*/ - 0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - #if PY_VERSION_HEX >= 0x02050000 - 0, /*nb_index*/ - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_GenericStream = { - 0, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_GenericStream = { - 0, /*mp_length*/ - 0, /*mp_subscript*/ - 0, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_GenericStream = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_getbuffer*/ - #endif - 
#if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_releasebuffer*/ - #endif -}; - -static PyTypeObject __pyx_type_5scipy_2io_6matlab_7streams_GenericStream = { - PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.io.matlab.streams.GenericStream"), /*tp_name*/ - sizeof(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5scipy_2io_6matlab_7streams_GenericStream, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #else - 0, /*reserved*/ - #endif - 0, /*tp_repr*/ - &__pyx_tp_as_number_GenericStream, /*tp_as_number*/ - &__pyx_tp_as_sequence_GenericStream, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_GenericStream, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_GenericStream, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_5scipy_2io_6matlab_7streams_GenericStream, /*tp_traverse*/ - __pyx_tp_clear_5scipy_2io_6matlab_7streams_GenericStream, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_5scipy_2io_6matlab_7streams_GenericStream, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_pf_5scipy_2io_6matlab_7streams_13GenericStream___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_5scipy_2io_6matlab_7streams_GenericStream, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - #if PY_VERSION_HEX >= 0x02060000 - 0, /*tp_version_tag*/ - #endif -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_cStringStream 
__pyx_vtable_5scipy_2io_6matlab_7streams_cStringStream; - -static PyObject *__pyx_tp_new_5scipy_2io_6matlab_7streams_cStringStream(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream *p; - PyObject *o = __pyx_tp_new_5scipy_2io_6matlab_7streams_GenericStream(t, a, k); - if (!o) return 0; - p = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream*)__pyx_vtabptr_5scipy_2io_6matlab_7streams_cStringStream; - return o; -} - -static PyMethodDef __pyx_methods_5scipy_2io_6matlab_7streams_cStringStream[] = { - {__Pyx_NAMESTR("seek"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_13cStringStream_seek, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, - {0, 0, 0, 0} -}; - -static PyNumberMethods __pyx_tp_as_number_cStringStream = { - 0, /*nb_add*/ - 0, /*nb_subtract*/ - 0, /*nb_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_divide*/ - #endif - 0, /*nb_remainder*/ - 0, /*nb_divmod*/ - 0, /*nb_power*/ - 0, /*nb_negative*/ - 0, /*nb_positive*/ - 0, /*nb_absolute*/ - 0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_coerce*/ - #endif - 0, /*nb_int*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_long*/ - #else - 0, /*reserved*/ - #endif - 0, /*nb_float*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_oct*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*nb_hex*/ - #endif - 0, /*nb_inplace_add*/ - 0, /*nb_inplace_subtract*/ - 0, /*nb_inplace_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_inplace_divide*/ - #endif - 0, /*nb_inplace_remainder*/ - 0, /*nb_inplace_power*/ - 0, /*nb_inplace_lshift*/ - 0, /*nb_inplace_rshift*/ - 0, /*nb_inplace_and*/ - 0, /*nb_inplace_xor*/ - 0, /*nb_inplace_or*/ - 0, /*nb_floor_divide*/ - 0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - #if PY_VERSION_HEX >= 0x02050000 
- 0, /*nb_index*/ - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_cStringStream = { - 0, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_cStringStream = { - 0, /*mp_length*/ - 0, /*mp_subscript*/ - 0, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_cStringStream = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_getbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_releasebuffer*/ - #endif -}; - -static PyTypeObject __pyx_type_5scipy_2io_6matlab_7streams_cStringStream = { - PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.io.matlab.streams.cStringStream"), /*tp_name*/ - sizeof(struct __pyx_obj_5scipy_2io_6matlab_7streams_cStringStream), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5scipy_2io_6matlab_7streams_GenericStream, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #else - 0, /*reserved*/ - #endif - 0, /*tp_repr*/ - &__pyx_tp_as_number_cStringStream, /*tp_as_number*/ - &__pyx_tp_as_sequence_cStringStream, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_cStringStream, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_cStringStream, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_5scipy_2io_6matlab_7streams_GenericStream, /*tp_traverse*/ - 
__pyx_tp_clear_5scipy_2io_6matlab_7streams_GenericStream, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_5scipy_2io_6matlab_7streams_cStringStream, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_5scipy_2io_6matlab_7streams_cStringStream, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - #if PY_VERSION_HEX >= 0x02060000 - 0, /*tp_version_tag*/ - #endif -}; -static struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_FileStream __pyx_vtable_5scipy_2io_6matlab_7streams_FileStream; - -static PyObject *__pyx_tp_new_5scipy_2io_6matlab_7streams_FileStream(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *p; - PyObject *o = __pyx_tp_new_5scipy_2io_6matlab_7streams_GenericStream(t, a, k); - if (!o) return 0; - p = ((struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_5scipy_2io_6matlab_7streams_GenericStream*)__pyx_vtabptr_5scipy_2io_6matlab_7streams_FileStream; - return o; -} - -static PyMethodDef __pyx_methods_5scipy_2io_6matlab_7streams_FileStream[] = { - {__Pyx_NAMESTR("__del__"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_1__del__, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("seek"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_2seek, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("tell"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_10FileStream_3tell, METH_NOARGS, __Pyx_DOCSTR(0)}, - {0, 0, 0, 0} -}; - -static PyNumberMethods __pyx_tp_as_number_FileStream = { - 0, /*nb_add*/ - 0, /*nb_subtract*/ - 0, /*nb_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, 
/*nb_divide*/ - #endif - 0, /*nb_remainder*/ - 0, /*nb_divmod*/ - 0, /*nb_power*/ - 0, /*nb_negative*/ - 0, /*nb_positive*/ - 0, /*nb_absolute*/ - 0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_coerce*/ - #endif - 0, /*nb_int*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_long*/ - #else - 0, /*reserved*/ - #endif - 0, /*nb_float*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_oct*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*nb_hex*/ - #endif - 0, /*nb_inplace_add*/ - 0, /*nb_inplace_subtract*/ - 0, /*nb_inplace_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_inplace_divide*/ - #endif - 0, /*nb_inplace_remainder*/ - 0, /*nb_inplace_power*/ - 0, /*nb_inplace_lshift*/ - 0, /*nb_inplace_rshift*/ - 0, /*nb_inplace_and*/ - 0, /*nb_inplace_xor*/ - 0, /*nb_inplace_or*/ - 0, /*nb_floor_divide*/ - 0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - #if PY_VERSION_HEX >= 0x02050000 - 0, /*nb_index*/ - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_FileStream = { - 0, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_FileStream = { - 0, /*mp_length*/ - 0, /*mp_subscript*/ - 0, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_FileStream = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_getbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_releasebuffer*/ - #endif -}; - -static PyTypeObject __pyx_type_5scipy_2io_6matlab_7streams_FileStream = { - 
PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.io.matlab.streams.FileStream"), /*tp_name*/ - sizeof(struct __pyx_obj_5scipy_2io_6matlab_7streams_FileStream), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5scipy_2io_6matlab_7streams_GenericStream, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #else - 0, /*reserved*/ - #endif - 0, /*tp_repr*/ - &__pyx_tp_as_number_FileStream, /*tp_as_number*/ - &__pyx_tp_as_sequence_FileStream, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_FileStream, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_FileStream, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_5scipy_2io_6matlab_7streams_GenericStream, /*tp_traverse*/ - __pyx_tp_clear_5scipy_2io_6matlab_7streams_GenericStream, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_5scipy_2io_6matlab_7streams_FileStream, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_pf_5scipy_2io_6matlab_7streams_10FileStream___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_5scipy_2io_6matlab_7streams_FileStream, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - #if PY_VERSION_HEX >= 0x02060000 - 0, /*tp_version_tag*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {__Pyx_NAMESTR("make_stream"), (PyCFunction)__pyx_pf_5scipy_2io_6matlab_7streams_2make_stream, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_2io_6matlab_7streams_2make_stream)}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef 
__pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("streams"), - 0, /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, - {&__pyx_kp_b_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 0, 0}, - {&__pyx_n_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 1}, - {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0}, - {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0}, - {&__pyx_n_b__A, __pyx_k__A, sizeof(__pyx_k__A), 0, 0, 0, 1}, - {&__pyx_n_s__IOError, __pyx_k__IOError, sizeof(__pyx_k__IOError), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s___read_into, __pyx_k___read_into, sizeof(__pyx_k___read_into), 0, 0, 1, 1}, - {&__pyx_n_s___read_string, __pyx_k___read_string, sizeof(__pyx_k___read_string), 0, 0, 1, 1}, - {&__pyx_n_s__fobj, __pyx_k__fobj, sizeof(__pyx_k__fobj), 0, 0, 1, 1}, - {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, - {&__pyx_n_s__offset, __pyx_k__offset, sizeof(__pyx_k__offset), 0, 0, 1, 1}, - {&__pyx_n_s__read, __pyx_k__read, sizeof(__pyx_k__read), 0, 0, 1, 1}, - {&__pyx_n_s__seek, __pyx_k__seek, sizeof(__pyx_k__seek), 0, 0, 1, 1}, - {&__pyx_n_s__st, __pyx_k__st, sizeof(__pyx_k__st), 0, 0, 1, 1}, - {&__pyx_n_s__sys, __pyx_k__sys, sizeof(__pyx_k__sys), 0, 0, 1, 1}, - {&__pyx_n_s__tell, __pyx_k__tell, sizeof(__pyx_k__tell), 0, 0, 1, 1}, - {&__pyx_n_s__version_info, __pyx_k__version_info, sizeof(__pyx_k__version_info), 0, 0, 1, 1}, - {&__pyx_n_s__whence, __pyx_k__whence, sizeof(__pyx_k__whence), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_IOError = __Pyx_GetName(__pyx_b, __pyx_n_s__IOError); if 
(!__pyx_builtin_IOError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "scipy/io/matlab/streams.pyx":68 - * data = self.fobj.read(n) - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * return -1 - * d_ptr = data - */ - __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); - - /* "scipy/io/matlab/streams.pyx":78 - * data = self.fobj.read(n) - * if PyBytes_Size(data) != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * if copy != True: - * pp[0] = PyBytes_AS_STRING(data) - */ - __pyx_k_tuple_3 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_3)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_3, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_3)); - - /* "scipy/io/matlab/streams.pyx":105 - * n_red = StringIO_cread(self.fobj, &d_ptr, n) - * if n_red != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * memcpy(buf, d_ptr, n) - * return 0 - */ - __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4)); - 
__Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); - - /* "scipy/io/matlab/streams.pyx":119 - * cdef size_t n_red = StringIO_cread(self.fobj, &d_ptr, n) - * if n_red != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * obj = pyalloc_v(n, pp) - * memcpy(pp[0], d_ptr, n) - */ - __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_5)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_5, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_5)); - - /* "scipy/io/matlab/streams.pyx":157 - * ret = fseek(self.file, offset, whence) - * if ret: - * raise IOError('Failed seek') # <<<<<<<<<<<<<< - * return -1 - * return ret - */ - __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_7)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_6)); - PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, ((PyObject *)__pyx_kp_s_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7)); - - /* "scipy/io/matlab/streams.pyx":172 - * n_red = fread(buf, 1, n, self.file) - * if n_red != n: - * raise IOError('Could not read bytes') # <<<<<<<<<<<<<< - * return -1 - * return 0 - */ - __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_9)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_8)); - PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_s_8)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_kp_s_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); - - /* "scipy/io/matlab/streams.pyx":181 - * cdef size_t n_red = fread(pp[0], 1, n, self.file) - * if n_red != n: - * raise IOError('could not read bytes') # <<<<<<<<<<<<<< - * return obj - * - */ - __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initstreams(void); /*proto*/ -PyMODINIT_FUNC initstreams(void) -#else -PyMODINIT_FUNC PyInit_streams(void); /*proto*/ -PyMODINIT_FUNC PyInit_streams(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_streams(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = 
PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("streams"), __pyx_methods, 0, 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. 
---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__io__matlab__streams) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - if (__Pyx_ExportFunction("make_stream", (void (*)(void))__pyx_f_5scipy_2io_6matlab_7streams_make_stream, "struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *(PyObject *, int __pyx_skip_dispatch)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Type init code ---*/ - __pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream = &__pyx_vtable_5scipy_2io_6matlab_7streams_GenericStream; - __pyx_vtable_5scipy_2io_6matlab_7streams_GenericStream.seek = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, long, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_seek; - __pyx_vtable_5scipy_2io_6matlab_7streams_GenericStream.tell = (long (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_tell; - __pyx_vtable_5scipy_2io_6matlab_7streams_GenericStream.read_into = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, void *, 
size_t))__pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_read_into; - __pyx_vtable_5scipy_2io_6matlab_7streams_GenericStream.read_string = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, size_t, void **, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_7streams_13GenericStream_read_string; - if (PyType_Ready(&__pyx_type_5scipy_2io_6matlab_7streams_GenericStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5scipy_2io_6matlab_7streams_GenericStream.tp_dict, __pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "GenericStream", (PyObject *)&__pyx_type_5scipy_2io_6matlab_7streams_GenericStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream = &__pyx_type_5scipy_2io_6matlab_7streams_GenericStream; - __pyx_vtabptr_5scipy_2io_6matlab_7streams_cStringStream = &__pyx_vtable_5scipy_2io_6matlab_7streams_cStringStream; - __pyx_vtable_5scipy_2io_6matlab_7streams_cStringStream.__pyx_base = *__pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream; - __pyx_vtable_5scipy_2io_6matlab_7streams_cStringStream.__pyx_base.seek = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, long, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_7streams_13cStringStream_seek; - __pyx_vtable_5scipy_2io_6matlab_7streams_cStringStream.__pyx_base.read_into = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, void *, size_t))__pyx_f_5scipy_2io_6matlab_7streams_13cStringStream_read_into; - 
__pyx_vtable_5scipy_2io_6matlab_7streams_cStringStream.__pyx_base.read_string = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, size_t, void **, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_7streams_13cStringStream_read_string; - __pyx_type_5scipy_2io_6matlab_7streams_cStringStream.tp_base = __pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream; - if (PyType_Ready(&__pyx_type_5scipy_2io_6matlab_7streams_cStringStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5scipy_2io_6matlab_7streams_cStringStream.tp_dict, __pyx_vtabptr_5scipy_2io_6matlab_7streams_cStringStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "cStringStream", (PyObject *)&__pyx_type_5scipy_2io_6matlab_7streams_cStringStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_2io_6matlab_7streams_cStringStream = &__pyx_type_5scipy_2io_6matlab_7streams_cStringStream; - __pyx_vtabptr_5scipy_2io_6matlab_7streams_FileStream = &__pyx_vtable_5scipy_2io_6matlab_7streams_FileStream; - __pyx_vtable_5scipy_2io_6matlab_7streams_FileStream.__pyx_base = *__pyx_vtabptr_5scipy_2io_6matlab_7streams_GenericStream; - __pyx_vtable_5scipy_2io_6matlab_7streams_FileStream.__pyx_base.seek = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, long, int __pyx_skip_dispatch, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_seek *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_7streams_10FileStream_seek; - __pyx_vtable_5scipy_2io_6matlab_7streams_FileStream.__pyx_base.tell = (long (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, int __pyx_skip_dispatch))__pyx_f_5scipy_2io_6matlab_7streams_10FileStream_tell; - 
__pyx_vtable_5scipy_2io_6matlab_7streams_FileStream.__pyx_base.read_into = (int (*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, void *, size_t))__pyx_f_5scipy_2io_6matlab_7streams_10FileStream_read_into; - __pyx_vtable_5scipy_2io_6matlab_7streams_FileStream.__pyx_base.read_string = (PyObject *(*)(struct __pyx_obj_5scipy_2io_6matlab_7streams_GenericStream *, size_t, void **, struct __pyx_opt_args_5scipy_2io_6matlab_7streams_13GenericStream_read_string *__pyx_optional_args))__pyx_f_5scipy_2io_6matlab_7streams_10FileStream_read_string; - __pyx_type_5scipy_2io_6matlab_7streams_FileStream.tp_base = __pyx_ptype_5scipy_2io_6matlab_7streams_GenericStream; - if (PyType_Ready(&__pyx_type_5scipy_2io_6matlab_7streams_FileStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5scipy_2io_6matlab_7streams_FileStream.tp_dict, __pyx_vtabptr_5scipy_2io_6matlab_7streams_FileStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "FileStream", (PyObject *)&__pyx_type_5scipy_2io_6matlab_7streams_FileStream) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_2io_6matlab_7streams_FileStream = &__pyx_type_5scipy_2io_6matlab_7streams_FileStream; - /*--- Type import code ---*/ - __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Variable import 
code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/io/matlab/streams.pyx":3 - * # -*- python -*- or near enough - * - * import sys # <<<<<<<<<<<<<< - * - * from cpython cimport PyBytes_FromStringAndSize, \ - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__sys), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__sys, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":44 - * - * # initialize cStringIO - * PycString_IMPORT # <<<<<<<<<<<<<< - * - * - */ - PycString_IMPORT; - - /* "scipy/io/matlab/streams.pyx":184 - * return obj - * - * def _read_into(GenericStream st, size_t n): # <<<<<<<<<<<<<< - * # for testing only. Use st.read instead - * cdef char * d_ptr - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_2io_6matlab_7streams__read_into, NULL, __pyx_n_s_12); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___read_into, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":193 - * - * - * def _read_string(GenericStream st, size_t n): # <<<<<<<<<<<<<< - * # for testing only. 
Use st.read instead - * cdef char *d_ptr - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_2io_6matlab_7streams_1_read_string, NULL, __pyx_n_s_12); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___read_string, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/io/matlab/streams.pyx":1 - * # -*- python -*- or near enough # <<<<<<<<<<<<<< - * - * import sys - */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - /* "pyalloc.pxd":8 - * - * # Function to allocate, wrap memory via Python string creation - * cdef inline object pyalloc_v(Py_ssize_t n, void **pp): # <<<<<<<<<<<<<< - * cdef object ob = PyBytes_FromStringAndSize(NULL, n) - * pp[0] = PyBytes_AS_STRING(ob) - */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.io.matlab.streams", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.io.matlab.streams"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); 
- if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } 
else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. */ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. 
*/ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - 
"BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact) -{ - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (none_allowed && obj == Py_None) return 1; - else if (exact) { - if (Py_TYPE(obj) == type) return 1; - } - else { - if (PyObject_TypeCheck(obj, type)) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, 
empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = 
neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; 
-#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > 
const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { - PyObject *d = 0; - PyObject *cobj = 0; - union { - void (*fp)(void); - void *p; - } tmp; - - d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__"); - if (!d) { - PyErr_Clear(); - d = PyDict_New(); - if (!d) - goto bad; - Py_INCREF(d); - if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0) - goto bad; - } - tmp.fp = f; -#if PY_VERSION_HEX >= 0x02070000 && 
!(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - cobj = PyCapsule_New(tmp.p, sig, 0); -#else - cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0); -#endif - if (!cobj) - goto bad; - if (PyDict_SetItemString(d, name, cobj) < 0) - goto bad; - Py_DECREF(cobj); - Py_DECREF(d); - return 0; -bad: - Py_XDECREF(cobj); - Py_XDECREF(d); - return -1; -} - -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); - Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject 
*)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject 
*code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) 
-#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/afunc.m b/scipy-0.10.1/scipy/io/matlab/tests/afunc.m deleted file mode 100644 index 5cbf628f1a..0000000000 --- 
a/scipy-0.10.1/scipy/io/matlab/tests/afunc.m +++ /dev/null @@ -1,4 +0,0 @@ -function [a, b] = afunc(c, d) -% A function -a = c + 1; -b = d + 10; diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/japanese_utf8.txt b/scipy-0.10.1/scipy/io/matlab/tests/data/japanese_utf8.txt deleted file mode 100644 index 1459b6b6ea..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/data/japanese_utf8.txt +++ /dev/null @@ -1,5 +0,0 @@ -Japanese: -ã™ã¹ã¦ã®äººé–“ã¯ã€ç”Ÿã¾ã‚ŒãªãŒã‚‰ã«ã—ã¦è‡ªç”±ã§ã‚り〠-ã‹ã¤ã€å°ŠåŽ³ã¨æ¨©åˆ©ã¨ ã«ã¤ã„ã¦å¹³ç­‰ã§ã‚る。 -人間ã¯ã€ç†æ€§ã¨è‰¯å¿ƒã¨ã‚’授ã‘られã¦ãŠã‚Šã€ -互ã„ã«åŒèƒžã®ç²¾ç¥žã‚’ã‚‚ã£ã¦è¡Œå‹•ã—ãªã‘れã°ãªã‚‰ãªã„。 \ No newline at end of file diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat deleted file mode 100644 index 35dcb715bc..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/one_by_zero_char.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/one_by_zero_char.mat deleted file mode 100644 index 07e7dca456..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/one_by_zero_char.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/parabola.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/parabola.mat deleted file mode 100644 index 66350532a7..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/parabola.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/single_empty_string.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/single_empty_string.mat deleted file mode 100644 index 293f387719..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/single_empty_string.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/some_functions.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/some_functions.mat deleted file mode 100644 index 
cc818593b4..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/some_functions.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/sqr.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/sqr.mat deleted file mode 100644 index 2436d87cc5..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/sqr.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat deleted file mode 100644 index 453712610b..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat deleted file mode 100644 index e04d27d303..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat deleted file mode 100644 index 4c03030398..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat deleted file mode 100644 index 232a051c77..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/test_empty_struct.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/test_empty_struct.mat deleted file mode 100644 index 30c8c8ad53..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/test_empty_struct.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/test_skip_variable.mat 
b/scipy-0.10.1/scipy/io/matlab/tests/data/test_skip_variable.mat deleted file mode 100644 index efbe3fec64..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/test_skip_variable.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat deleted file mode 100644 index 512f7d8894..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat deleted file mode 100644 index a7633104c1..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat deleted file mode 100644 index 2ac1da1587..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat deleted file mode 100644 index fc893f331c..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat deleted file mode 100644 index 4198a4f2ae..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat deleted file mode 100644 index 2c7826eeac..0000000000 Binary files 
a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat deleted file mode 100644 index b3b086cc31..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat deleted file mode 100644 index 316f8894c5..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat deleted file mode 100644 index 36621b25c0..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat deleted file mode 100644 index 32fcd2a93c..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat deleted file mode 100644 index f3ecd20337..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat deleted file mode 100644 index c0c083855f..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat and /dev/null differ diff --git 
a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat deleted file mode 100644 index 6a187edb18..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat deleted file mode 100644 index 5dbfcf17dd..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat deleted file mode 100644 index 8e36c0c8ce..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat deleted file mode 100644 index a003b6d866..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat deleted file mode 100644 index 3106712e10..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat deleted file mode 100644 index 9097bb0871..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat deleted file mode 100644 index 
e7dec3b81a..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat deleted file mode 100644 index a1c9348359..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat deleted file mode 100644 index f29d4f9327..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat deleted file mode 100644 index 8b244044cf..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat deleted file mode 100644 index adb6c28ee9..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat deleted file mode 100644 index 6066c1e30f..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat deleted file mode 100644 index 3698c8853b..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat and /dev/null differ diff --git 
a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat deleted file mode 100644 index 164be1109d..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat deleted file mode 100644 index a8735e9a23..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat deleted file mode 100644 index b6fb05bb75..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat deleted file mode 100644 index eb537ab104..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat deleted file mode 100644 index cc207ed9f3..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat deleted file mode 100644 index c2f0ba2ae4..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat deleted file mode 100644 index b4dbd152d6..0000000000 
Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat deleted file mode 100644 index fadcd2366b..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat deleted file mode 100644 index 9ce65f9111..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat deleted file mode 100644 index 9c6ba793cf..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat deleted file mode 100644 index 0c4729c56b..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat deleted file mode 100644 index 6d3e068977..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat deleted file mode 100644 index fc13642263..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat 
b/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat deleted file mode 100644 index f68323b0c8..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat deleted file mode 100644 index 83dcad3424..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat deleted file mode 100644 index 59d243c4de..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat deleted file mode 100644 index cdb4191c7d..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat deleted file mode 100644 index 3b5a428501..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat deleted file mode 100644 index 8cef2dd7ea..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat deleted file mode 100644 index 5ba4810ac6..0000000000 Binary files 
a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat deleted file mode 100644 index 8964765f7b..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat deleted file mode 100644 index 1dcd72e51a..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat deleted file mode 100644 index 55cbd3c1b3..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat deleted file mode 100644 index 194ca4d7d4..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat deleted file mode 100644 index 3e1e9a1ec9..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat deleted file mode 100644 index 55b510762e..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat 
b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat deleted file mode 100644 index bdb6ce66ce..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat deleted file mode 100644 index 81c536d0b0..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat deleted file mode 100644 index 520e1cedb3..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat deleted file mode 100644 index 969b7143df..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat deleted file mode 100644 index 9117dce309..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat deleted file mode 100644 index a8a615a320..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat 
deleted file mode 100644 index 15424266a3..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat deleted file mode 100644 index 137561e1f6..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat deleted file mode 100644 index 2ad75f2e17..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat deleted file mode 100644 index 6fd12d884d..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat deleted file mode 100644 index ab93994f7b..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat deleted file mode 100644 index 63059b8447..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat deleted file mode 100644 index fa687ee988..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat and /dev/null differ diff --git 
a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat deleted file mode 100644 index 11afb41205..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat deleted file mode 100644 index 75e07a0b55..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat deleted file mode 100644 index 7d76f63643..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat deleted file mode 100644 index 954e39beb8..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat deleted file mode 100644 index 5086bb7acd..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat deleted file mode 100644 index 6feb6e4237..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat 
b/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat deleted file mode 100644 index b2ff222622..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat deleted file mode 100644 index 028841f9d3..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat deleted file mode 100644 index da57365926..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat deleted file mode 100644 index d1c97a7a2e..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat deleted file mode 100644 index c7ca095941..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat deleted file mode 100644 index 8716f7e3db..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat deleted file mode 100644 index 2c34c4d8c1..0000000000 Binary files 
a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat deleted file mode 100644 index c6dccc0028..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat deleted file mode 100644 index 0f6f5444b0..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat deleted file mode 100644 index faf9221b77..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat deleted file mode 100644 index 1b7b3d7f00..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat deleted file mode 100644 index d22fb57c81..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat b/scipy-0.10.1/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat deleted file mode 100644 index 76c51d0138..0000000000 Binary files a/scipy-0.10.1/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat and /dev/null differ diff --git 
a/scipy-0.10.1/scipy/io/matlab/tests/gen_mat4files.m b/scipy-0.10.1/scipy/io/matlab/tests/gen_mat4files.m deleted file mode 100644 index a67cc2057d..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/gen_mat4files.m +++ /dev/null @@ -1,50 +0,0 @@ -% Generates mat files for loadmat unit tests -% Uses save_matfile.m function -% This is the version for matlab 4 - -% work out matlab version and file suffix for test files -global FILEPREFIX FILESUFFIX -sepchar = '/'; -if strcmp(computer, 'PCWIN'), sepchar = '\'; end -FILEPREFIX = [pwd sepchar 'data' sepchar]; -mlv = version; -FILESUFFIX = ['_' mlv '_' computer '.mat']; - -% basic double array -theta = 0:pi/4:2*pi; -save_matfile('testdouble', theta); - -% string -save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.') - -% complex -save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); - -% asymmetric array to check indexing -a = zeros(3, 5); -a(:,1) = [1:3]'; -a(1,:) = 1:5; - -% 2D matrix -save_matfile('testmatrix', a); - -% minus number - tests signed int -save_matfile('testminus', -1); - -% single character -save_matfile('testonechar', 'r'); - -% string array -save_matfile('teststringarray', ['one '; 'two '; 'three']); - -% sparse array -save_matfile('testsparse', sparse(a)); - -% sparse complex array -b = sparse(a); -b(1,1) = b(1,1) + j; -save_matfile('testsparsecomplex', b); - -% Two variables in same file -save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') - diff --git a/scipy-0.10.1/scipy/io/matlab/tests/gen_mat5files.m b/scipy-0.10.1/scipy/io/matlab/tests/gen_mat5files.m deleted file mode 100644 index 9351127d15..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/gen_mat5files.m +++ /dev/null @@ -1,100 +0,0 @@ -% Generates mat files for loadmat unit tests -% This is the version for matlab 5 and higher -% Uses save_matfile.m function - -% work out matlab version and file suffix for test files -global FILEPREFIX FILESUFFIX -FILEPREFIX = [fullfile(pwd, 'data') filesep]; -temp = 
ver('MATLAB'); -mlv = temp.Version; -FILESUFFIX = ['_' mlv '_' computer '.mat']; - -% basic double array -theta = 0:pi/4:2*pi; -save_matfile('testdouble', theta); - -% string -save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.') - -% complex -save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); - -% asymmetric array to check indexing -a = zeros(3, 5); -a(:,1) = [1:3]'; -a(1,:) = 1:5; - -% 2D matrix -save_matfile('testmatrix', a); - -% minus number - tests signed int -save_matfile('testminus', -1); - -% single character -save_matfile('testonechar', 'r'); - -% string array -save_matfile('teststringarray', ['one '; 'two '; 'three']); - -% sparse array -save_matfile('testsparse', sparse(a)); - -% sparse complex array -b = sparse(a); -b(1,1) = b(1,1) + j; -save_matfile('testsparsecomplex', b); - -% Two variables in same file -save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') - - -% struct -save_matfile('teststruct', ... - struct('stringfield','Rats live on no evil star.',... - 'doublefield',[sqrt(2) exp(1) pi],... - 'complexfield',(1+1j)*[sqrt(2) exp(1) pi])); - -% cell -save_matfile('testcell', ... - {['This cell contains this string and 3 arrays of increasing' ... - ' length'], 1., 1.:2., 1.:3.}); - -% scalar cell -save_matfile('testscalarcell', {1}) - -% Empty cells in two cell matrices -save_matfile('testemptycell', {1, 2, [], [], 3}); - -% 3D matrix -save_matfile('test3dmatrix', reshape(1:24,[2 3 4])) - -% nested cell array -save_matfile('testcellnest', {1, {2, 3, {4, 5}}}); - -% nested struct -save_matfile('teststructnest', struct('one', 1, 'two', ... - struct('three', 'number 3'))); - -% array of struct -save_matfile('teststructarr', [struct('one', 1, 'two', 2) ... 
- struct('one', 'number 1', 'two', 'number 2')]); - -% matlab object -save_matfile('testobject', inline('x')) - -% array of matlab objects -%save_matfile('testobjarr', [inline('x') inline('x')]) - -% unicode test -if str2num(mlv) > 7 % function added 7.0.1 - fid = fopen([FILEPREFIX 'japanese_utf8.txt']); - from_japan = fread(fid, 'uint8')'; - fclose(fid); - save_matfile('testunicode', native2unicode(from_japan, 'utf-8')); -end - -% func -if str2num(mlv) > 7 % function pointers added recently - func = @afunc; - save_matfile('testfunc', func); -end \ No newline at end of file diff --git a/scipy-0.10.1/scipy/io/matlab/tests/save_matfile.m b/scipy-0.10.1/scipy/io/matlab/tests/save_matfile.m deleted file mode 100644 index a6ff677476..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/save_matfile.m +++ /dev/null @@ -1,6 +0,0 @@ -function save_matfile(test_name, v) -% saves variable passed in m with filename from prefix - -global FILEPREFIX FILESUFFIX -eval([test_name ' = v;']); -save([FILEPREFIX test_name FILESUFFIX], test_name) \ No newline at end of file diff --git a/scipy-0.10.1/scipy/io/matlab/tests/test_byteordercodes.py b/scipy-0.10.1/scipy/io/matlab/tests/test_byteordercodes.py deleted file mode 100644 index 5a70758e9e..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/test_byteordercodes.py +++ /dev/null @@ -1,30 +0,0 @@ -''' Tests for byteorder module ''' - -import sys - -from numpy.testing import assert_raises, assert_, run_module_suite - -import scipy.io.matlab.byteordercodes as sibc - - -def test_native(): - native_is_le = sys.byteorder == 'little' - assert_(sibc.sys_is_le == native_is_le) - -def test_to_numpy(): - if sys.byteorder == 'little': - assert_(sibc.to_numpy_code('native') == '<') - assert_(sibc.to_numpy_code('swapped') == '>') - else: - assert_(sibc.to_numpy_code('native') == '>') - assert_(sibc.to_numpy_code('swapped') == '<') - assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('=')) - assert_(sibc.to_numpy_code('big') == '>') - for 
code in ('little', '<', 'l', 'L', 'le'): - assert_(sibc.to_numpy_code(code) == '<') - for code in ('big', '>', 'b', 'B', 'be'): - assert_(sibc.to_numpy_code(code) == '>') - assert_raises(ValueError, sibc.to_numpy_code, 'silly string') - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/matlab/tests/test_mio.py b/scipy-0.10.1/scipy/io/matlab/tests/test_mio.py deleted file mode 100644 index fb2bfc0e89..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/test_mio.py +++ /dev/null @@ -1,887 +0,0 @@ -#!/usr/bin/env python -''' Nose test generators - -Need function load / save / roundtrip tests - -''' -import sys -import os -from os.path import join as pjoin, dirname -from glob import glob -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from StringIO import StringIO as BytesIO -from tempfile import mkdtemp -# functools is only available in Python >=2.5 -try: - from functools import partial -except ImportError: - from scipy.io.arff.myfunctools import partial - -import warnings -import shutil -import gzip - -from numpy.testing import \ - assert_array_equal, \ - assert_array_almost_equal, \ - assert_equal, \ - assert_raises, run_module_suite - -from nose.tools import assert_true - -import numpy as np -from numpy import array -import scipy.sparse as SP - -import scipy.io.matlab.byteordercodes as boc -from scipy.io.matlab.miobase import matdims, MatFileReader, \ - MatWriteError -from scipy.io.matlab.mio import find_mat_file, mat_reader_factory, \ - loadmat, savemat -from scipy.io.matlab.mio5 import MatlabObject, MatFile5Writer, \ - MatFile5Reader, MatlabFunction, varmats_from_mat - -# Use future defaults to silence unwanted test warnings -savemat_future = partial(savemat, oned_as='row') -class MatFile5Reader_future(MatFile5Reader): - def __init__(self, *args, **kwargs): - sar = kwargs.get('struct_as_record') - if sar is None: - kwargs['struct_as_record'] = True - super(MatFile5Reader_future, self).__init__(*args, **kwargs) - - 
-test_data_path = pjoin(dirname(__file__), 'data') - -def mlarr(*args, **kwargs): - ''' Convenience function to return matlab-compatible 2D array - ''' - arr = np.array(*args, **kwargs) - arr.shape = matdims(arr) - return arr - -# Define cases to test -theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) -case_table4 = [ - {'name': 'double', - 'expected': {'testdouble': theta} - }] -case_table4.append( - {'name': 'string', - 'expected': {'teststring': - array([u'"Do nine men interpret?" "Nine men," I nod.'])}, - }) -case_table4.append( - {'name': 'complex', - 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} - }) -A = np.zeros((3,5)) -A[0] = range(1,6) -A[:,0] = range(1,4) -case_table4.append( - {'name': 'matrix', - 'expected': {'testmatrix': A}, - }) -case_table4.append( - {'name': 'sparse', - 'expected': {'testsparse': SP.coo_matrix(A)}, - }) -B = A.astype(complex) -B[0,0] += 1j -case_table4.append( - {'name': 'sparsecomplex', - 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, - }) -case_table4.append( - {'name': 'multi', - 'expected': {'theta': theta, - 'a': A}, - }) -case_table4.append( - {'name': 'minus', - 'expected': {'testminus': mlarr(-1)}, - }) -case_table4.append( - {'name': 'onechar', - 'expected': {'testonechar': array([u'r'])}, - }) -# Cell arrays stored as object arrays -CA = mlarr(( # tuple for object array creation - [], - mlarr([1]), - mlarr([[1,2]]), - mlarr([[1,2,3]])), dtype=object).reshape(1,-1) -CA[0,0] = array( - [u'This cell contains this string and 3 arrays of increasing length']) -case_table5 = [ - {'name': 'cell', - 'expected': {'testcell': CA}}] -CAE = mlarr(( # tuple for object array creation - mlarr(1), - mlarr(2), - mlarr([]), - mlarr([]), - mlarr(3)), dtype=object).reshape(1,-1) -objarr = np.empty((1,1),dtype=object) -objarr[0,0] = mlarr(1) -case_table5.append( - {'name': 'scalarcell', - 'expected': {'testscalarcell': objarr} - }) -case_table5.append( - {'name': 'emptycell', - 'expected': {'testemptycell': CAE}}) 
-case_table5.append( - {'name': 'stringarray', - 'expected': {'teststringarray': array( - [u'one ', u'two ', u'three'])}, - }) -case_table5.append( - {'name': '3dmatrix', - 'expected': { - 'test3dmatrix': np.transpose(np.reshape(range(1,25), (4,3,2)))} - }) -st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) -dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] -st1 = np.zeros((1,1), dtype) -st1['stringfield'][0,0] = array([u'Rats live on no evil star.']) -st1['doublefield'][0,0] = st_sub_arr -st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) -case_table5.append( - {'name': 'struct', - 'expected': {'teststruct': st1} - }) -CN = np.zeros((1,2), dtype=object) -CN[0,0] = mlarr(1) -CN[0,1] = np.zeros((1,3), dtype=object) -CN[0,1][0,0] = mlarr(2, dtype=np.uint8) -CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) -CN[0,1][0,2] = np.zeros((1,2), dtype=object) -CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) -CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) -case_table5.append( - {'name': 'cellnest', - 'expected': {'testcellnest': CN}, - }) -st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) -st2[0,0]['one'] = mlarr(1) -st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) -st2[0,0]['two'][0,0]['three'] = array([u'number 3']) -case_table5.append( - {'name': 'structnest', - 'expected': {'teststructnest': st2} - }) -a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) -a[0,0]['one'] = mlarr(1) -a[0,0]['two'] = mlarr(2) -a[0,1]['one'] = array([u'number 1']) -a[0,1]['two'] = array([u'number 2']) -case_table5.append( - {'name': 'structarr', - 'expected': {'teststructarr': a} - }) -ODT = np.dtype([(n, object) for n in - ['expr', 'inputExpr', 'args', - 'isEmpty', 'numArgs', 'version']]) -MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') -m0 = MO[0,0] -m0['expr'] = array([u'x']) -m0['inputExpr'] = array([u' x = INLINE_INPUTS_{1};']) -m0['args'] = array([u'x']) -m0['isEmpty'] = mlarr(0) -m0['numArgs'] = mlarr(1) 
-m0['version'] = mlarr(1) -case_table5.append( - {'name': 'object', - 'expected': {'testobject': MO} - }) -u_str = open( - pjoin(test_data_path, 'japanese_utf8.txt'), - 'rb').read().decode('utf-8') -case_table5.append( - {'name': 'unicode', - 'expected': {'testunicode': array([u_str])} - }) -case_table5.append( - {'name': 'sparse', - 'expected': {'testsparse': SP.coo_matrix(A)}, - }) -case_table5.append( - {'name': 'sparsecomplex', - 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, - }) - -case_table5_rt = case_table5[:] -# Inline functions can't be concatenated in matlab, so RT only -case_table5_rt.append( - {'name': 'objectarray', - 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) - - -def types_compatible(var1, var2): - ''' Check if types are same or compatible - - 0d numpy scalars are compatible with bare python scalars - ''' - type1 = type(var1) - type2 = type(var2) - if type1 is type2: - return True - if type1 is np.ndarray and var1.shape == (): - return type(var1.item()) is type2 - if type2 is np.ndarray and var2.shape == (): - return type(var2.item()) is type1 - return False - - -def _check_level(label, expected, actual): - """ Check one level of a potentially nested array """ - if SP.issparse(expected): # allow different types of sparse matrices - assert_true(SP.issparse(actual)) - assert_array_almost_equal(actual.todense(), - expected.todense(), - err_msg = label, - decimal = 5) - return - # Check types are as expected - assert_true(types_compatible(expected, actual), \ - "Expected type %s, got %s at %s" % - (type(expected), type(actual), label)) - # A field in a record array may not be an ndarray - # A scalar from a record array will be type np.void - if not isinstance(expected, - (np.void, np.ndarray, MatlabObject)): - assert_equal(expected, actual) - return - # This is an ndarray-like thing - assert_true(expected.shape == actual.shape, - msg='Expected shape %s, got %s at %s' % (expected.shape, - actual.shape, - label) - ) - ex_dtype 
= expected.dtype - if ex_dtype.hasobject: # array of objects - if isinstance(expected, MatlabObject): - assert_equal(expected.classname, actual.classname) - for i, ev in enumerate(expected): - level_label = "%s, [%d], " % (label, i) - _check_level(level_label, ev, actual[i]) - return - if ex_dtype.fields: # probably recarray - for fn in ex_dtype.fields: - level_label = "%s, field %s, " % (label, fn) - _check_level(level_label, - expected[fn], actual[fn]) - return - if ex_dtype.type in (np.unicode, # string - np.unicode_): - assert_equal(actual, expected, err_msg=label) - return - # Something numeric - assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) - - -def _load_check_case(name, files, case): - for file_name in files: - matdict = loadmat(file_name, struct_as_record=True) - label = "test %s; file %s" % (name, file_name) - for k, expected in case.items(): - k_label = "%s, variable %s" % (label, k) - assert_true(k in matdict, "Missing key at %s" % k_label) - _check_level(k_label, expected, matdict[k]) - - -# Round trip tests -def _rt_check_case(name, expected, format): - mat_stream = BytesIO() - savemat_future(mat_stream, expected, format=format) - mat_stream.seek(0) - _load_check_case(name, [mat_stream], expected) - - -# generator for load tests -def test_load(): - for case in case_table4 + case_table5: - name = case['name'] - expected = case['expected'] - filt = pjoin(test_data_path, 'test%s_*.mat' % name) - files = glob(filt) - assert_true(len(files) > 0, - "No files for test %s using filter %s" % (name, filt)) - yield _load_check_case, name, files, expected - - -# generator for round trip tests -def test_round_trip(): - for case in case_table4 + case_table5_rt: - name = case['name'] + '_round_trip' - expected = case['expected'] - format = case in case_table4 and '4' or '5' - yield _rt_check_case, name, expected, format - - -def test_gzip_simple(): - xdense = np.zeros((20,20)) - xdense[2,3]=2.3 - xdense[4,5]=4.5 - x = SP.csc_matrix(xdense) - 
- name = 'gzip_test' - expected = {'x':x} - format='4' - - tmpdir = mkdtemp() - try: - fname = pjoin(tmpdir,name) - mat_stream = gzip.open( fname,mode='wb') - savemat_future(mat_stream, expected, format=format) - mat_stream.close() - - mat_stream = gzip.open( fname,mode='rb') - actual = loadmat(mat_stream, struct_as_record=True) - mat_stream.close() - finally: - shutil.rmtree(tmpdir) - - assert_array_almost_equal(actual['x'].todense(), - expected['x'].todense(), - err_msg=repr(actual)) - -def test_multiple_open(): - # Ticket #1039, on Windows: check that files are not left open - tmpdir = mkdtemp() - try: - x = dict(x=np.zeros((2, 2))) - - fname = pjoin(tmpdir, "a.mat") - - # Check that file is not left open - savemat(fname, x, oned_as='row') - os.unlink(fname) - savemat(fname, x, oned_as='row') - loadmat(fname) - os.unlink(fname) - - # Check that stream is left open - f = open(fname, 'wb') - savemat(f, x, oned_as='column') - f.seek(0) - f.close() - - f = open(fname, 'rb') - loadmat(f) - f.seek(0) - f.close() - finally: - shutil.rmtree(tmpdir) - -def test_mat73(): - # Check any hdf5 files raise an error - filenames = glob( - pjoin(test_data_path, 'testhdf5*.mat')) - assert_true(len(filenames)>0) - for filename in filenames: - assert_raises(NotImplementedError, - loadmat, - filename, - struct_as_record=True) - - -def test_warnings(): - fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') - warnings.simplefilter('error') - # This should not generate a warning - mres = loadmat(fname, struct_as_record=True) - # This neither - mres = loadmat(fname, struct_as_record=False) - # This should - because of deprecated system path search - yield assert_raises, DeprecationWarning, find_mat_file, fname - warnings.resetwarnings() - - -def test_regression_653(): - """Regression test for #653.""" - assert_raises(TypeError, savemat_future, BytesIO(), {'d':{1:2}}, format='5') - - -def test_structname_len(): - # Test limit for length of field names in structs - lim = 31 - fldname 
= 'a' * lim - st1 = np.zeros((1,1), dtype=[(fldname, object)]) - mat_stream = BytesIO() - savemat_future(BytesIO(), {'longstruct': st1}, format='5') - fldname = 'a' * (lim+1) - st1 = np.zeros((1,1), dtype=[(fldname, object)]) - assert_raises(ValueError, savemat_future, BytesIO(), - {'longstruct': st1}, format='5') - - -def test_4_and_long_field_names_incompatible(): - # Long field names option not supported in 4 - my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) - assert_raises(ValueError, savemat_future, BytesIO(), - {'my_struct':my_struct}, format='4', long_field_names=True) - - -def test_long_field_names(): - # Test limit for length of field names in structs - lim = 63 - fldname = 'a' * lim - st1 = np.zeros((1,1), dtype=[(fldname, object)]) - mat_stream = BytesIO() - savemat_future(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) - fldname = 'a' * (lim+1) - st1 = np.zeros((1,1), dtype=[(fldname, object)]) - assert_raises(ValueError, savemat_future, BytesIO(), - {'longstruct': st1}, format='5',long_field_names=True) - - -def test_long_field_names_in_struct(): - # Regression test - long_field_names was erased if you passed a struct - # within a struct - lim = 63 - fldname = 'a' * lim - cell = np.ndarray((1,2),dtype=object) - st1 = np.zeros((1,1), dtype=[(fldname, object)]) - cell[0,0]=st1 - cell[0,1]=st1 - mat_stream = BytesIO() - savemat_future(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) - # - # Check to make sure it fails with long field names off - # - assert_raises(ValueError, savemat_future, BytesIO(), - {'longstruct': cell}, format='5', long_field_names=False) - - -def test_cell_with_one_thing_in_it(): - # Regression test - make a cell array that's 1 x 2 and put two - # strings in it. It works. Make a cell array that's 1 x 1 and put - # a string in it. It should work but, in the old days, it didn't. 
- cells = np.ndarray((1,2),dtype=object) - cells[0,0]='Hello' - cells[0,1]='World' - mat_stream = BytesIO() - savemat_future(BytesIO(), {'x': cells}, format='5') - - cells = np.ndarray((1,1),dtype=object) - cells[0,0]='Hello, world' - mat_stream = BytesIO() - savemat_future(BytesIO(), {'x': cells}, format='5') - - -def test_writer_properties(): - # Tests getting, setting of properties of matrix writer - mfw = MatFile5Writer(BytesIO(), oned_as='row') - yield assert_equal, mfw.global_vars, [] - mfw.global_vars = ['avar'] - yield assert_equal, mfw.global_vars, ['avar'] - yield assert_equal, mfw.unicode_strings, False - mfw.unicode_strings = True - yield assert_equal, mfw.unicode_strings, True - yield assert_equal, mfw.long_field_names, False - mfw.long_field_names = True - yield assert_equal, mfw.long_field_names, True - - -def test_use_small_element(): - # Test whether we're using small data element or not - sio = BytesIO() - wtr = MatFile5Writer(sio, oned_as='column') - # First check size for no sde for name - arr = np.zeros(10) - wtr.put_variables({'aaaaa': arr}) - w_sz = len(sio.getvalue()) - # Check small name results in largish difference in size - sio.truncate(0) - sio.seek(0) - wtr.put_variables({'aaaa': arr}) - yield assert_true, w_sz - len(sio.getvalue()) > 4 - # Whereas increasing name size makes less difference - sio.truncate(0) - sio.seek(0) - wtr.put_variables({'aaaaaa': arr}) - yield assert_true, len(sio.getvalue()) - w_sz < 4 - - -def test_save_dict(): - # Test that dict can be saved (as recarray), loaded as matstruct - d = {'a':1, 'b':2} - stream = BytesIO() - savemat_future(stream, {'dict':d}) - stream.seek(0) - vals = loadmat(stream) - - -def test_1d_shape(): - # Current 5 behavior is 1D -> column vector - arr = np.arange(5) - stream = BytesIO() - # silence warnings for tests - warnings.simplefilter('ignore') - savemat(stream, {'oned':arr}, format='5') - vals = loadmat(stream) - assert_equal(vals['oned'].shape, (5,1)) - # Current 4 behavior is 1D -> 
row vector - stream = BytesIO() - savemat(stream, {'oned':arr}, format='4') - vals = loadmat(stream) - assert_equal(vals['oned'].shape, (1, 5)) - for format in ('4', '5'): - # can be explicitly 'column' for oned_as - stream = BytesIO() - savemat(stream, {'oned':arr}, - format=format, - oned_as='column') - vals = loadmat(stream) - assert_equal(vals['oned'].shape, (5,1)) - # but different from 'row' - stream = BytesIO() - savemat(stream, {'oned':arr}, - format=format, - oned_as='row') - vals = loadmat(stream) - assert_equal(vals['oned'].shape, (1,5)) - warnings.resetwarnings() - - -def test_compression(): - arr = np.zeros(100).reshape((5,20)) - arr[2,10] = 1 - stream = BytesIO() - savemat_future(stream, {'arr':arr}) - raw_len = len(stream.getvalue()) - vals = loadmat(stream) - yield assert_array_equal, vals['arr'], arr - stream = BytesIO() - savemat_future(stream, {'arr':arr}, do_compression=True) - compressed_len = len(stream.getvalue()) - vals = loadmat(stream) - yield assert_array_equal, vals['arr'], arr - yield assert_true, raw_len>compressed_len - # Concatenate, test later - arr2 = arr.copy() - arr2[0,0] = 1 - stream = BytesIO() - savemat_future(stream, {'arr':arr, 'arr2':arr2}, do_compression=False) - vals = loadmat(stream) - yield assert_array_equal, vals['arr2'], arr2 - stream = BytesIO() - savemat_future(stream, {'arr':arr, 'arr2':arr2}, do_compression=True) - vals = loadmat(stream) - yield assert_array_equal, vals['arr2'], arr2 - - -def test_single_object(): - stream = BytesIO() - savemat_future(stream, {'A':np.array(1, dtype=object)}) - - -def test_skip_variable(): - # Test skipping over the first of two variables in a MAT file - # using mat_reader_factory and put_variables to read them in. - # - # This is a regression test of a problem that's caused by - # using the compressed file reader seek instead of the raw file - # I/O seek when skipping over a compressed chunk. 
- # - # The problem arises when the chunk is large: this file has - # a 256x256 array of random (uncompressible) doubles. - # - filename = pjoin(test_data_path,'test_skip_variable.mat') - # - # Prove that it loads with loadmat - # - d = loadmat(filename, struct_as_record=True) - yield assert_true, d.has_key('first') - yield assert_true, d.has_key('second') - # - # Make the factory - # - factory = mat_reader_factory(filename, struct_as_record=True) - # - # This is where the factory breaks with an error in MatMatrixGetter.to_next - # - d = factory.get_variables('second') - yield assert_true, d.has_key('second') - - -def test_empty_struct(): - # ticket 885 - filename = pjoin(test_data_path,'test_empty_struct.mat') - # before ticket fix, this would crash with ValueError, empty data - # type - d = loadmat(filename, struct_as_record=True) - a = d['a'] - assert_equal(a.shape, (1,1)) - assert_equal(a.dtype, np.dtype(np.object)) - assert_true(a[0,0] is None) - stream = BytesIO() - arr = np.array((), dtype='U') - # before ticket fix, this used to give data type not understood - savemat_future(stream, {'arr':arr}) - d = loadmat(stream) - a2 = d['arr'] - assert_array_equal(a2, arr) - - -def test_recarray(): - # check roundtrip of structured array - dt = [('f1', 'f8'), - ('f2', 'S10')] - arr = np.zeros((2,), dtype=dt) - arr[0]['f1'] = 0.5 - arr[0]['f2'] = 'python' - arr[1]['f1'] = 99 - arr[1]['f2'] = 'not perl' - stream = BytesIO() - savemat_future(stream, {'arr': arr}) - d = loadmat(stream, struct_as_record=False) - a20 = d['arr'][0,0] - yield assert_equal, a20.f1, 0.5 - yield assert_equal, a20.f2, 'python' - d = loadmat(stream, struct_as_record=True) - a20 = d['arr'][0,0] - yield assert_equal, a20['f1'], 0.5 - yield assert_equal, a20['f2'], 'python' - # structs always come back as object types - yield assert_equal, a20.dtype, np.dtype([('f1', 'O'), - ('f2', 'O')]) - a21 = d['arr'].flat[1] - yield assert_equal, a21['f1'], 99 - yield assert_equal, a21['f2'], 'not perl' - - -def 
test_save_object(): - class C(object): pass - c = C() - c.field1 = 1 - c.field2 = 'a string' - stream = BytesIO() - savemat_future(stream, {'c': c}) - d = loadmat(stream, struct_as_record=False) - c2 = d['c'][0,0] - assert_equal(c2.field1, 1) - assert_equal(c2.field2, 'a string') - d = loadmat(stream, struct_as_record=True) - c2 = d['c'][0,0] - assert_equal(c2['field1'], 1) - assert_equal(c2['field2'], 'a string') - - -def test_read_opts(): - # tests if read is seeing option sets, at initialization and after - # initialization - arr = np.arange(6).reshape(1,6) - stream = BytesIO() - savemat_future(stream, {'a': arr}) - rdr = MatFile5Reader_future(stream) - back_dict = rdr.get_variables() - rarr = back_dict['a'] - assert_array_equal(rarr, arr) - rdr = MatFile5Reader_future(stream, squeeze_me=True) - assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,))) - rdr.squeeze_me = False - assert_array_equal(rarr, arr) - rdr = MatFile5Reader_future(stream, byte_order=boc.native_code) - assert_array_equal(rdr.get_variables()['a'], arr) - # inverted byte code leads to error on read because of swapped - # header etc - rdr = MatFile5Reader_future(stream, byte_order=boc.swapped_code) - assert_raises(Exception, rdr.get_variables) - rdr.byte_order = boc.native_code - assert_array_equal(rdr.get_variables()['a'], arr) - arr = np.array(['a string']) - stream.truncate(0) - stream.seek(0) - savemat_future(stream, {'a': arr}) - rdr = MatFile5Reader_future(stream) - assert_array_equal(rdr.get_variables()['a'], arr) - rdr = MatFile5Reader_future(stream, chars_as_strings=False) - carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1')) - assert_array_equal(rdr.get_variables()['a'], carr) - rdr.chars_as_strings=True - assert_array_equal(rdr.get_variables()['a'], arr) - - -def test_empty_string(): - # make sure reading empty string does not raise error - estring_fname = pjoin(test_data_path, 'single_empty_string.mat') - rdr = MatFile5Reader_future(open(estring_fname, 'rb')) - d = 
rdr.get_variables() - assert_array_equal(d['a'], np.array([], dtype='U1')) - # empty string round trip. Matlab cannot distiguish - # between a string array that is empty, and a string array - # containing a single empty string, because it stores strings as - # arrays of char. There is no way of having an array of char that - # is not empty, but contains an empty string. - stream = BytesIO() - savemat_future(stream, {'a': np.array([''])}) - rdr = MatFile5Reader_future(stream) - d = rdr.get_variables() - assert_array_equal(d['a'], np.array([], dtype='U1')) - stream.truncate(0) - stream.seek(0) - savemat_future(stream, {'a': np.array([], dtype='U1')}) - rdr = MatFile5Reader_future(stream) - d = rdr.get_variables() - assert_array_equal(d['a'], np.array([], dtype='U1')) - - -def test_mat4_3d(): - # test behavior when writing 3D arrays to matlab 4 files - stream = BytesIO() - arr = np.arange(24).reshape((2,3,4)) - warnings.simplefilter('error') - assert_raises(DeprecationWarning, savemat_future, - stream, {'a': arr}, True, '4') - warnings.resetwarnings() - # For now, we save a 3D array as 2D - warnings.simplefilter('ignore') - savemat_future(stream, {'a': arr}, format='4') - warnings.resetwarnings() - d = loadmat(stream) - assert_array_equal(d['a'], arr.reshape((6,4))) - - -def test_func_read(): - func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') - rdr = MatFile5Reader_future(open(func_eg, 'rb')) - d = rdr.get_variables() - yield assert_true, isinstance(d['testfunc'], MatlabFunction) - stream = BytesIO() - wtr = MatFile5Writer(stream, oned_as='row') - yield assert_raises, MatWriteError, wtr.put_variables, d - - -def test_mat_dtype(): - double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') - rdr = MatFile5Reader_future(open(double_eg, 'rb'), mat_dtype=False) - d = rdr.get_variables() - yield assert_equal, d['testmatrix'].dtype.kind, 'u' - rdr = MatFile5Reader_future(open(double_eg, 'rb'), mat_dtype=True) - d = rdr.get_variables() - yield assert_equal, 
d['testmatrix'].dtype.kind, 'f' - - -def test_sparse_in_struct(): - # reproduces bug found by DC where Cython code was insisting on - # ndarray return type, but getting sparse matrix - st = {'sparsefield': SP.coo_matrix(np.eye(4))} - stream = BytesIO() - savemat_future(stream, {'a':st}) - d = loadmat(stream, struct_as_record=True) - yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4) - - -def test_mat_struct_squeeze(): - stream = BytesIO() - in_d = {'st':{'one':1, 'two':2}} - savemat_future(stream, in_d) - # no error without squeeze - out_d = loadmat(stream, struct_as_record=False) - # previous error was with squeeze, with mat_struct - out_d = loadmat(stream, - struct_as_record=False, - squeeze_me=True, - ) - - -def test_str_round(): - # from report by Angus McMorland on mailing list 3 May 2010 - stream = BytesIO() - in_arr = np.array(['Hello', 'Foob']) - out_arr = np.array(['Hello', 'Foob ']) - savemat_future(stream, dict(a=in_arr)) - res = loadmat(stream) - # resulted in [u'HloolFoa', u'elWrdobr'] - assert_array_equal(res['a'], out_arr) - stream.truncate(0) - stream.seek(0) - # Make Fortran ordered version of string - in_str = in_arr.tostring(order='F') - in_from_str = np.ndarray(shape=a.shape, - dtype=in_arr.dtype, - order='F', - buffer=in_str) - savemat_future(stream, dict(a=in_from_str)) - assert_array_equal(res['a'], out_arr) - # unicode save did lead to buffer too small error - stream.truncate(0) - stream.seek(0) - in_arr_u = in_arr.astype('U') - out_arr_u = out_arr.astype('U') - savemat_future(stream, {'a': in_arr_u}) - res = loadmat(stream) - assert_array_equal(res['a'], out_arr_u) - - -def test_fieldnames(): - # Check that field names are as expected - stream = BytesIO() - savemat_future(stream, {'a': {'a':1, 'b':2}}) - res = loadmat(stream) - field_names = res['a'].dtype.names - assert_equal(set(field_names), set(('a', 'b'))) - - -def test_loadmat_varnames(): - # Test that we can get just one variable from a mat file using loadmat - 
eg_file = pjoin(test_data_path, 'testmulti_7.4_GLNX86.mat') - sys_v_names = ['__globals__', - '__header__', - '__version__'] - vars = loadmat(eg_file) - assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names)) - vars = loadmat(eg_file, variable_names=['a']) - assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) - vars = loadmat(eg_file, variable_names=['theta']) - assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) - - -def test_round_types(): - # Check that saving, loading preserves dtype in most cases - arr = np.arange(10) - stream = BytesIO() - for dts in ('f8','f4','i8','i4','i2','i1', - 'u8','u4','u2','u1','c16','c8'): - stream.truncate(0) - stream.seek(0) # needed for BytesIO in python 3 - savemat_future(stream, {'arr': arr.astype(dts)}) - vars = loadmat(stream) - assert_equal(np.dtype(dts), vars['arr'].dtype) - - -def test_varmats_from_mat(): - # Make a mat file with several variables, write it, read it back - names_vars = (('arr', mlarr(np.arange(10))), - ('mystr', mlarr('a string')), - ('mynum', mlarr(10))) - # Dict like thing to give variables in defined order - class C(object): - def items(self): return names_vars - stream = BytesIO() - savemat_future(stream, C()) - varmats = varmats_from_mat(stream) - assert_equal(len(varmats), 3) - for i in range(3): - name, var_stream = varmats[i] - exp_name, exp_res = names_vars[i] - assert_equal(name, exp_name) - res = loadmat(var_stream) - assert_array_equal(res[name], exp_res) - - -def test_one_by_zero(): - ''' Test 1x0 chars get read correctly ''' - func_eg = pjoin(test_data_path, 'one_by_zero_char.mat') - rdr = MatFile5Reader_future(open(func_eg, 'rb')) - d = rdr.get_variables() - assert_equal(d['var'].shape, (0,)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/matlab/tests/test_mio5_utils.py b/scipy-0.10.1/scipy/io/matlab/tests/test_mio5_utils.py deleted file mode 100644 index d85c191fee..0000000000 --- 
a/scipy-0.10.1/scipy/io/matlab/tests/test_mio5_utils.py +++ /dev/null @@ -1,194 +0,0 @@ -""" Testing - -""" -import sys - -if sys.version_info[0] >= 3: - from io import BytesIO - cStringIO = BytesIO -else: - from cStringIO import StringIO as cStringIO - from StringIO import StringIO as BytesIO - -import numpy as np - -from numpy.compat import asbytes - -from nose.tools import assert_true, assert_false, \ - assert_equal, assert_raises - -from numpy.testing import assert_array_equal, assert_array_almost_equal, \ - run_module_suite - -import scipy.io.matlab.byteordercodes as boc -import scipy.io.matlab.streams as streams -import scipy.io.matlab.mio5_params as mio5p -import scipy.io.matlab.mio5_utils as m5u - - -def test_byteswap(): - for val in ( - 1, - 0x100, - 0x10000): - a = np.array(val, dtype=np.uint32) - b = a.byteswap() - c = m5u.byteswap_u4(a) - yield assert_equal, b.item(), c - d = m5u.byteswap_u4(c) - yield assert_equal, a.item(), d - - -def _make_tag(base_dt, val, mdtype, sde=False): - ''' Makes a simple matlab tag, full or sde ''' - base_dt = np.dtype(base_dt) - bo = boc.to_numpy_code(base_dt.byteorder) - byte_count = base_dt.itemsize - if not sde: - udt = bo + 'u4' - padding = 8 - (byte_count % 8) - all_dt = [('mdtype', udt), - ('byte_count', udt), - ('val', base_dt)] - if padding: - all_dt.append(('padding', 'u1', padding)) - else: # is sde - udt = bo + 'u2' - padding = 4-byte_count - if bo == '<': # little endian - all_dt = [('mdtype', udt), - ('byte_count', udt), - ('val', base_dt)] - else: # big endian - all_dt = [('byte_count', udt), - ('mdtype', udt), - ('val', base_dt)] - if padding: - all_dt.append(('padding', 'u1', padding)) - tag = np.zeros((1,), dtype=all_dt) - tag['mdtype'] = mdtype - tag['byte_count'] = byte_count - tag['val'] = val - return tag - - -def _write_stream(stream, *strings): - stream.truncate(0) - stream.seek(0) - for s in strings: - stream.write(s) - stream.seek(0) - - -def _make_readerlike(stream, byte_order=boc.native_code): - 
class R(object): - pass - r = R() - r.mat_stream = stream - r.byte_order = byte_order - r.struct_as_record = True - r.uint16_codec = sys.getdefaultencoding() - r.chars_as_strings = False - r.mat_dtype = False - r.squeeze_me = False - return r - - -def test_read_tag(): - # mainly to test errors - # make reader-like thing - str_io = BytesIO() - r = _make_readerlike(str_io) - c_reader = m5u.VarReader5(r) - # This works for StringIO but _not_ cStringIO - yield assert_raises, IOError, c_reader.read_tag - # bad SDE - tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) - tag['byte_count'] = 5 - _write_stream(str_io, tag.tostring()) - yield assert_raises, ValueError, c_reader.read_tag - - -def test_read_stream(): - tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) - tag_str = tag.tostring() - str_io = cStringIO(tag_str) - st = streams.make_stream(str_io) - s = streams._read_into(st, tag.itemsize) - yield assert_equal, s, tag.tostring() - - -def test_read_numeric(): - # make reader-like thing - str_io = cStringIO() - r = _make_readerlike(str_io) - # check simplest of tags - for base_dt, val, mdtype in ( - ('u2', 30, mio5p.miUINT16), - ('i4', 1, mio5p.miINT32), - ('i2', -1, mio5p.miINT16)): - for byte_code in ('<', '>'): - r.byte_order = byte_code - c_reader = m5u.VarReader5(r) - yield assert_equal, c_reader.little_endian, byte_code == '<' - yield assert_equal, c_reader.is_swapped, byte_code != boc.native_code - for sde_f in (False, True): - dt = np.dtype(base_dt).newbyteorder(byte_code) - a = _make_tag(dt, val, mdtype, sde_f) - a_str = a.tostring() - _write_stream(str_io, a_str) - el = c_reader.read_numeric() - yield assert_equal, el, val - # two sequential reads - _write_stream(str_io, a_str, a_str) - el = c_reader.read_numeric() - yield assert_equal, el, val - el = c_reader.read_numeric() - yield assert_equal, el, val - - -def test_read_numeric_writeable(): - # make reader-like thing - str_io = cStringIO() - r = _make_readerlike(str_io, '<') - c_reader = m5u.VarReader5(r) - 
dt = np.dtype('= 3: - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -from numpy.testing import \ - assert_array_equal, \ - assert_array_almost_equal, \ - assert_equal, \ - assert_raises, run_module_suite - -from nose.tools import assert_true - -import numpy as np -from numpy.compat import asbytes, asstr - -from scipy.io.matlab.mio5 import MatlabObject, MatFile5Writer, \ - MatFile5Reader, MatlabFunction - -test_data_path = pjoin(dirname(__file__), 'data') - -def read_minimat_vars(rdr): - rdr.initialize_read() - mdict = {'__globals__': []} - i = 0 - while not rdr.end_of_stream(): - hdr, next_position = rdr.read_var_header() - name = asstr(hdr.name) - if name == '': - name = 'var_%d' % i - i += 1 - res = rdr.read_var_array(hdr, process=False) - rdr.mat_stream.seek(next_position) - mdict[name] = res - if hdr.is_global: - mdict['__globals__'].append(name) - return mdict - -def read_workspace_vars(fname): - rdr = MatFile5Reader(open(fname, 'rb'), - struct_as_record=True) - vars = rdr.get_variables() - fws = vars['__function_workspace__'] - ws_bs = BytesIO(fws.tostring()) - ws_bs.seek(2) - rdr.mat_stream = ws_bs - # Guess byte order. 
- mi = rdr.mat_stream.read(2) - rdr.byte_order = mi == asbytes('IM') and '<' or '>' - rdr.mat_stream.read(4) # presumably byte padding - return read_minimat_vars(rdr) - - -def test_jottings(): - # example - fname = pjoin(test_data_path, 'parabola.mat') - ws_vars = read_workspace_vars(fname) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/matlab/tests/test_mio_utils.py b/scipy-0.10.1/scipy/io/matlab/tests/test_mio_utils.py deleted file mode 100644 index 96dc0cb492..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/test_mio_utils.py +++ /dev/null @@ -1,55 +0,0 @@ -""" Testing - -""" - -import numpy as np - -from numpy.testing import assert_array_equal, assert_array_almost_equal, \ - run_module_suite, assert_ - -from scipy.io.matlab.mio_utils import cproduct, squeeze_element, \ - chars_to_strings - - -def test_cproduct(): - assert_(cproduct(()) == 1) - assert_(cproduct((1,)) == 1) - assert_(cproduct((1,3)) == 3) - assert_(cproduct([1,3]) == 3) - - -def test_squeeze_element(): - a = np.zeros((1,3)) - assert_array_equal(np.squeeze(a), squeeze_element(a)) - # 0d output from squeeze gives scalar - sq_int = squeeze_element(np.zeros((1,1), dtype=np.float)) - assert_(isinstance(sq_int, float)) - # Unless it's a structured array - sq_sa = squeeze_element(np.zeros((1,1),dtype=[('f1', 'f')])) - assert_(isinstance(sq_sa, np.ndarray)) - - -def test_chars_strings(): - # chars as strings - strings = ['learn ', 'python', 'fast ', 'here '] - str_arr = np.array(strings, dtype='U6') # shape (4,) - chars = [list(s) for s in strings] - char_arr = np.array(chars, dtype='U1') # shape (4,6) - assert_array_equal(chars_to_strings(char_arr), str_arr) - ca2d = char_arr.reshape((2,2,6)) - sa2d = str_arr.reshape((2,2)) - assert_array_equal(chars_to_strings(ca2d), sa2d) - ca3d = char_arr.reshape((1,2,2,6)) - sa3d = str_arr.reshape((1,2,2)) - assert_array_equal(chars_to_strings(ca3d), sa3d) - # Fortran ordered arrays - char_arrf = np.array(chars, dtype='U1', 
order='F') # shape (4,6) - assert_array_equal(chars_to_strings(char_arrf), str_arr) - # empty array - arr = np.array([['']], dtype='U1') - out_arr = np.array([''], dtype='U1') - assert_array_equal(chars_to_strings(arr), out_arr) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/matlab/tests/test_pathological.py b/scipy-0.10.1/scipy/io/matlab/tests/test_pathological.py deleted file mode 100644 index 1ab7855a2e..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/test_pathological.py +++ /dev/null @@ -1,34 +0,0 @@ -""" Test reading of files not conforming to matlab specification - -We try and read any file that matlab reads, these files included -""" -from os.path import dirname, join as pjoin -import sys - -if sys.version_info[0] >= 3: - from io import BytesIO - cStringIO = BytesIO -else: - from cStringIO import StringIO as cStringIO - from StringIO import StringIO as BytesIO - -import numpy as np - -from nose.tools import assert_true, assert_false, \ - assert_equal, assert_raises - -from numpy.testing import assert_array_equal, assert_array_almost_equal, \ - run_module_suite - -from scipy.io.matlab.mio import loadmat - -TEST_DATA_PATH = pjoin(dirname(__file__), 'data') - -def test_multiple_fieldnames(): - # Example provided by Dharhas Pothina - # Extracted using mio5.varmats_from_mat - multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat') - vars = loadmat(multi_fname) - funny_names = vars['Summary'].dtype.names - assert_true(set(['_1_Station_Q', '_2_Station_Q', - '_3_Station_Q']).issubset(funny_names)) diff --git a/scipy-0.10.1/scipy/io/matlab/tests/test_streams.py b/scipy-0.10.1/scipy/io/matlab/tests/test_streams.py deleted file mode 100644 index 57a0f6629c..0000000000 --- a/scipy-0.10.1/scipy/io/matlab/tests/test_streams.py +++ /dev/null @@ -1,104 +0,0 @@ -""" Testing - -""" - -import os - -import sys - -if sys.version_info[0] >= 3: - from io import BytesIO - cStringIO = BytesIO -else: - from cStringIO import 
StringIO as cStringIO - from StringIO import StringIO as BytesIO - -from tempfile import mkstemp - -import numpy as np -from numpy.compat import asbytes - -from nose.tools import assert_true, assert_false, \ - assert_equal, assert_raises - -from numpy.testing import assert_array_equal, assert_array_almost_equal, \ - run_module_suite - -from scipy.io.matlab.streams import make_stream, \ - GenericStream, cStringStream, FileStream, \ - _read_into, _read_string - - -def setup(): - val = asbytes('a\x00string') - global fs, gs, cs, fname - fd, fname = mkstemp() - fs = os.fdopen(fd, 'wb') - fs.write(val) - fs.close() - fs = open(fname, 'rb') - gs = BytesIO(val) - cs = cStringIO(val) - - -def teardown(): - global fname, fs - fs.close() - del fs - os.unlink(fname) - - -def test_make_stream(): - global fs, gs, cs - # test stream initialization - assert_true(isinstance(make_stream(gs), GenericStream)) - if sys.version_info[0] < 3: - assert_true(isinstance(make_stream(cs), cStringStream)) - assert_true(isinstance(make_stream(fs), FileStream)) - - -def test_tell_seek(): - global fs, gs, cs - for s in (fs, gs, cs): - st = make_stream(s) - res = st.seek(0) - yield assert_equal, res, 0 - yield assert_equal, st.tell(), 0 - res = st.seek(5) - yield assert_equal, res, 0 - yield assert_equal, st.tell(), 5 - res = st.seek(2, 1) - yield assert_equal, res, 0 - yield assert_equal, st.tell(), 7 - res = st.seek(-2, 2) - yield assert_equal, res, 0 - yield assert_equal, st.tell(), 6 - - -def test_read(): - global fs, gs, cs - for s in (fs, gs, cs): - st = make_stream(s) - st.seek(0) - res = st.read(-1) - yield assert_equal, res, asbytes('a\x00string') - st.seek(0) - res = st.read(4) - yield assert_equal, res, asbytes('a\x00st') - # read into - st.seek(0) - res = _read_into(st, 4) - yield assert_equal, res, asbytes('a\x00st') - res = _read_into(st, 4) - yield assert_equal, res, asbytes('ring') - yield assert_raises, IOError, _read_into, st, 2 - # read alloc - st.seek(0) - res = 
_read_string(st, 4) - yield assert_equal, res, asbytes('a\x00st') - res = _read_string(st, 4) - yield assert_equal, res, asbytes('ring') - yield assert_raises, IOError, _read_string, st, 2 - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/mmio.py b/scipy-0.10.1/scipy/io/mmio.py deleted file mode 100644 index dcd940371d..0000000000 --- a/scipy-0.10.1/scipy/io/mmio.py +++ /dev/null @@ -1,617 +0,0 @@ -""" - Matrix Market I/O in Python. -""" -# -# Author: Pearu Peterson -# Created: October, 2004 -# -# References: -# http://math.nist.gov/MatrixMarket/ -# - -import os -from numpy import asarray, real, imag, conj, zeros, ndarray, concatenate, \ - ones, ascontiguousarray, vstack, savetxt, fromfile, fromstring -from numpy.compat import asbytes, asstr - -__all__ = ['mminfo','mmread','mmwrite', 'MMFile'] - - -#------------------------------------------------------------------------------- -def mminfo(source): - """ - Queries the contents of the Matrix Market file 'filename' to - extract size and storage information. - - Parameters - ---------- - - source : file - Matrix Market filename (extension .mtx) or open file object - - Returns - ------- - - rows,cols : int - Number of matrix rows and columns - entries : int - Number of non-zero entries of a sparse matrix - or rows*cols for a dense matrix - - format : {'coordinate', 'array'} - - field : {'real', 'complex', 'pattern', 'integer'} - - symm : {'general', 'symmetric', 'skew-symmetric', 'hermitian'} - - """ - return MMFile.info(source) - -#------------------------------------------------------------------------------- -def mmread(source): - """ - Reads the contents of a Matrix Market file 'filename' into a matrix. - - Parameters - ---------- - - source : file - Matrix Market filename (extensions .mtx, .mtz.gz) - or open file object. 
- - Returns - ------- - a: - Sparse or full matrix - - """ - return MMFile().read(source) - -#------------------------------------------------------------------------------- -def mmwrite(target, a, comment='', field=None, precision=None): - """ - Writes the sparse or dense matrix A to a Matrix Market formatted file. - - Parameters - ---------- - - target : file - Matrix Market filename (extension .mtx) or open file object - a : array like - Sparse or full matrix - comment : str - comments to be prepended to the Matrix Market file - - field : {'real', 'complex', 'pattern', 'integer'}, optional - - precision : - Number of digits to display for real or complex values. - - """ - MMFile().write(target, a, comment, field, precision) - - -################################################################################ -class MMFile (object): - __slots__ = ( - '_rows', - '_cols', - '_entries', - '_format', - '_field', - '_symmetry') - - @property - def rows(self): return self._rows - @property - def cols(self): return self._cols - @property - def entries(self): return self._entries - @property - def format(self): return self._format - @property - def field(self): return self._field - @property - def symmetry(self): return self._symmetry - - @property - def has_symmetry(self): - return self._symmetry in (self.SYMMETRY_SYMMETRIC, - self.SYMMETRY_SKEW_SYMMETRIC, self.SYMMETRY_HERMITIAN) - - # format values - FORMAT_COORDINATE = 'coordinate' - FORMAT_ARRAY = 'array' - FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY) - - @classmethod - def _validate_format(self, format): - if format not in self.FORMAT_VALUES: - raise ValueError('unknown format type %s, must be one of %s' % - (format, self.FORMAT_VALUES)) - - # field values - FIELD_INTEGER = 'integer' - FIELD_REAL = 'real' - FIELD_COMPLEX = 'complex' - FIELD_PATTERN = 'pattern' - FIELD_VALUES = (FIELD_INTEGER, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN) - - @classmethod - def _validate_field(self, field): - if field not in 
self.FIELD_VALUES: - raise ValueError('unknown field type %s, must be one of %s' % - (field, self.FIELD_VALUES)) - - # symmetry values - SYMMETRY_GENERAL = 'general' - SYMMETRY_SYMMETRIC = 'symmetric' - SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' - SYMMETRY_HERMITIAN = 'hermitian' - SYMMETRY_VALUES = ( SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, - SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) - - @classmethod - def _validate_symmetry(self, symmetry): - if symmetry not in self.SYMMETRY_VALUES: - raise ValueError('unknown symmetry type %s, must be one of %s' % - (symmetry, self.SYMMETRY_VALUES)) - - DTYPES_BY_FIELD = { - FIELD_INTEGER: 'i', - FIELD_REAL: 'd', - FIELD_COMPLEX: 'D', - FIELD_PATTERN: 'd'} - - #--------------------------------------------------------------------------- - @staticmethod - def reader(): pass - - #--------------------------------------------------------------------------- - @staticmethod - def writer(): pass - - #--------------------------------------------------------------------------- - @classmethod - def info(self, source): - source, close_it = self._open(source) - - try: - - # read and validate header line - line = source.readline() - mmid, matrix, format, field, symmetry = \ - [asstr(part.strip().lower()) for part in line.split()] - if not mmid.startswith('%%matrixmarket'): - raise ValueError('source is not in Matrix Market format') - if not matrix == 'matrix': - raise ValueError("Problem reading file header: " + line) - - # ??? Is this necessary? 
I don't see 'dense' or 'sparse' in the spec - # http://math.nist.gov/MatrixMarket/formats.html - if format == 'dense': - format = self.FORMAT_ARRAY - elif format == 'sparse': - format = self.FORMAT_COORDINATE - - # skip comments - while line.startswith(asbytes('%')): line = source.readline() - - line = line.split() - if format == self.FORMAT_ARRAY: - if not len(line) == 2: - raise ValueError("Header line not of length 2: " + line) - rows,cols = map(float, line) - entries = rows*cols - else: - if not len(line) == 3: - raise ValueError("Header line not of length 3: " + line) - rows, cols, entries = map(float, line) - - return (rows, cols, entries, format, field, symmetry) - - finally: - if close_it: source.close() - - #--------------------------------------------------------------------------- - @staticmethod - def _open(filespec, mode='rb'): - """ - Return an open file stream for reading based on source. If source is - a file name, open it (after trying to find it with mtx and gzipped mtx - extensions). Otherwise, just return source. 
- """ - close_it = False - if type(filespec) is type(''): - close_it = True - - # open for reading - if mode[0] == 'r': - - # determine filename plus extension - if not os.path.isfile(filespec): - if os.path.isfile(filespec+'.mtx'): - filespec = filespec + '.mtx' - elif os.path.isfile(filespec+'.mtx.gz'): - filespec = filespec + '.mtx.gz' - elif os.path.isfile(filespec+'.mtx.bz2'): - filespec = filespec + '.mtx.bz2' - # open filename - if filespec.endswith('.gz'): - import gzip - stream = gzip.open(filespec, mode) - elif filespec.endswith('.bz2'): - import bz2 - stream = bz2.BZ2File(filespec, 'rb') - else: - stream = open(filespec, mode) - - # open for writing - else: - if filespec[-4:] != '.mtx': - filespec = filespec + '.mtx' - stream = open(filespec, mode) - else: - stream = filespec - - return stream, close_it - - #--------------------------------------------------------------------------- - @staticmethod - def _get_symmetry(a): - m,n = a.shape - if m!=n: - return MMFile.SYMMETRY_GENERAL - issymm = 1 - isskew = 1 - isherm = a.dtype.char in 'FD' - for j in range(n): - for i in range(j+1,n): - aij,aji = a[i][j],a[j][i] - if issymm and aij != aji: - issymm = 0 - if isskew and aij != -aji: - isskew = 0 - if isherm and aij != conj(aji): - isherm = 0 - if not (issymm or isskew or isherm): - break - if issymm: return MMFile.SYMMETRY_SYMMETRIC - if isskew: return MMFile.SYMMETRY_SKEW_SYMMETRIC - if isherm: return MMFile.SYMMETRY_HERMITIAN - return MMFile.SYMMETRY_GENERAL - - #--------------------------------------------------------------------------- - @staticmethod - def _field_template(field, precision): - return { - MMFile.FIELD_REAL: '%%.%ie\n' % precision, - MMFile.FIELD_INTEGER: '%i\n', - MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % (precision,precision) - }.get(field, None) - - #--------------------------------------------------------------------------- - def __init__(self, **kwargs): self._init_attrs(**kwargs) - - 
#--------------------------------------------------------------------------- - def read(self, source): - stream, close_it = self._open(source) - - try: - self._parse_header(stream) - return self._parse_body(stream) - - finally: - if close_it: stream.close() - - #--------------------------------------------------------------------------- - def write(self, target, a, comment='', field=None, precision=None): - stream, close_it = self._open(target, 'wb') - - try: - self._write(stream, a, comment, field, precision) - - finally: - if close_it: stream.close() - else: stream.flush() - - #--------------------------------------------------------------------------- - def _init_attrs(self, **kwargs): - """ - Initialize each attributes with the corresponding keyword arg value - or a default of None - """ - attrs = self.__class__.__slots__ - public_attrs = [attr[1:] for attr in attrs] - invalid_keys = set(kwargs.keys()) - set(public_attrs) - - if invalid_keys: - raise ValueError('found %s invalid keyword arguments, please only use %s' % - (tuple(invalid_keys), public_attrs)) - - for attr in attrs: setattr(self, attr, kwargs.get(attr[1:], None)) - - #--------------------------------------------------------------------------- - def _parse_header(self, stream): - rows, cols, entries, format, field, symmetry = \ - self.__class__.info(stream) - self._init_attrs(rows=rows, cols=cols, entries=entries, format=format, - field=field, symmetry=symmetry) - - #--------------------------------------------------------------------------- - def _parse_body(self, stream): - rows, cols, entries, format, field, symm = \ - (self.rows, self.cols, self.entries, self.format, self.field, self.symmetry) - - try: - from scipy.sparse import coo_matrix - except ImportError: - coo_matrix = None - - dtype = self.DTYPES_BY_FIELD.get(field, None) - - has_symmetry = self.has_symmetry - is_complex = field == self.FIELD_COMPLEX - is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC - is_herm = symm == 
self.SYMMETRY_HERMITIAN - is_pattern = field == self.FIELD_PATTERN - - if format == self.FORMAT_ARRAY: - a = zeros((rows,cols),dtype=dtype) - line = 1 - i,j = 0,0 - while line: - line = stream.readline() - if not line or line.startswith(asbytes('%')): - continue - if is_complex: - aij = complex(*map(float,line.split())) - else: - aij = float(line) - a[i,j] = aij - if has_symmetry and i!=j: - if is_skew: - a[j,i] = -aij - elif is_herm: - a[j,i] = conj(aij) - else: - a[j,i] = aij - if i base 0) - J -= 1 - - if has_symmetry: - mask = (I != J) #off diagonal mask - od_I = I[mask] - od_J = J[mask] - od_V = V[mask] - - I = concatenate((I,od_J)) - J = concatenate((J,od_I)) - - if is_skew: - od_V *= -1 - elif is_herm: - od_V = od_V.conjugate() - - V = concatenate((V,od_V)) - - a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype) - else: - raise NotImplementedError(format) - - return a - - #--------------------------------------------------------------------------- - def _write(self, stream, a, comment='', field=None, precision=None): - - if isinstance(a, list) or isinstance(a, ndarray) or isinstance(a, tuple) or hasattr(a,'__array__'): - rep = self.FORMAT_ARRAY - a = asarray(a) - if len(a.shape) != 2: - raise ValueError('expected matrix') - rows,cols = a.shape - entries = rows*cols - - if field is not None: - - if field == self.FIELD_INTEGER: - a = a.astype('i') - elif field == self.FIELD_REAL: - if a.dtype.char not in 'fd': - a = a.astype('d') - elif field == self.FIELD_COMPLEX: - if a.dtype.char not in 'FD': - a = a.astype('D') - - else: - from scipy.sparse import spmatrix - if not isinstance(a,spmatrix): - raise ValueError('unknown matrix type: %s' % type(a)) - rep = 'coordinate' - rows, cols = a.shape - entries = a.getnnz() - - typecode = a.dtype.char - - if precision is None: - if typecode in 'fF': - precision = 8 - else: - precision = 16 - - if field is None: - kind = a.dtype.kind - if kind == 'i': - field = 'integer' - elif kind == 'f': - field = 'real' - 
elif kind == 'c': - field = 'complex' - else: - raise TypeError('unexpected dtype kind ' + kind) - - if rep == self.FORMAT_ARRAY: - symm = self._get_symmetry(a) - else: - symm = self.SYMMETRY_GENERAL - - # validate rep, field, and symmetry - self.__class__._validate_format(rep) - self.__class__._validate_field(field) - self.__class__._validate_symmetry(symm) - - # write initial header line - stream.write(asbytes('%%%%MatrixMarket matrix %s %s %s\n' % (rep,field,symm))) - - # write comments - for line in comment.split('\n'): - stream.write(asbytes('%%%s\n' % (line))) - - - template = self._field_template(field, precision) - - # write dense format - if rep == self.FORMAT_ARRAY: - - # write shape spec - stream.write(asbytes('%i %i\n' % (rows,cols))) - - if field in (self.FIELD_INTEGER, self.FIELD_REAL): - - if symm == self.SYMMETRY_GENERAL: - for j in range(cols): - for i in range(rows): - stream.write(asbytes(template % a[i,j])) - else: - for j in range(cols): - for i in range(j,rows): - stream.write(asbytes(template % a[i,j])) - - elif field == self.FIELD_COMPLEX: - - if symm == self.SYMMETRY_GENERAL: - for j in range(cols): - for i in range(rows): - aij = a[i,j] - stream.write(asbytes(template % (real(aij),imag(aij)))) - else: - for j in range(cols): - for i in range(j,rows): - aij = a[i,j] - stream.write(asbytes(template % (real(aij),imag(aij)))) - - elif field == self.FIELD_PATTERN: - raise ValueError('pattern type inconsisted with dense format') - - else: - raise TypeError('Unknown field type %s' % field) - - # write sparse format - else: - - if symm != self.SYMMETRY_GENERAL: - raise NotImplementedError('symmetric matrices not yet supported') - - coo = a.tocoo() # convert to COOrdinate format - - # write shape spec - stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz))) - - fmt = '%%.%dg' % precision - - if field == self.FIELD_PATTERN: - IJV = vstack((coo.row, coo.col)).T - elif field in [ self.FIELD_INTEGER, self.FIELD_REAL ]: - IJV = vstack((coo.row, 
coo.col, coo.data)).T - elif field == self.FIELD_COMPLEX: - IJV = vstack((coo.row, coo.col, coo.data.real, coo.data.imag)).T - else: - raise TypeError('Unknown field type %s' % field) - - IJV[:,:2] += 1 # change base 0 -> base 1 - - savetxt(stream, IJV, fmt=fmt) - -#------------------------------------------------------------------------------- -if __name__ == '__main__': - import sys - import time - for filename in sys.argv[1:]: - print 'Reading',filename,'...', - sys.stdout.flush() - t = time.time() - mmread(filename) - print 'took %s seconds' % (time.time() - t) diff --git a/scipy-0.10.1/scipy/io/netcdf.py b/scipy-0.10.1/scipy/io/netcdf.py deleted file mode 100644 index 9bcb530486..0000000000 --- a/scipy-0.10.1/scipy/io/netcdf.py +++ /dev/null @@ -1,859 +0,0 @@ -""" -NetCDF reader/writer module. - -This module is used to read and create NetCDF files. NetCDF files are -accessed through the `netcdf_file` object. Data written to and from NetCDF -files are contained in `netcdf_variable` objects. Attributes are given -as member variables of the `netcdf_file` and `netcdf_variable` objects. - -Notes ------ -NetCDF files are a self-describing binary data format. The file contains -metadata that describes the dimensions and variables in the file. More -details about NetCDF files can be found `here -`_. There -are three main sections to a NetCDF data structure: - -1. Dimensions -2. Variables -3. Attributes - -The dimensions section records the name and length of each dimension used -by the variables. The variables would then indicate which dimensions it -uses and any attributes such as data units, along with containing the data -values for the variable. It is good practice to include a -variable that is the same name as a dimension to provide the values for -that axes. Lastly, the attributes section would contain additional -information such as the name of the file creator or the instrument used to -collect the data. 
- -When writing data to a NetCDF file, there is often the need to indicate the -'record dimension'. A record dimension is the unbounded dimension for a -variable. For example, a temperature variable may have dimensions of -latitude, longitude and time. If one wants to add more temperature data to -the NetCDF file as time progresses, then the temperature variable should -have the time dimension flagged as the record dimension. - -This module implements the Scientific.IO.NetCDF API to read and create -NetCDF files. The same API is also used in the PyNIO and pynetcdf -modules, allowing these modules to be used interchangeably when working -with NetCDF files. The major advantage of this module over other -modules is that it doesn't require the code to be linked to the NetCDF -libraries. - -In addition, the NetCDF file header contains the position of the data in -the file, so access can be done in an efficient manner without loading -unnecessary data into memory. It uses the ``mmap`` module to create -Numpy arrays mapped to the data on disk, for the same purpose. - -Examples --------- -To create a NetCDF file: - - >>> from scipy.io import netcdf - >>> f = netcdf.netcdf_file('simple.nc', 'w') - >>> f.history = 'Created for a test' - >>> f.createDimension('time', 10) - >>> time = f.createVariable('time', 'i', ('time',)) - >>> time[:] = range(10) - >>> time.units = 'days since 2008-01-01' - >>> f.close() - -Note the assignment of ``range(10)`` to ``time[:]``. Exposing the slice -of the time variable allows for the data to be set in the object, rather -than letting ``range(10)`` overwrite the ``time`` variable. 
- -To read the NetCDF file we just created: - - >>> from scipy.io import netcdf - >>> f = netcdf.netcdf_file('simple.nc', 'r') - >>> print f.history - Created for a test - >>> time = f.variables['time'] - >>> print time.units - days since 2008-01-01 - >>> print time.shape - (10,) - >>> print time[-1] - 9 - >>> f.close() - -""" - -#TODO: -# * properly implement ``_FillValue``. -# * implement Jeff Whitaker's patch for masked variables. -# * fix character variables. -# * implement PAGESIZE for Python 2.6? - -#The Scientific.IO.NetCDF API allows attributes to be added directly to -#instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate -#between user-set attributes and instance attributes, user-set attributes -#are automatically stored in the ``_attributes`` attribute by overloading -#``__setattr__``. This is the reason why the code sometimes uses -#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; -#otherwise the key would be inserted into userspace attributes. 
- - -__all__ = ['netcdf_file'] - - -from operator import mul -from mmap import mmap, ACCESS_READ - -import numpy as np -from numpy.compat import asbytes, asstr -from numpy import fromstring, ndarray, dtype, empty, array, asarray -from numpy import little_endian as LITTLE_ENDIAN - - -ABSENT = asbytes('\x00\x00\x00\x00\x00\x00\x00\x00') -ZERO = asbytes('\x00\x00\x00\x00') -NC_BYTE = asbytes('\x00\x00\x00\x01') -NC_CHAR = asbytes('\x00\x00\x00\x02') -NC_SHORT = asbytes('\x00\x00\x00\x03') -NC_INT = asbytes('\x00\x00\x00\x04') -NC_FLOAT = asbytes('\x00\x00\x00\x05') -NC_DOUBLE = asbytes('\x00\x00\x00\x06') -NC_DIMENSION = asbytes('\x00\x00\x00\n') -NC_VARIABLE = asbytes('\x00\x00\x00\x0b') -NC_ATTRIBUTE = asbytes('\x00\x00\x00\x0c') - - -TYPEMAP = { NC_BYTE: ('b', 1), - NC_CHAR: ('c', 1), - NC_SHORT: ('h', 2), - NC_INT: ('i', 4), - NC_FLOAT: ('f', 4), - NC_DOUBLE: ('d', 8) } - -REVERSE = { ('b', 1): NC_BYTE, - ('B', 1): NC_CHAR, - ('c', 1): NC_CHAR, - ('h', 2): NC_SHORT, - ('i', 4): NC_INT, - ('f', 4): NC_FLOAT, - ('d', 8): NC_DOUBLE, - - # these come from asarray(1).dtype.char and asarray('foo').dtype.char, - # used when getting the types from generic attributes. - ('l', 4): NC_INT, - ('S', 1): NC_CHAR } - - -class netcdf_file(object): - """ - A file object for NetCDF data. - - A `netcdf_file` object has two standard attributes: `dimensions` and - `variables`. The values of both are dictionaries, mapping dimension - names to their associated lengths and variable names to variables, - respectively. Application programs should never modify these - dictionaries. - - All other attributes correspond to global attributes defined in the - NetCDF file. Global file attributes are created by assigning to an - attribute of the `netcdf_file` object. - - Parameters - ---------- - filename : string or file-like - string -> filename - mode : {'r', 'w'}, optional - read-write mode, default is 'r' - mmap : None or bool, optional - Whether to mmap `filename` when reading. 
Default is True - when `filename` is a file name, False when `filename` is a - file-like object - version : {1, 2}, optional - version of netcdf to read / write, where 1 means *Classic - format* and 2 means *64-bit offset format*. Default is 1. See - `here `_ - for more info. - - """ - def __init__(self, filename, mode='r', mmap=None, version=1): - """Initialize netcdf_file from fileobj (str or file-like).""" - if hasattr(filename, 'seek'): # file-like - self.fp = filename - self.filename = 'None' - if mmap is None: - mmap = False - elif mmap and not hasattr(filename, 'fileno'): - raise ValueError('Cannot use file object for mmap') - else: # maybe it's a string - self.filename = filename - self.fp = open(self.filename, '%sb' % mode) - if mmap is None: - mmap = True - self.use_mmap = mmap - self.version_byte = version - - if not mode in 'rw': - raise ValueError("Mode must be either 'r' or 'w'.") - self.mode = mode - - self.dimensions = {} - self.variables = {} - - self._dims = [] - self._recs = 0 - self._recsize = 0 - - self._attributes = {} - - if mode == 'r': - self._read() - - def __setattr__(self, attr, value): - # Store user defined attributes in a separate dict, - # so we can save them to file later. - try: - self._attributes[attr] = value - except AttributeError: - pass - self.__dict__[attr] = value - - def close(self): - """Closes the NetCDF file.""" - if not self.fp.closed: - try: - self.flush() - finally: - self.fp.close() - __del__ = close - - def createDimension(self, name, length): - """ - Adds a dimension to the Dimension section of the NetCDF data structure. - - Note that this function merely adds a new dimension that the variables can - reference. The values for the dimension, if desired, should be added as - a variable using `createVariable`, referring to this dimension. - - Parameters - ---------- - name : str - Name of the dimension (Eg, 'lat' or 'time'). - length : int - Length of the dimension. 
- - See Also - -------- - createVariable - - """ - self.dimensions[name] = length - self._dims.append(name) - - def createVariable(self, name, type, dimensions): - """ - Create an empty variable for the `netcdf_file` object, specifying its data - type and the dimensions it uses. - - Parameters - ---------- - name : str - Name of the new variable. - type : dtype or str - Data type of the variable. - dimensions : sequence of str - List of the dimension names used by the variable, in the desired order. - - Returns - ------- - variable : netcdf_variable - The newly created ``netcdf_variable`` object. - This object has also been added to the `netcdf_file` object as well. - - See Also - -------- - createDimension - - Notes - ----- - Any dimensions to be used by the variable should already exist in the - NetCDF data structure or should be created by `createDimension` prior to - creating the NetCDF variable. - - """ - shape = tuple([self.dimensions[dim] for dim in dimensions]) - shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy - - if isinstance(type, basestring): type = dtype(type) - typecode, size = type.char, type.itemsize - if (typecode, size) not in REVERSE: - raise ValueError("NetCDF 3 does not support type %s" % type) - dtype_ = '>%s' % typecode - if size > 1: dtype_ += str(size) - - data = empty(shape_, dtype=dtype_) - self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions) - return self.variables[name] - - def flush(self): - """ - Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. - - See Also - -------- - sync : Identical function - - """ - if hasattr(self, 'mode') and self.mode is 'w': - self._write() - sync = flush - - def _write(self): - self.fp.write(asbytes('CDF')) - self.fp.write(array(self.version_byte, '>b').tostring()) - - # Write headers and data. 
- self._write_numrecs() - self._write_dim_array() - self._write_gatt_array() - self._write_var_array() - - def _write_numrecs(self): - # Get highest record count from all record variables. - for var in self.variables.values(): - if var.isrec and len(var.data) > self._recs: - self.__dict__['_recs'] = len(var.data) - self._pack_int(self._recs) - - def _write_dim_array(self): - if self.dimensions: - self.fp.write(NC_DIMENSION) - self._pack_int(len(self.dimensions)) - for name in self._dims: - self._pack_string(name) - length = self.dimensions[name] - self._pack_int(length or 0) # replace None with 0 for record dimension - else: - self.fp.write(ABSENT) - - def _write_gatt_array(self): - self._write_att_array(self._attributes) - - def _write_att_array(self, attributes): - if attributes: - self.fp.write(NC_ATTRIBUTE) - self._pack_int(len(attributes)) - for name, values in attributes.items(): - self._pack_string(name) - self._write_values(values) - else: - self.fp.write(ABSENT) - - def _write_var_array(self): - if self.variables: - self.fp.write(NC_VARIABLE) - self._pack_int(len(self.variables)) - - # Sort variables non-recs first, then recs. We use a DSU - # since some people use pupynere with Python 2.3.x. - deco = [ (v._shape and not v.isrec, k) for (k, v) in self.variables.items() ] - deco.sort() - variables = [ k for (unused, k) in deco ][::-1] - - # Set the metadata for all variables. - for name in variables: - self._write_var_metadata(name) - # Now that we have the metadata, we know the vsize of - # each record variable, so we can calculate recsize. - self.__dict__['_recsize'] = sum([ - var._vsize for var in self.variables.values() - if var.isrec]) - # Set the data for all variables. 
- for name in variables: - self._write_var_data(name) - else: - self.fp.write(ABSENT) - - def _write_var_metadata(self, name): - var = self.variables[name] - - self._pack_string(name) - self._pack_int(len(var.dimensions)) - for dimname in var.dimensions: - dimid = self._dims.index(dimname) - self._pack_int(dimid) - - self._write_att_array(var._attributes) - - nc_type = REVERSE[var.typecode(), var.itemsize()] - self.fp.write(asbytes(nc_type)) - - if not var.isrec: - vsize = var.data.size * var.data.itemsize - vsize += -vsize % 4 - else: # record variable - try: - vsize = var.data[0].size * var.data.itemsize - except IndexError: - vsize = 0 - rec_vars = len([var for var in self.variables.values() - if var.isrec]) - if rec_vars > 1: - vsize += -vsize % 4 - self.variables[name].__dict__['_vsize'] = vsize - self._pack_int(vsize) - - # Pack a bogus begin, and set the real value later. - self.variables[name].__dict__['_begin'] = self.fp.tell() - self._pack_begin(0) - - def _write_var_data(self, name): - var = self.variables[name] - - # Set begin in file header. - the_beguine = self.fp.tell() - self.fp.seek(var._begin) - self._pack_begin(the_beguine) - self.fp.seek(the_beguine) - - # Write data. - if not var.isrec: - self.fp.write(var.data.tostring()) - count = var.data.size * var.data.itemsize - self.fp.write(asbytes('0') * (var._vsize - count)) - else: # record variable - # Handle rec vars with shape[0] < nrecs. - if self._recs > len(var.data): - shape = (self._recs,) + var.data.shape[1:] - var.data.resize(shape) - - pos0 = pos = self.fp.tell() - for rec in var.data: - # Apparently scalars cannot be converted to big endian. If we - # try to convert a ``=i4`` scalar to, say, '>i4' the dtype - # will remain as ``=i4``. 
- if not rec.shape and (rec.dtype.byteorder == '<' or - (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): - rec = rec.byteswap() - self.fp.write(rec.tostring()) - # Padding - count = rec.size * rec.itemsize - self.fp.write(asbytes('0') * (var._vsize - count)) - pos += self._recsize - self.fp.seek(pos) - self.fp.seek(pos0 + var._vsize) - - def _write_values(self, values): - if hasattr(values, 'dtype'): - nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] - else: - types = [ - (int, NC_INT), - (long, NC_INT), - (float, NC_FLOAT), - (basestring, NC_CHAR), - ] - try: - sample = values[0] - except TypeError: - sample = values - for class_, nc_type in types: - if isinstance(sample, class_): break - - typecode, size = TYPEMAP[nc_type] - dtype_ = '>%s' % typecode - - values = asarray(values, dtype=dtype_) - - self.fp.write(asbytes(nc_type)) - - if values.dtype.char == 'S': - nelems = values.itemsize - else: - nelems = values.size - self._pack_int(nelems) - - if not values.shape and (values.dtype.byteorder == '<' or - (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): - values = values.byteswap() - self.fp.write(values.tostring()) - count = values.size * values.itemsize - self.fp.write(asbytes('0') * (-count % 4)) # pad - - def _read(self): - # Check magic bytes and version - magic = self.fp.read(3) - if not magic == asbytes('CDF'): - raise TypeError("Error: %s is not a valid NetCDF 3 file" % - self.filename) - self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0] - - # Read file headers and set data. 
- self._read_numrecs() - self._read_dim_array() - self._read_gatt_array() - self._read_var_array() - - def _read_numrecs(self): - self.__dict__['_recs'] = self._unpack_int() - - def _read_dim_array(self): - header = self.fp.read(4) - if not header in [ZERO, NC_DIMENSION]: - raise ValueError("Unexpected header.") - count = self._unpack_int() - - for dim in range(count): - name = asstr(self._unpack_string()) - length = self._unpack_int() or None # None for record dimension - self.dimensions[name] = length - self._dims.append(name) # preserve order - - def _read_gatt_array(self): - for k, v in self._read_att_array().items(): - self.__setattr__(k, v) - - def _read_att_array(self): - header = self.fp.read(4) - if not header in [ZERO, NC_ATTRIBUTE]: - raise ValueError("Unexpected header.") - count = self._unpack_int() - - attributes = {} - for attr in range(count): - name = asstr(self._unpack_string()) - attributes[name] = self._read_values() - return attributes - - def _read_var_array(self): - header = self.fp.read(4) - if not header in [ZERO, NC_VARIABLE]: - raise ValueError("Unexpected header.") - - begin = 0 - dtypes = {'names': [], 'formats': []} - rec_vars = [] - count = self._unpack_int() - for var in range(count): - (name, dimensions, shape, attributes, - typecode, size, dtype_, begin_, vsize) = self._read_var() - # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html - # Note that vsize is the product of the dimension lengths - # (omitting the record dimension) and the number of bytes - # per value (determined from the type), increased to the - # next multiple of 4, for each variable. If a record - # variable, this is the amount of space per record. The - # netCDF "record size" is calculated as the sum of the - # vsize's of all the record variables. - # - # The vsize field is actually redundant, because its value - # may be computed from other information in the header. 
The - # 32-bit vsize field is not large enough to contain the size - # of variables that require more than 2^32 - 4 bytes, so - # 2^32 - 1 is used in the vsize field for such variables. - if shape and shape[0] is None: # record variable - rec_vars.append(name) - # The netCDF "record size" is calculated as the sum of - # the vsize's of all the record variables. - self.__dict__['_recsize'] += vsize - if begin == 0: begin = begin_ - dtypes['names'].append(name) - dtypes['formats'].append(str(shape[1:]) + dtype_) - - # Handle padding with a virtual variable. - if typecode in 'bch': - actual_size = reduce(mul, (1,) + shape[1:]) * size - padding = -actual_size % 4 - if padding: - dtypes['names'].append('_padding_%d' % var) - dtypes['formats'].append('(%d,)>b' % padding) - - # Data will be set later. - data = None - else: # not a record variable - # Calculate size to avoid problems with vsize (above) - a_size = reduce(mul, shape, 1) * size - if self.use_mmap: - mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ) - data = ndarray.__new__(ndarray, shape, dtype=dtype_, - buffer=mm, offset=begin_, order=0) - else: - pos = self.fp.tell() - self.fp.seek(begin_) - data = fromstring(self.fp.read(a_size), dtype=dtype_) - data.shape = shape - self.fp.seek(pos) - - # Add variable. - self.variables[name] = netcdf_variable( - data, typecode, size, shape, dimensions, attributes) - - if rec_vars: - # Remove padding when only one record variable. - if len(rec_vars) == 1: - dtypes['names'] = dtypes['names'][:1] - dtypes['formats'] = dtypes['formats'][:1] - - # Build rec array. 
- if self.use_mmap: - mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ) - rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes, - buffer=mm, offset=begin, order=0) - else: - pos = self.fp.tell() - self.fp.seek(begin) - rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes) - rec_array.shape = (self._recs,) - self.fp.seek(pos) - - for var in rec_vars: - self.variables[var].__dict__['data'] = rec_array[var] - - def _read_var(self): - name = asstr(self._unpack_string()) - dimensions = [] - shape = [] - dims = self._unpack_int() - - for i in range(dims): - dimid = self._unpack_int() - dimname = self._dims[dimid] - dimensions.append(dimname) - dim = self.dimensions[dimname] - shape.append(dim) - dimensions = tuple(dimensions) - shape = tuple(shape) - - attributes = self._read_att_array() - nc_type = self.fp.read(4) - vsize = self._unpack_int() - begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() - - typecode, size = TYPEMAP[nc_type] - dtype_ = '>%s' % typecode - - return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize - - def _read_values(self): - nc_type = self.fp.read(4) - n = self._unpack_int() - - typecode, size = TYPEMAP[nc_type] - - count = n*size - values = self.fp.read(int(count)) - self.fp.read(-count % 4) # read padding - - if typecode is not 'c': - values = fromstring(values, dtype='>%s' % typecode) - if values.shape == (1,): values = values[0] - else: - values = values.rstrip(asbytes('\x00')) - return values - - def _pack_begin(self, begin): - if self.version_byte == 1: - self._pack_int(begin) - elif self.version_byte == 2: - self._pack_int64(begin) - - def _pack_int(self, value): - self.fp.write(array(value, '>i').tostring()) - _pack_int32 = _pack_int - - def _unpack_int(self): - return int(fromstring(self.fp.read(4), '>i')[0]) - _unpack_int32 = _unpack_int - - def _pack_int64(self, value): - self.fp.write(array(value, '>q').tostring()) - - def 
_unpack_int64(self): - return fromstring(self.fp.read(8), '>q')[0] - - def _pack_string(self, s): - count = len(s) - self._pack_int(count) - self.fp.write(asbytes(s)) - self.fp.write(asbytes('0') * (-count % 4)) # pad - - def _unpack_string(self): - count = self._unpack_int() - s = self.fp.read(count).rstrip(asbytes('\x00')) - self.fp.read(-count % 4) # read padding - return s - - -class netcdf_variable(object): - """ - A data object for the `netcdf` module. - - `netcdf_variable` objects are constructed by calling the method - `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` - objects behave much like array objects defined in numpy, except that their - data resides in a file. Data is read by indexing and written by assigning - to an indexed subset; the entire array can be accessed by the index ``[:]`` - or (for scalars) by using the methods `getValue` and `assignValue`. - `netcdf_variable` objects also have attribute `shape` with the same meaning - as for arrays, but the shape cannot be modified. There is another read-only - attribute `dimensions`, whose value is the tuple of dimension names. - - All other attributes correspond to variable attributes defined in - the NetCDF file. Variable attributes are created by assigning to an - attribute of the `netcdf_variable` object. - - Parameters - ---------- - data : array_like - The data array that holds the values for the variable. - Typically, this is initialized as empty, but with the proper shape. - typecode : dtype character code - Desired data-type for the data array. - size : int - Desired element size for the data array. - shape : sequence of ints - The shape of the array. This should match the lengths of the - variable's dimensions. - dimensions : sequence of strings - The names of the dimensions used by the variable. Must be in the - same order of the dimension lengths given by `shape`. - attributes : dict, optional - Attribute values (any type) keyed by string names. 
These attributes - become attributes for the netcdf_variable object. - - - Attributes - ---------- - dimensions : list of str - List of names of dimensions used by the variable object. - isrec, shape - Properties - - See also - -------- - isrec, shape - - """ - def __init__(self, data, typecode, size, shape, dimensions, attributes=None): - self.data = data - self._typecode = typecode - self._size = size - self._shape = shape - self.dimensions = dimensions - - self._attributes = attributes or {} - for k, v in self._attributes.items(): - self.__dict__[k] = v - - def __setattr__(self, attr, value): - # Store user defined attributes in a separate dict, - # so we can save them to file later. - try: - self._attributes[attr] = value - except AttributeError: - pass - self.__dict__[attr] = value - - def isrec(self): - """Returns whether the variable has a record dimension or not. - - A record dimension is a dimension along which additional data could be - easily appended in the netcdf data structure without much rewriting of - the data file. This attribute is a read-only property of the - `netcdf_variable`. - - """ - return self.data.shape and not self._shape[0] - isrec = property(isrec) - - def shape(self): - """Returns the shape tuple of the data variable. - - This is a read-only attribute and can not be modified in the - same manner of other numpy arrays. - """ - return self.data.shape - shape = property(shape) - - def getValue(self): - """ - Retrieve a scalar value from a `netcdf_variable` of length one. - - Raises - ------ - ValueError - If the netcdf variable is an array of length greater than one, - this exception will be raised. - - """ - return self.data.item() - - def assignValue(self, value): - """ - Assign a scalar value to a `netcdf_variable` of length one. - - Parameters - ---------- - value : scalar - Scalar value (of compatible type) to assign to a length-one netcdf - variable. This value will be written to file. 
- - Raises - ------ - ValueError - If the input is not a scalar, or if the destination is not a length-one - netcdf variable. - - """ - if not self.data.flags.writeable: - # Work-around for a bug in NumPy. Calling itemset() on a read-only - # memory-mapped array causes a seg. fault. - # See NumPy ticket #1622, and SciPy ticket #1202. - # This check for `writeable` can be removed when the oldest version - # of numpy still supported by scipy contains the fix for #1622. - raise RuntimeError("variable is not writeable") - - self.data.itemset(value) - - def typecode(self): - """ - Return the typecode of the variable. - - Returns - ------- - typecode : char - The character typecode of the variable (eg, 'i' for int). - - """ - return self._typecode - - def itemsize(self): - """ - Return the itemsize of the variable. - - Returns - ------- - itemsize : int - The element size of the variable (eg, 8 for float64). - - """ - return self._size - - def __getitem__(self, index): - return self.data[index] - - def __setitem__(self, index, data): - # Expand data for record vars? 
- if self.isrec: - if isinstance(index, tuple): - rec_index = index[0] - else: - rec_index = index - if isinstance(rec_index, slice): - recs = (rec_index.start or 0) + len(data) - else: - recs = rec_index + 1 - if recs > len(self.data): - shape = (recs,) + self._shape[1:] - self.data.resize(shape) - self.data[index] = data - - -NetCDFFile = netcdf_file -NetCDFVariable = netcdf_variable diff --git a/scipy-0.10.1/scipy/io/setup.py b/scipy-0.10.1/scipy/io/setup.py deleted file mode 100755 index 90d2e86a9e..0000000000 --- a/scipy-0.10.1/scipy/io/setup.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('io', parent_package, top_path) - - config.add_data_dir('tests') - config.add_data_dir('docs') - config.add_subpackage('matlab') - config.add_subpackage('arff') - config.add_subpackage('harwell_boeing') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/io/setupscons.py b/scipy-0.10.1/scipy/io/setupscons.py deleted file mode 100755 index 95fa398c70..0000000000 --- a/scipy-0.10.1/scipy/io/setupscons.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('io', parent_package, top_path, - setup_name = 'setupscons.py') - - config.add_sconscript('SConstruct') - - config.add_data_dir('tests') - config.add_data_dir('docs') - config.add_subpackage('matlab') - config.add_subpackage('arff') - config.add_subpackage('harwell_boeing') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_1d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_1d.sav deleted file 
mode 100644 index 619a125967..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_1d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_2d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_2d.sav deleted file mode 100644 index 804d8b1a8a..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_2d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_3d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_3d.sav deleted file mode 100644 index 3fa56c450e..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_3d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_4d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_4d.sav deleted file mode 100644 index 4bb951e274..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_4d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_5d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_5d.sav deleted file mode 100644 index 2854dbc8b1..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_5d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_6d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_6d.sav deleted file mode 100644 index 91588d348d..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_6d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_7d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_7d.sav deleted file mode 100644 index 3e978fad54..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_7d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_8d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_8d.sav deleted file mode 100644 index f699fe2427..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_8d.sav and /dev/null differ 
diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_1d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_1d.sav deleted file mode 100644 index 8e3a402c60..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_1d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_2d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_2d.sav deleted file mode 100644 index dd3504f0ec..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_2d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_3d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_3d.sav deleted file mode 100644 index 285da7f78f..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_3d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_4d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_4d.sav deleted file mode 100644 index d99fa48f0a..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_4d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_5d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_5d.sav deleted file mode 100644 index de5e984e49..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_5d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_6d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_6d.sav deleted file mode 100644 index bb76671a65..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_6d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_7d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_7d.sav deleted file mode 100644 index 995d23c6ed..0000000000 Binary files 
a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_7d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_8d.sav b/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_8d.sav deleted file mode 100644 index 4249ec6211..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/array_float32_pointer_8d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/example_1.nc b/scipy-0.10.1/scipy/io/tests/data/example_1.nc deleted file mode 100644 index 5775622d0e..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/example_1.nc and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_byte.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_byte.sav deleted file mode 100644 index e4027b3cf3..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_byte.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_complex32.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_complex32.sav deleted file mode 100644 index 593e8c6208..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_complex32.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_complex64.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_complex64.sav deleted file mode 100644 index edb19d388a..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_complex64.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_float32.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_float32.sav deleted file mode 100644 index be9e3877ea..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_float32.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_float64.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_float64.sav deleted file mode 100644 index 9680b2878c..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_float64.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_heap_pointer.sav 
b/scipy-0.10.1/scipy/io/tests/data/scalar_heap_pointer.sav deleted file mode 100644 index d02b1756ac..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_heap_pointer.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_int16.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_int16.sav deleted file mode 100644 index 603525694c..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_int16.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_int32.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_int32.sav deleted file mode 100644 index 40210b8894..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_int32.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_int64.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_int64.sav deleted file mode 100644 index c91cd0a561..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_int64.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_string.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_string.sav deleted file mode 100644 index ee6e69fe84..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_string.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_uint16.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_uint16.sav deleted file mode 100644 index 759c2e64fa..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_uint16.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_uint32.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_uint32.sav deleted file mode 100644 index 74dec7b893..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_uint32.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/scalar_uint64.sav b/scipy-0.10.1/scipy/io/tests/data/scalar_uint64.sav deleted file mode 100644 index fc9da5796e..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/scalar_uint64.sav and /dev/null 
differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_arrays.sav b/scipy-0.10.1/scipy/io/tests/data/struct_arrays.sav deleted file mode 100644 index 40c9cd330e..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_arrays.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_arrays_replicated.sav b/scipy-0.10.1/scipy/io/tests/data/struct_arrays_replicated.sav deleted file mode 100644 index 6f01fbfd10..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_arrays_replicated.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/scipy-0.10.1/scipy/io/tests/data/struct_arrays_replicated_3d.sav deleted file mode 100644 index bac9b20748..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_arrays_replicated_3d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_inherit.sav b/scipy-0.10.1/scipy/io/tests/data/struct_inherit.sav deleted file mode 100644 index 8babd56306..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_inherit.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays.sav b/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays.sav deleted file mode 100644 index a3c6781629..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays_replicated.sav deleted file mode 100644 index 38b8122611..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays_replicated.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav deleted file mode 100644 index db1c256c85..0000000000 Binary files 
a/scipy-0.10.1/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_pointers.sav b/scipy-0.10.1/scipy/io/tests/data/struct_pointers.sav deleted file mode 100644 index acbb058a30..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_pointers.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_pointers_replicated.sav b/scipy-0.10.1/scipy/io/tests/data/struct_pointers_replicated.sav deleted file mode 100644 index d16f4655cc..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_pointers_replicated.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/scipy-0.10.1/scipy/io/tests/data/struct_pointers_replicated_3d.sav deleted file mode 100644 index 732dd2cbfa..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_pointers_replicated_3d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_scalars.sav b/scipy-0.10.1/scipy/io/tests/data/struct_scalars.sav deleted file mode 100644 index 69d7eaf4ec..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_scalars.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_scalars_replicated.sav b/scipy-0.10.1/scipy/io/tests/data/struct_scalars_replicated.sav deleted file mode 100644 index 2222391ae5..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_scalars_replicated.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/scipy-0.10.1/scipy/io/tests/data/struct_scalars_replicated_3d.sav deleted file mode 100644 index a35f1acfb4..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/struct_scalars_replicated_3d.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/test-44100-le-1ch-4bytes.wav b/scipy-0.10.1/scipy/io/tests/data/test-44100-le-1ch-4bytes.wav deleted file mode 100644 index 
8aae8e2c6a..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/test-44100-le-1ch-4bytes.wav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/test-8000-le-2ch-1byteu.wav b/scipy-0.10.1/scipy/io/tests/data/test-8000-le-2ch-1byteu.wav deleted file mode 100644 index 709008194a..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/test-8000-le-2ch-1byteu.wav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/data/various_compressed.sav b/scipy-0.10.1/scipy/io/tests/data/various_compressed.sav deleted file mode 100644 index dcdb0b0d43..0000000000 Binary files a/scipy-0.10.1/scipy/io/tests/data/various_compressed.sav and /dev/null differ diff --git a/scipy-0.10.1/scipy/io/tests/test_idl.py b/scipy-0.10.1/scipy/io/tests/test_idl.py deleted file mode 100644 index e2a5bb9abc..0000000000 --- a/scipy-0.10.1/scipy/io/tests/test_idl.py +++ /dev/null @@ -1,393 +0,0 @@ -from os import path -import warnings - -DATA_PATH = path.join(path.dirname(__file__), 'data') - -import numpy as np -from numpy.compat import asbytes_nested, asbytes -from numpy.testing import assert_equal, assert_array_equal, run_module_suite -from nose.tools import assert_true - -from scipy.io.idl import readsav - -warnings.filterwarnings('ignore', message="warning: multi-dimensional structures") -warnings.filterwarnings('ignore', message="warning: empty strings") - -def object_array(*args): - '''Constructs a numpy array of objects''' - array = np.empty(len(args), dtype=np.object) - for i in range(len(args)): - array[i] = args[i] - return array - - -def assert_identical(a, b): - '''Assert whether value AND type are the same''' - assert_equal(a, b) - if type(b) is np.str: - assert_equal(type(a), type(b)) - else: - assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type) - - -def assert_array_identical(a, b): - '''Assert whether values AND type are the same''' - assert_array_equal(a, b) - assert_equal(a.dtype.type, b.dtype.type) - - -# Define vectorized ID function 
for pointer arrays -vect_id = np.vectorize(id) - - -class TestIdict: - '''Test the idict= argument to read''' - - def test_idict(self): - custom_dict = {'a': np.int16(999)} - original_id = id(custom_dict) - s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False) - assert_equal(original_id, id(s)) - assert_true('a' in s) - assert_identical(s['a'], np.int16(999)) - assert_identical(s['i8u'], np.uint8(234)) - - -class TestScalars: - '''Test that scalar values are read in with the correct value and type''' - - def test_byte(self): - s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False) - assert_identical(s.i8u, np.uint8(234)) - - def test_int16(self): - s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False) - assert_identical(s.i16s, np.int16(-23456)) - - def test_int32(self): - s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False) - assert_identical(s.i32s, np.int32(-1234567890)) - - def test_float32(self): - s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False) - assert_identical(s.f32, np.float32(-3.1234567e+37)) - - def test_float64(self): - s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False) - assert_identical(s.f64, np.float64(-1.1976931348623157e+307)) - - def test_complex32(self): - s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False) - assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j)) - - def test_bytes(self): - s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False) - assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python")) - - def test_structure(self): - pass - - def test_complex64(self): - s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False) - assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j)) - - def test_heap_pointer(self): - pass - - def test_object_reference(self): - pass - - def test_uint16(self): - s = 
readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False) - assert_identical(s.i16u, np.uint16(65511)) - - def test_uint32(self): - s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False) - assert_identical(s.i32u, np.uint32(4294967233)) - - def test_int64(self): - s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False) - assert_identical(s.i64s, np.int64(-9223372036854774567)) - - def test_uint64(self): - s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False) - assert_identical(s.i64u, np.uint64(18446744073709529285)) - - -class TestCompressed(TestScalars): - '''Test that compressed .sav files can be read in''' - - def test_compressed(self): - s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False) - assert_identical(s.i8u, np.uint8(234)) - assert_identical(s.f32, np.float32(-3.1234567e+37)) - assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j)) - assert_equal(s.array5d.shape, (4, 3, 4, 6, 5)) - assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16)) - assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32)) - assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) - - -class TestArrayDimensions: - '''Test that multi-dimensional arrays are read in with the correct dimensions''' - - def test_1d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False) - assert_equal(s.array1d.shape, (123, )) - - def test_2d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False) - assert_equal(s.array2d.shape, (22, 12)) - - def test_3d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False) - assert_equal(s.array3d.shape, (11, 22, 12)) - - def test_4d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False) - 
assert_equal(s.array4d.shape, (4, 5, 8, 7)) - - def test_5d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False) - assert_equal(s.array5d.shape, (4, 3, 4, 6, 5)) - - def test_6d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False) - assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4)) - - def test_7d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False) - assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2)) - - def test_8d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False) - assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4)) - - -class TestStructures: - '''Test that structures are correctly read in''' - - def test_scalars(self): - s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False) - assert_identical(s.scalars.a, np.array(np.int16(1))) - assert_identical(s.scalars.b, np.array(np.int32(2))) - assert_identical(s.scalars.c, np.array(np.float32(3.))) - assert_identical(s.scalars.d, np.array(np.float64(4.))) - assert_identical(s.scalars.e, np.array(asbytes_nested(["spam"]), dtype=np.object)) - assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j))) - - def test_scalars_replicated(self): - s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False) - assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5)) - assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5)) - assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5)) - assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5)) - assert_identical(s.scalars_rep.e, np.repeat(asbytes("spam"), 5).astype(np.object)) - assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5)) - - def test_scalars_replicated_3d(self): - s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False) - assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2)) - assert_identical(s.scalars_rep.b, 
np.repeat(np.int32(2), 24).reshape(4, 3, 2)) - assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2)) - assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2)) - assert_identical(s.scalars_rep.e, np.repeat(asbytes("spam"), 24).reshape(4, 3, 2).astype(np.object)) - assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2)) - - def test_arrays(self): - s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False) - assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16)) - assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32)) - assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_array_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) - - def test_arrays_replicated(self): - - s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False) - - # Check column types - assert_true(s.arrays_rep.a.dtype.type is np.object_) - assert_true(s.arrays_rep.b.dtype.type is np.object_) - assert_true(s.arrays_rep.c.dtype.type is np.object_) - assert_true(s.arrays_rep.d.dtype.type is np.object_) - - # Check column shapes - assert_equal(s.arrays_rep.a.shape, (5, )) - assert_equal(s.arrays_rep.b.shape, (5, )) - assert_equal(s.arrays_rep.c.shape, (5, )) - assert_equal(s.arrays_rep.d.shape, (5, )) - - # Check values - for i in range(5): - assert_array_identical(s.arrays_rep.a[i], np.array([1, 2, 3], dtype=np.int16)) - assert_array_identical(s.arrays_rep.b[i], np.array([4., 5., 6., 7.], dtype=np.float32)) - assert_array_identical(s.arrays_rep.c[i], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_array_identical(s.arrays_rep.d[i], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) - - def test_arrays_replicated_3d(self): - - s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False) - - # 
Check column types - assert_true(s.arrays_rep.a.dtype.type is np.object_) - assert_true(s.arrays_rep.b.dtype.type is np.object_) - assert_true(s.arrays_rep.c.dtype.type is np.object_) - assert_true(s.arrays_rep.d.dtype.type is np.object_) - - # Check column shapes - assert_equal(s.arrays_rep.a.shape, (4, 3, 2)) - assert_equal(s.arrays_rep.b.shape, (4, 3, 2)) - assert_equal(s.arrays_rep.c.shape, (4, 3, 2)) - assert_equal(s.arrays_rep.d.shape, (4, 3, 2)) - - # Check values - for i in range(4): - for j in range(3): - for k in range(2): - assert_array_identical(s.arrays_rep.a[i, j, k], np.array([1, 2, 3], dtype=np.int16)) - assert_array_identical(s.arrays_rep.b[i, j, k], np.array([4., 5., 6., 7.], dtype=np.float32)) - assert_array_identical(s.arrays_rep.c[i, j, k], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_array_identical(s.arrays_rep.d[i, j, k], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) - - def test_inheritance(self): - s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False) - assert_identical(s.fc.x, np.array([0], dtype=np.int16)) - assert_identical(s.fc.y, np.array([0], dtype=np.int16)) - assert_identical(s.fc.r, np.array([0], dtype=np.int16)) - assert_identical(s.fc.c, np.array([4], dtype=np.int16)) - - -class TestPointers: - '''Check that pointers in .sav files produce references to the same object in Python''' - - def test_pointers(self): - s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False) - assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j)) - assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j)) - assert_true(s.c64_pointer1 is s.c64_pointer2) - - -class TestPointerArray: - '''Test that pointers in arrays are correctly read in''' - - def test_1d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False) - assert_equal(s.array1d.shape, (123, )) - 
assert_true(np.all(s.array1d == np.float32(4.))) - assert_true(np.all(vect_id(s.array1d) == id(s.array1d[0]))) - - def test_2d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False) - assert_equal(s.array2d.shape, (22, 12)) - assert_true(np.all(s.array2d == np.float32(4.))) - assert_true(np.all(vect_id(s.array2d) == id(s.array2d[0,0]))) - - def test_3d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False) - assert_equal(s.array3d.shape, (11, 22, 12)) - assert_true(np.all(s.array3d == np.float32(4.))) - assert_true(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0]))) - - def test_4d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False) - assert_equal(s.array4d.shape, (4, 5, 8, 7)) - assert_true(np.all(s.array4d == np.float32(4.))) - assert_true(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0]))) - - def test_5d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False) - assert_equal(s.array5d.shape, (4, 3, 4, 6, 5)) - assert_true(np.all(s.array5d == np.float32(4.))) - assert_true(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0]))) - - def test_6d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False) - assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4)) - assert_true(np.all(s.array6d == np.float32(4.))) - assert_true(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0]))) - - def test_7d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False) - assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2)) - assert_true(np.all(s.array7d == np.float32(4.))) - assert_true(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0]))) - - def test_8d(self): - s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False) - assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4)) - assert_true(np.all(s.array8d == np.float32(4.))) - 
assert_true(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0]))) - -class TestPointerStructures: - '''Test that structures are correctly read in''' - - def test_scalars(self): - s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False) - assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_)) - assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_)) - assert_true(id(s.pointers.g[0]) == id(s.pointers.h[0])) - - def test_pointers_replicated(self): - s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False) - assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_)) - assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_)) - assert_true(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h))) - - def test_pointers_replicated_3d(self): - s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False) - assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)) - assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)) - assert_true(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h))) - - def test_arrays(self): - s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False) - assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_)) - assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_)) - assert_true(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0]))) - assert_true(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0]))) - assert_true(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0])) - - def test_arrays_replicated(self): - - s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False) - - # Check column types - assert_true(s.arrays_rep.g.dtype.type is np.object_) - assert_true(s.arrays_rep.h.dtype.type 
is np.object_) - - # Check column shapes - assert_equal(s.arrays_rep.g.shape, (5, )) - assert_equal(s.arrays_rep.h.shape, (5, )) - - # Check values - for i in range(5): - assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_)) - assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_)) - assert_true(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0]))) - assert_true(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0]))) - - def test_arrays_replicated_3d(self): - - s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav'), verbose=False) - - # Check column types - assert_true(s.arrays_rep.g.dtype.type is np.object_) - assert_true(s.arrays_rep.h.dtype.type is np.object_) - - # Check column shapes - assert_equal(s.arrays_rep.g.shape, (4, 3, 2)) - assert_equal(s.arrays_rep.h.shape, (4, 3, 2)) - - # Check values - for i in range(4): - for j in range(3): - for k in range(2): - assert_array_identical(s.arrays_rep.g[i, j, k], np.repeat(np.float32(4.), 2).astype(np.object_)) - assert_array_identical(s.arrays_rep.h[i, j, k], np.repeat(np.float32(4.), 3).astype(np.object_)) - assert_true(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0]))) - assert_true(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0]))) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/tests/test_mmio.py b/scipy-0.10.1/scipy/io/tests/test_mmio.py deleted file mode 100644 index 14b59872f6..0000000000 --- a/scipy-0.10.1/scipy/io/tests/test_mmio.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/env python - -from tempfile import mktemp -from numpy import array,transpose -from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal, \ - assert_equal, rand - -import scipy.sparse -from scipy.io.mmio import mminfo,mmread,mmwrite - - -class TestMMIOArray(TestCase): - - def test_simple(self): - a = [[1,2],[3,4]] - 
fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','integer','general')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_rectangular(self): - a = [[1,2,3],[4,5,6]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,3,6,'array','integer','general')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_rectangular_real(self): - a = [[1,2],[3.5,4],[5,6]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(3,2,6,'array','real','general')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_real(self): - a = [[1,2],[3,4.0]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','real','general')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_complex(self): - a = [[1,2],[3,4j]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','complex','general')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_symmetric(self): - a = [[1,2],[2,4]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','integer','symmetric')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_skew_symmetric(self): - a = [[1,2],[-2,4]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','integer','skew-symmetric')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_skew_symmetric_float(self): - a = array([[1,2],[-2.0,4]],'f') - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','real','skew-symmetric')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_simple_hermitian(self): - a = [[1,2+3j],[2-3j,4]] - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(2,2,4,'array','complex','hermitian')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_random_symmetric_real(self): - sz = (20,20) - a = rand(*sz) - a = a + transpose(a) - fn = mktemp() - mmwrite(fn,a) - 
assert_equal(mminfo(fn),(20,20,400,'array','real','symmetric')) - b = mmread(fn) - assert_array_almost_equal(a,b) - - def test_random_rect_real(self): - sz = (20,15) - a = rand(*sz) - fn = mktemp() - mmwrite(fn,a) - assert_equal(mminfo(fn),(20,15,300,'array','real','general')) - b = mmread(fn) - assert_array_almost_equal(a,b) - -_general_example = '''\ -%%MatrixMarket matrix coordinate real general -%================================================================================= -% -% This ASCII file represents a sparse MxN matrix with L -% nonzeros in the following Matrix Market format: -% -% +----------------------------------------------+ -% |%%MatrixMarket matrix coordinate real general | <--- header line -% |% | <--+ -% |% comments | |-- 0 or more comment lines -% |% | <--+ -% | M N L | <--- rows, columns, entries -% | I1 J1 A(I1, J1) | <--+ -% | I2 J2 A(I2, J2) | | -% | I3 J3 A(I3, J3) | |-- L lines -% | . . . | | -% | IL JL A(IL, JL) | <--+ -% +----------------------------------------------+ -% -% Indices are 1-based, i.e. A(1,1) is the first element. -% -%================================================================================= - 5 5 8 - 1 1 1.000e+00 - 2 2 1.050e+01 - 3 3 1.500e-02 - 1 4 6.000e+00 - 4 2 2.505e+02 - 4 4 -2.800e+02 - 4 5 3.332e+01 - 5 5 1.200e+01 -''' - -_hermitian_example = '''\ -%%MatrixMarket matrix coordinate complex hermitian - 5 5 7 - 1 1 1.0 0 - 2 2 10.5 0 - 4 2 250.5 22.22 - 3 3 1.5e-2 0 - 4 4 -2.8e2 0 - 5 5 12. 0 - 5 4 0 33.32 -''' - -_skew_example = '''\ -%%MatrixMarket matrix coordinate real skew-symmetric - 5 5 7 - 1 1 1.0 - 2 2 10.5 - 4 2 250.5 - 3 3 1.5e-2 - 4 4 -2.8e2 - 5 5 12. - 5 4 0 -''' - -_symmetric_example = '''\ -%%MatrixMarket matrix coordinate real symmetric - 5 5 7 - 1 1 1.0 - 2 2 10.5 - 4 2 250.5 - 3 3 1.5e-2 - 4 4 -2.8e2 - 5 5 12. 
- 5 4 8 -''' - -_symmetric_pattern_example = '''\ -%%MatrixMarket matrix coordinate pattern symmetric - 5 5 7 - 1 1 - 2 2 - 4 2 - 3 3 - 4 4 - 5 5 - 5 4 -''' - -class TestMMIOCoordinate(TestCase): - def test_read_general(self): - """read a general matrix""" - fn = mktemp() - f = open(fn,'w') - f.write(_general_example) - f.close() - assert_equal(mminfo(fn),(5,5,8,'coordinate','real','general')) - a = [[1, 0, 0, 6, 0], - [0, 10.5, 0, 0, 0], - [0, 0, .015, 0, 0], - [0, 250.5, 0, -280, 33.32], - [0, 0, 0, 0, 12]] - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_read_hermitian(self): - """read a hermitian matrix""" - fn = mktemp() - f = open(fn,'w') - f.write(_hermitian_example) - f.close() - assert_equal(mminfo(fn),(5,5,7,'coordinate','complex','hermitian')) - a = [[1, 0, 0, 0, 0], - [0, 10.5, 0, 250.5 - 22.22j, 0], - [0, 0, .015, 0, 0], - [0, 250.5 + 22.22j, 0, -280, -33.32j], - [0, 0, 0, 33.32j, 12]] - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_read_skew(self): - """read a skew-symmetric matrix""" - fn = mktemp() - f = open(fn,'w') - f.write(_skew_example) - f.close() - assert_equal(mminfo(fn),(5,5,7,'coordinate','real','skew-symmetric')) - a = [[1, 0, 0, 0, 0], - [0, 10.5, 0, -250.5, 0], - [0, 0, .015, 0, 0], - [0, 250.5, 0, -280, 0], - [0, 0, 0, 0, 12]] - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_read_symmetric(self): - """read a symmetric matrix""" - fn = mktemp() - f = open(fn,'w') - f.write(_symmetric_example) - f.close() - assert_equal(mminfo(fn),(5,5,7,'coordinate','real','symmetric')) - a = [[1, 0, 0, 0, 0], - [0, 10.5, 0, 250.5, 0], - [0, 0, .015, 0, 0], - [0, 250.5, 0, -280, 8], - [0, 0, 0, 8, 12]] - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_read_symmetric_pattern(self): - """read a symmetric pattern matrix""" - fn = mktemp() - f = open(fn,'w') - f.write(_symmetric_pattern_example) - f.close() - 
assert_equal(mminfo(fn),(5,5,7,'coordinate','pattern','symmetric')) - a = [[1, 0, 0, 0, 0], - [0, 1, 0, 1, 0], - [0, 0, 1, 0, 0], - [0, 1, 0, 1, 1], - [0, 0, 0, 1, 1]] - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_empty_write_read(self): - #http://projects.scipy.org/scipy/ticket/883 - - b = scipy.sparse.coo_matrix((10,10)) - fn = mktemp() - mmwrite(fn,b) - - assert_equal(mminfo(fn),(10,10,0,'coordinate','real','general')) - a = b.todense() - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - - def test_real_write_read(self): - I = array([0, 0, 1, 2, 3, 3, 3, 4]) - J = array([0, 3, 1, 2, 1, 3, 4, 4]) - V = array([ 1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0 ]) - - b = scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) - - fn = mktemp() - mmwrite(fn,b) - - assert_equal(mminfo(fn),(5,5,8,'coordinate','real','general')) - a = b.todense() - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_complex_write_read(self): - I = array([0, 0, 1, 2, 3, 3, 3, 4]) - J = array([0, 3, 1, 2, 1, 3, 4, 4]) - V = array([ 1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, - 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) - - b = scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) - - fn = mktemp() - mmwrite(fn,b) - - assert_equal(mminfo(fn),(5,5,8,'coordinate','complex','general')) - a = b.todense() - b = mmread(fn).todense() - assert_array_almost_equal(a,b) - - def test_sparse_formats(self): - mats = [] - - I = array([0, 0, 1, 2, 3, 3, 3, 4]) - J = array([0, 3, 1, 2, 1, 3, 4, 4]) - - V = array([ 1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0 ]) - mats.append( scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) ) - - V = array([ 1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, - 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) - mats.append( scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) ) - - for mat in mats: - expected = mat.todense() - for fmt in ['csr','csc','coo']: - fn = mktemp() - mmwrite(fn, mat.asformat(fmt)) - - 
result = mmread(fn).todense() - assert_array_almost_equal(result, expected) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/io/tests/test_netcdf.py b/scipy-0.10.1/scipy/io/tests/test_netcdf.py deleted file mode 100644 index 9b66b87c60..0000000000 --- a/scipy-0.10.1/scipy/io/tests/test_netcdf.py +++ /dev/null @@ -1,149 +0,0 @@ -''' Tests for netcdf ''' - -import os -from os.path import join as pjoin, dirname -import shutil -import tempfile -import time -import sys -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from StringIO import StringIO as BytesIO -from glob import glob - -import numpy as np -from numpy.compat import asbytes - -from scipy.io.netcdf import netcdf_file - -from nose.tools import assert_true, assert_false, assert_equal, assert_raises - -TEST_DATA_PATH = pjoin(dirname(__file__), 'data') - -N_EG_ELS = 11 # number of elements for example variable -VARTYPE_EG = 'b' # var type for example variable - - -def make_simple(*args, **kwargs): - f = netcdf_file(*args, **kwargs) - f.history = 'Created for a test' - f.createDimension('time', N_EG_ELS) - time = f.createVariable('time', VARTYPE_EG, ('time',)) - time[:] = np.arange(N_EG_ELS) - time.units = 'days since 2008-01-01' - f.flush() - return f - - -def gen_for_simple(ncfileobj): - ''' Generator for example fileobj tests ''' - yield assert_equal, ncfileobj.history, asbytes('Created for a test') - time = ncfileobj.variables['time'] - yield assert_equal, time.units, asbytes('days since 2008-01-01') - yield assert_equal, time.shape, (N_EG_ELS,) - yield assert_equal, time[-1], N_EG_ELS-1 - - -def test_read_write_files(): - # test round trip for example file - cwd = os.getcwd() - try: - tmpdir = tempfile.mkdtemp() - os.chdir(tmpdir) - f = make_simple('simple.nc', 'w') - f.close() - # To read the NetCDF file we just created:: - f = netcdf_file('simple.nc') - # Using mmap is the default - yield assert_true, f.use_mmap - for testargs in gen_for_simple(f): - yield 
testargs - f.close() - # Now without mmap - f = netcdf_file('simple.nc', mmap=False) - # Using mmap is the default - yield assert_false, f.use_mmap - for testargs in gen_for_simple(f): - yield testargs - f.close() - # To read the NetCDF file we just created, as file object, no - # mmap. When n * n_bytes(var_type) is not divisible by 4, this - # raised an error in pupynere 1.0.12 and scipy rev 5893, because - # calculated vsize was rounding up in units of 4 - see - # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html - fobj = open('simple.nc', 'rb') - f = netcdf_file(fobj) - # by default, don't use mmap for file-like - yield assert_false, f.use_mmap - for testargs in gen_for_simple(f): - yield testargs - f.close() - except: - os.chdir(cwd) - shutil.rmtree(tmpdir) - raise - os.chdir(cwd) - shutil.rmtree(tmpdir) - - -def test_read_write_sio(): - eg_sio1 = BytesIO() - f1 = make_simple(eg_sio1, 'w') - str_val = eg_sio1.getvalue() - f1.close() - eg_sio2 = BytesIO(str_val) - f2 = netcdf_file(eg_sio2) - for testargs in gen_for_simple(f2): - yield testargs - f2.close() - # Test that error is raised if attempting mmap for sio - eg_sio3 = BytesIO(str_val) - yield assert_raises, ValueError, netcdf_file, eg_sio3, 'r', True - # Test 64-bit offset write / read - eg_sio_64 = BytesIO() - f_64 = make_simple(eg_sio_64, 'w', version=2) - str_val = eg_sio_64.getvalue() - f_64.close() - eg_sio_64 = BytesIO(str_val) - f_64 = netcdf_file(eg_sio_64) - for testargs in gen_for_simple(f_64): - yield testargs - yield assert_equal, f_64.version_byte, 2 - # also when version 2 explicitly specified - eg_sio_64 = BytesIO(str_val) - f_64 = netcdf_file(eg_sio_64, version=2) - for testargs in gen_for_simple(f_64): - yield testargs - yield assert_equal, f_64.version_byte, 2 - - -def test_read_example_data(): - # read any example data files - for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')): - f = netcdf_file(fname, 'r') - f = netcdf_file(fname, 'r', mmap=False) - -def 
test_itemset_no_segfault_on_readonly(): - # Regression test for ticket #1202. - # Open the test file in read-only mode. - filename = pjoin(TEST_DATA_PATH, 'example_1.nc') - f = netcdf_file(filename, 'r') - time_var = f.variables['time'] - # time_var.assignValue(42) should raise a RuntimeError--not seg. fault! - assert_raises(RuntimeError, time_var.assignValue, 42) - -def test_write_invalid_dtype(): - dtypes = ['int64', 'uint64'] - if np.dtype('int').itemsize == 8: # 64-bit machines - dtypes.append('int') - if np.dtype('uint').itemsize == 8: # 64-bit machines - dtypes.append('uint') - - f = netcdf_file(BytesIO(), 'w') - f.createDimension('time', N_EG_ELS) - for dt in dtypes: - yield assert_raises, ValueError, \ - f.createVariable, 'time', dt, ('time',) - f.close() - diff --git a/scipy-0.10.1/scipy/io/tests/test_wavfile.py b/scipy-0.10.1/scipy/io/tests/test_wavfile.py deleted file mode 100644 index 8fb8530a47..0000000000 --- a/scipy-0.10.1/scipy/io/tests/test_wavfile.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import tempfile -import numpy as np - -from numpy.testing import assert_equal, assert_, assert_raises, assert_array_equal -from scipy.io import wavfile - -def datafile(fn): - return os.path.join(os.path.dirname(__file__), 'data', fn) - -def test_read_1(): - rate, data = wavfile.read(datafile('test-44100-le-1ch-4bytes.wav')) - assert_equal(rate, 44100) - assert_(np.issubdtype(data.dtype, np.int32)) - assert_equal(data.shape, (4410,)) - -def test_read_2(): - rate, data = wavfile.read(datafile('test-8000-le-2ch-1byteu.wav')) - assert_equal(rate, 8000) - assert_(np.issubdtype(data.dtype, np.uint8)) - assert_equal(data.shape, (800, 2)) - -def test_read_fail(): - assert_raises(ValueError, wavfile.read, datafile('example_1.nc')) - -def _check_roundtrip(rate, dtype, channels): - fd, tmpfile = tempfile.mkstemp(suffix='.wav') - try: - os.close(fd) - - data = np.random.rand(100, channels) - if channels == 1: - data = data[:,0] - data = (data*128).astype(dtype) - - 
wavfile.write(tmpfile, rate, data) - rate2, data2 = wavfile.read(tmpfile) - - assert_equal(rate, rate2) - assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype) - assert_array_equal(data, data2) - finally: - os.unlink(tmpfile) - -def test_write_roundtrip(): - for signed in ('i', 'u'): - for size in (1, 2, 4, 8): - if size == 1 and signed == 'i': - # signed 8-bit integer PCM is not allowed - continue - for endianness in ('>', '<'): - if size == 1 and endianness == '<': - continue - for rate in (8000, 32000): - for channels in (1, 2, 5): - dt = np.dtype('%s%s%d' % (endianness, signed, size)) - yield _check_roundtrip, rate, dt, channels diff --git a/scipy-0.10.1/scipy/io/wavfile.py b/scipy-0.10.1/scipy/io/wavfile.py deleted file mode 100644 index c83da19e4f..0000000000 --- a/scipy-0.10.1/scipy/io/wavfile.py +++ /dev/null @@ -1,181 +0,0 @@ -""" -Module to read / write wav files using numpy arrays - -Functions ---------- -`read`: Return the sample rate (in samples/sec) and data from a WAV file. - -`write`: Write a numpy array as a WAV file. 
- -""" -import numpy -from numpy.compat import asbytes -import struct -import warnings - -class WavFileWarning(UserWarning): - pass - -_big_endian = False - -# assumes file pointer is immediately -# after the 'fmt ' id -def _read_fmt_chunk(fid): - if _big_endian: - fmt = '>' - else: - fmt = '<' - res = struct.unpack(fmt+'ihHIIHH',fid.read(20)) - size, comp, noc, rate, sbytes, ba, bits = res - if (comp != 1 or size > 16): - warnings.warn("Unfamiliar format bytes", WavFileWarning) - if (size>16): - fid.read(size-16) - return size, comp, noc, rate, sbytes, ba, bits - -# assumes file pointer is immediately -# after the 'data' id -def _read_data_chunk(fid, noc, bits): - if _big_endian: - fmt = '>i' - else: - fmt = ' 1: - data = data.reshape(-1,noc) - else: - bytes = bits//8 - if _big_endian: - dtype = '>i%d' % bytes - else: - dtype = ' 1: - data = data.reshape(-1,noc) - return data - -def _read_riff_chunk(fid): - global _big_endian - str1 = fid.read(4) - if str1 == asbytes('RIFX'): - _big_endian = True - elif str1 != asbytes('RIFF'): - raise ValueError("Not a WAV file.") - if _big_endian: - fmt = '>I' - else: - fmt = '' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'): - data = data.byteswap() - data.tofile(fid) - # Determine file size and place it in correct - # position at start of the file. - size = fid.tell() - fid.seek(4) - fid.write(struct.pack('axpy(n,a,x,incx,y,incy) - - ! z = axpy(a,x,y,n=len(x)/abs(incx),incx=1,incy=incx,overwrite_y=0) - ! Calculate z = a*x+y, where a is scalar. 
- - fortranname cblas_axpy - - callstatement (*f2py_func)(n,<,,&,&>a,x,incx,y,incy); - callprotoargument const int,const <,,*,*>,const *,const int,*,const int - - intent(c) - intent(c) axpy - - integer optional,intent(in),depend(x,incx) :: n = len(x)/abs(incx) - optional,intent(in):: a=<1.0,\0,(1.0\,0.0),\2> - dimension(n),intent(in) :: x - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - dimension(n),depend(x),check(len(x)==len(y)) :: y - intent(in,out,copy,out=z) :: y - integer optional, intent(in),depend(incx) ,check(incy>0||incy<0) :: incy = incx - -end subroutine axpy diff --git a/scipy-0.10.1/scipy/lib/blas/fblas.pyf.src b/scipy-0.10.1/scipy/lib/blas/fblas.pyf.src deleted file mode 100644 index b63b743c0d..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/fblas.pyf.src +++ /dev/null @@ -1,18 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! - - -python module fblas - interface - - include 'fblas_l1.pyf.src' - include 'fblas_l2.pyf.src' - include 'fblas_l3.pyf.src' - - end interface -end python module fblas diff --git a/scipy-0.10.1/scipy/lib/blas/fblas_l1.pyf.src b/scipy-0.10.1/scipy/lib/blas/fblas_l1.pyf.src deleted file mode 100644 index 78fe4295a2..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/fblas_l1.pyf.src +++ /dev/null @@ -1,369 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN LEVEL 1 BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! -! rotg, rotmg, rot, rotm -! swap, scal, copy, axpy -! dot, dotu, dotc -! nrm2, asum, amax, iamax -! -! Not Implemented: NONE -! -! NOTE: Avoiding wrappers hack does not work under 64-bit Gentoo system -! with single precision routines, so they are removed. - -! Level 1 BLAS - -subroutine rotg(a,b,c,s) - ! a,b are elements of a direction vector of rotated x-axis. - - ! if abs(a) + abs(b)>0: - ! roe = abs(a)*,*,*,* - - ! 
XXX: a and b get new values. Are they relevant? - intent(in) :: a - intent(in) :: b - intent(out,out=c) :: c - intent(out,out=s) :: s - -end subroutine rotg - - -! -subroutine rotmg(d1,d2,x1,y1,param) - ! XXX: Could one give a geometrical meaning to the parameters d1,d2,x1,y1? - - ! Construct matrix H such that (H * (sqrt(d1)*x1,sqrt(d2)*y1)^T)_2 = 0. - ! H = [[1,0],[0,1]] if param[0]==-2 - ! H = [[param[1],param[3]],[param[2],param[4]]] if param[0]==-1 - ! H = [[1,param[3]],[param[2],1]] if param[0]==0 - ! H = [[param[1],1],[-1,param[4]]] if param[0]==1 - - callstatement { (*f2py_func)(&d1,&d2,&x1,&y1,param); } - callprotoargument *,*,*,*,* - - intent(in) :: d1 - intent(in) :: d2 - intent(in) :: x1 - intent(in) :: y1 - intent(out), dimension(5) :: param -end subroutine rotmg - - -subroutine <,,s,d>rot(n,x,offx,incx,y,offy,incy,c,s) - - ! Apply plane rotation - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy,&c,&s) - callprotoargument int*,*,int*,*,int*,*,* - - dimension(*),intent(in,out,copy) :: x,y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - - intent(in) :: c - intent(in) :: s -end subroutine <,,s,d>rot - - -subroutine rotm(n,x,offx,incx,y,offy,incy,param) - - ! 
Apply modified plane rotation - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy,param) - callprotoargument int*,*,int*,*,int*,* - - dimension(*),intent(in,out,copy) :: x,y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - - dimension(5),intent(in) :: param -end subroutine rotm - - -subroutine swap(n,x,offx,incx,y,offy,incy) - - ! Swap two arrays: x <-> y - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy) - callprotoargument int*,*,int*,*,int* - - dimension(*),intent(in,out) :: x,y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine swap - -subroutine scal(n,a,x,offx,incx) - - ! Calculate y = a*x - - intent(in):: a - callstatement (*f2py_func)(&n,&a,x+offx,&incx) - callprotoargument int*,*,*,int* - - dimension(*),intent(in,out) :: x - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end subroutine scal - - -subroutine <_prefix2=cs,zd>scal(n,a,x,offx,incx) - - ! Calculate y = a*x - - intent(in):: a - - callstatement (*f2py_func)(&n,&a,x+offx,&incx) - callprotoargument int*,*,*,int* - - dimension(*),intent(in,out,copy) :: x - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end subroutine <_prefix2>scal - - -subroutine copy(n,x,offx,incx,y,offy,incy) - - ! 
Copy y <- x - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy) - callprotoargument int*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in,out) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine copy - -subroutine axpy(n,a,x,offx,incx,y,offy,incy) - - ! Calculate z = a*x+y, where a is scalar. - - callstatement (*f2py_func)(&n,&a,x+offx,&incx,y+offy,&incy) - callprotoargument int*,*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in,out,out=z) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - - optional, intent(in):: a=<1.0,\0,(1.0\,0.0),\2> - -end subroutine axpy - -function dot(n,x,offx,incx,y,offy,incy) result (xy) - - dot,xy - - callstatement (*f2py_func)(&dot,&n,x+offx,&incx,y+offy,&incy) - callprotoargument *,int*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end function dot - -! 
-subroutine dotu(xy,n,x,offx,incx,y,offy,incy) - - intent(out) :: xy - fortranname wdotu - - callstatement (*f2py_func)(&xy,&n,x+offx,&incx,y+offy,&incy) - callprotoargument *,int*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine dotu - -subroutine dotc(xy,n,x,offx,incx,y,offy,incy) - - intent (out) :: xy - fortranname wdotc - - callstatement (*f2py_func)(&xy,&n,x+offx,&incx,y+offy,&incy) - callprotoargument *,int*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine dotc - -! 
-function nrm2(n,x,offx,incx) result(n2) - - nrm2, n2 - - callstatement (*f2py_func)(&nrm2, &n,x+offx,&incx) - callprotoargument *,int*,*,int* - - dimension(*),intent(in) :: x - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end function nrm2 - -function asum(n,x,offx,incx) result (s) - - asum,s - - callstatement (*f2py_func)(&asum,&n,x+offx,&incx) - callprotoargument *,int*,*,int* - - dimension(*),intent(in) :: x - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end function asum - -function iamax(n,x,offx,incx) result(k) - - ! This is to avoid Fortran wrappers. - integer iamax,k - fortranname F_FUNC(iamax,IAMAX) - intent(c) iamax - - callstatement iamax_return_value = (*f2py_func)(&n,x+offx,&incx) - 1 - callprotoargument int*,*,int* - - dimension(*),intent(in) :: x - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end function iamax - diff --git a/scipy-0.10.1/scipy/lib/blas/fblas_l2.pyf.src b/scipy-0.10.1/scipy/lib/blas/fblas_l2.pyf.src deleted file mode 100644 index 27a116c2c1..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/fblas_l2.pyf.src +++ /dev/null @@ -1,119 +0,0 @@ -! -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN LEVEL 2 BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! -! gemv, hemv, symv, trmv, ger, geru, gerc -! -! Not implemented: -! gbmv, hbmv, hpmv, sbmv, spmv, tbmv, tpmv, trsv, tbsv, tpsv, -! her, hpr, her2, hpr2, syr, spr, syr2, spr2 -! -!XXX: make beta and y optional in hemv,symv similarly to gemv - -subroutine gemv(m,n,alpha,a,x,beta,y,offx,incx,offy,incy,trans,rows,cols,ly) - ! y = gemv(alpha,a,x,beta=0,y=0,offx=0,incx=1,offy=0,incy=0,trans=0) - ! 
Calculate y <- alpha * op(A) * x + beta * y - - callstatement (*f2py_func)((trans?(trans==2?"C":"T"):"N"),&m,&n,&alpha,a,&m,x+offx,&incx,&beta,y+offy,&incy) - callprotoargument char*,int*,int*,*,*,int*,*,int*,*,*,int* - - integer optional,intent(in),check(trans>=0 && trans <=2) :: trans = 0 - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - intent(in) :: alpha - intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2> - - dimension(*),intent(in) :: x - dimension(ly),intent(in,copy,out),depend(ly),optional :: y - integer intent(hide),depend(incy,rows,offy) :: ly = (y_capi==Py_None?1+offy+(rows-1)*abs(incy):-1) - dimension(m,n),intent(in) :: a - integer depend(a),intent(hide):: m = shape(a,0) - integer depend(a),intent(hide):: n = shape(a,1) - - integer optional,intent(in) :: offx=0 - integer optional,intent(in) :: offy=0 - check(offx>=0 && offxoffx+(cols-1)*abs(incx)) :: x - depend(offx,cols,incx) :: x - - check(offy>=0 && offyoffy+(rows-1)*abs(incy)) :: y - depend(offy,rows,incy) :: y - - integer depend(m,n,trans),intent(hide) :: rows = (trans?n:m) - integer depend(m,n,trans),intent(hide) :: cols = (trans?m:n) - -end subroutine gemv - -subroutine (n,alpha,a,x,beta,y,offx,incx,offy,incy,lower,ly) - ! 
Calculate y <- alpha * A * x + beta * y, A is symmmetric/hermitian - - callstatement (*f2py_func)((lower?"L":"U"),&n,&alpha,a,&n,x+offx,&incx,&beta,y+offy,&incy) - callprotoargument char*,int*,*,*,int*,*,int*,*,*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - intent(in) :: alpha - intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2> - - dimension(*),intent(in) :: x - dimension(ly),intent(in,copy,out),depend(ly),optional :: y - integer intent(hide),depend(incy,n,offy) :: ly = (y_capi==Py_None?1+offy+(n-1)*abs(incy):-1) - dimension(n,n),intent(in),check(shape(a,0)==shape(a,1)) :: a - integer depend(a),intent(hide):: n = shape(a,0) - - integer optional,intent(in) :: offx=0 - integer optional,intent(in) :: offy=0 - check(offx>=0 && offxoffx+(n-1)*abs(incx)) :: x - depend(offx,n,incx) :: x - - check(offy>=0 && offyoffy+(n-1)*abs(incy)) :: y - depend(offy,n,incy) :: y - -end subroutine - -subroutine trmv(n,a,x,offx,incx,lower,trans,unitdiag) - ! Calculate x <- op(A) * x, A is triangular - - callstatement (*f2py_func)((lower?"L":"U"),(trans?(trans==2?"C":"T"):"N"),(unitdiag?"U":"N"),&n,a,&n,x+offx,&incx) - callprotoargument char*,char*,char*,int*,*,int*,*,int* - - integer optional,intent(in),check(trans>=0 && trans <=2) :: trans = 0 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - - dimension(*),intent(in,out,copy) :: x - dimension(n,n),intent(in),check(shape(a,0)==shape(a,1)) :: a - integer depend(a),intent(hide):: n = shape(a,0) - - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offxoffx+(n-1)*abs(incx)) :: n - depend(x,offx,incx) :: n - -end subroutine trmv - -! -subroutine ger<,,u,u,c,c>(m,n,alpha,x,incx,y,incy,a,lda) -! 
a = ger(alpha,x,y,incx=1,incy=1,a=0,overwrite_x=1,overwrite_y=1,overwrite_a=0) -! Calculate a <- alpha*x*y^T + a -! Calculate a <- alpha*x*y^H + a - integer intent(hide),depend(x) :: m = len(x) - integer intent(hide),depend(y) :: n = len(y) - intent(in) :: alpha - dimension(m),intent(in,overwrite) :: x - integer optional,intent(in),check(incx==1||incx==-1) :: incx = 1 - dimension(n),intent(in,overwrite) :: y - integer optional,intent(in),check(incy==1||incy==-1) :: incy = 1 - dimension(m,n),intent(in,out,copy),optional :: a = <0.0,\0,(0.0\,0.0),\2,\2,\2> - integer intent(hide), depend(m) :: lda=m -end subroutine ger<,,u,u,c,c> - diff --git a/scipy-0.10.1/scipy/lib/blas/fblas_l3.pyf.src b/scipy-0.10.1/scipy/lib/blas/fblas_l3.pyf.src deleted file mode 100644 index fab99defa8..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/fblas_l3.pyf.src +++ /dev/null @@ -1,40 +0,0 @@ -! -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN LEVEL 3 BLAS functions. -! -! Author: Pearu Peterson -! Created: April 2002 -! -! gemm -! -! Not Implemented: -! symm, hemm, syrk, herk, syr2k, her2k, trmm, trsm -! - -subroutine gemm(m,n,k,alpha,a,b,beta,c,trans_a,trans_b,lda,ka,ldb,kb) - ! c = gemm(alpha,a,b,beta=0,c=0,trans_a=0,trans_b=0,overwrite_c=0) - ! 
Calculate C <- alpha * op(A) * op(B) + beta * C - - callstatement (*f2py_func)((trans_a?(trans_a==2?"C":"T"):"N"),(trans_b?(trans_b==2?"C":"T"):"N"),&m,&n,&k,&alpha,a,&lda,b,&ldb,&beta,c,&m) - callprotoargument char*,char*,int*,int*,int*,*,*,int*,*,int*,*,*,int* - - integer optional,intent(in),check(trans_a>=0 && trans_a <=2) :: trans_a = 0 - integer optional,intent(in),check(trans_b>=0 && trans_b <=2) :: trans_b = 0 - intent(in) :: alpha - intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2> - - dimension(lda,ka),intent(in) :: a - dimension(ldb,kb),intent(in) :: b - dimension(m,n),intent(in,out,copy),depend(m,n),optional :: c - check(shape(c,0)==m && shape(c,1)==n) :: c - - integer depend(a),intent(hide) :: lda = shape(a,0) - integer depend(a),intent(hide) :: ka = shape(a,1) - integer depend(b),intent(hide) :: ldb = shape(b,0) - integer depend(b),intent(hide) :: kb = shape(b,1) - - integer depend(a,trans_a,ka,lda),intent(hide):: m = (trans_a?ka:lda) - integer depend(a,trans_a,ka,lda),intent(hide):: k = (trans_a?lda:ka) - integer depend(b,trans_b,kb,ldb,k),intent(hide),check(trans_b?kb==k:ldb==k):: n = (trans_b?ldb:kb) - - -end subroutine gemm diff --git a/scipy-0.10.1/scipy/lib/blas/fblaswrap.f.src b/scipy-0.10.1/scipy/lib/blas/fblaswrap.f.src deleted file mode 100644 index f0e16809a4..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/fblaswrap.f.src +++ /dev/null @@ -1,11 +0,0 @@ -c - subroutine wdot (r, n, x, incx, y, incy) - external dot - dot, r - integer n - x (*) - integer incx - y (*) - integer incy - r = dot (n, x, incx, y, incy) - end diff --git a/scipy-0.10.1/scipy/lib/blas/fblaswrap_veclib_c.c.src b/scipy-0.10.1/scipy/lib/blas/fblaswrap_veclib_c.c.src deleted file mode 100644 index 17fb7feeb5..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/fblaswrap_veclib_c.c.src +++ /dev/null @@ -1,19 +0,0 @@ -#include -#include - -//#define WRAP_F77(a) wcblas_##a##_ -#define WRAP_F77(a) w##a##_ - -/**begin repeat -#p2=c,z,c,z# -#s2=u,u,c,c# -#ctype2=complex,double 
complex,complex,double complex# -*/ - -void WRAP_F77(@p2@dot@s2@)(@ctype2@ *dot@s2@, const int *N, const @ctype2@ *X, const int *incX, const @ctype2@ *Y, const int *incY) -{ - cblas_@p2@dot@s2@_sub(*N, X, *incX, Y, *incY, dot@s2@); -} - -/**end repeat**/ - diff --git a/scipy-0.10.1/scipy/lib/blas/info.py b/scipy-0.10.1/scipy/lib/blas/info.py deleted file mode 100644 index 9b43299920..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/info.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Wrappers to BLAS library -======================== - -fblas -- wrappers for Fortran [*] BLAS routines -cblas -- wrappers for ATLAS BLAS routines -get_blas_funcs -- query for wrapper functions. - -[*] If ATLAS libraries are available then Fortran routines - actually use ATLAS routines and should perform equally - well to ATLAS routines. - -Module fblas -++++++++++++ - -In the following all function names are shown without type prefixes. - -Level 1 routines ----------------- - - c,s = rotg(a,b) - param = rotmg(d1,d2,x1,y1) - x,y = rot(x,y,c,s,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1,overwrite_x=0,overwrite_y=0) - x,y = rotm(x,y,param,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1,overwrite_x=0,overwrite_y=0) - x,y = swap(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1) - x = scal(a,x,n=(len(x)-offx)/abs(incx),offx=0,incx=1) - y = copy(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1) - y = axpy(x,y,n=(len(x)-offx)/abs(incx),a=1.0,offx=0,incx=1,offy=0,incy=1) - xy = dot(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1) - xy = dotu(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1) - xy = dotc(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1) - n2 = nrm2(x,n=(len(x)-offx)/abs(incx),offx=0,incx=1) - s = asum(x,n=(len(x)-offx)/abs(incx),offx=0,incx=1) - k = amax(x,n=(len(x)-offx)/abs(incx),offx=0,incx=1) - - Prefixes: - rotg,swap,copy,axpy: s,d,c,z - amax: is,id,ic,iz - asum,nrm2: s,d,sc,dz - scal: s,d,c,z,sc,dz - rotm,rotmg,dot: s,d - 
dotu,dotc: c,z - rot: s,d,cs,zd - -Level 2 routines ----------------- - - y = gemv(alpha,a,x,beta=0.0,y=,offx=0,incx=1,offy=0,incy=1,trans=0,overwrite_y=0) - y = symv(alpha,a,x,beta=0.0,y=,offx=0,incx=1,offy=0,incy=1,lower=0,overwrite_y=0) - y = hemv(alpha,a,x,beta=(0.0, 0.0),y=,offx=0,incx=1,offy=0,incy=1,lower=0,overwrite_y=0) - x = trmv(a,x,offx=0,incx=1,lower=0,trans=0,unitdiag=0,overwrite_x=0) - a = ger(alpha,x,y,incx=1,incy=1,a=0.0,overwrite_x=1,overwrite_y=1,overwrite_a=0) - a = ger{u|c}(alpha,x,y,incx=1,incy=1,a=(0.0,0.0),overwrite_x=1,overwrite_y=1,overwrite_a=0) - - Prefixes: - gemv, trmv: s,d,c,z - symv,ger: s,d - hemv,geru,gerc: c,z - -Level 3 routines ----------------- - - c = gemm(alpha,a,b,beta=0.0,c=,trans_a=0,trans_b=0,overwrite_c=0) - - Prefixes: - gemm: s,d,c,z - -Module cblas -++++++++++++ - -In the following all function names are shown without type prefixes. - -Level 1 routines ----------------- - - z = axpy(x,y,n=len(x)/abs(incx),a=1.0,incx=1,incy=incx,overwrite_y=0) - - Prefixes: - axpy: s,d,c,z -""" diff --git a/scipy-0.10.1/scipy/lib/blas/scons_support.py b/scipy-0.10.1/scipy/lib/blas/scons_support.py deleted file mode 100644 index f78d0474cf..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/scons_support.py +++ /dev/null @@ -1,27 +0,0 @@ -from os.path import join as pjoin, splitext, basename as pbasename - -def generate_interface_emitter(target, source, env): - base = str(target[0]) - return (['%s.pyf' % base], source) - -def do_generate_fake_interface(target, source, env): - """Generate a (fake) .pyf file from another pyf file (!).""" - # XXX: do this correctly - target_name = str(target[0]) - source_name = str(source[0]) - - # XXX handle skip names - name = splitext(pbasename(target_name))[0] - #generate_interface(name, source_name, target_name) - - f = open(target_name, 'w') - f.write('python module '+name+'\n') - f.write('usercode void empty_module(void) {}\n') - f.write('interface\n') - f.write('subroutine empty_module()\n') - 
f.write('intent(c) empty_module\n') - f.write('end subroutine empty_module\n') - f.write('end interface\nend python module'+name+'\n') - f.close() - - return 0 diff --git a/scipy-0.10.1/scipy/lib/blas/setup.py b/scipy-0.10.1/scipy/lib/blas/setup.py deleted file mode 100755 index 5c92fa1619..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/setup.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -import re -from distutils.dep_util import newer_group, newer -from glob import glob -from os.path import join - -#------------------- -# To skip wrapping single precision atlas/lapack/blas routines, set -# the following flag to True: -skip_single_routines = 0 - -# Some OS distributions (e.g. Redhat, Suse) provide a blas library that -# is built using incomplete blas sources that come with lapack tar-ball. -# In order to use such a library in scipy.linalg, the following flag -# must be set to True: -using_lapack_blas = 0 - -#-------------------- - -def needs_cblas_wrapper(info): - """Returns true if needs c wrapper around cblas for calling from - fortran.""" - r_accel = re.compile("Accelerate") - r_vec = re.compile("vecLib") - res = False - try: - tmpstr = info['extra_link_args'] - for i in tmpstr: - if r_accel.search(i) or r_vec.search(i): - res = True - except KeyError: - pass - - return res - -tmpl_empty_cblas_pyf = ''' -python module cblas - usercode void empty_module(void) {} - interface - subroutine empty_module() - intent(c) empty_module - end subroutine empty_module - end interface -end python module cblas -''' - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('blas',parent_package,top_path) - - blas_opt = get_info('blas_opt',notfound_action=2) - - atlas_version = ([v[3:-3] for k,v in blas_opt.get('define_macros',[]) \ - if k=='ATLAS_INFO']+[None])[0] - if atlas_version: - print ('ATLAS version: %s' % 
atlas_version) - - target_dir = '' - skip_names = {'cblas':[],'fblas':[]} - if skip_single_routines: - target_dir = 'dbl' - skip_names['cblas'].extend('saxpy caxpy'.split()) - skip_names['fblas'].extend(skip_names['cblas']) - skip_names['fblas'].extend(\ - 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ - ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\ - ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\ - ' sgemm cgemm'.split()) - - if using_lapack_blas: - target_dir = join(target_dir,'blas') - skip_names['fblas'].extend(\ - 'drotmg srotmg drotm srotm'.split()) - - depends = [__file__, 'fblas_l?.pyf.src', 'fblas.pyf.src','fblaswrap.f.src', - 'fblaswrap_veclib_c.c.src'] - # fblas: - if needs_cblas_wrapper(blas_opt): - sources = ['fblas.pyf.src', 'fblaswrap_veclib_c.c.src'], - else: - sources = ['fblas.pyf.src','fblaswrap.f.src'] - config.add_extension('fblas', - sources = sources, - depends = depends, - f2py_options = ['skip:']+skip_names['fblas']+[':'], - extra_info = blas_opt - ) - # cblas: - def get_cblas_source(ext, build_dir): - name = ext.name.split('.')[-1] - assert name=='cblas', repr(name) - if atlas_version is None: - target = join(build_dir,target_dir,'cblas.pyf') - from distutils.dep_util import newer - if newer(__file__,target): - f = open(target,'w') - f.write(tmpl_empty_cblas_pyf) - f.close() - else: - target = ext.depends[0] - assert os.path.basename(target)=='cblas.pyf.src' - return target - - config.add_extension('cblas', - sources = [get_cblas_source], - depends = ['cblas.pyf.src','cblas_l?.pyf.src'], - f2py_options = ['skip:']+skip_names['cblas']+[':'], - extra_info = blas_opt - ) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/lib/blas/setupscons.py b/scipy-0.10.1/scipy/lib/blas/setupscons.py deleted file mode 100755 index 2df5488797..0000000000 --- 
a/scipy-0.10.1/scipy/lib/blas/setupscons.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('blas',parent_package,top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/lib/blas/tests/test_blas.py b/scipy-0.10.1/scipy/lib/blas/tests/test_blas.py deleted file mode 100644 index 7b3c0d002b..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/tests/test_blas.py +++ /dev/null @@ -1,211 +0,0 @@ -#!/usr/bin/env python -# -# Created by: Pearu Peterson, April 2002 -# - -__usage__ = """ -Build linalg: - python setup.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.lib.blas.test()' -""" - -import math - -from numpy import array -from numpy.testing import assert_equal, assert_almost_equal, \ - assert_array_almost_equal, TestCase, run_module_suite -from scipy.lib.blas import fblas -from scipy.lib.blas import cblas -from scipy.lib.blas import get_blas_funcs - -class TestCBLAS1Simple(TestCase): - - def test_axpy(self): - for p in 'sd': - f = getattr(cblas,p+'axpy',None) - if f is None: continue - assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18]) - for p in 'cz': - f = getattr(cblas,p+'axpy',None) - if f is None: continue - assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18]) - -class TestFBLAS1Simple(TestCase): - - def test_axpy(self): - for p in 'sd': - f = getattr(fblas,p+'axpy',None) - if f is None: continue - assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18]) - for p in 'cz': - f = getattr(fblas,p+'axpy',None) - if f is None: continue - assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18]) - def test_copy(self): - for p in 'sd': - f = 
getattr(fblas,p+'copy',None) - if f is None: continue - assert_array_almost_equal(f([3,4,5],[8]*3),[3,4,5]) - for p in 'cz': - f = getattr(fblas,p+'copy',None) - if f is None: continue - assert_array_almost_equal(f([3,4j,5+3j],[8]*3),[3,4j,5+3j]) - def test_asum(self): - for p in 'sd': - f = getattr(fblas,p+'asum',None) - if f is None: continue - assert_almost_equal(f([3,-4,5]),12) - for p in ['sc','dz']: - f = getattr(fblas,p+'asum',None) - if f is None: continue - assert_almost_equal(f([3j,-4,3-4j]),14) - def test_dot(self): - for p in 'sd': - f = getattr(fblas,p+'dot',None) - if f is None: continue - assert_almost_equal(f([3,-4,5],[2,5,1]),-9) - for p in 'cz': - f = getattr(fblas,p+'dotu',None) - if f is None: continue - assert_almost_equal(f([3j,-4,3-4j],[2,3,1]),-9+2j) - f = getattr(fblas,p+'dotc') - assert_almost_equal(f([3j,-4,3-4j],[2,3j,1]),3-14j) - def test_nrm2(self): - for p in 'sd': - f = getattr(fblas,p+'nrm2',None) - if f is None: continue - assert_almost_equal(f([3,-4,5]),math.sqrt(50)) - for p in ['sc','dz']: - f = getattr(fblas,p+'nrm2',None) - if f is None: continue - assert_almost_equal(f([3j,-4,3-4j]),math.sqrt(50)) - def test_scal(self): - for p in 'sd': - f = getattr(fblas,p+'scal',None) - if f is None: continue - assert_array_almost_equal(f(2,[3,-4,5]),[6,-8,10]) - for p in 'cz': - f = getattr(fblas,p+'scal',None) - if f is None: continue - assert_array_almost_equal(f(3j,[3j,-4,3-4j]),[-9,-12j,12+9j]) - for p in ['cs','zd']: - f = getattr(fblas,p+'scal',None) - if f is None: continue - assert_array_almost_equal(f(3,[3j,-4,3-4j]),[9j,-12,9-12j]) - def test_swap(self): - for p in 'sd': - f = getattr(fblas,p+'swap',None) - if f is None: continue - x,y = [2,3,1],[-2,3,7] - x1,y1 = f(x,y) - assert_array_almost_equal(x1,y) - assert_array_almost_equal(y1,x) - for p in 'cz': - f = getattr(fblas,p+'swap',None) - if f is None: continue - x,y = [2,3j,1],[-2,3,7-3j] - x1,y1 = f(x,y) - assert_array_almost_equal(x1,y) - assert_array_almost_equal(y1,x) - 
def test_amax(self): - for p in 'sd': - f = getattr(fblas,'i'+p+'amax') - assert_equal(f([-2,4,3]),1) - for p in 'cz': - f = getattr(fblas,'i'+p+'amax') - assert_equal(f([-5,4+3j,6]),1) - #XXX: need tests for rot,rotm,rotg,rotmg - -class TestFBLAS2Simple(TestCase): - - def test_gemv(self): - for p in 'sd': - f = getattr(fblas,p+'gemv',None) - if f is None: continue - assert_array_almost_equal(f(3,[[3]],[-4]),[-36]) - assert_array_almost_equal(f(3,[[3]],[-4],3,[5]),[-21]) - for p in 'cz': - f = getattr(fblas,p+'gemv',None) - if f is None: continue - assert_array_almost_equal(f(3j,[[3-4j]],[-4]),[-48-36j]) - assert_array_almost_equal(f(3j,[[3-4j]],[-4],3,[5j]),[-48-21j]) - - def test_ger(self): - - for p in 'sd': - f = getattr(fblas,p+'ger',None) - if f is None: continue - assert_array_almost_equal(f(1,[1, - 2],[3,4]),[[3,4],[6,8]]) - assert_array_almost_equal(f(2,[1, - 2, - 3],[3,4]),[[6,8],[12,16],[18,24]]) - - assert_array_almost_equal(f(1,[1, - 2],[3,4], - a=[[1,2],[3,4]] - ),[[4,6],[9,12]]) - - for p in 'cz': - f = getattr(fblas,p+'geru',None) - if f is None: continue - assert_array_almost_equal(f(1,[1j, - 2],[3,4]),[[3j,4j],[6,8]]) - assert_array_almost_equal(f(-2,[1j, - 2j, - 3j],[3j,4j]),[[6,8],[12,16],[18,24]]) - - for p in 'cz': - f = getattr(fblas,p+'gerc',None) - if f is None: continue - assert_array_almost_equal(f(1,[1j, - 2],[3,4]),[[3j,4j],[6,8]]) - assert_array_almost_equal(f(2,[1j, - 2j, - 3j],[3j,4j]),[[6,8],[12,16],[18,24]]) - -class TestFBLAS3Simple(TestCase): - - def test_gemm(self): - for p in 'sd': - f = getattr(fblas,p+'gemm',None) - if f is None: continue - assert_array_almost_equal(f(3,[3],[-4]),[[-36]]) - assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21]) - assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]]) - assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]]) - for p in 'cz': - f = getattr(fblas,p+'gemm',None) - if f is None: continue - assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]]) - 
assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j]) - assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]]) - assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]]) - - def test_gemm2(self): - for p in 'sdcz': - f = getattr(fblas,p+'gemm',None) - if f is None: continue - assert_array_almost_equal(f(1,[[1,2]],[[3],[4]]),[[11]]) - assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]]) - - -class TestBLAS(TestCase): - - def test_blas(self): - a = array([[1,1,1]]) - b = array([[1],[1],[1]]) - - # get_blas_funcs is deprecated, silence the warning - import warnings - warnings.simplefilter('ignore', DeprecationWarning) - gemm, = get_blas_funcs(('gemm',),(a,b)) - # remove filter again - warnings.filters.pop(0) - - assert_array_almost_equal(gemm(1,a,b),[[3]],15) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/lib/blas/tests/test_fblas.py b/scipy-0.10.1/scipy/lib/blas/tests/test_fblas.py deleted file mode 100644 index bf966bc29a..0000000000 --- a/scipy-0.10.1/scipy/lib/blas/tests/test_fblas.py +++ /dev/null @@ -1,595 +0,0 @@ -# Test interfaces to fortran blas. -# -# The tests are more of interface than they are of the underlying blas. -# Only very small matrices checked -- N=3 or so. -# -# !! Complex calculations really aren't checked that carefully. -# !! Only real valued complex numbers are used in tests. - -from numpy import zeros, transpose, newaxis, shape, float32, float64, \ - complex64, complex128, arange, array, common_type, conjugate -from numpy.testing import assert_equal, assert_array_almost_equal, \ - run_module_suite, TestCase -from scipy.lib.blas import fblas - -#decimal accuracy to require between Python and LAPACK/BLAS calculations -accuracy = 5 - -# Since numpy.dot likely uses the same blas, use this routine -# to check. 
-def matrixmultiply(a, b): - if len(b.shape) == 1: - b_is_vector = True - b = b[:,newaxis] - else: - b_is_vector = False - assert_equal(a.shape[1], b.shape[0]) - c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) - for i in xrange(a.shape[0]): - for j in xrange(b.shape[1]): - s = 0 - for k in xrange(a.shape[1]): - s += a[i,k] * b[k, j] - c[i,j] = s - if b_is_vector: - c = c.reshape((a.shape[0],)) - return c - -################################################## -### Test blas ?axpy - -class BaseAxpy(object): - - # Mixin class to test dtypes - - def test_default_a(self): - x = arange(3.,dtype=self.dtype) - y = arange(3.,dtype=x.dtype) - real_y = x*1.+y - self.blas_func(x,y) - assert_array_almost_equal(real_y,y) - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - y = arange(3.,dtype=x.dtype) - real_y = x*3.+y - self.blas_func(x,y,a=3.) - assert_array_almost_equal(real_y,y) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - y = zeros(3,x.dtype) - y = arange(3.,dtype=x.dtype) - real_y = x[::2]*3.+y - self.blas_func(x,y,a=3.,n=3,incx=2) - assert_array_almost_equal(real_y,y) - - def test_y_stride(self): - x = arange(3.,dtype=self.dtype) - y = zeros(6,x.dtype) - real_y = x*3.+y[::2] - self.blas_func(x,y,a=3.,n=3,incy=2) - assert_array_almost_equal(real_y,y[::2]) - - def test_x_and_y_stride(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - real_y = x[::4]*3.+y[::2] - self.blas_func(x,y,a=3.,n=3,incx=4,incy=2) - assert_array_almost_equal(real_y,y[::2]) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=4,incx=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - - def test_y_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=3,incy=5) - except: # what kind of error should be caught? 
- return - # should catch error and never get here - assert_(0) - -try: - class TestSaxpy(TestCase, BaseAxpy): - blas_func = fblas.saxpy - dtype = float32 -except AttributeError: - class TestSaxpy: pass - -class TestDaxpy(TestCase, BaseAxpy): - blas_func = fblas.daxpy - dtype = float64 - -try: - class TestCaxpy(TestCase, BaseAxpy): - blas_func = fblas.caxpy - dtype = complex64 -except AttributeError: - class TestCaxpy: pass - -class TestZaxpy(TestCase, BaseAxpy): - blas_func = fblas.zaxpy - dtype = complex128 - - -################################################## -### Test blas ?scal - -class BaseScal(object): - - # Mixin class for testing particular dtypes - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - real_x = x*3. - self.blas_func(3.,x) - assert_array_almost_equal(real_x,x) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - real_x = x.copy() - real_x[::2] = x[::2]*array(3.,self.dtype) - self.blas_func(3.,x,n=3,incx=2) - assert_array_almost_equal(real_x,x) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - try: - self.blas_func(2.,x,n=4,incx=5) - except: # what kind of error should be caught? 
- return - # should catch error and never get here - assert_(0) - -try: - class TestSscal(TestCase, BaseScal): - blas_func = fblas.sscal - dtype = float32 -except AttributeError: - class TestSscal: pass - -class TestDscal(TestCase, BaseScal): - blas_func = fblas.dscal - dtype = float64 - -try: - class TestCscal(TestCase, BaseScal): - blas_func = fblas.cscal - dtype = complex64 -except AttributeError: - class TestCscal: pass - -class TestZscal(TestCase, BaseScal): - blas_func = fblas.zscal - dtype = complex128 - - - - -################################################## -### Test blas ?copy - -class BaseCopy(object): - - # Mixin class for testing dtypes - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - y = zeros(shape(x),x.dtype) - self.blas_func(x,y) - assert_array_almost_equal(x,y) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - y = zeros(3,x.dtype) - self.blas_func(x,y,n=3,incx=2) - assert_array_almost_equal(x[::2],y) - - def test_y_stride(self): - x = arange(3.,dtype=self.dtype) - y = zeros(6,x.dtype) - self.blas_func(x,y,n=3,incy=2) - assert_array_almost_equal(x,y[::2]) - - def test_x_and_y_stride(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - self.blas_func(x,y,n=3,incx=4,incy=2) - assert_array_almost_equal(x[::4],y[::2]) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=4,incx=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - - def test_y_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=3,incy=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - - #def test_y_bad_type(self): - ## Hmmm. Should this work? What should be the output. 
- # x = arange(3.,dtype=self.dtype) - # y = zeros(shape(x)) - # self.blas_func(x,y) - # assert_array_almost_equal(x,y) - -try: - class TestScopy(TestCase, BaseCopy): - blas_func = fblas.scopy - dtype = float32 -except AttributeError: - class TestScopy: pass - -class TestDcopy(TestCase, BaseCopy): - blas_func = fblas.dcopy - dtype = float64 - -try: - class TestCcopy(TestCase, BaseCopy): - blas_func = fblas.ccopy - dtype = complex64 -except AttributeError: - class TestCcopy: pass - -class TestZcopy(TestCase, BaseCopy): - blas_func = fblas.zcopy - dtype = complex128 - - -################################################## -### Test blas ?swap - -class BaseSwap(object): - - # Mixin class to implement test objects - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - y = zeros(shape(x),x.dtype) - desired_x = y.copy() - desired_y = x.copy() - self.blas_func(x,y) - assert_array_almost_equal(desired_x,x) - assert_array_almost_equal(desired_y,y) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - y = zeros(3,x.dtype) - desired_x = y.copy() - desired_y = x.copy()[::2] - self.blas_func(x,y,n=3,incx=2) - assert_array_almost_equal(desired_x,x[::2]) - assert_array_almost_equal(desired_y,y) - - def test_y_stride(self): - x = arange(3.,dtype=self.dtype) - y = zeros(6,x.dtype) - desired_x = y.copy()[::2] - desired_y = x.copy() - self.blas_func(x,y,n=3,incy=2) - assert_array_almost_equal(desired_x,x) - assert_array_almost_equal(desired_y,y[::2]) - - def test_x_and_y_stride(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - desired_x = y.copy()[::2] - desired_y = x.copy()[::4] - self.blas_func(x,y,n=3,incx=4,incy=2) - assert_array_almost_equal(desired_x,x[::4]) - assert_array_almost_equal(desired_y,y[::2]) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=4,incx=5) - except: # what kind of error should be caught? 
- return - # should catch error and never get here - assert_(0) - - def test_y_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=3,incy=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - -try: - class TestSswap(TestCase, BaseSwap): - blas_func = fblas.sswap - dtype = float32 -except AttributeError: - class TestSswap: pass - -class TestDswap(TestCase, BaseSwap): - blas_func = fblas.dswap - dtype = float64 - -try: - class TestCswap(TestCase, BaseSwap): - blas_func = fblas.cswap - dtype = complex64 -except AttributeError: - class TestCswap: pass - -class TestZswap(TestCase, BaseSwap): - blas_func = fblas.zswap - dtype = complex128 - -################################################## -### Test blas ?gemv -### This will be a mess to test all cases. - -class BaseGemv(object): - - # Mixin class to test dtypes - - def get_data(self,x_stride=1,y_stride=1): - mult = array(1, dtype = self.dtype) - if self.dtype in [complex64, complex128]: - mult = array(1+1j, dtype = self.dtype) - from numpy.random import normal - alpha = array(1., dtype = self.dtype) * mult - beta = array(1.,dtype = self.dtype) * mult - a = normal(0.,1.,(3,3)).astype(self.dtype) * mult - x = arange(shape(a)[0]*x_stride,dtype=self.dtype) * mult - y = arange(shape(a)[1]*y_stride,dtype=self.dtype) * mult - return alpha,beta,a,x,y - - def test_simple(self): - alpha,beta,a,x,y = self.get_data() - desired_y = alpha*matrixmultiply(a,x)+beta*y - y = self.blas_func(alpha,a,x,beta,y) - assert_array_almost_equal(desired_y,y) - - def test_default_beta_y(self): - alpha,beta,a,x,y = self.get_data() - desired_y = matrixmultiply(a,x) - y = self.blas_func(1,a,x) - assert_array_almost_equal(desired_y,y) - - def test_simple_transpose(self): - alpha,beta,a,x,y = self.get_data() - desired_y = alpha*matrixmultiply(transpose(a),x)+beta*y - y = self.blas_func(alpha,a,x,beta,y,trans=1) - 
assert_array_almost_equal(desired_y,y) - - def test_simple_transpose_conj(self): - alpha,beta,a,x,y = self.get_data() - desired_y = alpha*matrixmultiply(transpose(conjugate(a)),x)+beta*y - y = self.blas_func(alpha,a,x,beta,y,trans=2) - assert_array_almost_equal(desired_y,y) - - def test_x_stride(self): - alpha,beta,a,x,y = self.get_data(x_stride=2) - desired_y = alpha*matrixmultiply(a,x[::2])+beta*y - y = self.blas_func(alpha,a,x,beta,y,incx=2) - assert_array_almost_equal(desired_y,y) - - def test_x_stride_transpose(self): - alpha,beta,a,x,y = self.get_data(x_stride=2) - desired_y = alpha*matrixmultiply(transpose(a),x[::2])+beta*y - y = self.blas_func(alpha,a,x,beta,y,trans=1,incx=2) - assert_array_almost_equal(desired_y,y) - - def test_x_stride_assert(self): - # What is the use of this test? - alpha,beta,a,x,y = self.get_data(x_stride=2) - try: - y = self.blas_func(1,a,x,1,y,trans=0,incx=3) - assert_(0) - except: - pass - try: - y = self.blas_func(1,a,x,1,y,trans=1,incx=3) - assert_(0) - except: - pass - - def test_y_stride(self): - alpha,beta,a,x,y = self.get_data(y_stride=2) - desired_y = y.copy() - desired_y[::2] = alpha*matrixmultiply(a,x)+beta*y[::2] - y = self.blas_func(alpha,a,x,beta,y,incy=2) - assert_array_almost_equal(desired_y,y) - - def test_y_stride_transpose(self): - alpha,beta,a,x,y = self.get_data(y_stride=2) - desired_y = y.copy() - desired_y[::2] = alpha*matrixmultiply(transpose(a),x)+beta*y[::2] - y = self.blas_func(alpha,a,x,beta,y,trans=1,incy=2) - assert_array_almost_equal(desired_y,y) - - def test_y_stride_assert(self): - # What is the use of this test? 
- alpha,beta,a,x,y = self.get_data(y_stride=2) - try: - y = self.blas_func(1,a,x,1,y,trans=0,incy=3) - assert_(0) - except: - pass - try: - y = self.blas_func(1,a,x,1,y,trans=1,incy=3) - assert_(0) - except: - pass - -try: - class TestSgemv(TestCase, BaseGemv): - blas_func = fblas.sgemv - dtype = float32 -except AttributeError: - class TestSgemv: pass - -class TestDgemv(TestCase, BaseGemv): - blas_func = fblas.dgemv - dtype = float64 - -try: - class TestCgemv(TestCase, BaseGemv): - blas_func = fblas.cgemv - dtype = complex64 -except AttributeError: - class TestCgemv: pass - -class TestZgemv(TestCase, BaseGemv): - blas_func = fblas.zgemv - dtype = complex128 - -""" -################################################## -### Test blas ?ger -### This will be a mess to test all cases. - -class BaseGer(TestCase): - - def get_data(self,x_stride=1,y_stride=1): - from numpy.random import normal - alpha = array(1., dtype = self.dtype) - a = normal(0.,1.,(3,3)).astype(self.dtype) - x = arange(shape(a)[0]*x_stride,dtype=self.dtype) - y = arange(shape(a)[1]*y_stride,dtype=self.dtype) - return alpha,a,x,y - - def test_simple(self): - alpha,a,x,y = self.get_data() - # tranpose takes care of Fortran vs. 
C(and Python) memory layout - desired_a = alpha*transpose(x[:,newaxis]*y) + a - self.blas_func(x,y,a) - assert_array_almost_equal(desired_a,a) - - def test_x_stride(self): - alpha,a,x,y = self.get_data(x_stride=2) - desired_a = alpha*transpose(x[::2,newaxis]*y) + a - self.blas_func(x,y,a,incx=2) - assert_array_almost_equal(desired_a,a) - - def test_x_stride_assert(self): - alpha,a,x,y = self.get_data(x_stride=2) - try: - self.blas_func(x,y,a,incx=3) - assert(0) - except: - pass - - def test_y_stride(self): - alpha,a,x,y = self.get_data(y_stride=2) - desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a - self.blas_func(x,y,a,incy=2) - assert_array_almost_equal(desired_a,a) - - def test_y_stride_assert(self): - alpha,a,x,y = self.get_data(y_stride=2) - try: - self.blas_func(a,x,y,incy=3) - assert(0) - except: - pass - -class TestSger(BaseGer): - blas_func = fblas.sger - dtype = float32 - -class TestDger(BaseGer): - blas_func = fblas.dger - dtype = float64 -""" -################################################## -### Test blas ?gerc -### This will be a mess to test all cases. - -""" -class BaseGerComplex(BaseGer): - - def get_data(self,x_stride=1,y_stride=1): - from numpy.random import normal - alpha = array(1+1j, dtype = self.dtype) - a = normal(0.,1.,(3,3)).astype(self.dtype) - a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype) - x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype) - x = x + x * array(1j, dtype = self.dtype) - y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype) - y = y + y * array(1j, dtype = self.dtype) - return alpha,a,x,y - - def test_simple(self): - alpha,a,x,y = self.get_data() - # tranpose takes care of Fortran vs. 
C(and Python) memory layout - a = a * array(0.,dtype = self.dtype) - #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a - desired_a = alpha*transpose(x[:,newaxis]*y) + a - #self.blas_func(x,y,a,alpha = alpha) - fblas.cgeru(x,y,a,alpha = alpha) - assert_array_almost_equal(desired_a,a) - - #def test_x_stride(self): - # alpha,a,x,y = self.get_data(x_stride=2) - # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a - # self.blas_func(x,y,a,incx=2) - # assert_array_almost_equal(desired_a,a) - #def test_y_stride(self): - # alpha,a,x,y = self.get_data(y_stride=2) - # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a - # self.blas_func(x,y,a,incy=2) - # assert_array_almost_equal(desired_a,a) - -class TestCgeru(BaseGerComplex): - - blas_func = fblas.cgeru - dtype = complex64 - - def transform(self,x): - return x - -class TestZgeru(BaseGerComplex): - - blas_func = fblas.zgeru - dtype = complex128 - - def transform(self,x): - return x - -class TestCgerc(BaseGerComplex): - - blas_func = fblas.cgerc - dtype = complex64 - - def transform(self,x): - return conjugate(x) - -class TestZgerc(BaseGerComplex): - - blas_func = fblas.zgerc - dtype = complex128 - - def transform(self,x): - return conjugate(x) -""" - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/lib/lapack/SConscript b/scipy-0.10.1/scipy/lib/lapack/SConscript deleted file mode 100644 index 7b1a4b96bc..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/SConscript +++ /dev/null @@ -1,86 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK,\ - CheckCLAPACK, \ - IsATLAS, GetATLASVersion, \ - CheckF77Clib -from numscons import write_info - -from scons_support import do_generate_fake_interface, \ - generate_interface_emitter - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') -#if os.name == 
'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckCLAPACK' : CheckCLAPACK, - 'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -#-------------- -# Checking Blas -#-------------- -st = config.CheckLAPACK(check_version = 1) -if not st: - raise RuntimeError("no lapack found, necessary for lapack module") - -if IsATLAS(env, 'lapack'): - version = GetATLASVersion(env) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) -else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -if config.CheckCLAPACK(): - has_clapack = 1 -else: - has_clapack = 0 - -config.Finish() -write_info(env) - -#========== -# Build -#========== -env.AppendUnique(F2PYOPTIONS = '--quiet') - -env['BUILDERS']['GenerateFakePyf'] = Builder(action = do_generate_fake_interface, - emitter = generate_interface_emitter) - -#------------ -# flapack -#------------ -yop = env.FromFTemplate('flapack.pyf', 'flapack.pyf.src') -env.NumpyPythonExtension('flapack', source = ['flapack.pyf']) - -#------------ -# clapack -#------------ -if has_clapack: - env.FromFTemplate('clapack.pyf', 'clapack.pyf.src') -else: - env.GenerateFakePyf('clapack', 'clapack.pyf.src') -env.NumpyPythonExtension('clapack', source = 'clapack.pyf') - -#---------------- -# calc_lwork: -#---------------- -calc_src = env.F2py(pjoin('calc_lworkmodule.c'), - source = pjoin('calc_lwork.f')) -env.NumpyPythonExtension('calc_lwork', source = calc_src + ['calc_lwork.f']) - -#-------------- -# Atlas version -#-------------- -env.NumpyPythonExtension('atlas_version', 'atlas_version.c') diff --git a/scipy-0.10.1/scipy/lib/lapack/SConstruct b/scipy-0.10.1/scipy/lib/lapack/SConstruct deleted file mode 100644 index 
a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/lib/lapack/__init__.py b/scipy-0.10.1/scipy/lib/lapack/__init__.py deleted file mode 100644 index e8bf75ca57..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# -# LAPACK wrappers -# - -from info import __doc__ - -__all__ = ['get_lapack_funcs','calc_lwork','flapack','clapack'] - -import calc_lwork - -# The following ensures that possibly missing flavor (C or Fortran) is -# replaced with the available one. If none is available, exception -# is raised at the first attempt to use the resources. - -import flapack -import clapack - -_use_force_clapack = 1 -if hasattr(clapack,'empty_module'): - clapack = flapack - _use_force_clapack = 0 -elif hasattr(flapack,'empty_module'): - flapack = clapack - -_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',.. -_inv_type_conv = {'s':'f','d':'d','c':'F','z':'D'} - -def get_lapack_funcs(names,arrays=(),debug=0,force_clapack=1): - """Return available LAPACK function objects with names. - arrays are used to determine the optimal prefix of - LAPACK routines. - If force_clapack is True then available Atlas routine - is returned for column major storaged arrays with - rowmajor argument set to False. - """ - force_clapack=0 #XXX: Don't set it true! The feature is unreliable - # and may cause incorrect results. - # See test_basic.test_solve.check_20Feb04_bug. 
- - ordering = [] - for i in range(len(arrays)): - t = arrays[i].dtype.char - if t not in _type_conv: - t = 'd' - ordering.append((t,i)) - if ordering: - ordering.sort() - required_prefix = _type_conv[ordering[0][0]] - else: - required_prefix = 'd' - dtypechar = _inv_type_conv[required_prefix] - # Default lookup: - if ordering and arrays[ordering[0][1]].flags['FORTRAN']: - # prefer Fortran code for leading array with column major order - m1,m2 = flapack,clapack - else: - # in all other cases, C code is preferred - m1,m2 = clapack,flapack - if not _use_force_clapack: - force_clapack = 0 - funcs = [] - m1_name = m1.__name__.split('.')[-1] - m2_name = m2.__name__.split('.')[-1] - for name in names: - func_name = required_prefix + name - func = getattr(m1,func_name,None) - if func is None: - func = getattr(m2,func_name) - func.module_name = m2_name - else: - func.module_name = m1_name - if force_clapack and m1 is flapack: - func2 = getattr(m2,func_name,None) - if func2 is not None: - import new - exec _colmajor_func_template % {'func_name':func_name} - func = new.function(func_code,{'clapack_func':func2},func_name) - func.module_name = m2_name - func.__doc__ = func2.__doc__ - func.prefix = required_prefix - func.dtypechar = dtypechar - funcs.append(func) - return tuple(funcs) - -_colmajor_func_template = '''\ -def %(func_name)s(*args,**kws): - if "rowmajor" not in kws: - kws["rowmajor"] = 0 - return clapack_func(*args,**kws) -func_code = %(func_name)s.func_code -''' - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/lib/lapack/atlas_version.c b/scipy-0.10.1/scipy/lib/lapack/atlas_version.c deleted file mode 100644 index 6915a7f937..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/atlas_version.c +++ /dev/null @@ -1,61 +0,0 @@ -#include "Python.h" -#include "numpy/npy_3kcompat.h" - -static PyObject* version(PyObject* self, PyObject* dummy) -{ -#if defined(NO_ATLAS_INFO) - printf("NO ATLAS INFO AVAILABLE\n"); -#else - void 
ATL_buildinfo(void); - ATL_buildinfo(); -#endif - - Py_INCREF(Py_None); - return Py_None; -} - -static char version_doc[] = "Print the build info from atlas."; - -static PyMethodDef module_methods[] = { - {"version", version, METH_VARARGS, version_doc}, - {NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 - -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "atlas_version", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit_atlas_version(void) -{ -#define RETVAL m - PyObject *m; - m = PyModule_Create(&moduledef); -#else -#define RETVAL -PyMODINIT_FUNC initatlas_version(void) -{ - PyObject *m = NULL; - m = Py_InitModule("atlas_version", module_methods); -#endif - if (m == NULL) { - return RETVAL; - } -#if defined(ATLAS_INFO) - { - PyObject *d = PyModule_GetDict(m); - PyDict_SetItemString(d,"ATLAS_VERSION", - PyUString_FromString(ATLAS_INFO)); - } -#endif - return RETVAL; -} diff --git a/scipy-0.10.1/scipy/lib/lapack/bento.info b/scipy-0.10.1/scipy/lib/lapack/bento.info deleted file mode 100644 index 6ed4cfb4f9..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/bento.info +++ /dev/null @@ -1,11 +0,0 @@ -HookFile: bscript - -Library: - Extension: flapack - Sources: flapack.pyf.src - Extension: clapack - Sources: clapack.pyf.src - Extension: calc_lwork - Sources: calc_lwork.f - Extension: atlas_version - Sources: atlas_version.c diff --git a/scipy-0.10.1/scipy/lib/lapack/bscript b/scipy-0.10.1/scipy/lib/lapack/bscript deleted file mode 100644 index e2c64d16b4..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/bscript +++ /dev/null @@ -1,30 +0,0 @@ -import sys - -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - def builder(extension): - return default_builder(extension, - features="c fc pyext bento cshlib f2py", - use="FLAPACK") - context.register_builder("flapack", builder) - - def builder(extension): - kw = {"features": "c fc pyext bento cshlib 
f2py"} - if context.waf_context.env.HAS_CLAPACK: - kw["use"] = "CLAPACK" - else: - kw["source"] = ["dummy_clapack.pyf"] - return default_builder(extension, **kw) - context.register_builder("clapack", builder) - - def builder(extension): - return default_builder(extension, - features="c pyext bento cshlib f2py f2py_fortran", - use="FLAPACK CLIB") - context.register_builder("calc_lwork", builder) - - context.disable_extension("atlas_version") diff --git a/scipy-0.10.1/scipy/lib/lapack/calc_lwork.f b/scipy-0.10.1/scipy/lib/lapack/calc_lwork.f deleted file mode 100644 index 8372628074..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/calc_lwork.f +++ /dev/null @@ -1,483 +0,0 @@ - subroutine gehrd(min_lwork,max_lwork,prefix,n,lo,hi) - integer min_lwork,max_lwork,n,lo,hi - character prefix -c -c Returned maxwrk is acctually optimal lwork. -c -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: n -cf2py integer optional,intent(in),depend(n) :: lo=0, hi=n-1 - - INTEGER NB - EXTERNAL ILAENV - INTRINSIC MIN - - NB = MIN( 64, ILAENV( 1, prefix // 'GEHRD', ' ', n, lo, hi, -1 ) ) - max_lwork = n * NB - min_lwork = MIN(max_lwork,MAX(1,n)) - - end - - subroutine gesdd(min_lwork,max_lwork,prefix,m,n,compute_uv) - integer min_lwork,max_lwork,m,n,compute_uv - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n,&compute_uv) -cf2py callprotoargument int*,int*,char*,int*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n -cf2py integer optional,intent(in):: compute_uv=1 - - INTEGER MINMN, MNTHR, MINWRK, MAXWRK, SMLSIZ, BDSPAC, BDSPAN - INTEGER ILAENV, WRKBL - EXTERNAL ILAENV - INTRINSIC INT, MAX, MIN - - MINMN = MIN( M, N ) - MNTHR = INT( MINMN*11.0D0 / 6.0D0 ) - MINWRK = 1 - MAXWRK = 1 - SMLSIZ = ILAENV( 9, prefix // 'GESDD', ' ', 0, 0, 0, 0 ) - IF( M.GE.N ) THEN -* -* 
Compute space needed for DBDSDC -* - BDSPAC = 3*N*N + 7*N - BDSPAN = MAX( 12*N+4, 8*N+2+SMLSIZ*( SMLSIZ+8 ) ) - IF( M.GE.MNTHR ) THEN - IF (compute_uv.eq.0) THEN -* -* Path 1 (M much larger than N, JOBZ='N') -* - MAXWRK = N + N*ILAENV( 1, prefix // 'GEQRF', ' ', - $ M, N, -1, - $ -1 ) - MAXWRK = MAX( MAXWRK, 3*N+2*N* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ N, N, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MINWRK = BDSPAC - ELSE -* -* Path 4 (M much larger than N, JOBZ='A') -* - WRKBL = N + N*ILAENV( 1, prefix // 'GEQRF', ' ', - $ M, N, -1, -1 ) - WRKBL = MAX( WRKBL, N+M*ILAENV( 1, prefix // 'ORGQR', - $ ' ', M, - $ M, N, -1 ) ) - WRKBL = MAX( WRKBL, 3*N+2*N* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ N, N, -1, -1 ) ) - WRKBL = MAX( WRKBL, 3*N+N* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ N, N, N, -1 ) ) - WRKBL = MAX( WRKBL, 3*N+N* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ N, N, N, -1 ) ) - WRKBL = MAX( WRKBL, BDSPAC+2*N ) - MAXWRK = N*N + WRKBL - MINWRK = BDSPAC + N*N + M + N - ENDIF - ELSE -* -* Path 5 (M at least N, but not much larger) -* - WRKBL = 3*N + ( M+N )*ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, N, -1, -1) - IF (compute_uv.eq.0) THEN - MAXWRK = MAX(WRKBL,BDSPAC + 3*N) - MINWRK = 3*N + MAX(M,BDSPAC) - ELSE - MAXWRK = MAX( MAXWRK, 3*N+M* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ M, M, N, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*N+N* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ N, N, N, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC+2*N+M ) - MINWRK = BDSPAC + 2*N + M - ENDIF - ENDIF - ELSE -* -* Compute space needed for DBDSDC -* - BDSPAC = 3*M*M + 7*M - BDSPAN = MAX( 12*M+4, 8*M+2+SMLSIZ*( SMLSIZ+8 ) ) - IF( N.GE.MNTHR ) THEN - IF( compute_uv.eq.0 ) THEN -* -* Path 1t (N much larger than M, JOBZ='N') -* - MAXWRK = M + M*ILAENV( 1, prefix // 'GELQF', ' ', - $ M, N, -1, - $ -1 ) - MAXWRK = MAX( MAXWRK, 3*M+2*M* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, M, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MINWRK = BDSPAC - ELSE -* -* Path 4t (N much larger than 
M, JOBZ='A') -* - WRKBL = M + M*ILAENV( 1, prefix // 'GELQF', ' ', - $ M, N, -1, -1 ) - WRKBL = MAX( WRKBL, M+N*ILAENV( 1, prefix // 'ORGLQ', - $ ' ', N, - $ N, M, -1 ) ) - WRKBL = MAX( WRKBL, 3*M+2*M* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, M, -1, -1 ) ) - WRKBL = MAX( WRKBL, 3*M+M* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ M, M, M, -1 ) ) - WRKBL = MAX( WRKBL, 3*M+M* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ M, M, M, -1 ) ) - WRKBL = MAX( WRKBL, BDSPAC+2*M ) - MAXWRK = WRKBL + M*M - MINWRK = BDSPAC + M*M + M + N - ENDIF - ELSE - WRKBL = 3*M + ( M+N )*ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, N, -1, - $ -1 ) - IF (compute_uv.eq.0) THEN - MAXWRK = MAX(WRKBL,BDSPAC + 3*M) - MINWRK = 3*M + MAX(N,BDSPAC) - ELSE - MAXWRK = MAX( MAXWRK, 3*M+M* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ M, M, N, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*M+N* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ N, N, M, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC+2*M ) - MINWRK = BDSPAC + 2*M + N - ENDIF - ENDIF - ENDIF - min_lwork = MINWRK - max_lwork = MAX(MINWRK,MAXWRK) - end - - subroutine gelss(min_lwork,max_lwork,prefix,m,n,nrhs) - - integer min_lwork,max_lwork,m,n,nrhs - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n,&nrhs) -cf2py callprotoargument int*,int*,char*,int*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n,nrhs - - INTEGER MAXWRK, MINMN, MINWRK, MM, MNTHR - INTEGER ILAENV, BDSPAC, MAXMN - EXTERNAL ILAENV - INTRINSIC MAX, MIN - - MINMN = MIN( M, N ) - MAXMN = MAX( M, N ) - MNTHR = ILAENV( 6, prefix // 'GELSS', ' ', M, N, NRHS, -1 ) - MINWRK = 1 - MAXWRK = 0 - MM = M - IF( M.GE.N .AND. 
M.GE.MNTHR ) THEN -* -* Path 1a - overdetermined, with many more rows than columns -* - MM = N - MAXWRK = MAX( MAXWRK, N+N*ILAENV( 1, prefix // 'GEQRF', ' ', - $ M, N, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, N+NRHS* - $ ILAENV( 1, prefix // 'ORMQR', 'LT', M, NRHS, N, -1 ) ) - END IF - IF( M.GE.N ) THEN -* -* Path 1 - overdetermined or exactly determined -* -* Compute workspace neede for BDSQR -* - BDSPAC = MAX( 1, 5*N ) - MAXWRK = MAX( MAXWRK, 3*N+( MM+N )* - $ ILAENV( 1, prefix // 'GEBRD', ' ', MM, N, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*N+NRHS* - $ ILAENV( 1, prefix // 'ORMBR', 'QLT', MM, NRHS, N, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*N+( N-1 )* - $ ILAENV( 1, prefix // 'ORGBR', 'P', N, N, N, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MAXWRK = MAX( MAXWRK, N*NRHS ) - MINWRK = MAX( 3*N+MM, 3*N+NRHS, BDSPAC ) - MAXWRK = MAX( MINWRK, MAXWRK ) - END IF - - IF( N.GT.M ) THEN -* -* Compute workspace neede for DBDSQR -* - BDSPAC = MAX( 1, 5*M ) - MINWRK = MAX( 3*M+NRHS, 3*M+N, BDSPAC ) - IF( N.GE.MNTHR ) THEN -* -* Path 2a - underdetermined, with many more columns -* than rows -* - MAXWRK = M + M*ILAENV( 1, prefix // 'GELQF', ' ', - $ M, N, -1, -1 ) - MAXWRK = MAX( MAXWRK, M*M+4*M+2*M* - $ ILAENV( 1, prefix // 'GEBRD', ' ', M, M, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, M*M+4*M+NRHS* - $ ILAENV( 1, prefix // 'ORMBR', 'QLT', M, NRHS, M, -1 )) - MAXWRK = MAX( MAXWRK, M*M+4*M+( M-1 )* - $ ILAENV( 1, prefix // 'ORGBR', 'P', M, M, M, -1 ) ) - MAXWRK = MAX( MAXWRK, M*M+M+BDSPAC ) - IF( NRHS.GT.1 ) THEN - MAXWRK = MAX( MAXWRK, M*M+M+M*NRHS ) - ELSE - MAXWRK = MAX( MAXWRK, M*M+2*M ) - END IF - MAXWRK = MAX( MAXWRK, M+NRHS* - $ ILAENV( 1, prefix // 'ORMLQ', 'LT', N, NRHS, M, -1 ) ) - - ELSE -* -* Path 2 - underdetermined -* - MAXWRK = 3*M + ( N+M )*ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, N, -1, -1 ) - MAXWRK = MAX( MAXWRK, 3*M+NRHS* - $ ILAENV( 1, prefix // 'ORMBR', 'QLT', M, NRHS, M, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*M+M* - $ ILAENV( 1, prefix // 'ORGBR', 'P', M, N, M, -1 ) ) - MAXWRK = MAX( 
MAXWRK, BDSPAC ) - MAXWRK = MAX( MAXWRK, N*NRHS ) - END IF - END IF - MAXWRK = MAX( MINWRK, MAXWRK ) - MINWRK = MAX( MINWRK, 1 ) - - min_lwork = MINWRK - max_lwork = MAXWRK - end - - subroutine getri(min_lwork,max_lwork,prefix,n) - integer min_lwork,max_lwork,n - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n) -cf2py callprotoargument int*,int*,char*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: n - INTEGER ILAENV, NB - EXTERNAL ILAENV - NB = ILAENV( 1, prefix // 'GETRI', ' ', N, -1, -1, -1 ) - min_lwork = N - max_lwork = N*NB - end - - subroutine geev(min_lwork,max_lwork,prefix,n, - $ compute_vl,compute_vr) - - integer min_lwork,max_lwork,n,compute_vl,compute_vr - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&compute_vl,&compute_vr) -cf2py callprotoargument int*,int*,char*,int*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: compute_vl = 1,compute_vr = 1 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - LOGICAL WANTVL, WANTVR - INTEGER ILAENV, MINWRK, MAXWRK, MAXB, HSWORK, K - EXTERNAL ILAENV - INTRINSIC MAX, MIN - - WANTVL = compute_vl.eq.1 - WANTVR = compute_vr.eq.1 - - MINWRK = 1 - MAXWRK = 2*N + N*ILAENV( 1, prefix // 'GEHRD', ' ', N, 1, N, 0 ) - IF( ( .NOT.WANTVL ) .AND. 
( .NOT.WANTVR ) ) THEN - MINWRK = MAX( 1, 3*N ) - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', 'EN', N, 1, N, -1 ) - $ , 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', 'EN', N - $ , 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, N+1, N+HSWORK ) - ELSE - MINWRK = MAX( 1, 4*N ) - MAXWRK = MAX( MAXWRK, 2*N+( N-1 )* - $ ILAENV( 1, prefix // 'ORGHR', ' ', N, 1, N, -1 ) ) - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', 'SV', N, 1, N, -1 ), - $ 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', 'SV', N - $ , 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, N+1, N+HSWORK ) - MAXWRK = MAX( MAXWRK, 4*N ) - END IF - min_lwork = MINWRK - max_lwork = MAXWRK - end - - subroutine heev(min_lwork,max_lwork,prefix,n,lower) - - integer min_lwork,max_lwork,n,lower - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&lower) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: lower = 0 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - CHARACTER UPLO - INTEGER ILAENV, NB - EXTERNAL ILAENV - INTRINSIC MAX - - UPLO = 'L' - if (lower.eq.0) then - UPLO = 'U' - endif - - NB = ILAENV( 1, prefix // 'HETRD', UPLO, N, -1, -1, -1 ) - - min_lwork = MAX(1,2*N-1) - max_lwork = MAX( 1, ( NB+1 )*N ) - - end - - subroutine syev(min_lwork,max_lwork,prefix,n,lower) - - integer min_lwork,max_lwork,n,lower - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&lower) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: lower = 0 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - CHARACTER UPLO - INTEGER ILAENV, NB - EXTERNAL ILAENV - INTRINSIC MAX - - UPLO = 'L' - if (lower.eq.0) then - UPLO = 'U' - end if - - NB = ILAENV( 1, 
prefix // 'SYTRD', UPLO, N, -1, -1, -1 ) - - min_lwork = MAX(1,3*N-1) - max_lwork = MAX( 1, ( NB+2 )*N ) - - end - - subroutine gees(min_lwork,max_lwork,prefix,n,compute_v) - - integer min_lwork,max_lwork,n,compute_v - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&compute_v) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: compute_v = 1 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - INTEGER HSWORK, MAXWRK, MINWRK, MAXB, K - INTEGER ILAENV - EXTERNAL ILAENV - INTRINSIC MAX, MIN - - MAXWRK = N + N*ILAENV( 1, prefix // 'GEHRD', ' ', N, 1, N, 0 ) - MINWRK = MAX( 1, 2*N ) - IF( compute_v.eq.0 ) THEN - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', - $ 'SN', N, 1, N, -1 ), 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', - $ 'SN', N, 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, HSWORK, 1 ) - ELSE - MAXWRK = MAX( MAXWRK, N+( N-1 )* - $ ILAENV( 1, prefix // 'UNGHR', ' ', N, 1, N, -1 ) ) - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', - $ 'EN', N, 1, N, -1 ), 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', - $ 'EN', N, 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, HSWORK, 1 ) - END IF - - min_lwork = MINWRK - max_lwork = MAXWRK - - end - - subroutine geqrf(min_lwork,max_lwork,prefix,m,n) - - integer min_lwork,max_lwork,m,n - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n - - INTEGER NB - INTEGER ILAENV - EXTERNAL ILAENV - INTRINSIC MAX - - NB = ILAENV( 1, prefix // 'GEQRF', ' ', M, N, -1, -1 ) - - min_lwork = MAX(1,N) - max_lwork = MAX(1,N*NB) - end - - subroutine gqr(min_lwork,max_lwork,prefix,m,n) - - 
integer min_lwork,max_lwork,m,n - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n - - INTEGER NB - INTEGER ILAENV - EXTERNAL ILAENV - INTRINSIC MAX - - if ((prefix.eq.'d').or.(prefix.eq.'s') - $ .or.(prefix.eq.'D').or.(prefix.eq.'S')) then - NB = ILAENV( 1, prefix // 'ORGQR', ' ', M, N, -1, -1 ) - else - NB = ILAENV( 1, prefix // 'UNGQR', ' ', M, N, -1, -1 ) - endif - min_lwork = MAX(1,N) - max_lwork = MAX(1,N*NB) - end diff --git a/scipy-0.10.1/scipy/lib/lapack/clapack.pyf.src b/scipy-0.10.1/scipy/lib/lapack/clapack.pyf.src deleted file mode 100644 index d9514d9ecb..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/clapack.pyf.src +++ /dev/null @@ -1,207 +0,0 @@ -!%f90 -*- f90 -*- -! -! Signatures for f2py wrappers of ATLAS LAPACK functions. -! -! gesv -! getrf -! getrs -! getri -! posv -! potrf -! potrs -! potri -! lauum -! trtri -! - -python module clapack - interface - - function gesv(n,nrhs,a,piv,b,info,rowmajor) - - ! lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A * P = L * U - ! U is unit upper diagonal triangular, L is lower triangular, - ! piv pivots columns. 
- - fortranname clapack_gesv - integer intent(c,hide) :: gesv - callstatement gesv_return_value = info = (*f2py_func)(102-rowmajor,n,nrhs,a,n,piv,b,n) - callprotoargument const int,const int,const int,*,const int,int*,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - integer dimension(n),depend(n),intent(out) :: piv - dimension(n,nrhs),check(shape(a,0)==shape(b,0)),depend(n) :: b - integer intent(out)::info - intent(in,out,copy,out=x) b - intent(c,in,out,copy,out=lu) a - - end function gesv - - function posv(n,nrhs,a,b,info,lower,rowmajor) - - ! c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - fortranname clapack_posv - integer intent(c,hide) :: posv - callstatement posv_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,nrhs,a,n,b,n) - callprotoargument const int,const int,const int,const int,*,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(c,in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(a,0)==shape(b,0)) :: b - integer intent(out) :: info - - end function posv - - function potrf(n,a,info,lower,clean,rowmajor) - - ! c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^T * U, C = U if lower = 0 - ! 
A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! clean==1 zeros strictly lower or upper parts of U or L, respectively - - ! c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^H * U, C = U if lower = 0 - ! A = L * L^H, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! clean==1 zeros strictly lower or upper parts of U or L, respectively - - fortranname clapack_potrf - integer intent(c,hide) :: potrf - ! <_init1=*(a+i*n+j)=0.0;,\0,k=i*n+j;(a+k)-\>r=(a+k)-\>i=0.0;,\2> - ! <_init2=*(a+j*n+i)=0.0;,\0,k=j*n+i;(a+k)-\>r=(a+k)-\>i=0.0;,\2> - callstatement potrf_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,a,n); if(clean){int i,j<,,\,k,\2>;if(lower){for(i=0;i\}} else {for(i=0;i\}}} - callprotoargument const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(clean==0||clean==1) :: clean = 1 - - integer depend(a),intent(hide):: n = shape(a,0) - dimension(n,n),intent(c,in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - integer intent(out) :: info - - end function potrf - - function potrs(n,nrhs,c,b,info,lower,rowmajor) - - ! x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0) - ! Solve A * X = b. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. 
- - fortranname clapack_potrs - integer intent(c,hide) :: potrs - callstatement potrs_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,nrhs,c,n,b,n) - callprotoargument const int,const int,const int,const int,*,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(c,in) :: c - check(shape(c,0)==shape(c,1)) :: c - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(c,0)==shape(b,0)) :: b - integer intent(out) :: info - - end function potrs - - function potri(n,c,info,lower,rowmajor) - - ! inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0) - ! Compute A inverse A^-1. - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - fortranname clapack_potri - integer intent(c,hide) :: potri - callstatement potri_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,c,n) - callprotoargument const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=inv_a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end function potri - - function lauum(n,c,info,lower,rowmajor) - - ! a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0) - ! Compute product - ! U^T * U, C = U if lower = 0 - ! L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. 
- - fortranname clapack_lauum - integer intent(c,hide) :: lauum - callstatement lauum_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,c,n) - callprotoargument const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end function lauum - - function trtri(n,c,info,lower,unitdiag,rowmajor) - - ! inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0) - ! Compute C inverse C^-1 where - ! C = U if lower = 0 - ! C = L if lower = 1 - ! C is non-unit triangular matrix if unitdiag = 0 - ! C is unit triangular matrix if unitdiag = 1 - - fortranname clapack_trtri - integer intent(c,hide) :: trtri - callstatement trtri_return_value = info = (*f2py_func)(102-rowmajor,121+lower,131+unitdiag,n,c,n) - callprotoargument const int,const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=inv_c) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end function trtri - - - end interface -end python module clapack diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack.pyf.src deleted file mode 100644 index 2932dcaf1c..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack.pyf.src +++ /dev/null @@ -1,37 +0,0 @@ -!%f90 -*- f90 -*- -! -! Signatures for f2py wrappers of FORTRAN LAPACK functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! -! 
Additions by Travis Oliphant -! -! Split and use scipy_distutils.from_template: Pearu -! - -python module flapack -interface - - include 'flapack_user.pyf.src' - - ! Driver Routines - include 'flapack_le.pyf.src' - include 'flapack_lls.pyf.src' - include 'flapack_esv.pyf.src' - include 'flapack_gesv.pyf.src' - - ! Computational Routines - include 'flapack_lec.pyf.src' - include 'flapack_llsc.pyf.src' - include 'flapack_sevc.pyf.src' - include 'flapack_evc.pyf.src' - include 'flapack_svdc.pyf.src' - include 'flapack_gsevc.pyf.src' - include 'flapack_gevc.pyf.src' - - include 'flapack_aux.pyf.src' - -end interface -end python module flapack diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_aux.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_aux.pyf.src deleted file mode 100644 index f39b30ae31..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_aux.pyf.src +++ /dev/null @@ -1,53 +0,0 @@ -! -*- f90 -*- - - subroutine lauum(n,c,info,lower) - - ! a,info = lauum(c,lower=0,overwrite_c=0) - ! Compute product - ! U^T * U, C = U if lower = 0 - ! L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,c,&n,&info) - callprotoargument char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(in,out,copy,out=a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end subroutine lauum - - subroutine laswp(n,a,nrows,k1,k2,piv,off,inc,m) - - ! a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0) - ! Perform row interchanges on the matrix A for each of row k1 through k2 - ! - ! piv pivots rows. 
- - callstatement {int i;m=len(piv);for(i=0;i*,int*,int*,int*,int*,int* - - integer depend(a),intent(hide):: nrows = shape(a,0) - integer depend(a),intent(hide):: n = shape(a,1) - dimension(nrows,n),intent(in,out,copy) :: a - integer dimension(*),intent(in),depend(nrows) :: piv - check(len(piv)<=nrows) :: piv -!XXX: how to check that all elements in piv are < n? - - integer optional,intent(in) :: k1 = 0 - check(0<=k1) :: k1 - integer optional,intent(in),depend(k1,piv,off) :: k2 = len(piv)-1 - check(k1<=k2 && k20||inc<0) :: inc = 1 - integer optional,intent(in),depend(piv) :: off=0 - check(off>=0 && off(m-1)*abs(inc)) :: m - - end subroutine laswp - diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_esv.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_esv.pyf.src deleted file mode 100644 index ca1a1bb2ea..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_esv.pyf.src +++ /dev/null @@ -1,268 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Driver routines for standard eigenvalue and singular value problems: -! syev, heev (SEP symmetric/hermitian, eigenvalues/vectors) -! syevd, heevd (SEP symmetric/hermitian, eigenvalues/vectors, D&C) -! syevx, heevx (.., expert) - Not Implemented -! syevr, heevr (.., RRR) -! spev, hpev, spevd, hpevd, spevx, hpevx (..., packed storage) - Not Implemented -! sbev, hbev, sbevd, hbevd, sbevx, hbevx (..., band) - Not Implemented -! stev, stevd, stevx, stevr (..., tridiagonal) - Not Implemented -! gees (NEP, general, Schur factorization) -! geesx (NEP, general, Schur factorization, expert) - Not Implemented -! geev (NEP, general, eigenvalues/vectors) -! geevx (NEP, general, eigenvalues/vectors, expert) - Not Implemented -! gesvd (SVD, general, singular values/vectors) - Not Implemented -! gesdd (SVD, general, singular values/vectors, D&C) -! -! - - ! - - subroutine ev(compute_v,lower,n,w,a,work,lwork,<_1=,,rwork\,,\2>info) - - ! w,v,info = syev(a,compute_v=1,lower=0,lwork=3*n-1,overwrite_a=0) - ! 
Compute all eigenvalues and, optionally, eigenvectors of a - ! real symmetric matrix A. - ! - ! Performance tip: - ! If compute_v=0 then set also overwrite_a=1. - - ! w,v,info = heev(a,compute_v=1,lower=0,lwork=2*n-1,overwrite_a=0) - ! Compute all eigenvalues and, optionally, eigenvectors of a - ! complex Hermitian matrix A. - ! - ! Warning: - ! If compute_v=0 and overwrite_a=1, the contents of a is destroyed. - - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,a,&n,w,work,&lwork,<_2=,,rwork\,,\2>&info) - callprotoargument char*,char*,int*,*,int*,*,*,int*,<_3=,,float*\,,double*\,>int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer intent(hide),depend(a):: n = shape(a,0) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - intent(in,copy,out,out=v) :: a - - dimension(n),intent(out),depend(n) :: w - - ! <_lwork=3*n-1,\0,2*n-1,\2> - integer optional,intent(in),depend(n) :: lwork=<_lwork> - check(lwork>=<_lwork>) :: lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - dimension(3*n-1),intent(hide,cache),depend(n) :: rwork - - integer intent(out) :: info - end subroutine ev - - subroutine evd(compute_v,lower,n,w,a,work,lwork,iwork,liwork,<_1=,,rwork\,lrwork\,,\2>info) - - ! w,v,info = syevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0) - ! Compute all eigenvalues and, optionally, eigenvectors of a - ! real symmetric matrix A using D&C. - ! - ! Performance tip: - ! If compute_v=0 then set also overwrite_a=1. - - ! w,v,info = heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0) - ! Compute all eigenvalues and, optionally, eigenvectors of a - ! complex Hermitian matrix A using D&C. - ! - ! Warning: - ! If compute_v=0 and overwrite_a=1, the contents of a is destroyed. 
- - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,a,&n,w,work,&lwork,<_2=,,rwork\,&lrwork\,,\2>iwork,&liwork,&info) - callprotoargument char*,char*,int*,*,int*,*,*,int*,<_3=,,float*\,int*\,,double*\,int*\,>int*,int*,int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer intent(hide),depend(a):: n = shape(a,0) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - intent(in,copy,out,out=v) :: a - - dimension(n),intent(out),depend(n) :: w - - ! <_lwork=(compute_v?1+6*n+2*n*n:2*n+1),\0,(compute_v?2*n+n*n:n+1),\2> - integer optional,intent(in),depend(n,compute_v) :: lwork=<_lwork> - check(lwork>=<_lwork>) :: lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - integer intent(hide),depend(n,compute_v) :: liwork = (compute_v?3+5*n:1) - integer dimension(liwork),intent(hide,cache),depend(liwork) :: iwork - - ! <_lrwork=,,(compute_v?1+5*n+2*n*n:n),\2> - integer intent(hide),depend(n,compute_v) :: lrwork = <_lrwork> - dimension(lrwork),intent(hide,cache),depend(n,lrwork) :: rwork - - integer intent(out) :: info - end subroutine evd - - subroutine evr(n,a,compute_v,lower,vrange,irange,atol,w,z,m,ldz,isuppz,work,lwork,<,,rwork\,lrwork\,,\2>iwork,liwork,info) - - ! w,v,info = {sy|he}evr(a,compute_v=1,lower=0,vrange=None,irange=None,atol=-1,lwork=min_lwork,overwrite_a=0) - ! - ! Compute range of eigenvalues and, optionally, eigenvectors of a - ! real symmetric matrix A using RRR. - ! - ! Performance tip: - ! If compute_v=0 then set also overwrite_a=1. - ! Warning: - ! If compute_v=0 and overwrite_a=1, the contents of a is destroyed. 
- - callstatement if(irange_capi==Py_None);else{irange[0]++;irange[1]++;}(*f2py_func)((compute_v?"V":"N"),(vrange_capi==Py_None?(irange_capi==Py_None?"A":"I"):"V"),(lower?"L":"U"),&n,a,&n,vrange,vrange+1,irange,irange+1,&atol,&m,w,z,&ldz,isuppz,work,&lwork,<_2=,,rwork\,&lrwork\,,\2>iwork,&liwork,&info);if(irange_capi==Py_None);else{irange[0]--;irange[1]--;}if(vrange_capi==Py_None);else {capi_w_tmp-\>dimensions[0]=capi_z_tmp-\>dimensions[1]=m;/*capi_z_tmp-\>strides[0]=m*capi_z_tmp-\>descr-\>elsize;*/} - - callprotoargument char*,char*,char*,int*,*,int*,*,*,int*,int*,*,int*,*,*,int*,int*,*,int*,<_3=,,float*\,int*\,,double*\,int*\,>int*,int*,int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer intent(hide),depend(a):: n = shape(a,0) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - intent(in,copy) :: a - - optional,dimension(2),intent(in) :: vrange - integer optional,dimension(2),intent(in),depend(n) :: irange - check(irange_capi==Py_None || (irange[0]>=0 && irange[1] optional,intent(in) :: atol = -1.0 - - integer intent(hide),depend(vrange,irange,n) :: m = (irange_capi==Py_None?n:irange[1]-irange[0]+1) - - dimension(m),intent(out),depend(m) :: w - - integer intent(hide),depend(compute_v,n) :: ldz = (compute_v?n:1) - dimension(ldz,m),intent(out,out=v),depend(ldz,m) :: z - - integer intent(hide),depend(m),dimension(2*m) :: isuppz - - ! <_lwork=26*n,\0,18*n,\2> Includes bug fix in `man zheevr`. - integer optional,intent(in),depend(n) :: lwork=<_lwork> - check(lwork>=<_lwork>) lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - integer intent(hide),depend(n,compute_v) :: liwork = 10*n - integer dimension(liwork),intent(hide,cache),depend(liwork) :: iwork - - ! 
<_lrwork=,,24*n,\2> - integer intent(hide),depend(n) :: lrwork = <_lrwork> - dimension(lrwork),intent(hide,cache),depend(n,lrwork) :: rwork - - integer intent(out) :: info - end subroutine evr - - subroutine gees(compute_v,sort_t,select,n,a,nrows,sdim,,vs,ldvs,work,lwork,<,,rwork\,,\2>bwork,info) - - ! t,sdim,(wr,wi|w),vs,info = gees(zselect,a,compute_v=1,sort_t=0,lwork=3*n,zselect_extra_args=(),overwrite_a=0) - ! For an NxN matrix compute the eigenvalues, the schur form T, and optionally - ! the matrix of Schur vectors Z. This gives the Schur factorization - ! A = Z * T * Z^H -- a complex matrix is in Schur form if it is upper - ! triangular - - ! t,sdim,wr,wi,vs,info=gees(compute_v=1,sort_t=0,select,a,lwork=3*n) - ! For an NxN matrix compute the eigenvalues, the schur form T, and optionally - ! the matrix of Schur vectors Z. This gives the Schur factorization - ! A = Z * T * Z^H -- a real matrix is in Schur form if it is upper quasi- - ! triangular with 1x1 and 2x2 blocks. - - callstatement (*f2py_func)((compute_v?"V":"N"),(sort_t?"S":"N"),cb_select_in_gees__user__routines,&n,a,&nrows,&sdim,,vs,&ldvs,work,&lwork,<,,rwork\,,\2>bwork,&info,1,1) - callprotoargument char*,char*,int(*)(),int*,*,int*,int*,*,*,int*,*,int*,<,,float*\,,double*\,>int*,int*,int,int - - use gees__user__routines - - integer optional,intent(in),check(compute_v==0||compute_v==1) :: compute_v = 1 - integer optional,intent(in),check(sort_t==0||sort_t==1) :: sort_t = 0 - external select - integer intent(hide),depend(a) :: n = shape(a,1) - intent(in,out,copy,out=t),check(shape(a,0)==shape(a,1)),dimension(n,n) :: a - integer intent(hide),depend(a) :: nrows=shape(a,0) - integer intent(out) :: sdim=0 - intent(out),dimension(n) :: - intent(out),depend(ldvs,n),dimension(ldvs,n) :: vs - integer intent(hide),depend(compute_v,n) :: ldvs=((compute_v==1)?n:1) - intent(hide,cache),depend(lwork),dimension(lwork) :: work - integer optional,intent(in),check(lwork >= MAX(1,3*n)),depend(n) :: lwork = 3*n - 
intent(hide,cache),depend(n),dimension(n) :: rwork - logical intent(hide,cache),depend(n),dimension(n) :: bwork - integer intent(out) :: info - end subroutine gees - - subroutine geev(compute_vl,compute_vr,n,a,,vl,ldvl,vr,ldvr,work,lwork,<,,rwork\,,\2>info) - - ! wr,wi,vl,vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=4*n,overwrite_a=0) - ! w,vl,vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=2*n,overwrite_a=0) - - callstatement {(*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,,vl,&ldvl,vr,&ldvr,work,&lwork,<,,rwork\,,\2>&info);} - callprotoargument char*,char*,int*,*,int*,*,*,int*,*,int*,*,int*,<,,float*\,,double*\,>int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - dimension(n),intent(out),depend(n) :: - - dimension(ldvl,n),intent(out) :: vl - integer intent(hide),depend(n,compute_vl) :: ldvl=(compute_vl?n:1) - - dimension(ldvr,n),intent(out) :: vr - integer intent(hide),depend(n,compute_vr) :: ldvr=(compute_vr?n:1) - - ! <_lwork=(compute_vl||compute_vr)?4*n:3*n,\0,2*n,\2> - integer optional,intent(in),depend(n,compute_vl,compute_vr) :: lwork=<_lwork> - check(lwork>=<_lwork>) :: lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - dimension(2*n),intent(hide,cache),depend(n) :: rwork - - integer intent(out):: info - end subroutine geev - - subroutine gesdd(m,n,minmn,du,dvt,a,compute_uv,u,s,vt,work,lwork,<,,rwork\,,\2>iwork,info) - - ! u,s,vh,info = gesdd(a,compute_uv=1,lwork=..,overwrite_a=0) - ! Compute the singular value decomposition (SVD): - ! A = U * SIGMA * conjugate-transpose(V) - ! A - M x N matrix - ! U - M x M matrix - ! SIGMA - M x N zero matrix with a main diagonal filled with min(M,N) - ! singular values - ! 
conjugate-transpose(V) - N x N matrix - ! - - callstatement (*f2py_func)((compute_uv?"A":"N"),&m,&n,a,&m,s,u,&du,vt,&dvt,work,&lwork,<,,rwork\,,\2>iwork,&info) - callprotoargument char*,int*,int*,*,int*,*,*,int*,*,int*,*,int*,<,,float*\,,double*\,>int*,int* - - integer intent(in),optional,check(compute_uv==0||compute_uv==1):: compute_uv = 1 - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(m,n):: minmn = MIN(m,n) - integer intent(hide),depend(compute_uv,minmn) :: du = (compute_uv?m:1) - integer intent(hide),depend(compute_uv,n) :: dvt = (compute_uv?n:1) - dimension(m,n),intent(in,copy) :: a - dimension(minmn),intent(out),depend(minmn) :: s - dimension(du,du),intent(out),depend(du) :: u - dimension(dvt,dvt),intent(out),depend(dvt) :: vt - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - ! <_lwork=(compute_uv?4*minmn*minmn+MAX(m\,n)+9*minmn:MAX(14*minmn+4\,10*minmn+827)+MAX(m\,n)),\0,(compute_uv?2*minmn*minmn+MAX(m\,n)+2*minmn:2*minmn+MAX(m\,n)),\2> - integer optional,intent(in),depend(minmn,compute_uv) & - :: lwork = <_lwork> - ! gesdd docs are mess: optimal turns out to be less than minimal in docs - ! check(lwork>=(compute_uv?3*minmn*minmn+MAX(MAX(m,n),4*minmn*(minmn+1)):MAX(14*minmn+4,10*minmn+2+25*(25+8))+MAX(m,n))) :: lwork - dimension((compute_uv?5*minmn*minmn+7*minmn:5*minmn)),intent(hide,cache),depend(minmn,compute_uv) :: rwork - - integer intent(hide,cache),dimension(8*minmn),depend(minmn) :: iwork - integer intent(out)::info - - end subroutine gesdd diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_evc.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_evc.pyf.src deleted file mode 100644 index c07c369618..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_evc.pyf.src +++ /dev/null @@ -1,82 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for the non-symmetric eigenproblem: -! -! 
gehrd (general Hessenberg reduction) -! gebal (general balancing) -! gebak (general backtransforming) -! orghr, unghr (orthogonal/unitary generate matrix after Hessenberg reduction) - Not Implemented -! ormhr, unmhr ((orthogonal/unitary multiply matrix after Hessenberg reduction) - Not Implemented -! hseqr (Hessenberg Schur factorization) - Not Implemented -! hsein (Hessenberg eigenvectors by inverse iteration) - Not Implemented -! trevc ((quasi)triangular eigenvectors) - Not Implemented -! trexc ((quasi)triangular reordering Schur factorization) - Not Implemented -! trsyl ((quasi)triangular Sylvester equation) - Not Implemented -! trsna ((quasi)triangular condition numbers of eigenvalues/vectors) - Not Implemented -! trsen ((quasi)triangular condition numbers of eigenvalue cluster/invariant subspace) - Not Implemented -! - - subroutine gehrd(n,lo,hi,a,tau,work,lwork,info) - ! - ! hq,tau,info = gehrd(a,lo=0,hi=n-1,lwork=n,overwrite_a=0) - ! Reduce general matrix A to upper Hessenberg form H by unitary similarity - ! transform Q^H * A * Q = H - ! - ! Q = H(lo) * H(lo+1) * ... * H(hi-1) - ! H(i) = I - tau * v * v^H - ! v[0:i+1] = 0, v[i+1]=1, v[hi+1:n] = 0 - ! v[i+2:hi+1] is stored in hq[i+2:hi+i,i] - ! tau is tau[i] - ! - ! hq for n=7,lo=1,hi=5: - ! [a a h h h h a - ! a h h h h a - ! h h h h h h - ! v2h h h h h - ! v2v3h h h h - ! v2v3v4h h h - ! a] - ! 
- callstatement { hi++; lo++; (*f2py_func)(&n,&lo,&hi,a,&n,tau,work,&lwork,&info); } - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=ht),check(shape(a,0)==shape(a,1)) :: a - integer intent(in),optional :: lo = 0 - integer intent(in),optional,depend(n) :: hi = n-1 - dimension(n-1),intent(out),depend(n) :: tau - dimension(lwork),intent(cahce,hide),depend(lwork) :: work - integer intent(in),optional,depend(n),check(lwork>=MAX(n,1)) :: lwork = MAX(n,1) - integer intent(out) :: info - - end subroutine gehrd - - subroutine gebal(scale,permute,n,a,m,lo,hi,pivscale,info) - ! - ! ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0) - ! Balance general matrix a. - ! hi,lo are such that ba[i][j]==0 if i>j and j=0...lo-1 or i=hi+1..n-1 - ! pivscale([0:lo], [lo:hi+1], [hi:n+1]) = (p1,d,p2) where (p1,p2)[j] is - ! the index of the row and column interchanged with row and column j. - ! d[j] is the scaling factor applied to row and column j. - ! The order in which the interchanges are made is n-1 to hi+1, then 0 to lo-1. - ! - ! P * A * P = [[T1,X,Y],[0,B,Z],[0,0,T2]] - ! BA = [[T1,X*D,Y],[0,inv(D)*B*D,ind(D)*Z],[0,0,T2]] - ! where D = diag(d), T1,T2 are upper triangular matrices. - ! lo,hi mark the starting and ending columns of submatrix B. 
- - callstatement { (*f2py_func)((permute?(scale?"B":"P"):(scale?"S":"N")),&n,a,&m,&lo,&hi,pivscale,&info); hi--; lo--; } - callprotoargument char*,int*,*,int*,int*,int*,*,int* - integer intent(in),optional :: permute = 0 - integer intent(in),optional :: scale = 0 - integer intent(hide),depend(a,n) :: m = shape(a,0) - integer intent(hide),depend(a) :: n = shape(a,1) - check(m>=n) m - integer intent(out) :: hi,lo - dimension(n),intent(out),depend(n) :: pivscale - dimension(m,n),intent(in,out,copy,out=ba) :: a - integer intent(out) :: info - - end subroutine gebal diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_gesv.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_gesv.pyf.src deleted file mode 100644 index aca09a2670..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_gesv.pyf.src +++ /dev/null @@ -1,108 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Driver routines for generalized eigenvalue and singular value problems: -! -! sygv,hegv (GSEP, symmetric-definite eigenvalues/vectors) -! sygvd,hegvd (GSEP, symmetric-definite eigenvalues/vectors, D&C) -! sygvx,hegvx (GSEP, symmetric-definite eigenvalues/vectors, expert) - Not Implemented -! spgv, hpgv, spgvd, hpgvd, spgvx, hpgvx (..., packed storage) - Not Implemented -! sbgv, hbgv, sbgvd, hbgvd, sbgvx, hbgvx (..., band) - Not Implemented -! gges,ggesx (GNEP, general, Schur Factorization) - Not Implemented -! ggev,ggevx (GNEP, general, eigenvalues/vectors) -! ggsvd (GSVD, general, singular values/vectors) - Not Implemented -! gegv (general, eigenvalues/vectors, deprecated, use ggev instead) - Removed -! - -! -! -! -! -! -! - -subroutine gv(itype,compute_v,lower,n,w,a,b,work,lwork,info) - ! - ! w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0) - ! 
- integer check(1<=itype && itype<=3):: itype = 1 - integer :: compute_v=1 - integer :: lower=0 - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=v) :: a - dimension(n,n),intent(in,copy) :: b - dimension(n),intent(out) :: w - <_lwork=3*n-1,\0,2*n-1,\2> - integer optional,intent(in),depend(n) :: lwork=<_lwork> - check(<_lwork>\<=lwork) lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - dimension(3*n-2),intent(hide,cache),depend(n) :: rwork - integer intent(out) :: info - callstatement (*f2py_func)(&itype,(compute_v?"V":"N"),(lower?"L":"U"),&n,a,&n,b,&n,w,work,&lwork,&info) - callprotoargument int*,char*,char*,int*,*,int*,*,int*,*,*,int*,int* -end subroutine gv - -subroutine gvd(itype,compute_v,lower,n,w,a,b,work,lwork,iwork,liwork,info) - ! - ! w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0) - ! - integer check(1<=itype && itype<=3):: itype = 1 - integer :: compute_v=1 - integer :: lower=0 - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=v) :: a - dimension(n,n),intent(in,copy) :: b - dimension(n),intent(out) :: w - <_lwork=(compute_v?1+6*n+2*n*n:2*n+1),\0,(compute_v?2*n+n*n:n+1),\2> - integer optional,intent(in),depend(n,compute_v) :: lwork=<_lwork> - check(<_lwork>\<=lwork) lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - integer intent(hide),depend(n,compute_v) :: lrwork = (compute_v?1+5*n+2*n*n:n) - dimension(lrwork),intent(hide,cache),depend(lrwork) :: rwork - integer intent(hide),depend(compute_v,n) :: liwork = (compute_v?3+5*n:1) - integer intent(hide,cache),dimension(liwork),depend(liwork) :: iwork - integer intent(out) :: info - callstatement (*f2py_func)(&itype,(compute_v?"V":"N"),(lower?"L":"U"),&n,a,&n,b,&n,w,work,&lwork,iwork,&liwork,&info) - callprotoargument int*,char*,char*,int*,*,int*,*,int*,*,*,int*,int*,int*,int* -end subroutine gvd - -! -! 
- -subroutine ggev(compute_vl,compute_vr,n,a,b,,beta,vl,ldvl,vr,ldvr,work,lwork,info) - - callstatement {(*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,b,&n,,beta,vl,&ldvl,vr,&ldvr,work,&lwork,&info);} - callprotoargument char*,char*,int*,*,int*,*,int*,,*,*,int*,*,int*,*,int*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - intent(in,copy), dimension(n,n) :: b - check(shape(b,0)==shape(b,1)) :: b - - intent(out), dimension(n), depend(n) :: - intent(out), dimension(n), depend(n) :: beta - - depend(ldvl,n), dimension(ldvl,n),intent(out) :: vl - integer intent(hide),depend(n,compute_vl) :: ldvl=(compute_vl?n:1) - - depend(ldvr,n), dimension(ldvr,n),intent(out) :: vr - integer intent(hide),depend(n,compute_vr) :: ldvr=(compute_vr?n:1) - - ! <_lwork=8*n,\0,2*n,\2> - integer optional,intent(in),depend(n,compute_vl,compute_vr) :: lwork=<_lwork> - check(lwork>=<_lwork>) :: lwork - intent(hide,cache), dimension(lwork), depend(lwork) :: work - intent(hide), dimension(8*n), depend(n) :: rwork - - integer intent(out):: info - -end subroutine ggev - diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_gevc.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_gevc.pyf.src deleted file mode 100644 index b6f2f59023..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_gevc.pyf.src +++ /dev/null @@ -1,16 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for the generalized nonsymmetric eigenproblem: -! -! gghrd (general, Hessenberg reduction) - Not Implemented -! ggbal (general, balancing) - Not Implemented -! ggbak (general, back transforming) - Not Implemented -! hgeqz (Hessenberg, Schur factorization) - Not Implemented -! 
tgevc ((quasi)triangular, eigenvectors) - Not Implemented -! tgexc ((quasi)triangular, reordering Schur decomposition) - Not Implemented -! tgsyl ((quasi)triangular, Sylvester equation) - Not Implemented -! tgsna ((quasi)triangular condition numbers of eigenvalues/vectors) - Not Implemented -! tgsen ((quasi)triangular condition numbers of eigenvalue cluster/deflating subspaces) - Not Implemented -! \ No newline at end of file diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_gsevc.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_gsevc.pyf.src deleted file mode 100644 index 5dc7a48060..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_gsevc.pyf.src +++ /dev/null @@ -1,11 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for the generalized symmetric definite eigenproblem: -! -! sygst, hegst (symmetric/Hermitian, reduction) - Not Implemented -! spgst, hpgst (symmetric/Hermitian, reduction, packed storage) - Not Implemented -! pbstf (symmetric/Hermitian, split Cholesky factorization, banded) - Not Implemented -! sbgst,hbgst (symmetric/Hermitian, reduction, banded) - Not Implemented -! diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_le.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_le.pyf.src deleted file mode 100644 index 0ab72c61c9..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_le.pyf.src +++ /dev/null @@ -1,101 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Simple Driver Routines for Linear Equations: -! gesv (general) -! gbsv (general band) -! gtsv (general tridiagonla) - Not Implemented -! posv (symmetric/hermitian positive definite) -! ppsv (symmetric/hermitian positive definite packed storage) - Not Implemented -! pbsv (symmetric/hermitian positive definite band) - Not Implemeted -! ptsv (symmetric/hermitian positive definite tridiagonal) - Not Implemented -! sysv, hesv (symmetric/hermitian indefinite) - Not Implemented -! 
spsv, hpsv (symmetric/hermitian indefinite packed storage) - Not Implemented -! -! -! Expert Driver Routines for Linear Equations: -! gesvx (general) - Not Implemented -! gbsvx (general band) - Not Implemented -! gtsvx (general tridiagonla) - Not Implemented -! posvx (symmetric/hermitian positive definite) - Not Implemented -! ppsvx (symmetric/hermitian positive definite packed storage) - Not Implemented -! pbsvx (symmetric/hermitian positive definite band) - Not Implemeted -! ptsvx (symmetric/hermitian positive definite tridiagonal) - Not Implemented -! sysvx, hesvx (symmetric/hermitian indefinite) - Not Implemented -! spsvx, hpsvx (symmetric/hermitian indefinite packed storage) - Not Implemented -! - -! -! Simple Driver Routines for Linear Equations -! =========================================== - - subroutine gesv(n,nrhs,a,piv,b,info) - - ! lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A = P * L * U - ! U is upper diagonal triangular, L is unit lower triangular, - ! piv pivots columns. - - callstatement {int i;(*f2py_func)(&n,&nrhs,a,&n,piv,b,&n,&info);for(i=0;i\*,int*,int*,*,int*,int* - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - integer dimension(n),depend(n),intent(out) :: piv - dimension(n,nrhs),check(shape(a,0)==shape(b,0)),depend(n) :: b - integer intent(out)::info - intent(in,out,copy,out=x) b - intent(in,out,copy,out=lu) a - - end subroutine gesv - - subroutine gbsv(n,kl,ku,nrhs,ab,piv,b,info) - ! - ! lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0) - ! Solve A * X = B - ! A = P * L * U - ! A is a band matrix of order n with kl subdiagonals and ku superdiagonals - ! starting at kl-th row. - ! X, B are n-by-nrhs matrices - ! 
- callstatement {int i=2*kl+ku+1;(*f2py_func)(&n,&kl,&ku,&nrhs,ab,&i,piv,b,&n,&info);for(i=0;i\*,int*,int*,*,int*,int* - integer depend(ab),intent(hide):: n = shape(ab,1) - integer intent(in) :: kl - integer intent(in) :: ku - integer depend(b),intent(hide) :: nrhs = shape(b,1) - dimension(2*kl+ku+1,n),depend(kl,ku), check(2*kl+ku+1==shape(ab,0)) :: ab - integer dimension(n),depend(n),intent(out) :: piv - dimension(n,nrhs),depend(n),check(shape(ab,1)==shape(b,0)) :: b - integer intent(out) :: info - intent(in,out,copy,out=x) b - intent(in,out,copy,out=lub) ab - end subroutine gbsv - - subroutine posv(n,nrhs,a,b,info,lower) - - ! c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,&nrhs,a,&n,b,&n,&info) - callprotoargument char*,int*,int*,*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(a,0)==shape(b,0)) :: b - integer intent(out) :: info - - end subroutine posv - diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_lec.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_lec.pyf.src deleted file mode 100644 index 30bbfc1a43..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_lec.pyf.src +++ /dev/null @@ -1,197 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for linear equations: -! -! getrf (general, factorize) -! getrs (general, solve) -! gecon (general, estimate condition number) - Not Implemented -! gerfs (general, error bounds) - Not Implemented -! 
getri (general, invert) -! geequ (general, equilibrate) - Not Implemented -! gbtrf, gbtrs, gbcon, gbrfs, gbequ (general band) - Not Implemented -! gttrf, gttrs, gtcon, gtrfs (general tridiagonal) - Not Implemented -! potrf (symmetric/Hermitian positive, factorize) -! potrs (symmetric/Hermitian positive, solve) -! pocon (symmetric/Hermitian positive, estimate condition number) - Not Implemented -! porfs (symmetric/Hermitian positive, error bounds) - Not Implemented -! potri (symmetric/Hermitian positive, invert) -! poequ (symmetric/Hermitian positive, equilibrate) - Not Implemented -! pptrf, pptrs, ppcon, pprfs, pptri, ppequ (symmetric/Hermitian positive packed storage) - Not Implemented -! pbtrf, pbtrs, pbcon, pbrfs, pbequ (symmetric/Hermitian positive band) - Not Implemented -! pttrf, pttrs, ptcon, ptrfs (symmetric/Hermitian positive tridiagonal) - Not Implemented -! sytrf, sytrs, sycon, syrfs, sytri (symmetric indefinite) - Not Implemented -! hetrf, hetrs, hecon, herfs, hetri (Hermitian indefinite) - Not Implemented -! sptrf, sptrs, spcon, sprfs, sptri (symmetric indefinite packed storage) - Not Implemented -! hptrf, hptrs, hpcon, hprfs, hptri (Hermitian indefinite packed storage) - Not Implemented -! trtrs (triangular, solve) - Not Implemented -! trcon (triangular, estimate condition number) - Not Implemented -! trrfs (triangular, error bounds) - Not Implemented -! trtri (triangular, invert) -! tptrs, tpcon, tprfs, tptri (triangular packed storage) - Not Implemented -! tbtrs, tbcon, tbrfs (triangular band) - Not Implemented - -! -! Factorize -! ========= - - subroutine getrf(m,n,a,piv,info) - - ! lu,piv,info = getrf(a,overwrite_a=0) - ! Compute an LU factorization of a general M-by-N matrix A. - ! 
A = P * L * U - threadsafe - callstatement {int i;(*f2py_func)(&m,&n,a,&m,piv,&info);for(i=0,n=MIN(m,n);i\*,int*,int*,int* - - integer depend(a),intent(hide):: m = shape(a,0) - integer depend(a),intent(hide):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=lu) :: a - integer dimension(MIN(m,n)),depend(m,n),intent(out) :: piv - integer intent(out):: info - - end subroutine getrf - - subroutine potrf(n,a,info,lower,clean) - - ! c,info = potrf(a,lower=0,clean=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! clean==1 zeros strictly lower or upper parts of U or L, respectively - - - ! <_def=,,\,k,\2> - ! <_init1=*(a+j*n+i)=0.0;,\0,k=j*n+i;(*(a+k)).r=(*(a+k)).i=0.0;,\2> - ! <_init2=*(a+i*n+j)=0.0;,\0,k=i*n+j;(*(a+k)).r=(*(a+k)).i=0.0;,\2> - callstatement (*f2py_func)((lower?"L":"U"),&n,a,&n,&info); if(clean){int i,j<_def>;if(lower){for(i=0;i\}} else {for(i=0;i\}}} - callprotoargument char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(clean==0||clean==1) :: clean = 1 - integer depend(a),intent(hide):: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - integer intent(out) :: info - - end subroutine potrf - - -! -! Solve using factorization -! ========================= - - - subroutine getrs(n,nrhs,lu,piv,b,info,trans) - - ! x,info = getrs(lu,piv,b,trans=0,overwrite_b=0) - ! Solve A * X = B if trans=0 - ! Solve A^T * X = B if trans=1 - ! Solve A^H * X = B if trans=2 - ! 
A = P * L * U - threadsafe - callstatement {int i;for(i=0;i\*,int*,int*,*,int*,int* - - integer optional,intent(in),check(trans>=0 && trans \<=2) :: trans = 0 - - integer depend(lu),intent(hide):: n = shape(lu,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(in) :: lu - check(shape(lu,0)==shape(lu,1)) :: lu - integer dimension(n),intent(in),depend(n) :: piv - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n),check(shape(lu,0)==shape(b,0)) :: b - integer intent(out):: info - end subroutine getrs - - subroutine potrs(n,nrhs,c,b,info,lower) - - ! x,info = potrs(c,b,lower=0=1,overwrite_b=0) - ! Solve A * X = B. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,&nrhs,c,&n,b,&n,&info) - callprotoargument char*,int*,int*,*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(in) :: c - check(shape(c,0)==shape(c,1)) :: c - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(c,0)==shape(b,0)) :: b - integer intent(out) :: info - - end subroutine potrs - -! -! Invert using factorization -! ========================== - - subroutine getri(n,lu,piv,work,lwork,info) - - ! inv_a,info = getri(lu,piv,lwork=3*n,overwrite_lu=0) - ! Find A inverse A^-1. - ! 
A = P * L * U - - callstatement {int i;for(i=0;i\*,int*,int*,*,int*,int* - - integer depend(lu),intent(hide):: n = shape(lu,0) - dimension(n,n),intent(in,out,copy,out=inv_a) :: lu - check(shape(lu,0)==shape(lu,1)) :: lu - integer dimension(n),intent(in),depend(n) :: piv - integer intent(out):: info - integer optional,intent(in),depend(n),check(lwork>=n) :: lwork=3*n - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - end subroutine getri - - subroutine potri(n,c,info,lower) - - ! inv_a,info = potri(c,lower=0,overwrite_c=0) - ! Compute A inverse A^-1. - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,c,&n,&info) - callprotoargument char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=inv_a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end subroutine potri - - subroutine trtri(n,c,info,lower,unitdiag) - - ! inv_c,info = trtri(c,lower=0,unitdiag=1,overwrite_c=0) - ! Compute C inverse C^-1 where - ! C = U if lower = 0 - ! C = L if lower = 1 - ! C is non-unit triangular matrix if unitdiag = 0 - ! 
C is unit triangular matrix if unitdiag = 1 - - callstatement (*f2py_func)((lower?"L":"U"),(unitdiag?"U":"N"),&n,c,&n,&info) - callprotoargument char*,char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(in,out,copy,out=inv_c) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end subroutine trtri - diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_lls.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_lls.pyf.src deleted file mode 100644 index 1ef10fe994..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_lls.pyf.src +++ /dev/null @@ -1,48 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Simple Driver Routines for Standard and Generalized Linear Least Squares Problems: -! gels (using QR or LQ factorization, assume full rank) - Not Implemented -! gglse (solve LSE problem using GRQ) - Not Implemented -! ggglm (solve GLM problem using GQR) - Not Implemented -! -! Divide and Conquer and Expert Driver Routines for Linear Least Squares Problems: -! gelss (using SVD, allow rank-deficiency) -! gelsy (using complete orthogonal factor) - Not Implemented -! gelsd (using D&C SVD, allow rank-deficiency) - Not Implemented -! - - - subroutine gelss(m,n,minmn,maxmn,nrhs,a,b,s,cond,r,work,lwork,<,,rwork\,,rwork\,>info) - - ! v,x,s,rank,info = gelss(a,b,cond=-1.0,overwrite_a=0,overwrite_b=0) - ! Solve Minimize 2-norm(A * X - B). 
- - callstatement (*f2py_func)(&m,&n,&nrhs,a,&m,b,&maxmn,s,&cond,&r,work,&lwork,<,,rwork\,,rwork\,>&info) - callprotoargument int*,int*,int*,*,int*,*,int*,*,*,int*,*,int*,<,,float*\,,double*\,>int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(m,n):: minmn = MIN(m,n) - integer intent(hide),depend(m,n):: maxmn = MAX(m,n) - dimension(m,n),intent(in,out,copy,out=v) :: a - - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(maxmn,nrhs),check(maxmn==shape(b,0)),depend(maxmn) :: b - intent(in,out,copy,out=x) b - - integer intent(out)::info - ! <_lwork=3*minmn+MAX(2*minmn\,MAX(maxmn\,nrhs)),\0,2*minmn+MAX(maxmn\,nrhs),\2> - integer optional,intent(in),depend(nrhs,minmn,maxmn), & - check(lwork>=1) & - :: lwork=<_lwork> - !check(lwork>=<_lwork>) - dimension(lwork),intent(hide),depend(lwork) :: work - dimension(5*minmn-1),intent(hide),depend(lwork) :: rwork - intent(in),optional :: cond = -1.0 - integer intent(out,out=rank) :: r - intent(out),dimension(minmn),depend(minmn) :: s - - end subroutine gelss - diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_llsc.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_llsc.pyf.src deleted file mode 100644 index 094ba3e0dc..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_llsc.pyf.src +++ /dev/null @@ -1,69 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for orthogonal/unitary factorizations: -! -! geqp3 (QR, general,factorize with pivoting) - Not Implemented -! geqrf (QR, general, factorize, no pivoting) -! orgqr, ungqr (QR, general, generate Q) -! ormqr, unmqr (QR, general, multiply matrix by Q) - Not Implemented -! gelqf (LQ, general, factorize, no pivoting) - Not Implemented -! orglq, unglq (LQ, general, generate Q) - Not Implemented -! ormlq, unmlq (LQ, general, multiply matrix by Q) - Not Implemented -! geqlf (QL, general, factorize, no pivoting) - Not Implemented -! 
orgql, ungql (QL, general, generate Q) - Not Implemented -! ormql, unmql (QL, general, multiply matrix by Q) - Not Implemented -! gerqf (RQ, general, factorize, no pivoting) - Not Implemented -! orgrq, ungrq (RQ, general, generate Q) - Not Implemented -! ormrq, unmrq (RQ, general, multiply matrix by Q) - Not Implemented -! tzrzf (RZ, trapezoidal, factorize, no pivoting) - Not Implemented -! ormrz, unmrz (RZ, trapezoidal,multiply matrix by Q) - Not Implemented -! -! Computational routines for general orthogonal/unitary factorizations: -! -! ggqrf (GQR, factorize) - Not Implemented -! ggrqf (GRQ, factorize) - Not Implemented -! - - subroutine geqrf(m,n,a,tau,work,lwork,info) - - ! qr,tau,info = geqrf(a,lwork=n,overwrite_a=0) - ! Compute a QR factorization of a real M-by-N matrix A: - ! A = Q * R. - - callstatement (*f2py_func)(&m,&n,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=qr) :: a - dimension(MIN(m,n)),intent(out) :: tau - - <_lwork=n,\0,\0,\0> - integer optional,intent(in),depend(n),check(lwork\>=<_lwork>) :: lwork=<_lwork> - dimension(lwork),intent(hide,cache),depend(lwork) :: work - integer intent(out) :: info - end subroutine geqrf - - ! - subroutine gqr(m,n,k,qr,tau,work,lwork,info) - - ! q,info = (or|un)gqr(qr,tau,lwork=n,overwrite_qr=0,overwrite_tau=1) - ! Compute matrix Q of a QR factorization of a real M-by-N matrix A - ! from the results of geqrf. - - callstatement (*f2py_func)(&m,&n,&k,qr,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(qr):: m = shape(qr,0) - integer intent(hide),depend(qr):: n = shape(qr,1) - integer intent(hide),depend(m,n):: k = MIN(m,n) - dimension(m,n),intent(in,out,copy,out=q) :: qr - dimension(k),intent(in,overwrite) :: tau - ! 
<_lwork=n,\0,\0,\0> - integer optional,intent(in),depend(n),check(lwork\>=<_lwork>) :: lwork=<_lwork> - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - integer intent(out) :: info - end subroutine gqr diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_sevc.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_sevc.pyf.src deleted file mode 100644 index e71f174316..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_sevc.pyf.src +++ /dev/null @@ -1,22 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for the symmetric eigenproblem: -! -! sytrd, hetrd (dense symmetric/Hermitian, tridiagonal reduction) - Not Implemented -! sptrd, hptrd (packed symmetric/Hermitian, tridiagonal reduction) - Not Implemented -! sbtrd, hbtrd (band symmetric/Hermitian, tridiagonal reduction) - Not Implemented -! orgtr, ungtr (orthogonal/unitary, generate matrix after sytrd) - Not Implemented -! ormtr, unmtr (orthogonal/unitary, multiply matrix after sytrd) - Not Implemented -! opgtr, upgtr (packed orthogonal/unitary, generate matrix after sptrd) - Not Implemented -! opmtr, upmtr (packed orthogonal/unitary, multiply matrix after sptrd) - Not Implemented -! steqr (symmetric tridiagonal eigenvalues/vectors via QR) - Not Implemented -! sterf (symmetric tridiagonal eigenvalues only via root-free QR, real) - Not Implemented -! stedc (symmetric tridiagonal eigenvalues/vectors via D&C) - Not Implemented -! stegr (symmetric tridiagonal eigenvalues/vectors via RRR) - Not Implemented -! stebz (symmetric tridiagonal eigenvalues only via bisection, real) - Not Implemented -! stein (symmetric tridiagonal eigenvectors by inverse iteration) - Not Implemented -! pteqr (symmetric tridiagonal positive eigenvalues/vectors) - Not Implemented -! 
- diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_svdc.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_svdc.pyf.src deleted file mode 100644 index e62f659916..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_svdc.pyf.src +++ /dev/null @@ -1,18 +0,0 @@ -! -*- f90 -*- -! -! Contains wrappers for the following LAPACK routines: -! -! Computational routines for the singular value decomposition: -! -! gebrd - (general, bidiagonal reduction) - Not Implemented -! gbbrd - (general band, bidiagonal reduction) - Not Implemented -! orgbr, ungber (orthogonal/unitary, generate matrix after bidiagonal reduction) - Not Implemented -! ormbr, unmber (orthogonal/unitary, multiply matrix after bidiagonal reduction) - Not Implemented -! bdsqr (bidiagonal, SVD using QR or dqds) - Not Implemented -! bdsdc (bidiagonal, SVD using D&C, real) - Not Implemented -! -! Computational routines for the generalized singular value decomposition: -! -! ggsvp (triangular reduction) - Not Implemented -! tgsja (GSVD of a pair of triangular matrices) - Not Implemented -! 
diff --git a/scipy-0.10.1/scipy/lib/lapack/flapack_user.pyf.src b/scipy-0.10.1/scipy/lib/lapack/flapack_user.pyf.src deleted file mode 100644 index e9bb73312b..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/flapack_user.pyf.src +++ /dev/null @@ -1,8 +0,0 @@ -python module gees__user__routines - interface gees_user_interface - function select(<_arg=arg1\,arg2,\0,arg,\2>) - :: <_arg> - logical :: select - end function select - end interface gees_user_interface -end python module gees__user__routines diff --git a/scipy-0.10.1/scipy/lib/lapack/info.py b/scipy-0.10.1/scipy/lib/lapack/info.py deleted file mode 100644 index 383342ca57..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/info.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Wrappers to LAPACK library -========================== - - flapack -- wrappers for Fortran [*] LAPACK routines - clapack -- wrappers for ATLAS LAPACK routines - calc_lwork -- calculate optimal lwork parameters - get_lapack_funcs -- query for wrapper functions. - -[*] If ATLAS libraries are available then Fortran routines - actually use ATLAS routines and should perform equally - well to ATLAS routines. - -Module flapack -++++++++++++++ - -In the following all function names are shown without -type prefix (s,d,c,z). Optimal values for lwork can -be computed using calc_lwork module. 
- -Linear Equations ----------------- - - Drivers:: - - lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0) - lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0) - c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0) - - Computational routines:: - - lu,piv,info = getrf(a,overwrite_a=0) - x,info = getrs(lu,piv,b,trans=0,overwrite_b=0) - inv_a,info = getri(lu,piv,lwork=min_lwork,overwrite_lu=0) - - c,info = potrf(a,lower=0,clean=1,overwrite_a=0) - x,info = potrs(c,b,lower=0,overwrite_b=0) - inv_a,info = potri(c,lower=0,overwrite_c=0) - - inv_c,info = trtri(c,lower=0,unitdiag=0,overwrite_c=0) - -Linear Least Squares (LLS) Problems ------------------------------------ - - Drivers:: - - v,x,s,rank,info = gelss(a,b,cond=-1.0,lwork=min_lwork,overwrite_a=0,overwrite_b=0) - - Computational routines:: - - qr,tau,info = geqrf(a,lwork=min_lwork,overwrite_a=0) - q,info = orgqr|ungqr(qr,tau,lwork=min_lwork,overwrite_qr=0,overwrite_tau=1) - -Generalized Linear Least Squares (LSE and GLM) Problems -------------------------------------------------------- - -Standard Eigenvalue and Singular Value Problems ------------------------------------------------ - - Drivers:: - - w,v,info = syev|heev(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0) - w,v,info = syevd|heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0) - w,v,info = syevr|heevr(a,compute_v=1,lower=0,vrange=,irange=,atol=-1.0,lwork=min_lwork,overwrite_a=0) - t,sdim,(wr,wi|w),vs,info = gees(select,a,compute_v=1,sort_t=0,lwork=min_lwork,select_extra_args=(),overwrite_a=0) - wr,(wi,vl|w),vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0) - u,s,vt,info = gesdd(a,compute_uv=1,lwork=min_lwork,overwrite_a=0) - - Computational routines:: - - ht,tau,info = gehrd(a,lo=0,hi=n-1,lwork=min_lwork,overwrite_a=0) - ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0) - -Generalized Eigenvalue and Singular Value Problems --------------------------------------------------- - - 
Drivers:: - - w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0) - w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0) - (alphar,alphai|alpha),beta,vl,vr,info = ggev(a,b,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0,overwrite_b=0) - - -Auxiliary routines ------------------- - - a,info = lauum(c,lower=0,overwrite_c=0) - a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0) - -Module clapack -++++++++++++++ - -Linear Equations ----------------- - - Drivers:: - - lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0) - c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0) - - Computational routines:: - - lu,piv,info = getrf(a,rowmajor=1,overwrite_a=0) - x,info = getrs(lu,piv,b,trans=0,rowmajor=1,overwrite_b=0) - inv_a,info = getri(lu,piv,rowmajor=1,overwrite_lu=0) - - c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0) - x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0) - inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0) - - inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0) - -Auxiliary routines ------------------- - - a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0) - -Module calc_lwork -+++++++++++++++++ - -Optimal lwork is maxwrk. Default is minwrk. 
- - minwrk,maxwrk = gehrd(prefix,n,lo=0,hi=n-1) - minwrk,maxwrk = gesdd(prefix,m,n,compute_uv=1) - minwrk,maxwrk = gelss(prefix,m,n,nrhs) - minwrk,maxwrk = getri(prefix,n) - minwrk,maxwrk = geev(prefix,n,compute_vl=1,compute_vr=1) - minwrk,maxwrk = heev(prefix,n,lower=0) - minwrk,maxwrk = syev(prefix,n,lower=0) - minwrk,maxwrk = gees(prefix,n,compute_v=1) - minwrk,maxwrk = geqrf(prefix,m,n) - minwrk,maxwrk = gqr(prefix,m,n) - - -""" -postpone_import = 1 diff --git a/scipy-0.10.1/scipy/lib/lapack/scons_support.py b/scipy-0.10.1/scipy/lib/lapack/scons_support.py deleted file mode 100644 index f78d0474cf..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/scons_support.py +++ /dev/null @@ -1,27 +0,0 @@ -from os.path import join as pjoin, splitext, basename as pbasename - -def generate_interface_emitter(target, source, env): - base = str(target[0]) - return (['%s.pyf' % base], source) - -def do_generate_fake_interface(target, source, env): - """Generate a (fake) .pyf file from another pyf file (!).""" - # XXX: do this correctly - target_name = str(target[0]) - source_name = str(source[0]) - - # XXX handle skip names - name = splitext(pbasename(target_name))[0] - #generate_interface(name, source_name, target_name) - - f = open(target_name, 'w') - f.write('python module '+name+'\n') - f.write('usercode void empty_module(void) {}\n') - f.write('interface\n') - f.write('subroutine empty_module()\n') - f.write('intent(c) empty_module\n') - f.write('end subroutine empty_module\n') - f.write('end interface\nend python module'+name+'\n') - f.close() - - return 0 diff --git a/scipy-0.10.1/scipy/lib/lapack/setup.py b/scipy-0.10.1/scipy/lib/lapack/setup.py deleted file mode 100755 index 443e93cd4a..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/setup.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python - -import os -from glob import glob - -#------------------- -# To skip wrapping single precision atlas/lapack routines, set -# the following flag to True: - -skip_single_routines 
= 0 - -#-------------------- - -tmpl_empty_clapack_pyf = ''' -python module clapack - usercode void empty_module(void) {} - interface - subroutine empty_module() - intent(c) empty_module - end subroutine empty_module - end interface -end python module clapack -''' - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('lapack',parent_package,top_path) - - lapack_opt = get_info('lapack_opt',notfound_action=2) - - atlas_version = ([v[3:-3] for k,v in lapack_opt.get('define_macros',[]) \ - if k=='ATLAS_INFO']+[None])[0] - if atlas_version: - print ('ATLAS version: %s' % atlas_version) - - target_dir = '' - skip_names = {'clapack':[],'flapack':[]} - if skip_single_routines: - target_dir = 'dbl' - skip_names['clapack'].extend(\ - 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ - ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ - ' slauum clauum strtri ctrtri'.split()) - skip_names['flapack'].extend(skip_names['clapack']) - skip_names['flapack'].extend(\ - 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ - ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' - ' sggev cggev'.split()) - - if atlas_version=='3.2.1_pre3.3.6': - target_dir = os.path.join(target_dir,'atlas321') - skip_names['clapack'].extend(\ - 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ - ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) - elif atlas_version and atlas_version>'3.4.0' and atlas_version<='3.5.12': - skip_names['clapack'].extend('cpotrf zpotrf'.split()) - - # flapack: - config.add_extension('flapack', - sources = ['flapack.pyf.src'], - depends = [__file__,'flapack_*.pyf.src'], - f2py_options = ['skip:']+skip_names['flapack']+[':'], - extra_info = lapack_opt - ) - - # clapack: - def get_clapack_source(ext, build_dir): - name = ext.name.split('.')[-1] - assert name=='clapack', repr(name) - if atlas_version 
is None: - target = os.path.join(build_dir,target_dir,'clapack.pyf') - from distutils.dep_util import newer - if newer(__file__,target): - f = open(target,'w') - f.write(tmpl_empty_clapack_pyf) - f.close() - else: - target = ext.depends[0] - assert os.path.basename(target)=='clapack.pyf.src' - return target - - config.add_extension('clapack', - sources = [get_clapack_source], - depends = ['clapack.pyf.src'], - f2py_options = ['skip:']+skip_names['clapack']+[':'], - extra_info = lapack_opt - ) - - # calc_lwork: - config.add_extension('calc_lwork', - sources = ['calc_lwork.f'], - extra_info = lapack_opt - ) - - # atlas_version: - if os.name == 'nt' and 'FPATH' in os.environ: - define_macros = [('NO_ATLAS_INFO', 1)] - else: - define_macros = [] - - config.add_extension('atlas_version', - sources = ['atlas_version.c'], - extra_info = lapack_opt, - define_macros = define_macros - ) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/lib/lapack/setupscons.py b/scipy-0.10.1/scipy/lib/lapack/setupscons.py deleted file mode 100755 index f747424770..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/setupscons.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python - -import os -from glob import glob - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('lapack',parent_package,top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/lib/lapack/tests/common.py b/scipy-0.10.1/scipy/lib/lapack/tests/common.py deleted file mode 100644 index 5374af3e23..0000000000 --- 
a/scipy-0.10.1/scipy/lib/lapack/tests/common.py +++ /dev/null @@ -1,52 +0,0 @@ -import numpy as np - -from scipy.lib.lapack import flapack, clapack - -FUNCS_TP = {'ssygv' : np.float32, - 'dsygv': np.float, - 'ssygvd' : np.float32, - 'dsygvd' : np.float, - 'ssyev' : np.float32, - 'dsyev': np.float, - 'ssyevr' : np.float32, - 'dsyevr' : np.float, - 'ssyevr' : np.float32, - 'dsyevr' : np.float, - 'sgehrd' : np.float32, - 'dgehrd' : np.float, - 'sgebal' : np.float32, - 'dgebal': np.float} - -# Test FLAPACK if not empty -if hasattr(flapack, 'empty_module'): - FLAPACK_IS_EMPTY = True -else: - FLAPACK_IS_EMPTY = False - -# Test CLAPACK if not empty and not the same as clapack -if hasattr(clapack, 'empty_module') or (clapack == flapack): - CLAPACK_IS_EMPTY = True -else: - CLAPACK_IS_EMPTY = False - -funcs = ['ssygv', 'dsygv', 'ssygvd', 'dsygvd', 'ssyev', 'dsyev', 'ssyevr', - 'dsyevr', 'sgehrd', 'dgehrd', 'sgebal', 'dgebal'] - -if not FLAPACK_IS_EMPTY: - FUNCS_FLAPACK = {} - for f in funcs: - FUNCS_FLAPACK[f] = getattr(flapack, f) -else: - FUNCS_FLAPACK = None - -if not CLAPACK_IS_EMPTY: - FUNCS_CLAPACK = {} - for f in funcs: - try: - FUNCS_CLAPACK[f] = getattr(clapack, f) - except AttributeError: - FUNCS_CLAPACK[f] = None -else: - FUNCS_CLAPACK = None - -PREC = {np.float32: 5, np.float: 12} diff --git a/scipy-0.10.1/scipy/lib/lapack/tests/test_esv.py b/scipy-0.10.1/scipy/lib/lapack/tests/test_esv.py deleted file mode 100644 index 120f64277b..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/tests/test_esv.py +++ /dev/null @@ -1,138 +0,0 @@ -import numpy as np -from numpy.testing import TestCase, assert_array_almost_equal, dec, \ - assert_equal, assert_ - -from common import FUNCS_TP, FLAPACK_IS_EMPTY, CLAPACK_IS_EMPTY, FUNCS_FLAPACK, \ - FUNCS_CLAPACK, PREC - -SYEV_ARG = np.array([[1,2,3],[2,2,3],[3,3,6]]) -SYEV_REF = np.array([-0.6699243371851365, 0.4876938861533345, - 9.182230451031804]) - -class TestEsv(TestCase): - def _test_base(self, func, lang): - tp = FUNCS_TP[func] - 
a = SYEV_ARG.astype(tp) - if lang == 'C': - f = FUNCS_CLAPACK[func] - elif lang == 'F': - f = FUNCS_FLAPACK[func] - else: - raise ValueError("Lang %s ??" % lang) - - w, v, info = f(a) - - assert_(not info, msg=repr(info)) - assert_array_almost_equal(w, SYEV_REF, decimal=PREC[tp]) - for i in range(3): - assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i], - decimal=PREC[tp]) - - def _test_base_irange(self, func, irange, lang): - tp = FUNCS_TP[func] - a = SYEV_ARG.astype(tp) - if lang == 'C': - f = FUNCS_CLAPACK[func] - elif lang == 'F': - f = FUNCS_FLAPACK[func] - else: - raise ValueError("Lang %s ??" % lang) - - w, v, info = f(a, irange=irange) - rslice = slice(irange[0], irange[1]+1) - m = irange[1] - irange[0] + 1 - assert_(not info, msg=repr(info)) - - assert_equal(len(w),m) - assert_array_almost_equal(w, SYEV_REF[rslice], decimal=PREC[tp]) - - for i in range(m): - assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i], - decimal=PREC[tp]) - - def _test_base_vrange(self, func, vrange, lang): - tp = FUNCS_TP[func] - a = SYEV_ARG.astype(tp) - ew = [value for value in SYEV_REF if vrange[0] < value <= vrange[1]] - - if lang == 'C': - f = FUNCS_CLAPACK[func] - elif lang == 'F': - f = FUNCS_FLAPACK[func] - else: - raise ValueError("Lang %s ??" 
% lang) - - w, v, info = f(a, vrange=vrange) - assert_(not info, msg=repr(info)) - - assert_array_almost_equal(w, ew, decimal=PREC[tp]) - - for i in range(len(w)): - assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i], - decimal=PREC[tp]) - - def _test_syevr_ranges(self, func, lang): - for irange in ([0, 2], [0, 1], [1, 1], [1, 2]): - self._test_base_irange(func, irange, lang) - - for vrange in ([-1, 10], [-1, 1], [0, 1], [1, 10]): - self._test_base_vrange(func, vrange, lang) - - # Flapack tests - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_ssyev(self): - self._test_base('ssyev', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dsyev(self): - self._test_base('dsyev', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_ssyevr(self): - self._test_base('ssyevr', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dsyevr(self): - self._test_base('dsyevr', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_ssyevr_ranges(self): - self._test_syevr_ranges('ssyevr', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dsyevr_ranges(self): - self._test_syevr_ranges('dsyevr', 'F') - - # Clapack tests - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyev"], - "Clapack empty, skip clapack test") - def test_clapack_ssyev(self): - self._test_base('ssyev', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyev"], - "Clapack empty, skip clapack test") - def test_clapack_dsyev(self): - self._test_base('dsyev', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyevr"], - "Clapack empty, skip clapack test") - def test_clapack_ssyevr(self): - self._test_base('ssyevr', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyevr"], - "Clapack empty, skip clapack test") - def test_clapack_dsyevr(self): - self._test_base('dsyevr', 'C') - - 
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyevr"], - "Clapack empty, skip clapack test") - def test_clapack_ssyevr_ranges(self): - self._test_syevr_ranges('ssyevr', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyevr"], - "Clapack empty, skip clapack test") - def test_clapack_dsyevr_ranges(self): - self._test_syevr_ranges('dsyevr', 'C') - -if __name__=="__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/lib/lapack/tests/test_gesv.py b/scipy-0.10.1/scipy/lib/lapack/tests/test_gesv.py deleted file mode 100644 index 9b7f4bb4f0..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/tests/test_gesv.py +++ /dev/null @@ -1,94 +0,0 @@ -import numpy as np -from numpy.testing import TestCase, assert_array_almost_equal, dec, \ - assert_equal, assert_ - -from common import FUNCS_TP, FLAPACK_IS_EMPTY, CLAPACK_IS_EMPTY, FUNCS_FLAPACK, \ - FUNCS_CLAPACK, PREC - -A = np.array([[1,2,3],[2,2,3],[3,3,6]]) -B = np.array([[10,-1,1],[-1,8,-2],[1,-2,6]]) - -class TestSygv(TestCase): - def _test_base(self, func, lang, itype): - tp = FUNCS_TP[func] - a = A.astype(tp) - b = B.astype(tp) - if lang == 'C': - f = FUNCS_CLAPACK[func] - elif lang == 'F': - f = FUNCS_FLAPACK[func] - else: - raise ValueError("Lang %s ??" 
% lang) - - w, v, info = f(a, b, itype=itype) - - assert_(not info, msg=repr(info)) - for i in range(3): - if itype == 1: - assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*np.dot(b,v[:,i]), - decimal=PREC[tp]) - elif itype == 2: - assert_array_almost_equal(np.dot(a,np.dot(b,v[:,i])), w[i]*v[:,i], - decimal=PREC[tp]) - elif itype == 3: - assert_array_almost_equal(np.dot(b,np.dot(a,v[:,i])), - w[i]*v[:,i], decimal=PREC[tp] - 1) - else: - raise ValueError(itype) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_ssygv_1(self): - self._test_base('ssygv', 'F', 1) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_ssygv_2(self): - self._test_base('ssygv', 'F', 2) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_ssygv_3(self): - self._test_base('ssygv', 'F', 3) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dsygv_1(self): - self._test_base('dsygv', 'F', 1) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dsygv_2(self): - self._test_base('dsygv', 'F', 2) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dsygv_3(self): - self._test_base('dsygv', 'F', 3) - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"], - "Clapack empty, skip flapack test") - def test_clapack_ssygv_1(self): - self._test_base('ssygv', 'C', 1) - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"], - "Clapack empty, skip flapack test") - def test_clapack_ssygv_2(self): - self._test_base('ssygv', 'C', 2) - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"], - "Clapack empty, skip flapack test") - def test_clapack_ssygv_3(self): - self._test_base('ssygv', 'C', 3) - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"], - "Clapack empty, skip flapack test") - def test_clapack_dsygv_1(self): - self._test_base('dsygv', 'C', 1) - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"], 
- "Clapack empty, skip flapack test") - def test_clapack_dsygv_2(self): - self._test_base('dsygv', 'C', 2) - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"], - "Clapack empty, skip flapack test") - def test_clapack_dsygv_3(self): - self._test_base('dsygv', 'C', 3) - -if __name__=="__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/lib/lapack/tests/test_lapack.py b/scipy-0.10.1/scipy/lib/lapack/tests/test_lapack.py deleted file mode 100644 index b413b395a7..0000000000 --- a/scipy-0.10.1/scipy/lib/lapack/tests/test_lapack.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -# -# Created by: Pearu Peterson, September 2002 -# -import numpy as np -from numpy.testing import * - -from common import FUNCS_TP, FUNCS_CLAPACK, FUNCS_FLAPACK, FLAPACK_IS_EMPTY, \ - CLAPACK_IS_EMPTY - -class TestLapack(TestCase): - def _test_gebal_base(self, func, lang): - tp = FUNCS_TP[func] - - a = np.array([[1,2,3],[4,5,6],[7,8,9]]).astype(tp) - a1 = np.array([[1,0,0,3e-4], - [4,0,0,2e-3], - [7,1,0,0], - [0,1,0,0]]).astype(tp) - - if lang == 'C': - f = FUNCS_CLAPACK[func] - elif lang == 'F': - f = FUNCS_FLAPACK[func] - else: - raise ValueError("Lang %s ??" % lang) - - ba, lo, hi, pivscale, info = f(a) - assert_(not info, msg=repr(info)) - assert_array_almost_equal(ba, a) - assert_equal((lo,hi), (0, len(a[0])-1)) - assert_array_almost_equal(pivscale, np.ones(len(a))) - - ba, lo, hi, pivscale, info = f(a1,permute=1,scale=1) - assert_(not info, msg=repr(info)) - - def _test_gehrd_base(self, func, lang): - tp = FUNCS_TP[func] - - a = np.array([[-149, -50,-154], - [ 537, 180, 546], - [ -27, -9, -25]]).astype(tp) - - if lang == 'C': - f = FUNCS_CLAPACK[func] - elif lang == 'F': - f = FUNCS_FLAPACK[func] - else: - raise ValueError("Lang %s ??" 
% lang) - - ht, tau, info = f(a) - assert_(not info, msg=repr(info)) - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_sgebal(self): - self._test_gebal_base('sgebal', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test") - def test_dgebal(self): - self._test_gebal_base('dgebal', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip clapack test") - def test_sgehrd(self): - self._test_gehrd_base('sgehrd', 'F') - - @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip clapack test") - def test_dgehrd(self): - self._test_gehrd_base('dgehrd', 'F') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgebal"], - "Clapack empty, skip flapack test") - def test_clapack_sgebal(self): - self._test_gebal_base('sgebal', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgebal"], - "Clapack empty, skip flapack test") - def test_clapack_dgebal(self): - self._test_gebal_base('dgebal', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgehrd"], - "Clapack empty, skip flapack test") - def test_clapack_sgehrd(self): - self._test_gehrd_base('sgehrd', 'C') - - @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgehrd"], - "Clapack empty, skip flapack test") - def test_clapack_dgehrd(self): - self._test_gehrd_base('dgehrd', 'C') diff --git a/scipy-0.10.1/scipy/lib/setup.py b/scipy-0.10.1/scipy/lib/setup.py deleted file mode 100644 index e89cc7f074..0000000000 --- a/scipy-0.10.1/scipy/lib/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('lib',parent_package,top_path) - config.add_subpackage('blas') - config.add_subpackage('lapack') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/lib/setupscons.py b/scipy-0.10.1/scipy/lib/setupscons.py deleted 
file mode 100644 index a0766d05f3..0000000000 --- a/scipy-0.10.1/scipy/lib/setupscons.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('lib',parent_package,top_path, - setup_name = 'setupscons.py') - config.add_subpackage('blas') - config.add_subpackage('lapack') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/linalg/SConscript b/scipy-0.10.1/scipy/linalg/SConscript deleted file mode 100644 index ec64813b88..0000000000 --- a/scipy-0.10.1/scipy/linalg/SConscript +++ /dev/null @@ -1,155 +0,0 @@ -# Last Change: Sat Nov 01 11:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numscons import GetNumpyEnvironment -from numscons import CheckCBLAS, CheckF77BLAS, CheckF77LAPACK,\ - CheckCLAPACK, IsVeclib, IsAccelerate, \ - IsATLAS, GetATLASVersion, CheckF77Clib -from numscons import write_info - -from scons_support import do_generate_interface, do_generate_fake_interface, \ - generate_interface_emitter - -#from scons_support import CheckBrokenMathlib, define_no_smp, \ -# generate_config_header, generate_config_header_emitter - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') - -# XXX: handle cblas wrapper for complex (check in numpy.scons or here ?) 
-env.AppendUnique(F2PYOPTIONS = '--quiet') - -env['BUILDERS']['haha'] = Builder(action = do_generate_interface, - emitter = generate_interface_emitter) - -env['BUILDERS']['hihi'] = Builder(action = do_generate_fake_interface, - emitter = generate_interface_emitter) - -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -fenv = env.Clone() - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckCBLAS' : CheckCBLAS, - 'CheckCLAPACK' : CheckCLAPACK}) - -#------------------------- -# Checking cblas/clapack -#------------------------- -if config.CheckCBLAS(): - has_cblas = 1 -else: - has_cblas = 0 -if has_cblas: - if IsATLAS(env, 'cblas'): - version = GetATLASVersion(env) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) - else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -if config.CheckCLAPACK(): - has_clapack = 1 -else: - has_clapack = 0 - -config.Finish() -write_info(env) - -#--------------------------- -# Checking F77 blas/lapack -#--------------------------- -fconfig = fenv.NumpyConfigure(custom_tests = {'CheckBLAS' : CheckF77BLAS, - 'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -if not fconfig.CheckF77Clib(): - raise RuntimeError("Could not check F/C runtime library for %s/%s, " \ - "contact the maintainer" % (fenv['CC'], fenv['F77'])) - -st = fconfig.CheckBLAS(check_version = 1) -if not st: - raise RuntimeError("no blas found, necessary for linalg module") -if IsATLAS(fenv, 'blas'): - version = GetATLASVersion(fenv) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) -else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -st = fconfig.CheckLAPACK() -if not st: - raise RuntimeError("no lapack found, necessary for linalg module") 
-fconfig.Finish() -write_info(fenv) - - -#========== -# Build -#========== -#------------ -# fblas -#------------ -fenv.FromFTemplate('fblas.pyf', 'fblas.pyf.src') -source = ['fblas.pyf'] -if IsVeclib(fenv, 'blas') or IsAccelerate(fenv, 'blas'): - source.append(pjoin('src', 'fblaswrap_veclib_c.c')) -else: - source.append(pjoin('src', 'fblaswrap.f')) -fenv.NumpyPythonExtension('fblas', source) - -#------------ -# cblas -#------------ -if has_cblas: - env.haha('cblas', 'generic_cblas.pyf') -else: - env.hihi('cblas', 'generic_cblas.pyf') -env.NumpyPythonExtension('cblas', source = 'cblas.pyf') - -#------------ -# flapack -#------------ -yop = fenv.haha('flapack', 'generic_flapack.pyf') -# XXX: automatically scan dependency on flapack_user_routines.pyf ? -fenv.Depends(yop, 'flapack_user_routines.pyf') -fenv.NumpyPythonExtension('flapack', 'flapack.pyf') - -#------------ -# clapack -#------------ -if has_clapack: - env.haha('clapack', 'generic_clapack.pyf') -else: - env.hihi('clapack', 'generic_clapack.pyf') -env.NumpyPythonExtension('clapack', source = 'clapack.pyf') - -#---------------- -# _flinalg -#---------------- -flinalg_fsrc = [pjoin('src', i) for i in ['det.f', 'lu.f']] -flinalg_src = fenv.F2py(pjoin('src', '_flinalgmodule.c'), flinalg_fsrc) - -fenv.NumpyPythonExtension('_flinalg', source = flinalg_src + flinalg_fsrc) - -#---------------- -# calc_lwork: -#---------------- -calc_fsrc = [pjoin('src', 'calc_lwork.f')] -calc_src = env.F2py(pjoin('src', 'calc_lworkmodule.c'), calc_fsrc) -fenv.NumpyPythonExtension('calc_lwork', calc_src + calc_fsrc) - -#-------------- -# Atlas version -#-------------- -atlas_env = env.Clone() -if not IsATLAS(env, 'cblas'): - atlas_env.AppendUnique(CPPDEFINES = "NO_ATLAS_INFO") -atlas_env.NumpyPythonExtension('atlas_version', 'atlas_version.c') diff --git a/scipy-0.10.1/scipy/linalg/SConstruct b/scipy-0.10.1/scipy/linalg/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/linalg/SConstruct +++ 
/dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/linalg/__init__.py b/scipy-0.10.1/scipy/linalg/__init__.py deleted file mode 100644 index a94f748457..0000000000 --- a/scipy-0.10.1/scipy/linalg/__init__.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -==================================== -Linear algebra (:mod:`scipy.linalg`) -==================================== - -.. currentmodule:: scipy.linalg - -Linear algebra functions. - -.. seealso:: - - `numpy.linalg` for more linear algebra functions. Note that - although `scipy.linalg` imports most of them, identically named - functions from `scipy.linalg` may offer more or slightly differing - functionality. - - -Basics -====== - -.. autosummary:: - :toctree: generated/ - - inv - Find the inverse of a square matrix - solve - Solve a linear system of equations - solve_banded - Solve a banded linear system - solveh_banded - Solve a Hermitian or symmetric banded system - solve_triangular - Solve a triangular matrix - det - Find the determinant of a square matrix - norm - Matrix and vector norm - lstsq - Solve a linear least-squares problem - pinv - Pseudo-inverse (Moore-Penrose) using lstsq - pinv2 - Pseudo-inverse using svd - kron - Kronecker product of two arrays - tril - Construct a lower-triangular matrix from a given matrix - triu - Construct an upper-triangular matrix from a given matrix - -Eigenvalue Problems -=================== - -.. autosummary:: - :toctree: generated/ - - eig - Find the eigenvalues and eigenvectors of a square matrix - eigvals - Find just the eigenvalues of a square matrix - eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix - eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix - eig_banded - Find the eigenvalues and eigenvectors of a banded matrix - eigvals_banded - Find just the eigenvalues of a banded matrix - -Decompositions -============== - -.. 
autosummary:: - :toctree: generated/ - - lu - LU decomposition of a matrix - lu_factor - LU decomposition returning unordered matrix and pivots - lu_solve - Solve Ax=b using back substitution with output of lu_factor - svd - Singular value decomposition of a matrix - svdvals - Singular values of a matrix - diagsvd - Construct matrix of singular values from output of svd - orth - Construct orthonormal basis for the range of A using svd - cholesky - Cholesky decomposition of a matrix - cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix - cho_factor - Cholesky decomposition for use in solving a linear system - cho_solve - Solve previously factored linear system - cho_solve_banded - Solve previously factored banded linear system - qr - QR decomposition of a matrix - schur - Schur decomposition of a matrix - rsf2csf - Real to complex Schur form - hessenberg - Hessenberg form of a matrix - -Matrix Functions -================ - -.. autosummary:: - :toctree: generated/ - - expm - Matrix exponential using Pade approximation - expm2 - Matrix exponential using eigenvalue decomposition - expm3 - Matrix exponential using Taylor-series expansion - logm - Matrix logarithm - cosm - Matrix cosine - sinm - Matrix sine - tanm - Matrix tangent - coshm - Matrix hyperbolic cosine - sinhm - Matrix hyperbolic sine - tanhm - Matrix hyperbolic tangent - signm - Matrix sign - sqrtm - Matrix square root - funm - Evaluating an arbitrary matrix function - -Special Matrices -================ - -.. 
autosummary:: - :toctree: generated/ - - block_diag - Construct a block diagonal matrix from submatrices - circulant - Circulant matrix - companion - Companion matrix - hadamard - Hadamard matrix of order 2**n - hankel - Hankel matrix - hilbert - Hilbert matrix - invhilbert - Inverse Hilbert matrix - leslie - Leslie matrix - toeplitz - Toeplitz matrix - tri - Construct a matrix filled with ones at and below a given diagonal - -""" - -from linalg_version import linalg_version as __version__ - -from misc import * -from basic import * -from decomp import * -from decomp_lu import * -from decomp_cholesky import * -from decomp_qr import * -from decomp_svd import * -from decomp_schur import * -from matfuncs import * -from blas import * -from special_matrices import * - -__all__ = filter(lambda s: not s.startswith('_'), dir()) - -from numpy.dual import register_func -for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals', - 'eigvalsh', 'lstsq', 'cholesky']: - try: - register_func(k, eval(k)) - except ValueError: - pass - -try: - register_func('pinv', pinv2) -except ValueError: - pass - -del k, register_func - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/linalg/_testutils.py b/scipy-0.10.1/scipy/linalg/_testutils.py deleted file mode 100644 index 26be37cfe5..0000000000 --- a/scipy-0.10.1/scipy/linalg/_testutils.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np - -class _FakeMatrix(object): - def __init__(self, data): - self._data = data - self.__array_interface__ = data.__array_interface__ - -class _FakeMatrix2(object): - def __init__(self, data): - self._data = data - def __array__(self): - return self._data - -def _get_array(shape, dtype): - """ - Get a test array of given shape and data type. - Returned NxN matrices are posdef, and 2xN are banded-posdef. 
- - """ - if len(shape) == 2 and shape[0] == 2: - # yield a banded positive definite one - x = np.zeros(shape, dtype=dtype) - x[0,1:] = -1 - x[1] = 2 - return x - elif len(shape) == 2 and shape[0] == shape[1]: - # always yield a positive definite matrix - x = np.zeros(shape, dtype=dtype) - j = np.arange(shape[0]) - x[j,j] = 2 - x[j[:-1],j[:-1]+1] = -1 - x[j[:-1]+1,j[:-1]] = -1 - return x - else: - np.random.seed(1234) - return np.random.randn(*shape).astype(dtype) - -def _id(x): - return x - -def assert_no_overwrite(call, shapes, dtypes=None): - """ - Test that a call does not overwrite its input arguments - """ - - if dtypes is None: - dtypes = [np.float32, np.float64, np.complex64, np.complex128] - - for dtype in dtypes: - for order in ["C", "F"]: - for faker in [_id, _FakeMatrix, _FakeMatrix2]: - orig_inputs = [_get_array(s, dtype) for s in shapes] - inputs = [faker(x.copy(order)) for x in orig_inputs] - call(*inputs) - msg = "call modified inputs [%r, %r]" % (dtype, faker) - for a, b in zip(inputs, orig_inputs): - np.testing.assert_equal(a, b, err_msg=msg) diff --git a/scipy-0.10.1/scipy/linalg/atlas_version.c b/scipy-0.10.1/scipy/linalg/atlas_version.c deleted file mode 100644 index 6915a7f937..0000000000 --- a/scipy-0.10.1/scipy/linalg/atlas_version.c +++ /dev/null @@ -1,61 +0,0 @@ -#include "Python.h" -#include "numpy/npy_3kcompat.h" - -static PyObject* version(PyObject* self, PyObject* dummy) -{ -#if defined(NO_ATLAS_INFO) - printf("NO ATLAS INFO AVAILABLE\n"); -#else - void ATL_buildinfo(void); - ATL_buildinfo(); -#endif - - Py_INCREF(Py_None); - return Py_None; -} - -static char version_doc[] = "Print the build info from atlas."; - -static PyMethodDef module_methods[] = { - {"version", version, METH_VARARGS, version_doc}, - {NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 - -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "atlas_version", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; - -PyObject 
*PyInit_atlas_version(void) -{ -#define RETVAL m - PyObject *m; - m = PyModule_Create(&moduledef); -#else -#define RETVAL -PyMODINIT_FUNC initatlas_version(void) -{ - PyObject *m = NULL; - m = Py_InitModule("atlas_version", module_methods); -#endif - if (m == NULL) { - return RETVAL; - } -#if defined(ATLAS_INFO) - { - PyObject *d = PyModule_GetDict(m); - PyDict_SetItemString(d,"ATLAS_VERSION", - PyUString_FromString(ATLAS_INFO)); - } -#endif - return RETVAL; -} diff --git a/scipy-0.10.1/scipy/linalg/basic.py b/scipy-0.10.1/scipy/linalg/basic.py deleted file mode 100644 index b29c8d23e2..0000000000 --- a/scipy-0.10.1/scipy/linalg/basic.py +++ /dev/null @@ -1,548 +0,0 @@ -# -# Author: Pearu Peterson, March 2002 -# -# w/ additions by Travis Oliphant, March 2002 - -__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded', - 'inv', 'det', 'lstsq', 'pinv', 'pinv2'] - -import numpy as np - -from flinalg import get_flinalg_funcs -from lapack import get_lapack_funcs -from misc import LinAlgError, _datacopied -from scipy.linalg import calc_lwork -import decomp_svd - - -# Linear equations -def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False, - debug=False): - """Solve the equation a x = b for x - - Parameters - ---------- - a : array, shape (M, M) - b : array, shape (M,) or (M, N) - sym_pos : boolean - Assume a is symmetric and positive definite - lower : boolean - Use only data contained in the lower triangle of a, if sym_pos is true. - Default is to use upper triangle. 
- overwrite_a : boolean - Allow overwriting data in a (may enhance performance) - overwrite_b : boolean - Allow overwriting data in b (may enhance performance) - - Returns - ------- - x : array, shape (M,) or (M, N) depending on b - Solution to the system a x = b - - Raises LinAlgError if a is singular - - """ - a1, b1 = map(np.asarray_chkfinite,(a,b)) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise ValueError('expected square matrix') - if a1.shape[0] != b1.shape[0]: - raise ValueError('incompatible dimensions') - overwrite_a = overwrite_a or _datacopied(a1, a) - overwrite_b = overwrite_b or _datacopied(b1, b) - if debug: - print 'solve:overwrite_a=',overwrite_a - print 'solve:overwrite_b=',overwrite_b - if sym_pos: - posv, = get_lapack_funcs(('posv',), (a1,b1)) - c, x, info = posv(a1, b1, lower=lower, - overwrite_a=overwrite_a, - overwrite_b=overwrite_b) - else: - gesv, = get_lapack_funcs(('gesv',), (a1,b1)) - lu, piv, x, info = gesv(a1, b1, overwrite_a=overwrite_a, - overwrite_b=overwrite_b) - - if info == 0: - return x - if info > 0: - raise LinAlgError("singular matrix") - raise ValueError('illegal value in %d-th argument of internal gesv|posv' - % -info) - -def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, - overwrite_b=False, debug=False): - """Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. - - Parameters - ---------- - a : array, shape (M, M) - b : array, shape (M,) or (M, N) - lower : boolean - Use only data contained in the lower triangle of a. - Default is to use upper triangle. - trans : {0, 1, 2, 'N', 'T', 'C'} - Type of system to solve: - - ======== ========= - trans system - ======== ========= - 0 or 'N' a x = b - 1 or 'T' a^T x = b - 2 or 'C' a^H x = b - ======== ========= - - unit_diagonal : boolean - If True, diagonal elements of A are assumed to be 1 and - will not be referenced. 
- - overwrite_b : boolean - Allow overwriting data in b (may enhance performance) - - Returns - ------- - x : array, shape (M,) or (M, N) depending on b - Solution to the system a x = b - - Raises - ------ - LinAlgError - If a is singular - - Notes - ----- - .. versionadded:: 0.9.0 - """ - - a1, b1 = map(np.asarray_chkfinite,(a,b)) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise ValueError('expected square matrix') - if a1.shape[0] != b1.shape[0]: - raise ValueError('incompatible dimensions') - overwrite_b = overwrite_b or _datacopied(b1, b) - if debug: - print 'solve:overwrite_b=',overwrite_b - trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) - trtrs, = get_lapack_funcs(('trtrs',), (a1,b1)) - x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, - trans=trans, unitdiag=unit_diagonal) - - if info == 0: - return x - if info > 0: - raise LinAlgError("singular matrix: resolution failed at diagonal %s" % (info-1)) - raise ValueError('illegal value in %d-th argument of internal trtrs') - -def solve_banded((l, u), ab, b, overwrite_ab=False, overwrite_b=False, - debug=False): - """ - Solve the equation a x = b for x, assuming a is banded matrix. - - The matrix a is stored in ab using the matrix diagonal ordered form:: - - ab[u + i - j, j] == a[i,j] - - Example of ab (shape of a is (6,6), u=1, l=2):: - - * a01 a12 a23 a34 a45 - a00 a11 a22 a33 a44 a55 - a10 a21 a32 a43 a54 * - a20 a31 a42 a53 * * - - Parameters - ---------- - (l, u) : (integer, integer) - Number of non-zero lower and upper diagonals - ab : array, shape (l+u+1, M) - Banded matrix - b : array, shape (M,) or (M, K) - Right-hand side - overwrite_ab : boolean - Discard data in ab (may enhance performance) - overwrite_b : boolean - Discard data in b (may enhance performance) - - Returns - ------- - x : array, shape (M,) or (M, K) - The solution to the system a x = b - - """ - a1, b1 = map(np.asarray_chkfinite, (ab, b)) - - # Validate shapes. 
- if a1.shape[-1] != b1.shape[0]: - raise ValueError("shapes of ab and b are not compatible.") - if l + u + 1 != a1.shape[0]: - raise ValueError("invalid values for the number of lower and upper diagonals:" - " l+u+1 (%d) does not equal ab.shape[0] (%d)" % (l+u+1, ab.shape[0])) - - overwrite_b = overwrite_b or _datacopied(b1, b) - - gbsv, = get_lapack_funcs(('gbsv',), (a1, b1)) - a2 = np.zeros((2*l+u+1, a1.shape[1]), dtype=gbsv.dtype) - a2[l:,:] = a1 - lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True, - overwrite_b=overwrite_b) - if info == 0: - return x - if info > 0: - raise LinAlgError("singular matrix") - raise ValueError('illegal value in %d-th argument of internal gbsv' % -info) - -def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False): - """Solve equation a x = b. a is Hermitian positive-definite banded matrix. - - The matrix a is stored in ab either in lower diagonal or upper - diagonal ordered form: - - ab[u + i - j, j] == a[i,j] (if upper form; i <= j) - ab[ i - j, j] == a[i,j] (if lower form; i >= j) - - Example of ab (shape of a is (6,6), u=2):: - - upper form: - * * a02 a13 a24 a35 - * a01 a12 a23 a34 a45 - a00 a11 a22 a33 a44 a55 - - lower form: - a00 a11 a22 a33 a44 a55 - a10 a21 a32 a43 a54 * - a20 a31 a42 a53 * * - - Cells marked with * are not used. - - Parameters - ---------- - ab : array, shape (u + 1, M) - Banded matrix - b : array, shape (M,) or (M, K) - Right-hand side - overwrite_ab : boolean - Discard data in ab (may enhance performance) - overwrite_b : boolean - Discard data in b (may enhance performance) - lower : boolean - Is the matrix in the lower form. (Default is upper form) - - Returns - ------- - x : array, shape (M,) or (M, K) - The solution to the system a x = b - - """ - - ab, b = map(np.asarray_chkfinite, (ab, b)) - - # Validate shapes. 
- if ab.shape[-1] != b.shape[0]: - raise ValueError("shapes of ab and b are not compatible.") - - pbsv, = get_lapack_funcs(('pbsv',), (ab, b)) - c, x, info = pbsv(ab, b, lower=lower, overwrite_ab=overwrite_ab, - overwrite_b=overwrite_b) - if info > 0: - raise LinAlgError("%d-th leading minor not positive definite" % info) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal pbsv' - % -info) - return x - - -# matrix inversion -def inv(a, overwrite_a=False): - """ - Compute the inverse of a matrix. - - Parameters - ---------- - a : array_like - Square matrix to be inverted. - overwrite_a : bool, optional - Discard data in `a` (may improve performance). Default is False. - - Returns - ------- - ainv : ndarray - Inverse of the matrix `a`. - - Raises - ------ - LinAlgError : - If `a` is singular. - ValueError : - If `a` is not square, or not 2-dimensional. - - Examples - -------- - >>> a = np.array([[1., 2.], [3., 4.]]) - >>> sp.linalg.inv(a) - array([[-2. , 1. ], - [ 1.5, -0.5]]) - >>> np.dot(a, sp.linalg.inv(a)) - array([[ 1., 0.], - [ 0., 1.]]) - - """ - a1 = np.asarray_chkfinite(a) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise ValueError('expected square matrix') - overwrite_a = overwrite_a or _datacopied(a1, a) - #XXX: I found no advantage or disadvantage of using finv. -## finv, = get_flinalg_funcs(('inv',),(a1,)) -## if finv is not None: -## a_inv,info = finv(a1,overwrite_a=overwrite_a) -## if info==0: -## return a_inv -## if info>0: raise LinAlgError, "singular matrix" -## if info<0: raise ValueError,\ -## 'illegal value in %d-th argument of internal inv.getrf|getri'%(-info) - getrf, getri = get_lapack_funcs(('getrf','getri'), (a1,)) - #XXX: C ATLAS versions of getrf/i have rowmajor=1, this could be - # exploited for further optimization. But it will be probably - # a mess. So, a good testing site is required before trying - # to do that. 
- if getrf.module_name[:7] == 'clapack' != getri.module_name[:7]: - # ATLAS 3.2.1 has getrf but not getri. - lu, piv, info = getrf(np.transpose(a1), rowmajor=0, - overwrite_a=overwrite_a) - lu = np.transpose(lu) - else: - lu, piv, info = getrf(a1, overwrite_a=overwrite_a) - if info == 0: - if getri.module_name[:7] == 'flapack': - lwork = calc_lwork.getri(getri.prefix, a1.shape[0]) - lwork = lwork[1] - # XXX: the following line fixes curious SEGFAULT when - # benchmarking 500x500 matrix inverse. This seems to - # be a bug in LAPACK ?getri routine because if lwork is - # minimal (when using lwork[0] instead of lwork[1]) then - # all tests pass. Further investigation is required if - # more such SEGFAULTs occur. - lwork = int(1.01 * lwork) - inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1) - else: # clapack - inv_a, info = getri(lu, piv, overwrite_lu=1) - if info > 0: - raise LinAlgError("singular matrix") - if info < 0: - raise ValueError('illegal value in %d-th argument of internal ' - 'getrf|getri' % -info) - return inv_a - - -### Determinant - -def det(a, overwrite_a=False): - """Compute the determinant of a matrix - - Parameters - ---------- - a : array, shape (M, M) - - Returns - ------- - det : float or complex - Determinant of a - - Notes - ----- - The determinant is computed via LU factorization, LAPACK routine z/dgetrf. - """ - a1 = np.asarray_chkfinite(a) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise ValueError('expected square matrix') - overwrite_a = overwrite_a or _datacopied(a1, a) - fdet, = get_flinalg_funcs(('det',), (a1,)) - a_det, info = fdet(a1, overwrite_a=overwrite_a) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal ' - 'det.getrf' % -info) - return a_det - -### Linear Least Squares - -def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False): - """ - Compute least-squares solution to equation Ax = b. - - Compute a vector x such that the 2-norm ``|b - A x|`` is minimized. 
- - Parameters - ---------- - a : array, shape (M, N) - Left hand side matrix (2-D array). - b : array, shape (M,) or (M, K) - Right hand side matrix or vector (1-D or 2-D array). - cond : float, optional - Cutoff for 'small' singular values; used to determine effective - rank of a. Singular values smaller than - ``rcond * largest_singular_value`` are considered zero. - overwrite_a : bool, optional - Discard data in `a` (may enhance performance). Default is False. - overwrite_b : bool, optional - Discard data in `b` (may enhance performance). Default is False. - - Returns - ------- - x : array, shape (N,) or (N, K) depending on shape of b - Least-squares solution. - residues : ndarray, shape () or (1,) or (K,) - Sums of residues, squared 2-norm for each column in ``b - a x``. - If rank of matrix a is < N or > M this is an empty array. - If b was 1-D, this is an (1,) shape array, otherwise the shape is (K,). - rank : int - Effective rank of matrix `a`. - s : array, shape (min(M,N),) - Singular values of `a`. The condition number of a is - ``abs(s[0]/s[-1])``. - - Raises - ------ - LinAlgError : - If computation does not converge. 
- - - See Also - -------- - optimize.nnls : linear least squares with non-negativity constraint - - """ - a1, b1 = map(np.asarray_chkfinite, (a, b)) - if len(a1.shape) != 2: - raise ValueError('expected matrix') - m, n = a1.shape - if len(b1.shape) == 2: - nrhs = b1.shape[1] - else: - nrhs = 1 - if m != b1.shape[0]: - raise ValueError('incompatible dimensions') - gelss, = get_lapack_funcs(('gelss',), (a1, b1)) - if n > m: - # need to extend b matrix as it will be filled with - # a larger solution matrix - b2 = np.zeros((n, nrhs), dtype=gelss.dtype) - if len(b1.shape) == 2: - b2[:m,:] = b1 - else: - b2[:m,0] = b1 - b1 = b2 - overwrite_a = overwrite_a or _datacopied(a1, a) - overwrite_b = overwrite_b or _datacopied(b1, b) - if gelss.module_name[:7] == 'flapack': - # get optimal work array - work = gelss(a1, b1, lwork=-1)[4] - lwork = work[0].real.astype(np.int) - v, x, s, rank, work, info = gelss( - a1, b1, cond=cond, lwork=lwork, overwrite_a=overwrite_a, - overwrite_b=overwrite_b) - - else: - raise NotImplementedError('calling gelss from %s' % gelss.module_name) - if info > 0: - raise LinAlgError("SVD did not converge in Linear Least Squares") - if info < 0: - raise ValueError('illegal value in %d-th argument of internal gelss' - % -info) - resids = np.asarray([], dtype=x.dtype) - if n < m: - x1 = x[:n] - if rank == n: - resids = np.sum(np.abs(x[n:])**2, axis=0) - x = x1 - return x, resids, rank, s - - -def pinv(a, cond=None, rcond=None): - """Compute the (Moore-Penrose) pseudo-inverse of a matrix. - - Calculate a generalized inverse of a matrix using a least-squares - solver. - - Parameters - ---------- - a : array, shape (M, N) - Matrix to be pseudo-inverted - cond, rcond : float - Cutoff for 'small' singular values in the least-squares solver. - Singular values smaller than rcond*largest_singular_value are - considered zero. 
- - Returns - ------- - B : array, shape (N, M) - - Raises LinAlgError if computation does not converge - - Examples - -------- - >>> from numpy import * - >>> a = random.randn(9, 6) - >>> B = linalg.pinv(a) - >>> allclose(a, dot(a, dot(B, a))) - True - >>> allclose(B, dot(B, dot(a, B))) - True - - """ - a = np.asarray_chkfinite(a) - b = np.identity(a.shape[0], dtype=a.dtype) - if rcond is not None: - cond = rcond - return lstsq(a, b, cond=cond)[0] - - -def pinv2(a, cond=None, rcond=None): - """Compute the (Moore-Penrose) pseudo-inverse of a matrix. - - Calculate a generalized inverse of a matrix using its - singular-value decomposition and including all 'large' singular - values. - - Parameters - ---------- - a : array, shape (M, N) - Matrix to be pseudo-inverted - cond, rcond : float or None - Cutoff for 'small' singular values. - Singular values smaller than rcond*largest_singular_value are - considered zero. - - If None or -1, suitable machine precision is used. - - Returns - ------- - B : array, shape (N, M) - - Raises LinAlgError if SVD computation does not converge - - Examples - -------- - >>> from numpy import * - >>> a = random.randn(9, 6) - >>> B = linalg.pinv2(a) - >>> allclose(a, dot(a, dot(B, a))) - True - >>> allclose(B, dot(B, dot(a, B))) - True - - """ - a = np.asarray_chkfinite(a) - u, s, vh = decomp_svd.svd(a) - t = u.dtype.char - if rcond is not None: - cond = rcond - if cond in [None,-1]: - eps = np.finfo(np.float).eps - feps = np.finfo(np.single).eps - _array_precision = {'f': 0, 'd': 1, 'F': 0, 'D': 1} - cond = {0: feps*1e3, 1: eps*1e6}[_array_precision[t]] - m, n = a.shape - cutoff = cond*np.maximum.reduce(s) - psigma = np.zeros((m, n), t) - for i in range(len(s)): - if s[i] > cutoff: - psigma[i,i] = 1.0/np.conjugate(s[i]) - #XXX: use lapack/blas routines for dot - return np.transpose(np.conjugate(np.dot(np.dot(u,psigma),vh))) diff --git a/scipy-0.10.1/scipy/linalg/benchmarks/bench_basic.py 
b/scipy-0.10.1/scipy/linalg/benchmarks/bench_basic.py deleted file mode 100644 index 176148e293..0000000000 --- a/scipy-0.10.1/scipy/linalg/benchmarks/bench_basic.py +++ /dev/null @@ -1,122 +0,0 @@ -import sys -from numpy.testing import * -import numpy.linalg as linalg - -def random(size): - return rand(*size) - -class TestSolve(TestCase): - - def bench_random(self): - basic_solve = linalg.solve - print - print ' Solving system of linear equations' - print ' ==================================' - - print ' | contiguous | non-contiguous ' - print '----------------------------------------------' - print ' size | scipy | basic | scipy | basic ' - - for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]: - repeat *= 2 - print '%5s' % size, - sys.stdout.flush() - - a = random([size,size]) - # larger diagonal ensures non-singularity: - for i in range(size): a[i,i] = 10*(.1+a[i,i]) - b = random([size]) - - print '| %6.2f ' % measure('solve(a,b)',repeat), - sys.stdout.flush() - - print '| %6.2f ' % measure('basic_solve(a,b)',repeat), - sys.stdout.flush() - - a = a[-1::-1,-1::-1] # turn into a non-contiguous array - assert_(not a.flags['CONTIGUOUS']) - - print '| %6.2f ' % measure('solve(a,b)',repeat), - sys.stdout.flush() - - print '| %6.2f ' % measure('basic_solve(a,b)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - -class TestInv(TestCase): - - def bench_random(self): - basic_inv = linalg.inv - print - print ' Finding matrix inverse' - print ' ==================================' - print ' | contiguous | non-contiguous ' - print '----------------------------------------------' - print ' size | scipy | basic | scipy | basic' - - for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]: - repeat *= 2 - print '%5s' % size, - sys.stdout.flush() - - a = random([size,size]) - # large diagonal ensures non-singularity: - for i in range(size): a[i,i] = 10*(.1+a[i,i]) - - print '| %6.2f ' % measure('inv(a)',repeat), - sys.stdout.flush() - - 
print '| %6.2f ' % measure('basic_inv(a)',repeat), - sys.stdout.flush() - - a = a[-1::-1,-1::-1] # turn into a non-contiguous array - assert_(not a.flags['CONTIGUOUS']) - - print '| %6.2f ' % measure('inv(a)',repeat), - sys.stdout.flush() - - print '| %6.2f ' % measure('basic_inv(a)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - - -class TestDet(TestCase): - - def bench_random(self): - basic_det = linalg.det - print - print ' Finding matrix determinant' - print ' ==================================' - print ' | contiguous | non-contiguous ' - print '----------------------------------------------' - print ' size | scipy | basic | scipy | basic ' - - for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]: - repeat *= 2 - print '%5s' % size, - sys.stdout.flush() - - a = random([size,size]) - - print '| %6.2f ' % measure('det(a)',repeat), - sys.stdout.flush() - - print '| %6.2f ' % measure('basic_det(a)',repeat), - sys.stdout.flush() - - a = a[-1::-1,-1::-1] # turn into a non-contiguous array - assert_(not a.flags['CONTIGUOUS']) - - print '| %6.2f ' % measure('det(a)',repeat), - sys.stdout.flush() - - print '| %6.2f ' % measure('basic_det(a)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/linalg/benchmarks/bench_decom.py b/scipy-0.10.1/scipy/linalg/benchmarks/bench_decom.py deleted file mode 100644 index bf570f0fc1..0000000000 --- a/scipy-0.10.1/scipy/linalg/benchmarks/bench_decom.py +++ /dev/null @@ -1,31 +0,0 @@ -""" Benchmark functions for linalg.decomp module - -""" -import sys - -from numpy import linalg -from numpy.testing import * - -def random(size): - return rand(*size) - -def bench_random(): - Numeric_eigvals = linalg.eigvals - print - print ' Finding matrix eigenvalues' - print ' ==================================' - print ' | contiguous '#'| non-contiguous ' - print 
'----------------------------------------------' - print ' size | scipy '#'| core | scipy | core ' - - for size,repeat in [(20,150),(100,7),(200,2)]: - repeat *= 1 - print '%5s' % size, - sys.stdout.flush() - - a = random([size,size]) - - print '| %6.2f ' % measure('eigvals(a)',repeat), - sys.stdout.flush() - - print ' (secs for %s calls)' % (repeat) diff --git a/scipy-0.10.1/scipy/linalg/bento.info b/scipy-0.10.1/scipy/linalg/bento.info deleted file mode 100644 index 8394756394..0000000000 --- a/scipy-0.10.1/scipy/linalg/bento.info +++ /dev/null @@ -1,24 +0,0 @@ -HookFile: bscript - -Library: - Extension: fblas - Sources: - fblas.pyf.src, - src/fblaswrap.f - Extension: cblas - Sources: - generic_cblas.pyf - Extension: flapack - Sources: - generic_flapack.pyf - Extension: clapack - Sources: - generic_clapack.pyf - Extension: _flinalg - Sources: - src/det.f, src/lu.f - Extension: calc_lwork - Sources: - src/calc_lwork.f - Extension: atlas_version - Sources: atlas_version.c diff --git a/scipy-0.10.1/scipy/linalg/blas.py b/scipy-0.10.1/scipy/linalg/blas.py deleted file mode 100644 index 99b3a3bc93..0000000000 --- a/scipy-0.10.1/scipy/linalg/blas.py +++ /dev/null @@ -1,105 +0,0 @@ -# -# Author: Pearu Peterson, March 2002 -# refactoring by Fabian Pedregosa, March 2010 -# - -__all__ = ['get_blas_funcs'] - -import numpy as np -# The following ensures that possibly missing flavor (C or Fortran) is -# replaced with the available one. If none is available, exception -# is raised at the first attempt to use the resources. - -from scipy.linalg import cblas, fblas -if hasattr(cblas,'empty_module'): - cblas = fblas -elif hasattr(fblas,'empty_module'): - fblas = cblas - - # 'd' will be default for 'i',.. 
-_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z', 'G':'z'} - -# some convenience alias for complex functions -_blas_alias = {'cnrm2' : 'scnrm2', 'znrm2' : 'dznrm2', - 'cdot' : 'cdotc', 'zdot' : 'zdotc', - 'cger' : 'cgerc', 'zger' : 'zgerc', - 'sdotc': 'sdot', 'sdotu': 'sdot', - 'ddotc': 'ddot', 'ddotu': 'ddot'} - - -def get_blas_funcs(names, arrays=(), dtype=None): - """Return available BLAS function objects from names. - - Arrays are used to determine the optimal prefix of BLAS routines. - - Parameters - ---------- - names : str or sequence of str - Name(s) of BLAS functions withouth type prefix. - - arrays : sequency of ndarrays, optional - Arrays can be given to determine optiomal prefix of BLAS - routines. If not given, double-precision routines will be - used, otherwise the most generic type in arrays will be used. - - dtype : str or dtype, optional - Data-type specifier. Not used if `arrays` is non-empty. - - - Returns - ------- - funcs : list - List containing the found function(s). - - - Notes - ----- - This routines automatically chooses between Fortran/C - interfaces. Fortran code is used whenever possible for arrays with - column major order. In all other cases, C code is preferred. - - In BLAS, the naming convention is that all functions start with a - type prefix, which depends on the type of the principal - matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy - types {float32, float64, complex64, complex128} respectevely, and - are stored in attribute `typecode` of the returned functions. 
- """ - - blas_funcs = [] - unpack = False - dtype = np.dtype(dtype) - module1 = (cblas, 'cblas') - module2 = (fblas, 'fblas') - - if isinstance(names, str): - names = (names,) - unpack = True - - if arrays: - # use the most generic type in arrays - dtype, index = max( - [(ar.dtype, i) for i, ar in enumerate(arrays)]) - if arrays[index].flags['FORTRAN']: - # prefer Fortran for leading array with column major order - module1, module2 = module2, module1 - - prefix = _type_conv.get(dtype.char, 'd') - - for i, name in enumerate(names): - func_name = prefix + name - func_name = _blas_alias.get(func_name, func_name) - func = getattr(module1[0], func_name, None) - module_name = module1[1] - if func is None: - func = getattr(module2[0], func_name, None) - module_name = module2[1] - if func is None: - raise ValueError( - 'BLAS function %s could not be found' % func_name) - func.module_name, func.typecode = module_name, prefix - blas_funcs.append(func) - - if unpack: - return blas_funcs[0] - else: - return blas_funcs diff --git a/scipy-0.10.1/scipy/linalg/bscript b/scipy-0.10.1/scipy/linalg/bscript deleted file mode 100644 index 40d952a61c..0000000000 --- a/scipy-0.10.1/scipy/linalg/bscript +++ /dev/null @@ -1,66 +0,0 @@ -import sys - -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - bld = context.waf_context - f2py = bld.tools["f2py"] - - def builder(extension): - source = extension.sources[:] - if sys.platform == "darwin": - source.pop(source.index("src/fblaswrap.f")) - source.append("src/fblaswrap_veclib_c.c") - return default_builder(extension, - features="c fc pyext bento cshlib f2py", - source=source, - use="FBLAS") - context.register_builder("fblas", builder) - - def builder(extension): - t = default_builder(extension, - features="c pyext bento cshlib f2py", - use="CBLAS") - t.mappings[".pyf"] = f2py.interface_gen_callback - return t - context.register_builder("cblas", builder) - - def 
builder(extension): - t = default_builder(extension, - features="c pyext bento cshlib f2py", - use="FLAPACK") - t.mappings[".pyf"] = f2py.interface_gen_callback - return t - context.register_builder("flapack", builder) - - def builder(extension): - kw = {} - if bld.env.HAS_CLAPACK: - kw["use"] = "CLAPACK" - kw["features"] = "c pyext bento cshlib f2py" - interface_gen_callback = f2py.interface_gen_callback - else: - kw["features"] = "c fc pyext bento cshlib f2py" - interface_gen_callback = f2py.fake_interface_gen_callback - t = default_builder(extension, **kw) - t.mappings[".pyf"] = interface_gen_callback - return t - context.register_builder("clapack", builder) - - context.register_builder("_flinalg", - lambda e: default_builder(e, - features="c pyext bento cshlib f2py f2py_fortran", - use="FLAPACK")) - context.register_builder("calc_lwork", - lambda e: default_builder(e, - features="c pyext bento cshlib f2py f2py_fortran", - use="FLAPACK CLIB")) - - defines = [] - if bld.env.HAS_ATLAS: - raise NotImplementedError("Atlas not implemented yet") - else: - defines.append("NO_ATLAS_INFO=1") - context.register_builder("atlas_version", lambda e: default_builder(e, defines=defines)) diff --git a/scipy-0.10.1/scipy/linalg/decomp.py b/scipy-0.10.1/scipy/linalg/decomp.py deleted file mode 100644 index c6c2c12ebe..0000000000 --- a/scipy-0.10.1/scipy/linalg/decomp.py +++ /dev/null @@ -1,777 +0,0 @@ -# -# Author: Pearu Peterson, March 2002 -# -# additions by Travis Oliphant, March 2002 -# additions by Eric Jones, June 2002 -# additions by Johannes Loehnert, June 2006 -# additions by Bart Vandereycken, June 2006 -# additions by Andrew D Straw, May 2007 -# additions by Tiziano Zito, November 2008 -# -# April 2010: Functions for LU, QR, SVD, Schur and Cholesky decompositions were -# moved to their own files. Still in this file are functions for eigenstuff -# and for the Hessenberg form. 
- -__all__ = ['eig','eigh','eig_banded','eigvals','eigvalsh', 'eigvals_banded', - 'hessenberg'] - -import numpy -from numpy import array, asarray_chkfinite, asarray, diag, zeros, ones, \ - isfinite, inexact, nonzero, iscomplexobj, cast, flatnonzero, conj - -# Local imports -from scipy.linalg import calc_lwork -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs -from blas import get_blas_funcs - - -_I = cast['F'](1j) - -def _make_complex_eigvecs(w, vin, dtype): - """ - Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output - """ - # - see LAPACK man page DGGEV at ALPHAI - v = numpy.array(vin, dtype=dtype) - m = (w.imag > 0) - m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709 - for i in flatnonzero(m): - v.imag[:,i] = vin[:,i+1] - conj(v[:,i], v[:,i+1]) - return v - -def _geneig(a1, b, left, right, overwrite_a, overwrite_b): - b1 = asarray(b) - overwrite_b = overwrite_b or _datacopied(b1, b) - if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: - raise ValueError('expected square matrix') - ggev, = get_lapack_funcs(('ggev',), (a1, b1)) - cvl, cvr = left, right - if ggev.module_name[:7] == 'clapack': - raise NotImplementedError('calling ggev from %s' % ggev.module_name) - res = ggev(a1, b1, lwork=-1) - lwork = res[-2][0].real.astype(numpy.int) - if ggev.prefix in 'cz': - alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork, - overwrite_a, overwrite_b) - w = alpha / beta - else: - alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork, - overwrite_a,overwrite_b) - w = (alphar + _I * alphai) / beta - if info < 0: - raise ValueError('illegal value in %d-th argument of internal ggev' - % -info) - if info > 0: - raise LinAlgError("generalized eig algorithm did not converge (info=%d)" - % info) - - only_real = numpy.logical_and.reduce(numpy.equal(w.imag, 0.0)) - if not (ggev.prefix in 'cz' or only_real): - t = w.dtype.char - if left: - vl = _make_complex_eigvecs(w, vl, t) - if 
right: - vr = _make_complex_eigvecs(w, vr, t) - if not (left or right): - return w - if left: - if right: - return w, vl, vr - return w, vl - return w, vr - -def eig(a, b=None, left=False, right=True, overwrite_a=False, overwrite_b=False): - """Solve an ordinary or generalized eigenvalue problem of a square matrix. - - Find eigenvalues w and right or left eigenvectors of a general matrix:: - - a vr[:,i] = w[i] b vr[:,i] - a.H vl[:,i] = w[i].conj() b.H vl[:,i] - - where .H is the Hermitean conjugation. - - Parameters - ---------- - a : array, shape (M, M) - A complex or real matrix whose eigenvalues and eigenvectors - will be computed. - b : array, shape (M, M) - Right-hand side matrix in a generalized eigenvalue problem. - If omitted, identity matrix is assumed. - left : boolean - Whether to calculate and return left eigenvectors - right : boolean - Whether to calculate and return right eigenvectors - - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - overwrite_b : boolean - Whether to overwrite data in b (may improve performance) - - Returns - ------- - w : double or complex array, shape (M,) - The eigenvalues, each repeated according to its multiplicity. - - (if left == True) - vl : double or complex array, shape (M, M) - The normalized left eigenvector corresponding to the eigenvalue w[i] - is the column v[:,i]. - - (if right == True) - vr : double or complex array, shape (M, M) - The normalized right eigenvector corresponding to the eigenvalue w[i] - is the column vr[:,i]. 
- - Raises LinAlgError if eigenvalue computation does not converge - - See Also - -------- - eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays - - """ - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise ValueError('expected square matrix') - overwrite_a = overwrite_a or (_datacopied(a1, a)) - if b is not None: - b = asarray_chkfinite(b) - if b.shape != a1.shape: - raise ValueError('a and b must have the same shape') - return _geneig(a1, b, left, right, overwrite_a, overwrite_b) - geev, = get_lapack_funcs(('geev',), (a1,)) - compute_vl, compute_vr = left, right - if geev.module_name[:7] == 'flapack': - lwork = calc_lwork.geev(geev.prefix, a1.shape[0], - compute_vl, compute_vr)[1] - if geev.prefix in 'cz': - w, vl, vr, info = geev(a1, lwork=lwork, - compute_vl=compute_vl, - compute_vr=compute_vr, - overwrite_a=overwrite_a) - else: - wr, wi, vl, vr, info = geev(a1, lwork=lwork, - compute_vl=compute_vl, - compute_vr=compute_vr, - overwrite_a=overwrite_a) - t = {'f':'F','d':'D'}[wr.dtype.char] - w = wr + _I * wi - else: # 'clapack' - if geev.prefix in 'cz': - w, vl, vr, info = geev(a1, - compute_vl=compute_vl, - compute_vr=compute_vr, - overwrite_a=overwrite_a) - else: - wr, wi, vl, vr, info = geev(a1, - compute_vl=compute_vl, - compute_vr=compute_vr, - overwrite_a=overwrite_a) - t = {'f':'F','d':'D'}[wr.dtype.char] - w = wr + _I * wi - if info < 0: - raise ValueError('illegal value in %d-th argument of internal geev' - % -info) - if info > 0: - raise LinAlgError("eig algorithm did not converge (only eigenvalues " - "with order >= %d have converged)" % info) - - only_real = numpy.logical_and.reduce(numpy.equal(w.imag, 0.0)) - if not (geev.prefix in 'cz' or only_real): - t = w.dtype.char - if left: - vl = _make_complex_eigvecs(w, vl, t) - if right: - vr = _make_complex_eigvecs(w, vr, t) - if not (left or right): - return w - if left: - if right: - return w, vl, vr - return w, vl - return w, vr - -def eigh(a, 
b=None, lower=True, eigvals_only=False, overwrite_a=False, - overwrite_b=False, turbo=True, eigvals=None, type=1): - """Solve an ordinary or generalized eigenvalue problem for a complex - Hermitian or real symmetric matrix. - - Find eigenvalues w and optionally eigenvectors v of matrix a, where - b is positive definite:: - - a v[:,i] = w[i] b v[:,i] - v[i,:].conj() a v[:,i] = w[i] - v[i,:].conj() b v[:,i] = 1 - - - Parameters - ---------- - a : array, shape (M, M) - A complex Hermitian or real symmetric matrix whose eigenvalues and - eigenvectors will be computed. - b : array, shape (M, M) - A complex Hermitian or real symmetric definite positive matrix in. - If omitted, identity matrix is assumed. - lower : boolean - Whether the pertinent array data is taken from the lower or upper - triangle of a. (Default: lower) - eigvals_only : boolean - Whether to calculate only eigenvalues and no eigenvectors. - (Default: both are calculated) - turbo : boolean - Use divide and conquer algorithm (faster but expensive in memory, - only for generalized eigenvalue problem and if eigvals=None) - eigvals : tuple (lo, hi) - Indexes of the smallest and largest (in ascending order) eigenvalues - and corresponding eigenvectors to be returned: 0 <= lo < hi <= M-1. - If omitted, all eigenvalues and eigenvectors are returned. - type: integer - Specifies the problem type to be solved: - type = 1: a v[:,i] = w[i] b v[:,i] - type = 2: a b v[:,i] = w[i] v[:,i] - type = 3: b a v[:,i] = w[i] v[:,i] - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - overwrite_b : boolean - Whether to overwrite data in b (may improve performance) - - Returns - ------- - w : real array, shape (N,) - The N (1<=N<=M) selected eigenvalues, in ascending order, each - repeated according to its multiplicity. - - (if eigvals_only == False) - v : complex array, shape (M, N) - The normalized selected eigenvector corresponding to the - eigenvalue w[i] is the column v[:,i]. 
Normalization: - type 1 and 3: v.conj() a v = w - type 2: inv(v).conj() a inv(v) = w - type = 1 or 2: v.conj() b v = I - type = 3 : v.conj() inv(b) v = I - - Raises LinAlgError if eigenvalue computation does not converge, - an error occurred, or b matrix is not definite positive. Note that - if input matrices are not symmetric or hermitian, no error is reported - but results will be wrong. - - See Also - -------- - eig : eigenvalues and right eigenvectors for non-symmetric arrays - - """ - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise ValueError('expected square matrix') - overwrite_a = overwrite_a or (_datacopied(a1, a)) - if iscomplexobj(a1): - cplx = True - else: - cplx = False - if b is not None: - b1 = asarray_chkfinite(b) - overwrite_b = overwrite_b or _datacopied(b1, b) - if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: - raise ValueError('expected square matrix') - - if b1.shape != a1.shape: - raise ValueError("wrong b dimensions %s, should " - "be %s" % (str(b1.shape), str(a1.shape))) - if iscomplexobj(b1): - cplx = True - else: - cplx = cplx or False - else: - b1 = None - - # Set job for fortran routines - _job = (eigvals_only and 'N') or 'V' - - # port eigenvalue range from python to fortran convention - if eigvals is not None: - lo, hi = eigvals - if lo < 0 or hi >= a1.shape[0]: - raise ValueError('The eigenvalue range specified is not valid.\n' - 'Valid range is [%s,%s]' % (0, a1.shape[0]-1)) - lo += 1 - hi += 1 - eigvals = (lo, hi) - - # set lower - if lower: - uplo = 'L' - else: - uplo = 'U' - - # fix prefix for lapack routines - if cplx: - pfx = 'he' - else: - pfx = 'sy' - - # Standard Eigenvalue Problem - # Use '*evr' routines - # FIXME: implement calculation of optimal lwork - # for all lapack routines - if b1 is None: - (evr,) = get_lapack_funcs((pfx+'evr',), (a1,)) - if eigvals is None: - w, v, info = evr(a1, uplo=uplo, jobz=_job, range="A", il=1, - iu=a1.shape[0], overwrite_a=overwrite_a) - else: - 
(lo, hi)= eigvals - w_tot, v, info = evr(a1, uplo=uplo, jobz=_job, range="I", - il=lo, iu=hi, overwrite_a=overwrite_a) - w = w_tot[0:hi-lo+1] - - # Generalized Eigenvalue Problem - else: - # Use '*gvx' routines if range is specified - if eigvals is not None: - (gvx,) = get_lapack_funcs((pfx+'gvx',), (a1,b1)) - (lo, hi) = eigvals - w_tot, v, ifail, info = gvx(a1, b1, uplo=uplo, iu=hi, - itype=type,jobz=_job, il=lo, - overwrite_a=overwrite_a, - overwrite_b=overwrite_b) - w = w_tot[0:hi-lo+1] - # Use '*gvd' routine if turbo is on and no eigvals are specified - elif turbo: - (gvd,) = get_lapack_funcs((pfx+'gvd',), (a1,b1)) - v, w, info = gvd(a1, b1, uplo=uplo, itype=type, jobz=_job, - overwrite_a=overwrite_a, - overwrite_b=overwrite_b) - # Use '*gv' routine if turbo is off and no eigvals are specified - else: - (gv,) = get_lapack_funcs((pfx+'gv',), (a1,b1)) - v, w, info = gv(a1, b1, uplo=uplo, itype= type, jobz=_job, - overwrite_a=overwrite_a, - overwrite_b=overwrite_b) - - # Check if we had a successful exit - if info == 0: - if eigvals_only: - return w - else: - return w, v - - elif info < 0: - raise LinAlgError("illegal value in %i-th argument of internal" - " fortran routine." % (-info)) - elif info > 0 and b1 is None: - raise LinAlgError("unrecoverable internal error.") - - # The algorithm failed to converge. - elif info > 0 and info <= b1.shape[0]: - if eigvals is not None: - raise LinAlgError("the eigenvectors %s failed to" - " converge." % nonzero(ifail)-1) - else: - raise LinAlgError("internal fortran routine failed to converge: " - "%i off-diagonal elements of an " - "intermediate tridiagonal form did not converge" - " to zero." % info) - - # This occurs when b is not positive definite - else: - raise LinAlgError("the leading minor of order %i" - " of 'b' is not positive definite. The" - " factorization of 'b' could not be completed" - " and no eigenvalues or eigenvectors were" - " computed." 
% (info-b1.shape[0])) - -def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False, - select='a', select_range=None, max_ev = 0): - """Solve real symmetric or complex hermitian band matrix eigenvalue problem. - - Find eigenvalues w and optionally right eigenvectors v of a:: - - a v[:,i] = w[i] v[:,i] - v.H v = identity - - The matrix a is stored in a_band either in lower diagonal or upper - diagonal ordered form: - - a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) - a_band[ i - j, j] == a[i,j] (if lower form; i >= j) - - where u is the number of bands above the diagonal. - - Example of a_band (shape of a is (6,6), u=2):: - - upper form: - * * a02 a13 a24 a35 - * a01 a12 a23 a34 a45 - a00 a11 a22 a33 a44 a55 - - lower form: - a00 a11 a22 a33 a44 a55 - a10 a21 a32 a43 a54 * - a20 a31 a42 a53 * * - - Cells marked with * are not used. - - Parameters - ---------- - a_band : array, shape (u+1, M) - The bands of the M by M matrix a. - lower : boolean - Is the matrix in the lower form. (Default is upper form) - eigvals_only : boolean - Compute only the eigenvalues and no eigenvectors. - (Default: calculate also eigenvectors) - overwrite_a_band: - Discard data in a_band (may enhance performance) - select: {'a', 'v', 'i'} - Which eigenvalues to calculate - - ====== ======================================== - select calculated - ====== ======================================== - 'a' All eigenvalues - 'v' Eigenvalues in the interval (min, max] - 'i' Eigenvalues with indices min <= i <= max - ====== ======================================== - select_range : (min, max) - Range of selected eigenvalues - max_ev : integer - For select=='v', maximum number of eigenvalues expected. - For other values of select, has no meaning. - - In doubt, leave this parameter untouched. - - Returns - ------- - w : array, shape (M,) - The eigenvalues, in ascending order, each repeated according to its - multiplicity. 
- - v : double or complex double array, shape (M, M) - The normalized eigenvector corresponding to the eigenvalue w[i] is - the column v[:,i]. - - Raises LinAlgError if eigenvalue computation does not converge - - """ - if eigvals_only or overwrite_a_band: - a1 = asarray_chkfinite(a_band) - overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band)) - else: - a1 = array(a_band) - if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all(): - raise ValueError("array must not contain infs or NaNs") - overwrite_a_band = 1 - - if len(a1.shape) != 2: - raise ValueError('expected two-dimensional array') - if select.lower() not in [0, 1, 2, 'a', 'v', 'i', 'all', 'value', 'index']: - raise ValueError('invalid argument for select') - if select.lower() in [0, 'a', 'all']: - if a1.dtype.char in 'GFD': - bevd, = get_lapack_funcs(('hbevd',), (a1,)) - # FIXME: implement this somewhen, for now go with builtin values - # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1) - # or by using calc_lwork.f ??? 
- # lwork = calc_lwork.hbevd(bevd.prefix, a1.shape[0], lower) - internal_name = 'hbevd' - else: # a1.dtype.char in 'fd': - bevd, = get_lapack_funcs(('sbevd',), (a1,)) - # FIXME: implement this somewhen, for now go with builtin values - # see above - # lwork = calc_lwork.sbevd(bevd.prefix, a1.shape[0], lower) - internal_name = 'sbevd' - w,v,info = bevd(a1, compute_v=not eigvals_only, - lower=lower, - overwrite_ab=overwrite_a_band) - if select.lower() in [1, 2, 'i', 'v', 'index', 'value']: - # calculate certain range only - if select.lower() in [2, 'i', 'index']: - select = 2 - vl, vu, il, iu = 0.0, 0.0, min(select_range), max(select_range) - if min(il, iu) < 0 or max(il, iu) >= a1.shape[1]: - raise ValueError('select_range out of bounds') - max_ev = iu - il + 1 - else: # 1, 'v', 'value' - select = 1 - vl, vu, il, iu = min(select_range), max(select_range), 0, 0 - if max_ev == 0: - max_ev = a_band.shape[1] - if eigvals_only: - max_ev = 1 - # calculate optimal abstol for dsbevx (see manpage) - if a1.dtype.char in 'fF': # single precision - lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),)) - else: - lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),)) - abstol = 2 * lamch('s') - if a1.dtype.char in 'GFD': - bevx, = get_lapack_funcs(('hbevx',), (a1,)) - internal_name = 'hbevx' - else: # a1.dtype.char in 'gfd' - bevx, = get_lapack_funcs(('sbevx',), (a1,)) - internal_name = 'sbevx' - # il+1, iu+1: translate python indexing (0 ... N-1) into Fortran - # indexing (1 ... 
N) - w, v, m, ifail, info = bevx(a1, vl, vu, il+1, iu+1, - compute_v=not eigvals_only, - mmax=max_ev, - range=select, lower=lower, - overwrite_ab=overwrite_a_band, - abstol=abstol) - # crop off w and v - w = w[:m] - if not eigvals_only: - v = v[:, :m] - if info < 0: - raise ValueError('illegal value in %d-th argument of internal %s' - % (-info, internal_name)) - if info > 0: - raise LinAlgError("eig algorithm did not converge") - - if eigvals_only: - return w - return w, v - -def eigvals(a, b=None, overwrite_a=False): - """Compute eigenvalues from an ordinary or generalized eigenvalue problem. - - Find eigenvalues of a general matrix:: - - a vr[:,i] = w[i] b vr[:,i] - - Parameters - ---------- - a : array, shape (M, M) - A complex or real matrix whose eigenvalues and eigenvectors - will be computed. - b : array, shape (M, M) - Right-hand side matrix in a generalized eigenvalue problem. - If omitted, identity matrix is assumed. - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - - Returns - ------- - w : double or complex array, shape (M,) - The eigenvalues, each repeated according to its multiplicity, - but not in any specific order. - - Raises LinAlgError if eigenvalue computation does not converge - - See Also - -------- - eigvalsh : eigenvalues of symmetric or Hemitiean arrays - eig : eigenvalues and right eigenvectors of general arrays - eigh : eigenvalues and eigenvectors of symmetric/Hermitean arrays. - - """ - return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a) - -def eigvalsh(a, b=None, lower=True, overwrite_a=False, - overwrite_b=False, turbo=True, eigvals=None, type=1): - """Solve an ordinary or generalized eigenvalue problem for a complex - Hermitian or real symmetric matrix. 
- - Find eigenvalues w of matrix a, where b is positive definite:: - - a v[:,i] = w[i] b v[:,i] - v[i,:].conj() a v[:,i] = w[i] - v[i,:].conj() b v[:,i] = 1 - - - Parameters - ---------- - a : array, shape (M, M) - A complex Hermitian or real symmetric matrix whose eigenvalues and - eigenvectors will be computed. - b : array, shape (M, M) - A complex Hermitian or real symmetric definite positive matrix in. - If omitted, identity matrix is assumed. - lower : boolean - Whether the pertinent array data is taken from the lower or upper - triangle of a. (Default: lower) - turbo : boolean - Use divide and conquer algorithm (faster but expensive in memory, - only for generalized eigenvalue problem and if eigvals=None) - eigvals : tuple (lo, hi) - Indexes of the smallest and largest (in ascending order) eigenvalues - and corresponding eigenvectors to be returned: 0 <= lo < hi <= M-1. - If omitted, all eigenvalues and eigenvectors are returned. - type: integer - Specifies the problem type to be solved: - type = 1: a v[:,i] = w[i] b v[:,i] - type = 2: a b v[:,i] = w[i] v[:,i] - type = 3: b a v[:,i] = w[i] v[:,i] - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - overwrite_b : boolean - Whether to overwrite data in b (may improve performance) - - Returns - ------- - w : real array, shape (N,) - The N (1<=N<=M) selected eigenvalues, in ascending order, each - repeated according to its multiplicity. - - Raises LinAlgError if eigenvalue computation does not converge, - an error occurred, or b matrix is not definite positive. Note that - if input matrices are not symmetric or hermitian, no error is reported - but results will be wrong. 
- - See Also - -------- - eigvals : eigenvalues of general arrays - eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays - eig : eigenvalues and right eigenvectors for non-symmetric arrays - - """ - return eigh(a, b=b, lower=lower, eigvals_only=True, - overwrite_a=overwrite_a, overwrite_b=overwrite_b, - turbo=turbo, eigvals=eigvals, type=type) - -def eigvals_banded(a_band, lower=False, overwrite_a_band=False, - select='a', select_range=None): - """Solve real symmetric or complex hermitian band matrix eigenvalue problem. - - Find eigenvalues w of a:: - - a v[:,i] = w[i] v[:,i] - v.H v = identity - - The matrix a is stored in a_band either in lower diagonal or upper - diagonal ordered form: - - a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) - a_band[ i - j, j] == a[i,j] (if lower form; i >= j) - - where u is the number of bands above the diagonal. - - Example of a_band (shape of a is (6,6), u=2):: - - upper form: - * * a02 a13 a24 a35 - * a01 a12 a23 a34 a45 - a00 a11 a22 a33 a44 a55 - - lower form: - a00 a11 a22 a33 a44 a55 - a10 a21 a32 a43 a54 * - a20 a31 a42 a53 * * - - Cells marked with * are not used. - - Parameters - ---------- - a_band : array, shape (u+1, M) - The bands of the M by M matrix a. - lower : boolean - Is the matrix in the lower form. (Default is upper form) - overwrite_a_band: - Discard data in a_band (may enhance performance) - select: {'a', 'v', 'i'} - Which eigenvalues to calculate - - ====== ======================================== - select calculated - ====== ======================================== - 'a' All eigenvalues - 'v' Eigenvalues in the interval (min, max] - 'i' Eigenvalues with indices min <= i <= max - ====== ======================================== - select_range : (min, max) - Range of selected eigenvalues - - Returns - ------- - w : array, shape (M,) - The eigenvalues, in ascending order, each repeated according to its - multiplicity. 
- - Raises LinAlgError if eigenvalue computation does not converge - - See Also - -------- - eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian band matrices - eigvals : eigenvalues of general arrays - eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays - eig : eigenvalues and right eigenvectors for non-symmetric arrays - - """ - return eig_banded(a_band, lower=lower, eigvals_only=1, - overwrite_a_band=overwrite_a_band, select=select, - select_range=select_range) - -_double_precision = ['i','l','d'] - - -def hessenberg(a, calc_q=False, overwrite_a=False): - """Compute Hessenberg form of a matrix. - - The Hessenberg decomposition is - - A = Q H Q^H - - where Q is unitary/orthogonal and H has only zero elements below the first - subdiagonal. - - Parameters - ---------- - a : array, shape (M,M) - Matrix to bring into Hessenberg form - calc_q : boolean - Whether to compute the transformation matrix - overwrite_a : boolean - Whether to ovewrite data in a (may improve performance) - - Returns - ------- - H : array, shape (M,M) - Hessenberg form of A - - (If calc_q == True) - Q : array, shape (M,M) - Unitary/orthogonal similarity transformation matrix s.t. 
A = Q H Q^H - - """ - a1 = asarray(a) - if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): - raise ValueError('expected square matrix') - overwrite_a = overwrite_a or (_datacopied(a1, a)) - gehrd,gebal = get_lapack_funcs(('gehrd','gebal'), (a1,)) - ba, lo, hi, pivscale, info = gebal(a1, permute=1, overwrite_a=overwrite_a) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal gebal ' - '(hessenberg)' % -info) - n = len(a1) - lwork = calc_lwork.gehrd(gehrd.prefix, n, lo, hi) - hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal gehrd ' - '(hessenberg)' % -info) - - if not calc_q: - for i in range(lo, hi): - hq[i+2:hi+1, i] = 0.0 - return hq - - # XXX: Use ORGHR routines to compute q. - typecode = hq.dtype - ger,gemm = get_blas_funcs(('ger','gemm'), dtype=typecode) - q = None - for i in range(lo, hi): - if tau[i]==0.0: - continue - v = zeros(n, dtype=typecode) - v[i+1] = 1.0 - v[i+2:hi+1] = hq[i+2:hi+1, i] - hq[i+2:hi+1, i] = 0.0 - h = ger(-tau[i], v, v,a=diag(ones(n, dtype=typecode)), overwrite_a=1) - if q is None: - q = h - else: - q = gemm(1.0, q, h) - if q is None: - q = diag(ones(n, dtype=typecode)) - return hq, q diff --git a/scipy-0.10.1/scipy/linalg/decomp_cholesky.py b/scipy-0.10.1/scipy/linalg/decomp_cholesky.py deleted file mode 100644 index cb14cb466f..0000000000 --- a/scipy-0.10.1/scipy/linalg/decomp_cholesky.py +++ /dev/null @@ -1,243 +0,0 @@ -"""Cholesky decomposition functions.""" - -from numpy import asarray_chkfinite - -# Local imports -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs - -__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded', - 'cho_solve_banded'] - - -def _cholesky(a, lower=False, overwrite_a=False, clean=True): - """Common code for cholesky() and cho_factor().""" - - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: - raise 
ValueError('expected square matrix') - - overwrite_a = overwrite_a or _datacopied(a1, a) - potrf, = get_lapack_funcs(('potrf',), (a1,)) - c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean) - if info > 0: - raise LinAlgError("%d-th leading minor not positive definite" % info) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal potrf' - % -info) - return c, lower - -def cholesky(a, lower=False, overwrite_a=False): - """Compute the Cholesky decomposition of a matrix. - - Returns the Cholesky decomposition, :lm:`A = L L^*` or :lm:`A = U^* U` - of a Hermitian positive-definite matrix :lm:`A`. - - Parameters - ---------- - a : array, shape (M, M) - Matrix to be decomposed - lower : boolean - Whether to compute the upper or lower triangular Cholesky factorization - (Default: upper-triangular) - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - - Returns - ------- - c : array, shape (M, M) - Upper- or lower-triangular Cholesky factor of A - - Raises LinAlgError if decomposition fails - - Examples - -------- - >>> from scipy import array, linalg, dot - >>> a = array([[1,-2j],[2j,5]]) - >>> L = linalg.cholesky(a, lower=True) - >>> L - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - >>> dot(L, L.T.conj()) - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - - """ - c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True) - return c - - -def cho_factor(a, lower=False, overwrite_a=False): - """Compute the Cholesky decomposition of a matrix, to use in cho_solve - - Returns a matrix containing the Cholesky decomposition, - ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`. - The return value can be directly used as the first parameter to cho_solve. - - .. warning:: - The returned matrix also contains random data in the entries not - used by the Cholesky decomposition. If you need to zero these - entries, use the function `cholesky` instead. 
- - Parameters - ---------- - a : array, shape (M, M) - Matrix to be decomposed - lower : boolean - Whether to compute the upper or lower triangular Cholesky factorization - (Default: upper-triangular) - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - - Returns - ------- - c : array, shape (M, M) - Matrix whose upper or lower triangle contains the Cholesky factor - of `a`. Other parts of the matrix contain random data. - lower : boolean - Flag indicating whether the factor is in the lower or upper triangle - - Raises - ------ - LinAlgError - Raised if decomposition fails. - - See also - -------- - cho_solve : Solve a linear set equations using the Cholesky factorization - of a matrix. - - """ - c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False) - return c, lower - - -def cho_solve((c, lower), b, overwrite_b=False): - """Solve the linear equations A x = b, given the Cholesky factorization of A. - - Parameters - ---------- - (c, lower) : tuple, (array, bool) - Cholesky factorization of a, as given by cho_factor - b : array - Right-hand side - - Returns - ------- - x : array - The solution to the system A x = b - - See also - -------- - cho_factor : Cholesky factorization of a matrix - - """ - - b1 = asarray_chkfinite(b) - c = asarray_chkfinite(c) - if c.ndim != 2 or c.shape[0] != c.shape[1]: - raise ValueError("The factored matrix c is not square.") - if c.shape[1] != b1.shape[0]: - raise ValueError("incompatible dimensions.") - - overwrite_b = overwrite_b or _datacopied(b1, b) - - potrs, = get_lapack_funcs(('potrs',), (c, b1)) - x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b) - if info != 0: - raise ValueError('illegal value in %d-th argument of internal potrs' - % -info) - return x - -def cholesky_banded(ab, overwrite_ab=False, lower=False): - """Cholesky decompose a banded Hermitian positive-definite matrix - - The matrix a is stored in ab either in lower diagonal or upper - diagonal ordered 
form: - - ab[u + i - j, j] == a[i,j] (if upper form; i <= j) - ab[ i - j, j] == a[i,j] (if lower form; i >= j) - - Example of ab (shape of a is (6,6), u=2):: - - upper form: - * * a02 a13 a24 a35 - * a01 a12 a23 a34 a45 - a00 a11 a22 a33 a44 a55 - - lower form: - a00 a11 a22 a33 a44 a55 - a10 a21 a32 a43 a54 * - a20 a31 a42 a53 * * - - Parameters - ---------- - ab : array, shape (u + 1, M) - Banded matrix - overwrite_ab : boolean - Discard data in ab (may enhance performance) - lower : boolean - Is the matrix in the lower form. (Default is upper form) - - Returns - ------- - c : array, shape (u+1, M) - Cholesky factorization of a, in the same banded format as ab - - """ - ab = asarray_chkfinite(ab) - - pbtrf, = get_lapack_funcs(('pbtrf',), (ab,)) - c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab) - if info > 0: - raise LinAlgError("%d-th leading minor not positive definite" % info) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal pbtrf' - % -info) - return c - - -def cho_solve_banded((cb, lower), b, overwrite_b=False): - """Solve the linear equations A x = b, given the Cholesky factorization of A. - - Parameters - ---------- - (cb, lower) : tuple, (array, bool) - `cb` is the Cholesky factorization of A, as given by cholesky_banded. - `lower` must be the same value that was given to cholesky_banded. - b : array - Right-hand side - overwrite_b : bool - If True, the function will overwrite the values in `b`. - - Returns - ------- - x : array - The solution to the system A x = b - - See also - -------- - cholesky_banded : Cholesky factorization of a banded matrix - - Notes - ----- - - .. versionadded:: 0.8.0 - - """ - - cb = asarray_chkfinite(cb) - b = asarray_chkfinite(b) - - # Validate shapes. 
- if cb.shape[-1] != b.shape[0]: - raise ValueError("shapes of cb and b are not compatible.") - - pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b)) - x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b) - if info > 0: - raise LinAlgError("%d-th leading minor not positive definite" % info) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal pbtrs' - % -info) - return x diff --git a/scipy-0.10.1/scipy/linalg/decomp_lu.py b/scipy-0.10.1/scipy/linalg/decomp_lu.py deleted file mode 100644 index 17fb48bc23..0000000000 --- a/scipy-0.10.1/scipy/linalg/decomp_lu.py +++ /dev/null @@ -1,161 +0,0 @@ -"""LU decomposition functions.""" - -from warnings import warn - -from numpy import asarray, asarray_chkfinite - -# Local imports -from misc import _datacopied -from lapack import get_lapack_funcs -from flinalg import get_flinalg_funcs - -__all__ = ['lu', 'lu_solve', 'lu_factor'] - - -def lu_factor(a, overwrite_a=False): - """Compute pivoted LU decomposition of a matrix. - - The decomposition is:: - - A = P L U - - where P is a permutation matrix, L lower triangular with unit - diagonal elements, and U upper triangular. - - Parameters - ---------- - a : array, shape (M, M) - Matrix to decompose - overwrite_a : boolean - Whether to overwrite data in A (may increase performance) - - Returns - ------- - lu : array, shape (N, N) - Matrix containing U in its upper triangle, and L in its lower triangle. - The unit diagonal elements of L are not stored. - piv : array, shape (N,) - Pivot indices representing the permutation matrix P: - row i of matrix was interchanged with row piv[i]. - - See also - -------- - lu_solve : solve an equation system using the LU factorization of a matrix - - Notes - ----- - This is a wrapper to the *GETRF routines from LAPACK. 
- - """ - a1 = asarray(a) - if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): - raise ValueError('expected square matrix') - overwrite_a = overwrite_a or (_datacopied(a1, a)) - getrf, = get_lapack_funcs(('getrf',), (a1,)) - lu, piv, info = getrf(a1, overwrite_a=overwrite_a) - if info < 0: - raise ValueError('illegal value in %d-th argument of ' - 'internal getrf (lu_factor)' % -info) - if info > 0: - warn("Diagonal number %d is exactly zero. Singular matrix." % info, - RuntimeWarning) - return lu, piv - - -def lu_solve((lu, piv), b, trans=0, overwrite_b=False): - """Solve an equation system, a x = b, given the LU factorization of a - - Parameters - ---------- - (lu, piv) - Factorization of the coefficient matrix a, as given by lu_factor - b : array - Right-hand side - trans : {0, 1, 2} - Type of system to solve: - - ===== ========= - trans system - ===== ========= - 0 a x = b - 1 a^T x = b - 2 a^H x = b - ===== ========= - - Returns - ------- - x : array - Solution to the system - - See also - -------- - lu_factor : LU factorize a matrix - - """ - b1 = asarray_chkfinite(b) - overwrite_b = overwrite_b or _datacopied(b1, b) - if lu.shape[0] != b1.shape[0]: - raise ValueError("incompatible dimensions.") - - getrs, = get_lapack_funcs(('getrs',), (lu, b1)) - x,info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b) - if info == 0: - return x - raise ValueError('illegal value in %d-th argument of internal gesv|posv' - % -info) - - -def lu(a, permute_l=False, overwrite_a=False): - """Compute pivoted LU decompostion of a matrix. - - The decomposition is:: - - A = P L U - - where P is a permutation matrix, L lower triangular with unit - diagonal elements, and U upper triangular. 
- - Parameters - ---------- - a : array, shape (M, N) - Array to decompose - permute_l : boolean - Perform the multiplication P*L (Default: do not permute) - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - - Returns - ------- - (If permute_l == False) - p : array, shape (M, M) - Permutation matrix - l : array, shape (M, K) - Lower triangular or trapezoidal matrix with unit diagonal. - K = min(M, N) - u : array, shape (K, N) - Upper triangular or trapezoidal matrix - - (If permute_l == True) - pl : array, shape (M, K) - Permuted L matrix. - K = min(M, N) - u : array, shape (K, N) - Upper triangular or trapezoidal matrix - - Notes - ----- - This is a LU factorization routine written for Scipy. - - """ - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2: - raise ValueError('expected matrix') - overwrite_a = overwrite_a or (_datacopied(a1, a)) - flu, = get_flinalg_funcs(('lu',), (a1,)) - p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a) - if info < 0: - raise ValueError('illegal value in %d-th argument of ' - 'internal lu.getrf' % -info) - if permute_l: - return l, u - return p, l, u diff --git a/scipy-0.10.1/scipy/linalg/decomp_qr.py b/scipy-0.10.1/scipy/linalg/decomp_qr.py deleted file mode 100644 index 2468c66ace..0000000000 --- a/scipy-0.10.1/scipy/linalg/decomp_qr.py +++ /dev/null @@ -1,331 +0,0 @@ -"""QR decomposition functions.""" - -import numpy -from numpy import asarray_chkfinite - -# Local imports -import special_matrices -from blas import get_blas_funcs -from lapack import get_lapack_funcs, find_best_lapack_type -from misc import _datacopied - -# XXX: what is qr_old, should it be kept? -__all__ = ['qr', 'rq', 'qr_old'] - - -def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False): - """Compute QR decomposition of a matrix. - - Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal - and R upper triangular. 
- - Parameters - ---------- - a : array, shape (M, N) - Matrix to be decomposed - overwrite_a : bool, optional - Whether data in a is overwritten (may improve performance) - lwork : int, optional - Work array size, lwork >= a.shape[1]. If None or -1, an optimal size - is computed. - mode : {'full', 'r', 'economic'} - Determines what information is to be returned: either both Q and R - ('full', default), only R ('r') or both Q and R but computed in - economy-size ('economic', see Notes). - pivoting : bool, optional - Whether or not factorization should include pivoting for rank-revealing - qr decomposition. If pivoting, compute the decomposition - :lm:`A P = Q R` as above, but where P is chosen such that the diagonal - of R is non-increasing. - - Returns - ------- - Q : double or complex ndarray - Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned if - ``mode='r'``. - R : double or complex ndarray - Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``. - P : integer ndarray - Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``. - - Raises - ------ - LinAlgError - Raised if decomposition fails - - Notes - ----- - This is an interface to the LAPACK routines dgeqrf, zgeqrf, - dorgqr, zungqr, dgeqp3, and zgeqp3. - - If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead - of (M,M) and (M,N), with ``K=min(M,N)``. 
- - Examples - -------- - >>> from scipy import random, linalg, dot, diag, all, allclose - >>> a = random.randn(9, 6) - - >>> q, r = linalg.qr(a) - >>> allclose(a, dot(q, r)) - True - >>> q.shape, r.shape - ((9, 9), (9, 6)) - - >>> r2 = linalg.qr(a, mode='r') - >>> allclose(r, r2) - True - - >>> q3, r3 = linalg.qr(a, mode='economic') - >>> q3.shape, r3.shape - ((9, 6), (6, 6)) - - >>> q4, r4, p4 = linalg.qr(a, pivoting=True) - >>> d = abs(diag(r4)) - >>> all(d[1:] <= d[:-1]) - True - >>> allclose(a[:, p4], dot(q4, r4)) - True - >>> q4.shape, r4.shape, p4.shape - ((9, 9), (9, 6), (6,)) - - >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True) - >>> q5.shape, r5.shape, p5.shape - ((9, 6), (6, 6), (6,)) - - """ - if mode == 'qr': - # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor - # 'qr' are used below, but set to 'full' anyway to be sure - mode = 'full' - if not mode in ['full', 'qr', 'r', 'economic']: - raise ValueError(\ - "Mode argument should be one of ['full', 'r', 'economic']") - - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2: - raise ValueError("expected 2D array") - M, N = a1.shape - overwrite_a = overwrite_a or (_datacopied(a1, a)) - - if pivoting: - geqp3, = get_lapack_funcs(('geqp3',), (a1,)) - if lwork is None or lwork == -1: - # get optimal work array - qr, jpvt, tau, work, info = geqp3(a1, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - - qr, jpvt, tau, work, info = geqp3(a1, lwork=lwork, - overwrite_a=overwrite_a) - jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1 - if info < 0: - raise ValueError("illegal value in %d-th argument of internal geqp3" - % -info) - else: - geqrf, = get_lapack_funcs(('geqrf',), (a1,)) - if lwork is None or lwork == -1: - # get optimal work array - qr, tau, work, info = geqrf(a1, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - - qr, tau, work, info = geqrf(a1, lwork=lwork, overwrite_a=overwrite_a) - if info < 0: - raise 
ValueError("illegal value in %d-th argument of internal geqrf" - % -info) - if not mode == 'economic' or M < N: - R = special_matrices.triu(qr) - else: - R = special_matrices.triu(qr[0:N, 0:N]) - - if mode == 'r': - if pivoting: - return R, jpvt - else: - return R - - if find_best_lapack_type((a1,))[0] in ('s', 'd'): - gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,)) - else: - gor_un_gqr, = get_lapack_funcs(('ungqr',), (qr,)) - - if M < N: - # get optimal work array - Q, work, info = gor_un_gqr(qr[:,0:M], tau, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - Q, work, info = gor_un_gqr(qr[:,0:M], tau, lwork=lwork, overwrite_a=1) - elif mode == 'economic': - # get optimal work array - Q, work, info = gor_un_gqr(qr, tau, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - Q, work, info = gor_un_gqr(qr, tau, lwork=lwork, overwrite_a=1) - else: - t = qr.dtype.char - qqr = numpy.empty((M, M), dtype=t) - qqr[:,0:N] = qr - # get optimal work array - Q, work, info = gor_un_gqr(qqr, tau, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - Q, work, info = gor_un_gqr(qqr, tau, lwork=lwork, overwrite_a=1) - - if info < 0: - raise ValueError("illegal value in %d-th argument of internal gorgqr" - % -info) - if pivoting: - return Q, R, jpvt - return Q, R - - - -def qr_old(a, overwrite_a=False, lwork=None): - """Compute QR decomposition of a matrix. - - Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal - and R upper triangular. - - Parameters - ---------- - a : array, shape (M, N) - Matrix to be decomposed - overwrite_a : boolean - Whether data in a is overwritten (may improve performance) - lwork : integer - Work array size, lwork >= a.shape[1]. If None or -1, an optimal size - is computed. 
- - Returns - ------- - Q : double or complex array, shape (M, M) - R : double or complex array, shape (M, N) - Size K = min(M, N) - - Raises LinAlgError if decomposition fails - - """ - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2: - raise ValueError('expected matrix') - M,N = a1.shape - overwrite_a = overwrite_a or (_datacopied(a1, a)) - geqrf, = get_lapack_funcs(('geqrf',), (a1,)) - if lwork is None or lwork == -1: - # get optimal work array - qr, tau, work, info = geqrf(a1, lwork=-1, overwrite_a=1) - lwork = work[0] - qr, tau, work, info = geqrf(a1, lwork=lwork, overwrite_a=overwrite_a) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal geqrf' - % -info) - gemm, = get_blas_funcs(('gemm',), (qr,)) - t = qr.dtype.char - R = special_matrices.triu(qr) - Q = numpy.identity(M, dtype=t) - ident = numpy.identity(M, dtype=t) - zeros = numpy.zeros - for i in range(min(M, N)): - v = zeros((M,), t) - v[i] = 1 - v[i+1:M] = qr[i+1:M, i] - H = gemm(-tau[i], v, v, 1+0j, ident, trans_b=2) - Q = gemm(1, Q, H) - return Q, R - - -def rq(a, overwrite_a=False, lwork=None, mode='full'): - """Compute RQ decomposition of a square real matrix. - - Calculate the decomposition :lm:`A = R Q` where Q is unitary/orthogonal - and R upper triangular. - - Parameters - ---------- - a : array, shape (M, M) - Matrix to be decomposed - overwrite_a : boolean - Whether data in a is overwritten (may improve performance) - lwork : integer - Work array size, lwork >= a.shape[1]. If None or -1, an optimal size - is computed. - mode : {'full', 'r', 'economic'} - Determines what information is to be returned: either both Q and R - ('full', default), only R ('r') or both Q and R but computed in - economy-size ('economic', see Notes). 
- - Returns - ------- - R : double array, shape (M, N) - Q : double or complex array, shape (M, M) - - Raises LinAlgError if decomposition fails - - Examples - -------- - >>> from scipy import linalg - >>> from numpy import random, dot, allclose - >>> a = random.randn(6, 9) - >>> r, q = linalg.rq(a) - >>> allclose(a, dot(r, q)) - True - >>> r.shape, q.shape - ((6, 9), (9, 9)) - >>> r2 = linalg.rq(a, mode='r') - >>> allclose(r, r2) - True - >>> r3, q3 = linalg.rq(a, mode='economic') - >>> r3.shape, q3.shape - ((6, 6), (6, 9)) - - """ - if not mode in ['full', 'r', 'economic']: - raise ValueError(\ - "Mode argument should be one of ['full', 'r', 'economic']") - - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2: - raise ValueError('expected matrix') - M, N = a1.shape - overwrite_a = overwrite_a or (_datacopied(a1, a)) - - gerqf, = get_lapack_funcs(('gerqf',), (a1,)) - if lwork is None or lwork == -1: - # get optimal work array - rq, tau, work, info = gerqf(a1, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - rq, tau, work, info = gerqf(a1, lwork=lwork, overwrite_a=overwrite_a) - if info < 0: - raise ValueError('illegal value in %d-th argument of internal gerqf' - % -info) - if not mode == 'economic' or N < M: - R = special_matrices.triu(rq, N-M) - else: - R = special_matrices.triu(rq[-M:, -M:]) - - if mode == 'r': - return R - - if find_best_lapack_type((a1,))[0] in ('s', 'd'): - gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,)) - else: - gor_un_grq, = get_lapack_funcs(('ungrq',), (rq,)) - - if N < M: - # get optimal work array - Q, work, info = gor_un_grq(rq[-N:], tau, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - Q, work, info = gor_un_grq(rq[-N:], tau, lwork=lwork, overwrite_a=1) - elif mode == 'economic': - # get optimal work array - Q, work, info = gor_un_grq(rq, tau, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - Q, work, info = gor_un_grq(rq, tau, lwork=lwork, overwrite_a=1) - else: - rq1 = 
numpy.empty((N, N), dtype=rq.dtype) - rq1[-M:] = rq - # get optimal work array - Q, work, info = gor_un_grq(rq1, tau, lwork=-1, overwrite_a=1) - lwork = work[0].real.astype(numpy.int) - Q, work, info = gor_un_grq(rq1, tau, lwork=lwork, overwrite_a=1) - - if info < 0: - raise ValueError("illegal value in %d-th argument of internal orgrq" - % -info) - return R, Q diff --git a/scipy-0.10.1/scipy/linalg/decomp_schur.py b/scipy-0.10.1/scipy/linalg/decomp_schur.py deleted file mode 100644 index 028028672c..0000000000 --- a/scipy-0.10.1/scipy/linalg/decomp_schur.py +++ /dev/null @@ -1,222 +0,0 @@ -"""Schur decomposition functions.""" - -import numpy -from numpy import asarray_chkfinite, single - -# Local imports. -import misc -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs -from decomp import eigvals - - -__all__ = ['schur', 'rsf2csf'] - -_double_precision = ['i','l','d'] - -def schur(a, output='real', lwork=None, overwrite_a=False, sort=None): - """Compute Schur decomposition of a matrix. - - The Schur decomposition is - - A = Z T Z^H - - where Z is unitary and T is either upper-triangular, or for real - Schur decomposition (output='real'), quasi-upper triangular. In - the quasi-triangular form, 2x2 blocks describing complex-valued - eigenvalue pairs may extrude from the diagonal. - - Parameters - ---------- - a : array, shape (M, M) - Matrix to decompose - output : {'real', 'complex'} - Construct the real or complex Schur decomposition (for real matrices). - lwork : integer - Work array size. If None or -1, it is automatically computed. - overwrite_a : boolean - Whether to overwrite data in a (may improve performance) - sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'} - Specifies whether the upper eigenvalues should be sorted. A callable - may be passed that, given a eigenvalue, returns a boolean denoting - whether the eigenvalue should be sorted to the top-left (True). 
- Alternatively, string parameters may be used: - 'lhp' Left-hand plane (x.real < 0.0) - 'rhp' Right-hand plane (x.real > 0.0) - 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0) - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) - Defaults to None (no sorting). - - Returns - ------- - T : array, shape (M, M) - Schur form of A. It is real-valued for the real Schur decomposition. - Z : array, shape (M, M) - An unitary Schur transformation matrix for A. - It is real-valued for the real Schur decomposition. - sdim : integer - If and only if sorting was requested, a third return value will - contain the number of eigenvalues satisfying the sort condition. - - Raises - ------ - LinAlgError - Error raised under three conditions: - 1. The algorithm failed due to a failure of the QR algorithm to - compute all eigenvalues - 2. If eigenvalue sorting was requested, the eigenvalues could not be - reordered due to a failure to separate eigenvalues, usually because - of poor conditioning - 3. 
If eigenvalue sorting was requested, roundoff errors caused the - leading eigenvalues to no longer satisfy the sorting condition - - See also - -------- - rsf2csf : Convert real Schur form to complex Schur form - - """ - if not output in ['real','complex','r','c']: - raise ValueError("argument must be 'real', or 'complex'") - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): - raise ValueError('expected square matrix') - typ = a1.dtype.char - if output in ['complex','c'] and typ not in ['F','D']: - if typ in _double_precision: - a1 = a1.astype('D') - typ = 'D' - else: - a1 = a1.astype('F') - typ = 'F' - overwrite_a = overwrite_a or (_datacopied(a1, a)) - gees, = get_lapack_funcs(('gees',), (a1,)) - if lwork is None or lwork == -1: - # get optimal work array - result = gees(lambda x: None, a1, lwork=-1) - lwork = result[-2][0].real.astype(numpy.int) - - if sort is None: - sort_t = 0 - sfunction = lambda x: None - else: - sort_t = 1 - if callable(sort): - sfunction = sort - elif sort == 'lhp': - sfunction = lambda x: (numpy.real(x) < 0.0) - elif sort == 'rhp': - sfunction = lambda x: (numpy.real(x) >= 0.0) - elif sort == 'iuc': - sfunction = lambda x: (abs(x) <= 1.0) - elif sort == 'ouc': - sfunction = lambda x: (abs(x) > 1.0) - else: - raise ValueError("sort parameter must be None, a callable, or " + - "one of ('lhp','rhp','iuc','ouc')") - - result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a, - sort_t=sort_t) - - info = result[-1] - if info < 0: - raise ValueError('illegal value in %d-th argument of internal gees' - % -info) - elif info == a1.shape[0] + 1: - raise LinAlgError('Eigenvalues could not be separated for reordering.') - elif info == a1.shape[0] + 2: - raise LinAlgError('Leading eigenvalues do not satisfy sort condition.') - elif info > 0: - raise LinAlgError("Schur form not found. 
Possibly ill-conditioned.") - - if sort_t == 0: - return result[0], result[-3] - else: - return result[0], result[-3], result[1] - - -eps = numpy.finfo(float).eps -feps = numpy.finfo(single).eps - -_array_kind = {'b':0, 'h':0, 'B': 0, 'i':0, 'l': 0, 'f': 0, 'd': 0, 'F': 1, 'D': 1} -_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} -_array_type = [['f', 'd'], ['F', 'D']] - -def _commonType(*arrays): - kind = 0 - precision = 0 - for a in arrays: - t = a.dtype.char - kind = max(kind, _array_kind[t]) - precision = max(precision, _array_precision[t]) - return _array_type[kind][precision] - -def _castCopy(type, *arrays): - cast_arrays = () - for a in arrays: - if a.dtype.char == type: - cast_arrays = cast_arrays + (a.copy(),) - else: - cast_arrays = cast_arrays + (a.astype(type),) - if len(cast_arrays) == 1: - return cast_arrays[0] - else: - return cast_arrays - - -def rsf2csf(T, Z): - """Convert real Schur form to complex Schur form. - - Convert a quasi-diagonal real-valued Schur form to the upper triangular - complex-valued Schur form. 
- - Parameters - ---------- - T : array, shape (M, M) - Real Schur form of the original matrix - Z : array, shape (M, M) - Schur transformation matrix - - Returns - ------- - T : array, shape (M, M) - Complex Schur form of the original matrix - Z : array, shape (M, M) - Schur transformation matrix corresponding to the complex form - - See also - -------- - schur : Schur decompose a matrix - - """ - Z, T = map(asarray_chkfinite, (Z, T)) - if len(Z.shape) != 2 or Z.shape[0] != Z.shape[1]: - raise ValueError("matrix must be square.") - if len(T.shape) != 2 or T.shape[0] != T.shape[1]: - raise ValueError("matrix must be square.") - if T.shape[0] != Z.shape[0]: - raise ValueError("matrices must be same dimension.") - N = T.shape[0] - arr = numpy.array - t = _commonType(Z, T, arr([3.0],'F')) - Z, T = _castCopy(t, Z, T) - conj = numpy.conj - dot = numpy.dot - r_ = numpy.r_ - transp = numpy.transpose - for m in range(N-1, 0, -1): - if abs(T[m,m-1]) > eps*(abs(T[m-1,m-1]) + abs(T[m,m])): - k = slice(m-1, m+1) - mu = eigvals(T[k,k]) - T[m,m] - r = misc.norm([mu[0], T[m,m-1]]) - c = mu[0] / r - s = T[m,m-1] / r - G = r_[arr([[conj(c), s]], dtype=t), arr([[-s, c]], dtype=t)] - Gc = conj(transp(G)) - j = slice(m-1, N) - T[k,j] = dot(G, T[k,j]) - i = slice(0, m+1) - T[i,k] = dot(T[i,k], Gc) - i = slice(0, N) - Z[i,k] = dot(Z[i,k], Gc) - T[m,m-1] = 0.0; - return T, Z diff --git a/scipy-0.10.1/scipy/linalg/decomp_svd.py b/scipy-0.10.1/scipy/linalg/decomp_svd.py deleted file mode 100644 index 9786e7186b..0000000000 --- a/scipy-0.10.1/scipy/linalg/decomp_svd.py +++ /dev/null @@ -1,171 +0,0 @@ -"""SVD decomposition functions.""" - -import numpy -from numpy import asarray_chkfinite, zeros, r_, diag -from scipy.linalg import calc_lwork - -# Local imports. 
-from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs - -__all__ = ['svd', 'svdvals', 'diagsvd', 'orth'] - - -def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False): - """Singular Value Decomposition. - - Factorizes the matrix a into two unitary matrices U and Vh and - an 1d-array s of singular values (real, non-negative) such that - a == U S Vh if S is an suitably shaped matrix of zeros whose - main diagonal is s. - - Parameters - ---------- - a : array, shape (M, N) - Matrix to decompose - full_matrices : boolean - If true, U, Vh are shaped (M,M), (N,N) - If false, the shapes are (M,K), (K,N) where K = min(M,N) - compute_uv : boolean - Whether to compute also U, Vh in addition to s (Default: true) - overwrite_a : boolean - Whether data in a is overwritten (may improve performance) - - Returns - ------- - U: array, shape (M,M) or (M,K) depending on full_matrices - s: array, shape (K,) - The singular values, sorted so that s[i] >= s[i+1]. K = min(M, N) - Vh: array, shape (N,N) or (K,N) depending on full_matrices - - For compute_uv = False, only s is returned. 
- - Raises LinAlgError if SVD computation does not converge - - Examples - -------- - >>> from scipy import random, linalg, allclose, dot - >>> a = random.randn(9, 6) + 1j*random.randn(9, 6) - >>> U, s, Vh = linalg.svd(a) - >>> U.shape, Vh.shape, s.shape - ((9, 9), (6, 6), (6,)) - - >>> U, s, Vh = linalg.svd(a, full_matrices=False) - >>> U.shape, Vh.shape, s.shape - ((9, 6), (6, 6), (6,)) - >>> S = linalg.diagsvd(s, 6, 6) - >>> allclose(a, dot(U, dot(S, Vh))) - True - - >>> s2 = linalg.svd(a, compute_uv=False) - >>> allclose(s, s2) - True - - See also - -------- - svdvals : return singular values of a matrix - diagsvd : return the Sigma matrix, given the vector s - - """ - a1 = asarray_chkfinite(a) - if len(a1.shape) != 2: - raise ValueError('expected matrix') - m,n = a1.shape - overwrite_a = overwrite_a or (_datacopied(a1, a)) - gesdd, = get_lapack_funcs(('gesdd',), (a1,)) - if gesdd.module_name[:7] == 'flapack': - lwork = calc_lwork.gesdd(gesdd.prefix, m, n, compute_uv)[1] - u,s,v,info = gesdd(a1,compute_uv = compute_uv, lwork = lwork, - full_matrices=full_matrices, overwrite_a = overwrite_a) - else: # 'clapack' - raise NotImplementedError('calling gesdd from %s' % gesdd.module_name) - if info > 0: - raise LinAlgError("SVD did not converge") - if info < 0: - raise ValueError('illegal value in %d-th argument of internal gesdd' - % -info) - if compute_uv: - return u, s, v - else: - return s - -def svdvals(a, overwrite_a=False): - """Compute singular values of a matrix. - - Parameters - ---------- - a : array, shape (M, N) - Matrix to decompose - overwrite_a : boolean - Whether data in a is overwritten (may improve performance) - - Returns - ------- - s: array, shape (K,) - The singular values, sorted so that s[i] >= s[i+1]. 
K = min(M, N) - - Raises LinAlgError if SVD computation does not converge - - See also - -------- - svd : return the full singular value decomposition of a matrix - diagsvd : return the Sigma matrix, given the vector s - - """ - return svd(a, compute_uv=0, overwrite_a=overwrite_a) - -def diagsvd(s, M, N): - """Construct the sigma matrix in SVD from singular values and size M,N. - - Parameters - ---------- - s : array, shape (M,) or (N,) - Singular values - M : integer - N : integer - Size of the matrix whose singular values are s - - Returns - ------- - S : array, shape (M, N) - The S-matrix in the singular value decomposition - - """ - part = diag(s) - typ = part.dtype.char - MorN = len(s) - if MorN == M: - return r_['-1', part, zeros((M, N-M), typ)] - elif MorN == N: - return r_[part, zeros((M-N,N), typ)] - else: - raise ValueError("Length of s must be M or N.") - - -# Orthonormal decomposition - -def orth(A): - """Construct an orthonormal basis for the range of A using SVD - - Parameters - ---------- - A : array, shape (M, N) - - Returns - ------- - Q : array, shape (M, K) - Orthonormal basis for the range of A. - K = effective rank of A, as determined by automatic cutoff - - See also - -------- - svd : Singular value decomposition of a matrix - - """ - u, s, vh = svd(A) - M, N = A.shape - eps = numpy.finfo(float).eps - tol = max(M,N) * numpy.amax(s) * eps - num = numpy.sum(s > tol, dtype=int) - Q = u[:,:num] - return Q diff --git a/scipy-0.10.1/scipy/linalg/fblas.pyf.src b/scipy-0.10.1/scipy/linalg/fblas.pyf.src deleted file mode 100644 index b63b743c0d..0000000000 --- a/scipy-0.10.1/scipy/linalg/fblas.pyf.src +++ /dev/null @@ -1,18 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! 
- - -python module fblas - interface - - include 'fblas_l1.pyf.src' - include 'fblas_l2.pyf.src' - include 'fblas_l3.pyf.src' - - end interface -end python module fblas diff --git a/scipy-0.10.1/scipy/linalg/fblas_l1.pyf.src b/scipy-0.10.1/scipy/linalg/fblas_l1.pyf.src deleted file mode 100644 index 664bdd8062..0000000000 --- a/scipy-0.10.1/scipy/linalg/fblas_l1.pyf.src +++ /dev/null @@ -1,360 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN LEVEL 1 BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! Modified: Fabian Pedregosa, 2011 -! -! Implemented: -! -! rotg, rotmg, rot, rotm -! swap, scal, copy, axpy -! dot, dotu, dotc -! nrm2, asum, amax, iamax -! -! Not Implemented: NONE -! -! NOTE: Avoiding wrappers hack does not work under 64-bit Gentoo system -! with single precision routines, so they are removed. -! -! Level 1 BLAS - -subroutine rotg(a,b,c,s) - ! Computes the parameters for a Givens rotation. - ! - ! Given the Cartesian coordinates (a, b) of a point p, these routines return - ! the parameters a, b, c, and s associated with the Givens rotation that zeros - ! the y-coordinate of the point. - ! - ! FIXME: parameters a, b are not returned. - ! - callprotoargument *,*,*,* - - intent(in) :: a, b - intent(out,out=c) :: c - intent(out,out=s) :: s - -end subroutine rotg - -! -subroutine rotmg(d1,d2,x1,y1,param) - ! Computes the parameters for a modified Givens rotation. - ! - ! Given Cartesian coordinates (x1, y1) of an input vector, this - ! routine compute the components of a modified Givens - ! transformation matrix H that zeros the y-component of the - ! resulting vector: - ! - ! [x] [sqrt(d1) x1] - ! [ ] = H [ ] - ! [0] [sqrt(d2) y1] - ! - - callstatement (*f2py_func)(&d1,&d2,&x1,&y1,param) - callprotoargument *,*,*,*,* - - intent(in) :: d1, d2, x1, y1 - intent(out), dimension(5) :: param - -end subroutine rotmg - - -subroutine rot(n,x,offx,incx,y,offy,incy,c,s) - ! 
Applies a plane rotation with real cosine and complex sine to a - ! pair of complex vectors and returns the modified vectors. - ! - ! x, y are input vectors and c, s are values that define a rotation: - ! - ! [ c s] - ! [ ] - ! [-conj(s) c] - ! - ! where c*c + s*conjg(s) = 1.0. - ! - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy,&c,&s) - callprotoargument int*,*,int*,*,int*,*,* - - dimension(*),intent(in,out,copy) :: x,y - intent(in) :: c, s - integer optional, intent(in), check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in), check(incy>0||incy<0) :: incy = 1 - integer optional, intent(in), depend(x) :: offx=0 - integer optional, intent(in), depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine rot - - -subroutine rotm(n,x,offx,incx,y,offy,incy,param) - ! Performs rotation of points in the modified plane - ! - ! Given two complex vectors x and y, each vector element of these vectors is - ! replaced as follows: - ! - ! x(i) = H*x(i) + H*y(i) - ! y(i) = H*y(i) - H*x(i) - ! - ! where H is a modified Givens transformation matrix whose values are stored - ! in the param(2) through param(5) array. - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy,param) - callprotoargument int*,*,int*,*,int*,* - - dimension(*), intent(in,out,copy) :: x, y - dimension(5), intent(in) :: param - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - integer optional, intent(in),depend(x) :: offx=0 - integer optional, intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine rotm - - -subroutine swap(n,x,offx,incx,y,offy,incy) - ! 
Swap two arrays: x <-> y - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy) - callprotoargument int*,*,int*,*,int* - - dimension(*), intent(in,out) :: x, y - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - integer optional, intent(in),depend(x) :: offx=0 - integer optional, intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine swap - - -subroutine scal(n,a,x,offx,incx) - ! Computes the product of a vector by a scalar: y = a*x - - callstatement (*f2py_func)(&n,&a,x+offx,&incx) - callprotoargument int*,*,*,int* - - intent(in):: a - dimension(*), intent(in,out) :: x - integer optional, intent(in), check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in), depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end subroutine scal - - -subroutine scal(n,a,x,offx,incx) - ! Computes the product of a vector by a scalar, scales a complex - ! vector by a real constant - ! y = a*x - - callstatement (*f2py_func)(&n,&a,x+offx,&incx) - callprotoargument int*,*,*,int* - - intent(in) :: a - dimension(*), intent(in,out,copy) :: x - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end subroutine scal - - -subroutine copy(n,x,offx,incx,y,offy,incy) - ! 
Copy y <- x - - callstatement (*f2py_func)(&n,x+offx,&incx,y+offy,&incy) - callprotoargument int*,*,int*,*,int* - - dimension(*), intent(in) :: x - dimension(*), intent(in,out) :: y - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - integer optional, intent(in),depend(x) :: offx=0 - integer optional, intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine copy - - -subroutine axpy(n,a,x,offx,incx,y,offy,incy) - ! Calculate z = a*x+y, where a is scalar. - - callstatement (*f2py_func)(&n,&a,x+offx,&incx,y+offy,&incy) - callprotoargument int*,*,*,int*,*,int* - - dimension(*), intent(in) :: x - dimension(*), intent(in,out,out=z) :: y - optional, intent(in):: a=<1.0,\0,(1.0\,0.0),\2> - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - integer optional, intent(in),depend(x) :: offx=0 - integer optional, intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine axpy - - -function dot(n,x,offx,incx,y,offy,incy) result (xy) - ! Computes a vector-vector dot product. - - callstatement (*f2py_func)(&dot,&n,x+offx,&incx,y+offy,&incy) - callprotoargument *,int*,*,int*,*,int* - - dimension(*), intent(in) :: x - dimension(*), intent(in) :: y - dot,xy - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - integer optional, intent(in),depend(x) :: offx=0 - integer optional, intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end function dot - - - -! 
-subroutine dotu(xy,n,x,offx,incx,y,offy,incy) - - intent(out) :: xy - fortranname wdotu - - callstatement (*f2py_func)(&xy,&n,x+offx,&incx,y+offy,&incy) - callprotoargument *,int*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine dotu - - -subroutine dotc(xy,n,x,offx,incx,y,offy,incy) - - intent (out) :: xy - fortranname wdotc - - callstatement (*f2py_func)(&xy,&n,x+offx,&incx,y+offy,&incy) - callprotoargument *,int*,*,int*,*,int* - - dimension(*),intent(in) :: x - dimension(*),intent(in) :: y - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - - integer optional,intent(in),depend(x) :: offx=0 - integer optional,intent(in),depend(y) :: offy=0 - check(offx>=0 && offx=0 && offy(n-1)*abs(incx)) :: n - check(len(y)-offy>(n-1)*abs(incy)) :: n - -end subroutine dotc - -! -function nrm2(n,x,offx,incx) result(n2) - - nrm2, n2 - - callstatement (*f2py_func)(&nrm2, &n,x+offx,&incx) - callprotoargument *,int*,*,int* - - dimension(*),intent(in) :: x - - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - - integer optional,intent(in),depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end function nrm2 - - -function asum(n,x,offx,incx) result (s) - ! 
Computes the sum of magnitudes of the vector elements - - callstatement (*f2py_func)(&asum,&n,x+offx,&incx) - callprotoargument *,int*,*,int* - - dimension(*), intent(in) :: x - asum,s - integer optional, intent(in), check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in), depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end function asum - - -function iamax(n,x,offx,incx) result(k) - ! Finds the index of the element with maximum absolute value. - - callstatement iamax_return_value = (*f2py_func)(&n,x+offx,&incx) - 1 - callprotoargument int*,*,int* - - ! This is to avoid Fortran wrappers. - integer iamax,k - fortranname F_FUNC(iamax,IAMAX) - intent(c) iamax - dimension(*), intent(in) :: x - integer optional, intent(in), check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in), depend(x) :: offx=0 - check(offx>=0 && offx(n-1)*abs(incx)) :: n - -end function iamax - diff --git a/scipy-0.10.1/scipy/linalg/fblas_l2.pyf.src b/scipy-0.10.1/scipy/linalg/fblas_l2.pyf.src deleted file mode 100644 index 61a0e06349..0000000000 --- a/scipy-0.10.1/scipy/linalg/fblas_l2.pyf.src +++ /dev/null @@ -1,142 +0,0 @@ -! -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN LEVEL 2 BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! Modified: Fabian Pedregosa, 2011 -! -! Implemented: -! gemv, hemv, symv, trmv, ger, geru, gerc -! -! Not implemented: -! gbmv, hbmv, hpmv, sbmv, spmv, tbmv, tpmv, trsv, tbsv, tpsv, -! her, hpr, her2, hpr2, syr, spr, syr2, spr2 -! - -subroutine gemv(m,n,alpha,a,x,beta,y,offx,incx,offy,incy,trans,rows,cols,ly) - ! Computes a matrix-vector product using a general matrix - ! - ! y = gemv(alpha,a,x,beta=0,y=0,offx=0,incx=1,offy=0,incy=0,trans=0) - ! 
Calculate y <- alpha * op(A) * x + beta * y - - callstatement (*f2py_func)((trans?(trans==2?"C":"T"):"N"),&m,&n,&alpha,a,&m, & - x+offx,&incx,&beta,y+offy,&incy) - callprotoargument char*,int*,int*,*,*,int*,*,int*,*, & - *,int* - - integer optional, intent(in), check(trans>=0 && trans <=2) :: trans = 0 - integer optional, intent(in), check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in), check(incy>0||incy<0) :: incy = 1 - intent(in) :: alpha - intent(in), optional :: beta = <0.0,\0,(0.0\,0.0),\2> - - dimension(*), intent(in) :: x - dimension(ly), intent(in,copy,out), depend(ly),optional :: y - integer intent(hide), depend(incy,rows,offy) :: ly = & - (y_capi==Py_None?1+offy+(rows-1)*abs(incy):-1) - dimension(m,n), intent(in) :: a - integer depend(a), intent(hide):: m = shape(a,0) - integer depend(a), intent(hide):: n = shape(a,1) - - integer optional, intent(in) :: offx=0 - integer optional, intent(in) :: offy=0 - check(offx>=0 && offxoffx+(cols-1)*abs(incx)) :: x - depend(offx,cols,incx) :: x - - check(offy>=0 && offyoffy+(rows-1)*abs(incy)) :: y - depend(offy,rows,incy) :: y - - integer depend(m,n,trans), intent(hide) :: rows = (trans?n:m) - integer depend(m,n,trans), intent(hide) :: cols = (trans?m:n) - -end subroutine gemv - - -subroutine (n,alpha,a,x,beta,y,offx,incx,offy,incy,lower,ly) - ! Computes a matrix-vector product for a symmetric/hermitian matrix - ! - ! 
Calculate y <- alpha * A * x + beta * y, A is symmmetric/hermitian - - callstatement (*f2py_func)((lower?"L":"U"),&n,&alpha,a,&n,x+offx,&incx,&beta, & - y+offy,&incy) - callprotoargument char*,int*,*,*,int*,*,int*,*, & - *,int* - - integer optional, intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - integer optional, intent(in),check(incy>0||incy<0) :: incy = 1 - intent(in) :: alpha - intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2> - - dimension(*), intent(in) :: x - dimension(ly), intent(in,copy,out),depend(ly),optional :: y - integer intent(hide),depend(incy,n,offy) :: ly = & - (y_capi==Py_None?1+offy+(n-1)*abs(incy):-1) - dimension(n,n), intent(in),check(shape(a,0)==shape(a,1)) :: a - integer depend(a), intent(hide):: n = shape(a,0) - - integer optional, intent(in) :: offx=0 - integer optional, intent(in) :: offy=0 - check(offx>=0 && offxoffx+(n-1)*abs(incx)) :: x - depend(offx,n,incx) :: x - - check(offy>=0 && offyoffy+(n-1)*abs(incy)) :: y - depend(offy,n,incy) :: y - -end subroutine - - -subroutine trmv(n,a,x,offx,incx,lower,trans,unitdiag) - ! Computes a matrix-vector product using a triangular matrix - ! - ! x <- op(A) * x, A is triangular - ! 
- - callstatement (*f2py_func)((lower?"L":"U"), (trans?(trans==2?"C":"T"):"N"), & - (unitdiag?"U":"N"),&n,a,&n,x+offx,&incx) - callprotoargument char*,char*,char*,int*,*,int*,*,int* - - integer optional, intent(in), check(trans>=0 && trans <=2) :: trans = 0 - integer optional, intent(in), check(lower==0||lower==1) :: lower = 0 - integer optional, intent(in), check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - integer optional, intent(in), check(incx>0||incx<0) :: incx = 1 - - dimension(*), intent(in,out,copy) :: x - dimension(n,n), intent(in),check(shape(a,0)==shape(a,1)) :: a - integer depend(a), intent(hide):: n = shape(a,0) - - integer optional, intent(in), depend(x) :: offx=0 - check(offx>=0 && offxoffx+(n-1)*abs(incx)) :: n - depend(x,offx,incx) :: n - -end subroutine trmv - - -! -! -subroutine ger<,,u,u,c,c>(m,n,alpha,x,incx,y,incy,a,lda) - ! Performs a rank-1 update of a general matrix. - ! - ! Calculate a <- alpha*x*y^T + a - ! Calculate a <- alpha*x*y^H + a - ! - - integer intent(hide),depend(x) :: m = len(x) - integer intent(hide),depend(y) :: n = len(y) - - intent(in) :: alpha - dimension(m), intent(in,overwrite) :: x - integer optional, intent(in),check(incx==1||incx==-1) :: incx = 1 - dimension(n), intent(in,overwrite) :: y - integer optional, intent(in),check(incy==1||incy==-1) :: incy = 1 - dimension(m,n), intent(in,out,copy),optional :: & - a = <0.0,\0,(0.0\,0.0),\2,\2,\2> - integer intent(hide), depend(m) :: lda=m - -end subroutine ger<,,u,u,c,c> diff --git a/scipy-0.10.1/scipy/linalg/fblas_l3.pyf.src b/scipy-0.10.1/scipy/linalg/fblas_l3.pyf.src deleted file mode 100644 index cb1234ae12..0000000000 --- a/scipy-0.10.1/scipy/linalg/fblas_l3.pyf.src +++ /dev/null @@ -1,49 +0,0 @@ -! -*- f90 -*- -! Signatures for f2py-wrappers of FORTRAN LEVEL 3 BLAS functions. -! -! Author: Pearu Peterson -! Created: April 2002 -! Modified: Fabian Pedregosa, 2011 -! -! Implemented: -! gemm -! -! Not Implemented: -! symm, hemm, syrk, herk, syr2k, her2k, trmm, trsm -! 
- - -subroutine gemm(m,n,k,alpha,a,b,beta,c,trans_a,trans_b,lda,ka,ldb,kb) - ! Computes a scalar-matrix-matrix product and adds the result to a - ! scalar-matrix product. - ! - ! c = gemm(alpha,a,b,beta=0,c=0,trans_a=0,trans_b=0,overwrite_c=0) - ! Calculate C <- alpha * op(A) * op(B) + beta * C - - callstatement (*f2py_func)((trans_a?(trans_a==2?"C":"T"):"N"), & - (trans_b?(trans_b==2?"C":"T"):"N"),&m,&n,&k,&alpha,a,&lda,b,&ldb,&beta,c,&m) - callprotoargument char*,char*,int*,int*,int*,*,*,int*,*, & - int*,*,*,int* - - integer optional,intent(in),check(trans_a>=0 && trans_a <=2) :: trans_a = 0 - integer optional,intent(in),check(trans_b>=0 && trans_b <=2) :: trans_b = 0 - intent(in) :: alpha - intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2> - - dimension(lda,ka),intent(in) :: a - dimension(ldb,kb),intent(in) :: b - dimension(m,n),intent(in,out,copy),depend(m,n),optional :: c - check(shape(c,0)==m && shape(c,1)==n) :: c - - integer depend(a),intent(hide) :: lda = shape(a,0) - integer depend(a),intent(hide) :: ka = shape(a,1) - integer depend(b),intent(hide) :: ldb = shape(b,0) - integer depend(b),intent(hide) :: kb = shape(b,1) - - integer depend(a,trans_a,ka,lda),intent(hide):: m = (trans_a?ka:lda) - integer depend(a,trans_a,ka,lda),intent(hide):: k = (trans_a?lda:ka) - integer depend(b,trans_b,kb,ldb,k),intent(hide),check(trans_b?kb==k:ldb==k) :: & - n = (trans_b?ldb:kb) - - -end subroutine gemm diff --git a/scipy-0.10.1/scipy/linalg/flapack_user_routines.pyf b/scipy-0.10.1/scipy/linalg/flapack_user_routines.pyf deleted file mode 100644 index 6b4b700dbf..0000000000 --- a/scipy-0.10.1/scipy/linalg/flapack_user_routines.pyf +++ /dev/null @@ -1,37 +0,0 @@ -python module cgees__user__routines - interface cgees_user_interface - function cselect(e_w__i__e) ! 
in :flapack:cgees.f:cgees:unknown_interface - complex :: e_w__i__e - logical :: cselect - end function cselect - end interface cgees_user_interface -end python module cgees__user__routines - -python module dgees__user__routines - interface dgees_user_interface - function dselect(e_wr__i__e,e_wi__i__e) ! in :flapack:dgees.f:dgees:unknown_interface - double precision :: e_wr__i__e - double precision :: e_wi__i__e - logical :: dselect - end function dselect - end interface dgees_user_interface -end python module dgees__user__routines - -python module sgees__user__routines - interface sgees_user_interface - function sselect(e_wr__i__e,e_wi__i__e) ! in :flapack:sgees.f:sgees:unknown_interface - real :: e_wr__i__e - real :: e_wi__i__e - logical :: sselect - end function sselect - end interface sgees_user_interface -end python module sgees__user__routines - -python module zgees__user__routines - interface zgees_user_interface - function zselect(e_w__i__e) ! in :flapack:zgees.f:zgees:unknown_interface - complex*16 :: e_w__i__e - logical :: zselect - end function zselect - end interface zgees_user_interface -end python module zgees__user__routines diff --git a/scipy-0.10.1/scipy/linalg/flinalg.py b/scipy-0.10.1/scipy/linalg/flinalg.py deleted file mode 100644 index 85fb7601f9..0000000000 --- a/scipy-0.10.1/scipy/linalg/flinalg.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# Author: Pearu Peterson, March 2002 -# - -__all__ = ['get_flinalg_funcs'] - -# The following ensures that possibly missing flavor (C or Fortran) is -# replaced with the available one. If none is available, exception -# is raised at the first attempt to use the resources. 
-try: - import _flinalg -except ImportError: - _flinalg = None -# from numpy.distutils.misc_util import PostponedException -# _flinalg = PostponedException() -# print _flinalg.__doc__ - has_column_major_storage = lambda a:0 - -def has_column_major_storage(arr): - return arr.flags['FORTRAN'] - -_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',.. - -def get_flinalg_funcs(names,arrays=(),debug=0): - """Return optimal available _flinalg function objects with - names. arrays are used to determine optimal prefix.""" - ordering = [] - for i in range(len(arrays)): - t = arrays[i].dtype.char - if t not in _type_conv: - t = 'd' - ordering.append((t,i)) - if ordering: - ordering.sort() - required_prefix = _type_conv[ordering[0][0]] - else: - required_prefix = 'd' - # Some routines may require special treatment. - # Handle them here before the default lookup. - - # Default lookup: - if ordering and has_column_major_storage(arrays[ordering[0][1]]): - suffix1,suffix2 = '_c','_r' - else: - suffix1,suffix2 = '_r','_c' - - funcs = [] - for name in names: - func_name = required_prefix + name - func = getattr(_flinalg,func_name+suffix1, - getattr(_flinalg,func_name+suffix2,None)) - funcs.append(func) - return tuple(funcs) diff --git a/scipy-0.10.1/scipy/linalg/generic_cblas.pyf b/scipy-0.10.1/scipy/linalg/generic_cblas.pyf deleted file mode 100644 index 016c673fe9..0000000000 --- a/scipy-0.10.1/scipy/linalg/generic_cblas.pyf +++ /dev/null @@ -1,17 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of ATLAS C BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! -! Usage: -! 
f2py -c cblas.pyf -L/usr/local/lib/atlas -lf77blas -lcblas -latlas -lg2c - -python module cblas - interface - - - - end interface -end python module cblas diff --git a/scipy-0.10.1/scipy/linalg/generic_cblas1.pyf b/scipy-0.10.1/scipy/linalg/generic_cblas1.pyf deleted file mode 100644 index a8180b2609..0000000000 --- a/scipy-0.10.1/scipy/linalg/generic_cblas1.pyf +++ /dev/null @@ -1,55 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of ATLAS LEVEL 1 BLAS functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! -! Level 1 BLAS - -subroutine axpy(n,a,x,incx,y,incy) - - ! z = axpy(a,x,y,n=len(x)/abs(incx),incx=1,incy=incx,overwrite_y=0) - ! Calculate z = a*x+y, where a is scalar. - - fortranname cblas_axpy - - callstatement (*f2py_func)(n,a,x,incx,y,incy); - callprotoargument const int,const ,const *,const int,*,const int - - intent(c) - intent(c) axpy - - integer optional,intent(in),depend(x,incx) :: n = len(x)/abs(incx) - intent(in):: a - dimension(n),intent(in) :: x - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - dimension(n),depend(x),check(len(x)==len(y)) :: y - intent(in,out,copy,out=z) :: y - integer optional, intent(in),depend(incx) ,check(incy>0||incy<0) :: incy = incx - -end subroutine axpy - -subroutine axpy(n,a,x,incx,y,incy) - - ! z = axpy(a,x,y,n=len(x)/abs(incx),incx=1,incy=incx,overwrite_y=0) - ! Calculate z = a*x+y, where a is scalar. 
- - fortranname cblas_axpy - - callstatement (*f2py_func)(n,&a,x,incx,y,incy); - callprotoargument const int,const *,const *,const int,*,const int - - intent(c) - intent(c) axpy - - integer optional,intent(in),depend(x,incx) :: n = len(x)/abs(incx) - intent(in):: a - dimension(n),intent(in) :: x - integer optional, intent(in),check(incx>0||incx<0) :: incx = 1 - dimension(n),depend(x),check(len(x)==len(y)) :: y - intent(in,out,copy,out=z) :: y - integer optional, intent(in),depend(incx) ,check(incy>0||incy<0) :: incy = incx - -end subroutine axpy - diff --git a/scipy-0.10.1/scipy/linalg/generic_clapack.pyf b/scipy-0.10.1/scipy/linalg/generic_clapack.pyf deleted file mode 100644 index 5b7824cc8f..0000000000 --- a/scipy-0.10.1/scipy/linalg/generic_clapack.pyf +++ /dev/null @@ -1,223 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py-wrappers of ATLAS C LAPACK functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! -! Usage: -! f2py -c clapack.pyf -L/usr/local/lib/atlas -llapack -lf77blas -lcblas -latlas -lg2c - -python module clapack - -interface - - function gesv(n,nrhs,a,piv,b,info,rowmajor) - - ! lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A * P = L * U - ! U is unit upper diagonal triangular, L is lower triangular, - ! piv pivots columns. 
- - fortranname clapack_gesv - integer intent(c,hide) :: gesv - callstatement gesv_return_value = info = (*f2py_func)(102-rowmajor,n,nrhs,a,n,piv,b,n) - callprotoargument const int,const int,const int,*,const int,int*,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - integer dimension(n),depend(n),intent(out) :: piv - dimension(n,nrhs),check(shape(a,0)==shape(b,0)),depend(n) :: b - integer intent(out)::info - intent(in,out,copy,out=x) b - intent(c,in,out,copy,out=lu) a - - end function gesv - - function posv(n,nrhs,a,b,info,lower,rowmajor) - - ! c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - fortranname clapack_posv - integer intent(c,hide) :: posv - callstatement posv_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,nrhs,a,n,b,n) - callprotoargument const int,const int,const int,const int,*,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(c,in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(a,0)==shape(b,0)) :: b - integer intent(out) :: info - - end function posv - - function potrf(n,a,info,lower,clean,rowmajor) - - ! c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^T * U, C = U if lower = 0 - ! 
A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! clean==1 zeros strictly lower or upper parts of U or L, respectively - - fortranname clapack_potrf - integer intent(c,hide) :: potrf - callstatement potrf_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,a,n); if(clean){int i,j;if(lower){for(i=0;i*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(clean==0||clean==1) :: clean = 1 - - integer depend(a),intent(hide):: n = shape(a,0) - dimension(n,n),intent(c,in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - integer intent(out) :: info - - end function potrf - - function potrf(n,a,info,lower,clean,rowmajor) - - ! c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^H * U, C = U if lower = 0 - ! A = L * L^H, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! clean==1 zeros strictly lower or upper parts of U or L, respectively - - fortranname clapack_potrf - integer intent(c,hide) :: potrf - callstatement potrf_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,a,n); if(clean){int i,j,k;if(lower){for(i=0;ir=(a+k)->i=0.0;}} else {for(i=0;ir=(a+k)->i=0.0;}}} - callprotoargument const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(clean==0||clean==1) :: clean = 1 - - integer depend(a),intent(hide):: n = shape(a,0) - dimension(n,n),intent(c,in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - integer intent(out) :: info - - end function potrf - - - function potrs(n,nrhs,c,b,info,lower,rowmajor) - - ! 
x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0) - ! Solve A * X = b. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - fortranname clapack_potrs - integer intent(c,hide) :: potrs - callstatement potrs_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,nrhs,c,n,b,n) - callprotoargument const int,const int,const int,const int,*,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(c,in) :: c - check(shape(c,0)==shape(c,1)) :: c - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(c,0)==shape(b,0)) :: b - integer intent(out) :: info - - end function potrs - - function potri(n,c,info,lower,rowmajor) - - ! inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0) - ! Compute A inverse A^-1. - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - fortranname clapack_potri - integer intent(c,hide) :: potri - callstatement potri_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,c,n) - callprotoargument const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=inv_a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end function potri - - - function lauum(n,c,info,lower,rowmajor) - - ! a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0) - ! Compute product - ! U^T * U, C = U if lower = 0 - ! L * L^T, C = L if lower = 1 - ! 
C is triangular matrix of the corresponding Cholesky decomposition. - - fortranname clapack_lauum - integer intent(c,hide) :: lauum - callstatement lauum_return_value = info = (*f2py_func)(102-rowmajor,121+lower,n,c,n) - callprotoargument const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end function lauum - - function trtri(n,c,info,lower,unitdiag,rowmajor) - - ! inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0) - ! Compute C inverse C^-1 where - ! C = U if lower = 0 - ! C = L if lower = 1 - ! C is non-unit triangular matrix if unitdiag = 0 - ! C is unit triangular matrix if unitdiag = 1 - - fortranname clapack_trtri - integer intent(c,hide) :: trtri - callstatement trtri_return_value = info = (*f2py_func)(102-rowmajor,121+lower,131+unitdiag,n,c,n) - callprotoargument const int,const int,const int,const int,*,const int - - integer optional,intent(in),check(rowmajor==1||rowmajor==0) :: rowmajor = 1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=inv_c) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end function trtri - -end interface - -end python module clapack - -! This file was auto-generated with f2py (version:2.10.173). -! 
See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/linalg/generic_flapack.pyf b/scipy-0.10.1/scipy/linalg/generic_flapack.pyf deleted file mode 100644 index 5b12c5db51..0000000000 --- a/scipy-0.10.1/scipy/linalg/generic_flapack.pyf +++ /dev/null @@ -1,1962 +0,0 @@ -!%f90 -*- f90 -*- -! Signatures for f2py wrappers of FORTRAN LAPACK functions. -! -! Author: Pearu Peterson -! Created: Jan-Feb 2002 -! $Revision$ $Date$ -! -! Additions by Travis Oliphant -! Additions by Tiziano Zito -! Additions by Collin RM Stocks -! Usage: -! f2py -c generic_flapack.pyf -L/usr/local/lib/atlas -llapack -lf77blas -lcblas -latlas -lg2c - -python module flapack -interface - - subroutine pbtrf(lower,n,kd,ab,ldab,info) - - ! Compute Cholesky decomposition of banded symmetric positive definite - ! matrix: - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,&kd,ab,&ldab,&info); - callprotoargument char*,int*,int*,*,int*,int* - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - dimension(ldab,n),intent(in,out,copy,out=c) :: ab - integer intent(out) :: info - - end subroutine pbtrf - - subroutine pbtrf(lower,n,kd,ab,ldab,info) - - - ! Compute Cholesky decomposition of banded symmetric positive definite - ! matrix: - ! A = U^H * U, C = U if lower = 0 - ! A = L * L^H, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. 
- - callstatement (*f2py_func)((lower?"L":"U"),&n,&kd,ab,&ldab,&info); - callprotoargument char*,int*,int*,*,int*,int* - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - dimension(ldab,n),intent(in,out,copy,out=c) :: ab - integer intent(out) :: info - - end subroutine pbtrf - - - subroutine pbtrs(lower, n, kd, nrhs, ab, ldab, b, ldb, info) - - ! Solve a system of linear equations A*X = B with a symmetric - ! positive definite band matrix A using the Cholesky factorization. - ! AB is the triangular factur U or L from the Cholesky factorization - ! previously computed with *PBTRF. - ! A = U^T * U, AB = U if lower = 0 - ! A = L * L^T, AB = L if lower = 1 - - callstatement (*f2py_func)((lower?"L":"U"),&n,&kd,&nrhs,ab,&ldab,b,&ldb,&info); - callprotoargument char*,int*,int*,int*,*,int*,*,int*,int* - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - integer intent(hide),depend(b) :: ldb=shape(b,0) - integer intent(hide),depend(b) :: nrhs=shape(b,1) - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - dimension(ldb, nrhs),intent(in,out,copy,out=x) :: b - dimension(ldab,n),intent(in) :: ab - integer intent(out) :: info - - end subroutine pbtrs - - subroutine pbtrs(lower, n, kd, nrhs, ab, ldab, b, ldb, info) - - ! Solve a system of linear equations A*X = B with a symmetric - ! positive definite band matrix A using the Cholesky factorization. - ! AB is the triangular factur U or L from the Cholesky factorization - ! previously computed with *PBTRF. - ! A = U^T * U, AB = U if lower = 0 - ! 
A = L * L^T, AB = L if lower = 1 - - callstatement (*f2py_func)((lower?"L":"U"),&n,&kd,&nrhs,ab,&ldab,b,&ldb,&info); - callprotoargument char*,int*,int*,int*,*,int*,*,int*,int* - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - integer intent(hide),depend(b) :: ldb=shape(b,0) - integer intent(hide),depend(b) :: nrhs=shape(b,1) - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - dimension(ldb, nrhs),intent(in,out,copy,out=x) :: b - dimension(ldab,n),intent(in) :: ab - integer intent(out) :: info - - end subroutine pbtrs - - - subroutine trtrs(lower, trans, unitdiag, n, nrhs, a, lda, b, ldb, info) - - ! Solve a system of linear equations A*X = B with a triangular - ! matrix A. - - callstatement (*f2py_func)((lower?"L":"U"),(trans?(trans==2?"C":"T"):"N"),(unitdiag?"U":"N"),&n,&nrhs,a,&lda,b,&ldb,&info); - callprotoargument char*,char*,char*,int*,int*,*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(trans>=0 && trans <=2) :: trans = 0 - integer optional,intent(in),check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - integer optional,check(shape(a,0)==lda),depend(a) :: lda=shape(a,0) - integer intent(hide),depend(a) :: n=shape(a,1) - integer intent(hide),depend(b) :: ldb=shape(b,0) - integer intent(hide),depend(b) :: nrhs=shape(b,1) - - dimension(ldb, nrhs),intent(in,out,copy,out=x) :: b - dimension(lda,n),intent(in) :: a - integer intent(out) :: info - end subroutine trtrs - - - subroutine pbsv(lower,n,kd,nrhs,ab,ldab,b,ldb,info) - - ! - ! Computes the solution to a real system of linear equations - ! A * X = B, - ! where A is an N-by-N symmetric positive definite band matrix and X - ! and B are N-by-NRHS matrices. - ! - ! The Cholesky decomposition is used to factor A as - ! A = U**T * U, if lower=1, or - ! A = L * L**T, if lower=0 - ! 
where U is an upper triangular band matrix, and L is a lower - ! triangular band matrix, with the same number of superdiagonals or - ! subdiagonals as A. The factored form of A is then used to solve the - ! system of equations A * X = B. - - callstatement (*f2py_func)((lower?"L":"U"),&n,&kd,&nrhs,ab,&ldab,b,&ldb,&info); - callprotoargument char*,int*,int*,int*,*,int*,*,int*,int* - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - integer intent(hide),depend(b) :: ldb=shape(b,0) - integer intent(hide),depend(b) :: nrhs=shape(b,1) - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - dimension(ldb, nrhs),intent(in,out,copy,out=x) :: b - dimension(ldab,n),intent(in,out,copy,out=c) :: ab - integer intent(out) :: info - - end subroutine pbsv - - subroutine pbsv(lower,n,kd,nrhs,ab,ldab,b,ldb,info) - - ! - ! Computes the solution to a real system of linear equations - ! A * X = B, - ! where A is an N-by-N Hermitian positive definite band matrix and X - ! and B are N-by-NRHS matrices. - ! - ! The Cholesky decomposition is used to factor A as - ! A = U**H * U, if lower=1, or - ! A = L * L**H, if lower=0 - ! where U is an upper triangular band matrix, and L is a lower - ! triangular band matrix, with the same number of superdiagonals or - ! subdiagonals as A. The factored form of A is then used to solve the - ! system of equations A * X = B. 
- - callstatement (*f2py_func)((lower?"L":"U"),&n,&kd,&nrhs,ab,&ldab,b,&ldb,&info); - callprotoargument char*,int*,int*,int*,*,int*,*,int*,int* - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - integer intent(hide),depend(b) :: ldb=shape(b,0) - integer intent(hide),depend(b) :: nrhs=shape(b,1) - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - dimension(ldb, nrhs),intent(in,out,copy,out=x) :: b - dimension(ldab,n),intent(in,out,copy,out=c) :: ab - integer intent(out) :: info - - end subroutine pbsv - - subroutine gebal(scale,permute,n,a,m,lo,hi,pivscale,info) - ! - ! ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0) - ! Balance general matrix a. - ! hi,lo are such that ba[i][j]==0 if i>j and j=0...lo-1 or i=hi+1..n-1 - ! pivscale([0:lo], [lo:hi+1], [hi:n+1]) = (p1,d,p2) where (p1,p2)[j] is - ! the index of the row and column interchanged with row and column j. - ! d[j] is the scaling factor applied to row and column j. - ! The order in which the interchanges are made is n-1 to hi+1, then 0 to lo-1. - ! - ! P * A * P = [[T1,X,Y],[0,B,Z],[0,0,T2]] - ! BA = [[T1,X*D,Y],[0,inv(D)*B*D,ind(D)*Z],[0,0,T2]] - ! where D = diag(d), T1,T2 are upper triangular matrices. - ! lo,hi mark the starting and ending columns of submatrix B. 
- - callstatement { (*f2py_func)((permute?(scale?"B":"P"):(scale?"S":"N")),&n,a,&m,&lo,&hi,pivscale,&info); hi--; lo--; } - callprotoargument char*,int*,*,int*,int*,int*,*,int* - integer intent(in),optional :: permute = 0 - integer intent(in),optional :: scale = 0 - integer intent(hide),depend(a,n) :: m = shape(a,0) - integer intent(hide),depend(a) :: n = shape(a,1) - check(m>=n) m - integer intent(out) :: hi,lo - dimension(n),intent(out),depend(n) :: pivscale - dimension(m,n),intent(in,out,copy,out=ba) :: a - integer intent(out) :: info - - end subroutine gebal - - subroutine gehrd(n,lo,hi,a,tau,work,lwork,info) - ! - ! hq,tau,info = gehrd(a,lo=0,hi=n-1,lwork=n,overwrite_a=0) - ! Reduce general matrix A to upper Hessenberg form H by unitary similarity - ! transform Q^H * A * Q = H - ! - ! Q = H(lo) * H(lo+1) * ... * H(hi-1) - ! H(i) = I - tau * v * v^H - ! v[0:i+1] = 0, v[i+1]=1, v[hi+1:n] = 0 - ! v[i+2:hi+1] is stored in hq[i+2:hi+i,i] - ! tau is tau[i] - ! - ! hq for n=7,lo=1,hi=5: - ! [a a h h h h a - ! a h h h h a - ! h h h h h h - ! v2h h h h h - ! v2v3h h h h - ! v2v3v4h h h - ! a] - ! - callstatement { hi++; lo++; (*f2py_func)(&n,&lo,&hi,a,&n,tau,work,&lwork,&info); } - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=ht,aligned8),check(shape(a,0)==shape(a,1)) :: a - integer intent(in),optional :: lo = 0 - integer intent(in),optional,depend(n) :: hi = n-1 - dimension(n-1),intent(out),depend(n) :: tau - dimension(lwork),intent(cache,hide),depend(lwork) :: work - integer intent(in),optional,depend(n),check(lwork>=MAX(n,1)) :: lwork = MAX(n,1) - integer intent(out) :: info - - end subroutine gehrd - - subroutine gbsv(n,kl,ku,nrhs,ab,piv,b,info) - ! - ! lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0) - ! Solve A * X = B - ! A = P * L * U - ! A is a band matrix of order n with kl subdiagonals and ku superdiagonals - ! starting at kl-th row. - ! 
X, B are n-by-nrhs matrices - ! - callstatement {int i=2*kl+ku+1;(*f2py_func)(&n,&kl,&ku,&nrhs,ab,&i,piv,b,&n,&info);for(i=0;i*,int*,int*,*,int*,int* - integer depend(ab),intent(hide):: n = shape(ab,1) - integer intent(in) :: kl - integer intent(in) :: ku - integer depend(b),intent(hide) :: nrhs = shape(b,1) - dimension(2*kl+ku+1,n),depend(kl,ku), check(2*kl+ku+1==shape(ab,0)) :: ab - integer dimension(n),depend(n),intent(out) :: piv - dimension(n,nrhs),depend(n),check(shape(ab,1)==shape(b,0)) :: b - integer intent(out) :: info - intent(in,out,copy,out=x) b - intent(in,out,copy,out=lub) ab - end subroutine gbsv - - subroutine gesv(n,nrhs,a,piv,b,info) - - ! lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A = P * L * U - ! U is upper diagonal triangular, L is unit lower triangular, - ! piv pivots columns. - - callstatement {int i;(*f2py_func)(&n,&nrhs,a,&n,piv,b,&n,&info);for(i=0;i*,int*,int*,*,int*,int* - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - integer dimension(n),depend(n),intent(out) :: piv - dimension(n,nrhs),check(shape(a,0)==shape(b,0)),depend(n) :: b - integer intent(out)::info - intent(in,out,copy,out=x) b - intent(in,out,copy,out=lu) a - - end subroutine gesv - - subroutine getrf(m,n,a,piv,info) - - ! lu,piv,info = getrf(a,overwrite_a=0) - ! Compute an LU factorization of a general M-by-N matrix A. - ! A = P * L * U - threadsafe - callstatement {int i;(*f2py_func)(&m,&n,a,&m,piv,&info);for(i=0,n=MIN(m,n);i*,int*,int*,int* - - integer depend(a),intent(hide):: m = shape(a,0) - integer depend(a),intent(hide):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=lu) :: a - integer dimension(MIN(m,n)),depend(m,n),intent(out) :: piv - integer intent(out):: info - - end subroutine getrf - - subroutine getrs(n,nrhs,lu,piv,b,info,trans) - - ! x,info = getrs(lu,piv,b,trans=0,overwrite_b=0) - ! 
Solve A * X = B if trans=0 - ! Solve A^T * X = B if trans=1 - ! Solve A^H * X = B if trans=2 - ! A = P * L * U - threadsafe - callstatement {int i;for(i=0;i*,int*,int*,*,int*,int* - - integer optional,intent(in),check(trans>=0 && trans <=2) :: trans = 0 - - integer depend(lu),intent(hide):: n = shape(lu,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(in) :: lu - check(shape(lu,0)==shape(lu,1)) :: lu - integer dimension(n),intent(in),depend(n) :: piv - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n),check(shape(lu,0)==shape(b,0)) :: b - integer intent(out):: info - end subroutine getrs - - subroutine getri(n,lu,piv,work,lwork,info) - - ! inv_a,info = getri(lu,piv,lwork=3*n,overwrite_lu=0) - ! Find A inverse A^-1. - ! A = P * L * U - - callstatement {int i;for(i=0;i*,int*,int*,*,int*,int* - - integer depend(lu),intent(hide):: n = shape(lu,0) - dimension(n,n),intent(in,out,copy,out=inv_a) :: lu - check(shape(lu,0)==shape(lu,1)) :: lu - integer dimension(n),intent(in),depend(n) :: piv - integer intent(out):: info - integer optional,intent(in),depend(n),check(lwork>=n) :: lwork=3*n - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - end subroutine getri - - subroutine gesdd(m,n,minmn,u0,u1,vt0,vt1,a,compute_uv,full_matrices,u,s,vt,work,lwork,iwork,info) - - ! u,s,vt,info = gesdd(a,compute_uv=1,lwork=..,overwrite_a=0) - ! Compute the singular value decomposition (SVD): - ! A = U * SIGMA * transpose(V) - ! A - M x N matrix - ! U - M x M matrix or min(M,N) x N if full_matrices=False - ! SIGMA - M x N zero matrix with a main diagonal filled with min(M,N) - ! singular values - ! 
transpose(V) - N x N matrix or N x min(M,N) if full_matrices=False - - callstatement (*f2py_func)((compute_uv?(full_matrices?"A":"S"):"N"),&m,&n,a,&m,s,u,&u0,vt,&vt0,work,&lwork,iwork,&info) - callprotoargument char*,int*,int*,*,int*,*,*,int*,*,int*,*,int*,int*,int* - - integer intent(in),optional,check(compute_uv==0||compute_uv==1):: compute_uv = 1 - integer intent(in),optional,check(full_matrices==0||full_matrices==1):: full_matrices = 1 - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(m,n):: minmn = MIN(m,n) - integer intent(hide),depend(compute_uv,minmn) :: u0 = (compute_uv?m:1) - integer intent(hide),depend(compute_uv,minmn, full_matrices) :: u1 = (compute_uv?(full_matrices?m:minmn):1) - integer intent(hide),depend(compute_uv,minmn, full_matrices) :: vt0 = (compute_uv?(full_matrices?n:minmn):1) - integer intent(hide),depend(compute_uv,minmn) :: vt1 = (compute_uv?n:1) - dimension(m,n),intent(in,copy,aligned8) :: a - dimension(minmn),intent(out),depend(minmn) :: s - dimension(u0,u1),intent(out),depend(u0, u1) :: u - dimension(vt0,vt1),intent(out),depend(vt0, vt1) :: vt - dimension(lwork),intent(hide,cache),depend(lwork) :: work - integer optional,intent(in),depend(minmn,compute_uv) & - :: lwork = (compute_uv?4*minmn*minmn+MAX(m,n)+9*minmn:MAX(14*minmn+4,10*minmn+2+25*(25+8))+MAX(m,n)) - ! gesdd docs are mess: optimal turns out to be less than minimal in docs - ! check(lwork>=(compute_uv?3*minmn*minmn+MAX(MAX(m,n),4*minmn*(minmn+1)):MAX(14*minmn+4,10*minmn+2+25*(25+8))+MAX(m,n))) :: lwork - integer intent(hide,cache),dimension(8*minmn),depend(minmn) :: iwork - integer intent(out)::info - - end subroutine gesdd - - subroutine gesdd(m,n,minmn,u0,u1,vt0,vt1,a,compute_uv,full_matrices,u,s,vt,work,rwork,lwork,iwork,info) - - ! u,s,vt,info = gesdd(a,compute_uv=1,lwork=..,overwrite_a=0) - ! Compute the singular value decomposition (SVD): - ! A = U * SIGMA * conjugate-transpose(V) - ! 
A - M x N matrix - ! U - M x M matrix or min(M,N) x N if full_matrices=False - ! SIGMA - M x N zero matrix with a main diagonal filled with min(M,N) - ! singular values - ! transpose(V) - N x N matrix or N x min(M,N) if full_matrices=False - - callstatement (*f2py_func)((compute_uv?(full_matrices?"A":"S"):"N"),&m,&n,a,&m,s,u,&u0,vt,&vt0,work,&lwork,rwork,iwork,&info) - callprotoargument char*,int*,int*,*,int*,*,*,int*,*,int*,*,int*,*,int*,int* - - integer intent(in),optional,check(compute_uv==0||compute_uv==1):: compute_uv = 1 - integer intent(in),optional,check(full_matrices==0||full_matrices==1):: full_matrices = 1 - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(m,n):: minmn = MIN(m,n) - integer intent(hide),depend(compute_uv,minmn) :: u0 = (compute_uv?m:1) - integer intent(hide),depend(compute_uv,minmn, full_matrices) :: u1 = (compute_uv?(full_matrices?m:minmn):1) - integer intent(hide),depend(compute_uv,minmn, full_matrices) :: vt0 = (compute_uv?(full_matrices?n:minmn):1) - integer intent(hide),depend(compute_uv,minmn) :: vt1 = (compute_uv?n:1) - dimension(m,n),intent(in,copy) :: a - dimension(minmn),intent(out),depend(minmn) :: s - dimension(u0,u1),intent(out),depend(u0,u1) :: u - dimension(vt0,vt1),intent(out),depend(vt0,vt1) :: vt - dimension(lwork),intent(hide,cache),depend(lwork) :: work - dimension((compute_uv?5*minmn*minmn+7*minmn:5*minmn)),intent(hide,cache),depend(minmn,compute_uv) :: rwork - integer optional,intent(in),depend(minmn,compute_uv) & - :: lwork = (compute_uv?2*minmn*minmn+MAX(m,n)+2*minmn:2*minmn+MAX(m,n)) - check(lwork>=(compute_uv?2*minmn*minmn+MAX(m,n)+2*minmn:2*minmn+MAX(m,n))) :: lwork - integer intent(hide,cache),dimension(8*minmn),depend(minmn) :: iwork - integer intent(out)::info - - end subroutine gesdd - - subroutine gelss(m,n,minmn,maxmn,nrhs,a,b,s,cond,r,work,lwork,info) - - ! 
v,x,s,rank,work,info = gelss(a,b,cond=-1.0,overwrite_a=0,overwrite_b=0) - ! Solve Minimize 2-norm(A * X - B). - - callstatement (*f2py_func)(&m,&n,&nrhs,a,&m,b,&maxmn,s,&cond,&r,work,&lwork,&info) - callprotoargument int*,int*,int*,*,int*,*,int*,*,*,int*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(m,n):: minmn = MIN(m,n) - integer intent(hide),depend(m,n):: maxmn = MAX(m,n) - dimension(m,n),intent(in,out,copy,out=v) :: a - - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(maxmn,nrhs),check(maxmn==shape(b,0)),depend(maxmn) :: b - intent(in,out,copy,out=x) b - - intent(in),optional :: cond = -1.0 - integer intent(out,out=rank) :: r - intent(out),dimension(minmn),depend(minmn) :: s - - integer optional,intent(in),depend(nrhs,minmn,maxmn),& - check(lwork>=1||lwork==-1) & - :: lwork=3*minmn+MAX(2*minmn,MAX(maxmn,nrhs)) - !check(lwork>=3*minmn+MAX(2*minmn,MAX(maxmn,nrhs))) - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out)::info - - end subroutine gelss - - subroutine gelss(m,n,minmn,maxmn,nrhs,a,b,s,cond,r,work,rwork,lwork,info) - - ! v,x,s,rank,work,info = gelss(a,b,cond=-1.0,overwrite_a=0,overwrite_b=0) - ! Solve Minimize 2-norm(A * X - B). 
- - callstatement (*f2py_func)(&m,&n,&nrhs,a,&m,b,&maxmn,s,&cond,&r,work,&lwork,rwork,&info) - callprotoargument int*,int*,int*,*,int*,*,int*,*,*,int*,*,int*,*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(m,n):: minmn = MIN(m,n) - integer intent(hide),depend(m,n):: maxmn = MAX(m,n) - dimension(m,n),intent(in,out,copy,out=v) :: a - - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(maxmn,nrhs),check(maxmn==shape(b,0)),depend(maxmn) :: b - intent(in,out,copy,out=x) b - - intent(in),optional :: cond = -1.0 - integer intent(out,out=rank) :: r - intent(out),dimension(minmn),depend(minmn) :: s - - integer optional,intent(in),depend(nrhs,minmn,maxmn),& - check(lwork>=1||lwork==-1) & - :: lwork=2*minmn+MAX(maxmn,nrhs) - ! check(lwork>=2*minmn+MAX(maxmn,nrhs)) - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - dimension(5*minmn-1),intent(hide),depend(lwork) :: rwork - integer intent(out)::info - - end subroutine gelss - - subroutine geqp3(m,n,a,jpvt,tau,work,lwork,info) - - ! qr_a,jpvt,tau,work,info = geqp3(a,lwork=3*(n+1),overwrite_a=0) - ! Compute a QR factorization of a real M-by-N matrix A with column pivoting: - ! A * P = Q * R. - - callstatement (*f2py_func)(&m,&n,a,&m,jpvt,tau,work,&lwork,&info) - callprotoargument int*,int*,*,int*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=qr,aligned8) :: a - integer dimension(n),intent(out) :: jpvt - dimension(MIN(m,n)),intent(out) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*(n+1) - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine geqp3 - - subroutine geqp3(m,n,a,jpvt,tau,work,lwork,rwork,info) - - ! qr_a,jpvt,tau,work,info = geqp3(a,lwork,overwrite_a=0) - ! 
Compute a QR factorization of a complex M-by-N matrix A with column pivoting: - ! A * P = Q * R. - - callstatement (*f2py_func)(&m,&n,a,&m,jpvt,tau,work,&lwork,rwork,&info) - callprotoargument int*,int*,*,int*,int*,*,*,int*,*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=qr,aligned8) :: a - integer dimension(n),intent(out) :: jpvt - dimension(MIN(m,n)),intent(out) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*(n+1) - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - dimension(2*n),intent(hide),depend(n) :: rwork - integer intent(out) :: info - end subroutine geqp3 - - subroutine geqrf(m,n,a,tau,work,lwork,info) - - ! qr_a,tau,work,info = geqrf(a,lwork=3*n,overwrite_a=0) - ! Compute a QR factorization of a real M-by-N matrix A: - ! A = Q * R. - - callstatement (*f2py_func)(&m,&n,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=qr,aligned8) :: a - dimension(MIN(m,n)),intent(out) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*n - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine geqrf - - subroutine gerqf(m,n,a,tau,work,lwork,info) - - ! rq_a,tau,work,info = gerqf(a,lwork=3*n,overwrite_a=0) - ! Compute an RQ factorization of a real M-by-N matrix A: - ! A = R * Q. 
- - callstatement (*f2py_func)(&m,&n,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - dimension(m,n),intent(in,out,copy,out=qr,aligned8) :: a - dimension(MIN(m,n)),intent(out) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*n - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine gerqf - - subroutine orgqr(m,n,k,a,tau,work,lwork,info) - - ! q,work,info = orgqr(a,lwork=3*n,overwrite_a=0) - ! Generates an M-by-N real matrix Q with orthonormal columns, - ! which is defined as the first N columns of a product of K elementary - ! reflectors of order M (e.g. output of geqrf) - - callstatement (*f2py_func)(&m,&n,&k,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(tau):: k = shape(tau,0) - dimension(m,n),intent(in,out,copy,out=q) :: a - dimension(k),intent(in) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*n - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine orgqr - - subroutine ungqr(m,n,k,a,tau,work,lwork,info) - - ! q,work,info = ungqr(a,lwork=3*n,overwrite_a=0) - ! Generates an M-by-N complex matrix Q with unitary columns, - ! which is defined as the first N columns of a product of K elementary - ! reflectors of order M (e.g. 
output of geqrf) - - callstatement (*f2py_func)(&m,&n,&k,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(tau):: k = shape(tau,0) - dimension(m,n),intent(in,out,copy,out=q) :: a - dimension(k),intent(in) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*n - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine ungqr - - subroutine orgrq(m,n,k,a,tau,work,lwork,info) - - ! q,work,info = orgrq(a,lwork=3*n,overwrite_a=0) - ! Generates an M-by-N real matrix Q with orthonormal columns, - ! which is defined as the first N columns of a product of K elementary - ! reflectors of order M (e.g. output of gerqf) - - callstatement (*f2py_func)(&m,&n,&k,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(tau):: k = shape(tau,0) - dimension(m,n),intent(in,out,copy,out=q) :: a - dimension(k),intent(in) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*n - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine orgrq - - subroutine ungrq(m,n,k,a,tau,work,lwork,info) - - ! q,work,info = ungrq(a,lwork=3*n,overwrite_a=0) - ! Generates an M-by-N complex matrix Q with unitary columns, - ! which is defined as the first N columns of a product of K elementary - ! reflectors of order M (e.g. 
output of gerqf) - - callstatement (*f2py_func)(&m,&n,&k,a,&m,tau,work,&lwork,&info) - callprotoargument int*,int*,int*,*,int*,*,*,int*,int* - - integer intent(hide),depend(a):: m = shape(a,0) - integer intent(hide),depend(a):: n = shape(a,1) - integer intent(hide),depend(tau):: k = shape(tau,0) - dimension(m,n),intent(in,out,copy,out=q) :: a - dimension(k),intent(in) :: tau - - integer optional,intent(in),depend(n),check(lwork>=n||lwork==-1) :: lwork=3*n - dimension(MAX(lwork,1)),intent(out),depend(lwork) :: work - integer intent(out) :: info - end subroutine ungrq - - - subroutine geev(compute_vl,compute_vr,n,a,wr,wi,vl,ldvl,vr,ldvr,work,lwork,info) - - ! wr,wi,vl,vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=4*n,overwrite_a=0) - - callstatement {(*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,wr,wi,vl,&ldvl,vr,&ldvr,work,&lwork,&info);} - callprotoargument char*,char*,int*,*,int*,*,*,*,int*,*,int*,*,int*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy,aligned8) :: a - check(shape(a,0)==shape(a,1)) :: a - - dimension(n),intent(out),depend(n) :: wr - dimension(n),intent(out),depend(n) :: wi - - dimension(ldvl,n),intent(out) :: vl - integer intent(hide),depend(n,compute_vl) :: ldvl=(compute_vl?n:1) - - dimension(ldvr,n),intent(out) :: vr - integer intent(hide),depend(n,compute_vr) :: ldvr=(compute_vr?n:1) - - integer optional,intent(in),depend(n,compute_vl,compute_vr) :: lwork=4*n - check(lwork>=((compute_vl||compute_vr)?4*n:3*n)) :: lwork - dimension(lwork),intent(hide,cache),depend(lwork) :: work - - integer intent(out):: info - end subroutine geev - - subroutine geev(compute_vl,compute_vr,n,a,w,vl,ldvl,vr,ldvr,work,lwork,rwork,info) - - ! 
w,vl,vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=2*n,overwrite_a=0) - - callstatement (*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,w,vl,&ldvl,vr,&ldvr,work,&lwork,rwork,&info) - callprotoargument char*,char*,int*,*,int*,*,*,int*,*,int*,*,int*,*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - dimension(n),intent(out),depend(n) :: w - - dimension(ldvl,n),depend(ldvl),intent(out) :: vl - integer intent(hide),depend(compute_vl,n) :: ldvl=(compute_vl?n:1) - - dimension(ldvr,n),depend(ldvr),intent(out) :: vr - integer intent(hide),depend(compute_vr,n) :: ldvr=(compute_vr?n:1) - - integer optional,intent(in),depend(n) :: lwork=2*n - check(lwork>=2*n) :: lwork - dimension(lwork),intent(hide),depend(lwork) :: work - dimension(2*n),intent(hide,cache),depend(n) :: rwork - - integer intent(out):: info - end subroutine geev - - subroutine gegv(compute_vl,compute_vr,n,a,b,alphar,alphai,beta,vl,ldvl,vr,ldvr,work,lwork,info) - ! Compute the generalized eigenvalues (alphar +/- alphai*i, beta) - ! of the real nonsymmetric matrices A and B: det(A-w*B)=0 where w=alpha/beta. - ! Optionally, compute the left and/or right generalized eigenvectors: - ! (A - w B) r = 0, l^H * (A - w B) = 0 - ! - ! 
alphar,alphai,beta,vl,vr,info = gegv(a,b,compute_vl=1,compute_vr=1,lwork=8*n,overwrite_a=0,overwrite_b=0) - - callstatement (*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,b,&n,alphar,alphai,beta,vl,&ldvl,vr,&ldvr,work,&lwork,&info) - callprotoargument char*,char*,int*,*,int*,*,int*,*,*,*,*,int*,*,int*,*,int*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - dimension(n,n),depend(n),intent(in,copy) :: b - check(shape(b,0)==shape(b,1) && shape(b,0)==n) :: b - - dimension(n),depend(n),intent(out) :: alphar - dimension(n),depend(n),intent(out) :: alphai - dimension(n),depend(n),intent(out) :: beta - - dimension(ldvl,n),intent(out),depend(ldvl) :: vl - integer intent(hide),depend(compute_vl,n) :: ldvl=(compute_vl?n:1) - - dimension(ldvr,n),intent(out),depend(ldvr) :: vr - integer intent(hide),depend(compute_vr,n) :: ldvr=(compute_vr?n:1) - - integer optional,intent(in),depend(n) :: lwork=8*n - check(lwork>=8*n) :: lwork - dimension(lwork),intent(hide),depend(lwork) :: work - - integer intent(out):: info - - end subroutine gegv - - subroutine gegv(compute_vl,compute_vr,n,a,b,alpha,beta,vl,ldvl,vr,ldvr,work,lwork,rwork,info) - ! Compute the generalized eigenvalues (alpha, beta) - ! of the comples nonsymmetric matrices A and B: det(A-w*B)=0 where w=alpha/beta. - ! Optionally, compute the left and/or right generalized eigenvectors: - ! (A - w B) r = 0, l^H * (A - w B) = 0 - ! - ! 
alpha,beta,vl,vr,info = gegv(a,b,compute_vl=1,compute_vr=1,lwork=2*n,overwrite_a=0,overwrite_b=0) - - callstatement (*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,b,&n,alpha,beta,vl,&ldvl,vr,&ldvr,work,&lwork,rwork,&info) - callprotoargument char*,char*,int*,*,int*,*,int*,*,*,*,int*,*,int*,*,int*,*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - dimension(n,n),depend(n),intent(in,copy) :: b - check(shape(b,0)==shape(b,1) && shape(b,0)==n) :: b - - dimension(n),depend(n),intent(out) :: alpha - dimension(n),depend(n),intent(out) :: beta - - dimension(ldvl,n),intent(out),depend(ldvl) :: vl - integer intent(hide),depend(compute_vl,n) :: ldvl=(compute_vl?n:1) - - dimension(ldvr,n),intent(out),depend(ldvr) :: vr - integer intent(hide),depend(compute_vr,n) :: ldvr=(compute_vr?n:1) - - integer optional,intent(in),depend(n) :: lwork=2*n - check(lwork>=2*n) :: lwork - dimension(lwork),intent(hide),depend(lwork) :: work - dimension(8*n),intent(hide),depend(n) :: rwork - - integer intent(out):: info - - end subroutine gegv - - - subroutine syev(compute_v,lower,n,w,a,work,lwork,info) - - ! w,v,info = syev(a,compute_v=1,lower=0,lwork=3*n-1,overwrite_a=0) - ! Compute all eigenvalues and, optionally, eigenvectors of a - ! real symmetric matrix A. - ! - ! Performance tip: - ! If compute_v=0 then set also overwrite_a=1. 
- - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,a,&n,w,work,&lwork,&info) - callprotoargument char*,char*,int*,*,int*,*,*,int*,int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer intent(hide),depend(a):: n = shape(a,0) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - intent(in,copy,out,out=v) :: a - - dimension(n),intent(out),depend(n) :: w - - integer optional,intent(in),depend(n) :: lwork=3*n-1 - check(lwork>=3*n-1) :: lwork - dimension(lwork),intent(hide),depend(lwork) :: work - - integer intent(out) :: info - end subroutine syev - - subroutine heev(compute_v,lower,n,w,a,work,lwork,rwork,info) - - ! w,v,info = syev(a,compute_v=1,lower=0,lwork=3*n-1,overwrite_a=0) - ! Compute all eigenvalues and, optionally, eigenvectors of a - ! complex Hermitian matrix A. - ! - ! Warning: - ! If compute_v=0 and overwrite_a=1, the contents of a is destroyed. - - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,a,&n,w,work,&lwork,rwork,&info) - callprotoargument char*,char*,int*,*,int*,*,*,int*,*,int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer intent(hide),depend(a):: n = shape(a,0) - dimension(n,n),check(shape(a,0)==shape(a,1)) :: a - intent(in,copy,out,out=v) :: a - - dimension(n),intent(out),depend(n) :: w - - integer optional,intent(in),depend(n) :: lwork=2*n-1 - check(lwork>=2*n-1) :: lwork - dimension(lwork),intent(hide),depend(lwork) :: work - - dimension(3*n-1),intent(hide),depend(n) :: rwork - - integer intent(out) :: info - end subroutine heev - - subroutine posv(n,nrhs,a,b,info,lower) - - ! c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0) - ! Solve A * X = B. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! 
A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,&nrhs,a,&n,b,&n,&info) - callprotoargument char*,int*,int*,*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(a),intent(hide):: n = shape(a,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(a,0)==shape(b,0)) :: b - integer intent(out) :: info - - end subroutine posv - - subroutine potrf(n,a,info,lower,clean) - - ! c,info = potrf(a,lower=0,clean=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! clean==1 zeros strictly lower or upper parts of U or L, respectively - - callstatement (*f2py_func)((lower?"L":"U"),&n,a,&n,&info); if(clean){int i,j;if(lower){for(i=0;i*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(clean==0||clean==1) :: clean = 1 - integer depend(a),intent(hide):: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - integer intent(out) :: info - - end subroutine potrf - - subroutine potrf(n,a,info,lower,clean) - - ! c,info = potrf(a,lower=0,clean=1,overwrite_a=0) - ! Compute Cholesky decomposition of symmetric positive defined matrix: - ! A = U^H * U, C = U if lower = 0 - ! A = L * L^H, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - ! 
clean==1 zeros strictly lower or upper parts of U or L, respectively - - callstatement (*f2py_func)((lower?"L":"U"),&n,a,&n,&info); if(clean){int i,j,k;if(lower){for(i=0;ir=(a+k)->i=0.0;}} else {for(i=0;ir=(a+k)->i=0.0;}}} - callprotoargument char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(clean==0||clean==1) :: clean = 1 - integer depend(a),intent(hide):: n = shape(a,0) - dimension(n,n),intent(in,out,copy,out=c) :: a - check(shape(a,0)==shape(a,1)) :: a - integer intent(out) :: info - - end subroutine potrf - - subroutine potrs(n,nrhs,c,b,info,lower) - - ! x,info = potrs(c,b,lower=0=1,overwrite_b=0) - ! Solve A * X = B. - ! A is symmetric positive defined - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,&nrhs,c,&n,b,&n,&info) - callprotoargument char*,int*,int*,*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - integer depend(b),intent(hide):: nrhs = shape(b,1) - dimension(n,n),intent(in) :: c - check(shape(c,0)==shape(c,1)) :: c - dimension(n,nrhs),intent(in,out,copy,out=x),depend(n):: b - check(shape(c,0)==shape(b,0)) :: b - integer intent(out) :: info - - end subroutine potrs - - subroutine potri(n,c,info,lower) - - ! inv_a,info = potri(c,lower=0,overwrite_c=0) - ! Compute A inverse A^-1. - ! A = U^T * U, C = U if lower = 0 - ! A = L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. 
- - callstatement (*f2py_func)((lower?"L":"U"),&n,c,&n,&info) - callprotoargument char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(c,in,out,copy,out=inv_a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end subroutine potri - - - subroutine lauum(n,c,info,lower) - - ! a,info = lauum(c,lower=0,overwrite_c=0) - ! Compute product - ! U^T * U, C = U if lower = 0 - ! L * L^T, C = L if lower = 1 - ! C is triangular matrix of the corresponding Cholesky decomposition. - - callstatement (*f2py_func)((lower?"L":"U"),&n,c,&n,&info) - callprotoargument char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(in,out,copy,out=a) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end subroutine lauum - - subroutine trtri(n,c,info,lower,unitdiag) - - ! inv_c,info = trtri(c,lower=0,unitdiag=1,overwrite_c=0) - ! Compute C inverse C^-1 where - ! C = U if lower = 0 - ! C = L if lower = 1 - ! C is non-unit triangular matrix if unitdiag = 0 - ! C is unit triangular matrix if unitdiag = 1 - - callstatement (*f2py_func)((lower?"L":"U"),(unitdiag?"U":"N"),&n,c,&n,&info) - callprotoargument char*,char*,int*,*,int*,int* - - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - integer optional,intent(in),check(unitdiag==0||unitdiag==1) :: unitdiag = 0 - - integer depend(c),intent(hide):: n = shape(c,0) - dimension(n,n),intent(in,out,copy,out=inv_c) :: c - check(shape(c,0)==shape(c,1)) :: c - integer intent(out) :: info - - end subroutine trtri - - - subroutine laswp(n,a,nrows,k1,k2,piv,off,inc,m) - - ! a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0) - ! Perform row interchanges on the matrix A for each of row k1 through k2 - ! - ! piv pivots rows. 
- - callstatement {int i;m=len(piv);for(i=0;i*,int*,int*,int*,int*,int* - - integer depend(a),intent(hide):: nrows = shape(a,0) - integer depend(a),intent(hide):: n = shape(a,1) - dimension(nrows,n),intent(in,out,copy) :: a - integer dimension(*),intent(in),depend(nrows) :: piv - check(len(piv)<=nrows) :: piv -!XXX: how to check that all elements in piv are < n? - - integer optional,intent(in) :: k1 = 0 - check(0<=k1) :: k1 - integer optional,intent(in),depend(k1,piv,off) :: k2 = len(piv)-1 - check(k1<=k2 && k20||inc<0) :: inc = 1 - integer optional,intent(in),depend(piv) :: off=0 - check(off>=0 && off(m-1)*abs(inc)) :: m - - end subroutine laswp - - subroutine gees(compute_v,sort_t,select,n,a,nrows,sdim,w,vs,ldvs,work,lwork,rwork,bwork,info) - - ! t,sdim,w,vs,work,info=gees(compute_v=1,sort_t=0,select,a,lwork=3*n) - ! For an NxN matrix compute the eigenvalues, the schur form T, and optionally - ! the matrix of Schur vectors Z. This gives the Schur factorization - ! A = Z * T * Z^H -- a complex matrix is in Schur form if it is upper - ! 
triangular - - callstatement (*f2py_func)((compute_v?"V":"N"),(sort_t?"S":"N"),cb_select_in_gees__user__routines,&n,a,&nrows,&sdim,w,vs,&ldvs,work,&lwork,rwork,bwork,&info,1,1) - callprotoargument char*,char*,int(*)(*),int*,*,int*,int*,*,*,int*,*,int*,*,int*,int*,int,int - - use gees__user__routines - - integer optional,intent(in),check(compute_v==0||compute_v==1) :: compute_v = 1 - integer optional,intent(in),check(sort_t==0||sort_t==1) :: sort_t = 0 - external select - integer intent(hide),depend(a) :: n = shape(a,1) - intent(in,out,copy,out=t),check(shape(a,0)==shape(a,1)),dimension(n,n) :: a - integer intent(hide),depend(a) :: nrows=shape(a,0) - integer intent(out) :: sdim=0 - intent(out),dimension(n) :: w - intent(out),depend(ldvs,n),dimension(ldvs,n) :: vs - integer intent(hide),depend(compute_v,n) :: ldvs=((compute_v==1)?n:1) - intent(out),depend(lwork),dimension(MAX(lwork,1)) :: work - integer optional,intent(in),check((lwork==-1)||(lwork >= MAX(1,2*n))),depend(n) :: lwork = 3*n - optional,intent(hide),depend(n),dimension(n) :: rwork - logical optional,intent(hide),depend(n),dimension(n) :: bwork - integer intent(out) :: info - end subroutine gees - - subroutine gees(compute_v,sort_t,select,n,a,nrows,sdim,wr,wi,vs,ldvs,work,lwork,bwork,info) - - ! t,sdim,w,vs,work,info=gees(compute_v=1,sort_t=0,select,a,lwork=3*n) - ! For an NxN matrix compute the eigenvalues, the schur form T, and optionally - ! the matrix of Schur vectors Z. This gives the Schur factorization - ! A = Z * T * Z^H -- a real matrix is in Schur form if it is upper quasi- - ! triangular with 1x1 and 2x2 blocks. 
- - callstatement (*f2py_func)((compute_v?"V":"N"),(sort_t?"S":"N"),cb_select_in_gees__user__routines,&n,a,&nrows,&sdim,wr,wi,vs,&ldvs,work,&lwork,bwork,&info,1,1) - callprotoargument char*,char*,int(*)(*,*),int*,*,int*,int*,*,*,*,int*,*,int*,int*,int*,int,int - - use gees__user__routines - - integer optional,intent(in),check(compute_v==0||compute_v==1) :: compute_v = 1 - integer optional,intent(in),check(sort_t==0||sort_t==1) :: sort_t = 0 - external select - integer intent(hide),depend(a) :: n = shape(a,1) - intent(in,out,copy,out=t,aligned8),check(shape(a,0)==shape(a,1)),dimension(n,n) :: a - integer intent(hide),depend(a) :: nrows=shape(a,0) - integer intent(out) :: sdim=0 - intent(out),dimension(n) :: wr - intent(out),dimension(n) :: wi - intent(out),depend(ldvs,n),dimension(ldvs,n) :: vs - integer intent(hide),depend(compute_v,n) :: ldvs=((compute_v==1)?n:1) - intent(out),depend(lwork),dimension(MAX(lwork,1)) :: work - integer optional,intent(in),check((lwork==-1)||(lwork >= MAX(1,2*n))),depend(n) :: lwork = 3*n - optional,intent(hide),depend(n),dimension(n) :: rwork - logical optional,intent(hide),depend(n),dimension(n) :: bwork - integer intent(out) :: info -end subroutine gees - - -subroutine ggev(compute_vl,compute_vr,n,a,b,alphar,alphai,beta,vl,ldvl,vr,ldvr,work,lwork,info) - - callstatement {(*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,b,&n,alphar,alphai,beta,vl,&ldvl,vr,&ldvr,work,&lwork,&info);} - callprotoargument char*,char*,int*,*,int*,*,int*,*,*,*,*,int*,*,int*,*,int*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - intent(in,copy), dimension(n,n) :: b - check(shape(b,0)==shape(b,1)) :: b - - intent(out), dimension(n), depend(n) :: alphar - intent(out), 
dimension(n), depend(n) :: alphai - intent(out), dimension(n), depend(n) :: beta - - depend(ldvl,n), dimension(ldvl,n),intent(out) :: vl - integer intent(hide),depend(n,compute_vl) :: ldvl=(compute_vl?n:1) - - depend(ldvr,n), dimension(ldvr,n),intent(out) :: vr - integer intent(hide),depend(n,compute_vr) :: ldvr=(compute_vr?n:1) - - integer optional,intent(in),depend(n,compute_vl,compute_vr) :: lwork=8*n - check((lwork==-1) || (lwork>=MAX(1,8*n))) :: lwork - intent(out), dimension(MAX(lwork,1)), depend(lwork) :: work - - integer intent(out):: info - -end subroutine ggev - -subroutine ggev(compute_vl,compute_vr,n,a,b,alpha,beta,vl,ldvl,vr,ldvr,work,lwork,rwork,info) - - callstatement {(*f2py_func)((compute_vl?"V":"N"),(compute_vr?"V":"N"),&n,a,&n,b,&n,alpha,beta,vl,&ldvl,vr,&ldvr,work,&lwork,rwork,&info);} - callprotoargument char*,char*,int*,*,int*,*,int*,*,*,*,int*,*,int*,*,int*,*,int* - - integer optional,intent(in):: compute_vl = 1 - check(compute_vl==1||compute_vl==0) compute_vl - integer optional,intent(in):: compute_vr = 1 - check(compute_vr==1||compute_vr==0) compute_vr - - integer intent(hide),depend(a) :: n = shape(a,0) - dimension(n,n),intent(in,copy) :: a - check(shape(a,0)==shape(a,1)) :: a - - intent(in,copy), dimension(n,n) :: b - check(shape(b,0)==shape(b,1)) :: b - - intent(out), dimension(n), depend(n) :: alpha - intent(out), dimension(n), depend(n) :: beta - - depend(ldvl,n), dimension(ldvl,n),intent(out) :: vl - integer intent(hide),depend(n,compute_vl) :: ldvl=(compute_vl?n:1) - - depend(ldvr,n), dimension(ldvr,n),intent(out) :: vr - integer intent(hide),depend(n,compute_vr) :: ldvr=(compute_vr?n:1) - - integer optional,intent(in),depend(n,compute_vl,compute_vr) :: lwork=2*n - check((lwork==-1) || (lwork>=MAX(1,2*n))) :: lwork - intent(out), dimension(MAX(lwork,1)), depend(lwork) :: work - intent(hide), dimension(8*n), depend(n) :: rwork - - integer intent(out):: info - -end subroutine ggev - -! 
if anything is wrong with the following wrappers (until *gbtrs) -! blame Arnd Baecker and Johannes Loehnert and not Pearu -subroutine sbev(ab,compute_v,lower,n,ldab,kd,w,z,ldz,work,info) ! in :Band:dsbev.f - ! principally sbevd does the same, and are recommended for use. - ! (see man dsbevd) - - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,&kd,ab,&ldab,w,z,&ldz,work,&info) - - callprotoargument char*,char*,int*,int*,*,int*,*,*,int*,*,int* - - ! Remark: if ab is fortran contigous on input - ! and overwrite_ab=1 ab will be overwritten. - dimension(ldab,*), intent(in,overwrite) :: ab - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - - dimension(n),intent(out),depend(n) :: w - - ! For compute_v=1 z is used and contains the eigenvectors - integer intent(hide),depend(n) :: ldz=(compute_v?n:1) - dimension(ldz,ldz),intent(out),depend(ldz) :: z - - dimension(MAX(1,3*n-1)),intent(hide),depend(n) :: work - integer intent(out)::info -end subroutine sbev - - - -subroutine sbevd(ab,compute_v,lower,n,ldab,kd,w,z,ldz,work,lwork,iwork,liwork,info) ! in :Band:dsbevd.f - - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,&kd,ab,&ldab,w,z,&ldz,work,&lwork,iwork,&liwork,&info) - - callprotoargument char*,char*,int*,int*,*,int*,*,*,int*,*,int*,int*,int*,int* - - ! Remark: if ab is fortran contigous on input - ! and overwrite_ab=1 ab will be overwritten. 
- dimension(ldab,*), intent(in, overwrite) :: ab - - integer optional,intent(in):: compute_v = 1 - check( compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - - dimension(n),intent(out),depend(n) :: w - dimension(ldz,ldz),intent(out),depend(ldz) :: z - - ! For compute_v=1 z is used and contains the eigenvectors - integer intent(hide),depend(n) :: ldz=(compute_v?n:1) - dimension(ldz,ldz),depend(ldz) :: z - - integer intent(hide),depend(n) :: lwork=(compute_v?1+5*n+2*n*n:2*n) - dimension(lwork),intent(hide),depend(lwork) :: work - integer intent(out)::info - - integer optional,check(liwork>=(compute_v?3+5*n:1)),depend(n) :: liwork=(compute_v?3+5*n:1) - integer intent(hide),dimension(liwork),depend(liwork) :: iwork -end subroutine sbevd - - - -subroutine sbevx(ab,ldab,compute_v,range,lower,n,kd,q,ldq,vl,vu,il,iu,abstol,w,z,m,mmax,ldz,work,iwork,ifail,info) ! in :Band:dsbevx.f - - callstatement (*f2py_func)((compute_v?"V":"N"),(range>0?(range==1?"V":"I"):"A"),(lower?"L":"U"),&n,&kd,ab,&ldab,q,&ldq,&vl,&vu,&il,&iu,&abstol,&m,w,z,&ldz,work,iwork,ifail,&info) - - callprotoargument char*,char*,char*,int*,int*,*,int*,*,int*,*,*,int*,int*,*,int*,*,*,int*,*, int*,int*,int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - - integer optional,intent(in):: range = 0 - check(range==2||range==1||range==0) range - - - ! Remark: if ab is fortran contigous on input - ! and overwrite_ab=1 ab will be overwritten. 
- dimension(ldab,*),intent(in, overwrite) :: ab - - - ! FIXME: do we need to make q available for outside usage ??? - ! If so: how to make this optional - !* Q (output) DOUBLE PRECISION array, dimension (LDQ, N) - !* If JOBZ = 'V', the N-by-N orthogonal matrix used in the - !* reduction to tridiagonal form. - !* If JOBZ = 'N', the array Q is not referenced. - integer intent(hide),depend(n) :: ldq=(compute_v?n:1) - dimension(ldq,ldq),intent(hide),depend(ldq) :: q - - - :: vl - :: vu - integer,check((il>=1 && il<=n)),depend(n) :: il - integer,check((iu>=1 && iu<=n && iu>=il)),depend(n,il) :: iu - - ! Remark, we don't use python indexing here, because - ! if someone uses ?sbevx directly, - ! he should expect Fortran style indexing. - !integer,check((il>=0 && il=0 && iu=il)),depend(n,il) :: iu+1 - - ! Remark: - ! Eigenvalues will be computed most accurately when ABSTOL is - ! set to twice the underflow threshold 2*DLAMCH('S'), not zero. - ! - ! The easiest is to wrap DLAMCH (done below) - ! and let the user provide the value. - optional,intent(in):: abstol=0.0 - - dimension(n),intent(out),depend(n) :: w - - dimension(ldz,mmax),intent(out) :: z - integer intent(hide),depend(n) :: ldz=(compute_v?n:1) - - ! We use the mmax parameter to fix the size of z - ! (only if eigenvalues are requested) - ! Otherwise we would allocate a (possibly) huge - ! region of memory for the eigenvectors, even - ! in cases where only a few are requested. - ! If RANGE = 'V' (range=1) we a priori don't know the - ! number of eigenvalues in the interval in advance. - ! As default we use the maximum value - ! but the user should use an appropriate mmax. 
- integer intent(in),depend(n) :: mmax=(compute_v?(range==2?(iu-il+1):n):1) - integer intent(out) :: m - - dimension(7*n),intent(hide) :: work - integer dimension(5*n),intent(hide) :: iwork - integer dimension((compute_v?n:1)),intent(out) :: ifail - integer intent(out):: info -end subroutine sbevx - - -subroutine hbevd(ab,compute_v,lower,n,ldab,kd,w,z,ldz,work,lwork,rwork,lrwork,iwork,liwork,info) ! in :Band:zubevd.f - - callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,&kd,ab,&ldab,w,z,&ldz,work,&lwork,rwork,&lrwork,iwork,&liwork,&info) - - callprotoargument char*,char*,int*,int*,*,int*,*,*,int*,*,int*,*,int*,int*,int*,int* - - ! Remark: if ab is fortran contigous on input - ! and overwrite_ab=1 ab will be overwritten. - dimension(ldab,*), intent(in, overwrite) :: ab - - integer optional,intent(in):: compute_v = 1 - check( compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - ! case n=0 is omitted in calculaton of lwork, lrwork, liwork - ! so we forbid it - check( n>0 ) n - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - - dimension(n),intent(out),depend(n) :: w - - ! For compute_v=1 z is used and contains the eigenvectors - integer intent(hide),depend(n) :: ldz=(compute_v?n:1) - dimension(ldz,ldz),intent(out),depend(ldz) :: z - - integer intent(hide),depend(n) :: lwork=(compute_v?2*n*n:n) - dimension(lwork),intent(hide),depend(lwork) :: work - integer intent(out)::info - - integer optional, check(lrwork>=(compute_v?1+5*n+2*n*n:n)),depend(n) :: lrwork=(compute_v?1+5*n+2*n*n:n) - - intent(hide),dimension(lrwork),depend(lrwork) :: rwork - - ! 
documentation says liwork >=2+5*n, but that crashes, +1 helps - integer optional, check(liwork>=(compute_v?3+5*n:1)),depend(n) :: liwork=(compute_v?3+5*n:1) - integer intent(hide),dimension(liwork),depend(liwork) :: iwork - -end subroutine hbevd - - - -subroutine hbevx(ab,ldab,compute_v,range,lower,n,kd,q,ldq,vl,vu,il,iu,abstol,w,z,m,mmax,ldz,work,rwork,iwork,ifail,info) ! in :Band:dsbevx.f - - callstatement (*f2py_func)((compute_v?"V":"N"),(range>0?(range==1?"V":"I"):"A"),(lower?"L":"U"),&n,&kd,ab,&ldab,q,&ldq,&vl,&vu,&il,&iu,&abstol,&m,w,z,&ldz,work,rwork,iwork,ifail,&info) - - callprotoargument - char*,char*,char*,int*,int*,*,int*,*,int*,*,*,int*,int*,*,int*,*,*,int*,*,*,int*,int*,int* - - integer optional,intent(in):: compute_v = 1 - check(compute_v==1||compute_v==0) compute_v - integer optional,intent(in),check(lower==0||lower==1) :: lower = 0 - - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer intent(hide),depend(ab) :: n=shape(ab,1) - integer intent(hide),depend(ab) :: kd=shape(ab,0)-1 - - integer optional,intent(in):: range = 0 - check(range==2||range==1||range==0) range - - - ! Remark: if ab is fortran contigous on input - ! and overwrite_ab=1 ab will be overwritten. - dimension(ldab,*),intent(in, overwrite) :: ab - - - ! FIXME: do we need to make q available for outside usage ??? - ! If so: how to make this optional - !* Q (output) DOUBLE PRECISION array, dimension (LDQ, N) - !* If JOBZ = 'V', the N-by-N orthogonal matrix used in the - !* reduction to tridiagonal form. - !* If JOBZ = 'N', the array Q is not referenced. - integer intent(hide),depend(n) :: ldq=(compute_v?n:1) - dimension(ldq,ldq),intent(hide),depend(ldq) :: q - - - :: vl - :: vu - integer,check((il>=1 && il<=n)),depend(n) :: il - integer,check((iu>=1 && iu<=n && iu>=il)),depend(n,il) :: iu - - ! Remark, we don't use python indexing here, because - ! if someone uses ?sbevx directly, - ! he should expect Fortran style indexing. 
- !integer,check((il>=0 && il=0 && iu=il)),depend(n,il) :: iu+1 - - ! Remark: - ! Eigenvalues will be computed most accurately when ABSTOL is - ! set to twice the underflow threshold 2*DLAMCH('S'), not zero. - ! - ! The easiest is to wrap DLAMCH (done below) - ! and let the user provide the value. - optional,intent(in):: abstol=0.0 - - dimension(n),intent(out),depend(n) :: w - - dimension(ldz,mmax),intent(out) :: z - integer intent(hide),depend(n) :: ldz=(compute_v?n:1) - - ! We use the mmax parameter to fix the size of z - ! (only if eigenvalues are requested) - ! Otherwise we would allocate a (possibly) huge - ! region of memory for the eigenvectors, even - ! in cases where only a few are requested. - ! If RANGE = 'V' (range=1) we a priori don't know the - ! number of eigenvalues in the interval in advance. - ! As default we use the maximum value - ! but the user should use an appropriate mmax. - integer intent(in),depend(n) :: mmax=(compute_v?(range==2?(iu-il+1):n):1) - integer intent(out) :: m - - dimension(n),intent(hide) :: work - dimension(7*n),intent(hide) :: rwork - integer dimension(5*n),intent(hide) :: iwork - integer dimension((compute_v?n:1)),intent(out) :: ifail - integer intent(out):: info -end subroutine hbevx - - -! dlamch = dlamch(cmach) -! -! determine double precision machine parameters -! CMACH (input) CHARACTER*1 -! Specifies the value to be returned by DLAMCH: -! = 'E' or 'e', DLAMCH := eps -! = 'S' or 's , DLAMCH := sfmin -! = 'B' or 'b', DLAMCH := base -! = 'P' or 'p', DLAMCH := eps*base -! = 'N' or 'n', DLAMCH := t -! = 'R' or 'r', DLAMCH := rnd -! = 'M' or 'm', DLAMCH := emin -! = 'U' or 'u', DLAMCH := rmin -! = 'L' or 'l', DLAMCH := emax -! = 'O' or 'o', DLAMCH := rmax -! -! where -! -! eps = relative machine precision -! sfmin = safe minimum, such that 1/sfmin does not overflow -! base = base of the machine -! prec = eps*base -! t = number of (base) digits in the mantissa -! rnd = 1.0 when rounding occurs in addition, 0.0 otherwise -! 
emin = minimum exponent before (gradual) underflow -! rmin = underflow threshold - base**(emin-1) -! emax = largest exponent before overflow -! rmax = overflow threshold - (base**emax)*(1-eps) -function lamch(cmach) - character :: cmach - intent(out):: dlamch -end function lamch - - - -! lu,ipiv,info = dgbtrf(ab,kl,ku,[m,n,ldab,overwrite_ab]) -! Compute an LU factorization of a real m-by-n band matrix -subroutine gbtrf(m,n,ab,kl,ku,ldab,ipiv,info) ! in :Band:dgbtrf.f - ! threadsafe ! FIXME: should this be added ? - - callstatement {int i;(*f2py_func)(&m,&n,&kl,&ku,ab,&ldab,ipiv,&info); for(i=0,n=MIN(m,n);i*,int*,int*,int* - - ! let the default be a square matrix: - integer optional,depend(ab) :: m=shape(ab,1) - integer optional,depend(ab) :: n=shape(ab,1) - integer :: kl - integer :: ku - - dimension(ldab,*),intent(in,out,copy,out=lu) :: ab - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - integer dimension(MIN(m,n)),depend(m,n),intent(out) :: ipiv - integer intent(out):: info -end subroutine gbtrf - - - -subroutine gbtrs(ab,kl,ku,b,ipiv,trans,n,nrhs,ldab,ldb,info) ! in :Band:dgbtrs.f -! x,info = dgbtrs(ab,kl,ku,b,ipiv,[trans,n,ldab,ldb,overwrite_b]) -! solve a system of linear equations A * X = B or A' * X = B -! with a general band matrix A using the LU factorization -! computed by DGBTRF -! -! TRANS Specifies the form of the system of equations. -! 0 = 'N': A * X =B (No transpose) -! 1 = 'T': A'* X = B (Transpose) -! 
2 = 'C': A'* X = B (Conjugate transpose = Transpose) - -callstatement {int i;for(i=0;i0?(trans==1?"T":"C"):"N"),&n,&kl,&ku,&nrhs,ab,&ldab,ipiv,b,&ldb,&info);for(i=0;i*,int*,int*,*,int*,int* - !character optional:: trans='N' - integer optional:: trans=0 - integer optional,depend(ab) :: n=shape(ab,1) - integer :: kl - integer :: ku - integer intent(hide),depend(b):: nrhs=shape(b,1) - - dimension(ldab,*),intent(in) :: ab - integer optional,check(shape(ab,0)==ldab),depend(ab) :: ldab=shape(ab,0) - - integer dimension(n),intent(in) :: ipiv - dimension(ldb,*),intent(in,out,copy,out=x) :: b - integer optional,check(shape(b,0)==ldb),depend(b) :: ldb=shape(b,0) - integer optional,check(shape(b,0)==ldb),depend(b) :: ldb=shape(b,0) - integer intent(out):: info -end subroutine gbtrs - -! -! RRR routines for standard eigenvalue problem -! -subroutine ssyevr(jobz,range,uplo,n,a,lda,vl,vu,il,iu,abstol,m,w,z,ldz,isuppz,work,lwork,iwork,liwork,info) - ! Standard Eigenvalue Problem - ! simple/expert driver: all eigenvectors or optionally selected eigenvalues - ! algorithm: Relatively Robust Representation - ! matrix storage - ! Real - Single precision - character intent(in) :: jobz='V' - character intent(in) :: range='A' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - real intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - real intent(hide) :: vl=0 - real intent(hide) :: vu=1 - integer optional,intent(in) :: il=1 - integer optional,intent(in),depend(n) :: iu=n - real intent(hide) :: abstol=0. 
- integer intent(hide),depend(iu) :: m=iu-il+1 - real intent(out),dimension(n),depend(n) :: w - real intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(hide),dimension(2*m) :: isuppz - integer intent(in),depend(n) :: lwork=26*n - real intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n):: liwork=10*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine ssyevr -subroutine dsyevr(jobz,range,uplo,n,a,lda,vl,vu,il,iu,abstol,m,w,z,ldz,isuppz,work,lwork,iwork,liwork,info) - ! Standard Eigenvalue Problem - ! simple/expert driver: all eigenvectors or optionally selected eigenvalues - ! algorithm: Relatively Robust Representation - ! matrix storage - ! Real - Double precision - character intent(in) :: jobz='V' - character intent(in) :: range='A' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - double precision intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - double precision intent(hide) :: vl=0 - double precision intent(hide) :: vu=1 - integer optional,intent(in) :: il=1 - integer optional,intent(in),depend(n) :: iu=n - double precision intent(hide) :: abstol=0. - integer intent(hide),depend(iu) :: m=iu-il+1 - double precision intent(out),dimension(n),depend(n) :: w - double precision intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(hide),dimension(2*m) :: isuppz - integer intent(in),depend(n) :: lwork=26*n - double precision intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n):: liwork=10*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine dsyevr -subroutine cheevr(jobz,range,uplo,n,a,lda,vl,vu,il,iu,abstol,m,w,z,ldz,isuppz,work,lwork,rwork,lrwork,iwork,liwork,info) - ! Standard Eigenvalue Problem - ! 
simple/expert driver: all eigenvectors or optionally selected eigenvalues - ! algorithm: Relatively Robust Representation - ! matrix storage - ! Complex - Single precision - character intent(in) :: jobz='V' - character intent(in) :: range='A' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - real intent(hide) :: vl=0 - real intent(hide) :: vu=1 - integer optional,intent(in) :: il=1 - integer optional,intent(in),depend(n) :: iu=n - real intent(hide) :: abstol=0. - integer intent(hide),depend(iu) :: m=iu-il+1 - real intent(out),dimension(n),depend(n) :: w - complex intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(hide),dimension(2*m) :: isuppz - integer intent(in),depend(n) :: lwork=18*n - complex intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n) :: lrwork=24*n - real intent(hide),dimension(lrwork) :: rwork - integer intent(hide),depend(n):: liwork=10*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine cheevr -subroutine zheevr(jobz,range,uplo,n,a,lda,vl,vu,il,iu,abstol,m,w,z,ldz,isuppz,work,lwork,rwork,lrwork,iwork,liwork,info) - ! Standard Eigenvalue Problem - ! simple/expert driver: all eigenvectors or optionally selected eigenvalues - ! algorithm: Relatively Robust Representation - ! matrix storage - ! Complex - Double precision - character intent(in) :: jobz='V' - character intent(in) :: range='A' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex*16 intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - double precision intent(hide) :: vl=0 - double precision intent(hide) :: vu=1 - integer optional,intent(in) :: il=1 - integer optional,intent(in),depend(n) :: iu=n - double precision intent(hide) :: abstol=0. 
- integer intent(hide),depend(iu) :: m=iu-il+1 - double precision intent(out),dimension(n),depend(n) :: w - complex*16 intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(hide),dimension(2*m) :: isuppz - integer intent(in),depend(n) :: lwork=18*n - complex*16 intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n) :: lrwork=24*n - double precision intent(hide),dimension(lrwork) :: rwork - integer intent(hide),depend(n):: liwork=10*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine zheevr -subroutine ssygv(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: standard - ! matrix storage - ! Real - Single precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - real intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - real intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - real intent(out),dimension(n),depend(n) :: w - integer intent(hide) :: lwork=3*n-1 - real intent(hide),dimension(lwork) :: work - integer intent(out) :: info -end subroutine ssygv -subroutine dsygv(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: standard - ! matrix storage - ! 
Real - Double precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - double precision intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - double precision intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - double precision intent(out),dimension(n),depend(n) :: w - integer intent(hide) :: lwork=3*n-1 - double precision intent(hide),dimension(lwork) :: work - integer intent(out) :: info -end subroutine dsygv -subroutine chegv(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,rwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: standard - ! matrix storage - ! Complex - Single precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - complex intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - real intent(out),dimension(n),depend(n) :: w - integer intent(hide) :: lwork=18*n-1 - complex intent(hide),dimension(lwork) :: work - real intent(hide),dimension(3*n-2) :: rwork - integer intent(out) :: info -end subroutine chegv -subroutine zhegv(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,rwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: standard - ! matrix storage - ! 
Complex - Double precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex*16 intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - complex*16 intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - double precision intent(out),dimension(n),depend(n) :: w - integer intent(hide) :: lwork=18*n-1 - complex*16 intent(hide),dimension(lwork) :: work - double precision intent(hide),dimension(3*n-2) :: rwork - integer intent(out) :: info -end subroutine zhegv -! -! Divide and conquer routines for generalized eigenvalue problem -! -subroutine ssygvd(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,iwork,liwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: divide and conquer - ! matrix storage - ! Real - Single precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - real intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - real intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - real intent(out),dimension(n),depend(n) :: w - integer intent(in),depend(n) :: lwork=1+6*n+2*n*n - real intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n) :: liwork=3+5*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine ssygvd -subroutine dsygvd(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,iwork,liwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: divide and conquer - ! matrix storage - ! 
Real - Double precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - double precision intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - double precision intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - double precision intent(out),dimension(n),depend(n) :: w - integer intent(in),depend(n) :: lwork=1+6*n+2*n*n - double precision intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n) :: liwork=3+5*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine dsygvd -subroutine chegvd(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,rwork,lrwork,iwork,liwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: divide and conquer - ! matrix storage - ! Complex - Single precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - complex intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - real intent(out),dimension(n),depend(n) :: w - integer intent(in),depend(n) :: lwork=2*n+n*n - complex intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n) :: lrwork=1+5*n+2*n*n - real intent(hide),dimension(lrwork) :: rwork - integer intent(hide),depend(n) :: liwork=3+5*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine chegvd -subroutine zhegvd(itype,jobz,uplo,n,a,lda,b,ldb,w,work,lwork,rwork,lrwork,iwork,liwork,info) - ! Generalized Eigenvalue Problem - ! simple driver (all eigenvectors) - ! algorithm: divide and conquer - ! matrix storage - ! 
Complex - Double precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex*16 intent(in,copy,out,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - complex*16 intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - double precision intent(out),dimension(n),depend(n) :: w - integer intent(in),depend(n) :: lwork=2*n+n*n - complex*16 intent(hide),dimension(lwork) :: work - integer intent(hide),depend(n) :: lrwork=1+5*n+2*n*n - double precision intent(hide),dimension(lrwork) :: rwork - integer intent(hide),depend(n) :: liwork=3+5*n - integer intent(hide),dimension(liwork) :: iwork - integer intent(out) :: info -end subroutine zhegvd -! Expert routines for generalized eigenvalue problem -! -subroutine ssygvx(itype,jobz,range,uplo,n,a,lda,b,ldb,vl,vu,il,iu,abstol,m,w,z,ldz,work,lwork,iwork,ifail,info) - ! Generalized Eigenvalue Problem - ! expert driver (selected eigenvectors) - ! algorithm: standard - ! matrix storage - ! Real - Single precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(hide) :: range='I' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - real intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - real intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - real intent(hide) :: vl=0. - real intent(hide) :: vu=0. - integer optional,intent(in) :: il=1 - integer intent(in) :: iu - real intent(hide) :: abstol=0. 
- integer intent(hide),depend(iu) :: m=iu-il+1 - real intent(out),dimension(n),depend(n) :: w - real intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(in),depend(n) :: lwork=8*n - real intent(hide),dimension(lwork),depend(n,lwork) :: work - integer intent(hide),dimension(5*n) :: iwork - integer intent(out),dimension(n),depend(n) :: ifail - integer intent(out) :: info -end subroutine ssygvx -subroutine dsygvx(itype,jobz,range,uplo,n,a,lda,b,ldb,vl,vu,il,iu,abstol,m,w,z,ldz,work,lwork,iwork,ifail,info) - ! Generalized Eigenvalue Problem - ! expert driver (selected eigenvectors) - ! algorithm: standard - ! matrix storage - ! Real - Double precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(hide) :: range='I' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - double precision intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - double precision intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - double precision intent(hide) :: vl=0. - double precision intent(hide) :: vu=0. - integer optional,intent(in) :: il=1 - integer intent(in) :: iu - double precision intent(hide) :: abstol=0. - integer intent(hide),depend(iu) :: m=iu-il+1 - double precision intent(out),dimension(n),depend(n) :: w - double precision intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(in),depend(n) :: lwork=8*n - double precision intent(hide),dimension(lwork),depend(n,lwork) :: work - integer intent(hide),dimension(5*n) :: iwork - integer intent(out),dimension(n),depend(n) :: ifail - integer intent(out) :: info -end subroutine dsygvx -subroutine chegvx(itype,jobz,range,uplo,n,a,lda,b,ldb,vl,vu,il,iu,abstol,m,w,z,ldz,work,lwork,rwork,iwork,ifail,info) - ! Generalized Eigenvalue Problem - ! 
expert driver (selected eigenvectors) - ! algorithm: standard - ! matrix storage - ! Complex - Single precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(hide) :: range='I' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - complex intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - real intent(hide) :: vl=0. - real intent(hide) :: vu=0. - integer optional,intent(in) :: il=1 - integer intent(in) :: iu - real intent(hide) :: abstol=0. - integer intent(hide),depend(iu) :: m=iu-il+1 - real intent(out),dimension(n),depend(n) :: w - complex intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(in),depend(n) :: lwork=18*n-1 - complex intent(hide),dimension(lwork),depend(n,lwork) :: work - real intent(hide),dimension(7*n) :: rwork - integer intent(hide),dimension(5*n) :: iwork - integer intent(out),dimension(n),depend(n) :: ifail - integer intent(out) :: info -end subroutine chegvx -subroutine zhegvx(itype,jobz,range,uplo,n,a,lda,b,ldb,vl,vu,il,iu,abstol,m,w,z,ldz,work,lwork,rwork,iwork,ifail,info) - ! Generalized Eigenvalue Problem - ! expert driver (selected eigenvectors) - ! algorithm: standard - ! matrix storage - ! Complex - Double precision - integer optional,intent(in) :: itype=1 - character intent(in) :: jobz='V' - character intent(hide) :: range='I' - character intent(in) :: uplo='L' - integer intent(hide) :: n=shape(a,0) - complex*16 intent(in,copy,aligned8),dimension(n,n) :: a - integer intent(hide),depend(n,a) :: lda=n - complex*16 intent(in,copy,aligned8),dimension(n,n) :: b - integer intent(hide),depend(n,b) :: ldb=n - double precision intent(hide) :: vl=0. - double precision intent(hide) :: vu=0. 
- integer optional,intent(in) :: il=1 - integer intent(in) :: iu - double precision intent(hide) :: abstol=0. - integer intent(hide),depend(iu) :: m=iu-il+1 - double precision intent(out),dimension(n),depend(n) :: w - complex*16 intent(out),dimension(n,m),depend(n,m) :: z - integer intent(hide),check(shape(z,0)==ldz),depend(n,z) :: ldz=n - integer intent(in),depend(n) :: lwork=18*n-1 - complex*16 intent(hide),dimension(lwork),depend(n,lwork) :: work - double precision intent(hide),dimension(7*n) :: rwork - integer intent(hide),dimension(5*n) :: iwork - integer intent(out),dimension(n),depend(n) :: ifail - integer intent(out) :: info -end subroutine zhegvx - - -end interface - -end python module flapack - -! This file was auto-generated with f2py (version:2.10.173). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/linalg/interface_gen.py b/scipy-0.10.1/scipy/linalg/interface_gen.py deleted file mode 100755 index 4c55c15585..0000000000 --- a/scipy-0.10.1/scipy/linalg/interface_gen.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python - -import os -import re -from distutils.dir_util import mkpath - -def all_subroutines(interface_in): - # remove comments - comment_block_exp = re.compile(r'/\*(?:\s|.)*?\*/') - subroutine_exp = re.compile(r'subroutine (?:\s|.)*?end subroutine.*') - function_exp = re.compile(r'function (?:\s|.)*?end function.*') - - interface = comment_block_exp.sub('',interface_in) - subroutine_list = subroutine_exp.findall(interface) - function_list = function_exp.findall(interface) - subroutine_list = subroutine_list + function_list - subroutine_list = map(lambda x: x.strip(),subroutine_list) - return subroutine_list - -def real_convert(val_string): - return val_string - -def complex_convert(val_string): - return '(' + val_string + ',0.)' - -def convert_types(interface_in,converter): - regexp = re.compile(r'') - interface = interface_in[:] - while 1: - sub = regexp.search(interface) - if sub is None: break - converted = 
converter(sub.group(1)) - interface = interface.replace(sub.group(),converted) - return interface - -def generic_expand(generic_interface,skip_names=[]): - generic_types ={'s' :('real', 'real', real_convert, - 'real'), - 'd' :('double precision','double precision',real_convert, - 'double precision'), - 'c' :('complex', 'complex',complex_convert, - 'real'), - 'z' :('double complex', 'double complex',complex_convert, - 'double precision'), - 'cs':('complex', 'real',complex_convert, - 'real'), - 'zd':('double complex', 'double precision',complex_convert, - 'double precision'), - 'sc':('real', 'complex',real_convert, - 'real'), - 'dz':('double precision','double complex', real_convert, - 'double precision')} - generic_c_types = {'real':'float', - 'double precision':'double', - 'complex':'complex_float', - 'double complex':'complex_double'} - # cc_types is specific in ATLAS C BLAS, in particular, for complex arguments - generic_cc_types = {'real':'float', - 'double precision':'double', - 'complex':'void', - 'double complex':'void'} - #2. get all subroutines - subs = all_subroutines(generic_interface) - print len(subs) - #loop through the subs - type_exp = re.compile(r'') - TYPE_EXP = re.compile(r'') - routine_name = re.compile(r'(subroutine|function)\s*(?P\w+)\s*\(') - interface = '' - for sub in subs: - #3. 
Find the typecodes to use: - m = type_exp.search(sub) - if m is None: - interface = interface + '\n\n' + sub - continue - type_chars = m.group(1) - # get rid of spaces - type_chars = type_chars.replace(' ','') - # get a list of the characters (or character pairs) - type_chars = type_chars.split(',') - # Now get rid of the special tag that contained the types - sub = re.sub(type_exp,'',sub) - m = TYPE_EXP.search(sub) - if m is not None: - sub = re.sub(TYPE_EXP,'',sub) - sub_generic = sub.strip() - for char in type_chars: - type_in,type_out,converter, rtype_in = generic_types[char] - sub = convert_types(sub_generic,converter) - function_def = sub.replace('',char) - function_def = function_def.replace('',char.upper()) - function_def = function_def.replace('',type_in) - function_def = function_def.replace('', - generic_c_types[type_in]) - function_def = function_def.replace('', - generic_cc_types[type_in]) - function_def = function_def.replace('',rtype_in) - function_def = function_def.replace('', - generic_c_types[rtype_in]) - function_def = function_def.replace('',type_out) - function_def = function_def.replace('', - generic_c_types[type_out]) - m = routine_name.match(function_def) - if m: - if m.group('name') in skip_names: - print 'Skipping',m.group('name') - continue - else: - print 'Possible bug: Failed to determine routines name' - interface = interface + '\n\n' + function_def - - return interface - -#def interface_to_module(interface_in,module_name,include_list,sdir='.'): -def interface_to_module(interface_in,module_name): - pre_prefix = "!%f90 -*- f90 -*-\n" - # heading and tail of the module definition. - file_prefix = "\npython module " + module_name +" ! 
in\n" \ - "!usercode '''#include \"cblas.h\"\n"\ - "!'''\n"\ - " interface \n" - file_suffix = "\n end interface\n" \ - "end module %s" % module_name - return pre_prefix + file_prefix + interface_in + file_suffix - -def process_includes(interface_in,sdir='.'): - include_exp = re.compile(r'\n\s*[^!]\s*') - include_files = include_exp.findall(interface_in) - for filename in include_files: - f = open(os.path.join(sdir,filename)) - interface_in = interface_in.replace(''%filename, - f.read()) - f.close() - return interface_in - -def generate_interface(module_name,src_file,target_file,skip_names=[]): - print "generating",module_name,"interface" - f = open(src_file) - generic_interface = f.read() - f.close() - sdir = os.path.dirname(src_file) - generic_interface = process_includes(generic_interface,sdir) - generic_interface = generic_expand(generic_interface,skip_names) - module_def = interface_to_module(generic_interface,module_name) - mkpath(os.path.dirname(target_file)) - f = open(target_file,'w') - user_routines = os.path.join(sdir,module_name+"_user_routines.pyf") - if os.path.exists(user_routines): - f2 = open(user_routines) - f.write(f2.read()) - f2.close() - f.write(module_def) - f.close() - -def process_all(): - # process the standard files. - for name in ['fblas','cblas','clapack','flapack']: - generate_interface(name,'generic_%s.pyf'%(name),name+'.pyf') - - -if __name__ == "__main__": - process_all() diff --git a/scipy-0.10.1/scipy/linalg/lapack.py b/scipy-0.10.1/scipy/linalg/lapack.py deleted file mode 100644 index 9c42c16ab4..0000000000 --- a/scipy-0.10.1/scipy/linalg/lapack.py +++ /dev/null @@ -1,115 +0,0 @@ -# -# Author: Pearu Peterson, March 2002 -# - -__all__ = ['get_lapack_funcs'] - -# The following ensures that possibly missing flavor (C or Fortran) is -# replaced with the available one. If none is available, exception -# is raised at the first attempt to use the resources. 
-import types - -import numpy - -from scipy.linalg import flapack -from scipy.linalg import clapack -_use_force_clapack = 1 -if hasattr(clapack,'empty_module'): - clapack = flapack - _use_force_clapack = 0 -elif hasattr(flapack,'empty_module'): - flapack = clapack - -def cast_to_lapack_prefix(t): - if issubclass(t, numpy.single): - prefix = 's' - elif issubclass(t, numpy.double): - prefix = 'd' - elif issubclass(t, numpy.longdouble): - prefix = 'd' - elif issubclass(t, numpy.csingle): - prefix = 'c' - elif issubclass(t, numpy.cdouble): - prefix = 'z' - elif issubclass(t, numpy.clongdouble): - prefix = 'z' - else: - prefix = 'd' - return prefix - -prefix_to_order = dict(s=3, d=2, c=1, z=0) -order_to_prefix = ['s', 'd', 'c', 'z'] -prefix_to_dtype = dict(s=numpy.single, d=numpy.double, - c=numpy.csingle, z=numpy.cdouble) - -def find_best_lapack_type(arrays): - if not arrays: - return 'd', numpy.double, False - ordering = [] - for i in range(len(arrays)): - t = arrays[i].dtype.type - prefix = cast_to_lapack_prefix(t) - order = prefix_to_order[prefix] - ordering.append((order, prefix, i)) - ordering.sort() - _, required_prefix, lowest_array_index = ordering[0] - dtype = prefix_to_dtype[required_prefix] - isfortran = numpy.isfortran(arrays[lowest_array_index]) - return required_prefix, dtype, isfortran - -def get_lapack_funcs(names, arrays=()): - """Return available LAPACK function objects with names. - arrays are used to determine the optimal prefix of - LAPACK routines. - """ - #If force_clapack is True then available Atlas routine - #is returned for column major storaged arrays with - #rowmajor argument set to False. - force_clapack=False #XXX: Don't set it true! The feature is unreliable - # and may cause incorrect results. - # See test_basic.test_solve.check_20Feb04_bug. 
- - required_prefix, dtype, isfortran = find_best_lapack_type(arrays) - # Default lookup: - if isfortran: - # prefer Fortran code for leading array with column major order - m1, m2 = flapack, clapack - else: - # in all other cases, C code is preferred - m1, m2 = clapack, flapack - if not _use_force_clapack: - force_clapack = False - funcs = [] - m1_name = m1.__name__.split('.')[-1] - m2_name = m2.__name__.split('.')[-1] - for name in names: - func_name = required_prefix + name - func = getattr(m1,func_name,None) - if func is None: - func = getattr(m2,func_name) - func.module_name = m2_name - else: - func.module_name = m1_name - if force_clapack and m1 is flapack: - func2 = getattr(m2,func_name,None) - if func2 is not None: - exec _colmajor_func_template % {'func_name':func_name} - func = types.FunctionType(func_code, - {'clapack_func':func2}, - func_name) - func.module_name = m2_name - func.__doc__ = func2.__doc__ - func.prefix = required_prefix - func.dtype = dtype - funcs.append(func) - return tuple(funcs) - - - -_colmajor_func_template = '''\ -def %(func_name)s(*args,**kws): - if "rowmajor" not in kws: - kws["rowmajor"] = 0 - return clapack_func(*args,**kws) -func_code = %(func_name)s.func_code -''' diff --git a/scipy-0.10.1/scipy/linalg/linalg_version.py b/scipy-0.10.1/scipy/linalg/linalg_version.py deleted file mode 100644 index 0eba228ca0..0000000000 --- a/scipy-0.10.1/scipy/linalg/linalg_version.py +++ /dev/null @@ -1,5 +0,0 @@ -major = 0 -minor = 4 -micro = 9 - -linalg_version = '%(major)d.%(minor)d.%(micro)d' % (locals ()) diff --git a/scipy-0.10.1/scipy/linalg/matfuncs.py b/scipy-0.10.1/scipy/linalg/matfuncs.py deleted file mode 100644 index 7d52f7a444..0000000000 --- a/scipy-0.10.1/scipy/linalg/matfuncs.py +++ /dev/null @@ -1,533 +0,0 @@ -# -# Author: Travis Oliphant, March 2002 -# - -__all__ = ['expm','expm2','expm3','cosm','sinm','tanm','coshm','sinhm', - 'tanhm','logm','funm','signm','sqrtm'] - -from numpy import asarray, Inf, dot, floor, eye, diag, 
exp, \ - product, logical_not, ravel, transpose, conjugate, \ - cast, log, ogrid, imag, real, absolute, amax, sign, \ - isfinite, sqrt, identity, single -from numpy import matrix as mat -import numpy as np - -# Local imports -from misc import norm -from basic import solve, inv -from special_matrices import triu, all_mat -from decomp import eig -from decomp_svd import orth, svd -from decomp_schur import schur, rsf2csf - -eps = np.finfo(float).eps -feps = np.finfo(single).eps - -def expm(A, q=7): - """Compute the matrix exponential using Pade approximation. - - Parameters - ---------- - A : array, shape(M,M) - Matrix to be exponentiated - q : integer - Order of the Pade approximation - - Returns - ------- - expA : array, shape(M,M) - Matrix exponential of A - - """ - A = asarray(A) - - # Scale A so that norm is < 1/2 - nA = norm(A,Inf) - if nA==0: - return identity(len(A), A.dtype.char) - from numpy import log2 - val = log2(nA) - e = int(floor(val)) - j = max(0,e+1) - A = A / 2.0**j - - # Pade Approximation for exp(A) - X = A - c = 1.0/2 - N = eye(*A.shape) + c*A - D = eye(*A.shape) - c*A - for k in range(2,q+1): - c = c * (q-k+1) / (k*(2*q-k+1)) - X = dot(A,X) - cX = c*X - N = N + cX - if not k % 2: - D = D + cX; - else: - D = D - cX; - F = solve(D,N) - for k in range(1,j+1): - F = dot(F,F) - return F - -def expm2(A): - """Compute the matrix exponential using eigenvalue decomposition. - - Parameters - ---------- - A : array, shape(M,M) - Matrix to be exponentiated - - Returns - ------- - expA : array, shape(M,M) - Matrix exponential of A - - """ - A = asarray(A) - t = A.dtype.char - if t not in ['f','F','d','D']: - A = A.astype('d') - t = 'd' - s,vr = eig(A) - vri = inv(vr) - r = dot(dot(vr,diag(exp(s))),vri) - if t in ['f', 'd']: - return r.real.astype(t) - else: - return r.astype(t) - -def expm3(A, q=20): - """Compute the matrix exponential using Taylor series. 
- - Parameters - ---------- - A : array, shape(M,M) - Matrix to be exponentiated - q : integer - Order of the Taylor series - - Returns - ------- - expA : array, shape(M,M) - Matrix exponential of A - - """ - A = asarray(A) - t = A.dtype.char - if t not in ['f','F','d','D']: - A = A.astype('d') - t = 'd' - A = mat(A) - eA = eye(*A.shape,**{'dtype':t}) - trm = mat(eA, copy=True) - castfunc = cast[t] - for k in range(1,q): - trm *= A / castfunc(k) - eA += trm - return eA - -_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} - -def toreal(arr, tol=None): - """Return as real array if imaginary part is small. - - Parameters - ---------- - arr : array - tol : float - Absolute tolerance - - Returns - ------- - arr : double or complex array - """ - if tol is None: - tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[arr.dtype.char]] - if (arr.dtype.char in ['F', 'D','G']) and \ - np.allclose(arr.imag, 0.0, atol=tol): - arr = arr.real - return arr - -def cosm(A): - """Compute the matrix cosine. - - This routine uses expm to compute the matrix exponentials. - - Parameters - ---------- - A : array, shape(M,M) - - Returns - ------- - cosA : array, shape(M,M) - Matrix cosine of A - - """ - A = asarray(A) - if A.dtype.char not in ['F','D','G']: - return expm(1j*A).real - else: - return 0.5*(expm(1j*A) + expm(-1j*A)) - - -def sinm(A): - """Compute the matrix sine. - - This routine uses expm to compute the matrix exponentials. - - Parameters - ---------- - A : array, shape(M,M) - - Returns - ------- - sinA : array, shape(M,M) - Matrix cosine of A - - """ - A = asarray(A) - if A.dtype.char not in ['F','D','G']: - return expm(1j*A).imag - else: - return -0.5j*(expm(1j*A) - expm(-1j*A)) - -def tanm(A): - """Compute the matrix tangent. - - This routine uses expm to compute the matrix exponentials. 
- - Parameters - ---------- - A : array, shape(M,M) - - Returns - ------- - tanA : array, shape(M,M) - Matrix tangent of A - - """ - A = asarray(A) - if A.dtype.char not in ['F','D','G']: - return toreal(solve(cosm(A), sinm(A))) - else: - return solve(cosm(A), sinm(A)) - -def coshm(A): - """Compute the hyperbolic matrix cosine. - - This routine uses expm to compute the matrix exponentials. - - Parameters - ---------- - A : array, shape(M,M) - - Returns - ------- - coshA : array, shape(M,M) - Hyperbolic matrix cosine of A - - """ - A = asarray(A) - if A.dtype.char not in ['F','D','G']: - return toreal(0.5*(expm(A) + expm(-A))) - else: - return 0.5*(expm(A) + expm(-A)) - -def sinhm(A): - """Compute the hyperbolic matrix sine. - - This routine uses expm to compute the matrix exponentials. - - Parameters - ---------- - A : array, shape(M,M) - - Returns - ------- - sinhA : array, shape(M,M) - Hyperbolic matrix sine of A - - """ - A = asarray(A) - if A.dtype.char not in ['F','D']: - return toreal(0.5*(expm(A) - expm(-A))) - else: - return 0.5*(expm(A) - expm(-A)) - -def tanhm(A): - """Compute the hyperbolic matrix tangent. - - This routine uses expm to compute the matrix exponentials. - - Parameters - ---------- - A : array, shape(M,M) - - Returns - ------- - tanhA : array, shape(M,M) - Hyperbolic matrix tangent of A - - """ - A = asarray(A) - if A.dtype.char not in ['F','D']: - return toreal(solve(coshm(A), sinhm(A))) - else: - return solve(coshm(A), sinhm(A)) - -def funm(A, func, disp=True): - """Evaluate a matrix function specified by a callable. - - Returns the value of matrix-valued function f at A. The function f - is an extension of the scalar-valued function func to matrices. - - Parameters - ---------- - A : array, shape(M,M) - Matrix at which to evaluate the function - func : callable - Callable object that evaluates a scalar function f. - Must be vectorized (eg. using vectorize). 
- disp : boolean - Print warning if error in the result is estimated large - instead of returning estimated error. (Default: True) - - Returns - ------- - fA : array, shape(M,M) - Value of the matrix function specified by func evaluated at A - - (if disp == False) - errest : float - 1-norm of the estimated error, ||err||_1 / ||A||_1 - - """ - # Perform Shur decomposition (lapack ?gees) - A = asarray(A) - if len(A.shape)!=2: - raise ValueError("Non-matrix input to matrix function.") - if A.dtype.char in ['F', 'D', 'G']: - cmplx_type = 1 - else: - cmplx_type = 0 - T, Z = schur(A) - T, Z = rsf2csf(T,Z) - n,n = T.shape - F = diag(func(diag(T))) # apply function to diagonal elements - F = F.astype(T.dtype.char) # e.g. when F is real but T is complex - - minden = abs(T[0,0]) - - # implement Algorithm 11.1.1 from Golub and Van Loan - # "matrix Computations." - for p in range(1,n): - for i in range(1,n-p+1): - j = i + p - s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1]) - ksl = slice(i,j-1) - val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1]) - s = s + val - den = T[j-1,j-1] - T[i-1,i-1] - if den != 0.0: - s = s / den - F[i-1,j-1] = s - minden = min(minden,abs(den)) - - F = dot(dot(Z, F),transpose(conjugate(Z))) - if not cmplx_type: - F = toreal(F) - - tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]] - if minden == 0.0: - minden = tol - err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1))) - if product(ravel(logical_not(isfinite(F))),axis=0): - err = Inf - if disp: - if err > 1000*tol: - print "Result may be inaccurate, approximate err =", err - return F - else: - return F, err - -def logm(A, disp=True): - """Compute matrix logarithm. - - The matrix logarithm is the inverse of expm: expm(logm(A)) == A - - Parameters - ---------- - A : array, shape(M,M) - Matrix whose logarithm to evaluate - disp : boolean - Print warning if error in the result is estimated large - instead of returning estimated error. 
(Default: True) - - Returns - ------- - logA : array, shape(M,M) - Matrix logarithm of A - - (if disp == False) - errest : float - 1-norm of the estimated error, ||err||_1 / ||A||_1 - - """ - # Compute using general funm but then use better error estimator and - # make one step in improving estimate using a rotation matrix. - A = mat(asarray(A)) - F, errest = funm(A,log,disp=0) - errtol = 1000*eps - # Only iterate if estimate of error is too large. - if errest >= errtol: - # Use better approximation of error - errest = norm(expm(F)-A,1) / norm(A,1) - if not isfinite(errest) or errest >= errtol: - N,N = A.shape - X,Y = ogrid[1:N+1,1:N+1] - R = mat(orth(eye(N,dtype='d')+X+Y)) - F, dontcare = funm(R*A*R.H,log,disp=0) - F = R.H*F*R - if (norm(imag(F),1)<=1000*errtol*norm(F,1)): - F = mat(real(F)) - E = mat(expm(F)) - temp = mat(solve(E.T,(E-A).T)) - F = F - temp.T - errest = norm(expm(F)-A,1) / norm(A,1) - if disp: - if not isfinite(errest) or errest >= errtol: - print "Result may be inaccurate, approximate err =", errest - return F - else: - return F, errest - -def signm(a, disp=True): - """Matrix sign function. - - Extension of the scalar sign(x) to matrices. - - Parameters - ---------- - A : array, shape(M,M) - Matrix at which to evaluate the sign function - disp : boolean - Print warning if error in the result is estimated large - instead of returning estimated error. 
(Default: True) - - Returns - ------- - sgnA : array, shape(M,M) - Value of the sign function at A - - (if disp == False) - errest : float - 1-norm of the estimated error, ||err||_1 / ||A||_1 - - Examples - -------- - >>> from scipy.linalg import signm, eigvals - >>> a = [[1,2,3], [1,2,1], [1,1,1]] - >>> eigvals(a) - array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j]) - >>> eigvals(signm(a)) - array([-1.+0.j, 1.+0.j, 1.+0.j]) - - """ - def rounded_sign(x): - rx = real(x) - if rx.dtype.char=='f': - c = 1e3*feps*amax(x) - else: - c = 1e3*eps*amax(x) - return sign( (absolute(rx) > c) * rx ) - result,errest = funm(a, rounded_sign, disp=0) - errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]] - if errest < errtol: - return result - - # Handle signm of defective matrices: - - # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp., - # 8:237-250,1981" for how to improve the following (currently a - # rather naive) iteration process: - - a = asarray(a) - #a = result # sometimes iteration converges faster but where?? - - # Shifting to avoid zero eigenvalues. How to ensure that shifting does - # not change the spectrum too much? - vals = svd(a,compute_uv=0) - max_sv = np.amax(vals) - #min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1] - #c = 0.5/min_nonzero_sv - c = 0.5/max_sv - S0 = a + c*np.identity(a.shape[0]) - prev_errest = errest - for i in range(100): - iS0 = inv(S0) - S0 = 0.5*(S0 + iS0) - Pp=0.5*(dot(S0,S0)+S0) - errest = norm(dot(Pp,Pp)-Pp,1) - if errest < errtol or prev_errest==errest: - break - prev_errest = errest - if disp: - if not isfinite(errest) or errest >= errtol: - print "Result may be inaccurate, approximate err =", errest - return S0 - else: - return S0, errest - -def sqrtm(A, disp=True): - """Matrix square root. - - Parameters - ---------- - A : array, shape(M,M) - Matrix whose square root to evaluate - disp : boolean - Print warning if error in the result is estimated large - instead of returning estimated error. 
(Default: True) - - Returns - ------- - sgnA : array, shape(M,M) - Value of the sign function at A - - (if disp == False) - errest : float - Frobenius norm of the estimated error, ||err||_F / ||A||_F - - Notes - ----- - Uses algorithm by Nicholas J. Higham - - """ - A = asarray(A) - if len(A.shape)!=2: - raise ValueError("Non-matrix input to matrix function.") - T, Z = schur(A) - T, Z = rsf2csf(T,Z) - n,n = T.shape - - R = np.zeros((n,n),T.dtype.char) - for j in range(n): - R[j,j] = sqrt(T[j,j]) - for i in range(j-1,-1,-1): - s = 0 - for k in range(i+1,j): - s = s + R[i,k]*R[k,j] - R[i,j] = (T[i,j] - s)/(R[i,i] + R[j,j]) - - R, Z = all_mat(R,Z) - X = (Z * R * Z.H) - - if disp: - nzeig = np.any(diag(T)==0) - if nzeig: - print "Matrix is singular and may not have a square root." - return X.A - else: - arg2 = norm(X*X - A,'fro')**2 / norm(A,'fro') - return X.A, arg2 diff --git a/scipy-0.10.1/scipy/linalg/misc.py b/scipy-0.10.1/scipy/linalg/misc.py deleted file mode 100644 index 179135660e..0000000000 --- a/scipy-0.10.1/scipy/linalg/misc.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np -from numpy.linalg import LinAlgError -import fblas - -__all__ = ['LinAlgError', 'norm'] - -_nrm2_prefix = {'f' : 's', 'F': 'sc', 'D': 'dz'} - -def norm(a, ord=None): - # Differs from numpy only in non-finite handling and the use of - # blas - a = np.asarray_chkfinite(a) - if ord in (None, 2) and (a.ndim == 1) and (a.dtype.char in 'fdFD'): - # use blas for fast and stable euclidean norm - func_name = _nrm2_prefix.get(a.dtype.char, 'd') + 'nrm2' - nrm2 = getattr(fblas, func_name) - return nrm2(a) - return np.linalg.norm(a, ord=ord) - -norm.__doc__ = np.linalg.norm.__doc__ - -def _datacopied(arr, original): - """ - Strict check for `arr` not sharing any data with `original`, - under the assumption that arr = asarray(original) - - """ - if arr is original: - return False - if not isinstance(original, np.ndarray) and hasattr(original, '__array__'): - return False - return arr.base is 
None diff --git a/scipy-0.10.1/scipy/linalg/scons_support.py b/scipy-0.10.1/scipy/linalg/scons_support.py deleted file mode 100644 index f05d2b5ee5..0000000000 --- a/scipy-0.10.1/scipy/linalg/scons_support.py +++ /dev/null @@ -1,40 +0,0 @@ -from os.path import join as pjoin, splitext, basename as pbasename - -from interface_gen import generate_interface - -def do_generate_interface(target, source, env): - """Generate a .pyf file from another pyf file (!).""" - # XXX: do this correctly - target_name = str(target[0]) - source_name = str(source[0]) - - # XXX handle skip names - name = splitext(pbasename(target_name))[0] - generate_interface(name, source_name, target_name) - return 0 - -def generate_interface_emitter(target, source, env): - base = str(target[0]) - return (['%s.pyf' % base], source) - -def do_generate_fake_interface(target, source, env): - """Generate a (fake) .pyf file from another pyf file (!).""" - # XXX: do this correctly - target_name = str(target[0]) - source_name = str(source[0]) - - # XXX handle skip names - name = splitext(pbasename(target_name))[0] - generate_interface(name, source_name, target_name) - - f = open(target_name, 'w') - f.write('python module '+name+'\n') - f.write('usercode void empty_module(void) {}\n') - f.write('interface\n') - f.write('subroutine empty_module()\n') - f.write('intent(c) empty_module\n') - f.write('end subroutine empty_module\n') - f.write('end interface\nend python module'+name+'\n') - f.close() - - return 0 diff --git a/scipy-0.10.1/scipy/linalg/setup.py b/scipy-0.10.1/scipy/linalg/setup.py deleted file mode 100755 index 1f79fb6d96..0000000000 --- a/scipy-0.10.1/scipy/linalg/setup.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python - -import os -from distutils.dep_util import newer_group, newer -from os.path import join - -#------------------- -# To skip wrapping single precision atlas/lapack/blas routines, set -# the following flag to True: -skip_single_routines = 0 - -# Some OS distributions (e.g. 
Redhat, Suse) provide a blas library that -# is built using incomplete blas sources that come with lapack tar-ball. -# In order to use such a library in scipy.linalg, the following flag -# must be set to True: -using_lapack_blas = 0 - -#-------------------- - -def needs_cblas_wrapper(info): - """Returns true if needs c wrapper around cblas for calling from - fortran.""" - import re - r_accel = re.compile("Accelerate") - r_vec = re.compile("vecLib") - res = False - try: - tmpstr = info['extra_link_args'] - for i in tmpstr: - if r_accel.search(i) or r_vec.search(i): - res = True - except KeyError: - pass - - return res - -def configuration(parent_package='',top_path=None): - from numpy.distutils.system_info import get_info, NotFoundError - - from numpy.distutils.misc_util import Configuration - - from interface_gen import generate_interface - - config = Configuration('linalg',parent_package,top_path) - - lapack_opt = get_info('lapack_opt') - - if not lapack_opt: - raise NotFoundError('no lapack/blas resources found') - - atlas_version = ([v[3:-3] for k,v in lapack_opt.get('define_macros',[]) \ - if k=='ATLAS_INFO']+[None])[0] - if atlas_version: - print ('ATLAS version: %s' % atlas_version) - - target_dir = '' - skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]} - if skip_single_routines: - target_dir = 'dbl' - skip_names['clapack'].extend(\ - 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ - ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ - ' slauum clauum strtri ctrtri'.split()) - skip_names['flapack'].extend(skip_names['clapack']) - skip_names['flapack'].extend(\ - 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ - ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' - ' sggev cggev'.split()) - skip_names['cblas'].extend('saxpy caxpy'.split()) - skip_names['fblas'].extend(skip_names['cblas']) - skip_names['fblas'].extend(\ - 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ - ' csscal scopy ccopy sdot cdotu cdotc 
snrm2 scnrm2 sasum scasum'\ - ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\ - ' sgemm cgemm'.split()) - - if using_lapack_blas: - target_dir = join(target_dir,'blas') - skip_names['fblas'].extend(\ - 'drotmg srotmg drotm srotm'.split()) - - if atlas_version=='3.2.1_pre3.3.6': - target_dir = join(target_dir,'atlas321') - skip_names['clapack'].extend(\ - 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ - ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) - elif atlas_version and atlas_version>'3.4.0' and atlas_version<='3.5.12': - skip_names['clapack'].extend('cpotrf zpotrf'.split()) - - def generate_pyf(extension, build_dir): - name = extension.name.split('.')[-1] - target = join(build_dir,target_dir,name+'.pyf') - if name[0]=='c' and atlas_version is None and newer(__file__,target): - f = open(target,'w') - f.write('python module '+name+'\n') - f.write('usercode void empty_module(void) {}\n') - f.write('interface\n') - f.write('subroutine empty_module()\n') - f.write('intent(c) empty_module\n') - f.write('end subroutine empty_module\n') - f.write('end interface\nend python module'+name+'\n') - f.close() - return target - if newer_group(extension.depends,target): - generate_interface(name, - extension.depends[0], - target, - skip_names[name]) - return target - - - # fblas: - if needs_cblas_wrapper(lapack_opt): - sources = ['fblas.pyf.src', join('src', 'fblaswrap_veclib_c.c')], - else: - sources = ['fblas.pyf.src', join('src', 'fblaswrap.f')] - - # Note: `depends` needs to include fblaswrap(_veclib) for both files to be - # included by "python setup.py sdist" - config.add_extension('fblas', - sources = sources, - depends = ['fblas_l?.pyf.src', - join('src', 'fblaswrap_veclib_c.c'), - join('src', 'fblaswrap.f')], - extra_info = lapack_opt - ) - - # cblas: - config.add_extension('cblas', - sources = [generate_pyf], - depends = ['generic_cblas.pyf', - 'generic_cblas1.pyf', - 'interface_gen.py'], - extra_info = lapack_opt - ) - - # 
flapack: - config.add_extension('flapack', - sources = [generate_pyf], - depends = ['generic_flapack.pyf', - 'flapack_user_routines.pyf', - 'interface_gen.py'], - extra_info = lapack_opt - ) - - # clapack: - config.add_extension('clapack', - sources = [generate_pyf], - depends = ['generic_clapack.pyf', - 'interface_gen.py'], - extra_info = lapack_opt - ) - - # _flinalg: - config.add_extension('_flinalg', - sources = [join('src','det.f'),join('src','lu.f')], - extra_info = lapack_opt - ) - - # calc_lwork: - config.add_extension('calc_lwork', - [join('src','calc_lwork.f')], - extra_info = lapack_opt - ) - - # atlas_version: - if os.name == 'nt' and 'FPATH' in os.environ: - define_macros = [('NO_ATLAS_INFO', 1)] - else: - define_macros = [] - - config.add_extension('atlas_version', - ['atlas_version.c'], - extra_info = lapack_opt, - define_macros = define_macros - ) - - config.add_data_dir('tests') - config.add_data_dir('benchmarks') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - from linalg_version import linalg_version - - setup(version=linalg_version, - **configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/linalg/setup_atlas_version.py b/scipy-0.10.1/scipy/linalg/setup_atlas_version.py deleted file mode 100755 index 1adc8815d2..0000000000 --- a/scipy-0.10.1/scipy/linalg/setup_atlas_version.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -import os -from distutils.core import Extension -from numpy.distutils.misc_util import get_path, default_config_dict -from numpy.distutils.system_info import get_info,AtlasNotFoundError - -def configuration (parent_package=''): - package = 'linalg' - config = default_config_dict(package,parent_package) - del config['fortran_libraries'] - local_path = get_path(__name__) - atlas_info = get_info('atlas_threads') - if not atlas_info: - atlas_info = get_info('atlas') - if not atlas_info: - raise AtlasNotFoundError(AtlasNotFoundError.__doc__) - ext = 
Extension('atlas_version', - sources=[os.path.join(local_path,'atlas_version.c')], - libraries=[atlas_info['libraries'][-1]], - library_dirs=atlas_info['library_dirs']) - config['ext_modules'].append(ext) - return config - -if __name__ == '__main__': - from distutils.core import setup - setup(**configuration()) diff --git a/scipy-0.10.1/scipy/linalg/setupscons.py b/scipy-0.10.1/scipy/linalg/setupscons.py deleted file mode 100755 index 1ff36d30ad..0000000000 --- a/scipy-0.10.1/scipy/linalg/setupscons.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('linalg',parent_package,top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - from linalg_version import linalg_version - - setup(version=linalg_version, - **configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/linalg/special_matrices.py b/scipy-0.10.1/scipy/linalg/special_matrices.py deleted file mode 100644 index e23022e9a5..0000000000 --- a/scipy-0.10.1/scipy/linalg/special_matrices.py +++ /dev/null @@ -1,676 +0,0 @@ - -import math -import numpy as np -from scipy.misc import comb - -__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel', - 'hadamard', 'leslie', 'all_mat', 'kron', 'block_diag', 'companion', - 'hilbert', 'invhilbert'] - - -#----------------------------------------------------------------------------- -# matrix construction functions -#----------------------------------------------------------------------------- - -# -# *Note*: tri{,u,l} is implemented in numpy, but an important bug was fixed in -# 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards -# compatibility. - -def tri(N, M=None, k=0, dtype=None): - """ - Construct (N, M) matrix filled with ones at and below the k-th diagonal. 
- - The matrix has A[i,j] == 1 for i <= j + k - - Parameters - ---------- - N : integer - The size of the first dimension of the matrix. - M : integer or None - The size of the second dimension of the matrix. If `M` is None, - `M = N` is assumed. - k : integer - Number of subdiagonal below which matrix is filled with ones. - `k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0 - superdiagonal. - dtype : dtype - Data type of the matrix. - - Returns - ------- - A : array, shape (N, M) - - Examples - -------- - >>> from scipy.linalg import tri - >>> tri(3, 5, 2, dtype=int) - array([[1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1]]) - >>> tri(3, 5, -1, dtype=int) - array([[0, 0, 0, 0, 0], - [1, 0, 0, 0, 0], - [1, 1, 0, 0, 0]]) - - """ - if M is None: M = N - if type(M) == type('d'): - #pearu: any objections to remove this feature? - # As tri(N,'d') is equivalent to tri(N,dtype='d') - dtype = M - M = N - m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)),-k) - if dtype is None: - return m - else: - return m.astype(dtype) - -def tril(m, k=0): - """Construct a copy of a matrix with elements above the k-th diagonal zeroed. - - Parameters - ---------- - m : array - Matrix whose elements to return - k : integer - Diagonal above which to zero elements. - k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal. - - Returns - ------- - A : array, shape m.shape, dtype m.dtype - - Examples - -------- - >>> from scipy.linalg import tril - >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 0, 0, 0], - [ 4, 0, 0], - [ 7, 8, 0], - [10, 11, 12]]) - - """ - m = np.asarray(m) - out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char)*m - return out - -def triu(m, k=0): - """Construct a copy of a matrix with elements below the k-th diagonal zeroed. - - Parameters - ---------- - m : array - Matrix whose elements to return - k : integer - Diagonal below which to zero elements. 
- k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal. - - Returns - ------- - A : array, shape m.shape, dtype m.dtype - - Examples - -------- - >>> from scipy.linalg import tril - >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 1, 2, 3], - [ 4, 5, 6], - [ 0, 8, 9], - [ 0, 0, 12]]) - - """ - m = np.asarray(m) - out = (1-tri(m.shape[0], m.shape[1], k-1, m.dtype.char))*m - return out - - -def toeplitz(c, r=None): - """ - Construct a Toeplitz matrix. - - The Toeplitz matrix has constant diagonals, with c as its first column - and r as its first row. If r is not given, ``r == conjugate(c)`` is - assumed. - - Parameters - ---------- - c : array_like - First column of the matrix. Whatever the actual shape of `c`, it - will be converted to a 1-D array. - r : array_like - First row of the matrix. If None, ``r = conjugate(c)`` is assumed; - in this case, if c[0] is real, the result is a Hermitian matrix. - r[0] is ignored; the first row of the returned matrix is - ``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be - converted to a 1-D array. - - Returns - ------- - A : array, shape (len(c), len(r)) - The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. - - See also - -------- - circulant : circulant matrix - hankel : Hankel matrix - - Notes - ----- - The behavior when `c` or `r` is a scalar, or when `c` is complex and - `r` is None, was changed in version 0.8.0. The behavior in previous - versions was undocumented and is no longer supported. 
- - Examples - -------- - >>> from scipy.linalg import toeplitz - >>> toeplitz([1,2,3], [1,4,5,6]) - array([[1, 4, 5, 6], - [2, 1, 4, 5], - [3, 2, 1, 4]]) - >>> toeplitz([1.0, 2+3j, 4-1j]) - array([[ 1.+0.j, 2.-3.j, 4.+1.j], - [ 2.+3.j, 1.+0.j, 2.-3.j], - [ 4.-1.j, 2.+3.j, 1.+0.j]]) - - """ - c = np.asarray(c).ravel() - if r is None: - r = c.conjugate() - else: - r = np.asarray(r).ravel() - # Form a 1D array of values to be used in the matrix, containing a reversed - # copy of r[1:], followed by c. - vals = np.concatenate((r[-1:0:-1], c)) - a, b = np.ogrid[0:len(c), len(r)-1:-1:-1] - indx = a + b - # `indx` is a 2D array of indices into the 1D array `vals`, arranged so that - # `vals[indx]` is the Toeplitz matrix. - return vals[indx] - -def circulant(c): - """ - Construct a circulant matrix. - - Parameters - ---------- - c : array_like - 1-D array, the first column of the matrix. - - Returns - ------- - A : array, shape (len(c), len(c)) - A circulant matrix whose first column is `c`. - - See also - -------- - toeplitz : Toeplitz matrix - hankel : Hankel matrix - - Notes - ----- - .. versionadded:: 0.8.0 - - Examples - -------- - >>> from scipy.linalg import circulant - >>> circulant([1, 2, 3]) - array([[1, 3, 2], - [2, 1, 3], - [3, 2, 1]]) - - """ - c = np.asarray(c).ravel() - a, b = np.ogrid[0:len(c), 0:-len(c):-1] - indx = a + b - # `indx` is a 2D array of indices into `c`, arranged so that `c[indx]` is - # the circulant matrix. - return c[indx] - -def hankel(c, r=None): - """ - Construct a Hankel matrix. - - The Hankel matrix has constant anti-diagonals, with `c` as its - first column and `r` as its last row. If `r` is not given, then - `r = zeros_like(c)` is assumed. - - Parameters - ---------- - c : array_like - First column of the matrix. Whatever the actual shape of `c`, it - will be converted to a 1-D array. - r : array_like, 1D - Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed. 
- r[0] is ignored; the last row of the returned matrix is - ``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be - converted to a 1-D array. - - Returns - ------- - A : array, shape (len(c), len(r)) - The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. - - See also - -------- - toeplitz : Toeplitz matrix - circulant : circulant matrix - - Examples - -------- - >>> from scipy.linalg import hankel - >>> hankel([1, 17, 99]) - array([[ 1, 17, 99], - [17, 99, 0], - [99, 0, 0]]) - >>> hankel([1,2,3,4], [4,7,7,8,9]) - array([[1, 2, 3, 4, 7], - [2, 3, 4, 7, 7], - [3, 4, 7, 7, 8], - [4, 7, 7, 8, 9]]) - - """ - c = np.asarray(c).ravel() - if r is None: - r = np.zeros_like(c) - else: - r = np.asarray(r).ravel() - # Form a 1D array of values to be used in the matrix, containing `c` - # followed by r[1:]. - vals = np.concatenate((c, r[1:])) - a, b = np.ogrid[0:len(c), 0:len(r)] - indx = a + b - # `indx` is a 2D array of indices into the 1D array `vals`, arranged so that - # `vals[indx]` is the Hankel matrix. - return vals[indx] - -def hadamard(n, dtype=int): - """ - Construct a Hadamard matrix. - - `hadamard(n)` constructs an n-by-n Hadamard matrix, using Sylvester's - construction. `n` must be a power of 2. - - Parameters - ---------- - n : int - The order of the matrix. `n` must be a power of 2. - dtype : numpy dtype - The data type of the array to be constructed. - - Returns - ------- - H : ndarray with shape (n, n) - The Hadamard matrix. - - Notes - ----- - .. versionadded:: 0.8.0 - - Examples - -------- - >>> hadamard(2, dtype=complex) - array([[ 1.+0.j, 1.+0.j], - [ 1.+0.j, -1.-0.j]]) - >>> hadamard(4) - array([[ 1, 1, 1, 1], - [ 1, -1, 1, -1], - [ 1, 1, -1, -1], - [ 1, -1, -1, 1]]) - - """ - - # This function is a slightly modified version of the - # function contributed by Ivo in ticket #675. 
- - if n < 1: - lg2 = 0 - else: - lg2 = int(math.log(n, 2)) - if 2 ** lg2 != n: - raise ValueError("n must be an positive integer, and n must be power of 2") - - H = np.array([[1]], dtype=dtype) - - # Sylvester's construction - for i in range(0, lg2): - H = np.vstack((np.hstack((H, H)), np.hstack((H, -H)))) - - return H - - -def leslie(f, s): - """ - Create a Leslie matrix. - - Given the length n array of fecundity coefficients `f` and the length - n-1 array of survival coefficents `s`, return the associated Leslie matrix. - - Parameters - ---------- - f : array_like - The "fecundity" coefficients, has to be 1-D. - s : array_like - The "survival" coefficients, has to be 1-D. The length of `s` - must be one less than the length of `f`, and it must be at least 1. - - Returns - ------- - L : ndarray - Returns a 2-D ndarray of shape ``(n, n)``, where `n` is the - length of `f`. The array is zero except for the first row, - which is `f`, and the first sub-diagonal, which is `s`. - The data-type of the array will be the data-type of ``f[0]+s[0]``. - - Notes - ----- - .. versionadded:: 0.8.0 - - The Leslie matrix is used to model discrete-time, age-structured - population growth [1]_ [2]_. In a population with `n` age classes, two sets - of parameters define a Leslie matrix: the `n` "fecundity coefficients", - which give the number of offspring per-capita produced by each age - class, and the `n` - 1 "survival coefficients", which give the - per-capita survival rate of each age class. - - References - ---------- - .. [1] P. H. Leslie, On the use of matrices in certain population - mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945) - .. [2] P. H. Leslie, Some further notes on the use of matrices in - population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245 - (Dec. 1948) - - Examples - -------- - >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7]) - array([[ 0.1, 2. , 1. , 0.1], - [ 0.2, 0. , 0. , 0. ], - [ 0. , 0.8, 0. , 0. ], - [ 0. , 0. , 0.7, 0. 
]]) - - """ - f = np.atleast_1d(f) - s = np.atleast_1d(s) - if f.ndim != 1: - raise ValueError("Incorrect shape for f. f must be one-dimensional") - if s.ndim != 1: - raise ValueError("Incorrect shape for s. s must be one-dimensional") - if f.size != s.size + 1: - raise ValueError("Incorrect lengths for f and s. The length" - " of s must be one less than the length of f.") - if s.size == 0: - raise ValueError("The length of s must be at least 1.") - - tmp = f[0] + s[0] - n = f.size - a = np.zeros((n,n), dtype=tmp.dtype) - a[0] = f - a[range(1,n), range(0,n-1)] = s - return a - - -def all_mat(*args): - return map(np.matrix,args) - -def kron(a,b): - """Kronecker product of a and b. - - The result is the block matrix:: - - a[0,0]*b a[0,1]*b ... a[0,-1]*b - a[1,0]*b a[1,1]*b ... a[1,-1]*b - ... - a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b - - Parameters - ---------- - a : array, shape (M, N) - b : array, shape (P, Q) - - Returns - ------- - A : array, shape (M*P, N*Q) - Kronecker product of a and b - - Examples - -------- - >>> from scipy import kron, array - >>> kron(array([[1,2],[3,4]]), array([[1,1,1]])) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - - """ - if not a.flags['CONTIGUOUS']: - a = np.reshape(a, a.shape) - if not b.flags['CONTIGUOUS']: - b = np.reshape(b, b.shape) - o = np.outer(a,b) - o = o.reshape(a.shape + b.shape) - return np.concatenate(np.concatenate(o, axis=1), axis=1) - -def block_diag(*arrs): - """ - Create a block diagonal matrix from provided arrays. - - Given the inputs `A`, `B` and `C`, the output will have these - arrays arranged on the diagonal:: - - [[A, 0, 0], - [0, B, 0], - [0, 0, C]] - - Parameters - ---------- - A, B, C, ... : array_like, up to 2-D - Input arrays. A 1-D array or array_like sequence of length `n`is - treated as a 2-D array with shape ``(1,n)``. - - Returns - ------- - D : ndarray - Array with `A`, `B`, `C`, ... on the diagonal. `D` has the - same dtype as `A`. 
- - Notes - ----- - If all the input arrays are square, the output is known as a - block diagonal matrix. - - Examples - -------- - >>> A = [[1, 0], - ... [0, 1]] - >>> B = [[3, 4, 5], - ... [6, 7, 8]] - >>> C = [[7]] - >>> block_diag(A, B, C) - [[1 0 0 0 0 0] - [0 1 0 0 0 0] - [0 0 3 4 5 0] - [0 0 6 7 8 0] - [0 0 0 0 0 7]] - >>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]]) - array([[ 1., 0., 0., 0., 0.], - [ 0., 2., 3., 0., 0.], - [ 0., 0., 0., 4., 5.], - [ 0., 0., 0., 6., 7.]]) - - """ - if arrs == (): - arrs = ([],) - arrs = [np.atleast_2d(a) for a in arrs] - - bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2] - if bad_args: - raise ValueError("arguments in the following positions have dimension " - "greater than 2: %s" % bad_args) - - shapes = np.array([a.shape for a in arrs]) - out = np.zeros(np.sum(shapes, axis=0), dtype=arrs[0].dtype) - - r, c = 0, 0 - for i, (rr, cc) in enumerate(shapes): - out[r:r + rr, c:c + cc] = arrs[i] - r += rr - c += cc - return out - -def companion(a): - """ - Create a companion matrix. - - Create the companion matrix [1]_ associated with the polynomial whose - coefficients are given in `a`. - - Parameters - ---------- - a : array_like - 1-D array of polynomial coefficients. The length of `a` must be - at least two, and ``a[0]`` must not be zero. - - Returns - ------- - c : ndarray - A square array of shape ``(n-1, n-1)``, where `n` is the length - of `a`. The first row of `c` is ``-a[1:]/a[0]``, and the first - sub-diagonal is all ones. The data-type of the array is the same - as the data-type of ``1.0*a[0]``. - - Raises - ------ - ValueError - If any of the following are true: a) ``a.ndim != 1``; - b) ``a.size < 2``; c) ``a[0] == 0``. - - Notes - ----- - .. versionadded:: 0.8.0 - - References - ---------- - .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: - Cambridge University Press, 1999, pp. 146-7. 
- - Examples - -------- - >>> from scipy.linalg import companion - >>> companion([1, -10, 31, -30]) - array([[ 10., -31., 30.], - [ 1., 0., 0.], - [ 0., 1., 0.]]) - - """ - a = np.atleast_1d(a) - - if a.ndim != 1: - raise ValueError("Incorrect shape for `a`. `a` must be one-dimensional.") - - if a.size < 2: - raise ValueError("The length of `a` must be at least 2.") - - if a[0] == 0: - raise ValueError("The first coefficient in `a` must not be zero.") - - first_row = -a[1:]/(1.0*a[0]) - n = a.size - c = np.zeros((n-1, n-1), dtype=first_row.dtype) - c[0] = first_row - c[range(1,n-1), range(0, n-2)] = 1 - return c - - -def hilbert(n): - """Create a Hilbert matrix of order n. - - Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`. - - Parameters - ---------- - n : int - The size of the array to create. - - Returns - ------- - h : ndarray with shape (n, n) - The Hilber matrix. - - Notes - ----- - .. versionadded:: 0.10.0 - - Examples - -------- - >>> hilbert(3) - array([[ 1. , 0.5 , 0.33333333], - [ 0.5 , 0.33333333, 0.25 ], - [ 0.33333333, 0.25 , 0.2 ]]) - - """ - values = 1.0 / (1.0 + np.arange(2 * n - 1)) - h = hankel(values[:n], r=values[n-1:]) - return h - - -def invhilbert(n, exact=False): - """Compute the inverse of the Hilbert matrix of order `n`. - - Parameters - ---------- - n : int - The order of the Hilbert matrix. - exact : bool - If False, the data type of the array that is returned in np.float64, - and the array is an approximation of the inverse. - If True, the array is exact integer array. To represent the exact - inverse when n > 14, the returned array is an object array of long - integers. For n <= 14, the exact inverse is returned as an array - with data type np.int64. - - Returns - ------- - invh : ndarray with shape (n, n) - The data type of the array is np.float64 is exact is False. - If exact is True, the data type is either np.int64 (for n <= 14) - or object (for n > 14). 
In the latter case, the objects in the - array will be long integers. - - Notes - ----- - .. versionadded:: 0.10.0 - - Examples - -------- - >>> invhilbert(4) - array([[ 16., -120., 240., -140.], - [ -120., 1200., -2700., 1680.], - [ 240., -2700., 6480., -4200.], - [ -140., 1680., -4200., 2800.]]) - >>> invhilbert(4, exact=True) - array([[ 16, -120, 240, -140], - [ -120, 1200, -2700, 1680], - [ 240, -2700, 6480, -4200], - [ -140, 1680, -4200, 2800]], dtype=int64) - >>> invhilbert(16)[7,7] - 4.2475099528537506e+19 - >>> invhilbert(16, exact=True)[7,7] - 42475099528537378560L - """ - if exact: - if n > 14: - dtype = object - else: - dtype = np.int64 - else: - dtype = np.float64 - invh = np.empty((n, n), dtype=dtype) - for i in xrange(n): - for j in xrange(0, i + 1): - s = i + j - invh[i, j] = ((-1)**s * (s + 1) * - comb(n + i, n - j - 1, exact) * - comb(n + j, n - i - 1, exact) * - comb(s, i, exact) ** 2) - if i != j: - invh[j, i] = invh[i, j] - return invh diff --git a/scipy-0.10.1/scipy/linalg/src/calc_lwork.f b/scipy-0.10.1/scipy/linalg/src/calc_lwork.f deleted file mode 100644 index 452ef7d80b..0000000000 --- a/scipy-0.10.1/scipy/linalg/src/calc_lwork.f +++ /dev/null @@ -1,481 +0,0 @@ - subroutine gehrd(min_lwork,max_lwork,prefix,n,lo,hi) - integer min_lwork,max_lwork,n,lo,hi - character prefix -c -c Returned maxwrk is acctually optimal lwork. 
-c -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: n,lo,hi - - INTEGER NB - EXTERNAL ILAENV - INTRINSIC MIN - - NB = MIN( 64, ILAENV( 1, prefix // 'GEHRD', ' ', n, lo, hi, -1 ) ) - max_lwork = n * NB - min_lwork = MIN(max_lwork,MAX(1,n)) - - end - - subroutine gesdd(min_lwork,max_lwork,prefix,m,n,compute_uv) - integer min_lwork,max_lwork,m,n,compute_uv - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n,&compute_uv) -cf2py callprotoargument int*,int*,char*,int*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n,compute_uv - - INTEGER MINMN, MNTHR, MINWRK, MAXWRK, SMLSIZ, BDSPAC, BDSPAN - INTEGER ILAENV, WRKBL - EXTERNAL ILAENV - INTRINSIC INT, MAX, MIN - - MINMN = MIN( M, N ) - MNTHR = INT( MINMN*11.0D0 / 6.0D0 ) - MINWRK = 1 - MAXWRK = 1 - SMLSIZ = ILAENV( 9, prefix // 'GESDD', ' ', 0, 0, 0, 0 ) - IF( M.GE.N ) THEN -* -* Compute space needed for DBDSDC -* - BDSPAC = 3*N*N + 7*N - BDSPAN = MAX( 12*N+4, 8*N+2+SMLSIZ*( SMLSIZ+8 ) ) - IF( M.GE.MNTHR ) THEN - IF (compute_uv.eq.0) THEN -* -* Path 1 (M much larger than N, JOBZ='N') -* - MAXWRK = N + N*ILAENV( 1, prefix // 'GEQRF', ' ', - $ M, N, -1, - $ -1 ) - MAXWRK = MAX( MAXWRK, 3*N+2*N* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ N, N, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MINWRK = BDSPAC - ELSE -* -* Path 4 (M much larger than N, JOBZ='A') -* - WRKBL = N + N*ILAENV( 1, prefix // 'GEQRF', ' ', - $ M, N, -1, -1 ) - WRKBL = MAX( WRKBL, N+M*ILAENV( 1, prefix // 'ORGQR', - $ ' ', M, - $ M, N, -1 ) ) - WRKBL = MAX( WRKBL, 3*N+2*N* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ N, N, -1, -1 ) ) - WRKBL = MAX( WRKBL, 3*N+N* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ N, N, N, -1 ) ) - WRKBL = MAX( WRKBL, 3*N+N* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ N, N, N, -1 ) ) - WRKBL = MAX( WRKBL, 
BDSPAC+2*N ) - MAXWRK = N*N + WRKBL - MINWRK = BDSPAC + N*N + M + N - ENDIF - ELSE -* -* Path 5 (M at least N, but not much larger) -* - WRKBL = 3*N + ( M+N )*ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, N, -1, -1) - IF (compute_uv.eq.0) THEN - MAXWRK = MAX(WRKBL,BDSPAC + 3*N) - MINWRK = 3*N + MAX(M,BDSPAC) - ELSE - MAXWRK = MAX( MAXWRK, 3*N+M* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ M, M, N, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*N+N* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ N, N, N, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC+2*N+M ) - MINWRK = BDSPAC + 2*N + M - ENDIF - ENDIF - ELSE -* -* Compute space needed for DBDSDC -* - BDSPAC = 3*M*M + 7*M - BDSPAN = MAX( 12*M+4, 8*M+2+SMLSIZ*( SMLSIZ+8 ) ) - IF( N.GE.MNTHR ) THEN - IF( compute_uv.eq.0 ) THEN -* -* Path 1t (N much larger than M, JOBZ='N') -* - MAXWRK = M + M*ILAENV( 1, prefix // 'GELQF', ' ', - $ M, N, -1, - $ -1 ) - MAXWRK = MAX( MAXWRK, 3*M+2*M* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, M, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MINWRK = BDSPAC - ELSE -* -* Path 4t (N much larger than M, JOBZ='A') -* - WRKBL = M + M*ILAENV( 1, prefix // 'GELQF', ' ', - $ M, N, -1, -1 ) - WRKBL = MAX( WRKBL, M+N*ILAENV( 1, prefix // 'ORGLQ', - $ ' ', N, - $ N, M, -1 ) ) - WRKBL = MAX( WRKBL, 3*M+2*M* - $ ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, M, -1, -1 ) ) - WRKBL = MAX( WRKBL, 3*M+M* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ M, M, M, -1 ) ) - WRKBL = MAX( WRKBL, 3*M+M* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ M, M, M, -1 ) ) - WRKBL = MAX( WRKBL, BDSPAC+2*M ) - MAXWRK = WRKBL + M*M - MINWRK = BDSPAC + M*M + M + N - ENDIF - ELSE - WRKBL = 3*M + ( M+N )*ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, N, -1, - $ -1 ) - IF (compute_uv.eq.0) THEN - MAXWRK = MAX(WRKBL,BDSPAC + 3*M) - MINWRK = 3*M + MAX(N,BDSPAC) - ELSE - MAXWRK = MAX( MAXWRK, 3*M+M* - $ ILAENV( 1, prefix // 'ORMBR', 'QLN', - $ M, M, N, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*M+N* - $ ILAENV( 1, prefix // 'ORMBR', 'PRT', - $ N, N, M, -1 ) ) - MAXWRK = MAX( 
MAXWRK, BDSPAC+2*M ) - MINWRK = BDSPAC + 2*M + N - ENDIF - ENDIF - ENDIF - min_lwork = MINWRK - max_lwork = MAX(MINWRK,MAXWRK) - end - - subroutine gelss(min_lwork,max_lwork,prefix,m,n,nrhs) - - integer min_lwork,max_lwork,m,n,nrhs - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n,&nrhs) -cf2py callprotoargument int*,int*,char*,int*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n,nrhs - - INTEGER MAXWRK, MINMN, MINWRK, MM, MNTHR - INTEGER ILAENV, BDSPAC, MAXMN - EXTERNAL ILAENV - INTRINSIC MAX, MIN - - MINMN = MIN( M, N ) - MAXMN = MAX( M, N ) - MNTHR = ILAENV( 6, prefix // 'GELSS', ' ', M, N, NRHS, -1 ) - MINWRK = 1 - MAXWRK = 0 - MM = M - IF( M.GE.N .AND. M.GE.MNTHR ) THEN -* -* Path 1a - overdetermined, with many more rows than columns -* - MM = N - MAXWRK = MAX( MAXWRK, N+N*ILAENV( 1, prefix // 'GEQRF', ' ', - $ M, N, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, N+NRHS* - $ ILAENV( 1, prefix // 'ORMQR', 'LT', M, NRHS, N, -1 ) ) - END IF - IF( M.GE.N ) THEN -* -* Path 1 - overdetermined or exactly determined -* -* Compute workspace neede for BDSQR -* - BDSPAC = MAX( 1, 5*N ) - MAXWRK = MAX( MAXWRK, 3*N+( MM+N )* - $ ILAENV( 1, prefix // 'GEBRD', ' ', MM, N, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*N+NRHS* - $ ILAENV( 1, prefix // 'ORMBR', 'QLT', MM, NRHS, N, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*N+( N-1 )* - $ ILAENV( 1, prefix // 'ORGBR', 'P', N, N, N, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MAXWRK = MAX( MAXWRK, N*NRHS ) - MINWRK = MAX( 3*N+MM, 3*N+NRHS, BDSPAC ) - MAXWRK = MAX( MINWRK, MAXWRK ) - END IF - - IF( N.GT.M ) THEN -* -* Compute workspace neede for DBDSQR -* - BDSPAC = MAX( 1, 5*M ) - MINWRK = MAX( 3*M+NRHS, 3*M+N, BDSPAC ) - IF( N.GE.MNTHR ) THEN -* -* Path 2a - underdetermined, with many more columns -* than rows -* - MAXWRK = M + M*ILAENV( 1, prefix // 'GELQF', ' ', - $ M, N, -1, -1 ) - MAXWRK = MAX( MAXWRK, M*M+4*M+2*M* - $ 
ILAENV( 1, prefix // 'GEBRD', ' ', M, M, -1, -1 ) ) - MAXWRK = MAX( MAXWRK, M*M+4*M+NRHS* - $ ILAENV( 1, prefix // 'ORMBR', 'QLT', M, NRHS, M, -1 )) - MAXWRK = MAX( MAXWRK, M*M+4*M+( M-1 )* - $ ILAENV( 1, prefix // 'ORGBR', 'P', M, M, M, -1 ) ) - MAXWRK = MAX( MAXWRK, M*M+M+BDSPAC ) - IF( NRHS.GT.1 ) THEN - MAXWRK = MAX( MAXWRK, M*M+M+M*NRHS ) - ELSE - MAXWRK = MAX( MAXWRK, M*M+2*M ) - END IF - MAXWRK = MAX( MAXWRK, M+NRHS* - $ ILAENV( 1, prefix // 'ORMLQ', 'LT', N, NRHS, M, -1 ) ) - - ELSE -* -* Path 2 - underdetermined -* - MAXWRK = 3*M + ( N+M )*ILAENV( 1, prefix // 'GEBRD', ' ', - $ M, N, -1, -1 ) - MAXWRK = MAX( MAXWRK, 3*M+NRHS* - $ ILAENV( 1, prefix // 'ORMBR', 'QLT', M, NRHS, M, -1 ) ) - MAXWRK = MAX( MAXWRK, 3*M+M* - $ ILAENV( 1, prefix // 'ORGBR', 'P', M, N, M, -1 ) ) - MAXWRK = MAX( MAXWRK, BDSPAC ) - MAXWRK = MAX( MAXWRK, N*NRHS ) - END IF - END IF - MAXWRK = MAX( MINWRK, MAXWRK ) - MINWRK = MAX( MINWRK, 1 ) - - min_lwork = MINWRK - max_lwork = MAXWRK - end - - subroutine getri(min_lwork,max_lwork,prefix,n) - integer min_lwork,max_lwork,n - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n) -cf2py callprotoargument int*,int*,char*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: n - INTEGER ILAENV, NB - EXTERNAL ILAENV - NB = ILAENV( 1, prefix // 'GETRI', ' ', N, -1, -1, -1 ) - min_lwork = N - max_lwork = N*NB - end - - subroutine geev(min_lwork,max_lwork,prefix,n, - $ compute_vl,compute_vr) - - integer min_lwork,max_lwork,n,compute_vl,compute_vr - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&compute_vl,&compute_vr) -cf2py callprotoargument int*,int*,char*,int*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: compute_vl = 1,compute_vr = 1 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - LOGICAL WANTVL, 
WANTVR - INTEGER ILAENV, MINWRK, MAXWRK, MAXB, HSWORK, K - EXTERNAL ILAENV - INTRINSIC MAX, MIN - - WANTVL = compute_vl.eq.1 - WANTVR = compute_vr.eq.1 - - MINWRK = 1 - MAXWRK = 2*N + N*ILAENV( 1, prefix // 'GEHRD', ' ', N, 1, N, 0 ) - IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN - MINWRK = MAX( 1, 3*N ) - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', 'EN', N, 1, N, -1 ) - $ , 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', 'EN', N - $ , 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, N+1, N+HSWORK ) - ELSE - MINWRK = MAX( 1, 4*N ) - MAXWRK = MAX( MAXWRK, 2*N+( N-1 )* - $ ILAENV( 1, prefix // 'ORGHR', ' ', N, 1, N, -1 ) ) - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', 'SV', N, 1, N, -1 ), - $ 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', 'SV', N - $ , 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, N+1, N+HSWORK ) - MAXWRK = MAX( MAXWRK, 4*N ) - END IF - min_lwork = MINWRK - max_lwork = MAXWRK - end - - subroutine heev(min_lwork,max_lwork,prefix,n,lower) - - integer min_lwork,max_lwork,n,lower - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&lower) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: lower = 0 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - CHARACTER UPLO - INTEGER ILAENV, NB - EXTERNAL ILAENV - INTRINSIC MAX - - UPLO = 'L' - if (lower.eq.0) then - UPLO = 'U' - endif - - NB = ILAENV( 1, prefix // 'HETRD', UPLO, N, -1, -1, -1 ) - - min_lwork = MAX(1,2*N-1) - max_lwork = MAX( 1, ( NB+1 )*N ) - - end - - subroutine syev(min_lwork,max_lwork,prefix,n,lower) - - integer min_lwork,max_lwork,n,lower - character prefix -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&lower) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: 
max_lwork -cf2py integer optional,intent(in) :: lower = 0 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - CHARACTER UPLO - INTEGER ILAENV, NB - EXTERNAL ILAENV - INTRINSIC MAX - - UPLO = 'L' - if (lower.eq.0) then - UPLO = 'U' - end if - - NB = ILAENV( 1, prefix // 'SYTRD', UPLO, N, -1, -1, -1 ) - - min_lwork = MAX(1,3*N-1) - max_lwork = MAX( 1, ( NB+2 )*N ) - - end - - subroutine gees(min_lwork,max_lwork,prefix,n,compute_v) - - integer min_lwork,max_lwork,n,compute_v - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&n,&compute_v) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py integer optional,intent(in) :: compute_v = 1 -cf2py intent(in) :: prefix -cf2py intent(in) :: n - - INTEGER HSWORK, MAXWRK, MINWRK, MAXB, K - INTEGER ILAENV - EXTERNAL ILAENV - INTRINSIC MAX, MIN - - MAXWRK = N + N*ILAENV( 1, prefix // 'GEHRD', ' ', N, 1, N, 0 ) - MINWRK = MAX( 1, 2*N ) - IF( compute_v.eq.0 ) THEN - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', - $ 'SN', N, 1, N, -1 ), 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', - $ 'SN', N, 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, HSWORK, 1 ) - ELSE - MAXWRK = MAX( MAXWRK, N+( N-1 )* - $ ILAENV( 1, prefix // 'UNGHR', ' ', N, 1, N, -1 ) ) - MAXB = MAX( ILAENV( 8, prefix // 'HSEQR', - $ 'EN', N, 1, N, -1 ), 2 ) - K = MIN( MAXB, N, MAX( 2, ILAENV( 4, prefix // 'HSEQR', - $ 'EN', N, 1, N, -1 ) ) ) - HSWORK = MAX( K*( K+2 ), 2*N ) - MAXWRK = MAX( MAXWRK, HSWORK, 1 ) - END IF - - min_lwork = MINWRK - max_lwork = MAXWRK - - end - - subroutine geqrf(min_lwork,max_lwork,prefix,m,n) - - integer min_lwork,max_lwork,m,n - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix 
-cf2py intent(in) :: m,n - - INTEGER NB - INTEGER ILAENV - EXTERNAL ILAENV - INTRINSIC MAX - - NB = ILAENV( 1, prefix // 'GEQRF', ' ', M, N, -1, -1 ) - - min_lwork = MAX(1,N) - max_lwork = MAX(1,N*NB) - end - - subroutine gqr(min_lwork,max_lwork,prefix,m,n) - - integer min_lwork,max_lwork,m,n - character prefix - -cf2py callstatement (*f2py_func)(&min_lwork,&max_lwork,prefix,&m,&n) -cf2py callprotoargument int*,int*,char*,int*,int* -cf2py intent(out,out=minwrk) :: min_lwork -cf2py intent(out,out=maxwrk) :: max_lwork -cf2py intent(in) :: prefix -cf2py intent(in) :: m,n - - INTEGER NB - INTEGER ILAENV - EXTERNAL ILAENV - INTRINSIC MAX - - if ((prefix.eq.'d').or.(prefix.eq.'s') - $ .or.(prefix.eq.'D').or.(prefix.eq.'S')) then - NB = ILAENV( 1, prefix // 'ORGQR', ' ', M, N, -1, -1 ) - else - NB = ILAENV( 1, prefix // 'UNGQR', ' ', M, N, -1, -1 ) - endif - min_lwork = MAX(1,N) - max_lwork = MAX(1,N*NB) - end diff --git a/scipy-0.10.1/scipy/linalg/src/det.f b/scipy-0.10.1/scipy/linalg/src/det.f deleted file mode 100644 index b3a25ec488..0000000000 --- a/scipy-0.10.1/scipy/linalg/src/det.f +++ /dev/null @@ -1,161 +0,0 @@ - -c Calculate determinant of square matrix -c Author: Pearu Peterson, March 2002 -c -c prefixes: d,z,s,c (double,complex double,float,complex float) -c suffixes: _c,_r (column major order,row major order) - - subroutine ddet_c(det,a,n,piv,info) - integer n,piv(n),i - double precision det,a(n,n) -cf2py intent(in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument double*,double*,int*,int*,int* - external dgetrf - call dgetrf(n,n,a,n,piv,info) - det = 0d0 - if (info.ne.0) then - return - endif - det = 1d0 - do 10,i=1,n - if (piv(i).ne.i) then - det = -det * a(i,i) - else - det = det * a(i,i) - endif - 10 continue - end - - subroutine ddet_r(det,a,n,piv,info) - integer n,piv(n) - 
double precision det,a(n,n) -cf2py intent(c,in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument double*,double*,int*,int*,int* - external ddet_c - call ddet_c(det,a,n,piv,info) - end - - subroutine sdet_c(det,a,n,piv,info) - integer n,piv(n),i - real det,a(n,n) -cf2py intent(in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument float*,float*,int*,int*,int* - external sgetrf - call sgetrf(n,n,a,n,piv,info) - det = 0e0 - if (info.ne.0) then - return - endif - det = 1e0 - do 10,i=1,n - if (piv(i).ne.i) then - det = -det * a(i,i) - else - det = det * a(i,i) - endif - 10 continue - end - - subroutine sdet_r(det,a,n,piv,info) - integer n,piv(n) - real det,a(n,n) -cf2py intent(c,in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument float*,float*,int*,int*,int* - external sdet_c - call sdet_c(det,a,n,piv,info) - end - - subroutine zdet_c(det,a,n,piv,info) - integer n,piv(n),i - complex*16 det,a(n,n) -cf2py intent(in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument complex_double*,complex_double*,int*,int*,int* - external zgetrf - call zgetrf(n,n,a,n,piv,info) - det = (0d0,0d0) - if (info.ne.0) then - return - endif - det = (1d0,0d0) - do 10,i=1,n - if (piv(i).ne.i) then - det = -det * a(i,i) - else - det = det * a(i,i) - endif - 10 continue - end - - subroutine 
zdet_r(det,a,n,piv,info) - integer n,piv(n) - complex*16 det,a(n,n) -cf2py intent(c,in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument complex_double*,complex_double*,int*,int*,int* - external zdet_c - call zdet_c(det,a,n,piv,info) - end - - subroutine cdet_c(det,a,n,piv,info) - integer n,piv(n),i - complex det,a(n,n) -cf2py intent(in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument complex_float*,complex_float*,int*,int*,int* - external cgetrf - call cgetrf(n,n,a,n,piv,info) - det = (0e0,0e0) - if (info.ne.0) then - return - endif - det = (1e0,0e0) - do 10,i=1,n - if (piv(i).ne.i) then - det = -det * a(i,i) - else - det = det * a(i,i) - endif - 10 continue - end - - subroutine cdet_r(det,a,n,piv,info) - integer n,piv(n) - complex det,a(n,n) -cf2py intent(c,in,copy) :: a -cf2py intent(out) :: det,info -cf2py integer intent(hide,cache),depend(n),dimension(n) :: piv -cf2py integer intent(hide),depend(a) :: n = shape(a,0) -cf2py check(shape(a,0)==shape(a,1)) :: a -cf2py callprotoargument complex_float*,complex_float*,int*,int*,int* - external cdet_c - call cdet_c(det,a,n,piv,info) - end - - - diff --git a/scipy-0.10.1/scipy/linalg/src/fblaswrap.f b/scipy-0.10.1/scipy/linalg/src/fblaswrap.f deleted file mode 100644 index f9f5e211ce..0000000000 --- a/scipy-0.10.1/scipy/linalg/src/fblaswrap.f +++ /dev/null @@ -1,43 +0,0 @@ - subroutine wcdotu (r, n, cx, incx, cy, incy) - external cdotu - complex cdotu, r - integer n - complex cx (*) - integer incx - complex cy (*) - integer incy - r = cdotu (n, cx, incx, cy, incy) - end - - subroutine wzdotu (r, n, zx, incx, zy, incy) - external zdotu - double complex zdotu, r - 
integer n - double complex zx (*) - integer incx - double complex zy (*) - integer incy - r = zdotu (n, zx, incx, zy, incy) - end - - subroutine wcdotc (r, n, cx, incx, cy, incy) - external cdotc - complex cdotc, r - integer n - complex cx (*) - integer incx - complex cy (*) - integer incy - r = cdotc (n, cx, incx, cy, incy) - end - - subroutine wzdotc (r, n, zx, incx, zy, incy) - external zdotc - double complex zdotc, r - integer n - double complex zx (*) - integer incx - double complex zy (*) - integer incy - r = zdotc (n, zx, incx, zy, incy) - end diff --git a/scipy-0.10.1/scipy/linalg/src/fblaswrap_veclib_c.c b/scipy-0.10.1/scipy/linalg/src/fblaswrap_veclib_c.c deleted file mode 100644 index 891ac6fb82..0000000000 --- a/scipy-0.10.1/scipy/linalg/src/fblaswrap_veclib_c.c +++ /dev/null @@ -1,23 +0,0 @@ -#include -#include - -//#define WRAP_F77(a) wcblas_##a##_ -#define WRAP_F77(a) w##a##_ -void WRAP_F77(cdotc)(complex *dotc, const int *N, const complex *X, const int *incX, const complex *Y, const int *incY) -{ - cblas_cdotc_sub(*N, X, *incX, Y, *incY, dotc); -} - -void WRAP_F77(cdotu)(complex* dotu, const int *N, const complex *X, const int *incX, const complex *Y, const int *incY) -{ - cblas_cdotu_sub(*N, X, *incX, Y, *incY, dotu); -} - -void WRAP_F77(zdotc)(double complex *dotu, const int *N, const double complex *X, const int *incX, const double complex *Y, const int *incY) -{ - cblas_zdotc_sub(*N, X, *incX, Y, *incY, dotu); -} -void WRAP_F77(zdotu)(double complex *dotu, const int *N, const double complex *X, const int *incX, const double complex *Y, const int *incY) -{ - cblas_zdotu_sub(*N, X, *incX, Y, *incY, dotu); -} diff --git a/scipy-0.10.1/scipy/linalg/src/lu.f b/scipy-0.10.1/scipy/linalg/src/lu.f deleted file mode 100644 index 81dc225274..0000000000 --- a/scipy-0.10.1/scipy/linalg/src/lu.f +++ /dev/null @@ -1,194 +0,0 @@ - -c Calculate LU decomposition of a matrix -c Author: Pearu Peterson, March 2002 -c -c prefixes: d,z,s,c (double,complex 
double,float,complex float) -c suffixes: _c,_r (column major order,row major order) - - subroutine dlu_c(p,l,u,a,m,n,k,piv,info,permute_l,m1) - integer m,n,piv(k),i,j,k,permute_l,m1 - double precision l(m,k),u(k,n),a(m,n) - double precision p(m1,m1) - -cf2py intent(in,copy) :: a -cf2py intent(out) :: info -cf2py integer intent(hide,cache),depend(k),dimension(k) :: piv -cf2py integer intent(hide),depend(a) :: m = shape(a,0) -cf2py integer intent(hide),depend(a) :: n = shape(a,1) -cf2py integer intent(hide),depend(m,n) :: k = (m 1, -"""Both g77 and gfortran runtimes linked in scipy.linalg.flapack ! This is -likely to cause random crashes and wrong results. See numpy INSTALL.txt for -more information.""") diff --git a/scipy-0.10.1/scipy/linalg/tests/test_decomp.py b/scipy-0.10.1/scipy/linalg/tests/test_decomp.py deleted file mode 100644 index a98fd59a20..0000000000 --- a/scipy-0.10.1/scipy/linalg/tests/test_decomp.py +++ /dev/null @@ -1,1509 +0,0 @@ -""" Test functions for linalg.decomp module - -""" -__usage__ = """ -Build linalg: - python setup_linalg.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.linalg.test()' -Run tests if linalg is not installed: - python tests/test_decomp.py -""" - -import numpy as np -from numpy.testing import TestCase, assert_equal, assert_array_almost_equal, \ - assert_array_equal, assert_raises, assert_, run_module_suite, dec - -from scipy.linalg import eig, eigvals, lu, svd, svdvals, cholesky, qr, \ - schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq, \ - eig_banded, eigvals_banded, eigh, eigvalsh, LinAlgError -from scipy.linalg.flapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \ - dsbev, dsbevd, dsbevx, zhbevd, zhbevx - -from numpy import array, transpose, sometrue, diag, ones, linalg, \ - argsort, zeros, arange, float32, complex64, dot, conj, identity, \ - ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \ - asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\ - triu, 
tril - -from numpy.random import rand, normal, seed - -from scipy.linalg._testutils import assert_no_overwrite - -# digit precision to use in asserts for different types -DIGITS = {'d':11, 'D':11, 'f':4, 'F':4} - -# XXX: This function should be available through numpy.testing -def assert_dtype_equal(act, des): - if isinstance(act, ndarray): - act = act.dtype - else: - act = dtype(act) - - if isinstance(des, ndarray): - des = des.dtype - else: - des = dtype(des) - - assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des)) - -# XXX: This function should not be defined here, but somewhere in -# scipy.linalg namespace -def symrand(dim_or_eigv): - """Return a random symmetric (Hermitian) matrix. - - If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues - uniformly distributed on (-1,1). - - If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose - eigenvalues are 'a'. - """ - if isinstance(dim_or_eigv, int): - dim = dim_or_eigv - d = (rand(dim)*2)-1 - elif (isinstance(dim_or_eigv, ndarray) and - len(dim_or_eigv.shape) == 1): - dim = dim_or_eigv.shape[0] - d = dim_or_eigv - else: - raise TypeError("input type not supported.") - - v = random_rot(dim) - h = dot(dot(v.T.conj(), diag(d)), v) - # to avoid roundoff errors, symmetrize the matrix (again) - h = 0.5*(h.T+h) - return h - -# XXX: This function should not be defined here, but somewhere in -# scipy.linalg namespace -def random_rot(dim): - """Return a random rotation matrix, drawn from the Haar distribution - (the only uniform distribution on SO(n)). - The algorithm is described in the paper - Stewart, G.W., 'The efficient generation of random orthogonal - matrices with an application to condition estimators', SIAM Journal - on Numerical Analysis, 17(3), pp. 403-409, 1980. 
- For more information see - http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization""" - H = eye(dim) - D = ones((dim, )) - for n in range(1, dim): - x = normal(size=(dim-n+1, )) - D[n-1] = sign(x[0]) - x[0] -= D[n-1]*sqrt((x*x).sum()) - # Householder transformation - - Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum() - mat = eye(dim) - mat[n-1:,n-1:] = Hx - H = dot(H, mat) - # Fix the last sign such that the determinant is 1 - D[-1] = -D.prod() - H = (D*H.T).T - return H - -def random(size): - return rand(*size) - -class TestEigVals(TestCase): - - def test_simple(self): - a = [[1,2,3],[1,2,3],[2,5,6]] - w = eigvals(a) - exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] - assert_array_almost_equal(w,exact_w) - - def test_simple_tr(self): - a = array([[1,2,3],[1,2,3],[2,5,6]],'d') - a = transpose(a).copy() - a = transpose(a) - w = eigvals(a) - exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] - assert_array_almost_equal(w,exact_w) - - def test_simple_complex(self): - a = [[1,2,3],[1,2,3],[2,5,6+1j]] - w = eigvals(a) - exact_w = [(9+1j+sqrt(92+6j))/2, - 0, - (9+1j-sqrt(92+6j))/2] - assert_array_almost_equal(w,exact_w) - - -class TestEig(object): - - def test_simple(self): - a = [[1,2,3],[1,2,3],[2,5,6]] - w,v = eig(a) - exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] - v0 = array([1,1,(1+sqrt(93)/3)/2]) - v1 = array([3.,0,-1]) - v2 = array([1,1,(1-sqrt(93)/3)/2]) - v0 = v0 / sqrt(dot(v0,transpose(v0))) - v1 = v1 / sqrt(dot(v1,transpose(v1))) - v2 = v2 / sqrt(dot(v2,transpose(v2))) - assert_array_almost_equal(w,exact_w) - assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) - assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) - assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) - for i in range(3): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) - w,v = eig(a,left=1,right=0) - for i in range(3): - assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i]) - - def test_simple_complex_eig(self): - a = [[1,2],[-2,1]] - w,vl,vr = eig(a,left=1,right=1) - 
assert_array_almost_equal(w, array([1+2j, 1-2j])) - for i in range(2): - assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) - for i in range(2): - assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), - conjugate(w[i])*vl[:,i]) - - def test_simple_complex(self): - a = [[1,2,3],[1,2,3],[2,5,6+1j]] - w,vl,vr = eig(a,left=1,right=1) - for i in range(3): - assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) - for i in range(3): - assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), - conjugate(w[i])*vl[:,i]) - - def _check_gen_eig(self, A, B): - A, B = asarray(A), asarray(B) - msg = "\n%r\n%r" % (A, B) - w, vr = eig(A,B) - wt = eigvals(A,B) - val1 = dot(A, vr) - val2 = dot(B, vr) * w - res = val1 - val2 - for i in range(res.shape[1]): - if all(isfinite(res[:, i])): - assert_array_almost_equal(res[:, i], 0, err_msg=msg) - - assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]), - err_msg=msg) - - def test_singular(self): - """Test singular pair""" - # Example taken from - # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html - A = array(( [22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34], - [27,31,26,21,15], [38,44,44,24,30])) - B = array(( [13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25], - [16,25,27,14,23], [24,35,18,21,22])) - - olderr = np.seterr(all='ignore') - try: - self._check_gen_eig(A, B) - finally: - np.seterr(**olderr) - - def test_falker(self): - """Test matrices giving some Nan generalized eigen values.""" - M = diag(array(([1,0,3]))) - K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2])) - D = array(([1,-1,0],[-1,1,0],[0,0,0])) - Z = zeros((3,3)) - I = identity(3) - A = bmat([[I,Z],[Z,-K]]) - B = bmat([[Z,I],[M,D]]) - - olderr = np.seterr(all='ignore') - try: - self._check_gen_eig(A, B) - finally: - np.seterr(**olderr) - - def test_bad_geneig(self): - # Ticket #709 (strange return values from DGGEV) - - def matrices(omega): - c1 = -9 + omega**2 - c2 = 2*omega - A = [[1, 0, 0, 0], - [0, 1, 0, 0], - 
[0, 0, c1, 0], - [0, 0, 0, c1]] - B = [[0, 0, 1, 0], - [0, 0, 0, 1], - [1, 0, 0, -c2], - [0, 1, c2, 0]] - return A, B - - # With a buggy LAPACK, this can fail for different omega on different - # machines -- so we need to test several values - olderr = np.seterr(all='ignore') - try: - for k in xrange(100): - A, B = matrices(omega=k*5./100) - self._check_gen_eig(A, B) - finally: - np.seterr(**olderr) - - def test_not_square_error(self): - """Check that passing a non-square array raises a ValueError.""" - A = np.arange(6).reshape(3,2) - assert_raises(ValueError, eig, A) - - def test_shape_mismatch(self): - """Check that passing arrays of with different shapes raises a ValueError.""" - A = identity(2) - B = np.arange(9.0).reshape(3,3) - assert_raises(ValueError, eig, A, B) - assert_raises(ValueError, eig, B, A) - -class TestEigBanded(TestCase): - - def __init__(self, *args): - TestCase.__init__(self, *args) - - self.create_bandmat() - - def create_bandmat(self): - """Create the full matrix `self.fullmat` and - the corresponding band matrix `self.bandmat`.""" - N = 10 - self.KL = 2 # number of subdiagonals (below the diagonal) - self.KU = 2 # number of superdiagonals (above the diagonal) - - # symmetric band matrix - self.sym_mat = ( diag(1.0*ones(N)) - + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1) - + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) ) - - # hermitian band matrix - self.herm_mat = ( diag(-1.0*ones(N)) - + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1) - + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) ) - - # general real band matrix - self.real_mat = ( diag(1.0*ones(N)) - + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1) - + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) ) - - # general complex band matrix - self.comp_mat = ( 1j*diag(1.0*ones(N)) - + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1) - + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) ) - - - # Eigenvalues and -vectors from linalg.eig - ew, ev = 
linalg.eig(self.sym_mat) - ew = ew.real - args = argsort(ew) - self.w_sym_lin = ew[args] - self.evec_sym_lin = ev[:,args] - - ew, ev = linalg.eig(self.herm_mat) - ew = ew.real - args = argsort(ew) - self.w_herm_lin = ew[args] - self.evec_herm_lin = ev[:,args] - - - # Extract upper bands from symmetric and hermitian band matrices - # (for use in dsbevd, dsbevx, zhbevd, zhbevx - # and their single precision versions) - LDAB = self.KU + 1 - self.bandmat_sym = zeros((LDAB, N), dtype=float) - self.bandmat_herm = zeros((LDAB, N), dtype=complex) - for i in xrange(LDAB): - self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i) - self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i) - - - # Extract bands from general real and complex band matrix - # (for use in dgbtrf, dgbtrs and their single precision versions) - LDAB = 2*self.KL + self.KU + 1 - self.bandmat_real = zeros((LDAB, N), dtype=float) - self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal - for i in xrange(self.KL): - # superdiagonals - self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1) - # subdiagonals - self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1) - - self.bandmat_comp = zeros((LDAB, N), dtype=complex) - self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal - for i in xrange(self.KL): - # superdiagonals - self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1) - # subdiagonals - self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1) - - # absolute value for linear equation system A*x = b - self.b = 1.0*arange(N) - self.bc = self.b *(1 + 1j) - - - ##################################################################### - - - def test_dsbev(self): - """Compare dsbev eigenvalues and eigenvectors with - the result of linalg.eig.""" - w, evec, info = dsbev(self.bandmat_sym, compute_v=1) - evec_ = evec[:,argsort(w)] - assert_array_almost_equal(sort(w), self.w_sym_lin) - assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) - - - 
- def test_dsbevd(self): - """Compare dsbevd eigenvalues and eigenvectors with - the result of linalg.eig.""" - w, evec, info = dsbevd(self.bandmat_sym, compute_v=1) - evec_ = evec[:,argsort(w)] - assert_array_almost_equal(sort(w), self.w_sym_lin) - assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) - - - - def test_dsbevx(self): - """Compare dsbevx eigenvalues and eigenvectors - with the result of linalg.eig.""" - N,N = shape(self.sym_mat) - ## Achtung: Argumente 0.0,0.0,range? - w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N, - compute_v=1, range=2) - evec_ = evec[:,argsort(w)] - assert_array_almost_equal(sort(w), self.w_sym_lin) - assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) - - - def test_zhbevd(self): - """Compare zhbevd eigenvalues and eigenvectors - with the result of linalg.eig.""" - w, evec, info = zhbevd(self.bandmat_herm, compute_v=1) - evec_ = evec[:,argsort(w)] - assert_array_almost_equal(sort(w), self.w_herm_lin) - assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) - - - - def test_zhbevx(self): - """Compare zhbevx eigenvalues and eigenvectors - with the result of linalg.eig.""" - N,N = shape(self.herm_mat) - ## Achtung: Argumente 0.0,0.0,range? 
- w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N, - compute_v=1, range=2) - evec_ = evec[:,argsort(w)] - assert_array_almost_equal(sort(w), self.w_herm_lin) - assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) - - - - def test_eigvals_banded(self): - """Compare eigenvalues of eigvals_banded with those of linalg.eig.""" - w_sym = eigvals_banded(self.bandmat_sym) - w_sym = w_sym.real - assert_array_almost_equal(sort(w_sym), self.w_sym_lin) - - w_herm = eigvals_banded(self.bandmat_herm) - w_herm = w_herm.real - assert_array_almost_equal(sort(w_herm), self.w_herm_lin) - - # extracting eigenvalues with respect to an index range - ind1 = 2 - ind2 = 6 - w_sym_ind = eigvals_banded(self.bandmat_sym, - select='i', select_range=(ind1, ind2) ) - assert_array_almost_equal(sort(w_sym_ind), - self.w_sym_lin[ind1:ind2+1]) - w_herm_ind = eigvals_banded(self.bandmat_herm, - select='i', select_range=(ind1, ind2) ) - assert_array_almost_equal(sort(w_herm_ind), - self.w_herm_lin[ind1:ind2+1]) - - # extracting eigenvalues with respect to a value range - v_lower = self.w_sym_lin[ind1] - 1.0e-5 - v_upper = self.w_sym_lin[ind2] + 1.0e-5 - w_sym_val = eigvals_banded(self.bandmat_sym, - select='v', select_range=(v_lower, v_upper) ) - assert_array_almost_equal(sort(w_sym_val), - self.w_sym_lin[ind1:ind2+1]) - - v_lower = self.w_herm_lin[ind1] - 1.0e-5 - v_upper = self.w_herm_lin[ind2] + 1.0e-5 - w_herm_val = eigvals_banded(self.bandmat_herm, - select='v', select_range=(v_lower, v_upper) ) - assert_array_almost_equal(sort(w_herm_val), - self.w_herm_lin[ind1:ind2+1]) - - - - def test_eig_banded(self): - """Compare eigenvalues and eigenvectors of eig_banded - with those of linalg.eig. 
""" - w_sym, evec_sym = eig_banded(self.bandmat_sym) - evec_sym_ = evec_sym[:,argsort(w_sym.real)] - assert_array_almost_equal(sort(w_sym), self.w_sym_lin) - assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) - - w_herm, evec_herm = eig_banded(self.bandmat_herm) - evec_herm_ = evec_herm[:,argsort(w_herm.real)] - assert_array_almost_equal(sort(w_herm), self.w_herm_lin) - assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin)) - - # extracting eigenvalues with respect to an index range - ind1 = 2 - ind2 = 6 - w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym, - select='i', select_range=(ind1, ind2) ) - assert_array_almost_equal(sort(w_sym_ind), - self.w_sym_lin[ind1:ind2+1]) - assert_array_almost_equal(abs(evec_sym_ind), - abs(self.evec_sym_lin[:,ind1:ind2+1]) ) - - w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm, - select='i', select_range=(ind1, ind2) ) - assert_array_almost_equal(sort(w_herm_ind), - self.w_herm_lin[ind1:ind2+1]) - assert_array_almost_equal(abs(evec_herm_ind), - abs(self.evec_herm_lin[:,ind1:ind2+1]) ) - - # extracting eigenvalues with respect to a value range - v_lower = self.w_sym_lin[ind1] - 1.0e-5 - v_upper = self.w_sym_lin[ind2] + 1.0e-5 - w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym, - select='v', select_range=(v_lower, v_upper) ) - assert_array_almost_equal(sort(w_sym_val), - self.w_sym_lin[ind1:ind2+1]) - assert_array_almost_equal(abs(evec_sym_val), - abs(self.evec_sym_lin[:,ind1:ind2+1]) ) - - v_lower = self.w_herm_lin[ind1] - 1.0e-5 - v_upper = self.w_herm_lin[ind2] + 1.0e-5 - w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm, - select='v', select_range=(v_lower, v_upper) ) - assert_array_almost_equal(sort(w_herm_val), - self.w_herm_lin[ind1:ind2+1]) - assert_array_almost_equal(abs(evec_herm_val), - abs(self.evec_herm_lin[:,ind1:ind2+1]) ) - - - def test_dgbtrf(self): - """Compare dgbtrf LU factorisation with the LU factorisation result - of linalg.lu.""" - M,N = shape(self.real_mat) 
- lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) - - # extract matrix u from lu_symm_band - u = diag(lu_symm_band[2*self.KL,:]) - for i in xrange(self.KL + self.KU): - u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) - - p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0) - assert_array_almost_equal(u, u_lin) - - - def test_zgbtrf(self): - """Compare zgbtrf LU factorisation with the LU factorisation result - of linalg.lu.""" - M,N = shape(self.comp_mat) - lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) - - # extract matrix u from lu_symm_band - u = diag(lu_symm_band[2*self.KL,:]) - for i in xrange(self.KL + self.KU): - u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) - - p_lin, l_lin, u_lin =lu(self.comp_mat, permute_l=0) - assert_array_almost_equal(u, u_lin) - - - - def test_dgbtrs(self): - """Compare dgbtrs solutions for linear equation system A*x = b - with solutions of linalg.solve.""" - - lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) - y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv) - - y_lin = linalg.solve(self.real_mat, self.b) - assert_array_almost_equal(y, y_lin) - - def test_zgbtrs(self): - """Compare zgbtrs solutions for linear equation system A*x = b - with solutions of linalg.solve.""" - - lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) - y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv) - - y_lin = linalg.solve(self.comp_mat, self.bc) - assert_array_almost_equal(y, y_lin) - -def test_eigh(): - DIM = 6 - v = {'dim': (DIM, ), - 'dtype': ('f','d','F','D'), - 'overwrite': (True, False), - 'lower': (True, False), - 'turbo': (True, False), - 'eigvals': (None, (2, DIM-2))} - - for dim in v['dim']: - for typ in v['dtype']: - for overwrite in v['overwrite']: - for turbo in v['turbo']: - for eigvals in v['eigvals']: - for lower in v['lower']: - yield (eigenhproblem_standard, - 'ordinary', - dim, typ, overwrite, lower, - turbo, eigvals) 
- yield (eigenhproblem_general, - 'general ', - dim, typ, overwrite, lower, - turbo, eigvals) - -def _complex_symrand(dim, dtype): - a1, a2 = symrand(dim), symrand(dim) - # add antisymmetric matrix as imag part - a = a1 +1j*(triu(a2)-tril(a2)) - return a.astype(dtype) - -def eigenhproblem_standard(desc, dim, dtype, - overwrite, lower, turbo, - eigvals): - """Solve a standard eigenvalue problem.""" - if iscomplex(empty(1, dtype=dtype)): - a = _complex_symrand(dim, dtype) - else: - a = symrand(dim).astype(dtype) - - if overwrite: - a_c = a.copy() - else: - a_c = a - w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigvals) - assert_dtype_equal(z.dtype, dtype) - w = w.astype(dtype) - diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real - assert_array_almost_equal(diag_, w, DIGITS[dtype]) - -def eigenhproblem_general(desc, dim, dtype, - overwrite, lower, turbo, - eigvals): - """Solve a generalized eigenvalue problem.""" - if iscomplex(empty(1, dtype=dtype)): - a = _complex_symrand(dim, dtype) - b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype) - else: - a = symrand(dim).astype(dtype) - b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype) - - if overwrite: - a_c, b_c = a.copy(), b.copy() - else: - a_c, b_c = a, b - - w, z = eigh(a, b, overwrite_a=overwrite, lower=lower, - overwrite_b=overwrite, turbo=turbo, eigvals=eigvals) - assert_dtype_equal(z.dtype, dtype) - w = w.astype(dtype) - diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real - assert_array_almost_equal(diag1_, w, DIGITS[dtype]) - diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real - assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype]) - -def test_eigh_integer(): - a = array([[1,2],[2,7]]) - b = array([[3,1],[1,5]]) - w,z = eigh(a) - w,z = eigh(a,b) - -class TestLU(TestCase): - - def __init__(self, *args, **kw): - TestCase.__init__(self, *args, **kw) - - self.a = array([[1,2,3],[1,2,3],[2,5,6]]) - self.ca = array([[1,2,3],[1,2,3],[2,5j,6]]) - # Those matrices are more 
robust to detect problems in permutation - # matrices than the ones above - self.b = array([[1,2,3],[4,5,6],[7,8,9]]) - self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]]) - - # Reectangular matrices - self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]]) - self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]]) - - self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]]) - self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]]) - - # Medium sizes matrices - self.med = rand(30, 40) - self.cmed = rand(30, 40) + 1.j * rand(30, 40) - - def _test_common(self, data): - p,l,u = lu(data) - assert_array_almost_equal(dot(dot(p,l),u),data) - pl,u = lu(data,permute_l=1) - assert_array_almost_equal(dot(pl,u),data) - - # Simple tests - def test_simple(self): - self._test_common(self.a) - - def test_simple_complex(self): - self._test_common(self.ca) - - def test_simple2(self): - self._test_common(self.b) - - def test_simple2_complex(self): - self._test_common(self.cb) - - # rectangular matrices tests - def test_hrectangular(self): - self._test_common(self.hrect) - - def test_vrectangular(self): - self._test_common(self.vrect) - - def test_hrectangular_complex(self): - self._test_common(self.chrect) - - def test_vrectangular_complex(self): - self._test_common(self.cvrect) - - # Bigger matrices - def test_medium1(self): - """Check lu decomposition on medium size, rectangular matrix.""" - self._test_common(self.med) - - def test_medium1_complex(self): - """Check lu decomposition on medium size, rectangular matrix.""" - self._test_common(self.cmed) - - def test_simple_known(self): - # Ticket #1458 - for order in ['C', 'F']: - A = np.array([[2, 1],[0, 1.]], order=order) - LU, P = lu_factor(A) - assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]])) - assert_array_equal(P, np.array([0, 1])) - - -class TestLUSingle(TestLU): - """LU testers for single precision, real and double""" - def __init__(self, *args, **kw): - 
TestLU.__init__(self, *args, **kw) - - self.a = self.a.astype(float32) - self.ca = self.ca.astype(complex64) - self.b = self.b.astype(float32) - self.cb = self.cb.astype(complex64) - - self.hrect = self.hrect.astype(float32) - self.chrect = self.hrect.astype(complex64) - - self.vrect = self.vrect.astype(float32) - self.cvrect = self.vrect.astype(complex64) - - self.med = self.vrect.astype(float32) - self.cmed = self.vrect.astype(complex64) - -class TestLUSolve(TestCase): - def setUp(self): - seed(1234) - - def test_lu(self): - a0 = random((10,10)) - b = random((10,)) - - for order in ['C', 'F']: - a = np.array(a0, order=order) - - x1 = solve(a,b) - - lu_a = lu_factor(a) - x2 = lu_solve(lu_a,b) - - assert_array_almost_equal(x1,x2) - -class TestSVD(TestCase): - def setUp(self): - seed(1234) - - def test_simple(self): - a = [[1,2,3],[1,20,3],[2,5,6]] - for full_matrices in (True, False): - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(transpose(u),u),identity(3)) - assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) - sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - assert_array_almost_equal(dot(dot(u,sigma),vh),a) - - def test_simple_singular(self): - a = [[1,2,3],[1,2,3],[2,5,6]] - for full_matrices in (True, False): - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(transpose(u),u),identity(3)) - assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) - sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - assert_array_almost_equal(dot(dot(u,sigma),vh),a) - - def test_simple_underdet(self): - a = [[1,2,3],[4,5,6]] - for full_matrices in (True, False): - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0])) - sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - 
assert_array_almost_equal(dot(dot(u,sigma),vh),a) - - def test_simple_overdet(self): - a = [[1,2],[4,5],[3,4]] - for full_matrices in (True, False): - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1])) - assert_array_almost_equal(dot(transpose(vh),vh),identity(2)) - sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - assert_array_almost_equal(dot(dot(u,sigma),vh),a) - - def test_random(self): - n = 20 - m = 15 - for i in range(3): - for a in [random([n,m]),random([m,n])]: - for full_matrices in (True, False): - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1])) - assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0])) - sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - assert_array_almost_equal(dot(dot(u,sigma),vh),a) - - def test_simple_complex(self): - a = [[1,2,3],[1,2j,3],[2,5,6]] - for full_matrices in (True, False): - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) - assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0])) - sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - assert_array_almost_equal(dot(dot(u,sigma),vh),a) - - def test_random_complex(self): - n = 20 - m = 15 - for i in range(3): - for full_matrices in (True, False): - for a in [random([n,m]),random([m,n])]: - a = a + 1j*random(list(a.shape)) - u,s,vh = svd(a, full_matrices=full_matrices) - assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) - # This fails when [m,n] - #assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char)) - sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) - for i in range(len(s)): sigma[i,i] = s[i] - 
assert_array_almost_equal(dot(dot(u,sigma),vh),a) - -class TestSVDVals(TestCase): - - def test_simple(self): - a = [[1,2,3],[1,2,3],[2,5,6]] - s = svdvals(a) - assert_(len(s) == 3) - assert_(s[0] >= s[1] >= s[2]) - - def test_simple_underdet(self): - a = [[1,2,3],[4,5,6]] - s = svdvals(a) - assert_(len(s) == 2) - assert_(s[0] >= s[1]) - - def test_simple_overdet(self): - a = [[1,2],[4,5],[3,4]] - s = svdvals(a) - assert_(len(s) == 2) - assert_(s[0] >= s[1]) - - def test_simple_complex(self): - a = [[1,2,3],[1,20,3j],[2,5,6]] - s = svdvals(a) - assert_(len(s) == 3) - assert_(s[0] >= s[1] >= s[2]) - - def test_simple_underdet_complex(self): - a = [[1,2,3],[4,5j,6]] - s = svdvals(a) - assert_(len(s) == 2) - assert_(s[0] >= s[1]) - - def test_simple_overdet_complex(self): - a = [[1,2],[4,5],[3j,4]] - s = svdvals(a) - assert_(len(s) == 2) - assert_(s[0] >= s[1]) - -class TestDiagSVD(TestCase): - - def test_simple(self): - assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]]) - - -class TestQR(TestCase): - - def setUp(self): - seed(1234) - - def test_simple(self): - a = [[8,2,3],[2,9,3],[5,3,6]] - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(3)) - assert_array_almost_equal(dot(q,r),a) - - def test_simple_pivoting(self): - a = np.asarray([[8,2,3],[2,9,3],[5,3,6]]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(3)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_simple_trap(self): - a = [[8,2,3],[2,9,3]] - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a) - - def test_simple_trap_pivoting(self): - a = np.asarray([[8,2,3],[2,9,3]]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - 
assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_simple_tall(self): - # full version - a = [[8,2],[2,9],[5,3]] - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(3)) - assert_array_almost_equal(dot(q,r),a) - - def test_simple_tall_pivoting(self): - # full version pivoting - a = np.asarray([[8,2],[2,9],[5,3]]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(3)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_simple_tall_e(self): - # economy version - a = [[8,2],[2,9],[5,3]] - q,r = qr(a, mode='economic') - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a) - assert_equal(q.shape, (3,2)) - assert_equal(r.shape, (2,2)) - - def test_simple_tall_e_pivoting(self): - # economy version pivoting - a = np.asarray([[8,2],[2,9],[5,3]]) - q,r,p = qr(a, pivoting=True, mode='economic') - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p], mode='economic') - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_simple_fat(self): - # full version - a = [[8,2,5],[2,9,3]] - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a) - assert_equal(q.shape, (2,2)) - assert_equal(r.shape, (2,3)) - - def test_simple_fat_pivoting(self): - # full version pivoting - a = np.asarray([[8,2,5],[2,9,3]]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a[:,p]) - assert_equal(q.shape, (2,2)) - assert_equal(r.shape, (2,3)) 
- q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_simple_fat_e(self): - # economy version - a = [[8,2,3],[2,9,5]] - q,r = qr(a, mode='economic') - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a) - assert_equal(q.shape, (2,2)) - assert_equal(r.shape, (2,3)) - - def test_simple_fat_e_pivoting(self): - # economy version pivoting - a = np.asarray([[8,2,3],[2,9,5]]) - q,r,p = qr(a, pivoting=True, mode='economic') - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(q,r),a[:,p]) - assert_equal(q.shape, (2,2)) - assert_equal(r.shape, (2,3)) - q2,r2 = qr(a[:,p], mode='economic') - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_simple_complex(self): - a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] - q,r = qr(a) - assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) - assert_array_almost_equal(dot(q,r),a) - - def test_simple_complex_pivoting(self): - a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_random(self): - n = 20 - for k in range(2): - a = random([n,n]) - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(n)) - assert_array_almost_equal(dot(q,r),a) - - def test_random_pivoting(self): - n = 20 - for k in range(2): - a = random([n,n]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(n)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_random_tall(self): - 
# full version - m = 200 - n = 100 - for k in range(2): - a = random([m,n]) - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(m)) - assert_array_almost_equal(dot(q,r),a) - - def test_random_tall_pivoting(self): - # full version pivoting - m = 200 - n = 100 - for k in range(2): - a = random([m,n]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(m)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_random_tall_e(self): - # economy version - m = 200 - n = 100 - for k in range(2): - a = random([m,n]) - q,r = qr(a, mode='economic') - assert_array_almost_equal(dot(transpose(q),q),identity(n)) - assert_array_almost_equal(dot(q,r),a) - assert_equal(q.shape, (m,n)) - assert_equal(r.shape, (n,n)) - - def test_random_tall_e_pivoting(self): - # economy version pivoting - m = 200 - n = 100 - for k in range(2): - a = random([m,n]) - q,r,p = qr(a, pivoting=True, mode='economic') - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(n)) - assert_array_almost_equal(dot(q,r),a[:,p]) - assert_equal(q.shape, (m,n)) - assert_equal(r.shape, (n,n)) - q2,r2 = qr(a[:,p], mode='economic') - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - - def test_random_trap(self): - m = 100 - n = 200 - for k in range(2): - a = random([m,n]) - q,r = qr(a) - assert_array_almost_equal(dot(transpose(q),q),identity(m)) - assert_array_almost_equal(dot(q,r),a) - - def test_random_trap_pivoting(self): - m = 100 - n = 200 - for k in range(2): - a = random([m,n]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(transpose(q),q),identity(m)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - 
- def test_random_complex(self): - n = 20 - for k in range(2): - a = random([n,n])+1j*random([n,n]) - q,r = qr(a) - assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) - assert_array_almost_equal(dot(q,r),a) - - def test_random_complex_pivoting(self): - n = 20 - for k in range(2): - a = random([n,n])+1j*random([n,n]) - q,r,p = qr(a, pivoting=True) - d = abs(diag(r)) - assert_(all(d[1:] <= d[:-1])) - assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) - assert_array_almost_equal(dot(q,r),a[:,p]) - q2,r2 = qr(a[:,p]) - assert_array_almost_equal(q,q2) - assert_array_almost_equal(r,r2) - -class TestRQ(TestCase): - - def setUp(self): - seed(1234) - - def test_simple(self): - a = [[8,2,3],[2,9,3],[5,3,6]] - r,q = rq(a) - assert_array_almost_equal(dot(q, transpose(q)),identity(3)) - assert_array_almost_equal(dot(r,q),a) - - def test_r(self): - a = [[8,2,3],[2,9,3],[5,3,6]] - r,q = rq(a) - r2 = rq(a, mode='r') - assert_array_almost_equal(r, r2) - - def test_random(self): - n = 20 - for k in range(2): - a = random([n,n]) - r,q = rq(a) - assert_array_almost_equal(dot(q, transpose(q)),identity(n)) - assert_array_almost_equal(dot(r,q),a) - - def test_simple_trap(self): - a = [[8,2,3],[2,9,3]] - r,q = rq(a) - assert_array_almost_equal(dot(transpose(q),q),identity(3)) - assert_array_almost_equal(dot(r,q),a) - - def test_simple_tall(self): - a = [[8,2],[2,9],[5,3]] - r,q = rq(a) - assert_array_almost_equal(dot(transpose(q),q),identity(2)) - assert_array_almost_equal(dot(r,q),a) - - def test_simple_fat(self): - a = [[8,2,5],[2,9,3]] - r,q = rq(a) - assert_array_almost_equal(dot(transpose(q),q),identity(3)) - assert_array_almost_equal(dot(r,q),a) - - def test_simple_complex(self): - a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] - r,q = rq(a) - assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3)) - assert_array_almost_equal(dot(r,q),a) - - def test_random_tall(self): - m = 200 - n = 100 - for k in range(2): - a = random([m,n]) - r,q = rq(a) - 
assert_array_almost_equal(dot(q, transpose(q)),identity(n)) - assert_array_almost_equal(dot(r,q),a) - - def test_random_trap(self): - m = 100 - n = 200 - for k in range(2): - a = random([m,n]) - r,q = rq(a) - assert_array_almost_equal(dot(q, transpose(q)),identity(n)) - assert_array_almost_equal(dot(r,q),a) - - def test_random_trap_economic(self): - m = 100 - n = 200 - for k in range(2): - a = random([m,n]) - r,q = rq(a, mode='economic') - assert_array_almost_equal(dot(q,transpose(q)),identity(m)) - assert_array_almost_equal(dot(r,q),a) - assert_equal(q.shape, (m, n)) - assert_equal(r.shape, (m, m)) - - def test_random_complex(self): - n = 20 - for k in range(2): - a = random([n,n])+1j*random([n,n]) - r,q = rq(a) - assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n)) - assert_array_almost_equal(dot(r,q),a) - - def test_random_complex_economic(self): - m = 100 - n = 200 - for k in range(2): - a = random([m,n])+1j*random([m,n]) - r,q = rq(a, mode='economic') - assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m)) - assert_array_almost_equal(dot(r,q),a) - assert_equal(q.shape, (m, n)) - assert_equal(r.shape, (m, m)) - -transp = transpose -any = sometrue - -class TestSchur(TestCase): - - def test_simple(self): - a = [[8,12,3],[2,9,3],[10,3,6]] - t,z = schur(a) - assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) - tc,zc = schur(a,'complex') - assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc)))) - assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a) - tc2,zc2 = rsf2csf(tc,zc) - assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a) - - def test_sort(self): - a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] - s,u,sdim = schur(a,sort='lhp') - assert_array_almost_equal([[0.1134,0.5436,0.8316,0.], - [-0.1134,-0.8245,0.5544,0.], - [-0.8213,0.1308,0.0265,-0.5547], - [-0.5475,0.0872,0.0177,0.8321]], - u,3) - assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174], - 
[0.,-0.5000,9.4472,-0.7184], - [0.,0.,1.4142,-0.1456], - [0.,0.,0.,0.5]], - s,3) - assert_equal(2,sdim) - - s,u,sdim = schur(a,sort='rhp') - assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], - [-0.4862,0.4930,-0.1434,-0.7071], - [0.6042,0.3944,-0.6924,0.], - [0.4028,0.5986,0.6924,0.]], - u,3) - assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], - [0.,0.5,6.5809,-3.1870], - [0.,0.,-1.4142,0.9270], - [0.,0.,0.,-0.5]], - s,3) - assert_equal(2,sdim) - - s,u,sdim = schur(a,sort='iuc') - assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042], - [-0.8321,0.,-0.3814,-0.4028], - [0.,0.7071,-0.5134,0.4862], - [0.,0.7071,0.5134,-0.4862]], - u,3) - assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974], - [0.,0.5000,-3.3191,-14.4130], - [0.,0.,1.4142,2.1573], - [0.,0.,0.,-1.4142]], - s,3) - assert_equal(2,sdim) - - s,u,sdim = schur(a,sort='ouc') - assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.], - [-0.4862,0.5134,0.7071,0.], - [0.6042,0.5721,0.,-0.5547], - [0.4028,0.3814,0.,0.8321]], - u,3) - assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974], - [0.,-1.4142,3.3191,6.5809], - [0.,0.,-0.5000,0.], - [0.,0.,0.,0.5000]], - s,3) - assert_equal(2,sdim) - - rhp_function = lambda x: x >= 0.0 - s,u,sdim = schur(a,sort=rhp_function) - assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], - [-0.4862,0.4930,-0.1434,-0.7071], - [0.6042,0.3944,-0.6924,0.], - [0.4028,0.5986,0.6924,0.]], - u,3) - assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], - [0.,0.5,6.5809,-3.1870], - [0.,0.,-1.4142,0.9270], - [0.,0.,0.,-0.5]], - s,3) - assert_equal(2,sdim) - - def test_sort_errors(self): - a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] - assert_raises(ValueError, schur, a, sort='unsupported') - assert_raises(ValueError, schur, a, sort=1) - -class TestHessenberg(TestCase): - - def test_simple(self): - a = [[-149, -50,-154], - [ 537, 180, 546], - [ -27, -9, -25]] - h1 = [[-149.0000,42.2037,-156.3165], - 
[-537.6783,152.5511,-554.9272], - [0,0.0728, 2.4489]] - h,q = hessenberg(a,calc_q=1) - assert_array_almost_equal(dot(transp(q),dot(a,q)),h) - assert_array_almost_equal(h,h1,decimal=4) - - def test_simple_complex(self): - a = [[-149, -50,-154], - [ 537, 180j, 546], - [ -27j, -9, -25]] - h,q = hessenberg(a,calc_q=1) - h1 = dot(transp(conj(q)),dot(a,q)) - assert_array_almost_equal(h1,h) - - def test_simple2(self): - a = [[1,2,3,4,5,6,7], - [0,2,3,4,6,7,2], - [0,2,2,3,0,3,2], - [0,0,2,8,0,0,2], - [0,3,1,2,0,1,2], - [0,1,2,3,0,1,0], - [0,0,0,0,0,1,2]] - h,q = hessenberg(a,calc_q=1) - assert_array_almost_equal(dot(transp(q),dot(a,q)),h) - - def test_random(self): - n = 20 - for k in range(2): - a = random([n,n]) - h,q = hessenberg(a,calc_q=1) - assert_array_almost_equal(dot(transp(q),dot(a,q)),h) - - def test_random_complex(self): - n = 20 - for k in range(2): - a = random([n,n])+1j*random([n,n]) - h,q = hessenberg(a,calc_q=1) - h1 = dot(transp(conj(q)),dot(a,q)) - assert_array_almost_equal(h1,h) - - - -class TestDatacopied(TestCase): - - def test_datacopied(self): - from scipy.linalg.decomp import _datacopied - - M = matrix([[0,1],[2,3]]) - A = asarray(M) - L = M.tolist() - M2 = M.copy() - - class Fake1: - def __array__(self): - return A - - class Fake2: - __array_interface__ = A.__array_interface__ - - F1 = Fake1() - F2 = Fake2() - - AF1 = asarray(F1) - AF2 = asarray(F2) - - for item, status in [(M, False), (A, False), (L, True), - (M2, False), (F1, False), (F2, False)]: - arr = asarray(item) - assert_equal(_datacopied(arr, item), status, - err_msg=repr(item)) - - -def test_aligned_mem_float(): - """Check linalg works with non-aligned memory""" - # Allocate 402 bytes of memory (allocated on boundary) - a = arange(402, dtype=np.uint8) - - # Create an array with boundary offset 4 - z = np.frombuffer(a.data, offset=2, count=100, dtype=float32) - z.shape = 10, 10 - - eig(z, overwrite_a=True) - eig(z.T, overwrite_a=True) - - -def test_aligned_mem(): - """Check linalg works 
with non-aligned memory""" - # Allocate 804 bytes of memory (allocated on boundary) - a = arange(804, dtype=np.uint8) - - # Create an array with boundary offset 4 - z = np.frombuffer(a.data, offset=4, count=100, dtype=float) - z.shape = 10, 10 - - eig(z, overwrite_a=True) - eig(z.T, overwrite_a=True) - -def test_aligned_mem_complex(): - """Check that complex objects don't need to be completely aligned""" - # Allocate 1608 bytes of memory (allocated on boundary) - a = zeros(1608, dtype=np.uint8) - - # Create an array with boundary offset 8 - z = np.frombuffer(a.data, offset=8, count=100, dtype=complex) - z.shape = 10, 10 - - eig(z, overwrite_a=True) - # This does not need special handling - eig(z.T, overwrite_a=True) - -def check_lapack_misaligned(func, args, kwargs): - args = list(args) - for i in range(len(args)): - a = args[:] - if isinstance(a[i],np.ndarray): - # Try misaligning a[i] - aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8) - aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype) - aa.shape = a[i].shape - aa[...] 
= a[i] - a[i] = aa - func(*a,**kwargs) - if len(a[i].shape)>1: - a[i] = a[i].T - func(*a,**kwargs) - - -@dec.knownfailureif(True, "Ticket #1152, triggers a segfault in rare cases.") -def test_lapack_misaligned(): - M = np.eye(10,dtype=float) - R = np.arange(100) - R.shape = 10,10 - S = np.arange(20000,dtype=np.uint8) - S = np.frombuffer(S.data, offset=4, count=100, dtype=np.float) - S.shape = 10, 10 - b = np.ones(10) - v = np.ones(3,dtype=float) - LU, piv = lu_factor(S) - for (func, args, kwargs) in [ - (eig,(S,),dict(overwrite_a=True)), # crash - (eigvals,(S,),dict(overwrite_a=True)), # no crash - (lu,(S,),dict(overwrite_a=True)), # no crash - (lu_factor,(S,),dict(overwrite_a=True)), # no crash - (lu_solve,((LU,piv),b),dict(overwrite_b=True)), - (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)), - (svd,(M,),dict(overwrite_a=True)), # no crash - (svd,(R,),dict(overwrite_a=True)), # no crash - (svd,(S,),dict(overwrite_a=True)), # crash - (svdvals,(S,),dict()), # no crash - (svdvals,(S,),dict(overwrite_a=True)), #crash - (cholesky,(M,),dict(overwrite_a=True)), # no crash - (qr,(S,),dict(overwrite_a=True)), # crash - (rq,(S,),dict(overwrite_a=True)), # crash - (hessenberg,(S,),dict(overwrite_a=True)), # crash - (schur,(S,),dict(overwrite_a=True)), # crash - ]: - yield check_lapack_misaligned, func, args, kwargs -# not properly tested -# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd - - -class TestOverwrite(object): - def test_eig(self): - assert_no_overwrite(eig, [(3,3)]) - assert_no_overwrite(eig, [(3,3), (3,3)]) - def test_eigh(self): - assert_no_overwrite(eigh, [(3,3)]) - assert_no_overwrite(eigh, [(3,3), (3,3)]) - def test_eig_banded(self): - assert_no_overwrite(eig_banded, [(3,2)]) - def test_eigvals(self): - assert_no_overwrite(eigvals, [(3,3)]) - def test_eigvalsh(self): - assert_no_overwrite(eigvalsh, [(3,3)]) - def test_eigvals_banded(self): - assert_no_overwrite(eigvals_banded, [(3,2)]) - def test_hessenberg(self): - 
assert_no_overwrite(hessenberg, [(3,3)]) - def test_lu_factor(self): - assert_no_overwrite(lu_factor, [(3,3)]) - def test_lu_solve(self): - x = np.array([[1,2,3], [4,5,6], [7,8,8]]) - xlu = lu_factor(x) - assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)]) - def test_lu(self): - assert_no_overwrite(lu, [(3,3)]) - def test_qr(self): - assert_no_overwrite(qr, [(3,3)]) - def test_rq(self): - assert_no_overwrite(rq, [(3,3)]) - def test_schur(self): - assert_no_overwrite(schur, [(3,3)]) - def test_schur_complex(self): - assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)], - dtypes=[np.float32, np.float64]) - def test_svd(self): - assert_no_overwrite(svd, [(3,3)]) - def test_svdvals(self): - assert_no_overwrite(svdvals, [(3,3)]) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/linalg/tests/test_decomp_cholesky.py b/scipy-0.10.1/scipy/linalg/tests/test_decomp_cholesky.py deleted file mode 100644 index 40d388d94d..0000000000 --- a/scipy-0.10.1/scipy/linalg/tests/test_decomp_cholesky.py +++ /dev/null @@ -1,159 +0,0 @@ - - -from numpy.testing import TestCase, assert_array_almost_equal - -from numpy import array, transpose, dot, conjugate, zeros_like -from numpy.random import rand -from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \ - cho_factor, cho_solve - -from scipy.linalg._testutils import assert_no_overwrite - -def random(size): - return rand(*size) - - -class TestCholesky(TestCase): - - def test_simple(self): - a = [[8,2,3],[2,9,3],[3,3,6]] - c = cholesky(a) - assert_array_almost_equal(dot(transpose(c),c),a) - c = transpose(c) - a = dot(c,transpose(c)) - assert_array_almost_equal(cholesky(a,lower=1),c) - - def test_simple_complex(self): - m = array([[3+1j,3+4j,5],[0,2+2j,2+7j],[0,0,7+4j]]) - a = dot(transpose(conjugate(m)),m) - c = cholesky(a) - a1 = dot(transpose(conjugate(c)),c) - assert_array_almost_equal(a,a1) - c = transpose(c) - a = dot(c,transpose(conjugate(c))) - 
assert_array_almost_equal(cholesky(a,lower=1),c) - - def test_random(self): - n = 20 - for k in range(2): - m = random([n,n]) - for i in range(n): - m[i,i] = 20*(.1+m[i,i]) - a = dot(transpose(m),m) - c = cholesky(a) - a1 = dot(transpose(c),c) - assert_array_almost_equal(a,a1) - c = transpose(c) - a = dot(c,transpose(c)) - assert_array_almost_equal(cholesky(a,lower=1),c) - - def test_random_complex(self): - n = 20 - for k in range(2): - m = random([n,n])+1j*random([n,n]) - for i in range(n): - m[i,i] = 20*(.1+abs(m[i,i])) - a = dot(transpose(conjugate(m)),m) - c = cholesky(a) - a1 = dot(transpose(conjugate(c)),c) - assert_array_almost_equal(a,a1) - c = transpose(c) - a = dot(c,transpose(conjugate(c))) - assert_array_almost_equal(cholesky(a,lower=1),c) - - -class TestCholeskyBanded(TestCase): - """Tests for cholesky_banded() and cho_solve_banded.""" - - def test_upper_real(self): - # Symmetric positive definite banded matrix `a` - a = array([[4.0, 1.0, 0.0, 0.0], - [1.0, 4.0, 0.5, 0.0], - [0.0, 0.5, 4.0, 0.2], - [0.0, 0.0, 0.2, 4.0]]) - # Banded storage form of `a`. - ab = array([[-1.0, 1.0, 0.5, 0.2], - [4.0, 4.0, 4.0, 4.0]]) - c = cholesky_banded(ab, lower=False) - ufac = zeros_like(a) - ufac[range(4),range(4)] = c[-1] - ufac[(0,1,2),(1,2,3)] = c[0,1:] - assert_array_almost_equal(a, dot(ufac.T, ufac)) - - b = array([0.0, 0.5, 4.2, 4.2]) - x = cho_solve_banded((c, False), b) - assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) - - def test_upper_complex(self): - # Hermitian positive definite banded matrix `a` - a = array([[4.0, 1.0, 0.0, 0.0], - [1.0, 4.0, 0.5, 0.0], - [0.0, 0.5, 4.0, -0.2j], - [0.0, 0.0, 0.2j, 4.0]]) - # Banded storage form of `a`. 
- ab = array([[-1.0, 1.0, 0.5, -0.2j], - [4.0, 4.0, 4.0, 4.0]]) - c = cholesky_banded(ab, lower=False) - ufac = zeros_like(a) - ufac[range(4),range(4)] = c[-1] - ufac[(0,1,2),(1,2,3)] = c[0,1:] - assert_array_almost_equal(a, dot(ufac.conj().T, ufac)) - - b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0]) - x = cho_solve_banded((c, False), b) - assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) - - def test_lower_real(self): - # Symmetric positive definite banded matrix `a` - a = array([[4.0, 1.0, 0.0, 0.0], - [1.0, 4.0, 0.5, 0.0], - [0.0, 0.5, 4.0, 0.2], - [0.0, 0.0, 0.2, 4.0]]) - # Banded storage form of `a`. - ab = array([[4.0, 4.0, 4.0, 4.0], - [1.0, 0.5, 0.2, -1.0]]) - c = cholesky_banded(ab, lower=True) - lfac = zeros_like(a) - lfac[range(4),range(4)] = c[0] - lfac[(1,2,3),(0,1,2)] = c[1,:3] - assert_array_almost_equal(a, dot(lfac, lfac.T)) - - b = array([0.0, 0.5, 4.2, 4.2]) - x = cho_solve_banded((c, True), b) - assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) - - def test_lower_complex(self): - # Hermitian positive definite banded matrix `a` - a = array([[4.0, 1.0, 0.0, 0.0], - [1.0, 4.0, 0.5, 0.0], - [0.0, 0.5, 4.0, -0.2j], - [0.0, 0.0, 0.2j, 4.0]]) - # Banded storage form of `a`. 
- ab = array([[4.0, 4.0, 4.0, 4.0], - [1.0, 0.5, 0.2j, -1.0]]) - c = cholesky_banded(ab, lower=True) - lfac = zeros_like(a) - lfac[range(4),range(4)] = c[0] - lfac[(1,2,3),(0,1,2)] = c[1,:3] - assert_array_almost_equal(a, dot(lfac, lfac.conj().T)) - - b = array([0.0, 0.5j, 3.8j, 3.8]) - x = cho_solve_banded((c, True), b) - assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0]) - -class TestOverwrite(object): - def test_cholesky(self): - assert_no_overwrite(cholesky, [(3,3)]) - def test_cho_factor(self): - assert_no_overwrite(cho_factor, [(3,3)]) - def test_cho_solve(self): - x = array([[2,-1,0], [-1,2,-1], [0,-1,2]]) - xcho = cho_factor(x) - assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)]) - def test_cholesky_banded(self): - assert_no_overwrite(cholesky_banded, [(2,3)]) - def test_cho_solve_banded(self): - x = array([[0, -1, -1], [2, 2, 2]]) - xcho = cholesky_banded(x) - assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b), - [(3,)]) diff --git a/scipy-0.10.1/scipy/linalg/tests/test_fblas.py b/scipy-0.10.1/scipy/linalg/tests/test_fblas.py deleted file mode 100644 index 3e7173dba6..0000000000 --- a/scipy-0.10.1/scipy/linalg/tests/test_fblas.py +++ /dev/null @@ -1,576 +0,0 @@ -# Test interfaces to fortran blas. -# -# The tests are more of interface than they are of the underlying blas. -# Only very small matrices checked -- N=3 or so. -# -# !! Complex calculations really aren't checked that carefully. -# !! Only real valued complex numbers are used in tests. - -from numpy import float32, float64, complex64, complex128, arange, array, \ - zeros, shape, transpose, newaxis, common_type, conjugate -from scipy.linalg import fblas - -from numpy.testing import TestCase, run_module_suite, assert_array_equal, \ - assert_array_almost_equal, assert_ - - -#decimal accuracy to require between Python and LAPACK/BLAS calculations -accuracy = 5 - -# Since numpy.dot likely uses the same blas, use this routine -# to check. 
-def matrixmultiply(a, b): - if len(b.shape) == 1: - b_is_vector = True - b = b[:,newaxis] - else: - b_is_vector = False - assert_(a.shape[1] == b.shape[0]) - c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) - for i in xrange(a.shape[0]): - for j in xrange(b.shape[1]): - s = 0 - for k in xrange(a.shape[1]): - s += a[i,k] * b[k, j] - c[i,j] = s - if b_is_vector: - c = c.reshape((a.shape[0],)) - return c - -################################################## -### Test blas ?axpy - -class BaseAxpy(object): - ''' Mixin class for axpy tests ''' - - def test_default_a(self): - x = arange(3.,dtype=self.dtype) - y = arange(3.,dtype=x.dtype) - real_y = x*1.+y - self.blas_func(x,y) - assert_array_equal(real_y,y) - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - y = arange(3.,dtype=x.dtype) - real_y = x*3.+y - self.blas_func(x,y,a=3.) - assert_array_equal(real_y,y) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - y = zeros(3,x.dtype) - y = arange(3.,dtype=x.dtype) - real_y = x[::2]*3.+y - self.blas_func(x,y,a=3.,n=3,incx=2) - assert_array_equal(real_y,y) - - def test_y_stride(self): - x = arange(3.,dtype=self.dtype) - y = zeros(6,x.dtype) - real_y = x*3.+y[::2] - self.blas_func(x,y,a=3.,n=3,incy=2) - assert_array_equal(real_y,y[::2]) - - def test_x_and_y_stride(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - real_y = x[::4]*3.+y[::2] - self.blas_func(x,y,a=3.,n=3,incx=4,incy=2) - assert_array_equal(real_y,y[::2]) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=4,incx=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - - def test_y_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=3,incy=5) - except: # what kind of error should be caught? 
- return - # should catch error and never get here - assert_(0) - -try: - class TestSaxpy(TestCase, BaseAxpy): - blas_func = fblas.saxpy - dtype = float32 -except AttributeError: - class TestSaxpy: pass - -class TestDaxpy(TestCase, BaseAxpy): - blas_func = fblas.daxpy - dtype = float64 - -try: - class TestCaxpy(TestCase, BaseAxpy): - blas_func = fblas.caxpy - dtype = complex64 -except AttributeError: - class TestCaxpy: pass - -class TestZaxpy(TestCase, BaseAxpy): - blas_func = fblas.zaxpy - dtype = complex128 - - -################################################## -### Test blas ?scal - -class BaseScal(object): - ''' Mixin class for scal testing ''' - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - real_x = x*3. - self.blas_func(3.,x) - assert_array_equal(real_x,x) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - real_x = x.copy() - real_x[::2] = x[::2]*array(3.,self.dtype) - self.blas_func(3.,x,n=3,incx=2) - assert_array_equal(real_x,x) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - try: - self.blas_func(2.,x,n=4,incx=5) - except: # what kind of error should be caught? 
- return - # should catch error and never get here - assert_(0) - -try: - class TestSscal(TestCase, BaseScal): - blas_func = fblas.sscal - dtype = float32 -except AttributeError: - class TestSscal: pass - -class TestDscal(TestCase, BaseScal): - blas_func = fblas.dscal - dtype = float64 - -try: - class TestCscal(TestCase, BaseScal): - blas_func = fblas.cscal - dtype = complex64 -except AttributeError: - class TestCscal: pass - -class TestZscal(TestCase, BaseScal): - blas_func = fblas.zscal - dtype = complex128 - - -################################################## -### Test blas ?copy - -class BaseCopy(object): - ''' Mixin class for copy testing ''' - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - y = zeros(shape(x),x.dtype) - self.blas_func(x,y) - assert_array_equal(x,y) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - y = zeros(3,x.dtype) - self.blas_func(x,y,n=3,incx=2) - assert_array_equal(x[::2],y) - - def test_y_stride(self): - x = arange(3.,dtype=self.dtype) - y = zeros(6,x.dtype) - self.blas_func(x,y,n=3,incy=2) - assert_array_equal(x,y[::2]) - - def test_x_and_y_stride(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - self.blas_func(x,y,n=3,incx=4,incy=2) - assert_array_equal(x[::4],y[::2]) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=4,incx=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - - def test_y_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=3,incy=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - - #def test_y_bad_type(self): - ## Hmmm. Should this work? What should be the output. 
- # x = arange(3.,dtype=self.dtype) - # y = zeros(shape(x)) - # self.blas_func(x,y) - # assert_array_equal(x,y) - -try: - class TestScopy(TestCase, BaseCopy): - blas_func = fblas.scopy - dtype = float32 -except AttributeError: - class TestScopy: pass - -class TestDcopy(TestCase, BaseCopy): - blas_func = fblas.dcopy - dtype = float64 - -try: - class TestCcopy(TestCase, BaseCopy): - blas_func = fblas.ccopy - dtype = complex64 -except AttributeError: - class TestCcopy: pass - -class TestZcopy(TestCase, BaseCopy): - blas_func = fblas.zcopy - dtype = complex128 - - -################################################## -### Test blas ?swap - -class BaseSwap(object): - ''' Mixin class for swap tests ''' - - def test_simple(self): - x = arange(3.,dtype=self.dtype) - y = zeros(shape(x),x.dtype) - desired_x = y.copy() - desired_y = x.copy() - self.blas_func(x,y) - assert_array_equal(desired_x,x) - assert_array_equal(desired_y,y) - - def test_x_stride(self): - x = arange(6.,dtype=self.dtype) - y = zeros(3,x.dtype) - desired_x = y.copy() - desired_y = x.copy()[::2] - self.blas_func(x,y,n=3,incx=2) - assert_array_equal(desired_x,x[::2]) - assert_array_equal(desired_y,y) - - def test_y_stride(self): - x = arange(3.,dtype=self.dtype) - y = zeros(6,x.dtype) - desired_x = y.copy()[::2] - desired_y = x.copy() - self.blas_func(x,y,n=3,incy=2) - assert_array_equal(desired_x,x) - assert_array_equal(desired_y,y[::2]) - - def test_x_and_y_stride(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - desired_x = y.copy()[::2] - desired_y = x.copy()[::4] - self.blas_func(x,y,n=3,incx=4,incy=2) - assert_array_equal(desired_x,x[::4]) - assert_array_equal(desired_y,y[::2]) - - def test_x_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=4,incx=5) - except: # what kind of error should be caught? 
- return - # should catch error and never get here - assert_(0) - - def test_y_bad_size(self): - x = arange(12.,dtype=self.dtype) - y = zeros(6,x.dtype) - try: - self.blas_func(x,y,n=3,incy=5) - except: # what kind of error should be caught? - return - # should catch error and never get here - assert_(0) - -try: - class TestSswap(TestCase, BaseSwap): - blas_func = fblas.sswap - dtype = float32 -except AttributeError: - class TestSswap: pass - -class TestDswap(TestCase, BaseSwap): - blas_func = fblas.dswap - dtype = float64 - -try: - class TestCswap(TestCase, BaseSwap): - blas_func = fblas.cswap - dtype = complex64 -except AttributeError: - class TestCswap: pass - -class TestZswap(TestCase, BaseSwap): - blas_func = fblas.zswap - dtype = complex128 - -################################################## -### Test blas ?gemv -### This will be a mess to test all cases. - -class BaseGemv(object): - ''' Mixin class for gemv tests ''' - - def get_data(self,x_stride=1,y_stride=1): - mult = array(1, dtype = self.dtype) - if self.dtype in [complex64, complex128]: - mult = array(1+1j, dtype = self.dtype) - from numpy.random import normal, seed - seed(1234) - alpha = array(1., dtype = self.dtype) * mult - beta = array(1.,dtype = self.dtype) * mult - a = normal(0.,1.,(3,3)).astype(self.dtype) * mult - x = arange(shape(a)[0]*x_stride,dtype=self.dtype) * mult - y = arange(shape(a)[1]*y_stride,dtype=self.dtype) * mult - return alpha,beta,a,x,y - - def test_simple(self): - alpha,beta,a,x,y = self.get_data() - desired_y = alpha*matrixmultiply(a,x)+beta*y - y = self.blas_func(alpha,a,x,beta,y) - assert_array_almost_equal(desired_y,y) - - def test_default_beta_y(self): - alpha,beta,a,x,y = self.get_data() - desired_y = matrixmultiply(a,x) - y = self.blas_func(1,a,x) - assert_array_almost_equal(desired_y,y) - - def test_simple_transpose(self): - alpha,beta,a,x,y = self.get_data() - desired_y = alpha*matrixmultiply(transpose(a),x)+beta*y - y = self.blas_func(alpha,a,x,beta,y,trans=1) - 
assert_array_almost_equal(desired_y,y) - - def test_simple_transpose_conj(self): - alpha,beta,a,x,y = self.get_data() - desired_y = alpha*matrixmultiply(transpose(conjugate(a)),x)+beta*y - y = self.blas_func(alpha,a,x,beta,y,trans=2) - assert_array_almost_equal(desired_y,y) - - def test_x_stride(self): - alpha,beta,a,x,y = self.get_data(x_stride=2) - desired_y = alpha*matrixmultiply(a,x[::2])+beta*y - y = self.blas_func(alpha,a,x,beta,y,incx=2) - assert_array_almost_equal(desired_y,y) - - def test_x_stride_transpose(self): - alpha,beta,a,x,y = self.get_data(x_stride=2) - desired_y = alpha*matrixmultiply(transpose(a),x[::2])+beta*y - y = self.blas_func(alpha,a,x,beta,y,trans=1,incx=2) - assert_array_almost_equal(desired_y, y) - - def test_x_stride_assert(self): - # What is the use of this test? - alpha,beta,a,x,y = self.get_data(x_stride=2) - try: - y = self.blas_func(1,a,x,1,y,trans=0,incx=3) - assert_(0) - except: - pass - try: - y = self.blas_func(1,a,x,1,y,trans=1,incx=3) - assert_(0) - except: - pass - - def test_y_stride(self): - alpha,beta,a,x,y = self.get_data(y_stride=2) - desired_y = y.copy() - desired_y[::2] = alpha*matrixmultiply(a,x)+beta*y[::2] - y = self.blas_func(alpha,a,x,beta,y,incy=2) - assert_array_almost_equal(desired_y,y) - - def test_y_stride_transpose(self): - alpha,beta,a,x,y = self.get_data(y_stride=2) - desired_y = y.copy() - desired_y[::2] = alpha*matrixmultiply(transpose(a),x)+beta*y[::2] - y = self.blas_func(alpha,a,x,beta,y,trans=1,incy=2) - assert_array_almost_equal(desired_y,y) - - def test_y_stride_assert(self): - # What is the use of this test? 
- alpha,beta,a,x,y = self.get_data(y_stride=2) - try: - y = self.blas_func(1,a,x,1,y,trans=0,incy=3) - assert_(0) - except: - pass - try: - y = self.blas_func(1,a,x,1,y,trans=1,incy=3) - assert_(0) - except: - pass - -try: - class TestSgemv(TestCase, BaseGemv): - blas_func = fblas.sgemv - dtype = float32 -except AttributeError: - class TestSgemv: pass - -class TestDgemv(TestCase, BaseGemv): - blas_func = fblas.dgemv - dtype = float64 - -try: - class TestCgemv(TestCase, BaseGemv): - blas_func = fblas.cgemv - dtype = complex64 -except AttributeError: - class TestCgemv: pass - -class TestZgemv(TestCase, BaseGemv): - blas_func = fblas.zgemv - dtype = complex128 - -""" -################################################## -### Test blas ?ger -### This will be a mess to test all cases. - -class BaseGer(TestCase): - def get_data(self,x_stride=1,y_stride=1): - from numpy.random import normal, seed - seed(1234) - alpha = array(1., dtype = self.dtype) - a = normal(0.,1.,(3,3)).astype(self.dtype) - x = arange(shape(a)[0]*x_stride,dtype=self.dtype) - y = arange(shape(a)[1]*y_stride,dtype=self.dtype) - return alpha,a,x,y - def test_simple(self): - alpha,a,x,y = self.get_data() - # tranpose takes care of Fortran vs. 
C(and Python) memory layout - desired_a = alpha*transpose(x[:,newaxis]*y) + a - self.blas_func(x,y,a) - assert_array_almost_equal(desired_a,a) - def test_x_stride(self): - alpha,a,x,y = self.get_data(x_stride=2) - desired_a = alpha*transpose(x[::2,newaxis]*y) + a - self.blas_func(x,y,a,incx=2) - assert_array_almost_equal(desired_a,a) - def test_x_stride_assert(self): - alpha,a,x,y = self.get_data(x_stride=2) - try: - self.blas_func(x,y,a,incx=3) - assert(0) - except: - pass - def test_y_stride(self): - alpha,a,x,y = self.get_data(y_stride=2) - desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a - self.blas_func(x,y,a,incy=2) - assert_array_almost_equal(desired_a,a) - - def test_y_stride_assert(self): - alpha,a,x,y = self.get_data(y_stride=2) - try: - self.blas_func(a,x,y,incy=3) - assert(0) - except: - pass - -class TestSger(BaseGer): - blas_func = fblas.sger - dtype = float32 -class TestDger(BaseGer): - blas_func = fblas.dger - dtype = float64 -""" -################################################## -### Test blas ?gerc -### This will be a mess to test all cases. - -""" -class BaseGerComplex(BaseGer): - def get_data(self,x_stride=1,y_stride=1): - from numpy.random import normal, seed - seed(1234) - alpha = array(1+1j, dtype = self.dtype) - a = normal(0.,1.,(3,3)).astype(self.dtype) - a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype) - x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype) - x = x + x * array(1j, dtype = self.dtype) - y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype) - y = y + y * array(1j, dtype = self.dtype) - return alpha,a,x,y - def test_simple(self): - alpha,a,x,y = self.get_data() - # tranpose takes care of Fortran vs. 
C(and Python) memory layout - a = a * array(0.,dtype = self.dtype) - #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a - desired_a = alpha*transpose(x[:,newaxis]*y) + a - #self.blas_func(x,y,a,alpha = alpha) - fblas.cgeru(x,y,a,alpha = alpha) - assert_array_almost_equal(desired_a,a) - - #def test_x_stride(self): - # alpha,a,x,y = self.get_data(x_stride=2) - # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a - # self.blas_func(x,y,a,incx=2) - # assert_array_almost_equal(desired_a,a) - #def test_y_stride(self): - # alpha,a,x,y = self.get_data(y_stride=2) - # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a - # self.blas_func(x,y,a,incy=2) - # assert_array_almost_equal(desired_a,a) - -class TestCgeru(BaseGerComplex): - blas_func = fblas.cgeru - dtype = complex64 - def transform(self,x): - return x -class TestZgeru(BaseGerComplex): - blas_func = fblas.zgeru - dtype = complex128 - def transform(self,x): - return x - -class TestCgerc(BaseGerComplex): - blas_func = fblas.cgerc - dtype = complex64 - def transform(self,x): - return conjugate(x) - -class TestZgerc(BaseGerComplex): - blas_func = fblas.zgerc - dtype = complex128 - def transform(self,x): - return conjugate(x) -""" - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/linalg/tests/test_lapack.py b/scipy-0.10.1/scipy/linalg/tests/test_lapack.py deleted file mode 100644 index 6751232eb2..0000000000 --- a/scipy-0.10.1/scipy/linalg/tests/test_lapack.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python -# -# Created by: Pearu Peterson, September 2002 -# - -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_array_almost_equal, assert_ -from numpy import ones - -from scipy.linalg import flapack, clapack - - -class TestFlapackSimple(TestCase): - - def test_gebal(self): - a = [[1,2,3],[4,5,6],[7,8,9]] - a1 = [[1,0,0,3e-4], - [4,0,0,2e-3], - [7,1,0,0], - [0,1,0,0]] - for p in 'sdzc': - f = 
getattr(flapack,p+'gebal',None) - if f is None: continue - ba,lo,hi,pivscale,info = f(a) - assert_(not info,`info`) - assert_array_almost_equal(ba,a) - assert_equal((lo,hi),(0,len(a[0])-1)) - assert_array_almost_equal(pivscale,ones(len(a))) - - ba,lo,hi,pivscale,info = f(a1,permute=1,scale=1) - assert_(not info,`info`) - #print a1 - #print ba,lo,hi,pivscale - - def test_gehrd(self): - a = [[-149, -50,-154], - [ 537, 180, 546], - [ -27, -9, -25]] - for p in 'd': - f = getattr(flapack,p+'gehrd',None) - if f is None: continue - ht,tau,info = f(a) - assert_(not info,`info`) - -class TestLapack(TestCase): - - def test_flapack(self): - if hasattr(flapack,'empty_module'): - #flapack module is empty - pass - - def test_clapack(self): - if hasattr(clapack,'empty_module'): - #clapack module is empty - pass - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/linalg/tests/test_matfuncs.py b/scipy-0.10.1/scipy/linalg/tests/test_matfuncs.py deleted file mode 100644 index d665254259..0000000000 --- a/scipy-0.10.1/scipy/linalg/tests/test_matfuncs.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python -# -# Created by: Pearu Peterson, March 2002 -# -""" Test functions for linalg.matfuncs module - -""" - -from numpy import array, identity, dot, sqrt -from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal - -from scipy.linalg import signm, logm, sqrtm, expm, expm2, expm3 - - -class TestSignM(TestCase): - - def test_nils(self): - a = array([[ 29.2, -24.2, 69.5, 49.8, 7. ], - [ -9.2, 5.2, -18. , -16.8, -2. ], - [-10. , 6. , -20. , -18. , -2. ], - [ -9.6, 9.6, -25.5, -15.4, -2. ], - [ 9.8, -4.8, 18. , 18.2, 2. 
]]) - cr = array([[ 11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333], - [ -3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667], - [ -4.08,0.56,-4.92,-7.6 ,0.56], - [ -4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667], - [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]]) - r = signm(a) - assert_array_almost_equal(r,cr) - - def test_defective1(self): - a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]]) - r = signm(a, disp=False) - #XXX: what would be the correct result? - - def test_defective2(self): - a = array(( - [29.2,-24.2,69.5,49.8,7.0], - [-9.2,5.2,-18.0,-16.8,-2.0], - [-10.0,6.0,-20.0,-18.0,-2.0], - [-9.6,9.6,-25.5,-15.4,-2.0], - [9.8,-4.8,18.0,18.2,2.0])) - r = signm(a, disp=False) - #XXX: what would be the correct result? - - def test_defective3(self): - a = array([[ -2., 25., 0., 0., 0., 0., 0.], - [ 0., -3., 10., 3., 3., 3., 0.], - [ 0., 0., 2., 15., 3., 3., 0.], - [ 0., 0., 0., 0., 15., 3., 0.], - [ 0., 0., 0., 0., 3., 10., 0.], - [ 0., 0., 0., 0., 0., -2., 25.], - [ 0., 0., 0., 0., 0., 0., -3.]]) - r = signm(a, disp=False) - #XXX: what would be the correct result? - -class TestLogM(TestCase): - - def test_nils(self): - a = array([[ -2., 25., 0., 0., 0., 0., 0.], - [ 0., -3., 10., 3., 3., 3., 0.], - [ 0., 0., 2., 15., 3., 3., 0.], - [ 0., 0., 0., 0., 15., 3., 0.], - [ 0., 0., 0., 0., 3., 10., 0.], - [ 0., 0., 0., 0., 0., -2., 25.], - [ 0., 0., 0., 0., 0., 0., -3.]]) - m = (identity(7)*3.1+0j)-a - logm(m, disp=False) - #XXX: what would be the correct result? 
- - -class TestSqrtM(TestCase): - def test_bad(self): - # See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz - e = 2**-5 - se = sqrt(e) - a = array([[1.0,0,0,1], - [0,e,0,0], - [0,0,e,0], - [0,0,0,1]]) - sa = array([[1,0,0,0.5], - [0,se,0,0], - [0,0,se,0], - [0,0,0,1]]) - assert_array_almost_equal(dot(sa,sa),a) - esa = sqrtm(a, disp=False)[0] - assert_array_almost_equal(dot(esa,esa),a) - -class TestExpM(TestCase): - def test_zero(self): - a = array([[0.,0],[0,0]]) - assert_array_almost_equal(expm(a),[[1,0],[0,1]]) - assert_array_almost_equal(expm2(a),[[1,0],[0,1]]) - assert_array_almost_equal(expm3(a),[[1,0],[0,1]]) - - def test_consistency(self): - a = array([[0.,1],[-1,0]]) - assert_array_almost_equal(expm(a), expm2(a)) - assert_array_almost_equal(expm(a), expm3(a)) - - a = array([[1j,1],[-1,-2j]]) - assert_array_almost_equal(expm(a), expm2(a)) - assert_array_almost_equal(expm(a), expm3(a)) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/linalg/tests/test_special_matrices.py b/scipy-0.10.1/scipy/linalg/tests/test_special_matrices.py deleted file mode 100644 index 4d61ac5577..0000000000 --- a/scipy-0.10.1/scipy/linalg/tests/test_special_matrices.py +++ /dev/null @@ -1,441 +0,0 @@ -"""Tests for functions in special_matrices.py.""" - -from numpy import arange, add, array, eye, copy -from numpy.testing import TestCase, run_module_suite, assert_raises, \ - assert_equal, assert_array_equal, assert_array_almost_equal, \ - assert_allclose - -from scipy.linalg import toeplitz, hankel, circulant, hadamard, leslie, \ - companion, tri, triu, tril, kron, block_diag, \ - hilbert, invhilbert -from numpy.linalg import cond - - -def get_mat(n): - data = arange(n) - data = add.outer(data,data) - return data - - -class TestTri(TestCase): - def test_basic(self): - assert_equal(tri(4),array([[1,0,0,0], - [1,1,0,0], - [1,1,1,0], - [1,1,1,1]])) - assert_equal(tri(4,dtype='f'),array([[1,0,0,0], - [1,1,0,0], - [1,1,1,0], - [1,1,1,1]],'f')) - def 
test_diag(self): - assert_equal(tri(4,k=1),array([[1,1,0,0], - [1,1,1,0], - [1,1,1,1], - [1,1,1,1]])) - assert_equal(tri(4,k=-1),array([[0,0,0,0], - [1,0,0,0], - [1,1,0,0], - [1,1,1,0]])) - def test_2d(self): - assert_equal(tri(4,3),array([[1,0,0], - [1,1,0], - [1,1,1], - [1,1,1]])) - assert_equal(tri(3,4),array([[1,0,0,0], - [1,1,0,0], - [1,1,1,0]])) - def test_diag2d(self): - assert_equal(tri(3,4,k=2),array([[1,1,1,0], - [1,1,1,1], - [1,1,1,1]])) - assert_equal(tri(4,3,k=-2),array([[0,0,0], - [0,0,0], - [1,0,0], - [1,1,0]])) - -class TestTril(TestCase): - def test_basic(self): - a = (100*get_mat(5)).astype('l') - b = a.copy() - for k in range(5): - for l in range(k+1,5): - b[k,l] = 0 - assert_equal(tril(a),b) - - def test_diag(self): - a = (100*get_mat(5)).astype('f') - b = a.copy() - for k in range(5): - for l in range(k+3,5): - b[k,l] = 0 - assert_equal(tril(a,k=2),b) - b = a.copy() - for k in range(5): - for l in range(max((k-1,0)),5): - b[k,l] = 0 - assert_equal(tril(a,k=-2),b) - - -class TestTriu(TestCase): - def test_basic(self): - a = (100*get_mat(5)).astype('l') - b = a.copy() - for k in range(5): - for l in range(k+1,5): - b[l,k] = 0 - assert_equal(triu(a),b) - - def test_diag(self): - a = (100*get_mat(5)).astype('f') - b = a.copy() - for k in range(5): - for l in range(max((k-1,0)),5): - b[l,k] = 0 - assert_equal(triu(a,k=2),b) - b = a.copy() - for k in range(5): - for l in range(k+3,5): - b[l,k] = 0 - assert_equal(triu(a,k=-2),b) - - -class TestToeplitz(TestCase): - - def test_basic(self): - y = toeplitz([1,2,3]) - assert_array_equal(y,[[1,2,3],[2,1,2],[3,2,1]]) - y = toeplitz([1,2,3],[1,4,5]) - assert_array_equal(y,[[1,4,5],[2,1,4],[3,2,1]]) - - def test_complex_01(self): - data = (1.0 + arange(3.0)) * (1.0 + 1.0j) - x = copy(data) - t = toeplitz(x) - # Calling toeplitz should not change x. - assert_array_equal(x, data) - # According to the docstring, x should be the first column of t. 
- col0 = t[:,0] - assert_array_equal(col0, data) - assert_array_equal(t[0,1:], data[1:].conj()) - - def test_scalar_00(self): - """Scalar arguments still produce a 2D array.""" - t = toeplitz(10) - assert_array_equal(t, [[10]]) - t = toeplitz(10, 20) - assert_array_equal(t, [[10]]) - - def test_scalar_01(self): - c = array([1,2,3]) - t = toeplitz(c, 1) - assert_array_equal(t, [[1],[2],[3]]) - - def test_scalar_02(self): - c = array([1,2,3]) - t = toeplitz(c, array(1)) - assert_array_equal(t, [[1],[2],[3]]) - - def test_scalar_03(self): - c = array([1,2,3]) - t = toeplitz(c, array([1])) - assert_array_equal(t, [[1],[2],[3]]) - - def test_scalar_04(self): - r = array([10,2,3]) - t = toeplitz(1, r) - assert_array_equal(t, [[1,2,3]]) - - -class TestHankel(TestCase): - def test_basic(self): - y = hankel([1,2,3]) - assert_array_equal(y, [[1,2,3], [2,3,0], [3,0,0]]) - y = hankel([1,2,3], [3,4,5]) - assert_array_equal(y, [[1,2,3], [2,3,4], [3,4,5]]) - - -class TestCirculant(TestCase): - def test_basic(self): - y = circulant([1,2,3]) - assert_array_equal(y, [[1,3,2], [2,1,3], [3,2,1]]) - - -class TestHadamard(TestCase): - - def test_basic(self): - - y = hadamard(1) - assert_array_equal(y, [[1]]) - - y = hadamard(2, dtype=float) - assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]]) - - y = hadamard(4) - assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]]) - - assert_raises(ValueError, hadamard, 0) - assert_raises(ValueError, hadamard, 5) - - -class TestLeslie(TestCase): - - def test_bad_shapes(self): - assert_raises(ValueError, leslie, [[1,1],[2,2]], [3,4,5]) - assert_raises(ValueError, leslie, [3,4,5], [[1,1],[2,2]]) - assert_raises(ValueError, leslie, [1,2], [1,2]) - assert_raises(ValueError, leslie, [1], []) - - def test_basic(self): - a = leslie([1, 2, 3], [0.25, 0.5]) - expected = array([ - [1.0, 2.0, 3.0], - [0.25, 0.0, 0.0], - [0.0, 0.5, 0.0]]) - assert_array_equal(a, expected) - - -class TestCompanion(TestCase): - - def test_bad_shapes(self): - 
assert_raises(ValueError, companion, [[1,1],[2,2]]) - assert_raises(ValueError, companion, [0,4,5]) - assert_raises(ValueError, companion, [1]) - assert_raises(ValueError, companion, []) - - def test_basic(self): - c = companion([1, 2, 3]) - expected = array([ - [-2.0, -3.0], - [ 1.0, 0.0]]) - assert_array_equal(c, expected) - - c = companion([2.0, 5.0, -10.0]) - expected = array([ - [-2.5, 5.0], - [ 1.0, 0.0]]) - assert_array_equal(c, expected) - - -class TestBlockDiag: - def test_basic(self): - x = block_diag(eye(2), [[1,2], [3,4], [5,6]], [[1, 2, 3]]) - assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 2, 0, 0, 0], - [0, 0, 3, 4, 0, 0, 0], - [0, 0, 5, 6, 0, 0, 0], - [0, 0, 0, 0, 1, 2, 3]]) - - def test_dtype(self): - x = block_diag([[1.5]]) - assert_equal(x.dtype, float) - - x = block_diag([[True]]) - assert_equal(x.dtype, bool) - - def test_scalar_and_1d_args(self): - a = block_diag(1) - assert_equal(a.shape, (1,1)) - assert_array_equal(a, [[1]]) - - a = block_diag([2,3], 4) - assert_array_equal(a, [[2, 3, 0], [0, 0, 4]]) - - def test_bad_arg(self): - assert_raises(ValueError, block_diag, [[[1]]]) - - def test_no_args(self): - a = block_diag() - assert_equal(a.ndim, 2) - assert_equal(a.nbytes, 0) - - -class TestKron: - - def test_basic(self): - - a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]])) - assert_array_equal(a, array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]])) - - m1 = array([[1, 2], [3, 4]]) - m2 = array([[10], [11]]) - a = kron(m1, m2) - expected = array([[ 10, 20 ], - [ 11, 22 ], - [ 30, 40 ], - [ 33, 44 ]]) - assert_array_equal(a, expected) - - -class TestHilbert(TestCase): - - def test_basic(self): - h3 = array([[1.0, 1/2., 1/3.], - [1/2., 1/3., 1/4.], - [1/3., 1/4., 1/5.]]) - assert_array_almost_equal(hilbert(3), h3) - - assert_array_equal(hilbert(1), [[1.0]]) - - h0 = hilbert(0) - assert_equal(h0.shape, (0,0)) - - -class TestInvHilbert(TestCase): - - def test_basic(self): - invh1 = array([[1]]) - 
assert_array_equal(invhilbert(1, exact=True), invh1) - assert_array_equal(invhilbert(1), invh1) - - invh2 = array([[ 4, -6], - [-6, 12]]) - assert_array_equal(invhilbert(2, exact=True), invh2) - assert_array_almost_equal(invhilbert(2), invh2) - - invh3 = array([[ 9, -36, 30], - [-36, 192, -180], - [30, -180, 180]]) - assert_array_equal(invhilbert(3, exact=True), invh3) - assert_array_almost_equal(invhilbert(3), invh3) - - invh4 = array([[ 16, -120, 240, -140], - [-120, 1200, -2700, 1680], - [ 240, -2700, 6480, -4200], - [-140, 1680, -4200, 2800]]) - assert_array_equal(invhilbert(4, exact=True), invh4) - assert_array_almost_equal(invhilbert(4), invh4) - - invh5 = array([[ 25, -300, 1050, -1400, 630], - [ -300, 4800, -18900, 26880, -12600], - [ 1050, -18900, 79380, -117600, 56700], - [-1400, 26880, -117600, 179200, -88200], - [ 630, -12600, 56700, -88200, 44100]]) - assert_array_equal(invhilbert(5, exact=True), invh5) - assert_array_almost_equal(invhilbert(5), invh5) - - invh17 = array([ - [289, -41616, 1976760, -46124400, 629598060, -5540462928, - 33374693352, -143034400080, 446982500250, -1033026222800, - 1774926873720, -2258997839280, 2099709530100, -1384423866000, - 613101997800, -163493866080, 19835652870], - [-41616, 7990272, -426980160, 10627061760, -151103534400, 1367702848512, - -8410422724704, 36616806420480, -115857864064800, 270465047424000, - -468580694662080, 600545887119360, -561522320049600, 372133135180800, - -165537539406000, 44316454993920, -5395297580640], - [1976760, -426980160, 24337869120, -630981792000, 9228108708000, - -85267724461920, 532660105897920, -2348052711713280, 7504429831470000, - -17664748409880000, 30818191841236800, -39732544853164800, - 37341234283298400, -24857330514030000, 11100752642520000, - -2982128117299200, 364182586693200], - [-46124400, 10627061760, -630981792000, 16826181120000, - -251209625940000, 2358021022156800, -14914482965141760, - 66409571644416000, -214015221119700000, 507295338950400000, - -890303319857952000, 
1153715376477081600, -1089119333262870000, - 727848632044800000, -326170262829600000, 87894302404608000, - -10763618673376800], - [629598060, -151103534400, 9228108708000, - -251209625940000, 3810012660090000, -36210360321495360, - 231343968720664800, -1038687206500944000, 3370739732635275000, - -8037460526495400000, 14178080368737885600, -18454939322943942000, - 17489975175339030000, -11728977435138600000, 5272370630081100000, - -1424711708039692800, 174908803442373000], - [-5540462928, 1367702848512, -85267724461920, 2358021022156800, - -36210360321495360, 347619459086355456, -2239409617216035264, - 10124803292907663360, -33052510749726468000, 79217210949138662400, - -140362995650505067440, 183420385176741672960, -174433352415381259200, - 117339159519533952000, -52892422160973595200, 14328529177999196160, - -1763080738699119840], - [33374693352, -8410422724704, 532660105897920, - -14914482965141760, 231343968720664800, -2239409617216035264, - 14527452132196331328, -66072377044391477760, 216799987176909536400, - -521925895055522958000, 928414062734059661760, -1217424500995626443520, - 1161358898976091015200, -783401860847777371200, 354015418167362952000, - -96120549902411274240, 11851820521255194480], - [-143034400080, 36616806420480, -2348052711713280, 66409571644416000, - -1038687206500944000, 10124803292907663360, -66072377044391477760, - 302045152202932469760, -995510145200094810000, 2405996923185123840000, - -4294704507885446054400, 5649058909023744614400, - -5403874060541811254400, 3654352703663101440000, - -1655137020003255360000, 450325202737117593600, -55630994283442749600], - [446982500250, -115857864064800, 7504429831470000, -214015221119700000, - 3370739732635275000, -33052510749726468000, 216799987176909536400, - -995510145200094810000, 3293967392206196062500, - -7988661659013106500000, 14303908928401362270000, - -18866974090684772052000, 18093328327706957325000, - -12263364009096700500000, 5565847995255512250000, - -1517208935002984080000, 
187754605706619279900], - [-1033026222800, 270465047424000, -17664748409880000, - 507295338950400000, -8037460526495400000, 79217210949138662400, - -521925895055522958000, 2405996923185123840000, - -7988661659013106500000, 19434404971634224000000, - -34894474126569249192000, 46141453390504792320000, - -44349976506971935800000, 30121928988527376000000, - -13697025107665828500000, 3740200989399948902400, - -463591619028689580000], - [1774926873720, -468580694662080, - 30818191841236800, -890303319857952000, 14178080368737885600, - -140362995650505067440, 928414062734059661760, -4294704507885446054400, - 14303908928401362270000, -34894474126569249192000, - 62810053427824648545600, -83243376594051600326400, - 80177044485212743068000, -54558343880470209780000, - 24851882355348879230400, -6797096028813368678400, 843736746632215035600], - [-2258997839280, 600545887119360, -39732544853164800, - 1153715376477081600, -18454939322943942000, 183420385176741672960, - -1217424500995626443520, 5649058909023744614400, - -18866974090684772052000, 46141453390504792320000, - -83243376594051600326400, 110552468520163390156800, - -106681852579497947388000, 72720410752415168870400, - -33177973900974346080000, 9087761081682520473600, - -1129631016152221783200], - [2099709530100, -561522320049600, 37341234283298400, - -1089119333262870000, 17489975175339030000, -174433352415381259200, - 1161358898976091015200, -5403874060541811254400, - 18093328327706957325000, -44349976506971935800000, - 80177044485212743068000, -106681852579497947388000, - 103125790826848015808400, -70409051543137015800000, - 32171029219823375700000, -8824053728865840192000, - 1098252376814660067000], - [-1384423866000, 372133135180800, - -24857330514030000, 727848632044800000, -11728977435138600000, - 117339159519533952000, -783401860847777371200, 3654352703663101440000, - -12263364009096700500000, 30121928988527376000000, - -54558343880470209780000, 72720410752415168870400, - -70409051543137015800000, 
48142941226076592000000, - -22027500987368499000000, 6049545098753157120000, - -753830033789944188000], - [613101997800, -165537539406000, - 11100752642520000, -326170262829600000, 5272370630081100000, - -52892422160973595200, 354015418167362952000, -1655137020003255360000, - 5565847995255512250000, -13697025107665828500000, - 24851882355348879230400, -33177973900974346080000, - 32171029219823375700000, -22027500987368499000000, - 10091416708498869000000, -2774765838662800128000, 346146444087219270000], - [-163493866080, 44316454993920, -2982128117299200, 87894302404608000, - -1424711708039692800, 14328529177999196160, -96120549902411274240, - 450325202737117593600, -1517208935002984080000, 3740200989399948902400, - -6797096028813368678400, 9087761081682520473600, - -8824053728865840192000, 6049545098753157120000, - -2774765838662800128000, 763806510427609497600, -95382575704033754400], - [19835652870, -5395297580640, 364182586693200, -10763618673376800, - 174908803442373000, -1763080738699119840, 11851820521255194480, - -55630994283442749600, 187754605706619279900, -463591619028689580000, - 843736746632215035600, -1129631016152221783200, 1098252376814660067000, - -753830033789944188000, 346146444087219270000, -95382575704033754400, - 11922821963004219300] - ]) - assert_array_equal(invhilbert(17, exact=True), invh17) - assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12) - - def test_inverse(self): - for n in xrange(1, 10): - a = hilbert(n) - b = invhilbert(n) - # The Hilbert matrix is increasingly badly conditioned, - # so take that into account in the test - c = cond(a) - assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/maxentropy/__init__.py b/scipy-0.10.1/scipy/maxentropy/__init__.py deleted file mode 100644 index 2062c81ea4..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from info import __doc__ -from 
maxentropy import * -from scipy.misc import logsumexp - -from numpy.testing import Tester -test = Tester().test - -import warnings -warnings.warn(""" -The scipy.maxentropy module is deprecated in scipy 0.10, and scheduled to be -removed in 0.11. - -If you are using some of the functionality in this module and are of the -opinion that it should be kept or moved somewhere - or you are even interested -to maintain/improve this whole module - please ask on the scipy-dev mailing -list. - -The logsumexp function has already been moved to scipy.misc.""", -DeprecationWarning) diff --git a/scipy-0.10.1/scipy/maxentropy/examples/bergerexample.py b/scipy-0.10.1/scipy/maxentropy/examples/bergerexample.py deleted file mode 100644 index 4a241960aa..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/examples/bergerexample.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python - -""" Example use of the maximum entropy module: - - Machine translation example -- English to French -- from the paper 'A - maximum entropy approach to natural language processing' by Berger et - al., 1996. - - Consider the translation of the English word 'in' into French. We - notice in a corpus of parallel texts the following facts: - - (1) p(dans) + p(en) + p(a) + p(au cours de) + p(pendant) = 1 - (2) p(dans) + p(en) = 3/10 - (3) p(dans) + p(a) = 1/2 - - This code finds the probability distribution with maximal entropy - subject to these constraints. 
-""" - -__author__ = 'Ed Schofield' -__version__= '2.1' - -from scipy import maxentropy - -a_grave = u'\u00e0' - -samplespace = ['dans', 'en', a_grave, 'au cours de', 'pendant'] - -def f0(x): - return x in samplespace - -def f1(x): - return x=='dans' or x=='en' - -def f2(x): - return x=='dans' or x==a_grave - -f = [f0, f1, f2] - -model = maxentropy.model(f, samplespace) - -# Now set the desired feature expectations -K = [1.0, 0.3, 0.5] - -model.verbose = True - -# Fit the model -model.fit(K) - -# Output the distribution -print "\nFitted model parameters are:\n" + str(model.params) -print "\nFitted distribution is:" -p = model.probdist() -for j in range(len(model.samplespace)): - x = model.samplespace[j] - print ("\tx = %-15s" %(x + ":",) + " p(x) = "+str(p[j])).encode('utf-8') - - -# Now show how well the constraints are satisfied: -print -print "Desired constraints:" -print "\tp['dans'] + p['en'] = 0.3" -print ("\tp['dans'] + p['" + a_grave + "'] = 0.5").encode('utf-8') -print -print "Actual expectations under the fitted model:" -print "\tp['dans'] + p['en'] =", p[0] + p[1] -print ("\tp['dans'] + p['" + a_grave + "'] = " + str(p[0]+p[2])).encode('utf-8') -# (Or substitute "x.encode('latin-1')" if you have a primitive terminal.) diff --git a/scipy-0.10.1/scipy/maxentropy/examples/bergerexamplesimulated.py b/scipy-0.10.1/scipy/maxentropy/examples/bergerexamplesimulated.py deleted file mode 100644 index 9fe85d6a3c..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/examples/bergerexamplesimulated.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python - -""" Example use of the maximum entropy module fit a model using - simulation: - - Machine translation example -- English to French -- from the paper 'A - maximum entropy approach to natural language processing' by Berger et - al., 1996. - - Consider the translation of the English word 'in' into French. 
We - notice in a corpus of parallel texts the following facts: - - (1) p(dans) + p(en) + p(a) + p(au cours de) + p(pendant) = 1 - (2) p(dans) + p(en) = 3/10 - (3) p(dans) + p(a) = 1/2 - - This code finds the probability distribution with maximal entropy - subject to these constraints. - - This problem is small enough to solve analytically, but this code - shows the steps one would take to fit a model on a continuous or - large discrete sample space. -""" - -__author__ = 'Ed Schofield' -__version__ = '2.1' - - -import sys -from scipy import maxentropy -from scipy.sandbox import montecarlo - -try: - algorithm = sys.argv[1] -except IndexError: - algorithm = 'CG' -else: - assert algorithm in ['CG', 'BFGS', 'LBFGSB', 'Powell', 'Nelder-Mead'] - -a_grave = u'\u00e0' - -samplespace = ['dans', 'en', a_grave, 'au cours de', 'pendant'] - -def f0(x): - return x in samplespace - -def f1(x): - return x == 'dans' or x == 'en' - -def f2(x): - return x == 'dans' or x == a_grave - -f = [f0, f1, f2] - -model = maxentropy.bigmodel() - -# Now set the desired feature expectations -K = [1.0, 0.3, 0.5] - -# Define a uniform instrumental distribution for sampling -samplefreq = {} -for e in samplespace: - samplefreq[e] = 1 - -sampler = montecarlo.dictsampler(samplefreq) - -n = 10**4 -m = 3 - -# Now create a generator of features of random points - -SPARSEFORMAT = 'csc_matrix' -# Could also specify 'csr_matrix', 'dok_matrix', or (PySparse's) 'll_mat' - -def sampleFgen(sampler, f, n): - while True: - xs, logprobs = sampler.sample(n, return_probs=2) - F = maxentropy.sparsefeaturematrix(f, xs, SPARSEFORMAT) - yield F, logprobs - -print "Generating an initial sample ..." 
-model.setsampleFgen(sampleFgen(sampler, f, n)) - -model.verbose = True - -# Fit the model -model.avegtol = 1e-4 -model.fit(K, algorithm=algorithm) - -# Output the true distribution -print "\nFitted model parameters are:\n" + str(model.params) -smallmodel = maxentropy.model(f, samplespace) -smallmodel.setparams(model.params) -print "\nFitted distribution is:" -p = smallmodel.probdist() -for j in range(len(smallmodel.samplespace)): - x = smallmodel.samplespace[j] - print ("\tx = %-15s" %(x + ":",) + " p(x) = "+str(p[j])).encode('utf-8') - - -# Now show how well the constraints are satisfied: -print -print "Desired constraints:" -print "\tp['dans'] + p['en'] = 0.3" -print ("\tp['dans'] + p['" + a_grave + "'] = 0.5").encode('utf-8') -print -print "Actual expectations under the fitted model:" -print "\tp['dans'] + p['en'] =", p[0] + p[1] -print ("\tp['dans'] + p['" + a_grave + "'] = " + \ - str(p[0]+p[2])).encode('utf-8') -# (Or substitute "x.encode('latin-1')" if you have a primitive terminal.) - -print "\nEstimated error in constraint satisfaction (should be close to 0):\n" \ - + str(abs(model.expectations() - K)) -print "\nTrue error in constraint satisfaction (should be close to 0):\n" + \ - str(abs(smallmodel.expectations() - K)) diff --git a/scipy-0.10.1/scipy/maxentropy/examples/conditionalexample1.py b/scipy-0.10.1/scipy/maxentropy/examples/conditionalexample1.py deleted file mode 100644 index 698bb2f20a..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/examples/conditionalexample1.py +++ /dev/null @@ -1,60 +0,0 @@ -# Test for conditional models -# Ed Schofield, 2006 - -from numpy import * -from scipy.maxentropy import * - -# Two contexts W, four labels x -# E_p f_0(W, X) = 0.4 -# where f_0(w, x) = indicator func "is the label x=0 and is the context w=0?" 
-# So we want the distribution: -# x \ w 0 1 -# 0 0.4 0.25 -# 1 0.2 0.25 -# 2 0.2 0.25 -# 3 0.2 0.25 - -# We can achieve this by creating a feature matrix with one row per constraint, -# as follows: -F = array([[1, 0, 0, 0, 0, 0, 0, 0]]) -# Each column represents one (w, x) pair. The number of columns is the product -# |w| * |x|, here 8. The order is (w0,x0), (w0,x1), (w0, x2), ..., (w1, x0), -# etc. -numcontexts = 2 -numlabels = 4 - -# OLD: -# These can be in any order. The indices_context parameter to the -# conditionalmodel constructor records this order, so indices_context[0] is an -# array of indices all labels x in context w=0. The simplest ordering is: -# (w0, x0), (w0, x1), ..., (w0, x{n-1}), (w1, x0), ... -# in which case the parameter is: -# indices_context = array([[0, 1, 2, 3], [4, 5, 6, 7]]) - -# The counts of each (w, x) pair, estimated from a corpus or training dataset, is -# stored as an array with |w| * |x| elements in same order as before. -counts = array([4, 3, 2, 1, 4, 3, 2, 1]) -# Note that, since the only active feature was for the first element (w0, x0), -# only the first value is relevant. The others are subject to no constraints, -# and will be chosen to maximize entropy. - -model = conditionalmodel(F, counts, numcontexts) -model.verbose = True -model.fit() -# Do it again, since the CG algorithm gets stuck sometimes. Why is this?? -model.fit() -# Note: to use the bound-constrained limited memory BFGS algorithm instead, we -# would use: -# model.fit(algorithm='LBFGSB') - -# Display the fitted model -pmf = model.pmf() -# The elements of this are flatted like the rows of F and p_tilde. 
We display -# them nicely: -print "x \ w \t 0 \t 1", -for x in range(4): - print '\n' + str(x), - for w in range(2): - print ' \t %.3f' % pmf[w*numlabels + x], - # print ' \t %.3f' % pmf[indices_context[w]][x], -print diff --git a/scipy-0.10.1/scipy/maxentropy/examples/conditionalexample2.py b/scipy-0.10.1/scipy/maxentropy/examples/conditionalexample2.py deleted file mode 100644 index ae602ff048..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/examples/conditionalexample2.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" Example use of the maximum entropy package for a classification task. - - An extension of the machine translation example from the paper 'A maximum - entropy approach to natural language processing' by Berger et al., 1996. - - Consider the translation of the English word 'in' into French. Suppose we - notice the following facts in a corpus of parallel texts: - - (1) p(dans) + p(en) + p(à) + p(au cours de) + p(pendant) = 1 - (2) p(dans | next English word = 'a' or 'the') = 8/10 - (3) p(dans | c) + p(à | c) = 1/2 for all other c - - This code finds the probability distribution with maximal entropy - subject to these constraints. 
-""" - -__author__ = 'Ed Schofield' - -from scipy import maxentropy, sparse - -samplespace = ['dans', 'en', 'à', 'au cours de', 'pendant'] -# Occurrences of French words, and their 'next English word' contexts, in -# a hypothetical parallel corpus: -corpus = [('dans', 'a'), ('dans', 'a'), ('dans', 'a'), ('dans', 'the'), \ - ('pendant', 'a'), ('dans', 'happy'), ('au cours de', 'healthy')] -contexts = list(set([c for (x, c) in corpus])) - -def f0(x, c): - return x in samplespace - -def f1(x, c): - if x == 'dans' and c in ['a', 'the']: - return True - else: - return False - -def f2(x, c): - return (x=='dans' or x=='à') and c not in ['a', 'the'] - -f = [f0, f1, f2] - -numcontexts = len(contexts) -numsamplepoints = len(samplespace) - -# Utility data structures: store the indices of each context and label in a -# dict for fast lookups of their indices into their respective lists: -samplespace_index = dict([(x, i) for i, x in enumerate(samplespace)]) -context_index = dict([(c, i) for i, c in enumerate(contexts)]) - -# # Dense array version: -# F = numpy.array([[f_i(x, c) for c in contexts for x in samplespace] for f_i in f]) - -# NEW: Sparse matrix version: -# Sparse matrices are only two dimensional in SciPy. Store as m x size, where -# size is |W|*|X|. -F = sparse.lil_matrix((len(f), numcontexts * numsamplepoints)) -for i, f_i in enumerate(f): - for c, context in enumerate(contexts): - for x, samplepoint in enumerate(samplespace): - F[i, c * numsamplepoints + x] = f_i(samplepoint, context) - - -# Store the counts of each (context, sample point) pair in the corpus, in a -# sparse matrix of dimensions (1 x size), where size is |W| x |X|. The element -# N[0, i*numcontexts+x] is the number of occurrences of x in context c in the -# training data. -# (The maxentropy module infers the empirical pmf etc. 
from the counts N) - -N = sparse.lil_matrix((1, numcontexts * len(samplespace))) # initialized to zero -for (x, c) in corpus: - N[0, context_index[c] * numsamplepoints + samplespace_index[x]] += 1 - -# Ideally, this could be stored as a sparse matrix of size C x X, whose ith row -# vector contains all points x_j in the sample space X in context c_i: -# N = sparse.lil_matrix((len(contexts), len(samplespace))) # initialized to zero -# for (c, x) in corpus: -# N[c, x] += 1 - -# This would be a nicer input format, but computations are more efficient -# internally with one long row vector. What we really need is for sparse -# matrices to offer a .reshape method so this conversion could be done -# internally and transparently. Then the numcontexts argument to the -# conditionalmodel constructor could also be inferred from the matrix -# dimensions. - -# Create a model -model = maxentropy.conditionalmodel(F, N, numcontexts) - -model.verbose = True - -# Fit the model -model.fit() - -# Output the distribution -print "\nFitted model parameters are:\n" + str(model.params) - -p = model.probdist() - -print "\npmf table p(x | c), where c is the context 'the':" -c = contexts.index('the') -print p[c*numsamplepoints:(c+1)*numsamplepoints] - -print "\nFitted distribution is:" -print "%12s" % ("c \ x"), -for label in samplespace: - print "%12s" % label, - -for c, context in enumerate(contexts): - print "\n%12s" % context, - for x, label in enumerate(samplespace): - print ("%12.3f" % p[c*numsamplepoints+x]), - -print diff --git a/scipy-0.10.1/scipy/maxentropy/info.py b/scipy-0.10.1/scipy/maxentropy/info.py deleted file mode 100644 index 8051dd9252..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/info.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -================================================ -Maximum entropy models (:mod:`scipy.maxentropy`) -================================================ - -.. currentmodule:: scipy.maxentropy - -.. 
warning:: This module is deprecated in scipy 0.10, and will be removed in - 0.11. Do not use this module in your new code. For questions about - this deprecation, please ask on the scipy-dev mailing list. - -Package content -=============== - -Models: - -.. autosummary:: - :toctree: generated/ - - model - bigmodel - basemodel - conditionalmodel - -Utilities: - -.. autosummary:: - :toctree: generated/ - - arrayexp - arrayexpcomplex - columnmeans - columnvariances - densefeaturematrix - densefeatures - dotprod - flatten - innerprod - innerprodtranspose - logsumexp - logsumexp_naive - robustlog - rowmeans - sample_wr - sparsefeaturematrix - sparsefeatures - - -Usage information -================= - -Contains two classes for fitting maximum entropy models (also known -as "exponential family" models) subject to linear constraints on the -expectations of arbitrary feature statistics. One class, "model", is -for small discrete sample spaces, using explicit summation. The other, -"bigmodel", is for sample spaces that are either continuous (and -perhaps high-dimensional) or discrete but too large to sum over, and -uses importance sampling. conditional Monte Carlo methods. - -The maximum entropy model has exponential form - -.. - p(x) = exp(theta^T f(x)) / Z(theta) - -.. math:: - p\\left(x\\right)=\\exp\\left(\\frac{\\theta^{T}f\\left(x\\right)} - {Z\\left(\\theta\\right)}\\right) - -with a real parameter vector theta of the same length as the feature -statistic f(x), For more background, see, for example, Cover and -Thomas (1991), *Elements of Information Theory*. - -See the file bergerexample.py for a walk-through of how to use these -routines when the sample space is small enough to be enumerated. - -See bergerexamplesimulated.py for a a similar walk-through using -simulation. 
- -""" - -# Copyright: Ed Schofield, 2003-2006 -# License: BSD-style (see LICENSE.txt in main source directory) - -postpone_import = 1 -depends = ['optimize'] diff --git a/scipy-0.10.1/scipy/maxentropy/maxentropy.py b/scipy-0.10.1/scipy/maxentropy/maxentropy.py deleted file mode 100644 index b4b13fc3ab..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/maxentropy.py +++ /dev/null @@ -1,1718 +0,0 @@ -# maxentropy.py: Routines for fitting maximum entropy models. - -# Copyright: Ed Schofield, 2003-2006 -# License: BSD-style (see LICENSE.txt in main source directory) - -# Future imports must come before any code in 2.5 -from __future__ import division - -__author__ = "Ed Schofield" -__version__ = '2.1' -__changelog__ = """ -This module is an adaptation of "ftwmaxent" by Ed Schofield, first posted -on SourceForge as part of the "textmodeller" project in 2002. The -official repository is now SciPy (since Nov 2005); the SourceForge -ftwmaxent code will not be developed further. - ------------- - -Change log: - -Since 2.0: -* Code simplification. Removed dualapprox(), gradapprox() and other - alias methods for bigmodel objects. Use dual(), grad() etc. instead. -* Added support for testing on an external sample during optimization. -* Removed incomplete support for the (slow) GIS algorithm - -Since 2.0-alpha4: -* Name change maxent -> maxentropy -* Removed online (sequential) estimation of feature expectations and - variances. - -Since v2.0-alpha3: -(1) Name change ftwmaxent -> scipy/maxent -(2) Modification for inclusion in scipy tree. Broke one big class into - two smaller classes, one for small models, the other for large models. - Here a 'small' model is one defined on a sample space small enough to sum - over in practice, whereas a 'large' model is on a sample space that is - high-dimensional and continuous or discrete but too large to sum over, - and requires Monte Carlo simulation. 
-(3) Refactoring: - self.Eapprox -> self.mu - p_0 -> aux_dist - p0 -> aux_dist - p_dot -> aux_dist_dot - qdot -> p_dot - q_dot -> p_dot - q_theta -> p_theta - E_p -> E_p_tilde - E_q -> E_p - -Since v2.0-alpha2: -Using multiple static feature matrices is now supported. The generator -function supplied to generate feature matrices is called matrixtrials' -times each iteration. This is useful for variance estimation of the E -and log Z estimators across the trials, without drawing another sample -each iteration (when staticsample = True). - -Since v2.0-alpha1: -Sample feature matrices, if used, are sampled on the fly with a supplied -generator function, optionally multiple times to estimate the sample -variance of the feature expectation estimates. An alternative is the -online estimation alg. - -Since v0.8.5: -Added code for online (sequential) estimation of feature expectations and -variances. - - -""" - - -import math, types, cPickle -import numpy as np -from numpy import exp, asarray -from scipy import optimize -from scipy.linalg import norm -from scipy.misc import logsumexp -from scipy.maxentropy.maxentutils import arrayexp, \ - innerprod, innerprodtranspose, columnmeans, columnvariances, \ - flatten, DivergenceError, sparsefeaturematrix - - -class basemodel(object): - """A base class providing generic functionality for both small and - large maximum entropy models. Cannot be instantiated. 
- """ - - def __init__(self): - self.format = self.__class__.__name__[:4] - if self.format == 'base': - raise ValueError("this class cannot be instantiated directly") - self.verbose = False - - self.maxgtol = 1e-5 - # Required tolerance of gradient on average (closeness to zero,axis=0) for - # CG optimization: - self.avegtol = 1e-3 - # Default tolerance for the other optimization algorithms: - self.tol = 1e-4 - # Default tolerance for stochastic approximation: stop if - # ||params_k - params_{k-1}|| < paramstol: - self.paramstol = 1e-5 - - self.maxiter = 1000 - self.maxfun = 1500 - self.mindual = -100. # The entropy dual must actually be - # non-negative, but the estimate may be - # slightly out with bigmodel instances - # without implying divergence to -inf - self.callingback = False - self.iters = 0 # the number of iterations so far of the - # optimization algorithm - self.fnevals = 0 - self.gradevals = 0 - - # Variances for a Gaussian prior on the parameters for smoothing - self.sigma2 = None - - # Store the duals for each fn evaluation during fitting? - self.storeduals = False - self.duals = {} - self.storegradnorms = False - self.gradnorms = {} - - # Do we seek to minimize the KL divergence between the model and a - # prior density p_0? If not, set this to None; then we maximize the - # entropy. If so, set this to an array of the log probability densities - # p_0(x) for each x in the sample space. For bigmodel objects, set this - # to an array of the log probability densities p_0(x) for each x in the - # random sample from the auxiliary distribution. - self.priorlogprobs = None - - # By default, use the sample matrix sampleF to estimate the - # entropy dual and its gradient. Otherwise, set self.external to - # the index of the sample feature matrix in the list self.externalFs. - # This applies to 'bigmodel' objects only, but setting this here - # simplifies the code in dual() and grad(). 
- self.external = None - self.externalpriorlogprobs = None - - - def fit(self, K, algorithm='CG'): - """Fit the maxent model p whose feature expectations are given - by the vector K. - - Model expectations are computed either exactly or using Monte - Carlo simulation, depending on the 'func' and 'grad' parameters - passed to this function. - - For 'model' instances, expectations are computed exactly, by summing - over the given sample space. If the sample space is continuous or too - large to iterate over, use the 'bigmodel' class instead. - - For 'bigmodel' instances, the model expectations are not computed - exactly (by summing or integrating over a sample space) but - approximately (by Monte Carlo simulation). Simulation is necessary - when the sample space is too large to sum or integrate over in - practice, like a continuous sample space in more than about 4 - dimensions or a large discrete space like all possible sentences in a - natural language. - - Approximating the expectations by sampling requires an instrumental - distribution that should be close to the model for fast convergence. - The tails should be fatter than the model. This instrumental - distribution is specified by calling setsampleFgen() with a - user-supplied generator function that yields a matrix of features of a - random sample and its log pdf values. - - The algorithm can be 'CG', 'BFGS', 'LBFGSB', 'Powell', or - 'Nelder-Mead'. - - The CG (conjugate gradients) method is the default; it is quite fast - and requires only linear space in the number of parameters, (not - quadratic, like Newton-based methods). - - The BFGS (Broyden-Fletcher-Goldfarb-Shanno) algorithm is a - variable metric Newton method. It is perhaps faster than the CG - method but requires O(N^2) instead of O(N) memory, so it is - infeasible for more than about 10^3 parameters. - - The Powell algorithm doesn't require gradients. For small models - it is slow but robust. 
For big models (where func and grad are - simulated) with large variance in the function estimates, this - may be less robust than the gradient-based algorithms. - """ - dual = self.dual - grad = self.grad - - if isinstance(self, bigmodel): - # Ensure the sample matrix has been set - if not hasattr(self, 'sampleF') and hasattr(self, 'samplelogprobs'): - raise AttributeError("first specify a sample feature matrix" - " using sampleFgen()") - else: - # Ensure the feature matrix for the sample space has been set - if not hasattr(self, 'F'): - raise AttributeError("first specify a feature matrix" - " using setfeaturesandsamplespace()") - - # First convert K to a numpy array if necessary - K = np.asarray(K, float) - - # Store the desired feature expectations as a member variable - self.K = K - - # Sanity checks - try: - self.params - except AttributeError: - self.reset(len(K)) - else: - assert len(self.params) == len(K) - - # Don't reset the number of function and gradient evaluations to zero - # self.fnevals = 0 - # self.gradevals = 0 - - # Make a copy of the parameters - oldparams = np.array(self.params) - - callback = self.log - - if algorithm == 'CG': - retval = optimize.fmin_cg(dual, oldparams, grad, (), self.avegtol, \ - maxiter=self.maxiter, full_output=1, \ - disp=self.verbose, retall=0, - callback=callback) - - (newparams, fopt, func_calls, grad_calls, warnflag) = retval - - elif algorithm == 'LBFGSB': - if callback is not None: - raise NotImplementedError("L-BFGS-B optimization algorithm" - " does not yet support callback functions for" - " testing with an external sample") - retval = optimize.fmin_l_bfgs_b(dual, oldparams, \ - grad, args=(), bounds=self.bounds, pgtol=self.maxgtol, - maxfun=self.maxfun) - (newparams, fopt, d) = retval - warnflag, func_calls = d['warnflag'], d['funcalls'] - if self.verbose: - print algorithm + " optimization terminated successfully." 
- print "\tFunction calls: " + str(func_calls) - # We don't have info on how many gradient calls the LBFGSB - # algorithm makes - - elif algorithm == 'BFGS': - retval = optimize.fmin_bfgs(dual, oldparams, \ - grad, (), self.tol, \ - maxiter=self.maxiter, full_output=1, \ - disp=self.verbose, retall=0, \ - callback=callback) - - (newparams, fopt, gopt, Lopt, func_calls, grad_calls, warnflag) = retval - - elif algorithm == 'Powell': - retval = optimize.fmin_powell(dual, oldparams, args=(), \ - xtol=self.tol, ftol = self.tol, \ - maxiter=self.maxiter, full_output=1, \ - disp=self.verbose, retall=0, \ - callback=callback) - - (newparams, fopt, direc, numiter, func_calls, warnflag) = retval - - elif algorithm == 'Nelder-Mead': - retval = optimize.fmin(dual, oldparams, args=(), \ - xtol=self.tol, ftol = self.tol, \ - maxiter=self.maxiter, full_output=1, \ - disp=self.verbose, retall=0, \ - callback=callback) - - (newparams, fopt, numiter, func_calls, warnflag) = retval - - else: - raise AttributeError("the specified algorithm '" + str(algorithm) - + "' is unsupported. Options are 'CG', 'LBFGSB', " - "'Nelder-Mead', 'Powell', and 'BFGS'") - - if np.any(self.params != newparams): - self.setparams(newparams) - self.func_calls = func_calls - - - - def dual(self, params=None, ignorepenalty=False, ignoretest=False): - """Computes the Lagrangian dual L(theta) of the entropy of the - model, for the given vector theta=params. Minimizing this - function (without constraints) should fit the maximum entropy - model subject to the given constraints. These constraints are - specified as the desired (target) values self.K for the - expectations of the feature statistic. - - This function is computed as: - L(theta) = log(Z) - theta^T . K - - For 'bigmodel' objects, it estimates the entropy dual without - actually computing p_theta. This is important if the sample - space is continuous or innumerable in practice. 
We approximate - the norm constant Z using importance sampling as in - [Rosenfeld01whole]. This estimator is deterministic for any - given sample. Note that the gradient of this estimator is equal - to the importance sampling *ratio estimator* of the gradient of - the entropy dual [see my thesis], justifying the use of this - estimator in conjunction with grad() in optimization methods that - use both the function and gradient. Note, however, that - convergence guarantees break down for most optimization - algorithms in the presence of stochastic error. - - Note that, for 'bigmodel' objects, the dual estimate is - deterministic for any given sample. It is given as: - - L_est = log Z_est - sum_i{theta_i K_i} - - where - Z_est = 1/m sum_{x in sample S_0} p_dot(x) / aux_dist(x), - - and m = # observations in sample S_0, and K_i = the empirical - expectation E_p_tilde f_i (X) = sum_x {p(x) f_i(x)}. - """ - - if self.external is None and not self.callingback: - if self.verbose: - print "Function eval #", self.fnevals - - if params is not None: - self.setparams(params) - - # Subsumes both small and large cases: - L = self.lognormconst() - np.dot(self.params, self.K) - - if self.verbose and self.external is None: - print " dual is ", L - - # Use a Gaussian prior for smoothing if requested. - # This adds the penalty term \sum_{i=1}^m \params_i^2 / {2 \sigma_i^2}. - # Define 0 / 0 = 0 here; this allows a variance term of - # sigma_i^2==0 to indicate that feature i should be ignored. - if self.sigma2 is not None and ignorepenalty==False: - ratios = np.nan_to_num(self.params**2 / self.sigma2) - # Why does the above convert inf to 1.79769e+308? 
- - L += 0.5 * ratios.sum() - if self.verbose and self.external is None: - print " regularized dual is ", L - - if not self.callingback and self.external is None: - if hasattr(self, 'callback_dual') \ - and self.callback_dual is not None: - # Prevent infinite recursion if the callback function - # calls dual(): - self.callingback = True - self.callback_dual(self) - self.callingback = False - - if self.external is None and not self.callingback: - self.fnevals += 1 - - # (We don't reset self.params to its prior value.) - return L - - - # An alias for the dual function: - entropydual = dual - - def log(self, params): - """This method is called every iteration during the optimization - process. It calls the user-supplied callback function (if any), - logs the evolution of the entropy dual and gradient norm, and - checks whether the process appears to be diverging, which would - indicate inconsistent constraints (or, for bigmodel instances, - too large a variance in the estimates). - """ - - if self.external is None and not self.callingback: - if self.verbose: - print "Iteration #", self.iters - - # Store new dual and/or gradient norm - if not self.callingback: - if self.storeduals: - self.duals[self.iters] = self.dual() - if self.storegradnorms: - self.gradnorms[self.iters] = norm(self.grad()) - - if not self.callingback and self.external is None: - if hasattr(self, 'callback'): - # Prevent infinite recursion if the callback function - # calls dual(): - self.callingback = True - self.callback(self) - self.callingback = False - - # Do we perform a test on external sample(s) every iteration? - # Only relevant to bigmodel objects - if hasattr(self, 'testevery') and self.testevery > 0: - if (self.iters + 1) % self.testevery != 0: - if self.verbose: - print "Skipping test on external sample(s) ..." 
- else: - self.test() - - if not self.callingback and self.external is None: - if self.mindual > -np.inf and self.dual() < self.mindual: - raise DivergenceError("dual is below the threshold 'mindual'" - " and may be diverging to -inf. Fix the constraints" - " or lower the threshold!") - - self.iters += 1 - - - def grad(self, params=None, ignorepenalty=False): - """Computes or estimates the gradient of the entropy dual. - """ - - if self.verbose and self.external is None and not self.callingback: - print "Grad eval #" + str(self.gradevals) - - if params is not None: - self.setparams(params) - - G = self.expectations() - self.K - - if self.verbose and self.external is None: - print " norm of gradient =", norm(G) - - # (We don't reset params to its prior value.) - - # Use a Gaussian prior for smoothing if requested. The ith - # partial derivative of the penalty term is \params_i / - # \sigma_i^2. Define 0 / 0 = 0 here; this allows a variance term - # of sigma_i^2==0 to indicate that feature i should be ignored. - if self.sigma2 is not None and ignorepenalty==False: - penalty = self.params / self.sigma2 - G += penalty - features_to_kill = np.where(np.isnan(penalty))[0] - G[features_to_kill] = 0.0 - if self.verbose and self.external is None: - normG = norm(G) - print " norm of regularized gradient =", normG - - if not self.callingback and self.external is None: - if hasattr(self, 'callback_grad') \ - and self.callback_grad is not None: - # Prevent infinite recursion if the callback function - # calls grad(): - self.callingback = True - self.callback_grad(self) - self.callingback = False - - if self.external is None and not self.callingback: - self.gradevals += 1 - - return G - - - def crossentropy(self, fx, log_prior_x=None, base=np.e): - """Returns the cross entropy H(q, p) of the empirical - distribution q of the data (with the given feature matrix fx) - with respect to the model p. 
For discrete distributions this is - defined as: - - H(q, p) = - n^{-1} \sum_{j=1}^n log p(x_j) - - where x_j are the data elements assumed drawn from q whose - features are given by the matrix fx = {f(x_j)}, j=1,...,n. - - The 'base' argument specifies the base of the logarithm, which - defaults to e. - - For continuous distributions this makes no sense! - """ - H = -self.logpdf(fx, log_prior_x).mean() - if base != np.e: - # H' = H * log_{base} (e) - return H / np.log(base) - else: - return H - - - def normconst(self): - """Returns the normalization constant, or partition function, for - the current model. Warning -- this may be too large to represent; - if so, this will result in numerical overflow. In this case use - lognormconst() instead. - - For 'bigmodel' instances, estimates the normalization term as - Z = E_aux_dist [{exp (params.f(X))} / aux_dist(X)] using a sample - from aux_dist. - """ - return np.exp(self.lognormconst()) - - - def setsmooth(self, sigma): - """Specifies that the entropy dual and gradient should be - computed with a quadratic penalty term on magnitude of the - parameters. This 'smooths' the model to account for noise in the - target expectation values or to improve robustness when using - simulation to fit models and when the sampling distribution has - high variance. The smoothing mechanism is described in Chen and - Rosenfeld, 'A Gaussian prior for smoothing maximum entropy - models' (1999). - - The parameter 'sigma' will be squared and stored as self.sigma2. - """ - self.sigma2 = sigma**2 - - - def setparams(self, params): - """Set the parameter vector to params, replacing the existing - parameters. params must be a list or numpy array of the same - length as the model's feature vector f. 
- """ - - self.params = np.array(params, float) # make a copy - - # Log the new params to disk - self.logparams() - - # Delete params-specific stuff - self.clearcache() - - - def clearcache(self): - """Clears the interim results of computations depending on the - parameters and the sample. - """ - for var in ['mu', 'logZ', 'logZapprox', 'logv']: - if hasattr(self, var): - exec('del self.' + var) - - def reset(self, numfeatures=None): - """Resets the parameters self.params to zero, clearing the cache - variables dependent on them. Also resets the number of function - and gradient evaluations to zero. - """ - - if numfeatures: - m = numfeatures - else: - # Try to infer the number of parameters from existing state - if hasattr(self, 'params'): - m = len(self.params) - elif hasattr(self, 'F'): - m = self.F.shape[0] - elif hasattr(self, 'sampleF'): - m = self.sampleF.shape[0] - elif hasattr(self, 'K'): - m = len(self.K) - else: - raise ValueError("specify the number of features / parameters") - - # Set parameters, clearing cache variables - self.setparams(np.zeros(m, float)) - - # These bounds on the param values are only effective for the - # L-BFGS-B optimizer: - self.bounds = [(-100., 100.)]*len(self.params) - - self.fnevals = 0 - self.gradevals = 0 - self.iters = 0 - self.callingback = False - - # Clear the stored duals and gradient norms - self.duals = {} - self.gradnorms = {} - if hasattr(self, 'external_duals'): - self.external_duals = {} - if hasattr(self, 'external_gradnorms'): - self.external_gradnorms = {} - if hasattr(self, 'external'): - self.external = None - - - def setcallback(self, callback=None, callback_dual=None, \ - callback_grad=None): - """Sets callback functions to be called every iteration, every - function evaluation, or every gradient evaluation. All callback - functions are passed one argument, the current model object. - - Note that line search algorithms in e.g. 
CG make potentially - several function and gradient evaluations per iteration, some of - which we expect to be poor. - """ - self.callback = callback - self.callback_dual = callback_dual - self.callback_grad = callback_grad - - def logparams(self): - """Saves the model parameters if logging has been - enabled and the # of iterations since the last save has reached - self.paramslogfreq. - """ - if not hasattr(self, 'paramslogcounter'): - # Assume beginlogging() was never called - return - self.paramslogcounter += 1 - if not (self.paramslogcounter % self.paramslogfreq == 0): - return - - # Check whether the params are NaN - if not np.all(self.params == self.params): - raise FloatingPointError("some of the parameters are NaN") - - if self.verbose: - print "Saving parameters ..." - paramsfile = open(self.paramslogfilename + '.' + \ - str(self.paramslogcounter) + '.pickle', 'wb') - cPickle.dump(self.params, paramsfile, cPickle.HIGHEST_PROTOCOL) - paramsfile.close() - #self.paramslog += 1 - #self.paramslogcounter = 0 - if self.verbose: - print "Done." - - def beginlogging(self, filename, freq=10): - """Enable logging params for each fn evaluation to files named - 'filename.freq.pickle', 'filename.(2*freq).pickle', ... each - 'freq' iterations. - """ - if self.verbose: - print "Logging to files " + filename + "*" - self.paramslogcounter = 0 - self.paramslogfilename = filename - self.paramslogfreq = freq - #self.paramslog = 1 - - def endlogging(self): - """Stop logging param values whenever setparams() is called. - """ - del self.paramslogcounter - del self.paramslogfilename - del self.paramslogfreq - - - - - -class model(basemodel): - """A maximum-entropy (exponential-form) model on a discrete sample - space. 
- """ - def __init__(self, f=None, samplespace=None): - super(model, self).__init__() - - if f is not None and samplespace is not None: - self.setfeaturesandsamplespace(f, samplespace) - elif f is not None and samplespace is None: - raise ValueError("not supported: specify both features and" - " sample space or neither") - - - def setfeaturesandsamplespace(self, f, samplespace): - """Creates a new matrix self.F of features f of all points in the - sample space. f is a list of feature functions f_i mapping the - sample space to real values. The parameter vector self.params is - initialized to zero. - - We also compute f(x) for each x in the sample space and store - them as self.F. This uses lots of memory but is much faster. - - This is only appropriate when the sample space is finite. - """ - self.f = f - self.reset(numfeatures=len(f)) - self.samplespace = samplespace - self.F = sparsefeaturematrix(f, samplespace, 'csr_matrix') - - - def lognormconst(self): - """Compute the log of the normalization constant (partition - function) Z=sum_{x \in samplespace} p_0(x) exp(params . f(x)). - The sample space must be discrete and finite. - """ - # See if it's been precomputed - if hasattr(self, 'logZ'): - return self.logZ - - # Has F = {f_i(x_j)} been precomputed? - if not hasattr(self, 'F'): - raise AttributeError("first create a feature matrix F") - - # Good, assume the feature matrix exists - log_p_dot = innerprodtranspose(self.F, self.params) - - # Are we minimizing KL divergence? - if self.priorlogprobs is not None: - log_p_dot += self.priorlogprobs - - self.logZ = logsumexp(log_p_dot) - return self.logZ - - - def expectations(self): - """The vector E_p[f(X)] under the model p_params of the vector of - feature functions f_i over the sample space. - """ - # For discrete models, use the representation E_p[f(X)] = p . 
F - if not hasattr(self, 'F'): - raise AttributeError("first set the feature matrix F") - - # A pre-computed matrix of features exists - p = self.pmf() - return innerprod(self.F, p) - - def logpmf(self): - """Returns an array indexed by integers representing the - logarithms of the probability mass function (pmf) at each point - in the sample space under the current model (with the current - parameter vector self.params). - """ - # Have the features already been computed and stored? - if not hasattr(self, 'F'): - raise AttributeError("first set the feature matrix F") - - # Yes: - # p(x) = exp(params.f(x)) / sum_y[exp params.f(y)] - # = exp[log p_dot(x) - logsumexp{log(p_dot(y))}] - - log_p_dot = innerprodtranspose(self.F, self.params) - # Do we have a prior distribution p_0? - if self.priorlogprobs is not None: - log_p_dot += self.priorlogprobs - if not hasattr(self, 'logZ'): - # Compute the norm constant (quickly!) - self.logZ = logsumexp(log_p_dot) - return log_p_dot - self.logZ - - - def pmf(self): - """Returns an array indexed by integers representing the values - of the probability mass function (pmf) at each point in the - sample space under the current model (with the current parameter - vector self.params). - - Equivalent to exp(self.logpmf()) - """ - return arrayexp(self.logpmf()) - - # An alias for pmf - probdist = pmf - - def pmf_function(self, f=None): - """Returns the pmf p_theta(x) as a function taking values on the - model's sample space. The returned pmf is defined as: - - p_theta(x) = exp(theta.f(x) - log Z) - - where theta is the current parameter vector self.params. The - returned function p_theta also satisfies - all([p(x) for x in self.samplespace] == pmf()). - - The feature statistic f should be a list of functions - [f1(),...,fn(x)]. This must be passed unless the model already - contains an equivalent attribute 'model.f'. - - Requires that the sample space be discrete and finite, and stored - as self.samplespace as a list or array. 
- """ - - if hasattr(self, 'logZ'): - logZ = self.logZ - else: - logZ = self.lognormconst() - - if f is None: - try: - f = self.f - except AttributeError: - raise AttributeError("either pass a list f of feature" - " functions or set this as a member variable self.f") - - # Do we have a prior distribution p_0? - priorlogpmf = None - if self.priorlogprobs is not None: - try: - priorlogpmf = self.priorlogpmf - except AttributeError: - raise AttributeError("prior probability mass function not set") - - def p(x): - f_x = np.array([f[i](x) for i in range(len(f))], float) - - # Do we have a prior distribution p_0? - if priorlogpmf is not None: - priorlogprob_x = priorlogpmf(x) - return math.exp(np.dot(self.params, f_x) + priorlogprob_x \ - - logZ) - else: - return math.exp(np.dot(self.params, f_x) - logZ) - return p - - -class conditionalmodel(model): - """ - A conditional maximum-entropy (exponential-form) model p(x|w) on a - discrete sample space. - - This is useful for classification problems: - given the context w, what is the probability of each class x? - - The form of such a model is:: - - p(x | w) = exp(theta . f(w, x)) / Z(w; theta) - - where Z(w; theta) is a normalization term equal to:: - - Z(w; theta) = sum_x exp(theta . f(w, x)). - - The sum is over all classes x in the set Y, which must be supplied to - the constructor as the parameter 'samplespace'. - - Such a model form arises from maximizing the entropy of a conditional - model p(x | w) subject to the constraints:: - - K_i = E f_i(W, X) - - where the expectation is with respect to the distribution:: - - q(w) p(x | w) - - where q(w) is the empirical probability mass function derived from - observations of the context w in a training set. Normally the vector - K = {K_i} of expectations is set equal to the expectation of f_i(w, - x) with respect to the empirical distribution. 
- - This method minimizes the Lagrangian dual L of the entropy, which is - defined for conditional models as:: - - L(theta) = sum_w q(w) log Z(w; theta) - - sum_{w,x} q(w,x) [theta . f(w,x)] - - Note that both sums are only over the training set {w,x}, not the - entire sample space, since q(w,x) = 0 for all w,x not in the training - set. - - The partial derivatives of L are:: - - dL / dtheta_i = K_i - E f_i(X, Y) - - where the expectation is as defined above. - - """ - def __init__(self, F, counts, numcontexts): - """The F parameter should be a (sparse) m x size matrix, where m - is the number of features and size is |W| * |X|, where |W| is the - number of contexts and |X| is the number of elements X in the - sample space. - - The 'counts' parameter should be a row vector stored as a (1 x - |W|*|X|) sparse matrix, whose element i*|W|+j is the number of - occurrences of x_j in context w_i in the training set. - - This storage format allows efficient multiplication over all - contexts in one operation. - """ - # Ideally, the 'counts' parameter could be represented as a sparse - # matrix of size C x X, whose ith row # vector contains all points x_j - # in the sample space X in context c_i. For example: - # N = sparse.lil_matrix((len(contexts), len(samplespace))) - # for (c, x) in corpus: - # N[c, x] += 1 - - # This would be a nicer input format, but computations are more - # efficient internally with one long row vector. What we really need is - # for sparse matrices to offer a .reshape method so this conversion - # could be done internally and transparently. Then the numcontexts - # argument to the conditionalmodel constructor could also be inferred - # from the matrix dimensions. - - super(conditionalmodel, self).__init__() - self.F = F - self.numcontexts = numcontexts - - S = F.shape[1] // numcontexts # number of sample point - assert isinstance(S, int) - - # Set the empirical pmf: p_tilde(w, x) = N(w, x) / \sum_c \sum_y N(c, y). 
- # This is always a rank-2 beast with only one row (to support either - # arrays or dense/sparse matrices. - if not hasattr(counts, 'shape'): - # Not an array or dense/sparse matrix - p_tilde = asarray(counts).reshape(1, len(counts)) - else: - if counts.ndim == 1: - p_tilde = counts.reshape(1, len(counts)) - elif counts.ndim == 2: - # It needs to be flat (a row vector) - if counts.shape[0] > 1: - try: - # Try converting to a row vector - p_tilde = counts.reshape((1, counts.size)) - except AttributeError: - raise ValueError("the 'counts' object needs to be a" - " row vector (1 x n) rank-2 array/matrix) or have" - " a .reshape method to convert it into one") - else: - p_tilde = counts - # Make a copy -- don't modify 'counts' - self.p_tilde = p_tilde / p_tilde.sum() - - # As an optimization, p_tilde need not be copied or stored at all, since - # it is only used by this function. - - self.p_tilde_context = np.empty(numcontexts, float) - for w in xrange(numcontexts): - self.p_tilde_context[w] = self.p_tilde[0, w*S : (w+1)*S].sum() - - # Now compute the vector K = (K_i) of expectations of the - # features with respect to the empirical distribution p_tilde(w, x). - # This is given by: - # - # K_i = \sum_{w, x} q(w, x) f_i(w, x) - # - # This is independent of the model parameters. - self.K = flatten(innerprod(self.F, self.p_tilde.transpose())) - self.numsamplepoints = S - - - def lognormconst(self): - """Compute the elementwise log of the normalization constant - (partition function) Z(w)=sum_{y \in Y(w)} exp(theta . f(w, y)). - The sample space must be discrete and finite. This is a vector - with one element for each context w. - """ - # See if it's been precomputed - if hasattr(self, 'logZ'): - return self.logZ - - numcontexts = self.numcontexts - S = self.numsamplepoints - # Has F = {f_i(x_j)} been precomputed? 
- if not hasattr(self, 'F'): - raise AttributeError("first create a feature matrix F") - - # Good, assume F has been precomputed - - log_p_dot = innerprodtranspose(self.F, self.params) - - # Are we minimizing KL divergence? - if self.priorlogprobs is not None: - log_p_dot += self.priorlogprobs - - self.logZ = np.zeros(numcontexts, float) - for w in xrange(numcontexts): - self.logZ[w] = logsumexp(log_p_dot[w*S: (w+1)*S]) - return self.logZ - - - def dual(self, params=None, ignorepenalty=False): - """The entropy dual function is defined for conditional models as - - L(theta) = sum_w q(w) log Z(w; theta) - - sum_{w,x} q(w,x) [theta . f(w,x)] - - or equivalently as - - L(theta) = sum_w q(w) log Z(w; theta) - (theta . k) - - where K_i = \sum_{w, x} q(w, x) f_i(w, x), and where q(w) is the - empirical probability mass function derived from observations of the - context w in a training set. Normally q(w, x) will be 1, unless the - same class label is assigned to the same context more than once. - - Note that both sums are only over the training set {w,x}, not the - entire sample space, since q(w,x) = 0 for all w,x not in the training - set. - - The entropy dual function is proportional to the negative log - likelihood. - - Compare to the entropy dual of an unconditional model: - L(theta) = log(Z) - theta^T . K - """ - if not self.callingback: - if self.verbose: - print "Function eval #", self.fnevals - - if params is not None: - self.setparams(params) - - logZs = self.lognormconst() - - L = np.dot(self.p_tilde_context, logZs) - np.dot(self.params, self.K) - - if self.verbose and self.external is None: - print " dual is ", L - - # Use a Gaussian prior for smoothing if requested. 
- # This adds the penalty term \sum_{i=1}^m \theta_i^2 / {2 \sigma_i^2} - if self.sigma2 is not None and ignorepenalty==False: - penalty = 0.5 * (self.params**2 / self.sigma2).sum() - L += penalty - if self.verbose and self.external is None: - print " regularized dual is ", L - - if not self.callingback: - if hasattr(self, 'callback_dual'): - # Prevent infinite recursion if the callback function calls - # dual(): - self.callingback = True - self.callback_dual(self) - self.callingback = False - self.fnevals += 1 - - # (We don't reset params to its prior value.) - return L - - - # These do not need to be overridden: - # grad - # pmf - # probdist - - - def fit(self, algorithm='CG'): - """Fits the conditional maximum entropy model subject to the - constraints - - sum_{w, x} p_tilde(w) p(x | w) f_i(w, x) = k_i - - for i=1,...,m, where k_i is the empirical expectation - k_i = sum_{w, x} p_tilde(w, x) f_i(w, x). - """ - - # Call base class method - return model.fit(self, self.K, algorithm) - - - def expectations(self): - """The vector of expectations of the features with respect to the - distribution p_tilde(w) p(x | w), where p_tilde(w) is the - empirical probability mass function value stored as - self.p_tilde_context[w]. - """ - if not hasattr(self, 'F'): - raise AttributeError("need a pre-computed feature matrix F") - - # A pre-computed matrix of features exists - - numcontexts = self.numcontexts - S = self.numsamplepoints - p = self.pmf() - # p is now an array representing p(x | w) for each class w. Now we - # multiply the appropriate elements by p_tilde(w) to get the hybrid pmf - # required for conditional modelling: - for w in xrange(numcontexts): - p[w*S : (w+1)*S] *= self.p_tilde_context[w] - - # Use the representation E_p[f(X)] = p . F - return flatten(innerprod(self.F, p)) - - # # We only override to modify the documentation string. The code - # # is the same as for the model class. 
- # return model.expectations(self) - - - def logpmf(self): - """Returns a (sparse) row vector of logarithms of the conditional - probability mass function (pmf) values p(x | c) for all pairs (c, - x), where c are contexts and x are points in the sample space. - The order of these is log p(x | c) = logpmf()[c * numsamplepoints - + x]. - """ - # Have the features already been computed and stored? - if not hasattr(self, 'F'): - raise AttributeError("first set the feature matrix F") - - # p(x | c) = exp(theta.f(x, c)) / sum_c[exp theta.f(x, c)] - # = exp[log p_dot(x) - logsumexp{log(p_dot(y))}] - - numcontexts = self.numcontexts - S = self.numsamplepoints - log_p_dot = flatten(innerprodtranspose(self.F, self.params)) - # Do we have a prior distribution p_0? - if self.priorlogprobs is not None: - log_p_dot += self.priorlogprobs - if not hasattr(self, 'logZ'): - # Compute the norm constant (quickly!) - self.logZ = np.zeros(numcontexts, float) - for w in xrange(numcontexts): - self.logZ[w] = logsumexp(log_p_dot[w*S : (w+1)*S]) - # Renormalize - for w in xrange(numcontexts): - log_p_dot[w*S : (w+1)*S] -= self.logZ[w] - return log_p_dot - - -class bigmodel(basemodel): - """A maximum-entropy (exponential-form) model on a large sample - space. - - The model expectations are not computed exactly (by summing or - integrating over a sample space) but approximately (by Monte Carlo - estimation). Approximation is necessary when the sample space is too - large to sum or integrate over in practice, like a continuous sample - space in more than about 4 dimensions or a large discrete space like - all possible sentences in a natural language. - - Approximating the expectations by sampling requires an instrumental - distribution that should be close to the model for fast convergence. - The tails should be fatter than the model. 
- """ - - def __init__(self): - super(bigmodel, self).__init__() - - # Number of sample matrices to generate and use to estimate E and logZ - self.matrixtrials = 1 - - # Store the lowest dual estimate observed so far in the fitting process - self.bestdual = float('inf') - - # Most of the attributes below affect only the stochastic - # approximation procedure. They should perhaps be removed, and made - # arguments of stochapprox() instead. - - # Use Kersten-Deylon accelerated convergence fo stoch approx - self.deylon = False - - # By default, use a stepsize decreasing as k^(-3/4) - self.stepdecreaserate = 0.75 - - # If true, check convergence using the exact model. Only useful for - # testing small problems (e.g. with different parameters) when - # simulation is unnecessary. - self.exacttest = False - - # By default use Ruppert-Polyak averaging for stochastic approximation - self.ruppertaverage = True - - # Use the stoch approx scaling modification of Andradottir (1996) - self.andradottir = False - - # Number of iterations to hold the stochastic approximation stepsize - # a_k at a_0 for before decreasing it - self.a_0_hold = 0 - - # Whether or not to use the same sample for all iterations - self.staticsample = True - - # How many iterations of stochastic approximation between testing for - # convergence - self.testconvergefreq = 0 - - # How many sample matrices to average over when testing for convergence - # in stochastic approx - self.testconvergematrices = 10 - - # Test for convergence every 'testevery' iterations, using one or - # more external samples. If None, don't test. - self.testevery = None - # self.printevery = 1000 - - - def resample(self): - """(Re)samples the matrix F of sample features. - """ - - if self.verbose >= 3: - print "(sampling)" - - # First delete the existing sample matrix to save memory - # This matters, since these can be very large - for var in ['sampleF, samplelogprobs, sample']: - if hasattr(self, var): - exec('del self.' 
+ var) - - # Now generate a new sample - output = self.sampleFgen.next() - try: - len(output) - except TypeError: - raise ValueError("output of sampleFgen.next() not recognized") - if len(output) == 2: - # Assume the format is (F, lp) - (self.sampleF, self.samplelogprobs) = output - elif len(output) == 3: - # Assume the format is (F, lp, sample) - (self.sampleF, self.samplelogprobs, self.sample) = output - else: - raise ValueError("output of sampleFgen.next() not recognized") - - # Check whether the number m of features is correct - try: - # The number of features is defined as the length of - # self.params, so first check if it exists: - self.params - m = len(self.params) - except AttributeError: - (m, n) = self.sampleF.shape - self.reset(m) - else: - if self.sampleF.shape[0] != m: - raise ValueError("the sample feature generator returned" - " a feature matrix of incorrect dimensions") - if self.verbose >= 3: - print "(done)" - - # Now clear the temporary variables that are no longer correct for this - # sample - self.clearcache() - - - def lognormconst(self): - """Estimate the normalization constant (partition function) using - the current sample matrix F. - """ - # First see whether logZ has been precomputed - if hasattr(self, 'logZapprox'): - return self.logZapprox - - # Compute log v = log [p_dot(s_j)/aux_dist(s_j)] for - # j=1,...,n=|sample| using a precomputed matrix of sample - # features. - logv = self._logv() - - # Good, we have our logv. Now: - n = len(logv) - self.logZapprox = logsumexp(logv) - math.log(n) - return self.logZapprox - - - def expectations(self): - """Estimates the feature expectations E_p[f(X)] under the current - model p = p_theta using the given sample feature matrix. If - self.staticsample is True, uses the current feature matrix - self.sampleF. If self.staticsample is False or self.matrixtrials - is > 1, draw one or more sample feature matrices F afresh using - the generator function supplied to sampleFgen(). 
- """ - # See if already computed - if hasattr(self, 'mu'): - return self.mu - self.estimate() - return self.mu - - def _logv(self): - """This function helps with caching of interim computational - results. It is designed to be called internally, not by a user. - - This is defined as the array of unnormalized importance sampling - weights corresponding to the sample x_j whose features are - represented as the columns of self.sampleF. - logv_j = p_dot(x_j) / q(x_j), - where p_dot(x_j) = p_0(x_j) exp(theta . f(x_j)) is the - unnormalized pdf value of the point x_j under the current model. - """ - # First see whether logv has been precomputed - if hasattr(self, 'logv'): - return self.logv - - # Compute log v = log [p_dot(s_j)/aux_dist(s_j)] for - # j=1,...,n=|sample| using a precomputed matrix of sample - # features. - if self.external is None: - paramsdotF = innerprodtranspose(self.sampleF, self.params) - logv = paramsdotF - self.samplelogprobs - # Are we minimizing KL divergence between the model and a prior - # density p_0? - if self.priorlogprobs is not None: - logv += self.priorlogprobs - else: - e = self.external - paramsdotF = innerprodtranspose(self.externalFs[e], self.params) - logv = paramsdotF - self.externallogprobs[e] - # Are we minimizing KL divergence between the model and a prior - # density p_0? - if self.externalpriorlogprobs is not None: - logv += self.externalpriorlogprobs[e] - - # Good, we have our logv. Now: - self.logv = logv - return logv - - - def estimate(self): - """This function approximates both the feature expectation vector - E_p f(X) and the log of the normalization term Z with importance - sampling. - - It also computes the sample variance of the component estimates - of the feature expectations as: varE = var(E_1, ..., E_T) where T - is self.matrixtrials and E_t is the estimate of E_p f(X) - approximated using the 't'th auxiliary feature matrix. - - It doesn't return anything, but stores the member variables - logZapprox, mu and varE. 
(This is done because some optimization - algorithms retrieve the dual fn and gradient fn in separate - function calls, but we can compute them more efficiently - together.) - - It uses a supplied generator sampleFgen whose .next() method - returns features of random observations s_j generated according - to an auxiliary distribution aux_dist. It uses these either in a - matrix (with multiple runs) or with a sequential procedure, with - more updating overhead but potentially stopping earlier (needing - fewer samples). In the matrix case, the features F={f_i(s_j)} - and vector [log_aux_dist(s_j)] of log probabilities are generated - by calling resample(). - - We use [Rosenfeld01Wholesentence]'s estimate of E_p[f_i] as: - {sum_j p(s_j)/aux_dist(s_j) f_i(s_j) } - / {sum_j p(s_j) / aux_dist(s_j)}. - - Note that this is consistent but biased. - - This equals: - {sum_j p_dot(s_j)/aux_dist(s_j) f_i(s_j) } - / {sum_j p_dot(s_j) / aux_dist(s_j)} - - Compute the estimator E_p f_i(X) in log space as: - num_i / denom, - where - num_i = exp(logsumexp(theta.f(s_j) - log aux_dist(s_j) - + log f_i(s_j))) - and - denom = [n * Zapprox] - - where Zapprox = exp(self.lognormconst()). - - We can compute the denominator n*Zapprox directly as: - exp(logsumexp(log p_dot(s_j) - log aux_dist(s_j))) - = exp(logsumexp(theta.f(s_j) - log aux_dist(s_j))) - """ - - if self.verbose >= 3: - print "(estimating dual and gradient ...)" - - # Hereafter is the matrix code - - mus = [] - logZs = [] - - for trial in range(self.matrixtrials): - if self.verbose >= 2 and self.matrixtrials > 1: - print "(trial " + str(trial) + " ...)" - - # Resample if necessary - if (not self.staticsample) or self.matrixtrials > 1: - self.resample() - - logv = self._logv() - n = len(logv) - logZ = self.lognormconst() - logZs.append(logZ) - - # We don't need to handle negative values separately, - # because we don't need to take the log of the feature - # matrix sampleF. 
See my thesis, Section 4.4 - - logu = logv - logZ - if self.external is None: - averages = innerprod(self.sampleF, arrayexp(logu)) - else: - averages = innerprod(self.externalFs[self.external], \ - arrayexp(logu)) - averages /= n - mus.append(averages) - - # Now we have T=trials vectors of the sample means. If trials > 1, - # estimate st dev of means and confidence intervals - ttrials = len(mus) # total number of trials performed - if ttrials == 1: - self.mu = mus[0] - self.logZapprox = logZs[0] - try: - del self.varE # make explicit that this has no meaning - except AttributeError: - pass - return - else: - # The log of the variance of logZ is: - # -log(n-1) + logsumexp(2*log|Z_k - meanZ|) - - self.logZapprox = logsumexp(logZs) - math.log(ttrials) - stdevlogZ = np.array(logZs).std() - mus = np.array(mus) - self.varE = columnvariances(mus) - self.mu = columnmeans(mus) - return - - - def setsampleFgen(self, sampler, staticsample=True): - """Initializes the Monte Carlo sampler to use the supplied - generator of samples' features and log probabilities. This is an - alternative to defining a sampler in terms of a (fixed size) - feature matrix sampleF and accompanying vector samplelogprobs of - log probabilities. - - Calling sampler.next() should generate tuples (F, lp), where F is - an (m x n) matrix of features of the n sample points x_1,...,x_n, - and lp is an array of length n containing the (natural) log - probability density (pdf or pmf) of each point under the - auxiliary sampling distribution. - - The output of sampler.next() can optionally be a 3-tuple (F, lp, - sample) instead of a 2-tuple (F, lp). In this case the value - 'sample' is then stored as a class variable self.sample. This is - useful for inspecting the output and understanding the model - characteristics. 
- - If matrixtrials > 1 and staticsample = True, (which is useful for - estimating variance between the different feature estimates), - sampler.next() will be called once for each trial - (0,...,matrixtrials) for each iteration. This allows using a set - of feature matrices, each of which stays constant over all - iterations. - - We now insist that sampleFgen.next() return the entire sample - feature matrix to be used each iteration to avoid overhead in - extra function calls and memory copying (and extra code). - - An alternative was to supply a list of samplers, - sampler=[sampler0, sampler1, ..., sampler_{m-1}, samplerZ], one - for each feature and one for estimating the normalization - constant Z. But this code was unmaintained, and has now been - removed (but it's in Ed's CVS repository :). - - Example use: - >>> import spmatrix - >>> model = bigmodel() - >>> def sampler(): - ... n = 0 - ... while True: - ... f = spmatrix.ll_mat(1,3) - ... f[0,0] = n+1; f[0,1] = n+1; f[0,2] = n+1 - ... yield f, 1.0 - ... n += 1 - ... - >>> model.setsampleFgen(sampler()) - >>> type(model.sampleFgen) - - >>> [model.sampleF[0,i] for i in range(3)] - [1.0, 1.0, 1.0] - - We now set matrixtrials as a class property instead, rather than - passing it as an argument to this function, where it can be - written over (perhaps with the default function argument by - accident) when we re-call this func (e.g. to change the matrix - size.) - """ - - # if not sequential: - assert type(sampler) is types.GeneratorType - self.sampleFgen = sampler - self.staticsample = staticsample - if staticsample: - self.resample() - - - def pdf(self, fx): - """Returns the estimated density p_theta(x) at the point x with - feature statistic fx = f(x). This is defined as - p_theta(x) = exp(theta.f(x)) / Z(theta), - where Z is the estimated value self.normconst() of the partition - function. 
- """ - return exp(self.logpdf(fx)) - - def pdf_function(self): - """Returns the estimated density p_theta(x) as a function p(f) - taking a vector f = f(x) of feature statistics at any point x. - This is defined as: - p_theta(x) = exp(theta.f(x)) / Z - """ - log_Z_est = self.lognormconst() - - def p(fx): - return np.exp(innerprodtranspose(fx, self.params) - log_Z_est) - return p - - - def logpdf(self, fx, log_prior_x=None): - """Returns the log of the estimated density p(x) = p_theta(x) at - the point x. If log_prior_x is None, this is defined as: - log p(x) = theta.f(x) - log Z - where f(x) is given by the (m x 1) array fx. - - If, instead, fx is a 2-d (m x n) array, this function interprets - each of its rows j=0,...,n-1 as a feature vector f(x_j), and - returns an array containing the log pdf value of each point x_j - under the current model. - - log Z is estimated using the sample provided with - setsampleFgen(). - - The optional argument log_prior_x is the log of the prior density - p_0 at the point x (or at each point x_j if fx is 2-dimensional). - The log pdf of the model is then defined as - log p(x) = log p0(x) + theta.f(x) - log Z - and p then represents the model of minimum KL divergence D(p||p0) - instead of maximum entropy. 
- """ - log_Z_est = self.lognormconst() - if len(fx.shape) == 1: - logpdf = np.dot(self.params, fx) - log_Z_est - else: - logpdf = innerprodtranspose(fx, self.params) - log_Z_est - if log_prior_x is not None: - logpdf += log_prior_x - return logpdf - - - def stochapprox(self, K): - """Tries to fit the model to the feature expectations K using - stochastic approximation, with the Robbins-Monro stochastic - approximation algorithm: theta_{k+1} = theta_k + a_k g_k - a_k - e_k where g_k is the gradient vector (= feature expectations E - - K) evaluated at the point theta_k, a_k is the sequence a_k = a_0 - / k, where a_0 is some step size parameter defined as self.a_0 in - the model, and e_k is an unknown error term representing the - uncertainty of the estimate of g_k. We assume e_k has nice - enough properties for the algorithm to converge. - """ - if self.verbose: - print "Starting stochastic approximation..." - - # If we have resumed fitting, adopt the previous parameter k - try: - k = self.paramslogcounter - #k = (self.paramslog-1)*self.paramslogfreq - except: - k = 0 - - try: - a_k = self.a_0 - except AttributeError: - raise AttributeError("first define the initial step size a_0") - - avgparams = self.params - if self.exacttest: - # store exact error each testconvergefreq iterations - self.SAerror = [] - while True: - k += 1 - if k > self.a_0_hold: - if not self.deylon: - n = k - self.a_0_hold - elif k <= 2 + self.a_0_hold: # why <= 2? - # Initialize n for the first non-held iteration - n = k - self.a_0_hold - else: - # Use Kersten-Deylon accelerated SA, based on the rate of - # changes of sign of the gradient. (If frequent swaps, the - # stepsize is too large.) 
- #n += (np.dot(y_k, y_kminus1) < 0) # an indicator fn - if np.dot(y_k, y_kminus1) < 0: - n += 1 - else: - # Store iterations of sign switches (for plotting - # purposes) - try: - self.nosignswitch.append(k) - except AttributeError: - self.nosignswitch = [k] - print "No sign switch at iteration " + str(k) - if self.verbose >= 2: - print "(using Deylon acceleration. n is " + str(n) + " instead of " + str(k - self.a_0_hold) + "...)" - if self.ruppertaverage: - if self.stepdecreaserate is None: - # Use log n / n as the default. Note: this requires a - # different scaling of a_0 than a stepsize decreasing - # as, e.g., n^(-1/2). - a_k = 1.0 * self.a_0 * math.log(n) / n - else: - # I think that with Ruppert averaging, we need a - # stepsize decreasing as n^(-p), where p is in the open - # interval (0.5, 1) for almost sure convergence. - a_k = 1.0 * self.a_0 / (n ** self.stepdecreaserate) - else: - # I think we need a stepsize decreasing as n^-1 for almost - # sure convergence - a_k = 1.0 * self.a_0 / (n ** self.stepdecreaserate) - # otherwise leave step size unchanged - if self.verbose: - print " step size is: " + str(a_k) - - self.matrixtrials = 1 - self.staticsample = False - if self.andradottir: # use Andradottir (1996)'s scaling? - self.estimate() # resample and reestimate - y_k_1 = self.mu - K - self.estimate() # resample and reestimate - y_k_2 = self.mu - K - y_k = y_k_1 / max(1.0, norm(y_k_2)) + \ - y_k_2 / max(1.0, norm(y_k_1)) - else: - # Standard Robbins-Monro estimator - if not self.staticsample: - self.estimate() # resample and reestimate - try: - y_kminus1 = y_k # store this for the Deylon acceleration - except NameError: - pass # if we're on iteration k=1, ignore this - y_k = self.mu - K - norm_y_k = norm(y_k) - if self.verbose: - print "SA: after iteration " + str(k) - print " approx dual fn is: " + str(self.logZapprox \ - - np.dot(self.params, K)) - print " norm(mu_est - k) = " + str(norm_y_k) - - # Update params (after the convergence tests too ... 
don't waste the - # computation.) - if self.ruppertaverage: - # Use a simple average of all estimates so far, which - # Ruppert and Polyak show can converge more rapidly - newparams = self.params - a_k*y_k - avgparams = (k-1.0)/k*avgparams + 1.0/k * newparams - if self.verbose: - print " new params[0:5] are: " + str(avgparams[0:5]) - self.setparams(avgparams) - else: - # Use the standard Robbins-Monro estimator - self.setparams(self.params - a_k*y_k) - - if k >= self.maxiter: - print "Reached maximum # iterations during stochastic" \ - " approximation without convergence." - break - - - def settestsamples(self, F_list, logprob_list, testevery=1, priorlogprob_list=None): - """Requests that the model be tested every 'testevery' iterations - during fitting using the provided list F_list of feature - matrices, each representing a sample {x_j} from an auxiliary - distribution q, together with the corresponding log probabiltiy - mass or density values log {q(x_j)} in logprob_list. This is - useful as an external check on the fitting process with sample - path optimization, which could otherwise reflect the vagaries of - the single sample being used for optimization, rather than the - population as a whole. - - If self.testevery > 1, only perform the test every self.testevery - calls. - - If priorlogprob_list is not None, it should be a list of arrays - of log(p0(x_j)) values, j = 0,. ..., n - 1, specifying the prior - distribution p0 for the sample points x_j for each of the test - samples. - """ - # Sanity check - assert len(F_list) == len(logprob_list) - - self.testevery = testevery - self.externalFs = F_list - self.externallogprobs = logprob_list - self.externalpriorlogprobs = priorlogprob_list - - # Store the dual and mean square error based on the internal and - # external (test) samples. (The internal sample is used - # statically for sample path optimization; the test samples are - # used as a control for the process.) 
The hash keys are the - # number of function or gradient evaluations that have been made - # before now. - - # The mean entropy dual and mean square error estimates among the - # t external (test) samples, where t = len(F_list) = - # len(logprob_list). - self.external_duals = {} - self.external_gradnorms = {} - - - def test(self): - """Estimate the dual and gradient on the external samples, - keeping track of the parameters that yield the minimum such dual. - The vector of desired (target) feature expectations is stored as - self.K. - """ - if self.verbose: - print " max(params**2) = " + str((self.params**2).max()) - - if self.verbose: - print "Now testing model on external sample(s) ..." - - # Estimate the entropy dual and gradient for each sample. These - # are not regularized (smoothed). - dualapprox = [] - gradnorms = [] - for e in xrange(len(self.externalFs)): - self.external = e - self.clearcache() - if self.verbose >= 2: - print "(testing with sample %d)" % e - dualapprox.append(self.dual(ignorepenalty=True, ignoretest=True)) - gradnorms.append(norm(self.grad(ignorepenalty=True))) - - # Reset to using the normal sample matrix sampleF - self.external = None - self.clearcache() - - meandual = np.average(dualapprox,axis=0) - self.external_duals[self.iters] = dualapprox - self.external_gradnorms[self.iters] = gradnorms - - if self.verbose: - print "** Mean (unregularized) dual estimate from the %d" \ - " external samples is %f" % \ - (len(self.externalFs), meandual) - print "** Mean mean square error of the (unregularized) feature" \ - " expectation estimates from the external samples =" \ - " mean(|| \hat{\mu_e} - k ||,axis=0) =", np.average(gradnorms,axis=0) - # Track the parameter vector params with the lowest mean dual estimate - # so far: - if meandual < self.bestdual: - self.bestdual = meandual - self.bestparams = self.params - if self.verbose: - print "\n\t\t\tStored new minimum entropy dual: %f\n" % meandual - - -def _test(): - import doctest - 
doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/scipy-0.10.1/scipy/maxentropy/maxentutils.py b/scipy-0.10.1/scipy/maxentropy/maxentutils.py deleted file mode 100644 index 0f5dbbec96..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/maxentutils.py +++ /dev/null @@ -1,500 +0,0 @@ -""" -Utility routines for the maximum entropy module. - -Most of them are either Python replacements for the corresponding Fortran -routines or wrappers around matrices to allow the maxent module to -manipulate ndarrays, scipy sparse matrices, and PySparse matrices a -common interface. - -Perhaps the logsumexp() function belongs under the utils/ branch where other -modules can access it more easily. - -Copyright: Ed Schofield, 2003-2006 -License: BSD-style (see LICENSE.txt in main source directory) - -""" - -# Future imports must come before any code in 2.5 -from __future__ import division - -__author__ = "Ed Schofield" -__version__ = '2.0' - -import random -import math -import cmath -import numpy -from numpy import log, exp, asarray, ndarray, empty -from scipy import sparse -from scipy.misc import logsumexp - - -def _logsumexpcomplex(values): - """A version of logsumexp that should work if the values passed are - complex-numbered, such as the output of robustarraylog(). So we - expect: - - cmath.exp(logsumexpcomplex(robustarraylog(values))) ~= sum(values,axis=0) - - except for a small rounding error in both real and imag components. - The output is complex. (To recover just the real component, use - A.real, where A is the complex return value.) 
- """ - if len(values) == 0: - return 0.0 - iterator = iter(values) - # Get the first element - while True: - # Loop until we have a value greater than -inf - try: - b_i = iterator.next() + 0j - except StopIteration: - # empty - return float('-inf') - if b_i.real != float('-inf'): - break - - # Now the rest - for a_i in iterator: - a_i += 0j - if b_i.real > a_i.real: - increment = robustlog(1.+cmath.exp(a_i - b_i)) - # print "Increment is " + str(increment) - b_i = b_i + increment - else: - increment = robustlog(1.+cmath.exp(b_i - a_i)) - # print "Increment is " + str(increment) - b_i = a_i + increment - - return b_i - - -def logsumexp_naive(values): - """For testing logsumexp(). Subject to numerical overflow for large - values (e.g. 720). - """ - - s = 0.0 - for x in values: - s += math.exp(x) - return math.log(s) - - -def robustlog(x): - """Returns log(x) if x > 0, the complex log cmath.log(x) if x < 0, - or float('-inf') if x == 0. - """ - if x == 0.: - return float('-inf') - elif type(x) is complex or (type(x) is float and x < 0): - return cmath.log(x) - else: - return math.log(x) - - -def _robustarraylog(x): - """ An array version of robustlog. Operates on a real array x. - """ - arraylog = empty(len(x), numpy.complex64) - for i in range(len(x)): - xi = x[i] - if xi > 0: - arraylog[i] = math.log(xi) - elif xi == 0.: - arraylog[i] = float('-inf') - else: - arraylog[i] = cmath.log(xi) - return arraylog - -#try: -# from logsumexp import logsumexp, logsumexpcomplex, robustarraylog -#except: -# print "Warning: could not load the fast FORTRAN library for logsumexp()." -# logsumexp = _logsumexp -# logsumexpcomplex = _logsumexpcomplex -# robustarraylog = _robustarraylog -# pass - - -def arrayexp(x): - """ - Returns the elementwise antilog of the real array x. - - We try to exponentiate with numpy.exp() and, if that fails, with - python's math.exp(). numpy.exp() is about 10 times faster but throws - an OverflowError exception for numerical underflow (e.g. 
exp(-800), - whereas python's math.exp() just returns zero, which is much more - helpful. - - """ - try: - ex = numpy.exp(x) - except OverflowError: - print "Warning: OverflowError using numpy.exp(). Using slower Python"\ - " routines instead!" - ex = numpy.empty(len(x), float) - for j in range(len(x)): - ex[j] = math.exp(x[j]) - return ex - -def arrayexpcomplex(x): - """ - Returns the elementwise antilog of the vector x. - - We try to exponentiate with numpy.exp() and, if that fails, with python's - math.exp(). numpy.exp() is about 10 times faster but throws an - OverflowError exception for numerical underflow (e.g. exp(-800), - whereas python's math.exp() just returns zero, which is much more - helpful. - - """ - try: - ex = numpy.exp(x).real - except OverflowError: - ex = numpy.empty(len(x), float) - try: - for j in range(len(x)): - ex[j] = math.exp(x[j]) - except TypeError: - # Perhaps x[j] is complex. If so, try using the complex - # exponential and returning the real part. - for j in range(len(x)): - ex[j] = cmath.exp(x[j]).real - return ex - - -def sample_wr(population, k): - """Chooses k random elements (with replacement) from a population. - (From the Python Cookbook). - """ - n = len(population) - _random, _int = random.random, int # speed hack - return [population[_int(_random() * n)] for i in xrange(k)] - - -def densefeatures(f, x): - """Returns a dense array of non-zero evaluations of the functions fi - in the list f at the point x. - """ - - return numpy.array([fi(x) for fi in f]) - -def densefeaturematrix(f, sample): - """Returns an (m x n) dense array of non-zero evaluations of the - scalar functions fi in the list f at the points x_1,...,x_n in the - list sample. 
- """ - - # Was: return numpy.array([[fi(x) for fi in f] for x in sample]) - - m = len(f) - n = len(sample) - - F = numpy.empty((m, n), float) - for i in xrange(m): - f_i = f[i] - for j in xrange(n): - x = sample[j] - F[i,j] = f_i(x) - - #for j in xrange(n): - # x = sample[j] - # for i in xrange(m): - # F[j,i] = f[i](x) - - return F - - -def sparsefeatures(f, x, format='csc_matrix'): - """ Returns an Mx1 sparse matrix of non-zero evaluations of the - scalar functions f_1,...,f_m in the list f at the point x. - - If format='ll_mat', the PySparse module (or a symlink to it) must be - available in the Python site-packages/ directory. A trimmed-down - version, patched for NumPy compatibility, is available in the SciPy - sandbox/pysparse directory. - """ - m = len(f) - if format == 'll_mat': - import spmatrix - sparsef = spmatrix.ll_mat(m, 1) - elif format in ('dok_matrix', 'csc_matrix', 'csr_matrix'): - sparsef = sparse.dok_matrix((m, 1)) - - for i in xrange(m): - f_i_x = f[i](x) - if f_i_x != 0: - sparsef[i, 0] = f_i_x - - if format == 'csc_matrix': - print "Converting to CSC matrix ..." - return sparsef.tocsc() - elif format == 'csr_matrix': - print "Converting to CSR matrix ..." - return sparsef.tocsr() - else: - return sparsef - -def sparsefeaturematrix(f, sample, format='csc_matrix'): - """Returns an (m x n) sparse matrix of non-zero evaluations of the scalar - or vector functions f_1,...,f_m in the list f at the points - x_1,...,x_n in the sequence 'sample'. - - If format='ll_mat', the PySparse module (or a symlink to it) must be - available in the Python site-packages/ directory. A trimmed-down - version, patched for NumPy compatibility, is available in the SciPy - sandbox/pysparse directory. 
- """ - - m = len(f) - n = len(sample) - if format == 'll_mat': - import spmatrix - sparseF = spmatrix.ll_mat(m, n) - elif format in ('dok_matrix', 'csc_matrix', 'csr_matrix'): - sparseF = sparse.dok_matrix((m, n)) - else: - raise ValueError("sparse matrix format not recognized") - - for i in xrange(m): - f_i = f[i] - for j in xrange(n): - x = sample[j] - f_i_x = f_i(x) - if f_i_x != 0: - sparseF[i,j] = f_i_x - - if format == 'csc_matrix': - return sparseF.tocsc() - elif format == 'csr_matrix': - return sparseF.tocsr() - else: - return sparseF - - - -def dotprod(u,v): - """ - This is a wrapper around general dense or sparse dot products. - - It is not necessary except as a common interface for supporting - ndarray, scipy spmatrix, and PySparse arrays. - - Returns the dot product of the (1 x m) sparse array u with the - (m x 1) (dense) numpy array v. - - """ - #print "Taking the dot product u.v, where" - #print "u has shape " + str(u.shape) - #print "v = " + str(v) - - try: - dotprod = numpy.array([0.0]) # a 1x1 array. Required by spmatrix. - u.matvec(v, dotprod) - return dotprod[0] # extract the scalar - except AttributeError: - # Assume u is a dense array. - return numpy.dot(u,v) - - - -def innerprod(A,v): - """ - This is a wrapper around general dense or sparse dot products. - - It is not necessary except as a common interface for supporting - ndarray, scipy spmatrix, and PySparse arrays. - - Returns the inner product of the (m x n) dense or sparse matrix A - with the n-element dense array v. This is a wrapper for A.dot(v) for - dense arrays and spmatrix objects, and for A.matvec(v, result) for - PySparse matrices. - - """ - - # We assume A is sparse. 
- (m, n) = A.shape - vshape = v.shape - try: - (p,) = vshape - except ValueError: - (p, q) = vshape - if n != p: - raise TypeError("matrix dimensions are incompatible") - if isinstance(v, ndarray): - try: - # See if A is sparse - A.matvec - except AttributeError: - # It looks like A is dense - return numpy.dot(A, v) - else: - # Assume A is sparse - if sparse.isspmatrix(A): - innerprod = A.matvec(v) # This returns a float32 type. Why??? - return innerprod - else: - # Assume PySparse format - innerprod = numpy.empty(m, float) - A.matvec(v, innerprod) - return innerprod - elif sparse.isspmatrix(v): - return A * v - else: - raise TypeError("unsupported types for inner product") - - -def innerprodtranspose(A,v): - """ - This is a wrapper around general dense or sparse dot products. - - It is not necessary except as a common interface for supporting - ndarray, scipy spmatrix, and PySparse arrays. - - Computes A^T V, where A is a dense or sparse matrix and V is a numpy - array. If A is sparse, V must be a rank-1 array, not a matrix. This - function is efficient for large matrices A. This is a wrapper for - u.T.dot(v) for dense arrays and spmatrix objects, and for - u.matvec_transp(v, result) for pysparse matrices. - - """ - - (m, n) = A.shape - #pdb.set_trace() - if hasattr(A, 'matvec_transp'): - # A looks like a PySparse matrix - if len(v.shape) == 1: - innerprod = numpy.empty(n, float) - A.matvec_transp(v, innerprod) - else: - raise TypeError("innerprodtranspose(A,v) requires that v be " - "a vector (rank-1 dense array) if A is sparse.") - return innerprod - elif sparse.isspmatrix(A): - return (A.conj().transpose() * v).transpose() - else: - # Assume A is dense - if isinstance(v, numpy.ndarray): - # v is also dense - if len(v.shape) == 1: - # We can't transpose a rank-1 matrix into a row vector, so - # we reshape it. 
- vm = v.shape[0] - vcolumn = numpy.reshape(v, (1, vm)) - x = numpy.dot(vcolumn, A) - return numpy.reshape(x, (n,)) - else: - #(vm, vn) = v.shape - # Assume vm == m - x = numpy.dot(numpy.transpose(v), A) - return numpy.transpose(x) - else: - raise TypeError("unsupported types for inner product") - - -def rowmeans(A): - """ - This is a wrapper for general dense or sparse dot products. - - It is only necessary as a common interface for supporting ndarray, - scipy spmatrix, and PySparse arrays. - - Returns a dense (m x 1) vector representing the mean of the rows of A, - which be an (m x n) sparse or dense matrix. - - >>> a = numpy.array([[1,2],[3,4]], float) - >>> rowmeans(a) - array([ 1.5, 3.5]) - - """ - if type(A) is numpy.ndarray: - return A.mean(1) - else: - # Assume it's sparse - try: - n = A.shape[1] - except AttributeError: - raise TypeError("rowmeans() only works with sparse and dense " - "arrays") - rowsum = innerprod(A, numpy.ones(n, float)) - return rowsum / float(n) - -def columnmeans(A): - """ - This is a wrapper for general dense or sparse dot products. - - It is only necessary as a common interface for supporting ndarray, - scipy spmatrix, and PySparse arrays. - - Returns a dense (1 x n) vector with the column averages of A, which can - be an (m x n) sparse or dense matrix. - - >>> a = numpy.array([[1,2],[3,4]],'d') - >>> columnmeans(a) - array([ 2., 3.]) - - """ - if type(A) is numpy.ndarray: - return A.mean(0) - else: - # Assume it's sparse - try: - m = A.shape[0] - except AttributeError: - raise TypeError("columnmeans() only works with sparse and dense " - "arrays") - columnsum = innerprodtranspose(A, numpy.ones(m, float)) - return columnsum / float(m) - -def columnvariances(A): - """ - This is a wrapper for general dense or sparse dot products. - - It is not necessary except as a common interface for supporting ndarray, - scipy spmatrix, and PySparse arrays. 
- - Returns a dense (1 x n) vector with unbiased estimators for the column - variances for each column of the (m x n) sparse or dense matrix A. (The - normalization is by (m - 1).) - - >>> a = numpy.array([[1,2], [3,4]], 'd') - >>> columnvariances(a) - array([ 2., 2.]) - - """ - if type(A) is numpy.ndarray: - return numpy.std(A,0)**2 - else: - try: - m = A.shape[0] - except AttributeError: - raise TypeError("columnvariances() only works with sparse " - "and dense arrays") - means = columnmeans(A) - return columnmeans((A-means)**2) * (m/(m-1.0)) - -def flatten(a): - """Flattens the sparse matrix or dense array/matrix 'a' into a - 1-dimensional array - """ - if sparse.isspmatrix(a): - return a.A.flatten() - else: - return numpy.asarray(a).flatten() - -class DivergenceError(Exception): - """Exception raised if the entropy dual has no finite minimum. - """ - def __init__(self, message): - self.message = message - Exception.__init__(self) - - def __str__(self): - return repr(self.message) - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() diff --git a/scipy-0.10.1/scipy/maxentropy/setup.py b/scipy-0.10.1/scipy/maxentropy/setup.py deleted file mode 100644 index 774f194a69..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python - -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration -from os.path import join - -def configuration(parent_package='', top_path=None): - - config = Configuration('maxentropy', parent_package, top_path) - - config.add_data_dir('tests') - config.add_data_dir('examples') - - return config - -if __name__ == '__main__': - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/maxentropy/setupscons.py b/scipy-0.10.1/scipy/maxentropy/setupscons.py deleted file mode 100644 index 774f194a69..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env 
python - -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration -from os.path import join - -def configuration(parent_package='', top_path=None): - - config = Configuration('maxentropy', parent_package, top_path) - - config.add_data_dir('tests') - config.add_data_dir('examples') - - return config - -if __name__ == '__main__': - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/maxentropy/tests/test_maxentropy.py b/scipy-0.10.1/scipy/maxentropy/tests/test_maxentropy.py deleted file mode 100644 index 1cdf035d41..0000000000 --- a/scipy-0.10.1/scipy/maxentropy/tests/test_maxentropy.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python - -""" Test functions for maximum entropy module. - -Author: Ed Schofield, 2003-2005 -Copyright: Ed Schofield, 2003-2005 -""" - -from numpy.testing import assert_almost_equal, TestCase, run_module_suite -from numpy import arange, log, exp, ones -from scipy.maxentropy.maxentropy import logsumexp - -class TestMaxentropy(TestCase): - """Test whether logsumexp() function correctly handles large - inputs. - """ - def test_logsumexp(self): - a = arange(200) - desired = log(sum(exp(a))) - assert_almost_equal(logsumexp(a), desired) - - # Now test with large numbers - b = [1000,1000] - desired = 1000.0 + log(2.0) - assert_almost_equal(logsumexp(b), desired) - - n = 1000 - b = ones(n)*10000 - desired = 10000.0 + log(n) - assert_almost_equal(logsumexp(b), desired) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/misc/__init__.py b/scipy-0.10.1/scipy/misc/__init__.py deleted file mode 100644 index 73d5a4fa2c..0000000000 --- a/scipy-0.10.1/scipy/misc/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -========================================== -Miscellaneous routines (:mod:`scipy.misc`) -========================================== - -.. currentmodule:: scipy.misc - -Various utilities that don't have another home. 
- -Note that the Python Imaging Library (PIL) is not a dependency -of SciPy and therefore the `pilutil` module is not available on -systems that don't have PIL installed. - -.. autosummary:: - :toctree: generated/ - - bytescale - Byte scales an array (image) - central_diff_weights - Weights for an n-point central m-th derivative - comb - Combinations of N things taken k at a time, "N choose k" - derivative -\tFind the n-th derivative of a function at a point - factorial - The factorial function, n! = special.gamma(n+1) - factorial2 - Double factorial, (n!)! - factorialk - (...((n!)!)!...)! where there are k '!' - fromimage - Return a copy of a PIL image as a numpy array - imfilter - Simple filtering of an image - imread - Read an image file from a filename - imresize - Resize an image - imrotate - Rotate an image counter-clockwise - imsave - Save an array to an image file - imshow - Simple showing of an image through an external viewer - info - Get help information for a function, class, or module - lena - Get classic image processing example image Lena - pade - Pade approximation to function as the ratio of two polynomials - radon - - toimage - Takes a numpy array and returns a PIL image - -""" - -__all__ = ['who', 'source', 'info', 'doccer'] - -import doccer -from common import * -from numpy import who, source, info as _info - -import sys -def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'): - return _info(object, maxwidth, output, toplevel) -info.__doc__ = _info.__doc__ -del sys - -try: - from pilutil import * - import pilutil - __all__ += pilutil.__all__ -except ImportError: - pass - -import common -__all__ += common.__all__ - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/misc/common.py b/scipy-0.10.1/scipy/misc/common.py deleted file mode 100644 index aea6d90252..0000000000 --- a/scipy-0.10.1/scipy/misc/common.py +++ /dev/null @@ -1,404 +0,0 @@ -""" -Functions which are common and require SciPy Base 
and Level 1 SciPy -(special, linalg) -""" - -from numpy import exp, log, asarray, arange, newaxis, hstack, product, array, \ - where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, r_ - -__all__ = ['logsumexp', 'factorial','factorial2','factorialk','comb', - 'central_diff_weights', 'derivative', 'pade', 'lena'] - -# XXX: the factorial functions could move to scipy.special, and the others -# to numpy perhaps? - -def logsumexp(a): - """Compute the log of the sum of exponentials of input elements. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - res : ndarray - The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically - more stable way. - - See Also - -------- - numpy.logaddexp, numpy.logaddexp2 - - Notes - ----- - Numpy has a logaddexp function which is very similar to `logsumexp`. - - """ - a = asarray(a) - a_max = a.max() - return a_max + log((exp(a-a_max)).sum()) - -def factorial(n,exact=0): - """ - The factorial function, n! = special.gamma(n+1). - - If exact is 0, then floating point precision is used, otherwise - exact long integer is computed. - - - Array argument accepted only for exact=0 case. - - If n<0, the return value is 0. - - Parameters - ---------- - n : int or array_like of ints - Calculate ``n!``. Arrays are only supported with `exact` set - to False. If ``n < 0``, the return value is 0. - exact : bool, optional - The result can be approximated rapidly using the gamma-formula - above. If `exact` is set to True, calculate the - answer exactly using integer arithmetic. Default is False. - - Returns - ------- - nf : float or int - Factorial of `n`, as an integer or a float depending on `exact`. 
- - Examples - -------- - >>> arr = np.array([3,4,5]) - >>> sc.factorial(arr, exact=False) - array([ 6., 24., 120.]) - >>> sc.factorial(5, exact=True) - 120L - - """ - if exact: - if n < 0: - return 0L - val = 1L - for k in xrange(1,n+1): - val *= k - return val - else: - from scipy import special - n = asarray(n) - sv = special.errprint(0) - vals = special.gamma(n+1) - sv = special.errprint(sv) - return where(n>=0,vals,0) - - -def factorial2(n, exact=False): - """ - Double factorial. - - This is the factorial with every second value skipped, i.e., - ``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as:: - - n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi) n odd - = 2**(n/2) * (n/2)! n even - - Parameters - ---------- - n : int or array_like - Calculate ``n!!``. Arrays are only supported with `exact` set - to False. If ``n < 0``, the return value is 0. - exact : bool, optional - The result can be approximated rapidly using the gamma-formula - above (default). If `exact` is set to True, calculate the - answer exactly using integer arithmetic. - - Returns - ------- - nff : float or int - Double factorial of `n`, as an int or a float depending on - `exact`. - - Examples - -------- - >>> factorial2(7, exact=False) - array(105.00000000000001) - >>> factorial2(7, exact=True) - 105L - - """ - if exact: - if n < -1: - return 0L - if n <= 0: - return 1L - val = 1L - for k in xrange(n,0,-2): - val *= k - return val - else: - from scipy import special - n = asarray(n) - vals = zeros(n.shape,'d') - cond1 = (n % 2) & (n >= -1) - cond2 = (1-(n % 2)) & (n >= -1) - oddn = extract(cond1,n) - evenn = extract(cond2,n) - nd2o = oddn / 2.0 - nd2e = evenn / 2.0 - place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5)) - place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e)) - return vals - -def factorialk(n,k,exact=1): - """ - n(!!...!) = multifactorial of order k - k times - - - Parameters - ---------- - n : int, array_like - Calculate multifactorial. 
Arrays are only supported with exact - set to False. If n < 0, the return value is 0. - exact : bool, optional - If exact is set to True, calculate the answer exactly using - integer arithmetic. - - Returns - ------- - val : int - Multi factorial of n. - - Raises - ------ - NotImplementedError - Raises when exact is False - - Examples - -------- - >>> sc.factorialk(5, 1, exact=True) - 120L - >>> sc.factorialk(5, 3, exact=True) - 10L - - """ - if exact: - if n < 1-k: - return 0L - if n<=0: - return 1L - val = 1L - for j in xrange(n,0,-k): - val = val*j - return val - else: - raise NotImplementedError - - -def comb(N,k,exact=0): - """ - The number of combinations of N things taken k at a time. - This is often expressed as "N choose k". - - Parameters - ---------- - N : int, array - Number of things. - k : int, array - Number of elements taken. - exact : int, optional - If exact is 0, then floating point precision is used, otherwise - exact long integer is computed. - - Returns - ------- - val : int, array - The total number of combinations. - - Notes - ----- - - Array arguments accepted only for exact=0 case. - - If k > N, N < 0, or k < 0, then a 0 is returned. - - Examples - -------- - >>> k = np.array([3, 4]) - >>> n = np.array([10, 10]) - >>> sc.comb(n, k, exact=False) - array([ 120., 210.]) - >>> sc.comb(10, 3, exact=True) - 120L - - """ - if exact: - if (k > N) or (N < 0) or (k < 0): - return 0L - val = 1L - for j in xrange(min(k, N-k)): - val = (val*(N-j))//(j+1) - return val - else: - from scipy import special - k,N = asarray(k), asarray(N) - lgam = special.gammaln - cond = (k <= N) & (N >= 0) & (k >= 0) - sv = special.errprint(0) - vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1)) - sv = special.errprint(sv) - return where(cond, vals, 0.0) - -def central_diff_weights(Np, ndiv=1): - """ - Return weights for an Np-point central derivative of order ndiv - assuming equally-spaced function points. 
- - If weights are in the vector w, then - derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx) - - Notes - ----- - Can be inaccurate for large number of points. - - """ - if Np < ndiv + 1: - raise ValueError("Number of points must be at least the derivative order + 1.") - if Np % 2 == 0: - raise ValueError("The number of points must be odd.") - from scipy import linalg - ho = Np >> 1 - x = arange(-ho,ho+1.0) - x = x[:,newaxis] - X = x**0.0 - for k in range(1,Np): - X = hstack([X,x**k]) - w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv] - return w - -def derivative(func, x0, dx=1.0, n=1, args=(), order=3): - """ - Find the n-th derivative of a function at point x0. - - Given a function, use a central difference formula with spacing `dx` to - compute the n-th derivative at `x0`. - - Parameters - ---------- - func : function - Input function. - x0 : float - The point at which nth derivative is found. - dx : int, optional - Spacing. - n : int, optional - Order of the derivative. Default is 1. - args : tuple, optional - Arguments - order : int, optional - Number of points to use, must be odd. - - Notes - ----- - Decreasing the step size too small can result in round-off error. - - Examples - -------- - >>> def x2(x): - ... return x*x - ... - >>> derivative(x2, 2) - 4.0 - - """ - if order < n + 1: - raise ValueError("'order' (the number of points used to compute the derivative), " - "must be at least the derivative order 'n' + 1.") - if order % 2 == 0: - raise ValueError("'order' (the number of points used to compute the derivative) " - "must be odd.") - # pre-computed for n=1 and 2 and low-order for speed. 
- if n==1: - if order == 3: - weights = array([-1,0,1])/2.0 - elif order == 5: - weights = array([1,-8,0,8,-1])/12.0 - elif order == 7: - weights = array([-1,9,-45,0,45,-9,1])/60.0 - elif order == 9: - weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0 - else: - weights = central_diff_weights(order,1) - elif n==2: - if order == 3: - weights = array([1,-2.0,1]) - elif order == 5: - weights = array([-1,16,-30,16,-1])/12.0 - elif order == 7: - weights = array([2,-27,270,-490,270,-27,2])/180.0 - elif order == 9: - weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0 - else: - weights = central_diff_weights(order,2) - else: - weights = central_diff_weights(order, n) - val = 0.0 - ho = order >> 1 - for k in range(order): - val += weights[k]*func(x0+(k-ho)*dx,*args) - return val / product((dx,)*n,axis=0) - -def pade(an, m): - """Given Taylor series coefficients in an, return a Pade approximation to - the function as the ratio of two polynomials p / q where the order of q is m. - """ - from scipy import linalg - an = asarray(an) - N = len(an) - 1 - n = N - m - if n < 0: - raise ValueError("Order of q must be smaller than len(an)-1.") - Akj = eye(N+1, n+1) - Bkj = zeros((N+1, m), 'd') - for row in range(1, m+1): - Bkj[row,:row] = -(an[:row])[::-1] - for row in range(m+1, N+1): - Bkj[row,:] = -(an[row-m:row])[::-1] - C = hstack((Akj, Bkj)) - pq = linalg.solve(C, an) - p = pq[:n+1] - q = r_[1.0, pq[n+1:]] - return poly1d(p[::-1]), poly1d(q[::-1]) - -def lena(): - """ - Get classic image processing example image, Lena, at 8-bit grayscale - bit-depth, 512 x 512 size. 
- - Parameters - ---------- - None - - Returns - ------- - lena : ndarray - Lena image - - Examples - -------- - >>> import scipy.misc - >>> lena = scipy.misc.lena() - >>> lena.shape - (512, 512) - >>> lena.max() - 245 - >>> lena.dtype - dtype('int32') - - >>> import matplotlib.pyplot as plt - >>> plt.gray() - >>> plt.imshow(lena) - >>> plt.show() - - """ - import cPickle, os - fname = os.path.join(os.path.dirname(__file__),'lena.dat') - f = open(fname,'rb') - lena = array(cPickle.load(f)) - f.close() - return lena diff --git a/scipy-0.10.1/scipy/misc/doccer.py b/scipy-0.10.1/scipy/misc/doccer.py deleted file mode 100644 index acf6512852..0000000000 --- a/scipy-0.10.1/scipy/misc/doccer.py +++ /dev/null @@ -1,139 +0,0 @@ -''' Utilities to allow inserting docstring fragments for common -parameters into function and method docstrings''' - -import sys - -__all__ = ['docformat', 'indentcount_lines', 'filldoc', - 'unindent_dict', 'unindent_string'] - - -def docformat(docstring, docdict=None): - ''' Fill a function docstring from variables in dictionary - - Adapt the indent of the inserted docs - - Parameters - ---------- - docstring : string - docstring from function, possibly with dict formatting strings - docdict : dict - dictionary with keys that match the dict formatting strings - and values that are docstring fragments to be inserted. 
The - indentation of the inserted docstrings is set to match the - minimum indentation of the ``docstring`` by adding this - indentation to all lines of the inserted string, except the - first - - Returns - ------- - outstring : string - string with requested ``docdict`` strings inserted - - Examples - -------- - >>> docformat(' Test string with %(value)s', {'value':'inserted value'}) - ' Test string with inserted value' - >>> docstring = 'First line\\n Second line\\n %(value)s' - >>> inserted_string = "indented\\nstring" - >>> docdict = {'value': inserted_string} - >>> docformat(docstring, docdict) - 'First line\\n Second line\\n indented\\n string' - ''' - if not docstring: - return docstring - if docdict is None: - docdict = {} - if not docdict: - return docstring - lines = docstring.expandtabs().splitlines() - # Find the minimum indent of the main docstring, after first line - if len(lines) < 2: - icount = 0 - else: - icount = indentcount_lines(lines[1:]) - indent = ' ' * icount - # Insert this indent to dictionary docstrings - indented = {} - for name, dstr in docdict.items(): - lines = dstr.expandtabs().splitlines() - try: - newlines = [lines[0]] - for line in lines[1:]: - newlines.append(indent+line) - indented[name] = '\n'.join(newlines) - except IndexError: - indented[name] = dstr - return docstring % indented - - -def indentcount_lines(lines): - ''' Minumum indent for all lines in line list - - >>> lines = [' one', ' two', ' three'] - >>> indentcount_lines(lines) - 1 - >>> lines = [] - >>> indentcount_lines(lines) - 0 - >>> lines = [' one'] - >>> indentcount_lines(lines) - 1 - >>> indentcount_lines([' ']) - 0 - ''' - indentno = sys.maxint - for line in lines: - stripped = line.lstrip() - if stripped: - indentno = min(indentno, len(line) - len(stripped)) - if indentno == sys.maxint: - return 0 - return indentno - - -def filldoc(docdict, unindent_params=True): - ''' Return docstring decorator using docdict variable dictionary - - Parameters - ---------- - 
docdict : dictionary - dictionary containing name, docstring fragment pairs - unindent_params : {False, True}, boolean, optional - If True, strip common indentation from all parameters in - docdict - - Returns - ------- - decfunc : function - decorator that applies dictionary to input function docstring - - ''' - if unindent_params: - docdict = unindent_dict(docdict) - def decorate(f): - f.__doc__ = docformat(f.__doc__, docdict) - return f - return decorate - - -def unindent_dict(docdict): - ''' Unindent all strings in a docdict ''' - can_dict = {} - for name, dstr in docdict.items(): - can_dict[name] = unindent_string(dstr) - return can_dict - - -def unindent_string(docstring): - ''' Set docstring to minimum indent for all lines, including first - - >>> unindent_string(' two') - 'two' - >>> unindent_string(' two\\n three') - 'two\\n three' - ''' - lines = docstring.expandtabs().splitlines() - icount = indentcount_lines(lines) - if icount == 0: - return docstring - return '\n'.join([line[icount:] for line in lines]) diff --git a/scipy-0.10.1/scipy/misc/lena.dat b/scipy-0.10.1/scipy/misc/lena.dat deleted file mode 100644 index 29db172942..0000000000 --- a/scipy-0.10.1/scipy/misc/lena.dat +++ /dev/null @@ -1,3 +0,0 @@ -]q(]q(K¢K¢K¢K¡K¢KK£K¡K¥K¡K¢K K›K£K K›KKœK¡K¡KšKœKšKK›KK›K˜KœKšKšKœKšKžK›K KžK§K 
K¦K¦K¥K¦K¬K«K¯K­KªK¬K¬K§K¯K¨K§K¢K¡K”K”K›KK‚KwKvKjKaKaK^K\KXKaKfK`KgKdKhKiKiKiKhKnKnKlKkKiKhKmKmKmKlKkKkKlKnKmKkKmKmKnKnKlKhKjKjKlKmKoKuKnKqKvKyKwKpKzKyKzK}KzKyK|K|K‚KK{KzKK„KƒK„KK‡KK€KƒKƒKK‡KˆK„K†KƒKƒK€K‚K†K~KK~K…K‚KKƒK€K„KK€K‡K…KˆK†K€K†K‚K‡K…K…K†K‡K„K‡KƒKˆK†K„K…K…KˆK…K†K†K„K…KƒK„K…K‡K‰K‡K…KƒK…KƒK‡KˆK„K‡KˆK‰K…KŠKˆK†K…K‡K‡KˆK„K…KŒK„KƒK‹K†KƒKK„K„KKƒK„K„KƒKK„KƒK€K‡K…K…K‡KƒK„K„K„KƒK…K‚KƒKƒK…KˆK†K„K‡K„K‡K†KŠK„K„K…K†KKK…KŠKŠKŽK†K„K„KK€K€K„K†KKƒK†K…KƒK€KKKƒK„K~K‚K‡K†KƒKK~KƒKKƒK†KK~K„KƒK€KƒKKƒK}KK‚K}KKK„K~KKyK|K|KyKwKzKyKwKuKsKpKqKsKiKfKgKhKpKtKvKzK„K‡K‹KŒKK’K–K™K˜K™KŸKŸKŸK£KžK¢K¡K›K›K”K–K™K–KšK—K—K›K—KK›K˜KKœKšKšKŸKžK›KKšK™K—KK™K—KšK˜KšK™K˜K™K™K—K—KžKšKšK˜K˜K™KKšKžKžKKšKšKœKŸKŸKŸKKœKœK˜K›K›KžK±KÀKÆKÎKÐKÕKÔKÕKØKÚKÚKÚKÚKÜKØKÓKÌKÀK¬K˜KzKjKiKeKgKhKoKiKqKtKuKwKuKwKvKyK{KKyKwKuKxK}KyKvK{KzK}K€KyKwK{KvKxK{K{KwKzKyKyK{KzK|KKvK|K~K|K~K~KK{K|K|K}KvKvK€KzK~K~K~K‚K|K~K}KyKK|K~KKwK{KrKyKtKwKvKsKxK|KŠK¤K¨KªK«KªK›K€e]q(K¢K¢K¢K¡K¢KK£K¡K¥K¡K¢K K›K£K K›KKœK¡K¡KšKœKšKK›KK›K˜KœKšKšKœKšKžK›K KžK§K K¦K¦K¥K¦K¬K«K¯K­KªK¬K¬K§K¯K¨K§K¢K¡K”K”K›KK‚KwKvKjKaKaK^K\KXKaKfK`KgKdKhKiKiKiKhKnKnKlKkKiKhKmKmKmKlKkKkKlKnKmKkKmKmKnKnKlKhKjKjKlKmKoKuKnKqKvKyKwKpKzKyKzK}KzKyK|K|K‚KK{KzKK„KƒK„KK‡KK€KƒKƒKK‡KˆK„K†KƒKƒK€K‚K†K~KK~K…K‚KKƒK€K„KK€K‡K…KˆK†K€K†K‚K‡K…K…K†K‡K„K‡KƒKˆK†K„K…K…KˆK…K†K†K„K…KƒK„K…K‡K‰K‡K…KƒK…KƒK‡KˆK„K‡KˆK‰K…KŠKˆK†K…K‡K‡KˆK„K…KŒK„KƒK‹K†KƒKK„K„KKƒK„K„KƒKK„KƒK€K‡K…K…K‡KƒK„K„K„KƒK…K‚KƒKƒK…KˆK†K„K‡K„K‡K†KŠK„K„K…K†KKK…KŠKŠKŽK†K„K„KK€K€K„K†KKƒK†K…KƒK€KKKƒK„K~K‚K‡K†KƒKK~KƒKKƒK†KK~K„KƒK€KƒKKƒK}KK‚K}KKK„K~KKyK|K|KyKwKzKyKwKuKsKpKqKsKiKfKgKhKpKtKvKzK„K‡K‹KŒKK’K–K™K˜K™KŸKŸKŸK£KžK¢K¡K›K›K”K–K™K–KšK—K—K›K—KK›K˜KKœKšKšKŸKžK›KKšK™K—KK™K—KšK˜KšK™K˜K™K™K—K—KžKšKšK˜K˜K™KKšKžKžKKšKšKœKŸKŸKŸKKœKœK˜K›K›KžK±KÀKÆKÎKÐKÕKÔKÕKØKÚKÚKÚKÚKÜKØKÓKÌKÀK¬K˜KzKjKiKeKgKhKoKiKqKtKuKwKuKwKvKyK{KKyKwKuKxK}KyKvK{KzK}K€KyKwK{KvKxK{K{KwKzKyKyK{KzK|KKvK|K~K|K~K~KK{K|K|K}KvKvK€KzK~K~K~K‚K|K~K}KyKK|K~KKwK{KrKyKtKwKvKsKxK|KŠK¤K¨KªK«KªK›K€e]q(K¢K¢K¢K¡K¢KK£K¡K¥K¡K¢K K›K£K K›KKœK¡K¡KšKœKšKK›KK›K˜KœKšKšKœKšKžK›K KžK§K 
K¦K¦K¥K¦K¬K«K¯K­KªK¬K¬K§K¯K¨K§K¢K¡K”K”K›KK‚KwKvKjKaKaK^K\KXKaKfK`KgKdKhKiKiKiKhKnKnKlKkKiKhKmKmKmKlKkKkKlKnKmKkKmKmKnKnKlKhKjKjKlKmKoKuKnKqKvKyKwKpKzKyKzK}KzKyK|K|K‚KK{KzKK„KƒK„KK‡KK€KƒKƒKK‡KˆK„K†KƒKƒK€K‚K†K~KK~K…K‚KKƒK€K„KK€K‡K…KˆK†K€K†K‚K‡K…K…K†K‡K„K‡KƒKˆK†K„K…K…KˆK…K†K†K„K…KƒK„K…K‡K‰K‡K…KƒK…KƒK‡KˆK„K‡KˆK‰K…KŠKˆK†K…K‡K‡KˆK„K…KŒK„KƒK‹K†KƒKK„K„KKƒK„K„KƒKK„KƒK€K‡K…K…K‡KƒK„K„K„KƒK…K‚KƒKƒK…KˆK†K„K‡K„K‡K†KŠK„K„K…K†KKK…KŠKŠKŽK†K„K„KK€K€K„K†KKƒK†K…KƒK€KKKƒK„K~K‚K‡K†KƒKK~KƒKKƒK†KK~K„KƒK€KƒKKƒK}KK‚K}KKK„K~KKyK|K|KyKwKzKyKwKuKsKpKqKsKiKfKgKhKpKtKvKzK„K‡K‹KŒKK’K–K™K˜K™KŸKŸKŸK£KžK¢K¡K›K›K”K–K™K–KšK—K—K›K—KK›K˜KKœKšKšKŸKžK›KKšK™K—KK™K—KšK˜KšK™K˜K™K™K—K—KžKšKšK˜K˜K™KKšKžKžKKšKšKœKŸKŸKŸKKœKœK˜K›K›KžK±KÀKÆKÎKÐKÕKÔKÕKØKÚKÚKÚKÚKÜKØKÓKÌKÀK¬K˜KzKjKiKeKgKhKoKiKqKtKuKwKuKwKvKyK{KKyKwKuKxK}KyKvK{KzK}K€KyKwK{KvKxK{K{KwKzKyKyK{KzK|KKvK|K~K|K~K~KK{K|K|K}KvKvK€KzK~K~K~K‚K|K~K}KyKK|K~KKwK{KrKyKtKwKvKsKxK|KŠK¤K¨KªK«KªK›K€e]q(K¢K¢K¢K¡K¢KK£K¡K¥K¡K¢K K›K£K K›KKœK¡K¡KšKœKšKK›KK›K˜KœKšKšKœKšKžK›K KžK§K K¦K¦K¥K¦K¬K«K¯K­KªK¬K¬K§K¯K¨K§K¢K¡K”K”K›KK‚KwKvKjKaKaK^K\KXKaKfK`KgKdKhKiKiKiKhKnKnKlKkKiKhKmKmKmKlKkKkKlKnKmKkKmKmKnKnKlKhKjKjKlKmKoKuKnKqKvKyKwKpKzKyKzK}KzKyK|K|K‚KK{KzKK„KƒK„KK‡KK€KƒKƒKK‡KˆK„K†KƒKƒK€K‚K†K~KK~K…K‚KKƒK€K„KK€K‡K…KˆK†K€K†K‚K‡K…K…K†K‡K„K‡KƒKˆK†K„K…K…KˆK…K†K†K„K…KƒK„K…K‡K‰K‡K…KƒK…KƒK‡KˆK„K‡KˆK‰K…KŠKˆK†K…K‡K‡KˆK„K…KŒK„KƒK‹K†KƒKK„K„KKƒK„K„KƒKK„KƒK€K‡K…K…K‡KƒK„K„K„KƒK…K‚KƒKƒK…KˆK†K„K‡K„K‡K†KŠK„K„K…K†KKK…KŠKŠKŽK†K„K„KK€K€K„K†KKƒK†K…KƒK€KKKƒK„K~K‚K‡K†KƒKK~KƒKKƒK†KK~K„KƒK€KƒKKƒK}KK‚K}KKK„K~KKyK|K|KyKwKzKyKwKuKsKpKqKsKiKfKgKhKpKtKvKzK„K‡K‹KŒKK’K–K™K˜K™KŸKŸKŸK£KžK¢K¡K›K›K”K–K™K–KšK—K—K›K—KK›K˜KKœKšKšKŸKžK›KKšK™K—KK™K—KšK˜KšK™K˜K™K™K—K—KžKšKšK˜K˜K™KKšKžKžKKšKšKœKŸKŸKŸKKœKœK˜K›K›KžK±KÀKÆKÎKÐKÕKÔKÕKØKÚKÚKÚKÚKÜKØKÓKÌKÀK¬K˜KzKjKiKeKgKhKoKiKqKtKuKwKuKwKvKyK{KKyKwKuKxK}KyKvK{KzK}K€KyKwK{KvKxK{K{KwKzKyKyK{KzK|KKvK|K~K|K~K~KK{K|K|K}KvKvK€KzK~K~K~K‚K|K~K}KyKK|K~KKwK{KrKyKtKwKvKsKxK|KŠK¤K¨KªK«KªK›K€e]q(K¢K¢K¢K¡K¢KK£K¡K¥K¡K¢K K›K£K K›KKœK¡K¡KšKœKšKK›KK›K˜KœKšKšKœKšKžK›K KžK§K 
K¦K¦K¥K¦K¬K«K¯K­KªK¬K¬K§K¯K¨K§K¢K¡K”K”K›KK‚KwKvKjKaKaK^K\KXKaKfK`KgKdKhKiKiKiKhKnKnKlKkKiKhKmKmKmKlKkKkKlKnKmKkKmKmKnKnKlKhKjKjKlKmKoKuKnKqKvKyKwKpKzKyKzK}KzKyK|K|K‚KK{KzKK„KƒK„KK‡KK€KƒKƒKK‡KˆK„K†KƒKƒK€K‚K†K~KK~K…K‚KKƒK€K„KK€K‡K…KˆK†K€K†K‚K‡K…K…K†K‡K„K‡KƒKˆK†K„K…K…KˆK…K†K†K„K…KƒK„K…K‡K‰K‡K…KƒK…KƒK‡KˆK„K‡KˆK‰K…KŠKˆK†K…K‡K‡KˆK„K…KŒK„KƒK‹K†KƒKK„K„KKƒK„K„KƒKK„KƒK€K‡K…K…K‡KƒK„K„K„KƒK…K‚KƒKƒK…KˆK†K„K‡K„K‡K†KŠK„K„K…K†KKK…KŠKŠKŽK†K„K„KK€K€K„K†KKƒK†K…KƒK€KKKƒK„K~K‚K‡K†KƒKK~KƒKKƒK†KK~K„KƒK€KƒKKƒK}KK‚K}KKK„K~KKyK|K|KyKwKzKyKwKuKsKpKqKsKiKfKgKhKpKtKvKzK„K‡K‹KŒKK’K–K™K˜K™KŸKŸKŸK£KžK¢K¡K›K›K”K–K™K–KšK—K—K›K—KK›K˜KKœKšKšKŸKžK›KKšK™K—KK™K—KšK˜KšK™K˜K™K™K—K—KžKšKšK˜K˜K™KKšKžKžKKšKšKœKŸKŸKŸKKœKœK˜K›K›KžK±KÀKÆKÎKÐKÕKÔKÕKØKÚKÚKÚKÚKÜKØKÓKÌKÀK¬K˜KzKjKiKeKgKhKoKiKqKtKuKwKuKwKvKyK{KKyKwKuKxK}KyKvK{KzK}K€KyKwK{KvKxK{K{KwKzKyKyK{KzK|KKvK|K~K|K~K~KK{K|K|K}KvKvK€KzK~K~K~K‚K|K~K}KyKK|K~KKwK{KrKyKtKwKvKsKxK|KŠK¤K¨KªK«KªK›K€e]q(K¤K¤KžK›K¡KŸKŸK K K K›KŸKšKšKœKšKœKœK™KK˜K™K™K–KKœKšK›KœK˜K›KKK™K˜K—KKŸK¡K¥K¤K¨KªKªK«K­K°K«KªK°K«K«K¨K§KžKžK–K”KKK{KwKoKnKfK`K\K_KYK_K`KaKeKeKbKdKlKhKjKjKiKkKlKiKdKjKhKlKkKeKjKhKjKlKiKfKlKhKoKgKiKkKoKhKlKoKmKpKwKsKvKxKxKzKwKyK|KzKzK€K}KzK~KK|K|K}K|K‚K…KƒKK…K‚KKKƒK…K€K€K‡KƒKƒK„K„K}K€K‚K‚K…KƒK‚K€K‚KƒK‚K‰K‡K‡K…K‚K‡K†K‰K‡KŠKŠKˆK†K†K…KˆK†KˆKˆK„K†K†KŠK†KƒK„KK…KˆKK„K„KƒKƒK…K†K†K…K…K…K…K†K„K‚K†K†KˆK„K„K…K„KˆKƒK†K‡K‰K€K„K€KKKK€K„KƒKƒKK„K„KK~KƒK„KŠKK{KK‚K€K‚K‚K€K€KˆK‚K…KƒK‡K„KƒK†KKƒK†KK‚K‚K„K†K‚K„KˆK†K†K„K‚K„K}K…K€KƒKK~KK‚K…K€K€KƒK‚K€KK†K„K|KzK|KK…KKƒK‚KK~KƒKƒKK…KKK~KƒKKKKK~K}K{KxKzK{K|K|KwKtKsKtKvKmKoKiKjKhKeKnKmKxKzK€K€K‹K‰KK‘K–K—K•K™K›KK¡KŸK¤KŸKKŸK™K˜K˜K™KšK˜K—KšK—K’K™K›K”KšKœK—K™K™K–KœKK›KšKšK˜K˜KšK—K—K˜K˜K™K›K—K›K–K›KžKŸK›K˜KšK›K˜KšKœKžK›KœKŸK™K›KœKKœKšK˜K–K˜KK©KºKÂKÈKÐKÒKÕKÖK×KØKÚKÚKÙKÚKÛK×KÑKÈKºK£KŒKoKeKaKhKfKiKpKoKoKxKtKwKuKvKxKyKyK}KzKzKuKzKzK|KzK|K{KzKyK|KvKwKtK|KxKuKzKxK{KK~K{K}K|KKzKxK}K€K|K{KK|K{KvKvKKzKyK|K€KKK|K~K~KK}K|K}KyK{KwKsKrK{KzKzK~K~K‰KK•K’K‹K|KgKMe]q(K K K£KžK K¢KŸKœKŸK¢KœK¢K›K›KœK˜K™K KšKšKžK–K›K˜K›K›K™K›K—K•K™K 
KžK›K›KšKŸKŸK¤K¦K¨K§K©K«K«K«K«K®K¬K­K©K§K§K¦KŸKœK”K“K‹KƒKzK~KkKkKaKbKYK^K[KcK\KcKaK`K`KgKhKmKoKhKfKcKjKgKjKeKjKfKcKgKhKlKlKlKeKkKhKjKkKhKfKfKsKgKkKkKnKpKxKxKsKwKuKxKxK{K€KyK|K{K|K}KxK~K|KzKK„K…KKK‚K†K‡K}KK~K†K€K„K„KKK{K„K~K…K…K‚K‚KƒKK}K†KƒK€K‚K‚K‚K‚K†KƒKˆK†K‡K…KˆK…K„K†K†K„K†K†KŠKƒK…K‡KƒKƒK†KƒK†KK…KKK‚KƒKƒK…KK‡K†K‡K†K…KƒK…KƒKˆK†K„KƒK…K€K…K…KˆK†KƒK‰K…KŠK‚K}K…KƒKK~KƒK†K€K€KƒK‚K…K„KˆKŠKK€KƒKƒKK~K‚K„KƒKƒK„K†K‚K‡K„K†K…KK„K†KƒKK„KˆKƒKKƒK‡K†K„K…K‚KK~K‚K}K€K|K~KK„KƒKK{K„KK€K€K~KKK€KKK€KƒK…K}KƒK€K‚K€K~KK~KKKKKK€K{K}K{K{KzK|KzK{K~KwKtKtKpKoKoKnKgKgKhKfKfKrKvKzK~K}K„K‡KŒK‘K–KK–KšKKžKKžKŸK£KK›K›KšK—K™K™K˜K™K™K˜K—K—KK˜KžK•KšK˜KžKšK›KšKšK™K™K—K™K™K—K™K—KšKœKKœKœK›K™KšK™K˜K—KšK˜KšKžK›KšKœK›KœK˜KœKœKšKœK›KšK—K–K˜K¡K®KÁKÇKËKÐKÕKÕK×KØKÙKÚKÜKÜKÛKÙKÔKÍKÃK²K›KKlKiKbKfKgKmKiKqKuKtKsKwK|KvKsKxK{KyKvKyKzKKzKzK|K}K~KxKxKvKyK|KyK|KyK~KvKzKK}K|K{KKKxK}KzK|KzKwKK}KzK}KzKzK{K~K{K}KKyKzK{K}K}K~K}K~K~K|KwKxKvKK~K~K‚KKK€KzKpK\KOK@K:e]q (KŸKŸK›KKžKŸKœKKŸK¡K¡KžK™K™K™K—K›KšKœK™K›K™K˜K˜KšK›KšK˜K˜K˜K™KšKœKK˜K™KKžK¤K¢K¦K©K©K©K¯K­K­K«K«K¯KªK¬K¦K¤K£KšK–K—KˆK…K|KyKrKnK^K^KZKSKVKVKXK]K\KdKaKcKiKnKjKmKjKhKgKeKcKiKjKlKfKhKgKcKjKgKiKkKgKgKkKjKjKiKkKiKmKoKpKoKqKsKsKwKyKzK}K}K~K€KK|K€K}K{KzKKxK{KKK„KK€KKKKƒKKƒK‚KƒK~K„K†KK„K†K…K†KƒK†KƒKƒKƒK‚KƒK‚K†K…K€KƒKKƒKƒK„K„K†K†K„K‡K„KK‚K…KˆK†KƒKƒK„K‚KƒK„K†K„K„KKƒK†K„K‚K‚K„K†K‚K‡KK†KƒKƒK„K†K„K‚KKƒKK†K…KK‰K‰KKKŒKˆK„K‚K‰KƒKƒKƒK‚K‚KK€K€KKˆK‰K‚K„KƒKKK„K~K}K€K„K…KK†K†K„K…K…K…K…KKƒK…K„K}K‚K‡K…K„K…KˆKƒK„KƒK…K‚K‚K€K~KKKK€K†K‚KKK€KK}K‚K‚KK~K€K}K…K€K‚K„K€KKƒK‚K‚K‚KƒK}K‚K€K‚K}K€K~K}K‚K|K~KzKxKuKvKyKxKtKwKqKqKtKoKjKjKmKcKeKqKtKzK{K~KˆK‰KKK‘K”KœK˜KšK›KKžK£K KŸK K›KžKœK•K˜K•K—K›K™K“K™KšK˜KšK˜K™K—K›K˜K˜K KœK™K›KšKœKšK™KšK™K–K™K˜K›K›K˜KšK›K—K—K—K˜K—K›K—K›KšK˜KšK KKKKœK™KœK™K›K—K•K–K¥K´KÂKÉKÍKÐKÔK×K×K×KÙKÙKÚKÚKÚK×KÑKÉK»K©KŒKzKfKaKgKhKoKlKrKqKuKrKvKwKrKtKwKuKwKxKyKtK{KwKvKyK|K{K~KyK~K|K{K|KwK{KzK|KK|K|K}KK}KzK€K}KxK|K{KwK~K{KyK}KyKyKwKzK~K~K€KK€KyKKK‚K€K€K}K}K{K|KxK€KK~KK|KrK_KSKFKCK2K7K/e]q -(K›K›KžKžKŸK KKK£KKžKŸKŸKšKœKK›K›K¢KžKK™KœK˜KžK›K—KšK˜K—KKšK›KšK™KœK¡K 
K¨K¥KªK¨K¯K¨K¬K©KªK®K­K§K¨KªK¥K¢K K—K“K”KŠK‰K|KxKnKrKaK^KZKWKXK\K]K_K^KfKcKhKiKgKhKfKiKnKkKgKhKeKlKlKgKfKkKiKkKgKlKgKfKmKsKgKjKkKgKrKnKqKnKpKnKrKwKuKwKxKyK|KwKxKKzK~K~K‚K|K}K}KK€KzK~K~KKK}K…K…K„KK…KƒK‚KƒKKK„K€KK„K‚K‚K„KK‚K€K„KK‚K†KK‹K€K…K„K‚K…K…K…K†K„K…K…K‰K†K‚K…KƒK„KƒK‡K…KˆK‚K…K…K‚KKKƒK‡K€K…KƒK„KƒK†K…K„K„K…K‡K…KƒK„K‚K‚KˆKK„K„K{K…KƒK†KƒKˆK†KƒKK}KK|KK‚KK‚K†KˆK…K‹K„K†K„KKKK‚K„KƒK„KK„K†K‚K…K‚K‚K…KƒKK‚K„K‚K†K†K†KKƒK„KˆKƒK…K„KK€KƒKK{KƒK‚KKƒK‚K}K|K„K|KK~KƒK€K|K~KK|K‚K~KKKK‚KƒKK€K~K€K‚K€KƒKK}K~K|K€KxK~K€KzK~KzKwKzKtKrKrKrKmKoKqKoKjKfKgKkKjKsKuKyKƒK†K‡KŒK“KK™K•K˜KšKœKœKžK¢KŸKžKœK›KœK›K™K™K™K—K˜K™K›KœKK™KœKšKšKšKK›KK›KKžKŸK™KKKšKœKœK˜K™KšKœK™K›K˜KšKœK›KšKœKœK™KšKœK›K™K˜K›KœK™K›K™KœK˜K—KšK˜K•KœK©KºKÃKËKÎKÔKÓKÔKØKØKÚKØKÙKÛKØKÕKÌKÄK³KžK‚KoKgKfKmKlKjKpKoKsKqKtKvK{KwKvK}KwKzKuKyK}KvKwKzKwKwKvK|K|K|KwKuKxKzKxK|K}K|K~K}K{KvK{K~K{KyK|K~K{K|KxK|K}K{K~K|K|K}K€KK}K€K{K€KK~K€KKK}K€K‚K}KK€KK{KlK_KIK@K1K2K.K/K1e]q (K›K›KKžK›KšK›KK¡KšK›KKKKK˜KœK›K™K™KœKK›K™K˜KšK™KœKšK—K›KK˜K–K›K£K£K¢K¤K¦K©K§KªK¨KªK­K©K¬K¬K¬K©K¨KªK K›KšK–KŒKŒK…KyKyKqKjKaK^K[K\KZK[KYK[KaK]KeKfKbKcKeKfKlKeKiKgKcKhKiKpKiKgKkKkKiKhKgKfKcKkKgKcKdKnKiKmKmKmKiKrKrKtKtKrKuKtK}KwKvK|KKxK}K~K}KyK~K|K~K€K~K‚K€KK}K€KƒKKƒK€KK€KK€KˆK‚KƒKƒK€KKƒK€KK‚K„K„KK€K‚K€KƒK…K‚K„K„KK…KƒK†K‡KˆK†KˆK†K‰KŠK…K„K…K€K…K‡KŒKK„K…K…KƒKˆKŠK…K‰K„K„KŒK…KƒK‚K€K|K…K…KK‚KƒK‡K„KK}KKƒK€K†KƒK‚K}KƒK‚K‚K€KƒK}K€KK€KKK†K†KKK‚KƒK„K~K{K|K€K€K~KƒKƒK‚K€KKƒK„KƒK„K‚K‚K„K€KƒK€K€KKƒK‚K…KˆK‚KK„K…K€K‚K„KK…K€K~K‚KKK}KƒKKKKK}K}K~KK|K|K€KzK|KK}K‚KKK{K~K€KK‚KwK{K}K‚K}K}K|KK|K}KtKwKxKxKtKsKqKpKoKqKgKoKiKgKfKlKoKuKvK|K‚K‡K‹K‹KK•K–K•K›K™KžKK¡K¢K KKK™KžK›KšKœKšK˜KšK¡KœKŸKK›KœK›KŸK›K™K˜K™K™KšKKœKKœKšK™K˜KšK˜KšK—KKœK–K—K™KšKœKšKKœK—KžK™K›K™KK™K˜KžK”K™KšK™KšK›K•K–K¡K³K¾KÇKÍKÒKÓKÕK×KØKÙKØKØKÙKÚK×KÔKÌK½KªK”KzKnKiKhKhKlKnKqKpKsKuKuKuKvKyKzKuKyKwKyKzKwK|KwKwKK~KxKzKxKuKxKzKyK}KzK{K|K~KzK{K{K}KK{KuKzKK}KzK~K~KKzKzK|K~KK}K|K}KK~KK~K}K~K}KƒKƒK|K‚KK~K}KwKiKaKRKK/K1K0K*K(K0K,K2K1K0K-K*K1K5e]q(KKK›KKŸK™K–KšKœKŸKžKKšKK›KœK›K™K™KKK™KœKœK›K—KšK™K˜K K£KŸK¡K¥K¦K©K¨K¦K¥K§K¦K§K¦K¦K£K¨K¤K¥K¦K£K 
K¡KžKžK˜K“K‘KŠK‡KKzKpKlKhK_KXK\KZKTKXK^K_KWK`KhKdKoKeKdKeKmKhKeKhKiKgKkKnKlKiKkKjKcKjKnKiKeKgKfKdKfKhKgKfKlKoKpKoKqKtKqKyKuKuKuKxKwK}KyK~K}K{K}KyKyKƒK~K}KK€KKK~K€K„K…KK‚KK„KƒK„KƒK€KK‚K„K„K„KK„K„K…K‡K…K~K‚KK‚KƒK„KƒK‚KˆKˆKK…K€K…K…K„K‡K„K‡K„K…K‘K‰K†K…K…K†K~KƒK†K€K„K†K‡KŠK…K„K†K„K‚KƒKKŽKƒK‡K†KƒK‚KƒK…K‡K‡K†K„K‡K‡K‰K‡K‡K…K‚KŒKKK†K…K{K„K}KK‚K„KK‚KˆKƒKKƒK‚K~K‚K„K‚K‚K„KƒKŠKŠK…K‡K‡K‰KˆK…K„K‚K„KƒK…KƒK€K‚KˆK€K„KƒKƒKƒK„K…K…KƒKK€KKƒK€K{K‡KƒK~K€KKKK€K~K}K~KKK}KK„KƒK…K€KKƒKK‚KƒK‚KKK~KˆK€K„K€K|K~KK}K~K{KtKqKrKqKqKrKuKxKoKoKmKtKiKlKiKiKfKpKrKyKKŒKK—KK”K“K–K˜KK™KšK¢K K¢K£K¡K¡K¤K£K¡KŸK¡KŸKŸKŸK£K£K¤K¢K£K K¢K¡K¢KŸK¡KœK¢K¡KžK¡K¡KœKžKžKœKžK›K˜K›K›K›K–K˜KœK™KžKK•K˜K›K•K—K•K™K›K—K–K—K—K–K˜K–K”K”K•K“K˜KªK·KÂKÌKÏKÓK×KÙKØKØKÙKÜKÚKÜKÛK×KÑKÊK»K«KŽKwKdKgKpKkKhKjKnKqKtKuKtKtKwKyKyKwKxKvK{KwK{KyK}KwKyKzK}K~K{KyKKzKyKzK~K|K~K~KKK{KzK€KK~K~K}KKK€KK‚K}K„KƒK„K‡K‰KƒK‚K|KqK`KPK=K/K-K2K+K-K.K(K1K3K/K0K,K(K-K5e]q(KKKŸK›KžKKšKšKKKšKšKKŸK¡KKŸKžKŸK K›K™KšKœKšK›KšKžK K K¡K¢K¦K¦K§K§K¨K¦K¥K©K§KªK¦K¥K§K§K¢K¡K©K¤K£K¡K K›K™K•KŽKK†KƒK}KtKmKdK^KdK^KWK^KXK]KaK_KbKgKiKbKdKeKgKeKjKdKhKnKiKgKjKgKgKmKhKgKiKgKiKbKbKhKeKeKhKmKnKkKmKrKpKrKxKrKyKvKvKvKvKxKzK}KyKzK|K€K|KxK|KK€KKKƒK‚KK€KƒK~KK~KƒKƒK…K‡K‚K„K}KˆKK‚K„K„K„K„K„K…KK‚KK…KK…KƒK„K‚K„K‡K‚K‚KK€K‡K‚K‡K„KˆK„KƒK†K‚KŠK‰K†KƒK‚K„KKƒKˆK‚K†K‡KƒK…K‚KˆK€KKƒK‚K‰K‹KƒK…KƒK†K‚KŠKƒK‚K„KKƒK€K‡K…K‚K…KŽKƒKKK‚KK‚K~K€K€K‡KƒKˆK„KKKKK~K€K}K…K‚K†K„K‰KˆKˆKˆKˆK‡KˆK†K†K„KƒK…K‡K‚KƒK„K‚K‚K‚K…K…K‡K‚K…KƒKK‚KKƒK~K{K€KK‚K‡K„K€KKƒK‚K{K~K€K~K}K}KKKƒK€KƒK‚K‚K~K„K„KK{K|KK€K‚KK€K}KK}K}K|K{KyKtKtKuKpKqKvKtKmKpKlKqKhKeKjKfKgKnKsKxKK€K‰K‹KŽKK–KšK›KšKžK KŸKŸK¡K¦K¡K K¡K K¤K¢K KŸK¡KŸK¦K£K¥K¢K¥K¡KžK¤K¢K£K¥KŸKžKŸKœKœKžK K K¡K¡KšKKK›KšKšK›K—KžKšK™KKšK˜K™K™KšK–K—K˜K˜K—KšK˜K—K–K–K”K”K“K’K’K™K¬K»KÇKÌKÑKÔK×KÚKÙKÚKÙKÜKÛKÚKÛKÔKÍKÆK±KœK‡KiKfKgKhKmKlKkKoKpKuKoKzKrKxKyKtKwKxKyK}K}KKxK}KK|K‚K~KzKwK{K{KzK{K†KyK|K|KyK|KKwK}K|K‚K~K~K€KƒK€K‡KK‚K„K‡K†KŠKŠKK{KnK]KSK9K2K+K(K.K4K.K.K+K2K.K2K.K0K1K.K2e]q(KšKšKšK›KK KKœKKKŸK›K¡K™KŸKŸK KŸK KœKžK›KœKŸKœK™KšKšKK›KŸK£K¡K¥K¦K¥K¥K¢K¦K©K£K¨K¥K¦KªK£K¤K 
K¦K¦K£K¡KšKšKœK•K”KKKKˆK~KKqK_KcKXKVKWKYK_K_K_KbK`KbKkKcKdKcKiKlKnKkKiKrKlKkKjKhKhKiKhKmKgKjKmKfKnKiKfKeKjKkKoKrKuKnKuKsKsKsK{K{K}KzKxKyKxKvKK}K~KzKzK{KKKK‚K}K€KK‚KƒKƒKK‚K€KKƒKƒK„KƒK‚K…K‚K…K‰K…KƒK…K…K‡KƒK‚K€KƒK…K…K‚K€K€K…K‚K„K€K†KƒK€K‚K†K„K‡K†K…K„K„K€K„KK…K†KK‡K€K„K„KKK‚KKƒKƒK€K‚K‚K„K‹K‡K†K†K‚K„K„K‚K…K†KƒKƒKƒKK‚K€K„KˆK‰KƒK‚K„K†KˆK„K„K‚K‚K‡K€K‡KK„KŒK€K~KƒKKK…K‚K‚KƒK‡K†KƒK…KŠKŒK‡K…K†K‰KƒKƒK…K‚K†K†K„KƒK‡KKK‚KK‘KK€K~KKKK€KKƒKKƒKƒK‚K‚KˆKƒK~K„K}KƒKK…KKK}K„KK†K„K„KK„K}K~K}K‚KƒKK€K~KyKƒK{KzK~KzKvKxKvKxKsKqKtKrKlKoKmKqKkKlKeKeKhKmKqKsKzK‚KˆKK‹K’K˜K•KšK™KžKžKžK¢K¡K£K£K¢K¢K¡K¥K¡K¢KœK¢K¢K¡K¡K K¦K¦K£K¢K£K¢K£K¡KŸK›KžKŸKŸKK KKK KŸKœK›KKšKšKšKœK›K›K™K™K›K–K—K—K—K™K—K—KœK–K•K—K˜K›K™K—K•K“K—K’K—KŸK´KÀKÉKÏKÓKÖK×KÚKÚKÙKÚKÛKÛKÛK×KÓKËK¿K­K’KuKgKgKfKhKjKjKmKqKrKrKuKpKrKwKuKxKyK~KxK}KwKyKyK{K|KxK{KzK|K{KzKxK~K{K{KyK|K|K‚K}K}KK‚K‚K~K}KKƒKK‡K†K‡KˆK†KˆK‡KƒK~KmKbKLKAK0K&K-K2K2K-K2K/K1KK/K3K4K+K0K1K1K8K4K6e]q(KœKœKK™K˜KœKœKKœK¢KœK KžKKžKK KK›KœKžKšKŸKKšKœK›K›K KK¢K¦K©K§KªK¦K¦KªKŸK¤K¥K£K¤K£K£K£K K£K¡K K¥K¤KK›K›K—K˜KK‹K…K…K~KtKeKcK`KVKYKWKXK^K_KaKcKfKbKcKkKkKhKjKgKeKfKgKhKkKgKeKgKhKgKkKnKmKgKcKeKkKiKhKjKkKgKjKjKlKlKoKqKqKsKqKtK}KxKuK|KyKvKyK~KxK}K~KKK|KK‚KK†K€K€K†KK‚KK€KKƒK„KKK„K†K‚K‡K…KƒKK€K„K€KKK~K…KK€K€KƒKƒKƒK„KˆKK‚K€K‚KƒK…K„K…K„K‚K…KƒK€KK€K„K€KKK…K}KK‰K€K|KKƒKKƒK‡K€K~KƒKƒK„K‚KK~K€K€K„K€KƒK|K}KKKK€K„KKK~K…KK|K‚KƒK‚K„KˆK}K€KKƒK…KK€KKK€KK„K…K‡K‹KƒKK„KƒKƒK„K„K†KKƒK…KƒK‚K~KK~KKƒK‚K€K‚K„KK‚K„K…KƒKƒK~KK‚K€K„KƒK{K~K~KK}KKK|KK}KK€K|KK~KƒK~K|KKK€K{K|K~K‡K€KKK~K~K{KK|KxK}KwKxKxKvK{KrKsKqKqKlKlKpKpKiKkKmKiKmKpKpKrKyKK„K‹K’K“K•K“K˜KKžKœKK¤K¢K¢K¡K£K¥K¡KŸK¡K£K K K¢KŸK KžK K¤K¢KŸK KŸK K¢K¡KŸK¡KK›K›KœK™KœKœKžKžK™KK™KšK˜KšKšK—K—K›K˜K–K•K™K˜K™K—K˜K–K–K–K•K”K˜K—K•K™K˜KKK“K“KŸK¬K¿KÇKÌKÐKÔKØKØKÙKÛKÙKÜKÛKÝKÛKÕKÎKÂK³K™K~KqKlKeKlKsKlKmKoKqKtKrKtKxKrKuKyKsKxKvKzKwKyK{KxKyKyK{KtK{KyK{KxKyK|K|K~KKKKKK€K„KƒK…K‡K…K…KŠKˆKŒK}KqKaKGK8K3K:K:K.K6K?K3K2K0K6K3K/K/K/K0K0K0K/K/K0K=e]q(KœKœKK˜K˜KKŸK¡KŸK K›KŸKžKžKžKŸKŸK¢K›KœKžKœK KžKšKKŸKK¢K£K¤K§K¦K¥K¥K©K©K¦K£K¦K¤K£K¥K K¥K¦K›KžKŸKžK 
K¡KŸKœKšK›K–KKŠKK€KxKqKjKcK]KYKYKYKVK\K^KaK]K^KcKfKeKfKkKnKfKjKgKiKhKgKcKeKoKjKkKgKdKgKiKjKcKeKhKhKkKkKoKkKjKmKtKlKoKpKsKxKxKwK}KyK{K|KxKyK{KuK|KKƒK}KK‚K€KƒK€K}KxKƒK|KKK‹KK~K‚KKƒK‚K~K~K‚K‚KƒK‚KƒKK~KKK‚KK‚KƒKƒK€KƒK}K…K‚K€K„KK€K…K„KˆK†K…KKK‚KK†K„K‚K„K‚K‚KƒKK‚KˆK„K†KƒK‚K‚K…K†KK}K„K„KKK„K€KK€K€KKK~KƒK„K~KƒK|K|KK~KK~K{K‚KK€K€K}K€K€KKK‚KK€K~K‚K~K}KKKK…K…K‚K€K…KƒK†K‚K‚K‰K‡K‚K…K…K€K…K…K€KK„K‡K‚K‡K€KKƒKƒK~K~KKKKK€K„K‚KK€K€K{K~K€KK~KzK}K}K€K}KK‚K}KK|K€K|K{K€K}K|KK€K€K€K|K|KyKKKxKyK{KyKxKvKtKuKnKoKnKoKlKkKnKjKjKkKlKoKvKuKuK}K…K…K‡KŽK•K–K—K˜K›KžKŸKžK¤K¡K¥K¥K£K¤K K¡K£K§K£K¡KK›K¡K K¡KŸK¢K¡K KžK¢KžK¡K K¢K KœKœK™KKœKŸK›KŸK›K™KKžK›KœK™K–K™K”KœK›K˜K–K˜K–K˜K—K˜KšK˜K–K—K–K—K•K•K–K‘K’KKK–K§K´KÅKÉKÏKÔKÕKÙKÚKÚKÜKÛKÛKÜKÜKØKÓKËKÀK©K–KtKfKjKhKnKlKpKqKpKvKvK{K|KvKzK}KyKtKwKzKvKzKKxKyKyKxKzKwKvKxKwK{KyKwKKK~K}K~K‚KK„KKKŠK‡KŒK…K„K€KqK^KJK;KKK?K/K0K+K-K0K,K2K(K,K4K,K-K,K6K1K4K7K7K6K6e]q(K›K›K™KšK—KžKKšKœK£K¢KK¡KžKœKŸKŸKžKœKKKKœKžKžKKŸK¢K K¤K§K¦K¨K¨K¥K§K¨K¤K§K¥K¡K£K£K£K£KKKK¢K¢K¢KŸK£KK—K—K’KKŒK†K}KxKrKgK`K_KXKUKRKXK[KeK_K]KaKaK`KeKiKkKiKnKgKmKjKjKhKdKhKjKmKfKhKgKhKeKcKhKjKbKhKhKnKkKpKqKoKnKqKpKsKqKxKwK{KyKyK{KxKvKzK}KxK~K~K}KKK€K}K‚KK€K|KK{K…KƒKK‚K‚K„KK„KKƒK‚KKK‚K„K„K{KK†K€K‚K€K~K€K‚KKKƒKƒKK‚KK~K€K„K…KˆK…KK…K…K~KƒKK€K€KK€K…K~KKK€K‚K‚KK“K„K†KƒK†KˆK‚KK€K€KK~K‚KK}KK}K}KyK}K{KK~K~K~KKK|K}K~KKK}K€KKzK€KK|K~K~K‚K‚K~KzKƒK|K~KK€K~KƒKK€K†K„KK„K€K†K‚KK‚K„KK‚KƒK…K‚KƒK‚K†K}K€KK€K~K‚K~K{KK|K€K€K€K€K‚KzKKK}K{K€K}K€KKK€K‚KKK}K~K|K}K|K}K~K€K…KKK}K~K{K~KK}K{KyK€K…KzKuKtKpKoKkKlKnKmKjKiKiKhKnKpKpKxKyK€K~KˆKKK‘K”K›KšKšKžKŸK¡K£K¡K¢K¢K£K¡K¢K¢K¨K¦K£K¡KžK K¡K K KŸK¡KŸKžKŸK¢KŸKŸKŸK KžKK˜KœK—K›KžK›K˜K›K›KK™KšKœKœK™KšKšK˜K˜K™K›K˜K—K—K˜K—K˜K˜K–K˜K–K”K—K“K•K•K“K“K‘K“KK­K½KÆKËKÑKÕKÕKÙKÙKÜKÛKÝKÝKÜKÛKÖKÐKÇKºK¥KƒKjKeKdKjKjKnKkKrKwKrKvKyKzKwKzKzKvK~K|K{K}K|KyK|K}KzKzKxKyKzKtKvK}K|KK~KzKzK}K„K„K‚K€K…K‡KŠKŒK†K‚KoK^KHK8K>KJKJK1K0K(K/K+K.K0K(K'K.K.K,K*K/K1K3K4K7K6K:e]q(K™K™K™KœK™KžKœKKžKKœKŸK KžKŸKœKžKœKœK£KžKœK›KœK›K K¡K§K K£K¥K¦K¦K¨K¦K¨K©K¦K¦K¤K¡K¢K KŸK£KŸKŸKŸK K¡K¡K 
K¢KœK–KšK•KKŒK…KzKtKmKcK\KVKWKVKNKSKTKaK]K_KaKeKdKhKgKeKiKdKlKeKkKgKhKgKiKjKeKhKfKdKeKiKlKgKfKeKiKhKlKjKnKoKlKkKqKrKqKrKwKzKzKwKxKwK~KvKzKzK|K}K{K{K}K~K~KKƒK‚KK|KƒK|K}KKƒKKƒK‚KK„K}KKƒK}K‚K€K}KƒKKKƒKƒK‚K„KKƒKKƒK„K€K€K‚KK€K~K}K‚K€KKK~K‰K€K€K„K…K‚KƒK~K‚K‚KKˆK…KˆK†K†K€K‹KŒK‚K~K…KKKƒKƒKƒKKyK~KzK~KK|K€KK~K{K}K~KK€K{K„K~KK€KKKƒK€K|K}K}K}K~KƒKKKK|KyK~K€KKyKKK„K~K‚K…K„KƒK„KK†K…KK‚KK‚K„KƒK‡K‚KƒK†K‚KK~KKKK~KK|K~K}KK~KK~K€K|KˆK}KK|KK€KƒK€K}K‚K€K{KKKK€KKKK‚KK€KKKKK|K}K{KyKyK|K|K|KKwKvKrKmKqKqKlKkKjKhKjKjKmKlKsKqKvKwKK‚K‰K‹KŽKK“K™KœKK¢KžKK¢K¡K K¤K K¡K¡K£K¢K¢KK KŸKŸKžK KœK¤KŸKžK KžKKžK KžKŸKžKžKKKžK›K¡K˜KœKšKœKœK›K˜K™K˜KšK˜K—K–KšK˜K˜KšK—K˜K˜K˜K—K™K”K˜K—K–K™K“KK•K’K‘K“K’K£K³KÄKÌKÎKÔKÕKÙKØKÙKÛKÝKÜKÞKÝKÛKÕKÎKÂK²K’KyKdKhKjKlKmKnKpKrKrKtKsK|K}KzK{K~K~KyK|K{KxKzK|KvKxKwKxKzKxKK|K}K}KwK{K{KKKK€K‡K†KˆK‡K‡K‰K~KoK`KRK5K2K'K0K;K4K.K&K'K%K)K/K*K*K.K-K.K/K/K7K3K;KK:K8K=K6K8K5K4K2K8K5K/K,K1K/K/K4K4e]q)(K¤K¤K K¢K¢KŸKžKŸKœK K¡KŸK¤K¡KžK¤K K§K§K¨K©K©K©K¦K¢K¤K¥K¤K¥K¡KžK¤K¢K¤K¦K KŸK¢KŸKŸKžKKžKžKžK£K¡K¡K¢KžK£K¢KžK›KœK™K‘K‘K‰KˆK~KvKmK`KWK[KWKRKSKSKVK\KcKcKcKcKfKdKfKeKeKhKmKoKeKfKjKfKeKfKeKgKcKjKiKgKdKjKjKfKbKeKhKhKhKpKqKmKqKpKvKuKtKuKvKuKxK{KvKuKzKuKsK{KwKvK|K|K}K{K}K|KK|KK}K|K{K}KK€K€KKzKK|KKzKyKxK}K‚K|K…K~K„KK‚KKƒKˆK‚K…K~K‚K‚KK~K{KzK€K…K€K‡K}K€K€KKƒKƒKƒK„K†K~KKK„K€K~K€KK„KƒKƒK€K|K~KƒKK€K~KK€K}K{K~K~K{K„KzKzKzKxKyK|K‚KyKzK|KyKvKzKzKzK|KzKuKtKuK|KuKxKxKzKtKwK{KwK|KzK~KzKzK~K}K|K€K‚K~K|K{K…KƒKƒKKK†K‚KƒKƒK€KKKK~K~K}K€K€KzK€K}KK‚K‚K„K~KK}K~K‚K€K‚K€K€K|K}K~KKK{K|K{K{KKKK~K~K€K|KK}KK~K}KzK{KK|KzKxKwKsKvKsKtKsKuKsKsKuKpKoKqKjKmKnKmKsKvKwK|K~KƒK„K†K‰KŽKŽK”K”K’K˜K“K–K˜K˜K›KšKšK™K›KœKœKžKœK¢K K KœK›KžKœKKœKœK›KœKŸK¡K™KK›KžK™K›KœKœK›K›K™K•K™KœKKŸKœKŸKœKŸK›KžK˜KŸKœKšK K˜KšK•K™K—KœK”K”K’K•K–K‘K“K•K‘K”K•KŽKKKKŽK‹KK™K°KÀKÉKÏKÓK×KÚKÜKÝKÞKÞKßKáKàKßKÝK×KÐKÅK°KKuKbKbKcKqKoKpKlKmKpKrKrKvKxKyKqKwKyK}K}KƒK€K€K„K„K†KxKlK]KKK5K,K'K-K(K.K1K,K1K1K-K(K/K;KAKCKKKDK?K?K?K@K9K>K3K4K5K1K3K4K3K.K1K-K-K.K.K:e]q*(K¤K¤K¦K¢K£K¡KK£K K¢K£K¢K¡K¢K¡K§K¥K¤K¥K§KªK¨K¦K¥K¥K¥K¥K£K K¡K¤K¥K¤K§K¦KžK¡K¤KœK¡KžKŸKŸK K¢K¡K¡K 
K¢K¡K¢K¥KžKKšK™K’KŽK‡KKzKtKhKdK`KYKSKOKQKUK\KdK\K[KeKbKjKjKgKgKiKdKgKcKdKiKgKkKfKfKfKeKfKdKdKmKeKcKhKfKkKfKjKiKjKmKtKpKsKsKvKrKrKxKxKuKyKuK{KzKyKzK}KxKxKyKzKyK~KKwKyKyK{K€K~K€K{K{K}K~KK}K€K~K~K|K€KyK}KzKK|K{KxKƒKK€K|KK…K€KƒKK‚K‚K|KƒK~K~K„K‚K|K‚K~KK~K€KKK„K„KKK„K‚KK€K‚KK€KKƒK‚K…K}K~KKK‚K~KK}K‚KK~K}K€K„KKvK‚KyKyKyK€K|K{KKxKvKsKyKwKzKyKzKyK{KyKyK{KuKwKxKwK{KxKxKvK{KzKwK|K}KKKK}K~KKƒK‚KK‚K„K…KƒK‚KK~K€KK{KKK}K‚K|KzK~K}KKƒK€KK}KK€K}K~K€KyKzKKK€K~K€K€K€K}K}KKKzK~K|K{K~K€KK‚K}K~K|K|K~K{KzKxKuKyKrKsKtKuKwKvKsKnKuKrKqKsKtKmKmKoKrKrKzK|K|KK‚K„KŠK‹KKŽKK”K–K•K“K”K™K˜KžK˜KšK˜KœK K›K›KšK KKœKK™K KžKžKšK KKK˜K›KšKK˜KKK˜K›K—K—KšK™K˜K›KšKKžKžKKK›KK›K™K›KžKšK™K˜K•K›K˜K™K˜K”K–K•K–K—K”K“KK“K‘KŽKKKKŒKŽK‹K“K¦K·KÅKÍKÔKÖKØKÚKÝKÝKÞKÝKàKàKáKÞKÚKÒKÉKÀK¢K„KkKdKeKhKhKrKhKkKpKoKnKrKuKsKwKxKzK{KyK€K…KK„K…KyKpK[KKK9K1K-K&K-K2K,K)K*K.K4K-K)K-K0K2K3K7K5K1K8K1K4K3K5K1K0K1K/K/K0K4K0K-K-K/K,K/K2e]q+(K£K£K¡KžK¡K¤KŸK¡KžK K¤K¤K¡K K¦K£K¥K¨KªK«KªK¨K§K¦K¤K¥K£K K K£K¢K¤K K K¡K KK¢K¡K K¢KŸKŸK¡K¡KžKŸK£K¡K¢KŸK¡KKKšK—K“K‹K‰KKyKsKfKdK_KXKTKQKSKXKZKYKcKWKYKlKfKfKqKiKfKlKeKcKcKdKeKfKeKhKfKcKdKkKeKfKcKiKbKgKfKfKcKhKgKhKkKnKpKoKrKrKvK{KxKyKuKxKxKzKwKxK~KzKxK€KvKwKzK|KxKzK{KwK{KK~KK€K|K{K}K{K€KzK}KyK|K|K~K}K~K{KK}K}K‚K€K|K‚K‚K€K|K„K€K‚KK‚K‚K}K€KƒK„K‚KKKK†K|KK€K€K}K€K€KƒKƒKKK|KKK€K€K†K|KKKK€KKKzKK‚K€K}K~K|K|KzK€K}K}K~K}K€KzKzKzKzKxKuKtKvKxKyKuKuKyKvKxKyKxKxKyKvKxKsK{KxKyKyKxK|K€KK€K|KƒKKƒK†K„K„K~KK‚KKƒK€K~K‚K|K~KƒK~K‚KK}KK€K~K{K€KK‚KKKK~K}K|K}K€K}K‚KK}K}K{KƒKKKK€K~KzKKK‡K~K{K‚K~K}KxKK|KzKyKxKxKsKuKqKtKrKuKsKnKxKuKnKoKmKmKsKnKpKsK~KzK|K‚K…K†K‡K‹K’KŽK‘K“K“K”K“K–K˜K˜K™KžK™KœKœKžKšKœKœKŸK›KŸKŸKšK™KœKœKžKšKœK˜KžK™KœKžKšK™KKKK˜K™K˜KšK˜KœK˜KšKKŸKžK KœKKšK™KšKœK˜K™K™K—K˜K˜K–K•K•K–K”K”K–KK’K”KKKKKKKŠKŒK‹KK™K®KÀKÌKÑKÕK×KÚKÚKÝKÞKÞKàKàKáKÞKÜKÕKÎKÆK®K•KxKdKbKiKmKkKmKjKpKqKuKqKqKoKqKtKxK|KzK}K„KƒK~K|KqKaKQK5K6K/K+K,K,K-K*K.K)K+K/K4K/K4K:K5K5K3K9K3KK>K:K9K1K4K6K4K2K8K3K3K/K.K,K-K-K0K3K.K)K-e]q.(K¢K¢K¡KœK¡K¤K¤KK K¡K K¡K¢K¤K§K¨K­KªKªK¯K¦K£KŸK¡KKK KK›KœKœKœK›K™KœK›KœK K¡K K¤KŸK K¡KKžKŸKžK¢K¤K 
KžK¢KK™K™K”KK…K€K|KuKjKaK`K]KTKOKWKVK[K^K\KaKdKeKhKaKcKmKjKcKeKgKkKdKeKjKeKfKjKpKiKdKfKgKeKdKiKfKeKeKfKjKmKjKkKmKoKrKrKwKuKtKyKuKxKzKzKxKyKzK}K|KKKzK}K€K€K{KK~K{K{KK{K}K}K}K}K~K|KK{K|K|K~K~KK~KK~KK€K}K„K€KK~KKK~KK}K|KK{K~K€K€K‚KK€K„KK€KK‚K…KƒKKƒK…KK€KKKK€K~K~K€K†K†KK~KKK„K~K{KK{K…K‚K‚KƒK‚K~KK|K}KxK{K}KyKuKxK|K~KyK€K’KK€K€KŒKŠK}KyK}K|KuKuKyKtKuKsKtKxKwKwKxKwK}K~KyKvKxKxK{K{K‡K}KK{K~K}KK|KzKƒKK€K€K€K€KKK€KK{K‚KKKK‚K‚KKKƒK}K€K‚K‚K}KK~K|K€KK…K„K~KK€K}K}K|K~K€K}K|KK{K{KyK{K{K|K|KtKvKzKqKpKpKsKtKuKuKqKsKnKkKlKlKoKqKpKrKsK{K€K~K…K‡K‹K‹KŠKKK‘KK’K”KK•K–K—K–K˜K—K˜K—KKœKšK›KœKšKžKKœKœK™KšK˜KšKK—K—KžK—K™K›K˜K™K™KK˜KœKšKKšK›K™K—K™KœK˜KœK˜K˜K˜K˜K”KšK”K—K•K”KKK“KKŽK‘K‘KKKKKKKKKŽKKKŒKKK’KK¶KÅKÌKÑKÔKÙKÙKÛKÞKßKÞKÞKàKàKßKÛKÓKÎKÁK¨K‡KkKbKjKfKlKkKjKnKpKrKsKpKtKxKyKyK‚K‚K{KkKaKOK9K2K,K+K)K*K/K0K.K)K1K1K1K3K3K6K1K6KK7K:K5K4K2K4K5K2K-K+K,K.K.K/K1K/K+K0e]q/(K¡K¡KŸKžK¡KŸKK¢K K K K¢K¢K¢K§K¬KªK­KªK£K¢K¡KK›K™KšKœK›KšKœKžK›KžK›K KšK K£K¥K¤K K KŸKŸKŸK¡KžK¡K£K¢K¡K¦K¡K K˜K˜K–KKˆK‚K~KqKjKaK`KXKSKNKRKTK`K\K]KaK`KbKcKeKdKdKiKgKkKfKjKgKcKjKbKfKcKhKkKiKfKeKlKeKfKgKkKhKhKlKrKmKkKnKoKuKxKvKuKwKxKxKyKsKvKzKvKzKK|K{KzK|KyKwK{K€K|K}K|K|K~K€K~K‚K~KyK{K{K}KzKK€K{KK€K‚KKKK‰KKK‹KK~K‚KKK~K}K|K}K|K…KK†K„K„K€K~K‚K‚K‚KK…K|K€K„K€K~K|KK‚K|K}K}K‚K‚K„K„K‚KK{K~K~KyKKK{KzKzKK€KzK|K~K}K|K~K{K}K‚K{K„K”K‹KK«K±KšKK¨K´K¢K’KšK£K“K‹KK”KƒKvKzK|KwKtKwKyKvK{K|K{K|KwKwK{KyKzK{KyKxK|K{K~KyKzKzK‚KK}K€KzKK€KK€KKKKKKK‚K„K‚K~K}KK~K}K€K€K„K{KK‚KKK}K}K‚KK‚K}K}KKƒKKyK}K€K}K‚K|KzKzKuKxKwKqKtKvKoKuKvKrKpKoKoKgKkKnKkKoKtKpKtKzK‚K~K†K‹KŠKK‹KK“K–KK“K˜K‘K•K–K˜KšKšK—K–K™K™K™KžKœKžK›KœKšKšKšK›K˜K›K›KšK›KšKšKœK˜KŸKK™K›K™K›KžKK™KœKœK˜K—KœKžKœK™K™K™K˜K˜K—K•K”K•K–K–K“KKKŒKŽK“K•KKŽKKKK‘KKŒKKKŠK’KKŒKK˜KªKºKÈKÐKÐKÖKÙKÜKÝKÝKßKßKàKßKßKÝK×KÐKÈK²KšKvKeKdKjKfKiKlKlKpKyKpKoKrKyKK}KKxKfK^KQK;K/K,K1K0K0K+K-K+K,K*K/K2K,K1K1K5K6K2K:K9K:K>KK5K*K;K2K/K6K,K.K-K/K.K4K-K,K/e]q1(K£K£KŸK¥K¤K£K KKŸK K¢K¤K¤K£K«K«K©K«K¢K K 
KKšK›K–K˜K•K™K•K–K”K”K—K˜KšKœKŸK¤K¢K¢K¥K¡K¡K£KKŸK¤K¨K¤KŸK¢K£KŸKKžK™K“KK’K‚K„K|KkKfKaK[KZKQKVKTK]KZKcKbKaKdKaKgKgKhKgKlKhKeKlKnKqKkKfKdKhKfKdKfKeKlKfKgKiKdKgKjKfKlKrKjKoKoKpKrKqKsKvKwKyKyKxKxKzK}KvK{K{K{KzKzK|KzKK~K}K~K|KK}KK|K}KKK„K~K|K~KKzK€KKK~K„K‚KK}K}K~K€KK‚KƒK…K~KK‚KK}K}K€KzK}K‚K„KKK„KK„K…KˆKK†K~K‚K‚K€K~K{K}KKKK|K|K}K…K„K‡KK~K~K~K~KyKƒK…KK}K‚KšKœKˆKK‘KK“KŒK•KšK™KžK´K³KœK¢K·K¯K¤K§KµK±KŸK¯K¾K½K®K±K»KÀK¾K¯K¨K§K¨KKKzKxKuKqKtKvKqKwKvKqKuKzKyK|K{K}KzK{K|K|K}KK}K{K}K€K€K|K€KKƒKƒKƒKK„KƒKƒKK‚K‚K€KyK}K~K}KK}K}K|K|KKK{K…KyK~K}K}K|K|K{K|K|KxKxKxKyKwKwKtKvKuKqKuKvKrKpKoKnKlKjKpKgKlKqKqKuKwKxKK„KƒKŠKK‹K‹K“KŽK”K‘K‘K‘KKK’K•K”K•K˜K•K–K™KK™K™KšK›KœK›K›KKKœK—K™K–K›K˜K›K›K—K›K™K˜K™KœKœKšK˜KšK›KœK™K™K–K–K˜K•KšK—K–K“K˜K“KK”K“K“K‘K’KK‘K“KK•KKK’K‘KŽKŽKK‹KK‘K‘KŒKŽKŒKŽKK–K¨K¹KÆKÌKÓKÖKØKÛKÜKÞKàKÞKßKßKàKÝKÚKÒKÌK½K K~KoKmK}KpKoKoKpKuKoKsK|KvK}KxKjKYKBK0K-K/K,K.K+K,K1K1K1K-K*K.K2K1K4K2K2K9K6K7K8K9K@K>K;K;K8K8K3K3K3K-K0K+K+K1K+K-K0K.K+K/K0K0K-e]q2(K£K£K K K¢K KŸK¤K K¢K£K£KªK§KªK«K¬K¨K¤K£KŸK›KšK™K–K—K”K–K•K—K—K“K”K”KšKžK¢K¦K¢K K£K K£KK¡K KœK¡K¥K£K¤K¢K¡KžK›K—K™KŠK‹K„K‚K~KmKhK\K_KYKSKRKUK\KZK^K_K\K^K_KgKdKfKgKlKkKhKkKiKjKnKeKeKhKfKlKdKhKfKfKjKfKgKhKiKkKlKpKoKqKrKoKpKqKlKpKwKvKyKzK}KzKwKzK}K|K|K|K€K|KzK{K}K|K}K|K|K€K~KKKK‚KzK}K~K€K}K}KK€K‚KƒK‚K~KKK…KƒKK|K}K~K€KK€K‚KKKK~KK€K}K„K}K€KƒKKK‚K‡KƒK~KK~K‚KK‚KK‚KK€KK€K}K€K€K~KKˆK„K~K„KK{K‰KKŒK„K–KšKKKKŽKK˜KK KŸK§K­K©KšK¬K¹K¯K¡K³K³K¨K£K³KÀK·K¨K²KÂKÁK´K¯K»KÄKÃK¶K¤K¢K K¡KK}K{KyKrKuKvKuKtKtKvKyK{KyK{KKzKK|K|KzK€KyK~K}K~KƒK„KƒKƒKKKƒK„KƒK‚KƒK„K€K~K€KKƒKzK~KKKK€K€K}KƒKK}K}K}KKƒK~K}KyK{KzKwKyKyKyKwKuKqKpKpKoKtKoKrKrKpKlKmKkKnKmKpKrKwKyK|K|K€KŠKˆKKKKKK‘KK’KK‘K–K’K“K–K–K”K˜K˜K˜KœKšKKšK›KžK›KšK›KžK˜K–K™KšK—KK™KšK™K›K™KœK›K›K›K—K›KšKžK˜K™K•K˜K–K–K–K™K–K–K–K“K•KK’KŽK“K—K’K’KŽK‘K’K‘KK’KKŽKK‘KŠK“KŒKKŒKŒKŒKK‘KK—K°KÀKËKÎKÑK×KÚKÜKÝKßKÞKÝKàKßKßKÜK×KÐKÆK¯K•KtKiKlKiKfKnKuKrKrK|K|KzKsKnKbKNKK3K(K K K¥K¡KªK©KªK¬K¬K¬K¨K¨K¢K¡KœKšK™K–KŽKˆK€KxKtKpKnKhKqKxK€K‡KŠKšK–K›KK¥K¢K©K¨K¨K£K¦K©K¤K¥K¡K¤K¥K¢K 
K¦K¥K¡KžKŸKœK•KŠKK†K‚KwKjK`KYKTKPKNKRKVKVK_KcK[K]K\K`K_KdKhKjKcKcKaKeKeKbK_KbKfKgKbKbKdKeKdKcKhKmKfKkKnKlKjKjKmKnKpKpKpKrKkKxKrK}KpKtKtKxKqKwKxK|KKwKyKwK{KuKxKyKwKKyKyK~K|KyKzKxK~KzK~K{K{K|KƒKK~K~KK}K}KK}K}K|K}K|K~K|KyK|KŠK®K´KšKK—K£KšK–K“K‘KŽK†KKKK|KzK~K}KwKyK~KxK„KK†KˆK…KK…K†K„KˆK…KƒK€KŠKšKŽKK™KKK K˜K•K•K—KK˜K•K™K¡K—K•K§K¯K¤K«K K©K®K¨K©K­KŸK›K«K¬K¸K²K°K®K¶K·K°K©K¬K»K»KÀK¿KµK´KºKÀK¾KÂK¾K»K¼K¿KÃKÇKÉKÃK¾K½KÁK¿KÅKÈKÊKÊKÂKºK¥K„KpKoKnKyKyKxKqKsKzKwKwKwKuK}KyKvKxK}K{KtKzKzKKvK{KxKzK|KyK{KxK|KzKyKsKvKqKtKrKpKrKpKvKqKqKmKoKiKjKmKiKlKdKfKcKiKkKjKnKsKuKyK€K†K„KŠKŽK“KK’K‘K“K’K’K“K‘KŒK”KKKŽK’K‘K•K“K“K“K‘K’K‘K•K’KšK—KKšK•K™KšK–K–K›K•K—K•KKKK“K’KK’KKK‘KŽK’K‘K’KK’K’K•KKK‘K“K‹KKKKKKŽKKKŒKŒK‡K‹KŽKKKKŠKKŽK‰KŒKKŽKŽKKŒKKKŒK‹K‹K‹KŠK‹K“KªKÂKÇKÏKÔKÚKÜKÝKßKàKäKåKäKçKåKâKàKÖK´KpK/K"K!K)K&K(K$K(K)K-K0K.K0K3K1K)K.K4K:K5K5K2K3K0K3K3K3K5K4K3K.K0K-K.K-K0K0K2K1KMK7K4K2K5K/K/K,K7K6K8K/K-K.K9K3K2K8e]q?(K¢K¢KŸK¥K¬K§K©KªK«K«K©K¨K¡KŸKKœK—K–K‹K‡KzKyKnKjKkKeKlKuK~KˆKˆK”K—K™KœK KŸKªK©K¥K¦K£K¦K¥K¥K¢K£K§K£K¥K¥K¥K¢K K¡KœK™K”KŽKŒK€K{KpKaK]KVKYKSKJKKKLKTKZKQKVKTK\K^K\KdKaKfKnKjKaK]KdKdKdKcKbKgK`KeKaKkKfKiKfKgKcKjKiKhKkKoKnKoKpKoKqKqK~KsKvKrKyK{KwKuKzK|K|KwKxKyKzKyKzKzKyKKK}KzK|KKyKzKzKK€KK}K~K|K‚K}K€K|K‚KK~K…K€K|KvK|KyK€K|KwKˆK‘K§K¬K–KK—K“KKKˆK‰K„K†K~K|K…KzK{KzKKwK{K}KvK†K‰KŠKKK‡K„K|KzKƒK„KKK‘K’KŒK…K‹K~KK›K‘KK¤K—K“K’KK˜KK K­KªKžK˜K¦K K¥K´K²K¥K¡K¢K§K±K©K«KªKºK¼K·K°K§K¥K·KÁK¿K³K²K¶K¼KÂKÂK¸K³KµKÀKÁKÃKÂK½KÁK¾KÀK¿KÅKÈKÇKÆKÂKÄKÅKÉKÆK·KŸK‚KjKsKtKtKlKuKtKqKqKuKqKxKzKyK}KxKyKvKxKzKzKwKyKxKtKwKzKyKzKzKvKvKwKvKsKuKuKsKuKrKtKpKpKqKpKlKpKnKmKkKiKgKgKgKoKjKpKpKuKuK}KƒK‡K†KˆKKK’K’K”K”K•K”KK‘K‘KK’KŽKŽK“K”K”K–K•K“KKK“K‘K˜K—K“K•K”K—K—K™KšKšK•K—K‘K‘KKKKKŽKKŽKŽKK‘KKK“K’KKK‘K”KKK‘KKŽKK‘KKŒKKŽKŒKŽKKKŒKKŽKKKŒKŒKKKKŽKK“KŽKKKKKKŠKŠK‡K‡KŽK“K±KÂKÉKÐKÖKØKÜKÝKàKâKäKäKæKçKåKßK×K¹KpK)K%K#K-K(K)K*K.K.K-K)K0K3K(K-K3K5K4K8K9KK8K3K6K.K3K+K,K0K,K+K:K6K/K9K?KEKJKHKDK6K2K,K K 
KK"K4Kbe]qK(K«K«K­KªK¨K§K¢KKœK–K”KŠK„K}KxKmK`KSKTKKKKKHKQKXKcKeKmKxKK†KŒK’K—KšKŸK£K¨K©KªKªK©K¨KªK¦K©K¦K¤K£K¢K£K¢KžK¢KKœK–K“KŠKKƒK{KyKlK^KUKMKLKHKFKHKOKQKVK\KVKYK]KdK`KdKfK_KgKdKgK]K_KfKbKaKfKdKbK^KaKgK`KcKeKfK`KeKhKgKfKmKkKlKjKmKlKlKuKmKqKnKqKwKtKvKqKtKuKvKsKuKyKvKwK}KvKyKwKyKzKyKyKyKzKxKzK}K}KzK{K~KyKyK|KKuK{K‹K£KˆKvKjKoKvKpKrKyKvKvKqKoKtKlKnKoKuKrKrKwKsKvKyK}K|K|K|K‚K|KuK}K‰KŠKyKsK~K„KK‹KŠK…K„K„KŠKKKŒKzK|KˆK‚K„K’K‡K„K‰K–K•KƒK„K–K–K“K‰KK—K£KœK‘KœK¨K¬K¤K˜K¡K«KµK´K¦K›K¡K³K½K½K¸K¸K¯K²K¹K¼KÀKÀK¼KÀK½K¼KÁKÆKÅK¾K¸K¹KÀKÅKÇKÅKÁKºKÁKÅKÅKÆKÈKÆKÄK½K¼KÃKÅKÈKÇKÊKÈKÂKÂKÈKÌKÊKËKÌKËKËKÃKµK¡KpK]K_K\K_K^K\K]KeKbKdKfKiKlKkKtKoKlKiKnKoKpKnKlKhKiKfKlKoKlKlKpKnKiKmKfKhKfKdKeKhKiKnKvKxK|K…K‡KKK“K“K•K™KœK˜K™K˜KšKKžK˜K˜KšK•K”K”K’KKŽKŠK‡KˆK†K„K‡K‡K„K~K€K‚K‚K‚KˆKˆKŒKK•K”K™K•K”K–K‘KŽKŽKK’KKK‘K‘K’KK‘K–K’K’K‘KKŽKŽKKK‘K‘KKŒK’KŽKŽKK’KKK‘K’KKKŽK”KKK’K“KK‹K“K•K–K•K”K”K•K•K•KKšKK›KžK¤K˜KˆKnKPK4K3K,K/K0K2K'K+K-K/K(K/K4K0K1K*K1K4K5K6K4K4K7K2K5K6K1K,K0K2K.K,K+K(K,K/K6K;K:K7K2K2K-K=K3K-K-K0K0K6K6K7K9K@KEKAKFKKCKCKGKK7K'K%K)K,K1K;K[KvK‰KšK£K¨e]qQ(K§K§K¢KžK›K˜K—KŽKˆK†KyKqKhKZKQKPKXKUKWKZKTKRKJKXKaKgKnK|KK„KK•K”KKK¡K¥KªK¥K¨K§K¨K©K¬K«K©K¦K¦K£K¤K¡KŸK¢KKšK˜K‘KKK‚KzKtKhK`KWKPKJKLKJKQKTKSKXKYKWKXK^KeKfKbKjKbK_KgK`KcKcKfKiKbK]KfKfKdKbKcKgKgKdKdKfKgKfKfKkKkKhKiKrKnKsKoKyKrKuK{K~KqKoKrKuKrKtKoKyKqKsKpK|K|KwKwK|KxKuK}K}KyKyK{K|KwKyK|K{KK}K{KK‚KK‰KvKvKrKqKpKnKkKmKdKgKjKuKpKiKqK|KwK|KyKvKuKsKxKqKkK{K‚K{K}K{K„KzKwKyKƒK„KˆKzK‚K…KKxKwKƒK‚K„KŒK‹K†K|K‹K‹K„K~K†KK…KƒKŽK‰K‹KŠKK–K”K”KŽK‹K–KK’K‰KžK£KªK¢KœK¦K®K®K¶K´K®K²K´K¼KÁKÁKÁKºKµK¶KµK¾KÀKÀK»K³K³KºKÀKÃKÇKÆKÂKµK¸KÄKÍKÆKÄK¾K¹K¼KÁKÅK½K½K²K¬KµK½KÈKÈKÏKÐKÕKÖK×KÕKÖK×KÙKÙKØKØKØKÜKÞKâKãKäKæKäKÛK»KzKUKRKSKSKZKaKaKeKiKiKiKeKeKcKgKdKfKcKeKcKiKgKhKeKhKdKaK^KcKfKhKlKmKlKwK}K…K‰KŠK’K”K—K•K–KžK›KœK›K˜KKK˜K™K™K—K–K˜K™K—K‘K’KˆKˆK|KwKsKnKfKaKfKiKgKhKoKxK{K€K‡KŒKŒKK•K•K–K–K”K”K’KKK’K’K“K’K“K’KK•KK“KŽKŽKK’KŽKKK’K‘KŽK’K’K‹KŽKKKKKŽKKK’K“KK’K•K–K•K•K”K“K—K—K˜KKKŸKœK‘K|K[K@K9K/K.K-K+K+K+K-K-K%K&K/K2K-K.K1K4K3K2K5K4K4K3K3K4K1K3K,K*K)K.K*K+K+K6K0K8K?K:K1K-K2K8K/K1K1K.K1K.K0K6K:KAKHKOKVKMK;K3K)K%K%K1K>
KWKlKƒK—K K£K«e]qR(K¢K¢K£KŸK˜K—K“KŽK‡K~KtKnK`K_KQKMKTKSKUKVKVKXKQKWK`KkKtK~K…K‰KK‘K˜KšKŸK¤K¤K¤K©K¦K¥KªK§K§K§K¨K¨K¤K¤K¥K£K¡K£KžKšK•K“KK‹K~K{KrKkK]KXKVKMKKKCKLKMKTKRKSKYK\K_K^K_K`K\KbK`K[KcKaKcKgKhKdKcKeK^KaKcKfKfKjKeKfKbKfKfKiKhKlKlKlKhKqKoKnKqKlKpKsKvKwKoKwKyKpKvKuK|KzKwKyK{KxKwKwK~KKyK}KwKKzK|KvKwKwKwKzK‚K~K|K€KŒK†K|KzKtKoKjKoKjKnKnKiKjKpKoKmKoKsKuKsK~KrKpKsKwKrKoK|KKKK{K~K}K}KxKK‡K{KwK…KƒK|KzK~K~KxKK‰K‡K|KKˆKK„K„K”K•KKxK…K‡K†K‚K‡KšK™KŠKŠK•K’K K’K•KŸK©K¤K”KK­K¯K·K´K°KªK²K¾K¿KÄKÁK¸K±K­K¼K¼K¿K½K¶K°K´KÀK¿KÁKÂK¿K¿KÀK»K¿KÂKÄKÃKÁK¸K¶K¾K¾K»K·K¶K°K·KÀKÉKÐKÓKÓKÒKÓKÒKÖKÛK×KÔKÔKÒKÖK×KÖK×KÙKÛKßKãKâKæKæKãKÕK¢KbKSKMKQKVK]KaKdKcKiKhKgKbKfKhKhKfKdKfKgKhKcKeKhKeKfK_KfKdKjKgKlKrKsKsKƒKKˆK‰KK“K”K—KœK™K™KKžK˜KK›KšKK™K—K™K˜KK–KKK‰K‡KKxKpKlKgK]KaKZKcKeKhKrKyKK‚K„KˆKŽK‘K—KšK–K“K”K•KKKŽKKKK‹K‘K”K“KKKŽKKKŽKKKK‘KK‘KK–KK‘K”K“KKŒK‘K‹K•K‘KKK‘K’K”K‘K˜K–KK—K–K™KKžK˜KŽK„KmKHK.K*K.K1K,K,K-K1K)K*K*K/K5K5K.K1K-K1K9K=K.K1K2KKEKKKZKRKIK=K4K+K,K1KCKZKqK…K‘K—KŸK¦K©e]qS(KžKžK¤KšK—K—K’K…K„KwKqKkKYKSKPKRKSKSKTKPKTKVKTKYKcKjKrKK‚K†KK—K–K›KœK¤K¢K¥K¥K¨K§K§K¦K¥K§K«K§K¤K§K£K¥K£KŸKœK›K˜K•K‹KŠK~KxKsKhKZKWKPKKKKKDKKKPKUKZKWK\K[K^KYK^KdKaKbKaKgKfK^K\K^KbKjKeKiKaKfKhKgKaKjKfKcKbKhKhKhKjKjKmKnKqKpKlKnKpKnKsKsKrKrKoKoKsKqKzKxKzKrK|K‚K}KzK{K{KKwKyKwKzKzKxKzKwKvK{KwK|K~KK~K„KŒK~KuKuKkKmKpKhKmKmKrKmKnKpKnKxKrKuKwK€K|KqKsKpKoKwKKKzKxK{K|KxK|KrKvK~K|KtK†K‚K€KK‚KKwKzK„KƒKKˆK‡K‡K{K†K‡KKƒK}KŠKŠK†K|KŠK—K¡KK~KŒK’K“K•K–KK¦K¤KŽKK£K©K®K©KªK«KµKºKÂK¾K±K¬K·K¸KºKÁKÆK¶K«K´K¹K»KÄKÅK¿K¹K´K¸KÂKÄKÆKÆK¾K¸K·K»K½K¼KµK±K·KÂKÃKÉKÓKÒK×KÓKØKÔKÓKÒKÒKØKØKÖKÓKÓKÕKÖK×KÚK×KØKÜKàKãKâKäKáKÝK»KƒKZKJKSKZK[K\KcKaKdKfKhKeKlKfKdKcKhKhKfKfKdKdKaKeKgK_K^K[KaKfKnKnKqKtK€K„K‡K‰KK•K”K–K™K›KœKšK™K˜KœKœK™K˜K›K›KšKœK™K–K“KKK†KKyKqKeKaK^K\K\KUKYKbKoKmKwKƒKˆK‹KŒK“K‘K–K“K–K’K”KŽKKK‘KŽK‘KK’K“K’K‘KKKKK‘KK“KKŒK‘K“KK“K”KKK”K’KK•KK“K“K’K“K”K”K‘K”K—K–K‘K™K—KžKK›K“KKoKOK6K-K*K2K4K(K,K9K&K,K,K%K:K+K1K.K3K1K:K8K6K2K2K6K4K8K7K6K/K.K/K.K'K+K0K6KAK6KK>KNKZKeKnKwK€K…KƒKŒKKK“K“K“K“K‘K—KKŒKK”K•K’K—K‘K“KKKK’KK“KŽKKKŽKKKK“K“KKŽKŒK”KKŽK’K•K’K”K•KœK•K˜KšKšKœKžK—K‹KtKUK6K+K*K$K*K*K-K(K)K
)K(K,K+K.K.KK=KFKOKRKPKCK@K8K=KKK[KjKwK†KŒK–K—K“K•K–K˜K•K¡e]qX(K•K•K”KKŠK€KtKnKcKXKOKPKUKUKVKWKYK`KWKWKTKQK\K\KYKfKqKwK€K‹KK’K˜KšK¡K K¤K©K©K©K§K¨K«K¨K©K§K§K¦K¥K¦K¢K§KKšK›K–KKŒK†KKxKsKjKaKZKUKPKLKUKKKQKWK[KXK[K\KXK\K\K[KaKaKeKbKaKfKcKaKfKcKeKgKmKhKgKeKeKgKdKbKeKfKgKfKiKkKiKkKjKjKnKqKoKpKqKqKtKrKrKuKvKsKsKuKsKuKvKwKyKxKzKyK}KwKyK{KzK{KyK}K…KzK{K}KKK‚KuKtKiKiKjKfKoKmKiKrKkKrKpK{KqKyKuKqKpKsKrKK~KxK|KyKsKzK{KuK|KyKxKuKxK}KuKrKoKzKƒK|KƒK‡K„KKƒKˆKK~KˆKK•K‚KKˆKŽKKƒK‡KˆKKK‚KŠKK“K†K‡KK’K’K‚K’K›KK§K”K•KK¥K«KžKK­K¬K¼KÄKµK§K¥K±K¼K¼K¿K¶K»K³K»KÂKÁKÄK¹K¥K K«K©K¥K©KµK¾KÉKÍKÏKÐKÛKØKÐKÖKÑKÑKÏKÎKÌKÑKÐKÏKÐKÔKÓKÐKÏKÏKÐKÍKÍKÐKÒKÔKÔKÓKËKËKÎKÕK×KÛK×KàKáKàKÞKàKàKÒK¨KmKWKJKPKOKVKVKaKZK]KcK`KbK_K^K_KbKjKfK^KbKaK]KbKYK`KcKhKpKpKrK}KK…K‹KK’K—KšK˜K K›K›KšK˜K˜KœK›KšKšK›K—KšK™K—K‘K’KŽKŠKƒKKwKfKaKOKAK5K9K9K;KBKVK]KrKpK{K~K…KˆK‹KK“K”K”K”K”K‘KK‘KKKŽKK’K”K–KKKŽKŽKK‘K–K’K‹KKKKK‘K•K•KKŽK’K‘KŽKK•KK‘K‘K’K”K—KšKKžK›KK€K^K8K*K*K$K$K&K(K+K&K)K'K%K.K)K/K-K4K2K5K?K3KBK6K8K5K9K7K5K3K,K)K&K&K.K2K4K6K5K:K9K.K0K.K)K5K8K,K3K;KBKMKIKK6K1K/K+K,K%K(K,K.K-K5KDKAK?K=KFKFKCKKKUKKKKKCKQKZKkK|KKK”K–K™K™K–KKŽKKŽKKe]q[(KKK‹K‚K{KrKeKYKUKNKUKWKWKXK[KUK[KXKXKSKVKXKUK\K_KkKpK{KKKŽK•K•K–K¡KŸK¢K¦K¥K¨K¥K¨K©K¦K£K¦K§K§K¦K¦K¥K 
KŸK›K—K“K•K‹K‚KƒKtKqKjK_KVKTKNKKKJKMKRKXKUKVK\K[KYKaKaKcKaK`K^KfK`KaKeKmKeKaKgKeKmKfKgKjKdKeKgKaKgKbKgKdKkKkKoKpKjKoKnKiKoKvKpKwKyKpKsKuKwKvKuKtKwKzKxKvKzKxKxK|K{K~K}K{KvKyKzKzK{K…K€K€KwKmKlKsKpKfKoKfKiKrKlKoKpKsKuKsKvKlKpKmKzKtKwKyKuKwKpKsKmKqK€K{KlKsKKtKpKqKuKwKtKzKK}K|K…KˆKKtK†K‡KŽK…K†KKˆKˆK{KKKK’KK‡K‹K‘K—K†K…K‹KKKŒK–K—KšKŽK„K‘K™K™K—K‹KžK£K©K¤K˜K¤K¬K°K·K¹K²KªK®K¯K¸K¾KÀK­K¥KŸKŸK¡K›K¥K¹KÃKÇKËKËKÔK×KÔKÔKÔKÏKÎKÏKÎKÏKÎKÌKÏKÊKÇKÏKÐKÒKÏKÍKÍKÏKÎKÑKËKÊKÎKÐKÎKÎKÐKÌKÒKÕKÐKÍKÌKÔKÖKÑKÒK×KàKäKßKÜKÞKÙKÅK”K\KJKRKMKOKUKXKWKXK\KbK_K^K^K]K^K]K_K^KaK_KbKeK_KgKnKqKtKyKK†K‰KŽK“K–K˜K™K›K˜K›K›KœK›KœKšK˜K•K–K•KšK—K•K”K”K‘K…KƒK}K{KmKkKSKIK@K4K.K0K5K;KGKWK`KmKtKwK€K…K‰KŽKK”K”K—K˜K•K’KK‘KŽK”K‘KK”K’KKK‘KŽKKK’KK’KKK”KK”K•K‘KK‹KK•K‘K‘K‘K“K•K•K˜KœKœK™KK€KZK=K2K5K,K'K1K)K(K(K,K.K+K1K.K5K;K=K:K3K5K9K0K=K4K6K7K2K2K/K-K0K.K+K+K5KK;K:K4K3K2K-K-K3K0K3K0K>K6K:KKAKFKBKJKPKIKJKMKXKhKsKK‰KK–K™K—KK•KKŽK‰KˆKKe]q\(KˆKˆK†K}KrKcK[KYKSKSKTKWKUKZKYKWKZKZK[KWKVKYKUKZKaKkKpKzK}K‰KK“K˜K•KK›K¤K¥K¥K©K¦KªK¨K¥K¥K§K¦K¨K¥K¥K§K£KK›K–K“KKŠK‚K€KtKoKkK`KZKPKOKIKIKMKTKXK\KYK_K\K^K`K_KaKaK^K]KdKiKbKgKhKfK`KkKjKhKgKhKbKfKfK`KgKeKbKfKiKkKkKnKoKjKjKkKpKnKrKqKyKrKsKtKuKoKuKwKrKzKvK~K~K}KzK{KxKyKsK|KyKyKvKtK{K|KK‚K{KtKhKoKkKgKjKoKiKjKpKkKkKtKoKqKtKqKkKvKyK}KzKxKqKuKvKqKnKvK}KwKoKqKsKrKmKxKzK|KvKyK{K}K‚KˆKK{KrK}K‹KˆK…K‰K’KŠKKK…K‘K‘K‹K‰KK’K‘KŽK‹KŽKŒK’KK‹K˜K–KšK’KŒK“K‘K“K–K›KšK¥K¨K—K•K¥K¬K²K¼K½K¬K­K´K²K²KÀK¿K±KŸKžK›K˜K K®K¶KÅKÏKÐKÏKÏKÐKØK×KÐKÏKÏKËKÉKÈKËKÌKÏKÑKÐKÆKÇKÐKÐKÐKÎKÎKÏKÍKÏKÐKËKÉKÎKÐKÑKÎKÊKÌKÏKÒKÑKÑKÕKØKÔKÖKÚKÝKâKáKÞKÚKÝKØK°KpKTKNKOKPKTKTKZK[KZKYKYKZKfKdKbKdK_KaKaK^K_KaK_KeKhKpKsK}KK†K‰KŠK‘K•K˜K™KœKŸKšK›KœKšKK™K—K™K™K˜K—K˜K•K—K•KŽK‰K‰K€K|KsKeK]KQK9K3K)K*K-K4KCKLK\KfKlKqKzKK†K‰K’K•K”KK—K—K“KKKKK‘K‘KK‘KKKŽKŠKKKŒKK’K•KK‘KK“K’K‘K‘KK”K“K“K˜K‘K—K—KœKšK›K›KK~KfKFK0K*K'KK:K;K0K+K,K2K5K5K3K7K5K3K4K1K6K6K7K:K7K0K/K,K/K-K)K1K4K6K8K:K8K6K2K-K1K-K0K8K4K0K.K8K?K8K?KBKCKJKMKJKKKCKRK\KdKpKyK†KK’K”K“K•K›K›K˜KKK…K‰K–K›e]q^(K‚K‚K|KtKfK^KOKTKXK[KVKZK]K\KZKVK^KZKRKSKZKVKXKZK]KhKrK}K}K„KK‘K™K—KšKžK¢K¥K¦K¥K¤K§K¨K«K¤K§K¥K¥K¨K¦K¢K¡KžK 
K™K•KK‰K‡K~K{KpKcKbK\KPKLKNKGKKKTKXKTKVKYK_KaK_KaK`K`K^K]KcKbK]KbKhKaKbKcKcKdKgKcKgKcK_KdKaKhKcKeKkKjKjKiKkKkKjKnKnKkKtKlKtKuKtKsKuKsKuKvKvKxKvKxKyK}KvK}K|KzKvKxKyK|KyK{KzK€K~KtKjKkKoKpKgKhKoKnKqKrKzKoKrKrKtKqKlKvKxKvKzKxKtKuKtKuKvKyKuKlKzKxKkKdKgKtKrKsK~K‚KK{K~KˆK}KvK‚K†KƒKKKKŽKŽK…KƒK‘K”K’KˆK†K‹K”KŒK„K†K–K’K—KŒK’K•K’K•KŠKŠKKKKˆK“K–K•K•KžK K¦K±K·K´K­K¨K¶K¶K·K³K¬K£K§K¡K—KK©K½KÆKÉKÌKÌKÈKÉKÍKÏKÎKÌKÇKÌKÎKÊKÊKÏKÑKÖKÓKÎKÏKËKÉKÍKÍKÆKÌKÍKÏKÌKÌKÌKÍKËKËKÊKÊKÇKËKÑKÐKÔKÏK×KØKÙKÕKÚKÙKÙKÛKàKâKäKæKáKÝKÜKÛKÁKŽKYKKKLKNKKKVKRKSKUKXKZKXKZKUKXKYKZK[K]K`K]KdKhKgKtKwK|KK…K‹KK”K–KšKšKœK K˜KœKšK™K˜KœK›KœK˜KK˜KšK˜K’K˜K‘K“KŒK…K|KxKgK]KNK@K0K-K)K-K-K:KFKTKbKdKjKsKvKƒK„KŽK‘K‘K’K•K—K˜K–KK’KŒK˜KK’KK‘KŒK’KKKKKK‘K’K‹K–K’KK”K“K‘K‘KK’K“K•K•K—K˜KKKžKŽKvKZK>K+K3K3K+K*K1K-K-K0K-K/K*K.K1K/K4K7K2K3K8K0K4K5K4K9K9K7K0K0K+K-K,K/K3K=K8K;K3K0K/K/K0K/K4K/K4K7K2K1K5K5KK0K&K$K,K-K1K6KDKPKdKaKfKuKyKKˆK‰KˆK‘KK”K’K•K–K—KK‘KKK‘KKKKKŠKŒKKŽK“K‘K‘KK’KK‘K•K’K‘KK–K’K–K˜KšKœK›KK~KdK?K+K)K*K&K#K)K0K*K,K,K1K2K1K3K7K;K2K5K:K7K8K3K9K6K5K5K1K&K-K)K%K(K-K0K8KK;K7K9K1K,K,K-K.K6K5K5K7K;K:K;KKFKRKcKlKK€KˆKŽK’K”K•K˜K•K–K“K‡K‡KK‘K›KšKŸK¢K¤K¢K¥e]qd(KaKaKWKQKUKWKXKUKXKYK\K`K\K`K[K\KZK^K[KXKVKYKUKYKhKgKkKwK€K„KˆKK“K˜KšK 
K¡K¢K¤K¤K¤K©K¥K§K§K©K§K¤K£K¡K¤KŸKŸKžK˜K”KŒK‡KƒK~K{KoKiKaKZKSKNKLKKKNKMKNKXKXK_K^K`K_KaKaKdKhK`KjKbKfKdKgKaKbKdKdK`KdKcKcKhKgKiKcKnKhKiKgKjKiKqKjKgKmKoKlKtKtKrK{KvK}KtKwKsKuKsKqK{KuK|KyK}KzKwK|KwKyKzKK‰K‡KxKoKqKrKpKrKtKyKvKoKnKnKoKqKlKpKqKqKoKoKtKvKoKrK{KtKkKpK}KqKwKqK{KxKkKK}KrKmKK}KzKwK€KŠK…K{KK‰K‰K„KzKxKŠK‰K“K…K~KˆK‘K‹KƒK|K…K–KK„KKK–K”KŒK…KK‘KŒKKŠKKK‘KKŽK—K–K™K‹KK˜K§K«K¯K¤KK“KŠK{KyK›K¯K·KÄKÇKÁKÆKËKÎKÉKÉKÆKÁKÅKËKÎKÏKÐKÑKÎKÊKÄKÃKËKËKÉKÅKÃK¿KÁKÅKÁKÁKÆKÌKÆKÎKÔKÐKÉKÍKÄKÄKËKÆKÅKÃKÆKÆKÇKÊKÈKÊKÉKÍKÒKÐKÓKÑKÒKÔKÒKÓKÑKÕKÔKÓKÖKØK×KÛKÜKßKãKâKÝKÍKµK’KaKGKDKCKHKJKLKIKWKWKSKWKUKWK]KbKeKoKwK}KƒK„KŠKKK•K–K›KžK™KœK›K›KœK£K›K˜KšKšK™K—KšK˜K“K–K”KK‰K‚K|KrKdKYKSK7K3K*K-K(K-K=K;K3K1K=KOK]K`KhKzKyK|KƒK‰KKK‘K“K•K“K“K“K’K“KŽKKKŽKK‹KKŽKŠKKK’KK‘K’K–KK‘K“K”K™KšK™K›K”KK€KgK=K'K)K%K)K)K%K(K(K'K0K2K2K.K=KDK7K7K7K6K5K6K9K3K7K2K9K1K-K.K/K+K1K-K1K7K?K9K9K4K/K0K*K*K3K8K9K2K4K9K4K3K8K8K7K:K=K=KFKFKXKgKxK‚KˆKKK‘K“K•KšK—K–KK‹K‰K“KšKžK¢K©KŸK£K¥K£e]qe(KZKZKTKSKTKZKTKYK[KXKWK^K]KaK[KZKYK`K[KZKUKVKSKWK\KkKkKxKzK‚K‡KŽK™K”K™KžK¡K K¢K§K¦K§K¥K£K§K£K§K¡K£K¤K¢KŸKŸKŸK™K–KK‰K†K~KyKqKjKaK`KSKJKHKKKJKNKLKTK_KVK\K`K_KeK]KbK_KbKbKdK`KcKhKaK`KdKaK^KcKcKcKaKdKeKaKhKbKlKhKkKhKeKjKlKlKqKmKlKrKqKvKuKsKqKrKzKpKsKoKyKtKtKvKwK~K{KyKyK~K~KˆKŽKxKwKoKjKsKlKmKmKrKwKuKuKqKpKmKpKmKsKpKuKtKrKuKwKzKuKvKrKrKyKxK}KxK|KyK|KvKuKwKyKxKrKzK‰KŠK…KxK}K…KŠKyK|KƒK†KKŒK’KKˆKŽK”K„K„K‹K’K“K‹K†KKKŽKŠK‚KKK‹K‚KzK‡KŽKˆK†KŒK›K‘KK‡K‹KšK›K©KªK 
KœKœKŠK{KƒK¢K»K¿K¿K¼KÈKÅK¾KÂKÊKËKÉKÊKÄKÁKÌKÐKÑKÑKÑKÏKÊKÆKÁKÃKÊKÏKËKÈK¾K¾KÇKÍKÃK¿KÎKÇKÍKÒKÐKÈKÆKÁKÆKÉKÄKÅKÉKÏKÑKËKËKÎKÈKÌKÏKÑKÓKÓKÒKÐKÔKÔKÓKÔKÑKÔKÔKÕK×KÖKØKÜKÛKÝKâKáKßKÚKÂK’K_KNKRKNKDKGKLKSKMKTKNKRKWKbKaKhKnKvK|KK‰KKK”K“K”K›KšK™KK™K—KœK›KœKœK™K—K™K™K™KœKžK—K‘KK‰K‚K|KqKdKXKSK;K3K.K-K,K.K*K#K,K0K3K?KVKYKbKlKvK{KKƒKˆKŽKK•K˜K—K“K“K‘K•KKŽK’KKŒKŒKŽKŒKKKKK‘K“KK’K’K”K—K—K›K™KœKœK“K†KkKKK8K,K.K-K%K*K%K'K)K'K,K1K-K:K3K:K;K>K7K:K:K9K7K7K7K5K0K/K*K)K$K.K-K1K2K;K>KKEKHKMKQKRKQKVKXK\KaKgKkKrK|K~K†K‹KŽK“KšK—K˜KžK™KœKœK—KžK™K—KšK˜K–K›KœKœKKªK™K‘KŠK‡K‚KyKmKfKYKTK>K0K(K*K&K'K,K+K(K2K1K>KFKPK]KeKrKwK}K€K†KˆKŠK“K–K“K”K’K’K“K‘KŽKKŽKK‹KŽK‘KKK•KŠK“KK‘K“K•K–K•K˜K™KœK›K–K‡KuKXK2K*K%K+K(K%K(K&K)K/K+K+K+K1K=K7K2K4K;K7KDK:K8K8K2K2K4K9K7K+K+K&K/K2K8KBK:K:K6K0K-K+K1K:K/K,K;K7K6K=K2K6K7KKPK]KkK{K†KŒK’K‘KŽKKŽK“K•K—K“KŒKŒK•KžK¤K¥K¤K¥K¤K¤K¤K¤K£e]qh(KYKYK[KQKSK\KYK\K`K\KYKaKZK^K]KZK^K_K\KWKVKXKXKZK]KfKoKtK}K„KŒK‘K’K—KžKžK¢K¦K¥K¦K¦K¦K¦K¨K¨K¬K§K©K¨K£K¢K KŸKœK™K–K“KŠK„K~K~KrKoKcK_KWKNKJKHKMKOKSKVKVK\K\K_K`K_KaKfKcK_KcKcK_K^KcKaKbKfKfKaKbKdKdKeKgKhKgKaKcKhKhKeKeKnKlKmKlKqKpKoKqKvKvKwKuKvKyKuKwKvKxKuKyKvK{KzKwKyKxKzK€KK‚K{KtKkKoKtKpKfKjKoKpKnKrKtKrKoKtKnKqKqKnKuKqKiKpKsKqKmKqKsKuKtKwKsKqKvKyK~KwKqK|K„K‡K‹KƒKˆK†K‡KxKzKˆKŠKK…K…KK•K”KK…K€K‘K“KK‰KKŠK˜K‹K…K‰K‹KŠK‡KƒK…KKŠK…K~K€K‹KŠK’K„K„K’K“KŒK‚KƒK”K•K…K‚K‡K§K³K¿KÈKÅKÆKÇKÄKÄKÃK·KÂKËKÅKÁK½KÃKÈKÆKÃKÃKÂKÇKÇKÅKÇKÃKÅKÅKÆKÇKÈKÎKËKÄK»KÆKÅKÂKÆKÍKÎKÊKÃKÅKÎKÐKÒKÑKÍKÏKÎKÌKÍKÐKÏKÍKÒKÑKÐKÐKÐKÎKÒKÑKÑKÑKÐKÔKÓKÖKÎKÏKÔKÖKÖKÔKØKÔKÔKÙKÜKÜKáKæKÜKÅK—KaKAK@KDKCKGKIKOKNKVKXK]KaKkKsK€K„K‰KŠKK‘K–K™K›KœK›KœK™K›KšK›K˜K•KK—KšK˜K¢K˜KšK”KŽK‹KˆK~KxKmKfK\KPKAK4K-K+K1K*K(K$K(K+K.K0K8KFKWKZKjKhKvK€KK„KŠKK•K”K–K’K–K“K’KK‘KK‹KK‹KŒKŽK‘K‘KKK”K˜K•K•K–K˜K›K›K—K“K‰KqKHK-K3K(K#K)K%K+K+K'K+K)K7K/K5K/K5KBK8K7K9K3K5K0K7K8K:K6K0K/K3K.K2K.K6K:K:K@KGK;K6K.K,K.K9K=K1K4K6K3K5K3K7K4K2K6K8K8K>KMK[KqKzK…K’KK‘KKŽKK‘K“K“KKŒKK’KšKžK¢K¥K¦K¤K¨K¤K¤K£K¢e]qi(KZKZKWKYKWK_K]K_KZK]K\KYK]K[KbK[KVKXK]KXKUKZK[K\KbKgKmKsK€K†K‰K‘K”K•K™KžKK¤K¦K¤K©K¥K¥K¦K¦KªK¥K¤K¨K£K¤K¢KŸKžK›K”KK‹K…K~K|KnKlKbKcKWKNKIKFKLKRKQKVKRKZK\K[K`K[K^KcKdK^Kd
KeKcKiKfKeKgKeKgKhKbKfKcKdKgKcKhK`KdKgKdKgKjKiKkKnKrKnKnKpKsKuKrKuKuKuKsKyKxKtKzKyKtKsKsKtKwKxKyKK’K‰KxKvKkKjKrKoKjKhKdKiKmKnKvKvKvKrKnKqKxKpKoKpKsKuKwKrKiKqKqKoKvKvKrKlKyK|KqKvKvKK‚K‚KK…K…K…KKyKzKKŒK‰KŽK‰K“K”KKŒKŠKˆK‹K“K–KˆK‚K‰K“K‹K|K„KKŽKK‚KKŠKŽKŠK|K|KˆKŽKK‰KK‘KKKuK€KK‡K{K„K’KŸK»KÂK¿KÀKÃKÅKÇKÃKÇKÇKÁK¹KÍKÊKÄK¼K¼KºKÃKÄKÂK¿K¿K¿K¿KÄKÄKÆKÅKËKÈKÉKÆKÈKÁK¾KÄKÉKÉKÉKÏKÓKÑKËKÁKÄKÌKÓKÓKÎKÏKÒKÍKÍKÍKÎKÒKÎKÑKÓKÎKÐKËKÎKÏKÐKÐKÎKÓKÒKÐKËKÉKÓKÔKÕKÔKÔKÑKÐKÔKØKÕKÝKäKãKÙKÃK}KLK=KBKK5K6K?KFKGKLKSKYKYKeKmKsK|KK„K…K‹K‘K—K—K—KšKšK›KšKœK™K™K™K›K˜K˜K™K˜KœKšK˜K“K’KŒKˆK€KvKqKeKZKOKFK3K/K,K(K+K+K%K&K&K-K)K/K5K:KLKXK^KiK{KyKKƒK…K‹KKK“K’K‘K•KK‹KŒKˆKŽK‰KŒKŒKK‹K’K•KK”K”K”K—K›K—KK“K‚KcK@K.K-K,K'K)K-K.K0K*K)K-K-K5K.K2K2K9K4K=K7K5K/K3K4K6KHK4K4K0K0K,K3K1K4K4KK6K>KCKKKXKaKnKvK€K€KˆK‡KK”K˜K™K•K•K˜K˜K—K™K˜KšK–KK—K›KŸKœK›K K—KKK„KK}KqKlKcKRKDK>K8K3K-K,K)K*K/K0K+K,K&K1K*K%K(K%K(K-K5KCKOKUKdK…K¦KÃKÃKÉKÒKÚKÚKÛKÙKÐKÃKªKœK¬K´KÇKÅK«K•K’K‰KrK\K6K,K+K'K'K+K/K-K*K:K4K5K3K/K7K6K0K8K:K6K7K2K1K.K8K4K8K9K=K8K4K/K9K0K1K;K8K4K6K,K-K.K7K7K5K=K4K1K7K2K2K5K1K.K'K(K-KKCKKKSK_KiKpK~K‚K…K‰K‘K‘KšK™K–K•K˜K•K˜KœK›K—K›KšK–K—KœK˜K›KœK—K”KŒK‡KK~KvKgK]KRK?K?K2K+K/K/K-K2K.K(K0K+K)K,K&K(K)K#K(K.K.K>KNKiK›K¿KÉKÉKÓKÙKÞKÚKÖKÕKÕKÏKÉK¿K¹KÇKÌKÓKÍKÁK©K“KKYK@K/K.K-K.K*K*K,K1K/K1K.K)K1K1K6K8K2K8K4K8K9K6K7K8K;KAKBK;K:K7K1KKCKBK8K4K2K2K;K8K5K:K3K.K0K-K-K3K6K2K6KKGKPK[KhKqK{KK‡KŠKK‘K–K˜K˜K—K™KŸK˜K›K›K—K›K›KšKœKžK˜K˜K›K›K’KŒK‹K}KKoKlKgKRKGK:K/K2K.K-K/K0K-K.K(K(K*K(K&K3K&K'K-KFK…K¼KÓKÕKÜKÝKÚKÕKÏKÄK»K¸KÁKÍKÐKÐKÑKÕKØKÙKÖKÙKÙKÚKÚKÒK¸KPK"K#K%K(K/K+K1K-K0K1K4K*K1K9K5K>KAK=K4K:K8K9K>K/K1K8K1K6K4K/K=K5K9K7K7K9KK5K.K+K'K%K)K-K-K?KXKiKyKŠK‘K”K™K–K•K‘KKK‹K‡KK’K™KœKœK¡K¢K§K¡KŸK¡KŸK¡K¡K K¢K KŸKžKKKŸKKK e]qw(KVKVKXKYKXKVK]K]KZKYKZKZKYK]K\K^K\KbK]KTKTKOKVKQKWKbKkKuKyK‚KŒK‹K’K™KšK›K¡K¢K¥K¦K¥K¥K¤K 
K¦K¤K¦K¤K£K¤K¡K¢KŸK¡K—K˜KKŒKŠKKxKsKgKaK\KPKFKBKCKJKPKOKRKSKVKZKZKUKXKXK[K`K^K]K_K_K^KaK\K`KbK`K_KaK^K_KdKbKeK_KfKiKmKhKfKhKeKkKgKjKjKoKiKkKmKlKoKlKpKpKjKiKeKbKdKcK‹K×KØKÉK¥K’K{KmKfK`KfKmKiKeKdKgKmKmKnKnKrKlKpKxK|KqKgKoKnKuKqKwKtKoKiKuK{KzKtKnKiKzKƒK|KK~KK‰KK„KˆK”KŽKŒK…K~K~K‚K‘K“KˆKKvK|KƒK‹K‡KŠK‡K‘KŠK}KxKnKoKyKzKpKrKnKsKqKvKŠK™KžK«K®K³K±K²K¬K¯KÁKÉKÁKºKºK±K¹K¬KªKªK«K°K²K¹KÁKÆK»K­K¯K°K¹K¹K»KÅKÃK¶KµKÁK¾K¼K½KÅKÇKÃKÄKÄKÄKÇKÈKÄK¹K»KÈKËKÅKÅKÀKÂKÄKÈKÆKÆKÂKÆKÎKÌKÉKÆKÃKÀK¿KÀKÂKÄKÍKÐKÎKÑKÍKÉKÊKÏKÐKÐKÎKÈKÃKÈKËKÌKÎKÏKÎKÏKÎKÑKÒKÐKÑKÎKÒKÒKÑKÔKÑKÒKÑKÓKÕKÖKÖKÞKäKÞKàKàKØKÃKxK?K=KHKLKYKaKnKvKK‰KˆK’K•K˜KšK–K—K™K˜KšKKšK›KšKšKœKœK—K—KœK•K–K–KKˆKKyKnKkKaKVKIK;K2K*K/K)K.K(K*K(K%K%K$K'K!K%K(K.KOK—KÀKÎKÙKÚKÜKÕKÐKÉK·K¶K¿KÇKÎKÐKÒKÕKÛKßKßKâKâKâKâKÚKÓKÔKÈKpK&K&K*K0K,K-K(K0K/K.K/K1K2K4K7K7K7K4K7K0K3KAKAKMK8K3K,K3K4K1K5K6K4K6K6K8K3K5K2K0K/K5K7KDK9KK4K6K5K1K6K2K=K7K>K9K7K:K;K2K3K3K3K0K7K4K;K5K0K0K3K/K8K7K4K9K3K7K5K0K-K'K)K*K.KDKWKoK€K‹K”K™K”K”K‘KK‘KŠK‹KŒKK”K™KŸK¦KŸK§K£K¨K¢K K¡K KŸKžKœK¡KžKK KœK›KžK KŸK e]qy(K_K_KWKYK\KYKXKVKYKVKYKZKZK\KZKYKXKZKYKVKUKVKUKXKWKaKhKsK}K…K…K‹K‘K—KœKžK¡K£K¤K¥K¢K£K£K¥K£K¢K£K¤K£K£K§K¦K¥KŸK™K”KKŽK‡K…KzKtKhK_KTKQKEKJKEKNKIKQKMKYKRKTKZKTK^K[K_K^K`K`KfKaKaKbK_K^KaK^KbK_K^KZKbK^KeKdKeKdKhKlKhKiKmKkKlKjKjKhKjKjKlKtKjKnKoKsKlKhKgKgKmKyKÃKÚKÌK¶K§KŽK}KoKhKdK]K`KjKfKjKkKiKrKqKhKoKtKkKkKmKxKuKrKsKvKtKuKuKyKsKwKsKyK€K{KsK|KKzK€KƒK„K‚K{K…K†K‡K†KK‚K„KŠKKxKtKzK†K†KˆKˆKˆKŠKK†K}KyK|K„KƒKvKmKdKkKiKpK‡K KŸK«K©K¤K¦K®K»K»K¾K·K¦K¶K¿K·K°KªK¬K±K²K³K´K´K³K°K°KžK¡K¬K¸K¹KºKÃKÁK½K·K¶K¿K¼K³K²K¹KÂKÃKÂKÆKÃKÀKÀKÂKÄKÆKÇKÀK»KÁKÆKÇK¾KÀKÄKÁKÅKÈKÅKÆKÂK¼K¾K»KÂKÄKÉKÈKÈKÉKÈKÌKÌKÏKÎKÐKÉKÉKËKÏKÏKÍKÎKÉKÊKÈKÍKÍKÏKÐKÏKÌKÌKÏKÐKÒKÐKÏKËKÊKÏKÒKÒKÏKÓKÓKÖKØKÙKàKâKãKßKØK¿KsKBKCKNKVKbKjKqK€K†K„KŽK–K“K•K—KšK˜K–K™KK›K™K—K˜K—KžKœK˜K–K–K–K•KŒK‰KKwKmKbK\KNKCK:K1K.K+K+K)K)K'K"K"K(K&K)K+K@KxK±KÇKÒKÚKßKÚKÑKÂK«K°KÀKÏKÓKÐK×KÝKàKâKâKãKàKäKäKåKçKèKçKæKãKßK¾KOK"K*K-K)K.K'K,K6K.K+K/K;K7KK6K2K-K+K*K=KCKKKgKuKƒKK™K˜K”K—K”K’KKŒKŠKŽK’K˜KKK¡K¢K¢K£K£K¤K K KKŸK KŸK K KžK¡KœK 
KŸKœKœKKŸe]q{(KWKWKUKXKWK]K[KYK]K]K_K]K^K_K]KbK_K_KZKXKXK[KTKYKZKbKmKtKzK„K†KKK–K™KK¡K¢K£K¨K£K¨K¨K¥K¥K¢K§K£K£K¦K¢K¤K¡KœKK›K‘KK„K‚K~KtKjK]KXKNKDKBKIKJKLKNKSKRKTKVKWK^KZK]K[KcK`KbKaKaKbK`KaKbKdKcKeKaKbKeKcKaKaKaKhK`KdKgKmKgKlKfKjKiKjKkKnKmKtKqKsKnKnKlKpKkKbKcKlK K×KÐKÃK­K«K KKrKfKgKhKbKkKdKdKkKmKrKoKrKkKcKlKvKvKtKrKlKrKvKuKwKrKuKwKxKyK}KqKqK{KŠKƒK}K~KƒKŠK†K€KzKxKwK…K†K~KsKeKrK‡K‡KKŒKŒKƒKŠK‡KK|KxK|K…K{KqKjKoKlKfKvKŽKšK¦K¦K®K°K¯K¶KµKºK»K»K­K¢K¢K¤K£K¬K´K°K²K²K·KµK¹K¬K¡K K©K±K°K±KµK¸K¼K±K·K¿KºK»K¼K»K¾KÂK¼KºK¾KÆK½KÂK½K¿KÉKÃK¿KÂKÄKÅKÃK¹K½KÇKÃK½K½KÂK»K¶K¸K¼KÁKÆKÈKÊKÍKÐKËKÉKÉKËKËKÉKËKÑKÐKÊKÉKÉKÄKÃKÇKÏKÎKÌKÇKÁKÇKËKÌKÍKÎKÏKÊKÍKÍKËKÍKÌKÊKÈKÉKËKÎKÏKÐKÐKÕKÙKÛKàKáKàKÝKÙK¾K€KMKGKKKTKjKrK€KƒKˆK‘K™K›K—KšK™KšKKšKžK™K™KŸKœK›K›K™K™K›KšK–K’KK…K~KwKkKhK^KUKDK0K3K,K+K$K)K!K%K%K'K(K2KaKœK¼KÎKÔKÛKÙKÓKÆK«KŸK°KÉKÑKÓKÙKÞKÙKÝKÜKÞKàKßKÞKÜKÝKÝKßKáKâKåKêKêKåKÛK K4K(K+K+K(K)K-K/K+K2K1K0K1K2K5K;K6K6K6K:K2K3K/K/K+K0K2K/K9K.K6K3K/K2K/K,K/K.K:K4K;K?K5K:K-K5K-K0K/K3K2KEKWKqK…KŒK•K›K™K™K“K’K‘K…KŒK’K’K˜K›KžK¡KŸKžK¢KŸK¡K K¢K KŸK¥K¡K K K K KŸKŸK KKšK KKe]q|(KZKZKUKXK[KXKUK[KZKZKYKZKZK_K^KZK`K`K_KUKZKZKZKXK]KcKiKwK~KK…KŠK”K–K™KœK¢K¥K¢K¥K©K¤K©K¨K¨K¨K§K¥K£K¦K£K K¡KKK˜K”KŒK…K„K|KuKeKYKVKMKJKBKDKIKGKKKPKOKPKYKZK]K_K_K[K^K^KfKbKbKhKcKbKcKiKbKaKcK`KcKcKdKhKcK_KfKeKfKiKcKlKiKgKiKiKjKiKqKsKrKoKoKpKhKjKiKeKhK{K¬KÕKÒKÀK²K§K†KwKjKcKjKaKdKeK`KiKaKiKpKlKeKgKjKoKwK~KsKnKlKnKxKvKnKkKyK|KxKkKpKnK€KK~K„KƒKƒKKK|KuKnK|KƒK€KqKkK]KuKK†KK‰KŒKK„K‰KƒK|K€K‚K€KzKrKoKnKlKjKxK•K¡K£KŸK§K¬KºK¸K´K´K¶K³K­K©K¡KŸK¢K§K£K¦K³K¸KÁK¿K´K¦K¢K­K®K²K·KºK´K«K²KºK´K´KºK¾K½K¿K½K¸K¿KÀKÀKºKÄKÄKÂK¹KµK¿KÉKÅKÂKÂKÅKÆKÀK¾K¾KÄKÁKºK·K¶K¶KÁKÅKËKËKÇKÉKÉKÎKÌKÈKÉKÊKÍKÊKËKÎKÊKÈKÈKÉKÇKÅKÂKÇKÉKËKÆKÄKÆKÇKÏKÌKÐKÏKËKÊKÎKÊKÌKÐKÍKÌKËKËKÓKÓKÐKÎKÎKÕKØKÝKâKáKÞKÝKÓK²KiKLKKKVKhKpK|K‚K‹KK‹K’KšK¡KK–K˜KKœKšKžKœK˜K›K™K˜K™K™K–K™KKŒK…KK{KoKiKZKHK8K+K'K'K(K!K$K#K%K$K.KDKlK²KÇKÑKÖK×KÖKÍK»K¡KK¸KÏKÓKÓKÙKÚKÜKÛKØKÚKÚKÚKÛKÙKØKÜKßKÝKÞKàKâKêKéKçKßK¶KBK'K+K*K-K,K*K)K-K5K.K/K1K6K2K3K9KK3K4K2K.K2K-K0K-K+K+K,K/K)K*K6K2K,K8K5K1K3K0K3K7K*K*K%K"K&K9K7KJKgKK‘K™K™K™K›K“KK‘KKK‘KšKšK¡K K K¢K£KŸKŸKžK 
KKžKŸKžKŸKŸKœKKœK KKŸKKžKšKœKœKšK›KœK¢KœK›e]q…(K_K_K_KeK^KcK`KbKcKcK^KdKeKcKbKeK`KeKcK_KaK^K^K]K\KdKpKqK|KƒKKK’K“K˜KŸK K¤K¨K¨K¨K¨K¨K¦K¨K§K¤K¥K¦K¤K¤K¨K£K¢KŸKKšK•K‹K…KzKpKkKeKXKTKVKIKNKPKTKTKQKXKYK\KaK]K`K\KbKcKfKeKdKgKhKdKgKaKaKcKnKeKhKaKgKfKeKgKgKfKkKkKfKhKiKmKoKpKnKmKoKqKqKrKqKpKlKkKeKfKqK¶KÙKÔKÈKÇK«KK†KwKuKhKcKdKfKiKdK^KlKkKiKgKjKfKZK`KkKuKxKzKyKtKvKoKwK~K„K}KvKzK~KKzKvKtKqK{KƒK~KzKxKmK{K„K…K‚K…K„KK–KŒK‚K‚K†K‚KKnKaKbKiKK“KšK–K•KœK«K²K®K§K¤K—KK¡K¢KŸK›K£K¤KµKºK¬KKKœK¡K¬K«K´K¾K·K¶K±K³K¶K¯K·KµK®K²KµK»K¹K³K¶KÁKÀK³K¸KºK·K¹K°K¯KºKÀKµK²K·KÁKµKªK¨KK«K½K»K´K¾KÃK½K´K¶KÀKÂKÀKÂKÇKËKÆKÃKÄKÇKÃKÃKÃKÀK¿KÅKÅKÅKÄKÅKÈKÄKÀKÁKÈKÉKÌKËKÈKÇKÀK¿K¶KºK¶K¥K¯KÁKÉKÊKÅKÆKÃKÄKÌKÌKÌKÊKËKÌKÌKËKÆKÍKÎKËKÍKÍKËKÐKÍKÌKÍKÒKÔK×K×KÔKØKÖKÍKÀKžK†K„K†K”K•KšK–K˜KœK–KšKK™K›K›KšK˜K™K•K‘K“KK„K|KwKfKZKPKDKKKaK”K¸KÉKÒKÜKØKÏKÁK¸K°K«KªK½KËKÏKÔK×KÜKØKÖKÔKÐKÓKÒKÏKÑKÍKÏKÊKÈKÊKÊKÍKÌKÍKËKÍKÐKÓKÕK×KÛKÝKÝKáKâKâKàKÓK†K(K(K%K)K)K.K6K6K6K3K8K7K7K:K8K)K2K2K2K*K.K,K.K,K,K)K*K,K-K1K5K2K8K2K6K3K1K/K(K$K!K!K%K-KGKZKwKŽK—K™KœK˜K•K–K‘K“KKK–K›KŸK K¡K¢K¤K¢KŸKŸKžKK¢KKKžKœKKK K¡KK¢K›K›K›KœK›K—K•K˜KšKœKŸK›e]q†(K^K^KeK`KcKcKaK^KbKbKfKlKcKbKbKbKcKaKbK\KaK`K`K^K_KgKlKrK~K„K‚K‘K‘K—K›KKžK¢K©K§K¦K¥K¥K¦KªKªKªK¥K¥K§K¨K¨K¥K¤KœK–K›K”KKˆKzKoKlKcKWKTKNKJKLKPKMKSKQKZK[K_KaK\K\KbKbKdKfKbKaKaKjKgKcKgKbKfKjKgKqKgKhKgKdKeKeKfKgKmKmKkKlKkKpKmKlKpKqKqKoKuKrKpKiKgKeKcKwKÎKÛKËKÆKÉKžK”K}KKgKnK[KbKaK_KcKcKgKeKjKhKcK^KiKlKqKuK}KwKrKqKwKzKrKqKtKsKzK~K|KwKiKgKpKzKyK{KKyKwK{KƒK†KˆKƒK„K…K~KKK{KƒKKzKnK\K[KpK}KšKŸK£K K”K˜KŸK±KµK«KžKŽKK”K›KœKŸK¨K´K»K­KK‘KK¤K¬K®K²K¬K³K¾KºK°K³K®K®K·KºK¼K·KºK©K´K³K³K¸KÀK¿K·KµKµK¼K¹K²K®K¸K¾K¸K¯K±K´KK£K©K±K¾KÃK½K½KÀK¾K¹KµK¶K¸KÀKÃKÂKÃKÄKÄKÂKÇKÆKÈKÆKÄKÄKÂKÃKÅKÁKÅKÈKÇKÃK½KÃKËKÉKÍKËKÇK¿K¶K©K™K§K­KÀKÆKÊKÊKÈKÉKÅKÅKÇKÍKÌKÉKÉKÊKËKÉKÉKÈKÊKÊKÌKÍKÌKËKËKÈKÌKÍKÏKÕKÔKÕKÕKØKÙKÖKÂK¡K‰K…KK“K–K“K˜K–K™K™K—K›KšKœK˜K–K–K•K”KŒKŒK„KxKqKdKYKVK[KwK¥KÁKÓKÝKÕKÏKÊK¿K°KµK®K®KÀKÌKÑKÖK×KÕKÖKÖKÒKÏKÏKÒKÓKÐKÎKÌKÌKÉKÉKÈKÊKÊKËKÌKÌKÐKÔKÕKÖK×KÚKÛKßKãKâKáKßKÑK|K'K0K(K-K2K2K,K1K1K3K4K6KK2K6K8K>K0K(K K#K K$K)KAKUKkK†KK•KœKšK–K–K“K‘KKK’K›KK¡KŸK¢K£K¢K¢K KK 
KŸKKœKŸKžKœKŸKKœKžKKšK›KžKŸKžKŸK™K™K›KœK›K›K¡e]q‡(KcKcKeK`KcKbK]KeKdKcKcKcKdKcK`KeKaKaK`K^KeK]KXK\KbKfKjKrK~K„K‡KŒK‘K•KœK™K¤K¤K¢K¨K¤K¨K¤K©K¥K¥K©KªKªK¬K©K§K©K¤KžK›K—KKŠKKyKsKkK`KXKUKOKJKOKQKQKPKVKXKZKbKaK^K]KeKdKhKfKbKdKfKhKlKiKlKdKgKhKeKmKhKjKgKaKfKeKjKfKmKnKnKrKpKkKmKoKoKtKvKqKtKtKoKmKjKdKdKƒKÐKØKÊKÐKÃK–K’K„KxKrKlKeK]K\KaKdKaKbKgKdKaK^KlKpKnKqKrKyKuKlKrKK}KsKnKnK~K…K€KvKoKfKqK€K}K{KxKxK|K‚K„K{KKŒK‚K„K}KzKuK}K{K„KuKgK\K_KdK{K”KœK¨K¢K¢KžKKšK K­K¦KžK”K•K’KKœK°K¶K®K£KK“KŸK­K­K¬K®K®K«K«K°K»K¸K®K°K­KºK½K¿KºK±K¨KªK°K²KµK³K¾K½K±K®K°K¶K¸K´K±K¹K¿K¬KšK K©K­KµK°K·K»KÁKÃK¿K¾K¶K¹K´KµK¹KÁKÀK¾K¼KÃKÆKÇKÅKÂKÅKÀKÅKÃKÇKÄKÄKÆKÄKÊKÇKÃKÂKÊKËKËKÍKÉKºK K™K›K³KÅKÅKÉKÌKÆKÊKÉKÆKÉKÈKËKÌKÉKÅKÉKÊKËKÊKÈKÇKÉKÊKÊKÉKÇKÉKÈKÍKËKÌKÑKÔKÖKÒKÕKÚKÚKÕKÁKKˆKK‘K—K—K™K˜K˜K˜K™K˜KšK”K˜KšK”K”KK‰K‰K~KtKqK`KdKrKKºKÍK×KÛKÕKËK¿K²KÀK¹K±K³KÅKÊKÐK×KÙKÖKÓKÓKÓKÐKÏKÏKÎKÍKÍKËKÊKËKÉKÅKÈKÈKËKËKËKÏKÐKÔKÖKÙKØKÚKÛKÞKáKãKßKÝKÍKmK'K,K)K*K1K1K2K3K4K4K3K5K4KFK1K1K0K3K3K0K.K-K4K.K7K-K1K7K9K7K8K9K6K9K;K4K0K,K#K KK%K1KLKcKKK–K—K›K˜K–K”KŒKKKK•KšKŸK¢KŸK K KŸK¡KK KKœK›KžK›KšKŸKžK›KžKšK™K›KžKKKœKK›KšKKšKšK™Kœe]qˆ(KcKcKaKcKdKdKeKcKdKcKdKbKgKfKbKdKaK`KaK`K_KbKaK^KbKfKjKoKyKƒK‰K‹K—K”KšKK¢K¢K¤K§K¨K¥K§K¨KªK©K§K£K¨K©KªK¨K¥K£KŸK›K”K’KK„K{KrKjK`KXKOKNKKKOKPKRKQK[KTKVKZK`K^KaK_KdK_KdKbKfKfKeKhKcKfKiKjKfKkKiKkKdKfKcKcKdKdKhKhKiKrKiKmKlKqKlKtKoKlKwKoKsKqKjKjKhKgKKÕKÒKÐKÎK±K“KKwKwKoKfKeK]K]KcK^KdKhKhKaKeKnKnKnKpKrKoKqKnKqKvKrKjK`KkKxK„KuKqKeKjK{K~K€KKvKvK~K€KƒK|KvKK†KK{KsKqKzK€K…K|KmKfK`KpK}KK˜K˜KŸK¦KŸK§K¦K–KKKŸKKœKK‘K—K¢K´K²K˜K’K—K¢K³K­K·K®K«KªKªK«KªK°K¶KºKµKµK¶K¼K¿K¿K²K©K­K¬K¯K¶K±K±K¿KºKµK­K´K·KµK¶K®K­K£K˜K¨K°K·K´KµK²K°K¶K¿KÁK¾K¸K¼K¼KµK±K¾KÂK¾KÀKÁKÄKÅKÃKÆKÄK¿K»KÇKÈKÊKÈKÅKÃKÅKÇKËKÃKÇKËKÉKÆKÄK®K˜KŸK©K·KÄKÃKÃKÈKÇKÊKÈKÇKÉKÊKÍKÌKÈKÆKÉKÉKÌKÊKËKÊKÈKÇKÇKÄKÂKÇKÆKÌKÊKÊKÌKÐKÑKÔKÓKÙK×KÙKÎK»K—KK‘K•K™K›K›K™K–K˜K™K˜K–K”K˜K”KŒKŒK†K†KKvKpKwK’K¶KÇKÓKÝK×KÏKÈK¾K¶K½K¿K²K¼KÆKÎKÑKÖKÕKÔKÕKÒKÒKÑKÐKËKÊKÍKÏKÌKÊKÉKÉKÈKÈKÉKÍKÍKÊKÎKÏKÐKÒKÕKÙKÛKÛKÜKÝKàKáKÞKÝKÊKZK-K/K*K'K&K-K.K1K.K/K.K.K4K>K6K8KBK7K6K5K0K2K3K/K.K5K1K3K1K2K-K7K4K1K8K4K,K'K"K 
K"K,K:K]KvK„K‘KKœKœK™K–K”KKK“KšKšKKžK¡K¡K¡K¢KŸK¡K¡KŸK›KžKŸK¤K›K™KŸKK›KŸKœKKœKKšK™KœKžKžKžK˜KK˜KžK›e]q‰(KbKbKaKcKcKdKbKjK`KeKfKcKdKcKdKjKcKdKjK_K]K]K`KcK\KeKjKqK~K€KˆKŒK“KšKšKKŸK¡K¥K¤K§K¨K§K¦K¦K¨K§K¤K¥K«K¦K«K¥K¤KžKK–K•KKK}KtKjKbK[KVKLKJKRKOKTKPKVKSKTKWK`K^K_K\KeKbKdKbKdKbKbKeKbKgKeKcKiKkKkKbKjKeKeKdKcKdKeKhKoKrKkKlKmKoKoKpKqKmKsKqKqKrKmKdKfKiK«KßKÓKÕKÈK¶K¡K{KuKtKsKtKfK`KbKXK[KcK`KdKiKrKnKqKuKsKkKfKjKqKyKkK`K\KgK|KvKtKuKhKiKwK|KKK}K~K‚KKKƒK|KK…KwKqKmKpKxKyK{KwKjKbKfKoK‚K˜K›K”K›K™K£K¤KŸK K’KKŽKK˜K›K–K–K£K§K–K“KK™K¨KªK´K³K¯K¬K­K©K¯K­K¯K«K©K¿K¿KºK¯KªKºK¿K·K®K²K­K§K°KµK´K°K¸K¼K±K¶K³KµK²K©K”KžK±K¶K·K¼K¹K¶K´K³K±K¹KÀKÀK¾K¼KºK¹KµK»K¼K½KÄK¾K¼KÁKÃKÅKÃKÁK½KÃKÈKÌKÊKÅKÅKÂKÆKÍKÄKÆKÊKÅK°K©KšK§K±K¹K»K»K¾KÃKÄKÆKÆKÉKÊKÉKÉKËKÍKÉKÉKÊKÌKËKÊKÊKÈKÈKÅKÅKÅKÄKÄKÂKÈKÈKÌKÉKÌKÏKÑKÑKÔKÓKÕKÓKËKªKK’K˜K›KœK›KšK™K˜K—K‘K–K—K•KKK‹K„KƒKK„K“K¬KÄKÑKÙKÛKÚKÌKÄK·K¿K¼K½K¶KÀKÉKÎKÔKÖKÖKÑKÒKÓKÑKÐKÐKÍKÉKÊKÌKÎKÌKÈKÅKÉKÇKËKÌKÌKËKÎKÒKÐKÐKÓKÖKØKÚKÛKÜKÞKàKáKÜKßKºK?K(K1K-K*K+K,K3K2K-K2K1K3K6K4K7K7K4K8K8K2K/K,K+K*K-K-K0K5KK@KEKKK]KkK€KŽK–K–K˜K–K•K‘K’K‘K‘K”K˜KšKžKK£K¡K¡K KŸKžK K KŸKŸK¡KŸK›KœKœKKK›KšK KžKKšKK—KœKKšK›K›K™KœK—K—K—KšKžK™K˜e]q”(KbKbKcKaKgKbKeKjKjKhKcKaK^K]KcKeKaKbK`KaKfKbKaKhKgKmKpKzKK‚K‹KK“KšKšKŸK¡K£K¦K©KªK­K¬K¬K®K­K®KªK¯K«K­K®K¬KªK¤K KK“K‘K†K}KuKnKhK[KTKPKJKGKKKNKOKTKTKTK\KZK]K\KbK^KcKbKaKbK`KfKfKgKaKeKeK`KbKbKdKbKcKaKgKfKbKcKdKjKfKhKdKiKgKjKiKhKjKeKfKbKWKYKdK§KÛKÜKÑKÆK´KµK§K–KKKƒK€KzKtKjKoKdKbKjKnKiKoKkKlKeK`KcKlKnKkKpKmKrKuKtKtKsKpKsK{KxKuKoKlKxK}K{K…KK~K}KyKsKmKjKwK‰K™K‘KK”K•K—K•KK–K’KKˆK†KŒKŽK’K›KšK{K{K~K“K–KœK•K‘KKžK£K£K§K¡K¨K§K KšK¥K§K¡KœK¡K™KžK«K°K±K·K²K²K¶KªKŸK 
K°K®K¯KœK‘KŸK¢K¬K³K®K±K²K¶K¯K¥K¦K¦K®K¹K·K³K²K´K¬K­K°K­K°K¯K®K¤K©K¹K·K²K«K­K½KÂKÁKÁK¿KÅKÀK¹K±K¬K°K¯K´K³K®K±K³K´K¶K¾KÀK¾KÀK½KÀK¹KµKºK½K¼KÀK¿KÀKÂKÄK»K·KÄKÁKÀK½K»K¶K¹K¼K¼KºK´KµK·K¹KºK¼K¾KºK½KºK±KºK¾K½K¶K¹K¸K°K³K¿K½K·K¸K»K¾KºK½KÂKÆK¾K¿KÇKÍKÏKÑKÉKÆK¹K¼K¾KÄKÂK¹K¾KÃKÅKÊKÏKÏKÒKÍKÌKÍKÍKÎKÍKËKÉKÉKÊKËKÊKÈKËKÃKÅKÅKÈKÈKÇKÉKÌKÎKÍKÎKÎKÌKÉKÌKËKÐKÇKÃKÂKÃKÄKÎKÔKÒKÔKÓKÓKÕKÝKÞKÞKÜKàKáKÞKÃKKK%K$K,K,K)K(K2K-K'K,K0K2K7KGK7K?K8K/K2K3K)K(K)K4K0K.K,K-K1K7K2K8KBK?KEKNKIKXKlKxKƒKK–K™K™K•K“K‘KKK“K˜K›KKœK¡K K¢K£K¢K¡K¢K£KŸK¢KKœK KŸKžKœK›KKžKžK›KžKžKžKK›K™KKKœKK›K—KžK˜KšKšKKK™e]q•(KdKdKaKeKdKeKcKgKhK`KeKaKaKaKeK_KlKeKcKcKbK_KeKgKiKnKtKvKKƒKˆKK•KŸK›KžK¡KªK¦K¬K«K«K¬KªK°K¯K³K®K­K¯K¯K«K¨K«K£KŸKšK”K‘KˆK~KuKqKbKXKRKLKMKPKLKSKOKWKXK[KZK]K_KaKaKYK^KeKcKgKdKgKgKaKaKeKaKdKbKdK_K_KdKeKgK_K^KfKbKaKaKcKjKfKlKsKgKkKgKeKaK^K^KZKlK¾KÜK×KÑKÀKµK·KžKŸK”KŒK‰KKwKpKmKnKcKfKjKsKrKmKhKnKbKdKhKhKlKpKiKpKrKsKuKuK{KyKtKrKwKsKpK{KwKzK}K„K{KzKtKkKjKfKvKŒK•KšK”KKK—K™K–K—KKKKKŽK“KžK…KyKwKvK‹K“KK“K—K™K™K•KK›KªK´KªK™K™K“K¡K K¢K¡KžK¢K¨K¡K¢K¯K¸K¹K¸K´K¼K·K¡K K£K¦KŸK•K™K¬K·K±K¶K±K©K¬K±K³K°K­K§K¦K¥K¶K¹K±K­K¯K¬K³K²K¬K©K©K¨K¦K°K¸K¶K°K®K²KÂKÆKÅKºK¹K±K¬K±K¶K·K¸K¸KµK¶K¶K¶K³K±K¹KÁK¼KÀK¼KÀK»K»K¹K¸K»K»KÁKÁK¾KÁK»K»KºKÁKÀKÀKºK°K²K¶KºKµK·K¸K¹K·K¹KºKºK¶K¼K»K´KµK»KºK±K²K´K­K®K±K·K·K´K¯K»K¹K¹KºKÁKÇKÅKÊKÎKÍKÇKÂK¸K¼KÇKÃKÀKÁKÈKÇKÈKËKÐKÒKÏKÍKËKÈKÌKÌKÊKËKÊKÊKÇKÆKÉKÈKÆKÇKÄKÅKÉKÊKÉKÊKÉKÉKÎKËKÌKÏKËKÊKÍKÌKÉKÃKÂKÀKÇKÈKÏKÑKÑKÑKÑKÒKÔKÛKÝKÞKßKàKßKÛK©K4K(K,K%K+K0K*K,K3K6K1K,K,K2K9K2K1K6K/K5K.K'K.K)K,K7K4KKHK\KhKvKˆK‘K™K˜K—KšK–K‘K“KK“K–K™KžKžKŸK¡K£K¡K¦K¢K¢K KžK KžKKžKœKŸK KœKœKœKK›KœKŸKžK›KœKžKœK›KK›K›KœKšKœK˜KžKœKKœKe]q—(KjKjKdK`KeKcKbKdKcK^KVK_KbK_KhK_KeKaKdKfKfKgKcKeKjKkKpKvKzK€KŠKŽK‘K›K™K£K£K¥K¨K«K¯K«K«K¬K¯K²K¯K­K­K¬K­KªK«K¨K¦K KœK”KK…K{KrKsK`K[KSKRKJKGKKKSKOKTKWKZKYK`KbK[K]K^KcK_K^KbKeKbKdK_KdKdKbKaK`KeKcKdKcKaKbK`KjK_KhKbKfKdKkKmKkKhKiKeKeKgKaK[KZK`KvKÎKâKÓKÌKÇK¬KµK¼KšKK“K›K~K}K…K|KnK{KƒKtKkKnKiKbKhKrKcKcKkKnKjKuKlKqKsKvKqKqKlKsK|KzK…KwKyKzK~KK€KyKxKmKfKsKK”KKK‘K—K›KKˆKKŠK…K‹KKKžK˜KŠK{KxK{K‰K’KKŽK”K”KK“K—KœKžK’K’K£K£KšK–K”K–KœK£K 
K§K¤K©K°K³KªKžK¥K«K®K³K­KªK¡KK•K¥K¥K§K¤K²K¼KµK¶KªK©K¥K¨K²K°K³K´K«K¨K¤K­K³K±K®K¯K²K¯K®K¥K¬K¨K¦K¤K¨K´K¸K»K³K®K­K°K·K¸K¶KµKµKµK¶K³KµK·K¼K½K²K°K²K²K³K¹K½K½K¿KÅKÂK»K·K¸K½K¿K¿K¾K½K¿KºK¶K±K±K·K¼K·K¬K§K°K²K°KºK»K¼K¹K¹K´K®K¸K·K¶K°K¸K¸K±K©K©K¤K¥K¨K±K³K³KºKÄKÂKÍKÍKÊKÄK´KªK«K»KÇKÉKÃKÂKÃKÃKÊKÏKÒKÓKÓKÒKÏKÌKËKÅKÂKÅKÉKÉKÉKÈKÈKÈKÈKÈKÇKÇKÇKÇKÃKÇKÊKÊKËKÉKÏKÌKÉKÍKÊKÍKÎKÍKÄK¼K·K¼K¾KÅKËKÌKÏKÓKÑKÑKÔKÔKÞKÜKÛKÝKÙKÙKËKiK)K(K&K$K2K+K.K.K4K/K.K+K4K5K4K/K.K,K.K*K0K'K>K1K0K1K7K8K>K7K3K:K.K6K:KPKNKiKuKˆKK”K›K—K˜KK”KKK’K–KœKKŸK KžK¤K¢K K KžK K KŸK K KKŸKžK›KŸKœK›KžKKKœK¡KœKšKžKœKžK›KŸKšK™KœKœKœKœK™K›KK™Kše]q˜(KjKjKgK`K_KbK`KeK`KcKcKdKaKaKbKaKaKaKaKaKcK`KcKeKkKfKqKrK|K~KˆKK“K›K›K¡K¡K©K¬K­K¬K¬K®K®K°K°K­K¬K¬K«K°K­K¬K¨K§KŸKšK–KK‡KKuKoKcKYKQKJKGKMKKKNKLKWK[K[KVKZK_KbKcK]KfKfKdK_K^KdKdK_KeKdK_KbKcKdKeKbKcK_K_KhK`KfKeKaKeKeKlKpKkKgKjKeKfKkKaK]KXK^K{KÕKÛKÕKÍK»K³KÃK¸KK¡K”K“K‚KŠKƒKxKwK„K‚KvKsKlKpKoKkKmKgKhKnKkKkKhKoKsK|KnKhKcKjKrKzKyKtKtKzKzKK€KxKyKrKhKpKKK’K”K”K”K“K™K–KKK†KˆKKKK¨KˆKzK…KKˆK“KœK–KKK”K“KK˜K˜K¨KšK€K…K—K KšK“K’K™K«K¨K¬K­K¬K°K­K³K§K¥KžK K¨K«K K–K˜K¥K³K²KªK£K¥K±K³K¯KªK©K©K«K¬K°K±K³K³K©K©K§K£K´K¹K®K°K¬K®K°K±K©K¦KªK¬K©KªKºK´K­K¬K®K±K¹KÀK¼K¹K´K´K¯K±K±K¹KºK¯K©K²K·KµK¸KºK·K¼KÁKÂK¾KºK»K¶KºK¿KÁK»K¹K¸KµK´K±K¬K²K´K²KªKªK­K²K¸K¸K¹KºK¸K¼K®K«KµK³K¬K²K³K¬K¥K¢K§K£K£K«K»KÀKÇKÑKÌKÃK¾K·K®K¦K©K¹KÃKÆKÈKÁKÅKÆKÌKÑKÒKÔKÔKÐKÏKÎKÍKÈK½K¿KÆKÇKÈKÈKÇKÈKÈKÊKÈKÇKÄKÆKÆKÄKÉKÌKÉKÈKÉKÎKÍKËKËKÍKÌKÐKÉKºK·K°K´K¹KÃKÇKÊKÎKÏKÒKÑKÓKÔKØKÚKÛKÙKØKÚK¶KBK)K%K K&K,K/K0K)K+K1K.K2K-K1K3K7K1K6K-K'K)K.K-K,K,K4K:K7K5K6K+K4K(K/KK2K8K+K+K%K0K@KWKqKKK”KšKK™K’K’KK‘K“K—KK›KŸK£K¡K K K K¢K¡K¤KŸKK¢K£K¡KŸK¡KŸK¢KŸKžK KšKžKžKŸKKŸK›K¡KžK KŸKœK›KžKKžKœKœKKŸKžK¢K¡Kše]q›(KcKcK_K`KcKcK_KgK_KmKjKcKaKaK`KbKcK`KeK`K[KaKaKaKbKeKhKsK~K€KŽKK‘K—KK 
K¤K¥K¦K¦K­K­K­KªK±K­KªK­K­K­K¯K«K­K§K¢K¤K—K•KK‰K‚KxKtKcKYKPKLKNKJKLKOKNKSKWK[K[KcK`KbKaK_K`KcKdKdKaKeKeK`KcKbKcKgKcKaKcK_KdK`KfKaKcKdKbKdKgKiKeKcKhKfKkKeKeKhKcK_K[K`K‘KßKÚKÎKÈKÂK±K³K±KKŒKžKŒK…K‹KKˆKsKyK“K…K{KuKmKjKgKdKhKhKgKhKxKqKjKkKnKrKqKjKrKwK}KsKwKqKlKjKqKoKhKyK„KˆK”K‘KˆKŽK˜K•K‘K”KK‚KˆKK”K˜K’K‹KuKlKuKˆKŒKK”K™K˜K”K”K“KŸK™K™K™K•KŠKƒKK‹K’KŒKK”K¢K­K¦K›K K³K´KµK¨K¢K§K£K™K‹KKžK¬K²KµK³K¯K²K²K¬K©K«K K™K£K¬KµK´K®KªK®K±K²K³K¸K´K­K§K¯KªK¯K²K­K°K«K©K¬K¢KœK¦K¯K¯K­K¦K®K´K¹KµKµK²K®K´K§K¤K±K´K·K¸K»K¹K¶K³K­K¬K¶KºK¼K¾KÀK½KÀK¾K¼KµKµK¬K°K²K·K·K´K¹KºK¸K³K¯K¨K®K¸K­K£K²K¸K¶K´K¸KµK¬K¤K¯K§K—KšK¡K¡K´K¿KÍKÎKÍKÂK¹KµK«KªK¹K¹KÄKÅKÅKÇKÄKÉKÌKÑKÐKÍKÎKÏKÌKÌKËKÍKÈKÆKÄKÃKÃKÇKÉKÈKÈKÈKÉKÈKÈKÈKÇKÈKÉKÈKÇKÊKËKÌKËKÉKÍKÍKÉKÌKËKÏKÒKÆK¶K¦K¤K¥K§K¥KªK³K»KÁKÍKÑKËKÈKÊKÑKÓKÛK×KÔKØKºK>K-K*K*K&K*K*K.K/K-K0K+K4K4K2K1K:KAK:K2K&K+K/K7K,K2K3K5K7K=K8K4K1K*K)K6KHKaKyK†K”KŸKK˜K“K”KKK“K—K™KœKžK¡K K KœK K¢K KžK¢K¡KŸK KŸK K¥K¡K KŸK¢K K KKŸKžKœKžK¡K¢KKœKKKŸKŸKžKžKKšKKŸKK K KœKše]qœ(K_K_KcKcKgKeKaKcK\KaK]K`KaKaK\KfKeK_KgKcK]K_K^KfKdKfKqKuKzKƒKŒK‘KK™K›KŸK£K¦K©K«K«KªK«K«K¯K¬K®K«K«KªK­K­KªK¦K¤K£K›K“KKˆK~KyKsKaK\KVKLKLKMKPKSKVKYK\KXK]K^K^K[K^K`KaKeKcKdKbKeKhKeK`K_KaKfKeKcKcKaK`KcKcK]KcKcKaKgKiKgKfKiKhKkKkKgKjKdK`K_KWK_K™KàKÛKÒKÊK²KÀK¹K K›K¨K¢K…KƒK”K‘K‹KzKK…K€K‚KzKiKjKsKdKeKlKiKhKlKlKfKlKtKuKlKfKqK€KƒK€KsKeK`KeKfKnKxKŠK”KŽKˆKKŽKŽK“K˜KŠKˆKKŒK†K“KœKžKyKmKqKvK‚K‘K•K’K”KK’K›KšK›K—KœKœK”K‘KŠK‡K‡KKKK—K“K™K K«KœK˜KŸK­K±K¯KžKžK•K“K”KœK¦K³K¯K±K·KµK°K¯K«K¥K¦K¨K¤KžK£K³K³K°K¯K§K¬K²K­K¶KµK³K¬K¦K¤K¥K›K£K¬K­K±K²K¨K KžK¡K¬K­K­K±K®K³K·K¶K§K¢KªK°K°K±K°K²K·K¼K»K¸K¹K¶K«K²K»K¼K¿K¼K¼K»K»K¸K³K°K®K°K­K°K´K´K³KµK¶K¼K·K¬K¥K±K¸K§K¢K·K´K²K³K®K©K›K£K¡K˜K›K²KÂKÊKÏKÐKÄK¾KµK«K¯K¼K¾KºKºKÃKÄKÅKÈKËKÐKÎKÏKÏKÊKÌKÌKËKÊKÌKÇKÆKÅKÄKÅKÃKÇKÆKÈKÆKÈKÈKÈKÈKÂKÆKÉKÇKÉKËKÈKÊKÎKÌKÊKËKÊKÎKÎKÊKÏKÊK¸K®K¢KžKœK¥K¡K¨K³KºKÅKÍKÆKÀKÀKÅKÍKÔKÕKÒKÕKÓK•K*K.K-K%K)K/K,K,K-K/K1K9K1K0K,K/K6K1K.K5K3K+K.K.K2K-K1K0K2K1K5K4K1K0K-K>KRKmK€KK•KœK˜K˜K•K‘K‘KŒK”K–KKŸK K¢K K KŸK¤K¡K£K¢K¤KŸKŸK›K¡KŸK¢K¢K¡K›K£KKKKŸKžK›KKœK 
KKœKŸKžKžK›KžK¢K¡KŸKœKžKKœK¢KK™e]q(K_K_KfKeKcKhK^KbK_KaKdKbKcKbKbKeKeK_KiKbKbKbK_KaKeKhKrKuK{K~K‰KK‘K˜K£K K¦K§K§KªK¯K­K¯K¬K¬K°K¬K«K¯K¬K²K¬K©K«K¢KK˜K•KKŠK}KvKrKdK[KVKSKRKOKQKTKUK_K\K[K]K^K^K_K[KdK]KfKfKgKeKfKdKbKfK_K`KeKbKeKgKaKaKgKeKcKeKbKbKhKhKhKgKeKjKiKiKcKeKcKbK^KZK^K”KÚKÛKÒKÂK·K½K´KKªK·K¥K—K“KŽKŒK–KŽKKK~K€KvKiKpKnKiKnKoKhKdKcKcKeKuKtKsKlKoKwKK~KsKlKfKbKeKdKqKˆK–K—K’K„KˆK“K‘K‰K“KŽK…KK•K’KK’KŽKsKnK|K„K‡K‘K”K—KKKK–K›K—K›K™K˜K˜KŠKŠKˆKK—K˜KœKK–K‰K‘K§K¤KœKK—K©K¬KKŽK•KžK¨KªK¦K°KªK«K­K¬K±K¦K¦K¬K¬K¥K§K§KŸK¡K§K´K¶K±K®K­K¬K«K°K²K±KªK K K•K™K›K¦K²K³K°K¦K¥K¥K¦K¦K±K³K°K¤KªK«K£K£K¨K®K¶K¸K²K±K±K»K¼K½KºK»K±K¯K°K¸K»K¼KºK¸K¸K²K´K¶K°K°K³K³K³K·K³K²K¶K¿K·KºK«K¥K¹K«KK«K²K²K®K©K¦KK˜K K¥KºKÉKÐKÎKÉK¿K´K²K¯K¼KÃKÃKÀKÂKÃKÃKÆKÉKÍKÌKÏKÌKÌKËKÊKÉKÈKÈKÊKÅKÇKÅKÆKÅKÆKÇKÇKÉKÉKËKÈKÇKÈKÅKÉKÈKÈKÊKÈKÉKÉKÊKÍKÍKÍKÌKÍKÏKËKÌKÍK½K­K©K KžK¡KŸKŸK¦K¯K¼KÉKÂK­KµK»K¼KÊKÑKÒKÐKÕKÅK]K(K&K)K*K4K*K1K-K,K4K.K/K6K1K-K0K2K/K1K+K+K-K1K/K/K.K1K+K2K1K3K8K:K2K>KIKdKvK‚K‘K˜KšK›K–K’K”K“K“K–KœKœK K K¡KŸK£K¤K¢K¥K¡K¢K K¢K¡KŸKžKžK¡K¢KŸK¢KŸKœK›KœKK K™KŸKŸKžK›K›KŸKšKšKœKžKŸKœKžK›K KœKœKšK K›e]qž(KcKcK`KaKaKaKaKdKdK_KcKjK`KaK`KbKaK^KfK_K^KcKbKbKkKhKmKtKKƒK‰KK”K–KžK¢K¥K¡K¬K¬K¬K¬K«K­K°K¯K­K¯K­K¬K­K­K®K¦K£KžKœK–KŽK‰K‚KtKlKbKUKXKNKQKLKNKQKXKZKYK`K_KaKbKbK_KbKdKbKcKhKeKfK`KhKgKaK`KcK`KfKcKcKcK`KcKdKaKeKcKcKeKbKjKcKaKgKgKhKdKcKbKYKZKYK…KÚKÙKÌKµKÄK¶K¥K²K®K©K“K°K«K‰K…KšKœK‡K„KŠKzKuKoKrKtKnKqKnKaKSKUKiKoKsKhKgKmKwK~KxKvKrKiKjKfKgKrK†KŽK•K’KŽKŠK†KˆKKŽK‘KK‡KKžKK†KqKsKvKuKK”KKK–K—K—K’K”K‘KšK¡K—K‘K•K”KŒKŒKKšKšKœK¨K¢KK‹K‰KK§K¦K™K•K–K™KK‘K¢K­K²K¯K¯K­K«K¨K¦K¥K§K¤K¬K©K©K¨K¦K§K£KŸKžK¤K¨KµK´K®KªK¨K¢K©K¨K¢K¦K£K£K¤K¡KœK¥K®K¬K¨K¯KªK©K¬K¢K­K¦K›K K KªK²K¨KªK®KµK³K²K±K³K»K½K»K¼KµK·K®K³KµK¸K¸K´K±K±K°K´K°K®K©K¯K·KºK¶K¹K»K¶K·K´K²KžK°K±K KžK²K«K¦K¢KKœK¢KµKÃKÍKÑKÐK¿K¶K·K°K¹KÃKÂK»KÅKÃKÃKÈKÊKÉKÎKÉKÌKÌKÉKËKÇKÈKÃKÆKÂKÆKÃKÃKÄKÆKÅKÆKÆKÇKÊKÉKÈKÈKÆKÇKÆKÇKËKÊKÊKËKÇKÊKËKÍKÌKÐKÎKÐKËKÊKÏKÇK´K§K¦KžKŸK›K›K K¥K´K¿K»KKŸK«K´KÀKÉKÍKÏKÓKÔK¢K>K(K-K*K(KK2K0K2K+K1K)K)K/K/K4K0K)K(K)K-K4KCKLKQKOKTKiKyK‰K‘K–K—K˜K’K“K’KKK–K˜KKœKžKŸKžK¡KŸK¡K¢K£K¡K¢KK K KŸK¡KžK›K¢K¢K 
KKœKžKŸKŸKžKšKŸKŸKžKžKKŸKžKŸK¡KŸKžKŸKKœKœKKŸK KšKœe]q (K^K^KbK^K\K`KeKgKaK]K_KbK_KaK^KeK`KcK\K^K\KbKfKcKgKgKiKxK~K„KˆK‘K˜KœKžK¢K¨KªKªK¬K©K«K«K©K¬K±K¬K©KªK«K­K®K§K§K£KKœK”K‘KˆK„KvKkKfKZKQKUKKKOKMKTKRKYKXKbK_K^K`K_K]KbKdKcKeKaK_K`KaKdKdK`KcKdK`KeKbKfKaKcKaKbKcKjKfKbKbKdKgKfKjKdKhKhKbKfK^K`K\K^KšKÝKÚKÊKÃK¬KÃKµKŸK‘K©KŸK—K‰KŽKK‘KŒK†KŠK„KKˆK~K}KyKpKiK[KXKaKiKaK_KgKfKsK|KwKtKoKuKsKeK`KxK†KKŒK“KŽKK‹KKK€K€KˆK‹K…K˜KŸK“KuKoKoKqK‚K‹KKK—K–K’K–KœK—K–KKKŽK”K‚K‹K•KžK˜KK K–K•KK‹KŽKKŒKŽK—KŸK¥K•KŠK‹KžK¦K©K¥K«K²K²K®K«K«K¢K¤K¥K¤K¤K¦K«KªK¨K©K­K¦KªK¤K¡KŸK¦K¬K¯K¬K£KŸK¢K¥K§K®K¨K K¥K«K©KK¢K©K«K¨K©KŽK“KšK¡K£K¬K¬K­K°K«K®K¬K­K°K°K²K©K°KµKºK¼K¹K¹K¸K®K°K®K±K±K²K³K¯K®K°K®K¬K£K¤K¯K±KµKºKµK¯K®K­K§K•K¥K£K”KK K¡KªK¸KÁKÆKÏKÁK¹K³K¶K¶KºK¾K»KÁKÃKÄKÆKÉKÍKÒKÎKÍKÆKÉKÅKÅKËKÇKÅKÁK¾KºKÀKÂK¾KÃKÂKÃKÄKÁKÂKÈKÈKÇKÆKÅKÉKÈKÄKÆKÈKÉKÊKÉKÇKÉKÎKÌKÉKÏKÎKÏKÍKÉKÑKÆK©K¥K›K–K˜KK¡K©K±K½K¸KKKŠK•K¦K¶K»KÅKÄKÎKÖK¹K?K-K3K-K#K'KKDKGKIKZKmK|KŒK•KœKK—K”K”KK‘K“K“K˜KžK¡K KŸKK¢K£K¡KžK K¢K¡K¤K¢KžK K K›K›KžK¡KKŸKžKžKŸKK KŸKžK KŸKžKžKžKKKœK›K™KœKžKŸKœK›KšKšK KKŸK›Ke]q¥(K`K`KmKjKaK_KgK_K\K^KbKdK`KbK]K[K_K]KbK_K^K[KaKeKcKkKsKvKK„KŠKK—KK¡K K£K¦K§K¨K®K¬K©K­K¬K¬K­K¬K¯K©K­K«K©K©K¤K KšK—K“KK‚KvKmKfKZKUKNKNKJKQKTKTKYK\K_KaK]K`KcK`KeKcKaKdK^KaK_KcKeKaK^K_K`K_KdKbKaKbK`KbKfKdKdKcKiKhKdK]KbK_KfK`KdK\KZKXKUKQKiKÇKÝKÌKÃK«KºK¹K³K¢K¤K©KžKK—K—K”K”KK‘K†KyK†K™KƒKsKjKtKtKdKaKlKiKmKlKnKoKmKgKmKgKtK†KŠKKKKˆKŠK‡KŠK„KyKyKƒKŒKKŽK}KmKwKK}KvK|KŠKKK”KK”KŒKŒKŒK‰K‘K’KŠK‡KKK“K–K‰KvKK‰KKK˜K£K›K›K™K™K•KŠKŽKKŸK’K›K˜K›K£K¦K¥KžKšKK’KžK¨K KœKK²K´K²K KžK™KžKœKžKœK K¤K¤K¥K«K¦K«K¦KŸKK¡K¯K¯K¥K‡K‡K’K™K¡K¤K±K®K¦K£KŸK¡K¨K§K¡KšKªK©K­K«K KªK²K­K¤K¬K¡K¡K§K°K«K¥K K©KªKªK¨K§K±K­K§K¢K¦K¨K­K¬K¦K¢K™K¨K£KšKŸK˜KŸK¢K¢K¯KºKÁKÊKÆK·K­KªK¨K±K»K¸K»KºKÄKÇKÈKÉKÌKÎKÏKÎKÉKÊKÈKÅKÄKÂKÀKÁK¾K¿KÄKÂKÂKÁKÂKÀKÂKÀKÀKÁKÇKÄKÆKÈKÈKÈKÇKÇKËKÉKÉKÇKÈKËKÉKÊKÌKÌKÌKËKÍKÎKÏKÐKÐKÓKÓKÐKÏK©KšK›KK¢K©K²K¹K·K”K…K‰KŽKšK™K©K¬K«K®K®KºKÍKÛKÏKnK'K'K&K+K(K/K)K*K0K0K2K/K3K5K/K0K&K-K&K*K.K/K5K9K4K0K.K-K1K5K8KEKQKOKbK{KƒK‘KšKŸK˜K”K’K‘KŽKK”K—K˜K›KŸKžKŸKŸKžK£K¢K¢KžKŸKŸK K KœKœKœKŸKœKŸKžKžK›K¡KŸK KžK¡KžK 
KK›KœKK›KKžKK›KKŸKŸK›K›KŸKK™KŸKŸKKK¢e]q¦(KaKaK`KeKdK\K\K]K\K`KaKaKaK\K^K\K`K]K`KbKfK^KaKaKaKnKuKvKKˆKŒKK—KšKžK K¤K§K©K«K¯K¬K«K­K¬KªKªK«KªKªK­KªK¬KªK¢KŸK›K•K‘KŠK‡KvKlKfK\KXKPKLKNKQKTKSKUKUK]K]K]KcKeK\KbKcKdKcKcKdKbKeK_KcK`K`KfKeK_K`KbKaKcKgKgKkKjKcKeKfKcKbKjK_KeKaK_K^K\KYK\KVKtKÒKÛKÇK¶K±K½K¶K KªK´K¬K’KˆK©K KŒKŒK’KˆKyK{KŽKK|KtKwKqKxKmKkKnKfKiKgKsKnKfK]KtKK…KK‘K‘KŽKŽK‹K‡K‹K‡K†K~KƒK~K“KœKKaKuKƒK‚K{KƒKŽKKŽK‘KKK“KšKŒK…KˆKŠKK‰K‹KšKŽK‡K‘K}K‚KK”K•K”K‘KšKšK•KKŽK‡KŒK›KŸK£KªK K›K™KšK¤K¡K KžK™K˜K—K¡K¦K¦KžKK®K§K¨K K—KœK KžKœK K K¥K¡K¥K¨K¬K«K«KžKœK¢K¤KŠK„KŽK™KªK®K§K K¨K¬K¦K¦K¢K©KªK£KŸKœK£K®K°K«K¢K°K°KªK§K£K¦K KªK®K¨K K K¢K¡K¦K«K®K«K«K©K¤K¡K§K¬K«K¡KšK˜K¤K˜K•KŒK˜K¥K°KÁKÇKÅK¿KµK¥K«K¸K¸K¸K¸KÀKÄKÅKÊKÌKÌKÊKÍKÈKÉKÇKÄKÅKÅKÂKÂKÀKÃK¾K¿KÁKÂKÂKÂKÃKÂKÁKÆKÃKÂKÃKÇKÇKÈKÈKÈKÍKÅKÉKÉKÊKÇKÊKÊKÊKÍKÉKÈKËKÍKÍKÏKÎKÐKÓKÐKÒKÏKÓKÀKK™K™KžK¦K±K¶K´KšK‡K…K”K™K£K§K©K§K¦K­K¹KÅKÖKÝK¶K>K'K%K.K&K+K*K0K)K2K0K5K.K1K4K6KFK*K'K+K.K+K.K3K6K5K/K-K,K6K5KDKGKFKaKpK€K‹K–KŸK™K–K•K”KK’K’KK•KœK›KŸK¡KžK¢K¢KŸK¢K§KŸK KžKšKŸKK›KœK›KšKœKœKžK KšKžK¡KKœKŸKžK K›K›KKœK™K KŸKKK KœKœKK›KŸK˜KKšK›KœKœe]q§(K^K^K^KbKcK^K]K`K_K]K]K^KZKfK^K^K\KZK[K`K\K^KaKdKeKhKnKuKƒK‡KKK™KœK¡K¢K¦K¥K©K¬K«K©K©K®K­K§KªK¬K¬K¬K©K«KªK«K©KžKšK“K‘K‹KKyKlKcKaKXKSKMKMKUKSKUK\K]KXK]K]K^K`KhKbKcK]KcK_KcK^KcK`KfK^KbKdKaKdKfKdKaKaKdKeKgKhK`KbKfKdKbKfKeKdK`K_K]K]KZKTKSK}KÏKÕKÁKÀKÂK½K³K±KºK¹K¡K›K”KK™K’K¤K“K€KyK”KKK‡KƒK|KsKvKtKoKoKcKhKhKhKdK`KaK~K‹KŠK”K‘KŽK‹K‹KŒKˆK…K€K„KƒKŠK‡KŒKƒKlKjK{K‚KxKKK•K“K•KŠKˆK–K™K™KK…K†KŠK‘KŒK”K‘K‹KvK€K†K‘K‘K”KžK–K’KŽK˜K‘KKƒK†K•K K K¢K¢K¦K›K˜KšKšKžKšKK£K¢K–K–K™K¨KKžKœKKŸKœK•KšK£K¢K K§K¦K¥KŸK§K§K«KªK©K›KK”K‹K‘K–K–K¡K¨K¬K´K§KŸK¢K¥KªK­K­K©K©K¤KŸKK¥KªK±K¦K¤K³K®KŸK¨K­K¤KK¢K©K§K¡KKœK£K¡K­K­K¨K§K¢KœKœK®K¦K¤K˜KKžK›KŸKK¬K³KÁKÊKÃKµK­K«K¯K»K¼K«K¶KÃKÃKÃKÈKÌKÎKÈKÊKÈKÇKÅKÄKÅKÂK¿KÀK¿KÄK¼K¼K¿KÂKÃKÁKÃKÅKÄKÃKÅKÆKÅKÇKÅKÈKÆKÈKÊKÊKÊKÍKËKÌKÇKÊKÉKËKÌKËKÌKÊKÎKËKÐKÑKÒKÒKÑKÒKÏKÍK¤KœK—K—KžK¨KµK¯KœKŠKK–K¤K©K©KªK¨K—K˜K©KÀKÑKÚK×K”K/K(K)K.K1K,K4K,K1K0K/K/K8K4K/K*KK6K5K=K-K8K8K.K2KK6K;K/K5K7K3KCKDK9K;K1K0K2K5K6K0K?KIKbKrK‡K–K˜K™K—K•K”K’K‘KK”K˜KœKžKK›K KžK KŸK¡KŸK KœK 
KKŸKKKœKšKœKKžKšK›K›KK›K KŸKœKžKžKK›KœKœKœKžKšKKšKKŸKŸKœKžKœKœKžKŸKžKKKšKœK›e]q¬(K_K_K`KaKbK^KcKbKcK`KeK_KaK^K_K^KbKbKbKbKhKhKlKlKnKnK{KyKK…KŒKK—K›K¡K K¤K©K«K«K­K°K®K«K©K¬K¯K¬K¬K­KªKªK¨K§K£K¤KžK–K—KŠK‡K€KkK]K[KRKKKIKLKIKNKMKQKUK\K]KcK\KbK\KaK[K`KdKbKaKaKcK`KbK]K_KfKaKeKcKcKdKaKaKhKbKcKbK`KbKaK_KeKbK_K]K_K_KTKPKOKSKKØKÎKËKÐKÍK½K±KÂKÎKºKœK©K¶K®KK™K®KžK…KK•KKK˜K’K’K€KxK‚KmKcK[KUKdKyKŠKƒKŒKKŠK…K‰K{KzK€KƒK‰KƒKKKqKhKuKKKzKxK‡K“K•KKŒKŠKˆK‰KK‘K‰KŠKŠK‹KŽK“KyKxK€K…KK‡KˆKŽK’K¡K˜K’K—KŒKzKuK‚KK–K–K™K¢K¨K¥KžK KžK”K™KžK‘KˆKK™K—KKˆKK“K”K’KK—KK”K’KK{K„K‹KKKKtKkKaKLKFKK©K¦K£KœKœKŸK¦K¡KžKKžKžK KŸK£K¡K¥K¤K K¤K¡KŸK K¦K£K¦K¢K¡K–KœK¤KŸK¡KšK™K¤K¨KŸKžK¡K¢KK˜K¤K§KK‘K‹KœKžKšK“K—KªK»KÃKÌK¿K´K±K®K­K­K¯K¶KÄKÆKÅK»KÁKÉKÌKÊKÊKÄK»K¾K¼K»K¼K¼K¾K¼K¾KºK½K½K¾KÄKÅKÁKÃKÂKÃKÂKÅKÃKÆKÇKÈKÆKÇKÆKÇKÌKËKÉKËKÉKËKÉKÊKÊKËKÌKÍKËKÈKËKÍKÍKÌKÍKÐKÒKÐKÑKÒKÑKÔKÐK«K“KšK¥K¬KŸKŽKKžKKKxKnKeKUK`KlKvKK—K°KÆKÖKßKÛKªK6K#K(K)K#K%K-K.K1K6K6KK1K.K2K8K/K2K.K.K9KIKBK5K9K2K2K.K1K6K?K@KXKjKK‘K—KKšK–K“K”K’K”K‘K”K›K›KŸKŸK¡K¡K¢KŸKKŸK›K KžKKKKžK›KK›K K™KžKšK™K›KžKžKœKKœK›KšK›KšKKK›KK›KšKžK›KœKœKšKœKŸKœKžK˜K—KK™KœK›K™e]q­(KbKbK`KcKbK`KdKeKcK`KbK`K`K^K_K`KbKdKgKcKdKgKkKhKiKrKtK{KƒK…K‰KK”K˜K K£K£K¦KªK«K«K­K«KªK¬K°K­KªKªKªK¬K«KªK§K¥K¢KœK”K“KKˆKwKmK]K\KRKQKKKHKHKLKXKVKZKZK[K\K`KdKeK_KaK`KfKfKcK_K_K`K[K_K^KeKaKdKaKaK\K^KaK`KeKaKeKcKdKcKcKaKbK_KaK\KWKUKPKNKXK¢KßKÑKÕKÎK¹K·KÅKÌKÃK¡K°K±K«K•K©K¦K¡KK‹K¤K˜K‹K‹K©K–K’K€K„K†KqKbKZK]KsKƒK„K}K…KŠKˆK‡K‹K€KyK|KƒKKŠKŽKƒKfKmK‚KKKwK„K–K“KŽK‹K…K‰KŒKŒK‰KK‰K‰KŒK‹K{KvKyKK…K‹K‹KKK‹K‘KžK KŽKˆK‰KK„KK‰K‘K—KœK™K¡KœK£KžK”KŒKŒKtKmKfKkKiKnKrKuKwK€KKƒKKœKK™K‘K€KyK”KK“KŒKšKŠK{KkKYKKK>K=K1K7K8K9K@KMK^KsK‹KK›K“K•K‘K˜K’K‘KK”K™KšK K 
K¤K¡K›KK¡K›KœK›KŸK›KžK¢KšKŸKœKžK›KžKšKœKœKžKžKœKKšKžK›KKšK›K›K™K™K›K›K˜KKžK™K™KžK™K›K™KœKšKK™KšK—K–K™K›K™K—e]q²(KcKcKbKbKaKaKbKgKbKcKaK_K^KaKeKcKhKaK^KbKdKdKdKjKlKpKuK~K„K‰KK•K–K™KŸK¢K¥KªK®K­K«K¬K¯K­K¯K±K­K«K­K«K©K¯K©K¨K§K¤KœK›KKKƒK{KoKeK[KPKKKDKLKOKOKVKSKRKVKXKYK]K^KbKaKbKhKaKaK]KaKeKfK^KeK^KdK`KfKbKdKcKgKfKbKfKcKcKdKbKbK`KeKbKdK\KXKXKTKMKOKPK‹KÙKÜKÚKÈK·KÊKÒKËK´K·KÂK¼K¯KŸK«K¬KœKK‘K­K—KˆK‡K¥K£KŒK‡K…KˆKwKvKqKwK€KvKwKzKoKyK€K}K}K‰K‡K†KrKeKyKvKvKtKKK“KKKŽK‰KˆK…KKˆK‹KˆK„K|K‚KtKoKK~K†K‚K†K‡K‡K‡KŒKŠKK’KƒKyKƒKŠK–K˜K–K“K‘K”K™K™KKKkKgKwK‡K‹K‰KK“K‹K|K„K~K|KyKxKiKmKiKoKfK\KRKMK9KTK€K˜K¬K«K¦K¨K§K©K¤K›K„KzK‚KƒKwKoKrK|KvK~KsKiKdKXKiKyKKK’K˜K’K¬KšK‹KKnKgKhK\KCKQKcK[K@KBKpK~KqK`K3K#K/K{K¥K›K”K•K¡K²KÄKÅK´KK™K¤K¢K£K«K¶K¾K½K¹K¼KÅKÈKÆKÈKÉKÆKÀKÂKÁKÁKÄKÃKÃKÂKÁKÂKÄKÂKÅKÀK½K¿KÃKÃKÃKÅKÃKÆKÆKÉKÆKÇKÉKÊKÈKÈKÉKÉKËKÈKÈKÎKËKÉKÍKÎKÎKËKËKËKÊKËKËKÊKËKÊKÍKÌKÎKÐKÑKÖKÔKÓKÌK½K«K K–K“KKmKVKLKTKYKWK[KZKjK`KaKmKwKˆK›KµKÇKÔKÙKÚKÝK¯KBK(K!K'K'K/K=K*K-K.K1K/K-K/K3K2K-K+K+K(K.K7K6K6K=K7K9K4K/K-K5K9KJKVKhK{KK”K—K—K“K“K“KK”K•K‘K—K›KK K¤K¡K¥KŸK¡KžKŸKK¡K K¢KœK¡K KœKžKšK›KšK›KŸKŸKœKžK›KžK™KœK›K›K›KœK›KžKšK˜K™K™KšK™K™KœKšK˜KšKšK™KœK™K›K˜K•K™K›K˜K”e]q³(KcKcKdKaK`KaKcKcKcKfKdKaK_KcKbKbKhKdKaKcKcKiKeKjKtKoKwK}K„K‰KK‘K•KœKK¤K¤K©K©K¬K¬K¬KªK¬K±K°K¯K­K­K¬K®K®K«K¬K§K¡KŸK›K—KŒK„KxKqKcK[KPKMKEKEKOKKKRKOKRKTK]KZKaKbK_K_KhK_K]K_KcKcKeKcKfKdK`KcKfKfKfK_K`KhKcKeK`KaKgKfKdKcKaKeK]K`KYKZKVKVKNKQKQK€KÖKÝKÌKÄKÍKÒKÆK»K²KÉKÆKµK¢K²K¹K¤KŠK˜KžKšK€K†K¥KžK”KŽKœK‘KˆKyKxKpKsK}KyKtKsKpKtK{K{KKˆKƒKiKkKmK{KxKtKK‰KKKKˆKŒK‰KˆK‡KƒKK‹KKwKpKuK}KxK†K~KƒKƒK…K†KˆKŠK‹KŠK†K€KK~KK’K‹KK”K”K•K”K‘K”KŒKzKrK~KK”K‹KKŽK„KyKqKpKoKjKlKhK]K^KYKVKYK[KbKKK:KFK~KKK¦K¬K¤K K£K¡KKrKKƒKŒK…K†KŒK…K€K„KtKiKTKFKBKEKRK]KoKhKbKrKgKVKaKaK_KWKXK3K1K8KAKFK2K4KTK^KFK+K"K-KYK’K–KœK±K»KÆK½K©KšKK¡K›K 
KµK¶K¾KÆK¿KÀKÀKÅKÄKÅKÈKÅKÁK¾KÂKÂKÂKÆKÅKÃKÅKÃKÃKÁKÃKÄK¿KÀKÀKÀKÂKÁKÄKÅKÅKÆKÉKÉKÈKÉKÆKÊKÇKÈKËKÊKÈKÆKÉKÊKÊKÎKÏKÏKÌKÈKÊKÌKÉKÎKÍKËKÍKÍKÍKÏKÑKÕKÕKÏKÁK¬K£KŸK”KƒKiKXKYKPKMKTK[KcKWK\K_K^KfKyK…KŸK³KÁKËKÕKÙKÚKËKiK)K-K+K$K$K*K.K0K/K0K4K.K-K/K7K.K*K*K+K*K+K4K3K6K3K/K4K-K0K0K2K6KFKdKwK†K—K—KšK›K•K•KŽKK’K•K˜K¢KžK£KŸK K¥KŸK K¡K KžKŸKžK¡KŸKK K K KŸK KœKšK›KžKšKœK›KKžK›KšKœK›KœKœKKœKK˜K›K˜KœK—K›K›KœKšK›K–KšK›KšKšK™K™KœKžK›K˜e]q´(KcKcKdKbKgKaKbKaKbK`KfKlK]KcKdKkKlKfKfKiKgKhKhKjKrKtK}KKˆK‹K‹K“K™KšK›KŸK¦K¦K­K«K±K­KªK®K¬K°K°K©K®KªK«K«K¬K§K£K¦K£K—K’K‹K„KtKnKbKWKTKKKHKJKJKPKNKSKUKWK\KbK\K[K\K_K^K`KcKcK_KkKeKhKkKdK`K_KfKiKlKcKcKfKbKcKcKaKbKfKaKcKcKgKcKbK^KXKYKVKRKRKUK}KÜK×KÂKÓK×KÊK¶KÀKÊKËK¼K¯K·K¶K³K”KžK¤K“K‚KwKšK£KK‰K¡K²K•KKoKmKqKgKkKK|KuKtKrKyK„K†K‚KrK_KfKtK~K|K€KˆKK‹KŠKKˆK…KŠK†K‰K†K‹K‡K†KlKrK{K„KƒKƒK€K€K…KŠK…K†K†KK‡KK{KKŠK’K‘KŠK‹K‘K”KšK•KŠK†KwKsKŒKK”K•KK‹K‰KKrKgKdKeKfKkKbKRKJKEK>K;K?KHKDK2K@KyK”KK”K£K¦K¨K¢K—K„KxKŠK‡K|K…K…K{K{KuKnKaK`K[KTKQKWKuK`KPKEKHKRKRKQKYKCKHKCKPKK?K=KFK7K:KEK5K,K+K2K/K.K/K+K'K&K'K+K*KIKŽK¹KÉKÈK°K•K“K“KžK¦K°K¹K»KÂKÈKÊKÍKÇKÄKÂKÇKÇKÅKÂKÃK¾KÀK¿KÂKÃKÅKÅKÄKÇKÀK¿KÀKÁKÁK½K¾KÀKÁKÅKÄKÅKÄKÆKÈKÉKÇKÉKÊKËKÉKÆKÈKÊKÉKÇKÈKÈKÌKÎKÎKÌKËKÊKËKÏKÎKÍKÐKÎKÎKÌKÎKÒKÍKÆK²KŸK“KKKlK]K\KXKTKPKWKUKXKVKZK_KXKbKnKrK€K’K¯K¿KÃKËKÓKÖK×K¹KPK(K(K*K$K-K)K0K.K2K6K.K3K0K,K1K7K*K,K0K2K/K.K9K4K?K2K1K*K)K,K3K2KLKcKvK†K–K•K™K—K”K”K’KŽKŒK’KšKŸKŸK¡K¡K¢KŸKŸK¡KœKŸKŸKŸKŸKŸKžK KžK¤K 
KœKœKKKšK™KKœKKKžKK›K›K™KšK›K›KšKšKžKšKœK›K™KœK›K–K™KšKœK›K™KœKK™KšKšKœK˜KšK™e]q¶(KfKfKgKcKcKdKfKdKfKbKfKaK`KgKaKeKdKbKjKiKmKhKjKjKjKyKwK}K…KŽKK”K›K›KŸK¡K¦K¬K­K¬K­K®K®K®K­K´K®K®K®K­K­K±K©KªK¨K£KŸK—K“KŠK…K}KoKfK[KSKJKKKFKKKOKPKUKQKRK]KbK^K[K_KeKbK^K_KbKbKaKeKcKcKhK`K`KgKgKdK`K`KcK\KdK_KbKcKbKgKbKaKcK_K]KbK_K]K[KVKSKLKoKÃKÛKÚKÑKÀK¾KÍKÎK¹K±KËKÊK¿KªKªK¬K¡K€K{KŽK…K{KqK“K¨K™K…KvKiKmKjKjKoKuKqK~KKwK~KƒKwKoK]K`KtKzKxKwK†KKK‰K…K‰K…KK‰K‰K‰K‘KŒKwKrK~KK‚K}K‡K~K‚K†KˆK‰K„KŽK†K{KKzK}K‰KŒKŽK”K”KK†K‡K‰KŒK‡KvKdKxK€K†K“K˜KŒKKrKhK\KbKdKhKnKeKbKXKWK_KiKkKaKTKHKXKZK`K}KK|K•K™KŽK„K‡K‹KˆK†KKoKMKEK?KJKCKHKEK>K9KMKbKzK€KdK7K:KBKZK@KAK8K3K5K1K6K7K,K)K/K/K+K,K.K'K&K$K#K+KRKwK®KÄKÀK®K–K‘K™KžK«K½KÂKÂKÃKÇKÇKÇKÉKÄKÃKÂKÊKÅKÃKÀKÁKÂKÁKÃKÄKÅKÅKÆKÄKÁKÁKÁKÁK½KÀK»KÁKÄKÃKÃKÂKÄKÇKÇKÇKÇKÈKÉKÉKÇKËKÈKÆKÇKÉKÈKÆKÇKÎKÍKÒKÎKÏKËKÏKÑKÎKÐKÏKÌKÐKËKÌKÇK­K•KˆK~KlKeK]K[K[K[KXKUKWKXKXKZK^KYK^KZKcKuK‡KšK«K»K¾KÄKÏKØKÚKÐKƒK,K,K(K,K&K)K.K2K+K/K2K1K2K1K-K-K3K'K,K6K;K6K3K5K5K9K-K,K,K*K(K1K7KLKnK~KK”K•K’K“KKKK‹K’K–KK£K¡KŸK K K¡K¡K K¢KŸK KŸK KŸK KœK¡K KžKœKŸK¢KžK›KŸKK›KœKœKKK—K˜KšK˜K›KœKšKžKœK›KKK›KšK›K˜KšK™KšK™K—K›KKœK˜KšK›K˜K›K˜e]q·(KcKcK_KbKbKlKhKgKeKdKbKaKcKcK`KbKgKcKeKhKkKmKmKrKnKuKtK}K„KK’K”KšK›KŸK¡K¦K«K¬K®KªK«K¬KªK­K­KªK­K®K¯K¬K­K©K«K¥K¦KžK–K—KŽK†KzKsKfKTKSKQKOKBKKKLKRKTKPKUK_KaK^K]K`K_K_KdK_KeKbK`KlKiK`KcK]KaKhKfKjKbK`KaKcKdKdKaKbKeKeKbKeKdK_KdKbK`K_KXKVKRKNKcK°KßK×KÄKÄKÑKÌK¾K·KÌKÙKÅK°KºK®K£KK—KK‹K{KlK‚K™K“KŠK|KvKjKeKnKqKmKrKtKtK{KK}KKlK\KaKjKwKxKxK‚K“KKKŠKƒKˆK…K…K‡KŠKŠKŽKxKwK€K€K‡K€K‚K…KƒK~KˆKˆK‹KˆK‰K…KKwKtK‰K“K‹KK’KŒKKŽKŽK‡KyKjKfK{K†K‹KŠKŠKŠK‰K~KqKXK`KrKvKwKxKdKYK]KqKuKwKzKmKfK`KhK^KgK…KKhKpKuK{K€K‹KŒK~KhKZKXKNKLKHKK>K9KDKZKVKHK,K,K0K(K$K%K#K%K)KGKHK(K%K3KcK^KmK‘K·K¾K²K˜K†KK•K¢K¹KÇKÇKËKÊKËKËKÉKÇKÄKÅKÄKÁK¾KÃKÄK¿K½KÁKÁKÀKÀKÂKÂKÀKÃKÂKÆKÅK¿K¾K¼KÀKÁKÁKÁKÃKÄKÁKÄKÆKÅKÇKÈKÆKÆKÊKÉKÈKÅKÈKÊKÃK¾KÄKËKÓKÍKÌKÌKÍKÎKÑKÐKÐKÌKÉK¹K”KƒKnKaKYK[KVKWKVK[K^K]KZKXK^KTKXK[KVKcKfKoKqKwKKŸK´K³K¸KÁKÆKÍKÕKÕK´KYK-K&K1K.K0K%K*K,K2K0K6K6K-K9K4K5K.K-K0K+K)K.K0K:K5K4K.K+K+K7K&K-K8KLK^K|K‹K’K”K–K’K“K’KKK”K•KK¢KŸK¨K£KŸK K K K¢K¤KK¢KKŸKžKŸK£KŸK¡KžK 
KžKœKŸKKžK KŸKK K KœK›K™K™K—KšK˜K™K™K™K›K›KœKšK™K›K›KœK›KKšKœK—K™K™KKšKšK›KšK›e]q¹(K_K_KfKdKdKeKiKhKkKeKbKbK`KcK`KbKeKiKdKeKhKlKoKnKnKtKwK|KK‰K‹K“K˜KK¡K£K¬K«K§K«K°K®K¬K«K­K©K­K±KªK­K®K¯K«K©K¦K£K KK•KŽK†K~KvKdKUKPKGKHKJKLKQKQKSKWKVK]K]K\K\KcK[K_KaKcKcK_K_KaK\KcKbKbKcKgKeKjK`KcKfKcKaKbKeK_KiKhKgKcKcKdKaK`K[KbKZKWKQKNKYK‡KÏKÔKÖKÒKÂK½KËK×KËK¹KÈKÎK¾KžK§K¡K‰KuKuKpKxK{KtK{KŠKKrKvKxKsKrKwKmKrKsKyK‚KKjK`KqKyKsKrK{KˆKŠK‹KŠK‡K‚KˆKˆK…KˆK†KŠKvKnKqKzK‚K‚K„K…K…K†KŒK„KƒKK‚KƒK‡K}K~KƒKŽKK‹K‘KKŒKKŒKŠK‚KtKaKtKxK|K†KKŒK…K~KwKgKhKwK…KŠKKiKaKiKkKKŽKŽKwKxKsKUK@KSKSKJKeK`KOKKK_KtK}KpK\KPKTKHKVKUK0K0KFKBKHKmK†KmKBK0KK9K7K:K9KK7K8KOKDK;K@KJKgK~K6K&KK"KK$K*K$K+K1K>KwK KºK¶K£KŠKtKkKKœK¹KÅKÆKÄKÂK¼K»K¸K¶K¹KºKÀK¿KÃKÃKÃKÄK¿KÁKÁK¿KÃK¿K¿K¾K¾K¿KºK¾K¾KÀKÆKÀK¾KÀK»KÀK¾K¾K¿KÀKÂKÂKÂK¿KÃKÂKÄKÅKÃKÇKÇKÅKÅKÊKÊKÆKÇKÉKËKÏKËKÌKËKÁK KKaK\K]K[K^K^KbK]K\KZK\KZKVKRK]KZK\KdKkKtK{KƒK‰KK–K K«K±K·KÂKÇKÈKÇKÍKÏKžK4K&K*K+K*K*K.K3K+K-K.K0K4K2K7K/K5K,K-K/K0K1K1K'K,K.K-K3K2K8K;K7K7K0K.K-K3K8KQKkK€KŽK—K–K”K•K–K‘KŽKK™K›K K£K¤K¦K£K£K¥K£K¢KŸK¤K£K KœKžK¡KžKŸK¢K¡KžK¡KœKœK¡KK K¢KŸKžKKžKŸKœKœKŸKKšK›KœKšK›K›KK—KK—KšKšKšK™K˜K›K›K™K˜K›K™K˜K›K›K™K˜K—Kœe]q¾(KiKiKjKkKjKgKkKgKiKgKeKcKbKcKdKbKbKcKbKmKiKjKmKmKoKtKvK€K†K…KŽK“K•K˜KK¥K§K©KªKªK­KªKªK¨K«K­K¯K°K©K­K«K¬K®KªK£K¥KžKœK•KK‡KKnKaK[KUKPKJKLKPKKKMKTKSKVK\K^K`K\K_K\KZKcKcKeKdKbKcKbK`KdKcKcKcKbKmKcK`KaK^KbKfKdKdKaKdKaK\K^K^K`K[KbKcK`K]KTKRKSKRKxKÕKàKÉKºKÊKÔKÏK¾KÃKÕKÓK¹K¬K¦KKƒKxKƒK{K˜K•KŽK‘KK‰KxKŠK—KK}K~KsKjKeKgKhKmKxKsK€K…KƒK…KK‚K„KK‰KK‰K‚KvKmKrK|KKK{K}KK‹KƒK‚KƒK†K„K„K„K|KzKyK„KKKˆKŠKŒKŒK‰K„K‚KwKbKcK|K}K„KƒK‹KK€K~KK}KfK`KpKtKdK_KPKMKNKLKFKGKOKNKFK@K2K5K8K?K?KBK:K@KDKWKmKlK[KzK“KmKSKdKaKQK7KLKlKfK[KCKvK‰KvKfK3KFKIKOKTK:K,K3KpKxK9K%K!K$K!K"K$K,KGKWKvK¨KÂK¹KžK‚KqKnKwK›K´K¿KÃKÆKÃKÀK¹K½KºK¹K¼K¶KºK½KÂKÂKÁKÀKÁK½K¾K½K¿KÄKÃK¼K¶K¹K»K¾KÄKÁKÃK»K½KÂK¿KºK¾KÁKÂKÂKÂKÂKÁKÃKÇKÂKÂKÄKÂKÈKÄKÆKÆKÉKÆKÄKÆKÉKËKÊKÅKºK¤KK}KyKoKYK]K[KZK^K]K\KZKZK\KXKXK\K[K[KgKvKxKƒKŠK˜KKKK¤K°K´K¾KÁKÃKÃKÏKÒK´KUK+K$K)K)K2K/K*K,K,K,K.K6K6K0K5K8K8K.K+K0K1K0K+K/K1K2K3K1K;KBK8K9K;K7K5K4KKK9K5K?K[K`KQK\KK^K[KKKKK^KcKaKOK1K%K-KyK{K(K%K"K!KK 
K%K@KYKƒK«KÁK³KšK}KtKsK}KšK¶K¼KÁKÆKÄK½K»K³K°K³KµK¶K´K½KÁKÀKÁKÀK¿K¿K½K¾K¿KÁKÁK¼KµK¶K¼K¼K¿K¿K¼K¿KÀK¾KÀKºKºKÂKÅKÁKÁK¾KÄKÄKÄKÄKÅKÅKÅKÅKÇKÄKÊKÆKÇKÄKÆKÈKÈKÄK¹K‹KhK~K…K‚KK~KcK`K`K^KfK`KVKXKXKWK[K^KaKfKnK|KˆKK”KšK K¦K£K¨K®K²K¶K¼K¾KÁKÈKÐKºKcK.K*K0K2K1K0K1K.K5K1K1K3K5K4K4K2K3K4K/K.K-K)K)K*K.K2K:K=K?K>KAK@KMK2K-K/K1K4KJKiKzKŠKK•K—K•K“K”K‹K‹K–KK¥KžK¥K¦K¤K¦K¡K§K¥K¢K¢K¢K¡K¢KK¦K¤K£KK¢KŸKŸK¢KžKœK KžK K¢KžKœK›KœKžKžKžKžKœKœKšK›K K›KKœKKŸKœKKK™KšK™KšKžKœK˜K›KšKK™K˜KK›K–K—e]qÀ(KkKkKiKkKkKpKfKeKfKfKaK_KfKbKeK_KdKdKeKgKcKkKiKlKlKpK|KK€K…K‘KK–K˜KœK¤K§K¨K¬K©K¯KªK«K®K«KªKªK¬K­K°K¬K©K¬KªK¤K£KžK™K”KK„KzKnKdKXKYKJKEKHKGKLKUKQKVKeKWK\K]KaKaKbK^K_KdKbK^KcKbK^K`KdK`KfKjKfKeKdKcKbK^K`KfKdKjKdKaK^K_KaKeKeKbKfKbK`K_K]KSKKKOK^K²KÔKÖKÖKËKÀK¾KÏKÍKºKÁKÌKºK–K’KœK‘K‰K‚KˆK‚K“K‘K‰K¡K’K‹K…KK„KwKbK_KgKrKuKoKwKƒK‘K…K‚K‡K‚KˆKŽKƒK‡K‚K†KtKqK{KKK€K~K~K}K€K‚K…KŽK…K€KyK}K|KuK†KK’KKŠKƒKŠK†KƒK†K~K}KjKGK‚KK…K‡K‰K„K~K€KxKcKaKPKLKWK@K;KK7K;K3K1K3K5K5K4K,K:KJK`K€K…K^KjKKfK`KKK1K.K)K3K/K/KZKqKvKBK8KNKZKlKdKYK=K5K;K!K)K.K~KGK+K(KKK#K3K[KŒK²K»KžKŒK~KxKyKŒK®KÀK½K¹KÁKÀKÂK·K´K±K°K·K²K²K»K¾K»K¾KÀK¼K½K¼K½K¾KÀK½K¿K¶KºK¾K½K¾KºK¼K¼K½K¾K¾K¾K¾KÄKÁKÀK¿KÄKÁKÁKÂKÃKÃKÄKÆKÆKÅKÅKÅKÅKÉKÌKÉKÄK¼KŸKnKiKgKbK^KsK…KK„KK|KmKcK^K`KgKaKeKmKvK†KŽKK“KŸK¦K¤K¤K£K¦KªKªK¬K±K²K±K»KÅKÊKÍK®KTK1K.K'K'K*K2K.K2K,K8K.K4K:K>K;K5K4K:K?K.K*K/K)K.K/K,K,K-K5K=KCKHKKQKiKzK^K;K*K%K+K"K#KIK}K@KUK/KK#K:KhKŽK°K½K¡K‰K}K{K†K•K¬KÅKÅK½K¾KÂK¿K½K·K´K³K´KµKµK¼K¼K¼K¼K¹K¿K½K¹K»K¾K¼K¿K¹K¾K¹K¼K¼K¼K¸K¹K»K¿K¾K¾KÀKÁK»K½K½K¿KÀKÁKÃKÂKÃKÅKÁKÃKÈKÉKÈKÈKÆKÆKÈKÉK¾K«K„KYKKKbKlKnKbKxKKŒKK„KK€KnKmKqK{KyK}KŠK“K›K¢K K¦K©K§K¥K¢K¥K£K£K¨K«K±K´K¶KÂKÎKÏK¨KRK-K,K'K*K-K-K,K0K.K-K.K7K/K8K5K2K1K8K8K;K0K1K4K0K+K'K)K-K0K4K9KK9KSKhKfKQK4K3K8K3K*K)K-K6K:KDK6K,K%K&K4K8K/K3KbKtKKdK?KPKVKCK.K,K,K2K:KGKmK†KƒKgK[KJKMKZKiK@K%K&K$K0KJK,K K-K4K‰KHK0KcKKK²K­K“KyKtK…K‹KŸK¸K¾KÂKÆKÇKÅK¾KµK·KµK¹K´K°K±KµK¸K¶K¹K¸K¸K·K¸KµK³K³K¯K´KµK´KµK±K´K»K¸K¸K·K·K½K¼K½K¿KÀKÂKÀK¾K¼K¾KÁKÂKÄKÁKÅKÅKÆKÇKÇKÆKÀK K\K\KDK.K0KK1K2KPKhKKK—K—K”K‘K‘K”KK‘K—KK K¤K¦K¥K£K¥K¦K¢K¥K¥K£K¤K¦K¦K¥K¥K¡K¢K£K K¤K¤K£KžK¥K¡K¡K£KšK¢KœKžKŸK›KžK¢KžKŸKœKœK KŸKœKœKœK 
KšK›KšKšKKœKžKšKœK˜KœK›K—K˜K˜KKšKœK˜K˜K™K™K”e]qÇ(KkKkKkKoKjKiKhKhKiKgKlKcKiKlK_KfKeKaKaKgKeKeKjKmKkKtKrKK‚K†KŽK“K”K›KŸK£K§K§K«K°KªK­K­K¨K§K¬K«K«K«K¯K­KªKªK¬KªK¥KŸK™K”KŽKˆK~KrKmK[KZKTKEKFKMKLKRKXKWKXKYK_KWK[K`K]KaKdK\K[K_K^KdKeKdKbKeKeKhKdKbKfKbKeKbKcKgKcKhKdK`KbK^KhKfKeKaKdKeKdKdK^K\K^KTKTKSK[K­KÝKÞKÐK¼KÆKÐKÓK¾K³KÍKÏKÅKªK¦KªKŸK¤KµKK†K—K«K”KhK]KgKlKzK~K…K…KˆKŽKˆKˆK…K…KŠK‹KKjK`KcKrKxK}KyK}KzK…K‡KxKyKzKzKK€K|KxKwKvKKKK…K}KuKpKbKZKNKSKuK€K‹KŒK‹K€KoKZKVKXK?K4KGKSKBK@KRKbKUKDK9K6KAKBK1K-K.K.K=KEKFK1K+K#K,K4K9K.KGKtK}KzKMK3KLKIK2K2K0K.K8KJKoKKtKiK[K^KhKfKKXK'K"K$K0KbKRK KKKGKŽKMK_KŒKžK´KžK‘K€KvK€KKªK»K´KºKÄKÇKÈKÁK½K´K·K·K²K®K²K·K·K±K»KºKºK¸K¶KµKµK±K¶K²K·K²K°K³K´K¸K´K¶KºKµK¸K¼KºK½KÂKÂKÀKÃKÂK¾K¾KÂKÃKÁKÃKÇKÄKÁKÁKÁK³KzKAK7KPKaK?K3KCKbKK~KvKdKeKqKyKiKbK‚K•KK”K¥K¦K§K£K¥K¦K¦K¨K¤KªK¦K¥KªK¦K§K¬K¯K·K»KÀKÇKÍKÑK»KqK.K)K%K'K(K'K$K+K.K1K4K6K/K5K5K2K8K9K=K7K8KK6K1K+K7K4K8KXKpK„K”K™K“K’K“KŽK“KŽK‘K•KžKžK¥K¥K£K©K¤K¦K¥K¤K£K¢K¤K¨K¦K¤K¦K¢KŸK¥K£K£K¡K K¤K¤K£KŸKžKžKžKžKŸKKžKžKK K¡KŸKŸKœKK›KžKœKœK›KœKœKœKœKKK›KšKœKšKœKšK™K—K›K—K—K™KšK™K—K–e]qÈ(KjKjKiKkKnKgKjKgKhKjKjKeKiKdKbKhKdKiKfKiKbKeKgKjKjKrKyK€KƒKˆKŒK“K—KšKœK£K¢K¨K§K­K­K«K­K«K¬K­K«K¬K¬K«K®KªK¬K¬K£K¡K KšK“KK…K|KrKiKZK[KKKFKHKQKKKSKYKSKXK^K\K\K^K`KYKbK^K]KeK^KeKaK_KgK_KaK_KfKaK_KeK^K_KcK\KaKeKfKbK_KdKcKgKdKfKeKeKcKcKjKcK]KYK\KSKOKVKŠKÉKàKÉKÊKÓKÓKÅK¶KÈKÑKÃK°KœK¤K³K²KŸK°K®KKˆK›KœKnK_KfKuKKK}KƒKŠKŒK†K‰KˆK…K‡KƒKxKfKqKxKvKzKzK|KKK€KKxKxKuK~KƒK~K|KwKsK|K‹KŽKKˆKŽKK„KxKnKdKQKKKXKxKyK…KKuKYKUK@K:K=K@KOKWKVKSKMKAKAK>KAK8K7K:K.K0K>KOKNK/K&K#K$K(K.K6KHKfKlKtK]K;K;K=K7K,K2K4K1K8KcK{KqKOKNK\KzKwKpK\K3K"K&K)K`KwKIKKK!K\K˜KrKK­K«K–K‰KzKyK|KŽKªK¼KÂKµKÀKÅKÆKÇK¿K¼K²K¶K±K«K°K´K·KºK´K¸K±K»K»K±KµK·K²K´K³K±K´KµK²K±K¬KµK²K»K½K»K¼K¿KÀKºK¿K¼K»K»KÀKÂKÂKÃKÂKÂKÂKÁKÀK¾K¢KZK0K2K>KXKgK]K?KAKWK†KŽK€KtKfKtKyKwKdKwK‹KK–K¥K¢K¤KŸK¤K¨KªK©K¨KªKªKªK©K¯K­K·K¸K¼KÀKÄKÏKÐKÂK}K-K'K(K(K%K*K*K-K/K,K0K0K2K0K7K9K0K2K9K7K2K4K5K7K3K2K-K,K.K.K0K2K1K1K8K3K0K4K,K3K.K5K3KJKeKwKˆK‘K•K’KKKKKK”KšK¡K¤K¨K¡K¤K§K¦K©K¥K¥K¤K¡K¢KªK§K¤K¤K¢K¢K¤K¤K¡K¢K§K¤K K¥K¤K KžK 
KŸKžKŸK¢KŸKŸK¢K¢KžK›KKšKœKKœKŸKKKšK™KšKšKœKœKšKœK—KK™K™KšKœKK—K›KšK™K–K˜e]qÉ(KlKlKjKmKjKiKkKhKfKeKhKgKeKdKdKhKaKiKaKjKgKcKeKgKlKmKtK{K€K‡KŒK’K”K—K K£K¤K¦K«K¬KªKªK¨K°K¬K¯K¬K­K«K­K¨K«K«K©K¨KŸKŸKœK—KKˆK…KwKiKZKWKMKGKJKJKJKQKYKXKZK_K_K\K_K`K^KbK^KfKbK\KaKcKaKaKaKaKdKeKhK_K_K^K_K]K_KdK`KbKdKeKaKcKfKiKdKjKlKkKgKfKaKeK]K[KXKSKUKhK¦KÕKÓKÚKÓKÈK¾KÈKÕKÆK³K¬K K K¯K¹K¬KžK¨K¢KƒKƒKKgKbKfKoK…KKzK{K~K…KŠKŒKˆK€KK|KkKmK{K‚K€K}K|K…K}K~KƒKyKvKwKyK{KK}KxKsKxKŠKŽKŒKˆKŠKŒK‰KKK‚K{KkKkKWKUKPKdKoKlKPK;K:KBK@KBKEKKKVKIKDKOK>K9K:KKeKxKdKJK@K^KxK~KtKeKCK*K+K,KVKƒKvK"KKK/KxKžKK±K²K‹K{KwKuKyKŽK¤K½KÈKÅKÁKÂKÁKÆKÄKÀK»K¸K¶K²KµK´K²KµK³K·KºK³K³K·K´K¹K·K¶K¯K²K²K´K¶K²K¨K«K±K·K·K½K½K¿K¿KºK»KÀK½K»K»KÀKÂKÀKÄKÃKÃK¾KÀK»K—KEK-K#K*KGK_KrKpK[K=KGKxK•KKyKiKjKwK~KeKjK€K‹KšKKžK KœK¨K«K©K¬K­K©K©K¯K±K¯K±K¼K¼KÂKÃKÇKÍK¼K‚K;K&K%K+K9K(K,K/K3K.K/K3K0K6K5K5K4K;K5K6K5K0K1K7K4K5K5K.K+K+K.K8K,K4K3K7K:K0K2K:K3K;K5KK7K,K0K-K.K-K+K4K*K6K5K4K2K3K2K.K.K6K2KHKaKpKƒK‰K’K‘K‘KKŒK‰KK’KœK¡K¢K©K£K¨K§K§K¦K¥KªK¦K¦K¤K¤K¤K¤K¦K¥K§K¤K§K¥K£K¢K£K K£K¢K¡K K¡K KŸK¡KKœKŸKœK¡K KŸKKK›KŸKžK¡KžKžKšKžKšKKK›KšK›KšKžKšKœK—K›KšKŸK–K™K›K›K–K™e]qË(KjKjKkKjKjKhKiKhKlKgKfKeKdKcKdKbKjKdKdKcKcKlKfKjKiKlKrKzK€K‚KŠK”K–KšK¢K¤K¨KªK«KªK¬K¬K«KªK®K¯K©K«K¬K®K­K¬K®K¬K©K¤KŸKK’K’KŒK€KuKkKYKUKNKGKCKGKGKOKPKQKXK]KZKZK]K[K^KaKcK_K^K\KcKaKbKfK_KbK`KdKbKbKeK^KaKaKcKbKeKbKbK_K`KcKdKdKfKfKmKgKeKkKfKfK`KcK[KVKZKTK_K¦KÜKÝKÍKÕKÛKÎK¼KÄK¾K«K›KK­K³K¶K´K›K®KšKlKYKXKgKKŒK€K†K†K†KzKzK‚K‰K}KtKiKcKkK~K„KzKƒKKzK{K€KzKxKzKxKsKyKzKKsKxKƒK‰KK†K‹KŽK“K‰K‹KˆKoKVK=K7KJK]K]KGKDKDKPK[KWKTKEKDKMKbKhKQKGK>KEK?K>K-K+K3K9K]KxKbK-K+K&K$K&K'K(KHKMKnKK'K&KMK‚KK}K*KKK-KoK¥KŸKŸKKKtKtKK‘K¬K¿KÁKÄKÈKÆKÄKÁK¼K¿K¿K¼K»K¸K¸KµK·K¯KµKµK´K¶K¯K³K·KµKµKµK°K®K®K­K®K±K¯K´K·K±K«K°K¶K»K¼K½K½KÀKÂKÁKÁK¿K¾K¾K¾KÄKÁKµK¶KŸKqKVKMKGKFK9K2K7KHKjKmKsKnKRKFKcK‹K’K‚KjKtKŠKzKoKjK|K‘K™KK™K˜K©K©K©K«K©K¯K­K±K·KµK¹KÁKÉKÊK¹K~K;K2K*K*K5K#K%K'K*K*K,K-K0K2K6K8K6K4K4K4K6K;K;K;K=K4K8K3K0K6K6K1K-K,K.K2K3K3K0K7K/K1K3K8K8K:KTKdKxK…KKŽKŽKK”KŽK†K‘K–KœK¡K¤K©K¥K§K¨K¦K£KŸK§K¨K¢K£K¤K£K¤K¢K¨K¢K¡K¦K¡K¥K£K¡K¤K¥K¥K¢K¢K¡K›K£K¡KžK™K¢K 
KžKKKœKŸKKžKKŸKžKžKKžKœKœKŸKKœKžKKœKšKšK›K—K›KšK™K”K•K™K™K—e]qÌ(KkKkKnKjKiKlKqKlKlKfKhKgKhKcKeKdKjKaKcKdKhKeKfK_KbKpKsKyKKK‹KK—K›K K¦K¥K«K­K­K«K­KªK¬K«K©K«KªK¬K¬K¬K®K­K©K£K¢K K›K”K‹KˆKKwKfK\KSKJKAKDKKKIKXKTKWKWKYKYKYKXK]K_K]K[KZK`KbKbK[K_KcKaK^K]KbKaK_K`KdKhK`K_K_K`KdKaKbKaKdKgKkKiKgKgKmKkKlKeKkKfKdK_K\KWKTKYKKÆKØKØKÝKÓKÆKÇKÐK½K©K K‘K®K¯K«K¼K°K§K¡KtK_KbKiKˆKKK‚K‹K„K{K|KK‚K|KtKlKoKuK€K€K{K~K…KyK}KyK|KzKxKxK|K{K|K~KxKƒKK‰KˆKŒKŠK‹KKˆKxKrKcKNK8K2K4K=KCKKBK>K+K$K)K-KTKrKkKAK)K)K(K(K,K*K/KRKXKlK5K^KEK*K)K&K/K-K7K;KKIKdKhK;K1K'K.KdK¤K‘KlK"K K*KFK’K­K’KqK{KyKwKƒK™K²K¾KÁKÄKÆKÈKÃKÃKÂK¿KÁK½K¼K½K»KºK¶K·KµKºK¶KµK´K³K´KµK¶K´K®K°K³K¯K«K¬K°K¶K²KºK¯K±K°K¶K¼K¾KÀKÁK¾KÁKÀKÁKÁK¿K¾K¹K½K·K¯K¦K˜KqKZKRKHKPK\KVK>KEKSKgKoKuKfK_KPKtKŽKŽK{KmK}KK}KcKtKŽK›K˜K–KšK¤K§K£K¨K«K®KªK­K¶K½KÅKÎKÍK­KfK4K&K*K-K'K%K K$K*K0K-K,K-K2K1K3K4K5K8K@KK5K.K*K-K2K=K`KKsK6K*K%K%K-K&K+K3KTK`KaK/KdK8K+K/K/K4K>KPKKK>K?KBK2K.K.K9KWKiK\K*K(K%K6K|KžKŠK9K#K*KFKiK•KKzK_KnKxK†KšK¶KÆKÅK¾KÃKÇKÄKÂKÀKÀK½K¾K¼K¿K¾K¹K¸K²K·K¹KµK²K²K¶K·K¸K¸K¹K±K¬K°K®KªK¨K«KµKµK´K¶K³KµK¯K¹K»K»KÁK½KÀK¼K¿K½K»K½K»K¶K»K¹K³K³K¥K‘KwK]KLKIK\KhK_KQK>KGKiKfKgKeKcKKKK’K„KoKoKKwKlKgKƒK”K˜K˜K“K¢K§K§K§K¯K°K¯KµK½KÊKÐKÆK™K^K4K(K)K*K)K/K6K)K(K,K-K.K2K4K0K0K-K/K2K4K;K7K>K9K8KEK5K2K0K/K,K5K+K-K)K,K)K0K3K4K7K2K2K/K2K@KAKKKXKpKKŠK‰K…K…K†K‰K†K‰K“K›KŸK¥K¦K¤K¦K¬KªK¨K©K§K¥K£K£K§K§K¦K§K¢K¢KžK¡K¤K¥K¥K¤K¢K K¢K£K¡K£K¢K¡K¢K¡KŸKŸKK£KKžK›KKKK KœKžKK 
KK›KœK›KžKœKžK›KšKœK›KšK™K—KšK›K˜K—K—K•KšK—e]qÎ(KgKgKmKmKnKmKpKhKrKjKoKlKiKjKgKcK`KbKcKcKeKdKfKbKfKkKtKwK€K€K‹KŽK”K›KŸK£K¥K¨K§K­K©K¬K­KªKªK®K®K¯K¬K®K¯K¬K­K¨K¤K¡KK˜K•KKˆK}KuKcKWKWKMKKKKKFKMKPKSKRKTKYK\KZK]K`K\K]K^KbKeKeKdKgK`KaKdKcKbKaKaK^KaK^KiK\KaK]KdKcKbKcK`KeKfKfKfKkKiKlKnKiKfKfKeKbKbKcK^KWKTKVKlK­KÝKÎKÌKÙKØKÊK¿K¿KµK¨K¬K©K´K¸K¿K°K“KtKgK€K‚K†K…K‚KK€K„K†K|K}KxKkKkKuKvK‚KK†K‡K‰KƒK~KƒK}KyK}K€K{KyKwK}K|K‰KˆKŠKKyKgKiKhKiKjKvKuKkKiKoKZKXKTKSKTKLKCKK;K:K8K5K;K3K5K-K0K'K>KeKtKNK'K#K#KAK„K“KmK%KAK[KeK‡K}KuKtKaKgK†K£K¸KÀKÇKÆKÃKÂKÅKÂKÀKÂKÂK¿K¹K»KºKºK¶K²K¶KµK³K±K­K¶K¹K¹K·K¸K¶K¶K¯K¬K¥K§K©K°K´K»K¶KµK²KµK±KºKºK¼K¾K»KºK¿K¿K¸K½K·K±K¹KÀKÀK¿K¼K®K¨K†KfKQKKKJKXKnKlKRKMKYK`K]K_KaKXK[K‹KŽK|KrKuKwKhKdK{KŒK›K˜K˜K¡K¤K¬K®K³K¶K»KÃKÈKÎK¿KKKK5K+K+K+K6K)K1K&K,K'K-K-K.K/K3K6K3K1K4K.K:K9K6K5K@K6K9K0K4K*K,K/K9K/K*K,K/K+K5K2K6K2K9K1K5KCKDKBKRKgKwK†K‰KˆKƒK†K…KƒKKK—KŸK§K¨K¨K§K¦K¥K©K§K¢K¥K§K¤K K¤K¦KŸK£K£K¡K K¤K¢K£K£K¥K¢K£K¢K¥K¢K¢K¡K¡K K K£K¢K¡K¢KŸK¡K K K£K¡K¡K K¡KŸKžK KŸKKŸKžKžK›K K›KšKœK›KšK›K›K™K˜K˜K•K–K™K—e]qÏ(KkKkKjKiKhKlKmKpKjKjKiKeKkKcKhKeKdKaKbKbKaK^K\KcKaKjKoKwK€K€KŠKK–K›KK K¨K§K«K­K®K­K­K«K¬K¬K®K©K¯K±K«K«K­KªK¦K¥K¡KK–K”K‰K|KuKfKVKQKJKJKIKKKOKUKUKXKNKTKWK[K[K]K]K^K`K_K^KcKbKbKcKdKhK_K_KaKaKcKfKbKbKbK`K_K^KcKeKaKdKfKgKlKjKhKhKkKiKfKlKnKiKfKfKfKcK]KYKSKWKKÂKÍKÞKÚKÑKÈK»K½KÃKºK°K¬K·K»KºK©K•KkKxKšK~K|K|KƒK‡K„K†KƒK~KuKrKnKpKzK{K~KK„K€K…K€K‚KK}K~KyKvK~KzKpKsKŠKŽK‡KˆKzKwKxK}KwK|KyKwKrKrKdKfKiKeKYK\KUKAK:K?KEKQK[KZKOKKnKwKBK-K"K,KKK€KˆKZKCKpKK|KŠKRKcKqKbKoK£K¾KÄK½KÅKÆKÄKÀKÁKÀKÁKÂK¿KºK¹KºK¸K¸K¶K¶K·K±K¯K±K²KµK´K·K°K¹KµK¯K«K¦K¥K©K¬K°K¶K·K³K±K¶K»K³K¸K½K¹K¼KºKºK¼K¶K°K³K¶K²K¿KÂKÄKÁK¾K²K¬K”KxK_KTKLKEKJKpKuKcKLKSKdKaKYKeKYKpKK‚K{KuK}KlKfK}KˆK“K–K™K¡K©KµK´K»KÃKÅKÊKÃK·KƒK@K,K,K(K*K+K.K+K*K/K%K+K1K1K4K5K7K:K6K6K2K2K:K6K7K:K?K5K8K1K2K-K/K*K)K0K1K/K,K/K9K;K7K>K2K3K9KHKFKDKYKsKyKˆKˆKƒK‚K„K‚K€K„K‘K›K¡K¨K§K¨K¨K¥K¥K¨K£K£K§K¢K£K£K£K¥K¢K¢K£K¢K¢K¢K¡K¢K£K¢K¤K£K¤K£K¤K¡K£K¡K¡KžK¢K¥K 
K£KœKžK£K¢KŸK¡KœKKKŸKžKœK¡KK›K›KžKœKœKK›KšK™K˜KœK›KšK–KœK–K’K–K–e]qÐ(KnKnKmKjKlKjKfKnKhKcKiKgKiKfKfKfKcKeKdKdK\KcK_KcKcKfKpKtK{K‡K‰KK”K™KžK¡K§K¦K§K§K®K­K­K«K­K²K®K­K¬K¬K­KªK­K§K¬K£KŸKœK•K‘KˆKKwKiKYKQKIKFKIKHKMKOKRKUKWKTK[K[K\K]KZKXKeKcKbK_KcKaKaKaKcK_KaK`KdKdKdKcK`K_K^K`KbKeKfK_KeKiKdKiKhKgKjKlKiKhKiKhKhKeKfKaK^K`K[KTKTKcK“KÒKàKÑKÏKÉK½KÁKÄKÈK¸K¶K»K¹K¯K‘KiK]K‡K K‰KuKuK|KƒK…K…K‚K€K€KtKvKtK{K|KKK…KKK…K‹K‚K{KKzKtKzKtKmKpKK‹K…KˆKƒK~KKK~KƒKzKuKyKuK^K_KdKcKZKNKKK?KIKFKWKbK^K^KUK>K;K,K-K'K&K)K(K,KGKCKgKrKƒKNK*K&K/K*K0K0K)K0KWKKKZK;KXK8K'K+K-KKDK]KYKIKAKQKXKSK_KZK`K`KhKQKEK6K3K1K.K2K,K*K$K/K9KRKxKlKrK6K"K$K(K+K2K\KbK+K;K5K;K:K5K7K9K?KKBKIKaKuK‚K‡KŠK†K€K€KwK{K…KŒK›K¡K©K§K©K¬K¦K¦K©K©K¢K¤K£K£K£K¤K£K£K£K£K¢K¢K¥K¥K¦K£K¦K¦K K¡K¢K£K¢K¤K¢K¢KŸKŸK K K K¡K¡K K KŸK K K K¡KžKŸKŸKœKœK£KžKžKšKšKœKKšKžK™K™KšK›KœK–K•K›K—K–K™e]qÓ(KlKlKjKlKlKmKkKmKdKiKiKgKeKfKdKdKbKbKhKcK`KbK_K_K\KgKiKoKyK†KŠK‹KK•K K£K¨K«KªK­KªK«K©K¯K¬K­KªK«K®K°K¯K¯K¯K«KªK¢KžK˜K˜KK‡K€KxKeKYKYKLKKKGKGKFKOKNKSKRKUK]KVK^KbK]K\KgK`KbKaKaKeK`KcK_KcKdKaKaK`K`K^KbKaKdKfKbKaKdKfKeKiKjKhKeKjKkKnKkKqKpKoKnKjKlKjKhKeK]K[KXKYKRKtKµKßKÐKÈKÇKÇKÂKÁK¼KÁKÆKÁK™KmKvK¥KˆK†KzKqKvKpKvKzKrKqKlKnKxK‡KKK‚K‚K†K„KKvKzKyKuKnKmKoKvKxK€KƒKKyKzKxKuKnKqKkKeKdKZK\KQKHK>KGKOKKKGKSK[KXKMKYKdKbKdKZK@KCK9K6K:K9K6K*K'K1K2KKoKkKTKVKcK‹KŽKzKjK—K¸K­KŸK¨K¼KºK¹K¹K´KµK±K·K¸K»K»K¶K¹KµK´K¯K¨K¬K¯K°K°K®K³K°K­K­K®K³K¬K³K®K¯K´K­K¯K«K¯K°K´K²KµK´K³K±K¶K»K¿KÀKÀKÁKÁKÄKÂKÁKÄKÅKÂKÄKÊKËKÊKÇKÅKÁKºK¨K™KŒKqKcKXKWKXKPK;K@KgKKZKSKUKSKGK@KƒKŒK†K†K„K‰KK‹K™KžK¥KKK‚KmKRK6K)K+K,K,K'K*K/K,K)K0K3K2K3K2K0K/K1K9K9K4K9K2K7K6K7K:K7K=K9K:KK@KMKYKkKK~K†K„K‚K|K}K€K~KˆK”K K¦K§K©K©KªK¢K¦K©K¤K©K£K§K¥K¡K§K¢K¦K¡K£K£K¨K¦K¤K£K¥K¢K K¢K K¡KžK K£K£K£K£K£K£K¢K¡K¡K£KŸK KŸK K K¡K KžKŸKœK¡KžKžK 
KžKKŸKœKKšKŸKKšK˜K›KœKšK—K–K˜K—K—e]qÕ(KmKmKmKnKpKkKmKlKeKhKgKhKfKiKfKiKfKeKaKcKbKaK]K]K_K_KjKsK|K‚K‡K’K“KšKŸK¢K¡KªK«K®K­K®K­K«K­K©K¬K³K¯K­K¯K«K­KªKªK¤KžK›KšK‘K‰KKtKiKaKRKMKAKFKKKGKQKUKUKTKUKZK]KXK[KZK\KcK^K_K\KdKbK`KbK\KaK`K]K_KbKdKbKcKeKbK^KdKbKdKbKeKiKhKfKnKjKlKlKkKpKrKpKnKoKlKlKlKiKfK`K]KYKUK[KKÕKßKàKÖKÉKÃKÅKÄKÁKÉKÁKšK—KKŽK}KyKzKzKxKtKxKsKoKcKkKuKxK|KK…K‚KKzKwKnKlKkKpKkKpKKŠKŠKˆK|K€K€KsKxK„KK…K|KqKiKVKDK@K@KFKMK?K>KKCKIKVKRKcKPK3K?KGKGKFKHK-K+K/K-K/K/K2K1K3K2K9K0K,K1K8K:K=K*K-K,K.KCK`K7KLKKK@KDKlKdKEK_K{KŒKŠKvK|K¦KªKK›KªK´K·K°K³K¯K¯K´K¹KµK¼KºK¶KµK¯K­KªK«K«K©KªK¬K´K±K©K«K¨K­K¬K¯K´K°K®K°K²K¯K®K²K®K±K°K­K³K²KºK¾KÄKÄKÀKÃKÁK¿KÁK¾KÆKÃKÅKÂKÅKÇKÉKÊKÆKÃKÂKÁK²K KKzKhK^KSKRKRKEK/K6K~KKUKSKSKZKAK[K„KK†K„K‹KƒKK—KK£KK{KrKqKYK9K)K)K%K)K)K/K4K-K)K/K,K+K/K1K5K1K4K4K8K7K:K=KKHKFKKnKZK]KTKQK6K_K„K„K‚K†KKK›KšK“K‹K~KrKiKTK8K2K)K$K(K-K,K)K)K,K+K0K-K5K7K;K7K8K4K8K5K9KK8K8K6K;K5K3K0K.K5K/K.K*K&K*K0K2K4K9K2K3K2K0K0K>KBKCKOKbKxK„KŠK‰KƒK…KzKwKvK‚KŒK’KšK¡K¦K¥K¨K¦K¤K¥K¨K¨K¥K¦K¦K¤K¤K¦K§K¬K¢K¢K¤K¤K£K¢K£K¤K¥K¨K¥K K£K¢K K¤K K£K¢K£K K¡K¡K K K¤K K£K¢K K KŸKKŸKK¡KžKžKŸKšKKžKžKK›KœK›KšKK˜KKžK•K™K™K˜K›K—e]qØ(KjKjKjKlKhKiKjKiKkKgKeKjKjKdKdKfKdKfKdKeKaK]K[KZKVKcKiKnKtK}K…KK”K•K—KžK£K«K¨K©KªK¬K«K©K¬KªK¬K¬K¯K°K­K¬K¬K®K§K¦K¢K›K˜KŒK…K‚KvKmK]KPKLKDKFKKKOKTKJKPKSKTK]K`K^K_K\KbKaKfKdKfKbKcKgKfKfKcKfK`K`K`K`KaKaKdKbK^KcKdKhKdKeKgKjKjKgKhKmKnKtKtKoKlKoKlKpKkKoKiKhKdK`KVKVKZKKÓKÜKÖKÔKÙKÖKÇKÊKÄKÉK¿KµK›K†KvK{KK}KzKKKzKlKkKoKzK{KyK~KyKwKyKqKjKuK„K|KK‹KŠKŽKK‚K~KK„K€KvKcK[KxKvKtKmKcKdKVKCKOKGK@K>K;K6K8K>K.K.K6K=KVKdKDKSKEKDKHKMKRKAK7K>K5K3K:KGKUK;K(K4KfKkKoKfKXKJK(K'K(K-K/K4K/K2K.K3K0K/K-K5KK,K%K=KiKVK\KNKCKAKrK†KƒK„KˆK–K•KŸK™K’K‚KuKhKJK4K3K1K*K'K*K)K'K(K/K0K.K3K8K=K>K9K8K6K5K6K;K;KAKKGKZKnK~KƒK‰K„KKƒK{KvKK…KK•K™K K¢K¦K§K¨K¥K©K¦K¨K¥K¤K¦K§K¦K£K¦K¢K K§K¥K¤K¢K¤K¡K¤K¢K§K¢K¦K¥K¦K£K¤K¡K¡K£K¡K¢K£KK KŸK K¡KžK£K¤K£K KžKŸK K KšK›KŸK›KžKKKK 
KœKKšK—K™KKšK—K›KžKšK˜K•e]qÙ(KlKlKjKgKnKiKgKjKjKgKhKiKfKeKfKbK`KfKdK`KbK\K[KXKWKeKeKoK}K}KŠKŒK‘K—KœKŸK£K§KªKªK®K¬KªKªK®K©KªK¯K±K­K¬K±K¯K®K«K§KžKK—K‘K‡K„KvKiKZKNKMKEKCKIKMKPKMKOKSKTKYK]KZKXKZK\KcKdKbKbK`KdKeK\KbKbKdKeK`KgKfKaKeKcKbK_KcKbKeKiKbKhKjKjKcKgKnKsKpKsKqKrKoKmKnKkKmKmKlKbK`K[K[KVK€KÓKÜKÛKÒKÖKÚKÎKÎKÆKÂK»K£K…KwKqKvKvK}KwK|KwKwKrKyKvK}K€KzKzKtKqKtKqKtKxKK„KKK†K‡K†K„K€K~K|KvKoK\K[K|KqKjKYKWKTKIKJKQKMKLK=K>K;K2K0K)K*K8K9KFKLKHKNKCKAKEKEKUKQKGK>K;K'K6KLKRK8K&K6KqK[KyKxK_KXK2K(K,K+K/K:K.K,K-K0K0K6K1K0K:K/K)K*K,K)K'K)K,KAK_KlK„KkKXKUKYKaKOKrK§K¡KƒKKµK¯K­K©K±K·K±K°K³KµK·K»K¼K¼K·K³K¬K­K­K¥K£K¦K«K¬K¯K¬K³K³K¯K²K´K³K®K¨K§K K¯K³K¬K­KªK¢K¨K±K¼K½KÁKÄKÃKÃKÇKÉKÇKÀKÄKÆKÇKÄKÄKÈKÆKÄKÂKÅKÅKÇKÇKÄKÉKÈK½KµK°KŸKK„KjK\KbKNKQK?K8K0K(K0KWKjKbKJKOKMKgK‰KK‰K„K™K“KœK—K—K‚KvK`K;K.K-K.K&K,K)K&K(K+K,K-K/K-K1K:K8K9K7K8K6K5K7KK9K6K7K;K;K8K8K;K3K:KBK8K9K7K2K2K4KEK.K/K(K-K(K*K1K2K9K3K1K-K0K/K:KKKWKmK|K…KŒKŽK†K‚K|KwKK~KŒK—KK¢KŸK¡K¢K¡K¥K¦K¥K¤K£K¥K¥K¢K£K¤K¦K¡K¡K¢K¤K¢KžK¤K K£K¦K¢K¥KŸK£K¡K£K¢K§K¥K¢K K¥K¢K£K K¢K¡K£K£K¢K KŸKžKžK K¡K¡KŸK KŸK¦KžKŸKŸKKKžKKšKŸKœKKœK™K˜K–K–K•K–K—e]qÛ(KjKjKdKjKfKgKhKlKjKiKmKiKlKfKaKbKdKbKcK_K]K\KXKYK\K]KjKrKuKK„KK’K–KœKŸK¡K¦K¬K«K«K¬K­K­K®K®K²K®K±K«K­K®K¬K°K«KžKžKžK˜K‘KŒKKwKmK^KQKNKHKGKAKOKOKRKWKWK\K^K\K[K]KcKcKbKdKdKcKbKbKhKeKcKjKcKgKbKdKaKcKeKdKbK`KcK^K`KcKdKgKfKjKjKpKtKmKqKpKqKoKvKpKmKlKiKkKgKgKcK]KXKYK|KÏKÚKÙKÚKÑK×K×K×KÌK¼K¥KvKiKeKjKvK|K€K}KtKtKyKtKwK|K{KKuKqKgKgKfKxKˆK†KŠKŒKˆKKKˆKKK„K}KyKmK^KVKCKJKSKDK3K1K3KJKfKYKIKAK.K3K4K*K*K0K+K/K3K2KFKaKKK;KEKDKXKUK[KYKEK)K-K7KHKQK8K#K/K;KZKvK…K|KoKKK*K+K,K0K-K1K9K0K+K)K)K)K,K-K(K*K#K$K$K'K6K\KiKoKzKkKOKOK_KoK‰K‚K}K±K—K…K²K·K³K¶K±K¶K²K¶K»K¶K»K½K»K¸K´K´K±KªK¨K¡K¢K£K¥K§K¦K¦K¥K«K±K¶K¶K°K¬K«K£K™KK¥K§K©K§K¯K¹K½K½KÁKÄKÃKÆKÀKÆKËKÍKËKÇKÇKÃKÄKÃKÃKÄKÆKÅKÉKÍKËKÄKÅKÈKÃKÁKÀKºK®K¨KžK„KyKeK_KQKIK=K7K.K0K+K2KNK_KUKKKTKGKpKŠK…K~KK’K™K”K˜K‡KqKQK5K4K,K.K-K+K/K,K0K1K0K/K1K/K3K3K5KKEKGKRKTKaK;K+K/K6KIKVKBK'K(K.KGKvKuK~KKbK9K*K*K-K.K.K0K1K4K+K/K,K.K#K"K"K$K&K)K7K`KwK}KnKuK\KNK[KwK“K£K‹KzK±K˜K“K¸KµK³K¶K´K¸K¶K¸KºK¼K»K½K»K¶KµK³K«K¥K¥K¢K K¡K 
K¦K¤K¦K«K¬K³K¹K´K¯K¬K§K™K›K£KžK¥K®KµK¹KºK½K½KÂKÄKÄKÈKÄKÈKÈKÌKÊKÆKÊKÇKÈKÅK¿KÃKÈKÇKÆKÌKÈKÊKÇKÌKÅKÄKÆK¾K·K¨KžK†KtKhK\KSKSKCK0K0K2K0K,K6KMKSKOKHKIK]KK‘K„K‰KK–K™KžKƒK`KGK-K:K5K-K.K6K1K2K.K4K0K,K/K1K4K4K3K6KK>K9K;KK?K>K6K4K0K+K/K'K3K2K)K0K.K8K3K1K1K(K-K4K8K:KYKjKxK…KKŽKˆK€KK|K}K…KŒK•K›KœK™KŸK¡KœK¢K¦K£K§K¨K¥K§K¤K¥K§K¤K§K£K§K¤K¦K¡K K¢K K¢K¡K K¤K¦K¤K K¢K¤K¤K£K K¥K¤K¦K K K¢KŸK K KŸK K K¡KŸK¡KžK›KKKKKžKŸKžK›KKžK¡K KœKœK›KœKœKœKšKšK›K™K”e]qÝ(KiKiKoKlKhKhKjKjKgKjKhKhKiKgKlK`K`KeKcK`KaKWKYK[KYK\KcKiKtK‚K…K‘K‘K”KœKžK£K¦K«K¬KªKªK«K®K®K«K­K­K«K°K®K®KªK­K©K¢KžK™K–KK‰K}KpKmK]KRKMKHKJKLKFKMKPKUKOK_K[KaK^K\KdK]KbKcKaKbKbK`K^KhKaKbKaKeKaKhK_KdKeK`K`KdKcKfKfKbKgKjKmKfKhKlKpKrKtKtKsKsKuKqKrKmKrKmKjKlKbK`K[K[KzKÀKÞKØKØKØKÎKÒKÚKÐKªKxKcKdKaKkKqKxKxKoKqKrKwKyK~K{K}KvKnKjKpKwK~K…K‚K‹K†KK…K„KƒK„KwK{KzKxK€KvKRK.K'K+K1K;K4K,KK>K?KKK8K>K?K;K4K7K.K/K7K-K0K.K(K/K*K3K2K6K.K5K1K:K7K8KHKXKqKK†KKKˆK†KK‚K„KK’K–K™K›K–KšK•K”K—K˜KžK™KKžKžK¤K¡K¥K¨K©K¥K¥K¦K¥K§K£K¦K¤K¡K¤K£K¢KŸK¡K K£K¡K¡K K¤K¢K¥K£K£K¨K¡K¡K K K¢K K›KŸK KžK KžKžKœK KžK KŸKKKœKŸKŸKŸKžKšKšKK˜KžK–K™KšKœK”K–e]qà(KdKdKlKjKkKnKgKiKhKgK`KgKgKcKcKeKfKfKeKdK\KcK[K\K[KdKiKrK{K~K†K‰K‘KšKžKK¥K¨K¥K¯K±K­K­K©K«K©K°K±K¬K¬K®K¯K®K«K¨K¤KžKžK™K“K‹KKxKfKcKOKHKHKIKFKSKSKRKSKUK[K]K[K[K\KaK_KaK]KgKbKfKgKdKeKeKcKiKkKeKbK_KfKbKcKbKcKdKgKhKgKhKhKjKhKlKnKlKnKsKmKtKvKwKvKoKpKmKpKnKlKjK^KgKbKcKKÎKßKÛKÕKÚK×KÊK™KlKjKjKhKjKkKhKiKjKpKwKuKrKwKyKvKnKuK|KKKƒK„K„K~K„KkKWKwKzKrKpKrKrKyKoKnKuKiKGK-K'K+K-K>KWK`KVK,K-K)K/K&K+K/K6K4K-K+K2K0K,K1K3K8KJKVKOKEK:K?KIKKKEK@K1KEKcKZK+K-KK@KJKAK>K.K)K&K$K#K#KK!K)KVK‚KšK KˆKrKmKhKxKŸK¸KµK K§K¢K˜K€K{KŽK K»KºK±K´K¦K·K¼KÃK¼K¼K¾K¸KµK­KªK¬K¢K¤K£K K¡KžKŸK§K±K´K¶K¼K±K°KŸK˜KšK£K¨K¨K§K¯K°K´KºK¹K»K½K¾KÀKÄKÄKÂKÂKÆKÉKËKÈKÉKÊKÉKÊKÉKËKËKÌKËKËKÒKÑKÒKÐKÏKÑKÑKÎKËKÁK¹K¯K¤K‰KsKhKYKYKMK>K.K&K*K.K/K.KBKIKUKKK>KeK’KšK“K›K­K£KšK}KMK-K-K/K3K1K*K+K/K2K-K2K+K3K3K7K1K5K6K5K:K1K@K:K=K7K@K?KEKGK:K4K/K+K,K,K1K/K3K-K/K2K=K1K*K-K-K1K6K?KNK_KvKƒKK‹KˆK…K„KKK‡K“K—K›KšK›K–K–K˜K–K—K›K—K˜KšK›KŸKžKœK¡KŸK¢K¦K§K¥K¢K¤K K§K¢K¥K£K¤K¦K¡K KŸK K K¡K£K¢K¢K£K¡K¢K§K¡K¢K¡K K£K¡KŸK¡K 
KŸKŸK¡KœKœKœK›K¡KŸKŸKKžK KŸK¢K KšKœKœKšKœK—KšK›K˜K”K—e]qá(KfKfKfKdKdKjKcKhKiKhKhKbKeKeKiKaKdKgKdKgKdK`K\KYKZKaKfKnKvKƒK†KK’K—KšK K£K§K§KªK®K«K«K­KªK¬K¬K°K®K¯K®K­K­K©K¨K§KžKžK K–K‰KKzKnKjKUKQKJKFKCKOKMKOKTKRKVK[KaKZKaK\KfKeKdKaK`KfK_KaKeKfKaKgKaKeKgKcKcKfKfKdKfKdKeKjKeKjKfKgKgKkKmKjKjKpKoKrKxKwKxKrKqKoKoKpKmKkKiKcK_K^KwKºKÜKÞKÖKÚKÚKÉK‰KkKhKpKoKqKnKsKiKiKqKuKpKmKnKvKqKtK†K‡K‚KyK~K€KKK‚KtKWKeKsKqKoKqKoKfKVKWKPKVK?K/K.K2K3KQK_KaKNK2K+K*K)K'K-K2K7K3K/K1K;K3K1K5K.K3K8KSKTKHKHKEKBKEKAKCK:K@KaKWK+K.K>K@K7KK1K1K/K5K1K3K@KQKLKJKAK=KDK9KAKMKFKYKPK)K+KKK;K5K-KYKˆKŠK^K'K)K&K$K.K'K#K#K!KKKK&KAKsK¤K¬KˆKqKhKtK‡KŸK¶K¿K­K¦K¤K¦K¥K£KK†K€K›K¦K¤K¬K¬K¢K°KÀK¿K¶K­K¬K½K®K§K­KªKªK«K¢K§K K¨K£K®K°K·KµKžKK™KŸK¦K¤KªK±K­K¯K´K»K´K¹K½KÁKÄKÂKÄKÌKÈKÇKÆKÈKÉKÊKÉKËKÊKÇKÉKÉKÌKËKËKÏKÏKÑKÑKÒKÐKÓKÕKÕKÑKÍKÉKÂK¸K§K–KKlKbK\KQKFK1K&K#K&K.K/K0K?KEKMKFK=KuK–K›KœK²K®KœKKMK/K/K2K1K6K.K.K1KCKEK7K8K?K8K5K8K9K4K5K6K7K;K=KAKCK9K?KAK:K5K0K3K+K0K/K,K1K2K2K2K:K8K0K(K,K+K4K6KMK`KuKKK”KK‡K‡K|K}K‹K’KšKœK¢KœKK™K•K›K™K—K™K˜K–K“K“K™K›K—K™K˜KšKšK KœKK¡K£KŸKKŸKžKŸK£K¡K¢K¢K¡K¡K K K£KŸK¨K¡K¤K¥K¥K¥KŸKžK¡K¡KŸK K¢K¡K K¡KœKœKœK¡K›K KœKžKKžKKœK™KœKK™KKšK™K—K–K™K™K–e]qã(KcKcKcKmKhKeKlKgKjKdKiKhKiKgKeKbKbK_KbKaK`K^K[K[KZKYKcKmKvK…K†KŽK”K–KšK K¤K£K¨K§K«K©K¬K°K«K«K­K²K±K®K«K­K¯K¯K­K¨K¡K›K”KŽKŠK‡KzKsKaKYKQKMKIKPKQKKKPKQKTKTK_K`K]K`KbKaKfKhKhKcKkKeKbKiKlKiKbKhKdKcKfKfKhKeKfKdKiKeKeKiKfKfKaKcKgKoKŒK§K{KlKqKuKrKwKoKqKvKsKnKpKmKgKdKbK`KhK“KÑKàKÞKÜKÖKµKyKrKtKkKqKuK{KxKuKtKqKmKoKgKbKiK{K†KŠK‚K}K€KzKwKlKcKhKPKAK@K:KHKWKOKRKIKKKMK\K\KZKCK9K.K7KTKhK\K3K-K)K*K-K&K-K@KCK7K.KAK:K2K+K.K4K+K0K:KK@KKBK9K6K4KDKZK`KdK;KBKjK7K,K/K4KPKKK7K)K-K*K&K$K!K#K K!K$K4KkK›K KKxKkKjK}KKµKµK¬K«K¥K¥K¦K¦K§K£K”K„KK§K¯K¬K¢KŸK±KºKÁK¾K½K¸K·K±K§K§K§K¨KªK«K«KªK«K­K®K¬K§KœK“K•K¢K K¢K©K©K«K®K¯K®K¶KºK¼K»K·KÃKÄKÁKÃKÄKÉKÆKÄKÈKËKÈKÈKÈKÉKÈKÊKËKÌKÎKÎKÒKÐKÎKÏKÒKÔKÐKÐKÓKÔKÎKÊKÄK¿K±K¥KŠKzKmKbKXKLK7K1K.K-K*K'K+K*K7KFKRK?KBKŠK˜K›K¥KªK¦K†KeK:K3K@K?K6K2K+K3KK7K8K4K.K*K,K*K0K4K2K7K6K?K7K0K)K.K,K-K:KQKqK€KK‘KŽKŠK…KƒKƒK‡KKšKŸK¡K£KžKKK—KœK—K›K—K˜K—K”K•K˜K—K•K”K–K˜K•K˜K–K—KšK™KšK—KŸK¢K¡K K¡K 
KK¡K£K¡KŸK£KŸK K¢KK K K K£K£K£KŸKŸKžK¡KKœKžK¡KŸKšKKŸKœKKœK›KœKžKKKšK™KœKžKœKšK™KšK—K•K˜e]qå(KhKhKkKgKfKhKeKhKhKfKeKdKgKcKfKcKiKcKaKdK_K^K_K\KYK`KiKiKuKK‡K‹K‘K–KŸK¢K¢K¦KªK­KªK­K­K«K¯KªK®K²K°K®K®K­K­K®KªK©KŸKœKšK“KŠKKvKoK`KXKOKIKGKQKHKMKOKUKPKXK]KZK]K\K_KcKbKfKhKgKbKjKhKgKgKiKcKjKlKcKhKcKbKeKhKaKcKdKcKbKaKfKfKpKK°K£KxKmKoKsKnKxKtKtKqKpKtKpKsKoKpKnKgKlKiKoK›KÜKåKÝKÕK£KqKmKuKqKrKvKxKxK}KsKmKcKaKrKvKK‡K…KzK~KyK…KŒK†KsKjKzKkKgK`KIKGKQKSKCK/K.K3K?K7K.K*K,K,KKGK?K2K3K;K5K2K4K1K7K6KDK.K+K.K;K>K@K9K2K2K7KPKcKnKgK\KzK?K7K-K3K;K€K‰KGK%K$K(KK#K#K(K#K)K+KbK‘K¥K•KKtKrK{K KµK¹K¥K®K­K¨K©K¡K¨K¨KªK–K‡KŽK©K¶K±K¨K§K¯K¹K¼KÀKÆK¹K¸K¯K§KªK«K©K«K­KªK«K®KªK¯K¡K–K“K•KœK£K¦K¥K©K¦K®KªK±K°KºK½K¿K¹KºK¾KÃKÂKÂKÄKÆKÅKÆKÅKÉKÈKÉKÊKÉKÉKÈKÍKÌKÑKÐKÑKÑKÑKÐKÑKÒKÑKÔKÖKÔKÑKÌKÃKÁKµK¤K”K~KoKaKQKMK:K;K2K,K&K'K(K+K0K7KLKFK,KwK˜K—K¡K®K©K†KlKAK3K3K3K9K.K)K/K-K1K5K3K3K7K;K9KK0K8K@KBK+K)K-K8K4K8K6K*K+K.K:KHK;K-K9K8K3KHKIKgKlKoKyKYKLK;K;K-K^K˜KjKAK%KKKKK%K.KYKƒK¡K–K…K|KsK{K“K´K¿KªKªK°K¸K¹K®K®K©K¨K«K®K™KŠKK§K¨K¦K¦KµKºK½K¿K¿KÄK¿K³K«K¥K©K¬K§K§KªK§K¢K¥K™K‘K–K›KKœKŸK¦K¦K¨K§K©K¬K®K¶K¸KºKÁKÂK¾KÀKÀKÁKÅKÃKÄKÊKÈKÉKÊKÄKÊKËKÊKËKÉKÉKÍKËKÐKÑKÒKÑKÒKÑKÑKÒKÖKÔKÓKÑKÏKÎKÊKÈK½K«K™K†KsKgKWKQKFK7K.K&K!KK#K*K.K1KKKYKKK?K8K5K8K6K4KCK-K)K&K'K.K(K+KWKnK>K(K+K.K*K8K@K?K/K5K7KDK4K,K/K3K7K=K7K1K2K1K.KDKKhK¤KŒKoK6K"KKK K)KRK…K—K›K€KyKrK{KŽK¯KÀK·K¦K­KºKµKºK°K²K°K³K³K±K¡KKK¤K¦K¥K©K±K·KÀKÇKÅKÊK¾K°K¨K¦K­K­K«K¥K¢K£K¥KK•K’K›KœK¡K K¢K¦KªK«K«KªK¨K«K´K¸K»K½KÀKÁKÁKÁKÃKÁKÃKÆKÅKÈKÉKËKÄKÊKÌKÌKÌKËKËKÌKÉKÑKÍKÑKÑKÑKÓKÒKÑKÒKÓKÔKÑKÑKÏKËKÆK½K¯KŸKKtKdKUKWKGK;K-K(K$K#K"K(K&K2KAKZKEK?K“KªK¢K¤K¬KKcKZK9K8K3K2K4KCK9K7KKCK>KBKBKBK7K>K4K2K-K4K+K1K/K/K4K4K6K6K0K0K+K.K3K8KQK`KvK†KŒK‘KKŠKˆKƒKƒKK–KœK¥K¨K¨K¨K¥KžK£KœKKšKšKšK›K—K–K—K–K™K—K•K—K˜K”K‘K•KK“K–K•K–K•K”K—K–K–K–K“KK’K–K—K™K˜K˜KKœKŸKžK¡K 
K¡K¡K¢KžKžKŸKžKK›KœKžKšKKK›KœK™KšK›K›K™KK›KšK™K–K–K”K•K˜K–K”K•e]qé(KfKfKjKhKjKfKiKgKfKeKcK_KdKdKcKcKdKdKbK^KZKZK[K\KjKeKgKhKuK|KƒKŠKK•K™K›K¤K§K©K«K©K«K®K®K­K°K¯K°K­K¯K°K­K±K®K­K¥K¡KœK•KKŽK‚KpKrK`K[KQKNKOKRKWKUKSKTKVK[KcKeKbKcKeKgKcKeKcKcKhKdKeKeKdKfKfKgKhKdKcKdKjKaKeKeKfKjKgKfKiKjKmKtKwKqKtKrKrKvKsKsKuKxKsKvKzKyKuKvKtKtKtKtKnKqKiKjKtK¤KÙKâKÎK—KvK{KvKuKrKrKjKiKrKƒK€K}K}KzK…K‰KzKyK‡K‡KƒK~KtKkKfKbKdK_KIK.K1K1K0K3K@KOK?K+K'K%K,K)K)K1KOKoKJK-K3K6K8K>K3K.K-K5K=KGK0K-K3K7K2K8KBK1K1K7K;KHK=K5K&K$K.KGKiKZKNKdKK4K,K&K"K)K)K(K1K;KTKOK7K„K°K§K©KªK|KbKZKAK:K6K:K*K+K-K6K9KKKDKCK:K9K1K-K+K-K1K2K-K0K2K1K5K8K1K+K*K'K.K8KRKrK‚KK‘KKŽKˆKƒK‚KŠK‘K—KžK¥K¢K¤K¢K¥K£K¢K¦KžK¡KžKK›KKšK•K˜K˜K•K—K–K˜K”K“K’K•K”KK—K•K–KšK”K•K“KŽK’K‘K’K—K“K–K‘K—K•KœKK¡K›KŸK KK›KŸKŸKœK›KKœK›KœKŸKœK›KšK›KKœKšK—KšK›KšK›K—K˜KšK–K—K˜K™K“K˜e]qê(KkKkKjKlKhKkKlKkKeKeKaK`KeK_KiKjK^KbKeKZK_K^K^K]K`K`KeKlKsK}K€KˆK‘K”K—KŸK¤K¨K§K©K«K®K¯K¯K¯K®K¯K®K¯K­K°K®K°K«K­K¦K¢KK–KK‹K„KzKmKaKYKUKNKRKOKTK[KYKXKZK\K]K\KaK\KcKfKaKeKfKfKgKeKiK`KeKdKfKgKjKaKhKeKjKdKbKlKcKeKjKjKeKmKpKpKwKuKrKuKrKmKqKvKvKxKuKxKxKuKtKxKsKvKwKtKtKsKnKnKnKŠKÆKâKÐKžKKwKxKtKoKtKmKjK{K‰KKwK|K}KK†K‹K|KK…K‚KxKhKaK^KfKaKMK9K;K;K0K1KAKSK\K/K%K'K)K#K-K(K/KEKiKXK4KGKKKNK5K+K,K/K:KBK1K1K1K/K7K7K4K6K3K2K9KHKKK:K9K&K'K,K8KkK]KK5K.K(K"K&K(K*K(K2KGK_K5KeK­K¤KªKªKƒKcKnKNK,K/K2K.K-K3K4K9K@K6K;K4K;K9K=K=K>K8K6K=KEKDK;K>K6K6K6K.K+K-K3K5K3K0K7K7K8KK=K4K/K?KAKHKKK1K/K*K2K8KFK1K1K,K0K=KRK?K8K8K/K(K,K8KhKaK.KJKQKLKHK/K'K"K$K.KnK‘KiK.KK"K5KiKŠK¢KŠKxKzKrK{K–KºK«K¦K¦K¬K®K¬K´K¿KÂK¼KºKºK´K¹K¯K‰K˜K‡K‹K©K¥K¬K´K¼KÅKÉKÀK²K¬K§K¢K¡KŸK¡KªK£KžK˜KšK›K¡K KœK£K¦K©KªK«K®KªK®KªK°K«K²K½K»K¸K¿KÁKÄKÀKÃKÀKÇKÈKÃKÉKÈKÆKÈKÌKÊKËKÉKËKÌKÌKÏKÍKÎKÎKÐKÒKÒKÒKÓKÔKÓKÔKÔKÕKÓKÑKÌKÃK¼KªK˜K†KxKkKZKNKIK8K.K*K+K)K+K/K/K+K;KWK:KPK¤K©K©K«KK]KtKQK/K-K4K1K0K5K6K6K8K8KK;K9K=K;KK3K+KRKqK]KhK9K'K#K(K)K.K)K*K*K5KIKlK`K;K1K+K/K0K3KMK\KFK6K-K,K5K7K:K-K,K.K3KJKTK/K4K;K(K/K.K4KRKVK*K3KMKQKAK+K(K#K K+KEK”K{KKK K,KfK‡K KKzKwKK€K•K±KªKK§K§K¯K±K®K°K¼KÃK»K¸K»K¹K¹K¬KŒK›K}KˆK°K«K±KºKÀKÅKÁK¶K¯K©K¤K 
KœK›K©K¢K”KžKžKŸKK©KK¡K¨K©K©K«K®K®K­K®K®K³K°K¯K¹K¸K¸KºKÃKÃK¾KÁK¾KÅKÇKÇKÇKÇKÈKÈKÎKÍKÍKÊKÊKÎKÍKÍKÐKÏKÑKÐKÑKÒKÏKÒKÖKÖKÖK×KÖKÖKÑKÎKÉK»K©K™KŒKwKfK^KYKNK>K1K/K/K(K(K)K*K3K8KUKCKHK—KªK©K±KžK[KqK>K.K(K1K4K7K5K7KK@K:K9K2K1K8K3K/K,K.K,K2K:K8K6K6K1K*K0K-K/K:KSKtK…K’K‘KKŽKŠK†K„K‹K’KœKŸK¥K§K©K§K¦K¡K£K¤K¢K¢K¢K K¢K¡KžK KKŸKœKŸKŸK›KŸKšKšKšKšK›K˜K˜K—K™K˜K˜K“K•K–K‘K’K“K“K‘KKK’KK’K•K’K‘K’KK•K–K•K™K—K—K›K—KšK›K™K›K—K™K™K—K™K™K—K™K™K›KšK›K˜K—K—K˜K˜K–K•K“e]qí(KdKdKmKgKhKgKjK`KcKkKdKfKgKaKkKaK^KaK\K]KXK_KXKXKXK[KaKoK|K}K…K‹KK˜KœK K¤K¥KªK©K«K®K­K¯K­K°K¬K±K¯K¯K®K®K®K¨KªK¥K¢KžK•K”KŒKˆKuKhK`KXKKKKKLKNKOKRKWK_KZK]K]K_K]KaK^KcKcKcKeKlKfKfKfKhKeKhKfKiKiKdKjKhKmKdKgKkKbKiKiKhKmKkKiKmKuK|KnKoKvK|KuKuKzKwKzKvKwKyKyKwKvKzKwKyKwKtKoKvKyKwK}KK¸K—KxK~KyKrKiKoKƒK‹KƒKK„K…KˆKKK…K„KŠKŠK„KnK^K]KCK3K-K1KAK8K(K&KBKPKdKgK3K,K)K&K6K7K(K)K+K,K4KJKhKKK6K%K2K.K>KWKNK5K4K-KKCKAK:K;K8KK6KOKeK?K$KK!K K K,KOK}KgK^KrK›KŸK‡KtKoKzK„K«K¯K£K¤K¢KK¦K¦KªK¶K¶K½K»KºK¶K¿K»K¸K±K K˜KtK}K¨K¯KºKÃKÀK¶K¯K¦K¢K¡K™K—K›K–K›KšKœK™K›K¡K¤K¡K£K¥K©KªK­KªK©K¬K¯K±K°K³K°K¶KµK·KµK½KÁKÁKÃKÂKÅKÃKÃKÇKÇKËKÄKÆKÇKÈKÉKËKÊKËKËKÉKÍKÍKÏKÒKÓKÑKÓKÒKÖKÕKÔKÔKÔKÖKÓKÓKÍKÄK²K£K’KƒKoK]K`KWKMK>K3K/K)K,K+K'K'K1KGKbKBKiK¡K©K®K¤KiKYKBK3KDKK;K9K7K-K0K2K6K4K3K/K3K9K7K6K3K,K+K)K/K5KEK[KsK}KˆK“KK‹K‹K†KKŠKK•K¢K¡K¢K¨K¤K¡K£K¤K¢K£K¥K¤K¤K¤KŸK¢KŸKžK£K¦K¦K¥KžK K£K K¢KŸK KœKKžKœK˜KšKœK˜KœK™K™KŒK“KKŒK’KK•K“K•K–K“KŽKK“KŽKKŽK’K•KKKK“K”K–K˜K™K—K™K™K—K•K–KšKšKšK›KžKšK™K˜K›K—K—K•K—e]qï(KdKdKfKjKbKiKcKdKcKgKgKdKaKeKcK\KbKeK]KdK_K^K[KZKYK`KdKmKsK~KŠKŠKK“K”KœK¨K¤K©K­K«K«K«K¯K®K­K®K®K²K¯K°K®K®K­K«K¢K¡KK›K–KKKvKjK]KTKUKQKKKLKOKRKYKWKYK]K`K`K_K]K`KcKhKeKeKgKhKdKjKeKfKeKjKhKmKeKeKiKfKcKfKeKgKeKjKmKmKjKlKpKpKsKtKwKtKqKpKqKpKsKlKqKkKsKoKnKoKpKjKeKfKyK}K…KŒK„KqKnKKŒKqKmKuKpKzK~K€K„K‹KŽKŒKK†K}K€K€KzK}KlK`KYKNK9K0K.KEKYK8K,K!K%K&KEKvK@K8K*K'KJKTK2K1KK9KZKbKSK=K2K&K(K)K1K7K7K*K)K)K@KfK;K'K;KZK1K4K2K%K(K4KFK4KOKzKSK!K K#KKK K1KbKgKŽKŽK KˆK}KjKtKˆK£K²K¥K K¤K¡KœK¢K¢K¥K°K´KÀK¼KºK¹K»K¸K»K¹K¯K¤KŠK„K«K°K·KÂK¸K®K¨K¦KŸKœKžKœK›K˜KžK KŸKŸKŸK£K 
K¢K¦K¥K¬KªK®K¯K«K¬K¬K°K¯K¯K´K²K³K·K¶K¼K½K¿KÂKÃKÅKÄKÆKÆKÆKËKÊKÆKÇKÆKÇKÊKÉKÊKËKÌKÌKÏKÍKÏKÑKÑKÐKÐKÓKÔKÓKÖK×KÔKÒKÔKÎKÄK¸KªK•K†KuKeKcKXKPK=K2K+K-K)K(K)K(K1KGKeKPKiK¤K¬K®K«KnKXKHK1KCK8K,K)K.K8K>K@K9K:KKMKcKiKbK?K6K7K(K)K0K9K;K.K+K,K-KRKdK-K&K-KJKK9K)K1K4K.K.KcKŒKaK)KKKKK!K(K>KlKŽK—K„K{KqKnK†K§K¼K­KKžK£K KŸK¢K¡K¦K­K¯K»K¼KºK»K¼K»K»K»K²K®KKK«K´K¼K·K§K©K§K¦K¡KžKšK›KœK K¥K¦K£KžKK£K¡K¦K¡K¨K¨K°K«K°K­K­K°K°K®K³KµKµK²K¶K¸K¹K¾K½K¿KÁKÄKÃKÆKÇKÄKÇKÃKÅKÇKÃKÄKÅKËKÈKÊKÉKÊKÎKËKÌKÏKÏKÏKÑKÒKÒKÓKÖKÖKÓKÑKÎKÊKÇKÀK¬K›KŒKtKeK\KYKQK@K4K*K%K%K*K)K,K1K7K]KIKXK–KªK®KªKƒKYKVK-K9KK@K@KBK@K:K8K1K/K*K+K3K/K2K4K8K2K9K2K3K-K)K-K3K:KSKrK{K„K‹KKŽK…KˆK‡K†KK”K™K K£K£K§K¦K£K¡K¡K¢K¡KŸK KŸKžKžKžK K K¢K¢K¥K¢K£K¤K¡K¤K§K£K¤K£KŸKžKžKšKKKšKK˜KšK’K–KK’K‘K“K“K“K”K•K”KKKKKŽKKKK‘KKKŒK‘KKK’K’K“K“K•K”K›K˜KœKšK•K˜K˜K–K˜K—K•K–K”K˜e]qñ(KfKfKeKkKfKcKjKiKfKbKbKjKdKeK_K_KaK^K_KgK_K^K[K[KVK`KeKjKyKzK„KK“K•K˜KœK¦KªK¬K¬K¬K®K®K¯K®K°K°K°K±K¯K±K¯K®K¬K«K¥K£KK“KK‰KƒKwKkKeKWKOKKKLKHKOKTKVKTKSKUK^K_K_KbKhKcKdKcKgKfKcKgKiKjKiKjKhKeKgKlKiKdKeKfKcKgKlKgKjKkKiKjKlKnKrKqKtKyKwKvKzKxKwKvKtKuKtKvKrKqKqKnKqKwKuKzK‰K‹KqKYK[KUKaKyK{KwK`KpK‡KƒK{K€K‚K‚K€KK|KyK{KKKxKmKfKWKFK,K+K=KbKZK,K'K&K"K%KPK}KRK@K/K@KOK+K$K3K7K=K/K3K.K0K7KBKkKhKeKGK6K;K*K%K"K+K:K6K,K&K)K7KYKdK*K%K*KGKQKXK9K@KDK/K(K=KxKqKAK0K#KKK K K2KSKrKuK~KlKbKjKƒKžK´K®K¢KK¡K¡K KŸK¡K£K¡K¦K­K¼K»K¾K¾KµK·K¹K·K´K¬KœK¢K´K¹K´K¬K£K§K¦K¤K›KšKšKœK¡K¦K©K¤K¢K£K£KŸK¤K©K§K¦K¨K­K°K­K®K­K®K¯K°K±K´K´K±K²K´K¸K¾KÀK½KÂKÃKÅKÆKÅKÃKÄKÁKÆKÃKÆKÄKÉKÈKÇKÉKÌKÌKÌKÈKÎKÐKÏKÑKÒKÒKÓKÓKÔKÔKÔKÐKÌKËKËKÆK³KŸK‰KwKfK`KbKMKDK3K+K'K'K*K(K(K.K6K\KGKWKK«K°KªK‰KeKXK.K/K9K0K3KK;K=KCKFK@KK(K,K(K%K K)K+KIKxK‡KuKeK\KeKyKšK»K°K¡K K¤K£K¥K¢KœKžKŸK£K¦K¯K¼K¾KÂK¼K·K´K¼K·K¸K²KªK®K¹KµK«K©K¦K¦K§K›K˜KšK¡K¤K¨K¢K¤K¦K¦K¤K¢K¦K£K©K§K§K¬K«K´K²K­K°K°K°K²K²K²K´K®K®K±K¶K¼K½K¿K¿KÂKÄKÄKÈKÄKÆKÃKÂKÁKÅKÂKÉKÈKÉKÈKÌKÊKÉKÌKÌKÌKÐKÑKÓKÐKÑKÔKÔKÔKÖKÒKÓKÎKÎKÆK´K£K‰KwKdK`K[KUK@K4K+K&K*K*K*K(K.K4KQKHKNKxK¬K«K±K‘KaK[K6K6KAK2K9K:K>KIK;K=K?K>K;KK1K%K+KDKrK3K*K(K+K?K^KrK>K6K*K-KAK}K:K)K$K-K4K.K!K"K>KvKŒKKiKVKYKqKK¹K¸KŸK 
K§K§K¥K¨KªK¥KKŸK¡K©K°K¾K¾K¾K½KºKµKÀKºK·K­K¯K²K·K°K­K²K¨KžK•K•K˜K K K¡K£K£K§K¥K¥K§K¤K§K©K§K©K«K«K®K¯K´K±K±K¯K±K²K²K±K¯K°K´K²K¸K¸K»K¸K¿KÁK¾KÄKÅKÅKÇKÂKÄKÃKÃKÅKÈKÈKÈKÇKÉKÉKËKÍKÌKËKÐKÐKÎKÒKÓKÕKÒKÔKÐKÒKÒKÒKÌKÆK±K¥K‘KKjKaKUKNK;K2K'K&K'K'K.K+K4K7KTKOKGKwK¬K²K°K”KlKYK7K8KKK3K6K;KAKJK=K5K:KCK?K;K9KCKK1K,K,K-K-K1K=K9K2K,K+KGKoK/K'K(K&K8KdKuK=K)K&K-K]KgK*K+K0K8K2K&K$K.KdKŽKšKsK^KYKhK”K´K¹K¬K K©K¦K°K«K®K©K¥KŸK¡KŸK¨K±K½K¿K¾KºKºK¹K¿K¼K·K²K¶K²K°K´K¬K¢KŒK|K…K‹K“K—K–KžKŸK¢K¡K£K¤K¨K¤K§K¯K©K¨K®K°K®K±K±K²K±K³K¶KµK±KµK²K¯K¶K¶K·K·K·KºKºK¾K¾KÄKÂKÃKÄKÃKÁKÅKÄKÁKÈKÅKÇKÆKÆKÉKÍKËKÊKÈKÎKÏKÌKÒKÑKÔKÔKÔKÏKÏKÏKÍKËKÆK¶K¥K”KKjK_KRKGKK;K5K1K1K0K4K4K0K@K3K7K@K8K8K1K.K*K5KK:K8K@KFK:KEK>K6K8K1K3K2K.K3K;K/K/K7K3K9K6K/K,K*K-K3KBKTKhKvK€KŒKŠKK†K‡KK„K‹K–K˜KK K¡K£KK¥K¢K¡K£K K¡K¡KŸK¢K¡KŸK KK¢KŸK£KŸKŸK¥K¥K£K¥K£K§K¦K¦K¥K£K¢K¢K¡KŸK KœK¢KKžK KžK›K›K›K—K—K˜K”K—K–KKK‘K’KK’KKK”KKKK“KKK“KKKŒK‰KKKKKKŽKK‹KŒKKKŽKKK“e]qö(KiKiKhKeKnKcKcKhKcKaKbKfKaKaKaKdKeK]KaK_K]K]K]K_KWK_KfKiKrK}KŠKK‰K–KKžK¦K¥K¬K«K¬K­K­K®K¯K­K­K±K®K²K®K®K­K­K©K¨K¤KŸK•K’KŒK€KuKiK]KVKSKJKFKSKSKRKVKXKRKXKXK^K`K`K`KaKdKgKdKiKlKfKgKiKdKhKfKhKhKeKeKdKgKeKiKdKjKmKhKkKhKpKpKpKoKtKoKvKwKzKtKuKyKwKvKxKuKxKuKtK}K—K©K¶K™K‰KŸKK8K3K>KCKCKNKYKbKvKK†K|K}K‚K‚KK€KyKqKmKK~KqK^K4K.K2KMK^KGKwKWK"K#K)K%K/K;KZKKjK:K$K&K"K0K7K2K5KHKMKGK?KKCK-K(K,K(K%K*K(K-K1K0K2K:K2K/KWKGK$K$K&K,KTKeKlKK@K=KEKCKOKSK>K?K7K9K.K1K+K7K4K1K6K4K8K>K9K0K,K1K1K4KFKWKrK~KŠKŠKŽKŠKŠKKK…K‹K“KK¡K¢K¤K¢KŸK¢KžKŸK K¢K¡K£KŸK¢K KKžKŸK¡K¡KK K¡K£K£K¤K¥K¤K¢K¢K¥K¤K£K£K¥KŸKžKšKKK›KŸKœK›K›KK›KœK™K”K˜K–KšK•K’K–K“K‘K’K•K”K‘K‘K’K‘KK‘KKKŽKŒKKŒK‹KŽKKKŒKKKKK‹KŽKKŒKŽK“e]q÷(KgKgKdKaKgKcKfKeKeK_K`KcKdKdKdKaKdKbKeKdKeK]K`KYKYKZK_KkKsK~K‚K‹KK“K›K¡K¤K¥K¨K©K­K®K¯K°K±K¯K±K°K±K°K±K±K­K¬K¨K§K¡KK˜K’K‹KƒKwKkKcKWKQKPKDKKKSKQKXKWK[K`KaKZK`K`KcKgKhKiKhKiKfKdKaKgKaKYKgKeKdKeKhKcK_KeKbKgKhKhKmKjKkKmKkKuKsKvKsKvKuKvKxKyKuKvKyKyKwKwKuKzK–KÍKÍK½K¥KœKŠKFK$K&K%K&K&K'K+K>KnK‚K‡KˆK‚K}KKƒKyKmKiKrKqK^KTKEK/K>K?KbKFKZKmK?K%K#K$K+K4KOKuKKrK+K#K#K&K=K2K3K9K?K:K5K0K/K/K3K@K2K(K1K)K*K+K4K,K.K5K3K3K0K,KGK7K*K)K)K0KeKUKlK9K)K2K[K[K7K2K.K)KK#K;KyKŒK 
KxK_KdKqK—KÆKºK©K«K¦K§K°K¹K¹K·K«K£KšK›K–K›K¤K³K½KÃKÁKÀKÂKÅKÀK¾K»K¹K°K‰KOKDKGKTKYK]KeKjKnKhKtKyK}K~K}K„KˆKŠK”K˜KžK¦K¦K¬K±K¯KµK´K´K·KµKµK³K®K²K²K¯K¯K²K¯K´K·K¸K·K½K»KÃKÂKÃKÃKÁKÂK¿KÀK¾KÄKÄKÂKÃKÄKÈKÄKÉKÊKÉKÈKÊKËKÏKÎKÍKÐKÐKÌKÍKÈKÂK¹K®K¤K˜K…KpKRKAKK@KEK9K8K5K9K=K=K:K;KIKIKGKFKHK=K5K2K3K3K/K3K3K2K0K1K5K4K2K1K/K3K+K2KHK]KtK‚KŠK‹K‰K†K„K€K€K†K‘K—KŸK¡KŸK¢KŸK K¡KžK KžK KžK K›KKžK¥KK£KžKŸKœKžKK¢K KŸK K K¢K¤KžK£K¨K¡K¡K¤K¡KŸK›KœKšK›KžKœK™K›K›KžK›K˜K™K˜K–K˜K”K”K’K—K–K’K•K–K•K–K›KK’K’KKK‹KŽKKŽK‘KŠK’KKŒKŒK‹KK‹K‹KŠK‹KŠK‹e]qø(KbKbKgKeKeKbKgKaKeKdKeKbKcKcKgKeKeKcKaK`K`K\K`K[K[K_KeKgKuKKˆK‰K“K˜KšK¢K¤K¨K¦KªK®K­K°K°K±K¯K°K°K±K±K±K±K­K«K¬KªK¡KžK—K“K‹K€KzKkKaKXKXKHKJKRKNKUKXKWKTKYKZKYK[K`K`KfKeKfKjKhKgKjKiKhKlKeKgKdKfKjKgKbKdKaKgKjKeKfKkKdKgKhKlKsKnKrKoKxKyKrKwKvKwKwKxKvKyKvKwKƒK¯KäKÊKKoKsKVK8K%K!K!K%K%K)K%KIKŒKŠK—K¥K«K™K|K{KpKiKeKtKjKOK9K0K5K@KYKfKAKkK_K(K"K(K+K'K:K^KKKjK KK&K+KAK,K,K.K8K4K3K:K*K4K5K:K0K+K/K*K,K5K:K7K.K1K8K7K4K.KJKFK/K+K,K8K`KIKlK>K0K3K`KBK+K%K KK#K+KmK‹K™K‚KcK]KlKK´KÁK«K¥K¬K®K³K²K¿KÀK»K­KšKK™KœKžK¯K¸K½KÀKÃKÄKÆKÅKÀKÀK¿KªKxKUKSKWKXK`KaK`KfKcK[K`KhKdKdKVKfKnKtKzK}K‡KKœK£K¦K­K¯K³K³K¶K¶KµK±K²K³K°K°K¯KµK­KªK¯KµK¹K¶KºK»KÂKÂKÀKÁKÀKÂK¼K¿K¿K¿KÃK¿K¿KÃKÁKÅKÇKÉKÉKËKÈKÌKÎKÐKÎKÎKÎKËKÆKÇK³K®K¤KK†KpKdKLK?K;K6K-K+K&K'K(K(K'K*K*K/KEK`K8K>K›K»KµKžK‰KxKRK3K5K3K;KGKMKCK1K5KBK8K9KBK>KFKDKEK;K9K.K4K/K2K/K1K1K2K7K5K6K9K6K/K*K1K4K;KSKeKxK„KKŽK‰K†K„K~K„KKK—K¡K£K£K K¢KŸKžKŸK KœKžKžKžK¢K¡KŸKŸKžKœKKKœKžKšKžKŸKKŸK K£KŸK¡K£K¢K¢K¤K¢K¤K¡KŸK›K™KšKžK—KKKšK›KŸKšKœK™K˜K˜K™K—K”K•K“K’K•K—K”K”K—K”KKKŠKKKKŠK‘KŒK‘KKŒKKŽKŠKKŠKˆKˆK‡K‰K‡e]qù(KcKcKgKdKeKdKdKeKdKhKgKdKiKaKcKdKhKeKeKaKbK]K\K\K_K\KcKgKvK~KƒK‰K‘K—K›KŸK¤K¦K©KªK¯K¯K®K³K¯K´K³K±K¯K°K±K®K¬K¬K­K¤K KK˜K“K‹KKyKlK^KWKQKJKMKIKNKYKSKXKRKXK\K\KcK^KaKdK_KaKhKeKgKiKfKgKiKiKgKhK`KgKiKcKjKeKiKiKgKgKjKiKmKmKmKmKqKuKwKxKxKuKtKwKzKzKxK{KyKyKxKK¶KßK·KlK]K4K5K5K,K'K'K-K'K)K8KaKK˜K®KÔKÖK¼K{KvKjKUKQKlKWKFK2K1K4KK:K>K:K9K@K@KLK>KBK5K6K5K.K/K-K2K4K0K7K:K1K7K4K4K1K+K+K3KBKUKjK{K…K‰KŽKK‡KˆKK€KŽK™KœK¢K£K£K¡KŸK¢KŸKŸK KœK¡KžKKŸK›K KžKžKKžKžKK¡K KŸKœK›K K 
K¢K£K¢KŸKKŸK¢K¡K¡K£K›KšK™K™K™K›KžKžKKœK›KšKšK›KœK›K™KšK™K˜KšK˜KK˜K”K•K–K•K•KKKKŒKKKKŽKŒKŠKK‰KŒKŽK‹KKŒKˆK‡K†K‰e]qú(KcKcKfKbKfKeKdKdKhKdKeKgKjKdKcKaKdKdKdKcK^K^K_K[KXKZKbKfKrK}KƒKŽK’K—KšK K K§K¨K¬K¬K¬K­K±K°K´K¯K±K®K²K²K±K²K°KªK¨K¢KžKœK“KŽKKtKiK_KSKPKJKNKPKPKOKWKWKUKXKaKeKaK]KcKgKfKgKdKhKeKiKeKdKiKfKhKmKlKjKgKgKfKhKhKhKeK`KgKjKiKiKjKnKsKyKsKtKuKyKuKyK{K{KzK}KyKzK}K†K²KÑK®K‡KdK3K6K4K/K-K&K(K0K;KUK{K‹K–K¹KËKÀKºKKoKYKIKDKTKOKAK7K4K?KAKaKHKKxK,K(K-K'K)K=KYKdKxK—KrK'K!K$K+K4K#K*K4K5KKuK—KŸKnK[K`K{KªK¸K­K¥K¬K¬K´K²K¸KÀKÂKÄKµK¥KšKœK˜KŸK©K¶K¼K»K½KÂKÃKÂKÁKºK™KyKvKzK‚K‰KK‹KŽKK‰KKyKuKnKkKdK[KPK?K?KEKGKJKLKYKjKoKzKKžK¥K¨K°K³K°K¯K°K²K´K³K´K°K¬K¬K¬K¯K²K´KºKºKÁKÄKÂKÂKÂK¿K½K¸K¼K¼K¾K¾KÁKÂKÂKÅKÈKÊKÇKÇKÈKÌKÍKÌKÆKÃK¼K®K¦K›K‹KKzKoKiK^KUKCK>K9K8K/K+K'K)K&K(K+K)K,K0KK?K=K;K8K3K8K;K1K5K,K:K8K4K0K5KKXKSKCK=K=K6KWK]K[K‚KHK+K(K.K%K4KWKYK[KVKKoK)K!K!K(K'K&K*KK=KDK8K3K(K(K,K(K+K*K/K1K;KDK9K-KyK¬K¼K©K’KŒKaK5KK;K;K8K4K7KK6K8KBK[KOKlKTK.K0K.K6K-KFKaK^KUK;KKK.K"K%K(K'K'K3KMK?KBK:K,K?KSK*K3KJKTK4K)K6K>K*K(K#K+K(K K'K)K4KPKfKYK=KiKaK%K3K]K:K/K@K#K(K'K*KPK…K¢KKrKdKsKšK¿K½K¦KžKªK¯K²K¸K»KÁKÆKÄK³K¦K›K”KœK¥K©K³K»K½KÀKÅKÃK½K±KˆKvKyKƒK‹K‘K‘K˜K™K™K—K’KK¢K›K–K•KKKzKmK`K]KRKFKMKCKAK?K9KFKOKYKgKK–K K©K¯K«K©K¬KªK¨K«KªK«K°K¯K¯K¶K¹K¼KºK¿KÀKÀK¿KÀK¾K½K¹K½KÁKÃKÂK¾K¿K¾KÃKÄK½K½K»K²K«KšK‘KKrKjKXKOKTKBKKKUKVKZK^KXKVKVKVKGK6K/K.K1K,K,K+K0K/K8K@K;K.KuK®K·K¥K“K‰KuK@K>K4K8K5K;KOK=KCKPKEKAKBKGKAK=K7K5K+K-K5K1K2K/K1K-K-K3K9K:K8K2K1K6K1K:KOKlK‚K‹KŽKˆK‡KƒK}K‚K„KK•KK¤K¢K K¡K¤K¡K K K KŸKžKšKKœKœK K›K¡KžK¡KŸKžK›K™K›K›K™K›KœKšKžKK¡KžKžK¡K KžK KK™K›KœKœKœK™K›K›K›K™K›KšKšK—KžK™K™KœKœK™KšKžKœKžKšKšK—K˜K›K’K•KK‘K’KKK‘KK‘KŽK‰K‰KK‡KƒK‡K…K‡K…K‚e]qý(KhKhKfKiKfKdKfKdKhKjKgKhKhKbKcKbKiKeKeKcK`K_K^K^KZK[K_KgKnK|KƒKŠK“K˜K•K™K£K§KªK¯K«K¯K­K­K±K¯K®K±K°K±K²K´K°K¯K©K©K¡KK–K’KK‡K~KiK]KSKQKNKTKUKRKSKRKXKWK^KVK\KbKcKcKeKaKhKlKjKeKhKfKhKfKdKaKcKaKeK]KeKfKcKlKeKcKfKfKmKnKlKlKiKpKoKrKtKuKrKyKsKvKuKK‰K’KŽKKŠK‡K‡KK¦K’KHKAK8K0K,K#K&K)K%KRK€K€KK‡KhKZKXK\KbKIK=KLK[KTK9K5KBK`K_KQKZK5K0K*K*K'KEK_K]KgK@K0K~K„K?K 
K&K$K/K3KEKaKLKGKK>K4K3K4KVKBK?KDKAK=KDKCK=K=K6K4K1K,K7K2K0K6K4K/K4K8K5KCK8K7K7K/K8KKIK?KEK3K4K9K/K.KBK5K3K=K4K1K4K4KK=KJKhKxK‡KKŒKŠK‚K…K‚K‡K‘K˜KŸKžK K K¡K K KžKŸK¢K£KK¡K›K›KœKKžKKŸKžKœKKŸKšK˜KKœK™KšK™KœKœK¡KžKžKœK¡KžKŸK›KŸK›KK—KK™K›KœK”K–KœKœKœK˜KœKKœKŸK›K˜KK™KšKœKK™KšKšKKšK—K˜K–K—K™K–K˜K“K”K‘KK‰KŒKŠK…K„K†K†K†KK‚e]r(KcKcKhKcKeKhKeKiKjKlKcKkKfKkKcKbKeKbKcK`KaKXKZKWKSKUK^KjKrK|K€KK’K”KšK›K¤K§K¬K«K¯K¯K­K±K¬K®K°K­K±K¯K³K´K¶KµK®K©K¡KœKœK•KŒK…KzKrKbKXKQKKKHKHKUKSKXKXK[KZKfK^K^KbKgKiKiKeKgKcKcKeKgKeKcKeKdKfKcKhKcKdKfKhKaKhKcKcKcKdKhKfKlKoKmKeKnKmKkKqKpKzKK–KšKœK›K™K–K”K’KK‹K°K¸K‰KyKVKlK¥KŒKfKPKGKgK„K}K|K”KKvKWK;KYKRK7K1K+K3K:KHKSKmKxKeKGKAK?KDKKKYK{KcK7K#K$K&K?KqKKDK0K/K)K*KKDKRKqK}K‡KŠKŒK†KƒK~K}KŠKŒK™KK¡KŸK K KŸK¡KŸKŸKœKK›KžKœKœKœKšKŸK›KžK›KšK›K›K™K›K™KšKžKšK™K›KKKKžKK¡KŸKœKžKKKšKšKœKžK™KšK™K›KšK™KšK™K›K™K—K™K›KœKKœKKžKžK›KžKK K›K–K™K‘K–K˜K—K›K–K•K”KKK‰K‹K‰K‰KˆKˆK†K…Kƒe]r(KcKcKdKiKgKhKfKiKjKiKbKlKfKhKeKdKfKfKaK\K^KZKUK\KXK[KaKhKtKKƒKK’K”KœKžKŸK¦K¯K­K¯K¯K¯K°K¯K±K®K±K®K±K¯K±K³K­K¬K¨K¥K¡KK”K‘K‰KyKpK_K\KPKKKLKLKNKTKSKRKXKZKYK]KfKbKaKhKdKfKiKdKbKhKhKhKfKbKiKhKcKeKaK`KkKfKeKcKdKgKjKfKjKhKnKiKiKmKnKkKkKtKsKK•K—K™KžKŸK›K–K™K™K•KK±KÂK³K¬K‹KŒK¬K KˆKxKxKƒKzKiKuK‚KƒKnKK(K3K:KGKMKRKZK{KqK=KFKCKMKHKPKeKsKaK,K"K"K#K7K[KkKWK,K,K1K*K0K[KWK5K/K*K%K#K$K0KMKgKlKbK4K(K+K%K(K&K*K2K2K.K-K&K,K*K%K(K-K;KUK&KKK"K-KbK”K¡KsKhKkKŒK¹KÁK°K¤K¬K§K±K¹K»KÂKÄKËKÈKÃK¹K±K¨K¨K¦K²KºK¼KÄKÌK²KuKUKdK`KhKlKkKeK^K_KbKfKfKiKlKjKWK>K/KEKoK‘K„K|KSKDKeKpKIKhKnK|K}KkK_KuKtKoKmKqKpKfKmKyKKŠK“K›KžKšKžK£K£K¢K£K¨K«K²K¸KÀKÄKÄKÄKÆKÄKÁK¾K»K½KÀKÀK½K·K¬K¤KšK‹K|KzKzKKzKoKnKvKpK_KFKWKKKEKZK_KUK]KNKKKMKQKJKCK>KAKMK9K*K)K'K-K-K9KBKLK-KdKœKºK®K‘K€KnKcK_K.K-K/K3K7K^K;K>KDK?KAK?K:K;K2K3K,K-K-K5K7K6K7K5K8K7K4K0K.K2K5K=KHK^KxK„K‹KK‰K†K€K€K€K‡K”KŸK K K¢K KK KžKœK›KžKKšKŸKžK›KšKœKK›KœKœK K™KžKœK›KKžK™KœK›K¡KšK›KžKžK KžK KŸKœKšK›K›K›KšK›KKšK˜K›K—K›K™K˜K›K•K–KšK›KšKœKK KKŸKœKœKŸKšK—KšK–K”K–K˜K•K™K™K—K–K‘K‘K‘KŽK‰K‹K‰KŒK‰K†K€e]r(K_K_KeKbKdKcKgKmKhKkKeKlKbKeKeK_KdKbKaK^K^K[KUKWKRKZK_KgKsK}K€KK’K™KšKK¢K¦K¬K¯K¯K°K°K¯K­K¯K°K°K±K°K­K°K°K¬K¬K¥K£K 
K›K–KŽK‹KxKiKcKUKOKPKJKMKNK\KUKTKTKWK\KeK^K\KaKdKcKeKiKbKfKfKeKcKcK`KaKiKaKbKbKhKiKdK^KaKcKeKiKdKbKdKjKgKhKiKpKqKyK…K‡K‰KK’K–KžKžKK™K–K•K–K“K¨K¼K´K·K¬K¥K®K§K›KŽK†KfKVKRKUK\KHK:K,KKdKxK`K%K K"K"K1KTKRKnK1K0K-K-KK_K`KlKWK4K%K0K+K*K+K-K-K4K*K.K+K+K*K.K'K'KDK?KKK#K?K‚K¢KKjKiK|K°KÆKµK¤KªK¨K­K°KºKÀKÅKÈKÈKÊKÀKµK³K©K®K­K·K½KÆKÊK®KeKTKUK_KbK`KdKgKdK`KJKKKIKCKPKHKPK:K6K/K8KQKiK^KKK0K.KCKEK@KVKpKzK€KWKeK€KwKzKuKsKtKtKwKzK€K†KŒK“K™KœKK¢K KKŸK§K®K°K¼K¾KÃKÁKÃKÆKÆKÅK¿K½K¿K½K¿K¾K·K©KžKKŠKˆKK~KˆKxKZKQKXKIK7K6K7K9K9KAKGK@KAKEK?KEKLKFKBKDKIKAK.K.K*K%K'K.K6KHKLK-K\KœK·K±K“KtKpKgKWK0K1K0K7K;KTKK7K;K8K4K2K,K2K5KKFKGKBK@KFKNK3K(K+K0K(K$K+K-KHKSK+KVKšK¶K«K›KpKwKrKSKDK4K2K/KKdKyKŽK‘K‹KŠKˆK‚K‚K†KK–K›K¡K KžKŸK¡KžKžK›KŸKŸK›KK™KœK›K›K˜K—K˜KK›K—KšK K›K™K¡KKK˜K›KœKKKœK›KšKšKœKšKKœKžK™KšK›K›K˜K™KžKšK—K™KšK›K˜K—K•KšK›K›K˜K˜K™K™K™KK™K›K—K˜K–K–K•K‘KK”KKŽKK’K‘KKŽKKŒKŠK‰K‰K‰K‡K†K‡e]r(KcKcKiKgKeKbK^KbKaKgKdKdK`K^K`K]K^K^KbK\K]KYKVKRKRKTKRKbKnK{KƒK‡KŽK•K›KŸK¥K©K©K¨K¬K³K®K°K³K°K±K³K²K³K±K¯K®K±K©K£K KžK—K•KŽK‰KyKnKcKYKYKPKJKGKJKUKSKTKTKZKYK^KWKWKeKaK^KeKiKjKdKhKbKeKbKfKcK_KcKfK`K`KdKcK\K_K\K]KZKZK_KeKwK–K K¢KÈKÎK¾K}K†KŽK“K“K–K’K–K™K™K›K™K›KKK•K}KkKRKFK;KOKjKeKDK(K&K$KK$K)K?KeKtKiK[KNKPKYKkK‰K]KXKdKPKFKLKlKPKIKaK|K|KUK&K&K.K'K0K_K>K)K7KVK:K/K2KYKsK/K'K*K(K$K'K%K%K K0K5KpKyKqKrK`K@K&K%K#K'K+K/K3K4K*K K&K*KKK&KZK‘K­KwKgK{KšKÁKÃK¦K¦K¬K°K­K¶K´K¹KÄKÈKËKÃKÁKÂKºK¼K¾K¹KÂKÆK¢KXKFKAKAKKKK:K1K0K0K4K2K3K7K7K;KAK6K5K7K@K?K3K2K.K4K0K,K-K$K,K0KFKEK/KFK”K§KµKŸKƒKoKfK^KGKGKJK1K4KLK>K@KK;K2K/K0K4K8KAKK?K7KTKFK;K;K=K:K4K1K/K/K/K3K4K3K0K7K.K1K2K>K5K1K.K)K+K5KSKmKƒKŽKK‹KˆK„K…K‡K’K™KŸK K¡KK KžKŸKK›KœK›K™K—K—K™K™K›K—KšKšK›K—KœKšKK™KžK—KžK¢K™K™K›KžKšKKšK›KŸKžKœKK™K›KšK™K—K›K›K–K›K™KšKœKšK–K˜K—K™K™K•K–K”KœK˜K›K–K•K™K™K•K“K‘K’KKK‘KŽKK’K‹KŠKŽK‹KKŠK‹KŠKˆK‹K‹K‡K‰KˆK‡e]r(K]K]K\K]KbKaK]KXK\K]K\KaK^KZK[K]K^K[K_KUKYKSKPKLKMKNKTK]KmKsKK‡KK•K—KœK¤K«KªK®K­K°K¯K¯K­K¯K°K¯K¯K±K°K¯K°K®K¨K¦K 
KK™K“K‹KK{KrKbKUKNKFKLKHKKKNKSKTKWKYKXKXK[K\K`K\K`K_K_KbKgKcKaK]K^KaKaK`K_KaKeKaK_K_KYKVKXKWKSKUKmK®KÎKÁK¯KÐKÑKÅKˆKyK‹KŽKŽKK‘K–K•KœKžKžK’KŠKˆK‰K‹KtKWK7K3KFKNK;K/K(K!K"K!K&K4KxKKmK[KFK2K/KTK~K_KFK7K9KZKXKNK^KZKEKQK„K“KpKLK*K#K%K2KKnKzKpKmKeKRK:K+K'K'K-K)K"K"K&K!K!K3KjK“K§KqK`K~K¬KÉK½K§K©KªK«K¯K³K¹K¹KÃKÉKËKÉKÂKÄK¾K¾KÃKÁKÅK¦KYKCKKKIK?KKEK^K€K¢KÁKÀK¯K•KwKbK;K+K'K&K(K*K,K3KQKnKK‡KK~K}K€K~K†KˆK…KKŒK“K•K˜K™K¡K£K§K¯K¹K¿KËKÌKÒKÔKÑKÓKÏKÅK§KpKAK1K2K3K-K.K*K0K;K9KQKqK\KKKK–KˆKsK_K@K.K'K)K+K0K7K8K6K:K7K7K.K)K0K4K2K.K*K$K.K.KOK@K-KBK“K K·K¨K’K`KSKfKLKBKAK;K?KVKPK=K>KKKzK}K?K-K5K+K,K(K*K+K)K3K0K/K1K.K/K)K*K,K@KHKAK8K:K%K&K'K!K%KRKK¡K~K`KlKœKÆK½K¤K­K¯K±K·K²K±K·K¹KÃKÎKËKÈKÆKÁKÈKÅKËK·KkKQKRKXKUKNKDK9K1K6K,K*K,K3KAKRKZKQK2KKhKlKTKKKdK{K|KxKzKwKyK€K‡K‚K†K‹K‹K–KK‘KšK K£KªK³K¼KÇKÐKÒKØK×KÖKÓKÁKK\KCK2K2K5KKPKGK-K6K‹K¤K²K¨KŒKeKeKVKlK@K?KIK:K_KBK=K?K;K8K6K-K.K/K/K0K+K6K=K3K;K2K.K0K0K/K1K:KJKaKxK‰KK‘K‘KŒKŒK…KK’KšKžKŸKœK¡K KžKšKKK KœKžKœKK•K—KœKœK™KšK™K˜K˜KœK™KšKšK—K˜KžK›KœK™K˜K˜KšK›K™K—KšK˜K™KšKšK™K›K™KœKšK›K™K”K•K˜K˜K™K˜K˜K—K•K’K•K’K™K•K”K–K˜K—K‘KKKKK‹KŽKŽKŒK‰KˆK‰K…K‰K‰K‡K…KˆK‰K„K‰KK…KƒKƒK„K‚e]r (KbKbKcKbK_KfK\K`KbK]KaK`K[K^K^K[KZKWKUKSKQKSKQKJKHKLKQK_KpKxK…K‡K‹K•KšK K£K©K®K°K­K´K³K²K¯K®K¯K²K°K´K¯K´K³KµK²KªKŸKšK™KKŒK‚K{KkK_KZKMKIKLKNKQKVKWKSK_K`KaK`KcK]K^KdKcKdKeK`K]KeKbK\KbKcKfKaKfK`KaK^K^K[K\KZKVKXKRKIK^K KÀK´K¾KÆKµK§K“KˆK‰K’K“K—K–K”K–K–K–KKgK‚K˜KšK†KHK-KLKQK+K,K%K&KBKHKUKmK\K>K.K0K&K7KMKfKGK@K/K(K$K)K2KRKVKƒKwKWKZKRK‚KqK5K4K5K.K*K.K,K,K%K$K(K5K+K,K,K4KfKKJK2K>K5K)K'K)K(K.K.K/K,K:K2K2K)K*K*K4K:K4K*K)K/KIKKK9K;KtK˜KŽKgK]KŽKÂKÉK´K¤K¯K¶K³K¹K¹K¸K·KÀKÌKÏKÊKÇKÈKÉKÄKÉK½KzKPK[K^KVKQKDK8K9K5K4K*K.K4K8KFK\KgKZK2K9K^K]KLK/K(K*KDKpKMK8KBKKÌKÌKÆKÆKÆKÁK¬KKcKGKEK]KƒKnKTK[KtKzKuKtKqKsKˆK‚K„KˆK‹K‰KKŒKK™KœK§K¬K¶K¿KÇKÓKÚKÛKÚK×KÌKžKoKVKEK8K2KAKCK?K6K-K&KK+K4KˆK§K¯K©K”KrKYKWKfKOK@KDKHKUK?K?K;K:K6K3K1K,K/K9K=K1K.K1K4K7K-K*K+K/K2K5KBKPKmK{KŒKKKŠKŽK†KŠK‹K™K›K£KŸKœK K›K 
K›K›KœKŸKKKšK—K™K—K˜K˜KšK™K˜K—K˜KœK—KŸKšK™K˜K—KšK™K˜K—KœKšK—K—K–K˜KšK™K˜KšK”K—KK˜K—K˜KšK—K™K™K˜K—K•K—K”K”K‘K‘K‘K“K–K•K”K“K•K‘KK“KŽKKKKŽKKŒKKˆK‡KƒKƒK†K†K†K‚K„K…K‚KKKKƒK‚e]r (KfKfKdKbK`KcK]KdK]K`K_KdK]KcK]K\K]KVKUKXKSKSKOKJKGKLKWK^KmKvK†KˆKŽK”K™K K¤K¨K­K°K²K²K°K°K­K±K´K±KµK²K¶K±K±K²KµK¨K¡KŸK›KK‹KK{KqKbKTKNKGKIKJKOKUKVKVK[K[KcKeK\KdKcKdKcKgKfKeKdKaKcKcKeKcKlKfKfKdKcKeKaK]K\K\K[KTKCK:KLKK¸KœK•K®KºKÂK°K¤K—K–K“KKšK–K•K•K•KyKiK’K™KŽKfK1K)K+K*K$K&K)K-KKKpKiK]KEK*K%K/K0KNKYKK`K;K+K;K„K«K±K®K–K{KHKUKhKWKFKEKWKWK7K;KBK6K6K2K3K0K2K1K0K.K3K1K2K0K1K+K.K0K7K8KAKXKrK€K‘KK‘KKˆKK‰KK–KšKK¡KKŸKKžKžKKœKŸKK K K™K›K™K–K—K–KšK—KœK–K—K˜K™K™K—K–K˜K—K˜KšK™K™K—K™K—K˜K—K™K•KšK˜K–K™K›K˜K›KšK—K˜KšK–K–K›K“K”K–K•K“K•KK”K”K“K’K’KK’KŽKKŽKKKŒKŠKKˆKˆK…K…KƒK†K‡KƒK‚K‚KK}K„K{K€K‡K‹KŽe]r(KcKcKcKbKbKjK]K`KcKdK]KdK_KZK]K\K[KVKWKYKUKTKOKOKIKJKRK_KlKvKƒK…KŒK—K™K¡K¨K¥K¬K±K°K®K°K±K±K¯K³K²KµK¶K²K¶K³K­K®K¨K¤KžK—KK‹KƒK{KpKgK\KPKHKHKJKNKVKWKSK`KbK]KiKcKeKfKbKfKeKgKaKdKkKhKeKeKgKkKbKbKbKfKfK^K`K`K_KWKFK2K2KJK—K¹K…KUKSKlK¢K¾KÄKµK©K–K—K”K—K™K–K‰KK]KmKsKVKAK%K'K(K,K'K"K/K9KuKxK[KLK7K)K'K9KNKWKIK5K7KLKNK0K/K2K;KCKfK€KcKNKQKeK•KpK9K/K.K/K-K*K/K0K1K,K8K.K+K)K+K1KEKzKxKFK@K,K)K*K,K*K-K,K*K0K:K.K*K&K'K(K)K+K%K'K#K%K%K.KJKiK„KrKVKlK“K¾KÀK¨K¡K™K•K™K¨K­K³K¾KÃKÊKÊKÊKÅKÁKÆKÇKÇK“KiKbKgKtKnKiK\KTKRKNKCK;K.K+K-KEKJK]KuKyKcKCK:KTKbKTKTK[KSKHK9KBK~KÅKÑKÎKÐKÒKÍKÎKÈK·K•KmKLKFKlK~KjK\K`KpKpKmKpKvKyKzK|KK…KŠKˆKK•KœK¡K¥K±K¹KÁKÈKÔKØK×KÔKËK¨K|KZKUKgKaK=K=K^KdKYKKKCKWKiKUKKK:KWK¢K°K¦KKKtKNK>K;K-K,K+K(K+K-K/K1K/K2K)K-K1K*K'K+K,KK4K4K/K-K3K3K4K7K:K7K5K4K.K-K.K0K-K6KRKhKKŒKK‹KŒKŠK‡KƒK‹K•K—K™KKK›KšKœKKŸKœKšKKŸKœK–K˜K˜K˜KK–KœK™K–K™K™K–K—K˜K—K–K˜KšKœKK˜K›KšKžK™K˜K˜K™KKœKšK™KœK•K—K–K—K›K˜K–K–K“K”KŽK“K—K”K—KŽK”K’K•K’K‘K‘K‘KKK‘KKŒK‡KKŒK‹K‰KˆK€K„K€K†K€KKKK€K‚KŠK‹K‘K“K›K£K e]r(KiKiK`KdKcKcK`K`KbK_KcKeKhK_K\K\K\K\KXKUKQKPKNKJKDKGKOKWKgKxK€K„KK•KšK K£K§K©K¯K²K¯K°K±K¯K±K´K¶K´K³K¶K°K²K±K±K¬K£K 
K—K‘KˆK‡K{KrKdK\KUKNKIKTKSKWKRK[KZK`KbK]KaKeKfKiKlKkKdK`KfKkKkKlK]KgKkKhKlKiKrKzKkKUKGKHKAK:K0K*K+K?KšK¬KqKnK‰K–K­KÁKÕKÍKÓKÆK¶K­KšKlK9K*K(K*K)K-K1K#K$K'K/K0K=K]KaKRKEK8K7K3K0K=KCKRK>KJK8K=KDKVK:K8K>KHKUKƒKˆKvKaKWKK™KuK@K.K'K1K*K0K4K/K/K6K@K.K,K(K,K(K:KTKuKMK+K,K4K0K5K,K-K+K+K5K4K0K*K/K+K'K'K+K'K.K(K%K,K@K}KŠKgKQKfKŠK¦K¢K’K—K£K¨K«K»K¹KÁKÆKÅKÈKÇKÅKÄKÂKÆKÃK˜KlKcKpKrKzK~KK}KtKuKyKoKeKaKKK2K+K3KDK\KiKrK}KqKYK>K9K6KKK>KHKUKNKWKPKXKŒKvKhKXKQKtK‘K}KHK5K$K(K-K:K0K0K2K9KKVK]KYKRK\KPK;K;K8K8K6K.K0K.K7K5K3K3K?K:K5K0K-K,K,K+K0KK6K3K0K6K6K3K5K0K.K4K8K3K3K0K[KXK6K1K9K[KK«K¿K£KˆKgKEKKKdKNKaKSKNKHK8K1K-K1K2K-K2K-K,K0K5K7K:K:K/K-K2K/K+K3KMKgKK‹KKˆKŠK†K‡KKK–KšKžK¢KœKKœKžKŸKœKœKšK›K˜K—KšK™K˜K›K—K—K•K˜K—K–K˜K—K›K¢K¥KšK™K”K–K™KšK–K–K›K—KœKšK˜KšK—K˜K™K—K”K•K–K–K•K™K’K–K”K•K•K‘K’K”K”KKKKKŠKŽKŠK‡K‘KŒKŒKKK†K†K„KˆK‚KƒKKK‡KK“K™K›K K¢K¦K©K­K­K²K²KµK·Kºe]r(KcKcK`KeKfKfKbKcK`KcKeKaKcK[K[K]K\KWKZK[KVKOKIKJKKKJKSKYKkK{KK‰KŒK“KšKžK¤K§K¬K­K®K°K°K³K´K²K³K¶K¶KµK´K²K´K³K®K¬K¤K£KšK”KˆK…KKrKgK]KVKRKTKUKRKXK\K\KYKcKeKaKgKjKjKmKoKmKiKoKmKkKlKrK|KjK@K8K/K*K*K+K6K9KJK~KxKxK„K™K˜K›K¨K¶K½K«KžKµKØKëKÚK½K‚KSK=K,K3K:KnKWK*K(K'K-K.K/KAK[KyKƒK€KsKkKVK/K,K/K&K#K(KBKdK6K>KYKaKTKMKAKWKMKJKhKKvKxK1KOKtK¥KƒKiK[KGKKK2K-K.K%K.K9K,K'K$K'K)K1K4K-K1K-K0KK8K8K5K:K8K1K0K2K6K2K3K3K6K[KVK8K8K8K\KK¥K½K°K‹KoKSKUK]KTKeKGK?K>K3K5K-K.K)K/K,K-K,K1K/K7K5K8K.K+K(K0K/KCKYKuK„KŠKŒK‰KŠK‡KKK“K›KœKK K›K›K˜KšKœKŸK™K—K™K•K™KšK™K–K™K™K“K•K™K›KšK˜KšKšK™K¢K¢KœK–K›K™K›K–K˜K™K›KšK—K›KšK™K˜K–K—K”K–K–K•K™K“K“K’K”KK‘K‘KK’K”KK’KŽKŽKˆK‹K‰K†KŽKŒK‹K‰K‰K…K‡KˆK†K„K„KˆKK˜K›KŸK¢K¢K¥K¬K°K°K±K³K¶K¶KºK»K¿e]r(KhKhKgKhKgKgKdK`KcKbKcK`KbKcKYKYK]KZK[KTK]KPKLKKKIKAKQKYKiKwK‚K†KŽK™K˜KK¤K¥K©K¬K±K³K³K³K´K¶K²K´K·K·KµKµK³K¶K¯KªK¥K¡KŸK”K‰KŠKKqKfKaKYKVKSKXKUKYK[K\K_KdKiKcKfKmKnKlKoKqKjKnKnKrKgKzK¡KUK.K1K,K+K.K2K;KFKaK‡KKK›K¡KžK•KŸK°KÄKÀK¥KÍKÍK´K“KSK8K2K.K0K.K4KTKdK@K.K3K:KJKgK{K…KˆK„KhK]KlK2K$K!K+K K#K1KMKnK(K7KYK[KOKNKHKNK^KPKcK‹K†KxK7KTKK§KuKtKpKJKAK2KFKRK$K2K+K(K$K%K+K%K,K/K5K2K2K1K1K8K2K.K9K0K1K/K*K8K/K'K%K,K"K 
KK#K,KRK„KŸKmKVKlK›KÂKËK¯K£K§K®K¶K¹K¼K¾KÈKÉKÊKÆKÄKÅK¾K”KaK\KlKwK{K|K{KK†K~KK}K„K“K“K‘KKKŽK‡KˆKKpKmKrKiKlKxKrKoKtK…K…KŽK—KšK—KžK©K§K¬K²K²KªK±K´K»K½K²K¡K›K˜K”KˆKˆK‰K„KƒK~KyKwKyKsKxKK‚KK€KyK‚KK”K—KK©K¶KÁKÏKÛKßKßKÖKÉK©KšK“KK|KvKyKuKKŒK˜K“K“K—K—KœK KKžK—K‹K†K€KqK^KLK9K,K0K?KKKCK>K9K8K;K8K4K0K/K1KJK7K2K8K9K[KUK/K0K5KXK›K K¼KµK’KpKUKLKWKYKlKCK6KDKAKGK/K.K+K/K:K-K+K2K4K;K5K:K:K1K+K6KBKLKbKvK‡KŽK‘KŠKŒK‡K†K‘K’K—K¡KŸK KœK›K™KKœKžKžK™KœK•K–KœK–KšK—K—K–KK˜K•K—K—KšKœK™K¦K¨K¨K–K™KšKšK•K™K™KšKžK›K›K—K—K“K™K˜K–K–K–K–K”K“K•K”K’K“KK‘KŽKŒKŽK‘KKK’K‹KˆKˆK‹KŠK‹KK†K…KˆK‡K‚K}K‡KK‘K™KK¡K¤K©K«K­K³KµK¶K·K·K¸K¹K½K½K¿e]r(KkKkKgKgKcKcKgKbKaKhKjKcK`K]K[K\K[KWKUKRKTKPKMKJKHKHKOK\KeKtK€KƒKŽK•K™KžK¦K©K«K­K²K°K²K²K´K´K·KµKµK·KµKµK´K´K³K«K¤K£KK”KK‰K{KpKiK]KUKRKPKPKVK\K`K`KbK_KfKiKkKkKjKpKqKmKoKpKpKkKmKKªK>K%K.K$K*K7K?KNKcK€K™KœK’KK”KKžKšKK¤KŸKšKÄK³K‰KMK/K4K/K+K*K%K1KHKqK_KPK_KkKtK„K…KlKSKIKGK^KUK+K$K'K$K(K)K5K]KyK-K4KaKYKQKKKOKNKNK]K^KK•K{KRKeK‚K©KpKoKsK@K8K:K`KIK+K/K.K0K*K#K)K*K/K,K7K-K0K/K0K0K+K*K+K2K,K:K-K9K(K$K%K"K$K#K"K"K9KwKœK‡K_KaKK·KÊK¹K¦K¨KªK±K¶KºK¾KÃKÊKËKÉKÂKÂK¿KŠK^K_KhKuKwKK‚K|K‚KŠKK‡K‚K…K’KŽKKKKKKK†KxKvK}K~KK~KlKmKkK|KoK`KyK…KˆK’K–K‘K“K§K KKŸK¢K¤K¯K­K¬K«K¯K­KŸK”KŒKƒK~K€K|KKtK{K‚K{K~KK‚KƒK†KK’K•KK«K·KÀKÐKÛKßKâK×KÉK³K­K®K¥KŸK 
KK…KˆK‘K–KŽK…KKK™K›KKKˆKKuKbK[KNKAK2K/K8KTKSKLKFK;K7K5K>K9K4K)K1KHK7K6K4K9KSKKK,K.K4KSK•KœK¸K¶KKK[KMKNKYKfKJK7KDKBK3K,K2K2K2K0K/K)K(K2K:KK=K?K@KCK?KCKXK[KUKFK7K8K7K9K4K/K-K0K7K4K5K6KCKSKPK4K0K2KTK—K™K´K¹K“K‚K^KUKEKUKcKSK@K9KK3K2K;KQKMKTK3K,K6KOK‘KšKµKºKŸKKoKZK:K[KcKNK>K2K;K4K3K3K5K6K1K'K/K6K>K@K1K-K-K6K4KEKeKpKKKŽKŠK…KˆK„KŠKK–KžK¡K›KžKŸK¡KžK™K™K›K™K›KšK—K—KšK™K–KšK›K˜K™K—K–KšK”K•K—K˜K™K–K–K—K™K–KšKšK“K–K”K˜K–K—K˜K˜K–K•K”K™K˜K—K’K’K•KKK’K‘KŽKKŽKKŠK‰K…K†K†K„KˆK…K‹K…K„KˆK‹KK–KœK¢K¢K©K­K±K²K·K¸KºK¸K¸KÀK½K½K¾KÀK½K½K»K»K½K»K¾e]r(KgKgKhKiKgKkKjKdK_KbKgKbK[KYKYKVKTKVKWKVKSKRKKKJKGKHKGKUKfKtK€K…KK”K›KK¥K¦K«K®K¯K³KµK³K±KµK¶KµK·KµKµK»K¼K´K³K®K§K¦KK–KK‡K€KuKkKeKWKLKOKWKTKUK[K[K`KdKgKkKmKiKmKpKqKrKsKnKmKoKnKpKŽKƒKaKoKuKxKŠK‰KKrK|K„K“K—K‘K‚KsKNKAKJKYKTKQKZKYKIK9K8K1K'K*K,K0K.K%K'KKyKœKKsKkKoK–KÁK²K«K¦KªK±K·K¼KÀKÊKÌKÏKÌK¶K`KGKQK\K_KgKnKrKzKzK{K‚K‹KˆKK’K”K™KšKšK˜K—K“K–KŸK”KœKœKŸKšKKšK•K—K‘KŽK’K‘K“KKŽKK“K‘K’K˜K–KœKK K¡K¨K¨K§K£K¤K¤KžK¢K£K§K K’K†KƒK…K„K„K†KŠK‹K‡K„KKK|KƒK|KK„KŽK K¥KªK¼KÈKÐKÕKÞKÞKÖKÈK¼K»K¸K­K¨K¢K£KœK–K•K”KŽKKˆK„K€K~K|K|KwKsKoKtKlKhKmKnKmKhKfKUKQK@K;KDK;K/K)K*K9K6K-K=K=K=KFKSK3K6K5KFKŽKœK¬K¼K¦K‚K†K_K9K:K^KBK7K9K,K8K:K/K8K3K-K1K2K?K;K7K-K'K(K)KBKUKsK‚K‹KŠKˆK‚KˆKƒK‰K“K˜KKKKšK›KK›KšK˜K˜K™K›KKK–KšK–K–K•K“KšK–K™K˜K•K™K–K—K–K—K•K–K“K–K–K—K™K™K–K–K•K˜KšK•K™K—K•K‘KK’K‘K”KK‘KŽKŒKŠKKŽKKŽKKŠK‰KˆK…K†K…KƒK~K„K–K™K¡KªK¯K´K¹KºK¼K¾K¼K¿KÂKÁKÁKÀKÀKÀK¿K¿K¾K½K¿K½K¿K¾K½K¼K½K¿K¿e]r(KhKhKbKcKcKbK]KaK^K]KZK\KYKZKWKSKQKTKTKKKMKFKAKKLKIKFK5K6KOKvKKwKeKKKXKaK`KhKaKXKXKKjK;K?KZK^KKKIK9K4K$K-K6K7K0K@K;K>KTKKK3K,K4K?K‰KŸK¤KºK°KKKjK=KBKWK]K.K/K9K1KK?KBKKKXKkK|K…K‹K–K˜KœK¤K¦K©K¯K²K¯K±KµK³K·K²K·KµK·KµK¶K¶K¸K²K¬K§K§KK•K‘K„K€KtKgKZKZKPKKKOKRK\K]K]K_KdKbKgKoKiKpKpKnKlKlKjKrKkKmKmKnKuKuKƒK…K„KpKlKhKrKoKlKLK?K;KJKGKPK]KoK`KBK0K0KK@K8K8K*K3K7K3K:K@KKTKUK-K/K5K1K9K2K/K(K,K2K4K6K4K*K,K+K*K6KPKkK|KˆK†K‡K…K‡KƒK†K’K—KŸKK›K™KKšKKžKK—K™K™K™K˜K›K™K—K™K˜K˜K“K˜K—K•K”K˜KšK–K–K•K™K™K–K•K“K–K—K—K–K•K’K‘K“K‘K‘K•K’K’KK–K“K’K–KŽKŒK‘KŽKKKKKˆK‰K…K„KƒK…K„KƒKŽK 
K¦K¬K²K¶K¼KÀKÃKÄKÃKÅKÄKÄKÃKÂKÁK¾KÁK½KÀK¿K¿K¿K¼K¼K½K¼K¾K¼K½K¼K¾KÀe]r!(KcKcK\K`K^KYK[KXKWK[KYKTKPKRKPKPKRKSKNKKKFKFK=K?K=K;KBKNK\KlK}K„KŒK“K–K˜K¤K¦K©K¬K®K³K°K³K¶K¸K²K¹K¸K¸K¶K¹K·KµK¶K®K¨K¥KœK˜K’K‰KKwKgKaK\KOKMKUKVKUKZKYK^KdK^KhKgKkKiKsKnKpKqKoKrKlKlKsK„KuKzKKxKpKmKlKtKsKgKOKDKYKbKwK|KyKwK]KMK9K3K8K3K3K*K4K/KKXKuKƒKŠK‡K…K„K†KƒK‰K•K›KKŸK KœK™KŸK›KŸK™KšKœKšK—K•K–K—K–K—K”K—K”K–K˜KK˜KšKšK˜K›K–K™K”K•K‘K’K“K–K•K›K•K—K“K—K’K“K‘K•K‘K“K”KKŽK‹KŒK‘K’KŒKKŽKŒKˆK‡K…K‡K„K‚K„K„KK›K¨K®KµK¹K¼K¿KÂKÄKÅKÅKÄKÄKÃKÅKÀKÀK¿K½K¿K¿K½K¼K½K½K½K¾KÀK¾K¿K»K¾K¾K¿e]r"(KaKaK^K_K^K]KXK[K]KXKUKUKWKTKOKTKUKPKQKPKEKBKDK=K?K>KIKVKZKgKyK†K‰K’KšKšKŸK¨K§K©K­K¯K²K²K±KµK¶K´K¸K¸K³K¶K·K¶K°K­K¨K¢K¢K—KKŠKKuKiKaKWKOKMKRKSKXK[KcKaKdK`KfKhKfKjKnKoKiKlKnKpKsKnKqKuKzKzKoK`KnKsK‚KKyKoKeKqK…K”KK‘K„KwK\K?K8K8K9K2K0K1K/K7KOKmKaKMKPKhK‚KtK_KuKjK_K?KAKGKlKKlKPK/K&K$K&K'K0K2K.K9KOKbKrKKiKWKPK=KK/K,K-K-K6K3K.K/K,K-K)K0K1K,K4K1K7K,K+K2K,K)KRKŠK¥K}KhKgKpKŒKÀK²K«KµK¼K¾K½KÂKÆKÉKÉK»KZK6K7KEKQKRKaKkKpKqKoKyK~KyKƒKKKŠK‹K‹KKK’K—KKšK¡K›KŸKŸK¢KªK©K¤K¥K¥KªK­K©K±K­K­K¬K«K­KªK¬K¬K«KªK¬K­K¨K©K¨K©K©K°K°K­K¨K­K¡KœKšKšK•K”KKˆKŽKŽK–K‰K‹K‰K€KK€KK€KzK†KzK~K‡KŽK˜K¥K¯KÄKËKÑKÛKàKÜKÒKÆK¼K¸KµK±K¯K¨K§K K›KšK˜K•KKK‰K„KˆK‡KƒK‚K…K~K|KzK{KzKrKqKnKeK]KJKAK/K8K4K(K-K8K?KAK1K?K?KRKCK1K,K*K:K|K K¨K°K·K¥KKuK9K7KLKJK*K%K1KAK9K1K0K*K,K4K3K1K.K.K/K-KKCKFKFKBK9K?K`KzKXKGKRKnKyKuK{KXK\KpK_KhKyKsK]KJK*K'K$K)K)K%K)K2K-K1K4K:KDK[KmKuKTKbKnKlKbKXKZKNK\K•KK 
KŸK˜KvKEKCKjKbK_KRKQKUKMKBK'K%K(K4K3K4K*K'K/K-K*K)K/K-K-K.K+K)K(K-KBKwKœK–KmK`KjKvK¤K¾K§K±K¼K¿K»K½KÆKÌKÎKÁK{K6K4K=KGKKKVKgKmKrKpKuKxK|K{KƒKK€K„K‡KŒKKK”K˜KœK–K›KKŸK¡K¥K¥K©K¨K¬K§K­K­K¬K°K­K®K­K°K²K±K­K®K¬KªK¬K­K¨K¤K­K±K±K²K®K²K¬K¨K¢KœKœK˜K•K“KKŠKŽKK”KK‹KˆK„K†K€K‚K†K|K€KzKKƒK‹KœK£K±K¾KÈKÓKÝKàKßKÒKÊK¿K·K°K°K­K®K«K£K›K”K”KKKK‹KˆKˆK‹KˆK„KƒK~K}KKyK|KvKtKnKlK]KNKGK8KK(K#K&K5KyKŸK¢K¬K³K¦K•KwKFK4KGKKK3KIKeKjKTKIK\KiKyKqKSKSKbKyKrK€KvKcKEK)K(K%K)K*K'K!K+K8K2K3K.K+K5KKKTKjKfKqKvKzK}KpKkK_KVKhKŠKK‡KiK@K1K_K}K€KvKfKSKVKYKSK/K'K.K2K6K-K,K1K0K,K0K5K0K+K*K,K)K'K(K-KTK“K¢K|KfKfKpKˆK»K´K®K¹K¼KÂK¿KÂKËKÎKÉK’K4K2K3K?KLKSK_K_KoKrKuKyKxKzKzKƒKKƒK„KˆKŠK‹KK’K—K™K•KšKK K¡K¢K§K©K¨K®K§K§K±K¬K´K°K­K¨K±K¶KµK¯K°K¯K¯K¬K«K¥K©K®K²K®K²K«K®KªK©K£K¡KœK˜K–K–KKŽKŽKK‘KKŒK‹K†KK~K‡K‡K|KK€K€K„KK–KœK¬K½KÆK×KØKáKÞKÓKÌKÀK¸K¬K­K­K¬K­K©KžK™K•K•K‘K‹KŠKˆK‹K…K‡K…K…K}K‚K~K{K|KvKxKrKkK\KGK9K9K;K0K,K1K8KAK7K5KIK;KSK7K0K+K'K/KtK KžK±K®KžKœKyKJK2KAK5K"K"K(KCK=K-K#K0K9K5K8K5K/K.K/K)KDKeKvK„KŽKŒK‰K…KƒK†K‰KšKK KŸKŸKŸK›KšK›K›K›KšK•KšK™K˜K–K˜K›K™K—K˜K–K•K–K•K˜KšK—KšK–K–K˜K™K–K“K•K“K”K›K–K’K“K“KK’K‘K”K•KŽK‘KKŽKŽK‹KŠKˆKˆK‹KŽKˆK†K†K‡K‡KˆK†KˆKœK¤K©K²K¸K¾K¿KÂKÆKÆKÈKÅKÅKÆKÈKÇKÅKÁKÁKÁKÁKÃK¾K¿K¿K½K¾K½K¾K¿KÀKÀKÁK¾KÀKÂKÂKÃe]r&(KZKZK\K]KgKdKVKXKSKQKTKOKSKSKSKLKJKKKIKEKFKAK?K;K9K:KDKNKbKlK{K…K‹K’K—KŸK K§K©K®K®K®K±K²K´K²K³K²K¶K¶K·K¶K³K¸K±K­K¤K KK™KŒK‰K„KtKkK^KTKOKNKOKUKYK[KZKZK^KcKbKfKgKgKkKsK„K”K¡K£KKK¡K™K”K–KŽK™K¢KKK…KMKK>K?KBKFKFK:KQKmKgKVKLKVKwKrKOKQKbKuK€KKyKcKVK?K/K,K)K2K)K,K'K)K(K.K*K)K/K0K>KTKPKCKGKoKuK|KKK|KvKjKMK;K5K1K.K2K_K†K†K‚KrKcKaK_KJK0K-K3K5K4K/K:K-K/K,K0K0K-K+K0K)K)K)K&K1KoKžK“KnKfKnKvK¦KÀK«K´K½K¼KÄKÂKÉKÐKÌK¨K>K+K*K6KAKKKTKZKfKqKuKtKuKzK}K}KK}KK†KŠKŒKK’KK”K˜K˜K•KžKžK£KŸK¥K¨K®K¬K­K©K­K¯K¶KµK®K¨KªK«K²K²K±K¯K¶K³K±K°K°K­K®K¯K±K°K°K©K«K¥K£KŸKšK“K‘K•K•K”K‘K“KKŒKŒK…K…K„K…K†K}KKƒK‚K…K‹K“KžK¨K»KÈK×KÚKáKÝKÔKÍKÂK¸K­K­K©K§K¤K¦K¢KŸKŸK—K”KKŠKˆK…KƒK†K‚K‚K}K‚KKKzKuKvKnKiKXKFK:K8K=K4K-K3K@KIK6K:KEK=KUK9K+K3K+K,KgKK¤K´K«KK¤KŒKQK/KDK@K/K.K*K;K9K,K(K-K3K9K3K+K.K/K-K4KQKiKzK‹KKˆK‡K‹K‚K…K’KšKšKžK 
KŸK›KKšK›KœK—K–K™KšKžKšK—K•K˜K™K•K—K–K•K—K—K”K–K•K•K›KK˜K—K—K–KK‘K‘K—KŽK•KK’K‘K‘K—K—KK‘KK‘KKŽKˆK‹KŠKˆKŒKˆK‰K„KŽKƒK„K†K‰K˜K¤KªK³K¸K½K¿KÀKÂKÅKÆKÆKÄKÃKÄKÃKÂKÂKÃKÂKÃKÀKÀKÀKÀK¾KÀK¿K½K¾K¾K¿KÁK¿KÁKÀKÁKÁKÃe]r'(KWKWKVKXKZKXKWKUKVKSKPKUKLKMKSKOKMKNKKKHKJK?K?K;K5K8KAKMKaKtK{K~K‰K’K“KœK£K¡K¬K«K®K±K²K°K²KµK¶K´K¶K¶K³K¶K´K´K³K®K¥K¢KžK•KŽK‰K€KuKhK[KPKJKNKOKNKRKSKYKYK[KaKhKkKhKuKƒKK–KžK™K‰K‡K’KŽK€K{KKŠK€KK~KpKCK0K5K.KK6K5KK5K/K-K.K.K.K/K-K/K1K.K-K.K/K1K0K+K,K6KSK‰KªK€KsKpKtK}KµKµK³K¼KÀKÃKÃKÆKÏKÏK½KXK)K%K/K4KDKMKWK_KiKoKwKuKwKyK|K}K~K„K‚KƒKK‰KK”K“K—K˜K—K˜KšKžK¢K¢K¥K¨K©K«K®K­K«K®K³K²K´K°K¬KªK´K³K°K±K±K²K±K°K­K®K²K¯K²K¯K­KªK©K¨K¥K›KœK”K“K”K”K˜K•KŽK’KŠKK†K‚KˆK…KƒKK„KƒK‚K…K‰K‘KžK§KºKÊKÓK×KßKÚKÔKÏKÄK¸K¯K¨K¦K¥K¢K¡K¡KžK™K—K“K‘K‰KˆK‡K†K†K€K‚K„K‚KKK}KwK~KqKhK_KKK9K?KK5KCKCKUK8K,K+K)K/K_K”K§K´K§K—K¦KŽKcK:KCK6K)K*K*K4K2K,K2K6K8K9K+K0K)K+K.K4KWKoKKŽKˆKŽKˆKˆKK‡K—K™K KžK¢K¡KžKŸK›KžK™K˜KœKšKšKšK›K—K–K“K–K”K™K—K™K–KœK•K’K•K”K›K˜K•K•K•K•K“K’K’K‘K’K”KK“K‘K‘K‘KK’KŒK’KK‘KK‹KKŠKˆK†K‡K„K…K‚KƒK„K‡KK¢K¥K¯K¸KºKÀKÀKÂKÄKÅKÅKÄKÃKÄKÅKÅKÆKÃKÂKÂKÃKÁKÂK¿KÀKÀK¿KºK¾K¿KÁKÃKÃKÂKÄKÂKÅKÆKÅe]r((KYKYKTKSKYK_KXKTKQKPKJKNKQK^KMKLKMKOKHKGKIKCK?K9K;KK@K?KAK=K9KIK=K4K?KHKVKdKXKFKSK]KwKwKcKoKŒK†K~KKuK:K5K5K0K5K6K4K0K*K/K1K3K.K5K,K(K0K-K2K4KGK^KXKAK6K?K?KSKjKMK/K$K$K(K1K2KDKaK€KKŒKKtKgKFK7K8K)K.K/K2K.K.K-K-K.K/K*K/K3K)K)K*K0K:KlK”K—KrKsKvKtKK¼K¨K¶KÁKÁKÃKÃKÍKÒKÉK|K-K!K(K.K8KGKUK`KbKlKoKwK|K{KzK~K€KKK‡KKŠKˆKŽK”K”K‘K“K—K—KKKžK¥K¦K§K§KªK®K­K¬K°K±K³K°KµK¯K°K´K±K¶K´K®K®K¯K´K³K°K³K²K²K­KªK¨K¦K¢K¡KšKK™KšKK’K’K”KK”KŠKKŠK€K…K†K…KƒK‚K‚K{KK‰KK 
K¦K»KËKÑK×KÝKÛK×KÐKÇK·K±K«K§K£K¥K£K¢K¢K—K˜K”KKˆKŠK†K‡K†K„K~KƒK‚K~K€KxK{KzKuKjKZKFK9K@KAK1K-K/KBKK:K>KKKZKkKyK‚KŒK“K•KœK¤K¨K«K°K°K²K¯K°K³K³K²K³K¶K»KµK±K¸K¯K°K°K§K¤KK–KK‡K~KtKlK_KZKOKIKNKSKWKVKVK_K`KdKlKkKwKnKmKoKpKsKrKuK€KƒK…KyKhKiKfKjK]KiK`K_KVKUKTKZK[K\K>K:KBKDK5K/K:K=KCKLKbKoKpK=K7KKKRKsK~K|K‘KKvKNKHKK+KAK^K@K-K&K'K"K$K/K^KbKkKŒKK}KkKZKfK_K7K0K+K#K)K+K/K,K'K'K2K3K0K&K!K*K,KKDKXKlK{KƒKŠK‘K›KK¢K¥K©K­K¯K±K°K³K±K³K²K´KµKµK³K³K´K±K±K¬K§K¢K˜K”K’K‰K}KoKgKaKXKJKKKPKRK[K[K[K]KaKiKiKnKfKeKlKmKoKpKqKpK|KKƒKyK\KZK]KNKHK[KbKYKUKYKhK_KJKOKK:K)K0K>K4K4K5K:KGK^KLK5K,K*K'K.KLK†K¢K¬K¥K|K’K¨K’KTK(K/K#K&K,K*K0KKK6K5K7K1K.K1K.K+K8KWKnK‚KŠK“KŽKŒK†K‡KK˜K¡K K¡K£K¢K K™K›K›K˜K—K™K™KKšKœK›K–K˜K˜K“K—K–K“K˜K’K˜K’K–K–K–K’K“K’K‘K’K•K”K•K“K•K”KKŽK”KK•KšK“K‘KKKKKKˆK‡K…K‡K‰K€K€K„K‚K’KžK§K³KµKºK¿KÂKÄKÀKÂKÁKÁKÂKÂKÂKÂKÁKÂKÁKÃKÄKÂKÁKÁKÂKÄKÁKÁKÀK¿KÃKÆKÆKÅKÈKËKÉKÉKÉKËKËKËe]r,(KTKTKQKMKPKRKRKMKNKKKMKGKKKMKPKHKHKMKJKEKDKBKKK8K8K?KEKHKYKkKzKK‡KK—K›K K¦K©K©K®K²K±K¯K²K´K³K·KµK³K´K¶KµK°K°K«K§K¢K˜K“K‘K‰K†KwKgK^KWKMKOKMKZKYKZK_K]KbKcKbKnKiKfKoKnKlKnKwKtK|K„K}KnKNK]K[KHKHKRKUKKK^KfKcKMK@KZK]KOKKK4K2KK9K;KKGK]KjKyK„KˆK’K•K˜K£K¦K¨KªK¬K¯K­K³K³K³K²K±KµK°K¯K´K´K²K°K®K¥KŸKšK—K–K„K€KoKhK\KXKNKKKTKOKWKTKZKbKcKaK`KjKjKnKoKlKrKoKrKuKtK}K~KjKQKQKXKTKZKPKJKfKwKqK\KBK_K~KeKYKCKK'K%K$K'K,K'K$K"K&K2K%K"K"KK)K[K‰K”KvKoKoKsK„K¿KÇK»K½KÆK½K½KÃKÎK¬K:K/K+K-K6K.K3K7K@KOKYKeKlKrKqKuKuK{K{K‚K‚K~KK‰K…KˆK†K„K‹KK‘KK“KKK•K—KšKžK¢K§K©K¥K¨K«K¬K°K²K´K¶K³K°K®K³K·K³K­K¯K®K©K¯K°K¯K«K­K©K¢K£KŸKKœK™K–K‘KK“KKK’K”KŒK‘KˆKˆK…K†K~K~K|K‚K„K…K…K‰KK—K§K®KºKÒK×KÛKÝKÛKØKÍK·K°K©K¥K¤K¦K¡K¢K›K™K˜K–K‘KŽKKŽKŠK…K…K‰K€KƒK€KK~KzKrKiKaKEK=K;K7K1K(K1K9K8K4K5K:KHK`KKK2K*K)K'K,KHKŒK¦KªK£K…K˜K¥K˜KaK,K.K$K+K;K)K3KK3K3K/K-K,K*K/KIKaKyKƒKŒK‹KŠKK‡K‹K—K™K¡K¡K¢K 
KœKšKœKžK—KœK•KKœK˜KšKšK™K˜KœK˜KšK–K–K’K”K–K“K—K•K’K˜K”K’K‘K”K”K–KKŽK‘K•K–KK’KKK‘K’K“KKK‹K‹KŠKŠKŠK‰K…KƒK†KKzKƒK“KŸK¨K²K»K»KÂKÅKÆKÅKÆKÄKÂKÁKÁKÀKÂKÁKÁKÃKÄKÄKÂKÁKÃKÂKÂKÂKÄKÂKÆKÇKÉKÇKÉKÊKÌKÎKÌKÍKÍKÎKÊKÌe]r.(KSKSKTKRKLKKKGKSKHKJKJKHKKKHKHKFKIK@KAKGK@K?K=K9K8K5KCKLKXKgK}K„KŠK’K–KžK¢K¢K«K®K«K®K²K°K¯KµK°K¯K´K®K°K±K±K¯K®K«K¤K¥KžK™KK‰K„KuKiK^KRKKKIKMKQKVKXKYKYK^KjKjKhKeKiKmKmKlKqKrKsKqKuKtKfK@K@KZKwKgKhK|K‚K|K^KHKGK_KKjK_KJKGKJK\KlKwKkKOK;K/K+K5K;KVKQKgK›K‚K]KYKmKkKBK>KMK:KCK;K+K)K+K,K4K>K3K3K6K3K7K1K.K3K1K2K3K-K-KIK*K.K-K'K1K@K[KK3K-K4K2K1K1K>K9K5K,K:KAK/K0K0KJKDK)K%K"K:KAK`KfKqK{KgKZKK°KšKKzKEK)K"K+K/K*K#K!K"K)K&K"K$K*KSK…KŸKxKsKtKwKzK¶KÅK¸K½KÄKÅKÉKÐKÌK—K2K)K)K)K5KDK,K1KK:K4K)K'K=K1K2K/K/K3KEK\KIK-K,K)K%K+K@KyK­K´K¯K‘KˆKžKžK}KK:K7K8K=KCKFKUKgKvKKK“K”K›KK¥K§K©K¬K«K¯K¯K°K±K­K®K°K°K³K°K¯K±K®KªK¬K¡KžK—KK‰K…KvKfK\KPKIKMKZKRKYK`KYKbK`K]KbKeKhKfKkKkKlKnKsKrKrKrKhKRKUKK—K”K“K™KvKZKFKCKTKmKjKpKbK?KFK]KsK…KlKLK:K9K9K@K8KCK2K8K3KGKrKK{K^KQKzKLK2KK0K4K=KCKOKQKcKhKlKpKzKzK{K{K{K~K€KƒKˆK‚K‚K†KŒKŠKKKK’K˜K•KšK™KœK—KšKKK 
K¥K¥K¥K­K§K®K±K°K¬K¯K³K±K°K¯K­K«K©K«KªK¬KªK§K§K¥K¡K¡KKœK’K“K“KKŒKˆKŠKŠKKˆKKˆK…KƒK€KKzK}KyK‚K‚KƒK‰KKšKK¦K±KºKÐK×KßKßKÛKÔK¿K¬K¦K¦K¤K¢KŸKœK›K–K—K“K”K™KKKŠKˆK†KK†KK|K~KyKzKmKbKOK>K?K1K,K)K0K7K/K3K1K5K9KKK_KLK0K&K&K(K$K5KwK¨K°K¬K“KˆK™K›KŽKHK(K1K(K)K$K-KAK=K:K6K0K4K.K2KFKaKxK‡KKˆK‹K‰K†K…KK¥K­K³K²KžK›KœKœKžKžK™K—KŸKžKšK˜K˜KšK—K—K™K™K™K•K“K”K”K”K–K’K•K”K–K“K”K”K•K“K“KK’KKKŒKKŽK“K‘K’K‘KŽKKKŽKŒK‹K†K‡K†K€K‡K‚KƒKK›K¨K±K¸K¾K¿KÃKÄKÄKÅKÀKÂKÂKÂKÁKÁKÀKÁKÁKÀKÅKÁKÂKÃKÃKÄKÃKÇKÆKÊKËKËKÎKÎKÍKÎKÌKÌKÎKÍKÍKÍKÌKÊKÊe]r1(KNKNKLKKKIKMKMKHKQKLKJKJKLKKKKKEKDK@KCKDK=K9K6K5K2K4K?KIKUKjKyK„KK“K•KKŸK¡K¥K¬K¨K¬K¯K®K®K¬K¬K®K¯K³K²K®K±K¯K¯K¨K¨KŸK™K˜KŽKŠK‚KtKeK]KRKLKIKIKNKYKXKhKrKoKbK`KeKcKfKjKiKoKkKnKoKoKpKvKuK‚K£KœKˆKrK`KAKKKFKPKpK}KiKjKFK;K^KK|K^K;K:K7K9KIKNK8KRK5K3K3K@K^K‰K~KkKMK€KbK8K2K8KFKK?K*K(K0K3K:K.K5K9K>K>KLKWKFK1K'K&K(K%K9KsK¢K¯K¦K˜KŒKšK–KŽKWK8K=K#K!K&K4K7K?K:K/K6K2K1K5KKKfK~K‰K‰KˆKŠK‡KƒKŽK”KžK£K¡K¦K¡KKžKKšKœK›K˜KœK—K›K˜KšK˜KšK–K˜K™K›K—K“K–K”K—K•K’K’KKK“K”K“K’K“KKK’KKKKKK‘K‘KK’KKKŒKŠK‹K‡K…K†K‚KKK€KˆK˜K¥K°KµK½K¿KÂKÃKÃKÅKÄKÄKÁKÀKÂKÁKÀK¾K¿K¿KÀKÃKÁKÂKÃKÃKÄKÈKÉKÊKÊKÌKÎKÌKÍKÎKÍKÌKÏKÍKÌKÌKÊKÊKËKÊe]r2(KJKJKLKPKRKNKMKKKLKKKOKMKNKRKHKFKJKGKIKCK;K@KKVKIK>K7KMKrKK‚KtKXK„K€KEK(K3KAK:KBK:K@K5K3KIK9K4K-K2K7K+K,K)K+K*K3K(K'K/KLKIKUK2K(K.K3K=K9K-K-K-K=K`KnKvK†KƒKhKOKOK‰KyK“K²K˜KnK2K!K/K"K&K,K+K(K'K0K`KK•KmKdKpKyK…KÃKÉKÅKÈKËKÎKÑKÔK§K/K"K%K%K(K/K9KHK8K8K=KGKQKSK^KeKnKmKwKtKzK€K~K‚K‚K~KƒK‚K†K‡KŠKKKKK—K”K˜K–K˜K˜KœKK›KKœK¥K©K§K­K«K«K´K¯K­K¯K°K®K²K¯K¬K¨K§K¬K¬K§K©K§K¤K¡K¢KžK›K™K”K‘KŠK‰K…KˆK†K…K‡KˆK‘K†KƒK~KK€KzK}K|K|K„K‰KˆKŽK‘K–K£K«K»KËKØKÜKßKÚKÖKÈKªK¡K¡K¢KžKžKœKšKšK•K’K•K‘K‰KŽK‹K‰K†K„K†K€KKKvKwKkK\KGKK8K9KAKOK\KGK*K!K$K"K$K1KlKžK©K§KœK…K˜K“KKeK4KDK$K1K(K.K;K4KK>K5K3K7K5K=KHKXKkKxKKK’K“K›KK¢K¥K©K«K°K±K¯K®K®K«K®K°K²K±K²K±K°K©K¨K¢KžKšK”K’KŠKKvKiKUKUKFKHKJKMKPK\KyK‰KKhK_KaK`KiKiKgKrK{KŠK›K K¥KžKKbKDK:K;K6K1KFKSKnKŽK”KzKWK?K\K}KK`K9K.K,K.K5K`KuKHKHKWK6K8K:KsKKŠKKKSKlK’KYK=K8K7K8K7K4KK9K2K*K5KK9K7K:KLK`KOK/K(K%K%K&K3KgK˜K¤K¥KšKK–K‹KKhK=K9K K#K-K8K?K5K8K7K1K,K,K=K^KyKƒKKŒK‰KŒKˆK‰K™K›K£KŸK 
K¢K¡KKœK›KKžK›KšK›KœK™KšKKšK“KšK“K—K•K”K—K’K•K“K’K”K’K“K“K“K“KKK“K”KKKŒKK“KKKKK‰K‘K‘KŠK‹KŒK‡K‹K‡K‚KK‚K€K‹KšK¤K®K¸K¼KÀKÃKÅKÂKÃKÂKÂKÃKÂKÀKÁKÃKÂKÁKÁKÁKÁKÁKÄKÂKÇKÆKÉKÉKÌKÎKÌKÐKÐKÒKÑKÐKÐKÌKÍKÌKÊKËKËKÉKÊKËe]r4(KTKTKNKQKHKOKLKKKKKJKQKIKHKKKJKMKEKAK@KBK;K9K2K4K1K0KAKJK]KlKwKKŒKŽK”KKžK¤K¦K¦K«K®K¬KªK­K¬KªK«K¯K²K³K°KµK²K°K¤K£K K™K—KKKKvKnK^KPKKKMKMKKKLK_K{K‡KtK]KZK^K]KiKjKwKK¤K©K©K™K‘KŠKoKFK*K.K7K:K5KQKkKKœKœKvKIKQK{K{K[K1K8K3K/K+K;KtKnKIKMKVK7K6KWK—K‰KƒKŒK…K\KZK’KpK2K5K4K6K:K7KRK>K7KCK7K,K+K%K.K&K'K*K%K)K0K(K*K+KAKSKMKLKCKGKMK:K.K8KHKNK[K\KxKœK¡K˜K‰KaK=K5KSKtK›K­K˜KiK&KK!K%K#KK K&K[KŒK”K|KnKkKtKK¶KÎKÆKËKÏKÍKÍKÉK‹K1K"K'K'K$K+K&KDKUK8K1K7K9KLKVKZKeKfKoKqKsKvK~KxK‚K…KK‚K…K…K…K†K‡K‹K‹KK“KK•K˜K˜KšK˜K KK K¤KœKK K£K¨K¤K­K±K©KªK§K¬K¨K®K°K«KªK¬K¬K¥K§K K¥K¦KŸKŸK™K™K•K‘KŠK…K†K†K†K„KˆK†K‚K…K€K€KKzKzKxK{K|K‚K‚KƒKƒKŒK›K¦K®K¹KËK×KÝKÞKÝKÛKÐKµK£K£K¥K KK›KK–K“K“K“KKŠKŒK„K…K†K„KK‚KKKzKwKdKPKCK7K0K)K(K-K7K7K6KDK9K5K?KKKWKWK0K$K&K$K,K,KbK‘K¡K¦K›K}K–K“KŠKoKLK6K)K$K/K8K8K9K5K1K.K'K0KNKfK{K‡K‹KŠK‰K‹KˆK‹K–KK¤K¤KžK£K K¢K¡KœK™KžKK›KžKK˜K˜KšK™KšKšK™K›K™K˜K”K–K•K”K–K“K‘K‘K–K‘KK“K‘KK‘KKKKKK’KKKK‹KKK‰KŒKŽKˆK…K„KƒK‚KƒK…KK¡K«K¶K¼KÁKÂKÃKÇKÂKÄKÄKÂKÂK¿KÀK¿KÁKÂKÂKÀKÁKÁKÄKÄKÆKÇKÉKÊKÎKÌKÍKÐKÑKÑKÐKÒKÏKÏKÏKÎKËKÈKÉKÈKÇKÇKËe]r5(KPKPKJKGKLKNKHKRKOKPKMKJKIKGKFKCKFKDK@KGKAK:K7K1K1K3KAKKK_KiKxK€K‰KK•KšK¡K£K£KªK®K¬K²K¬K«K®K®K¬K°K¸K²K±K²K°K¨K¨K£K¢K™K”KŽK†KKsKeKYKKKJKGKOKEKQK[KKK{K`KaK]KdKtK‡K›K¤K¢K”K…KwKKxKeKAK'K,K9K1K@KoK|KŠKšK…KbK`KyKxKHK=K2KK2K;K>K.K,K&K&K)K'K(K%K*K0K,K1K>KRK[K[KPKMKUKOK2K-KHKbKWKPKcK‰K¤K§K©K KuKHK3KCKbK~K“KK`K&KKK$KKK$K;KKœKKuKnKpK~K KÆKÇKÊKÌKÌKÎKÌK¥K?K&K#K#K#K!K!K+KAKWKKFK:K=KAKLKVKXK-K%K)K(K/K2K^K‹K¢K¬K›K{KžKšKŒKuKQK.KK K,K:K7K4K2K,K-K(K@KWKlK~K‹K’KŒKŒK‰K…KŒK™KK£K¢KK K K£K 
KŸKšKœKK™KK›K™K™KKK—K™K—K—K•K”K˜K“K–K•K˜K“K“K•KK“K–KK“K’K”KŽKKŽKK‘K‘KŒKŒK‹KŠKŽK‘KŠKŠKˆKKŠK‡K€KK‚KŽKœK¥K±K¹K¼KÀKÁKÄKÃKÄKÄKÂKÂK¿KÀKÀKÁKÂK¾KÀKÀK¿KÃKÅKÈKÈKÉKËKÌKÎKÎKÏKÐKÓKÑKÒKÒKÏKÎKÏKÌKÌKÊKÉKËKÊKÊKËe]r6(KSKSKLKPKMKNKNKLKNKOKOKKKMKPKLKEKAK>KEK>K:K8K3K/K2K,K:KNKVKeKxK‚KŠKK“K›KžK¤K¨K«K¬K­K­K¬K¬K¬K¬K«K¬K¯K°K²K­K±K¬K§K¥KžKK“KŽK‹KKwKcKXKNKJKIKIKHKMKVKsK”KŽKoK\KeKxK‘K¤K¤K˜KKoKqK|KˆK€KXK1K*K7K6K=K`K~K€K‹KƒKSKRKzKyKHK;K>K8K>K9K1K7KcKpK]KNK’KjKAKMKyK‘KzKdKsKKrKAKnK—KbK8KKBKK2K9K=K8K1K;K7K0K)K"K'K*K%K%K$K)K6K-K;KDKoK‚K{KfKVKCK;K3K0KNKWKfK{KƒKŒK»K·K·KµK”K^K)K3K=K\K…KvKFK$KKKKK#K7KpK K‹KmK~KvKuK‘K¾KÃKÉKÊKËKÏKÃKdK(K!K#K%K&K!KK'K.KKKVKDK5KKDKNKCKNKXK-K)K!KK"K3KIK‚KœK¦K¦K‘KŒK§K‰KKTK+K KK*K8K0K(K,K,K,K2KIKeKwKKŒKŒK‰K‹KˆKˆK™KKžK KžK KŸK KžKœK›K™K˜K˜K™KšKšK™KKšKšKœK–K™K—K–K—K“K˜K”K•K“K”K”K“K‘K’KK•K”K“K’K‘K’KK“K’K’K’KKŠKŒKŒKŽKŒK‹K…K†K‡KƒKK…KŽKK§K±K»K¾KÂKÂKÄKÅKÄKÂKÂKÁK¿KÀKÂKÁKÂKÁKÂKÁKÄKÄKÇKÉKËKÌKÍKÎKÎKÎKÐKÑKÒKÑKÑKÑKÑKÒKÍKÎKÎKÌKËKËKËKÊKÌKÎe]r8(KOKOKOKLKLKOKKKIKIKPKHKGKJKJKGKEKGKAK=K?K;K:K8K2K-K/K7KBKUKgK}KKŠK’K—KK¡K¢K©K©K¨KªK±K¬K­K­K«K­K¨K°K°K¯K²K­K¬K«K¤K¡KŸK”KŒK‰K~KuKbKYKRKGKEKGKJKLKLK\KlK’KœKK†K†K‹KwKnKsKnK|K…KK{KuKYKK^KrKKoKOK>K8K:KAKFKfKvK{KxKvK´K¼K¹K¶K™KhK,K&K2KLKsKMK&K K!KKKK%KPKƒK•KyKoKyKvKxK¥KÇKÈKÎKÎKÍKÌK‰K1K%K&K%K'K!K#K)K'K)KIK\KHK6K=KEKIKMKZKbKfKlKrKtKuK‚KƒK|KzKKƒK†K~K„K‡K‰KŠKŠK†K‰KKšK’K‘K—K—K›K™K—K¡K¡K¡K£K KžKŸK K¢K£K¢K¤K¨K¤K£K¨K©K¨K¤K¥K¨K¢K¦K¦KŸK K›K—K‘KK‰KŠKˆKƒKƒKƒK‚K„K„KKK{K{K}KxKwKxKzK{K|K}KK‰KŒK•KŸK¢K®K¹KÂKÔKáKæKãKÜKÎK¨K™K¡K K™K›K–KšK”K‘K‘K‘K‹K‹KŒKˆK‚K„KKƒK}KvKlKgKXKEK2K(K(K+K,K9K0K,K;KCKDKAKMKHKDKXK3K$K$K"K#K5KHK{KšK¤K§K–K…K§KŠK‡KXK,KK%K-K;K+K)K.K0K*K0KQKdK~KKK‹K‹K‰KˆKŽKšKšK¡KŸKžKžKKŸKžK 
KK˜K“K“K™K›K›KœKœK™K›K›K™KœK—K–K›K™K˜K—K”K“K”KK”K’K•K”K”K“K’K’K“K’KK“K‹KŽKŒKŒK‰K‹KKŽKŽK‹K…K…K„K‚K„K†K”KŸK®K·KºKÁKÂKÄKÄKÂKÃKÁKÂKÀKÀKÂKÂKÂKÁKÂKÂKÃKÃKÆKÈKÌKÍKÏKÏKÏKÎKÏKÐKÐKÐKÐKÐKÐKÐKÐKÑKÌKÎKÌKÍKÎKÌKÊKÌKÐe]r9(KPKPKQKNKOKQKJKIKJKKKKKIKFKGKGKCKDK?KAK>K6K7K6K1K,K)K5KBKZKeKyK‚KƒKŒK”KœK¡K¤K©K®K¬K®K¬K­K°K®K«K®K®K¯K±K±K³K®K®K©K§K£K™K—KKŒK€KrKfKYKPKIKKKRKIKIKOKWK]KnK…KKŠKƒKuKnKeKiKkKkKrK|KuKvKdKHK7KCK=KnK„KlKqKQKAKiKaKZKHKKK`KVK5K?KHK`KbK[KQKqKKhKKK:KcK…KoKgKlKyK¡K•K\KDK]K‰KIK&K-K)K)K8K9K@K7K;K/K-K&K)K,K*K%K(K0K8K0KEKKSKhKwK}K†K‘K—K›KŸK¥K§K¬KªK¬K®K®KªK²K²K¯K°K®K±K¯K²K±K¯K©K¤KŸKžK—KKŠK~KrKhKWKPKJKKKFKFKOKMKZK\KhKoKqKxK~KvKwKzKtKpKrK}K„K‚K~KfKIK4K5K4KcKvKnKiKOKLKcKTKWKSK^KJK2K9KVKYKYKXKMK]K…KxKdKTK=K[KzKcKZKpK{KKšK_K:KDK{KSK!K*K(K*K-K'K1K0K9K/K+K.K(K,K#K"K)K6K/K6KZKIKGKPKXK_KSKKJKIKLKPK3K'K"K!K"K1KLKnK—K§KªK˜KK¥KKˆKmK,K&K#K/K0K'K*K(K.K;KK?KKHK_KVKSKMKWKdKlK`K_KfKBK9KXK`KbKUKJK@KKKzK…KFKFKNK>K\KqKTKFKhKˆK“K£KlK?K2KbK‚K6K$K'K-K/K.K3K+K+K,K5K1K/K9K3K0K/K/K*K1KSKgKIK?KEKOKGKRKcK…KKvKaKcKYKCK2KDKXK_KGK;K2K*K%K1K,K7K/KKK#K.KOKKKmK‡KvKrK€K{K~KžKÉKÊKÊK«KŸK¢KK@K8K7K3K.K5K+K'K0K>KNKaKxKK„KŽK“KœK K¤K©KªK¬K®K°K¬K¯K°K¯K°K´K±K²K¶K²K´K±K«K«K¤KœK”KK‹K~KvKnKZKOKHKEKIKKKMKQKWK_K`KiKuKK„KxKkKhKmKrKzK{K|KwKhK:K;KLKVKcK\KZK=KDKeKtKsKeK\K@KK(KRKRKNKLKMKNKOKOKOKMKQKIKKKNKEKIKAK:K;K6K6K5K0K+K+K%K2K=KTKaKvKzKƒKK–K™K K¥K¨KªK®K¬K®K±K«K°K¯K®K¯K¯K±K³K´K´K°K¬K§K¥KžK˜KK…K}KvKiK\KNKIKDKBKJKSKVKYKgK}K}KKKzKlKjKmKkKnKsKtKrKnKUK6KCKGKUKVKXKVK2KGKkKKxKbKLK?KWK[KOKFKDK@K?KGK~KKYK3K3K=KNKrKKWKKYK[KlK…KŸKXKFKTK^KmK–K¢K”K€K_KKKK„K“K­KŸKŸK–KK•K‹K_K3KKIKK”KšKKUK2K0KPKˆKFK+K0K0K9KHKLK6K4K.K+K&K7K9K$K K#K5K9KBKPKhK¡K“KŸKiK@K5K@KPKYKgKqKeK?K2K*K-K)K'K!K'K$K&K1K'K 
K!K*KxKªK¡K¦K¤K™K–K’KK€KuKwK‚K¡K«K½K±KKeK1K$K$K$K(K&K2K'K#K'K(K%K,K]KqKcK[KEKEKJKLK\KZKiKmKoKtKxK{K€KxK|KK‚K€KK…KKƒKˆK‡K‰KŒK‹K’KKK’K‘KK“K•K—K—K–K™K•K˜K¡K›KKŸKœKKžKŸKŸKŸK¢KŸKžKŸKK¡KžKšK•K’K’KKŒKŒK~KxKhKpKvK}K†K†KKKŽK‰KŒKŒKŒKŽKŽKŠK‚K|KyK|K~K‚K‹K“KK¬K´K½KÈKÔKÞKâKÜKÔK¿K‘KŽK“K–K•K’K“KKK‰K‹KˆK†KK€K‚KzK|KzKrK[K?K.K%K*K)K(K&K0K-K3K0K8K?KKIKFKDK1K/K(K-K:KIK=K#K%K)K)K0KK-K,K+K+K:KNKXKiKuKK„KK–KšK¡K§K§K«K«K­K«K­K®K­K¬K®K±K¯K±K³KµKµK¯K®K±K£KœKKK‡KKwKqKbK[KYKmKKKyK~KnKgK^K^KbKdKeKgKlKqKuKxKzKwKjKYKGKMK>KK‹KŒKBK0K+K8K=K7K+K0K+K2K;KPKCK*K K!K(K+K7KJK8K8K/K/K,K(K(K*K2K?KiK‚KrKpKFK-K%K$K$K'K"K.K*K KKKKbK¨KžKªK©K§K¯K©K¬K§K±KµKµK¸K½K­KŒKwKNK5K(K#K&K&K'K,K)K1K1K/K&K'K2KXKiK\KVK?K:KGKUKZK`KeKnKtKsKvK}K|K~K‚K}K„K€KKƒK€K…K†KƒK‹KKŽK‰KKŽK—KŽKK˜KK–K™K“K—K•K—K—K”K›KœK›KŸKšK—KœK¡KŸKŸKžKžKœK›KšK›K–K‘K“K‘KŒK”K€KtKlKtK|K}K‡KŠK‡K†K‡K†KƒK…KK‡K„K‹K„K~KwKwKvKzK…K‘K”K K®KºKÄKËKÎKÔKÒKËK§KK“K”K‘K”K‘KKKK‘KŒKˆKŠK€K€K~K~KxKoKYKDK1K)K*K+K,K)K)K6K9K3K2K5K@K=KHKRK=KFKOKK]KKK‘K‘K“K’KKŽK”KžK¡KžKŸKœKžK›KšKšKšK™K›K™K™KšKšK—K”K™K—K˜K˜K–K–K”K–K™K—K•K”K–K˜K•K•K‘K•K–K˜K“K”K–K“K’KK”KKK“KKŠKŠK‹K‹K†KŠK…K€KƒKƒKK¡K©K±KºKÀKÃKÇKÃKÂKÄKÂKÃKÃKÀKÁKÂKÀKÄKÃKÉKÉKËKÍKÊKÍKÎKÎKÍKÏKÏKÑKÍKÎKÌKÎKÍKÏKÍKÍKÌKÍKÏKÏKÏKÑKÒKÑKÐKÒKÑKÐKÐKÐKÎe]rC(KZKZKQKOKSKMKPK\KRKLKRKRKGKHKMKIKBKDKAKK?K;K1K-K,K:KKKXKeK{KK†KK•K—KŸK¤KªK¨K­K¬K®K«K¯K®K«K¬K®K­K´K¯K²K²K°K¬K©K¤K™K›KKˆK€K}KnKbKeKiK`KgKdK\K]KYK]K^K^KaKeKdKeKkKoKxK}KvK{KdKYKTKKK2K5KJK]KcKiKbKhKzKkKjKXKLKAKKK/K$K3KQKlKK…KZKSK=KBK.K*K6KaKKoKSKIKUKvKŸK­K„KCK7K5K~K¦K_K0K(K-K2K,K.K0K.K3K:K_KHK%K K'K+K+K:KLKCK?K9K+K(K%K*K3KCKKKqKbKOKFK3K(K'K'K'K&K&K,K)K K!KK%KŒK®K¥K¨K¨K´K´K©K°K°K´K¹K´K¶K´K™K{KLK-K)K&K K"K"K"K0K-K+K&K(K)K%K4K`KoKdK\KHKMKLKUK\K]KfKmKtKoKyKzK}K~K|K}K„KK}KƒKƒK‚KˆK†KŒKŠK‘KKKKKŽK’K•K•K–K—K•KšK˜K•K—K•K—K™KœK›KŸK˜KKžKžKžK KŸK KK›KžK—K–KK”KŠK‹K„KqKlKqKKK‰K‡K…K…KƒKwKnKtK~KK€K‚K„K~KzKzKyKyKK„KK•K§K³KÁKÉKÍKÌKÍKºK™K’K—KK’K—K—K’KK“KKŠK‰K‹K„K‚K|KyKvKhKIK1K,K*K*K-K,K.K-K0K5K2K3K6K;K:KHKGK:K=KKK=K(K/K6K*K#K1K*K,KWKŠK¢K©K©K¡KŒKK…KvKVK)K$K/K"K!K)KBKeKK”KK–KKKKŒK•KK K 
K¡KžKKK›K˜K—K˜K˜KšK˜K—K”K”K•KšK•K—K›K˜K—K•KšK™K—K—K™K”K–K–K’K’K‘K”K—K•K“K“K•K—KK“KŽK’KKKŒKKŒKŽK‰K„K„K…K‚K„K“K¢K­KµK¼K¾KÂKÆKÆKÅKÄKÂKÂKÀKÂKÀKÃKÃKÆKÅKÉKÉKÌKËKËKÍKÎKÍKÌKÍKÎKÏKÎKÍKÍKÏKÐKÏKÏKÍKÏKÐKÑKÎKÏKÐKÓKÑKÐKÏKÏKÏKÎKÎKÍe]rD(KWKWK[KPKPKOKTKLKQKOKNKJKMKPKNKEKDK?KBKBK;K9K8K6K3K2KKCKAKMKLK7K@KNK:K0K/K4K0K$K*K(K&KXKK›K«K§K KŒKŒKK|K`K1K(K3K(K,K/KUKmK‹K‘KK“K’KKKK—KKžK¡K KžKœKšK˜KšK›K›K˜KœK˜K”K”K“K”K—K—K–K–K—K—K–K˜K™K•K–K”K•KŽKK“K“KK“KšK’K“KK‘K‘KK‘KK’KKKŠK‹KŽK‰KŠK‡KƒKK}KˆK˜K¦K®K¶K¾KÁKÃKÆKÄKÅKÄKÄKÁKÃKÅKÂKÄKÅKÆKÉKÈKÍKÎKÍKÍKÎKÏKÍKËKÏKÍKÍKÍKÎKÍKÏKÑKÏKÎKÏKÏKÑKÑKÒKÐKÑKÐKÒKÑKÐKÑKÏKÐKÏKÍe]rE(KMKMKSKPKQKNKNKSKQKIKPKHKHKMKJKFKDKBKCK;KDKAK9K:K0K5KAKTKbKoK}KƒK‹K’K–K˜KžK¤K¦KªK©K©K­K±K¯K®K±K­K°K±K°K´K³K±K±K°K«K¥KœK—KŒKKƒK}KqKiKaKRKDKOKLKOKTKVK]KZK`KbKjKiKhKkKkKoKtKrKjK]KNKOKTKKK@KUKeKYKQKfKzK’K}KcKVKPKNK5K%K$K6KgK¥K‡K]KXK;K@K^K>K+K9KRKyKmK^KcKXKlK”K¬K©KkK4K.KQKK~K;K(K.K1K1K6KHKMKBK=KOK=K%K#K#K(K!K!K!K)K/K;KOKBK0K5KgK°KªK~K4K#K%K"K&K+K*K.K,K,K&K%K"KK!KXK­K±K­KµKºKÄK´K·K½K¼K¾K·K®K¢KtK0K)K*K#K!K%K%K%K%K&K,K8K.K*K.K+K+K-KWKjKcK`KLKIKQK`K^K`KbKkKpKqKuKyKxKyKxKyK€KK‚K€K‚K‚K†KŠK‰KˆKˆK‹KŠKKKKKšK–K–K“K™K™K–K›KœK˜K•KšK›KšK˜KKœK KŸKžKŸKŸKžK KŸK¡K˜K“K•K“KKKŠK‚KwKyKK„K†KˆKƒKeKAK2K0K1K?KKKYK`KZKcKqKuKzK~KyK{KKŽKŸK©K¹KÀKÈKÇK­KŸKžK–K•K–K”K˜K•K“K‘KŽKŒK‹K…K‚KK€K{KvKjKKK1K+K(K.K2K.K-K)K0K9K6K5K4KK9K8K8K>KDKWKgKuKK†KŽK–K˜KšK K£K¥K¨KªK­KªK­K¬K¬K¯K¯K¯K°K³K²K¶K³K­K©K¨K¦KŸK–K‘KŠKKKqKeKYKJKKKIKNKNKWK\K\K[K]KaKgKgKjKgKpKnKqKoKkKOKK{K”K­K­KžK}KK˜K~K`K6K#K(K@K1KBKdKzKŽK”K”K“K“KKK”K›KžKœKœKKKœK™K™KšK•K›K–K˜K—K“K˜K—K–K•K•K“K˜K–K’K˜K—K•K–K–K–K“K–K’K–K“K”KK‘K’K‘KK’KK’K’K’K“KKKŽK‹KŠKŠK…K„K…K‡K‡K“KœK¨K¶K¼K¿KÂKÇKÆKÄKÆKÄKÅKÄKÃKÅKÆKÅKÆKÉKÊKÍKÍKÍKÎKÍKÏKÏKÎKÌKÎKÎKÏKÏKÐKÏKÏKÒKÒKÑKÑKÐKÑKÑKÒKÑKÓKÑKÓKÔKÒKÏKÐKÎKÎKÍe]rG(KNKNKUKJKPKIKPKMKKKQKJKOKQKNKIKJKGKAKDK>K@KGKFK=K=K@KNKZKhKxK}K‹K’K•K™KžKžK¦K¨K¨K­K«K°K®K¯K­K¯K±K¬K°K°K²K³K¯K°K­K«K§K 
K—K‘K‹K„K|KoKaKTKLKHKDKNKSKQKSKYK[KbK`KcKbKgKiKpKmKoKoKkKTKIKdKaKOK=K]K^K^KTKnK~K{KfKYKNKHK+K"K'K/KgK”KKhKAK5KKGKFKSK:K6KEKSK>K0K.K-K8KK7K3K-K$K-K.K,K;KFKiKK‰K“K›K¢K®K®K±K¶K¾KÀK¹K®KK)K%K&K'K$K*K/K)K+K4K.K,K.K/K2K(K1K,K1K0K`KpKnKeKMKNKPKcKeKfKlKuKwKqKuKvK}K{KxK~K€K€KƒK‚KƒK…K…KŠKˆK†KŠKŽK‘KK•KK‘KKK•K—K•K—K•K–KK—KšKœKK˜KšK›KšK›KŸKžK KKKœKžK¢K›K—K˜K’K•K”KKKK{KtKxK}KKKƒKK€K{K}KK~KK}KwKoKnKgKbKmKyKxKzK~K†K›K²K½KµK±K«K¦K¢KœK˜K”K”KKŒKŽKŽKŽKK…K€K‚K„KKsKbK@K0K/K1K1K7K8K,K*K-K*KBK2K3KKxKKoKbKRKpKuKsKœK¨KxKFKOKpK¥KkK"KKK K'K*K2K)K/KmK’KšK{KCK0K,K(K%K"K$K K"K+KWK‰KzKUK2K)K&K K1KBK3K0KNK=K;KUKSKLK^KdKpKpK{K‹K”K–K”K™KŸK¨K¤K®KžK_K7K'K&K K#K(K.K8K;K,K1K/K+K*K+K1K&K/K/K*K1KcKtKtKiKRKJKPK^KfKlKhKtKoKqKuKwK€K~KKzK{K|K€KK‚K‚KKˆKˆKŠK‰KŠK‹KKŽK•K”K’K”K“K•K•K“K‘K‡K–K–K“K–KšK—K–KœK˜K›KžKœK¡KŸKžKšK KžKšKœK—K”K•K–K’KK‰K‚K~K{KzK~K|K€K‚K„K‡K„K†K†K…K„K~KwKwKvKtKpKxKvK~KˆKšK­K¿K¼K³K®K¨K¤KŸKšK›K—K˜K’K‘KŒK‡K‰K†K„K„KK‡KvKmKZK4K0K3K-K0K8K4K1K+K+K0K@K7K5K@K9KJKMKUKBK@KIKLKCK1K+K,K7K8K+K%K#K-K^KK¦K¯KŸK’KzK’KK[KSK+K&K.KOKdK{KŽK’K‘K•K“KŽKŽK“K—KK KŸKžKžKœKœKšKšK™KœK—K“K™K™K—K™K™K”K—K—K˜K•KK•K™K–K”K”KšK”K’K–K—K“K–K’K“KK–KKK“K‘K”KK‘K‘KK‘K‰KK‡K†KŽK‰K€K…K‘KŸK¨K±K¸KÀKÁKÅKÄKÆKÃKÅKÄKÃKÅKÂKÈKÉKÈKÊKÍKÐKÑKÏKÎKÍKÑKÑKÐKÐKÏKÏKÐKÎKÐKÎKÐKÓKÑKÒKÔKÒKÑKÒKÑKÑKÒKÑKÑKÑKÏKÏKÏKÎKÎKÐKÐe]rJ(KNKNKQKOKJKHKRKPKNKHKJKCKJKHKLKGKDKBKAKBKCKJKCKMKLKLKVKgKpK}K„KŽK‘K˜KšK K£K¥K¦K¨K«KªK§K«K¬K®K­K­K®K®K°K²K¶K¶K´K®KªK¤KžK–KKK„KzKoKdKWKOKIKKKPKOKTK]KVK]KbKdKdKhKeKnKzK~KrKqKeKXKcKTK:K9K>KUK]KWKVK>KNKVKEK4K+K#K*KJKqK‡KuKjKNKDK]KHKYKWKRKJKCK5KEKvK„KjKKKFKeK{KqK€KªKKYKXK{K•KK2K!KK'K,K7K6K'K0KoK™K§KŒKLKPK7KKK%K&K#K$K2KpK|KmKMK,K/K#K"KJKZKDKLKeKiK^KeKmK`KoK~KmKmKzKK„K„K{K‡K›KµK´K·KcK1K'K#K#K$K&K*K;KQKJKKKFKEK,K,K-K-K'K3K;K4K,KWKrKyKjK]KSKQK\KaKiKkKpKwKvKtKvK~K~K}K~KzK~KKˆKK…K€K†K†K‡K†KŒK‡KK”K’KK‘K‘K˜K’K”KK”K‘K•K”K“K˜KœKšK’K–K˜KKœKKžKŸK£K˜K›KžKžKšK˜K”K—K™K“K“KK†K†K†K‚K}KzKzK€K‚KK†K„K†K†K‚K|KtKwKvKzK€K‰K‘KK©K´K¼K¿K»K´K¬K¨K¢K 
KœKœK”K”K—KKŒKŠK‰K†KƒK†K†KKxKiK?K/K,K/K4K.K9K4K.K/K3K-K=K3K;K9K8KNK^KNKAK9KPKQKDK2K-K,K;K1K(K'K$K+KOKŽK¦K¯KŸKŽK}K‘K”KeKYK;K'K'KPKpKKK”K•K•K”K‘KK”K›K KžKžKKžKœK˜K™KšKœKK™K—K–K“K–K•K˜K•K”K•K”K”K–K”K–K•K–K™K™K–K—K“KK”K‘KŽKK“K“K”KK‘KŽKŽKKKŒK“KKKŒKŠKˆK†KƒK„KŠK“KžKªK¶K¼KÀKÂKÅKÃKÄKÄKÃKÅKÄKÆKÄKÇKÊKÊKÌKÎKÐKÐKÒKÐKÎKÐKÎKÏKÑKÎKÎKÏKÎKÏKÑKÐKÑKÔKÔKÔKÒKÑKÑKÏKÒKÓKÏKÕKÐKÏKÐKÏKÏKÐKÐKÑe]rK(KOKOKTKRKTKLKRKMKPKHKKKNKOKNKLKGKAK@K?KFKGKKKGKJKOKSKZKfKuKKKK’K˜K›K K£K¤K¥K¨K§KªK«K©K¦K«K«K¬K¯K¯K¯K·KµKµKµK¯K«K¦KŸK–K’KK…K{KmK^KTKLKGKGKRKSKVK[K\K^KcK]KcKeKkKvKzKyKuKwKgKZKRK.K5K7K>KUKaKXKVKBKLKFKBK1K%K$K6KfK‡KzKiKHK?K>K\KUKUKDKOKGKXK=K?KkK}K`KXKTKcK„KqKcK¡KKoKcK~K…K†KNK#KK K*K=K6K*K/KrKKµKšKwKqK0KKKKKK&KWKyK^KFK^KOK(K#K(KJKaK`KcKiKuKoKsKvK‡K—K”KaKjKtKkKlKpKsK…K»KÏKÏKK-K)K%K,K#K)K*K2K9KBKMKZK4K5K1K*K'K0K#K*K+K/K5KcK~KuKdKdKOKRKYKiKpKqKoKqKuKxKxKzK|KzK~K€KKK‚K„KK‚K†K…K‡K‹KŒK†KŒK’K‘KŽK‘KŽK”K“K“KK”K“K•KšK”K–KžKK˜K™K—KK KKœKŸKžKŸKŸKžKŸKœKK˜K˜K’K•K”K•KKK‰KŽKˆKK}K€KKK€K€K…K‡KˆKKvKyKKŽKœK­K¶K¾KÀKÀK¿K¼K¸K²K¨K¦K¡KšKšK›K–K’K’K‘KKŠKˆK„K†K€K„KxKvKUK6K,K/K-K+K2K0K+K2K-K/K1K;K3K7KKJKEK?K6K-K-KK.K*K0KiK’K¥KªK K}K7KKKKKK9KxKaK5K6KUKFK0K3KEKZK]KkKmKhKwK~K‘KK­K¬KrKdKuKfKcKmKrKƒK¢KÓK×KÀKRK%K&K*K'K$K&K)K-K1K8KK4K/K1K/K/K4K1K,K/K,K2K4K>K7K:K?K;KSKXKMKOKKKSKBKFK:K4K1KBK*K1K+K"K%K?K…KžK«K§KKxKK„KUKcK=K/K0KWK}K‹KK“K‘K“K’K’K‘K™KK KžKžKšKKŸKšKKšK˜K™K”K™K™K—K˜K›K›K•K”K”K”K—K’K“K’K–K’K”K–K›K”KK”K”K“K“K•KŽK‘KŽK“K‘K‘K’KŽK’KKŽKŠKŒKˆK‰K…KˆK‚K}KŠK˜K£K®KºK¿KÂKÅKÄKÂKÅKÇKÄKÆKÅKÅKÇKÊKÌKÌKÌKÏKÐKÑKÑKÍKÎKÏKÎKÐKÑKÍKÍKÏKÏKÐKÑKÏKÓKÑKÑKÐKÒKÒKÑKÑKØKÏKÐKÐKÐKÎKÐKÐKÐKÑKÒKÓe]rM(KQKQKPKQKNKHKHKNKIKFKLKQKOKMKKKJKCKGKFKIKCKGKHKMKTKVK^KlKxKK‹K‘K•K›KšK K¥K¢K©K©K¤KªK©K©K¨K¨K­K­K®K±K°K°K²KµK²K²K­K¤KžKšK”KŠK„KxKlK^KVKSKKKHKRKRKWKVKZK[KdK^K_KeKkKoKtKrKrK{KKtKHK*K-K%K+KPKVKQKMKEKEK=K9K-K.KYK„K‚KbK@K9K/K2K>KGKTKVK>K=K\KyKPK\K~KsKeK]KcKzKzKfKJKgKœKŠKeK†KKˆKwK4K(K$K&K2K8K,K0KWK€KœK“KKƒK9K)K"K 
K#K)KXKbK6K(K;K;KJKSKSKdKcKaKfKnKtKK›K¦K¶K´KŠK_KdKjK^KeKuK~KŽKÀKÔKËKwK0K)K*K)K.K+K/K*K2K2K2K-KFKAK4K4K6K1K3K2K)K*K-K1KdKwKzKWKfKNKWK[K`KnKmKsKsKzKyKyKK{K|K{K}KzKKK„K~K}KƒKKKŠK‰K…K‰KK‹K‘K•K‘KKK’KKK•K”K”K•K–KKšK™K›KœK™K›K›KžK KŸK›KK¡KœKžK›KžKžKœK˜K–K“K–KKK‘KKŠK†K‡K…KˆK‹KŽK•K˜K¦K¤K–K“K”KK­KÁKÆKÇKÄKÃKÂKÁKµK´K¬K©K¢K™K—K—K–K–KŽKŽKK‹K‡K†KƒKKKvKVK1K2K1K0K.K)K0K4K0K3K+K1K3KKK/K5KMKJKbKkKdKpKqKkKoKwKŒK™K°K±KºKžK[KcKpK_KjKwK|KŠK¬KÏKÔK¡K:K2K.K-K4K8K6K5K2K:K8KK:K8K3K9K4K5K1K/K1K1K8K`KwKK_KhKQKTK\KeKmKmKrKuKxKyKyKyK{K|K|K}K{KxK}K‚K†K‚K‚K†K…K†KŽKŠKŒKŠKK“KŽKKŽKKŽK‘KK‘K•KŽK”K˜K™K›KšK™KœKšKŸKžKKžK¢K›KKžKKžK›K›KœK K–K–K˜K–K–K–KK•KKŠKKŠK‹KK‘KœK¤K²K¶K¥KšK–K£K°K½KÈKÅKÂKÂKÂKÀK¹K´K­K¤KžKœK˜K–K•KKŽKŒKŠK…K„KƒK…K€KKtKIK6K2K5K0K3K*K/K/K*K2K2K6K+K8KAK?K8KBK]KbKDK>KPK\KMK;K7K/K>KLK)K/K.K&K"K6KpK‘K¦K¬K–KoKK‡KTK`KJK:KJKdKƒK“K“KKK’K’KŽK’K›K KK›KšK—KšK–K™K›KšKšK—K•K›K—K™K›K˜K”K”K–K—K–K•K•KK“KK“K“K˜K–K’K“K‘K’KK‘K’K‘K”K•K‘KKK”KK’K‘KKK‹K†K„K‰K‚K„K‡KK›KªKµKºK¿KÃKÃKÄKÅKÂKÃKÂKÄKÄKÆKÉKËKÌKÎKÏKÑKÒKÏKÐKÏKÐKÐKÏKÍKÎKÍKÎKÑKÏKÐKÐKÒKÑKÒKÓKÒKÑKÑKÑKÒKÒKÑKÐKÏKÑKÒKÒKÓKÒKÒKÒKÔe]rO(KRKRKSKVKPKPKPKSKSKOKQKTKPKNKMKIKGKFKHKCKAKAKFKMKTK^KcKkK{K€K‹K—K˜KžKžK¡K£K¤K«K¨K¥K¨K§KªK­K­K¬K±K°K°K²K³K²K³KµK°K¬K¦K K›K˜KŠKˆK€KpKbKVKNKJKOKPKRKTKWKZKZK\K_KbKfKiKgKpKxK…K•KšK‡KPK9K,K,K0KIKWKSKGK>KOKHKJKFKxK“KpKWKCK8K5K,K.K>KGKMKMKLK?KRK[K\KjKrKcKhKsKlK}KnKWKBKPK–KšKkKpK“KK„KpKFK.K)K/K@K4K6KKKjK‡K{KdKIK-KK@KFKMKWKbKcKnK{K€KŠK•K’KœKœK K¢K£K¦K§K©K©KªK©K©K¬K®K±K°KµK²KµKµK¶K°K®K­K§K KšK”KŠK‰K}KlKcKUKPKJKNKLKUKUK]KaK`KbKaKaKkKhKnKvK‡KKŸK“KrKVK@K2K6K:KLKcK\KMKFKSKPKaKnK“K~KZK=K;K4K2K5K5KAKJKJKVKNKEKFKMK\KgKpKhKZKrKnK~KvKiKJKJKK KkK]K‚K•KKKYKKK>K@K?KMKLKGKlKjKoKpKDK6KCK>KJKNK:K+K*K(K1K8KSK€KŒK…K~K…K”KžK©K¬K²K¶K½KKQK`KcKYKbKvK“KKÃKÌKÃKtK8K5K7K6K6KAK@K>K7K7K3K8K:K4K=K?K8K6K4K,K.K2K'K-K,K2KZKoKpK]KoKVKVK`KdKjKpKtKvKxK{KxK}KKyKKK~KK‚K€K„KƒK„KˆKK‹K‰KKŽKŒKŽKKŠKŒKK“KK‘K‘K“K—K–K›K—KœK—K˜K˜K™KšKšKœK›KŸKœK 
KKKœKKšK–K”KœK˜K™K—K–K”K–K—K’K’K‘K‘KK”KK¡K«KºKÂKÅK»K¨K¡K§KµK¼KËKÇKÁKÂKÀKÁK¹K²K¨K¢KžK™K’KK“KK“KK‰K†K‡K‰KK‚KuKBK1K2K0K4K5K3K/K6K1K5K1K3K6K4KDKHK=K5KEK_K]KIKAKFKXKDK?K?K8KFK?K5KK2K/K>KBK+K%K$K4KJK{K¦K«K‰KŒKŸK¤K K¬K§K¯K¼K°KˆKSK\KjKYKZKbKeKŠKÃKÇK‘K7K,K1K&K,K2K(K4K1K.K1K3K.K-K0K6K1KAK=KK;KK?KWKiKpKaKVKOKKKSKJK+K&K&K7KGK3K(K/K@KTK•K¶KªK’K‘K¦K¦K­KµK´KÀK¼K¬K_KXKbKZKXK_KaKvK KÊKªK?K)K$K$K)K4K1K-K,K,K1K5K4K0K1K8K;KKUKOKXKHKKBK@KEKLKUK[KeKsK}K…KŒKŽK›KšKK¢K§K¤K¦K¥K«K«K¨K©K©KªK­K±K¯K¯K´KµK³K´K¶K¯K¬K¨K¡KœK”K‹KŠKzKoKcK\KHKCKMKKKMKRKUKQK[KZK_KiK„K—K—KrKkKpK|KxK`K?K6KDKBK8KRKTKgKyK†K|KMK%K+K+K-KZKdK4K;KEK8K0K8KIKFK5KFK_KAK=KBKOKTKBKMKWKMK|KpK7K@KKK_KfK†KŒKrKxK™KwK}KKIK2KJKŠKKbKBKK/KNKDK>KKpKK¢K£K}KwK‹KfKNKoK{KwK†KK’K’K”KKK“K—K›KŸKšK˜K™K—K˜KžKšK›K—K–K–K™K–K–K’K•K–K•K–K’K“K—K”K•K–K–K”K—K“K“K‘KKK‘K—KKK‹KKKKK’KK–KŽKK‹KŒKKŒKˆK‡KˆK}K…K„K‘KžK©K¶K¼KÁKÄKÃKÆKÄKÄKÄKÈKÊKËKÐKÏKÑKÔKÓKÐKÐKÐKÐKÑKÎKÎKÐKÑKÑKÐKÐKÐKÏKÐKÑKÑKÎKÏKÎKÒKÏKÑKÒKÕKÓKÓKÔKÓKÒKÑKÑKÕKÑKÒKÔKÓKÓKÔe]rV(KIKIKFKHKHKIKGKCKFKGKBK?KCKDKEKCKBK=KFK=KCKBKGKNKOKYKbKqK}KƒKŒK‘K–K˜KœKŸK£K£K¤K¨KªK©K«KªK®K¬K¯K®K¬K¯K±K³K³K´K²K°KªK¤K¢KšK–KŠKƒKyKpK_KTKOKFKHKNKMKUK^KTKVKWKfKxKK’KtKoKgKwK„KvKeK=KDKCKKIKBK;K7K]KSKAK>KEKMKFKHKAKRKyKqK;K2KLKXKtKK†K€KoKK|K{K‰KeK7KjKK™KSK/K8K,K(K,K?KKK2K6KDKWKfKmKeKjKtK~KK—K¢K¦K®K¸KÃK¿K±KeK]K[KVKYKZK_KrKšKÂK•K:K+K'K,K-K8K4K/K6KDK8K2K.K3K5K3K,K3K=K8KJKAK=KBK-K.K4K0K,K)K*KAKcKdKgK`K{KYKQK^KiKvKnKmKwKwKxK}K~K€KKKK€K‡KƒKK‚K„KƒK‹K‡K†K‰K‹KKŒKKŒK‹KˆK‰K‹K’KŠKŠKŽKKK‹KŽKŽK•K“K“K•K•K”K•K•K—K’K’K—KœKœK“K”K˜K—KK”K‘K–K”K•K›K›K™K›KšKK£KªK¯KÄKÔKÓKÎKÍKÅK±K§K®K·KÇKÌKÍKÄKÂKÀK¼K¯K¥KœK”KKK’K“KŒK‹K†KŒK‡KƒK|KNK/K.K7K-K1K.K,K8K/K-K7K2K/K,K3KK5KFKCK2KAK1K4K'K,K'K&KFKnK‰K¡K¬K…KqKŒKfKOKvKƒKKˆKKŽKŽK‘K’K‘K“K›KKœK˜K—KšK›K—KœK›K•K›KšK˜K–K–KšK”K”K“K•K—K˜K–KK”K—K—K’K–K“K“K‘K’K‘KK“K‘K•K“K“KK–K–K‘KK’KŠKKKŽK‡KŠK„K„K‚K„K†KKK–K£K«K¶K¾KÄKÄKÆKÄKÂKÄKÆKÉKÌKÍKÍKÍKÒKÒKÒKÑKÏKÎKÐKÏKÏKÐKÏKÐKÑKÑKÑKÏKÎKÏKÑKÏKÍKÏKÎKÏKÐKÒKÓKÖKÖKÔKÕKÓKÓKÒKÑKÓKÔKÒKÑKÓKÔKÕe]rW(KJKJKEKJKGKGKHKKK?KJKCKEKGKHKIKEKIKAKBK>K>KHKMKPKLKVK`KpKxK|KˆKŽK•K›K›KŸK£K¥K¦K§K¨K©K¬K¬KªK¬K¯K°K¯K®K­K³K³K±KµK±K­K¨K¤Kž
K—K”KƒKzKoK`KVKJKHKDKPKPKQKQKQK\K]KxKŠK“KqKpKlKkK{K†KvK_K4K?KBK@KWKoKmKpK|K~KhK6K)K0K:K;K5K;KTKSKHKCKDK@KPKAK5K1KTK`KKKJK9KIKOKDK?KPKgKuKTK4KIKCKQK{K€K{KyKiKtKmKƒKzKUK„K§K’K:K$K1KK)K-K*K%K*K7K6K0K4K8K7K7K+K2K;K0K0K.KAK4KDKLKMKBK6K7K/K.K0K+K'KBKjKbKeK`KzKdKJK\KbKiKmKuKuKuK|K|K~KzKKƒKKK~K€KK…K†K†KŠK‹KˆK‰K‹K‹K‘K‘KˆKŒKŠKK‹KK‡KˆK‹KK‹KKKŒKK‘K“K•K•K’KK‘K’K’KK–K˜K™K‘K•K”K”K’K’K“K—K—K˜K–KšK›K›K K¡K¥K©K®KÃKÏKÑKÍKÌKÀK­KªK±KºKÉKÎKËKÍKÅK»K¾K²K¥KœKKŒKKK‘KŽKŠK‰KŽKƒK‚KpK=K4K-K.K+K2K.K0K2K1K0K.K+K5K8K5K4K>KAK>KGKKKPKQKQKMKDKKKJK8K/KDK9K6K?K2K3K*K+K(K)K?KpKŠKK­KKtK’KhKIKtK„KƒK‰KKKŽK’K•KK“KšK›KšK˜KšKŸKžKšKšK˜K•K™K˜KœK˜KK–K•K•K•K–KœK”K”K“K”K“K”K”K”K”K’K’K‘K”K“K’K‘KKKK’K”KK“K‘K’KŽK‹KKŽKŠK†K‡K‚KƒKKK„K†K–K¢K®K¶K¾KÀKÅKÃKÄKÆKÇKÈKÊKÎKÍKÒKÎKÑKÒKÑKÎKÒKÑKÏKÐKÎKÐKÏKÑKÑKÐKÑKÏKÏKÏKÑKÏKÏKÐKÎKÑKÒKÒKÔKÕKÔKÒKÓKÓKÓKÑKÒKÔKÔKÕKÔKÓKÓKÔe]rX(KMKMKJKDKFKJK@KHKEKDKGKEKDKCKCKEKAKK>KBKCKNKNKRKTK^KpKwK}K„KK•K˜KKŸK£K¥K¦K©KªK©K§KªK­K«K®K®K³K°K°K±K¶K²K³K´K¬K¦K KK”K“K„K{KsKbKTKHKEKEKNKSKUKUKYK]KnKˆK›KKfKgKfKnK†K€KwKXK6K=KEKTKeKhK_KmKzKxKXK:K,K7K;K4K2K?KXKNK>KEKLKGKFKFK=KKkKhKfK\KqKeKSK\KbKgKjKsKoKvK‚K}KzK‚KK{KƒK}KK‚K~KƒK…KˆK‰K‰KˆKˆKˆKKK“K‹KŽKŒKKŒKˆK‰KŠK‹KŽKŠKŠKŒKKŠK’KŽKK’K’K’K’K“KKK‘K•K“KKK“K’K•KKK™K”K“K–K›K—K›K›KžKžK«K°KÄKÏKËKÑKÏK¾K±K±K¸KÉKÓKÓKÐKÌKËKÀKÀK´K¤K™KŽK‰K‘KK‹KŒK‹K‹KKK€KMK4K)K-K.K/K.K,K:K8K3K2K1K-K-K2KCK8K?KKRKuK“K”KvKqK|K“KKKK;KKKK'K)K1KIKiK~K…K‹K~KqKmKiKtKƒK‹K§K¦K¨K¸KÄKÀK¨KVKZKrKVKXKcKfKvK§K¾KrK*K&K)K$K&K(K7KMKQKAK4K1K0K/K(K+K8K0K7K1K4K8KGKHKHKGKAK0K.K2K&K,K(K5KeKhK`K]KoK_KLKQK[KaKgKxKpKtKyKyKyK|K„K{KKK‚K‚K‚K‰KƒK‡K…KŒKŠKK‘KŒKŒKK‰KK‹KKŠK‰KˆKˆKˆK‹KKŒKKKŽK‹KŽKK‘KK‘K‘K‘K”K’KŒK’K•KKKK’K’KŽKK‘K‘K“K•K›K™K›KžK¤K¤K²K¶K¾KÊKÍKÕKÑKËKÄKÂKÅKÍKÔKÊKÁKÁKÀK¿KÀKµK¨K•KŽK‰KŠKŠK‹K‰KŠK‹KˆKKyKKSKEKKEKEKHKKKSKOKRKEKAKJKLKCKKKEK;KGK>K0K-K$K$K'K&K*K^K{KšK®K›KxK†K~KNK}K„KK†KKKK’K“KŒK—KœKœK›KœK˜K›KKšK›K˜K—K™K”K™K”K—K—K™K”K‘K”K•K‘K•K–KšK“K–K”K’K”K’K’K”K“KŒKK“K”KKKK•KK‘K‘K‹KKKKKŠKŠKˆK‡K‚K…K}K€K‡K–K§K±K¹K½KÂKÆKÄKÈKÊKËKÌKÌKÐKÐKÑK×KÔKÏKÑKÑKÑKÎKÎKÐKÐKÑKÑKÓKÑKÒKÐKÑKÐKÑKÏKÐKÐKÒKÒKÓKÓKÓK×KÓKÕKÕKÔKÔKÓKÓKÕKÑKÓKÖKÓKÔKÓKÕe]r[(KGKGKBKKBK>K@K?KDKIK
FKSKVK^KiKvKKˆK“K™K›KœK K K¨K¨K«K­KªK­K®KªK¯K±K°K­K¶KµK²KµKµK²K°K®KªK¡K™K–K‹KƒK|KnKeKSKEKGKJKFKLKMKWKtKK˜KnK^K`KdKdKiKpK{KwKkKJK;KHKDK6K'K4KkKxKhKXK:K2K7KCK2K?KMKFKAKKHKIKUKPKZKHKBKGKXKPKCKAK;KJKEK/K/K$K(K&K%K)KTKK›K¨K¢KuK~K„K]K}KˆKK‹KKK’KK”KK˜K˜KšK KœKœKšK›K™KšK›K›K•K–K›K˜K—K›K•K“K”K‘K”K•K•K™K˜K™K”K”K”K”K—K•K”K’KŒK‘K’KK“K’K“K‘KŽK’KKKKŒKŒK‹KŠKŽK‰K‹K€K‚KƒKƒK‹K™K¦K°K·K»KÂKÄKÆKÈKÊKÊKÍKÍKÑKÒKÓKÓKÓKÐKÒKÑKÑKÐKÐKÐKÓKÑKÑKÐKÐKÓKÑKÒKÐKÏKÐKÒKÒKÑKÑKÓKÑKÕKÙKÖKÕKÕK×KÔKÓKÔKÒKÕKÔKÕKÔKÒKÑKÔe]r\(KFKFK=K;K;K?K>KK:K)K(KKFKAK;K8K7KMKEK@K?KAKbKvKVKSK`K`KVKXKMKPKPKPKFK@KHKmKtKSKAK;K:KBKUKXKWKmK„KK‹KƒK^K+KKK'KSKsKcKeKmK„K¤K°KÆKÊKÅK½K¿KÏKÊK¿KÆK³KKWK_KmK[KRK]KrK€KªKÅKuK*K(K(K-K%K'K*K*K@K_KqKRK5K+K-K3K*K*K;K5K4K8K6K=K>KGKHKGKDK4K.K5K,K'K+K9KUKgKPKYKiKbK[KHKVK_KdKlKqKsKzKK}K|K{K„KK‚K„KK~KK†K„KƒK…KˆK„K‰K‡KŠKŒKKK‹KŽKK†KŠK‰KˆKŒKŽK‹KŠK‡K†K‡K‡K‚K‚K‚K…K‡K‰KK‰KŽKK’KKKK’K’KK“K•K’K“KK“K‘KŽK‰KˆKŒK…K‰KˆKK”K‘K’K˜KŸK¢KœK™K™K“K‘KK–KšKšKK‘K~KuK{K~K‡KŽKŒKK‹KƒKXK0K/K2K+K3K1K0K5K:K3K6K1K2K4K3K6K:K4K?K@KJKEK;K?KTKLKZKHKDKFKQKPKBKAK;KFKHK7K-K&K&K%K%K.KBK~KœK¥K 
KtKwK†KcK~K‰KKK–K‘K‘KK“K‘K–KžKœKKšK™KK™K˜K˜K˜K›K–K–KšK“K—K•K–KK”K•K‘K‘K“K”K—K™K“KK“K’K•K˜K•K•KK‘K’K“K•KKK’K‘KKKKKŒKŒKŠK‹K‡K‡KˆK„KK}KKŒKœK¤K®K½K¿KÁKÅKÆKÊKÌKÌKÍKÐKÑKÓKÓKÒKÑKÏKÒKÐKÐKÎKÏKÑKÑKÒKÒKÒKÒKÒKÑKÒKÏKÐKÑKÒKÑKÒKÔKÒKÓKÔKÕKÓKÔKÖKÖKÓKÕKÕKÒKÔKÕKÔKÕKÔKÓKÕe]r](KFKFK=K>KKFKUKGKXKLKIKPKZKAKAK>K7KK;K>K=KDKHKBKHK@K@K:KKAK@KBKBK2K;K;K4K9K9KK9K;KAKPKOKCK9K6K2K1K7K?KIKiK„K}KeKaKeKjKdKjKcKPKPKPKIK@K>KHK]K{KoKWKGKaKoKWKQK\KwKyKaKUKWKtK‰K”K¯K«KKvKvK‹K§K¾K½KÅKÖKÍKÏKÊKÁK©KeKVKiKtKfKYKaKkK„K°KÆK|K-K)K/K0K+K-K4K0K-K'K%K=KQKNK0K&K*K1K2K/K.K1K2KK:K@K@KJKAKAK@KK/K9KK>K1K2K=K6KMKnKKK{KgKnKlK]KgKiKfK[KLKMKNK?K>KMKXKzKxKaKyKyKNK:KGKaKsK`KJK:K_K‰K˜KšKK}KsKŠK«K·KÁKºKÅKÐKÑKÑKÀK´KzKMK`KoKkKbK\KdKK¥KÊK®KKLKEKQKTKRKFK@K,K.K?KUKGKK@K@KAK@K>KKKOKOKQKbKdKrK|KƒKK•K™K›KK£K¤K¨K«K­K¯K¯K²K²K´K±K²K³K¶K±K´K³KµK´K±K­K¬K§K¡K›K“KˆKKtKnK]KVKLKCKYK‚KžK‰KkK\KPKVKTK]KgK}K—K“KžK‹KwKZKK]KLKAKAK@KSKoK{KzKxKwK{K‘K®KÀKÈKÇKÐKÕKÕKÎKÀK¤KWKWKhKqKXKaKdKvK”KÅK»K`K&K&K(K0K,K0K2K2K/K-K*K.K3K>KIKGK0K'K*K+K1K,K5K5K8K9K?K7KBK?KLKRKNK/K%K&K*K(K2KRKmK?KRKRKcKXKKKIKQK^KfKkKiKwKxKwKyK~K‚K‚KK‚K{KK„K{K}KƒK‚K„KƒKˆKˆKK‰KŠKKŠK‘KŒK”K”KKK“KKKKsKkKdKnKdKbKYKZKSKRKWKTKRKVK]KZK^KfKbKlKpKjKhKgKfK`KcK`KWKYKYKaK^KfKoKpKsKwKuKuK{KtKlKoKnKhKdKlKtKƒK‚K~KxK|KŠKŠKK“K‘K‹KwK@K-K*K-K0K,K1K.K2K4K7K2K,K*K*K/K4K;K3K2KAKLKBK=K1K;KSKWK@KPKHKOKQKOK=K:K+K+K:KXKNK@K1K'K.K1K,K5KYK„K K£K‰K~K‘K‘K„K‘K˜K•K“K•K”K”K•K‘K 
KžK›K›K¤KœK›K˜KœK•KšK˜K™K—K™K˜K™K˜K—K•K–K“K”K”KK˜K–K’K•K‘KK‘KK—K•K‘K’KŽKŽK‘KŒKK“K’KKKKKKKŒK‹KK†K„KƒK€KKzKˆK’KK±K·KºKÃKÆKÉKÍKÐKÒKÓK×KÓKÓKÒKÓKÒKÒKÒKÓKÒKÑKÑKÐKÑKÒKÎKÏKÑKÑKÒKÓKÒKÑKÐKÒKÓKÔKÓKÔKÔKÔKÕKÔKÖKÖK×K×KÚKÖKÕKÕKÖKØKÔKÔKÕKÔKÔe]rb(K:K:K7K6K5K9K5KK@K@K@KKGKQKPKUK\KeKqK{K‚KŽK’K—KœKŸK£K¤KªK­K±K®K¬K±K°K¶K±K²K±K²K°K³K¶KµK´K´K°K­K¥KŸKšK“K‰KƒKwKpK_KQKLKUK|KžKŒKtK]KWKPKTKZKZKtKŒK–KžKŠK…KtKXK@K/K9KQKOKKLKNKYK6KCKNKHK6K4KXK?KKJKPKSK9K*K+K/K(K4KUKgKCKRKSKbKYKSKMKSK]KYKeKiKsKzKyK}K}K{KKƒK€KK}K|K~K}KƒK€K…K„K‰K‡KŠK‡K‰K‰KŽK‹KK—K“K‘K“K‘KKKƒKxKzKxK}K|KtKnKkKbK^K_K^KaK`KjKlKiKqKsKsKsKnKpKqKnKlKpKjKlKkKlKoKmKkKoKsKzK~KKƒK€K‚KƒK‚KK}KxKyKƒK…KKƒK€KƒKK’K‘K‘K‹K‡KTK4K*K-K2K+K+K-K1K2K3K;K.K+K-K+K,K/K8K2K5KGKJKAK9K6K=KWKMKHKPK?KJKSKRKDK:K+K1K7KOKPK>K2K$K&K1K*K2KQK|KšK¡KK}KŒK™KŠK‘K™K”K‘K”K—K•K“K’KžKKœKKKœKšKšK˜K˜K™K˜KšK•K—K™K—K—K“K—K˜K•K•KK‘K—K•K–K•K‘K”K”K–K•K“K•K“K“KK‘KKK”KK’K‘K‘KK‘K‘K‹K‹K‰K‰K…KƒKKKK†K•K¤K­K¸KÀKÆKÉKÌKÐKÐKÓKÓKÕKÔKÕKÓKÓKÓKÒKÒKÓKÒKÑKÐKÑKÐKÑKÎKÒKÑKÓKÕKÕKÒKÒKÑKÔKÓKÑKÓKÓKÕKÕKÕKÕKÕKÖKÔK×KØKÕKÕKÕKÕKÕKÖKÕKÕKÔKÔe]rc(K3K3K:K9K6K>K:K;K?K=KK@KCKAKJKYKYKWKZKgKqKzK†KŽK–K•KšKžK¥K¥K©K«K®K¯K¯K¬K²K¯K¯K±K¯K³KµK¶K´K·K¶K´KªK©K¢K¡K›KKK…KxKqKYKUKZKuK 
K˜KqKaKUKNKSKRK\KeK„KŒK“K‰K{KKoKTK=K0K>KUK:K8K?K8K2K&K+KCKKK+K+K3K/K2KSKpK9KSKWKVKVKIKMKNK[K`KeKfKpKqKvKwKwK|KyK„KzKyKzK~K€K~K€KƒKƒK‚KƒKKˆK‡K†K‡K…K‹K“K–K‘K”K”KK’KK‰KKKKK}KK~K|KsKtKoKlKnKnKrKuKtKwK{K~K|K|KK~K€KK‚K~K}K{KKKK}K‚K‡K‹KK”K“K•K›KžK›KˆK†K‚K‚K‰K‹K‹KŠKK‹KK‘K’KK‰KuK8K0K-K/K.K4K.K2K0K7K;K4K/K.K/K1K2K3KK]KaK?K/K*K,K/K+K4KQKyK—K£KKyKŒK¢KˆKK˜K•KŽK–K’K–K—K“KœKœKKšKšKœK˜KžK—K•K˜K™K˜K˜K•KšK–K˜K—K—KšKœK•K“K”K‘K“K”KK’K’K”K–K”K•K–K‘K‘K’K”K’KK’K‘KK’K“KKŒK’KKˆK†K‹K…K‚KƒK{K„K‰K˜K¦K²K¾K¿KÆKÌKÎKÐKÒKÓKÓKÓKÒKÓKÓKÑKÑKÑKÔKÓKÒKÐKÒKÔKÓKÑKÐKÒKÒKÓKÒKÔKÒKÒKÒKÓKÓKÒKÔKÕKÔKÕKÕKÕKÕKÖKÖKÖKÖKÕKÕKÕKÔKÕKÕKÓKÖKÔKÔe]rd(K6K6K6K:K7K8K;K;K:K@K:K;KBK>KBK>KK.K)K0K1K3K=K8K@K8K@K@K2K;KEKJKPKBK,K,K(K,K4KNKhKCKJKQKYKVKLKOKPKVKbKbKeKqKsKuKxKxK~K{KKzKzK{K|K~K~K€K~KKK†KˆK…K‡K‹K‰K‰KŠK’K‘K•K“K“K“K“KŒKKŒK‰K†K€K€K€K}KKK|K€K{KxKxK{KyK{KK€K‡KƒK‰K…K‚K…KŒK”K‘KKK‘K—K™K™K™KŸK©K¨K´K°K³K¶K­K£K’KŠK†KƒKˆKŽKK“K’KKK”KŽKŒK}KLK2K,K*K.K1K-K/K1K-K8KKPK@K3K:K9KFK`KJKEKKKRKIKSKUKNKEK/K2K;KMKaK=K2K1K2K.K)K0KMKkK•K£KšK|K‡K£KKK›K—KK”K”K—K—K•KKKšKšKœK™K™K—K”K™KK–K›K™K—K’K™K˜K–K˜K™K™K•K‘K”K’K”K’K‘KŽKKKK•K“K‘K”K”K“KK’KŽK“K“K‘KKŽKK–KKŒK‹KŽKˆK…KƒKK{K|KŒK™K¥K±K»KÃKÇKËKÍKÑKÔKÓKÖKÔKÔKÕKÓKÔKÒKÐKÓKÑKÓKÒKÔKÑKÒKÐKÐKÑKÖKÓKÎKÔKÓKÓKÐKÓKÕKÕKÔKÔKÕKÕKÖKÕKÕKÖKÔKÔKÕKÔKÔKÔKÓKÓKÕKÓKÔKÓKÓe]re(K8K8K5KK?K;K=KK:K@KhKyKTK:KdK]KzKzK„K›KœK•K‰K†KˆKKuK|K²KÝKÚKÌK¬K‘K_K]KnKfKeKoKK®K´K½K}K9K1K-K3K0K7K9K6K5K5K5K-K1K'K'K4KBK8KK=KK1K*K-K2K:KNK]KJK?KVKTKNKSKLKHKMKXK`KaKkKsKqKvKuKqKvKKvKvKyKwK‚KzK€K{K|K„KKKƒK‚K‡K‡K‹K‹K‘K‘K–K“K‘K‘K‘KK—K”KKŠK„K„K…K€K}K~KKK~K‚K~K|KyKxKzK€K~K€K‰KˆKˆK–K™KžK«K¶K¹K¼K¯K¾KÁK´K¹KÊKÆKÅKËKÊKÄK²K£KŽK‡K‡KˆK‡KŽK”KK˜KŽKŒKˆKƒKxKCK.K+K+K)K+K.K0K4K2K3K5K6K,K)K*K*K(K7K3K9K2K5K6K.K3K8K@KKKfKKKCKJKNKLKSKKK8K6K/K7KEKGKZKLK4K&K)K2K+K2KOKdKœK¦K 
K„KŒKŸK’KŽK˜K˜KK•K–K–K•K˜KšKœKœK›KŸKœKœK›K•K–K›K™K›K™K•KšK—K›K™K›K—K—K–K”K•K•K›K“K•K•K“K”K™K•K“KŽK“K’K“KK“K–K”K“K‘K’K“KKKŽKŽKˆK„K„K†K…K‚K{KKŒK˜K¥K¶K¾KÇKËKÏKÔKÔKÓKÕKÖKÒKÔKÓKÒKÒKÓKÒKÒKÒKÒKÒKÑKÓKÓKÒKÐKÒKÓKÔKÔKÓKÓKÓKÕKÓKÖKÔKÔKÕK×KÕK×KÕKÔKÔKÕKÔKÔKÓKÕKÓKÓKÓKÒKÒKÔKÐKÐe]rg(K5K5K8K7K;K7K8KAK9K=K@KCKDK:KAK?KAK4K;K?KIKMKSKTK]K\KbKqKzK„KŠK“K–K›KK¡K K¤K®K¯K¯K¯K±K³K³K¯K°K·K±KµKµK¶K´K³K³K®K¦K¨K¡K›K˜K‰K}K{K‰K“K–K…KdKJKIKHKLKRKRKUKcKKŽK›KuKfKgKtKkKfKdK?K@KAK2K@KpK~KAK>KK7KMK_KpKcKmKƒK|K„KKK›K”KŽKˆK‚KtKxK‚K¬KÑKÊK™KmK[KjKrKjKK•K®K½K¼K©KVK.K.K7K9K6K:K7K5K3K6K2K6K(K,K1K-K/K-K6K>K0KK0K5K3K6KEKNKlKMK=KNKNKOKRKJK>K7K3KBKLKK@K@K7K5K=KEKTKWK[K\KbKdKnKxK~K‹K’KœKšKKŸK¢K¨K¨K©K­K®K°K¯K³K²K«K±KµK¶K²K·K´K±K®K®KªK¨K£K¥KœK‚K‚K‡K‘KK|K\KKKGKGKIKLKMKTK[KlK‚K—KKkKhKhKlKbKhKUKAKJKKK8KOKŒKƒKFKCK-K'K'K.K3K%K*KBKdKcKEKFK;K9KQKlK?K.KBK—K±K›KKnKOK1K-KBKQKWKYKZKlKyKyKkK]K\KbKHK:KRK:KCKZK`K`KiKŒK€K…K‚K’K›K•KˆK†KxKtKŽKœKÁKÑK¹KiKUKlKsKrK‚K¢K¦K´K¬K¢KoK2K/K3K3K+K1K5K5K1K3K5K5K2K0K-K0K(K/K+K3KK8KCKKEKTKTKXK[K]K`KmK|K‚KŠK’K•K™KžK¤KŸK§K¨K®K±K­K±K´K´K±K±K¶K²K³K´KµK¶K²K°K¯K©K§K«K±K¦KŸK”KˆK‘K}KcKIKEKAKCKGKLKPKSKjKK‰KžKKlKhKnKjKaKXKDKAKLKWK?KfK˜KwKPK8K'K)K%K-K,K(K2KRKfKcK8KOKFK5KAKnKPKKQKLK]K_KFK@KK;K6K?K:K7K6K6K=KFKPKQKYK_K^K`KkKxKK‹K’K—KšKœK 
KŸK©K©K­K«K¯K°K´K±K²K³K·K´K¶K´K¶K·K¸K²K°K®K§K¨K¬KµK´KšK‹KyKiKRKAK>K@KEKEKPKOKZKtKŽK¢KˆKoKiKfKjKmKmKOK8K=K@KTKFK|K•KbKGK=K3K4K4K/K0K)K-KSKjK_K:KEKcKJK3KiK`K?KbK¨KªKˆKpK{K…KYK5K8KKKUKZKWK@K@KWK}KƒKrKYKjKsKSK_KLKgKdKcKkK‹KšK—K“K„K™KK‰KxKoK|K°KÖKÔK¾K\K_KuK~KŠK­K”KÊK»K—KyK4K3K-K5K1K/K.K-K1K5K5K8K7KKDK@KIK;K1K0K.K7KJKTKXK;KNKUKTKIK9KCKHKNK[K^KcKhKmKkKoKoKvKqKsKrKvKqKyKxKxK{KK~K€K}KƒK„K‚K~K„K‡KƒKŠKŒKŽKŽK‹K‘K’K”K’KKKŒKŒK‰K…K…K…K†K~K}KzK{KzKzKKK„K‚K„KK}K~KKŒK‹KK‘KKŽK•K—K–K“K“KK‘KŽKKK”K‰K‡KˆKŽKŽK•K”K˜K™KKŠK‰KƒKiK=K/K)K0K2K/K0K-K-K-K2K=K0K/K4K1K3K/K/K5K6K+K8KEK4K/K/K.K9KCKQKmKFK=KHKGKaKUKGKBK?KDKHK=K8KTK^K:K-K1K-K6K3KKK`K“K¥KžK›K—K™KKKŸK›K’K–K—K”K–K›K™KžK›KšKœKK›KšK›K—KšK—KœK˜KšK›K—K›K˜K˜KšK˜K•K–K™K–K”K•K—K“K˜K•K–K“K”K’K’K‘K•K’K‘K’KKKKK’KK’K“KŒKŠKˆK‰KƒKK~KzK~K‹KKªK¼KÈKÎKÓKÕKÕK×K×K×K×KÕKÕKÖKÔKÕKÓKÓKÓKÑKÔKÓKÖKÖKÔKÕKÖKÖKÕKÕKÕKÖKÖK×KØK×K×K×KÖKÔKÓKÒKÑKÒKÑKÐKÑKÏKÎKÍKÏKÐKÍKÎKÎKÍKÍKÍKÎe]rk(KK2K.K:KEK7K2K7K-K9KEKUKfKBK>KJKEKaKSKJKOKEKEK@K;K8K_K^K2K-K9K.K.K/KFKkK’K¦KKŸK“KšK¡KŽKŸKœK“K•KšK•KšKšKšKKžKšKœKšK›K˜K”K™K›K–KœK›K–K™K›K–K›K–KœK˜KšK”K”KšK–K’K–K”K—K™K”K”K’K’K’KK“K•K‘K’KK’K“K‘KKŒK‘KKŒK‡KƒKƒK…KK{KyK{KŽK K«K¿KÉKÏKÔKÖK×KØK×K×KÖKÕKÔKÕKÓKÔKÔKÑKÓKÕKÔKÖKÖK×KØKÖKÖKÖKÖKÖKÖK×KÖKÖKØK×KØKÖKÕKÔKÒKÑKÒKÒKÐKÏKÏKÍKÍKÍKÍKÍKËKËKÌKÍKËKÎKÍe]rl(K4K4K.K1K3K:K8K7K6K7K;K9KK9KGKFK@KFKK:KXK…KˆKmKaKŠKƒKK}K‚KfKaK{K˜K²K¬K}KKŽKuKrKK©KÄKÈKK‡K•K¨K±K›KÑKÇKKYK=K7K5K7K8K9K6K6K6K2K7KK>K7K7KLK:K6K4K4K6K9K5K:K9K3K0KKVKiK@K/K4K,K3K+K:KeK‰K¦K£K KK–K£KK 
KšK•K—K›K•K›K—KžKœKšKšKKšKšKšK›KšK—K˜KKšKŸKœK˜KœK™K—K–K—K–K˜K•K˜K˜K˜K•K“K“K•K”K–K”KK’KK”KKKŽKKK‰KKŠKŠKKŠKˆK‚KƒKƒK†K{KzKsK|KŒK¡K±KÂKÇKÏKÕKØK×KÚKÚKØK×KÖKÖKÓKÓKÕKÔKÕKÕKÔKÖKÔKÕKÖKØK×KÚKØKÙKØKÖK×KÖKÔKÕKÓKÔKÐKÓKÏKÏKÍKÎKÌKÎKÍKÍKËKÎKÊKÌKÎKÎKÐKÌKÍKÍKÎKÎe]rn(KAKAK3K3K-K0K3K1K6K7K?K?K8K:KK9K6K4K6K8K6K:K3K5K8K;K7KK=K>KDK7K6K>KGKDK;K7K=KFKCKCK:K:K2K5K=KDKOKGK:KYKUKRKCK7KGKIKJKHKTKWKaKeKlKkKiKjKhKlKpKsKpKwKuKuKuKwKyK{K~KƒKƒKK€K‚KˆK‰K„K‡K†K‹KK†KKKKŽKŒK‹KŒKŠK…K‡K…K…KK‡KƒKK‡K…K€KKƒKzKtKuKwKsKuKwKyKvKwK}K{K€KxKvKuK{KrKzK€K‡K—KœK–K›K”K‘KKKKŒKKoK:KUKBK6K-K)K,K.K+K'K'K*K0K4K.K*K1K9K9K7K;K:K2K2K5K7K;K:K2K/K0K?KQK`KYKFKIKUK@K\KTK?KQKEK?K6K8KEKPK_K>K0K0K/K/K5K=KiK‰KŸK§KœK‹KšKžK‘KœK›K”K–K˜K—KžK—K›KœK˜K—K›K˜KK˜K˜K˜K™KšK™K›K—KœK›K›KšK˜K–K™K›K›K˜K™K—K•K–K”K˜K•K”K•K”K—K‘K‘K’K”KKŽKKKKŽKK‹K‰K‹K‡K‰KƒKK‚KyKvKuK}KŽKœK²KÄKËKÐK×KØKÙKØKÛKØKÖKÖKÖKÕKÔKÕKÔKÕKÕKÕKÖKÖKØK×KÚKÙKÚKØK×KÙK×K×KÓKÓKÓKÓKÑKÐKÏKÎKÏKÍKÎKÍKÍKÍKÍKÎKÍKÌKÍKÏKÎKÐKÏKÐKÐKÏKÐe]ro(K9K9K3K3K4K5K6K2K0K2K;K4K8K5K:K2KKDKKK2KDK`KŽK™KˆK|K_KXKyKKsK5KxK‘K„KiKŽK“KyK{K’K·KÒKÁK§KK³KžK©KÜKÂKvK:K4K1K4K6K-K/K3K5K9K;K8K6K4K2K7K4K8K0K-K.K.K(K3K5K;K0K0K4K0K,K8K=KK?KDK7K5K0K5K9K?KRKOK9KLKRKJK=K:K@KAKGKGKTKSKXKcKeKlKlKhKfKfKnKmKnKpKxKuKvKzKvK{K{KKK€K‚KK‡K…KƒK‡KŒK‰K‡K‹K‹KKKŒK‹K‹KKŒK‹KˆK†K‡K„K…K‚K‚K‚KƒKKK~K}KK}KKuKuKtKuKsKwKwKoKtKsKrKwKwK|K…KŠK”K 
K—K–K•KKK‘K”KK‰K|KEK6KYK=K)K,K1K-K(K&K+K.K/K/K0K,K0K7KKEKKKdK‡K£K}KeK[KdKmKnKkKfKEK2K*K5K>K=KYKrK–KuKAK/K.K/K5K/K.K'KHKdK^K@K;K6K1K)K-K?KPKaK†K»K®K‰K”KgKCK=K[KKKtKtKwK€KoKGKLKLKTK8KLKyK‹K„KŽK„KzKkK`KuKfK:KnKŠK{KVKzKŽKuK{K£KÃKÏK®K‡KšK³KœKËKÈKŒK=K0K/K.K0K0K1K,K0K5K0K0K5K3K6K:KK8KAK?K8K3K5K3K6K?KLKJK=KIKYKRK@K9K;KGKKKNKPKQKUK]KbKiKiKhKiKiKnKjKjKjKtKvKwKvKvKK|KKK„K‚KKƒK„K†K„KKŠKŠKŽKŠK’KKKKŒKˆK‰K„K‡K‡KˆK…KKK„K…KKK‚K†K…KK…K€K~KyKxKuKvKzKuK|K|K~K}KKˆKK•K™KœKšK›K˜KKŒK‰KŒKK‰K‰KaK1K9KVK;K+K+K-K-K3K(K.K,K,K-K6K1K4K8K4K3KLKCK7K4K?K7K3K9K6KAK)K.K:KJKtK[K>KHKMKCK^KfKKKHKEK@K=K@K?KNK[KCK.K=KHK7K7KK8K5K3K8KBK[K}K‹K[KEK.K,K0K)K,K&K4KOKcKYK?KBK>K3K3K+K0K=KdK˜K»K–K}KœK{K\K;KWKƒKšK}KrK€KKpKUKZKXK]KTKkK‚KnKvK€KˆK€K~KeKbKXKNKmK…K|KRKfKˆKmKuK¦KËKÑKœKƒK¶K›KÎKÇKŠKCK4K,K3K5K1K8KK9K1K,K3K,K6K8K7K/K)K2K0K6K0K2K7K8K7K;K5K4K9K6K;K=K6K9K@K=K6K:K/K6K7KFKDK7KCKOKUKBKAK3KHKEKOKSKNKUK[KZKZK`KeKcKfKoKgKlKiKqKrKsKuKzKyK~K~K€KK€KƒK‡K‚K…K‡K‹K‰K‹KˆK‰KŠKKK†KŒKK‰K‡K‡KŒK„KˆK†K…K…KŠKƒKK…K†K…KˆKK„K‰K‡KKK€K†K…KƒKŠKŒKŒK”K‘K™K”K™K•K–KšK“K‡K‹KŒK’KŽKˆKKAK.KKK@KBK:K>KCKEKBK;K1K6K6KCKCK6KAKOKSKGK>K7K>KGKUKVKNKXK[K]KXKcKeKiKgKlKnKlKoKqKrKyKwK{K|KK‚KK}K€KƒK…K‰KˆK†KˆKˆKŒKŽK‡KŒK‹K†KŒK‹K‹K‡K†KK†K†KŒKˆKˆK‹K‰KƒK…K‡KˆKKŽKKŒK‘KKK‘K’K—KŽK”K‘K”K•K•KšKšK™K–KžKK•K‹K‹KK‘KŒKKˆK^K-K/K?KPK6K-K0K-K+K+K,K3K/K-K.K=K8K1K:K4K5K@KEK6K4K5K1K1K,K1K=K)K-K6KIKqK[KEKMKKKFK]K`KJKAKHK=K=K>K9KMK_KQK/K2K/K5K2K1K\KˆK©K¨K¥KšK’K¥KœK™K”K“K’KšKšK—K˜KœKKK™K™KšKœK™K—K™K˜K˜KœKšKK™KšKK˜K˜KœKœKœKšKšKšK–K˜K˜K•K–K—K’K•K’K•KK“K‘KKKŒKKKKŒKKŽKK‰K‹K„KKK}K|K{KuKxK†KK°KÂKÎKÓKØKÚKÚKÜKÚKÚKÛK×KÖKÖKØK×KÕK×KØKÚK×K×KÖK×KØKÙK×KÖK×KÕKÕKÓKÓKÐKÐKÒKÏKÐKÒKÐKÑKÒKÐKÑKÏKÑKÒKÑKÓKÒKÔKÓKÔKÔKÔKÓKÓKÓKÔe]rs(K>K>K3K/K6K5K2K5K1K4K1K/K5K4K0K3K.K5K@KTKfKbKiKbKjK_KgKmKvK}KƒK‡KŽKK“K—KœK£K£K«K­K±K²K¯K´K²K²K²K±K²K³KµK·KµK²K°K¬K§K¥K›K”KK‚K~KoKZKVKGK=KCK@KJKbK†K–KyKeKWKaKcKWKXKUKMKIK:K;KCKEKKKrKfK6K;K2K5K2K+K#K*K7KQK`KSK7K4KAKEK6K/K1KAK~K¢K¹K†KK‹KwK‘KsKMKIK‰KœK‡K…KzKkKiKaKvK†K€K\KPKRKQK€K~KuKŠKrKGK7KK1K*K*K+K'K1K2K-K2K:K4K3K=K6K7K8K=K7K1K4K8K8K0K3K8K,K+K:KOKsK^KGKSKPKJK^KYKHKHKHKBKBKBKDKNKWKOK4K2
K3K9K6K.KYK†K£KªKŸKœK’K¢KœK—KKK“K K›K˜K˜KœK›KŸKœK›K›KšK™K—K›K˜KšK›KšKšK˜KœKšK•K›KK•K™K—K˜K–K—K˜K–K‘K•K”K–K“K“K•K’K”KKK•K‘K‹KKKK‹K‹K‹K‰K…K…K€K~K|KxKzKsKuK‚KKµKÆKÍKØKÙKÚKÙKÜKÚKÚKÚKÙKÕKÕKÖK×KÕKÙKÙKÙKÙK×K×KØKØKÙKÕKÒKÔKÔKÒKÑKÐKÒKÒKÐKÐKÑKÒKÓKÓKÔKÓKÒKÐKÓKÔKÑKÒKÒKÓKÒKÓKÔKÓKÔKÑKÓKÑe]rt(K5K5K4K2K/K2K7K1K/K1K2K0K0K1K1K0K+K3KGKWKdKiKlKhKeKbKhKoKrKK†K‡KˆK’KK—K›K¡K£K¬K­K­K®K±K³K®K´K®K²K±K²K¶K±K´K´K«K«K¦K¤K¢K—KK‡KyKnK^KOKGK@KJKDKKKZKyKK‚KmK\KdKaKYK`KYKJKBK;KGKPKJKMKeKWK-K7K7K-K-K.K'K*K5KVKbKJK3K8K@KGK5K9KCKBKiKK¼K‡KK‰K^KvK‹KvKSKkK¡KšK„KKgKmKfK~K‚KlKBKGKWKVK|KƒKuK“KzKAK7K8K^K…K}KfKrKŒK|KƒK­KÖKÜKÍK¶KÖKÂK~KPK8K,K)K)K-K-K5K9K5K0K1K5K0K,K.K0K>K9K5K/K;K;K3K2K&K2K3KBK3K/K/K3K2K7K2K1K2K0K5K8K:KK9KK:K4K;KDKJKGKFKOKTKZK\KXKaKjKgKgKlKmKqKoKtKsKxK~K|KK€KyK‚K„K‰K‡K‡K„K‹KŽK‡K‡K‡K‡KŠKŠK‹KK‰KŠKKK‹KŠKŠKˆKKKKŽKKK”K™K˜K—K˜KšK¡KŸK¡K¦K¡K¢KžKKK›KžKKKžKœK˜K•K“KKK‘KŽKK‰KZK2K,K3KTKDK4K2K"K(K+K+K0K4K.K0K3K5K6K1KCK9K7K9K>K@K2K2K5K7K+K5K>K+K1K;KRKtKaKEKLKCKBK_KXKTKLKMKBKFKFKBKPKUKMK9K4K0K;K8K(KRK‹K¤K¬K§K¢K–K¡K¡K—KKŽK˜KœKK™K›KžK›K›KKKKœKK™KšK•K—KœKšK˜K™KšKœK™K•K™K•KšK—K™KšK˜KšK“K“K—K–K–K‘K“K–K•K’KK•K‘K‘KKŒKŽKK‘K‰KŽK…K„K„KK|K~K|KsKvKvKKžK¸KÄKÎKÓKÙKÚKÛKÚKÛKÚKÙKØKØKØK×KØKÕK×KÙK×K×KÖKÖKÕKÖKÚKÔKÔKÔKÓKÔKÑKÒKÑKÑKÑKÓKÓKÕKÔKÓKÔKÔKÔKÓKÒKÓKÓKÒKÒKÒKÒKÓKÒKÒKÓKÑKÑKÑe]ru(K4K4K0K7K3K7K1K2K0K3K/K/K.K)K-K/K2K7KNKZKfKiKgKiKgKfKhKkK|K}KƒK‰KŒK‘KK˜K™K¢K¦K¬K¯K­K°K±K°K±K°K²K²K²K±K³K¸K³K²K®K«K¥K KžK•KŒK…K~KqK`KUKJK>KBKHKFKUKpKK}KrKiKqKbKZK_KVKGKAKDKWKZKPKEKdKDK5K?K:K.K.K)K)K)K>KZKZKLK-K2K=KDK=K8K,K3KNK”K³K‚KzKK]KaKwK‡KkK[K‘K¢KˆK{KmKoKtKsKqKZKHKMKbK\K\KpKvK‘KƒKVKPKHKjK„KqKIKvK‚KvKŒKÃKÚKàKÈKËKÐK›KIK8K6K2K.K1K/K4K4K4K6K-K2K0K0K2K6K5K7K>K:K:K@K9K:K-K'K.K0K8K9K2K/K6K8K;K2K-K0K6K3K3KK;K;K6K;K>K:KDK?K@K:K,K.K2K8K>K?K7K=KK:K5K9KGKFKDK;KLKTKZKWKYK^KdKhKhKgKjKmKnKnKpKuK{K~K€K~K|K|K„K‚K„KˆK…KŠKˆKŠK‡KŒKŠK‹K‰KˆKKKKŠK‹KŽK‘KŽKK‘K•K’KK”K‘K•K—K™KžKŸK¤K¥K¥K©KªK§K¤K 
KžKŸKK›KKŸK›K›K—K˜K’K“K“K”K‘K‹KK;K-K+K3KNKDK/K-K.K+K1K,K.K3K0K2K4K1K1K6KLK6K8K:K?KAK1K1K?K8K+K3K8K*K2K;KXKwK`KDKLKPKHKbKQKMKJKOKGKEKKKTKSKOKFKAK0K1K@KK8K2K3KGKŠK¥KlKmKuKcKtKfKxKyK\KuKšK’K‚KjKpKyKiKfK_KNKOKXKTKVKaK„KzKƒK`KcKcKtK†KiK3KgKƒKbKvK¥KÏKÚKÏKÑK¡KRK5K1K3K.K1K3K5K5K5K;KKK7K7KAKŠK§KhKdKhKVKrKxKrKrKaKiK…K™K„KuKpKyKlKcKeK[KWKOKHKLKUK‹K~KKsKpK€KK…KgKK/K6K1K6K9K8K/K,K-K/K-K0K7K=K9K9K5K8K8KDKCK;K7K1K1K0K7K:KBKBK;K;KAKFK6K2K3K=KIKNKCKKKK:K;K:K7K-KIKŒK©K¯K¤K K—K¥K¢K‘K‰K›K›K—K—K¡KžK KšKœKœK™KšKœKžK›K™K–KœKœKšK—K–K—K™K›K•K—K–K–K–K™KšK˜K™K“K‘K”K“K–K—K•K”KK‘K•KKKKK‡KŽKŽK‹KŒK‹KŠK‰KK‚K~K{KyKvKpKvKˆK§K¾KÇKÒK×KÚKÛKÚK×KÙKÚKÛKØKØKÖKÕKÔKÕKÕKÕKÕKÕKÕKÒKÔKÕKÔKÕKÖKÔKÔKÔKÔKÓKÖKÔKÔKÕKÔKÔKÖKÓKÒKÓKÑKÎKÏKÏKÐKÐKÎKÏKÐKÏKÐKÏKÏKÌKÎKÍe]rx(K-K-K1K1K2K.K-K5K9K1K7K/K1K3K3K0K6KKGKMKRK]KhKnKlKeKcKlKdKHKIKOKEKXK]K_K`K`KVK;K5K>K4K.K)K'K(K/KIKdK[KAK+K*K.K3KK0K,K,K1K1K1K0K8K7K9K8K7K:K:KEKKMK4K-K)K0K)K-K4K,K/K1K;K6K;K@K;KK=K;K1K0K;KAKGK>K0K-K4K4K/KEK\KqKeKEKPKRKNKiKTKYKLK?KEKDKLKKK[KMK3KMKBK?K7KKFKUKKKRKUKaKbKbK^KhKmKnKqKvKvK{K‚K†K†K‡KƒKˆKK…K…KKŠKˆK‰KŒKK’KKK‹K“K•K•K“KKKŸK¥K£K¢KœKœKŸK 
K¡K§KªK®K±KªK¨K¡K¨K©K¯K¤K¦K¥K£K£K¢KK•K•K’KšK—K’K‹K`K4K1K-K-KLKPK1K1K)K0K.K.K1K4K+K3K=K4KK7KJKaKsK^K8K.KAK7K5K5K&K&K.K@KdKTK@K,K'K/K:K7K?KLKPKOKnK›KWKmKnKGK6KCKEKFK^K{K„K{KnK…K‡KK|KxKKwKuKjKUKFKPKuK¢K‰KcK`K€KK¡KmK.K;K`K[K}K|K†KÄK¾KUK2K6K[KAK1K,K2K5K4K6K4K5K:K1K.K8K8K0K7K7K`KAK>K;K;K7K/K(K&K1K7K8K=K3K2K/K0K3K?K5K3K0K.K0K4K3K0K2K;K5K;K?K7KK=K@K4K-K0K5KKK7K/K:K0K0K7K7KMKbK}KkKOKNKKKJKqKJKVKOK@KPKMKNKMKSKAK3KIKYK>K1K7K,K>K‰KªK®K§KžKšKžK¡K‘KK•K—K˜KK£KšK¢K›KœK™KšK™K›K’K—KœK—KšK˜K˜K–K“K–K”K”K˜K•K•K”K–K—K™K™K™K”K‘K•K’K“K‘K”K“KK‹KŽK’KKŠKŽKKŒK‹K‡KŠKŒKƒK…K~K€K~K{KtKsKuK€KšK¸KÊKÐK×KÚKÜKÝKÛKÚKÜKÙKØKÕKÒKÕKÓKÔKÔKÓKÔKÔKÕKÖKÕKÖK×KÕK×KÖK×KÖK×KÕKÔKÔKÒKÒKÏKÑKÎKÏKÍKÍKÍKÍKÎKÎKÍKÏKÎKÌKÏKÐKÌKÍKÍKÍKÉKÇKÈe]r|(K/K/K.K-K1K9K2K3K-K-K,K-K,K3K5K,K6KDKUKUKWK\K`K_K`K`KfKoKzK}K„KˆK‰KŒK•K›K›K£KªK©K®K±K±K¯K°K°K¯K¬K°K°K°K³KµK´K±K²K«K¨K¤KœK“KŽK…KKpK_KTKIKGKEKHKQKTKZKaKcK`KeKVKKKIKK5K-K0K3K@KZKUKDK,K2K6K>K7K-K;KQK[KK™KVKbKeKFK9K3K9K7K5KJKUKlK…K‹K‰KŒKˆKjKpK€K‚K‚KxKiKdKdKšK”K}KwK|KK˜KƒKnK^KMKHKyK]KBKVKQK:K@KkKFK2K3K3K8K5K3K@K:K3K2K0K1K7K6K7K/K9KAK=KK:K0K3K8K5K.K2K>K7K2K.K2K.K6K2K4K5K;K:K4KCK;K7K7K8K8K9K:K7K;K;K;K6K:KCK@KBK6K5KK3K7K?K>KIK8K4K2K*K1K5K?K8K+K0K2K5K7K6KDK[KzKiKRKOKSKMKsKMKMKMK@KPKXKIKRKVKFK/KJKWKK8K1K1K0KK.K.K)KKKEKQKSKVKXK]KlKmKqKxKzKzK{K|K~K‚K€K†K‡K†K‹KˆKˆKŠKK•K‘KK’KK—K–K˜K–K—K›K¨K¥K¤K¡K¥K¨K¦K¨K¬K¬KªKªK¥K¦K¨KªK¨KªK©K©K¥KœKK‘K’K’K—KŠK„KpKRK,K%K+K2KEK@K1K5K6K2K9K;K:K6K5K4K5K7K9K:K;KBKLK8K4K2K1K7K8K1K7K2K4K9K.K;K?KMKhKuKhKGKJKPKMKoKDKMKFKAKPKWKOKFKMKKK7KFKXKGK4KK9K4K8K@KLKDKOKKK;K3K0K-K*K)K)K0K7K?K@KFKRKQK\KcKjKoKuKqKuKvKtKvK}K~K}K…KƒKƒK‚KŠKŠKKŽK“K’K”K“K—K“K—K”KšKœKŸKŸK¤K¤K©K¦K©K¬K¬K±K¨K«K¨K¥K¥K¨K¦K¦K¦K¡K¤K›KK’K‘K“K–KŒK‚KnKRK-K(K+K/K?K9K3K3K8KK=KEKIK:K1KJKHKBKCK2K-K.K8K4K.K)K(KKKˆK˜K„KxK‹KtKMK9K.KNKSKKKQKfK\KaKzKYKcK|KxK{KŠK”K—K‡K…KK‰KoKrK…KŽKŒK™K¥KƒKQKBKHKVK]KUK]KOK?K5K6K4KKDKCKFKMK@K2K3K-K*K-K7KEK>K/K/K2K4K:K5K5K3K9K3K.K.K/K7K3K3K0K.K2K7KKCK:K4K9KK8K4K5K4K.K/K3K=K7K?KOKPKWKYKbKdKfKcKkKpKrKyK|K~K|KK„KƒK€K†K‹KŠK‰KKK‘K“K’K“K’K˜K–KœKKK›K¥K§K¦K¨K¤K¨K«K­K¥K¡K¤K£K¥K¦K¥KžKŸKK™KšK’K“K‘KKŠK}KnKXK/K,K1K4K
BK9K5K4K9K;KK9KEK]KNKJKRKWK\KxKmK]KKnK\KlK…K™K•KšK™KzKyKrKyK‹K–KŸKµK¢K]KFKNKlKoKkKyKZK9K3K3K0K7K0K2K3K2K6K1K1K5K2K8K6K3K9K8K9K=KBKCKHKQKAK8K8K0K1K8K:KIK?K7K/K3K5K>K3K:K9KK=KBKAK9K9K5K8K9K=K:K?K9K9K8K3KK@KOKIKJKLK\KVK]KXK[K`KkKpKvKpKtKzKuKrK€KK€KƒK…K…K‹K‘K‹K‘K”K”K–KK˜K—K™KœK¨K¤K¨K¦K§KªK¨K£K¡KŸKŸKžKžK£K™KŸK¡K˜K“KKŽKKŒKŠK|KjKUK1K,K+K5K@K;K=K@K:K6KK;K6K@KTKIK9K:K4K;KHK3K7KKTKbKoKgKJKXKUKRKbKNKZKLKJKOKaKQKVKTKJK?KSK\KGK?K;K1KPK‰K­K«KžK›K™KžK›K’KŠK‡KƒK‚K‰KˆKŒK‘KŒK‘K‘KK‘KK’KK”K›K“K–K’K˜K˜K•K–K–K“K˜K“K•K”K–K”K•K•K˜K‘K•K‘K‘KŽKKKKŽKŒKŠKˆK‰K‰KŒKˆK„K‹K‡K‡K‡KK~K}KzKwKsKtK€KžK¸KÅKÏK×KÚKÜKÞKÛKÚKÙKØKØKÕKÔKÔKÒKÕKÕKÕKÕKÓKÔKÕK×KÕKÖKÖKÖKÖKÓKÓKÓKÓKÑKÎKÐKÎKÏKÎKÍKÍKËKÊKÇKÆKÄK¿KºK´K§K¡K”K‚KtKaKNKBK9K3K1K0e]r‚(K-K-K,K6K/K/K1K2K5K2K0K,K*K+K4K7K1K1K9K=KLKUK_KaKeKiKkKqKzK‚K€KŒKŒK–KšKœKŸK¥K¦KªK«K®K­K­K®K®K«K¯K²K¯K°K±K°K³K°K­K­K¨K¦K›K’KK‚KxKiKZKQKDK6K7KAKIKdKvKKbKWKFKXKeK2K-K+K+KCKPK`KlKLK>KMKNK>K2K9KAKFK;K>KAKUK>K9KEK2K)K+K7K6K;K$K!K0KyK¤K‰KxKŠKƒKWKAK8K@KQKMKGKEKAKVKkKyKhKKqK[K]KeK…K˜K…KrKpK{KKzKŒK—K›K£K¥KvKsKzKˆKwKrKKoKBK2K2K0K.K0K2K6KK4K8K2K6K3K1K:K4K8K8K4K0K3K3K7K/K*K/K2K,K8K3K6K8K?K;KK;KBKFKFKGKEKDKAKFKMKTKZK^K^KfKoKrKwKqKyK{KyKƒK…KƒK‡KŒKKK“K˜K—K™K—K‘KœKŸK¢K¦K¦K¤K¤K§K¢KŸK—K›K™KšKšKšKžK–K“KKŒKK‰KˆK‡KvKaKAK+K-K.K/K9K9K2K6K8K9K6K3K7K3K;K8K7K7K1K9K6KKK@K4K3K4K7K:K4K6K/K3K8K6K@K>K@KUKaKrKfKOKWKUKLKbKMK[KHKNKUK`KRK\KZKIK8KMK\KHKK7K9KKKBKK;K>K9K@KHKEKAKBKBKAK4KK>KLKHKGKNKMKGK=K=K:K>KDKKK=K1KYKK±K°K–KšK¥K•K’KˆK€K}KxKwKxK}K‚K„K…KƒKˆK‡KŠKŒKKK”K’KKK•K’K•K—K•K“K˜K•K—K”KšK•K”K“K”K”K”KKK’K‘KKKŽKKŒK’KKK‰KŠK‡KƒKƒK‰K‡KK‚K|KxKwKqKuK|KK¯KÀKËKÓK×KÜKÜKÜKÛKØKØK×KÖKÕKÑKÓKÕKÕKÖKÑKÔKÓKÔKÔKÔKÓKÔKÔKÐKÒKÑKÑKÏKÏKÑKÏKÏKÌKÊKÈKÄKÁK¾K»KµK±K¦K›KˆKKfKJK:K4K-K+K+K+K.K.K+K+e]r„(K/K/K0K,K-K-K1K0K+K/K*K,K/K*K5K*K)K2K?K@KBKTK^K_K_K^K[KdKtKzKƒK‰K‹K”K–KŸKŸKŸK¥K¬K¬KªK®KªK­K¬K¬K¯K­K°K°K±K¶K´K±K®K¨K¢K K›K’K‹KKpKeKYKCK9KAKJKlK‘K®KÈKŒK^KeKsK 
KwK0K-K*K)K1KFKQKUKHK=K;KK?KAK;KBKAKEKDKHKNKRK^K\KZK`K`KbKgK]KfKmKrKxKyKtKwKKK‚K‚K…K…K†KŠK“K”KK“K–K–K”KœKžK¢K¦K¡K¡K¡K—K–K–K‘K˜K˜K‘K’K‹K‡KK†K‚K‚KŠKKiK\K@K9K3K4K8K5K5K6K;K7K8K7K5K9K2K?K7K5K8K:K6K8KMK>K3K7K:K7K.K-K4K4K.K/K9KAK?K>KSK\KqKmKQKTKWKTKbKSKXKLKVKIKYKPKXK^KGKAKEKKKRKCKMK4KUKŽK³K±K”KœK£K”K“K‡K|KKqKpKwKxKyKzK‚K†K}K„KˆK„K†KŠK‡K‰KŽKKŠKKŒK“K•K“K‘KK“K’K•K’K˜K–K”K’K“K•K’K‘K•K‘K‘KK‹KŠKKŽK‰K†K…K…KK„K‰K‚KzK{K{KwKuKsKvKyK•K³KÃKÍKÒKÖKÜKÜKÛKÙK×KÕKÕKÓKÔKÓKÒKÔKÔKÕKÒKÓKÓKÔKÓKÔKÔKÑKÒKÓKÓKÑKÒKÏKÐKÏKËKÉKÉKÆKÅK¾K¸K±K¨K K™K‡KsK_KKKK3K0K/K)K,K,K1K(K)K'K*K.K1K?KFKLKXKcKaKYK]KdKlK{K€K†K‰K’K˜KžKK£K¦K¬K¬K¯K¯K®K­K«K­K¯K®K¯K­K²K²KµK²K­K«K¡K KšKK‡KzKrKaKTKEK=KLKvKœK»KÓK¡KZKfK[K‰K¦KfK2K'K+K.K/K?KSKNKZKAK4K6KK=K?K>K9K8K9K>K;K;K@KAK5K.K6K6KKFKIKWK^K`K\KaKfKdKbKkKmKnKrKsKyKuKxKyKuKwKwKyKyKƒK†KƒKzK}KŠKKŒK‹KŠKšK›K™K—K KžKKžK¢KŸK¥K©K¨K¦K¤K¢K£K£K K§K¢K›K–KKŒK…K‰K†K‰K‰K~KkK`KAK2K1K0K8K3K9K;K8KBK8K?KCK3K:K7KAK;K8K-K7K;KPKJK9K:K:K;K0K4K>K8K2K.K8KDK7KAKOK^KsKgKMKWK^KZK`KVKUKGKSKNKQKRKYKWKKK;KLKIKSK=KGK5KYKKµK±K’KŸKŸK“K†K{KqK~KrKnKsKpKqKtK{K€KKKKƒKƒK…K‹K…K…K‡K‚K‡KŠKˆKŽKK“KKŒK“KK’K’K˜K•K—KKKŒK‘KŽK‘KKŒKŽKŒK‹KŠKˆK†KŠK‡K‡K…KƒKK€KK|KvKsKuKtKK KµKÆKÏKÓKÙKÝKÛKÙKÙK×KÔKÔKÓKÒKÒKÓKÔKÑKÔKÒKÒKÔKÒKÒKÑKÒKÐKÒKÐKÒKÐKÑKÍKÎKÌKÈKÄKÂKÁK¼K¶K­K¡K™K‡KuK_KIK@K7K-K.K5K/K%K(K&K/K.K1K/K3e]r†(K/K/K.K/K1K0K,K0K(K(K)K(K,K*K*K&K'K/K3K@KIKKKWK]K]KXK^K^KhKuK~K…KŽKŽK—K›K¡K¤K¦K¨K­K«K¯K±K«K©K«K±K­K®K°K³K°K­K¬K¯K¨KžK 
K›KK‡KyKmK\KTKIKVKyK¤KÊKÕKŸKRKLKKKGKƒK™KcK5K-K(K&K'K:KKK_KZKCK2K1K6KFKDK8K7KK9K@K?K8KK;K7K5K6K7KK?K5K2K8K;K6K5K5K9K8K5K7K;K=KAK6K7K5K5K0K1K9K8K5K/K8KJK:K:KSK\KlKmKRKSKPKWKjKVKTKLKJKLKZKVK\KZKHK=KFKIKFKKKFK:KdK™KµK©KK¡KK†K€KsKfK{KvKlKhKmKqKsKqKtKwKuK|KzKuK|KxKKK{K~K‚KƒK†KƒKƒK„K‰K‡K‡K‡KŠK•K’K–K’K’KK•KKKKŽKŽKKŠKŠK‰KˆK‡KŠK†K†KK€K~K€K{KzKvKrKuKvKK®KÂKËKÔK×K×KÚKÙKØKÔKÓKÑKÑKÒKÐKÓKÐKÐKÐKÔKÑKÒKÐKÒKÐKÑKÑKÏKÍKÏKÎKÐKÌKÊKÇKÃK½K¹K²K°K£K™K‡KpK^KHK9K0K,K$K)K)K+K*K*K(K1K2K1Ke]rˆ(K2K2K;K>K7K3K4K9K/K-K)K'K+K&K(K-K,K)K4K?KDKIKNKVKQKLKMKWKhKyK€K†K‹KKšK™KŸK¡K¦K­KªK°K¬K®K®K­K¬K®K«K«K­K®K¯K±K±K°K§K¤KœK”KKƒKvKnKdKlK‡K­KÒKÛKœKIKYKWKBK?K]K²K‹KKK@K7KAK2K'K0KFK]K[KLKFKAKBKBKCK:KCKOK7K2K4K:K@KEK3K*K/K6K.K4K3K)K9KhK‘KzKwKyKpKK˜KzK`KKKLKYKOK=K9K3K>KGKUKeK[KKKRKoK‰K¤KšK€KaKJKUK|K˜KK¤KK‰KvKGK@K?K6K?KtK£K´K K¥K“KrKDK+K(K)K+K/K+K/K-K1K3K2K7K>KKKHKOKWKFK1K-K&K(K.K3KJKPK3K2K3K-K1K.K-K/K6K3K8K5K8K-K1K1K0K0K1K-K,K2K5K3K2K4K9K9K:KK2K4K5K*K'K3K8K3K0K4KIK>K;KNKXKqKmKRKSKPKWKcKQKYKMKQKSKUKRK^KbKMKJKJKQKHKGK=KDKeKžK²KœKK¤K™KKKsKeKyKvKsKiKiKmKpKkKmKqKqKzKyKsKyKwKyKzKqKwK{K{KzK}KK|K‚KK„K‡K„KŒKŠK‰KŽKKŒK’KŒKK‹KŽKŒKKKŠKˆKKˆKŽK‡K„KK~KK{K{KwKxKnKrK{K“K³KÁKÌKÔK×KÙKÙKÙK×KÑKÑKÐKÑKÑKÔKÑKÐKÏKÑKÖKÑKÑKÏKÏKÐKÏKÏKÏKÏKÎKÌKÍKÊKÇKÃK¿K»K°K¬K¨K›KŠKuKbKPK:K/K-K+K1K.K,K)K:K%K+K,K3K>KFKEKBKEe]r‰(K9K9K4K6K7K:K;K5K.K-K0K)K+K+K(K*K+K)K3K=KDKFKKKRKOKLKJKRKbKuKK‡KŠKK”K™KK¡K§K«KªK¯K­K®K­K®K®K®K®KªK®K«K®K°K®K¬K©K¤KK’KŠK…KvKtKqK‘K±KÙKÕK—KMKDKvKfKIKK:K?K;KJKRKGK8K2K5K4KRKNK2K/K)K1K1K0K/K/KBKpKK€KwKKhK~K˜KK~KhKKKOKRKK9K9K?KMKSK>K7K9KKlK‚KˆKyK‚KqKbK‰K”KŽKƒK[KHKFK?K+K)K,K;KPKwK~KeKnK‘K›K˜KqKkKUKQK_KqK~KK˜K˜KxKiK„K‚K]KJK;KAKVK©K¬K³K¯KžK—KuK?K,K0K/K1K.K4K.K7K4K:KDKDKOKVKRK@K7K6K*K3K+K;KVK_K2K.K0K1K4K5K2K7K1K2K9K:K8K2K.K.K0K.K2K1K2K6K0K3K1K5K9K:K9K9K=K5K2K3K9KEK;K5K9K-K0K5K-K-K@KFKJKRKQKUK`KfKjKpKrKwK}K‚KƒKKKK…K{KyK€KK‚K„K…KƒK†K‡K†K„KˆK„KˆKˆKˆKŽKŽKKŽK“K•K•K˜K”K–KšK›K˜K›K–K•K™K–K“K–K“K•K”K–K˜K˜KKŸK 
K¨KªK®K´K³K¹KÁKÁKÁKÅKÇKÇKÈKÇKÇKÉKÄK·K¡KˆKjKKK=K:K3K:K6K:K3K4K-K6K3K9K7K4K.K3K/K(K*K.K1K,K6K/K5KSK@KOKXKiKrKRKLKTKUKWKNKdKGKRKMKFKOKZK]KIKJKMKPKAKHKHK@KoK£K²K˜KšK K’K~KzKsKiKzKnK{KyKoKfKpKmKqKtKoKmKvKrKoKrKqKsKsKsKuKxKpKtKvKuKtKtKvK{K{K{K‚K~KK~K„KƒK…K‡KˆK‡K‰K‡K‰KŠK…K…KŒK†K„K„K~K€KK}KzKvKtKpKtK~KœKµKÇKÏKÓKÕKØKÙKÖKÔKÐKÍKÏKÐKÏKÏKÐKÐKÒKÓKÐKÑKÐKÑKÑKÐKÐKÐKËKÌKÌKËKÈKÉKÁKÁK¹K²K¬K¢K”K†KqK\KEK7K0K1K'K$K&K)K)K.K2K:KK3K0K.K4K-K@KGKRK8K4K3K.K2K5K:K,K0K1K1K5K7K7K-K/K0K4K3K1K+K/K2K4K4K7K5K7KKAK@K:K;K;K@K8K1K8K4K6K9K3K6KK@K;K5K7K;K5K7K6K=K:K6K/K2K-K4K1K=K5KKUKPKSKZKgKrKWKHKQKQKXKUKcKMKTKRKLKQK_K]KJKPKRKTKHKMK>KEKoKªK­K’KšK™KK~KzKzKuKKtKzK‚KKyKrKqKsKqKoKoKwKuKqKsKoKpKtKpKoKrKpKmKpKqKsKtKtKvKvKyKKyK{K€K€K~K€K‚K}K„KˆK‚K‡K„K‚K…K†K„K„K„K„K€K|K|KyKxKsKvKtKK£K¼KÊKÑKÓKÖKÖKÖKÔKÑKÍKÍKÏKÎKÎKÐKÎKÌKÑKÑKÑKÑKÑKÑKÑKÒKÑKÎKÉKÍKÊKÄKÃKÄK¿K¾K·K±K©KŸKKxK_KJK@K1K,K.K*K+K/K-K3K5K=K>KEKOKUKRKUKQKOKIe]rŒ(K@K@K?KFKIKBKAK:K9K4K3K4K2K2K.K&K*K'K-K7K;K@KJKJKFKBKKKMKYKjKvK|KƒK‹KŽK•K›K K£K¨KªKªK¬K®K­KªKªK«KªK®K©K¯K®K®K©K©K¦K¡KŸK–K‘K’K K»KÍK®KkKGK6K@KPKVKJK2K0KVK›KwK5K(K%K'K)K%K!K'K9KTK]KYKJKEKEK8K=K?KEK>K/K8K8K:KCKvK[K>KK>KAKDK9K8K7K8K2K5K>K6K0K2K5KKFKZKXKZKoKsK_KKKTKPK[KTK^KEKQKRKLKNK`K[KLKKKNKQKOKSKBKIKtK²KªK”K›K–KŠKKxK~KuK†K~K}K€KK{KuKtKyKwKqKsKsKsKsKlKpKpKpKoKrKnKoKjKoKlKmKiKoKlKnKuKoKrKwK{KxKxKyKwK|KzKyKyK}K‚KKKƒKƒKKK~K|K{K}K{KxKrKuKsKK£K¹KÆKÎKÑKØK×KÕKÓKÏKÎKÌKÍKÍKÎKÏKÏKÑKÒKÓKÐKÐKÏKÒKÑKÒKÒKÐKÊKÉKÆKÂK¿K½K¼K¸K±K­KžK“KwKgKKK:K.K-K'K/K/K.K2K7K9KKBKKKNKUKLKSKNKJKIe]r(KKKKKCKHK>K>KCK=KK5KBK…K©KoK2K)K.K.K-K(K(K,K4KKKfKXKNK9K3K2K6KIKIK3K8K8K9K:K@KnKjKNK>K;K5K9K7K0K6KIKDKKKiK€KKŠKuKYKaK€KKŽKuKTKSKTKVKUKaKpK†K‘KŸKœK„KUKMKCKHKhKnKVKSK\KeK{K‰K‰KnKGKJKFKcK`KRKsK˜K™KwKwKyKK²KKPK.K,K/K7K1K6K?K>KDKCKKKXKNKK,K+K5KEK=K.K5K5K9K6KCKxKkKUKGKAK>KLK@K8K7K>K5K9KGKqK„K†K~KeKRK\KƒKK„KxK{KxKzK~KxKKšKœK¦K‘K\KHKRKOKOKUKwKoKXKaK[KjK€K’KKWKCK>K^K‘KuKvK¡KKwKvKWKXK¦K¬K‚KIK:K+K,K2K6K6KK5K/K.K3K:K4K2K3K2K3K,K3K9KK6K6K4K1K.K1K5K4KBKDK@KBK;K4K:K=KBKKKVKiKsK~KƒKK”K™K£K¥K¨K«K¬K©K¨K­KªKªK¬K«K©K©K¨K°K°K®K«K©KªK¬K±KÂK¿KšK{KfKXKFK>KAKAKLKXK?K`KŒK§KK6K'K
!K'K$K*K-K&K"K+K1KFKWKOKJK/K+K9K>K;K-K0K4K0K8KHKkKqKiKQK:K;KEKCKK>K)K*K0KNKˆKKƒKsK\K[KjKtKoKvK‰K‰KKŽKŽK–KžKKKkK?KBKSKcK]KLK[KsKlK`KaKdKtK{KxKdKPKMKdK”K˜K”K·KŸK…K}KSK5KyK·KªKpK=K)K+K*K0K:K6K4KAKIKWKHK;K.K1K,K3K.K7KTKUK=K6K2K/K:K8K1K,K2K+K0K-K6K;K6K7K5K5K-K0K3K.K4K/K7K3K0K+K0K6K>K>K?K9KKFK8K;K:K@K4K4K7K9K;K1K2K4K>KOKIKYKWKbKmKvKyKzKzKzKqKzK~K€K}K}KK{K~K}K}K|K~K|KK„KŒKŠKŠK„K‡K…K‡KK‹K‡K‹KKŒKKŽKŽK‹KˆKŽKKŒKK‹K’KKKKKKK”K™KšK¡K¥K¨K¬K¬KµK¹KºK¼K¾K¿KÄKÅKÃKÇKÇKÇKÇKÇKÉKÇKÊKÌKËKËKÊKÈKÌKÍKÏKÑKÎKÇK³K—KyKWK9K2K/K)K'K$K(K%K$K$K)K6K2KBKQKNKKKRKsKaKDKJKOKZKMKVKGKPKPKAKQK\KMKIK@KJKKK@KIK5KSKŠK³K•K˜K–K“K„K}K…KƒK‰K‹K‡K„K„K…KŒK‘K†KK€K‚K‚K€K}K~KwKvK~KtKtKoKrKmKkKjKkKhKiKbKfKcKeKfKhKbKdKZKaKcKlKfKmKeKdKeKfKeKiKiKeKhKmKkKqKiKdKbKaKeK_KaK‚K KµKÀKÉKÎKÑKÑKÑKÍKÎKÌKÉKËKÏKÏKÐKÐKÍKÐKÐKÏKÑKÐKÑKÑKÓKÑKÏKÌKÄKÀK¶K²K¨K™KyKaKFK3K/K-K+K/K0K2K7K>KCK=K@KAKEKDKJKTKRKRKRKQKRKUKVKUK[e]r‘(KKK~KyKrKoKhKYKVKOKOKLKJK?K;K8K5KK6K1K7K9KDKOK\KgKsK~KŠK•KœK K¥K¨K©KªK¬K«K­K­KªK«KªKªK­K­K¬K±K¹K½KÉKÂK´KžK˜KK‚KvKhK[KKKDKHKUK{K]KŽK¨KK^K>K/K)K(K*K#K)K'K)K+K/K-K4K=KHK]KPKJKYKSK$KK&K+K4K1KKK6K5K1K3K0K6K1K/K-K1K,K4K8K:K:K?KKKAK9K9K9K.K-K1K9KNKQKTK[K^KjKoKvK}KyKtK{K{KzK~K{K~KK}KK}KK€KyK…KKƒK‹KˆKŠK†KˆK‰K†K‡K‹KŠKŽKŒKŒK‘KŒKŠK‹KKŒKKŒK‹KK‹KKŽKŽKK•K“K™K™KžK¢K©KªK¬K­K´K¸K¾K¾K¿KÁKÁKÄKÁKÆKÇKÅKÆKÉKÊKÉKÉKÉKÍKËKÊKÉKËKÌKÍKÌKÐKÑKÐKÈK´K“KjKHK2K)K&K&K*K$K#K$K(K2K2KCKOKIK]KUKlK_KAKGKOKXKOKYKOKLKOKCKPKUKFKIKAKOKCK9KPK1K[K‹K³KšKšKŒK“K‰K~KK‰K‹KŒKˆK†K‹K‰KŠKKŒK…K‚K†KK~KƒKKK‚K~K{KyKtKuKzKsKrKmKkKbKhKiKfKeKcKdKdKgKaK`K_KaKeKcKaK_KeK]KcKdKaKdKnKnKlKnKnKdK]KZKXKTKYKwK—K¬KÀKÉKÐKÑKÑKÏKËKÍKÌKËKÎKÎKÏKÑKÏKÐKÍKÓKÐKÏKÎKÑKÎKÍKÌKËKÊKÅK»K²K©K˜K„K`KEK4K.K,K3K2K2K6K=K;KEKBKFKCKCKIKLKIKVKWKUKRKRKUKWKWKYKYe]r’(K€K€K€KƒK€KzKqKjK_KYKUKTKQKAKEK:K=K@KGKEK@K:K5K8K4K0K3K;KBKLKVKiKpK}KˆKK—KK¡KªK«K©KªKªK®K¬K­K¬K®K«K«K­K­K¶K½KÃK½K«K¤K KšKŠK‚KxKmK[KOKBKOKoKuKrK¶K¬K`K>K.K*K$K%K'K)K)K'K&K&K-K/K,K.KAKXKbKaKqKMK K 
K#K0K1K9K9KMKuKlKbKUKMKKKBKQKkKCK%K%K!K(K8KLKƒK…K„K|KxKhKVKJKLKTKbKKKQK|KŒKxK8KAKeKpKaK`KcKZKcKgKfKcK|KKpKuKoKtKpKlKpKsK_K–K«KˆKRKBKWKJKQKNKKKiK•KdK3K/K4K:K5K1K?KLK[K@K5K/K.K'K%K,K:KPK_K:K1K-K.K8K2K1K-K-K5K1K/K/K6K=K7K8K8K:K2K4K1K0K.K-K1KK=K7K:KEKIK@K7K4KKAKBKGKEKDKGKKKNKPKOKVKTKSKSKRKYKUKSKSKTe]r“(K…K…K„K„KƒKK}KxKlKaK\KVKWKOKGKBKCK?KNKKK;K?K8K9K2K6K/K7KBKFKVKgKrK~K‰K’K—KK¢K§K¨K«K«K¨K©K¬K¯K¬K«K­K©K¬K¶K´K´K´K±K¥K¢K K—K‹K…KyKjK[KQKQKaK‰KhKKÍK¢K;K2K.K%K'K.K&K(K(K(K&K*K0K)K*K*K4KMK_KwKwKGKKK!K&K*K2K9KDKvKxKjKSKIKOKLKbKrKAK%K"K#K$K*K9KRKuK“KK„K†KwKeKYKkK]K@KZK|KŽKiK?K3KTKwKKgKgKdKiKtKlKTKZK†KK}KnKsKnKpKwK‚KkK†K¤K¢K~KZKiKDK5KSK_KPK“KKWK3K7K6K4K8KDKFKSKBK3K1K0K,K'K+K8KRKOK5K5K-K+K5K4K/K0K.K0K-K(K,K4K8K7K=K6K2K3K4K3K.K.K2K0K5K9K;K:K6K>K=K>K?KLKJKAK8K7K8K6K2K1K2K7K>KFKQK_KbKgKgKsKtKrKwKzKxK|K|KzK{KzKxK~K€K}K€KƒK€K}KK‡KˆKƒK‡K…K‹K‰K‡K‹K‰KŠKŽK‹KK‰KŠKŽK‰KK‰KŠKKKKKK”K–K–K˜KšK K¡K£K§K¨K¬K²K¹K·KºK¿K¾K¾K¿K¿KÁKÃKÀKÁKÃKÅKÇKÆKÇKÊKÈKËKÌKÎKÌKÊKÌKÐKËKÐKÏKÓKÔKÓKÏK¿K™KoKJK,K+K)K!K$K%K8K5KNKGKHKWKRKhK`KFKHKJKVKGKOKFKHKJKK9KKEKCKHKMKCK=K6K?KK8K3K:KOKGK2K3K2K2K0K5K4K5K1K:K5K:K:K=K;KAKFKLKCK8KAK9K9K5K0K8K>KOKPKYK_KcKmKoKnKpKvKvKuK{KzKyK€K|K~K|KK„K‚KK~KxKK€KƒKˆKŠK…KŽKK‹KŒKK‘KKŒKŒKKŒKŒKKŒK‰KŒK‘K“K“KŠK’K“K–K–KœKœKžK¢K¦K­K­K¯K²K¶K¶KºK»K¼K¼K¿K¾K½K½K¾KÁKÁKÄKÂKÄKÄKÁKÊKÇKËKÈKËKËKÎKÊKËKÍKÏKÑKÒKÒKÒKÔKÔKÌKºK‘KcK:K.K"K&K+K-K>KK:KCK:KrK 
K¥KšK—KŽK‰KƒK‰K•K“K“K”KKKKKŽKKŒK‘K‰K‹K†KŠKˆKˆK‰K‰KˆK…K†K†K‰K‡K€K{K€K{KxK~KyKrKuKrKtKnKjKnKhKgK\KfKcK\KVKYKVK`K_KjKvKyKKzKwKvKqKfKYKOKLKLKcKK°KÁKÌKÏKÒKÐKÎKÏKÎKÎKÑKÒKÔKÔKÓKÐKÐKÐKÏKÐKÐKÏKËKÆK¼K±K«K£K‘KƒK`KPK;K.K-K3K1K@KFKHKLKNKVKPKJKPKMKRKSKSKUKUKXKYKZK\KYKRKSKYKXKTKUe]r–(KŒKŒKˆKŠKŒK‡K‡K…K„KKyKvKnKgK`KZKUKRKTKSKOKFKGK8K2K/K+K/K@KHKTKhKoK}K‡K‘KšK£K£K§K§K§KªK¨K¬K¦K¬K«KªKªK¬K¬K¯K«K®K¬KªK¦K£KK–KKKtKoKzKpKK¡KUKlKÀKK;K.K-K'K&K,K+K(K.K,K'K*K*K-K%KAKiKcK]KXKvKiK@K'KK!KK#K*KAKqK‰KŒKcKBKIKUKnKeKDK.K2K:K/K#K+KDKeKYK]KCK9KFKWKuKmKjKyKšK„KaKMKBKAKSKUKEK>K]K‚KzKrKvKpKuKbKVKnK“K£K‡KuKKƒKˆK{K_KeKqK~K¡K–KbK3K)K3KOKeKmK£K…KCK2K9K9KEKKK?K3K7K,K.K(K.K/K9K_KIKDK1K1K3K8K8K6K6K3K:K/K1K.K1K5K9K6K:KFKEK4K3K7K3K5K,K2K7K5K6K2K6K>K>K;KFKLKSKLK:K8K8K9K3K1K7KDKTKQKXKcKeKjKwKvKwKrKsKtKtKzKxK}KzK€K|KKƒK€K|KzK{K„K‡K…K‡KŒK‰KKŽK‘K‹KKK‰KK†KŒK†K‹KŽK†KˆK‹K‹KK’KK’K“K—K›K›KœK K¦K®K­K®K±K¶K¶K¸K¹K»K¿K¿K½KÀK½K¼K¼KÂKÁKÅKÄKÁKÃKÁKÇKÇKÉKÉKÊKÊKÌKÌKÌKÏKÏKÎKÐKÒKÑKÒKÕKÔKÐKÃKŸKsKBK1K.K1K.K;K:K4K9K]KcK_KGK@KLKMKNKPKHKDKQKIK[KTKJK@KHKLK?K4KFK=KtK¨K 
KK”K‹K‡K„KK—K‘K’K‘KŽK“K“K‘KKKKŽK‹K†KŒKKˆK†K‰K†K‰K†K‹K†K‰KƒK…KK€KKxKzKyKzK~KuKpKpKrKiKhKfKlKlKeK]KZKVKYKhKjKiKrK‚KƒK~K}KsKlKjKVKIKFKQKiK—K±KÄKÊKÐKÒKÐKÒKÎKÐKÍKÑKÓKÖKÕKÕKÓKÒKÑKÐKÐKÎKÌKÊKÂK±K©K›K‰KyKYK@K/K/K*K2K6K>KEKKKOKQKPKJKPKOKTKNKRKRKVKWKYK_KWKbK^KYKUKTKSKWKQKVe]r—(K‡K‡KƒK†KŒKKŒKˆK…KƒK„K{KwKoKsKcKbK^K`K[KSKOKFK>K0K,K+K2K@KKKUKeKqKyKˆKK˜KžK£K¤K©K¨K§K«K«K©K©KªK¬K­K­K¬K«K«K®K­K©K£K¡KœK•K‹K}KyK‡K’K~K³KŽK`KtK›KtK.K/K,K+K)K0K)K(K&K$K'K)K!K$K,K6KkK~K|KuK|KmKXKDK.K-K+K3KKKpKˆKKtKGK;KHKZKYKJK=K@K6K-K-K*KK5K3K-K.K-K+K0K=KZKOKHK:K2K3K;K4K3K8K2K6K1K3K1K5K1K=K7K7K@KKKDK5K3K7K4K:K4K4K;K>K=K4KBKAK8K?KMKQKQKEKKNKGKGKPKBKAKQKEKPKSKCKDKEKFK5K6KFK;K{KªK˜KK•K‰KŠKŽK“K›KKœK”K’KK‘K“K’KK‘K‹KKŒKŒKK‡KŒK‰KŒK‹K‰K‡K‹K…KˆK…K„K|K‚K|K{KyK{KyKxK{KsKuKnKoKoKoKrKfKdKfKZKbKcKlKtKxK€K‡K‚K}KrKoKbKTKNKEKVKuK›K¸KÅKÊKÑKÑKÒKÒKÒKÏKÐKÒKÓKÖKÖKÔKÓKÒKÒKÑKÏKÏKÍKÉK¿K¬K›K‡KlKVKKFKCKQKDKFKPKYK0K'K-K:K_KxKvKOKK8K7K6K7K4K2K3K6K>K@K=K>KDK9K7KJKOKOKHK7K4K6K=K8K2K=KXKRK[KcKdKqKqKuKsKtKxKxKuKtKxKK‚KK~K{K|K~KzK„K‚K‡K†K†K‹KŒKŒKKKK‘KK‘KŽKŠKK†K‡KˆKŒK‹K†KŒKKK’K’K‘K˜KšKžKK¢K¢K¤K¦K¨KªK­K®K³KµK¶K»K½KºK»K»K¼K»K¾K¿K¾K¿KÀKÂKÃKÁKÃKÇKÆKÈKÌKÊKÊKËKËKÌKÎKÐKÏKÐKÎKÒKÒKÔKÕKÔKÔKÊK±K€KLK0K.K7K/K+K.KNK\KXKAK=KFKDKEKFKKEKBKHKWKCK)K,K=KgK}KK^K@K9K;K>KIKfKŽKzKbKHKK`KOK>K:K6K0KK8K5K4K3K2K.K2KK„K«K“KšK”K†KŒKŽK–K˜K“K™K‘K”K”KK‘K”K”K‘K‘K‘KKKKŽKŠK‹K‹KŒKKˆK‡K‰KŠKˆKŠK…K…K€KƒKKƒK‚K}KxKwKwKvKyKtKqKtKqKmKkKeKeKeKmKwKxK‚K†KŠK}KxKpKhKYKSKUKjKŽK©K¿KËKÐKÐKÒKÒKÔKÑKÔKÓKÑKÓKÕKÕKÔKÑKÒKÐKÎKÍKÎKÉKÂK³K˜K€KZKDK0K+K6K.K2K=KKKTK^KZKXKVKNKNKNKTKLKOKSKYK]KaK]KcKfKaK`K`K_KTKTKSKTKXK[e]rš(K‚K‚K€KŠKŒKK‘K•K™K”K“KKŒK…KK|KsKpKjK`KZKRKJKDK7K1K*K-K6KEKVKcKvK}KŒKK˜K£K¤K«K§K­K­K®K¬K­K«K¬K«K«K­K¬K®K°K®K­K¨K£K 
K—KK‹K„K—KÂKÇKÌKžKFKtK`KpKUKDK9K8K1K'K'K(K)K*K(K)K/K,K&K+K'K2K=KCKDKK-K.K2K@KJKzK‘KVKMK:K9KEKEK7KKNKfKcK_KLKLKaKzKsK~KK~K‡K“K†KˆK‚K–KKƒKK—KnK3K*K&K7K‹K¯KœKaK.K+KAK\K^K¯K–KBK/K?K;K8K1K.K(K1K*K0K/K5K^KKKDKBK1K0K@K6K4K6K7K6K2K+K-K1K.K4KAK=K@KK;K9KIKJKTKIK@KQK?KEK,K/K6KFKˆK¨K–KK”KˆK…KK•K–K‘K•K–K“K“K‘KK•K‘KŽKŠKK‘KŒK‘KK’KŠKKK‰KˆK‹KŠK‹K…K…KŒK‡K…K€KKƒK~KK{K}KzKzKyKtKvKyKnK~KkKgKiKnKrKuK€K„K‡K‰KKxKlKeKZKVK^KzK˜K±KÅKÍKÐKÕKÕKÓKÓKÓKÒKÔKÔKÔKÒKÕKÔKÓKÐKÐKËKÌKÉKÆK¹K¤KKxKUK:K,K*K*K;K:KOKZK\K[K^KZKZKWKUKQKRKNKQK[KaK_KdK`KeKcKbKaK_K[KUKOKSKPK[K^e]r›(K~K~K‚KŒKŒKŒK“K–K™K•K•KKŽKK„KKzKvKpKdKZKQKGKCKK5K2K>K?KFK=KAK3K/K2K9K5K7K5K6K9KNK5K?KCK?K?KUKVK^KcKIK=K2K0K/K*K7KWKSKWKZKpKsKqKsKqKvKyK€K„KK~K|K~KrKnKvK{K~K‡KˆK‹K‹KŠKŠK†K‡KŠKŠKŽKŽKŽKŠKKKŒK‰K†K‡K‡K‰KŠKŒK‡KŽK‘K”K–KœKKœK K K¢K¢K¥K§K¨K§KªK­K®K³K³K³K¶K·K¸K¹KºK¹K¼K½K¼K¸KÀK½K¿KÃKÅKÆKÆKÇKÃKÄKÉKÉKÊKÌKÍKÏKÐKÎKÐKÓKÓKÔKÓKÕKÖKÔK×K×KÒKºK‰KUK1K)K6KAKPKTKBK2K>K9KBK7K7K@KJKGKPKCK?KKK;KDK)K.K5KLKK¨K•KK”K…K„KKšK•K•K’K˜K”K‘KK’KK‘K’KKKKŽKKKŒKKK‘KŒK‹K‡K†K‰K†K†K‡K†K†K…K€K„K{K|KK~KKxKwK~KwKzKwKuKtKnKkKmKqKzK~KK„K„KKwKpKjK`KZKcKˆK¤KºKÇKÌKÑKÔKÒKÕKÖKÖKÕKÔKÓKÓKÕKÖKÖKÑKÒKËKÊKÊKÈK¿K²K¡K‚KgKGK0K%K+K1K7KFKPK[K[K^KZKWKSKVKYKVKQKQKSK\KaKaKgKdKoKeK`K\K\KYK\KZKSKWK]K`e]rœ(K|K|K…K…KŠK‘K‘K“K˜KšK—K“K•K“KK†KƒKzKtKkKcKVKGKBK;K8K+K'K6KIKUKfKpK|K†K“KK¢K¥K©K­K¯K«K®K¯K¯K«K®K®K«K¬K­K°K²K¯KªK¦K§K K™KŽKŒKKÄK×KÞK¬KcKZKdKGKYK:K:K>K@K8K2K/K,K-K1K)K/K)K&K*K0K4KCKQKRK9K K%K%K3KFKNKCK>KBK@KCK:KKAKVK>KAK=K?KLKeKjKgKSKVK]KXKdKqKsK†K„K„KŒKƒK„K£K—K„K¤KŸKgK1K#K%KKqKUKLK¥KŸKGK/K2K.K)K*K+K0K-K*K2K5KVKHKDK3K4K3K9K6K/K7K4K8K6K5K-K4K7K7KKEKBKDK3K7K3K1K5K8K5K9K;KSK;KEKCK9KOKOK^KcKgKfKKK6K1K,K-K+KCKLKNKSKpKvKqKxKtKwKwK~K~KKzKqKpKrKxK€K†KƒK‰KˆKŠK’K‹K‰K‡K‡KŠKŠK‹KŽKK‰KKKK‹KKK‹KKK’K’K”K”K˜K–K›KšK K K 
K¤K£K¦K¥K©K¥K¬KªK¯K²K²K¯K´K²K²K¸KµK¶K·K»KºK½K¾K½KÁK¾KÃKÁKÁKÁKÂKÅKÇKÉKÊKÊKÊKÍKÍKÎKÑKÒKÑKÑKÕKÕKÓKÔKÖK×KÖKØKÑKµKvKDK-K9KAKPK?K0K;K4KK8K9K?K8K?K@KBK4K6K8K;K1K4K,K/K>K2K2K/K)K-K:KVK‰KŽKrKPK3K)K8K:KDKRKQKMKHKHKAKDKUKoKmKZK[KPKUK\KiKKŒK’KˆKKKŒKšKŠK]K“K‡KsKGK5K8KIK›K¢KÈK—KXKyKUK>KƒK¸KkK8K:K5K8K/K2K4K5K3K8K;KXKIKCK8K>K7KK7K4K@KDKOKFK;KMKJKNKGK)K0K8KaK¤K§KŽKK•K‡K‰K“K–K“K’K”K”K™K“K’K“K‘K’K‹KŽKˆK‘K“KŽKKŽKŽKŽK‹KŒKŽK‹K‹K‹KŠKŒKŠKŒKˆK‡KŠKƒK„K„K€K„KK€K€KKKK{KxKyKzKtKxKwK}K|KK„K…K‚K€K~KqKoKnK…KŸK¶KÄKËKÑKÕKÓKÓKÕKÔKÔKÓKÔKÕKÓK×KÖKÕKÐKÎKÊKÃK½K´K§KšK…KgKHK5K5K0K9KFKVKZKYK[KWK]K\K_KaK\KZKWKUKXKdKiKjKuKnKjKiKhK^K_K]KWKVKSKWK_K`K_e]rŸ(KyKyKuK~K~K„KK’K“K–K˜K˜K™K–K•KK‰KKˆK†KyKmK[KYKFK2K-K-K>KIKQK^KnK~K‰K–K›K¡K¦K¨K®K°K²K¯K­K­K¬K®K­K¬K®K°K±K±K«KªK¦K¦KŸK—K•KŽK•KÇKÜKÇKuK\KtKŽKÆKiK$K%K!K'K#K%K0K1K8K8KKCK9K/K/K9KNKpKkKpKhK=K)K-K8KGKRKJKLKSKHKOKKKNK`KyKgKeKeKSKUKgKvK…K“KK…K‹K|K‹K‰K_KuK‡K…KnK]KTKEK‰K‡K±K®KsK`KBK9K[K¸KŒK@K3K.K,K,K-K1K7K4K3K=KbKKKEK;K=K9KFK5K>K6K=K:K8K3K9K4K7K8KKHKNKOKKGKFKUKIKIKKKoKvKfK`KVKOK]KvKˆKKŽK„KŠK…K€K~KdKiKK‚KŠKuKnKcKyKyK{K—K‹K\K0K2K@K©K¯KWK.K0K2K2K0K.K4K,K2K:K^KGK?KBK=K8KGK4K6K7K1K5K2K,K.K/K3K5KK:KK8K8K9K/K1K2K.K4K8KBKFKCKCK?KKK\KiKvKvKgKdKOK6K)K0K+K6KQKLKWKpKzKvK~KtKrKqKlKnK|K€KƒK€KƒK€K‰K†KˆKŒKŽKKŠK‹KˆK‰K’KŠK‹K‹KˆK’KˆKˆKŽK’KŒK‰KKKŒKKŽK–K“K•K—K›K˜K›KŸK¡K¡KŸKžK¢K¤K¥K¢K§K¦K§K­K°K¯K°K®K¯K±K±K¯K¹KµK³K·KºK¶K·K¸K»K½KÂKÂKÆKÄKÄKÇKÈKÈKËKÈKÍKÎKÍKÐKÒKÐKÑKÕKÓKÓKÖKÖKØKÙKÙKÙKÛKØKÉK‘KGK1K:K0K*K'K-K3K2K?KKKBKKIKLKcKvKtKeKWKQKrK‹K—K‹K€K€K…KKŒKˆKsKŠK‘KKlK;KiK}KPK8KdKÃK°KUK.K4KqKÓK¡K7K$K(K&K$K-K(K&K.KKK&K&K0KIKŒK®K›K‰K‰KŒKˆK–K”KK•KK’KK•K“KK”K’K‘KK‘KŽK“K‘KKŽKŒKKKKKKK‘KŒKŒKŽKŽK‹KŠK‰K‹K†K…K‡KƒK†K‰KK‚K~KƒK€KƒK}K~K}KK}K€K…K…KˆKKŽKˆKˆK~K}K|KŒK¯K¿KÉKËKÕK×KØK×KÔKÔKÕKÖKÔKÕKÓKÕKÓKÒKÏKÌKÉKÀK²KœKŒK{KbKTKFKGKEKMK[K_KcKcK]KTK[K]K_KbK_KaKdKcKdKjKmKtKpKoKmKjKdKeKbK[K_KYK_K`KcKbKgKgKhe]r¤(K3K3K8KOK`KiKvK|K†K‡K’K›K£K§K£K¥K¤K¡K™K“K‰KƒKwKgKUK6K*K'K5KBKSK]KmKK‹K“KKŸK§KªK±K²K³K²K¯K²K¯K¯K±K±K±K²K´K³K®K¯K«K£KK™K 
KKŽK¿KÅKzK}K‚KMKTK?K,K(K*K'K*K(K3K+K+K+K+K6K,K.K.K;K_KOK4K/K0K;KK+K9KdKÂK®KZK;KUKÐKÎKfK,K(K%K&K+K&K+K.K7KYKBK=K@K6K:KDK6K7K8K6K8K3K,K2K6K5K:K5K>KAKDK;K5KK?KEKTKiKoKzKxKsKiK`KIK3K/K+K1KPKTKVKbKsKhKsKgKuK€KK…KKŠK†K„K~KK„K‡KˆKŠKˆK‰KŠKKŠK†KŽKKK‰KŠKK‹KŠK‹K‹KŒKKKKŽKK’K‘KK•K–K™K˜K™K˜KKœK¡K¥KžK¢K K£K¢K¥K¥K¥K§K«K®K¯KªK­K°K²KµK·K³K°K¶K¹K¾K¹KºK¼K¿K¿K½KÁKÄKÅKÅKÁKÅKÉKÌKÌKÌKÌKÏKÐKÑKÓKÑKÔKÕKÖKÖK×K×KÚKÜKÞKÝKÛKÐK›KTK6K0K-K,K0K1KKDK7K7K>K8K4K3K3K)K.K2KBKJK;K5KMKXKoKuK|KvKqKkKcKKK5K-K)K.KJKNKVKdKlK`KuKnKzK}K€KK‡K‡KƒK„K‚KKƒK†K‡K†K‡KŠKŒKŽKKŽKŽKŒKŒKŠKˆKKŒKŠKŒK‹KKŽK‘KŒKKK•KK‘K–K˜K•K•K•K—K˜KœKŸK KK¡K K¤K¢K¦K§K§K¨K«K®K¬K®K²K¬K²K±KµK²K²KµK¶K½K¹KºK»K¿K¿K¼KÂKÃKÃKÅKÄKÅKÉKÉKËKÍKÊKÌKÎKÏKÒKÒKÑKÒKÖKÖKØKÛKÚKÚKÜKÞKÜKØKÁK€KJK,K(K*K0K3K0KK>K.K/K.KAK;K7K7KOK`KhKvKxK~KqKqKkKQK2K&K)K(KOKIKSKfKcKkK{KvK~K|KKƒK‡K‰K…K…K…KŠK…K†K‚K‰K‰K‰K‰K‡K‹K‰KŽKKŽKŽKKŽKKŽKŽKŽK‰KŒKŒK‹KKKŽKKšK”K•K™K˜K™K˜KœK›KžK KŸK¡K¢K¢K¡K£K¥K§K¨K¬K¨K¨K°K¬K¯K­K±K²K°K´K¶K¶K·K»K»K½K¿KÀK¾KÂKÄKÄKÆKÈKÇKÇKÉKÌKÌKÌKÌKÎKÑKÏKÏKÑKÓKÖKÖKÙKØKÙKÙKÝKÞKÝKÜKÓK´KfK1K&K'K/K-K2K=K;K.K-K3KFK5K%K&K9KaKŸK¢KŽKŒK‚K‹KKKŒK–K’K—K“KŽK”K”KK‘KKKK‘KŽKK“KŒKŽKŽK‹K’K’KŒKK‹KŒKK‹K‹KŠKŒKŒK‰KˆK‡K‰KK…K†K…KˆK‚KK~K…K‚K€KŠK}KK}KK„KˆK†K‰KˆK‹KŠKŠK‡K‹K§K·KÇKÌKÓK×KÙKØKÕKÓKÔKÕK×K×KÓKÓKÓKÒKÑKÏKÉK¿K³KŸK…KjKVKIKIKPKPKTKTK[K`K^KWKXK^KcKcKaK_KcKfKjKoKrKrKsKoKlKaK^KZKWKaK[K_K\KbKeKdKhKhKlKdKfe]r§(K&K&K*K4KCKSKcKnKwK†K˜K¤K¥KŸK¦K«K§K¨K K™KK†K‚KrK^K>K-K(K/K:KKK`KsK‚KK—KK¡K§K¬K°K±K±K±K¯K³K²K³K±K®K¯K±K´KµK³K®K¨K§KŸKœK•K‘K‰KŽK»KŽKƒKsKIK{KK(KK&K K+K*K1K&K(K(K6K0K%K,KFKfKRK3K+K*K3K1K*K2K@K0K9K2K2K2K0K/KUKsK~KqKRK,K+K:KNK>K;K'K%K(K#K!K,KJKbKbKCK3K&K.KKEKJK_K]KpK€K^KRKmK‚K„K€KqKlKuK™K—K®K¦K•K{KPK,K$K#K%K3KcKˆKiKOK¹KÚKŸK0K$K%K&K%K$K'K+K3KPKK$K%K(K.K8KlK:K(K.K7KBK3K$K&K7KlK£KœKŽK‡KƒKŽK‰K’K—K”K˜KK“K‘K’K‘K‘K’K“K“KŽK’K‘K–K’KK’KŽK‰KKŽKŒK‰K‰KKŒKKŽK†K‹K‰KŒK‰KˆK‹K‰K‡K„K…K…KK…K€K}KƒK|KKyK~KyKK„K‚K‡KŒKŽK‘K‘K’KK˜K­KºKÇKÍKÔK×KÙKÙKÕKÔKÖKÖKÖKÕKÓKÓKÑKÑKÑKÍKÈK½K­K—KtK]KLKKKKKOKXKVK^K`K_KYKVKWKaKfKeKdKbKeKfKlKrKtKnKpKiK`KbK[KXK^KZK]K^KeKgKiKiKiKkKiKjKde]r¨(K#K#K'K,K4KBKOKeKrK‰K–K 
K©K¥K©K«K©K«K¢KK’KŒKƒK|K_KDK)K'K-K2KMKaKrKKK™KK K§K«K¯K±K²K¯K²K¯K¯K±K±K¶KµK³K²K°K²K¬KªK¤K¥KK”KKˆK€K†KqKKKKK“KxK2K"K$K#K.K5K+K0K(K+K3K1K-K4K[KRKBK5K,K+K*K2K5K2K6K1K2K.K4K-K/K4KLKnKƒKsKLK)K&K4KKK;KHK.K'K+K$K"K*K8KFK`KbKJK1K%K/KaKVKEK:K8KKKdKhKYKOKLKfKfKeKfKeK\KLK@KDKSK`KtKxK]KRKqKK…K‚KjKhK‚KŽK‘K¶K¦KKxKdK8K'K KK+KHK‰K}KNKºKÜK²K5K"K3K$K)K$K)K%K:KOKEK>KKNKOK2K6K4K/K5K-K/K3K6K4K.K2K9KJKTK:K2K/K8K6K9K/K*K0K7K;K7K;KNK\KhKsKqKoKqKuKlK]KOK.K(K'K%KBKJKYKhKkKuKpKqK€KKK…K…K}K€K‚KˆK…K†K†KˆK„K„KK‹KŠK‰KŠK‡KKKŠKŒKŠKKKŒK‹K‘KK‹K‘K“K’K‘KŽK•K’K“K“K˜K–K–K–K›KKšK¢K K KžK¥K¤K¦K£K¥K¥K©K¨KªK¬K«K®K°K±K³KµK¹K¶K´K´K·K»K½K½K¾K½KÂKÃKÄKÂKÅKÊKÉKÊKÎKÐKÏKÐKÑKÏKÐKÐKÓKÕKÕKØKÕKØKÛKÚKÜKÝKÜKÝKÛKÑKšKDK:K%K-K+K3K2K/K*K4K0K'K!K(K=KK¡K—K˜KˆK‚KŒKK˜K™K’K•K’K•K•K”K’KK’K‘KKKKK’KKŒKŽK‰KKŠKŽKŒKK‹KŒKŒKŠK…KŒKŠKŠK‹KŽKˆK„KˆK„KˆKˆK…KK„K‚K€K|K}K}K|KzK|KxK€K‡K‘KœKK¤KªK°K´K¹KÁKÅKÊKÏKÓK×KÕKØK×K×KØKÖKÕKÖKÔKÒKÓKÑKÎKÊKÂK¯KšKKjKXKNKPKVKXK[K`K\KZKWK[K\K`KdKgKkKjKlKoKrKrKpKnKiK`K_KaKYKXKSKXKZK^KfKgKiKhKgKiK_KbKhKce]rª(KKKK%K0K4KK,K(K'K+K1K-K,K'K-K0K?K3K9KKK?K5K0K-K-K-K/K4K2K/K4K-K+K0K*K0K1K0KRKtK‡KsKDK+K.K/KHKTKLK?K'K'K(K"K'K/K$K-KJKcKcKZK=K?KgKKK8K.K;KOKMK[K[KXKLK^KsKtKqKvKmKnKLK?KSK[K^KWKPKhKŽK“K€KyK”K”KzK—K¦KŽK\K>KKEKsKÃKœK±K×K·K?K,K,K+K)K K'K(KDKUKDK4K;KEKOKOK2K0K4K6K4K+K/K3K0K9K4K;K8KGKBK7K*K2K.K/K2K5K2K-K6K5K6KKMK\KhKaKiK`KDK-K1KNKAK;KVKiKVKJKrKsKlKnKfKjKbKMKSKRKZKRKIKYKvK“K–K¡K˜KK‡K›KšK”KKK0K6K4K.K5K.K/K,K.K2K:K,K9K1K.K*K,K3KMKlKKKqK6K$K&K+K:KTKNK2K1K(K(K)K2K5K2K)K8K7K6K@KOKUKVK[KJK:K2KYKOK6KHK[KcKWK`KsKjKiK[K^KSKPKQK[K]KOKPK\KvK’K˜K–K—KŒK—K›KK¡K‘KSK'K$K-K[KOK1K2KjK¼K´K¶KÐK K.K$K)K$K&K"K$K)KKKFKJK1K4KOKhKYK1K+K0K8K.K*K*K4K2K-K*K6K7KNK>K3K4K.K/K0K9K0K2K4KK:KXKjKZKVKfKqKeKQKcKUKGKRK]KYKLKVKbKvK’KK”K•K•K•KyK¡K¦KKjK(K K1KCK5K-K1KgK»K»K¶KÅKK/K 
K#K)K(K*K%K)KMKPKNK/K9KHKgK]K3K/K8K8K5K.K1KK$K(K0KGK?K1K&K5K>KDK8K)K#K4K=K2K.K0K8KdKPK5KNKeKgK`KVKlKkK[KXK[KKKLKYKQKQKRKbKqKK„K—KšK…KmKsK¯K¥K‚KwKMK*K,K.K,K0K9K[K¸K¾K¸K¹KrK*K(K"K(K,K+K.K8KWKSKVK3K7KGK_KOK;K8K5K:K5K5K5K7K6K4K1K=K;KCKK6K4KYKXK=KBKdKlKeKYKrKvKdKXKdK\KCKRKHK=KVKeKoKŒKˆKKŠKlKqK†K¨KŸKkKrKnK1K&K/K#K+K1KTK¸KºKºKšKUK*K&K!KK$K%K)K9KWKUKWK)K;KJKUKMK/K/K9K;K4K7K8K0K2K2K2K7KAKHK=K@K3K-K+K0K-K1K;K;K5K=KRK[K\KcK]KdKdKgKvK~KzKtKcKHK6K/K2KK1K+K'K$K.K3K9K:K*K+K,K-K&K3KMK|K•KŽKvKYK6K$K-K(K*K2K@KDK9K*K!K9KMKAK3K+K*K?KJK>K,K(K'K2K;K0K0K7KNK_K>K:KSKgKYK`KjKxKsKaKbK\KXKOKHK8KXKiK{KK’KK~KkKƒKžK¡KžKkKgK€KMK-K1K+K+K1KTK·K¶K­KnK=K4K:K(K"K#K'K+K3KVKRKGK+K:KMKQKKK0K0K9KKŽK“KKŽK’KŠK›K“K’K—K“K–K•K•K”K•K“K”K’K“K”K•K“K”KKŽK’KKKŠK‹KKKKŒK‡K‡KˆK…K…K‡K‡K†K‡K‡K~K€K€KK‚KKK~KK}KxKxK|K†KŸK³KÂKÉKÏKÒKÕKÕKÒKÑKÍKÍKËKÊKÏKÑKÕKÙKÚK×KØKØKØKØKÖKÖKÕKÐKËKÅKºKªK˜K‡KoK`K^KWK\K^K_K]K[KZKYKaKeKeKeKeKkKqKrKpKpKtKrKrKfKeK_KYKYKZK\K_KaKbKiKkKgKjKiKhKdKaK^KeKhKiKfKbe]r³(KKK KK!K%K-K=KWKtKˆK˜K™KžK§K¨K®K¬K«K¦KK•K†KpKSK8K!K#K%K0K?KRKiK}KŠK˜K™K¡K§KªK­K¯KµK²K³K±K²K´K²K¯K´K±K·KµK¶K°K¯K©K¦K KšK—KK…K–K¬K‚KhKHKFK+K.K:K0K&K0K1K=KDK;K1K3K6K*K'K3K*K,K-K*K(K-K8K5K&KK$K%K.K-KUKˆKKKŽKnKZK=K0K2K-K0K3K5KAK2K-K)KFKUK7K7K5K5K4K2K1K@K4K'K5K8K,K/K,K8KOKKK=KVKbKCKYKrK{KvKlKNKDKKKEKFK:K?KQKuK¤K˜KK†KKK–K˜KyKYKsK¤KžKwK]KPK>KGKŠK¶KœKƒK=KqK¢KK/KK&KK%KKKXKOKLK2KEKLKAK?K2K3K9K6K1K>KKrK—K…KzKˆKmKbKK8K4KK/K2KFK@K/K5K8K2K%K*K-K0K+K1K:K:K3K3K/K)KAKCKAK=KFKAK8KdK}K†KKdKKVKzKœK³K«KœK‚K‹K²KÈK¨KFK#K#K$K(KFKMK=K?K1K>KFK8KDKAK:K9K1K9KBKCK4K.K/K5K2K:K1K0K4K0K5K>K6K0K9K8K;KNKVKjKqKzKvK}KK|K{K}KxKvKvKcKRK.K+KK%KK$K*K7KPKmKƒK‰K‘KK¢K¥K¯K³K·K³K´K³K²K´K²K³KµK³K´K¶KµK¶K¯K®K¬K¥K¡KK“KK‡K€K™K}KKJK>KBK5K2K4KFKmK‡K†KnKEK.KBKFKUKOK^KˆKŒKKK‹K’K–KœK¡K•KwKJKTKŽKKDK;KqKœK§KŸKœKŠK•K­K¬K{K0K'K.K)K.KFKMK>K5K0K;KKnKqKBK8K2K)K.K(K*K/K.K+K9KKHK;KKK2K,K1K5K.K2K.K4KIKAK>KQKSKeKrK{KyKwKK{K}KzK|K{K|KpK[KEK3K,KCKZKZK`KjKnK|KwK{K|KK‚K„KƒK‡KƒKˆK†KˆK‰KˆK‹KŒK‹KŠKŠK‹KŠK‰K…K†K‰KˆK‡K†KŒKŒKK‹KŒK†KŽKŽKKŒKŒK‘KKKK•KKK”K—K“K–KšKœKœKœKKžK KKK 
K£K¡KžK¦K§K¨K¦KªK®K°K¯K«K®K³K±K·K¶K¸K¸K¹K¼K¾K¾KÀKÂKÄKÄKÆKÈKÉKÉKÈKÉKÐKÑKÏKÍKÐKÒKÑKÒKÓKÔKÔKÖKÖKÖKØKÙKÚKÚKÜKÞKßKÝKÚKÄKtK#KKKK"K K4KkKK‡K‹KŒK“K‹KKŸKœK•KŽK•K“K’K’K‘K“KKKKŽK’K‹KKK‹K‹KŽKŒKˆK‹KKK‹K‹K‰K‰K…K…KƒKK„K„K„K…K‚K~K€K€K|K{K{KwK}K{KwK}K‡K¥K¾KËKÑKÕKÙKÜKÛKÙKÕKÓKÒKÑKÑKÓKÕKÔKÖKØKÚKØKÙKÙKÖKÖKÒKÐKÊKÃK´K£K–K…KuKfKhKfK_KcK_K]KYK^KWK\K`K[KbKkKlKpKoKsKoKsKnKlKcK_K\K_K_KYK^K]KaKeKhKjKgKiKgKgKkKaKcKeKbKcK^KYKWKSe]r¸(KKK K$KK-K+KK2K1K.K+K)K)K2K3K6KFKPKFK7K'K(K3K:K*K'K/K(K0K/K7KEK:K5K'K K.KSKIK5K4KKBK8K>K8KEK2K.K2K?K:K6K1K7K?K:K3K2K5K1K1K8K3KK.K.KMK…KaKHKUKJK0K-K)K.K/K3K8K=KKKQKHK6K/K&K8K8K+K&K$K$K,K1K:KDKFK5K(KK)KOK^K;K@K8K>K]KxKnKiKoKwKzKvKiKqKhKrK…KŠK†KˆK’KKuKDK?KsK‹KŠK”K›K›KŸK¸K¿K±KŸK_K9K)K#K$K K"K4KaKCK,K7KMK=K1K/K7K=K>K0KK€KŒK„KŒKŒK“KKK—KšK–K‘K“KK‘K’K’K‘K“KKK“K’KKKŒKŠKK‰KŽKˆK‹K‹KK‹KŒKŒK‡K‹K‰KŠKƒK‰KƒKƒK†K…K€KzK}K~K{K{K|KxKxKzK~K”K±KÄKÎKÒKÖKÙKÚKØKÖKÓKÏKÐKÐKÎKÒKÕKÖKÙKÙKØKÙKØKØKÖKÓKÐKËKÃK´K§K•KŠK|KqKmKiKjKkKfKfKdK_K]KaK`K_KbKaKnKrKwKvKuKoKiKkKgKfK]K_KXK[K[K_KbKfKjKkKlKlKgKgKaKdK^K^K]K]K\KZKRKRKPe]rº(KKK"K!KK"K(K6KNK]K}KŽK–KžK¥K«K±K®KªK§K K–KŽK~KoKFK-K&K$K(K3KPKdKyKŠK—K K¡K¨K«K²K²K±K²K²K°K²K²K°K¶K³K²K²K²K´K±K¬KªK£K¢KœK˜K’KˆKyKvKkKSKCK:K;KDKGK7K,K+K+K+K+K*K/K0K.K+K+K.K.K6K.K(K-K*K8K;K;KJKYKLK/K'K*K4K^KKUKPKSKKQK€K…K|K…K—KžK¯K¸KªKŠKxKWK@K)K$K'K3KZKUK2K)KCKHK>K4K3K=KNK=K+K=K:KAK@K0K9K?K;K2K,K5K>K;K1K/K2K4K2K1K5KCKFKFKPKWKdKoKxK|K|K€KzK}K|K|K~K{KvKmKMK;K/K.KTK]KXK`KjKnKyKƒK‚KƒKK„K…K‰KK„K„KˆKŒKŽK‹K‰KŒKK‘K‹KˆKˆK‡KˆKŽK‡K‰K‰KŒK†KˆK‹KŠKŠK‰KK‡KŒK‹KKK‹KŠKKKK’K–KK‘K’K•K—KœK˜KšKŸKKK›KŸK¡K¢K¡K¡K¦K¥K¨KªKªK«K­K¯K±K°K±K±K³K´K¸K»K»K¾K¾K¿KÀKÃKÀKÄKÇKÅKÅKÉKÌKÐKÏKÐKÌKÐKÑKÏKÑKÓKÔKÔKØKÔKÕKÖKÕKÖKØKÚKÚKÛKÜKÜKÙKÄKjKKKKK K=K}K‡KƒKK‡K”K“K’K‘K™K˜KK‘KK”KK•KKKK‘KKŽKK‰KŽKKŒKKŒK‰K‹KŒKŽKŽKKŒKŠK‹K†KˆK†K„KƒKƒK„K„KK}K}K€K€K{K~K{KxKKK“K¯KÃKÍKÓK×K×KØKÖKÓKÐKÎKÍKÎKÑKÒKÖKÖKÚKÛKÙKÙKÙKØKÕKÓKÌKÇK¼K¯KœKŽK‡KyKpKoKjKjKjKcKjKfKbKaKfKbKbK`KcKmKtKqKtKuKrKoKiKjKiK`K`K\K\K_K_KaKaKhKhKlKdKjKhKhKbKiK[KaK_KWKVKRKTKOe]r»(KKK"K 
KK%K(K2KIKmK|KŠK–KK¥K«K®KªK«K¦K¢K–KK}KmKWK4K'K'K+K>KNKcK~KˆK”K™K£K§K­K°K³K®K¯K®K°K²K®K­K­K­K¬K¬K±K±K­K¦K£KŸKžK˜K›K’KŒKKuKtKeKKKK/K/K0K-K,K$K/KFKXKK2K0K1K2K6K0K=KKKGKKKNKXKfKoKyK}K~K€K|KK{KzK{K€K{KjKHK8K0K0KRKYK^KgKjKrK|KK‰K€K…K…K…K…KK†K…K‹K‡K†K‰KŠKKKŒKKŠK‰KKK‰KˆK‰K‰K‡K†KŠK‰K‹K‰K‰K‹KŒKK‹K‹KˆK‹KKKKK“K”K“K˜K‘K“K™K˜KžK—K›KžK›KžKŸK¡K¡K¢K¢K¦K§K«K§K©K©K¬K­K®K±K±K°K´KµK»K»K¼K¿KÁK¿K½KÁKÃKÅKÆKÆKÊKÈKÍKÍKÎKÍKÍKÏKÏKÐKÒKÑKÔKÓKÔKÕKÕKÕKÙKØK×KÚKÛKÜKÚKßKÜKÍK…K#KKKK!KGKƒKˆK‰KŠK‹K•K‘KK–KœKŸKKKŒK’KŒK‘KK”KŽKKŠK’K‰KK‹KKŠK‹KK‹KŠKŽKKˆK‰KŠKˆK†K‡K…K†K†K‰K„K‚K€K„K~K~KKKzK{KzK~K‡K‚K™K·KÆKÐKÕKÖKÙKÕKÓKÑKÎKÍKÍKÏKÓKÓKÖKØKÚKÛKÙKÚKØKÖKÓKÒKÍKÂK¶K¤K™K†K~KxKuKtKlKnKgKfKfKdKaKaKcKaKcKfKeKkKkKqKqKpKoKoKiKdK`K]KZKZK^K^K\KdKiKgKhKfKfKiKgK\K]KaK`K[KYK\KTKRKQKLe]r¼(K!K!K"KK#K"K(K/KAKgK€K‡K–KœK§K©K¬K­K«K§K K›K‘K}KuK[K7K)K$K/K>KOKcKyK†K•K™K£KªKªK¯K±K¯K¯KªK¬KªK¬K«K«K§K©KªK©K¬K«KªK¢K¡K›K˜K™KŒKŒKƒKzKKwKGK;K-K/K9K0K-K-K1K,K0K0K+K-K+K.K4K6K4K1K.K.K9KTK_K^K>K6K2K/K(K,K;K9KdK[KMKZK[KTK8K+K8K,K&K)K4KMKRK2K@KCK6K'K*K4K0K*K%K.K0K3K>KIK1K-K4K'K.KOK^KXKAK5K/K/K/K8K6K7KBKaKxK{KŒKKK€KKrK†K•K}K{KkKnKVKRKfKgKoKK™KŽKyK’K©K±K¶K¥KKyKrKˆKrK?K5K=KRKOK9K2K1K4KBK0K4K@KK]KLKMK[KZKKK.K&K8K1K4K-KK:KRK[KSK=KCK1K+K,K'K4K;KAKYKxK€KŽKKKyK|KpK‰KŽKxK†KcKmKhKFK>K2K7KPKgKrKYKjKKK£K—KŠK•K’KŸKˆKrKlKwKvKhKSK@K9KFKHK.K7KBKHK\K[KJK9K;K3K2K5K8K8K7K/K1K3K/K/K6KHKIKIKJKRK]KiKrK~KK{KzKxK~K}K}KKxKwK_KFK7K+K;KTK`KbKeKlK{K{K}K|K~K‚K…K„K…K„K„K…K‰KˆKK‰KKKKK‹KŒK‰KŒKˆK‰KK†K„K‚K‡K…K†K‡KŒK‰K‰KŠKŒKK‹KKKKKŒKKK“K“K”K—K–K•KšK›K—KœKžKKœK¡K KžK£K¡KŸK¤K¤K¤K¦KªK«K§K§K«K°KµK³K¸K¹K¸K¸K¾K¾K¼KÀKÁKÀKÅKÇKÉKÇKÊKËKÎKËKÎKÍKÏKÏKÏKÑKÓKÓKÙKÓKÕKÕKÖK×KÖK×KØKÙKÚKÛKÛKÚKÖK¶KWKKKK$K]KKƒK‰K‹KŒK•KŒK’K—K“KžK™K’KŽKŽKŽK‘KKŽKK†KKKŠKK‹KŒK‹KˆKˆKŒKKŒK‰KˆKŠKŒK‹K‰KˆKˆK‹K†K‰K„KƒK€K€K~K~K~K|K|K}K~K€KƒKŒKK¶KÉKÎKÔK×KØK×KÓKÑKÎKËKËKÍKÒKÓKÔKÙKÚKÙKÝKÖKÖKÕKÎKÉKÄKµK§K”KˆK~KzKwKuK{KqKhKgKhK_K_K_KcKdKcKiKlKiKfKnKnKsKkKnKfKaKcK\K_K[K[K\KZK^KgKlKhKgKbKiKlKdKbKcKiKbKTKQKUK\KXKQKQe]r¾(KKKKK%K"K$K4K@K[KmK‰K”K 
K©K¨K¬K­K«K¦K£K›K‘K…KuK]K4K%K"K,K8KKK`KrK„K“K›K¢K¨K­K®K±K²K´K®K®K¬K®K«K¬K«K«K¯K±K°K®K©K§K£KŸKšK”KŠK†KŒKˆK“KqKK;K9K-K*K.K5K:K6K2K/K0K1K/K>KHKKKNKTKXKeKnKvK{K€KyK|K|K‚K€KK|KzKxK]KKK>K1KFKUKcKmKgKrK{K{KK}KK†K†K†KˆK‡K…K€KŒK†KŒKŒKKKŒKŒKKˆK‹KK‰K‹K‰K‰K…K‚KˆK…K†K†K‰K‰KŒKŒKŠKŽK‡KKKKKKKŽK’K”K—K™K™K”K˜KšKœKœKKK–KKŸK¡K¢K K¡K¢K¥K¦K©KªK©K¨K¥KªK°KµK±K¸K·K¶K¹K¾K½K¾K¾KÂK¿KÄKÃKÆKÈKÈKÊKËKÊKÎKÎKÍKÎKÎKÐKÒKÑKÔKÔKÕKÓKÕKÙKÖKØKØKÙKÛKÙKÙKÛK×KÇKzK"K KK%K_KƒKKŽK‰KŒK—KK‘K•K“K˜KŸK’K’KKŽKKKKKŽK‘KK’K‹KŽKŠK‹KŒKˆKŒKKŠKKK‹KŠKKK‹KŠK†KŠK„KK‚K|KKxK}K|K{K€K€KK‡KK‘K£K»KËKÐKÓKÖKÖKÕKÑKÎKÍKÌKËKÎKÏKÒKÖKØKÚKØKÚK×KÕKÕKÐKÉK½K¬KœKŒK‚KzKwKrKwKyKsKjKjKkKfKaKfKcKbK]KcKeKhKlKpKlKmKnKmKaKZK]KYK\K[KYK_KbK^KmKlKhKhKiKcKgKbK^K]KeK_KTKTKWK]KYK[KXe]r¿(KKKKKK#K%K/KJKYKqKƒK™KŸK¨K©K­K®K®K©K¤KœK“K‡KuKVK0K%K&K*K;KKKcKxK„K”K›K¡K©K«K±K²K±K±K°K´K¯K¯K²K¯K°K®K´K³K±K°K©K©KŸK K˜K“KK‘KšK‡KKkK6KFKOK,K*K(K*K1K-K)K9K-K-K.K+K,K6K9K7K-K,K1K1KIK0K:K/K1K5K:K5K5K+K4K]KWKVKOKFK?K7K5K,K.K>KBKLK8K3K0K7KCK=K+K-K7KKEK,K+K7K4K,K(K-K;K,K.K-K4K.K4K3K1K,K,K*K0K1K2K4K3K5K>K6K7K.K1K4KNKXKTKOKGKAK5K@K7K)KAKAK4K4K'K/K.KDK?K.K7K8K1K-K+K#K&K(K7KAK;K1K(K9KUKoKiK9K%K5KIK7K K'K8K5KAKSKbK}K‰KuKUKqKŒK’KnK\KkK’KˆKdKbKtKPK*K!K$K*KFK|K2K$K-K.K*KMKOK(K!K"K-KLKJKZKIKDKHKyK¨K§K~KmKmKhKFKK=K/K3K1K6K?KCK4K2K/K,K2KCKEK6K+K+K(K/K*K.K#K6K8KEKKSK-K+K5KAKEK[K_KtK‡KrKMKeKKKqK]KXKyK’KƒK]KKvK8K%K+K.K;K{KBK#K-K*K3KWK/K!K K%K/KDKCKBK5K5K:KOKeKXK>KIKeKƒKiKUKNK=K4K3K5K8K8K=K3K)K5K5K3KGKDKKKUKVKWKdKjKuK|K|KzK€K|KKƒKK€K~K~KdKOKGK+K;KMKTKgK^KlKxKƒK„K…KKƒK†K…K…KˆKˆKˆKŠKŒKŒKŒKKŒKŒKŽKŒKŠKŠKŠKŠKŠK‹KŽKˆKˆK‚K„K…K‡K†KˆK‰KˆKŒK‡K‹KK‹KŒKKKŽKKK‘K’K‘K‘K–K˜K•KšK”K–K˜KœKŸKŸKœKžKKKK K¢K¨K£K¤K§KªK§K®K®K°K®K±K²KµK³K¹KºK½KÂK¿KÃKÀKÃKÆKÅKÆKÈKËKÊKÍKÎKÏKÏKÎKÎKÏKÒKÒKÓKÔKÓKÕKÖKÖKÖKÖK×KÙKØK×KØKØKÖK¾K_KKK5KuK„K…KKK‰K’KŒK’K—K”K“K”K KŽKKŽK“K‘KK‘KKŽKŒKŒKKˆKŠKŠKKŒK‰K‰K‰K‹KK‰KŒK‰KŽK‰KŒK†K†K‡KŠK‡K€K€K|K€K}K‚KK}K€K†KKŽK§K¾KÅKËKÑKÕKÙK×KÑKÎKËKÈKÉKÎKÑKÓKØKÚKÜKÛKÖKÔKÑKÎKÇK¼K­KšKŽK€KxKvKuKpKwKxKjKkKgKfKfKcK\K[K`KhKbKfKjKfKoKfKdKdKbK]KZKUKUKVKWK^KfKdKiKiKhKhKiKkK_K[K\K]KbK\KXK\KWK_K]KWK`K[e]rÂ(KKK!K!K%K K#K0K.KNKcK~KKšK 
K¥K®K­K®K¬KªK¡K™KŠKqKWKDK'K'K+KGKGK_KuKˆKK˜KŸK¥K­K¬K­K³K°K¯K³K³K°K³K±K³K´K³K´K²K²K¯K¨K¦K¤KK’K‹KK›K{KkKUK4K5K/K)K-K3K4K9K3K+K-K.K.K-K.K-K2K.K/K3K-K-K8K2K0K9KAK=K5K0K1K+K:KAKGKLKGKAK9K:K4K,K5K7KK?K5K0K-K1K1K8KCKPKBKGK9K:K2K3KKlK]KAKKKPK[KaKgKzKxKzKxKsKaKvK‚KvKiKqKƒKKK…K{KCK)K0KKGK}KiKAK?K8K.K/K.K3K-K1K2K9KCKGKKKSKXK\K\KkKqKyK{KzK€K€K~K€K‚K€K‚K€KyKaKRK5K0KPKWKeKfKfKvKK†KƒK…K„K„KƒKˆK…KK‰K‹K…KŠKŠK’KŽKŠK‡KKŠKŠKKŽKŽKŠK‹KŠK…K„K„K‰K…K‰KˆK†KŒKŠK‰KKŒKŠKKŒKˆKŒKŽKŽKKK’KŽK“K•K”K“K–K”K•K›KKK™K K›KŸKŸK¡K¤K¢K£K§KªK¦K¤KªK°K¬K«KªK®K±K¶K¶K¹K¼K»KÁK¿K¾K¿KÇKÆKÅKÇKÇKÉKÈKËKËKÐKÒKÐKÐKÏKÐKÐKÑKÓKÕK×KÒKÖKÔKÖKÖKÖKÔKØKÛKÚKÖKÎKœK)K!KK]K]K4K&K-KVKlKVKGKNKaKrKaKkKuKtKyK{KdKjKiK`KrKuKuKzK“KKKkK1K+K1KjKvK)K*K0K:K)K'K'K/K;KDK>K=K?K;K.K4KLK=K.KKKzKˆK“KK‘K—K‘K–KšK”K•KKKK“KKK’KK’K‹K”K•K‘K‹K‹K‘KKŽKŽKŒK‹K‰KKKKŠK‰K‡K†K‰K…K‡K‚K„K€K|K€K‚K„K†K‡K‚K€K„KK‚KŠK–KªKµK¿KÊKÌKÍKÊKÌKËKÉKÇKÉKÏKÒKÖKÛKÜKÛKÖKÕKÓKÌKÆK¿K±K¡KKzKuKvKuKxKqKpKmKlKhKpKhKgKaKZKbK_K^KcKbKiKjK_KZK]KTKLKRKXKWKfKcKeKjKiKhKcKcKcKiK_K]K]K`KeK_K]K`K[K[K]KYK\K]K`K`e]rÅ(K&K&K%K)K"K!K&K'K2KCK`KuKˆKœK™KžK©K«K«K©K¥KŸK›K‰KzK^KDK/K-K4KAKMK`KwK‚K’K—KK§KªK¬K­K°K±K¯K±K°K²K±K²K±K³K´K±K·K°K±K«K©K¢KšKŠK’K¡K£KKqKiK6K;K7K;K3K'K4K4K+K+K,K.K)K(K)K4K9K2K.K+K3K.K:KEKEKDK=KBK1K(K0K6K0K4K@KGK=KK?KCK@K-K+K)K1K6K1K1K3K3KKKZK6K)K'K*K+K-K3K>KBK.K#K)K>K^K[K>K6KUKlKTKKUKhKfKSKOKeKNKAKWKfKŠKƒKgKnKjK]KKKZKrK}K‰K•KŒKŒKWK.K6KXKsK+K,K6K/K*K'K0K.KK;K?K6K8K+K0K;K>K6KBK5K4K2K(K,K-K-K3K9KMKOKSKVKUKdKiKjKqKxK|K~KzK}KƒK€K…K€K|KxKmKSKHK4K8KVK[KgKnKlK}K…K„KK†K‚K…KˆKŠKK†K…KŒKŠK‹KŠKŠKŽKKKŽK“KŽKK‹KŽKKKŒK‡K‰K…K‡KŠKˆK…K‡K‰KˆKK‰KŽKŠKŠK‰KKŒKŽKKKKK“KK–K–K”K–K–K™K›KšKœK›K™K›K¡K£K¢K£K£K¤K§K§K¤KªK¨K§K¥K«K¬K±K±K¶K·K·K¸K¹K¼KÀKÂKÁKÃKÂKÇKÇKÆKÊKÉKÊKÊKËKÎKÍKÍKÍKÑKÐKÐKÓKÑKÑK×KÔKÖKØKÖKÕK×K×K×K×K×KÕKÀKgK#KEK{K}K‰KŽKK‘K”K‡KœK“K—K”K’K’K•K’KKKKKKŽKKŽK’KŒKŽKKŽKŒKK‰KKŒKŽKŽKK‹KŠKK‹K‰KƒKˆK†K€KKK„K‡K€K„K‡K…KƒKŠK„KˆK‡KŽKžK¬KºKÀKÆKÈKÆKÈKÅKÈKÆKÉKÐKÓKØKÚKÛKÚKÙKÕKÑKÍKÉKºK¬K™K‰K}KwKrKtKrKqKoKsKpKfKmKkKbKaKaK\KYKcKeKeKjKeK]KVKMKIKKKNKUK^KcKgKiKjKhKlKfKcKdKcK^KZKUKbKcK[K^KaK_KdK]KXK]KeKbKce]rÆ(K!K!K#K$K"KK"K,K2KAK^KsKˆK•K˜K¢K¨K«K«
KªK¢K¢K˜KŠKzKbKJK6K/K4KKPK]KdKeKpKyKƒKƒK…KƒK‚K†K‰K‹KŒK‹K‹KŽK‹KŒK‹K‹KŠK‹KŽK‹KKKKŽKŠKŠK‹KK‹K†KˆKŠK‹K‡KƒKˆKˆKˆKKŒK‹KˆKˆKŠKŽK‡KŒKŒK‹KŽK•K—K’K‘K•K”K‘K•K•K•K›KœKœKKžKžKKK¡K§K¡K¥K¨K¦K§K©K§K¦K¨K¬K­K²K¶K¶KµK¹K¹K½K¿KÁKÃKÃKÁKÄKÇKÅKÊKÇKÈKÉKËKÌKÍKÍKÎKÐKÎKÐKÕKÐKÓKÔKÖKÖKÖKÔKÖKÖK×K×K×KÖKÕKÎK“K0KNKyKzKK’K‹KK”KŒK“K–K”K–K‘KKKŽK‹KKKŽKKKŒKŠK‘KKKKKKŒKKKŽKŽKŒK‹KŒKK‹KK‹K‰KŠKˆK„K„K‚KKƒK„K†KK†K‡K†K†K‡K†K‰K’KK¨K¹K¾K½K½KÀKÀKÀKÄKÉKÍKÓK×KÛKÚKÙKÖKÒKÏKÍKÇK½K§K˜K…K}KtKsKqKoKpKrKmKkKeKiKcK\K`K^KYKZK`KeKgKfK`KYKPKFKBKLKPK\KdKhKhKhKiKgKdKfKaKeK\K[KWK\K]K^K^KXK^K`K\KdK]K_KdK_K[e]rÇ(K%K%K"K K&K&K"K&K4KKIK]KyKƒKK˜KžK¡K¨K®K°K±K²K°K±K®K¯K²K³K³K¯K°K²KµK«K¬K¨K¢KžK™KK’K KžKKˆKyK7K1KGKFK/K'K=K3K*K*K.K%K%K'K)K9K5K2K4K7KUKcKeKVK;K8K6K/K,K-K-K,K4K>KDKK5K&K,KVKcKFK@K6KDKMKfKbKAK'K%KK6KIK:K5K6K1K4K1K1K2KAK2K/K'K6K,K/K4KKMKYK`KdKoKxKK‚K€K„K‰K„K‹KŒKŒKŽKŠKKŽK‹KŽK‹K“KŽKK‹KK‰K‹KŠKŠKŒKŠKˆKŒK‹KŠK‰K‹K‡K†KŠKŠKŒK„K„K‰KŠK‡K‹KŠK‹KŒKŽKŽKKK’KK’K“K•K–K™K–K™K›K˜KœK›KšKœKKœKKŸKžKŸK¦K¡K£K¤K¡K§K©K¬K©K«K«K®K´K±K¸KºK»K¸KºKÁKÁK¿KÁKÃKÉKÆKÉKÆKÊKÌKËKÌKÐKÐKÎKÍKÏKÓKÒKÓKÔKÕKÖKÕKÔKÔKÕKÖKÖKÖKÖKÕKÔKÂKkK\KyK„KK‘KŒKK†K|K…KŠK‹KŽKKKŽKŒK‘K‘KKK‘K–KK“K•KK“K‘KKŽKŽK‘KŒKŒK‘KŽKKŽKŽKKˆK‰K‰K†K‡KƒK~K…K‚K‡K†K‡K†KˆK‰K‹KKŠK‰KŒKŽKŠKK˜K 
K¥K©K¬K©K²KÁKÉKÑKÕKÖKßKØKØKÕKÓKÎKÇKÀK´K¢KKKzKoKoKiKlKkKhKhKmKaKcKaKXK[K]K_KeKiKbKbK_K]KZKPKNKFKRKXKbKfKnKpKjKfKbKdKeKcKZKZKVKXKaKZK^K`K^KeKYK\KaKdKgK_KYKUe]rÉ(K,K,K,K/K(K%K#K(K+K6KQKhK…KKœK¡K§K¬K«KªK£K£K—KK„KmKXKAK6K5KCKOK]KuK„KK‘KœK¨K©K­K±K¯K°K­K³K²K¯K±K°K¯K²K²K²K´K²K«KªK¦KŸKžKKK™K¥KŸK…K_KAK9KeKUK4K,K7K$K%K%K)K-K#K(K;K6K@KUK^K]KOK9K.K2K'K*K.K+K.K(K+K6K>KBKAK=KK:KK?K6K3K.K9K5K0K0K3K3K7K,K.K,K-K2K4K@KKKUKVKUKVKeKmKsKvKtKyKzK€K}K~K€KzKK{KvKmKWK:K0KAKSKaKaKgKmKzKƒK…K‚K‹KˆK‰KŒKŠK‹KKŽKŠK‰K‡K‹KŒKŽK“K”KK‰KŒKŠK‹KŠK…K‰KK‡K‰K‰KŠKŠK‡KˆK‰KKˆKŠK‰K‡K†KˆK‡K‡K‹KŒKŽK‹KKKKK’K—K’K“K›K•K•K•K–K™KšK”KžKœKžKžKŸKœK¡KŸK¢K¤K¦K¥K©K¦KªK®K¬K®K­K°K°KµK¹KºK¸K¼K¾KÃK¿KÀKÅKÄKÅKÇKÉKÇKÉKÎKÎKÎKÐKÏKÑKÑKÔKÒKÒKÕKÓKÔKÖKÔKÔKÕKÖK×KÖKÖKÖKÒKÉK‰KfKxKˆK’K‘KŒKŠK€KtK{KƒK„K†K†K†K‡KˆK‘KŽKŒKŽK’K‘K“K”K”K‘KKK”KKKŽKKKKKŽKKKK‡KˆKŠK‡K‰K…KƒKˆK†K„KŒKŒK‹KŽKŒKK‡KŠKŒKK‹KŠKŽKŠK’K™KKžK¢K±KÃKÌKÑKÕK×KÝKÚKÕKÔKÑKÍKÊKÂK°KŸKŒK€KwKqKqKqKpKkKfKeKgK_K^KZKTK^K`KbKhKdK_K^K^KUKMKKKKKTKSK]KcKfKoKnKjKhKgKaK`KbKYK]KXKVK[KXK[KeK]KdK`K_K^KaK_K[KQKKe]rÊ(K*K*K&K K$KKK$K'K1KNKhK|KKšKžK¤K¨K«K¬K¦K¥KšKK†KsK^KBK:K:KDKIK\KqK†KŒK—KK¥K¨K¬K®K°K³K¯K°K¯K¯K°K±K­K²K³K°K³K±K«K¨K¢KŸK™KŽKK•K£K¤K†KbK?K=K`KWK8K5K2K&K(K'K*K*K+K?KNKNKZK\KOK>K@K1K/K.K0K*K,K)K)K+K+K2KBKIK5K8KHKK:KDK=KDKEK>K:K;K0K.K0K@KIK:KHK9K(K*K*K4KLKnKhK8K)K+K-K3K8K0KCK^KhK_K=KCK*K*K1K?K:K9KQKaKmKtK~K’KjKRKEKK7K0K4K4K.K3K2K.K:K+K/K.K2K/K.K1K/K8K=KCKHKPK^KaKjKmKrKtKwKwKyK{K‚K~K{K}KzK{K|KoKVKDK5K7KNK_K_KbKrK{K€K„K‚K†K…KˆK‰KŠK‰KŠK‹KŽKŽK’KKŽK‹KKK’K“KKKK‹K‹KK‹K‹KKŒK‹KŠK‰K†KˆKˆKˆKŠK†K‡KŠK‹KˆK‡KŠKKK“KKKK‘KK‘K“K“K“K–K‘K“K—K˜K˜K˜K›K›K—K›KšK›KK›K›K 
K¡KžK¡K£K§K¤K­K±K¯K¯K¯K¶KµK·K¸K¶K¼K¿K¿KÁKÃKÃKÅKÇKÅKÅKÉKÉKÊKËKÏKÐKÐKÒKÑKÔKÓKÔKÕKÓKÓKÔKØKÕKÒKÔK×KÖKÔK×K×KÔK¿K~KzK‘K’K’KƒKKiKkKmKsKzKqKxKxK{K|KKƒK„K~K†KˆKKKKK‘K‘K“K•KKŽKK’KŽK‹KŽK‘KKKŽK‹K‹KKKŒKƒKŠK‹KˆKŒKK‘KŽK•K’K’K”KKKŠK‰K‰KƒKKƒKK’K£K³KÅKÎKÒKÛKÝKÜKÚKÙKÕKÐKËKÁK·K¥K‘K†KvKsKrKrKnKkKgK^KbKfKaK]KYKXKZK`KfK`K`K_KVKOKTKKKJKRKXKXKgKiKfKgKlKeKdK_K_K^K[KYKWKTK^KVK]K\K`K^K_K]KaKcKfKZKTKKKEe]rÌ(K(K(K%K"K!K!K-K)K*K3KFK[KrKK˜KœK¢K§K¢K¢K¥KŸK™KK‚KqKdKDK2K;K?KNKXKpK‚KK”KœK£K¦K­K­K±K°K®K¯K­K¯K°K²K¯KµK±K²K´K°K«K­K¤K¢K›K“K†K‹K•K®K‘KkK:KNK]KaK2K7K7K*K*K1K+K1K2K8K=K7KKUKoKWK5K,K+K'K+K7K4KGKjKkKbKAKCKOK*K"K1K/K-K7K8KDKbKK•KƒKnKRK9K,K0K1K?KEKHK„K{KœK³K«K´K£KjK)K,K3K2K4K;K=K.K1K+K4K@K6K9KAK4K5K0K3K7K0K-K.K+K4K-K0K*K1K,K8K@KKKPKPKUKdKpKmKnKpKvKtK{KzKK|KzK{K|K{KyKiKRK7K/KCKTKbKXKkKtK~K‚KƒKˆKŠK„K‰K†KKK‰K‹K‹KŒKŒKŽKŽKŒK‹KK•K—K‘K‘KK‰KK‰K‰KKŽKŒKŒKK‹KˆK‹K‡KˆK†K‹K‰K‹KKŒKŠK‡K‹KKŽKKŒKKKK“K“K”K–K•K–K”K•K–K—KšK™K›K™KžKKžKšKžKŸKžK¡KŸK K¤K¦K¤K­K­K®K­K®K²K²K¶KµKºK´K¼K½KÁKÁKÀKÅKÄKÃKÇKÉKÉKÊKÌKÌKÏKÑKÒKÒKÓKÑKÒKÔKÓKÔKÔKÖKÕKÕKÕKÖKÖKÔKÖKØKØKÊK—KzKK•K‹K{KvKaKbKgKlKrKhKoKpKuKtKwK~KK~K‚K†K†K‰KŒKŽKKKK’KK‘K•KKKK“KŒKK‘K‰KKŠK‡KŽKK†KˆKŽKK”K‘K•KK—K‘K’K‘KKŽK‹KKŠK…K…K…KKK¢K¸KÇKÏKÖKÜKÝKÝKÚK×KÓKÐKÉK¾K°K£KŽK‚KxKrKmKpKlKlKjKjKeK\K`KSK^K[K]KfKdKfK`KWKTKWKQKLKOKSKZK\KdKhKiKjKbKeK_K`K\K_KXKRKXK]KWKXK]K`KaK_K\KYK_KcK[K[KTKGK?e]rÍ(K%K%K)K#K'K#K&K+K$K-KOKfKK’K˜KKŸKªK¢K¤K¢K 
K—KŒK†KwK[KFK:K;K=KMK]KpKK‰K“KšK¢K¦K¨K®K®K¯K¯K°K±K®K®K±K¯K°K±K³K±K°K±K§KªK¤KžK”K‡K‚KŒK¨K’KbK8KGK`KEKK4K+K2K=KKK5K9KCKAK9K7K5K4K7K?KLKPKCK9K.K-K4K9K1K5KAKEK7K.K+K)K'K&K)K+KGKhKPK1K4K6K1K+K+K6KHKkK{KxKwKoKCK6K_K=K+K=K;K3K)K)K$K>K]K›K–K^K3K/K5KBKWKYKDK\KAKWK€KŸK€K[K&K!K1K3K1K9K6K/K'K%K*K9K?K8K3K3K4K*K+K7K/K,K,K%K.K(K/K;K/K+K1KK,K@KGKKfKlKoKgKJKLKUKšK{KKK)K'K1K0K5KDKK4e]rÐ(K%K%K'K!K#K!K$K#K*K4KEKeKsK†K’K›K£KªK¯K¯KªK©K¢K—K‰KqK\KEK=K=KAKPK]KlKKˆK“K›K¢K£K°K­K¬K«K±K°K°K±K®K±K±K²K±KµK´K±K°KªK¢K¤KžK”K‹K€K|K†K‡KaKHKMKaKIK=KDK,K&K(K-K.K,K3K2K3K5K4K-K8K0K/K0K+K2K,K0K0K3K2K=KHK9K1KEKJK=K4K0K.K5K9KGKPKRKHK;K.K,K*K1K*K)K/K8K7K2K,K,K)K+K&K4KPKYK0K'K1KAKAK6K/K7K=KOKyK}K{K}KŠKIK2KEKLK@K,K'K K(K/K7KK=KCKFKFKOKUK_KiKpKoKsKwKsKsKuKyKK~KK{K{KyKqKVKDK6K2K=KQK[KaKnK„KŠK‡K„K„KˆK…K†K†K„K‰KŒK‡KKKŒK‰KKKKKK‘K‘K‹K”KK’KKK‹KK‹KŽKK‰K‹K‰KŽK‹K†KŽK‰KŒK‡KˆK‡KŠK‰K‡K‹KŠKŠKŒKŒK•KK‘K‘K•K‘K•KœK“K•K—K›K›K™K›K™K›K˜K™KœKŸK¢K¤K¡K K¤K£K¥K¦K§K¨KªK¬K©K¬KªK´KµK¶KµK¼K½K»K¾K¿K¾KÄKÄKÄKÄKÈKËKÊKÌKÎKÐKÑKÒKÓKÒKÒKÓK×K×KÖKÕKÙKÕKÖKØKÖK×KÖKÙKÜKÙKÓK®K€KuKXKRKPKNKGKOKNKbKLKLKRKWKUKVKaK_KbKcKfKgKjKmKpKpKsK{KwKyKzK}KK†K„K„K…K‰K‡KˆKŠKˆK‹KŒKK‘K’K—K‘K“K›K˜KšKšK‘K‘K‹KŠK‰K„K†K…K†K‹KK—K¥K¸KÆKÏKÕKÙKÛKÝKÛKÕKÐKÌKÅK¼K­K—KŠK~KuKpKkKgKfK[KVKMKOKMKNKVKYK_KfKeKbKZKSKNKGKEKKKSKOK[KaKnKfKeKaKdK_K[KbKcK^K[K]KYK_K[KaKhKfKiKcKZK^K^KUKWKRKLK@K=KAK5e]rÑ(K#K#K$KK!K(K)K#K(K.KEK\KoK„K”KK§K®K¯K®K°K¨K¢KžK‹KyKbKCK>KBKBKJKWKlKK‹K”KšK¡K¥K«K«K®K«K­K°K®K¯K±K²K±K³K³K²K³K³KªK«K¦K£K 
K—KŽKK|K‚KwKaKSKNK_KQKJK>K,K$K(K*K)K/K3K:K;KAK6K/K1K6K0K:K+K.K0K,K;K6KKJKOK>K5KKIKMKEKCKPKQKPKQKZKYK]KYKUK`KaKeKbKhKnKoKsKvKuKqKuKKKK|KK‚K~K„K‡K‰KKŠK”KK—K›KœKKšKŸK˜K‘KŽKK…KK}KKKƒKˆK’K›K¯KÁKËKÓKÕKÜKÞKÝKÚKÔKÐKÉKÀK²K¦KKƒKyKuKmKeKeKYKZKWKMKKKXKQK\K]K`KcK_K^KWKKKIKGKJKMKUKRKcKkKkKiKdK^K`K_KeKbK^K]K_K\KVK\K^KhKjKiKiKaK_K^K]KSK\KOKNKDKK/K-K3K4KSKFK*KHKuKzKoKjK\KdK‡K„KˆKœKuKCK:K1K6KIKcKLK+K1KFK4K7K?KKCK3K2K2K0K.K/K/K,K8K/K/K,K5KAKLK7K8K3K-KCKEK>K4K+K2K7K=KKWKWK_KnKyKK†K…K†KK„K‹KŠK„K†K‚K‡KŠKˆK“K‰KŽKŒKKKKŽKK“KŽKŽK‹KŒKK‹K‘KKŽKK‰KˆKŠKK‰KˆK‡KŒKˆK‹KŒKˆK‹K…KŠK…KˆKŠKKˆKKŠKKKKK‹K“KK’KK’K”K—K’K•K—K˜KšK˜K›KœKœKKžKŸKŸK K£K¥K¥K©K¬K¨K¨K¯K¬K®K­K­K°KµK»K»K»K¹K¾K¿K¿KÂKÃKÆKÇKÈKÊKÊKÍKÎKÏKÐKÏKÑKÐKÔKÓKÕKÔKÔKÕKÖKÕKÖKÖKØKØKÙKÛKÛKÚKÕK¥K^KIKQKTKXKGKXK_KTKIKRKPKQKNKLKOKHKQKJKIKLKRKQKVKSKSKRKRKOKXKUKXK]K\KbK_KfKcKdKeKoKnK{KzK†K‰KŒK‘K”KžKžK KœK—K‘KŽK‘K‘K•K–KK‹KK”K¦K½KÊKÒKÙKÚKÜKÝKØKÔKÎKÄK¹K§KšKŠKKwKkK_K\KUKUKRKIKKKGKLKSKYKbKcK`K^KZKSKHKHKEKQKYKaKgKcKhKmKfK]K]K_K\KaK_K]K\K[KVK_K_KeKcKiKeKeK`K\KdK]KWKKKKKFK;K7K2K6KK@KGK6K6K9K9K4K6K-K.K/K,K1K+K8K9K9K1K1K%K1K0KGKtK/K"K$K0K_KtK[KPK]KjK„KpKwK¬KžKpKUKPK8K0K/KAKLKCKZKVK`K[KRK^KbK—KuK,K)K5K3K;K7K7K7K5K7K3K.K+K*K%K(K1KAK/K(K(K,K/K-K.K+K-K+K.K+K1K)K;K:K:K?KHKTKOKSK[KdKsKpKwKsKyK{K{KsKwKvKxKvKxKtKsK]K9K-K-K0KEKYKcKfKuK~K‚K†KˆKˆKƒK…KŠK‰KˆK„K†KŠK‰K‡K‰KŠKK‹K‹KŽKKŒKK‘KK‹K‰KK‰KŠKŽK‘KKˆKŒKŠKŠK’KKŠK‡K‡KŠK‹K‹K…K‡K†K‰KˆKˆK‰KˆK†K‹KK‘KKŽKŽKŽK’K‘K’K’K“K‘K•K”K’K™K•KšK•K›K™KšKŸK¢KK K K¡K¥K¦K§K¨K§KªKªKªK¬K«K­K²K¶K¸KºK¶K½K¾K¿K½KÅKÄKÅKÉKËKÉKÉKÌKÍKÏKÎKÏKÑKÑKÒKÒKÓKÓKÕKÖKÖKÖKÖKØKÙKØKÛKÙKÙKÛK×K¸KbKFKRKTKZKTKYK_KSKSKQKSKRKNKQKSKJKRKSKMKJKIKKKFKMKHKHKNKLKNKPKQKQKSKVKNK^K^KZKaKdKlKmKpK}KK†KK•K™K›K›KšKKŽK–K¢K K›KšK•K–K“KšK­KÀKËKÓKÙKÝKÝKÜKÖKÓKÊK¿K³KœKKƒKxKkKaK_KUKOKKKHKEKJKPKTKVK^K`KZKWKWKQKOKKKKKPKTK^KdKhKkKgKcKdK`K_KZK_KaK]KYK[KZK\KgK\KfKiKoKiK_KgKbKaKZKWKOKEKJKBK:K-K5K4e]rÖ(K'K'K%K%K"KKKK"K'K/KFKoKŠK—KŸKªK«K°K°K±K¯KªK 
K“K‹K~KlKCKK1K3K-K)K&K/K0K5K1K)K-K+K+K6K,K.K)K,K5K)K+K.K/K:K9K>KJKOKNKWK_KkKqKwKzKxKxK{KvKtKtKzKwKwKuKqKcKCK/K*K&K2KEK^KiKkKxKK‚K…K…K‡K†K‡K†K†K„K†K‹KˆK…KŒK‰KŒK“K‰K‹KŒKKŠK‘K”KŒKŠK‰KŒK‹KKŽKŽKKKŽKŠKŠK’KKŒK‰KŠK‹K‡KK‡K‡K…KƒK„K‰K‡K…KˆKŠKŒKŽKK‰KŒK’K’KŠKK‘K‘K“K’K•K•K‘K™K˜K˜K˜K–K™KžKžK¢KžK¡K¢K¤K¤K¥KªK©K¨KªK¨K¬K¨K­K±K´KµKºK¶K»K¼K¾K¾KÂKÂKÆKÇKÊKÊKËKÊKÍKÎKÏKÐKÏKÑKÒKÔKÕKÔKÓKÓKÕK×KÕK×K×KÙKÛKÙKÛKÛKÙKÆKyKMKJKTKVKQKaK`KVKZK^KYKTKWKRKSKMKWKLKNKDKHK@KKKGKJKAK?KDKFKJKHKLKGKKKHKQKPKKKJKSKWKaKeKuKwK~KˆKŒKŒKKK–K“K›K¡K©K¥K¡KšK—K–KšK£K¶KÆKÏKÔKÛKÞKÜKÚKÔKÐKÅKºK¦K™KŒK~KpKbKZKPKLKGK;K>K=KFKIKPKUKXK\KZKSKVKOKJKLKLKQK\KbKcKlKnK^KVK]K[K^K`K]K_KdK^KVK[KYK[KaKeKgKiKdKXK^KbK^K\KQKPKOK?K7K:K3K3K5e]r×(K'K'K&K"K!K!KK"K!K)K4KGKgKK–KŸK¨K¬K°K°K²K¯K¨K£KšKK|KlKEK5K=KKKSKkKuK†KK–KK¤K¤KªK­K¬K­K±K±K°K²K±K¯K°K²K´K´K´K¯K°K©K£KœK›K“KK^KRKqK‚KeKSKYK]K_K9K&K$K)K2K;K4K)K-K1K/K.K.K)K-K*K-K0KHKHKHK?K6K0K/K4K4KLKKK9K/K.K2K3K3K9KFKAKKCKEK?KDK=K?K@KDK?KAKCK?KBKBKRKVKXK\KkKwKyK{K€K‡KK“KKªK¯K°KªK§KšK•K“KŸK¬K¼KÈKÒKÕKÛKÚKÚKÙKÓKÊKÀK°KžK“K…KsKiK^KSKNKJKAK:K>KKAKEKCK=K3K:K4K(K)K/K7K=K*K8K4K7K5K2K0K$K$K+K#K,K4K.K)K;KaKgKZKfK{K~KqK}KKKK’KhKLKqK€KWKjKWKfK}KsK]KHK1K'K$K/K+K-K.K1K7K4K;K4K,K.K(K-K-K2K1K*K)K)K&K&K-K*K,K'K)K,K)K)K/K3K7K;K;KFKXKXKWK`KhKpKuKwKrKrKuKxKvKvKwKyKxKuKlKOK5K'K$K/K4KMKWKfKnKyK‚K‚KˆKKˆK‰KŠK‰K†KŠK†KˆKˆK‹KŠKŒK‰KK‹KŒKŽKŽKŒKKKŽK‰KKKŽK’KKKŒK“KŠK‰KKŒK‹KŒK‹K‹K‹KK‰K‹KˆKˆK‰K‰K†K‰KŠK…KKŒKŠKŽK‹K‹K‹KK“K“KK‘K‘KK“K’K–K“K–K—K’K˜K—K™K›K™K›K¢KšKŸK¢K£K£K¥K¥K¨K¨K¦K«KªK±K°K²K´K·K¸K½K»K¼K¼KÀKÁKÂKÇKÉKÈKÄKÉKËKÑKÐKÐKÍKÑKÑKÐKÐKÔKÒKÔKÖKÔKÕKÕKÖKØKÙKÚK×KÚKÛKÔK¬KXKEKNKZKYKdK]K`KbK^KaKWK]K^K`KWKWKRKXKLKUKLKNKDKKKDK?KCK?K;K>K4K:K8K6K2K4K0K5K4KK8K;KAKWKPKQKYKYKLKJKFKJKQKMKUK\KXKlKmKoKiKfK`K[K]K_KZK\KXK[K`KXK[K\KYK]KeKfKeKjKfKaKcK]K[KYKOKEK?K?K9K9K7K4K5e]rÙ(K+K+K*K)K*K&K,K-K)K'K1KGKdK†K—K¢K¨K¬K³K´K¶K®K«K¤KžK‘K…KoKHK=K9KKK\KcKzK…KKšK¡K£K¥KªK©K¯K²K²K°K®K±K¯K­K¯K´K´K³KµK³K¯K­K§KŸKKKdKBK_KvKvK^KSKTKQKVK7K'K-K.K8K8K(K'K&K'K-K,K%K*K+K0K;KNKTK5K2K;K6K/K+K/K1KIKFK6K1K)K1K6KK;K?K9K7KK+K-K:K>K>K:K9K,K)K/K5K;K8K-K$K,KGKlKjK^KoKK‰KK{KsK‡K”KŒKiKsK‹K_KfK^KOKNK
cKpK]KIK/K(K.K'K+K/K0K3K6K=K3K,K.K,K)K+K6K1K.K*K'K&K)K'K)K'K'K)K+K'K,K,K2KKK6K0K.K0KAK@K6K-K+K+K0KDKPKFK?K3K:K?K6K9K8K?K8K4K=K:K4K2K2K3K-K'K)K/K8K?K=K4K*K,K2K8K/K/K$K(K,KLKcKmKmKqK‡K™K“KxKdKvK’KˆKkKnKrKPKXK_K9KCKnKpKVKDK*K'K(K'K0K/K7K;K1K.K*K7K0K-K3K.K.K)K+K*K.K/K+K(K"K)K*K-K+K*K,K3K8KBKDKPKUK[K^KiKlKqKrKrKwKnKwKwKsKxKxKsKqKhKGK2K+K%K+K7KOK\KfKrK|KƒK„K‚K„K†KˆKˆKK…K‡K‡K‰KˆKŠKŒKŒK‰KŠKŽK‹KŒKŒK‰KˆKŒKKKŽK‰KKKŽK‹KK‘KŽK‹KŽKŽKKŽKŽKˆKKŒKŽK‡K‹KŒKŠKˆK‰K‡KˆK…KˆKŒK‹KŠKŒK‰K‡K‹K‹KKKKKK”K‘K“KšK˜K—K™K™KšKšKœKšKšK›KžKžK¡KœK¢K K¤K¤K¢K¤K©K«K¬K­K±K°K¯K³K¸K¶KºK¹K¼K¼K½KÄKÃKÆKÅKÈKÇKÌKËKÍKÍKÎKÎKÐKÐKÐKÓKÑKÔKÔKÖKÔKÕKÖKØKØKØKÚKÙKØKØKËK‰KNKQKTK]K`K`KeKfKdKfKfKjK_KZKbK\KZK[KXKZKTKQKNKMKFKDK?K?K=K;K:K4K9K/K-K/K.K1K-K-K,K1K5K;KAK\KWKoKˆK¥K·KÂKÄKÂKÁK¼K³K¨K K¡K¦K¹KÄKÍKÒKÖKØKØKÖKÓKÌKÀK±K¡K‹KyKkKWKMKKKDK;K9K:K?K>KIKPKMKSKRKMKOKKKIKNKZK_KcKhKdKpKkKqKiKeK_K[K[KXKUKYKVKSKYK\K]K[KaKeKbKjKiKbKcKbK`KXKUKLKIK;K:KKK7KK=K>K;K8K0K*K;KK2K0K,K(K-K'K2K=K>K9K=K/K2K7K1K+K(K&K,K,KCK\KjKhKoK˜K¦K“KyKfKrKŠK…KeKYKaKMKbKZKKKcK^KNKKK-K)K'K*K,K5K7K2K+K(K,K-K0K3K>K;K;K:K,K.K+K2K(K&K$K*K*K*K(K(K.KK8K4K3K3K5e]rÜ(K.K.K9KFKCKGKEKCKAKBKDKTKgKyKK¢K¨K°K²K¶K·K·K²K±K­K›KƒKhKUK>KAKJKZKhKxK…K’K—KžKžK¦K¬KªK®K±K®K²K­K®KªK°K³K²K¶K¸K³K´K°K¬K§K£K™K€K\K^KXKK‘KhKGKAKK8K+K)K6KGKDKGK8K?K?K8K:K9K7K=K@K8K1K8KBK5K/K&K(K,K#K#K'K0KAKHK:K'K*K6K:K4K/K/K.K+K%KKLK^KkKtKƒKK€KƒKƒK‡KˆK‹K‡K‰KKˆKˆK‰K‰KˆK†K‰K‰KˆK‰K‰K‹KŠKŠK†KŒKKŽKŽKKK”KKŽKK“K‘KKKŽKKŽKŽK‹K‹KŒKKŒK‰K‰K‡KˆK‰K‰K†K†K‰K‹K‹KKŠK…K‹KŠKŠK‹KKKŽKK‘KK”K“K”K‘K”K˜K›KšK–KšKšKžK›KšKžK›KžK 
K¢K¤K¤K¤K©K¦K¬K±K²K®K±K³K·K·K¸K¸K»K½KÀKÁKÂKÅKÃKÅKÆKÈKÌKËKÍKÎKÏKÐKÒKÐKÏKÒKÓKÕK×KÕKÕKÖKØKØKØKØKÙKØKØKÔK¸KfKQKVK\K_KdKcKjKkKfKhKdKfKhKfKfKgK_K`K[KYKXKUKQKRKQKHKKKEK=K=K7K2K5K3K.K.K&K'K"K#K%K'K.K6KAKeK‹K©K¿KÅKÊKÊKÊKÃK½K«KŸK˜KžK°KÂKËKÑKÖKÖKÙKÖKÓKÌKÁK·K¢KKxK`KOKFK?K;K9K?K9KBKFKMKLKTKSKTKRKGK@KDKJKSKZKZKgKeKpKlKlKfKbK\KYKVKVKSKSKPK[KOKPK_KZK`KaKkKlKhKgKgKdK\K\KWKRKEK;K;KBK7K6K:K4K3K9e]rÝ(K;K;KMKQKNKLKLKJKDKHKFKUKgK{KK›K«K²K±K·K·K¸K´K²K«KšK†KsKZKEKK6KDK?K4K4KK9K.K1K0K*K3K$K%K%K1K&K#K*KDKsK”KµKÃKÉKÍKÍKÓKÆK¹K¢K™K•K£K·KÅKÌKÓKÖKØKØKÓKÑKÈK¾K«K”K{K]KVKKKIK5K;KK4K7K?KEKK2K2K1K/K/K*K/K)K4K3K,K'K%K+K4KK>K7K6KK>K8K6KK9KKIKOKHKLKPKUKaKhKrKsKvKqKwKsKwKuK{KzKrKgK?K-K*K)K@K.K:KPKWKeKvKzKƒK‚KƒK…K€K†KŽK…K‰K†K…K†K…KŠKˆKKKKK‡KKˆKŠK‰KŒKKKŠKK‹K‹K†KŒK’K‘K‹KKKKKKKKŒKŽKK‘K‹KŒKŠK‰KK‰KKŽK‰KŠK‹K‰K‡KŒK‰KK‹K‰K‹KŽKK‰K…KŠKŠK‹KK’K’KK”K“KK‘K‘K—K™KšKšKšKK›KK›K™KŸK¢K¢KŸK¡K§K¬K¬K©K¨K«K­K®K°KµKµK¸K¼K¾K¾K¿KÁKÂKÃKÆKÆKÈKÆKÉKÌKÍKÎKÎKÑKÏKÐKÐKÒKÐKÐKÓKÔKÔKÔK×KØKÔKØKÙKÙKÚKØKÄKvKOKTKUKXKbKhKjKmKlKfKkKkKlKlKeKhKhKfK^KaK^KTKZK]KVKUKNKJKQKIKDK=KK=K:K9K;KBe]rá(KYKYKZK`K]K\KZK[KeKbK\KhKwKzKˆK™K¥KªK²KµK·K¸K´K®K«K¤K•KKpKTKKKWKbKeKsKƒKK–KœK 
K¥K¨K¬K°K±K´K³K°K±K®K²K°KµK¹KºK·K´K¯K©K¦K¡K‡KrKpKaKpK}K|KcK]K[K9K,K,K'K'K6K,K2K>K2K/K3K4K0K,K,K3K3K0K1K2K0K0K-K3K3K1K3K1K1K3K0K2K2KDKAK8K+K/K>KIKPK;K?KIKKAKBK+K,K/K0K)K*K+K)K1K8K9K7K,K(K'K;KOKEK5KGK=K7KK9KK-K+K*K2K4K2K/K,K+K.K1K1K5K6K,K-K.K0K,K+K8K7K4KCK?K1K4K2K3K*K-K*K.K:K1K1K/K9K>KBKNKKGKOK>KFKJKdK€KuKŽK—K€K^KbKwKXKLKAK3K/K3K5K/K+K+K*K+K(K.K-K(K-K0K1K(K#K#K$K#K%K*K-K6K.K.K4K:KEKIKMKPKRKSKkKjKjKtKzK}KsKvKqKyKtKuKfK>K+K%K$K#K,K=KLKVKeKoK{KK}K~KK‚K…K„K„K…KˆK‡K†KŠK†K…K‹K‹KŠK‹KKŒKˆKŒKŠKŠK‰KKKKŠK“K‡KKŽKKKKKKKKKKKK“KK’KK‹K‹KKKKK‹K‰KŠK‹K‰K‰K‰K†K‹KŒKŠK‰KŒKŽKŒKŒKˆKŒKŠKŠKŽKŽK‘K“KK”K‘K’K•K˜KšK–K™K™K–K˜KKœKžKŸK¡K£K¢KŸK¤K¤K¤K£K§K§KªK¬K°K¯K³K¹K·K¶K½K»K¿KÁKÂKÁKÅKÅKÇKËKÉKËKËKÏKÐKÏKÑKÏKÐKÑKÑKÕKÔKÖK×KÕKÔKÓKÕKØKÖKØKØKÐK¢KbKKKQKYK_K_KfKkKfKnKkKkKmKmKpKkKkKhKbKbKYKdK[K_KYKUKNKPKRKPKJKHK?KFKKAKGKAK-K*K0K1K1K%K&K6K9K?KFK:K%K-K0K:K@KIKJK,K6K2K1K?KCKVK:K;KAKJKUK;K;KHKsKLKhK†KŒK{KxK{K>K4K0K*K0K1K2K/K3K3K)K.K6K5K)K+K&K2K)K(K(K#K!K'K'K1K2K5K.K6KK/K#K#K"K,K4KOKWKfKkKuK~KKK€KKƒK‚KK„K…KˆK‡K†KŒK‡K‡K‰KŠKŒK‹K†KKŽK‰KŽKŒKŒK‰K‹KKŠK‹K‹K‹KˆK‰KK‘KŒK”KKKŒKŠK‹KŽKKK“KKŒK‰K‹KŒKKKŠKKŠK‡KŒKˆKˆK†K‹KKŒKŠKKŒKŽK‘KKŒK‹K‹KŠKKŽK‘KK‘KK“K•K˜K˜K•K”K–K™K™KKšKžK K K¡K K¥K£K¦K K£K§K§K­K°K±K®K³K»K·K¹K¸K¹K½KÁKÀK¿KÃKÅKÈKËKÈKÌKÌKÎKÎKÏKÒKÏKÑKÑKÑKÐKÕKÔKÕKÖKÒKÐKÓKÖKÙKÚKØKÔK¶KkKMKSKWK_K`KhKjKnKiKnKmKkKlKhKeKhKbKjKbK^KcK^K\K^KXKRKUKXKUKMKFKGKBK>K;K7K4K4K-K0K7K8KJKcK…K¡KµKÄKÉKËKÉKÇKÆKÂKÀK¿K¾KÂKÆK¿KÁK¼K®K¥KKzKYKEK>K6K3K3K4K4K5K6KAKGKLKCKLKAK?KDKDKNKNKTKZK]KcKkKkKlK_KcKgKSKRKLKKKLKPKRKRKMKLKRKPKVKZKcKjKlKqKrKsKhKgKhKeK[KVKTKQKRKSKIKFK:K;K>K@K>KCK@e]rä(KVKVK^KZKWKbKcKbKdKlKnKuKƒKŠKŒK K§KªK¯K´KµK·K¶K³K°KªK™K‰KwK^KHKYK\KfKtKƒK‹K“KšK§KªK©K­K±K¯K¯K²K´K³K±KµK²K¸K¸K¸KµK³K±K­K¨K¦K‹K\KYKoK{KxKqK[KXKJK,K,K-K-K9K0K-K+K0K.K1K+K2K9K/K.K2K.K0K-K2K6K2K7KAK;K.K,K:K3K7K6K0K/K*K,K*K2KKIKZK€K K·KÂKÈKÊKÉKÈKÉKÈKÁKÃK¾K»K¿K´K±K©K 
KKvKjKPKEK@K2K4K6K9K5K:K@KEKEKLKEK?KAKAKGKGKWKTKbK`KbKfKhKgKfKaK]K^KWKTKKKFKPKKKKKNKRKTKRKUKZK`KhKhKlKrKuKlKiKaK^K\K_KVKRKRKLKPKDKEK=K;KK^KLK,K:K/K.K@K]KnKKK0K8KAKiK^KhKKPKBKUKfKyK–K›KsK@K2K3K3K/K.K-K-K2K0K/K2K1K.K*K'K+K,K*K'K+K*K+K2K-K-K/K8K:KIKKKEKMKQK^KgKeKeKnKqKyKyKwKpKqKgKNK*K)K!K'K,K2KAKLKRKdKeKrKyK}K~K{K|K~K}KK„KƒK€KƒK‰K„KˆK…K‚K‰KŽKŠK‹K‹KŒKŒK‹K‰K‰K‹KŒK…K‹KK‹KŠKŠKŒK‰K‹K‹K‘K’KK“KŒKK‹KŒKŒKKKK‘KŽK“KKKŒKKKKK‰K‹KŠK‹K‰KŠKK‹KŠKKKŠK‹KKŽKKŠK‹KKKK”K“K‘KK“KK“K”K•K–K˜K™K˜K™KžKKŸK¡K£KŸK¤K§KŸK¢K¥K¥K©K¬K°K­K®K±K±K³K¶K¹K»K¿K¿KÀKÀKÃKÅKÆKÈKÈKÊKÌKÍKÍKÎKÐKÒKÑKÔKÕKÓKÒKÖKÕKÔKÒKÔKØK×KØK×K×KÎKŸKaKQKTKTK[KaK^KgKfKiKmKlKoKiKhKiKgKkK]K]KaKeK^KbK^KYKaKUKTKRKKKHKTKJKHKGKCKBKDKLKJKKKOK`K€KŸK¶KÄKÉKÎKÍKÎKÏKÊKÆKÃK¸KµK¯K§K¡K˜K‹KtK`KUKLK;K6K2K4K4K9K6K@K@KBKJKLK>KAKAKAKAKLKTKXK_KaKnKiKhKiKgK\KXKZKOKPKOKJKSKLKMKLKRKNKQKYKeKbKlKqKrKtKoKhKeKaK^K^KbKVKYKQKIKDKDK?K4K?K>KDKJKGKEe]ræ(KIKIKVK]KcKcKkKkKlKsKxK{K„K‡KŽK›K¨K¯K¯K³KµK´K±K«K¬K£K™K‰K{KfKKKRK`KgKtKKŠK’KœK K§K¤K®K®K¯K°K¯K³K²K³K¶K¶K·K¹K¸K³K³K±K­K§K¡K•KZKMK{K~KwKhK\KLK4K)K(K'K8K2K1K0K5K/K,K-K0K3K(K4K-K;K:K4K+K+K9K0K6KAK7K0K-K.K4K4K-K*K.K0K3KKAKCKIKAKKQKQKBKOKLK=K,K-K9K=KLKVKXKUK=K*K)K,K7K6K+K%K/KTKHKYKgKQK=K7K9K9K=KmKCK*K8KKK;KBKXKWKOKCKPKWKyK‹K–KvK@K:KOK0K5KFKbKcK?K*K1K.K,K-K/K)K*K1K1K4K-K+K+K%K*K#K#K$K%K,K3K/K2K;KEKFKNKLKQKYKhKbKfKmKiKjKkKrKeKOK6K'K"K.K#K+K.K;KOK^KcKfKiKpKuKuKwK|KxK„KK€K„KƒK†K‚K„K†K†KŠK‰K‹K‡KˆKˆKŒKˆKKKˆKˆK‡KŒKKK‹KŠK‹K‹KKŒKKKŽKKKKŒK‹KŒKŒKŠKKŒKŽKKKKKŽK’KKŒK‘KŒKŽKŒKK‹K‡KŠK‘KŒK‰KˆKŽKKK‡KˆKŠK‹K‰K‰KŒK‹KŽKKKŽKŽK“K‘K–K–K“K“K’K•KžKšK›KšK˜KŸK¡K¢K K¤K 
K¤K¨K¥KªKªK¥K§KªK¬K°K¯K´K²K·K¸K¿K½KÀK½K¾KÄKÇKÆKÅKÊKÉKÍKÎKÏKÑKÏKÑKÒKÑKÒKÓKÕKÕKÔKÒKÓK×KÙK×KØKÙKÕKÃK|KNKNKWKZKaKfK`KcKdKiKhKkKgKfKfKlKdKhK_K\K]KcKhK`K`KbKcK_KSKUKRKSKNKNKLKUKOKVKPKOKWK\KsKˆKŸKµKÅKÍKÓKÕKÓKÔKÍKÆK»K«K¤K’K†K}KbK]KUKMK=K@K7K5K4K5KAK?KCKLKQKJKHKFKDKAKDKJKRKVKZKaKgKjKkKmKeKiK`K[KTKJKIKKKHKJKIKOKRKMKVK[K[KcKhKkKoKuKqKnKdKhKbK^KXKbKZKYKQKOKJKDK;K>KDKGKFKBK>K7KK:K/K-K@K6K-K.K9KAK6K1K*K6K2K1K6K8K=K7K2K1K/K'K,K-K0K*K7KEKZKTK=K`K^KKJK@KNKFKJKFKCKAKLKGKPKZK]KdKiKhKlKiKfKaK`KZKcKTKJKHKFKHKEKEKDKSKQKTK[K]K]KiKnKqKrKsKnKjKeKiKfKgK_K\K^KPKHKHKBK6K@KDK@KFKIKEKKK@K:e]rê(K7K7KMKUK^KhKpKoKqKsKlKiKoKwK„KŠK K¤K­K®K®K²K¯K®KªK£K“K†KwKcKUKXK`KgKnKKK“KœK¡K¦KªK­K­K²K°K²K°K²K±K³K³K¶K·K·K´K´K°K­K¨K¥KšKpKDKVKvKmKkKaK>K,K'K%K)K0K)K.K-K0K>K.K-K?K:K/K3K;KCK9K.K+K3K.K1K+KEKLKGKDK3K0K(K-K3K/K-K6KOKRKBKK2K?KDKCK9KZKOKUKCKGKaKŒK–K‚KHKOKGK(K9KIKBKSK9K(K)K&K"K(K-K1K1K-K)K&K+K$K%K-K/K&K-K)K*K,K6KGKHKPKPKWKRK]KZKRKCK:K9KIK*K&K.K'K$K2K;KCKTK]K`KdKdKiKmKpKvKwKwKvKyK}K|K|K~K{K„KƒK‚KƒKƒKƒK‚KˆK‰KˆK†KˆKŠK†K‡K†K‹K…K†KˆKˆK‘K‹K‹KKK‰K‹KKŠKK‘KKŽKŽKŽKKKKK”K“KŽK•KK’K”K–KK’K‘K“K’KK‘KK“KŽKKKˆKŒK‹K‹K‹KŽKKˆKŒKŒK‹KKŽK‹KˆKŒKŠKK’KKK‘K–K”K–K’K—K•K•K™K˜KšKK›KžKKžKŸK KŸK£K§KªKªK¨K­K­K©K°K°K®K²KµK¹K´K»KºK½K¿KÄKÁKÄKÆKÇKÉKÈKÈKÏKÍKÎKÏKÓKÑKÓKÖKÖKÖKÖKÔKÓKÖKØK×K×K×KÕKÒK³KhKIKOKTKYK`KaK^KaKhKcKdKfKbKhKdKgKgKfKbK_KiKdKaKcKgKeKdKUKXKWKWK[KXKcK\K^K`K_K[KWK[KqK‰KžK±KÃKÉKÎKÍKÉKÁK¹KªK—KzKaK[KNKNKKKLK@KAKEK;KKKKEKEKFKGKEK:K6e]rë(K0K0KBKRKVK]KfKiKdKfKfKeKgKuKKK™K¢K¥K­K±K³K³K²K¬K¦K™K‡KyKfKZKWKaKjKqK}KŠKKšKžK¤KªK®K°K°K±K²K³K³KµK´KµKµK·K·K³K°K¯KªK¥K£K›KvKPKUKoKgKmK[K3K,K+K)K+K/K9K0K2K>K5K1K3K6K6K'K4KK~K`K;K0K/K:KAKWKXKjK`KQK_KLK-K(K.K7K4K,K*KFKnKWKKRK4KFKBK1K,K*K%K1K6K/K2K*K%K)K,K-K(K/K4K3K,K+K2K5K:KBKDKIKPKRKFK:K7K2K1K,K3K&K%K,K-K6K;KJKSKTK[KcKfKcKiKnKrKuK{K{K~KxK{K}K}K~K‚KK„KƒK„KK„KK‰KˆKŠK‡K‡K„K†KŒK‰KŒK‰K…K‰K‹KŒK’K‹KKKŽK†K‡K‹KŠK‰K‘KKKKKKŽKKKK‘K‘K”K”K’K”K‘K‘K“K‘K‘KK’KKŽKKKŒK‘KKŒK‘K‹K‹KŠK‹K‹KŒKŽKˆKŒKKŒKŒKŒK‰K‹K’K‘K‹KK—KK‘K“K•K“K—K˜K›KœK™K™KœKK 
K¡K¡KŸK¥K¥K¨K¨K¬K©K­K¯K­K¯K®K²KµK¶K·K¸K»K¿KÁKÀKÁKÃKÇKÈKÇKÈKÌKÐKÍKÐKÎKÐKÐKÒKÓKÓKÓKÒKÒKÓKÕKÖK×KØKÖKÕKÖKÁK†KRKNKQKSKZKcKaKfKeKdKfKaKmKfKgKiKgKcKcKdKgKgKbKgKeK`KbK]K^KbK_KcKbKbKgKdK]K^K[K^KbKnKƒKšK«KºKÄKÆKÆKÂK¹K©K–K‚KbKXKRKHKPKUKMKHKGKFK>KFKMKIKFKKKLKLKGKJKCKKKMKQKWK\KbKlKsKnKpKsKhK_K`KRKNKLKOKLKGK@K?KAKK;K#K,KEK6K#K!K,K/K-K,K&K&K4K4K0K3K-K,K1K,K,K,K,K$K,K,K)K(K(K)K(K6K:K:KAKDKMKUKMKUK]K`KYKbKnKpKsKvKzK{KK€KƒK~KK‚K}KKKƒK„K€K€K‚K€KK}KƒKƒK†K„K‡K‡KˆK…KƒK…KKˆK…K‰K…K†KŽK‹KŽKˆKŒK‰KKŠK‡KŽKŠKKŠK‹KŒKKŠK‰KKŽK’KK“KŒKKK“K‘K‘K‘K’K‘K‘K“KKK”K‰KKKKKŽKŒK‘KK‡KŠKŠKŒK‹KŠK‘KKKŽK‹KŒKŽKŒKKK“KK‘K”K‘K–K•K–K–K”K›K™KKKžK¡KžK K K¢K¤K¤K¤K¨K¨K¬K¯K±K²K±K±K³K¶KµK¸KºKºKºK¿KÀK¿KÃKÅKÆKÇKÊKÈKÌKÍKÌKÎKÐKÒKÓKÔKÕKÓKÔKÓK×KÕK×K×K×KÖKÕKÑK»KqKHKPKSKYKUKZK[K\KcK`KaKaKdKgKnKkKnKfKkKkKlKjKjKfKeKeKiKbKjKiKiKkKmKpKoKoKmKeKkKiKfKoKKŠKšK•KKŽK…KxKgKYKUKTKOKHKKKJKKKOKKKNKPKUKRKUKOKLKMKKKGKGKOKPKOKSKaK^KfKlKjKoKoKdK[K[KXKFKKKFKEKEKFKBK=K@KAKIKOK`KgKhKsKkKqKsKtKyKwKqKsKhKeKbKiK[KTKIKHKDKBKBK>KK>K7KZKpKKK6K/K(K5K8K]KbKjKtKvKcKlKgKKK4K-K/K*K5KCK.KbKpK`K0K$K-K"K(KMK5KAK„KyKAKLKMK:K=KJKUKKmKzKzKcK|K‘KˆKoKaKLK5K3KJK(K*K*K)K+K)K-K(K)K+K1K-K3K:K)K)K+K2K)K&K%K'K(K*K9K4K7K?KLKLKNKRKVKVKYK]K]KdKeKcKiKpKrKvKvKyKwKyK|K|KyK|K}K€KKƒK€KK~K‚K€K€K|K‚KK„K‚KK…K„K†K„KK‡K‹K‰K…KŽKˆK‹K‹KŒKˆKŒK‡K‡K…K‰KKK‹KŒKŽKŠK‹KKŽKŽKKŽK•K“KŒKŽKŽK“K’KK’K“K•K‘K”K“KK‘K‘KKKŽKKKKKKKŒKŠK‰KŽK‰KK’KŽKŽKKŒKK‘KŽK‘KŒKK‘K”K‘K’K”KšK–KšK™K—K™KžK KŸK 
KŸK¡KžK¡K£K¥K©K§K©K©K®K±K±K²K²K´KºK·KºK¹KºK»K¿K¾KÀKÃKÂKÇKÆKÆKÉKÌKÌKÎKÏKÐKÐKÕKÒKÔKÕKÒKÓKÖK×K×KÖK×KØKØKÖKÄK‡KKKMKRKSKTKVKZKbK`KcKfKeKdKeKkKgKjKjKnKqKmKhKfKcKbKfKeKgKmKnKlKtKtKoKrKlKoKjKjKnKlKpKsK{K}KK~KxKoKfKXKVKVKQKNKQKSKPKNKUKTKTKVK[KZKPKTKPKQKMKAKGKIKTKXKbKeKnKgKgKjKhKfK^K[KQKPKJKBKDKAKIK:KK8K:KK:KDKTKYKRKYKZKZKjKsK}KK›K¥K¨K®K®K®K¬K¬K¤KžKKKyKuK]KcKmKrKxK†KK™KžK¥K®K°K­K°K³KµK²KµK²K³K´K·K·KºKºK¶K®K§K¦K¥K¢K•KoK?K?KNKLK.K#K-K8K*K.K&K,K2K.K3K3K-K/K1K.K:K9K=K?K>K4K@K.K/KDK7K2KSKjK?K(K,K9K7K/K3K5KDKFKIKKCK>KAKAKMKRK\KjKkKjKoKmKpKrKwK{KtKvKqKjKmKiK]KYKOKIKEKFKBK>K>K@K9K=KAK=K4K5K5K2K/e]rñ(K/K/K9K5K5K:KBKIKOKPKXKTKYKdKtK}KK›K¢K¥K®K®K¯K­KªK¤KšKKKxKnKaK`KiKvK|K†KŒK•KŸK¦K©KªK¯K±K¯K²K´K²K¶KµK´K´KµK¸K¶K³K®K§K©K¢K¢K”KnK>K:KGKRK.K+K0K4K+K4K-K8KJKGK1K,K,K)K8K2K6K@KEKDK;K8K>K8K8K@K?K2KKKwKOK,K'K/K2K*K2K1K=K=KFKJK[K^K:K4K'K+K,K6KJKYKgKcKlK}KnKyKdK3K'K.K%K.KOK`KIKiK‚KmK1K%KK'K1KRKUK@K[K}K]K/KPKNK1KHKzKzK_K{KwK?K6K`KvKjKqKLK3K9K/K-K,K5K9K;K=KBK9K9K/K'K)K(K(K&K,K(K)K3K;K=KAKJKEKKKSKNKYKUKVKZKcK`K^KfKoKsKrKrKuKxK}K|KzKxKyKxKzK}KyK~K|KK€K‚K~K‚K…K~K|K‚K€K€K€KK~KKK‰KˆK‰K‚K„K†K…K‰KŠK‰KŒKŠK‚KˆK‰K†K‹K‡K‹K‘K’KŠKŽKŽKŽKŒKŠKKŒKKŽKK‹KK—KK’K•KK”K”K’K“KKŽK‹KK“K–KŽKK‘K‘KK‘K“KKK‰KŽKK‘KŽKK‹KŒKKKŒKŽKŽK’KKKK’K‘K“K—K˜K—K–K“K•KšKšK™KœKœK›K›KœKŸK§K¤K¡K¥K©K¤KªK¬K®K²K°K´K·KµK¹KºK»KºK½K½K¼KÁKÀKÃKÄKÅKÉKËKÇKÊKËKÏKÎKÎKÓK×KÕKÓKÓKÓKÓKÔK×KÔKØKÕKÖKÐK®KgKHKRKOKPKSK]KZK\KdK`K`KaKeKdKmKmKjKrKpKmKqKlKeKjKkKlKoKjKxKqKtKtKtKxKpKxKqKpKrKnKnKjKqKpKnKoKiKfKaKZKVKXKZKZK_K[KWK\KdKbK_KUK\KZKXKPKFKJKFKOKMKPKXK]K_KhKnKlKbKaK[KXKaKSKPKGKOKIKLKFKKOKFK2K2K9K0K.K*K-K9K9K5K4K7K*K+K/K2K8KAKCKIKJK9KAK4K8K@K=K+K?KmK\K6K%K8K.K(K0K0K9K?KTK]K_KTK8K/K,K+K'K5KJK^KqK[KKK}K|KxKiK3K$K/K$K K2KYK[K`KuK€KNK)K K'K;K9KYKJK:KxKzK;K1KVKMK;KeKyKbKzK}KbK8K@KoKqKaKWK6K6K.K-K+K-K.K1K*K*K(K%K!K*K&K#K,K,K5K:K9KBKFKIKLKMKUKXKXKWK^K`KfKeKbKjKmKoKtKsKxKtKvKxKvKxKzKxK{KzK}K|KKK~K}KKK„K†KƒKK|KK‚KKK|K€KƒK~K„K‰K‡K‡K‡K‰K†K…KˆK‡K‰K†K†K†K…KˆKŠKKKKK‰KK‹KŒKKKKKŽK‹KŒKŒKŽKŒKK”K’K‘KK”KK‘K‘KŽK‰K’K”K“KŽKKKKK’K–K’KŽKŒK‹KKKK‹KŽKŽKK‹KKK“KKKKKK”K•K‘KK–K—K’K“KšK™KœKœKK›K™KŸKŸK 
K¢K¥K§K¨K«K¨K©K°K¯K¯K±K±K¶K³K¹K¼K½K¾K¾K½K¾K¿KÄKÄKÇKÈKÅKÈKÌKÍKÍKÍKÏKÑKÔKÓKÓKÓKÒKÔKÔK×KÕKØKÕKÔKÔK½K|KNKNKKKTKWKRKTK\K]KaKbKbKeKgKlKkKmKhKsKnKnKoKkKiKoKlKsKrKuKsKtKtKqKtKoKrKsKtKwKqKmKiKnKfKgKhKgKbK`K`K[KZK[KYKaKaKaK_KgKbK`K[KVKUKXKNKNKNKRKQKQKVKYK^KgKgKgKfKcKfKTKOK[KNKOKVKKKKKJK@KCKAK?K=KOKTK]KgKiKlKmKuKwKxKzK}KqKqKjKhKhKeKbKSKSKEKKKGKBKKdKrKZK>KlK~K{KwKOK,K&K)K K$KEKmKiKmK‹KeK:KK"KK'K=KNKIKUKuK`K.K5KFKDKHKrKhKpKyK‚KgKLKSKiKPK]KDK3K%K&K%K+K(KNK8K#K'K$K&K,K.K*K,K4K5K7K9K5KCKFKOKTKVK[KYKXKeKgKmKnKkKlKoKvKrKuKvKzK|KxK|K|K}KzK{K}K{KK|K{K{KƒKKK‚K„K‚K‚K~K~KKKK|K‚K‚K†KƒKƒK…K†K†K‰KŒK‡K‰K‰KˆK†K‡KˆK†K‡KŒKˆK’K‹K“KKŒKK‹KKKKŽK‹K‰KŽKKŽKŽK•KK‘K“KK’KKK”K”KK‘K‘K’K’K’KK‘KK‘K•K”KKKK‘KKK‰K‘KKŒKKŒK’K“K‘KŒKK’K’K”K“K“K”K”K–K’K•K™KœKšKœKK›K™K KŸK¡K¢K¥K¨K¥K¤K¨K©K®KªK¬K­K°KµK²KµK·K·K½K»K½K½K½KÁKÄKÃKÂKÄKÈKÊKÍKÌKÎKÏKÑKÕKÓKÔKÓKÔKÕKÓKÒKÕK×KÖKÖKÖKÆKKSKJKHKNKZKXK]K[K^KdKbK^KeKgKgKmKoKmKsKpKkKmKmKpKoKpKtKqKzKuKwKwKrKsKoKrKsKlKpKtKqKnKkKgKhKhKfK^K\K]K^K[K\KbKfKhKhKeKdKiK`KXKWK]KTKOKJKUKTKQKTKWK^KbKmKjKcK_K^KXKQKUKKKMKOKJKSKKKNK>KJKHKDKEKSK]KcKgKzKtKoKuK{KwKKuKnKkKjKiKeKaK^KTKFKHKCK>K=K>K=K;K6K?K;K6K9K2K3K:K4K3e]rô(K2K2KCK:K?K?K@K=KCKMKMKPKLK[KnKvKŒKœK¦K¯K³KºK¿K¼KºK³KªK K‹K‡K{KmKaKlKnKK…KK˜K K¥K¬K¯K°K´K³K±K´K´K´K±K¸K¹K»KºKµK´K²K¬K¦K¢KœKuKZKGK;KEKHKK>K>K3K4K@KeKQK2K2K.K(K-K*K.K>KDKfKbK8K/K(K.K+K'K2KDKdKpKjK=KJKwK‚K|KiK6K"K(K'K%K2KdKmKeK‘KmKDK-K$KK 
K*K>KbKVKtKwKHK#K0KCKHKfKrKkKwKyKsKiKUKLKDKIKGK)K(K%K&K+K%K7K.K*K)K(K.K1KKCKHKJKTKgKtKKžKªK¸K½KÃKÃKÀKÁK½K¶K¬K–K‡KuKhKbKjKuK}K†KKšKžK¤KªK¬K±K³K³K²K³K»KµK´K¸K¹K¶K¹KºK³KµK°K¦K—KuKNKFK:K.KAKIK@K=K4K6K*K1K0K6K4K,K4K1K/K+KK7K-K;K;K1K-K7K>K;K-K-K0K-K0K:K3K6K@KCK5K7K9K9K4K8K>KCK=K2K0KMKeKUK2K(K-K0K6K(K=KWK~KoK;KDK&K4K1K.K)K:KhKjKxKCK0KTKŠKƒKƒKfK;K8K3K(K'KQKrKdKxKwKGKMK=K"K%K#K/KDKSKJKK†KWK+K$K*K6KVKSK^KkKyKkKAK1K-K'K=K4K'K,K4K(K(K3K&K5K4K*K-K/K2K3K2K/K:K=KNKQKTK[K]KdKdKfKnKmKmKqKtKsKwKsKrKrKtKvKvKxKzK}K~K~K}K|K}K|KzKƒK~KK€K†K€K|KƒK†K€KK‚K€KƒKƒKK€KK‚KˆKˆKK…KˆK‹KŠK„K†KˆKŠKƒKˆKŽK‹KˆKŠKˆK‰KŒK‹KKŠKŠK†KˆKKK‹KKŽKKK‘KKŽKŽK•K’K‘KŽK’KŽK’K‘KKŽK”K”K•K“K‘KK“KK’K”K“K’KK‘K‘K“KKKŽKK‘KKK‘K˜KK‘K’K“K“K”K“K”K”K˜K•K•K—K–K–K•K›K™KŸKŸKœK¡K£K¢K¤K¥K§K§K§K§K«K©K°K°K±K¶K´KµK·KµKºK»KºK¼K¹K¾KÁKÃKÈKÆKÈKÈKÉKÊKÉKÎKÎKÎKÒKÐKÑKÒKÑKÓKÐKÓKÖKÖKÔKÔKÒK¾KxKFKCKMKVKQKUKbK]K^KfKiKkKjKjKiKnKpKnKnKnKrKrK~KvKzKxKwKtKyKyKtKpKuKuKuKpKvKqKqKqKpKlKoKkKgKgKdKbKbKcKdKmKpKpKqKrKjKkKdK^K\K[KZKTKVKQK\K[KZKbKfKbKcK]K\KVKUKLKLKDKBK@KJKHKLKFKCKQKBKHKOKMKWKeKjKpKqKxKrKvK¥KsKwKuKqKiKbKaKUK[KVKSKFKFK>K?KKAKBK@KHK`KyKK«K»KÆKÈKÎKËKÊKÉKÆKÇK½K°KœKKiKdK{KrK{K†K‘K›KŸK¦K©K°K°K±K²K³K´K¸K²KµK¶KºK¸K¹K¹K¶K±K¥K™KwKhKRKJK:K6K?K?K8K1K3K2K/K'K+K:K5K-K(K7K.K-K4K3K8KEKAK:K3K3K9K=K>K;K?K>K0K+KAKVKiK9K*K)K+K)K,K>KkK‰KoKCK2K#K/K/K1K4KFKuKmKyKDK4K@KwKK…K}KTK?K7K*K-KHKUKeKeK‚KOKIKOK0K K!K#KKK1K4K7K3K8K3K8KAK@e]rø(K0K0K8K?K=K3K2K7KK1K+K5K1K.K.K+K0K-K7K/K7K=K=K5K;K9K4K9K9KIKAK>K3K4K:KFKiKMK&K&K+K-K1KFKsKKcKBK1K)K2K4K5K*K?KuKhK}KKK?K@KfKŽKŠK…KmK>K2K+K$K%K9KQKhKuKYK-KMKBK$K!K$K0KVKPKAKlK†KeK8K%K*K;KWKDKNKiK|KoKaKNK*K5KSK2K%K-K 
K+K(K$K)K1K1K3K7K0K7K9K=KKKNKXKWK`K^KfKgKfKlKpKrKrKuKuKsKwKtKuKsKtKtKuKyKxK}K~KxK}KK~K}K}KzKK}KKK|K|K‚K~KƒK|KƒK€K‚K‡K‡K‚K‚KK„KƒKƒKƒKƒK†K…KŠK†K„K‡KK‹K‰K‹K‹K‡KŠKˆK‡K‡KŒKŠK‡KKK‰KK‰KK‰KKŠKKŒKŒKK’K“K‘K‹KK“K“K–K’K•K’K“K—K“KK’KK”K“K™K“K‘K‘K•K“K”K“KK“K—K”K“K’KŽK–K“KK‘K“K—K–K”K‘K”K–K•K™K—K—K•K–KšK™KœK£K›KŸKŸK£K¥K¤K¤K¥K¨K®K©K©K«K°K³K²KµK¶K²K²K·K¹K¼K¹KºK»K¿KÄKÅKÉKÈKÅKÇKÌKÉKÌKÌKÍKÏKÏKÏKÒKÑKÑKÐKÔKÑK×KÖKÕKÓKËK¡KTKMKMKPKPKUKXKVK\K[KVKVKZKfKnKmKkKfKlKiKsKwKxKzK{KwK{K{KwKtKuKxKuKvKpKtKsKsKsKtKlKsKkKkKkKjKkKhKnKnKqKuKsKuKuKqKpKjKjKcK^K^KXKVKWK]KbK`K`KcKfKdKeKWKUKSKOKDKDKJKCKCKPKBKAKHKKKFKGKQK_KZKgKjKnKuKzK{KzKzKvKxKoKlKmKiK^K[KZKNKOKFKIK:K@K4K;K8K?K:K9K7K3K6KK=KHKiK|K‘K·KÄKÆKÉKÌKÌKËKÊKÈKÈKÄK·K«K™KsKiKtKpK|K…KŒK˜K K¤K¨K­K°K²K®K¯K°K¶K´K³K´K²K·K´K´K¯K³K¯K§K–KKtKmKNKEK>K5K2K(K2K=KNK9K)K;K0K3K(K1K4K*K@KCK6K4K6K;K4K?K?K@KCK;KBKNK9K)K3K4KZK_K%K'K/K-K.KRKƒKŠKVKFK1K+K1K4K/K)KGKsKkK‚KAKBKIKSK…K‘K‰KuKNK,K+K(K'K,K>KaKmKfK.KK5K6K1K.K1K?K;KDKKK\e]rú(K0K0K5K1K5K9K3K5K;KAK8K:K:KJKhK‚K›KÀKÃKÅKÇKËKÌKÊKÌKÉKÇKÃK»K²K¥K{KfKsKsKK‚K‘K—KK£K§K¬K±K¯K°K³K°K¶K³K³K²K¸K¶K¶K³K²K´KªK©K”K{K…KwKVKGK9K7K/K/K.K9KAK0K0K7K?K1K/K.K@K1KRKBK:KK(K(K;KQKWKBKHKnKpK;K#K3K_KIKGKXKyKjK>K`KsKpKcK+K#K&KKK#K$K)K+K,K3K4KK4K8K4K1K4K8K:K9K;KKK9K3K=KGKFKGKNKVe]rû(K/K/K7K5K2K7K4K3K8KK@KIKNKQK\K]KXKcKcKjKjKgKeKkKkKwKpKmKrKxKwKuKyKsKyK}KuKwKzKzK{K|KK‚KK~KKK}KK}K~K|KK€K‚K„K‡KK…K€K€K…K†KƒK…K€KƒK‚KK„K‚K†K‰K†K„KˆKŠK‡KŠK†K†K„K†K†K‹KŽKKKŠKŠKŠKŠKŽKKŒK‰KŠKŽKŽK‘KŽKK”KK‘KK‘K‘K”K‘KK“KK“K’KK“K–K•K’K’K’K‘K’K˜K–K—K•KK–K–K–K—K‘K•K”K•K•K‘KK‘K‘K˜K—K—K”K™K›K›K›KšKšK™K”K™KK 
K¢K¡KŸK¤K§K¦K¦K§K¦K¥K§K§K¬K«K²K°K®K®K³K´K¸K¸K»KºKºK»K¾K¿KÁKÃKÅKÆKÉKÄKÉKÈKÍKÏKÏKÎKÏKÑKÑKÒKÑKÒKÒKÒKÕKÒKÕKÓKÆK‹KLKOKQKEKVKVKYKVKZKkKdKfKfKaKdKjKhKhKqKvKtKwKyKxKuKuKuKyKKvK{KxK~KxKsKwKuKtKsKlKqKrKuKrKpKuKxKyK~KKK‚KwKvKpKkKiKaK^K^K`K^K^KcKgKhKbKZK\KUKSKXKSKZKVKWKNKXKUKIKOKNKEKMKMKNKPKYKbKhKpKwK{K~K€KKyK}KvKvKiKkKiKcKWKXKQKRK@KKAK?KYK4K'K6KMKOKQKpK|KHKTK`KvKˆKvKPK/K"K'K%K&K+K*K1K7K:K>KIKNKQKYKVK[K_KaKfKfKjKkKiKqKvKtKtKtKsKyK~KwKwKxKyKuKxKxKwK{KzK€K‚KK€K~K~K{K€KK|KK~KK†K„K„K‚KƒKƒKƒK†K†K‚K…KK€K}K„KKˆK†KˆKŠK‹K‡KŒK†KˆK‡KŠK†K†K‡KŠKKŒKKŠKŠK‰K‹KKŠKK‹KŒKKK‘KŽKK“K‘KK’K‘K–K“K”KK”K’KŒKKK—K“K“KK“KK’KK“K–KšK•K“K•K™K”K‘KšK’K•K–KK‘K”K’K”K–K˜K•K–KœK›K›KœKšKœKKšK›KšKžK¢K¡K¡K¥K¤K¦K¢K¤KŸK¢K¨K¨K¨K¬K±K±K­K®K³K²K¹K¹K»K¹K¸K¾K½KÀKÁKÂKÄKÃKÈKÉKÉKÈKÊKÍKÍKÍKÎKÓKÐKÑKÓKÓKÒKÓKÓKÓKÔKÔKÍK K]KEKOKMKPKRKUKSKWKYK^KcKhKcKdKjKhKkKqKsKpKsKwKzKvKzKwK{K{K}K‚K{KuKzKtKuKuKsKqKtKvKwKxKxKsKuK{K€K~KK~KƒKyKuKrKjKhK]K^K\K]KYK_K_KcK]KWKYKWKWKMKOKVKOKSK[KUKNKWKQKKKSKJKKKOKYKZK_KhKuKuK{KK„K}K‚KK|KuKoKnKjKaK^KYKOKJKFK@K;KAK7K,K,K/K;K7K7K7K:KCKBK:KBKKKNKaKZKXK_e]rý(K6K6K0K/K5K5K2K7K8KKMKSKIKYKHKOKGK1K1K7K>KAKQK=K1KK.K3K5K>K>KLKoKrKyK)K+KVK^KJKHKqKKKsKCK,K#K0K;KQK„KdK~KeK4K3K3K3KDKGK9KNK_K[KAKAKK0K1KDK?K9KQKwKvKwK+K)KKK^KKKJKLKgK{K†KfK=K)K,K:KNKƒKxK{KiK8K0K'K#K(K:KKGKKKQKYK]KdKfKbK`e]rÿ(K+K+K2K/K6K/K2K1KK[KDKSKKK?KCKAK.K)K4K,K9KSKoKEK9KLKPKlKmKdKsKZK7K9K3K=KOKRKQKNKUKYKSKXKZKeKcKjKhKiKnKqKpKuKrKuKwKvKsKwK{K}KzKzK}K|K|K~K|KK~K{K{K~K}K|K„KK~K~KK‚K}KK‚K‚K}KK€K‚KK„K…KK‡K‚KKƒK€KƒK„K„K…K…KˆK†KŠKKŒKŽK„K…K„K…KŠK…K‡K‰K‡K‹KŒKŠKˆKŠKŒKK‘K‰KKŽKŽK‘K’K‘K”K’KKK‘K”KŽK‘KŽK‹KŽKK–K—K’K’K”K’KKK•K–K•K“KšK”K–K”K•K“K”K˜K–K˜K”K“K”K—K–K–K™K˜K’K–K–KœKœK K›K›KKžKœKžK¢K 
K£K¢K¦K§K¨K¦K¤K¤K¨K§KªKªK¬K­K¬K®K®K´K´K·K¶K»K¾K½KÁKÀKÁKÂKÃKÄKÃKÉKÇKÈKÌKÊKÌKÌKÍKÏKÓKÔKÔKÐKÑKÔKÓKÔKÖKÕKÅKŠKeK\K`K]KSKXKYK^K^K\KUKXK\KdKaKeKoKlKhKnKmKwKqKuK|K~K…K…K‚K…K~K|KxK|KyKzK{KzKxKxK~K„K‚KˆK†KŽK†K‰K€K}K}KsKrKiKdK`K\K[KbKYKXKQKWKRKLKOKHKKKKKOKWK]K_K`KUKYKZKTKUKQKUKOKTK\K^KfKjKtKvK}KKƒK„KK}K€KwKuKiK`KXKOKNKFK?K:K5K2K4K.K-K1K-K0K4K.K4K5K;KFKEKOKQK\KaKdKhKdKbe]r(K,K,K7K3K6K/K2K3K:K3K.K2K5K5KOKmK‘K¸KÃKÄKÄKÈKÆKÆKÇKÂKÁKÀK·K²KŸKxKiKnKwKƒK‡KŽK›KŸK¤K£K§K°K®K¶K´K³K´KµK²K¶K´K·K¶K·K±K³K¯K£KKNK_K^KEK8K5KGK0K+K'K+K'K'K6K>KFKK^KeKIKRKyKZK:K9KKKSKQKRKRKTK^KZK[KZK^KfKdKkKkKlKnKnKpKqKpKyKvKzKvK}KyKyKxK~K{K}KzK|K~K}KƒK‚K‚K‚K~K}K€K~KKKK}KKK€K~K‚K„K†K„KƒK†K„K‡K‚K…KƒK‚KƒK„KƒK‡K‡K‰K‰KK‹K†K†K‰K…K„K„KŠK†K‰K†K‡KŠK‰KŒKKŒKŠKKŽKŒKK‹K‹KK’KKK’K”KKKKK‘KKŽKKK•K’K“KK’K’K’KK“K“K–K”K—K•K™K•K•K”K”K”K—K”K—K•K˜K˜K“K–K˜K–K™K•K›KŸKœK¤K˜KœKžK›KKžKK¤KŸK£K¥K¨K¥K£K¦K£K¨K©K©K©K¬K¬K®K°K­K³K¸K¸K¹K¸KºKÀK¿KÀKÀKÃKÂKÂKÁKÅKÇKÇKËKÊKËKÌKÎKÏKÏKÑKÑKÏKÐKÔKÔKÒKÔKÔKÌKKvKmKpKfK_K^KXK]K[K[KYK\KaK^KaKbKkKhKjKiKfKxK|K~KK‚K†KˆK…K„KK~K€KyKK}K~K~KyK}K€K„KŠKŠK‹KŒK‡K‡K‚K|KwKvKlKdK_KVK\KZKYKUKRKNKHKMKJKHKNKEKQKVKYK[KZK]KYK\KZKTKWKSKPKTKYK`K^KfKpKwK~K€K{K‚KKK|K{KwKqKeK`KTKKK>KKFKK^KeKIKRKyKZK:K9KKKSKQKRKRKTK^KZK[KZK^KfKdKkKkKlKnKnKpKqKpKyKvKzKvK}KyKyKxK~K{K}KzK|K~K}KƒK‚K‚K‚K~K}K€K~KKKK}KKK€K~K‚K„K†K„KƒK†K„K‡K‚K…KƒK‚KƒK„KƒK‡K‡K‰K‰KK‹K†K†K‰K…K„K„KŠK†K‰K†K‡KŠK‰KŒKKŒKŠKKŽKŒKK‹K‹KK’KKK’K”KKKKK‘KKŽKKK•K’K“KK’K’K’KK“K“K–K”K—K•K™K•K•K”K”K”K—K”K—K•K˜K˜K“K–K˜K–K™K•K›KŸKœK¤K˜KœKžK›KKžKK¤KŸK£K¥K¨K¥K£K¦K£K¨K©K©K©K¬K¬K®K°K­K³K¸K¸K¹K¸KºKÀK¿KÀKÀKÃKÂKÂKÁKÅKÇKÇKËKÊKËKÌKÎKÏKÏKÑKÑKÏKÐKÔKÔKÒKÔKÔKÌKKvKmKpKfK_K^KXK]K[K[KYK\KaK^KaKbKkKhKjKiKfKxK|K~KK‚K†KˆK…K„KK~K€KyKK}K~K~KyK}K€K„KŠKŠK‹KŒK‡K‡K‚K|KwKvKlKdK_KVK\KZKYKUKRKNKHKMKJKHKNKEKQKVKYK[KZK]KYK\KZKTKWKSKPKTKYK`K^KfKpKwK~K€K{K‚KKK|K{KwKqKeK`KTKKK>K>> img = array([[ 91.06794177, 3.39058326, 84.4221549 ], - [ 73.88003259, 80.91433048, 4.88878881], - [ 51.53875334, 34.45808177, 27.5873488 ]]) - >>> bytescale(img) - array([[255, 0, 236], - [205, 225, 4], - [140, 90, 70]], dtype=uint8) - >>> bytescale(img, 
high=200, low=100) - array([[200, 100, 192], - [180, 188, 102], - [155, 135, 128]], dtype=uint8) - >>> bytescale(img, cmin=0, cmax=255) - array([[91, 3, 84], - [74, 81, 5], - [52, 34, 28]], dtype=uint8) - - """ - if data.dtype == uint8: - return data - high = high - low - if cmin is None: cmin = data.min() - if cmax is None: cmax = data.max() - scale = high *1.0 / (cmax-cmin or 1) - bytedata = ((data*1.0-cmin)*scale + 0.4999).astype(uint8) - return bytedata + cast[uint8](low) - -def imread(name,flatten=0): - """ - Read an image file from a filename. - - Parameters - ---------- - name : str - The file name to be read. - flatten : bool, optional - If True, flattens the color layers into a single gray-scale layer. - - Returns - ------- - imread : ndarray - The array obtained by reading image from file `name`. - - Notes - ----- - The image is flattened by calling convert('F') on - the resulting image object. - - """ - - im = Image.open(name) - return fromimage(im,flatten=flatten) - -def imsave(name, arr): - """ - Save an array as an image. - - Parameters - ---------- - filename : str - Output filename. - image : ndarray, MxN or MxNx3 or MxNx4 - Array containing image values. If the shape is ``MxN``, the array - represents a grey-level image. Shape ``MxNx3`` stores the red, green - and blue bands along the last dimension. An alpha layer may be - included, specified as the last colour band of an ``MxNx4`` array. 
- - Examples - -------- - Construct an array of gradient intensity values and save to file: - - >>> x = np.zeros((255, 255)) - >>> x = np.zeros((255, 255), dtype=np.uint8) - >>> x[:] = np.arange(255) - >>> imsave('/tmp/gradient.png', x) - - Construct an array with three colour bands (R, G, B) and store to file: - - >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8) - >>> rgb[..., 0] = np.arange(255) - >>> rgb[..., 1] = 55 - >>> rgb[..., 2] = 1 - np.arange(255) - >>> imsave('/tmp/rgb_gradient.png', rgb) - - """ - im = toimage(arr) - im.save(name) - return - -def fromimage(im, flatten=0): - """ - Return a copy of a PIL image as a numpy array. - - Parameters - ---------- - im : PIL image - Input image. - flatten : bool - If true, convert the output to grey-scale. - - Returns - ------- - fromimage : ndarray - The different colour bands/channels are stored in the - third dimension, such that a grey-image is MxN, an - RGB-image MxNx3 and an RGBA-image MxNx4. - - """ - if not Image.isImageType(im): - raise TypeError("Input is not a PIL image.") - if flatten: - im = im.convert('F') - return array(im) - -_errstr = "Mode is unknown or incompatible with input array shape." - -def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None, - mode=None, channel_axis=None): - """Takes a numpy array and returns a PIL image. The mode of the - PIL image depends on the array shape, the pal keyword, and the mode - keyword. - - For 2-D arrays, if pal is a valid (N,3) byte-array giving the RGB values - (from 0 to 255) then mode='P', otherwise mode='L', unless mode is given - as 'F' or 'I' in which case a float and/or integer array is made - - For 3-D arrays, the channel_axis argument tells which dimension of the - array holds the channel data. - For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' - by default or 'YCbCr' if selected. - if the - - The numpy array must be either 2 dimensional or 3 dimensional. 
- """ - data = asarray(arr) - if iscomplexobj(data): - raise ValueError("Cannot convert a complex-valued array.") - shape = list(data.shape) - valid = len(shape)==2 or ((len(shape)==3) and \ - ((3 in shape) or (4 in shape))) - if not valid: - raise ValueError("'arr' does not have a suitable array shape for any mode.") - if len(shape) == 2: - shape = (shape[1],shape[0]) # columns show up first - if mode == 'F': - data32 = data.astype(numpy.float32) - image = Image.fromstring(mode,shape,data32.tostring()) - return image - if mode in [None, 'L', 'P']: - bytedata = bytescale(data,high=high,low=low,cmin=cmin,cmax=cmax) - image = Image.fromstring('L',shape,bytedata.tostring()) - if pal is not None: - image.putpalette(asarray(pal,dtype=uint8).tostring()) - # Becomes a mode='P' automagically. - elif mode == 'P': # default gray-scale - pal = arange(0,256,1,dtype=uint8)[:,newaxis] * \ - ones((3,),dtype=uint8)[newaxis,:] - image.putpalette(asarray(pal,dtype=uint8).tostring()) - return image - if mode == '1': # high input gives threshold for 1 - bytedata = (data > high) - image = Image.fromstring('1',shape,bytedata.tostring()) - return image - if cmin is None: - cmin = amin(ravel(data)) - if cmax is None: - cmax = amax(ravel(data)) - data = (data*1.0 - cmin)*(high-low)/(cmax-cmin) + low - if mode == 'I': - data32 = data.astype(numpy.uint32) - image = Image.fromstring(mode,shape,data32.tostring()) - else: - raise ValueError(_errstr) - return image - - # if here then 3-d array with a 3 or a 4 in the shape length. 
- # Check for 3 in datacube shape --- 'RGB' or 'YCbCr' - if channel_axis is None: - if (3 in shape): - ca = numpy.flatnonzero(asarray(shape) == 3)[0] - else: - ca = numpy.flatnonzero(asarray(shape) == 4) - if len(ca): - ca = ca[0] - else: - raise ValueError("Could not find channel dimension.") - else: - ca = channel_axis - - numch = shape[ca] - if numch not in [3,4]: - raise ValueError("Channel axis dimension is not valid.") - - bytedata = bytescale(data,high=high,low=low,cmin=cmin,cmax=cmax) - if ca == 2: - strdata = bytedata.tostring() - shape = (shape[1],shape[0]) - elif ca == 1: - strdata = transpose(bytedata,(0,2,1)).tostring() - shape = (shape[2],shape[0]) - elif ca == 0: - strdata = transpose(bytedata,(1,2,0)).tostring() - shape = (shape[2],shape[1]) - if mode is None: - if numch == 3: mode = 'RGB' - else: mode = 'RGBA' - - - if mode not in ['RGB','RGBA','YCbCr','CMYK']: - raise ValueError(_errstr) - - if mode in ['RGB', 'YCbCr']: - if numch != 3: - raise ValueError("Invalid array shape for mode.") - if mode in ['RGBA', 'CMYK']: - if numch != 4: - raise ValueError("Invalid array shape for mode.") - - # Here we know data and mode is correct - image = Image.fromstring(mode, shape, strdata) - return image - -def imrotate(arr,angle,interp='bilinear'): - """ - Rotate an image counter-clockwise by angle degrees. - - Parameters - ---------- - arr : nd_array - Input array of image to be rotated. - angle : float - The angle of rotation. - interp : str, optional - Interpolation - - - Returns - ------- - imrotate : nd_array - The rotated array of image. 
- - Notes - ----- - - Interpolation methods can be: - * 'nearest' : for nearest neighbor - * 'bilinear' : for bilinear - * 'cubic' : cubic - * 'bicubic' : for bicubic - - """ - arr = asarray(arr) - func = {'nearest':0,'bilinear':2,'bicubic':3,'cubic':3} - im = toimage(arr) - im = im.rotate(angle,resample=func[interp]) - return fromimage(im) - -def imshow(arr): - """ - Simple showing of an image through an external viewer. - - Uses the image viewer specified by the environment variable - SCIPY_PIL_IMAGE_VIEWER, or if that is not defined then `see`, - to view a temporary file generated from array data. - - Parameters - ---------- - arr : ndarray - Array of image data to show. - - Returns - ------- - None - - Examples - -------- - >>> a = np.tile(np.arange(255), (255,1)) - >>> from scipy import misc - >>> misc.pilutil.imshow(a) - - """ - im = toimage(arr) - fnum,fname = tempfile.mkstemp('.png') - try: - im.save(fname) - except: - raise RuntimeError("Error saving temporary image data.") - - import os - os.close(fnum) - - cmd = os.environ.get('SCIPY_PIL_IMAGE_VIEWER','see') - status = os.system("%s %s" % (cmd,fname)) - - os.unlink(fname) - if status != 0: - raise RuntimeError('Could not execute image viewer.') - -def imresize(arr, size, interp='bilinear', mode=None): - """ - Resize an image. - - Parameters - ---------- - arr : nd_array - The array of image to be resized. - - size : int, float or tuple - * int - Percentage of current size. - * float - Fraction of current size. - * tuple - Size of the output image. - - interp : str - Interpolation to use for re-sizing ('nearest', 'bilinear', 'bicubic' - or 'cubic'). - - mode : str - The PIL image mode ('P', 'L', etc.). - - Returns - ------- - imresize : ndarray - The resized array of image. 
- - """ - im = toimage(arr, mode=mode) - ts = type(size) - if issubdtype(ts,int): - size = size / 100.0 - elif issubdtype(type(size),float): - size = (array(im.size)*size).astype(int) - else: - size = (size[1],size[0]) - func = {'nearest':0,'bilinear':2,'bicubic':3,'cubic':3} - imnew = im.resize(size, resample=func[interp]) - return fromimage(imnew) - - -def imfilter(arr,ftype): - """ - Simple filtering of an image. - - Parameters - ---------- - arr : ndarray - The array of Image in which the filter is to be applied. - ftype : str - The filter that has to be applied. Legal values are: - 'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more', - 'emboss', 'find_edges', 'smooth', 'smooth_more', 'sharpen'. - - Returns - ------- - imfilter : ndarray - The array with filter applied. - - Raises - ------ - ValueError - *Unknown filter type.* . If the filter you are trying - to apply is unsupported. - - """ - _tdict = {'blur':ImageFilter.BLUR, - 'contour':ImageFilter.CONTOUR, - 'detail':ImageFilter.DETAIL, - 'edge_enhance':ImageFilter.EDGE_ENHANCE, - 'edge_enhance_more':ImageFilter.EDGE_ENHANCE_MORE, - 'emboss':ImageFilter.EMBOSS, - 'find_edges':ImageFilter.FIND_EDGES, - 'smooth':ImageFilter.SMOOTH, - 'smooth_more':ImageFilter.SMOOTH_MORE, - 'sharpen':ImageFilter.SHARPEN - } - - im = toimage(arr) - if ftype not in _tdict.keys(): - raise ValueError("Unknown filter type.") - return fromimage(im.filter(_tdict[ftype])) - - -def radon(arr,theta=None): - if theta is None: - theta = mgrid[0:180] - s = zeros((arr.shape[1],len(theta)), float) - k = 0 - for th in theta: - im = imrotate(arr,-th) - s[:,k] = sum(im,axis=0) - k += 1 - return s diff --git a/scipy-0.10.1/scipy/misc/setup.py b/scipy-0.10.1/scipy/misc/setup.py deleted file mode 100644 index 7ac655cd9a..0000000000 --- a/scipy-0.10.1/scipy/misc/setup.py +++ /dev/null @@ -1,11 +0,0 @@ - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = 
Configuration('misc',parent_package,top_path) - config.add_data_files('lena.dat') - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/misc/setupscons.py b/scipy-0.10.1/scipy/misc/setupscons.py deleted file mode 100644 index 7ac655cd9a..0000000000 --- a/scipy-0.10.1/scipy/misc/setupscons.py +++ /dev/null @@ -1,11 +0,0 @@ - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('misc',parent_package,top_path) - config.add_data_files('lena.dat') - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/misc/tests/data/icon.png b/scipy-0.10.1/scipy/misc/tests/data/icon.png deleted file mode 100644 index e9037e282b..0000000000 Binary files a/scipy-0.10.1/scipy/misc/tests/data/icon.png and /dev/null differ diff --git a/scipy-0.10.1/scipy/misc/tests/data/icon_mono.png b/scipy-0.10.1/scipy/misc/tests/data/icon_mono.png deleted file mode 100644 index 612c9c604e..0000000000 Binary files a/scipy-0.10.1/scipy/misc/tests/data/icon_mono.png and /dev/null differ diff --git a/scipy-0.10.1/scipy/misc/tests/data/icon_mono_flat.png b/scipy-0.10.1/scipy/misc/tests/data/icon_mono_flat.png deleted file mode 100644 index c42b9a025a..0000000000 Binary files a/scipy-0.10.1/scipy/misc/tests/data/icon_mono_flat.png and /dev/null differ diff --git a/scipy-0.10.1/scipy/misc/tests/test_common.py b/scipy-0.10.1/scipy/misc/tests/test_common.py deleted file mode 100644 index c226eec485..0000000000 --- a/scipy-0.10.1/scipy/misc/tests/test_common.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np -from numpy.testing import assert_array_equal, assert_almost_equal, \ - assert_array_almost_equal - -from scipy.misc import pade, logsumexp - - -def 
test_pade_trivial(): - nump, denomp = pade([1.0], 0) - assert_array_equal(nump.c, [1.0]) - assert_array_equal(denomp.c, [1.0]) - -def test_pade_4term_exp(): - # First four Taylor coefficients of exp(x). - # Unlike poly1d, the first array element is the zero-order term. - an = [1.0, 1.0, 0.5, 1.0/6] - - nump, denomp = pade(an, 0) - assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0]) - assert_array_almost_equal(denomp.c, [1.0]) - - nump, denomp = pade(an, 1) - assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0]) - assert_array_almost_equal(denomp.c, [-1.0/3, 1.0]) - - nump, denomp = pade(an, 2) - assert_array_almost_equal(nump.c, [1.0/3, 1.0]) - assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0]) - - nump, denomp = pade(an, 3) - assert_array_almost_equal(nump.c, [1.0]) - assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0]) - -def test_logsumexp(): - """Test whether logsumexp() function correctly handles large inputs.""" - a = np.arange(200) - desired = np.log(np.sum(np.exp(a))) - assert_almost_equal(logsumexp(a), desired) - - # Now test with large numbers - b = [1000,1000] - desired = 1000.0 + np.log(2.0) - assert_almost_equal(logsumexp(b), desired) - - n = 1000 - b = np.ones(n)*10000 - desired = 10000.0 + np.log(n) - assert_almost_equal(logsumexp(b), desired) diff --git a/scipy-0.10.1/scipy/misc/tests/test_doccer.py b/scipy-0.10.1/scipy/misc/tests/test_doccer.py deleted file mode 100644 index 6204a9b603..0000000000 --- a/scipy-0.10.1/scipy/misc/tests/test_doccer.py +++ /dev/null @@ -1,89 +0,0 @@ -''' Some tests for the documenting decorator and support functions ''' - -import numpy as np - -from numpy.testing import assert_equal, assert_raises - -from nose.tools import assert_true - -from scipy.misc import doccer - -docstring = \ -"""Docstring - %(strtest1)s - %(strtest2)s - %(strtest3)s -""" -param_doc1 = \ -"""Another test - with some indent""" - -param_doc2 = \ -"""Another test, one line""" - -param_doc3 = \ -""" Another test - with some 
indent""" - -doc_dict = {'strtest1':param_doc1, - 'strtest2':param_doc2, - 'strtest3':param_doc3} - -filled_docstring = \ -"""Docstring - Another test - with some indent - Another test, one line - Another test - with some indent -""" - - -def test_unindent(): - yield assert_equal, doccer.unindent_string(param_doc1), param_doc1 - yield assert_equal, doccer.unindent_string(param_doc2), param_doc2 - yield assert_equal, doccer.unindent_string(param_doc3), param_doc1 - - -def test_unindent_dict(): - d2 = doccer.unindent_dict(doc_dict) - yield assert_equal, d2['strtest1'], doc_dict['strtest1'] - yield assert_equal, d2['strtest2'], doc_dict['strtest2'] - yield assert_equal, d2['strtest3'], doc_dict['strtest1'] - - -def test_docformat(): - udd = doccer.unindent_dict(doc_dict) - formatted = doccer.docformat(docstring, udd) - yield assert_equal, formatted, filled_docstring - single_doc = 'Single line doc %(strtest1)s' - formatted = doccer.docformat(single_doc, doc_dict) - # Note - initial indent of format string does not - # affect subsequent indent of inserted parameter - yield assert_equal, formatted, """Single line doc Another test - with some indent""" - - -def test_decorator(): - # with unindentation of parameters - decorator = doccer.filldoc(doc_dict, True) - @decorator - def func(): - """ Docstring - %(strtest3)s - """ - yield assert_equal, func.__doc__, """ Docstring - Another test - with some indent - """ - # without unindentation of parameters - decorator = doccer.filldoc(doc_dict, False) - @decorator - def func(): - """ Docstring - %(strtest3)s - """ - yield assert_equal, func.__doc__, """ Docstring - Another test - with some indent - """ diff --git a/scipy-0.10.1/scipy/misc/tests/test_pilutil.py b/scipy-0.10.1/scipy/misc/tests/test_pilutil.py deleted file mode 100644 index 139313aa3f..0000000000 --- a/scipy-0.10.1/scipy/misc/tests/test_pilutil.py +++ /dev/null @@ -1,64 +0,0 @@ -import os.path -import numpy as np - -from numpy.testing import assert_, assert_equal, 
\ - dec, decorate_methods, TestCase, run_module_suite - -try: - import PIL.Image -except ImportError: - _have_PIL = False -else: - _have_PIL = True - import scipy.misc.pilutil as pilutil - -# Function / method decorator for skipping PIL tests on import failure -_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test') - -datapath = os.path.dirname(__file__) - -class TestPILUtil(TestCase): - def test_imresize(self): - im = np.random.random((10,20)) - for T in np.sctypes['float'] + [float]: - # 1.1 rounds to below 1.1 for float16, 1.101 works - im1 = pilutil.imresize(im,T(1.101)) - assert_equal(im1.shape,(11,22)) - - def test_imresize2(self): - im = np.random.random((20,30)) - im2 = pilutil.imresize(im, (30,40), interp='bicubic') - assert_equal(im2.shape, (30,40)) - - def test_imresize3(self): - im = np.random.random((15,30)) - im2 = pilutil.imresize(im, (30,60), interp='nearest') - assert_equal(im2.shape, (30,60)) - - def test_bytescale(self): - x = np.array([0,1,2],np.uint8) - y = np.array([0,1,2]) - assert_equal(pilutil.bytescale(x),x) - assert_equal(pilutil.bytescale(y),[0,127,255]) - -def tst_fromimage(filename, irange): - fp = open(filename, "rb") - img = pilutil.fromimage(PIL.Image.open(fp)) - fp.close() - imin,imax = irange - assert_(img.min() >= imin) - assert_(img.max() <= imax) - -@_pilskip -def test_fromimage(): - ''' Test generator for parametric tests ''' - data = {'icon.png':(0,255), - 'icon_mono.png':(0,2), - 'icon_mono_flat.png':(0,1)} - for fn, irange in data.iteritems(): - yield tst_fromimage, os.path.join(datapath,'data',fn), irange - -decorate_methods(TestPILUtil, _pilskip) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/ndimage/SConscript b/scipy-0.10.1/scipy/ndimage/SConscript deleted file mode 100644 index a8ca9a1a30..0000000000 --- a/scipy-0.10.1/scipy/ndimage/SConscript +++ /dev/null @@ -1,12 +0,0 @@ -# Last Change: Fri Oct 10 03:00 PM 2008 J -from os.path import join - -from numscons import 
GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.AppendUnique(CPPPATH = 'src') - -ndimage_src = ["nd_image.c", "ni_filters.c", "ni_fourier.c", "ni_interpolation.c", - "ni_measure.c", "ni_morphology.c", "ni_support.c"] -env.NumpyPythonExtension('_nd_image', source = [join('src', i) for i in ndimage_src]) diff --git a/scipy-0.10.1/scipy/ndimage/SConstruct b/scipy-0.10.1/scipy/ndimage/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/ndimage/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/ndimage/__init__.py b/scipy-0.10.1/scipy/ndimage/__init__.py deleted file mode 100644 index 2d92d2791d..0000000000 --- a/scipy-0.10.1/scipy/ndimage/__init__.py +++ /dev/null @@ -1,180 +0,0 @@ -""" -========================================================= -Multi-dimensional image processing (:mod:`scipy.ndimage`) -========================================================= - -.. currentmodule:: scipy.ndimage - -This package contains various functions for multi-dimensional image -processing. - - -Filters :mod:`scipy.ndimage.filters` -==================================== - -.. module:: scipy.ndimage.filters - -.. 
autosummary:: - :toctree: generated/ - - convolve - Multi-dimensional convolution - convolve1d - 1-D convolution along the given axis - correlate - Multi-dimensional correlation - correlate1d - 1-D correlation along the given axis - gaussian_filter - gaussian_filter1d - gaussian_gradient_magnitude - gaussian_laplace - generic_filter - Multi-dimensional filter using a given function - generic_filter1d - 1-D generic filter along the given axis - generic_gradient_magnitude - generic_laplace - laplace - n-D Laplace filter based on approximate second derivatives - maximum_filter - maximum_filter1d - median_filter - Calculates a multi-dimensional median filter - minimum_filter - minimum_filter1d - percentile_filter - Calculates a multi-dimensional percentile filter - prewitt - rank_filter - Calculates a multi-dimensional rank filter - sobel - uniform_filter - Multi-dimensional uniform filter - uniform_filter1d - 1-D uniform filter along the given axis - -Fourier filters :mod:`scipy.ndimage.fourier` -============================================ - -.. module:: scipy.ndimage.fourier - -.. autosummary:: - :toctree: generated/ - - fourier_ellipsoid - fourier_gaussian - fourier_shift - fourier_uniform - -Interpolation :mod:`scipy.ndimage.interpolation` -================================================ - -.. module:: scipy.ndimage.interpolation - -.. autosummary:: - :toctree: generated/ - - affine_transform - Apply an affine transformation - geometric_transform - Apply an arbritrary geometric transform - map_coordinates - Map input array to new coordinates by interpolation - rotate - Rotate an array - shift - Shift an array - spline_filter - spline_filter1d - zoom - Zoom an array - -Measurements :mod:`scipy.ndimage.measurements` -============================================== - -.. module:: scipy.ndimage.measurements - -.. 
autosummary:: - :toctree: generated/ - - center_of_mass - The center of mass of the values of an array at labels - extrema - Min's and max's of an array at labels, with their positions - find_objects - Find objects in a labeled array - histogram - Histogram of the values of an array, optionally at labels - label - Label features in an array - maximum - maximum_position - mean - Mean of the values of an array at labels - minimum - minimum_position - standard_deviation - Standard deviation of an n-D image array - sum - Sum of the values of the array - variance - Variance of the values of an n-D image array - watershed_ift - -Morphology :mod:`scipy.ndimage.morphology` -========================================== - -.. module:: scipy.ndimage.morphology - -.. autosummary:: - :toctree: generated/ - - binary_closing - binary_dilation - binary_erosion - binary_fill_holes - binary_hit_or_miss - binary_opening - binary_propagation - black_tophat - distance_transform_bf - distance_transform_cdt - distance_transform_edt - generate_binary_structure - grey_closing - grey_dilation - grey_erosion - grey_opening - iterate_structure - morphological_gradient - morphological_laplace - white_tophat - -Utility -======= - -.. currentmodule:: scipy.ndimage - -.. autosummary:: - :toctree: generated/ - - imread - Load an image from a file - -""" - -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. 
The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from filters import * -from fourier import * -from interpolation import * -from measurements import * -from morphology import * -from io import * - -__version__ = '2.0' - -__all__ = filter(lambda s: not s.startswith('_'), dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/ndimage/_ni_support.py b/scipy-0.10.1/scipy/ndimage/_ni_support.py deleted file mode 100644 index 3850391276..0000000000 --- a/scipy-0.10.1/scipy/ndimage/_ni_support.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. 
The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import types -import numpy - -def _extend_mode_to_code(mode): - """Convert an extension mode to the corresponding integer code. - """ - if mode == 'nearest': - return 0 - elif mode == 'wrap': - return 1 - elif mode == 'reflect': - return 2 - elif mode == 'mirror': - return 3 - elif mode == 'constant': - return 4 - else: - raise RuntimeError('boundary mode not supported') - -def _normalize_sequence(input, rank, array_type=None): - """If input is a scalar, create a sequence of length equal to the - rank by duplicating the input. If input is a sequence, - check if its length is equal to the length of array. 
- """ - if (isinstance(input, (types.IntType, types.LongType, - types.FloatType))): - normalized = [input] * rank - else: - normalized = list(input) - if len(normalized) != rank: - err = "sequence argument must have length equal to input rank" - raise RuntimeError(err) - return normalized - -def _get_output(output, input, shape=None): - if shape is None: - shape = input.shape - if output is None: - output = numpy.zeros(shape, dtype = input.dtype.name) - return_value = output - elif type(output) in [type(types.TypeType), type(numpy.zeros((4,)).dtype)]: - output = numpy.zeros(shape, dtype = output) - return_value = output - elif type(output) is types.StringType: - output = numpy.typeDict[output] - output = numpy.zeros(shape, dtype = output) - return_value = output - else: - if output.shape != shape: - raise RuntimeError("output shape not correct") - return_value = None - return output, return_value - -def _check_axis(axis, rank): - if axis < 0: - axis += rank - if axis < 0 or axis >= rank: - raise ValueError('invalid axis') - return axis diff --git a/scipy-0.10.1/scipy/ndimage/filters.py b/scipy-0.10.1/scipy/ndimage/filters.py deleted file mode 100644 index e668bba1d2..0000000000 --- a/scipy-0.10.1/scipy/ndimage/filters.py +++ /dev/null @@ -1,1140 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import math -import numpy -import _ni_support -import _nd_image -from scipy.misc import doccer - -__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', - 'prewitt', 'sobel', 'generic_laplace', 'laplace', - 'gaussian_laplace', 'generic_gradient_magnitude', - 'gaussian_gradient_magnitude', 'correlate', 'convolve', - 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', - 'maximum_filter1d', 'minimum_filter', 'maximum_filter', - 'rank_filter', 'median_filter', 'percentile_filter', - 'generic_filter1d', 'generic_filter'] - - -_input_doc = \ -"""input : array-like - input array to filter""" -_axis_doc = \ -"""axis : integer, optional - axis of ``input`` along which to calculate. Default is -1""" -_output_doc = \ -"""output : array, optional - The ``output`` parameter passes an array in which to store the - filter output.""" -_size_foot_doc = \ -"""size : scalar or tuple, optional - See footprint, below -footprint : array, optional - Either ``size`` or ``footprint`` must be defined. ``size`` gives - the shape that is taken from the input array, at every element - position, to define the input to the filter function. 
- ``footprint`` is a boolean array that specifies (implicitly) a - shape, but also which of the elements within this shape will get - passed to the filter function. Thus ``size=(n,m)`` is equivalent - to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number - of dimensions of the input array, so that, if the input array is - shape (10,10,10), and ``size`` is 2, then the actual size used is - (2,2,2). -""" -_mode_doc = \ -"""mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The ``mode`` parameter determines how the array borders are - handled, where ``cval`` is the value when mode is equal to - 'constant'. Default is 'reflect'""" -_cval_doc = \ -"""cval : scalar, optional - Value to fill past edges of input if ``mode`` is 'constant'. Default - is 0.0""" -_origin_doc = \ -"""origin : scalar, optional -The ``origin`` parameter controls the placement of the filter. Default 0""" -_extra_arguments_doc = \ -"""extra_arguments : sequence, optional - Sequence of extra positional arguments to pass to passed function""" -_extra_keywords_doc = \ -"""extra_keywords : dict, optional - dict of extra keyword arguments to pass to passed function""" - -docdict = { - 'input':_input_doc, - 'axis':_axis_doc, - 'output':_output_doc, - 'size_foot':_size_foot_doc, - 'mode':_mode_doc, - 'cval':_cval_doc, - 'origin':_origin_doc, - 'extra_arguments':_extra_arguments_doc, - 'extra_keywords':_extra_keywords_doc, - } - -docfiller = doccer.filldoc(docdict) - -@docfiller -def correlate1d(input, weights, axis = -1, output = None, mode = "reflect", - cval = 0.0, origin = 0): - """Calculate a one-dimensional correlation along the given axis. - - The lines of the array along the given axis are correlated with the - given weights. 
- - Parameters - ---------- - %(input)s - weights : array - one-dimensional sequence of numbers - %(axis)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - output, return_value = _ni_support._get_output(output, input) - weights = numpy.asarray(weights, dtype=numpy.float64) - if weights.ndim != 1 or weights.shape[0] < 1: - raise RuntimeError('no filter weights given') - if not weights.flags.contiguous: - weights = weights.copy() - axis = _ni_support._check_axis(axis, input.ndim) - if ((len(weights) // 2 + origin < 0) or - (len(weights) // 2 + origin > len(weights))): - raise ValueError('invalid origin') - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.correlate1d(input, weights, axis, output, mode, cval, - origin) - return return_value - - -@docfiller -def convolve1d(input, weights, axis = -1, output = None, mode = "reflect", - cval = 0.0, origin = 0): - """Calculate a one-dimensional convolution along the given axis. - - The lines of the array along the given axis are convolved with the - given weights. - - Parameters - ---------- - %(input)s - weights : ndarray - one-dimensional sequence of numbers - %(axis)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - weights = weights[::-1] - origin = -origin - if not len(weights) & 1: - origin -= 1 - return correlate1d(input, weights, axis, output, mode, cval, origin) - - -@docfiller -def gaussian_filter1d(input, sigma, axis = -1, order = 0, output = None, - mode = "reflect", cval = 0.0): - """One-dimensional Gaussian filter. - - Parameters - ---------- - %(input)s - sigma : scalar - standard deviation for Gaussian kernel - %(axis)s - order : {0, 1, 2, 3}, optional - An order of 0 corresponds to convolution with a Gaussian - kernel. An order of 1, 2, or 3 corresponds to convolution with - the first, second or third derivatives of a Gaussian. 
Higher - order derivatives are not implemented - %(output)s - %(mode)s - %(cval)s - """ - if order not in range(4): - raise ValueError('Order outside 0..3 not implemented') - sd = float(sigma) - # make the length of the filter equal to 4 times the standard - # deviations: - lw = int(4.0 * sd + 0.5) - weights = [0.0] * (2 * lw + 1) - weights[lw] = 1.0 - sum = 1.0 - sd = sd * sd - # calculate the kernel: - for ii in range(1, lw + 1): - tmp = math.exp(-0.5 * float(ii * ii) / sd) - weights[lw + ii] = tmp - weights[lw - ii] = tmp - sum += 2.0 * tmp - for ii in range(2 * lw + 1): - weights[ii] /= sum - # implement first, second and third order derivatives: - if order == 1 : # first derivative - weights[lw] = 0.0 - for ii in range(1, lw + 1): - x = float(ii) - tmp = -x / sd * weights[lw + ii] - weights[lw + ii] = -tmp - weights[lw - ii] = tmp - elif order == 2: # second derivative - weights[lw] *= -1.0 / sd - for ii in range(1, lw + 1): - x = float(ii) - tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd - weights[lw + ii] = tmp - weights[lw - ii] = tmp - elif order == 3: # third derivative - weights[lw] = 0.0 - sd2 = sd * sd - for ii in range(1, lw + 1): - x = float(ii) - tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2 - weights[lw + ii] = -tmp - weights[lw - ii] = tmp - return correlate1d(input, weights, axis, output, mode, cval, 0) - - -@docfiller -def gaussian_filter(input, sigma, order = 0, output = None, - mode = "reflect", cval = 0.0): - """Multi-dimensional Gaussian filter. - - Parameters - ---------- - %(input)s - sigma : scalar or sequence of scalars - standard deviation for Gaussian kernel. The standard - deviations of the Gaussian filter are given for each axis as a - sequence, or as a single number, in which case it is equal for - all axes. - order : {0, 1, 2, 3} or sequence from same set, optional - The order of the filter along each axis is given as a sequence - of integers, or as a single number. 
An order of 0 corresponds - to convolution with a Gaussian kernel. An order of 1, 2, or 3 - corresponds to convolution with the first, second or third - derivatives of a Gaussian. Higher order derivatives are not - implemented - %(output)s - %(mode)s - %(cval)s - - Notes - ----- - The multi-dimensional filter is implemented as a sequence of - one-dimensional convolution filters. The intermediate arrays are - stored in the same data type as the output. Therefore, for output - types with a limited precision, the results may be imprecise - because intermediate results may be stored with insufficient - precision. - """ - input = numpy.asarray(input) - output, return_value = _ni_support._get_output(output, input) - orders = _ni_support._normalize_sequence(order, input.ndim) - if not set(orders).issubset(set(range(4))): - raise ValueError('Order outside 0..4 not implemented') - sigmas = _ni_support._normalize_sequence(sigma, input.ndim) - axes = range(input.ndim) - axes = [(axes[ii], sigmas[ii], orders[ii]) - for ii in range(len(axes)) if sigmas[ii] > 1e-15] - if len(axes) > 0: - for axis, sigma, order in axes: - gaussian_filter1d(input, sigma, axis, order, output, - mode, cval) - input = output - else: - output[...] = input[...] - return return_value - - -@docfiller -def prewitt(input, axis = -1, output = None, mode = "reflect", cval = 0.0): - """Calculate a Prewitt filter. - - Parameters - ---------- - %(input)s - %(axis)s - %(output)s - %(mode)s - %(cval)s - """ - input = numpy.asarray(input) - axis = _ni_support._check_axis(axis, input.ndim) - output, return_value = _ni_support._get_output(output, input) - correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0) - axes = [ii for ii in range(input.ndim) if ii != axis] - for ii in axes: - correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,) - return return_value - - -@docfiller -def sobel(input, axis = -1, output = None, mode = "reflect", cval = 0.0): - """Calculate a Sobel filter. 
- - Parameters - ---------- - %(input)s - %(axis)s - %(output)s - %(mode)s - %(cval)s - """ - input = numpy.asarray(input) - axis = _ni_support._check_axis(axis, input.ndim) - output, return_value = _ni_support._get_output(output, input) - correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0) - axes = [ii for ii in range(input.ndim) if ii != axis] - for ii in axes: - correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0) - return return_value - - -@docfiller -def generic_laplace(input, derivative2, output = None, mode = "reflect", - cval = 0.0, - extra_arguments = (), - extra_keywords = None): - """Calculate a multidimensional laplace filter using the provided - second derivative function. - - Parameters - ---------- - %(input)s - derivative2 : callable - Callable with the following signature:: - derivative2(input, axis, output, mode, cval, - *extra_arguments, **extra_keywords) - See ``extra_arguments``, ``extra_keywords`` below - %(output)s - %(mode)s - %(cval)s - %(extra_keywords)s - %(extra_arguments)s - """ - if extra_keywords is None: - extra_keywords = {} - input = numpy.asarray(input) - output, return_value = _ni_support._get_output(output, input) - axes = range(input.ndim) - if len(axes) > 0: - derivative2(input, axes[0], output, mode, cval, - *extra_arguments, **extra_keywords) - for ii in range(1, len(axes)): - tmp = derivative2(input, axes[ii], output.dtype, mode, cval, - *extra_arguments, **extra_keywords) - output += tmp - else: - output[...] = input[...] - return return_value - - -@docfiller -def laplace(input, output = None, mode = "reflect", cval = 0.0): - """Calculate a multidimensional laplace filter using an estimation - for the second derivative based on differences. 
- - Parameters - ---------- - %(input)s - %(output)s - %(mode)s - %(cval)s - """ - def derivative2(input, axis, output, mode, cval): - return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) - return generic_laplace(input, derivative2, output, mode, cval) - - -@docfiller -def gaussian_laplace(input, sigma, output = None, mode = "reflect", - cval = 0.0): - """Calculate a multidimensional laplace filter using gaussian - second derivatives. - - Parameters - ---------- - %(input)s - sigma : scalar or sequence of scalars - The standard deviations of the Gaussian filter are given for - each axis as a sequence, or as a single number, in which case - it is equal for all axes.. - %(output)s - %(mode)s - %(cval)s - """ - input = numpy.asarray(input) - def derivative2(input, axis, output, mode, cval, sigma): - order = [0] * input.ndim - order[axis] = 2 - return gaussian_filter(input, sigma, order, output, mode, cval) - return generic_laplace(input, derivative2, output, mode, cval, - extra_arguments = (sigma,)) - - -@docfiller -def generic_gradient_magnitude(input, derivative, output = None, - mode = "reflect", cval = 0.0, - extra_arguments = (), extra_keywords = None): - """Calculate a gradient magnitude using the provided function for - the gradient. - - Parameters - ---------- - %(input)s - derivative : callable - Callable with the following signature:: - derivative(input, axis, output, mode, cval, - *extra_arguments, **extra_keywords) - See ``extra_arguments``, ``extra_keywords`` below - ``derivative`` can assume that ``input`` and ``output`` are - ndarrays. - Note that the output from ``derivative`` is modified inplace; - be careful to copy important inputs before returning them. 
- %(output)s - %(mode)s - %(cval)s - %(extra_keywords)s - %(extra_arguments)s - """ - if extra_keywords is None: - extra_keywords = {} - input = numpy.asarray(input) - output, return_value = _ni_support._get_output(output, input) - axes = range(input.ndim) - if len(axes) > 0: - derivative(input, axes[0], output, mode, cval, - *extra_arguments, **extra_keywords) - numpy.multiply(output, output, output) - for ii in range(1, len(axes)): - tmp = derivative(input, axes[ii], output.dtype, mode, cval, - *extra_arguments, **extra_keywords) - numpy.multiply(tmp, tmp, tmp) - output += tmp - # This allows the sqrt to work with a different default casting - if numpy.version.short_version > '1.6.1': - numpy.sqrt(output, output, casting='unsafe') - else: - numpy.sqrt(output, output) - else: - output[...] = input[...] - return return_value - - -@docfiller -def gaussian_gradient_magnitude(input, sigma, output = None, - mode = "reflect", cval = 0.0): - """Calculate a multidimensional gradient magnitude using gaussian - derivatives. - - Parameters - ---------- - %(input)s - sigma : scalar or sequence of scalars - The standard deviations of the Gaussian filter are given for - each axis as a sequence, or as a single number, in which case - it is equal for all axes.. 
- %(output)s - %(mode)s - %(cval)s - """ - input = numpy.asarray(input) - def derivative(input, axis, output, mode, cval, sigma): - order = [0] * input.ndim - order[axis] = 1 - return gaussian_filter(input, sigma, order, output, mode, cval) - return generic_gradient_magnitude(input, derivative, output, mode, - cval, extra_arguments = (sigma,)) - - -def _correlate_or_convolve(input, weights, output, mode, cval, origin, - convolution): - input = numpy.asarray(input) - if numpy.iscomplexobj(int): - raise TypeError('Complex type not supported') - origins = _ni_support._normalize_sequence(origin, input.ndim) - weights = numpy.asarray(weights, dtype=numpy.float64) - wshape = [ii for ii in weights.shape if ii > 0] - if len(wshape) != input.ndim: - raise RuntimeError('filter weights array has incorrect shape.') - if convolution: - weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] - for ii in range(len(origins)): - origins[ii] = -origins[ii] - if not weights.shape[ii] & 1: - origins[ii] -= 1 - for origin, lenw in zip(origins, wshape): - if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw): - raise ValueError('invalid origin') - if not weights.flags.contiguous: - weights = weights.copy() - output, return_value = _ni_support._get_output(output, input) - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.correlate(input, weights, output, mode, cval, origins) - return return_value - - -@docfiller -def correlate(input, weights, output = None, mode = 'reflect', cval = 0.0, - origin = 0): - """ - Multi-dimensional correlation. - - The array is correlated with the given kernel. - - Parameters - ---------- - input : array-like - input array to filter - weights : ndarray - array of weights, same number of dimensions as input - output : array, optional - The ``output`` parameter passes an array in which to store the - filter output. 
- mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The ``mode`` parameter determines how the array borders are - handled, where ``cval`` is the value when mode is equal to - 'constant'. Default is 'reflect' - cval : scalar, optional - Value to fill past edges of input if ``mode`` is 'constant'. Default - is 0.0 - origin : scalar, optional - The ``origin`` parameter controls the placement of the filter. - Default 0 - - See Also - -------- - convolve : Convolve an image with a kernel. - - """ - return _correlate_or_convolve(input, weights, output, mode, cval, - origin, False) - - -@docfiller -def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0, - origin = 0): - """ - Multi-dimensional convolution. - - The array is convolved with the given kernel. - - Parameters - ---------- - input : array_like - Input array to filter. - weights : array_like - Array of weights, same number of dimensions as input - output : ndarray, optional - The `output` parameter passes an array in which to store the - filter output. - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - the `mode` parameter determines how the array borders are - handled. For 'constant' mode, values beyond borders are set to be - `cval`. Default is 'reflect'. - cval : scalar, optional - Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0 - origin : scalar, optional - The `origin` parameter controls the placement of the filter. - Default is 0. - - Returns - ------- - result : ndarray - The result of convolution of `input` with `weights`. - - See Also - -------- - correlate : Correlate an image with a kernel. - - Notes - ----- - Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where - W is the `weights` kernel, - j is the n-D spatial index over :math:`W`, - I is the `input` and k is the coordinate of the center of - W, specified by `origin` in the input parameters. 
- - Examples - -------- - Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, - because in this case borders (i.e. where the `weights` kernel, centered - on any one value, extends beyond an edge of `input`. - - >>> a = np.array([[1, 2, 0, 0], - .... [5, 3, 0, 4], - .... [0, 0, 0, 7], - .... [9, 3, 0, 0]]) - >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) - >>> from scipy import ndimage - >>> ndimage.convolve(a, k, mode='constant', cval=0.0) - array([[11, 10, 7, 4], - [10, 3, 11, 11], - [15, 12, 14, 7], - [12, 3, 7, 0]]) - - Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` - with 1.0's (and then extracting only the original region of the result). - - >>> ndimage.convolve(a, k, mode='constant', cval=1.0) - array([[13, 11, 8, 7], - [11, 3, 11, 14], - [16, 12, 14, 10], - [15, 6, 10, 5]]) - - With ``mode='reflect'`` (the default), outer values are reflected at the - edge of `input` to fill in missing values. - - >>> b = np.array([[2, 0, 0], - [1, 0, 0], - [0, 0, 0]]) - >>> k = np.array([[0,1,0],[0,1,0],[0,1,0]]) - >>> ndimage.convolve(b, k, mode='reflect') - array([[5, 0, 0], - [3, 0, 0], - [1, 0, 0]]) - - This includes diagonally at the corners. - - >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) - >>> ndimage.convolve(b, k) - array([[4, 2, 0], - [3, 2, 0], - [1, 1, 0]]) - - With ``mode='nearest'``, the single nearest value in to an edge in - `input` is repeated as many times as needed to match the overlapping - `weights`. - - >>> c = np.array([[2, 0, 1], - [1, 0, 0], - [0, 0, 0]]) - >>> k = np.array([[0, 1, 0], - [0, 1, 0], - [0, 1, 0], - [0, 1, 0], - [0, 1, 0]]) - >>> ndimage.convolve(c, k, mode='nearest') - array([[7, 0, 3], - [5, 0, 2], - [3, 0, 1]]) - - """ - return _correlate_or_convolve(input, weights, output, mode, cval, - origin, True) - - -@docfiller -def uniform_filter1d(input, size, axis = -1, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Calculate a one-dimensional uniform filter along the given axis. 
- - The lines of the array along the given axis are filtered with a - uniform filter of given size. - - Parameters - ---------- - %(input)s - size : integer - length of uniform filter - %(axis)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - axis = _ni_support._check_axis(axis, input.ndim) - if size < 1: - raise RuntimeError('incorrect filter size') - output, return_value = _ni_support._get_output(output, input) - if (size // 2 + origin < 0) or (size // 2 + origin >= size): - raise ValueError('invalid origin') - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, - origin) - return return_value - - -@docfiller -def uniform_filter(input, size = 3, output = None, mode = "reflect", - cval = 0.0, origin = 0): - """Multi-dimensional uniform filter. - - Parameters - ---------- - %(input)s - size : int or sequence of ints - The sizes of the uniform filter are given for each axis as a - sequence, or as a single number, in which case the size is - equal for all axes. - %(output)s - %(mode)s - %(cval)s - %(origin)s - - Notes - ----- - The multi-dimensional filter is implemented as a sequence of - one-dimensional uniform filters. The intermediate arrays are stored - in the same data type as the output. Therefore, for output types - with a limited precision, the results may be imprecise because - intermediate results may be stored with insufficient precision. 
- """ - input = numpy.asarray(input) - output, return_value = _ni_support._get_output(output, input) - sizes = _ni_support._normalize_sequence(size, input.ndim) - origins = _ni_support._normalize_sequence(origin, input.ndim) - axes = range(input.ndim) - axes = [(axes[ii], sizes[ii], origins[ii]) - for ii in range(len(axes)) if sizes[ii] > 1] - if len(axes) > 0: - for axis, size, origin in axes: - uniform_filter1d(input, int(size), axis, output, mode, - cval, origin) - input = output - else: - output[...] = input[...] - return return_value - - -@docfiller -def minimum_filter1d(input, size, axis = -1, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Calculate a one-dimensional minimum filter along the given axis. - - The lines of the array along the given axis are filtered with a - minimum filter of given size. - - Parameters - ---------- - %(input)s - size : int - length along which to calculate 1D minimum - %(axis)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - axis = _ni_support._check_axis(axis, input.ndim) - if size < 1: - raise RuntimeError('incorrect filter size') - output, return_value = _ni_support._get_output(output, input) - if (size // 2 + origin < 0) or (size // 2 + origin >= size): - raise ValueError('invalid origin') - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, - origin, 1) - return return_value - - -@docfiller -def maximum_filter1d(input, size, axis = -1, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Calculate a one-dimensional maximum filter along the given axis. - - The lines of the array along the given axis are filtered with a - maximum filter of given size. 
- - Parameters - ---------- - %(input)s - size : int - length along which to calculate 1D maximum - %(axis)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - axis = _ni_support._check_axis(axis, input.ndim) - if size < 1: - raise RuntimeError('incorrect filter size') - output, return_value = _ni_support._get_output(output, input) - if (size // 2 + origin < 0) or (size // 2 + origin >= size): - raise ValueError('invalid origin') - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, - origin, 0) - return return_value - - -def _min_or_max_filter(input, size, footprint, structure, output, mode, - cval, origin, minimum): - if structure is None: - if footprint is None: - if size is None: - raise RuntimeError("no footprint provided") - separable= True - else: - footprint = numpy.asarray(footprint) - footprint = footprint.astype(bool) - if numpy.alltrue(numpy.ravel(footprint),axis=0): - size = footprint.shape - footprint = None - separable = True - else: - separable = False - else: - structure = numpy.asarray(structure, dtype=numpy.float64) - separable = False - if footprint is None: - footprint = numpy.ones(structure.shape, bool) - else: - footprint = numpy.asarray(footprint) - footprint = footprint.astype(bool) - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - output, return_value = _ni_support._get_output(output, input) - origins = _ni_support._normalize_sequence(origin, input.ndim) - if separable: - sizes = _ni_support._normalize_sequence(size, input.ndim) - axes = range(input.ndim) - axes = [(axes[ii], sizes[ii], origins[ii]) - for ii in range(len(axes)) if sizes[ii] > 1] - if minimum: - filter_ = minimum_filter1d - else: - filter_ = maximum_filter1d - if len(axes) > 0: - for axis, size, origin in axes: - filter_(input, 
int(size), axis, output, mode, cval, origin) - input = output - else: - output[...] = input[...] - else: - fshape = [ii for ii in footprint.shape if ii > 0] - if len(fshape) != input.ndim: - raise RuntimeError('footprint array has incorrect shape.') - for origin, lenf in zip(origins, fshape): - if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): - raise ValueError('invalid origin') - if not footprint.flags.contiguous: - footprint = footprint.copy() - if structure is not None: - if len(structure.shape) != input.ndim: - raise RuntimeError('structure array has incorrect shape') - if not structure.flags.contiguous: - structure = structure.copy() - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.min_or_max_filter(input, footprint, structure, output, - mode, cval, origins, minimum) - return return_value - - -@docfiller -def minimum_filter(input, size = None, footprint = None, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Calculates a multi-dimensional minimum filter. - - Parameters - ---------- - %(input)s - %(size_foot)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - return _min_or_max_filter(input, size, footprint, None, output, mode, - cval, origin, 1) - - -@docfiller -def maximum_filter(input, size = None, footprint = None, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Calculates a multi-dimensional maximum filter. 
- - Parameters - ---------- - %(input)s - %(size_foot)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - return _min_or_max_filter(input, size, footprint, None, output, mode, - cval, origin, 0) - - -@docfiller -def _rank_filter(input, rank, size = None, footprint = None, output = None, - mode = "reflect", cval = 0.0, origin = 0, operation = 'rank'): - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - origins = _ni_support._normalize_sequence(origin, input.ndim) - if footprint is None: - if size is None: - raise RuntimeError("no footprint or filter size provided") - sizes = _ni_support._normalize_sequence(size, input.ndim) - footprint = numpy.ones(sizes, dtype=bool) - else: - footprint = numpy.asarray(footprint, dtype=bool) - fshape = [ii for ii in footprint.shape if ii > 0] - if len(fshape) != input.ndim: - raise RuntimeError('filter footprint array has incorrect shape.') - for origin, lenf in zip(origins, fshape): - if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): - raise ValueError('invalid origin') - if not footprint.flags.contiguous: - footprint = footprint.copy() - filter_size = numpy.where(footprint, 1, 0).sum() - if operation == 'median': - rank = filter_size // 2 - elif operation == 'percentile': - percentile = rank - if percentile < 0.0: - percentile += 100.0 - if percentile < 0 or percentile > 100: - raise RuntimeError('invalid percentile') - if percentile == 100.0: - rank = filter_size - 1 - else: - rank = int(float(filter_size) * percentile / 100.0) - if rank < 0: - rank += filter_size - if rank < 0 or rank >= filter_size: - raise RuntimeError('rank not within filter footprint size') - if rank == 0: - return minimum_filter(input, None, footprint, output, mode, cval, - origin) - elif rank == filter_size - 1: - return maximum_filter(input, None, footprint, output, mode, cval, - origin) - else: - output, return_value = _ni_support._get_output(output, input) - mode = 
_ni_support._extend_mode_to_code(mode) - _nd_image.rank_filter(input, rank, footprint, output, mode, cval, - origins) - return return_value - - -@docfiller -def rank_filter(input, rank, size = None, footprint = None, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Calculates a multi-dimensional rank filter. - - Parameters - ---------- - %(input)s - rank : integer - The rank parameter may be less then zero, i.e., rank = -1 - indicates the largest element. - %(size_foot)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - return _rank_filter(input, rank, size, footprint, output, mode, cval, - origin, 'rank') - - -@docfiller -def median_filter(input, size = None, footprint = None, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """ - Calculates a multi-dimensional median filter. - - Parameters - ---------- - input : array-like - input array to filter - size : scalar or tuple, optional - See footprint, below - footprint : array, optional - Either ``size`` or ``footprint`` must be defined. ``size`` gives - the shape that is taken from the input array, at every element - position, to define the input to the filter function. - ``footprint`` is a boolean array that specifies (implicitly) a - shape, but also which of the elements within this shape will get - passed to the filter function. Thus ``size=(n,m)`` is equivalent - to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number - of dimensions of the input array, so that, if the input array is - shape (10,10,10), and ``size`` is 2, then the actual size used is - (2,2,2). - output : array, optional - The ``output`` parameter passes an array in which to store the - filter output. - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The ``mode`` parameter determines how the array borders are - handled, where ``cval`` is the value when mode is equal to - 'constant'. 
Default is 'reflect' - cval : scalar, optional - Value to fill past edges of input if ``mode`` is 'constant'. Default - is 0.0 - origin : scalar, optional - The ``origin`` parameter controls the placement of the filter. - Default 0 - - """ - return _rank_filter(input, 0, size, footprint, output, mode, cval, - origin, 'median') - - -@docfiller -def percentile_filter(input, percentile, size = None, footprint = None, - output = None, mode = "reflect", cval = 0.0, origin = 0): - """Calculates a multi-dimensional percentile filter. - - Parameters - ---------- - %(input)s - percentile : scalar - The percentile parameter may be less then zero, i.e., - percentile = -20 equals percentile = 80 - %(size_foot)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - """ - return _rank_filter(input, percentile, size, footprint, output, mode, - cval, origin, 'percentile') - - -@docfiller -def generic_filter1d(input, function, filter_size, axis = -1, - output = None, mode = "reflect", cval = 0.0, origin = 0, - extra_arguments = (), extra_keywords = None): - """Calculate a one-dimensional filter along the given axis. - - generic_filter1d iterates over the lines of the array, calling the - given function at each line. The arguments of the line are the - input line, and the output line. The input and output lines are 1D - double arrays. The input line is extended appropriately according - to the filter size and origin. The output line must be modified - in-place with the result. 
- - Parameters - ---------- - %(input)s - function : callable - function to apply along given axis - filter_size : scalar - length of the filter - %(axis)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - %(extra_arguments)s - %(extra_keywords)s - """ - if extra_keywords is None: - extra_keywords = {} - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - output, return_value = _ni_support._get_output(output, input) - if filter_size < 1: - raise RuntimeError('invalid filter size') - axis = _ni_support._check_axis(axis, input.ndim) - if ((filter_size // 2 + origin < 0) or - (filter_size // 2 + origin >= filter_size)): - raise ValueError('invalid origin') - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.generic_filter1d(input, function, filter_size, axis, output, - mode, cval, origin, extra_arguments, extra_keywords) - return return_value - - -@docfiller -def generic_filter(input, function, size = None, footprint = None, - output = None, mode = "reflect", cval = 0.0, origin = 0, - extra_arguments = (), extra_keywords = None): - """Calculates a multi-dimensional filter using the given function. - - At each element the provided function is called. The input values - within the filter footprint at that element are passed to the function - as a 1D array of double values. 
- - Parameters - ---------- - %(input)s - function : callable - function to apply at each element - %(size_foot)s - %(output)s - %(mode)s - %(cval)s - %(origin)s - %(extra_arguments)s - %(extra_keywords)s - """ - if extra_keywords is None: - extra_keywords = {} - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - origins = _ni_support._normalize_sequence(origin, input.ndim) - if footprint is None: - if size is None: - raise RuntimeError("no footprint or filter size provided") - sizes = _ni_support._normalize_sequence(size, input.ndim) - footprint = numpy.ones(sizes, dtype=bool) - else: - footprint = numpy.asarray(footprint) - footprint = footprint.astype(bool) - fshape = [ii for ii in footprint.shape if ii > 0] - if len(fshape) != input.ndim: - raise RuntimeError('filter footprint array has incorrect shape.') - for origin, lenf in zip(origins, fshape): - if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): - raise ValueError('invalid origin') - if not footprint.flags.contiguous: - footprint = footprint.copy() - output, return_value = _ni_support._get_output(output, input) - mode = _ni_support._extend_mode_to_code(mode) - _nd_image.generic_filter(input, function, footprint, output, mode, - cval, origins, extra_arguments, extra_keywords) - return return_value diff --git a/scipy-0.10.1/scipy/ndimage/fourier.py b/scipy-0.10.1/scipy/ndimage/fourier.py deleted file mode 100644 index 05f08e2e4c..0000000000 --- a/scipy-0.10.1/scipy/ndimage/fourier.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. 
Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import types -import numpy -import _ni_support -import _nd_image - -__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid', - 'fourier_shift'] - - -def _get_output_fourier(output, input): - if output is None: - if input.dtype.type in [numpy.complex64, numpy.complex128, - numpy.float32]: - output = numpy.zeros(input.shape, dtype = input.dtype) - else: - output = numpy.zeros(input.shape, dtype = numpy.float64) - return_value = output - elif type(output) is types.TypeType: - if output not in [numpy.complex64, numpy.complex128, - numpy.float32, numpy.float64]: - raise RuntimeError("output type not supported") - output = numpy.zeros(input.shape, dtype = output) - return_value = output - else: - if output.shape != input.shape: - raise RuntimeError("output shape not correct") - return_value = None - return output, return_value - -def _get_output_fourier_complex(output, input): - if output is None: - if input.dtype.type in [numpy.complex64, numpy.complex128]: - output = numpy.zeros(input.shape, dtype = input.dtype) - else: - output = numpy.zeros(input.shape, dtype = numpy.complex128) - return_value = output - elif type(output) is types.TypeType: - if output not in [numpy.complex64, numpy.complex128]: - raise RuntimeError("output type not supported") - output = numpy.zeros(input.shape, dtype = output) - return_value = output - else: - if output.shape != input.shape: - raise RuntimeError("output shape not correct") - return_value = None - return output, return_value - -def fourier_gaussian(input, sigma, n = -1, axis = -1, output = None): - """ - Multi-dimensional Gaussian fourier filter. - - The array is multiplied with the fourier transform of a Gaussian - kernel. - - Parameters - ---------- - input : array_like - The input array. - sigma : float or sequence - The sigma of the Gaussian kernel. If a float, `sigma` is the same for - all axes. If a sequence, `sigma` has to contain one value for each - axis. 
- n : int, optional - If `n` is negative (default), then the input is assumed to be the - result of a complex fft. - If `n` is larger than or equal to zero, the input is assumed to be the - result of a real fft, and `n` gives the length of the array before - transformation along the real transform direction. - axis : int, optional - The axis of the real transform. - output : ndarray, optional - If given, the result of filtering the input is placed in this array. - None is returned in this case. - - Returns - ------- - return_value : ndarray or None - The filtered input. If `output` is given as a parameter, None is - returned. - - """ - input = numpy.asarray(input) - output, return_value = _get_output_fourier(output, input) - axis = _ni_support._check_axis(axis, input.ndim) - sigmas = _ni_support._normalize_sequence(sigma, input.ndim) - sigmas = numpy.asarray(sigmas, dtype = numpy.float64) - if not sigmas.flags.contiguous: - sigmas = sigmas.copy() - - _nd_image.fourier_filter(input, sigmas, n, axis, output, 0) - return return_value - -def fourier_uniform(input, size, n = -1, axis = -1, output = None): - """ - Multi-dimensional uniform fourier filter. - - The array is multiplied with the fourier transform of a box of given - size. - - Parameters - ---------- - input : array_like - The input array. - size : float or sequence - The size of the box used for filtering. - If a float, `size` is the same for all axes. If a sequence, `size` has - to contain one value for each axis. - n : int, optional - If `n` is negative (default), then the input is assumed to be the - result of a complex fft. - If `n` is larger than or equal to zero, the input is assumed to be the - result of a real fft, and `n` gives the length of the array before - transformation along the real transform direction. - axis : int, optional - The axis of the real transform. - output : ndarray, optional - If given, the result of filtering the input is placed in this array. - None is returned in this case. 
- - Returns - ------- - return_value : ndarray or None - The filtered input. If `output` is given as a parameter, None is - returned. - - """ - input = numpy.asarray(input) - output, return_value = _get_output_fourier(output, input) - axis = _ni_support._check_axis(axis, input.ndim) - sizes = _ni_support._normalize_sequence(size, input.ndim) - sizes = numpy.asarray(sizes, dtype = numpy.float64) - if not sizes.flags.contiguous: - sizes = sizes.copy() - _nd_image.fourier_filter(input, sizes, n, axis, output, 1) - return return_value - -def fourier_ellipsoid(input, size, n = -1, axis = -1, output = None): - """ - Multi-dimensional ellipsoid fourier filter. - - The array is multiplied with the fourier transform of a ellipsoid of - given sizes. - - Parameters - ---------- - input : array_like - The input array. - size : float or sequence - The size of the box used for filtering. - If a float, `size` is the same for all axes. If a sequence, `size` has - to contain one value for each axis. - n : int, optional - If `n` is negative (default), then the input is assumed to be the - result of a complex fft. - If `n` is larger than or equal to zero, the input is assumed to be the - result of a real fft, and `n` gives the length of the array before - transformation along the real transform direction. - axis : int, optional - The axis of the real transform. - output : ndarray, optional - If given, the result of filtering the input is placed in this array. - None is returned in this case. - - Returns - ------- - return_value : ndarray or None - The filtered input. If `output` is given as a parameter, None is - returned. - - Notes - ----- - This function is implemented for arrays of rank 1, 2, or 3. 
- - """ - input = numpy.asarray(input) - output, return_value = _get_output_fourier(output, input) - axis = _ni_support._check_axis(axis, input.ndim) - sizes = _ni_support._normalize_sequence(size, input.ndim) - sizes = numpy.asarray(sizes, dtype = numpy.float64) - if not sizes.flags.contiguous: - sizes = sizes.copy() - _nd_image.fourier_filter(input, sizes, n, axis, output, 2) - return return_value - -def fourier_shift(input, shift, n = -1, axis = -1, output = None): - """ - Multi-dimensional fourier shift filter. - - The array is multiplied with the fourier transform of a shift operation. - - Parameters - ---------- - input : array_like - The input array. - shift : float or sequence - The size of the box used for filtering. - If a float, `shift` is the same for all axes. If a sequence, `shift` - has to contain one value for each axis. - n : int, optional - If `n` is negative (default), then the input is assumed to be the - result of a complex fft. - If `n` is larger than or equal to zero, the input is assumed to be the - result of a real fft, and `n` gives the length of the array before - transformation along the real transform direction. - axis : int, optional - The axis of the real transform. - output : ndarray, optional - If given, the result of shifting the input is placed in this array. - None is returned in this case. - - Returns - ------- - return_value : ndarray or None - The shifted input. If `output` is given as a parameter, None is - returned. 
- - """ - input = numpy.asarray(input) - output, return_value = _get_output_fourier_complex(output, input) - axis = _ni_support._check_axis(axis, input.ndim) - shifts = _ni_support._normalize_sequence(shift, input.ndim) - shifts = numpy.asarray(shifts, dtype = numpy.float64) - if not shifts.flags.contiguous: - shifts = shifts.copy() - _nd_image.fourier_shift(input, shifts, n, axis, output) - return return_value diff --git a/scipy-0.10.1/scipy/ndimage/interpolation.py b/scipy-0.10.1/scipy/ndimage/interpolation.py deleted file mode 100644 index fc4723b2f8..0000000000 --- a/scipy-0.10.1/scipy/ndimage/interpolation.py +++ /dev/null @@ -1,670 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import math -import numpy -import _ni_support -import _nd_image - -__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', - 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] - - -def _extend_mode_to_code(mode): - mode = _ni_support._extend_mode_to_code(mode) - return mode - -def spline_filter1d(input, order=3, axis=-1, output=numpy.float64): - """ - Calculates a one-dimensional spline filter along the given axis. - - The lines of the array along the given axis are filtered by a - spline filter. The order of the spline must be >= 2 and <= 5. - - Parameters - ---------- - input : array_like - The input array. - order : int, optional - The order of the spline, default is 3. - axis : int, optional - The axis along which the spline filter is applied. Default is the last - axis. - output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. Default is `numpy.float64`. - - Returns - ------- - return_value : ndarray or None - The filtered input. If `output` is given as a parameter, None is - returned. - - """ - if order < 0 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - output, return_value = _ni_support._get_output(output, input) - if order in [0, 1]: - output[...] 
= numpy.array(input) - else: - axis = _ni_support._check_axis(axis, input.ndim) - _nd_image.spline_filter1d(input, order, axis, output) - return return_value - - -def spline_filter(input, order=3, output = numpy.float64): - """ - Multi-dimensional spline filter. - - For more details, see `spline_filter1d`. - - See Also - -------- - spline_filter1d - - Notes - ----- - The multi-dimensional filter is implemented as a sequence of - one-dimensional spline filters. The intermediate arrays are stored - in the same data type as the output. Therefore, for output types - with a limited precision, the results may be imprecise because - intermediate results may be stored with insufficient precision. - - """ - if order < 2 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - output, return_value = _ni_support._get_output(output, input) - if order not in [0, 1] and input.ndim > 0: - for axis in range(input.ndim): - spline_filter1d(input, order, axis, output = output) - input = output - else: - output[...] = input[...] - return return_value - -def geometric_transform(input, mapping, output_shape=None, - output=None, order=3, - mode='constant', cval=0.0, prefilter=True, - extra_arguments=(), extra_keywords={}): - """ - Apply an arbritrary geometric transform. - - The given mapping function is used to find, for each point in the - output, the corresponding coordinates in the input. The value of the - input at those coordinates is determined by spline interpolation of - the requested order. - - Parameters - ---------- - input : array_like - The input array. - mapping : callable - A callable object that accepts a tuple of length equal to the output - array rank, and returns the corresponding input coordinates as a tuple - of length equal to the input array rank. - output_shape : tuple of ints - Shape tuple. 
- output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. - order : int, optional - The order of the spline interpolation, default is 3. - The order has to be in the range 0-5. - mode : str, optional - Points outside the boundaries of the input are filled according - to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). - Default is 'constant'. - cval : scalar, optional - Value used for points outside the boundaries of the input if - ``mode='constant'``. Default is 0.0 - prefilter : bool, optional - The parameter prefilter determines if the input is pre-filtered with - `spline_filter` before interpolation (necessary for spline - interpolation of order > 1). If False, it is assumed that the input is - already filtered. Default is True. - extra_arguments : tuple, optional - Extra arguments passed to `mapping`. - extra_keywords : dict, optional - Extra keywords passed to `mapping`. - - Returns - ------- - return_value : ndarray or None - The filtered input. If `output` is given as a parameter, None is - returned. - - See Also - -------- - map_coordinates, affine_transform, spline_filter1d - - Examples - -------- - >>> a = np.arange(12.).reshape((4, 3)) - >>> def shift_func(output_coords): - ... return (output_coords[0] - 0.5, output_coords[1] - 0.5) - ... - >>> sp.ndimage.geometric_transform(a, shift_func) - array([[ 0. , 0. , 0. ], - [ 0. , 1.362, 2.738], - [ 0. , 4.812, 6.187], - [ 0. 
, 8.263, 9.637]]) - - """ - if order < 0 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if output_shape is None: - output_shape = input.shape - if input.ndim < 1 or len(output_shape) < 1: - raise RuntimeError('input and output rank must be > 0') - mode = _extend_mode_to_code(mode) - if prefilter and order > 1: - filtered = spline_filter(input, order, output = numpy.float64) - else: - filtered = input - output, return_value = _ni_support._get_output(output, input, - shape=output_shape) - _nd_image.geometric_transform(filtered, mapping, None, None, None, - output, order, mode, cval, extra_arguments, extra_keywords) - return return_value - - -def map_coordinates(input, coordinates, output=None, order=3, - mode='constant', cval=0.0, prefilter=True): - """ - Map the input array to new coordinates by interpolation. - - The array of coordinates is used to find, for each point in the output, - the corresponding coordinates in the input. The value of the input at - those coordinates is determined by spline interpolation of the - requested order. - - The shape of the output is derived from that of the coordinate - array by dropping the first axis. The values of the array along - the first axis are the coordinates in the input array at which the - output value is found. - - Parameters - ---------- - input : ndarray - The input array. - coordinates : array_like - The coordinates at which `input` is evaluated. - output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. - order : int, optional - The order of the spline interpolation, default is 3. - The order has to be in the range 0-5. - mode : str, optional - Points outside the boundaries of the input are filled according - to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). - Default is 'constant'. 
- cval : scalar, optional - Value used for points outside the boundaries of the input if - ``mode='constant'``. Default is 0.0 - prefilter : bool, optional - The parameter prefilter determines if the input is pre-filtered with - `spline_filter` before interpolation (necessary for spline - interpolation of order > 1). If False, it is assumed that the input is - already filtered. Default is True. - - Returns - ------- - return_value : ndarray - The result of transforming the input. The shape of the output is - derived from that of `coordinates` by dropping the first axis. - - See Also - -------- - spline_filter, geometric_transform, scipy.interpolate - - Examples - -------- - >>> from scipy import ndimage - >>> a = np.arange(12.).reshape((4, 3)) - >>> a - array([[ 0., 1., 2.], - [ 3., 4., 5.], - [ 6., 7., 8.], - [ 9., 10., 11.]]) - >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) - [ 2. 7.] - - Above, the interpolated value of a[0.5, 0.5] gives output[0], while - a[2, 1] is output[1]. - - >>> inds = np.array([[0.5, 2], [0.5, 4]]) - >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) - array([ 2. 
, -33.3]) - >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') - array([ 2., 8.]) - >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) - array([ True, False], dtype=bool - - """ - if order < 0 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - coordinates = numpy.asarray(coordinates) - if numpy.iscomplexobj(coordinates): - raise TypeError('Complex type not supported') - output_shape = coordinates.shape[1:] - if input.ndim < 1 or len(output_shape) < 1: - raise RuntimeError('input and output rank must be > 0') - if coordinates.shape[0] != input.ndim: - raise RuntimeError('invalid shape for coordinate array') - mode = _extend_mode_to_code(mode) - if prefilter and order > 1: - filtered = spline_filter(input, order, output = numpy.float64) - else: - filtered = input - output, return_value = _ni_support._get_output(output, input, - shape=output_shape) - _nd_image.geometric_transform(filtered, None, coordinates, None, None, - output, order, mode, cval, None, None) - return return_value - - -def affine_transform(input, matrix, offset=0.0, output_shape=None, - output=None, order=3, - mode='constant', cval=0.0, prefilter=True): - """ - Apply an affine transformation. - - The given matrix and offset are used to find for each point in the - output the corresponding coordinates in the input by an affine - transformation. The value of the input at those coordinates is - determined by spline interpolation of the requested order. Points - outside the boundaries of the input are filled according to the given - mode. - - Parameters - ---------- - input : ndarray - The input array. - matrix : ndarray - The matrix must be two-dimensional or can also be given as a - one-dimensional sequence or array. In the latter case, it is assumed - that the matrix is diagonal. 
A more efficient algorithms is then - applied that exploits the separability of the problem. - offset : float or sequence, optional - The offset into the array where the transform is applied. If a float, - `offset` is the same for each axis. If a sequence, `offset` should - contain one value for each axis. - output_shape : tuple of ints, optional - Shape tuple. - output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. - order : int, optional - The order of the spline interpolation, default is 3. - The order has to be in the range 0-5. - mode : str, optional - Points outside the boundaries of the input are filled according - to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). - Default is 'constant'. - cval : scalar, optional - Value used for points outside the boundaries of the input if - ``mode='constant'``. Default is 0.0 - prefilter : bool, optional - The parameter prefilter determines if the input is pre-filtered with - `spline_filter` before interpolation (necessary for spline - interpolation of order > 1). If False, it is assumed that the input is - already filtered. Default is True. - - Returns - ------- - return_value : ndarray or None - The transformed input. If `output` is given as a parameter, None is - returned. 
- - """ - if order < 0 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if output_shape is None: - output_shape = input.shape - if input.ndim < 1 or len(output_shape) < 1: - raise RuntimeError('input and output rank must be > 0') - mode = _extend_mode_to_code(mode) - if prefilter and order > 1: - filtered = spline_filter(input, order, output = numpy.float64) - else: - filtered = input - output, return_value = _ni_support._get_output(output, input, - shape=output_shape) - matrix = numpy.asarray(matrix, dtype = numpy.float64) - if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: - raise RuntimeError('no proper affine matrix provided') - if matrix.shape[0] != input.ndim: - raise RuntimeError('affine matrix has wrong number of rows') - if matrix.ndim == 2 and matrix.shape[1] != output.ndim: - raise RuntimeError('affine matrix has wrong number of columns') - if not matrix.flags.contiguous: - matrix = matrix.copy() - offset = _ni_support._normalize_sequence(offset, input.ndim) - offset = numpy.asarray(offset, dtype = numpy.float64) - if offset.ndim != 1 or offset.shape[0] < 1: - raise RuntimeError('no proper offset provided') - if not offset.flags.contiguous: - offset = offset.copy() - if matrix.ndim == 1: - _nd_image.zoom_shift(filtered, matrix, offset, output, order, - mode, cval) - else: - _nd_image.geometric_transform(filtered, None, None, matrix, offset, - output, order, mode, cval, None, None) - return return_value - - -def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, - prefilter=True): - """ - Shift an array. - - The array is shifted using spline interpolation of the requested order. - Points outside the boundaries of the input are filled according to the - given mode. - - Parameters - ---------- - input : ndarray - The input array. - shift : float or sequence, optional - The shift along the axes. 
If a float, `shift` is the same for each - axis. If a sequence, `shift` should contain one value for each axis. - output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. - order : int, optional - The order of the spline interpolation, default is 3. - The order has to be in the range 0-5. - mode : str, optional - Points outside the boundaries of the input are filled according - to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). - Default is 'constant'. - cval : scalar, optional - Value used for points outside the boundaries of the input if - ``mode='constant'``. Default is 0.0 - prefilter : bool, optional - The parameter prefilter determines if the input is pre-filtered with - `spline_filter` before interpolation (necessary for spline - interpolation of order > 1). If False, it is assumed that the input is - already filtered. Default is True. - - Returns - ------- - return_value : ndarray or None - The shifted input. If `output` is given as a parameter, None is - returned. - - """ - if order < 0 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if input.ndim < 1: - raise RuntimeError('input and output rank must be > 0') - mode = _extend_mode_to_code(mode) - if prefilter and order > 1: - filtered = spline_filter(input, order, output = numpy.float64) - else: - filtered = input - output, return_value = _ni_support._get_output(output, input) - shift = _ni_support._normalize_sequence(shift, input.ndim) - shift = [-ii for ii in shift] - shift = numpy.asarray(shift, dtype = numpy.float64) - if not shift.flags.contiguous: - shift = shift.copy() - _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval) - return return_value - - -def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, - prefilter=True): - """ - Zoom an array. 
- - The array is zoomed using spline interpolation of the requested order. - - Parameters - ---------- - input : ndarray - The input array. - zoom : float or sequence, optional - The zoom factor along the axes. If a float, `zoom` is the same for each - axis. If a sequence, `zoom` should contain one value for each axis. - output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. - order : int, optional - The order of the spline interpolation, default is 3. - The order has to be in the range 0-5. - mode : str, optional - Points outside the boundaries of the input are filled according - to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). - Default is 'constant'. - cval : scalar, optional - Value used for points outside the boundaries of the input if - ``mode='constant'``. Default is 0.0 - prefilter : bool, optional - The parameter prefilter determines if the input is pre-filtered with - `spline_filter` before interpolation (necessary for spline - interpolation of order > 1). If False, it is assumed that the input is - already filtered. Default is True. - - Returns - ------- - return_value : ndarray or None - The zoomed input. If `output` is given as a parameter, None is - returned. 
- - """ - if order < 0 or order > 5: - raise RuntimeError('spline order not supported') - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if input.ndim < 1: - raise RuntimeError('input and output rank must be > 0') - mode = _extend_mode_to_code(mode) - if prefilter and order > 1: - filtered = spline_filter(input, order, output = numpy.float64) - else: - filtered = input - zoom = _ni_support._normalize_sequence(zoom, input.ndim) - output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)]) - - zoom_div = numpy.array(output_shape, float) - 1 - zoom = (numpy.array(input.shape) - 1) / zoom_div - - # Zooming to infinity is unpredictable, so just choose - # zoom factor 1 instead - zoom[numpy.isinf(zoom)] = 1 - - output, return_value = _ni_support._get_output(output, input, - shape=output_shape) - zoom = numpy.asarray(zoom, dtype = numpy.float64) - zoom = numpy.ascontiguousarray(zoom) - _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval) - return return_value - -def _minmax(coor, minc, maxc): - if coor[0] < minc[0]: - minc[0] = coor[0] - if coor[0] > maxc[0]: - maxc[0] = coor[0] - if coor[1] < minc[1]: - minc[1] = coor[1] - if coor[1] > maxc[1]: - maxc[1] = coor[1] - return minc, maxc - -def rotate(input, angle, axes=(1, 0), reshape=True, - output=None, order=3, - mode='constant', cval=0.0, prefilter=True): - """ - Rotate an array. - - The array is rotated in the plane defined by the two axes given by the - `axes` parameter using spline interpolation of the requested order. - - Parameters - ---------- - input : ndarray - The input array. - angle : float - The rotation angle in degrees. - axes : tuple of 2 ints, optional - The two axes that define the plane of rotation. Default is the first - two axes. - reshape : bool, optional - If `reshape` is true, the output shape is adapted so that the input - array is contained completely in the output. Default is True. 
- output : ndarray or dtype, optional - The array in which to place the output, or the dtype of the returned - array. - order : int, optional - The order of the spline interpolation, default is 3. - The order has to be in the range 0-5. - mode : str, optional - Points outside the boundaries of the input are filled according - to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). - Default is 'constant'. - cval : scalar, optional - Value used for points outside the boundaries of the input if - ``mode='constant'``. Default is 0.0 - prefilter : bool, optional - The parameter prefilter determines if the input is pre-filtered with - `spline_filter` before interpolation (necessary for spline - interpolation of order > 1). If False, it is assumed that the input is - already filtered. Default is True. - - Returns - ------- - return_value : ndarray or None - The rotated input. If `output` is given as a parameter, None is - returned. - - """ - input = numpy.asarray(input) - axes = list(axes) - rank = input.ndim - if axes[0] < 0: - axes[0] += rank - if axes[1] < 0: - axes[1] += rank - if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank: - raise RuntimeError('invalid rotation plane specified') - if axes[0] > axes[1]: - axes = axes[1], axes[0] - angle = numpy.pi / 180 * angle - m11 = math.cos(angle) - m12 = math.sin(angle) - m21 = -math.sin(angle) - m22 = math.cos(angle) - matrix = numpy.array([[m11, m12], - [m21, m22]], dtype = numpy.float64) - iy = input.shape[axes[0]] - ix = input.shape[axes[1]] - if reshape: - mtrx = numpy.array([[ m11, -m21], - [-m12, m22]], dtype = numpy.float64) - minc = [0, 0] - maxc = [0, 0] - coor = numpy.dot(mtrx, [0, ix]) - minc, maxc = _minmax(coor, minc, maxc) - coor = numpy.dot(mtrx, [iy, 0]) - minc, maxc = _minmax(coor, minc, maxc) - coor = numpy.dot(mtrx, [iy, ix]) - minc, maxc = _minmax(coor, minc, maxc) - oy = int(maxc[0] - minc[0] + 0.5) - ox = int(maxc[1] - minc[1] + 0.5) - else: - oy = input.shape[axes[0]] - ox = 
input.shape[axes[1]] - offset = numpy.zeros((2,), dtype = numpy.float64) - offset[0] = float(oy) / 2.0 - 0.5 - offset[1] = float(ox) / 2.0 - 0.5 - offset = numpy.dot(matrix, offset) - tmp = numpy.zeros((2,), dtype = numpy.float64) - tmp[0] = float(iy) / 2.0 - 0.5 - tmp[1] = float(ix) / 2.0 - 0.5 - offset = tmp - offset - output_shape = list(input.shape) - output_shape[axes[0]] = oy - output_shape[axes[1]] = ox - output_shape = tuple(output_shape) - output, return_value = _ni_support._get_output(output, input, - shape=output_shape) - if input.ndim <= 2: - affine_transform(input, matrix, offset, output_shape, output, - order, mode, cval, prefilter) - else: - coordinates = [] - size = numpy.product(input.shape,axis=0) - size //= input.shape[axes[0]] - size //= input.shape[axes[1]] - for ii in range(input.ndim): - if ii not in axes: - coordinates.append(0) - else: - coordinates.append(slice(None, None, None)) - iter_axes = range(input.ndim) - iter_axes.reverse() - iter_axes.remove(axes[0]) - iter_axes.remove(axes[1]) - os = (output_shape[axes[0]], output_shape[axes[1]]) - for ii in range(size): - ia = input[tuple(coordinates)] - oa = output[tuple(coordinates)] - affine_transform(ia, matrix, offset, os, oa, order, mode, - cval, prefilter) - for jj in iter_axes: - if coordinates[jj] < input.shape[jj] - 1: - coordinates[jj] += 1 - break - else: - coordinates[jj] = 0 - return return_value diff --git a/scipy-0.10.1/scipy/ndimage/io.py b/scipy-0.10.1/scipy/ndimage/io.py deleted file mode 100644 index 20ab6e4471..0000000000 --- a/scipy-0.10.1/scipy/ndimage/io.py +++ /dev/null @@ -1,43 +0,0 @@ -__all__ = ['imread'] - -from numpy import array - -def imread(fname, flatten=False): - """ - Load an image from file. - - Parameters - ---------- - fname : str - Image file name, e.g. ``test.jpg``. - flatten : bool, optional - If true, convert the output to grey-scale. Default is False. 
- - Returns - ------- - img_array : ndarray - The different colour bands/channels are stored in the - third dimension, such that a grey-image is MxN, an - RGB-image MxNx3 and an RGBA-image MxNx4. - - Raises - ------ - ImportError - If the Python Imaging Library (PIL) can not be imported. - - """ - try: - from PIL import Image - except ImportError: - raise ImportError("Could not import the Python Imaging Library (PIL)" - " required to load image files. Please refer to" - " http://pypi.python.org/pypi/PIL/ for installation" - " instructions.") - - fp = open(fname, "rb") - im = Image.open(fp) - if flatten: - im = im.convert('F') - result = array(im) - fp.close() - return result diff --git a/scipy-0.10.1/scipy/ndimage/measurements.py b/scipy-0.10.1/scipy/ndimage/measurements.py deleted file mode 100644 index 3a965ee911..0000000000 --- a/scipy-0.10.1/scipy/ndimage/measurements.py +++ /dev/null @@ -1,1257 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import numpy -import numpy as np -import _ni_support -import _nd_image -import morphology - -__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean', - 'variance', 'standard_deviation', 'minimum', 'maximum', 'median', - 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass', - 'histogram', 'watershed_ift'] - - -def label(input, structure=None, output=None): - """ - Label features in an array. - - Parameters - ---------- - input : array_like - An array-like object to be labeled. Any non-zero values in `input` are - counted as features and zero values are considered the background. - structure : array_like, optional - A structuring element that defines feature connections. - `structure` must be symmetric. If no structuring element is provided, - one is automatically generated with a squared connectivity equal to - one. 
That is, for a 2-D `input` array, the default structuring element - is:: - - [[0,1,0], - [1,1,1], - [0,1,0]] - - output : (None, data-type, array_like), optional - If `output` is a data type, it specifies the type of the resulting - labeled feature array - If `output` is an array-like object, then `output` will be updated - with the labeled features from this function - - Returns - ------- - labeled_array : array_like - An array-like object where each unique feature has a unique value - num_features : int - How many objects were found - - If `output` is None or a data type, this function returns a tuple, - (`labeled_array`, `num_features`). - - If `output` is an array, then it will be updated with values in - `labeled_array` and only `num_features` will be returned by this function. - - - See Also - -------- - find_objects : generate a list of slices for the labeled features (or - objects); useful for finding features' position or - dimensions - - Examples - -------- - - Create an image with some features, then label it using the default - (cross-shaped) structuring element: - - >>> a = array([[0,0,1,1,0,0], - ... [0,0,0,1,0,0], - ... [1,1,0,0,1,0], - ... 
[0,0,0,1,0,0]]) - >>> labeled_array, num_features = label(a) - - Each of the 4 features are labeled with a different integer: - - >>> print num_features - 4 - >>> print labeled_array - array([[0, 0, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0], - [2, 2, 0, 0, 3, 0], - [0, 0, 0, 4, 0, 0]]) - - Generate a structuring element that will consider features connected even - if they touch diagonally: - - >>> s = generate_binary_structure(2,2) - - or, - - >>> s = [[1,1,1], - [1,1,1], - [1,1,1]] - - Label the image using the new structuring element: - - >>> labeled_array, num_features = label(a, structure=s) - - Show the 2 labeled features (note that features 1, 3, and 4 from above are - now considered a single feature): - - >>> print num_features - 2 - >>> print labeled_array - array([[0, 0, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0], - [2, 2, 0, 0, 1, 0], - [0, 0, 0, 1, 0, 0]]) - - """ - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if structure is None: - structure = morphology.generate_binary_structure(input.ndim, 1) - structure = numpy.asarray(structure, dtype = bool) - if structure.ndim != input.ndim: - raise RuntimeError('structure and input must have equal rank') - for ii in structure.shape: - if ii != 3: - raise RuntimeError('structure dimensions must be equal to 3') - if not structure.flags.contiguous: - structure = structure.copy() - if isinstance(output, numpy.ndarray): - if output.dtype.type != numpy.int32: - raise RuntimeError('output type must be int32') - else: - output = numpy.int32 - output, return_value = _ni_support._get_output(output, input) - max_label = _nd_image.label(input, structure, output) - if return_value is None: - return max_label - else: - return return_value, max_label - -def find_objects(input, max_label=0): - """ - Find objects in a labeled array. - - Parameters - ---------- - input : ndarray of ints - Array containing objects defined by different labels. 
- max_label : int, optional - Maximum label to be searched for in `input`. If max_label is not - given, the positions of all objects are returned. - - Returns - ------- - object_slices : list of slices - A list of slices, one for the extent of each labeled object. - Slices correspond to the minimal parallelepiped that contains - the object. If a number is missing, None is returned instead - of a slice. - - See Also - -------- - label, center_of_mass - - Notes - ----- - This function is very useful for isolating a volume of interest inside - a 3-D array, that cannot be "seen through". - - Examples - -------- - >>> a = np.zeros((6,6), dtype=np.int) - >>> a[2:4, 2:4] = 1 - >>> a[4, 4] = 1 - >>> a[:2, :3] = 2 - >>> a[0, 5] = 3 - >>> a - array([[2, 2, 2, 0, 0, 3], - [2, 2, 2, 0, 0, 0], - [0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 0]]) - >>> ndimage.find_objects(a) - [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))] - >>> ndimage.find_objects(a, max_label=2) - [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))] - >>> ndimage.find_objects(a == 1, max_label=2) - [(slice(2, 5, None), slice(2, 5, None)), None] - - """ - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if max_label < 1: - max_label = input.max() - return _nd_image.find_objects(input, max_label) - -def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False): - """ - Roughly equivalent to [func(input[labels == i]) for i in index]. - - Sequentially applies an arbitrary function (that works on array_like input) - to subsets of an n-D image array specified by `labels` and `index`. - The option exists to provide the function with positional parameters as the - second argument. - - Parameters - ---------- - input : array_like - Data from which to select `labels` to process. 
- labels : array_like, or None - Labels to objects in `input`. - If not None, array must be same shape as `input`. - If None, `func` is applied to raveled `input`. - index : int, sequence of int, or None - Subset of `labels` to which to apply `func`. - If a scalar, a single value is returned. - If None, `func` is applied to all non-zero values of `labels`. - func : callable - Python function to apply to `labels` from `input`. - out_dtype : dtype - Dtype to use for `result`. - default : int, float, or None - Default return value when a element of `index` does not exist - in `labels`. - pass_positions : bool, optional - If True, pass linear indices to `func` as a second argument. - Default is False. - - Returns - ------- - result : ndarray - Result of applying `func` to each of `labels` to `input` in `index`. - - Examples - -------- - >>> a = np.array([[1, 2, 0, 0], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - >>> from scipy import ndimage - >>> lbl, nlbl = ndimage.label(a) - >>> lbls = np.arange(1, nlbl+1) - >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0) - array([ 2.75, 5.5 , 6. ]) - - Falling back to `default`: - - >>> lbls = np.arange(1, nlbl+2) - >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1) - array([ 2.75, 5.5 , 6. , -1. ]) - - Passing positions: - - >>> def fn(val, pos): - ... print "fn says:", val, ":", pos - ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum()) - ... 
- >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True) - fn says: [1 2 5 3] : [0 1 4 5] - fn says: [4 7] : [7 11] - fn says: [9 3] : [12 13] - array([ 11., 11., -12.]) - - """ - - as_scalar = numpy.isscalar(index) - input = numpy.asarray(input) - - if pass_positions: - positions = numpy.arange(input.size).reshape(input.shape) - - if labels is None: - if index is not None: - raise ValueError("index without defined labels") - if not pass_positions: - return func(input.ravel()) - else: - return func(input.ravel(), positions.ravel()) - - try: - input, labels = numpy.broadcast_arrays(input, labels) - except ValueError: - raise ValueError("input and labels must have the same shape " - "(excepting dimensions with width 1)") - - if index is None: - if not pass_positions: - return func(input[labels > 0]) - else: - return func(input[labels > 0], positions[labels > 0]) - - index = numpy.atleast_1d(index) - if np.any(index.astype(labels.dtype).astype(index.dtype) != index): - raise ValueError("Cannot convert index values from <%s> to <%s> " - "(labels' type) without loss of precision" % - (index.dtype, labels.dtype)) - index = index.astype(labels.dtype) - - # optimization: find min/max in index, and select those parts of labels, input, and positions - lo = index.min() - hi = index.max() - mask = (labels >= lo) & (labels <= hi) - - # this also ravels the arrays - labels = labels[mask] - input = input[mask] - if pass_positions: - positions = positions[mask] - - # sort everything by labels - label_order = labels.argsort() - labels = labels[label_order] - input = input[label_order] - if pass_positions: - positions = positions[label_order] - - index_order = index.argsort() - sorted_index = index[index_order] - - def do_map(inputs, output): - '''labels must be sorted''' - - nlabels = labels.size - nidx = sorted_index.size - - # Find boundaries for each stretch of constant labels - # This could be faster, but we already paid N log N to sort labels. 
- lo = numpy.searchsorted(labels, sorted_index, side='left') - hi = numpy.searchsorted(labels, sorted_index, side='right') - - for i, l, h in zip(range(nidx), lo, hi): - if l == h: - continue - idx = sorted_index[i] - output[i] = func(*[inp[l:h] for inp in inputs]) - - temp = numpy.empty(index.shape, out_dtype) - temp[:] = default - if not pass_positions: - do_map([input], temp) - else: - do_map([input, positions], temp) - output = numpy.zeros(index.shape, out_dtype) - output[index_order] = temp - - if as_scalar: - output = output[0] - - return output - -def _safely_castable_to_int(dt): - """Test whether the numpy data type `dt` can be safely cast to an int.""" - int_size = np.dtype(int).itemsize - safe = ((np.issubdtype(dt, int) and dt.itemsize <= int_size) or - (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)) - return safe - -def _stats(input, labels=None, index=None, centered=False): - '''Count, sum, and optionally compute (sum - centre)^2 of input by label - - Parameters - ---------- - input : array_like, n-dimensional - The input data to be analyzed. - labels : array_like (n-dimensional) or None - The labels of the data in `input`. This array must be broadcast - compatible with `input`; typically it is the same shape as `input`. - If `labels` is None, all nonzero values in `input` are treated as - the single labeled group. - index: label, sequence of labels, or None - These are the labels of the groups for which the stats are computed. - If `index` is None, the stats are computed for the single group where - `labels` is greater than 0. - centered: bool - If True, the centered sum of squares for each labeled group is - also returned. - - Return value - ------------ - counts: - The number of elements in each labeled group. - sums: - The sums of the values in each labeled group. - sums_c: - The sums of mean-centered squares of the values in each labeled group. - This is only returned if `centered` is True. 
- ''' - - def single_group(vals): - if centered: - vals_c = vals - vals.mean() - return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum() - else: - return vals.size, vals.sum() - - if labels is None: - return single_group(input) - - # ensure input and labels match sizes - input, labels = numpy.broadcast_arrays(input, labels) - - if index is None: - return single_group(input[labels > 0]) - - if numpy.isscalar(index): - return single_group(input[labels == index]) - - def _sum_centered(labels): - # `labels` is expected to be an ndarray with the same shape as `input`. - # It must contain the label indices (which are not necessarily the labels - # themselves). - means = sums / counts - centered_input = input - means[labels] - # bincount expects 1d inputs, so we ravel the arguments. - bc = numpy.bincount(labels.ravel(), - weights=(centered_input * \ - centered_input.conjugate()).ravel()) - return bc - - # Remap labels to unique integers if necessary, or if the largest - # label is larger than the number of values. - - if (not _safely_castable_to_int(labels.dtype) or - labels.min() < 0 or labels.max() > labels.size): - # Use numpy.unique to generate the label indices. `new_labels` will - # be 1-d, but it should be interpreted as the flattened n-d array of - # label indices. - unique_labels, new_labels = numpy.unique(labels, return_inverse=True) - counts = numpy.bincount(new_labels) - sums = numpy.bincount(new_labels, weights=input.ravel()) - if centered: - # Compute the sum of the mean-centered squares. - # We must reshape new_labels to the n-d shape of `input` before - # passing it _sum_centered. - sums_c = _sum_centered(new_labels.reshape(labels.shape)) - idxs = numpy.searchsorted(unique_labels, index) - # make all of idxs valid - idxs[idxs >= unique_labels.size] = 0 - found = (unique_labels[idxs] == index) - else: - # labels are an integer type allowed by bincount, and there aren't too - # many, so call bincount directly. 
- counts = numpy.bincount(labels.ravel()) - sums = numpy.bincount(labels.ravel(), weights=input.ravel()) - if centered: - sums_c = _sum_centered(labels) - # make sure all index values are valid - idxs = numpy.asanyarray(index, numpy.int).copy() - found = (idxs >= 0) & (idxs < counts.size) - idxs[~found] = 0 - - counts = counts[idxs] - counts[~found] = 0 - sums = sums[idxs] - sums[~found] = 0 - - if not centered: - return (counts, sums) - else: - sums_c = sums_c[idxs] - sums_c[~found] = 0 - return (counts, sums, sums_c) - - -def sum(input, labels=None, index=None): - """ - Calculate the sum of the values of the array. - - Parameters - ---------- - input : array_like - Values of `input` inside the regions defined by `labels` - are summed together. - labels : array_like of ints, optional - Assign labels to the values of the array. Has to have the same shape as - `input`. - index : scalar or array_like, optional - A single label number or a sequence of label numbers of - the objects to be measured. - - Returns - ------- - output : list - A list of the sums of the values of `input` inside the regions - defined by `labels`. - - See also - -------- - mean, median - - Examples - -------- - >>> input = [0,1,2,3] - >>> labels = [1,1,2,2] - >>> sum(input, labels, index=[1,2]) - [1.0, 5.0] - - """ - count, sum = _stats(input, labels, index) - return sum - -def mean(input, labels=None, index=None): - """ - Calculate the mean of the values of an array at labels. - - Parameters - ---------- - input : array_like - Array on which to compute the mean of elements over distinct - regions. - labels : array_like, optional - Array of labels of same shape, or broadcastable to the same shape as - `input`. All elements sharing the same label form one region over - which the mean of the elements is computed. - index : int or sequence of ints, optional - Labels of the objects over which the mean is to be computed. 
- Default is None, in which case the mean for all values where label is - greater than 0 is calculated. - - Returns - ------- - out : list - Sequence of same length as `index`, with the mean of the different - regions labeled by the labels in `index`. - - See also - -------- - ndimage.variance, ndimage.standard_deviation, ndimage.minimum, - ndimage.maximum, ndimage.sum - ndimage.label - - Examples - -------- - >>> a = np.arange(25).reshape((5,5)) - >>> labels = np.zeros_like(a) - >>> labels[3:5,3:5] = 1 - >>> index = np.unique(labels) - >>> labels - array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 1, 1], - [0, 0, 0, 1, 1]]) - >>> index - array([0, 1]) - >>> ndimage.mean(a, labels=labels, index=index) - [10.285714285714286, 21.0] - - """ - - count, sum = _stats(input, labels, index) - return sum / numpy.asanyarray(count).astype(numpy.float) - -def variance(input, labels=None, index=None): - """ - Calculate the variance of the values of an n-D image array, optionally at - specified sub-regions. - - Parameters - ---------- - input : array_like - Nd-image data to process. - labels : array_like, optional - Labels defining sub-regions in `input`. - If not None, must be same shape as `input`. - index : int or sequence of ints, optional - `labels` to include in output. If None (default), all values where - `labels` is non-zero are used. - - Returns - ------- - vars : float or ndarray - Values of variance, for each sub-region if `labels` and `index` are - specified. - - See Also - -------- - label, standard_deviation, maximum, minimum, extrema - - Examples - -------- - >>> a = np.array([[1, 2, 0, 0], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - >>> from scipy import ndimage - >>> ndimage.variance(a) - 7.609375 - - Features to process can be specified using `labels` and `index`: - - >>> lbl, nlbl = ndimage.label(a) - >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1)) - array([ 2.1875, 2.25 , 9. 
]) - - If no index is given, all non-zero `labels` are processed: - - >>> ndimage.variance(a, lbl) - 6.1875 - - """ - - count, sum, sum_c_sq = _stats(input, labels, index, centered=True) - - return sum_c_sq / np.asanyarray(count).astype(float) - -def standard_deviation(input, labels=None, index=None): - """ - Calculate the standard deviation of the values of an n-D image array, - optionally at specified sub-regions. - - Parameters - ---------- - input : array_like - Nd-image data to process. - labels : array_like, optional - Labels to identify sub-regions in `input`. - If not None, must be same shape as `input`. - index : int or sequence of ints, optional - `labels` to include in output. If None (default), all values where - `labels` is non-zero are used. - - Returns - ------- - std : float or ndarray - Values of standard deviation, for each sub-region if `labels` and - `index` are specified. - - See Also - -------- - label, variance, maximum, minimum, extrema - - Examples - -------- - >>> a = np.array([[1, 2, 0, 0], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - >>> from scipy import ndimage - >>> ndimage.standard_deviation(a) - 2.7585095613392387 - - Features to process can be specified using `labels` and `index`: - - >>> lbl, nlbl = ndimage.label(a) - >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1)) - array([ 1.479, 1.5 , 3. 
]) - - If no index is given, non-zero `labels` are processed: - - >>> ndimage.standard_deviation(a, lbl) - 2.4874685927665499 - - """ - - return numpy.sqrt(variance(input, labels, index)) - -def _select(input, labels=None, index=None, find_min=False, find_max=False, - find_min_positions=False, find_max_positions=False, - find_median=False): - """Returns min, max, or both, plus their positions (if requested), and - median.""" - - input = numpy.asanyarray(input) - - find_positions = find_min_positions or find_max_positions - positions = None - if find_positions: - positions = numpy.arange(input.size).reshape(input.shape) - - def single_group(vals, positions): - result = [] - if find_min: - result += [vals.min()] - if find_min_positions: - result += [positions[vals == vals.min()][0]] - if find_max: - result += [vals.max()] - if find_max_positions: - result += [positions[vals == vals.max()][0]] - if find_median: - result += [numpy.median(vals)] - return result - - if labels is None: - return single_group(input, positions) - - # ensure input and labels match sizes - input, labels = numpy.broadcast_arrays(input, labels) - - if index is None: - mask = (labels > 0) - masked_positions = None - if find_positions: - masked_positions = positions[mask] - return single_group(input[mask], masked_positions) - - if numpy.isscalar(index): - mask = (labels == index) - masked_positions = None - if find_positions: - masked_positions = positions[mask] - return single_group(input[mask], masked_positions) - - # remap labels to unique integers if necessary, or if the largest - # label is larger than the number of values. 
- if (not _safely_castable_to_int(labels.dtype) or - labels.min() < 0 or labels.max() > labels.size): - # remap labels, and indexes - unique_labels, labels = numpy.unique(labels, return_inverse=True) - idxs = numpy.searchsorted(unique_labels, index) - - # make all of idxs valid - idxs[idxs >= unique_labels.size] = 0 - found = (unique_labels[idxs] == index) - else: - # labels are an integer type, and there aren't too many. - idxs = numpy.asanyarray(index, numpy.int).copy() - found = (idxs >= 0) & (idxs <= labels.max()) - - idxs[~ found] = labels.max() + 1 - - if find_median: - order = numpy.lexsort((input.ravel(), labels.ravel())) - else: - order = input.ravel().argsort() - input = input.ravel()[order] - labels = labels.ravel()[order] - if find_positions: - positions = positions.ravel()[order] - - result = [] - if find_min: - mins = numpy.zeros(labels.max() + 2, input.dtype) - mins[labels[::-1]] = input[::-1] - result += [mins[idxs]] - if find_min_positions: - minpos = numpy.zeros(labels.max() + 2) - minpos[labels[::-1]] = positions[::-1] - result += [minpos[idxs]] - if find_max: - maxs = numpy.zeros(labels.max() + 2, input.dtype) - maxs[labels] = input - result += [maxs[idxs]] - if find_max_positions: - maxpos = numpy.zeros(labels.max() + 2) - maxpos[labels] = positions - result += [maxpos[idxs]] - if find_median: - locs = numpy.arange(len(labels)) - lo = numpy.zeros(labels.max() + 2, numpy.int) - lo[labels[::-1]] = locs[::-1] - hi = numpy.zeros(labels.max() + 2, numpy.int) - hi[labels] = locs - lo = lo[idxs] - hi = hi[idxs] - # lo is an index to the lowest value in input for each label, - # hi is an index to the largest value. - # move them to be either the same ((hi - lo) % 2 == 0) or next - # to each other ((hi - lo) % 2 == 1), then average. 
- step = (hi - lo) // 2 - lo += step - hi -= step - result += [(input[lo] + input[hi]) / 2.0] - - return result - -def minimum(input, labels=None, index=None): - """ - Calculate the minimum of the values of an array over labeled regions. - - Parameters - ---------- - input: array_like - Array_like of values. For each region specified by `labels`, the - minimal values of `input` over the region is computed. - labels: array_like, optional - An array_like of integers marking different regions over which the - minimum value of `input` is to be computed. `labels` must have the - same shape as `input`. If `labels` is not specified, the minimum - over the whole array is returned. - index: array_like, optional - A list of region labels that are taken into account for computing the - minima. If index is None, the minimum over all elements where `labels` - is non-zero is returned. - - Returns - ------- - output : float or list of floats - List of minima of `input` over the regions determined by `labels` and - whose index is in `index`. If `index` or `labels` are not specified, a - float is returned: the minimal value of `input` if `labels` is None, - and the minimal value of elements where `labels` is greater than zero - if `index` is None. - - See also - -------- - label, maximum, median, minimum_position, extrema, sum, mean, variance, - standard_deviation - - Notes - ----- - The function returns a Python list and not a Numpy array, use - `np.array` to convert the list to an array. - - Examples - -------- - >>> a = np.array([[1, 2, 0, 0], - ... [5, 3, 0, 4], - ... [0, 0, 0, 7], - ... 
[9, 3, 0, 0]]) - >>> labels, labels_nb = ndimage.label(a) - >>> labels - array([[1, 1, 0, 0], - [1, 1, 0, 2], - [0, 0, 0, 2], - [3, 3, 0, 0]]) - >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1)) - [1.0, 4.0, 3.0] - >>> ndimage.minimum(a) - 0.0 - >>> ndimage.minimum(a, labels=labels) - 1.0 - - """ - return _select(input, labels, index, find_min=True)[0] - -def maximum(input, labels=None, index=None): - """ - Calculate the maximum of the values of an array over labeled regions. - - Parameters - ---------- - input : array_like - Array_like of values. For each region specified by `labels`, the - maximal values of `input` over the region is computed. - labels : array_like, optional - An array of integers marking different regions over which the - maximum value of `input` is to be computed. `labels` must have the - same shape as `input`. If `labels` is not specified, the maximum - over the whole array is returned. - index : array_like, optional - A list of region labels that are taken into account for computing the - maxima. If index is None, the maximum over all elements where `labels` - is non-zero is returned. - - Returns - ------- - output : float or list of floats - List of maxima of `input` over the regions determined by `labels` and - whose index is in `index`. If `index` or `labels` are not specified, a - float is returned: the maximal value of `input` if `labels` is None, - and the maximal value of elements where `labels` is greater than zero - if `index` is None. - - See also - -------- - label, minimum, median, maximum_position, extrema, sum, mean, variance, - standard_deviation - - Notes - ----- - The function returns a Python list and not a Numpy array, use - `np.array` to convert the list to an array. 
- - Examples - -------- - >>> a = np.arange(16).reshape((4,4)) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - >>> labels = np.zeros_like(a) - >>> labels[:2,:2] = 1 - >>> labels[2:, 1:3] = 2 - >>> labels - array([[1, 1, 0, 0], - [1, 1, 0, 0], - [0, 2, 2, 0], - [0, 2, 2, 0]]) - >>> from scipy import ndimage - >>> ndimage.maximum(a) - 15.0 - >>> ndimage.maximum(a, labels=labels, index=[1,2]) - [5.0, 14.0] - >>> ndimage.maximum(a, labels=labels) - 14.0 - - >>> b = np.array([[1, 2, 0, 0], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - >>> labels, labels_nb = ndimage.label(b) - >>> labels - array([[1, 1, 0, 0], - [1, 1, 0, 2], - [0, 0, 0, 2], - [3, 3, 0, 0]]) - >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1)) - [5.0, 7.0, 9.0] - - """ - return _select(input, labels, index, find_max=True)[0] - -def median(input, labels=None, index=None): - """ - Calculate the median of the values of an array over labeled regions. - - Parameters - ---------- - input: array_like - Array_like of values. For each region specified by `labels`, the - median value of `input` over the region is computed. - labels: array_like, optional - An array_like of integers marking different regions over which the - median value of `input` is to be computed. `labels` must have the - same shape as `input`. If `labels` is not specified, the median - over the whole array is returned. - index: array_like, optional - A list of region labels that are taken into account for computing the - medians. If index is None, the minimum over all elements where `labels` - is non-zero is returned. - - Returns - ------- - output : float or list of floats - List of medians of `input` over the regions determined by `labels` and - whose index is in `index`. 
If `index` or `labels` are not specified, a - float is returned: the median value of `input` if `labels` is None, - and the median value of elements where `labels` is greater than zero - if `index` is None. - - See also - -------- - label, minimum, maximum, extrema, sum, mean, variance, standard_deviation - - Notes - ----- - The function returns a Python list and not a Numpy array, use - `np.array` to convert the list to an array. - - Examples - -------- - >>> a = np.array([[1, 2, 0, 1], - ... [5, 3, 0, 4], - ... [0, 0, 0, 7], - ... [9, 3, 0, 0]]) - >>> labels, labels_nb = ndimage.label(a) - >>> labels - array([[1, 1, 0, 2], - [1, 1, 0, 2], - [0, 0, 0, 2], - [3, 3, 0, 0]]) - >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1)) - [2.5, 4.0, 6.0] - >>> ndimage.median(a) - 1.0 - >>> ndimage.median(a, labels=labels) - 3.0 - - """ - return _select(input, labels, index, find_median=True)[0] - -def minimum_position(input, labels=None, index=None): - """Find the positions of the minimums of the values of an array at labels. - - Labels must be None or an array of the same dimensions as the input. - - Index must be None, a single label or sequence of labels. If - none, all values where label is greater than zero are used. - """ - - dims = numpy.array(numpy.asarray(input).shape) - # see numpy.unravel_index to understand this line. - dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] - - result = _select(input, labels, index, find_min_positions=True)[0] - - if numpy.isscalar(result): - return tuple((result // dim_prod) % dims) - - return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] - -def maximum_position(input, labels=None, index=None): - """Find the positions of the maximums of the values of an array at labels. - - Labels must be None or an array of the same dimensions as the input. - - Index must be None, a single label or sequence of labels. If - none, all values where label is greater than zero are used. 
- """ - - dims = numpy.array(numpy.asarray(input).shape) - # see numpy.unravel_index to understand this line. - dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] - - result = _select(input, labels, index, find_max_positions=True)[0] - - if numpy.isscalar(result): - return tuple((result // dim_prod) % dims) - - return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] - -def extrema(input, labels=None, index=None): - """ - Calculate the minimums and maximums of the values of an array - at labels, along with their positions. - - Parameters - ---------- - input : ndarray - Nd-image data to process. - labels : ndarray, optional - Labels of features in input. - If not None, must be same shape as `input`. - index : int or sequence of ints, optional - Labels to include in output. If None (default), all values where - non-zero `labels` are used. - - Returns - ------- - minimums, maximums : int or ndarray - Values of minimums and maximums in each feature. - min_positions, max_positions : tuple or list of tuples - Each tuple gives the n-D coordinates of the corresponding minimum - or maximum. - - See Also - -------- - maximum, minimum, maximum_position, minimum_position, center_of_mass - - Examples - -------- - >>> a = np.array([[1, 2, 0, 0], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - >>> from scipy import ndimage - >>> ndimage.extrema(a) - (0, 9, (0, 2), (3, 0)) - - Features to process can be specified using `labels` and `index`: - - >>> lbl, nlbl = ndimage.label(a) - >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1)) - (array([1, 4, 3]), - array([5, 7, 9]), - [(0.0, 0.0), (1.0, 3.0), (3.0, 1.0)], - [(1.0, 0.0), (2.0, 3.0), (3.0, 0.0)]) - - If no index is given, non-zero `labels` are processed: - - >>> ndimage.extrema(a, lbl) - (1, 9, (0, 0), (3, 0)) - - """ - - dims = numpy.array(numpy.asarray(input).shape) - # see numpy.unravel_index to understand this line. 
- dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] - - minimums, min_positions, maximums, max_positions = _select(input, labels, index, - find_min=True, find_max=True, - find_min_positions=True, find_max_positions=True) - - - if numpy.isscalar(minimums): - return minimums, maximums, tuple((min_positions // dim_prod) % dims), tuple((max_positions // dim_prod) % dims) - - min_positions = [tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims] - max_positions = [tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims] - - return minimums, maximums, min_positions, max_positions - -def center_of_mass(input, labels=None, index=None): - """ - Calculate the center of mass of the values of an array at labels. - - Parameters - ---------- - input : ndarray - Data from which to calculate center-of-mass. - labels : ndarray, optional - Labels for objects in `input`, as generated by ndimage.labels. - Dimensions must be the same as `input`. - index : int or sequence of ints, optional - Labels for which to calculate centers-of-mass. If not specified, - all labels greater than zero are used. - - Returns - ------- - centerofmass : tuple, or list of tuples - Co-ordinates of centers-of-masses. 
- - Examples - -------- - >>> a = np.array(([0,0,0,0], - [0,1,1,0], - [0,1,1,0], - [0,1,1,0])) - >>> from scipy import ndimage - >>> ndimage.measurements.center_of_mass(a) - (2.0, 1.5) - - Calculation of multiple objects in an image - - >>> b = np.array(([0,1,1,0], - [0,1,0,0], - [0,0,0,0], - [0,0,1,1], - [0,0,1,1])) - >>> lbl = ndimage.label(b)[0] - >>> ndimage.measurements.center_of_mass(b, lbl, [1,2]) - [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)] - - """ - - normalizer = sum(input, labels, index) - grids = numpy.ogrid[[slice(0, i) for i in input.shape]] - - results = [sum(input * grids[dir].astype(float), labels, index) / normalizer for dir in range(input.ndim)] - - if numpy.isscalar(results[0]): - return tuple(results) - - return [tuple(v) for v in numpy.array(results).T] - -def histogram(input, min, max, bins, labels=None, index=None): - """ - Calculate the histogram of the values of an array, optionally at labels. - - Histogram calculates the frequency of values in an array within bins - determined by `min`, `max`, and `bins`. `Labels` and `index` can limit - the scope of the histogram to specified sub-regions within the array. - - Parameters - ---------- - input : array_like - Data for which to calculate histogram. - min, max : int - Minimum and maximum values of range of histogram bins. - bins : int - Number of bins. - labels : array_like, optional - Labels for objects in `input`. - If not None, must be same shape as `input`. - index : int or sequence of ints, optional - Label or labels for which to calculate histogram. If None, all values - where label is greater than zero are used - - Returns - ------- - hist : ndarray - Histogram counts. - - Examples - -------- - >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ], - [ 0. , 0.7778, 0. , 0. ], - [ 0. , 0. , 0. , 0. ], - [ 0. , 0. , 0.7181, 0.2787], - [ 0. , 0. 
, 0.6573, 0.3094]]) - >>> from scipy import ndimage - >>> ndimage.measurements.histogram(a, 0, 1, 10) - array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0]) - - With labels and no indices, non-zero elements are counted: - - >>> lbl, nlbl = ndimage.label(a) - >>> ndimage.measurements.histogram(a, 0, 1, 10, lbl) - array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0]) - - Indices can be used to count only certain objects: - - >>> ndimage.measurements.histogram(a, 0, 1, 10, lbl, 2) - array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0]) - - """ - - _bins = numpy.linspace(min, max, bins + 1) - - def _hist(vals): - return numpy.histogram(vals, _bins)[0] - - return labeled_comprehension(input, labels, index, _hist, object, None, pass_positions=False) - -def watershed_ift(input, markers, structure=None, output=None): - """Apply watershed from markers using a iterative forest transform - algorithm. - - Negative markers are considered background markers which are - processed after the other markers. A structuring element defining - the connectivity of the object can be provided. If none is - provided, an element is generated with a squared connectivity equal - to one. An output array can optionally be provided. 
- """ - input = numpy.asarray(input) - if input.dtype.type not in [numpy.uint8, numpy.uint16]: - raise TypeError('only 8 and 16 unsigned inputs are supported') - if structure is None: - structure = morphology.generate_binary_structure(input.ndim, 1) - structure = numpy.asarray(structure, dtype = bool) - if structure.ndim != input.ndim: - raise RuntimeError('structure and input must have equal rank') - for ii in structure.shape: - if ii != 3: - raise RuntimeError('structure dimensions must be equal to 3') - if not structure.flags.contiguous: - structure = structure.copy() - markers = numpy.asarray(markers) - if input.shape != markers.shape: - raise RuntimeError('input and markers must have equal shape') - - integral_types = [numpy.int0, - numpy.int8, - numpy.int16, - numpy.int32, - numpy.int_, - numpy.int64, - numpy.intc, - numpy.intp] - - if markers.dtype.type not in integral_types: - raise RuntimeError('marker should be of integer type') - if isinstance(output, numpy.ndarray): - if output.dtype.type not in integral_types: - raise RuntimeError('output should be of integer type') - else: - output = markers.dtype - output, return_value = _ni_support._get_output(output, input) - _nd_image.watershed_ift(input, markers, structure, output) - return return_value diff --git a/scipy-0.10.1/scipy/ndimage/morphology.py b/scipy-0.10.1/scipy/ndimage/morphology.py deleted file mode 100644 index 297509fe66..0000000000 --- a/scipy-0.10.1/scipy/ndimage/morphology.py +++ /dev/null @@ -1,2209 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. 
Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import numpy -import _ni_support -import _nd_image -import filters - -__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', - 'binary_dilation', 'binary_opening', 'binary_closing', - 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', - 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', - 'morphological_gradient', 'morphological_laplace', 'white_tophat', - 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', - 'distance_transform_edt'] - - -def _center_is_true(structure, origin): - structure = numpy.array(structure) - coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, - origin)]) - return bool(structure[coor]) - -def iterate_structure(structure, iterations, origin = None): - """ - Iterate a structure by dilating it with itself. 
- - Parameters - ---------- - - structure : array_like - Structuring element (an array of bools, for example), to be dilated with - itself. - - iterations : int - number of dilations performed on the structure with itself - - origin : optional - If origin is None, only the iterated structure is returned. If - not, a tuple of the iterated structure and the modified origin is - returned. - - - Returns - ------- - - output: ndarray of bools - A new structuring element obtained by dilating `structure` - (`iterations` - 1) times with itself. - - See also - -------- - - generate_binary_structure - - Examples - -------- - - >>> struct = ndimage.generate_binary_structure(2, 1) - >>> struct.astype(int) - array([[0, 1, 0], - [1, 1, 1], - [0, 1, 0]]) - >>> ndimage.iterate_structure(struct, 2).astype(int) - array([[0, 0, 1, 0, 0], - [0, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - [0, 1, 1, 1, 0], - [0, 0, 1, 0, 0]]) - >>> ndimage.iterate_structure(struct, 3).astype(int) - array([[0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0]]) - - """ - structure = numpy.asarray(structure) - if iterations < 2: - return structure.copy() - ni = iterations - 1 - shape = [ii + ni * (ii - 1) for ii in structure.shape] - pos = [ni * (structure.shape[ii] / 2) for ii in range(len(shape))] - slc = [slice(pos[ii], pos[ii] + structure.shape[ii], None) - for ii in range(len(shape))] - out = numpy.zeros(shape, bool) - out[slc] = structure != 0 - out = binary_dilation(out, structure, iterations = ni) - if origin is None: - return out - else: - origin = _ni_support._normalize_sequence(origin, structure.ndim) - origin = [iterations * o for o in origin] - return out, origin - -def generate_binary_structure(rank, connectivity): - """ - Generate a binary structure for binary morphological operations. 
- - Parameters - ---------- - - rank : int - Number of dimensions of the array to which the structuring element - will be applied, as returned by `np.ndim`. - - connectivity : int - `connectivity` determines which elements of the output array belong - to the structure, i.e. are considered as neighbors of the central - element. Elements up to a squared distance of `connectivity` from - the center are considered neighbors. `connectivity` may range from 1 - (no diagonal elements are neighbors) to `rank` (all elements are - neighbors). - - - Returns - ------- - - output : ndarray of bools - Structuring element which may be used for binary morphological - operations, with `rank` dimensions and all dimensions equal to 3. - - See also - -------- - - iterate_structure, binary_dilation, binary_erosion - - - Notes - ----- - - `generate_binary_structure` can only create structuring elements with - dimensions equal to 3, i.e. minimal dimensions. For larger structuring - elements, that are useful e.g. for eroding large objects, one may either - use `iterate_structure`, or create directly custom arrays with - numpy functions such as `numpy.ones`. 
- - Examples - -------- - - >>> struct = ndimage.generate_binary_structure(2, 1) - >>> struct - array([[False, True, False], - [ True, True, True], - [False, True, False]], dtype=bool) - >>> a = np.zeros((5,5)) - >>> a[2, 2] = 1 - >>> a - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) - >>> b - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 1., 1., 1., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) - array([[ 0., 0., 1., 0., 0.], - [ 0., 1., 1., 1., 0.], - [ 1., 1., 1., 1., 1.], - [ 0., 1., 1., 1., 0.], - [ 0., 0., 1., 0., 0.]]) - >>> struct = ndimage.generate_binary_structure(2, 2) - >>> struct - array([[ True, True, True], - [ True, True, True], - [ True, True, True]], dtype=bool) - >>> struct = ndimage.generate_binary_structure(3, 1) - >>> struct # no diagonal elements - array([[[False, False, False], - [False, True, False], - [False, False, False]], - [[False, True, False], - [ True, True, True], - [False, True, False]], - [[False, False, False], - [False, True, False], - [False, False, False]]], dtype=bool) - - """ - if connectivity < 1: - connectivity = 1 - if rank < 1: - if connectivity < 1: - return numpy.array(0, dtype = bool) - else: - return numpy.array(1, dtype = bool) - output = numpy.fabs(numpy.indices([3] * rank) - 1) - output = numpy.add.reduce(output, 0) - return numpy.asarray(output <= connectivity, dtype = bool) - - -def _binary_erosion(input, structure, iterations, mask, output, - border_value, origin, invert, brute_force): - input = numpy.asarray(input) - if numpy.iscomplexobj(input): - raise TypeError('Complex type not supported') - if structure is None: - structure = generate_binary_structure(input.ndim, 1) - else: - structure = numpy.asarray(structure) - structure = structure.astype(bool) - if 
structure.ndim != input.ndim: - raise RuntimeError('structure rank must equal input rank') - if not structure.flags.contiguous: - structure = structure.copy() - if numpy.product(structure.shape,axis=0) < 1: - raise RuntimeError('structure must not be empty') - if mask is not None: - mask = numpy.asarray(mask) - if mask.shape != input.shape: - raise RuntimeError('mask and input must have equal sizes') - origin = _ni_support._normalize_sequence(origin, input.ndim) - cit = _center_is_true(structure, origin) - if isinstance(output, numpy.ndarray): - if numpy.iscomplexobj(output): - raise TypeError('Complex output type not supported') - else: - output = bool - output, return_value = _ni_support._get_output(output, input) - - - if iterations == 1: - _nd_image.binary_erosion(input, structure, mask, output, - border_value, origin, invert, cit, 0) - return return_value - elif cit and not brute_force: - changed, coordinate_list = _nd_image.binary_erosion(input, - structure, mask, output, border_value, origin, invert, cit, 1) - structure = structure[tuple([slice(None, None, -1)] * - structure.ndim)] - for ii in range(len(origin)): - origin[ii] = -origin[ii] - if not structure.shape[ii] & 1: - origin[ii] -= 1 - if mask is not None: - msk = numpy.asarray(mask) - msk = mask.astype(numpy.int8) - if msk is mask: - msk = mask.copy() - mask = msk - if not structure.flags.contiguous: - structure = structure.copy() - _nd_image.binary_erosion2(output, structure, mask, iterations - 1, - origin, invert, coordinate_list) - return return_value - else: - tmp_in = numpy.zeros(input.shape, bool) - if return_value is None: - tmp_out = output - else: - tmp_out = numpy.zeros(input.shape, bool) - if not iterations & 1: - tmp_in, tmp_out = tmp_out, tmp_in - changed = _nd_image.binary_erosion(input, structure, mask, - tmp_out, border_value, origin, invert, cit, 0) - ii = 1 - while (ii < iterations) or (iterations < 1) and changed: - tmp_in, tmp_out = tmp_out, tmp_in - changed = 
_nd_image.binary_erosion(tmp_in, structure, mask, - tmp_out, border_value, origin, invert, cit, 0) - ii += 1 - if return_value is not None: - return tmp_out - - -def binary_erosion(input, structure = None, iterations = 1, mask = None, - output = None, border_value = 0, origin = 0, brute_force = False): - """ - Multi-dimensional binary erosion with a given structuring element. - - Binary erosion is a mathematical morphology operation used for image - processing. - - Parameters - ---------- - - input : array_like - Binary image to be eroded. Non-zero (True) elements form - the subset to be eroded. - - structure : array_like, optional - Structuring element used for the erosion. Non-zero elements are - considered True. If no structuring element is provided, an element - is generated with a square connectivity equal to one. - - iterations : {int, float}, optional - The erosion is repeated `iterations` times (one, by default). - If iterations is less than 1, the erosion is repeated until the - result does not change anymore. - - mask : array_like, optional - If a mask is given, only those elements with a True value at - the corresponding mask element are modified at each iteration. - - output : ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. - - origin: int or tuple of ints, optional - Placement of the filter, by default 0. - - border_value: int (cast to 0 or 1) - Value at the border in the output array. - - - Returns - ------- - - out: ndarray of bools - Erosion of the input by the structuring element. - - - See also - -------- - - grey_erosion, binary_dilation, binary_closing, binary_opening, - generate_binary_structure - - Notes - ----- - - Erosion [1]_ is a mathematical morphology operation [2]_ that uses a - structuring element for shrinking the shapes in an image. 
The binary - erosion of an image by a structuring element is the locus of the points - where a superimposition of the structuring element centered on the point - is entirely contained in the set of non-zero elements of the image. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29 - - .. [2] http://en.wikipedia.org/wiki/Mathematical_morphology - - Examples - -------- - - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[1:6, 2:5] = 1 - >>> a - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.binary_erosion(a).astype(a.dtype) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> #Erosion removes objects smaller than the structure - >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - - """ - return _binary_erosion(input, structure, iterations, mask, - output, border_value, origin, 0, brute_force) - -def binary_dilation(input, structure = None, iterations = 1, mask = None, - output = None, border_value = 0, origin = 0, brute_force = False): - """ - Multi-dimensional binary dilation with the given structuring element. - - - Parameters - ---------- - - input : array_like - Binary array_like to be dilated. Non-zero (True) elements form - the subset to be dilated. - - structure : array_like, optional - Structuring element used for the dilation. Non-zero elements are - considered True. If no structuring element is provided an element - is generated with a square connectivity equal to one. 
- - iterations : {int, float}, optional - The dilation is repeated `iterations` times (one, by default). - If iterations is less than 1, the dilation is repeated until the - result does not change anymore. - - mask : array_like, optional - If a mask is given, only those elements with a True value at - the corresponding mask element are modified at each iteration. - - - output : ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. - - origin : int or tuple of ints, optional - Placement of the filter, by default 0. - - border_value : int (cast to 0 or 1) - Value at the border in the output array. - - - Returns - ------- - - out : ndarray of bools - Dilation of the input by the structuring element. - - - See also - -------- - - grey_dilation, binary_erosion, binary_closing, binary_opening, - generate_binary_structure - - Notes - ----- - - Dilation [1]_ is a mathematical morphology operation [2]_ that uses a - structuring element for expanding the shapes in an image. The binary - dilation of an image by a structuring element is the locus of the points - covered by the structuring element, when its center lies within the - non-zero points of the image. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29 - - .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology - - Examples - -------- - - >>> a = np.zeros((5, 5)) - >>> a[2, 2] = 1 - >>> a - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> ndimage.binary_dilation(a) - array([[False, False, False, False, False], - [False, False, True, False, False], - [False, True, True, True, False], - [False, False, True, False, False], - [False, False, False, False, False]], dtype=bool) - >>> ndimage.binary_dilation(a).astype(a.dtype) - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 1., 1., 1., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> # 3x3 structuring element with connectivity 1, used by default - >>> struct1 = ndimage.generate_binary_structure(2, 1) - >>> struct1 - array([[False, True, False], - [ True, True, True], - [False, True, False]], dtype=bool) - >>> # 3x3 structuring element with connectivity 2 - >>> struct2 = ndimage.generate_binary_structure(2, 2) - >>> struct2 - array([[ True, True, True], - [ True, True, True], - [ True, True, True]], dtype=bool) - >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 1., 1., 1., 0.], - [ 0., 0., 1., 0., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) - array([[ 0., 0., 0., 0., 0.], - [ 0., 1., 1., 1., 0.], - [ 0., 1., 1., 1., 0.], - [ 0., 1., 1., 1., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> ndimage.binary_dilation(a, structure=struct1,\\ - ... 
iterations=2).astype(a.dtype) - array([[ 0., 0., 1., 0., 0.], - [ 0., 1., 1., 1., 0.], - [ 1., 1., 1., 1., 1.], - [ 0., 1., 1., 1., 0.], - [ 0., 0., 1., 0., 0.]]) - - """ - input = numpy.asarray(input) - if structure is None: - structure = generate_binary_structure(input.ndim, 1) - origin = _ni_support._normalize_sequence(origin, input.ndim) - structure = numpy.asarray(structure) - structure = structure[tuple([slice(None, None, -1)] * - structure.ndim)] - for ii in range(len(origin)): - origin[ii] = -origin[ii] - if not structure.shape[ii] & 1: - origin[ii] -= 1 - return _binary_erosion(input, structure, iterations, mask, - output, border_value, origin, 1, brute_force) - - -def binary_opening(input, structure = None, iterations = 1, output = None, - origin = 0): - """ - Multi-dimensional binary opening with the given structuring element. - - The *opening* of an input image by a structuring element is the - *dilation* of the *erosion* of the image by the structuring element. - - Parameters - ---------- - - input : array_like - Binary array_like to be opened. Non-zero (True) elements form - the subset to be opened. - - structure : array_like, optional - Structuring element used for the opening. Non-zero elements are - considered True. If no structuring element is provided an element - is generated with a square connectivity equal to one (i.e., only - nearest neighbors are connected to the center, diagonally-connected - elements are not considered neighbors). - - iterations : {int, float}, optional - The erosion step of the opening, then the dilation step are each - repeated `iterations` times (one, by default). If `iterations` is - less than 1, each operation is repeated until the result does - not change anymore. - - output : ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. - - origin : int or tuple of ints, optional - Placement of the filter, by default 0. 
- - Returns - ------- - - out : ndarray of bools - Opening of the input by the structuring element. - - - See also - -------- - - grey_opening, binary_closing, binary_erosion, binary_dilation, - generate_binary_structure - - Notes - ----- - - *Opening* [1]_ is a mathematical morphology operation [2]_ that - consists in the succession of an erosion and a dilation of the - input with the same structuring element. Opening therefore removes - objects smaller than the structuring element. - - Together with *closing* (`binary_closing`), opening can be used for - noise removal. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Opening_%28morphology%29 - - .. [2] http://en.wikipedia.org/wiki/Mathematical_morphology - - Examples - -------- - - >>> a = np.zeros((5,5), dtype=np.int) - >>> a[1:4, 1:4] = 1; a[4, 4] = 1 - >>> a - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 1]]) - >>> # Opening removes small objects - >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(np.int) - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - >>> # Opening can also smooth corners - >>> ndimage.binary_opening(a).astype(np.int) - array([[0, 0, 0, 0, 0], - [0, 0, 1, 0, 0], - [0, 1, 1, 1, 0], - [0, 0, 1, 0, 0], - [0, 0, 0, 0, 0]]) - >>> # Opening is the dilation of the erosion of the input - >>> ndimage.binary_erosion(a).astype(np.int) - array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]]) - >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(np.int) - array([[0, 0, 0, 0, 0], - [0, 0, 1, 0, 0], - [0, 1, 1, 1, 0], - [0, 0, 1, 0, 0], - [0, 0, 0, 0, 0]]) - - """ - input = numpy.asarray(input) - if structure is None: - rank = input.ndim - structure = generate_binary_structure(rank, 1) - tmp = binary_erosion(input, structure, iterations, None, None, 0, - origin) - return binary_dilation(tmp, structure, iterations, 
None, output, 0, - origin) - - -def binary_closing(input, structure = None, iterations = 1, output = None, - origin = 0): - """ - Multi-dimensional binary closing with the given structuring element. - - The *closing* of an input image by a structuring element is the - *erosion* of the *dilation* of the image by the structuring element. - - Parameters - ---------- - - input : array_like - Binary array_like to be closed. Non-zero (True) elements form - the subset to be closed. - - structure : array_like, optional - Structuring element used for the closing. Non-zero elements are - considered True. If no structuring element is provided an element - is generated with a square connectivity equal to one (i.e., only - nearest neighbors are connected to the center, diagonally-connected - elements are not considered neighbors). - - iterations : {int, float}, optional - The dilation step of the closing, then the erosion step are each - repeated `iterations` times (one, by default). If iterations is - less than 1, each operations is repeated until the result does - not change anymore. - - output : ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. - - origin : int or tuple of ints, optional - Placement of the filter, by default 0. - - Returns - ------- - - out : ndarray of bools - Closing of the input by the structuring element. - - - See also - -------- - - grey_closing, binary_opening, binary_dilation, binary_erosion, - generate_binary_structure - - Notes - ----- - - *Closing* [1]_ is a mathematical morphology operation [2]_ that - consists in the succession of a dilation and an erosion of the - input with the same structuring element. Closing therefore fills - holes smaller than the structuring element. - - Together with *opening* (`binary_opening`), closing can be used for - noise removal. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Closing_%28morphology%29 - - .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology - - Examples - -------- - - >>> a = np.zeros((5,5), dtype=np.int) - >>> a[1:-1, 1:-1] = 1; a[2,2] = 0 - >>> a - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 0, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - >>> # Closing removes small holes - >>> ndimage.binary_closing(a).astype(np.int) - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - >>> # Closing is the erosion of the dilation of the input - >>> ndimage.binary_dilation(a).astype(np.int) - array([[0, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - [1, 1, 1, 1, 1], - [1, 1, 1, 1, 1], - [0, 1, 1, 1, 0]]) - >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(np.int) - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - - - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[1:6, 2:5] = 1; a[1:3,3] = 0 - >>> a - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 1, 0, 0], - [0, 0, 1, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> # In addition to removing holes, closing can also - >>> # coarsen boundaries with fine hollows. 
- >>> ndimage.binary_closing(a).astype(np.int) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(np.int) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - - """ - input = numpy.asarray(input) - if structure is None: - rank = input.ndim - structure = generate_binary_structure(rank, 1) - tmp = binary_dilation(input, structure, iterations, None, None, 0, - origin) - return binary_erosion(tmp, structure, iterations, None, output, 0, - origin) - - -def binary_hit_or_miss(input, structure1 = None, structure2 = None, - output = None, origin1 = 0, origin2 = None): - """ - Multi-dimensional binary hit-or-miss transform. - - The hit-or-miss transform finds the locations of a given pattern - inside the input image. - - Parameters - ---------- - - input : array_like (cast to booleans) - Binary image where a pattern is to be detected. - - structure1 : array_like (cast to booleans), optional - Part of the structuring element to be fitted to the foreground - (non-zero elements) of `input`. If no value is provided, a - structure of square connectivity 1 is chosen. - - structure2 : array_like (cast to booleans), optional - Second part of the structuring element that has to miss completely - the foreground. If no value is provided, the complementary of - `structure1` is taken. - - output : ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. - - origin1 : int or tuple of ints, optional - Placement of the first part of the structuring element `structure1`, - by default 0 for a centered structure. 
- - origin2 : int or tuple of ints, optional - Placement of the second part of the structuring element `structure2`, - by default 0 for a centered structure. If a value is provided for - `origin1` and not for `origin2`, then `origin2` is set to `origin1`. - - Returns - ------- - - output : ndarray - Hit-or-miss transform of `input` with the given structuring - element (`structure1`, `structure2`). - - See also - -------- - - ndimage.morphology, binary_erosion - - - Notes - ----- - - - - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Hit-or-miss_transform - - Examples - -------- - - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1 - >>> a - array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) - >>> structure1 - array([[1, 0, 0], - [0, 1, 1], - [0, 1, 1]]) - >>> # Find the matches of structure1 in the array a - >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(np.int) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> # Change the origin of the filter - >>> # origin1=1 is equivalent to origin1=(1,1) here - >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\ - ... 
origin1=1).astype(np.int) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 0, 0]]) - - """ - input = numpy.asarray(input) - if structure1 is None: - structure1 = generate_binary_structure(input.ndim, 1) - if structure2 is None: - structure2 = numpy.logical_not(structure1) - origin1 = _ni_support._normalize_sequence(origin1, input.ndim) - if origin2 is None: - origin2 = origin1 - else: - origin2 = _ni_support._normalize_sequence(origin2, input.ndim) - - tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, - 0, False) - inplace = isinstance(output, numpy.ndarray) - result = _binary_erosion(input, structure2, 1, None, output, 0, - origin2, 1, False) - if inplace: - numpy.logical_not(output, output) - numpy.logical_and(tmp1, output, output) - else: - numpy.logical_not(result, result) - return numpy.logical_and(tmp1, result) - -def binary_propagation(input, structure = None, mask = None, - output = None, border_value = 0, origin = 0): - """ - Multi-dimensional binary propagation with the given structuring element. - - - Parameters - ---------- - - input : array_like - Binary image to be propagated inside `mask`. - - structure : array_like - Structuring element used in the successive dilations. The output - may depend on the structuring element, especially if `mask` has - several connex components. If no structuring element is - provided, an element is generated with a squared connectivity equal - to one. - - mask : array_like - Binary mask defining the region into which `input` is allowed to - propagate. - - output : ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. - - origin : int or tuple of ints, optional - Placement of the filter, by default 0. - - Returns - ------- - - ouput : ndarray - Binary propagation of `input` inside `mask`. 
- - Notes - ----- - - This function is functionally equivalent to calling binary_dilation - with the number of iterations less then one: iterative dilation until - the result does not change anymore. - - The succession of an erosion and propagation inside the original image - can be used instead of an *opening* for deleting small objects while - keeping the contours of larger objects untouched. - - References - ---------- - - .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15. - - .. [2] http://www.qi.tnw.tudelft.nl/Courses/FIP/noframes/fip-Morpholo.html#Heading102 - - Examples - -------- - - >>> input = np.zeros((8, 8), dtype=np.int) - >>> input[2, 2] = 1 - >>> mask = np.zeros((8, 8), dtype=np.int) - >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1 - >>> input - array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - >>> mask - array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 1], - [0, 0, 0, 0, 0, 0, 1, 1]]) - >>> ndimage.binary_propagation(input, mask=mask).astype(np.int) - array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.binary_propagation(input, mask=mask,\\ - ... 
structure=np.ones((3,3))).astype(np.int) - array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - - >>> # Comparison between opening and erosion+propagation - >>> a = np.zeros((6,6), dtype=np.int) - >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1 - >>> a - array([[1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 1]]) - >>> ndimage.binary_opening(a).astype(np.int) - array([[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0]]) - >>> b = ndimage.binary_erosion(a) - >>> b.astype(int) - array([[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - >>> ndimage.binary_propagation(b, mask=a).astype(np.int) - array([[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0]]) - - """ - return binary_dilation(input, structure, -1, mask, output, - border_value, origin) - -def binary_fill_holes(input, structure = None, output = None, origin = 0): - """ - Fill the holes in binary objects. - - - Parameters - ---------- - - input: array_like - n-dimensional binary array with holes to be filled - - structure: array_like, optional - Structuring element used in the computation; large-size elements - make computations faster but may miss holes separated from the - background by thin regions. The default element (with a square - connectivity equal to one) yields the intuitive result where all - holes in the input have been filled. - - output: ndarray, optional - Array of the same shape as input, into which the output is placed. - By default, a new array is created. 
- - origin: int, tuple of ints, optional - Position of the structuring element. - - Returns - ------- - - out: ndarray - Transformation of the initial image `input` where holes have been - filled. - - See also - -------- - - binary_dilation, binary_propagation, label - - Notes - ----- - - The algorithm used in this function consists in invading the complementary - of the shapes in `input` from the outer boundary of the image, - using binary dilations. Holes are not connected to the boundary and are - therefore not invaded. The result is the complementary subset of the - invaded region. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Mathematical_morphology - - - Examples - -------- - - >>> a = np.zeros((5, 5), dtype=int) - >>> a[1:4, 1:4] = 1 - >>> a[2,2] = 0 - >>> a - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 0, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - >>> ndimage.binary_fill_holes(a).astype(int) - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - >>> # Too big structuring element - >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int) - array([[0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 0, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]]) - - """ - mask = numpy.logical_not(input) - tmp = numpy.zeros(mask.shape, bool) - inplace = isinstance(output, numpy.ndarray) - if inplace: - binary_dilation(tmp, structure, -1, mask, output, 1, origin) - numpy.logical_not(output, output) - else: - output = binary_dilation(tmp, structure, -1, mask, None, 1, - origin) - numpy.logical_not(output, output) - return output - -def grey_erosion(input, size = None, footprint = None, structure = None, - output = None, mode = "reflect", cval = 0.0, origin = 0): - """ - Calculate a greyscale erosion, using either a structuring element, - or a footprint corresponding to a flat structuring element. - - Grayscale erosion is a mathematical morphology operation. 
For the - simple case of a full and flat structuring element, it can be viewed - as a minimum filter over a sliding window. - - Parameters - ---------- - - input : array_like - Array over which the grayscale erosion is to be computed. - - size : tuple of ints - Shape of a flat and full structuring element used for the - grayscale erosion. Optional if `footprint` is provided. - - footprint : array of ints, optional - Positions of non-infinite elements of a flat structuring element - used for the grayscale erosion. Non-zero values give the set of - neighbors of the center over which the minimum is chosen. - - structure : array of ints, optional - Structuring element used for the grayscale erosion. `structure` - may be a non-flat structuring element. - - output : array, optional - An array used for storing the ouput of the erosion may be provided. - - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The `mode` parameter determines how the array borders are - handled, where `cval` is the value when mode is equal to - 'constant'. Default is 'reflect' - - cval : scalar, optional - Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0. - - origin : scalar, optional - The `origin` parameter controls the placement of the filter. - Default 0 - - - Returns - ------- - - output : ndarray - Grayscale erosion of `input`. - - See also - -------- - - binary_erosion, grey_dilation, grey_opening, grey_closing - - generate_binary_structure - - ndimage.minimum_filter - - Notes - ----- - - The grayscale erosion of an image input by a structuring element s defined - over a domain E is given by: - - (input+s)(x) = min {input(y) - s(x-y), for y in E} - - In particular, for structuring elements defined as - s(y) = 0 for y in E, the grayscale erosion computes the minimum of the - input image inside a sliding window defined by E. - - Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_. - - References - ---------- - - .. 
[1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29 - - .. [2] http://en.wikipedia.org/wiki/Mathematical_morphology - - Examples - -------- - - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[1:6, 1:6] = 3 - >>> a[4,4] = 2; a[2,3] = 1 - >>> a - array([[0, 0, 0, 0, 0, 0, 0], - [0, 3, 3, 3, 3, 3, 0], - [0, 3, 3, 1, 3, 3, 0], - [0, 3, 3, 3, 3, 3, 0], - [0, 3, 3, 3, 2, 3, 0], - [0, 3, 3, 3, 3, 3, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.grey_erosion(a, size=(3,3)) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 3, 2, 2, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> footprint = ndimage.generate_binary_structure(2, 1) - >>> footprint - array([[False, True, False], - [ True, True, True], - [False, True, False]], dtype=bool) - >>> # Diagonally-connected elements are not considered neighbors - >>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 3, 1, 2, 0, 0], - [0, 0, 3, 2, 2, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - - """ - return filters._min_or_max_filter(input, size, footprint, structure, - output, mode, cval, origin, 1) - - -def grey_dilation(input, size = None, footprint = None, structure = None, - output = None, mode = "reflect", cval = 0.0, origin = 0): - """ - Calculate a greyscale dilation, using either a structuring element, - or a footprint corresponding to a flat structuring element. - - Grayscale dilation is a mathematical morphology operation. For the - simple case of a full and flat structuring element, it can be viewed - as a maximum filter over a sliding window. - - Parameters - ---------- - - input : array_like - Array over which the grayscale dilation is to be computed. - - size : tuple of ints - Shape of a flat and full structuring element used for the - grayscale dilation. Optional if `footprint` is provided. 
- - footprint : array of ints, optional - Positions of non-infinite elements of a flat structuring element - used for the grayscale dilation. Non-zero values give the set of - neighbors of the center over which the maximum is chosen. - - structure : array of ints, optional - Structuring element used for the grayscale dilation. `structure` - may be a non-flat structuring element. - - output : array, optional - An array used for storing the ouput of the dilation may be provided. - - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The `mode` parameter determines how the array borders are - handled, where `cval` is the value when mode is equal to - 'constant'. Default is 'reflect' - - cval : scalar, optional - Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0. - - origin : scalar, optional - The `origin` parameter controls the placement of the filter. - Default 0 - - - Returns - ------- - - output : ndarray - Grayscale dilation of `input`. - - See also - -------- - - binary_dilation, grey_erosion, grey_closing, grey_opening - - generate_binary_structure - - ndimage.maximum_filter - - Notes - ----- - - The grayscale dilation of an image input by a structuring element s defined - over a domain E is given by: - - (input+s)(x) = max {input(y) + s(x-y), for y in E} - - In particular, for structuring elements defined as - s(y) = 0 for y in E, the grayscale dilation computes the maximum of the - input image inside a sliding window defined by E. - - Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29 - - .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology - - - Examples - -------- - - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[2:5, 2:5] = 1 - >>> a[4,4] = 2; a[2,3] = 3 - >>> a - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 3, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 2, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.grey_dilation(a, size=(3,3)) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 3, 3, 3, 2, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.grey_dilation(a, footprint=np.ones((3,3))) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 3, 3, 3, 2, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> s = ndimage.generate_binary_structure(2,1) - >>> s - array([[False, True, False], - [ True, True, True], - [False, True, False]], dtype=bool) - >>> ndimage.grey_dilation(a, footprint=s) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 3, 1, 0, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 1, 3, 2, 1, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 0, 1, 1, 2, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3))) - array([[1, 1, 1, 1, 1, 1, 1], - [1, 2, 4, 4, 4, 2, 1], - [1, 2, 4, 4, 4, 2, 1], - [1, 2, 4, 4, 4, 3, 1], - [1, 2, 2, 3, 3, 3, 1], - [1, 2, 2, 3, 3, 3, 1], - [1, 1, 1, 1, 1, 1, 1]]) - - """ - if structure is not None: - structure = numpy.asarray(structure) - structure = structure[tuple([slice(None, None, -1)] * - structure.ndim)] - if footprint is not None: - footprint = numpy.asarray(footprint) - footprint = footprint[tuple([slice(None, None, -1)] * - footprint.ndim)] - input = numpy.asarray(input) - origin = _ni_support._normalize_sequence(origin, input.ndim) - for ii in range(len(origin)): - origin[ii] = -origin[ii] - if footprint is not None: - sz = footprint.shape[ii] - else: - sz = size[ii] - if not sz & 
1: - origin[ii] -= 1 - return filters._min_or_max_filter(input, size, footprint, structure, - output, mode, cval, origin, 0) - - -def grey_opening(input, size = None, footprint = None, structure = None, - output = None, mode = "reflect", cval = 0.0, origin = 0): - """ - Multi-dimensional greyscale opening. - - A greyscale opening consists in the succession of a greyscale erosion, - and a greyscale dilation. - - Parameters - ---------- - - input : array_like - Array over which the grayscale opening is to be computed. - - size : tuple of ints - Shape of a flat and full structuring element used for the - grayscale opening. Optional if `footprint` is provided. - - footprint : array of ints, optional - Positions of non-infinite elements of a flat structuring element - used for the grayscale opening. - - structure : array of ints, optional - Structuring element used for the grayscale opening. `structure` - may be a non-flat structuring element. - - output : array, optional - An array used for storing the ouput of the opening may be provided. - - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The `mode` parameter determines how the array borders are - handled, where `cval` is the value when mode is equal to - 'constant'. Default is 'reflect' - - cval : scalar, optional - Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0. - - origin : scalar, optional - The `origin` parameter controls the placement of the filter. - Default 0 - - Returns - ------- - - output : ndarray - Result of the grayscale opening of `input` with `structure`. - - See also - -------- - - binary_opening, grey_dilation, grey_erosion, grey_closing - - generate_binary_structure - - Notes - ----- - - The action of a grayscale opening with a flat structuring element amounts - to smoothen high local maxima, whereas binary opening erases small objects. - - References - ---------- - - .. 
[1] http://en.wikipedia.org/wiki/Mathematical_morphology - - - Examples - -------- - - >>> a = np.arange(36).reshape((6,6)) - >>> a[3, 3] = 50 - >>> a - array([[ 0, 1, 2, 3, 4, 5], - [ 6, 7, 8, 9, 10, 11], - [12, 13, 14, 15, 16, 17], - [18, 19, 20, 50, 22, 23], - [24, 25, 26, 27, 28, 29], - [30, 31, 32, 33, 34, 35]]) - >>> ndimage.grey_opening(a, size=(3,3)) - array([[ 0, 1, 2, 3, 4, 4], - [ 6, 7, 8, 9, 10, 10], - [12, 13, 14, 15, 16, 16], - [18, 19, 20, 22, 22, 22], - [24, 25, 26, 27, 28, 28], - [24, 25, 26, 27, 28, 28]]) - >>> # Note that the local maximum a[3,3] has disappeared - - """ - tmp = grey_erosion(input, size, footprint, structure, None, mode, - cval, origin) - return grey_dilation(tmp, size, footprint, structure, output, mode, - cval, origin) - - -def grey_closing(input, size = None, footprint = None, structure = None, - output = None, mode = "reflect", cval = 0.0, origin = 0): - """ - Multi-dimensional greyscale closing. - - A greyscale closing consists in the succession of a greyscale dilation, - and a greyscale erosion. - - Parameters - ---------- - - input : array_like - Array over which the grayscale closing is to be computed. - - size : tuple of ints - Shape of a flat and full structuring element used for the - grayscale closing. Optional if `footprint` is provided. - - footprint : array of ints, optional - Positions of non-infinite elements of a flat structuring element - used for the grayscale closing. - - structure : array of ints, optional - Structuring element used for the grayscale closing. `structure` - may be a non-flat structuring element. - - output : array, optional - An array used for storing the ouput of the closing may be provided. - - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The `mode` parameter determines how the array borders are - handled, where `cval` is the value when mode is equal to - 'constant'. 
Default is 'reflect' - - cval : scalar, optional - Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0. - - origin : scalar, optional - The `origin` parameter controls the placement of the filter. - Default 0 - - Returns - ------- - - output : ndarray - Result of the grayscale closing of `input` with `structure`. - - See also - -------- - - binary_closing, grey_dilation, grey_erosion, grey_opening - - generate_binary_structure - - Notes - ----- - - The action of a grayscale closing with a flat structuring element amounts - to smoothen deep local minima, whereas binary closing fills small holes. - - References - ---------- - - .. [1] http://en.wikipedia.org/wiki/Mathematical_morphology - - - Examples - -------- - - >>> a = np.arange(36).reshape((6,6)) - >>> a[3,3] = 0 - >>> a - array([[ 0, 1, 2, 3, 4, 5], - [ 6, 7, 8, 9, 10, 11], - [12, 13, 14, 15, 16, 17], - [18, 19, 20, 0, 22, 23], - [24, 25, 26, 27, 28, 29], - [30, 31, 32, 33, 34, 35]]) - >>> ndimage.grey_closing(a, size=(3,3)) - array([[ 7, 7, 8, 9, 10, 11], - [ 7, 7, 8, 9, 10, 11], - [13, 13, 14, 15, 16, 17], - [19, 19, 20, 20, 22, 23], - [25, 25, 26, 27, 28, 29], - [31, 31, 32, 33, 34, 35]]) - >>> # Note that the local minimum a[3,3] has disappeared - - """ - tmp = grey_dilation(input, size, footprint, structure, None, mode, - cval, origin) - return grey_erosion(tmp, size, footprint, structure, output, mode, - cval, origin) - - -def morphological_gradient(input, size = None, footprint = None, - structure = None, output = None, mode = "reflect", - cval = 0.0, origin = 0): - """ - Multi-dimensional morphological gradient. - - The morphological gradient is calculated as the difference between a - dilation and an erosion of the input with a given structuring element. - - - Parameters - ---------- - - input : array_like - Array over which to compute the morphlogical gradient. 
- - size : tuple of ints - Shape of a flat and full structuring element used for the - mathematical morphology operations. Optional if `footprint` - is provided. A larger `size` yields a more blurred gradient. - - footprint : array of ints, optional - Positions of non-infinite elements of a flat structuring element - used for the morphology operations. Larger footprints - give a more blurred morphological gradient. - - structure : array of ints, optional - Structuring element used for the morphology operations. - `structure` may be a non-flat structuring element. - - output : array, optional - An array used for storing the ouput of the morphological gradient - may be provided. - - mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional - The `mode` parameter determines how the array borders are - handled, where `cval` is the value when mode is equal to - 'constant'. Default is 'reflect' - - cval : scalar, optional - Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0. - - origin : scalar, optional - The `origin` parameter controls the placement of the filter. - Default 0 - - Returns - ------- - - output : ndarray - Morphological gradient of `input`. - - See also - -------- - - grey_dilation, grey_erosion - - ndimage.gaussian_gradient_magnitude - - Notes - ----- - - For a flat structuring element, the morphological gradient - computed at a given point corresponds to the maximal difference - between elements of the input among the elements covered by the - structuring element centered on the point. - - References - ---------- - - .. 
[1] http://en.wikipedia.org/wiki/Mathematical_morphology - - Examples - -------- - - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[2:5, 2:5] = 1 - >>> ndimage.morphological_gradient(a, size=(3,3)) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> # The morphological gradient is computed as the difference - >>> # between a dilation and an erosion - >>> ndimage.grey_dilation(a, size=(3,3)) -\\ - ... ndimage.grey_erosion(a, size=(3,3)) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> a = np.zeros((7,7), dtype=np.int) - >>> a[2:5, 2:5] = 1 - >>> a[4,4] = 2; a[2,3] = 3 - >>> a - array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 3, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 2, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]]) - >>> ndimage.morphological_gradient(a, size=(3,3)) - array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 3, 3, 3, 1, 0], - [0, 1, 3, 2, 3, 2, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 1, 1, 2, 2, 2, 0], - [0, 0, 0, 0, 0, 0, 0]]) - - """ - tmp = grey_dilation(input, size, footprint, structure, None, mode, - cval, origin) - if isinstance(output, numpy.ndarray): - grey_erosion(input, size, footprint, structure, output, mode, - cval, origin) - return numpy.subtract(tmp, output, output) - else: - return (tmp - grey_erosion(input, size, footprint, structure, - None, mode, cval, origin)) - - -def morphological_laplace(input, size = None, footprint = None, - structure = None, output = None, - mode = "reflect", cval = 0.0, origin = 0): - """Multi-dimensional morphological laplace. - - Either a size or a footprint, or the structure must be provided. An - output array can optionally be provided. 
The origin parameter - controls the placement of the filter. The mode parameter - determines how the array borders are handled, where cval is the - value when mode is equal to 'constant'. - """ - tmp1 = grey_dilation(input, size, footprint, structure, None, mode, - cval, origin) - if isinstance(output, numpy.ndarray): - grey_erosion(input, size, footprint, structure, output, mode, - cval, origin) - numpy.add(tmp1, output, output) - del tmp1 - numpy.subtract(output, input, output) - return numpy.subtract(output, input, output) - else: - tmp2 = grey_erosion(input, size, footprint, structure, None, mode, - cval, origin) - numpy.add(tmp1, tmp2, tmp2) - del tmp1 - numpy.subtract(tmp2, input, tmp2) - numpy.subtract(tmp2, input, tmp2) - return tmp2 - - -def white_tophat(input, size = None, footprint = None, structure = None, - output = None, mode = "reflect", cval = 0.0, origin = 0): - """Multi-dimensional white tophat filter. - - Either a size or a footprint, or the structure must be provided. An - output array can optionally be provided. The origin parameter - controls the placement of the filter. The mode parameter - determines how the array borders are handled, where cval is the - value when mode is equal to 'constant'. - """ - tmp = grey_erosion(input, size, footprint, structure, None, mode, - cval, origin) - if isinstance(output, numpy.ndarray): - grey_dilation(tmp, size, footprint, structure, output, mode, cval, - origin) - del tmp - return numpy.subtract(input, output, output) - else: - tmp = grey_dilation(tmp, size, footprint, structure, None, mode, - cval, origin) - return input - tmp - - -def black_tophat(input, size = None, footprint = None, - structure = None, output = None, mode = "reflect", - cval = 0.0, origin = 0): - """ - Multi-dimensional black tophat filter. - - Either a size or a footprint, or the structure must be provided. An - output array can optionally be provided. The origin parameter - controls the placement of the filter. 
The mode parameter - determines how the array borders are handled, where cval is the - value when mode is equal to 'constant'. - - See also - -------- - - grey_opening, grey_closing - - References - ---------- - - .. [1] http://cmm.ensmp.fr/Micromorph/course/sld011.htm, and following slides - .. [2] http://en.wikipedia.org/wiki/Top-hat_transform - - """ - tmp = grey_dilation(input, size, footprint, structure, None, mode, - cval, origin) - if isinstance(output, numpy.ndarray): - grey_erosion(tmp, size, footprint, structure, output, mode, cval, - origin) - del tmp - return numpy.subtract(output, input, output) - else: - tmp = grey_erosion(tmp, size, footprint, structure, None, mode, - cval, origin) - return tmp - input - - -def distance_transform_bf(input, metric = "euclidean", sampling = None, - return_distances = True, return_indices = False, - distances = None, indices = None): - """Distance transform function by a brute force algorithm. - - This function calculates the distance transform of the input, by - replacing each background element (zero values), with its - shortest distance to the foreground (any element non-zero). Three - types of distance metric are supported: 'euclidean', 'taxicab' - and 'chessboard'. - - In addition to the distance transform, the feature transform can - be calculated. In this case the index of the closest background - element is returned along the first axis of the result. - - The return_distances, and return_indices flags can be used to - indicate if the distance transform, the feature transform, or both - must be returned. - - Optionally the sampling along each axis can be given by the - sampling parameter which should be a sequence of length equal to - the input rank, or a single number in which the sampling is assumed - to be equal along all axes. This parameter is only used in the - case of the euclidean distance transform. 
- - This function employs a slow brute force algorithm, see also the - function distance_transform_cdt for more efficient taxicab and - chessboard algorithms. - - the distances and indices arguments can be used to give optional - output arrays that must be of the correct size and type (float64 - and int32). - """ - if (not return_distances) and (not return_indices): - msg = 'at least one of distances/indices must be specified' - raise RuntimeError(msg) - tmp1 = numpy.asarray(input) != 0 - struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) - tmp2 = binary_dilation(tmp1, struct) - tmp2 = numpy.logical_xor(tmp1, tmp2) - tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8) - del tmp2 - metric = metric.lower() - if metric == 'euclidean': - metric = 1 - elif metric in ['taxicab', 'cityblock', 'manhattan']: - metric = 2 - elif metric == 'chessboard': - metric = 3 - else: - raise RuntimeError('distance metric not supported') - if sampling is not None: - sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) - sampling = numpy.asarray(sampling, dtype = numpy.float64) - if not sampling.flags.contiguous: - sampling = sampling.copy() - if return_indices: - ft = numpy.zeros(tmp1.shape, dtype = numpy.int32) - else: - ft = None - if return_distances: - if distances is None: - if metric == 1: - dt = numpy.zeros(tmp1.shape, dtype = numpy.float64) - else: - dt = numpy.zeros(tmp1.shape, dtype = numpy.uint32) - else: - if distances.shape != tmp1.shape: - raise RuntimeError('distances array has wrong shape') - if metric == 1: - if distances.dtype.type != numpy.float64: - raise RuntimeError('distances array must be float64') - else: - if distances.dtype.type != numpy.uint32: - raise RuntimeError('distances array must be uint32') - dt = distances - else: - dt = None - _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) - if return_indices: - if isinstance(indices, numpy.ndarray): - if indices.dtype.type != numpy.int32: - raise RuntimeError('indices must of 
int32 type') - if indices.shape != (tmp1.ndim,) + tmp1.shape: - raise RuntimeError('indices has wrong shape') - tmp2 = indices - else: - tmp2 = numpy.indices(tmp1.shape, dtype = numpy.int32) - ft = numpy.ravel(ft) - for ii in range(tmp2.shape[0]): - rtmp = numpy.ravel(tmp2[ii, ...])[ft] - rtmp.shape = tmp1.shape - tmp2[ii, ...] = rtmp - ft = tmp2 - # construct and return the result - result = [] - if return_distances and not isinstance(distances, numpy.ndarray): - result.append(dt) - if return_indices and not isinstance(indices, numpy.ndarray): - result.append(ft) - if len(result) == 2: - return tuple(result) - elif len(result) == 1: - return result[0] - else: - return None - -def distance_transform_cdt(input, metric = 'chessboard', - return_distances = True, return_indices = False, - distances = None, indices = None): - """Distance transform for chamfer type of transforms. - - The metric determines the type of chamfering that is done. If - the metric is equal to 'taxicab' a structure is generated - using generate_binary_structure with a squared distance equal to - 1. If the metric is equal to 'chessboard', a metric is - generated using generate_binary_structure with a squared distance - equal to the rank of the array. These choices correspond to the - common interpretations of the taxicab and the chessboard - distance metrics in two dimensions. - - In addition to the distance transform, the feature transform can - be calculated. In this case the index of the closest background - element is returned along the first axis of the result. - - The return_distances, and return_indices flags can be used to - indicate if the distance transform, the feature transform, or both - must be returned. - - The distances and indices arguments can be used to give optional - output arrays that must be of the correct size and type (both int32). 
- """ - if (not return_distances) and (not return_indices): - msg = 'at least one of distances/indices must be specified' - raise RuntimeError(msg) - ft_inplace = isinstance(indices, numpy.ndarray) - dt_inplace = isinstance(distances, numpy.ndarray) - input = numpy.asarray(input) - if metric in ['taxicab', 'cityblock', 'manhattan']: - rank = input.ndim - metric = generate_binary_structure(rank, 1) - elif metric == 'chessboard': - rank = input.ndim - metric = generate_binary_structure(rank, rank) - else: - try: - metric = numpy.asarray(metric) - except: - raise RuntimeError('invalid metric provided') - for s in metric.shape: - if s != 3: - raise RuntimeError('metric sizes must be equal to 3') - if not metric.flags.contiguous: - metric = metric.copy() - if dt_inplace: - if distances.dtype.type != numpy.int32: - raise RuntimeError('distances must be of int32 type') - if distances.shape != input.shape: - raise RuntimeError('distances has wrong shape') - dt = distances - dt[...] = numpy.where(input, -1, 0).astype(numpy.int32) - else: - dt = numpy.where(input, -1, 0).astype(numpy.int32) - rank = dt.ndim - if return_indices: - sz = numpy.product(dt.shape,axis=0) - ft = numpy.arange(sz, dtype = numpy.int32) - ft.shape = dt.shape - else: - ft = None - _nd_image.distance_transform_op(metric, dt, ft) - dt = dt[tuple([slice(None, None, -1)] * rank)] - if return_indices: - ft = ft[tuple([slice(None, None, -1)] * rank)] - _nd_image.distance_transform_op(metric, dt, ft) - dt = dt[tuple([slice(None, None, -1)] * rank)] - if return_indices: - ft = ft[tuple([slice(None, None, -1)] * rank)] - ft = numpy.ravel(ft) - if ft_inplace: - if indices.dtype.type != numpy.int32: - raise RuntimeError('indices must of int32 type') - if indices.shape != (dt.ndim,) + dt.shape: - raise RuntimeError('indices has wrong shape') - tmp = indices - else: - tmp = numpy.indices(dt.shape, dtype = numpy.int32) - for ii in range(tmp.shape[0]): - rtmp = numpy.ravel(tmp[ii, ...])[ft] - rtmp.shape = dt.shape - 
tmp[ii, ...] = rtmp - ft = tmp - - # construct and return the result - result = [] - if return_distances and not dt_inplace: - result.append(dt) - if return_indices and not ft_inplace: - result.append(ft) - if len(result) == 2: - return tuple(result) - elif len(result) == 1: - return result[0] - else: - return None - - -def distance_transform_edt(input, sampling = None, - return_distances = True, return_indices = False, - distances = None, indices = None): - """ - Exact euclidean distance transform. - - In addition to the distance transform, the feature transform can - be calculated. In this case the index of the closest background - element is returned along the first axis of the result. - - Parameters - ---------- - input : array_like - Input data to transform. Can be any type but will be converted - into binary: 1 wherever input equates to True, 0 elsewhere. - sampling : float or int, or sequence of same, optional - Spacing of elements along each dimension. If a sequence, must be of - length equal to the input rank; if a single number, this is used for - all axes. If not specified, a grid spacing of unity is implied. - return_distances : bool, optional - Whether to return distance matrix. At least one of - return_distances/return_indices must be True. Default is True. - return_indices : bool, optional - Whether to return indices matrix. Default is False. - distance : ndarray, optional - Used for output of distance array, must be of type float64. - indices : ndarray, optional - Used for output of indices, must be of type int32. - - Returns - ------- - result : ndarray or list of ndarray - Either distance matrix, index matrix, or a list of the two, - depending on `return_x` flags and `distance` and `indices` - input parameters. 
- - Notes - ----- - The euclidean distance transform gives values of the euclidean - distance:: - - n - y_i = sqrt(sum (x[i]-b[i])**2) - i - - where b[i] is the background point (value 0) with the smallest - Euclidean distance to input points x[i], and n is the - number of dimensions. - - Examples - -------- - >>> a = np.array(([0,1,1,1,1], - [0,0,1,1,1], - [0,1,1,1,1], - [0,1,1,1,0], - [0,1,1,0,0])) - >>> from scipy import ndimage - >>> ndimage.distance_transform_edt(a) - array([[ 0. , 1. , 1.4142, 2.2361, 3. ], - [ 0. , 0. , 1. , 2. , 2. ], - [ 0. , 1. , 1.4142, 1.4142, 1. ], - [ 0. , 1. , 1.4142, 1. , 0. ], - [ 0. , 1. , 1. , 0. , 0. ]]) - - With a sampling of 2 units along x, 1 along y: - - >>> ndimage.distance_transform_edt(a, sampling=[2,1]) - array([[ 0. , 1. , 2. , 2.8284, 3.6056], - [ 0. , 0. , 1. , 2. , 3. ], - [ 0. , 1. , 2. , 2.2361, 2. ], - [ 0. , 1. , 2. , 1. , 0. ], - [ 0. , 1. , 1. , 0. , 0. ]]) - - Asking for indices as well: - - >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) - >>> inds - array([[[0, 0, 1, 1, 3], - [1, 1, 1, 1, 3], - [2, 2, 1, 3, 3], - [3, 3, 4, 4, 3], - [4, 4, 4, 4, 4]], - [[0, 0, 1, 1, 4], - [0, 1, 1, 1, 4], - [0, 0, 1, 4, 4], - [0, 0, 3, 3, 4], - [0, 0, 3, 3, 4]]]) - - With arrays provided for inplace outputs: - - >>> indices = np.zeros(((np.rank(a),) + a.shape), dtype=np.int32) - >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) - array([[ 0. , 1. , 1.4142, 2.2361, 3. ], - [ 0. , 0. , 1. , 2. , 2. ], - [ 0. , 1. , 1.4142, 1.4142, 1. ], - [ 0. , 1. , 1.4142, 1. , 0. ], - [ 0. , 1. , 1. , 0. , 0. 
]]) - >>> indices - array([[[0, 0, 1, 1, 3], - [1, 1, 1, 1, 3], - [2, 2, 1, 3, 3], - [3, 3, 4, 4, 3], - [4, 4, 4, 4, 4]], - [[0, 0, 1, 1, 4], - [0, 1, 1, 1, 4], - [0, 0, 1, 4, 4], - [0, 0, 3, 3, 4], - [0, 0, 3, 3, 4]]]) - - """ - if (not return_distances) and (not return_indices): - msg = 'at least one of distances/indices must be specified' - raise RuntimeError(msg) - ft_inplace = isinstance(indices, numpy.ndarray) - dt_inplace = isinstance(distances, numpy.ndarray) - # calculate the feature transform - input = numpy.where(input, 1, 0).astype(numpy.int8) - if sampling is not None: - sampling = _ni_support._normalize_sequence(sampling, input.ndim) - sampling = numpy.asarray(sampling, dtype = numpy.float64) - if not sampling.flags.contiguous: - sampling = sampling.copy() - if ft_inplace: - ft = indices - if ft.shape != (input.ndim,) + input.shape: - raise RuntimeError('indices has wrong shape') - if ft.dtype.type != numpy.int32: - raise RuntimeError('indices must be of int32 type') - else: - ft = numpy.zeros((input.ndim,) + input.shape, - dtype = numpy.int32) - _nd_image.euclidean_feature_transform(input, sampling, ft) - # if requested, calculate the distance transform - if return_distances: - dt = ft - numpy.indices(input.shape, dtype = ft.dtype) - dt = dt.astype(numpy.float64) - if sampling is not None: - for ii in range(len(sampling)): - dt[ii, ...] 
*= sampling[ii] - numpy.multiply(dt, dt, dt) - if dt_inplace: - dt = numpy.add.reduce(dt, axis = 0) - if distances.shape != dt.shape: - raise RuntimeError('indices has wrong shape') - if distances.dtype.type != numpy.float64: - raise RuntimeError('indices must be of float64 type') - numpy.sqrt(dt, distances) - del dt - else: - dt = numpy.add.reduce(dt, axis = 0) - dt = numpy.sqrt(dt) - # construct and return the result - result = [] - if return_distances and not dt_inplace: - result.append(dt) - if return_indices and not ft_inplace: - result.append(ft) - if len(result) == 2: - return tuple(result) - elif len(result) == 1: - return result[0] - else: - return None diff --git a/scipy-0.10.1/scipy/ndimage/setup.py b/scipy-0.10.1/scipy/ndimage/setup.py deleted file mode 100644 index 115769fa10..0000000000 --- a/scipy-0.10.1/scipy/ndimage/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration -from numpy import get_include - -def configuration(parent_package='', top_path=None): - - config = Configuration('ndimage', parent_package, top_path) - - config.add_extension("_nd_image", - sources=["src/nd_image.c","src/ni_filters.c", - "src/ni_fourier.c","src/ni_interpolation.c", - "src/ni_measure.c", - "src/ni_morphology.c","src/ni_support.c"], - include_dirs=['src']+[get_include()], - ) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/ndimage/setupscons.py b/scipy-0.10.1/scipy/ndimage/setupscons.py deleted file mode 100644 index d870ce818e..0000000000 --- a/scipy-0.10.1/scipy/ndimage/setupscons.py +++ /dev/null @@ -1,15 +0,0 @@ -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration -from numpy import get_include - -def configuration(parent_package='', top_path=None): - - config = Configuration('ndimage', parent_package, top_path) - - 
config.add_sconscript("SConstruct") - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/ndimage/src/nd_image.c b/scipy-0.10.1/scipy/ndimage/src/nd_image.c deleted file mode 100644 index 5956ff1fa7..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/nd_image.c +++ /dev/null @@ -1,1057 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#define ND_IMPORT_ARRAY -#include "nd_image.h" -#undef ND_IMPORT_ARRAY -#include "ni_support.h" -#include "ni_filters.h" -#include "ni_fourier.h" -#include "ni_morphology.h" -#include "ni_interpolation.h" -#include "ni_measure.h" - -#include "numpy/npy_3kcompat.h" - -typedef struct { - PyObject *function; - PyObject *extra_arguments; - PyObject *extra_keywords; -} NI_PythonCallbackData; - -/* Convert an input array of any type, not necessarily contiguous */ -static int -NI_ObjectToInputArray(PyObject *object, PyArrayObject **array) -{ - *array = NA_InputArray(object, tAny, NPY_ALIGNED|NPY_NOTSWAPPED); - return *array ? 1 : 0; -} - -/* Convert an input array of any type, not necessarily contiguous */ -static int -NI_ObjectToOptionalInputArray(PyObject *object, PyArrayObject **array) -{ - if (object == Py_None) { - *array = NULL; - return 1; - } else { - *array = NA_InputArray(object, tAny, NPY_ALIGNED|NPY_NOTSWAPPED); - return *array ? 1 : 0; - } -} - -/* Convert an output array of any type, not necessarily contiguous */ -static int -NI_ObjectToOutputArray(PyObject *object, PyArrayObject **array) -{ - *array = NA_OutputArray(object, tAny, NPY_ALIGNED|NPY_NOTSWAPPED); - return *array ? 1 : 0; -} - -/* Convert an output array of any type, not necessarily contiguous */ -static int -NI_ObjectToOptionalOutputArray(PyObject *object, PyArrayObject **array) -{ - if (object == Py_None) { - *array = NULL; - return 1; - } else { - *array = NA_OutputArray(object, tAny, NPY_ALIGNED|NPY_NOTSWAPPED); - return *array ? 1 : 0; - } -} - -/* Convert an input/output array of any type, not necessarily contiguous */ -static int -NI_ObjectToIoArray(PyObject *object, PyArrayObject **array) -{ - *array = NA_IoArray(object, tAny, NPY_ALIGNED|NPY_NOTSWAPPED); - return *array ? 
1 : 0; -} - -/* Convert an Long sequence */ -static npy_intp -NI_ObjectToLongSequenceAndLength(PyObject *object, npy_intp **sequence) -{ - npy_intp *pa, ii; - PyArrayObject *array = NA_InputArray(object, PyArray_INTP, NPY_CARRAY); - npy_intp length = PyArray_SIZE(array); - - *sequence = (npy_intp*)malloc(length * sizeof(npy_intp)); - if (!*sequence) { - PyErr_NoMemory(); - Py_XDECREF(array); - return -1; - } - pa = (npy_intp*)PyArray_DATA(array); - for(ii = 0; ii < length; ii++) - (*sequence)[ii] = pa[ii]; - Py_XDECREF(array); - return length; -} - -static int -NI_ObjectToLongSequence(PyObject *object, npy_intp **sequence) -{ - return NI_ObjectToLongSequenceAndLength(object, sequence) >= 0; -} - -/*********************************************************************/ -/* wrapper functions: */ -/*********************************************************************/ - -static PyObject *Py_Correlate1D(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *weights = NULL; - int axis, mode; - double cval; -#if PY_VERSION_HEX < 0x02050000 - long origin; -#define FMT "l" -#else - npy_intp origin; -#define FMT "n" -#endif - - if (!PyArg_ParseTuple(args, "O&O&iO&id" FMT, - NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &weights, &axis, - NI_ObjectToOutputArray, &output, &mode, &cval, - &origin)) - goto exit; - -#undef FMT - - if (!NI_Correlate1D(input, weights, axis, output, - (NI_ExtendMode)mode, cval, origin)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(weights); - Py_XDECREF(output); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyObject *Py_Correlate(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *weights = NULL; - npy_intp *origin = NULL; - int mode; - double cval; - - if (!PyArg_ParseTuple(args, "O&O&O&idO&", NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &weights, - NI_ObjectToOutputArray, &output, - &mode, &cval, - NI_ObjectToLongSequence, &origin)) - goto exit; - if (!NI_Correlate(input, weights, output, (NI_ExtendMode)mode, cval, - origin)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(weights); - Py_XDECREF(output); - if (origin) - free(origin); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_UniformFilter1D(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL; - int axis, mode; -#if PY_VERSION_HEX < 0x02050000 - long filter_size, origin; -#define FMT "l" -#else - npy_intp filter_size, origin; -#define FMT "n" -#endif - double cval; - - if (!PyArg_ParseTuple(args, "O&" FMT "iO&id" FMT, - NI_ObjectToInputArray, &input, - &filter_size, &axis, - NI_ObjectToOutputArray, &output, - &mode, &cval, &origin)) - goto exit; - if (!NI_UniformFilter1D(input, filter_size, axis, output, - (NI_ExtendMode)mode, cval, origin)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(output); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyObject *Py_MinOrMaxFilter1D(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL; - int axis, mode, minimum; -#if PY_VERSION_HEX < 0x02050000 - long filter_size, origin; -#define FMT "l" -#else - npy_intp filter_size, origin; -#define FMT "n" -#endif - double cval; - - if (!PyArg_ParseTuple(args, "O&" FMT "iO&id" FMT "i", - NI_ObjectToInputArray, &input, - &filter_size, &axis, - NI_ObjectToOutputArray, &output, - &mode, &cval, &origin, &minimum)) - goto exit; -#undef FMT - if (!NI_MinOrMaxFilter1D(input, filter_size, axis, output, - (NI_ExtendMode)mode, cval, origin, minimum)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(output); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_MinOrMaxFilter(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *footprint = NULL; - PyArrayObject *structure = NULL; - npy_intp *origin = NULL; - int mode, minimum; - double cval; - - if (!PyArg_ParseTuple(args, "O&O&O&O&idO&i", - NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &footprint, - NI_ObjectToOptionalInputArray, &structure, - NI_ObjectToOutputArray, &output, - &mode, &cval, - NI_ObjectToLongSequence, &origin, - &minimum)) - goto exit; - if (!NI_MinOrMaxFilter(input, footprint, structure, output, - (NI_ExtendMode)mode, cval, origin, minimum)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(footprint); - Py_XDECREF(structure); - Py_XDECREF(output); - if (origin) - free(origin); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyObject *Py_RankFilter(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *footprint = NULL; - npy_intp *origin = NULL; - int mode, rank; - double cval; - - if (!PyArg_ParseTuple(args, "O&iO&O&idO&", - NI_ObjectToInputArray, &input, &rank, - NI_ObjectToInputArray, &footprint, - NI_ObjectToOutputArray, &output, - &mode, &cval, - NI_ObjectToLongSequence, &origin)) - goto exit; - if (!NI_RankFilter(input, rank, footprint, output, (NI_ExtendMode)mode, - cval, origin)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(footprint); - Py_XDECREF(output); - if (origin) - free(origin); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static int Py_Filter1DFunc(double *iline, npy_intp ilen, - double *oline, npy_intp olen, void *data) -{ - PyArrayObject *py_ibuffer = NULL, *py_obuffer = NULL; - PyObject *rv = NULL, *args = NULL, *tmp = NULL; - npy_intp ii; - double *po = NULL; - NI_PythonCallbackData *cbdata = (NI_PythonCallbackData*)data; - - py_ibuffer = NA_NewArray(iline, PyArray_DOUBLE, 1, &ilen); - py_obuffer = NA_NewArray(NULL, PyArray_DOUBLE, 1, &olen); - if (!py_ibuffer || !py_obuffer) - goto exit; - tmp = Py_BuildValue("(OO)", py_ibuffer, py_obuffer); - if (!tmp) - goto exit; - args = PySequence_Concat(tmp, cbdata->extra_arguments); - if (!args) - goto exit; - rv = PyObject_Call(cbdata->function, args, cbdata->extra_keywords); - if (!rv) - goto exit; - po = (double*)PyArray_DATA(py_obuffer); - for(ii = 0; ii < olen; ii++) - oline[ii] = po[ii]; -exit: - Py_XDECREF(py_ibuffer); - Py_XDECREF(py_obuffer); - Py_XDECREF(rv); - Py_XDECREF(args); - Py_XDECREF(tmp); - return PyErr_Occurred() ? 
0 : 1; -} - -static PyObject *Py_GenericFilter1D(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL; - PyObject *fnc = NULL, *extra_arguments = NULL, *extra_keywords = NULL; - void *func = Py_Filter1DFunc, *data = NULL; - NI_PythonCallbackData cbdata; - int axis, mode; -#if PY_VERSION_HEX < 0x02050000 - long origin, filter_size; -#define FMT "l" -#else - npy_intp origin, filter_size; -#define FMT "n" -#endif - double cval; - - if (!PyArg_ParseTuple(args, "O&O" FMT "iO&id" FMT "OO", - NI_ObjectToInputArray, &input, - &fnc, &filter_size, &axis, - NI_ObjectToOutputArray, &output, - &mode, &cval, &origin, - &extra_arguments, &extra_keywords)) - goto exit; -#undef FMT - - if (!PyTuple_Check(extra_arguments)) { - PyErr_SetString(PyExc_RuntimeError, "extra_arguments must be a tuple"); - goto exit; - } - if (!PyDict_Check(extra_keywords)) { - PyErr_SetString(PyExc_RuntimeError, - "extra_keywords must be a dictionary"); - goto exit; - } - if (NpyCapsule_Check(fnc)) { - func = NpyCapsule_AsVoidPtr(fnc); - data = NpyCapsule_GetDesc(fnc); - } else if (PyCallable_Check(fnc)) { - cbdata.function = fnc; - cbdata.extra_arguments = extra_arguments; - cbdata.extra_keywords = extra_keywords; - data = (void*)&cbdata; - } else { - PyErr_SetString(PyExc_RuntimeError, - "function parameter is not callable"); - goto exit; - } - if (!NI_GenericFilter1D(input, func, data, filter_size, axis, output, - (NI_ExtendMode)mode, cval, origin)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(output); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static int Py_FilterFunc(double *buffer, npy_intp filter_size, - double *output, void *data) -{ - PyArrayObject *py_buffer = NULL; - PyObject *rv = NULL, *args = NULL, *tmp = NULL; - NI_PythonCallbackData *cbdata = (NI_PythonCallbackData*)data; - - py_buffer = NA_NewArray(buffer, PyArray_DOUBLE, 1, &filter_size); - if (!py_buffer) - goto exit; - tmp = Py_BuildValue("(O)", py_buffer); - if (!tmp) - goto exit; - args = PySequence_Concat(tmp, cbdata->extra_arguments); - if (!args) - goto exit; - rv = PyObject_Call(cbdata->function, args, cbdata->extra_keywords); - if (!rv) - goto exit; - *output = PyFloat_AsDouble(rv); -exit: - Py_XDECREF(py_buffer); - Py_XDECREF(rv); - Py_XDECREF(args); - Py_XDECREF(tmp); - return PyErr_Occurred() ? 0 : 1; -} - -static PyObject *Py_GenericFilter(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *footprint = NULL; - PyObject *fnc = NULL, *extra_arguments = NULL, *extra_keywords = NULL; - void *func = Py_FilterFunc, *data = NULL; - NI_PythonCallbackData cbdata; - int mode; - npy_intp *origin = NULL; - double cval; - - if (!PyArg_ParseTuple(args, "O&OO&O&idO&OO", - NI_ObjectToInputArray, &input, - &fnc, - NI_ObjectToInputArray, &footprint, - NI_ObjectToOutputArray, &output, - &mode, &cval, - NI_ObjectToLongSequence, &origin, - &extra_arguments, &extra_keywords)) - goto exit; - if (!PyTuple_Check(extra_arguments)) { - PyErr_SetString(PyExc_RuntimeError, "extra_arguments must be a tuple"); - goto exit; - } - if (!PyDict_Check(extra_keywords)) { - PyErr_SetString(PyExc_RuntimeError, - "extra_keywords must be a dictionary"); - goto exit; - } - if (NpyCapsule_Check(fnc)) { - func = NpyCapsule_AsVoidPtr(fnc); - data = NpyCapsule_GetDesc(fnc); - } else if (PyCallable_Check(fnc)) { - cbdata.function = fnc; - cbdata.extra_arguments = extra_arguments; - cbdata.extra_keywords = extra_keywords; - data = (void*)&cbdata; - } else { - PyErr_SetString(PyExc_RuntimeError, - "function 
parameter is not callable"); - goto exit; - } - if (!NI_GenericFilter(input, func, data, footprint, output, - (NI_ExtendMode)mode, cval, origin)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(output); - Py_XDECREF(footprint); - if (origin) - free(origin); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_FourierFilter(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *parameters = NULL; - int axis, filter_type; -#if PY_VERSION_HEX < 0x02050000 - long n; -#define FMT "l" -#else - npy_intp n; -#define FMT "n" -#endif - - if (!PyArg_ParseTuple(args, "O&O&" FMT "iO&i", - NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, ¶meters, - &n, &axis, - NI_ObjectToOutputArray, &output, - &filter_type)) - goto exit; -#undef FMT - - if (!NI_FourierFilter(input, parameters, n, axis, output, filter_type)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(parameters); - Py_XDECREF(output); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_FourierShift(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *shifts = NULL; - int axis; -#if PY_VERSION_HEX < 0x02050000 - long n; -#define FMT "l" -#else - npy_intp n; -#define FMT "n" -#endif - - if (!PyArg_ParseTuple(args, "O&O&" FMT "iO&", - NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &shifts, - &n, &axis, - NI_ObjectToOutputArray, &output)) - goto exit; -#undef FMT - - if (!NI_FourierShift(input, shifts, n, axis, output)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(shifts); - Py_XDECREF(output); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyObject *Py_SplineFilter1D(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL; - int axis, order; - - if (!PyArg_ParseTuple(args, "O&iiO&", - NI_ObjectToInputArray, &input, - &order, &axis, - NI_ObjectToOutputArray, &output)) - goto exit; - - if (!NI_SplineFilter1D(input, order, axis, output)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(output); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static int Py_Map(npy_intp *ocoor, double* icoor, int orank, int irank, - void *data) -{ - PyObject *coors = NULL, *rets = NULL, *args = NULL, *tmp = NULL; - npy_intp ii; - NI_PythonCallbackData *cbdata = (NI_PythonCallbackData*)data; - - coors = PyTuple_New(orank); - if (!coors) - goto exit; - for(ii = 0; ii < orank; ii++) { -#if PY_VERSION_HEX < 0x02060000 - PyTuple_SetItem(coors, ii, PyLong_FromLong(ocoor[ii])); -#else - PyTuple_SetItem(coors, ii, PyLong_FromSsize_t(ocoor[ii])); -#endif - if (PyErr_Occurred()) - goto exit; - } - tmp = Py_BuildValue("(O)", coors); - if (!tmp) - goto exit; - args = PySequence_Concat(tmp, cbdata->extra_arguments); - if (!args) - goto exit; - rets = PyObject_Call(cbdata->function, args, cbdata->extra_keywords); - if (!rets) - goto exit; - for(ii = 0; ii < irank; ii++) { - icoor[ii] = PyFloat_AsDouble(PyTuple_GetItem(rets, ii)); - if (PyErr_Occurred()) - goto exit; - } -exit: - Py_XDECREF(coors); - Py_XDECREF(tmp); - Py_XDECREF(rets); - Py_XDECREF(args); - return PyErr_Occurred() ? 
0 : 1; -} - - -static PyObject *Py_GeometricTransform(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL; - PyArrayObject *coordinates = NULL, *matrix = NULL, *shift = NULL; - PyObject *fnc = NULL, *extra_arguments = NULL, *extra_keywords = NULL; - int mode, order; - double cval; - void *func = NULL, *data = NULL; - NI_PythonCallbackData cbdata; - - if (!PyArg_ParseTuple(args, "O&OO&O&O&O&iidOO", - NI_ObjectToInputArray, &input, - &fnc, - NI_ObjectToOptionalInputArray, &coordinates, - NI_ObjectToOptionalInputArray, &matrix, - NI_ObjectToOptionalInputArray, &shift, - NI_ObjectToOutputArray, &output, - &order, &mode, &cval, - &extra_arguments, &extra_keywords)) - goto exit; - - if (fnc != Py_None) { - if (!PyTuple_Check(extra_arguments)) { - PyErr_SetString(PyExc_RuntimeError, - "extra_arguments must be a tuple"); - goto exit; - } - if (!PyDict_Check(extra_keywords)) { - PyErr_SetString(PyExc_RuntimeError, - "extra_keywords must be a dictionary"); - goto exit; - } - if (NpyCapsule_Check(fnc)) { - func = NpyCapsule_AsVoidPtr(fnc); - data = NpyCapsule_GetDesc(fnc); - } else if (PyCallable_Check(fnc)) { - func = Py_Map; - cbdata.function = fnc; - cbdata.extra_arguments = extra_arguments; - cbdata.extra_keywords = extra_keywords; - data = (void*)&cbdata; - } else { - PyErr_SetString(PyExc_RuntimeError, - "function parameter is not callable"); - goto exit; - } - } - - if (!NI_GeometricTransform(input, func, data, matrix, shift, coordinates, - output, order, (NI_ExtendMode)mode, cval)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(output); - Py_XDECREF(coordinates); - Py_XDECREF(matrix); - Py_XDECREF(shift); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyObject *Py_ZoomShift(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *shift = NULL; - PyArrayObject *zoom = NULL; - int mode, order; - double cval; - - if (!PyArg_ParseTuple(args, "O&O&O&O&iid", - NI_ObjectToInputArray, &input, - NI_ObjectToOptionalInputArray, &zoom, - NI_ObjectToOptionalInputArray, &shift, - NI_ObjectToOutputArray, &output, - &order, &mode, &cval)) - goto exit; - - if (!NI_ZoomShift(input, zoom, shift, output, order, (NI_ExtendMode)mode, - cval)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(shift); - Py_XDECREF(zoom); - Py_XDECREF(output); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_Label(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *strct = NULL; - npy_intp max_label; - - if (!PyArg_ParseTuple(args, "O&O&O&", - NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &strct, - NI_ObjectToOutputArray, &output)) - goto exit; - - if (!NI_Label(input, strct, &max_label, output)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(strct); - Py_XDECREF(output); -#if PY_VERSION_HEX < 0x02050000 - return PyErr_Occurred() ? NULL : Py_BuildValue("l", (long)max_label); -#else - return PyErr_Occurred() ? 
NULL : Py_BuildValue("n", (npy_intp)max_label); -#endif -} - -static PyObject *Py_FindObjects(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL; - PyObject *result = NULL, *tuple = NULL, *start = NULL, *end = NULL; - PyObject *slc = NULL; - int jj; -#if PY_VERSION_HEX < 0x02050000 - long max_label; -#define FMT "l" -#else - npy_intp max_label; -#define FMT "n" -#endif - npy_intp ii, *regions = NULL; - - if (!PyArg_ParseTuple(args, "O&" FMT, - NI_ObjectToInputArray, &input, &max_label)) - goto exit; -#undef FMT - - if (max_label < 0) - max_label = 0; - if (max_label > 0) { - if (input->nd > 0) { - regions = (npy_intp*)malloc(2 * max_label * input->nd * - sizeof(npy_intp)); - } else { - regions = (npy_intp*)malloc(max_label * sizeof(npy_intp)); - } - if (!regions) { - PyErr_NoMemory(); - goto exit; - } - } - - if (!NI_FindObjects(input, max_label, regions)) - goto exit; - - result = PyList_New(max_label); - if (!result) { - PyErr_NoMemory(); - goto exit; - } - - for(ii = 0; ii < max_label; ii++) { - npy_intp idx = input->nd > 0 ? 
2 * input->nd * ii : ii; - if (regions[idx] >= 0) { - PyObject *tuple = PyTuple_New(input->nd); - if (!tuple) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < input->nd; jj++) { -#if PY_VERSION_HEX < 0x02060000 - start = PyLong_FromLong(regions[idx + jj]); - end = PyLong_FromLong(regions[idx + jj + input->nd]); -#else - start = PyLong_FromSsize_t(regions[idx + jj]); - end = PyLong_FromSsize_t(regions[idx + jj + input->nd]); -#endif - if (!start || !end) { - PyErr_NoMemory(); - goto exit; - } - slc = PySlice_New(start, end, NULL); - if (!slc) { - PyErr_NoMemory(); - goto exit; - } - Py_XDECREF(start); - Py_XDECREF(end); - start = end = NULL; - PyTuple_SetItem(tuple, jj, slc); - slc = NULL; - } - PyList_SetItem(result, ii, tuple); - tuple = NULL; - } else { - Py_INCREF(Py_None); - PyList_SetItem(result, ii, Py_None); - } - } - - Py_INCREF(result); - - exit: - Py_XDECREF(input); - Py_XDECREF(result); - Py_XDECREF(tuple); - Py_XDECREF(start); - Py_XDECREF(end); - Py_XDECREF(slc); - if (regions) - free(regions); - if (PyErr_Occurred()) { - Py_XDECREF(result); - return NULL; - } else { - return result; - } -} - -static PyObject *Py_WatershedIFT(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *markers = NULL; - PyArrayObject *strct = NULL; - - if (!PyArg_ParseTuple(args, "O&O&O&O&", NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &markers, NI_ObjectToInputArray, - &strct, NI_ObjectToOutputArray, &output)) - goto exit; - - if (!NI_WatershedIFT(input, markers, strct, output)) - goto exit; - -exit: - Py_XDECREF(input); - Py_XDECREF(markers); - Py_XDECREF(strct); - Py_XDECREF(output); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyObject *Py_DistanceTransformBruteForce(PyObject *obj, - PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *features = NULL; - PyArrayObject *sampling = NULL; - int metric; - - if (!PyArg_ParseTuple(args, "O&iO&O&O&", - NI_ObjectToInputArray, &input, - &metric, - NI_ObjectToOptionalInputArray, &sampling, - NI_ObjectToOptionalOutputArray, &output, - NI_ObjectToOptionalOutputArray, &features)) - goto exit; - if (!NI_DistanceTransformBruteForce(input, metric, sampling, - output, features)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(sampling); - Py_XDECREF(output); - Py_XDECREF(features); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_DistanceTransformOnePass(PyObject *obj, PyObject *args) -{ - PyArrayObject *strct = NULL, *distances = NULL, *features = NULL; - - if (!PyArg_ParseTuple(args, "O&O&O&", - NI_ObjectToInputArray, &strct, - NI_ObjectToIoArray, &distances, - NI_ObjectToOptionalOutputArray, &features)) - goto exit; - if (!NI_DistanceTransformOnePass(strct, distances, features)) - goto exit; -exit: - Py_XDECREF(strct); - Py_XDECREF(distances); - Py_XDECREF(features); - return PyErr_Occurred() ? NULL : Py_BuildValue(""); -} - -static PyObject *Py_EuclideanFeatureTransform(PyObject *obj, - PyObject *args) -{ - PyArrayObject *input = NULL, *features = NULL, *sampling = NULL; - - if (!PyArg_ParseTuple(args, "O&O&O&", - NI_ObjectToInputArray, &input, - NI_ObjectToOptionalInputArray, &sampling, - NI_ObjectToOutputArray, &features)) - goto exit; - if (!NI_EuclideanFeatureTransform(input, sampling, features)) - goto exit; -exit: - Py_XDECREF(input); - Py_XDECREF(sampling); - Py_XDECREF(features); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -#ifdef NPY_PY3K -static void _FreeCoordinateList(PyObject *obj) -{ - NI_FreeCoordinateList((NI_CoordinateList*)PyCapsule_GetPointer(obj, NULL)); -} -#else -static void _FreeCoordinateList(void* ptr) -{ - NI_FreeCoordinateList((NI_CoordinateList*)ptr); -} -#endif - -static PyObject *Py_BinaryErosion(PyObject *obj, PyObject *args) -{ - PyArrayObject *input = NULL, *output = NULL, *strct = NULL; - PyArrayObject *mask = NULL; - PyObject *cobj = NULL; - int border_value, invert, center_is_true; - int changed = 0, return_coordinates; - NI_CoordinateList *coordinate_list = NULL; - npy_intp *origins = NULL; - - if (!PyArg_ParseTuple(args, "O&O&O&O&iO&iii", - NI_ObjectToInputArray, &input, - NI_ObjectToInputArray, &strct, - NI_ObjectToOptionalInputArray, &mask, - NI_ObjectToOutputArray, &output, - &border_value, - NI_ObjectToLongSequence, &origins, - &invert, ¢er_is_true, &return_coordinates)) - goto exit; - if (!NI_BinaryErosion(input, strct, mask, output, border_value, - origins, invert, center_is_true, &changed, - return_coordinates ? 
&coordinate_list : NULL)) - goto exit; - if (return_coordinates) { - cobj = NpyCapsule_FromVoidPtr(coordinate_list, _FreeCoordinateList); - } -exit: - Py_XDECREF(input); - Py_XDECREF(strct); - Py_XDECREF(mask); - Py_XDECREF(output); - if (origins) - free(origins); - if (PyErr_Occurred()) { - Py_XDECREF(cobj); - return NULL; - } else { - if (return_coordinates) { - return Py_BuildValue("iN", changed, cobj); - } else { - return Py_BuildValue("i", changed); - } - } -} - -static PyObject *Py_BinaryErosion2(PyObject *obj, PyObject *args) -{ - PyArrayObject *array = NULL, *strct = NULL, *mask = NULL; - PyObject *cobj = NULL; - int invert, niter; - npy_intp *origins = NULL; - - if (!PyArg_ParseTuple(args, "O&O&O&iO&iO", - NI_ObjectToIoArray, &array, - NI_ObjectToInputArray, &strct, - NI_ObjectToOptionalInputArray, - &mask, &niter, - NI_ObjectToLongSequence, &origins, - &invert, &cobj)) - goto exit; - - if (NpyCapsule_Check(cobj)) { - NI_CoordinateList *cobj_data = NpyCapsule_AsVoidPtr(cobj); - if (!NI_BinaryErosion2(array, strct, mask, niter, origins, invert, - &cobj_data)) - goto exit; - } else { - PyErr_SetString(PyExc_RuntimeError, "cannot convert CObject"); - goto exit; - } -exit: - Py_XDECREF(array); - Py_XDECREF(strct); - Py_XDECREF(mask); - if (origins) free(origins); - return PyErr_Occurred() ? 
NULL : Py_BuildValue(""); -} - -static PyMethodDef methods[] = { - {"correlate1d", (PyCFunction)Py_Correlate1D, - METH_VARARGS, NULL}, - {"correlate", (PyCFunction)Py_Correlate, - METH_VARARGS, NULL}, - {"uniform_filter1d", (PyCFunction)Py_UniformFilter1D, - METH_VARARGS, NULL}, - {"min_or_max_filter1d", (PyCFunction)Py_MinOrMaxFilter1D, - METH_VARARGS, NULL}, - {"min_or_max_filter", (PyCFunction)Py_MinOrMaxFilter, - METH_VARARGS, NULL}, - {"rank_filter", (PyCFunction)Py_RankFilter, - METH_VARARGS, NULL}, - {"generic_filter", (PyCFunction)Py_GenericFilter, - METH_VARARGS, NULL}, - {"generic_filter1d", (PyCFunction)Py_GenericFilter1D, - METH_VARARGS, NULL}, - {"fourier_filter", (PyCFunction)Py_FourierFilter, - METH_VARARGS, NULL}, - {"fourier_shift", (PyCFunction)Py_FourierShift, - METH_VARARGS, NULL}, - {"spline_filter1d", (PyCFunction)Py_SplineFilter1D, - METH_VARARGS, NULL}, - {"geometric_transform", (PyCFunction)Py_GeometricTransform, - METH_VARARGS, NULL}, - {"zoom_shift", (PyCFunction)Py_ZoomShift, - METH_VARARGS, NULL}, - {"label", (PyCFunction)Py_Label, - METH_VARARGS, NULL}, - {"find_objects", (PyCFunction)Py_FindObjects, - METH_VARARGS, NULL}, - {"watershed_ift", (PyCFunction)Py_WatershedIFT, - METH_VARARGS, NULL}, - {"distance_transform_bf", (PyCFunction)Py_DistanceTransformBruteForce, - METH_VARARGS, NULL}, - {"distance_transform_op", (PyCFunction)Py_DistanceTransformOnePass, - METH_VARARGS, NULL}, - {"euclidean_feature_transform", - (PyCFunction)Py_EuclideanFeatureTransform, - METH_VARARGS, NULL}, - {"binary_erosion", (PyCFunction)Py_BinaryErosion, - METH_VARARGS, NULL}, - {"binary_erosion2", (PyCFunction)Py_BinaryErosion2, - METH_VARARGS, NULL}, - {NULL, NULL, 0, NULL} -}; - -#ifdef NPY_PY3K -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_nd_image", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__nd_image(void) -{ - PyObject *m, *s, *d; - - m = PyModule_Create(&moduledef); - import_array(); - - 
return m; -} -#else -PyMODINIT_FUNC init_nd_image(void) -{ - Py_InitModule("_nd_image", methods); - import_array(); -} -#endif diff --git a/scipy-0.10.1/scipy/ndimage/src/nd_image.h b/scipy-0.10.1/scipy/ndimage/src/nd_image.h deleted file mode 100644 index 8e7d542173..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/nd_image.h +++ /dev/null @@ -1,276 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef ND_IMAGE_H -#define ND_IMAGE_H - -#include "Python.h" - -#ifndef ND_IMPORT_ARRAY -#define NO_IMPORT_ARRAY -#endif - -#include -#undef NO_IMPORT_ARRAY - -#include "numpy/npy_3kcompat.h" - -/* Eventually get rid of everything below this line */ - -typedef enum -{ - tAny=-1, - tBool=PyArray_BOOL, - tInt8=PyArray_INT8, - tUInt8=PyArray_UINT8, - tInt16=PyArray_INT16, - tUInt16=PyArray_UINT16, - tInt32=PyArray_INT32, - tUInt32=PyArray_UINT32, - tInt64=PyArray_INT64, - tUInt64=PyArray_UINT64, - tFloat32=PyArray_FLOAT32, - tFloat64=PyArray_FLOAT64, - tComplex64=PyArray_COMPLEX64, - tComplex128=PyArray_COMPLEX128, - tObject=PyArray_OBJECT, /* placeholder... does nothing */ - tMaxType=PyArray_NTYPES, - tDefault=PyArray_FLOAT64 -} NumarrayType; - -#define NI_MAXDIM NPY_MAXDIMS - -#define MAXDIM NPY_MAXDIMS - -#define HAS_UINT64 1 - - -#ifdef ND_IMPORT_ARRAY - -/* Numarray Helper Functions */ - -static PyArrayObject* -NA_InputArray(PyObject *a, NumarrayType t, int requires) -{ - PyArray_Descr *descr; - if (t == tAny) descr = NULL; - else descr = PyArray_DescrFromType(t); - return (PyArrayObject *) \ - PyArray_CheckFromAny(a, descr, 0, 0, requires, NULL); -} - -/* satisfies ensures that 'a' meets a set of requirements and matches -the specified type. 
-*/ -static int -satisfies(PyArrayObject *a, int requirements, NumarrayType t) -{ - int type_ok = (a->descr->type_num == t) || (t == tAny); - - if (PyArray_ISCARRAY(a)) - return type_ok; - if (PyArray_ISBYTESWAPPED(a) && (requirements & NPY_NOTSWAPPED)) - return 0; - if (!PyArray_ISALIGNED(a) && (requirements & NPY_ALIGNED)) - return 0; - if (!PyArray_ISCONTIGUOUS(a) && (requirements & NPY_CONTIGUOUS)) - return 0; - if (!PyArray_ISWRITEABLE(a) && (requirements & NPY_WRITEABLE)) - return 0; - if (requirements & NPY_ENSURECOPY) - return 0; - return type_ok; -} - -static PyArrayObject * -NA_OutputArray(PyObject *a, NumarrayType t, int requires) -{ - PyArray_Descr *dtype; - PyArrayObject *ret; - - if (!PyArray_Check(a) || !PyArray_ISWRITEABLE(a)) { - PyErr_Format(PyExc_TypeError, - "NA_OutputArray: only writeable arrays work for output."); - return NULL; - } - - if (satisfies((PyArrayObject *)a, requires, t)) { - Py_INCREF(a); - return (PyArrayObject *)a; - } - if (t == tAny) { - dtype = PyArray_DESCR(a); - Py_INCREF(dtype); - } - else { - dtype = PyArray_DescrFromType(t); - } - ret = (PyArrayObject *)PyArray_Empty(PyArray_NDIM(a), PyArray_DIMS(a), - dtype, 0); - ret->flags |= NPY_UPDATEIFCOPY; - ret->base = a; - PyArray_FLAGS(a) &= ~NPY_WRITEABLE; - Py_INCREF(a); - return ret; -} - -/* NA_IoArray is a combination of NA_InputArray and NA_OutputArray. - -Unlike NA_OutputArray, if a temporary is required it is initialized to a copy -of the input array. - -Unlike NA_InputArray, deallocating any resulting temporary array results in a -copy from the temporary back to the original. -*/ -static PyArrayObject * -NA_IoArray(PyObject *a, NumarrayType t, int requires) -{ - PyArrayObject *shadow = NA_InputArray(a, t, requires | NPY_UPDATEIFCOPY ); - - if (!shadow) return NULL; - - /* Guard against non-writable, but otherwise satisfying requires. - In this case, shadow == a. 
- */ - if (!PyArray_ISWRITEABLE(shadow)) { - PyErr_Format(PyExc_TypeError, - "NA_IoArray: I/O array must be writable array"); - PyArray_XDECREF_ERR(shadow); - return NULL; - } - - return shadow; -} - -#define NUM_LITTLE_ENDIAN 0 -#define NUM_BIG_ENDIAN 1 - -static int -NA_ByteOrder(void) -{ - unsigned int byteorder_test; - byteorder_test = 1; - if (*((char *) &byteorder_test)) - return NUM_LITTLE_ENDIAN; - else - return NUM_BIG_ENDIAN; -} - -/* ignores bytestride */ -static PyArrayObject * -NA_NewAllFromBuffer(int ndim, npy_intp *shape, NumarrayType type, - PyObject *bufferObject, npy_intp byteoffset, - npy_intp bytestride, int byteorder, int aligned, - int writeable) -{ - PyArrayObject *self = NULL; - PyArray_Descr *dtype; - - if (type == tAny) - type = tDefault; - - dtype = PyArray_DescrFromType(type); - if (dtype == NULL) return NULL; - - if (byteorder != NA_ByteOrder()) { - PyArray_Descr *temp; - temp = PyArray_DescrNewByteorder(dtype, PyArray_SWAP); - Py_DECREF(dtype); - if (temp == NULL) return NULL; - dtype = temp; - } - - if (bufferObject == Py_None || bufferObject == NULL) { - self = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, dtype, - ndim, shape, NULL, NULL, - 0, NULL); - } - else { - npy_intp size = 1; - int i; - PyArrayObject *newself; - PyArray_Dims newdims; - for(i=0; idata, buffer, PyArray_NBYTES(result)); - } else { - memset(result->data, 0, PyArray_NBYTES(result)); - } - } - } - return result; -} - -/* Create a new numarray which is initially a C_array, or which -references a C_array: aligned, !byteswapped, contiguous, ... -Call with buffer==NULL to allocate storage. 
-*/ -static PyArrayObject * -NA_NewArray(void *buffer, NumarrayType type, int ndim, npy_intp *shape) -{ - return (PyArrayObject *) NA_NewAll(ndim, shape, type, buffer, 0, 0, - NA_ByteOrder(), 1, 1); -} - -#endif /* ND_IMPORT_ARRAY */ - -#endif /* ND_IMAGE_H */ diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_filters.c b/scipy-0.10.1/scipy/ndimage/src/ni_filters.c deleted file mode 100644 index 8dd02aa7f5..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_filters.c +++ /dev/null @@ -1,889 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "ni_support.h" -#include "ni_filters.h" -#include -#include - -#define BUFFER_SIZE 256000 - -int NI_Correlate1D(PyArrayObject *input, PyArrayObject *weights, - int axis, PyArrayObject *output, NI_ExtendMode mode, - double cval, npy_intp origin) -{ - int symmetric = 0, more; - npy_intp ii, jj, ll, lines, length, size1, size2, filter_size; - double *ibuffer = NULL, *obuffer = NULL; - Float64 *fw; - NI_LineBuffer iline_buffer, oline_buffer; - - /* test for symmetry or anti-symmetry: */ - filter_size = weights->dimensions[0]; - size1 = filter_size / 2; - size2 = filter_size - size1 - 1; - fw = (void *)PyArray_DATA(weights); - if (filter_size & 0x1) { - symmetric = 1; - for(ii = 1; ii <= filter_size / 2; ii++) { - if (fabs(fw[ii + size1] - fw[size1 - ii]) > DBL_EPSILON) { - symmetric = 0; - break; - } - } - if (symmetric == 0) { - symmetric = -1; - for(ii = 1; ii <= filter_size / 2; ii++) { - if (fabs(fw[size1 + ii] + fw[size1 - ii]) > DBL_EPSILON) { - symmetric = 0; - break; - } - } - } - } - /* allocate and initialize the line buffers: */ - lines = -1; - if (!NI_AllocateLineBuffer(input, axis, size1 + origin, size2 - origin, - &lines, BUFFER_SIZE, &ibuffer)) - goto exit; - if (!NI_AllocateLineBuffer(output, axis, 0, 0, &lines, BUFFER_SIZE, - &obuffer)) - goto exit; - if (!NI_InitLineBuffer(input, axis, size1 + origin, size2 - origin, - lines, ibuffer, mode, cval, &iline_buffer)) - goto exit; - if (!NI_InitLineBuffer(output, axis, 0, 0, lines, obuffer, mode, 0.0, - &oline_buffer)) - goto exit; - length = input->nd > 0 ? 
input->dimensions[axis] : 1; - fw += size1; - /* iterate over all the array lines: */ - do { - /* copy lines from array to buffer: */ - if (!NI_ArrayToLineBuffer(&iline_buffer, &lines, &more)) - goto exit; - /* iterate over the lines in the buffers: */ - for(ii = 0; ii < lines; ii++) { - /* get lines: */ - double *iline = NI_GET_LINE(iline_buffer, ii) + size1; - double *oline = NI_GET_LINE(oline_buffer, ii); - /* the correlation calculation: */ - if (symmetric > 0) { - for(ll = 0; ll < length; ll++) { - oline[ll] = iline[0] * fw[0]; - for(jj = -size1 ; jj < 0; jj++) - oline[ll] += (iline[jj] + iline[-jj]) * fw[jj]; - ++iline; - } - } else if (symmetric < 0) { - for(ll = 0; ll < length; ll++) { - oline[ll] = iline[0] * fw[0]; - for(jj = -size1 ; jj < 0; jj++) - oline[ll] += (iline[jj] - iline[-jj]) * fw[jj]; - ++iline; - } - } else { - for(ll = 0; ll < length; ll++) { - oline[ll] = iline[size2] * fw[size2]; - for(jj = -size1; jj < size2; jj++) - oline[ll] += iline[jj] * fw[jj]; - ++iline; - } - } - } - /* copy lines from buffer to array: */ - if (!NI_LineBufferToArray(&oline_buffer)) - goto exit; - } while(more); -exit: - if (ibuffer) free(ibuffer); - if (obuffer) free(obuffer); - return PyErr_Occurred() ? 
0 : 1; -} - -#define CASE_CORRELATE_POINT(_pi, _weights, _offsets, _filter_size, \ - _cvalue, _type, _res, _mv) \ -case t ## _type: \ -{ \ - npy_intp _ii, _offset; \ - for(_ii = 0; _ii < _filter_size; _ii++) { \ - _offset = _offsets[_ii]; \ - if (_offset == _mv) \ - _res += _weights[_ii] * _cvalue; \ - else \ - _res += _weights[_ii] * (double)*(_type*)(_pi + _offset); \ - } \ -} \ -break - -#define CASE_FILTER_OUT(_po, _tmp, _type) \ -case t ## _type: \ - *(_type*)_po = (_type)_tmp; \ - break - -int NI_Correlate(PyArrayObject* input, PyArrayObject* weights, - PyArrayObject* output, NI_ExtendMode mode, - double cvalue, npy_intp *origins) -{ - Bool *pf = NULL; - npy_intp fsize, jj, kk, filter_size = 0, border_flag_value; - npy_intp *offsets = NULL, *oo, size; - NI_FilterIterator fi; - NI_Iterator ii, io; - char *pi, *po; - Float64 *pw; - Float64 *ww = NULL; - int ll; - - /* get the the footprint: */ - fsize = 1; - for(ll = 0; ll < weights->nd; ll++) - fsize *= weights->dimensions[ll]; - pw = (Float64*)PyArray_DATA(weights); - pf = (Bool*)malloc(fsize * sizeof(Bool)); - if (!pf) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < fsize; jj++) { - if (fabs(pw[jj]) > DBL_EPSILON) { - pf[jj] = 1; - ++filter_size; - } else { - pf[jj] = 0; - } - } - /* copy the weights to contiguous memory: */ - ww = (Float64*)malloc(filter_size * sizeof(Float64)); - if (!ww) { - PyErr_NoMemory(); - goto exit; - } - jj = 0; - for(kk = 0; kk < fsize; kk++) { - if (pf[kk]) { - ww[jj++] = pw[kk]; - } - } - /* initialize filter offsets: */ - if (!NI_InitFilterOffsets(input, pf, weights->dimensions, origins, - mode, &offsets, &border_flag_value, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(input->nd, weights->dimensions, filter_size, - input->dimensions, origins, &fi)) - goto exit; - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if 
(!NI_InitPointIterator(output, &io)) - goto exit; - /* get data pointers an array size: */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* iterator over the elements: */ - oo = offsets; - for(jj = 0; jj < size; jj++) { - double tmp = 0.0; - switch (input->descr->type_num) { - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Bool, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, UInt8, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, UInt16, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, UInt32, - tmp, border_flag_value); -#if HAS_UINT64 - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, UInt64, - tmp, border_flag_value); -#endif - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Int8, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Int16, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Int32, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Int64, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Float32, - tmp, border_flag_value); - CASE_CORRELATE_POINT(pi, ww, oo, filter_size, cvalue, Float64, - tmp, border_flag_value); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FILTER_OUT(po, tmp, Bool); - CASE_FILTER_OUT(po, tmp, UInt8); - CASE_FILTER_OUT(po, tmp, UInt16); - CASE_FILTER_OUT(po, tmp, UInt32); -#if HAS_UINT64 - CASE_FILTER_OUT(po, tmp, UInt64); -#endif - CASE_FILTER_OUT(po, tmp, Int8); - CASE_FILTER_OUT(po, tmp, Int16); - CASE_FILTER_OUT(po, tmp, Int32); - CASE_FILTER_OUT(po, tmp, Int64); - CASE_FILTER_OUT(po, tmp, Float32); - CASE_FILTER_OUT(po, tmp, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, 
"array type not supported"); - goto exit; - } - NI_FILTER_NEXT2(fi, ii, io, oo, pi, po); - } -exit: - if (offsets) free(offsets); - if (ww) free(ww); - if (pf) free(pf); - return PyErr_Occurred() ? 0 : 1; -} - -int -NI_UniformFilter1D(PyArrayObject *input, npy_intp filter_size, - int axis, PyArrayObject *output, NI_ExtendMode mode, - double cval, npy_intp origin) -{ - npy_intp lines, kk, ll, length, size1, size2; - int more; - double *ibuffer = NULL, *obuffer = NULL; - NI_LineBuffer iline_buffer, oline_buffer; - - size1 = filter_size / 2; - size2 = filter_size - size1 - 1; - /* allocate and initialize the line buffers: */ - lines = -1; - if (!NI_AllocateLineBuffer(input, axis, size1 + origin, size2 - origin, - &lines, BUFFER_SIZE, &ibuffer)) - goto exit; - if (!NI_AllocateLineBuffer(output, axis, 0, 0, &lines, BUFFER_SIZE, - &obuffer)) - goto exit; - if (!NI_InitLineBuffer(input, axis, size1 + origin, size2 - origin, - lines, ibuffer, mode, cval, &iline_buffer)) - goto exit; - if (!NI_InitLineBuffer(output, axis, 0, 0, lines, obuffer, mode, 0.0, - &oline_buffer)) - goto exit; - length = input->nd > 0 ? 
input->dimensions[axis] : 1; - - /* iterate over all the array lines: */ - do { - /* copy lines from array to buffer: */ - if (!NI_ArrayToLineBuffer(&iline_buffer, &lines, &more)) - goto exit; - /* iterate over the lines in the buffers: */ - for(kk = 0; kk < lines; kk++) { - /* get lines: */ - double *iline = NI_GET_LINE(iline_buffer, kk); - double *oline = NI_GET_LINE(oline_buffer, kk); - /* do the uniform filter: */ - double tmp = 0.0; - double *l1 = iline; - double *l2 = iline + filter_size; - for(ll = 0; ll < filter_size; ll++) - tmp += iline[ll]; - tmp /= (double)filter_size; - oline[0] = tmp; - for(ll = 1; ll < length; ll++) { - tmp += (*l2++ - *l1++) / (double)filter_size; - oline[ll] = tmp; - } - } - /* copy lines from buffer to array: */ - if (!NI_LineBufferToArray(&oline_buffer)) - goto exit; - } while(more); - - exit: - if (ibuffer) free(ibuffer); - if (obuffer) free(obuffer); - return PyErr_Occurred() ? 0 : 1; -} - -int -NI_MinOrMaxFilter1D(PyArrayObject *input, npy_intp filter_size, - int axis, PyArrayObject *output, NI_ExtendMode mode, - double cval, npy_intp origin, int minimum) -{ - npy_intp lines, kk, jj, ll, length, size1, size2; - int more; - double *ibuffer = NULL, *obuffer = NULL; - NI_LineBuffer iline_buffer, oline_buffer; - - size1 = filter_size / 2; - size2 = filter_size - size1 - 1; - /* allocate and initialize the line buffers: */ - lines = -1; - if (!NI_AllocateLineBuffer(input, axis, size1 + origin, size2 - origin, - &lines, BUFFER_SIZE, &ibuffer)) - goto exit; - if (!NI_AllocateLineBuffer(output, axis, 0, 0, &lines, BUFFER_SIZE, - &obuffer)) - goto exit; - if (!NI_InitLineBuffer(input, axis, size1 + origin, size2 - origin, - lines, ibuffer, mode, cval, &iline_buffer)) - goto exit; - if (!NI_InitLineBuffer(output, axis, 0, 0, lines, obuffer, mode, 0.0, - &oline_buffer)) - goto exit; - length = input->nd > 0 ? 
input->dimensions[axis] : 1; - - /* iterate over all the array lines: */ - do { - /* copy lines from array to buffer: */ - if (!NI_ArrayToLineBuffer(&iline_buffer, &lines, &more)) - goto exit; - /* iterate over the lines in the buffers: */ - for(kk = 0; kk < lines; kk++) { - /* get lines: */ - double *iline = NI_GET_LINE(iline_buffer, kk) + size1; - double *oline = NI_GET_LINE(oline_buffer, kk); - for(ll = 0; ll < length; ll++) { - /* find minimum or maximum filter: */ - double val = iline[ll - size1]; - for(jj = -size1 + 1; jj <= size2; jj++) { - double tmp = iline[ll + jj]; - if (minimum) { - if (tmp < val) - val = tmp; - } else { - if (tmp > val) - val = tmp; - } - } - oline[ll] = val; - } - } - /* copy lines from buffer to array: */ - if (!NI_LineBufferToArray(&oline_buffer)) - goto exit; - } while(more); - - exit: - if (ibuffer) free(ibuffer); - if (obuffer) free(obuffer); - return PyErr_Occurred() ? 0 : 1; -} - - -#define CASE_MIN_OR_MAX_POINT(_pi, _offsets, _filter_size, _cval, \ - _type, _minimum, _res, _mv, _ss) \ -case t ## _type: \ -{ \ - npy_intp _ii, _oo = *_offsets; \ - _type _cv = (_type)_cval, _tmp; \ - _res = _oo == _mv ? _cv : *(_type*)(_pi + _oo); \ - if (_ss) \ - _res += *_ss; \ - for(_ii = 1; _ii < _filter_size; _ii++) { \ - _oo = _offsets[_ii]; \ - _tmp = _oo == _mv ? 
_cv : *(_type*)(_pi + _oo); \ - if (_ss) \ - _tmp += _ss[_ii]; \ - if (_minimum) { \ - if (_tmp < _res) \ - _res = (_type)_tmp; \ - } else { \ - if (_tmp > _res) \ - _res = (_type)_tmp; \ - } \ - } \ -} \ -break - -int NI_MinOrMaxFilter(PyArrayObject* input, PyArrayObject* footprint, - PyArrayObject* structure, PyArrayObject* output, - NI_ExtendMode mode, double cvalue, npy_intp *origins, - int minimum) -{ - Bool *pf = NULL; - npy_intp fsize, jj, kk, filter_size = 0, border_flag_value; - npy_intp *offsets = NULL, *oo, size; - NI_FilterIterator fi; - NI_Iterator ii, io; - char *pi, *po; - int ll; - double *ss = NULL; - Float64 *ps; - - /* get the the footprint: */ - fsize = 1; - for(ll = 0; ll < footprint->nd; ll++) - fsize *= footprint->dimensions[ll]; - pf = (Bool*)PyArray_DATA(footprint); - for(jj = 0; jj < fsize; jj++) { - if (pf[jj]) { - ++filter_size; - } - } - /* get the structure: */ - if (structure) { - ss = (double*)malloc(filter_size * sizeof(double)); - if (!ss) { - PyErr_NoMemory(); - goto exit; - } - /* copy the weights to contiguous memory: */ - ps = (Float64*)PyArray_DATA(structure); - jj = 0; - for(kk = 0; kk < fsize; kk++) - if (pf[kk]) - ss[jj++] = minimum ? 
-ps[kk] : ps[kk]; - } - /* initialize filter offsets: */ - if (!NI_InitFilterOffsets(input, pf, footprint->dimensions, origins, - mode, &offsets, &border_flag_value, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(input->nd, footprint->dimensions, - filter_size, input->dimensions, origins, &fi)) - goto exit; - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - /* get data pointers an array size: */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* iterator over the elements: */ - oo = offsets; - for(jj = 0; jj < size; jj++) { - double tmp = 0.0; - switch (input->descr->type_num) { - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Bool, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, UInt8, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, UInt16, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, UInt32, - minimum, tmp, border_flag_value, ss); -#if HAS_UINT64 - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, UInt64, - minimum, tmp, border_flag_value, ss); -#endif - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Int8, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Int16, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Int32, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Int64, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Float32, - minimum, tmp, border_flag_value, ss); - CASE_MIN_OR_MAX_POINT(pi, oo, filter_size, cvalue, Float64, - minimum, tmp, 
border_flag_value, ss); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FILTER_OUT(po, tmp, Bool); - CASE_FILTER_OUT(po, tmp, UInt8); - CASE_FILTER_OUT(po, tmp, UInt16); - CASE_FILTER_OUT(po, tmp, UInt32); -#if HAS_UINT64 - CASE_FILTER_OUT(po, tmp, UInt64); -#endif - CASE_FILTER_OUT(po, tmp, Int8); - CASE_FILTER_OUT(po, tmp, Int16); - CASE_FILTER_OUT(po, tmp, Int32); - CASE_FILTER_OUT(po, tmp, Int64); - CASE_FILTER_OUT(po, tmp, Float32); - CASE_FILTER_OUT(po, tmp, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - NI_FILTER_NEXT2(fi, ii, io, oo, pi, po); - } -exit: - if (offsets) free(offsets); - if (ss) free(ss); - return PyErr_Occurred() ? 0 : 1; -} - -static double NI_Select(double *buffer, int min, int max, int rank) -{ - int ii, jj; - double x, t; - - if (min == max) - return buffer[min]; - - x = buffer[min]; - ii = min - 1; - jj = max + 1; - for(;;) { - do - jj--; - while(buffer[jj] > x); - do - ii++; - while(buffer[ii] < x); - if (ii < jj) { - t = buffer[ii]; - buffer[ii] = buffer[jj]; - buffer[jj] = t; - } else { - break; - } - } - - ii = jj - min + 1; - if (rank < ii) - return NI_Select(buffer, min, jj, rank); - else - return NI_Select(buffer, jj + 1, max, rank - ii); -} - -#define CASE_RANK_POINT(_pi, _offsets, _filter_size, _cval, _type, \ - _rank, _buffer, _res, _mv) \ -case t ## _type: \ -{ \ - npy_intp _ii; \ - for(_ii = 0; _ii < _filter_size; _ii++) { \ - npy_intp _offset = _offsets[_ii]; \ - if (_offset == _mv) \ - _buffer[_ii] = (_type)_cval; \ - else \ - _buffer[_ii] = *(_type*)(_pi + _offsets[_ii]); \ - } \ - _res = (_type)NI_Select(_buffer, 0, _filter_size - 1, _rank); \ -} \ -break - -int NI_RankFilter(PyArrayObject* input, int rank, - PyArrayObject* footprint, PyArrayObject* output, - NI_ExtendMode mode, double cvalue, npy_intp *origins) -{ - npy_intp fsize, jj, filter_size = 0, border_flag_value; - 
npy_intp *offsets = NULL, *oo, size; - NI_FilterIterator fi; - NI_Iterator ii, io; - char *pi, *po; - Bool *pf = NULL; - double *buffer = NULL; - int ll; - - /* get the the footprint: */ - fsize = 1; - for(ll = 0; ll < footprint->nd; ll++) - fsize *= footprint->dimensions[ll]; - pf = (Bool*)PyArray_DATA(footprint); - for(jj = 0; jj < fsize; jj++) { - if (pf[jj]) { - ++filter_size; - } - } - /* buffer for rank calculation: */ - buffer = (double*)malloc(filter_size * sizeof(double)); - if (!buffer) { - PyErr_NoMemory(); - goto exit; - } - /* iterator over the elements: */ - oo = offsets; - /* initialize filter offsets: */ - if (!NI_InitFilterOffsets(input, pf, footprint->dimensions, origins, - mode, &offsets, &border_flag_value, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(input->nd, footprint->dimensions, - filter_size, input->dimensions, origins, &fi)) - goto exit; - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - /* get data pointers an array size: */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* iterator over the elements: */ - oo = offsets; - for(jj = 0; jj < size; jj++) { - double tmp = 0.0; - switch (input->descr->type_num) { - CASE_RANK_POINT(pi, oo, filter_size, cvalue, Bool, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, UInt8, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, UInt16, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, UInt32, - rank, buffer, tmp, border_flag_value); -#if HAS_UINT64 - CASE_RANK_POINT(pi, oo, filter_size, cvalue, UInt64, - rank, buffer, tmp, border_flag_value); -#endif - CASE_RANK_POINT(pi, oo, filter_size, cvalue, 
Int8, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, Int16, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, Int32, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, Int64, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, Float32, - rank, buffer, tmp, border_flag_value); - CASE_RANK_POINT(pi, oo, filter_size, cvalue, Float64, - rank, buffer, tmp, border_flag_value); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FILTER_OUT(po, tmp, Bool); - CASE_FILTER_OUT(po, tmp, UInt8); - CASE_FILTER_OUT(po, tmp, UInt16); - CASE_FILTER_OUT(po, tmp, UInt32); -#if HAS_UINT64 - CASE_FILTER_OUT(po, tmp, UInt64); -#endif - CASE_FILTER_OUT(po, tmp, Int8); - CASE_FILTER_OUT(po, tmp, Int16); - CASE_FILTER_OUT(po, tmp, Int32); - CASE_FILTER_OUT(po, tmp, Int64); - CASE_FILTER_OUT(po, tmp, Float32); - CASE_FILTER_OUT(po, tmp, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - NI_FILTER_NEXT2(fi, ii, io, oo, pi, po); - } -exit: - if (offsets) free(offsets); - if (buffer) free(buffer); - return PyErr_Occurred() ? 
0 : 1; -} - -int NI_GenericFilter1D(PyArrayObject *input, - int (*function)(double*, npy_intp, double*, npy_intp, void*), - void* data, npy_intp filter_size, int axis, PyArrayObject *output, - NI_ExtendMode mode, double cval, npy_intp origin) -{ - int more; - npy_intp ii, lines, length, size1, size2; - double *ibuffer = NULL, *obuffer = NULL; - NI_LineBuffer iline_buffer, oline_buffer; - - /* allocate and initialize the line buffers: */ - size1 = filter_size / 2; - size2 = filter_size - size1 - 1; - lines = -1; - if (!NI_AllocateLineBuffer(input, axis, size1 + origin, size2 - origin, - &lines, BUFFER_SIZE, &ibuffer)) - goto exit; - if (!NI_AllocateLineBuffer(output, axis, 0, 0, &lines, BUFFER_SIZE, - &obuffer)) - goto exit; - if (!NI_InitLineBuffer(input, axis, size1 + origin, size2 - origin, - lines, ibuffer, mode, cval, &iline_buffer)) - goto exit; - if (!NI_InitLineBuffer(output, axis, 0, 0, lines, obuffer, mode, 0.0, - &oline_buffer)) - goto exit; - length = input->nd > 0 ? input->dimensions[axis] : 1; - /* iterate over all the array lines: */ - do { - /* copy lines from array to buffer: */ - if (!NI_ArrayToLineBuffer(&iline_buffer, &lines, &more)) - goto exit; - /* iterate over the lines in the buffers: */ - for(ii = 0; ii < lines; ii++) { - /* get lines: */ - double *iline = NI_GET_LINE(iline_buffer, ii); - double *oline = NI_GET_LINE(oline_buffer, ii); - if (!function(iline, length + size1 + size2, oline, length, data)) { - if (!PyErr_Occurred()) - PyErr_SetString(PyExc_RuntimeError, - "unknown error in line processing function"); - goto exit; - } - } - /* copy lines from buffer to array: */ - if (!NI_LineBufferToArray(&oline_buffer)) - goto exit; - } while(more); -exit: - if (ibuffer) free(ibuffer); - if (obuffer) free(obuffer); - return PyErr_Occurred() ? 
0 : 1; -} - -#define CASE_FILTER_POINT(_pi, _offsets, _filter_size, _cvalue, _type, \ - _res, _mv, _function, _data, _buffer) \ -case t ## _type: \ -{ \ - npy_intp _ii, _offset; \ - for(_ii = 0; _ii < _filter_size; _ii++) { \ - _offset = _offsets[_ii]; \ - if (_offset == _mv) \ - _buffer[_ii] = (double)_cvalue; \ - else \ - _buffer[_ii] = (double)*(_type*)(_pi + _offset); \ - } \ - if (!_function(_buffer, _filter_size, &_res, _data)) { \ - if (!PyErr_Occurred()) \ - PyErr_SetString(PyExc_RuntimeError, \ - "unknown error in filter function"); \ - goto exit; \ - } \ -} \ -break - - -int NI_GenericFilter(PyArrayObject* input, - int (*function)(double*, npy_intp, double*, void*), void *data, - PyArrayObject* footprint, PyArrayObject* output, - NI_ExtendMode mode, double cvalue, npy_intp *origins) -{ - Bool *pf = NULL; - npy_intp fsize, jj, filter_size = 0, border_flag_value; - npy_intp *offsets = NULL, *oo, size; - NI_FilterIterator fi; - NI_Iterator ii, io; - char *pi, *po; - double *buffer = NULL; - int ll; - - /* get the the footprint: */ - fsize = 1; - for(ll = 0; ll < footprint->nd; ll++) - fsize *= footprint->dimensions[ll]; - pf = (Bool*)PyArray_DATA(footprint); - for(jj = 0; jj < fsize; jj++) { - if (pf[jj]) - ++filter_size; - } - /* initialize filter offsets: */ - if (!NI_InitFilterOffsets(input, pf, footprint->dimensions, origins, - mode, &offsets, &border_flag_value, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(input->nd, footprint->dimensions, - filter_size, input->dimensions, origins, &fi)) - goto exit; - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - /* get data pointers an array size: */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* buffer for filter 
calculation: */ - buffer = (double*)malloc(filter_size * sizeof(double)); - if (!buffer) { - PyErr_NoMemory(); - goto exit; - } - /* iterate over the elements: */ - oo = offsets; - for(jj = 0; jj < size; jj++) { - double tmp = 0.0; - switch (input->descr->type_num) { - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Bool, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, UInt8, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, UInt16, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, UInt32, - tmp, border_flag_value, function, data, buffer); -#if HAS_UINT64 - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, UInt64, - tmp, border_flag_value, function, data, buffer); -#endif - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Int8, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Int16, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Int32, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Int64, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Float32, - tmp, border_flag_value, function, data, buffer); - CASE_FILTER_POINT(pi, oo, filter_size, cvalue, Float64, - tmp, border_flag_value, function, data, buffer); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FILTER_OUT(po, tmp, Bool); - CASE_FILTER_OUT(po, tmp, UInt8); - CASE_FILTER_OUT(po, tmp, UInt16); - CASE_FILTER_OUT(po, tmp, UInt32); -#if HAS_UINT64 - CASE_FILTER_OUT(po, tmp, UInt64); -#endif - CASE_FILTER_OUT(po, tmp, Int8); - CASE_FILTER_OUT(po, tmp, Int16); - CASE_FILTER_OUT(po, tmp, Int32); - CASE_FILTER_OUT(po, tmp, Int64); - CASE_FILTER_OUT(po, tmp, Float32); - 
CASE_FILTER_OUT(po, tmp, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - goto exit; - } - NI_FILTER_NEXT2(fi, ii, io, oo, pi, po); - } -exit: - if (offsets) free(offsets); - if (buffer) free(buffer); - return PyErr_Occurred() ? 0 : 1; -} diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_filters.h b/scipy-0.10.1/scipy/ndimage/src/ni_filters.h deleted file mode 100644 index c35506881b..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_filters.h +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef NI_FILTERS_H -#define NI_FILTERS_H - -int NI_Correlate1D(PyArrayObject*, PyArrayObject*, int, PyArrayObject*, - NI_ExtendMode, double, npy_intp); -int NI_Correlate(PyArrayObject*, PyArrayObject*, PyArrayObject*, - NI_ExtendMode, double, npy_intp*); -int NI_UniformFilter1D(PyArrayObject*, npy_intp, int, PyArrayObject*, - NI_ExtendMode, double, npy_intp); -int NI_MinOrMaxFilter1D(PyArrayObject*, npy_intp, int, PyArrayObject*, - NI_ExtendMode, double, npy_intp, int); -int NI_MinOrMaxFilter(PyArrayObject*, PyArrayObject*, PyArrayObject*, - PyArrayObject*, NI_ExtendMode, double, npy_intp*, - int); -int NI_RankFilter(PyArrayObject*, int, PyArrayObject*, PyArrayObject*, - NI_ExtendMode, double, npy_intp*); -int NI_GenericFilter1D(PyArrayObject*, int (*)(double*, npy_intp, - double*, npy_intp, void*), void*, npy_intp, int, - PyArrayObject*, NI_ExtendMode, double, npy_intp); -int NI_GenericFilter(PyArrayObject*, int (*)(double*, npy_intp, double*, - void*), void*, PyArrayObject*, PyArrayObject*, - NI_ExtendMode, double, npy_intp*); -#endif diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_fourier.c b/scipy-0.10.1/scipy/ndimage/src/ni_fourier.c deleted file mode 100644 index 1e2373293b..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_fourier.c +++ /dev/null @@ -1,549 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. 
The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "ni_support.h" -#include -#include -#include - -#if !defined(M_PI) -#define M_PI 3.14159265358979323846 -#endif - -#define _NI_GAUSSIAN 0 -#define _NI_UNIFORM 1 -#define _NI_ELLIPSOID 2 - -static double polevl(double x, const double coef[], int N) -{ - double ans; - const double *p = coef; - int i = N; - - ans = *p++; - do - ans = ans * x + *p++; - while(--i); - - return ans ; -} - -double p1evl(double x, const double coef[], int N) -{ - double ans; - const double *p = coef; - int i = N - 1; - - ans = x + *p++; - do - ans = ans * x + *p++; - while(--i); - - return ans; -} - -#define THPIO4 2.35619449019234492885 -#define SQ2OPI .79788456080286535588 -#define Z1 1.46819706421238932572E1 -#define Z2 4.92184563216946036703E1 - -static double _bessel_j1(double x) -{ - double w, z, p, q, xn; - const double RP[4] = { - -8.99971225705559398224E8, - 4.52228297998194034323E11, - -7.27494245221818276015E13, - 3.68295732863852883286E15, - }; - const double RQ[8] = { - 6.20836478118054335476E2, - 2.56987256757748830383E5, - 8.35146791431949253037E7, - 2.21511595479792499675E10, 
- 4.74914122079991414898E12, - 7.84369607876235854894E14, - 8.95222336184627338078E16, - 5.32278620332680085395E18, - }; - const double PP[7] = { - 7.62125616208173112003E-4, - 7.31397056940917570436E-2, - 1.12719608129684925192E0, - 5.11207951146807644818E0, - 8.42404590141772420927E0, - 5.21451598682361504063E0, - 1.00000000000000000254E0, - }; - const double PQ[7] = { - 5.71323128072548699714E-4, - 6.88455908754495404082E-2, - 1.10514232634061696926E0, - 5.07386386128601488557E0, - 8.39985554327604159757E0, - 5.20982848682361821619E0, - 9.99999999999999997461E-1, - }; - const double QP[8] = { - 5.10862594750176621635E-2, - 4.98213872951233449420E0, - 7.58238284132545283818E1, - 3.66779609360150777800E2, - 7.10856304998926107277E2, - 5.97489612400613639965E2, - 2.11688757100572135698E2, - 2.52070205858023719784E1, - }; - const double QQ[7] = { - 7.42373277035675149943E1, - 1.05644886038262816351E3, - 4.98641058337653607651E3, - 9.56231892404756170795E3, - 7.99704160447350683650E3, - 2.82619278517639096600E3, - 3.36093607810698293419E2, - }; - - w = x; - if (x < 0) - w = -x; - - if (w <= 5.0) { - z = x * x; - w = polevl(z, RP, 3) / p1evl(z, RQ, 8); - w = w * x * (z - Z1) * (z - Z2); - return w ; - } - - w = 5.0 / x; - z = w * w; - p = polevl(z, PP, 6) / polevl(z, PQ, 6); - q = polevl(z, QP, 7) / p1evl(z, QQ, 7); - xn = x - THPIO4; - p = p * cos(xn) - w * q * sin(xn); - return p * SQ2OPI / sqrt(x); -} - -#define CASE_FOURIER_OUT_RR(_po, _tmp, _type) \ -case t ## _type: \ - *(_type*)_po = _tmp; \ - break - -#define CASE_FOURIER_OUT_RC(_po, _tmp, _type) \ -case t ## _type: \ - (*(_type*)_po).real = tmp; \ - (*(_type*)_po).imag = 0.0; \ - break - -#define CASE_FOURIER_OUT_CC(_po, _tmp_r, _tmp_i, _type) \ -case t ## _type: \ - (*(_type*)_po).real = _tmp_r; \ - (*(_type*)_po).imag = _tmp_i; \ - break - -#define CASE_FOURIER_FILTER_RC(_pi, _tmp, _tmp_r, _tmp_i, _type) \ -case t ## _type: \ - _tmp_r = (*(_type*)_pi).real * _tmp; \ - _tmp_i = (*(_type*)_pi).imag * _tmp; \ 
- break; - -#define CASE_FOURIER_FILTER_RR(_pi, _tmp, _type) \ -case t ## _type: \ - _tmp *= *(_type*)_pi; \ - break; - -int NI_FourierFilter(PyArrayObject *input, PyArrayObject* parameter_array, - npy_intp n, int axis, PyArrayObject* output, - int filter_type) -{ - NI_Iterator ii, io; - char *pi, *po; - double *parameters = NULL, **params = NULL; - npy_intp kk, hh, size; - Float64 *iparameters = (void *)PyArray_DATA(parameter_array); - int ll; - - /* precalculate the parameters: */ - parameters = (double*)malloc(input->nd * sizeof(double)); - if (!parameters) { - PyErr_NoMemory(); - goto exit; - } - for(kk = 0; kk < input->nd; kk++) { - /* along the direction of the real transform we must use the given - length of that dimensons, unless a complex transform is assumed - (n < 0): */ - int shape = kk == axis ? - (n < 0 ? input->dimensions[kk] : n) : input->dimensions[kk]; - switch (filter_type) { - case _NI_GAUSSIAN: - parameters[kk] = *iparameters++ * M_PI / (double)shape; - parameters[kk] = -2.0 * parameters[kk] * parameters[kk]; - break; - case _NI_ELLIPSOID: - case _NI_UNIFORM: - parameters[kk] = *iparameters++; - break; - } - } - /* allocate memory for tables: */ - params = (double**) malloc(input->nd * sizeof(double*)); - if (!params) { - PyErr_NoMemory(); - goto exit; - } - for(kk = 0; kk < input->nd; kk++) - params[kk] = NULL; - for(kk = 0; kk < input->nd; kk++) { - if (input->dimensions[kk] > 1) { - params[kk] = (double*)malloc(input->dimensions[kk] * sizeof(double)); - if (!params[kk]) { - PyErr_NoMemory(); - goto exit; - } - } - } - switch (filter_type) { - case _NI_GAUSSIAN: - /* calculate the tables of exponentials: */ - for (hh = 0; hh < input->nd; hh++) { - if (params[hh]) { - if (hh == axis && n >= 0) { - for(kk = 0; kk < input->dimensions[hh]; kk++) { - double tmp = parameters[hh] * kk * kk; - params[hh][kk] = fabs(tmp) > 50.0 ? 
0.0 : exp(tmp); - } - } else { - int jj = 0; - for(kk = 0; kk < (input->dimensions[hh] + 1) / 2; kk++) { - double tmp = parameters[hh] * kk * kk; - params[hh][jj++] = fabs(tmp) > 50.0 ? 0.0 : exp(tmp); - } - for(kk = -(input->dimensions[hh] / 2); kk < 0; kk++) { - double tmp = parameters[hh] * kk * kk; - params[hh][jj++] = fabs(tmp) > 50.0 ? 0.0 : exp(tmp); - } - } - } - } - break; - case _NI_UNIFORM: - /* calculate the tables of parameters: */ - for (hh = 0; hh < input->nd; hh++) { - if (params[hh]) { - params[hh][0] = 1.0; - if (hh == axis && n >= 0) { - double tmp = M_PI * parameters[hh] / n; - for(kk = 1; kk < input->dimensions[hh]; kk++) - params[hh][kk] = tmp > 0.0 ? - sin(tmp * kk) / (tmp * kk) : 0.0; - } else { - double tmp = M_PI * parameters[hh] / input->dimensions[hh]; - int jj = 1; - for(kk = 1; kk < (input->dimensions[hh] + 1) / 2; kk++) - params[hh][jj++] = tmp > 0.0 ? - sin(tmp * kk) / (tmp * kk) : 0.0; - for(kk = -(input->dimensions[hh] / 2); kk < 0; kk++) - params[hh][jj++] = tmp > 0.0 ? 
- sin(tmp * kk) / (tmp * kk) : 0.0; - } - } - } - break; - case _NI_ELLIPSOID: - /* calculate the tables of parameters: */ - for (hh = 0; hh < input->nd; hh++) { - if (params[hh]) { - params[hh][0] = 1.0; - if (hh == axis && n >= 0) { - double tmp = M_PI * parameters[hh] / n; - for(kk = 0; kk < input->dimensions[hh]; kk++) - params[hh][kk] = (double)kk * tmp; - } else { - double tmp = M_PI * parameters[hh] / input->dimensions[hh]; - int jj = 0; - for(kk = 0; kk < (input->dimensions[hh] + 1) / 2; kk++) - params[hh][jj++] = (double)kk * tmp; - for(kk = -(input->dimensions[hh] / 2); kk < 0; kk++) - params[hh][jj++] = (double)kk * tmp; - } - } else if (input->dimensions[hh] > 0) { - params[hh][0] = 1.0; - } - } - if (input->nd > 1) - for(hh = 0; hh < input->nd; hh++) - for(kk = 0; kk < input->dimensions[hh]; kk++) - params[hh][kk] = params[hh][kk] * params[hh][kk]; - break; - default: - break; - } - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* iterator over the elements: */ - for(hh = 0; hh < size; hh++) { - double tmp = 1.0; - switch (filter_type) { - case _NI_GAUSSIAN: - case _NI_UNIFORM: - for(kk = 0; kk < input->nd; kk++) - if (params[kk]) - tmp *= params[kk][ii.coordinates[kk]]; - break; - case _NI_ELLIPSOID: - switch (input->nd) { - case 1: - tmp = params[0][ii.coordinates[0]]; - tmp = tmp > 0.0 ? sin(tmp) / (tmp) : 1.0; - break; - case 2: - tmp = 0.0; - for(kk = 0; kk < 2; kk++) - tmp += params[kk][ii.coordinates[kk]]; - tmp = sqrt(tmp); - tmp = tmp > 0.0 ? 
2.0 * _bessel_j1(tmp) / tmp : 1.0; - break; - case 3: - { - double r = 0.0; - for(kk = 0; kk < 3; kk++) - r += params[kk][ii.coordinates[kk]]; - r = sqrt(r); - if (r > 0.0) { - tmp = 3.0 * (sin(r) - r * cos(r)); - tmp /= r * r * r; - } else { - tmp = 1.0; - } - } - break; - } - break; - default: - break; - } - if (input->descr->type_num == tComplex64 || - input->descr->type_num == tComplex128) { - double tmp_r = 0.0, tmp_i = 0.0; - switch (input->descr->type_num) { - CASE_FOURIER_FILTER_RC(pi, tmp, tmp_r, tmp_i, Complex64); - CASE_FOURIER_FILTER_RC(pi, tmp, tmp_r, tmp_i, Complex128); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FOURIER_OUT_CC(po, tmp_r, tmp_i, Complex64); - CASE_FOURIER_OUT_CC(po, tmp_r, tmp_i, Complex128); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - } else { - switch (input->descr->type_num) { - CASE_FOURIER_FILTER_RR(pi, tmp, Bool) - CASE_FOURIER_FILTER_RR(pi, tmp, UInt8) - CASE_FOURIER_FILTER_RR(pi, tmp, UInt16) - CASE_FOURIER_FILTER_RR(pi, tmp, UInt32) -#if HAS_UINT64 - CASE_FOURIER_FILTER_RR(pi, tmp, UInt64) -#endif - CASE_FOURIER_FILTER_RR(pi, tmp, Int8) - CASE_FOURIER_FILTER_RR(pi, tmp, Int16) - CASE_FOURIER_FILTER_RR(pi, tmp, Int32) - CASE_FOURIER_FILTER_RR(pi, tmp, Int64) - CASE_FOURIER_FILTER_RR(pi, tmp, Float32) - CASE_FOURIER_FILTER_RR(pi, tmp, Float64) - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FOURIER_OUT_RR(po, tmp, Float32); - CASE_FOURIER_OUT_RR(po, tmp, Float64); - CASE_FOURIER_OUT_RC(po, tmp, Complex64); - CASE_FOURIER_OUT_RC(po, tmp, Complex128); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - } - NI_ITERATOR_NEXT2(ii, io, pi, po); - } - - exit: - if (parameters) free(parameters); - if (params) { - for(kk = 0; kk < input->nd; kk++) - if 
(params[kk]) free(params[kk]); - free(params); - } - return PyErr_Occurred() ? 0 : 1; -} - -#define CASE_FOURIER_SHIFT_R(_pi, _tmp, _r, _i, _cost, _sint, _type) \ -case t ## _type: \ - _tmp = *(_type*)_pi; \ - _r = _tmp * _cost; \ - _i = _tmp * _sint; \ - break; - -#define CASE_FOURIER_SHIFT_C(_pi, _r, _i, _cost, _sint, _type) \ -case t ## _type: \ - _r = (*(_type*)_pi).real * _cost - (*(_type*)_pi).imag * _sint; \ - _i = (*(_type*)_pi).real * _sint + (*(_type*)_pi).imag * _cost; \ - break; - -int NI_FourierShift(PyArrayObject *input, PyArrayObject* shift_array, - npy_intp n, int axis, PyArrayObject* output) -{ - NI_Iterator ii, io; - char *pi, *po; - double *shifts = NULL, **params = NULL; - npy_intp kk, hh, size; - Float64 *ishifts = (void *)PyArray_DATA(shift_array); - int ll; - - /* precalculate the shifts: */ - shifts = (double*)malloc(input->nd * sizeof(double)); - if (!shifts) { - PyErr_NoMemory(); - goto exit; - } - for(kk = 0; kk < input->nd; kk++) { - /* along the direction of the real transform we must use the given - length of that dimensons, unless a complex transform is assumed - (n < 0): */ - int shape = kk == axis ? - (n < 0 ? 
input->dimensions[kk] : n) : input->dimensions[kk]; - shifts[kk] = -2.0 * M_PI * *ishifts++ / (double)shape; - } - /* allocate memory for tables: */ - params = (double**) malloc(input->nd * sizeof(double*)); - if (!params) { - PyErr_NoMemory(); - goto exit; - } - for(kk = 0; kk < input->nd; kk++) - params[kk] = NULL; - for(kk = 0; kk < input->nd; kk++) { - if (input->dimensions[kk] > 1) { - params[kk] = (double*)malloc(input->dimensions[kk] * sizeof(double)); - if (!params[kk]) { - PyErr_NoMemory(); - goto exit; - } - } - } - for (hh = 0; hh < input->nd; hh++) { - if (params[hh]) { - if (hh == axis && n >= 0) { - for(kk = 0; kk < input->dimensions[hh]; kk++) - params[hh][kk] = shifts[hh] * kk; - } else { - int jj = 0; - for(kk = 0; kk < (input->dimensions[hh] + 1) / 2; kk++) { - params[hh][jj++] = shifts[hh] * kk; - } - for(kk = -(input->dimensions[hh] / 2); kk < 0; kk++) { - params[hh][jj++] = shifts[hh] * kk; - } - } - } - } - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* iterator over the elements: */ - for(hh = 0; hh < size; hh++) { - double tmp = 0.0, sint, cost, r = 0.0, i = 0.0; - for(kk = 0; kk < input->nd; kk++) - if (params[kk]) - tmp += params[kk][ii.coordinates[kk]]; - sint = sin(tmp); - cost = cos(tmp); - switch (input->descr->type_num) { - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, Bool) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, UInt8) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, UInt16) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, UInt32) -#if HAS_UINT64 - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, UInt64) -#endif - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, Int8) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, 
Int16) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, Int32) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, Int64) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, Float32) - CASE_FOURIER_SHIFT_R(pi, tmp, r, i, cost, sint, Float64) - CASE_FOURIER_SHIFT_C(pi, r, i, cost, sint, Complex64) - CASE_FOURIER_SHIFT_C(pi, r, i, cost, sint, Complex128) - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_FOURIER_OUT_CC(po, r, i, Complex64); - CASE_FOURIER_OUT_CC(po, r, i, Complex128); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - NI_ITERATOR_NEXT2(ii, io, pi, po); - } - - exit: - if (shifts) free(shifts); - if (params) { - for(kk = 0; kk < input->nd; kk++) - if (params[kk]) free(params[kk]); - free(params); - } - return PyErr_Occurred() ? 0 : 1; -} diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_fourier.h b/scipy-0.10.1/scipy/ndimage/src/ni_fourier.h deleted file mode 100644 index ff1f98cb71..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_fourier.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef NI_FOURIER_H -#define NI_FOURIER_H - -int NI_FourierFilter(PyArrayObject*, PyArrayObject*, npy_intp, int, - PyArrayObject*, int); -int NI_FourierShift(PyArrayObject*, PyArrayObject*, npy_intp, int, - PyArrayObject*); - -#endif diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_interpolation.c b/scipy-0.10.1/scipy/ndimage/src/ni_interpolation.c deleted file mode 100644 index 4a935c149e..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_interpolation.c +++ /dev/null @@ -1,975 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "ni_support.h" -#include "ni_interpolation.h" -#include -#include - -/* calculate the B-spline interpolation coefficients for given x: */ -static void -spline_coefficients(double x, int order, double *result) -{ - int hh; - double y, start; - - if (order & 1) { - start = (int)floor(x) - order / 2; - } else { - start = (int)floor(x + 0.5) - order / 2; - } - - for(hh = 0; hh <= order; hh++) { - y = fabs(start - x + hh); - - switch(order) { - case 1: - result[hh] = y > 1.0 ? 
0.0 : 1.0 - y; - break; - case 2: - if (y < 0.5) { - result[hh] = 0.75 - y * y; - } else if (y < 1.5) { - y = 1.5 - y; - result[hh] = 0.5 * y * y; - } else { - result[hh] = 0.0; - } - break; - case 3: - if (y < 1.0) { - result[hh] = - (y * y * (y - 2.0) * 3.0 + 4.0) / 6.0; - } else if (y < 2.0) { - y = 2.0 - y; - result[hh] = y * y * y / 6.0; - } else { - result[hh] = 0.0; - } - break; - case 4: - if (y < 0.5) { - y *= y; - result[hh] = y * (y * 0.25 - 0.625) + 115.0 / 192.0; - } else if (y < 1.5) { - result[hh] = y * (y * (y * (5.0 / 6.0 - y / 6.0) - 1.25) + - 5.0 / 24.0) + 55.0 / 96.0; - } else if (y < 2.5) { - y -= 2.5; - y *= y; - result[hh] = y * y / 24.0; - } else { - result[hh] = 0.0; - } - break; - case 5: - if (y < 1.0) { - double f = y * y; - result[hh] = - f * (f * (0.25 - y / 12.0) - 0.5) + 0.55; - } else if (y < 2.0) { - result[hh] = y * (y * (y * (y * (y / 24.0 - 0.375) - + 1.25) - 1.75) + 0.625) + 0.425; - } else if (y < 3.0) { - double f = 3.0 - y; - y = f * f; - result[hh] = f * y * y / 120.0; - } else { - result[hh] = 0.0; - } - break; - } - } -} - -/* map a coordinate outside the borders, according to the requested - boundary condition: */ -static double -map_coordinate(double in, npy_intp len, int mode) -{ - if (in < 0) { - switch (mode) { - case NI_EXTEND_MIRROR: - if (len <= 1) { - in = 0; - } else { - npy_intp sz2 = 2 * len - 2; - in = sz2 * (npy_intp)(-in / sz2) + in; - in = in <= 1 - len ? in + sz2 : -in; - } - break; - case NI_EXTEND_REFLECT: - if (len <= 1) { - in = 0; - } else { - npy_intp sz2 = 2 * len; - if (in < -sz2) - in = sz2 * (npy_intp)(-in / sz2) + in; - in = in < -len ? 
in + sz2 : -in - 1; - } - break; - case NI_EXTEND_WRAP: - if (len <= 1) { - in = 0; - } else { - npy_intp sz = len - 1; - // Integer division of -in/sz gives (-in mod sz) - // Note that 'in' is negative - in += sz * ((npy_intp)(-in / sz) + 1); - } - break; - case NI_EXTEND_NEAREST: - in = 0; - break; - case NI_EXTEND_CONSTANT: - in = -1; - break; - } - } else if (in > len-1) { - switch (mode) { - case NI_EXTEND_MIRROR: - if (len <= 1) { - in = 0; - } else { - npy_intp sz2 = 2 * len - 2; - in -= sz2 * (npy_intp)(in / sz2); - if (in >= len) - in = sz2 - in; - } - break; - case NI_EXTEND_REFLECT: - if (len <= 1) { - in = 0; - } else { - npy_intp sz2 = 2 * len; - in -= sz2 * (npy_intp)(in / sz2); - if (in >= len) - in = sz2 - in - 1; - } - break; - case NI_EXTEND_WRAP: - if (len <= 1) { - in = 0; - } else { - npy_intp sz = len - 1; - in -= sz * (npy_intp)(in / sz); - } - break; - case NI_EXTEND_NEAREST: - in = len - 1; - break; - case NI_EXTEND_CONSTANT: - in = -1; - break; - } - } - - return in; -} - -#define BUFFER_SIZE 256000 -#define TOLERANCE 1e-15 - -/* one-dimensional spline filter: */ -int NI_SplineFilter1D(PyArrayObject *input, int order, int axis, - PyArrayObject *output) -{ - int hh, npoles = 0, more; - npy_intp kk, ll, lines, len; - double *buffer = NULL, weight, pole[2]; - NI_LineBuffer iline_buffer, oline_buffer; - - len = input->nd > 0 ? 
input->dimensions[axis] : 1; - if (len < 1) - goto exit; - - /* these are used in the spline filter calculation below: */ - switch (order) { - case 2: - npoles = 1; - pole[0] = sqrt(8.0) - 3.0; - break; - case 3: - npoles = 1; - pole[0] = sqrt(3.0) - 2.0; - break; - case 4: - npoles = 2; - pole[0] = sqrt(664.0 - sqrt(438976.0)) + sqrt(304.0) - 19.0; - pole[1] = sqrt(664.0 + sqrt(438976.0)) - sqrt(304.0) - 19.0; - break; - case 5: - npoles = 2; - pole[0] = sqrt(67.5 - sqrt(4436.25)) + sqrt(26.25) - 6.5; - pole[1] = sqrt(67.5 + sqrt(4436.25)) - sqrt(26.25) - 6.5; - break; - default: - break; - } - - weight = 1.0; - for(hh = 0; hh < npoles; hh++) - weight *= (1.0 - pole[hh]) * (1.0 - 1.0 / pole[hh]); - - /* allocate an initialize the line buffer, only a single one is used, - because the calculation is in-place: */ - lines = -1; - if (!NI_AllocateLineBuffer(input, axis, 0, 0, &lines, BUFFER_SIZE, - &buffer)) - goto exit; - if (!NI_InitLineBuffer(input, axis, 0, 0, lines, buffer, - NI_EXTEND_DEFAULT, 0.0, &iline_buffer)) - goto exit; - if (!NI_InitLineBuffer(output, axis, 0, 0, lines, buffer, - NI_EXTEND_DEFAULT, 0.0, &oline_buffer)) - goto exit; - - /* iterate over all the array lines: */ - do { - /* copy lines from array to buffer: */ - if (!NI_ArrayToLineBuffer(&iline_buffer, &lines, &more)) - goto exit; - /* iterate over the lines in the buffer: */ - for(kk = 0; kk < lines; kk++) { - /* get line: */ - double *ln = NI_GET_LINE(iline_buffer, kk); - /* spline filter: */ - if (len > 1) { - for(ll = 0; ll < len; ll++) - ln[ll] *= weight; - for(hh = 0; hh < npoles; hh++) { - double p = pole[hh]; - int max = (int)ceil(log(TOLERANCE) / log(fabs(p))); - if (max < len) { - double zn = p; - double sum = ln[0]; - for(ll = 1; ll < max; ll++) { - sum += zn * ln[ll]; - zn *= p; - } - ln[0] = sum; - } else { - double zn = p; - double iz = 1.0 / p; - double z2n = pow(p, (double)(len - 1)); - double sum = ln[0] + z2n * ln[len - 1]; - z2n *= z2n * iz; - for(ll = 1; ll <= len - 2; 
ll++) { - sum += (zn + z2n) * ln[ll]; - zn *= p; - z2n *= iz; - } - ln[0] = sum / (1.0 - zn * zn); - } - for(ll = 1; ll < len; ll++) - ln[ll] += p * ln[ll - 1]; - ln[len-1] = (p / (p * p - 1.0)) * (ln[len-1] + p * ln[len-2]); - for(ll = len - 2; ll >= 0; ll--) - ln[ll] = p * (ln[ll + 1] - ln[ll]); - } - } - } - /* copy lines from buffer to array: */ - if (!NI_LineBufferToArray(&oline_buffer)) - goto exit; - } while(more); - - exit: - if (buffer) free(buffer); - return PyErr_Occurred() ? 0 : 1; -} - -/* copy row of coordinate array from location at _p to _coor */ -#define CASE_MAP_COORDINATES(_p, _coor, _rank, _stride, _type) \ -case t ## _type: \ -{ \ - npy_intp _hh; \ - for(_hh = 0; _hh < _rank; _hh++) { \ - _coor[_hh] = *(_type*)_p; \ - _p += _stride; \ - } \ -} \ -break; - -#define CASE_INTERP_COEFF(_coeff, _pi, _idx, _type) \ -case t ## _type: \ - _coeff = *(_type*)(_pi + _idx); \ - break; - -#define CASE_INTERP_OUT(_po, _t, _type) \ -case t ## _type: \ - *(_type*)_po = (_type)_t; \ - break; - -#define CASE_INTERP_OUT_UINT(_po, _t, _type, type_min, type_max) \ -case t ## _type: \ - _t = _t > 0 ? _t + 0.5 : 0; \ - _t = _t > type_max ? type_max : t; \ - _t = _t < type_min ? type_min : t; \ - *(_type*)_po = (_type)_t; \ - break; - -#define CASE_INTERP_OUT_INT(_po, _t, _type, type_min, type_max) \ -case t ## _type: \ - _t = _t > 0 ? _t + 0.5 : _t - 0.5; \ - _t = _t > type_max ? type_max : t; \ - _t = _t < type_min ? 
type_min : t; \ - *(_type*)_po = (_type)_t; \ - break; - -int -NI_GeometricTransform(PyArrayObject *input, int (*map)(npy_intp*, double*, - int, int, void*), void* map_data, PyArrayObject* matrix_ar, - PyArrayObject* shift_ar, PyArrayObject *coordinates, - PyArrayObject *output, int order, int mode, double cval) -{ - char *po, *pi, *pc = NULL; - npy_intp **edge_offsets = NULL, **data_offsets = NULL, filter_size; - npy_intp ftmp[MAXDIM], *fcoordinates = NULL, *foffsets = NULL; - npy_intp cstride = 0, kk, hh, ll, jj, *idxs = NULL; - npy_intp size; - double **splvals = NULL, icoor[MAXDIM]; - npy_intp idimensions[MAXDIM], istrides[MAXDIM]; - NI_Iterator io, ic; - Float64 *matrix = matrix_ar ? (Float64*)PyArray_DATA(matrix_ar) : NULL; - Float64 *shift = shift_ar ? (Float64*)PyArray_DATA(shift_ar) : NULL; - int irank = 0, orank, qq; - - for(kk = 0; kk < input->nd; kk++) { - idimensions[kk] = input->dimensions[kk]; - istrides[kk] = input->strides[kk]; - } - irank = input->nd; - orank = output->nd; - - /* if the mapping is from array coordinates: */ - if (coordinates) { - /* initialze a line iterator along the first axis: */ - if (!NI_InitPointIterator(coordinates, &ic)) - goto exit; - cstride = ic.strides[0]; - if (!NI_LineIterator(&ic, 0)) - goto exit; - pc = (void *)(PyArray_DATA(coordinates)); - } - - /* offsets used at the borders: */ - edge_offsets = (npy_intp**)malloc(irank * sizeof(npy_intp*)); - data_offsets = (npy_intp**)malloc(irank * sizeof(npy_intp*)); - if (!edge_offsets || !data_offsets) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < irank; jj++) - data_offsets[jj] = NULL; - for(jj = 0; jj < irank; jj++) { - data_offsets[jj] = (npy_intp*)malloc((order + 1) * sizeof(npy_intp)); - if (!data_offsets[jj]) { - PyErr_NoMemory(); - goto exit; - } - } - /* will hold the spline coefficients: */ - splvals = (double**)malloc(irank * sizeof(double*)); - if (!splvals) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < irank; jj++) - splvals[jj] = NULL; 
- for(jj = 0; jj < irank; jj++) { - splvals[jj] = (double*)malloc((order + 1) * sizeof(double)); - if (!splvals[jj]) { - PyErr_NoMemory(); - goto exit; - } - } - - filter_size = 1; - for(jj = 0; jj < irank; jj++) - filter_size *= order + 1; - idxs = (npy_intp*)malloc(filter_size * sizeof(idxs)); - if (!idxs) { - PyErr_NoMemory(); - goto exit; - } - - /* initialize output iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - - /* get data pointers: */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - - /* make a table of all possible coordinates within the spline filter: */ - fcoordinates = (npy_intp*)malloc(irank * filter_size * sizeof(npy_intp)); - /* make a table of all offsets within the spline filter: */ - foffsets = (npy_intp*)malloc(filter_size * sizeof(npy_intp)); - if (!fcoordinates || !foffsets) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < irank; jj++) - ftmp[jj] = 0; - kk = 0; - for(hh = 0; hh < filter_size; hh++) { - for(jj = 0; jj < irank; jj++) - fcoordinates[jj + hh * irank] = ftmp[jj]; - foffsets[hh] = kk; - for(jj = irank - 1; jj >= 0; jj--) { - if (ftmp[jj] < order) { - ftmp[jj]++; - kk += istrides[jj]; - break; - } else { - ftmp[jj] = 0; - kk -= istrides[jj] * order; - } - } - } - - size = 1; - for(qq = 0; qq < output->nd; qq++) - size *= output->dimensions[qq]; - for(kk = 0; kk < size; kk++) { - double t = 0.0; - int constant = 0, edge = 0, offset = 0; - if (map) { - /* call mappint functions: */ - if (!map(io.coordinates, icoor, orank, irank, map_data)) { - if (!PyErr_Occurred()) - PyErr_SetString(PyExc_RuntimeError, - "unknown error in mapping function"); - goto exit; - } - } else if (matrix) { - /* do an affine transformation: */ - Float64 *p = matrix; - for(hh = 0; hh < irank; hh++) { - icoor[hh] = 0.0; - for(ll = 0; ll < orank; ll++) - icoor[hh] += io.coordinates[ll] * *p++; - icoor[hh] += shift[hh]; - } - } else if (coordinates) { - /* mapping is from an coordinates array: */ - char *p = 
pc; - switch (NI_NormalizeType(coordinates->descr->type_num)) { - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Bool); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, UInt8); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, UInt16); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, UInt32); -#if HAS_UINT64 - CASE_MAP_COORDINATES(p, icoor, irank, cstride, UInt64); -#endif - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Int8); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Int16); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Int32); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Int64); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Float32); - CASE_MAP_COORDINATES(p, icoor, irank, cstride, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, - "coordinate array data type not supported"); - goto exit; - } - } - /* iterate over axes: */ - for(hh = 0; hh < irank; hh++) { - /* if the input coordinate is outside the borders, map it: */ - double cc = map_coordinate(icoor[hh], idimensions[hh], mode); - if (cc > -1.0) { - /* find the filter location along this axis: */ - int start; - if (order & 1) { - start = (int)floor(cc) - order / 2; - } else { - start = (int)floor(cc + 0.5) - order / 2; - } - /* get the offset to the start of the filter: */ - offset += istrides[hh] * start; - if (start < 0 || start + order >= idimensions[hh]) { - /* implement border mapping, if outside border: */ - edge = 1; - edge_offsets[hh] = data_offsets[hh]; - for(ll = 0; ll <= order; ll++) { - int idx = start + ll; - int len = idimensions[hh]; - if (len <= 1) { - idx = 0; - } else { - int s2 = 2 * len - 2; - if (idx < 0) { - idx = s2 * (int)(-idx / s2) + idx; - idx = idx <= 1 - len ? 
idx + s2 : -idx; - } else if (idx >= len) { - idx -= s2 * (int)(idx / s2); - if (idx >= len) - idx = s2 - idx; - } - } - /* calculate and store the offests at this edge: */ - edge_offsets[hh][ll] = istrides[hh] * (idx - start); - } - } else { - /* we are not at the border, use precalculated offsets: */ - edge_offsets[hh] = NULL; - } - spline_coefficients(cc, order, splvals[hh]); - } else { - /* we use the constant border condition: */ - constant = 1; - break; - } - } - - if (!constant) { - npy_intp *ff = fcoordinates; - for(hh = 0; hh < filter_size; hh++) { - int idx = 0; - if (edge) { - for(ll = 0; ll < irank; ll++) { - if (edge_offsets[ll]) - idx += edge_offsets[ll][ff[ll]]; - else - idx += ff[ll] * istrides[ll]; - } - } else { - idx = foffsets[hh]; - } - idx += offset; - idxs[hh] = idx; - ff += irank; - } - } - if (!constant) { - npy_intp *ff = fcoordinates; - t = 0.0; - for(hh = 0; hh < filter_size; hh++) { - double coeff = 0.0; - switch (NI_NormalizeType(input->descr->type_num)) { - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Bool); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt8); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt16); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt32); -#if HAS_UINT64 - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt64); -#endif - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int8); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int16); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int32); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int64); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Float32); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Float64); - default: - PyErr_SetString(PyExc_RuntimeError, - "data type not supported"); - goto exit; - } - /* calculate the interpolated value: */ - for(ll = 0; ll < irank; ll++) - if (order > 0) - coeff *= splvals[ll][ff[ll]]; - t += coeff; - ff += irank; - } - } else { - t = cval; - } - /* store output value: */ - switch (NI_NormalizeType(output->descr->type_num)) { - CASE_INTERP_OUT(po, t, Bool); - CASE_INTERP_OUT_UINT(po, t, UInt8, 
0, MAX_UINT8); - CASE_INTERP_OUT_UINT(po, t, UInt16, 0, MAX_UINT16); - CASE_INTERP_OUT_UINT(po, t, UInt32, 0, MAX_UINT32); -#if HAS_UINT64 - /* There was a bug in numpy as of (at least) <= 1.6.1 such that - * MAX_UINT64 was incorrectly defined, leading to a compiler error. - * NPY_MAX_UINT64 is correctly defined - */ - CASE_INTERP_OUT_UINT(po, t, UInt64, 0, NPY_MAX_UINT64); -#endif - CASE_INTERP_OUT_INT(po, t, Int8, MIN_INT8, MAX_INT8); - CASE_INTERP_OUT_INT(po, t, Int16, MIN_INT16, MAX_INT16); - CASE_INTERP_OUT_INT(po, t, Int32, MIN_INT32, MAX_INT32); - CASE_INTERP_OUT_INT(po, t, Int64, MIN_INT64, MAX_INT64); - CASE_INTERP_OUT(po, t, Float32); - CASE_INTERP_OUT(po, t, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - if (coordinates) { - NI_ITERATOR_NEXT2(io, ic, po, pc); - } else { - NI_ITERATOR_NEXT(io, po); - } - } - - exit: - if (edge_offsets) - free(edge_offsets); - if (data_offsets) { - for(jj = 0; jj < irank; jj++) - free(data_offsets[jj]); - free(data_offsets); - } - if (splvals) { - for(jj = 0; jj < irank; jj++) - free(splvals[jj]); - free(splvals); - } - if (foffsets) - free(foffsets); - if (fcoordinates) - free(fcoordinates); - if (idxs) - free(idxs); - return PyErr_Occurred() ? 0 : 1; -} - -int NI_ZoomShift(PyArrayObject *input, PyArrayObject* zoom_ar, - PyArrayObject* shift_ar, PyArrayObject *output, - int order, int mode, double cval) -{ - char *po, *pi; - npy_intp **zeros = NULL, **offsets = NULL, ***edge_offsets = NULL; - npy_intp ftmp[MAXDIM], *fcoordinates = NULL, *foffsets = NULL; - npy_intp jj, hh, kk, filter_size, odimensions[MAXDIM]; - npy_intp idimensions[MAXDIM], istrides[MAXDIM], *idxs = NULL; - npy_intp size; - double ***splvals = NULL; - NI_Iterator io; - Float64 *zooms = zoom_ar ? (Float64*)PyArray_DATA(zoom_ar) : NULL; - Float64 *shifts = shift_ar ? 
(Float64*)PyArray_DATA(shift_ar) : NULL; - int rank = 0, qq; - - for(kk = 0; kk < input->nd; kk++) { - idimensions[kk] = input->dimensions[kk]; - istrides[kk] = input->strides[kk]; - odimensions[kk] = output->dimensions[kk]; - } - rank = input->nd; - - /* if the mode is 'constant' we need some temps later: */ - if (mode == NI_EXTEND_CONSTANT) { - zeros = (npy_intp**)malloc(rank * sizeof(npy_intp*)); - if (!zeros) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < rank; jj++) - zeros[jj] = NULL; - for(jj = 0; jj < rank; jj++) { - zeros[jj] = (npy_intp*)malloc(odimensions[jj] * sizeof(npy_intp)); - if(!zeros[jj]) { - PyErr_NoMemory(); - goto exit; - } - } - } - - /* store offsets, along each axis: */ - offsets = (npy_intp**)malloc(rank * sizeof(npy_intp*)); - /* store spline coefficients, along each axis: */ - splvals = (double***)malloc(rank * sizeof(double**)); - /* store offsets at all edges: */ - edge_offsets = (npy_intp***)malloc(rank * sizeof(npy_intp**)); - if (!offsets || !splvals || !edge_offsets) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < rank; jj++) { - offsets[jj] = NULL; - splvals[jj] = NULL; - edge_offsets[jj] = NULL; - } - for(jj = 0; jj < rank; jj++) { - offsets[jj] = (npy_intp*)malloc(odimensions[jj] * sizeof(npy_intp)); - splvals[jj] = (double**)malloc(odimensions[jj] * sizeof(double*)); - edge_offsets[jj] = (npy_intp**)malloc(odimensions[jj] * sizeof(npy_intp*)); - if (!offsets[jj] || !splvals[jj] || !edge_offsets[jj]) { - PyErr_NoMemory(); - goto exit; - } - for(hh = 0; hh < odimensions[jj]; hh++) { - splvals[jj][hh] = NULL; - edge_offsets[jj][hh] = NULL; - } - } - - /* precalculate offsets, and offsets at the edge: */ - for(jj = 0; jj < rank; jj++) { - double shift = 0.0, zoom = 0.0; - if (shifts) - shift = shifts[jj]; - if (zooms) - zoom = zooms[jj]; - for(kk = 0; kk < odimensions[jj]; kk++) { - double cc = (double)kk; - if (shifts) - cc += shift; - if (zooms) - cc *= zoom; - cc = map_coordinate(cc, idimensions[jj], mode); 
- if (cc > -1.0) { - int start; - if (zeros && zeros[jj]) - zeros[jj][kk] = 0; - if (order & 1) { - start = (int)floor(cc) - order / 2; - } else { - start = (int)floor(cc + 0.5) - order / 2; - } - offsets[jj][kk] = istrides[jj] * start; - if (start < 0 || start + order >= idimensions[jj]) { - edge_offsets[jj][kk] = (npy_intp*)malloc((order + 1) * sizeof(npy_intp)); - if (!edge_offsets[jj][kk]) { - PyErr_NoMemory(); - goto exit; - } - for(hh = 0; hh <= order; hh++) { - int idx = start + hh; - int len = idimensions[jj]; - if (len <= 1) { - idx = 0; - } else { - int s2 = 2 * len - 2; - if (idx < 0) { - idx = s2 * (int)(-idx / s2) + idx; - idx = idx <= 1 - len ? idx + s2 : -idx; - } else if (idx >= len) { - idx -= s2 * (int)(idx / s2); - if (idx >= len) - idx = s2 - idx; - } - } - edge_offsets[jj][kk][hh] = istrides[jj] * (idx - start); - } - } - if (order > 0) { - splvals[jj][kk] = (double*)malloc((order + 1) * sizeof(double)); - if (!splvals[jj][kk]) { - PyErr_NoMemory(); - goto exit; - } - spline_coefficients(cc, order, splvals[jj][kk]); - } - } else { - zeros[jj][kk] = 1; - } - } - } - - filter_size = 1; - for(jj = 0; jj < rank; jj++) - filter_size *= order + 1; - idxs = (npy_intp*)malloc(filter_size * sizeof(idxs)); - if (!idxs) { - PyErr_NoMemory(); - goto exit; - } - - if (!NI_InitPointIterator(output, &io)) - goto exit; - - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - - /* store all coordinates and offsets with filter: */ - fcoordinates = (npy_intp*)malloc(rank * filter_size * sizeof(npy_intp)); - foffsets = (npy_intp*)malloc(filter_size * sizeof(npy_intp)); - if (!fcoordinates || !foffsets) { - PyErr_NoMemory(); - goto exit; - } - - for(jj = 0; jj < rank; jj++) - ftmp[jj] = 0; - kk = 0; - for(hh = 0; hh < filter_size; hh++) { - for(jj = 0; jj < rank; jj++) - fcoordinates[jj + hh * rank] = ftmp[jj]; - foffsets[hh] = kk; - for(jj = rank - 1; jj >= 0; jj--) { - if (ftmp[jj] < order) { - ftmp[jj]++; - kk += istrides[jj]; - break; - } 
else { - ftmp[jj] = 0; - kk -= istrides[jj] * order; - } - } - } - size = 1; - for(qq = 0; qq < output->nd; qq++) - size *= output->dimensions[qq]; - for(kk = 0; kk < size; kk++) { - double t = 0.0; - int edge = 0, oo = 0, zero = 0; - - for(hh = 0; hh < rank; hh++) { - if (zeros && zeros[hh][io.coordinates[hh]]) { - /* we use constant border condition */ - zero = 1; - break; - } - oo += offsets[hh][io.coordinates[hh]]; - if (edge_offsets[hh][io.coordinates[hh]]) - edge = 1; - } - - if (!zero) { - npy_intp *ff = fcoordinates; - for(hh = 0; hh < filter_size; hh++) { - int idx = 0; - if (edge) { - /* use precalculated edge offsets: */ - for(jj = 0; jj < rank; jj++) { - if (edge_offsets[jj][io.coordinates[jj]]) - idx += edge_offsets[jj][io.coordinates[jj]][ff[jj]]; - else - idx += ff[jj] * istrides[jj]; - } - idx += oo; - } else { - /* use normal offsets: */ - idx += oo + foffsets[hh]; - } - idxs[hh] = idx; - ff += rank; - } - } - if (!zero) { - npy_intp *ff = fcoordinates; - t = 0.0; - for(hh = 0; hh < filter_size; hh++) { - double coeff = 0.0; - switch (NI_NormalizeType(input->descr->type_num)) { - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Bool); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt8); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt16); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt32); -#if HAS_UINT64 - CASE_INTERP_COEFF(coeff, pi, idxs[hh], UInt64); -#endif - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int8); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int16); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int32); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Int64); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Float32); - CASE_INTERP_COEFF(coeff, pi, idxs[hh], Float64); - default: - PyErr_SetString(PyExc_RuntimeError, - "data type not supported"); - goto exit; - } - /* calculate interpolated value: */ - for(jj = 0; jj < rank; jj++) - if (order > 0) - coeff *= splvals[jj][io.coordinates[jj]][ff[jj]]; - t += coeff; - ff += rank; - } - } else { - t = cval; - } - /* store output: */ - 
switch (NI_NormalizeType(output->descr->type_num)) { - CASE_INTERP_OUT(po, t, Bool); - CASE_INTERP_OUT_UINT(po, t, UInt8, 0, MAX_UINT8); - CASE_INTERP_OUT_UINT(po, t, UInt16, 0, MAX_UINT16); - CASE_INTERP_OUT_UINT(po, t, UInt32, 0, MAX_UINT32); -#if HAS_UINT64 - /* There was a bug in numpy as of (at least) <= 1.6.1 such that - * MAX_UINT64 was incorrectly defined, leading to a compiler error. - * NPY_MAX_UINT64 is correctly defined - */ - CASE_INTERP_OUT_UINT(po, t, UInt64, 0, NPY_MAX_UINT64); -#endif - CASE_INTERP_OUT_INT(po, t, Int8, MIN_INT8, MAX_INT8); - CASE_INTERP_OUT_INT(po, t, Int16, MIN_INT16, MAX_INT16); - CASE_INTERP_OUT_INT(po, t, Int32, MIN_INT32, MAX_INT32); - CASE_INTERP_OUT_INT(po, t, Int64, MIN_INT64, MAX_INT64); - CASE_INTERP_OUT(po, t, Float32); - CASE_INTERP_OUT(po, t, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - NI_ITERATOR_NEXT(io, po); - } - - exit: - if (zeros) { - for(jj = 0; jj < rank; jj++) - if (zeros[jj]) - free(zeros[jj]); - free(zeros); - } - if (offsets) { - for(jj = 0; jj < rank; jj++) - if (offsets[jj]) - free(offsets[jj]); - free(offsets); - } - if (splvals) { - for(jj = 0; jj < rank; jj++) { - if (splvals[jj]) { - for(hh = 0; hh < odimensions[jj]; hh++) - if (splvals[jj][hh]) - free(splvals[jj][hh]); - free(splvals[jj]); - } - } - free(splvals); - } - if (edge_offsets) { - for(jj = 0; jj < rank; jj++) { - if (edge_offsets[jj]) { - for(hh = 0; hh < odimensions[jj]; hh++) - if (edge_offsets[jj][hh]) - free(edge_offsets[jj][hh]); - free(edge_offsets[jj]); - } - } - free(edge_offsets); - } - if (foffsets) - free(foffsets); - if (fcoordinates) - free(fcoordinates); - if (idxs) - free(idxs); - return PyErr_Occurred() ? 
0 : 1; -} diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_interpolation.h b/scipy-0.10.1/scipy/ndimage/src/ni_interpolation.h deleted file mode 100644 index a663f3edd5..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_interpolation.h +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef NI_INTERPOLATION_H -#define NI_INTERPOLATION_H - -int NI_SplineFilter1D(PyArrayObject*, int, int, PyArrayObject*); -int NI_GeometricTransform(PyArrayObject*, int (*)(npy_intp*, double*, int, int, - void*), void*, PyArrayObject*, PyArrayObject*, - PyArrayObject*, PyArrayObject*, int, int, - double); -int NI_ZoomShift(PyArrayObject*, PyArrayObject*, PyArrayObject*, - PyArrayObject*, int, int, double); - -#endif diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_measure.c b/scipy-0.10.1/scipy/ndimage/src/ni_measure.c deleted file mode 100644 index 1aa27d9eb6..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_measure.c +++ /dev/null @@ -1,1201 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "ni_support.h" -#include "ni_measure.h" -#include -#include -#include -#include - -typedef struct { - Int32 index1, index2; - void* next; -} _index_pair; - -#define CASE_LABEL(_p, _pi, _type) \ -case t ## _type: \ - *_p = *(_type*)_pi ? -1 : 0; \ - break - -int NI_Label(PyArrayObject* input, PyArrayObject* strct, - npy_intp *max_label, PyArrayObject* output) -{ - int kk; - npy_intp jj, ll, ssize, size, filter_size, *offsets = NULL; - npy_intp mask_value, *oo; - Bool *ps, *footprint = NULL; - char *pi, *po; - Int32 index = 0, *index_map = NULL; - NI_FilterIterator fi; - NI_Iterator ii, io; - _index_pair *pairs = NULL; - - /* structure size */ - ssize = 1; - for(kk = 0; kk < strct->nd; kk++) - ssize *= strct->dimensions[kk]; - /* we only use the first half of the structure data, so we make a - temporary structure for use with the filter functions: */ - footprint = (Bool*)malloc(ssize * sizeof(Bool)); - if (!footprint) { - PyErr_NoMemory(); - goto exit; - } - ps = (Bool*)PyArray_DATA(strct); - filter_size = 0; - for(jj = 0; jj < ssize / 2; jj++) { - footprint[jj] = ps[jj]; - if (ps[jj]) - ++filter_size; - } - for(jj = ssize / 2; jj < ssize; jj++) - footprint[jj] = 0; - /* get data and size */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(kk = 0; kk < output->nd; kk++) - size *= output->dimensions[kk]; - if (!NI_InitPointIterator(input, &ii)) - goto exit; - if (!NI_InitPointIterator(output, &io)) - 
goto exit; - /* set all elements in the output corresponding to non-zero elements - in input to -1: */ - for(jj = 0; jj < size; jj++) { - Int32 *p = (Int32*)po; - switch (input->descr->type_num) { - CASE_LABEL(p, pi, Bool); - CASE_LABEL(p, pi, UInt8); - CASE_LABEL(p, pi, UInt16); - CASE_LABEL(p, pi, UInt32); -#if HAS_UINT64 - CASE_LABEL(p, pi, UInt64); -#endif - CASE_LABEL(p, pi, Int8); - CASE_LABEL(p, pi, Int16); - CASE_LABEL(p, pi, Int32); - CASE_LABEL(p, pi, Int64); - CASE_LABEL(p, pi, Float32); - CASE_LABEL(p, pi, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - NI_ITERATOR_NEXT2(ii, io, pi, po); - } - - /* calculate the filter offsets: */ - if (!NI_InitFilterOffsets(output, footprint, strct->dimensions, NULL, - NI_EXTEND_CONSTANT, &offsets, &mask_value, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(input->nd, strct->dimensions, filter_size, - input->dimensions, NULL, &fi)) - goto exit; - /* reset output iterator: */ - NI_ITERATOR_RESET(io); - po = (void *)PyArray_DATA(output); - /* iterator over the elements: */ - oo = offsets; - for(jj = 0; jj < size; jj++) { - if (*(Int32*)po < 0) { - Int32 neighbor = 0; - /* iterate over structuring element: */ - for(ll = 0; ll < filter_size; ll++) { - npy_intp offset = oo[ll]; - if (offset != mask_value) { - Int32 tt = *(Int32*)(po + offset); - if (tt > 0) { - /* this element is next to an already found object: */ - if (neighbor && neighbor != tt) { - /* we have two objects that must be merged later: */ - _index_pair* tp = (_index_pair*)malloc(sizeof(_index_pair)); - if (!tp) { - PyErr_NoMemory(); - goto exit; - } - tp->next = pairs; - /* the pairs must be ordered: */ - if (neighbor < tt) { - tp->index1 = neighbor; - tp->index2 = tt; - } else { - tp->index1 = tt; - tp->index2 = neighbor; - } - pairs = tp; - } else { - neighbor = tt; - } - } - } - } - if (neighbor) { - /* this point belongs to an existing object */ - *(Int32*)po 
= neighbor; - } else { - /* this may be a new object: */ - *(Int32*)po = ++index; - } - } - NI_FILTER_NEXT(fi, io, oo, po); - } - *max_label = index; - /* merge any touching objects: */ - if (pairs) { - Int32 counter; - index_map = (Int32*)malloc(index * sizeof(Int32)); - if (!index_map) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < index; jj++) - index_map[jj] = (Int32)jj; - while (pairs) { - Int32 idx1 = pairs->index1 - 1; - Int32 idx2 = pairs->index2 - 1; - if (index_map[idx2] == idx1 || index_map[idx2] == idx2) { - /* if this pair was already processed, or if idx2 was not - mapped yet, we delete this pair and map idx2 to idx1: */ - _index_pair *tp = pairs; - pairs = tp->next; - free(tp); - index_map[idx2] = idx1; - } else { - /* idx2 was already mapped, therefore we find what it was - mapped to and change the current pair to the result of - that and idx1. Since the pair is not destroyed, it will be - re-processed with the adapted values. */ - idx2 = index_map[idx2]; - /* keep the pairs ordered: */ - if (idx1 < idx2) { - pairs->index1 = idx1 + 1; - pairs->index2 = idx2 + 1; - } else { - pairs->index1 = idx2 + 1; - pairs->index2 = idx1 + 1; - } - } - } - for(jj = 0; jj < index; jj++) { - /* if the current index maps to a index that is also mapped, - change it to map to that index. Since an index always maps to - a lower index or to itself, this will make sure that at the - end all indices map to an unmapped index. 
*/ - if (index_map[index_map[jj]] < index_map[jj]) - index_map[jj] = index_map[index_map[jj]]; - } - /* renumber the indices that are not mapped: */ - counter = 0; - for(jj = 0; jj < index; jj++) - if (index_map[jj] == jj) - index_map[jj] = ++counter; - else - index_map[jj] = index_map[index_map[jj]]; - } - - /* relabel the output if we merged some objects: */ - if (index_map) { - *max_label = 0; - NI_ITERATOR_RESET(io); - po = (void *)PyArray_DATA(output); - for(jj = 0; jj < size; jj++) { - Int32 p = *(Int32*)po; - if (p > 0 ) - *(Int32*)po = index_map[p - 1]; - if (*(Int32*)po > *max_label) - *max_label = *(Int32*)po; - NI_ITERATOR_NEXT(io, po); - } - } - exit: - if (offsets) free(offsets); - if (index_map) free(index_map); - while (pairs) { - _index_pair *tp = pairs; - pairs = (_index_pair*)pairs->next; - free(tp); - } - if (footprint) - free(footprint); - return PyErr_Occurred() ? 0 : 1; -} - -#define CASE_FIND_OBJECT_POINT(_pi, _regions, _rank, _dimensions, \ - _max_label, _ii, _type) \ -case t ## _type: \ -{ \ - int _kk; \ - npy_intp _sindex = *(_type*)_pi - 1; \ - if (_sindex >= 0 && _sindex < _max_label) { \ - if (_rank > 0) { \ - _sindex *= 2 * _rank; \ - if (_regions[_sindex] < 0) { \ - for(_kk = 0; _kk < _rank; _kk++) { \ - npy_intp _cc = _ii.coordinates[_kk]; \ - _regions[_sindex + _kk] = _cc; \ - _regions[_sindex + _kk + _rank] = _cc + 1; \ - } \ - } else { \ - for(_kk = 0; _kk < _rank; _kk++) { \ - npy_intp _cc = _ii.coordinates[_kk]; \ - if (_cc < _regions[_sindex + _kk]) \ - _regions[_sindex + _kk] = _cc; \ - if (_cc + 1 > _regions[_sindex + _kk + _rank]) \ - _regions[_sindex + _kk + _rank] = _cc + 1; \ - } \ - } \ - } else { \ - _regions[_sindex] = 1; \ - } \ - } \ -} \ -break - -int NI_FindObjects(PyArrayObject* input, npy_intp max_label, - npy_intp* regions) -{ - int kk; - npy_intp size, jj; - NI_Iterator ii; - char *pi; - - /* get input data, size and iterator: */ - pi = (void *)PyArray_DATA(input); - size = 1; - for(kk = 0; kk < input->nd; 
kk++) - size *= input->dimensions[kk]; - if (!NI_InitPointIterator(input, &ii)) - goto exit; - if (input->nd > 0) { - for(jj = 0; jj < 2 * input->nd * max_label; jj++) - regions[jj] = -1; - } else { - for(jj = 0; jj < max_label; jj++) - regions[jj] = -1; - } - /* iterate over all points: */ - for(jj = 0 ; jj < size; jj++) { - switch (input->descr->type_num) { - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, Bool); - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, UInt8); - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, UInt16); - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, UInt32); -#if HAS_UINT64 - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, UInt64); -#endif - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, Int8); - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, Int16); - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, Int32); - CASE_FIND_OBJECT_POINT(pi, regions, input->nd, input->dimensions, - max_label, ii, Int64); - break; - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - NI_ITERATOR_NEXT(ii, pi); - } - exit: - return PyErr_Occurred() ? 
0 : 1; -} - - -/* macro to get input value: */ -#if HAS_UINT64 -#define NI_GET_VALUE(_pi, _v, _type) \ -{ \ - switch(_type) { \ - case tBool: \ - _v = (*(Bool*)_pi) != 0; \ - break; \ - case tUInt8: \ - _v = *(UInt8*)_pi; \ - break; \ - case tUInt16: \ - _v = *(UInt16*)_pi; \ - break; \ - case tUInt32: \ - _v = *(UInt32*)_pi; \ - break; \ - case tInt8: \ - _v = *(Int8*)_pi; \ - break; \ - case tInt16: \ - _v = *(Int16*)_pi; \ - break; \ - case tInt32: \ - _v = *(Int32*)_pi; \ - break; \ - case tInt64: \ - _v = *(Int64*)_pi; \ - break; \ - case tUInt64: \ - _v = *(UInt64*)_pi; \ - break; \ - case tFloat32: \ - _v = *(Float32*)_pi; \ - break; \ - case tFloat64: \ - _v = *(Float64*)_pi; \ - break; \ - default: \ - PyErr_SetString(PyExc_RuntimeError, \ - "data type not supported"); \ - return 0; \ - } \ -} -#else -#define NI_GET_VALUE(_pi, _v, _type) \ -{ \ - switch(_type) { \ - case tBool: \ - _v = (*(Bool*)_pi) != 0; \ - break; \ - case tUInt8: \ - _v = *(UInt8*)_pi; \ - break; \ - case tUInt16: \ - _v = *(UInt16*)_pi; \ - break; \ - case tUInt32: \ - _v = *(UInt32*)_pi; \ - break; \ - case tInt8: \ - _v = *(Int8*)_pi; \ - break; \ - case tInt16: \ - _v = *(Int16*)_pi; \ - break; \ - case tInt32: \ - _v = *(Int32*)_pi; \ - break; \ - case tInt64: \ - _v = *(Int64*)_pi; \ - break; \ - case tFloat32: \ - _v = *(Float32*)_pi; \ - break; \ - case tFloat64: \ - _v = *(Float64*)_pi; \ - break; \ - default: \ - PyErr_SetString(PyExc_RuntimeError, \ - "data type not supported"); \ - return 0; \ - } \ -} -#endif - -/* macro to get label value: */ -#if HAS_UINT64 -#define NI_GET_LABEL(_pm, _label, _type) \ -{ \ - if (_pm) { \ - switch(_type) { \ - case tBool: \ - _label = *(Bool*)_pm; \ - break; \ - case tUInt8: \ - _label = *(UInt8*)_pm; \ - break; \ - case tUInt16: \ - _label = *(UInt16*)_pm; \ - break; \ - case tUInt32: \ - _label = *(UInt32*)_pm; \ - break; \ - case tUInt64: \ - _label = *(UInt64*)_pm; \ - break; \ - case tInt8: \ - _label = *(Int8*)_pm; \ - break; \ - 
case tInt16: \ - _label = *(Int16*)_pm; \ - break; \ - case tInt32: \ - _label = *(Int32*)_pm; \ - break; \ - case tInt64: \ - _label = *(Int64*)_pm; \ - break; \ - case tFloat32: \ - _label = *(Float32*)_pm; \ - break; \ - case tFloat64: \ - _label = *(Float64*)_pm; \ - break; \ - default: \ - PyErr_SetString(PyExc_RuntimeError, \ - "data type not supported"); \ - return 0; \ - } \ - } \ -} -#else -#define NI_GET_LABEL(_pm, _label, _type) \ -{ \ - if (_pm) { \ - switch(_type) { \ - case tBool: \ - _label = *(Bool*)_pm; \ - break; \ - case tUInt8: \ - _label = *(UInt8*)_pm; \ - break; \ - case tUInt16: \ - _label = *(UInt16*)_pm; \ - break; \ - case tUInt32: \ - _label = *(UInt32*)_pm; \ - break; \ - case tInt8: \ - _label = *(Int8*)_pm; \ - break; \ - case tInt16: \ - _label = *(Int16*)_pm; \ - break; \ - case tInt32: \ - _label = *(Int32*)_pm; \ - break; \ - case tInt64: \ - _label = *(Int64*)_pm; \ - break; \ - case tFloat32: \ - _label = *(Float32*)_pm; \ - break; \ - case tFloat64: \ - _label = *(Float64*)_pm; \ - break; \ - default: \ - PyErr_SetString(PyExc_RuntimeError, \ - "data type not supported"); \ - return 0; \ - } \ - } \ -} -#endif - -int NI_Statistics(PyArrayObject *input, PyArrayObject *labels, - npy_intp min_label, npy_intp max_label, npy_intp *indices, - npy_intp n_results, double *sum, npy_intp *total, double *variance, - double *minimum, double *maximum, npy_intp* min_pos, npy_intp* max_pos) -{ - char *pi = NULL, *pm = NULL; - NI_Iterator ii, mi; - npy_intp jj, size, idx = 0, label = 1, doit = 1; - int qq; - - /* input iterator: */ - if (!NI_InitPointIterator(input, &ii)) - return 0; - /* input data: */ - pi = (void *)PyArray_DATA(input); - if (labels) { - if (!NI_InitPointIterator(labels, &mi)) - return 0; - pm = (void *)PyArray_DATA(labels); - } - /* input size: */ - size = 1; - for(qq = 0; qq < input->nd; qq++) - size *= input->dimensions[qq]; - for(jj = 0; jj < n_results; jj++) { - if (sum) - sum[jj] = 0.0; - if (total) - total[jj] = 0; - 
if (variance) - variance[jj] = 0; - if (minimum) - minimum[jj] = DBL_MAX; - if (maximum) - maximum[jj] = -DBL_MAX; - if (min_pos) - min_pos[jj] = 0; - if (max_pos) - max_pos[jj] = 0; - } - /* iterate over array: */ - for(jj = 0; jj < size; jj++) { - NI_GET_LABEL(pm, label, labels->descr->type_num); - if (min_label >= 0) { - if (label >= min_label && label <= max_label) { - idx = indices[label - min_label]; - doit = idx >= 0; - } else { - doit = 0; - } - } else { - doit = label != 0; - } - if (doit) { - double val; - NI_GET_VALUE(pi, val, input->descr->type_num); - if (sum) - sum[idx] += val; - if (total) - total[idx]++; - if (minimum && val < minimum[idx]) { - minimum[idx] = val; - if (min_pos) - min_pos[idx] = jj; - } - if (maximum && (val > maximum[idx])) { - maximum[idx] = val; - if (max_pos) - max_pos[idx] = jj; - } - } - if (labels) { - NI_ITERATOR_NEXT2(ii, mi, pi, pm); - } else { - NI_ITERATOR_NEXT(ii, pi); - } - } - if (minimum) { - for(jj = 0; jj < n_results; jj++) { - if (!(minimum[jj] < DBL_MAX)) - minimum[jj] = 0.0; - } - } - if (maximum) { - for(jj = 0; jj < n_results; jj++) { - if (!(maximum[jj] > -DBL_MAX)) - maximum[jj] = 0.0; - } - } - if (variance) { - int do_var = 0; - for(jj = 0; jj < n_results; jj++) - if (total[jj] > 1) { - do_var = 1; - break; - } - if (do_var) { - /* reset input iterator: */ - NI_ITERATOR_RESET(ii); - pi = (void *)PyArray_DATA(input); - if (labels) { - /* reset label iterator: */ - NI_ITERATOR_RESET(mi); - pm = (void *)PyArray_DATA(labels); - } - for(jj = 0; jj < size; jj++) { - NI_GET_LABEL(pm, label, labels->descr->type_num); - if (min_label >= 0) { - if (label >= min_label && label <= max_label) { - idx = indices[label - min_label]; - doit = idx >= 0; - } else { - doit = 0; - } - } else { - doit = label != 0; - } - if (doit) { - double val; - NI_GET_VALUE(pi, val, input->descr->type_num); - val = val - sum[idx] / total[idx]; - variance[idx] += val * val; - } - if (labels) { - NI_ITERATOR_NEXT2(ii, mi, pi, pm); - } else { 
- NI_ITERATOR_NEXT(ii, pi); - } - } - for(jj = 0; jj < n_results; jj++) - variance[jj] = (total[jj] > 1 ? - variance[jj] / (total[jj] - 1) : 0.0); - } - } - return 1; -} - - -int NI_CenterOfMass(PyArrayObject *input, PyArrayObject *labels, - npy_intp min_label, npy_intp max_label, npy_intp *indices, - npy_intp n_results, double *center_of_mass) -{ - char *pi = NULL, *pm = NULL; - NI_Iterator ii, mi; - npy_intp jj, kk, size, idx = 0, label = 1, doit = 1; - double *sum = NULL; - int qq; - - /* input iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* input data: */ - pi = (void *)PyArray_DATA(input); - if (labels) { - if (!NI_InitPointIterator(labels, &mi)) - goto exit; - pm = (void *)PyArray_DATA(labels); - } - /* input size: */ - size = 1; - for(qq = 0; qq < input->nd; qq++) - size *= input->dimensions[qq]; - sum = (double*)malloc(n_results * sizeof(double)); - if (!sum) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < n_results; jj++) { - sum[jj] = 0.0; - for(kk = 0; kk < input->nd; kk++) - center_of_mass[jj * input->nd + kk] = 0.0; - } - /* iterate over array: */ - for(jj = 0; jj < size; jj++) { - NI_GET_LABEL(pm, label, labels->descr->type_num); - if (min_label >= 0) { - if (label >= min_label && label <= max_label) { - idx = indices[label - min_label]; - doit = idx >= 0; - } else { - doit = 0; - } - } else { - doit = label != 0; - } - if (doit) { - double val; - NI_GET_VALUE(pi, val, input->descr->type_num); - sum[idx] += val; - for(kk = 0; kk < input->nd; kk++) - center_of_mass[idx * input->nd + kk] += val * ii.coordinates[kk]; - } - if (labels) { - NI_ITERATOR_NEXT2(ii, mi, pi, pm); - } else { - NI_ITERATOR_NEXT(ii, pi); - } - } - for(jj = 0; jj < n_results; jj++) - for(kk = 0; kk < input->nd; kk++) - center_of_mass[jj * input->nd + kk] /= sum[jj]; - exit: - if (sum) - free(sum); - return PyErr_Occurred() == NULL; -} - - -int NI_Histogram(PyArrayObject *input, PyArrayObject *labels, - npy_intp min_label, npy_intp max_label, 
npy_intp *indices, - npy_intp n_results, PyArrayObject **histograms, - double min, double max, npy_intp nbins) -{ - char *pi = NULL, *pm = NULL; - NI_Iterator ii, mi; - npy_intp jj, kk, size, idx = 0, label = 1, doit = 1; - Int32 **ph = NULL; - double bsize; - int qq; - - /* input iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* input data: */ - pi = (void *)PyArray_DATA(input); - if (labels) { - if (!NI_InitPointIterator(labels, &mi)) - goto exit; - pm = (void *)PyArray_DATA(labels); - } - ph = (Int32**)malloc(n_results * sizeof(Int32*)); - if (!ph) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < n_results; jj++) { - ph[jj] = (Int32*)PyArray_DATA(histograms[jj]); - for(kk = 0; kk < nbins; kk++) - ph[jj][kk] = 0; - } - bsize = (max - min) / (double)nbins; - /* input size: */ - size = 1; - for(qq = 0; qq < input->nd; qq++) - size *= input->dimensions[qq]; - /* iterate over array: */ - for(jj = 0; jj < size; jj++) { - NI_GET_LABEL(pm, label, labels->descr->type_num); - if (min_label >= 0) { - if (label >= min_label && label <= max_label) { - idx = indices[label - min_label]; - doit = idx >= 0; - } else { - doit = 0; - } - } else { - doit = label != 0; - } - if (doit) { - int bin; - double val; - NI_GET_VALUE(pi, val, input->descr->type_num); - if (val >= min && val < max) { - bin = (int)((val - min) / bsize); - ++(ph[idx][bin]); - } - } - if (labels) { - NI_ITERATOR_NEXT2(ii, mi, pi, pm); - } else { - NI_ITERATOR_NEXT(ii, pi); - } - } - exit: - if (ph) - free(ph); - return PyErr_Occurred() == NULL; -} - -#define WS_GET_INDEX(_index, _c_strides, _b_strides, _rank, _out, \ - _contiguous, _type) \ -do { \ - if (_contiguous) { \ - _out = _index * sizeof(_type); \ - } else { \ - int _qq; \ - npy_intp _cc, _idx = _index; \ - _out = 0; \ - for (_qq = 0; _qq < _rank; _qq++) { \ - _cc = _idx / _c_strides[_qq]; \ - _idx -= _cc * _c_strides[_qq]; \ - _out += _b_strides[_qq] * _cc; \ - } \ - } \ -} while(0) - -#define CASE_GET_INPUT(_ival, _pi, 
_type) \ -case t ## _type: \ - _ival = *((_type*)_pi); \ - break - -#define CASE_GET_LABEL(_label, _pm, _type) \ -case t ## _type: \ - _label = *(_type*)_pm; \ - break - -#define CASE_PUT_LABEL(_label, _pl, _type) \ -case t ## _type: \ - *((_type*)_pl) = _label; \ - break - -#define CASE_WINDEX1(_v_index, _p_index, _strides, _istrides, _irank, \ - _icont, _p_idx, _v_idx, _pi, _vval, _pval, _type) \ -case t ## _type: \ - WS_GET_INDEX(_v_index, _strides, _istrides, _irank, _p_idx, _icont, \ - _type); \ - WS_GET_INDEX(_p_index, _strides, _istrides, _irank, _v_idx, _icont, \ - _type); \ - _vval = *(_type*)(_pi + _v_idx); \ - _pval = *(_type*)(_pi + _p_idx); \ - break - -#define CASE_WINDEX2(_v_index, _strides, _ostrides, _irank, _idx, \ - _ocont, _label, _pl, _type) \ -case t ## _type: \ - WS_GET_INDEX(_v_index, _strides, _ostrides, _irank, _idx, \ - _ocont, _type); \ - _label = *(_type*)(_pl + _idx); \ - break - -#define CASE_WINDEX3(_p_index, _strides, _ostrides, _irank, _idx, \ - _ocont, _label, _pl, _type) \ -case t ## _type: \ - WS_GET_INDEX(_p_index, _strides, _ostrides, _irank, _idx, \ - _ocont, _type); \ - *(_type*)(_pl + _idx) = _label; \ -break - -#define DONE_TYPE UInt8 -#define COST_TYPE UInt16 -#define WS_MAXDIM 7 - -typedef struct { - npy_intp index; - COST_TYPE cost; - void *next, *prev; - DONE_TYPE done; -} NI_WatershedElement; - -int NI_WatershedIFT(PyArrayObject* input, PyArrayObject* markers, - PyArrayObject* strct, PyArrayObject* output) -{ - char *pl, *pm, *pi; - int ll; - npy_intp size, jj, hh, kk, maxval; - npy_intp strides[WS_MAXDIM], coordinates[WS_MAXDIM]; - npy_intp *nstrides = NULL, nneigh, ssize; - int i_contiguous, o_contiguous; - NI_WatershedElement *temp = NULL, **first = NULL, **last = NULL; - Bool *ps = NULL; - NI_Iterator mi, ii, li; - - i_contiguous = PyArray_ISCONTIGUOUS(input); - o_contiguous = PyArray_ISCONTIGUOUS(output); - ssize = 1; - for(ll = 0; ll < strct->nd; ll++) - ssize *= strct->dimensions[ll]; - if (input->nd > 
WS_MAXDIM) { - PyErr_SetString(PyExc_RuntimeError, "too many dimensions"); - goto exit; - } - size = 1; - for(ll = 0; ll < input->nd; ll++) - size *= input->dimensions[ll]; - /* Storage for the temporary queue data. */ - temp = (NI_WatershedElement*)malloc(size * sizeof(NI_WatershedElement)); - if (!temp) { - PyErr_NoMemory(); - goto exit; - } - pi = (void *)PyArray_DATA(input); - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* Initialization and find the maximum of the input. */ - maxval = 0; - for(jj = 0; jj < size; jj++) { - int ival = 0; - switch(input->descr->type_num) { - CASE_GET_INPUT(ival, pi, UInt8); - CASE_GET_INPUT(ival, pi, UInt16); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - temp[jj].index = jj; - temp[jj].done = 0; - if (ival > maxval) - maxval = ival; - NI_ITERATOR_NEXT(ii, pi); - } - pi = (void *)PyArray_DATA(input); - /* Allocate and initialize the storage for the queue. */ - first = (NI_WatershedElement**)malloc((maxval + 1) * - sizeof(NI_WatershedElement*)); - last = (NI_WatershedElement**)malloc((maxval + 1) * - sizeof(NI_WatershedElement*)); - if (!first || !last) { - PyErr_NoMemory(); - goto exit; - } - for(hh = 0; hh <= maxval; hh++) { - first[hh] = NULL; - last[hh] = NULL; - } - if (!NI_InitPointIterator(markers, &mi)) - goto exit; - if (!NI_InitPointIterator(output, &li)) - goto exit; - pm = (void *)PyArray_DATA(markers); - pl = (void *)PyArray_DATA(output); - /* initialize all nodes */ - for(ll = 0; ll < input->nd; ll++) - coordinates[ll] = 0; - for(jj = 0; jj < size; jj++) { - /* get marker */ - int label = 0; - switch(markers->descr->type_num) { - CASE_GET_LABEL(label, pm, UInt8); - CASE_GET_LABEL(label, pm, UInt16); - CASE_GET_LABEL(label, pm, UInt32); -#if HAS_UINT64 - CASE_GET_LABEL(label, pm, UInt64); -#endif - CASE_GET_LABEL(label, pm, Int8); - CASE_GET_LABEL(label, pm, Int16); - CASE_GET_LABEL(label, pm, Int32); - CASE_GET_LABEL(label, pm, Int64); - default: - 
PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - switch(output->descr->type_num) { - CASE_PUT_LABEL(label, pl, UInt8); - CASE_PUT_LABEL(label, pl, UInt16); - CASE_PUT_LABEL(label, pl, UInt32); -#if HAS_UINT64 - CASE_PUT_LABEL(label, pl, UInt64); -#endif - CASE_PUT_LABEL(label, pl, Int8); - CASE_PUT_LABEL(label, pl, Int16); - CASE_PUT_LABEL(label, pl, Int32); - CASE_PUT_LABEL(label, pl, Int64); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - NI_ITERATOR_NEXT2(mi, li, pm, pl); - if (label != 0) { - /* This node is a marker */ - temp[jj].cost = 0; - if (!first[0]) { - first[0] = &(temp[jj]); - first[0]->next = NULL; - first[0]->prev = NULL; - last[0] = first[0]; - } else { - if (label > 0) { - /* object markers are enqueued at the beginning, so they - are processed first. */ - temp[jj].next = first[0]; - temp[jj].prev = NULL; - first[0]->prev = &(temp[jj]); - first[0] = &(temp[jj]); - } else { - /* background markers are enqueued at the end, so they are - processed after the object markers. 
*/ - temp[jj].next = NULL; - temp[jj].prev = last[0]; - last[0]->next = &(temp[jj]); - last[0] = &(temp[jj]); - } - } - } else { - /* This node is not a marker */ - temp[jj].cost = maxval + 1; - temp[jj].next = NULL; - temp[jj].prev = NULL; - } - for(ll = input->nd - 1; ll >= 0; ll--) - if (coordinates[ll] < input->dimensions[ll] - 1) { - coordinates[ll]++; - break; - } else { - coordinates[ll] = 0; - } - } - - pl = (void *)PyArray_DATA(output); - ps = (Bool*)PyArray_DATA(strct); - nneigh = 0; - for (kk = 0; kk < ssize; kk++) - if (ps[kk] && kk != (ssize / 2)) - ++nneigh; - nstrides = (npy_intp*)malloc(nneigh * sizeof(npy_intp)); - if (!nstrides) { - PyErr_NoMemory(); - goto exit; - } - strides[input->nd - 1] = 1; - for(ll = input->nd - 2; ll >= 0; ll--) - strides[ll] = input->dimensions[ll + 1] * strides[ll + 1]; - for(ll = 0; ll < input->nd; ll++) - coordinates[ll] = -1; - for(kk = 0; kk < nneigh; kk++) - nstrides[kk] = 0; - jj = 0; - for(kk = 0; kk < ssize; kk++) { - if (ps[kk]) { - int offset = 0; - for(ll = 0; ll < input->nd; ll++) - offset += coordinates[ll] * strides[ll]; - if (offset != 0) - nstrides[jj++] += offset; - } - for(ll = input->nd - 1; ll >= 0; ll--) - if (coordinates[ll] < 1) { - coordinates[ll]++; - break; - } else { - coordinates[ll] = -1; - } - } - /* Propagation phase: */ - for(jj = 0; jj <= maxval; jj++) { - while (first[jj]) { - /* dequeue first element: */ - NI_WatershedElement *v = first[jj]; - first[jj] = first[jj]->next; - if (first[jj]) - first[jj]->prev = NULL; - v->prev = NULL; - v->next = NULL; - /* Mark element as done: */ - v->done = 1; - /* Iterate over the neighbors of the element: */ - for(hh = 0; hh < nneigh; hh++) { - npy_intp v_index = v->index, p_index = v->index, idx, cc; - int qq, outside = 0; - p_index += nstrides[hh]; - /* check if the neighbor is within the extent of the array: */ - idx = p_index; - for (qq = 0; qq < input->nd; qq++) { - cc = idx / strides[qq]; - if (cc < 0 || cc >= input->dimensions[qq]) { - outside 
= 1; - break; - } - idx -= cc * strides[qq]; - } - if (!outside) { - NI_WatershedElement *p = &(temp[p_index]); - if (!(p->done)) { - /* If the neighbor was not processed yet: */ - int max, pval, vval, wvp, pcost, label, p_idx, v_idx; - switch(input->descr->type_num) { - CASE_WINDEX1(v_index, p_index, strides, input->strides, - input->nd, i_contiguous, p_idx, v_idx, pi, - vval, pval, UInt8); - CASE_WINDEX1(v_index, p_index, strides, input->strides, - input->nd, i_contiguous, p_idx, v_idx, pi, - vval, pval, UInt16); - default: - PyErr_SetString(PyExc_RuntimeError, - "data type not supported"); - goto exit; - } - /* Calculate cost: */ - wvp = pval - vval; - if (wvp < 0) - wvp = -wvp; - /* Find the maximum of this cost and the current - element cost: */ - pcost = p->cost; - max = v->cost > wvp ? v->cost : wvp; - if (max < pcost) { - /* If this maximum is less than the neighbors cost, - adapt the cost and the label of the neighbor: */ - int idx; - p->cost = max; - switch(output->descr->type_num) { - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt8); - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt16); - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt32); -#if HAS_UINT64 - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt64); -#endif - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int8); - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int16); - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int32); - CASE_WINDEX2(v_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int64); - default: - PyErr_SetString(PyExc_RuntimeError, - "data type not supported"); - goto exit; - } - switch(output->descr->type_num) { - CASE_WINDEX3(p_index, 
strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt8); - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt16); - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt32); -#if HAS_UINT64 - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, UInt64); -#endif - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int8); - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int16); - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int32); - CASE_WINDEX3(p_index, strides, output->strides, input->nd, - idx, o_contiguous, label, pl, Int64); - default: - PyErr_SetString(PyExc_RuntimeError, - "data type not supported"); - goto exit; - } - /* If the neighbor is in a queue, remove it: */ - if (p->next || p->prev) { - NI_WatershedElement *prev = p->prev, *next = p->next; - if (first[pcost] == p) - first[pcost] = next; - if (last[pcost] == p) - last[pcost] = prev; - if (prev) - prev->next = next; - if (next) - next->prev = prev; - } - /* Insert the neighbor in the appropiate queue: */ - if (label < 0) { - p->prev = last[max]; - p->next = NULL; - if (last[max]) - last[max]->next = p; - last[max] = p; - if (!first[max]) - first[max] = p; - } else { - p->next = first[max]; - p->prev = NULL; - if (first[max]) - first[max]->prev = p; - first[max] = p; - if (!last[max]) - last[max] = p; - } - } - } - } - } - } - } - exit: - if (temp) - free(temp); - if (first) - free(first); - if (last) - free(last); - if (nstrides) - free(nstrides); - return PyErr_Occurred() ? 
0 : 1; -} diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_measure.h b/scipy-0.10.1/scipy/ndimage/src/ni_measure.h deleted file mode 100644 index 215987ff8c..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_measure.h +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef NI_MEASURE_H -#define NI_MEASURE_H - -#include "nd_image.h" - -/* structure for array regions to find objects: */ -typedef struct { - int start[NI_MAXDIM], end[NI_MAXDIM]; -} NI_ObjectRegion; - -int NI_Label(PyArrayObject*, PyArrayObject*, npy_intp*, PyArrayObject*); - -int NI_FindObjects(PyArrayObject*, npy_intp, npy_intp*); - -int NI_CenterOfMass(PyArrayObject*, PyArrayObject*, npy_intp, npy_intp, - npy_intp*, npy_intp, double*); - -int NI_Histogram(PyArrayObject*, PyArrayObject*, npy_intp, npy_intp, - npy_intp*, npy_intp, PyArrayObject**, double, double, - npy_intp); - -int NI_Statistics(PyArrayObject*, PyArrayObject*, npy_intp, npy_intp, - npy_intp*, npy_intp, double*, npy_intp*, double*, - double*, double*, npy_intp*, npy_intp*); - -int NI_WatershedIFT(PyArrayObject*, PyArrayObject*, PyArrayObject*, - PyArrayObject*); - -#endif diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_morphology.c b/scipy-0.10.1/scipy/ndimage/src/ni_morphology.c deleted file mode 100644 index 5a85462d55..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_morphology.c +++ /dev/null @@ -1,956 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "ni_support.h" -#include "ni_morphology.h" -#include -#include -#include -#include - -#define LIST_SIZE 100000 - -#define CASE_GET_MASK(_msk_value, _pm, _type) \ -case t ## _type: \ - _msk_value = *(_type*)_pm ? 1 : 0; \ - break - -#define CASE_OUTPUT(_po, _out, _type) \ -case t ## _type: \ - *(_type*)_po = (_type)_out; \ - break - -#define CASE_NI_ERODE_POINT(_pi, _out, _offsets, _filter_size, _type, \ - _mv, _border_value, _bv, _center_is_true,\ - _true, _false, _changed) \ -case t ## _type: \ -{ \ - npy_intp _ii, _oo; \ - int _in = *(_type*)_pi ? 1 : 0; \ - if (_mv) { \ - if (_center_is_true && _in == false) { \ - _changed = 0; \ - _out = _in; \ - } else { \ - _out = _true; \ - for(_ii = 0; _ii < _filter_size; _ii++) { \ - _oo = _offsets[_ii]; \ - if (_oo == _bv) { \ - if (!_border_value) { \ - _out = _false; \ - break; \ - } \ - } else { \ - int _nn = *(_type*)(_pi + _oo) ? 
_true : _false; \ - if (!_nn) { \ - _out = _false; \ - break; \ - } \ - } \ - } \ - _changed = _out != _in; \ - } \ - } else { \ - _out = _in; \ - } \ -} \ -break - -int NI_BinaryErosion(PyArrayObject* input, PyArrayObject* strct, - PyArrayObject* mask, PyArrayObject* output, int bdr_value, - npy_intp *origins, int invert, int center_is_true, - int* changed, NI_CoordinateList **coordinate_list) -{ - npy_intp struct_size = 0, *offsets = NULL, size, *oo, jj; - npy_intp ssize, block_size = 0, *current = NULL, border_flag_value; - int kk, true, false, msk_value; - NI_Iterator ii, io, mi; - NI_FilterIterator fi; - Bool *ps, out = 0; - char *pi, *po, *pm = NULL; - NI_CoordinateBlock *block = NULL; - - ps = (Bool*)PyArray_DATA(strct); - ssize = 1; - for(kk = 0; kk < strct->nd; kk++) - ssize *= strct->dimensions[kk]; - for(jj = 0; jj < ssize; jj++) - if (ps[jj]) ++struct_size; - if (mask) { - if (!NI_InitPointIterator(mask, &mi)) - return 0; - pm = (void *)PyArray_DATA(mask); - } - /* calculate the filter offsets: */ - if (!NI_InitFilterOffsets(input, ps, strct->dimensions, origins, - NI_EXTEND_CONSTANT, &offsets, &border_flag_value, NULL)) - goto exit; - /* initialize input element iterator: */ - if (!NI_InitPointIterator(input, &ii)) - goto exit; - /* initialize output element iterator: */ - if (!NI_InitPointIterator(output, &io)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(input->nd, strct->dimensions, struct_size, - input->dimensions, origins, &fi)) - goto exit; - - /* get data pointers an size: */ - pi = (void *)PyArray_DATA(input); - po = (void *)PyArray_DATA(output); - size = 1; - for(kk = 0; kk < input->nd; kk++) - size *= input->dimensions[kk]; - if (invert) { - bdr_value = bdr_value ? 0 : 1; - true = 0; - false = 1; - } else { - bdr_value = bdr_value ? 
1 : 0; - true = 1; - false = 0; - } - if (coordinate_list) { - block_size = LIST_SIZE / input->nd / sizeof(int); - if (block_size < 1) - block_size = 1; - if (block_size > size) - block_size = size; - *coordinate_list = NI_InitCoordinateList(block_size, input->nd); - if (!*coordinate_list) - goto exit; - } - /* iterator over the elements: */ - oo = offsets; - *changed = 0; - msk_value = 1; - for(jj = 0; jj < size; jj++) { - int pchange = 0; - if (mask) { - switch(mask->descr->type_num) { - CASE_GET_MASK(msk_value, pm, Bool); - CASE_GET_MASK(msk_value, pm, UInt8); - CASE_GET_MASK(msk_value, pm, UInt16); - CASE_GET_MASK(msk_value, pm, UInt32); -#if HAS_UINT64 - CASE_GET_MASK(msk_value, pm, UInt64); -#endif - CASE_GET_MASK(msk_value, pm, Int8); - CASE_GET_MASK(msk_value, pm, Int16); - CASE_GET_MASK(msk_value, pm, Int32); - CASE_GET_MASK(msk_value, pm, Int64); - CASE_GET_MASK(msk_value, pm, Float32); - CASE_GET_MASK(msk_value, pm, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - return 0; - } - } - switch (input->descr->type_num) { - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Bool, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, UInt8, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, UInt16, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, UInt32, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); -#if HAS_UINT64 - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, UInt64, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); -#endif - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Int8, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Int16, 
msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Int32, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Int64, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Float32, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - CASE_NI_ERODE_POINT(pi, out, oo, struct_size, Float64, msk_value, - bdr_value, border_flag_value, center_is_true, - true, false, pchange); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - switch (output->descr->type_num) { - CASE_OUTPUT(po, out, Bool); - CASE_OUTPUT(po, out, UInt8); - CASE_OUTPUT(po, out, UInt16); - CASE_OUTPUT(po, out, UInt32); -#if HAS_UINT64 - CASE_OUTPUT(po, out, UInt64); -#endif - CASE_OUTPUT(po, out, Int8); - CASE_OUTPUT(po, out, Int16); - CASE_OUTPUT(po, out, Int32); - CASE_OUTPUT(po, out, Int64); - CASE_OUTPUT(po, out, Float32); - CASE_OUTPUT(po, out, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - if (pchange) { - *changed = 1; - if (coordinate_list) { - if (block == NULL || block->size == block_size) { - block = NI_CoordinateListAddBlock(*coordinate_list); - current = block->coordinates; - } - for(kk = 0; kk < input->nd; kk++) - *current++ = ii.coordinates[kk]; - block->size++; - } - } - if (mask) { - NI_FILTER_NEXT3(fi, ii, io, mi, oo, pi, po, pm); - } else { - NI_FILTER_NEXT2(fi, ii, io, oo, pi, po); - } - } - - exit: - if (offsets) - free(offsets); - if (PyErr_Occurred()) { - if (coordinate_list) { - NI_FreeCoordinateList(*coordinate_list); - *coordinate_list = NULL; - } - return 0; - } else { - return 1; - } - return PyErr_Occurred() ? 
0 : 1; -} - -#define CASE_ERODE_POINT2(_struct_size, _offsets, _coordinate_offsets, \ - _pi, _oo, _irank, _list1, _list2, \ - _current_coors1, _current_coors2, _block1, \ - _block2, _bf_value, _true, _false, _type, \ - _mklist) \ -case t ## _type: \ -{ \ - npy_intp _hh, _kk; \ - for(_hh = 0; _hh < _struct_size; _hh++) { \ - npy_intp _to = _offsets[_oo + _hh]; \ - if (_to != _bf_value && *(_type*)(_pi + _to) == _true) { \ - if (_mklist) { \ - npy_intp *_tc = &(_coordinate_offsets[(_oo + _hh) * _irank]); \ - if (_block2 == NULL || _block2->size == _list2->block_size) { \ - _block2 = NI_CoordinateListAddBlock(_list2); \ - _current_coors2 = _block2->coordinates; \ - } \ - for(_kk = 0; _kk < _irank; _kk++) \ - *_current_coors2++ = _current_coors1[_kk] + _tc[_kk]; \ - _block2->size++; \ - } \ - *(_type*)(_pi + _to) = _false; \ - } \ - } \ -} \ -break - -int NI_BinaryErosion2(PyArrayObject* array, PyArrayObject* strct, - PyArrayObject* mask, int niter, npy_intp *origins, - int invert, NI_CoordinateList **iclist) -{ - npy_intp struct_size = 0, *offsets = NULL, oo, jj, ssize; - npy_intp *coordinate_offsets = NULL, size = 0; - npy_intp *current_coordinates1 = NULL, *current_coordinates2 = NULL; - npy_intp kk, border_flag_value, current = 0; - int true, false; - NI_Iterator ii, mi; - NI_FilterIterator fi, ci; - Bool *ps; - char *pi, *ibase, *pm = NULL; - NI_CoordinateBlock *block1 = NULL, *block2 = NULL; - NI_CoordinateList *list1 = NULL, *list2 = NULL; - - ps = (Bool*)PyArray_DATA(strct); - ssize = 1; - for(kk = 0; kk < strct->nd; kk++) - ssize *= strct->dimensions[kk]; - for(jj = 0; jj < ssize; jj++) - if (ps[jj]) ++struct_size; - - /* calculate the filter offsets: */ - if (!NI_InitFilterOffsets(array, ps, strct->dimensions, origins, - NI_EXTEND_CONSTANT, &offsets, - &border_flag_value, &coordinate_offsets)) - goto exit; - - /* initialize input element iterator: */ - if (!NI_InitPointIterator(array, &ii)) - goto exit; - - /* initialize filter iterator: */ - if 
(!NI_InitFilterIterator(array->nd, strct->dimensions, struct_size, - array->dimensions, origins, &fi)) - goto exit; - if (!NI_InitFilterIterator(array->nd, strct->dimensions, - struct_size * array->nd, array->dimensions, - origins, &ci)) - goto exit; - - /* get data pointers and size: */ - ibase = pi = (void *)PyArray_DATA(array); - - if (invert) { - true = 0; - false = 1; - } else { - true = 1; - false = 0; - } - - if (mask) { - /* iterator, data pointer and type of mask array: */ - if (!NI_InitPointIterator(mask, &mi)) - return 0; - pm = (void *)PyArray_DATA(mask); - - size = 1; - for(kk = 0; kk < array->nd; kk++) - size *= array->dimensions[kk]; - - for(jj = 0; jj < size; jj++) { - if (*(Int8*)pm) { - *(Int8*)pm = -1; - } else { - *(Int8*)pm = (Int8)*(Bool*)pi; - *(Bool*)pi = false; - } - NI_ITERATOR_NEXT2(ii, mi, pi, pm) - } - NI_ITERATOR_RESET(ii) - pi = (void *)PyArray_DATA(array); - } - - list1 = NI_InitCoordinateList((*iclist)->block_size, (*iclist)->rank); - list2 = NI_InitCoordinateList((*iclist)->block_size, (*iclist)->rank); - if (!list1 || !list2) - goto exit; - if (NI_CoordinateListStealBlocks(list2, *iclist)) - goto exit; - block2 = list2->blocks; - jj = 0; - while(block1 || block2) { - int mklist = 1; - if (!block1) { - if (niter <= 0 || jj < niter) { - if (NI_CoordinateListStealBlocks(list1, list2)) - goto exit; - block1 = list1->blocks; - block2 = NULL; - current_coordinates1 = block1->coordinates; - current = 0; - ++jj; - mklist = niter <= 0 || jj < niter; - } else { - break; - } - } - NI_ITERATOR_GOTO(ii, current_coordinates1, ibase, pi); - NI_FILTER_GOTO(fi, ii, 0, oo); - - switch (array->descr->type_num) { - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Bool, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - 
current_coordinates2, block1, block2, - border_flag_value, true, false, UInt8, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, UInt16, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, UInt32, mklist); -#if HAS_UINT64 - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, UInt64, mklist); -#endif - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Int8, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Int16, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Int32, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Int64, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Float32, mklist); - CASE_ERODE_POINT2(struct_size, offsets, coordinate_offsets, pi, - oo, array->nd, list1, list2, current_coordinates1, - current_coordinates2, block1, block2, - border_flag_value, true, false, Float64, mklist); 
- default: - PyErr_SetString(PyExc_RuntimeError, "data type not supported"); - goto exit; - } - - ++current; - if (current == block1->size) { - block1 = NI_CoordinateListDeleteBlock(list1); - if (block1) { - current_coordinates1 = block1->coordinates; - current = 0; - } - } else { - current_coordinates1 += array->nd; - } - } - - if (mask) { - NI_ITERATOR_RESET(ii) - NI_ITERATOR_RESET(mi) - pi = (void *)PyArray_DATA(array); - pm = (void *)PyArray_DATA(mask); - for(jj = 0; jj < size; jj++) { - int value = *(Int8*)pm; - if (value >= 0) - *(Bool*)pi = value; - NI_ITERATOR_NEXT2(ii, mi, pi, pm) - } - } - - exit: - if (offsets) - free(offsets); - if (coordinate_offsets) - free(coordinate_offsets); - NI_FreeCoordinateList(list1); - NI_FreeCoordinateList(list2); - if (PyErr_Occurred()) { - return 0; - } else { - return 1; - } - return PyErr_Occurred() ? 0 : 1; -} - - -#define NI_DISTANCE_EUCLIDIAN 1 -#define NI_DISTANCE_CITY_BLOCK 2 -#define NI_DISTANCE_CHESSBOARD 3 - -typedef struct { - npy_intp *coordinates; - npy_intp index; - void *next; -} NI_BorderElement; - -int NI_DistanceTransformBruteForce(PyArrayObject* input, int metric, - PyArrayObject *sampling_arr, - PyArrayObject* distances, - PyArrayObject* features) -{ - npy_intp size, jj, min_index = 0; - int kk; - NI_BorderElement *border_elements = NULL, *temp; - NI_Iterator ii, di, fi; - char *pi, *pd = NULL, *pf = NULL; - Float64 *sampling = sampling_arr ? 
(void *)PyArray_DATA(sampling_arr) : NULL; - - /* check the output arrays: */ - if (distances) { - pd = (void *)PyArray_DATA(distances); - if (!NI_InitPointIterator(distances, &di)) - goto exit; - } - - if (features) { - pf = (void *)PyArray_DATA(features); - if (!NI_InitPointIterator(features, &fi)) - goto exit; - } - - size = 1; - for(kk = 0; kk < input->nd; kk++) - size *= input->dimensions[kk]; - pi = (void *)PyArray_DATA(input); - - if (!NI_InitPointIterator(input, &ii)) - goto exit; - - for(jj = 0; jj < size; jj++) { - if (*(Int8*)pi < 0) { - temp = (NI_BorderElement*)malloc(sizeof(NI_BorderElement)); - if (!temp) { - PyErr_NoMemory(); - goto exit; - } - temp->next = border_elements; - border_elements = temp; - temp->index = jj; - temp->coordinates = (npy_intp*)malloc(input->nd * sizeof(npy_intp)); - for(kk = 0; kk < input->nd; kk++) - temp->coordinates[kk] = ii.coordinates[kk]; - } - NI_ITERATOR_NEXT(ii, pi); - } - - NI_ITERATOR_RESET(ii); - pi = (void *)PyArray_DATA(input); - - switch(metric) { - case NI_DISTANCE_EUCLIDIAN: - for(jj = 0; jj < size; jj++) { - if (*(Int8*)pi > 0) { - double distance = DBL_MAX; - temp = border_elements; - while(temp) { - double d = 0.0, t; - for(kk = 0; kk < input->nd; kk++) { - t = ii.coordinates[kk] - temp->coordinates[kk]; - if (sampling) - t *= sampling[kk]; - d += t * t; - } - if (d < distance) { - distance = d; - if (features) - min_index = temp->index; - } - temp = temp->next; - } - if (distances) - *(Float64*)pd = sqrt(distance); - if (features) - *(Int32*)pf = min_index; - } else { - if (distances) - *(Float64*)pd = 0.0; - if (features) - *(Int32*)pf = jj; - } - if (features && distances) { - NI_ITERATOR_NEXT3(ii, di, fi, pi, pd, pf); - } else if (distances) { - NI_ITERATOR_NEXT2(ii, di, pi, pd); - } else { - NI_ITERATOR_NEXT2(ii, fi, pi, pf); - } - } - break; - case NI_DISTANCE_CITY_BLOCK: - case NI_DISTANCE_CHESSBOARD: - for(jj = 0; jj < size; jj++) { - if (*(Int8*)pi > 0) { - unsigned int distance = UINT_MAX; - 
temp = border_elements; - while(temp) { - unsigned int d = 0; - npy_intp t; - for(kk = 0; kk < input->nd; kk++) { - t = ii.coordinates[kk] - temp->coordinates[kk]; - if (t < 0) - t = -t; - if (metric == NI_DISTANCE_CITY_BLOCK) { - d += t; - } else { - if ((unsigned int)t > d) - d = t; - } - } - if (d < distance) { - distance = d; - if (features) - min_index = temp->index; - } - temp = temp->next; - } - if (distances) - *(UInt32*)pd = distance; - if (features) - *(Int32*)pf = min_index; - } else { - if (distances) - *(UInt32*)pd = 0; - if (features) - *(Int32*)pf = jj; - } - if (features && distances) { - NI_ITERATOR_NEXT3(ii, di, fi, pi, pd, pf); - } else if (distances) { - NI_ITERATOR_NEXT2(ii, di, pi, pd); - } else { - NI_ITERATOR_NEXT2(ii, fi, pi, pf); - } - } - break; - default: - PyErr_SetString(PyExc_RuntimeError, "distance metric not supported"); - goto exit; - } - - exit: - while (border_elements) { - temp = border_elements; - border_elements = border_elements->next; - if (temp->coordinates) - free(temp->coordinates); - free(temp); - } - return PyErr_Occurred() ? 
0 : 1; -} - - -int NI_DistanceTransformOnePass(PyArrayObject *strct, - PyArrayObject* distances, - PyArrayObject *features) -{ - int kk; - npy_intp jj, ii, ssize, size, filter_size, mask_value, *oo; - npy_intp *foffsets = NULL, *foo = NULL, *offsets = NULL; - Bool *ps, *pf = NULL, *footprint = NULL; - char *pd; - NI_FilterIterator si, ti; - NI_Iterator di, fi; - - ssize = 1; - for(kk = 0; kk < strct->nd; kk++) - ssize *= strct->dimensions[kk]; - - /* we only use the first half of the structure data, so we make a - temporary structure for use with the filter functions: */ - footprint = (Bool*)malloc(ssize * sizeof(Bool)); - if (!footprint) { - PyErr_NoMemory(); - goto exit; - } - ps = (Bool*)PyArray_DATA(strct); - filter_size = 0; - for(jj = 0; jj < ssize / 2; jj++) { - footprint[jj] = ps[jj]; - if (ps[jj]) - ++filter_size; - } - for(jj = ssize / 2; jj < ssize; jj++) - footprint[jj] = 0; - /* get data and size */ - pd = (void *)PyArray_DATA(distances); - size = 1; - for(kk = 0; kk < distances->nd; kk++) - size *= distances->dimensions[kk]; - if (!NI_InitPointIterator(distances, &di)) - goto exit; - /* calculate the filter offsets: */ - if (!NI_InitFilterOffsets(distances, footprint, strct->dimensions, NULL, - NI_EXTEND_CONSTANT, &offsets, &mask_value, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(distances->nd, strct->dimensions, - filter_size, distances->dimensions, NULL, &si)) - goto exit; - - if (features) { - npy_intp dummy; - /* initialize point iterator: */ - pf = (void *)PyArray_DATA(features); - if (!NI_InitPointIterator(features, &fi)) - goto exit; - /* calculate the filter offsets: */ - if (!NI_InitFilterOffsets(features, footprint, strct->dimensions, - NULL, NI_EXTEND_CONSTANT, &foffsets, &dummy, NULL)) - goto exit; - /* initialize filter iterator: */ - if (!NI_InitFilterIterator(distances->nd, strct->dimensions, - filter_size, distances->dimensions, NULL, &ti)) - goto exit; - } - /* iterator over the elements: */ - 
oo = offsets; - if (features) - foo = foffsets; - for(jj = 0; jj < size; jj++) { - Int32 value = *(Int32*)pd; - if (value != 0) { - Int32 min = value; - npy_intp min_offset = 0; - /* iterate over structuring element: */ - for(ii = 0; ii < filter_size; ii++) { - npy_intp offset = oo[ii]; - Int32 tt = -1; - if (offset < mask_value) - tt = *(Int32*)(pd + offset); - if (tt >= 0) { - if ((min < 0) || (tt + 1 < min)) { - min = tt + 1; - if (features) - min_offset = foo[ii]; - } - } - } - *(Int32*)pd = min; - if (features) - *(Int32*)pf = *(Int32*)(pf + min_offset); - } - if (features) { - NI_FILTER_NEXT(ti, fi, foo, pf); - } - NI_FILTER_NEXT(si, di, oo, pd); - } - - exit: - if (offsets) free(offsets); - if (foffsets) free(foffsets); - if (footprint) - free(footprint); - return PyErr_Occurred() ? 0 : 1; -} - -static void _VoronoiFT(char *pf, npy_intp len, npy_intp *coor, int rank, - int d, npy_intp stride, npy_intp cstride, - npy_intp **f, npy_intp *g, Float64 *sampling) -{ - npy_intp l = -1, ii, maxl, idx1, idx2; - int jj; - - for(ii = 0; ii < len; ii++) - for(jj = 0; jj < rank; jj++) - f[ii][jj] = *(Int32*)(pf + ii * stride + cstride * jj); - for(ii = 0; ii < len; ii++) { - if (*(Int32*)(pf + ii * stride) >= 0) { - double fd = f[ii][d]; - double wR = 0.0; - for(jj = 0; jj < rank; jj++) { - if (jj != d) { - double tw = f[ii][jj] - coor[jj]; - if (sampling) - tw *= sampling[jj]; - wR += tw * tw; - } - } - while(l >= 1) { - double a, b, c, uR = 0.0, vR = 0.0, f1; - idx1 = g[l]; - f1 = f[idx1][d]; - idx2 = g[l - 1]; - a = f1 - f[idx2][d]; - b = fd - f1; - if (sampling) { - a *= sampling[d]; - b *= sampling[d]; - } - c = a + b; - for(jj = 0; jj < rank; jj++) { - if (jj != d) { - double cc = coor[jj]; - double tu = f[idx2][jj] - cc; - double tv = f[idx1][jj] - cc; - if (sampling) { - tu *= sampling[jj]; - tv *= sampling[jj]; - } - uR += tu * tu; - vR += tv * tv; - } - } - if (c * vR - b * uR - a * wR - a * b * c <= 0.0) - break; - --l; - } - ++l; - g[l] = ii; - } - } - maxl = 
l; - if (maxl >= 0) { - l = 0; - for (ii = 0; ii < len; ii++) { - double delta1 = 0.0, t; - for(jj = 0; jj < rank; jj++) { - t = jj == d ? f[g[l]][jj] - ii : f[g[l]][jj] - coor[jj]; - if (sampling) - t *= sampling[jj]; - delta1 += t * t; - } - while (l < maxl) { - double delta2 = 0.0; - for(jj = 0; jj < rank; jj++) { - t = jj == d ? f[g[l + 1]][jj] - ii : f[g[l + 1]][jj] - coor[jj]; - if (sampling) - t *= sampling[jj]; - delta2 += t * t; - } - if (delta1 <= delta2) - break; - delta1 = delta2; - ++l; - } - idx1 = g[l]; - for(jj = 0; jj < rank; jj++) - *(Int32*)(pf + ii * stride + jj * cstride) = f[idx1][jj]; - } - } -} - - -/* Recursive feature transform */ -static void _ComputeFT(char *pi, char *pf, npy_intp *ishape, - npy_intp *istrides, npy_intp *fstrides, int rank, - int d, npy_intp *coor, npy_intp **f, npy_intp *g, - PyArrayObject *features, Float64 *sampling) -{ - int kk; - npy_intp jj; - - if (d == 0) { - char *tf1 = pf; - for(jj = 0; jj < ishape[0]; jj++) { - if (*(Int8*)pi) { - *(Int32*)tf1 = -1; - } else { - char *tf2 = tf1; - *(Int32*)tf2 = jj; - for(kk = 1; kk < rank; kk++) { - tf2 += fstrides[0]; - *(Int32*)tf2 = coor[kk]; - } - } - pi += istrides[0]; - tf1 += fstrides[1]; - } - _VoronoiFT(pf, ishape[0], coor, rank, 0, fstrides[1], fstrides[0], f, - g, sampling); - } else { - UInt32 axes = 0; - char *tf = pf; - npy_intp size = 1; - NI_Iterator ii; - - for(jj = 0; jj < ishape[d]; jj++) { - coor[d] = jj; - _ComputeFT(pi, tf, ishape, istrides, fstrides, rank, d - 1, coor, f, - g, features, sampling); - pi += istrides[d]; - tf += fstrides[d + 1]; - } - - for(jj = 0; jj < d; jj++) { - axes |= (UInt32)1 << (jj + 1); - size *= ishape[jj]; - } - NI_InitPointIterator(features, &ii); - NI_SubspaceIterator(&ii, axes); - tf = pf; - for(jj = 0; jj < size; jj++) { - for(kk = 0; kk < d; kk++) - coor[kk] = ii.coordinates[kk]; - _VoronoiFT(tf, ishape[d], coor, rank, d, fstrides[d + 1], - fstrides[0], f, g, sampling); - NI_ITERATOR_NEXT(ii, tf); - } - for(kk = 0; kk < d; 
kk++) - coor[kk] = 0; - } -} - -/* Exact euclidean feature transform, as described in: C. R. Maurer, - Jr., R. Qi, V. Raghavan, "A linear time algorithm for computing - exact euclidean distance transforms of binary images in arbitrary - dimensions. IEEE Trans. PAMI 25, 265-270, 2003. */ -int NI_EuclideanFeatureTransform(PyArrayObject* input, - PyArrayObject *sampling_arr, - PyArrayObject* features) -{ - int ii; - npy_intp coor[NI_MAXDIM], mx = 0, jj; - npy_intp *tmp = NULL, **f = NULL, *g = NULL; - char *pi, *pf; - Float64 *sampling = sampling_arr ? ((void *)PyArray_DATA(sampling_arr)) : NULL; - - pi = (void *)PyArray_DATA(input); - pf = (void *)PyArray_DATA(features); - for(ii = 0; ii < input->nd; ii++) { - coor[ii] = 0; - if (input->dimensions[ii] > mx) - mx = input->dimensions[ii]; - } - - /* Some temporaries */ - f = (npy_intp**)malloc(mx * sizeof(npy_intp*)); - g = (npy_intp*)malloc(mx * sizeof(npy_intp)); - tmp = (npy_intp*)malloc(mx * input->nd * sizeof(npy_intp)); - if (!f || !g || !tmp) { - PyErr_NoMemory(); - goto exit; - } - for(jj = 0; jj < mx; jj++) - f[jj] = tmp + jj * input->nd; - - /* First call of recursive feature transform */ - _ComputeFT(pi, pf, input->dimensions, input->strides, features->strides, - input->nd, input->nd - 1, coor, f, g, features, sampling); - - exit: - if (f) - free(f); - if (g) - free(g); - if (tmp) - free(tmp); - - return PyErr_Occurred() ? 0 : 1; -} diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_morphology.h b/scipy-0.10.1/scipy/ndimage/src/ni_morphology.h deleted file mode 100644 index 33e68352e7..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_morphology.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef NI_MORPHOLOGY_H -#define NI_MORPHOLOGY_H - -int NI_BinaryErosion(PyArrayObject*, PyArrayObject*, PyArrayObject*, - PyArrayObject*, int, npy_intp*, int, int, int*, NI_CoordinateList**); -int NI_BinaryErosion2(PyArrayObject*, PyArrayObject*, PyArrayObject*, - int, npy_intp*, int, NI_CoordinateList**); -int NI_DistanceTransformBruteForce(PyArrayObject*, int, PyArrayObject*, - PyArrayObject*, PyArrayObject*); -int NI_DistanceTransformOnePass(PyArrayObject*, PyArrayObject *, - PyArrayObject*); -int NI_EuclideanFeatureTransform(PyArrayObject*, PyArrayObject*, - PyArrayObject*); - -#endif diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_support.c b/scipy-0.10.1/scipy/ndimage/src/ni_support.c deleted file mode 100644 index 8cfcf8efd6..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_support.c +++ /dev/null @@ -1,789 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "ni_support.h" - -/* initialize iterations over single array elements: */ -int NI_InitPointIterator(PyArrayObject *array, NI_Iterator *iterator) -{ - int ii; - - iterator->rank_m1 = array->nd - 1; - for(ii = 0; ii < array->nd; ii++) { - /* adapt dimensions for use in the macros: */ - iterator->dimensions[ii] = array->dimensions[ii] - 1; - /* initialize coordinates: */ - iterator->coordinates[ii] = 0; - /* initialize strides: */ - iterator->strides[ii] = array->strides[ii]; - /* calculate the strides to move back at the end of an axis: */ - iterator->backstrides[ii] = - array->strides[ii] * iterator->dimensions[ii]; - } - return 1; -} - - -/* initialize iteration over a lower sub-space: */ -int NI_SubspaceIterator(NI_Iterator *iterator, UInt32 axes) -{ - int ii, last = 0; - - for(ii = 0; ii <= iterator->rank_m1; ii++) { - if (axes & (((UInt32)1) << ii)) { - if (last != ii) { - iterator->dimensions[last] = iterator->dimensions[ii]; - iterator->strides[last] = iterator->strides[ii]; - iterator->backstrides[last] = iterator->backstrides[ii]; - } - ++last; - } - } - iterator->rank_m1 = last - 1; - return 1; -} - -/* initialize iteration over array lines: */ -int NI_LineIterator(NI_Iterator *iterator, int axis) -{ - UInt32 axes = ((UInt32)1) << axis; - return NI_SubspaceIterator(iterator, ~axes); -} - - -/******************************************************************/ -/* Line buffers */ 
-/******************************************************************/ - -/* Allocate line buffer data */ -int NI_AllocateLineBuffer(PyArrayObject* array, int axis, npy_intp size1, - npy_intp size2, npy_intp *lines, npy_intp max_size, double **buffer) -{ - npy_intp line_size, max_lines; - int ii; - - /* the number of lines of the array is an upper limit for the - number of lines in the buffer: */ - max_lines = 1; - for(ii = 0; ii < array->nd; ii++) - max_lines *= array->dimensions[ii]; - if (array->nd > 0 && array->dimensions[axis] > 0) - max_lines /= array->dimensions[axis]; - /* calculate the space needed for one line, including space to - support the boundary conditions: */ - line_size = sizeof(double) * (array->dimensions[axis] + size1 + size2); - /* if *lines < 1, no number of lines is proposed, so we calculate it - from the maximum size allowed: */ - if (*lines < 1) { - *lines = line_size > 0 ? max_size / line_size : 0; - if (*lines < 1) - *lines = 1; - } - /* no need to allocate too many lines: */ - if (*lines > max_lines) - *lines = max_lines; - /* allocate data for the buffer: */ - *buffer = (double*)malloc(*lines * line_size); - if (!*buffer) { - PyErr_NoMemory(); - return 0; - } - return 1; -} - -/* Some NumPy types are ambiguous */ -int NI_CanonicalType(int type_num) -{ - switch (type_num) { - case NPY_INT: - return NPY_INT32; - - case NPY_LONG: -#if NPY_SIZEOF_LONG == 4 - return NPY_INT32; -#else - return NPY_INT64; -#endif - - case NPY_LONGLONG: - return NPY_INT64; - - case NPY_UINT: - return NPY_UINT32; - - case NPY_ULONG: -#if NPY_SIZEOF_LONG == 4 - return NPY_UINT32; -#else - return NPY_UINT64; -#endif - - case NPY_ULONGLONG: - return NPY_UINT64; - - default: - return type_num; - } -} - -/* Initialize a line buffer */ -int NI_InitLineBuffer(PyArrayObject *array, int axis, npy_intp size1, - npy_intp size2, npy_intp buffer_lines, double *buffer_data, - NI_ExtendMode extend_mode, double extend_value, NI_LineBuffer *buffer) -{ - npy_intp line_length = 
0, array_lines = 0, size; - int ii; - - size = 1; - for(ii = 0; ii < array->nd; ii++) - size *= array->dimensions[ii]; - /* check if the buffer is big enough: */ - if (size > 0 && buffer_lines < 1) { - PyErr_SetString(PyExc_RuntimeError, "buffer too small"); - return 0; - } - /* Initialize a line iterator to move over the array: */ - if (!NI_InitPointIterator(array, &(buffer->iterator))) - return 0; - if (!NI_LineIterator(&(buffer->iterator), axis)) - return 0; - line_length = array->nd > 0 ? array->dimensions[axis] : 1; - if (line_length > 0) - array_lines = line_length > 0 ? size / line_length : 1; - /* initialize the buffer structure: */ - buffer->array_data = (void *)PyArray_DATA(array); - buffer->buffer_data = buffer_data; - buffer->buffer_lines = buffer_lines; - buffer->array_type = NI_CanonicalType(PyArray_DESCR(array)->type_num); - buffer->array_lines = array_lines; - buffer->next_line = 0; - buffer->size1 = size1; - buffer->size2 = size2; - buffer->line_length = line_length; - buffer->line_stride = array->nd > 0 ? 
array->strides[axis] : 0; - buffer->extend_mode = extend_mode; - buffer->extend_value = extend_value; - return 1; -} - -/* Extend a line in memory to implement boundary conditions: */ -int NI_ExtendLine(double *line, npy_intp length, npy_intp size1, - npy_intp size2, NI_ExtendMode mode, double constant_value) -{ - npy_intp ii, jj, length1, nextend, rextend; - double *l1, *l2, *l3, val; - - switch (mode) { - case NI_EXTEND_WRAP: - /* deal with situation where data is shorter than needed - for filling the line */ - nextend = size1 / length; - rextend = size1 - nextend * length; - l1 = line + size1 + length - rextend; - l2 = line; - for(ii = 0; ii < rextend; ii++) - *l2++ = *l1++; - for(ii = 0; ii < nextend; ii++) { - l1 = line + size1; - for(jj = 0; jj < length; jj++) - *l2++ = *l1++; - } - nextend = size2 / length; - rextend = size2 - nextend * length; - l1 = line + size1; - l2 = line + size1 + length; - for(ii = 0; ii < nextend; ii++) { - l3 = l1; - for(jj = 0; jj < length; jj++) - *l2++ = *l3++; - } - for(ii = 0; ii < rextend; ii++) - *l2++ = *l1++; - break; - case NI_EXTEND_MIRROR: - if (length == 1) { - l1 = line; - val = line[size1]; - for(ii = 0; ii < size1; ii++) - *l1++ = val; - l1 = line + size1 + length; - val = line[size1 + length - 1]; - for(ii = 0; ii < size2; ii++) - *l1++ = val; - } else { - length1 = length - 1; - nextend = size1 / length1; - rextend = size1 - nextend * length1; - l1 = line + size1 + 1; - l2 = l1 - 2; - for(ii = 0; ii < nextend; ii++) { - l3 = l1; - for(jj = 0; jj < length1; jj++) - *l2-- = *l3++; - l1 -= length1; - } - for(ii = 0; ii < rextend; ii++) - *l2-- = *l1++; - nextend = size2 / length1; - rextend = size2 - nextend * length1; - l1 = line + size1 + length1 - 1; - l2 = l1 + 2; - for(ii = 0; ii < nextend; ii++) { - l3 = l1; - for(jj = 0; jj < length1; jj++) - *l2++ = *l3--; - l1 += length1; - } - for(ii = 0; ii < rextend; ii++) - *l2++ = *l1--; - } - break; - case NI_EXTEND_REFLECT: - nextend = size1 / length; - rextend = size1 
- nextend * length; - l1 = line + size1; - l2 = l1 - 1; - for(ii = 0; ii < nextend; ii++) { - l3 = l1; - for(jj = 0; jj < length; jj++) - *l2-- = *l3++; - l1 -= length; - } - l3 = l1; - for(ii = 0; ii < rextend; ii++) - *l2-- = *l3++; - nextend = size2 / length; - rextend = size2 - nextend * length; - l1 = line + size1 + length - 1; - l2 = l1 + 1; - for(ii = 0; ii < nextend; ii++) { - l3 = l1; - for(jj = 0; jj < length; jj++) - *l2++ = *l3--; - l1 += length; - } - for(ii = 0; ii < rextend; ii++) - *l2++ = *l1--; - break; - case NI_EXTEND_NEAREST: - l1 = line; - val = line[size1]; - for(ii = 0; ii < size1; ii++) - *l1++ = val; - l1 = line + size1 + length; - val = line[size1 + length - 1]; - for(ii = 0; ii < size2; ii++) - *l1++ = val; - break; - case NI_EXTEND_CONSTANT: - l1 = line; - for(ii = 0; ii < size1; ii++) - *l1++ = constant_value; - l1 = line + size1 + length; - for(ii = 0; ii < size2; ii++) - *l1++ = constant_value; - break; - default: - PyErr_SetString(PyExc_RuntimeError, "mode not supported"); - return 0; - } - return 1; -} - - -#define CASE_COPY_DATA_TO_LINE(_pi, _po, _length, _stride, _type) \ -case t ## _type: \ -{ \ - npy_intp _ii; \ - for(_ii = 0; _ii < _length; _ii++) { \ - _po[_ii] = (double)*(_type*)_pi; \ - _pi += _stride; \ - } \ -} \ -break - - -/* Copy a line from an array to a buffer: */ -int NI_ArrayToLineBuffer(NI_LineBuffer *buffer, - npy_intp *number_of_lines, int *more) -{ - double *pb = buffer->buffer_data; - char *pa; - npy_intp length = buffer->line_length; - - pb += buffer->size1; - *number_of_lines = 0; - /* fill until all lines in the array have been processed, or until - the buffer is full: */ - while (buffer->next_line < buffer->array_lines && - *number_of_lines < buffer->buffer_lines) { - pa = buffer->array_data; - /* copy the data from the array to the buffer: */ - switch (buffer->array_type) { - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Bool); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, 
UInt8); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, UInt16); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, UInt32); -#if HAS_UINT64 - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, UInt64); -#endif - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Int8); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Int16); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Int32); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Int64); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Float32); - CASE_COPY_DATA_TO_LINE(pa, pb, length, buffer->line_stride, Float64); - default: - PyErr_Format(PyExc_RuntimeError, "array type %d not supported", - buffer->array_type); - return 0; - } - /* goto next line in the array: */ - NI_ITERATOR_NEXT(buffer->iterator, buffer->array_data); - /* implement boundary conditions to the line: */ - if (buffer->size1 + buffer->size2 > 0) - if (!NI_ExtendLine(pb - buffer->size1, length, buffer->size1, - buffer->size2, buffer->extend_mode, - buffer->extend_value)) - return 0; - /* The number of the array lines copied: */ - ++(buffer->next_line); - /* keep track of (and return) the number of lines in the buffer: */ - ++(*number_of_lines); - pb += buffer->line_length + buffer->size1 + buffer->size2; - } - /* if not all array lines were processed, *more is set true: */ - *more = buffer->next_line < buffer->array_lines; - return 1; -} - -#define CASE_COPY_LINE_TO_DATA(_pi, _po, _length, _stride, _type) \ -case t ## _type: \ -{ \ - npy_intp _ii; \ - for(_ii = 0; _ii < _length; _ii++) { \ - *(_type*)_po = (_type)_pi[_ii]; \ - _po += _stride; \ - } \ -} \ -break - -/* Copy a line from a buffer to an array: */ -int NI_LineBufferToArray(NI_LineBuffer *buffer) -{ - double *pb = buffer->buffer_data; - char *pa; - npy_intp jj, length = buffer->line_length; - - pb += buffer->size1; - for(jj = 0; jj < buffer->buffer_lines; jj++) { - /* if all array lines are 
copied return: */ - if (buffer->next_line == buffer->array_lines) - break; - pa = buffer->array_data; - /* copy data from the buffer to the array: */ - switch (buffer->array_type) { - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Bool); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, UInt8); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, UInt16); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, UInt32); -#if HAS_UINT64 - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, UInt64); -#endif - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Int8); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Int16); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Int32); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Int64); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Float32); - CASE_COPY_LINE_TO_DATA(pb, pa, length, buffer->line_stride, Float64); - default: - PyErr_SetString(PyExc_RuntimeError, "array type not supported"); - return 0; - } - /* move to the next line in the array: */ - NI_ITERATOR_NEXT(buffer->iterator, buffer->array_data); - /* number of lines copied: */ - ++(buffer->next_line); - /* move the buffer data pointer to the next line: */ - pb += buffer->line_length + buffer->size1 + buffer->size2; - } - return 1; -} - -/******************************************************************/ -/* Multi-dimensional filter support functions */ -/******************************************************************/ - -/* Initialize a filter iterator: */ -int -NI_InitFilterIterator(int rank, npy_intp *filter_shape, - npy_intp filter_size, npy_intp *array_shape, - npy_intp *origins, NI_FilterIterator *iterator) -{ - int ii; - npy_intp fshape[MAXDIM], forigins[MAXDIM]; - - for(ii = 0; ii < rank; ii++) { - fshape[ii] = *filter_shape++; - forigins[ii] = origins ? 
*origins++ : 0; - } - /* calculate the strides, used to move the offsets pointer through - the offsets table: */ - if (rank > 0) { - iterator->strides[rank - 1] = filter_size; - for(ii = rank - 2; ii >= 0; ii--) { - npy_intp step = array_shape[ii + 1] < fshape[ii + 1] ? - array_shape[ii + 1] : fshape[ii + 1]; - iterator->strides[ii] = iterator->strides[ii + 1] * step; - } - } - for(ii = 0; ii < rank; ii++) { - npy_intp step = array_shape[ii] < fshape[ii] ? - array_shape[ii] : fshape[ii]; - npy_intp orgn = fshape[ii] / 2 + forigins[ii]; - /* stride for stepping back to previous offsets: */ - iterator->backstrides[ii] = (step - 1) * iterator->strides[ii]; - /* initialize boundary extension sizes: */ - iterator->bound1[ii] = orgn; - iterator->bound2[ii] = array_shape[ii] - fshape[ii] + orgn; - } - return 1; -} - -/* Calculate the offsets to the filter points, for all border regions and - the interior of the array: */ -int NI_InitFilterOffsets(PyArrayObject *array, Bool *footprint, - npy_intp *filter_shape, npy_intp* origins, - NI_ExtendMode mode, npy_intp **offsets, npy_intp *border_flag_value, - npy_intp **coordinate_offsets) -{ - int rank, ii; - npy_intp kk, ll, filter_size = 1, offsets_size = 1, max_size = 0; - npy_intp max_stride = 0, *ashape = NULL, *astrides = NULL; - npy_intp footprint_size = 0, coordinates[MAXDIM], position[MAXDIM]; - npy_intp fshape[MAXDIM], forigins[MAXDIM], *po, *pc = NULL; - - rank = array->nd; - ashape = array->dimensions; - astrides = array->strides; - for(ii = 0; ii < rank; ii++) { - fshape[ii] = *filter_shape++; - forigins[ii] = origins ? 
*origins++ : 0; - } - /* the size of the footprint array: */ - for(ii = 0; ii < rank; ii++) - filter_size *= fshape[ii]; - /* calculate the number of non-zero elements in the footprint: */ - if (footprint) { - for(kk = 0; kk < filter_size; kk++) - if (footprint[kk]) - ++footprint_size; - } else { - footprint_size = filter_size; - } - /* calculate how many sets of offsets must be stored: */ - for(ii = 0; ii < rank; ii++) - offsets_size *= (ashape[ii] < fshape[ii] ? ashape[ii] : fshape[ii]); - /* allocate offsets data: */ - *offsets = (npy_intp*)malloc(offsets_size * footprint_size * - sizeof(npy_intp)); - if (!*offsets) { - PyErr_NoMemory(); - goto exit; - } - if (coordinate_offsets) { - *coordinate_offsets = (npy_intp*)malloc(offsets_size * rank * - footprint_size * sizeof(npy_intp)); - if (!*coordinate_offsets) { - PyErr_NoMemory(); - goto exit; - } - } - for(ii = 0; ii < rank; ii++) { - npy_intp stride; - /* find maximum axis size: */ - if (ashape[ii] > max_size) - max_size = ashape[ii]; - /* find maximum stride: */ - stride = astrides[ii] < 0 ? 
-astrides[ii] : astrides[ii]; - if (stride > max_stride) - max_stride = stride; - /* coordinates for iterating over the kernel elements: */ - coordinates[ii] = 0; - /* keep track of the kernel position: */ - position[ii] = 0; - } - /* the flag to indicate that we are outside the border must have a - value that is larger than any possible offset: */ - *border_flag_value = max_size * max_stride + 1; - /* calculate all possible offsets to elements in the filter kernel, - for all regions in the array (interior and border regions): */ - po = *offsets; - if (coordinate_offsets) { - pc = *coordinate_offsets; - } - /* iterate over all regions: */ - for(ll = 0; ll < offsets_size; ll++) { - /* iterate over the elements in the footprint array: */ - for(kk = 0; kk < filter_size; kk++) { - npy_intp offset = 0; - /* only calculate an offset if the footprint is 1: */ - if (!footprint || footprint[kk]) { - /* find offsets along all axes: */ - for(ii = 0; ii < rank; ii++) { - npy_intp orgn = fshape[ii] / 2 + forigins[ii]; - npy_intp cc = coordinates[ii] - orgn + position[ii]; - npy_intp len = ashape[ii]; - /* apply boundary conditions, if necessary: */ - switch (mode) { - case NI_EXTEND_MIRROR: - if (cc < 0) { - if (len <= 1) { - cc = 0; - } else { - int sz2 = 2 * len - 2; - cc = sz2 * (int)(-cc / sz2) + cc; - cc = cc <= 1 - len ? cc + sz2 : -cc; - } - } else if (cc >= len) { - if (len <= 1) { - cc = 0; - } else { - int sz2 = 2 * len - 2; - cc -= sz2 * (int)(cc / sz2); - if (cc >= len) - cc = sz2 - cc; - } - } - break; - case NI_EXTEND_REFLECT: - if (cc < 0) { - if (len <= 1) { - cc = 0; - } else { - int sz2 = 2 * len; - if (cc < -sz2) - cc = sz2 * (int)(-cc / sz2) + cc; - cc = cc < -len ? 
cc + sz2 : -cc - 1; - } - } else if (cc >= len) { - if (len <= 1) {cc = 0; - } else { - int sz2 = 2 * len; - cc -= sz2 * (int)(cc / sz2); - if (cc >= len) - cc = sz2 - cc - 1; - } - } - break; - case NI_EXTEND_WRAP: - if (cc < 0) { - if (len <= 1) { - cc = 0; - } else { - int sz = len; - cc += sz * (int)(-cc / sz); - if (cc < 0) - cc += sz; - } - } else if (cc >= len) { - if (len <= 1) { - cc = 0; - } else { - int sz = len; - cc -= sz * (int)(cc / sz); - } - } - break; - case NI_EXTEND_NEAREST: - if (cc < 0) { - cc = 0; - } else if (cc >= len) { - cc = len - 1; - } - break; - case NI_EXTEND_CONSTANT: - if (cc < 0 || cc >= len) - cc = *border_flag_value; - break; - default: - PyErr_SetString(PyExc_RuntimeError, - "boundary mode not supported"); - goto exit; - } - - /* calculate offset along current axis: */ - if (cc == *border_flag_value) { - /* just flag that we are outside the border */ - offset = *border_flag_value; - if (coordinate_offsets) - pc[ii] = 0; - break; - } else { - /* use an offset that is possibly mapped from outside - the border: */ - cc = cc - position[ii]; - offset += astrides[ii] * cc; - if (coordinate_offsets) - pc[ii] = cc; - } - } - /* store the offset */ - *po++ = offset; - if (coordinate_offsets) - pc += rank; - } - /* next point in the filter: */ - for(ii = rank - 1; ii >= 0; ii--) { - if (coordinates[ii] < fshape[ii] - 1) { - coordinates[ii]++; - break; - } else { - coordinates[ii] = 0; - } - } - } - - /* move to the next array region: */ - for(ii = rank - 1; ii >= 0; ii--) { - int orgn = fshape[ii] / 2 + forigins[ii]; - if (position[ii] == orgn) { - position[ii] += ashape[ii] - fshape[ii] + 1; - if (position[ii] <= orgn) - position[ii] = orgn + 1; - } else { - position[ii]++; - } - if (position[ii] < ashape[ii]) { - break; - } else { - position[ii] = 0; - } - } - } - - exit: - if (PyErr_Occurred()) { - if (*offsets) - free(*offsets); - if (coordinate_offsets && *coordinate_offsets) - free(*coordinate_offsets); - return 0; - } else { - 
return 1; - } -} - -NI_CoordinateList* NI_InitCoordinateList(int size, int rank) -{ - NI_CoordinateList *list = \ - (NI_CoordinateList*)malloc(sizeof(NI_CoordinateList)); - if (!list) { - PyErr_NoMemory(); - return NULL; - } - list->block_size = size; - list->rank = rank; - list->blocks = NULL; - return list; -} - -int NI_CoordinateListStealBlocks(NI_CoordinateList *list1, - NI_CoordinateList *list2) -{ - if (list1->block_size != list2->block_size || - list1->rank != list2->rank) { - PyErr_SetString(PyExc_RuntimeError, "coordinate lists not compatible"); - return 1; - } - if (list1->blocks) { - PyErr_SetString(PyExc_RuntimeError, "first is list not empty"); - return 1; - } - list1->blocks = list2->blocks; - list2->blocks = NULL; - return 0; -} - -NI_CoordinateBlock* NI_CoordinateListAddBlock(NI_CoordinateList *list) -{ - NI_CoordinateBlock* block = NULL; - block = (NI_CoordinateBlock*)malloc(sizeof(NI_CoordinateBlock)); - if (!block) { - PyErr_NoMemory(); - goto exit; - } - block->coordinates = (npy_intp*)malloc(list->block_size * list->rank * - sizeof(npy_intp)); - if (!block->coordinates) { - PyErr_NoMemory(); - goto exit; - } - block->next = list->blocks; - list->blocks = block; - block->size = 0; - -exit: - if (PyErr_Occurred()) { - if (block) - free(block); - return NULL; - } - return block; -} - -NI_CoordinateBlock* NI_CoordinateListDeleteBlock(NI_CoordinateList *list) -{ - NI_CoordinateBlock* block = list->blocks; - if (block) { - list->blocks = block->next; - if (block->coordinates) - free(block->coordinates); - free(block); - } - return list->blocks; -} - -void NI_FreeCoordinateList(NI_CoordinateList *list) -{ - if (list) { - NI_CoordinateBlock *block = list->blocks; - while (block) { - NI_CoordinateBlock *tmp = block; - block = block->next; - if (tmp->coordinates) - free(tmp->coordinates); - free(tmp); - } - list->blocks = NULL; - free(list); - } -} diff --git a/scipy-0.10.1/scipy/ndimage/src/ni_support.h b/scipy-0.10.1/scipy/ndimage/src/ni_support.h 
deleted file mode 100644 index 04790c50d9..0000000000 --- a/scipy-0.10.1/scipy/ndimage/src/ni_support.h +++ /dev/null @@ -1,340 +0,0 @@ -/* Copyright (C) 2003-2005 Peter J. Verveer - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef NI_SUPPORT_H -#define NI_SUPPORT_H - -#include "nd_image.h" -#include -#include -#include -#include - -/* The different boundary conditions. The mirror condition is not used - by the python code, but C code is kept around in case we might wish - to add it. 
*/ -typedef enum { - NI_EXTEND_FIRST = 0, - NI_EXTEND_NEAREST = 0, - NI_EXTEND_WRAP = 1, - NI_EXTEND_REFLECT = 2, - NI_EXTEND_MIRROR = 3, - NI_EXTEND_CONSTANT = 4, - NI_EXTEND_LAST = NI_EXTEND_CONSTANT, - NI_EXTEND_DEFAULT = NI_EXTEND_MIRROR -} NI_ExtendMode; - -/******************************************************************/ -/* Data types */ -/******************************************************************/ - -/* - * Numpy basic types codes correspond to C basic types, but they remain - * different even if the corresponding types have the same size. - * - * Most commonly: int and long, so normalize to long. - */ -static NPY_INLINE -int NI_NormalizeType(int type_num) -{ -#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG - if (type_num == NPY_INT) type_num = NPY_LONG; - if (type_num == NPY_UINT) type_num = NPY_ULONG; -#endif - return type_num; -} - -/******************************************************************/ -/* Iterators */ -/******************************************************************/ - -/* the iterator structure: */ -typedef struct { - int rank_m1; - npy_intp dimensions[MAXDIM]; - npy_intp coordinates[MAXDIM]; - npy_intp strides[MAXDIM]; - npy_intp backstrides[MAXDIM]; -} NI_Iterator; - -/* initialize iterations over single array elements: */ -int NI_InitPointIterator(PyArrayObject*, NI_Iterator*); - -/* initialize iterations over an arbritrary sub-space: */ -int NI_SubspaceIterator(NI_Iterator*, UInt32); - -/* initialize iteration over array lines: */ -int NI_LineIterator(NI_Iterator*, int); - -/* reset an iterator */ -#define NI_ITERATOR_RESET(iterator) \ -{ \ - int _ii; \ - for(_ii = 0; _ii <= (iterator).rank_m1; _ii++) \ - (iterator).coordinates[_ii] = 0; \ -} - -/* go to the next point in a single array */ -#define NI_ITERATOR_NEXT(iterator, pointer) \ -{ \ - int _ii; \ - for(_ii = (iterator).rank_m1; _ii >= 0; _ii--) \ - if ((iterator).coordinates[_ii] < (iterator).dimensions[_ii]) { \ - (iterator).coordinates[_ii]++; \ - pointer += 
(iterator).strides[_ii]; \ - break; \ - } else { \ - (iterator).coordinates[_ii] = 0; \ - pointer -= (iterator).backstrides[_ii]; \ - } \ -} - -/* go to the next point in two arrays of the same size */ -#define NI_ITERATOR_NEXT2(iterator1, iterator2, pointer1, pointer2) \ -{ \ - int _ii; \ - for(_ii = (iterator1).rank_m1; _ii >= 0; _ii--) \ - if ((iterator1).coordinates[_ii] < (iterator1).dimensions[_ii]) { \ - (iterator1).coordinates[_ii]++; \ - pointer1 += (iterator1).strides[_ii]; \ - pointer2 += (iterator2).strides[_ii]; \ - break; \ - } else { \ - (iterator1).coordinates[_ii] = 0; \ - pointer1 -= (iterator1).backstrides[_ii]; \ - pointer2 -= (iterator2).backstrides[_ii]; \ - } \ -} - -/* go to the next point in three arrays of the same size */ -#define NI_ITERATOR_NEXT3(iterator1, iterator2, iterator3, \ - pointer1, pointer2, pointer3) \ -{ \ - int _ii; \ - for(_ii = (iterator1).rank_m1; _ii >= 0; _ii--) \ - if ((iterator1).coordinates[_ii] < (iterator1).dimensions[_ii]) { \ - (iterator1).coordinates[_ii]++; \ - pointer1 += (iterator1).strides[_ii]; \ - pointer2 += (iterator2).strides[_ii]; \ - pointer3 += (iterator3).strides[_ii]; \ - break; \ - } else { \ - (iterator1).coordinates[_ii] = 0; \ - pointer1 -= (iterator1).backstrides[_ii]; \ - pointer2 -= (iterator2).backstrides[_ii]; \ - pointer3 -= (iterator3).backstrides[_ii]; \ - } \ -} - -/* go to an arbitrary point in a single array */ -#define NI_ITERATOR_GOTO(iterator, destination, base, pointer) \ -{ \ - int _ii; \ - pointer = base; \ - for(_ii = (iterator).rank_m1; _ii >= 0; _ii--) { \ - pointer += destination[_ii] * (iterator).strides[_ii]; \ - (iterator).coordinates[_ii] = destination[_ii]; \ - } \ -} - -/******************************************************************/ -/* Line buffers */ -/******************************************************************/ - -/* the linebuffer structure: */ -typedef struct { - double *buffer_data; - npy_intp buffer_lines, line_length, line_stride; - npy_intp 
size1, size2, array_lines, next_line; - NI_Iterator iterator; - char* array_data; - NumarrayType array_type; - NI_ExtendMode extend_mode; - double extend_value; -} NI_LineBuffer; - -/* Get the next line being processed: */ -#define NI_GET_LINE(_buffer, _line) \ - ((_buffer).buffer_data + (_line) * ((_buffer).line_length + \ - (_buffer).size1 + (_buffer).size2)) -/* Allocate line buffer data */ -int NI_AllocateLineBuffer(PyArrayObject*, int, npy_intp, npy_intp, - npy_intp*, npy_intp, double**); - -/* Initialize a line buffer */ -int NI_InitLineBuffer(PyArrayObject*, int, npy_intp, npy_intp, npy_intp, - double*, NI_ExtendMode, double, NI_LineBuffer*); - -/* Extend a line in memory to implement boundary conditions: */ -int NI_ExtendLine(double*, npy_intp, npy_intp, npy_intp, NI_ExtendMode, double); - -/* Copy a line from an array to a buffer: */ -int NI_ArrayToLineBuffer(NI_LineBuffer*, npy_intp*, int*); - -/* Copy a line from a buffer to an array: */ -int NI_LineBufferToArray(NI_LineBuffer*); - -/******************************************************************/ -/* Multi-dimensional filter support functions */ -/******************************************************************/ - -/* the filter iterator structure: */ -typedef struct { - npy_intp strides[MAXDIM], backstrides[MAXDIM]; - npy_intp bound1[MAXDIM], bound2[MAXDIM]; -} NI_FilterIterator; - -/* Initialize a filter iterator: */ -int NI_InitFilterIterator(int, npy_intp*, npy_intp, npy_intp*, - npy_intp*, NI_FilterIterator*); - -/* Calculate the offsets to the filter points, for all border regions and - the interior of the array: */ -int NI_InitFilterOffsets(PyArrayObject*, Bool*, npy_intp*, - npy_intp*, NI_ExtendMode, npy_intp**, - npy_intp*, npy_intp**); - -/* Move to the next point in an array, possible changing the filter - offsets, to adapt to boundary conditions: */ -#define NI_FILTER_NEXT(iteratorf, iterator1, pointerf, pointer1) \ -{ \ - int _ii; \ - for(_ii = (iterator1).rank_m1; _ii >= 0; _ii--) { \ 
- npy_intp _pp = (iterator1).coordinates[_ii]; \ - if (_pp < (iterator1).dimensions[_ii]) { \ - if (_pp < (iteratorf).bound1[_ii] || \ - _pp >= (iteratorf).bound2[_ii]) \ - pointerf += (iteratorf).strides[_ii]; \ - (iterator1).coordinates[_ii]++; \ - pointer1 += (iterator1).strides[_ii]; \ - break; \ - } else { \ - (iterator1).coordinates[_ii] = 0; \ - pointer1 -= (iterator1).backstrides[_ii]; \ - pointerf -= (iteratorf).backstrides[_ii]; \ - } \ - } \ -} - -/* Move to the next point in two arrays, possible changing the pointer - to the filter offsets when moving into a different region in the - array: */ -#define NI_FILTER_NEXT2(iteratorf, iterator1, iterator2, \ - pointerf, pointer1, pointer2) \ -{ \ - int _ii; \ - for(_ii = (iterator1).rank_m1; _ii >= 0; _ii--) { \ - npy_intp _pp = (iterator1).coordinates[_ii]; \ - if (_pp < (iterator1).dimensions[_ii]) { \ - if (_pp < (iteratorf).bound1[_ii] || \ - _pp >= (iteratorf).bound2[_ii]) \ - pointerf += (iteratorf).strides[_ii]; \ - (iterator1).coordinates[_ii]++; \ - pointer1 += (iterator1).strides[_ii]; \ - pointer2 += (iterator2).strides[_ii]; \ - break; \ - } else { \ - (iterator1).coordinates[_ii] = 0; \ - pointer1 -= (iterator1).backstrides[_ii]; \ - pointer2 -= (iterator2).backstrides[_ii]; \ - pointerf -= (iteratorf).backstrides[_ii]; \ - } \ - } \ -} - -/* Move to the next point in three arrays, possible changing the pointer - to the filter offsets when moving into a different region in the - array: */ -#define NI_FILTER_NEXT3(iteratorf, iterator1, iterator2, iterator3, \ - pointerf, pointer1, pointer2, pointer3) \ -{ \ - int _ii; \ - for(_ii = (iterator1).rank_m1; _ii >= 0; _ii--) { \ - npy_intp _pp = (iterator1).coordinates[_ii]; \ - if (_pp < (iterator1).dimensions[_ii]) { \ - if (_pp < (iteratorf).bound1[_ii] || \ - _pp >= (iteratorf).bound2[_ii]) \ - pointerf += (iteratorf).strides[_ii]; \ - (iterator1).coordinates[_ii]++; \ - pointer1 += (iterator1).strides[_ii]; \ - pointer2 += (iterator2).strides[_ii]; 
\ - pointer3 += (iterator3).strides[_ii]; \ - break; \ - } else { \ - (iterator1).coordinates[_ii] = 0; \ - pointer1 -= (iterator1).backstrides[_ii]; \ - pointer2 -= (iterator2).backstrides[_ii]; \ - pointer3 -= (iterator3).backstrides[_ii]; \ - pointerf -= (iteratorf).backstrides[_ii]; \ - } \ - } \ -} - -/* Move the pointer to the filter offsets according to the given - coordinates: */ -#define NI_FILTER_GOTO(iteratorf, iterator, fbase, pointerf) \ -{ \ - int _ii; \ - npy_intp _jj; \ - pointerf = fbase; \ - for(_ii = iterator.rank_m1; _ii >= 0; _ii--) { \ - npy_intp _pp = iterator.coordinates[_ii]; \ - npy_intp b1 = (iteratorf).bound1[_ii]; \ - npy_intp b2 = (iteratorf).bound2[_ii]; \ - if (_pp < b1) { \ - _jj = _pp; \ - } else if (_pp > b2 && b2 >= b1) { \ - _jj = _pp + b1 - b2; \ - } else { \ - _jj = b1; \ - } \ - pointerf += (iteratorf).strides[_ii] * _jj; \ - } \ -} - -typedef struct { - npy_intp *coordinates; - int size; - void *next; -} NI_CoordinateBlock; - -typedef struct { - int block_size, rank; - void *blocks; -} NI_CoordinateList; - -NI_CoordinateList* NI_InitCoordinateList(int, int); -int NI_CoordinateListStealBlocks(NI_CoordinateList*, NI_CoordinateList*); -NI_CoordinateBlock* NI_CoordinateListAddBlock(NI_CoordinateList*); -NI_CoordinateBlock* NI_CoordinateListDeleteBlock(NI_CoordinateList*); -void NI_FreeCoordinateList(NI_CoordinateList*); - -#endif diff --git a/scipy-0.10.1/scipy/ndimage/tests/dots.png b/scipy-0.10.1/scipy/ndimage/tests/dots.png deleted file mode 100644 index 640030ca13..0000000000 Binary files a/scipy-0.10.1/scipy/ndimage/tests/dots.png and /dev/null differ diff --git a/scipy-0.10.1/scipy/ndimage/tests/test_datatypes.py b/scipy-0.10.1/scipy/ndimage/tests/test_datatypes.py deleted file mode 100644 index 59a168828d..0000000000 --- a/scipy-0.10.1/scipy/ndimage/tests/test_datatypes.py +++ /dev/null @@ -1,60 +0,0 @@ -""" Testing data types for ndimage calls -""" - -import numpy as np - -from scipy import ndimage - -from numpy.testing 
import (assert_array_almost_equal, - assert_array_equal) - -from nose.tools import assert_true, assert_equal, assert_raises - - -def test_map_coordinates_dts(): - # check that ndimage accepts different data types for interpolation - data = np.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - shifted_data = np.array([[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - idx = np.indices(data.shape) - dts = (np.uint8, np.uint16, np.uint32, np.uint64, - np.int8, np.int16, np.int32, np.int64, - np.intp, np.uintp, np.float32, np.float64) - for order in range(0, 6): - for data_dt in dts: - these_data = data.astype(data_dt) - for coord_dt in dts: - # affine mapping - mat = np.eye(2, dtype=coord_dt) - off = np.zeros((2,), dtype=coord_dt) - out = ndimage.affine_transform(these_data, mat, off) - assert_array_almost_equal(these_data, out) - # map coordinates - coords_m1 = idx.astype(coord_dt) - 1 - coords_p10 = idx.astype(coord_dt) + 10 - out = ndimage.map_coordinates(these_data, coords_m1, order=order) - assert_array_almost_equal(out, shifted_data) - # check constant fill works - out = ndimage.map_coordinates(these_data, coords_p10, order=order) - assert_array_almost_equal(out, np.zeros((3,4))) - # check shift and zoom - out = ndimage.shift(these_data, 1) - assert_array_almost_equal(out, shifted_data) - out = ndimage.zoom(these_data, 1) - assert_array_almost_equal(these_data, out) - - -def test_uint64_max(): - # Test interpolation respects uint64 max - big = 2**64-1 - arr = np.array([big, big, big], dtype=np.uint64) - # Tests geometric transform (map_coordinates, affine_transform) - inds = np.indices(arr.shape) - 0.1 - x = ndimage.map_coordinates(arr, inds) - assert_true(x[1] > (2**63)) - # Tests zoom / shift - x = ndimage.shift(arr, 0.1) - assert_true(x[1] > (2**63)) diff --git a/scipy-0.10.1/scipy/ndimage/tests/test_filters.py b/scipy-0.10.1/scipy/ndimage/tests/test_filters.py deleted file mode 100644 index cc1b4ab28e..0000000000 --- 
a/scipy-0.10.1/scipy/ndimage/tests/test_filters.py +++ /dev/null @@ -1,54 +0,0 @@ -''' Some tests for filters ''' - -import numpy as np - -from numpy.testing import assert_equal, assert_raises - -import scipy.ndimage as sndi - - -def test_ticket_701(): - # Test generic filter sizes - arr = np.arange(4).reshape((2,2)) - func = lambda x: np.min(x) - res = sndi.generic_filter(arr, func, size=(1,1)) - # The following raises an error unless ticket 701 is fixed - res2 = sndi.generic_filter(arr, func, size=1) - assert_equal(res, res2) - - -def test_orders_gauss(): - # Check order inputs to Gaussians - arr = np.zeros((1,)) - yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0) - yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3) - yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1 - yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4 - yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0) - yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3) - yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1 - yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4 - - -def test_valid_origins(): - """Regression test for #1311.""" - func = lambda x: np.mean(x) - data = np.array([1,2,3,4,5], dtype=np.float64) - assert_raises(ValueError, sndi.generic_filter, data, func, size=3, - origin=2) - func2 = lambda x, y: np.mean(x + y) - assert_raises(ValueError, sndi.generic_filter1d, data, func, - filter_size=3, origin=2) - assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3, - origin=2) - - for filter in [sndi.uniform_filter, sndi.minimum_filter, - sndi.maximum_filter, sndi.maximum_filter1d, - sndi.median_filter, sndi.minimum_filter1d]: - # This should work, since for size == 3, the valid range for origin is - # -1 to 1. - filter(data, 3, origin=-1) - filter(data, 3, origin=1) - # Just check this raises an error instead of silently accepting or - # segfaulting. 
- assert_raises(ValueError, filter, data, 3, origin=2) diff --git a/scipy-0.10.1/scipy/ndimage/tests/test_io.py b/scipy-0.10.1/scipy/ndimage/tests/test_io.py deleted file mode 100644 index 2ebcb335af..0000000000 --- a/scipy-0.10.1/scipy/ndimage/tests/test_io.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.testing import assert_array_equal, dec, run_module_suite -import scipy.ndimage as ndi - -import os - -try: - from PIL import Image - pil_missing = False -except ImportError: - pil_missing = True - -@dec.skipif(pil_missing, msg="The Python Image Library could not be found.") -def test_imread(): - lp = os.path.join(os.path.dirname(__file__), 'dots.png') - img = ndi.imread(lp) - assert_array_equal(img.shape, (300, 420, 3)) - - img = ndi.imread(lp, flatten=True) - assert_array_equal(img.shape, (300, 420)) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/ndimage/tests/test_measurements.py b/scipy-0.10.1/scipy/ndimage/tests/test_measurements.py deleted file mode 100644 index 43a133bddc..0000000000 --- a/scipy-0.10.1/scipy/ndimage/tests/test_measurements.py +++ /dev/null @@ -1,1019 +0,0 @@ -from numpy.testing import assert_, assert_array_almost_equal, assert_equal, \ - assert_almost_equal, assert_array_equal, \ - run_module_suite, TestCase -import numpy as np - -import scipy.ndimage as ndimage - -types = [np.int8, np.uint8, np.int16, - np.uint16, np.int32, np.uint32, - np.int64, np.uint64, - np.float32, np.float64] - - -np.mod(1., 1) # Silence fmod bug on win-amd64. See #1408 and #1238. 
- -class Test_measurements_stats(TestCase): - """ndimage.measurements._stats() is a utility function used by other functions.""" - - def test_a(self): - x = [0,1,2,6] - labels = [0,0,1,1] - index = [0,1] - for shp in [(4,), (2,2)]: - x = np.array(x).reshape(shp) - labels = np.array(labels).reshape(shp) - counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) - assert_array_equal(counts, [2, 2]) - assert_array_equal(sums, [1.0, 8.0]) - - def test_b(self): - # Same data as test_a, but different labels. The label 9 exceeds the - # length of 'labels', so this test will follow a different code path. - x = [0,1,2,6] - labels = [0,0,9,9] - index = [0,9] - for shp in [(4,), (2,2)]: - x = np.array(x).reshape(shp) - labels = np.array(labels).reshape(shp) - counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) - assert_array_equal(counts, [2, 2]) - assert_array_equal(sums, [1.0, 8.0]) - - def test_a_centered(self): - x = [0,1,2,6] - labels = [0,0,1,1] - index = [0,1] - for shp in [(4,), (2,2)]: - x = np.array(x).reshape(shp) - labels = np.array(labels).reshape(shp) - counts, sums, centers = ndimage.measurements._stats(x, labels=labels, - index=index, centered=True) - assert_array_equal(counts, [2, 2]) - assert_array_equal(sums, [1.0, 8.0]) - assert_array_equal(centers, [0.5, 8.0]) - - def test_b_centered(self): - x = [0,1,2,6] - labels = [0,0,9,9] - index = [0,9] - for shp in [(4,), (2,2)]: - x = np.array(x).reshape(shp) - labels = np.array(labels).reshape(shp) - counts, sums, centers = ndimage.measurements._stats(x, labels=labels, - index=index, centered=True) - assert_array_equal(counts, [2, 2]) - assert_array_equal(sums, [1.0, 8.0]) - assert_array_equal(centers, [0.5, 8.0]) - - def test_nonint_labels(self): - x = [0,1,2,6] - labels = [0.0, 0.0, 9.0, 9.0] - index = [0.0, 9.0] - for shp in [(4,), (2,2)]: - x = np.array(x).reshape(shp) - labels = np.array(labels).reshape(shp) - counts, sums, centers = ndimage.measurements._stats(x, 
labels=labels, - index=index, centered=True) - assert_array_equal(counts, [2, 2]) - assert_array_equal(sums, [1.0, 8.0]) - assert_array_equal(centers, [0.5, 8.0]) - - -class Test_measurements_select(TestCase): - """ndimage.measurements._select() is a utility function used by other functions.""" - - def test_basic(self): - x = [0,1,6,2] - cases = [ - ([0,0,1,1], [0,1]), # "Small" integer labels - ([0,0,9,9], [0,9]), # A label larger than len(labels) - ([0.0,0.0,7.0,7.0], [0.0, 7.0]), # Non-integer labels - ] - for labels, index in cases: - result = ndimage.measurements._select(x, labels=labels, index=index) - assert_(len(result) == 0) - result = ndimage.measurements._select(x, labels=labels, index=index, find_max=True) - assert_(len(result) == 1) - assert_array_equal(result[0], [1, 6]) - result = ndimage.measurements._select(x, labels=labels, index=index, find_min=True) - assert_(len(result) == 1) - assert_array_equal(result[0], [0, 2]) - result = ndimage.measurements._select(x, labels=labels, index=index, - find_min=True, find_min_positions=True) - assert_(len(result) == 2) - assert_array_equal(result[0], [0, 2]) - assert_array_equal(result[1], [0, 3]) - result = ndimage.measurements._select(x, labels=labels, index=index, - find_max=True, find_max_positions=True) - assert_(len(result) == 2) - assert_array_equal(result[0], [1, 6]) - assert_array_equal(result[1], [1, 2]) - - -def test_label01(): - "label 1" - data = np.ones([]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, 1) - -def test_label02(): - "label 2" - data = np.zeros([]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, 0) - -def test_label03(): - "label 3" - data = np.ones([1]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, [1]) - -def test_label04(): - "label 4" - data = np.zeros([1]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, [0]) - -def test_label05(): - "label 5" - data = np.ones([5]) - out, n = ndimage.label(data) - 
assert_array_almost_equal(out, [1, 1, 1, 1, 1]) - -def test_label06(): - "label 6" - data = np.array([1, 0, 1, 1, 0, 1]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3]) - -def test_label07(): - "label 7" - data = np.array([[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - -def test_label08(): - "label 8" - data = np.array([[1, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 0]]) - out, n = ndimage.label(data) - assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], - [0, 0, 2, 2, 0, 0], - [0, 0, 2, 2, 2, 0], - [3, 3, 0, 0, 0, 0], - [3, 3, 0, 0, 0, 0], - [0, 0, 0, 4, 4, 0]]) - -def test_label09(): - "label 9" - data = np.array([[1, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 0]]) - struct = ndimage.generate_binary_structure(2, 2) - out, n = ndimage.label(data, struct) - assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], - [0, 0, 2, 2, 0, 0], - [0, 0, 2, 2, 2, 0], - [2, 2, 0, 0, 0, 0], - [2, 2, 0, 0, 0, 0], - [0, 0, 0, 3, 3, 0]]) - -def test_label10(): - "label 10" - data = np.array([[0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 1, 0], - [0, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0]]) - struct = ndimage.generate_binary_structure(2, 2) - out, n = ndimage.label(data, struct) - assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 1, 0], - [0, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0]]) - -def test_label11(): - "label 11" - for type in types: - data = np.array([[1, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 0]], type) - out, n = 
ndimage.label(data) - expected = [[1, 0, 0, 0, 0, 0], - [0, 0, 2, 2, 0, 0], - [0, 0, 2, 2, 2, 0], - [3, 3, 0, 0, 0, 0], - [3, 3, 0, 0, 0, 0], - [0, 0, 0, 4, 4, 0]] - assert_array_almost_equal(out, expected) - assert_equal(n, 4) - -def test_label12(): - "label 12" - for type in types: - data = np.array([[0, 0, 0, 0, 1, 1], - [0, 0, 0, 0, 0, 1], - [0, 0, 1, 0, 1, 1], - [0, 0, 1, 1, 1, 1], - [0, 0, 0, 1, 1, 0]], type) - out, n = ndimage.label(data) - expected = [[0, 0, 0, 0, 1, 1], - [0, 0, 0, 0, 0, 1], - [0, 0, 1, 0, 1, 1], - [0, 0, 1, 1, 1, 1], - [0, 0, 0, 1, 1, 0]] - assert_array_almost_equal(out, expected) - assert_equal(n, 1) - -def test_label13(): - "label 13" - for type in types: - data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], - [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], - type) - out, n = ndimage.label(data) - expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], - [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] - assert_array_almost_equal(out, expected) - assert_equal(n, 1) - -def test_find_objects01(): - "find_objects 1" - data = np.ones([], dtype=int) - out = ndimage.find_objects(data) - assert_(out == [()]) - -def test_find_objects02(): - "find_objects 2" - data = np.zeros([], dtype=int) - out = ndimage.find_objects(data) - assert_(out == []) - -def test_find_objects03(): - "find_objects 3" - data = np.ones([1], dtype=int) - out = ndimage.find_objects(data) - assert_equal(out, [(slice(0, 1, None),)]) - -def test_find_objects04(): - "find_objects 4" - data = np.zeros([1], dtype=int) - out = ndimage.find_objects(data) - assert_equal(out, []) - -def test_find_objects05(): - "find_objects 5" - data = np.ones([5], dtype=int) - out = ndimage.find_objects(data) - assert_equal(out, [(slice(0, 5, None),)]) - -def test_find_objects06(): - "find_objects 6" - data = np.array([1, 0, 2, 2, 0, 3]) - out = ndimage.find_objects(data) - 
assert_equal(out, [(slice(0, 1, None),), - (slice(2, 4, None),), - (slice(5, 6, None),)]) - -def test_find_objects07(): - "find_objects 7" - data = np.array([[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - out = ndimage.find_objects(data) - assert_equal(out, []) - -def test_find_objects08(): - "find_objects 8" - data = np.array([[1, 0, 0, 0, 0, 0], - [0, 0, 2, 2, 0, 0], - [0, 0, 2, 2, 2, 0], - [3, 3, 0, 0, 0, 0], - [3, 3, 0, 0, 0, 0], - [0, 0, 0, 4, 4, 0]]) - out = ndimage.find_objects(data) - assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), - (slice(1, 3, None), slice(2, 5, None)), - (slice(3, 5, None), slice(0, 2, None)), - (slice(5, 6, None), slice(3, 5, None))]) - -def test_find_objects09(): - "find_objects 9" - data = np.array([[1, 0, 0, 0, 0, 0], - [0, 0, 2, 2, 0, 0], - [0, 0, 2, 2, 2, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 4, 4, 0]]) - out = ndimage.find_objects(data) - assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), - (slice(1, 3, None), slice(2, 5, None)), - None, - (slice(5, 6, None), slice(3, 5, None))]) - -def test_sum01(): - "sum 1" - for type in types: - input = np.array([], type) - output = ndimage.sum(input) - assert_equal(output, 0.0) - -def test_sum02(): - "sum 2" - for type in types: - input = np.zeros([0, 4], type) - output = ndimage.sum(input) - assert_equal(output, 0.0) - -def test_sum03(): - "sum 3" - for type in types: - input = np.ones([], type) - output = ndimage.sum(input) - assert_almost_equal(output, 1.0) - -def test_sum04(): - "sum 4" - for type in types: - input = np.array([1, 2], type) - output = ndimage.sum(input) - assert_almost_equal(output, 3.0) - -def test_sum05(): - "sum 5" - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.sum(input) - assert_almost_equal(output, 10.0) - -def test_sum06(): - "sum 6" - labels = np.array([], bool) - for type in types: - input = 
np.array([], type) - output = ndimage.sum(input, labels=labels) - assert_equal(output, 0.0) - -def test_sum07(): - "sum 7" - labels = np.ones([0, 4], bool) - for type in types: - input = np.zeros([0, 4], type) - output = ndimage.sum(input, labels=labels) - assert_equal(output, 0.0) - -def test_sum08(): - "sum 8" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([1, 2], type) - output = ndimage.sum(input, labels=labels) - assert_equal(output, 1.0) - -def test_sum09(): - "sum 9" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.sum(input, labels=labels) - assert_almost_equal(output, 4.0) - -def test_sum10(): - "sum 10" - labels = np.array([1, 0], bool) - input = np.array([[1, 2], [3, 4]], bool) - output = ndimage.sum(input, labels=labels) - assert_almost_equal(output, 2.0) - -def test_sum11(): - "sum 11" - labels = np.array([1, 2], np.int8) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.sum(input, labels=labels, - index=2) - assert_almost_equal(output, 6.0) - -def test_sum12(): - "sum 12" - labels = np.array([[1, 2], [2, 4]], np.int8) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.sum(input, labels=labels, - index=[4, 8, 2]) - assert_array_almost_equal(output, [4.0, 0.0, 5.0]) - -def test_mean01(): - "mean 1" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.mean(input, labels=labels) - assert_almost_equal(output, 2.0) - -def test_mean02(): - "mean 2" - labels = np.array([1, 0], bool) - input = np.array([[1, 2], [3, 4]], bool) - output = ndimage.mean(input, labels=labels) - assert_almost_equal(output, 1.0) - -def test_mean03(): - "mean 3" - labels = np.array([1, 2]) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.mean(input, labels=labels, - index=2) - assert_almost_equal(output, 3.0) - -def test_mean04(): 
- "mean 4" - labels = np.array([[1, 2], [2, 4]], np.int8) - olderr = np.seterr(all='ignore') - try: - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.mean(input, labels=labels, - index=[4, 8, 2]) - assert_array_almost_equal(output[[0,2]], [4.0, 2.5]) - assert_(np.isnan(output[1])) - finally: - np.seterr(**olderr) - -def test_minimum01(): - "minimum 1" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.minimum(input, labels=labels) - assert_almost_equal(output, 1.0) - -def test_minimum02(): - "minimum 2" - labels = np.array([1, 0], bool) - input = np.array([[2, 2], [2, 4]], bool) - output = ndimage.minimum(input, labels=labels) - assert_almost_equal(output, 1.0) - -def test_minimum03(): - "minimum 3" - labels = np.array([1, 2]) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.minimum(input, labels=labels, - index=2) - assert_almost_equal(output, 2.0) - -def test_minimum04(): - "minimum 4" - labels = np.array([[1, 2], [2, 3]]) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.minimum(input, labels=labels, - index=[2, 3, 8]) - assert_array_almost_equal(output, [2.0, 4.0, 0.0]) - -def test_maximum01(): - "maximum 1" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.maximum(input, labels=labels) - assert_almost_equal(output, 3.0) - -def test_maximum02(): - "maximum 2" - labels = np.array([1, 0], bool) - input = np.array([[2, 2], [2, 4]], bool) - output = ndimage.maximum(input, labels=labels) - assert_almost_equal(output, 1.0) - -def test_maximum03(): - "maximum 3" - labels = np.array([1, 2]) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.maximum(input, labels=labels, - index=2) - assert_almost_equal(output, 4.0) - -def test_maximum04(): - "maximum 4" - labels = np.array([[1, 2], [2, 3]]) - for type in 
types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.maximum(input, labels=labels, - index=[2, 3, 8]) - assert_array_almost_equal(output, [3.0, 4.0, 0.0]) - -def test_maximum05(): - "Ticket #501" - x = np.array([-3,-2,-1]) - assert_equal(ndimage.maximum(x),-1) - -def test_median01(): - "median 1" - a = np.array([[1, 2, 0, 1], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - labels = np.array([[1, 1, 0, 2], - [1, 1, 0, 2], - [0, 0, 0, 2], - [3, 3, 0, 0]]) - output = ndimage.median(a, labels=labels, index=[1, 2, 3]) - assert_array_almost_equal(output, [2.5, 4.0, 6.0]) - -def test_median02(): - "median 2" - a = np.array([[1, 2, 0, 1], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - output = ndimage.median(a) - assert_almost_equal(output, 1.0) - -def test_median03(): - "median 3" - a = np.array([[1, 2, 0, 1], - [5, 3, 0, 4], - [0, 0, 0, 7], - [9, 3, 0, 0]]) - labels = np.array([[1, 1, 0, 2], - [1, 1, 0, 2], - [0, 0, 0, 2], - [3, 3, 0, 0]]) - output = ndimage.median(a, labels=labels) - assert_almost_equal(output, 3.0) - -def test_variance01(): - "variance 1" - olderr = np.seterr(all='ignore') - try: - for type in types: - input = np.array([], type) - output = ndimage.variance(input) - assert_(np.isnan(output)) - finally: - np.seterr(**olderr) - -def test_variance02(): - "variance 2" - for type in types: - input = np.array([1], type) - output = ndimage.variance(input) - assert_almost_equal(output, 0.0) - -def test_variance03(): - "variance 3" - for type in types: - input = np.array([1, 3], type) - output = ndimage.variance(input) - assert_almost_equal(output, 1.0) - -def test_variance04(): - "variance 4" - input = np.array([1, 0], bool) - output = ndimage.variance(input) - assert_almost_equal(output, 0.25) - -def test_variance05(): - "variance 5" - labels = [2, 2, 3] - for type in types: - input = np.array([1, 3, 8], type) - output = ndimage.variance(input, labels, 2) - assert_almost_equal(output, 1.0) - -def test_variance06(): - "variance 6" - 
labels = [2, 2, 3, 3, 4] - olderr = np.seterr(all='ignore') - try: - for type in types: - input = np.array([1, 3, 8, 10, 8], type) - output = ndimage.variance(input, labels, [2, 3, 4]) - assert_array_almost_equal(output, [1.0, 1.0, 0.0]) - finally: - np.seterr(**olderr) - -def test_standard_deviation01(): - "standard deviation 1" - olderr = np.seterr(all='ignore') - try: - for type in types: - input = np.array([], type) - output = ndimage.standard_deviation(input) - assert_(np.isnan(output)) - finally: - np.seterr(**olderr) - -def test_standard_deviation02(): - "standard deviation 2" - for type in types: - input = np.array([1], type) - output = ndimage.standard_deviation(input) - assert_almost_equal(output, 0.0) - -def test_standard_deviation03(): - "standard deviation 3" - for type in types: - input = np.array([1, 3], type) - output = ndimage.standard_deviation(input) - assert_almost_equal(output, np.sqrt(1.0)) - -def test_standard_deviation04(): - "standard deviation 4" - input = np.array([1, 0], bool) - output = ndimage.standard_deviation(input) - assert_almost_equal(output, 0.5) - -def test_standard_deviation05(): - "standard deviation 5" - labels = [2, 2, 3] - for type in types: - input = np.array([1, 3, 8], type) - output = ndimage.standard_deviation(input, labels, 2) - assert_almost_equal(output, 1.0) - -def test_standard_deviation06(): - "standard deviation 6" - labels = [2, 2, 3, 3, 4] - olderr = np.seterr(all='ignore') - try: - for type in types: - input = np.array([1, 3, 8, 10, 8], type) - output = ndimage.standard_deviation(input, labels, [2, 3, 4]) - assert_array_almost_equal(output, [1.0, 1.0, 0.0]) - finally: - np.seterr(**olderr) - -def test_standard_deviation07(): - "standard deviation 7" - labels = [1] - olderr = np.seterr(all='ignore') - try: - for type in types: - input = np.array([-0.00619519], type) - output = ndimage.standard_deviation(input, labels, [1]) - assert_array_almost_equal(output, [0]) - finally: - np.seterr(**olderr) - -def 
test_minimum_position01(): - "minimum position 1" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.minimum_position(input, labels=labels) - assert_equal(output, (0, 0)) - -def test_minimum_position02(): - "minimum position 2" - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 0, 2], - [1, 5, 1, 1]], type) - output = ndimage.minimum_position(input) - assert_equal(output, (1, 2)) - -def test_minimum_position03(): - "minimum position 3" - input = np.array([[5, 4, 2, 5], - [3, 7, 0, 2], - [1, 5, 1, 1]], bool) - output = ndimage.minimum_position(input) - assert_equal(output, (1, 2)) - -def test_minimum_position04(): - "minimum position 4" - input = np.array([[5, 4, 2, 5], - [3, 7, 1, 2], - [1, 5, 1, 1]], bool) - output = ndimage.minimum_position(input) - assert_equal(output, (0, 0)) - -def test_minimum_position05(): - "minimum position 5" - labels = [1, 2, 0, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 0, 2], - [1, 5, 2, 3]], type) - output = ndimage.minimum_position(input, labels) - assert_equal(output, (2, 0)) - -def test_minimum_position06(): - "minimum position 6" - labels = [1, 2, 3, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 0, 2], - [1, 5, 1, 1]], type) - output = ndimage.minimum_position(input, labels, 2) - assert_equal(output, (0, 1)) - -def test_minimum_position07(): - "minimum position 7" - labels = [1, 2, 3, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 0, 2], - [1, 5, 1, 1]], type) - output = ndimage.minimum_position(input, labels, - [2, 3]) - assert_equal(output[0], (0, 1)) - assert_equal(output[1], (1, 2)) - -def test_maximum_position01(): - "maximum position 1" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output = ndimage.maximum_position(input, - labels=labels) - assert_equal(output, (1, 0)) - -def test_maximum_position02(): - "maximum position 2" - for 
type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], type) - output = ndimage.maximum_position(input) - assert_equal(output, (1, 2)) - -def test_maximum_position03(): - "maximum position 3" - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], bool) - output = ndimage.maximum_position(input) - assert_equal(output, (0, 0)) - -def test_maximum_position04(): - "maximum position 4" - labels = [1, 2, 0, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], type) - output = ndimage.maximum_position(input, labels) - assert_equal(output, (1, 1)) - -def test_maximum_position05(): - "maximum position 5" - labels = [1, 2, 0, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], type) - output = ndimage.maximum_position(input, labels, 1) - assert_equal(output, (0, 0)) - -def test_maximum_position06(): - "maximum position 6" - labels = [1, 2, 0, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], type) - output = ndimage.maximum_position(input, labels, - [1, 2]) - assert_equal(output[0], (0, 0)) - assert_equal(output[1], (1, 1)) - -def test_maximum_position07(): - "maximum position 7 - float labels" - labels = np.array([1.0, 2.5, 0.0, 4.5]) - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], type) - output = ndimage.maximum_position(input, labels, - [1.0, 4.5]) - assert_equal(output[0], (0, 0)) - assert_equal(output[1], (0, 3)) - -def test_extrema01(): - "extrema 1" - labels = np.array([1, 0], bool) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output1 = ndimage.extrema(input, labels=labels) - output2 = ndimage.minimum(input, labels=labels) - output3 = ndimage.maximum(input, labels=labels) - output4 = ndimage.minimum_position(input, - labels=labels) - output5 = ndimage.maximum_position(input, - labels=labels) - assert_equal(output1, (output2, output3, output4, output5)) 
- -def test_extrema02(): - "extrema 2" - labels = np.array([1, 2]) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output1 = ndimage.extrema(input, labels=labels, - index=2) - output2 = ndimage.minimum(input, labels=labels, - index=2) - output3 = ndimage.maximum(input, labels=labels, - index=2) - output4 = ndimage.minimum_position(input, - labels=labels, index=2) - output5 = ndimage.maximum_position(input, - labels=labels, index=2) - assert_equal(output1, (output2, output3, output4, output5)) - -def test_extrema03(): - "extrema 3" - labels = np.array([[1, 2], [2, 3]]) - for type in types: - input = np.array([[1, 2], [3, 4]], type) - output1 = ndimage.extrema(input, labels=labels, - index=[2, 3, 8]) - output2 = ndimage.minimum(input, labels=labels, - index=[2, 3, 8]) - output3 = ndimage.maximum(input, labels=labels, - index=[2, 3, 8]) - output4 = ndimage.minimum_position(input, - labels=labels, index=[2, 3, 8]) - output5 = ndimage.maximum_position(input, - labels=labels, index=[2, 3, 8]) - assert_array_almost_equal(output1[0], output2) - assert_array_almost_equal(output1[1], output3) - assert_array_almost_equal(output1[2], output4) - assert_array_almost_equal(output1[3], output5) - -def test_extrema04(): - "extrema 4" - labels = [1, 2, 0, 4] - for type in types: - input = np.array([[5, 4, 2, 5], - [3, 7, 8, 2], - [1, 5, 1, 1]], type) - output1 = ndimage.extrema(input, labels, [1, 2]) - output2 = ndimage.minimum(input, labels, [1, 2]) - output3 = ndimage.maximum(input, labels, [1, 2]) - output4 = ndimage.minimum_position(input, labels, - [1, 2]) - output5 = ndimage.maximum_position(input, labels, - [1, 2]) - assert_array_almost_equal(output1[0], output2) - assert_array_almost_equal(output1[1], output3) - assert_array_almost_equal(output1[2], output4) - assert_array_almost_equal(output1[3], output5) - -def test_center_of_mass01(): - "center of mass 1" - expected = [0.0, 0.0] - for type in types: - input = np.array([[1, 0], [0, 0]], type) - output = 
ndimage.center_of_mass(input) - assert_array_almost_equal(output, expected) - -def test_center_of_mass02(): - "center of mass 2" - expected = [1, 0] - for type in types: - input = np.array([[0, 0], [1, 0]], type) - output = ndimage.center_of_mass(input) - assert_array_almost_equal(output, expected) - -def test_center_of_mass03(): - "center of mass 3" - expected = [0, 1] - for type in types: - input = np.array([[0, 1], [0, 0]], type) - output = ndimage.center_of_mass(input) - assert_array_almost_equal(output, expected) - -def test_center_of_mass04(): - "center of mass 4" - expected = [1, 1] - for type in types: - input = np.array([[0, 0], [0, 1]], type) - output = ndimage.center_of_mass(input) - assert_array_almost_equal(output, expected) - -def test_center_of_mass05(): - "center of mass 5" - expected = [0.5, 0.5] - for type in types: - input = np.array([[1, 1], [1, 1]], type) - output = ndimage.center_of_mass(input) - assert_array_almost_equal(output, expected) - -def test_center_of_mass06(): - "center of mass 6" - expected = [0.5, 0.5] - input = np.array([[1, 2], [3, 1]], bool) - output = ndimage.center_of_mass(input) - assert_array_almost_equal(output, expected) - -def test_center_of_mass07(): - "center of mass 7" - labels = [1, 0] - expected = [0.5, 0.0] - input = np.array([[1, 2], [3, 1]], bool) - output = ndimage.center_of_mass(input, labels) - assert_array_almost_equal(output, expected) - -def test_center_of_mass08(): - "center of mass 8" - labels = [1, 2] - expected = [0.5, 1.0] - input = np.array([[5, 2], [3, 1]], bool) - output = ndimage.center_of_mass(input, labels, 2) - assert_array_almost_equal(output, expected) - -def test_center_of_mass09(): - "center of mass 9" - labels = [1, 2] - expected = [(0.5, 0.0), (0.5, 1.0)] - input = np.array([[1, 2], [1, 1]], bool) - output = ndimage.center_of_mass(input, labels, [1, 2]) - assert_array_almost_equal(output, expected) - -def test_histogram01(): - "histogram 1" - expected = np.ones(10) - input = np.arange(10) 
- output = ndimage.histogram(input, 0, 10, 10) - assert_array_almost_equal(output, expected) - -def test_histogram02(): - "histogram 2" - labels = [1, 1, 1, 1, 2, 2, 2, 2] - expected = [0, 2, 0, 1, 1] - input = np.array([1, 1, 3, 4, 3, 3, 3, 3]) - output = ndimage.histogram(input, 0, 4, 5, labels, 1) - assert_array_almost_equal(output, expected) - -def test_histogram03(): - "histogram 3" - labels = [1, 0, 1, 1, 2, 2, 2, 2] - expected1 = [0, 1, 0, 1, 1] - expected2 = [0, 0, 0, 3, 0] - input = np.array([1, 1, 3, 4, 3, 5, 3, 3]) - output = ndimage.histogram(input, 0, 4, 5, labels, (1,2)) - - assert_array_almost_equal(output[0], expected1) - assert_array_almost_equal(output[1], expected2) - - -def test_stat_funcs_2d(): - """Apply the stat funcs to a 2-d array.""" - a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]]) - lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]]) - - mean= ndimage.mean(a, labels=lbl, index=[1, 2]) - assert_array_equal(mean, [7.0, 4.0]) - - var = ndimage.variance(a, labels=lbl, index=[1, 2]) - assert_array_equal(var, [2.5, 1.0]) - - std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2]) - assert_array_almost_equal(std, np.sqrt([2.5, 1.0])) - - med = ndimage.median(a, labels=lbl, index=[1, 2]) - assert_array_equal(med, [7.0, 4.0]) - - min = ndimage.minimum(a, labels=lbl, index=[1, 2]) - assert_array_equal(min, [5, 3]) - - max = ndimage.maximum(a, labels=lbl, index=[1, 2]) - assert_array_equal(max, [9, 5]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/ndimage/tests/test_ndimage.py b/scipy-0.10.1/scipy/ndimage/tests/test_ndimage.py deleted file mode 100644 index fecbab0756..0000000000 --- a/scipy-0.10.1/scipy/ndimage/tests/test_ndimage.py +++ /dev/null @@ -1,4723 +0,0 @@ -# Copyright (C) 2003-2005 Peter J. Verveer -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. 
Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# 3. The name of the author may not be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from __future__ import division - -import math -import numpy -import numpy as np -from numpy import fft -from numpy.testing import assert_, assert_equal, assert_array_equal, \ - TestCase, run_module_suite, \ - assert_array_almost_equal, assert_almost_equal -import scipy.ndimage as ndimage - -eps = 1e-12 - -def sumsq(a, b): - return math.sqrt(((a - b)**2).sum()) - -class TestNdimage: - - def setUp(self): - # list of numarray data types - self.integer_types = [numpy.int8, numpy.uint8, numpy.int16, - numpy.uint16, numpy.int32, numpy.uint32, - numpy.int64, numpy.uint64] - - self.float_types = [numpy.float32, numpy.float64] - - self.types = self.integer_types + self.float_types - - # list of boundary modes: - self.modes = ['nearest', 'wrap', 'reflect', 'mirror', 'constant'] - - def test_correlate01(self): - "correlation 1" - array = numpy.array([1, 2]) - weights = numpy.array([2]) - expected = [2, 4] - - output = ndimage.correlate(array, weights) - assert_array_almost_equal(output, expected) - - output = ndimage.convolve(array, weights) - assert_array_almost_equal(output, expected) - - output = ndimage.correlate1d(array, weights) - assert_array_almost_equal(output, expected) - - output = ndimage.convolve1d(array, weights) - assert_array_almost_equal(output, expected) - - def test_correlate02(self): - "correlation 2" - array = numpy.array([1, 2, 3]) - kernel = numpy.array([1]) - - output = ndimage.correlate(array, kernel) - assert_array_almost_equal(array, output) - - output = ndimage.convolve(array, kernel) - assert_array_almost_equal(array, output) - - output = ndimage.correlate1d(array, kernel) - assert_array_almost_equal(array, output) - - output = ndimage.convolve1d(array, kernel) - assert_array_almost_equal(array, output) - - def test_correlate03(self): - "correlation 3" - array = numpy.array([1]) - weights = numpy.array([1, 1]) - expected = [2] - - output = ndimage.correlate(array, weights) - assert_array_almost_equal(output, expected) - - output = 
ndimage.convolve(array, weights) - assert_array_almost_equal(output, expected) - - output = ndimage.correlate1d(array, weights) - assert_array_almost_equal(output, expected) - - output = ndimage.convolve1d(array, weights) - assert_array_almost_equal(output, expected) - - def test_correlate04(self): - "correlation 4" - array = numpy.array([1, 2]) - tcor = [2, 3] - tcov = [3, 4] - weights = numpy.array([1, 1]) - output = ndimage.correlate(array, weights) - assert_array_almost_equal(output, tcor) - output = ndimage.convolve(array, weights) - assert_array_almost_equal(output, tcov) - output = ndimage.correlate1d(array, weights) - assert_array_almost_equal(output, tcor) - output = ndimage.convolve1d(array, weights) - assert_array_almost_equal(output, tcov) - - def test_correlate05(self): - "correlation 5" - array = numpy.array([1, 2, 3]) - tcor = [2, 3, 5] - tcov = [3, 5, 6] - kernel = numpy.array([1, 1]) - output = ndimage.correlate(array, kernel) - assert_array_almost_equal(tcor, output) - output = ndimage.convolve(array, kernel) - assert_array_almost_equal(tcov, output) - output = ndimage.correlate1d(array, kernel) - assert_array_almost_equal(tcor, output) - output = ndimage.convolve1d(array, kernel) - assert_array_almost_equal(tcov, output) - - def test_correlate06(self): - "correlation 6" - array = numpy.array([1, 2, 3]) - tcor = [9, 14, 17] - tcov = [7, 10, 15] - weights = numpy.array([1, 2, 3]) - output = ndimage.correlate(array, weights) - assert_array_almost_equal(output, tcor) - output = ndimage.convolve(array, weights) - assert_array_almost_equal(output, tcov) - output = ndimage.correlate1d(array, weights) - assert_array_almost_equal(output, tcor) - output = ndimage.convolve1d(array, weights) - assert_array_almost_equal(output, tcov) - - def test_correlate07(self): - "correlation 7" - array = numpy.array([1, 2, 3]) - expected = [5, 8, 11] - weights = numpy.array([1, 2, 1]) - output = ndimage.correlate(array, weights) - assert_array_almost_equal(output, 
expected) - output = ndimage.convolve(array, weights) - assert_array_almost_equal(output, expected) - output = ndimage.correlate1d(array, weights) - assert_array_almost_equal(output, expected) - output = ndimage.convolve1d(array, weights) - assert_array_almost_equal(output, expected) - - def test_correlate08(self): - "correlation 8" - array = numpy.array([1, 2, 3]) - tcor = [1, 2, 5] - tcov = [3, 6, 7] - weights = numpy.array([1, 2, -1]) - output = ndimage.correlate(array, weights) - assert_array_almost_equal(output, tcor) - output = ndimage.convolve(array, weights) - assert_array_almost_equal(output, tcov) - output = ndimage.correlate1d(array, weights) - assert_array_almost_equal(output, tcor) - output = ndimage.convolve1d(array, weights) - assert_array_almost_equal(output, tcov) - - def test_correlate09(self): - "correlation 9" - array = [] - kernel = numpy.array([1, 1]) - output = ndimage.correlate(array, kernel) - assert_array_almost_equal(array, output) - output = ndimage.convolve(array, kernel) - assert_array_almost_equal(array, output) - output = ndimage.correlate1d(array, kernel) - assert_array_almost_equal(array, output) - output = ndimage.convolve1d(array, kernel) - assert_array_almost_equal(array, output) - - def test_correlate10(self): - "correlation 10" - array = [[]] - kernel = numpy.array([[1, 1]]) - output = ndimage.correlate(array, kernel) - assert_array_almost_equal(array, output) - output = ndimage.convolve(array, kernel) - assert_array_almost_equal(array, output) - - def test_correlate11(self): - "correlation 11" - array = numpy.array([[1, 2, 3], - [4, 5, 6]]) - kernel = numpy.array([[1, 1], - [1, 1]]) - output = ndimage.correlate(array, kernel) - assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output) - output = ndimage.convolve(array, kernel) - assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output) - - def test_correlate12(self): - "correlation 12" - array = numpy.array([[1, 2, 3], - [4, 5, 6]]) - kernel = numpy.array([[1, 0], - 
[0, 1]]) - output = ndimage.correlate(array, kernel) - assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) - output = ndimage.convolve(array, kernel) - assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) - - def test_correlate13(self): - "correlation 13" - kernel = numpy.array([[1, 0], - [0, 1]]) - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [4, 5, 6]], type1) - for type2 in self.types: - output = ndimage.correlate(array, kernel, - output=type2) - assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) - assert_equal(output.dtype.type, type2) - - output = ndimage.convolve(array, kernel, - output=type2) - assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) - assert_equal(output.dtype.type, type2) - - def test_correlate14(self): - "correlation 14" - kernel = numpy.array([[1, 0], - [0, 1]]) - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [4, 5, 6]], type1) - for type2 in self.types: - output = numpy.zeros(array.shape, type2) - ndimage.correlate(array, kernel, - output=output) - assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) - assert_equal(output.dtype.type, type2) - - ndimage.convolve(array, kernel, output=output) - assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) - assert_equal(output.dtype.type, type2) - - def test_correlate15(self): - "correlation 15" - kernel = numpy.array([[1, 0], - [0, 1]]) - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [4, 5, 6]], type1) - output = ndimage.correlate(array, kernel, - output=numpy.float32) - assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) - assert_equal(output.dtype.type, numpy.float32) - - output = ndimage.convolve(array, kernel, - output=numpy.float32) - assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) - assert_equal(output.dtype.type, numpy.float32) - - def test_correlate16(self): - "correlation 16" - kernel = numpy.array([[0.5, 0 ], - [0, 0.5]]) - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [4, 
5, 6]], type1) - output = ndimage.correlate(array, kernel, - output=numpy.float32) - assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output) - assert_equal(output.dtype.type, numpy.float32) - - output = ndimage.convolve(array, kernel, - output=numpy.float32) - assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output) - assert_equal(output.dtype.type, numpy.float32) - - def test_correlate17(self): - "correlation 17" - array = numpy.array([1, 2, 3]) - tcor = [3, 5, 6] - tcov = [2, 3, 5] - kernel = numpy.array([1, 1]) - output = ndimage.correlate(array, kernel, origin=-1) - assert_array_almost_equal(tcor, output) - output = ndimage.convolve(array, kernel, origin=-1) - assert_array_almost_equal(tcov, output) - output = ndimage.correlate1d(array, kernel, origin=-1) - assert_array_almost_equal(tcor, output) - output = ndimage.convolve1d(array, kernel, origin=-1) - assert_array_almost_equal(tcov, output) - - def test_correlate18(self): - "correlation 18" - kernel = numpy.array([[1, 0], - [0, 1]]) - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [4, 5, 6]], type1) - output = ndimage.correlate(array, kernel, - output=numpy.float32, - mode='nearest', origin=-1) - assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) - assert_equal(output.dtype.type, numpy.float32) - - output = ndimage.convolve(array, kernel, - output=numpy.float32, - mode='nearest', origin=-1) - assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) - assert_equal(output.dtype.type, numpy.float32) - - def test_correlate19(self): - "correlation 19" - kernel = numpy.array([[1, 0], - [0, 1]]) - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [4, 5, 6]], type1) - output = ndimage.correlate(array, kernel, - output=numpy.float32, - mode='nearest', origin=[-1, 0]) - assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output) - assert_equal(output.dtype.type, numpy.float32) - - output = ndimage.convolve(array, kernel, - output=numpy.float32, - mode='nearest', 
origin=[-1, 0]) - assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output) - assert_equal(output.dtype.type, numpy.float32) - - def test_correlate20(self): - "correlation 20" - weights = numpy.array([1, 2, 1]) - expected = [[5, 10, 15], [7, 14, 21]] - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [2, 4, 6]], type1) - for type2 in self.types: - output = numpy.zeros((2, 3), type2) - ndimage.correlate1d(array, weights, axis=0, - output=output) - assert_array_almost_equal(output, expected) - ndimage.convolve1d(array, weights, axis=0, - output=output) - assert_array_almost_equal(output, expected) - - def test_correlate21(self): - "correlation 21" - array = numpy.array([[1, 2, 3], - [2, 4, 6]]) - expected = [[5, 10, 15], [7, 14, 21]] - weights = numpy.array([1, 2, 1]) - output = ndimage.correlate1d(array, weights, axis=0) - assert_array_almost_equal(output, expected) - output = ndimage.convolve1d(array, weights, axis=0) - assert_array_almost_equal(output, expected) - - def test_correlate22(self): - "correlation 22" - weights = numpy.array([1, 2, 1]) - expected = [[6, 12, 18], [6, 12, 18]] - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [2, 4, 6]], type1) - for type2 in self.types: - output = numpy.zeros((2, 3), type2) - ndimage.correlate1d(array, weights, axis=0, - mode='wrap', output=output) - assert_array_almost_equal(output, expected) - ndimage.convolve1d(array, weights, axis=0, - mode='wrap', output=output) - assert_array_almost_equal(output, expected) - - def test_correlate23(self): - "correlation 23" - weights = numpy.array([1, 2, 1]) - expected = [[5, 10, 15], [7, 14, 21]] - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [2, 4, 6]], type1) - for type2 in self.types: - output = numpy.zeros((2, 3), type2) - ndimage.correlate1d(array, weights, axis=0, - mode='nearest', output=output) - assert_array_almost_equal(output, expected) - ndimage.convolve1d(array, weights, axis=0, - mode='nearest', output=output) - 
assert_array_almost_equal(output, expected) - - def test_correlate24(self): - "correlation 24" - weights = numpy.array([1, 2, 1]) - tcor = [[7, 14, 21], [8, 16, 24]] - tcov = [[4, 8, 12], [5, 10, 15]] - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [2, 4, 6]], type1) - for type2 in self.types: - output = numpy.zeros((2, 3), type2) - ndimage.correlate1d(array, weights, axis=0, - mode='nearest', output=output, origin=-1) - assert_array_almost_equal(output, tcor) - ndimage.convolve1d(array, weights, axis=0, - mode='nearest', output=output, origin=-1) - assert_array_almost_equal(output, tcov) - - def test_correlate25(self): - "correlation 25" - weights = numpy.array([1, 2, 1]) - tcor = [[4, 8, 12], [5, 10, 15]] - tcov = [[7, 14, 21], [8, 16, 24]] - for type1 in self.types: - array = numpy.array([[1, 2, 3], - [2, 4, 6]], type1) - for type2 in self.types: - output = numpy.zeros((2, 3), type2) - ndimage.correlate1d(array, weights, axis=0, - mode='nearest', output=output, origin=1) - assert_array_almost_equal(output, tcor) - ndimage.convolve1d(array, weights, axis=0, - mode='nearest', output=output, origin=1) - assert_array_almost_equal(output, tcov) - - def test_gauss01(self): - "gaussian filter 1" - input = numpy.array([[1, 2, 3], - [2, 4, 6]], numpy.float32) - output = ndimage.gaussian_filter(input, 0) - assert_array_almost_equal(output, input) - - def test_gauss02(self): - "gaussian filter 2" - input = numpy.array([[1, 2, 3], - [2, 4, 6]], numpy.float32) - output = ndimage.gaussian_filter(input, 1.0) - assert_equal(input.dtype, output.dtype) - assert_equal(input.shape, output.shape) - - def test_gauss03(self): - "gaussian filter 3 - single precision data" - input = numpy.arange(100 * 100).astype(numpy.float32) - input.shape = (100, 100) - output = ndimage.gaussian_filter(input, [1.0, 1.0]) - - assert_equal(input.dtype, output.dtype) - assert_equal(input.shape, output.shape) - - # input.sum() is 49995000.0. 
With single precision floats, we can't - # expect more than 8 digits of accuracy, so use decimal=0 in this test. - assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'), decimal=0) - assert_(sumsq(input, output) > 1.0) - - def test_gauss04(self): - "gaussian filter 4" - input = numpy.arange(100 * 100).astype(numpy.float32) - input.shape = (100, 100) - otype = numpy.float64 - output = ndimage.gaussian_filter(input, [1.0, 1.0], - output=otype) - assert_equal(output.dtype.type, numpy.float64) - assert_equal(input.shape, output.shape) - assert_(sumsq(input, output) > 1.0) - - def test_gauss05(self): - "gaussian filter 5" - input = numpy.arange(100 * 100).astype(numpy.float32) - input.shape = (100, 100) - otype = numpy.float64 - output = ndimage.gaussian_filter(input, [1.0, 1.0], - order=1, output=otype) - assert_equal(output.dtype.type, numpy.float64) - assert_equal(input.shape, output.shape) - assert_(sumsq(input, output) > 1.0) - - def test_gauss06(self): - "gaussian filter 6" - input = numpy.arange(100 * 100).astype(numpy.float32) - input.shape = (100, 100) - otype = numpy.float64 - output1 = ndimage.gaussian_filter(input, [1.0, 1.0], - output=otype) - output2 = ndimage.gaussian_filter(input, 1.0, - output=otype) - assert_array_almost_equal(output1, output2) - - def test_prewitt01(self): - "prewitt filter 1" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) - t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1) - output = ndimage.prewitt(array, 0) - assert_array_almost_equal(t, output) - - - def test_prewitt02(self): - "prewitt filter 2" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) - t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1) - output = numpy.zeros(array.shape, type) - ndimage.prewitt(array, 0, output) - 
assert_array_almost_equal(t, output) - - def test_prewitt03(self): - "prewitt filter 3" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1) - t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0) - output = ndimage.prewitt(array, 1) - assert_array_almost_equal(t, output) - - def test_prewitt04(self): - "prewitt filter 4" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.prewitt(array, -1) - output = ndimage.prewitt(array, 1) - assert_array_almost_equal(t, output) - - def test_sobel01(self): - "sobel filter 1" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) - t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1) - output = ndimage.sobel(array, 0) - assert_array_almost_equal(t, output) - - def test_sobel02(self): - "sobel filter 2" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) - t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1) - output = numpy.zeros(array.shape, type) - ndimage.sobel(array, 0, output) - assert_array_almost_equal(t, output) - - def test_sobel03(self): - "sobel filter 3" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1) - t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0) - output = numpy.zeros(array.shape, type) - output = ndimage.sobel(array, 1) - assert_array_almost_equal(t, output) - - def test_sobel04(self): - "sobel filter 4" - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - t = ndimage.sobel(array, -1) - output = ndimage.sobel(array, 1) - assert_array_almost_equal(t, output) 
- - def test_laplace01(self): - "laplace filter 1" - for type in [numpy.int32, numpy.float32, numpy.float64]: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) * 100 - tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0) - tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1) - output = ndimage.laplace(array) - assert_array_almost_equal(tmp1 + tmp2, output) - - def test_laplace02(self): - "laplace filter 2" - for type in [numpy.int32, numpy.float32, numpy.float64]: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) * 100 - tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0) - tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1) - output = numpy.zeros(array.shape, type) - ndimage.laplace(array, output=output) - assert_array_almost_equal(tmp1 + tmp2, output) - - def test_gaussian_laplace01(self): - "gaussian laplace filter 1" - for type in [numpy.int32, numpy.float32, numpy.float64]: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) * 100 - tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0]) - tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2]) - output = ndimage.gaussian_laplace(array, 1.0) - assert_array_almost_equal(tmp1 + tmp2, output) - - def test_gaussian_laplace02(self): - "gaussian laplace filter 2" - for type in [numpy.int32, numpy.float32, numpy.float64]: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) * 100 - tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0]) - tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2]) - output = numpy.zeros(array.shape, type) - ndimage.gaussian_laplace(array, 1.0, output) - assert_array_almost_equal(tmp1 + tmp2, output) - - def test_generic_laplace01(self): - "generic laplace filter 1" - def derivative2(input, axis, output, mode, cval, a, b): - sigma = [a, b / 2.0] - input = numpy.asarray(input) - order = [0] * input.ndim - order[axis] = 2 - return ndimage.gaussian_filter(input, sigma, order, - output, mode, 
cval) - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - output = numpy.zeros(array.shape, type) - tmp = ndimage.generic_laplace(array, derivative2, - extra_arguments=(1.0,), extra_keywords={'b': 2.0}) - ndimage.gaussian_laplace(array, 1.0, output) - assert_array_almost_equal(tmp, output) - - def test_gaussian_gradient_magnitude01(self): - "gaussian gradient magnitude filter 1" - for type in [numpy.int32, numpy.float32, numpy.float64]: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) * 100 - tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0]) - tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1]) - output = ndimage.gaussian_gradient_magnitude(array, - 1.0) - expected = tmp1 * tmp1 + tmp2 * tmp2 - expected = numpy.sqrt(expected).astype(type) - assert_array_almost_equal(expected, output) - - def test_gaussian_gradient_magnitude02(self): - "gaussian gradient magnitude filter 2" - for type in [numpy.int32, numpy.float32, numpy.float64]: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) * 100 - tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0]) - tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1]) - output = numpy.zeros(array.shape, type) - ndimage.gaussian_gradient_magnitude(array, 1.0, - output) - expected = tmp1 * tmp1 + tmp2 * tmp2 - expected = numpy.sqrt(expected).astype(type) - assert_array_almost_equal(expected, output) - - def test_generic_gradient_magnitude01(self): - "generic gradient magnitude 1" - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], numpy.float64) - def derivative(input, axis, output, mode, cval, a, b): - sigma = [a, b / 2.0] - input = numpy.asarray(input) - order = [0] * input.ndim - order[axis] = 1 - return ndimage.gaussian_filter(input, sigma, order, - output, mode, cval) - tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0) - tmp2 = ndimage.generic_gradient_magnitude(array, - 
derivative, extra_arguments=(1.0,), - extra_keywords={'b': 2.0}) - assert_array_almost_equal(tmp1, tmp2) - - def test_uniform01(self): - "uniform filter 1" - array = numpy.array([2, 4, 6]) - size = 2 - output = ndimage.uniform_filter1d(array, size, - origin=-1) - assert_array_almost_equal([3, 5, 6], output) - - def test_uniform02(self): - "uniform filter 2" - array = numpy.array([1, 2, 3]) - filter_shape = [0] - output = ndimage.uniform_filter(array, filter_shape) - assert_array_almost_equal(array, output) - - def test_uniform03(self): - "uniform filter 3" - array = numpy.array([1, 2, 3]) - filter_shape = [1] - output = ndimage.uniform_filter(array, filter_shape) - assert_array_almost_equal(array, output) - - def test_uniform04(self): - "uniform filter 4" - array = numpy.array([2, 4, 6]) - filter_shape = [2] - output = ndimage.uniform_filter(array, filter_shape) - assert_array_almost_equal([2, 3, 5], output) - - def test_uniform05(self): - "uniform filter 5" - array = [] - filter_shape = [1] - output = ndimage.uniform_filter(array, filter_shape) - assert_array_almost_equal([], output) - - def test_uniform06(self): - "uniform filter 6" - filter_shape = [2, 2] - for type1 in self.types: - array = numpy.array([[4, 8, 12], - [16, 20, 24]], type1) - for type2 in self.types: - output = ndimage.uniform_filter(array, - filter_shape, output=type2) - assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output) - assert_equal(output.dtype.type, type2) - - def test_minimum_filter01(self): - "minimum filter 1" - array = numpy.array([1, 2, 3, 4, 5]) - filter_shape = numpy.array([2]) - output = ndimage.minimum_filter(array, filter_shape) - assert_array_almost_equal([1, 1, 2, 3, 4], output) - - def test_minimum_filter02(self): - "minimum filter 2" - array = numpy.array([1, 2, 3, 4, 5]) - filter_shape = numpy.array([3]) - output = ndimage.minimum_filter(array, filter_shape) - assert_array_almost_equal([1, 1, 2, 3, 4], output) - - def test_minimum_filter03(self): - "minimum filter 
3" - array = numpy.array([3, 2, 5, 1, 4]) - filter_shape = numpy.array([2]) - output = ndimage.minimum_filter(array, filter_shape) - assert_array_almost_equal([3, 2, 2, 1, 1], output) - - def test_minimum_filter04(self): - "minimum filter 4" - array = numpy.array([3, 2, 5, 1, 4]) - filter_shape = numpy.array([3]) - output = ndimage.minimum_filter(array, filter_shape) - assert_array_almost_equal([2, 2, 1, 1, 1], output) - - def test_minimum_filter05(self): - "minimum filter 5" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - filter_shape = numpy.array([2, 3]) - output = ndimage.minimum_filter(array, filter_shape) - assert_array_almost_equal([[2, 2, 1, 1, 1], - [2, 2, 1, 1, 1], - [5, 3, 3, 1, 1]], output) - - def test_minimum_filter06(self): - "minimum filter 6" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 1, 1], [1, 1, 1]] - output = ndimage.minimum_filter(array, - footprint=footprint) - assert_array_almost_equal([[2, 2, 1, 1, 1], - [2, 2, 1, 1, 1], - [5, 3, 3, 1, 1]], output) - - def test_minimum_filter07(self): - "minimum filter 7" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.minimum_filter(array, - footprint=footprint) - assert_array_almost_equal([[2, 2, 1, 1, 1], - [2, 3, 1, 3, 1], - [5, 5, 3, 3, 1]], output) - - def test_minimum_filter08(self): - "minimum filter 8" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.minimum_filter(array, - footprint=footprint, origin=-1) - assert_array_almost_equal([[3, 1, 3, 1, 1], - [5, 3, 3, 1, 1], - [3, 3, 1, 1, 1]], output) - - def test_minimum_filter09(self): - "minimum filter 9" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.minimum_filter(array, - footprint=footprint, origin=[-1, 
0]) - assert_array_almost_equal([[2, 3, 1, 3, 1], - [5, 5, 3, 3, 1], - [5, 3, 3, 1, 1]], output) - - def test_maximum_filter01(self): - "maximum filter 1" - array = numpy.array([1, 2, 3, 4, 5]) - filter_shape = numpy.array([2]) - output = ndimage.maximum_filter(array, filter_shape) - assert_array_almost_equal([1, 2, 3, 4, 5], output) - - def test_maximum_filter02(self): - "maximum filter 2" - array = numpy.array([1, 2, 3, 4, 5]) - filter_shape = numpy.array([3]) - output = ndimage.maximum_filter(array, filter_shape) - assert_array_almost_equal([2, 3, 4, 5, 5], output) - - def test_maximum_filter03(self): - "maximum filter 3" - array = numpy.array([3, 2, 5, 1, 4]) - filter_shape = numpy.array([2]) - output = ndimage.maximum_filter(array, filter_shape) - assert_array_almost_equal([3, 3, 5, 5, 4], output) - - def test_maximum_filter04(self): - "maximum filter 4" - array = numpy.array([3, 2, 5, 1, 4]) - filter_shape = numpy.array([3]) - output = ndimage.maximum_filter(array, filter_shape) - assert_array_almost_equal([3, 5, 5, 5, 4], output) - - def test_maximum_filter05(self): - "maximum filter 5" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - filter_shape = numpy.array([2, 3]) - output = ndimage.maximum_filter(array, filter_shape) - assert_array_almost_equal([[3, 5, 5, 5, 4], - [7, 9, 9, 9, 5], - [8, 9, 9, 9, 7]], output) - - def test_maximum_filter06(self): - "maximum filter 6" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 1, 1], [1, 1, 1]] - output = ndimage.maximum_filter(array, - footprint=footprint) - assert_array_almost_equal([[3, 5, 5, 5, 4], - [7, 9, 9, 9, 5], - [8, 9, 9, 9, 7]], output) - - def test_maximum_filter07(self): - "maximum filter 7" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.maximum_filter(array, - footprint=footprint) - assert_array_almost_equal([[3, 5, 5, 5, 4], - [7, 7, 
9, 9, 5], - [7, 9, 8, 9, 7]], output) - - def test_maximum_filter08(self): - "maximum filter 8" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.maximum_filter(array, - footprint=footprint, origin=-1) - assert_array_almost_equal([[7, 9, 9, 5, 5], - [9, 8, 9, 7, 5], - [8, 8, 7, 7, 7]], output) - - def test_maximum_filter09(self): - "maximum filter 9" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.maximum_filter(array, - footprint=footprint, origin=[-1, 0]) - assert_array_almost_equal([[7, 7, 9, 9, 5], - [7, 9, 8, 9, 7], - [8, 8, 8, 7, 7]], output) - - def test_rank01(self): - "rank filter 1" - array = numpy.array([1, 2, 3, 4, 5]) - output = ndimage.rank_filter(array, 1, size=2) - assert_array_almost_equal(array, output) - output = ndimage.percentile_filter(array, 100, size=2) - assert_array_almost_equal(array, output) - output = ndimage.median_filter(array, 2) - assert_array_almost_equal(array, output) - - def test_rank02(self): - "rank filter 2" - array = numpy.array([1, 2, 3, 4, 5]) - output = ndimage.rank_filter(array, 1, size=[3]) - assert_array_almost_equal(array, output) - output = ndimage.percentile_filter(array, 50, size=3) - assert_array_almost_equal(array, output) - output = ndimage.median_filter(array, (3,)) - assert_array_almost_equal(array, output) - - def test_rank03(self): - "rank filter 3" - array = numpy.array([3, 2, 5, 1, 4]) - output = ndimage.rank_filter(array, 1, size=[2]) - assert_array_almost_equal([3, 3, 5, 5, 4], output) - output = ndimage.percentile_filter(array, 100, size=2) - assert_array_almost_equal([3, 3, 5, 5, 4], output) - - def test_rank04(self): - "rank filter 4" - array = numpy.array([3, 2, 5, 1, 4]) - expected = [3, 3, 2, 4, 4] - output = ndimage.rank_filter(array, 1, size=3) - assert_array_almost_equal(expected, output) - output = 
ndimage.percentile_filter(array, 50, size=3) - assert_array_almost_equal(expected, output) - output = ndimage.median_filter(array, size=3) - assert_array_almost_equal(expected, output) - - def test_rank05(self): - "rank filter 5" - array = numpy.array([3, 2, 5, 1, 4]) - expected = [3, 3, 2, 4, 4] - output = ndimage.rank_filter(array, -2, size=3) - assert_array_almost_equal(expected, output) - - def test_rank06(self): - "rank filter 6" - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]]) - expected = [[2, 2, 1, 1, 1], - [3, 3, 2, 1, 1], - [5, 5, 3, 3, 1]] - output = ndimage.rank_filter(array, 1, size=[2, 3]) - assert_array_almost_equal(expected, output) - output = ndimage.percentile_filter(array, 17, - size=(2, 3)) - assert_array_almost_equal(expected, output) - - def test_rank07(self): - "rank filter 7" - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]]) - expected = [[3, 5, 5, 5, 4], - [5, 5, 7, 5, 4], - [6, 8, 8, 7, 5]] - output = ndimage.rank_filter(array, -2, size=[2, 3]) - assert_array_almost_equal(expected, output) - - def test_rank08(self): - "median filter 8" - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]]) - expected = [[3, 3, 2, 4, 4], - [5, 5, 5, 4, 4], - [5, 6, 7, 5, 5]] - kernel = numpy.array([2, 3]) - output = ndimage.percentile_filter(array, 50.0, - size=(2, 3)) - assert_array_almost_equal(expected, output) - output = ndimage.rank_filter(array, 3, size=(2, 3)) - assert_array_almost_equal(expected, output) - output = ndimage.median_filter(array, size=(2, 3)) - assert_array_almost_equal(expected, output) - - def test_rank09(self): - "rank filter 9" - expected = [[3, 3, 2, 4, 4], - [3, 5, 2, 5, 1], - [5, 5, 8, 3, 5]] - footprint = [[1, 0, 1], [0, 1, 0]] - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - output = ndimage.rank_filter(array, 1, - footprint=footprint) - assert_array_almost_equal(expected, output) - 
output = ndimage.percentile_filter(array, 35, - footprint=footprint) - assert_array_almost_equal(expected, output) - - def test_rank10(self): - "rank filter 10" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - expected = [[2, 2, 1, 1, 1], - [2, 3, 1, 3, 1], - [5, 5, 3, 3, 1]] - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.rank_filter(array, 0, - footprint=footprint) - assert_array_almost_equal(expected, output) - output = ndimage.percentile_filter(array, 0.0, - footprint=footprint) - assert_array_almost_equal(expected, output) - - def test_rank11(self): - "rank filter 11" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - expected = [[3, 5, 5, 5, 4], - [7, 7, 9, 9, 5], - [7, 9, 8, 9, 7]] - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.rank_filter(array, -1, - footprint=footprint) - assert_array_almost_equal(expected, output) - output = ndimage.percentile_filter(array, 100.0, - footprint=footprint) - assert_array_almost_equal(expected, output) - - - def test_rank12(self): - "rank filter 12" - expected = [[3, 3, 2, 4, 4], - [3, 5, 2, 5, 1], - [5, 5, 8, 3, 5]] - footprint = [[1, 0, 1], [0, 1, 0]] - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - output = ndimage.rank_filter(array, 1, - footprint=footprint) - assert_array_almost_equal(expected, output) - output = ndimage.percentile_filter(array, 50.0, - footprint=footprint) - assert_array_almost_equal(expected, output) - output = ndimage.median_filter(array, - footprint=footprint) - assert_array_almost_equal(expected, output) - - def test_rank13(self): - "rank filter 13" - expected = [[5, 2, 5, 1, 1], - [5, 8, 3, 5, 5], - [6, 6, 5, 5, 5]] - footprint = [[1, 0, 1], [0, 1, 0]] - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - output = ndimage.rank_filter(array, 1, - footprint=footprint, origin=-1) - 
assert_array_almost_equal(expected, output) - - def test_rank14(self): - "rank filter 14" - expected = [[3, 5, 2, 5, 1], - [5, 5, 8, 3, 5], - [5, 6, 6, 5, 5]] - footprint = [[1, 0, 1], [0, 1, 0]] - for type in self.types: - array = numpy.array([[3, 2, 5, 1, 4], - [5, 8, 3, 7, 1], - [5, 6, 9, 3, 5]], type) - output = ndimage.rank_filter(array, 1, - footprint=footprint, origin=[-1, 0]) - assert_array_almost_equal(expected, output) - - def test_generic_filter1d01(self): - "generic 1d filter 1" - weights = numpy.array([1.1, 2.2, 3.3]) - def _filter_func(input, output, fltr, total): - fltr = fltr / total - for ii in range(input.shape[0] - 2): - output[ii] = input[ii] * fltr[0] - output[ii] += input[ii + 1] * fltr[1] - output[ii] += input[ii + 2] * fltr[2] - for type in self.types: - a = numpy.arange(12, dtype=type) - a.shape = (3,4) - r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, - origin=-1) - r2 = ndimage.generic_filter1d(a, _filter_func, 3, - axis=0, origin=-1, extra_arguments=(weights,), - extra_keywords={'total': weights.sum()}) - assert_array_almost_equal(r1, r2) - - def test_generic_filter01(self): - "generic filter 1" - filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]]) - footprint = numpy.array([[1, 0], [0, 1]]) - cf = numpy.array([1., 4.]) - def _filter_func(buffer, weights, total=1.0): - weights = cf / total - return (buffer * weights).sum() - for type in self.types: - a = numpy.arange(12, dtype=type) - a.shape = (3,4) - r1 = ndimage.correlate(a, filter_ * footprint) - if type in self.float_types: - r1 /= 5 - else: - r1 //= 5 - r2 = ndimage.generic_filter(a, _filter_func, - footprint=footprint, extra_arguments=(cf,), - extra_keywords={'total': cf.sum()}) - assert_array_almost_equal(r1, r2) - - def test_extend01(self): - "line extension 1" - array = numpy.array([1, 2, 3]) - weights = numpy.array([1, 0]) - expected_values = [[1, 1, 2], - [3, 1, 2], - [1, 1, 2], - [2, 1, 2], - [0, 1, 2]] - for mode, expected_value in zip(self.modes, expected_values): - 
output = ndimage.correlate1d(array, weights, 0, - mode=mode, cval=0) - assert_array_equal(output,expected_value) - - def test_extend02(self): - "line extension 2" - array = numpy.array([1, 2, 3]) - weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0]) - expected_values = [[1, 1, 1], - [3, 1, 2], - [3, 3, 2], - [1, 2, 3], - [0, 0, 0]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate1d(array, weights, 0, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - def test_extend03(self): - "line extension 3" - array = numpy.array([1, 2, 3]) - weights = numpy.array([0, 0, 1]) - expected_values = [[2, 3, 3], - [2, 3, 1], - [2, 3, 3], - [2, 3, 2], - [2, 3, 0]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate1d(array, weights, 0, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - def test_extend04(self): - "line extension 4" - array = numpy.array([1, 2, 3]) - weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) - expected_values = [[3, 3, 3], - [2, 3, 1], - [2, 1, 1], - [1, 2, 3], - [0, 0, 0]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate1d(array, weights, 0, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - - def test_extend05(self): - "line extension 5" - array = numpy.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - weights = numpy.array([[1, 0], [0, 0]]) - expected_values = [[[1, 1, 2], [1, 1, 2], [4, 4, 5]], - [[9, 7, 8], [3, 1, 2], [6, 4, 5]], - [[1, 1, 2], [1, 1, 2], [4, 4, 5]], - [[5, 4, 5], [2, 1, 2], [5, 4, 5]], - [[0, 0, 0], [0, 1, 2], [0, 4, 5]]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate(array, weights, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - - def test_extend06(self): - "line extension 6" - array = numpy.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]) - 
expected_values = [[[5, 6, 6], [8, 9, 9], [8, 9, 9]], - [[5, 6, 4], [8, 9, 7], [2, 3, 1]], - [[5, 6, 6], [8, 9, 9], [8, 9, 9]], - [[5, 6, 5], [8, 9, 8], [5, 6, 5]], - [[5, 6, 0], [8, 9, 0], [0, 0, 0]]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate(array, weights, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - - def test_extend07(self): - "line extension 7" - array = numpy.array([1, 2, 3]) - weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) - expected_values = [[3, 3, 3], - [2, 3, 1], - [2, 1, 1], - [1, 2, 3], - [0, 0, 0]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate(array, weights, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - def test_extend08(self): - "line extension 8" - array = numpy.array([[1], [2], [3]]) - weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], - [0], [1]]) - expected_values = [[[3], [3], [3]], - [[2], [3], [1]], - [[2], [1], [1]], - [[1], [2], [3]], - [[0], [0], [0]]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate(array, weights, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - def test_extend09(self): - "line extension 9" - array = numpy.array([1, 2, 3]) - weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) - expected_values = [[3, 3, 3], - [2, 3, 1], - [2, 1, 1], - [1, 2, 3], - [0, 0, 0]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate(array, weights, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - def test_extend10(self): - "line extension 10" - array = numpy.array([[1], [2], [3]]) - weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], - [0], [1]]) - expected_values = [[[3], [3], [3]], - [[2], [3], [1]], - [[2], [1], [1]], - [[1], [2], [3]], - [[0], [0], [0]]] - for mode, expected_value in zip(self.modes, expected_values): - output = ndimage.correlate(array, 
weights, - mode=mode, cval=0) - assert_array_equal(output, expected_value) - - def test_boundaries(self): - "boundary modes" - def shift(x): - return (x[0] + 0.5,) - - data = numpy.array([1,2,3,4.]) - expected = {'constant': [1.5,2.5,3.5,-1,-1,-1,-1], - 'wrap': [1.5,2.5,3.5,1.5,2.5,3.5,1.5], - 'mirror' : [1.5,2.5,3.5,3.5,2.5,1.5,1.5], - 'nearest' : [1.5,2.5,3.5,4,4,4,4]} - - for mode in expected.keys(): - assert_array_equal(expected[mode], - ndimage.geometric_transform(data,shift, - cval=-1,mode=mode, - output_shape=(7,), - order=1)) - - def test_boundaries2(self): - "boundary modes 2" - def shift(x): - return (x[0] - 0.9,) - - data = numpy.array([1,2,3,4]) - expected = {'constant': [-1,1,2,3], - 'wrap': [3,1,2,3], - 'mirror' : [2,1,2,3], - 'nearest' : [1,1,2,3]} - - for mode in expected.keys(): - assert_array_equal(expected[mode], - ndimage.geometric_transform(data,shift, - cval=-1,mode=mode, - output_shape=(4,))) - - def test_fourier_gaussian_real01(self): - "gaussian fourier filter for real transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in [numpy.float32, numpy.float64]: - a = numpy.zeros(shape, type) - a[0, 0] = 1.0 - a = fft.rfft(a, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_gaussian(a, [5.0, 2.5], - shape[0], 0) - a = fft.ifft(a, shape[1], 1) - a = fft.irfft(a, shape[0], 0) - assert_almost_equal(ndimage.sum(a), 1) - - def test_fourier_gaussian_complex01(self): - "gaussian fourier filter for complex transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in [numpy.complex64, numpy.complex128]: - a = numpy.zeros(shape, type) - a[0, 0] = 1.0 - a = fft.fft(a, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, - 0) - a = fft.ifft(a, shape[1], 1) - a = fft.ifft(a, shape[0], 0) - assert_almost_equal(ndimage.sum(a.real), 1.0) - - def test_fourier_uniform_real01(self): - "uniform fourier filter for real transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in [numpy.float32, 
numpy.float64]: - a = numpy.zeros(shape, type) - a[0, 0] = 1.0 - a = fft.rfft(a, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_uniform(a, [5.0, 2.5], - shape[0], 0) - a = fft.ifft(a, shape[1], 1) - a = fft.irfft(a, shape[0], 0) - assert_almost_equal(ndimage.sum(a), 1.0) - - def test_fourier_uniform_complex01(self): - "uniform fourier filter for complex transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in [numpy.complex64, numpy.complex128]: - a = numpy.zeros(shape, type) - a[0, 0] = 1.0 - a = fft.fft(a, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0) - a = fft.ifft(a, shape[1], 1) - a = fft.ifft(a, shape[0], 0) - assert_almost_equal(ndimage.sum(a.real), 1.0) - - def test_fourier_shift_real01(self): - "shift filter for real transforms 1" - for shape in [(32, 16), (31, 15)]: - for dtype in [numpy.float32, numpy.float64]: - expected = numpy.arange(shape[0] * shape[1], dtype=dtype) - expected.shape = shape - a = fft.rfft(expected, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_shift(a, [1, 1], shape[0], 0) - a = fft.ifft(a, shape[1], 1) - a = fft.irfft(a, shape[0], 0) - assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1]) - assert_array_almost_equal(a.imag, numpy.zeros(shape)) - - def test_fourier_shift_complex01(self): - "shift filter for complex transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in [numpy.complex64, numpy.complex128]: - expected = numpy.arange(shape[0] * shape[1], - dtype=type) - expected.shape = shape - a = fft.fft(expected, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_shift(a, [1, 1], -1, 0) - a = fft.ifft(a, shape[1], 1) - a = fft.ifft(a, shape[0], 0) - assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1]) - assert_array_almost_equal(a.imag, numpy.zeros(shape)) - - def test_fourier_ellipsoid_real01(self): - "ellipsoid fourier filter for real transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in 
[numpy.float32, numpy.float64]: - a = numpy.zeros(shape, type) - a[0, 0] = 1.0 - a = fft.rfft(a, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], - shape[0], 0) - a = fft.ifft(a, shape[1], 1) - a = fft.irfft(a, shape[0], 0) - assert_almost_equal(ndimage.sum(a), 1.0) - - def test_fourier_ellipsoid_complex01(self): - "ellipsoid fourier filter for complex transforms 1" - for shape in [(32, 16), (31, 15)]: - for type in [numpy.complex64, numpy.complex128]: - a = numpy.zeros(shape, type) - a[0, 0] = 1.0 - a = fft.fft(a, shape[0], 0) - a = fft.fft(a, shape[1], 1) - a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, - 0) - a = fft.ifft(a, shape[1], 1) - a = fft.ifft(a, shape[0], 0) - assert_almost_equal(ndimage.sum(a.real), 1.0) - - def test_spline01(self): - "spline filter 1" - for type in self.types: - data = numpy.ones([], type) - for order in range(2, 6): - out = ndimage.spline_filter(data, order=order) - assert_array_almost_equal(out, 1) - - def test_spline02(self): - "spline filter 2" - for type in self.types: - data = numpy.array([1]) - for order in range(2, 6): - out = ndimage.spline_filter(data, order=order) - assert_array_almost_equal(out, [1]) - - def test_spline03(self): - "spline filter 3" - for type in self.types: - data = numpy.ones([], type) - for order in range(2, 6): - out = ndimage.spline_filter(data, order, - output=type) - assert_array_almost_equal(out, 1) - - def test_spline04(self): - "spline filter 4" - for type in self.types: - data = numpy.ones([4], type) - for order in range(2, 6): - out = ndimage.spline_filter(data, order) - assert_array_almost_equal(out, [1, 1, 1, 1]) - - def test_spline05(self): - "spline filter 5" - for type in self.types: - data = numpy.ones([4, 4], type) - for order in range(2, 6): - out = ndimage.spline_filter(data, order=order) - assert_array_almost_equal(out, [[1, 1, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1]]) - - def test_geometric_transform01(self): - "geometric 
transform 1" - data = numpy.array([1]) - def mapping(x): - return x - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, - order=order) - assert_array_almost_equal(out, [1]) - - def test_geometric_transform02(self): - "geometric transform 2" - data = numpy.ones([4]) - def mapping(x): - return x - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [1, 1, 1, 1]) - - def test_geometric_transform03(self): - "geometric transform 3" - data = numpy.ones([4]) - def mapping(x): - return (x[0] - 1,) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [0, 1, 1, 1]) - - def test_geometric_transform04(self): - "geometric transform 4" - data = numpy.array([4, 1, 3, 2]) - def mapping(x): - return (x[0] - 1,) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [0, 4, 1, 3]) - - def test_geometric_transform05(self): - "geometric transform 5" - data = numpy.array([[1, 1, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1]]) - def mapping(x): - return (x[0], x[1] - 1) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [[0, 1, 1, 1], - [0, 1, 1, 1], - [0, 1, 1, 1]]) - - def test_geometric_transform06(self): - "geometric transform 6" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - def mapping(x): - return (x[0], x[1] - 1) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [[0, 4, 1, 3], - [0, 7, 6, 8], - [0, 3, 5, 3]]) - - def test_geometric_transform07(self): - "geometric transform 7" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - def mapping(x): - return (x[0] - 1, x[1]) - for order 
in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [4, 1, 3, 2], - [7, 6, 8, 5]]) - - def test_geometric_transform08(self): - "geometric transform 8" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - def mapping(x): - return (x[0] - 1, x[1] - 1) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - data.shape, order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - def test_geometric_transform10(self): - "geometric transform 10" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - def mapping(x): - return (x[0] - 1, x[1] - 1) - for order in range(0, 6): - if (order > 1): - filtered = ndimage.spline_filter(data, - order=order) - else: - filtered = data - out = ndimage.geometric_transform(filtered, mapping, - data.shape, order=order, prefilter=False) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - def test_geometric_transform13(self): - "geometric transform 13" - data = numpy.ones([2], numpy.float64) - def mapping(x): - return (x[0] // 2,) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - [4], order=order) - assert_array_almost_equal(out, [1, 1, 1, 1]) - - def test_geometric_transform14(self): - "geometric transform 14" - data = [1, 5, 2, 6, 3, 7, 4, 4] - def mapping(x): - return (2 * x[0],) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - [4], order=order) - assert_array_almost_equal(out, [1, 2, 3, 4]) - - def test_geometric_transform15(self): - "geometric transform 15" - data = [1, 2, 3, 4] - def mapping(x): - return (x[0] / 2,) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - [8], order=order) - assert_array_almost_equal(out[::2], [1, 2, 3, 4]) - - def test_geometric_transform16(self): - "geometric transform 16" - data = [[1, 2, 3, 4], - 
[5, 6, 7, 8], - [9.0, 10, 11, 12]] - def mapping(x): - return (x[0], x[1] * 2) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (3, 2), order=order) - assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) - - def test_geometric_transform17(self): - "geometric transform 17" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x): - return (x[0] * 2, x[1]) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (1, 4), order=order) - assert_array_almost_equal(out, [[1, 2, 3, 4]]) - - def test_geometric_transform18(self): - "geometric transform 18" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x): - return (x[0] * 2, x[1] * 2) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (1, 2), order=order) - assert_array_almost_equal(out, [[1, 3]]) - - def test_geometric_transform19(self): - "geometric transform 19" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x): - return (x[0], x[1] / 2) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (3, 8), order=order) - assert_array_almost_equal(out[..., ::2], data) - - def test_geometric_transform20(self): - "geometric transform 20" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x): - return (x[0] / 2, x[1]) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (6, 4), order=order) - assert_array_almost_equal(out[::2, ...], data) - - def test_geometric_transform21(self): - "geometric transform 21" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x): - return (x[0] / 2, x[1] / 2) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (6, 8), order=order) - assert_array_almost_equal(out[::2, ::2], data) - - - def test_geometric_transform22(self): - "geometric transform 22" - data = numpy.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 
12]], numpy.float64) - def mapping1(x): - return (x[0] / 2, x[1] / 2) - def mapping2(x): - return (x[0] * 2, x[1] * 2) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping1, - (6, 8), order=order) - out = ndimage.geometric_transform(out, mapping2, - (3, 4), order=order) - assert_array_almost_equal(out, data) - - def test_geometric_transform23(self): - "geometric transform 23" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x): - return (1, x[0] * 2) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (2,), order=order) - out = out.astype(numpy.int32) - assert_array_almost_equal(out, [5, 7]) - - def test_geometric_transform24(self): - "geometric transform 24" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - def mapping(x, a, b): - return (a, x[0] * b) - for order in range(0, 6): - out = ndimage.geometric_transform(data, mapping, - (2,), order=order, extra_arguments=(1,), - extra_keywords={'b': 2}) - assert_array_almost_equal(out, [5, 7]) - - def test_map_coordinates01(self): - "map coordinates 1" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - idx = numpy.indices(data.shape) - idx -= 1 - for order in range(0, 6): - out = ndimage.map_coordinates(data, idx, order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - def test_map_coordinates02(self): - "map coordinates 2" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - idx = numpy.indices(data.shape, numpy.float64) - idx -= 0.5 - for order in range(0, 6): - out1 = ndimage.shift(data, 0.5, order=order) - out2 = ndimage.map_coordinates(data, idx, - order=order) - assert_array_almost_equal(out1, out2) - - def test_affine_transform01(self): - "affine_transform 1" - data = numpy.array([1]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1]], - order=order) - assert_array_almost_equal(out, [1]) - - def test_affine_transform02(self): 
- "affine transform 2" - data = numpy.ones([4]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1]], - order=order) - assert_array_almost_equal(out, [1, 1, 1, 1]) - - def test_affine_transform03(self): - "affine transform 3" - data = numpy.ones([4]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1]], -1, - order=order) - assert_array_almost_equal(out, [0, 1, 1, 1]) - - def test_affine_transform04(self): - "affine transform 4" - data = numpy.array([4, 1, 3, 2]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1]], -1, - order=order) - assert_array_almost_equal(out, [0, 4, 1, 3]) - - def test_affine_transform05(self): - "affine transform 5" - data = numpy.array([[1, 1, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1]]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1, 0], - [0, 1]], - [0, -1], order=order) - assert_array_almost_equal(out, [[0, 1, 1, 1], - [0, 1, 1, 1], - [0, 1, 1, 1]]) - - def test_affine_transform06(self): - "affine transform 6" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1, 0], - [0, 1]], - [0, -1], order=order) - assert_array_almost_equal(out, [[0, 4, 1, 3], - [0, 7, 6, 8], - [0, 3, 5, 3]]) - - def test_affine_transform07(self): - "affine transform 7" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1, 0], - [0, 1]], - [-1, 0], order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [4, 1, 3, 2], - [7, 6, 8, 5]]) - - def test_affine_transform08(self): - "affine transform 8" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1, 0], - [0, 1]], - [-1, -1], order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - def test_affine_transform09(self): - "affine transform 
9" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - if (order > 1): - filtered = ndimage.spline_filter(data, - order=order) - else: - filtered = data - out = ndimage.affine_transform(filtered,[[1, 0], - [0, 1]], - [-1, -1], order=order, prefilter=False) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - def test_affine_transform10(self): - "affine transform 10" - data = numpy.ones([2], numpy.float64) - for order in range(0, 6): - out = ndimage.affine_transform(data, [[0.5]], - output_shape=(4,), order=order) - assert_array_almost_equal(out, [1, 1, 1, 0]) - - def test_affine_transform11(self): - "affine transform 11" - data = [1, 5, 2, 6, 3, 7, 4, 4] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[2]], 0, (4,), - order=order) - assert_array_almost_equal(out, [1, 2, 3, 4]) - - def test_affine_transform12(self): - "affine transform 12" - data = [1, 2, 3, 4] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[0.5]], 0, - (8,), order=order) - assert_array_almost_equal(out[::2], [1, 2, 3, 4]) - - def test_affine_transform13(self): - "affine transform 13" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9.0, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1, 0], - [0, 2]], 0, - (3, 2), order=order) - assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) - - def test_affine_transform14(self): - "affine transform 14" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[2, 0], - [0, 1]], 0, - (1, 4), order=order) - assert_array_almost_equal(out, [[1, 2, 3, 4]]) - - def test_affine_transform15(self): - "affine transform 15" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[2, 0], - [0, 2]], 0, - (1, 2), order=order) - assert_array_almost_equal(out, [[1, 3]]) - - def 
test_affine_transform16(self): - "affine transform 16" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[1, 0.0], - [0, 0.5]], 0, - (3, 8), order=order) - assert_array_almost_equal(out[..., ::2], data) - - def test_affine_transform17(self): - "affine transform 17" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[0.5, 0], - [0, 1]], 0, - (6, 4), order=order) - assert_array_almost_equal(out[::2, ...], data) - - def test_affine_transform18(self): - "affine transform 18" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, - [[0.5, 0], - [0, 0.5]], 0, - (6, 8), order=order) - assert_array_almost_equal(out[::2, ::2], data) - - def test_affine_transform19(self): - "affine transform 19" - data = numpy.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]], numpy.float64) - for order in range(0, 6): - out = ndimage.affine_transform(data, - [[0.5, 0], - [0, 0.5]], 0, - (6, 8), order=order) - out = ndimage.affine_transform(out, - [[2.0, 0], - [0, 2.0]], 0, - (3, 4), order=order) - assert_array_almost_equal(out, data) - - def test_affine_transform20(self): - "affine transform 20" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[0], [2]], 0, - (2,), order=order) - assert_array_almost_equal(out, [1, 3]) - - def test_affine_transform21(self): - "affine transform 21" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [[2], [0]], 0, - (2,), order=order) - assert_array_almost_equal(out, [1, 9]) - - def test_shift01(self): - "shift 1" - data = numpy.array([1]) - for order in range(0, 6): - out = ndimage.shift(data, [1], order=order) - assert_array_almost_equal(out, [0]) - - def test_shift02(self): - 
"shift 2" - data = numpy.ones([4]) - for order in range(0, 6): - out = ndimage.shift(data, [1], order=order) - assert_array_almost_equal(out, [0, 1, 1, 1]) - - def test_shift03(self): - "shift 3" - data = numpy.ones([4]) - for order in range(0, 6): - out = ndimage.shift(data, -1, order=order) - assert_array_almost_equal(out, [1, 1, 1, 0]) - - def test_shift04(self): - "shift 4" - data = numpy.array([4, 1, 3, 2]) - for order in range(0, 6): - out = ndimage.shift(data, 1, order=order) - assert_array_almost_equal(out, [0, 4, 1, 3]) - - def test_shift05(self): - "shift 5" - data = numpy.array([[1, 1, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1]]) - for order in range(0, 6): - out = ndimage.shift(data, [0, 1], order=order) - assert_array_almost_equal(out, [[0, 1, 1, 1], - [0, 1, 1, 1], - [0, 1, 1, 1]]) - - def test_shift06(self): - "shift 6" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - out = ndimage.shift(data, [0, 1], order=order) - assert_array_almost_equal(out, [[0, 4, 1, 3], - [0, 7, 6, 8], - [0, 3, 5, 3]]) - - def test_shift07(self): - "shift 7" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - out = ndimage.shift(data, [1, 0], order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [4, 1, 3, 2], - [7, 6, 8, 5]]) - - - def test_shift08(self): - "shift 8" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - out = ndimage.shift(data, [1, 1], order=order) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - def test_shift09(self): - "shift 9" - data = numpy.array([[4, 1, 3, 2], - [7, 6, 8, 5], - [3, 5, 3, 6]]) - for order in range(0, 6): - if (order > 1): - filtered = ndimage.spline_filter(data, - order=order) - else: - filtered = data - out = ndimage.shift(filtered, [1, 1], order=order, - prefilter=False) - assert_array_almost_equal(out, [[0, 0, 0, 0], - [0, 4, 1, 3], - [0, 7, 6, 8]]) - - 
def test_zoom1(self): - "zoom 1" - for order in range(0,6): - for z in [2,[2,2]]: - arr = numpy.array(range(25)).reshape((5,5)).astype(float) - arr = ndimage.zoom(arr, z, order=order) - assert_equal(arr.shape,(10,10)) - assert_(numpy.all(arr[-1,:] != 0)) - assert_(numpy.all(arr[-1,:] >= (20 - eps))) - assert_(numpy.all(arr[0,:] <= (5 + eps))) - assert_(numpy.all(arr >= (0 - eps))) - assert_(numpy.all(arr <= (24 + eps))) - - def test_zoom2(self): - "zoom 2" - arr = numpy.arange(12).reshape((3,4)) - out = ndimage.zoom(ndimage.zoom(arr,2),0.5) - assert_array_equal(out,arr) - - def test_zoom_affine01(self): - "zoom by affine transformation 1" - data = [[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]] - for order in range(0, 6): - out = ndimage.affine_transform(data, [0.5, 0.5], 0, - (6, 8), order=order) - assert_array_almost_equal(out[::2, ::2], data) - - def test_zoom_infinity(self): - """Ticket #1419""" - err = numpy.seterr(divide='ignore') - - try: - dim = 8 - ndimage.zoom(numpy.zeros((dim, dim)), 1./dim, mode='nearest') - finally: - numpy.seterr(**err) - - def test_rotate01(self): - "rotate 1" - data = numpy.array([[0, 0, 0, 0], - [0, 1, 1, 0], - [0, 0, 0, 0]], dtype=numpy.float64) - for order in range(0, 6): - out = ndimage.rotate(data, 0) - assert_array_almost_equal(out, data) - - def test_rotate02(self): - "rotate 2" - data = numpy.array([[0, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 0, 0]], dtype=numpy.float64) - expected = numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 1, 0], - [0, 0, 0]], dtype=numpy.float64) - for order in range(0, 6): - out = ndimage.rotate(data, 90) - assert_array_almost_equal(out, expected) - - def test_rotate03(self): - "rotate 3" - data = numpy.array([[0, 0, 0, 0, 0], - [0, 1, 1, 0, 0], - [0, 0, 0, 0, 0]], dtype=numpy.float64) - expected = numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 1, 0], - [0, 1, 0], - [0, 0, 0]], dtype=numpy.float64) - for order in range(0, 6): - out = ndimage.rotate(data, 90) - assert_array_almost_equal(out, expected) - - def 
test_rotate04(self): - "rotate 4" - data = numpy.array([[0, 0, 0, 0, 0], - [0, 1, 1, 0, 0], - [0, 0, 0, 0, 0]], dtype=numpy.float64) - expected = numpy.array([[0, 0, 0, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 1, 0, 0]], dtype=numpy.float64) - for order in range(0, 6): - out = ndimage.rotate(data, 90, reshape=False) - assert_array_almost_equal(out, expected) - - def test_rotate05(self): - "rotate 5" - data = numpy.empty((4,3,3)) - for i in range(3): - data[:,:,i] = numpy.array([[0,0,0], - [0,1,0], - [0,1,0], - [0,0,0]], dtype=numpy.float64) - - expected = numpy.array([[0,0,0,0], - [0,1,1,0], - [0,0,0,0]], dtype=numpy.float64) - - for order in range(0, 6): - out = ndimage.rotate(data, 90) - for i in range(3): - assert_array_almost_equal(out[:,:,i], expected) - - def test_rotate06(self): - "rotate 6" - data = numpy.empty((3,4,3)) - for i in range(3): - data[:,:,i] = numpy.array([[0,0,0,0], - [0,1,1,0], - [0,0,0,0]], dtype=numpy.float64) - - expected = numpy.array([[0,0,0], - [0,1,0], - [0,1,0], - [0,0,0]], dtype=numpy.float64) - - for order in range(0, 6): - out = ndimage.rotate(data, 90) - for i in range(3): - assert_array_almost_equal(out[:,:,i], expected) - - def test_rotate07(self): - "rotate 7" - data = numpy.array([[[0, 0, 0, 0, 0], - [0, 1, 1, 0, 0], - [0, 0, 0, 0, 0]]] * 2, - dtype=numpy.float64) - data = data.transpose() - expected = numpy.array([[[0, 0, 0], - [0, 1, 0], - [0, 1, 0], - [0, 0, 0], - [0, 0, 0]]] * 2, dtype=numpy.float64) - expected = expected.transpose([2,1,0]) - - for order in range(0, 6): - out = ndimage.rotate(data, 90, axes=(0, 1)) - assert_array_almost_equal(out, expected) - - def test_rotate08(self): - "rotate 8" - data = numpy.array([[[0, 0, 0, 0, 0], - [0, 1, 1, 0, 0], - [0, 0, 0, 0, 0]]] * 2, - dtype=numpy.float64) - data = data.transpose() - expected = numpy.array([[[0, 0, 1, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 0, 0, 0]]] * 2, - dtype=numpy.float64) - expected = expected.transpose() - for order in range(0, 6): - out = ndimage.rotate(data, 
90, axes=(0, 1), - reshape=False) - assert_array_almost_equal(out, expected) - - def test_watershed_ift01(self): - "watershed_ift 1" - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[ -1, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 1, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0]], - numpy.int8) - out = ndimage.watershed_ift(data, markers, - structure=[[1,1,1], - [1,1,1], - [1,1,1]]) - expected = [[-1, -1, -1, -1, -1, -1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, -1, -1, -1, -1, -1, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_watershed_ift02(self): - "watershed_ift 2" - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[ -1, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 1, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0]], - numpy.int8) - out = ndimage.watershed_ift(data, markers) - expected = [[-1, -1, -1, -1, -1, -1, -1], - [-1, -1, 1, 1, 1, -1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, -1, 1, 1, 1, -1, -1], - [-1, -1, -1, -1, -1, -1, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_watershed_ift03(self): - "watershed_ift 3" - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 0, 
1, 0, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 2, 0, 3, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, -1]], - numpy.int8) - out = ndimage.watershed_ift(data, markers) - expected = [[-1, -1, -1, -1, -1, -1, -1], - [-1, -1, 2, -1, 3, -1, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, -1, 2, -1, 3, -1, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_watershed_ift04(self): - "watershed_ift 4" - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 2, 0, 3, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, -1]], - numpy.int8) - out = ndimage.watershed_ift(data, markers, - structure=[[1,1,1], - [1,1,1], - [1,1,1]]) - expected = [[-1, -1, -1, -1, -1, -1, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, 2, 2, 3, 3, 3, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_watershed_ift05(self): - "watershed_ift 5" - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 3, 0, 2, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, -1]], - numpy.int8) - out = 
ndimage.watershed_ift(data, markers, - structure=[[1,1,1], - [1,1,1], - [1,1,1]]) - expected = [[-1, -1, -1, -1, -1, -1, -1], - [-1, 3, 3, 2, 2, 2, -1], - [-1, 3, 3, 2, 2, 2, -1], - [-1, 3, 3, 2, 2, 2, -1], - [-1, 3, 3, 2, 2, 2, -1], - [-1, 3, 3, 2, 2, 2, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_watershed_ift06(self): - "watershed_ift 6" - data = numpy.array([[0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[ -1, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 1, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0]], - numpy.int8) - out = ndimage.watershed_ift(data, markers, - structure=[[1,1,1], - [1,1,1], - [1,1,1]]) - expected = [[-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, -1, -1, -1, -1, -1, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_watershed_ift07(self): - "watershed_ift 7" - shape = (7, 6) - data = numpy.zeros(shape, dtype=numpy.uint8) - data = data.transpose() - data[...] 
= numpy.array([[0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) - markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 1, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0], - [ 0, 0, 0, 0, 0, 0, 0]], - numpy.int8) - out = numpy.zeros(shape, dtype = numpy.int16) - out = out.transpose() - ndimage.watershed_ift(data, markers, - structure=[[1,1,1], - [1,1,1], - [1,1,1]], - output=out) - expected = [[-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, 1, 1, 1, 1, 1, -1], - [-1, -1, -1, -1, -1, -1, -1], - [-1, -1, -1, -1, -1, -1, -1]] - assert_array_almost_equal(out, expected) - - def test_distance_transform_bf01(self): - "brute force distance transform 1" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_bf(data, 'euclidean', - return_indices=True) - expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 2, 4, 2, 1, 0, 0], - [0, 0, 1, 4, 8, 4, 1, 0, 0], - [0, 0, 1, 2, 4, 2, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]] - assert_array_almost_equal(out * out, expected) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 2, 1, 2, 2, 2, 2], - [3, 3, 3, 2, 1, 2, 3, 3, 3], - [4, 4, 4, 4, 6, 4, 4, 4, 4], - [5, 5, 6, 6, 7, 6, 6, 5, 5], - [6, 6, 6, 7, 7, 7, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 2, 4, 6, 6, 7, 8], - [0, 
1, 1, 2, 4, 6, 7, 7, 8], - [0, 1, 1, 1, 6, 7, 7, 7, 8], - [0, 1, 2, 2, 4, 6, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8]]] - assert_array_almost_equal(ft, expected) - - def test_distance_transform_bf02(self): - "brute force distance transform 2" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_bf(data, 'cityblock', - return_indices=True) - - expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 2, 2, 2, 1, 0, 0], - [0, 0, 1, 2, 3, 2, 1, 0, 0], - [0, 0, 1, 2, 2, 2, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]] - assert_array_almost_equal(out, expected) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 2, 1, 2, 2, 2, 2], - [3, 3, 3, 3, 1, 3, 3, 3, 3], - [4, 4, 4, 4, 7, 4, 4, 4, 4], - [5, 5, 6, 7, 7, 7, 6, 5, 5], - [6, 6, 6, 7, 7, 7, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 2, 4, 6, 6, 7, 8], - [0, 1, 1, 1, 4, 7, 7, 7, 8], - [0, 1, 1, 1, 4, 7, 7, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8]]] - assert_array_almost_equal(expected, ft) - - def test_distance_transform_bf03(self): - "brute force distance transform 3" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 
1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_bf(data, 'chessboard', - return_indices=True) - - expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 2, 1, 1, 0, 0], - [0, 0, 1, 2, 2, 2, 1, 0, 0], - [0, 0, 1, 1, 2, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]] - assert_array_almost_equal(out, expected) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 2, 1, 2, 2, 2, 2], - [3, 3, 4, 2, 2, 2, 4, 3, 3], - [4, 4, 5, 6, 6, 6, 5, 4, 4], - [5, 5, 6, 6, 7, 6, 6, 5, 5], - [6, 6, 6, 7, 7, 7, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 2, 5, 6, 6, 7, 8], - [0, 1, 1, 2, 6, 6, 7, 7, 8], - [0, 1, 1, 2, 6, 7, 7, 7, 8], - [0, 1, 2, 2, 6, 6, 7, 7, 8], - [0, 1, 2, 4, 5, 6, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8]]] - assert_array_almost_equal(ft, expected) - - def test_distance_transform_bf04(self): - "brute force distance transform 4" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - tdt, tft = ndimage.distance_transform_bf(data, - return_indices=1) - dts = [] - fts = [] - dt = numpy.zeros(data.shape, dtype=numpy.float64) - ndimage.distance_transform_bf(data, distances=dt) - dts.append(dt) - ft = ndimage.distance_transform_bf(data, - return_distances=False, return_indices=1) - fts.append(ft) - ft = numpy.indices(data.shape, dtype=numpy.int32) - ndimage.distance_transform_bf(data, - return_distances=False, return_indices=True, 
indices=ft) - fts.append(ft) - dt, ft = ndimage.distance_transform_bf(data, - return_indices=1) - dts.append(dt) - fts.append(ft) - dt = numpy.zeros(data.shape, dtype=numpy.float64) - ft = ndimage.distance_transform_bf(data, distances=dt, - return_indices=True) - dts.append(dt) - fts.append(ft) - ft = numpy.indices(data.shape, dtype=numpy.int32) - dt = ndimage.distance_transform_bf(data, - return_indices=True, indices=ft) - dts.append(dt) - fts.append(ft) - dt = numpy.zeros(data.shape, dtype=numpy.float64) - ft = numpy.indices(data.shape, dtype=numpy.int32) - ndimage.distance_transform_bf(data, distances=dt, - return_indices=True, indices=ft) - dts.append(dt) - fts.append(ft) - for dt in dts: - assert_array_almost_equal(tdt, dt) - for ft in fts: - assert_array_almost_equal(tft, ft) - - def test_distance_transform_bf05(self): - "brute force distance transform 5" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_bf(data, - 'euclidean', return_indices=True, sampling=[2, 2]) - expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 4, 4, 4, 0, 0, 0], - [0, 0, 4, 8, 16, 8, 4, 0, 0], - [0, 0, 4, 16, 32, 16, 4, 0, 0], - [0, 0, 4, 8, 16, 8, 4, 0, 0], - [0, 0, 0, 4, 4, 4, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]] - assert_array_almost_equal(out * out, expected) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 2, 1, 2, 2, 2, 2], - [3, 3, 3, 2, 1, 2, 3, 3, 3], - [4, 4, 4, 4, 6, 4, 4, 4, 4], - [5, 5, 6, 6, 7, 6, 6, 5, 5], - [6, 6, 6, 7, 7, 7, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 
6, 7, 8], - [0, 1, 2, 2, 4, 6, 6, 7, 8], - [0, 1, 1, 2, 4, 6, 7, 7, 8], - [0, 1, 1, 1, 6, 7, 7, 7, 8], - [0, 1, 2, 2, 4, 6, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8]]] - assert_array_almost_equal(ft, expected) - - def test_distance_transform_bf06(self): - "brute force distance transform 6" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_bf(data, - 'euclidean', return_indices=True, sampling=[2, 1]) - expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 4, 1, 0, 0, 0], - [0, 0, 1, 4, 8, 4, 1, 0, 0], - [0, 0, 1, 4, 9, 4, 1, 0, 0], - [0, 0, 1, 4, 8, 4, 1, 0, 0], - [0, 0, 0, 1, 4, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]] - assert_array_almost_equal(out * out, expected) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 2, 2, 2, 2, 2, 2], - [3, 3, 3, 3, 2, 3, 3, 3, 3], - [4, 4, 4, 4, 4, 4, 4, 4, 4], - [5, 5, 5, 5, 6, 5, 5, 5, 5], - [6, 6, 6, 6, 7, 6, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 2, 6, 6, 6, 7, 8], - [0, 1, 1, 1, 6, 7, 7, 7, 8], - [0, 1, 1, 1, 7, 7, 7, 7, 8], - [0, 1, 1, 1, 6, 7, 7, 7, 8], - [0, 1, 2, 2, 4, 6, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8]]] - assert_array_almost_equal(ft, expected) - - def test_distance_transform_cdt01(self): - "chamfer type distance transform 1" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - 
[0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_cdt(data, - 'cityblock', return_indices=True) - bf = ndimage.distance_transform_bf(data, 'cityblock') - assert_array_almost_equal(bf, out) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 1, 1, 1, 2, 2, 2], - [3, 3, 2, 1, 1, 1, 2, 3, 3], - [4, 4, 4, 4, 1, 4, 4, 4, 4], - [5, 5, 5, 5, 7, 7, 6, 5, 5], - [6, 6, 6, 6, 7, 7, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 1, 1, 4, 7, 7, 7, 8], - [0, 1, 1, 1, 4, 5, 6, 7, 8], - [0, 1, 2, 2, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8],]] - assert_array_almost_equal(ft, expected) - - def test_distance_transform_cdt02(self): - "chamfer type distance transform 2" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_cdt(data, 'chessboard', - return_indices=True) - bf = ndimage.distance_transform_bf(data, 'chessboard') - assert_array_almost_equal(bf, out) - - expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [2, 2, 2, 1, 1, 1, 2, 2, 2], - [3, 3, 2, 2, 1, 2, 2, 3, 3], - [4, 4, 3, 2, 2, 2, 3, 4, 4], - [5, 5, 4, 6, 7, 6, 4, 5, 5], - [6, 6, 6, 6, 7, 7, 6, 6, 6], - [7, 7, 7, 7, 7, 7, 7, 7, 7], - [8, 8, 8, 8, 8, 8, 8, 8, 8]], - [[0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 2, 3, 4, 6, 7, 8], - [0, 1, 1, 2, 2, 6, 6, 7, 8], - [0, 1, 1, 1, 2, 6, 7, 7, 
8], - [0, 1, 1, 2, 6, 6, 7, 7, 8], - [0, 1, 2, 2, 5, 6, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 2, 3, 4, 5, 6, 7, 8],]] - assert_array_almost_equal(ft, expected) - - def test_distance_transform_cdt03(self): - "chamfer type distance transform 3" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - tdt, tft = ndimage.distance_transform_cdt(data, - return_indices=True) - dts = [] - fts = [] - dt = numpy.zeros(data.shape, dtype = numpy.int32) - ndimage.distance_transform_cdt(data, distances = dt) - dts.append(dt) - ft = ndimage.distance_transform_cdt(data, - return_distances=False, return_indices=True) - fts.append(ft) - ft = numpy.indices(data.shape, dtype=numpy.int32) - ndimage.distance_transform_cdt(data, - return_distances=False, return_indices=True, indices=ft) - fts.append(ft) - dt, ft = ndimage.distance_transform_cdt(data, - return_indices=True) - dts.append(dt) - fts.append(ft) - dt = numpy.zeros(data.shape, dtype=numpy.int32) - ft = ndimage.distance_transform_cdt(data, distances=dt, - return_indices = True) - dts.append(dt) - fts.append(ft) - ft = numpy.indices(data.shape, dtype=numpy.int32) - dt = ndimage.distance_transform_cdt(data, - return_indices=True, indices=ft) - dts.append(dt) - fts.append(ft) - dt = numpy.zeros(data.shape, dtype=numpy.int32) - ft = numpy.indices(data.shape, dtype=numpy.int32) - ndimage.distance_transform_cdt(data, distances=dt, - return_indices=True, indices=ft) - dts.append(dt) - fts.append(ft) - for dt in dts: - assert_array_almost_equal(tdt, dt) - for ft in fts: - assert_array_almost_equal(tft, ft) - - def test_distance_transform_edt01(self): - "euclidean distance transform 1" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 
0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - out, ft = ndimage.distance_transform_edt(data, - return_indices=True) - bf = ndimage.distance_transform_bf(data, 'euclidean') - assert_array_almost_equal(bf, out) - - dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype) - dt = dt.astype(numpy.float64) - numpy.multiply(dt, dt, dt) - dt = numpy.add.reduce(dt, axis=0) - numpy.sqrt(dt, dt) - - assert_array_almost_equal(bf, dt) - - def test_distance_transform_edt02(self): - "euclidean distance transform 2" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - tdt, tft = ndimage.distance_transform_edt(data, - return_indices=True) - dts = [] - fts = [] - dt = numpy.zeros(data.shape, dtype=numpy.float64) - ndimage.distance_transform_edt(data, distances=dt) - dts.append(dt) - ft = ndimage.distance_transform_edt(data, - return_distances=0, return_indices=True) - fts.append(ft) - ft = numpy.indices(data.shape, dtype=numpy.int32) - ndimage.distance_transform_edt(data, - return_distances=False,return_indices=True, indices=ft) - fts.append(ft) - dt, ft = ndimage.distance_transform_edt(data, - return_indices=True) - dts.append(dt) - fts.append(ft) - dt = numpy.zeros(data.shape, dtype=numpy.float64) - ft = ndimage.distance_transform_edt(data, distances=dt, - return_indices=True) - dts.append(dt) - fts.append(ft) - ft = numpy.indices(data.shape, dtype=numpy.int32) - dt = ndimage.distance_transform_edt(data, - return_indices=True, indices=ft) - dts.append(dt) - fts.append(ft) - dt = 
numpy.zeros(data.shape, dtype=numpy.float64) - ft = numpy.indices(data.shape, dtype=numpy.int32) - ndimage.distance_transform_edt(data, distances=dt, - return_indices=True, indices=ft) - dts.append(dt) - fts.append(ft) - for dt in dts: - assert_array_almost_equal(tdt, dt) - for ft in fts: - assert_array_almost_equal(tft, ft) - - def test_distance_transform_edt03(self): - "euclidean distance transform 3" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - ref = ndimage.distance_transform_bf(data, 'euclidean', - sampling=[2, 2]) - out = ndimage.distance_transform_edt(data, - sampling=[2, 2]) - assert_array_almost_equal(ref, out) - - - def test_distance_transform_edt4(self): - "euclidean distance transform 4" - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], type) - ref = ndimage.distance_transform_bf(data, 'euclidean', - sampling=[2, 1]) - out = ndimage.distance_transform_edt(data, - sampling=[2, 1]) - assert_array_almost_equal(ref, out) - - def test_generate_structure01(self): - "generation of a binary structure 1" - struct = ndimage.generate_binary_structure(0, 1) - assert_array_almost_equal(struct, 1) - - def test_generate_structure02(self): - "generation of a binary structure 2" - struct = ndimage.generate_binary_structure(1, 1) - assert_array_almost_equal(struct, [1, 1, 1]) - - def test_generate_structure03(self): - "generation of a binary structure 3" - struct = ndimage.generate_binary_structure(2, 1) - 
assert_array_almost_equal(struct, [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]]) - - def test_generate_structure04(self): - "generation of a binary structure 4" - struct = ndimage.generate_binary_structure(2, 2) - assert_array_almost_equal(struct, [[1, 1, 1], - [1, 1, 1], - [1, 1, 1]]) - - def test_iterate_structure01(self): - "iterating a structure 1" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - out = ndimage.iterate_structure(struct, 2) - assert_array_almost_equal(out, [[0, 0, 1, 0, 0], - [0, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - [0, 1, 1, 1, 0], - [0, 0, 1, 0, 0]]) - - def test_iterate_structure02(self): - "iterating a structure 2" - struct = [[0, 1], - [1, 1], - [0, 1]] - out = ndimage.iterate_structure(struct, 2) - assert_array_almost_equal(out, [[0, 0, 1], - [0, 1, 1], - [1, 1, 1], - [0, 1, 1], - [0, 0, 1]]) - - def test_iterate_structure03(self): - "iterating a structure 3" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - out = ndimage.iterate_structure(struct, 2, 1) - expected = [[0, 0, 1, 0, 0], - [0, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - [0, 1, 1, 1, 0], - [0, 0, 1, 0, 0]] - assert_array_almost_equal(out[0], expected) - assert_equal(out[1], [2, 2]) - - def test_binary_erosion01(self): - "binary erosion 1" - for type in self.types: - data = numpy.ones([], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, 1) - - def test_binary_erosion02(self): - "binary erosion 2" - for type in self.types: - data = numpy.ones([], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, 1) - - def test_binary_erosion03(self): - "binary erosion 3" - for type in self.types: - data = numpy.ones([1], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [0]) - - def test_binary_erosion04(self): - "binary erosion 4" - for type in self.types: - data = numpy.ones([1], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [1]) - - def test_binary_erosion05(self): - 
"binary erosion 5" - for type in self.types: - data = numpy.ones([3], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [0, 1, 0]) - - def test_binary_erosion06(self): - "binary erosion 6" - for type in self.types: - data = numpy.ones([3], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [1, 1, 1]) - - def test_binary_erosion07(self): - "binary erosion 7" - for type in self.types: - data = numpy.ones([5], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [0, 1, 1, 1, 0]) - - def test_binary_erosion08(self): - "binary erosion 8" - for type in self.types: - data = numpy.ones([5], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [1, 1, 1, 1, 1]) - - def test_binary_erosion09(self): - "binary erosion 9" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [0, 0, 0, 0, 0]) - - def test_binary_erosion10(self): - "binary erosion 10" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [1, 0, 0, 0, 1]) - - def test_binary_erosion11(self): - "binary erosion 11" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - struct = [1, 0, 1] - out = ndimage.binary_erosion(data, struct, - border_value=1) - assert_array_almost_equal(out, [1, 0, 1, 0, 1]) - - def test_binary_erosion12(self): - "binary erosion 12" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - struct = [1, 0, 1] - out = ndimage.binary_erosion(data, struct, - border_value=1, - origin=-1) - assert_array_almost_equal(out, [0, 1, 0, 1, 1]) - - def test_binary_erosion13(self): - "binary erosion 13" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - struct = [1, 0, 1] - out = ndimage.binary_erosion(data, struct, - border_value=1, - 
origin=1) - assert_array_almost_equal(out, [1, 1, 0, 1, 0]) - - def test_binary_erosion14(self): - "binary erosion 14" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - struct = [1, 1] - out = ndimage.binary_erosion(data, struct, - border_value=1) - assert_array_almost_equal(out, [1, 1, 0, 0, 1]) - - def test_binary_erosion15(self): - "binary erosion 15" - for type in self.types: - data = numpy.ones([5], type) - data[2] = 0 - struct = [1, 1] - out = ndimage.binary_erosion(data, struct, - border_value=1, - origin=-1) - assert_array_almost_equal(out, [1, 0, 0, 1, 1]) - - def test_binary_erosion16(self): - "binary erosion 16" - for type in self.types: - data = numpy.ones([1, 1], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [[1]]) - - def test_binary_erosion17(self): - "binary erosion 17" - for type in self.types: - data = numpy.ones([1, 1], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [[0]]) - - def test_binary_erosion18(self): - "binary erosion 18" - for type in self.types: - data = numpy.ones([1, 3], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [[0, 0, 0]]) - - def test_binary_erosion19(self): - "binary erosion 19" - for type in self.types: - data = numpy.ones([1, 3], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [[1, 1, 1]]) - - def test_binary_erosion20(self): - "binary erosion 20" - for type in self.types: - data = numpy.ones([3, 3], type) - out = ndimage.binary_erosion(data) - assert_array_almost_equal(out, [[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - - def test_binary_erosion21(self): - "binary erosion 21" - for type in self.types: - data = numpy.ones([3, 3], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, [[1, 1, 1], - [1, 1, 1], - [1, 1, 1]]) - - def test_binary_erosion22(self): - "binary erosion 22" - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 
0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_erosion(data, border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_erosion23(self): - "binary erosion 23" - struct = ndimage.generate_binary_structure(2, 2) - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_erosion(data, struct, - border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_erosion24(self): - "binary erosion 24" - struct = [[0, 1], - [1, 1]] - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_erosion(data, struct, - border_value=1) - assert_array_almost_equal(out, expected) 
- - def test_binary_erosion25(self): - "binary erosion 25" - struct = [[0, 1, 0], - [1, 0, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 1, 1, 1, 0, 1, 1], - [0, 0, 1, 0, 1, 1, 0, 0], - [0, 1, 0, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_erosion(data, struct, - border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_erosion26(self): - "binary erosion 26" - struct = [[0, 1, 0], - [1, 0, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 1, 0, 0, 1], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 1]] - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 1, 1, 1, 0, 1, 1], - [0, 0, 1, 0, 1, 1, 0, 0], - [0, 1, 0, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_erosion(data, struct, - border_value=1, origin=(-1, -1)) - assert_array_almost_equal(out, expected) - - def test_binary_erosion27(self): - "binary erosion 27" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], bool) - out = 
ndimage.binary_erosion(data, struct, - border_value=1, iterations=2) - assert_array_almost_equal(out, expected) - - def test_binary_erosion28(self): - "binary erosion 28" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], bool) - out = numpy.zeros(data.shape, bool) - ndimage.binary_erosion(data, struct, border_value=1, - iterations=2, output=out) - assert_array_almost_equal(out, expected) - - def test_binary_erosion29(self): - "binary erosion 29" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0]], bool) - out = ndimage.binary_erosion(data, struct, - border_value=1, iterations=3) - assert_array_almost_equal(out, expected) - - def test_binary_erosion30(self): - "binary erosion 30" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0]], bool) - out = numpy.zeros(data.shape, bool) - ndimage.binary_erosion(data, struct, border_value=1, - iterations=3, 
output=out) - assert_array_almost_equal(out, expected) - - def test_binary_erosion31(self): - "binary erosion 31" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0], - [1, 1, 1, 1, 1, 0, 1], - [0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 1]] - data = numpy.array([[0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0]], bool) - out = numpy.zeros(data.shape, bool) - ndimage.binary_erosion(data, struct, border_value=1, - iterations=1, output=out, origin=(-1, -1)) - assert_array_almost_equal(out, expected) - - def test_binary_erosion32(self): - "binary erosion 32" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], bool) - out = ndimage.binary_erosion(data, struct, - border_value=1, iterations=2) - assert_array_almost_equal(out, expected) - - def test_binary_erosion33(self): - "binary erosion 33" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 1, 1], - [0, 0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - mask = [[1, 1, 1, 1, 1, 0, 0], - [1, 1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1]] - data = numpy.array([[0, 0, 0, 0, 0, 1, 1], - [0, 0, 0, 1, 0, 0, 1], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - 
[0, 0, 0, 0, 0, 0, 0]], bool) - out = ndimage.binary_erosion(data, struct, - border_value=1, mask=mask, iterations=-1) - assert_array_almost_equal(out, expected) - - def test_binary_erosion34(self): - "binary erosion 34" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - mask = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], bool) - out = ndimage.binary_erosion(data, struct, - border_value=1, mask=mask) - assert_array_almost_equal(out, expected) - - def test_binary_erosion35(self): - "binary erosion 35" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - mask = [[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]] - data = numpy.array([[0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 0, 0, 0]], bool) - tmp = [[0, 0, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0], - [1, 1, 1, 1, 1, 0, 1], - [0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 1]] - expected = numpy.logical_and(tmp, mask) - tmp = numpy.logical_and(data, numpy.logical_not(mask)) - expected = numpy.logical_or(expected, tmp) - out = numpy.zeros(data.shape, bool) - ndimage.binary_erosion(data, struct, border_value=1, - iterations=1, output=out, - origin=(-1, -1), mask=mask) - assert_array_almost_equal(out, expected) - - def 
test_binary_erosion36(self): - "binary erosion 36" - struct = [[0, 1, 0], - [1, 0, 1], - [0, 1, 0]] - mask = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - tmp = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 1, 0, 0, 1], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 1]] - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 1, 1, 1, 0, 1, 1], - [0, 0, 1, 0, 1, 1, 0, 0], - [0, 1, 0, 1, 1, 1, 1, 0], - [0, 1, 1, 0, 0, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - expected = numpy.logical_and(tmp, mask) - tmp = numpy.logical_and(data, numpy.logical_not(mask)) - expected = numpy.logical_or(expected, tmp) - out = ndimage.binary_erosion(data, struct, mask=mask, - border_value=1, origin=(-1, -1)) - assert_array_almost_equal(out, expected) - - def test_binary_dilation01(self): - "binary dilation 1" - for type in self.types: - data = numpy.ones([], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, 1) - - def test_binary_dilation02(self): - "binary dilation 2" - for type in self.types: - data = numpy.zeros([], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, 0) - - def test_binary_dilation03(self): - "binary dilation 3" - for type in self.types: - data = numpy.ones([1], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [1]) - - def test_binary_dilation04(self): - "binary dilation 4" - for type in self.types: - data = numpy.zeros([1], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [0]) - - def test_binary_dilation05(self): - "binary dilation 5" - for type in self.types: - data = numpy.ones([3], type) - out = 
ndimage.binary_dilation(data) - assert_array_almost_equal(out, [1, 1, 1]) - - def test_binary_dilation06(self): - "binary dilation 6" - for type in self.types: - data = numpy.zeros([3], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [0, 0, 0]) - - def test_binary_dilation07(self): - "binary dilation 7" - struct = ndimage.generate_binary_structure(1, 1) - for type in self.types: - data = numpy.zeros([3], type) - data[1] = 1 - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [1, 1, 1]) - - def test_binary_dilation08(self): - "binary dilation 8" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - data[3] = 1 - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [1, 1, 1, 1, 1]) - - def test_binary_dilation09(self): - "binary dilation 9" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [1, 1, 1, 0, 0]) - - def test_binary_dilation10(self): - "binary dilation 10" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - out = ndimage.binary_dilation(data, origin=-1) - assert_array_almost_equal(out, [0, 1, 1, 1, 0]) - - def test_binary_dilation11(self): - "binary dilation 11" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - out = ndimage.binary_dilation(data, origin=1) - assert_array_almost_equal(out, [1, 1, 0, 0, 0]) - - def test_binary_dilation12(self): - "binary dilation 12" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - struct = [1, 0, 1] - out = ndimage.binary_dilation(data, struct) - assert_array_almost_equal(out, [1, 0, 1, 0, 0]) - - def test_binary_dilation13(self): - "binary dilation 13" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - struct = [1, 0, 1] - out = ndimage.binary_dilation(data, struct, - border_value=1) - assert_array_almost_equal(out, [1, 0, 1, 0, 1]) - - def 
test_binary_dilation14(self): - "binary dilation 14" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - struct = [1, 0, 1] - out = ndimage.binary_dilation(data, struct, - origin=-1) - assert_array_almost_equal(out, [0, 1, 0, 1, 0]) - - def test_binary_dilation15(self): - "binary dilation 15" - for type in self.types: - data = numpy.zeros([5], type) - data[1] = 1 - struct = [1, 0, 1] - out = ndimage.binary_dilation(data, struct, - origin=-1, border_value=1) - assert_array_almost_equal(out, [1, 1, 0, 1, 0]) - - def test_binary_dilation16(self): - "binary dilation 16" - for type in self.types: - data = numpy.ones([1, 1], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [[1]]) - - def test_binary_dilation17(self): - "binary dilation 17" - for type in self.types: - data = numpy.zeros([1, 1], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [[0]]) - - def test_binary_dilation18(self): - "binary dilation 18" - for type in self.types: - data = numpy.ones([1, 3], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [[1, 1, 1]]) - - def test_binary_dilation19(self): - "binary dilation 19" - for type in self.types: - data = numpy.ones([3, 3], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [[1, 1, 1], - [1, 1, 1], - [1, 1, 1]]) - - def test_binary_dilation20(self): - "binary dilation 20" - for type in self.types: - data = numpy.zeros([3, 3], type) - data[1, 1] = 1 - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]]) - - def test_binary_dilation21(self): - "binary dilation 21" - struct = ndimage.generate_binary_structure(2, 2) - for type in self.types: - data = numpy.zeros([3, 3], type) - data[1, 1] = 1 - out = ndimage.binary_dilation(data, struct) - assert_array_almost_equal(out, [[1, 1, 1], - [1, 1, 1], - [1, 1, 1]]) - - def test_binary_dilation22(self): - "binary dilation 22" - expected = 
[[0, 1, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data) - assert_array_almost_equal(out, expected) - - def test_binary_dilation23(self): - "binary dilation 23" - expected = [[1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 0, 0, 0, 0, 1], - [1, 1, 0, 0, 0, 1, 0, 1], - [1, 0, 0, 1, 1, 1, 1, 1], - [1, 0, 1, 1, 1, 1, 0, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [1, 0, 1, 0, 0, 1, 0, 1], - [1, 1, 1, 1, 1, 1, 1, 1]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_dilation24(self): - "binary dilation 24" - expected = [[1, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 0, 0], - [0, 1, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, origin=(1, 1)) - assert_array_almost_equal(out, expected) - - def test_binary_dilation25(self): - "binary dilation 25" - expected 
= [[1, 1, 0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 1, 0, 1, 1], - [0, 0, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 0, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [0, 1, 0, 0, 1, 0, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, origin=(1, 1), - border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_dilation26(self): - "binary dilation 26" - struct = ndimage.generate_binary_structure(2, 2) - expected = [[1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, struct) - assert_array_almost_equal(out, expected) - - def test_binary_dilation27(self): - "binary dilation 27" - struct = [[0, 1], - [1, 1]] - expected = [[0, 1, 0, 0, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, struct) - 
assert_array_almost_equal(out, expected) - - def test_binary_dilation28(self): - "binary dilation 28" - expected = [[1, 1, 1, 1], - [1, 0, 0, 1], - [1, 0, 0, 1], - [1, 1, 1, 1]] - - for type in self.types: - data = numpy.array([[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_dilation29(self): - "binary dilation 29" - struct = [[0, 1], - [1, 1]] - expected = [[0, 0, 0, 0, 0], - [0, 0, 0, 1, 0], - [0, 0, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]] - - data = numpy.array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 1, 0], - [0, 0, 0, 0, 0]], bool) - out = ndimage.binary_dilation(data, struct, - iterations=2) - assert_array_almost_equal(out, expected) - - def test_binary_dilation30(self): - "binary dilation 30" - struct = [[0, 1], - [1, 1]] - expected = [[0, 0, 0, 0, 0], - [0, 0, 0, 1, 0], - [0, 0, 1, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0]] - - data = numpy.array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 1, 0], - [0, 0, 0, 0, 0]], bool) - out = numpy.zeros(data.shape, bool) - ndimage.binary_dilation(data, struct, iterations=2, - output=out) - assert_array_almost_equal(out, expected) - - def test_binary_dilation31(self): - "binary dilation 31" - struct = [[0, 1], - [1, 1]] - expected = [[0, 0, 0, 1, 0], - [0, 0, 1, 1, 0], - [0, 1, 1, 1, 0], - [1, 1, 1, 1, 0], - [0, 0, 0, 0, 0]] - - data = numpy.array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 1, 0], - [0, 0, 0, 0, 0]], bool) - out = ndimage.binary_dilation(data, struct, - iterations=3) - assert_array_almost_equal(out, expected) - - def test_binary_dilation32(self): - "binary dilation 32" - struct = [[0, 1], - [1, 1]] - expected = [[0, 0, 0, 1, 0], - [0, 0, 1, 1, 0], - [0, 1, 1, 1, 0], - [1, 1, 1, 1, 0], - [0, 0, 0, 0, 0]] - - data = numpy.array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - 
[0, 0, 0, 1, 0], - [0, 0, 0, 0, 0]], bool) - out = numpy.zeros(data.shape, bool) - ndimage.binary_dilation(data, struct, iterations=3, - output=out) - assert_array_almost_equal(out, expected) - - def test_binary_dilation33(self): - "binary dilation 33" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - - out = ndimage.binary_dilation(data, struct, - iterations=-1, mask=mask, border_value=0) - assert_array_almost_equal(out, expected) - - def test_binary_dilation34(self): - "binary dilation 34" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.zeros(mask.shape, bool) - out = ndimage.binary_dilation(data, struct, - iterations=-1, mask=mask, border_value=1) - assert_array_almost_equal(out, expected) - - def 
test_binary_dilation35(self): - "binary dilation 35" - tmp = [[1, 1, 0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 1, 0, 1, 1], - [0, 0, 1, 1, 1, 1, 1, 1], - [0, 1, 1, 1, 1, 0, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [0, 1, 0, 0, 1, 0, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1]] - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - mask = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - expected = numpy.logical_and(tmp, mask) - tmp = numpy.logical_and(data, numpy.logical_not(mask)) - expected = numpy.logical_or(expected, tmp) - for type in self.types: - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_dilation(data, mask=mask, - origin=(1, 1), border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_propagation01(self): - "binary propagation 1" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 0, 0, 0], - [0, 1, 1, 0, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 
0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - - out = ndimage.binary_propagation(data, struct, - mask=mask, border_value=0) - assert_array_almost_equal(out, expected) - - def test_binary_propagation02(self): - "binary propagation 2" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.zeros(mask.shape, bool) - out = ndimage.binary_propagation(data, struct, - mask=mask, border_value=1) - assert_array_almost_equal(out, expected) - - def test_binary_opening01(self): - "binary opening 1" - expected = [[0, 1, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 1, 1, 1, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 0, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_opening(data) - assert_array_almost_equal(out, expected) - - def test_binary_opening02(self): - "binary opening 2" - struct = ndimage.generate_binary_structure(2, 2) - expected = [[1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 0, 0, 
0], - [0, 1, 1, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 0, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_opening(data, struct) - assert_array_almost_equal(out, expected) - - def test_binary_closing01(self): - "binary closing 1" - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 0, 1, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_closing(data) - assert_array_almost_equal(out, expected) - - def test_binary_closing02(self): - "binary closing 2" - struct = ndimage.generate_binary_structure(2, 2) - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 0, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_closing(data, struct) - assert_array_almost_equal(out, expected) - - def test_binary_fill_holes01(self): - "binary fill holes 1" - expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 
1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - out = ndimage.binary_fill_holes(data) - assert_array_almost_equal(out, expected) - - def test_binary_fill_holes02(self): - "binary fill holes 2" - expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 1, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - out = ndimage.binary_fill_holes(data) - assert_array_almost_equal(out, expected) - - def test_binary_fill_holes03(self): - "binary fill holes 3" - expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 1, 1, 1, 0, 1, 1, 1], - [0, 1, 1, 1, 0, 1, 1, 1], - [0, 1, 1, 1, 0, 1, 1, 1], - [0, 0, 1, 0, 0, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 1, 0, 1, 1, 1], - [0, 1, 0, 1, 0, 1, 0, 1], - [0, 1, 0, 1, 0, 1, 0, 1], - [0, 0, 1, 0, 0, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 0, 0]], bool) - out = ndimage.binary_fill_holes(data) - assert_array_almost_equal(out, expected) - - def test_grey_erosion01(self): - "grey erosion 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - output = ndimage.grey_erosion(array, - footprint=footprint) - assert_array_almost_equal([[2, 2, 1, 1, 1], - [2, 3, 1, 3, 1], - [5, 5, 3, 3, 1]], output) - - def test_grey_erosion02(self): - "grey erosion 2" - array = numpy.array([[3, 2, 5, 1, 4], - 
[7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - output = ndimage.grey_erosion(array, - footprint=footprint, structure=structure) - assert_array_almost_equal([[2, 2, 1, 1, 1], - [2, 3, 1, 3, 1], - [5, 5, 3, 3, 1]], output) - - def test_grey_erosion03(self): - "grey erosion 3" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[1, 1, 1], [1, 1, 1]] - output = ndimage.grey_erosion(array, - footprint=footprint, structure=structure) - assert_array_almost_equal([[1, 1, 0, 0, 0], - [1, 2, 0, 2, 0], - [4, 4, 2, 2, 0]], output) - - def test_grey_dilation01(self): - "grey dilation 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[0, 1, 1], [1, 0, 1]] - output = ndimage.grey_dilation(array, - footprint=footprint) - assert_array_almost_equal([[7, 7, 9, 9, 5], - [7, 9, 8, 9, 7], - [8, 8, 8, 7, 7]], output) - - def test_grey_dilation02(self): - "grey dilation 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[0, 1, 1], [1, 0, 1]] - structure = [[0, 0, 0], [0, 0, 0]] - output = ndimage.grey_dilation(array, - footprint=footprint, structure=structure) - assert_array_almost_equal([[7, 7, 9, 9, 5], - [7, 9, 8, 9, 7], - [8, 8, 8, 7, 7]], output) - - def test_grey_dilation03(self): - "grey dilation 3" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[0, 1, 1], [1, 0, 1]] - structure = [[1, 1, 1], [1, 1, 1]] - output = ndimage.grey_dilation(array, - footprint=footprint, structure=structure) - assert_array_almost_equal([[8, 8, 10, 10, 6], - [8, 10, 9, 10, 8], - [9, 9, 9, 8, 8]], output) - - def test_grey_opening01(self): - "grey opening 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - tmp = ndimage.grey_erosion(array, 
footprint=footprint) - expected = ndimage.grey_dilation(tmp, footprint=footprint) - output = ndimage.grey_opening(array, - footprint=footprint) - assert_array_almost_equal(expected, output) - - - def test_grey_opening02(self): - "grey opening 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp = ndimage.grey_erosion(array, footprint=footprint, - structure=structure) - expected = ndimage.grey_dilation(tmp, footprint=footprint, - structure=structure) - output = ndimage.grey_opening(array, - footprint=footprint, structure=structure) - assert_array_almost_equal(expected, output) - - def test_grey_closing01(self): - "grey closing 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - tmp = ndimage.grey_dilation(array, footprint=footprint) - expected = ndimage.grey_erosion(tmp, footprint=footprint) - output = ndimage.grey_closing(array, - footprint=footprint) - assert_array_almost_equal(expected, output) - - def test_grey_closing02(self): - "grey closing 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp = ndimage.grey_dilation(array, footprint=footprint, - structure=structure) - expected = ndimage.grey_erosion(tmp, footprint=footprint, - structure=structure) - output = ndimage.grey_closing(array, - footprint=footprint, structure=structure) - assert_array_almost_equal(expected, output) - - def test_morphological_gradient01(self): - "morphological gradient 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp1 = ndimage.grey_dilation(array, - footprint=footprint, structure=structure) - tmp2 = ndimage.grey_erosion(array, footprint=footprint, - structure=structure) - expected = tmp1 - 
tmp2 - output = numpy.zeros(array.shape, array.dtype) - ndimage.morphological_gradient(array, - footprint=footprint, structure=structure, output=output) - assert_array_almost_equal(expected, output) - - def test_morphological_gradient02(self): - "morphological gradient 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp1 = ndimage.grey_dilation(array, - footprint=footprint, structure=structure) - tmp2 = ndimage.grey_erosion(array, footprint=footprint, - structure=structure) - expected = tmp1 - tmp2 - output =ndimage.morphological_gradient(array, - footprint=footprint, structure=structure) - assert_array_almost_equal(expected, output) - - def test_morphological_laplace01(self): - "morphological laplace 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp1 = ndimage.grey_dilation(array, - footprint=footprint, structure=structure) - tmp2 = ndimage.grey_erosion(array, footprint=footprint, - structure=structure) - expected = tmp1 + tmp2 - 2 * array - output = numpy.zeros(array.shape, array.dtype) - ndimage.morphological_laplace(array, footprint=footprint, - structure=structure, output=output) - assert_array_almost_equal(expected, output) - - def test_morphological_laplace02(self): - "morphological laplace 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp1 = ndimage.grey_dilation(array, - footprint=footprint, structure=structure) - tmp2 = ndimage.grey_erosion(array, footprint=footprint, - structure=structure) - expected = tmp1 + tmp2 - 2 * array - output = ndimage.morphological_laplace(array, - footprint=footprint, structure=structure) - assert_array_almost_equal(expected, output) - - def test_white_tophat01(self): - "white tophat 1" - array = 
numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp = ndimage.grey_opening(array, footprint=footprint, - structure=structure) - expected = array - tmp - output = numpy.zeros(array.shape, array.dtype) - ndimage.white_tophat(array, footprint=footprint, - structure=structure, output=output) - assert_array_almost_equal(expected, output) - - def test_white_tophat02(self): - "white tophat 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp = ndimage.grey_opening(array, footprint=footprint, - structure=structure) - expected = array - tmp - output = ndimage.white_tophat(array, footprint=footprint, - structure=structure) - assert_array_almost_equal(expected, output) - - def test_black_tophat01(self): - "black tophat 1" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp = ndimage.grey_closing(array, footprint=footprint, - structure=structure) - expected = tmp - array - output = numpy.zeros(array.shape, array.dtype) - ndimage.black_tophat(array, footprint=footprint, - structure=structure, output=output) - assert_array_almost_equal(expected, output) - - def test_black_tophat02(self): - "black tophat 2" - array = numpy.array([[3, 2, 5, 1, 4], - [7, 6, 9, 3, 5], - [5, 8, 3, 7, 1]]) - footprint = [[1, 0, 1], [1, 1, 0]] - structure = [[0, 0, 0], [0, 0, 0]] - tmp = ndimage.grey_closing(array, footprint=footprint, - structure=structure) - expected = tmp - array - output = ndimage.black_tophat(array, footprint=footprint, - structure=structure) - assert_array_almost_equal(expected, output) - - def test_hit_or_miss01(self): - "binary hit-or-miss transform 1" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0], - [0, 1, 0, 0, 0], - [0, 0, 0, 0, 0], - 
[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 1, 0, 0, 0], - [1, 1, 1, 0, 0], - [0, 1, 0, 1, 1], - [0, 0, 1, 1, 1], - [0, 1, 1, 1, 0], - [0, 1, 1, 1, 1], - [0, 1, 1, 1, 1], - [0, 0, 0, 0, 0]], type) - out = numpy.zeros(data.shape, bool) - ndimage.binary_hit_or_miss(data, struct, - output=out) - assert_array_almost_equal(expected, out) - - def test_hit_or_miss02(self): - "binary hit-or-miss transform 2" - struct = [[0, 1, 0], - [1, 1, 1], - [0, 1, 0]] - expected = [[0, 0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], - [1, 1, 1, 0, 0, 1, 0, 0], - [0, 1, 0, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_hit_or_miss(data, struct) - assert_array_almost_equal(expected, out) - - def test_hit_or_miss03(self): - "binary hit-or-miss transform 3" - struct1 = [[0, 0, 0], - [1, 1, 1], - [0, 0, 0]] - struct2 = [[1, 1, 1], - [0, 0, 0], - [1, 1, 1]] - expected = [[0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]] - for type in self.types: - data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [0, 1, 0, 1, 1, 1, 1, 0], - [0, 0, 1, 1, 1, 1, 1, 0], - [0, 1, 1, 1, 0, 1, 1, 0], - [0, 0, 0, 0, 1, 1, 1, 0], - [0, 1, 1, 1, 1, 1, 1, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], type) - out = ndimage.binary_hit_or_miss(data, struct1, - struct2) - assert_array_almost_equal(expected, out) - - -#class NDImageTestResult(unittest.TestResult): -# separator1 = '=' * 70 + '\n' -# separator2 = '-' * 70 + '\n' -# -# def __init__(self, stream, verbose): -# unittest.TestResult.__init__(self) -# self.stream = stream -# self.verbose = verbose -# -# def getDescription(self, 
test): -# return test.shortDescription() or str(test) -# -# def startTest(self, test): -# unittest.TestResult.startTest(self, test) -# if self.verbose: -# self.stream.write(self.getDescription(test)) -# self.stream.write(" ... ") -# -# def addSuccess(self, test): -# unittest.TestResult.addSuccess(self, test) -# if self.verbose: -# self.stream.write("ok\n") -# -# def addError(self, test, err): -# unittest.TestResult.addError(self, test, err) -# if self.verbose: -# self.stream.write("ERROR\n") -# -# def addFailure(self, test, err): -# unittest.TestResult.addFailure(self, test, err) -# if self.verbose: -# self.stream.write("FAIL\n") -# -# def printErrors(self): -# self.printErrorList('ERROR', self.errors) -# self.printErrorList('FAIL', self.failures) -# -# def printErrorList(self, flavour, errors): -# for test, err in errors: -# self.stream.write(self.separator1) -# description = self.getDescription(test) -# self.stream.write("%s: %s\n" % (flavour, description)) -# self.stream.write(self.separator2) -# self.stream.write(err) -# -#def test(): -# if '-v' in sys.argv[1:]: -# verbose = 1 -# else: -# verbose = 0 -# suite = unittest.TestSuite() -# suite.addTest(unittest.makeSuite(NDImageTest)) -# result = NDImageTestResult(sys.stdout, verbose) -# suite(result) -# result.printErrors() -# return len(result.failures), result.testsRun - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/ndimage/tests/test_regression.py b/scipy-0.10.1/scipy/ndimage/tests/test_regression.py deleted file mode 100644 index 28d7553d43..0000000000 --- a/scipy-0.10.1/scipy/ndimage/tests/test_regression.py +++ /dev/null @@ -1,34 +0,0 @@ -import numpy as np -from numpy.testing import assert_array_almost_equal, run_module_suite - -import scipy.ndimage as ndimage - -def test_byte_order_median(): - """Regression test for #413: median_filter does not handle bytes orders.""" - a = np.arange(9, dtype=' thresh - rank = len(mask.shape) - la, co = ndimage.label(mask, - 
ndimage.generate_binary_structure(rank, rank)) - slices = ndimage.find_objects(la) - - if np.dtype(np.intp) != np.dtype('i'): - shape=(3,1240,1240) - a = np.random.rand(np.product(shape)).reshape(shape) - # shouldn't crash - SE(a) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/odr/SConscript b/scipy-0.10.1/scipy/odr/SConscript deleted file mode 100644 index 40c35abb9b..0000000000 --- a/scipy-0.10.1/scipy/odr/SConscript +++ /dev/null @@ -1,57 +0,0 @@ -# Last Change: Thu Jun 12 07:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numscons import GetNumpyEnvironment -from numscons import CheckF77BLAS, CheckF77Clib - -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckBLAS' : CheckF77BLAS}) - -if not config.CheckBLAS(): - raise RuntimeError("Could not check F/C runtime library for %s/%s, " \ - "contact the maintainer" % (env['CC'], env['F77'])) - -#-------------- -# Checking Blas -#-------------- -st = config.CheckBLAS() -if not st: - has_blas = 0 -else: - has_blas = 1 - -config.Finish() -write_info(env) - -#========== -# Build -#========== - -# odr lib -libodr_src = [pjoin('odrpack', i) for i in ['d_odr.f', 'd_mprec.f', 'dlunoc.f']] -if has_blas: - libodr_src.append(pjoin('odrpack', 'd_lpk.f')) -else: - libodr_src.append(pjoin('odrpack', 'd_lpkbls.f')) - -env.DistutilsStaticExtLibrary('odrpack', source = libodr_src) - -env.PrependUnique(LIBS = 'odrpack') -env.PrependUnique(LIBPATH = '.') - -# odr pyextension -env.NumpyPythonExtension('__odrpack', '__odrpack.c') diff --git 
a/scipy-0.10.1/scipy/odr/SConstruct b/scipy-0.10.1/scipy/odr/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/odr/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/odr/__init__.py b/scipy-0.10.1/scipy/odr/__init__.py deleted file mode 100644 index c90e1e2f0f..0000000000 --- a/scipy-0.10.1/scipy/odr/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -================================================= -Orthogonal distance regression (:mod:`scipy.odr`) -================================================= - -.. currentmodule:: scipy.odr - -Package Content -=============== - -.. autosummary:: - :toctree: generated/ - - odr -- Perform orthogonal distance regression - - ODR -- Gathers all info & manages the main fitting routine. - Data -- Stores the data to fit. - Model -- Stores information about the function to be fit. - Output - RealData -- Weights as actual std. dev.s and/or covariances. - - odr_error - odr_stop - -Usage information -================= - -Introduction ------------- - -Why Orthogonal Distance Regression (ODR)? Sometimes one has -measurement errors in the explanatory (a.k.a., "independent") -variable(s), not just the response (a.k.a., "dependent") variable(s). -Ordinary Least Squares (OLS) fitting procedures treat the data for -explanatory variables as fixed, i.e., not subject to error of any kind. -Furthermore, OLS procedures require that the response variables be an -explicit function of the explanatory variables; sometimes making the -equation explicit is impractical and/or introduces errors. ODR can -handle both of these cases with ease, and can even reduce to the OLS -case if that is sufficient for the problem. - -ODRPACK is a FORTRAN-77 library for performing ODR with possibly -non-linear fitting functions. 
It uses a modified trust-region -Levenberg-Marquardt-type algorithm [1]_ to estimate the function -parameters. The fitting functions are provided by Python functions -operating on NumPy arrays. The required derivatives may be provided -by Python functions as well, or may be estimated numerically. ODRPACK -can do explicit or implicit ODR fits, or it can do OLS. Input and -output variables may be multi-dimensional. Weights can be provided to -account for different variances of the observations, and even -covariances between dimensions of the variables. - -odr provides two interfaces: a single function, and a set of -high-level classes that wrap that function; please refer to their -docstrings for more information. While the docstring of the function -odr does not have a full explanation of its arguments, the classes do, -and arguments of the same name usually have the same requirements. -Furthermore, the user is urged to at least skim the `ODRPACK User's -Guide `_ - -"Know Thy Algorithm." - -Use ---- - -See the docstrings of `odr.odrpack` and the functions and classes for -usage instructions. The ODRPACK User's Guide (linked above) is also -quite helpful. - -References ----------- -.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," - in "Statistical analysis of measurement error models and - applications: proceedings of the AMS-IMS-SIAM joint summer research - conference held June 10-16, 1989," Contemporary Mathematics, - vol. 112, pg. 186, 1990. 
- -""" -# version: 0.7 -# author: Robert Kern -# date: 2006-09-21 - -from odrpack import * -from models import * - -__all__ = filter(lambda s: not s.startswith('_'), dir()) - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/odr/__odrpack.c b/scipy-0.10.1/scipy/odr/__odrpack.c deleted file mode 100644 index 0bf835ec05..0000000000 --- a/scipy-0.10.1/scipy/odr/__odrpack.c +++ /dev/null @@ -1,1401 +0,0 @@ -/* Anti-Copyright - * - * I hereby release this code into the PUBLIC DOMAIN AS IS. There is no - * support, warranty, or guarantee. I will gladly accept comments, bug - * reports, and patches, however. - * - * Robert Kern - * kern@caltech.edu - * - */ - -#include "odrpack.h" - - -void F_FUNC(dodrc,DODRC)(void (*fcn)(int *n, int *m, int *np, int *nq, int *ldn, int *ldm, - int *ldnp, double *beta, double *xplusd, int *ifixb, int *ifixx, - int *ldifx, int *ideval, double *f, double *fjacb, double *fjacd, - int *istop), - int *n, int *m, int *np, int *nq, double *beta, double *y, int *ldy, - double *x, int *ldx, double *we, int *ldwe, int *ld2we, double *wd, - int *ldwd, int *ld2wd, int *ifixb, int *ifixx, int *ldifx, int *job, - int *ndigit, double *taufac, double *sstol, double *partol, - int *maxit, int *iprint, int *lunerr, int *lunrpt, double *stpb, - double *stpd, int *ldstpd, double *sclb, double *scld, int *ldscld, - double *work, int *lwork, int *iwork, int *liwork, int *info); -void F_FUNC(dwinf,DWINF)(int *n, int *m, int *np, int *nq, int *ldwe, int *ld2we, int *isodr, - int *delta, int *eps, int *xplus, int *fn, int *sd, int *vcv, int *rvar, - int *wss, int *wssde, int *wssep, int *rcond, int *eta, int *olmav, - int *tau, int *alpha, int *actrs, int *pnorm, int *rnors, int *prers, - int *partl, int *sstol, int *taufc, int *apsma, int *betao, int *betac, - int *betas, int *betan, int *s, int *ss, int *ssf, int *qraux, int *u, - int *fs, int *fjacb, int *we1, int *diff, int *delts, int *deltn, - int *t, int *tt, int 
*omega, int *fjacd, int *wrk1, int *wrk2, - int *wrk3, int *wrk4, int *wrk5, int *wrk6, int *wrk7, int *lwkmn); -void F_FUNC(dluno,DLUNO)(int *lun, char *fn, int fnlen); -void F_FUNC(dlunc,DLUNC)(int *lun); - - - -/* callback to pass to DODRC; calls the Python function in the global structure |odr_global| */ -void fcn_callback(int *n, int *m, int *np, int *nq, int *ldn, int *ldm, - int *ldnp, double *beta, double *xplusd, int *ifixb, - int *ifixx, int *ldfix, int *ideval, double *f, - double *fjacb, double *fjacd, int *istop) -{ - PyObject *arg01, *arglist; - PyObject *result; - PyArrayObject *result_array = NULL; - PyArrayObject *pyXplusD; - - arg01 = PyTuple_New(2); - - if (*m != 1) - { - npy_intp dim2[2]; - dim2[0] = *m; - dim2[1] = *n; - pyXplusD = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - memcpy(pyXplusD->data, (void *)xplusd, (*m) * (*n) * sizeof(double)); - } - else - { - npy_intp dim1[1]; - dim1[0] = *n; - pyXplusD = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - memcpy(pyXplusD->data, (void *)xplusd, (*n) * sizeof(double)); - } - - PyTuple_SetItem(arg01, 0, odr_global.pyBeta); - Py_INCREF(odr_global.pyBeta); - PyTuple_SetItem(arg01, 1, (PyObject *) pyXplusD); - Py_INCREF((PyObject *) pyXplusD); - - if (odr_global.extra_args != NULL) - { - arglist = PySequence_Concat(arg01, odr_global.extra_args); - } - else - { - arglist = PySequence_Tuple(arg01); /* make a copy */ - } - - Py_DECREF(arg01); - *istop = 0; - - memcpy(((PyArrayObject *) (odr_global.pyBeta))->data, (void *)beta, - (*np) * sizeof(double)); - - if ((*ideval % 10) >= 1) - { - /* compute f with odr_global.fcn */ - - if (odr_global.fcn == NULL) - { - /* we don't have a function to call */ - PYERR2(odr_error, "Function has not been initialized"); - } - - if ((result = PyEval_CallObject(odr_global.fcn, arglist)) == NULL) - { - PyObject *tmpobj, *str1; - - if (PyErr_ExceptionMatches(odr_stop)) - { - /* stop, don't fail */ - *istop = 1; - - Py_DECREF(arglist); - 
return; - } - - PyErr_Print(); - tmpobj = PyObject_GetAttrString(odr_global.fcn, "func_name"); - if (tmpobj == NULL) - goto fail; - - str1 = - PyString_FromString - ("Error occurred while calling the Python function named "); - if (str1 == NULL) - { - Py_DECREF(tmpobj); - goto fail; - } - PyString_ConcatAndDel(&str1, tmpobj); - PyErr_SetString(odr_error, PyString_AsString(str1)); - Py_DECREF(str1); - goto fail; - } - - if ((result_array = - (PyArrayObject *) PyArray_ContiguousFromObject(result, - PyArray_DOUBLE, 0, - 2)) == NULL) - { - PYERR2(odr_error, - "Result from function call is not a proper array of floats."); - } - - memcpy((void *)f, result_array->data, (*n) * (*nq) * sizeof(double)); - Py_DECREF(result_array); - } - - if (((*ideval) / 10) % 10 >= 1) - { - /* compute fjacb with odr_global.fjacb */ - - if (odr_global.fjacb == NULL) - { - /* we don't have a function to call */ - PYERR2(odr_error, "Function has not been initialized"); - } - - if ((result = PyEval_CallObject(odr_global.fjacb, arglist)) == NULL) - { - PyObject *tmpobj, *str1; - - if (PyErr_ExceptionMatches(odr_stop)) - { - /* stop, don't fail */ - *istop = 1; - - Py_DECREF(arglist); - return; - } - - PyErr_Print(); - tmpobj = PyObject_GetAttrString(odr_global.fjacb, "func_name"); - if (tmpobj == NULL) - goto fail; - - str1 = - PyString_FromString - ("Error occurred while calling the Python function named "); - if (str1 == NULL) - { - Py_DECREF(tmpobj); - goto fail; - } - PyString_ConcatAndDel(&str1, tmpobj); - PyErr_SetString(odr_error, PyString_AsString(str1)); - Py_DECREF(str1); - goto fail; - } - - if ((result_array = - (PyArrayObject *) PyArray_ContiguousFromObject(result, - PyArray_DOUBLE, 0, - 2)) == NULL) - { - PYERR2(odr_error, - "Result from function call is not a proper array of floats."); - } - - if (*nq != 1 && *np != 1) - { - /* result_array should be rank-3 */ - - if (result_array->nd != 3) - { - Py_DECREF(result_array); - PYERR2(odr_error, "Beta Jacobian is not rank-3"); - } - } 
- else if (*nq == 1) - { - /* result_array should be rank-2 */ - - if (result_array->nd != 2) - { - Py_DECREF(result_array); - PYERR2(odr_error, "Beta Jacobian is not rank-2"); - } - } - - memcpy((void *)fjacb, result_array->data, - (*n) * (*nq) * (*np) * sizeof(double)); - Py_DECREF(result_array); - - } - - if (((*ideval) / 100) % 10 >= 1) - { - /* compute fjacd with odr_global.fjacd */ - - if (odr_global.fjacd == NULL) - { - /* we don't have a function to call */ - PYERR2(odr_error, "fjcad has not been initialized"); - } - - if ((result = PyEval_CallObject(odr_global.fjacd, arglist)) == NULL) - { - PyObject *tmpobj, *str1; - - if (PyErr_ExceptionMatches(odr_stop)) - { - /* stop, don't fail */ - *istop = 1; - - Py_DECREF(arglist); - return; - } - - PyErr_Print(); - tmpobj = PyObject_GetAttrString(odr_global.fjacd, "func_name"); - if (tmpobj == NULL) - goto fail; - - str1 = - PyString_FromString - ("Error occurred while calling the Python function named "); - if (str1 == NULL) - { - Py_DECREF(tmpobj); - goto fail; - } - PyString_ConcatAndDel(&str1, tmpobj); - PyErr_SetString(odr_error, PyString_AsString(str1)); - Py_DECREF(str1); - goto fail; - } - - if ((result_array = - (PyArrayObject *) PyArray_ContiguousFromObject(result, - PyArray_DOUBLE, 0, - 2)) == NULL) - { - PYERR2(odr_error, - "Result from function call is not a proper array of floats."); - } - - if (*nq != 1 && *m != 1) - { - /* result_array should be rank-3 */ - - if (result_array->nd != 3) - { - Py_DECREF(result_array); - PYERR2(odr_error, "xplusd Jacobian is not rank-3"); - } - } - else if (*nq == 1 && *m != 1) - { - /* result_array should be rank-2 */ - - if (result_array->nd != 2) - { - Py_DECREF(result_array); - PYERR2(odr_error, "xplusd Jacobian is not rank-2"); - } - } - else if (*nq == 1 && *m == 1) - { - /* result_array should be rank-1 */ - - if (result_array->nd != 1) - { - Py_DECREF(result_array); - PYERR2(odr_error, "xplusd Jacobian is not rank-1"); - } - } - - memcpy((void *)fjacd, 
result_array->data, - (*n) * (*nq) * (*m) * sizeof(double)); - Py_DECREF(result_array); - } - - Py_DECREF(result); - Py_DECREF(arglist); - Py_DECREF(pyXplusD); - - return; - -fail: - Py_XDECREF(result); - Py_XDECREF(arglist); - Py_XDECREF(pyXplusD); - *istop = -1; - return; -} - - -/* generates Python output from the raw output from DODRC */ -PyObject *gen_output(int n, int m, int np, int nq, int ldwe, int ld2we, - PyArrayObject * beta, PyArrayObject * work, - PyArrayObject * iwork, int isodr, int info, - int full_output) -{ - PyArrayObject *sd_beta, *cov_beta; - - int delta, eps, xplus, fn, sd, vcv, rvar, wss, wssde, wssep, rcond; - int eta, olmav, tau, alpha, actrs, pnorm, rnors, prers, partl, sstol; - int taufc, apsma, betao, betac, betas, betan, s, ss, ssf, qraux, u; - int fs, fjacb, we1, diff, delts, deltn, t, tt, omega, fjacd; - int wrk1, wrk2, wrk3, wrk4, wrk5, wrk6, wrk7, lwkmn; - - PyObject *retobj; - - npy_intp dim1[1], dim2[2]; - - if (info == 50005) { - /* fatal error in fcn call; return NULL to propogate the exception */ - - return NULL; - } - - lwkmn = work->dimensions[0]; - - F_FUNC(dwinf,DWINF)(&n, &m, &np, &nq, &ldwe, &ld2we, &isodr, - &delta, &eps, &xplus, &fn, &sd, &vcv, &rvar, &wss, &wssde, - &wssep, &rcond, &eta, &olmav, &tau, &alpha, &actrs, &pnorm, - &rnors, &prers, &partl, &sstol, &taufc, &apsma, &betao, &betac, - &betas, &betan, &s, &ss, &ssf, &qraux, &u, &fs, &fjacb, &we1, - &diff, &delts, &deltn, &t, &tt, &omega, &fjacd, &wrk1, &wrk2, - &wrk3, &wrk4, &wrk5, &wrk6, &wrk7, &lwkmn); - - /* convert FORTRAN indices to C indices */ - delta--; - eps--; - xplus--; - fn--; - sd--; - vcv--; - rvar--; - wss--; - wssde--; - wssep--; - rcond--; - eta--; - olmav--; - tau--; - alpha--; - actrs--; - pnorm--; - rnors--; - prers--; - partl--; - sstol--; - taufc--; - apsma--; - betao--; - betac--; - betas--; - betan--; - s--; - ss--; - ssf--; - qraux--; - u--; - fs--; - fjacb--; - we1--; - diff--; - delts--; - deltn--; - t--; - tt--; - omega--; - fjacd--; - 
wrk1--; - wrk2--; - wrk3--; - wrk4--; - wrk5--; - wrk6--; - wrk7--; - - dim1[0] = beta->dimensions[0]; - sd_beta = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - dim2[0] = beta->dimensions[0]; - dim2[1] = beta->dimensions[0]; - cov_beta = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - - memcpy(sd_beta->data, (void *)((double *)(work->data) + sd), - np * sizeof(double)); - memcpy(cov_beta->data, (void *)((double *)(work->data) + vcv), - np * np * sizeof(double)); - - if (!full_output) - { - retobj = Py_BuildValue("OOO", PyArray_Return(beta), - PyArray_Return(sd_beta), - PyArray_Return(cov_beta)); - Py_DECREF((PyObject *) sd_beta); - Py_DECREF((PyObject *) cov_beta); - - return retobj; - } - else - { - PyArrayObject *deltaA, *epsA, *xplusA, *fnA; - double res_var, sum_square, sum_square_delta, sum_square_eps; - double inv_condnum, rel_error; - PyObject *work_ind; - - work_ind = - Py_BuildValue - ("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}", - "delta", delta, "eps", eps, "xplus", xplus, "fn", fn, "sd", sd, "sd", - vcv, "rvar", rvar, "wss", wss, "wssde", wssde, "wssep", wssep, - "rcond", rcond, "eta", eta, "olmav", olmav, "tau", tau, "alpha", - alpha, "actrs", actrs, "pnorm", pnorm, "rnors", rnors, "prers", - prers, "partl", partl, "sstol", sstol, "taufc", taufc, "apsma", - apsma, "betao", betao, "betac", betac, "betas", betas, "betan", - betan, "s", s, "ss", ss, "ssf", ssf, "qraux", qraux, "u", u, "fs", - fs, "fjacb", fjacb, "we1", we1, "diff", diff, "delts", delts, - "deltn", deltn, "t", t, "tt", tt, "omega", omega, "fjacd", fjacd, - "wrk1", wrk1, "wrk2", wrk2, "wrk3", wrk3, "wrk4", wrk4, "wrk5", wrk5, - "wrk6", wrk6, "wrk7", wrk7); - - if (m == 1) - { - dim1[0] = n; - deltaA = - (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - xplusA = - (PyArrayObject *) 
PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - } - else - { - dim2[0] = m; - dim2[1] = n; - deltaA = - (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - xplusA = - (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - } - - if (nq == 1) - { - dim1[0] = n; - epsA = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - fnA = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - } - else - { - dim2[0] = nq; - dim2[1] = n; - epsA = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - fnA = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - } - - memcpy(deltaA->data, (void *)((double *)(work->data) + delta), - m * n * sizeof(double)); - memcpy(epsA->data, (void *)((double *)(work->data) + eps), - nq * n * sizeof(double)); - memcpy(xplusA->data, (void *)((double *)(work->data) + xplus), - m * n * sizeof(double)); - memcpy(fnA->data, (void *)((double *)(work->data) + fn), - nq * n * sizeof(double)); - - res_var = *((double *)(work->data) + rvar); - sum_square = *((double *)(work->data) + wss); - sum_square_delta = *((double *)(work->data) + wssde); - sum_square_eps = *((double *)(work->data) + wssep); - inv_condnum = *((double *)(work->data) + rcond); - rel_error = *((double *)(work->data) + eta); - - retobj = - Py_BuildValue - ("OOO{s:O,s:O,s:O,s:O,s:d,s:d,s:d,s:d,s:d,s:d,s:O,s:O,s:O,s:i}", - PyArray_Return(beta), PyArray_Return(sd_beta), - PyArray_Return(cov_beta), "delta", PyArray_Return(deltaA), "eps", - PyArray_Return(epsA), "xplus", PyArray_Return(xplusA), "y", - PyArray_Return(fnA), "res_var", res_var, "sum_square", sum_square, - "sum_square_delta", sum_square_delta, "sum_square_eps", - sum_square_eps, "inv_condnum", inv_condnum, "rel_error", rel_error, - "work", PyArray_Return(work), "work_ind", work_ind, "iwork", - PyArray_Return(iwork), "info", info); - Py_DECREF((PyObject *) sd_beta); - Py_DECREF((PyObject *) cov_beta); - Py_DECREF((PyObject *) deltaA); - Py_DECREF((PyObject *) epsA); - 
Py_DECREF((PyObject *) xplusA); - Py_DECREF((PyObject *) fnA); - Py_DECREF((PyObject *) work_ind); - - return retobj; - } -} - -PyObject *odr(PyObject * self, PyObject * args, PyObject * kwds) -{ - PyObject *fcn, *initbeta, *py, *px, *pwe = NULL, *pwd = NULL, *fjacb = NULL; - PyObject *fjacd = NULL, *pifixb = NULL, *pifixx = NULL; - PyObject *pstpb = NULL, *pstpd = NULL, *psclb = NULL, *pscld = NULL; - PyObject *pwork = NULL, *piwork = NULL, *extra_args = NULL; - int job = 0, ndigit = 0, maxit = -1, iprint = 0; - int full_output = 0; - double taufac = 0.0, sstol = -1.0, partol = -1.0; - char *errfile = NULL, *rptfile = NULL; - int lerrfile = 0, lrptfile = 0; - PyArrayObject *beta = NULL, *y = NULL, *x = NULL, *we = NULL, *wd = NULL; - PyArrayObject *ifixb = NULL, *ifixx = NULL; - PyArrayObject *stpb = NULL, *stpd = NULL, *sclb = NULL, *scld = NULL; - PyArrayObject *work = NULL, *iwork = NULL; - int n, m, np, nq, ldy, ldx, ldwe, ld2we, ldwd, ld2wd, ldifx; - int lunerr = -1, lunrpt = -1, ldstpd, ldscld, lwork, liwork, info = 0; - static char *kw_list[] = { "fcn", "initbeta", "y", "x", "we", "wd", "fjacb", - "fjacd", "extra_args", "ifixb", "ifixx", "job", "iprint", "errfile", - "rptfile", "ndigit", "taufac", "sstol", "partol", - "maxit", "stpb", "stpd", "sclb", "scld", "work", - "iwork", "full_output", NULL - }; - int isodr = 1; - PyObject *result; - npy_intp dim1[1], dim2[2], dim3[3]; - int implicit; /* flag for implicit model */ - - - if (kwds == NULL) - { - if (!PyArg_ParseTuple(args, "OOOO|OOOOOOOiiz#z#idddiOOOOOOi:odr", - &fcn, &initbeta, &py, &px, &pwe, &pwd, - &fjacb, &fjacd, &extra_args, &pifixb, &pifixx, - &job, &iprint, &errfile, &lerrfile, &rptfile, - &lrptfile, &ndigit, &taufac, &sstol, &partol, - &maxit, &pstpb, &pstpd, &psclb, &pscld, &pwork, - &piwork, &full_output)) - { - return NULL; - } - } - else - { - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OOOO|OOOOOOOiiz#z#idddiOOOOOOi:odr", - kw_list, &fcn, &initbeta, &py, &px, - &pwe, &pwd, &fjacb, 
&fjacd, - &extra_args, &pifixb, &pifixx, &job, - &iprint, &errfile, &lerrfile, &rptfile, - &lrptfile, &ndigit, &taufac, &sstol, - &partol, &maxit, &pstpb, &pstpd, - &psclb, &pscld, &pwork, &piwork, - &full_output)) - { - return NULL; - } - } - - /* Check the validity of all arguments */ - - if (!PyCallable_Check(fcn)) - { - PYERR(PyExc_TypeError, "fcn must be callable"); - } - if (!PySequence_Check(initbeta)) - { - PYERR(PyExc_TypeError, "initbeta must be a sequence"); - } - if (!PySequence_Check(py)) - { - /* Checking whether py is an int - * - * XXX: PyInt_Check for np.int32 instances does not work on python 2.6 - - * we should fix this in numpy, workaround by trying to cast to an int - * for now */ - long val; - - PyErr_Clear(); - val = PyInt_AsLong(py); - if (val == -1 && PyErr_Occurred()) { - PYERR(PyExc_TypeError, - "y must be a sequence or integer (if model is implicit)"); - } - } - if (!PySequence_Check(px)) - { - PYERR(PyExc_TypeError, "x must be a sequence"); - } - if (pwe != NULL && !PySequence_Check(pwe) && !PyNumber_Check(pwe)) - { - PYERR(PyExc_TypeError, "we must be a sequence or a number"); - } - if (pwd != NULL && !PySequence_Check(pwd) && !PyNumber_Check(pwd)) - { - PYERR(PyExc_TypeError, "wd must be a sequence or a number"); - } - if (fjacb != NULL && !PyCallable_Check(fjacb)) - { - PYERR(PyExc_TypeError, "fjacb must be callable"); - } - if (fjacd != NULL && !PyCallable_Check(fjacd)) - { - PYERR(PyExc_TypeError, "fjacd must be callable"); - } - if (extra_args != NULL && !PySequence_Check(extra_args)) - { - PYERR(PyExc_TypeError, "extra_args must be a sequence"); - } - if (pifixx != NULL && !PySequence_Check(pifixx)) - { - PYERR(PyExc_TypeError, "ifixx must be a sequence"); - } - if (pifixb != NULL && !PySequence_Check(pifixb)) - { - PYERR(PyExc_TypeError, "ifixb must be a sequence"); - } - if (pstpb != NULL && !PySequence_Check(pstpb)) - { - PYERR(PyExc_TypeError, "stpb must be a sequence"); - } - if (pstpd != NULL && !PySequence_Check(pstpd)) - 
{ - PYERR(PyExc_TypeError, "stpd must be a sequence"); - } - if (psclb != NULL && !PySequence_Check(psclb)) - { - PYERR(PyExc_TypeError, "sclb must be a sequence"); - } - if (pscld != NULL && !PySequence_Check(pscld)) - { - PYERR(PyExc_TypeError, "scld must be a sequence"); - } - if (pwork != NULL && !PyArray_Check(pwork)) - { - PYERR(PyExc_TypeError, "work must be an array"); - } - if (piwork != NULL && !PyArray_Check(piwork)) - { - PYERR(PyExc_TypeError, "iwork must be an array"); - } - - /* start processing the arguments and check for errors on the way */ - - /* check for implicit model */ - - implicit = (job % 10 == 1); - - if (!implicit) - { - if ((y = - (PyArrayObject *) PyArray_CopyFromObject(py, PyArray_DOUBLE, 1, - 2)) == NULL) - { - PYERR(PyExc_ValueError, - "y could not be made into a suitable array"); - } - n = y->dimensions[y->nd - 1]; /* pick the last dimension */ - if ((x = - (PyArrayObject *) PyArray_CopyFromObject(px, PyArray_DOUBLE, 1, - 2)) == NULL) - { - PYERR(PyExc_ValueError, - "x could not be made into a suitable array"); - } - if (n != x->dimensions[x->nd - 1]) - { - PYERR(PyExc_ValueError, - "x and y don't have matching numbers of observations"); - } - if (y->nd == 1) - { - nq = 1; - } - else - { - nq = y->dimensions[0]; - } - - ldx = ldy = n; - } - else - { /* we *do* have an implicit model */ - ldy = 1; - nq = (int)PyInt_AsLong(py); - dim1[0] = 1; - - /* initialize y to a dummy array; never referenced */ - y = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - - if ((x = - (PyArrayObject *) PyArray_CopyFromObject(px, PyArray_DOUBLE, 1, - 2)) == NULL) - { - PYERR(PyExc_ValueError, - "x could not be made into a suitable array"); - } - - n = x->dimensions[x->nd - 1]; - ldx = n; - } - - if (x->nd == 1) - { - m = 1; - } - else - { - m = x->dimensions[0]; - } /* x, y */ - - if ((beta = - (PyArrayObject *) PyArray_CopyFromObject(initbeta, PyArray_DOUBLE, 1, - 1)) == NULL) - { - PYERR(PyExc_ValueError, - "initbeta could not be made 
into a suitable array"); - } - np = beta->dimensions[0]; - - if (pwe == NULL) - { - ldwe = ld2we = 1; - dim1[0] = n; - we = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - ((double *)(we->data))[0] = -1.0; - } - else if (PyNumber_Check(pwe) && !PyArray_Check(pwe)) - { - /* we is a single weight, set the first value of we to -pwe */ - PyObject *tmp; - double val; - - tmp = PyNumber_Float(pwe); - if (tmp == NULL) - PYERR(PyExc_ValueError, "could not convert we to a suitable array"); - val = PyFloat_AsDouble(tmp); - Py_DECREF(tmp); - - dim3[0] = nq; - dim3[1] = 1; - dim3[2] = 1; - we = (PyArrayObject *) PyArray_SimpleNew(3, dim3, PyArray_DOUBLE); - if (implicit) - { - ((double *)(we->data))[0] = val; - } - else - { - ((double *)(we->data))[0] = -val; - } - ldwe = ld2we = 1; - } - else if (PySequence_Check(pwe)) - { - /* we needs to be turned into an array */ - - if ((we = - (PyArrayObject *) PyArray_CopyFromObject(pwe, PyArray_DOUBLE, 1, - 3)) == NULL) - { - PYERR(PyExc_ValueError, "could not convert we to a suitable array"); - } - - if (we->nd == 1 && nq == 1) - { - - ldwe = n; - ld2we = 1; - } - else if (we->nd == 1 && we->dimensions[0] == nq) - { - /* we is a rank-1 array with diagonal weightings to be broadcast - * to all observations */ - ldwe = 1; - ld2we = 1; - } - else if (we->nd == 3 && we->dimensions[0] == nq - && we->dimensions[1] == nq && we->dimensions[2] == 1) - { - /* we is a rank-3 array with the covariant weightings - to be broadcast to all observations */ - ldwe = 1; - ld2we = nq; - } - else if (we->nd == 2 && we->dimensions[0] == nq - && we->dimensions[1] == nq) - { - /* we is a rank-2 array with the full covariant weightings - to be broadcast to all observations */ - ldwe = 1; - ld2we = nq; - } - - else if (we->nd == 2 && we->dimensions[0] == nq - && we->dimensions[1] == n) - { - /* we is a rank-2 array with the diagonal elements of the - covariant weightings for each observation */ - ldwe = n; - ld2we = 1; - } - else if (we->nd == 
3 && we->dimensions[0] == nq - && we->dimensions[1] == nq && we->dimensions[2] == n) - { - /* we is the full specification of the covariant weights - for each observation */ - ldwe = n; - ld2we = nq; - } - else - { - PYERR(PyExc_ValueError, "could not convert we to a suitable array"); - } - } /* we */ - - if (pwd == NULL) - { - ldwd = ld2wd = 1; - - dim1[0] = m; - wd = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - ((double *)(wd->data))[0] = -1.0; - } - else if (PyNumber_Check(pwd) && !PyArray_Check(pwd)) - { - /* wd is a single weight, set the first value of wd to -pwd */ - PyObject *tmp; - double val; - - tmp = PyNumber_Float(pwd); - if (tmp == NULL) - PYERR(PyExc_ValueError, "could not convert wd to a suitable array"); - val = PyFloat_AsDouble(tmp); - Py_DECREF(tmp); - - dim3[0] = 1; - dim3[1] = 1; - dim3[2] = m; - wd = (PyArrayObject *) PyArray_SimpleNew(3, dim3, PyArray_DOUBLE); - ((double *)(wd->data))[0] = -val; - ldwd = ld2wd = 1; - } - else if (PySequence_Check(pwd)) - { - /* wd needs to be turned into an array */ - - if ((wd = - (PyArrayObject *) PyArray_CopyFromObject(pwd, PyArray_DOUBLE, 1, - 3)) == NULL) - { - PYERR(PyExc_ValueError, "could not convert wd to a suitable array"); - } - - if (wd->nd == 1 && m == 1) - { - ldwd = n; - ld2wd = 1; - } - else if (wd->nd == 1 && wd->dimensions[0] == m) - { - /* wd is a rank-1 array with diagonal weightings to be broadcast - * to all observations */ - ldwd = 1; - ld2wd = 1; - } - - else if (wd->nd == 3 && wd->dimensions[0] == m - && wd->dimensions[1] == m && wd->dimensions[2] == 1) - { - /* wd is a rank-3 array with the covariant wdightings - to be broadcast to all observations */ - ldwd = 1; - ld2wd = m; - } - else if (wd->nd == 2 && wd->dimensions[0] == m - && wd->dimensions[1] == m) - { - /* wd is a rank-2 array with the full covariant weightings - to be broadcast to all observations */ - ldwd = 1; - ld2wd = m; - } - - else if (wd->nd == 2 && wd->dimensions[0] == m - && wd->dimensions[1] == 
n) - { - /* wd is a rank-2 array with the diagonal elements of the - covariant weightings for each observation */ - ldwd = n; - ld2wd = 1; - } - else if (wd->nd == 3 && wd->dimensions[0] == m - && wd->dimensions[1] == m && wd->dimensions[2] == n) - { - /* wd is the full specification of the covariant weights - for each observation */ - ldwd = n; - ld2wd = m; - } - else - { - PYERR(PyExc_ValueError, "could not convert wd to a suitable array"); - } - - } /* wd */ - - - if (pifixb == NULL) - { - dim1[0] = np; - ifixb = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_INT); - *(int *)(ifixb->data) = -1; /* set first element negative */ - } - else - { - /* pifixb is a sequence as checked before */ - - if ((ifixb = - (PyArrayObject *) PyArray_CopyFromObject(pifixb, PyArray_INT, 1, - 1)) == NULL) - { - PYERR(PyExc_ValueError, - "could not convert ifixb to a suitable array"); - } - - if (ifixb->dimensions[0] != np) - { - PYERR(PyExc_ValueError, - "could not convert ifixb to a suitable array"); - } - } /* ifixb */ - - if (pifixx == NULL) - { - dim2[0] = m; - dim2[1] = 1; - ifixx = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_INT); - *(int *)(ifixx->data) = -1; /* set first element negative */ - ldifx = 1; - } - else - { - /* pifixx is a sequence as checked before */ - - if ((ifixx = - (PyArrayObject *) PyArray_CopyFromObject(pifixx, PyArray_INT, 1, - 2)) == NULL) - { - PYERR(PyExc_ValueError, - "could not convert ifixx to a suitable array"); - } - - if (ifixx->nd == 1 && ifixx->dimensions[0] == m) - { - ldifx = 1; - } - else if (ifixx->nd == 1 && ifixx->dimensions[0] == n && m == 1) - { - ldifx = n; - } - else if (ifixx->nd == 2 && ifixx->dimensions[0] == m - && ifixx->dimensions[1] == n) - { - ldifx = n; - } - else - { - PYERR(PyExc_ValueError, - "could not convert ifixx to a suitable array"); - } - } /* ifixx */ - - if (errfile != NULL) - { - /* call FORTRAN's OPEN to open the file with a logical unit of 18 */ - lunerr = 18; - F_FUNC(dluno,DLUNO)(&lunerr, 
errfile, lerrfile); - } - - if (rptfile != NULL) - { - /* call FORTRAN's OPEN to open the file with a logical unit of 19 */ - lunrpt = 19; - F_FUNC(dluno,DLUNO)(&lunrpt, rptfile, lrptfile); - } - - if (pstpb == NULL) - { - dim1[0] = np; - stpb = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - *(double *)(stpb->data) = 0.0; - } - else /* pstpb is a sequence */ - { - if ((stpb = - (PyArrayObject *) PyArray_CopyFromObject(pstpb, PyArray_DOUBLE, 1, - 1)) == NULL - || stpb->dimensions[0] != np) - { - PYERR(PyExc_ValueError, - "could not convert stpb to a suitable array"); - } - } /* stpb */ - - if (pstpd == NULL) - { - dim2[0] = 1; - dim2[1] = m; - stpd = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - *(double *)(stpd->data) = 0.0; - ldstpd = 1; - } - else - { - if ((stpd = - (PyArrayObject *) PyArray_CopyFromObject(pstpd, PyArray_DOUBLE, 1, - 2)) == NULL) - { - PYERR(PyExc_ValueError, - "could not convert stpb to a suitable array"); - } - - if (stpd->nd == 1 && stpd->dimensions[0] == m) - { - ldstpd = 1; - } - else if (stpd->nd == 1 && stpd->dimensions[0] == n && m == 1) - { - ldstpd = n; - } - else if (stpd->nd == 2 && stpd->dimensions[0] == n && - stpd->dimensions[1] == m) - { - ldstpd = n; - } - } /* stpd */ - - if (psclb == NULL) - { - dim1[0] = np; - sclb = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - *(double *)(sclb->data) = 0.0; - } - else /* psclb is a sequence */ - { - if ((sclb = - (PyArrayObject *) PyArray_CopyFromObject(psclb, PyArray_DOUBLE, 1, - 1)) == NULL - || sclb->dimensions[0] != np) - { - PYERR(PyExc_ValueError, - "could not convert sclb to a suitable array"); - } - } /* sclb */ - - if (pscld == NULL) - { - dim2[0] = 1; - dim2[1] = n; - scld = (PyArrayObject *) PyArray_SimpleNew(2, dim2, PyArray_DOUBLE); - *(double *)(scld->data) = 0.0; - ldscld = 1; - } - else - { - if ((scld = - (PyArrayObject *) PyArray_CopyFromObject(pscld, PyArray_DOUBLE, 1, - 2)) == NULL) - { - PYERR(PyExc_ValueError, - 
"could not convert stpb to a suitable array"); - } - - if (scld->nd == 1 && scld->dimensions[0] == m) - { - ldscld = 1; - } - else if (scld->nd == 1 && scld->dimensions[0] == n && m == 1) - { - ldscld = n; - } - else if (scld->nd == 2 && scld->dimensions[0] == n && - scld->dimensions[1] == m) - { - ldscld = n; - } - } /* scld */ - - if (job % 10 < 2) - { - /* ODR, not OLS */ - - lwork = - 18 + 11 * np + np * np + m + m * m + 4 * n * nq + 6 * n * m + - 2 * n * nq * np + 2 * n * nq * m + nq * nq + 5 * nq + nq * (np + m) + - ldwe * ld2we * nq; - - isodr = 1; - } - else - { - /* OLS, not ODR */ - - lwork = - 18 + 11 * np + np * np + m + m * m + 4 * n * nq + 2 * n * m + - 2 * n * nq * np + 5 * nq + nq * (np + m) + ldwe * ld2we * nq; - - isodr = 0; - } - - liwork = 20 + np + nq * (np + m); - - if ((job / 10000) % 10 >= 1) - { - /* fit is a restart, make sure work and iwork are input */ - - if (pwork == NULL || piwork == NULL) - { - PYERR(PyExc_ValueError, - "need to input work and iwork arrays to restart"); - } - } - - if ((job / 1000) % 10 >= 1) - { - /* delta should be supplied, make sure the user does */ - - if (pwork == NULL) - { - PYERR(PyExc_ValueError, - "need to input work array for delta initialization"); - } - } - - if (pwork != NULL) - { - if ((work = - (PyArrayObject *) PyArray_CopyFromObject(pwork, PyArray_DOUBLE, 1, - 1)) == NULL) - { - PYERR(PyExc_ValueError, - "could not convert work to a suitable array"); - } - if (work->dimensions[0] < lwork) - { - printf("%d %d\n", work->dimensions[0], lwork); - PYERR(PyExc_ValueError, "work is too small"); - } - } - else - { - /* initialize our own work array */ - dim1[0] = lwork; - work = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_DOUBLE); - } /* work */ - - if (piwork != NULL) - { - if ((iwork = - (PyArrayObject *) PyArray_CopyFromObject(piwork, PyArray_INT, 1, - 1)) == NULL) - { - PYERR(PyExc_ValueError, - "could not convert iwork to a suitable array"); - } - - if (iwork->dimensions[0] < liwork) - { - 
PYERR(PyExc_ValueError, "iwork is too small"); - } - } - else - { - /* initialize our own iwork array */ - dim1[0] = liwork; - iwork = (PyArrayObject *) PyArray_SimpleNew(1, dim1, PyArray_INT); - } /* iwork */ - - /* check if what JOB requests can be done with what the user has - input into the function */ - - if ((job / 10) % 10 >= 2) - { - /* derivatives are supposed to be supplied */ - - if (fjacb == NULL || fjacd == NULL) - { - PYERR(PyExc_ValueError, - "need fjacb and fjacd to calculate derivatives"); - } - } - - /* setup the global data for the callback */ - odr_global.fcn = fcn; - Py_INCREF(fcn); - odr_global.fjacb = fjacb; - Py_XINCREF(fjacb); - odr_global.fjacd = fjacd; - Py_XINCREF(fjacd); - odr_global.pyBeta = (PyObject *) beta; - Py_INCREF(beta); - odr_global.extra_args = extra_args; - Py_XINCREF(extra_args); - - /* now call DODRC */ - F_FUNC(dodrc,DODRC)(fcn_callback, &n, &m, &np, &nq, (double *)(beta->data), - (double *)(y->data), &ldy, (double *)(x->data), &ldx, - (double *)(we->data), &ldwe, &ld2we, - (double *)(wd->data), &ldwd, &ld2wd, - (int *)(ifixb->data), (int *)(ifixx->data), &ldifx, - &job, &ndigit, &taufac, &sstol, &partol, &maxit, - &iprint, &lunerr, &lunrpt, - (double *)(stpb->data), (double *)(stpd->data), &ldstpd, - (double *)(sclb->data), (double *)(scld->data), &ldscld, - (double *)(work->data), &lwork, (int *)(iwork->data), &liwork, - &info); - - result = gen_output(n, m, np, nq, ldwe, ld2we, - beta, work, iwork, isodr, info, full_output); - - if (result == NULL) - PYERR(PyExc_RuntimeError, "could not generate output"); - - if (lunerr != -1) - { - F_FUNC(dlunc,DLUNC)(&lunerr); - } - if (lunrpt != -1) - { - F_FUNC(dlunc,DLUNC)(&lunrpt); - } - - Py_DECREF(odr_global.fcn); - Py_XDECREF(odr_global.fjacb); - Py_XDECREF(odr_global.fjacd); - Py_DECREF(odr_global.pyBeta); - Py_XDECREF(odr_global.extra_args); - - odr_global.fcn = odr_global.fjacb = odr_global.fjacd = odr_global.pyBeta = - odr_global.extra_args = NULL; - - Py_DECREF(beta); - 
Py_DECREF(y); - Py_DECREF(x); - Py_DECREF(we); - Py_DECREF(wd); - Py_DECREF(ifixb); - Py_DECREF(ifixx); - Py_DECREF(stpb); - Py_DECREF(stpd); - Py_DECREF(sclb); - Py_DECREF(scld); - Py_DECREF(work); - Py_DECREF(iwork); - - return result; - -fail: - - - if (lunerr != -1) - { - F_FUNC(dlunc,DLUNC)(&lunerr); - } - if (lunrpt != -1) - { - F_FUNC(dlunc,DLUNC)(&lunrpt); - } - - Py_XDECREF(beta); - Py_XDECREF(y); - Py_XDECREF(x); - Py_XDECREF(we); - Py_XDECREF(wd); - Py_XDECREF(ifixb); - Py_XDECREF(ifixx); - Py_XDECREF(stpb); - Py_XDECREF(stpd); - Py_XDECREF(sclb); - Py_XDECREF(scld); - Py_XDECREF(work); - Py_XDECREF(iwork); - - return NULL; -} - -static void check_args(int n, int m, int np, int nq, - PyArrayObject * beta, - PyArrayObject * y, int ldy, - PyArrayObject * x, int ldx, - PyArrayObject * we, int ldwe, int ld2we, - PyArrayObject * wd, int ldwd, int ld2wd, - PyArrayObject * ifixb, PyArrayObject * ifixx, - int ldifx, int job, int ndigit, double taufac, - double sstol, double partol, int maxit, - PyArrayObject * stpb, PyArrayObject * stpd, - int ldstpd, PyArrayObject * sclb, - PyArrayObject * scld, int ldscld, - PyArrayObject * work, int lwork, - PyArrayObject * iwork, int liwork, int info) -{ - PyObject *printdict; - - printdict = - Py_BuildValue - ("{s:i,s:i,s:i,s:i,s:O,s:O,s:i,s:O,s:i,s:O,s:i,s:i,s:O,s:i,s:i,s:O,s:O,s:i,s:i,s:i,s:d,s:d,s:d,s:i,s:O,s:O,s:i,s:O,s:O,s:i,s:O,s:i,s:O,s:i,s:i}", - "n", n, "m", m, "np", np, "nq", nq, "beta", (PyObject *) beta, "y", - (PyObject *) y, "ldy", ldy, "x", (PyObject *) x, "ldx", ldx, "we", - (PyObject *) we, "ldwe", ldwe, "ld2we", ld2we, "wd", (PyObject *) wd, - "ldwd", ldwd, "ld2wd", ld2wd, "ifixb", (PyObject *) ifixb, "ifixx", - (PyObject *) ifixx, "ldifx", ldifx, "job", job, "ndigit", ndigit, - "taufac", taufac, "sstol", sstol, "partol", partol, "maxit", maxit, - "stpb", (PyObject *) stpb, "stpd", (PyObject *) stpd, "ldstpd", ldstpd, - "sclb", (PyObject *) sclb, "scld", (PyObject *) scld, "ldscld", ldscld, - "work", 
(PyObject *) work, "lwork", lwork, "iwork", (PyObject *) iwork, - "liwork", liwork, "info", info); - if (printdict == NULL) - { - PyErr_Print(); - return; - } - - PyObject_Print(printdict, stdout, Py_PRINT_RAW); - printf("\n"); - Py_XDECREF(printdict); -} - -static char odr__doc__[] = - "odr(fcn, beta0, y, x,\nwe=None, wd=None, fjacb=None, fjacd=None,\nextra_args=None, ifixx=None, ifixb=None, job=0, iprint=0,\nerrfile=None, rptfile=None, ndigit=0,\ntaufac=0.0, sstol=-1.0, partol=-1.0,\nmaxit=-1, stpb=None, stpd=None,\nsclb=None, scld=None, work=None, iwork=None,\nfull_output=0)"; - -static PyMethodDef methods[] = { - {"odr", (PyCFunction) odr, METH_VARARGS | METH_KEYWORDS, odr__doc__}, - {NULL, NULL}, -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_odrpack", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit___odrpack(void) -{ - PyObject *m, *s, *d; - - m = PyModule_Create(&moduledef); - import_array(); - - d = PyModule_GetDict(m); - odr_error = PyErr_NewException("odr.odrpack.odr_error", NULL, NULL); - odr_stop = PyErr_NewException("odr.odrpack.odr_stop", NULL, NULL); - PyDict_SetItemString(d, "odr_error", odr_error); - PyDict_SetItemString(d, "odr_stop", odr_stop); - - return m; -} -#else -PyMODINIT_FUNC init__odrpack(void) -{ - PyObject *m, *d; - - import_array(); - - m = Py_InitModule("__odrpack", methods); - d = PyModule_GetDict(m); - odr_error = PyErr_NewException("odr.odrpack.odr_error", NULL, NULL); - odr_stop = PyErr_NewException("odr.odrpack.odr_stop", NULL, NULL); - PyDict_SetItemString(d, "odr_error", odr_error); - PyDict_SetItemString(d, "odr_stop", odr_stop); -} -#endif diff --git a/scipy-0.10.1/scipy/odr/models.py b/scipy-0.10.1/scipy/odr/models.py deleted file mode 100644 index a7f228127a..0000000000 --- a/scipy-0.10.1/scipy/odr/models.py +++ /dev/null @@ -1,164 +0,0 @@ -""" Collection of Model instances for use with the odrpack fitting package. 
-""" - -import numpy as np -from scipy.odr.odrpack import Model - -__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', - 'polynomial'] - - -def _lin_fcn(B, x): - a, b = B[0], B[1:] - b.shape = (b.shape[0], 1) - - return a + (x*b).sum(axis=0) - -def _lin_fjb(B, x): - a = np.ones(x.shape[-1], float) - res = np.concatenate((a, x.ravel())) - res.shape = (B.shape[-1], x.shape[-1]) - return res - -def _lin_fjd(B, x): - b = B[1:] - b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0) - b.shape = x.shape - return b - -def _lin_est(data): - # Eh. The answer is analytical, so just return all ones. - # Don't return zeros since that will interfere with - # ODRPACK's auto-scaling procedures. - - if len(data.x.shape) == 2: - m = data.x.shape[0] - else: - m = 1 - - return np.ones((m + 1,), float) - -def _poly_fcn(B, x, powers): - a, b = B[0], B[1:] - b.shape = (b.shape[0], 1) - - return a + np.sum(b * np.power(x, powers), axis=0) - -def _poly_fjacb(B, x, powers): - res = np.concatenate((np.ones(x.shape[-1], float), np.power(x, - powers).flat)) - res.shape = (B.shape[-1], x.shape[-1]) - return res - -def _poly_fjacd(B, x, powers): - b = B[1:] - b.shape = (b.shape[0], 1) - - b = b * powers - - return np.sum(b * np.power(x, powers-1),axis=0) - -def _exp_fcn(B, x): - return B[0] + np.exp(B[1] * x) - -def _exp_fjd(B, x): - return B[1] * np.exp(B[1] * x) - -def _exp_fjb(B, x): - res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) - res.shape = (2, x.shape[-1]) - return res - -def _exp_est(data): - # Eh. - return np.array([1., 1.]) - -multilinear = Model(_lin_fcn, fjacb=_lin_fjb, - fjacd=_lin_fjd, estimate=_lin_est, - meta={'name': 'Arbitrary-dimensional Linear', - 'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]', - 'TeXequ':'$y=\\beta_0 + \sum_{i=1}^m \\beta_i x_i$'}) - -def polynomial(order): - """ - Factory function for a general polynomial model. 
- - Parameters - ---------- - order : int or sequence - If an integer, it becomes the order of the polynomial to fit. If - a sequence of numbers, then these are the explicit powers in the - polynomial. - A constant term (power 0) is always included, so don't include 0. - Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)). - - Returns - ------- - polynomial : Model instance - Model instance. - - """ - - powers = np.asarray(order) - if powers.shape == (): - # Scalar. - powers = np.arange(1, powers + 1) - - powers.shape = (len(powers), 1) - len_beta = len(powers) + 1 - - def _poly_est(data, len_beta=len_beta): - # Eh. Ignore data and return all ones. - return np.ones((len_beta,), float) - - return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb, - estimate=_poly_est, extra_args=(powers,), - meta={'name': 'Sorta-general Polynomial', - 'equ':'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1), - 'TeXequ':'$y=\\beta_0 + \sum_{i=1}^{%s} \\beta_i x^i$' %\ - (len_beta-1)}) - -exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, - estimate=_exp_est, meta={'name':'Exponential', - 'equ':'y= B_0 + exp(B_1 * x)', - 'TeXequ':'$y=\\beta_0 + e^{\\beta_1 x}$'}) - -def _unilin(B, x): - return x*B[0] + B[1] - -def _unilin_fjd(B, x): - return np.ones(x.shape, float) * B[0] - -def _unilin_fjb(B, x): - _ret = np.concatenate((x, np.ones(x.shape, float))) - _ret.shape = (2,) + x.shape - - return _ret - -def _unilin_est(data): - return (1., 1.) - -def _quadratic(B, x): - return x*(x*B[0] + B[1]) + B[2] - -def _quad_fjd(B, x): - return 2*x*B[0] + B[1] - -def _quad_fjb(B, x): - _ret = np.concatenate((x*x, x, np.ones(x.shape, float))) - _ret.shape = (3,) + x.shape - - return _ret - -def _quad_est(data): - return (1.,1.,1.) 
- -unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, - estimate=_unilin_est, meta={'name': 'Univariate Linear', - 'equ': 'y = B_0 * x + B_1', - 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) - -quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, - estimate=_quad_est, meta={'name': 'Quadratic', - 'equ': 'y = B_0*x**2 + B_1*x + B_2', - 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'}) diff --git a/scipy-0.10.1/scipy/odr/odrpack.h b/scipy-0.10.1/scipy/odr/odrpack.h deleted file mode 100644 index 20a7b0b3a3..0000000000 --- a/scipy-0.10.1/scipy/odr/odrpack.h +++ /dev/null @@ -1,71 +0,0 @@ -#include "Python.h" -#include "numpy/arrayobject.h" - -#include "numpy/npy_3kcompat.h" - -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif - -#define PYERR(errobj,message) {PyErr_SetString(errobj,message); goto fail;} -#define PYERR2(errobj,message) {PyErr_Print(); PyErr_SetString(errobj, message); goto fail;} -#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS) - -#define MAX(n1,n2) ((n1) > (n2))?(n1):(n2); -#define MIN(n1,n2) ((n1) > (n2))?(n2):(n1); - -struct ODR_info_ { - PyObject* fcn; - PyObject* fjacb; - PyObject* fjacd; - PyObject* pyBeta; - PyObject* extra_args; -}; - -typedef struct ODR_info_ ODR_info; - -static ODR_info odr_global; - -static PyObject *odr_error=NULL; -static PyObject *odr_stop=NULL; - -void fcn_callback(int *n, int *m, int *np, int *nq, int *ldn, int *ldm, - int *ldnp, double *beta, double *xplusd, int *ifixb, - int *ifixx, int *ldfix, int *ideval, double *f, - double *fjacb, double *fjacd, int *istop); - -PyObject *gen_output(int n, int m, int np, int nq, int ldwe, int ld2we, - PyArrayObject *beta, PyArrayObject *work, PyArrayObject *iwork, - int isodr, int info, int full_output); - -PyObject *odr(PyObject *self, PyObject *args, PyObject *kwds); 
- -#define PyArray_CONTIGUOUS(m) (ISCONTIGUOUS(m) ? Py_INCREF(m), m : \ -(PyArrayObject *)(PyArray_ContiguousFromObject((PyObject *)(m), \ -(m)->descr->type_num, 0,0))) -#define D(dbg) printf("we're here: %i\n", dbg) -#define EXIST(name,obj) if (obj==NULL){printf("%s\n",name);} -static void check_args(int n, int m, int np, int nq, - PyArrayObject *beta, - PyArrayObject *y, int ldy, - PyArrayObject *x, int ldx, - PyArrayObject *we, int ldwe, int ld2we, - PyArrayObject *wd, int ldwd, int ld2wd, - PyArrayObject *ifixb, PyArrayObject *ifixx, int ldifx, - int job, int ndigit, double taufac, double sstol, - double partol, int maxit, - PyArrayObject *stpb, PyArrayObject *stpd, int ldstpd, - PyArrayObject *sclb, PyArrayObject *scld, int ldscld, - PyArrayObject *work, int lwork, - PyArrayObject *iwork, int liwork, - int info); diff --git a/scipy-0.10.1/scipy/odr/odrpack.py b/scipy-0.10.1/scipy/odr/odrpack.py deleted file mode 100644 index 6cd689f18c..0000000000 --- a/scipy-0.10.1/scipy/odr/odrpack.py +++ /dev/null @@ -1,1121 +0,0 @@ -""" -Python wrappers for Orthogonal Distance Regression (ODRPACK). - -Classes -======= - -Data -- stores the data and weights to fit against - -RealData -- stores data with standard deviations and covariance matrices - -Model -- stores the model and its related information - -Output -- stores all of the output from an ODR run - -ODR -- collects all data and runs the fitting routine - - -Exceptions -========== - -odr_error -- error sometimes raised inside odr() and can be raised in the - fitting functions to tell ODRPACK to halt the procedure - -odr_stop -- error to raise in fitting functions to tell ODRPACK that the data or - parameters given are invalid - -Use -=== - -Basic use: - -1) Define the function you want to fit against. -:: - - def f(B, x): - ''' Linear function y = m*x + b ''' - return B[0]*x + B[1] - - # B is a vector of the parameters. - # x is an array of the current x values. 
- # x is same format as the x passed to Data or RealData. - - # Return an array in the same format as y passed to Data or RealData. - -2) Create a Model. -:: - - linear = Model(f) - -3) Create a Data or RealData instance. -:: - - mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) - -or - -:: - - mydata = RealData(x, y, sx=sx, sy=sy) - -4) Instantiate ODR with your data, model and initial parameter estimate. -:: - - myodr = ODR(mydata, linear, beta0=[1., 2.]) - -5) Run the fit. -:: - - myoutput = myodr.run() - -6) Examine output. -:: - - myoutput.pprint() - -Read the docstrings and the accompanying tests for more advanced usage. - -Notes -===== - -* Array formats -- FORTRAN stores its arrays in memory column first, i.e. an - array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently, - NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For - efficiency and convenience, the input and output arrays of the fitting - function (and its Jacobians) are passed to FORTRAN without transposition. - Therefore, where the ODRPACK documentation says that the X array is of shape - (N, M), it will be passed to the Python function as an array of shape (M, N). - If M==1, the one-dimensional case, then nothing matters; if M>1, then your - Python functions will be dealing with arrays that are indexed in reverse of - the ODRPACK documentation. No real biggie, but watch out for your indexing of - the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th - observation will be returned as jacd[j, i, n]. Except for the Jacobians, it - really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course, - you can always use the transpose() function from scipy explicitly. - -* Examples -- See the accompanying file test/test.py for examples of how to set - up fits of your own. Some are taken from the User's Guide; some are from - other sources. 
- -* Models -- Some common models are instantiated in the accompanying module - models.py . Contributions are welcome. - -Credits -======= - -* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs. - -Robert Kern -robert.kern@gmail.com - -""" - -import numpy -from scipy.odr import __odrpack - -__all__ = ['odr', 'odr_error', 'odr_stop', 'Data', 'RealData', 'Model', - 'Output', 'ODR'] - - -odr = __odrpack.odr -odr_error = __odrpack.odr_error -odr_stop = __odrpack.odr_stop - - -def _conv(obj, dtype=None): - """ Convert an object to the preferred form for input to the odr routine. - """ - - if obj is None: - return obj - else: - if dtype is None: - obj = numpy.asarray(obj) - else: - obj = numpy.asarray(obj, dtype) - if obj.shape == (): - # Scalar. - return obj.dtype.type(obj) - else: - return obj - - -def _report_error(info): - """ Interprets the return code of the odr routine. - - Parameters - ---------- - info : int - The return code of the odr routine. - - Returns - ------- - problems : list(str) - A list of messages about why the odr() routine stopped. 
- """ - - stopreason = ('Blank', - 'Sum of squares convergence', - 'Parameter convergence', - 'Both sum of squares and parameter convergence', - 'Iteration limit reached')[info % 5] - - if info >= 5: - # questionable results or fatal error - - I = (info/10000 % 10, - info/1000 % 10, - info/100 % 10, - info/10 % 10, - info % 10) - problems = [] - - if I[0] == 0: - if I[1] != 0: - problems.append('Derivatives possibly not correct') - if I[2] != 0: - problems.append('Error occurred in callback') - if I[3] != 0: - problems.append('Problem is not full rank at solution') - problems.append(stopreason) - elif I[0] == 1: - if I[1] != 0: - problems.append('N < 1') - if I[2] != 0: - problems.append('M < 1') - if I[3] != 0: - problems.append('NP < 1 or NP > N') - if I[4] != 0: - problems.append('NQ < 1') - elif I[0] == 2: - if I[1] != 0: - problems.append('LDY and/or LDX incorrect') - if I[2] != 0: - problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect') - if I[3] != 0: - problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect') - if I[4] != 0: - problems.append('LWORK and/or LIWORK too small') - elif I[0] == 3: - if I[1] != 0: - problems.append('STPB and/or STPD incorrect') - if I[2] != 0: - problems.append('SCLB and/or SCLD incorrect') - if I[3] != 0: - problems.append('WE incorrect') - if I[4] != 0: - problems.append('WD incorrect') - elif I[0] == 4: - problems.append('Error in derivatives') - elif I[0] == 5: - problems.append('Error occurred in callback') - elif I[0] == 6: - problems.append('Numerical error detected') - - return problems - - else: - return [stopreason] - - -class Data(object): - """ - scipy.odr.Data(x, y=None, we=None, wd=None, fix=None, meta={}) - - The Data class stores the data to fit. - - Parameters - ---------- - x : array_like - Input data for regression. - y : array_like, optional - Input data for regression. 
- we : array_like, optional - If `we` is a scalar, then that value is used for all data points (and - all dimensions of the response variable). - If `we` is a rank-1 array of length q (the dimensionality of the - response variable), then this vector is the diagonal of the covariant - weighting matrix for all data points. - If `we` is a rank-1 array of length n (the number of data points), then - the i'th element is the weight for the i'th response variable - observation (single-dimensional only). - If `we` is a rank-2 array of shape (q, q), then this is the full - covariant weighting matrix broadcast to each observation. - If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the - diagonal of the covariant weighting matrix for the i'th observation. - If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the - full specification of the covariant weighting matrix for each - observation. - If the fit is implicit, then only a positive scalar value is used. - wd : array_like, optional - If `wd` is a scalar, then that value is used for all data points - (and all dimensions of the input variable). If `wd` = 0, then the - covariant weighting matrix for each observation is set to the identity - matrix (so each dimension of each observation has the same weight). - If `wd` is a rank-1 array of length m (the dimensionality of the input - variable), then this vector is the diagonal of the covariant weighting - matrix for all data points. - If `wd` is a rank-1 array of length n (the number of data points), then - the i'th element is the weight for the i'th input variable observation - (single-dimensional only). - If `wd` is a rank-2 array of shape (m, m), then this is the full - covariant weighting matrix broadcast to each observation. - If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the - diagonal of the covariant weighting matrix for the i'th observation. 
- If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the - full specification of the covariant weighting matrix for each - observation. - fix : array_like of ints, optional - The `fix` argument is the same as ifixx in the class ODR. It is an - array of integers with the same shape as data.x that determines which - input observations are treated as fixed. One can use a sequence of - length m (the dimensionality of the input observations) to fix some - dimensions for all observations. A value of 0 fixes the observation, - a value > 0 makes it free. - meta : dict, optional - Freeform dictionary for metadata. - - Notes - ----- - Each argument is attached to the member of the instance of the same name. - The structures of `x` and `y` are described in the Model class docstring. - If `y` is an integer, then the Data instance can only be used to fit with - implicit models where the dimensionality of the response is equal to the - specified value of `y`. - - The `we` argument weights the effect a deviation in the response variable - has on the fit. The `wd` argument weights the effect a deviation in the - input variable has on the fit. To handle multidimensional inputs and - responses easily, the structure of these arguments has the n'th - dimensional axis first. These arguments heavily use the structured - arguments feature of ODRPACK to conveniently and flexibly support all - options. See the ODRPACK User's Guide for a full explanation of how these - weights are used in the algorithm. Basically, a higher value of the weight - for a particular data point makes a deviation at that point more - detrimental to the fit. - - """ - - def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}): - self.x = _conv(x) - self.y = _conv(y) - self.we = _conv(we) - self.wd = _conv(wd) - self.fix = _conv(fix) - self.meta = meta - - - def set_meta(self, **kwds): - """ Update the metadata dictionary with the keywords and data provided - by keywords. 
- - Examples - -------- - data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay") - """ - - self.meta.update(kwds) - - - def __getattr__(self, attr): - """ Dispatch aatribute access to the metadata dictionary. - """ - - if attr in self.meta.keys(): - return self.meta[attr] - else: - raise AttributeError("'%s' not in metadata" % attr) - - -class RealData(Data): - """ The RealData class stores the weightings as actual standard deviations - and/or covariances. - - The weights needed for ODRPACK are generated on-the-fly with __getattr__ - trickery. - - sx and sy are standard deviations of x and y and are converted to weights by - dividing 1.0 by their squares. - - E.g. wd = 1./numpy.power(sx, 2) - - covx and covy are arrays of covariance matrices and are converted to weights - by performing a matrix inversion on each observation's covariance matrix. - - E.g. we[i] = numpy.linalg.inv(covy[i]) # i in range(len(covy)) - # if covy.shape == (n,q,q) - - These arguments follow the same structured argument conventions as wd and we - only restricted by their natures: sx and sy can't be rank-3, but covx and - covy can be. - - Only set *either* sx or covx (not both). Setting both will raise an - exception. Same with sy and covy. - - The argument and member fix is the same as Data.fix and ODR.ifixx: - It is an array of integers with the same shape as data.x that determines - which input observations are treated as fixed. One can use a sequence of - length m (the dimensionality of the input observations) to fix some - dimensions for all observations. A value of 0 fixes the observation, - a value > 0 makes it free. 
- """ - - def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None, - fix=None, meta={}): - if (sx is not None) and (covx is not None): - raise ValueError("cannot set both sx and covx") - if (sy is not None) and (covy is not None): - raise ValueError("cannot set both sy and covy") - - # Set flags for __getattr__ - self._ga_flags = {} - if sx is not None: - self._ga_flags['wd'] = 'sx' - else: - self._ga_flags['wd'] = 'covx' - if sy is not None: - self._ga_flags['we'] = 'sy' - else: - self._ga_flags['we'] = 'covy' - - self.x = _conv(x) - self.y = _conv(y) - self.sx = _conv(sx) - self.sy = _conv(sy) - self.covx = _conv(covx) - self.covy = _conv(covy) - self.fix = _conv(fix) - self.meta = meta - - - def _sd2wt(self, sd): - """ Convert standard deviation to weights. - """ - - return 1./numpy.power(sd, 2) - - def _cov2wt(self, cov): - """ Convert covariance matrix(-ices) to weights. - """ - - from numpy.dual import inv - - if len(cov.shape) == 2: - return inv(cov) - else: - weights = numpy.zeros(cov.shape, float) - - for i in range(cov.shape[-1]): # n - weights[:,:,i] = inv(cov[:,:,i]) - - return weights - - - def __getattr__(self, attr): - lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx), - ('wd', 'covx'): (self._cov2wt, self.covx), - ('we', 'sy'): (self._sd2wt, self.sy), - ('we', 'covy'): (self._cov2wt, self.covy)} - - - if attr not in ('wd', 'we'): - if attr in self.meta.keys(): - return self.meta[attr] - else: - raise AttributeError("'%s' not in metadata" % attr) - else: - func, arg = lookup_tbl[(attr, self._ga_flags[attr])] - - if arg is not None: - return apply(func, (arg,)) - else: - return None - - -class Model(object): - """ - The Model class stores information about the function you wish to fit. - - It stores the function itself, at the least, and optionally stores - functions which compute the Jacobians used during fitting. 
Also, one - can provide a function that will provide reasonable starting values - for the fit parameters possibly given the set of data. - - Parameters - ---------- - fcn : function - fcn(beta, x) --> y - fjacb : function - Jacobian of fcn wrt the fit parameters beta. - - fjacb(beta, x) --> @f_i(x,B)/@B_j - fjacd : function - Jacobian of fcn wrt the (possibly multidimensional) input - variable. - - fjacd(beta, x) --> @f_i(x,B)/@x_j - extra_args : tuple, optional - If specified, `extra_args` should be a tuple of extra - arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called - by `apply(fcn, (beta, x) + extra_args)` - estimate : array_like of rank-1 - Provides estimates of the fit parameters from the data - - estimate(data) --> estbeta - implicit : boolean - If TRUE, specifies that the model - is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit - against - meta : dict, optional - freeform dictionary of metadata for the model - - Notes - ----- - Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and - return a NumPy array. The `estimate` object takes an instance of the - Data class. - - Here are the rules for the shapes of the argument and return arrays : - - x -- if the input data is single-dimensional, then x is rank-1 - array; i.e. x = array([1, 2, 3, ...]); x.shape = (n,) - If the input data is multi-dimensional, then x is a rank-2 array; - i.e., x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n) In - all cases, it has the same shape as the input data array passed to - odr(). m is the dimensionality of the input data, n is the number - of observations. - - y -- if the response variable is single-dimensional, then y is a - rank-1 array, i.e., y = array([2, 4, ...]); y.shape = (n,) - If the response variable is multi-dimensional, then y is a rank-2 - array, i.e., y = array([[2, 4, ...], [3, 6, ...]]); y.shape = - (q, n) where q is the dimensionality of the response variable. 
- - beta -- rank-1 array of length p where p is the number of parameters; - i.e. beta = array([B_1, B_2, ..., B_p]) - - fjacb -- if the response variable is multi-dimensional, then the - return array's shape is (q, p, n) such that fjacb(x,beta)[l,k,i] = - @f_l(X,B)/@B_k evaluated at the i'th data point. If q == 1, then - the return array is only rank-2 and with shape (p, n). - - fjacd -- as with fjacb, only the return array's shape is (q, m, n) - such that fjacd(x,beta)[l,j,i] = @f_l(X,B)/@X_j at the i'th data - point. If q == 1, then the return array's shape is (m, n). If - m == 1, the shape is (q, n). If m == q == 1, the shape is (n,). - - """ - - def __init__(self, fcn, fjacb=None, fjacd=None, - extra_args=None, estimate=None, implicit=0, meta=None): - - self.fcn = fcn - self.fjacb = fjacb - self.fjacd = fjacd - - if extra_args is not None: - extra_args = tuple(extra_args) - - self.extra_args = extra_args - self.estimate = estimate - self.implicit = implicit - self.meta = meta - - - def set_meta(self, **kwds): - """ Update the metadata dictionary with the keywords and data provided - here. - - Examples - -------- - set_meta(name="Exponential", equation="y = a exp(b x) + c") - """ - - self.meta.update(kwds) - - - def __getattr__(self, attr): - """ Dispatch attribute access to the metadata. - """ - - if attr in self.meta.keys(): - return self.meta[attr] - else: - raise AttributeError("'%s' not in metadata" % attr) - - -class Output(object): - """ - The Output class stores the output of an ODR run. - - Takes one argument for initialization, the return value from the - function `odr`. - - Attributes - ---------- - beta : ndarray - Estimated parameter values, of shape (q,). - sd_beta : ndarray - Standard errors of the estimated parameters, of shape (p,). - cov_beta : ndarray - Covariance matrix of the estimated parameters, of shape (p,p). - delta : ndarray, optional - Array of estimated errors in input variables, of same shape as `x`. 
- eps : ndarray, optional - Array of estimated errors in response variables, of same shape as `y`. - xplus : ndarray, optional - Array of ``x + delta``. - y : ndarray, optional - Array ``y = fcn(x + delta)``. - res_var : float, optional - Residual variance. - sum_sqare : float, optional - Sum of squares error. - sum_square_delta : float, optional - Sum of squares of delta error. - sum_square_eps : float, optional - Sum of squares of eps error. - inv_condnum : float, optional - Inverse condition number (cf. ODRPACK UG p. 77). - rel_error : float, optional - Relative error in function values computed within fcn. - work : ndarray, optional - Final work array. - work_ind : dict, optional - Indices into work for drawing out values (cf. ODRPACK UG p. 83). - info : int, optional - Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38). - stopreason : list of str, optional - `info` interpreted into English. - - Notes - ----- - The attributes listed as "optional" above are only present if `odr` was run - with ``full_output=1``. - - """ - - def __init__(self, output): - self.beta = output[0] - self.sd_beta = output[1] - self.cov_beta = output[2] - - if len(output) == 4: - # full output - self.__dict__.update(output[3]) - self.stopreason = _report_error(self.info) - - - def pprint(self): - """ Pretty-print important results. - """ - - print 'Beta:', self.beta - print 'Beta Std Error:', self.sd_beta - print 'Beta Covariance:', self.cov_beta - if hasattr(self, 'info'): - print 'Residual Variance:',self.res_var - print 'Inverse Condition #:', self.inv_condnum - print 'Reason(s) for Halting:' - for r in self.stopreason: - print ' %s' % r - - -class ODR(object): - """ - The ODR class gathers all information and coordinates the running of the - main fitting routine. - - Members of instances of the ODR class have the same names as the arguments - to the initialization routine. 
- - Parameters - ---------- - data : Data class instance - instance of the Data class - model : Model class instance - instance of the Model class - beta0 : array_like of rank-1 - a rank-1 sequence of initial parameter values. Optional if - model provides an "estimate" function to estimate these values. - delta0 : array_like of floats of rank-1, optional - a (double-precision) float array to hold the initial values of - the errors in the input variables. Must be same shape as data.x - ifixb : array_like of ints of rank-1, optional - sequence of integers with the same length as beta0 that determines - which parameters are held fixed. A value of 0 fixes the parameter, - a value > 0 makes the parameter free. - ifixx : array_like of ints with same shape as data.x, optional - an array of integers with the same shape as data.x that determines - which input observations are treated as fixed. One can use a sequence - of length m (the dimensionality of the input observations) to fix some - dimensions for all observations. A value of 0 fixes the observation, - a value > 0 makes it free. - job : int, optional - an integer telling ODRPACK what tasks to perform. See p. 31 of the - ODRPACK User's Guide if you absolutely must set the value here. Use the - method set_job post-initialization for a more readable interface. - iprint : int, optional - an integer telling ODRPACK what to print. See pp. 33-34 of the - ODRPACK User's Guide if you absolutely must set the value here. Use the - method set_iprint post-initialization for a more readable interface. - errfile : str, optional - string with the filename to print ODRPACK errors to. *Do Not Open - This File Yourself!* - rptfile : str, optional - string with the filename to print ODRPACK summaries to. *Do Not - Open This File Yourself!* - ndigit : int, optional - integer specifying the number of reliable digits in the computation - of the function. - taufac : float, optional - float specifying the initial trust region. 
The default value is 1. - The initial trust region is equal to taufac times the length of the - first computed Gauss-Newton step. taufac must be less than 1. - sstol : float, optional - float specifying the tolerance for convergence based on the relative - change in the sum-of-squares. The default value is eps**(1/2) where eps - is the smallest value such that 1 + eps > 1 for double precision - computation on the machine. sstol must be less than 1. - partol : float, optional - float specifying the tolerance for convergence based on the relative - change in the estimated parameters. The default value is eps**(2/3) for - explicit models and eps**(1/3) for implicit models. partol must be less - than 1. - maxit : int, optional - integer specifying the maximum number of iterations to perform. For - first runs, maxit is the total number of iterations performed and - defaults to 50. For restarts, maxit is the number of additional - iterations to perform and defaults to 10. - stpb : array_like, optional - sequence (len(stpb) == len(beta0)) of relative step sizes to compute - finite difference derivatives wrt the parameters. - stpd : optional - array (stpd.shape == data.x.shape or stpd.shape == (m,)) of relative - step sizes to compute finite difference derivatives wrt the input - variable errors. If stpd is a rank-1 array with length m (the - dimensionality of the input variable), then the values are broadcast to - all observations. - sclb : array_like, optional - sequence (len(stpb) == len(beta0)) of scaling factors for the - parameters. The purpose of these scaling factors are to scale all of - the parameters to around unity. Normally appropriate scaling factors - are computed if this argument is not specified. Specify them yourself - if the automatic procedure goes awry. - scld : array_like, optional - array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling - factors for the *errors* in the input variables. 
Again, these factors - are automatically computed if you do not provide them. If scld.shape == - (m,), then the scaling factors are broadcast to all observations. - work : ndarray, optional - array to hold the double-valued working data for ODRPACK. When - restarting, takes the value of self.output.work. - iwork : ndarray, optional - array to hold the integer-valued working data for ODRPACK. When - restarting, takes the value of self.output.iwork. - output : Output class instance - an instance if the Output class containing all of the returned - data from an invocation of ODR.run() or ODR.restart() - - """ - - def __init__(self, data, model, beta0=None, delta0=None, ifixb=None, - ifixx=None, job=None, iprint=None, errfile=None, rptfile=None, - ndigit=None, taufac=None, sstol=None, partol=None, maxit=None, - stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None): - - self.data = data - self.model = model - - if beta0 is None: - if self.model.estimate is not None: - self.beta0 = _conv(self.model.estimate(self.data)) - else: - raise ValueError( - "must specify beta0 or provide an estimater with the model" - ) - else: - self.beta0 = _conv(beta0) - - self.delta0 = _conv(delta0) - # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit - # platforms. - # XXX: some other FORTRAN compilers may not agree. - self.ifixx = _conv(ifixx, dtype=numpy.int32) - self.ifixb = _conv(ifixb, dtype=numpy.int32) - self.job = job - self.iprint = iprint - self.errfile = errfile - self.rptfile = rptfile - self.ndigit = ndigit - self.taufac = taufac - self.sstol = sstol - self.partol = partol - self.maxit = maxit - self.stpb = _conv(stpb) - self.stpd = _conv(stpd) - self.sclb = _conv(sclb) - self.scld = _conv(scld) - self.work = _conv(work) - self.iwork = _conv(iwork) - - self.output = None - - self._check() - - def _check(self): - """ Check the inputs for consistency, but don't bother checking things - that the builtin function odr will check. 
- """ - - x_s = list(self.data.x.shape) - - if isinstance(self.data.y, numpy.ndarray): - y_s = list(self.data.y.shape) - if self.model.implicit: - raise odr_error("an implicit model cannot use response data") - else: - # implicit model with q == self.data.y - y_s = [self.data.y, x_s[-1]] - if not self.model.implicit: - raise odr_error("an explicit model needs response data") - self.set_job(fit_type=1) - - if x_s[-1] != y_s[-1]: - raise odr_error("number of observations do not match") - - n = x_s[-1] - - if len(x_s) == 2: - m = x_s[0] - else: - m = 1 - if len(y_s) == 2: - q = y_s[0] - else: - q = 1 - - p = len(self.beta0) - - # permissible output array shapes - - fcn_perms = [(q, n)] - fjacd_perms = [(q, m, n)] - fjacb_perms = [(q, p, n)] - - if q == 1: - fcn_perms.append((n,)) - fjacd_perms.append((m, n)) - fjacb_perms.append((p, n)) - if m == 1: - fjacd_perms.append((q, n)) - if p == 1: - fjacb_perms.append((q, n)) - if m == q == 1: - fjacd_perms.append((n,)) - if p == q == 1: - fjacb_perms.append((n,)) - - # try evaluating the supplied functions to make sure they provide - # sensible outputs - - arglist = (self.beta0, self.data.x) - if self.model.extra_args is not None: - arglist = arglist + self.model.extra_args - res = self.model.fcn(*arglist) - - if res.shape not in fcn_perms: - print res.shape - print fcn_perms - raise odr_error("fcn does not output %s-shaped array" % y_s) - - if self.model.fjacd is not None: - res = self.model.fjacd(*arglist) - if res.shape not in fjacd_perms: - raise odr_error( - "fjacd does not output %s-shaped array" % (q, m, n)) - if self.model.fjacb is not None: - res = self.model.fjacb(*arglist) - if res.shape not in fjacb_perms: - raise odr_error( - "fjacb does not output %s-shaped array" % (q, p, n)) - - # check shape of delta0 - - if self.delta0 is not None and self.delta0.shape != self.data.x.shape: - raise odr_error( - "delta0 is not a %s-shaped array" % self.data.x.shape) - - def _gen_work(self): - """ Generate a suitable work 
array if one does not already exist. - """ - - n = self.data.x.shape[-1] - p = self.beta0.shape[0] - - if len(self.data.x.shape) == 2: - m = self.data.x.shape[0] - else: - m = 1 - - if self.model.implicit: - q = self.data.y - elif len(self.data.y.shape) == 2: - q = self.data.y.shape[0] - else: - q = 1 - - if self.data.we is None: - ldwe = ld2we = 1 - elif len(self.data.we.shape) == 3: - ld2we, ldwe = self.data.we.shape[1:] - else: - # Okay, this isn't precisely right, but for this calculation, - # it's fine - ldwe = 1 - ld2we = self.data.we.shape[1] - - if self.job % 10 < 2: - # ODR not OLS - lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + - 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) - else: - # OLS not ODR - lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + - 5*q + q*(p+m) + ldwe*ld2we*q) - - if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\ - and self.work.dtype.str.endswith('f8'): - # the existing array is fine - return - else: - self.work = numpy.zeros((lwork,), float) - - - def set_job(self, fit_type=None, deriv=None, var_calc=None, - del_init=None, restart=None): - """ - Sets the "job" parameter is a hopefully comprehensible way. - - If an argument is not specified, then the value is left as is. The - default value from class initialization is for all of these options set - to 0. 
- - Parameters - ---------- - fit_type : {0, 1, 2} int - 0 -> explicit ODR - - 1 -> implicit ODR - - 2 -> ordinary least-squares - deriv : {0, 1, 2, 3} int - 0 -> forward finite differences - - 1 -> central finite differences - - 2 -> user-supplied derivatives (Jacobians) with results - checked by ODRPACK - - 3 -> user-supplied derivatives, no checking - var_calc : {0, 1, 2} int - 0 -> calculate asymptotic covariance matrix and fit - parameter uncertainties (V_B, s_B) using derivatives - recomputed at the final solution - - 1 -> calculate V_B and s_B using derivatives from last iteration - - 2 -> do not calculate V_B and s_B - del_init : {0, 1} int - 0 -> initial input variable offsets set to 0 - - 1 -> initial offsets provided by user in variable "work" - restart : {0, 1} int - 0 -> fit is not a restart - - 1 -> fit is a restart - - Notes - ----- - The permissible values are different from those given on pg. 31 of the - ODRPACK User's Guide only in that one cannot specify numbers greater than - the last value for each variable. - - If one does not supply functions to compute the Jacobians, the fitting - procedure will change deriv to 0, finite differences, as a default. To - initialize the input variable offsets by yourself, set del_init to 1 and - put the offsets into the "work" variable correctly. 
- - """ - - if self.job is None: - job_l = [0, 0, 0, 0, 0] - else: - job_l = [self.job / 10000 % 10, - self.job / 1000 % 10, - self.job / 100 % 10, - self.job / 10 % 10, - self.job % 10] - - if fit_type in (0, 1, 2): - job_l[4] = fit_type - if deriv in (0, 1, 2, 3): - job_l[3] = deriv - if var_calc in (0, 1, 2): - job_l[2] = var_calc - if del_init in (0, 1): - job_l[1] = del_init - if restart in (0, 1): - job_l[0] = restart - - self.job = (job_l[0]*10000 + job_l[1]*1000 + - job_l[2]*100 + job_l[3]*10 + job_l[4]) - - - def set_iprint(self, init=None, so_init=None, - iter=None, so_iter=None, iter_step=None, final=None, so_final=None): - """ Set the iprint parameter for the printing of computation reports. - - If any of the arguments are specified here, then they are set in the - iprint member. If iprint is not set manually or with this method, then - ODRPACK defaults to no printing. If no filename is specified with the - member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to - print to stdout in addition to the specified filename by setting the - so_* arguments to this function, but one cannot specify to print to - stdout but not a file since one can do that by not specifying a rptfile - filename. - - There are three reports: initialization, iteration, and final reports. - They are represented by the arguments init, iter, and final - respectively. The permissible values are 0, 1, and 2 representing "no - report", "short report", and "long report" respectively. - - The argument iter_step (0 <= iter_step <= 9) specifies how often to make - the iteration report; the report will be made for every iter_step'th - iteration starting with iteration one. If iter_step == 0, then no - iteration report is made, regardless of the other arguments. - - If the rptfile is None, then any so_* arguments supplied will raise an - exception. 
- """ - if self.iprint is None: - self.iprint = 0 - - ip = [self.iprint / 1000 % 10, - self.iprint / 100 % 10, - self.iprint / 10 % 10, - self.iprint % 10] - - # make a list to convert iprint digits to/from argument inputs - # rptfile, stdout - ip2arg = [[0, 0], # none, none - [1, 0], # short, none - [2, 0], # long, none - [1, 1], # short, short - [2, 1], # long, short - [1, 2], # short, long - [2, 2]] # long, long - - if (self.rptfile is None and - (so_init is not None or - so_iter is not None or - so_final is not None)): - raise odr_error( - "no rptfile specified, cannot output to stdout twice") - - iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] - - if init is not None: - iprint_l[0] = init - if so_init is not None: - iprint_l[1] = so_init - if iter is not None: - iprint_l[2] = iter - if so_iter is not None: - iprint_l[3] = so_iter - if final is not None: - iprint_l[4] = final - if so_final is not None: - iprint_l[5] = so_final - - if iter_step in range(10): - # 0..9 - ip[2] = iter_step - - ip[0] = ip2arg.index(iprint_l[0:2]) - ip[1] = ip2arg.index(iprint_l[2:4]) - ip[3] = ip2arg.index(iprint_l[4:6]) - - self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] - - - def run(self): - """ Run the fitting routine with all of the information given. - - Returns - ------- - output : Output instance - This object is also assigned to the attribute .output . 
- """ - - args = (self.model.fcn, self.beta0, self.data.y, self.data.x) - kwds = {'full_output': 1} - kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', - 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', - 'stpd', 'sclb', 'scld', 'work', 'iwork'] - - if self.delta0 is not None and self.job % 1000 / 10 == 1: - # delta0 provided and fit is not a restart - self._gen_work() - - d0 = numpy.ravel(self.delta0) - - self.work[:len(d0)] = d0 - - # set the kwds from other objects explicitly - if self.model.fjacb is not None: - kwds['fjacb'] = self.model.fjacb - if self.model.fjacd is not None: - kwds['fjacd'] = self.model.fjacd - if self.data.we is not None: - kwds['we'] = self.data.we - if self.data.wd is not None: - kwds['wd'] = self.data.wd - if self.model.extra_args is not None: - kwds['extra_args'] = self.model.extra_args - - # implicitly set kwds from self's members - for attr in kwd_l: - obj = getattr(self, attr) - if obj is not None: - kwds[attr] = obj - - self.output = Output(apply(odr, args, kwds)) - - return self.output - - - def restart(self, iter=None): - """ Restarts the run with iter more iterations. - - Parameters - ---------- - iter : int, optional - ODRPACK's default for the number of new iterations is 10. - - Returns - ------- - output : Output instance - This object is also assigned to the attribute .output . - """ - - if self.output is None: - raise odr_error("cannot restart: run() has not been called before") - - self.set_job(restart=1) - self.work = self.output.work - self.iwork = self.output.iwork - - self.maxit = iter - - return self.run() diff --git a/scipy-0.10.1/scipy/odr/odrpack/d_lpk.f b/scipy-0.10.1/scipy/odr/odrpack/d_lpk.f deleted file mode 100644 index 484eb0a1e8..0000000000 --- a/scipy-0.10.1/scipy/odr/odrpack/d_lpk.f +++ /dev/null @@ -1,1211 +0,0 @@ -*DCHEX - SUBROUTINE DCHEX(R,LDR,P,K,L,Z,LDZ,NZ,C,S,JOB) -C***BEGIN PROLOGUE DCHEX -C***DATE WRITTEN 780814 (YYMMDD) -C***REVISION DATE 820801 (YYMMDD) -C***CATEGORY NO. 
D7B -C***KEYWORDS CHOLESKY DECOMPOSITION,DOUBLE PRECISION,EXCHANGE, -C LINEAR ALGEBRA,LINPACK,MATRIX,POSITIVE DEFINITE -C***AUTHOR STEWART, G. W., (U. OF MARYLAND) -C***PURPOSE UPDATES THE CHOLESKY FACTORIZATION A=TRANS(R)*R OF A -C POSITIVE DEFINITE MATRIX A OF ORDER P UNDER DIAGONAL -C PERMUTATIONS OF THE FORM TRANS(E)*A*E WHERE E IS A -C PERMUTATION MATRIX. -C***DESCRIPTION -C DCHEX UPDATES THE CHOLESKY FACTORIZATION -C A = TRANS(R)*R -C OF A POSITIVE DEFINITE MATRIX A OF ORDER P UNDER DIAGONAL -C PERMUTATIONS OF THE FORM -C TRANS(E)*A*E -C WHERE E IS A PERMUTATION MATRIX. SPECIFICALLY, GIVEN -C AN UPPER TRIANGULAR MATRIX R AND A PERMUTATION MATRIX -C E (WHICH IS SPECIFIED BY K, L, AND JOB), DCHEX DETERMINES -C AN ORTHOGONAL MATRIX U SUCH THAT -C U*R*E = RR, -C WHERE RR IS UPPER TRIANGULAR. AT THE USERS OPTION, THE -C TRANSFORMATION U WILL BE MULTIPLIED INTO THE ARRAY Z. -C IF A = TRANS(X)*X, SO THAT R IS THE TRIANGULAR PART OF THE -C QR FACTORIZATION OF X, THEN RR IS THE TRIANGULAR PART OF THE -C QR FACTORIZATION OF X*E, I.E. X WITH ITS COLUMNS PERMUTED. -C FOR A LESS TERSE DESCRIPTION OF WHAT DCHEX DOES AND HOW -C IT MAY BE APPLIED, SEE THE LINPACK GUIDE. -C THE MATRIX Q IS DETERMINED AS THE PRODUCT U(L-K)*...*U(1) -C OF PLANE ROTATIONS OF THE FORM -C ( C(I) S(I) ) -C ( ) , -C ( -S(I) C(I) ) -C WHERE C(I) IS DOUBLE PRECISION. THE ROWS THESE ROTATIONS OPERATE -C ON ARE DESCRIBED BELOW. -C THERE ARE TWO TYPES OF PERMUTATIONS, WHICH ARE DETERMINED -C BY THE VALUE OF JOB. -C 1. RIGHT CIRCULAR SHIFT (JOB = 1). -C THE COLUMNS ARE REARRANGED IN THE FOLLOWING ORDER. -C 1,...,K-1,L,K,K+1,...,L-1,L+1,...,P. -C U IS THE PRODUCT OF L-K ROTATIONS U(I), WHERE U(I) -C ACTS IN THE (L-I,L-I+1)-PLANE. -C 2. LEFT CIRCULAR SHIFT (JOB = 2). -C THE COLUMNS ARE REARRANGED IN THE FOLLOWING ORDER -C 1,...,K-1,K+1,K+2,...,L,K,L+1,...,P. -C U IS THE PRODUCT OF L-K ROTATIONS U(I), WHERE U(I) -C ACTS IN THE (K+I-1,K+I)-PLANE. -C ON ENTRY -C R DOUBLE PRECISION(LDR,P), WHERE LDR .GE. P. 
-C R CONTAINS THE UPPER TRIANGULAR FACTOR -C THAT IS TO BE UPDATED. ELEMENTS OF R -C BELOW THE DIAGONAL ARE NOT REFERENCED. -C LDR INTEGER. -C LDR IS THE LEADING DIMENSION OF THE ARRAY R. -C P INTEGER. -C P IS THE ORDER OF THE MATRIX R. -C K INTEGER. -C K IS THE FIRST COLUMN TO BE PERMUTED. -C L INTEGER. -C L IS THE LAST COLUMN TO BE PERMUTED. -C L MUST BE STRICTLY GREATER THAN K. -C Z DOUBLE PRECISION(LDZ,N)Z), WHERE LDZ .GE. P. -C Z IS AN ARRAY OF NZ P-VECTORS INTO WHICH THE -C TRANSFORMATION U IS MULTIPLIED. Z IS -C NOT REFERENCED IF NZ = 0. -C LDZ INTEGER. -C LDZ IS THE LEADING DIMENSION OF THE ARRAY Z. -C NZ INTEGER. -C NZ IS THE NUMBER OF COLUMNS OF THE MATRIX Z. -C JOB INTEGER. -C JOB DETERMINES THE TYPE OF PERMUTATION. -C JOB = 1 RIGHT CIRCULAR SHIFT. -C JOB = 2 LEFT CIRCULAR SHIFT. -C ON RETURN -C R CONTAINS THE UPDATED FACTOR. -C Z CONTAINS THE UPDATED MATRIX Z. -C C DOUBLE PRECISION(P). -C C CONTAINS THE COSINES OF THE TRANSFORMING ROTATIONS. -C S DOUBLE PRECISION(P). -C S CONTAINS THE SINES OF THE TRANSFORMING ROTATIONS. -C LINPACK. THIS VERSION DATED 08/14/78 . -C G. W. STEWART, UNIVERSITY OF MARYLAND, ARGONNE NATIONAL LAB. -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***ROUTINES CALLED DROTG -C***END PROLOGUE DCHEX - -C...SCALAR ARGUMENTS - INTEGER - + JOB,K,L,LDR,LDZ,NZ,P - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + C(*),R(LDR,*),S(*),Z(LDZ,*) - -C...LOCAL SCALARS - DOUBLE PRECISION - + T,T1 - INTEGER - + I,II,IL,IU,J,JJ,KM1,KP1,LM1,LMK - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DROTG - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MAX0,MIN0 - - -C***FIRST EXECUTABLE STATEMENT DCHEX - - - KM1 = K - 1 - KP1 = K + 1 - LMK = L - K - LM1 = L - 1 - -C PERFORM THE APPROPRIATE TASK. - - GO TO (10,130), JOB - -C RIGHT CIRCULAR SHIFT. - - 10 CONTINUE - -C REORDER THE COLUMNS. 
- - DO 20 I = 1, L - II = L - I + 1 - S(I) = R(II,L) - 20 CONTINUE - DO 40 JJ = K, LM1 - J = LM1 - JJ + K - DO 30 I = 1, J - R(I,J+1) = R(I,J) - 30 CONTINUE - R(J+1,J+1) = 0.0D0 - 40 CONTINUE - IF (K .EQ. 1) GO TO 60 - DO 50 I = 1, KM1 - II = L - I + 1 - R(I,K) = S(II) - 50 CONTINUE - 60 CONTINUE - -C CALCULATE THE ROTATIONS. - - T = S(1) - DO 70 I = 1, LMK - T1 = S(I) - CALL DROTG(S(I+1),T,C(I),T1) - S(I) = T1 - T = S(I+1) - 70 CONTINUE - R(K,K) = T - DO 90 J = KP1, P - IL = MAX0(1,L-J+1) - DO 80 II = IL, LMK - I = L - II - T = C(II)*R(I,J) + S(II)*R(I+1,J) - R(I+1,J) = C(II)*R(I+1,J) - S(II)*R(I,J) - R(I,J) = T - 80 CONTINUE - 90 CONTINUE - -C IF REQUIRED, APPLY THE TRANSFORMATIONS TO Z. - - IF (NZ .LT. 1) GO TO 120 - DO 110 J = 1, NZ - DO 100 II = 1, LMK - I = L - II - T = C(II)*Z(I,J) + S(II)*Z(I+1,J) - Z(I+1,J) = C(II)*Z(I+1,J) - S(II)*Z(I,J) - Z(I,J) = T - 100 CONTINUE - 110 CONTINUE - 120 CONTINUE - GO TO 260 - -C LEFT CIRCULAR SHIFT - - 130 CONTINUE - -C REORDER THE COLUMNS - - DO 140 I = 1, K - II = LMK + I - S(II) = R(I,K) - 140 CONTINUE - DO 160 J = K, LM1 - DO 150 I = 1, J - R(I,J) = R(I,J+1) - 150 CONTINUE - JJ = J - KM1 - S(JJ) = R(J+1,J+1) - 160 CONTINUE - DO 170 I = 1, K - II = LMK + I - R(I,L) = S(II) - 170 CONTINUE - DO 180 I = KP1, L - R(I,L) = 0.0D0 - 180 CONTINUE - -C REDUCTION LOOP. - - DO 220 J = K, P - IF (J .EQ. K) GO TO 200 - -C APPLY THE ROTATIONS. - - IU = MIN0(J-1,L-1) - DO 190 I = K, IU - II = I - K + 1 - T = C(II)*R(I,J) + S(II)*R(I+1,J) - R(I+1,J) = C(II)*R(I+1,J) - S(II)*R(I,J) - R(I,J) = T - 190 CONTINUE - 200 CONTINUE - IF (J .GE. L) GO TO 210 - JJ = J - K + 1 - T = S(JJ) - CALL DROTG(R(J,J),T,C(JJ),S(JJ)) - 210 CONTINUE - 220 CONTINUE - -C APPLY THE ROTATIONS TO Z. - - IF (NZ .LT. 
1) GO TO 250 - DO 240 J = 1, NZ - DO 230 I = K, LM1 - II = I - KM1 - T = C(II)*Z(I,J) + S(II)*Z(I+1,J) - Z(I+1,J) = C(II)*Z(I+1,J) - S(II)*Z(I,J) - Z(I,J) = T - 230 CONTINUE - 240 CONTINUE - 250 CONTINUE - 260 CONTINUE - RETURN - END -*DPODI - SUBROUTINE DPODI(A,LDA,N,DET,JOB) -C***BEGIN PROLOGUE DPODI -C***DATE WRITTEN 780814 (YYMMDD) -C***REVISION DATE 820801 (YYMMDD) -C***CATEGORY NO. D2B1B,D3B1B -C***KEYWORDS DETERMINANT,DOUBLE PRECISION,FACTOR,INVERSE, -C LINEAR ALGEBRA,LINPACK,MATRIX,POSITIVE DEFINITE -C***AUTHOR MOLER, C. B., (U. OF NEW MEXICO) -C***PURPOSE COMPUTES THE DETERMINANT AND INVERSE OF A CERTAIN DOUBLE -C PRECISION SYMMETRIC POSITIVE DEFINITE MATRIX (SEE ABSTRACT) -C USING THE FACTORS COMPUTED BY DPOCO, DPOFA OR DQRDC. -C***DESCRIPTION -C DPODI COMPUTES THE DETERMINANT AND INVERSE OF A CERTAIN -C DOUBLE PRECISION SYMMETRIC POSITIVE DEFINITE MATRIX (SEE BELOW) -C USING THE FACTORS COMPUTED BY DPOCO, DPOFA OR DQRDC. -C ON ENTRY -C A DOUBLE PRECISION(LDA, N) -C THE OUTPUT A FROM DPOCO OR DPOFA -C OR THE OUTPUT X FROM DQRDC. -C LDA INTEGER -C THE LEADING DIMENSION OF THE ARRAY A . -C N INTEGER -C THE ORDER OF THE MATRIX A . -C JOB INTEGER -C = 11 BOTH DETERMINANT AND INVERSE. -C = 01 INVERSE ONLY. -C = 10 DETERMINANT ONLY. -C ON RETURN -C A IF DPOCO OR DPOFA WAS USED TO FACTOR A , THEN -C DPODI PRODUCES THE UPPER HALF OF INVERSE(A) . -C IF DQRDC WAS USED TO DECOMPOSE X , THEN -C DPODI PRODUCES THE UPPER HALF OF INVERSE(TRANS(X)*X) -C WHERE TRANS(X) IS THE TRANSPOSE. -C ELEMENTS OF A BELOW THE DIAGONAL ARE UNCHANGED. -C IF THE UNITS DIGIT OF JOB IS ZERO, A IS UNCHANGED. -C DET DOUBLE PRECISION(2) -C DETERMINANT OF A OR OF TRANS(X)*X IF REQUESTED. -C OTHERWISE NOT REFERENCED. -C DETERMINANT = DET(1) * 10.0**DET(2) -C WITH 1.0 .LE. DET(1) .LT. 10.0 -C OR DET(1) .EQ. 0.0 . -C ERROR CONDITION -C A DIVISION BY ZERO WILL OCCUR IF THE INPUT FACTOR CONTAINS -C A ZERO ON THE DIAGONAL AND THE INVERSE IS REQUESTED. 
-C IT WILL NOT OCCUR IF THE SUBROUTINES ARE CALLED CORRECTLY -C AND IF DPOCO OR DPOFA HAS SET INFO .EQ. 0 . -C LINPACK. THIS VERSION DATED 08/14/78 . -C CLEVE MOLER, UNIVERSITY OF NEW MEXICO, ARGONNE NATIONAL LAB. -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***ROUTINES CALLED DAXPY,DSCAL -C***END PROLOGUE DPODI - -C...SCALAR ARGUMENTS - INTEGER JOB,LDA,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION A(LDA,*),DET(*) - -C...LOCAL SCALARS - DOUBLE PRECISION S,T - INTEGER I,J,JM1,K,KP1 - -C...EXTERNAL SUBROUTINES - EXTERNAL DAXPY,DSCAL - -C...INTRINSIC FUNCTIONS - INTRINSIC MOD - - -C***FIRST EXECUTABLE STATEMENT DPODI - - - IF (JOB/10 .EQ. 0) GO TO 70 - DET(1) = 1.0D0 - DET(2) = 0.0D0 - S = 10.0D0 - DO 50 I = 1, N - DET(1) = A(I,I)**2*DET(1) -C ...EXIT - IF (DET(1) .EQ. 0.0D0) GO TO 60 - 10 IF (DET(1) .GE. 1.0D0) GO TO 20 - DET(1) = S*DET(1) - DET(2) = DET(2) - 1.0D0 - GO TO 10 - 20 CONTINUE - 30 IF (DET(1) .LT. S) GO TO 40 - DET(1) = DET(1)/S - DET(2) = DET(2) + 1.0D0 - GO TO 30 - 40 CONTINUE - 50 CONTINUE - 60 CONTINUE - 70 CONTINUE - -C COMPUTE INVERSE(R) - - IF (MOD(JOB,10) .EQ. 0) GO TO 140 - DO 100 K = 1, N - A(K,K) = 1.0D0/A(K,K) - T = -A(K,K) - CALL DSCAL(K-1,T,A(1,K),1) - KP1 = K + 1 - IF (N .LT. KP1) GO TO 90 - DO 80 J = KP1, N - T = A(K,J) - A(K,J) = 0.0D0 - CALL DAXPY(K,T,A(1,K),1,A(1,J),1) - 80 CONTINUE - 90 CONTINUE - 100 CONTINUE - -C FORM INVERSE(R) * TRANS(INVERSE(R)) - - DO 130 J = 1, N - JM1 = J - 1 - IF (JM1 .LT. 1) GO TO 120 - DO 110 K = 1, JM1 - T = A(K,J) - CALL DAXPY(K,T,A(1,J),1,A(1,K),1) - 110 CONTINUE - 120 CONTINUE - T = A(J,J) - CALL DSCAL(J,T,A(1,J),1) - 130 CONTINUE - 140 CONTINUE - RETURN - END -*DQRDC - SUBROUTINE DQRDC(X,LDX,N,P,QRAUX,JPVT,WORK,JOB) -C***BEGIN PROLOGUE DQRDC -C***DATE WRITTEN 780814 (YYMMDD) -C***REVISION DATE 820801 (YYMMDD) -C***CATEGORY NO. 
D5 -C***KEYWORDS DECOMPOSITION,DOUBLE PRECISION,LINEAR ALGEBRA,LINPACK, -C MATRIX,ORTHOGONAL TRIANGULAR -C***AUTHOR STEWART, G. W., (U. OF MARYLAND) -C***PURPOSE USES HOUSEHOLDER TRANSFORMATIONS TO COMPUTE THE QR FACTORI- -C ZATION OF N BY P MATRIX X. COLUMN PIVOTING IS OPTIONAL. -C***DESCRIPTION -C DQRDC USES HOUSEHOLDER TRANSFORMATIONS TO COMPUTE THE QR -C FACTORIZATION OF AN N BY P MATRIX X. COLUMN PIVOTING -C BASED ON THE 2-NORMS OF THE REDUCED COLUMNS MAY BE -C PERFORMED AT THE USER'S OPTION. -C ON ENTRY -C X DOUBLE PRECISION(LDX,P), WHERE LDX .GE. N. -C X CONTAINS THE MATRIX WHOSE DECOMPOSITION IS TO BE -C COMPUTED. -C LDX INTEGER. -C LDX IS THE LEADING DIMENSION OF THE ARRAY X. -C N INTEGER. -C N IS THE NUMBER OF ROWS OF THE MATRIX X. -C P INTEGER. -C P IS THE NUMBER OF COLUMNS OF THE MATRIX X. -C JPVT INTEGER(P). -C JPVT CONTAINS INTEGERS THAT CONTROL THE SELECTION -C OF THE PIVOT COLUMNS. THE K-TH COLUMN X(K) OF X -C IS PLACED IN ONE OF THREE CLASSES ACCORDING TO THE -C VALUE OF JPVT(K). -C IF JPVT(K) .GT. 0, THEN X(K) IS AN INITIAL -C COLUMN. -C IF JPVT(K) .EQ. 0, THEN X(K) IS A FREE COLUMN. -C IF JPVT(K) .LT. 0, THEN X(K) IS A FINAL COLUMN. -C BEFORE THE DECOMPOSITION IS COMPUTED, INITIAL COLUMNS -C ARE MOVED TO THE BEGINNING OF THE ARRAY X AND FINAL -C COLUMNS TO THE END. BOTH INITIAL AND FINAL COLUMNS -C ARE FROZEN IN PLACE DURING THE COMPUTATION AND ONLY -C FREE COLUMNS ARE MOVED. AT THE K-TH STAGE OF THE -C REDUCTION, IF X(K) IS OCCUPIED BY A FREE COLUMN -C IT IS INTERCHANGED WITH THE FREE COLUMN OF LARGEST -C REDUCED NORM. JPVT IS NOT REFERENCED IF -C JOB .EQ. 0. -C WORK DOUBLE PRECISION(P). -C WORK IS A WORK ARRAY. WORK IS NOT REFERENCED IF -C JOB .EQ. 0. -C JOB INTEGER. -C JOB IS AN INTEGER THAT INITIATES COLUMN PIVOTING. -C IF JOB .EQ. 0, NO PIVOTING IS DONE. -C IF JOB .NE. 0, PIVOTING IS DONE. -C ON RETURN -C X X CONTAINS IN ITS UPPER TRIANGLE THE UPPER -C TRIANGULAR MATRIX R OF THE QR FACTORIZATION. 
-C BELOW ITS DIAGONAL X CONTAINS INFORMATION FROM -C WHICH THE ORTHOGONAL PART OF THE DECOMPOSITION -C CAN BE RECOVERED. NOTE THAT IF PIVOTING HAS -C BEEN REQUESTED, THE DECOMPOSITION IS NOT THAT -C OF THE ORIGINAL MATRIX X BUT THAT OF X -C WITH ITS COLUMNS PERMUTED AS DESCRIBED BY JPVT. -C QRAUX DOUBLE PRECISION(P). -C QRAUX CONTAINS FURTHER INFORMATION REQUIRED TO RECOVER -C THE ORTHOGONAL PART OF THE DECOMPOSITION. -C JPVT JPVT(K) CONTAINS THE INDEX OF THE COLUMN OF THE -C ORIGINAL MATRIX THAT HAS BEEN INTERCHANGED INTO -C THE K-TH COLUMN, IF PIVOTING WAS REQUESTED. -C LINPACK. THIS VERSION DATED 08/14/78 . -C G. W. STEWART, UNIVERSITY OF MARYLAND, ARGONNE NATIONAL LAB. -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***ROUTINES CALLED DAXPY,DDOT,DNRM2,DSCAL,DSWAP -C***END PROLOGUE DQRDC - -C...SCALAR ARGUMENTS - INTEGER - + JOB,LDX,N,P - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + QRAUX(*),WORK(*),X(LDX,*) - INTEGER - + JPVT(*) - -C...LOCAL SCALARS - DOUBLE PRECISION - + MAXNRM,NRMXL,T,TT - INTEGER - + J,JJ,JP,L,LP1,LUP,MAXJ,PL,PU - LOGICAL - + NEGJ,SWAPJ - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT,DNRM2 - EXTERNAL - + DDOT,DNRM2 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DAXPY,DSCAL,DSWAP - -C...INTRINSIC FUNCTIONS - INTRINSIC - + DABS,DMAX1,DSIGN,DSQRT,MIN0 - - -C***FIRST EXECUTABLE STATEMENT DQRDC - - - PL = 1 - PU = 0 - IF (JOB .EQ. 0) GO TO 60 - -C PIVOTING HAS BEEN REQUESTED. REARRANGE THE COLUMNS -C ACCORDING TO JPVT. - - DO 20 J = 1, P - SWAPJ = JPVT(J) .GT. 0 - NEGJ = JPVT(J) .LT. 0 - JPVT(J) = J - IF (NEGJ) JPVT(J) = -J - IF (.NOT.SWAPJ) GO TO 10 - IF (J .NE. PL) CALL DSWAP(N,X(1,PL),1,X(1,J),1) - JPVT(J) = JPVT(PL) - JPVT(PL) = J - PL = PL + 1 - 10 CONTINUE - 20 CONTINUE - PU = P - DO 50 JJ = 1, P - J = P - JJ + 1 - IF (JPVT(J) .GE. 0) GO TO 40 - JPVT(J) = -JPVT(J) - IF (J .EQ. 
PU) GO TO 30 - CALL DSWAP(N,X(1,PU),1,X(1,J),1) - JP = JPVT(PU) - JPVT(PU) = JPVT(J) - JPVT(J) = JP - 30 CONTINUE - PU = PU - 1 - 40 CONTINUE - 50 CONTINUE - 60 CONTINUE - -C COMPUTE THE NORMS OF THE FREE COLUMNS. - - IF (PU .LT. PL) GO TO 80 - DO 70 J = PL, PU - QRAUX(J) = DNRM2(N,X(1,J),1) - WORK(J) = QRAUX(J) - 70 CONTINUE - 80 CONTINUE - -C PERFORM THE HOUSEHOLDER REDUCTION OF X. - - LUP = MIN0(N,P) - DO 200 L = 1, LUP - IF (L .LT. PL .OR. L .GE. PU) GO TO 120 - -C LOCATE THE COLUMN OF LARGEST NORM AND BRING IT -C INTO THE PIVOT POSITION. - - MAXNRM = 0.0D0 - MAXJ = L - DO 100 J = L, PU - IF (QRAUX(J) .LE. MAXNRM) GO TO 90 - MAXNRM = QRAUX(J) - MAXJ = J - 90 CONTINUE - 100 CONTINUE - IF (MAXJ .EQ. L) GO TO 110 - CALL DSWAP(N,X(1,L),1,X(1,MAXJ),1) - QRAUX(MAXJ) = QRAUX(L) - WORK(MAXJ) = WORK(L) - JP = JPVT(MAXJ) - JPVT(MAXJ) = JPVT(L) - JPVT(L) = JP - 110 CONTINUE - 120 CONTINUE - QRAUX(L) = 0.0D0 - IF (L .EQ. N) GO TO 190 - -C COMPUTE THE HOUSEHOLDER TRANSFORMATION FOR COLUMN L. - - NRMXL = DNRM2(N-L+1,X(L,L),1) - IF (NRMXL .EQ. 0.0D0) GO TO 180 - IF (X(L,L) .NE. 0.0D0) NRMXL = DSIGN(NRMXL,X(L,L)) - CALL DSCAL(N-L+1,1.0D0/NRMXL,X(L,L),1) - X(L,L) = 1.0D0 + X(L,L) - -C APPLY THE TRANSFORMATION TO THE REMAINING COLUMNS, -C UPDATING THE NORMS. - - LP1 = L + 1 - IF (P .LT. LP1) GO TO 170 - DO 160 J = LP1, P - T = -DDOT(N-L+1,X(L,L),1,X(L,J),1)/X(L,L) - CALL DAXPY(N-L+1,T,X(L,L),1,X(L,J),1) - IF (J .LT. PL .OR. J .GT. PU) GO TO 150 - IF (QRAUX(J) .EQ. 0.0D0) GO TO 150 - TT = 1.0D0 - (DABS(X(L,J))/QRAUX(J))**2 - TT = DMAX1(TT,0.0D0) - T = TT - TT = 1.0D0 + 0.05D0*TT*(QRAUX(J)/WORK(J))**2 - IF (TT .EQ. 1.0D0) GO TO 130 - QRAUX(J) = QRAUX(J)*DSQRT(T) - GO TO 140 - 130 CONTINUE - QRAUX(J) = DNRM2(N-L,X(L+1,J),1) - WORK(J) = QRAUX(J) - 140 CONTINUE - 150 CONTINUE - 160 CONTINUE - 170 CONTINUE - -C SAVE THE TRANSFORMATION. 
- - QRAUX(L) = X(L,L) - X(L,L) = -NRMXL - 180 CONTINUE - 190 CONTINUE - 200 CONTINUE - RETURN - END -*DQRSL - SUBROUTINE DQRSL(X,LDX,N,K,QRAUX,Y,QY,QTY,B,RSD,XB,JOB,INFO) -C***BEGIN PROLOGUE DQRSL -C***DATE WRITTEN 780814 (YYMMDD) -C***REVISION DATE 820801 (YYMMDD) -C***CATEGORY NO. D9,D2A1 -C***KEYWORDS DOUBLE PRECISION,LINEAR ALGEBRA,LINPACK,MATRIX, -C ORTHOGONAL TRIANGULAR,SOLVE -C***AUTHOR STEWART, G. W., (U. OF MARYLAND) -C***PURPOSE APPLIES THE OUTPUT OF DQRDC TO COMPUTE COORDINATE -C TRANSFORMATIONS, PROJECTIONS, AND LEAST SQUARES SOLUTIONS. -C***DESCRIPTION -C DQRSL APPLIES THE OUTPUT OF DQRDC TO COMPUTE COORDINATE -C TRANSFORMATIONS, PROJECTIONS, AND LEAST SQUARES SOLUTIONS. -C FOR K .LE. MIN(N,P), LET XK BE THE MATRIX -C XK = (X(JPVT(1)),X(JPVT(2)), ... ,X(JPVT(K))) -C FORMED FROM COLUMNNS JPVT(1), ... ,JPVT(K) OF THE ORIGINAL -C N X P MATRIX X THAT WAS INPUT TO DQRDC (IF NO PIVOTING WAS -C DONE, XK CONSISTS OF THE FIRST K COLUMNS OF X IN THEIR -C ORIGINAL ORDER). DQRDC PRODUCES A FACTORED ORTHOGONAL MATRIX Q -C AND AN UPPER TRIANGULAR MATRIX R SUCH THAT -C XK = Q * (R) -C (0) -C THIS INFORMATION IS CONTAINED IN CODED FORM IN THE ARRAYS -C X AND QRAUX. -C ON ENTRY -C X DOUBLE PRECISION(LDX,P). -C X CONTAINS THE OUTPUT OF DQRDC. -C LDX INTEGER. -C LDX IS THE LEADING DIMENSION OF THE ARRAY X. -C N INTEGER. -C N IS THE NUMBER OF ROWS OF THE MATRIX XK. IT MUST -C HAVE THE SAME VALUE AS N IN DQRDC. -C K INTEGER. -C K IS THE NUMBER OF COLUMNS OF THE MATRIX XK. K -C MUST NOT BE GREATER THAN MIN(N,P), WHERE P IS THE -C SAME AS IN THE CALLING SEQUENCE TO DQRDC. -C QRAUX DOUBLE PRECISION(P). -C QRAUX CONTAINS THE AUXILIARY OUTPUT FROM DQRDC. -C Y DOUBLE PRECISION(N) -C Y CONTAINS AN N-VECTOR THAT IS TO BE MANIPULATED -C BY DQRSL. -C JOB INTEGER. -C JOB SPECIFIES WHAT IS TO BE COMPUTED. JOB HAS -C THE DECIMAL EXPANSION ABCDE, WITH THE FOLLOWING -C MEANING. -C IF A .NE. 0, COMPUTE QY. -C IF B,C,D, OR E .NE. 0, COMPUTE QTY. -C IF C .NE. 0, COMPUTE B. -C IF D .NE. 
0, COMPUTE RSD. -C IF E .NE. 0, COMPUTE XB. -C NOTE THAT A REQUEST TO COMPUTE B, RSD, OR XB -C AUTOMATICALLY TRIGGERS THE COMPUTATION OF QTY, FOR -C WHICH AN ARRAY MUST BE PROVIDED IN THE CALLING -C SEQUENCE. -C ON RETURN -C QY DOUBLE PRECISION(N). -C QY CONTAINS Q*Y, IF ITS COMPUTATION HAS BEEN -C REQUESTED. -C QTY DOUBLE PRECISION(N). -C QTY CONTAINS TRANS(Q)*Y, IF ITS COMPUTATION HAS -C BEEN REQUESTED. HERE TRANS(Q) IS THE -C TRANSPOSE OF THE MATRIX Q. -C B DOUBLE PRECISION(K) -C B CONTAINS THE SOLUTION OF THE LEAST SQUARES PROBLEM -C MINIMIZE NORM2(Y - XK*B), -C IF ITS COMPUTATION HAS BEEN REQUESTED. (NOTE THAT -C IF PIVOTING WAS REQUESTED IN DQRDC, THE J-TH -C COMPONENT OF B WILL BE ASSOCIATED WITH COLUMN JPVT(J) -C OF THE ORIGINAL MATRIX X THAT WAS INPUT INTO DQRDC.) -C RSD DOUBLE PRECISION(N). -C RSD CONTAINS THE LEAST SQUARES RESIDUAL Y - XK*B, -C IF ITS COMPUTATION HAS BEEN REQUESTED. RSD IS -C ALSO THE ORTHOGONAL PROJECTION OF Y ONTO THE -C ORTHOGONAL COMPLEMENT OF THE COLUMN SPACE OF XK. -C XB DOUBLE PRECISION(N). -C XB CONTAINS THE LEAST SQUARES APPROXIMATION XK*B, -C IF ITS COMPUTATION HAS BEEN REQUESTED. XB IS ALSO -C THE ORTHOGONAL PROJECTION OF Y ONTO THE COLUMN SPACE -C OF X. -C INFO INTEGER. -C INFO IS ZERO UNLESS THE COMPUTATION OF B HAS -C BEEN REQUESTED AND R IS EXACTLY SINGULAR. IN -C THIS CASE, INFO IS THE INDEX OF THE FIRST ZERO -C DIAGONAL ELEMENT OF R AND B IS LEFT UNALTERED. -C THE PARAMETERS QY, QTY, B, RSD, AND XB ARE NOT REFERENCED -C IF THEIR COMPUTATION IS NOT REQUESTED AND IN THIS CASE -C CAN BE REPLACED BY DUMMY VARIABLES IN THE CALLING PROGRAM. -C TO SAVE STORAGE, THE USER MAY IN SOME CASES USE THE SAME -C ARRAY FOR DIFFERENT PARAMETERS IN THE CALLING SEQUENCE. A -C FREQUENTLY OCCURING EXAMPLE IS WHEN ONE WISHES TO COMPUTE -C ANY OF B, RSD, OR XB AND DOES NOT NEED Y OR QTY. IN THIS -C CASE ONE MAY IDENTIFY Y, QTY, AND ONE OF B, RSD, OR XB, WHILE -C PROVIDING SEPARATE ARRAYS FOR ANYTHING ELSE THAT IS TO BE -C COMPUTED. 
THUS THE CALLING SEQUENCE -C CALL DQRSL(X,LDX,N,K,QRAUX,Y,DUM,Y,B,Y,DUM,110,INFO) -C WILL RESULT IN THE COMPUTATION OF B AND RSD, WITH RSD -C OVERWRITING Y. MORE GENERALLY, EACH ITEM IN THE FOLLOWING -C LIST CONTAINS GROUPS OF PERMISSIBLE IDENTIFICATIONS FOR -C A SINGLE CALLING SEQUENCE. -C 1. (Y,QTY,B) (RSD) (XB) (QY) -C 2. (Y,QTY,RSD) (B) (XB) (QY) -C 3. (Y,QTY,XB) (B) (RSD) (QY) -C 4. (Y,QY) (QTY,B) (RSD) (XB) -C 5. (Y,QY) (QTY,RSD) (B) (XB) -C 6. (Y,QY) (QTY,XB) (B) (RSD) -C IN ANY GROUP THE VALUE RETURNED IN THE ARRAY ALLOCATED TO -C THE GROUP CORRESPONDS TO THE LAST MEMBER OF THE GROUP. -C LINPACK. THIS VERSION DATED 08/14/78 . -C G. W. STEWART, UNIVERSITY OF MARYLAND, ARGONNE NATIONAL LAB. -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***ROUTINES CALLED DAXPY,DCOPY,DDOT -C***END PROLOGUE DQRSL - -C...SCALAR ARGUMENTS - INTEGER - + INFO,JOB,K,LDX,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + B(*),QRAUX(*),QTY(*),QY(*),RSD(*),X(LDX,*),XB(*), - + Y(*) - -C...LOCAL SCALARS - DOUBLE PRECISION - + T,TEMP - INTEGER - + I,J,JJ,JU,KP1 - LOGICAL - + CB,CQTY,CQY,CR,CXB - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT - EXTERNAL - + DDOT - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DAXPY,DCOPY - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MIN0,MOD - - -C***FIRST EXECUTABLE STATEMENT DQRSL - - - INFO = 0 - -C DETERMINE WHAT IS TO BE COMPUTED. - - CQY = JOB/10000 .NE. 0 - CQTY = MOD(JOB,10000) .NE. 0 - CB = MOD(JOB,1000)/100 .NE. 0 - CR = MOD(JOB,100)/10 .NE. 0 - CXB = MOD(JOB,10) .NE. 0 - JU = MIN0(K,N-1) - -C SPECIAL ACTION WHEN N=1. - - IF (JU .NE. 0) GO TO 40 - IF (CQY) QY(1) = Y(1) - IF (CQTY) QTY(1) = Y(1) - IF (CXB) XB(1) = Y(1) - IF (.NOT.CB) GO TO 30 - IF (X(1,1) .NE. 0.0D0) GO TO 10 - INFO = 1 - GO TO 20 - 10 CONTINUE - B(1) = Y(1)/X(1,1) - 20 CONTINUE - 30 CONTINUE - IF (CR) RSD(1) = 0.0D0 - GO TO 250 - 40 CONTINUE - -C SET UP TO COMPUTE QY OR QTY. 
- - IF (CQY) CALL DCOPY(N,Y,1,QY,1) - IF (CQTY) CALL DCOPY(N,Y,1,QTY,1) - IF (.NOT.CQY) GO TO 70 - -C COMPUTE QY. - - DO 60 JJ = 1, JU - J = JU - JJ + 1 - IF (QRAUX(J) .EQ. 0.0D0) GO TO 50 - TEMP = X(J,J) - X(J,J) = QRAUX(J) - T = -DDOT(N-J+1,X(J,J),1,QY(J),1)/X(J,J) - CALL DAXPY(N-J+1,T,X(J,J),1,QY(J),1) - X(J,J) = TEMP - 50 CONTINUE - 60 CONTINUE - 70 CONTINUE - IF (.NOT.CQTY) GO TO 100 - -C COMPUTE TRANS(Q)*Y. - - DO 90 J = 1, JU - IF (QRAUX(J) .EQ. 0.0D0) GO TO 80 - TEMP = X(J,J) - X(J,J) = QRAUX(J) - T = -DDOT(N-J+1,X(J,J),1,QTY(J),1)/X(J,J) - CALL DAXPY(N-J+1,T,X(J,J),1,QTY(J),1) - X(J,J) = TEMP - 80 CONTINUE - 90 CONTINUE - 100 CONTINUE - -C SET UP TO COMPUTE B, RSD, OR XB. - - IF (CB) CALL DCOPY(K,QTY,1,B,1) - KP1 = K + 1 - IF (CXB) CALL DCOPY(K,QTY,1,XB,1) - IF (CR .AND. K .LT. N) CALL DCOPY(N-K,QTY(KP1),1,RSD(KP1),1) - IF (.NOT.CXB .OR. KP1 .GT. N) GO TO 120 - DO 110 I = KP1, N - XB(I) = 0.0D0 - 110 CONTINUE - 120 CONTINUE - IF (.NOT.CR) GO TO 140 - DO 130 I = 1, K - RSD(I) = 0.0D0 - 130 CONTINUE - 140 CONTINUE - IF (.NOT.CB) GO TO 190 - -C COMPUTE B. - - DO 170 JJ = 1, K - J = K - JJ + 1 - IF (X(J,J) .NE. 0.0D0) GO TO 150 - INFO = J -C ......EXIT - GO TO 180 - 150 CONTINUE - B(J) = B(J)/X(J,J) - IF (J .EQ. 1) GO TO 160 - T = -B(J) - CALL DAXPY(J-1,T,X(1,J),1,B,1) - 160 CONTINUE - 170 CONTINUE - 180 CONTINUE - 190 CONTINUE - IF (.NOT.CR .AND. .NOT.CXB) GO TO 240 - -C COMPUTE RSD OR XB AS REQUIRED. - - DO 230 JJ = 1, JU - J = JU - JJ + 1 - IF (QRAUX(J) .EQ. 
0.0D0) GO TO 220 - TEMP = X(J,J) - X(J,J) = QRAUX(J) - IF (.NOT.CR) GO TO 200 - T = -DDOT(N-J+1,X(J,J),1,RSD(J),1)/X(J,J) - CALL DAXPY(N-J+1,T,X(J,J),1,RSD(J),1) - 200 CONTINUE - IF (.NOT.CXB) GO TO 210 - T = -DDOT(N-J+1,X(J,J),1,XB(J),1)/X(J,J) - CALL DAXPY(N-J+1,T,X(J,J),1,XB(J),1) - 210 CONTINUE - X(J,J) = TEMP - 220 CONTINUE - 230 CONTINUE - 240 CONTINUE - 250 CONTINUE - RETURN - END -*DTRCO - SUBROUTINE DTRCO(T,LDT,N,RCOND,Z,JOB) -C***BEGIN PROLOGUE DTRCO -C***DATE WRITTEN 780814 (YYMMDD) -C***REVISION DATE 820801 (YYMMDD) -C***CATEGORY NO. D2A3 -C***KEYWORDS CONDITION,DOUBLE PRECISION,FACTOR,LINEAR ALGEBRA,LINPACK, -C MATRIX,TRIANGULAR -C***AUTHOR MOLER, C. B., (U. OF NEW MEXICO) -C***PURPOSE ESTIMATES THE CONDITION OF A DOUBLE PRECISION TRIANGULAR -C MATRIX. -C***DESCRIPTION -C DTRCO ESTIMATES THE CONDITION OF A DOUBLE PRECISION TRIANGULAR -C MATRIX. -C ON ENTRY -C T DOUBLE PRECISION(LDT,N) -C T CONTAINS THE TRIANGULAR MATRIX. THE ZERO -C ELEMENTS OF THE MATRIX ARE NOT REFERENCED, AND -C THE CORRESPONDING ELEMENTS OF THE ARRAY CAN BE -C USED TO STORE OTHER INFORMATION. -C LDT INTEGER -C LDT IS THE LEADING DIMENSION OF THE ARRAY T. -C N INTEGER -C N IS THE ORDER OF THE SYSTEM. -C JOB INTEGER -C = 0 T IS LOWER TRIANGULAR. -C = NONZERO T IS UPPER TRIANGULAR. -C ON RETURN -C RCOND DOUBLE PRECISION -C AN ESTIMATE OF THE RECIPROCAL CONDITION OF T . -C FOR THE SYSTEM T*X = B , RELATIVE PERTURBATIONS -C IN T AND B OF SIZE EPSILON MAY CAUSE -C RELATIVE PERTURBATIONS IN X OF SIZE EPSILON/RCOND . -C IF RCOND IS SO SMALL THAT THE LOGICAL EXPRESSION -C 1.0 + RCOND .EQ. 1.0 -C IS TRUE, THEN T MAY BE SINGULAR TO WORKING -C PRECISION. IN PARTICULAR, RCOND IS ZERO IF -C EXACT SINGULARITY IS DETECTED OR THE ESTIMATE -C UNDERFLOWS. -C Z DOUBLE PRECISION(N) -C A WORK VECTOR WHOSE CONTENTS ARE USUALLY UNIMPORTANT. -C IF T IS CLOSE TO A SINGULAR MATRIX, THEN Z IS -C AN APPROXIMATE NULL VECTOR IN THE SENSE THAT -C NORM(A*Z) = RCOND*NORM(A)*NORM(Z) . -C LINPACK. 
THIS VERSION DATED 08/14/78 . -C CLEVE MOLER, UNIVERSITY OF NEW MEXICO, ARGONNE NATIONAL LAB. -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***ROUTINES CALLED DASUM,DAXPY,DSCAL -C***END PROLOGUE DTRCO - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + RCOND - INTEGER - + JOB,LDT,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + T(LDT,*),Z(*) - -C...LOCAL SCALARS - DOUBLE PRECISION - + EK,S,SM,TNORM,W,WK,WKM,YNORM - INTEGER - + I1,J,J1,J2,K,KK,L - LOGICAL - + LOWER - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DASUM - EXTERNAL - + DASUM - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DAXPY,DSCAL - -C...INTRINSIC FUNCTIONS - INTRINSIC - + DABS,DMAX1,DSIGN - - -C***FIRST EXECUTABLE STATEMENT DTRCO - - - LOWER = JOB .EQ. 0 - -C COMPUTE 1-NORM OF T - - TNORM = 0.0D0 - DO 10 J = 1, N - L = J - IF (LOWER) L = N + 1 - J - I1 = 1 - IF (LOWER) I1 = J - TNORM = DMAX1(TNORM,DASUM(L,T(I1,J),1)) - 10 CONTINUE - -C RCOND = 1/(NORM(T)*(ESTIMATE OF NORM(INVERSE(T)))) . -C ESTIMATE = NORM(Z)/NORM(Y) WHERE T*Z = Y AND TRANS(T)*Y = E . -C TRANS(T) IS THE TRANSPOSE OF T . -C THE COMPONENTS OF E ARE CHOSEN TO CAUSE MAXIMUM LOCAL -C GROWTH IN THE ELEMENTS OF Y . -C THE VECTORS ARE FREQUENTLY RESCALED TO AVOID OVERFLOW. - -C SOLVE TRANS(T)*Y = E - - EK = 1.0D0 - DO 20 J = 1, N - Z(J) = 0.0D0 - 20 CONTINUE - DO 100 KK = 1, N - K = KK - IF (LOWER) K = N + 1 - KK - IF (Z(K) .NE. 0.0D0) EK = DSIGN(EK,-Z(K)) - IF (DABS(EK-Z(K)) .LE. DABS(T(K,K))) GO TO 30 - S = DABS(T(K,K))/DABS(EK-Z(K)) - CALL DSCAL(N,S,Z,1) - EK = S*EK - 30 CONTINUE - WK = EK - Z(K) - WKM = -EK - Z(K) - S = DABS(WK) - SM = DABS(WKM) - IF (T(K,K) .EQ. 0.0D0) GO TO 40 - WK = WK/T(K,K) - WKM = WKM/T(K,K) - GO TO 50 - 40 CONTINUE - WK = 1.0D0 - WKM = 1.0D0 - 50 CONTINUE - IF (KK .EQ. 
N) GO TO 90 - J1 = K + 1 - IF (LOWER) J1 = 1 - J2 = N - IF (LOWER) J2 = K - 1 - DO 60 J = J1, J2 - SM = SM + DABS(Z(J)+WKM*T(K,J)) - Z(J) = Z(J) + WK*T(K,J) - S = S + DABS(Z(J)) - 60 CONTINUE - IF (S .GE. SM) GO TO 80 - W = WKM - WK - WK = WKM - DO 70 J = J1, J2 - Z(J) = Z(J) + W*T(K,J) - 70 CONTINUE - 80 CONTINUE - 90 CONTINUE - Z(K) = WK - 100 CONTINUE - S = 1.0D0/DASUM(N,Z,1) - CALL DSCAL(N,S,Z,1) - - YNORM = 1.0D0 - -C SOLVE T*Z = Y - - DO 130 KK = 1, N - K = N + 1 - KK - IF (LOWER) K = KK - IF (DABS(Z(K)) .LE. DABS(T(K,K))) GO TO 110 - S = DABS(T(K,K))/DABS(Z(K)) - CALL DSCAL(N,S,Z,1) - YNORM = S*YNORM - 110 CONTINUE - IF (T(K,K) .NE. 0.0D0) Z(K) = Z(K)/T(K,K) - IF (T(K,K) .EQ. 0.0D0) Z(K) = 1.0D0 - I1 = 1 - IF (LOWER) I1 = K + 1 - IF (KK .GE. N) GO TO 120 - W = -Z(K) - CALL DAXPY(N-KK,W,T(I1,K),1,Z(I1),1) - 120 CONTINUE - 130 CONTINUE -C MAKE ZNORM = 1.0 - S = 1.0D0/DASUM(N,Z,1) - CALL DSCAL(N,S,Z,1) - YNORM = S*YNORM - - IF (TNORM .NE. 0.0D0) RCOND = YNORM/TNORM - IF (TNORM .EQ. 0.0D0) RCOND = 0.0D0 - RETURN - END -*DTRSL - SUBROUTINE DTRSL(T,LDT,N,B,JOB,INFO) -C***BEGIN PROLOGUE DTRSL -C***DATE WRITTEN 780814 (YYMMDD) -C***REVISION DATE 820801 (YYMMDD) -C***CATEGORY NO. D2A3 -C***KEYWORDS DOUBLE PRECISION,LINEAR ALGEBRA,LINPACK,MATRIX,SOLVE, -C TRIANGULAR -C***AUTHOR STEWART, G. W., (U. OF MARYLAND) -C***PURPOSE SOLVES SYSTEMS OF THE FORM T*X=B OR TRANS(T)*X=B WHERE T -C IS A TRIANGULAR MATRIX OF ORDER N. -C***DESCRIPTION -C DTRSL SOLVES SYSTEMS OF THE FORM -C T * X = B -C OR -C TRANS(T) * X = B -C WHERE T IS A TRIANGULAR MATRIX OF ORDER N. HERE TRANS(T) -C DENOTES THE TRANSPOSE OF THE MATRIX T. -C ON ENTRY -C T DOUBLE PRECISION(LDT,N) -C T CONTAINS THE MATRIX OF THE SYSTEM. THE ZERO -C ELEMENTS OF THE MATRIX ARE NOT REFERENCED, AND -C THE CORRESPONDING ELEMENTS OF THE ARRAY CAN BE -C USED TO STORE OTHER INFORMATION. -C LDT INTEGER -C LDT IS THE LEADING DIMENSION OF THE ARRAY T. -C N INTEGER -C N IS THE ORDER OF THE SYSTEM. -C B DOUBLE PRECISION(N). 
-C B CONTAINS THE RIGHT HAND SIDE OF THE SYSTEM. -C JOB INTEGER -C JOB SPECIFIES WHAT KIND OF SYSTEM IS TO BE SOLVED. -C IF JOB IS -C 00 SOLVE T*X=B, T LOWER TRIANGULAR, -C 01 SOLVE T*X=B, T UPPER TRIANGULAR, -C 10 SOLVE TRANS(T)*X=B, T LOWER TRIANGULAR, -C 11 SOLVE TRANS(T)*X=B, T UPPER TRIANGULAR. -C ON RETURN -C B B CONTAINS THE SOLUTION, IF INFO .EQ. 0. -C OTHERWISE B IS UNALTERED. -C INFO INTEGER -C INFO CONTAINS ZERO IF THE SYSTEM IS NONSINGULAR. -C OTHERWISE INFO CONTAINS THE INDEX OF -C THE FIRST ZERO DIAGONAL ELEMENT OF T. -C LINPACK. THIS VERSION DATED 08/14/78 . -C G. W. STEWART, UNIVERSITY OF MARYLAND, ARGONNE NATIONAL LAB. -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***ROUTINES CALLED DAXPY,DDOT -C***END PROLOGUE DTRSL - -C...SCALAR ARGUMENTS - INTEGER - + INFO,JOB,LDT,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + B(*),T(LDT,*) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TEMP - INTEGER - + CASE,J,JJ - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT - EXTERNAL - + DDOT - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DAXPY - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MOD - - -C***FIRST EXECUTABLE STATEMENT DTRSL - - -C BEGIN BLOCK PERMITTING ...EXITS TO 150 - -C CHECK FOR ZERO DIAGONAL ELEMENTS. - - DO 10 INFO = 1, N -C ......EXIT - IF (T(INFO,INFO) .EQ. 0.0D0) GO TO 150 - 10 CONTINUE - INFO = 0 - -C DETERMINE THE TASK AND GO TO IT. - - CASE = 1 - IF (MOD(JOB,10) .NE. 0) CASE = 2 - IF (MOD(JOB,100)/10 .NE. 0) CASE = CASE + 2 - GO TO (20,50,80,110), CASE - -C SOLVE T*X=B FOR T LOWER TRIANGULAR - - 20 CONTINUE - B(1) = B(1)/T(1,1) - IF (N .LT. 2) GO TO 40 - DO 30 J = 2, N - TEMP = -B(J-1) - CALL DAXPY(N-J+1,TEMP,T(J,J-1),1,B(J),1) - B(J) = B(J)/T(J,J) - 30 CONTINUE - 40 CONTINUE - GO TO 140 - -C SOLVE T*X=B FOR T UPPER TRIANGULAR. - - 50 CONTINUE - B(N) = B(N)/T(N,N) - IF (N .LT. 
2) GO TO 70 - DO 60 JJ = 2, N - J = N - JJ + 1 - TEMP = -B(J+1) - CALL DAXPY(J,TEMP,T(1,J+1),1,B(1),1) - B(J) = B(J)/T(J,J) - 60 CONTINUE - 70 CONTINUE - GO TO 140 - -C SOLVE TRANS(T)*X=B FOR T LOWER TRIANGULAR. - - 80 CONTINUE - B(N) = B(N)/T(N,N) - IF (N .LT. 2) GO TO 100 - DO 90 JJ = 2, N - J = N - JJ + 1 - B(J) = B(J) - DDOT(JJ-1,T(J+1,J),1,B(J+1),1) - B(J) = B(J)/T(J,J) - 90 CONTINUE - 100 CONTINUE - GO TO 140 - -C SOLVE TRANS(T)*X=B FOR T UPPER TRIANGULAR. - - 110 CONTINUE - B(1) = B(1)/T(1,1) - IF (N .LT. 2) GO TO 130 - DO 120 J = 2, N - B(J) = B(J) - DDOT(J-1,T(1,J),1,B(1),1) - B(J) = B(J)/T(J,J) - 120 CONTINUE - 130 CONTINUE - 140 CONTINUE - 150 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/odr/odrpack/d_mprec.f b/scipy-0.10.1/scipy/odr/odrpack/d_mprec.f deleted file mode 100644 index 648f1d3593..0000000000 --- a/scipy-0.10.1/scipy/odr/odrpack/d_mprec.f +++ /dev/null @@ -1,203 +0,0 @@ -*DMPREC - DOUBLE PRECISION FUNCTION DMPREC() -C***BEGIN PROLOGUE DPREC -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE DETERMINE MACHINE PRECISION FOR TARGET MACHINE AND COMPILER -C ASSUMING FLOATING-POINT NUMBERS ARE REPRESENTED IN THE -C T-DIGIT, BASE-B FORM -C SIGN (B**E)*( (X(1)/B) + ... + (X(T)/B**T) ) -C WHERE 0 .LE. X(I) .LT. B FOR I=1,...,T, AND -C 0 .LT. X(1). -C TO ALTER THIS FUNCTION FOR A PARTICULAR TARGET MACHINE, -C EITHER -C ACTIVATE THE DESIRED SET OF DATA STATEMENTS BY -C REMOVING THE C FROM COLUMN 1 -C OR -C SET B, TD AND TS USING I1MACH BY ACTIVATING -C THE DECLARATION STATEMENTS FOR I1MACH -C AND THE STATEMENTS PRECEEDING THE FIRST -C EXECUTABLE STATEMENT BELOW. -C***END PROLOGUE DPREC - -C...LOCAL SCALARS - DOUBLE PRECISION - + B - INTEGER - + TD,TS - -C...EXTERNAL FUNCTIONS -C INTEGER -C + I1MACH -C EXTERNAL -C + I1MACH - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) - -C DOUBLE PRECISION B -C THE BASE OF THE TARGET MACHINE. 
-C (MAY BE DEFINED USING I1MACH(10).) -C INTEGER TD -C THE NUMBER OF BASE-B DIGITS IN DOUBLE PRECISION. -C (MAY BE DEFINED USING I1MACH(14).) -C INTEGER TS -C THE NUMBER OF BASE-B DIGITS IN SINGLE PRECISION. -C (MAY BE DEFINED USING I1MACH(11).) - - -C MACHINE CONSTANTS FOR COMPUTERS FOLLOWING IEEE ARITHMETIC STANDARD -C (E.G., MOTOROLA 68000 BASED MACHINES SUCH AS SUN AND SPARC -C WORKSTATIONS, AND AT&T PC 7300; AND 8087 BASED MICROS SUCH AS THE -C IBM PC AND THE AT&T 6300). - DATA B / 2 / - DATA TS / 24 / - DATA TD / 53 / - -C MACHINE CONSTANTS FOR THE BURROUGHS 1700 SYSTEM. -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 60 / - -C MACHINE CONSTANTS FOR THE BURROUGHS 5700 SYSTEM -C THE BURROUGHS 6700/7700 SYSTEMS -C DATA B / 8 / -C DATA TS / 13 / -C DATA TD / 26 / - -C MACHINE CONSTANTS FOR THE CDC 6000/7000 (FTN5 COMPILER) -C THE CYBER 170/180 SERIES UNDER NOS -C DATA B / 2 / -C DATA TS / 48 / -C DATA TD / 96 / - -C MACHINE CONSTANTS FOR THE CDC 6000/7000 (FTN COMPILER) -C THE CYBER 170/180 SERIES UNDER NOS/VE -C THE CYBER 200 SERIES -C DATA B / 2 / -C DATA TS / 47 / -C DATA TD / 94 / - -C MACHINE CONSTANTS FOR THE CRAY -C DATA B / 2 / -C DATA TS / 47 / -C DATA TD / 94 / - -C MACHINE CONSTANTS FOR THE DATA GENERAL ECLIPSE S/200 -C DATA B / 16 / -C DATA TS / 6 / -C DATA TD / 14 / - -C MACHINE CONSTANTS FOR THE HARRIS COMPUTER -C DATA B / 2 / -C DATA TS / 23 / -C DATA TD / 38 / - -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 -C THE HONEYWELL 600/6000 SERIES -C DATA B / 2 / -C DATA TS / 27 / -C DATA TD / 63 / - -C MACHINE CONSTANTS FOR THE HP 2100 -C (3 WORD DOUBLE PRECISION OPTION WITH FTN4) -C DATA B / 2 / -C DATA TS / 23 / -C DATA TD / 39 / - -C MACHINE CONSTANTS FOR THE HP 2100 -C (4 WORD DOUBLE PRECISION OPTION WITH FTN4) -C DATA B / 2 / -C DATA TS / 23 / -C DATA TD / 55 / - -C MACHINE CONSTANTS FOR THE IBM 360/370 SERIES -C DATA B / 16 / -C DATA TS / 6 / -C DATA TD / 14 / - -C MACHINE CONSTANTS FOR THE IBM PC -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 53 
/ - -C MACHINE CONSTANTS FOR THE INTERDATA (PERKIN ELMER) 7/32 -C INTERDATA (PERKIN ELMER) 8/32 -C DATA B / 16 / -C DATA TS / 6 / -C DATA TD / 14 / - -C MACHINE CONSTANTS FOR THE PDP-10 (KA PROCESSOR). -C DATA B / 2 / -C DATA TS / 27 / -C DATA TD / 54 / - -C MACHINE CONSTANTS FOR THE PDP-10 (KI PROCESSOR). -C DATA B / 2 / -C DATA TS / 27 / -C DATA TD / 62 / - -C MACHINE CONSTANTS FOR THE PDP-11 SYSTEM -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 56 / - -C MACHINE CONSTANTS FOR THE PERKIN-ELMER 3230 -C DATA B / 16 / -C DATA TS / 6 / -C DATA TD / 14 / - -C MACHINE CONSTANTS FOR THE PRIME 850 AND PRIME 4050 -C DATA B / 2 / -C DATA TS / 23 / -C DATA TD / 47 / - -C MACHINE CONSTANTS FOR THE SEL SYSTEMS 85/86 -C DATA B / 16 / -C DATA TS / 6 / -C DATA TD / 14 / - -C MACHINE CONSTANTS FOR SUN AND SPARC WORKSTATIONS -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 53 / - -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES -C DATA B / 2 / -C DATA TS / 27 / -C DATA TD / 60 / - -C MACHINE CONSTANTS FOR THE VAX-11 WITH FORTRAN IV-PLUS COMPILER -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 56 / - -C MACHINE CONSTANTS FOR THE VAX/VMS SYSTEM WITHOUT G_FLOATING -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 56 / - -C MACHINE CONSTANTS FOR THE VAX/VMS SYSTEM WITH G_FLOATING -C DATA B / 2 / -C DATA TS / 24 / -C DATA TD / 53 / - -C MACHINE CONSTANTS FOR THE XEROX SIGMA 5/7/9 -C DATA B / 16 / -C DATA TS / 6 / -C DATA TD / 14 / - - -C***FIRST EXECUTABLE STATEMENT DMPREC - - -C B = I1MACH(10) -C TS = I1MACH(11) -C TD = I1MACH(14) - - DMPREC = B ** (1-TD) - - RETURN - - END diff --git a/scipy-0.10.1/scipy/odr/odrpack/d_odr.f b/scipy-0.10.1/scipy/odr/odrpack/d_odr.f deleted file mode 100644 index df0db44c4e..0000000000 --- a/scipy-0.10.1/scipy/odr/odrpack/d_odr.f +++ /dev/null @@ -1,10985 +0,0 @@ -*DODR - SUBROUTINE DODR - + (FCN, - + N,M,NP,NQ, - + BETA, - + Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + JOB, - + IPRINT,LUNERR,LUNRPT, - + WORK,LWORK,IWORK,LIWORK, - + INFO) -C***BEGIN 
PROLOGUE DODR -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***CATEGORY NO. G2E,I1B1 -C***KEYWORDS ORTHOGONAL DISTANCE REGRESSION, -C NONLINEAR LEAST SQUARES, -C MEASUREMENT ERROR MODELS, -C ERRORS IN VARIABLES -C***AUTHOR BOGGS, PAUL T. -C APPLIED AND COMPUTATIONAL MATHEMATICS DIVISION -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C GAITHERSBURG, MD 20899 -C BYRD, RICHARD H. -C DEPARTMENT OF COMPUTER SCIENCE -C UNIVERSITY OF COLORADO, BOULDER, CO 80309 -C ROGERS, JANET E. -C APPLIED AND COMPUTATIONAL MATHEMATICS DIVISION -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C BOULDER, CO 80303-3328 -C SCHNABEL, ROBERT B. -C DEPARTMENT OF COMPUTER SCIENCE -C UNIVERSITY OF COLORADO, BOULDER, CO 80309 -C AND -C APPLIED AND COMPUTATIONAL MATHEMATICS DIVISION -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C BOULDER, CO 80303-3328 -C***PURPOSE DOUBLE PRECISION DRIVER ROUTINE FOR FINDING -C THE WEIGHTED EXPLICIT OR IMPLICIT ORTHOGONAL DISTANCE -C REGRESSION (ODR) OR ORDINARY LINEAR OR NONLINEAR LEAST -C SQUARES (OLS) SOLUTION (SHORT CALL STATEMENT) -C***DESCRIPTION -C FOR DETAILS, SEE ODRPACK USER'S REFERENCE GUIDE. -C***REFERENCES BOGGS, P. T., R. H. BYRD, J. R. DONALDSON, AND -C R. B. SCHNABEL (1989), -C "ALGORITHM 676 --- ODRPACK: SOFTWARE FOR WEIGHTED -C ORTHOGONAL DISTANCE REGRESSION," -C ACM TRANS. MATH. SOFTWARE., 15(4):348-364. -C BOGGS, P. T., R. H. BYRD, J. E. ROGERS, AND -C R. B. SCHNABEL (1992), -C "USER'S REFERENCE GUIDE FOR ODRPACK VERSION 2.01, -C SOFTWARE FOR WEIGHTED ORTHOGONAL DISTANCE REGRESSION," -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C INTERNAL REPORT NUMBER 92-4834. -C BOGGS, P. T., R. H. BYRD, AND R. B. SCHNABEL (1987), -C "A STABLE AND EFFICIENT ALGORITHM FOR NONLINEAR -C ORTHOGONAL DISTANCE REGRESSION," -C SIAM J. SCI. STAT. COMPUT., 8(6):1052-1078. 
-C***ROUTINES CALLED DODCNT -C***END PROLOGUE DODR - -C...SCALAR ARGUMENTS - INTEGER - + INFO,JOB,LDWD,LDWE,LDX,LDY,LD2WD,LD2WE,LIWORK,LWORK, - + M,N,NDIGIT,NP,NQ - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WD(LDWD,LD2WD,M),WE(LDWE,LD2WE,NQ),WORK(LWORK), - + X(LDX,M),Y(LDY,NQ) - INTEGER - + IWORK(LIWORK) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + NEGONE,PARTOL,SSTOL,TAUFAC,ZERO - INTEGER - + IPRINT,LDIFX,LDSCLD,LDSTPD,LUNERR,LUNRPT,MAXIT - LOGICAL - + SHORT - -C...LOCAL ARRAYS - DOUBLE PRECISION - + SCLB(1),SCLD(1,1),STPB(1),STPD(1,1),WD1(1,1,1) - INTEGER - + IFIXB(1),IFIXX(1,1) - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DODCNT - -C...DATA STATEMENTS - DATA - + NEGONE,ZERO - + /-1.0D0,0.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER-SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C IPRINT: THE PRINT CONTROL VARIABLE. -C IWORK: THE INTEGER WORK SPACE. -C JOB: THE VARIABLE CONTROLLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LUNERR: THE LOGICAL UNIT NUMBER FOR ERROR MESSAGES. -C LUNRPT: THE LOGICAL UNIT NUMBER FOR COMPUTATION REPORTS. -C LWORK: THE LENGTH OF VECTOR WORK. 
-C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C N: THE NUMBER OF OBSERVATIONS. -C NEGONE: THE VALUE -1.0D0. -C NDIGIT: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS, AS -C SUPPLIED BY THE USER. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C SCLB: THE SCALING VALUES FOR BETA. -C SCLD: THE SCALING VALUES FOR DELTA. -C STPB: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C SHORT: THE VARIABLE DESIGNATING WHETHER THE USER HAS INVOKED -C ODRPACK BY THE SHORT-CALL (SHORT=.TRUE.) OR THE LONG-CALL -C (SHORT=.FALSE.). -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C WD: THE DELTA WEIGHTS. -C WD1: A DUMMY ARRAY USED WHEN WD(1,1,1)=0.0D0. -C WE: THE EPSILON WEIGHTS. -C WORK: THE DOUBLE PRECISION WORK SPACE. -C X: THE EXPLANATORY VARIABLE. -C Y: THE DEPENDENT VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. - - -C***FIRST EXECUTABLE STATEMENT DODR - - -C INITIALIZE NECESSARY VARIABLES TO INDICATE USE OF DEFAULT VALUES - - IFIXB(1) = -1 - IFIXX(1,1) = -1 - LDIFX = 1 - NDIGIT = -1 - TAUFAC = NEGONE - SSTOL = NEGONE - PARTOL = NEGONE - MAXIT = -1 - STPB(1) = NEGONE - STPD(1,1) = NEGONE - LDSTPD = 1 - SCLB(1) = NEGONE - SCLD(1,1) = NEGONE - LDSCLD = 1 - - SHORT = .TRUE. 
- - IF (WD(1,1,1).NE.ZERO) THEN - CALL DODCNT - + (SHORT, FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + INFO) - ELSE - WD1(1,1,1) = NEGONE - CALL DODCNT - + (SHORT, FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD1,1,1, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + INFO) - END IF - - RETURN - - END -*DODRC - SUBROUTINE DODRC - + (FCN, - + N,M,NP,NQ, - + BETA, - + Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, - + SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, - + SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + INFO) -C***BEGIN PROLOGUE DODRC -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***CATEGORY NO. G2E,I1B1 -C***KEYWORDS ORTHOGONAL DISTANCE REGRESSION, -C NONLINEAR LEAST SQUARES, -C MEASUREMENT ERROR MODELS, -C ERRORS IN VARIABLES -C***AUTHOR BOGGS, PAUL T. -C APPLIED AND COMPUTATIONAL MATHEMATICS DIVISION -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C GAITHERSBURG, MD 20899 -C BYRD, RICHARD H. -C DEPARTMENT OF COMPUTER SCIENCE -C UNIVERSITY OF COLORADO, BOULDER, CO 80309 -C ROGERS, JANET E. -C APPLIED AND COMPUTATIONAL MATHEMATICS DIVISION -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C BOULDER, CO 80303-3328 -C SCHNABEL, ROBERT B. 
-C DEPARTMENT OF COMPUTER SCIENCE -C UNIVERSITY OF COLORADO, BOULDER, CO 80309 -C AND -C APPLIED AND COMPUTATIONAL MATHEMATICS DIVISION -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C BOULDER, CO 80303-3328 -C***PURPOSE DOUBLE PRECISION DRIVER ROUTINE FOR FINDING -C THE WEIGHTED EXPLICIT OR IMPLICIT ORTHOGONAL DISTANCE -C REGRESSION (ODR) OR ORDINARY LINEAR OR NONLINEAR LEAST -C SQUARES (OLS) SOLUTION (LONG CALL STATEMENT) -C***DESCRIPTION -C FOR DETAILS, SEE ODRPACK USER'S REFERENCE GUIDE. -C***REFERENCES BOGGS, P. T., R. H. BYRD, J. R. DONALDSON, AND -C R. B. SCHNABEL (1989), -C "ALGORITHM 676 --- ODRPACK: SOFTWARE FOR WEIGHTED -C ORTHOGONAL DISTANCE REGRESSION," -C ACM TRANS. MATH. SOFTWARE., 15(4):348-364. -C BOGGS, P. T., R. H. BYRD, J. E. ROGERS, AND -C R. B. SCHNABEL (1992), -C "USER'S REFERENCE GUIDE FOR ODRPACK VERSION 2.01, -C SOFTWARE FOR WEIGHTED ORTHOGONAL DISTANCE REGRESSION," -C NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY -C INTERNAL REPORT NUMBER 92-4834. -C BOGGS, P. T., R. H. BYRD, AND R. B. SCHNABEL (1987), -C "A STABLE AND EFFICIENT ALGORITHM FOR NONLINEAR -C ORTHOGONAL DISTANCE REGRESSION," -C SIAM J. SCI. STAT. COMPUT., 8(6):1052-1078. 
-C***ROUTINES CALLED DODCNT -C***END PROLOGUE DODRC - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PARTOL,SSTOL,TAUFAC - INTEGER - + INFO,IPRINT,JOB,LDIFX,LDSCLD,LDSTPD,LDWD,LDWE,LDX,LDY, - + LD2WD,LD2WE,LIWORK,LUNERR,LUNRPT,LWORK,M,MAXIT,N,NDIGIT,NP,NQ - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),SCLB(NP),SCLD(LDSCLD,M),STPB(NP),STPD(LDSTPD,M), - + WD(LDWD,LD2WD,M),WE(LDWE,LD2WE,NQ),WORK(LWORK), - + X(LDX,M),Y(LDY,NQ) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),IWORK(LIWORK) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + NEGONE,ZERO - LOGICAL - + SHORT - -C...LOCAL ARRAYS - DOUBLE PRECISION - + WD1(1,1,1) - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DODCNT - -C...DATA STATEMENTS - DATA - + NEGONE,ZERO - + /-1.0D0,0.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER-SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C IPRINT: THE PRINT CONTROL VARIABLE. -C IWORK: THE INTEGER WORK SPACE. -C JOB: THE VARIABLE CONTROLLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LUNERR: THE LOGICAL UNIT NUMBER FOR ERROR MESSAGES. -C LUNRPT: THE LOGICAL UNIT NUMBER FOR COMPUTATION REPORTS. -C LWORK: THE LENGTH OF VECTOR WORK. 
-C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C N: THE NUMBER OF OBSERVATIONS. -C NDIGIT: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS, AS -C SUPPLIED BY THE USER. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C SCLB: THE SCALING VALUES FOR BETA. -C SCLD: THE SCALING VALUES FOR DELTA. -C STPB: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C SHORT: THE VARIABLE DESIGNATING WHETHER THE USER HAS INVOKED -C ODRPACK BY THE SHORT-CALL (SHORT=.TRUE.) OR THE LONG-CALL -C (SHORT=.FALSE.). -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C WD: THE DELTA WEIGHTS. -C WD1: A DUMMY ARRAY USED WHEN WD(1,1,1)=0.0D0. -C WE: THE EPSILON WEIGHTS. -C WORK: THE DOUBLE PRECISION WORK SPACE. -C X: THE EXPLANATORY VARIABLE. -C Y: THE DEPENDENT VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. - - -C***FIRST EXECUTABLE STATEMENT DODRC - - - SHORT = .FALSE. 
- - IF (WD(1,1,1).NE.ZERO) THEN - CALL DODCNT - + (SHORT, FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + INFO) - ELSE - WD1(1,1,1) = NEGONE - CALL DODCNT - + (SHORT, FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD1,1,1, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + INFO) - END IF - - RETURN - - END -*DACCES - SUBROUTINE DACCES - + (N,M,NP,NQ,LDWE,LD2WE, - + WORK,LWORK,IWORK,LIWORK, - + ACCESS,ISODR, - + JPVT,OMEGA,U,QRAUX,SD,VCV,WRK1,WRK2,WRK3,WRK4,WRK5,WRK6, - + NNZW,NPP, - + JOB,PARTOL,SSTOL,MAXIT,TAUFAC,ETA,NETA, - + LUNRPT,IPR1,IPR2,IPR2F,IPR3, - + WSS,RVAR,IDF, - + TAU,ALPHA,NITER,NFEV,NJEV,INT2,OLMAVG, - + RCOND,IRANK,ACTRS,PNORM,PRERS,RNORMS,ISTOP) -C***BEGIN PROLOGUE DACCES -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DIWINF,DWINF -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE ACCESS OR STORE VALUES IN THE WORK ARRAYS -C***END PROLOGUE DACESS - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + ACTRS,ALPHA,ETA,OLMAVG,PARTOL,PNORM,PRERS,RCOND, - + RNORMS,RVAR,SSTOL,TAU,TAUFAC - INTEGER - + IDF,INT2,IPR1,IPR2,IPR2F,IPR3,IRANK,ISTOP,ISTOPI,JOB,JPVT, - + LDWE,LD2WE,LIWORK,LUNRPT,LWORK,M,MAXIT,N,NETA,NFEV,NITER,NJEV, - + NNZW,NP,NPP,NQ,OMEGA,QRAUX,SD,U,VCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK6 - LOGICAL - + ACCESS,ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + WORK(LWORK),WSS(3) - INTEGER - + IWORK(LIWORK) - -C...LOCAL SCALARS - INTEGER - + ACTRSI,ALPHAI,BETACI,BETANI,BETASI,BETA0I, - + DELTAI,DELTNI,DELTSI,DIFFI,EPSI, - + EPSMAI,ETAI,FJACBI,FJACDI,FNI,FSI,IDFI,INT2I,IPRINI,IPRINT, - + IRANKI,JOBI,JPVTI,LDTTI,LIWKMN,LUNERI,LUNRPI,LWKMN,MAXITI, - + MSGB,MSGD,NETAI,NFEVI,NITERI,NJEVI,NNZWI,NPPI,NROWI, - + 
NTOLI,OLMAVI,OMEGAI,PARTLI,PNORMI,PRERSI,QRAUXI,RCONDI, - + RNORSI,RVARI,SDI,SI,SSFI,SSI,SSTOLI,TAUFCI,TAUI,TI,TTI,UI, - + VCVI,WE1I,WRK1I,WRK2I,WRK3I,WRK4I,WRK5I,WRK6I,WRK7I, - + WSSI,WSSDEI,WSSEPI,XPLUSI -C...EXTERNAL SUBROUTINES - EXTERNAL - + DIWINF,DWINF - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ACCESS: THE VARIABLE DESIGNATING WHETHER INFORMATION IS TO BE -C ACCESSED FROM THE WORK ARRAYS (ACCESS=TRUE) OR STORED IN -C THEM (ACCESS=FALSE). -C ACTRS: THE SAVED ACTUAL RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C ACTRSI: THE LOCATION IN ARRAY WORK OF VARIABLE ACTRS. -C ALPHA: THE LEVENBERG-MARQUARDT PARAMETER. -C ALPHAI: THE LOCATION IN ARRAY WORK OF VARIABLE ALPHA. -C BETACI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAC. -C BETANI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAN. -C BETASI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAS. -C BETA0I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETA0. -C DELTAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTA. -C DELTNI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTAN. -C DELTSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTAS. -C DIFFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DIFF. -C EPSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY EPS. -C EPSMAI: THE LOCATION IN ARRAY WORK OF VARIABLE EPSMAC. -C ETA: THE RELATIVE NOISE IN THE FUNCTION RESULTS. -C ETAI: THE LOCATION IN ARRAY WORK OF VARIABLE ETA. -C FJACBI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FJACB. -C FJACDI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FJACD. -C FNI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FN. -C FSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FS. -C IDF: THE DEGREES OF FREEDOM OF THE FIT, EQUAL TO THE NUMBER OF -C OBSERVATIONS WITH NONZERO WEIGHTED DERIVATIVES MINUS THE -C NUMBER OF PARAMETERS BEING ESTIMATED. -C IDFI: THE STARTING LOCATION IN ARRAY IWORK OF VARIABLE IDF. -C INT2: THE NUMBER OF INTERNAL DOUBLING STEPS. -C INT2I: THE LOCATION IN ARRAY IWORK OF VARIABLE INT2. 
-C IPR1: THE VALUE OF THE FOURTH DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE INITIAL SUMMARY REPORT. -C IPR2: THE VALUE OF THE THIRD DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE ITERATION REPORTS. -C IPR2F: THE VALUE OF THE SECOND DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE FREQUENCY OF THE ITERATION REPORTS. -C IPR3: THE VALUE OF THE FIRST DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE FINAL SUMMARY REPORT. -C IPRINI: THE LOCATION IN ARRAY IWORK OF VARIABLE IPRINT. -C IPRINT: THE PRINT CONTROL VARIABLE. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C IRANKI: THE LOCATION IN ARRAY IWORK OF VARIABLE IRANK. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS TO BE -C FOUND BY ODR (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISTOPI: THE LOCATION IN ARRAY IWORK OF VARIABLE ISTOP. -C IWORK: THE INTEGER WORK SPACE. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JOBI: THE LOCATION IN ARRAY IWORK OF VARIABLE JOB. -C JPVT: THE PIVOT VECTOR. -C JPVTI: THE STARTING LOCATION IN ARRAY IWORK OF VARIABLE JPVT. -C LDTTI: THE STARTING LOCATION IN ARRAY IWORK OF VARIABLE LDTT. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LUNERI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNERR. -C LUNERR: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. -C LUNRPI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNRPT. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C LWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY WORK. -C LWORK: THE LENGTH OF VECTOR WORK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C MAXITI: THE LOCATION IN ARRAY IWORK OF VARIABLE MAXIT. -C MSGB: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY MSGB. 
-C MSGD: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY MSGD. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS. -C NETAI: THE LOCATION IN ARRAY IWORK OF VARIABLE NETA. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NFEVI: THE LOCATION IN ARRAY IWORK OF VARIABLE NFEV. -C NITER: THE NUMBER OF ITERATIONS TAKEN. -C NITERI: THE LOCATION IN ARRAY IWORK OF VARIABLE NITER. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. -C NJEVI: THE LOCATION IN ARRAY IWORK OF VARIABLE NJEV. -C NNZW: THE NUMBER OF NONZERO WEIGHTED OBSERVATIONS. -C NNZWI: THE LOCATION IN ARRAY IWORK OF VARIABLE NNZW. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS ACTUALLY ESTIMATED. -C NPPI: THE LOCATION IN ARRAY IWORK OF VARIABLE NPP. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROWI: THE LOCATION IN ARRAY IWORK OF VARIABLE NROW. -C NTOLI: THE LOCATION IN ARRAY IWORK OF VARIABLE NTOL. -C OLMAVG: THE AVERAGE NUMBER OF LEVENBERG-MARQUARDT STEPS PER -C ITERATION. -C OLMAVI: THE LOCATION IN ARRAY WORK OF VARIABLE OLMAVG. -C OMEGA: THE STARTING LOCATION IN ARRAY WORK OF ARRAY OMEGA. -C OMEGAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY OMEGA. -C PARTLI: THE LOCATION IN ARRAY WORK OF VARIABLE PARTOL. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C PNORM: THE NORM OF THE SCALED ESTIMATED PARAMETERS. -C PNORMI: THE LOCATION IN ARRAY WORK OF VARIABLE PNORM. -C PRERS: THE SAVED PREDICTED RELATIVE REDUCTION IN THE -C SUM-OF-SQUARES. -C PRERSI: THE LOCATION IN ARRAY WORK OF VARIABLE PRERS. -C QRAUX: THE STARTING LOCATION IN ARRAY WORK OF ARRAY QRAUX. -C QRAUXI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY QRAUX. -C RCOND: THE APPROXIMATE RECIPROCAL CONDITION OF FJACB. -C RCONDI: THE LOCATION IN ARRAY WORK OF VARIABLE RCOND. -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). -C RNORMS: THE NORM OF THE SAVED WEIGHTED EPSILONS AND DELTAS. 
-C RNORSI: THE LOCATION IN ARRAY WORK OF VARIABLE RNORMS. -C RVAR: THE RESIDUAL VARIANCE, I.E. STANDARD DEVIATION SQUARED. -C RVARI: THE LOCATION IN ARRAY WORK OF VARIABLE RVAR. -C SCLB: THE SCALING VALUES USED FOR BETA. -C SCLD: THE SCALING VALUES USED FOR DELTA. -C SD: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SD. -C SDI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SD. -C SHORT: THE VARIABLE DESIGNATING WHETHER THE USER HAS INVOKED -C ODRPACK BY THE SHORT-CALL (SHORT=TRUE) OR THE LONG- -C CALL (SHORT=FALSE). -C SI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY S. -C SSFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SSF. -C SSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SS. -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C SSTOLI: THE LOCATION IN ARRAY WORK OF VARIABLE SSTOL. -C TAU: THE TRUST REGION DIAMETER. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C TAUFCI: THE LOCATION IN ARRAY WORK OF VARIABLE TAUFAC. -C TAUI: THE LOCATION IN ARRAY WORK OF VARIABLE TAU. -C TI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY T. -C TTI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY TT. -C U: THE STARTING LOCATION IN ARRAY WORK OF ARRAY U. -C UI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY U. -C VCV: THE STARTING LOCATION IN ARRAY WORK OF ARRAY VCV. -C VCVI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY VCV. -C WE1I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WE1. -C WORK: THE DOUBLE PRECISION WORK SPACE. -C WRK1: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK1. -C WRK1I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK1. -C WRK2: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK2. -C WRK2I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK2. -C WRK3: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK3. -C WRK3I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK3. -C WRK4: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK4. -C WRK4I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK4. 
-C WRK5: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK5. -C WRK5I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK5. -C WRK6: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK6. -C WRK6I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK6. -C WRK7I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK7. -C WSS: THE SUM OF THE SQUARES OF THE WEIGHTED EPSILONS AND DELTAS, -C THE SUM OF THE SQUARES OF THE WEIGHTED DELTAS, AND -C THE SUM OF THE SQUARES OF THE WEIGHTED EPSILONS. -C WSSI: THE STARTING LOCATION IN ARRAY WORK OF VARIABLE WSS(1). -C WSSDEI: THE STARTING LOCATION IN ARRAY WORK OF VARIABLE WSS(2). -C WSSEPI: THE STARTING LOCATION IN ARRAY WORK OF VARIABLE WSS(3). -C XPLUSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY XPLUSD. - - -C***FIRST EXECUTABLE STATEMENT DACCES - - -C FIND STARTING LOCATIONS WITHIN INTEGER WORKSPACE - - CALL DIWINF(M,NP,NQ, - + MSGB,MSGD,JPVTI,ISTOPI, - + NNZWI,NPPI,IDFI, - + JOBI,IPRINI,LUNERI,LUNRPI, - + NROWI,NTOLI,NETAI, - + MAXITI,NITERI,NFEVI,NJEVI,INT2I,IRANKI,LDTTI, - + LIWKMN) - -C FIND STARTING LOCATIONS WITHIN DOUBLE PRECISION WORK SPACE - - CALL DWINF(N,M,NP,NQ,LDWE,LD2WE,ISODR, - + DELTAI,EPSI,XPLUSI,FNI,SDI,VCVI, - + RVARI,WSSI,WSSDEI,WSSEPI,RCONDI,ETAI, - + OLMAVI,TAUI,ALPHAI,ACTRSI,PNORMI,RNORSI,PRERSI, - + PARTLI,SSTOLI,TAUFCI,EPSMAI, - + BETA0I,BETACI,BETASI,BETANI,SI,SSI,SSFI,QRAUXI,UI, - + FSI,FJACBI,WE1I,DIFFI, - + DELTSI,DELTNI,TI,TTI,OMEGAI,FJACDI, - + WRK1I,WRK2I,WRK3I,WRK4I,WRK5I,WRK6I,WRK7I, - + LWKMN) - - IF (ACCESS) THEN - -C SET STARTING LOCATIONS FOR WORK VECTORS - - JPVT = JPVTI - OMEGA = OMEGAI - QRAUX = QRAUXI - SD = SDI - VCV = VCVI - U = UI - WRK1 = WRK1I - WRK2 = WRK2I - WRK3 = WRK3I - WRK4 = WRK4I - WRK5 = WRK5I - WRK6 = WRK6I - -C ACCESS VALUES FROM THE WORK VECTORS - - ACTRS = WORK(ACTRSI) - ALPHA = WORK(ALPHAI) - ETA = WORK(ETAI) - OLMAVG = WORK(OLMAVI) - PARTOL = WORK(PARTLI) - PNORM = WORK(PNORMI) - PRERS = WORK(PRERSI) - RCOND = WORK(RCONDI) - WSS(1) = WORK(WSSI) - WSS(2) = WORK(WSSDEI) - WSS(3) 
= WORK(WSSEPI) - RVAR = WORK(RVARI) - RNORMS = WORK(RNORSI) - SSTOL = WORK(SSTOLI) - TAU = WORK(TAUI) - TAUFAC = WORK(TAUFCI) - - NETA = IWORK(NETAI) - IRANK = IWORK(IRANKI) - JOB = IWORK(JOBI) - LUNRPT = IWORK(LUNRPI) - MAXIT = IWORK(MAXITI) - NFEV = IWORK(NFEVI) - NITER = IWORK(NITERI) - NJEV = IWORK(NJEVI) - NNZW = IWORK(NNZWI) - NPP = IWORK(NPPI) - IDF = IWORK(IDFI) - INT2 = IWORK(INT2I) - -C SET UP PRINT CONTROL VARIABLES - - IPRINT = IWORK(IPRINI) - - IPR1 = MOD(IPRINT,10000)/1000 - IPR2 = MOD(IPRINT,1000)/100 - IPR2F = MOD(IPRINT,100)/10 - IPR3 = MOD(IPRINT,10) - - ELSE - -C STORE VALUES INTO THE WORK VECTORS - - WORK(ACTRSI) = ACTRS - WORK(ALPHAI) = ALPHA - WORK(OLMAVI) = OLMAVG - WORK(PARTLI) = PARTOL - WORK(PNORMI) = PNORM - WORK(PRERSI) = PRERS - WORK(RCONDI) = RCOND - WORK(WSSI) = WSS(1) - WORK(WSSDEI) = WSS(2) - WORK(WSSEPI) = WSS(3) - WORK(RVARI) = RVAR - WORK(RNORSI) = RNORMS - WORK(SSTOLI) = SSTOL - WORK(TAUI) = TAU - - IWORK(IRANKI) = IRANK - IWORK(ISTOPI) = ISTOP - IWORK(NFEVI) = NFEV - IWORK(NITERI) = NITER - IWORK(NJEVI) = NJEV - IWORK(IDFI) = IDF - IWORK(INT2I) = INT2 - END IF - - RETURN - END -*DESUBI - SUBROUTINE DESUBI - + (N,M,WD,LDWD,LD2WD,ALPHA,TT,LDTT,I,E) -C***BEGIN PROLOGUE DESUBI -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DZERO -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE E = WD + ALPHA*TT**2 -C***END PROLOGUE DESUBI - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + ALPHA - INTEGER - + LDTT,LDWD,LD2WD,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + E(M,M),TT(LDTT,M),WD(LDWD,LD2WD,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ZERO - INTEGER - + I,J,J1,J2 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DZERO - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ALPHA: THE LEVENBERG-MARQUARDT PARAMETER. -C E: THE VALUE OF THE ARRAY E = WD + ALPHA*TT**2 -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C J1: AN INDEXING VARIABLE. 
-C J2: AN INDEXING VARIABLE. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NP: THE NUMBER OF RESPONSES PER OBSERVATION. -C TT: THE SCALING VALUES USED FOR DELTA. -C WD: THE SQUARED DELTA WEIGHTS, D**2. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DESUBI - - -C N.B. THE LOCATIONS OF WD AND TT ACCESSED DEPEND ON THE VALUE -C OF THE FIRST ELEMENT OF EACH ARRAY AND THE LEADING DIMENSIONS -C OF THE MULTIPLY SUBSCRIPTED ARRAYS. - - IF (N.EQ.0 .OR. M.EQ.0) RETURN - - IF (WD(1,1,1).GE.ZERO) THEN - IF (LDWD.GE.N) THEN -C THE ELEMENTS OF WD HAVE BEEN INDIVIDUALLY SPECIFIED - - IF (LD2WD.EQ.1) THEN -C THE ARRAYS STORED IN WD ARE DIAGONAL - CALL DZERO(M,M,E,M) - DO 10 J=1,M - E(J,J) = WD(I,1,J) - 10 CONTINUE - ELSE -C THE ARRAYS STORED IN WD ARE FULL POSITIVE SEMIDEFINITE MATRICES - DO 30 J1=1,M - DO 20 J2=1,M - E(J1,J2) = WD(I,J1,J2) - 20 CONTINUE - 30 CONTINUE - END IF - - IF (TT(1,1).GT.ZERO) THEN - IF (LDTT.GE.N) THEN - DO 110 J=1,M - E(J,J) = E(J,J) + ALPHA*TT(I,J)**2 - 110 CONTINUE - ELSE - DO 120 J=1,M - E(J,J) = E(J,J) + ALPHA*TT(1,J)**2 - 120 CONTINUE - END IF - ELSE - DO 130 J=1,M - E(J,J) = E(J,J) + ALPHA*TT(1,1)**2 - 130 CONTINUE - END IF - ELSE -C WD IS AN M BY M MATRIX - - IF (LD2WD.EQ.1) THEN -C THE ARRAY STORED IN WD IS DIAGONAL - CALL DZERO(M,M,E,M) - DO 140 J=1,M - E(J,J) = WD(1,1,J) - 140 CONTINUE - ELSE -C THE ARRAY STORED IN WD IS A FULL POSITIVE SEMIDEFINITE MATRICES - DO 160 J1=1,M - DO 150 J2=1,M - E(J1,J2) = WD(1,J1,J2) - 150 CONTINUE - 160 CONTINUE - END IF - - IF (TT(1,1).GT.ZERO) THEN - IF (LDTT.GE.N) THEN - DO 210 J=1,M - E(J,J) = E(J,J) + ALPHA*TT(I,J)**2 - 210 CONTINUE - ELSE - DO 220 J=1,M - E(J,J) = E(J,J) + ALPHA*TT(1,J)**2 - 220 CONTINUE - END IF - ELSE - DO 230 J=1,M - E(J,J) = E(J,J) + ALPHA*TT(1,1)**2 - 230 CONTINUE - END IF - END IF - ELSE -C WD IS A DIAGONAL MATRIX WITH ELEMENTS 
ABS(WD(1,1,1)) - CALL DZERO(M,M,E,M) - IF (TT(1,1).GT.ZERO) THEN - IF (LDTT.GE.N) THEN - DO 310 J=1,M - E(J,J) = ABS(WD(1,1,1)) + ALPHA*TT(I,J)**2 - 310 CONTINUE - ELSE - DO 320 J=1,M - E(J,J) = ABS(WD(1,1,1)) + ALPHA*TT(1,J)**2 - 320 CONTINUE - END IF - ELSE - DO 330 J=1,M - E(J,J) = ABS(WD(1,1,1)) + ALPHA*TT(1,1)**2 - 330 CONTINUE - END IF - END IF - - RETURN - END -*DETAF - SUBROUTINE DETAF - + (FCN, - + N,M,NP,NQ, - + XPLUSD,BETA,EPSMAC,NROW, - + PARTMP,PV0, - + IFIXB,IFIXX,LDIFX, - + ISTOP,NFEV,ETA,NETA, - + WRK1,WRK2,WRK6,WRK7) -C***BEGIN PROLOGUE DETAF -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE COMPUTE NOISE AND NUMBER OF GOOD DIGITS IN FUNCTION RESULTS -C (ADAPTED FROM STARPAC SUBROUTINE ETAFUN) -C***END PROLOGUE DETAF - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + EPSMAC,ETA - INTEGER - + ISTOP,LDIFX,M,N,NETA,NFEV,NP,NQ,NROW - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),PARTMP(NP),PV0(N,NQ), - + WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),WRK7(-2:2,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + A,B,FAC,HUNDRD,ONE,P1,P2,P5,STP,TWO,ZERO - INTEGER - + J,K,L - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,INT,LOG10,MAX,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,P1,P2,P5,ONE,TWO,HUNDRD - + /0.0D0,0.1D0,0.2D0,0.5D0,1.0D0,2.0D0,1.0D2/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C A: PARAMETERS OF THE LOCAL FIT. -C B: PARAMETERS OF THE LOCAL FIT. -C BETA: THE FUNCTION PARAMETERS. -C EPSMAC: THE VALUE OF MACHINE PRECISION. -C ETA: THE NOISE IN THE MODEL RESULTS. -C FAC: A FACTOR USED IN THE COMPUTATIONS. -C HUNDRD: THE VALUE 1.0D2. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. 
-C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C J: AN INDEX VARIABLE. -C K: AN INDEX VARIABLE. -C L: AN INDEX VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE MODEL RESULTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER AT WHICH THE DERIVATIVE IS TO BE CHECKED. -C ONE: THE VALUE 1.0D0. -C P1: THE VALUE 0.1D0. -C P2: THE VALUE 0.2D0. -C P5: THE VALUE 0.5D0. -C PARTMP: THE MODEL PARAMETERS. -C PV0: THE ORIGINAL PREDICTED VALUES. -C STP: A SMALL VALUE USED TO PERTURB THE PARAMETERS. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C WRK7: A WORK ARRAY OF (5 BY NQ) ELEMENTS. -C XPLUSD: THE VALUES OF X + DELTA. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DETAF - - - STP = HUNDRD*EPSMAC - ETA = EPSMAC - - DO 40 J=-2,2 - IF (J.EQ.0) THEN - DO 10 L=1,NQ - WRK7(J,L) = PV0(NROW,L) - 10 CONTINUE - ELSE - DO 20 K=1,NP - IF (IFIXB(1).LT.0) THEN - PARTMP(K) = BETA(K) + J*STP*BETA(K) - ELSE IF (IFIXB(K).NE.0) THEN - PARTMP(K) = BETA(K) + J*STP*BETA(K) - ELSE - PARTMP(K) = BETA(K) - END IF - 20 CONTINUE - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + PARTMP,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 003,WRK2,WRK6,WRK1,ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - END IF - DO 30 L=1,NQ - WRK7(J,L) = WRK2(NROW,L) - 30 CONTINUE - END IF - 40 CONTINUE - - DO 100 L=1,NQ - A = ZERO - B = ZERO - DO 50 J=-2,2 - A = A + WRK7(J,L) - B = B + J*WRK7(J,L) - 50 CONTINUE - A = P2*A - B = P1*B - IF ((WRK7(0,L).NE.ZERO) .AND. 
- + (ABS(WRK7(1,L)+WRK7(-1,L)).GT.HUNDRD*EPSMAC)) THEN - FAC = ONE/ABS(WRK7(0,L)) - ELSE - FAC = ONE - END IF - DO 60 J=-2,2 - WRK7(J,L) = ABS((WRK7(J,L)-(A+J*B))*FAC) - ETA = MAX(WRK7(J,L),ETA) - 60 CONTINUE - 100 CONTINUE - NETA = MAX(TWO,P5-LOG10(ETA)) - - RETURN - END -*DEVJAC - SUBROUTINE DEVJAC - + (FCN, - + ANAJAC,CDJAC, - + N,M,NP,NQ, - + BETAC,BETA,STPB, - + IFIXB,IFIXX,LDIFX, - + X,LDX,DELTA,XPLUSD,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,FN, - + STP,WRK1,WRK2,WRK3,WRK6, - + FJACB,ISODR,FJACD,WE1,LDWE,LD2WE, - + NJEV,NFEV,ISTOP,INFO) -C***BEGIN PROLOGUE DEVJAC -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN,DDOT,DIFIX,DJACCD,DJACFD,DWGHT,DUNPAC,DXPY -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE THE WEIGHTED JACOBIANS WRT BETA AND DELTA -C***END PROLOGUE DEVJAC - -C...SCALAR ARGUMENTS - INTEGER - + INFO,ISTOP,LDIFX,LDSTPD,LDTT,LDWE,LDX,LD2WE, - + M,N,NETA,NFEV,NJEV,NP,NQ - LOGICAL - + ANAJAC,CDJAC,ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),BETAC(NP),DELTA(N,M),FJACB(N,NP,NQ),FJACD(N,M,NQ), - + FN(N,NQ),SSF(NP),STP(N),STPB(NP),STPD(LDSTPD,M),TT(LDTT,M), - + WE1(LDWE,LD2WE,NQ),WRK1(N,M,NQ),WRK2(N,NQ),WRK3(NP), - + WRK6(N,NP,NQ),X(LDX,M),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - INTEGER - + IDEVAL,J,K,K1,L - DOUBLE PRECISION - + ZERO - LOGICAL - + ERROR - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DIFIX,DJACCD,DJACFD,DWGHT,DUNPAC,DXPY - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT - EXTERNAL - + DDOT - -C...DATA STATEMENTS - DATA ZERO - + /0.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER-SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT -C (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. 
-C BETAC: THE CURRENT ESTIMATED VALUES OF THE UNFIXED BETA'S. -C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR BY FORWARD -C DIFFERENCES (CDJAC=FALSE). -C DELTA: THE ESTIMATED VALUES OF DELTA. -C ERROR: THE VARIABLE DESIGNATING WHETHER ODRPACK DETECTED NONZERO -C VALUES IN ARRAY DELTA IN THE OLS CASE, AND THUS WHETHER -C THE USER MAY HAVE OVERWRITTEN IMPORTANT INFORMATION -C BY COMPUTING FJACD IN THE OLS CASE. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C FN: THE PREDICTED VALUES OF THE FUNCTION AT THE CURRENT POINT. -C IDEVAL: THE VARIABLE DESIGNATING WHAT COMPUTATIONS ARE TO BE -C PERFORMED BY USER-SUPPLIED SUBROUTINE FCN. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF DELTA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C ISTOP: THE VARIABLE DESIGNATING THAT THE USER WISHES THE -C COMPUTATIONS STOPPED. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR OLS (ISODR=FALSE). -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C K1: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWE: THE LEADING DIMENSION OF ARRAYS WE AND WE1. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LD2WE: THE SECOND DIMENSION OF ARRAYS WE AND WE1. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C SSF: THE SCALE USED FOR THE BETA'S. 
-C STP: THE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C STPB: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C TT: THE SCALING VALUES USED FOR DELTA. -C WE1: THE SQUARE ROOTS OF THE EPSILON WEIGHTS IN ARRAY WE. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK3: A WORK ARRAY OF (NP) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C X: THE INDEPENDENT VARIABLE. -C XPLUSD: THE VALUES OF X + DELTA. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DEVJAC - - -C INSERT CURRENT UNFIXED BETA ESTIMATES INTO BETA - - CALL DUNPAC(NP,BETAC,BETA,IFIXB) - -C COMPUTE XPLUSD = X + DELTA - - CALL DXPY(N,M,X,LDX,DELTA,N,XPLUSD,N) - -C COMPUTE THE JACOBIAN WRT THE ESTIMATED BETAS (FJACB) AND -C THE JACOBIAN WRT DELTA (FJACD) - - ISTOP = 0 - IF (ISODR) THEN - IDEVAL = 110 - ELSE - IDEVAL = 010 - END IF - IF (ANAJAC) THEN - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + IDEVAL,WRK2,FJACB,FJACD, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NJEV = NJEV+1 - END IF -C MAKE SURE FIXED ELEMENTS OF FJACD ARE ZERO - IF (ISODR) THEN - DO 10 L=1,NQ - CALL DIFIX(N,M,IFIXX,LDIFX,FJACD(1,1,L),N,FJACD(1,1,L),N) - 10 CONTINUE - END IF - ELSE IF (CDJAC) THEN - CALL DJACCD(FCN, - + N,M,NP,NQ, - + BETA,X,LDX,DELTA,XPLUSD,IFIXB,IFIXX,LDIFX, - + STPB,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,STP,WRK1,WRK2,WRK3,WRK6, - + FJACB,ISODR,FJACD,NFEV,ISTOP) - ELSE - CALL DJACFD(FCN, - + N,M,NP,NQ, - + BETA,X,LDX,DELTA,XPLUSD,IFIXB,IFIXX,LDIFX, - + STPB,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,FN,STP,WRK1,WRK2,WRK3,WRK6, - + FJACB,ISODR,FJACD,NFEV,ISTOP) - END IF - IF (ISTOP.LT.0) THEN - RETURN - ELSE IF (.NOT.ISODR) THEN -C TRY TO DETECT WHETHER THE USER HAS COMPUTED JFACD -C WITHIN FCN IN THE OLS CASE - ERROR = 
DDOT(N*M,DELTA,1,DELTA,1).NE.ZERO - IF (ERROR) THEN - INFO = 50300 - RETURN - END IF - END IF - -C WEIGHT THE JACOBIAN WRT THE ESTIMATED BETAS - - IF (IFIXB(1).LT.0) THEN - DO 20 K=1,NP - CALL DWGHT(N,NQ,WE1,LDWE,LD2WE, - + FJACB(1,K,1),N*NP,FJACB(1,K,1),N*NP) - 20 CONTINUE - ELSE - K1 = 0 - DO 30 K=1,NP - IF (IFIXB(K).GE.1) THEN - K1 = K1 + 1 - CALL DWGHT(N,NQ,WE1,LDWE,LD2WE, - + FJACB(1,K,1),N*NP,FJACB(1,K1,1),N*NP) - END IF - 30 CONTINUE - END IF - -C WEIGHT THE JACOBIAN'S WRT DELTA AS APPROPRIATE - - IF (ISODR) THEN - DO 40 J=1,M - CALL DWGHT(N,NQ,WE1,LDWE,LD2WE, - + FJACD(1,J,1),N*M,FJACD(1,J,1),N*M) - 40 CONTINUE - END IF - - RETURN - END -*DFCTR - SUBROUTINE DFCTR(OKSEMI,A,LDA,N,INFO) -C***BEGIN PROLOGUE DFCTR -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DDOT -C***DATE WRITTEN 910706 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE FACTOR THE POSITIVE (SEMI)DEFINITE MATRIX A USING A -C MODIFIED CHOLESKY FACTORIZATION -C (ADAPTED FROM LINPACK SUBROUTINE DPOFA) -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***END PROLOGUE DFCTR - -C...SCALAR ARGUMENTS - INTEGER INFO,LDA,N - LOGICAL OKSEMI - -C...ARRAY ARGUMENTS - DOUBLE PRECISION A(LDA,N) - -C...LOCAL SCALARS - DOUBLE PRECISION XI,S,T,TEN,ZERO - INTEGER J,K - -C...EXTERNAL FUNCTIONS - EXTERNAL DMPREC,DDOT - DOUBLE PRECISION DMPREC,DDOT - -C...INTRINSIC FUNCTIONS - INTRINSIC SQRT - -C...DATA STATEMENTS - DATA - + ZERO,TEN - + /0.0D0,10.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C A: THE ARRAY TO BE FACTORED. UPON RETURN, A CONTAINS THE -C UPPER TRIANGULAR MATRIX R SO THAT A = TRANS(R)*R -C WHERE THE STRICT LOWER TRIANGLE IS SET TO ZERO -C IF INFO .NE. 0 , THE FACTORIZATION IS NOT COMPLETE. -C I: AN INDEXING VARIABLE. -C INFO: AN IDICATOR VARIABLE, WHERE IF -C INFO = 0 THEN FACTORIZATION WAS COMPLETED -C INFO = K SIGNALS AN ERROR CONDITION. THE LEADING MINOR -C OF ORDER K IS NOT POSITIVE (SEMI)DEFINITE. -C J: AN INDEXING VARIABLE. 
-C LDA: THE LEADING DIMENSION OF ARRAY A. -C N: THE NUMBER OF ROWS AND COLUMNS OF DATA IN ARRAY A. -C OKSEMI: THE INDICATING WHETHER THE FACTORED ARRAY CAN BE POSITIVE -C SEMIDEFINITE (OKSEMI=TRUE) OR WHETHER IT MUST BE FOUND TO -C BE POSITIVE DEFINITE (OKSEMI=FALSE). -C TEN: THE VALUE 10.0D0. -C XI: A VALUE USED TO TEST FOR NON POSITIVE SEMIDEFINITENESS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DFCTR - - -C SET RELATIVE TOLERANCE FOR DETECTING NON POSITIVE SEMIDEFINITENESS. - XI = -TEN*DMPREC() - -C COMPUTE FACTORIZATION, STORING IN UPPER TRIANGULAR PORTION OF A - DO 20 J=1,N - INFO = J - S = ZERO - DO 10 K=1,J-1 - IF (A(K,K).EQ.ZERO) THEN - T = ZERO - ELSE - T = A(K,J) - DDOT(K-1,A(1,K),1,A(1,J),1) - T = T/A(K,K) - END IF - A(K,J) = T - S = S + T*T - 10 CONTINUE - S = A(J,J) - S -C ......EXIT - IF (A(J,J).LT.ZERO .OR. S.LT.XI*ABS(A(J,J))) THEN - RETURN - ELSE IF (.NOT.OKSEMI .AND. S.LE.ZERO) THEN - RETURN - ELSE IF (S.LE.ZERO) THEN - A(J,J) = ZERO - ELSE - A(J,J) = SQRT(S) - END IF - 20 CONTINUE - INFO = 0 - -C ZERO OUT LOWER PORTION OF A - DO 40 J=2,N - DO 30 K=1,J-1 - A(J,K) = ZERO - 30 CONTINUE - 40 CONTINUE - - RETURN - END -*DFCTRW - SUBROUTINE DFCTRW - + (N,M,NQ,NPP, - + ISODR, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + WRK0,WRK4, - + WE1,NNZW,INFO) -C***BEGIN PROLOGUE DFCTRW -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DFCTR -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE CHECK INPUT PARAMETERS, INDICATING ERRORS FOUND USING -C NONZERO VALUES OF ARGUMENT INFO AS DESCRIBED IN THE -C ODRPACK REFERENCE GUIDE -C***END PROLOGUE DFCTRW - -C...SCALAR ARGUMENTS - INTEGER - + INFO,LDWD,LDWE,LD2WD,LD2WE, - + M,N,NNZW,NPP,NQ - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + WE(LDWE,LD2WE,NQ),WE1(LDWE,LD2WE,NQ),WD(LDWD,LD2WD,M), - + WRK0(NQ,NQ),WRK4(M,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ZERO - INTEGER - + I,INF,J,J1,J2,L,L1,L2 - LOGICAL - + NOTZRO - -C...EXTERNAL SUBROUTINES - EXTERNAL - + 
DFCTR - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C J: AN INDEXING VARIABLE. -C J1: AN INDEXING VARIABLE. -C J2: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. -C L1: AN INDEXING VARIABLE. -C L2: AN INDEXING VARIABLE. -C LAST: THE LAST ROW OF THE ARRAY TO BE ACCESSED. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NNZW: THE NUMBER OF NONZERO WEIGHTED OBSERVATIONS. -C NOTZRO: THE VARIABLE DESIGNATING WHETHER A GIVEN COMPONENT OF THE -C WEIGHT ARRAY WE CONTAINS A NONZERO ELEMENT (NOTZRO=FALSE) -C OR NOT (NOTZRO=TRUE). -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATIONS. -C WE: THE (SQUARED) EPSILON WEIGHTS. -C WE1: THE FACTORED EPSILON WEIGHTS, S.T. TRANS(WE1)*WE1 = WE. -C WD: THE (SQUARED) DELTA WEIGHTS. -C WRK0: A WORK ARRAY OF (NQ BY NQ) ELEMENTS. -C WRK4: A WORK ARRAY OF (M BY M) ELEMENTS. -C ZERO: THE VALUE 0.0D0. 
- - -C***FIRST EXECUTABLE STATEMENT DFCTRW - - -C CHECK EPSILON WEIGHTS, AND STORE FACTORIZATION IN WE1 - - IF (WE(1,1,1).LT.ZERO) THEN -C WE CONTAINS A SCALAR - WE1(1,1,1) = -SQRT(ABS(WE(1,1,1))) - NNZW = N - - ELSE - NNZW = 0 - - IF (LDWE.EQ.1) THEN - - IF (LD2WE.EQ.1) THEN -C WE CONTAINS A DIAGONAL MATRIX - DO 110 L=1,NQ - IF (WE(1,1,L).GT.ZERO) THEN - NNZW = N - WE1(1,1,L) = SQRT(WE(1,1,L)) - ELSE IF (WE(1,1,L).LT.ZERO) THEN - INFO = 30010 - GO TO 300 - END IF - 110 CONTINUE - ELSE - -C WE CONTAINS A FULL NQ BY NQ SEMIDEFINITE MATRIX - DO 130 L1=1,NQ - DO 120 L2=L1,NQ - WRK0(L1,L2) = WE(1,L1,L2) - 120 CONTINUE - 130 CONTINUE - CALL DFCTR(.TRUE.,WRK0,NQ,NQ,INF) - IF (INF.NE.0) THEN - INFO = 30010 - GO TO 300 - ELSE - DO 150 L1=1,NQ - DO 140 L2=1,NQ - WE1(1,L1,L2) = WRK0(L1,L2) - 140 CONTINUE - IF (WE1(1,L1,L1).NE.ZERO) THEN - NNZW = N - END IF - 150 CONTINUE - END IF - END IF - - ELSE - - IF (LD2WE.EQ.1) THEN -C WE CONTAINS AN ARRAY OF DIAGONAL MATRIX - DO 220 I=1,N - NOTZRO = .FALSE. - DO 210 L=1,NQ - IF (WE(I,1,L).GT.ZERO) THEN - NOTZRO = .TRUE. - WE1(I,1,L) = SQRT(WE(I,1,L)) - ELSE IF (WE(I,1,L).LT.ZERO) THEN - INFO = 30010 - GO TO 300 - END IF - 210 CONTINUE - IF (NOTZRO) THEN - NNZW = NNZW + 1 - END IF - 220 CONTINUE - ELSE - -C WE CONTAINS AN ARRAY OF FULL NQ BY NQ SEMIDEFINITE MATRICES - DO 270 I=1,N - DO 240 L1=1,NQ - DO 230 L2=L1,NQ - WRK0(L1,L2) = WE(I,L1,L2) - 230 CONTINUE - 240 CONTINUE - CALL DFCTR(.TRUE.,WRK0,NQ,NQ,INF) - IF (INF.NE.0) THEN - INFO = 30010 - GO TO 300 - ELSE - NOTZRO = .FALSE. - DO 260 L1=1,NQ - DO 250 L2=1,NQ - WE1(I,L1,L2) = WRK0(L1,L2) - 250 CONTINUE - IF (WE1(I,L1,L1).NE.ZERO) THEN - NOTZRO = .TRUE. - END IF - 260 CONTINUE - END IF - IF (NOTZRO) THEN - NNZW = NNZW + 1 - END IF - 270 CONTINUE - END IF - END IF - END IF - -C CHECK FOR A SUFFICIENT NUMBER OF NONZERO EPSILON WEIGHTS - - IF (NNZW.LT.NPP) THEN - INFO = 30020 - END IF - - -C CHECK DELTA WEIGHTS - - 300 CONTINUE - IF (.NOT.ISODR .OR. 
WD(1,1,1).LT.ZERO) THEN -C PROBLEM IS NOT ODR, OR WD CONTAINS A SCALAR - RETURN - - ELSE - - IF (LDWD.EQ.1) THEN - - IF (LD2WD.EQ.1) THEN -C WD CONTAINS A DIAGONAL MATRIX - DO 310 J=1,M - IF (WD(1,1,J).LE.ZERO) THEN - INFO = MAX(30001,INFO+1) - RETURN - END IF - 310 CONTINUE - ELSE - -C WD CONTAINS A FULL M BY M POSITIVE DEFINITE MATRIX - DO 330 J1=1,M - DO 320 J2=J1,M - WRK4(J1,J2) = WD(1,J1,J2) - 320 CONTINUE - 330 CONTINUE - CALL DFCTR(.FALSE.,WRK4,M,M,INF) - IF (INF.NE.0) THEN - INFO = MAX(30001,INFO+1) - RETURN - END IF - END IF - - ELSE - - IF (LD2WD.EQ.1) THEN -C WD CONTAINS AN ARRAY OF DIAGONAL MATRICES - DO 420 I=1,N - DO 410 J=1,M - IF (WD(I,1,J).LE.ZERO) THEN - INFO = MAX(30001,INFO+1) - RETURN - END IF - 410 CONTINUE - 420 CONTINUE - ELSE - -C WD CONTAINS AN ARRAY OF FULL M BY M POSITIVE DEFINITE MATRICES - DO 470 I=1,N - DO 440 J1=1,M - DO 430 J2=J1,M - WRK4(J1,J2) = WD(I,J1,J2) - 430 CONTINUE - 440 CONTINUE - CALL DFCTR(.FALSE.,WRK4,M,M,INF) - IF (INF.NE.0) THEN - INFO = MAX(30001,INFO+1) - RETURN - END IF - 470 CONTINUE - END IF - END IF - END IF - - RETURN - END -*DFLAGS - SUBROUTINE DFLAGS - + (JOB,RESTRT,INITD,DOVCV,REDOJ,ANAJAC,CDJAC,CHKJAC,ISODR,IMPLCT) -C***BEGIN PROLOGUE DFLAGS -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SET FLAGS INDICATING CONDITIONS SPECIFIED BY JOB -C***END PROLOGUE DFLAGS - -C...SCALAR ARGUMENTS - INTEGER - + JOB - LOGICAL - + ANAJAC,CDJAC,CHKJAC,DOVCV,IMPLCT,INITD,ISODR,REDOJ,RESTRT - -C...LOCAL SCALARS - INTEGER - + J - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MOD - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT (ANAJAC=TRUE). -C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR BY FORWARD -C DIFFERENCES (CDJAC=FALSE). 
-C CHKJAC: THE VARIABLE DESIGNATING WHETHER THE USER-SUPPLIED -C JACOBIANS ARE TO BE CHECKED (CHKJAC=TRUE) OR NOT -C (CHKJAC=FALSE). -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX IS -C TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INITD: THE VARIABLE DESIGNATING WHETHER DELTA IS TO BE INITIALIZED -C TO ZERO (INITD=TRUE) OR TO THE FIRST N BY M ELEMENTS OF -C ARRAY WORK (INITD=FALSE). -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C J: THE VALUE OF A SPECIFIC DIGIT OF JOB. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS TO -C BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE MATRIX -C (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). - - -C***FIRST EXECUTABLE STATEMENT DFLAGS - - - IF (JOB.GE.0) THEN - - RESTRT= JOB.GE.10000 - - INITD = MOD(JOB,10000)/1000.EQ.0 - - J = MOD(JOB,1000)/100 - IF (J.EQ.0) THEN - DOVCV = .TRUE. - REDOJ = .TRUE. - ELSE IF (J.EQ.1) THEN - DOVCV = .TRUE. - REDOJ = .FALSE. - ELSE - DOVCV = .FALSE. - REDOJ = .FALSE. - END IF - - J = MOD(JOB,100)/10 - IF (J.EQ.0) THEN - ANAJAC = .FALSE. - CDJAC = .FALSE. - CHKJAC = .FALSE. - ELSE IF (J.EQ.1) THEN - ANAJAC = .FALSE. - CDJAC = .TRUE. - CHKJAC = .FALSE. - ELSE IF (J.EQ.2) THEN - ANAJAC = .TRUE. - CDJAC = .FALSE. - CHKJAC = .TRUE. - ELSE - ANAJAC = .TRUE. - CDJAC = .FALSE. - CHKJAC = .FALSE. - END IF - - J = MOD(JOB,10) - IF (J.EQ.0) THEN - ISODR = .TRUE. - IMPLCT = .FALSE. - ELSE IF (J.EQ.1) THEN - ISODR = .TRUE. - IMPLCT = .TRUE. - ELSE - ISODR = .FALSE. - IMPLCT = .FALSE. - END IF - - ELSE - - RESTRT = .FALSE. - INITD = .TRUE. - DOVCV = .TRUE. - REDOJ = .TRUE. - ANAJAC = .FALSE. - CDJAC = .FALSE. 
- CHKJAC = .FALSE. - ISODR = .TRUE. - IMPLCT = .FALSE. - - END IF - - RETURN - END -*DHSTEP - DOUBLE PRECISION FUNCTION DHSTEP - + (ITYPE,NETA,I,J,STP,LDSTP) -C***BEGIN PROLOGUE DHSTEP -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SET RELATIVE STEP SIZE FOR FINITE DIFFERENCE DERIVATIVES -C***END PROLOGUE DHSTEP - -C...SCALAR ARGUMENTS - INTEGER - + I,ITYPE,J,LDSTP,NETA - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + STP(LDSTP,J) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TEN,THREE,TWO,ZERO - -C...DATA STATEMENTS - DATA - + ZERO,TWO,THREE,TEN - + /0.0D0,2.0D0,3.0D0,10.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN IDENTIFIER FOR SELECTING USER SUPPLIED STEP SIZES. -C ITYPE: THE FINITE DIFFERENCE METHOD BEING USED, WHERE -C ITYPE = 0 INDICATES FORWARD FINITE DIFFERENCES, AND -C ITYPE = 1 INDICATES CENTRAL FINITE DIFFERENCES. -C J: AN IDENTIFIER FOR SELECTING USER SUPPLIED STEP SIZES. -C LDSTP: THE LEADING DIMENSION OF ARRAY STP. -C NETA: THE NUMBER OF GOOD DIGITS IN THE FUNCTION RESULTS. -C STP: THE STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C TEN: THE VALUE 10.0D0. -C THREE: THE VALUE 3.0D0. -C TWO: THE VALUE 2.0D0. -C ZERO: THE VALUE 0.0D0. 
- - - -C***FIRST EXECUTABLE STATEMENT DHSTEP - - -C SET DHSTEP TO RELATIVE FINITE DIFFERENCE STEP SIZE - - IF (STP(1,1).LE.ZERO) THEN - - IF (ITYPE.EQ.0) THEN -C USE DEFAULT FORWARD FINITE DIFFERENCE STEP SIZE - DHSTEP = TEN**(-ABS(NETA)/TWO - TWO) - - ELSE -C USE DEFAULT CENTRAL FINITE DIFFERENCE STEP SIZE - DHSTEP = TEN**(-ABS(NETA)/THREE) - END IF - - ELSE IF (LDSTP.EQ.1) THEN - DHSTEP = STP(1,J) - - ELSE - DHSTEP = STP(I,J) - END IF - - RETURN - END -*DIFIX - SUBROUTINE DIFIX - + (N,M,IFIX,LDIFIX,T,LDT,TFIX,LDTFIX) -C***BEGIN PROLOGUE DIFIX -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 910612 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SET ELEMENTS OF T TO ZERO ACCORDING TO IFIX -C***END PROLOGUE DIFIX - -C...SCALAR ARGUMENTS - INTEGER - + LDIFIX,LDT,LDTFIX,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + T(LDT,M),TFIX(LDTFIX,M) - INTEGER - + IFIX(LDIFIX,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ZERO - INTEGER - + I,J - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C IFIX: THE ARRAY DESIGNATING WHETHER AN ELEMENT OF T IS TO BE -C SET TO ZERO. -C J: AN INDEXING VARIABLE. -C LDT: THE LEADING DIMENSION OF ARRAY T. -C LDIFIX: THE LEADING DIMENSION OF ARRAY IFIX. -C LDTFIX: THE LEADING DIMENSION OF ARRAY TFIX. -C M: THE NUMBER OF COLUMNS OF DATA IN THE ARRAY. -C N: THE NUMBER OF ROWS OF DATA IN THE ARRAY. -C T: THE ARRAY BEING SET TO ZERO ACCORDING TO THE ELEMENTS -C OF IFIX. -C TFIX: THE RESULTING ARRAY. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DIFIX - - - IF (N.EQ.0 .OR. 
M.EQ.0) RETURN - - IF (IFIX(1,1).GE.ZERO) THEN - IF (LDIFIX.GE.N) THEN - DO 20 J=1,M - DO 10 I=1,N - IF (IFIX(I,J).EQ.0) THEN - TFIX(I,J) = ZERO - ELSE - TFIX(I,J) = T(I,J) - END IF - 10 CONTINUE - 20 CONTINUE - ELSE - DO 100 J=1,M - IF (IFIX(1,J).EQ.0) THEN - DO 30 I=1,N - TFIX(I,J) = ZERO - 30 CONTINUE - ELSE - DO 90 I=1,N - TFIX(I,J) = T(I,J) - 90 CONTINUE - END IF - 100 CONTINUE - END IF - END IF - - RETURN - END -*DINIWK - SUBROUTINE DINIWK - + (N,M,NP,WORK,LWORK,IWORK,LIWORK, - + X,LDX,IFIXX,LDIFX,SCLD,LDSCLD, - + BETA,SCLB, - + SSTOL,PARTOL,MAXIT,TAUFAC, - + JOB,IPRINT,LUNERR,LUNRPT, - + EPSMAI,SSTOLI,PARTLI,MAXITI,TAUFCI, - + JOBI,IPRINI,LUNERI,LUNRPI, - + SSFI,TTI,LDTTI,DELTAI) -C***BEGIN PROLOGUE DINIWK -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DFLAGS,DMPREC,DSCLB,DSCLD,DZERO -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE INITIALIZE WORK VECTORS AS NECESSARY -C***END PROLOGUE DINIWK - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PARTOL,SSTOL,TAUFAC - INTEGER - + DELTAI,EPSMAI,IPRINI,IPRINT,JOB,JOBI,LDIFX, - + LDSCLD,LDTTI,LDX,LIWORK,LUNERI,LUNERR,LUNRPI,LUNRPT,LWORK,M, - + MAXIT,MAXITI,N,NP,PARTLI,SSFI,SSTOLI,TAUFCI,TTI - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),SCLB(NP),SCLD(LDSCLD,M),WORK(LWORK),X(LDX,M) - INTEGER - + IFIXX(LDIFX,M),IWORK(LIWORK) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ONE,THREE,TWO,ZERO - INTEGER - + I,J - LOGICAL - + ANAJAC,CDJAC,CHKJAC,DOVCV,IMPLCT,INITD,ISODR,REDOJ,RESTRT - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DMPREC - EXTERNAL - + DMPREC - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DCOPY,DFLAGS,DSCLB,DSCLD,DZERO - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MIN,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,ONE,TWO,THREE - + /0.0D0,1.0D0,2.0D0,3.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT -C (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. 
-C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR BY FORWARD -C DIFFERENCES (CDJAC=FALSE). -C CHKJAC: THE VARIABLE DESIGNATING WHETHER THE USER-SUPPLIED -C JACOBIANS ARE TO BE CHECKED (CHKJAC=TRUE) OR NOT -C (CHKJAC=FALSE). -C DELTAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTA. -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX IS -C TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C EPSMAI: THE LOCATION IN ARRAY WORK OF VARIABLE EPSMAC. -C I: AN INDEXING VARIABLE. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE FIXED -C AT THEIR INPUT VALUES OR NOT. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INITD: THE VARIABLE DESIGNATING WHETHER DELTA IS TO BE INITIALIZED -C TO ZERO (INITD=TRUE) OR TO THE VALUES IN THE FIRST N BY M -C ELEMENTS OF ARRAY WORK (INITD=FALSE). -C IPRINI: THE LOCATION IN ARRAY IWORK OF VARIABLE IPRINT. -C IPRINT: THE PRINT CONTROL VARIABLE. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C IWORK: THE INTEGER WORK SPACE. -C J: AN INDEXING VARIABLE. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JOBI: THE LOCATION IN ARRAY IWORK OF VARIABLE JOB. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDTTI: THE LEADING DIMENSION OF ARRAY TT. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LUNERI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNERR. -C LUNERR: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. -C LUNRPI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNRPT. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C LWORK: THE LENGTH OF VECTOR WORK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. 
-C MAXITI: THE LOCATION IN ARRAY IWORK OF VARIABLE MAXIT. -C N: THE NUMBER OF OBSERVATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C ONE: THE VALUE 1.0D0. -C PARTLI: THE LOCATION IN ARRAY WORK OF VARIABLE PARTOL. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING CRITERIA. -C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS TO -C BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE MATRIX -C (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). -C SCLB: THE SCALING VALUES FOR BETA. -C SCLD: THE SCALING VALUES FOR DELTA. -C SSFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SSF. -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING CRITERIA. -C SSTOLI: THE LOCATION IN ARRAY WORK OF VARIABLE SSTOL. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C TAUFCI: THE LOCATION IN ARRAY WORK OF VARIABLE TAUFAC. -C THREE: THE VALUE 3.0D0. -C TTI: THE STARTING LOCATION IN ARRAY WORK OF THE ARRAY TT. -C TWO: THE VALUE 2.0D0. -C WORK: THE DOUBLE PRECISION WORK SPACE. -C X: THE INDEPENDENT VARIABLE. -C ZERO: THE VALUE 0.0D0. 
- - -C***FIRST EXECUTABLE STATEMENT DINIWK - - - CALL DFLAGS(JOB,RESTRT,INITD,DOVCV,REDOJ, - + ANAJAC,CDJAC,CHKJAC,ISODR,IMPLCT) - -C STORE VALUE OF MACHINE PRECISION IN WORK VECTOR - - WORK(EPSMAI) = DMPREC() - -C SET TOLERANCE FOR STOPPING CRITERIA BASED ON THE CHANGE IN THE -C PARAMETERS (SEE ALSO SUBPROGRAM DODCNT) - - IF (PARTOL.LT.ZERO) THEN - WORK(PARTLI) = WORK(EPSMAI)**(TWO/THREE) - ELSE - WORK(PARTLI) = MIN(PARTOL, ONE) - END IF - -C SET TOLERANCE FOR STOPPING CRITERIA BASED ON THE CHANGE IN THE -C SUM OF SQUARES OF THE WEIGHTED OBSERVATIONAL ERRORS - - IF (SSTOL.LT.ZERO) THEN - WORK(SSTOLI) = SQRT(WORK(EPSMAI)) - ELSE - WORK(SSTOLI) = MIN(SSTOL, ONE) - END IF - -C SET FACTOR FOR COMPUTING TRUST REGION DIAMETER AT FIRST ITERATION - - IF (TAUFAC.LE.ZERO) THEN - WORK(TAUFCI) = ONE - ELSE - WORK(TAUFCI) = MIN(TAUFAC, ONE) - END IF - -C SET MAXIMUM NUMBER OF ITERATIONS - - IF (MAXIT.LT.0) THEN - IWORK(MAXITI) = 50 - ELSE - IWORK(MAXITI) = MAXIT - END IF - -C STORE PROBLEM INITIALIZATION AND COMPUTATIONAL METHOD CONTROL -C VARIABLE - - IF (JOB.LE.0) THEN - IWORK(JOBI) = 0 - ELSE - IWORK(JOBI) = JOB - END IF - -C SET PRINT CONTROL - - IF (IPRINT.LT.0) THEN - IWORK(IPRINI) = 2001 - ELSE - IWORK(IPRINI) = IPRINT - END IF - -C SET LOGICAL UNIT NUMBER FOR ERROR MESSAGES - - IF (LUNERR.LT.0) THEN - IWORK(LUNERI) = 6 - ELSE - IWORK(LUNERI) = LUNERR - END IF - -C SET LOGICAL UNIT NUMBER FOR COMPUTATION REPORTS - - IF (LUNRPT.LT.0) THEN - IWORK(LUNRPI) = 6 - ELSE - IWORK(LUNRPI) = LUNRPT - END IF - -C COMPUTE SCALING FOR BETA'S AND DELTA'S - - IF (SCLB(1).LE.ZERO) THEN - CALL DSCLB(NP,BETA,WORK(SSFI)) - ELSE - CALL DCOPY(NP,SCLB,1,WORK(SSFI),1) - END IF - IF (ISODR) THEN - IF (SCLD(1,1).LE.ZERO) THEN - IWORK(LDTTI) = N - CALL DSCLD(N,M,X,LDX,WORK(TTI),IWORK(LDTTI)) - ELSE - IF (LDSCLD.EQ.1) THEN - IWORK(LDTTI) = 1 - CALL DCOPY(M,SCLD(1,1),1,WORK(TTI),1) - ELSE - IWORK(LDTTI) = N - DO 10 J=1,M - CALL DCOPY(N,SCLD(1,J),1, - + WORK(TTI+(J-1)*IWORK(LDTTI)),1) - 10 CONTINUE 
- END IF - END IF - END IF - -C INITIALIZE DELTA'S AS NECESSARY - - IF (ISODR) THEN - IF (INITD) THEN - CALL DZERO(N,M,WORK(DELTAI),N) - ELSE - IF (IFIXX(1,1).GE.0) THEN - IF (LDIFX.EQ.1) THEN - DO 20 J=1,M - IF (IFIXX(1,J).EQ.0) THEN - CALL DZERO(N,1,WORK(DELTAI+(J-1)*N),N) - END IF - 20 CONTINUE - ELSE - DO 40 J=1,M - DO 30 I=1,N - IF (IFIXX(I,J).EQ.0) THEN - WORK(DELTAI-1+I+(J-1)*N) = ZERO - END IF - 30 CONTINUE - 40 CONTINUE - END IF - END IF - END IF - ELSE - CALL DZERO(N,M,WORK(DELTAI),N) - END IF - - RETURN - END -*DIWINF - SUBROUTINE DIWINF - + (M,NP,NQ, - + MSGBI,MSGDI,IFIX2I,ISTOPI, - + NNZWI,NPPI,IDFI, - + JOBI,IPRINI,LUNERI,LUNRPI, - + NROWI,NTOLI,NETAI, - + MAXITI,NITERI,NFEVI,NJEVI,INT2I,IRANKI,LDTTI, - + LIWKMN) -C***BEGIN PROLOGUE DIWINF -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SET STORAGE LOCATIONS WITHIN INTEGER WORK SPACE -C***END PROLOGUE DIWINF - -C...SCALAR ARGUMENTS - INTEGER - + IDFI,INT2I,IPRINI,IRANKI,ISTOPI,JOBI,IFIX2I,LDTTI,LIWKMN, - + LUNERI,LUNRPI,M,MAXITI,MSGBI,MSGDI,NETAI,NFEVI,NITERI,NJEVI, - + NNZWI,NP,NPPI,NQ,NROWI,NTOLI - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C IDFI: THE LOCATION IN ARRAY IWORK OF VARIABLE IDF. -C IFIX2I: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY IFIX2. -C INT2I: THE LOCATION IN ARRAY IWORK OF VARIABLE INT2. -C IPRINI: THE LOCATION IN ARRAY IWORK OF VARIABLE IPRINT. -C IRANKI: THE LOCATION IN ARRAY IWORK OF VARIABLE IRANK. -C ISTOPI: THE LOCATION IN ARRAY IWORK OF VARIABLE ISTOP. -C JOBI: THE LOCATION IN ARRAY IWORK OF VARIABLE JOB. -C LDTTI: THE LOCATION IN ARRAY IWORK OF VARIABLE LDTT. -C LIWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY IWORK. -C LUNERI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNERR. -C LUNRPI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNRPT. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C MAXITI: THE LOCATION IN ARRAY IWORK OF VARIABLE MAXIT. 
-C MSGBI: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY MSGB. -C MSGDI: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY MSGD. -C NETAI: THE LOCATION IN ARRAY IWORK OF VARIABLE NETA. -C NFEVI: THE LOCATION IN ARRAY IWORK OF VARIABLE NFEV. -C NITERI: THE LOCATION IN ARRAY IWORK OF VARIABEL NITER. -C NJEVI: THE LOCATION IN ARRAY IWORK OF VARIABLE NJEV. -C NNZWI: THE LOCATION IN ARRAY IWORK OF VARIABLE NNZW. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPPI: THE LOCATION IN ARRAY IWORK OF VARIABLE NPP. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROWI: THE LOCATION IN ARRAY IWORK OF VARIABLE NROW. -C NTOLI: THE LOCATION IN ARRAY IWORK OF VARIABLE NTOL. - - -C***FIRST EXECUTABLE STATEMENT DIWINF - - - IF (NP.GE.1 .AND. M.GE.1) THEN - MSGBI = 1 - MSGDI = MSGBI + NQ*NP+1 - IFIX2I = MSGDI + NQ*M+1 - ISTOPI = IFIX2I + NP - NNZWI = ISTOPI + 1 - NPPI = NNZWI + 1 - IDFI = NPPI + 1 - JOBI = IDFI + 1 - IPRINI = JOBI + 1 - LUNERI = IPRINI + 1 - LUNRPI = LUNERI + 1 - NROWI = LUNRPI + 1 - NTOLI = NROWI + 1 - NETAI = NTOLI + 1 - MAXITI = NETAI + 1 - NITERI = MAXITI + 1 - NFEVI = NITERI + 1 - NJEVI = NFEVI + 1 - INT2I = NJEVI + 1 - IRANKI = INT2I + 1 - LDTTI = IRANKI + 1 - LIWKMN = LDTTI - ELSE - MSGBI = 1 - MSGDI = 1 - IFIX2I = 1 - ISTOPI = 1 - NNZWI = 1 - NPPI = 1 - IDFI = 1 - JOBI = 1 - IPRINI = 1 - LUNERI = 1 - LUNRPI = 1 - NROWI = 1 - NTOLI = 1 - NETAI = 1 - MAXITI = 1 - NITERI = 1 - NFEVI = 1 - NJEVI = 1 - INT2I = 1 - IRANKI = 1 - LDTTI = 1 - LIWKMN = 1 - END IF - - RETURN - END -*DJACCD - SUBROUTINE DJACCD - + (FCN, - + N,M,NP,NQ, - + BETA,X,LDX,DELTA,XPLUSD,IFIXB,IFIXX,LDIFX, - + STPB,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,STP,WRK1,WRK2,WRK3,WRK6, - + FJACB,ISODR,FJACD,NFEV,ISTOP) -C***BEGIN PROLOGUE DJACCD -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN,DHSTEP,DZERO -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE COMPUTE CENTRAL DIFFERENCE APPROXIMATIONS TO THE -C JACOBIAN WRT THE ESTIMATED BETAS AND WRT THE DELTAS -C***END PROLOGUE DJACCD 
- -C...SCALAR ARGUMENTS - INTEGER - + ISTOP,LDIFX,LDSTPD,LDTT,LDX,M,N,NETA,NFEV,NP,NQ - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),DELTA(N,M),FJACB(N,NP,NQ),FJACD(N,M,NQ), - + SSF(NP),STP(N),STPB(NP),STPD(LDSTPD,M),TT(LDTT,M), - + WRK1(N,M,NQ),WRK2(N,NQ),WRK3(NP),WRK6(N,NP,NQ), - + X(LDX,M),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + BETAK,ONE,TYPJ,ZERO - INTEGER - + I,J,K,L - LOGICAL - + DOIT,SETZRO - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DZERO - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DHSTEP - EXTERNAL - + DHSTEP - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MAX,SIGN,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,ONE - + /0.0D0,1.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C BETAK: THE K-TH FUNCTION PARAMETER. -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DOIT: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVE WRT A GIVEN -C BETA OR DELTA NEEDS TO BE COMPUTED (DOIT=TRUE) OR NOT -C (DOIT=FALSE). -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C I: AN INDEXING VARIABLE. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE FIXED -C AT THEIR INPUT VALUES OR NOT. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. 
-C LDX: THE LEADING DIMENSION OF ARRAY X. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF GOOD DIGITS IN THE FUNCTION RESULTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C ONE: THE VALUE 1.0D0. -C SETZRO: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVE WRT SOME -C DELTA NEEDS TO BE SET TO ZERO (SETZRO=TRUE) OR NOT -C (SETZRO=FALSE). -C SSF: THE SCALING VALUES USED FOR BETA. -C STP: THE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO EACH DELTA. -C STPB: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO EACH BETA. -C STPD: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO EACH DELTA. -C TT: THE SCALING VALUES USED FOR DELTA. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. -C X: THE EXPLANATORY VARIABLE. -C XPLUSD: THE VALUES OF X + DELTA. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK3: A WORK ARRAY OF (NP) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DJACCD - - -C COMPUTE THE JACOBIAN WRT THE ESTIMATED BETAS - - DO 60 K=1,NP - IF (IFIXB(1).GE.0) THEN - IF (IFIXB(K).EQ.0) THEN - DOIT = .FALSE. - ELSE - DOIT = .TRUE. - END IF - ELSE - DOIT = .TRUE. 
- END IF - IF (.NOT.DOIT) THEN - DO 10 L=1,NQ - CALL DZERO(N,1,FJACB(1,K,L),N) - 10 CONTINUE - ELSE - BETAK = BETA(K) - IF (BETAK.EQ.ZERO) THEN - IF (SSF(1).LT.ZERO) THEN - TYPJ = ONE/ABS(SSF(1)) - ELSE - TYPJ = ONE/SSF(K) - END IF - ELSE - TYPJ = ABS(BETAK) - END IF - WRK3(K) = BETAK - + + SIGN(ONE,BETAK)*TYPJ*DHSTEP(1,NETA,1,K,STPB,1) - WRK3(K) = WRK3(K) - BETAK - - BETA(K) = BETAK + WRK3(K) - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 001,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - DO 30 L=1,NQ - DO 20 I=1,N - FJACB(I,K,L) = WRK2(I,L) - 20 CONTINUE - 30 CONTINUE - END IF - - BETA(K) = BETAK - WRK3(K) - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 001,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - END IF - - DO 50 L=1,NQ - DO 40 I=1,N - FJACB(I,K,L) = (FJACB(I,K,L)-WRK2(I,L))/(2*WRK3(K)) - 40 CONTINUE - 50 CONTINUE - BETA(K) = BETAK - END IF - 60 CONTINUE - -C COMPUTE THE JACOBIAN WRT THE X'S - - IF (ISODR) THEN - DO 220 J=1,M - IF (IFIXX(1,1).LT.0) THEN - DOIT = .TRUE. - SETZRO = .FALSE. - ELSE IF (LDIFX.EQ.1) THEN - IF (IFIXX(1,J).EQ.0) THEN - DOIT = .FALSE. - ELSE - DOIT = .TRUE. - END IF - SETZRO = .FALSE. - ELSE - DOIT = .FALSE. - SETZRO = .FALSE. - DO 100 I=1,N - IF (IFIXX(I,J).NE.0) THEN - DOIT = .TRUE. - ELSE - SETZRO = .TRUE. 
- END IF - 100 CONTINUE - END IF - IF (.NOT.DOIT) THEN - DO 110 L=1,NQ - CALL DZERO(N,1,FJACD(1,J,L),N) - 110 CONTINUE - ELSE - DO 120 I=1,N - IF (XPLUSD(I,J).EQ.ZERO) THEN - IF (TT(1,1).LT.ZERO) THEN - TYPJ = ONE/ABS(TT(1,1)) - ELSE IF (LDTT.EQ.1) THEN - TYPJ = ONE/TT(1,J) - ELSE - TYPJ = ONE/TT(I,J) - END IF - ELSE - TYPJ = ABS(XPLUSD(I,J)) - END IF - STP(I) = XPLUSD(I,J) - + + SIGN(ONE,XPLUSD(I,J)) - + *TYPJ*DHSTEP(1,NETA,I,J,STPD,LDSTPD) - STP(I) = STP(I) - XPLUSD(I,J) - XPLUSD(I,J) = XPLUSD(I,J) + STP(I) - 120 CONTINUE - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 001,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - DO 140 L=1,NQ - DO 130 I=1,N - FJACD(I,J,L) = WRK2(I,L) - 130 CONTINUE - 140 CONTINUE - END IF - - DO 150 I=1,N - XPLUSD(I,J) = X(I,J) + DELTA(I,J) - STP(I) - 150 CONTINUE - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 001,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - END IF - - IF (SETZRO) THEN - DO 180 I=1,N - IF (IFIXX(I,J).EQ.0) THEN - DO 160 L=1,NQ - FJACD(I,J,L) = ZERO - 160 CONTINUE - ELSE - DO 170 L=1,NQ - FJACD(I,J,L) = (FJACD(I,J,L)-WRK2(I,L))/ - + (2*STP(I)) - 170 CONTINUE - END IF - 180 CONTINUE - ELSE - DO 200 L=1,NQ - DO 190 I=1,N - FJACD(I,J,L) = (FJACD(I,J,L)-WRK2(I,L))/ - + (2*STP(I)) - 190 CONTINUE - 200 CONTINUE - END IF - DO 210 I=1,N - XPLUSD(I,J) = X(I,J) + DELTA(I,J) - 210 CONTINUE - END IF - 220 CONTINUE - END IF - - RETURN - END -*DJACFD - SUBROUTINE DJACFD - + (FCN, - + N,M,NP,NQ, - + BETA,X,LDX,DELTA,XPLUSD,IFIXB,IFIXX,LDIFX, - + STPB,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,FN,STP,WRK1,WRK2,WRK3,WRK6, - + FJACB,ISODR,FJACD,NFEV,ISTOP) -C***BEGIN PROLOGUE DJACFD -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN,DHSTEP,DZERO -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE COMPUTE FORWARD DIFFERENCE APPROXIMATIONS TO THE -C JACOBIAN WRT 
THE ESTIMATED BETAS AND WRT THE DELTAS -C***END PROLOGUE DJACFD - -C...SCALAR ARGUMENTS - INTEGER - + ISTOP,LDIFX,LDSTPD,LDTT,LDX,M,N,NETA,NFEV,NP,NQ - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),DELTA(N,M),FJACB(N,NP,NQ),FJACD(N,M,NQ),FN(N,NQ), - + SSF(NP),STP(N),STPB(NP),STPD(LDSTPD,M),TT(LDTT,M), - + WRK1(N,M,NQ),WRK2(N,NQ),WRK3(NP),WRK6(N,NP,NQ), - + X(LDX,M),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + BETAK,ONE,TYPJ,ZERO - INTEGER - + I,J,K,L - LOGICAL - + DOIT,SETZRO - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DZERO - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DHSTEP - EXTERNAL - + DHSTEP - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MAX,SIGN,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,ONE - + /0.0D0,1.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C BETAK: THE K-TH FUNCTION PARAMETER. -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DOIT: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVE WRT A -C GIVEN BETA OR DELTA NEEDS TO BE COMPUTED (DOIT=TRUE) -C OR NOT (DOIT=FALSE). -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C FN: THE NEW PREDICTED VALUES FROM THE FUNCTION. -C I: AN INDEXING VARIABLE. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. 
-C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF GOOD DIGITS IN THE FUNCTION RESULTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C ONE: THE VALUE 1.0D0. -C SETZRO: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVE WRT SOME -C DELTA NEEDS TO BE SET TO ZERO (SETZRO=TRUE) OR NOT -C (SETZRO=FALSE). -C SSF: THE SCALE USED FOR THE BETA'S. -C STP: THE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C STPB: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C TT: THE SCALING VALUES USED FOR DELTA. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. -C X: THE EXPLANATORY VARIABLE. -C XPLUSD: THE VALUES OF X + DELTA. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK3: A WORK ARRAY OF (NP) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DJACFD - - -C COMPUTE THE JACOBIAN WRT THE ESTIMATED BETAS - - DO 40 K=1,NP - IF (IFIXB(1).GE.0) THEN - IF (IFIXB(K).EQ.0) THEN - DOIT = .FALSE. - ELSE - DOIT = .TRUE. - END IF - ELSE - DOIT = .TRUE. 
- END IF - IF (.NOT.DOIT) THEN - DO 10 L=1,NQ - CALL DZERO(N,1,FJACB(1,K,L),N) - 10 CONTINUE - ELSE - BETAK = BETA(K) - IF (BETAK.EQ.ZERO) THEN - IF (SSF(1).LT.ZERO) THEN - TYPJ = ONE/ABS(SSF(1)) - ELSE - TYPJ = ONE/SSF(K) - END IF - ELSE - TYPJ = ABS(BETAK) - END IF - WRK3(K) = BETAK - + + SIGN(ONE,BETAK)*TYPJ*DHSTEP(0,NETA,1,K,STPB,1) - WRK3(K) = WRK3(K) - BETAK - BETA(K) = BETAK + WRK3(K) - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 001,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - END IF - DO 30 L=1,NQ - DO 20 I=1,N - FJACB(I,K,L) = (WRK2(I,L)-FN(I,L))/WRK3(K) - 20 CONTINUE - 30 CONTINUE - BETA(K) = BETAK - END IF - 40 CONTINUE - -C COMPUTE THE JACOBIAN WRT THE X'S - - IF (ISODR) THEN - DO 220 J=1,M - IF (IFIXX(1,1).LT.0) THEN - DOIT = .TRUE. - SETZRO = .FALSE. - ELSE IF (LDIFX.EQ.1) THEN - IF (IFIXX(1,J).EQ.0) THEN - DOIT = .FALSE. - ELSE - DOIT = .TRUE. - END IF - SETZRO = .FALSE. - ELSE - DOIT = .FALSE. - SETZRO = .FALSE. - DO 100 I=1,N - IF (IFIXX(I,J).NE.0) THEN - DOIT = .TRUE. - ELSE - SETZRO = .TRUE. 
- END IF - 100 CONTINUE - END IF - IF (.NOT.DOIT) THEN - DO 110 L=1,NQ - CALL DZERO(N,1,FJACD(1,J,L),N) - 110 CONTINUE - ELSE - DO 120 I=1,N - IF (XPLUSD(I,J).EQ.ZERO) THEN - IF (TT(1,1).LT.ZERO) THEN - TYPJ = ONE/ABS(TT(1,1)) - ELSE IF (LDTT.EQ.1) THEN - TYPJ = ONE/TT(1,J) - ELSE - TYPJ = ONE/TT(I,J) - END IF - ELSE - TYPJ = ABS(XPLUSD(I,J)) - END IF - - STP(I) = XPLUSD(I,J) - + + SIGN(ONE,XPLUSD(I,J)) - + *TYPJ*DHSTEP(0,NETA,I,J,STPD,LDSTPD) - STP(I) = STP(I) - XPLUSD(I,J) - XPLUSD(I,J) = XPLUSD(I,J) + STP(I) - 120 CONTINUE - - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 001,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NFEV = NFEV + 1 - DO 140 L=1,NQ - DO 130 I=1,N - FJACD(I,J,L) = WRK2(I,L) - 130 CONTINUE - 140 CONTINUE - - END IF - - IF (SETZRO) THEN - DO 180 I=1,N - IF (IFIXX(I,J).EQ.0) THEN - DO 160 L=1,NQ - FJACD(I,J,L) = ZERO - 160 CONTINUE - ELSE - DO 170 L=1,NQ - FJACD(I,J,L) = (FJACD(I,J,L)-FN(I,L))/STP(I) - 170 CONTINUE - END IF - 180 CONTINUE - ELSE - DO 200 L=1,NQ - DO 190 I=1,N - FJACD(I,J,L) = (FJACD(I,J,L)-FN(I,L))/STP(I) - 190 CONTINUE - 200 CONTINUE - END IF - DO 210 I=1,N - XPLUSD(I,J) = X(I,J) + DELTA(I,J) - 210 CONTINUE - END IF - 220 CONTINUE - END IF - - RETURN - END -*DJCK - SUBROUTINE DJCK - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX,STPB,STPD,LDSTPD, - + SSF,TT,LDTT, - + ETA,NETA,NTOL,NROW,ISODR,EPSMAC, - + PV0,FJACB,FJACD, - + MSGB,MSGD,DIFF,ISTOP,NFEV,NJEV, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DJCK -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN,DHSTEP,DJCKM -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE DRIVER ROUTINE FOR THE DERIVATIVE CHECKING PROCESS -C (ADAPTED FROM STARPAC SUBROUTINE DCKCNT) -C***END PROLOGUE DJCK - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + EPSMAC,ETA - INTEGER - + ISTOP,LDIFX,LDSTPD,LDTT, - + M,N,NETA,NFEV,NJEV,NP,NQ,NROW,NTOL - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE 
PRECISION - + BETA(NP),DIFF(NQ,NP+M),FJACB(N,NP,NQ),FJACD(N,M,NQ), - + PV0(N,NQ),SSF(NP),STPB(NP),STPD(LDSTPD,M),TT(LDTT,M), - + WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSGB(1+NQ*NP),MSGD(1+NQ*M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + DIFFJ,H0,HC0,ONE,P5,PV,TOL,TYPJ,ZERO - INTEGER - + IDEVAL,J,LQ,MSGB1,MSGD1 - LOGICAL - + ISFIXD,ISWRTB - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DJCKM - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DHSTEP - EXTERNAL - + DHSTEP - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,INT,LOG10 - -C...DATA STATEMENTS - DATA - + ZERO,P5,ONE - + /0.0D0,0.5D0,1.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C DIFF: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR EACH DERIVATIVE CHECKED. -C DIFFJ: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR THE DERIVATIVE BEING -C CHECKED. -C EPSMAC: THE VALUE OF MACHINE PRECISION. -C ETA: THE RELATIVE NOISE IN THE FUNCTION RESULTS. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C H0: THE INITIAL RELATIVE STEP SIZE FOR FORWARD DIFFERENCES. -C HC0: THE INITIAL RELATIVE STEP SIZE FOR CENTRAL DIFFERENCES. -C IDEVAL: THE VARIABLE DESIGNATING WHAT COMPUTATIONS ARE TO BE -C PERFORMED BY USER SUPPLIED SUBROUTINE FCN. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISFIXD: THE VARIABLE DESIGNATING WHETHER THE PARAMETER IS FIXED -C (ISFIXD=TRUE) OR NOT (ISFIXD=FALSE). 
-C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=.TRUE.) OR BY OLS (ISODR=.FALSE.). -C ISWRTB: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVES WRT BETA -C (ISWRTB=TRUE) OR DELTA (ISWRTB=FALSE) ARE BEING CHECKED. -C J: AN INDEX VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSGB: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGB1: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGD: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C MSGD1: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF RELIABLE DIGITS IN THE MODEL RESULTS, EITHER -C SET BY THE USER OR COMPUTED BY DETAF. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT WHICH -C THE DERIVATIVE IS CHECKED. -C NTOL: THE NUMBER OF DIGITS OF AGREEMENT REQUIRED BETWEEN THE -C NUMERICAL DERIVATIVES AND THE USER SUPPLIED DERIVATIVES. -C ONE: THE VALUE 1.0D0. -C P5: THE VALUE 0.5D0. -C PV: THE SCALAR IN WHICH THE PREDICTED VALUE FROM THE MODEL FOR -C ROW NROW IS STORED. -C PV0: THE PREDICTED VALUES USING THE CURRENT PARAMETER ESTIMATES. -C SSF: THE SCALING VALUES USED FOR BETA. -C STPB: THE STEP SIZE FOR FINITE DIFFERENCE DERIVATIVES WRT BETA. -C STPD: THE STEP SIZE FOR FINITE DIFFERENCE DERIVATIVES WRT DELTA. -C TOL: THE AGREEMENT TOLERANCE. -C TT: THE SCALING VALUES USED FOR DELTA. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. 
-C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C XPLUSD: THE VALUES OF X + DELTA. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DJCK - - -C SET TOLERANCE FOR CHECKING DERIVATIVES - - TOL = ETA**(0.25D0) - NTOL = MAX(ONE,P5-LOG10(TOL)) - - -C COMPUTE USER SUPPLIED DERIVATIVE VALUES - - ISTOP = 0 - IF (ISODR) THEN - IDEVAL = 110 - ELSE - IDEVAL = 010 - END IF - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + IDEVAL,WRK2,FJACB,FJACD, - + ISTOP) - IF (ISTOP.NE.0) THEN - RETURN - ELSE - NJEV = NJEV + 1 - END IF - -C CHECK DERIVATIVES WRT BETA FOR EACH RESPONSE OF OBSERVATION NROW - - MSGB1 = 0 - MSGD1 = 0 - - DO 30 LQ=1,NQ - -C SET PREDICTED VALUE OF MODEL AT CURRENT PARAMETER ESTIMATES - PV = PV0(NROW,LQ) - - ISWRTB = .TRUE. - DO 10 J=1,NP - - IF (IFIXB(1).LT.0) THEN - ISFIXD = .FALSE. - ELSE IF (IFIXB(J).EQ.0) THEN - ISFIXD = .TRUE. - ELSE - ISFIXD = .FALSE. - END IF - - IF (ISFIXD) THEN - MSGB(1+LQ+(J-1)*NQ) = -1 - ELSE - IF (BETA(J).EQ.ZERO) THEN - IF (SSF(1).LT.ZERO) THEN - TYPJ = ONE/ABS(SSF(1)) - ELSE - TYPJ = ONE/SSF(J) - END IF - ELSE - TYPJ = ABS(BETA(J)) - END IF - - H0 = DHSTEP(0,NETA,1,J,STPB,1) - HC0 = H0 - -C CHECK DERIVATIVE WRT THE J-TH PARAMETER AT THE NROW-TH ROW - - CALL DJCKM(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,EPSMAC,J,LQ,TYPJ,H0,HC0, - + ISWRTB,PV,FJACB(NROW,J,LQ), - + DIFFJ,MSGB1,MSGB(2),ISTOP,NFEV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - MSGB(1) = -1 - RETURN - ELSE - DIFF(LQ,J) = DIFFJ - END IF - END IF - - 10 CONTINUE - -C CHECK DERIVATIVES WRT X FOR EACH RESPONSE OF OBSERVATION NROW - - IF (ISODR) THEN - ISWRTB = .FALSE. - DO 20 J=1,M - - IF (IFIXX(1,1).LT.0) THEN - ISFIXD = .FALSE. - ELSE IF (LDIFX.EQ.1) THEN - IF (IFIXX(1,J).EQ.0) THEN - ISFIXD = .TRUE. - ELSE - ISFIXD = .FALSE. - END IF - ELSE - ISFIXD = .FALSE. 
- END IF - - IF (ISFIXD) THEN - MSGD(1+LQ+(J-1)*NQ) = -1 - ELSE - - IF (XPLUSD(NROW,J).EQ.ZERO) THEN - IF (TT(1,1).LT.ZERO) THEN - TYPJ = ONE/ABS(TT(1,1)) - ELSE IF (LDTT.EQ.1) THEN - TYPJ = ONE/TT(1,J) - ELSE - TYPJ = ONE/TT(NROW,J) - END IF - ELSE - TYPJ = ABS(XPLUSD(NROW,J)) - END IF - - H0 = DHSTEP(0,NETA,NROW,J,STPD,LDSTPD) - HC0 = DHSTEP(1,NETA,NROW,J,STPD,LDSTPD) - -C CHECK DERIVATIVE WRT THE J-TH COLUMN OF DELTA AT ROW NROW - - CALL DJCKM(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,EPSMAC,J,LQ,TYPJ,H0,HC0, - + ISWRTB,PV,FJACD(NROW,J,LQ), - + DIFFJ,MSGD1,MSGD(2),ISTOP,NFEV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - MSGD(1) = -1 - RETURN - ELSE - DIFF(LQ,NP+J) = DIFFJ - END IF - END IF - - 20 CONTINUE - END IF - 30 CONTINUE - MSGB(1) = MSGB1 - MSGD(1) = MSGD1 - - RETURN - END -*DJCKC - SUBROUTINE DJCKC - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,EPSMAC,J,LQ,HC,ISWRTB, - + FD,TYPJ,PVPSTP,STP0, - + PV,D, - + DIFFJ,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DJCKC -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DJCKF,DPVB,DPVD -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE CHECK WHETHER HIGH CURVATURE COULD BE THE CAUSE OF THE -C DISAGREEMENT BETWEEN THE NUMERICAL AND ANALYTIC DERVIATIVES -C (ADAPTED FROM STARPAC SUBROUTINE DCKCRV) -C***END PROLOGUE DJCKC - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + D,DIFFJ,EPSMAC,ETA,FD,HC,PV,PVPSTP,STP0,TOL,TYPJ - INTEGER - + ISTOP,J,LDIFX,LQ,M,N,NFEV,NP,NQ,NROW - LOGICAL - + ISWRTB - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSG(NQ,J) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + CURVE,ONE,PVMCRV,PVPCRV,P01,STP,STPCRV,TEN,TWO - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DJCKF,DPVB,DPVD - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,SIGN - -C...DATA STATEMENTS - DATA - + 
P01,ONE,TWO,TEN - + /0.01D0,1.0D0,2.0D0,10.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C CURVE: A MEASURE OF THE CURVATURE IN THE MODEL. -C D: THE DERIVATIVE WITH RESPECT TO THE JTH UNKNOWN PARAMETER. -C DIFFJ: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR THE DERIVATIVE BEING -C CHECKED. -C EPSMAC: THE VALUE OF MACHINE PRECISION. -C ETA: THE RELATIVE NOISE IN THE MODEL -C FD: THE FORWARD DIFFERENCE DERIVATIVE WRT THE JTH PARAMETER. -C HC: THE RELATIVE STEP SIZE FOR CENTRAL FINITE DIFFERENCES. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISWRTB: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVES WRT BETA -C (ISWRTB=TRUE) OR DELTA(ISWRTB=FALSE) ARE BEING CHECKED. -C J: THE INDEX OF THE PARTIAL DERIVATIVE BEING EXAMINED. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSG: THE ERROR CHECKING RESULTS. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT WHICH -C THE DERIVATIVE IS TO BE CHECKED. -C ONE: THE VALUE 1.0D0. -C PV: THE PREDICTED VALUE OF THE MODEL FOR ROW NROW . -C PVMCRV: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C BASED ON THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE -C JTH PARAMETER VALUE, WHICH IS BETA(J)-STPCRV. 
-C PVPCRV: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C BASED ON THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE -C JTH PARAMETER VALUE, WHICH IS BETA(J)+STPCRV. -C PVPSTP: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C BASED ON THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE -C JTH PARAMETER VALUE, WHICH IS BETA(J) + STP0. -C P01: THE VALUE 0.01D0. -C STP0: THE INITIAL STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C STP: A STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C STPCRV: THE STEP SIZE SELECTED TO CHECK FOR CURVATURE IN THE MODEL. -C TEN: THE VALUE 10.0D0. -C TOL: THE AGREEMENT TOLERANCE. -C TWO: THE VALUE 2.0D0. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C XPLUSD: THE VALUES OF X + DELTA. - - -C***FIRST EXECUTABLE STATEMENT DJCKC - - - IF (ISWRTB) THEN - -C PERFORM CENTRAL DIFFERENCE COMPUTATIONS FOR DERIVATIVES WRT BETA - - STPCRV = (HC*TYPJ*SIGN(ONE,BETA(J))+BETA(J)) - BETA(J) - CALL DPVB(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STPCRV, - + ISTOP,NFEV,PVPCRV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - CALL DPVB(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,-STPCRV, - + ISTOP,NFEV,PVMCRV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - ELSE - -C PERFORM CENTRAL DIFFERENCE COMPUTATIONS FOR DERIVATIVES WRT DELTA - - STPCRV = (HC*TYPJ*SIGN(ONE,XPLUSD(NROW,J))+XPLUSD(NROW,J)) - - + XPLUSD(NROW,J) - CALL DPVD(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STPCRV, - + ISTOP,NFEV,PVPCRV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - CALL DPVD(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,-STPCRV, - + ISTOP,NFEV,PVMCRV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - END IF - -C ESTIMATE CURVATURE BY SECOND DERIVATIVE OF MODEL - - 
CURVE = ABS((PVPCRV-PV)+(PVMCRV-PV)) / (STPCRV*STPCRV) - CURVE = CURVE + - + ETA*(ABS(PVPCRV)+ABS(PVMCRV)+TWO*ABS(PV)) / (STPCRV**2) - - -C CHECK IF FINITE PRECISION ARITHMETIC COULD BE THE CULPRIT. - CALL DJCKF(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,J,LQ,ISWRTB, - + FD,TYPJ,PVPSTP,STP0,CURVE,PV,D, - + DIFFJ,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - IF (MSG(LQ,J).EQ.0) THEN - RETURN - END IF - -C CHECK IF HIGH CURVATURE COULD BE THE PROBLEM. - - STP = TWO*MAX(TOL*ABS(D)/CURVE,EPSMAC) - IF (STP.LT.ABS(TEN*STP0)) THEN - STP = MIN(STP,P01*ABS(STP0)) - END IF - - - IF (ISWRTB) THEN - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT BETA - STP = (STP*SIGN(ONE,BETA(J)) + BETA(J)) - BETA(J) - CALL DPVB(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP, - + ISTOP,NFEV,PVPSTP, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - ELSE - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT DELTA - STP = (STP*SIGN(ONE,XPLUSD(NROW,J)) + XPLUSD(NROW,J)) - - + XPLUSD(NROW,J) - CALL DPVD(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP, - + ISTOP,NFEV,PVPSTP, - + WRK1,WRK2,WRK6) - IF (ISTOP.NE.0) THEN - RETURN - END IF - END IF - -C COMPUTE THE NEW NUMERICAL DERIVATIVE - - FD = (PVPSTP-PV)/STP - DIFFJ = MIN(DIFFJ,ABS(FD-D)/ABS(D)) - -C CHECK WHETHER THE NEW NUMERICAL DERIVATIVE IS OK - IF (ABS(FD-D).LE.TOL*ABS(D)) THEN - MSG(LQ,J) = 0 - -C CHECK IF FINITE PRECISION MAY BE THE CULPRIT (FUDGE FACTOR = 2) - ELSE IF (ABS(STP*(FD-D)).LT.TWO*ETA*(ABS(PV)+ABS(PVPSTP)) - + + CURVE*(EPSMAC*TYPJ)**2) THEN - MSG(LQ,J) = 5 - END IF - - RETURN - END -*DJCKF - SUBROUTINE DJCKF - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,J,LQ,ISWRTB, - + FD,TYPJ,PVPSTP,STP0,CURVE,PV,D, - + DIFFJ,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DJCKF -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DPVB,DPVD -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 
920619 (YYMMDD) -C***PURPOSE CHECK WHETHER FINITE PRECISION ARITHMETIC COULD BE THE -C CAUSE OF THE DISAGREEMENT BETWEEN THE DERIVATIVES -C (ADAPTED FROM STARPAC SUBROUTINE DCKFPA) -C***END PROLOGUE DJCKF - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + CURVE,D,DIFFJ,ETA,FD,PV,PVPSTP,STP0,TOL,TYPJ - INTEGER - + ISTOP,J,LDIFX,LQ,M,N,NFEV,NP,NQ,NROW - LOGICAL - + ISWRTB - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSG(NQ,J) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + HUNDRD,ONE,P1,STP,TWO - LOGICAL - + LARGE - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DPVB,DPVD - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,SIGN - -C...DATA STATEMENTS - DATA - + P1,ONE,TWO,HUNDRD - + /0.1D0,1.0D0,2.0D0,100.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C CURVE: A MEASURE OF THE CURVATURE IN THE MODEL. -C D: THE DERIVATIVE WITH RESPECT TO THE JTH UNKNOWN PARAMETER. -C DIFFJ: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR THE DERIVATIVE BEING -C CHECKED. -C ETA: THE RELATIVE NOISE IN THE MODEL -C FD: THE FORWARD DIFFERENCE DERIVATIVE WRT THE JTH PARAMETER. -C HUNDRD: THE VALUE 100.0D0. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISWRTB: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVES WRT BETA -C (ISWRTB=TRUE) OR DELTA(ISWRTB=FALSE) ARE BEING CHECKED. -C J: THE INDEX OF THE PARTIAL DERIVATIVE BEING EXAMINED. 
-C LARGE: THE VALUE DESIGNATING WHETHER THE RECOMMENDED INCREASE IN -C THE STEP SIZE WOULD BE GREATER THAN TYPJ. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSG: THE ERROR CHECKING RESULTS. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT WHICH -C THE DERIVATIVE IS TO BE CHECKED. -C ONE: THE VALUE 1.0D0. -C PV: THE PREDICTED VALUE FOR ROW NROW . -C PVPSTP: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C BASED ON THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE -C JTH PARAMETER VALUE, WHICH IS BETA(J) + STP0. -C P1: THE VALUE 0.1D0. -C STP0: THE STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C TOL: THE AGREEMENT TOLERANCE. -C TWO: THE VALUE 2.0D0. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C XPLUSD: THE VALUES OF X + DELTA. - - -C***FIRST EXECUTABLE STATEMENT DJCKF - - -C FINITE PRECISION ARITHMETIC COULD BE THE PROBLEM. -C TRY A LARGER STEP SIZE BASED ON ESTIMATE OF CONDITION ERROR - - STP = ETA*(ABS(PV)+ABS(PVPSTP))/(TOL*ABS(D)) - IF (STP.GT.ABS(P1*STP0)) THEN - STP = MAX(STP,HUNDRD*ABS(STP0)) - END IF - IF (STP.GT.TYPJ) THEN - STP = TYPJ - LARGE = .TRUE. - ELSE - LARGE = .FALSE. 
- END IF - - IF (ISWRTB) THEN - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT BETA - STP = (STP*SIGN(ONE,BETA(J))+BETA(J)) - BETA(J) - CALL DPVB(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP, - + ISTOP,NFEV,PVPSTP, - + WRK1,WRK2,WRK6) - ELSE - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT DELTA - STP = (STP*SIGN(ONE,XPLUSD(NROW,J)) + XPLUSD(NROW,J)) - - + XPLUSD(NROW,J) - CALL DPVD(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP, - + ISTOP,NFEV,PVPSTP, - + WRK1,WRK2,WRK6) - END IF - IF (ISTOP.NE.0) THEN - RETURN - END IF - - FD = (PVPSTP-PV)/STP - DIFFJ = MIN(DIFFJ,ABS(FD-D)/ABS(D)) - -C CHECK FOR AGREEMENT - - IF ((ABS(FD-D)).LE.TOL*ABS(D)) THEN -C FORWARD DIFFERENCE QUOTIENT AND ANALYTIC DERIVATIVES AGREE. - MSG(LQ,J) = 0 - - ELSE IF ((ABS(FD-D).LE.ABS(TWO*CURVE*STP)) .OR. LARGE) THEN -C CURVATURE MAY BE THE CULPRIT (FUDGE FACTOR = 2) - IF (LARGE) THEN - MSG(LQ,J) = 4 - ELSE - MSG(LQ,J) = 5 - END IF - END IF - - RETURN - END -*DJCKM - SUBROUTINE DJCKM - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,EPSMAC,J,LQ,TYPJ,H0,HC0, - + ISWRTB,PV,D, - + DIFFJ,MSG1,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DJCKM -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DJCKC,DJCKZ,DPVB,DPVD -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE CHECK USER SUPPLIED ANALYTIC DERIVATIVES AGAINST NUMERICAL -C DERIVATIVES -C (ADAPTED FROM STARPAC SUBROUTINE DCKMN) -C***END PROLOGUE DJCKM - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + D,DIFFJ,EPSMAC,ETA,H0,HC0,PV,TOL,TYPJ - INTEGER - + ISTOP,J,LDIFX,LQ,M,MSG1,N,NFEV,NP,NQ,NROW - LOGICAL - + ISWRTB - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSG(NQ,J) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + BIG,FD,H,HC,H1,HC1,HUNDRD,ONE,PVPSTP,P01,P1,STP0, - + TEN,THREE,TOL2,TWO,ZERO - INTEGER - 
+ I - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DJCKC,DJCKZ,DPVB,DPVD - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MAX,SIGN,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,P01,P1,ONE,TWO,THREE,TEN,HUNDRD - + /0.0D0,0.01D0,0.1D0,1.0D0,2.0D0,3.0D0,1.0D1,1.0D2/ - DATA - + BIG,TOL2 - + /1.0D19,5.0D-2/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C BIG: A BIG VALUE, USED TO INITIALIZE DIFFJ. -C D: THE DERIVATIVE WITH RESPECT TO THE JTH UNKNOWN PARAMETER. -C DIFFJ: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR THE DERIVATIVE BEING -C CHECKED. -C EPSMAC: THE VALUE OF MACHINE PRECISION. -C ETA: THE RELATIVE NOISE IN THE FUNCTION RESULTS. -C FD: THE FORWARD DIFFERENCE DERIVATIVE WRT THE JTH PARAMETER. -C H: THE RELATIVE STEP SIZE FOR FORWARD DIFFERENCES. -C H0: THE INITIAL RELATIVE STEP SIZE FOR FORWARD DIFFERENCES. -C H1: THE DEFAULT RELATIVE STEP SIZE FOR FORWARD DIFFERENCES. -C HC: THE RELATIVE STEP SIZE FOR CENTRAL DIFFERENCES. -C HC0: THE INITIAL RELATIVE STEP SIZE FOR CENTRAL DIFFERENCES. -C HC1: THE DEFAULT RELATIVE STEP SIZE FOR CENTRAL DIFFERENCES. -C HUNDRD: THE VALUE 100.0D0. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISWRTB: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVES WRT BETA -C (ISWRTB=TRUE) OR DELTAS (ISWRTB=FALSE) ARE BEING CHECKED. -C J: THE INDEX OF THE PARTIAL DERIVATIVE BEING EXAMINED. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSG: THE ERROR CHECKING RESULTS. 
-C MSG1: THE ERROR CHECKING RESULTS SUMMARY. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT WHICH -C THE DERIVATIVE IS TO BE CHECKED. -C ONE: THE VALUE 1.0D0. -C PV: THE PREDICTED VALUE FROM THE MODEL FOR ROW NROW . -C PVPSTP: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C USING THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE JTH -C PARAMETER VALUE, WHICH IS BETA(J) + STP0. -C P01: THE VALUE 0.01D0. -C P1: THE VALUE 0.1D0. -C STP0: THE INITIAL STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C TEN: THE VALUE 10.0D0. -C THREE: THE VALUE 3.0D0. -C TWO: THE VALUE 2.0D0. -C TOL: THE AGREEMENT TOLERANCE. -C TOL2: A MINIMUM AGREEMENT TOLERANCE. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C XPLUSD: THE VALUES OF X + DELTA. -C ZERO: THE VALUE 0.0D0. 
- - -C***FIRST EXECUTABLE STATEMENT DJCKM - - -C CALCULATE THE JTH PARTIAL DERIVATIVE USING FORWARD DIFFERENCE -C QUOTIENTS AND DECIDE IF IT AGREES WITH USER SUPPLIED VALUES - - H1 = SQRT(ETA) - HC1 = ETA**(ONE/THREE) - - MSG(LQ,J) = 7 - DIFFJ = BIG - - DO 10 I=1,3 - - IF (I.EQ.1) THEN -C TRY INITIAL RELATIVE STEP SIZE - H = H0 - HC = HC0 - - ELSE IF (I.EQ.2) THEN -C TRY LARGER RELATIVE STEP SIZE - H = MAX(TEN*H1, MIN(HUNDRD*H0, ONE)) - HC = MAX(TEN*HC1,MIN(HUNDRD*HC0,ONE)) - - ELSE IF (I.EQ.3) THEN -C TRY SMALLER RELATIVE STEP SIZE - H = MIN(P1*H1, MAX(P01*H,TWO*EPSMAC)) - HC = MIN(P1*HC1,MAX(P01*HC,TWO*EPSMAC)) - END IF - - IF (ISWRTB) THEN - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT BETA - - STP0 = (H*TYPJ*SIGN(ONE,BETA(J))+BETA(J)) - BETA(J) - CALL DPVB(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP0, - + ISTOP,NFEV,PVPSTP, - + WRK1,WRK2,WRK6) - ELSE - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT DELTA - - STP0 = (H*TYPJ*SIGN(ONE,XPLUSD(NROW,J))+XPLUSD(NROW,J)) - + - XPLUSD(NROW,J) - CALL DPVD(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP0, - + ISTOP,NFEV,PVPSTP, - + WRK1,WRK2,WRK6) - END IF - IF (ISTOP.NE.0) THEN - RETURN - END IF - - FD = (PVPSTP-PV)/STP0 - -C CHECK FOR AGREEMENT - - IF (ABS(FD-D).LE.TOL*ABS(D)) THEN -C NUMERICAL AND ANALYTIC DERIVATIVES AGREE - -C SET RELATIVE DIFFERENCE FOR DERIVATIVE CHECKING REPORT - IF ((D.EQ.ZERO) .OR. (FD.EQ.ZERO)) THEN - DIFFJ = ABS(FD-D) - ELSE - DIFFJ = ABS(FD-D)/ABS(D) - END IF - -C SET MSG FLAG. - IF (D.EQ.ZERO) THEN - -C JTH ANALYTIC AND NUMERICAL DERIVATIVES ARE BOTH ZERO. - MSG(LQ,J) = 1 - - ELSE -C JTH ANALYTIC AND NUMERICAL DERIVATIVES ARE BOTH NONZERO. - MSG(LQ,J) = 0 - END IF - - ELSE - -C NUMERICAL AND ANALYTIC DERIVATIVES DISAGREE. CHECK WHY - IF ((D.EQ.ZERO) .OR. 
(FD.EQ.ZERO)) THEN - CALL DJCKZ(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,EPSMAC,J,LQ,ISWRTB, - + TOL,D,FD,TYPJ,PVPSTP,STP0,PV, - + DIFFJ,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) - ELSE - CALL DJCKC(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + ETA,TOL,NROW,EPSMAC,J,LQ,HC,ISWRTB, - + FD,TYPJ,PVPSTP,STP0,PV,D, - + DIFFJ,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) - END IF - IF (MSG(LQ,J).LE.2) THEN - GO TO 20 - END IF - END IF - 10 CONTINUE - -C SET SUMMARY FLAG TO INDICATE QUESTIONABLE RESULTS - 20 CONTINUE - IF ((MSG(LQ,J).GE.7) .AND. (DIFFJ.LE.TOL2)) MSG(LQ,J) = 6 - IF ((MSG(LQ,J).GE.1) .AND. (MSG(LQ,J).LE.6)) THEN - MSG1 = MAX(MSG1,1) - ELSE IF (MSG(LQ,J).GE.7) THEN - MSG1 = 2 - END IF - - RETURN - END -*DJCKZ - SUBROUTINE DJCKZ - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,EPSMAC,J,LQ,ISWRTB, - + TOL,D,FD,TYPJ,PVPSTP,STP0,PV, - + DIFFJ,MSG,ISTOP,NFEV, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DJCKZ -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DPVB,DPVD -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE RECHECK THE DERIVATIVES IN THE CASE WHERE THE FINITE -C DIFFERENCE DERIVATIVE DISAGREES WITH THE ANALYTIC -C DERIVATIVE AND THE ANALYTIC DERIVATIVE IS ZERO -C (ADAPTED FROM STARPAC SUBROUTINE DCKZRO) -C***END PROLOGUE DJCKZ - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + D,DIFFJ,EPSMAC,FD,PV,PVPSTP,STP0,TOL,TYPJ - INTEGER - + ISTOP,J,LDIFX,LQ,M,N,NFEV,NP,NQ,NROW - LOGICAL - + ISWRTB - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSG(NQ,J) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + CD,ONE,PVMSTP,THREE,TWO,ZERO - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DPVB,DPVD - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MIN - -C...DATA STATEMENTS - DATA - + ZERO,ONE,TWO,THREE - + /0.0D0,1.0D0,2.0D0,3.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C 
FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C CD: THE CENTRAL DIFFERENCE DERIVATIVE WRT THE JTH PARAMETER. -C D: THE DERIVATIVE WITH RESPECT TO THE JTH UNKNOWN PARAMETER. -C DIFFJ: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR THE DERIVATIVE BEING -C CHECKED. -C EPSMAC: THE VALUE OF MACHINE PRECISION. -C FD: THE FORWARD DIFFERENCE DERIVATIVE WRT THE JTH PARAMETER. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISWRTB: THE VARIABLE DESIGNATING WHETHER THE DERIVATIVES WRT BETA -C (ISWRTB=TRUE) OR X (ISWRTB=FALSE) ARE BEING CHECKED. -C J: THE INDEX OF THE PARTIAL DERIVATIVE BEING EXAMINED. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSG: THE ERROR CHECKING RESULTS. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT WHICH -C THE DERIVATIVE IS TO BE CHECKED. -C ONE: THE VALUE 1.0D0. -C PV: THE PREDICTED VALUE FROM THE MODEL FOR ROW NROW . -C PVMSTP: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C USING THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE -C JTH PARAMETER VALUE, WHICH IS BETA(J) - STP0. -C PVPSTP: THE PREDICTED VALUE FOR ROW NROW OF THE MODEL -C USING THE CURRENT PARAMETER ESTIMATES FOR ALL BUT THE -C JTH PARAMETER VALUE, WHICH IS BETA(J) + STP0. -C STP0: THE INITIAL STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C THREE: THE VALUE 3.0D0. 
-C TWO: THE VALUE 2.0D0. -C TOL: THE AGREEMENT TOLERANCE. -C TYPJ: THE TYPICAL SIZE OF THE J-TH UNKNOWN BETA OR DELTA. -C WRK1: A WORK ARRAY OF (N BY M BY NQ) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK6: A WORK ARRAY OF (N BY NP BY NQ) ELEMENTS. -C XPLUSD: THE VALUES OF X + DELTA. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DJCKZ - - -C RECALCULATE NUMERICAL DERIVATIVE USING CENTRAL DIFFERENCE AND STEP -C SIZE OF 2*STP0 - - IF (ISWRTB) THEN - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT BETA - - CALL DPVB(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,-STP0, - + ISTOP,NFEV,PVMSTP, - + WRK1,WRK2,WRK6) - ELSE - -C PERFORM COMPUTATIONS FOR DERIVATIVES WRT DELTA - - CALL DPVD(FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,-STP0, - + ISTOP,NFEV,PVMSTP, - + WRK1,WRK2,WRK6) - END IF - IF (ISTOP.NE.0) THEN - RETURN - END IF - - CD = (PVPSTP-PVMSTP)/(TWO*STP0) - DIFFJ = MIN(ABS(CD-D),ABS(FD-D)) - -C CHECK FOR AGREEMENT - - IF (DIFFJ.LE.TOL*ABS(D)) THEN - -C FINITE DIFFERENCE AND ANALYTIC DERIVATIVES NOW AGREE. 
- IF (D.EQ.ZERO) THEN - MSG(LQ,J) = 1 - ELSE - MSG(LQ,J) = 0 - END IF - - ELSE IF (DIFFJ*TYPJ.LE.ABS(PV*EPSMAC**(ONE/THREE))) THEN -C DERIVATIVES ARE BOTH CLOSE TO ZERO - MSG(LQ,J) = 2 - - ELSE -C DERIVATIVES ARE NOT BOTH CLOSE TO ZERO - MSG(LQ,J) = 3 - END IF - - RETURN - END -*DODCHK - SUBROUTINE DODCHK - + (N,M,NP,NQ, - + ISODR,ANAJAC,IMPLCT, - + IFIXB, - + LDX,LDIFX,LDSCLD,LDSTPD,LDWE,LD2WE,LDWD,LD2WD, - + LDY, - + LWORK,LWKMN,LIWORK,LIWKMN, - + SCLB,SCLD,STPB,STPD, - + INFO) -C***BEGIN PROLOGUE DODCHK -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE CHECK INPUT PARAMETERS, INDICATING ERRORS FOUND USING -C NONZERO VALUES OF ARGUMENT INFO -C***END PROLOGUE DODCHK - -C...SCALAR ARGUMENTS - INTEGER - + INFO,LDIFX,LDSCLD,LDSTPD,LDWD,LDWE,LDX,LDY,LD2WD,LD2WE, - + LIWKMN,LIWORK,LWKMN,LWORK,M,N,NP,NQ - LOGICAL - + ANAJAC,IMPLCT,ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + SCLB(NP),SCLD(LDSCLD,M),STPB(NP),STPD(LDSTPD,M) - INTEGER - + IFIXB(NP) - -C...LOCAL SCALARS - INTEGER - + I,J,K,LAST,NPP - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT -C (ANAJAC=TRUE). -C I: AN INDEXING VARIABLE. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C LAST: THE LAST ROW OF THE ARRAY TO BE ACCESSED. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. 
-C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY X. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY IWORK. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY WORK. -C LWORK: THE LENGTH OF VECTOR WORK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATIONS. -C SCLB: THE SCALING VALUES FOR BETA. -C SCLD: THE SCALING VALUE FOR DELTA. -C STPB: THE STEP FOR THE FINITE DIFFERENCE DERIVITIVE WRT BETA. -C STPD: THE STEP FOR THE FINITE DIFFERENCE DERIVITIVE WRT DELTA. - - -C***FIRST EXECUTABLE STATEMENT DODCHK - - -C FIND ACTUAL NUMBER OF PARAMETERS BEING ESTIMATED - - IF (NP.LE.0 .OR. IFIXB(1).LT.0) THEN - NPP = NP - ELSE - NPP = 0 - DO 10 K=1,NP - IF (IFIXB(K).NE.0) THEN - NPP = NPP + 1 - END IF - 10 CONTINUE - END IF - -C CHECK PROBLEM SPECIFICATION PARAMETERS - - IF (N.LE.0 .OR. - + M.LE.0 .OR. - + (NPP.LE.0 .OR. NPP.GT.N) .OR. - + (NQ.LE.0)) THEN - - INFO = 10000 - IF (N.LE.0) THEN - INFO = INFO + 1000 - END IF - IF (M.LE.0) THEN - INFO = INFO + 100 - END IF - IF (NPP.LE.0 .OR. NPP.GT.N) THEN - INFO = INFO + 10 - END IF - IF (NQ.LE.0) THEN - INFO = INFO + 1 - END IF - - RETURN - - END IF - -C CHECK DIMENSION SPECIFICATION PARAMETERS - - IF ((.NOT.IMPLCT .AND. LDY.LT.N) .OR. - + (LDX.LT.N) .OR. - + (LDWE.NE.1 .AND. LDWE.LT.N) .OR. - + (LD2WE.NE.1 .AND. LD2WE.LT.NQ) .OR. - + (ISODR .AND. (LDWD.NE.1 .AND. LDWD.LT.N)) .OR. - + (ISODR .AND. (LD2WD.NE.1 .AND. LD2WD.LT.M)) .OR. - + (ISODR .AND. (LDIFX.NE.1 .AND. LDIFX.LT.N)) .OR. - + (ISODR .AND. (LDSTPD.NE.1 .AND. LDSTPD.LT.N)) .OR. - + (ISODR .AND. (LDSCLD.NE.1 .AND. LDSCLD.LT.N)) .OR. 
- + (LWORK.LT.LWKMN) .OR. - + (LIWORK.LT.LIWKMN)) THEN - - INFO = 20000 - IF (.NOT.IMPLCT .AND. LDY.LT.N) THEN - INFO = INFO + 1000 - END IF - IF (LDX.LT.N) THEN - INFO = INFO + 2000 - END IF - - IF ((LDWE.NE.1 .AND. LDWE.LT.N) .OR. - + (LD2WE.NE.1 .AND. LD2WE.LT.NQ)) THEN - INFO = INFO + 100 - END IF - IF (ISODR .AND. ((LDWD.NE.1 .AND. LDWD.LT.N) .OR. - + (LD2WD.NE.1 .AND. LD2WD.LT.M))) THEN - INFO = INFO + 200 - END IF - - IF (ISODR .AND. (LDIFX.NE.1 .AND. LDIFX.LT.N)) THEN - INFO = INFO + 10 - END IF - IF (ISODR .AND. (LDSTPD.NE.1 .AND. LDSTPD.LT.N)) THEN - INFO = INFO + 20 - END IF - IF (ISODR .AND. (LDSCLD.NE.1 .AND. LDSCLD.LT.N)) THEN - INFO = INFO + 40 - END IF - - IF (LWORK.LT.LWKMN) THEN - INFO = INFO + 1 - END IF - IF (LIWORK.LT.LIWKMN) THEN - INFO = INFO + 2 - END IF - RETURN - - END IF - -C CHECK DELTA SCALING - - IF (ISODR .AND. SCLD(1,1).GT.0) THEN - IF (LDSCLD.GE.N) THEN - LAST = N - ELSE - LAST = 1 - END IF - DO 120 J=1,M - DO 110 I=1,LAST - IF (SCLD(I,J).LE.0) THEN - INFO = 30200 - GO TO 130 - END IF - 110 CONTINUE - 120 CONTINUE - END IF - 130 CONTINUE - -C CHECK BETA SCALING - - IF (SCLB(1).GT.0) THEN - DO 210 K=1,NP - IF (SCLB(K).LE.0) THEN - IF (INFO.EQ.0) THEN - INFO = 30100 - ELSE - INFO = INFO + 100 - END IF - GO TO 220 - END IF - 210 CONTINUE - END IF - 220 CONTINUE - -C CHECK DELTA FINITE DIFFERENCE STEP SIZES - - IF (ANAJAC .AND. ISODR .AND. STPD(1,1).GT.0) THEN - IF (LDSTPD.GE.N) THEN - LAST = N - ELSE - LAST = 1 - END IF - DO 320 J=1,M - DO 310 I=1,LAST - IF (STPD(I,J).LE.0) THEN - IF (INFO.EQ.0) THEN - INFO = 32000 - ELSE - INFO = INFO + 2000 - END IF - GO TO 330 - END IF - 310 CONTINUE - 320 CONTINUE - END IF - 330 CONTINUE - -C CHECK BETA FINITE DIFFERENCE STEP SIZES - - IF (ANAJAC .AND. 
STPB(1).GT.0) THEN - DO 410 K=1,NP - IF (STPB(K).LE.0) THEN - IF (INFO.EQ.0) THEN - INFO = 31000 - ELSE - INFO = INFO + 1000 - END IF - GO TO 420 - END IF - 410 CONTINUE - END IF - 420 CONTINUE - - RETURN - END -*DODCNT - SUBROUTINE DODCNT - + (SHORT, FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + INFO) -C***BEGIN PROLOGUE DODCNT -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DODDRV -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE DOUBLE PRECISION DRIVER ROUTINE FOR FINDING -C THE WEIGHTED EXPLICIT OR IMPLICIT ORTHOGONAL DISTANCE -C REGRESSION (ODR) OR ORDINARY LINEAR OR NONLINEAR LEAST -C SQUARES (OLS) SOLUTION -C***END PROLOGUE DODCNT - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PARTOL,SSTOL,TAUFAC - INTEGER - + INFO,IPRINT,JOB,LDIFX,LDSCLD,LDSTPD,LDWD,LDWE,LDX,LDY, - + LD2WD,LD2WE,LIWORK,LUNERR,LUNRPT,LWORK,M,MAXIT,N,NDIGIT,NP,NQ - LOGICAL - + SHORT - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),SCLB(NP),SCLD(LDSCLD,M),STPB(NP),STPD(LDSTPD,M), - + WD(LDWD,LD2WD,M),WE(LDWE,LD2WE,NQ),WORK(LWORK), - + X(LDX,M),Y(LDY,NQ) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),IWORK(LIWORK) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + CNVTOL,ONE,PCHECK,PFAC,PSTART,THREE,TSTIMP,ZERO - INTEGER - + IPRNTI,IPR1,IPR2,IPR2F,IPR3,JOBI,JOB1,JOB2,JOB3,JOB4,JOB5, - + MAXITI,MAXIT1 - LOGICAL - + DONE,FSTITR,HEAD,IMPLCT,PRTPEN - -C...LOCAL ARRAYS - DOUBLE PRECISION - + PNLTY(1,1,1) - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DODDRV - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DMPREC - EXTERNAL - + DMPREC - -C...DATA STATEMENTS - DATA - + PCHECK,PSTART,PFAC,ZERO,ONE,THREE - + /1.0D3,1.0D1,1.0D1,0.0D0,1.0D0,3.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER-SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. 
- -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C CNVTOL: THE CONVERGENCE TOLERANCE FOR IMPLICIT MODELS. -C DONE: THE VARIABLE DESIGNATING WHETHER THE INPLICIT SOLUTION HAS -C BEEN FOUND (DONE=TRUE) OR NOT (DONE=FALSE). -C FSTITR: THE VARIABLE DESIGNATING WHETHER THIS IS THE FIRST -C ITERATION (FSTITR=TRUE) OR NOT (FSTITR=FALSE). -C HEAD: THE VARIABLE DESIGNATING WHETHER THE HEADING IS TO BE -C PRINTED (HEAD=TRUE) OR NOT (HEAD=FALSE). -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C IPRINT: THE PRINT CONTROL VARIABLES. -C IPRNTI: THE PRINT CONTROL VARIABLES. -C IPR1: THE 1ST DIGIT OF THE PRINT CONTROL VARIABLE. -C IPR2: THE 2ND DIGIT OF THE PRINT CONTROL VARIABLE. -C IPR3: THE 3RD DIGIT OF THE PRINT CONTROL VARIABLE. -C IPR4: THE 4TH DIGIT OF THE PRINT CONTROL VARIABLE. -C IWORK: THE INTEGER WORK SPACE. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JOBI: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JOB1: THE 1ST DIGIT OF THE VARIABLE CONTROLING PROBLEM -C INITIALIZATION AND COMPUTATIONAL METHOD. -C JOB2: THE 2ND DIGIT OF THE VARIABLE CONTROLING PROBLEM -C INITIALIZATION AND COMPUTATIONAL METHOD. -C JOB3: THE 3RD DIGIT OF THE VARIABLE CONTROLING PROBLEM -C INITIALIZATION AND COMPUTATIONAL METHOD. -C JOB4: THE 4TH DIGIT OF THE VARIABLE CONTROLING PROBLEM -C INITIALIZATION AND COMPUTATIONAL METHOD. -C JOB5: THE 5TH DIGIT OF THE VARIABLE CONTROLING PROBLEM -C INITIALIZATION AND COMPUTATIONAL METHOD. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. 
-C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LUNERR: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C LWORK: THE LENGTH OF VECTOR WORK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C MAXITI: FOR IMPLICIT MODELS, THE NUMBER OF ITERATIONS ALLOWED FOR -C THE CURRENT PENALTY PARAMETER VALUE. -C MAXIT1: FOR IMPLICIT MODELS, THE NUMBER OF ITERATIONS ALLOWED FOR -C THE NEXT PENALTY PARAMETER VALUE. -C N: THE NUMBER OF OBSERVATIONS. -C NDIGIT: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS, AS -C SUPPLIED BY THE USER. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C ONE: THE VALUE 1.0D0. -C PARTOL: THE USER SUPPLIED PARAMETER CONVERGENCE STOPPING TOLERANCE. -C PCHECK: THE VALUE DESIGNATING THE MINIMUM PENALTY PARAMETER ALLOWED -C BEFORE THE IMPLICIT PROBLEM CAN BE CONSIDERED SOLVED. -C PFAC: THE FACTOR FOR INCREASING THE PENALTY PARAMETER. -C PNLTY: THE PENALTY PARAMETER FOR AN IMPLICIT MODEL. -C PRTPEN: THE VALUE DESIGNATING WHETHER THE PENALTY PARAMETER IS TO BE -C PRINTED IN THE ITERATION REPORT (PRTPEN=TRUE) OR NOT -C (PRTPEN=FALSE). -C PSTART: THE FACTOR FOR INCREASING THE PENALTY PARAMETER. -C SCLB: THE SCALING VALUES FOR BETA. -C SCLD: THE SCALING VALUES FOR DELTA. -C STPB: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C SHORT: THE VARIABLE DESIGNATING WHETHER THE USER HAS INVOKED -C ODRPACK BY THE SHORT-CALL (SHORT=.TRUE.) 
OR THE LONG-CALL -C (SHORT=.FALSE.). -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C THREE: THE VALUE 3.0D0. -C TSTIMP: THE RELATIVE CHANGE IN THE PARAMETERS BETWEEN THE INITIAL -C VALUES AND THE SOLUTION. -C WD: THE DELTA WEIGHTS. -C WE: THE EPSILON WEIGHTS. -C WORK: THE DOUBLE PRECISION WORK SPACE. -C X: THE INDEPENDENT VARIABLE. -C Y: THE DEPENDENT VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODCNT - - - IMPLCT = MOD(JOB,10).EQ.1 - FSTITR = .TRUE. - HEAD = .TRUE. - PRTPEN = .FALSE. - - IF (IMPLCT) THEN - -C SET UP FOR IMPLICIT PROBLEM - - IF (IPRINT.GE.0) THEN - IPR1 = MOD(IPRINT,10000)/1000 - IPR2 = MOD(IPRINT,1000)/100 - IPR2F = MOD(IPRINT,100)/10 - IPR3 = MOD(IPRINT,10) - ELSE - IPR1 = 2 - IPR2 = 0 - IPR2F = 0 - IPR3 = 1 - END IF - IPRNTI = IPR1*1000 + IPR2*100 + IPR2F*10 - - JOB5 = MOD(JOB,100000)/10000 - JOB4 = MOD(JOB,10000)/1000 - JOB3 = MOD(JOB,1000)/100 - JOB2 = MOD(JOB,100)/10 - JOB1 = MOD(JOB,10) - JOBI = JOB5*10000 + JOB4*1000 + JOB3*100 + JOB2*10 + JOB1 - - IF (WE(1,1,1).LE.ZERO) THEN - PNLTY(1,1,1) = -PSTART - ELSE - PNLTY(1,1,1) = -WE(1,1,1) - END IF - - IF (PARTOL.LT.ZERO) THEN - CNVTOL = DMPREC()**(ONE/THREE) - ELSE - CNVTOL = MIN(PARTOL,ONE) - END IF - - IF (MAXIT.GE.1) THEN - MAXITI = MAXIT - ELSE - MAXITI = 100 - END IF - - DONE = MAXITI.EQ.0 - PRTPEN = .TRUE. - - 10 CONTINUE - CALL DODDRV - + (SHORT,HEAD,FSTITR,PRTPEN, - + FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + PNLTY,1,1,WD,LDWD,LD2WD, IFIXB,IFIXX,LDIFX, - + JOBI,NDIGIT,TAUFAC, SSTOL,CNVTOL,MAXITI, - + IPRNTI,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + MAXIT1,TSTIMP, INFO) - - IF (DONE) THEN - RETURN - ELSE - DONE = MAXIT1.LE.0 .OR. - + (ABS(PNLTY(1,1,1)).GE.PCHECK .AND. 
- + TSTIMP.LE.CNVTOL) - END IF - - IF (DONE) THEN - IF (TSTIMP.LE.CNVTOL) THEN - INFO = (INFO/10)*10 + 2 - ELSE - INFO = (INFO/10)*10 + 4 - END IF - JOBI = 10000 + 1000 + JOB3*100 + JOB2*10 + JOB1 - MAXITI = 0 - IPRNTI = IPR3 - ELSE - PRTPEN = .TRUE. - PNLTY(1,1,1) = PFAC*PNLTY(1,1,1) - JOBI = 10000 + 1000 + 000 + JOB2*10 + JOB1 - MAXITI = MAXIT1 - IPRNTI = 0000 + IPR2*100 + IPR2F*10 - END IF - GO TO 10 - ELSE - CALL DODDRV - + (SHORT,HEAD,FSTITR,PRTPEN, - + FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + MAXIT1,TSTIMP, INFO) - END IF - - RETURN - - END -*DODDRV - SUBROUTINE DODDRV - + (SHORT,HEAD,FSTITR,PRTPEN, - + FCN, N,M,NP,NQ, BETA, Y,LDY,X,LDX, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, IFIXB,IFIXX,LDIFX, - + JOB,NDIGIT,TAUFAC, SSTOL,PARTOL,MAXIT, - + IPRINT,LUNERR,LUNRPT, - + STPB,STPD,LDSTPD, SCLB,SCLD,LDSCLD, - + WORK,LWORK,IWORK,LIWORK, - + MAXIT1,TSTIMP, INFO) -C***BEGIN PROLOGUE DODDRV -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN,DCOPY,DDOT,DETAF,DFCTRW,DFLAGS, -C DINIWK,DIWINF,DJCK,DNRM2,DODCHK,DODMN, -C DODPER,DPACK,DSETN,DUNPAC,DWGHT,DWINF,DXMY,DXPY -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE PERFORM ERROR CHECKING AND INITIALIZATION, AND BEGIN -C PROCEDURE FOR PERFORMING ORTHOGONAL DISTANCE REGRESSION -C (ODR) OR ORDINARY LINEAR OR NONLINEAR LEAST SQUARES (OLS) -C***END PROLOGUE DODDRV - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PARTOL,SSTOL,TAUFAC,TSTIMP - INTEGER - + INFO,IPRINT,JOB,LDIFX,LDSCLD,LDSTPD,LDWD,LDWE,LDX,LDY, - + LD2WD,LD2WE,LIWORK,LUNERR,LUNRPT,LWORK,M,MAXIT,MAXIT1, - + N,NDIGIT,NP,NQ - LOGICAL - + FSTITR,HEAD,PRTPEN,SHORT - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),SCLB(NP),SCLD(LDSCLD,M),STPB(NP),STPD(LDSTPD,M), - + WE(LDWE,LD2WE,NQ),WD(LDWD,LD2WD,M),WORK(LWORK), - + X(LDX,M),Y(LDY,NQ) - INTEGER - + 
IFIXB(NP),IFIXX(LDIFX,M),IWORK(LIWORK) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + EPSMAC,ETA,P5,ONE,TEN,ZERO - INTEGER - + ACTRSI,ALPHAI,BETACI,BETANI,BETASI,BETA0I,DELTAI,DELTNI,DELTSI, - + DIFFI,EPSMAI,ETAI,FI,FJACBI,FJACDI,FNI,FSI,I,IDFI,INT2I,IPRINI, - + IRANKI,ISTOP,ISTOPI,JOBI,JPVTI,K,LDTT,LDTTI,LIWKMN, - + LUNERI,LUNRPI,LWKMN,LWRK,MAXITI,MSGB,MSGD,NETA,NETAI, - + NFEV,NFEVI,NITERI,NJEV,NJEVI,NNZW,NNZWI,NPP,NPPI,NROW,NROWI, - + NTOL,NTOLI,OLMAVI,OMEGAI,PARTLI,PNORMI,PRERSI,QRAUXI,RCONDI, - + RNORSI,RVARI,SDI,SI,SSFI,SSI,SSTOLI,TAUFCI,TAUI,TI,TTI,UI, - + VCVI,WE1I,WRK1I,WRK2I,WRK3I,WRK4I,WRK5I,WRK6I,WRK7I,WRK, - + WSSI,WSSDEI,WSSEPI,XPLUSI - LOGICAL - + ANAJAC,CDJAC,CHKJAC,DOVCV,IMPLCT,INITD,ISODR,REDOJ,RESTRT - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT,DNRM2 - EXTERNAL - + DDOT,DNRM2 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DCOPY,DETAF,DFCTRW,DFLAGS,DINIWK,DIWINF,DJCK,DODCHK, - + DODMN,DODPER,DPACK,DSETN,DUNPAC,DWGHT,DWINF,DXMY,DXPY - -C...DATA STATEMENTS - DATA - + ZERO,P5,ONE,TEN - + /0.0D0,0.5D0,1.0D0,10.0D0/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ACTRSI: THE LOCATION IN ARRAY WORK OF VARIABLE ACTRS. -C ALPHAI: THE LOCATION IN ARRAY WORK OF VARIABLE ALPHA. -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT -C (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. -C BETACI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAC. -C BETANI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAN. -C BETASI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAS. -C BETA0I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETA0. -C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE -C COMPUTED BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR FORWARD -C DIFFERENCES (CDJAC=FALSE). 
-C CHKJAC: THE VARIABLE DESIGNATING WHETHER THE USER SUPPLIED -C JACOBIANS ARE TO BE CHECKED (CHKJAC=TRUE) OR NOT -C (CHKJAC=FALSE). -C DELTAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTA. -C DELTNI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTAN. -C DELTSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTAS. -C DIFFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DIFF. -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX IS -C TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C EPSMAI: THE LOCATION IN ARRAY WORK OF VARIABLE EPSMAC. -C ETA: THE RELATIVE NOISE IN THE FUNCTION RESULTS. -C ETAI: THE LOCATION IN ARRAY WORK OF VARIABLE ETA. -C FI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY F. -C FJACBI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FJACB. -C FJACDI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FJACD. -C FNI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FN. -C FSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FS. -C FSTITR: THE VARIABLE DESIGNATING WHETHER THIS IS THE FIRST -C ITERATION (FSTITR=TRUE) OR NOT (FSTITR=FALSE). -C HEAD: THE VARIABLE DESIGNATING WHETHER THE HEADING IS TO BE -C PRINTED (HEAD=TRUE) OR NOT (HEAD=FALSE). -C I: AN INDEX VARIABLE. -C IDFI: THE LOCATION IN ARRAY IWORK OF VARIABLE IDF. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C INITD: THE VARIABLE DESIGNATING WHETHER DELTA IS TO BE INITIALIZED -C TO ZERO (INITD=TRUE) OR TO THE VALUES IN THE FIRST N BY M -C ELEMENTS OF ARRAY WORK (INITD=FALSE). -C INT2I: THE IN ARRAY IWORK OF VARIABLE INT2. -C IPRINI: THE LOCATION IN ARRAY IWORK OF VARIABLE IPRINT. -C IPRINT: THE PRINT CONTROL VARIABLE. 
-C IRANKI: THE LOCATION IN ARRAY IWORK OF VARIABLE IRANK. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISTOPI: THE LOCATION IN ARRAY IWORK OF VARIABLE ISTOP. -C IWORK: THE INTEGER WORK SPACE. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JOBI: THE LOCATION IN ARRAY IWORK OF VARIABLE JOB. -C JPVTI: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY JPVT. -C K: AN INDEX VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDTTI: THE LOCATION IN ARRAY IWORK OF VARIABLE LDTT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY IWORK. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LUNERI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNERR. -C LUNERR: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. -C LUNRPI: THE LOCATION IN ARRAY IWORK OF VARIABLE LUNRPT. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C LWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY WORK. -C LWORK: THE LENGTH OF VECTOR WORK. -C LWRK: THE LENGTH OF VECTOR WRK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C MAXIT1: FOR IMPLICIT MODELS, THE ITERATIONS ALLOWED FOR THE NEXT -C PENALTY PARAMETER VALUE. -C MAXITI: THE LOCATION IN ARRAY IWORK OF VARIABLE MAXIT. -C MSGB: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY MSGB. -C MSGD: THE STARTING LOCATION IN ARRAY IWORK OF ARRAY MSGD. -C N: THE NUMBER OF OBSERVATIONS. 
-C NDIGIT: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS, AS -C SUPPLIED BY THE USER. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS. -C NETAI: THE LOCATION IN ARRAY IWORK OF VARIABLE NETA. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NFEVI: THE LOCATION IN ARRAY IWORK OF VARIABLE NFEV. -C NITERI: THE LOCATION IN ARRAY IWORK OF VARIABLE NITER. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. -C NJEVI: THE LOCATION IN ARRAY IWORK OF VARIABLE NJEV. -C NNZW: THE NUMBER OF NONZERO OBSERVATIONAL ERROR WEIGHTS. -C NNZWI: THE LOCATION IN ARRAY IWORK OF VARIABLE NNZW. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NPPI: THE LOCATION IN ARRAY IWORK OF VARIABLE NPP. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER AT WHICH THE DERIVATIVE IS TO BE CHECKED. -C NROWI: THE LOCATION IN ARRAY IWORK OF VARIABLE NROW. -C NTOL: THE NUMBER OF DIGITS OF AGREEMENT REQUIRED BETWEEN THE -C NUMERICAL DERIVATIVES AND THE USER SUPPLIED DERIVATIVES, -C SET BY DJCK. -C NTOLI: THE LOCATION IN ARRAY IWORK OF VARIABLE NTOL. -C OLMAVI: THE LOCATION IN ARRAY WORK OF VARIABLE OLMAVG. -C OMEGAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY OMEGA. -C ONE: THE VALUE 1.0D0. -C PARTLI: THE LOCATION IN ARRAY WORK OF VARIABLE PARTOL. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C PNORM: THE NORM OF THE SCALED ESTIMATED PARAMETERS. -C PNORMI: THE LOCATION IN ARRAY WORK OF VARIABLE PNORM. -C PRERSI: THE LOCATION IN ARRAY WORK OF VARIABLE PRERS. -C PRTPEN: THE VARIABLE DESIGNATING WHETHER THE PENALTY PARAMETER IS -C TO BE PRINTED IN THE ITERATION REPORT (PRTPEN=TRUE) OR NOT -C (PRTPEN=FALSE). -C P5: THE VALUE 0.5D0. -C QRAUXI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY QRAUX. -C RCONDI: THE LOCATION IN ARRAY WORK OF VARIABLE RCOND. 
-C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS TO -C BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE MATRIX -C (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). -C RNORSI: THE LOCATION IN ARRAY WORK OF VARIABLE RNORMS. -C RVARI: THE LOCATION IN ARRAY WORK OF VARIABLE RVAR. -C SCLB: THE SCALING VALUES FOR BETA. -C SCLD: THE SCALING VALUES FOR DELTA. -C SDI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SD. -C SHORT: THE VARIABLE DESIGNATING WHETHER THE USER HAS INVOKED -C ODRPACK BY THE SHORT-CALL (SHORT=TRUE) OR THE LONG-CALL -C (SHORT=FALSE). -C SI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY S. -C SSFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SSF. -C SSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SS. -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C SSTOLI: THE LOCATION IN ARRAY WORK OF VARIABLE SSTOL. -C STPB: THE STEP SIZE FOR FINITE DIFFERENCE DERIVATIVES WRT BETA. -C STPD: THE STEP SIZE FOR FINITE DIFFERENCE DERIVATIVES WRT DELTA. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C TAUFCI: THE LOCATION IN ARRAY WORK OF VARIABLE TAUFAC. -C TAUI: THE LOCATION IN ARRAY WORK OF VARIABLE TAU. -C TEN: THE VALUE 10.0D0. -C TI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY T. -C TSTIMP: THE RELATIVE CHANGE IN THE PARAMETERS BETWEEN THE INITIAL -C VALUES AND THE SOLUTION. -C TTI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY TT. -C UI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY U. -C VCVI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY VCV. -C WD: THE DELTA WEIGHTS. -C WE: THE EPSILON WEIGHTS. -C WE1I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WE1. -C WORK: THE DOUBLE PRECISION WORK SPACE. -C WRK: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK, -C EQUIVALENCED TO WRK1 AND WRK2. -C WRK1I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK1. -C WRK2I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK2. 
-C WRK3I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK3. -C WRK4I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK4. -C WRK5I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK5. -C WRK6I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK6. -C WRK7I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK7. -C WSSI: THE LOCATION IN ARRAY WORK OF VARIABLE WSS. -C WSSDEI: THE LOCATION IN ARRAY WORK OF VARIABLE WSSDEL. -C WSSEPI: THE LOCATION IN ARRAY WORK OF VARIABLE WSSEPS. -C X: THE EXPLANATORY VARIABLE. -C XPLUSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY XPLUSD. -C Y: THE DEPENDENT VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODDRV - - -C INITIALIZE NECESSARY VARIABLES - - CALL DFLAGS(JOB,RESTRT,INITD,DOVCV,REDOJ, - + ANAJAC,CDJAC,CHKJAC,ISODR,IMPLCT) - -C SET STARTING LOCATIONS WITHIN INTEGER WORKSPACE -C (INVALID VALUES OF M, NP AND/OR NQ ARE HANDLED REASONABLY BY DIWINF) - - CALL DIWINF(M,NP,NQ, - + MSGB,MSGD,JPVTI,ISTOPI, - + NNZWI,NPPI,IDFI, - + JOBI,IPRINI,LUNERI,LUNRPI, - + NROWI,NTOLI,NETAI, - + MAXITI,NITERI,NFEVI,NJEVI,INT2I,IRANKI,LDTTI, - + LIWKMN) - -C SET STARTING LOCATIONS WITHIN DOUBLE PRECISION WORK SPACE -C (INVALID VALUES OF N, M, NP, NQ, LDWE AND/OR LD2WE -C ARE HANDLED REASONABLY BY DWINF) - - CALL DWINF(N,M,NP,NQ,LDWE,LD2WE,ISODR, - + DELTAI,FI,XPLUSI,FNI,SDI,VCVI, - + RVARI,WSSI,WSSDEI,WSSEPI,RCONDI,ETAI, - + OLMAVI,TAUI,ALPHAI,ACTRSI,PNORMI,RNORSI,PRERSI, - + PARTLI,SSTOLI,TAUFCI,EPSMAI, - + BETA0I,BETACI,BETASI,BETANI,SI,SSI,SSFI,QRAUXI,UI, - + FSI,FJACBI,WE1I,DIFFI, - + DELTSI,DELTNI,TI,TTI,OMEGAI,FJACDI, - + WRK1I,WRK2I,WRK3I,WRK4I,WRK5I,WRK6I,WRK7I, - + LWKMN) - IF (ISODR) THEN - WRK = WRK1I - LWRK = N*M*NQ + N*NQ - ELSE - WRK = WRK2I - LWRK = N*NQ - END IF - -C UPDATE THE PENALTY PARAMETERS -C (WE(1,1,1) IS NOT A USER SUPPLIED ARRAY IN THIS CASE) - IF (RESTRT .AND. 
IMPLCT) THEN - WE(1,1,1) = MAX(WORK(WE1I)**2,ABS(WE(1,1,1))) - WORK(WE1I) = -SQRT(ABS(WE(1,1,1))) - END IF - - IF (RESTRT) THEN - -C RESET MAXIMUM NUMBER OF ITERATIONS - - IF (MAXIT.GE.0) THEN - IWORK(MAXITI) = IWORK(NITERI) + MAXIT - ELSE - IWORK(MAXITI) = IWORK(NITERI) + 10 - END IF - - IF (IWORK(NITERI).LT.IWORK(MAXITI)) THEN - INFO = 0 - END IF - - IF (JOB.GE.0) IWORK(JOBI) = JOB - IF (IPRINT.GE.0) IWORK(IPRINI) = IPRINT - IF (PARTOL.GE.ZERO .AND. PARTOL.LT.ONE) WORK(PARTLI) = PARTOL - IF (SSTOL.GE.ZERO .AND. SSTOL.LT.ONE) WORK(SSTOLI) = SSTOL - - WORK(OLMAVI) = WORK(OLMAVI)*IWORK(NITERI) - - IF (IMPLCT) THEN - CALL DCOPY(N*NQ,WORK(FNI),1,WORK(FI),1) - ELSE - CALL DXMY(N,NQ,WORK(FNI),N,Y,LDY,WORK(FI),N) - END IF - CALL DWGHT(N,NQ,WORK(WE1I),LDWE,LD2WE,WORK(FI),N,WORK(FI),N) - WORK(WSSEPI) = DDOT(N*NQ,WORK(FI),1,WORK(FI),1) - WORK(WSSI) = WORK(WSSEPI) + WORK(WSSDEI) - - ELSE - -C PERFORM ERROR CHECKING - - INFO = 0 - - CALL DODCHK(N,M,NP,NQ, - + ISODR,ANAJAC,IMPLCT, - + IFIXB, - + LDX,LDIFX,LDSCLD,LDSTPD,LDWE,LD2WE,LDWD,LD2WD, - + LDY, - + LWORK,LWKMN,LIWORK,LIWKMN, - + SCLB,SCLD,STPB,STPD, - + INFO) - IF (INFO.GT.0) THEN - GO TO 50 - END IF - -C INITIALIZE WORK VECTORS AS NECESSARY - - DO 10 I=N*M+N*NQ+1,LWORK - WORK(I) = ZERO - 10 CONTINUE - DO 20 I=1,LIWORK - IWORK(I) = 0 - 20 CONTINUE - - CALL DINIWK(N,M,NP, - + WORK,LWORK,IWORK,LIWORK, - + X,LDX,IFIXX,LDIFX,SCLD,LDSCLD, - + BETA,SCLB, - + SSTOL,PARTOL,MAXIT,TAUFAC, - + JOB,IPRINT,LUNERR,LUNRPT, - + EPSMAI,SSTOLI,PARTLI,MAXITI,TAUFCI, - + JOBI,IPRINI,LUNERI,LUNRPI, - + SSFI,TTI,LDTTI,DELTAI) - - IWORK(MSGB) = -1 - IWORK(MSGD) = -1 - WORK(TAUI) = -WORK(TAUFCI) - -C SET UP FOR PARAMETER ESTIMATION - -C PULL BETA'S TO BE ESTIMATED AND CORRESPONDING SCALE VALUES -C AND STORE IN WORK(BETACI) AND WORK(SSI), RESPECTIVELY - - CALL DPACK(NP,IWORK(NPPI),WORK(BETACI),BETA,IFIXB) - CALL DPACK(NP,IWORK(NPPI),WORK(SSI),WORK(SSFI),IFIXB) - NPP = IWORK(NPPI) - -C CHECK THAT WD IS POSITIVE DEFINITE AND WE IS POSITIVE 
SEMIDEFINITE, -C SAVING FACTORIZATION OF WE, AND COUNTING NUMBER OF NONZERO WEIGHTS - - CALL DFCTRW(N,M,NQ,NPP, - + ISODR, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + WORK(WRK2I),WORK(WRK4I), - + WORK(WE1I),NNZW,INFO) - IWORK(NNZWI) = NNZW - - IF (INFO.NE.0) THEN - GO TO 50 - END IF - -C EVALUATE THE PREDICTED VALUES AND -C WEIGHTED EPSILONS AT THE STARTING POINT - - CALL DUNPAC(NP,WORK(BETACI),BETA,IFIXB) - CALL DXPY(N,M,X,LDX,WORK(DELTAI),N,WORK(XPLUSI),N) - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,WORK(XPLUSI), - + IFIXB,IFIXX,LDIFX, - + 002,WORK(FNI),WORK(WRK6I),WORK(WRK1I), - + ISTOP) - IWORK(ISTOPI) = ISTOP - IF (ISTOP.EQ.0) THEN - IWORK(NFEVI) = IWORK(NFEVI) + 1 - IF (IMPLCT) THEN - CALL DCOPY(N*NQ,WORK(FNI),1,WORK(FI),1) - ELSE - CALL DXMY(N,NQ,WORK(FNI),N,Y,LDY,WORK(FI),N) - END IF - CALL DWGHT(N,NQ,WORK(WE1I),LDWE,LD2WE,WORK(FI),N,WORK(FI),N) - ELSE - INFO = 52000 - GO TO 50 - END IF - -C COMPUTE NORM OF THE INITIAL ESTIMATES - - CALL DWGHT(NPP,1,WORK(SSI),NPP,1,WORK(BETACI),NPP, - + WORK(WRK),NPP) - IF (ISODR) THEN - CALL DWGHT(N,M,WORK(TTI),IWORK(LDTTI),1,WORK(DELTAI),N, - + WORK(WRK+NPP),N) - WORK(PNORMI) = DNRM2(NPP+N*M,WORK(WRK),1) - ELSE - WORK(PNORMI) = DNRM2(NPP,WORK(WRK),1) - END IF - -C COMPUTE SUM OF SQUARES OF THE WEIGHTED EPSILONS AND WEIGHTED DELTAS - - WORK(WSSEPI) = DDOT(N*NQ,WORK(FI),1,WORK(FI),1) - IF (ISODR) THEN - CALL DWGHT(N,M,WD,LDWD,LD2WD,WORK(DELTAI),N,WORK(WRK),N) - WORK(WSSDEI) = DDOT(N*M,WORK(DELTAI),1,WORK(WRK),1) - ELSE - WORK(WSSDEI) = ZERO - END IF - WORK(WSSI) = WORK(WSSEPI) + WORK(WSSDEI) - -C SELECT FIRST ROW OF X + DELTA THAT CONTAINS NO ZEROS - - NROW = -1 - CALL DSETN(N,M,WORK(XPLUSI),N,NROW) - IWORK(NROWI) = NROW - -C SET NUMBER OF GOOD DIGITS IN FUNCTION RESULTS - - EPSMAC = WORK(EPSMAI) - IF (NDIGIT.LT.2) THEN - IWORK(NETAI) = -1 - NFEV = IWORK(NFEVI) - CALL DETAF(FCN, - + N,M,NP,NQ, - + WORK(XPLUSI),BETA,EPSMAC,NROW, - + WORK(BETANI),WORK(FNI), - + IFIXB,IFIXX,LDIFX, - + ISTOP,NFEV,ETA,NETA, - + 
WORK(WRK1I),WORK(WRK2I),WORK(WRK6I),WORK(WRK7I)) - IWORK(ISTOPI) = ISTOP - IWORK(NFEVI) = NFEV - IF (ISTOP.NE.0) THEN - INFO = 53000 - IWORK(NETAI) = 0 - WORK(ETAI) = ZERO - GO TO 50 - ELSE - IWORK(NETAI) = -NETA - WORK(ETAI) = ETA - END IF - ELSE - IWORK(NETAI) = MIN(NDIGIT,INT(P5-LOG10(EPSMAC))) - WORK(ETAI) = MAX(EPSMAC,TEN**(-NDIGIT)) - END IF - -C CHECK DERIVATIVES IF NECESSARY - - IF (CHKJAC .AND. ANAJAC) THEN - NTOL = -1 - NFEV = IWORK(NFEVI) - NJEV = IWORK(NJEVI) - NETA = IWORK(NETAI) - LDTT = IWORK(LDTTI) - ETA = WORK(ETAI) - EPSMAC = WORK(EPSMAI) - CALL DJCK(FCN, - + N,M,NP,NQ, - + BETA,WORK(XPLUSI), - + IFIXB,IFIXX,LDIFX,STPB,STPD,LDSTPD, - + WORK(SSFI),WORK(TTI),LDTT, - + ETA,NETA,NTOL,NROW,ISODR,EPSMAC, - + WORK(FNI),WORK(FJACBI),WORK(FJACDI), - + IWORK(MSGB),IWORK(MSGD),WORK(DIFFI), - + ISTOP,NFEV,NJEV, - + WORK(WRK1I),WORK(WRK2I),WORK(WRK6I)) - IWORK(ISTOPI) = ISTOP - IWORK(NFEVI) = NFEV - IWORK(NJEVI) = NJEV - IWORK(NTOLI) = NTOL - IF (ISTOP.NE.0) THEN - INFO = 54000 - ELSE IF (IWORK(MSGB).NE.0 .OR. IWORK(MSGD).NE.0) THEN - INFO = 40000 - END IF - ELSE - -C INDICATE USER SUPPLIED DERIVATIVES WERE NOT CHECKED - IWORK(MSGB) = -1 - IWORK(MSGD) = -1 - END IF - -C PRINT APPROPRIATE ERROR MESSAGES - - 50 IF ((INFO.NE.0) .OR. (IWORK(MSGB).NE.-1)) THEN - IF (LUNERR.NE.0 .AND. IPRINT.NE.0) THEN - CALL DODPER - + (INFO,LUNERR,SHORT, - + N,M,NP,NQ, - + LDSCLD,LDSTPD,LDWE,LD2WE,LDWD,LD2WD, - + LWKMN,LIWKMN, - + WORK(FJACBI),WORK(FJACDI), - + WORK(DIFFI),IWORK(MSGB),ISODR,IWORK(MSGD), - + WORK(XPLUSI),IWORK(NROWI),IWORK(NETAI),IWORK(NTOLI)) - END IF - -C SET INFO TO REFLECT ERRORS IN THE USER SUPPLIED JACOBIANS - - IF (INFO.EQ.40000) THEN - IF (IWORK(MSGB).EQ.2 .OR. 
IWORK(MSGD).EQ.2) THEN - IF (IWORK(MSGB).EQ.2) THEN - INFO = INFO + 1000 - END IF - IF (IWORK(MSGD).EQ.2) THEN - INFO = INFO + 100 - END IF - ELSE - INFO = 0 - END IF - END IF - IF (INFO.NE.0) THEN - RETURN - END IF - END IF - END IF - -C SAVE THE INITIAL VALUES OF BETA - CALL DCOPY(NP,BETA,1,WORK(BETA0I),1) - -C FIND LEAST SQUARES SOLUTION - - CALL DCOPY(N*NQ,WORK(FNI),1,WORK(FSI),1) - LDTT = IWORK(LDTTI) - CALL DODMN(HEAD,FSTITR,PRTPEN, - + FCN, N,M,NP,NQ, JOB, BETA,Y,LDY,X,LDX, - + WE,WORK(WE1I),LDWE,LD2WE,WD,LDWD,LD2WD, - + IFIXB,IFIXX,LDIFX, - + WORK(BETACI),WORK(BETANI),WORK(BETASI),WORK(SI), - + WORK(DELTAI),WORK(DELTNI),WORK(DELTSI), - + WORK(TI),WORK(FI),WORK(FNI),WORK(FSI), - + WORK(FJACBI),IWORK(MSGB),WORK(FJACDI),IWORK(MSGD), - + WORK(SSFI),WORK(SSI),WORK(TTI),LDTT, - + STPB,STPD,LDSTPD, - + WORK(XPLUSI),WORK(WRK),LWRK, - + WORK,LWORK,IWORK,LIWORK,INFO) - MAXIT1 = IWORK(MAXITI) - IWORK(NITERI) - TSTIMP = ZERO - DO 100 K=1,NP - IF (BETA(K).EQ.ZERO) THEN - TSTIMP = MAX(TSTIMP, - + ABS(BETA(K)-WORK(BETA0I-1+K))/WORK(SSFI-1+K)) - ELSE - TSTIMP = MAX(TSTIMP, - + ABS(BETA(K)-WORK(BETA0I-1+K))/ABS(BETA(K))) - END IF - 100 CONTINUE - - RETURN - - END -*DODLM - SUBROUTINE DODLM - + (N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SS,TT,LDTT,DELTA, - + ALPHA2,TAU,EPSFCN,ISODR, - + TFJACB,OMEGA,U,QRAUX,JPVT, - + S,T,NLMS,RCOND,IRANK, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK,LWRK,ISTOPC) -C***BEGIN PROLOGUE DODLM -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DDOT,DNRM2,DODSTP,DSCALE,DWGHT -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE COMPUTE LEVENBERG-MARQUARDT PARAMETER AND STEPS S AND T -C USING ANALOG OF THE TRUST-REGION LEVENBERG-MARQUARDT -C ALGORITHM -C***END PROLOGUE DODLM - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + ALPHA2,EPSFCN,RCOND,TAU - INTEGER - + IRANK,ISTOPC,LDTT,LDWD,LD2WD,LWRK,M,N,NLMS,NP,NPP,NQ - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + DELTA(N,M),F(N,NQ),FJACB(N,NP,NQ),FJACD(N,M,NQ), - + 
OMEGA(NQ,NQ),QRAUX(NP),S(NP),SS(NP), - + T(N,M),TFJACB(N,NQ,NP),TT(LDTT,M),U(NP),WD(LDWD,LD2WD,M), - + WRK(LWRK),WRK1(N,NQ,M),WRK2(N,NQ),WRK3(NP),WRK4(M,M),WRK5(M) - INTEGER - + JPVT(NP) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ALPHA1,ALPHAN,BOT,P001,P1,PHI1,PHI2,SA,TOP,ZERO - INTEGER - + I,IWRK,J,K,L - LOGICAL - + FORVCV - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT,DNRM2 - EXTERNAL - + DDOT,DNRM2 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DODSTP,DSCALE,DWGHT - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MAX,MIN,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,P001,P1 - + /0.0D0,0.001D0,0.1D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ALPHAN: THE NEW LEVENBERG-MARQUARDT PARAMETER. -C ALPHA1: THE PREVIOUS LEVENBERG-MARQUARDT PARAMETER. -C ALPHA2: THE CURRENT LEVENBERG-MARQUARDT PARAMETER. -C BOT: THE LOWER LIMIT FOR SETTING ALPHA. -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C EPSFCN: THE FUNCTION'S PRECISION. -C F: THE (WEIGHTED) ESTIMATED VALUES OF EPSILON. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C FORVCV: THE VARIABLE DESIGNATING WHETHER THIS SUBROUTINE WAS -C CALLED TO SET UP FOR THE COVARIANCE MATRIX COMPUTATIONS -C (FORVCV=TRUE) OR NOT (FORVCV=FALSE). -C I: AN INDEXING VARIABLE. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOPC: THE VARIABLE DESIGNATING WHETHER THE COMPUTATIONS WERE -C STOPED DUE TO SOME NUMERICAL ERROR DETECTED WITHIN -C SUBROUTINE DODSTP. -C IWRK: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. -C JPVT: THE PIVOT VECTOR. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LWRK: THE LENGTH OF VECTOR WRK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. 
-C NLMS: THE NUMBER OF LEVENBERG-MARQUARDT STEPS TAKEN. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C OMEGA: THE ARRAY (I-FJACD*INV(P)*TRANS(FJACD))**(-1/2) WHERE -C P = TRANS(FJACD)*FJACD + D**2 + ALPHA*TT**2 -C P001: THE VALUE 0.001D0 -C P1: THE VALUE 0.1D0 -C PHI1: THE PREVIOUS DIFFERENCE BETWEEN THE NORM OF THE SCALED STEP -C AND THE TRUST REGION DIAMETER. -C PHI2: THE CURRENT DIFFERENCE BETWEEN THE NORM OF THE SCALED STEP -C AND THE TRUST REGION DIAMETER. -C QRAUX: THE ARRAY REQUIRED TO RECOVER THE ORTHOGONAL PART OF THE -C Q-R DECOMPOSITION. -C RCOND: THE APPROXIMATE RECIPROCAL CONDITION OF TFJACB. -C S: THE STEP FOR BETA. -C SA: THE SCALAR PHI2*(ALPHA1-ALPHA2)/(PHI1-PHI2). -C SS: THE SCALING VALUES USED FOR THE UNFIXED BETAS. -C T: THE STEP FOR DELTA. -C TAU: THE TRUST REGION DIAMETER. -C TFJACB: THE ARRAY OMEGA*FJACB. -C TOP: THE UPPER LIMIT FOR SETTING ALPHA. -C TT: THE SCALE USED FOR THE DELTA'S. -C U: THE APPROXIMATE NULL VECTOR FOR TFJACB. -C WD: THE DELTA WEIGHTS. -C WRK: A WORK ARRAY OF (LWRK) ELEMENTS, -C EQUIVALENCED TO WRK1 AND WRK2. -C WRK1: A WORK ARRAY OF (N BY NQ BY M) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK3: A WORK ARRAY OF (NP) ELEMENTS. -C WRK4: A WORK ARRAY OF (M BY M) ELEMENTS. -C WRK5: A WORK ARRAY OF (M) ELEMENTS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODLM - - FORVCV = .FALSE. 
- ISTOPC = 0 - -C COMPUTE FULL GAUSS-NEWTON STEP (ALPHA=0) - - ALPHA1 = ZERO - CALL DODSTP(N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SS,TT,LDTT,DELTA, - + ALPHA1,EPSFCN,ISODR, - + TFJACB,OMEGA,U,QRAUX,JPVT, - + S,T,PHI1,IRANK,RCOND,FORVCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK,LWRK,ISTOPC) - IF (ISTOPC.NE.0) THEN - RETURN - END IF - -C INITIALIZE TAU IF NECESSARY - - IF (TAU.LT.ZERO) THEN - TAU = ABS(TAU)*PHI1 - END IF - -C CHECK IF FULL GAUSS-NEWTON STEP IS OPTIMAL - - IF ((PHI1-TAU).LE.P1*TAU) THEN - NLMS = 1 - ALPHA2 = ZERO - RETURN - END IF - -C FULL GAUSS-NEWTON STEP IS OUTSIDE TRUST REGION - -C FIND LOCALLY CONSTRAINED OPTIMAL STEP - - PHI1 = PHI1 - TAU - -C INITIALIZE UPPER AND LOWER BOUNDS FOR ALPHA - - BOT = ZERO - - DO 30 K=1,NPP - DO 20 L=1,NQ - DO 10 I=1,N - TFJACB(I,L,K) = FJACB(I,K,L) - 10 CONTINUE - 20 CONTINUE - WRK(K) = DDOT(N*NQ,TFJACB(1,1,K),1,F(1,1),1) - 30 CONTINUE - CALL DSCALE(NPP,1,SS,NPP,WRK,NPP,WRK,NPP) - - IF (ISODR) THEN - CALL DWGHT(N,M,WD,LDWD,LD2WD,DELTA,N,WRK(NPP+1),N) - IWRK = NPP - DO 50 J=1,M - DO 40 I=1,N - IWRK = IWRK + 1 - WRK(IWRK) = WRK(IWRK) + - + DDOT(NQ,FJACD(I,J,1),N*M,F(I,1),N) - 40 CONTINUE - 50 CONTINUE - CALL DSCALE(N,M,TT,LDTT,WRK(NPP+1),N,WRK(NPP+1),N) - TOP = DNRM2(NPP+N*M,WRK,1)/TAU - ELSE - TOP = DNRM2(NPP,WRK,1)/TAU - END IF - - IF (ALPHA2.GT.TOP .OR. ALPHA2.EQ.ZERO) THEN - ALPHA2 = P001*TOP - END IF - -C MAIN LOOP - - DO 60 I=1,10 - -C COMPUTE LOCALLY CONSTRAINED STEPS S AND T AND PHI(ALPHA) FOR -C CURRENT VALUE OF ALPHA - - CALL DODSTP(N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SS,TT,LDTT,DELTA, - + ALPHA2,EPSFCN,ISODR, - + TFJACB,OMEGA,U,QRAUX,JPVT, - + S,T,PHI2,IRANK,RCOND,FORVCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK,LWRK,ISTOPC) - IF (ISTOPC.NE.0) THEN - RETURN - END IF - PHI2 = PHI2-TAU - -C CHECK WHETHER CURRENT STEP IS OPTIMAL - - IF (ABS(PHI2).LE.P1*TAU .OR. - + (ALPHA2.EQ.BOT .AND. 
PHI2.LT.ZERO)) THEN - NLMS = I+1 - RETURN - END IF - -C CURRENT STEP IS NOT OPTIMAL - -C UPDATE BOUNDS FOR ALPHA AND COMPUTE NEW ALPHA - - IF (PHI1-PHI2.EQ.ZERO) THEN - NLMS = 12 - RETURN - END IF - SA = PHI2*(ALPHA1-ALPHA2)/(PHI1-PHI2) - IF (PHI2.LT.ZERO) THEN - TOP = MIN(TOP,ALPHA2) - ELSE - BOT = MAX(BOT,ALPHA2) - END IF - IF (PHI1*PHI2.GT.ZERO) THEN - BOT = MAX(BOT,ALPHA2-SA) - ELSE - TOP = MIN(TOP,ALPHA2-SA) - END IF - - ALPHAN = ALPHA2 - SA*(PHI1+TAU)/TAU - IF (ALPHAN.GE.TOP .OR. ALPHAN.LE.BOT) THEN - ALPHAN = MAX(P001*TOP,SQRT(TOP*BOT)) - END IF - -C GET READY FOR NEXT ITERATION - - ALPHA1 = ALPHA2 - ALPHA2 = ALPHAN - PHI1 = PHI2 - 60 CONTINUE - -C SET NLMS TO INDICATE AN OPTIMAL STEP COULD NOT BE FOUND IN 10 TRYS - - NLMS = 12 - - RETURN - END -*DODMN - SUBROUTINE DODMN - + (HEAD,FSTITR,PRTPEN, - + FCN, N,M,NP,NQ, JOB, BETA,Y,LDY,X,LDX, - + WE,WE1,LDWE,LD2WE,WD,LDWD,LD2WD, - + IFIXB,IFIXX,LDIFX, - + BETAC,BETAN,BETAS,S,DELTA,DELTAN,DELTAS, - + T,F,FN,FS,FJACB,MSGB,FJACD,MSGD, - + SSF,SS,TT,LDTT,STPB,STPD,LDSTPD, - + XPLUSD,WRK,LWRK,WORK,LWORK,IWORK,LIWORK,INFO) -C***BEGIN PROLOGUE DODMN -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN,DACCES,DCOPY,DDOT,DEVJAC,DFLAGS,DNRM2,DODLM, -C DODPCR,DODVCV,DUNPAC,DWGHT,DXMY,DXPY -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE ITERATIVELY COMPUTE LEAST SQUARES SOLUTION -C***END PROLOGUE DODMN - -C...SCALAR ARGUMENTS - INTEGER - + INFO,JOB,LDIFX,LDSTPD,LDTT,LDWD,LDWE,LDX,LDY,LD2WD,LD2WE, - + LIWORK,LWORK,LWRK,M,N,NP,NQ - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),BETAC(NP),BETAN(NP),BETAS(NP), - + DELTA(N,M),DELTAN(N,M),DELTAS(N,M), - + F(N,NQ),FJACB(N,NP,NQ),FJACD(N,M,NQ),FN(N,NQ),FS(N,NQ), - + S(NP),SS(NP),SSF(NP),STPB(NP),STPD(LDSTPD,M), - + T(N,M),TT(LDTT,M), - + WD(LDWD,LD2WD,M),WE(LDWE,LD2WE,NQ),WE1(LDWE,LD2WE,NQ), - + WORK(LWORK),X(LDX,M),XPLUSD(N,M),WRK(LWRK),Y(LDY,NQ) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),IWORK(LIWORK), - + MSGB(NQ*NP+1),MSGD(NQ*M+1) - LOGICAL - + 
FSTITR,HEAD,PRTPEN - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + ACTRED,ACTRS,ALPHA,DIRDER,ETA,OLMAVG,ONE, - + P0001,P1,P25,P5,P75,PARTOL,PNORM,PRERED,PRERS, - + RATIO,RCOND,RNORM,RNORMN,RNORMS,RSS,RVAR,SSTOL,TAU,TAUFAC, - + TEMP,TEMP1,TEMP2,TSNORM,ZERO - INTEGER - + I,IDF,IFLAG,INT2,IPR,IPR1,IPR2,IPR2F,IPR3,IRANK, - + ISTOP,ISTOPC,IWRK,J,JPVT,L,LOOPED,LUDFLT,LUNR,LUNRPT, - + MAXIT,NETA,NFEV,NITER,NJEV,NLMS,NNZW,NPP,NPR,OMEGA,QRAUX, - + SD,U,VCV,WRK1,WRK2,WRK3,WRK4,WRK5,WRK6 - LOGICAL - + ACCESS,ANAJAC,CDJAC,CHKJAC,CNVPAR,CNVSS,DIDVCV,DOVCV, - + IMPLCT,INITD,INTDBL,ISODR,LSTEP,REDOJ,RESTRT - -C...LOCAL ARRAYS - DOUBLE PRECISION - + WSS(3) - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT,DNRM2 - EXTERNAL - + DDOT,DNRM2 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DACCES,DCOPY,DEVJAC,DFLAGS, - + DODLM,DODPCR,DODVCV,DUNPAC,DWGHT,DXMY,DXPY - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MIN,MOD,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,P0001,P1,P25,P5,P75,ONE - + /0.0D0,0.00010D0,0.10D0,0.250D0, - + 0.50D0,0.750D0,1.0D0/ - DATA - + LUDFLT - + /6/ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ACCESS: THE VARIABLE DESIGNATING WHETHER INFORMATION IS TO BE -C ACCESSED FROM THE WORK ARRAYS (ACCESS=TRUE) OR STORED IN -C THEM (ACCESS=FALSE). -C ACTRED: THE ACTUAL RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C ACTRS: THE SAVED ACTUAL RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C ALPHA: THE LEVENBERG-MARQUARDT PARAMETER. -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. -C BETAC: THE CURRENT ESTIMATED VALUES OF THE UNFIXED BETA'S. -C BETAN: THE NEW ESTIMATED VALUES OF THE UNFIXED BETA'S. -C BETAS: THE SAVED ESTIMATED VALUES OF THE UNFIXED BETA'S. 
-C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR BY FORWARD -C DIFFERENCES (CDJAC=FALSE). -C CHKJAC: THE VARIABLE DESIGNATING WHETHER THE USER SUPPLIED -C JACOBIANS ARE TO BE CHECKED (CHKJAC=TRUE) OR NOT -C (CHKJAC=FALSE). -C CNVPAR: THE VARIABLE DESIGNATING WHETHER PARAMETER CONVERGENCE WAS -C ATTAINED (CNVPAR=TRUE) OR NOT (CNVPAR=FALSE). -C CNVSS: THE VARIABLE DESIGNATING WHETHER SUM-OF-SQUARES CONVERGENCE -C WAS ATTAINED (CNVSS=TRUE) OR NOT (CNVSS=FALSE). -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DELTAN: THE NEW ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DELTAS: THE SAVED ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DIDVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX WAS -C COMPUTED (DIDVCV=TRUE) OR NOT (DIDVCV=FALSE). -C DIRDER: THE DIRECTIONAL DERIVATIVE. -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX -C SHOULD TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C ETA: THE RELATIVE NOISE IN THE FUNCTION RESULTS. -C F: THE (WEIGHTED) ESTIMATED VALUES OF EPSILON. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C FN: THE NEW PREDICTED VALUES FROM THE FUNCTION. -C FS: THE SAVED PREDICTED VALUES FROM THE FUNCTION. -C FSTITR: THE VARIABLE DESIGNATING WHETHER THIS IS THE FIRST -C ITERATION (FSTITR=TRUE) OR NOT (FSTITR=FALSE). -C HEAD: THE VARIABLE DESIGNATING WHETHER THE HEADING IS TO BE -C PRINTED (HEAD=TRUE) OR NOT (HEAD=FALSE). -C I: AN INDEXING VARIABLE. -C IDF: THE DEGREES OF FREEDOM OF THE FIT, EQUAL TO THE NUMBER OF -C OBSERVATIONS WITH NONZERO WEIGHTED DERIVATIVES MINUS THE -C NUMBER OF PARAMETERS BEING ESTIMATED. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFLAG: THE VARIABLE DESIGNATING WHICH REPORT IS TO BE PRINTED. 
-C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C INITD: THE VARIABLE DESIGNATING WHETHER DELTA IS INITIALIZED TO -C ZERO (INITD=TRUE) OR TO THE VALUES IN THE FIRST N BY M -C ELEMENTS OF ARRAY WORK (INITD=FALSE). -C INT2: THE NUMBER OF INTERNAL DOUBLING STEPS TAKEN. -C INTDBL: THE VARIABLE DESIGNATING WHETHER INTERNAL DOUBLING IS TO BE -C USED (INTDBL=TRUE) OR NOT (INTDBL=FALSE). -C IPR: THE VALUES DESIGNATING THE LENGTH OF THE PRINTED REPORT. -C IPR1: THE VALUE OF THE 4TH DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE INITIAL SUMMARY REPORT. -C IPR2: THE VALUE OF THE 3RD DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE ITERATION REPORT. -C IPR2F: THE VALUE OF THE 2ND DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE FREQUENCY OF THE ITERATION REPORTS. -C IPR3: THE VALUE OF THE 1ST DIGIT (FROM THE RIGHT) OF IPRINT, -C WHICH CONTROLS THE FINAL SUMMARY REPORT. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C ISTOPC: THE VARIABLE DESIGNATING WHETHER THE COMPUTATIONS WERE -C STOPED DUE TO SOME NUMERICAL ERROR WITHIN ROUTINE DODSTP. -C IWORK: THE INTEGER WORK SPACE. -C IWRK: AN INDEX VARIABLE. -C J: AN INDEX VARIABLE. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JPVT: THE STARTING LOCATION IN IWORK OF ARRAY JPVT. -C L: AN INDEX VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE AND WE1. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. 
-C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE AND WE1. -C LIWORK: THE LENGTH OF VECTOR IWORK. -C LOOPED: A COUNTER USED TO DETERMINE HOW MANY TIMES THE SUBLOOP -C HAS BEEN EXECUTED, WHERE IF THE COUNT BECOMES LARGE -C ENOUGH THE COMPUTATIONS WILL BE STOPPED. -C LSTEP: THE VARIABLE DESIGNATING WHETHER A SUCCESSFUL STEP HAS -C BEEN FOUND (LSTEP=TRUE) OR NOT (LSTEP=FALSE). -C LUDFLT: THE DEFAULT LOGICAL UNIT NUMBER, USED FOR COMPUTATION -C REPORTS TO THE SCREEN. -C LUNR: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C LWORK: THE LENGTH OF VECTOR WORK. -C LWRK: THE LENGTH OF VECTOR WRK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C MSGB: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGD: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NITER: THE NUMBER OF ITERATIONS TAKEN. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. -C NLMS: THE NUMBER OF LEVENBERG-MARQUARDT STEPS TAKEN. -C NNZW: THE NUMBER OF NONZERO WEIGHTED OBSERVATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NPR: THE NUMBER OF TIMES THE REPORT IS TO BE WRITTEN. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C OLMAVG: THE AVERAGE NUMBER OF LEVENBERG-MARQUARDT STEPS PER -C ITERATION. -C OMEGA: THE STARTING LOCATION IN WORK OF ARRAY OMEGA. -C ONE: THE VALUE 1.0D0. -C P0001: THE VALUE 0.0001D0. -C P1: THE VALUE 0.1D0. -C P25: THE VALUE 0.25D0. -C P5: THE VALUE 0.5D0. -C P75: THE VALUE 0.75D0. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C PNORM: THE NORM OF THE SCALED ESTIMATED PARAMETERS. -C PRERED: THE PREDICTED RELATIVE REDUCTION IN THE SUM-OF-SQUARES. 
-C PRERS: THE OLD PREDICTED RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C PRTPEN: THE VALUE DESIGNATING WHETHER THE PENALTY PARAMETER IS TO -C BE PRINTED IN THE ITERATION REPORT (PRTPEN=TRUE) OR NOT -C (PRTPEN=FALSE). -C QRAUX: THE STARTING LOCATION IN ARRAY WORK OF ARRAY QRAUX. -C RATIO: THE RATIO OF THE ACTUAL RELATIVE REDUCTION TO THE PREDICTED -C RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C RCOND: THE APPROXIMATE RECIPROCAL CONDITION OF FJACB. -C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS TO -C BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE MATRIX -C (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). -C RNORM: THE NORM OF THE WEIGHTED ERRORS. -C RNORMN: THE NEW NORM OF THE WEIGHTED ERRORS. -C RNORMS: THE SAVED NORM OF THE WEIGHTED ERRORS. -C RSS: THE RESIDUAL SUM OF SQUARES. -C RVAR: THE RESIDUAL VARIANCE. -C S: THE STEP FOR BETA. -C SD: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SD. -C SS: THE SCALING VALUES USED FOR THE UNFIXED BETAS. -C SSF: THE SCALING VALUES USED FOR BETA. -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C STPB: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO EACH BETA. -C STPD: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C T: THE STEP FOR DELTA. -C TAU: THE TRUST REGION DIAMETER. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C TEMP: A TEMPORARY STORAGE LOCATION. -C TEMP1: A TEMPORARY STORAGE LOCATION. -C TEMP2: A TEMPORARY STORAGE LOCATION. -C TSNORM: THE NORM OF THE SCALED STEP. -C TT: THE SCALING VALUES USED FOR DELTA. -C U: THE STARTING LOCATION IN ARRAY WORK OF ARRAY U. -C VCV: THE STARTING LOCATION IN ARRAY WORK OF ARRAY VCV. -C WE: THE EPSILON WEIGHTS. -C WE1: THE SQUARE ROOT OF THE EPSILON WEIGHTS. -C WD: THE DELTA WEIGHTS. -C WORK: THE DOUBLE PRECISION WORK SPACE. 
-C WSS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS AND DELTAS, -C THE SUM-OF-SQUARES OF THE WEIGHTED DELTAS, AND -C THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS. -C WRK: A WORK ARRAY, EQUIVALENCED TO WRK1 AND WRK2 -C WRK1: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK1. -C WRK2: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK2. -C WRK3: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK3. -C WRK4: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK4. -C WRK5: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK5. -C WRK6: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK6. -C X: THE EXPLANATORY VARIABLE. -C XPLUSD: THE VALUES OF X + DELTA. -C Y: THE DEPENDENT VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODMN - - -C INITIALIZE NECESSARY VARIABLES - - CALL DFLAGS(JOB,RESTRT,INITD,DOVCV,REDOJ, - + ANAJAC,CDJAC,CHKJAC,ISODR,IMPLCT) - ACCESS = .TRUE. - CALL DACCES(N,M,NP,NQ,LDWE,LD2WE, - + WORK,LWORK,IWORK,LIWORK, - + ACCESS,ISODR, - + JPVT,OMEGA,U,QRAUX,SD,VCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK6, - + NNZW,NPP, - + JOB,PARTOL,SSTOL,MAXIT,TAUFAC,ETA,NETA, - + LUNRPT,IPR1,IPR2,IPR2F,IPR3, - + WSS,RVAR,IDF, - + TAU,ALPHA,NITER,NFEV,NJEV,INT2,OLMAVG, - + RCOND,IRANK,ACTRS,PNORM,PRERS,RNORMS,ISTOP) - RNORM = SQRT(WSS(1)) - - DIDVCV = .FALSE. - INTDBL = .FALSE. - LSTEP = .TRUE. - -C PRINT INITIAL SUMMARY IF DESIRED - - IF (IPR1.NE.0 .AND. LUNRPT.NE.0) THEN - IFLAG = 1 - IF (IPR1.GE.3 .AND. 
LUNRPT.NE.LUDFLT) THEN - NPR = 2 - ELSE - NPR = 1 - END IF - IF (IPR1.GE.6) THEN - IPR = 2 - ELSE - IPR = 2 - MOD(IPR1,2) - END IF - LUNR = LUNRPT - DO 10 I=1,NPR - CALL DODPCR(IPR,LUNR, - + HEAD,PRTPEN,FSTITR,DIDVCV,IFLAG, - + N,M,NP,NQ,NPP,NNZW, - + MSGB,MSGD, BETA,Y,LDY,X,LDX,DELTA, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + IFIXB,IFIXX,LDIFX, - + SSF,TT,LDTT,STPB,STPD,LDSTPD, - + JOB,NETA,TAUFAC,SSTOL,PARTOL,MAXIT, - + WSS,RVAR,IDF,WORK(SD), - + NITER,NFEV,NJEV,ACTRED,PRERED, - + TAU,PNORM,ALPHA,F,RCOND,IRANK,INFO,ISTOP) - IF (IPR1.GE.5) THEN - IPR = 2 - ELSE - IPR = 1 - END IF - LUNR = LUDFLT - 10 CONTINUE - - END IF - -C STOP IF INITIAL ESTIMATES ARE EXACT SOLUTION - - IF (RNORM.EQ.ZERO) THEN - INFO = 1 - OLMAVG = ZERO - ISTOP = 0 - GO TO 150 - END IF - -C STOP IF NUMBER OF ITERATIONS ALREADY EQUALS MAXIMUM PERMITTED - - IF (RESTRT .AND. (NITER.GE.MAXIT)) THEN - ISTOP = 0 - GO TO 150 - ELSE IF (NITER.GE.MAXIT) THEN - INFO = 4 - ISTOP = 0 - GO TO 150 - END IF - -C MAIN LOOP - - 100 CONTINUE - - NITER = NITER + 1 - RNORMS = RNORM - LOOPED = 0 - -C EVALUATE JACOBIAN USING BEST ESTIMATE OF FUNCTION (FS) - - IF ((NITER.EQ.1) .AND. 
(ANAJAC.AND.CHKJAC)) THEN - ISTOP = 0 - ELSE - CALL DEVJAC(FCN, - + ANAJAC,CDJAC, - + N,M,NP,NQ, - + BETAC,BETA,STPB, - + IFIXB,IFIXX,LDIFX, - + X,LDX,DELTA,XPLUSD,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,FS, - + T,WORK(WRK1),WORK(WRK2),WORK(WRK3),WORK(WRK6), - + FJACB,ISODR,FJACD,WE1,LDWE,LD2WE, - + NJEV,NFEV,ISTOP,INFO) - END IF - IF (ISTOP.NE.0) THEN - INFO = 51000 - GO TO 200 - ELSE IF (INFO.EQ.50300) THEN - GO TO 200 - END IF - -C SUB LOOP FOR -C INTERNAL DOUBLING OR -C COMPUTING NEW STEP WHEN OLD FAILED - - 110 CONTINUE - -C COMPUTE STEPS S AND T - - IF (LOOPED.GT.100) THEN - INFO = 60000 - GO TO 200 - ELSE - LOOPED = LOOPED + 1 - CALL DODLM(N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SS,TT,LDTT,DELTA, - + ALPHA,TAU,ETA,ISODR, - + WORK(WRK6),WORK(OMEGA), - + WORK(U),WORK(QRAUX),IWORK(JPVT), - + S,T,NLMS,RCOND,IRANK, - + WORK(WRK1),WORK(WRK2),WORK(WRK3),WORK(WRK4), - + WORK(WRK5),WRK,LWRK,ISTOPC) - END IF - IF (ISTOPC.NE.0) THEN - INFO = ISTOPC - GO TO 200 - END IF - OLMAVG = OLMAVG+NLMS - -C COMPUTE BETAN = BETAC + S -C DELTAN = DELTA + T - - CALL DXPY(NPP,1,BETAC,NPP,S,NPP,BETAN,NPP) - IF (ISODR) CALL DXPY(N,M,DELTA,N,T,N,DELTAN,N) - -C COMPUTE NORM OF SCALED STEPS S AND T (TSNORM) - - CALL DWGHT(NPP,1,SS,NPP,1,S,NPP,WRK,NPP) - IF (ISODR) THEN - CALL DWGHT(N,M,TT,LDTT,1,T,N,WRK(NPP+1),N) - TSNORM = DNRM2(NPP+N*M,WRK,1) - ELSE - TSNORM = DNRM2(NPP,WRK,1) - END IF - -C COMPUTE SCALED PREDICTED REDUCTION - - IWRK = 0 - DO 130 L=1,NQ - DO 120 I=1,N - IWRK = IWRK + 1 - WRK(IWRK) = DDOT(NPP,FJACB(I,1,L),N,S,1) - IF (ISODR) WRK(IWRK) = WRK(IWRK) + - + DDOT(M,FJACD(I,1,L),N,T(I,1),N) - 120 CONTINUE - 130 CONTINUE - IF (ISODR) THEN - CALL DWGHT(N,M,WD,LDWD,LD2WD,T,N,WRK(N*NQ+1),N) - TEMP1 = DDOT(N*NQ,WRK,1,WRK,1) + DDOT(N*M,T,1,WRK(N*NQ+1),1) - TEMP1 = SQRT(TEMP1)/RNORM - ELSE - TEMP1 = DNRM2(N*NQ,WRK,1)/RNORM - END IF - TEMP2 = SQRT(ALPHA)*TSNORM/RNORM - PRERED = TEMP1**2+TEMP2**2/P5 - - DIRDER = -(TEMP1**2+TEMP2**2) - -C EVALUATE PREDICTED VALUES AT NEW POINT - - 
CALL DUNPAC(NP,BETAN,BETA,IFIXB) - CALL DXPY(N,M,X,LDX,DELTAN,N,XPLUSD,N) - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 002,FN,WORK(WRK6),WORK(WRK1), - + ISTOP) - IF (ISTOP.EQ.0) THEN - NFEV = NFEV + 1 - END IF - - IF (ISTOP.LT.0) THEN - -C SET INFO TO INDICATE USER HAS STOPPED THE COMPUTATIONS IN FCN - - INFO = 51000 - GO TO 200 - ELSE IF (ISTOP.GT.0) THEN - -C SET NORM TO INDICATE STEP SHOULD BE REJECTED - - RNORMN = RNORM/(P1*P75) - ELSE - -C COMPUTE NORM OF NEW WEIGHTED EPSILONS AND WEIGHTED DELTAS (RNORMN) - - IF (IMPLCT) THEN - CALL DCOPY(N*NQ,FN,1,WRK,1) - ELSE - CALL DXMY(N,NQ,FN,N,Y,LDY,WRK,N) - END IF - CALL DWGHT(N,NQ,WE1,LDWE,LD2WE,WRK,N,WRK,N) - IF (ISODR) THEN - CALL DWGHT(N,M,WD,LDWD,LD2WD,DELTAN,N,WRK(N*NQ+1),N) - RNORMN = SQRT(DDOT(N*NQ,WRK,1,WRK,1) + - + DDOT(N*M,DELTAN,1,WRK(N*NQ+1),1)) - ELSE - RNORMN = DNRM2(N*NQ,WRK,1) - END IF - END IF - -C COMPUTE SCALED ACTUAL REDUCTION - - IF (P1*RNORMN.LT.RNORM) THEN - ACTRED = ONE - (RNORMN/RNORM)**2 - ELSE - ACTRED = -ONE - END IF - -C COMPUTE RATIO OF ACTUAL REDUCTION TO PREDICTED REDUCTION - - IF(PRERED .EQ. ZERO) THEN - RATIO = ZERO - ELSE - RATIO = ACTRED/PRERED - END IF - -C CHECK ON LACK OF REDUCTION IN INTERNAL DOUBLING CASE - - IF (INTDBL .AND. (RATIO.LT.P0001 .OR. RNORMN.GT.RNORMS)) THEN - ISTOP = 0 - TAU = TAU*P5 - ALPHA = ALPHA/P5 - CALL DCOPY(NPP,BETAS,1,BETAN,1) - CALL DCOPY(N*M,DELTAS,1,DELTAN,1) - CALL DCOPY(N*NQ,FS,1,FN,1) - ACTRED = ACTRS - PRERED = PRERS - RNORMN = RNORMS - RATIO = P5 - END IF - -C UPDATE STEP BOUND - - INTDBL = .FALSE. - IF (RATIO.LT.P25) THEN - IF (ACTRED.GE.ZERO) THEN - TEMP = P5 - ELSE - TEMP = P5*DIRDER/(DIRDER+P5*ACTRED) - END IF - IF (P1*RNORMN.GE.RNORM .OR. TEMP.LT.P1) THEN - TEMP = P1 - END IF - TAU = TEMP*MIN(TAU,TSNORM/P1) - ALPHA = ALPHA/TEMP - - ELSE IF (ALPHA.EQ.ZERO) THEN - TAU = TSNORM/P5 - - ELSE IF (RATIO.GE.P75 .AND. 
NLMS.LE.11) THEN - -C STEP QUALIFIES FOR INTERNAL DOUBLING -C - UPDATE TAU AND ALPHA -C - SAVE INFORMATION FOR CURRENT POINT - - INTDBL = .TRUE. - - TAU = TSNORM/P5 - ALPHA = ALPHA*P5 - - CALL DCOPY(NPP,BETAN,1,BETAS,1) - CALL DCOPY(N*M,DELTAN,1,DELTAS,1) - CALL DCOPY(N*NQ,FN,1,FS,1) - ACTRS = ACTRED - PRERS = PRERED - RNORMS = RNORMN - END IF - -C IF INTERNAL DOUBLING, SKIP CONVERGENCE CHECKS - - IF (INTDBL .AND. TAU.GT.ZERO) THEN - INT2 = INT2+1 - GO TO 110 - END IF - -C CHECK ACCEPTANCE - - IF (RATIO.GE.P0001) THEN - CALL DCOPY(N*NQ,FN,1,FS,1) - IF (IMPLCT) THEN - CALL DCOPY(N*NQ,FS,1,F,1) - ELSE - CALL DXMY(N,NQ,FS,N,Y,LDY,F,N) - END IF - CALL DWGHT(N,NQ,WE1,LDWE,LD2WE,F,N,F,N) - CALL DCOPY(NPP,BETAN,1,BETAC,1) - CALL DCOPY(N*M,DELTAN,1,DELTA,1) - RNORM = RNORMN - CALL DWGHT(NPP,1,SS,NPP,1,BETAC,NPP,WRK,NPP) - IF (ISODR) THEN - CALL DWGHT(N,M,TT,LDTT,1,DELTA,N,WRK(NPP+1),N) - PNORM = DNRM2(NPP+N*M,WRK,1) - ELSE - PNORM = DNRM2(NPP,WRK,1) - END IF - LSTEP = .TRUE. - ELSE - LSTEP = .FALSE. - END IF - -C TEST CONVERGENCE - - INFO = 0 - CNVSS = RNORM.EQ.ZERO - + .OR. - + (ABS(ACTRED).LE.SSTOL .AND. - + PRERED.LE.SSTOL .AND. - + P5*RATIO.LE.ONE) - CNVPAR = (TAU.LE.PARTOL*PNORM) .AND. (.NOT.IMPLCT) - IF (CNVSS) INFO = 1 - IF (CNVPAR) INFO = 2 - IF (CNVSS .AND. CNVPAR) INFO = 3 - -C PRINT ITERATION REPORT - - IF (INFO.NE.0 .OR. LSTEP) THEN - IF (IPR2.NE.0 .AND. IPR2F.NE.0 .AND. LUNRPT.NE.0) THEN - IF (IPR2F.EQ.1 .OR. MOD(NITER,IPR2F).EQ.1) THEN - IFLAG = 2 - CALL DUNPAC(NP,BETAC,BETA,IFIXB) - WSS(1) = RNORM*RNORM - IF (IPR2.GE.3. AND. 
LUNRPT.NE.LUDFLT) THEN - NPR = 2 - ELSE - NPR = 1 - END IF - IF (IPR2.GE.6) THEN - IPR = 2 - ELSE - IPR = 2 - MOD(IPR2,2) - END IF - LUNR = LUNRPT - DO 140 I=1,NPR - CALL DODPCR(IPR,LUNR, - + HEAD,PRTPEN,FSTITR,DIDVCV,IFLAG, - + N,M,NP,NQ,NPP,NNZW, - + MSGB,MSGD, BETA,Y,LDY,X,LDX,DELTA, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + IFIXB,IFIXX,LDIFX, - + SSF,TT,LDTT,STPB,STPD,LDSTPD, - + JOB,NETA,TAUFAC,SSTOL,PARTOL,MAXIT, - + WSS,RVAR,IDF,WORK(SD), - + NITER,NFEV,NJEV,ACTRED,PRERED, - + TAU,PNORM,ALPHA,F,RCOND,IRANK,INFO,ISTOP) - IF (IPR2.GE.5) THEN - IPR = 2 - ELSE - IPR = 1 - END IF - LUNR = LUDFLT - 140 CONTINUE - FSTITR = .FALSE. - PRTPEN = .FALSE. - END IF - END IF - END IF - -C CHECK IF FINISHED - - IF (INFO.EQ.0) THEN - IF (LSTEP) THEN - -C BEGIN NEXT INTERATION UNLESS A STOPPING CRITERIA HAS BEEN MET - - IF (NITER.GE.MAXIT) THEN - INFO = 4 - ELSE - GO TO 100 - END IF - ELSE - -C STEP FAILED - RECOMPUTE UNLESS A STOPPING CRITERIA HAS BEEN MET - - GO TO 110 - END IF - END IF - - 150 CONTINUE - - IF (ISTOP.GT.0) INFO = INFO + 100 - -C STORE UNWEIGHTED EPSILONS AND X+DELTA TO RETURN TO USER - - IF (IMPLCT) THEN - CALL DCOPY(N*NQ,FS,1,F,1) - ELSE - CALL DXMY(N,NQ,FS,N,Y,LDY,F,N) - END IF - CALL DUNPAC(NP,BETAC,BETA,IFIXB) - CALL DXPY(N,M,X,LDX,DELTA,N,XPLUSD,N) - -C COMPUTE COVARIANCE MATRIX OF ESTIMATED PARAMETERS -C IN UPPER NP BY NP PORTION OF WORK(VCV) IF REQUESTED - - IF (DOVCV .AND. 
ISTOP.EQ.0) THEN - -C RE-EVALUATE JACOBIAN AT FINAL SOLUTION, IF REQUESTED -C OTHERWISE, JACOBIAN FROM BEGINNING OF LAST ITERATION WILL BE USED -C TO COMPUTE COVARIANCE MATRIX - - IF (REDOJ) THEN - CALL DEVJAC(FCN, - + ANAJAC,CDJAC, - + N,M,NP,NQ, - + BETAC,BETA,STPB, - + IFIXB,IFIXX,LDIFX, - + X,LDX,DELTA,XPLUSD,STPD,LDSTPD, - + SSF,TT,LDTT,NETA,FS, - + T,WORK(WRK1),WORK(WRK2),WORK(WRK3),WORK(WRK6), - + FJACB,ISODR,FJACD,WE1,LDWE,LD2WE, - + NJEV,NFEV,ISTOP,INFO) - - - IF (ISTOP.NE.0) THEN - INFO = 51000 - GO TO 200 - ELSE IF (INFO.EQ.50300) THEN - GO TO 200 - END IF - END IF - - IF (IMPLCT) THEN - CALL DWGHT(N,M,WD,LDWD,LD2WD,DELTA,N,WRK(N*NQ+1),N) - RSS = DDOT(N*M,DELTA,1,WRK(N*NQ+1),1) - ELSE - RSS = RNORM*RNORM - END IF - IF (REDOJ .OR. NITER.GE.1) THEN - CALL DODVCV(N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SSF,SS,TT,LDTT,DELTA, - + ETA,ISODR, - + WORK(VCV),WORK(SD), - + WORK(WRK6),WORK(OMEGA), - + WORK(U),WORK(QRAUX),IWORK(JPVT), - + S,T,IRANK,RCOND,RSS,IDF,RVAR,IFIXB, - + WORK(WRK1),WORK(WRK2),WORK(WRK3),WORK(WRK4), - + WORK(WRK5),WRK,LWRK,ISTOPC) - IF (ISTOPC.NE.0) THEN - INFO = ISTOPC - GO TO 200 - END IF - DIDVCV = .TRUE. - END IF - - END IF - -C SET JPVT TO INDICATE DROPPED, FIXED AND ESTIMATED PARAMETERS - - 200 DO 210 I=0,NP-1 - WORK(WRK3+I) = IWORK(JPVT+I) - IWORK(JPVT+I) = -2 - 210 CONTINUE - IF (REDOJ .OR. 
NITER.GE.1) THEN - DO 220 I=0,NPP-1 - J = WORK(WRK3+I) - 1 - IF (I.LE.NPP-IRANK-1) THEN - IWORK(JPVT+J) = 1 - ELSE - IWORK(JPVT+J) = -1 - END IF - 220 CONTINUE - IF (NPP.LT.NP) THEN - J = NPP-1 - DO 230 I=NP-1,0,-1 - IF (IFIXB(I+1).EQ.0) THEN - IWORK(JPVT+I) = 0 - ELSE - IWORK(JPVT+I) = IWORK(JPVT+J) - J = J - 1 - END IF - 230 CONTINUE - END IF - END IF - -C STORE VARIOUS SCALARS IN WORK ARRAYS FOR RETURN TO USER - - IF (NITER.GE.1) THEN - OLMAVG = OLMAVG/NITER - ELSE - OLMAVG = ZERO - END IF - -C COMPUTE WEIGHTED SUMS OF SQUARES FOR RETURN TO USER - - CALL DWGHT(N,NQ,WE1,LDWE,LD2WE,F,N,WRK,N) - WSS(3) = DDOT(N*NQ,WRK,1,WRK,1) - IF (ISODR) THEN - CALL DWGHT(N,M,WD,LDWD,LD2WD,DELTA,N,WRK(N*NQ+1),N) - WSS(2) = DDOT(N*M,DELTA,1,WRK(N*NQ+1),1) - ELSE - WSS(2) = ZERO - END IF - WSS(1) = WSS(2) + WSS(3) - - ACCESS = .FALSE. - CALL DACCES(N,M,NP,NQ,LDWE,LD2WE, - + WORK,LWORK,IWORK,LIWORK, - + ACCESS,ISODR, - + JPVT,OMEGA,U,QRAUX,SD,VCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK6, - + NNZW,NPP, - + JOB,PARTOL,SSTOL,MAXIT,TAUFAC,ETA,NETA, - + LUNRPT,IPR1,IPR2,IPR2F,IPR3, - + WSS,RVAR,IDF, - + TAU,ALPHA,NITER,NFEV,NJEV,INT2,OLMAVG, - + RCOND,IRANK,ACTRS,PNORM,PRERS,RNORMS,ISTOP) - -C ENCODE EXISTANCE OF QUESTIONABLE RESULTS INTO INFO - - IF (INFO.LE.9 .OR. INFO.GE.60000) THEN - IF (MSGB(1).EQ.1 .OR. MSGD(1).EQ.1) THEN - INFO = INFO + 1000 - END IF - IF (ISTOP.NE.0) THEN - INFO = INFO + 100 - END IF - IF (IRANK.GE.1) THEN - IF (NPP.GT.IRANK) THEN - INFO = INFO + 10 - ELSE - INFO = INFO + 20 - END IF - END IF - END IF - -C PRINT FINAL SUMMARY - - IF (IPR3.NE.0 .AND. LUNRPT.NE.0) THEN - IFLAG = 3 - - IF (IPR3.GE.3. AND. 
LUNRPT.NE.LUDFLT) THEN - NPR = 2 - ELSE - NPR = 1 - END IF - IF (IPR3.GE.6) THEN - IPR = 2 - ELSE - IPR = 2 - MOD(IPR3,2) - END IF - LUNR = LUNRPT - DO 240 I=1,NPR - CALL DODPCR(IPR,LUNR, - + HEAD,PRTPEN,FSTITR,DIDVCV,IFLAG, - + N,M,NP,NQ,NPP,NNZW, - + MSGB,MSGD, BETA,Y,LDY,X,LDX,DELTA, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + IWORK(JPVT),IFIXX,LDIFX, - + SSF,TT,LDTT,STPB,STPD,LDSTPD, - + JOB,NETA,TAUFAC,SSTOL,PARTOL,MAXIT, - + WSS,RVAR,IDF,WORK(SD), - + NITER,NFEV,NJEV,ACTRED,PRERED, - + TAU,PNORM,ALPHA,F,RCOND,IRANK,INFO,ISTOP) - IF (IPR3.GE.5) THEN - IPR = 2 - ELSE - IPR = 1 - END IF - LUNR = LUDFLT - 240 CONTINUE - END IF - - RETURN - - END -*DODPC1 - SUBROUTINE DODPC1 - + (IPR,LUNRPT, - + ANAJAC,CDJAC,CHKJAC,INITD,RESTRT,ISODR,IMPLCT,DOVCV,REDOJ, - + MSGB1,MSGB,MSGD1,MSGD, - + N,M,NP,NQ,NPP,NNZW, - + X,LDX,IFIXX,LDIFX,DELTA,WD,LDWD,LD2WD,TT,LDTT,STPD,LDSTPD, - + Y,LDY,WE,LDWE,LD2WE,PNLTY, - + BETA,IFIXB,SSF,STPB, - + JOB,NETA,TAUFAC,SSTOL,PARTOL,MAXIT, - + WSS,WSSDEL,WSSEPS) -C***BEGIN PROLOGUE DODPC1 -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DHSTEP -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE GENERATE INITIAL SUMMARY REPORT -C***END PROLOGUE DODPC1 - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PARTOL,PNLTY,SSTOL,TAUFAC,WSS,WSSDEL,WSSEPS - INTEGER - + IPR,JOB,LDIFX,LDSTPD,LDTT,LDWD,LDWE,LDX,LDY,LD2WD,LD2WE, - + LUNRPT,M,MAXIT,MSGB1,MSGD1,N,NETA,NNZW,NP,NPP,NQ - LOGICAL - + ANAJAC,CDJAC,CHKJAC,DOVCV,IMPLCT,INITD,ISODR,REDOJ,RESTRT - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),DELTA(N,M),SSF(NP),STPB(NP),STPD(LDSTPD,M), - + TT(LDTT,M),WD(LDWD,LD2WD,M),WE(LDWE,LD2WE,NQ),X(LDX,M), - + Y(LDY,NQ) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSGB(NQ,NP),MSGD(NQ,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TEMP1,TEMP2,TEMP3,ZERO - INTEGER - + I,ITEMP,J,JOB1,JOB2,JOB3,JOB4,JOB5,L - -C...LOCAL ARRAYS - CHARACTER TEMPC0*2,TEMPC1*5,TEMPC2*13 - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DHSTEP - EXTERNAL - + DHSTEP - - 
-C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,MIN - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. -C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR FORWARD DIFFERENCES -C (CDJAC=FALSE). -C CHKJAC: THE VARIABLE DESIGNATING WHETHER THE USER SUPPLIED -C JACOBIANS ARE TO BE CHECKED (CHKJAC=TRUE) OR NOT -C (CHKJAC=FALSE). -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX IS -C TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C I: AN INDEXING VARIABLE. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INITD: THE VARIABLE DESIGNATING WHETHER DELTA IS INITIALIZED TO -C ZERO (INITD=TRUE) OR TO THE VALUES IN THE FIRST N BY M -C ELEMENTS OF ARRAY WORK (INITD=FALSE). -C IPR: THE VALUE INDICATING THE REPORT TO BE PRINTED. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ITEMP: A TEMPORARY INTEGER VALUE. -C J: AN INDEXING VARIABLE. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C JOB1: THE 1ST DIGIT (FROM THE LEFT) OF VARIABLE JOB. -C JOB2: THE 2ND DIGIT (FROM THE LEFT) OF VARIABLE JOB. -C JOB3: THE 3RD DIGIT (FROM THE LEFT) OF VARIABLE JOB. -C JOB4: THE 4TH DIGIT (FROM THE LEFT) OF VARIABLE JOB. -C JOB5: THE 5TH DIGIT (FROM THE LEFT) OF VARIABLE JOB. -C L: AN INDEXING VARIABLE. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. 
-C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LUNRPT: THE LOGICAL UNIT NUMBER FOR THE COMPUTATION REPORTS. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C MSGB: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGB1: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGD: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C MSGD1: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS. -C A NEGATIVE VALUE INDICATES THAT NETA WAS ESTIMATED BY -C ODRPACK. A POSITIVE VALUE INDICTES THE VALUE WAS SUPPLIED -C BY THE USER. -C NNZW: THE NUMBER OF NONZERO OBSERVATIONAL ERROR WEIGHTS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C PNLTY: THE PENALTY PARAMETER FOR AN IMPLICIT MODEL. -C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS TO -C BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE MATRIX -C (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). -C SSF: THE SCALING VALUES FOR BETA. -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C STPB: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP USED FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C TEMPC0: A TEMPORARY CHARACTER*2 VALUE. 
-C TEMPC1: A TEMPORARY CHARACTER*5 VALUE. -C TEMPC2: A TEMPORARY CHARACTER*13 VALUE. -C TEMP1: A TEMPORARY DOUBLE PRECISION VALUE. -C TEMP2: A TEMPORARY DOUBLE PRECISION VALUE. -C TEMP3: A TEMPORARY DOUBLE PRECISION VALUE. -C TT: THE SCALING VALUES FOR DELTA. -C WD: THE DELTA WEIGHTS. -C WE: THE EPSILON WEIGHTS. -C WSS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS AND DELTAS. -C WSSDEL: THE SUM-OF-SQUARES OF THE WEIGHTED DELTAS. -C WSSEPS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS. -C X: THE EXPLANATORY VARIABLE. -C Y: THE RESPONSE VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODPC1 - - -C PRINT PROBLEM SIZE SPECIFICATION - - WRITE (LUNRPT,1000) N,NNZW,NQ,M,NP,NPP - - -C PRINT CONTROL VALUES - - JOB1 = JOB/10000 - JOB2 = MOD(JOB,10000)/1000 - JOB3 = MOD(JOB,1000)/100 - JOB4 = MOD(JOB,100)/10 - JOB5 = MOD(JOB,10) - WRITE (LUNRPT,1100) JOB - IF (RESTRT) THEN - WRITE (LUNRPT,1110) JOB1 - ELSE - WRITE (LUNRPT,1111) JOB1 - END IF - IF (ISODR) THEN - IF (INITD) THEN - WRITE (LUNRPT,1120) JOB2 - ELSE - WRITE (LUNRPT,1121) JOB2 - END IF - ELSE - WRITE (LUNRPT,1122) JOB2,JOB5 - END IF - IF (DOVCV) THEN - WRITE (LUNRPT,1130) JOB3 - IF (REDOJ) THEN - WRITE (LUNRPT,1131) - ELSE - WRITE (LUNRPT,1132) - END IF - ELSE - WRITE (LUNRPT,1133) JOB3 - END IF - IF (ANAJAC) THEN - WRITE (LUNRPT,1140) JOB4 - IF (CHKJAC) THEN - IF (MSGB1.GE.1 .OR. 
MSGD1.GE.1) THEN - WRITE (LUNRPT,1141) - ELSE - WRITE (LUNRPT,1142) - END IF - ELSE - WRITE (LUNRPT,1143) - END IF - ELSE IF (CDJAC) THEN - WRITE (LUNRPT,1144) JOB4 - ELSE - WRITE (LUNRPT,1145) JOB4 - END IF - IF (ISODR) THEN - IF (IMPLCT) THEN - WRITE (LUNRPT,1150) JOB5 - ELSE - WRITE (LUNRPT,1151) JOB5 - END IF - ELSE - WRITE (LUNRPT,1152) JOB5 - END IF - IF (NETA.LT.0) THEN - WRITE (LUNRPT,1200) -NETA - ELSE - WRITE (LUNRPT,1210) NETA - END IF - WRITE (LUNRPT,1300) TAUFAC - - -C PRINT STOPPING CRITERIA - - WRITE (LUNRPT,1400) SSTOL,PARTOL,MAXIT - - -C PRINT INITIAL SUM OF SQUARES - - IF (IMPLCT) THEN - WRITE (LUNRPT,1500) WSSDEL - IF (ISODR) THEN - WRITE (LUNRPT,1510) WSS,WSSEPS,PNLTY - END IF - ELSE - WRITE (LUNRPT,1600) WSS - IF (ISODR) THEN - WRITE (LUNRPT,1610) WSSDEL,WSSEPS - END IF - END IF - - - IF (IPR.GE.2) THEN - - -C PRINT FUNCTION PARAMETER DATA - - WRITE (LUNRPT,4000) - IF (CHKJAC .AND. ((MSGB1.GE.1) .OR. (MSGD1.GE.1))) THEN - WRITE (LUNRPT,4110) - ELSE IF (ANAJAC) THEN - WRITE (LUNRPT,4120) - ELSE - WRITE (LUNRPT,4200) - END IF - DO 130 J=1,NP - IF (IFIXB(1).LT.0) THEN - TEMPC1 = ' NO' - ELSE - IF (IFIXB(J).NE.0) THEN - TEMPC1 = ' NO' - ELSE - TEMPC1 = ' YES' - END IF - END IF - IF (ANAJAC) THEN - IF (CHKJAC .AND. ((MSGB1.GE.1) .OR. 
(MSGD1.GE.1))) THEN - ITEMP = -1 - DO 110 L=1,NQ - ITEMP = MAX(ITEMP,MSGB(L,J)) - 110 CONTINUE - IF (ITEMP.LE.-1) THEN - TEMPC2 = ' UNCHECKED' - ELSE IF (ITEMP.EQ.0) THEN - TEMPC2 = ' VERIFIED' - ELSE IF (ITEMP.GE.1) THEN - TEMPC2 = ' QUESTIONABLE' - END IF - ELSE - TEMPC2 = ' ' - END IF - ELSE - TEMPC2 = ' ' - END IF - IF (SSF(1).LT.ZERO) THEN - TEMP1 = ABS(SSF(1)) - ELSE - TEMP1 = SSF(J) - END IF - IF (ANAJAC) THEN - WRITE (LUNRPT,4310) J,BETA(J),TEMPC1,TEMP1,TEMPC2 - ELSE - IF (CDJAC) THEN - TEMP2 = DHSTEP(1,NETA,1,J,STPB,1) - ELSE - TEMP2 = DHSTEP(0,NETA,1,J,STPB,1) - END IF - WRITE (LUNRPT,4320) J,BETA(J),TEMPC1,TEMP1,TEMP2 - END IF - 130 CONTINUE - -C PRINT EXPLANATORY VARIABLE DATA - - IF (ISODR) THEN - WRITE (LUNRPT,2010) - IF (CHKJAC .AND. ((MSGB1.GE.1) .OR. (MSGD1.GE.1))) THEN - WRITE (LUNRPT,2110) - ELSE IF (ANAJAC) THEN - WRITE (LUNRPT,2120) - ELSE - WRITE (LUNRPT,2130) - END IF - ELSE - WRITE (LUNRPT,2020) - WRITE (LUNRPT,2140) - END IF - IF (ISODR) THEN - DO 240 J = 1,M - TEMPC0 = '1,' - DO 230 I=1,N,N-1 - - IF (IFIXX(1,1).LT.0) THEN - TEMPC1 = ' NO' - ELSE - IF (LDIFX.EQ.1) THEN - IF (IFIXX(1,J).EQ.0) THEN - TEMPC1 = ' YES' - ELSE - TEMPC1 = ' NO' - END IF - ELSE - IF (IFIXX(I,J).EQ.0) THEN - TEMPC1 = ' YES' - ELSE - TEMPC1 = ' NO' - END IF - END IF - END IF - - IF (TT(1,1).LT.ZERO) THEN - TEMP1 = ABS(TT(1,1)) - ELSE - IF (LDTT.EQ.1) THEN - TEMP1 = TT(1,J) - ELSE - TEMP1 = TT(I,J) - END IF - END IF - - IF (WD(1,1,1).LT.ZERO) THEN - TEMP2 = ABS(WD(1,1,1)) - ELSE - IF (LDWD.EQ.1) THEN - IF (LD2WD.EQ.1) THEN - TEMP2 = WD(1,1,J) - ELSE - TEMP2 = WD(1,J,J) - END IF - ELSE - IF (LD2WD.EQ.1) THEN - TEMP2 = WD(I,1,J) - ELSE - TEMP2 = WD(I,J,J) - END IF - END IF - END IF - - IF (ANAJAC) THEN - IF (CHKJAC .AND. - + (((MSGB1.GE.1) .OR. (MSGD1.GE.1)) .AND. 
- + (I.EQ.1))) THEN - ITEMP = -1 - DO 210 L=1,NQ - ITEMP = MAX(ITEMP,MSGD(L,J)) - 210 CONTINUE - IF (ITEMP.LE.-1) THEN - TEMPC2 = ' UNCHECKED' - ELSE IF (ITEMP.EQ.0) THEN - TEMPC2 = ' VERIFIED' - ELSE IF (ITEMP.GE.1) THEN - TEMPC2 = ' QUESTIONABLE' - END IF - ELSE - TEMPC2 = ' ' - END IF - IF (M.LE.9) THEN - WRITE (LUNRPT,5110) - + TEMPC0,J,X(I,J), - + DELTA(I,J),TEMPC1,TEMP1,TEMP2,TEMPC2 - ELSE - WRITE (LUNRPT,5120) - + TEMPC0,J,X(I,J), - + DELTA(I,J),TEMPC1,TEMP1,TEMP2,TEMPC2 - END IF - ELSE - TEMPC2 = ' ' - IF (CDJAC) THEN - TEMP3 = DHSTEP(1,NETA,I,J,STPD,LDSTPD) - ELSE - TEMP3 = DHSTEP(0,NETA,I,J,STPD,LDSTPD) - END IF - IF (M.LE.9) THEN - WRITE (LUNRPT,5210) - + TEMPC0,J,X(I,J), - + DELTA(I,J),TEMPC1,TEMP1,TEMP2,TEMP3 - ELSE - WRITE (LUNRPT,5220) - + TEMPC0,J,X(I,J), - + DELTA(I,J),TEMPC1,TEMP1,TEMP2,TEMP3 - END IF - END IF - - TEMPC0 = 'N,' - - 230 CONTINUE - IF (J.LT.M) WRITE (LUNRPT,6000) - 240 CONTINUE - ELSE - - DO 260 J = 1,M - TEMPC0 = '1,' - DO 250 I=1,N,N-1 - IF (M.LE.9) THEN - WRITE (LUNRPT,5110) - + TEMPC0,J,X(I,J) - ELSE - WRITE (LUNRPT,5120) - + TEMPC0,J,X(I,J) - END IF - TEMPC0 = 'N,' - 250 CONTINUE - IF (J.LT.M) WRITE (LUNRPT,6000) - 260 CONTINUE - END IF - -C PRINT RESPONSE VARIABLE DATA AND OBSERVATION ERROR WEIGHTS - - IF (.NOT.IMPLCT) THEN - WRITE (LUNRPT,3000) - WRITE (LUNRPT,3100) - DO 310 L=1,NQ - TEMPC0 = '1,' - DO 300 I=1,N,N-1 - IF (WE(1,1,1).LT.ZERO) THEN - TEMP1 = ABS(WE(1,1,1)) - ELSE IF (LDWE.EQ.1) THEN - IF (LD2WE.EQ.1) THEN - TEMP1 = WE(1,1,L) - ELSE - TEMP1 = WE(1,L,L) - END IF - ELSE - IF (LD2WE.EQ.1) THEN - TEMP1 = WE(I,1,L) - ELSE - TEMP1 = WE(I,L,L) - END IF - END IF - IF (NQ.LE.9) THEN - WRITE (LUNRPT,5110) - + TEMPC0,L,Y(I,L),TEMP1 - ELSE - WRITE (LUNRPT,5120) - + TEMPC0,L,Y(I,L),TEMP1 - END IF - TEMPC0 = 'N,' - 300 CONTINUE - IF (L.LT.NQ) WRITE (LUNRPT,6000) - 310 CONTINUE - END IF - END IF - - RETURN - -C FORMAT STATEMENTS - - 1000 FORMAT - + (/' --- PROBLEM SIZE:'/ - + ' N = ',I5, - + ' (NUMBER WITH NONZERO WEIGHT = 
',I5,')'/ - + ' NQ = ',I5/ - + ' M = ',I5/ - + ' NP = ',I5, - + ' (NUMBER UNFIXED = ',I5,')') - 1100 FORMAT - + (/' --- CONTROL VALUES:'/ - + ' JOB = ',I5.5/ - + ' = ABCDE, WHERE') - 1110 FORMAT - + (' A=',I1,' ==> FIT IS A RESTART.') - 1111 FORMAT - + (' A=',I1,' ==> FIT IS NOT A RESTART.') - 1120 FORMAT - + (' B=',I1,' ==> DELTAS ARE INITIALIZED', - + ' TO ZERO.') - 1121 FORMAT - + (' B=',I1,' ==> DELTAS ARE INITIALIZED', - + ' BY USER.') - 1122 FORMAT - + (' B=',I1,' ==> DELTAS ARE FIXED AT', - + ' ZERO SINCE E=',I1,'.') - 1130 FORMAT - + (' C=',I1,' ==> COVARIANCE MATRIX WILL', - + ' BE COMPUTED USING') - 1131 FORMAT - + (' DERIVATIVES RE-', - + 'EVALUATED AT THE SOLUTION.') - 1132 FORMAT - + (' DERIVATIVES FROM THE', - + ' LAST ITERATION.') - 1133 FORMAT - + (' C=',I1,' ==> COVARIANCE MATRIX WILL', - + ' NOT BE COMPUTED.') - 1140 FORMAT - + (' D=',I1,' ==> DERIVATIVES ARE', - + ' SUPPLIED BY USER.') - 1141 FORMAT - + (' DERIVATIVES WERE CHECKED.'/ - + ' RESULTS APPEAR QUESTIONABLE.') - 1142 FORMAT - + (' DERIVATIVES WERE CHECKED.'/ - + ' RESULTS APPEAR CORRECT.') - 1143 FORMAT - + (' DERIVATIVES WERE NOT', - + ' CHECKED.') - 1144 FORMAT - + (' D=',I1,' ==> DERIVATIVES ARE', - + ' ESTIMATED BY CENTRAL', - + ' DIFFERENCES.') - 1145 FORMAT - + (' D=',I1,' ==> DERIVATIVES ARE', - + ' ESTIMATED BY FORWARD', - + ' DIFFERENCES.') - 1150 FORMAT - + (' E=',I1,' ==> METHOD IS IMPLICIT ODR.') - 1151 FORMAT - + (' E=',I1,' ==> METHOD IS EXPLICIT ODR.') - 1152 FORMAT - + (' E=',I1,' ==> METHOD IS EXPLICIT OLS.') - 1200 FORMAT - + (' NDIGIT = ',I5,' (ESTIMATED BY ODRPACK)') - 1210 FORMAT - + (' NDIGIT = ',I5,' (SUPPLIED BY USER)') - 1300 FORMAT - + (' TAUFAC = ',1P,D12.2) - 1400 FORMAT - + (/' --- STOPPING CRITERIA:'/ - + ' SSTOL = ',1P,D12.2, - + ' (SUM OF SQUARES STOPPING TOLERANCE)'/ - + ' PARTOL = ',1P,D12.2, - + ' (PARAMETER STOPPING TOLERANCE)'/ - + ' MAXIT = ',I5, - + ' (MAXIMUM NUMBER OF ITERATIONS)') - 1500 FORMAT - + (/' --- INITIAL SUM OF SQUARED WEIGHTED DELTAS 
=', - + 17X,1P,D17.8) - 1510 FORMAT - + ( ' INITIAL PENALTY FUNCTION VALUE =',1P,D17.8/ - + ' PENALTY TERM =',1P,D17.8/ - + ' PENALTY PARAMETER =',1P,D10.1) - 1600 FORMAT - + (/' --- INITIAL WEIGHTED SUM OF SQUARES =', - + 17X,1P,D17.8) - 1610 FORMAT - + ( ' SUM OF SQUARED WEIGHTED DELTAS =',1P,D17.8/ - + ' SUM OF SQUARED WEIGHTED EPSILONS =',1P,D17.8) - 2010 FORMAT - + (/' --- EXPLANATORY VARIABLE AND DELTA WEIGHT SUMMARY:') - 2020 FORMAT - + (/' --- EXPLANATORY VARIABLE SUMMARY:') - 2110 FORMAT - + (/' INDEX X(I,J) DELTA(I,J) FIXED', - + ' SCALE WEIGHT DERIVATIVE'/ - + ' ', - + ' ASSESSMENT'/, - + ' (I,J) (IFIXX)', - + ' (SCLD) (WD) '/) - 2120 FORMAT - + (/' INDEX X(I,J) DELTA(I,J) FIXED', - + ' SCALE WEIGHT '/ - + ' ', - + ' '/, - + ' (I,J) (IFIXX)', - + ' (SCLD) (WD) '/) - 2130 FORMAT - + (/' INDEX X(I,J) DELTA(I,J) FIXED', - + ' SCALE WEIGHT DERIVATIVE'/ - + ' ', - + ' STEP SIZE'/, - + ' (I,J) (IFIXX)', - + ' (SCLD) (WD) (STPD)'/) - 2140 FORMAT - + (/' INDEX X(I,J)'/ - + ' (I,J) '/) - 3000 FORMAT - + (/' --- RESPONSE VARIABLE AND EPSILON ERROR WEIGHT', - + ' SUMMARY:') - 3100 FORMAT - + (/' INDEX Y(I,L) WEIGHT'/ - + ' (I,L) (WE)'/) - 4000 FORMAT - + (/' --- FUNCTION PARAMETER SUMMARY:') - 4110 FORMAT - + (/' INDEX BETA(K) FIXED SCALE', - + ' DERIVATIVE'/ - + ' ', - + ' ASSESSMENT'/, - + ' (K) (IFIXB) (SCLB)', - + ' '/) - 4120 FORMAT - + (/' INDEX BETA(K) FIXED SCALE', - + ' '/ - + ' ', - + ' '/, - + ' (K) (IFIXB) (SCLB)', - + ' '/) - 4200 FORMAT - + (/' INDEX BETA(K) FIXED SCALE', - + ' DERIVATIVE'/ - + ' ', - + ' STEP SIZE'/, - + ' (K) (IFIXB) (SCLB)', - + ' (STPB)'/) - 4310 FORMAT - + (7X,I5,1P,D16.8,4X,A5,D16.8,1X,A13) - 4320 FORMAT - + (7X,I5,1P,D16.8,4X,A5,D16.8,1X,D13.5) - 5110 FORMAT - + (9X,A2,I1,1P,2D12.3,4X,A5,2D10.2,1X,A13) - 5120 FORMAT - + (8X,A2,I2,1P,2D12.3,4X,A5,2D10.2,1X,A13) - 5210 FORMAT - + (9X,A2,I1,1P,2D12.3,4X,A5,2D10.2,1X,D13.5) - 5220 FORMAT - + (8X,A2,I2,1P,2D12.3,4X,A5,2D10.2,1X,D13.5) - 6000 FORMAT - + (' ') - END -*DODPC2 - 
SUBROUTINE DODPC2 - + (IPR,LUNRPT, FSTITR,IMPLCT,PRTPEN, - + PNLTY, - + NITER,NFEV,WSS,ACTRED,PRERED,ALPHA,TAU,PNORM,NP,BETA) -C***BEGIN PROLOGUE DODPC2 -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE GENERATE ITERATION REPORTS -C***END PROLOGUE DODPC2 - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + ACTRED,ALPHA,PNLTY,PNORM,PRERED,TAU,WSS - INTEGER - + IPR,LUNRPT,NFEV,NITER,NP - LOGICAL - + FSTITR,IMPLCT,PRTPEN - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP) - -C...LOCAL SCALARS - DOUBLE PRECISION - + RATIO,ZERO - INTEGER - + J,K,L - CHARACTER GN*3 - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MIN - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ACTRED: THE ACTUAL RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C ALPHA: THE LEVENBERG-MARQUARDT PARAMETER. -C BETA: THE FUNCTION PARAMETERS. -C FSTITR: THE VARIABLE DESIGNATING WHETHER THIS IS THE FIRST -C ITERATION (FSTITR=.TRUE.) OR NOT (FSTITR=.FALSE.). -C GN: THE CHARACTER*3 VARIABLE INDICATING WHETHER A GAUSS-NEWTON -C STEP WAS TAKEN. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C IPR: THE VALUE INDICATING THE REPORT TO BE PRINTED. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NITER: THE NUMBER OF ITERATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C PNLTY: THE PENALTY PARAMETER FOR AN IMPLICIT MODEL. -C PNORM: THE NORM OF THE SCALED ESTIMATED PARAMETERS. -C PRERED: THE PREDICTED RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C PRTPEN: THE VARIABLE DESIGNATING WHETHER THE PENALTY PARAMETER IS -C TO BE PRINTED IN THE ITERATION REPORT (PRTPEN=TRUE) OR NOT -C (PRTPEN=FALSE). -C RATIO: THE RATIO OF TAU TO PNORM. -C TAU: THE TRUST REGION DIAMETER. 
-C WSS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS AND DELTAS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODPC2 - - - IF (FSTITR) THEN - IF (IPR.EQ.1) THEN - IF (IMPLCT) THEN - WRITE (LUNRPT,1121) - ELSE - WRITE (LUNRPT,1122) - END IF - ELSE - IF (IMPLCT) THEN - WRITE (LUNRPT,1131) - ELSE - WRITE (LUNRPT,1132) - END IF - END IF - END IF - IF (PRTPEN) THEN - WRITE (LUNRPT,1133) PNLTY - END IF - - IF (ALPHA.EQ.ZERO) THEN - GN = 'YES' - ELSE - GN = ' NO' - END IF - IF (PNORM.NE.ZERO) THEN - RATIO = TAU/PNORM - ELSE - RATIO = ZERO - END IF - IF (IPR.EQ.1) THEN - WRITE (LUNRPT,1141) NITER,NFEV,WSS,ACTRED,PRERED, - + RATIO,GN - ELSE - J = 1 - K = MIN(3,NP) - IF (J.EQ.K) THEN - WRITE (LUNRPT,1141) NITER,NFEV,WSS,ACTRED,PRERED, - + RATIO,GN,J,BETA(J) - ELSE - WRITE (LUNRPT,1142) NITER,NFEV,WSS,ACTRED,PRERED, - + RATIO,GN,J,K,(BETA(L),L=J,K) - END IF - IF (NP.GT.3) THEN - DO 10 J=4,NP,3 - K = MIN(J+2,NP) - IF (J.EQ.K) THEN - WRITE (LUNRPT,1151) J,BETA(J) - ELSE - WRITE (LUNRPT,1152) J,K,(BETA(L),L=J,K) - END IF - 10 CONTINUE - END IF - END IF - - RETURN - -C FORMAT STATEMENTS - - 1121 FORMAT - + (// - + ' CUM. PENALTY ACT. REL. PRED. REL.'/ - + ' IT. NO. FN FUNCTION SUM-OF-SQS SUM-OF-SQS', - + ' G-N'/ - + ' NUM. EVALS VALUE REDUCTION REDUCTION', - + ' TAU/PNORM STEP'/ - + ' ---- ------ ----------- ----------- -----------', - + ' --------- ----') - 1122 FORMAT - + (// - + ' CUM. ACT. REL. PRED. REL.'/ - + ' IT. NO. FN WEIGHTED SUM-OF-SQS SUM-OF-SQS', - + ' G-N'/ - + ' NUM. EVALS SUM-OF-SQS REDUCTION REDUCTION', - + ' TAU/PNORM STEP'/ - + ' ---- ------ ----------- ----------- -----------', - + ' --------- ----'/) - 1131 FORMAT - + (// - + ' CUM. PENALTY ACT. REL. PRED. REL.'/ - + ' IT. NO. FN FUNCTION SUM-OF-SQS SUM-OF-SQS', - + ' G-N BETA -------------->'/ - + ' NUM. 
EVALS VALUE REDUCTION REDUCTION', - + ' TAU/PNORM STEP INDEX VALUE'/ - + ' ---- ------ ----------- ----------- -----------', - + ' --------- ---- ----- -----') - 1132 FORMAT - + (// - + ' CUM. ACT. REL. PRED. REL.'/ - + ' IT. NO. FN WEIGHTED SUM-OF-SQS SUM-OF-SQS', - + ' G-N BETA -------------->'/ - + ' NUM. EVALS SUM-OF-SQS REDUCTION REDUCTION', - + ' TAU/PNORM STEP INDEX VALUE'/ - + ' ---- ------ ----------- ----------- -----------', - + ' --------- ---- ----- -----'/) - 1133 FORMAT - + (/' PENALTY PARAMETER VALUE = ', 1P,E10.1) - 1141 FORMAT - + (1X,I4,I8,1X,1P,D12.5,2D13.4,D11.3,3X,A3,7X,I3,3D16.8) - 1142 FORMAT - + (1X,I4,I8,1X,1P,D12.5,2D13.4,D11.3,3X,A3,1X,I3,' TO',I3,3D16.8) - 1151 FORMAT - + (76X,I3,1P,D16.8) - 1152 FORMAT - + (70X,I3,' TO',I3,1P,3D16.8) - END -*DODPC3 - SUBROUTINE DODPC3 - + (IPR,LUNRPT, - + ISODR,IMPLCT,DIDVCV,DOVCV,REDOJ,ANAJAC, - + N,M,NP,NQ,NPP, - + INFO,NITER,NFEV,NJEV,IRANK,RCOND,ISTOP, - + WSS,WSSDEL,WSSEPS,PNLTY,RVAR,IDF, - + BETA,SDBETA,IFIXB2,F,DELTA) -C***BEGIN PROLOGUE DODPC3 -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DPPT -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE GENERATE FINAL SUMMARY REPORT -C***END PROLOGUE DODPC3 - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PNLTY,RCOND,RVAR,WSS,WSSDEL,WSSEPS - INTEGER - + IDF,INFO,IPR,IRANK,ISTOP,LUNRPT,M, - + N,NFEV,NITER,NJEV,NP,NPP,NQ - LOGICAL - + ANAJAC,DIDVCV,DOVCV,IMPLCT,ISODR,REDOJ - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),DELTA(N,M),F(N,NQ),SDBETA(NP) - INTEGER - + IFIXB2(NP) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TVAL - INTEGER - + D1,D2,D3,D4,D5,I,J,K,L,NPLM1 - CHARACTER FMT1*90 - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DPPT - EXTERNAL - + DPPT - -C...INTRINSIC FUNCTIONS - INTRINSIC - + MIN,MOD - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. 
-C D1: THE FIRST DIGIT OF INFO. -C D2: THE SECOND DIGIT OF INFO. -C D3: THE THIRD DIGIT OF INFO. -C D4: THE FOURTH DIGIT OF INFO. -C D5: THE FIFTH DIGIT OF INFO. -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DIDVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX WAS -C COMPUTED (DIDVCV=TRUE) OR NOT (DIDVCV=FALSE). -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX WAS -C TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C F: THE ESTIMATED VALUES OF EPSILON. -C FMT1: A CHARACTER*90 VARIABLE USED FOR FORMATS. -C I: AN INDEXING VARIABLE. -C IDF: THE DEGREES OF FREEDOM OF THE FIT, EQUAL TO THE NUMBER OF -C OBSERVATIONS WITH NONZERO WEIGHTED DERIVATIVES MINUS THE -C NUMBER OF PARAMETERS BEING ESTIMATED. -C IFIXB2: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA WERE -C ESTIMATED, FIXED, OR DROPPED BECAUSE THEY CAUSED RANK -C DEFICIENCY, CORRESPONDING TO VALUES OF IFIXB2 EQUALING 1, -C 0, AND -1, RESPECTIVELY. IF IFIXB2 IS -2, THEN NO ATTEMPT -C WAS MADE TO ESTIMATE THE PARAMETERS BECAUSE MAXIT = 0. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C IPR: THE VARIABLE INDICATING WHAT IS TO BE PRINTED. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C L: AN INDEXING VARIABLE. -C LUNRPT: THE LOGICAL UNIT NUMBER USED FOR COMPUTATION REPORTS. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NITER: THE NUMBER OF ITERATIONS. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. 
-C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPLM1: THE NUMBER OF ITEMS TO BE PRINTED PER LINE, MINUS ONE. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C PNLTY: THE PENALTY PARAMETER FOR AN IMPLICIT MODEL. -C RCOND: THE APPROXIMATE RECIPROCAL CONDITION OF TFJACB. -C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS -C TO BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE -C MATRIX (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RVAR: THE RESIDUAL VARIANCE. -C SDBETA: THE STANDARD ERRORS OF THE ESTIMATED PARAMETERS. -C TVAL: THE VALUE OF THE 97.5 PERCENT POINT FUNCTION FOR THE -C T DISTRIBUTION. -C WSS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS AND DELTAS. -C WSSDEL: THE SUM-OF-SQUARES OF THE WEIGHTED DELTAS. -C WSSEPS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS. - - -C***FIRST EXECUTABLE STATEMENT DODPC3 - - - D1 = INFO/10000 - D2 = MOD(INFO,10000)/1000 - D3 = MOD(INFO,1000)/100 - D4 = MOD(INFO,100)/10 - D5 = MOD(INFO,10) - -C PRINT STOPPING CONDITIONS - - WRITE (LUNRPT,1000) - IF (INFO.LE.9) THEN - IF (INFO.EQ.1) THEN - WRITE (LUNRPT,1011) INFO - ELSE IF (INFO.EQ.2) THEN - WRITE (LUNRPT,1012) INFO - ELSE IF (INFO.EQ.3) THEN - WRITE (LUNRPT,1013) INFO - ELSE IF (INFO.EQ.4) THEN - WRITE (LUNRPT,1014) INFO - ELSE IF (INFO.LE.9) THEN - WRITE (LUNRPT,1015) INFO - END IF - ELSE IF (INFO.LE.9999) THEN - -C PRINT WARNING DIAGNOSTICS - - WRITE (LUNRPT,1020) INFO - IF (D2.EQ.1) WRITE (LUNRPT,1021) - IF (D3.EQ.1) WRITE (LUNRPT,1022) - IF (D4.EQ.1) WRITE (LUNRPT,1023) - IF (D4.EQ.2) WRITE (LUNRPT,1024) - IF (D5.EQ.1) THEN - WRITE (LUNRPT,1031) - ELSE IF (D5.EQ.2) THEN - WRITE (LUNRPT,1032) - ELSE IF (D5.EQ.3) THEN - WRITE (LUNRPT,1033) - ELSE IF (D5.EQ.4) THEN - WRITE (LUNRPT,1034) - ELSE IF (D5.LE.9) THEN - WRITE (LUNRPT,1035) D5 - END IF - ELSE - -C PRINT ERROR MESSAGES - - WRITE (LUNRPT,1040) INFO - IF (D1.EQ.5) THEN - WRITE (LUNRPT,1042) - IF (D2.NE.0) WRITE (LUNRPT,1043) D2 - IF (D3.EQ.3) THEN - WRITE 
(LUNRPT,1044) D3 - ELSE IF (D3.NE.0) THEN - WRITE (LUNRPT,1045) D3 - END IF - ELSE IF (D1.EQ.6) THEN - WRITE (LUNRPT,1050) - ELSE - WRITE (LUNRPT,1060) D1 - END IF - END IF - -C PRINT MISC. STOPPING INFO - - WRITE (LUNRPT,1300) NITER - WRITE (LUNRPT,1310) NFEV - IF (ANAJAC) WRITE (LUNRPT,1320) NJEV - WRITE (LUNRPT,1330) IRANK - WRITE (LUNRPT,1340) RCOND - WRITE (LUNRPT,1350) ISTOP - -C PRINT FINAL SUM OF SQUARES - - IF (IMPLCT) THEN - WRITE (LUNRPT,2000) WSSDEL - IF (ISODR) THEN - WRITE (LUNRPT,2010) WSS,WSSEPS,PNLTY - END IF - ELSE - WRITE (LUNRPT,2100) WSS - IF (ISODR) THEN - WRITE (LUNRPT,2110) WSSDEL,WSSEPS - END IF - END IF - IF (DIDVCV) THEN - WRITE (LUNRPT,2200) SQRT(RVAR),IDF - END IF - - NPLM1 = 3 - -C PRINT ESTIMATED BETA'S, AND, -C IF, FULL RANK, THEIR STANDARD ERRORS - - WRITE (LUNRPT,3000) - IF (DIDVCV) THEN - WRITE (LUNRPT,7300) - TVAL = DPPT(0.975D0,IDF) - DO 10 J=1,NP - IF (IFIXB2(J).GE.1) THEN - WRITE (LUNRPT,8400) J,BETA(J),SDBETA(J), - + BETA(J)-TVAL*SDBETA(J), - + BETA(J)+TVAL*SDBETA(J) - ELSE IF (IFIXB2(J).EQ.0) THEN - WRITE (LUNRPT,8600) J,BETA(J) - ELSE - WRITE (LUNRPT,8700) J,BETA(J) - END IF - 10 CONTINUE - IF (.NOT.REDOJ) WRITE (LUNRPT,7310) - ELSE - IF (DOVCV) THEN - IF (D1.LE.5) THEN - WRITE (LUNRPT,7410) - ELSE - WRITE (LUNRPT,7420) - END IF - END IF - - IF ((IRANK.EQ.0 .AND. NPP.EQ.NP) .OR. 
NITER.EQ.0) THEN - IF (NP.EQ.1) THEN - WRITE (LUNRPT,7100) - ELSE - WRITE (LUNRPT,7200) - END IF - DO 20 J=1,NP,NPLM1+1 - K = MIN(J+NPLM1,NP) - IF (K.EQ.J) THEN - WRITE (LUNRPT,8100) J,BETA(J) - ELSE - WRITE (LUNRPT,8200) J,K,(BETA(L),L=J,K) - END IF - 20 CONTINUE - IF (NITER.GE.1) THEN - WRITE (LUNRPT,8800) - ELSE - WRITE (LUNRPT,8900) - END IF - ELSE - WRITE (LUNRPT,7500) - DO 30 J=1,NP - IF (IFIXB2(J).GE.1) THEN - WRITE (LUNRPT,8500) J,BETA(J) - ELSE IF (IFIXB2(J).EQ.0) THEN - WRITE (LUNRPT,8600) J,BETA(J) - ELSE - WRITE (LUNRPT,8700) J,BETA(J) - END IF - 30 CONTINUE - END IF - END IF - - IF (IPR.EQ.1) RETURN - - -C PRINT EPSILON'S AND DELTA'S TOGETHER IN A COLUMN IF THE NUMBER OF -C COLUMNS OF DATA IN EPSILON AND DELTA IS LESS THAN OR EQUAL TO THREE. - - IF (IMPLCT .AND. (M.LE.4)) THEN - WRITE (LUNRPT,4100) - WRITE (FMT1,9110) M - WRITE (LUNRPT,FMT1) (J,J=1,M) - DO 40 I=1,N - WRITE (LUNRPT,4130) I,(DELTA(I,J),J=1,M) - 40 CONTINUE - - ELSE IF (ISODR .AND. (NQ+M.LE.4)) THEN - WRITE (LUNRPT,4110) - WRITE (FMT1,9120) NQ,M - WRITE (LUNRPT,FMT1) (L,L=1,NQ),(J,J=1,M) - DO 50 I=1,N - WRITE (LUNRPT,4130) I,(F(I,L),L=1,NQ),(DELTA(I,J),J=1,M) - 50 CONTINUE - - ELSE IF (.NOT.ISODR .AND. ((NQ.GE.2) .AND. 
(NQ.LE.4))) THEN - WRITE (LUNRPT,4120) - WRITE (FMT1,9130) NQ - WRITE (LUNRPT,FMT1) (L,L=1,NQ) - DO 60 I=1,N - WRITE (LUNRPT,4130) I,(F(I,L),L=1,NQ) - 60 CONTINUE - ELSE - -C PRINT EPSILON'S AND DELTA'S SEPARATELY - - IF (.NOT.IMPLCT) THEN - -C PRINT EPSILON'S - - DO 80 J=1,NQ - WRITE (LUNRPT,4200) J - IF (N.EQ.1) THEN - WRITE (LUNRPT,7100) - ELSE - WRITE (LUNRPT,7200) - END IF - DO 70 I=1,N,NPLM1+1 - K = MIN(I+NPLM1,N) - IF (I.EQ.K) THEN - WRITE (LUNRPT,8100) I,F(I,J) - ELSE - WRITE (LUNRPT,8200) I,K,(F(L,J),L=I,K) - END IF - 70 CONTINUE - 80 CONTINUE - END IF - -C PRINT DELTA'S - - IF (ISODR) THEN - DO 100 J=1,M - WRITE (LUNRPT,4300) J - IF (N.EQ.1) THEN - WRITE (LUNRPT,7100) - ELSE - WRITE (LUNRPT,7200) - END IF - DO 90 I=1,N,NPLM1+1 - K = MIN(I+NPLM1,N) - IF (I.EQ.K) THEN - WRITE (LUNRPT,8100) I,DELTA(I,J) - ELSE - WRITE (LUNRPT,8200) I,K,(DELTA(L,J),L=I,K) - END IF - 90 CONTINUE - 100 CONTINUE - END IF - END IF - - RETURN - -C FORMAT STATEMENTS - - 1000 FORMAT - + (/' --- STOPPING CONDITIONS:') - 1011 FORMAT - + (' INFO = ',I5,' ==> SUM OF SQUARES CONVERGENCE.') - 1012 FORMAT - + (' INFO = ',I5,' ==> PARAMETER CONVERGENCE.') - 1013 FORMAT - + (' INFO = ',I5,' ==> SUM OF SQUARES CONVERGENCE AND', - + ' PARAMETER CONVERGENCE.') - 1014 FORMAT - + (' INFO = ',I5,' ==> ITERATION LIMIT REACHED.') - 1015 FORMAT - + (' INFO = ',I5,' ==> UNEXPECTED VALUE,', - + ' PROBABLY INDICATING'/ - + ' INCORRECTLY SPECIFIED', - + ' USER INPUT.') - 1020 FORMAT - + (' INFO = ',I5.4/ - + ' = ABCD, WHERE A NONZERO VALUE FOR DIGIT A,', - + ' B, OR C INDICATES WHY'/ - + ' THE RESULTS MIGHT BE QUESTIONABLE,', - + ' AND DIGIT D INDICATES'/ - + ' THE ACTUAL STOPPING CONDITION.') - 1021 FORMAT - + (' A=1 ==> DERIVATIVES ARE', - + ' QUESTIONABLE.') - 1022 FORMAT - + (' B=1 ==> USER SET ISTOP TO', - + ' NONZERO VALUE DURING LAST'/ - + ' CALL TO SUBROUTINE FCN.') - 1023 FORMAT - + (' C=1 ==> DERIVATIVES ARE NOT', - + ' FULL RANK AT THE SOLUTION.') - 1024 FORMAT - + (' C=2 ==> DERIVATIVES ARE 
ZERO', - + ' RANK AT THE SOLUTION.') - 1031 FORMAT - + (' D=1 ==> SUM OF SQUARES CONVERGENCE.') - 1032 FORMAT - + (' D=2 ==> PARAMETER CONVERGENCE.') - 1033 FORMAT - + (' D=3 ==> SUM OF SQUARES CONVERGENCE', - + ' AND PARAMETER CONVERGENCE.') - 1034 FORMAT - + (' D=4 ==> ITERATION LIMIT REACHED.') - 1035 FORMAT - + (' D=',I1,' ==> UNEXPECTED VALUE,', - + ' PROBABLY INDICATING'/ - + ' INCORRECTLY SPECIFIED', - + ' USER INPUT.') - 1040 FORMAT - + (' INFO = ',I5.5/ - + ' = ABCDE, WHERE A NONZERO VALUE FOR A GIVEN', - + ' DIGIT INDICATES AN'/ - + ' ABNORMAL STOPPING CONDITION.') - 1042 FORMAT - + (' A=5 ==> USER STOPPED COMPUTATIONS', - + ' IN SUBROUTINE FCN.') - 1043 FORMAT - + (' B=',I1,' ==> COMPUTATIONS WERE', - + ' STOPPED DURING THE'/ - + ' FUNCTION EVALUATION.') - 1044 FORMAT - + (' C=',I1,' ==> COMPUTATIONS WERE', - + ' STOPPED BECAUSE'/ - + ' DERIVATIVES WITH', - + ' RESPECT TO DELTA WERE'/ - + ' COMPUTED BY', - + ' SUBROUTINE FCN WHEN'/ - + ' FIT IS OLS.') - 1045 FORMAT - + (' C=',I1,' ==> COMPUTATIONS WERE', - + ' STOPPED DURING THE'/ - + ' JACOBIAN EVALUATION.') - 1050 FORMAT - + (' A=6 ==> NUMERICAL INSTABILITIES', - + ' HAVE BEEN DETECTED,'/ - + ' POSSIBLY INDICATING', - + ' A DISCONTINUITY IN THE'/ - + ' DERIVATIVES OR A POOR', - + ' POOR CHOICE OF PROBLEM'/ - + ' SCALE OR WEIGHTS.') - 1060 FORMAT - + (' A=',I1,' ==> UNEXPECTED VALUE,', - + ' PROBABLY INDICATING'/ - + ' INCORRECTLY SPECIFIED', - + ' USER INPUT.') - 1300 FORMAT - + (' NITER = ',I5, - + ' (NUMBER OF ITERATIONS)') - 1310 FORMAT - + (' NFEV = ',I5, - + ' (NUMBER OF FUNCTION EVALUATIONS)') - 1320 FORMAT - + (' NJEV = ',I5, - + ' (NUMBER OF JACOBIAN EVALUATIONS)') - 1330 FORMAT - + (' IRANK = ',I5, - + ' (RANK DEFICIENCY)') - 1340 FORMAT - + (' RCOND = ',1P,D12.2, - + ' (INVERSE CONDITION NUMBER)') -*1341 FORMAT -* + (' ==> POSSIBLY FEWER THAN 2 SIGNIFICANT', -* + ' DIGITS IN RESULTS;'/ -* + ' SEE ODRPACK REFERENCE', -* + ' GUIDE, SECTION 4.C.') - 1350 FORMAT - + (' ISTOP = ',I5, - + ' 
(RETURNED BY USER FROM', - + ' SUBROUTINE FCN)') - 2000 FORMAT - + (/' --- FINAL SUM OF SQUARED WEIGHTED DELTAS = ', - + 17X,1P,D17.8) - 2010 FORMAT - + ( ' FINAL PENALTY FUNCTION VALUE = ',1P,D17.8/ - + ' PENALTY TERM = ',1P,D17.8/ - + ' PENALTY PARAMETER = ',1P,D10.1) - 2100 FORMAT - + (/' --- FINAL WEIGHTED SUMS OF SQUARES = ',17X,1P,D17.8) - 2110 FORMAT - + ( ' SUM OF SQUARED WEIGHTED DELTAS = ',1P,D17.8/ - + ' SUM OF SQUARED WEIGHTED EPSILONS = ',1P,D17.8) - 2200 FORMAT - + (/' --- RESIDUAL STANDARD DEVIATION = ', - + 17X,1P,D17.8/ - + ' DEGREES OF FREEDOM =',I5) - 3000 FORMAT - + (/' --- ESTIMATED BETA(J), J = 1, ..., NP:') - 4100 FORMAT - + (/' --- ESTIMATED DELTA(I,*), I = 1, ..., N:') - 4110 FORMAT - + (/' --- ESTIMATED EPSILON(I) AND DELTA(I,*), I = 1, ..., N:') - 4120 FORMAT - + (/' --- ESTIMATED EPSILON(I), I = 1, ..., N:') - 4130 FORMAT(5X,I5,1P,5D16.8) - 4200 FORMAT - + (/' --- ESTIMATED EPSILON(I,',I3,'), I = 1, ..., N:') - 4300 FORMAT - + (/' --- ESTIMATED DELTA(I,',I3,'), I = 1, ..., N:') - 7100 FORMAT - + (/' INDEX VALUE'/) - 7200 FORMAT - + (/' INDEX VALUE -------------->'/) - 7300 FORMAT - + (/' BETA S.D. BETA', - + ' ---- 95% CONFIDENCE INTERVAL ----'/) - 7310 FORMAT - + (/' N.B. STANDARD ERRORS AND CONFIDENCE INTERVALS ARE', - + ' COMPUTED USING'/ - + ' DERIVATIVES CALCULATED AT THE BEGINNING', - + ' OF THE LAST ITERATION,'/ - + ' AND NOT USING DERIVATIVES RE-EVALUATED AT THE', - + ' FINAL SOLUTION.') - 7410 FORMAT - + (/' N.B. THE STANDARD ERRORS OF THE ESTIMATED BETAS WERE', - + ' NOT COMPUTED BECAUSE'/ - + ' THE DERIVATIVES WERE NOT AVAILABLE. EITHER MAXIT', - + ' IS 0 AND THE THIRD'/ - + ' DIGIT OF JOB IS GREATER THAN 1, OR THE MOST', - + ' RECENTLY TRIED VALUES OF'/ - + ' BETA AND/OR X+DELTA WERE IDENTIFIED AS', - + ' UNACCEPTABLE BY USER SUPPLIED'/ - + ' SUBROUTINE FCN.') - 7420 FORMAT - + (/' N.B. 
THE STANDARD ERRORS OF THE ESTIMATED BETAS WERE', - + ' NOT COMPUTED.'/ - + ' (SEE INFO ABOVE.)') - 7500 FORMAT - + (/' BETA STATUS') - 8100 FORMAT - + (11X,I5,1P,D16.8) - 8200 FORMAT - + (3X,I5,' TO',I5,1P,7D16.8) - 8400 FORMAT - + (3X,I5,1X,1P,D16.8,3X,D12.4,3X,D16.8,1X,'TO',D16.8) - 8500 FORMAT - + (3X,I5,1X,1P,D16.8,6X,'ESTIMATED') - 8600 FORMAT - + (3X,I5,1X,1P,D16.8,6X,' FIXED') - 8700 FORMAT - + (3X,I5,1X,1P,D16.8,6X,' DROPPED') - 8800 FORMAT - + (/' N.B. NO PARAMETERS WERE FIXED BY THE USER OR', - + ' DROPPED AT THE LAST'/ - + ' ITERATION BECAUSE THEY CAUSED THE MODEL TO BE', - + ' RANK DEFICIENT.') - 8900 FORMAT - + (/' N.B. NO CHANGE WAS MADE TO THE USER SUPPLIED PARAMETER', - + ' VALUES BECAUSE'/ - + ' MAXIT=0.') - 9110 FORMAT - + ('(/'' I'',', - + I2,'('' DELTA(I,'',I1,'')'')/)') - 9120 FORMAT - + ('(/'' I'',', - + I2,'('' EPSILON(I,'',I1,'')''),', - + I2,'('' DELTA(I,'',I1,'')'')/)') - 9130 FORMAT - + ('(/'' I'',', - + I2,'('' EPSILON(I,'',I1,'')'')/)') - - END -*DODPCR - SUBROUTINE DODPCR - + (IPR,LUNRPT, - + HEAD,PRTPEN,FSTITR,DIDVCV,IFLAG, - + N,M,NP,NQ,NPP,NNZW, - + MSGB,MSGD, BETA,Y,LDY,X,LDX,DELTA, - + WE,LDWE,LD2WE,WD,LDWD,LD2WD, - + IFIXB,IFIXX,LDIFX, - + SSF,TT,LDTT,STPB,STPD,LDSTPD, - + JOB,NETA,TAUFAC,SSTOL,PARTOL,MAXIT, - + WSS,RVAR,IDF,SDBETA, - + NITER,NFEV,NJEV,ACTRED,PRERED, - + TAU,PNORM,ALPHA,F,RCOND,IRANK,INFO,ISTOP) -C***BEGIN PROLOGUE DODPCR -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DFLAGS,DODPC1,DODPC2,DODPC3,DODPHD -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE GENERATE COMPUTATION REPORTS -C***END PROLOGUE DODPCR - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + ACTRED,ALPHA,PARTOL,PNORM,PRERED,RCOND,RVAR, - + SSTOL,TAU,TAUFAC - INTEGER - + IDF,IFLAG,INFO,IPR,IRANK,ISTOP,JOB,LDIFX,LDSTPD,LDTT,LDWD,LDWE, - + LDX,LDY,LD2WD,LD2WE,LUNRPT,M,MAXIT,N,NETA,NFEV, - + NITER,NJEV,NNZW,NP,NPP,NQ - LOGICAL - + DIDVCV,FSTITR,HEAD,PRTPEN - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + 
BETA(NP),DELTA(N,M),F(N,NQ),SDBETA(NP),SSF(NP), - + STPB(NP),STPD(LDSTPD,M),TT(LDTT,M), - + WD(LDWD,LD2WD,M),WE(LDWE,LD2WE,NQ),WSS(3),X(LDX,M),Y(LDY,NQ) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M),MSGB(NQ*NP+1),MSGD(NQ*M+1) - -C...LOCAL SCALARS - DOUBLE PRECISION - + PNLTY - LOGICAL - + ANAJAC,CDJAC,CHKJAC,DOVCV,IMPLCT,INITD,ISODR,REDOJ,RESTRT - CHARACTER TYP*3 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DFLAGS,DODPC1,DODPC2,DODPC3,DODPHD - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ACTRED: THE ACTUAL RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C ALPHA: THE LEVENBERG-MARQUARDT PARAMETER. -C ANAJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY FINITE DIFFERENCES (ANAJAC=FALSE) OR NOT (ANAJAC=TRUE). -C BETA: THE FUNCTION PARAMETERS. -C CDJAC: THE VARIABLE DESIGNATING WHETHER THE JACOBIANS ARE COMPUTED -C BY CENTRAL DIFFERENCES (CDJAC=TRUE) OR BY FORWARD -C DIFFERENCES (CDJAC=FALSE). -C CHKJAC: THE VARIABLE DESIGNATING WHETHER THE USER SUPPLIED -C JACOBIANS ARE TO BE CHECKED (CHKJAC=TRUE) OR NOT -C (CHKJAC=FALSE). -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C DIDVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX WAS -C COMPUTED (DIDVCV=TRUE) OR NOT (DIDVCV=FALSE). -C DOVCV: THE VARIABLE DESIGNATING WHETHER THE COVARIANCE MATRIX IS -C TO BE COMPUTED (DOVCV=TRUE) OR NOT (DOVCV=FALSE). -C F: THE (WEIGHTED) ESTIMATED VALUES OF EPSILON. -C FSTITR: THE VARIABLE DESIGNATING WHETHER THIS IS THE FIRST -C ITERATION (FSTITR=TRUE) OR NOT (FSTITR=FALSE). -C HEAD: THE VARIABLE DESIGNATING WHETHER THE HEADING IS TO BE -C PRINTED (HEAD=TRUE) OR NOT (HEAD=FALSE). -C IDF: THE DEGREES OF FREEDOM OF THE FIT, EQUAL TO THE NUMBER OF -C OBSERVATIONS WITH NONZERO WEIGHTED DERIVATIVES MINUS THE -C NUMBER OF PARAMETERS BEING ESTIMATED. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. 
-C IFLAG: THE VARIABLE DESIGNATING WHAT IS TO BE PRINTED. -C IMPLCT: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY -C IMPLICIT ODR (IMPLCT=TRUE) OR EXPLICIT ODR (IMPLCT=FALSE). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C INITD: THE VARIABLE DESIGNATING WHETHER DELTA IS INITIALIZED TO -C ZERO (INITD=TRUE) OR TO THE VALUES IN THE FIRST N BY M -C ELEMENTS OF ARRAY WORK (INITD=FALSE). -C IPR: THE VALUE INDICATING THE REPORT TO BE PRINTED. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C JOB: THE VARIABLE CONTROLING PROBLEM INITIALIZATION AND -C COMPUTATIONAL METHOD. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LUNRPT: THE LOGICAL UNIT NUMBER FOR COMPUTATION REPORTS. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED. -C MSGB: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGD: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF ACCURATE DIGITS IN THE FUNCTION RESULTS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NITER: THE NUMBER OF ITERATIONS. -C NJEV: THE NUMBER OF JACOBIAN EVALUATIONS. -C NNZW: THE NUMBER OF NONZERO WEIGHTED OBSERVATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. 
-C PARTOL: THE PARAMETER CONVERGENCE STOPPING TOLERANCE. -C PNLTY: THE PENALTY PARAMETER FOR AN IMPLICIT MODEL. -C PNORM: THE NORM OF THE SCALED ESTIMATED PARAMETERS. -C PRERED: THE PREDICTED RELATIVE REDUCTION IN THE SUM-OF-SQUARES. -C PRTPEN: THE VARIABLE DESIGNATING WHETHER THE PENALTY PARAMETER IS -C TO BE PRINTED IN THE ITERATION REPORT (PRTPEN=TRUE) OR NOT -C (PRTPEN=FALSE). -C RCOND: THE APPROXIMATE RECIPROCAL CONDITION NUMBER OF TFJACB. -C REDOJ: THE VARIABLE DESIGNATING WHETHER THE JACOBIAN MATRIX IS TO -C BE RECOMPUTED FOR THE COMPUTATION OF THE COVARIANCE MATRIX -C (REDOJ=TRUE) OR NOT (REDOJ=FALSE). -C RESTRT: THE VARIABLE DESIGNATING WHETHER THE CALL IS A RESTART -C (RESTRT=TRUE) OR NOT (RESTRT=FALSE). -C RVAR: THE RESIDUAL VARIANCE. -C SDBETA: THE STANDARD DEVIATIONS OF THE ESTIMATED BETA'S. -C SSF: THE SCALING VALUES FOR BETA. -C SSTOL: THE SUM-OF-SQUARES CONVERGENCE STOPPING TOLERANCE. -C STPB: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO BETA. -C STPD: THE RELATIVE STEP FOR COMPUTING FINITE DIFFERENCE -C DERIVATIVES WITH RESPECT TO DELTA. -C TAU: THE TRUST REGION DIAMETER. -C TAUFAC: THE FACTOR USED TO COMPUTE THE INITIAL TRUST REGION -C DIAMETER. -C TT: THE SCALING VALUES FOR DELTA. -C TYP: THE CHARACTER*3 STRING "ODR" OR "OLS". -C WE: THE EPSILON WEIGHTS. -C WD: THE DELTA WEIGHTS. -C WSS: THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS AND DELTAS, -C THE SUM-OF-SQUARES OF THE WEIGHTED DELTAS, AND -C THE SUM-OF-SQUARES OF THE WEIGHTED EPSILONS. -C X: THE EXPLANATORY VARIABLE. -C Y: THE DEPENDENT VARIABLE. UNUSED WHEN THE MODEL IS IMPLICIT. 
- - -C***FIRST EXECUTABLE STATEMENT DODPCR - - - CALL DFLAGS(JOB,RESTRT,INITD,DOVCV,REDOJ, - + ANAJAC,CDJAC,CHKJAC,ISODR,IMPLCT) - PNLTY = ABS(WE(1,1,1)) - - IF (HEAD) THEN - CALL DODPHD(HEAD,LUNRPT) - END IF - IF (ISODR) THEN - TYP = 'ODR' - ELSE - TYP = 'OLS' - END IF - -C PRINT INITIAL SUMMARY - - IF (IFLAG.EQ.1) THEN - WRITE (LUNRPT,1200) TYP - CALL DODPC1 - + (IPR,LUNRPT, - + ANAJAC,CDJAC,CHKJAC,INITD,RESTRT,ISODR,IMPLCT,DOVCV,REDOJ, - + MSGB(1),MSGB(2),MSGD(1),MSGD(2), - + N,M,NP,NQ,NPP,NNZW, - + X,LDX,IFIXX,LDIFX,DELTA,WD,LDWD,LD2WD,TT,LDTT,STPD,LDSTPD, - + Y,LDY,WE,LDWE,LD2WE,PNLTY, - + BETA,IFIXB,SSF,STPB, - + JOB,NETA,TAUFAC,SSTOL,PARTOL,MAXIT, - + WSS(1),WSS(2),WSS(3)) - -C PRINT ITERATION REPORTS - - ELSE IF (IFLAG.EQ.2) THEN - - IF (FSTITR) THEN - WRITE (LUNRPT,1300) TYP - END IF - CALL DODPC2 - + (IPR,LUNRPT, FSTITR,IMPLCT,PRTPEN, - + PNLTY, - + NITER,NFEV,WSS(1),ACTRED,PRERED,ALPHA,TAU,PNORM,NP,BETA) - -C PRINT FINAL SUMMARY - - ELSE IF (IFLAG.EQ.3) THEN - - WRITE (LUNRPT,1400) TYP - CALL DODPC3 - + (IPR,LUNRPT, - + ISODR,IMPLCT,DIDVCV,DOVCV,REDOJ,ANAJAC, - + N,M,NP,NQ,NPP, - + INFO,NITER,NFEV,NJEV,IRANK,RCOND,ISTOP, - + WSS(1),WSS(2),WSS(3),PNLTY,RVAR,IDF, - + BETA,SDBETA,IFIXB,F,DELTA) - END IF - - RETURN - -C FORMAT STATEMENTS - - 1200 FORMAT - + (/' *** INITIAL SUMMARY FOR FIT BY METHOD OF ',A3, ' ***') - 1300 FORMAT - + (/' *** ITERATION REPORTS FOR FIT BY METHOD OF ',A3, ' ***') - 1400 FORMAT - + (/' *** FINAL SUMMARY FOR FIT BY METHOD OF ',A3, ' ***') - - END -*DODPE1 - SUBROUTINE DODPE1 - + (UNIT,D1,D2,D3,D4,D5, - + N,M,NQ, - + LDSCLD,LDSTPD,LDWE,LD2WE,LDWD,LD2WD, - + LWKMN,LIWKMN) -C***BEGIN PROLOGUE DODPE1 -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE PRINT ERROR REPORTS -C***END PROLOGUE DODPE1 - -C...SCALAR ARGUMENTS - INTEGER - + D1,D2,D3,D4,D5,LDSCLD,LDSTPD,LDWD,LDWE,LD2WD,LD2WE, - + LIWKMN,LWKMN,M,N,NQ,UNIT - -C...VARIABLE DEFINITIONS 
(ALPHABETICALLY) -C D1: THE 1ST DIGIT (FROM THE LEFT) OF INFO. -C D2: THE 2ND DIGIT (FROM THE LEFT) OF INFO. -C D3: THE 3RD DIGIT (FROM THE LEFT) OF INFO. -C D4: THE 4TH DIGIT (FROM THE LEFT) OF INFO. -C D5: THE 5TH DIGIT (FROM THE LEFT) OF INFO. -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LIWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY IWORK. -C LWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY WORK. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C UNIT: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. - - -C***FIRST EXECUTABLE STATEMENT DODPE1 - - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN PROBLEM SPECIFICATION -C PARAMETERS - - IF (D1.EQ.1) THEN - IF (D2.NE.0) THEN - WRITE(UNIT,1100) - END IF - IF (D3.NE.0) THEN - WRITE(UNIT,1200) - END IF - IF (D4.NE.0) THEN - WRITE(UNIT,1300) - END IF - IF (D5.NE.0) THEN - WRITE(UNIT,1400) - END IF - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN DIMENSION SPECIFICATION -C PARAMETERS - - ELSE IF (D1.EQ.2) THEN - - IF (D2.NE.0) THEN - IF (D2.EQ.1 .OR. D2.EQ.3) THEN - WRITE(UNIT,2110) - END IF - IF (D2.EQ.2 .OR. D2.EQ.3) THEN - WRITE(UNIT,2120) - END IF - END IF - - IF (D3.NE.0) THEN - IF (D3.EQ.1 .OR. D3.EQ.3 .OR. D3.EQ.5 .OR. D3.EQ.7) THEN - WRITE(UNIT,2210) - END IF - IF (D3.EQ.2 .OR. D3.EQ.3 .OR. D3.EQ.6 .OR. D3.EQ.7) THEN - WRITE(UNIT,2220) - END IF - IF (D3.EQ.4 .OR. D3.EQ.5 .OR. D3.EQ.6 .OR. D3.EQ.7) THEN - WRITE(UNIT,2230) - END IF - END IF - - IF (D4.NE.0) THEN - IF (D4.EQ.1 .OR. D4.EQ.3) THEN - WRITE(UNIT,2310) - END IF - IF (D4.EQ.2 .OR. D4.EQ.3) THEN - WRITE(UNIT,2320) - END IF - END IF - - IF (D5.NE.0) THEN - IF (D5.EQ.1 .OR. D5.EQ.3) THEN - WRITE(UNIT,2410) LWKMN - END IF - IF (D5.EQ.2 .OR. 
D5.EQ.3) THEN - WRITE(UNIT,2420) LIWKMN - END IF - END IF - - ELSE IF (D1.EQ.3) THEN - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN SCALE VALUES - - IF (D2.NE.0) THEN - IF (D2.EQ.1 .OR. D2.EQ.3) THEN - IF (LDSCLD.GE.N) THEN - WRITE(UNIT,3110) - ELSE - WRITE(UNIT,3120) - END IF - END IF - IF (D2.EQ.2 .OR. D2.EQ.3) THEN - WRITE(UNIT,3130) - END IF - END IF - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN DERIVATIVE STEP VALUES - - IF (D3.NE.0) THEN - IF (D3.EQ.1 .OR. D3.EQ.3) THEN - IF (LDSTPD.GE.N) THEN - WRITE(UNIT,3210) - ELSE - WRITE(UNIT,3220) - END IF - END IF - IF (D3.EQ.2 .OR. D3.EQ.3) THEN - WRITE(UNIT,3230) - END IF - END IF - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN OBSERVATIONAL ERROR WEIGHTS - - IF (D4.NE.0) THEN - IF (D4.EQ.1) THEN - IF (LDWE.GE.N) THEN - IF (LD2WE.GE.NQ) THEN - WRITE(UNIT,3310) - ELSE - WRITE(UNIT,3320) - END IF - ELSE - IF (LD2WE.GE.NQ) THEN - WRITE(UNIT,3410) - ELSE - WRITE(UNIT,3420) - END IF - END IF - END IF - IF (D4.EQ.2) THEN - WRITE(UNIT,3500) - END IF - END IF - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN DELTA WEIGHTS - - IF (D5.NE.0) THEN - IF (LDWD.GE.N) THEN - IF (LD2WD.GE.M) THEN - WRITE(UNIT,4310) - ELSE - WRITE(UNIT,4320) - END IF - ELSE - IF (LD2WD.GE.M) THEN - WRITE(UNIT,4410) - ELSE - WRITE(UNIT,4420) - END IF - END IF - END IF - - END IF - -C FORMAT STATEMENTS - - 1100 FORMAT - + (/' ERROR : N IS LESS THAN ONE.') - 1200 FORMAT - + (/' ERROR : M IS LESS THAN ONE.') - 1300 FORMAT - + (/' ERROR : NP IS LESS THAN ONE'/ - + ' OR NP IS GREATER THAN N.') - 1400 FORMAT - + (/' ERROR : NQ IS LESS THAN ONE.') - 2110 FORMAT - + (/' ERROR : LDX IS LESS THAN N.') - 2120 FORMAT - + (/' ERROR : LDY IS LESS THAN N.') - 2210 FORMAT - + (/' ERROR : LDIFX IS LESS THAN N'/ - + ' AND LDIFX IS NOT EQUAL TO ONE.') - 2220 FORMAT - + (/' ERROR : LDSCLD IS LESS THAN N'/ - + ' AND LDSCLD IS NOT EQUAL TO ONE.') - 2230 FORMAT - + (/' ERROR : LDSTPD IS LESS THAN N'/ - + ' AND LDSTPD IS NOT EQUAL TO ONE.') - 2310 FORMAT - + (/' ERROR : LDWE IS 
LESS THAN N'/ - + ' AND LDWE IS NOT EQUAL TO ONE OR'/ - + ' OR'/ - + ' LD2WE IS LESS THAN NQ'/ - + ' AND LD2WE IS NOT EQUAL TO ONE.') - 2320 FORMAT - + (/' ERROR : LDWD IS LESS THAN N'/ - + ' AND LDWD IS NOT EQUAL TO ONE.') - 2410 FORMAT - + (/' ERROR : LWORK IS LESS THAN ',I7, ','/ - + ' THE SMALLEST ACCEPTABLE DIMENSION OF ARRAY WORK.') - 2420 FORMAT - + (/' ERROR : LIWORK IS LESS THAN ',I7, ','/ - + ' THE SMALLEST ACCEPTABLE DIMENSION OF ARRAY', - + ' IWORK.') - 3110 FORMAT - + (/' ERROR : SCLD(I,J) IS LESS THAN OR EQUAL TO ZERO'/ - + ' FOR SOME I = 1, ..., N AND J = 1, ..., M.'// - + ' WHEN SCLD(1,1) IS GREATER THAN ZERO'/ - + ' AND LDSCLD IS GREATER THAN OR EQUAL TO N THEN'/ - + ' EACH OF THE N BY M ELEMENTS OF'/ - + ' SCLD MUST BE GREATER THAN ZERO.') - 3120 FORMAT - + (/' ERROR : SCLD(1,J) IS LESS THAN OR EQUAL TO ZERO'/ - + ' FOR SOME J = 1, ..., M.'// - + ' WHEN SCLD(1,1) IS GREATER THAN ZERO'/ - + ' AND LDSCLD IS EQUAL TO ONE THEN'/ - + ' EACH OF THE 1 BY M ELEMENTS OF'/ - + ' SCLD MUST BE GREATER THAN ZERO.') - 3130 FORMAT - + (/' ERROR : SCLB(K) IS LESS THAN OR EQUAL TO ZERO'/ - + ' FOR SOME K = 1, ..., NP.'// - + ' ALL NP ELEMENTS OF', - + ' SCLB MUST BE GREATER THAN ZERO.') - 3210 FORMAT - + (/' ERROR : STPD(I,J) IS LESS THAN OR EQUAL TO ZERO'/ - + ' FOR SOME I = 1, ..., N AND J = 1, ..., M.'// - + ' WHEN STPD(1,1) IS GREATER THAN ZERO'/ - + ' AND LDSTPD IS GREATER THAN OR EQUAL TO N THEN'/ - + ' EACH OF THE N BY M ELEMENTS OF'/ - + ' STPD MUST BE GREATER THAN ZERO.') - 3220 FORMAT - + (/' ERROR : STPD(1,J) IS LESS THAN OR EQUAL TO ZERO'/ - + ' FOR SOME J = 1, ..., M.'// - + ' WHEN STPD(1,1) IS GREATER THAN ZERO'/ - + ' AND LDSTPD IS EQUAL TO ONE THEN'/ - + ' EACH OF THE 1 BY M ELEMENTS OF'/ - + ' STPD MUST BE GREATER THAN ZERO.') - 3230 FORMAT - + (/' ERROR : STPB(K) IS LESS THAN OR EQUAL TO ZERO'/ - + ' FOR SOME K = 1, ..., NP.'// - + ' ALL NP ELEMENTS OF', - + ' STPB MUST BE GREATER THAN ZERO.') - 3310 FORMAT - + (/' ERROR : AT LEAST ONE OF THE (NQ 
BY NQ) ARRAYS STARTING'/ - + ' IN WE(I,1,1), I = 1, ..., N, IS NOT POSITIVE'/ - + ' SEMIDEFINITE. WHEN WE(1,1,1) IS GREATER THAN'/ - + ' OR EQUAL TO ZERO, AND LDWE IS GREATER THAN OR'/ - + ' EQUAL TO N, AND LD2WE IS GREATER THAN OR EQUAL'/ - + ' TO NQ, THEN EACH OF THE (NQ BY NQ) ARRAYS IN WE'/ - + ' MUST BE POSITIVE SEMIDEFINITE.') - 3320 FORMAT - + (/' ERROR : AT LEAST ONE OF THE (1 BY NQ) ARRAYS STARTING'/ - + ' IN WE(I,1,1), I = 1, ..., N, HAS A NEGATIVE'/ - + ' ELEMENT. WHEN WE(1,1,1) IS GREATER THAN OR'/ - + ' EQUAL TO ZERO, AND LDWE IS GREATER THAN OR EQUAL'/ - + ' TO N, AND LD2WE IS EQUAL TO 1, THEN EACH OF THE'/ - + ' (1 BY NQ) ARRAYS IN WE MUST HAVE ONLY NON-'/ - + ' NEGATIVE ELEMENTS.') - 3410 FORMAT - + (/' ERROR : THE (NQ BY NQ) ARRAY STARTING IN WE(1,1,1) IS'/ - + ' NOT POSITIVE SEMIDEFINITE. WHEN WE(1,1,1) IS'/ - + ' GREATER THAN OR EQUAL TO ZERO, AND LDWE IS EQUAL'/ - + ' TO 1, AND LD2WE IS GREATER THAN OR EQUAL TO NQ,'/ - + ' THEN THE (NQ BY NQ) ARRAY IN WE MUST BE POSITIVE'/ - + ' SEMIDEFINITE.') - 3420 FORMAT - + (/' ERROR : THE (1 BY NQ) ARRAY STARTING IN WE(1,1,1) HAS'/ - + ' A NEGATIVE ELEMENT. WHEN WE(1,1,1) IS GREATER'/ - + ' THAN OR EQUAL TO ZERO, AND LDWE IS EQUAL TO 1,'/ - + ' AND LD2WE IS EQUAL TO 1, THEN THE (1 BY NQ)'/ - + ' ARRAY IN WE MUST HAVE ONLY NONNEGATIVE ELEMENTS.') - 3500 FORMAT - + (/' ERROR : THE NUMBER OF NONZERO ARRAYS IN ARRAY WE IS'/ - + ' LESS THAN NP.') - 4310 FORMAT - + (/' ERROR : AT LEAST ONE OF THE (M BY M) ARRAYS STARTING'/ - + ' IN WD(I,1,1), I = 1, ..., N, IS NOT POSITIVE'/ - + ' DEFINITE. WHEN WD(1,1,1) IS GREATER THAN ZERO,'/ - + ' AND LDWD IS GREATER THAN OR EQUAL TO N, AND'/ - + ' LD2WD IS GREATER THAN OR EQUAL TO M, THEN EACH'/ - + ' OF THE (M BY M) ARRAYS IN WD MUST BE POSITIVE'/ - + ' DEFINITE.') - 4320 FORMAT - + (/' ERROR : AT LEAST ONE OF THE (1 BY M) ARRAYS STARTING'/ - + ' IN WD(I,1,1), I = 1, ..., N, HAS A NONPOSITIVE'/ - + ' ELEMENT. 
WHEN WD(1,1,1) IS GREATER THAN ZERO,'/ - + ' AND LDWD IS GREATER THAN OR EQUAL TO N, AND'/ - + ' LD2WD IS EQUAL TO 1, THEN EACH OF THE (1 BY M)'/ - + ' ARRAYS IN WD MUST HAVE ONLY POSITIVE ELEMENTS.') - 4410 FORMAT - + (/' ERROR : THE (M BY M) ARRAY STARTING IN WD(1,1,1) IS'/ - + ' NOT POSITIVE DEFINITE. WHEN WD(1,1,1) IS'/ - + ' GREATER THAN ZERO, AND LDWD IS EQUAL TO 1, AND'/ - + ' LD2WD IS GREATER THAN OR EQUAL TO M, THEN THE'/ - + ' (M BY M) ARRAY IN WD MUST BE POSITIVE DEFINITE.') - 4420 FORMAT - + (/' ERROR : THE (1 BY M) ARRAY STARTING IN WD(1,1,1) HAS A'/ - + ' NONPOSITIVE ELEMENT. WHEN WD(1,1,1) IS GREATER'/ - + ' THAN ZERO, AND LDWD IS EQUAL TO 1, AND LD2WD IS'/ - + ' EQUAL TO 1, THEN THE (1 BY M) ARRAY IN WD MUST'/ - + ' HAVE ONLY POSITIVE ELEMENTS.') - END -*DODPE2 - SUBROUTINE DODPE2 - + (UNIT, - + N,M,NP,NQ, - + FJACB,FJACD, - + DIFF,MSGB1,MSGB,ISODR,MSGD1,MSGD, - + XPLUSD,NROW,NETA,NTOL) -C***BEGIN PROLOGUE DODPE2 -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE GENERATE THE DERIVATIVE CHECKING REPORT -C***END PROLOGUE DODPE2 - -C...SCALAR ARGUMENTS - INTEGER - + M,MSGB1,MSGD1,N,NETA,NP,NQ,NROW,NTOL,UNIT - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + DIFF(NQ,NP+M),FJACB(N,NP,NQ),FJACD(N,M,NQ),XPLUSD(N,M) - INTEGER - + MSGB(NQ,NP),MSGD(NQ,M) - -C...LOCAL SCALARS - INTEGER - + I,J,K,L - CHARACTER FLAG*1,TYP*3 - -C...LOCAL ARRAYS - LOGICAL - + FTNOTE(0:7) - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C DIFF: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR EACH DERIVATIVE CHECKED. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C FLAG: THE CHARACTER STRING INDICATING HIGHLY QUESTIONABLE RESULTS. -C FTNOTE: THE ARRAY CONTROLING FOOTNOTES. -C I: AN INDEX VARIABLE. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=.TRUE.) 
OR BY OLS (ISODR=.FALSE.). -C J: AN INDEX VARIABLE. -C K: AN INDEX VARIABLE. -C L: AN INDEX VARIABLE. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSGB: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGB1: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGD: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C MSGD1: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF RELIABLE DIGITS IN THE MODEL. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT -C WHICH THE DERIVATIVE IS TO BE CHECKED. -C NTOL: THE NUMBER OF DIGITS OF AGREEMENT REQUIRED BETWEEN THE -C FINITE DIFFERENCE AND THE USER SUPPLIED DERIVATIVES. -C TYP: THE CHARACTER STRING INDICATING SOLUTION TYPE, ODR OR OLS. -C UNIT: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. -C XPLUSD: THE VALUES OF X + DELTA. - - -C***FIRST EXECUTABLE STATEMENT DODPE2 - - -C SET UP FOR FOOTNOTES - - DO 10 I=0,7 - FTNOTE(I) = .FALSE. - 10 CONTINUE - - DO 40 L=1,NQ - IF (MSGB1.GE.1) THEN - DO 20 I=1,NP - IF (MSGB(L,I).GE.1) THEN - FTNOTE(0) = .TRUE. - FTNOTE(MSGB(L,I)) = .TRUE. - END IF - 20 CONTINUE - END IF - - IF (MSGD1.GE.1) THEN - DO 30 I=1,M - IF (MSGD(L,I).GE.1) THEN - FTNOTE(0) = .TRUE. - FTNOTE(MSGD(L,I)) = .TRUE. 
- END IF - 30 CONTINUE - END IF - 40 CONTINUE - -C PRINT REPORT - - IF (ISODR) THEN - TYP = 'ODR' - ELSE - TYP = 'OLS' - END IF - WRITE (UNIT,1000) TYP - - DO 70 L=1,NQ - - WRITE (UNIT,2100) L,NROW - WRITE (UNIT,2200) - - DO 50 I=1,NP - K = MSGB(L,I) - IF (K.GE.7) THEN - FLAG = '*' - ELSE - FLAG = ' ' - END IF - IF (K.LE.-1) THEN - WRITE (UNIT,3100) I - ELSE IF (K.EQ.0) THEN - WRITE (UNIT,3200) I,FJACB(NROW,I,L),DIFF(L,I),FLAG - ELSE IF (K.GE.1) THEN - WRITE (UNIT,3300) I,FJACB(NROW,I,L),DIFF(L,I),FLAG,K - END IF - 50 CONTINUE - IF (ISODR) THEN - DO 60 I=1,M - K = MSGD(L,I) - IF (K.GE.7) THEN - FLAG = '*' - ELSE - FLAG = ' ' - END IF - IF (K.LE.-1) THEN - WRITE (UNIT,4100) NROW,I - ELSE IF (K.EQ.0) THEN - WRITE (UNIT,4200) NROW,I, - + FJACD(NROW,I,L),DIFF(L,NP+I),FLAG - ELSE IF (K.GE.1) THEN - WRITE (UNIT,4300) NROW,I, - + FJACD(NROW,I,L),DIFF(L,NP+I),FLAG,K - END IF - 60 CONTINUE - END IF - 70 CONTINUE - -C PRINT FOOTNOTES - - IF (FTNOTE(0)) THEN - - WRITE (UNIT,5000) - IF (FTNOTE(1)) WRITE (UNIT,5100) - IF (FTNOTE(2)) WRITE (UNIT,5200) - IF (FTNOTE(3)) WRITE (UNIT,5300) - IF (FTNOTE(4)) WRITE (UNIT,5400) - IF (FTNOTE(5)) WRITE (UNIT,5500) - IF (FTNOTE(6)) WRITE (UNIT,5600) - IF (FTNOTE(7)) WRITE (UNIT,5700) - END IF - - IF (NETA.LT.0) THEN - WRITE (UNIT,6000) -NETA - ELSE - WRITE (UNIT,6100) NETA - END IF - WRITE (UNIT,7000) NTOL - -C PRINT OUT ROW OF EXPLANATORY VARIABLE WHICH WAS CHECKED. 
- - WRITE (UNIT,8100) NROW - - DO 80 J=1,M - WRITE (UNIT,8110) NROW,J,XPLUSD(NROW,J) - 80 CONTINUE - - RETURN - -C FORMAT STATEMENTS - - 1000 FORMAT - + (//' *** DERIVATIVE CHECKING REPORT FOR FIT BY METHOD OF ',A3, - + ' ***'/) - 2100 FORMAT (/' FOR RESPONSE ',I2,' OF OBSERVATION ', I5/) - 2200 FORMAT (' ',' USER', - + ' ',' '/ - + ' ',' SUPPLIED', - + ' RELATIVE',' DERIVATIVE '/ - + ' DERIVATIVE WRT',' VALUE', - + ' DIFFERENCE',' ASSESSMENT '/) - 3100 FORMAT (' BETA(',I3,')', ' --- ', - + ' --- ',' UNCHECKED') - 3200 FORMAT (' BETA(',I3,')', 1P,2D13.2,3X,A1, - + 'VERIFIED') - 3300 FORMAT (' BETA(',I3,')', 1P,2D13.2,3X,A1, - + 'QUESTIONABLE (SEE NOTE ',I1,')') - 4100 FORMAT (' DELTA(',I2,',',I2,')', ' --- ', - + ' --- ',' UNCHECKED') - 4200 FORMAT (' DELTA(',I2,',',I2,')', 1P,2D13.2,3X,A1, - + 'VERIFIED') - 4300 FORMAT (' DELTA(',I2,',',I2,')', 1P,2D13.2,3X,A1, - + 'QUESTIONABLE (SEE NOTE ',I1,')') - 5000 FORMAT - + (/' NOTES:') - 5100 FORMAT - + (/' (1) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' AGREE, BUT'/ - + ' RESULTS ARE QUESTIONABLE BECAUSE BOTH ARE ZERO.') - 5200 FORMAT - + (/' (2) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' AGREE, BUT'/ - + ' RESULTS ARE QUESTIONABLE BECAUSE ONE IS', - + ' IDENTICALLY ZERO'/ - + ' AND THE OTHER IS ONLY APPROXIMATELY ZERO.') - 5300 FORMAT - + (/' (3) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' DISAGREE, BUT'/ - + ' RESULTS ARE QUESTIONABLE BECAUSE ONE IS', - + ' IDENTICALLY ZERO'/ - + ' AND THE OTHER IS NOT.') - 5400 FORMAT - + (/' (4) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' DISAGREE, BUT'/ - + ' FINITE DIFFERENCE DERIVATIVE IS QUESTIONABLE', - + ' BECAUSE EITHER'/ - + ' THE RATIO OF RELATIVE CURVATURE TO RELATIVE', - + ' SLOPE IS TOO HIGH'/ - + ' OR THE SCALE IS WRONG.') - 5500 FORMAT - + (/' (5) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' DISAGREE, BUT'/ - + ' FINITE DIFFERENCE DERIVATIVE IS QUESTIONABLE', - + ' BECAUSE THE'/ - + ' RATIO OF RELATIVE 
CURVATURE TO RELATIVE SLOPE IS', - + ' TOO HIGH.') - 5600 FORMAT - + (/' (6) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' DISAGREE, BUT'/ - + ' HAVE AT LEAST 2 DIGITS IN COMMON.') - 5700 FORMAT - + (/' (7) USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVES', - + ' DISAGREE, AND'/ - + ' HAVE FEWER THAN 2 DIGITS IN COMMON. DERIVATIVE', - + ' CHECKING MUST'/ - + ' BE TURNED OFF IN ORDER TO PROCEED.') - 6000 FORMAT - + (/' NUMBER OF RELIABLE DIGITS IN FUNCTION RESULTS ', - + I5/ - + ' (ESTIMATED BY ODRPACK)') - 6100 FORMAT - + (/' NUMBER OF RELIABLE DIGITS IN FUNCTION RESULTS ', - + I5/ - + ' (SUPPLIED BY USER)') - 7000 FORMAT - + (/' NUMBER OF DIGITS OF AGREEMENT REQUIRED BETWEEN '/ - + ' USER SUPPLIED AND FINITE DIFFERENCE DERIVATIVE FOR '/ - + ' USER SUPPLIED DERIVATIVE TO BE CONSIDERED VERIFIED ', - + I5) - 8100 FORMAT - + (/' ROW NUMBER AT WHICH DERIVATIVES WERE CHECKED ', - + I5// - + ' -VALUES OF THE EXPLANATORY VARIABLES AT THIS ROW'/) - 8110 FORMAT - + (10X,'X(',I2,',',I2,')',1X,1P,3D16.8) - END -*DODPE3 - SUBROUTINE DODPE3 - + (UNIT,D2,D3) -C***BEGIN PROLOGUE DODPE3 -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE PRINT ERROR REPORTS INDICATING THAT COMPUTATIONS WERE -C STOPPED IN USER SUPPLIED SUBROUTINES FCN -C***END PROLOGUE DODPE3 - -C...SCALAR ARGUMENTS - INTEGER - + D2,D3,UNIT - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C D2: THE 2ND DIGIT (FROM THE LEFT) OF INFO. -C D3: THE 3RD DIGIT (FROM THE LEFT) OF INFO. -C UNIT: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. 
- - -C***FIRST EXECUTABLE STATEMENT DODPE3 - - -C PRINT APPROPRIATE MESSAGES TO INDICATE WHERE COMPUTATIONS WERE -C STOPPED - - IF (D2.EQ.2) THEN - WRITE(UNIT,1100) - ELSE IF (D2.EQ.3) THEN - WRITE(UNIT,1200) - ELSE IF (D2.EQ.4) THEN - WRITE(UNIT,1300) - END IF - IF (D3.EQ.2) THEN - WRITE(UNIT,1400) - END IF - -C FORMAT STATEMENTS - - 1100 FORMAT - + (//' VARIABLE ISTOP HAS BEEN RETURNED WITH A NONZERO VALUE '/ - + ' FROM USER SUPPLIED SUBROUTINE FCN WHEN INVOKED USING THE'/ - + ' INITIAL ESTIMATES OF BETA AND DELTA SUPPLIED BY THE '/ - + ' USER. THE INITIAL ESTIMATES MUST BE ADJUSTED TO ALLOW '/ - + ' PROPER EVALUATION OF SUBROUTINE FCN BEFORE THE '/ - + ' REGRESSION PROCEDURE CAN CONTINUE.') - 1200 FORMAT - + (//' VARIABLE ISTOP HAS BEEN RETURNED WITH A NONZERO VALUE '/ - + ' FROM USER SUPPLIED SUBROUTINE FCN. THIS OCCURRED DURING'/ - + ' THE COMPUTATION OF THE NUMBER OF RELIABLE DIGITS IN THE '/ - + ' PREDICTED VALUES (F) RETURNED FROM SUBROUTINE FCN, INDI-'/ - + ' CATING THAT CHANGES IN THE INITIAL ESTIMATES OF BETA(K),'/ - + ' K=1,NP, AS SMALL AS 2*BETA(K)*SQRT(MACHINE PRECISION), '/ - + ' WHERE MACHINE PRECISION IS DEFINED AS THE SMALLEST VALUE'/ - + ' E SUCH THAT 1+E>1 ON THE COMPUTER BEING USED, PREVENT '/ - + ' SUBROUTINE FCN FROM BEING PROPERLY EVALUATED. THE '/ - + ' INITIAL ESTIMATES MUST BE ADJUSTED TO ALLOW PROPER '/ - + ' EVALUATION OF SUBROUTINE FCN DURING THESE COMPUTATIONS '/ - + ' BEFORE THE REGRESSION PROCEDURE CAN CONTINUE.') - 1300 FORMAT - + (//' VARIABLE ISTOP HAS BEEN RETURNED WITH A NONZERO VALUE '/ - + ' FROM USER SUPPLIED SUBROUTINE FCN. 
THIS OCCURRED DURING'/ - + ' THE DERIVATIVE CHECKING PROCEDURE, INDICATING THAT '/ - + ' CHANGES IN THE INITIAL ESTIMATES OF BETA(K), K=1,NP, AS '/ - + ' SMALL AS MAX[BETA(K),1/SCLB(K)]*10**(-NETA/2), AND/OR '/ - + ' OF DELTA(I,J), I=1,N AND J=1,M, AS SMALL AS '/ - + ' MAX[DELTA(I,J),1/SCLD(I,J)]*10**(-NETA/2), WHERE NETA '/ - + ' IS DEFINED TO BE THE NUMBER OF RELIABLE DIGITS IN '/ - + ' PREDICTED VALUES (F) RETURNED FROM SUBROUTINE FCN, '/ - + ' PREVENT SUBROUTINE FCN FROM BEING PROPERLY EVALUATED. '/ - + ' THE INITIAL ESTIMATES MUST BE ADJUSTED TO ALLOW PROPER '/ - + ' EVALUATION OF SUBROUTINE FCN DURING THESE COMPUTATIONS '/ - + ' BEFORE THE REGRESSION PROCEDURE CAN CONTINUE.') - 1400 FORMAT - + (//' VARIABLE ISTOP HAS BEEN RETURNED WITH A NONZERO VALUE '/ - + ' FROM USER SUPPLIED SUBROUTINE FCN WHEN INVOKED FOR '/ - + ' DERIVATIVE EVALUATIONS USING THE INITIAL ESTIMATES OF '/ - + ' BETA AND DELTA SUPPLIED BY THE USER. THE INITIAL '/ - + ' ESTIMATES MUST BE ADJUSTED TO ALLOW PROPER EVALUATION '/ - + ' OF SUBROUTINE FCN BEFORE THE REGRESSION PROCEDURE CAN '/ - + ' CONTINUE.') - END -*DODPER - SUBROUTINE DODPER - + (INFO,LUNERR,SHORT, - + N,M,NP,NQ, - + LDSCLD,LDSTPD,LDWE,LD2WE,LDWD,LD2WD, - + LWKMN,LIWKMN, - + FJACB,FJACD, - + DIFF,MSGB,ISODR,MSGD, - + XPLUSD,NROW,NETA,NTOL) -C***BEGIN PROLOGUE DODPER -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DODPE1,DODPE2,DODPE3,DODPHD -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE CONTROLLING ROUTINE FOR PRINTING ERROR REPORTS -C***END PROLOGUE DODPER - -C...SCALAR ARGUMENTS - INTEGER - + INFO,LDSCLD,LDSTPD,LDWD,LDWE,LD2WD,LD2WE,LIWKMN,LUNERR,LWKMN, - + M,N,NETA,NP,NQ,NROW,NTOL - LOGICAL - + ISODR,SHORT - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + DIFF(NQ,NP+M),FJACB(N,NP,NQ),FJACD(N,M,NQ),XPLUSD(N,M) - INTEGER - + MSGB(NQ*NP+1),MSGD(NQ*M+1) - -C...LOCAL SCALARS - INTEGER - + D1,D2,D3,D4,D5,UNIT - LOGICAL - + HEAD - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DODPE1,DODPE2,DODPE3,DODPHD - 
-C...INTRINSIC FUNCTIONS - INTRINSIC - + MOD - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C D1: THE 1ST DIGIT (FROM THE LEFT) OF INFO. -C D2: THE 2ND DIGIT (FROM THE LEFT) OF INFO. -C D3: THE 3RD DIGIT (FROM THE LEFT) OF INFO. -C D4: THE 4TH DIGIT (FROM THE LEFT) OF INFO. -C D5: THE 5TH DIGIT (FROM THE LEFT) OF INFO. -C DIFF: THE RELATIVE DIFFERENCES BETWEEN THE USER SUPPLIED AND -C FINITE DIFFERENCE DERIVATIVES FOR EACH DERIVATIVE CHECKED. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C HEAD: THE VARIABLE DESIGNATING WHETHER THE HEADING IS TO BE -C PRINTED (HEAD=.TRUE.) OR NOT (HEAD=.FALSE.). -C INFO: THE VARIABLE DESIGNATING WHY THE COMPUTATIONS WERE STOPPED. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=.TRUE.) OR BY OLS (ISODR=.FALSE.). -C LDSCLD: THE LEADING DIMENSION OF ARRAY SCLD. -C LDSTPD: THE LEADING DIMENSION OF ARRAY STPD. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LIWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY IWORK. -C LUNERR: THE LOGICAL UNIT NUMBER USED FOR ERROR MESSAGES. -C LWKMN: THE MINIMUM ACCEPTABLE LENGTH OF ARRAY WORK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C MSGB: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT BETA. -C MSGD: THE ERROR CHECKING RESULTS FOR THE JACOBIAN WRT DELTA. -C N: THE NUMBER OF OBSERVATIONS. -C NETA: THE NUMBER OF RELIABLE DIGITS IN THE MODEL. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE EXPLANATORY VARIABLE ARRAY AT -C WHICH THE DERIVATIVE IS TO BE CHECKED. -C NTOL: THE NUMBER OF DIGITS OF AGREEMENT REQUIRED BETWEEN THE -C FINITE DIFFERENCE AND THE USER SUPPLIED DERIVATIVES. -C SHORT: THE VARIABLE DESIGNATING WHETHER THE USER HAS INVOKED -C ODRPACK BY THE SHORT-CALL (SHORT=.TRUE.) 
OR THE LONG-CALL -C (SHORT=.FALSE.). -C UNIT: THE LOGICAL UNIT NUMBER FOR ERROR MESSAGES. -C XPLUSD: THE VALUES X + DELTA. - - -C***FIRST EXECUTABLE STATEMENT DODPER - - -C SET LOGICAL UNIT NUMBER FOR ERROR REPORT - - IF (LUNERR.EQ.0) THEN - RETURN - ELSE IF (LUNERR.LT.0) THEN - UNIT = 6 - ELSE - UNIT = LUNERR - END IF - -C PRINT HEADING - - HEAD = .TRUE. - CALL DODPHD(HEAD,UNIT) - -C EXTRACT INDIVIDUAL DIGITS FROM VARIABLE INFO - - D1 = MOD(INFO,100000)/10000 - D2 = MOD(INFO,10000)/1000 - D3 = MOD(INFO,1000)/100 - D4 = MOD(INFO,100)/10 - D5 = MOD(INFO,10) - -C PRINT APPROPRIATE ERROR MESSAGES FOR ODRPACK INVOKED STOP - - IF (D1.GE.1 .AND. D1.LE.3) THEN - -C PRINT APPROPRIATE MESSAGES FOR ERRORS IN -C PROBLEM SPECIFICATION PARAMETERS -C DIMENSION SPECIFICATION PARAMETERS -C NUMBER OF GOOD DIGITS IN X -C WEIGHTS - - CALL DODPE1(UNIT,D1,D2,D3,D4,D5, - + N,M,NQ, - + LDSCLD,LDSTPD,LDWE,LD2WE,LDWD,LD2WD, - + LWKMN,LIWKMN) - - ELSE IF ((D1.EQ.4) .OR. (MSGB(1).GE.0)) THEN - -C PRINT APPROPRIATE MESSAGES FOR DERIVATIVE CHECKING - - CALL DODPE2(UNIT, - + N,M,NP,NQ, - + FJACB,FJACD, - + DIFF,MSGB(1),MSGB(2),ISODR,MSGD(1),MSGD(2), - + XPLUSD,NROW,NETA,NTOL) - - ELSE IF (D1.EQ.5) THEN - -C PRINT APPROPRIATE ERROR MESSAGE FOR USER INVOKED STOP FROM FCN - - CALL DODPE3(UNIT,D2,D3) - - END IF - -C PRINT CORRECT FORM OF CALL STATEMENT - - IF ((D1.GE.1 .AND. D1.LE.3) .OR. - + (D1.EQ.4 .AND. (D2.EQ.2 .OR. D3.EQ.2)) .OR. 
- + (D1.EQ.5)) THEN - IF (SHORT) THEN - WRITE (UNIT,1100) - ELSE - WRITE (UNIT,1200) - END IF - END IF - - RETURN - -C FORMAT STATEMENTS - - 1100 FORMAT - + (//' THE CORRECT FORM OF THE CALL STATEMENT IS '// - + ' CALL DODR'/ - + ' + (FCN,'/ - + ' + N,M,NP,NQ,'/ - + ' + BETA,'/ - + ' + Y,LDY,X,LDX,'/ - + ' + WE,LDWE,LD2WE,WD,LDWD,LD2WD,'/ - + ' + JOB,'/ - + ' + IPRINT,LUNERR,LUNRPT,'/ - + ' + WORK,LWORK,IWORK,LIWORK,'/ - + ' + INFO)') - 1200 FORMAT - + (//' THE CORRECT FORM OF THE CALL STATEMENT IS '// - + ' CALL DODRC'/ - + ' + (FCN,'/ - + ' + N,M,NP,NQ,'/ - + ' + BETA,'/ - + ' + Y,LDY,X,LDX,'/ - + ' + WE,LDWE,LD2WE,WD,LDWD,LD2WD,'/ - + ' + IFIXB,IFIXX,LDIFX,'/ - + ' + JOB,NDIGIT,TAUFAC,'/ - + ' + SSTOL,PARTOL,MAXIT,'/ - + ' + IPRINT,LUNERR,LUNRPT,'/ - + ' + STPB,STPD,LDSTPD,'/ - + ' + SCLB,SCLD,LDSCLD,'/ - + ' + WORK,LWORK,IWORK,LIWORK,'/ - + ' + INFO)') - - END -*DODPHD - SUBROUTINE DODPHD - + (HEAD,UNIT) -C***BEGIN PROLOGUE DODPHD -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE PRINT ODRPACK HEADING -C***END PROLOGUE DODPHD - -C...SCALAR ARGUMENTS - INTEGER - + UNIT - LOGICAL - + HEAD - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C HEAD: THE VARIABLE DESIGNATING WHETHER THE HEADING IS TO BE -C PRINTED (HEAD=.TRUE.) OR NOT (HEAD=.FALSE.). -C UNIT: THE LOGICAL UNIT NUMBER TO WHICH THE HEADING IS WRITTEN. - - -C***FIRST EXECUTABLE STATEMENT DODPHD - - - IF (HEAD) THEN - WRITE(UNIT,1000) - HEAD = .FALSE. 
- END IF - - RETURN - -C FORMAT STATEMENTS - - 1000 FORMAT ( - + ' ******************************************************* '/ - + ' * ODRPACK VERSION 2.01 OF 06-19-92 (DOUBLE PRECISION) * '/ - + ' ******************************************************* '/) - END -*DODSTP - SUBROUTINE DODSTP - + (N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SS,TT,LDTT,DELTA, - + ALPHA,EPSFCN,ISODR, - + TFJACB,OMEGA,U,QRAUX,KPVT, - + S,T,PHI,IRANK,RCOND,FORVCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK,LWRK,ISTOPC) -C***BEGIN PROLOGUE DODSTP -C***REFER TO DODR,DODRC -C***ROUTINES CALLED IDAMAX,DCHEX,DESUBI,DFCTR,DNRM2,DQRDC,DQRSL,DROT, -C DROTG,DSOLVE,DTRCO,DTRSL,DVEVTR,DWGHT,DZERO -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE COMPUTE LOCALLY CONSTRAINED STEPS S AND T, AND PHI(ALPHA) -C***END PROLOGUE DODSTP - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + ALPHA,EPSFCN,PHI,RCOND - INTEGER - + IRANK,ISTOPC,LDTT,LDWD,LD2WD,LWRK,M,N,NP,NPP,NQ - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + DELTA(N,M),F(N,NQ),FJACB(N,NP,NQ),FJACD(N,M,NQ), - + OMEGA(NQ,NQ),QRAUX(NP),S(NP),SS(NP), - + T(N,M),TFJACB(N,NQ,NP),TT(LDTT,M),U(NP),WD(LDWD,LD2WD,M), - + WRK1(N,NQ,M),WRK2(N,NQ),WRK3(NP),WRK4(M,M),WRK5(M),WRK(LWRK) - INTEGER - + KPVT(NP) - -C...LOCAL SCALARS - DOUBLE PRECISION - + CO,ONE,SI,TEMP,ZERO - INTEGER - + I,IMAX,INF,IPVT,J,K,K1,K2,KP,L - LOGICAL - + ELIM,FORVCV - -C...LOCAL ARRAYS - DOUBLE PRECISION - + DUM(2) - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DNRM2 - INTEGER - + IDAMAX - EXTERNAL - + DNRM2,IDAMAX - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DCHEX,DESUBI,DFCTR,DQRDC,DQRSL,DROT,DROTG, - + DSOLVE,DTRCO,DTRSL,DVEVTR,DWGHT,DZERO - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,ONE - + /0.0D0,1.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ALPHA: THE LEVENBERG-MARQUARDT PARAMETER. -C CO: THE COSINE FROM THE PLANE ROTATION. -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. 
-C DUM: A DUMMY ARRAY. -C ELIM: THE VARIABLE DESIGNATING WHETHER COLUMNS OF THE JACOBIAN -C WRT BETA HAVE BEEN ELIMINATED (ELIM=TRUE) OR NOT -C (ELIM=FALSE). -C EPSFCN: THE FUNCTION'S PRECISION. -C F: THE (WEIGHTED) ESTIMATED VALUES OF EPSILON. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. -C FORVCV: THE VARIABLE DESIGNATING WHETHER THIS SUBROUTINE WAS -C CALLED TO SET UP FOR THE COVARIANCE MATRIX COMPUTATIONS -C (FORVCV=TRUE) OR NOT (FORVCV=FALSE). -C I: AN INDEXING VARIABLE. -C IMAX: THE INDEX OF THE ELEMENT OF U HAVING THE LARGEST ABSOLUTE -C VALUE. -C INF: THE RETURN CODE FROM LINPACK ROUTINES. -C IPVT: THE VARIABLE DESIGNATING WHETHER PIVOTING IS TO BE DONE. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOPC: THE VARIABLE DESIGNATING WHETHER THE COMPUTATIONS WERE -C STOPED DUE TO A NUMERICAL ERROR WITHIN SUBROUTINE DODSTP. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C K1: AN INDEXING VARIABLE. -C K2: AN INDEXING VARIABLE. -C KP: THE RANK OF THE JACOBIAN WRT BETA. -C KPVT: THE PIVOT VECTOR. -C L: AN INDEXING VARIABLE. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LWRK: THE LENGTH OF VECTOR WRK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C OMEGA: THE ARRAY DEFINED S.T. -C OMEGA*TRANS(OMEGA) = INV(I+FJACD*INV(E)*TRANS(FJACD)) -C = (I-FJACD*INV(P)*TRANS(FJACD)) -C WHERE E = D**2 + ALPHA*TT**2 -C P = TRANS(FJACD)*FJACD + D**2 + ALPHA*TT**2 -C ONE: THE VALUE 1.0D0. -C PHI: THE DIFFERENCE BETWEEN THE NORM OF THE SCALED STEP -C AND THE TRUST REGION DIAMETER. -C QRAUX: THE ARRAY REQUIRED TO RECOVER THE ORTHOGONAL PART OF THE -C Q-R DECOMPOSITION. 
-C RCOND: THE APPROXIMATE RECIPROCAL CONDITION NUMBER OF TFJACB. -C S: THE STEP FOR BETA. -C SI: THE SINE FROM THE PLANE ROTATION. -C SS: THE SCALING VALUES FOR THE UNFIXED BETAS. -C T: THE STEP FOR DELTA. -C TEMP: A TEMPORARY STORAGE LOCATION. -C TFJACB: THE ARRAY OMEGA*FJACB. -C TT: THE SCALING VALUES FOR DELTA. -C U: THE APPROXIMATE NULL VECTOR FOR TFJACB. -C WD: THE (SQUARED) DELTA WEIGHTS. -C WRK: A WORK ARRAY OF (LWRK) ELEMENTS, -C EQUIVALENCED TO WRK1 AND WRK2. -C WRK1: A WORK ARRAY OF (N BY NQ BY M) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK3: A WORK ARRAY OF (NP) ELEMENTS. -C WRK4: A WORK ARRAY OF (M BY M) ELEMENTS. -C WRK5: A WORK ARRAY OF (M) ELEMENTS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODSTP - - -C COMPUTE LOOP PARAMETERS WHICH DEPEND ON WEIGHT STRUCTURE - -C SET UP KPVT IF ALPHA = 0 - - IF (ALPHA.EQ.ZERO) THEN - KP = NPP - DO 10 K=1,NP - KPVT(K) = K - 10 CONTINUE - ELSE - IF (NPP.GE.1) THEN - KP = NPP-IRANK - ELSE - KP = NPP - END IF - END IF - - IF (ISODR) THEN - -C T = WD * DELTA = D*G2 - CALL DWGHT(N,M,WD,LDWD,LD2WD,DELTA,N,T,N) - - DO 300 I=1,N - -C COMPUTE WRK4, SUCH THAT -C TRANS(WRK4)*WRK4 = E = (D**2 + ALPHA*TT**2) - CALL DESUBI(N,M,WD,LDWD,LD2WD,ALPHA,TT,LDTT,I,WRK4) - CALL DFCTR(.FALSE.,WRK4,M,M,INF) - IF (INF.NE.0) THEN - ISTOPC = 60000 - RETURN - END IF - -C COMPUTE OMEGA, SUCH THAT -C TRANS(OMEGA)*OMEGA = I+FJACD*INV(E)*TRANS(FJACD) -C INV(TRANS(OMEGA)*OMEGA) = I-FJACD*INV(P)*TRANS(FJACD) - CALL DVEVTR(M,NQ,I, - + FJACD,N,M, WRK4,M, WRK1,N,NQ, OMEGA,NQ, WRK5) - DO 110 L=1,NQ - OMEGA(L,L) = ONE + OMEGA(L,L) - 110 CONTINUE - CALL DFCTR(.FALSE.,OMEGA,NQ,NQ,INF) - IF (INF.NE.0) THEN - ISTOPC = 60000 - RETURN - END IF - -C COMPUTE WRK1 = TRANS(FJACD)*(I-FJACD*INV(P)*TRANS(JFACD)) -C = TRANS(FJACD)*INV(TRANS(OMEGA)*OMEGA) - DO 130 J=1,M - DO 120 L=1,NQ - WRK1(I,L,J) = FJACD(I,J,L) - 120 CONTINUE - CALL DSOLVE(NQ,OMEGA,NQ,WRK1(I,1,J),N,4) - CALL DSOLVE(NQ,OMEGA,NQ,WRK1(I,1,J),N,2) - 130 CONTINUE - -C 
COMPUTE WRK5 = INV(E)*D*G2 - DO 140 J=1,M - WRK5(J) = T(I,J) - 140 CONTINUE - CALL DSOLVE(M,WRK4,M,WRK5,1,4) - CALL DSOLVE(M,WRK4,M,WRK5,1,2) - -C COMPUTE TFJACB = INV(TRANS(OMEGA))*FJACB - DO 170 K=1,KP - DO 150 L=1,NQ - TFJACB(I,L,K) = FJACB(I,KPVT(K),L) - 150 CONTINUE - CALL DSOLVE(NQ,OMEGA,NQ,TFJACB(I,1,K),N,4) - DO 160 L=1,NQ - IF (SS(1).GT.ZERO) THEN - TFJACB(I,L,K) = TFJACB(I,L,K)/SS(KPVT(K)) - ELSE - TFJACB(I,L,K) = TFJACB(I,L,K)/ABS(SS(1)) - END IF - 160 CONTINUE - 170 CONTINUE - -C COMPUTE WRK2 = (V*INV(E)*D**2*G2 - G1) - DO 190 L=1,NQ - WRK2(I,L) = ZERO - DO 180 J=1,M - WRK2(I,L) = WRK2(I,L) + FJACD(I,J,L)*WRK5(J) - 180 CONTINUE - WRK2(I,L) = WRK2(I,L) - F(I,L) - 190 CONTINUE - -C COMPUTE WRK2 = INV(TRANS(OMEGA))*(V*INV(E)*D**2*G2 - G1) - CALL DSOLVE(NQ,OMEGA,NQ,WRK2(I,1),N,4) - 300 CONTINUE - - ELSE - DO 360 I=1,N - DO 350 L=1,NQ - DO 340 K=1,KP - TFJACB(I,L,K) = FJACB(I,KPVT(K),L) - IF (SS(1).GT.ZERO) THEN - TFJACB(I,L,K) = TFJACB(I,L,K)/SS(KPVT(K)) - ELSE - TFJACB(I,L,K) = TFJACB(I,L,K)/ABS(SS(1)) - END IF - 340 CONTINUE - WRK2(I,L) = -F(I,L) - 350 CONTINUE - 360 CONTINUE - END IF - -C COMPUTE S - -C DO QR FACTORIZATION (WITH COLUMN PIVOTING OF TRJACB IF ALPHA = 0) - - IF (ALPHA.EQ.ZERO) THEN - IPVT = 1 - DO 410 K=1,NP - KPVT(K) = 0 - 410 CONTINUE - ELSE - IPVT = 0 - END IF - - CALL DQRDC(TFJACB,N*NQ,N*NQ,KP,QRAUX,KPVT,WRK3,IPVT) - CALL DQRSL(TFJACB,N*NQ,N*NQ,KP, - + QRAUX,WRK2,DUM,WRK2,DUM,DUM,DUM,1000,INF) - IF (INF.NE.0) THEN - ISTOPC = 60000 - RETURN - END IF - -C ELIMINATE ALPHA PART USING GIVENS ROTATIONS - - IF (ALPHA.NE.ZERO) THEN - CALL DZERO(NPP,1,S,NPP) - DO 430 K1=1,KP - CALL DZERO(KP,1,WRK3,KP) - WRK3(K1) = SQRT(ALPHA) - DO 420 K2=K1,KP - CALL DROTG(TFJACB(K2,1,K2),WRK3(K2),CO,SI) - IF (KP-K2.GE.1) THEN - CALL DROT(KP-K2,TFJACB(K2,1,K2+1),N*NQ, - + WRK3(K2+1),1,CO,SI) - END IF - TEMP = CO*WRK2(K2,1) + SI*S(KPVT(K1)) - S(KPVT(K1)) = -SI*WRK2(K2,1) + CO*S(KPVT(K1)) - WRK2(K2,1) = TEMP - 420 CONTINUE - 430 CONTINUE - END IF - -C COMPUTE 
SOLUTION - ELIMINATE VARIABLES IF NECESSARY - - IF (NPP.GE.1) THEN - IF (ALPHA.EQ.ZERO) THEN - KP = NPP - -C ESTIMATE RCOND - U WILL CONTAIN APPROX NULL VECTOR - - 440 CALL DTRCO(TFJACB,N*NQ,KP,RCOND,U,1) - IF (RCOND.LE.EPSFCN) THEN - ELIM = .TRUE. - IMAX = IDAMAX(KP,U,1) - -C IMAX IS THE COLUMN TO REMOVE - USE DCHEX AND FIX KPVT - - IF (IMAX.NE.KP) THEN - CALL DCHEX(TFJACB,N*NQ,KP,IMAX,KP,WRK2,N*NQ,1, - + QRAUX,WRK3,2) - K = KPVT(IMAX) - DO 450 I=IMAX,KP-1 - KPVT(I) = KPVT(I+1) - 450 CONTINUE - KPVT(KP) = K - END IF - KP = KP-1 - ELSE - ELIM = .FALSE. - END IF - IF (ELIM .AND. KP.GE.1) THEN - GO TO 440 - ELSE - IRANK = NPP-KP - END IF - END IF - END IF - - IF (FORVCV) RETURN - -C BACKSOLVE AND UNSCRAMBLE - - IF (NPP.GE.1) THEN - DO 510 I=KP+1,NPP - WRK2(I,1) = ZERO - 510 CONTINUE - IF (KP.GE.1) THEN - CALL DTRSL(TFJACB,N*NQ,KP,WRK2,01,INF) - IF (INF.NE.0) THEN - ISTOPC = 60000 - RETURN - END IF - END IF - DO 520 I=1,NPP - IF (SS(1).GT.ZERO) THEN - S(KPVT(I)) = WRK2(I,1)/SS(KPVT(I)) - ELSE - S(KPVT(I)) = WRK2(I,1)/ABS(SS(1)) - END IF - 520 CONTINUE - END IF - - IF (ISODR) THEN - -C NOTE: T AND WRK1 HAVE BEEN INITIALIZED ABOVE, -C WHERE T = WD * DELTA = D*G2 -C WRK1 = TRANS(FJACD)*(I-FJACD*INV(P)*TRANS(JFACD)) - - DO 670 I=1,N - -C COMPUTE WRK4, SUCH THAT -C TRANS(WRK4)*WRK4 = E = (D**2 + ALPHA*TT**2) - CALL DESUBI(N,M,WD,LDWD,LD2WD,ALPHA,TT,LDTT,I,WRK4) - CALL DFCTR(.FALSE.,WRK4,M,M,INF) - IF (INF.NE.0) THEN - ISTOPC = 60000 - RETURN - END IF - -C COMPUTE WRK5 = INV(E)*D*G2 - DO 610 J=1,M - WRK5(J) = T(I,J) - 610 CONTINUE - CALL DSOLVE(M,WRK4,M,WRK5,1,4) - CALL DSOLVE(M,WRK4,M,WRK5,1,2) - - DO 640 L=1,NQ - WRK2(I,L) = F(I,L) - DO 620 K=1,NPP - WRK2(I,L) = WRK2(I,L) + FJACB(I,K,L)*S(K) - 620 CONTINUE - DO 630 J=1,M - WRK2(I,L) = WRK2(I,L) - FJACD(I,J,L)*WRK5(J) - 630 CONTINUE - 640 CONTINUE - - DO 660 J=1,M - WRK5(J) = ZERO - DO 650 L=1,NQ - WRK5(J) = WRK5(J) + WRK1(I,L,J)*WRK2(I,L) - 650 CONTINUE - T(I,J) = -(WRK5(J) + T(I,J)) - 660 CONTINUE - CALL 
DSOLVE(M,WRK4,M,T(I,1),N,4) - CALL DSOLVE(M,WRK4,M,T(I,1),N,2) - 670 CONTINUE - - END IF - -C COMPUTE PHI(ALPHA) FROM SCALED S AND T - - CALL DWGHT(NPP,1,SS,NPP,1,S,NPP,WRK,NPP) - IF (ISODR) THEN - CALL DWGHT(N,M,TT,LDTT,1,T,N,WRK(NPP+1),N) - PHI = DNRM2(NPP+N*M,WRK,1) - ELSE - PHI = DNRM2(NPP,WRK,1) - END IF - - RETURN - END -*DODVCV - SUBROUTINE DODVCV - + (N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SSF,SS,TT,LDTT,DELTA, - + EPSFCN,ISODR, - + VCV,SD, - + WRK6,OMEGA,U,QRAUX,JPVT, - + S,T,IRANK,RCOND,RSS,IDF,RVAR,IFIXB, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK,LWRK,ISTOPC) -C***BEGIN PROLOGUE DODVCV -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DPODI,DODSTP -C***DATE WRITTEN 901207 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE COMPUTE COVARIANCE MATRIX OF ESTIMATED PARAMETERS -C***END PROLOGUE DODVCV - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + EPSFCN,RCOND,RSS,RVAR - INTEGER - + IDF,IRANK,ISTOPC,LDTT,LDWD,LD2WD,LWRK,M,N,NP,NPP,NQ - LOGICAL - + ISODR - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + DELTA(N,M),F(N,NQ), - + FJACB(N,NP,NQ),FJACD(N,M,NQ), - + OMEGA(NQ,NQ),QRAUX(NP),S(NP),SD(NP),SS(NP),SSF(NP), - + T(N,M),TT(LDTT,M),U(NP),VCV(NP,NP),WD(LDWD,LD2WD,M), - + WRK1(N,NQ,M),WRK2(N,NQ),WRK3(NP),WRK4(M,M),WRK5(M), - + WRK6(N*NQ,NP),WRK(LWRK) - INTEGER - + IFIXB(NP),JPVT(NP) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TEMP,ZERO - INTEGER - + I,IUNFIX,J,JUNFIX,KP,L - LOGICAL - + FORVCV - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DPODI,DODSTP - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,SQRT - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C DELTA: THE ESTIMATED ERRORS IN THE EXPLANATORY VARIABLES. -C EPSFCN: THE FUNCTION'S PRECISION. -C F: THE (WEIGHTED) ESTIMATED VALUES OF EPSILON. -C FJACB: THE JACOBIAN WITH RESPECT TO BETA. -C FJACD: THE JACOBIAN WITH RESPECT TO DELTA. 
-C FORVCV: THE VARIABLE DESIGNATING WHETHER SUBROUTINE DODSTP IS -C CALLED TO SET UP FOR THE COVARIANCE MATRIX COMPUTATIONS -C (FORVCV=TRUE) OR NOT (FORVCV=FALSE). -C I: AN INDEXING VARIABLE. -C IDF: THE DEGREES OF FREEDOM OF THE FIT, EQUAL TO THE NUMBER OF -C OBSERVATIONS WITH NONZERO WEIGHTED DERIVATIVES MINUS THE -C NUMBER OF PARAMETERS BEING ESTIMATED. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IMAX: THE INDEX OF THE ELEMENT OF U HAVING THE LARGEST ABSOLUTE -C VALUE. -C IRANK: THE RANK DEFICIENCY OF THE JACOBIAN WRT BETA. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C ISTOPC: THE VARIABLE DESIGNATING WHETHER THE COMPUTATIONS WERE -C STOPED DUE TO A NUMERICAL ERROR WITHIN SUBROUTINE DODSTP. -C IUNFIX: THE INDEX OF THE NEXT UNFIXED PARAMETER. -C J: AN INDEXING VARIABLE. -C JPVT: THE PIVOT VECTOR. -C JUNFIX: THE INDEX OF THE NEXT UNFIXED PARAMETER. -C KP: THE RANK OF THE JACOBIAN WRT BETA. -C L: AN INDEXING VARIABLE. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDWD: THE LEADING DIMENSION OF ARRAY WD. -C LD2WD: THE SECOND DIMENSION OF ARRAY WD. -C LWRK: THE LENGTH OF VECTOR WRK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NPP: THE NUMBER OF FUNCTION PARAMETERS BEING ESTIMATED. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C OMEGA: THE ARRAY DEFINED S.T. -C OMEGA*TRANS(OMEGA) = INV(I+FJACD*INV(E)*TRANS(FJACD)) -C = (I-FJACD*INV(P)*TRANS(FJACD)) -C WHERE E = D**2 + ALPHA*TT**2 -C P = TRANS(FJACD)*FJACD + D**2 + ALPHA*TT**2 -C QRAUX: THE ARRAY REQUIRED TO RECOVER THE ORTHOGONAL PART OF THE -C Q-R DECOMPOSITION. -C RCOND: THE APPROXIMATE RECIPROCAL CONDITION OF FJACB. -C RSS: THE RESIDUAL SUM OF SQUARES. -C RVAR: THE RESIDUAL VARIANCE. -C S: THE STEP FOR BETA. -C SD: THE STANDARD DEVIATIONS OF THE ESTIMATED BETAS. 
-C SS: THE SCALING VALUES FOR THE UNFIXED BETAS. -C SSF: THE SCALING VALUES USED FOR BETA. -C T: THE STEP FOR DELTA. -C TEMP: A TEMPORARY STORAGE LOCATION -C TT: THE SCALING VALUES FOR DELTA. -C U: THE APPROXIMATE NULL VECTOR FOR FJACB. -C VCV: THE COVARIANCE MATRIX OF THE ESTIMATED BETAS. -C WD: THE DELTA WEIGHTS. -C WRK: A WORK ARRAY OF (LWRK) ELEMENTS, -C EQUIVALENCED TO WRK1 AND WRK2. -C WRK1: A WORK ARRAY OF (N BY NQ BY M) ELEMENTS. -C WRK2: A WORK ARRAY OF (N BY NQ) ELEMENTS. -C WRK3: A WORK ARRAY OF (NP) ELEMENTS. -C WRK4: A WORK ARRAY OF (M BY M) ELEMENTS. -C WRK5: A WORK ARRAY OF (M) ELEMENTS. -C WRK6: A WORK ARRAY OF (N*NQ BY P) ELEMENTS. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DODVCV - - - FORVCV = .TRUE. - ISTOPC = 0 - - CALL DODSTP(N,M,NP,NQ,NPP, - + F,FJACB,FJACD, - + WD,LDWD,LD2WD,SS,TT,LDTT,DELTA, - + ZERO,EPSFCN,ISODR, - + WRK6,OMEGA,U,QRAUX,JPVT, - + S,T,TEMP,IRANK,RCOND,FORVCV, - + WRK1,WRK2,WRK3,WRK4,WRK5,WRK,LWRK,ISTOPC) - IF (ISTOPC.NE.0) THEN - RETURN - END IF - KP = NPP - IRANK - CALL DPODI (WRK6,N*NQ,KP,WRK3,1) - - IDF = 0 - DO 150 I=1,N - DO 120 J=1,NPP - DO 110 L=1,NQ - IF (FJACB(I,J,L).NE.ZERO) THEN - IDF = IDF + 1 - GO TO 150 - END IF - 110 CONTINUE - 120 CONTINUE - IF (ISODR) THEN - DO 140 J=1,M - DO 130 L=1,NQ - IF (FJACD(I,J,L).NE.ZERO) THEN - IDF = IDF + 1 - GO TO 150 - END IF - 130 CONTINUE - 140 CONTINUE - END IF - 150 CONTINUE - - IF (IDF.GT.KP) THEN - IDF = IDF - KP - RVAR = RSS/IDF - ELSE - IDF = 0 - RVAR = RSS - END IF - -C STORE VARIANCES IN SD, RESTORING ORIGINAL ORDER - - DO 200 I=1,NP - SD(I) = ZERO - 200 CONTINUE - DO 210 I=1,KP - SD(JPVT(I)) = WRK6(I,I) - 210 CONTINUE - IF (NP.GT.NPP) THEN - JUNFIX = NPP - DO 220 J=NP,1,-1 - IF (IFIXB(J).EQ.0) THEN - SD(J) = ZERO - ELSE - SD(J) = SD(JUNFIX) - JUNFIX = JUNFIX - 1 - END IF - 220 CONTINUE - END IF - -C STORE COVARIANCE MATRIX IN VCV, RESTORING ORIGINAL ORDER - - DO 310 I=1,NP - DO 300 J=1,I - VCV(I,J) = ZERO - 300 CONTINUE - 310 CONTINUE - DO 330 
I=1,KP - DO 320 J=I+1,KP - IF (JPVT(I).GT.JPVT(J)) THEN - VCV(JPVT(I),JPVT(J))=WRK6(I,J) - ELSE - VCV(JPVT(J),JPVT(I))=WRK6(I,J) - END IF - 320 CONTINUE - 330 CONTINUE - IF (NP.GT.NPP) THEN - IUNFIX = NPP - DO 360 I=NP,1,-1 - IF (IFIXB(I).EQ.0) THEN - DO 340 J=I,1,-1 - VCV(I,J) = ZERO - 340 CONTINUE - ELSE - JUNFIX = NPP - DO 350 J=NP,1,-1 - IF (IFIXB(J).EQ.0) THEN - VCV(I,J) = ZERO - ELSE - VCV(I,J) = VCV(IUNFIX,JUNFIX) - JUNFIX = JUNFIX - 1 - END IF - 350 CONTINUE - IUNFIX = IUNFIX - 1 - END IF - 360 CONTINUE - END IF - - DO 380 I=1,NP - VCV(I,I) = SD(I) - SD(I) = SQRT(RVAR*SD(I)) - DO 370 J=1,I - VCV(J,I) = VCV(I,J) - 370 CONTINUE - 380 CONTINUE - -C UNSCALE STANDARD ERRORS AND COVARIANCE MATRIX - DO 410 I=1,NP - IF (SSF(1).GT.ZERO) THEN - SD(I) = SD(I)/SSF(I) - ELSE - SD(I) = SD(I)/ABS(SSF(1)) - END IF - DO 400 J=1,NP - IF (SSF(1).GT.ZERO) THEN - VCV(I,J) = VCV(I,J)/(SSF(I)*SSF(J)) - ELSE - VCV(I,J) = VCV(I,J)/(SSF(1)*SSF(1)) - END IF - 400 CONTINUE - 410 CONTINUE - - RETURN - END -*DPACK - SUBROUTINE DPACK - + (N2,N1,V1,V2,IFIX) -C***BEGIN PROLOGUE DPACK -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DCOPY -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SELECT THE UNFIXED ELEMENTS OF V2 AND RETURN THEM IN V1 -C***END PROLOGUE DPACK - -C...SCALAR ARGUMENTS - INTEGER - + N1,N2 - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + V1(N2),V2(N2) - INTEGER - + IFIX(N2) - -C...LOCAL SCALARS - INTEGER - + I - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DCOPY - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C IFIX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF V2 ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C N1: THE NUMBER OF ITEMS IN V1. -C N2: THE NUMBER OF ITEMS IN V2. -C V1: THE VECTOR OF THE UNFIXED ITEMS FROM V2. -C V2: THE VECTOR OF THE FIXED AND UNFIXED ITEMS FROM WHICH THE -C UNFIXED ELEMENTS ARE TO BE EXTRACTED. 
- - -C***FIRST EXECUTABLE STATEMENT DPACK - - - N1 = 0 - IF (IFIX(1).GE.0) THEN - DO 10 I=1,N2 - IF (IFIX(I).NE.0) THEN - N1 = N1+1 - V1(N1) = V2(I) - END IF - 10 CONTINUE - ELSE - N1 = N2 - CALL DCOPY(N2,V2,1,V1,1) - END IF - - RETURN - END -*DPPNML - DOUBLE PRECISION FUNCTION DPPNML - + (P) -C***BEGIN PROLOGUE DPPNML -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 901207 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***AUTHOR FILLIBEN, JAMES J., -C STATISTICAL ENGINEERING DIVISION -C NATIONAL BUREAU OF STANDARDS -C WASHINGTON, D. C. 20234 -C (ORIGINAL VERSION--JUNE 1972. -C (UPDATED --SEPTEMBER 1975, -C NOVEMBER 1975, AND -C OCTOBER 1976. -C***PURPOSE COMPUTE THE PERCENT POINT FUNCTION VALUE FOR THE -C NORMAL (GAUSSIAN) DISTRIBUTION WITH MEAN 0 AND STANDARD -C DEVIATION 1, AND WITH PROBABILITY DENSITY FUNCTION -C F(X) = (1/SQRT(2*PI))*EXP(-X*X/2). -C (ADAPTED FROM DATAPAC SUBROUTINE TPPF, WITH MODIFICATIONS -C TO FACILITATE CONVERSION TO DOUBLE PRECISION AUTOMATICALLY) -C***DESCRIPTION -C --THE CODING AS PRESENTED BELOW IS ESSENTIALLY -C IDENTICAL TO THAT PRESENTED BY ODEH AND EVANS -C AS ALGORTIHM 70 OF APPLIED STATISTICS. -C --AS POINTED OUT BY ODEH AND EVANS IN APPLIED -C STATISTICS, THEIR ALGORITHM REPRESENTES A -C SUBSTANTIAL IMPROVEMENT OVER THE PREVIOUSLY EMPLOYED -C HASTINGS APPROXIMATION FOR THE NORMAL PERCENT POINT -C FUNCTION, WITH ACCURACY IMPROVING FROM 4.5*(10**-4) -C TO 1.5*(10**-8). -C***REFERENCES ODEH AND EVANS, THE PERCENTAGE POINTS OF THE NORMAL -C DISTRIBUTION, ALGORTIHM 70, APPLIED STATISTICS, 1974, -C PAGES 96-97. -C EVANS, ALGORITHMS FOR MINIMAL DEGREE POLYNOMIAL AND -C RATIONAL APPROXIMATION, M. SC. THESIS, 1972, -C UNIVERSITY OF VICTORIA, B. C., CANADA. -C HASTINGS, APPROXIMATIONS FOR DIGITAL COMPUTERS, 1955, -C PAGES 113, 191, 192. -C NATIONAL BUREAU OF STANDARDS APPLIED MATHEMATICS -C SERIES 55, 1964, PAGE 933, FORMULA 26.2.23. 
-C FILLIBEN, SIMPLE AND ROBUST LINEAR ESTIMATION OF THE -C LOCATION PARAMETER OF A SYMMETRIC DISTRIBUTION -C (UNPUBLISHED PH.D. DISSERTATION, PRINCETON -C UNIVERSITY), 1969, PAGES 21-44, 229-231. -C FILLIBEN, "THE PERCENT POINT FUNCTION", -C (UNPUBLISHED MANUSCRIPT), 1970, PAGES 28-31. -C JOHNSON AND KOTZ, CONTINUOUS UNIVARIATE DISTRIBUTIONS, -C VOLUME 1, 1970, PAGES 40-111. -C KELLEY STATISTICAL TABLES, 1948. -C OWEN, HANDBOOK OF STATISTICAL TABLES, 1962, PAGES 3-16. -C PEARSON AND HARTLEY, BIOMETRIKA TABLES FOR -C STATISTICIANS, VOLUME 1, 1954, PAGES 104-113. -C***END PROLOGUE DPPNML - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + P - -C...LOCAL SCALARS - DOUBLE PRECISION - + ADEN,ANUM,HALF,ONE,P0,P1,P2,P3,P4,Q0,Q1,Q2,Q3,Q4,R,T,TWO,ZERO - -C...INTRINSIC FUNCTIONS - INTRINSIC - + LOG,SQRT - -C...DATA STATEMENTS - DATA - + P0,P1,P2,P3,P4 - + /-0.322232431088D0,-1.0D0,-0.342242088547D0, - + -0.204231210245D-1,-0.453642210148D-4/ - DATA - + Q0,Q1,Q2,Q3,Q4 - + /0.993484626060D-1,0.588581570495D0, - + 0.531103462366D0,0.103537752850D0,0.38560700634D-2/ - DATA - + ZERO,HALF,ONE,TWO - + /0.0D0,0.5D0,1.0D0,2.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ADEN: A VALUE USED IN THE APPROXIMATION. -C ANUM: A VALUE USED IN THE APPROXIMATION. -C HALF: THE VALUE 0.5D0. -C ONE: THE VALUE 1.0D0. -C P: THE PROBABILITY AT WHICH THE PERCENT POINT IS TO BE -C EVALUATED. P MUST BE BETWEEN 0.0D0 AND 1.0D0, EXCLUSIVE. -C P0: A PARAMETER USED IN THE APPROXIMATION. -C P1: A PARAMETER USED IN THE APPROXIMATION. -C P2: A PARAMETER USED IN THE APPROXIMATION. -C P3: A PARAMETER USED IN THE APPROXIMATION. -C P4: A PARAMETER USED IN THE APPROXIMATION. -C Q0: A PARAMETER USED IN THE APPROXIMATION. -C Q1: A PARAMETER USED IN THE APPROXIMATION. -C Q2: A PARAMETER USED IN THE APPROXIMATION. -C Q3: A PARAMETER USED IN THE APPROXIMATION. -C Q4: A PARAMETER USED IN THE APPROXIMATION. -C R: THE PROBABILITY AT WHICH THE PERCENT POINT IS EVALUATED. -C T: A VALUE USED IN THE APPROXIMATION. 
-C TWO: THE VALUE 2.0D0. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DPPT - - - IF (P.EQ.HALF) THEN - DPPNML = ZERO - - ELSE - R = P - IF (P.GT.HALF) R = ONE - R - T = SQRT(-TWO*LOG(R)) - ANUM = ((((T*P4+P3)*T+P2)*T+P1)*T+P0) - ADEN = ((((T*Q4+Q3)*T+Q2)*T+Q1)*T+Q0) - DPPNML = T + (ANUM/ADEN) - - IF (P.LT.HALF) DPPNML = -DPPNML - END IF - - RETURN - - END -*DPPT - DOUBLE PRECISION FUNCTION DPPT - + (P, IDF) -C***BEGIN PROLOGUE DPPT -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DPPNML -C***DATE WRITTEN 901207 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***AUTHOR FILLIBEN, JAMES J., -C STATISTICAL ENGINEERING DIVISION -C NATIONAL BUREAU OF STANDARDS -C WASHINGTON, D. C. 20234 -C (ORIGINAL VERSION--OCTOBER 1975.) -C (UPDATED --NOVEMBER 1975.) -C***PURPOSE COMPUTE THE PERCENT POINT FUNCTION VALUE FOR THE -C STUDENT'S T DISTRIBUTION WITH IDF DEGREES OF FREEDOM. -C (ADAPTED FROM DATAPAC SUBROUTINE TPPF, WITH MODIFICATIONS -C TO FACILITATE CONVERSION TO DOUBLE PRECISION AUTOMATICALLY) -C***DESCRIPTION -C --FOR IDF = 1 AND IDF = 2, THE PERCENT POINT FUNCTION -C FOR THE T DISTRIBUTION EXISTS IN SIMPLE CLOSED FORM -C AND SO THE COMPUTED PERCENT POINTS ARE EXACT. -C --FOR IDF BETWEEN 3 AND 6, INCLUSIVELY, THE APPROXIMATION -C IS AUGMENTED BY 3 ITERATIONS OF NEWTON'S METHOD TO -C IMPROVE THE ACCURACY, ESPECIALLY FOR P NEAR 0 OR 1. -C***REFERENCES NATIONAL BUREAU OF STANDARDS APPLIED MATHMATICS -C SERIES 55, 1964, PAGE 949, FORMULA 26.7.5. -C JOHNSON AND KOTZ, CONTINUOUS UNIVARIATE DISTRIBUTIONS, -C VOLUME 2, 1970, PAGE 102, FORMULA 11. -C FEDERIGHI, "EXTENDED TABLES OF THE PERCENTAGE POINTS -C OF STUDENT"S T DISTRIBUTION, JOURNAL OF THE AMERICAN -C STATISTICAL ASSOCIATION, 1969, PAGES 683-688. -C HASTINGS AND PEACOCK, STATISTICAL DISTRIBUTIONS, A -C HANDBOOK FOR STUDENTS AND PRACTITIONERS, 1975, -C PAGES 120-123. 
-C***END PROLOGUE DPPT - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + P - INTEGER - + IDF - -C...LOCAL SCALARS - DOUBLE PRECISION - + ARG,B21,B31,B32,B33,B34,B41,B42,B43,B44,B45, - + B51,B52,B53,B54,B55,B56,C,CON,D1,D3,D5,D7,D9,DF,EIGHT,FIFTN, - + HALF,ONE,PI,PPFN,S,TERM1,TERM2,TERM3,TERM4,TERM5,THREE,TWO, - + Z,ZERO - INTEGER - + IPASS,MAXIT - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DPPNML - EXTERNAL - + DPPNML - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ATAN,COS,SIN,SQRT - -C...DATA STATEMENTS - DATA - + B21 - + /4.0D0/ - DATA - + B31, B32, B33, B34 - + /96.0D0,5.0D0,16.0D0,3.0D0/ - DATA - + B41, B42, B43, B44, B45 - + /384.0D0,3.0D0,19.0D0,17.0D0,-15.0D0/ - DATA - + B51,B52,B53,B54,B55,B56 - + /9216.0D0,79.0D0,776.0D0,1482.0D0,-1920.0D0,-945.0D0/ - DATA - + ZERO,HALF,ONE,TWO,THREE,EIGHT,FIFTN - + /0.0D0,0.5D0,1.0D0,2.0D0,3.0D0,8.0D0,15.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ARG: A VALUE USED IN THE APPROXIMATION. -C B21: A PARAMETER USED IN THE APPROXIMATION. -C B31: A PARAMETER USED IN THE APPROXIMATION. -C B32: A PARAMETER USED IN THE APPROXIMATION. -C B33: A PARAMETER USED IN THE APPROXIMATION. -C B34: A PARAMETER USED IN THE APPROXIMATION. -C B41: A PARAMETER USED IN THE APPROXIMATION. -C B42: A PARAMETER USED IN THE APPROXIMATION. -C B43: A PARAMETER USED IN THE APPROXIMATION. -C B44: A PARAMETER USED IN THE APPROXIMATION. -C B45: A PARAMETER USED IN THE APPROXIMATION. -C B51: A PARAMETER USED IN THE APPROXIMATION. -C B52: A PARAMETER USED IN THE APPROXIMATION. -C B53: A PARAMETER USED IN THE APPROXIMATION. -C B54: A PARAMETER USED IN THE APPROXIMATION. -C B55: A PARAMETER USED IN THE APPROXIMATION. -C B56: A PARAMETER USED IN THE APPROXIMATION. -C C: A VALUE USED IN THE APPROXIMATION. -C CON: A VALUE USED IN THE APPROXIMATION. -C DF: THE DEGREES OF FREEDOM. -C D1: A VALUE USED IN THE APPROXIMATION. -C D3: A VALUE USED IN THE APPROXIMATION. -C D5: A VALUE USED IN THE APPROXIMATION. -C D7: A VALUE USED IN THE APPROXIMATION. 
-C D9: A VALUE USED IN THE APPROXIMATION. -C EIGHT: THE VALUE 8.0D0. -C FIFTN: THE VALUE 15.0D0. -C HALF: THE VALUE 0.5D0. -C IDF: THE (POSITIVE INTEGER) DEGREES OF FREEDOM. -C IPASS: A VALUE USED IN THE APPROXIMATION. -C MAXIT: THE MAXIMUM NUMBER OF ITERATIONS ALLOWED FOR THE APPROX. -C ONE: THE VALUE 1.0D0. -C P: THE PROBABILITY AT WHICH THE PERCENT POINT IS TO BE -C EVALUATED. P MUST LIE BETWEEN 0.0DO AND 1.0D0, EXCLUSIVE. -C PI: THE VALUE OF PI. -C PPFN: THE NORMAL PERCENT POINT VALUE. -C S: A VALUE USED IN THE APPROXIMATION. -C TERM1: A VALUE USED IN THE APPROXIMATION. -C TERM2: A VALUE USED IN THE APPROXIMATION. -C TERM3: A VALUE USED IN THE APPROXIMATION. -C TERM4: A VALUE USED IN THE APPROXIMATION. -C TERM5: A VALUE USED IN THE APPROXIMATION. -C THREE: THE VALUE 3.0D0. -C TWO: THE VALUE 2.0D0. -C Z: A VALUE USED IN THE APPROXIMATION. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DPPT - - - PI = 3.141592653589793238462643383279D0 - DF = IDF - MAXIT = 5 - - IF (IDF.LE.0) THEN - -C TREAT THE IDF < 1 CASE - DPPT = ZERO - - ELSE IF (IDF.EQ.1) THEN - -C TREAT THE IDF = 1 (CAUCHY) CASE - ARG = PI*P - DPPT = -COS(ARG)/SIN(ARG) - - ELSE IF (IDF.EQ.2) THEN - -C TREAT THE IDF = 2 CASE - TERM1 = SQRT(TWO)/TWO - TERM2 = TWO*P - ONE - TERM3 = SQRT(P*(ONE-P)) - DPPT = TERM1*TERM2/TERM3 - - ELSE IF (IDF.GE.3) THEN - -C TREAT THE IDF GREATER THAN OR EQUAL TO 3 CASE - PPFN = DPPNML(P) - D1 = PPFN - D3 = PPFN**3 - D5 = PPFN**5 - D7 = PPFN**7 - D9 = PPFN**9 - TERM1 = D1 - TERM2 = (ONE/B21)*(D3+D1)/DF - TERM3 = (ONE/B31)*(B32*D5+B33*D3+B34*D1)/(DF**2) - TERM4 = (ONE/B41)*(B42*D7+B43*D5+B44*D3+B45*D1)/(DF**3) - TERM5 = (ONE/B51)*(B52*D9+B53*D7+B54*D5+B55*D3+B56*D1)/(DF**4) - DPPT = TERM1 + TERM2 + TERM3 + TERM4 + TERM5 - - IF (IDF.EQ.3) THEN - -C AUGMENT THE RESULTS FOR THE IDF = 3 CASE - CON = PI*(P-HALF) - ARG = DPPT/SQRT(DF) - Z = ATAN(ARG) - DO 70 IPASS=1,MAXIT - S = SIN(Z) - C = COS(Z) - Z = Z - (Z+S*C-CON)/(TWO*C**2) - 70 CONTINUE - DPPT = SQRT(DF)*S/C - - 
ELSE IF (IDF.EQ.4) THEN - -C AUGMENT THE RESULTS FOR THE IDF = 4 CASE - CON = TWO*(P-HALF) - ARG = DPPT/SQRT(DF) - Z = ATAN(ARG) - DO 90 IPASS=1,MAXIT - S = SIN(Z) - C = COS(Z) - Z = Z - ((ONE+HALF*C**2)*S-CON)/((ONE+HALF)*C**3) - 90 CONTINUE - DPPT = SQRT(DF)*S/C - - ELSE IF (IDF.EQ.5) THEN - -C AUGMENT THE RESULTS FOR THE IDF = 5 CASE - - CON = PI*(P-HALF) - ARG = DPPT/SQRT(DF) - Z = ATAN(ARG) - DO 110 IPASS=1,MAXIT - S = SIN(Z) - C = COS(Z) - Z = Z - (Z+(C+(TWO/THREE)*C**3)*S-CON)/ - + ((EIGHT/THREE)*C**4) - 110 CONTINUE - DPPT = SQRT(DF)*S/C - - ELSE IF (IDF.EQ.6) THEN - -C AUGMENT THE RESULTS FOR THE IDF = 6 CASE - CON = TWO*(P-HALF) - ARG = DPPT/SQRT(DF) - Z = ATAN(ARG) - DO 130 IPASS=1,MAXIT - S = SIN(Z) - C = COS(Z) - Z = Z - ((ONE+HALF*C**2 + (THREE/EIGHT)*C**4)*S-CON)/ - + ((FIFTN/EIGHT)*C**5) - 130 CONTINUE - DPPT = SQRT(DF)*S/C - END IF - END IF - - RETURN - - END -*DPVB - SUBROUTINE DPVB - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP, - + ISTOP,NFEV,PVB, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DPVB -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE THE NROW-TH FUNCTION VALUE USING BETA(J) + STP -C***END PROLOGUE DPVB - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PVB,STP - INTEGER - + ISTOP,J,LDIFX,LQ,M,N,NFEV,NP,NQ,NROW - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + BETAJ - -C...ROUTINE NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER-SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C BETAJ: THE CURRENT ESTIMATE OF THE JTH PARAMETER. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. 
-C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C J: THE INDEX OF THE PARTIAL DERIVATIVE BEING EXAMINED. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE INDEPENDENT VARIABLE ARRAY AT -C WHICH THE DERIVATIVE IS TO BE CHECKED. -C PVB: THE FUNCTION VALUE FOR THE SELECTED OBSERVATION & RESPONSE. -C STP: THE STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C XPLUSD: THE VALUES OF X + DELTA. - - -C***FIRST EXECUTABLE STATEMENT DPVB - - -C COMPUTE PREDICTED VALUES - - BETAJ = BETA(J) - BETA(J) = BETA(J) + STP - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 003,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.EQ.0) THEN - NFEV = NFEV + 1 - ELSE - RETURN - END IF - BETA(J) = BETAJ - - PVB = WRK2(NROW,LQ) - - RETURN - END -*DPVD - SUBROUTINE DPVD - + (FCN, - + N,M,NP,NQ, - + BETA,XPLUSD,IFIXB,IFIXX,LDIFX, - + NROW,J,LQ,STP, - + ISTOP,NFEV,PVD, - + WRK1,WRK2,WRK6) -C***BEGIN PROLOGUE DPVD -C***REFER TO DODR,DODRC -C***ROUTINES CALLED FCN -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE NROW-TH FUNCTION VALUE USING -C X(NROW,J) + DELTA(NROW,J) + STP -C***END PROLOGUE DPVD - -C...SCALAR ARGUMENTS - DOUBLE PRECISION - + PVD,STP - INTEGER - + ISTOP,J,LDIFX,LQ,M,N,NFEV,NP,NQ,NROW - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),WRK1(N,M,NQ),WRK2(N,NQ),WRK6(N,NP,NQ),XPLUSD(N,M) - INTEGER - + IFIXB(NP),IFIXX(LDIFX,M) - -C...SUBROUTINE ARGUMENTS - EXTERNAL - + FCN - -C...LOCAL SCALARS - DOUBLE PRECISION - + XPDJ - -C...ROUTINE 
NAMES USED AS SUBPROGRAM ARGUMENTS -C FCN: THE USER-SUPPLIED SUBROUTINE FOR EVALUATING THE MODEL. - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C IFIXB: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF BETA ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C IFIXX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF X ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ISTOP: THE VARIABLE DESIGNATING WHETHER THERE ARE PROBLEMS -C COMPUTING THE FUNCTION AT THE CURRENT BETA AND DELTA. -C J: THE INDEX OF THE PARTIAL DERIVATIVE BEING EXAMINED. -C LDIFX: THE LEADING DIMENSION OF ARRAY IFIXX. -C LQ: THE RESPONSE CURRENTLY BEING EXAMINED. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NFEV: THE NUMBER OF FUNCTION EVALUATIONS. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C NROW: THE ROW NUMBER OF THE INDEPENDENT VARIABLE ARRAY AT -C WHICH THE DERIVATIVE IS TO BE CHECKED. -C PVD: THE FUNCTION VALUE FOR THE SELECTED OBSERVATION & RESPONSE. -C STP: THE STEP SIZE FOR THE FINITE DIFFERENCE DERIVATIVE. -C XPDJ: THE (NROW,J)TH ELEMENT OF XPLUSD. -C XPLUSD: THE VALUES OF X + DELTA. 
- - -C***FIRST EXECUTABLE STATEMENT DPVD - - -C COMPUTE PREDICTED VALUES - - XPDJ = XPLUSD(NROW,J) - XPLUSD(NROW,J) = XPLUSD(NROW,J) + STP - ISTOP = 0 - CALL FCN(N,M,NP,NQ, - + N,M,NP, - + BETA,XPLUSD, - + IFIXB,IFIXX,LDIFX, - + 003,WRK2,WRK6,WRK1, - + ISTOP) - IF (ISTOP.EQ.0) THEN - NFEV = NFEV + 1 - ELSE - RETURN - END IF - XPLUSD(NROW,J) = XPDJ - - PVD = WRK2(NROW,LQ) - - RETURN - END -*DSCALE - SUBROUTINE DSCALE - + (N,M,SCL,LDSCL,T,LDT,SCLT,LDSCLT) -C***BEGIN PROLOGUE DSCALE -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SCALE T BY THE INVERSE OF SCL, I.E., COMPUTE T/SCL -C***END PROLOGUE DSCALE - -C...SCALAR ARGUMENTS - INTEGER - + LDT,LDSCL,LDSCLT,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + T(LDT,M),SCL(LDSCL,M),SCLT(LDSCLT,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ONE,TEMP,ZERO - INTEGER - + I,J - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS - -C...DATA STATEMENTS - DATA - + ONE,ZERO - + /1.0D0,0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C LDSCL: THE LEADING DIMENSION OF ARRAY SCL. -C LDSCLT: THE LEADING DIMENSION OF ARRAY SCLT. -C LDT: THE LEADING DIMENSION OF ARRAY T. -C M: THE NUMBER OF COLUMNS OF DATA IN T. -C N: THE NUMBER OF ROWS OF DATA IN T. -C ONE: THE VALUE 1.0D0. -C SCL: THE SCALE VALUES. -C SCLT: THE INVERSELY SCALED MATRIX. -C T: THE ARRAY TO BE INVERSELY SCALED BY SCL. -C TEMP: A TEMPORARY SCALAR. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DSCALE - - - IF (N.EQ.0 .OR. 
M.EQ.0) RETURN - - IF (SCL(1,1).GE.ZERO) THEN - IF (LDSCL.GE.N) THEN - DO 80 J=1,M - DO 70 I=1,N - SCLT(I,J) = T(I,J)/SCL(I,J) - 70 CONTINUE - 80 CONTINUE - ELSE - DO 100 J=1,M - TEMP = ONE/SCL(1,J) - DO 90 I=1,N - SCLT(I,J) = T(I,J)*TEMP - 90 CONTINUE - 100 CONTINUE - END IF - ELSE - TEMP = ONE/ABS(SCL(1,1)) - DO 120 J=1,M - DO 110 I=1,N - SCLT(I,J) = T(I,J)*TEMP - 110 CONTINUE - 120 CONTINUE - END IF - - RETURN - END -*DSCLB - SUBROUTINE DSCLB - + (NP,BETA,SSF) -C***BEGIN PROLOGUE DSCLB -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SELECT SCALING VALUES FOR BETA ACCORDING TO THE -C ALGORITHM GIVEN IN THE ODRPACK REFERENCE GUIDE -C***END PROLOGUE DSCLB - -C...SCALAR ARGUMENTS - INTEGER - + NP - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + BETA(NP),SSF(NP) - -C...LOCAL SCALARS - DOUBLE PRECISION - + BMAX,BMIN,ONE,TEN,ZERO - INTEGER - + K - LOGICAL - + BIGDIF - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,LOG10,MAX,MIN,SQRT - -C...DATA STATEMENTS - DATA - + ZERO,ONE,TEN - + /0.0D0,1.0D0,10.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BETA: THE FUNCTION PARAMETERS. -C BIGDIF: THE VARIABLE DESIGNATING WHETHER THERE IS A SIGNIFICANT -C DIFFERENCE IN THE MAGNITUDES OF THE NONZERO ELEMENTS OF -C BETA (BIGDIF=.TRUE.) OR NOT (BIGDIF=.FALSE.). -C BMAX: THE LARGEST NONZERO MAGNITUDE. -C BMIN: THE SMALLEST NONZERO MAGNITUDE. -C K: AN INDEXING VARIABLE. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C ONE: THE VALUE 1.0D0. -C SSF: THE SCALING VALUES FOR BETA. -C TEN: THE VALUE 10.0D0. -C ZERO: THE VALUE 0.0D0. 
- - -C***FIRST EXECUTABLE STATEMENT DSCLB - - - BMAX = ABS(BETA(1)) - DO 10 K=2,NP - BMAX = MAX(BMAX,ABS(BETA(K))) - 10 CONTINUE - - IF (BMAX.EQ.ZERO) THEN - -C ALL INPUT VALUES OF BETA ARE ZERO - - DO 20 K=1,NP - SSF(K) = ONE - 20 CONTINUE - - ELSE - -C SOME OF THE INPUT VALUES ARE NONZERO - - BMIN = BMAX - DO 30 K=1,NP - IF (BETA(K).NE.ZERO) THEN - BMIN = MIN(BMIN,ABS(BETA(K))) - END IF - 30 CONTINUE - BIGDIF = LOG10(BMAX)-LOG10(BMIN).GE.ONE - DO 40 K=1,NP - IF (BETA(K).EQ.ZERO) THEN - SSF(K) = TEN/BMIN - ELSE - IF (BIGDIF) THEN - SSF(K) = ONE/ABS(BETA(K)) - ELSE - SSF(K) = ONE/BMAX - END IF - END IF - 40 CONTINUE - - END IF - - RETURN - END -*DSCLD - SUBROUTINE DSCLD - + (N,M,X,LDX,TT,LDTT) -C***BEGIN PROLOGUE DSCLD -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SELECT SCALING VALUES FOR DELTA ACCORDING TO THE -C ALGORITHM GIVEN IN THE ODRPACK REFERENCE GUIDE -C***END PROLOGUE DSCLD - -C...SCALAR ARGUMENTS - INTEGER - + LDTT,LDX,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + TT(LDTT,M),X(LDX,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ONE,TEN,XMAX,XMIN,ZERO - INTEGER - + I,J - LOGICAL - + BIGDIF - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS,LOG10,MAX,MIN - -C...DATA STATEMENTS - DATA - + ZERO,ONE,TEN - + /0.0D0,1.0D0,10.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C BIGDIF: THE VARIABLE DESIGNATING WHETHER THERE IS A SIGNIFICANT -C DIFFERENCE IN THE MAGNITUDES OF THE NONZERO ELEMENTS OF -C X (BIGDIF=.TRUE.) OR NOT (BIGDIF=.FALSE.). -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C LDTT: THE LEADING DIMENSION OF ARRAY TT. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C ONE: THE VALUE 1.0D0. -C TT: THE SCALING VALUES FOR DELTA. -C X: THE INDEPENDENT VARIABLE. -C XMAX: THE LARGEST NONZERO MAGNITUDE. -C XMIN: THE SMALLEST NONZERO MAGNITUDE. 
-C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DSCLD - - - DO 50 J=1,M - XMAX = ABS(X(1,J)) - DO 10 I=2,N - XMAX = MAX(XMAX,ABS(X(I,J))) - 10 CONTINUE - - IF (XMAX.EQ.ZERO) THEN - -C ALL INPUT VALUES OF X(I,J), I=1,...,N, ARE ZERO - - DO 20 I=1,N - TT(I,J) = ONE - 20 CONTINUE - - ELSE - -C SOME OF THE INPUT VALUES ARE NONZERO - - XMIN = XMAX - DO 30 I=1,N - IF (X(I,J).NE.ZERO) THEN - XMIN = MIN(XMIN,ABS(X(I,J))) - END IF - 30 CONTINUE - BIGDIF = LOG10(XMAX)-LOG10(XMIN).GE.ONE - DO 40 I=1,N - IF (X(I,J).NE.ZERO) THEN - IF (BIGDIF) THEN - TT(I,J) = ONE/ABS(X(I,J)) - ELSE - TT(I,J) = ONE/XMAX - END IF - ELSE - TT(I,J) = TEN/XMIN - END IF - 40 CONTINUE - END IF - 50 CONTINUE - - RETURN - END -*DSETN - SUBROUTINE DSETN - + (N,M,X,LDX,NROW) -C***BEGIN PROLOGUE DSETN -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SELECT THE ROW AT WHICH THE DERIVATIVE WILL BE CHECKED -C***END PROLOGUE DSETN - -C...SCALAR ARGUMENTS - INTEGER - + LDX,M,N,NROW - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + X(LDX,M) - -C...LOCAL SCALARS - INTEGER - + I,J - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEX VARIABLE. -C J: AN INDEX VARIABLE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NROW: THE SELECTED ROW NUMBER OF THE INDEPENDENT VARIABLE. -C X: THE INDEPENDENT VARIABLE. - - -C***FIRST EXECUTABLE STATEMENT DSETN - - - IF ((NROW.GE.1) .AND. (NROW.LE.N)) RETURN - -C SELECT FIRST ROW OF INDEPENDENT VARIABLES WHICH CONTAINS NO ZEROS -C IF THERE IS ONE, OTHERWISE FIRST ROW IS USED. 
- - DO 20 I = 1, N - DO 10 J = 1, M - IF (X(I,J).EQ.0.0) GO TO 20 - 10 CONTINUE - NROW = I - RETURN - 20 CONTINUE - - NROW = 1 - - RETURN - END -*DSOLVE - SUBROUTINE DSOLVE(N,T,LDT,B,LDB,JOB) -C***BEGIN PROLOGUE DSOLVE -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DAXPY,DDOT -C***DATE WRITTEN 920220 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE SOLVE SYSTEMS OF THE FORM -C T * X = B OR TRANS(T) * X = B -C WHERE T IS AN UPPER OR LOWER TRIANGULAR MATRIX OF ORDER N, -C AND THE SOLUTION X OVERWRITES THE RHS B. -C (ADAPTED FROM LINPACK SUBROUTINE DTRSL) -C***REFERENCES DONGARRA J.J., BUNCH J.R., MOLER C.B., STEWART G.W., -C *LINPACK USERS GUIDE*, SIAM, 1979. -C***END PROLOGUE DSOLVE - -C...SCALAR ARGUMENTS - INTEGER - + JOB,LDB,LDT,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + B(LDB,N),T(LDT,N) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TEMP,ZERO - INTEGER - + J1,J,JN - -C...EXTERNAL FUNCTIONS - DOUBLE PRECISION - + DDOT - EXTERNAL - + DDOT - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DAXPY - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C B: ON INPUT: THE RIGHT HAND SIDE; ON EXIT: THE SOLUTION -C J1: THE FIRST NONZERO ENTRY IN T. -C J: AN INDEXING VARIABLE. -C JN: THE LAST NONZERO ENTRY IN T. -C JOB: WHAT KIND OF SYSTEM IS TO BE SOLVED, WHERE IF JOB IS -C 1 SOLVE T*X=B, T LOWER TRIANGULAR, -C 2 SOLVE T*X=B, T UPPER TRIANGULAR, -C 3 SOLVE TRANS(T)*X=B, T LOWER TRIANGULAR, -C 4 SOLVE TRANS(T)*X=B, T UPPER TRIANGULAR. -C LDB: THE LEADING DIMENSION OF ARRAY B. -C LDT: THE LEADING DIMENSION OF ARRAY T. -C N: THE NUMBER OF ROWS AND COLUMNS OF DATA IN ARRAY T. -C T: THE UPPER OR LOWER TRIDIAGONAL SYSTEM. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DSOLVE - - -C FIND FIRST NONZERO DIAGONAL ENTRY IN T - J1 = 0 - DO 10 J=1,N - IF (J1.EQ.0 .AND. 
T(J,J).NE.ZERO) THEN - J1 = J - ELSE IF (T(J,J).EQ.ZERO) THEN - B(1,J) = ZERO - END IF - 10 CONTINUE - IF (J1.EQ.0) RETURN - -C FIND LAST NONZERO DIAGONAL ENTRY IN T - JN = 0 - DO 20 J=N,J1,-1 - IF (JN.EQ.0 .AND. T(J,J).NE.ZERO) THEN - JN = J - ELSE IF (T(J,J).EQ.ZERO) THEN - B(1,J) = ZERO - END IF - 20 CONTINUE - - IF (JOB.EQ.1) THEN - -C SOLVE T*X=B FOR T LOWER TRIANGULAR - B(1,J1) = B(1,J1)/T(J1,J1) - DO 30 J = J1+1, JN - TEMP = -B(1,J-1) - CALL DAXPY(JN-J+1,TEMP,T(J,J-1),1,B(1,J),LDB) - IF (T(J,J).NE.ZERO) THEN - B(1,J) = B(1,J)/T(J,J) - ELSE - B(1,J) = ZERO - END IF - 30 CONTINUE - - ELSE IF (JOB.EQ.2) THEN - -C SOLVE T*X=B FOR T UPPER TRIANGULAR. - B(1,JN) = B(1,JN)/T(JN,JN) - DO 40 J = JN-1,J1,-1 - TEMP = -B(1,J+1) - CALL DAXPY(J,TEMP,T(1,J+1),1,B(1,1),LDB) - IF (T(J,J).NE.ZERO) THEN - B(1,J) = B(1,J)/T(J,J) - ELSE - B(1,J) = ZERO - END IF - 40 CONTINUE - - ELSE IF (JOB.EQ.3) THEN - -C SOLVE TRANS(T)*X=B FOR T LOWER TRIANGULAR. - B(1,JN) = B(1,JN)/T(JN,JN) - DO 50 J = JN-1,J1,-1 - B(1,J) = B(1,J) - DDOT(JN-J+1,T(J+1,J),1,B(1,J+1),LDB) - IF (T(J,J).NE.ZERO) THEN - B(1,J) = B(1,J)/T(J,J) - ELSE - B(1,J) = ZERO - END IF - 50 CONTINUE - - ELSE IF (JOB.EQ.4) THEN - -C SOLVE TRANS(T)*X=B FOR T UPPER TRIANGULAR. 
- B(1,J1) = B(1,J1)/T(J1,J1) - DO 60 J = J1+1,JN - B(1,J) = B(1,J) - DDOT(J-1,T(1,J),1,B(1,1),LDB) - IF (T(J,J).NE.ZERO) THEN - B(1,J) = B(1,J)/T(J,J) - ELSE - B(1,J) = ZERO - END IF - 60 CONTINUE - END IF - - RETURN - END -*DUNPAC - SUBROUTINE DUNPAC - + (N2,V1,V2,IFIX) -C***BEGIN PROLOGUE DUNPAC -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DCOPY -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COPY THE ELEMENTS OF V1 INTO THE LOCATIONS OF V2 WHICH ARE -C UNFIXED -C***END PROLOGUE DUNPAC - -C...SCALAR ARGUMENTS - INTEGER - + N2 - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + V1(N2),V2(N2) - INTEGER - + IFIX(N2) - -C...LOCAL SCALARS - INTEGER - + I,N1 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DCOPY - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C IFIX: THE VALUES DESIGNATING WHETHER THE ELEMENTS OF V2 ARE -C FIXED AT THEIR INPUT VALUES OR NOT. -C ODRPACK REFERENCE GUIDE.) -C N1: THE NUMBER OF ITEMS IN V1. -C N2: THE NUMBER OF ITEMS IN V2. -C V1: THE VECTOR OF THE UNFIXED ITEMS. -C V2: THE VECTOR OF THE FIXED AND UNFIXED ITEMS INTO WHICH THE -C ELEMENTS OF V1 ARE TO BE INSERTED. 
- - -C***FIRST EXECUTABLE STATEMENT DUNPAC - - - N1 = 0 - IF (IFIX(1).GE.0) THEN - DO 10 I = 1,N2 - IF (IFIX(I).NE.0) THEN - N1 = N1 + 1 - V2(I) = V1(N1) - END IF - 10 CONTINUE - ELSE - N1 = N2 - CALL DCOPY(N2,V1,1,V2,1) - END IF - - RETURN - END -*DVEVTR - SUBROUTINE DVEVTR - + (M,NQ,INDX, - + V,LDV,LD2V, E,LDE, VE,LDVE,LD2VE, VEV,LDVEV, - + WRK5) -C***BEGIN PROLOGUE DVEVTR -C***REFER TO DODR,DODRC -C***ROUTINES CALLED DSOLVE -C***DATE WRITTEN 910613 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE V*E*TRANS(V) FOR THE (INDX)TH M BY NQ ARRAY IN V -C***END PROLOGUE DVEVTR - -C...SCALAR ARGUMENTS - INTEGER - + INDX,LDE,LDV,LDVE,LDVEV,LD2V,LD2VE,M,NQ - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + E(LDE,M),V(LDV,LD2V,NQ),VE(LDVE,LD2VE,M),VEV(LDVEV,NQ),WRK5(M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ZERO - INTEGER - + J,L1,L2 - -C...EXTERNAL SUBROUTINES - EXTERNAL - + DSOLVE - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C INDX: THE ROW IN V IN WHICH THE M BY NQ ARRAY IS STORED. -C J: AN INDEXING VARIABLE. -C LDE: THE LEADING DIMENSION OF ARRAY E. -C LDV: THE LEADING DIMENSION OF ARRAY V. -C LDVE: THE LEADING DIMENSION OF ARRAY VE. -C LDVEV: THE LEADING DIMENSION OF ARRAY VEV. -C LD2V: THE SECOND DIMENSION OF ARRAY V. -C L1: AN INDEXING VARIABLE. -C L2: AN INDEXING VARIABLE. -C M: THE NUMBER OF COLUMNS OF DATA IN THE INDEPENDENT VARIABLE. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C E: THE M BY M MATRIX OF THE FACTORS SO ETE = (D**2 + ALPHA*T**2). -C V: AN ARRAY OF NQ BY M MATRICES. -C VE: THE NQ BY M ARRAY VE = V * INV(E) -C VEV: THE NQ BY NQ ARRAY VEV = V * INV(ETE) * TRANS(V). -C WRK5: AN M WORK VECTOR. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DVEVTR - - - IF (NQ.EQ.0 .OR. 
M.EQ.0) RETURN - - DO 140 L1 = 1,NQ - DO 110 J = 1,M - WRK5(J) = V(INDX,J,L1) - 110 CONTINUE - CALL DSOLVE(M,E,LDE,WRK5,1,4) - DO 120 J = 1,M - VE(INDX,L1,J) = WRK5(J) - 120 CONTINUE - 140 CONTINUE - - DO 230 L1 = 1,NQ - DO 220 L2 = 1,L1 - VEV(L1,L2) = ZERO - DO 210 J = 1,M - VEV(L1,L2) = VEV(L1,L2) + VE(INDX,L1,J)*VE(INDX,L2,J) - 210 CONTINUE - VEV(L2,L1) = VEV(L1,L2) - 220 CONTINUE - 230 CONTINUE - - RETURN - END -*DWGHT - SUBROUTINE DWGHT - + (N,M,WT,LDWT,LD2WT,T,LDT,WTT,LDWTT) -C***BEGIN PROLOGUE DWGHT -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SCALE MATRIX T USING WT, I.E., COMPUTE WTT = WT*T -C***END PROLOGUE DWGHT - -C...SCALAR ARGUMENTS - INTEGER - + LDT,LDWT,LDWTT,LD2WT,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + T(LDT,M),WT(LDWT,LD2WT,M),WTT(LDWTT,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + TEMP,ZERO - INTEGER - + I,J,K - -C...INTRINSIC FUNCTIONS - INTRINSIC - + ABS - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C K: AN INDEXING VARIABLE. -C LDT: THE LEADING DIMENSION OF ARRAY T. -C LDWT: THE LEADING DIMENSION OF ARRAY WT. -C LDWTT: THE LEADING DIMENSION OF ARRAY WTT. -C LD2WT: THE SECOND DIMENSION OF ARRAY WT. -C M: THE NUMBER OF COLUMNS OF DATA IN T. -C N: THE NUMBER OF ROWS OF DATA IN T. -C T: THE ARRAY BEING SCALED BY WT. -C TEMP: A TEMPORARY SCALAR. -C WT: THE WEIGHTS. -C WTT: THE RESULTS OF WEIGHTING ARRAY T BY WT. -C ARRAY WTT CAN BE THE SAME AS T ONLY IF THE ARRAYS IN WT -C ARE UPPER TRIANGULAR WITH ZEROS BELOW THE DIAGONAL. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DWGHT - - - IF (N.EQ.0 .OR. 
M.EQ.0) RETURN - - IF (WT(1,1,1).GE.ZERO) THEN - IF (LDWT.GE.N) THEN - IF (LD2WT.GE.M) THEN -C WT IS AN N-ARRAY OF M BY M MATRICES - DO 130 I=1,N - DO 120 J=1,M - TEMP = ZERO - DO 110 K=1,M - TEMP = TEMP + WT(I,J,K)*T(I,K) - 110 CONTINUE - WTT(I,J) = TEMP - 120 CONTINUE - 130 CONTINUE - ELSE -C WT IS AN N-ARRAY OF DIAGONAL MATRICES - DO 230 I=1,N - DO 220 J=1,M - WTT(I,J) = WT(I,1,J)*T(I,J) - 220 CONTINUE - 230 CONTINUE - END IF - ELSE - IF (LD2WT.GE.M) THEN -C WT IS AN M BY M MATRIX - DO 330 I=1,N - DO 320 J=1,M - TEMP = ZERO - DO 310 K=1,M - TEMP = TEMP + WT(1,J,K)*T(I,K) - 310 CONTINUE - WTT(I,J) = TEMP - 320 CONTINUE - 330 CONTINUE - ELSE -C WT IS A DIAGONAL MATRICE - DO 430 I=1,N - DO 420 J=1,M - WTT(I,J) = WT(1,1,J)*T(I,J) - 420 CONTINUE - 430 CONTINUE - END IF - END IF - ELSE -C WT IS A SCALAR - DO 520 J=1,M - DO 510 I=1,N - WTT(I,J) = ABS(WT(1,1,1))*T(I,J) - 510 CONTINUE - 520 CONTINUE - END IF - - RETURN - END -*DWINF - SUBROUTINE DWINF - + (N,M,NP,NQ,LDWE,LD2WE,ISODR, - + DELTAI,EPSI,XPLUSI,FNI,SDI,VCVI, - + RVARI,WSSI,WSSDEI,WSSEPI,RCONDI,ETAI, - + OLMAVI,TAUI,ALPHAI,ACTRSI,PNORMI,RNORSI,PRERSI, - + PARTLI,SSTOLI,TAUFCI,EPSMAI, - + BETA0I,BETACI,BETASI,BETANI,SI,SSI,SSFI,QRAUXI,UI, - + FSI,FJACBI,WE1I,DIFFI, - + DELTSI,DELTNI,TI,TTI,OMEGAI,FJACDI, - + WRK1I,WRK2I,WRK3I,WRK4I,WRK5I,WRK6I,WRK7I, - + LWKMN) -C***BEGIN PROLOGUE DWINF -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920619 (YYMMDD) -C***PURPOSE SET STORAGE LOCATIONS WITHIN DOUBLE PRECISION WORK SPACE -C***END PROLOGUE DWINF - -C...SCALAR ARGUMENTS - INTEGER - + ACTRSI,ALPHAI,BETACI,BETANI,BETASI,BETA0I,DELTAI,DELTNI,DELTSI, - + DIFFI,EPSI,EPSMAI,ETAI,FJACBI,FJACDI,FNI,FSI,LDWE,LD2WE,LWKMN, - + M,N,NP,NQ,OLMAVI,OMEGAI,PARTLI,PNORMI,PRERSI,QRAUXI,RCONDI, - + RNORSI,RVARI,SDI,SI,SSFI,SSI,SSTOLI,TAUFCI,TAUI,TI,TTI,UI,VCVI, - + WE1I,WRK1I,WRK2I,WRK3I,WRK4I,WRK5I,WRK6I,WRK7I, - + WSSI,WSSDEI,WSSEPI,XPLUSI - LOGICAL - + ISODR - -C...LOCAL 
SCALARS - INTEGER - + NEXT - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C ACTRSI: THE LOCATION IN ARRAY WORK OF VARIABLE ACTRS. -C ALPHAI: THE LOCATION IN ARRAY WORK OF VARIABLE ALPHA. -C BETACI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAC. -C BETANI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAN. -C BETASI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETAS. -C BETA0I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY BETA0. -C DELTAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTA. -C DELTNI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTAN. -C DELTSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DELTAS. -C DIFFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY DIFF. -C EPSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY EPS. -C EPSMAI: THE LOCATION IN ARRAY WORK OF VARIABLE EPSMAC. -C ETAI: THE LOCATION IN ARRAY WORK OF VARIABLE ETA. -C FJACBI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FJACB. -C FJACDI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FJACD. -C FNI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FN. -C FSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY FS. -C ISODR: THE VARIABLE DESIGNATING WHETHER THE SOLUTION IS BY ODR -C (ISODR=TRUE) OR BY OLS (ISODR=FALSE). -C LDWE: THE LEADING DIMENSION OF ARRAY WE. -C LD2WE: THE SECOND DIMENSION OF ARRAY WE. -C LWKMN: THE MINIMUM ACCEPTABLE LENGTH OF VECTOR WORK. -C M: THE NUMBER OF COLUMNS OF DATA IN THE EXPLANATORY VARIABLE. -C N: THE NUMBER OF OBSERVATIONS. -C NEXT: THE NEXT AVAILABLE LOCATION WITH WORK. -C NP: THE NUMBER OF FUNCTION PARAMETERS. -C NQ: THE NUMBER OF RESPONSES PER OBSERVATION. -C OLMAVI: THE LOCATION IN ARRAY WORK OF VARIABLE OLMAVG. -C OMEGAI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY OMEGA. -C PARTLI: THE LOCATION IN ARRAY WORK OF VARIABLE PARTOL. -C PNORMI: THE LOCATION IN ARRAY WORK OF VARIABLE PNORM. -C PRERSI: THE LOCATION IN ARRAY WORK OF VARIABLE PRERS. -C QRAUXI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY QRAUX. 
-C RCONDI: THE LOCATION IN ARRAY WORK OF VARIABLE RCONDI. -C RNORSI: THE LOCATION IN ARRAY WORK OF VARIABLE RNORMS. -C RVARI: THE LOCATION IN ARRAY WORK OF VARIABLE RVAR. -C SDI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SD. -C SI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY S. -C SSFI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SSF. -C SSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY SS. -C SSTOLI: THE LOCATION IN ARRAY WORK OF VARIABLE SSTOL. -C TAUFCI: THE LOCATION IN ARRAY WORK OF VARIABLE TAUFAC. -C TAUI: THE LOCATION IN ARRAY WORK OF VARIABLE TAU. -C TI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY T. -C TTI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY TT. -C UI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY U. -C VCVI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY VCV. -C WE1I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WE1. -C WRK1I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK1. -C WRK2I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK2. -C WRK3I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK3. -C WRK4I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK4. -C WRK5I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK5. -C WRK6I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK6. -C WRK7I: THE STARTING LOCATION IN ARRAY WORK OF ARRAY WRK7. -C WSSI: THE LOCATION IN ARRAY WORK OF VARIABLE WSS. -C WSSDEI: THE LOCATION IN ARRAY WORK OF VARIABLE WSSDEL. -C WSSEPI: THE LOCATION IN ARRAY WORK OF VARIABLE WSSEPS. -C XPLUSI: THE STARTING LOCATION IN ARRAY WORK OF ARRAY XPLUSD. - - -C***FIRST EXECUTABLE STATEMENT DWINF - - - IF (N.GE.1 .AND. M.GE.1 .AND. NP.GE.1 .AND. NQ.GE.1 .AND. - + LDWE.GE.1 .AND. 
LD2WE.GE.1) THEN - - DELTAI = 1 - EPSI = DELTAI + N*M - XPLUSI = EPSI + N*NQ - FNI = XPLUSI + N*M - SDI = FNI + N*NQ - VCVI = SDI + NP - RVARI = VCVI + NP*NP - - WSSI = RVARI + 1 - WSSDEI = WSSI + 1 - WSSEPI = WSSDEI + 1 - RCONDI = WSSEPI + 1 - ETAI = RCONDI + 1 - OLMAVI = ETAI + 1 - - TAUI = OLMAVI + 1 - ALPHAI = TAUI + 1 - ACTRSI = ALPHAI + 1 - PNORMI = ACTRSI + 1 - RNORSI = PNORMI + 1 - PRERSI = RNORSI + 1 - PARTLI = PRERSI + 1 - SSTOLI = PARTLI + 1 - TAUFCI = SSTOLI + 1 - EPSMAI = TAUFCI + 1 - BETA0I = EPSMAI + 1 - - BETACI = BETA0I + NP - BETASI = BETACI + NP - BETANI = BETASI + NP - SI = BETANI + NP - SSI = SI + NP - SSFI = SSI + NP - QRAUXI = SSFI + NP - UI = QRAUXI + NP - FSI = UI + NP - - FJACBI = FSI + N*NQ - - WE1I = FJACBI + N*NP*NQ - - DIFFI = WE1I + LDWE*LD2WE*NQ - - NEXT = DIFFI + NQ*(NP+M) - - IF (ISODR) THEN - DELTSI = NEXT - DELTNI = DELTSI + N*M - TI = DELTNI + N*M - TTI = TI + N*M - OMEGAI = TTI + N*M - FJACDI = OMEGAI + NQ*NQ - WRK1I = FJACDI + N*M*NQ - NEXT = WRK1I + N*M*NQ - ELSE - DELTSI = DELTAI - DELTNI = DELTAI - TI = DELTAI - TTI = DELTAI - OMEGAI = DELTAI - FJACDI = DELTAI - WRK1I = DELTAI - END IF - - WRK2I = NEXT - WRK3I = WRK2I + N*NQ - WRK4I = WRK3I + NP - WRK5I = WRK4I + M*M - WRK6I = WRK5I + M - WRK7I = WRK6I + N*NQ*NP - NEXT = WRK7I + 5*NQ - - LWKMN = NEXT - ELSE - DELTAI = 1 - EPSI = 1 - XPLUSI = 1 - FNI = 1 - SDI = 1 - VCVI = 1 - RVARI = 1 - WSSI = 1 - WSSDEI = 1 - WSSEPI = 1 - RCONDI = 1 - ETAI = 1 - OLMAVI = 1 - TAUI = 1 - ALPHAI = 1 - ACTRSI = 1 - PNORMI = 1 - RNORSI = 1 - PRERSI = 1 - PARTLI = 1 - SSTOLI = 1 - TAUFCI = 1 - EPSMAI = 1 - BETA0I = 1 - BETACI = 1 - BETASI = 1 - BETANI = 1 - SI = 1 - SSI = 1 - SSFI = 1 - QRAUXI = 1 - FSI = 1 - UI = 1 - FJACBI = 1 - WE1I = 1 - DIFFI = 1 - DELTSI = 1 - DELTNI = 1 - TI = 1 - TTI = 1 - FJACDI = 1 - OMEGAI = 1 - WRK1I = 1 - WRK2I = 1 - WRK3I = 1 - WRK4I = 1 - WRK5I = 1 - WRK6I = 1 - WRK7I = 1 - LWKMN = 1 - END IF - - RETURN - END -*DXMY - SUBROUTINE DXMY - + 
(N,M,X,LDX,Y,LDY,XMY,LDXMY) -C***BEGIN PROLOGUE DXMY -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE XMY = X - Y -C***END PROLOGUE DXMY - -C...SCALAR ARGUMENTS - INTEGER - + LDX,LDXMY,LDY,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + X(LDX,M),XMY(LDXMY,M),Y(LDY,M) - -C...LOCAL SCALARS - INTEGER - + I,J - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDXMY: THE LEADING DIMENSION OF ARRAY XMY. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C M: THE NUMBER OF COLUMNS OF DATA IN ARRAYS X AND Y. -C N: THE NUMBER OF ROWS OF DATA IN ARRAYS X AND Y. -C X: THE FIRST OF THE TWO ARRAYS. -C XMY: THE VALUES OF X-Y. -C Y: THE SECOND OF THE TWO ARRAYS. - - -C***FIRST EXECUTABLE STATEMENT DXMY - - - DO 20 J=1,M - DO 10 I=1,N - XMY(I,J) = X(I,J) - Y(I,J) - 10 CONTINUE - 20 CONTINUE - - RETURN - END -*DXPY - SUBROUTINE DXPY - + (N,M,X,LDX,Y,LDY,XPY,LDXPY) -C***BEGIN PROLOGUE DXPY -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE COMPUTE XPY = X + Y -C***END PROLOGUE DXPY - -C...SCALAR ARGUMENTS - INTEGER - + LDX,LDXPY,LDY,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + X(LDX,M),XPY(LDXPY,M),Y(LDY,M) - -C...LOCAL SCALARS - INTEGER - + I,J - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C LDX: THE LEADING DIMENSION OF ARRAY X. -C LDXPY: THE LEADING DIMENSION OF ARRAY XPY. -C LDY: THE LEADING DIMENSION OF ARRAY Y. -C M: THE NUMBER OF COLUMNS OF DATA IN ARRAYS X AND Y. -C N: THE NUMBER OF ROWS OF DATA IN ARRAYS X AND Y. -C X: THE FIRST OF THE TWO ARRAYS TO BE ADDED TOGETHER. -C XPY: THE VALUES OF X+Y. -C Y: THE SECOND OF THE TWO ARRAYS TO BE ADDED TOGETHER. 
- - -C***FIRST EXECUTABLE STATEMENT DXPY - - - DO 20 J=1,M - DO 10 I=1,N - XPY(I,J) = X(I,J) + Y(I,J) - 10 CONTINUE - 20 CONTINUE - - RETURN - END -*DZERO - SUBROUTINE DZERO - + (N,M,A,LDA) -C***BEGIN PROLOGUE DZERO -C***REFER TO DODR,DODRC -C***ROUTINES CALLED (NONE) -C***DATE WRITTEN 860529 (YYMMDD) -C***REVISION DATE 920304 (YYMMDD) -C***PURPOSE SET A = ZERO -C***END PROLOGUE DZERO - -C...SCALAR ARGUMENTS - INTEGER - + LDA,M,N - -C...ARRAY ARGUMENTS - DOUBLE PRECISION - + A(LDA,M) - -C...LOCAL SCALARS - DOUBLE PRECISION - + ZERO - INTEGER - + I,J - -C...DATA STATEMENTS - DATA - + ZERO - + /0.0D0/ - -C...VARIABLE DEFINITIONS (ALPHABETICALLY) -C A: THE ARRAY TO BE SET TO ZERO. -C I: AN INDEXING VARIABLE. -C J: AN INDEXING VARIABLE. -C LDA: THE LEADING DIMENSION OF ARRAY A. -C M: THE NUMBER OF COLUMNS TO BE SET TO ZERO. -C N: THE NUMBER OF ROWS TO BE SET TO ZERO. -C ZERO: THE VALUE 0.0D0. - - -C***FIRST EXECUTABLE STATEMENT DZERO - - - DO 20 J=1,M - DO 10 I=1,N - A(I,J) = ZERO - 10 CONTINUE - 20 CONTINUE - - RETURN - END diff --git a/scipy-0.10.1/scipy/odr/odrpack/dlunoc.f b/scipy-0.10.1/scipy/odr/odrpack/dlunoc.f deleted file mode 100644 index 934ac343ad..0000000000 --- a/scipy-0.10.1/scipy/odr/odrpack/dlunoc.f +++ /dev/null @@ -1,22 +0,0 @@ - subroutine dluno - + (lun, fn) - - integer lun - character*(*) fn - - open(unit=lun, file=fn, status='new') - - return - - end - - subroutine dlunc - + (lun) - - integer lun - - close(unit=lun) - - return - - end diff --git a/scipy-0.10.1/scipy/odr/setup.py b/scipy-0.10.1/scipy/odr/setup.py deleted file mode 100644 index efc8ad1e6e..0000000000 --- a/scipy-0.10.1/scipy/odr/setup.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='', top_path=None): - import warnings - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info, BlasNotFoundError - config = Configuration('odr', parent_package, top_path) - - 
libodr_files = ['d_odr.f', - 'd_mprec.f', - 'dlunoc.f'] - - blas_info = get_info('blas_opt') - if blas_info: - libodr_files.append('d_lpk.f') - else: - warnings.warn(BlasNotFoundError.__doc__) - libodr_files.append('d_lpkbls.f') - - libodr = [join('odrpack', x) for x in libodr_files] - config.add_library('odrpack', sources=libodr) - sources = ['__odrpack.c'] - libraries = ['odrpack'] + blas_info.pop('libraries', []) - include_dirs = ['.'] + blas_info.pop('include_dirs', []) - config.add_extension('__odrpack', - sources=sources, - libraries=libraries, - include_dirs=include_dirs, - depends=['odrpack.h'], - **blas_info - ) - - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/odr/setupscons.py b/scipy-0.10.1/scipy/odr/setupscons.py deleted file mode 100644 index a9b5af4d6a..0000000000 --- a/scipy-0.10.1/scipy/odr/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('odr', parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/odr/tests/test_odr.py b/scipy-0.10.1/scipy/odr/tests/test_odr.py deleted file mode 100644 index 4f3272f514..0000000000 --- a/scipy-0.10.1/scipy/odr/tests/test_odr.py +++ /dev/null @@ -1,328 +0,0 @@ -# Scipy imports. 
-import numpy as np -from numpy import pi -from numpy.testing import assert_array_almost_equal, TestCase, \ - run_module_suite, assert_equal -from scipy.odr import Data, Model, ODR, RealData, odr_stop - - -class TestODR(TestCase): - - # Explicit Example - - def explicit_fcn(self, B, x): - ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) - return ret - - def explicit_fjd(self, B, x): - eBx = np.exp(B[2]*x) - ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx - return ret - - def explicit_fjb(self, B, x): - eBx = np.exp(B[2]*x) - res = np.vstack([np.ones(x.shape[-1]), - np.power(eBx-1.0, 2), - B[1]*2.0*(eBx-1.0)*eBx*x]) - return res - - def test_explicit(self): - explicit_mod = Model( - self.explicit_fcn, - fjacb=self.explicit_fjb, - fjacd=self.explicit_fjd, - meta=dict(name='Sample Explicit Model', - ref='ODRPACK UG, pg. 39'), - ) - explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], - [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, - 1213.8,1215.5,1212.]) - explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], - ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) - explicit_odr.set_job(deriv=2) - - out = explicit_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 1.2646548050648876e+03, -5.4018409956678255e+01, - -8.7849712165253724e-02]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 1.0349270280543437, 1.583997785262061 , 0.0063321988657267]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 4.4949592379003039e-01, -3.7421976890364739e-01, - -8.0978217468468912e-04], - [ -3.7421976890364739e-01, 1.0529686462751804e+00, - -1.9453521827942002e-03], - [ -8.0978217468468912e-04, -1.9453521827942002e-03, - 1.6827336938454476e-05]]), - ) - - - # Implicit Example - - def implicit_fcn(self, B, x): - return (B[2]*np.power(x[0]-B[0], 2) + - 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + - B[4]*np.power(x[1]-B[1], 2) - 1.0) - - def test_implicit(self): - implicit_mod = Model( - self.implicit_fcn, - implicit=1, - 
meta=dict(name='Sample Implicit Model', - ref='ODRPACK UG, pg. 49'), - ) - implicit_dat = Data([ - [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, - -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], - [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, - -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], - 1, - ) - implicit_odr = ODR(implicit_dat, implicit_mod, - beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) - - out = implicit_odr.run() - assert_array_almost_equal( - out.beta, - np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, - 0.0162299708984738, 0.0797537982976416]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 0.1113840353364371, 0.1097673310686467, 0.0041060738314314, - 0.0027500347539902, 0.0034962501532468]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 2.1089274602333052e+00, -1.9437686411979040e+00, - 7.0263550868344446e-02, -4.7175267373474862e-02, - 5.2515575927380355e-02], - [ -1.9437686411979040e+00, 2.0481509222414456e+00, - -6.1600515853057307e-02, 4.6268827806232933e-02, - -5.8822307501391467e-02], - [ 7.0263550868344446e-02, -6.1600515853057307e-02, - 2.8659542561579308e-03, -1.4628662260014491e-03, - 1.4528860663055824e-03], - [ -4.7175267373474862e-02, 4.6268827806232933e-02, - -1.4628662260014491e-03, 1.2855592885514335e-03, - -1.2692942951415293e-03], - [ 5.2515575927380355e-02, -5.8822307501391467e-02, - 1.4528860663055824e-03, -1.2692942951415293e-03, - 2.0778813389755596e-03]]), - ) - - - # Multi-variable Example - - def multi_fcn(self, B, x): - if (x < 0.0).any(): - raise odr_stop - theta = pi*B[3]/2. 
- ctheta = np.cos(theta) - stheta = np.sin(theta) - omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) - phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) - r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + - np.power(omega*stheta, 2)), -B[4]) - ret = np.vstack([B[1] + r*np.cos(B[4]*phi), - r*np.sin(B[4]*phi)]) - return ret - - def test_multi(self): - multi_mod = Model( - self.multi_fcn, - meta=dict(name='Sample Multi-Response Model', - ref='ODRPACK UG, pg. 56'), - ) - - multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, - 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, - 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) - multi_y = np.array([ - [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, - 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, - 2.934, 2.876, 2.838, 2.798, 2.759], - [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, - 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, - 0.202, 0.182, 0.168, 0.153, 0.139], - ]) - n = len(multi_x) - multi_we = np.zeros((2, 2, n), dtype=float) - multi_ifixx = np.ones(n, dtype=int) - multi_delta = np.zeros(n, dtype=float) - - multi_we[0,0,:] = 559.6 - multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 - multi_we[1,1,:] = 8397.0 - - for i in range(n): - if multi_x[i] < 100.0: - multi_ifixx[i] = 0 - elif multi_x[i] <= 150.0: - pass # defaults are fine - elif multi_x[i] <= 1000.0: - multi_delta[i] = 25.0 - elif multi_x[i] <= 10000.0: - multi_delta[i] = 560.0 - elif multi_x[i] <= 100000.0: - multi_delta[i] = 9500.0 - else: - multi_delta[i] = 144000.0 - if multi_x[i] == 100.0 or multi_x[i] == 150.0: - multi_we[:,:,i] = 0.0 - - multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), - we=multi_we) - multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], - delta0=multi_delta, ifixx=multi_ifixx) - multi_odr.set_job(deriv=1, del_init=1) - - out = multi_odr.run() - assert_array_almost_equal( - 
out.beta, - np.array([ 4.3799880305938963, 2.4333057577497703, 8.0028845899503978, - 0.5101147161764654, 0.5173902330489161]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 0.0130625231081944, 0.0130499785273277, 0.1167085962217757, - 0.0132642749596149, 0.0288529201353984]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 0.0064918418231375, 0.0036159705923791, 0.0438637051470406, - -0.0058700836512467, 0.011281212888768 ], - [ 0.0036159705923791, 0.0064793789429006, 0.0517610978353126, - -0.0051181304940204, 0.0130726943624117], - [ 0.0438637051470406, 0.0517610978353126, 0.5182263323095322, - -0.0563083340093696, 0.1269490939468611], - [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, - 0.0066939246261263, -0.0140184391377962], - [ 0.011281212888768 , 0.0130726943624117, 0.1269490939468611, - -0.0140184391377962, 0.0316733013820852]]), - ) - - - # Pearson's Data - # K. Pearson, Philosophical Magazine, 2, 559 (1901) - - def pearson_fcn(self, B, x): - return B[0] + B[1]*x - - def test_pearson(self): - p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) - p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) - p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) - p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) - - p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) - - # Reverse the data to test invariance of results - pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) - - p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) - - p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) - pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) - - out = p_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 5.4767400299231674, -0.4796082367610305]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 0.3590121690702467, 0.0706291186037444]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 0.0854275622946333, -0.0161807025443155], - [-0.0161807025443155, 0.003306337993922 ]]), - ) - - rout = 
pr_odr.run() - assert_array_almost_equal( - rout.beta, - np.array([ 11.4192022410781231, -2.0850374506165474]), - ) - assert_array_almost_equal( - rout.sd_beta, - np.array([ 0.9820231665657161, 0.3070515616198911]), - ) - assert_array_almost_equal( - rout.cov_beta, - np.array([[ 0.6391799462548782, -0.1955657291119177], - [-0.1955657291119177, 0.0624888159223392]]), - ) - - # Lorentz Peak - # The data is taken from one of the undergraduate physics labs I performed. - - def lorentz(self, beta, x): - return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - - beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) - - def test_lorentz(self): - l_sy = np.array([.29]*18) - l_sx = np.array([.000972971,.000948268,.000707632,.000706679, - .000706074, .000703918,.000698955,.000456856, - .000455207,.000662717,.000654619,.000652694, - .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) - - l_dat = RealData( - [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, - 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, - 3.6562, 3.62498, 3.55525, 3.41886], - [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, - 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], - sx=l_sx, - sy=l_sy, - ) - l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) - l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) - - out = l_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 1.4306780846149925e+03, 1.3390509034538309e-01, - 3.7798193600109009e+00]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 7.3621186811330963e-01, 3.5068899941471650e-04, - 2.4451209281408992e-04]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 2.4714409064597873e-01, -6.9067261911110836e-05, - -3.1236953270424990e-05], - [ -6.9067261911110836e-05, 5.6077531517333009e-08, - 3.6133261832722601e-08], - [ -3.1236953270424990e-05, 3.6133261832722601e-08, - 2.7261220025171730e-08]]), - ) - - - def test_ticket_1253(self): - def linear(c, x): 
- return c[0]*x+c[1] - - c = [2.0, 3.0] - x = np.linspace(0, 10) - y = linear(c, x) - - model = Model(linear) - data = Data(x, y, wd=1.0, we=1.0) - job = ODR(data, model, beta0=[1.0, 1.0]) - result = job.run() - assert_equal(result.info, 2) - - -if __name__ == "__main__": - run_module_suite() -#### EOF ####################################################################### diff --git a/scipy-0.10.1/scipy/optimize/SConscript b/scipy-0.10.1/scipy/optimize/SConscript deleted file mode 100644 index 295e07b039..0000000000 --- a/scipy-0.10.1/scipy/optimize/SConscript +++ /dev/null @@ -1,87 +0,0 @@ -# Last Change: Sun Jan 04 07:00 PM 2009 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK, CheckF77Clib - -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') -env.Append(CPPPATH = ['Zeros']) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckLAPACK() -if not st: - has_lapack = 0 -else: - has_lapack = 1 - -config.Finish() -write_info(env) - -#========== -# Build -#========== - -# minpack lib -minpack_src = [pjoin("minpack", s) for s in ["chkder.f", "dogleg.f", -"dpmpar.f", "enorm.f", "fdjac1.f", "fdjac2.f", "hybrd.f", "hybrd1.f", -"hybrj.f", "hybrj1.f", "lmder.f", "lmder1.f", "lmdif.f", "lmdif1.f", "lmpar.f", -"lmstr.f", "lmstr1.f", "qform.f", "qrfac.f", "qrsolv.f", -"r1mpyq.f", "r1updt.f", "rwupdt.f"]] -env.DistutilsStaticExtLibrary('minpack', source = minpack_src) - -# rootfind lib -rootfind_src = 
[pjoin("Zeros", s) for s in ["bisect.c", "brenth.c", -"brentq.c", "ridder.c"]] -env.DistutilsStaticExtLibrary('rootfind', source = rootfind_src) - -env.AppendUnique(LIBS = ['minpack', 'rootfind']) -env.AppendUnique(LIBPATH = '.') - -# _minpack pyextension -env.NumpyPythonExtension('_minpack', '_minpackmodule.c') - -# _zeros pyextension -env.NumpyPythonExtension('_zeros', 'zeros.c') - -# _lbfgsb pyextension -src = [pjoin('lbfgsb', i) for i in ['lbfgsb.pyf', 'routines.f']] -env.NumpyPythonExtension('_lbfgsb', source = src) - -# _cobyla pyextension -src = [pjoin('cobyla', i) for i in ['cobyla2.f', 'trstlp.f', 'cobyla.pyf']] -env.NumpyPythonExtension('_cobyla', source = src) - -# _minpack2 pyextension -src = [pjoin('minpack2', i) for i in ['dcsrch.f', 'dcstep.f', 'minpack2.pyf']] -env.NumpyPythonExtension('minpack2', source = src) - -# _nnls pyextension -src = [pjoin('nnls', i) for i in ['nnls.f', 'nnls.pyf']] -env.NumpyPythonExtension('_nnls', source = src) - -# moduleTNC pyextension -env.NumpyPythonExtension('moduleTNC', - source = [pjoin('tnc', i) for i in \ - ['moduleTNC.c', 'tnc.c']]) - -# _slsqp pyextension -src = [pjoin('slsqp', i) for i in ['slsqp_optmz.f', 'slsqp.pyf']] -env.NumpyPythonExtension('_slsqp', source = src) diff --git a/scipy-0.10.1/scipy/optimize/SConstruct b/scipy-0.10.1/scipy/optimize/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/optimize/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/optimize/Zeros/bisect.c b/scipy-0.10.1/scipy/optimize/Zeros/bisect.c deleted file mode 100644 index 3b8bfb4d73..0000000000 --- a/scipy-0.10.1/scipy/optimize/Zeros/bisect.c +++ /dev/null @@ -1,34 +0,0 @@ -/* Written by Charles Harris charles.harris@sdl.usu.edu */ - -#include "zeros.h" - -double -bisect(callback_type f, double xa, double xb, double xtol, double rtol, int iter, 
default_parameters *params) -{ - int i; - double dm,xm,fm,fa,fb,tol; - - tol = xtol + rtol*(fabs(xa) + fabs(xb)); - - fa = (*f)(xa,params); - fb = (*f)(xb,params); - params->funcalls = 2; - if (fa*fb > 0) {ERROR(params,SIGNERR,0.0);} - if (fa == 0) return xa; - if (fb == 0) return xb; - dm = xb - xa; - params->iterations = 0; - for(i=0; iiterations++; - dm *= .5; - xm = xa + dm; - fm = (*f)(xm,params); - params->funcalls++; - if (fm*fa >= 0) { - xa = xm; - } - if (fm == 0 || fabs(dm) < tol) - return xm; - } - ERROR(params,CONVERR,xa); -} diff --git a/scipy-0.10.1/scipy/optimize/Zeros/brenth.c b/scipy-0.10.1/scipy/optimize/Zeros/brenth.c deleted file mode 100644 index ed4b283bf0..0000000000 --- a/scipy-0.10.1/scipy/optimize/Zeros/brenth.c +++ /dev/null @@ -1,103 +0,0 @@ - -/* Written by Charles Harris charles.harris@sdl.usu.edu */ - -#include "zeros.h" - -/* - At the top of the loop the situation is the following: - - 1. the root is bracketed between xa and xb - 2. xa is the most recent estimate - 3. xp is the previous estimate - 4. |fp| < |fb| - - The order of xa and xp doesn't matter, but assume xp < xb. Then xa lies to - the right of xp and the assumption is that xa is increasing towards the root. - In this situation we will attempt quadratic extrapolation as long as the - condition - - * |fa| < |fp| < |fb| - - is satisfied. That is, the function value is decreasing as we go along. - Note the 4 above implies that the right inequlity already holds. - - The first check is that xa is still to the left of the root. If not, xb is - replaced by xp and the interval reverses, with xb < xa. In this situation - we will try linear interpolation. That this has happened is signaled by the - equality xb == xp; - - - The second check is that |fa| < |fb|. If this is not the case, we swap - xa and xb and resort to bisection. 
- -*/ - -double -brenth(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params) -{ - double xpre = xa, xcur = xb; - double xblk = 0.0, fpre, fcur, fblk = 0.0, spre = 0.0, scur = 0.0, sbis, tol; - double stry, dpre, dblk; - int i; - - fpre = (*f)(xpre,params); - fcur = (*f)(xcur,params); - params->funcalls = 2; - if (fpre*fcur > 0) {ERROR(params,SIGNERR,0.0);} - if (fpre == 0) return xpre; - if (fcur == 0) return xcur; - params->iterations = 0; - for(i = 0; i < iter; i++) { - params->iterations++; - if (fpre*fcur < 0) { - xblk = xpre; - fblk = fpre; - spre = scur = xcur - xpre; - } - if (fabs(fblk) < fabs(fcur)) { - xpre = xcur; xcur = xblk; xblk = xpre; - fpre = fcur; fcur = fblk; fblk = fpre; - } - - tol = xtol + rtol*fabs(xcur); - sbis = (xblk - xcur)/2; - if (fcur == 0 || fabs(sbis) < tol) - return xcur; - - if (fabs(spre) > tol && fabs(fcur) < fabs(fpre)) { - if (xpre == xblk) { - /* interpolate */ - stry = -fcur*(xcur - xpre)/(fcur - fpre); - } - else { - /* extrapolate */ - dpre = (fpre - fcur)/(xpre - xcur); - dblk = (fblk - fcur)/(xblk - xcur); - stry = -fcur*(fblk - fpre)/(fblk*dpre - fpre*dblk); - } - - if (2*fabs(stry) < DMIN(fabs(spre), 3*fabs(sbis) - tol)) { - /* accept step */ - spre = scur; scur = stry; - } - else { - /* bisect */ - spre = sbis; scur = sbis; - } - } - else { - /* bisect */ - spre = sbis; scur = sbis; - } - - xpre = xcur; fpre = fcur; - if (fabs(scur) > tol) - xcur += scur; - else - xcur += (sbis > 0 ? 
tol : -tol); - - fcur = (*f)(xcur, params); - params->funcalls++; - } - ERROR(params,CONVERR,xcur); -} diff --git a/scipy-0.10.1/scipy/optimize/Zeros/brentq.c b/scipy-0.10.1/scipy/optimize/Zeros/brentq.c deleted file mode 100644 index b5c15fa298..0000000000 --- a/scipy-0.10.1/scipy/optimize/Zeros/brentq.c +++ /dev/null @@ -1,103 +0,0 @@ - -/* Written by Charles Harris charles.harris@sdl.usu.edu */ - -#include "zeros.h" - - -/* - - At the top of the loop the situation is the following: - - 1. the root is bracketed between xa and xb - 2. xa is the most recent estimate - 3. xp is the previous estimate - 4. |fp| < |fb| - - The order of xa and xp doesn't matter, but assume xp < xb. Then xa lies to - the right of xp and the assumption is that xa is increasing towards the root. - In this situation we will attempt quadratic extrapolation as long as the - condition - - * |fa| < |fp| < |fb| - - is satisfied. That is, the function value is decreasing as we go along. - Note the 4 above implies that the right inequlity already holds. - - The first check is that xa is still to the left of the root. If not, xb is - replaced by xp and the interval reverses, with xb < xa. In this situation - we will try linear interpolation. That this has happened is signaled by the - equality xb == xp; - - The second check is that |fa| < |fb|. If this is not the case, we swap - xa and xb and resort to bisection. 
- -*/ - -double -brentq(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params) -{ - double xpre = xa, xcur = xb; - double xblk = 0.0, fpre, fcur, fblk = 0.0, spre = 0.0, scur = 0.0, sbis, tol; - double stry, dpre, dblk; - int i; - - fpre = (*f)(xpre, params); - fcur = (*f)(xcur, params); - params->funcalls = 2; - if (fpre*fcur > 0) {ERROR(params,SIGNERR,0.0);} - if (fpre == 0) return xpre; - if (fcur == 0) return xcur; - params->iterations = 0; - for(i = 0; i < iter; i++) { - params->iterations++; - if (fpre*fcur < 0) { - xblk = xpre; - fblk = fpre; - spre = scur = xcur - xpre; - } - if (fabs(fblk) < fabs(fcur)) { - xpre = xcur; xcur = xblk; xblk = xpre; - fpre = fcur; fcur = fblk; fblk = fpre; - } - - tol = xtol + rtol*fabs(xcur); - sbis = (xblk - xcur)/2; - if (fcur == 0 || fabs(sbis) < tol) - return xcur; - - if (fabs(spre) > tol && fabs(fcur) < fabs(fpre)) { - if (xpre == xblk) { - /* interpolate */ - stry = -fcur*(xcur - xpre)/(fcur - fpre); - } - else { - /* extrapolate */ - dpre = (fpre - fcur)/(xpre - xcur); - dblk = (fblk - fcur)/(xblk - xcur); - stry = -fcur*(fblk*dblk - fpre*dpre) - /(dblk*dpre*(fblk - fpre)); - } - if (2*fabs(stry) < DMIN(fabs(spre), 3*fabs(sbis) - tol)) { - /* good short step */ - spre = scur; scur = stry; - } else { - /* bisect */ - spre = sbis; scur = sbis; - } - } - else { - /* bisect */ - spre = sbis; scur = sbis; - } - - xpre = xcur; fpre = fcur; - if (fabs(scur) > tol) - xcur += scur; - else - xcur += (sbis > 0 ? 
tol : -tol); - - fcur = (*f)(xcur, params); - params->funcalls++; - } - ERROR(params,CONVERR, xcur); -} diff --git a/scipy-0.10.1/scipy/optimize/Zeros/ridder.c b/scipy-0.10.1/scipy/optimize/Zeros/ridder.c deleted file mode 100644 index 1ea749a7a4..0000000000 --- a/scipy-0.10.1/scipy/optimize/Zeros/ridder.c +++ /dev/null @@ -1,46 +0,0 @@ -/* Originally written by Charles Harris charles.harris@sdl.usu.edu */ -/* Modified by Travis Oliphant to not depend on Python */ - -#include "zeros.h" - -/* Sets params->error_num SIGNERR for sign_error; - CONVERR for convergence_error; -*/ - -double -ridder(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params) -{ - int i; - double dm,dn,xm,xn=0.0,fn,fm,fa,fb,tol; - - tol = xtol + rtol*(fabs(xa) + fabs(xb)); - fa = (*f)(xa,params); - fb = (*f)(xb,params); - params->funcalls = 2; - if (fa*fb > 0) {ERROR(params,SIGNERR,0.0);} - if (fa == 0) return xa; - if (fb == 0) return xb; - params->iterations=0; - for(i=0; iiterations++; - dm = 0.5*(xb - xa); - xm = xa + dm; - fm = (*f)(xm,params); - dn = SIGN(fb - fa)*dm*fm/sqrt(fm*fm - fa*fb); - xn = xm - SIGN(dn)*DMIN(fabs(dn),fabs(dm) - .5*tol); - fn = (*f)(xn,params); - params->funcalls += 2; - if (fn*fm < 0.0) { - xa = xn; fa = fn; xb = xm; fb = fm; - } - else if (fn*fa < 0.0) { - xb = xn; fb = fn; - } - else { - xa = xn; fa = fn; - } - if (fn == 0.0 || fabs(xb - xa) < tol) - return xn; - } - ERROR(params,CONVERR,xn); -} diff --git a/scipy-0.10.1/scipy/optimize/Zeros/zeros.h b/scipy-0.10.1/scipy/optimize/Zeros/zeros.h deleted file mode 100644 index 3758698d6e..0000000000 --- a/scipy-0.10.1/scipy/optimize/Zeros/zeros.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Written by Charles Harris charles.harris@sdl.usu.edu */ - -/* Modified to not depend on Python everywhere by Travis Oliphant. 
- */ - - -#ifndef ZEROS_H -#define ZEROS_H - -#define ZEROS_PARAM_HEAD int funcalls; int iterations; int error_num - -typedef struct { - ZEROS_PARAM_HEAD; -} default_parameters; - -static double dminarg1,dminarg2; -#define DMIN(a,b) (dminarg1=(a),dminarg2=(b),(dminarg1) < (dminarg2) ?\ - (dminarg1) : (dminarg2)) - -#define SIGN(a) ((a) > 0.0 ? 1.0 : -1.0) -#define ERROR(params,num,val) (params)->error_num=(num); return (val) -#define SIGNERR -1 -#define CONVERR -2 - -typedef double (*callback_type)(double,void*); -typedef double (*solver_type)(callback_type, double, double, double, double, int,default_parameters*); - -extern double bisect(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params); -extern double ridder(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params); -extern double brenth(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params); -extern double brentq(callback_type f, double xa, double xb, double xtol, double rtol, int iter, default_parameters *params); - - -extern double fabs(double); -extern double sqrt(double); - -#endif diff --git a/scipy-0.10.1/scipy/optimize/__init__.py b/scipy-0.10.1/scipy/optimize/__init__.py deleted file mode 100644 index c0e59e9fea..0000000000 --- a/scipy-0.10.1/scipy/optimize/__init__.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -===================================================== -Optimization and root finding (:mod:`scipy.optimize`) -===================================================== - -.. currentmodule:: scipy.optimize - -Optimization -============ - -General-purpose ---------------- - -.. 
autosummary:: - :toctree: generated/ - - fmin - Nelder-Mead Simplex algorithm - fmin_powell - Powell's (modified) level set method - fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm - fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno) - fmin_ncg - Line-search Newton Conjugate Gradient - leastsq - Minimize the sum of squares of M equations in N unknowns - -Constrained (multivariate) --------------------------- - -.. autosummary:: - :toctree: generated/ - - fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer - fmin_tnc - Truncated Newton code - fmin_cobyla - Constrained optimization by linear approximation - fmin_slsqp - Minimization using sequential least-squares programming - nnls - Linear least-squares problem with non-negativity constraint - -Global ------- - -.. autosummary:: - :toctree: generated/ - - anneal - Simulated annealing - brute - Brute force searching optimizer - -Scalar function minimizers --------------------------- - -.. autosummary:: - :toctree: generated/ - - fminbound - Bounded minimization of a scalar function - brent - 1-D function minimization using Brent method - golden - 1-D function minimization using Golden Section method - bracket - Bracket a minimum, given two starting points - -Fitting -======= - -.. autosummary:: - :toctree: generated/ - - curve_fit -- Fit curve to a set of points - -Root finding -============ - -Scalar functions ----------------- - -.. autosummary:: - :toctree: generated/ - - brentq - quadratic interpolation Brent method - brenth - Brent method, modified by Harris with hyperbolic extrapolation - ridder - Ridder's method - bisect - Bisection method - newton - Secant method or Newton's method - -Fixed point finding: - -.. autosummary:: - :toctree: generated/ - - fixed_point - Single-variable fixed-point solver - -Multidimensional ----------------- - -General nonlinear solvers: - -.. 
autosummary:: - :toctree: generated/ - - fsolve - Non-linear multi-variable equation solver - broyden1 - Broyden's first method - broyden2 - Broyden's second method - -Large-scale nonlinear solvers: - -.. autosummary:: - :toctree: generated/ - - newton_krylov - anderson - -Simple iterations: - -.. autosummary:: - :toctree: generated/ - - excitingmixing - linearmixing - diagbroyden - -:mod:`Additional information on the nonlinear solvers ` - -Utility Functions -================= - -.. autosummary:: - :toctree: generated/ - - line_search - Return a step that satisfies the strong Wolfe conditions - check_grad - Check the supplied derivative using finite differences - -""" - -from optimize import * -from minpack import * -from zeros import * -from anneal import * -from lbfgsb import fmin_l_bfgs_b -from tnc import fmin_tnc -from cobyla import fmin_cobyla -from nonlin import * -from slsqp import fmin_slsqp -from nnls import nnls - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/optimize/__minpack.h b/scipy-0.10.1/scipy/optimize/__minpack.h deleted file mode 100644 index 904e0c6761..0000000000 --- a/scipy-0.10.1/scipy/optimize/__minpack.h +++ /dev/null @@ -1,694 +0,0 @@ -/* This file is used to make _multipackmodule.c */ -/* $Revision$ */ -/* module_methods: - {"_hybrd", minpack_hybrd, METH_VARARGS, doc_hybrd}, - {"_hybrj", minpack_hybrj, METH_VARARGS, doc_hybrj}, - {"_lmdif", minpack_lmdif, METH_VARARGS, doc_lmdif}, - {"_lmder", minpack_lmder, METH_VARARGS, doc_lmder}, - {"_chkder", minpack_chkder, METH_VARARGS, doc_chkder}, - */ - -/* link libraries: - minpack - linpack_lite - blas -*/ - -/* python files: - minpack.py -*/ - -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -/* nothing to do in that case */ -#else -#define CHKDER chkder -#define HYBRD hybrd -#define HYBRJ hybrj -#define LMDIF lmdif -#define LMDER lmder -#define LMSTR lmstr 
-#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define CHKDER CHKDER_ -#define HYBRD HYBRD_ -#define HYBRJ HYBRJ_ -#define LMDIF LMDIF_ -#define LMDER LMDER_ -#define LMSTR LMSTR_ -#else -#define CHKDER chkder_ -#define HYBRD hybrd_ -#define HYBRJ hybrj_ -#define LMDIF lmdif_ -#define LMDER lmder_ -#define LMSTR lmstr_ -#endif -#endif - -extern void CHKDER(int*,int*,double*,double*,double*,int*,double*,double*,int*,double*); -extern void HYBRD(void*,int*,double*,double*,double*,int*,int*,int*,double*,double*,int*,double*,int*,int*,int*,double*,int*,double*,int*,double*,double*,double*,double*,double*); -extern void HYBRJ(void*,int*,double*,double*,double*,int*,double*,int*,double*,int*,double*,int*,int*,int*,int*,double*,int*,double*,double*,double*,double*,double*); -extern void LMDIF(void*,int*,int*,double*,double*,double*,double*,double*,int*,double*,double*,int*,double*,int*,int*,int*,double*,int*,int*,double*,double*,double*,double*,double*); -extern void LMDER(void*,int*,int*,double*,double*,double*,int*,double*,double*,double*,int*,double*,int*,double*,int*,int*,int*,int*,int*,double*,double*,double*,double*,double*); -extern void LMSTR(void*,int*,int*,double*,double*,double*,int*,double*,double*,double*,int*,double*,int*,double*,int*,int*,int*,int*,int*,double*,double*,double*,double*,double*); - -int raw_multipack_calling_function(int *n, double *x, double *fvec, int *iflag) -{ - /* This is the function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any - -- otherwise place result of calculation in *fvec - */ - - PyArrayObject *result_array = NULL; - - result_array = (PyArrayObject *)call_python_function(multipack_python_function, *n, x, multipack_extra_arguments, 1, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - memcpy(fvec, result_array->data, (*n)*sizeof(double)); - Py_DECREF(result_array); - return 0; - -} - - -int 
jac_multipack_calling_function(int *n, double *x, double *fvec, double *fjac, int *ldfjac, int *iflag) -{ - /* This is the function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any - -- otherwise place result of calculation in *fvec or *fjac. - - If iflag = 1 this should compute the function. - If iflag = 2 this should compute the jacobian (derivative matrix) - */ - - PyArrayObject *result_array; - - if (*iflag == 1) { - result_array = (PyArrayObject *)call_python_function(multipack_python_function, *n, x, multipack_extra_arguments, 1, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - memcpy(fvec, result_array->data, (*n)*sizeof(double)); - } - else { /* iflag == 2 */ - result_array = (PyArrayObject *)call_python_function(multipack_python_jacobian, *n, x, multipack_extra_arguments, 2, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - if (multipack_jac_transpose == 1) - MATRIXC2F(fjac, result_array->data, *n, *ldfjac) - else - memcpy(fjac, result_array->data, (*n)*(*ldfjac)*sizeof(double)); - } - - Py_DECREF(result_array); - return 0; -} - -int raw_multipack_lm_function(int *m, int *n, double *x, double *fvec, int *iflag) -{ - /* This is the function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any - -- otherwise place result of calculation in *fvec - */ - - PyArrayObject *result_array = NULL; - - result_array = (PyArrayObject *)call_python_function(multipack_python_function,*n, x, multipack_extra_arguments, 1, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - memcpy(fvec, result_array->data, (*m)*sizeof(double)); - Py_DECREF(result_array); - return 0; -} - - -int jac_multipack_lm_function(int *m, int *n, double *x, double *fvec, double *fjac, int *ldfjac, int *iflag) -{ - /* This is the 
function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any - -- otherwise place result of calculation in *fvec or *fjac. - - If iflag = 1 this should compute the function. - If iflag = 2 this should compute the jacobian (derivative matrix) - */ - - PyArrayObject *result_array; - - if (*iflag == 1) { - result_array = (PyArrayObject *)call_python_function(multipack_python_function, *n, x, multipack_extra_arguments, 1, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - memcpy(fvec, result_array->data, (*m)*sizeof(double)); - } - else { /* iflag == 2 */ - result_array = (PyArrayObject *)call_python_function(multipack_python_jacobian, *n, x, multipack_extra_arguments, 2, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - if (multipack_jac_transpose == 1) - MATRIXC2F(fjac, result_array->data, *n, *ldfjac) - else - memcpy(fjac, result_array->data, (*n)*(*ldfjac)*sizeof(double)); - } - - Py_DECREF(result_array); - return 0; -} - -int smjac_multipack_lm_function(int *m, int *n, double *x, double *fvec, double *fjrow, int *iflag) -{ - /* This is the function called from the Fortran code it should - -- use call_python_function to get a multiarrayobject result - -- check for errors and return -1 if any - -- otherwise place result of calculation in *fvec or *fjac. - - If iflag = 1 this should compute the function. - If iflag = i this should compute the (i-1)-st row of the jacobian. 
- */ - int row; - PyObject *newargs, *ob_row; - PyArrayObject *result_array; - - if (*iflag == 1) { - result_array = (PyArrayObject *)call_python_function(multipack_python_function, *n, x, multipack_extra_arguments, 1, minpack_error); - if (result_array == NULL) { - *iflag = -1; - return -1; - } - memcpy(fvec, result_array->data, (*m)*sizeof(double)); - } - else { /* iflag == i */ - /* append row number to argument list and call row-based jacobian */ - row = *iflag - 2; - - if ((ob_row = PyInt_FromLong((long)row)) == NULL) { - *iflag = -1; - return -1; - } - newargs = PySequence_Concat( ob_row, multipack_extra_arguments); - Py_DECREF(ob_row); - if (newargs == NULL) { - PyErr_SetString(minpack_error, "Internal error constructing argument list."); - *iflag = -1; - return -1; - } - - result_array = (PyArrayObject *)call_python_function(multipack_python_jacobian, *n, x, newargs, 2, minpack_error); - if (result_array == NULL) { - Py_DECREF(newargs); - *iflag = -1; - return -1; - } - memcpy(fjrow, result_array->data, (*n)*sizeof(double)); - } - - Py_DECREF(result_array); - return 0; -} - - -static char doc_hybrd[] = "[x,infodict,info] = _hybrd(fun, x0, args, full_output, xtol, maxfev, ml, mu, epsfcn, factor, diag)"; - -static PyObject *minpack_hybrd(PyObject *dummy, PyObject *args) { - PyObject *fcn, *x0, *extra_args = NULL, *o_diag = NULL; - int full_output = 0, maxfev = -10, ml = -10, mu = -10; - double xtol = 1.49012e-8, epsfcn = 0.0, factor = 1.0e2; - int mode = 2, nprint = 0, info, nfev, ldfjac; - npy_intp n,lr; - double *x, *fvec, *diag, *fjac, *r, *qtf; - - PyArrayObject *ap_x = NULL, *ap_fvec = NULL; - PyArrayObject *ap_fjac = NULL, *ap_r = NULL, *ap_qtf = NULL; - PyArrayObject *ap_diag = NULL; - - npy_intp dims[2]; - int allocated = 0; - double *wa = NULL; - - STORE_VARS(); /* Define storage variables for global variables. 
*/ - - if (!PyArg_ParseTuple(args, "OO|OidiiiddO", &fcn, &x0, &extra_args, &full_output, &xtol, &maxfev, &ml, &mu, &epsfcn, &factor, &o_diag)) return NULL; - - INIT_FUNC(fcn,extra_args,minpack_error); - - /* Initial input vector */ - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x0, PyArray_DOUBLE, 1, 1); - if (ap_x == NULL) goto fail; - x = (double *) ap_x->data; - n = ap_x->dimensions[0]; - - lr = n * (n + 1) / 2; - if (ml < 0) ml = n-1; - if (mu < 0) mu = n-1; - if (maxfev < 0) maxfev = 200*(n+1); - - /* Setup array to hold the function evaluations */ - ap_fvec = (PyArrayObject *)call_python_function(fcn, n, x, extra_args, 1, minpack_error); - if (ap_fvec == NULL) goto fail; - fvec = (double *) ap_fvec->data; - if (ap_fvec->nd == 0) - n = 1; - else if (ap_fvec->dimensions[0] < n) - n = ap_fvec->dimensions[0]; - - SET_DIAG(ap_diag,o_diag,mode); - - dims[0] = n; dims[1] = n; - ap_r = (PyArrayObject *)PyArray_SimpleNew(1,&lr,PyArray_DOUBLE); - ap_qtf = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); - ap_fjac = (PyArrayObject *)PyArray_SimpleNew(2,dims,PyArray_DOUBLE); - - if (ap_r == NULL || ap_qtf == NULL || ap_fjac ==NULL) goto fail; - - r = (double *) ap_r->data; - qtf = (double *) ap_qtf->data; - fjac = (double *) ap_fjac->data; - ldfjac = dims[1]; - - if ((wa = malloc(4*n * sizeof(double)))==NULL) { - PyErr_NoMemory(); - goto fail; - } - allocated = 1; - - /* Call the underlying FORTRAN routines. 
*/ - HYBRD(raw_multipack_calling_function, &n, x, fvec, &xtol, &maxfev, &ml, &mu, &epsfcn, diag, &mode, &factor, &nprint, &info, &nfev, fjac, &ldfjac, r, &lr, qtf, wa, wa+n, wa+2*n, wa+3*n); - - RESTORE_FUNC(); - - if (info < 0) goto fail; /* Python Terminated */ - - - free(wa); - Py_DECREF(extra_args); - Py_DECREF(ap_diag); - - if (full_output) { - return Py_BuildValue("N{s:N,s:i,s:N,s:N,s:N}i",PyArray_Return(ap_x),"fvec",PyArray_Return(ap_fvec),"nfev",nfev,"fjac",PyArray_Return(ap_fjac),"r",PyArray_Return(ap_r),"qtf",PyArray_Return(ap_qtf),info); - } - else { - Py_DECREF(ap_fvec); - Py_DECREF(ap_fjac); - Py_DECREF(ap_r); - Py_DECREF(ap_qtf); - return Py_BuildValue("Ni",PyArray_Return(ap_x),info); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_x); - Py_XDECREF(ap_fvec); - Py_XDECREF(ap_diag); - Py_XDECREF(ap_fjac); - Py_XDECREF(ap_r); - Py_XDECREF(ap_qtf); - if (allocated) free(wa); - return NULL; -} - - -static char doc_hybrj[] = "[x,infodict,info] = _hybrj(fun, Dfun, x0, args, full_output, col_deriv, xtol, maxfev, factor, diag)"; - -static PyObject *minpack_hybrj(PyObject *dummy, PyObject *args) { - PyObject *fcn, *Dfun, *x0, *extra_args = NULL, *o_diag = NULL; - int full_output = 0, maxfev = -10, col_deriv = 1; - double xtol = 1.49012e-8, factor = 1.0e2; - int mode = 2, nprint = 0, info, nfev, njev, ldfjac; - npy_intp n, lr; - double *x, *fvec, *diag, *fjac, *r, *qtf; - - PyArrayObject *ap_x = NULL, *ap_fvec = NULL; - PyArrayObject *ap_fjac = NULL, *ap_r = NULL, *ap_qtf = NULL; - PyArrayObject *ap_diag = NULL; - - npy_intp dims[2]; - int allocated = 0; - double *wa = NULL; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "OOO|OiididO", &fcn, &Dfun, &x0, &extra_args, &full_output, &col_deriv, &xtol, &maxfev, &factor, &o_diag)) return NULL; - - INIT_JAC_FUNC(fcn,Dfun,extra_args,col_deriv,minpack_error); - - /* Initial input vector */ - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x0, PyArray_DOUBLE, 1, 1); - if (ap_x == NULL) 
goto fail; - x = (double *) ap_x->data; - n = ap_x->dimensions[0]; - lr = n * (n + 1) / 2; - - if (maxfev < 0) maxfev = 100*(n+1); - - /* Setup array to hold the function evaluations */ - ap_fvec = (PyArrayObject *)call_python_function(fcn, n, x, extra_args, 1, minpack_error); - if (ap_fvec == NULL) goto fail; - fvec = (double *) ap_fvec->data; - if (ap_fvec->nd == 0) - n = 1; - else if (ap_fvec->dimensions[0] < n) - n = ap_fvec->dimensions[0]; - - SET_DIAG(ap_diag,o_diag,mode); - - dims[0] = n; dims[1] = n; - ap_r = (PyArrayObject *)PyArray_SimpleNew(1,&lr,PyArray_DOUBLE); - ap_qtf = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); - ap_fjac = (PyArrayObject *)PyArray_SimpleNew(2,dims,PyArray_DOUBLE); - - if (ap_r == NULL || ap_qtf == NULL || ap_fjac ==NULL) goto fail; - - r = (double *) ap_r->data; - qtf = (double *) ap_qtf->data; - fjac = (double *) ap_fjac->data; - - ldfjac = dims[1]; - - if ((wa = malloc(4*n * sizeof(double)))==NULL) { - PyErr_NoMemory(); - goto fail; - } - allocated = 1; - - /* Call the underlying FORTRAN routines. 
*/ - HYBRJ(jac_multipack_calling_function, &n, x, fvec, fjac, &ldfjac, &xtol, &maxfev, diag, &mode, &factor, &nprint, &info, &nfev, &njev, r, &lr, qtf, wa, wa+n, wa+2*n, wa+3*n); - - RESTORE_JAC_FUNC(); - - if (info < 0) goto fail; /* Python Terminated */ - - free(wa); - Py_DECREF(extra_args); - Py_DECREF(ap_diag); - - if (full_output) { - return Py_BuildValue("N{s:N,s:i,s:i,s:N,s:N,s:N}i",PyArray_Return(ap_x),"fvec",PyArray_Return(ap_fvec),"nfev",nfev,"njev",njev,"fjac",PyArray_Return(ap_fjac),"r",PyArray_Return(ap_r),"qtf",PyArray_Return(ap_qtf),info); - } - else { - Py_DECREF(ap_fvec); - Py_DECREF(ap_fjac); - Py_DECREF(ap_r); - Py_DECREF(ap_qtf); - return Py_BuildValue("Ni",PyArray_Return(ap_x),info); - } - - fail: - RESTORE_JAC_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_x); - Py_XDECREF(ap_fvec); - Py_XDECREF(ap_fjac); - Py_XDECREF(ap_diag); - Py_XDECREF(ap_r); - Py_XDECREF(ap_qtf); - if (allocated) free(wa); - return NULL; - -} - -/************************ Levenberg-Marquardt *******************/ - -static char doc_lmdif[] = "[x,infodict,info] = _lmdif(fun, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)"; - -static PyObject *minpack_lmdif(PyObject *dummy, PyObject *args) { - PyObject *fcn, *x0, *extra_args = NULL, *o_diag = NULL; - int full_output = 0, maxfev = -10; - double xtol = 1.49012e-8, ftol = 1.49012e-8; - double gtol = 0.0, epsfcn = 0.0, factor = 1.0e2; - int m, mode = 2, nprint = 0, info, nfev, ldfjac, *ipvt; - npy_intp n; - double *x, *fvec, *diag, *fjac, *qtf; - - PyArrayObject *ap_x = NULL, *ap_fvec = NULL; - PyArrayObject *ap_fjac = NULL, *ap_ipvt = NULL, *ap_qtf = NULL; - PyArrayObject *ap_diag = NULL; - - npy_intp dims[2]; - int allocated = 0; - double *wa = NULL; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "OO|OidddiddO", &fcn, &x0, &extra_args, &full_output, &ftol, &xtol, >ol, &maxfev, &epsfcn, &factor, &o_diag)) return NULL; - - INIT_FUNC(fcn,extra_args,minpack_error); - - /* Initial input vector */ - 
ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(x0, PyArray_DOUBLE, 1, 1); - if (ap_x == NULL) goto fail; - x = (double *) ap_x->data; - n = ap_x->dimensions[0]; - dims[0] = n; - - SET_DIAG(ap_diag,o_diag,mode); - - if (maxfev < 0) maxfev = 200*(n+1); - - /* Setup array to hold the function evaluations and find it's size*/ - ap_fvec = (PyArrayObject *)call_python_function(fcn, n, x, extra_args, 1, minpack_error); - if (ap_fvec == NULL) goto fail; - fvec = (double *) ap_fvec->data; - m = (ap_fvec->nd > 0 ? ap_fvec->dimensions[0] : 1); - - dims[0] = n; dims[1] = m; - ap_ipvt = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_INT); - ap_qtf = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); - ap_fjac = (PyArrayObject *)PyArray_SimpleNew(2,dims,PyArray_DOUBLE); - - if (ap_ipvt == NULL || ap_qtf == NULL || ap_fjac ==NULL) goto fail; - - ipvt = (int *) ap_ipvt->data; - qtf = (double *) ap_qtf->data; - fjac = (double *) ap_fjac->data; - ldfjac = dims[1]; - wa = (double *)malloc((3*n + m)* sizeof(double)); - if (wa == NULL) { - PyErr_NoMemory(); - goto fail; - } - allocated = 1; - - /* Call the underlying FORTRAN routines. 
*/ - LMDIF(raw_multipack_lm_function, &m, &n, x, fvec, &ftol, &xtol, >ol, &maxfev, &epsfcn, diag, &mode, &factor, &nprint, &info, &nfev, fjac, &ldfjac, ipvt, qtf, wa, wa+n, wa+2*n, wa+3*n); - - RESTORE_FUNC(); - - if (info < 0) goto fail; /* Python error */ - - free(wa); - Py_DECREF(extra_args); - Py_DECREF(ap_diag); - - if (full_output) { - return Py_BuildValue("N{s:N,s:i,s:N,s:N,s:N}i",PyArray_Return(ap_x),"fvec",PyArray_Return(ap_fvec),"nfev",nfev,"fjac",PyArray_Return(ap_fjac),"ipvt",PyArray_Return(ap_ipvt),"qtf",PyArray_Return(ap_qtf),info); - } - else { - Py_DECREF(ap_fvec); - Py_DECREF(ap_fjac); - Py_DECREF(ap_ipvt); - Py_DECREF(ap_qtf); - return Py_BuildValue("Ni",PyArray_Return(ap_x),info); - } - - fail: - RESTORE_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_x); - Py_XDECREF(ap_fvec); - Py_XDECREF(ap_fjac); - Py_XDECREF(ap_diag); - Py_XDECREF(ap_ipvt); - Py_XDECREF(ap_qtf); - if (allocated) free(wa); - return NULL; -} - - -static char doc_lmder[] = "[x,infodict,info] = _lmder(fun, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag)"; - -static PyObject *minpack_lmder(PyObject *dummy, PyObject *args) { - PyObject *fcn, *x0, *Dfun, *extra_args = NULL, *o_diag = NULL; - int full_output = 0, maxfev = -10, col_deriv = 1; - double xtol = 1.49012e-8, ftol = 1.49012e-8; - double gtol = 0.0, factor = 1.0e2; - int m, mode = 2, nprint = 0, info, nfev, njev, ldfjac, *ipvt; - npy_intp n; - double *x, *fvec, *diag, *fjac, *qtf; - - PyArrayObject *ap_x = NULL, *ap_fvec = NULL; - PyArrayObject *ap_fjac = NULL, *ap_ipvt = NULL, *ap_qtf = NULL; - PyArrayObject *ap_diag = NULL; - - npy_intp dims[2]; - int allocated = 0; - double *wa = NULL; - - STORE_VARS(); - - if (!PyArg_ParseTuple(args, "OOO|OiidddidO", &fcn, &Dfun, &x0, &extra_args, &full_output, &col_deriv, &ftol, &xtol, >ol, &maxfev, &factor, &o_diag)) return NULL; - - INIT_JAC_FUNC(fcn,Dfun,extra_args,col_deriv,minpack_error); - - /* Initial input vector */ - ap_x = (PyArrayObject 
*)PyArray_ContiguousFromObject(x0, PyArray_DOUBLE, 1, 1); - if (ap_x == NULL) goto fail; - x = (double *) ap_x->data; - n = ap_x->dimensions[0]; - - if (maxfev < 0) maxfev = 100*(n+1); - - /* Setup array to hold the function evaluations */ - ap_fvec = (PyArrayObject *)call_python_function(fcn, n, x, extra_args, 1, minpack_error); - if (ap_fvec == NULL) goto fail; - fvec = (double *) ap_fvec->data; - - SET_DIAG(ap_diag,o_diag,mode); - - m = (ap_fvec->nd > 0 ? ap_fvec->dimensions[0] : 1); - - dims[0] = n; dims[1] = m; - ap_ipvt = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_INT); - ap_qtf = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); - ap_fjac = (PyArrayObject *)PyArray_SimpleNew(2,dims,PyArray_DOUBLE); - - if (ap_ipvt == NULL || ap_qtf == NULL || ap_fjac ==NULL) goto fail; - - ipvt = (int *) ap_ipvt->data; - qtf = (double *) ap_qtf->data; - fjac = (double *) ap_fjac->data; - ldfjac = dims[1]; - wa = (double *)malloc((3*n + m)* sizeof(double)); - if (wa == NULL) { - PyErr_NoMemory(); - goto fail; - } - allocated = 1; - - /* Call the underlying FORTRAN routines. 
*/ - LMDER(jac_multipack_lm_function, &m, &n, x, fvec, fjac, &ldfjac, &ftol, &xtol, >ol, &maxfev, diag, &mode, &factor, &nprint, &info, &nfev, &njev, ipvt, qtf, wa, wa+n, wa+2*n, wa+3*n); - - RESTORE_JAC_FUNC(); - - if (info < 0) goto fail; /* Python error */ - - free(wa); - Py_DECREF(extra_args); - Py_DECREF(ap_diag); - - if (full_output) { - return Py_BuildValue("N{s:N,s:i,s:i,s:N,s:N,s:N}i",PyArray_Return(ap_x),"fvec",PyArray_Return(ap_fvec),"nfev",nfev,"njev",njev,"fjac",PyArray_Return(ap_fjac),"ipvt",PyArray_Return(ap_ipvt),"qtf",PyArray_Return(ap_qtf),info); - } - else { - Py_DECREF(ap_fvec); - Py_DECREF(ap_fjac); - Py_DECREF(ap_ipvt); - Py_DECREF(ap_qtf); - return Py_BuildValue("Ni",PyArray_Return(ap_x),info); - } - - fail: - RESTORE_JAC_FUNC(); - Py_XDECREF(extra_args); - Py_XDECREF(ap_x); - Py_XDECREF(ap_fvec); - Py_XDECREF(ap_fjac); - Py_XDECREF(ap_diag); - Py_XDECREF(ap_ipvt); - Py_XDECREF(ap_qtf); - if (allocated) free(wa); - return NULL; -} - - -/** Check gradient function **/ - -static char doc_chkder[] = "_chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,mode,err)"; - -static PyObject *minpack_chkder(PyObject *self, PyObject *args) -{ - PyArrayObject *ap_fvecp = NULL, *ap_fjac = NULL, *ap_err = NULL; - PyArrayObject *ap_x = NULL, *ap_fvec = NULL, *ap_xp = NULL; - PyObject *o_x, *o_fvec, *o_fjac, *o_fvecp; - double *xp, *fvecp, *fjac, *fvec, *x; - double *err; - int mode, m, n, ldfjac; - - if (!PyArg_ParseTuple(args,"iiOOOiO!OiO!",&m, &n, &o_x, &o_fvec, &o_fjac, &ldfjac, &PyArray_Type, (PyObject **)&ap_xp, &o_fvecp, &mode, &PyArray_Type, (PyObject **)&ap_err)) return NULL; - - ap_x = (PyArrayObject *)PyArray_ContiguousFromObject(o_x,PyArray_DOUBLE,1,1); - if (ap_x == NULL) goto fail; - if (n != ap_x->dimensions[0]) - PYERR(minpack_error,"Input data array (x) must have length n"); - x = (double *) ap_x -> data; - if (!ISCONTIGUOUS(ap_xp) || (ap_xp->descr->type_num != PyArray_DOUBLE)) - PYERR(minpack_error,"Seventh argument (xp) must be contiguous array of type 
Float64."); - - if (mode == 1) { - fvec = NULL; - fjac = NULL; - xp = (double *)ap_xp->data; - fvecp = NULL; - err = NULL; - CHKDER(&m, &n, x, fvec, fjac, &ldfjac, xp, fvecp, &mode, err); - } - else if (mode == 2) { - if (!ISCONTIGUOUS(ap_err) || (ap_err->descr->type_num != PyArray_DOUBLE)) - PYERR(minpack_error,"Last argument (err) must be contiguous array of type Float64."); - ap_fvec = (PyArrayObject *)PyArray_ContiguousFromObject(o_fvec,PyArray_DOUBLE,1,1); - ap_fjac = (PyArrayObject *)PyArray_ContiguousFromObject(o_fjac,PyArray_DOUBLE,2,2); - ap_fvecp = (PyArrayObject *)PyArray_ContiguousFromObject(o_fvecp,PyArray_DOUBLE,1,1); - if (ap_fvec == NULL || ap_fjac == NULL || ap_fvecp == NULL) goto fail; - - fvec = (double *)ap_fvec -> data; - fjac = (double *)ap_fjac -> data; - xp = (double *)ap_xp->data; - fvecp = (double *)ap_fvecp -> data; - err = (double *)ap_err->data; - - CHKDER(&m, &n, x, fvec, fjac, &m, xp, fvecp, &mode, err); - - Py_DECREF(ap_fvec); - Py_DECREF(ap_fjac); - Py_DECREF(ap_fvecp); - } - else - PYERR(minpack_error,"Invalid mode, must be 1 or 2."); - - Py_DECREF(ap_x); - - Py_INCREF(Py_None); - return Py_None; - - fail: - Py_XDECREF(ap_fvec); - Py_XDECREF(ap_fjac); - Py_XDECREF(ap_fvecp); - Py_XDECREF(ap_x); - return NULL; -} - - - - - - - - - diff --git a/scipy-0.10.1/scipy/optimize/_minpackmodule.c b/scipy-0.10.1/scipy/optimize/_minpackmodule.c deleted file mode 100644 index 703f8c5d7f..0000000000 --- a/scipy-0.10.1/scipy/optimize/_minpackmodule.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - Multipack project. - This file is generated by setmodules.py. Do not modify it. 
- */ -#include "minpack.h" -static PyObject *minpack_error; -#include "__minpack.h" -static struct PyMethodDef minpack_module_methods[] = { -{"_hybrd", minpack_hybrd, METH_VARARGS, doc_hybrd}, -{"_hybrj", minpack_hybrj, METH_VARARGS, doc_hybrj}, -{"_lmdif", minpack_lmdif, METH_VARARGS, doc_lmdif}, -{"_lmder", minpack_lmder, METH_VARARGS, doc_lmder}, -{"_chkder", minpack_chkder, METH_VARARGS, doc_chkder}, -{NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_minpack", - NULL, - -1, - minpack_module_methods, - NULL, - NULL, - NULL, - NULL -}; -PyObject *PyInit__minpack(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - - d = PyModule_GetDict(m); - - s = PyUnicode_FromString(" 1.10 "); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - minpack_error = PyErr_NewException ("minpack.error", NULL, NULL); - PyDict_SetItemString(d, "error", minpack_error); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module minpack"); - - return m; -} -#else -PyMODINIT_FUNC init_minpack(void) { - PyObject *m, *d, *s; - m = Py_InitModule("_minpack", minpack_module_methods); - import_array(); - d = PyModule_GetDict(m); - - s = PyString_FromString(" 1.10 "); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - minpack_error = PyErr_NewException ("minpack.error", NULL, NULL); - PyDict_SetItemString(d, "error", minpack_error); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module minpack"); -} -#endif diff --git a/scipy-0.10.1/scipy/optimize/_tstutils.py b/scipy-0.10.1/scipy/optimize/_tstutils.py deleted file mode 100644 index 995720a2d6..0000000000 --- a/scipy-0.10.1/scipy/optimize/_tstutils.py +++ /dev/null @@ -1,47 +0,0 @@ -''' Parameters used in test and benchmark methods ''' - -from random import random - -from scipy.optimize import zeros as cc - -def f1(x) : - return x*(x-1.) 
- -def f2(x) : - return x**2 - 1 - -def f3(x) : - return x*(x-1.)*(x-2.)*(x-3.) - -def f4(x) : - if x > 1 : return 1.0 + .1*x - if x < 1 : return -1.0 + .1*x - return 0 - -def f5(x) : - if x != 1 : return 1.0/(1. - x) - return 0 - -def f6(x) : - if x > 1 : return random() - elif x < 1 : return -random() - else : return 0 - -description = """ -f2 is a symmetric parabola, x**2 - 1 -f3 is a quartic polynomial with large hump in interval -f4 is step function with a discontinuity at 1 -f5 is a hyperbola with vertical asymptote at 1 -f6 has random values positive to left of 1, negative to right - -of course these are not real problems. They just test how the -'good' solvers behave in bad circumstances where bisection is -really the best. A good solver should not be much worse than -bisection in such circumstance, while being faster for smooth -monotone sorts of functions. -""" - -methods = [cc.bisect,cc.ridder,cc.brenth,cc.brentq] -mstrings = ['cc.bisect','cc.ridder','cc.brenth','cc.brentq'] -functions = [f2,f3,f4,f5,f6] -fstrings = ['f2','f3','f4','f5','f6'] diff --git a/scipy-0.10.1/scipy/optimize/anneal.py b/scipy-0.10.1/scipy/optimize/anneal.py deleted file mode 100644 index bfdd768610..0000000000 --- a/scipy-0.10.1/scipy/optimize/anneal.py +++ /dev/null @@ -1,376 +0,0 @@ -# Original Author: Travis Oliphant 2002 -# Bug-fixes in 2006 by Tim Leslie - - -import numpy -from numpy import asarray, tan, exp, ones, squeeze, sign, \ - all, log, sqrt, pi, shape, array, minimum, where -from numpy import random - -__all__ = ['anneal'] - -_double_min = numpy.finfo(float).min -_double_max = numpy.finfo(float).max -class base_schedule(object): - def __init__(self): - self.dwell = 20 - self.learn_rate = 0.5 - self.lower = -10 - self.upper = 10 - self.Ninit = 50 - self.accepted = 0 - self.tests = 0 - self.feval = 0 - self.k = 0 - self.T = None - - def init(self, **options): - self.__dict__.update(options) - self.lower = asarray(self.lower) - self.lower = where(self.lower == 
numpy.NINF, -_double_max, self.lower) - self.upper = asarray(self.upper) - self.upper = where(self.upper == numpy.PINF, _double_max, self.upper) - self.k = 0 - self.accepted = 0 - self.feval = 0 - self.tests = 0 - - def getstart_temp(self, best_state): - """ Find a matching starting temperature and starting parameters vector - i.e. find x0 such that func(x0) = T0. - - Parameters - ---------- - best_state : _state - A _state object to store the function value and x0 found. - - Returns - ------- - x0 : array - The starting parameters vector. - """ - - assert(not self.dims is None) - lrange = self.lower - urange = self.upper - fmax = _double_min - fmin = _double_max - for _ in range(self.Ninit): - x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange - fval = self.func(x0, *self.args) - self.feval += 1 - if fval > fmax: - fmax = fval - if fval < fmin: - fmin = fval - best_state.cost = fval - best_state.x = array(x0) - - self.T0 = (fmax-fmin)*1.5 - return best_state.x - - def accept_test(self, dE): - T = self.T - self.tests += 1 - if dE < 0: - self.accepted += 1 - return 1 - p = exp(-dE*1.0/self.boltzmann/T) - if (p > random.uniform(0.0, 1.0)): - self.accepted += 1 - return 1 - return 0 - - def update_guess(self, x0): - pass - - def update_temp(self, x0): - pass - - -# A schedule due to Lester Ingber -class fast_sa(base_schedule): - def init(self, **options): - self.__dict__.update(options) - if self.m is None: - self.m = 1.0 - if self.n is None: - self.n = 1.0 - self.c = self.m * exp(-self.n * self.quench) - - def update_guess(self, x0): - x0 = asarray(x0) - u = squeeze(random.uniform(0.0, 1.0, size=self.dims)) - T = self.T - y = sign(u-0.5)*T*((1+1.0/T)**abs(2*u-1)-1.0) - xc = y*(self.upper - self.lower) - xnew = x0 + xc - return xnew - - def update_temp(self): - self.T = self.T0*exp(-self.c * self.k**(self.quench)) - self.k += 1 - return - -class cauchy_sa(base_schedule): - def update_guess(self, x0): - x0 = asarray(x0) - numbers = 
squeeze(random.uniform(-pi/2, pi/2, size=self.dims)) - xc = self.learn_rate * self.T * tan(numbers) - xnew = x0 + xc - return xnew - - def update_temp(self): - self.T = self.T0/(1+self.k) - self.k += 1 - return - -class boltzmann_sa(base_schedule): - def update_guess(self, x0): - std = minimum(sqrt(self.T)*ones(self.dims), (self.upper-self.lower)/3.0/self.learn_rate) - x0 = asarray(x0) - xc = squeeze(random.normal(0, 1.0, size=self.dims)) - - xnew = x0 + xc*std*self.learn_rate - return xnew - - def update_temp(self): - self.k += 1 - self.T = self.T0 / log(self.k+1.0) - return - -class _state(object): - def __init__(self): - self.x = None - self.cost = None - -# TODO: -# allow for general annealing temperature profile -# in that case use update given by alpha and omega and -# variation of all previous updates and temperature? - -# Simulated annealing - -def anneal(func, x0, args=(), schedule='fast', full_output=0, - T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400, - boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0, - lower=-100, upper=100, dwell=50): - """Minimize a function using simulated annealing. - - Schedule is a schedule class implementing the annealing schedule. - Available ones are 'fast', 'cauchy', 'boltzmann' - - Parameters - ---------- - func : callable f(x, *args) - Function to be optimized. - x0 : ndarray - Initial guess. - args : tuple - Extra parameters to `func`. - schedule : base_schedule - Annealing schedule to use (a class). - full_output : bool - Whether to return optional outputs. - T0 : float - Initial Temperature (estimated as 1.2 times the largest - cost-function deviation over random points in the range). - Tf : float - Final goal temperature. - maxeval : int - Maximum function evaluations. - maxaccept : int - Maximum changes to accept. - maxiter : int - Maximum cooling iterations. - learn_rate : float - Scale constant for adjusting guesses. 
- boltzmann : float - Boltzmann constant in acceptance test - (increase for less stringent test at each temperature). - feps : float - Stopping relative error tolerance for the function value in - last four coolings. - quench, m, n : float - Parameters to alter fast_sa schedule. - lower, upper : float or ndarray - Lower and upper bounds on `x`. - dwell : int - The number of times to search the space at each temperature. - - Returns - ------- - xmin : ndarray - Point giving smallest value found. - Jmin : float - Minimum value of function found. - T : float - Final temperature. - feval : int - Number of function evaluations. - iters : int - Number of cooling iterations. - accept : int - Number of tests accepted. - retval : int - Flag indicating stopping condition:: - - 0 : Points no longer changing - 1 : Cooled to final temperature - 2 : Maximum function evaluations - 3 : Maximum cooling iterations reached - 4 : Maximum accepted query locations reached - 5 : Final point not the minimum amongst encountered points - - Notes - ----- - Simulated annealing is a random algorithm which uses no derivative - information from the function being optimized. In practice it has - been more useful in discrete optimization than continuous - optimization, as there are usually better algorithms for continuous - optimization problems. - - Some experimentation by trying the difference temperature - schedules and altering their parameters is likely required to - obtain good performance. - - The randomness in the algorithm comes from random sampling in numpy. - To obtain the same results you can call numpy.random.seed with the - same seed immediately before calling scipy.optimize.anneal. - - We give a brief description of how the three temperature schedules - generate new points and vary their temperature. Temperatures are - only updated with iterations in the outer loop. 
The inner loop is - over loop over xrange(dwell), and new points are generated for - every iteration in the inner loop. (Though whether the proposed - new points are accepted is probabilistic.) - - For readability, let d denote the dimension of the inputs to func. - Also, let x_old denote the previous state, and k denote the - iteration number of the outer loop. All other variables not - defined below are input variables to scipy.optimize.anneal itself. - - In the 'fast' schedule the updates are :: - - u ~ Uniform(0, 1, size=d) - y = sgn(u - 0.5) * T * ((1+ 1/T)**abs(2u-1) -1.0) - xc = y * (upper - lower) - x_new = x_old + xc - - c = n * exp(-n * quench) - T_new = T0 * exp(-c * k**quench) - - - In the 'cauchy' schedule the updates are :: - - u ~ Uniform(-pi/2, pi/2, size=d) - xc = learn_rate * T * tan(u) - x_new = x_old + xc - - T_new = T0 / (1+k) - - In the 'boltzmann' schedule the updates are :: - - std = minimum( sqrt(T) * ones(d), (upper-lower) / (3*learn_rate) ) - y ~ Normal(0, std, size=d) - x_new = x_old + learn_rate * y - - T_new = T0 / log(1+k) - - """ - x0 = asarray(x0) - lower = asarray(lower) - upper = asarray(upper) - - schedule = eval(schedule+'_sa()') - # initialize the schedule - schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0, - learn_rate=learn_rate, lower=lower, upper=upper, - m=m, n=n, quench=quench, dwell=dwell) - - current_state, last_state, best_state = _state(), _state(), _state() - if T0 is None: - x0 = schedule.getstart_temp(best_state) - else: - best_state.x = None - best_state.cost = numpy.Inf - - last_state.x = asarray(x0).copy() - fval = func(x0,*args) - schedule.feval += 1 - last_state.cost = fval - if last_state.cost < best_state.cost: - best_state.cost = fval - best_state.x = asarray(x0).copy() - schedule.T = schedule.T0 - fqueue = [100, 300, 500, 700] - iters = 0 - while 1: - for n in xrange(dwell): - current_state.x = schedule.update_guess(last_state.x) - current_state.cost = func(current_state.x,*args) 
- schedule.feval += 1 - - dE = current_state.cost - last_state.cost - if schedule.accept_test(dE): - last_state.x = current_state.x.copy() - last_state.cost = current_state.cost - if last_state.cost < best_state.cost: - best_state.x = last_state.x.copy() - best_state.cost = last_state.cost - schedule.update_temp() - iters += 1 - # Stopping conditions - # 0) last saved values of f from each cooling step - # are all very similar (effectively cooled) - # 1) Tf is set and we are below it - # 2) maxeval is set and we are past it - # 3) maxiter is set and we are past it - # 4) maxaccept is set and we are past it - - fqueue.append(squeeze(last_state.cost)) - fqueue.pop(0) - af = asarray(fqueue)*1.0 - if all(abs((af-af[0])/af[0]) < feps): - retval = 0 - if abs(af[-1]-best_state.cost) > feps*10: - retval = 5 - print "Warning: Cooled to %f at %s but this is not" \ - % (squeeze(last_state.cost), str(squeeze(last_state.x))) \ - + " the smallest point found." - break - if (Tf is not None) and (schedule.T < Tf): - retval = 1 - break - if (maxeval is not None) and (schedule.feval > maxeval): - retval = 2 - break - if (iters > maxiter): - print "Warning: Maximum number of iterations exceeded." 
- retval = 3 - break - if (maxaccept is not None) and (schedule.accepted > maxaccept): - retval = 4 - break - - if full_output: - return best_state.x, best_state.cost, schedule.T, \ - schedule.feval, iters, schedule.accepted, retval - else: - return best_state.x, retval - - - -if __name__ == "__main__": - from numpy import cos - # minimum expected at ~-0.195 - func = lambda x: cos(14.5*x-0.3) + (x+0.2)*x - print anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='cauchy') - print anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='fast') - print anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='boltzmann') - - # minimum expected at ~[-0.195, -0.1] - func = lambda x: cos(14.5*x[0]-0.3) + (x[1]+0.2)*x[1] + (x[0]+0.2)*x[0] - print anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='cauchy') - print anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='fast') - print anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='boltzmann') diff --git a/scipy-0.10.1/scipy/optimize/benchmarks/bench_zeros.py b/scipy-0.10.1/scipy/optimize/benchmarks/bench_zeros.py deleted file mode 100644 index 3168b9c6b1..0000000000 --- a/scipy-0.10.1/scipy/optimize/benchmarks/bench_zeros.py +++ /dev/null @@ -1,35 +0,0 @@ -from math import sqrt - -from numpy.testing import * - -from scipy.optimize import zeros as cc - -# Import testing parameters -from scipy.optimize._tstutils import methods, mstrings, functions, \ - fstrings, description - -class BenchZeros(TestCase): - def bench_run(self): - a = .5 - b = sqrt(3) - repeat = 2000 - - print description - - print 'TESTING SPEED\n' - print 'times in seconds for %d iterations \n'%repeat - for i in range(len(functions)) : - print 'function %s\n'%fstrings[i] - func = functions[i] - for j in 
range(len(methods)) : - meth = methods[j] - try: - t = measure("meth(func,a,b)",repeat) - except: - print '%s : failed'%mstrings[j] - else: - print '%s : %5.3f'%(mstrings[j],t) - print '\n\n' - -if __name__ == '__main__' : - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/bento.info b/scipy-0.10.1/scipy/optimize/bento.info deleted file mode 100644 index a297e13367..0000000000 --- a/scipy-0.10.1/scipy/optimize/bento.info +++ /dev/null @@ -1,40 +0,0 @@ -HookFile: bscript - -Library: - CompiledLibrary: minpack - Sources: - minpack/*.f - CompiledLibrary: rootfind - Sources: - Zeros/bisect.c, - Zeros/brenth.c, - Zeros/brentq.c, - Zeros/ridder.c - Extension: _minpack - Sources: _minpackmodule.c - Extension: _zeros - Sources: zeros.c - Extension: _lbfgsb - Sources: lbfgsb/lbfgsb.pyf, lbfgsb/routines.f - Extension: _cobyla - Sources: - cobyla/cobyla2.f, - cobyla/trstlp.f, - cobyla/cobyla.pyf - Extension: minpack2 - Sources: - minpack2/dcsrch.f, - minpack2/dcstep.f, - minpack2/minpack2.pyf - Extension: _nnls - Sources: - nnls/nnls.f, - nnls/nnls.pyf - Extension: moduleTNC - Sources: - tnc/moduleTNC.c, - tnc/tnc.c - Extension: _slsqp - Sources: - slsqp/slsqp_optmz.f, - slsqp/slsqp.pyf diff --git a/scipy-0.10.1/scipy/optimize/bscript b/scipy-0.10.1/scipy/optimize/bscript deleted file mode 100644 index 6d4b8873e7..0000000000 --- a/scipy-0.10.1/scipy/optimize/bscript +++ /dev/null @@ -1,23 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - def builder(extension): - return default_builder(extension, - features="c pyext bento cshlib", - use="minpack rootfind FLAPACK CLIB") - def builder_f2py(extension): - return default_builder(extension, - features="c pyext bento cshlib f2py", - use="minpack rootfind FLAPACK CLIB") - context.register_builder("_lbfgsb", builder_f2py) - context.register_builder("_cobyla", builder_f2py) - context.register_builder("minpack2", builder_f2py) - 
context.register_builder("_nnls", builder_f2py) - context.register_builder("_slsqp", builder_f2py) - - context.register_builder("_zeros", builder) - context.register_builder("_minpack", builder) - context.register_builder("moduleTNC", builder) diff --git a/scipy-0.10.1/scipy/optimize/cobyla.py b/scipy-0.10.1/scipy/optimize/cobyla.py deleted file mode 100644 index 5eb4df643a..0000000000 --- a/scipy-0.10.1/scipy/optimize/cobyla.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Interface to Constrained Optimization By Linear Approximation - -Functions: -fmin_coblya(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4, - iprint=1, maxfun=1000) - Minimize a function using the Constrained Optimization BY Linear - Approximation (COBYLA) method - -""" - -import _cobyla -from numpy import copy - -__all__ = ['fmin_cobyla'] - - -def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4, - iprint=1, maxfun=1000, disp=None): - """ - Minimize a function using the Constrained Optimization BY Linear - Approximation (COBYLA) method. This method wraps a FORTRAN - implentation of the algorithm. - - Parameters - ---------- - func : callable - Function to minimize. In the form func(x, \\*args). - x0 : ndarray - Initial guess. - cons : sequence - Constraint functions; must all be ``>=0`` (a single function - if only 1 constraint). Each function takes the parameters `x` - as its first argument. - args : tuple - Extra arguments to pass to function. - consargs : tuple - Extra arguments to pass to constraint functions (default of None means - use same extra arguments as those passed to func). - Use ``()`` for no extra arguments. - rhobeg : - Reasonable initial changes to the variables. - rhoend : - Final accuracy in the optimization (not precisely guaranteed). This - is a lower bound on the size of the trust region. - iprint : {0, 1, 2, 3} - Controls the frequency of output; 0 implies no output. Deprecated. - disp : {0, 1, 2, 3} - Over-rides the iprint interface. 
Preferred. - maxfun : int - Maximum number of function evaluations. - - Returns - ------- - x : ndarray - The argument that minimises `f`. - - Notes - ----- - This algorithm is based on linear approximations to the objective - function and each constraint. We briefly describe the algorithm. - - Suppose the function is being minimized over k variables. At the - jth iteration the algorithm has k+1 points v_1, ..., v_(k+1), - an approximate solution x_j, and a radius RHO_j. - (i.e. linear plus a constant) approximations to the objective - function and constraint functions such that their function values - agree with the linear approximation on the k+1 points v_1,.., v_(k+1). - This gives a linear program to solve (where the linear approximations - of the constraint functions are constrained to be non-negative). - - However the linear approximations are likely only good - approximations near the current simplex, so the linear program is - given the further requirement that the solution, which - will become x_(j+1), must be within RHO_j from x_j. RHO_j only - decreases, never increases. The initial RHO_j is rhobeg and the - final RHO_j is rhoend. In this way COBYLA's iterations behave - like a trust region algorithm. - - Additionally, the linear program may be inconsistent, or the - approximation may give poor improvement. For details about - how these issues are resolved, as well as how the points v_i are - updated, refer to the source code or the references below. - - - References - ---------- - Powell M.J.D. (1994), "A direct search optimization method that models - the objective and constraint functions by linear interpolation.", in - Advances in Optimization and Numerical Analysis, eds. S. Gomez and - J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67 - - Powell M.J.D. (1998), "Direct search algorithms for optimization - calculations", Acta Numerica 7, 287-336 - - Powell M.J.D. 
(2007), "A view of algorithms for optimization without - derivatives", Cambridge University Technical Report DAMTP 2007/NA03 - - - Examples - -------- - Minimize the objective function f(x,y) = x*y subject - to the constraints x**2 + y**2 < 1 and y > 0:: - - >>> def objective(x): - ... return x[0]*x[1] - ... - >>> def constr1(x): - ... return 1 - (x[0]**2 + x[1]**2) - ... - >>> def constr2(x): - ... return x[1] - ... - >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7) - - Normal return from subroutine COBYLA - - NFVALS = 64 F =-5.000000E-01 MAXCV = 1.998401E-14 - X =-7.071069E-01 7.071067E-01 - array([-0.70710685, 0.70710671]) - - The exact solution is (-sqrt(2)/2, sqrt(2)/2). - - - - """ - err = "cons must be a sequence of callable functions or a single"\ - " callable function." - try: - m = len(cons) - except TypeError: - if callable(cons): - m = 1 - cons = [cons] - else: - raise TypeError(err) - else: - for thisfunc in cons: - if not callable(thisfunc): - raise TypeError(err) - - if consargs is None: - consargs = args - - if disp is not None: - iprint = disp - - def calcfc(x, con): - f = func(x, *args) - k = 0 - for constraints in cons: - con[k] = constraints(x, *consargs) - k += 1 - return f - - xopt = _cobyla.minimize(calcfc, m=m, x=copy(x0), rhobeg=rhobeg, - rhoend=rhoend, iprint=iprint, maxfun=maxfun) - - return xopt diff --git a/scipy-0.10.1/scipy/optimize/cobyla/cobyla.pyf b/scipy-0.10.1/scipy/optimize/cobyla/cobyla.pyf deleted file mode 100644 index 7c029ea950..0000000000 --- a/scipy-0.10.1/scipy/optimize/cobyla/cobyla.pyf +++ /dev/null @@ -1,33 +0,0 @@ -! 
-*- f90 -*- -python module _cobyla__user__routines - interface _cobyla_user_interface - subroutine calcfc(n,m,x,f,con) - integer intent(in,hide) :: n - integer intent(in,hide) :: m - double precision dimension(n),depend(n),intent(in) :: x - double precision intent(out) :: f - double precision intent(in), dimension(m), depend(m) :: con - end subroutine calcfc - end interface _cobyla_user_interface -end python module _cobyla__user__routines -python module _cobyla ! in - interface ! in :__cobyla - subroutine minimize(calcfc,n,m,x,rhobeg,rhoend,iprint,maxfun,w,iact) - fortranname cobyla - use _cobyla__user__routines - external calcfc - integer intent(hide),depend(x) :: n=len(x) - integer :: m - double precision dimension(n),intent(in,out) :: x - double precision :: rhobeg - double precision :: rhoend - integer optional,check(0<=iprint && iprint<=3) :: iprint=1 - integer :: maxfun = 100 - double precision dimension(n*(3*n+2*m+11)+4*m+6), intent(cache,hide),depend(n,m) :: w - integer dimension(m + 1),intent(cache,hide),depend(m) :: iact - end subroutine minimize - end interface -end python module _cobyla - -! This file was auto-generated with f2py (version:2.39.235_1703). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/optimize/cobyla/cobyla2.f b/scipy-0.10.1/scipy/optimize/cobyla/cobyla2.f deleted file mode 100644 index e765a83d3e..0000000000 --- a/scipy-0.10.1/scipy/optimize/cobyla/cobyla2.f +++ /dev/null @@ -1,555 +0,0 @@ -C------------------------------------------------------------------------ -C - SUBROUTINE COBYLA (CALCFC, N,M,X,RHOBEG,RHOEND,IPRINT,MAXFUN, - & W,IACT) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - EXTERNAL CALCFC - DIMENSION X(*),W(*),IACT(*) -C -C This subroutine minimizes an objective function F(X) subject to M -C inequality constraints on X, where X is a vector of variables that has -C N components. 
The algorithm employs linear approximations to the -C objective and constraint functions, the approximations being formed by -C linear interpolation at N+1 points in the space of the variables. -C We regard these interpolation points as vertices of a simplex. The -C parameter RHO controls the size of the simplex and it is reduced -C automatically from RHOBEG to RHOEND. For each RHO the subroutine tries -C to achieve a good vector of variables for the current size, and then -C RHO is reduced until the value RHOEND is reached. Therefore RHOBEG and -C RHOEND should be set to reasonable initial changes to and the required -C accuracy in the variables respectively, but this accuracy should be -C viewed as a subject for experimentation because it is not guaranteed. -C The subroutine has an advantage over many of its competitors, however, -C which is that it treats each constraint individually when calculating -C a change to the variables, instead of lumping the constraints together -C into a single penalty function. The name of the subroutine is derived -C from the phrase Constrained Optimization BY Linear Approximations. -C -C The user must set the values of N, M, RHOBEG and RHOEND, and must -C provide an initial vector of variables in X. Further, the value of -C IPRINT should be set to 0, 1, 2 or 3, which controls the amount of -C printing during the calculation. Specifically, there is no output if -C IPRINT=0 and there is output only at the end of the calculation if -C IPRINT=1. Otherwise each new value of RHO and SIGMA is printed. -C Further, the vector of variables and some function information are -C given either when RHO is reduced or when each new value of F(X) is -C computed in the cases IPRINT=2 or IPRINT=3 respectively. 
Here SIGMA -C is a penalty parameter, it being assumed that a change to X is an -C improvement if it reduces the merit function -C F(X)+SIGMA*MAX(0.0,-C1(X),-C2(X),...,-CM(X)), -C where C1,C2,...,CM denote the constraint functions that should become -C nonnegative eventually, at least to the precision of RHOEND. In the -C printed output the displayed term that is multiplied by SIGMA is -C called MAXCV, which stands for 'MAXimum Constraint Violation'. The -C argument MAXFUN is an integer variable that must be set by the user to a -C limit on the number of calls of CALCFC, the purpose of this routine being -C given below. The value of MAXFUN will be altered to the number of calls -C of CALCFC that are made. The arguments W and IACT provide real and -C integer arrays that are used as working space. Their lengths must be at -C least N*(3*N+2*M+11)+4*M+6 and M+1 respectively. -C -C In order to define the objective and constraint functions, we require -C a subroutine that has the name and arguments -C SUBROUTINE CALCFC (N,M,X,F,CON) -C DIMENSION X(*),CON(*) . -C The values of N and M are fixed and have been defined already, while -C X is now the current vector of variables. The subroutine should return -C the objective and constraint functions at X in F and CON(1),CON(2), -C ...,CON(M). Note that we are trying to adjust X so that F(X) is as -C small as possible subject to the constraint functions being nonnegative. -C -C Partition the working space array W to provide the storage that is needed -C for the main calculation. 
-C - MPP=M+2 - ICON=1 - ISIM=ICON+MPP - ISIMI=ISIM+N*N+N - IDATM=ISIMI+N*N - IA=IDATM+N*MPP+MPP - IVSIG=IA+M*N+N - IVETA=IVSIG+N - ISIGB=IVETA+N - IDX=ISIGB+N - IWORK=IDX+N - CALL COBYLB (CALCFC,N,M,MPP,X,RHOBEG,RHOEND,IPRINT,MAXFUN,W(ICON), - 1 W(ISIM),W(ISIMI),W(IDATM),W(IA),W(IVSIG),W(IVETA),W(ISIGB), - 2 W(IDX),W(IWORK),IACT) - RETURN - END -C------------------------------------------------------------------------------ - SUBROUTINE COBYLB (CALCFC,N,M,MPP,X,RHOBEG,RHOEND,IPRINT,MAXFUN, - 1 CON,SIM,SIMI,DATMAT,A,VSIG,VETA,SIGBAR,DX,W,IACT) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(*),CON(*),SIM(N,*),SIMI(N,*),DATMAT(MPP,*), - 1 A(N,*),VSIG(*),VETA(*),SIGBAR(*),DX(*),W(*),IACT(*) - EXTERNAL CALCFC -C -C Set the initial values of some parameters. The last column of SIM holds -C the optimal vertex of the current simplex, and the preceding N columns -C hold the displacements from the optimal vertex to the other vertices. -C Further, SIMI holds the inverse of the matrix that is contained in the -C first N columns of SIM. -C - ITOTAL=N*(3*N+2*M+11)+4*M+6 - IPTEM=MIN0(N,5) - IPTEMP=IPTEM+1 - NP=N+1 - MP=M+1 - ALPHA=0.25d0 - BETA=2.1d0 - GAMMA=0.5d0 - DELTA=1.1d0 - RHO=RHOBEG - PARMU=0.0d0 - -C Fix compiler warnings - IFLAG=1 - PARSIG=0 - SUM=0 - PREREC=0 - PREREM=0 - CMIN=0 - CMAX=0 - - IF (IPRINT .GE. 2) PRINT 10, RHO - 10 FORMAT (/3X,'The initial value of RHO is',1PE13.6,2X, - 1 'and PARMU is set to zero.') - NFVALS=0 - TEMP=1.0d0/RHO - DO 30 I=1,N - SIM(I,NP)=X(I) - DO 20 J=1,N - SIM(I,J)=0.0d0 - 20 SIMI(I,J)=0.0d0 - SIM(I,I)=RHO - 30 SIMI(I,I)=TEMP - JDROP=NP - IBRNCH=0 -C -C Make the next call of the user-supplied subroutine CALCFC. These -C instructions are also used for calling CALCFC during the iterations of -C the algorithm. -C - 40 IF (NFVALS .GE. MAXFUN .AND. NFVALS .GT. 0) THEN - IF (IPRINT .GE. 
1) PRINT 50 - 50 FORMAT (/3X,'Return from subroutine COBYLA because the ', - 1 'MAXFUN limit has been reached.') - GOTO 600 - END IF - NFVALS=NFVALS+1 - IF (IPRINT .EQ. 3) THEN - PRINT *, ' SIM = ', (SIM(J,NP),J=1,N) - PRINT *, ' DX = ', (DX(I),I=1,N) - PRINT *, ' BEFORE: ', N, M, (X(I),I=1,N), F, (CON(I),I=1,M) - END IF - CALL CALCFC (N,M,X,F,CON) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' AFTER: ', N, M, (X(I),I=1,N), F, (CON(I),I=1,M) - END IF - RESMAX=0.0d0 - IF (M .GT. 0) THEN - DO 60 K=1,M - 60 RESMAX=DMAX1(RESMAX,-CON(K)) - END IF - IF (NFVALS .EQ. IPRINT-1 .OR. IPRINT .EQ. 3) THEN - PRINT 70, NFVALS,F,RESMAX,(X(I),I=1,IPTEM) - 70 FORMAT (/3X,'NFVALS =',I5,3X,'F =',1PE13.6,4X,'MAXCV =', - 1 1PE13.6/3X,'X =',1PE13.6,1P4E15.6) - IF (IPTEM .LT. N) PRINT 80, (X(I),I=IPTEMP,N) - 80 FORMAT (1PE19.6,1P4E15.6) - END IF - CON(MP)=F - CON(MPP)=RESMAX - IF (IBRNCH .EQ. 1) GOTO 440 -C -C Set the recently calculated function values in a column of DATMAT. This -C array has a column for each vertex of the current simplex, the entries of -C each column being the values of the constraint functions (if any) -C followed by the objective function and the greatest constraint violation -C at the vertex. -C - DO 90 K=1,MPP - 90 DATMAT(K,JDROP)=CON(K) - IF (NFVALS .GT. NP) GOTO 130 -C -C Exchange the new vertex of the initial simplex with the optimal vertex if -C necessary. Then, if the initial simplex is not complete, pick its next -C vertex and calculate the function values there. -C - IF (JDROP .LE. N) THEN - IF (DATMAT(MP,NP) .LE. F) THEN - X(JDROP)=SIM(JDROP,NP) - ELSE - SIM(JDROP,NP)=X(JDROP) - DO 100 K=1,MPP - DATMAT(K,JDROP)=DATMAT(K,NP) - 100 DATMAT(K,NP)=CON(K) - DO 120 K=1,JDROP - SIM(JDROP,K)=-RHO - TEMP=0.0 - DO 110 I=K,JDROP - 110 TEMP=TEMP-SIMI(I,K) - 120 SIMI(JDROP,K)=TEMP - END IF - END IF - IF (NFVALS .LE. N) THEN - JDROP=NFVALS - X(JDROP)=X(JDROP)+RHO - GOTO 40 - END IF - 130 IBRNCH=1 -C -C Identify the optimal vertex of the current simplex. 
-C - 140 PHIMIN=DATMAT(MP,NP)+PARMU*DATMAT(MPP,NP) - NBEST=NP - DO 150 J=1,N - TEMP=DATMAT(MP,J)+PARMU*DATMAT(MPP,J) - IF (TEMP .LT. PHIMIN) THEN - NBEST=J - PHIMIN=TEMP - ELSE IF (TEMP .EQ. PHIMIN .AND. PARMU .EQ. 0.0d0) THEN - IF (DATMAT(MPP,J) .LT. DATMAT(MPP,NBEST)) NBEST=J - END IF - 150 CONTINUE -C -C Switch the best vertex into pole position if it is not there already, -C and also update SIM, SIMI and DATMAT. -C - IF (NBEST .LE. N) THEN - DO 160 I=1,MPP - TEMP=DATMAT(I,NP) - DATMAT(I,NP)=DATMAT(I,NBEST) - 160 DATMAT(I,NBEST)=TEMP - DO 180 I=1,N - TEMP=SIM(I,NBEST) - SIM(I,NBEST)=0.0d0 - SIM(I,NP)=SIM(I,NP)+TEMP - TEMPA=0.0d0 - DO 170 K=1,N - SIM(I,K)=SIM(I,K)-TEMP - 170 TEMPA=TEMPA-SIMI(K,I) - 180 SIMI(NBEST,I)=TEMPA - END IF -C -C Make an error return if SIGI is a poor approximation to the inverse of -C the leading N by N submatrix of SIG. -C - ERROR=0.0d0 - DO 200 I=1,N - DO 200 J=1,N - TEMP=0.0d0 - IF (I .EQ. J) TEMP=TEMP-1.0d0 - DO 190 K=1,N - 190 TEMP=TEMP+SIMI(I,K)*SIM(K,J) - 200 ERROR=DMAX1(ERROR,ABS(TEMP)) - IF (ERROR .GT. 0.1d0) THEN - IF (IPRINT .GE. 1) PRINT 210 - 210 FORMAT (/3X,'Return from subroutine COBYLA because ', - 1 'rounding errors are becoming damaging.') - GOTO 600 - END IF -C -C Calculate the coefficients of the linear approximations to the objective -C and constraint functions, placing minus the objective function gradient -C after the constraint gradients in the array A. The vector W is used for -C working space. -C - DO 240 K=1,MP - CON(K)=-DATMAT(K,NP) - DO 220 J=1,N - 220 W(J)=DATMAT(K,J)+CON(K) - DO 240 I=1,N - TEMP=0.0d0 - DO 230 J=1,N - 230 TEMP=TEMP+W(J)*SIMI(J,I) - IF (K .EQ. MP) TEMP=-TEMP - 240 A(I,K)=TEMP -C -C Calculate the values of sigma and eta, and set IFLAG=0 if the current -C simplex is not acceptable. 
-C - IFLAG=1 - PARSIG=ALPHA*RHO - PARETA=BETA*RHO - DO 260 J=1,N - WSIG=0.0d0 - WETA=0.0d0 - DO 250 I=1,N - WSIG=WSIG+SIMI(J,I)**2 - 250 WETA=WETA+SIM(I,J)**2 - VSIG(J)=1.0d0/SQRT(WSIG) - VETA(J)=SQRT(WETA) - IF (VSIG(J) .LT. PARSIG .OR. VETA(J) .GT. PARETA) IFLAG=0 - 260 CONTINUE - IF (IPRINT .EQ. 3) THEN - print *, ' SIMI = ', ((SIMI(I,J),I=1,N),J=1,N) - print *, ' SIM = ', ((SIM(I,J),I=1,N),J=1,N) - PRINT *, ' VSIG = ', (VSIG(J),J=1,N), ' -- ', PARSIG - PRINT *, ' VETA = ', (VETA(J),J=1,N), ' -- ', PARETA - PRINT *, ' IBRNCH, IFLAG = ', IBRNCH, IFLAG - PRINT *, ' A = ', ((A(I,J),I=1,N),J=1,MP) - END IF -C -C If a new vertex is needed to improve acceptability, then decide which -C vertex to drop from the simplex. -C - IF (IBRNCH .EQ. 1 .OR. IFLAG .EQ. 1) GOTO 370 - JDROP=0 - TEMP=PARETA - DO 270 J=1,N - IF (VETA(J) .GT. TEMP) THEN - JDROP=J - TEMP=VETA(J) - END IF - 270 CONTINUE - IF (JDROP .EQ. 0) THEN - DO 280 J=1,N - IF (VSIG(J) .LT. TEMP) THEN - JDROP=J - TEMP=VSIG(J) - END IF - 280 CONTINUE - END IF -C -C Calculate the step to the new vertex and its sign. -C - TEMP=GAMMA*RHO*VSIG(JDROP) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' SIMI =', (SIMI(JDROP,I),I=1,N) - END IF - DO 290 I=1,N - 290 DX(I)=TEMP*SIMI(JDROP,I) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' DX =', (DX(I),I=1,N) - END IF - CVMAXP=0.0d0 - CVMAXM=0.0d0 - DO 310 K=1,MP - SUM=0.0d0 - DO 300 I=1,N - 300 SUM=SUM+A(I,K)*DX(I) - IF (K .LT. MP) THEN - TEMP=DATMAT(K,NP) - CVMAXP=DMAX1(CVMAXP,-SUM-TEMP) - CVMAXM=DMAX1(CVMAXM,SUM-TEMP) - END IF - 310 CONTINUE - DXSIGN=1.0d0 - IF (PARMU*(CVMAXP-CVMAXM) .GT. SUM+SUM) DXSIGN=-1.0d0 -C -C Update the elements of SIM and SIMI, and set the next X. -C - TEMP=0.0d0 - DO 320 I=1,N - DX(I)=DXSIGN*DX(I) - SIM(I,JDROP)=DX(I) - 320 TEMP=TEMP+SIMI(JDROP,I)*DX(I) - DO 330 I=1,N - 330 SIMI(JDROP,I)=SIMI(JDROP,I)/TEMP - DO 360 J=1,N - IF (J .NE. 
JDROP) THEN - TEMP=0.0d0 - DO 340 I=1,N - 340 TEMP=TEMP+SIMI(J,I)*DX(I) - DO 350 I=1,N - 350 SIMI(J,I)=SIMI(J,I)-TEMP*SIMI(JDROP,I) - END IF - 360 X(J)=SIM(J,NP)+DX(J) - GOTO 40 -C -C Calculate DX=x(*)-x(0). Branch if the length of DX is less than 0.5*RHO. -C - 370 IZ=1 - IZDOTA=IZ+N*N - IVMC=IZDOTA+N - ISDIRN=IVMC+MP - IDXNEW=ISDIRN+N - IVMD=IDXNEW+N - CALL TRSTLP (N,M,A,CON,RHO,DX,IFULL,IACT,W(IZ),W(IZDOTA), - 1 W(IVMC),W(ISDIRN),W(IDXNEW),W(IVMD),IPRINT) - IF (IFULL .EQ. 0) THEN - TEMP=0.0d0 - DO 380 I=1,N - 380 TEMP=TEMP+DX(I)**2 - IF (TEMP .LT. 0.25d0*RHO*RHO) THEN - IBRNCH=1 - GOTO 550 - END IF - END IF -C -C Predict the change to F and the new maximum constraint violation if the -C variables are altered from x(0) to x(0)+DX. -C - RESNEW=0.0d0 - CON(MP)=0.0d0 - DO 400 K=1,MP - SUM=CON(K) - DO 390 I=1,N - 390 SUM=SUM-A(I,K)*DX(I) - IF (K .LT. MP) RESNEW=DMAX1(RESNEW,SUM) - 400 CONTINUE -C -C Increase PARMU if necessary and branch back if this change alters the -C optimal vertex. Otherwise PREREM and PREREC will be set to the predicted -C reductions in the merit function and the maximum constraint violation -C respectively. -C - BARMU=0.0d0 - PREREC=DATMAT(MPP,NP)-RESNEW - IF (PREREC .GT. 0.0d0) BARMU=SUM/PREREC - IF (PARMU .LT. 1.5d0*BARMU) THEN - PARMU=2.0d0*BARMU - IF (IPRINT .GE. 2) PRINT 410, PARMU - 410 FORMAT (/3X,'Increase in PARMU to',1PE13.6) - PHI=DATMAT(MP,NP)+PARMU*DATMAT(MPP,NP) - DO 420 J=1,N - TEMP=DATMAT(MP,J)+PARMU*DATMAT(MPP,J) - IF (TEMP .LT. PHI) GOTO 140 - IF (TEMP .EQ. PHI .AND. PARMU .EQ. 0.0) THEN - IF (DATMAT(MPP,J) .LT. DATMAT(MPP,NP)) GOTO 140 - END IF - 420 CONTINUE - END IF - PREREM=PARMU*PREREC-SUM -C -C Calculate the constraint and objective functions at x(*). Then find the -C actual reduction in the merit function. -C - DO 430 I=1,N - 430 X(I)=SIM(I,NP)+DX(I) - IBRNCH=1 - GOTO 40 - 440 VMOLD=DATMAT(MP,NP)+PARMU*DATMAT(MPP,NP) - VMNEW=F+PARMU*RESMAX - TRURED=VMOLD-VMNEW - IF (PARMU .EQ. 0.0d0 .AND. F .EQ. 
DATMAT(MP,NP)) THEN - PREREM=PREREC - TRURED=DATMAT(MPP,NP)-RESMAX - END IF -C -C Begin the operations that decide whether x(*) should replace one of the -C vertices of the current simplex, the change being mandatory if TRURED is -C positive. Firstly, JDROP is set to the index of the vertex that is to be -C replaced. -C - RATIO=0.0d0 - IF (TRURED .LE. 0.0) RATIO=1.0 - JDROP=0 - DO 460 J=1,N - TEMP=0.0d0 - DO 450 I=1,N - 450 TEMP=TEMP+SIMI(J,I)*DX(I) - TEMP=ABS(TEMP) - IF (TEMP .GT. RATIO) THEN - JDROP=J - RATIO=TEMP - END IF - 460 SIGBAR(J)=TEMP*VSIG(J) -C -C Calculate the value of ell. -C - EDGMAX=DELTA*RHO - L=0 - DO 480 J=1,N - IF (SIGBAR(J) .GE. PARSIG .OR. SIGBAR(J) .GE. VSIG(J)) THEN - TEMP=VETA(J) - IF (TRURED .GT. 0.0d0) THEN - TEMP=0.0d0 - DO 470 I=1,N - 470 TEMP=TEMP+(DX(I)-SIM(I,J))**2 - TEMP=SQRT(TEMP) - END IF - IF (TEMP .GT. EDGMAX) THEN - L=J - EDGMAX=TEMP - END IF - END IF - 480 CONTINUE - IF (L .GT. 0) JDROP=L - IF (JDROP .EQ. 0) GOTO 550 -C -C Revise the simplex by updating the elements of SIM, SIMI and DATMAT. -C - TEMP=0.0d0 - DO 490 I=1,N - SIM(I,JDROP)=DX(I) - 490 TEMP=TEMP+SIMI(JDROP,I)*DX(I) - DO 500 I=1,N - 500 SIMI(JDROP,I)=SIMI(JDROP,I)/TEMP - DO 530 J=1,N - IF (J .NE. JDROP) THEN - TEMP=0.0d0 - DO 510 I=1,N - 510 TEMP=TEMP+SIMI(J,I)*DX(I) - DO 520 I=1,N - 520 SIMI(J,I)=SIMI(J,I)-TEMP*SIMI(JDROP,I) - END IF - 530 CONTINUE - DO 540 K=1,MPP - 540 DATMAT(K,JDROP)=CON(K) -C -C Branch back for further iterations with the current RHO. -C - IF (TRURED .GT. 0.0d0 .AND. TRURED .GE. 0.1d0*PREREM) GOTO 140 - 550 IF (IFLAG .EQ. 0) THEN - IBRNCH=0 - GOTO 140 - END IF -C -C Otherwise reduce RHO if it is not at its least value and reset PARMU. -C - IF (RHO .GT. RHOEND) THEN - RHO=0.5d0*RHO - IF (RHO .LE. 1.5d0*RHOEND) RHO=RHOEND - IF (PARMU .GT. 0.0d0) THEN - DENOM=0.0d0 - DO 570 K=1,MP - CMIN=DATMAT(K,NP) - CMAX=CMIN - DO 560 I=1,N - CMIN=DMIN1(CMIN,DATMAT(K,I)) - 560 CMAX=DMAX1(CMAX,DATMAT(K,I)) - IF (K .LE. M .AND. CMIN .LT. 
0.5d0*CMAX) THEN - TEMP=DMAX1(CMAX,0.0d0)-CMIN - IF (DENOM .LE. 0.0d0) THEN - DENOM=TEMP - ELSE - DENOM=DMIN1(DENOM,TEMP) - END IF - END IF - 570 CONTINUE - IF (DENOM .EQ. 0.0d0) THEN - PARMU=0.0d0 - ELSE IF (CMAX-CMIN .LT. PARMU*DENOM) THEN - PARMU=(CMAX-CMIN)/DENOM - END IF - END IF - IF (IPRINT .GE. 2) PRINT 580, RHO,PARMU - 580 FORMAT (/3X,'Reduction in RHO to',1PE13.6,' and PARMU =', - 1 1PE13.6) - IF (IPRINT .EQ. 2) THEN - PRINT 70, NFVALS,DATMAT(MP,NP),DATMAT(MPP,NP), - 1 (SIM(I,NP),I=1,IPTEM) - IF (IPTEM .LT. N) PRINT 80, (X(I),I=IPTEMP,N) - END IF - GOTO 140 - END IF -C -C Return the best calculated values of the variables. -C - IF (IPRINT .GE. 1) PRINT 590 - 590 FORMAT (/3X,'Normal return from subroutine COBYLA') - IF (IFULL .EQ. 1) GOTO 620 - 600 DO 610 I=1,N - 610 X(I)=SIM(I,NP) - F=DATMAT(MP,NP) - RESMAX=DATMAT(MPP,NP) - 620 IF (IPRINT .GE. 1) THEN - PRINT 70, NFVALS,F,RESMAX,(X(I),I=1,IPTEM) - IF (IPTEM .LT. N) PRINT 80, (X(I),I=IPTEMP,N) - END IF - MAXFUN=NFVALS - RETURN - END diff --git a/scipy-0.10.1/scipy/optimize/cobyla/trstlp.f b/scipy-0.10.1/scipy/optimize/cobyla/trstlp.f deleted file mode 100644 index 1e5a5a0d6c..0000000000 --- a/scipy-0.10.1/scipy/optimize/cobyla/trstlp.f +++ /dev/null @@ -1,518 +0,0 @@ -C------------------------------------------------------------------------------ - SUBROUTINE TRSTLP (N,M,A,B,RHO,DX,IFULL,IACT,Z,ZDOTA,VMULTC, - 1 SDIRN,DXNEW,VMULTD,IPRINT) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DOUBLE PRECISION TEMP - DIMENSION A(N,*),B(*),DX(*),IACT(*),Z(N,*),ZDOTA(*), - 1 VMULTC(*),SDIRN(*),DXNEW(*),VMULTD(*) -C -C This subroutine calculates an N-component vector DX by applying the -C following two stages. In the first stage, DX is set to the shortest -C vector that minimizes the greatest violation of the constraints -C A(1,K)*DX(1)+A(2,K)*DX(2)+...+A(N,K)*DX(N) .GE. B(K), K=2,3,...,M, -C subject to the Euclidean length of DX being at most RHO. 
If its length is -C strictly less than RHO, then we use the resultant freedom in DX to -C minimize the objective function -C -A(1,M+1)*DX(1)-A(2,M+1)*DX(2)-...-A(N,M+1)*DX(N) -C subject to no increase in any greatest constraint violation. This -C notation allows the gradient of the objective function to be regarded as -C the gradient of a constraint. Therefore the two stages are distinguished -C by MCON .EQ. M and MCON .GT. M respectively. It is possible that a -C degeneracy may prevent DX from attaining the target length RHO. Then the -C value IFULL=0 would be set, but usually IFULL=1 on return. -C -C In general NACT is the number of constraints in the active set and -C IACT(1),...,IACT(NACT) are their indices, while the remainder of IACT -C contains a permutation of the remaining constraint indices. Further, Z is -C an orthogonal matrix whose first NACT columns can be regarded as the -C result of Gram-Schmidt applied to the active constraint gradients. For -C J=1,2,...,NACT, the number ZDOTA(J) is the scalar product of the J-th -C column of Z with the gradient of the J-th active constraint. DX is the -C current vector of variables and here the residuals of the active -C constraints should be zero. Further, the active constraints have -C nonnegative Lagrange multipliers that are held at the beginning of -C VMULTC. The remainder of this vector holds the residuals of the inactive -C constraints at DX, the ordering of the components of VMULTC being in -C agreement with the permutation of the indices of the constraints that is -C in IACT. All these residuals are nonnegative, which is achieved by the -C shift RESMAX that makes the least residual zero. -C -C Initialize Z and some other variables. The value of RESMAX will be -C appropriate to DX=0, while ICON will be the index of a most violated -C constraint if RESMAX is positive. 
Usually during the first stage the -C vector SDIRN gives a search direction that reduces all the active -C constraint violations by one simultaneously. -C - - IF (IPRINT .EQ. 3) THEN - print *, ' ' - print *, 'BEFORE trstlp:' - PRINT *, ' **DX = ', (DX(I),I=1,N) - PRINT *, ' **IACT = ', (IACT(I),I=1,M+1) - PRINT *, 'M,N,RHO,IFULL =', M, N, RHO, IFULL - PRINT *, ' **A = ', ((A(I,K),I=1,N),K=1,M+1) - PRINT *, ' **B = ', (B(I),I=1,M) - PRINT *, ' **Z = ', ((Z(I,K),I=1,N),K=1,N) - PRINT *, ' **ZDOTA = ', (ZDOTA(I),I=1,N) - PRINT *, ' **VMULTC = ', (VMULTC(I),I=1,M+1) - PRINT *, ' **SDIRN = ', (SDIRN(I),I=1,N) - PRINT *, ' **DXNEW = ', (DXNEW(I),I=1,N) - PRINT *, ' **VMULTD = ', (VMULTD(I),I=1,M+1) - PRINT *, ' ' - END IF - - ICON=0 - NACTX=0 - RESOLD=0 - - IFULL=1 - MCON=M - NACT=0 - RESMAX=0.0d0 - DO 20 I=1,N - DO 10 J=1,N - 10 Z(I,J)=0.0d0 - Z(I,I)=1.0d0 - 20 DX(I)=0.0d0 - IF (M .GE. 1) THEN - DO 30 K=1,M - IF (B(K) .GT. RESMAX) THEN - RESMAX=B(K) - ICON=K - END IF - 30 CONTINUE - DO 40 K=1,M - IACT(K)=K - 40 VMULTC(K)=RESMAX-B(K) - END IF - IF (IPRINT .EQ. 3) THEN - PRINT *, ' 1. VMULTC = ', (VMULTC(I),I=1,M+1) - END IF - IF (RESMAX .EQ. 0.0d0) GOTO 480 - DO 50 I=1,N - 50 SDIRN(I)=0.0d0 -C -C End the current stage of the calculation if 3 consecutive iterations -C have either failed to reduce the best calculated value of the objective -C function or to increase the number of active constraints since the best -C value was calculated. This strategy prevents cycling, but there is a -C remote possibility that it will cause premature termination. -C - 60 OPTOLD=0.0d0 - ICOUNT=0 - 70 IF (MCON .EQ. M) THEN - OPTNEW=RESMAX - ELSE - OPTNEW=0.0d0 - DO 80 I=1,N - 80 OPTNEW=OPTNEW-DX(I)*A(I,MCON) - END IF - IF (IPRINT .EQ. 3) THEN - PRINT *, ' ICOUNT, OPTNEW, OPTOLD = ', ICOUNT, OPTNEW, OPTOLD - END IF - IF (ICOUNT .EQ. 0 .OR. OPTNEW .LT. OPTOLD) THEN - OPTOLD=OPTNEW - NACTX=NACT - ICOUNT=3 - ELSE IF (NACT .GT. 
NACTX) THEN - NACTX=NACT - ICOUNT=3 - ELSE - ICOUNT=ICOUNT-1 - IF (ICOUNT .EQ. 0) GOTO 490 - END IF -C -C If ICON exceeds NACT, then we add the constraint with index IACT(ICON) to -C the active set. Apply Givens rotations so that the last N-NACT-1 columns -C of Z are orthogonal to the gradient of the new constraint, a scalar -C product being set to zero if its nonzero value could be due to computer -C rounding errors. The array DXNEW is used for working space. -C - IF (ICON .LE. NACT) GOTO 260 - KK=IACT(ICON) - DO 90 I=1,N - 90 DXNEW(I)=A(I,KK) - TOT=0.0D0 - K=N - 100 IF (K .GT. NACT) THEN - SP=0.0d0 - SPABS=0.0d0 - DO 110 I=1,N - TEMP=Z(I,K)*DXNEW(I) - SP=SP+TEMP - 110 SPABS=SPABS+DABS(TEMP) - ACCA=SPABS+0.1d0*DABS(SP) - ACCB=SPABS+0.2d0*DABS(SP) - IF ((SPABS .GE. ACCA) .OR. (ACCA .GE. ACCB)) SP=0.0D0 - IF (TOT .EQ. 0.0D0) THEN - TOT=SP - ELSE - KP=K+1 - TEMP=DSQRT(SP*SP+TOT*TOT) - ALPHA=SP/TEMP - BETA=TOT/TEMP - TOT=TEMP - DO 120 I=1,N - TEMP=ALPHA*Z(I,K)+BETA*Z(I,KP) - Z(I,KP)=ALPHA*Z(I,KP)-BETA*Z(I,K) - 120 Z(I,K)=TEMP - END IF - K=K-1 - GOTO 100 - END IF -C -C Add the new constraint if this can be done without a deletion from the -C active set. -C - IF (IPRINT .EQ. 3) THEN - PRINT *, '*TOT, NACT, ICON = ', TOT, NACT, ICON - END IF - IF (TOT .NE. 0.0d0) THEN - NACT=NACT+1 - ZDOTA(NACT)=TOT - VMULTC(ICON)=VMULTC(NACT) - VMULTC(NACT)=0.0d0 - GOTO 210 - END IF -C -C The next instruction is reached if a deletion has to be made from the -C active set in order to make room for the new active constraint, because -C the new constraint gradient is a linear combination of the gradients of -C the old active constraints. Set the elements of VMULTD to the multipliers -C of the linear combination. Further, set IOUT to the index of the -C constraint to be deleted, but branch if no suitable index can be found. 
-C - RATIO=-1.0d0 - K=NACT - 130 ZDOTV=0.0d0 - ZDVABS=0.0d0 - DO 140 I=1,N - TEMP=Z(I,K)*DXNEW(I) - ZDOTV=ZDOTV+TEMP - 140 ZDVABS=ZDVABS+DABS(TEMP) - ACCA=ZDVABS+0.1d0*DABS(ZDOTV) - ACCB=ZDVABS+0.2d0*DABS(ZDOTV) - IF (ZDVABS .LT. ACCA .AND. ACCA .LT. ACCB) THEN - TEMP=ZDOTV/ZDOTA(K) - IF (TEMP .GT. 0.0d0 .AND. IACT(K) .LE. M) THEN - TEMPA=VMULTC(K)/TEMP - IF (RATIO .LT. 0.0d0 .OR. TEMPA .LT. RATIO) THEN - RATIO=TEMPA - IOUT=K - END IF - END IF - IF (K .GE. 2) THEN - KW=IACT(K) - DO 150 I=1,N - 150 DXNEW(I)=DXNEW(I)-TEMP*A(I,KW) - END IF - VMULTD(K)=TEMP - ELSE - VMULTD(K)=0.0d0 - END IF - K=K-1 - IF (K .GT. 0) GOTO 130 - IF (IPRINT .EQ. 3) THEN - PRINT *, ' 1. VMULTD = ', (VMULTD(I),I=1,M+1) - END IF - IF (RATIO .LT. 0.0d0) GOTO 490 -C -C Revise the Lagrange multipliers and reorder the active constraints so -C that the one to be replaced is at the end of the list. Also calculate the -C new value of ZDOTA(NACT) and branch if it is not acceptable. -C - DO 160 K=1,NACT - 160 VMULTC(K)=DMAX1(0.0d0,VMULTC(K)-RATIO*VMULTD(K)) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' 2. VMULTC = ', (VMULTC(I),I=1,M+1) - END IF - IF (ICON .LT. NACT) THEN - ISAVE=IACT(ICON) - VSAVE=VMULTC(ICON) - K=ICON - 170 KP=K+1 - KW=IACT(KP) - SP=0.0d0 - DO 180 I=1,N - 180 SP=SP+Z(I,K)*A(I,KW) - TEMP=SQRT(SP*SP+ZDOTA(KP)**2) - ALPHA=ZDOTA(KP)/TEMP - BETA=SP/TEMP - ZDOTA(KP)=ALPHA*ZDOTA(K) - ZDOTA(K)=TEMP - DO 190 I=1,N - TEMP=ALPHA*Z(I,KP)+BETA*Z(I,K) - Z(I,KP)=ALPHA*Z(I,K)-BETA*Z(I,KP) - 190 Z(I,K)=TEMP - IACT(K)=KW - VMULTC(K)=VMULTC(KP) - K=KP - IF (K .LT. NACT) GOTO 170 - IACT(K)=ISAVE - VMULTC(K)=VSAVE - END IF - TEMP=0.0d0 - DO 200 I=1,N - 200 TEMP=TEMP+Z(I,NACT)*A(I,KK) - IF (TEMP .EQ. 0.0d0) GOTO 490 - ZDOTA(NACT)=TEMP - VMULTC(ICON)=0.0d0 - VMULTC(NACT)=RATIO -C -C Update IACT and ensure that the objective function continues to be -C treated as the last active constraint when MCON>M. -C - 210 IACT(ICON)=IACT(NACT) - IACT(NACT)=KK - IF (MCON .GT. M .AND. KK .NE. 
MCON) THEN - K=NACT-1 - SP=0.0d0 - DO 220 I=1,N - 220 SP=SP+Z(I,K)*A(I,KK) - TEMP=SQRT(SP*SP+ZDOTA(NACT)**2) - ALPHA=ZDOTA(NACT)/TEMP - BETA=SP/TEMP - ZDOTA(NACT)=ALPHA*ZDOTA(K) - ZDOTA(K)=TEMP - DO 230 I=1,N - TEMP=ALPHA*Z(I,NACT)+BETA*Z(I,K) - Z(I,NACT)=ALPHA*Z(I,K)-BETA*Z(I,NACT) - 230 Z(I,K)=TEMP - IACT(NACT)=IACT(K) - IACT(K)=KK - TEMP=VMULTC(K) - VMULTC(K)=VMULTC(NACT) - VMULTC(NACT)=TEMP - END IF -C -C If stage one is in progress, then set SDIRN to the direction of the next -C change to the current vector of variables. -C - IF (MCON .GT. M) GOTO 320 - KK=IACT(NACT) - TEMP=0.0d0 - DO 240 I=1,N - 240 TEMP=TEMP+SDIRN(I)*A(I,KK) - TEMP=TEMP-1.0d0 - TEMP=TEMP/ZDOTA(NACT) - DO 250 I=1,N - 250 SDIRN(I)=SDIRN(I)-TEMP*Z(I,NACT) - GOTO 340 -C -C Delete the constraint that has the index IACT(ICON) from the active set. -C - 260 IF (ICON .LT. NACT) THEN - ISAVE=IACT(ICON) - VSAVE=VMULTC(ICON) - K=ICON - 270 KP=K+1 - KK=IACT(KP) - SP=0.0d0 - DO 280 I=1,N - 280 SP=SP+Z(I,K)*A(I,KK) - TEMP=SQRT(SP*SP+ZDOTA(KP)**2) - ALPHA=ZDOTA(KP)/TEMP - BETA=SP/TEMP - ZDOTA(KP)=ALPHA*ZDOTA(K) - ZDOTA(K)=TEMP - DO 290 I=1,N - TEMP=ALPHA*Z(I,KP)+BETA*Z(I,K) - Z(I,KP)=ALPHA*Z(I,K)-BETA*Z(I,KP) - 290 Z(I,K)=TEMP - IACT(K)=KK - VMULTC(K)=VMULTC(KP) - K=KP - IF (K .LT. NACT) GOTO 270 - IACT(K)=ISAVE - VMULTC(K)=VSAVE - END IF - NACT=NACT-1 -C -C If stage one is in progress, then set SDIRN to the direction of the next -C change to the current vector of variables. -C - IF (MCON .GT. M) GOTO 320 - TEMP=0.0d0 - DO 300 I=1,N - 300 TEMP=TEMP+SDIRN(I)*Z(I,NACT+1) - DO 310 I=1,N - 310 SDIRN(I)=SDIRN(I)-TEMP*Z(I,NACT+1) - GO TO 340 -C -C Pick the next search direction of stage two. -C - 320 TEMP=1.0d0/ZDOTA(NACT) - DO 330 I=1,N - 330 SDIRN(I)=TEMP*Z(I,NACT) -C -C Calculate the step to the boundary of the trust region or take the step -C that reduces RESMAX to zero. The two statements below that include the -C factor 1.0E-6 prevent some harmless underflows that occurred in a test -C calculation. 
Further, we skip the step if it could be zero within a -C reasonable tolerance for computer rounding errors. -C - 340 DD=RHO*RHO - SD=0.0d0 - SS=0.0d0 - DO 350 I=1,N - IF (ABS(DX(I)) .GE. 1.0E-6*RHO) DD=DD-DX(I)**2 - SD=SD+DX(I)*SDIRN(I) - 350 SS=SS+SDIRN(I)**2 - IF (DD .LE. 0.0d0) GOTO 490 - TEMP=SQRT(SS*DD) - IF (ABS(SD) .GE. 1.0E-6*TEMP) TEMP=SQRT(SS*DD+SD*SD) - STPFUL=DD/(TEMP+SD) - STEP=STPFUL - IF (MCON .EQ. M) THEN - ACCA=STEP+0.1d0*RESMAX - ACCB=STEP+0.2d0*RESMAX - IF (STEP .GE. ACCA .OR. ACCA .GE. ACCB) GOTO 480 - STEP=DMIN1(STEP,RESMAX) - END IF -C -C Set DXNEW to the new variables if STEP is the steplength, and reduce -C RESMAX to the corresponding maximum residual if stage one is being done. -C Because DXNEW will be changed during the calculation of some Lagrange -C multipliers, it will be restored to the following value later. - call s360_380(DXNEW,DX,STEP,SDIRN,N,M,MCON,RESMAX, - 1 NACT,IACT,B,A,RESOLD) - -C -C Set VMULTD to the VMULTC vector that would occur if DX became DXNEW. A -C device is included to force VMULTD(K)=0.0 if deviations from this value -C can be attributed to computer rounding errors. First calculate the new -C Lagrange multipliers. -C - K=NACT - 390 ZDOTW=0.0d0 - ZDWABS=0.0d0 - DO 400 I=1,N - TEMP=Z(I,K)*DXNEW(I) - ZDOTW=ZDOTW+TEMP - 400 ZDWABS=ZDWABS+ABS(TEMP) - ACCA=ZDWABS+0.1d0*ABS(ZDOTW) - ACCB=ZDWABS+0.2d0*ABS(ZDOTW) - IF (ZDWABS .GE. ACCA .OR. ACCA .GE. ACCB) ZDOTW=0.0d0 - VMULTD(K)=ZDOTW/ZDOTA(K) - IF (K .GE. 2) THEN - KK=IACT(K) - DO 410 I=1,N - 410 DXNEW(I)=DXNEW(I)-VMULTD(K)*A(I,KK) - K=K-1 - GOTO 390 - END IF - IF (MCON .GT. M) VMULTD(NACT)=DMAX1(0.0d0,VMULTD(NACT)) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' 2. VMULTD = ', (VMULTD(I),I=1,M+1) - END IF -C -C Complete VMULTC by finding the new constraint residuals. -C - DO 420 I=1,N - 420 DXNEW(I)=DX(I)+STEP*SDIRN(I) - IF (MCON .GT. 
NACT) THEN - KL=NACT+1 - DO 440 K=KL,MCON - KK=IACT(K) - SUM=RESMAX-B(KK) - SUMABS=RESMAX+DABS(B(KK)) - DO 430 I=1,N - TEMP=A(I,KK)*DXNEW(I) - SUM=SUM+TEMP - 430 SUMABS=SUMABS+DABS(TEMP) - ACCA=SUMABS+0.1*DABS(SUM) - ACCB=SUMABS+0.2*DABS(SUM) - IF (SUMABS .GE. ACCA .OR. ACCA .GE. ACCB) SUM=0.0 - 440 VMULTD(K)=SUM - END IF - IF (IPRINT .EQ. 3) THEN - PRINT *, ' 3. VMULTD = ', (VMULTD(I),I=1,M+1) - END IF -C -C Calculate the fraction of the step from DX to DXNEW that will be taken. -C - RATIO=1.0d0 - ICON=0 -C - EPS = 2.2E-16 - DO 450 K=1,MCON - IF (VMULTD(K) .GT. -EPS .AND. VMULTD(K) .LT. EPS) VMULTD(K)=0.0D0 - IF (VMULTD(K) .LT. 0.0D0) THEN - TEMP=VMULTC(K)/(VMULTC(K)-VMULTD(K)) - IF (TEMP .LT. RATIO) THEN - RATIO=TEMP - ICON=K - END IF - END IF - 450 CONTINUE -C -C Update DX, VMULTC and RESMAX. -C - TEMP=1.0d0-RATIO - DO 460 I=1,N - 460 DX(I)=TEMP*DX(I)+RATIO*DXNEW(I) - DO 470 K=1,MCON - 470 VMULTC(K)=DMAX1(0.0d0,TEMP*VMULTC(K)+RATIO*VMULTD(K)) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' 3. VMULTC = ', (VMULTC(I),I=1,M+1) - END IF - IF (MCON .EQ. M) RESMAX=RESOLD+RATIO*(RESMAX-RESOLD) - IF (IPRINT .EQ. 3) THEN - PRINT *, ' RESMAX, MCON, M, ICON = ', - 1 RESMAX, MCON, M, ICON - END IF -C -C If the full step is not acceptable then begin another iteration. -C Otherwise switch to stage two or end the calculation. -C - IF (ICON .GT. 0) GOTO 70 - IF (STEP .EQ. STPFUL) GOTO 500 - 480 MCON=M+1 - ICON=MCON - IACT(MCON)=MCON - VMULTC(MCON)=0.0d0 - GOTO 60 -C -C We employ any freedom that may be available to reduce the objective -C function before returning a DX whose length is less than RHO. -C - 490 IF (MCON .EQ. M) GOTO 480 - IFULL=0 - 500 CONTINUE - IF (IPRINT .EQ. 
3) THEN - print *, ' ' - print *, 'AFTER trstlp:' - PRINT *, ' **DX = ', (DX(I),I=1,N) - PRINT *, ' **IACT = ', (IACT(I),I=1,M+1) - PRINT *, 'M,N,RHO,IFULL =', M, N, RHO, IFULL - PRINT *, ' **A = ', ((A(I,K),I=1,N),K=1,M+1) - PRINT *, ' **B = ', (B(I),I=1,M) - PRINT *, ' **Z = ', ((Z(I,K),I=1,N),K=1,N) - PRINT *, ' **ZDOTA = ', (ZDOTA(I),I=1,N) - PRINT *, ' **VMULTC = ', (VMULTC(I),I=1,M+1) - PRINT *, ' **SDIRN = ', (SDIRN(I),I=1,N) - PRINT *, ' **DXNEW = ', (DXNEW(I),I=1,N) - PRINT *, ' **VMULTD = ', (VMULTD(I),I=1,M+1) - PRINT *, ' ' - END IF -C 500 RETURN - END - - subroutine s360_380(DXNEW,DX,STEP,SDIRN,N,M,MCON,RESMAX, - 1 NACT,IACT,B,A,RESOLD) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(N,*),B(*),DX(*),IACT(*), SDIRN(*),DXNEW(*) - DO 360 I=1,N - 360 DXNEW(I)=DX(I)+STEP*SDIRN(I) - IF (MCON .EQ. M) THEN - RESOLD=RESMAX - RESMAX=0.0d0 - DO 380 K=1,NACT - KK=IACT(K) - TEMP=B(KK) - DO 370 I=1,N - 370 TEMP=TEMP-A(I,KK)*DXNEW(I) - RESMAX=DMAX1(RESMAX,TEMP) - 380 CONTINUE - END IF - end diff --git a/scipy-0.10.1/scipy/optimize/lbfgsb.py b/scipy-0.10.1/scipy/optimize/lbfgsb.py deleted file mode 100755 index 0ebc8ce7da..0000000000 --- a/scipy-0.10.1/scipy/optimize/lbfgsb.py +++ /dev/null @@ -1,265 +0,0 @@ - -## License for the Python wrapper -## ============================== - -## Copyright (c) 2004 David M. Cooke - -## Permission is hereby granted, free of charge, to any person obtaining a copy of -## this software and associated documentation files (the "Software"), to deal in -## the Software without restriction, including without limitation the rights to -## use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -## of the Software, and to permit persons to whom the Software is furnished to do -## so, subject to the following conditions: - -## The above copyright notice and this permission notice shall be included in all -## copies or substantial portions of the Software. 
- -## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -## SOFTWARE. - -## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy - -from numpy import array, asarray, float64, int32, zeros -import _lbfgsb -from optimize import approx_fprime -from numpy.compat import asbytes - -__all__ = ['fmin_l_bfgs_b'] - - -def fmin_l_bfgs_b(func, x0, fprime=None, args=(), - approx_grad=0, - bounds=None, m=10, factr=1e7, pgtol=1e-5, - epsilon=1e-8, - iprint=-1, maxfun=15000, disp=None): - """ - Minimize a function func using the L-BFGS-B algorithm. - - Parameters - ---------- - func : callable f(x,*args) - Function to minimise. - x0 : ndarray - Initial guess. - fprime : callable fprime(x,*args) - The gradient of `func`. If None, then `func` returns the function - value and the gradient (``f, g = func(x, *args)``), unless - `approx_grad` is True in which case `func` returns only ``f``. - args : sequence - Arguments to pass to `func` and `fprime`. - approx_grad : bool - Whether to approximate the gradient numerically (in which case - `func` returns only the function value). - bounds : list - ``(min, max)`` pairs for each element in ``x``, defining - the bounds on that parameter. Use None for one of ``min`` or - ``max`` when there is no bound in that direction. - m : int - The maximum number of variable metric corrections - used to define the limited memory matrix. (The limited memory BFGS - method does not store the full hessian but uses this many terms in an - approximation to it.) 
- factr : float - The iteration stops when - ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, - where ``eps`` is the machine precision, which is automatically - generated by the code. Typical values for `factr` are: 1e12 for - low accuracy; 1e7 for moderate accuracy; 10.0 for extremely - high accuracy. - pgtol : float - The iteration will stop when - ``max{|proj g_i | i = 1, ..., n} <= pgtol`` - where ``pg_i`` is the i-th component of the projected gradient. - epsilon : float - Step size used when `approx_grad` is True, for numerically - calculating the gradient - iprint : int - Controls the frequency of output. ``iprint < 0`` means no output. - disp : int, optional - If zero, then no output. If positive number, then this over-rides - `iprint`. - maxfun : int - Maximum number of function evaluations. - - Returns - ------- - x : array_like - Estimated position of the minimum. - f : float - Value of `func` at the minimum. - d : dict - Information dictionary. - - * d['warnflag'] is - - - 0 if converged, - - 1 if too many function evaluations, - - 2 if stopped for another reason, given in d['task'] - - * d['grad'] is the gradient at the minimum (should be 0 ish) - * d['funcalls'] is the number of function calls made. - - Notes - ----- - License of L-BFGS-B (Fortran code): - - The version included here (in fortran code) is 2.1 (released in 1997). - It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal - . It carries the following condition for use: - - This software is freely available, but we expect that all publications - describing work using this software , or all commercial products using it, - quote at least one of the references given below. - - References - ---------- - * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound - Constrained Optimization, (1995), SIAM Journal on Scientific and - Statistical Computing , 16, 5, pp. 1190-1208. - * C. Zhu, R. H. Byrd and J. Nocedal. 
L-BFGS-B: Algorithm 778: L-BFGS-B, - FORTRAN routines for large scale bound constrained optimization (1997), - ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. - - """ - x0 = asarray(x0).ravel() - n, = x0.shape - - if bounds is None: - bounds = [(None,None)] * n - if len(bounds) != n: - raise ValueError('length of x0 != length of bounds') - - if disp is not None: - if disp == 0: - iprint = -1 - else: - iprint = disp - - if approx_grad: - def func_and_grad(x): - f = func(x, *args) - g = approx_fprime(x, func, epsilon, *args) - return f, g - elif fprime is None: - def func_and_grad(x): - f, g = func(x, *args) - return f, g - else: - def func_and_grad(x): - f = func(x, *args) - g = fprime(x, *args) - return f, g - - nbd = zeros(n, int32) - low_bnd = zeros(n, float64) - upper_bnd = zeros(n, float64) - bounds_map = {(None, None): 0, - (1, None) : 1, - (1, 1) : 2, - (None, 1) : 3} - for i in range(0, n): - l,u = bounds[i] - if l is not None: - low_bnd[i] = l - l = 1 - if u is not None: - upper_bnd[i] = u - u = 1 - nbd[i] = bounds_map[l, u] - - x = array(x0, float64) - f = array(0.0, float64) - g = zeros((n,), float64) - wa = zeros(2*m*n+4*n + 12*m**2 + 12*m, float64) - iwa = zeros(3*n, int32) - task = zeros(1, 'S60') - csave = zeros(1,'S60') - lsave = zeros(4, int32) - isave = zeros(44, int32) - dsave = zeros(29, float64) - - task[:] = 'START' - - n_function_evals = 0 - while 1: -# x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ - _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, - pgtol, wa, iwa, task, iprint, csave, lsave, - isave, dsave) - task_str = task.tostring() - if task_str.startswith(asbytes('FG')): - # minimization routine wants f and g at the current x - n_function_evals += 1 - # Overwrite f and g: - f, g = func_and_grad(x) - elif task_str.startswith(asbytes('NEW_X')): - # new iteration - if n_function_evals > maxfun: - task[:] = 'STOP: TOTAL NO. 
of f AND g EVALUATIONS EXCEEDS LIMIT' - else: - break - - task_str = task.tostring().strip(asbytes('\x00')).strip() - if task_str.startswith(asbytes('CONV')): - warnflag = 0 - elif n_function_evals > maxfun: - warnflag = 1 - else: - warnflag = 2 - - - d = {'grad' : g, - 'task' : task_str, - 'funcalls' : n_function_evals, - 'warnflag' : warnflag - } - return x, f, d - -if __name__ == '__main__': - def func(x): - f = 0.25*(x[0]-1)**2 - for i in range(1, x.shape[0]): - f += (x[i] - x[i-1]**2)**2 - f *= 4 - return f - def grad(x): - g = zeros(x.shape, float64) - t1 = x[1] - x[0]**2 - g[0] = 2*(x[0]-1) - 16*x[0]*t1 - for i in range(1, g.shape[0]-1): - t2 = t1 - t1 = x[i+1] - x[i]**2 - g[i] = 8*t2 - 16*x[i]*t1 - g[-1] = 8*t1 - return g - - factr = 1e7 - pgtol = 1e-5 - - n=25 - m=10 - - bounds = [(None,None)] * n - for i in range(0, n, 2): - bounds[i] = (1.0, 100) - for i in range(1, n, 2): - bounds[i] = (-100, 100) - - x0 = zeros((n,), float64) - x0[:] = 3 - - x, f, d = fmin_l_bfgs_b(func, x0, fprime=grad, m=m, - factr=factr, pgtol=pgtol) - print x - print f - print d - x, f, d = fmin_l_bfgs_b(func, x0, approx_grad=1, - m=m, factr=factr, pgtol=pgtol) - print x - print f - print d diff --git a/scipy-0.10.1/scipy/optimize/lbfgsb/lbfgsb.pyf b/scipy-0.10.1/scipy/optimize/lbfgsb/lbfgsb.pyf deleted file mode 100644 index 67223d50b4..0000000000 --- a/scipy-0.10.1/scipy/optimize/lbfgsb/lbfgsb.pyf +++ /dev/null @@ -1,25 +0,0 @@ -! -*- f90 -*- -python module _lbfgsb ! in - interface ! in :_lbfgsb - subroutine setulb(n,m,x,l,u,nbd,f,g,factr,pgtol,wa,iwa,task,iprint,csave,lsave,isave,dsave) ! 
in :lbfsgb:routines.f - integer intent(in),optional,check(len(x)>=n),depend(x) :: n=len(x) - integer intent(in) :: m - double precision dimension(n),intent(inout) :: x - double precision dimension(n),depend(n),intent(in) :: l - double precision dimension(n),depend(n),intent(in) :: u - integer dimension(n),depend(n),intent(in) :: nbd - double precision intent(inout) :: f - double precision dimension(n),depend(n),intent(inout) :: g - double precision intent(in) :: factr - double precision intent(in) :: pgtol - double precision dimension(2*m*n+4*n+12*m*m+12*m),depend(n,m),intent(inout) :: wa - integer dimension(3 * n),depend(n),intent(inout) :: iwa - character*60 intent(inout) :: task - integer intent(in) :: iprint - character*60 intent(inout) :: csave - logical dimension(4),intent(inout) :: lsave - integer dimension(44),intent(inout) :: isave - double precision dimension(29),intent(inout) :: dsave - end subroutine setulb - end interface -end python module _lbfgsb diff --git a/scipy-0.10.1/scipy/optimize/lbfgsb/routines.f b/scipy-0.10.1/scipy/optimize/lbfgsb/routines.f deleted file mode 100644 index 687fb655fc..0000000000 --- a/scipy-0.10.1/scipy/optimize/lbfgsb/routines.f +++ /dev/null @@ -1,4069 +0,0 @@ -c Modified for SciPy by removing dependency on linpack -c - dnrm2, daxpy, dcopy, ddot, dscal are the same in linpack and -c LAPACK -c - wrappers that call LAPACK are used for dtrsl and dpofa -c================ L-BFGS-B (version 2.1) ========================== - - subroutine setulb(n, m, x, l, u, nbd, f, g, factr, pgtol, wa, iwa, - + task, iprint, csave, lsave, isave, dsave) - - character*60 task, csave - logical lsave(4) - integer n, m, iprint, - + nbd(n), iwa(3*n), isave(44) - double precision f, factr, pgtol, x(n), l(n), u(n), g(n), - + wa(2*m*n+4*n+12*m*m+12*m), dsave(29) - -c ************ -c -c Subroutine setulb -c -c This subroutine partitions the working arrays wa and iwa, and -c then uses the limited memory BFGS method to solve the bound -c constrained 
optimization problem by calling mainlb. -c (The direct method will be used in the subspace minimization.) -c -c n is an integer variable. -c On entry n is the dimension of the problem. -c On exit n is unchanged. -c -c m is an integer variable. -c On entry m is the maximum number of variable metric corrections -c used to define the limited memory matrix. -c On exit m is unchanged. -c -c x is a double precision array of dimension n. -c On entry x is an approximation to the solution. -c On exit x is the current approximation. -c -c l is a double precision array of dimension n. -c On entry l is the lower bound on x. -c On exit l is unchanged. -c -c u is a double precision array of dimension n. -c On entry u is the upper bound on x. -c On exit u is unchanged. -c -c nbd is an integer array of dimension n. -c On entry nbd represents the type of bounds imposed on the -c variables, and must be specified as follows: -c nbd(i)=0 if x(i) is unbounded, -c 1 if x(i) has only a lower bound, -c 2 if x(i) has both lower and upper bounds, and -c 3 if x(i) has only an upper bound. -c On exit nbd is unchanged. -c -c f is a double precision variable. -c On first entry f is unspecified. -c On final exit f is the value of the function at x. -c -c g is a double precision array of dimension n. -c On first entry g is unspecified. -c On final exit g is the value of the gradient at x. -c -c factr is a double precision variable. -c On entry factr >= 0 is specified by the user. The iteration -c will stop when -c -c (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr*epsmch -c -c where epsmch is the machine precision, which is automatically -c generated by the code. Typical values for factr: 1.d+12 for -c low accuracy; 1.d+7 for moderate accuracy; 1.d+1 for extremely -c high accuracy. -c On exit factr is unchanged. -c -c pgtol is a double precision variable. -c On entry pgtol >= 0 is specified by the user. 
The iteration -c will stop when -c -c max{|proj g_i | i = 1, ..., n} <= pgtol -c -c where pg_i is the ith component of the projected gradient. -c On exit pgtol is unchanged. -c -c wa is a double precision working array of length -c (2mmax + 4)nmax + 12mmax^2 + 12mmax. -c -c iwa is an integer working array of length 3nmax. -c -c task is a working string of characters of length 60 indicating -c the current job when entering and quitting this subroutine. -c -c iprint is an integer variable that must be set by the user. -c It controls the frequency and type of output generated: -c iprint<0 no output is generated; -c iprint=0 print only one line at the last iteration; -c 0100 print details of every iteration including x and g; -c When iprint > 0, the file iterate.dat will be created to -c summarize the iteration. -c -c csave is a working string of characters of length 60. -c -c lsave is a logical working array of dimension 4. -c On exit with 'task' = NEW_X, the following information is -c available: -c If lsave(1) = .true. then the initial X has been replaced by -c its projection in the feasible set; -c If lsave(2) = .true. then the problem is constrained; -c If lsave(3) = .true. then each variable has upper and lower -c bounds; -c -c isave is an integer working array of dimension 44. 
-c On exit with 'task' = NEW_X, the following information is -c available: -c isave(22) = the total number of intervals explored in the -c search of Cauchy points; -c isave(26) = the total number of skipped BFGS updates before -c the current iteration; -c isave(30) = the number of current iteration; -c isave(31) = the total number of BFGS updates prior the current -c iteration; -c isave(33) = the number of intervals explored in the search of -c Cauchy point in the current iteration; -c isave(34) = the total number of function and gradient -c evaluations; -c isave(36) = the number of function value or gradient -c evaluations in the current iteration; -c if isave(37) = 0 then the subspace argmin is within the box; -c if isave(37) = 1 then the subspace argmin is beyond the box; -c isave(38) = the number of free variables in the current -c iteration; -c isave(39) = the number of active constraints in the current -c iteration; -c n + 1 - isave(40) = the number of variables leaving the set of -c active constraints in the current iteration; -c isave(41) = the number of variables entering the set of active -c constraints in the current iteration. -c -c dsave is a double precision working array of dimension 29. 
-c On exit with 'task' = NEW_X, the following information is -c available: -c dsave(1) = current 'theta' in the BFGS matrix; -c dsave(2) = f(x) in the previous iteration; -c dsave(3) = factr*epsmch; -c dsave(4) = 2-norm of the line search direction vector; -c dsave(5) = the machine precision epsmch generated by the code; -c dsave(7) = the accumulated time spent on searching for -c Cauchy points; -c dsave(8) = the accumulated time spent on -c subspace minimization; -c dsave(9) = the accumulated time spent on line search; -c dsave(11) = the slope of the line search function at -c the current point of line search; -c dsave(12) = the maximum relative step length imposed in -c line search; -c dsave(13) = the infinity norm of the projected gradient; -c dsave(14) = the relative step length in the line search; -c dsave(15) = the slope of the line search function at -c the starting point of the line search; -c dsave(16) = the square of the 2-norm of the line search -c direction vector. -c -c Subprograms called: -c -c L-BFGS-B Library ... mainlb. -c -c -c References: -c -c [1] R. H. Byrd, P. Lu, J. Nocedal and C. Zhu, ``A limited -c memory algorithm for bound constrained optimization'', -c SIAM J. Scientific Computing 16 (1995), no. 5, pp. 1190--1208. -c -c [2] C. Zhu, R.H. Byrd, P. Lu, J. Nocedal, ``L-BFGS-B: a -c limited memory FORTRAN code for solving bound constrained -c optimization problems'', Tech. Report, NAM-11, EECS Department, -c Northwestern University, 1994. -c -c (Postscript files of these papers are available via anonymous -c ftp to eecs.nwu.edu in the directory pub/lbfgs/lbfgs_bcm.) -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. 
-c -c -c ************ - - integer l1,l2,l3,lws,lr,lz,lt,ld,lsg,lwa,lyg, - + lsgo,lwy,lsy,lss,lyy,lwt,lwn,lsnd,lygo - - if (task .eq. 'START') then - isave(1) = m*n - isave(2) = m**2 - isave(3) = 4*m**2 - isave(4) = 1 - isave(5) = isave(4) + isave(1) - isave(6) = isave(5) + isave(1) - isave(7) = isave(6) + isave(2) - isave(8) = isave(7) + isave(2) - isave(9) = isave(8) + isave(2) - isave(10) = isave(9) + isave(2) - isave(11) = isave(10) + isave(3) - isave(12) = isave(11) + isave(3) - isave(13) = isave(12) + n - isave(14) = isave(13) + n - isave(15) = isave(14) + n - isave(16) = isave(15) + n - isave(17) = isave(16) + 8*m - isave(18) = isave(17) + m - isave(19) = isave(18) + m - isave(20) = isave(19) + m - endif - l1 = isave(1) - l2 = isave(2) - l3 = isave(3) - lws = isave(4) - lwy = isave(5) - lsy = isave(6) - lss = isave(7) - lyy = isave(8) - lwt = isave(9) - lwn = isave(10) - lsnd = isave(11) - lz = isave(12) - lr = isave(13) - ld = isave(14) - lt = isave(15) - lwa = isave(16) - lsg = isave(17) - lsgo = isave(18) - lyg = isave(19) - lygo = isave(20) - - call mainlb(n,m,x,l,u,nbd,f,g,factr,pgtol, - + wa(lws),wa(lwy),wa(lsy),wa(lss),wa(lyy),wa(lwt), - + wa(lwn),wa(lsnd),wa(lz),wa(lr),wa(ld),wa(lt), - + wa(lwa),wa(lsg),wa(lsgo),wa(lyg),wa(lygo), - + iwa(1),iwa(n+1),iwa(2*n+1),task,iprint, - + csave,lsave,isave(22),dsave) - - return - - end - -c======================= The end of setulb ============================= - - subroutine mainlb(n, m, x, l, u, nbd, f, g, factr, pgtol, ws, wy, - + sy, ss, yy, wt, wn, snd, z, r, d, t, wa, sg, - + sgo, yg, ygo, index, iwhere, indx2, task, - + iprint, csave, lsave, isave, dsave) - - character*60 task, csave - logical lsave(4) - integer n, m, iprint, nbd(n), index(n), - + iwhere(n), indx2(n), isave(23) - double precision f, factr, pgtol, - + x(n), l(n), u(n), g(n), z(n), r(n), d(n), t(n), - + wa(8*m), sg(m), sgo(m), yg(m), ygo(m), - + ws(n, m), wy(n, m), sy(m, m), ss(m, m), yy(m, m), - + wt(m, m), wn(2*m, 2*m), snd(2*m, 2*m), 
dsave(29) - -c ************ -c -c Subroutine mainlb -c -c This subroutine solves bound constrained optimization problems by -c using the compact formula of the limited memory BFGS updates. -c -c n is an integer variable. -c On entry n is the number of variables. -c On exit n is unchanged. -c -c m is an integer variable. -c On entry m is the maximum number of variable metric -c corrections allowed in the limited memory matrix. -c On exit m is unchanged. -c -c x is a double precision array of dimension n. -c On entry x is an approximation to the solution. -c On exit x is the current approximation. -c -c l is a double precision array of dimension n. -c On entry l is the lower bound of x. -c On exit l is unchanged. -c -c u is a double precision array of dimension n. -c On entry u is the upper bound of x. -c On exit u is unchanged. -c -c nbd is an integer array of dimension n. -c On entry nbd represents the type of bounds imposed on the -c variables, and must be specified as follows: -c nbd(i)=0 if x(i) is unbounded, -c 1 if x(i) has only a lower bound, -c 2 if x(i) has both lower and upper bounds, -c 3 if x(i) has only an upper bound. -c On exit nbd is unchanged. -c -c f is a double precision variable. -c On first entry f is unspecified. -c On final exit f is the value of the function at x. -c -c g is a double precision array of dimension n. -c On first entry g is unspecified. -c On final exit g is the value of the gradient at x. -c -c factr is a double precision variable. -c On entry factr >= 0 is specified by the user. The iteration -c will stop when -c -c (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr*epsmch -c -c where epsmch is the machine precision, which is automatically -c generated by the code. -c On exit factr is unchanged. -c -c pgtol is a double precision variable. -c On entry pgtol >= 0 is specified by the user. The iteration -c will stop when -c -c max{|proj g_i | i = 1, ..., n} <= pgtol -c -c where pg_i is the ith component of the projected gradient. 
-c On exit pgtol is unchanged. -c -c ws, wy, sy, and wt are double precision working arrays used to -c store the following information defining the limited memory -c BFGS matrix: -c ws, of dimension n x m, stores S, the matrix of s-vectors; -c wy, of dimension n x m, stores Y, the matrix of y-vectors; -c sy, of dimension m x m, stores S'Y; -c ss, of dimension m x m, stores S'S; -c yy, of dimension m x m, stores Y'Y; -c wt, of dimension m x m, stores the Cholesky factorization -c of (theta*S'S+LD^(-1)L'); see eq. -c (2.26) in [3]. -c -c wn is a double precision working array of dimension 2m x 2m -c used to store the LEL^T factorization of the indefinite matrix -c K = [-D -Y'ZZ'Y/theta L_a'-R_z' ] -c [L_a -R_z theta*S'AA'S ] -c -c where E = [-I 0] -c [ 0 I] -c -c snd is a double precision working array of dimension 2m x 2m -c used to store the lower triangular part of -c N = [Y' ZZ'Y L_a'+R_z'] -c [L_a +R_z S'AA'S ] -c -c z(n),r(n),d(n),t(n),wa(8*m) are double precision working arrays. -c z is used at different times to store the Cauchy point and -c the Newton point. -c -c sg(m),sgo(m),yg(m),ygo(m) are double precision working arrays. -c -c index is an integer working array of dimension n. -c In subroutine freev, index is used to store the free and fixed -c variables at the Generalized Cauchy Point (GCP). -c -c iwhere is an integer working array of dimension n used to record -c the status of the vector x for GCP computation. -c iwhere(i)=0 or -3 if x(i) is free and has bounds, -c 1 if x(i) is fixed at l(i), and l(i) .ne. u(i) -c 2 if x(i) is fixed at u(i), and u(i) .ne. l(i) -c 3 if x(i) is always fixed, i.e., u(i)=x(i)=l(i) -c -1 if x(i) is always free, i.e., no bounds on it. -c -c indx2 is an integer working array of dimension n. -c Within subroutine cauchy, indx2 corresponds to the array iorder. -c In subroutine freev, a list of variables entering and leaving -c the free set is stored in indx2, and it is passed on to -c subroutine formk with this information. 
-c -c task is a working string of characters of length 60 indicating -c the current job when entering and leaving this subroutine. -c -c iprint is an INTEGER variable that must be set by the user. -c It controls the frequency and type of output generated: -c iprint<0 no output is generated; -c iprint=0 print only one line at the last iteration; -c 0100 print details of every iteration including x and g; -c When iprint > 0, the file iterate.dat will be created to -c summarize the iteration. -c -c csave is a working string of characters of length 60. -c -c lsave is a logical working array of dimension 4. -c -c isave is an integer working array of dimension 23. -c -c dsave is a double precision working array of dimension 29. -c -c -c Subprograms called -c -c L-BFGS-B Library ... cauchy, subsm, lnsrlb, formk, -c -c errclb, prn1lb, prn2lb, prn3lb, active, projgr, -c -c freev, cmprlb, matupd, formt. -c -c Minpack2 Library ... timer, dpmeps. -c -c Linpack Library ... dcopy, ddot. -c -c -c References: -c -c [1] R. H. Byrd, P. Lu, J. Nocedal and C. Zhu, ``A limited -c memory algorithm for bound constrained optimization'', -c SIAM J. Scientific Computing 16 (1995), no. 5, pp. 1190--1208. -c -c [2] C. Zhu, R.H. Byrd, P. Lu, J. Nocedal, ``L-BFGS-B: FORTRAN -c Subroutines for Large Scale Bound Constrained Optimization'' -c Tech. Report, NAM-11, EECS Department, Northwestern University, -c 1994. -c -c [3] R. Byrd, J. Nocedal and R. Schnabel "Representations of -c Quasi-Newton Matrices and their use in Limited Memory Methods'', -c Mathematical Programming 63 (1994), no. 4, pp. 129-156. -c -c (Postscript files of these papers are available via anonymous -c ftp to eecs.nwu.edu in the directory pub/lbfgs/lbfgs_bcm.) -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. 
-c -c -c ************ - - logical prjctd,cnstnd,boxed,updatd,wrk - character*3 word - integer i,k,nintol,itfile,iback,nskip, - + head,col,iter,itail,iupdat, - + nint,nfgv,info,ifun, - + iword,nfree,nact,ileave,nenter - double precision theta,fold,ddot,dr,rr,tol,dpmeps, - + xstep,sbgnrm,ddum,dnorm,dtd,epsmch, - + cpu1,cpu2,cachyt,sbtime,lnscht,time1,time2, - + gd,gdold,stp,stpmx,time - double precision one,zero - parameter (one=1.0d0,zero=0.0d0) - - if (task .eq. 'START') then - - call timer(time1) - -c Generate the current machine precision. - - epsmch = dpmeps() - -c Initialize counters and scalars when task='START'. - -c for the limited memory BFGS matrices: - col = 0 - head = 1 - theta = one - iupdat = 0 - updatd = .false. - -c for operation counts: - iter = 0 - nfgv = 0 - nint = 0 - nintol = 0 - nskip = 0 - nfree = n - -c for stopping tolerance: - tol = factr*epsmch - -c for measuring running time: - cachyt = 0 - sbtime = 0 - lnscht = 0 - -c 'word' records the status of subspace solutions. - word = '---' - -c 'info' records the termination information. - info = 0 - - if (iprint .ge. 1) then -c open a summary file 'iterate.dat' - open (8, file = 'iterate.dat', status = 'unknown') - itfile = 8 - endif - -c Check the input arguments for errors. - - call errclb(n,m,factr,l,u,nbd,task,info,k) - if (task(1:5) .eq. 'ERROR') then - call prn3lb(n,x,f,task,iprint,info,itfile, - + iter,nfgv,nintol,nskip,nact,sbgnrm, - + zero,nint,word,iback,stp,xstep,k, - + cachyt,sbtime,lnscht) - return - endif - - call prn1lb(n,m,l,u,x,iprint,itfile,epsmch) - -c Initialize iwhere & project x onto the feasible set. - - call active(n,l,u,nbd,x,iwhere,iprint,prjctd,cnstnd,boxed) - -c The end of the initialization. - - else -c restore local variables. 
- - prjctd = lsave(1) - cnstnd = lsave(2) - boxed = lsave(3) - updatd = lsave(4) - - nintol = isave(1) - itfile = isave(3) - iback = isave(4) - nskip = isave(5) - head = isave(6) - col = isave(7) - itail = isave(8) - iter = isave(9) - iupdat = isave(10) - nint = isave(12) - nfgv = isave(13) - info = isave(14) - ifun = isave(15) - iword = isave(16) - nfree = isave(17) - nact = isave(18) - ileave = isave(19) - nenter = isave(20) - - theta = dsave(1) - fold = dsave(2) - tol = dsave(3) - dnorm = dsave(4) - epsmch = dsave(5) - cpu1 = dsave(6) - cachyt = dsave(7) - sbtime = dsave(8) - lnscht = dsave(9) - time1 = dsave(10) - gd = dsave(11) - stpmx = dsave(12) - sbgnrm = dsave(13) - stp = dsave(14) - gdold = dsave(15) - dtd = dsave(16) - -c After returning from the driver go to the point where execution -c is to resume. - - if (task(1:5) .eq. 'FG_LN') goto 666 - if (task(1:5) .eq. 'NEW_X') goto 777 - if (task(1:5) .eq. 'FG_ST') goto 111 - if (task(1:4) .eq. 'STOP') then - if (task(7:9) .eq. 'CPU') then -c restore the previous iterate. - call dcopy(n,t,1,x,1) - call dcopy(n,r,1,g,1) - f = fold - endif - goto 999 - endif - endif - -c Compute f0 and g0. - - task = 'FG_START' -c return to the driver to calculate f and g; reenter at 111. - goto 1000 - 111 continue - nfgv = 1 - -c Compute the infinity norm of the (-) projected gradient. - - call projgr(n,l,u,nbd,x,g,sbgnrm) - - if (iprint .ge. 1) then - write (6,1002) iter,f,sbgnrm - write (itfile,1003) iter,nfgv,sbgnrm,f - endif - if (sbgnrm .le. pgtol) then -c terminate the algorithm. - task = 'CONVERGENCE: NORM OF PROJECTED GRADIENT <= PGTOL' - goto 999 - endif - -c ----------------- the beginning of the loop -------------------------- - - 222 continue - if (iprint .ge. 99) write (6,1001) iter + 1 - iword = -1 -c - if (.not. cnstnd .and. col .gt. 0) then -c skip the search for GCP. 
- call dcopy(n,x,1,z,1) - wrk = updatd - nint = 0 - goto 333 - endif - -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c -c Compute the Generalized Cauchy Point (GCP). -c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - - call timer(cpu1) - call cauchy(n,x,l,u,nbd,g,indx2,iwhere,t,d,z, - + m,wy,ws,sy,wt,theta,col,head, - + wa(1),wa(2*m+1),wa(4*m+1),wa(6*m+1),nint, - + sg,yg,iprint,sbgnrm,info,epsmch) - if (info .ne. 0) then -c singular triangular system detected; refresh the lbfgs memory. - if(iprint .ge. 1) write (6, 1005) - info = 0 - col = 0 - head = 1 - theta = one - iupdat = 0 - updatd = .false. - call timer(cpu2) - cachyt = cachyt + cpu2 - cpu1 - goto 222 - endif - call timer(cpu2) - cachyt = cachyt + cpu2 - cpu1 - nintol = nintol + nint - -c Count the entering and leaving variables for iter > 0; -c find the index set of free and active variables at the GCP. - - call freev(n,nfree,index,nenter,ileave,indx2, - + iwhere,wrk,updatd,cnstnd,iprint,iter) - - nact = n - nfree - - 333 continue - -c If there are no free variables or B=theta*I, then -c skip the subspace minimization. - - if (nfree .eq. 0 .or. col .eq. 0) goto 555 - -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c -c Subspace minimization. -c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - - call timer(cpu1) - -c Form the LEL^T factorization of the indefinite -c matrix K = [-D -Y'ZZ'Y/theta L_a'-R_z' ] -c [L_a -R_z theta*S'AA'S ] -c where E = [-I 0] -c [ 0 I] - - if (wrk) call formk(n,nfree,index,nenter,ileave,indx2,iupdat, - + updatd,wn,snd,m,ws,wy,sy,theta,col,head,info) - if (info .ne. 0) then -c nonpositive definiteness in Cholesky factorization; -c refresh the lbfgs memory and restart the iteration. - if(iprint .ge. 1) write (6, 1006) - info = 0 - col = 0 - head = 1 - theta = one - iupdat = 0 - updatd = .false. 
- call timer(cpu2) - sbtime = sbtime + cpu2 - cpu1 - goto 222 - endif - -c compute r=-Z'B(xcp-xk)-Z'g (using wa(2m+1)=W'(xcp-x) -c from 'cauchy'). - call cmprlb(n,m,x,g,ws,wy,sy,wt,z,r,wa,index, - + theta,col,head,nfree,cnstnd,info) - if (info .ne. 0) goto 444 -c call the direct method. - call subsm(n,m,nfree,index,l,u,nbd,z,r,ws,wy,theta, - + col,head,iword,wa,wn,iprint,info) - 444 continue - if (info .ne. 0) then -c singular triangular system detected; -c refresh the lbfgs memory and restart the iteration. - if(iprint .ge. 1) write (6, 1005) - info = 0 - col = 0 - head = 1 - theta = one - iupdat = 0 - updatd = .false. - call timer(cpu2) - sbtime = sbtime + cpu2 - cpu1 - goto 222 - endif - - call timer(cpu2) - sbtime = sbtime + cpu2 - cpu1 - 555 continue - -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c -c Line search and optimality tests. -c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - -c Generate the search direction d:=z-x. - - do 40 i = 1, n - d(i) = z(i) - x(i) - 40 continue - call timer(cpu1) - 666 continue - call lnsrlb(n,l,u,nbd,x,f,fold,gd,gdold,g,d,r,t,z,stp,dnorm, - + dtd,xstep,stpmx,iter,ifun,iback,nfgv,info,task, - + boxed,cnstnd,csave,isave(22),dsave(17)) - if (info .ne. 0 .or. iback .ge. 20) then -c restore the previous iterate. - call dcopy(n,t,1,x,1) - call dcopy(n,r,1,g,1) - f = fold - if (col .eq. 0) then -c abnormal termination. - if (info .eq. 0) then - info = -9 -c restore the actual number of f and g evaluations etc. - nfgv = nfgv - 1 - ifun = ifun - 1 - iback = iback - 1 - endif - task = 'ABNORMAL_TERMINATION_IN_LNSRCH' - iter = iter + 1 - goto 999 - else -c refresh the lbfgs memory and restart the iteration. - if(iprint .ge. 1) write (6, 1008) - if (info .eq. 0) nfgv = nfgv - 1 - info = 0 - col = 0 - head = 1 - theta = one - iupdat = 0 - updatd = .false. - task = 'RESTART_FROM_LNSRCH' - call timer(cpu2) - lnscht = lnscht + cpu2 - cpu1 - goto 222 - endif - else if (task(1:5) .eq. 
'FG_LN') then -c return to the driver for calculating f and g; reenter at 666. - goto 1000 - else -c calculate and print out the quantities related to the new X. - call timer(cpu2) - lnscht = lnscht + cpu2 - cpu1 - iter = iter + 1 - -c Compute the infinity norm of the projected (-)gradient. - - call projgr(n,l,u,nbd,x,g,sbgnrm) - -c Print iteration information. - - call prn2lb(n,x,f,g,iprint,itfile,iter,nfgv,nact, - + sbgnrm,nint,word,iword,iback,stp,xstep) - goto 1000 - endif - 777 continue - -c Test for termination. - - if (sbgnrm .le. pgtol) then -c terminate the algorithm. - task = 'CONVERGENCE: NORM OF PROJECTED GRADIENT <= PGTOL' - goto 999 - endif - - ddum = max(abs(fold), abs(f), one) - if ((fold - f) .le. tol*ddum) then -c terminate the algorithm. - task = 'CONVERGENCE: REL_REDUCTION_OF_F <= FACTR*EPSMCH' - if (iback .ge. 10) info = -5 -c i.e., to issue a warning if iback>10 in the line search. - goto 999 - endif - -c Compute d=newx-oldx, r=newg-oldg, rr=y'y and dr=y's. - - do 42 i = 1, n - r(i) = g(i) - r(i) - 42 continue - rr = ddot(n,r,1,r,1) - if (stp .eq. one) then - dr = gd - gdold - ddum = -gdold - else - dr = (gd - gdold)*stp - call dscal(n,stp,d,1) - ddum = -gdold*stp - endif - - if (dr .le. epsmch*ddum) then -c skip the L-BFGS update. - nskip = nskip + 1 - updatd = .false. - if (iprint .ge. 1) write (6,1004) dr, ddum - goto 888 - endif - -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c -c Update the L-BFGS matrix. -c -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - - updatd = .true. - iupdat = iupdat + 1 - -c Update matrices WS and WY and form the middle matrix in B. - - call matupd(n,m,ws,wy,sy,ss,d,r,itail, - + iupdat,col,head,theta,rr,dr,stp,dtd) - -c Form the upper half of the pds T = theta*SS + L*D^(-1)*L'; -c Store T in the upper triangular of the array wt; -c Cholesky factorize T to J*J' with -c J' stored in the upper triangular of wt. 
- - call formt(m,wt,sy,ss,col,theta,info) - - if (info .ne. 0) then -c nonpositive definiteness in Cholesky factorization; -c refresh the lbfgs memory and restart the iteration. - if(iprint .ge. 1) write (6, 1007) - info = 0 - col = 0 - head = 1 - theta = one - iupdat = 0 - updatd = .false. - goto 222 - endif - -c Now the inverse of the middle matrix in B is - -c [ D^(1/2) O ] [ -D^(1/2) D^(-1/2)*L' ] -c [ -L*D^(-1/2) J ] [ 0 J' ] - - 888 continue - -c -------------------- the end of the loop ----------------------------- - - goto 222 - 999 continue - call timer(time2) - time = time2 - time1 - call prn3lb(n,x,f,task,iprint,info,itfile, - + iter,nfgv,nintol,nskip,nact,sbgnrm, - + time,nint,word,iback,stp,xstep,k, - + cachyt,sbtime,lnscht) - 1000 continue - -c Save local variables. - - lsave(1) = prjctd - lsave(2) = cnstnd - lsave(3) = boxed - lsave(4) = updatd - - isave(1) = nintol - isave(3) = itfile - isave(4) = iback - isave(5) = nskip - isave(6) = head - isave(7) = col - isave(8) = itail - isave(9) = iter - isave(10) = iupdat - isave(12) = nint - isave(13) = nfgv - isave(14) = info - isave(15) = ifun - isave(16) = iword - isave(17) = nfree - isave(18) = nact - isave(19) = ileave - isave(20) = nenter - - dsave(1) = theta - dsave(2) = fold - dsave(3) = tol - dsave(4) = dnorm - dsave(5) = epsmch - dsave(6) = cpu1 - dsave(7) = cachyt - dsave(8) = sbtime - dsave(9) = lnscht - dsave(10) = time1 - dsave(11) = gd - dsave(12) = stpmx - dsave(13) = sbgnrm - dsave(14) = stp - dsave(15) = gdold - dsave(16) = dtd - - 1001 format (//,'ITERATION ',i5) - 1002 format - + (/,'At iterate',i5,4x,'f= ',1p,d12.5,4x,'|proj g|= ',1p,d12.5) - 1003 format (2(1x,i4),5x,'-',5x,'-',3x,'-',5x,'-',5x,'-',8x,'-',3x, - + 1p,2(1x,d10.3)) - 1004 format (' ys=',1p,e10.3,' -gs=',1p,e10.3,' BFGS update SKIPPED') - 1005 format (/, - +' Singular triangular system detected;',/, - +' refresh the lbfgs memory and restart the iteration.') - 1006 format (/, - +' Nonpositive definiteness in Cholesky 
factorization in formk;',/, - +' refresh the lbfgs memory and restart the iteration.') - 1007 format (/, - +' Nonpositive definiteness in Cholesky factorization in formt;',/, - +' refresh the lbfgs memory and restart the iteration.') - 1008 format (/, - +' Bad direction in the line search;',/, - +' refresh the lbfgs memory and restart the iteration.') - - return - - end - -c======================= The end of mainlb ============================= - - subroutine active(n, l, u, nbd, x, iwhere, iprint, - + prjctd, cnstnd, boxed) - - logical prjctd, cnstnd, boxed - integer n, iprint, nbd(n), iwhere(n) - double precision x(n), l(n), u(n) - -c ************ -c -c Subroutine active -c -c This subroutine initializes iwhere and projects the initial x to -c the feasible set if necessary. -c -c iwhere is an integer array of dimension n. -c On entry iwhere is unspecified. -c On exit iwhere(i)=-1 if x(i) has no bounds -c 3 if l(i)=u(i) -c 0 otherwise. -c In cauchy, iwhere is given finer gradations. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer nbdd,i - double precision zero - parameter (zero=0.0d0) - -c Initialize nbdd, prjctd, cnstnd and boxed. - - nbdd = 0 - prjctd = .false. - cnstnd = .false. - boxed = .true. - -c Project the initial x to the easible set if necessary. - - do 10 i = 1, n - if (nbd(i) .gt. 0) then - if (nbd(i) .le. 2 .and. x(i) .le. l(i)) then - if (x(i) .lt. l(i)) then - prjctd = .true. - x(i) = l(i) - endif - nbdd = nbdd + 1 - else if (nbd(i) .ge. 2 .and. x(i) .ge. u(i)) then - if (x(i) .gt. u(i)) then - prjctd = .true. - x(i) = u(i) - endif - nbdd = nbdd + 1 - endif - endif - 10 continue - -c Initialize iwhere and assign values to cnstnd and boxed. - - do 20 i = 1, n - if (nbd(i) .ne. 2) boxed = .false. 
- if (nbd(i) .eq. 0) then -c this variable is always free - iwhere(i) = -1 - -c otherwise set x(i)=mid(x(i), u(i), l(i)). - else - cnstnd = .true. - if (nbd(i) .eq. 2 .and. u(i) - l(i) .le. zero) then -c this variable is always fixed - iwhere(i) = 3 - else - iwhere(i) = 0 - endif - endif - 20 continue - - if (iprint .ge. 0) then - if (prjctd) write (6,*) - + 'The initial X is infeasible. Restart with its projection.' - if (.not. cnstnd) - + write (6,*) 'This problem is unconstrained.' - endif - - if (iprint .gt. 0) write (6,1001) nbdd - - 1001 format (/,'At X0 ',i9,' variables are exactly at the bounds') - - return - - end - -c======================= The end of active ============================= - - subroutine bmv(m, sy, wt, col, v, p, info) - - integer m, col, info - double precision sy(m, m), wt(m, m), v(2*col), p(2*col) - -c ************ -c -c Subroutine bmv -c -c This subroutine computes the product of the 2m x 2m middle matrix -c in the compact L-BFGS formula of B and a 2m vector v; -c it returns the product in p. -c -c m is an integer variable. -c On entry m is the maximum number of variable metric corrections -c used to define the limited memory matrix. -c On exit m is unchanged. -c -c sy is a double precision array of dimension m x m. -c On entry sy specifies the matrix S'Y. -c On exit sy is unchanged. -c -c wt is a double precision array of dimension m x m. -c On entry wt specifies the upper triangular matrix J' which is -c the Cholesky factor of (thetaS'S+LD^(-1)L'). -c On exit wt is unchanged. -c -c col is an integer variable. -c On entry col specifies the number of s-vectors (or y-vectors) -c stored in the compact L-BFGS formula. -c On exit col is unchanged. -c -c v is a double precision array of dimension 2col. -c On entry v specifies vector v. -c On exit v is unchanged. -c -c p is a double precision array of dimension 2col. -c On entry p is unspecified. -c On exit p is the product Mv. -c -c info is an integer variable. 
-c On entry info is unspecified. -c On exit info = 0 for normal return, -c = nonzero for abnormal return when the system -c to be solved by dtrsl is singular. -c -c Subprograms called: -c -c Linpack ... dtrsl. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i,k,i2 - double precision sum - - if (col .eq. 0) return - -c PART I: solve [ D^(1/2) O ] [ p1 ] = [ v1 ] -c [ -L*D^(-1/2) J ] [ p2 ] [ v2 ]. - -c solve Jp2=v2+LD^(-1)v1. - p(col + 1) = v(col + 1) - do 20 i = 2, col - i2 = col + i - sum = 0.0d0 - do 10 k = 1, i - 1 - sum = sum + sy(i,k)*v(k)/sy(k,k) - 10 continue - p(i2) = v(i2) + sum - 20 continue -c Solve the triangular system - call dtrsl(wt,m,col,p(col+1),11,info) - if (info .ne. 0) return - -c solve D^(1/2)p1=v1. - do 30 i = 1, col - p(i) = v(i)/sqrt(sy(i,i)) - 30 continue - -c PART II: solve [ -D^(1/2) D^(-1/2)*L' ] [ p1 ] = [ p1 ] -c [ 0 J' ] [ p2 ] [ p2 ]. - -c solve J^Tp2=p2. - call dtrsl(wt,m,col,p(col+1),01,info) - if (info .ne. 0) return - -c compute p1=-D^(-1/2)(p1-D^(-1/2)L'p2) -c =-D^(-1/2)p1+D^(-1)L'p2. 
- do 40 i = 1, col - p(i) = -p(i)/sqrt(sy(i,i)) - 40 continue - do 60 i = 1, col - sum = 0.d0 - do 50 k = i + 1, col - sum = sum + sy(k,i)*p(col+k)/sy(i,i) - 50 continue - p(i) = p(i) + sum - 60 continue - - return - - end - -c======================== The end of bmv =============================== - - subroutine cauchy(n, x, l, u, nbd, g, iorder, iwhere, t, d, xcp, - + m, wy, ws, sy, wt, theta, col, head, p, c, wbp, - + v, nint, sg, yg, iprint, sbgnrm, info, epsmch) - - integer n, m, head, col, nint, iprint, info, - + nbd(n), iorder(n), iwhere(n) - double precision theta, epsmch, - + x(n), l(n), u(n), g(n), t(n), d(n), xcp(n), - + sg(m), yg(m), wy(n, col), ws(n, col), sy(m, m), - + wt(m, m), p(2*m), c(2*m), wbp(2*m), v(2*m) - -c ************ -c -c Subroutine cauchy -c -c For given x, l, u, g (with sbgnrm > 0), and a limited memory -c BFGS matrix B defined in terms of matrices WY, WS, WT, and -c scalars head, col, and theta, this subroutine computes the -c generalized Cauchy point (GCP), defined as the first local -c minimizer of the quadratic -c -c Q(x + s) = g's + 1/2 s'Bs -c -c along the projected gradient direction P(x-tg,l,u). -c The routine returns the GCP in xcp. -c -c n is an integer variable. -c On entry n is the dimension of the problem. -c On exit n is unchanged. -c -c x is a double precision array of dimension n. -c On entry x is the starting point for the GCP computation. -c On exit x is unchanged. -c -c l is a double precision array of dimension n. -c On entry l is the lower bound of x. -c On exit l is unchanged. -c -c u is a double precision array of dimension n. -c On entry u is the upper bound of x. -c On exit u is unchanged. -c -c nbd is an integer array of dimension n. -c On entry nbd represents the type of bounds imposed on the -c variables, and must be specified as follows: -c nbd(i)=0 if x(i) is unbounded, -c 1 if x(i) has only a lower bound, -c 2 if x(i) has both lower and upper bounds, and -c 3 if x(i) has only an upper bound. 
-c On exit nbd is unchanged. -c -c g is a double precision array of dimension n. -c On entry g is the gradient of f(x). g must be a nonzero vector. -c On exit g is unchanged. -c -c iorder is an integer working array of dimension n. -c iorder will be used to store the breakpoints in the piecewise -c linear path and free variables encountered. On exit, -c iorder(1),...,iorder(nleft) are indices of breakpoints -c which have not been encountered; -c iorder(nleft+1),...,iorder(nbreak) are indices of -c encountered breakpoints; and -c iorder(nfree),...,iorder(n) are indices of variables which -c have no bound constraits along the search direction. -c -c iwhere is an integer array of dimension n. -c On entry iwhere indicates only the permanently fixed (iwhere=3) -c or free (iwhere= -1) components of x. -c On exit iwhere records the status of the current x variables. -c iwhere(i)=-3 if x(i) is free and has bounds, but is not moved -c 0 if x(i) is free and has bounds, and is moved -c 1 if x(i) is fixed at l(i), and l(i) .ne. u(i) -c 2 if x(i) is fixed at u(i), and u(i) .ne. l(i) -c 3 if x(i) is always fixed, i.e., u(i)=x(i)=l(i) -c -1 if x(i) is always free, i.e., it has no bounds. -c -c t is a double precision working array of dimension n. -c t will be used to store the break points. -c -c d is a double precision array of dimension n used to store -c the Cauchy direction P(x-tg)-x. -c -c xcp is a double precision array of dimension n used to return the -c GCP on exit. -c -c m is an integer variable. -c On entry m is the maximum number of variable metric corrections -c used to define the limited memory matrix. -c On exit m is unchanged. -c -c ws, wy, sy, and wt are double precision arrays. -c On entry they store information that defines the -c limited memory BFGS matrix: -c ws(n,m) stores S, a set of s-vectors; -c wy(n,m) stores Y, a set of y-vectors; -c sy(m,m) stores S'Y; -c wt(m,m) stores the -c Cholesky factorization of (theta*S'S+LD^(-1)L'). 
-c On exit these arrays are unchanged. -c -c theta is a double precision variable. -c On entry theta is the scaling factor specifying B_0 = theta I. -c On exit theta is unchanged. -c -c col is an integer variable. -c On entry col is the actual number of variable metric -c corrections stored so far. -c On exit col is unchanged. -c -c head is an integer variable. -c On entry head is the location of the first s-vector (or y-vector) -c in S (or Y). -c On exit col is unchanged. -c -c p is a double precision working array of dimension 2m. -c p will be used to store the vector p = W^(T)d. -c -c c is a double precision working array of dimension 2m. -c c will be used to store the vector c = W^(T)(xcp-x). -c -c wbp is a double precision working array of dimension 2m. -c wbp will be used to store the row of W corresponding -c to a breakpoint. -c -c v is a double precision working array of dimension 2m. -c -c nint is an integer variable. -c On exit nint records the number of quadratic segments explored -c in searching for the GCP. -c -c sg and yg are double precision arrays of dimension m. -c On entry sg and yg store S'g and Y'g correspondingly. -c On exit they are unchanged. -c -c iprint is an INTEGER variable that must be set by the user. -c It controls the frequency and type of output generated: -c iprint<0 no output is generated; -c iprint=0 print only one line at the last iteration; -c 0100 print details of every iteration including x and g; -c When iprint > 0, the file iterate.dat will be created to -c summarize the iteration. -c -c sbgnrm is a double precision variable. -c On entry sbgnrm is the norm of the projected gradient at x. -c On exit sbgnrm is unchanged. -c -c info is an integer variable. -c On entry info is 0. -c On exit info = 0 for normal return, -c = nonzero for abnormal return when the the system -c used in routine bmv is singular. -c -c Subprograms called: -c -c L-BFGS-B Library ... hpsolb, bmv. -c -c Linpack ... dscal dcopy, daxpy. 
-c -c -c References: -c -c [1] R. H. Byrd, P. Lu, J. Nocedal and C. Zhu, ``A limited -c memory algorithm for bound constrained optimization'', -c SIAM J. Scientific Computing 16 (1995), no. 5, pp. 1190--1208. -c -c [2] C. Zhu, R.H. Byrd, P. Lu, J. Nocedal, ``L-BFGS-B: FORTRAN -c Subroutines for Large Scale Bound Constrained Optimization'' -c Tech. Report, NAM-11, EECS Department, Northwestern University, -c 1994. -c -c (Postscript files of these papers are available via anonymous -c ftp to eecs.nwu.edu in the directory pub/lbfgs/lbfgs_bcm.) -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - logical xlower,xupper,bnded - integer i,j,col2,nfree,nbreak,pointr, - + ibp,nleft,ibkmin,iter - double precision f1,f2,dt,dtm,tsum,dibp,zibp,dibp2,bkmin, - + tu,tl,wmc,wmp,wmw,ddot,tj,tj0,neggi,sbgnrm, - + f2_org - double precision one,zero - parameter (one=1.0d0,zero=0.0d0) - -c Check the status of the variables, reset iwhere(i) if necessary; -c compute the Cauchy direction d and the breakpoints t; initialize -c the derivative f1 and the vector p = W'd (for theta = 1). - - if (sbgnrm .le. zero) then - if (iprint .ge. 0) write (6,*) 'Subgnorm = 0. GCP = X.' - call dcopy(n,x,1,xcp,1) - return - endif - bnded = .true. - nfree = n + 1 - nbreak = 0 - ibkmin = 0 - bkmin = zero - col2 = 2*col - f1 = zero - if (iprint .ge. 99) write (6,3010) - -c We set p to zero and build it up as we determine d. - - do 20 i = 1, col2 - p(i) = zero - 20 continue - -c In the following loop we determine for each variable its bound -c status and its breakpoint, and update p accordingly. -c Smallest breakpoint is identified. - - do 50 i = 1, n - neggi = -g(i) - if (iwhere(i) .ne. 3 .and. iwhere(i) .ne. 
-1) then -c if x(i) is not a constant and has bounds, -c compute the difference between x(i) and its bounds. - if (nbd(i) .le. 2) tl = x(i) - l(i) - if (nbd(i) .ge. 2) tu = u(i) - x(i) - -c If a variable is close enough to a bound -c we treat it as at bound. - xlower = nbd(i) .le. 2 .and. tl .le. zero - xupper = nbd(i) .ge. 2 .and. tu .le. zero - -c reset iwhere(i). - iwhere(i) = 0 - if (xlower) then - if (neggi .le. zero) iwhere(i) = 1 - else if (xupper) then - if (neggi .ge. zero) iwhere(i) = 2 - else - if (abs(neggi) .le. zero) iwhere(i) = -3 - endif - endif - pointr = head - if (iwhere(i) .ne. 0 .and. iwhere(i) .ne. -1) then - d(i) = zero - else - d(i) = neggi - f1 = f1 - neggi*neggi -c calculate p := p - W'e_i* (g_i). - do 40 j = 1, col - p(j) = p(j) + wy(i,pointr)* neggi - p(col + j) = p(col + j) + ws(i,pointr)*neggi - pointr = mod(pointr,m) + 1 - 40 continue - if (nbd(i) .le. 2 .and. nbd(i) .ne. 0 - + .and. neggi .lt. zero) then -c x(i) + d(i) is bounded; compute t(i). - nbreak = nbreak + 1 - iorder(nbreak) = i - t(nbreak) = tl/(-neggi) - if (nbreak .eq. 1 .or. t(nbreak) .lt. bkmin) then - bkmin = t(nbreak) - ibkmin = nbreak - endif - else if (nbd(i) .ge. 2 .and. neggi .gt. zero) then -c x(i) + d(i) is bounded; compute t(i). - nbreak = nbreak + 1 - iorder(nbreak) = i - t(nbreak) = tu/neggi - if (nbreak .eq. 1 .or. t(nbreak) .lt. bkmin) then - bkmin = t(nbreak) - ibkmin = nbreak - endif - else -c x(i) + d(i) is not bounded. - nfree = nfree - 1 - iorder(nfree) = i - if (abs(neggi) .gt. zero) bnded = .false. - endif - endif - 50 continue - -c The indices of the nonzero components of d are now stored -c in iorder(1),...,iorder(nbreak) and iorder(nfree),...,iorder(n). -c The smallest of the nbreak breakpoints is in t(ibkmin)=bkmin. - - if (theta .ne. one) then -c complete the initialization of p for theta not= one. - call dscal(col,theta,p(col+1),1) - endif - -c Initialize GCP xcp = x. - - call dcopy(n,x,1,xcp,1) - - if (nbreak .eq. 0 .and. nfree .eq. 
n + 1) then -c is a zero vector, return with the initial xcp as GCP. - if (iprint .gt. 100) write (6,1010) (xcp(i), i = 1, n) - return - endif - -c Initialize c = W'(xcp - x) = 0. - - do 60 j = 1, col2 - c(j) = zero - 60 continue - -c Initialize derivative f2. - - f2 = -theta*f1 - f2_org = f2 - if (col .gt. 0) then - call bmv(m,sy,wt,col,p,v,info) - if (info .ne. 0) return - f2 = f2 - ddot(col2,v,1,p,1) - endif - dtm = -f1/f2 - tsum = zero - nint = 1 - if (iprint .ge. 99) - + write (6,*) 'There are ',nbreak,' breakpoints ' - -c If there are no breakpoints, locate the GCP and return. - - if (nbreak .eq. 0) goto 888 - - nleft = nbreak - iter = 1 - - - tj = zero - -c------------------- the beginning of the loop ------------------------- - - 777 continue - -c Find the next smallest breakpoint; -c compute dt = t(nleft) - t(nleft + 1). - - tj0 = tj - if (iter .eq. 1) then -c Since we already have the smallest breakpoint we need not do -c heapsort yet. Often only one breakpoint is used and the -c cost of heapsort is avoided. - tj = bkmin - ibp = iorder(ibkmin) - else - if (iter .eq. 2) then -c Replace the already used smallest breakpoint with the -c breakpoint numbered nbreak > nlast, before heapsort call. - if (ibkmin .ne. nbreak) then - t(ibkmin) = t(nbreak) - iorder(ibkmin) = iorder(nbreak) - endif -c Update heap structure of breakpoints -c (if iter=2, initialize heap). - endif - call hpsolb(nleft,t,iorder,iter-2) - tj = t(nleft) - ibp = iorder(nleft) - endif - - dt = tj - tj0 - - if (dt .ne. zero .and. iprint .ge. 100) then - write (6,4011) nint,f1,f2 - write (6,5010) dt - write (6,6010) dtm - endif - -c If a minimizer is within this interval, locate the GCP and return. - - if (dtm .lt. dt) goto 888 - -c Otherwise fix one variable and -c reset the corresponding component of d to zero. - - tsum = tsum + dt - nleft = nleft - 1 - iter = iter + 1 - dibp = d(ibp) - d(ibp) = zero - if (dibp .gt. 
zero) then - zibp = u(ibp) - x(ibp) - xcp(ibp) = u(ibp) - iwhere(ibp) = 2 - else - zibp = l(ibp) - x(ibp) - xcp(ibp) = l(ibp) - iwhere(ibp) = 1 - endif - if (iprint .ge. 100) write (6,*) 'Variable ',ibp,' is fixed.' - if (nleft .eq. 0 .and. nbreak .eq. n) then -c all n variables are fixed, -c return with xcp as GCP. - dtm = dt - goto 999 - endif - -c Update the derivative information. - - nint = nint + 1 - dibp2 = dibp**2 - -c Update f1 and f2. - -c temporarily set f1 and f2 for col=0. - f1 = f1 + dt*f2 + dibp2 - theta*dibp*zibp - f2 = f2 - theta*dibp2 - - if (col .gt. 0) then -c update c = c + dt*p. - call daxpy(col2,dt,p,1,c,1) - -c choose wbp, -c the row of W corresponding to the breakpoint encountered. - pointr = head - do 70 j = 1,col - wbp(j) = wy(ibp,pointr) - wbp(col + j) = theta*ws(ibp,pointr) - pointr = mod(pointr,m) + 1 - 70 continue - -c compute (wbp)Mc, (wbp)Mp, and (wbp)M(wbp)'. - call bmv(m,sy,wt,col,wbp,v,info) - if (info .ne. 0) return - wmc = ddot(col2,c,1,v,1) - wmp = ddot(col2,p,1,v,1) - wmw = ddot(col2,wbp,1,v,1) - -c update p = p - dibp*wbp. - call daxpy(col2,-dibp,wbp,1,p,1) - -c complete updating f1 and f2 while col > 0. - f1 = f1 + dibp*wmc - f2 = f2 + 2.0d0*dibp*wmp - dibp2*wmw - endif - - f2 = max(epsmch*f2_org,f2) - if (nleft .gt. 0) then - dtm = -f1/f2 - goto 777 -c to repeat the loop for unsearched intervals. - else if(bnded) then - f1 = zero - f2 = zero - dtm = zero - else - dtm = -f1/f2 - endif - -c------------------- the end of the loop ------------------------------- - - 888 continue - if (iprint .ge. 99) then - write (6,*) - write (6,*) 'GCP found in this segment' - write (6,4010) nint,f1,f2 - write (6,6010) dtm - endif - if (dtm .le. zero) dtm = zero - tsum = tsum + dtm - -c Move free variables (i.e., the ones w/o breakpoints) and -c the variables whose breakpoints haven't been reached. 
- - call daxpy(n,tsum,d,1,xcp,1) - - 999 continue - -c Update c = c + dtm*p = W'(x^c - x) -c which will be used in computing r = Z'(B(x^c - x) + g). - - if (col .gt. 0) call daxpy(col2,dtm,p,1,c,1) - if (iprint .gt. 100) write (6,1010) (xcp(i),i = 1,n) - if (iprint .ge. 99) write (6,2010) - - 1010 format ('Cauchy X = ',/,(4x,1p,6(1x,d11.4))) - 2010 format (/,'---------------- exit CAUCHY----------------------',/) - 3010 format (/,'---------------- CAUCHY entered-------------------') - 4010 format ('Piece ',i3,' --f1, f2 at start point ',1p,2(1x,d11.4)) - 4011 format (/,'Piece ',i3,' --f1, f2 at start point ', - + 1p,2(1x,d11.4)) - 5010 format ('Distance to the next break point = ',1p,d11.4) - 6010 format ('Distance to the stationary point = ',1p,d11.4) - - return - - end - -c====================== The end of cauchy ============================== - - subroutine cmprlb(n, m, x, g, ws, wy, sy, wt, z, r, wa, index, - + theta, col, head, nfree, cnstnd, info) - - logical cnstnd - integer n, m, col, head, nfree, info, index(n) - double precision theta, - + x(n), g(n), z(n), r(n), wa(4*m), - + ws(n, m), wy(n, m), sy(m, m), wt(m, m) - -c ************ -c -c Subroutine cmprlb -c -c This subroutine computes r=-Z'B(xcp-xk)-Z'g by using -c wa(2m+1)=W'(xcp-x) from subroutine cauchy. -c -c Subprograms called: -c -c L-BFGS-B Library ... bmv. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i,j,k,pointr - double precision a1,a2 - - if (.not. cnstnd .and. col .gt. 0) then - do 26 i = 1, n - r(i) = -g(i) - 26 continue - else - do 30 i = 1, nfree - k = index(i) - r(i) = -theta*(z(k) - x(k)) - g(k) - 30 continue - call bmv(m,sy,wt,col,wa(2*m+1),wa(1),info) - if (info .ne. 
0) then - info = -8 - return - endif - pointr = head - do 34 j = 1, col - a1 = wa(j) - a2 = theta*wa(col + j) - do 32 i = 1, nfree - k = index(i) - r(i) = r(i) + wy(k,pointr)*a1 + ws(k,pointr)*a2 - 32 continue - pointr = mod(pointr,m) + 1 - 34 continue - endif - - return - - end - -c======================= The end of cmprlb ============================= - - subroutine errclb(n, m, factr, l, u, nbd, task, info, k) - - character*60 task - integer n, m, info, k, nbd(n) - double precision factr, l(n), u(n) - -c ************ -c -c Subroutine errclb -c -c This subroutine checks the validity of the input data. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i - double precision one,zero - parameter (one=1.0d0,zero=0.0d0) - -c Check the input arguments for errors. - - if (n .le. 0) task = 'ERROR: N .LE. 0' - if (m .le. 0) task = 'ERROR: M .LE. 0' - if (factr .lt. zero) task = 'ERROR: FACTR .LT. 0' - -c Check the validity of the arrays nbd(i), u(i), and l(i). - - do 10 i = 1, n - if (nbd(i) .lt. 0 .or. nbd(i) .gt. 3) then -c return - task = 'ERROR: INVALID NBD' - info = -6 - k = i - endif - if (nbd(i) .eq. 2) then - if (l(i) .gt. 
u(i)) then -c return - task = 'ERROR: NO FEASIBLE SOLUTION' - info = -7 - k = i - endif - endif - 10 continue - - return - - end - -c======================= The end of errclb ============================= - - subroutine formk(n, nsub, ind, nenter, ileave, indx2, iupdat, - + updatd, wn, wn1, m, ws, wy, sy, theta, col, - + head, info) - - integer n, nsub, m, col, head, nenter, ileave, iupdat, - + info, ind(n), indx2(n) - double precision theta, wn(2*m, 2*m), wn1(2*m, 2*m), - + ws(n, m), wy(n, m), sy(m, m) - logical updatd - -c ************ -c -c Subroutine formk -c -c This subroutine forms the LEL^T factorization of the indefinite -c -c matrix K = [-D -Y'ZZ'Y/theta L_a'-R_z' ] -c [L_a -R_z theta*S'AA'S ] -c where E = [-I 0] -c [ 0 I] -c The matrix K can be shown to be equal to the matrix M^[-1]N -c occurring in section 5.1 of [1], as well as to the matrix -c Mbar^[-1] Nbar in section 5.3. -c -c n is an integer variable. -c On entry n is the dimension of the problem. -c On exit n is unchanged. -c -c nsub is an integer variable -c On entry nsub is the number of subspace variables in free set. -c On exit nsub is not changed. -c -c ind is an integer array of dimension nsub. -c On entry ind specifies the indices of subspace variables. -c On exit ind is unchanged. -c -c nenter is an integer variable. -c On entry nenter is the number of variables entering the -c free set. -c On exit nenter is unchanged. -c -c ileave is an integer variable. -c On entry indx2(ileave),...,indx2(n) are the variables leaving -c the free set. -c On exit ileave is unchanged. -c -c indx2 is an integer array of dimension n. -c On entry indx2(1),...,indx2(nenter) are the variables entering -c the free set, while indx2(ileave),...,indx2(n) are the -c variables leaving the free set. -c On exit indx2 is unchanged. -c -c iupdat is an integer variable. -c On entry iupdat is the total number of BFGS updates made so far. -c On exit iupdat is unchanged. -c -c updatd is a logical variable. 
-c On entry 'updatd' is true if the L-BFGS matrix is updatd. -c On exit 'updatd' is unchanged. -c -c wn is a double precision array of dimension 2m x 2m. -c On entry wn is unspecified. -c On exit the upper triangle of wn stores the LEL^T factorization -c of the 2*col x 2*col indefinite matrix -c [-D -Y'ZZ'Y/theta L_a'-R_z' ] -c [L_a -R_z theta*S'AA'S ] -c -c wn1 is a double precision array of dimension 2m x 2m. -c On entry wn1 stores the lower triangular part of -c [Y' ZZ'Y L_a'+R_z'] -c [L_a+R_z S'AA'S ] -c in the previous iteration. -c On exit wn1 stores the corresponding updated matrices. -c The purpose of wn1 is just to store these inner products -c so they can be easily updated and inserted into wn. -c -c m is an integer variable. -c On entry m is the maximum number of variable metric corrections -c used to define the limited memory matrix. -c On exit m is unchanged. -c -c ws, wy, sy, and wtyy are double precision arrays; -c theta is a double precision variable; -c col is an integer variable; -c head is an integer variable. -c On entry they store the information defining the -c limited memory BFGS matrix: -c ws(n,m) stores S, a set of s-vectors; -c wy(n,m) stores Y, a set of y-vectors; -c sy(m,m) stores S'Y; -c wtyy(m,m) stores the Cholesky factorization -c of (theta*S'S+LD^(-1)L') -c theta is the scaling factor specifying B_0 = theta I; -c col is the number of variable metric corrections stored; -c head is the location of the 1st s- (or y-) vector in S (or Y). -c On exit they are unchanged. -c -c info is an integer variable. -c On entry info is unspecified. -c On exit info = 0 for normal return; -c = -1 when the 1st Cholesky factorization failed; -c = -2 when the 2st Cholesky factorization failed. -c -c Subprograms called: -c -c Linpack ... dcopy, dpofa, dtrsl. -c -c -c References: -c [1] R. H. Byrd, P. Lu, J. Nocedal and C. Zhu, ``A limited -c memory algorithm for bound constrained optimization'', -c SIAM J. Scientific Computing 16 (1995), no. 5, pp. 
1190--1208. -c -c [2] C. Zhu, R.H. Byrd, P. Lu, J. Nocedal, ``L-BFGS-B: a -c limited memory FORTRAN code for solving bound constrained -c optimization problems'', Tech. Report, NAM-11, EECS Department, -c Northwestern University, 1994. -c -c (Postscript files of these papers are available via anonymous -c ftp to eecs.nwu.edu in the directory pub/lbfgs/lbfgs_bcm.) -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer m2,ipntr,jpntr,iy,is,jy,js,is1,js1,k1,i,k, - + col2,pbegin,pend,dbegin,dend,upcl - double precision ddot,temp1,temp2,temp3,temp4 - double precision one,zero - parameter (one=1.0d0,zero=0.0d0) - -c Form the lower triangular part of -c WN1 = [Y' ZZ'Y L_a'+R_z'] -c [L_a+R_z S'AA'S ] -c where L_a is the strictly lower triangular part of S'AA'Y -c R_z is the upper triangular part of S'ZZ'Y. - - if (updatd) then - if (iupdat .gt. m) then -c shift old part of WN1. - do 10 jy = 1, m - 1 - js = m + jy - call dcopy(m-jy,wn1(jy+1,jy+1),1,wn1(jy,jy),1) - call dcopy(m-jy,wn1(js+1,js+1),1,wn1(js,js),1) - call dcopy(m-1,wn1(m+2,jy+1),1,wn1(m+1,jy),1) - 10 continue - endif - -c put new rows in blocks (1,1), (2,1) and (2,2). - pbegin = 1 - pend = nsub - dbegin = nsub + 1 - dend = n - iy = col - is = m + col - ipntr = head + col - 1 - if (ipntr .gt. 
m) ipntr = ipntr - m - jpntr = head - do 20 jy = 1, col - js = m + jy - temp1 = zero - temp2 = zero - temp3 = zero -c compute element jy of row 'col' of Y'ZZ'Y - do 15 k = pbegin, pend - k1 = ind(k) - temp1 = temp1 + wy(k1,ipntr)*wy(k1,jpntr) - 15 continue -c compute elements jy of row 'col' of L_a and S'AA'S - do 16 k = dbegin, dend - k1 = ind(k) - temp2 = temp2 + ws(k1,ipntr)*ws(k1,jpntr) - temp3 = temp3 + ws(k1,ipntr)*wy(k1,jpntr) - 16 continue - wn1(iy,jy) = temp1 - wn1(is,js) = temp2 - wn1(is,jy) = temp3 - jpntr = mod(jpntr,m) + 1 - 20 continue - -c put new column in block (2,1). - jy = col - jpntr = head + col - 1 - if (jpntr .gt. m) jpntr = jpntr - m - ipntr = head - do 30 i = 1, col - is = m + i - temp3 = zero -c compute element i of column 'col' of R_z - do 25 k = pbegin, pend - k1 = ind(k) - temp3 = temp3 + ws(k1,ipntr)*wy(k1,jpntr) - 25 continue - ipntr = mod(ipntr,m) + 1 - wn1(is,jy) = temp3 - 30 continue - upcl = col - 1 - else - upcl = col - endif - -c modify the old parts in blocks (1,1) and (2,2) due to changes -c in the set of free variables. - ipntr = head - do 45 iy = 1, upcl - is = m + iy - jpntr = head - do 40 jy = 1, iy - js = m + jy - temp1 = zero - temp2 = zero - temp3 = zero - temp4 = zero - do 35 k = 1, nenter - k1 = indx2(k) - temp1 = temp1 + wy(k1,ipntr)*wy(k1,jpntr) - temp2 = temp2 + ws(k1,ipntr)*ws(k1,jpntr) - 35 continue - do 36 k = ileave, n - k1 = indx2(k) - temp3 = temp3 + wy(k1,ipntr)*wy(k1,jpntr) - temp4 = temp4 + ws(k1,ipntr)*ws(k1,jpntr) - 36 continue - wn1(iy,jy) = wn1(iy,jy) + temp1 - temp3 - wn1(is,js) = wn1(is,js) - temp2 + temp4 - jpntr = mod(jpntr,m) + 1 - 40 continue - ipntr = mod(ipntr,m) + 1 - 45 continue - -c modify the old parts in block (2,1). 
- ipntr = head - do 60 is = m + 1, m + upcl - jpntr = head - do 55 jy = 1, upcl - temp1 = zero - temp3 = zero - do 50 k = 1, nenter - k1 = indx2(k) - temp1 = temp1 + ws(k1,ipntr)*wy(k1,jpntr) - 50 continue - do 51 k = ileave, n - k1 = indx2(k) - temp3 = temp3 + ws(k1,ipntr)*wy(k1,jpntr) - 51 continue - if (is .le. jy + m) then - wn1(is,jy) = wn1(is,jy) + temp1 - temp3 - else - wn1(is,jy) = wn1(is,jy) - temp1 + temp3 - endif - jpntr = mod(jpntr,m) + 1 - 55 continue - ipntr = mod(ipntr,m) + 1 - 60 continue - -c Form the upper triangle of WN = [D+Y' ZZ'Y/theta -L_a'+R_z' ] -c [-L_a +R_z S'AA'S*theta] - - m2 = 2*m - do 70 iy = 1, col - is = col + iy - is1 = m + iy - do 65 jy = 1, iy - js = col + jy - js1 = m + jy - wn(jy,iy) = wn1(iy,jy)/theta - wn(js,is) = wn1(is1,js1)*theta - 65 continue - do 66 jy = 1, iy - 1 - wn(jy,is) = -wn1(is1,jy) - 66 continue - do 67 jy = iy, col - wn(jy,is) = wn1(is1,jy) - 67 continue - wn(iy,iy) = wn(iy,iy) + sy(iy,iy) - 70 continue - -c Form the upper triangle of WN= [ LL' L^-1(-L_a'+R_z')] -c [(-L_a +R_z)L'^-1 S'AA'S*theta ] - -c first Cholesky factor (1,1) block of wn to get LL' -c with L' stored in the upper triangle of wn. - call dpofa(wn,m2,col,info) - if (info .ne. 0) then - info = -1 - return - endif -c then form L^-1(-L_a'+R_z') in the (1,2) block. - col2 = 2*col - do 71 js = col+1 ,col2 - call dtrsl(wn,m2,col,wn(1,js),11,info) - 71 continue - -c Form S'AA'S*theta + (L^-1(-L_a'+R_z'))'L^-1(-L_a'+R_z') in the -c upper triangle of (2,2) block of wn. - - - do 72 is = col+1, col2 - do 74 js = is, col2 - wn(is,js) = wn(is,js) + ddot(col,wn(1,is),1,wn(1,js),1) - 74 continue - 72 continue - -c Cholesky factorization of (2,2) block of wn. - - call dpofa(wn(col+1,col+1),m2,col,info) - if (info .ne. 
0) then - info = -2 - return - endif - - return - - end - -c======================= The end of formk ============================== - - subroutine formt(m, wt, sy, ss, col, theta, info) - - integer m, col, info - double precision theta, wt(m, m), sy(m, m), ss(m, m) - -c ************ -c -c Subroutine formt -c -c This subroutine forms the upper half of the pos. def. and symm. -c T = theta*SS + L*D^(-1)*L', stores T in the upper triangle -c of the array wt, and performs the Cholesky factorization of T -c to produce J*J', with J' stored in the upper triangle of wt. -c -c Subprograms called: -c -c Linpack ... dpofa. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i,j,k,k1 - double precision ddum - double precision zero - parameter (zero=0.0d0) - - -c Form the upper half of T = theta*SS + L*D^(-1)*L', -c store T in the upper triangle of the array wt. - - do 52 j = 1, col - wt(1,j) = theta*ss(1,j) - 52 continue - do 55 i = 2, col - do 54 j = i, col - k1 = min(i,j) - 1 - ddum = zero - do 53 k = 1, k1 - ddum = ddum + sy(i,k)*sy(j,k)/sy(k,k) - 53 continue - wt(i,j) = ddum + theta*ss(i,j) - 54 continue - 55 continue - -c Cholesky factorize T to J*J' with -c J' stored in the upper triangle of wt. - - call dpofa(wt,m,col,info) - if (info .ne. 
0) then - info = -3 - endif - - return - - end - -c======================= The end of formt ============================== - - subroutine freev(n, nfree, index, nenter, ileave, indx2, - + iwhere, wrk, updatd, cnstnd, iprint, iter) - - integer n, nfree, nenter, ileave, iprint, iter, - + index(n), indx2(n), iwhere(n) - logical wrk, updatd, cnstnd - -c ************ -c -c Subroutine freev -c -c This subroutine counts the entering and leaving variables when -c iter > 0, and finds the index set of free and active variables -c at the GCP. -c -c cnstnd is a logical variable indicating whether bounds are present -c -c index is an integer array of dimension n -c for i=1,...,nfree, index(i) are the indices of free variables -c for i=nfree+1,...,n, index(i) are the indices of bound variables -c On entry after the first iteration, index gives -c the free variables at the previous iteration. -c On exit it gives the free variables based on the determination -c in cauchy using the array iwhere. -c -c indx2 is an integer array of dimension n -c On entry indx2 is unspecified. -c On exit with iter>0, indx2 indicates which variables -c have changed status since the previous iteration. -c For i= 1,...,nenter, indx2(i) have changed from bound to free. -c For i= ileave+1,...,n, indx2(i) have changed from free to bound. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer iact,i,k - - nenter = 0 - ileave = n + 1 - if (iter .gt. 0 .and. cnstnd) then -c count the entering and leaving variables. - do 20 i = 1, nfree - k = index(i) - if (iwhere(k) .gt. 0) then - ileave = ileave - 1 - indx2(ileave) = k - if (iprint .ge. 
100) write (6,*) - + 'Variable ',k,' leaves the set of free variables' - endif - 20 continue - do 22 i = 1 + nfree, n - k = index(i) - if (iwhere(k) .le. 0) then - nenter = nenter + 1 - indx2(nenter) = k - if (iprint .ge. 100) write (6,*) - + 'Variable ',k,' enters the set of free variables' - endif - 22 continue - if (iprint .ge. 99) write (6,*) - + n+1-ileave,' variables leave; ',nenter,' variables enter' - endif - wrk = (ileave .lt. n+1) .or. (nenter .gt. 0) .or. updatd - -c Find the index set of free and active variables at the GCP. - - nfree = 0 - iact = n + 1 - do 24 i = 1, n - if (iwhere(i) .le. 0) then - nfree = nfree + 1 - index(nfree) = i - else - iact = iact - 1 - index(iact) = i - endif - 24 continue - if (iprint .ge. 99) write (6,*) - + nfree,' variables are free at GCP ',iter + 1 - - return - - end - -c======================= The end of freev ============================== - - subroutine hpsolb(n, t, iorder, iheap) - integer iheap, n, iorder(n) - double precision t(n) - -c ************ -c -c Subroutine hpsolb -c -c This subroutine sorts out the least element of t, and puts the -c remaining elements of t in a heap. -c -c n is an integer variable. -c On entry n is the dimension of the arrays t and iorder. -c On exit n is unchanged. -c -c t is a double precision array of dimension n. -c On entry t stores the elements to be sorted, -c On exit t(n) stores the least elements of t, and t(1) to t(n-1) -c stores the remaining elements in the form of a heap. -c -c iorder is an integer array of dimension n. -c On entry iorder(i) is the index of t(i). -c On exit iorder(i) is still the index of t(i), but iorder may be -c permuted in accordance with t. -c -c iheap is an integer variable specifying the task. -c On entry iheap should be set as follows: -c iheap .eq. 0 if t(1) to t(n) is not in the form of a heap, -c iheap .ne. 0 if otherwise. -c On exit iheap is unchanged. -c -c -c References: -c Algorithm 232 of CACM (J. W. J. Williams): HEAPSORT. 
-c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c ************ - - integer i,j,k,indxin,indxou - double precision ddum,out - - if (iheap .eq. 0) then - -c Rearrange the elements t(1) to t(n) to form a heap. - - do 20 k = 2, n - ddum = t(k) - indxin = iorder(k) - -c Add ddum to the heap. - i = k - 10 continue - if (i.gt.1) then - j = i/2 - if (ddum .lt. t(j)) then - t(i) = t(j) - iorder(i) = iorder(j) - i = j - goto 10 - endif - endif - t(i) = ddum - iorder(i) = indxin - 20 continue - endif - -c Assign to 'out' the value of t(1), the least member of the heap, -c and rearrange the remaining members to form a heap as -c elements 1 to n-1 of t. - - if (n .gt. 1) then - i = 1 - out = t(1) - indxou = iorder(1) - ddum = t(n) - indxin = iorder(n) - -c Restore the heap - 30 continue - j = i+i - if (j .le. n-1) then - if (t(j+1) .lt. t(j)) j = j+1 - if (t(j) .lt. ddum ) then - t(i) = t(j) - iorder(i) = iorder(j) - i = j - goto 30 - endif - endif - t(i) = ddum - iorder(i) = indxin - -c Put the least member in t(n). - - t(n) = out - iorder(n) = indxou - endif - - return - - end - -c====================== The end of hpsolb ============================== - - subroutine lnsrlb(n, l, u, nbd, x, f, fold, gd, gdold, g, d, r, t, - + z, stp, dnorm, dtd, xstep, stpmx, iter, ifun, - + iback, nfgv, info, task, boxed, cnstnd, csave, - + isave, dsave) - - character*60 task, csave - logical boxed, cnstnd - integer n, iter, ifun, iback, nfgv, info, - + nbd(n), isave(2) - double precision f, fold, gd, gdold, stp, dnorm, dtd, xstep, - + stpmx, x(n), l(n), u(n), g(n), d(n), r(n), t(n), - + z(n), dsave(13) -c ********** -c -c Subroutine lnsrlb -c -c This subroutine calls subroutine dcsrch from the Minpack2 library -c to perform the line search. 
Subroutine dscrch is safeguarded so -c that all trial points lie within the feasible region. -c -c Subprograms called: -c -c Minpack2 Library ... dcsrch. -c -c Linpack ... dtrsl, ddot. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ********** - - integer i - double precision ddot,a1,a2 - double precision one,zero,big - parameter (one=1.0d0,zero=0.0d0,big=1.0d+10) - double precision ftol,gtol,xtol - parameter (ftol=1.0d-3,gtol=0.9d0,xtol=0.1d0) - - if (task(1:5) .eq. 'FG_LN') goto 556 - - dtd = ddot(n,d,1,d,1) - dnorm = sqrt(dtd) - -c Determine the maximum step length. - - stpmx = big - if (cnstnd) then - if (iter .eq. 0) then - stpmx = one - else - do 43 i = 1, n - a1 = d(i) - if (nbd(i) .ne. 0) then - if (a1 .lt. zero .and. nbd(i) .le. 2) then - a2 = l(i) - x(i) - if (a2 .ge. zero) then - stpmx = zero - else if (a1*stpmx .lt. a2) then - stpmx = a2/a1 - endif - else if (a1 .gt. zero .and. nbd(i) .ge. 2) then - a2 = u(i) - x(i) - if (a2 .le. zero) then - stpmx = zero - else if (a1*stpmx .gt. a2) then - stpmx = a2/a1 - endif - endif - endif - 43 continue - endif - endif - - if (iter .eq. 0 .and. .not. boxed) then - stp = min(one/dnorm, stpmx) - else - stp = one - endif - - call dcopy(n,x,1,t,1) - call dcopy(n,g,1,r,1) - fold = f - ifun = 0 - iback = 0 - csave = 'START' - 556 continue - gd = ddot(n,g,1,d,1) - if (ifun .eq. 0) then - gdold=gd - if (gd .ge. zero) then -c the directional derivative >=0. -c Line search is impossible. - info = -4 - return - endif - endif - - call dcsrch(f,gd,stp,ftol,gtol,xtol,zero,stpmx,csave,isave,dsave) - - xstep = stp*dnorm - if (csave(1:4) .ne. 'CONV' .and. csave(1:4) .ne. 'WARN') then - task = 'FG_LNSRCH' - ifun = ifun + 1 - nfgv = nfgv + 1 - iback = ifun - 1 - if (stp .eq. 
one) then - call dcopy(n,z,1,x,1) - else - do 41 i = 1, n - x(i) = stp*d(i) + t(i) - 41 continue - endif - else - task = 'NEW_X' - endif - - return - - end - -c======================= The end of lnsrlb ============================= - - subroutine matupd(n, m, ws, wy, sy, ss, d, r, itail, - + iupdat, col, head, theta, rr, dr, stp, dtd) - - integer n, m, itail, iupdat, col, head - double precision theta, rr, dr, stp, dtd, d(n), r(n), - + ws(n, m), wy(n, m), sy(m, m), ss(m, m) - -c ************ -c -c Subroutine matupd -c -c This subroutine updates matrices WS and WY, and forms the -c middle matrix in B. -c -c Subprograms called: -c -c Linpack ... dcopy, ddot. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer j,pointr - double precision ddot - double precision one - parameter (one=1.0d0) - -c Set pointers for matrices WS and WY. - - if (iupdat .le. m) then - col = iupdat - itail = mod(head+iupdat-2,m) + 1 - else - itail = mod(itail,m) + 1 - head = mod(head,m) + 1 - endif - -c Update matrices WS and WY. - - call dcopy(n,d,1,ws(1,itail),1) - call dcopy(n,r,1,wy(1,itail),1) - -c Set theta=yy/ys. - - theta = rr/dr - -c Form the middle matrix in B. - -c update the upper triangle of SS, -c and the lower triangle of SY: - if (iupdat .gt. m) then -c move old information - do 50 j = 1, col - 1 - call dcopy(j,ss(2,j+1),1,ss(1,j),1) - call dcopy(col-j,sy(j+1,j+1),1,sy(j,j),1) - 50 continue - endif -c add new information: the last row of SY -c and the last column of SS: - pointr = head - do 51 j = 1, col - 1 - sy(col,j) = ddot(n,d,1,wy(1,pointr),1) - ss(j,col) = ddot(n,ws(1,pointr),1,d,1) - pointr = mod(pointr,m) + 1 - 51 continue - if (stp .eq. 
one) then - ss(col,col) = dtd - else - ss(col,col) = stp*stp*dtd - endif - sy(col,col) = dr - - return - - end - -c======================= The end of matupd ============================= - - subroutine prn1lb(n, m, l, u, x, iprint, itfile, epsmch) - - integer n, m, iprint, itfile - double precision epsmch, x(n), l(n), u(n) - -c ************ -c -c Subroutine prn1lb -c -c This subroutine prints the input data, initial point, upper and -c lower bounds of each variable, machine precision, as well as -c the headings of the output. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i - - if (iprint .ge. 0) then - write (6,7001) epsmch - write (6,*) 'N = ',n,' M = ',m - if (iprint .ge. 1) then - write (itfile,2001) epsmch - write (itfile,*)'N = ',n,' M = ',m - write (itfile,9001) - if (iprint .gt. 
100) then - write (6,1004) 'L =',(l(i),i = 1,n) - write (6,1004) 'X0 =',(x(i),i = 1,n) - write (6,1004) 'U =',(u(i),i = 1,n) - endif - endif - endif - - 1004 format (/,a4, 1p, 6(1x,d11.4),/,(4x,1p,6(1x,d11.4))) - 2001 format ('RUNNING THE L-BFGS-B CODE',/,/, - + 'it = iteration number',/, - + 'nf = number of function evaluations',/, - + 'nint = number of segments explored during the Cauchy search',/, - + 'nact = number of active bounds at the generalized Cauchy point' - + ,/, - + 'sub = manner in which the subspace minimization terminated:' - + ,/,' con = converged, bnd = a bound was reached',/, - + 'itls = number of iterations performed in the line search',/, - + 'stepl = step length used',/, - + 'tstep = norm of the displacement (total step)',/, - + 'projg = norm of the projected gradient',/, - + 'f = function value',/,/, - + ' * * *',/,/, - + 'Machine precision =',1p,d10.3) - 7001 format ('RUNNING THE L-BFGS-B CODE',/,/, - + ' * * *',/,/, - + 'Machine precision =',1p,d10.3) - 9001 format (/,3x,'it',3x,'nf',2x,'nint',2x,'nact',2x,'sub',2x,'itls', - + 2x,'stepl',4x,'tstep',5x,'projg',8x,'f') - - return - - end - -c======================= The end of prn1lb ============================= - - subroutine prn2lb(n, x, f, g, iprint, itfile, iter, nfgv, nact, - + sbgnrm, nint, word, iword, iback, stp, xstep) - - character*3 word - integer n, iprint, itfile, iter, nfgv, nact, nint, - + iword, iback - double precision f, sbgnrm, stp, xstep, x(n), g(n) - -c ************ -c -c Subroutine prn2lb -c -c This subroutine prints out new information after a successful -c line search. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i,imod - -c 'word' records the status of subspace solutions. - if (iword .eq. 
0) then -c the subspace minimization converged. - word = 'con' - else if (iword .eq. 1) then -c the subspace minimization stopped at a bound. - word = 'bnd' - else if (iword .eq. 5) then -c the truncated Newton step has been used. - word = 'TNT' - else - word = '---' - endif - if (iprint .ge. 99) then - write (6,*) 'LINE SEARCH',iback,' times; norm of step = ',xstep - write (6,2001) iter,f,sbgnrm - if (iprint .gt. 100) then - write (6,1004) 'X =',(x(i), i = 1, n) - write (6,1004) 'G =',(g(i), i = 1, n) - endif - else if (iprint .gt. 0) then - imod = mod(iter,iprint) - if (imod .eq. 0) write (6,2001) iter,f,sbgnrm - endif - if (iprint .ge. 1) write (itfile,3001) - + iter,nfgv,nint,nact,word,iback,stp,xstep,sbgnrm,f - - 1004 format (/,a4, 1p, 6(1x,d11.4),/,(4x,1p,6(1x,d11.4))) - 2001 format - + (/,'At iterate',i5,4x,'f= ',1p,d12.5,4x,'|proj g|= ',1p,d12.5) - 3001 format(2(1x,i4),2(1x,i5),2x,a3,1x,i4,1p,2(2x,d7.1),1p,2(1x,d10.3)) - - return - - end - -c======================= The end of prn2lb ============================= - - subroutine prn3lb(n, x, f, task, iprint, info, itfile, - + iter, nfgv, nintol, nskip, nact, sbgnrm, - + time, nint, word, iback, stp, xstep, k, - + cachyt, sbtime, lnscht) - - character*60 task - character*3 word - integer n, iprint, info, itfile, iter, nfgv, nintol, - + nskip, nact, nint, iback, k - double precision f, sbgnrm, time, stp, xstep, cachyt, sbtime, - + lnscht, x(n) - -c ************ -c -c Subroutine prn3lb -c -c This subroutine prints out information when either a built-in -c convergence test is satisfied or when an error message is -c generated. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i - - if (task(1:5) .eq. 'ERROR') goto 999 - - if (iprint .ge. 
0) then - write (6,3003) - write (6,3004) - write(6,3005) n,iter,nfgv,nintol,nskip,nact,sbgnrm,f - if (iprint .ge. 100) then - write (6,1004) 'X =',(x(i),i = 1,n) - endif - if (iprint .ge. 1) write (6,*) ' F =',f - endif - 999 continue - if (iprint .ge. 0) then - write (6,3009) task - if (info .ne. 0) then - if (info .eq. -1) write (6,9011) - if (info .eq. -2) write (6,9012) - if (info .eq. -3) write (6,9013) - if (info .eq. -4) write (6,9014) - if (info .eq. -5) write (6,9015) - if (info .eq. -6) write (6,*)' Input nbd(',k,') is invalid.' - if (info .eq. -7) - + write (6,*)' l(',k,') > u(',k,'). No feasible solution.' - if (info .eq. -8) write (6,9018) - if (info .eq. -9) write (6,9019) - endif - if (iprint .ge. 1) write (6,3007) cachyt,sbtime,lnscht - write (6,3008) time - if (iprint .ge. 1) then - if (info .eq. -4 .or. info .eq. -9) then - write (itfile,3002) - + iter,nfgv,nint,nact,word,iback,stp,xstep - endif - write (itfile,3009) task - if (info .ne. 0) then - if (info .eq. -1) write (itfile,9011) - if (info .eq. -2) write (itfile,9012) - if (info .eq. -3) write (itfile,9013) - if (info .eq. -4) write (itfile,9014) - if (info .eq. -5) write (itfile,9015) - if (info .eq. -8) write (itfile,9018) - if (info .eq. 
-9) write (itfile,9019) - endif - write (itfile,3008) time - endif - endif - - 1004 format (/,a4, 1p, 6(1x,d11.4),/,(4x,1p,6(1x,d11.4))) - 3002 format(2(1x,i4),2(1x,i5),2x,a3,1x,i4,1p,2(2x,d7.1),6x,'-',10x,'-') - 3003 format (/, - + ' * * *',/,/, - + 'Tit = total number of iterations',/, - + 'Tnf = total number of function evaluations',/, - + 'Tnint = total number of segments explored during', - + ' Cauchy searches',/, - + 'Skip = number of BFGS updates skipped',/, - + 'Nact = number of active bounds at final generalized', - + ' Cauchy point',/, - + 'Projg = norm of the final projected gradient',/, - + 'F = final function value',/,/, - + ' * * *') - 3004 format (/,3x,'N',3x,'Tit',2x,'Tnf',2x,'Tnint',2x, - + 'Skip',2x,'Nact',5x,'Projg',8x,'F') - 3005 format (i5,2(1x,i4),(1x,i6),(2x,i4),(1x,i5),1p,2(2x,d10.3)) - 3006 format (i5,2(1x,i4),2(1x,i6),(1x,i4),(1x,i5),7x,'-',10x,'-') - 3007 format (/,' Cauchy time',1p,e10.3,' seconds.',/ - + ' Subspace minimization time',1p,e10.3,' seconds.',/ - + ' Line search time',1p,e10.3,' seconds.') - 3008 format (/,' Total User time',1p,e10.3,' seconds.',/) - 3009 format (/,a60) - 9011 format (/, - +' Matrix in 1st Cholesky factorization in formk is not Pos. Def.') - 9012 format (/, - +' Matrix in 2st Cholesky factorization in formk is not Pos. Def.') - 9013 format (/, - +' Matrix in the Cholesky factorization in formt is not Pos. Def.') - 9014 format (/, - +' Derivative >= 0, backtracking line search impossible.',/, - +' Previous x, f and g restored.',/, - +' Possible causes: 1 error in function or gradient evaluation;',/, - +' 2 rounding errors dominate computation.') - 9015 format (/, - +' Warning: more than 10 function and gradient',/, - +' evaluations in the last line search. Termination',/, - +' may possibly be caused by a bad search direction.') - 9018 format (/,' The triangular system is singular.') - 9019 format (/, - +' Line search cannot locate an adequate point after 20 function',/ - +,' and gradient evaluations. 
Previous x, f and g restored.',/, - +' Possible causes: 1 error in function or gradient evaluation;',/, - +' 2 rounding error dominate computation.') - - return - - end - -c======================= The end of prn3lb ============================= - - subroutine projgr(n, l, u, nbd, x, g, sbgnrm) - - integer n, nbd(n) - double precision sbgnrm, x(n), l(n), u(n), g(n) - -c ************ -c -c Subroutine projgr -c -c This subroutine computes the infinity norm of the projected -c gradient. -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. -c -c -c ************ - - integer i - double precision gi - double precision one,zero - parameter (one=1.0d0,zero=0.0d0) - - sbgnrm = zero - do 15 i = 1, n - gi = g(i) - if (nbd(i) .ne. 0) then - if (gi .lt. zero) then - if (nbd(i) .ge. 2) gi = max((x(i)-u(i)),gi) - else - if (nbd(i) .le. 2) gi = min((x(i)-l(i)),gi) - endif - endif - sbgnrm = max(sbgnrm,abs(gi)) - 15 continue - - return - - end - -c======================= The end of projgr ============================= - - subroutine subsm(n, m, nsub, ind, l, u, nbd, x, d, ws, wy, theta, - + col, head, iword, wv, wn, iprint, info) - - integer n, m, nsub, col, head, iword, iprint, info, - + ind(nsub), nbd(n) - double precision theta, - + l(n), u(n), x(n), d(n), - + ws(n, m), wy(n, m), - + wv(2*m), wn(2*m, 2*m) - -c ************ -c -c Subroutine subsm -c -c Given xcp, l, u, r, an index set that specifies -c the active set at xcp, and an l-BFGS matrix B -c (in terms of WY, WS, SY, WT, head, col, and theta), -c this subroutine computes an approximate solution -c of the subspace problem -c -c (P) min Q(x) = r'(x-xcp) + 1/2 (x-xcp)' B (x-xcp) -c -c subject to l<=x<=u -c x_i=xcp_i for all i in A(xcp) -c -c along the subspace unconstrained Newton direction -c -c d = -(Z'BZ)^(-1) r. 
-c -c The formula for the Newton direction, given the L-BFGS matrix -c and the Sherman-Morrison formula, is -c -c d = (1/theta)r + (1/theta*2) Z'WK^(-1)W'Z r. -c -c where -c K = [-D -Y'ZZ'Y/theta L_a'-R_z' ] -c [L_a -R_z theta*S'AA'S ] -c -c Note that this procedure for computing d differs -c from that described in [1]. One can show that the matrix K is -c equal to the matrix M^[-1]N in that paper. -c -c n is an integer variable. -c On entry n is the dimension of the problem. -c On exit n is unchanged. -c -c m is an integer variable. -c On entry m is the maximum number of variable metric corrections -c used to define the limited memory matrix. -c On exit m is unchanged. -c -c nsub is an integer variable. -c On entry nsub is the number of free variables. -c On exit nsub is unchanged. -c -c ind is an integer array of dimension nsub. -c On entry ind specifies the coordinate indices of free variables. -c On exit ind is unchanged. -c -c l is a double precision array of dimension n. -c On entry l is the lower bound of x. -c On exit l is unchanged. -c -c u is a double precision array of dimension n. -c On entry u is the upper bound of x. -c On exit u is unchanged. -c -c nbd is a integer array of dimension n. -c On entry nbd represents the type of bounds imposed on the -c variables, and must be specified as follows: -c nbd(i)=0 if x(i) is unbounded, -c 1 if x(i) has only a lower bound, -c 2 if x(i) has both lower and upper bounds, and -c 3 if x(i) has only an upper bound. -c On exit nbd is unchanged. -c -c x is a double precision array of dimension n. -c On entry x specifies the Cauchy point xcp. -c On exit x(i) is the minimizer of Q over the subspace of -c free variables. -c -c d is a double precision array of dimension n. -c On entry d is the reduced gradient of Q at xcp. -c On exit d is the Newton direction of Q. -c -c ws and wy are double precision arrays; -c theta is a double precision variable; -c col is an integer variable; -c head is an integer variable. 
-c On entry they store the information defining the -c limited memory BFGS matrix: -c ws(n,m) stores S, a set of s-vectors; -c wy(n,m) stores Y, a set of y-vectors; -c theta is the scaling factor specifying B_0 = theta I; -c col is the number of variable metric corrections stored; -c head is the location of the 1st s- (or y-) vector in S (or Y). -c On exit they are unchanged. -c -c iword is an integer variable. -c On entry iword is unspecified. -c On exit iword specifies the status of the subspace solution. -c iword = 0 if the solution is in the box, -c 1 if some bound is encountered. -c -c wv is a double precision working array of dimension 2m. -c -c wn is a double precision array of dimension 2m x 2m. -c On entry the upper triangle of wn stores the LEL^T factorization -c of the indefinite matrix -c -c K = [-D -Y'ZZ'Y/theta L_a'-R_z' ] -c [L_a -R_z theta*S'AA'S ] -c where E = [-I 0] -c [ 0 I] -c On exit wn is unchanged. -c -c iprint is an INTEGER variable that must be set by the user. -c It controls the frequency and type of output generated: -c iprint<0 no output is generated; -c iprint=0 print only one line at the last iteration; -c 0100 print details of every iteration including x and g; -c When iprint > 0, the file iterate.dat will be created to -c summarize the iteration. -c -c info is an integer variable. -c On entry info is unspecified. -c On exit info = 0 for normal return, -c = nonzero for abnormal return -c when the matrix K is ill-conditioned. -c -c Subprograms called: -c -c Linpack dtrsl. -c -c -c References: -c -c [1] R. H. Byrd, P. Lu, J. Nocedal and C. Zhu, ``A limited -c memory algorithm for bound constrained optimization'', -c SIAM J. Scientific Computing 16 (1995), no. 5, pp. 1190--1208. -c -c -c -c * * * -c -c NEOS, November 1994. (Latest revision June 1996.) -c Optimization Technology Center. -c Argonne National Laboratory and Northwestern University. -c Written by -c Ciyou Zhu -c in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. 
-c -c -c ************ - - integer pointr,m2,col2,ibd,jy,js,i,j,k - double precision alpha,dk,temp1,temp2 - double precision one,zero - parameter (one=1.0d0,zero=0.0d0) - - if (nsub .le. 0) return - if (iprint .ge. 99) write (6,1001) - -c Compute wv = W'Zd. - - pointr = head - do 20 i = 1, col - temp1 = zero - temp2 = zero - do 10 j = 1, nsub - k = ind(j) - temp1 = temp1 + wy(k,pointr)*d(j) - temp2 = temp2 + ws(k,pointr)*d(j) - 10 continue - wv(i) = temp1 - wv(col + i) = theta*temp2 - pointr = mod(pointr,m) + 1 - 20 continue - -c Compute wv:=K^(-1)wv. - - m2 = 2*m - col2 = 2*col - call dtrsl(wn,m2,col2,wv,11,info) - if (info .ne. 0) return - do 25 i = 1, col - wv(i) = -wv(i) - 25 continue - call dtrsl(wn,m2,col2,wv,01,info) - if (info .ne. 0) return - -c Compute d = (1/theta)d + (1/theta**2)Z'W wv. - - pointr = head - do 40 jy = 1, col - js = col + jy - do 30 i = 1, nsub - k = ind(i) - d(i) = d(i) + wy(k,pointr)*wv(jy)/theta - + + ws(k,pointr)*wv(js) - 30 continue - pointr = mod(pointr,m) + 1 - 40 continue - do 50 i = 1, nsub - d(i) = d(i)/theta - 50 continue - -c Backtrack to the feasible region. - - alpha = one - temp1 = alpha - do 60 i = 1, nsub - k = ind(i) - dk = d(i) - if (nbd(k) .ne. 0) then - if (dk .lt. zero .and. nbd(k) .le. 2) then - temp2 = l(k) - x(k) - if (temp2 .ge. zero) then - temp1 = zero - else if (dk*alpha .lt. temp2) then - temp1 = temp2/dk - endif - else if (dk .gt. zero .and. nbd(k) .ge. 2) then - temp2 = u(k) - x(k) - if (temp2 .le. zero) then - temp1 = zero - else if (dk*alpha .gt. temp2) then - temp1 = temp2/dk - endif - endif - if (temp1 .lt. alpha) then - alpha = temp1 - ibd = i - endif - endif - 60 continue - - if (alpha .lt. one) then - dk = d(ibd) - k = ind(ibd) - if (dk .gt. zero) then - x(k) = u(k) - d(ibd) = zero - else if (dk .lt. zero) then - x(k) = l(k) - d(ibd) = zero - endif - endif - do 70 i = 1, nsub - k = ind(i) - x(k) = x(k) + alpha*d(i) - 70 continue - - if (iprint .ge. 99) then - if (alpha .lt. 
one) then - write (6,1002) alpha - else - write (6,*) 'SM solution inside the box' - end if - if (iprint .gt.100) write (6,1003) (x(i),i=1,n) - endif - - if (alpha .lt. one) then - iword = 1 - else - iword = 0 - endif - if (iprint .ge. 99) write (6,1004) - - 1001 format (/,'----------------SUBSM entered-----------------',/) - 1002 format ( 'ALPHA = ',f7.5,' backtrack to the BOX') - 1003 format ('Subspace solution X = ',/,(4x,1p,6(1x,d11.4))) - 1004 format (/,'----------------exit SUBSM --------------------',/) - - return - - end - -c====================== The end of subsm =============================== - - subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax, - + task,isave,dsave) - character*(*) task - integer isave(2) - double precision f,g,stp,ftol,gtol,xtol,stpmin,stpmax - double precision dsave(13) -c ********** -c -c Subroutine dcsrch -c -c This subroutine finds a step that satisfies a sufficient -c decrease condition and a curvature condition. -c -c Each call of the subroutine updates an interval with -c endpoints stx and sty. The interval is initially chosen -c so that it contains a minimizer of the modified function -c -c psi(stp) = f(stp) - f(0) - ftol*stp*f'(0). -c -c If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the -c interval is chosen so that it contains a minimizer of f. -c -c The algorithm is designed to find a step that satisfies -c the sufficient decrease condition -c -c f(stp) <= f(0) + ftol*stp*f'(0), -c -c and the curvature condition -c -c abs(f'(stp)) <= gtol*abs(f'(0)). -c -c If ftol is less than gtol and if, for example, the function -c is bounded below, then there is always a step which satisfies -c both conditions. -c -c If no step can be found that satisfies both conditions, then -c the algorithm stops with a warning. In this case stp only -c satisfies the sufficient decrease condition. -c -c A typical invocation of dcsrch has the following outline: -c -c task = 'START' -c 10 continue -c call dcsrch( ... ) -c if (task .eq. 
'FG') then -c Evaluate the function and the gradient at stp -c goto 10 -c end if -c -c NOTE: The user must no alter work arrays between calls. -c -c The subroutine statement is -c -c subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax, -c task,isave,dsave) -c where -c -c f is a double precision variable. -c On initial entry f is the value of the function at 0. -c On subsequent entries f is the value of the -c function at stp. -c On exit f is the value of the function at stp. -c -c g is a double precision variable. -c On initial entry g is the derivative of the function at 0. -c On subsequent entries g is the derivative of the -c function at stp. -c On exit g is the derivative of the function at stp. -c -c stp is a double precision variable. -c On entry stp is the current estimate of a satisfactory -c step. On initial entry, a positive initial estimate -c must be provided. -c On exit stp is the current estimate of a satisfactory step -c if task = 'FG'. If task = 'CONV' then stp satisfies -c the sufficient decrease and curvature condition. -c -c ftol is a double precision variable. -c On entry ftol specifies a nonnegative tolerance for the -c sufficient decrease condition. -c On exit ftol is unchanged. -c -c gtol is a double precision variable. -c On entry gtol specifies a nonnegative tolerance for the -c curvature condition. -c On exit gtol is unchanged. -c -c xtol is a double precision variable. -c On entry xtol specifies a nonnegative relative tolerance -c for an acceptable step. The subroutine exits with a -c warning if the relative difference between sty and stx -c is less than xtol. -c On exit xtol is unchanged. -c -c stpmin is a double precision variable. -c On entry stpmin is a nonnegative lower bound for the step. -c On exit stpmin is unchanged. -c -c stpmax is a double precision variable. -c On entry stpmax is a nonnegative upper bound for the step. -c On exit stpmax is unchanged. -c -c task is a character variable of length at least 60. 
-c On initial entry task must be set to 'START'. -c On exit task indicates the required action: -c -c If task(1:2) = 'FG' then evaluate the function and -c derivative at stp and call dcsrch again. -c -c If task(1:4) = 'CONV' then the search is successful. -c -c If task(1:4) = 'WARN' then the subroutine is not able -c to satisfy the convergence conditions. The exit value of -c stp contains the best point found during the search. -c -c If task(1:5) = 'ERROR' then there is an error in the -c input arguments. -c -c On exit with convergence, a warning or an error, the -c variable task contains additional information. -c -c isave is an integer work array of dimension 2. -c -c dsave is a double precision work array of dimension 13. -c -c Subprograms called -c -c MINPACK-2 ... dcstep -c -c MINPACK-1 Project. June 1983. -c Argonne National Laboratory. -c Jorge J. More' and David J. Thuente. -c -c MINPACK-2 Project. October 1993. -c Argonne National Laboratory and University of Minnesota. -c Brett M. Averick, Richard G. Carter, and Jorge J. More'. -c -c ********** - double precision zero,p5,p66 - parameter(zero=0.0d0,p5=0.5d0,p66=0.66d0) - double precision xtrapl,xtrapu - parameter(xtrapl=1.1d0,xtrapu=4.0d0) - - logical brackt - integer stage - double precision finit,ftest,fm,fx,fxm,fy,fym,ginit,gtest, - + gm,gx,gxm,gy,gym,stx,sty,stmin,stmax,width,width1 - -c Initialization block. - - if (task(1:5) .eq. 'START') then - -c Check the input arguments for errors. - - if (stp .lt. stpmin) task = 'ERROR: STP .LT. STPMIN' - if (stp .gt. stpmax) task = 'ERROR: STP .GT. STPMAX' - if (g .ge. zero) task = 'ERROR: INITIAL G .GE. ZERO' - if (ftol .lt. zero) task = 'ERROR: FTOL .LT. ZERO' - if (gtol .lt. zero) task = 'ERROR: GTOL .LT. ZERO' - if (xtol .lt. zero) task = 'ERROR: XTOL .LT. ZERO' - if (stpmin .lt. zero) task = 'ERROR: STPMIN .LT. ZERO' - if (stpmax .lt. stpmin) task = 'ERROR: STPMAX .LT. STPMIN' - -c Exit if there are errors on input. - - if (task(1:5) .eq. 
'ERROR') return - -c Initialize local variables. - - brackt = .false. - stage = 1 - finit = f - ginit = g - gtest = ftol*ginit - width = stpmax - stpmin - width1 = width/p5 - -c The variables stx, fx, gx contain the values of the step, -c function, and derivative at the best step. -c The variables sty, fy, gy contain the value of the step, -c function, and derivative at sty. -c The variables stp, f, g contain the values of the step, -c function, and derivative at stp. - - stx = zero - fx = finit - gx = ginit - sty = zero - fy = finit - gy = ginit - stmin = zero - stmax = stp + xtrapu*stp - task = 'FG' - - goto 1000 - - else - -c Restore local variables. - - if (isave(1) .eq. 1) then - brackt = .true. - else - brackt = .false. - endif - stage = isave(2) - ginit = dsave(1) - gtest = dsave(2) - gx = dsave(3) - gy = dsave(4) - finit = dsave(5) - fx = dsave(6) - fy = dsave(7) - stx = dsave(8) - sty = dsave(9) - stmin = dsave(10) - stmax = dsave(11) - width = dsave(12) - width1 = dsave(13) - - endif - -c If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the -c algorithm enters the second stage. - - ftest = finit + stp*gtest - if (stage .eq. 1 .and. f .le. ftest .and. g .ge. zero) - + stage = 2 - -c Test for warnings. - - if (brackt .and. (stp .le. stmin .or. stp .ge. stmax)) - + task = 'WARNING: ROUNDING ERRORS PREVENT PROGRESS' - if (brackt .and. stmax - stmin .le. xtol*stmax) - + task = 'WARNING: XTOL TEST SATISFIED' - if (stp .eq. stpmax .and. f .le. ftest .and. g .le. gtest) - + task = 'WARNING: STP = STPMAX' - if (stp .eq. stpmin .and. (f .gt. ftest .or. g .ge. gtest)) - + task = 'WARNING: STP = STPMIN' - -c Test for convergence. - - if (f .le. ftest .and. abs(g) .le. gtol*(-ginit)) - + task = 'CONVERGENCE' - -c Test for termination. - - if (task(1:4) .eq. 'WARN' .or. task(1:4) .eq. 'CONV') goto 1000 - -c A modified function is used to predict the step during the -c first stage if a lower function value has been obtained but -c the decrease is not sufficient. 
- - if (stage .eq. 1 .and. f .le. fx .and. f .gt. ftest) then - -c Define the modified function and derivative values. - - fm = f - stp*gtest - fxm = fx - stx*gtest - fym = fy - sty*gtest - gm = g - gtest - gxm = gx - gtest - gym = gy - gtest - -c Call dcstep to update stx, sty, and to compute the new step. - - call dcstep(stx,fxm,gxm,sty,fym,gym,stp,fm,gm, - + brackt,stmin,stmax) - -c Reset the function and derivative values for f. - - fx = fxm + stx*gtest - fy = fym + sty*gtest - gx = gxm + gtest - gy = gym + gtest - - else - -c Call dcstep to update stx, sty, and to compute the new step. - - call dcstep(stx,fx,gx,sty,fy,gy,stp,f,g, - + brackt,stmin,stmax) - - endif - -c Decide if a bisection step is needed. - - if (brackt) then - if (abs(sty-stx) .ge. p66*width1) stp = stx + p5*(sty - stx) - width1 = width - width = abs(sty-stx) - endif - -c Set the minimum and maximum steps allowed for stp. - - if (brackt) then - stmin = min(stx,sty) - stmax = max(stx,sty) - else - stmin = stp + xtrapl*(stp - stx) - stmax = stp + xtrapu*(stp - stx) - endif - -c Force the step to be within the bounds stpmax and stpmin. - - stp = max(stp,stpmin) - stp = min(stp,stpmax) - -c If further progress is not possible, let stp be the best -c point obtained during the search. - - if (brackt .and. (stp .le. stmin .or. stp .ge. stmax) - + .or. (brackt .and. stmax-stmin .le. xtol*stmax)) stp = stx - -c Obtain another function and derivative. - - task = 'FG' - - 1000 continue - -c Save local variables. 
- - if (brackt) then - isave(1) = 1 - else - isave(1) = 0 - endif - isave(2) = stage - dsave(1) = ginit - dsave(2) = gtest - dsave(3) = gx - dsave(4) = gy - dsave(5) = finit - dsave(6) = fx - dsave(7) = fy - dsave(8) = stx - dsave(9) = sty - dsave(10) = stmin - dsave(11) = stmax - dsave(12) = width - dsave(13) = width1 - - end - -c====================== The end of dcsrch ============================== - - subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt, - + stpmin,stpmax) - logical brackt - double precision stx,fx,dx,sty,fy,dy,stp,fp,dp,stpmin,stpmax -c ********** -c -c Subroutine dcstep -c -c This subroutine computes a safeguarded step for a search -c procedure and updates an interval that contains a step that -c satisfies a sufficient decrease and a curvature condition. -c -c The parameter stx contains the step with the least function -c value. If brackt is set to .true. then a minimizer has -c been bracketed in an interval with endpoints stx and sty. -c The parameter stp contains the current step. -c The subroutine assumes that if brackt is set to .true. then -c -c min(stx,sty) < stp < max(stx,sty), -c -c and that the derivative at stx is negative in the direction -c of the step. -c -c The subroutine statement is -c -c subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt, -c stpmin,stpmax) -c -c where -c -c stx is a double precision variable. -c On entry stx is the best step obtained so far and is an -c endpoint of the interval that contains the minimizer. -c On exit stx is the updated best step. -c -c fx is a double precision variable. -c On entry fx is the function at stx. -c On exit fx is the function at stx. -c -c dx is a double precision variable. -c On entry dx is the derivative of the function at -c stx. The derivative must be negative in the direction of -c the step, that is, dx and stp - stx must have opposite -c signs. -c On exit dx is the derivative of the function at stx. -c -c sty is a double precision variable. 
-c On entry sty is the second endpoint of the interval that -c contains the minimizer. -c On exit sty is the updated endpoint of the interval that -c contains the minimizer. -c -c fy is a double precision variable. -c On entry fy is the function at sty. -c On exit fy is the function at sty. -c -c dy is a double precision variable. -c On entry dy is the derivative of the function at sty. -c On exit dy is the derivative of the function at the exit sty. -c -c stp is a double precision variable. -c On entry stp is the current step. If brackt is set to .true. -c then on input stp must be between stx and sty. -c On exit stp is a new trial step. -c -c fp is a double precision variable. -c On entry fp is the function at stp -c On exit fp is unchanged. -c -c dp is a double precision variable. -c On entry dp is the the derivative of the function at stp. -c On exit dp is unchanged. -c -c brackt is an logical variable. -c On entry brackt specifies if a minimizer has been bracketed. -c Initially brackt must be set to .false. -c On exit brackt specifies if a minimizer has been bracketed. -c When a minimizer is bracketed brackt is set to .true. -c -c stpmin is a double precision variable. -c On entry stpmin is a lower bound for the step. -c On exit stpmin is unchanged. -c -c stpmax is a double precision variable. -c On entry stpmax is an upper bound for the step. -c On exit stpmax is unchanged. -c -c MINPACK-1 Project. June 1983 -c Argonne National Laboratory. -c Jorge J. More' and David J. Thuente. -c -c MINPACK-2 Project. October 1993. -c Argonne National Laboratory and University of Minnesota. -c Brett M. Averick and Jorge J. More'. -c -c ********** - double precision zero,p66,two,three - parameter(zero=0.0d0,p66=0.66d0,two=2.0d0,three=3.0d0) - - double precision gamma,p,q,r,s,sgnd,stpc,stpf,stpq,theta - - sgnd = dp*(dx/abs(dx)) - -c First case: A higher function value. The minimum is bracketed. 
-c If the cubic step is closer to stx than the quadratic step, the -c cubic step is taken, otherwise the average of the cubic and -c quadratic steps is taken. - - if (fp .gt. fx) then - theta = three*(fx - fp)/(stp - stx) + dx + dp - s = max(abs(theta),abs(dx),abs(dp)) - gamma = s*sqrt((theta/s)**2 - (dx/s)*(dp/s)) - if (stp .lt. stx) gamma = -gamma - p = (gamma - dx) + theta - q = ((gamma - dx) + gamma) + dp - r = p/q - stpc = stx + r*(stp - stx) - stpq = stx + ((dx/((fx - fp)/(stp - stx) + dx))/two)* - + (stp - stx) - if (abs(stpc-stx) .lt. abs(stpq-stx)) then - stpf = stpc - else - stpf = stpc + (stpq - stpc)/two - endif - brackt = .true. - -c Second case: A lower function value and derivatives of opposite -c sign. The minimum is bracketed. If the cubic step is farther from -c stp than the secant step, the cubic step is taken, otherwise the -c secant step is taken. - - else if (sgnd .lt. zero) then - theta = three*(fx - fp)/(stp - stx) + dx + dp - s = max(abs(theta),abs(dx),abs(dp)) - gamma = s*sqrt((theta/s)**2 - (dx/s)*(dp/s)) - if (stp .gt. stx) gamma = -gamma - p = (gamma - dp) + theta - q = ((gamma - dp) + gamma) + dx - r = p/q - stpc = stp + r*(stx - stp) - stpq = stp + (dp/(dp - dx))*(stx - stp) - if (abs(stpc-stp) .gt. abs(stpq-stp)) then - stpf = stpc - else - stpf = stpq - endif - brackt = .true. - -c Third case: A lower function value, derivatives of the same sign, -c and the magnitude of the derivative decreases. - - else if (abs(dp) .lt. abs(dx)) then - -c The cubic step is computed only if the cubic tends to infinity -c in the direction of the step or if the minimum of the cubic -c is beyond stp. Otherwise the cubic step is defined to be the -c secant step. - - theta = three*(fx - fp)/(stp - stx) + dx + dp - s = max(abs(theta),abs(dx),abs(dp)) - -c The case gamma = 0 only arises if the cubic does not tend -c to infinity in the direction of the step. - - gamma = s*sqrt(max(zero,(theta/s)**2-(dx/s)*(dp/s))) - if (stp .gt. 
stx) gamma = -gamma - p = (gamma - dp) + theta - q = (gamma + (dx - dp)) + gamma - r = p/q - if (r .lt. zero .and. gamma .ne. zero) then - stpc = stp + r*(stx - stp) - else if (stp .gt. stx) then - stpc = stpmax - else - stpc = stpmin - endif - stpq = stp + (dp/(dp - dx))*(stx - stp) - - if (brackt) then - -c A minimizer has been bracketed. If the cubic step is -c closer to stp than the secant step, the cubic step is -c taken, otherwise the secant step is taken. - - if (abs(stpc-stp) .lt. abs(stpq-stp)) then - stpf = stpc - else - stpf = stpq - endif - if (stp .gt. stx) then - stpf = min(stp+p66*(sty-stp),stpf) - else - stpf = max(stp+p66*(sty-stp),stpf) - endif - else - -c A minimizer has not been bracketed. If the cubic step is -c farther from stp than the secant step, the cubic step is -c taken, otherwise the secant step is taken. - - if (abs(stpc-stp) .gt. abs(stpq-stp)) then - stpf = stpc - else - stpf = stpq - endif - stpf = min(stpmax,stpf) - stpf = max(stpmin,stpf) - endif - -c Fourth case: A lower function value, derivatives of the same sign, -c and the magnitude of the derivative does not decrease. If the -c minimum is not bracketed, the step is either stpmin or stpmax, -c otherwise the cubic step is taken. - - else - if (brackt) then - theta = three*(fp - fy)/(sty - stp) + dy + dp - s = max(abs(theta),abs(dy),abs(dp)) - gamma = s*sqrt((theta/s)**2 - (dy/s)*(dp/s)) - if (stp .gt. sty) gamma = -gamma - p = (gamma - dp) + theta - q = ((gamma - dp) + gamma) + dy - r = p/q - stpc = stp + r*(sty - stp) - stpf = stpc - else if (stp .gt. stx) then - stpf = stpmax - else - stpf = stpmin - endif - endif - -c Update the interval which contains a minimizer. - - if (fp .gt. fx) then - sty = stp - fy = fp - dy = dp - else - if (sgnd .lt. zero) then - sty = stx - fy = fx - dy = dx - endif - stx = stp - fx = fp - dx = dp - endif - -c Compute the new step. 
- - stp = stpf - - end - -c====================== The end of dcstep ============================== - - subroutine timer(ttime) - double precision ttime - real t1 -c ********* -c -c Subroutine timer -c -c This subroutine is used to determine user time. In a typical -c application, the user time for a code segment requires calls -c to subroutine timer to determine the initial and final time. -c -c The subroutine statement is -c -c subroutine timer(ttime) -c -c where -c -c ttime is an output variable which specifies the user time. -c -c Argonne National Laboratory and University of Minnesota. -c MINPACK-2 Project. -c -c Modified October 1990 by Brett M. Averick. -c -c ********** - - call cpu_time(t1) - ttime = t1 - - return - - end - -c====================== The end of timer =============================== - - double precision function dpmeps() -c ********** -c -c Subroutine dpeps -c -c This subroutine computes the machine precision parameter -c dpmeps as the smallest floating point number such that -c 1 + dpmeps differs from 1. -c -c This subroutine is based on the subroutine machar described in -c -c W. J. Cody, -c MACHAR: A subroutine to dynamically determine machine parameters, -c ACM Transactions on Mathematical Software, 14, 1988, pages 303-311. -c -c The subroutine statement is: -c -c subroutine dpeps(dpmeps) -c -c where -c -c dpmeps is a double precision variable. -c On entry dpmeps need not be specified. -c On exit dpmeps is the machine precision. -c -c MINPACK-2 Project. February 1991. -c Argonne National Laboratory and University of Minnesota. -c Brett M. Averick. -c -c ******* - integer i,ibeta,irnd,it,itemp,negep - double precision a,b,beta,betain,betah,temp,tempa,temp1, - + zero,one,two - data zero,one,two /0.0d0,1.0d0,2.0d0/ - -c determine ibeta, beta ala malcolm. - - a = one - b = one - 10 continue - a = a + a - temp = a + one - temp1 = temp - a - if (temp1 - one .eq. 
zero) go to 10 - 20 continue - b = b + b - temp = a + b - itemp = int(temp - a) - if (itemp .eq. 0) go to 20 - ibeta = itemp - beta = dble(ibeta) - -c determine it, irnd. - - it = 0 - b = one - 30 continue - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if (temp1 - one .eq. zero) go to 30 - irnd = 0 - betah = beta/two - temp = a + betah - if (temp - a .ne. zero) irnd = 1 - tempa = a + beta - temp = tempa + betah - if ((irnd .eq. 0) .and. (temp - tempa .ne. zero)) irnd = 2 - -c determine dpmeps. - - negep = it + 3 - betain = one/beta - a = one - do 40 i = 1, negep - a = a*betain - 40 continue - 50 continue - temp = one + a - if (temp - one .ne. zero) go to 60 - a = a*beta - go to 50 - 60 continue - dpmeps = a - if ((ibeta .eq. 2) .or. (irnd .eq. 0)) go to 70 - a = (a*(one + a))/two - temp = one + a - if (temp - one .ne. zero) dpmeps = a - - 70 return - - end - -c====================== The end of dpmeps ============================== - - subroutine dpofa(a,lda,n,info) - integer lda,n,info - double precision a(lda,1) -c -c dpofa factors a double precision symmetric positive definite -c matrix. -c -c dpofa is usually called by dpoco, but it can be called -c directly with a saving in time if rcond is not needed. -c (time for dpoco) = (1 + 18/n)*(time for dpofa) . -c -c on entry -c -c a double precision(lda, n) -c the symmetric matrix to be factored. only the -c diagonal and upper triangle are used. -c -c lda integer -c the leading dimension of the array a . -c -c n integer -c the order of the matrix a . -c -c on return -c -c a an upper triangular matrix r so that a = trans(r)*r -c where trans(r) is the transpose. -c the strict lower triangle is unaltered. -c if info .ne. 0 , the factorization is not complete. -c -c info integer -c = 0 for normal return. -c = k signals an error condition. the leading minor -c of order k is not positive definite. -c -c This is just a wrapper that calls LAPACK, but with the LINPACK -c calling convention. 
- - call dpotrf('U', n, a, lda, info) - end - -c====================== The end of dpofa =============================== - - subroutine dtrsl(t, ldt, n, b, job, info) - integer ldt, n, job, info - double precision t(ldt,1), b(1) -c -c -c dtrsl solves systems of the form -c -c t * x = b -c or -c trans(t) * x = b -c -c where t is a triangular matrix of order n. here trans(t) -c denotes the transpose of the matrix t. -c -c on entry -c -c t double precision(ldt,n) -c t contains the matrix of the system. the zero -c elements of the matrix are not referenced, and -c the corresponding elements of the array can be -c used to store other information. -c -c ldt integer -c ldt is the leading dimension of the array t. -c -c n integer -c n is the order of the system. -c -c b double precision(n). -c b contains the right hand side of the system. -c -c job integer -c job specifies what kind of system is to be solved. -c if job is -c -c 00 solve t*x=b, t lower triangular, -c 01 solve t*x=b, t upper triangular, -c 10 solve trans(t)*x=b, t lower triangular, -c 11 solve trans(t)*x=b, t upper triangular. -c -c on return -c -c b b contains the solution, if info .eq. 0. -c otherwise b is unaltered. -c -c info integer -c info contains zero if the system is nonsingular. -c otherwise info contains the index of -c the first zero diagonal element of t. -c -c This is just a wrapper that calls LAPACK, but with the LINPACK -c calling convention. - - character*1 uplo, trans - - if (job .eq. 00) then - uplo = 'L' - trans = 'N' - else if (job .eq. 01) then - uplo = 'U' - trans = 'N' - else if (job .eq. 10) then - uplo = 'L' - trans = 'T' - else if (job .eq. 
11) then - uplo = 'U' - trans = 'T' - endif - call dtrtrs(uplo, trans, 'N', n, 1, t, ldt, b, n, info) - end -c====================== The end of dtrsl ============================== diff --git a/scipy-0.10.1/scipy/optimize/linesearch.py b/scipy-0.10.1/scipy/optimize/linesearch.py deleted file mode 100644 index 5495e50a1f..0000000000 --- a/scipy-0.10.1/scipy/optimize/linesearch.py +++ /dev/null @@ -1,614 +0,0 @@ -from scipy.optimize import minpack2 -import numpy as np -from numpy.compat import asbytes - -__all__ = ['line_search_wolfe1', 'line_search_wolfe2', - 'scalar_search_wolfe1', 'scalar_search_wolfe2', - 'line_search_armijo'] - -#------------------------------------------------------------------------------ -# Minpack's Wolfe line and scalar searches -#------------------------------------------------------------------------------ - -def line_search_wolfe1(f, fprime, xk, pk, gfk=None, - old_fval=None, old_old_fval=None, - args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8, - xtol=1e-14): - """ - As `scalar_search_wolfe1` but do a line search to direction `pk` - - Parameters - ---------- - f : callable - Function `f(x)` - fprime : callable - Gradient of `f` - xk : array_like - Current point - pk : array_like - Search direction - - gfk : array_like, optional - Gradient of `f` at point `xk` - old_fval : float, optional - Value of `f` at point `xk` - old_old_fval : float, optional - Value of `f` at point preceding `xk` - - The rest of the parameters are the same as for `scalar_search_wolfe1`. 
- - Returns - ------- - stp, f_count, g_count, fval, old_fval - As in `line_search_wolfe1` - gval : array - Gradient of `f` at the final point - - """ - if gfk is None: - gfk = fprime(xk) - - if isinstance(fprime, tuple): - eps = fprime[1] - fprime = fprime[0] - newargs = (f, eps) + args - gradient = False - else: - newargs = args - gradient = True - - gval = [gfk] - gc = [0] - fc = [0] - - def phi(s): - fc[0] += 1 - return f(xk + s*pk, *args) - - def derphi(s): - gval[0] = fprime(xk + s*pk, *newargs) - if gradient: - gc[0] += 1 - else: - fc[0] += len(xk) + 1 - return np.dot(gval[0], pk) - - derphi0 = np.dot(gfk, pk) - - stp, fval, old_fval = scalar_search_wolfe1( - phi, derphi, old_fval, old_old_fval, derphi0, - c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol) - - return stp, fc[0], gc[0], fval, old_fval, gval[0] - - -def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, - c1=1e-4, c2=0.9, - amax=50, amin=1e-8, xtol=1e-14): - """ - Scalar function search for alpha that satisfies strong Wolfe conditions - - alpha > 0 is assumed to be a descent direction. - - Parameters - ---------- - phi : callable phi(alpha) - Function at point `alpha` - derphi : callable dphi(alpha) - Derivative `d phi(alpha)/ds`. Returns a scalar. - - phi0 : float, optional - Value of `f` at 0 - old_phi0 : float, optional - Value of `f` at the previous point - derphi0 : float, optional - Value `derphi` at 0 - amax : float, optional - Maximum step size - c1, c2 : float, optional - Wolfe parameters - - Returns - ------- - alpha : float - Step size, or None if no suitable step was found - phi : float - Value of `phi` at the new point `alpha` - phi0 : float - Value of `phi` at `alpha=0` - - Notes - ----- - Uses routine DCSRCH from MINPACK. - - """ - - if phi0 is None: - phi0 = phi(0.) - if derphi0 is None: - derphi0 = derphi(0.) 
- - if old_phi0 is not None: - alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) - if alpha1 < 0: - alpha1 = 1.0 - else: - alpha1 = 1.0 - - phi1 = phi0 - derphi1 = derphi0 - isave = np.zeros((2,), np.intc) - dsave = np.zeros((13,), float) - task = asbytes('START') - - while 1: - stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1, - c1, c2, xtol, task, - amin, amax, isave, dsave) - if task[:2] == asbytes('FG') and not np.isnan(phi1): - alpha1 = stp - phi1 = phi(stp) - derphi1 = derphi(stp) - else: - break - - if task[:5] == asbytes('ERROR') or task[:4] == asbytes('WARN'): - stp = None # failed - - return stp, phi1, phi0 - -line_search = line_search_wolfe1 - -#------------------------------------------------------------------------------ -# Pure-Python Wolfe line and scalar searches -#------------------------------------------------------------------------------ - -def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, - old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=50): - """Find alpha that satisfies strong Wolfe conditions. - - Parameters - ---------- - f : callable f(x,*args) - Objective function. - myfprime : callable f'(x,*args) - Objective function gradient (can be None). - xk : ndarray - Starting point. - pk : ndarray - Search direction. - gfk : ndarray, optional - Gradient value for x=xk (xk being the current parameter - estimate). Will be recomputed if omitted. - old_fval : float, optional - Function value for x=xk. Will be recomputed if omitted. - old_old_fval : float, optional - Function value for the point preceding x=xk - args : tuple, optional - Additional arguments passed to objective function. - c1 : float, optional - Parameter for Armijo condition rule. - c2 : float, optional - Parameter for curvature condition rule. - - Returns - ------- - alpha0 : float - Alpha for which ``x_new = x0 + alpha * pk``. - fc : int - Number of function evaluations made. - gc : int - Number of gradient evaluations made. 
- - Notes - ----- - Uses the line search algorithm to enforce strong Wolfe - conditions. See Wright and Nocedal, 'Numerical Optimization', - 1999, pg. 59-60. - - For the zoom phase it uses an algorithm by [...]. - - """ - fc = [0] - gc = [0] - gval = [None] - - def phi(alpha): - fc[0] += 1 - return f(xk + alpha * pk, *args) - - if isinstance(myfprime, tuple): - def derphi(alpha): - fc[0] += len(xk)+1 - eps = myfprime[1] - fprime = myfprime[0] - newargs = (f,eps) + args - gval[0] = fprime(xk+alpha*pk, *newargs) # store for later use - return np.dot(gval[0], pk) - else: - fprime = myfprime - def derphi(alpha): - gc[0] += 1 - gval[0] = fprime(xk+alpha*pk, *args) # store for later use - return np.dot(gval[0], pk) - - derphi0 = np.dot(gfk, pk) - - alpha_star, phi_star, old_fval, derphi_star = \ - scalar_search_wolfe2(phi, derphi, old_fval, old_old_fval, - derphi0, c1, c2, amax) - - if derphi_star is not None: - # derphi_star is a number (derphi) -- so use the most recently - # calculated gradient used in computing it derphi = gfk*pk - # this is the gradient at the next step no need to compute it - # again in the outer loop. - derphi_star = gval[0] - - return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star - - -def scalar_search_wolfe2(phi, derphi=None, phi0=None, - old_phi0=None, derphi0=None, - c1=1e-4, c2=0.9, amax=50): - """Find alpha that satisfies strong Wolfe conditions. - - alpha > 0 is assumed to be a descent direction. - - Parameters - ---------- - phi : callable f(x,*args) - Objective scalar function. - - derphi : callable f'(x,*args), optional - Objective function derivative (can be None) - phi0 : float, optional - Value of phi at s=0 - old_phi0 : float, optional - Value of phi at previous point - derphi0 : float, optional - Value of derphi at s=0 - args : tuple - Additional arguments passed to objective function. - c1 : float - Parameter for Armijo condition rule. - c2 : float - Parameter for curvature condition rule. 
- - Returns - ------- - alpha_star : float - Best alpha - phi_star - phi at alpha_star - phi0 - phi at 0 - derphi_star - derphi at alpha_star - - Notes - ----- - Uses the line search algorithm to enforce strong Wolfe - conditions. See Wright and Nocedal, 'Numerical Optimization', - 1999, pg. 59-60. - - For the zoom phase it uses an algorithm by [...]. - - """ - - if phi0 is None: - phi0 = phi(0.) - - if derphi0 is None and derphi is not None: - derphi0 = derphi(0.) - - alpha0 = 0 - if old_phi0 is not None: - alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) - else: - alpha1 = 1.0 - - if alpha1 < 0: - alpha1 = 1.0 - - if alpha1 == 0: - # This shouldn't happen. Perhaps the increment has slipped below - # machine precision? For now, set the return variables skip the - # useless while loop, and raise warnflag=2 due to possible imprecision. - alpha_star = None - phi_star = phi0 - phi0 = old_phi0 - derphi_star = None - - phi_a1 = phi(alpha1) - #derphi_a1 = derphi(alpha1) evaluated below - - phi_a0 = phi0 - derphi_a0 = derphi0 - - i = 1 - maxiter = 10 - while 1: # bracketing phase - if alpha1 == 0: - break - if (phi_a1 > phi0 + c1*alpha1*derphi0) or \ - ((phi_a1 >= phi_a0) and (i > 1)): - alpha_star, phi_star, derphi_star = \ - _zoom(alpha0, alpha1, phi_a0, - phi_a1, derphi_a0, phi, derphi, - phi0, derphi0, c1, c2) - break - - derphi_a1 = derphi(alpha1) - if (abs(derphi_a1) <= -c2*derphi0): - alpha_star = alpha1 - phi_star = phi_a1 - derphi_star = derphi_a1 - break - - if (derphi_a1 >= 0): - alpha_star, phi_star, derphi_star = \ - _zoom(alpha1, alpha0, phi_a1, - phi_a0, derphi_a1, phi, derphi, - phi0, derphi0, c1, c2) - break - - alpha2 = 2 * alpha1 # increase by factor of two on each iteration - i = i + 1 - alpha0 = alpha1 - alpha1 = alpha2 - phi_a0 = phi_a1 - phi_a1 = phi(alpha1) - derphi_a0 = derphi_a1 - - # stopping test if lower function not found - if i > maxiter: - alpha_star = alpha1 - phi_star = phi_a1 - derphi_star = None - break - - return alpha_star, 
phi_star, phi0, derphi_star - - -def _cubicmin(a,fa,fpa,b,fb,c,fc): - """ - Finds the minimizer for a cubic polynomial that goes through the - points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. - - If no minimizer can be found return None - - """ - # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D - - C = fpa - D = fa - db = b-a - dc = c-a - if (db == 0) or (dc == 0) or (b==c): return None - denom = (db*dc)**2 * (db-dc) - d1 = np.empty((2,2)) - d1[0,0] = dc**2 - d1[0,1] = -db**2 - d1[1,0] = -dc**3 - d1[1,1] = db**3 - [A,B] = np.dot(d1, np.asarray([fb-fa-C*db,fc-fa-C*dc]).flatten()) - A /= denom - B /= denom - radical = B*B-3*A*C - if radical < 0: return None - if (A == 0): return None - xmin = a + (-B + np.sqrt(radical))/(3*A) - return xmin - - -def _quadmin(a,fa,fpa,b,fb): - """ - Finds the minimizer for a quadratic polynomial that goes through - the points (a,fa), (b,fb) with derivative at a of fpa, - - """ - # f(x) = B*(x-a)^2 + C*(x-a) + D - D = fa - C = fpa - db = b-a*1.0 - if (db==0): return None - B = (fb-D-C*db)/(db*db) - if (B <= 0): return None - xmin = a - C / (2.0*B) - return xmin - -def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, - phi, derphi, phi0, derphi0, c1, c2): - """ - Part of the optimization algorithm in `scalar_search_wolfe2`. - """ - - maxiter = 10 - i = 0 - delta1 = 0.2 # cubic interpolant check - delta2 = 0.1 # quadratic interpolant check - phi_rec = phi0 - a_rec = 0 - while 1: - # interpolate to find a trial step length between a_lo and - # a_hi Need to choose interpolation here. 
Use cubic - # interpolation and then if the result is within delta * - # dalpha or outside of the interval bounded by a_lo or a_hi - # then use quadratic interpolation, if the result is still too - # close, then use bisection - - dalpha = a_hi-a_lo; - if dalpha < 0: a,b = a_hi,a_lo - else: a,b = a_lo, a_hi - - # minimizer of cubic interpolant - # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) - # - # if the result is too close to the end points (or out of the - # interval) then use quadratic interpolation with phi_lo, - # derphi_lo and phi_hi if the result is stil too close to the - # end points (or out of the interval) then use bisection - - if (i > 0): - cchk = delta1*dalpha - a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec) - if (i==0) or (a_j is None) or (a_j > b-cchk) or (a_j < a+cchk): - qchk = delta2*dalpha - a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) - if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): - a_j = a_lo + 0.5*dalpha - - # Check new value of a_j - - phi_aj = phi(a_j) - if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): - phi_rec = phi_hi - a_rec = a_hi - a_hi = a_j - phi_hi = phi_aj - else: - derphi_aj = derphi(a_j) - if abs(derphi_aj) <= -c2*derphi0: - a_star = a_j - val_star = phi_aj - valprime_star = derphi_aj - break - if derphi_aj*(a_hi - a_lo) >= 0: - phi_rec = phi_hi - a_rec = a_hi - a_hi = a_lo - phi_hi = phi_lo - else: - phi_rec = phi_lo - a_rec = a_lo - a_lo = a_j - phi_lo = phi_aj - derphi_lo = derphi_aj - i += 1 - if (i > maxiter): - a_star = a_j - val_star = phi_aj - valprime_star = None - break - return a_star, val_star, valprime_star - - -#------------------------------------------------------------------------------ -# Armijo line and scalar searches -#------------------------------------------------------------------------------ - -def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): - """Minimize over alpha, the function ``f(xk+alpha pk)``. 
- - Parameters - ---------- - f : callable - Function to be minimized. - xk : array_like - Current point. - pk : array_like - Search direction. - gfk : array_like, optional - Gradient of `f` at point `xk`. - old_fval : float - Value of `f` at point `xk`. - args : tuple, optional - Optional arguments. - c1 : float, optional - Value to control stopping criterion. - alpha0 : scalar, optional - Value of `alpha` at start of the optimization. - - Returns - ------- - alpha - f_count - f_val_at_alpha - - Notes - ----- - Uses the interpolation algorithm (Armijo backtracking) as suggested by - Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 - - """ - xk = np.atleast_1d(xk) - fc = [0] - - def phi(alpha1): - fc[0] += 1 - return f(xk + alpha1*pk, *args) - - if old_fval is None: - phi0 = phi(0.) - else: - phi0 = old_fval # compute f(xk) -- done in past loop - - derphi0 = np.dot(gfk, pk) - alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0) - return alpha, fc[0], phi1 - -def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): - """ - Compatibility wrapper for `line_search_armijo` - """ - r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, - alpha0=alpha0) - return r[0], r[1], 0, r[2] - -def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0): - """Minimize over alpha, the function ``phi(alpha)``. - - Uses the interpolation algorithm (Armijo backtracking) as suggested by - Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 - - alpha > 0 is assumed to be a descent direction. 
- - Returns - ------- - alpha - phi1 - - """ - phi_a0 = phi(alpha0) - if phi_a0 <= phi0 + c1*alpha0*derphi0: - return alpha0, phi_a0 - - # Otherwise compute the minimizer of a quadratic interpolant: - - alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) - phi_a1 = phi(alpha1) - - if (phi_a1 <= phi0 + c1*alpha1*derphi0): - return alpha1, phi_a1 - - # Otherwise loop with cubic interpolation until we find an alpha which - # satifies the first Wolfe condition (since we are backtracking, we will - # assume that the value of alpha is not too small and satisfies the second - # condition. - - while alpha1 > amin: # we are assuming alpha>0 is a descent direction - factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) - a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ - alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) - a = a / factor - b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ - alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) - b = b / factor - - alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) - phi_a2 = phi(alpha2) - - if (phi_a2 <= phi0 + c1*alpha2*derphi0): - return alpha2, phi_a2 - - if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: - alpha2 = alpha1 / 2.0 - - alpha0 = alpha1 - alpha1 = alpha2 - phi_a0 = phi_a1 - phi_a1 = phi_a2 - - # Failed to find a suitable step length - return None, phi_a1 diff --git a/scipy-0.10.1/scipy/optimize/minpack.h b/scipy-0.10.1/scipy/optimize/minpack.h deleted file mode 100644 index 25d4ba8517..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack.h +++ /dev/null @@ -1,192 +0,0 @@ -/* MULTIPACK module by Travis Oliphant - -Copyright (c) 2002 Travis Oliphant all rights reserved -Oliphant.Travis@altavista.net -Permission to use, modify, and distribute this software is given under the -terms of the SciPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-*/ - - -/* This extension module is a collection of wrapper functions around -common FORTRAN code in the packages MINPACK, ODEPACK, and QUADPACK plus -some differential algebraic equation solvers. - -The wrappers are meant to be nearly direct translations between the -FORTAN code and Python. Some parameters like sizes do not need to be -passed since they are available from the objects. - -It is anticipated that a pure Python module be written to call these lower -level routines and make a simpler user interface. All of the routines define -default values for little-used parameters so that even the raw routines are -quite useful without a separate wrapper. - -FORTRAN Outputs that are not either an error indicator or the sought-after -results are placed in a dictionary and returned as an optional member of -the result tuple when the full_output argument is non-zero. -*/ - -#include "Python.h" -#if PY_VERSION_HEX >= 0x03000000 - #define PyString_FromString PyBytes_FromString - #define PyString_ConcatAndDel PyBytes_ConcatAndDel - #define PyString_AsString PyBytes_AsString - #define PyInt_FromLong PyLong_FromLong -#endif -#include "numpy/arrayobject.h" - -#define PYERR(errobj,message) {PyErr_SetString(errobj,message); goto fail;} -#define PYERR2(errobj,message) {PyErr_Print(); PyErr_SetString(errobj, message); goto fail;} -#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS) - -#define STORE_VARS() PyObject *store_multipack_globals[4]; int store_multipack_globals3; - -#define INIT_FUNC(fun,arg,errobj) { /* Get extra arguments or set to zero length tuple */ \ - store_multipack_globals[0] = multipack_python_function; \ - store_multipack_globals[1] = multipack_extra_arguments; \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. 
*/ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun)) \ - PYERR(errobj,"First argument must be a callable function."); \ - multipack_python_function = fun; \ - multipack_extra_arguments = arg; } - -#define INIT_JAC_FUNC(fun,Dfun,arg,col_deriv,errobj) { \ - store_multipack_globals[0] = multipack_python_function; \ - store_multipack_globals[1] = multipack_extra_arguments; \ - store_multipack_globals[2] = multipack_python_jacobian; \ - store_multipack_globals3 = multipack_jac_transpose; \ - if (arg == NULL) { \ - if ((arg = PyTuple_New(0)) == NULL) goto fail; \ - } \ - else \ - Py_INCREF(arg); /* We decrement on exit. */ \ - if (!PyTuple_Check(arg)) \ - PYERR(errobj,"Extra Arguments must be in a tuple"); \ - /* Set up callback functions */ \ - if (!PyCallable_Check(fun) || (Dfun != Py_None && !PyCallable_Check(Dfun))) \ - PYERR(errobj,"The function and its Jacobian must be callable functions."); \ - multipack_python_function = fun; \ - multipack_extra_arguments = arg; \ - multipack_python_jacobian = Dfun; \ - multipack_jac_transpose = !(col_deriv);} - -#define RESTORE_JAC_FUNC() multipack_python_function = store_multipack_globals[0]; \ - multipack_extra_arguments = store_multipack_globals[1]; \ - multipack_python_jacobian = store_multipack_globals[2]; \ - multipack_jac_transpose = store_multipack_globals3; - -#define RESTORE_FUNC() multipack_python_function = store_multipack_globals[0]; \ - multipack_extra_arguments = store_multipack_globals[1]; - -#define SET_DIAG(ap_diag,o_diag,mode) { /* Set the diag vector from input */ \ - if (o_diag == NULL || o_diag == Py_None) { \ - ap_diag = (PyArrayObject *)PyArray_SimpleNew(1,&n,PyArray_DOUBLE); \ - if (ap_diag == NULL) goto fail; \ - diag = (double *)ap_diag -> data; \ - mode = 1; \ - } \ - else { \ - ap_diag = (PyArrayObject *)PyArray_ContiguousFromObject(o_diag, PyArray_DOUBLE, 1, 1); \ - if (ap_diag == NULL) goto fail; 
\ - diag = (double *)ap_diag -> data; \ - mode = 2; } } - -#define MATRIXC2F(jac,data,m,n) {double *p1=(double *)(jac), *p2, *p3=(double *)(data);\ -int i,j;\ -for (j=0;j<(m);p3++,j++) \ - for (p2=p3,i=0;i<(n);p2+=(m),i++,p1++) \ - *p1 = *p2; } - -static PyObject *multipack_python_function=NULL; -static PyObject *multipack_python_jacobian=NULL; -static PyObject *multipack_extra_arguments=NULL; /* a tuple */ -static int multipack_jac_transpose=1; - -static PyObject *call_python_function(PyObject *func, npy_intp n, double *x, PyObject *args, int dim, PyObject *error_obj) -{ - /* - This is a generic function to call a python function that takes a 1-D - sequence as a first argument and optional extra_arguments (should be a - zero-length tuple if none desired). The result of the function is - returned in a multiarray object. - -- build sequence object from values in x. - -- add extra arguments (if any) to an argument list. - -- call Python callable object - -- check if error occurred: - if so return NULL - -- if no error, place result of Python code into multiarray object. - */ - - PyArrayObject *sequence = NULL; - PyObject *arglist = NULL, *tmpobj = NULL; - PyObject *arg1 = NULL, *str1 = NULL; - PyObject *result = NULL; - PyArrayObject *result_array = NULL; - - /* Build sequence argument from inputs */ - sequence = (PyArrayObject *)PyArray_SimpleNewFromData(1, &n, PyArray_DOUBLE, (char *)x); - if (sequence == NULL) PYERR2(error_obj,"Internal failure to make an array of doubles out of first\n argument to function call."); - - /* Build argument list */ - if ((arg1 = PyTuple_New(1)) == NULL) { - Py_DECREF(sequence); - return NULL; - } - PyTuple_SET_ITEM(arg1, 0, (PyObject *)sequence); - /* arg1 now owns sequence reference */ - if ((arglist = PySequence_Concat( arg1, args)) == NULL) - PYERR2(error_obj,"Internal error constructing argument list."); - - Py_DECREF(arg1); /* arglist has a reference to sequence, now. 
*/ - arg1 = NULL; - - /* Call function object --- variable passed to routine. Extra - arguments are in another passed variable. - */ - if ((result = PyEval_CallObject(func, arglist))==NULL) { - PyErr_Print(); - tmpobj = PyObject_GetAttrString(func, "func_name"); - if (tmpobj == NULL) goto fail; - str1 = PyString_FromString("Error occurred while calling the Python function named "); - if (str1 == NULL) { Py_DECREF(tmpobj); goto fail;} - PyString_ConcatAndDel(&str1, tmpobj); - PyErr_SetString(error_obj, PyString_AsString(str1)); - Py_DECREF(str1); - goto fail; - } - - if ((result_array = (PyArrayObject *)PyArray_ContiguousFromObject(result, PyArray_DOUBLE, dim-1, dim))==NULL) - PYERR2(error_obj,"Result from function call is not a proper array of floats."); - - Py_DECREF(result); - Py_DECREF(arglist); - return (PyObject *)result_array; - - fail: - Py_XDECREF(arglist); - Py_XDECREF(result); - Py_XDECREF(arg1); - return NULL; -} - - - - - - - - - - - - - diff --git a/scipy-0.10.1/scipy/optimize/minpack.py b/scipy-0.10.1/scipy/optimize/minpack.py deleted file mode 100644 index 7c161879bd..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack.py +++ /dev/null @@ -1,535 +0,0 @@ -import warnings -import _minpack - -from numpy import atleast_1d, dot, take, triu, shape, eye, \ - transpose, zeros, product, greater, array, \ - all, where, isscalar, asarray, inf, abs - -error = _minpack.error - -__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] - -def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None): - res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) - if (output_shape is not None) and (shape(res) != output_shape): - if (output_shape[0] != 1): - if len(output_shape) > 1: - if output_shape[1] == 1: - return shape(res) - msg = "%s: there is a mismatch between the input and output " \ - "shape of the '%s' argument" % (checker, argname) - func_name = getattr(thefunc, 'func_name', None) - if func_name: - msg += " '%s'." 
% func_name - else: - msg += "." - raise TypeError(msg) - return shape(res) - - -def fsolve(func, x0, args=(), fprime=None, full_output=0, - col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, - epsfcn=0.0, factor=100, diag=None): - """ - Find the roots of a function. - - Return the roots of the (non-linear) equations defined by - ``func(x) = 0`` given a starting estimate. - - Parameters - ---------- - func : callable f(x, *args) - A function that takes at least one (possibly vector) argument. - x0 : ndarray - The starting estimate for the roots of ``func(x) = 0``. - args : tuple - Any extra arguments to `func`. - fprime : callable(x) - A function to compute the Jacobian of `func` with derivatives - across the rows. By default, the Jacobian will be estimated. - full_output : bool - If True, return optional outputs. - col_deriv : bool - Specify whether the Jacobian function computes derivatives down - the columns (faster, because there is no transpose operation). - - Returns - ------- - x : ndarray - The solution (or the result of the last iteration for - an unsuccessful call). - infodict : dict - A dictionary of optional outputs with the keys:: - - * 'nfev': number of function calls - * 'njev': number of Jacobian calls - * 'fvec': function evaluated at the output - * 'fjac': the orthogonal matrix, q, produced by the QR - factorization of the final approximate Jacobian - matrix, stored column wise - * 'r': upper triangular matrix produced by QR factorization of same - matrix - * 'qtf': the vector (transpose(q) * fvec) - - ier : int - An integer flag. Set to 1 if a solution was found, otherwise refer - to `mesg` for more information. - mesg : str - If no solution is found, `mesg` details the cause of failure. - - Other Parameters - ---------------- - xtol : float - The calculation will terminate if the relative error between two - consecutive iterates is at most `xtol`. - maxfev : int - The maximum number of calls to the function. 
If zero, then - ``100*(N+1)`` is the maximum where N is the number of elements - in `x0`. - band : tuple - If set to a two-sequence containing the number of sub- and - super-diagonals within the band of the Jacobi matrix, the - Jacobi matrix is considered banded (only for ``fprime=None``). - epsfcn : float - A suitable step length for the forward-difference - approximation of the Jacobian (for ``fprime=None``). If - `epsfcn` is less than the machine precision, it is assumed - that the relative errors in the functions are of the order of - the machine precision. - factor : float - A parameter determining the initial step bound - (``factor * || diag * x||``). Should be in the interval - ``(0.1, 100)``. - diag : sequence - N positive entries that serve as a scale factors for the - variables. - - Notes - ----- - ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. - - """ - x0 = array(x0, ndmin=1) - n = len(x0) - if type(args) != type(()): args = (args,) - _check_func('fsolve', 'func', func, x0, args, n, (n,)) - Dfun = fprime - if Dfun is None: - if band is None: - ml, mu = -10,-10 - else: - ml, mu = band[:2] - if (maxfev == 0): - maxfev = 200*(n + 1) - retval = _minpack._hybrd(func, x0, args, full_output, xtol, - maxfev, ml, mu, epsfcn, factor, diag) - else: - _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n,n)) - if (maxfev == 0): - maxfev = 100*(n + 1) - retval = _minpack._hybrj(func, Dfun, x0, args, full_output, - col_deriv, xtol, maxfev, factor,diag) - - errors = {0:["Improper input parameters were entered.",TypeError], - 1:["The solution converged.", None], - 2:["The number of calls to function has " - "reached maxfev = %d." % maxfev, ValueError], - 3:["xtol=%f is too small, no further improvement " - "in the approximate\n solution " - "is possible." 
% xtol, ValueError], - 4:["The iteration is not making good progress, as measured " - "by the \n improvement from the last five " - "Jacobian evaluations.", ValueError], - 5:["The iteration is not making good progress, " - "as measured by the \n improvement from the last " - "ten iterations.", ValueError], - 'unknown': ["An error occurred.", TypeError]} - - info = retval[-1] # The FORTRAN return value - if (info != 1 and not full_output): - if info in [2,3,4,5]: - msg = errors[info][0] - warnings.warn(msg, RuntimeWarning) - else: - try: - raise errors[info][1](errors[info][0]) - except KeyError: - raise errors['unknown'][1](errors['unknown'][0]) - - if full_output: - try: - return retval + (errors[info][0],) # Return all + the message - except KeyError: - return retval + (errors['unknown'][0],) - else: - return retval[0] - - -def leastsq(func, x0, args=(), Dfun=None, full_output=0, - col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, - gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None): - """ - Minimize the sum of squares of a set of equations. - - :: - - x = arg min(sum(func(y)**2,axis=0)) - y - - Parameters - ---------- - func : callable - should take at least one (possibly length N vector) argument and - returns M floating point numbers. - x0 : ndarray - The starting estimate for the minimization. - args : tuple - Any extra arguments to func are placed in this tuple. - Dfun : callable - A function or method to compute the Jacobian of func with derivatives - across the rows. If this is None, the Jacobian will be estimated. - full_output : bool - non-zero to return all optional outputs. - col_deriv : bool - non-zero to specify that the Jacobian function computes derivatives - down the columns (faster, because there is no transpose operation). - ftol : float - Relative error desired in the sum of squares. - xtol : float - Relative error desired in the approximate solution. 
- gtol : float - Orthogonality desired between the function vector and the columns of - the Jacobian. - maxfev : int - The maximum number of calls to the function. If zero, then 100*(N+1) is - the maximum where N is the number of elements in x0. - epsfcn : float - A suitable step length for the forward-difference approximation of the - Jacobian (for Dfun=None). If epsfcn is less than the machine precision, - it is assumed that the relative errors in the functions are of the - order of the machine precision. - factor : float - A parameter determining the initial step bound - (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. - diag : sequence - N positive entries that serve as a scale factors for the variables. - - Returns - ------- - x : ndarray - The solution (or the result of the last iteration for an unsuccessful - call). - cov_x : ndarray - Uses the fjac and ipvt optional outputs to construct an - estimate of the jacobian around the solution. ``None`` if a - singular matrix encountered (indicates very flat curvature in - some direction). This matrix must be multiplied by the - residual standard deviation to get the covariance of the - parameter estimates -- see curve_fit. - infodict : dict - a dictionary of optional outputs with the key s:: - - - 'nfev' : the number of function calls - - 'fvec' : the function evaluated at the output - - 'fjac' : A permutation of the R matrix of a QR - factorization of the final approximate - Jacobian matrix, stored column wise. - Together with ipvt, the covariance of the - estimate can be approximated. - - 'ipvt' : an integer array of length N which defines - a permutation matrix, p, such that - fjac*p = q*r, where r is upper triangular - with diagonal elements of nonincreasing - magnitude. Column j of p is column ipvt(j) - of the identity matrix. - - 'qtf' : the vector (transpose(q) * fvec). - - mesg : str - A string message giving information about the cause of failure. - ier : int - An integer flag. 
If it is equal to 1, 2, 3 or 4, the solution was - found. Otherwise, the solution was not found. In either case, the - optional output variable 'mesg' gives more information. - - Notes - ----- - "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. - - cov_x is a Jacobian approximation to the Hessian of the least squares - objective function. - This approximation assumes that the objective function is based on the - difference between some observed target data (ydata) and a (non-linear) - function of the parameters `f(xdata, params)` :: - - func(params) = ydata - f(xdata, params) - - so that the objective function is :: - - min sum((ydata - f(xdata, params))**2, axis=0) - params - - """ - x0 = array(x0, ndmin=1) - n = len(x0) - if type(args) != type(()): - args = (args,) - m = _check_func('leastsq', 'func', func, x0, args, n)[0] - if n > m: - raise TypeError('Improper input: N=%s must not exceed M=%s' % (n,m)) - if Dfun is None: - if (maxfev == 0): - maxfev = 200*(n + 1) - retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, - gtol, maxfev, epsfcn, factor, diag) - else: - if col_deriv: - _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n,m)) - else: - _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m,n)) - if (maxfev == 0): - maxfev = 100*(n + 1) - retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, - ftol, xtol, gtol, maxfev, factor, diag) - - errors = {0:["Improper input parameters.", TypeError], - 1:["Both actual and predicted relative reductions " - "in the sum of squares\n are at most %f" % ftol, None], - 2:["The relative error between two consecutive " - "iterates is at most %f" % xtol, None], - 3:["Both actual and predicted relative reductions in " - "the sum of squares\n are at most %f and the " - "relative error between two consecutive " - "iterates is at \n most %f" % (ftol,xtol), None], - 4:["The cosine of the angle between func(x) and any " - "column of the\n Jacobian is at most %f in " - "absolute 
value" % gtol, None], - 5:["Number of calls to function has reached " - "maxfev = %d." % maxfev, ValueError], - 6:["ftol=%f is too small, no further reduction " - "in the sum of squares\n is possible.""" % ftol, ValueError], - 7:["xtol=%f is too small, no further improvement in " - "the approximate\n solution is possible." % xtol, ValueError], - 8:["gtol=%f is too small, func(x) is orthogonal to the " - "columns of\n the Jacobian to machine " - "precision." % gtol, ValueError], - 'unknown':["Unknown error.", TypeError]} - - info = retval[-1] # The FORTRAN return value - - if (info not in [1,2,3,4] and not full_output): - if info in [5,6,7,8]: - warnings.warn(errors[info][0], RuntimeWarning) - else: - try: - raise errors[info][1](errors[info][0]) - except KeyError: - raise errors['unknown'][1](errors['unknown'][0]) - - mesg = errors[info][0] - if full_output: - cov_x = None - if info in [1,2,3,4]: - from numpy.dual import inv - from numpy.linalg import LinAlgError - perm = take(eye(n),retval[1]['ipvt']-1,0) - r = triu(transpose(retval[1]['fjac'])[:n,:]) - R = dot(r, perm) - try: - cov_x = inv(dot(transpose(R),R)) - except LinAlgError: - pass - return (retval[0], cov_x) + retval[1:-1] + (mesg, info) - else: - return (retval[0], info) - -def _general_function(params, xdata, ydata, function): - return function(xdata, *params) - ydata - -def _weighted_general_function(params, xdata, ydata, function, weights): - return weights * (function(xdata, *params) - ydata) - -def curve_fit(f, xdata, ydata, p0=None, sigma=None, **kw): - """ - Use non-linear least squares to fit a function, f, to data. - - Assumes ``ydata = f(xdata, *params) + eps`` - - Parameters - ---------- - f : callable - The model function, f(x, ...). It must take the independent - variable as the first argument and the parameters to fit as - separate remaining arguments. - xdata : An N-length sequence or an (k,N)-shaped array - for functions with k predictors. 
- The independent variable where the data is measured. - ydata : N-length sequence - The dependent data --- nominally f(xdata, ...) - p0 : None, scalar, or M-length sequence - Initial guess for the parameters. If None, then the initial - values will all be 1 (if the number of parameters for the function - can be determined using introspection, otherwise a ValueError - is raised). - sigma : None or N-length sequence - If not None, it represents the standard-deviation of ydata. - This vector, if given, will be used as weights in the - least-squares problem. - - Returns - ------- - popt : array - Optimal values for the parameters so that the sum of the squared error - of ``f(xdata, *popt) - ydata`` is minimized - pcov : 2d array - The estimated covariance of popt. The diagonals provide the variance - of the parameter estimate. - - See Also - -------- - leastsq - - Notes - ----- - The algorithm uses the Levenburg-Marquardt algorithm through `leastsq`. - Additional keyword arguments are passed directly to that algorithm. - - Examples - -------- - >>> import numpy as np - >>> from scipy.optimize import curve_fit - >>> def func(x, a, b, c): - ... return a*np.exp(-b*x) + c - - >>> x = np.linspace(0,4,50) - >>> y = func(x, 2.5, 1.3, 0.5) - >>> yn = y + 0.2*np.random.normal(size=len(x)) - - >>> popt, pcov = curve_fit(func, x, yn) - - """ - if p0 is None: - # determine number of parameters by inspecting the function - import inspect - args, varargs, varkw, defaults = inspect.getargspec(f) - if len(args) < 2: - msg = "Unable to determine number of fit parameters." - raise ValueError(msg) - if 'self' in args: - p0 = [1.0] * (len(args)-2) - else: - p0 = [1.0] * (len(args)-1) - - if isscalar(p0): - p0 = array([p0]) - - args = (xdata, ydata, f) - if sigma is None: - func = _general_function - else: - func = _weighted_general_function - args += (1.0/asarray(sigma),) - - # Remove full_output from kw, otherwise we're passing it in twice. 
- return_full = kw.pop('full_output', False) - res = leastsq(func, p0, args=args, full_output=1, **kw) - (popt, pcov, infodict, errmsg, ier) = res - - if ier not in [1,2,3,4]: - msg = "Optimal parameters not found: " + errmsg - raise RuntimeError(msg) - - if (len(ydata) > len(p0)) and pcov is not None: - s_sq = (func(popt, *args)**2).sum()/(len(ydata)-len(p0)) - pcov = pcov * s_sq - else: - pcov = inf - - if return_full: - return popt, pcov, infodict, errmsg, ier - else: - return popt, pcov - -def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): - """Perform a simple check on the gradient for correctness. - - """ - - x = atleast_1d(x0) - n = len(x) - x = x.reshape((n,)) - fvec = atleast_1d(fcn(x,*args)) - m = len(fvec) - fvec = fvec.reshape((m,)) - ldfjac = m - fjac = atleast_1d(Dfcn(x,*args)) - fjac = fjac.reshape((m,n)) - if col_deriv == 0: - fjac = transpose(fjac) - - xp = zeros((n,), float) - err = zeros((m,), float) - fvecp = None - _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) - - fvecp = atleast_1d(fcn(xp,*args)) - fvecp = fvecp.reshape((m,)) - _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) - - good = (product(greater(err, 0.5), axis=0)) - - return (good, err) - - -# Steffensen's Method using Aitken's Del^2 convergence acceleration. -def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500): - """Find the point where func(x) == x - - Given a function of one or more variables and a starting point, find a - fixed-point of the function: i.e. where func(x)=x. - - Uses Steffensen's Method using Aitken's Del^2 convergence acceleration. - See Burden, Faires, "Numerical Analysis", 5th edition, pg. 
80 - - Examples - -------- - >>> from numpy import sqrt, array - >>> from scipy.optimize import fixed_point - >>> def func(x, c1, c2): - return sqrt(c1/(x+c2)) - >>> c1 = array([10,12.]) - >>> c2 = array([3, 5.]) - >>> fixed_point(func, [1.2, 1.3], args=(c1,c2)) - array([ 1.4920333 , 1.37228132]) - - """ - if not isscalar(x0): - x0 = asarray(x0) - p0 = x0 - for iter in range(maxiter): - p1 = func(p0, *args) - p2 = func(p1, *args) - d = p2 - 2.0 * p1 + p0 - p = where(d == 0, p2, p0 - (p1 - p0)*(p1 - p0) / d) - relerr = where(p0 == 0, p, (p-p0)/p0) - if all(abs(relerr) < xtol): - return p - p0 = p - else: - p0 = x0 - for iter in range(maxiter): - p1 = func(p0, *args) - p2 = func(p1, *args) - d = p2 - 2.0 * p1 + p0 - if d == 0.0: - return p2 - else: - p = p0 - (p1 - p0)*(p1 - p0) / d - if p0 == 0: - relerr = p - else: - relerr = (p - p0)/p0 - if abs(relerr) < xtol: - return p - p0 = p - msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) - raise RuntimeError(msg) diff --git a/scipy-0.10.1/scipy/optimize/minpack/chkder.f b/scipy-0.10.1/scipy/optimize/minpack/chkder.f deleted file mode 100644 index 29578fc418..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/chkder.f +++ /dev/null @@ -1,140 +0,0 @@ - subroutine chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,mode,err) - integer m,n,ldfjac,mode - double precision x(n),fvec(m),fjac(ldfjac,n),xp(n),fvecp(m), - * err(m) -c ********** -c -c subroutine chkder -c -c this subroutine checks the gradients of m nonlinear functions -c in n variables, evaluated at a point x, for consistency with -c the functions themselves. the user must call chkder twice, -c first with mode = 1 and then with mode = 2. -c -c mode = 1. on input, x must contain the point of evaluation. -c on output, xp is set to a neighboring point. -c -c mode = 2. 
on input, fvec must contain the functions and the -c rows of fjac must contain the gradients -c of the respective functions each evaluated -c at x, and fvecp must contain the functions -c evaluated at xp. -c on output, err contains measures of correctness of -c the respective gradients. -c -c the subroutine does not perform reliably if cancellation or -c rounding errors cause a severe loss of significance in the -c evaluation of a function. therefore, none of the components -c of x should be unusually small (in particular, zero) or any -c other value which may cause loss of significance. -c -c the subroutine statement is -c -c subroutine chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,mode,err) -c -c where -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. -c -c x is an input array of length n. -c -c fvec is an array of length m. on input when mode = 2, -c fvec must contain the functions evaluated at x. -c -c fjac is an m by n array. on input when mode = 2, -c the rows of fjac must contain the gradients of -c the respective functions evaluated at x. -c -c ldfjac is a positive integer input parameter not less than m -c which specifies the leading dimension of the array fjac. -c -c xp is an array of length n. on output when mode = 1, -c xp is set to a neighboring point of x. -c -c fvecp is an array of length m. on input when mode = 2, -c fvecp must contain the functions evaluated at xp. -c -c mode is an integer input variable set to 1 on the first call -c and 2 on the second. other values of mode are equivalent -c to mode = 1. -c -c err is an array of length m. on output when mode = 2, -c err contains measures of correctness of the respective -c gradients. if there is no severe loss of significance, -c then if err(i) is 1.0 the i-th gradient is correct, -c while if err(i) is 0.0 the i-th gradient is incorrect. 
-c for values of err between 0.0 and 1.0, the categorization -c is less certain. in general, a value of err(i) greater -c than 0.5 indicates that the i-th gradient is probably -c correct, while a value of err(i) less than 0.5 indicates -c that the i-th gradient is probably incorrect. -c -c subprograms called -c -c minpack supplied ... dpmpar -c -c fortran supplied ... dabs,dlog10,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j - double precision eps,epsf,epslog,epsmch,factor,one,temp,zero - double precision dpmpar - data factor,one,zero /1.0d2,1.0d0,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - eps = dsqrt(epsmch) -c - if (mode .eq. 2) go to 20 -c -c mode = 1. -c - do 10 j = 1, n - temp = eps*dabs(x(j)) - if (temp .eq. zero) temp = eps - xp(j) = x(j) + temp - 10 continue - go to 70 - 20 continue -c -c mode = 2. -c - epsf = factor*epsmch - epslog = dlog10(eps) - do 30 i = 1, m - err(i) = zero - 30 continue - do 50 j = 1, n - temp = dabs(x(j)) - if (temp .eq. zero) temp = one - do 40 i = 1, m - err(i) = err(i) + temp*fjac(i,j) - 40 continue - 50 continue - do 60 i = 1, m - temp = one - if (fvec(i) .ne. zero .and. fvecp(i) .ne. zero - * .and. dabs(fvecp(i)-fvec(i)) .ge. epsf*dabs(fvec(i))) - * temp = eps*dabs((fvecp(i)-fvec(i))/eps-err(i)) - * /(dabs(fvec(i)) + dabs(fvecp(i))) - err(i) = one - if (temp .gt. epsmch .and. temp .lt. eps) - * err(i) = (dlog10(temp) - epslog)/epslog - if (temp .ge. eps) err(i) = zero - 60 continue - 70 continue -c - return -c -c last card of subroutine chkder. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/dogleg.f b/scipy-0.10.1/scipy/optimize/minpack/dogleg.f deleted file mode 100644 index b812f1966e..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/dogleg.f +++ /dev/null @@ -1,177 +0,0 @@ - subroutine dogleg(n,r,lr,diag,qtb,delta,x,wa1,wa2) - integer n,lr - double precision delta - double precision r(lr),diag(n),qtb(n),x(n),wa1(n),wa2(n) -c ********** -c -c subroutine dogleg -c -c given an m by n matrix a, an n by n nonsingular diagonal -c matrix d, an m-vector b, and a positive number delta, the -c problem is to determine the convex combination x of the -c gauss-newton and scaled gradient directions that minimizes -c (a*x - b) in the least squares sense, subject to the -c restriction that the euclidean norm of d*x be at most delta. -c -c this subroutine completes the solution of the problem -c if it is provided with the necessary information from the -c qr factorization of a. that is, if a = q*r, where q has -c orthogonal columns and r is an upper triangular matrix, -c then dogleg expects the full upper triangle of r and -c the first n components of (q transpose)*b. -c -c the subroutine statement is -c -c subroutine dogleg(n,r,lr,diag,qtb,delta,x,wa1,wa2) -c -c where -c -c n is a positive integer input variable set to the order of r. -c -c r is an input array of length lr which must contain the upper -c triangular matrix r stored by rows. -c -c lr is a positive integer input variable not less than -c (n*(n+1))/2. -c -c diag is an input array of length n which must contain the -c diagonal elements of the matrix d. -c -c qtb is an input array of length n which must contain the first -c n elements of the vector (q transpose)*b. -c -c delta is a positive input variable which specifies an upper -c bound on the euclidean norm of d*x. -c -c x is an output array of length n which contains the desired -c convex combination of the gauss-newton direction and the -c scaled gradient direction. 
-c -c wa1 and wa2 are work arrays of length n. -c -c subprograms called -c -c minpack-supplied ... dpmpar,enorm -c -c fortran-supplied ... dabs,dmax1,dmin1,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j,jj,jp1,k,l - double precision alpha,bnorm,epsmch,gnorm,one,qnorm,sgnorm,sum, - * temp,zero - double precision dpmpar,enorm - data one,zero /1.0d0,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c -c first, calculate the gauss-newton direction. -c - jj = (n*(n + 1))/2 + 1 - do 50 k = 1, n - j = n - k + 1 - jp1 = j + 1 - jj = jj - k - l = jj + 1 - sum = zero - if (n .lt. jp1) go to 20 - do 10 i = jp1, n - sum = sum + r(l)*x(i) - l = l + 1 - 10 continue - 20 continue - temp = r(jj) - if (temp .ne. zero) go to 40 - l = j - do 30 i = 1, j - temp = dmax1(temp,dabs(r(l))) - l = l + n - i - 30 continue - temp = epsmch*temp - if (temp .eq. zero) temp = epsmch - 40 continue - x(j) = (qtb(j) - sum)/temp - 50 continue -c -c test whether the gauss-newton direction is acceptable. -c - do 60 j = 1, n - wa1(j) = zero - wa2(j) = diag(j)*x(j) - 60 continue - qnorm = enorm(n,wa2) - if (qnorm .le. delta) go to 140 -c -c the gauss-newton direction is not acceptable. -c next, calculate the scaled gradient direction. -c - l = 1 - do 80 j = 1, n - temp = qtb(j) - do 70 i = j, n - wa1(i) = wa1(i) + r(l)*temp - l = l + 1 - 70 continue - wa1(j) = wa1(j)/diag(j) - 80 continue -c -c calculate the norm of the scaled gradient and test for -c the special case in which the scaled gradient is zero. -c - gnorm = enorm(n,wa1) - sgnorm = zero - alpha = delta/qnorm - if (gnorm .eq. zero) go to 120 -c -c calculate the point along the scaled gradient -c at which the quadratic is minimized. 
-c - do 90 j = 1, n - wa1(j) = (wa1(j)/gnorm)/diag(j) - 90 continue - l = 1 - do 110 j = 1, n - sum = zero - do 100 i = j, n - sum = sum + r(l)*wa1(i) - l = l + 1 - 100 continue - wa2(j) = sum - 110 continue - temp = enorm(n,wa2) - sgnorm = (gnorm/temp)/temp -c -c test whether the scaled gradient direction is acceptable. -c - alpha = zero - if (sgnorm .ge. delta) go to 120 -c -c the scaled gradient direction is not acceptable. -c finally, calculate the point along the dogleg -c at which the quadratic is minimized. -c - bnorm = enorm(n,qtb) - temp = (bnorm/gnorm)*(bnorm/qnorm)*(sgnorm/delta) - temp = temp - (delta/qnorm)*(sgnorm/delta)**2 - * + dsqrt((temp-(delta/qnorm))**2 - * +(one-(delta/qnorm)**2)*(one-(sgnorm/delta)**2)) - alpha = ((delta/qnorm)*(one - (sgnorm/delta)**2))/temp - 120 continue -c -c form appropriate convex combination of the gauss-newton -c direction and the scaled gradient direction. -c - temp = (one - alpha)*dmin1(sgnorm,delta) - do 130 j = 1, n - x(j) = temp*wa1(j) + alpha*x(j) - 130 continue - 140 continue - return -c -c last card of subroutine dogleg. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/dpmpar.f b/scipy-0.10.1/scipy/optimize/minpack/dpmpar.f deleted file mode 100644 index cb6545a928..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/dpmpar.f +++ /dev/null @@ -1,177 +0,0 @@ - double precision function dpmpar(i) - integer i -c ********** -c -c Function dpmpar -c -c This function provides double precision machine parameters -c when the appropriate set of data statements is activated (by -c removing the c from column 1) and all other data statements are -c rendered inactive. Most of the parameter values were obtained -c from the corresponding Bell Laboratories Port Library function. -c -c The function statement is -c -c double precision function dpmpar(i) -c -c where -c -c i is an integer input variable set to 1, 2, or 3 which -c selects the desired machine parameter. 
If the machine has -c t base b digits and its smallest and largest exponents are -c emin and emax, respectively, then these parameters are -c -c dpmpar(1) = b**(1 - t), the machine precision, -c -c dpmpar(2) = b**(emin - 1), the smallest magnitude, -c -c dpmpar(3) = b**emax*(1 - b**(-t)), the largest magnitude. -c -c Argonne National Laboratory. MINPACK Project. November 1996. -c Burton S. Garbow, Kenneth E. Hillstrom, Jorge J. More' -c -c ********** - integer mcheps(4) - integer minmag(4) - integer maxmag(4) - double precision dmach(3) - equivalence (dmach(1),mcheps(1)) - equivalence (dmach(2),minmag(1)) - equivalence (dmach(3),maxmag(1)) -c -c Machine constants for the IBM 360/370 series, -c the Amdahl 470/V6, the ICL 2900, the Itel AS/6, -c the Xerox Sigma 5/7/9 and the Sel systems 85/86. -c -c data mcheps(1),mcheps(2) / z34100000, z00000000 / -c data minmag(1),minmag(2) / z00100000, z00000000 / -c data maxmag(1),maxmag(2) / z7fffffff, zffffffff / -c -c Machine constants for the Honeywell 600/6000 series. -c -c data mcheps(1),mcheps(2) / o606400000000, o000000000000 / -c data minmag(1),minmag(2) / o402400000000, o000000000000 / -c data maxmag(1),maxmag(2) / o376777777777, o777777777777 / -c -c Machine constants for the CDC 6000/7000 series. -c -c data mcheps(1) / 15614000000000000000b / -c data mcheps(2) / 15010000000000000000b / -c -c data minmag(1) / 00604000000000000000b / -c data minmag(2) / 00000000000000000000b / -c -c data maxmag(1) / 37767777777777777777b / -c data maxmag(2) / 37167777777777777777b / -c -c Machine constants for the PDP-10 (KA processor). -c -c data mcheps(1),mcheps(2) / "114400000000, "000000000000 / -c data minmag(1),minmag(2) / "033400000000, "000000000000 / -c data maxmag(1),maxmag(2) / "377777777777, "344777777777 / -c -c Machine constants for the PDP-10 (KI processor). 
-c -c data mcheps(1),mcheps(2) / "104400000000, "000000000000 / -c data minmag(1),minmag(2) / "000400000000, "000000000000 / -c data maxmag(1),maxmag(2) / "377777777777, "377777777777 / -c -c Machine constants for the PDP-11. -c -c data mcheps(1),mcheps(2) / 9472, 0 / -c data mcheps(3),mcheps(4) / 0, 0 / -c -c data minmag(1),minmag(2) / 128, 0 / -c data minmag(3),minmag(4) / 0, 0 / -c -c data maxmag(1),maxmag(2) / 32767, -1 / -c data maxmag(3),maxmag(4) / -1, -1 / -c -c Machine constants for the Burroughs 6700/7700 systems. -c -c data mcheps(1) / o1451000000000000 / -c data mcheps(2) / o0000000000000000 / -c -c data minmag(1) / o1771000000000000 / -c data minmag(2) / o7770000000000000 / -c -c data maxmag(1) / o0777777777777777 / -c data maxmag(2) / o7777777777777777 / -c -c Machine constants for the Burroughs 5700 system. -c -c data mcheps(1) / o1451000000000000 / -c data mcheps(2) / o0000000000000000 / -c -c data minmag(1) / o1771000000000000 / -c data minmag(2) / o0000000000000000 / -c -c data maxmag(1) / o0777777777777777 / -c data maxmag(2) / o0007777777777777 / -c -c Machine constants for the Burroughs 1700 system. -c -c data mcheps(1) / zcc6800000 / -c data mcheps(2) / z000000000 / -c -c data minmag(1) / zc00800000 / -c data minmag(2) / z000000000 / -c -c data maxmag(1) / zdffffffff / -c data maxmag(2) / zfffffffff / -c -c Machine constants for the Univac 1100 series. -c -c data mcheps(1),mcheps(2) / o170640000000, o000000000000 / -c data minmag(1),minmag(2) / o000040000000, o000000000000 / -c data maxmag(1),maxmag(2) / o377777777777, o777777777777 / -c -c Machine constants for the Data General Eclipse S/200. -c -c Note - it may be appropriate to include the following card - -c static dmach(3) -c -c data minmag/20k,3*0/,maxmag/77777k,3*177777k/ -c data mcheps/32020k,3*0/ -c -c Machine constants for the Harris 220. 
-c -c data mcheps(1),mcheps(2) / '20000000, '00000334 / -c data minmag(1),minmag(2) / '20000000, '00000201 / -c data maxmag(1),maxmag(2) / '37777777, '37777577 / -c -c Machine constants for the Cray-1. -c -c data mcheps(1) / 0376424000000000000000b / -c data mcheps(2) / 0000000000000000000000b / -c -c data minmag(1) / 0200034000000000000000b / -c data minmag(2) / 0000000000000000000000b / -c -c data maxmag(1) / 0577777777777777777777b / -c data maxmag(2) / 0000007777777777777776b / -c -c Machine constants for the Prime 400. -c -c data mcheps(1),mcheps(2) / :10000000000, :00000000123 / -c data minmag(1),minmag(2) / :10000000000, :00000100000 / -c data maxmag(1),maxmag(2) / :17777777777, :37777677776 / -c -c Machine constants for the VAX-11. -c -c data mcheps(1),mcheps(2) / 9472, 0 / -c data minmag(1),minmag(2) / 128, 0 / -c data maxmag(1),maxmag(2) / -32769, -1 / -c -c Machine constants for IEEE machines. -c - data dmach(1) /2.22044604926d-16/ - data dmach(2) /2.22507385852d-308/ - data dmach(3) /1.79769313485d+308/ -c - dpmpar = dmach(i) - return -c -c Last card of function dpmpar. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/enorm.f b/scipy-0.10.1/scipy/optimize/minpack/enorm.f deleted file mode 100644 index 2cb5b607e1..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/enorm.f +++ /dev/null @@ -1,108 +0,0 @@ - double precision function enorm(n,x) - integer n - double precision x(n) -c ********** -c -c function enorm -c -c given an n-vector x, this function calculates the -c euclidean norm of x. -c -c the euclidean norm is computed by accumulating the sum of -c squares in three different sums. the sums of squares for the -c small and large components are scaled so that no overflows -c occur. non-destructive underflows are permitted. underflows -c and overflows do not occur in the computation of the unscaled -c sum of squares for the intermediate components. 
-c the definitions of small, intermediate and large components -c depend on two constants, rdwarf and rgiant. the main -c restrictions on these constants are that rdwarf**2 not -c underflow and rgiant**2 not overflow. the constants -c given here are suitable for every known computer. -c -c the function statement is -c -c double precision function enorm(n,x) -c -c where -c -c n is a positive integer input variable. -c -c x is an input array of length n. -c -c subprograms called -c -c fortran-supplied ... dabs,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i - double precision agiant,floatn,one,rdwarf,rgiant,s1,s2,s3,xabs, - * x1max,x3max,zero - data one,zero,rdwarf,rgiant /1.0d0,0.0d0,3.834d-20,1.304d19/ - s1 = zero - s2 = zero - s3 = zero - x1max = zero - x3max = zero - floatn = n - agiant = rgiant/floatn - do 90 i = 1, n - xabs = dabs(x(i)) - if (xabs .gt. rdwarf .and. xabs .lt. agiant) go to 70 - if (xabs .le. rdwarf) go to 30 -c -c sum for large components. -c - if (xabs .le. x1max) go to 10 - s1 = one + s1*(x1max/xabs)**2 - x1max = xabs - go to 20 - 10 continue - s1 = s1 + (xabs/x1max)**2 - 20 continue - go to 60 - 30 continue -c -c sum for small components. -c - if (xabs .le. x3max) go to 40 - s3 = one + s3*(x3max/xabs)**2 - x3max = xabs - go to 50 - 40 continue - if (xabs .ne. zero) s3 = s3 + (xabs/x3max)**2 - 50 continue - 60 continue - go to 80 - 70 continue -c -c sum for intermediate components. -c - s2 = s2 + xabs**2 - 80 continue - 90 continue -c -c calculation of norm. -c - if (s1 .eq. zero) go to 100 - enorm = x1max*dsqrt(s1+(s2/x1max)/x1max) - go to 130 - 100 continue - if (s2 .eq. zero) go to 110 - if (s2 .ge. x3max) - * enorm = dsqrt(s2*(one+(x3max/s2)*(x3max*s3))) - if (s2 .lt. 
x3max) - * enorm = dsqrt(x3max*((s2/x3max)+(x3max*s3))) - go to 120 - 110 continue - enorm = x3max*dsqrt(s3) - 120 continue - 130 continue - return -c -c last card of function enorm. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/fdjac1.f b/scipy-0.10.1/scipy/optimize/minpack/fdjac1.f deleted file mode 100644 index 031ed46528..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/fdjac1.f +++ /dev/null @@ -1,151 +0,0 @@ - subroutine fdjac1(fcn,n,x,fvec,fjac,ldfjac,iflag,ml,mu,epsfcn, - * wa1,wa2) - integer n,ldfjac,iflag,ml,mu - double precision epsfcn - double precision x(n),fvec(n),fjac(ldfjac,n),wa1(n),wa2(n) -c ********** -c -c subroutine fdjac1 -c -c this subroutine computes a forward-difference approximation -c to the n by n jacobian matrix associated with a specified -c problem of n functions in n variables. if the jacobian has -c a banded form, then function evaluations are saved by only -c approximating the nonzero terms. -c -c the subroutine statement is -c -c subroutine fdjac1(fcn,n,x,fvec,fjac,ldfjac,iflag,ml,mu,epsfcn, -c wa1,wa2) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions. fcn must be declared -c in an external statement in the user calling -c program, and should be written as follows. -c -c subroutine fcn(n,x,fvec,iflag) -c integer n,iflag -c double precision x(n),fvec(n) -c ---------- -c calculate the functions at x and -c return this vector in fvec. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of fdjac1. -c in this case set iflag to a negative integer. -c -c n is a positive integer input variable set to the number -c of functions and variables. -c -c x is an input array of length n. -c -c fvec is an input array of length n which must contain the -c functions evaluated at x. -c -c fjac is an output n by n array which contains the -c approximation to the jacobian matrix evaluated at x. 
-c -c ldfjac is a positive integer input variable not less than n -c which specifies the leading dimension of the array fjac. -c -c iflag is an integer variable which can be used to terminate -c the execution of fdjac1. see description of fcn. -c -c ml is a nonnegative integer input variable which specifies -c the number of subdiagonals within the band of the -c jacobian matrix. if the jacobian is not banded, set -c ml to at least n - 1. -c -c epsfcn is an input variable used in determining a suitable -c step length for the forward-difference approximation. this -c approximation assumes that the relative errors in the -c functions are of the order of epsfcn. if epsfcn is less -c than the machine precision, it is assumed that the relative -c errors in the functions are of the order of the machine -c precision. -c -c mu is a nonnegative integer input variable which specifies -c the number of superdiagonals within the band of the -c jacobian matrix. if the jacobian is not banded, set -c mu to at least n - 1. -c -c wa1 and wa2 are work arrays of length n. if ml + mu + 1 is at -c least n, then the jacobian is considered dense, and wa2 is -c not referenced. -c -c subprograms called -c -c minpack-supplied ... dpmpar -c -c fortran-supplied ... dabs,dmax1,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j,k,msum - double precision eps,epsmch,h,temp,zero - double precision dpmpar - data zero /0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - eps = dsqrt(dmax1(epsfcn,epsmch)) - msum = ml + mu + 1 - if (msum .lt. n) go to 40 -c -c computation of dense approximate jacobian. -c - do 20 j = 1, n - temp = x(j) - h = eps*dabs(temp) - if (h .eq. zero) h = eps - x(j) = temp + h - call fcn(n,x,wa1,iflag) - if (iflag .lt. 
0) go to 30 - x(j) = temp - do 10 i = 1, n - fjac(i,j) = (wa1(i) - fvec(i))/h - 10 continue - 20 continue - 30 continue - go to 110 - 40 continue -c -c computation of banded approximate jacobian. -c - do 90 k = 1, msum - do 60 j = k, n, msum - wa2(j) = x(j) - h = eps*dabs(wa2(j)) - if (h .eq. zero) h = eps - x(j) = wa2(j) + h - 60 continue - call fcn(n,x,wa1,iflag) - if (iflag .lt. 0) go to 100 - do 80 j = k, n, msum - x(j) = wa2(j) - h = eps*dabs(wa2(j)) - if (h .eq. zero) h = eps - do 70 i = 1, n - fjac(i,j) = zero - if (i .ge. j - mu .and. i .le. j + ml) - * fjac(i,j) = (wa1(i) - fvec(i))/h - 70 continue - 80 continue - 90 continue - 100 continue - 110 continue - return -c -c last card of subroutine fdjac1. -c - end - diff --git a/scipy-0.10.1/scipy/optimize/minpack/fdjac2.f b/scipy-0.10.1/scipy/optimize/minpack/fdjac2.f deleted file mode 100644 index 218ab94c17..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/fdjac2.f +++ /dev/null @@ -1,107 +0,0 @@ - subroutine fdjac2(fcn,m,n,x,fvec,fjac,ldfjac,iflag,epsfcn,wa) - integer m,n,ldfjac,iflag - double precision epsfcn - double precision x(n),fvec(m),fjac(ldfjac,n),wa(m) -c ********** -c -c subroutine fdjac2 -c -c this subroutine computes a forward-difference approximation -c to the m by n jacobian matrix associated with a specified -c problem of m functions in n variables. -c -c the subroutine statement is -c -c subroutine fdjac2(fcn,m,n,x,fvec,fjac,ldfjac,iflag,epsfcn,wa) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions. fcn must be declared -c in an external statement in the user calling -c program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,iflag) -c integer m,n,iflag -c double precision x(n),fvec(m) -c ---------- -c calculate the functions at x and -c return this vector in fvec. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of fdjac2. 
-c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an input array of length n. -c -c fvec is an input array of length m which must contain the -c functions evaluated at x. -c -c fjac is an output m by n array which contains the -c approximation to the jacobian matrix evaluated at x. -c -c ldfjac is a positive integer input variable not less than m -c which specifies the leading dimension of the array fjac. -c -c iflag is an integer variable which can be used to terminate -c the execution of fdjac2. see description of fcn. -c -c epsfcn is an input variable used in determining a suitable -c step length for the forward-difference approximation. this -c approximation assumes that the relative errors in the -c functions are of the order of epsfcn. if epsfcn is less -c than the machine precision, it is assumed that the relative -c errors in the functions are of the order of the machine -c precision. -c -c wa is a work array of length m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... dpmpar -c -c fortran-supplied ... dabs,dmax1,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j - double precision eps,epsmch,h,temp,zero - double precision dpmpar - data zero /0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - eps = dsqrt(dmax1(epsfcn,epsmch)) - do 20 j = 1, n - temp = x(j) - h = eps*dabs(temp) - if (h .eq. zero) h = eps - x(j) = temp + h - call fcn(m,n,x,wa,iflag) - if (iflag .lt. 0) go to 30 - x(j) = temp - do 10 i = 1, m - fjac(i,j) = (wa(i) - fvec(i))/h - 10 continue - 20 continue - 30 continue - return -c -c last card of subroutine fdjac2. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/hybrd.f b/scipy-0.10.1/scipy/optimize/minpack/hybrd.f deleted file mode 100644 index fc0b4c26af..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/hybrd.f +++ /dev/null @@ -1,459 +0,0 @@ - subroutine hybrd(fcn,n,x,fvec,xtol,maxfev,ml,mu,epsfcn,diag, - * mode,factor,nprint,info,nfev,fjac,ldfjac,r,lr, - * qtf,wa1,wa2,wa3,wa4) - integer n,maxfev,ml,mu,mode,nprint,info,nfev,ldfjac,lr - double precision xtol,epsfcn,factor - double precision x(n),fvec(n),diag(n),fjac(ldfjac,n),r(lr), - * qtf(n),wa1(n),wa2(n),wa3(n),wa4(n) - external fcn -c ********** -c -c subroutine hybrd -c -c the purpose of hybrd is to find a zero of a system of -c n nonlinear functions in n variables by a modification -c of the powell hybrid method. the user must provide a -c subroutine which calculates the functions. the jacobian is -c then calculated by a forward-difference approximation. -c -c the subroutine statement is -c -c subroutine hybrd(fcn,n,x,fvec,xtol,maxfev,ml,mu,epsfcn, -c diag,mode,factor,nprint,info,nfev,fjac, -c ldfjac,r,lr,qtf,wa1,wa2,wa3,wa4) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions. fcn must be declared -c in an external statement in the user calling -c program, and should be written as follows. -c -c subroutine fcn(n,x,fvec,iflag) -c integer n,iflag -c double precision x(n),fvec(n) -c ---------- -c calculate the functions at x and -c return this vector in fvec. -c --------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of hybrd. -c in this case set iflag to a negative integer. -c -c n is a positive integer input variable set to the number -c of functions and variables. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. 
-c -c fvec is an output array of length n which contains -c the functions evaluated at the output x. -c -c xtol is a nonnegative input variable. termination -c occurs when the relative error between two consecutive -c iterates is at most xtol. -c -c maxfev is a positive integer input variable. termination -c occurs when the number of calls to fcn is at least maxfev -c by the end of an iteration. -c -c ml is a nonnegative integer input variable which specifies -c the number of subdiagonals within the band of the -c jacobian matrix. if the jacobian is not banded, set -c ml to at least n - 1. -c -c mu is a nonnegative integer input variable which specifies -c the number of superdiagonals within the band of the -c jacobian matrix. if the jacobian is not banded, set -c mu to at least n - 1. -c -c epsfcn is an input variable used in determining a suitable -c step length for the forward-difference approximation. this -c approximation assumes that the relative errors in the -c functions are of the order of epsfcn. if epsfcn is less -c than the machine precision, it is assumed that the relative -c errors in the functions are of the order of the machine -c precision. -c -c diag is an array of length n. if mode = 1 (see -c below), diag is internally set. if mode = 2, diag -c must contain positive entries that serve as -c multiplicative scale factors for the variables. -c -c mode is an integer input variable. if mode = 1, the -c variables will be scaled internally. if mode = 2, -c the scaling is specified by the input diag. other -c values of mode are equivalent to mode = 1. -c -c factor is a positive input variable used in determining the -c initial step bound. this bound is set to the product of -c factor and the euclidean norm of diag*x if nonzero, or else -c to factor itself. in most cases factor should lie in the -c interval (.1,100.). 100. is a generally recommended value. 
-c -c nprint is an integer input variable that enables controlled -c printing of iterates if it is positive. in this case, -c fcn is called with iflag = 0 at the beginning of the first -c iteration and every nprint iterations thereafter and -c immediately prior to return, with x and fvec available -c for printing. if nprint is not positive, no special calls -c of fcn with iflag = 0 are made. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 relative error between two consecutive iterates -c is at most xtol. -c -c info = 2 number of calls to fcn has reached or exceeded -c maxfev. -c -c info = 3 xtol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 4 iteration is not making good progress, as -c measured by the improvement from the last -c five jacobian evaluations. -c -c info = 5 iteration is not making good progress, as -c measured by the improvement from the last -c ten iterations. -c -c nfev is an integer output variable set to the number of -c calls to fcn. -c -c fjac is an output n by n array which contains the -c orthogonal matrix q produced by the qr factorization -c of the final approximate jacobian. -c -c ldfjac is a positive integer input variable not less than n -c which specifies the leading dimension of the array fjac. -c -c r is an output array of length lr which contains the -c upper triangular matrix produced by the qr factorization -c of the final approximate jacobian, stored rowwise. -c -c lr is a positive integer input variable not less than -c (n*(n+1))/2. -c -c qtf is an output array of length n which contains -c the vector (q transpose)*fvec. -c -c wa1, wa2, wa3, and wa4 are work arrays of length n. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... 
dogleg,dpmpar,enorm,fdjac1, -c qform,qrfac,r1mpyq,r1updt -c -c fortran-supplied ... dabs,dmax1,dmin1,min0,mod -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,iflag,iter,j,jm1,l,msum,ncfail,ncsuc,nslow1,nslow2 - integer iwa(1) - logical jeval,sing - double precision actred,delta,epsmch,fnorm,fnorm1,one,pnorm, - * prered,p1,p5,p001,p0001,ratio,sum,temp,xnorm, - * zero - double precision dpmpar,enorm - data one,p1,p5,p001,p0001,zero - * /1.0d0,1.0d-1,5.0d-1,1.0d-3,1.0d-4,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - info = 0 - iflag = 0 - nfev = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. xtol .lt. zero .or. maxfev .le. 0 - * .or. ml .lt. 0 .or. mu .lt. 0 .or. factor .le. zero - * .or. ldfjac .lt. n .or. lr .lt. (n*(n + 1))/2) go to 300 - if (mode .ne. 2) go to 20 - do 10 j = 1, n - if (diag(j) .le. zero) go to 300 - 10 continue - 20 continue -c -c evaluate the function at the starting point -c and calculate its norm. -c - iflag = 1 - call fcn(n,x,fvec,iflag) - nfev = 1 - if (iflag .lt. 0) go to 300 - fnorm = enorm(n,fvec) -c -c determine the number of calls to fcn needed to compute -c the jacobian matrix. -c - msum = min0(ml+mu+1,n) -c -c initialize iteration counter and monitors. -c - iter = 1 - ncsuc = 0 - ncfail = 0 - nslow1 = 0 - nslow2 = 0 -c -c beginning of the outer loop. -c - 30 continue - jeval = .true. -c -c calculate the jacobian matrix. -c - iflag = 2 - call fdjac1(fcn,n,x,fvec,fjac,ldfjac,iflag,ml,mu,epsfcn,wa1, - * wa2) - nfev = nfev + msum - if (iflag .lt. 0) go to 300 -c -c compute the qr factorization of the jacobian. -c - call qrfac(n,n,fjac,ldfjac,.false.,iwa,1,wa1,wa2,wa3) -c -c on the first iteration and if mode is 1, scale according -c to the norms of the columns of the initial jacobian. -c - if (iter .ne. 1) go to 70 - if (mode .eq. 
2) go to 50 - do 40 j = 1, n - diag(j) = wa2(j) - if (wa2(j) .eq. zero) diag(j) = one - 40 continue - 50 continue -c -c on the first iteration, calculate the norm of the scaled x -c and initialize the step bound delta. -c - do 60 j = 1, n - wa3(j) = diag(j)*x(j) - 60 continue - xnorm = enorm(n,wa3) - delta = factor*xnorm - if (delta .eq. zero) delta = factor - 70 continue -c -c form (q transpose)*fvec and store in qtf. -c - do 80 i = 1, n - qtf(i) = fvec(i) - 80 continue - do 120 j = 1, n - if (fjac(j,j) .eq. zero) go to 110 - sum = zero - do 90 i = j, n - sum = sum + fjac(i,j)*qtf(i) - 90 continue - temp = -sum/fjac(j,j) - do 100 i = j, n - qtf(i) = qtf(i) + fjac(i,j)*temp - 100 continue - 110 continue - 120 continue -c -c copy the triangular factor of the qr factorization into r. -c - sing = .false. - do 150 j = 1, n - l = j - jm1 = j - 1 - if (jm1 .lt. 1) go to 140 - do 130 i = 1, jm1 - r(l) = fjac(i,j) - l = l + n - i - 130 continue - 140 continue - r(l) = wa1(j) - if (wa1(j) .eq. zero) sing = .true. - 150 continue -c -c accumulate the orthogonal factor in fjac. -c - call qform(n,n,fjac,ldfjac,wa1) -c -c rescale if necessary. -c - if (mode .eq. 2) go to 170 - do 160 j = 1, n - diag(j) = dmax1(diag(j),wa2(j)) - 160 continue - 170 continue -c -c beginning of the inner loop. -c - 180 continue -c -c if requested, call fcn to enable printing of iterates. -c - if (nprint .le. 0) go to 190 - iflag = 0 - if (mod(iter-1,nprint) .eq. 0) call fcn(n,x,fvec,iflag) - if (iflag .lt. 0) go to 300 - 190 continue -c -c determine the direction p. -c - call dogleg(n,r,lr,diag,qtf,delta,wa1,wa2,wa3) -c -c store the direction p and x + p. calculate the norm of p. -c - do 200 j = 1, n - wa1(j) = -wa1(j) - wa2(j) = x(j) + wa1(j) - wa3(j) = diag(j)*wa1(j) - 200 continue - pnorm = enorm(n,wa3) -c -c on the first iteration, adjust the initial step bound. -c - if (iter .eq. 1) delta = dmin1(delta,pnorm) -c -c evaluate the function at x + p and calculate its norm. 
-c - iflag = 1 - call fcn(n,wa2,wa4,iflag) - nfev = nfev + 1 - if (iflag .lt. 0) go to 300 - fnorm1 = enorm(n,wa4) -c -c compute the scaled actual reduction. -c - actred = -one - if (fnorm1 .lt. fnorm) actred = one - (fnorm1/fnorm)**2 -c -c compute the scaled predicted reduction. -c - l = 1 - do 220 i = 1, n - sum = zero - do 210 j = i, n - sum = sum + r(l)*wa1(j) - l = l + 1 - 210 continue - wa3(i) = qtf(i) + sum - 220 continue - temp = enorm(n,wa3) - prered = zero - if (temp .lt. fnorm) prered = one - (temp/fnorm)**2 -c -c compute the ratio of the actual to the predicted -c reduction. -c - ratio = zero - if (prered .gt. zero) ratio = actred/prered -c -c update the step bound. -c - if (ratio .ge. p1) go to 230 - ncsuc = 0 - ncfail = ncfail + 1 - delta = p5*delta - go to 240 - 230 continue - ncfail = 0 - ncsuc = ncsuc + 1 - if (ratio .ge. p5 .or. ncsuc .gt. 1) - * delta = dmax1(delta,pnorm/p5) - if (dabs(ratio-one) .le. p1) delta = pnorm/p5 - 240 continue -c -c test for successful iteration. -c - if (ratio .lt. p0001) go to 260 -c -c successful iteration. update x, fvec, and their norms. -c - do 250 j = 1, n - x(j) = wa2(j) - wa2(j) = diag(j)*x(j) - fvec(j) = wa4(j) - 250 continue - xnorm = enorm(n,wa2) - fnorm = fnorm1 - iter = iter + 1 - 260 continue -c -c determine the progress of the iteration. -c - nslow1 = nslow1 + 1 - if (actred .ge. p001) nslow1 = 0 - if (jeval) nslow2 = nslow2 + 1 - if (actred .ge. p1) nslow2 = 0 -c -c test for convergence. -c - if (delta .le. xtol*xnorm .or. fnorm .eq. zero) info = 1 - if (info .ne. 0) go to 300 -c -c tests for termination and stringent tolerances. -c - if (nfev .ge. maxfev) info = 2 - if (p1*dmax1(p1*delta,pnorm) .le. epsmch*xnorm) info = 3 - if (nslow2 .eq. 5) info = 4 - if (nslow1 .eq. 10) info = 5 - if (info .ne. 0) go to 300 -c -c criterion for recalculating jacobian approximation -c by forward differences. -c - if (ncfail .eq. 
2) go to 290 -c -c calculate the rank one modification to the jacobian -c and update qtf if necessary. -c - do 280 j = 1, n - sum = zero - do 270 i = 1, n - sum = sum + fjac(i,j)*wa4(i) - 270 continue - wa2(j) = (sum - wa3(j))/pnorm - wa1(j) = diag(j)*((diag(j)*wa1(j))/pnorm) - if (ratio .ge. p0001) qtf(j) = sum - 280 continue -c -c compute the qr factorization of the updated jacobian. -c - call r1updt(n,n,r,lr,wa1,wa2,wa3,sing) - call r1mpyq(n,n,fjac,ldfjac,wa2,wa3) - call r1mpyq(1,n,qtf,1,wa2,wa3) -c -c end of the inner loop. -c - jeval = .false. - go to 180 - 290 continue -c -c end of the outer loop. -c - go to 30 - 300 continue -c -c termination, either normal or user imposed. -c - if (iflag .lt. 0) info = iflag - iflag = 0 - if (nprint .gt. 0) call fcn(n,x,fvec,iflag) - return -c -c last card of subroutine hybrd. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/hybrd1.f b/scipy-0.10.1/scipy/optimize/minpack/hybrd1.f deleted file mode 100644 index c0a859275d..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/hybrd1.f +++ /dev/null @@ -1,123 +0,0 @@ - subroutine hybrd1(fcn,n,x,fvec,tol,info,wa,lwa) - integer n,info,lwa - double precision tol - double precision x(n),fvec(n),wa(lwa) - external fcn -c ********** -c -c subroutine hybrd1 -c -c the purpose of hybrd1 is to find a zero of a system of -c n nonlinear functions in n variables by a modification -c of the powell hybrid method. this is done by using the -c more general nonlinear equation solver hybrd. the user -c must provide a subroutine which calculates the functions. -c the jacobian is then calculated by a forward-difference -c approximation. -c -c the subroutine statement is -c -c subroutine hybrd1(fcn,n,x,fvec,tol,info,wa,lwa) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions. fcn must be declared -c in an external statement in the user calling -c program, and should be written as follows. 
-c -c subroutine fcn(n,x,fvec,iflag) -c integer n,iflag -c double precision x(n),fvec(n) -c ---------- -c calculate the functions at x and -c return this vector in fvec. -c --------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of hybrd1. -c in this case set iflag to a negative integer. -c -c n is a positive integer input variable set to the number -c of functions and variables. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length n which contains -c the functions evaluated at the output x. -c -c tol is a nonnegative input variable. termination occurs -c when the algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info = 2 number of calls to fcn has reached or exceeded -c 200*(n+1). -c -c info = 3 tol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 4 iteration is not making good progress. -c -c wa is a work array of length lwa. -c -c lwa is a positive integer input variable not less than -c (n*(3*n+13))/2. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... hybrd -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. 
more -c -c ********** - integer index,j,lr,maxfev,ml,mode,mu,nfev,nprint - double precision epsfcn,factor,one,xtol,zero - data factor,one,zero /1.0d2,1.0d0,0.0d0/ - info = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. tol .lt. zero .or. lwa .lt. (n*(3*n + 13))/2) - * go to 20 -c -c call hybrd. -c - maxfev = 200*(n + 1) - xtol = tol - ml = n - 1 - mu = n - 1 - epsfcn = zero - mode = 2 - do 10 j = 1, n - wa(j) = one - 10 continue - nprint = 0 - lr = (n*(n + 1))/2 - index = 6*n + lr - call hybrd(fcn,n,x,fvec,xtol,maxfev,ml,mu,epsfcn,wa(1),mode, - * factor,nprint,info,nfev,wa(index+1),n,wa(6*n+1),lr, - * wa(n+1),wa(2*n+1),wa(3*n+1),wa(4*n+1),wa(5*n+1)) - if (info .eq. 5) info = 4 - 20 continue - return -c -c last card of subroutine hybrd1. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/hybrj.f b/scipy-0.10.1/scipy/optimize/minpack/hybrj.f deleted file mode 100644 index 3070dad3fa..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/hybrj.f +++ /dev/null @@ -1,440 +0,0 @@ - subroutine hybrj(fcn,n,x,fvec,fjac,ldfjac,xtol,maxfev,diag,mode, - * factor,nprint,info,nfev,njev,r,lr,qtf,wa1,wa2, - * wa3,wa4) - integer n,ldfjac,maxfev,mode,nprint,info,nfev,njev,lr - double precision xtol,factor - double precision x(n),fvec(n),fjac(ldfjac,n),diag(n),r(lr), - * qtf(n),wa1(n),wa2(n),wa3(n),wa4(n) -c ********** -c -c subroutine hybrj -c -c the purpose of hybrj is to find a zero of a system of -c n nonlinear functions in n variables by a modification -c of the powell hybrid method. the user must provide a -c subroutine which calculates the functions and the jacobian. -c -c the subroutine statement is -c -c subroutine hybrj(fcn,n,x,fvec,fjac,ldfjac,xtol,maxfev,diag, -c mode,factor,nprint,info,nfev,njev,r,lr,qtf, -c wa1,wa2,wa3,wa4) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions and the jacobian. 
fcn must -c be declared in an external statement in the user -c calling program, and should be written as follows. -c -c subroutine fcn(n,x,fvec,fjac,ldfjac,iflag) -c integer n,ldfjac,iflag -c double precision x(n),fvec(n),fjac(ldfjac,n) -c ---------- -c if iflag = 1 calculate the functions at x and -c return this vector in fvec. do not alter fjac. -c if iflag = 2 calculate the jacobian at x and -c return this matrix in fjac. do not alter fvec. -c --------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of hybrj. -c in this case set iflag to a negative integer. -c -c n is a positive integer input variable set to the number -c of functions and variables. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length n which contains -c the functions evaluated at the output x. -c -c fjac is an output n by n array which contains the -c orthogonal matrix q produced by the qr factorization -c of the final approximate jacobian. -c -c ldfjac is a positive integer input variable not less than n -c which specifies the leading dimension of the array fjac. -c -c xtol is a nonnegative input variable. termination -c occurs when the relative error between two consecutive -c iterates is at most xtol. -c -c maxfev is a positive integer input variable. termination -c occurs when the number of calls to fcn with iflag = 1 -c has reached maxfev. -c -c diag is an array of length n. if mode = 1 (see -c below), diag is internally set. if mode = 2, diag -c must contain positive entries that serve as -c multiplicative scale factors for the variables. -c -c mode is an integer input variable. if mode = 1, the -c variables will be scaled internally. if mode = 2, -c the scaling is specified by the input diag. other -c values of mode are equivalent to mode = 1. 
-c -c factor is a positive input variable used in determining the -c initial step bound. this bound is set to the product of -c factor and the euclidean norm of diag*x if nonzero, or else -c to factor itself. in most cases factor should lie in the -c interval (.1,100.). 100. is a generally recommended value. -c -c nprint is an integer input variable that enables controlled -c printing of iterates if it is positive. in this case, -c fcn is called with iflag = 0 at the beginning of the first -c iteration and every nprint iterations thereafter and -c immediately prior to return, with x and fvec available -c for printing. fvec and fjac should not be altered. -c if nprint is not positive, no special calls of fcn -c with iflag = 0 are made. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 relative error between two consecutive iterates -c is at most xtol. -c -c info = 2 number of calls to fcn with iflag = 1 has -c reached maxfev. -c -c info = 3 xtol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 4 iteration is not making good progress, as -c measured by the improvement from the last -c five jacobian evaluations. -c -c info = 5 iteration is not making good progress, as -c measured by the improvement from the last -c ten iterations. -c -c nfev is an integer output variable set to the number of -c calls to fcn with iflag = 1. -c -c njev is an integer output variable set to the number of -c calls to fcn with iflag = 2. -c -c r is an output array of length lr which contains the -c upper triangular matrix produced by the qr factorization -c of the final approximate jacobian, stored rowwise. -c -c lr is a positive integer input variable not less than -c (n*(n+1))/2. 
-c -c qtf is an output array of length n which contains -c the vector (q transpose)*fvec. -c -c wa1, wa2, wa3, and wa4 are work arrays of length n. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... dogleg,dpmpar,enorm, -c qform,qrfac,r1mpyq,r1updt -c -c fortran-supplied ... dabs,dmax1,dmin1,mod -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,iflag,iter,j,jm1,l,ncfail,ncsuc,nslow1,nslow2 - integer iwa(1) - logical jeval,sing - double precision actred,delta,epsmch,fnorm,fnorm1,one,pnorm, - * prered,p1,p5,p001,p0001,ratio,sum,temp,xnorm, - * zero - double precision dpmpar,enorm - data one,p1,p5,p001,p0001,zero - * /1.0d0,1.0d-1,5.0d-1,1.0d-3,1.0d-4,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - info = 0 - iflag = 0 - nfev = 0 - njev = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. ldfjac .lt. n .or. xtol .lt. zero - * .or. maxfev .le. 0 .or. factor .le. zero - * .or. lr .lt. (n*(n + 1))/2) go to 300 - if (mode .ne. 2) go to 20 - do 10 j = 1, n - if (diag(j) .le. zero) go to 300 - 10 continue - 20 continue -c -c evaluate the function at the starting point -c and calculate its norm. -c - iflag = 1 - call fcn(n,x,fvec,fjac,ldfjac,iflag) - nfev = 1 - if (iflag .lt. 0) go to 300 - fnorm = enorm(n,fvec) -c -c initialize iteration counter and monitors. -c - iter = 1 - ncsuc = 0 - ncfail = 0 - nslow1 = 0 - nslow2 = 0 -c -c beginning of the outer loop. -c - 30 continue - jeval = .true. -c -c calculate the jacobian matrix. -c - iflag = 2 - call fcn(n,x,fvec,fjac,ldfjac,iflag) - njev = njev + 1 - if (iflag .lt. 0) go to 300 -c -c compute the qr factorization of the jacobian. -c - call qrfac(n,n,fjac,ldfjac,.false.,iwa,1,wa1,wa2,wa3) -c -c on the first iteration and if mode is 1, scale according -c to the norms of the columns of the initial jacobian. -c - if (iter .ne. 1) go to 70 - if (mode .eq. 
2) go to 50 - do 40 j = 1, n - diag(j) = wa2(j) - if (wa2(j) .eq. zero) diag(j) = one - 40 continue - 50 continue -c -c on the first iteration, calculate the norm of the scaled x -c and initialize the step bound delta. -c - do 60 j = 1, n - wa3(j) = diag(j)*x(j) - 60 continue - xnorm = enorm(n,wa3) - delta = factor*xnorm - if (delta .eq. zero) delta = factor - 70 continue -c -c form (q transpose)*fvec and store in qtf. -c - do 80 i = 1, n - qtf(i) = fvec(i) - 80 continue - do 120 j = 1, n - if (fjac(j,j) .eq. zero) go to 110 - sum = zero - do 90 i = j, n - sum = sum + fjac(i,j)*qtf(i) - 90 continue - temp = -sum/fjac(j,j) - do 100 i = j, n - qtf(i) = qtf(i) + fjac(i,j)*temp - 100 continue - 110 continue - 120 continue -c -c copy the triangular factor of the qr factorization into r. -c - sing = .false. - do 150 j = 1, n - l = j - jm1 = j - 1 - if (jm1 .lt. 1) go to 140 - do 130 i = 1, jm1 - r(l) = fjac(i,j) - l = l + n - i - 130 continue - 140 continue - r(l) = wa1(j) - if (wa1(j) .eq. zero) sing = .true. - 150 continue -c -c accumulate the orthogonal factor in fjac. -c - call qform(n,n,fjac,ldfjac,wa1) -c -c rescale if necessary. -c - if (mode .eq. 2) go to 170 - do 160 j = 1, n - diag(j) = dmax1(diag(j),wa2(j)) - 160 continue - 170 continue -c -c beginning of the inner loop. -c - 180 continue -c -c if requested, call fcn to enable printing of iterates. -c - if (nprint .le. 0) go to 190 - iflag = 0 - if (mod(iter-1,nprint) .eq. 0) - * call fcn(n,x,fvec,fjac,ldfjac,iflag) - if (iflag .lt. 0) go to 300 - 190 continue -c -c determine the direction p. -c - call dogleg(n,r,lr,diag,qtf,delta,wa1,wa2,wa3) -c -c store the direction p and x + p. calculate the norm of p. -c - do 200 j = 1, n - wa1(j) = -wa1(j) - wa2(j) = x(j) + wa1(j) - wa3(j) = diag(j)*wa1(j) - 200 continue - pnorm = enorm(n,wa3) -c -c on the first iteration, adjust the initial step bound. -c - if (iter .eq. 1) delta = dmin1(delta,pnorm) -c -c evaluate the function at x + p and calculate its norm. 
-c - iflag = 1 - call fcn(n,wa2,wa4,fjac,ldfjac,iflag) - nfev = nfev + 1 - if (iflag .lt. 0) go to 300 - fnorm1 = enorm(n,wa4) -c -c compute the scaled actual reduction. -c - actred = -one - if (fnorm1 .lt. fnorm) actred = one - (fnorm1/fnorm)**2 -c -c compute the scaled predicted reduction. -c - l = 1 - do 220 i = 1, n - sum = zero - do 210 j = i, n - sum = sum + r(l)*wa1(j) - l = l + 1 - 210 continue - wa3(i) = qtf(i) + sum - 220 continue - temp = enorm(n,wa3) - prered = zero - if (temp .lt. fnorm) prered = one - (temp/fnorm)**2 -c -c compute the ratio of the actual to the predicted -c reduction. -c - ratio = zero - if (prered .gt. zero) ratio = actred/prered -c -c update the step bound. -c - if (ratio .ge. p1) go to 230 - ncsuc = 0 - ncfail = ncfail + 1 - delta = p5*delta - go to 240 - 230 continue - ncfail = 0 - ncsuc = ncsuc + 1 - if (ratio .ge. p5 .or. ncsuc .gt. 1) - * delta = dmax1(delta,pnorm/p5) - if (dabs(ratio-one) .le. p1) delta = pnorm/p5 - 240 continue -c -c test for successful iteration. -c - if (ratio .lt. p0001) go to 260 -c -c successful iteration. update x, fvec, and their norms. -c - do 250 j = 1, n - x(j) = wa2(j) - wa2(j) = diag(j)*x(j) - fvec(j) = wa4(j) - 250 continue - xnorm = enorm(n,wa2) - fnorm = fnorm1 - iter = iter + 1 - 260 continue -c -c determine the progress of the iteration. -c - nslow1 = nslow1 + 1 - if (actred .ge. p001) nslow1 = 0 - if (jeval) nslow2 = nslow2 + 1 - if (actred .ge. p1) nslow2 = 0 -c -c test for convergence. -c - if (delta .le. xtol*xnorm .or. fnorm .eq. zero) info = 1 - if (info .ne. 0) go to 300 -c -c tests for termination and stringent tolerances. -c - if (nfev .ge. maxfev) info = 2 - if (p1*dmax1(p1*delta,pnorm) .le. epsmch*xnorm) info = 3 - if (nslow2 .eq. 5) info = 4 - if (nslow1 .eq. 10) info = 5 - if (info .ne. 0) go to 300 -c -c criterion for recalculating jacobian. -c - if (ncfail .eq. 2) go to 290 -c -c calculate the rank one modification to the jacobian -c and update qtf if necessary. 
-c - do 280 j = 1, n - sum = zero - do 270 i = 1, n - sum = sum + fjac(i,j)*wa4(i) - 270 continue - wa2(j) = (sum - wa3(j))/pnorm - wa1(j) = diag(j)*((diag(j)*wa1(j))/pnorm) - if (ratio .ge. p0001) qtf(j) = sum - 280 continue -c -c compute the qr factorization of the updated jacobian. -c - call r1updt(n,n,r,lr,wa1,wa2,wa3,sing) - call r1mpyq(n,n,fjac,ldfjac,wa2,wa3) - call r1mpyq(1,n,qtf,1,wa2,wa3) -c -c end of the inner loop. -c - jeval = .false. - go to 180 - 290 continue -c -c end of the outer loop. -c - go to 30 - 300 continue -c -c termination, either normal or user imposed. -c - if (iflag .lt. 0) info = iflag - iflag = 0 - if (nprint .gt. 0) call fcn(n,x,fvec,fjac,ldfjac,iflag) - return -c -c last card of subroutine hybrj. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/hybrj1.f b/scipy-0.10.1/scipy/optimize/minpack/hybrj1.f deleted file mode 100644 index 9f51c49657..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/hybrj1.f +++ /dev/null @@ -1,127 +0,0 @@ - subroutine hybrj1(fcn,n,x,fvec,fjac,ldfjac,tol,info,wa,lwa) - integer n,ldfjac,info,lwa - double precision tol - double precision x(n),fvec(n),fjac(ldfjac,n),wa(lwa) - external fcn -c ********** -c -c subroutine hybrj1 -c -c the purpose of hybrj1 is to find a zero of a system of -c n nonlinear functions in n variables by a modification -c of the powell hybrid method. this is done by using the -c more general nonlinear equation solver hybrj. the user -c must provide a subroutine which calculates the functions -c and the jacobian. -c -c the subroutine statement is -c -c subroutine hybrj1(fcn,n,x,fvec,fjac,ldfjac,tol,info,wa,lwa) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions and the jacobian. fcn must -c be declared in an external statement in the user -c calling program, and should be written as follows. 
-c -c subroutine fcn(n,x,fvec,fjac,ldfjac,iflag) -c integer n,ldfjac,iflag -c double precision x(n),fvec(n),fjac(ldfjac,n) -c ---------- -c if iflag = 1 calculate the functions at x and -c return this vector in fvec. do not alter fjac. -c if iflag = 2 calculate the jacobian at x and -c return this matrix in fjac. do not alter fvec. -c --------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of hybrj1. -c in this case set iflag to a negative integer. -c -c n is a positive integer input variable set to the number -c of functions and variables. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length n which contains -c the functions evaluated at the output x. -c -c fjac is an output n by n array which contains the -c orthogonal matrix q produced by the qr factorization -c of the final approximate jacobian. -c -c ldfjac is a positive integer input variable not less than n -c which specifies the leading dimension of the array fjac. -c -c tol is a nonnegative input variable. termination occurs -c when the algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info = 2 number of calls to fcn with iflag = 1 has -c reached 100*(n+1). -c -c info = 3 tol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 4 iteration is not making good progress. -c -c wa is a work array of length lwa. 
-c -c lwa is a positive integer input variable not less than -c (n*(n+13))/2. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... hybrj -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer j,lr,maxfev,mode,nfev,njev,nprint - double precision factor,one,xtol,zero - data factor,one,zero /1.0d2,1.0d0,0.0d0/ - info = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. ldfjac .lt. n .or. tol .lt. zero - * .or. lwa .lt. (n*(n + 13))/2) go to 20 -c -c call hybrj. -c - maxfev = 100*(n + 1) - xtol = tol - mode = 2 - do 10 j = 1, n - wa(j) = one - 10 continue - nprint = 0 - lr = (n*(n + 1))/2 - call hybrj(fcn,n,x,fvec,fjac,ldfjac,xtol,maxfev,wa(1),mode, - * factor,nprint,info,nfev,njev,wa(6*n+1),lr,wa(n+1), - * wa(2*n+1),wa(3*n+1),wa(4*n+1),wa(5*n+1)) - if (info .eq. 5) info = 4 - 20 continue - return -c -c last card of subroutine hybrj1. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmder.f b/scipy-0.10.1/scipy/optimize/minpack/lmder.f deleted file mode 100644 index 8797d8bed8..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmder.f +++ /dev/null @@ -1,452 +0,0 @@ - subroutine lmder(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol, - * maxfev,diag,mode,factor,nprint,info,nfev,njev, - * ipvt,qtf,wa1,wa2,wa3,wa4) - integer m,n,ldfjac,maxfev,mode,nprint,info,nfev,njev - integer ipvt(n) - double precision ftol,xtol,gtol,factor - double precision x(n),fvec(m),fjac(ldfjac,n),diag(n),qtf(n), - * wa1(n),wa2(n),wa3(n),wa4(m) -c ********** -c -c subroutine lmder -c -c the purpose of lmder is to minimize the sum of the squares of -c m nonlinear functions in n variables by a modification of -c the levenberg-marquardt algorithm. the user must provide a -c subroutine which calculates the functions and the jacobian. 
-c -c the subroutine statement is -c -c subroutine lmder(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol, -c maxfev,diag,mode,factor,nprint,info,nfev, -c njev,ipvt,qtf,wa1,wa2,wa3,wa4) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions and the jacobian. fcn must -c be declared in an external statement in the user -c calling program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,fjac,ldfjac,iflag) -c integer m,n,ldfjac,iflag -c double precision x(n),fvec(m),fjac(ldfjac,n) -c ---------- -c if iflag = 1 calculate the functions at x and -c return this vector in fvec. do not alter fjac. -c if iflag = 2 calculate the jacobian at x and -c return this matrix in fjac. do not alter fvec. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of lmder. -c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length m which contains -c the functions evaluated at the output x. -c -c fjac is an output m by n array. the upper n by n submatrix -c of fjac contains an upper triangular matrix r with -c diagonal elements of nonincreasing magnitude such that -c -c t t t -c p *(jac *jac)*p = r *r, -c -c where p is a permutation matrix and jac is the final -c calculated jacobian. column j of p is column ipvt(j) -c (see below) of the identity matrix. the lower trapezoidal -c part of fjac contains information generated during -c the computation of r. -c -c ldfjac is a positive integer input variable not less than m -c which specifies the leading dimension of the array fjac. 
-c -c ftol is a nonnegative input variable. termination -c occurs when both the actual and predicted relative -c reductions in the sum of squares are at most ftol. -c therefore, ftol measures the relative error desired -c in the sum of squares. -c -c xtol is a nonnegative input variable. termination -c occurs when the relative error between two consecutive -c iterates is at most xtol. therefore, xtol measures the -c relative error desired in the approximate solution. -c -c gtol is a nonnegative input variable. termination -c occurs when the cosine of the angle between fvec and -c any column of the jacobian is at most gtol in absolute -c value. therefore, gtol measures the orthogonality -c desired between the function vector and the columns -c of the jacobian. -c -c maxfev is a positive integer input variable. termination -c occurs when the number of calls to fcn with iflag = 1 -c has reached maxfev. -c -c diag is an array of length n. if mode = 1 (see -c below), diag is internally set. if mode = 2, diag -c must contain positive entries that serve as -c multiplicative scale factors for the variables. -c -c mode is an integer input variable. if mode = 1, the -c variables will be scaled internally. if mode = 2, -c the scaling is specified by the input diag. other -c values of mode are equivalent to mode = 1. -c -c factor is a positive input variable used in determining the -c initial step bound. this bound is set to the product of -c factor and the euclidean norm of diag*x if nonzero, or else -c to factor itself. in most cases factor should lie in the -c interval (.1,100.).100. is a generally recommended value. -c -c nprint is an integer input variable that enables controlled -c printing of iterates if it is positive. in this case, -c fcn is called with iflag = 0 at the beginning of the first -c iteration and every nprint iterations thereafter and -c immediately prior to return, with x, fvec, and fjac -c available for printing. fvec and fjac should not be -c altered. 
if nprint is not positive, no special calls -c of fcn with iflag = 0 are made. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 both actual and predicted relative reductions -c in the sum of squares are at most ftol. -c -c info = 2 relative error between two consecutive iterates -c is at most xtol. -c -c info = 3 conditions for info = 1 and info = 2 both hold. -c -c info = 4 the cosine of the angle between fvec and any -c column of the jacobian is at most gtol in -c absolute value. -c -c info = 5 number of calls to fcn with iflag = 1 has -c reached maxfev. -c -c info = 6 ftol is too small. no further reduction in -c the sum of squares is possible. -c -c info = 7 xtol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 8 gtol is too small. fvec is orthogonal to the -c columns of the jacobian to machine precision. -c -c nfev is an integer output variable set to the number of -c calls to fcn with iflag = 1. -c -c njev is an integer output variable set to the number of -c calls to fcn with iflag = 2. -c -c ipvt is an integer output array of length n. ipvt -c defines a permutation matrix p such that jac*p = q*r, -c where jac is the final calculated jacobian, q is -c orthogonal (not stored), and r is upper triangular -c with diagonal elements of nonincreasing magnitude. -c column j of p is column ipvt(j) of the identity matrix. -c -c qtf is an output array of length n which contains -c the first n elements of the vector (q transpose)*fvec. -c -c wa1, wa2, and wa3 are work arrays of length n. -c -c wa4 is a work array of length m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... dpmpar,enorm,lmpar,qrfac -c -c fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod -c -c argonne national laboratory. 
minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,iflag,iter,j,l - double precision actred,delta,dirder,epsmch,fnorm,fnorm1,gnorm, - * one,par,pnorm,prered,p1,p5,p25,p75,p0001,ratio, - * sum,temp,temp1,temp2,xnorm,zero - double precision dpmpar,enorm - data one,p1,p5,p25,p75,p0001,zero - * /1.0d0,1.0d-1,5.0d-1,2.5d-1,7.5d-1,1.0d-4,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - info = 0 - iflag = 0 - nfev = 0 - njev = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. m .lt. n .or. ldfjac .lt. m - * .or. ftol .lt. zero .or. xtol .lt. zero .or. gtol .lt. zero - * .or. maxfev .le. 0 .or. factor .le. zero) go to 300 - if (mode .ne. 2) go to 20 - do 10 j = 1, n - if (diag(j) .le. zero) go to 300 - 10 continue - 20 continue -c -c evaluate the function at the starting point -c and calculate its norm. -c - iflag = 1 - call fcn(m,n,x,fvec,fjac,ldfjac,iflag) - nfev = 1 - if (iflag .lt. 0) go to 300 - fnorm = enorm(m,fvec) -c -c initialize levenberg-marquardt parameter and iteration counter. -c - par = zero - iter = 1 -c -c beginning of the outer loop. -c - 30 continue -c -c calculate the jacobian matrix. -c - iflag = 2 - call fcn(m,n,x,fvec,fjac,ldfjac,iflag) - njev = njev + 1 - if (iflag .lt. 0) go to 300 -c -c if requested, call fcn to enable printing of iterates. -c - if (nprint .le. 0) go to 40 - iflag = 0 - if (mod(iter-1,nprint) .eq. 0) - * call fcn(m,n,x,fvec,fjac,ldfjac,iflag) - if (iflag .lt. 0) go to 300 - 40 continue -c -c compute the qr factorization of the jacobian. -c - call qrfac(m,n,fjac,ldfjac,.true.,ipvt,n,wa1,wa2,wa3) -c -c on the first iteration and if mode is 1, scale according -c to the norms of the columns of the initial jacobian. -c - if (iter .ne. 1) go to 80 - if (mode .eq. 2) go to 60 - do 50 j = 1, n - diag(j) = wa2(j) - if (wa2(j) .eq. 
zero) diag(j) = one - 50 continue - 60 continue -c -c on the first iteration, calculate the norm of the scaled x -c and initialize the step bound delta. -c - do 70 j = 1, n - wa3(j) = diag(j)*x(j) - 70 continue - xnorm = enorm(n,wa3) - delta = factor*xnorm - if (delta .eq. zero) delta = factor - 80 continue -c -c form (q transpose)*fvec and store the first n components in -c qtf. -c - do 90 i = 1, m - wa4(i) = fvec(i) - 90 continue - do 130 j = 1, n - if (fjac(j,j) .eq. zero) go to 120 - sum = zero - do 100 i = j, m - sum = sum + fjac(i,j)*wa4(i) - 100 continue - temp = -sum/fjac(j,j) - do 110 i = j, m - wa4(i) = wa4(i) + fjac(i,j)*temp - 110 continue - 120 continue - fjac(j,j) = wa1(j) - qtf(j) = wa4(j) - 130 continue -c -c compute the norm of the scaled gradient. -c - gnorm = zero - if (fnorm .eq. zero) go to 170 - do 160 j = 1, n - l = ipvt(j) - if (wa2(l) .eq. zero) go to 150 - sum = zero - do 140 i = 1, j - sum = sum + fjac(i,j)*(qtf(i)/fnorm) - 140 continue - gnorm = dmax1(gnorm,dabs(sum/wa2(l))) - 150 continue - 160 continue - 170 continue -c -c test for convergence of the gradient norm. -c - if (gnorm .le. gtol) info = 4 - if (info .ne. 0) go to 300 -c -c rescale if necessary. -c - if (mode .eq. 2) go to 190 - do 180 j = 1, n - diag(j) = dmax1(diag(j),wa2(j)) - 180 continue - 190 continue -c -c beginning of the inner loop. -c - 200 continue -c -c determine the levenberg-marquardt parameter. -c - call lmpar(n,fjac,ldfjac,ipvt,diag,qtf,delta,par,wa1,wa2, - * wa3,wa4) -c -c store the direction p and x + p. calculate the norm of p. -c - do 210 j = 1, n - wa1(j) = -wa1(j) - wa2(j) = x(j) + wa1(j) - wa3(j) = diag(j)*wa1(j) - 210 continue - pnorm = enorm(n,wa3) -c -c on the first iteration, adjust the initial step bound. -c - if (iter .eq. 1) delta = dmin1(delta,pnorm) -c -c evaluate the function at x + p and calculate its norm. -c - iflag = 1 - call fcn(m,n,wa2,wa4,fjac,ldfjac,iflag) - nfev = nfev + 1 - if (iflag .lt. 
0) go to 300 - fnorm1 = enorm(m,wa4) -c -c compute the scaled actual reduction. -c - actred = -one - if (p1*fnorm1 .lt. fnorm) actred = one - (fnorm1/fnorm)**2 -c -c compute the scaled predicted reduction and -c the scaled directional derivative. -c - do 230 j = 1, n - wa3(j) = zero - l = ipvt(j) - temp = wa1(l) - do 220 i = 1, j - wa3(i) = wa3(i) + fjac(i,j)*temp - 220 continue - 230 continue - temp1 = enorm(n,wa3)/fnorm - temp2 = (dsqrt(par)*pnorm)/fnorm - prered = temp1**2 + temp2**2/p5 - dirder = -(temp1**2 + temp2**2) -c -c compute the ratio of the actual to the predicted -c reduction. -c - ratio = zero - if (prered .ne. zero) ratio = actred/prered -c -c update the step bound. -c - if (ratio .gt. p25) go to 240 - if (actred .ge. zero) temp = p5 - if (actred .lt. zero) - * temp = p5*dirder/(dirder + p5*actred) - if (p1*fnorm1 .ge. fnorm .or. temp .lt. p1) temp = p1 - delta = temp*dmin1(delta,pnorm/p1) - par = par/temp - go to 260 - 240 continue - if (par .ne. zero .and. ratio .lt. p75) go to 250 - delta = pnorm/p5 - par = p5*par - 250 continue - 260 continue -c -c test for successful iteration. -c - if (ratio .lt. p0001) go to 290 -c -c successful iteration. update x, fvec, and their norms. -c - do 270 j = 1, n - x(j) = wa2(j) - wa2(j) = diag(j)*x(j) - 270 continue - do 280 i = 1, m - fvec(i) = wa4(i) - 280 continue - xnorm = enorm(n,wa2) - fnorm = fnorm1 - iter = iter + 1 - 290 continue -c -c tests for convergence. -c - if (dabs(actred) .le. ftol .and. prered .le. ftol - * .and. p5*ratio .le. one) info = 1 - if (delta .le. xtol*xnorm) info = 2 - if (dabs(actred) .le. ftol .and. prered .le. ftol - * .and. p5*ratio .le. one .and. info .eq. 2) info = 3 - if (info .ne. 0) go to 300 -c -c tests for termination and stringent tolerances. -c - if (nfev .ge. maxfev) info = 5 - if (dabs(actred) .le. epsmch .and. prered .le. epsmch - * .and. p5*ratio .le. one) info = 6 - if (delta .le. epsmch*xnorm) info = 7 - if (gnorm .le. epsmch) info = 8 - if (info .ne. 
0) go to 300 -c -c end of the inner loop. repeat if iteration unsuccessful. -c - if (ratio .lt. p0001) go to 200 -c -c end of the outer loop. -c - go to 30 - 300 continue -c -c termination, either normal or user imposed. -c - if (iflag .lt. 0) info = iflag - iflag = 0 - if (nprint .gt. 0) call fcn(m,n,x,fvec,fjac,ldfjac,iflag) - return -c -c last card of subroutine lmder. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmder1.f b/scipy-0.10.1/scipy/optimize/minpack/lmder1.f deleted file mode 100644 index d691940fd7..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmder1.f +++ /dev/null @@ -1,156 +0,0 @@ - subroutine lmder1(fcn,m,n,x,fvec,fjac,ldfjac,tol,info,ipvt,wa, - * lwa) - integer m,n,ldfjac,info,lwa - integer ipvt(n) - double precision tol - double precision x(n),fvec(m),fjac(ldfjac,n),wa(lwa) - external fcn -c ********** -c -c subroutine lmder1 -c -c the purpose of lmder1 is to minimize the sum of the squares of -c m nonlinear functions in n variables by a modification of the -c levenberg-marquardt algorithm. this is done by using the more -c general least-squares solver lmder. the user must provide a -c subroutine which calculates the functions and the jacobian. -c -c the subroutine statement is -c -c subroutine lmder1(fcn,m,n,x,fvec,fjac,ldfjac,tol,info, -c ipvt,wa,lwa) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions and the jacobian. fcn must -c be declared in an external statement in the user -c calling program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,fjac,ldfjac,iflag) -c integer m,n,ldfjac,iflag -c double precision x(n),fvec(m),fjac(ldfjac,n) -c ---------- -c if iflag = 1 calculate the functions at x and -c return this vector in fvec. do not alter fjac. -c if iflag = 2 calculate the jacobian at x and -c return this matrix in fjac. do not alter fvec. 
-c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of lmder1. -c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length m which contains -c the functions evaluated at the output x. -c -c fjac is an output m by n array. the upper n by n submatrix -c of fjac contains an upper triangular matrix r with -c diagonal elements of nonincreasing magnitude such that -c -c t t t -c p *(jac *jac)*p = r *r, -c -c where p is a permutation matrix and jac is the final -c calculated jacobian. column j of p is column ipvt(j) -c (see below) of the identity matrix. the lower trapezoidal -c part of fjac contains information generated during -c the computation of r. -c -c ldfjac is a positive integer input variable not less than m -c which specifies the leading dimension of the array fjac. -c -c tol is a nonnegative input variable. termination occurs -c when the algorithm estimates either that the relative -c error in the sum of squares is at most tol or that -c the relative error between x and the solution is at -c most tol. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 algorithm estimates that the relative error -c in the sum of squares is at most tol. -c -c info = 2 algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info = 3 conditions for info = 1 and info = 2 both hold. 
-c -c info = 4 fvec is orthogonal to the columns of the -c jacobian to machine precision. -c -c info = 5 number of calls to fcn with iflag = 1 has -c reached 100*(n+1). -c -c info = 6 tol is too small. no further reduction in -c the sum of squares is possible. -c -c info = 7 tol is too small. no further improvement in -c the approximate solution x is possible. -c -c ipvt is an integer output array of length n. ipvt -c defines a permutation matrix p such that jac*p = q*r, -c where jac is the final calculated jacobian, q is -c orthogonal (not stored), and r is upper triangular -c with diagonal elements of nonincreasing magnitude. -c column j of p is column ipvt(j) of the identity matrix. -c -c wa is a work array of length lwa. -c -c lwa is a positive integer input variable not less than 5*n+m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... lmder -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer maxfev,mode,nfev,njev,nprint - double precision factor,ftol,gtol,xtol,zero - data factor,zero /1.0d2,0.0d0/ - info = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. m .lt. n .or. ldfjac .lt. m .or. tol .lt. zero - * .or. lwa .lt. 5*n + m) go to 10 -c -c call lmder. -c - maxfev = 100*(n + 1) - ftol = tol - xtol = tol - gtol = zero - mode = 1 - nprint = 0 - call lmder(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol,maxfev, - * wa(1),mode,factor,nprint,info,nfev,njev,ipvt,wa(n+1), - * wa(2*n+1),wa(3*n+1),wa(4*n+1),wa(5*n+1)) - if (info .eq. 8) info = 4 - 10 continue - return -c -c last card of subroutine lmder1. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmdif.f b/scipy-0.10.1/scipy/optimize/minpack/lmdif.f deleted file mode 100644 index dd3d4ee256..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmdif.f +++ /dev/null @@ -1,454 +0,0 @@ - subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn, - * diag,mode,factor,nprint,info,nfev,fjac,ldfjac, - * ipvt,qtf,wa1,wa2,wa3,wa4) - integer m,n,maxfev,mode,nprint,info,nfev,ldfjac - integer ipvt(n) - double precision ftol,xtol,gtol,epsfcn,factor - double precision x(n),fvec(m),diag(n),fjac(ldfjac,n),qtf(n), - * wa1(n),wa2(n),wa3(n),wa4(m) - external fcn -c ********** -c -c subroutine lmdif -c -c the purpose of lmdif is to minimize the sum of the squares of -c m nonlinear functions in n variables by a modification of -c the levenberg-marquardt algorithm. the user must provide a -c subroutine which calculates the functions. the jacobian is -c then calculated by a forward-difference approximation. -c -c the subroutine statement is -c -c subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn, -c diag,mode,factor,nprint,info,nfev,fjac, -c ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions. fcn must be declared -c in an external statement in the user calling -c program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,iflag) -c integer m,n,iflag -c double precision x(n),fvec(m) -c ---------- -c calculate the functions at x and -c return this vector in fvec. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of lmdif. -c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an array of length n. 
on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length m which contains -c the functions evaluated at the output x. -c -c ftol is a nonnegative input variable. termination -c occurs when both the actual and predicted relative -c reductions in the sum of squares are at most ftol. -c therefore, ftol measures the relative error desired -c in the sum of squares. -c -c xtol is a nonnegative input variable. termination -c occurs when the relative error between two consecutive -c iterates is at most xtol. therefore, xtol measures the -c relative error desired in the approximate solution. -c -c gtol is a nonnegative input variable. termination -c occurs when the cosine of the angle between fvec and -c any column of the jacobian is at most gtol in absolute -c value. therefore, gtol measures the orthogonality -c desired between the function vector and the columns -c of the jacobian. -c -c maxfev is a positive integer input variable. termination -c occurs when the number of calls to fcn is at least -c maxfev by the end of an iteration. -c -c epsfcn is an input variable used in determining a suitable -c step length for the forward-difference approximation. this -c approximation assumes that the relative errors in the -c functions are of the order of epsfcn. if epsfcn is less -c than the machine precision, it is assumed that the relative -c errors in the functions are of the order of the machine -c precision. -c -c diag is an array of length n. if mode = 1 (see -c below), diag is internally set. if mode = 2, diag -c must contain positive entries that serve as -c multiplicative scale factors for the variables. -c -c mode is an integer input variable. if mode = 1, the -c variables will be scaled internally. if mode = 2, -c the scaling is specified by the input diag. other -c values of mode are equivalent to mode = 1. 
-c -c factor is a positive input variable used in determining the -c initial step bound. this bound is set to the product of -c factor and the euclidean norm of diag*x if nonzero, or else -c to factor itself. in most cases factor should lie in the -c interval (.1,100.). 100. is a generally recommended value. -c -c nprint is an integer input variable that enables controlled -c printing of iterates if it is positive. in this case, -c fcn is called with iflag = 0 at the beginning of the first -c iteration and every nprint iterations thereafter and -c immediately prior to return, with x and fvec available -c for printing. if nprint is not positive, no special calls -c of fcn with iflag = 0 are made. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 both actual and predicted relative reductions -c in the sum of squares are at most ftol. -c -c info = 2 relative error between two consecutive iterates -c is at most xtol. -c -c info = 3 conditions for info = 1 and info = 2 both hold. -c -c info = 4 the cosine of the angle between fvec and any -c column of the jacobian is at most gtol in -c absolute value. -c -c info = 5 number of calls to fcn has reached or -c exceeded maxfev. -c -c info = 6 ftol is too small. no further reduction in -c the sum of squares is possible. -c -c info = 7 xtol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 8 gtol is too small. fvec is orthogonal to the -c columns of the jacobian to machine precision. -c -c nfev is an integer output variable set to the number of -c calls to fcn. -c -c fjac is an output m by n array. 
the upper n by n submatrix -c of fjac contains an upper triangular matrix r with -c diagonal elements of nonincreasing magnitude such that -c -c t t t -c p *(jac *jac)*p = r *r, -c -c where p is a permutation matrix and jac is the final -c calculated jacobian. column j of p is column ipvt(j) -c (see below) of the identity matrix. the lower trapezoidal -c part of fjac contains information generated during -c the computation of r. -c -c ldfjac is a positive integer input variable not less than m -c which specifies the leading dimension of the array fjac. -c -c ipvt is an integer output array of length n. ipvt -c defines a permutation matrix p such that jac*p = q*r, -c where jac is the final calculated jacobian, q is -c orthogonal (not stored), and r is upper triangular -c with diagonal elements of nonincreasing magnitude. -c column j of p is column ipvt(j) of the identity matrix. -c -c qtf is an output array of length n which contains -c the first n elements of the vector (q transpose)*fvec. -c -c wa1, wa2, and wa3 are work arrays of length n. -c -c wa4 is a work array of length m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... dpmpar,enorm,fdjac2,lmpar,qrfac -c -c fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,iflag,iter,j,l - double precision actred,delta,dirder,epsmch,fnorm,fnorm1,gnorm, - * one,par,pnorm,prered,p1,p5,p25,p75,p0001,ratio, - * sum,temp,temp1,temp2,xnorm,zero - double precision dpmpar,enorm - data one,p1,p5,p25,p75,p0001,zero - * /1.0d0,1.0d-1,5.0d-1,2.5d-1,7.5d-1,1.0d-4,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - info = 0 - iflag = 0 - nfev = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. m .lt. n .or. ldfjac .lt. m - * .or. ftol .lt. zero .or. xtol .lt. zero .or. gtol .lt. zero - * .or. maxfev .le. 0 .or. 
factor .le. zero) go to 300 - if (mode .ne. 2) go to 20 - do 10 j = 1, n - if (diag(j) .le. zero) go to 300 - 10 continue - 20 continue -c -c evaluate the function at the starting point -c and calculate its norm. -c - iflag = 1 - call fcn(m,n,x,fvec,iflag) - nfev = 1 - if (iflag .lt. 0) go to 300 - fnorm = enorm(m,fvec) -c -c initialize levenberg-marquardt parameter and iteration counter. -c - par = zero - iter = 1 -c -c beginning of the outer loop. -c - 30 continue -c -c calculate the jacobian matrix. -c - iflag = 2 - call fdjac2(fcn,m,n,x,fvec,fjac,ldfjac,iflag,epsfcn,wa4) - nfev = nfev + n - if (iflag .lt. 0) go to 300 -c -c if requested, call fcn to enable printing of iterates. -c - if (nprint .le. 0) go to 40 - iflag = 0 - if (mod(iter-1,nprint) .eq. 0) call fcn(m,n,x,fvec,iflag) - if (iflag .lt. 0) go to 300 - 40 continue -c -c compute the qr factorization of the jacobian. -c - call qrfac(m,n,fjac,ldfjac,.true.,ipvt,n,wa1,wa2,wa3) -c -c on the first iteration and if mode is 1, scale according -c to the norms of the columns of the initial jacobian. -c - if (iter .ne. 1) go to 80 - if (mode .eq. 2) go to 60 - do 50 j = 1, n - diag(j) = wa2(j) - if (wa2(j) .eq. zero) diag(j) = one - 50 continue - 60 continue -c -c on the first iteration, calculate the norm of the scaled x -c and initialize the step bound delta. -c - do 70 j = 1, n - wa3(j) = diag(j)*x(j) - 70 continue - xnorm = enorm(n,wa3) - delta = factor*xnorm - if (delta .eq. zero) delta = factor - 80 continue -c -c form (q transpose)*fvec and store the first n components in -c qtf. -c - do 90 i = 1, m - wa4(i) = fvec(i) - 90 continue - do 130 j = 1, n - if (fjac(j,j) .eq. zero) go to 120 - sum = zero - do 100 i = j, m - sum = sum + fjac(i,j)*wa4(i) - 100 continue - temp = -sum/fjac(j,j) - do 110 i = j, m - wa4(i) = wa4(i) + fjac(i,j)*temp - 110 continue - 120 continue - fjac(j,j) = wa1(j) - qtf(j) = wa4(j) - 130 continue -c -c compute the norm of the scaled gradient. -c - gnorm = zero - if (fnorm .eq. 
zero) go to 170 - do 160 j = 1, n - l = ipvt(j) - if (wa2(l) .eq. zero) go to 150 - sum = zero - do 140 i = 1, j - sum = sum + fjac(i,j)*(qtf(i)/fnorm) - 140 continue - gnorm = dmax1(gnorm,dabs(sum/wa2(l))) - 150 continue - 160 continue - 170 continue -c -c test for convergence of the gradient norm. -c - if (gnorm .le. gtol) info = 4 - if (info .ne. 0) go to 300 -c -c rescale if necessary. -c - if (mode .eq. 2) go to 190 - do 180 j = 1, n - diag(j) = dmax1(diag(j),wa2(j)) - 180 continue - 190 continue -c -c beginning of the inner loop. -c - 200 continue -c -c determine the levenberg-marquardt parameter. -c - call lmpar(n,fjac,ldfjac,ipvt,diag,qtf,delta,par,wa1,wa2, - * wa3,wa4) -c -c store the direction p and x + p. calculate the norm of p. -c - do 210 j = 1, n - wa1(j) = -wa1(j) - wa2(j) = x(j) + wa1(j) - wa3(j) = diag(j)*wa1(j) - 210 continue - pnorm = enorm(n,wa3) -c -c on the first iteration, adjust the initial step bound. -c - if (iter .eq. 1) delta = dmin1(delta,pnorm) -c -c evaluate the function at x + p and calculate its norm. -c - iflag = 1 - call fcn(m,n,wa2,wa4,iflag) - nfev = nfev + 1 - if (iflag .lt. 0) go to 300 - fnorm1 = enorm(m,wa4) -c -c compute the scaled actual reduction. -c - actred = -one - if (p1*fnorm1 .lt. fnorm) actred = one - (fnorm1/fnorm)**2 -c -c compute the scaled predicted reduction and -c the scaled directional derivative. -c - do 230 j = 1, n - wa3(j) = zero - l = ipvt(j) - temp = wa1(l) - do 220 i = 1, j - wa3(i) = wa3(i) + fjac(i,j)*temp - 220 continue - 230 continue - temp1 = enorm(n,wa3)/fnorm - temp2 = (dsqrt(par)*pnorm)/fnorm - prered = temp1**2 + temp2**2/p5 - dirder = -(temp1**2 + temp2**2) -c -c compute the ratio of the actual to the predicted -c reduction. -c - ratio = zero - if (prered .ne. zero) ratio = actred/prered -c -c update the step bound. -c - if (ratio .gt. p25) go to 240 - if (actred .ge. zero) temp = p5 - if (actred .lt. zero) - * temp = p5*dirder/(dirder + p5*actred) - if (p1*fnorm1 .ge. fnorm .or. temp .lt. 
p1) temp = p1 - delta = temp*dmin1(delta,pnorm/p1) - par = par/temp - go to 260 - 240 continue - if (par .ne. zero .and. ratio .lt. p75) go to 250 - delta = pnorm/p5 - par = p5*par - 250 continue - 260 continue -c -c test for successful iteration. -c - if (ratio .lt. p0001) go to 290 -c -c successful iteration. update x, fvec, and their norms. -c - do 270 j = 1, n - x(j) = wa2(j) - wa2(j) = diag(j)*x(j) - 270 continue - do 280 i = 1, m - fvec(i) = wa4(i) - 280 continue - xnorm = enorm(n,wa2) - fnorm = fnorm1 - iter = iter + 1 - 290 continue -c -c tests for convergence. -c - if (dabs(actred) .le. ftol .and. prered .le. ftol - * .and. p5*ratio .le. one) info = 1 - if (delta .le. xtol*xnorm) info = 2 - if (dabs(actred) .le. ftol .and. prered .le. ftol - * .and. p5*ratio .le. one .and. info .eq. 2) info = 3 - if (info .ne. 0) go to 300 -c -c tests for termination and stringent tolerances. -c - if (nfev .ge. maxfev) info = 5 - if (dabs(actred) .le. epsmch .and. prered .le. epsmch - * .and. p5*ratio .le. one) info = 6 - if (delta .le. epsmch*xnorm) info = 7 - if (gnorm .le. epsmch) info = 8 - if (info .ne. 0) go to 300 -c -c end of the inner loop. repeat if iteration unsuccessful. -c - if (ratio .lt. p0001) go to 200 -c -c end of the outer loop. -c - go to 30 - 300 continue -c -c termination, either normal or user imposed. -c - if (iflag .lt. 0) info = iflag - iflag = 0 - if (nprint .gt. 0) call fcn(m,n,x,fvec,iflag) - return -c -c last card of subroutine lmdif. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmdif1.f b/scipy-0.10.1/scipy/optimize/minpack/lmdif1.f deleted file mode 100644 index 70f8aae052..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmdif1.f +++ /dev/null @@ -1,135 +0,0 @@ - subroutine lmdif1(fcn,m,n,x,fvec,tol,info,iwa,wa,lwa) - integer m,n,info,lwa - integer iwa(n) - double precision tol - double precision x(n),fvec(m),wa(lwa) - external fcn -c ********** -c -c subroutine lmdif1 -c -c the purpose of lmdif1 is to minimize the sum of the squares of -c m nonlinear functions in n variables by a modification of the -c levenberg-marquardt algorithm. this is done by using the more -c general least-squares solver lmdif. the user must provide a -c subroutine which calculates the functions. the jacobian is -c then calculated by a forward-difference approximation. -c -c the subroutine statement is -c -c subroutine lmdif1(fcn,m,n,x,fvec,tol,info,iwa,wa,lwa) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions. fcn must be declared -c in an external statement in the user calling -c program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,iflag) -c integer m,n,iflag -c double precision x(n),fvec(m) -c ---------- -c calculate the functions at x and -c return this vector in fvec. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of lmdif1. -c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length m which contains -c the functions evaluated at the output x. 
-c -c tol is a nonnegative input variable. termination occurs -c when the algorithm estimates either that the relative -c error in the sum of squares is at most tol or that -c the relative error between x and the solution is at -c most tol. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 algorithm estimates that the relative error -c in the sum of squares is at most tol. -c -c info = 2 algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info = 3 conditions for info = 1 and info = 2 both hold. -c -c info = 4 fvec is orthogonal to the columns of the -c jacobian to machine precision. -c -c info = 5 number of calls to fcn has reached or -c exceeded 200*(n+1). -c -c info = 6 tol is too small. no further reduction in -c the sum of squares is possible. -c -c info = 7 tol is too small. no further improvement in -c the approximate solution x is possible. -c -c iwa is an integer work array of length n. -c -c wa is a work array of length lwa. -c -c lwa is a positive integer input variable not less than -c m*n+5*n+m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... lmdif -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer maxfev,mode,mp5n,nfev,nprint - double precision epsfcn,factor,ftol,gtol,xtol,zero - data factor,zero /1.0d2,0.0d0/ - info = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. m .lt. n .or. tol .lt. zero - * .or. lwa .lt. m*n + 5*n + m) go to 10 -c -c call lmdif. 
-c - maxfev = 200*(n + 1) - ftol = tol - xtol = tol - gtol = zero - epsfcn = zero - mode = 1 - nprint = 0 - mp5n = m + 5*n - call lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,wa(1), - * mode,factor,nprint,info,nfev,wa(mp5n+1),m,iwa, - * wa(n+1),wa(2*n+1),wa(3*n+1),wa(4*n+1),wa(5*n+1)) - if (info .eq. 8) info = 4 - 10 continue - return -c -c last card of subroutine lmdif1. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmpar.f b/scipy-0.10.1/scipy/optimize/minpack/lmpar.f deleted file mode 100644 index 26c422a79e..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmpar.f +++ /dev/null @@ -1,264 +0,0 @@ - subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,wa1, - * wa2) - integer n,ldr - integer ipvt(n) - double precision delta,par - double precision r(ldr,n),diag(n),qtb(n),x(n),sdiag(n),wa1(n), - * wa2(n) -c ********** -c -c subroutine lmpar -c -c given an m by n matrix a, an n by n nonsingular diagonal -c matrix d, an m-vector b, and a positive number delta, -c the problem is to determine a value for the parameter -c par such that if x solves the system -c -c a*x = b , sqrt(par)*d*x = 0 , -c -c in the least squares sense, and dxnorm is the euclidean -c norm of d*x, then either par is zero and -c -c (dxnorm-delta) .le. 0.1*delta , -c -c or par is positive and -c -c abs(dxnorm-delta) .le. 0.1*delta . -c -c this subroutine completes the solution of the problem -c if it is provided with the necessary information from the -c qr factorization, with column pivoting, of a. that is, if -c a*p = q*r, where p is a permutation matrix, q has orthogonal -c columns, and r is an upper triangular matrix with diagonal -c elements of nonincreasing magnitude, then lmpar expects -c the full upper triangle of r, the permutation matrix p, -c and the first n components of (q transpose)*b. on output -c lmpar also provides an upper triangular matrix s such that -c -c t t t -c p *(a *a + par*d*d)*p = s *s . 
-c -c s is employed within lmpar and may be of separate interest. -c -c only a few iterations are generally needed for convergence -c of the algorithm. if, however, the limit of 10 iterations -c is reached, then the output par will contain the best -c value obtained so far. -c -c the subroutine statement is -c -c subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag, -c wa1,wa2) -c -c where -c -c n is a positive integer input variable set to the order of r. -c -c r is an n by n array. on input the full upper triangle -c must contain the full upper triangle of the matrix r. -c on output the full upper triangle is unaltered, and the -c strict lower triangle contains the strict upper triangle -c (transposed) of the upper triangular matrix s. -c -c ldr is a positive integer input variable not less than n -c which specifies the leading dimension of the array r. -c -c ipvt is an integer input array of length n which defines the -c permutation matrix p such that a*p = q*r. column j of p -c is column ipvt(j) of the identity matrix. -c -c diag is an input array of length n which must contain the -c diagonal elements of the matrix d. -c -c qtb is an input array of length n which must contain the first -c n elements of the vector (q transpose)*b. -c -c delta is a positive input variable which specifies an upper -c bound on the euclidean norm of d*x. -c -c par is a nonnegative variable. on input par contains an -c initial estimate of the levenberg-marquardt parameter. -c on output par contains the final estimate. -c -c x is an output array of length n which contains the least -c squares solution of the system a*x = b, sqrt(par)*d*x = 0, -c for the output par. -c -c sdiag is an output array of length n which contains the -c diagonal elements of the upper triangular matrix s. -c -c wa1 and wa2 are work arrays of length n. -c -c subprograms called -c -c minpack-supplied ... dpmpar,enorm,qrsolv -c -c fortran-supplied ... dabs,dmax1,dmin1,dsqrt -c -c argonne national laboratory. 
minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,iter,j,jm1,jp1,k,l,nsing - double precision dxnorm,dwarf,fp,gnorm,parc,parl,paru,p1,p001, - * sum,temp,zero - double precision dpmpar,enorm - data p1,p001,zero /1.0d-1,1.0d-3,0.0d0/ -c -c dwarf is the smallest positive magnitude. -c - dwarf = dpmpar(2) -c -c compute and store in x the gauss-newton direction. if the -c jacobian is rank-deficient, obtain a least squares solution. -c - nsing = n - do 10 j = 1, n - wa1(j) = qtb(j) - if (r(j,j) .eq. zero .and. nsing .eq. n) nsing = j - 1 - if (nsing .lt. n) wa1(j) = zero - 10 continue - if (nsing .lt. 1) go to 50 - do 40 k = 1, nsing - j = nsing - k + 1 - wa1(j) = wa1(j)/r(j,j) - temp = wa1(j) - jm1 = j - 1 - if (jm1 .lt. 1) go to 30 - do 20 i = 1, jm1 - wa1(i) = wa1(i) - r(i,j)*temp - 20 continue - 30 continue - 40 continue - 50 continue - do 60 j = 1, n - l = ipvt(j) - x(l) = wa1(j) - 60 continue -c -c initialize the iteration counter. -c evaluate the function at the origin, and test -c for acceptance of the gauss-newton direction. -c - iter = 0 - do 70 j = 1, n - wa2(j) = diag(j)*x(j) - 70 continue - dxnorm = enorm(n,wa2) - fp = dxnorm - delta - if (fp .le. p1*delta) go to 220 -c -c if the jacobian is not rank deficient, the newton -c step provides a lower bound, parl, for the zero of -c the function. otherwise set this bound to zero. -c - parl = zero - if (nsing .lt. n) go to 120 - do 80 j = 1, n - l = ipvt(j) - wa1(j) = diag(l)*(wa2(l)/dxnorm) - 80 continue - do 110 j = 1, n - sum = zero - jm1 = j - 1 - if (jm1 .lt. 1) go to 100 - do 90 i = 1, jm1 - sum = sum + r(i,j)*wa1(i) - 90 continue - 100 continue - wa1(j) = (wa1(j) - sum)/r(j,j) - 110 continue - temp = enorm(n,wa1) - parl = ((fp/delta)/temp)/temp - 120 continue -c -c calculate an upper bound, paru, for the zero of the function. 
-c - do 140 j = 1, n - sum = zero - do 130 i = 1, j - sum = sum + r(i,j)*qtb(i) - 130 continue - l = ipvt(j) - wa1(j) = sum/diag(l) - 140 continue - gnorm = enorm(n,wa1) - paru = gnorm/delta - if (paru .eq. zero) paru = dwarf/dmin1(delta,p1) -c -c if the input par lies outside of the interval (parl,paru), -c set par to the closer endpoint. -c - par = dmax1(par,parl) - par = dmin1(par,paru) - if (par .eq. zero) par = gnorm/dxnorm -c -c beginning of an iteration. -c - 150 continue - iter = iter + 1 -c -c evaluate the function at the current value of par. -c - if (par .eq. zero) par = dmax1(dwarf,p001*paru) - temp = dsqrt(par) - do 160 j = 1, n - wa1(j) = temp*diag(j) - 160 continue - call qrsolv(n,r,ldr,ipvt,wa1,qtb,x,sdiag,wa2) - do 170 j = 1, n - wa2(j) = diag(j)*x(j) - 170 continue - dxnorm = enorm(n,wa2) - temp = fp - fp = dxnorm - delta -c -c if the function is small enough, accept the current value -c of par. also test for the exceptional cases where parl -c is zero or the number of iterations has reached 10. -c - if (dabs(fp) .le. p1*delta - * .or. parl .eq. zero .and. fp .le. temp - * .and. temp .lt. zero .or. iter .eq. 10) go to 220 -c -c compute the newton correction. -c - do 180 j = 1, n - l = ipvt(j) - wa1(j) = diag(l)*(wa2(l)/dxnorm) - 180 continue - do 210 j = 1, n - wa1(j) = wa1(j)/sdiag(j) - temp = wa1(j) - jp1 = j + 1 - if (n .lt. jp1) go to 200 - do 190 i = jp1, n - wa1(i) = wa1(i) - r(i,j)*temp - 190 continue - 200 continue - 210 continue - temp = enorm(n,wa1) - parc = ((fp/delta)/temp)/temp -c -c depending on the sign of the function, update parl or paru. -c - if (fp .gt. zero) parl = dmax1(parl,par) - if (fp .lt. zero) paru = dmin1(paru,par) -c -c compute an improved estimate for par. -c - par = dmax1(parl,par+parc) -c -c end of an iteration. -c - go to 150 - 220 continue -c -c termination. -c - if (iter .eq. 0) par = zero - return -c -c last card of subroutine lmpar. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmstr.f b/scipy-0.10.1/scipy/optimize/minpack/lmstr.f deleted file mode 100644 index d9a7893f85..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmstr.f +++ /dev/null @@ -1,466 +0,0 @@ - subroutine lmstr(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol, - * maxfev,diag,mode,factor,nprint,info,nfev,njev, - * ipvt,qtf,wa1,wa2,wa3,wa4) - integer m,n,ldfjac,maxfev,mode,nprint,info,nfev,njev - integer ipvt(n) - logical sing - double precision ftol,xtol,gtol,factor - double precision x(n),fvec(m),fjac(ldfjac,n),diag(n),qtf(n), - * wa1(n),wa2(n),wa3(n),wa4(m) -c ********** -c -c subroutine lmstr -c -c the purpose of lmstr is to minimize the sum of the squares of -c m nonlinear functions in n variables by a modification of -c the levenberg-marquardt algorithm which uses minimal storage. -c the user must provide a subroutine which calculates the -c functions and the rows of the jacobian. -c -c the subroutine statement is -c -c subroutine lmstr(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol, -c maxfev,diag,mode,factor,nprint,info,nfev, -c njev,ipvt,qtf,wa1,wa2,wa3,wa4) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions and the rows of the jacobian. -c fcn must be declared in an external statement in the -c user calling program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,fjrow,iflag) -c integer m,n,iflag -c double precision x(n),fvec(m),fjrow(n) -c ---------- -c if iflag = 1 calculate the functions at x and -c return this vector in fvec. -c if iflag = i calculate the (i-1)-st row of the -c jacobian at x and return this vector in fjrow. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of lmstr. -c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. 
-c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an array of length n. on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length m which contains -c the functions evaluated at the output x. -c -c fjac is an output n by n array. the upper triangle of fjac -c contains an upper triangular matrix r such that -c -c t t t -c p *(jac *jac)*p = r *r, -c -c where p is a permutation matrix and jac is the final -c calculated jacobian. column j of p is column ipvt(j) -c (see below) of the identity matrix. the lower triangular -c part of fjac contains information generated during -c the computation of r. -c -c ldfjac is a positive integer input variable not less than n -c which specifies the leading dimension of the array fjac. -c -c ftol is a nonnegative input variable. termination -c occurs when both the actual and predicted relative -c reductions in the sum of squares are at most ftol. -c therefore, ftol measures the relative error desired -c in the sum of squares. -c -c xtol is a nonnegative input variable. termination -c occurs when the relative error between two consecutive -c iterates is at most xtol. therefore, xtol measures the -c relative error desired in the approximate solution. -c -c gtol is a nonnegative input variable. termination -c occurs when the cosine of the angle between fvec and -c any column of the jacobian is at most gtol in absolute -c value. therefore, gtol measures the orthogonality -c desired between the function vector and the columns -c of the jacobian. -c -c maxfev is a positive integer input variable. termination -c occurs when the number of calls to fcn with iflag = 1 -c has reached maxfev. -c -c diag is an array of length n. if mode = 1 (see -c below), diag is internally set. 
if mode = 2, diag -c must contain positive entries that serve as -c multiplicative scale factors for the variables. -c -c mode is an integer input variable. if mode = 1, the -c variables will be scaled internally. if mode = 2, -c the scaling is specified by the input diag. other -c values of mode are equivalent to mode = 1. -c -c factor is a positive input variable used in determining the -c initial step bound. this bound is set to the product of -c factor and the euclidean norm of diag*x if nonzero, or else -c to factor itself. in most cases factor should lie in the -c interval (.1,100.). 100. is a generally recommended value. -c -c nprint is an integer input variable that enables controlled -c printing of iterates if it is positive. in this case, -c fcn is called with iflag = 0 at the beginning of the first -c iteration and every nprint iterations thereafter and -c immediately prior to return, with x and fvec available -c for printing. if nprint is not positive, no special calls -c of fcn with iflag = 0 are made. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 both actual and predicted relative reductions -c in the sum of squares are at most ftol. -c -c info = 2 relative error between two consecutive iterates -c is at most xtol. -c -c info = 3 conditions for info = 1 and info = 2 both hold. -c -c info = 4 the cosine of the angle between fvec and any -c column of the jacobian is at most gtol in -c absolute value. -c -c info = 5 number of calls to fcn with iflag = 1 has -c reached maxfev. -c -c info = 6 ftol is too small. no further reduction in -c the sum of squares is possible. -c -c info = 7 xtol is too small. no further improvement in -c the approximate solution x is possible. -c -c info = 8 gtol is too small. 
fvec is orthogonal to the -c columns of the jacobian to machine precision. -c -c nfev is an integer output variable set to the number of -c calls to fcn with iflag = 1. -c -c njev is an integer output variable set to the number of -c calls to fcn with iflag = 2. -c -c ipvt is an integer output array of length n. ipvt -c defines a permutation matrix p such that jac*p = q*r, -c where jac is the final calculated jacobian, q is -c orthogonal (not stored), and r is upper triangular. -c column j of p is column ipvt(j) of the identity matrix. -c -c qtf is an output array of length n which contains -c the first n elements of the vector (q transpose)*fvec. -c -c wa1, wa2, and wa3 are work arrays of length n. -c -c wa4 is a work array of length m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... dpmpar,enorm,lmpar,qrfac,rwupdt -c -c fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, dudley v. goetschel, kenneth e. hillstrom, -c jorge j. more -c -c ********** - integer i,iflag,iter,j,l - double precision actred,delta,dirder,epsmch,fnorm,fnorm1,gnorm, - * one,par,pnorm,prered,p1,p5,p25,p75,p0001,ratio, - * sum,temp,temp1,temp2,xnorm,zero - double precision dpmpar,enorm - data one,p1,p5,p25,p75,p0001,zero - * /1.0d0,1.0d-1,5.0d-1,2.5d-1,7.5d-1,1.0d-4,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c - info = 0 - iflag = 0 - nfev = 0 - njev = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. m .lt. n .or. ldfjac .lt. n - * .or. ftol .lt. zero .or. xtol .lt. zero .or. gtol .lt. zero - * .or. maxfev .le. 0 .or. factor .le. zero) go to 340 - if (mode .ne. 2) go to 20 - do 10 j = 1, n - if (diag(j) .le. zero) go to 340 - 10 continue - 20 continue -c -c evaluate the function at the starting point -c and calculate its norm. -c - iflag = 1 - call fcn(m,n,x,fvec,wa3,iflag) - nfev = 1 - if (iflag .lt. 
0) go to 340 - fnorm = enorm(m,fvec) -c -c initialize levenberg-marquardt parameter and iteration counter. -c - par = zero - iter = 1 -c -c beginning of the outer loop. -c - 30 continue -c -c if requested, call fcn to enable printing of iterates. -c - if (nprint .le. 0) go to 40 - iflag = 0 - if (mod(iter-1,nprint) .eq. 0) call fcn(m,n,x,fvec,wa3,iflag) - if (iflag .lt. 0) go to 340 - 40 continue -c -c compute the qr factorization of the jacobian matrix -c calculated one row at a time, while simultaneously -c forming (q transpose)*fvec and storing the first -c n components in qtf. -c - do 60 j = 1, n - qtf(j) = zero - do 50 i = 1, n - fjac(i,j) = zero - 50 continue - 60 continue - iflag = 2 - do 70 i = 1, m - call fcn(m,n,x,fvec,wa3,iflag) - if (iflag .lt. 0) go to 340 - temp = fvec(i) - call rwupdt(n,fjac,ldfjac,wa3,qtf,temp,wa1,wa2) - iflag = iflag + 1 - 70 continue - njev = njev + 1 -c -c if the jacobian is rank deficient, call qrfac to -c reorder its columns and update the components of qtf. -c - sing = .false. - do 80 j = 1, n - if (fjac(j,j) .eq. zero) sing = .true. - ipvt(j) = j - wa2(j) = enorm(j,fjac(1,j)) - 80 continue - if (.not.sing) go to 130 - call qrfac(n,n,fjac,ldfjac,.true.,ipvt,n,wa1,wa2,wa3) - do 120 j = 1, n - if (fjac(j,j) .eq. zero) go to 110 - sum = zero - do 90 i = j, n - sum = sum + fjac(i,j)*qtf(i) - 90 continue - temp = -sum/fjac(j,j) - do 100 i = j, n - qtf(i) = qtf(i) + fjac(i,j)*temp - 100 continue - 110 continue - fjac(j,j) = wa1(j) - 120 continue - 130 continue -c -c on the first iteration and if mode is 1, scale according -c to the norms of the columns of the initial jacobian. -c - if (iter .ne. 1) go to 170 - if (mode .eq. 2) go to 150 - do 140 j = 1, n - diag(j) = wa2(j) - if (wa2(j) .eq. zero) diag(j) = one - 140 continue - 150 continue -c -c on the first iteration, calculate the norm of the scaled x -c and initialize the step bound delta. 
-c - do 160 j = 1, n - wa3(j) = diag(j)*x(j) - 160 continue - xnorm = enorm(n,wa3) - delta = factor*xnorm - if (delta .eq. zero) delta = factor - 170 continue -c -c compute the norm of the scaled gradient. -c - gnorm = zero - if (fnorm .eq. zero) go to 210 - do 200 j = 1, n - l = ipvt(j) - if (wa2(l) .eq. zero) go to 190 - sum = zero - do 180 i = 1, j - sum = sum + fjac(i,j)*(qtf(i)/fnorm) - 180 continue - gnorm = dmax1(gnorm,dabs(sum/wa2(l))) - 190 continue - 200 continue - 210 continue -c -c test for convergence of the gradient norm. -c - if (gnorm .le. gtol) info = 4 - if (info .ne. 0) go to 340 -c -c rescale if necessary. -c - if (mode .eq. 2) go to 230 - do 220 j = 1, n - diag(j) = dmax1(diag(j),wa2(j)) - 220 continue - 230 continue -c -c beginning of the inner loop. -c - 240 continue -c -c determine the levenberg-marquardt parameter. -c - call lmpar(n,fjac,ldfjac,ipvt,diag,qtf,delta,par,wa1,wa2, - * wa3,wa4) -c -c store the direction p and x + p. calculate the norm of p. -c - do 250 j = 1, n - wa1(j) = -wa1(j) - wa2(j) = x(j) + wa1(j) - wa3(j) = diag(j)*wa1(j) - 250 continue - pnorm = enorm(n,wa3) -c -c on the first iteration, adjust the initial step bound. -c - if (iter .eq. 1) delta = dmin1(delta,pnorm) -c -c evaluate the function at x + p and calculate its norm. -c - iflag = 1 - call fcn(m,n,wa2,wa4,wa3,iflag) - nfev = nfev + 1 - if (iflag .lt. 0) go to 340 - fnorm1 = enorm(m,wa4) -c -c compute the scaled actual reduction. -c - actred = -one - if (p1*fnorm1 .lt. fnorm) actred = one - (fnorm1/fnorm)**2 -c -c compute the scaled predicted reduction and -c the scaled directional derivative. -c - do 270 j = 1, n - wa3(j) = zero - l = ipvt(j) - temp = wa1(l) - do 260 i = 1, j - wa3(i) = wa3(i) + fjac(i,j)*temp - 260 continue - 270 continue - temp1 = enorm(n,wa3)/fnorm - temp2 = (dsqrt(par)*pnorm)/fnorm - prered = temp1**2 + temp2**2/p5 - dirder = -(temp1**2 + temp2**2) -c -c compute the ratio of the actual to the predicted -c reduction. 
-c - ratio = zero - if (prered .ne. zero) ratio = actred/prered -c -c update the step bound. -c - if (ratio .gt. p25) go to 280 - if (actred .ge. zero) temp = p5 - if (actred .lt. zero) - * temp = p5*dirder/(dirder + p5*actred) - if (p1*fnorm1 .ge. fnorm .or. temp .lt. p1) temp = p1 - delta = temp*dmin1(delta,pnorm/p1) - par = par/temp - go to 300 - 280 continue - if (par .ne. zero .and. ratio .lt. p75) go to 290 - delta = pnorm/p5 - par = p5*par - 290 continue - 300 continue -c -c test for successful iteration. -c - if (ratio .lt. p0001) go to 330 -c -c successful iteration. update x, fvec, and their norms. -c - do 310 j = 1, n - x(j) = wa2(j) - wa2(j) = diag(j)*x(j) - 310 continue - do 320 i = 1, m - fvec(i) = wa4(i) - 320 continue - xnorm = enorm(n,wa2) - fnorm = fnorm1 - iter = iter + 1 - 330 continue -c -c tests for convergence. -c - if (dabs(actred) .le. ftol .and. prered .le. ftol - * .and. p5*ratio .le. one) info = 1 - if (delta .le. xtol*xnorm) info = 2 - if (dabs(actred) .le. ftol .and. prered .le. ftol - * .and. p5*ratio .le. one .and. info .eq. 2) info = 3 - if (info .ne. 0) go to 340 -c -c tests for termination and stringent tolerances. -c - if (nfev .ge. maxfev) info = 5 - if (dabs(actred) .le. epsmch .and. prered .le. epsmch - * .and. p5*ratio .le. one) info = 6 - if (delta .le. epsmch*xnorm) info = 7 - if (gnorm .le. epsmch) info = 8 - if (info .ne. 0) go to 340 -c -c end of the inner loop. repeat if iteration unsuccessful. -c - if (ratio .lt. p0001) go to 240 -c -c end of the outer loop. -c - go to 30 - 340 continue -c -c termination, either normal or user imposed. -c - if (iflag .lt. 0) info = iflag - iflag = 0 - if (nprint .gt. 0) call fcn(m,n,x,fvec,wa3,iflag) - return -c -c last card of subroutine lmstr. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/lmstr1.f b/scipy-0.10.1/scipy/optimize/minpack/lmstr1.f deleted file mode 100644 index 2fa8ee1c50..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/lmstr1.f +++ /dev/null @@ -1,156 +0,0 @@ - subroutine lmstr1(fcn,m,n,x,fvec,fjac,ldfjac,tol,info,ipvt,wa, - * lwa) - integer m,n,ldfjac,info,lwa - integer ipvt(n) - double precision tol - double precision x(n),fvec(m),fjac(ldfjac,n),wa(lwa) - external fcn -c ********** -c -c subroutine lmstr1 -c -c the purpose of lmstr1 is to minimize the sum of the squares of -c m nonlinear functions in n variables by a modification of -c the levenberg-marquardt algorithm which uses minimal storage. -c this is done by using the more general least-squares solver -c lmstr. the user must provide a subroutine which calculates -c the functions and the rows of the jacobian. -c -c the subroutine statement is -c -c subroutine lmstr1(fcn,m,n,x,fvec,fjac,ldfjac,tol,info, -c ipvt,wa,lwa) -c -c where -c -c fcn is the name of the user-supplied subroutine which -c calculates the functions and the rows of the jacobian. -c fcn must be declared in an external statement in the -c user calling program, and should be written as follows. -c -c subroutine fcn(m,n,x,fvec,fjrow,iflag) -c integer m,n,iflag -c double precision x(n),fvec(m),fjrow(n) -c ---------- -c if iflag = 1 calculate the functions at x and -c return this vector in fvec. -c if iflag = i calculate the (i-1)-st row of the -c jacobian at x and return this vector in fjrow. -c ---------- -c return -c end -c -c the value of iflag should not be changed by fcn unless -c the user wants to terminate execution of lmstr1. -c in this case set iflag to a negative integer. -c -c m is a positive integer input variable set to the number -c of functions. -c -c n is a positive integer input variable set to the number -c of variables. n must not exceed m. -c -c x is an array of length n. 
on input x must contain -c an initial estimate of the solution vector. on output x -c contains the final estimate of the solution vector. -c -c fvec is an output array of length m which contains -c the functions evaluated at the output x. -c -c fjac is an output n by n array. the upper triangle of fjac -c contains an upper triangular matrix r such that -c -c t t t -c p *(jac *jac)*p = r *r, -c -c where p is a permutation matrix and jac is the final -c calculated jacobian. column j of p is column ipvt(j) -c (see below) of the identity matrix. the lower triangular -c part of fjac contains information generated during -c the computation of r. -c -c ldfjac is a positive integer input variable not less than n -c which specifies the leading dimension of the array fjac. -c -c tol is a nonnegative input variable. termination occurs -c when the algorithm estimates either that the relative -c error in the sum of squares is at most tol or that -c the relative error between x and the solution is at -c most tol. -c -c info is an integer output variable. if the user has -c terminated execution, info is set to the (negative) -c value of iflag. see description of fcn. otherwise, -c info is set as follows. -c -c info = 0 improper input parameters. -c -c info = 1 algorithm estimates that the relative error -c in the sum of squares is at most tol. -c -c info = 2 algorithm estimates that the relative error -c between x and the solution is at most tol. -c -c info = 3 conditions for info = 1 and info = 2 both hold. -c -c info = 4 fvec is orthogonal to the columns of the -c jacobian to machine precision. -c -c info = 5 number of calls to fcn with iflag = 1 has -c reached 100*(n+1). -c -c info = 6 tol is too small. no further reduction in -c the sum of squares is possible. -c -c info = 7 tol is too small. no further improvement in -c the approximate solution x is possible. -c -c ipvt is an integer output array of length n. 
ipvt -c defines a permutation matrix p such that jac*p = q*r, -c where jac is the final calculated jacobian, q is -c orthogonal (not stored), and r is upper triangular. -c column j of p is column ipvt(j) of the identity matrix. -c -c wa is a work array of length lwa. -c -c lwa is a positive integer input variable not less than 5*n+m. -c -c subprograms called -c -c user-supplied ...... fcn -c -c minpack-supplied ... lmstr -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, dudley v. goetschel, kenneth e. hillstrom, -c jorge j. more -c -c ********** - integer maxfev,mode,nfev,njev,nprint - double precision factor,ftol,gtol,xtol,zero - data factor,zero /1.0d2,0.0d0/ - info = 0 -c -c check the input parameters for errors. -c - if (n .le. 0 .or. m .lt. n .or. ldfjac .lt. n .or. tol .lt. zero - * .or. lwa .lt. 5*n + m) go to 10 -c -c call lmstr. -c - maxfev = 100*(n + 1) - ftol = tol - xtol = tol - gtol = zero - mode = 1 - nprint = 0 - call lmstr(fcn,m,n,x,fvec,fjac,ldfjac,ftol,xtol,gtol,maxfev, - * wa(1),mode,factor,nprint,info,nfev,njev,ipvt,wa(n+1), - * wa(2*n+1),wa(3*n+1),wa(4*n+1),wa(5*n+1)) - if (info .eq. 8) info = 4 - 10 continue - return -c -c last card of subroutine lmstr1. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/qform.f b/scipy-0.10.1/scipy/optimize/minpack/qform.f deleted file mode 100644 index 087b2478b9..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/qform.f +++ /dev/null @@ -1,95 +0,0 @@ - subroutine qform(m,n,q,ldq,wa) - integer m,n,ldq - double precision q(ldq,m),wa(m) -c ********** -c -c subroutine qform -c -c this subroutine proceeds from the computed qr factorization of -c an m by n matrix a to accumulate the m by m orthogonal matrix -c q from its factored form. -c -c the subroutine statement is -c -c subroutine qform(m,n,q,ldq,wa) -c -c where -c -c m is a positive integer input variable set to the number -c of rows of a and the order of q. 
-c -c n is a positive integer input variable set to the number -c of columns of a. -c -c q is an m by m array. on input the full lower trapezoid in -c the first min(m,n) columns of q contains the factored form. -c on output q has been accumulated into a square matrix. -c -c ldq is a positive integer input variable not less than m -c which specifies the leading dimension of the array q. -c -c wa is a work array of length m. -c -c subprograms called -c -c fortran-supplied ... min0 -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j,jm1,k,l,minmn,np1 - double precision one,sum,temp,zero - data one,zero /1.0d0,0.0d0/ -c -c zero out upper triangle of q in the first min(m,n) columns. -c - minmn = min0(m,n) - if (minmn .lt. 2) go to 30 - do 20 j = 2, minmn - jm1 = j - 1 - do 10 i = 1, jm1 - q(i,j) = zero - 10 continue - 20 continue - 30 continue -c -c initialize remaining columns to those of the identity matrix. -c - np1 = n + 1 - if (m .lt. np1) go to 60 - do 50 j = np1, m - do 40 i = 1, m - q(i,j) = zero - 40 continue - q(j,j) = one - 50 continue - 60 continue -c -c accumulate q from its factored form. -c - do 120 l = 1, minmn - k = minmn - l + 1 - do 70 i = k, m - wa(i) = q(i,k) - q(i,k) = zero - 70 continue - q(k,k) = one - if (wa(k) .eq. zero) go to 110 - do 100 j = k, m - sum = zero - do 80 i = k, m - sum = sum + q(i,j)*wa(i) - 80 continue - temp = sum/wa(k) - do 90 i = k, m - q(i,j) = q(i,j) - temp*wa(i) - 90 continue - 100 continue - 110 continue - 120 continue - return -c -c last card of subroutine qform. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/qrfac.f b/scipy-0.10.1/scipy/optimize/minpack/qrfac.f deleted file mode 100644 index cb686086c5..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/qrfac.f +++ /dev/null @@ -1,164 +0,0 @@ - subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa) - integer m,n,lda,lipvt - integer ipvt(lipvt) - logical pivot - double precision a(lda,n),rdiag(n),acnorm(n),wa(n) -c ********** -c -c subroutine qrfac -c -c this subroutine uses householder transformations with column -c pivoting (optional) to compute a qr factorization of the -c m by n matrix a. that is, qrfac determines an orthogonal -c matrix q, a permutation matrix p, and an upper trapezoidal -c matrix r with diagonal elements of nonincreasing magnitude, -c such that a*p = q*r. the householder transformation for -c column k, k = 1,2,...,min(m,n), is of the form -c -c t -c i - (1/u(k))*u*u -c -c where u has zeros in the first k-1 positions. the form of -c this transformation and the method of pivoting first -c appeared in the corresponding linpack subroutine. -c -c the subroutine statement is -c -c subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa) -c -c where -c -c m is a positive integer input variable set to the number -c of rows of a. -c -c n is a positive integer input variable set to the number -c of columns of a. -c -c a is an m by n array. on input a contains the matrix for -c which the qr factorization is to be computed. on output -c the strict upper trapezoidal part of a contains the strict -c upper trapezoidal part of r, and the lower trapezoidal -c part of a contains a factored form of q (the non-trivial -c elements of the u vectors described above). -c -c lda is a positive integer input variable not less than m -c which specifies the leading dimension of the array a. -c -c pivot is a logical input variable. if pivot is set true, -c then column pivoting is enforced. if pivot is set false, -c then no column pivoting is done. 
-c -c ipvt is an integer output array of length lipvt. ipvt -c defines the permutation matrix p such that a*p = q*r. -c column j of p is column ipvt(j) of the identity matrix. -c if pivot is false, ipvt is not referenced. -c -c lipvt is a positive integer input variable. if pivot is false, -c then lipvt may be as small as 1. if pivot is true, then -c lipvt must be at least n. -c -c rdiag is an output array of length n which contains the -c diagonal elements of r. -c -c acnorm is an output array of length n which contains the -c norms of the corresponding columns of the input matrix a. -c if this information is not needed, then acnorm can coincide -c with rdiag. -c -c wa is a work array of length n. if pivot is false, then wa -c can coincide with rdiag. -c -c subprograms called -c -c minpack-supplied ... dpmpar,enorm -c -c fortran-supplied ... dmax1,dsqrt,min0 -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j,jp1,k,kmax,minmn - double precision ajnorm,epsmch,one,p05,sum,temp,zero - double precision dpmpar,enorm - data one,p05,zero /1.0d0,5.0d-2,0.0d0/ -c -c epsmch is the machine precision. -c - epsmch = dpmpar(1) -c -c compute the initial column norms and initialize several arrays. -c - do 10 j = 1, n - acnorm(j) = enorm(m,a(1,j)) - rdiag(j) = acnorm(j) - wa(j) = rdiag(j) - if (pivot) ipvt(j) = j - 10 continue -c -c reduce a to r with householder transformations. -c - minmn = min0(m,n) - do 110 j = 1, minmn - if (.not.pivot) go to 40 -c -c bring the column of largest norm into the pivot position. -c - kmax = j - do 20 k = j, n - if (rdiag(k) .gt. rdiag(kmax)) kmax = k - 20 continue - if (kmax .eq. 
j) go to 40 - do 30 i = 1, m - temp = a(i,j) - a(i,j) = a(i,kmax) - a(i,kmax) = temp - 30 continue - rdiag(kmax) = rdiag(j) - wa(kmax) = wa(j) - k = ipvt(j) - ipvt(j) = ipvt(kmax) - ipvt(kmax) = k - 40 continue -c -c compute the householder transformation to reduce the -c j-th column of a to a multiple of the j-th unit vector. -c - ajnorm = enorm(m-j+1,a(j,j)) - if (ajnorm .eq. zero) go to 100 - if (a(j,j) .lt. zero) ajnorm = -ajnorm - do 50 i = j, m - a(i,j) = a(i,j)/ajnorm - 50 continue - a(j,j) = a(j,j) + one -c -c apply the transformation to the remaining columns -c and update the norms. -c - jp1 = j + 1 - if (n .lt. jp1) go to 100 - do 90 k = jp1, n - sum = zero - do 60 i = j, m - sum = sum + a(i,j)*a(i,k) - 60 continue - temp = sum/a(j,j) - do 70 i = j, m - a(i,k) = a(i,k) - temp*a(i,j) - 70 continue - if (.not.pivot .or. rdiag(k) .eq. zero) go to 80 - temp = a(j,k)/rdiag(k) - rdiag(k) = rdiag(k)*dsqrt(dmax1(zero,one-temp**2)) - if (p05*(rdiag(k)/wa(k))**2 .gt. epsmch) go to 80 - rdiag(k) = enorm(m-j,a(jp1,k)) - wa(k) = rdiag(k) - 80 continue - 90 continue - 100 continue - rdiag(j) = -ajnorm - 110 continue - return -c -c last card of subroutine qrfac. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/qrsolv.f b/scipy-0.10.1/scipy/optimize/minpack/qrsolv.f deleted file mode 100644 index f48954b359..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/qrsolv.f +++ /dev/null @@ -1,193 +0,0 @@ - subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa) - integer n,ldr - integer ipvt(n) - double precision r(ldr,n),diag(n),qtb(n),x(n),sdiag(n),wa(n) -c ********** -c -c subroutine qrsolv -c -c given an m by n matrix a, an n by n diagonal matrix d, -c and an m-vector b, the problem is to determine an x which -c solves the system -c -c a*x = b , d*x = 0 , -c -c in the least squares sense. -c -c this subroutine completes the solution of the problem -c if it is provided with the necessary information from the -c qr factorization, with column pivoting, of a. 
that is, if -c a*p = q*r, where p is a permutation matrix, q has orthogonal -c columns, and r is an upper triangular matrix with diagonal -c elements of nonincreasing magnitude, then qrsolv expects -c the full upper triangle of r, the permutation matrix p, -c and the first n components of (q transpose)*b. the system -c a*x = b, d*x = 0, is then equivalent to -c -c t t -c r*z = q *b , p *d*p*z = 0 , -c -c where x = p*z. if this system does not have full rank, -c then a least squares solution is obtained. on output qrsolv -c also provides an upper triangular matrix s such that -c -c t t t -c p *(a *a + d*d)*p = s *s . -c -c s is computed within qrsolv and may be of separate interest. -c -c the subroutine statement is -c -c subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa) -c -c where -c -c n is a positive integer input variable set to the order of r. -c -c r is an n by n array. on input the full upper triangle -c must contain the full upper triangle of the matrix r. -c on output the full upper triangle is unaltered, and the -c strict lower triangle contains the strict upper triangle -c (transposed) of the upper triangular matrix s. -c -c ldr is a positive integer input variable not less than n -c which specifies the leading dimension of the array r. -c -c ipvt is an integer input array of length n which defines the -c permutation matrix p such that a*p = q*r. column j of p -c is column ipvt(j) of the identity matrix. -c -c diag is an input array of length n which must contain the -c diagonal elements of the matrix d. -c -c qtb is an input array of length n which must contain the first -c n elements of the vector (q transpose)*b. -c -c x is an output array of length n which contains the least -c squares solution of the system a*x = b, d*x = 0. -c -c sdiag is an output array of length n which contains the -c diagonal elements of the upper triangular matrix s. -c -c wa is a work array of length n. -c -c subprograms called -c -c fortran-supplied ... 
dabs,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j,jp1,k,kp1,l,nsing - double precision cos,cotan,p5,p25,qtbpj,sin,sum,tan,temp,zero - data p5,p25,zero /5.0d-1,2.5d-1,0.0d0/ -c -c copy r and (q transpose)*b to preserve input and initialize s. -c in particular, save the diagonal elements of r in x. -c - do 20 j = 1, n - do 10 i = j, n - r(i,j) = r(j,i) - 10 continue - x(j) = r(j,j) - wa(j) = qtb(j) - 20 continue -c -c eliminate the diagonal matrix d using a givens rotation. -c - do 100 j = 1, n -c -c prepare the row of d to be eliminated, locating the -c diagonal element using p from the qr factorization. -c - l = ipvt(j) - if (diag(l) .eq. zero) go to 90 - do 30 k = j, n - sdiag(k) = zero - 30 continue - sdiag(j) = diag(l) -c -c the transformations to eliminate the row of d -c modify only a single element of (q transpose)*b -c beyond the first n, which is initially zero. -c - qtbpj = zero - do 80 k = j, n -c -c determine a givens rotation which eliminates the -c appropriate element in the current row of d. -c - if (sdiag(k) .eq. zero) go to 70 - if (dabs(r(k,k)) .ge. dabs(sdiag(k))) go to 40 - cotan = r(k,k)/sdiag(k) - sin = p5/dsqrt(p25+p25*cotan**2) - cos = sin*cotan - go to 50 - 40 continue - tan = sdiag(k)/r(k,k) - cos = p5/dsqrt(p25+p25*tan**2) - sin = cos*tan - 50 continue -c -c compute the modified diagonal element of r and -c the modified element of ((q transpose)*b,0). -c - r(k,k) = cos*r(k,k) + sin*sdiag(k) - temp = cos*wa(k) + sin*qtbpj - qtbpj = -sin*wa(k) + cos*qtbpj - wa(k) = temp -c -c accumulate the tranformation in the row of s. -c - kp1 = k + 1 - if (n .lt. kp1) go to 70 - do 60 i = kp1, n - temp = cos*r(i,k) + sin*sdiag(i) - sdiag(i) = -sin*r(i,k) + cos*sdiag(i) - r(i,k) = temp - 60 continue - 70 continue - 80 continue - 90 continue -c -c store the diagonal element of s and restore -c the corresponding diagonal element of r. 
-c - sdiag(j) = r(j,j) - r(j,j) = x(j) - 100 continue -c -c solve the triangular system for z. if the system is -c singular, then obtain a least squares solution. -c - nsing = n - do 110 j = 1, n - if (sdiag(j) .eq. zero .and. nsing .eq. n) nsing = j - 1 - if (nsing .lt. n) wa(j) = zero - 110 continue - if (nsing .lt. 1) go to 150 - do 140 k = 1, nsing - j = nsing - k + 1 - sum = zero - jp1 = j + 1 - if (nsing .lt. jp1) go to 130 - do 120 i = jp1, nsing - sum = sum + r(i,j)*wa(i) - 120 continue - 130 continue - wa(j) = (wa(j) - sum)/sdiag(j) - 140 continue - 150 continue -c -c permute the components of z back to components of x. -c - do 160 j = 1, n - l = ipvt(j) - x(l) = wa(j) - 160 continue - return -c -c last card of subroutine qrsolv. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/r1mpyq.f b/scipy-0.10.1/scipy/optimize/minpack/r1mpyq.f deleted file mode 100644 index ec99b96ce9..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/r1mpyq.f +++ /dev/null @@ -1,92 +0,0 @@ - subroutine r1mpyq(m,n,a,lda,v,w) - integer m,n,lda - double precision a(lda,n),v(n),w(n) -c ********** -c -c subroutine r1mpyq -c -c given an m by n matrix a, this subroutine computes a*q where -c q is the product of 2*(n - 1) transformations -c -c gv(n-1)*...*gv(1)*gw(1)*...*gw(n-1) -c -c and gv(i), gw(i) are givens rotations in the (i,n) plane which -c eliminate elements in the i-th and n-th planes, respectively. -c q itself is not given, rather the information to recover the -c gv, gw rotations is supplied. -c -c the subroutine statement is -c -c subroutine r1mpyq(m,n,a,lda,v,w) -c -c where -c -c m is a positive integer input variable set to the number -c of rows of a. -c -c n is a positive integer input variable set to the number -c of columns of a. -c -c a is an m by n array. on input a must contain the matrix -c to be postmultiplied by the orthogonal matrix q -c described above. on output a*q has replaced a. 
-c -c lda is a positive integer input variable not less than m -c which specifies the leading dimension of the array a. -c -c v is an input array of length n. v(i) must contain the -c information necessary to recover the givens rotation gv(i) -c described above. -c -c w is an input array of length n. w(i) must contain the -c information necessary to recover the givens rotation gw(i) -c described above. -c -c subroutines called -c -c fortran-supplied ... dabs,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more -c -c ********** - integer i,j,nmj,nm1 - double precision cos,one,sin,temp - data one /1.0d0/ -c -c apply the first set of givens rotations to a. -c - nm1 = n - 1 - if (nm1 .lt. 1) go to 50 - do 20 nmj = 1, nm1 - j = n - nmj - if (dabs(v(j)) .gt. one) cos = one/v(j) - if (dabs(v(j)) .gt. one) sin = dsqrt(one-cos**2) - if (dabs(v(j)) .le. one) sin = v(j) - if (dabs(v(j)) .le. one) cos = dsqrt(one-sin**2) - do 10 i = 1, m - temp = cos*a(i,j) - sin*a(i,n) - a(i,n) = sin*a(i,j) + cos*a(i,n) - a(i,j) = temp - 10 continue - 20 continue -c -c apply the second set of givens rotations to a. -c - do 40 j = 1, nm1 - if (dabs(w(j)) .gt. one) cos = one/w(j) - if (dabs(w(j)) .gt. one) sin = dsqrt(one-cos**2) - if (dabs(w(j)) .le. one) sin = w(j) - if (dabs(w(j)) .le. one) cos = dsqrt(one-sin**2) - do 30 i = 1, m - temp = cos*a(i,j) + sin*a(i,n) - a(i,n) = -sin*a(i,j) + cos*a(i,n) - a(i,j) = temp - 30 continue - 40 continue - 50 continue - return -c -c last card of subroutine r1mpyq. 
-c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/r1updt.f b/scipy-0.10.1/scipy/optimize/minpack/r1updt.f deleted file mode 100644 index e034973d99..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/r1updt.f +++ /dev/null @@ -1,207 +0,0 @@ - subroutine r1updt(m,n,s,ls,u,v,w,sing) - integer m,n,ls - logical sing - double precision s(ls),u(m),v(n),w(m) -c ********** -c -c subroutine r1updt -c -c given an m by n lower trapezoidal matrix s, an m-vector u, -c and an n-vector v, the problem is to determine an -c orthogonal matrix q such that -c -c t -c (s + u*v )*q -c -c is again lower trapezoidal. -c -c this subroutine determines q as the product of 2*(n - 1) -c transformations -c -c gv(n-1)*...*gv(1)*gw(1)*...*gw(n-1) -c -c where gv(i), gw(i) are givens rotations in the (i,n) plane -c which eliminate elements in the i-th and n-th planes, -c respectively. q itself is not accumulated, rather the -c information to recover the gv, gw rotations is returned. -c -c the subroutine statement is -c -c subroutine r1updt(m,n,s,ls,u,v,w,sing) -c -c where -c -c m is a positive integer input variable set to the number -c of rows of s. -c -c n is a positive integer input variable set to the number -c of columns of s. n must not exceed m. -c -c s is an array of length ls. on input s must contain the lower -c trapezoidal matrix s stored by columns. on output s contains -c the lower trapezoidal matrix produced as described above. -c -c ls is a positive integer input variable not less than -c (n*(2*m-n+1))/2. -c -c u is an input array of length m which must contain the -c vector u. -c -c v is an array of length n. on input v must contain the vector -c v. on output v(i) contains the information necessary to -c recover the givens rotation gv(i) described above. -c -c w is an output array of length m. w(i) contains information -c necessary to recover the givens rotation gw(i) described -c above. -c -c sing is a logical output variable. 
sing is set true if any -c of the diagonal elements of the output s are zero. otherwise -c sing is set false. -c -c subprograms called -c -c minpack-supplied ... dpmpar -c -c fortran-supplied ... dabs,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, kenneth e. hillstrom, jorge j. more, -c john l. nazareth -c -c ********** - integer i,j,jj,l,nmj,nm1 - double precision cos,cotan,giant,one,p5,p25,sin,tan,tau,temp, - * zero - double precision dpmpar - data one,p5,p25,zero /1.0d0,5.0d-1,2.5d-1,0.0d0/ -c -c giant is the largest magnitude. -c - giant = dpmpar(3) -c -c initialize the diagonal element pointer. -c - jj = (n*(2*m - n + 1))/2 - (m - n) -c -c move the nontrivial part of the last column of s into w. -c - l = jj - do 10 i = n, m - w(i) = s(l) - l = l + 1 - 10 continue -c -c rotate the vector v into a multiple of the n-th unit vector -c in such a way that a spike is introduced into w. -c - nm1 = n - 1 - if (nm1 .lt. 1) go to 70 - do 60 nmj = 1, nm1 - j = n - nmj - jj = jj - (m - j + 1) - w(j) = zero - if (v(j) .eq. zero) go to 50 -c -c determine a givens rotation which eliminates the -c j-th element of v. -c - if (dabs(v(n)) .ge. dabs(v(j))) go to 20 - cotan = v(n)/v(j) - sin = p5/dsqrt(p25+p25*cotan**2) - cos = sin*cotan - tau = one - if (dabs(cos)*giant .gt. one) tau = one/cos - go to 30 - 20 continue - tan = v(j)/v(n) - cos = p5/dsqrt(p25+p25*tan**2) - sin = cos*tan - tau = sin - 30 continue -c -c apply the transformation to v and store the information -c necessary to recover the givens rotation. -c - v(n) = sin*v(j) + cos*v(n) - v(j) = tau -c -c apply the transformation to s and extend the spike in w. -c - l = jj - do 40 i = j, m - temp = cos*s(l) - sin*w(i) - w(i) = sin*s(l) + cos*w(i) - s(l) = temp - l = l + 1 - 40 continue - 50 continue - 60 continue - 70 continue -c -c add the spike from the rank 1 update to w. -c - do 80 i = 1, m - w(i) = w(i) + v(n)*u(i) - 80 continue -c -c eliminate the spike. 
-c - sing = .false. - if (nm1 .lt. 1) go to 140 - do 130 j = 1, nm1 - if (w(j) .eq. zero) go to 120 -c -c determine a givens rotation which eliminates the -c j-th element of the spike. -c - if (dabs(s(jj)) .ge. dabs(w(j))) go to 90 - cotan = s(jj)/w(j) - sin = p5/dsqrt(p25+p25*cotan**2) - cos = sin*cotan - tau = one - if (dabs(cos)*giant .gt. one) tau = one/cos - go to 100 - 90 continue - tan = w(j)/s(jj) - cos = p5/dsqrt(p25+p25*tan**2) - sin = cos*tan - tau = sin - 100 continue -c -c apply the transformation to s and reduce the spike in w. -c - l = jj - do 110 i = j, m - temp = cos*s(l) + sin*w(i) - w(i) = -sin*s(l) + cos*w(i) - s(l) = temp - l = l + 1 - 110 continue -c -c store the information necessary to recover the -c givens rotation. -c - w(j) = tau - 120 continue -c -c test for zero diagonal elements in the output s. -c - if (s(jj) .eq. zero) sing = .true. - jj = jj + (m - j + 1) - 130 continue - 140 continue -c -c move w back into the last column of the output s. -c - l = jj - do 150 i = n, m - s(l) = w(i) - l = l + 1 - 150 continue - if (s(jj) .eq. zero) sing = .true. - return -c -c last card of subroutine r1updt. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack/rwupdt.f b/scipy-0.10.1/scipy/optimize/minpack/rwupdt.f deleted file mode 100644 index 05282b5569..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack/rwupdt.f +++ /dev/null @@ -1,113 +0,0 @@ - subroutine rwupdt(n,r,ldr,w,b,alpha,cos,sin) - integer n,ldr - double precision alpha - double precision r(ldr,n),w(n),b(n),cos(n),sin(n) -c ********** -c -c subroutine rwupdt -c -c given an n by n upper triangular matrix r, this subroutine -c computes the qr decomposition of the matrix formed when a row -c is added to r. if the row is specified by the vector w, then -c rwupdt determines an orthogonal matrix q such that when the -c n+1 by n matrix composed of r augmented by w is premultiplied -c by (q transpose), the resulting matrix is upper trapezoidal. 
-c the matrix (q transpose) is the product of n transformations -c -c g(n)*g(n-1)* ... *g(1) -c -c where g(i) is a givens rotation in the (i,n+1) plane which -c eliminates elements in the (n+1)-st plane. rwupdt also -c computes the product (q transpose)*c where c is the -c (n+1)-vector (b,alpha). q itself is not accumulated, rather -c the information to recover the g rotations is supplied. -c -c the subroutine statement is -c -c subroutine rwupdt(n,r,ldr,w,b,alpha,cos,sin) -c -c where -c -c n is a positive integer input variable set to the order of r. -c -c r is an n by n array. on input the upper triangular part of -c r must contain the matrix to be updated. on output r -c contains the updated triangular matrix. -c -c ldr is a positive integer input variable not less than n -c which specifies the leading dimension of the array r. -c -c w is an input array of length n which must contain the row -c vector to be added to r. -c -c b is an array of length n. on input b must contain the -c first n elements of the vector c. on output b contains -c the first n elements of the vector (q transpose)*c. -c -c alpha is a variable. on input alpha must contain the -c (n+1)-st element of the vector c. on output alpha contains -c the (n+1)-st element of the vector (q transpose)*c. -c -c cos is an output array of length n which contains the -c cosines of the transforming givens rotations. -c -c sin is an output array of length n which contains the -c sines of the transforming givens rotations. -c -c subprograms called -c -c fortran-supplied ... dabs,dsqrt -c -c argonne national laboratory. minpack project. march 1980. -c burton s. garbow, dudley v. goetschel, kenneth e. hillstrom, -c jorge j. more -c -c ********** - integer i,j,jm1 - double precision cotan,one,p5,p25,rowj,tan,temp,zero - data one,p5,p25,zero /1.0d0,5.0d-1,2.5d-1,0.0d0/ -c - do 60 j = 1, n - rowj = w(j) - jm1 = j - 1 -c -c apply the previous transformations to -c r(i,j), i=1,2,...,j-1, and to w(j). -c - if (jm1 .lt. 
1) go to 20 - do 10 i = 1, jm1 - temp = cos(i)*r(i,j) + sin(i)*rowj - rowj = -sin(i)*r(i,j) + cos(i)*rowj - r(i,j) = temp - 10 continue - 20 continue -c -c determine a givens rotation which eliminates w(j). -c - cos(j) = one - sin(j) = zero - if (rowj .eq. zero) go to 50 - if (dabs(r(j,j)) .ge. dabs(rowj)) go to 30 - cotan = r(j,j)/rowj - sin(j) = p5/dsqrt(p25+p25*cotan**2) - cos(j) = sin(j)*cotan - go to 40 - 30 continue - tan = rowj/r(j,j) - cos(j) = p5/dsqrt(p25+p25*tan**2) - sin(j) = cos(j)*tan - 40 continue -c -c apply the current transformation to r(j,j), b(j), and alpha. -c - r(j,j) = cos(j)*r(j,j) + sin(j)*rowj - temp = cos(j)*b(j) + sin(j)*alpha - alpha = -sin(j)*b(j) + cos(j)*alpha - b(j) = temp - 50 continue - 60 continue - return -c -c last card of subroutine rwupdt. -c - end diff --git a/scipy-0.10.1/scipy/optimize/minpack2/dcsrch.f b/scipy-0.10.1/scipy/optimize/minpack2/dcsrch.f deleted file mode 100644 index 589d564e9c..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack2/dcsrch.f +++ /dev/null @@ -1,349 +0,0 @@ - subroutine dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax, - + isave,dsave) - character*(*) task - integer isave(2) - double precision f, g, stp, ftol, gtol, xtol, stpmin, stpmax - double precision dsave(13) -c ********** -c -c Subroutine dcsrch -c -c This subroutine finds a step that satisfies a sufficient -c decrease condition and a curvature condition. -c -c Each call of the subroutine updates an interval with -c endpoints stx and sty. The interval is initially chosen -c so that it contains a minimizer of the modified function -c -c psi(stp) = f(stp) - f(0) - ftol*stp*f'(0). -c -c If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the -c interval is chosen so that it contains a minimizer of f. -c -c The algorithm is designed to find a step that satisfies -c the sufficient decrease condition -c -c f(stp) <= f(0) + ftol*stp*f'(0), -c -c and the curvature condition -c -c abs(f'(stp)) <= gtol*abs(f'(0)). 
-c -c If ftol is less than gtol and if, for example, the function -c is bounded below, then there is always a step which satisfies -c both conditions. -c -c If no step can be found that satisfies both conditions, then -c the algorithm stops with a warning. In this case stp only -c satisfies the sufficient decrease condition. -c -c A typical invocation of dcsrch has the following outline: -c -c Evaluate the function at stp = 0.0d0; store in f. -c Evaluate the gradient at stp = 0.0d0; store in g. -c Choose a starting step stp. -c -c task = 'START' -c 10 continue -c call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax, -c + isave,dsave) -c if (task .eq. 'FG') then -c Evaluate the function and the gradient at stp -c go to 10 -c end if -c -c NOTE: The user must not alter work arrays between calls. -c -c The subroutine statement is -c -c subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax, -c task,isave,dsave) -c where -c -c stp is a double precision variable. -c On entry stp is the current estimate of a satisfactory -c step. On initial entry, a positive initial estimate -c must be provided. -c On exit stp is the current estimate of a satisfactory step -c if task = 'FG'. If task = 'CONV' then stp satisfies -c the sufficient decrease and curvature condition. -c -c f is a double precision variable. -c On initial entry f is the value of the function at 0. -c On subsequent entries f is the value of the -c function at stp. -c On exit f is the value of the function at stp. -c -c g is a double precision variable. -c On initial entry g is the derivative of the function at 0. -c On subsequent entries g is the derivative of the -c function at stp. -c On exit g is the derivative of the function at stp. -c -c ftol is a double precision variable. -c On entry ftol specifies a nonnegative tolerance for the -c sufficient decrease condition. -c On exit ftol is unchanged. -c -c gtol is a double precision variable. 
-c On entry gtol specifies a nonnegative tolerance for the -c curvature condition. -c On exit gtol is unchanged. -c -c xtol is a double precision variable. -c On entry xtol specifies a nonnegative relative tolerance -c for an acceptable step. The subroutine exits with a -c warning if the relative difference between sty and stx -c is less than xtol. -c On exit xtol is unchanged. -c -c task is a character variable of length at least 60. -c On initial entry task must be set to 'START'. -c On exit task indicates the required action: -c -c If task(1:2) = 'FG' then evaluate the function and -c derivative at stp and call dcsrch again. -c -c If task(1:4) = 'CONV' then the search is successful. -c -c If task(1:4) = 'WARN' then the subroutine is not able -c to satisfy the convergence conditions. The exit value of -c stp contains the best point found during the search. -c -c If task(1:5) = 'ERROR' then there is an error in the -c input arguments. -c -c On exit with convergence, a warning or an error, the -c variable task contains additional information. -c -c stpmin is a double precision variable. -c On entry stpmin is a nonnegative lower bound for the step. -c On exit stpmin is unchanged. -c -c stpmax is a double precision variable. -c On entry stpmax is a nonnegative upper bound for the step. -c On exit stpmax is unchanged. -c -c isave is an integer work array of dimension 2. -c -c dsave is a double precision work array of dimension 13. -c -c Subprograms called -c -c MINPACK-2 ... dcstep -c -c MINPACK-1 Project. June 1983. -c Argonne National Laboratory. -c Jorge J. More' and David J. Thuente. -c -c MINPACK-2 Project. November 1993. -c Argonne National Laboratory and University of Minnesota. -c Brett M. Averick, Richard G. Carter, and Jorge J. More'. 
-c -c ********** - double precision zero, p5, p66 - parameter (zero=0.0d0,p5=0.5d0,p66=0.66d0) - double precision xtrapl, xtrapu - parameter (xtrapl=1.1d0,xtrapu=4.0d0) - - logical brackt - integer stage - double precision finit, ftest, fm, fx, fxm, fy, fym, ginit, gtest, - + gm, gx, gxm, gy, gym, stx, sty, stmin, stmax, - + width, width1 - - external dcstep - -c Initialization block. - - if (task(1:5) .eq. 'START') then - -c Check the input arguments for errors. - - if (stp .lt. stpmin) task = 'ERROR: STP .LT. STPMIN' - if (stp .gt. stpmax) task = 'ERROR: STP .GT. STPMAX' - if (g .ge. zero) task = 'ERROR: INITIAL G .GE. ZERO' - if (ftol .lt. zero) task = 'ERROR: FTOL .LT. ZERO' - if (gtol .lt. zero) task = 'ERROR: GTOL .LT. ZERO' - if (xtol .lt. zero) task = 'ERROR: XTOL .LT. ZERO' - if (stpmin .lt. zero) task = 'ERROR: STPMIN .LT. ZERO' - if (stpmax .lt. stpmin) task = 'ERROR: STPMAX .LT. STPMIN' - -c Exit if there are errors on input. - - if (task(1:5) .eq. 'ERROR') return - -c Initialize local variables. - - brackt = .false. - stage = 1 - finit = f - ginit = g - gtest = ftol*ginit - width = stpmax - stpmin - width1 = width/p5 - -c The variables stx, fx, gx contain the values of the step, -c function, and derivative at the best step. -c The variables sty, fy, gy contain the value of the step, -c function, and derivative at sty. -c The variables stp, f, g contain the values of the step, -c function, and derivative at stp. - - stx = zero - fx = finit - gx = ginit - sty = zero - fy = finit - gy = ginit - stmin = zero - stmax = stp + xtrapu*stp - task = 'FG' - - go to 10 - - else - -c Restore local variables. - - if (isave(1) .eq. 1) then - brackt = .true. - else - brackt = .false. 
- end if - stage = isave(2) - ginit = dsave(1) - gtest = dsave(2) - gx = dsave(3) - gy = dsave(4) - finit = dsave(5) - fx = dsave(6) - fy = dsave(7) - stx = dsave(8) - sty = dsave(9) - stmin = dsave(10) - stmax = dsave(11) - width = dsave(12) - width1 = dsave(13) - - end if - -c If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the -c algorithm enters the second stage. - - ftest = finit + stp*gtest - if (stage .eq. 1 .and. f .le. ftest .and. g .ge. zero) stage = 2 - -c Test for warnings. - - if (brackt .and. (stp .le. stmin .or. stp .ge. stmax)) - + task = 'WARNING: ROUNDING ERRORS PREVENT PROGRESS' - if (brackt .and. stmax-stmin .le. xtol*stmax) - + task = 'WARNING: XTOL TEST SATISFIED' - if (stp .eq. stpmax .and. f .le. ftest .and. g .le. gtest) - + task = 'WARNING: STP = STPMAX' - if (stp .eq. stpmin .and. (f .gt. ftest .or. g .ge. gtest)) - + task = 'WARNING: STP = STPMIN' - -c Test for convergence. - - if (f .le. ftest .and. abs(g) .le. gtol*(-ginit)) - + task = 'CONVERGENCE' - -c Test for termination. - - if (task(1:4) .eq. 'WARN' .or. task(1:4) .eq. 'CONV') go to 10 - -c A modified function is used to predict the step during the -c first stage if a lower function value has been obtained but -c the decrease is not sufficient. - - if (stage .eq. 1 .and. f .le. fx .and. f .gt. ftest) then - -c Define the modified function and derivative values. - - fm = f - stp*gtest - fxm = fx - stx*gtest - fym = fy - sty*gtest - gm = g - gtest - gxm = gx - gtest - gym = gy - gtest - -c Call dcstep to update stx, sty, and to compute the new step. - - call dcstep(stx,fxm,gxm,sty,fym,gym,stp,fm,gm,brackt,stmin, - + stmax) - -c Reset the function and derivative values for f. - - fx = fxm + stx*gtest - fy = fym + sty*gtest - gx = gxm + gtest - gy = gym + gtest - - else - -c Call dcstep to update stx, sty, and to compute the new step. - - call dcstep(stx,fx,gx,sty,fy,gy,stp,f,g,brackt,stmin,stmax) - - end if - -c Decide if a bisection step is needed. 
- - if (brackt) then - if (abs(sty-stx) .ge. p66*width1) stp = stx + p5*(sty-stx) - width1 = width - width = abs(sty-stx) - end if - -c Set the minimum and maximum steps allowed for stp. - - if (brackt) then - stmin = min(stx,sty) - stmax = max(stx,sty) - else - stmin = stp + xtrapl*(stp-stx) - stmax = stp + xtrapu*(stp-stx) - end if - -c Force the step to be within the bounds stpmax and stpmin. - - stp = max(stp,stpmin) - stp = min(stp,stpmax) - -c If further progress is not possible, let stp be the best -c point obtained during the search. - - if (brackt .and. (stp .le. stmin .or. stp .ge. stmax) .or. - + (brackt .and. stmax-stmin .le. xtol*stmax)) stp = stx - -c Obtain another function and derivative. - - task = 'FG' - - 10 continue - -c Save local variables. - - if (brackt) then - isave(1) = 1 - else - isave(1) = 0 - end if - isave(2) = stage - dsave(1) = ginit - dsave(2) = gtest - dsave(3) = gx - dsave(4) = gy - dsave(5) = finit - dsave(6) = fx - dsave(7) = fy - dsave(8) = stx - dsave(9) = sty - dsave(10) = stmin - dsave(11) = stmax - dsave(12) = width - dsave(13) = width1 - - end diff --git a/scipy-0.10.1/scipy/optimize/minpack2/dcstep.f b/scipy-0.10.1/scipy/optimize/minpack2/dcstep.f deleted file mode 100644 index a6c0d9e233..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack2/dcstep.f +++ /dev/null @@ -1,254 +0,0 @@ - subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt,stpmin, - + stpmax) - logical brackt - double precision stx, fx, dx, sty, fy, dy, stp, fp, dp, stpmin, - + stpmax -c ********** -c -c Subroutine dcstep -c -c This subroutine computes a safeguarded step for a search -c procedure and updates an interval that contains a step that -c satisfies a sufficient decrease and a curvature condition. -c -c The parameter stx contains the step with the least function -c value. If brackt is set to .true. then a minimizer has -c been bracketed in an interval with endpoints stx and sty. -c The parameter stp contains the current step. 
-c The subroutine assumes that if brackt is set to .true. then -c -c min(stx,sty) < stp < max(stx,sty), -c -c and that the derivative at stx is negative in the direction -c of the step. -c -c The subroutine statement is -c -c subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt, -c stpmin,stpmax) -c -c where -c -c stx is a double precision variable. -c On entry stx is the best step obtained so far and is an -c endpoint of the interval that contains the minimizer. -c On exit stx is the updated best step. -c -c fx is a double precision variable. -c On entry fx is the function at stx. -c On exit fx is the function at stx. -c -c dx is a double precision variable. -c On entry dx is the derivative of the function at -c stx. The derivative must be negative in the direction of -c the step, that is, dx and stp - stx must have opposite -c signs. -c On exit dx is the derivative of the function at stx. -c -c sty is a double precision variable. -c On entry sty is the second endpoint of the interval that -c contains the minimizer. -c On exit sty is the updated endpoint of the interval that -c contains the minimizer. -c -c fy is a double precision variable. -c On entry fy is the function at sty. -c On exit fy is the function at sty. -c -c dy is a double precision variable. -c On entry dy is the derivative of the function at sty. -c On exit dy is the derivative of the function at the exit sty. -c -c stp is a double precision variable. -c On entry stp is the current step. If brackt is set to .true. -c then on input stp must be between stx and sty. -c On exit stp is a new trial step. -c -c fp is a double precision variable. -c On entry fp is the function at stp -c On exit fp is unchanged. -c -c dp is a double precision variable. -c On entry dp is the the derivative of the function at stp. -c On exit dp is unchanged. -c -c brackt is an logical variable. -c On entry brackt specifies if a minimizer has been bracketed. -c Initially brackt must be set to .false. 
-c On exit brackt specifies if a minimizer has been bracketed. -c When a minimizer is bracketed brackt is set to .true. -c -c stpmin is a double precision variable. -c On entry stpmin is a lower bound for the step. -c On exit stpmin is unchanged. -c -c stpmax is a double precision variable. -c On entry stpmax is an upper bound for the step. -c On exit stpmax is unchanged. -c -c MINPACK-1 Project. June 1983 -c Argonne National Laboratory. -c Jorge J. More' and David J. Thuente. -c -c MINPACK-2 Project. November 1993. -c Argonne National Laboratory and University of Minnesota. -c Brett M. Averick and Jorge J. More'. -c -c ********** - double precision zero, p66, two, three - parameter (zero=0.0d0,p66=0.66d0,two=2.0d0,three=3.0d0) - - double precision gamma, p, q, r, s, sgnd, stpc, stpf, stpq, theta - - sgnd = dp*(dx/abs(dx)) - -c First case: A higher function value. The minimum is bracketed. -c If the cubic step is closer to stx than the quadratic step, the -c cubic step is taken, otherwise the average of the cubic and -c quadratic steps is taken. - - if (fp .gt. fx) then - theta = three*(fx-fp)/(stp-stx) + dx + dp - s = max(abs(theta),abs(dx),abs(dp)) - gamma = s*sqrt((theta/s)**2-(dx/s)*(dp/s)) - if (stp .lt. stx) gamma = -gamma - p = (gamma-dx) + theta - q = ((gamma-dx)+gamma) + dp - r = p/q - stpc = stx + r*(stp-stx) - stpq = stx + ((dx/((fx-fp)/(stp-stx)+dx))/two)*(stp-stx) - if (abs(stpc-stx) .lt. abs(stpq-stx)) then - stpf = stpc - else - stpf = stpc + (stpq-stpc)/two - end if - brackt = .true. - -c Second case: A lower function value and derivatives of opposite -c sign. The minimum is bracketed. If the cubic step is farther from -c stp than the secant step, the cubic step is taken, otherwise the -c secant step is taken. - - else if (sgnd .lt. zero) then - theta = three*(fx-fp)/(stp-stx) + dx + dp - s = max(abs(theta),abs(dx),abs(dp)) - gamma = s*sqrt((theta/s)**2-(dx/s)*(dp/s)) - if (stp .gt. 
stx) gamma = -gamma - p = (gamma-dp) + theta - q = ((gamma-dp)+gamma) + dx - r = p/q - stpc = stp + r*(stx-stp) - stpq = stp + (dp/(dp-dx))*(stx-stp) - if (abs(stpc-stp) .gt. abs(stpq-stp)) then - stpf = stpc - else - stpf = stpq - end if - brackt = .true. - -c Third case: A lower function value, derivatives of the same sign, -c and the magnitude of the derivative decreases. - - else if (abs(dp) .lt. abs(dx)) then - -c The cubic step is computed only if the cubic tends to infinity -c in the direction of the step or if the minimum of the cubic -c is beyond stp. Otherwise the cubic step is defined to be the -c secant step. - - theta = three*(fx-fp)/(stp-stx) + dx + dp - s = max(abs(theta),abs(dx),abs(dp)) - -c The case gamma = 0 only arises if the cubic does not tend -c to infinity in the direction of the step. - - gamma = s*sqrt(max(zero,(theta/s)**2-(dx/s)*(dp/s))) - if (stp .gt. stx) gamma = -gamma - p = (gamma-dp) + theta - q = (gamma+(dx-dp)) + gamma - r = p/q - if (r .lt. zero .and. gamma .ne. zero) then - stpc = stp + r*(stx-stp) - else if (stp .gt. stx) then - stpc = stpmax - else - stpc = stpmin - end if - stpq = stp + (dp/(dp-dx))*(stx-stp) - - if (brackt) then - -c A minimizer has been bracketed. If the cubic step is -c closer to stp than the secant step, the cubic step is -c taken, otherwise the secant step is taken. - - if (abs(stpc-stp) .lt. abs(stpq-stp)) then - stpf = stpc - else - stpf = stpq - end if - if (stp .gt. stx) then - stpf = min(stp+p66*(sty-stp),stpf) - else - stpf = max(stp+p66*(sty-stp),stpf) - end if - else - -c A minimizer has not been bracketed. If the cubic step is -c farther from stp than the secant step, the cubic step is -c taken, otherwise the secant step is taken. - - if (abs(stpc-stp) .gt. 
abs(stpq-stp)) then - stpf = stpc - else - stpf = stpq - end if - stpf = min(stpmax,stpf) - stpf = max(stpmin,stpf) - end if - -c Fourth case: A lower function value, derivatives of the same sign, -c and the magnitude of the derivative does not decrease. If the -c minimum is not bracketed, the step is either stpmin or stpmax, -c otherwise the cubic step is taken. - - else - if (brackt) then - theta = three*(fp-fy)/(sty-stp) + dy + dp - s = max(abs(theta),abs(dy),abs(dp)) - gamma = s*sqrt((theta/s)**2-(dy/s)*(dp/s)) - if (stp .gt. sty) gamma = -gamma - p = (gamma-dp) + theta - q = ((gamma-dp)+gamma) + dy - r = p/q - stpc = stp + r*(sty-stp) - stpf = stpc - else if (stp .gt. stx) then - stpf = stpmax - else - stpf = stpmin - end if - end if - -c Update the interval which contains a minimizer. - - if (fp .gt. fx) then - sty = stp - fy = fp - dy = dp - else - if (sgnd .lt. zero) then - sty = stx - fy = fx - dy = dx - end if - stx = stp - fx = fp - dx = dp - end if - -c Compute the new step. - - stp = stpf - - end diff --git a/scipy-0.10.1/scipy/optimize/minpack2/minpack2.pyf b/scipy-0.10.1/scipy/optimize/minpack2/minpack2.pyf deleted file mode 100644 index 68f6600141..0000000000 --- a/scipy-0.10.1/scipy/optimize/minpack2/minpack2.pyf +++ /dev/null @@ -1,35 +0,0 @@ -! -*- f90 -*- -python module minpack2 ! in - interface ! in :minpack2 - subroutine dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax,isave,dsave) ! 
in :minpack2:dcsrch.f - double precision, intent(in,out) :: stp - double precision, intent(in,out) :: f - double precision, intent(in,out) :: g - double precision, intent(in) :: ftol - double precision, intent(in) :: gtol - double precision, intent(in) :: xtol - character*60, intent(in, out) :: task - double precision, intent(in) :: stpmin - double precision, intent(in) :: stpmax - integer dimension(2), intent(inout) :: isave - double precision dimension(13), intent(inout) :: dsave - end subroutine dcsrch - subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt,stpmin,stpmax) ! in :minpack2:dcstep.f - double precision, intent(in,out) :: stx - double precision, intent(in,out) :: fx - double precision, intent(in,out) :: dx - double precision, intent(in,out) :: sty - double precision, intent(in,out) :: fy - double precision, intent(in,out) :: dy - double precision, intent(in,out) :: stp - double precision :: fp - double precision :: dp - logical, intent(in,out) :: brackt - double precision :: stpmin - double precision :: stpmax - end subroutine dcstep - end interface -end python module minpack2 - -! This file was auto-generated with f2py (version:2.39.235_1703). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/optimize/nnls.py b/scipy-0.10.1/scipy/optimize/nnls.py deleted file mode 100644 index 2c4aa2ee1f..0000000000 --- a/scipy-0.10.1/scipy/optimize/nnls.py +++ /dev/null @@ -1,58 +0,0 @@ -import _nnls -from numpy import asarray_chkfinite, zeros, double - -__all__ = ['nnls'] - - -def nnls(A,b): - """ - Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper - for a FORTAN non-negative least squares solver. - - Parameters - ---------- - A : ndarray - Matrix ``A`` as shown above. - b : ndarray - Right-hand side vector. - - Returns - ------- - x : ndarray - Solution vector. - rnorm : float - The residual, ``|| Ax-b ||_2``. - - Notes - ----- - The FORTRAN code was published in the book below. The algorithm - is an active set method. 
It solves the KKT (Karush-Kuhn-Tucker) - conditions for the non-negative least squares problem. - - References - ---------- - Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM - - """ - - A,b = map(asarray_chkfinite, (A,b)) - - if len(A.shape)!=2: - raise ValueError("expected matrix") - if len(b.shape)!=1: - raise ValueError("expected vector") - - m,n = A.shape - - if m != b.shape[0]: - raise ValueError("incompatible dimensions") - - w = zeros((n,), dtype=double) - zz = zeros((m,), dtype=double) - index=zeros((n,), dtype=int) - - x,rnorm,mode = _nnls.nnls(A,m,n,b,w,zz,index) - if mode != 1: - raise RuntimeError("too many iterations") - - return x, rnorm diff --git a/scipy-0.10.1/scipy/optimize/nnls/nnls.f b/scipy-0.10.1/scipy/optimize/nnls/nnls.f deleted file mode 100644 index 9868f62857..0000000000 --- a/scipy-0.10.1/scipy/optimize/nnls/nnls.f +++ /dev/null @@ -1,477 +0,0 @@ -C SUBROUTINE NNLS (A,MDA,M,N,B,X,RNORM,W,ZZ,INDEX,MODE) -C -C Algorithm NNLS: NONNEGATIVE LEAST SQUARES -C -c The original version of this code was developed by -c Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory -c 1973 JUN 15, and published in the book -c "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. -c Revised FEB 1995 to accompany reprinting of the book by SIAM. -c -C GIVEN AN M BY N MATRIX, A, AND AN M-VECTOR, B, COMPUTE AN -C N-VECTOR, X, THAT SOLVES THE LEAST SQUARES PROBLEM -C -C A * X = B SUBJECT TO X .GE. 0 -C ------------------------------------------------------------------ -c Subroutine Arguments -c -C A(),MDA,M,N MDA IS THE FIRST DIMENSIONING PARAMETER FOR THE -C ARRAY, A(). ON ENTRY A() CONTAINS THE M BY N -C MATRIX, A. ON EXIT A() CONTAINS -C THE PRODUCT MATRIX, Q*A , WHERE Q IS AN -C M BY M ORTHOGONAL MATRIX GENERATED IMPLICITLY BY -C THIS SUBROUTINE. -C B() ON ENTRY B() CONTAINS THE M-VECTOR, B. ON EXIT B() CON- -C TAINS Q*B. -C X() ON ENTRY X() NEED NOT BE INITIALIZED. ON EXIT X() WILL -C CONTAIN THE SOLUTION VECTOR. 
-C RNORM ON EXIT RNORM CONTAINS THE EUCLIDEAN NORM OF THE -C RESIDUAL VECTOR. -C W() AN N-ARRAY OF WORKING SPACE. ON EXIT W() WILL CONTAIN -C THE DUAL SOLUTION VECTOR. W WILL SATISFY W(I) = 0. -C FOR ALL I IN SET P AND W(I) .LE. 0. FOR ALL I IN SET Z -C ZZ() AN M-ARRAY OF WORKING SPACE. -C INDEX() AN INTEGER WORKING ARRAY OF LENGTH AT LEAST N. -C ON EXIT THE CONTENTS OF THIS ARRAY DEFINE THE SETS -C P AND Z AS FOLLOWS.. -C -C INDEX(1) THRU INDEX(NSETP) = SET P. -C INDEX(IZ1) THRU INDEX(IZ2) = SET Z. -C IZ1 = NSETP + 1 = NPP1 -C IZ2 = N -C MODE THIS IS A SUCCESS-FAILURE FLAG WITH THE FOLLOWING -C MEANINGS. -C 1 THE SOLUTION HAS BEEN COMPUTED SUCCESSFULLY. -C 2 THE DIMENSIONS OF THE PROBLEM ARE BAD. -C EITHER M .LE. 0 OR N .LE. 0. -C 3 ITERATION COUNT EXCEEDED. MORE THAN 3*N ITERATIONS. -C -C ------------------------------------------------------------------ - SUBROUTINE NNLS (A,MDA,M,N,B,X,RNORM,W,ZZ,INDEX,MODE) -C ------------------------------------------------------------------ - integer I, II, IP, ITER, ITMAX, IZ, IZ1, IZ2, IZMAX, J, JJ, JZ, L - integer M, MDA, MODE,N, NPP1, NSETP, RTNKEY -c integer INDEX(N) -c double precision A(MDA,N), B(M), W(N), X(N), ZZ(M) - integer INDEX(*) - double precision A(MDA,*), B(*), W(*), X(*), ZZ(*) - double precision ALPHA, ASAVE, CC, DIFF, DUMMY, FACTOR, RNORM - double precision SM, SS, T, TEMP, TWO, UNORM, UP, WMAX - double precision ZERO, ZTEST - parameter(FACTOR = 0.01d0) - parameter(TWO = 2.0d0, ZERO = 0.0d0) -C ------------------------------------------------------------------ - MODE=1 - IF (M .le. 0 .or. N .le. 0) then - MODE=2 - RETURN - endif - ITER=0 - ITMAX=3*N -C -C INITIALIZE THE ARRAYS INDEX() AND X(). -C - DO 20 I=1,N - X(I)=ZERO - 20 INDEX(I)=I -C - IZ2=N - IZ1=1 - NSETP=0 - NPP1=1 -C ****** MAIN LOOP BEGINS HERE ****** - 30 CONTINUE -C QUIT IF ALL COEFFICIENTS ARE ALREADY IN THE SOLUTION. -C OR IF M COLS OF A HAVE BEEN TRIANGULARIZED. 
-C - IF (IZ1 .GT.IZ2.OR.NSETP.GE.M) GO TO 350 -C -C COMPUTE COMPONENTS OF THE DUAL (NEGATIVE GRADIENT) VECTOR W(). -C - DO 50 IZ=IZ1,IZ2 - J=INDEX(IZ) - SM=ZERO - DO 40 L=NPP1,M - 40 SM=SM+A(L,J)*B(L) - W(J)=SM - 50 continue -C FIND LARGEST POSITIVE W(J). - 60 continue - WMAX=ZERO - DO 70 IZ=IZ1,IZ2 - J=INDEX(IZ) - IF (W(J) .gt. WMAX) then - WMAX=W(J) - IZMAX=IZ - endif - 70 CONTINUE -C -C IF WMAX .LE. 0. GO TO TERMINATION. -C THIS INDICATES SATISFACTION OF THE KUHN-TUCKER CONDITIONS. -C - IF (WMAX .le. ZERO) go to 350 - IZ=IZMAX - J=INDEX(IZ) -C -C THE SIGN OF W(J) IS OK FOR J TO BE MOVED TO SET P. -C BEGIN THE TRANSFORMATION AND CHECK NEW DIAGONAL ELEMENT TO AVOID -C NEAR LINEAR DEPENDENCE. -C - ASAVE=A(NPP1,J) - CALL H12 (1,NPP1,NPP1+1,M,A(1,J),1,UP,DUMMY,1,1,0) - UNORM=ZERO - IF (NSETP .ne. 0) then - DO 90 L=1,NSETP - 90 UNORM=UNORM+A(L,J)**2 - endif - UNORM=sqrt(UNORM) - IF (DIFF(UNORM+ABS(A(NPP1,J))*FACTOR,UNORM) .gt. ZERO) then -C -C COL J IS SUFFICIENTLY INDEPENDENT. COPY B INTO ZZ, UPDATE ZZ -C AND SOLVE FOR ZTEST ( = PROPOSED NEW VALUE FOR X(J) ). -C - DO 120 L=1,M - 120 ZZ(L)=B(L) - CALL H12 (2,NPP1,NPP1+1,M,A(1,J),1,UP,ZZ,1,1,1) - ZTEST=ZZ(NPP1)/A(NPP1,J) -C -C SEE IF ZTEST IS POSITIVE -C - IF (ZTEST .gt. ZERO) go to 140 - endif -C -C REJECT J AS A CANDIDATE TO BE MOVED FROM SET Z TO SET P. -C RESTORE A(NPP1,J), SET W(J)=0., AND LOOP BACK TO TEST DUAL -C COEFFS AGAIN. -C - A(NPP1,J)=ASAVE - W(J)=ZERO - GO TO 60 -C -C THE INDEX J=INDEX(IZ) HAS BEEN SELECTED TO BE MOVED FROM -C SET Z TO SET P. UPDATE B, UPDATE INDICES, APPLY HOUSEHOLDER -C TRANSFORMATIONS TO COLS IN NEW SET Z, ZERO SUBDIAGONAL ELTS IN -C COL J, SET W(J)=0. -C - 140 continue - DO 150 L=1,M - 150 B(L)=ZZ(L) -C - INDEX(IZ)=INDEX(IZ1) - INDEX(IZ1)=J - IZ1=IZ1+1 - NSETP=NPP1 - NPP1=NPP1+1 -C - IF (IZ1 .le. IZ2) then - DO 160 JZ=IZ1,IZ2 - JJ=INDEX(JZ) - CALL H12 (2,NSETP,NPP1,M,A(1,J),1,UP,A(1,JJ),1,MDA,1) - 160 continue - endif -C - IF (NSETP .ne. 
M) then - DO 180 L=NPP1,M - 180 A(L,J)=ZERO - endif -C - W(J)=ZERO -C SOLVE THE TRIANGULAR SYSTEM. -C STORE THE SOLUTION TEMPORARILY IN ZZ(). - RTNKEY = 1 - GO TO 400 - 200 CONTINUE -C -C ****** SECONDARY LOOP BEGINS HERE ****** -C -C ITERATION COUNTER. -C - 210 continue - ITER=ITER+1 - IF (ITER .gt. ITMAX) then - MODE=3 - write (*,'(/a)') ' NNLS quitting on iteration count.' - GO TO 350 - endif -C -C SEE IF ALL NEW CONSTRAINED COEFFS ARE FEASIBLE. -C IF NOT COMPUTE ALPHA. -C - ALPHA=TWO - DO 240 IP=1,NSETP - L=INDEX(IP) - IF (ZZ(IP) .le. ZERO) then - T=-X(L)/(ZZ(IP)-X(L)) - IF (ALPHA .gt. T) then - ALPHA=T - JJ=IP - endif - endif - 240 CONTINUE -C -C IF ALL NEW CONSTRAINED COEFFS ARE FEASIBLE THEN ALPHA WILL -C STILL = 2. IF SO EXIT FROM SECONDARY LOOP TO MAIN LOOP. -C - IF (ALPHA.EQ.TWO) GO TO 330 -C -C OTHERWISE USE ALPHA WHICH WILL BE BETWEEN 0. AND 1. TO -C INTERPOLATE BETWEEN THE OLD X AND THE NEW ZZ. -C - DO 250 IP=1,NSETP - L=INDEX(IP) - X(L)=X(L)+ALPHA*(ZZ(IP)-X(L)) - 250 continue -C -C MODIFY A AND B AND THE INDEX ARRAYS TO MOVE COEFFICIENT I -C FROM SET P TO SET Z. -C - I=INDEX(JJ) - 260 continue - X(I)=ZERO -C - IF (JJ .ne. NSETP) then - JJ=JJ+1 - DO 280 J=JJ,NSETP - II=INDEX(J) - INDEX(J-1)=II - CALL G1 (A(J-1,II),A(J,II),CC,SS,A(J-1,II)) - A(J,II)=ZERO - DO 270 L=1,N - IF (L.NE.II) then -c -c Apply procedure G2 (CC,SS,A(J-1,L),A(J,L)) -c - TEMP = A(J-1,L) - A(J-1,L) = CC*TEMP + SS*A(J,L) - A(J,L) =-SS*TEMP + CC*A(J,L) - endif - 270 CONTINUE -c -c Apply procedure G2 (CC,SS,B(J-1),B(J)) -c - TEMP = B(J-1) - B(J-1) = CC*TEMP + SS*B(J) - B(J) =-SS*TEMP + CC*B(J) - 280 continue - endif -c - NPP1=NSETP - NSETP=NSETP-1 - IZ1=IZ1-1 - INDEX(IZ1)=I -C -C SEE IF THE REMAINING COEFFS IN SET P ARE FEASIBLE. THEY SHOULD -C BE BECAUSE OF THE WAY ALPHA WAS DETERMINED. -C IF ANY ARE INFEASIBLE IT IS DUE TO ROUND-OFF ERROR. ANY -C THAT ARE NONPOSITIVE WILL BE SET TO ZERO -C AND MOVED FROM SET P TO SET Z. -C - DO 300 JJ=1,NSETP - I=INDEX(JJ) - IF (X(I) .le. 
ZERO) go to 260 - 300 CONTINUE -C -C COPY B( ) INTO ZZ( ). THEN SOLVE AGAIN AND LOOP BACK. -C - DO 310 I=1,M - 310 ZZ(I)=B(I) - RTNKEY = 2 - GO TO 400 - 320 CONTINUE - GO TO 210 -C ****** END OF SECONDARY LOOP ****** -C - 330 continue - DO 340 IP=1,NSETP - I=INDEX(IP) - 340 X(I)=ZZ(IP) -C ALL NEW COEFFS ARE POSITIVE. LOOP BACK TO BEGINNING. - GO TO 30 -C -C ****** END OF MAIN LOOP ****** -C -C COME TO HERE FOR TERMINATION. -C COMPUTE THE NORM OF THE FINAL RESIDUAL VECTOR. -C - 350 continue - SM=ZERO - IF (NPP1 .le. M) then - DO 360 I=NPP1,M - 360 SM=SM+B(I)**2 - else - DO 380 J=1,N - 380 W(J)=ZERO - endif - RNORM=sqrt(SM) - RETURN -C -C THE FOLLOWING BLOCK OF CODE IS USED AS AN INTERNAL SUBROUTINE -C TO SOLVE THE TRIANGULAR SYSTEM, PUTTING THE SOLUTION IN ZZ(). -C - 400 continue - DO 430 L=1,NSETP - IP=NSETP+1-L - IF (L .ne. 1) then - DO 410 II=1,IP - ZZ(II)=ZZ(II)-A(II,JJ)*ZZ(IP+1) - 410 continue - endif - JJ=INDEX(IP) - ZZ(IP)=ZZ(IP)/A(IP,JJ) - 430 continue - go to (200, 320), RTNKEY - END - - - double precision FUNCTION DIFF(X,Y) -c -c Function used in tests that depend on machine precision. -c -c The original version of this code was developed by -c Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory -c 1973 JUN 7, and published in the book -c "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. -c Revised FEB 1995 to accompany reprinting of the book by SIAM. -C - double precision X, Y - DIFF=X-Y - RETURN - END - - -C SUBROUTINE H12 (MODE,LPIVOT,L1,M,U,IUE,UP,C,ICE,ICV,NCV) -C -C CONSTRUCTION AND/OR APPLICATION OF A SINGLE -C HOUSEHOLDER TRANSFORMATION.. Q = I + U*(U**T)/B -C -c The original version of this code was developed by -c Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory -c 1973 JUN 12, and published in the book -c "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. -c Revised FEB 1995 to accompany reprinting of the book by SIAM. 
-C ------------------------------------------------------------------ -c Subroutine Arguments -c -C MODE = 1 OR 2 Selects Algorithm H1 to construct and apply a -c Householder transformation, or Algorithm H2 to apply a -c previously constructed transformation. -C LPIVOT IS THE INDEX OF THE PIVOT ELEMENT. -C L1,M IF L1 .LE. M THE TRANSFORMATION WILL BE CONSTRUCTED TO -C ZERO ELEMENTS INDEXED FROM L1 THROUGH M. IF L1 GT. M -C THE SUBROUTINE DOES AN IDENTITY TRANSFORMATION. -C U(),IUE,UP On entry with MODE = 1, U() contains the pivot -c vector. IUE is the storage increment between elements. -c On exit when MODE = 1, U() and UP contain quantities -c defining the vector U of the Householder transformation. -c on entry with MODE = 2, U() and UP should contain -c quantities previously computed with MODE = 1. These will -c not be modified during the entry with MODE = 2. -C C() ON ENTRY with MODE = 1 or 2, C() CONTAINS A MATRIX WHICH -c WILL BE REGARDED AS A SET OF VECTORS TO WHICH THE -c HOUSEHOLDER TRANSFORMATION IS TO BE APPLIED. -c ON EXIT C() CONTAINS THE SET OF TRANSFORMED VECTORS. -C ICE STORAGE INCREMENT BETWEEN ELEMENTS OF VECTORS IN C(). -C ICV STORAGE INCREMENT BETWEEN VECTORS IN C(). -C NCV NUMBER OF VECTORS IN C() TO BE TRANSFORMED. IF NCV .LE. 0 -C NO OPERATIONS WILL BE DONE ON C(). -C ------------------------------------------------------------------ - SUBROUTINE H12 (MODE,LPIVOT,L1,M,U,IUE,UP,C,ICE,ICV,NCV) -C ------------------------------------------------------------------ - integer I, I2, I3, I4, ICE, ICV, INCR, IUE, J - integer L1, LPIVOT, M, MODE, NCV - double precision B, C(*), CL, CLINV, ONE, SM -c double precision U(IUE,M) - double precision U(IUE,*) - double precision UP - parameter(ONE = 1.0d0) -C ------------------------------------------------------------------ - IF (0.GE.LPIVOT.OR.LPIVOT.GE.L1.OR.L1.GT.M) RETURN - CL=abs(U(1,LPIVOT)) - IF (MODE.EQ.2) GO TO 60 -C ****** CONSTRUCT THE TRANSFORMATION. 
****** - DO 10 J=L1,M - 10 CL=MAX(abs(U(1,J)),CL) - IF (CL) 130,130,20 - 20 CLINV=ONE/CL - SM=(U(1,LPIVOT)*CLINV)**2 - DO 30 J=L1,M - 30 SM=SM+(U(1,J)*CLINV)**2 - CL=CL*SQRT(SM) - IF (U(1,LPIVOT)) 50,50,40 - 40 CL=-CL - 50 UP=U(1,LPIVOT)-CL - U(1,LPIVOT)=CL - GO TO 70 -C ****** APPLY THE TRANSFORMATION I+U*(U**T)/B TO C. ****** -C - 60 IF (CL) 130,130,70 - 70 IF (NCV.LE.0) RETURN - B= UP*U(1,LPIVOT) -C B MUST BE NONPOSITIVE HERE. IF B = 0., RETURN. -C - IF (B) 80,130,130 - 80 B=ONE/B - I2=1-ICV+ICE*(LPIVOT-1) - INCR=ICE*(L1-LPIVOT) - DO 120 J=1,NCV - I2=I2+ICV - I3=I2+INCR - I4=I3 - SM=C(I2)*UP - DO 90 I=L1,M - SM=SM+C(I3)*U(1,I) - 90 I3=I3+ICE - IF (SM) 100,120,100 - 100 SM=SM*B - C(I2)=C(I2)+SM*UP - DO 110 I=L1,M - C(I4)=C(I4)+SM*U(1,I) - 110 I4=I4+ICE - 120 CONTINUE - 130 RETURN - END - - - - SUBROUTINE G1 (A,B,CTERM,STERM,SIG) -c -C COMPUTE ORTHOGONAL ROTATION MATRIX.. -c -c The original version of this code was developed by -c Charles L. Lawson and Richard J. Hanson at Jet Propulsion Laboratory -c 1973 JUN 12, and published in the book -c "SOLVING LEAST SQUARES PROBLEMS", Prentice-HalL, 1974. -c Revised FEB 1995 to accompany reprinting of the book by SIAM. -C -C COMPUTE.. MATRIX (C, S) SO THAT (C, S)(A) = (SQRT(A**2+B**2)) -C (-S,C) (-S,C)(B) ( 0 ) -C COMPUTE SIG = SQRT(A**2+B**2) -C SIG IS COMPUTED LAST TO ALLOW FOR THE POSSIBILITY THAT -C SIG MAY BE IN THE SAME LOCATION AS A OR B . -C ------------------------------------------------------------------ - double precision A, B, CTERM, ONE, SIG, STERM, XR, YR, ZERO - parameter(ONE = 1.0d0, ZERO = 0.0d0) -C ------------------------------------------------------------------ - if (abs(A) .gt. abs(B)) then - XR=B/A - YR=sqrt(ONE+XR**2) - CTERM=sign(ONE/YR,A) - STERM=CTERM*XR - SIG=abs(A)*YR - RETURN - endif - - if (B .ne. 
ZERO) then - XR=A/B - YR=sqrt(ONE+XR**2) - STERM=sign(ONE/YR,B) - CTERM=STERM*XR - SIG=abs(B)*YR - RETURN - endif - - SIG=ZERO - CTERM=ZERO - STERM=ONE - RETURN - END diff --git a/scipy-0.10.1/scipy/optimize/nnls/nnls.pyf b/scipy-0.10.1/scipy/optimize/nnls/nnls.pyf deleted file mode 100644 index 3630a466b6..0000000000 --- a/scipy-0.10.1/scipy/optimize/nnls/nnls.pyf +++ /dev/null @@ -1,22 +0,0 @@ -! -*- f90 -*- -! Note: the context of this file is case sensitive. - -python module _nnls ! in - interface ! in :_nnls - subroutine nnls(a,mda,m,n,b,x,rnorm,w,zz,index_bn,mode) ! in :nnls:NNLS.F - double precision dimension(mda,*), intent(copy) :: a - integer optional,check(shape(a,0)==mda),depend(a) :: mda=shape(a,0) - integer :: m - integer :: n - double precision dimension(*), intent(copy) :: b - double precision dimension(n), intent(out) :: x - double precision, intent(out) :: rnorm - double precision dimension(*) :: w - double precision dimension(*) :: zz - integer dimension(*) :: index_bn - integer , intent(out):: mode - end subroutine nnls -end python module _nnls - -! This file was auto-generated with f2py (version:2_5878). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/optimize/nonlin.py b/scipy-0.10.1/scipy/optimize/nonlin.py deleted file mode 100644 index 15f386aa91..0000000000 --- a/scipy-0.10.1/scipy/optimize/nonlin.py +++ /dev/null @@ -1,1477 +0,0 @@ -r""" -.. module:: scipy.optimize.nonlin - -================= -Nonlinear solvers -================= - -.. currentmodule:: scipy.optimize - -This is a collection of general-purpose nonlinear multidimensional -solvers. These solvers find *x* for which *F(x) = 0*. Both *x* -and *F* can be multidimensional. - -Routines -======== - -Large-scale nonlinear solvers: - -.. autosummary:: - - newton_krylov - anderson - -General nonlinear solvers: - -.. autosummary:: - - broyden1 - broyden2 - -Simple iterations: - -.. 
autosummary:: - - excitingmixing - linearmixing - diagbroyden - - -Examples -======== - -Small problem -------------- - ->>> def F(x): -... return np.cos(x) + x[::-1] - [1, 2, 3, 4] ->>> import scipy.optimize ->>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14) ->>> x -array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251]) ->>> np.cos(x) + x[::-1] -array([ 1., 2., 3., 4.]) - - -Large problem -------------- - -Suppose that we needed to solve the following integrodifferential -equation on the square :math:`[0,1]\times[0,1]`: - -.. math:: - - \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 - -with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of -the square. - -The solution can be found using the `newton_krylov` solver: - -.. plot:: - - import numpy as np - from scipy.optimize import newton_krylov - from numpy import cosh, zeros_like, mgrid, zeros - - # parameters - nx, ny = 75, 75 - hx, hy = 1./(nx-1), 1./(ny-1) - - P_left, P_right = 0, 0 - P_top, P_bottom = 1, 0 - - def residual(P): - d2x = zeros_like(P) - d2y = zeros_like(P) - - d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx - d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx - d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx - - d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy - d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy - d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy - - return d2x + d2y - 10*cosh(P).mean()**2 - - # solve - guess = zeros((nx, ny), float) - sol = newton_krylov(residual, guess, method='lgmres', verbose=1) - print 'Residual', abs(residual(sol)).max() - - # visualize - import matplotlib.pyplot as plt - x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] - plt.pcolor(x, y, sol) - plt.colorbar() - plt.show() - -""" -# Copyright (C) 2009, Pauli Virtanen -# Distributed under the same license as Scipy. 
- -import sys -import numpy as np -from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError -from numpy import asarray, dot, vdot -import scipy.sparse.linalg -import scipy.sparse -from scipy.linalg import get_blas_funcs -import inspect -from linesearch import scalar_search_wolfe1, scalar_search_armijo - -__all__ = [ - 'broyden1', 'broyden2', 'anderson', 'linearmixing', - 'diagbroyden', 'excitingmixing', 'newton_krylov'] - -#------------------------------------------------------------------------------ -# Utility functions -#------------------------------------------------------------------------------ - -class NoConvergence(Exception): - pass - -def maxnorm(x): - return np.absolute(x).max() - -def _as_inexact(x): - """Return `x` as an array, of either floats or complex floats""" - x = asarray(x) - if not np.issubdtype(x.dtype, np.inexact): - return asarray(x, dtype=np.float_) - return x - -def _array_like(x, x0): - """Return ndarray `x` as same array subclass and shape as `x0`""" - x = np.reshape(x, np.shape(x0)) - wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) - return wrap(x) - -def _safe_norm(v): - if not np.isfinite(v).all(): - return np.array(np.inf) - return norm(v) - -#------------------------------------------------------------------------------ -# Generic nonlinear solver machinery -#------------------------------------------------------------------------------ - -_doc_parts = dict( - params_basic=""" - F : function(x) -> f - Function whose root to find; should take and return an array-like - object. - x0 : array-like - Initial guess for the solution - """.strip(), - params_extra=""" - iter : int, optional - Number of iterations to make. If omitted (default), make as many - as required to meet tolerances. - verbose : bool, optional - Print status to stdout on every iteration. - maxiter : int, optional - Maximum number of iterations to make. If more are needed to - meet convergence, `NoConvergence` is raised. 
- f_tol : float, optional - Absolute tolerance (in max-norm) for the residual. - If omitted, default is 6e-6. - f_rtol : float, optional - Relative tolerance for the residual. If omitted, not used. - x_tol : float, optional - Absolute minimum step size, as determined from the Jacobian - approximation. If the step size is smaller than this, optimization - is terminated as successful. If omitted, not used. - x_rtol : float, optional - Relative minimum step size. If omitted, not used. - tol_norm : function(vector) -> scalar, optional - Norm to use in convergence check. Default is the maximum norm. - line_search : {None, 'armijo' (default), 'wolfe'}, optional - Which type of a line search to use to determine the step size in the - direction given by the Jacobian approximation. Defaults to 'armijo'. - callback : function, optional - Optional callback function. It is called on every iteration as - ``callback(x, f)`` where `x` is the current solution and `f` - the corresponding residual. - - Returns - ------- - sol : array-like - An array (of similar array type as `x0`) containing the final solution. - - Raises - ------ - NoConvergence - When a solution was not found. - - """.strip() -) - -def _set_doc(obj): - if obj.__doc__: - obj.__doc__ = obj.__doc__ % _doc_parts - -def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, - maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, - tol_norm=None, line_search='armijo', callback=None): - """ - Find a root of a function, in a way suitable for large-scale problems. - - Parameters - ---------- - %(params_basic)s - jacobian : Jacobian - A Jacobian approximation: `Jacobian` object or something that - `asjacobian` can transform to one. 
Alternatively, a string specifying - which of the builtin Jacobian approximations to use: - - krylov, broyden1, broyden2, anderson - diagbroyden, linearmixing, excitingmixing - - %(params_extra)s - - See Also - -------- - asjacobian, Jacobian - - Notes - ----- - This algorithm implements the inexact Newton method, with - backtracking or full line searches. Several Jacobian - approximations are available, including Krylov and Quasi-Newton - methods. - - References - ---------- - .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear - Equations\". Society for Industrial and Applied Mathematics. (1995) - http://www.siam.org/books/kelley/ - - """ - - condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, - x_tol=x_tol, x_rtol=x_rtol, - iter=iter, norm=tol_norm) - - x0 = _as_inexact(x0) - func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten() - x = x0.flatten() - - dx = np.inf - Fx = func(x) - Fx_norm = norm(Fx) - - jacobian = asjacobian(jacobian) - jacobian.setup(x.copy(), Fx, func) - - if maxiter is None: - if iter is not None: - maxiter = iter + 1 - else: - maxiter = 100*(x.size+1) - - if line_search is True: - line_search = 'armijo' - elif line_search is False: - line_search = None - - if line_search not in (None, 'armijo', 'wolfe'): - raise ValueError("Invalid line search") - - # Solver tolerance selection - gamma = 0.9 - eta_max = 0.9999 - eta_treshold = 0.1 - eta = 1e-3 - - for n in xrange(maxiter): - if condition.check(Fx, x, dx): - break - - # The tolerance, as computed for scipy.sparse.linalg.* routines - tol = min(eta, eta*Fx_norm) - dx = -jacobian.solve(Fx, tol=tol) - - if norm(dx) == 0: - raise ValueError("Jacobian inversion yielded zero vector. 
" - "This indicates a bug in the Jacobian " - "approximation.") - - # Line search, or Newton step - if line_search: - s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, - line_search) - else: - s = 1.0 - x += dx - Fx = func(x) - Fx_norm_new = norm(Fx) - - jacobian.update(x.copy(), Fx) - - if callback: - callback(x, Fx) - - # Adjust forcing parameters for inexact methods - eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 - if gamma * eta**2 < eta_treshold: - eta = min(eta_max, eta_A) - else: - eta = min(eta_max, max(eta_A, gamma*eta**2)) - - Fx_norm = Fx_norm_new - - # Print status - if verbose: - sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % ( - n, norm(Fx), s, eta)) - sys.stdout.flush() - else: - raise NoConvergence(_array_like(x, x0)) - - return _array_like(x, x0) - -_set_doc(nonlin_solve) - -def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, - smin=1e-2): - tmp_s = [0] - tmp_Fx = [Fx] - tmp_phi = [norm(Fx)**2] - s_norm = norm(x) / norm(dx) - - def phi(s, store=True): - if s == tmp_s[0]: - return tmp_phi[0] - xt = x + s*dx - v = func(xt) - p = _safe_norm(v)**2 - if store: - tmp_s[0] = s - tmp_phi[0] = p - tmp_Fx[0] = v - return p - - def derphi(s): - ds = (abs(s) + s_norm + 1) * rdiff - return (phi(s+ds, store=False) - phi(s)) / ds - - if search_type == 'wolfe': - s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], - xtol=1e-2, amin=smin) - elif search_type == 'armijo': - s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], - amin=smin) - - if s is None: - # XXX: No suitable step length found. Take the full Newton step, - # and hope for the best. - s = 1.0 - - x = x + s*dx - if s == tmp_s[0]: - Fx = tmp_Fx[0] - else: - Fx = func(x) - Fx_norm = norm(Fx) - - return s, x, Fx, Fx_norm - -class TerminationCondition(object): - """ - Termination condition for an iteration. 
It is terminated if - - - |F| < f_rtol*|F_0|, AND - - |F| < f_tol - - AND - - - |dx| < x_rtol*|x|, AND - - |dx| < x_tol - - """ - def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, - iter=None, norm=maxnorm): - - if f_tol is None: - f_tol = np.finfo(np.float_).eps ** (1./3) - if f_rtol is None: - f_rtol = np.inf - if x_tol is None: - x_tol = np.inf - if x_rtol is None: - x_rtol = np.inf - - self.x_tol = x_tol - self.x_rtol = x_rtol - self.f_tol = f_tol - self.f_rtol = f_rtol - - self.norm = maxnorm - self.iter = iter - - self.f0_norm = None - self.iteration = 0 - - def check(self, f, x, dx): - self.iteration += 1 - f_norm = self.norm(f) - x_norm = self.norm(x) - dx_norm = self.norm(dx) - - if self.f0_norm is None: - self.f0_norm = f_norm - - if f_norm == 0: - return True - - if self.iter is not None: - # backwards compatibility with Scipy 0.6.0 - return self.iteration > self.iter - - # NB: condition must succeed for rtol=inf even if norm == 0 - return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm) - and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm)) - - -#------------------------------------------------------------------------------ -# Generic Jacobian approximation -#------------------------------------------------------------------------------ - -class Jacobian(object): - """ - Common interface for Jacobians or Jacobian approximations. - - The optional methods come useful when implementing trust region - etc. algorithms that often require evaluating transposes of the - Jacobian. - - Methods - ------- - solve - Returns J^-1 * v - update - Updates Jacobian to point `x` (where the function has residual `Fx`) - - matvec : optional - Returns J * v - rmatvec : optional - Returns A^H * v - rsolve : optional - Returns A^-H * v - matmat : optional - Returns A * V, where V is a dense matrix with dimensions (N,K). - todense : optional - Form the dense Jacobian matrix. 
Necessary for dense trust region - algorithms, and useful for testing. - - Attributes - ---------- - shape - Matrix dimensions (M, N) - dtype - Data type of the matrix. - func : callable, optional - Function the Jacobian corresponds to - - """ - - def __init__(self, **kw): - names = ["solve", "update", "matvec", "rmatvec", "rsolve", - "matmat", "todense", "shape", "dtype"] - for name, value in kw.items(): - if name not in names: - raise ValueError("Unknown keyword argument %s" % name) - if value is not None: - setattr(self, name, kw[name]) - - if hasattr(self, 'todense'): - self.__array__ = lambda: self.todense() - - def aspreconditioner(self): - return InverseJacobian(self) - - def solve(self, v, tol=0): - raise NotImplementedError - - def update(self, x, F): - pass - - def setup(self, x, F, func): - self.func = func - self.shape = (F.size, x.size) - self.dtype = F.dtype - if self.__class__.setup is Jacobian.setup: - # Call on the first point unless overridden - self.update(self, x, F) - -class InverseJacobian(object): - def __init__(self, jacobian): - self.jacobian = jacobian - self.matvec = jacobian.solve - self.update = jacobian.update - if hasattr(jacobian, 'setup'): - self.setup = jacobian.setup - if hasattr(jacobian, 'rsolve'): - self.rmatvec = jacobian.rsolve - - @property - def shape(self): - return self.jacobian.shape - - @property - def dtype(self): - return self.jacobian.dtype - -def asjacobian(J): - """ - Convert given object to one suitable for use as a Jacobian. 
- """ - spsolve = scipy.sparse.linalg.spsolve - if isinstance(J, Jacobian): - return J - elif inspect.isclass(J) and issubclass(J, Jacobian): - return J() - elif isinstance(J, np.ndarray): - if J.ndim > 2: - raise ValueError('array must have rank <= 2') - J = np.atleast_2d(np.asarray(J)) - if J.shape[0] != J.shape[1]: - raise ValueError('array must be square') - - return Jacobian(matvec=lambda v: dot(J, v), - rmatvec=lambda v: dot(J.conj().T, v), - solve=lambda v: solve(J, v), - rsolve=lambda v: solve(J.conj().T, v), - dtype=J.dtype, shape=J.shape) - elif scipy.sparse.isspmatrix(J): - if J.shape[0] != J.shape[1]: - raise ValueError('matrix must be square') - return Jacobian(matvec=lambda v: J*v, - rmatvec=lambda v: J.conj().T * v, - solve=lambda v: spsolve(J, v), - rsolve=lambda v: spsolve(J.conj().T, v), - dtype=J.dtype, shape=J.shape) - elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): - return Jacobian(matvec=getattr(J, 'matvec'), - rmatvec=getattr(J, 'rmatvec'), - solve=J.solve, - rsolve=getattr(J, 'rsolve'), - update=getattr(J, 'update'), - setup=getattr(J, 'setup'), - dtype=J.dtype, - shape=J.shape) - elif callable(J): - # Assume it's a function J(x) that returns the Jacobian - class Jac(Jacobian): - def update(self, x, F): - self.x = x - def solve(self, v, tol=0): - m = J(self.x) - if isinstance(m, np.ndarray): - return solve(m, v) - elif scipy.sparse.isspmatrix(m): - return spsolve(m, v) - else: - raise ValueError("Unknown matrix type") - def matvec(self, v): - m = J(self.x) - if isinstance(m, np.ndarray): - return dot(m, v) - elif scipy.sparse.isspmatrix(m): - return m*v - else: - raise ValueError("Unknown matrix type") - def rsolve(self, v, tol=0): - m = J(self.x) - if isinstance(m, np.ndarray): - return solve(m.conj().T, v) - elif scipy.sparse.isspmatrix(m): - return spsolve(m.conj().T, v) - else: - raise ValueError("Unknown matrix type") - def rmatvec(self, v): - m = J(self.x) - if isinstance(m, np.ndarray): - return 
dot(m.conj().T, v) - elif scipy.sparse.isspmatrix(m): - return m.conj().T * v - else: - raise ValueError("Unknown matrix type") - return Jac() - elif isinstance(J, str): - return dict(broyden1=BroydenFirst, - broyden2=BroydenSecond, - anderson=Anderson, - diagbroyden=DiagBroyden, - linearmixing=LinearMixing, - excitingmixing=ExcitingMixing, - krylov=KrylovJacobian)[J]() - else: - raise TypeError('Cannot convert object to a Jacobian') - - -#------------------------------------------------------------------------------ -# Broyden -#------------------------------------------------------------------------------ - -class GenericBroyden(Jacobian): - def setup(self, x0, f0, func): - Jacobian.setup(self, x0, f0, func) - self.last_f = f0 - self.last_x = x0 - - if hasattr(self, 'alpha') and self.alpha is None: - # autoscale the initial Jacobian parameter - self.alpha = 0.5*max(norm(x0), 1) / norm(f0) - - def _update(self, x, f, dx, df, dx_norm, df_norm): - raise NotImplementedError - - def update(self, x, f): - df = f - self.last_f - dx = x - self.last_x - self._update(x, f, dx, df, norm(dx), norm(df)) - self.last_f = f - self.last_x = x - -class LowRankMatrix(object): - r""" - A matrix represented as - - .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger - - However, if the rank of the matrix reaches the dimension of the vectors, - full matrix representation will be used thereon. 
- - """ - - def __init__(self, alpha, n, dtype): - self.alpha = alpha - self.cs = [] - self.ds = [] - self.n = n - self.dtype = dtype - self.collapsed = None - - @staticmethod - def _matvec(v, alpha, cs, ds): - axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], - cs[:1] + [v]) - w = alpha * v - for c, d in zip(cs, ds): - a = dotc(d, v) - w = axpy(c, w, w.size, a) - return w - - @staticmethod - def _solve(v, alpha, cs, ds): - """Evaluate w = M^-1 v""" - if len(cs) == 0: - return v/alpha - - # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 - - axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) - - c0 = cs[0] - A = alpha * np.identity(len(cs), dtype=c0.dtype) - for i, d in enumerate(ds): - for j, c in enumerate(cs): - A[i,j] += dotc(d, c) - - q = np.zeros(len(cs), dtype=c0.dtype) - for j, d in enumerate(ds): - q[j] = dotc(d, v) - q /= alpha - q = solve(A, q) - - w = v/alpha - for c, qc in zip(cs, q): - w = axpy(c, w, w.size, -qc) - - return w - - def matvec(self, v): - """Evaluate w = M v""" - if self.collapsed is not None: - return np.dot(self.collapsed, v) - return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) - - def rmatvec(self, v): - """Evaluate w = M^H v""" - if self.collapsed is not None: - return np.dot(self.collapsed.T.conj(), v) - return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) - - def solve(self, v, tol=0): - """Evaluate w = M^-1 v""" - if self.collapsed is not None: - return solve(self.collapsed, v) - return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) - - def rsolve(self, v, tol=0): - """Evaluate w = M^-H v""" - if self.collapsed is not None: - return solve(self.collapsed.T.conj(), v) - return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) - - def append(self, c, d): - if self.collapsed is not None: - self.collapsed += c[:,None] * d[None,:].conj() - return - - self.cs.append(c) - self.ds.append(d) - - if len(self.cs) > c.size: - self.collapse() - - def 
__array__(self): - if self.collapsed is not None: - return self.collapsed - - Gm = self.alpha*np.identity(self.n, dtype=self.dtype) - for c, d in zip(self.cs, self.ds): - Gm += c[:,None]*d[None,:].conj() - return Gm - - def collapse(self): - """Collapse the low-rank matrix to a full-rank one.""" - self.collapsed = np.array(self) - self.cs = None - self.ds = None - self.alpha = None - - def restart_reduce(self, rank): - """ - Reduce the rank of the matrix by dropping all vectors. - """ - if self.collapsed is not None: - return - assert rank > 0 - if len(self.cs) > rank: - del self.cs[:] - del self.ds[:] - - def simple_reduce(self, rank): - """ - Reduce the rank of the matrix by dropping oldest vectors. - """ - if self.collapsed is not None: - return - assert rank > 0 - while len(self.cs) > rank: - del self.cs[0] - del self.ds[0] - - def svd_reduce(self, max_rank, to_retain=None): - """ - Reduce the rank of the matrix by retaining some SVD components. - - This corresponds to the \"Broyden Rank Reduction Inverse\" - algorithm described in [vR]_. - - Note that the SVD decomposition can be done by solving only a - problem whose size is the effective rank of this matrix, which - is viable even for large problems. - - Parameters - ---------- - max_rank : int - Maximum rank of this matrix after reduction. - to_retain : int, optional - Number of SVD components to retain when reduction is done - (ie. rank > max_rank). Default is ``max_rank - 2``. - - References - ---------- - .. [vR] B.A. van der Rotten, PhD thesis, - \"A limited memory Broyden method to solve high-dimensional - systems of nonlinear equations\". Mathematisch Instituut, - Universiteit Leiden, The Netherlands (2003). 
- - http://www.math.leidenuniv.nl/scripties/Rotten.pdf - - """ - if self.collapsed is not None: - return - - p = max_rank - if to_retain is not None: - q = to_retain - else: - q = p - 2 - - if self.cs: - p = min(p, len(self.cs[0])) - q = max(0, min(q, p-1)) - - m = len(self.cs) - if m < p: - # nothing to do - return - - C = np.array(self.cs).T - D = np.array(self.ds).T - - D, R = qr(D, mode='qr', econ=True) - C = dot(C, R.T.conj()) - - U, S, WH = svd(C, full_matrices=False, compute_uv=True) - - C = dot(C, inv(WH)) - D = dot(D, WH.T.conj()) - - for k in xrange(q): - self.cs[k] = C[:,k].copy() - self.ds[k] = D[:,k].copy() - - del self.cs[q:] - del self.ds[q:] - -_doc_parts['broyden_params'] = """ - alpha : float, optional - Initial guess for the Jacobian is (-1/alpha). - reduction_method : str or tuple, optional - Method used in ensuring that the rank of the Broyden matrix - stays low. Can either be a string giving the name of the method, - or a tuple of the form ``(method, param1, param2, ...)`` - that gives the name of the method and values for additional parameters. - - Methods available: - - ``restart``: drop all matrix columns. Has no extra parameters. - - ``simple``: drop oldest matrix column. Has no extra parameters. - - ``svd``: keep only the most significant SVD components. - Extra parameters: - - ``to_retain`: number of SVD components to retain when - rank reduction is done. Default is ``max_rank - 2``. - max_rank : int, optional - Maximum rank for the Broyden matrix. - Default is infinity (ie., no rank reduction). - """.strip() - -class BroydenFirst(GenericBroyden): - r""" - Find a root of a function, using Broyden's first Jacobian approximation. - - This method is also known as \"Broyden's good method\". - - Parameters - ---------- - %(params_basic)s - %(broyden_params)s - %(params_extra)s - - Notes - ----- - This algorithm implements the inverse Jacobian Quasi-Newton update - - .. 
math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) - - which corresponds to Broyden's first Jacobian update - - .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx - - - References - ---------- - .. [vR] B.A. van der Rotten, PhD thesis, - \"A limited memory Broyden method to solve high-dimensional - systems of nonlinear equations\". Mathematisch Instituut, - Universiteit Leiden, The Netherlands (2003). - - http://www.math.leidenuniv.nl/scripties/Rotten.pdf - - """ - - def __init__(self, alpha=None, reduction_method='restart', max_rank=None): - GenericBroyden.__init__(self) - self.alpha = alpha - self.Gm = None - - if max_rank is None: - max_rank = np.inf - self.max_rank = max_rank - - if isinstance(reduction_method, str): - reduce_params = () - else: - reduce_params = reduction_method[1:] - reduction_method = reduction_method[0] - reduce_params = (max_rank - 1,) + reduce_params - - if reduction_method == 'svd': - self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) - elif reduction_method == 'simple': - self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) - elif reduction_method == 'restart': - self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) - else: - raise ValueError("Unknown rank reduction method '%s'" % - reduction_method) - - def setup(self, x, F, func): - GenericBroyden.setup(self, x, F, func) - self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) - - def todense(self): - return inv(self.Gm) - - def solve(self, f, tol=0): - r = self.Gm.matvec(f) - if not np.isfinite(r).all(): - # singular; reset the Jacobian approximation - self.setup(self.last_x, self.last_f, self.func) - return self.Gm.matvec(f) - - def matvec(self, f): - return self.Gm.solve(f) - - def rsolve(self, f, tol=0): - return self.Gm.rmatvec(f) - - def rmatvec(self, f): - return self.Gm.rsolve(f) - - def _update(self, x, f, dx, df, dx_norm, df_norm): - self._reduce() # reduce first to preserve secant condition - - v = self.Gm.rmatvec(dx) - c 
= dx - self.Gm.matvec(df) - d = v / vdot(df, v) - - self.Gm.append(c, d) - - -class BroydenSecond(BroydenFirst): - """ - Find a root of a function, using Broyden\'s second Jacobian approximation. - - This method is also known as \"Broyden's bad method\". - - Parameters - ---------- - %(params_basic)s - %(broyden_params)s - %(params_extra)s - - Notes - ----- - This algorithm implements the inverse Jacobian Quasi-Newton update - - .. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df) - - corresponding to Broyden's second method. - - References - ---------- - .. [vR] B.A. van der Rotten, PhD thesis, - \"A limited memory Broyden method to solve high-dimensional - systems of nonlinear equations\". Mathematisch Instituut, - Universiteit Leiden, The Netherlands (2003). - - http://www.math.leidenuniv.nl/scripties/Rotten.pdf - - """ - - def _update(self, x, f, dx, df, dx_norm, df_norm): - self._reduce() # reduce first to preserve secant condition - - v = df - c = dx - self.Gm.matvec(df) - d = v / df_norm**2 - self.Gm.append(c, d) - - -#------------------------------------------------------------------------------ -# Broyden-like (restricted memory) -#------------------------------------------------------------------------------ - -class Anderson(GenericBroyden): - """ - Find a root of a function, using (extended) Anderson mixing. - - The Jacobian is formed by for a 'best' solution in the space - spanned by last `M` vectors. As a result, only a MxM matrix - inversions and MxN multiplications are required. [Ey]_ - - Parameters - ---------- - %(params_basic)s - alpha : float, optional - Initial guess for the Jacobian is (-1/alpha). - M : float, optional - Number of previous vectors to retain. Defaults to 5. - w0 : float, optional - Regularization parameter for numerical stability. - Compared to unity, good values of the order of 0.01. - %(params_extra)s - - References - ---------- - .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
- - """ - - # Note: - # - # Anderson method maintains a rank M approximation of the inverse Jacobian, - # - # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v - # A = W + dF^H dF - # W = w0^2 diag(dF^H dF) - # - # so that for w0 = 0 the secant condition applies for last M iterates, ie., - # - # J^-1 df_j = dx_j - # - # for all j = 0 ... M-1. - # - # Moreover, (from Sherman-Morrison-Woodbury formula) - # - # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v - # C = (dX + alpha dF) A^-1 - # b = -1/alpha - # - # and after simplification - # - # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v - # - - def __init__(self, alpha=None, w0=0.01, M=5): - GenericBroyden.__init__(self) - self.alpha = alpha - self.M = M - self.dx = [] - self.df = [] - self.gamma = None - self.w0 = w0 - - def solve(self, f, tol=0): - dx = -self.alpha*f - - n = len(self.dx) - if n == 0: - return dx - - df_f = np.empty(n, dtype=f.dtype) - for k in xrange(n): - df_f[k] = vdot(self.df[k], f) - - try: - gamma = solve(self.a, df_f) - except LinAlgError: - # singular; reset the Jacobian approximation - del self.dx[:] - del self.df[:] - return dx - - for m in xrange(n): - dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) - return dx - - def matvec(self, f): - dx = -f/self.alpha - - n = len(self.dx) - if n == 0: - return dx - - df_f = np.empty(n, dtype=f.dtype) - for k in xrange(n): - df_f[k] = vdot(self.df[k], f) - - b = np.empty((n, n), dtype=f.dtype) - for i in xrange(n): - for j in xrange(n): - b[i,j] = vdot(self.df[i], self.dx[j]) - if i == j and self.w0 != 0: - b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha - gamma = solve(b, df_f) - - for m in xrange(n): - dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) - return dx - - def _update(self, x, f, dx, df, dx_norm, df_norm): - if self.M == 0: - return - - self.dx.append(dx) - self.df.append(df) - - while len(self.dx) > self.M: - self.dx.pop(0) - self.df.pop(0) - - n = len(self.dx) - a = np.zeros((n, n), dtype=f.dtype) 
- - for i in xrange(n): - for j in xrange(i, n): - if i == j: - wd = self.w0**2 - else: - wd = 0 - a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) - - a += np.triu(a, 1).T.conj() - self.a = a - -#------------------------------------------------------------------------------ -# Simple iterations -#------------------------------------------------------------------------------ - -class DiagBroyden(GenericBroyden): - """ - Find a root of a function, using diagonal Broyden Jacobian approximation. - - The Jacobian approximation is derived from previous iterations, by - retaining only the diagonal of Broyden matrices. - - .. warning:: - - This algorithm may be useful for specific problems, but whether - it will work may depend strongly on the problem. - - Parameters - ---------- - %(params_basic)s - alpha : float, optional - Initial guess for the Jacobian is (-1/alpha). - %(params_extra)s - """ - - def __init__(self, alpha=None): - GenericBroyden.__init__(self) - self.alpha = alpha - - def setup(self, x, F, func): - GenericBroyden.setup(self, x, F, func) - self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha - - def solve(self, f, tol=0): - return -f / self.d - - def matvec(self, f): - return -f * self.d - - def rsolve(self, f, tol=0): - return -f / self.d.conj() - - def rmatvec(self, f): - return -f * self.d.conj() - - def todense(self): - return np.diag(-self.d) - - def _update(self, x, f, dx, df, dx_norm, df_norm): - self.d -= (df + self.d*dx)*dx/dx_norm**2 - -class LinearMixing(GenericBroyden): - """ - Find a root of a function, using a scalar Jacobian approximation. - - .. warning:: - - This algorithm may be useful for specific problems, but whether - it will work may depend strongly on the problem. - - Parameters - ---------- - %(params_basic)s - alpha : float, optional - The Jacobian approximation is (-1/alpha). 
- %(params_extra)s - """ - - def __init__(self, alpha=None): - GenericBroyden.__init__(self) - self.alpha = alpha - - def solve(self, f, tol=0): - return -f*self.alpha - - def matvec(self, f): - return -f/self.alpha - - def rsolve(self, f, tol=0): - return -f*np.conj(self.alpha) - - def rmatvec(self, f): - return -f/np.conj(self.alpha) - - def todense(self): - return np.diag(-np.ones(self.shape[0])/self.alpha) - - def _update(self, x, f, dx, df, dx_norm, df_norm): - pass - -class ExcitingMixing(GenericBroyden): - """ - Find a root of a function, using a tuned diagonal Jacobian approximation. - - The Jacobian matrix is diagonal and is tuned on each iteration. - - .. warning:: - - This algorithm may be useful for specific problems, but whether - it will work may depend strongly on the problem. - - Parameters - ---------- - %(params_basic)s - alpha : float, optional - Initial Jacobian approximation is (-1/alpha). - alphamax : float, optional - The entries of the diagonal Jacobian are kept in the range - ``[alpha, alphamax]``. 
- %(params_extra)s - """ - - def __init__(self, alpha=None, alphamax=1.0): - GenericBroyden.__init__(self) - self.alpha = alpha - self.alphamax = alphamax - self.beta = None - - def setup(self, x, F, func): - GenericBroyden.setup(self, x, F, func) - self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype) - - def solve(self, f, tol=0): - return -f*self.beta - - def matvec(self, f): - return -f/self.beta - - def rsolve(self, f, tol=0): - return -f*self.beta.conj() - - def rmatvec(self, f): - return -f/self.beta.conj() - - def todense(self): - return np.diag(-1/self.beta) - - def _update(self, x, f, dx, df, dx_norm, df_norm): - incr = f*self.last_f > 0 - self.beta[incr] += self.alpha - self.beta[~incr] = self.alpha - np.clip(self.beta, 0, self.alphamax, out=self.beta) - - -#------------------------------------------------------------------------------ -# Iterative/Krylov approximated Jacobians -#------------------------------------------------------------------------------ - -class KrylovJacobian(Jacobian): - r""" - Find a root of a function, using Krylov approximation for inverse Jacobian. - - This method is suitable for solving large-scale problems. - - Parameters - ---------- - %(params_basic)s - rdiff : float, optional - Relative step size to use in numerical differentiation. - method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function - Krylov method to use to approximate the Jacobian. - Can be a string, or a function implementing the same interface as - the iterative solvers in `scipy.sparse.linalg`. - - The default is `scipy.sparse.linalg.lgmres`. - inner_M : LinearOperator or InverseJacobian - Preconditioner for the inner Krylov iteration. - Note that you can use also inverse Jacobians as (adaptive) - preconditioners. For example, - - >>> jac = BroydenFirst() - >>> kjac = KrylovJacobian(inner_M=jac.inverse). 
- - If the preconditioner has a method named 'update', it will be called - as ``update(x, f)`` after each nonlinear step, with ``x`` giving - the current point, and ``f`` the current function value. - inner_tol, inner_maxiter, ... - Parameters to pass on to the \"inner\" Krylov solver. - See `scipy.sparse.linalg.gmres` for details. - outer_k : int, optional - Size of the subspace kept across LGMRES nonlinear iterations. - See `scipy.sparse.linalg.lgmres` for details. - %(params_extra)s - - See Also - -------- - scipy.sparse.linalg.gmres - scipy.sparse.linalg.lgmres - - Notes - ----- - This function implements a Newton-Krylov solver. The basic idea is - to compute the inverse of the Jacobian with an iterative Krylov - method. These methods require only evaluating the Jacobian-vector - products, which are conveniently approximated by numerical - differentiation: - - .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega - - Due to the use of iterative matrix inverses, these methods can - deal with large nonlinear problems. - - Scipy's `scipy.sparse.linalg` module offers a selection of Krylov - solvers to choose from. The default here is `lgmres`, which is a - variant of restarted GMRES iteration that reuses some of the - information obtained in the previous Newton steps to invert - Jacobians in subsequent steps. - - For a review on Newton-Krylov methods, see for example [KK]_, - and for the LGMRES sparse inverse method, see [BJM]_. - - References - ---------- - .. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003). - .. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel, - SIAM J. Matrix Anal. Appl. 26, 962 (2005). 
- - """ - - def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, - inner_M=None, outer_k=10, **kw): - self.preconditioner = inner_M - self.rdiff = rdiff - self.method = dict( - bicgstab=scipy.sparse.linalg.bicgstab, - gmres=scipy.sparse.linalg.gmres, - lgmres=scipy.sparse.linalg.lgmres, - cgs=scipy.sparse.linalg.cgs, - minres=scipy.sparse.linalg.minres, - ).get(method, method) - - self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) - - if self.method is scipy.sparse.linalg.gmres: - # Replace GMRES's outer iteration with Newton steps - self.method_kw['restrt'] = inner_maxiter - self.method_kw['maxiter'] = 1 - elif self.method is scipy.sparse.linalg.lgmres: - self.method_kw['outer_k'] = outer_k - # Replace LGMRES's outer iteration with Newton steps - self.method_kw['maxiter'] = 1 - # Carry LGMRES's `outer_v` vectors across nonlinear iterations - self.method_kw.setdefault('outer_v', []) - # But don't carry the corresponding Jacobian*v products, in case - # the Jacobian changes a lot in the nonlinear step - # - # XXX: some trust-region inspired ideas might be more efficient... - # See eg. Brown & Saad. But needs to be implemented separately - # since it's not an inexact Newton method. 
- self.method_kw.setdefault('store_outer_Av', False) - - for key, value in kw.items(): - if not key.startswith('inner_'): - raise ValueError("Unknown parameter %s" % key) - self.method_kw[key[6:]] = value - - def _update_diff_step(self): - mx = abs(self.x0).max() - mf = abs(self.f0).max() - self.omega = self.rdiff * max(1, mx) / max(1, mf) - - def matvec(self, v): - nv = norm(v) - if nv == 0: - return 0*v - sc = self.omega / nv - r = (self.func(self.x0 + sc*v) - self.f0) / sc - if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): - raise ValueError('Function returned non-finite results') - return r - - def solve(self, rhs, tol=0): - sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw) - return sol - - def update(self, x, f): - self.x0 = x - self.f0 = f - self._update_diff_step() - - # Update also the preconditioner, if possible - if self.preconditioner is not None: - if hasattr(self.preconditioner, 'update'): - self.preconditioner.update(x, f) - - def setup(self, x, f, func): - Jacobian.setup(self, x, f, func) - self.x0 = x - self.f0 = f - self.op = scipy.sparse.linalg.aslinearoperator(self) - - if self.rdiff is None: - self.rdiff = np.finfo(x.dtype).eps ** (1./2) - - self._update_diff_step() - - - # Setup also the preconditioner, if possible - if self.preconditioner is not None: - if hasattr(self.preconditioner, 'setup'): - self.preconditioner.setup(x, f, func) - - -#------------------------------------------------------------------------------ -# Wrapper functions -#------------------------------------------------------------------------------ - -def _nonlin_wrapper(name, jac): - """ - Construct a solver wrapper with given name and jacobian approx. 
- - It inspects the keyword arguments of ``jac.__init__``, and allows to - use the same arguments in the wrapper function, in addition to the - keyword arguments of `nonlin_solve` - - """ - import inspect - args, varargs, varkw, defaults = inspect.getargspec(jac.__init__) - kwargs = zip(args[-len(defaults):], defaults) - kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs]) - if kw_str: - kw_str = ", " + kw_str - kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs]) - if kwkw_str: - kwkw_str = kwkw_str + ", " - - # Construct the wrapper function so that it's keyword arguments - # are visible in pydoc.help etc. - wrapper = """ -def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, - f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, - tol_norm=None, line_search='armijo', callback=None, **kw): - jac = %(jac)s(%(kwkw)s **kw) - return nonlin_solve(F, xin, jac, iter, verbose, maxiter, - f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, - callback) -""" - - wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, - kwkw=kwkw_str) - ns = {} - ns.update(globals()) - exec wrapper in ns - func = ns[name] - func.__doc__ = jac.__doc__ - _set_doc(func) - return func - -broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) -broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) -anderson = _nonlin_wrapper('anderson', Anderson) -linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) -diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) -excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) -newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) - diff --git a/scipy-0.10.1/scipy/optimize/optimize.py b/scipy-0.10.1/scipy/optimize/optimize.py deleted file mode 100644 index e6ed1af4dd..0000000000 --- a/scipy-0.10.1/scipy/optimize/optimize.py +++ /dev/null @@ -1,1866 +0,0 @@ -#__docformat__ = "restructuredtext en" -# ******NOTICE*************** -# optimize.py module by Travis E. 
Oliphant -# -# You may copy and use this module as you see fit with no -# guarantee implied provided you keep this notice in all copies. -# *****END NOTICE************ - -# A collection of optimization algorithms. Version 0.5 -# CHANGES -# Added fminbound (July 2001) -# Added brute (Aug. 2002) -# Finished line search satisfying strong Wolfe conditions (Mar. 2004) -# Updated strong Wolfe conditions line search to use cubic-interpolation (Mar. 2004) - -# Minimization routines - -__all__ = ['fmin', 'fmin_powell','fmin_bfgs', 'fmin_ncg', 'fmin_cg', - 'fminbound','brent', 'golden','bracket','rosen','rosen_der', - 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', - 'line_search', 'check_grad'] - -__docformat__ = "restructuredtext en" - -import numpy -from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, \ - squeeze, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf -from linesearch import \ - line_search_BFGS, line_search_wolfe1, line_search_wolfe2, \ - line_search_wolfe2 as line_search - -# These have been copied from Numeric's MLab.py -# I don't think they made the transition to scipy_core -def max(m,axis=0): - """max(m,axis=0) returns the maximum of m along dimension axis. - """ - m = asarray(m) - return numpy.maximum.reduce(m,axis) - -def min(m,axis=0): - """min(m,axis=0) returns the minimum of m along dimension axis. - """ - m = asarray(m) - return numpy.minimum.reduce(m,axis) - -def is_array_scalar(x): - """Test whether `x` is either a scalar or an array scalar. - - """ - return len(atleast_1d(x) == 1) - -abs = absolute -import __builtin__ -pymin = __builtin__.min -pymax = __builtin__.max -__version__="0.7" -_epsilon = sqrt(numpy.finfo(float).eps) - -def vecnorm(x, ord=2): - if ord == Inf: - return numpy.amax(abs(x)) - elif ord == -Inf: - return numpy.amin(abs(x)) - else: - return numpy.sum(abs(x)**ord,axis=0)**(1.0/ord) - -def rosen(x): - """The Rosenbrock function. 
- - The function computed is - - sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0 - - Parameters - ---------- - x : array_like, 1D - The point at which the Rosenbrock function is to be computed. - - Returns - ------- - f : float - The value of the Rosenbrock function - - See Also - -------- - rosen_der, rosen_hess, rosen_hess_prod - """ - x = asarray(x) - return numpy.sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0,axis=0) - -def rosen_der(x): - """The derivative (i.e. gradient) of the Rosenbrock function. - - Parameters - ---------- - x : array_like, 1D - The point at which the derivative is to be computed. - - Returns - ------- - der : 1D numpy array - The gradient of the Rosenbrock function at `x`. - - See Also - -------- - rosen, rosen_hess, rosen_hess_prod - """ - x = asarray(x) - xm = x[1:-1] - xm_m1 = x[:-2] - xm_p1 = x[2:] - der = numpy.zeros_like(x) - der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm) - der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0]) - der[-1] = 200*(x[-1]-x[-2]**2) - return der - -def rosen_hess(x): - """The Hessian matrix of the Rosenbrock function. - - Parameters - ---------- - x : array_like, 1D - The point at which the Hessian matrix is to be computed. - - Returns - ------- - hess : 2D numpy array - The Hessian matrix of the Rosenbrock function at `x`. - - See Also - -------- - rosen, rosen_der, rosen_hess_prod - """ - x = atleast_1d(x) - H = numpy.diag(-400*x[:-1],1) - numpy.diag(400*x[:-1],-1) - diagonal = numpy.zeros(len(x), dtype=x.dtype) - diagonal[0] = 1200*x[0]**2 - 400*x[1] + 2 - diagonal[-1] = 200 - diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:] - H = H + numpy.diag(diagonal) - return H - -def rosen_hess_prod(x,p): - """Product of the Hessian matrix of the Rosenbrock function with a vector. - - Parameters - ---------- - x : array_like, 1D - The point at which the Hessian matrix is to be computed. - p : array_like, 1D, same size as `x`. - The vector to be multiplied by the Hessian matrix. 
- - Returns - ------- - v : 1D numpy array - The Hessian matrix of the Rosenbrock function at `x` multiplied - by the vector `p`. - - See Also - -------- - rosen, rosen_der, rosen_hess - """ - x = atleast_1d(x) - Hp = numpy.zeros(len(x), dtype=x.dtype) - Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1] - Hp[1:-1] = -400*x[:-2]*p[:-2]+(202+1200*x[1:-1]**2-400*x[2:])*p[1:-1] \ - -400*x[1:-1]*p[2:] - Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1] - return Hp - -def wrap_function(function, args): - ncalls = [0] - def function_wrapper(x): - ncalls[0] += 1 - return function(x, *args) - return ncalls, function_wrapper - -def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, - full_output=0, disp=1, retall=0, callback=None): - """ - Minimize a function using the downhill simplex algorithm. - - This algorithm only uses function values, not derivatives or second - derivatives. - - Parameters - ---------- - func : callable func(x,*args) - The objective function to be minimized. - x0 : ndarray - Initial guess. - args : tuple - Extra arguments passed to func, i.e. ``f(x,*args)``. - callback : callable - Called after each iteration, as callback(xk), where xk is the - current parameter vector. - - Returns - ------- - xopt : ndarray - Parameter that minimizes function. - fopt : float - Value of function at minimum: ``fopt = func(xopt)``. - iter : int - Number of iterations performed. - funcalls : int - Number of function calls made. - warnflag : int - 1 : Maximum number of function evaluations made. - 2 : Maximum number of iterations reached. - allvecs : list - Solution at each iteration. - - Other parameters - ---------------- - xtol : float - Relative error in xopt acceptable for convergence. - ftol : number - Relative error in func(xopt) acceptable for convergence. - maxiter : int - Maximum number of iterations to perform. - maxfun : number - Maximum number of function evaluations to make. 
- full_output : bool - Set to True if fopt and warnflag outputs are desired. - disp : bool - Set to True to print convergence messages. - retall : bool - Set to True to return list of solutions at each iteration. - - Notes - ----- - Uses a Nelder-Mead simplex algorithm to find the minimum of function of - one or more variables. - - This algorithm has a long history of successful use in applications. - But it will usually be slower than an algorithm that uses first or - second derivative information. In practice it can have poor - performance in high-dimensional problems and is not robust to - minimizing complicated functions. Additionally, there currently is no - complete theory describing when the algorithm will successfully - converge to the minimum, or how fast it will if it does. - - References - ---------- - Nelder, J.A. and Mead, R. (1965), "A simplex method for function - minimization", The Computer Journal, 7, pp. 308-313 - Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now - Respectable", in Numerical Analysis 1995, Proceedings of the - 1995 Dundee Biennial Conference in Numerical Analysis, D.F. - Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, - Harlow, UK, pp. 191-208. 
- - """ - fcalls, func = wrap_function(func, args) - x0 = asfarray(x0).flatten() - N = len(x0) - rank = len(x0.shape) - if not -1 < rank < 2: - raise ValueError("Initial guess must be a scalar or rank-1 sequence.") - if maxiter is None: - maxiter = N * 200 - if maxfun is None: - maxfun = N * 200 - - rho = 1; chi = 2; psi = 0.5; sigma = 0.5; - one2np1 = range(1,N+1) - - if rank == 0: - sim = numpy.zeros((N+1,), dtype=x0.dtype) - else: - sim = numpy.zeros((N+1,N), dtype=x0.dtype) - fsim = numpy.zeros((N+1,), float) - sim[0] = x0 - if retall: - allvecs = [sim[0]] - fsim[0] = func(x0) - nonzdelt = 0.05 - zdelt = 0.00025 - for k in range(0,N): - y = numpy.array(x0,copy=True) - if y[k] != 0: - y[k] = (1+nonzdelt)*y[k] - else: - y[k] = zdelt - - sim[k+1] = y - f = func(y) - fsim[k+1] = f - - ind = numpy.argsort(fsim) - fsim = numpy.take(fsim,ind,0) - # sort so sim[0,:] has the lowest function value - sim = numpy.take(sim,ind,0) - - iterations = 1 - - while (fcalls[0] < maxfun and iterations < maxiter): - if (max(numpy.ravel(abs(sim[1:]-sim[0]))) <= xtol \ - and max(abs(fsim[0]-fsim[1:])) <= ftol): - break - - xbar = numpy.add.reduce(sim[:-1],0) / N - xr = (1+rho)*xbar - rho*sim[-1] - fxr = func(xr) - doshrink = 0 - - if fxr < fsim[0]: - xe = (1+rho*chi)*xbar - rho*chi*sim[-1] - fxe = func(xe) - - if fxe < fxr: - sim[-1] = xe - fsim[-1] = fxe - else: - sim[-1] = xr - fsim[-1] = fxr - else: # fsim[0] <= fxr - if fxr < fsim[-2]: - sim[-1] = xr - fsim[-1] = fxr - else: # fxr >= fsim[-2] - # Perform contraction - if fxr < fsim[-1]: - xc = (1+psi*rho)*xbar - psi*rho*sim[-1] - fxc = func(xc) - - if fxc <= fxr: - sim[-1] = xc - fsim[-1] = fxc - else: - doshrink=1 - else: - # Perform an inside contraction - xcc = (1-psi)*xbar + psi*sim[-1] - fxcc = func(xcc) - - if fxcc < fsim[-1]: - sim[-1] = xcc - fsim[-1] = fxcc - else: - doshrink = 1 - - if doshrink: - for j in one2np1: - sim[j] = sim[0] + sigma*(sim[j] - sim[0]) - fsim[j] = func(sim[j]) - - ind = numpy.argsort(fsim) - sim = 
numpy.take(sim,ind,0) - fsim = numpy.take(fsim,ind,0) - if callback is not None: - callback(sim[0]) - iterations += 1 - if retall: - allvecs.append(sim[0]) - - x = sim[0] - fval = min(fsim) - warnflag = 0 - - if fcalls[0] >= maxfun: - warnflag = 1 - if disp: - print "Warning: Maximum number of function evaluations has "\ - "been exceeded." - elif iterations >= maxiter: - warnflag = 2 - if disp: - print "Warning: Maximum number of iterations has been exceeded" - else: - if disp: - print "Optimization terminated successfully." - print " Current function value: %f" % fval - print " Iterations: %d" % iterations - print " Function evaluations: %d" % fcalls[0] - - - if full_output: - retlist = x, fval, iterations, fcalls[0], warnflag - if retall: - retlist += (allvecs,) - else: - retlist = x - if retall: - retlist = (x, allvecs) - - return retlist - - -def approx_fprime(xk,f,epsilon,*args): - f0 = f(*((xk,)+args)) - grad = numpy.zeros((len(xk),), float) - ei = numpy.zeros((len(xk),), float) - for k in range(len(xk)): - ei[k] = epsilon - grad[k] = (f(*((xk+ei,)+args)) - f0)/epsilon - ei[k] = 0.0 - return grad - -def check_grad(func, grad, x0, *args): - """Check the correctness of a gradient function - by comparing it against a finite-difference approximation - of the gradient. - - Parameters - ---------- - func: callable func(x0,*args) - Function whose derivative is to be checked - grad: callable grad(x0, *args) - Gradient of func - x0: ndarray - Points to check grad against finite difference - approximation of grad using func. - args: optional - Extra arguments passed to func and grad - - Returns - ------- - err: float - The square root of the sum of squares (i.e. the 2-norm) - of the difference between grad(x0, *args) and the - finite difference approximation of grad using func at the - points x0. 
- - """ - return sqrt(sum((grad(x0,*args)-approx_fprime(x0,func,_epsilon,*args))**2)) - -def approx_fhess_p(x0,p,fprime,epsilon,*args): - f2 = fprime(*((x0+epsilon*p,)+args)) - f1 = fprime(*((x0,)+args)) - return (f2 - f1)/epsilon - - -def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, - epsilon=_epsilon, maxiter=None, full_output=0, disp=1, - retall=0, callback=None): - """Minimize a function using the BFGS algorithm. - - Parameters - ---------- - f : callable f(x,*args) - Objective function to be minimized. - x0 : ndarray - Initial guess. - fprime : callable f'(x,*args) - Gradient of f. - args : tuple - Extra arguments passed to f and fprime. - gtol : float - Gradient norm must be less than gtol before succesful termination. - norm : float - Order of norm (Inf is max, -Inf is min) - epsilon : int or ndarray - If fprime is approximated, use this value for the step size. - callback : callable - An optional user-supplied function to call after each - iteration. Called as callback(xk), where xk is the - current parameter vector. - - Returns - ------- - xopt : ndarray - Parameters which minimize f, i.e. f(xopt) == fopt. - fopt : float - Minimum value. - gopt : ndarray - Value of gradient at minimum, f'(xopt), which should be near 0. - Bopt : ndarray - Value of 1/f''(xopt), i.e. the inverse hessian matrix. - func_calls : int - Number of function_calls made. - grad_calls : int - Number of gradient calls made. - warnflag : integer - 1 : Maximum number of iterations exceeded. - 2 : Gradient and/or function calls not changing. - allvecs : list - Results at each iteration. Only returned if retall is True. - - Other Parameters - ---------------- - maxiter : int - Maximum number of iterations to perform. - full_output : bool - If True,return fopt, func_calls, grad_calls, and warnflag - in addition to xopt. - disp : bool - Print convergence message if True. - retall : bool - Return a list of results at each iteration if True. 
- - Notes - ----- - Optimize the function, f, whose gradient is given by fprime - using the quasi-Newton method of Broyden, Fletcher, Goldfarb, - and Shanno (BFGS) - - References - ---------- - Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. - - """ - x0 = asarray(x0).flatten() - if x0.ndim == 0: - x0.shape = (1,) - if maxiter is None: - maxiter = len(x0)*200 - func_calls, f = wrap_function(f, args) - if fprime is None: - grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) - else: - grad_calls, myfprime = wrap_function(fprime, args) - gfk = myfprime(x0) - k = 0 - N = len(x0) - I = numpy.eye(N,dtype=int) - Hk = I - old_fval = f(x0) - old_old_fval = old_fval + 5000 - xk = x0 - if retall: - allvecs = [x0] - sk = [2*gtol] - warnflag = 0 - gnorm = vecnorm(gfk,ord=norm) - while (gnorm > gtol) and (k < maxiter): - pk = -numpy.dot(Hk,gfk) - alpha_k, fc, gc, old_fval2, old_old_fval2, gfkp1 = \ - line_search_wolfe1(f,myfprime,xk,pk,gfk, - old_fval,old_old_fval) - if alpha_k is not None: - old_fval = old_fval2 - old_old_fval = old_old_fval2 - else: - # line search failed: try different one. - alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ - line_search_wolfe2(f,myfprime,xk,pk,gfk, - old_fval,old_old_fval) - if alpha_k is None: - # This line search also failed to find a better solution. - warnflag = 2 - break - xkp1 = xk + alpha_k * pk - if retall: - allvecs.append(xkp1) - sk = xkp1 - xk - xk = xkp1 - if gfkp1 is None: - gfkp1 = myfprime(xkp1) - - yk = gfkp1 - gfk - gfk = gfkp1 - if callback is not None: - callback(xk) - k += 1 - gnorm = vecnorm(gfk,ord=norm) - if (gnorm <= gtol): - break - - if not numpy.isfinite(old_fval): - # We correctly found +-Inf as optimal value, or something went - # wrong. 
- warnflag = 2 - break - - try: # this was handled in numeric, let it remaines for more safety - rhok = 1.0 / (numpy.dot(yk,sk)) - except ZeroDivisionError: - rhok = 1000.0 - print "Divide-by-zero encountered: rhok assumed large" - if isinf(rhok): # this is patch for numpy - rhok = 1000.0 - print "Divide-by-zero encountered: rhok assumed large" - A1 = I - sk[:,numpy.newaxis] * yk[numpy.newaxis,:] * rhok - A2 = I - yk[:,numpy.newaxis] * sk[numpy.newaxis,:] * rhok - Hk = numpy.dot(A1,numpy.dot(Hk,A2)) + rhok * sk[:,numpy.newaxis] \ - * sk[numpy.newaxis,:] - - if disp or full_output: - fval = old_fval - if warnflag == 2: - if disp: - print "Warning: Desired error not necessarily achieved" \ - "due to precision loss" - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] - - elif k >= maxiter: - warnflag = 1 - if disp: - print "Warning: Maximum number of iterations has been exceeded" - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] - else: - if disp: - print "Optimization terminated successfully." - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] - - if full_output: - retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag - if retall: - retlist += (allvecs,) - else: - retlist = xk - if retall: - retlist = (xk, allvecs) - - return retlist - - -def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, - maxiter=None, full_output=0, disp=1, retall=0, callback=None): - """Minimize a function using a nonlinear conjugate gradient algorithm. - - Parameters - ---------- - f : callable f(x,*args) - Objective function to be minimized. - x0 : ndarray - Initial guess. 
- fprime : callable f'(x,*args) - Function which computes the gradient of f. - args : tuple - Extra arguments passed to f and fprime. - gtol : float - Stop when norm of gradient is less than gtol. - norm : float - Order of vector norm to use. -Inf is min, Inf is max. - epsilon : float or ndarray - If fprime is approximated, use this value for the step - size (can be scalar or vector). - callback : callable - An optional user-supplied function, called after each - iteration. Called as callback(xk), where xk is the - current parameter vector. - - Returns - ------- - xopt : ndarray - Parameters which minimize f, i.e. f(xopt) == fopt. - fopt : float - Minimum value found, f(xopt). - func_calls : int - The number of function_calls made. - grad_calls : int - The number of gradient calls made. - warnflag : int - 1 : Maximum number of iterations exceeded. - 2 : Gradient and/or function calls not changing. - allvecs : ndarray - If retall is True (see other parameters below), then this - vector containing the result at each iteration is returned. - - Other Parameters - ---------------- - maxiter : int - Maximum number of iterations to perform. - full_output : bool - If True then return fopt, func_calls, grad_calls, and - warnflag in addition to xopt. - disp : bool - Print convergence message if True. - retall : bool - Return a list of results at each iteration if True. - - Notes - ----- - Optimize the function, f, whose gradient is given by fprime - using the nonlinear conjugate gradient algorithm of Polak and - Ribiere. See Wright & Nocedal, 'Numerical Optimization', - 1999, pg. 120-122. 
- - """ - x0 = asarray(x0).flatten() - if maxiter is None: - maxiter = len(x0)*200 - func_calls, f = wrap_function(f, args) - if fprime is None: - grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) - else: - grad_calls, myfprime = wrap_function(fprime, args) - gfk = myfprime(x0) - k = 0 - N = len(x0) - xk = x0 - old_fval = f(xk) - old_old_fval = old_fval + 5000 - - if retall: - allvecs = [xk] - sk = [2*gtol] - warnflag = 0 - pk = -gfk - gnorm = vecnorm(gfk,ord=norm) - while (gnorm > gtol) and (k < maxiter): - deltak = numpy.dot(gfk,gfk) - - # These values are modified by the line search, even if it fails - old_fval_backup = old_fval - old_old_fval_backup = old_old_fval - - alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ - line_search_wolfe1(f,myfprime,xk,pk,gfk,old_fval, - old_old_fval,c2=0.4) - if alpha_k is None: # line search failed -- use different one. - alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ - line_search_wolfe2(f,myfprime,xk,pk,gfk, - old_fval_backup,old_old_fval_backup) - if alpha_k is None or alpha_k == 0: - # This line search also failed to find a better solution. 
- warnflag = 2 - break - xk = xk + alpha_k*pk - if retall: - allvecs.append(xk) - if gfkp1 is None: - gfkp1 = myfprime(xk) - yk = gfkp1 - gfk - beta_k = pymax(0,numpy.dot(yk,gfkp1)/deltak) - pk = -gfkp1 + beta_k * pk - gfk = gfkp1 - gnorm = vecnorm(gfk,ord=norm) - if callback is not None: - callback(xk) - k += 1 - - - if disp or full_output: - fval = old_fval - if warnflag == 2: - if disp: - print "Warning: Desired error not necessarily achieved due to precision loss" - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] - - elif k >= maxiter: - warnflag = 1 - if disp: - print "Warning: Maximum number of iterations has been exceeded" - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] - else: - if disp: - print "Optimization terminated successfully." - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] - - - if full_output: - retlist = xk, fval, func_calls[0], grad_calls[0], warnflag - if retall: - retlist += (allvecs,) - else: - retlist = xk - if retall: - retlist = (xk, allvecs) - - return retlist - -def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, - epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, - callback=None): - """Unconstrained minimization of a function using the Newton-CG method. - - - Parameters - ---------- - f : callable f(x,*args) - Objective function to be minimized. - x0 : ndarray - Initial guess. - fprime : callable f'(x,*args) - Gradient of f. - fhess_p : callable fhess_p(x,p,*args) - Function which computes the Hessian of f times an - arbitrary vector, p. 
- fhess : callable fhess(x,*args) - Function to compute the Hessian matrix of f. - args : tuple - Extra arguments passed to f, fprime, fhess_p, and fhess - (the same set of extra arguments is supplied to all of - these functions). - epsilon : float or ndarray - If fhess is approximated, use this value for the step size. - callback : callable - An optional user-supplied function which is called after - each iteration. Called as callback(xk), where xk is the - current parameter vector. - - Returns - ------- - xopt : ndarray - Parameters which minimizer f, i.e. ``f(xopt) == fopt``. - fopt : float - Value of the function at xopt, i.e. ``fopt = f(xopt)``. - fcalls : int - Number of function calls made. - gcalls : int - Number of gradient calls made. - hcalls : int - Number of hessian calls made. - warnflag : int - Warnings generated by the algorithm. - 1 : Maximum number of iterations exceeded. - allvecs : list - The result at each iteration, if retall is True (see below). - - Other Parameters - ---------------- - avextol : float - Convergence is assumed when the average relative error in - the minimizer falls below this amount. - maxiter : int - Maximum number of iterations to perform. - full_output : bool - If True, return the optional outputs. - disp : bool - If True, print convergence message. - retall : bool - If True, return a list of results at each iteration. - - Notes - ----- - Only one of `fhess_p` or `fhess` need to be given. If `fhess` - is provided, then `fhess_p` will be ignored. If neither `fhess` - nor `fhess_p` is provided, then the hessian product will be - approximated using finite differences on `fprime`. `fhess_p` - must compute the hessian times an arbitrary vector. If it is not - given, finite-differences on `fprime` are used to compute - it. - - Newton-CG methods are also called truncated Newton methods. This - function differs from scipy.optimize.fmin_tnc because - - 1. 
scipy.optimize.fmin_ncg is written purely in python using numpy - and scipy while scipy.optimize.fmin_tnc calls a C function. - 2. scipy.optimize.fmin_ncg is only for unconstrained minimization - while scipy.optimize.fmin_tnc is for unconstrained minimization - or box constrained minimization. (Box constraints give - lower and upper bounds for each variable seperately.) - - References - ---------- - Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140. - - """ - x0 = asarray(x0).flatten() - fcalls, f = wrap_function(f, args) - gcalls, fprime = wrap_function(fprime, args) - hcalls = 0 - if maxiter is None: - maxiter = len(x0)*200 - - xtol = len(x0)*avextol - update = [2*xtol] - xk = x0 - if retall: - allvecs = [xk] - k = 0 - old_fval = f(x0) - while (numpy.add.reduce(abs(update)) > xtol) and (k < maxiter): - # Compute a search direction pk by applying the CG method to - # del2 f(xk) p = - grad f(xk) starting from 0. - b = -fprime(xk) - maggrad = numpy.add.reduce(abs(b)) - eta = min([0.5,numpy.sqrt(maggrad)]) - termcond = eta * maggrad - xsupi = zeros(len(x0), dtype=x0.dtype) - ri = -b - psupi = -ri - i = 0 - dri0 = numpy.dot(ri,ri) - - if fhess is not None: # you want to compute hessian once. - A = fhess(*(xk,)+args) - hcalls = hcalls + 1 - - while numpy.add.reduce(abs(ri)) > termcond: - if fhess is None: - if fhess_p is None: - Ap = approx_fhess_p(xk,psupi,fprime,epsilon) - else: - Ap = fhess_p(xk,psupi, *args) - hcalls = hcalls + 1 - else: - Ap = numpy.dot(A,psupi) - # check curvature - Ap = asarray(Ap).squeeze() # get rid of matrices... - curv = numpy.dot(psupi,Ap) - if 0 <= curv <= 3*numpy.finfo(numpy.float64).eps: - break - elif curv < 0: - if (i > 0): - break - else: - xsupi = xsupi + dri0/curv * psupi - break - alphai = dri0 / curv - xsupi = xsupi + alphai * psupi - ri = ri + alphai * Ap - dri1 = numpy.dot(ri,ri) - betai = dri1 / dri0 - psupi = -ri + betai * psupi - i = i + 1 - dri0 = dri1 # update numpy.dot(ri,ri) for next time. 
- - pk = xsupi # search direction is solution to system. - gfk = -b # gradient at xk - alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval) - - update = alphak * pk - xk = xk + update # upcast if necessary - if callback is not None: - callback(xk) - if retall: - allvecs.append(xk) - k += 1 - - if disp or full_output: - fval = old_fval - if k >= maxiter: - warnflag = 1 - if disp: - print "Warning: Maximum number of iterations has been exceeded" - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % fcalls[0] - print " Gradient evaluations: %d" % gcalls[0] - print " Hessian evaluations: %d" % hcalls - else: - warnflag = 0 - if disp: - print "Optimization terminated successfully." - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % fcalls[0] - print " Gradient evaluations: %d" % gcalls[0] - print " Hessian evaluations: %d" % hcalls - - if full_output: - retlist = xk, fval, fcalls[0], gcalls[0], hcalls, warnflag - if retall: - retlist += (allvecs,) - else: - retlist = xk - if retall: - retlist = (xk, allvecs) - - return retlist - - -def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, - full_output=0, disp=1): - """Bounded minimization for scalar functions. - - Parameters - ---------- - func : callable f(x,*args) - Objective function to be minimized (must accept and return scalars). - x1, x2 : float or array scalar - The optimization bounds. - args : tuple - Extra arguments passed to function. - xtol : float - The convergence tolerance. - maxfun : int - Maximum number of function evaluations allowed. - full_output : bool - If True, return optional outputs. - disp : int - If non-zero, print messages. - 0 : no message printing. - 1 : non-convergence notification messages only. - 2 : print a message on convergence too. - 3 : print iteration results. 
- - - Returns - ------- - xopt : ndarray - Parameters (over given interval) which minimize the - objective function. - fval : number - The function value at the minimum point. - ierr : int - An error flag (0 if converged, 1 if maximum number of - function calls reached). - numfunc : int - The number of function calls made. - - Notes - ----- - Finds a local minimizer of the scalar function `func` in the - interval x1 < xopt < x2 using Brent's method. (See `brent` - for auto-bracketing). - - """ - # Test bounds are of correct form - - if not (is_array_scalar(x1) and is_array_scalar(x2)): - raise ValueError("Optimisation bounds must be scalars" - " or array scalars.") - if x1 > x2: - raise ValueError("The lower bound exceeds the upper bound.") - - flag = 0 - header = ' Func-count x f(x) Procedure' - step=' initial' - - sqrt_eps = sqrt(2.2e-16) - golden_mean = 0.5*(3.0-sqrt(5.0)) - a, b = x1, x2 - fulc = a + golden_mean*(b-a) - nfc, xf = fulc, fulc - rat = e = 0.0 - x = xf - fx = func(x,*args) - num = 1 - fmin_data = (1, xf, fx) - - ffulc = fnfc = fx - xm = 0.5*(a+b) - tol1 = sqrt_eps*abs(xf) + xtol / 3.0 - tol2 = 2.0*tol1 - - if disp > 2: - print (" ") - print (header) - print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) - - - while ( abs(xf-xm) > (tol2 - 0.5*(b-a)) ): - golden = 1 - # Check for parabolic fit - if abs(e) > tol1: - golden = 0 - r = (xf-nfc)*(fx-ffulc) - q = (xf-fulc)*(fx-fnfc) - p = (xf-fulc)*q - (xf-nfc)*r - q = 2.0*(q-r) - if q > 0.0: p = -p - q = abs(q) - r = e - e = rat - - # Check for acceptability of parabola - if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and \ - (p < q*(b-xf))): - rat = (p+0.0) / q; - x = xf + rat - step = ' parabolic' - - if ((x-a) < tol2) or ((b-x) < tol2): - si = numpy.sign(xm-xf) + ((xm-xf)==0) - rat = tol1*si - else: # do a golden section step - golden = 1 - - if golden: # Do a golden-section step - if xf >= xm: - e=a-xf - else: - e=b-xf - rat = golden_mean*e - step = ' golden' - - si = numpy.sign(rat) + (rat == 0) - 
x = xf + si*max([abs(rat), tol1]) - fu = func(x,*args) - num += 1 - fmin_data = (num, x, fu) - if disp > 2: - print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) - - if fu <= fx: - if x >= xf: - a = xf - else: - b = xf - fulc, ffulc = nfc, fnfc - nfc, fnfc = xf, fx - xf, fx = x, fu - else: - if x < xf: - a = x - else: - b = x - if (fu <= fnfc) or (nfc == xf): - fulc, ffulc = nfc, fnfc - nfc, fnfc = x, fu - elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): - fulc, ffulc = x, fu - - xm = 0.5*(a+b) - tol1 = sqrt_eps*abs(xf) + xtol/3.0 - tol2 = 2.0*tol1 - - if num >= maxfun: - flag = 1 - fval = fx - if disp > 0: - _endprint(x, flag, fval, maxfun, xtol, disp) - if full_output: - return xf, fval, flag, num - else: - return xf - - fval = fx - if disp > 0: - _endprint(x, flag, fval, maxfun, xtol, disp) - - if full_output: - return xf, fval, flag, num - else: - return xf - -class Brent: - #need to rethink design of __init__ - def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, - full_output=0): - self.func = func - self.args = args - self.tol = tol - self.maxiter = maxiter - self._mintol = 1.0e-11 - self._cg = 0.3819660 - self.xmin = None - self.fval = None - self.iter = 0 - self.funcalls = 0 - - #need to rethink design of set_bracket (new options, etc) - def set_bracket(self, brack = None): - self.brack = brack - def get_bracket_info(self): - #set up - func = self.func - args = self.args - brack = self.brack - ### BEGIN core bracket_info code ### - ### carefully DOCUMENT any CHANGES in core ## - if brack is None: - xa,xb,xc,fa,fb,fc,funcalls = bracket(func, args=args) - elif len(brack) == 2: - xa,xb,xc,fa,fb,fc,funcalls = bracket(func, xa=brack[0], - xb=brack[1], args=args) - elif len(brack) == 3: - xa,xb,xc = brack - if (xa > xc): # swap so xa < xc can be assumed - dum = xa; xa=xc; xc=dum - if not ((xa < xb) and (xb < xc)): - raise ValueError("Not a bracketing interval.") - fa = func(*((xa,)+args)) - fb = func(*((xb,)+args)) - fc = func(*((xc,)+args)) - if 
not ((fb=xmid): deltax=a-x # do a golden section step - else: deltax=b-x - rat = _cg*deltax - else: # do a parabolic step - tmp1 = (x-w)*(fx-fv) - tmp2 = (x-v)*(fx-fw) - p = (x-v)*tmp2 - (x-w)*tmp1; - tmp2 = 2.0*(tmp2-tmp1) - if (tmp2 > 0.0): p = -p - tmp2 = abs(tmp2) - dx_temp = deltax - deltax= rat - # check parabolic fit - if ((p > tmp2*(a-x)) and (p < tmp2*(b-x)) and (abs(p) < abs(0.5*tmp2*dx_temp))): - rat = p*1.0/tmp2 # if parabolic step is useful. - u = x + rat - if ((u-a) < tol2 or (b-u) < tol2): - if xmid-x >= 0: rat = tol1 - else: rat = -tol1 - else: - if (x>=xmid): deltax=a-x # if it's not do a golden section step - else: deltax=b-x - rat = _cg*deltax - - if (abs(rat) < tol1): # update by at least tol1 - if rat >= 0: u = x + tol1 - else: u = x - tol1 - else: - u = x + rat - fu = func(*((u,)+self.args)) # calculate new output value - funcalls += 1 - - if (fu > fx): # if it's bigger than current - if (u= x): a = x - else: b = x - v=w; w=x; x=u - fv=fw; fw=fx; fx=fu - - iter += 1 - ################################# - #END CORE ALGORITHM - ################################# - - self.xmin = x - self.fval = fx - self.iter = iter - self.funcalls = funcalls - - def get_result(self, full_output=False): - if full_output: - return self.xmin, self.fval, self.iter, self.funcalls - else: - return self.xmin - - -def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): - """Given a function of one-variable and a possible bracketing interval, - return the minimum of the function isolated to a fractional precision of - tol. - - Parameters - ---------- - func : callable f(x,*args) - Objective function. - args - Additional arguments (if present). 
- brack : tuple - Triple (a,b,c) where (a xc): # swap so xa < xc can be assumed - dum = xa; xa=xc; xc=dum - if not ((xa < xb) and (xb < xc)): - raise ValueError("Not a bracketing interval.") - fa = func(*((xa,)+args)) - fb = func(*((xb,)+args)) - fc = func(*((xc,)+args)) - if not ((fb abs(xb-xa)): - x1 = xb - x2 = xb + _gC*(xc-xb) - else: - x2 = xb - x1 = xb - _gC*(xb-xa) - f1 = func(*((x1,)+args)) - f2 = func(*((x2,)+args)) - funcalls += 2 - while (abs(x3-x0) > tol*(abs(x1)+abs(x2))): - if (f2 < f1): - x0 = x1; x1 = x2; x2 = _gR*x1 + _gC*x3 - f1 = f2; f2 = func(*((x2,)+args)) - else: - x3 = x2; x2 = x1; x1 = _gR*x2 + _gC*x0 - f2 = f1; f1 = func(*((x1,)+args)) - funcalls += 1 - if (f1 < f2): - xmin = x1 - fval = f1 - else: - xmin = x2 - fval = f2 - if full_output: - return xmin, fval, funcalls - else: - return xmin - - -def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): - """Given a function and distinct initial points, search in the - downhill direction (as defined by the initital points) and return - new points xa, xb, xc that bracket the minimum of the function - f(xa) > f(xb) < f(xc). It doesn't always mean that obtained - solution will satisfy xa<=x<=xb - - Parameters - ---------- - func : callable f(x,*args) - Objective function to minimize. - xa, xb : float - Bracketing interval. - args : tuple - Additional arguments (if present), passed to `func`. - grow_limit : float - Maximum grow limit. - maxiter : int - Maximum number of iterations to perform. - - Returns - ------- - xa, xb, xc : float - Bracket. - fa, fb, fc : float - Objective function values in bracket. - funcalls : int - Number of function evaluations made. 
- - """ - _gold = 1.618034 - _verysmall_num = 1e-21 - fa = func(*(xa,)+args) - fb = func(*(xb,)+args) - if (fa < fb): # Switch so fa > fb - dum = xa; xa = xb; xb = dum - dum = fa; fa = fb; fb = dum - xc = xb + _gold*(xb-xa) - fc = func(*((xc,)+args)) - funcalls = 3 - iter = 0 - while (fc < fb): - tmp1 = (xb - xa)*(fb-fc) - tmp2 = (xb - xc)*(fb-fa) - val = tmp2-tmp1 - if abs(val) < _verysmall_num: - denom = 2.0*_verysmall_num - else: - denom = 2.0*val - w = xb - ((xb-xc)*tmp2-(xb-xa)*tmp1)/denom - wlim = xb + grow_limit*(xc-xb) - if iter > maxiter: - raise RuntimeError("Too many iterations.") - iter += 1 - if (w-xc)*(xb-w) > 0.0: - fw = func(*((w,)+args)) - funcalls += 1 - if (fw < fc): - xa = xb; xb=w; fa=fb; fb=fw - return xa, xb, xc, fa, fb, fc, funcalls - elif (fw > fb): - xc = w; fc=fw - return xa, xb, xc, fa, fb, fc, funcalls - w = xc + _gold*(xc-xb) - fw = func(*((w,)+args)) - funcalls += 1 - elif (w-wlim)*(wlim-xc) >= 0.0: - w = wlim - fw = func(*((w,)+args)) - funcalls += 1 - elif (w-wlim)*(xc-w) > 0.0: - fw = func(*((w,)+args)) - funcalls += 1 - if (fw < fc): - xb=xc; xc=w; w=xc+_gold*(xc-xb) - fb=fc; fc=fw; fw=func(*((w,)+args)) - funcalls += 1 - else: - w = xc + _gold*(xc-xb) - fw = func(*((w,)+args)) - funcalls += 1 - xa=xb; xb=xc; xc=w - fa=fb; fb=fc; fc=fw - return xa, xb, xc, fa, fb, fc, funcalls - - - -def _linesearch_powell(func, p, xi, tol=1e-3): - """Line-search algorithm using fminbound. - - Find the minimium of the function ``func(x0+ alpha*direc)``. - - """ - def myfunc(alpha): - return func(p + alpha * xi) - alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) - xi = alpha_min*xi - return squeeze(fret), p+xi, xi - - -def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, - maxfun=None, full_output=0, disp=1, retall=0, callback=None, - direc=None): - """ - Minimize a function using modified Powell's method. This method - only uses function values, not derivatives. 
- - Parameters - ---------- - func : callable f(x,*args) - Objective function to be minimized. - x0 : ndarray - Initial guess. - args : tuple - Extra arguments passed to func. - callback : callable - An optional user-supplied function, called after each - iteration. Called as ``callback(xk)``, where ``xk`` is the - current parameter vector. - direc : ndarray - Initial direction set. - - Returns - ------- - xopt : ndarray - Parameter which minimizes `func`. - fopt : number - Value of function at minimum: ``fopt = func(xopt)``. - direc : ndarray - Current direction set. - iter : int - Number of iterations. - funcalls : int - Number of function calls made. - warnflag : int - Integer warning flag: - 1 : Maximum number of function evaluations. - 2 : Maximum number of iterations. - allvecs : list - List of solutions at each iteration. - - Other Parameters - ---------------- - xtol : float - Line-search error tolerance. - ftol : float - Relative error in ``func(xopt)`` acceptable for convergence. - maxiter : int - Maximum number of iterations to perform. - maxfun : int - Maximum number of function evaluations to make. - full_output : bool - If True, fopt, xi, direc, iter, funcalls, and - warnflag are returned. - disp : bool - If True, print convergence messages. - retall : bool - If True, return a list of the solution at each iteration. - - Notes - ----- - Uses a modification of Powell's method to find the minimum of - a function of N variables. Powell's method is a conjugate - direction method. - - The algorithm has two loops. The outer loop - merely iterates over the inner loop. The inner loop minimizes - over each current direction in the direction set. At the end - of the inner loop, if certain conditions are met, the direction - that gave the largest decrease is dropped and replaced with - the difference between the current estiamted x and the estimated - x from the beginning of the inner-loop. 
- - The technical conditions for replacing the direction of greatest - increase amount to checking that - - 1. No further gain can be made along the direction of greatest increase - from that iteration. - 2. The direction of greatest increase accounted for a large sufficient - fraction of the decrease in the function value from that iteration of - the inner loop. - - - References - ---------- - Powell M.J.D. (1964) An efficient method for finding the minimum of a - function of several variables without calculating derivatives, - Computer Journal, 7 (2):155-162. - - Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: - Numerical Recipes (any edition), Cambridge University Press - - """ - # we need to use a mutable object here that we can update in the - # wrapper function - fcalls, func = wrap_function(func, args) - x = asarray(x0).flatten() - if retall: - allvecs = [x] - N = len(x) - rank = len(x.shape) - if not -1 < rank < 2: - raise ValueError("Initial guess must be a scalar or rank-1 sequence.") - if maxiter is None: - maxiter = N * 1000 - if maxfun is None: - maxfun = N * 1000 - - - if direc is None: - direc = eye(N, dtype=float) - else: - direc = asarray(direc, dtype=float) - - fval = squeeze(func(x)) - x1 = x.copy() - iter = 0; - ilist = range(N) - while True: - fx = fval - bigind = 0 - delta = 0.0 - for i in ilist: - direc1 = direc[i] - fx2 = fval - fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) - if (fx2 - fval) > delta: - delta = fx2 - fval - bigind = i - iter += 1 - if callback is not None: - callback(x) - if retall: - allvecs.append(x) - if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break - if fcalls[0] >= maxfun: break - if iter >= maxiter: break - - # Construct the extrapolated point - direc1 = x - x1 - x2 = 2*x - x1 - x1 = x.copy() - fx2 = squeeze(func(x2)) - - if (fx > fx2): - t = 2.0*(fx+fx2-2.0*fval) - temp = (fx-fval-delta) - t *= temp*temp - temp = fx-fx2 - t -= delta*temp*temp - if t < 0.0: - fval, x, 
direc1 = _linesearch_powell(func, x, direc1, - tol=xtol*100) - direc[bigind] = direc[-1] - direc[-1] = direc1 - - warnflag = 0 - if fcalls[0] >= maxfun: - warnflag = 1 - if disp: - print "Warning: Maximum number of function evaluations has "\ - "been exceeded." - elif iter >= maxiter: - warnflag = 2 - if disp: - print "Warning: Maximum number of iterations has been exceeded" - else: - if disp: - print "Optimization terminated successfully." - print " Current function value: %f" % fval - print " Iterations: %d" % iter - print " Function evaluations: %d" % fcalls[0] - - x = squeeze(x) - - if full_output: - retlist = x, fval, direc, iter, fcalls[0], warnflag - if retall: - retlist += (allvecs,) - else: - retlist = x - if retall: - retlist = (x, allvecs) - - return retlist - - - - -def _endprint(x, flag, fval, maxfun, xtol, disp): - if flag == 0: - if disp > 1: - print "\nOptimization terminated successfully;\n" \ - "The returned value satisfies the termination criteria\n" \ - "(using xtol = ", xtol, ")" - if flag == 1: - print "\nMaximum number of function evaluations exceeded --- " \ - "increase maxfun argument.\n" - return - - -def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin): - """Minimize a function over a given range by brute force. - - Parameters - ---------- - func : callable ``f(x,*args)`` - Objective function to be minimized. - ranges : tuple - Each element is a tuple of parameters or a slice object to - be handed to ``numpy.mgrid``. - args : tuple - Extra arguments passed to function. - Ns : int - Default number of samples, if those are not provided. - full_output : bool - If True, return the evaluation grid. - finish : callable, optional - An optimization function that is called with the result of brute force - minimization as initial guess. `finish` should take the initial guess - as positional argument, and take take `args`, `full_output` and `disp` - as keyword arguments. See Notes for more details. 
- - Returns - ------- - x0 : ndarray - Value of arguments to `func`, giving minimum over the grid. - fval : int - Function value at minimum. - grid : tuple - Representation of the evaluation grid. It has the same - length as x0. - Jout : ndarray - Function values over grid: ``Jout = func(*grid)``. - - Notes - ----- - The range is respected by the brute force minimization, but if the `finish` - keyword specifies another optimization function (including the default - `fmin`), the returned value may still be (just) outside the range. In - order to ensure the range is specified, use ``finish=None``. - - """ - N = len(ranges) - if N > 40: - raise ValueError("Brute Force not possible with more " \ - "than 40 variables.") - lrange = list(ranges) - for k in range(N): - if type(lrange[k]) is not type(slice(None)): - if len(lrange[k]) < 3: - lrange[k] = tuple(lrange[k]) + (complex(Ns),) - lrange[k] = slice(*lrange[k]) - if (N==1): - lrange = lrange[0] - - def _scalarfunc(*params): - params = squeeze(asarray(params)) - return func(params,*args) - - vecfunc = vectorize(_scalarfunc) - grid = mgrid[lrange] - if (N==1): - grid = (grid,) - Jout = vecfunc(*grid) - Nshape = shape(Jout) - indx = argmin(Jout.ravel(),axis=-1) - Nindx = zeros(N,int) - xmin = zeros(N,float) - for k in range(N-1,-1,-1): - thisN = Nshape[k] - Nindx[k] = indx % Nshape[k] - indx = indx // thisN - for k in range(N): - xmin[k] = grid[k][tuple(Nindx)] - - Jmin = Jout[tuple(Nindx)] - if (N==1): - grid = grid[0] - xmin = xmin[0] - if callable(finish): - vals = finish(func,xmin,args=args,full_output=1, disp=0) - xmin = vals[0] - Jmin = vals[1] - if vals[-1] > 0: - print "Warning: Final optimization did not succeed" - if full_output: - return xmin, Jmin, grid, Jout - else: - return xmin - - -def main(): - import time - - times = [] - algor = [] - x0 = [0.8,1.2,0.7] - print "Nelder-Mead Simplex" - print "===================" - start = time.time() - x = fmin(rosen,x0) - print x - times.append(time.time() - start) - 
algor.append('Nelder-Mead Simplex\t') - - print - print "Powell Direction Set Method" - print "===========================" - start = time.time() - x = fmin_powell(rosen,x0) - print x - times.append(time.time() - start) - algor.append('Powell Direction Set Method.') - - print - print "Nonlinear CG" - print "============" - start = time.time() - x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) - print x - times.append(time.time() - start) - algor.append('Nonlinear CG \t') - - print - print "BFGS Quasi-Newton" - print "=================" - start = time.time() - x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) - print x - times.append(time.time() - start) - algor.append('BFGS Quasi-Newton\t') - - print - print "BFGS approximate gradient" - print "=========================" - start = time.time() - x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) - print x - times.append(time.time() - start) - algor.append('BFGS without gradient\t') - - - print - print "Newton-CG with Hessian product" - print "==============================" - start = time.time() - x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80) - print x - times.append(time.time() - start) - algor.append('Newton-CG with hessian product') - - - print - print "Newton-CG with full Hessian" - print "===========================" - start = time.time() - x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80) - print x - times.append(time.time() - start) - algor.append('Newton-CG with full hessian') - - print - print "\nMinimizing the Rosenbrock function of order 3\n" - print " Algorithm \t\t\t Seconds" - print "===========\t\t\t =========" - for k in range(len(algor)): - print algor[k], "\t -- ", times[k] - -if __name__ == "__main__": - main() diff --git a/scipy-0.10.1/scipy/optimize/setup.py b/scipy-0.10.1/scipy/optimize/setup.py deleted file mode 100755 index 72f75d86ea..0000000000 --- a/scipy-0.10.1/scipy/optimize/setup.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python - 
-from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('optimize',parent_package, top_path) - - config.add_library('minpack',sources=[join('minpack','*f')]) - config.add_extension('_minpack', - sources=['_minpackmodule.c'], - libraries=['minpack'], - depends=["minpack.h","__minpack.h"]) - - config.add_library('rootfind', - sources=[join('Zeros','*.c')], - headers=[join('Zeros','zeros.h')]) - - config.add_extension('_zeros', - sources=['zeros.c'], - libraries=['rootfind']) - - lapack = get_info('lapack_opt') - sources=['lbfgsb.pyf','routines.f'] - config.add_extension('_lbfgsb', - sources=[join('lbfgsb',x) for x in sources], - **lapack) - - sources=['moduleTNC.c','tnc.c'] - config.add_extension('moduleTNC', - sources=[join('tnc',x) for x in sources], - depends=[join('tnc','tnc.h')]) - - config.add_extension('_cobyla', - sources=[join('cobyla',x) for x in ['cobyla.pyf', - 'cobyla2.f', - 'trstlp.f']]) - sources = ['minpack2.pyf', 'dcsrch.f', 'dcstep.f'] - config.add_extension('minpack2', - sources=[join('minpack2',x) for x in sources]) - - sources = ['slsqp.pyf', 'slsqp_optmz.f'] - config.add_extension('_slsqp', sources=[join('slsqp', x) for x in sources]) - - config.add_extension('_nnls', sources=[join('nnls', x) \ - for x in ["nnls.f","nnls.pyf"]]) - - config.add_data_dir('tests') - config.add_data_dir('benchmarks') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/optimize/setupscons.py b/scipy-0.10.1/scipy/optimize/setupscons.py deleted file mode 100755 index 2fbc1d2f87..0000000000 --- a/scipy-0.10.1/scipy/optimize/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util 
import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('optimize',parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/optimize/slsqp.py b/scipy-0.10.1/scipy/optimize/slsqp.py deleted file mode 100644 index 134a164bb8..0000000000 --- a/scipy-0.10.1/scipy/optimize/slsqp.py +++ /dev/null @@ -1,381 +0,0 @@ -"""This module implements the Sequential Least SQuares Programming optimization -algorithm (SLSQP), orginally developed by Dieter Kraft. - -See http://www.netlib.org/toms/733 - -""" - -__all__ = ['approx_jacobian','fmin_slsqp'] - -from _slsqp import slsqp -from numpy import zeros, array, linalg, append, asfarray, concatenate, finfo, \ - sqrt, vstack -from optimize import approx_fprime, wrap_function - -__docformat__ = "restructuredtext en" - -_epsilon = sqrt(finfo(float).eps) - -def approx_jacobian(x,func,epsilon,*args): - """ - Approximate the Jacobian matrix of a callable function. - - Parameters - ---------- - x : array_like - The state vector at which to compute the Jacobian matrix. - func : callable f(x,*args) - The vector-valued function. - epsilon : float - The perturbation used to determine the partial derivatives. - args : sequence - Additional arguments passed to func. - - Returns - ------- - An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length - of the outputs of `func`, and ``lenx`` is the number of elements in - `x`. - - Notes - ----- - The approximation is done using forward differences. 
- - """ - x0 = asfarray(x) - f0 = func(*((x0,)+args)) - jac = zeros([len(x0),len(f0)]) - dx = zeros(len(x0)) - for i in range(len(x0)): - dx[i] = epsilon - jac[i] = (func(*((x0+dx,)+args)) - f0)/epsilon - dx[i] = 0.0 - return jac.transpose() - - -def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None, - bounds = [], fprime = None, fprime_eqcons=None, - fprime_ieqcons=None, args = (), iter = 100, acc = 1.0E-6, - iprint = 1, disp = None, full_output = 0, epsilon = _epsilon ): - """ - Minimize a function using Sequential Least SQuares Programming - - Python interface function for the SLSQP Optimization subroutine - originally implemented by Dieter Kraft. - - Parameters - ---------- - func : callable f(x,*args) - Objective function. - x0 : 1-D ndarray of float - Initial guess for the independent variable(s). - eqcons : list - A list of functions of length n such that - eqcons[j](x,*args) == 0.0 in a successfully optimized - problem. - f_eqcons : callable f(x,*args) - Returns a 1-D array in which each element must equal 0.0 in a - successfully optimized problem. If f_eqcons is specified, - eqcons is ignored. - ieqcons : list - A list of functions of length n such that - ieqcons[j](x,*args) >= 0.0 in a successfully optimized - problem. - f_ieqcons : callable f(x,*args) - Returns a 1-D ndarray in which each element must be greater or - equal to 0.0 in a successfully optimized problem. If - f_ieqcons is specified, ieqcons is ignored. - bounds : list - A list of tuples specifying the lower and upper bound - for each independent variable [(xl0, xu0),(xl1, xu1),...] - fprime : callable `f(x,*args)` - A function that evaluates the partial derivatives of func. - fprime_eqcons : callable `f(x,*args)` - A function of the form `f(x, *args)` that returns the m by n - array of equality constraint normals. If not provided, - the normals will be approximated. The array returned by - fprime_eqcons should be sized as ( len(eqcons), len(x0) ). 
- fprime_ieqcons : callable `f(x,*args)` - A function of the form `f(x, *args)` that returns the m by n - array of inequality constraint normals. If not provided, - the normals will be approximated. The array returned by - fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). - args : sequence - Additional arguments passed to func and fprime. - iter : int - The maximum number of iterations. - acc : float - Requested accuracy. - iprint : int - The verbosity of fmin_slsqp : - - * iprint <= 0 : Silent operation - * iprint == 1 : Print summary upon completion (default) - * iprint >= 2 : Print status of each iterate and summary - disp : int - Over-rides the iprint interface (preferred). - full_output : bool - If False, return only the minimizer of func (default). - Otherwise, output final objective function and summary - information. - epsilon : float - The step size for finite-difference derivative estimates. - - Returns - ------- - out : ndarray of float - The final minimizer of func. - fx : ndarray of float, if full_output is true - The final value of the objective function. - its : int, if full_output is true - The number of iterations. - imode : int, if full_output is true - The exit mode from the optimizer (see below). - smode : string, if full_output is true - Message describing the exit mode from the optimizer. - - Notes - ----- - Exit modes are defined as follows :: - - -1 : Gradient evaluation required (g & a) - 0 : Optimization terminated successfully. - 1 : Function evaluation required (f & c) - 2 : More equality constraints than independent variables - 3 : More than 3*n iterations in LSQ subproblem - 4 : Inequality constraints incompatible - 5 : Singular matrix E in LSQ subproblem - 6 : Singular matrix C in LSQ subproblem - 7 : Rank-deficient equality constraint subproblem HFTI - 8 : Positive directional derivative for linesearch - 9 : Iteration limit exceeded - - Examples - -------- - Examples are given :ref:`in the tutorial `. 
- - """ - - exit_modes = { -1 : "Gradient evaluation required (g & a)", - 0 : "Optimization terminated successfully.", - 1 : "Function evaluation required (f & c)", - 2 : "More equality constraints than independent variables", - 3 : "More than 3*n iterations in LSQ subproblem", - 4 : "Inequality constraints incompatible", - 5 : "Singular matrix E in LSQ subproblem", - 6 : "Singular matrix C in LSQ subproblem", - 7 : "Rank-deficient equality constraint subproblem HFTI", - 8 : "Positive directional derivative for linesearch", - 9 : "Iteration limit exceeded" } - - if disp is not None: - iprint = disp - - # Now do a lot of function wrapping - - # Wrap func - feval, func = wrap_function(func, args) - # Wrap fprime, if provided, or approx_fprime if not - if fprime: - geval, fprime = wrap_function(fprime,args) - else: - geval, fprime = wrap_function(approx_fprime,(func,epsilon)) - - if f_eqcons: - # Equality constraints provided via f_eqcons - ceval, f_eqcons = wrap_function(f_eqcons,args) - if fprime_eqcons: - # Wrap fprime_eqcons - geval, fprime_eqcons = wrap_function(fprime_eqcons,args) - else: - # Wrap approx_jacobian - geval, fprime_eqcons = wrap_function(approx_jacobian, - (f_eqcons,epsilon)) - else: - # Equality constraints provided via eqcons[] - eqcons_prime = [] - for i in range(len(eqcons)): - eqcons_prime.append(None) - if eqcons[i]: - # Wrap eqcons and eqcons_prime - ceval, eqcons[i] = wrap_function(eqcons[i],args) - geval, eqcons_prime[i] = wrap_function(approx_fprime, - (eqcons[i],epsilon)) - - if f_ieqcons: - # Inequality constraints provided via f_ieqcons - ceval, f_ieqcons = wrap_function(f_ieqcons,args) - if fprime_ieqcons: - # Wrap fprime_ieqcons - geval, fprime_ieqcons = wrap_function(fprime_ieqcons,args) - else: - # Wrap approx_jacobian - geval, fprime_ieqcons = wrap_function(approx_jacobian, - (f_ieqcons,epsilon)) - else: - # Inequality constraints provided via ieqcons[] - ieqcons_prime = [] - for i in range(len(ieqcons)): - 
ieqcons_prime.append(None) - if ieqcons[i]: - # Wrap ieqcons and ieqcons_prime - ceval, ieqcons[i] = wrap_function(ieqcons[i],args) - geval, ieqcons_prime[i] = wrap_function(approx_fprime, - (ieqcons[i],epsilon)) - - - # Transform x0 into an array. - x = asfarray(x0).flatten() - - # Set the parameters that SLSQP will need - # meq = The number of equality constraints - if f_eqcons: - meq = len(f_eqcons(x)) - else: - meq = len(eqcons) - if f_ieqcons: - mieq = len(f_ieqcons(x)) - else: - mieq = len(ieqcons) - # m = The total number of constraints - m = meq + mieq - # la = The number of constraints, or 1 if there are no constraints - la = array([1,m]).max() - # n = The number of independent variables - n = len(x) - - # Define the workspaces for SLSQP - n1 = n+1 - mineq = m - meq + n1 + n1 - len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ - + 2*meq + n1 +(n+1)*n/2 + 2*m + 3*n + 3*n1 + 1 - len_jw = mineq - w = zeros(len_w) - jw = zeros(len_jw) - - # Decompose bounds into xl and xu - if len(bounds) == 0: - bounds = [(-1.0E12, 1.0E12) for i in range(n)] - elif len(bounds) != n: - raise IndexError('SLSQP Error: If bounds is specified, ' - 'len(bounds) == len(x0)') - else: - for i in range(len(bounds)): - if bounds[i][0] > bounds[i][1]: - raise ValueError('SLSQP Error: lb > ub in bounds[' + str(i) - + '] ' + str(bounds[4])) - - xl = array( [ b[0] for b in bounds ] ) - xu = array( [ b[1] for b in bounds ] ) - - - - # Initialize the iteration counter and the mode value - mode = array(0,int) - acc = array(acc,float) - majiter = array(iter,int) - majiter_prev = 0 - - # Print the header if iprint >= 2 - if iprint >= 2: - print "%5s %5s %16s %16s" % ("NIT","FC","OBJFUN","GNORM") - - while 1: - - if mode == 0 or mode == 1: # objective and constraint evaluation requird - - # Compute objective function - fx = func(x) - # Compute the constraints - if f_eqcons: - c_eq = f_eqcons(x) - else: - c_eq = array([ eqcons[i](x) for i in range(meq) ]) - if f_ieqcons: 
- c_ieq = f_ieqcons(x) - else: - c_ieq = array([ ieqcons[i](x) for i in range(len(ieqcons)) ]) - - # Now combine c_eq and c_ieq into a single matrix - if m == 0: - # no constraints - c = zeros([la]) - else: - # constraints exist - if meq > 0 and mieq == 0: - # only equality constraints - c = c_eq - if meq == 0 and mieq > 0: - # only inequality constraints - c = c_ieq - if meq > 0 and mieq > 0: - # both equality and inequality constraints exist - c = append(c_eq, c_ieq) - - if mode == 0 or mode == -1: # gradient evaluation required - - # Compute the derivatives of the objective function - # For some reason SLSQP wants g dimensioned to n+1 - g = append(fprime(x),0.0) - - # Compute the normals of the constraints - if fprime_eqcons: - a_eq = fprime_eqcons(x) - else: - a_eq = zeros([meq,n]) - for i in range(meq): - a_eq[i] = eqcons_prime[i](x) - - if fprime_ieqcons: - a_ieq = fprime_ieqcons(x) - else: - a_ieq = zeros([mieq,n]) - for i in range(mieq): - a_ieq[i] = ieqcons_prime[i](x) - - # Now combine a_eq and a_ieq into a single a matrix - if m == 0: - # no constraints - a = zeros([la,n]) - elif meq > 0 and mieq == 0: - # only equality constraints - a = a_eq - elif meq == 0 and mieq > 0: - # only inequality constraints - a = a_ieq - elif meq > 0 and mieq > 0: - # both equality and inequality constraints exist - a = vstack((a_eq,a_ieq)) - a = concatenate((a,zeros([la,1])),1) - - # Call SLSQP - slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw) - - # Print the status of the current iterate if iprint > 2 and the - # major iteration has incremented - if iprint >= 2 and majiter > majiter_prev: - print "%5i %5i % 16.6E % 16.6E" % (majiter,feval[0], - fx,linalg.norm(g)) - - # If exit mode is not -1 or 1, slsqp has completed - if abs(mode) != 1: - break - - majiter_prev = int(majiter) - - # Optimization loop complete. 
Print status if requested - if iprint >= 1: - print exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')' - print " Current function value:", fx - print " Iterations:", majiter - print " Function evaluations:", feval[0] - print " Gradient evaluations:", geval[0] - - if not full_output: - return x - else: - return [list(x), - float(fx), - int(majiter), - int(mode), - exit_modes[int(mode)] ] diff --git a/scipy-0.10.1/scipy/optimize/slsqp/slsqp.pyf b/scipy-0.10.1/scipy/optimize/slsqp/slsqp.pyf deleted file mode 100644 index 52551e9454..0000000000 --- a/scipy-0.10.1/scipy/optimize/slsqp/slsqp.pyf +++ /dev/null @@ -1,30 +0,0 @@ -! -*- f90 -*- -! Note: the context of this file is case sensitive. - -python module _slsqp ! in - interface ! in :slsqp - subroutine slsqp(m,meq,la,n,x,xl,xu,f,c,g,a,acc,iter,mode,w,l_w,jw,l_jw) ! in :slsqp:slsqp_optmz.f - integer :: m - integer :: meq - integer optional,check(len(c)>=la),depend(c) :: la=len(c) - integer optional,check(len(x)>=n),depend(x) :: n=len(x) - double precision dimension(n), intent(inout) :: x - double precision dimension(n),depend(n) :: xl - double precision dimension(n),depend(n) :: xu - double precision :: f - double precision dimension(la) :: c - double precision dimension(n + 1),depend(n) :: g - double precision dimension(la,n + 1),depend(la,n) :: a - double precision, intent(inout) :: acc - integer, intent(inout) :: iter - integer, intent(inout) :: mode - double precision dimension(l_w) :: w - integer optional,check(len(w)>=l_w),depend(w) :: l_w=len(w) - integer dimension(l_jw) :: jw - integer optional,check(len(jw)>=l_jw),depend(jw) :: l_jw=len(jw) - end subroutine slsqp - end interface -end python module slsqp - -! This file was auto-generated with f2py (version:2_3844). -! 
See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/optimize/slsqp/slsqp_optmz.f b/scipy-0.10.1/scipy/optimize/slsqp/slsqp_optmz.f deleted file mode 100644 index 23c2a61b29..0000000000 --- a/scipy-0.10.1/scipy/optimize/slsqp/slsqp_optmz.f +++ /dev/null @@ -1,2112 +0,0 @@ -C -C ALGORITHM 733, COLLECTED ALGORITHMS FROM ACM. -C TRANSACTIONS ON MATHEMATICAL SOFTWARE, -C VOL. 20, NO. 3, SEPTEMBER, 1994, PP. 262-281. -C http://doi.acm.org/10.1145/192115.192124 -C -C -C http://permalink.gmane.org/gmane.comp.python.scientific.devel/6725 -C ------ -C From: Deborah Cotton -C Date: Fri, 14 Sep 2007 12:35:55 -0500 -C Subject: RE: Algorithm License requested -C To: Alan Isaac -C -C Prof. Issac, -C -C In that case, then because the author consents to [the ACM] releasing -C the code currently archived at http://www.netlib.org/toms/733 under the -C BSD license, the ACM hereby releases this code under the BSD license. -C -C Regards, -C -C Deborah Cotton, Copyright & Permissions -C ACM Publications -C 2 Penn Plaza, Suite 701** -C New York, NY 10121-0701 -C permissions@acm.org -C 212.869.7440 ext. 652 -C Fax. 212.869.0481 -C ------ -C - -************************************************************************ -* optimizer * -************************************************************************ - - SUBROUTINE slsqp (m, meq, la, n, x, xl, xu, f, c, g, a, - * acc, iter, mode, w, l_w, jw, l_jw) - -C SLSQP S EQUENTIAL L EAST SQ UARES P ROGRAMMING -C TO SOLVE GENERAL NONLINEAR OPTIMIZATION PROBLEMS - -C*********************************************************************** -C* * -C* * -C* A NONLINEAR PROGRAMMING METHOD WITH * -C* QUADRATIC PROGRAMMING SUBPROBLEMS * -C* * -C* * -C* THIS SUBROUTINE SOLVES THE GENERAL NONLINEAR PROGRAMMING PROBLEM * -C* * -C* MINIMIZE F(X) * -C* * -C* SUBJECT TO C (X) .EQ. 0 , J = 1,...,MEQ * -C* J * -C* * -C* C (X) .GE. 0 , J = MEQ+1,...,M * -C* J * -C* * -C* XL .LE. X .LE. XU , I = 1,...,N. 
* -C* I I I * -C* * -C* THE ALGORITHM IMPLEMENTS THE METHOD OF HAN AND POWELL * -C* WITH BFGS-UPDATE OF THE B-MATRIX AND L1-TEST FUNCTION * -C* WITHIN THE STEPLENGTH ALGORITHM. * -C* * -C* PARAMETER DESCRIPTION: * -C* ( * MEANS THIS PARAMETER WILL BE CHANGED DURING CALCULATION ) * -C* * -C* M IS THE TOTAL NUMBER OF CONSTRAINTS, M .GE. 0 * -C* MEQ IS THE NUMBER OF EQUALITY CONSTRAINTS, MEQ .GE. 0 * -C* LA SEE A, LA .GE. MAX(M,1) * -C* N IS THE NUMBER OF VARIBLES, N .GE. 1 * -C* * X() X() STORES THE CURRENT ITERATE OF THE N VECTOR X * -C* ON ENTRY X() MUST BE INITIALIZED. ON EXIT X() * -C* STORES THE SOLUTION VECTOR X IF MODE = 0. * -C* XL() XL() STORES AN N VECTOR OF LOWER BOUNDS XL TO X. * -C* XU() XU() STORES AN N VECTOR OF UPPER BOUNDS XU TO X. * -C* F IS THE VALUE OF THE OBJECTIVE FUNCTION. * -C* C() C() STORES THE M VECTOR C OF CONSTRAINTS, * -C* EQUALITY CONSTRAINTS (IF ANY) FIRST. * -C* DIMENSION OF C MUST BE GREATER OR EQUAL LA, * -C* which must be GREATER OR EQUAL MAX(1,M). * -C* G() G() STORES THE N VECTOR G OF PARTIALS OF THE * -C* OBJECTIVE FUNCTION; DIMENSION OF G MUST BE * -C* GREATER OR EQUAL N+1. * -C* A(),LA,M,N THE LA BY N + 1 ARRAY A() STORES * -C* THE M BY N MATRIX A OF CONSTRAINT NORMALS. * -C* A() HAS FIRST DIMENSIONING PARAMETER LA, * -C* WHICH MUST BE GREATER OR EQUAL MAX(1,M). * -C* F,C,G,A MUST ALL BE SET BY THE USER BEFORE EACH CALL. * -C* * ACC ABS(ACC) CONTROLS THE FINAL ACCURACY. * -C* IF ACC .LT. ZERO AN EXACT LINESEARCH IS PERFORMED,* -C* OTHERWISE AN ARMIJO-TYPE LINESEARCH IS USED. * -C* * ITER PRESCRIBES THE MAXIMUM NUMBER OF ITERATIONS. * -C* ON EXIT ITER INDICATES THE NUMBER OF ITERATIONS. * -C* * MODE MODE CONTROLS CALCULATION: * -C* REVERSE COMMUNICATION IS USED IN THE SENSE THAT * -C* THE PROGRAM IS INITIALIZED BY MODE = 0; THEN IT IS* -C* TO BE CALLED REPEATEDLY BY THE USER UNTIL A RETURN* -C* WITH MODE .NE. IABS(1) TAKES PLACE. 
* -C* IF MODE = -1 GRADIENTS HAVE TO BE CALCULATED, * -C* WHILE WITH MODE = 1 FUNCTIONS HAVE TO BE CALCULATED -C* MODE MUST NOT BE CHANGED BETWEEN SUBSEQUENT CALLS * -C* OF SQP. * -C* EVALUATION MODES: * -C* MODE = -1: GRADIENT EVALUATION, (G&A) * -C* 0: ON ENTRY: INITIALIZATION, (F,G,C&A) * -C* ON EXIT : REQUIRED ACCURACY FOR SOLUTION OBTAINED * -C* 1: FUNCTION EVALUATION, (F&C) * -C* * -C* FAILURE MODES: * -C* 2: NUMBER OF EQUALITY CONTRAINTS LARGER THAN N * -C* 3: MORE THAN 3*N ITERATIONS IN LSQ SUBPROBLEM * -C* 4: INEQUALITY CONSTRAINTS INCOMPATIBLE * -C* 5: SINGULAR MATRIX E IN LSQ SUBPROBLEM * -C* 6: SINGULAR MATRIX C IN LSQ SUBPROBLEM * -C* 7: RANK-DEFICIENT EQUALITY CONSTRAINT SUBPROBLEM HFTI* -C* 8: POSITIVE DIRECTIONAL DERIVATIVE FOR LINESEARCH * -C* 9: MORE THAN ITER ITERATIONS IN SQP * -C* >=10: WORKING SPACE W OR JW TOO SMALL, * -C* W SHOULD BE ENLARGED TO L_W=MODE/1000 * -C* JW SHOULD BE ENLARGED TO L_JW=MODE-1000*L_W * -C* * W(), L_W W() IS A ONE DIMENSIONAL WORKING SPACE, * -C* THE LENGTH L_W OF WHICH SHOULD BE AT LEAST * -C* (3*N1+M)*(N1+1) for LSQ * -C* +(N1-MEQ+1)*(MINEQ+2) + 2*MINEQ for LSI * -C* +(N1+MINEQ)*(N1-MEQ) + 2*MEQ + N1 for LSEI * -C* + N1*N/2 + 2*M + 3*N + 3*N1 + 1 for SLSQPB * -C* with MINEQ = M - MEQ + 2*N1 & N1 = N+1 * -C* NOTICE: FOR PROPER DIMENSIONING OF W IT IS RECOMMENDED TO * -C* COPY THE FOLLOWING STATEMENTS INTO THE HEAD OF * -C* THE CALLING PROGRAM (AND REMOVE THE COMMENT C) * -c####################################################################### -C INTEGER LEN_W, LEN_JW, M, N, N1, MEQ, MINEQ -C PARAMETER (M=... , MEQ=... , N=... 
) -C PARAMETER (N1= N+1, MINEQ= M-MEQ+N1+N1) -C PARAMETER (LEN_W= -c $ (3*N1+M)*(N1+1) -c $ +(N1-MEQ+1)*(MINEQ+2) + 2*MINEQ -c $ +(N1+MINEQ)*(N1-MEQ) + 2*MEQ + N1 -c $ +(N+1)*N/2 + 2*M + 3*N + 3*N1 + 1, -c $ LEN_JW=MINEQ) -C DOUBLE PRECISION W(LEN_W) -C INTEGER JW(LEN_JW) -c####################################################################### -C* THE FIRST M+N+N*N1/2 ELEMENTS OF W MUST NOT BE * -C* CHANGED BETWEEN SUBSEQUENT CALLS OF SLSQP. * -C* ON RETURN W(1) ... W(M) CONTAIN THE MULTIPLIERS * -C* ASSOCIATED WITH THE GENERAL CONSTRAINTS, WHILE * -C* W(M+1) ... W(M+N(N+1)/2) STORE THE CHOLESKY FACTOR* -C* L*D*L(T) OF THE APPROXIMATE HESSIAN OF THE * -C* LAGRANGIAN COLUMNWISE DENSE AS LOWER TRIANGULAR * -C* UNIT MATRIX L WITH D IN ITS 'DIAGONAL' and * -C* W(M+N(N+1)/2+N+2 ... W(M+N(N+1)/2+N+2+M+2N) * -C* CONTAIN THE MULTIPLIERS ASSOCIATED WITH ALL * -C* ALL CONSTRAINTS OF THE QUADRATIC PROGRAM FINDING * -C* THE SEARCH DIRECTION TO THE SOLUTION X* * -C* * JW(), L_JW JW() IS A ONE DIMENSIONAL INTEGER WORKING SPACE * -C* THE LENGTH L_JW OF WHICH SHOULD BE AT LEAST * -C* MINEQ * -C* with MINEQ = M - MEQ + 2*N1 & N1 = N+1 * -C* * -C* THE USER HAS TO PROVIDE THE FOLLOWING SUBROUTINES: * -C* LDL(N,A,Z,SIG,W) : UPDATE OF THE LDL'-FACTORIZATION. * -C* LINMIN(A,B,F,TOL) : LINESEARCH ALGORITHM IF EXACT = 1 * -C* LSQ(M,MEQ,LA,N,NC,C,D,A,B,XL,XU,X,LAMBDA,W,....) : * -C* * -C* SOLUTION OF THE QUADRATIC PROGRAM * -C* QPSOL IS RECOMMENDED: * -C* PE GILL, W MURRAY, MA SAUNDERS, MH WRIGHT: * -C* USER'S GUIDE FOR SOL/QPSOL: * -C* A FORTRAN PACKAGE FOR QUADRATIC PROGRAMMING, * -C* TECHNICAL REPORT SOL 83-7, JULY 1983 * -C* DEPARTMENT OF OPERATIONS RESEARCH, STANFORD UNIVERSITY * -C* STANFORD, CA 94305 * -C* QPSOL IS THE MOST ROBUST AND EFFICIENT QP-SOLVER * -C* AS IT ALLOWS WARM STARTS WITH PROPER WORKING SETS * -C* * -C* IF IT IS NOT AVAILABLE USE LSEI, A CONSTRAINT LINEAR LEAST * -C* SQUARES SOLVER IMPLEMENTED USING THE SOFTWARE HFTI, LDP, NNLS * -C* FROM C.L. 
LAWSON, R.J.HANSON: SOLVING LEAST SQUARES PROBLEMS, * -C* PRENTICE HALL, ENGLEWOOD CLIFFS, 1974. * -C* LSEI COMES WITH THIS PACKAGE, together with all necessary SR's. * -C* * -C* TOGETHER WITH A COUPLE OF SUBROUTINES FROM BLAS LEVEL 1 * -C* * -C* SQP IS HEAD SUBROUTINE FOR BODY SUBROUTINE SQPBDY * -C* IN WHICH THE ALGORITHM HAS BEEN IMPLEMENTED. * -C* * -C* IMPLEMENTED BY: DIETER KRAFT, DFVLR OBERPFAFFENHOFEN * -C* as described in Dieter Kraft: A Software Package for * -C* Sequential Quadratic Programming * -C* DFVLR-FB 88-28, 1988 * -C* which should be referenced if the user publishes results of SLSQP * -C* * -C* DATE: APRIL - OCTOBER, 1981. * -C* STATUS: DECEMBER, 31-ST, 1984. * -C* STATUS: MARCH , 21-ST, 1987, REVISED TO FORTAN 77 * -C* STATUS: MARCH , 20-th, 1989, REVISED TO MS-FORTRAN * -C* STATUS: APRIL , 14-th, 1989, HESSE in-line coded * -C* STATUS: FEBRUARY, 28-th, 1991, FORTRAN/2 Version 1.04 * -C* accepts Statement Functions * -C* STATUS: MARCH , 1-st, 1991, tested with SALFORD * -C* FTN77/386 COMPILER VERS 2.40* -C* in protected mode * -C* * -C*********************************************************************** -C* * -C* Copyright 1991: Dieter Kraft, FHM * -C* * -C*********************************************************************** - - INTEGER il, im, ir, is, iter, iu, iv, iw, ix, l_w, l_jw, - * jw(l_jw), la, m, meq, mineq, mode, n, n1 - - DOUBLE PRECISION acc, a(la,n+1), c(la), f, g(n+1), - * x(n), xl(n), xu(n), w(l_w) - -c dim(W) = N1*(N1+1) + MEQ*(N1+1) + MINEQ*(N1+1) for LSQ -c +(N1-MEQ+1)*(MINEQ+2) + 2*MINEQ for LSI -c +(N1+MINEQ)*(N1-MEQ) + 2*MEQ + N1 for LSEI -c + N1*N/2 + 2*M + 3*N +3*N1 + 1 for SLSQPB -c with MINEQ = M - MEQ + 2*N1 & N1 = N+1 - -C CHECK LENGTH OF WORKING ARRAYS - - n1 = n+1 - mineq = m-meq+n1+n1 - il = (3*n1+m)*(n1+1) + - .(n1-meq+1)*(mineq+2) + 2*mineq + - .(n1+mineq)*(n1-meq) + 2*meq + - .n1*n/2 + 2*m + 3*n + 4*n1 + 1 - im = MAX(mineq, n1-meq) - IF (l_w .LT. il .OR. l_jw .LT. 
im) THEN - mode = 1000*MAX(10,il) - mode = mode+MAX(10,im) - RETURN - ENDIF - -C PREPARE DATA FOR CALLING SQPBDY - INITIAL ADDRESSES IN W - - im = 1 - il = im + MAX(1,m) - il = im + la - ix = il + n1*n/2 + 1 - ir = ix + n - is = ir + n + n + MAX(1,m) - is = ir + n + n + la - iu = is + n1 - iv = iu + n1 - iw = iv + n1 - - CALL slsqpb (m, meq, la, n, x, xl, xu, f, c, g, a, acc, iter, - * mode, w(ir), w(il), w(ix), w(im), w(is), w(iu), w(iv), w(iw), jw) - - END - - SUBROUTINE slsqpb (m, meq, la, n, x, xl, xu, f, c, g, a, acc, - * iter, mode, r, l, x0, mu, s, u, v, w, iw) - -C NONLINEAR PROGRAMMING BY SOLVING SEQUENTIALLY QUADRATIC PROGRAMS - -C - L1 - LINE SEARCH, POSITIVE DEFINITE BFGS UPDATE - - -C BODY SUBROUTINE FOR SLSQP - - INTEGER iw(*), i, iexact, incons, ireset, iter, itermx, - * k, j, la, line, m, meq, mode, n, n1, n2, n3 - - DOUBLE PRECISION a(la,n+1), c(la), g(n+1), l((n+1)*(n+2)/2), - * mu(la), r(m+n+n+2), s(n+1), u(n+1), v(n+1), w(*), - * x(n), xl(n), xu(n), x0(n), - * ddot_sl, dnrm2_, linmin, - * acc, alfmin, alpha, f, f0, gs, h1, h2, h3, h4, - * hun, one, t, t0, ten, tol, two, ZERO - -c dim(W) = N1*(N1+1) + MEQ*(N1+1) + MINEQ*(N1+1) for LSQ -c +(N1-MEQ+1)*(MINEQ+2) + 2*MINEQ -c +(N1+MINEQ)*(N1-MEQ) + 2*MEQ + N1 for LSEI -c with MINEQ = M - MEQ + 2*N1 & N1 = N+1 - - SAVE alpha, f0, gs, h1, h2, h3, h4, t, t0, tol, - * iexact, incons, ireset, itermx, line, n1, n2, n3 - - DATA ZERO /0.0d0/, one /1.0d0/, alfmin /1.0d-1/, - * hun /1.0d+2/, ten /1.0d+1/, two /2.0d0/ - - IF (mode) 260, 100, 220 - - 100 itermx = iter - IF (acc.GE.ZERO) THEN - iexact = 0 - ELSE - iexact = 1 - ENDIF - acc = ABS(acc) - tol = ten*acc - iter = 0 - ireset = 0 - n1 = n + 1 - n2 = n1*n/2 - n3 = n2 + 1 - s(1) = ZERO - mu(1) = ZERO - CALL dcopy_(n, s(1), 0, s, 1) - CALL dcopy_(m, mu(1), 0, mu, 1) - -C RESET BFGS MATRIX - - 110 ireset = ireset + 1 - IF (ireset.GT.5) GO TO 255 - l(1) = ZERO - CALL dcopy_(n2, l(1), 0, l, 1) - j = 1 - DO 120 i=1,n - l(j) = one - j = j + n1 - i - 120 CONTINUE 
- -C MAIN ITERATION : SEARCH DIRECTION, STEPLENGTH, LDL'-UPDATE - - 130 iter = iter + 1 - mode = 9 - IF (iter.GT.itermx) GO TO 330 - -C SEARCH DIRECTION AS SOLUTION OF QP - SUBPROBLEM - - CALL dcopy_(n, xl, 1, u, 1) - CALL dcopy_(n, xu, 1, v, 1) - CALL daxpy_sl(n, -one, x, 1, u, 1) - CALL daxpy_sl(n, -one, x, 1, v, 1) - h4 = one - CALL lsq (m, meq, n , n3, la, l, g, a, c, u, v, s, r, w, iw, mode) - -C AUGMENTED PROBLEM FOR INCONSISTENT LINEARIZATION - - IF (mode.EQ.6) THEN - IF (n.EQ.meq) THEN - mode = 4 - ENDIF - ENDIF - IF (mode.EQ.4) THEN - DO 140 j=1,m - IF (j.LE.meq) THEN - a(j,n1) = -c(j) - ELSE - a(j,n1) = MAX(-c(j),ZERO) - ENDIF - 140 CONTINUE - s(1) = ZERO - CALL dcopy_(n, s(1), 0, s, 1) - h3 = ZERO - g(n1) = ZERO - l(n3) = hun - s(n1) = one - u(n1) = ZERO - v(n1) = one - incons = 0 - 150 CALL lsq (m, meq, n1, n3, la, l, g, a, c, u, v, s, r, - * w, iw, mode) - h4 = one - s(n1) - IF (mode.EQ.4) THEN - l(n3) = ten*l(n3) - incons = incons + 1 - IF (incons.GT.5) GO TO 330 - GOTO 150 - ELSE IF (mode.NE.1) THEN - GOTO 330 - ENDIF - ELSE IF (mode.NE.1) THEN - GOTO 330 - ENDIF - -C UPDATE MULTIPLIERS FOR L1-TEST - - DO 160 i=1,n - v(i) = g(i) - ddot_sl(m,a(1,i),1,r,1) - 160 CONTINUE - f0 = f - CALL dcopy_(n, x, 1, x0, 1) - gs = ddot_sl(n, g, 1, s, 1) - h1 = ABS(gs) - h2 = ZERO - DO 170 j=1,m - IF (j.LE.meq) THEN - h3 = c(j) - ELSE - h3 = ZERO - ENDIF - h2 = h2 + MAX(-c(j),h3) - h3 = ABS(r(j)) - mu(j) = MAX(h3,(mu(j)+h3)/two) - h1 = h1 + h3*ABS(c(j)) - 170 CONTINUE - -C CHECK CONVERGENCE - - mode = 0 - IF (h1.LT.acc .AND. 
h2.LT.acc) GO TO 330 - h1 = ZERO - DO 180 j=1,m - IF (j.LE.meq) THEN - h3 = c(j) - ELSE - h3 = ZERO - ENDIF - h1 = h1 + mu(j)*MAX(-c(j),h3) - 180 CONTINUE - t0 = f + h1 - h3 = gs - h1*h4 - mode = 8 - IF (h3.GE.ZERO) GO TO 110 - -C LINE SEARCH WITH AN L1-TESTFUNCTION - - line = 0 - alpha = one - IF (iexact.EQ.1) GOTO 210 - -C INEXACT LINESEARCH - - 190 line = line + 1 - h3 = alpha*h3 - CALL dscal_sl(n, alpha, s, 1) - CALL dcopy_(n, x0, 1, x, 1) - CALL daxpy_sl(n, one, s, 1, x, 1) - mode = 1 - GO TO 330 - 200 IF (h1.LE.h3/ten .OR. line.GT.10) GO TO 240 - alpha = MAX(h3/(two*(h3-h1)),alfmin) - GO TO 190 - -C EXACT LINESEARCH - - 210 IF (line.NE.3) THEN - alpha = linmin(line,alfmin,one,t,tol) - CALL dcopy_(n, x0, 1, x, 1) - CALL daxpy_sl(n, alpha, s, 1, x, 1) - mode = 1 - GOTO 330 - ENDIF - CALL dscal_sl(n, alpha, s, 1) - GOTO 240 - -C CALL FUNCTIONS AT CURRENT X - - 220 t = f - DO 230 j=1,m - IF (j.LE.meq) THEN - h1 = c(j) - ELSE - h1 = ZERO - ENDIF - t = t + mu(j)*MAX(-c(j),h1) - 230 CONTINUE - h1 = t - t0 - GOTO (200, 210) iexact+1 - -C CHECK CONVERGENCE - - 240 h3 = ZERO - DO 250 j=1,m - IF (j.LE.meq) THEN - h1 = c(j) - ELSE - h1 = ZERO - ENDIF - h3 = h3 + MAX(-c(j),h1) - 250 CONTINUE - IF ((ABS(f-f0).LT.acc .OR. dnrm2_(n,s,1).LT.acc) .AND. h3.LT.acc) - * THEN - mode = 0 - ELSE - mode = -1 - ENDIF - GO TO 330 - -C CHECK relaxed CONVERGENCE in case of positive directional derivative - - 255 CONTINUE - IF ((ABS(f-f0).LT.tol .OR. dnrm2_(n,s,1).LT.tol) .AND. 
h3.LT.tol) - * THEN - mode = 0 - ELSE - mode = 8 - ENDIF - GO TO 330 - -C CALL JACOBIAN AT CURRENT X - -C UPDATE CHOLESKY-FACTORS OF HESSIAN MATRIX BY MODIFIED BFGS FORMULA - - 260 DO 270 i=1,n - u(i) = g(i) - ddot_sl(m,a(1,i),1,r,1) - v(i) - 270 CONTINUE - -C L'*S - - k = 0 - DO 290 i=1,n - h1 = ZERO - k = k + 1 - DO 280 j=i+1,n - k = k + 1 - h1 = h1 + l(k)*s(j) - 280 CONTINUE - v(i) = s(i) + h1 - 290 CONTINUE - -C D*L'*S - - k = 1 - DO 300 i=1,n - v(i) = l(k)*v(i) - k = k + n1 - i - 300 CONTINUE - -C L*D*L'*S - - DO 320 i=n,1,-1 - h1 = ZERO - k = i - DO 310 j=1,i - 1 - h1 = h1 + l(k)*v(j) - k = k + n - j - 310 CONTINUE - v(i) = v(i) + h1 - 320 CONTINUE - - h1 = ddot_sl(n,s,1,u,1) - h2 = ddot_sl(n,s,1,v,1) - h3 = 0.2d0*h2 - IF (h1.LT.h3) THEN - h4 = (h2-h3)/(h2-h1) - h1 = h3 - CALL dscal_sl(n, h4, u, 1) - CALL daxpy_sl(n, one-h4, v, 1, u, 1) - ENDIF - CALL ldl(n, l, u, +one/h1, v) - CALL ldl(n, l, v, -one/h2, u) - -C END OF MAIN ITERATION - - GO TO 130 - -C END OF SLSQPB - - 330 END - - - SUBROUTINE lsq(m,meq,n,nl,la,l,g,a,b,xl,xu,x,y,w,jw,mode) - -C MINIMIZE with respect to X - -C ||E*X - F|| -C 1/2 T -C WITH UPPER TRIANGULAR MATRIX E = +D *L , - -C -1/2 -1 -C AND VECTOR F = -D *L *G, - -C WHERE THE UNIT LOWER TRIDIANGULAR MATRIX L IS STORED COLUMNWISE -C DENSE IN THE N*(N+1)/2 ARRAY L WITH VECTOR D STORED IN ITS -C 'DIAGONAL' THUS SUBSTITUTING THE ONE-ELEMENTS OF L - -C SUBJECT TO - -C A(J)*X - B(J) = 0 , J=1,...,MEQ, -C A(J)*X - B(J) >=0, J=MEQ+1,...,M, -C XL(I) <= X(I) <= XU(I), I=1,...,N, -C ON ENTRY, THE USER HAS TO PROVIDE THE ARRAYS L, G, A, B, XL, XU. -C WITH DIMENSIONS: L(N*(N+1)/2), G(N), A(LA,N), B(M), XL(N), XU(N) -C THE WORKING ARRAY W MUST HAVE AT LEAST THE FOLLOWING DIMENSION: -c DIM(W) = (3*N+M)*(N+1) for LSQ -c +(N-MEQ+1)*(MINEQ+2) + 2*MINEQ for LSI -c +(N+MINEQ)*(N-MEQ) + 2*MEQ + N for LSEI -c with MINEQ = M - MEQ + 2*N -C ON RETURN, NO ARRAY WILL BE CHANGED BY THE SUBROUTINE. 
-C X STORES THE N-DIMENSIONAL SOLUTION VECTOR -C Y STORES THE VECTOR OF LAGRANGE MULTIPLIERS OF DIMENSION -C M+N+N (CONSTRAINTS+LOWER+UPPER BOUNDS) -C MODE IS A SUCCESS-FAILURE FLAG WITH THE FOLLOWING MEANINGS: -C MODE=1: SUCCESSFUL COMPUTATION -C 2: ERROR RETURN BECAUSE OF WRONG DIMENSIONS (N<1) -C 3: ITERATION COUNT EXCEEDED BY NNLS -C 4: INEQUALITY CONSTRAINTS INCOMPATIBLE -C 5: MATRIX E IS NOT OF FULL RANK -C 6: MATRIX C IS NOT OF FULL RANK -C 7: RANK DEFECT IN HFTI - -c coded Dieter Kraft, april 1987 -c revised march 1989 - - DOUBLE PRECISION l,g,a,b,w,xl,xu,x,y, - . diag,ZERO,one,ddot_sl,xnorm - - INTEGER jw(*),i,ic,id,ie,IF,ig,ih,il,im,ip,iu,iw, - . i1,i2,i3,i4,la,m,meq,mineq,mode,m1,n,nl,n1,n2,n3 - - DIMENSION a(la,n), b(la), g(n), l(nl), - . w(*), x(n), xl(n), xu(n), y(m+n+n) - - DATA ZERO/0.0d0/, one/1.0d0/ - - n1 = n + 1 - mineq = m - meq - m1 = mineq + n + n - -c determine whether to solve problem -c with inconsistent linerarization (n2=1) -c or not (n2=0) - - n2 = n1*n/2 + 1 - IF (n2.EQ.nl) THEN - n2 = 0 - ELSE - n2 = 1 - ENDIF - n3 = n-n2 - -C RECOVER MATRIX E AND VECTOR F FROM L AND G - - i2 = 1 - i3 = 1 - i4 = 1 - ie = 1 - IF = n*n+1 - DO 10 i=1,n3 - i1 = n1-i - diag = SQRT (l(i2)) - w(i3) = ZERO - CALL dcopy_ (i1 , w(i3), 0, w(i3), 1) - CALL dcopy_ (i1-n2, l(i2), 1, w(i3), n) - CALL dscal_sl (i1-n2, diag, w(i3), n) - w(i3) = diag - w(IF-1+i) = (g(i) - ddot_sl (i-1, w(i4), 1, w(IF), 1))/diag - i2 = i2 + i1 - n2 - i3 = i3 + n1 - i4 = i4 + n - 10 CONTINUE - IF (n2.EQ.1) THEN - w(i3) = l(nl) - w(i4) = ZERO - CALL dcopy_ (n3, w(i4), 0, w(i4), 1) - w(IF-1+n) = ZERO - ENDIF - CALL dscal_sl (n, - one, w(IF), 1) - - ic = IF + n - id = ic + meq*n - - IF (meq .GT. 0) THEN - -C RECOVER MATRIX C FROM UPPER PART OF A - - DO 20 i=1,meq - CALL dcopy_ (n, a(i,1), la, w(ic-1+i), meq) - 20 CONTINUE - -C RECOVER VECTOR D FROM UPPER PART OF B - - CALL dcopy_ (meq, b(1), 1, w(id), 1) - CALL dscal_sl (meq, - one, w(id), 1) - - ENDIF - - ig = id + meq - - IF (mineq .GT. 
0) THEN - -C RECOVER MATRIX G FROM LOWER PART OF A - - DO 30 i=1,mineq - CALL dcopy_ (n, a(meq+i,1), la, w(ig-1+i), m1) - 30 CONTINUE - - ENDIF - -C AUGMENT MATRIX G BY +I AND -I - - ip = ig + mineq - DO 40 i=1,n - w(ip-1+i) = ZERO - CALL dcopy_ (n, w(ip-1+i), 0, w(ip-1+i), m1) - 40 CONTINUE - w(ip) = one - CALL dcopy_ (n, w(ip), 0, w(ip), m1+1) - - im = ip + n - DO 50 i=1,n - w(im-1+i) = ZERO - CALL dcopy_ (n, w(im-1+i), 0, w(im-1+i), m1) - 50 CONTINUE - w(im) = -one - CALL dcopy_ (n, w(im), 0, w(im), m1+1) - - ih = ig + m1*n - - IF (mineq .GT. 0) THEN - -C RECOVER H FROM LOWER PART OF B - - CALL dcopy_ (mineq, b(meq+1), 1, w(ih), 1) - CALL dscal_sl (mineq, - one, w(ih), 1) - - ENDIF - -C AUGMENT VECTOR H BY XL AND XU - - il = ih + mineq - CALL dcopy_ (n, xl, 1, w(il), 1) - iu = il + n - CALL dcopy_ (n, xu, 1, w(iu), 1) - CALL dscal_sl (n, - one, w(iu), 1) - - iw = iu + n - - CALL lsei (w(ic), w(id), w(ie), w(IF), w(ig), w(ih), MAX(1,meq), - . meq, n, n, m1, m1, n, x, xnorm, w(iw), jw, mode) - - IF (mode .EQ. 1) THEN - -c restore Lagrange multipliers - - CALL dcopy_ (m, w(iw), 1, y(1), 1) - CALL dcopy_ (n3, w(iw+m), 1, y(m+1), 1) - CALL dcopy_ (n3, w(iw+m+n), 1, y(m+n3+1), 1) - - ENDIF - -C END OF SUBROUTINE LSQ - - END - - - SUBROUTINE lsei(c,d,e,f,g,h,lc,mc,LE,me,lg,mg,n,x,xnrm,w,jw,mode) - -C FOR MODE=1, THE SUBROUTINE RETURNS THE SOLUTION X OF -C EQUALITY & INEQUALITY CONSTRAINED LEAST SQUARES PROBLEM LSEI : - -C MIN ||E*X - F|| -C X - -C S.T. C*X = D, -C G*X >= H. - -C USING QR DECOMPOSITION & ORTHOGONAL BASIS OF NULLSPACE OF C -C CHAPTER 23.6 OF LAWSON & HANSON: SOLVING LEAST SQUARES PROBLEMS. 
- -C THE FOLLOWING DIMENSIONS OF THE ARRAYS DEFINING THE PROBLEM -C ARE NECESSARY -C DIM(E) : FORMAL (LE,N), ACTUAL (ME,N) -C DIM(F) : FORMAL (LE ), ACTUAL (ME ) -C DIM(C) : FORMAL (LC,N), ACTUAL (MC,N) -C DIM(D) : FORMAL (LC ), ACTUAL (MC ) -C DIM(G) : FORMAL (LG,N), ACTUAL (MG,N) -C DIM(H) : FORMAL (LG ), ACTUAL (MG ) -C DIM(X) : FORMAL (N ), ACTUAL (N ) -C DIM(W) : 2*MC+ME+(ME+MG)*(N-MC) for LSEI -C +(N-MC+1)*(MG+2)+2*MG for LSI -C DIM(JW): MAX(MG,L) -C ON ENTRY, THE USER HAS TO PROVIDE THE ARRAYS C, D, E, F, G, AND H. -C ON RETURN, ALL ARRAYS WILL BE CHANGED BY THE SUBROUTINE. -C X STORES THE SOLUTION VECTOR -C XNORM STORES THE RESIDUUM OF THE SOLUTION IN EUCLIDIAN NORM -C W STORES THE VECTOR OF LAGRANGE MULTIPLIERS IN ITS FIRST -C MC+MG ELEMENTS -C MODE IS A SUCCESS-FAILURE FLAG WITH THE FOLLOWING MEANINGS: -C MODE=1: SUCCESSFUL COMPUTATION -C 2: ERROR RETURN BECAUSE OF WRONG DIMENSIONS (N<1) -C 3: ITERATION COUNT EXCEEDED BY NNLS -C 4: INEQUALITY CONSTRAINTS INCOMPATIBLE -C 5: MATRIX E IS NOT OF FULL RANK -C 6: MATRIX C IS NOT OF FULL RANK -C 7: RANK DEFECT IN HFTI - -C 18.5.1981, DIETER KRAFT, DFVLR OBERPFAFFENHOFEN -C 20.3.1987, DIETER KRAFT, DFVLR OBERPFAFFENHOFEN - - INTEGER jw(*),i,ie,IF,ig,iw,j,k,krank,l,lc,LE,lg, - . mc,mc1,me,mg,mode,n - DOUBLE PRECISION c(lc,n),e(LE,n),g(lg,n),d(lc),f(LE),h(lg),x(n), - . 
w(*),t,ddot_sl,xnrm,dnrm2_,epmach,ZERO - DATA epmach/2.22d-16/,ZERO/0.0d+00/ - - mode=2 - IF(mc.GT.n) GOTO 75 - l=n-mc - mc1=mc+1 - iw=(l+1)*(mg+2)+2*mg+mc - ie=iw+mc+1 - IF=ie+me*l - ig=IF+me - -C TRIANGULARIZE C AND APPLY FACTORS TO E AND G - - DO 10 i=1,mc - j=MIN(i+1,lc) - CALL h12(1,i,i+1,n,c(i,1),lc,w(iw+i),c(j,1),lc,1,mc-i) - CALL h12(2,i,i+1,n,c(i,1),lc,w(iw+i),e ,LE,1,me) - 10 CALL h12(2,i,i+1,n,c(i,1),lc,w(iw+i),g ,lg,1,mg) - -C SOLVE C*X=D AND MODIFY F - - mode=6 - DO 15 i=1,mc - IF(ABS(c(i,i)).LT.epmach) GOTO 75 - x(i)=(d(i)-ddot_sl(i-1,c(i,1),lc,x,1))/c(i,i) - 15 CONTINUE - mode=1 - w(mc1) = ZERO - CALL dcopy_ (mg-mc,w(mc1),0,w(mc1),1) - - IF(mc.EQ.n) GOTO 50 - - DO 20 i=1,me - 20 w(IF-1+i)=f(i)-ddot_sl(mc,e(i,1),LE,x,1) - -C STORE TRANSFORMED E & G - - DO 25 i=1,me - 25 CALL dcopy_(l,e(i,mc1),LE,w(ie-1+i),me) - DO 30 i=1,mg - 30 CALL dcopy_(l,g(i,mc1),lg,w(ig-1+i),mg) - - IF(mg.GT.0) GOTO 40 - -C SOLVE LS WITHOUT INEQUALITY CONSTRAINTS - - mode=7 - k=MAX(LE,n) - t=SQRT(epmach) - CALL hfti (w(ie),me,me,l,w(IF),k,1,t,krank,xnrm,w,w(l+1),jw) - CALL dcopy_(l,w(IF),1,x(mc1),1) - IF(krank.NE.l) GOTO 75 - mode=1 - GOTO 50 -C MODIFY H AND SOLVE INEQUALITY CONSTRAINED LS PROBLEM - - 40 DO 45 i=1,mg - 45 h(i)=h(i)-ddot_sl(mc,g(i,1),lg,x,1) - CALL lsi - . 
(w(ie),w(IF),w(ig),h,me,me,mg,mg,l,x(mc1),xnrm,w(mc1),jw,mode) - IF(mc.EQ.0) GOTO 75 - t=dnrm2_(mc,x,1) - xnrm=SQRT(xnrm*xnrm+t*t) - IF(mode.NE.1) GOTO 75 - -C SOLUTION OF ORIGINAL PROBLEM AND LAGRANGE MULTIPLIERS - - 50 DO 55 i=1,me - 55 f(i)=ddot_sl(n,e(i,1),LE,x,1)-f(i) - DO 60 i=1,mc - 60 d(i)=ddot_sl(me,e(1,i),1,f,1)-ddot_sl(mg,g(1,i),1,w(mc1),1) - - DO 65 i=mc,1,-1 - 65 CALL h12(2,i,i+1,n,c(i,1),lc,w(iw+i),x,1,1,1) - - DO 70 i=mc,1,-1 - j=MIN(i+1,lc) - w(i)=(d(i)-ddot_sl(mc-i,c(j,i),1,w(j),1))/c(i,i) - 70 CONTINUE - -C END OF SUBROUTINE LSEI - - 75 END - - - SUBROUTINE lsi(e,f,g,h,LE,me,lg,mg,n,x,xnorm,w,jw,mode) - -C FOR MODE=1, THE SUBROUTINE RETURNS THE SOLUTION X OF -C INEQUALITY CONSTRAINED LINEAR LEAST SQUARES PROBLEM: - -C MIN ||E*X-F|| -C X - -C S.T. G*X >= H - -C THE ALGORITHM IS BASED ON QR DECOMPOSITION AS DESCRIBED IN -C CHAPTER 23.5 OF LAWSON & HANSON: SOLVING LEAST SQUARES PROBLEMS - -C THE FOLLOWING DIMENSIONS OF THE ARRAYS DEFINING THE PROBLEM -C ARE NECESSARY -C DIM(E) : FORMAL (LE,N), ACTUAL (ME,N) -C DIM(F) : FORMAL (LE ), ACTUAL (ME ) -C DIM(G) : FORMAL (LG,N), ACTUAL (MG,N) -C DIM(H) : FORMAL (LG ), ACTUAL (MG ) -C DIM(X) : N -C DIM(W) : (N+1)*(MG+2) + 2*MG -C DIM(JW): LG -C ON ENTRY, THE USER HAS TO PROVIDE THE ARRAYS E, F, G, AND H. -C ON RETURN, ALL ARRAYS WILL BE CHANGED BY THE SUBROUTINE. -C X STORES THE SOLUTION VECTOR -C XNORM STORES THE RESIDUUM OF THE SOLUTION IN EUCLIDIAN NORM -C W STORES THE VECTOR OF LAGRANGE MULTIPLIERS IN ITS FIRST -C MG ELEMENTS -C MODE IS A SUCCESS-FAILURE FLAG WITH THE FOLLOWING MEANINGS: -C MODE=1: SUCCESSFUL COMPUTATION -C 2: ERROR RETURN BECAUSE OF WRONG DIMENSIONS (N<1) -C 3: ITERATION COUNT EXCEEDED BY NNLS -C 4: INEQUALITY CONSTRAINTS INCOMPATIBLE -C 5: MATRIX E IS NOT OF FULL RANK - -C 03.01.1980, DIETER KRAFT: CODED -C 20.03.1987, DIETER KRAFT: REVISED TO FORTRAN 77 - - INTEGER i,j,LE,lg,me,mg,mode,n,jw(lg) - DOUBLE PRECISION e(LE,n),f(LE),g(lg,n),h(lg),x(n),w(*), - . 
ddot_sl,xnorm,dnrm2_,epmach,t,one - DATA epmach/2.22d-16/,one/1.0d+00/ - -C QR-FACTORS OF E AND APPLICATION TO F - - DO 10 i=1,n - j=MIN(i+1,n) - CALL h12(1,i,i+1,me,e(1,i),1,t,e(1,j),1,LE,n-i) - 10 CALL h12(2,i,i+1,me,e(1,i),1,t,f ,1,1 ,1 ) - -C TRANSFORM G AND H TO GET LEAST DISTANCE PROBLEM - - mode=5 - DO 30 i=1,mg - DO 20 j=1,n - IF (ABS(e(j,j)).LT.epmach) GOTO 50 - 20 g(i,j)=(g(i,j)-ddot_sl(j-1,g(i,1),lg,e(1,j),1))/e(j,j) - 30 h(i)=h(i)-ddot_sl(n,g(i,1),lg,f,1) - -C SOLVE LEAST DISTANCE PROBLEM - - CALL ldp(g,lg,mg,n,h,x,xnorm,w,jw,mode) - IF (mode.NE.1) GOTO 50 - -C SOLUTION OF ORIGINAL PROBLEM - - CALL daxpy_sl(n,one,f,1,x,1) - DO 40 i=n,1,-1 - j=MIN(i+1,n) - 40 x(i)=(x(i)-ddot_sl(n-i,e(i,j),LE,x(j),1))/e(i,i) - j=MIN(n+1,me) - t=dnrm2_(me-n,f(j),1) - xnorm=SQRT(xnorm*xnorm+t*t) - -C END OF SUBROUTINE LSI - - 50 END - - SUBROUTINE ldp(g,mg,m,n,h,x,xnorm,w,INDEX,mode) - -C T -C MINIMIZE 1/2 X X SUBJECT TO G * X >= H. - -C C.L. LAWSON, R.J. HANSON: 'SOLVING LEAST SQUARES PROBLEMS' -C PRENTICE HALL, ENGLEWOOD CLIFFS, NEW JERSEY, 1974. - -C PARAMETER DESCRIPTION: - -C G(),MG,M,N ON ENTRY G() STORES THE M BY N MATRIX OF -C LINEAR INEQUALITY CONSTRAINTS. G() HAS FIRST -C DIMENSIONING PARAMETER MG -C H() ON ENTRY H() STORES THE M VECTOR H REPRESENTING -C THE RIGHT SIDE OF THE INEQUALITY SYSTEM - -C REMARK: G(),H() WILL NOT BE CHANGED DURING CALCULATIONS BY LDP - -C X() ON ENTRY X() NEED NOT BE INITIALIZED. -C ON EXIT X() STORES THE SOLUTION VECTOR X IF MODE=1. 
-C XNORM ON EXIT XNORM STORES THE EUCLIDIAN NORM OF THE -C SOLUTION VECTOR IF COMPUTATION IS SUCCESSFUL -C W() W IS A ONE DIMENSIONAL WORKING SPACE, THE LENGTH -C OF WHICH SHOULD BE AT LEAST (M+2)*(N+1) + 2*M -C ON EXIT W() STORES THE LAGRANGE MULTIPLIERS -C ASSOCIATED WITH THE CONSTRAINTS -C AT THE SOLUTION OF PROBLEM LDP -C INDEX() INDEX() IS A ONE DIMENSIONAL INTEGER WORKING SPACE -C OF LENGTH AT LEAST M -C MODE MODE IS A SUCCESS-FAILURE FLAG WITH THE FOLLOWING -C MEANINGS: -C MODE=1: SUCCESSFUL COMPUTATION -C 2: ERROR RETURN BECAUSE OF WRONG DIMENSIONS (N.LE.0) -C 3: ITERATION COUNT EXCEEDED BY NNLS -C 4: INEQUALITY CONSTRAINTS INCOMPATIBLE - - DOUBLE PRECISION g,h,x,xnorm,w,u,v, - . ZERO,one,fac,rnorm,dnrm2_,ddot_sl,diff - INTEGER INDEX,i,IF,iw,iwdual,iy,iz,j,m,mg,mode,n,n1 - DIMENSION g(mg,n),h(m),x(n),w(*),INDEX(m) - diff(u,v)= u-v - DATA ZERO,one/0.0d0,1.0d0/ - - mode=2 - IF(n.LE.0) GOTO 50 - -C STATE DUAL PROBLEM - - mode=1 - x(1)=ZERO - CALL dcopy_(n,x(1),0,x,1) - xnorm=ZERO - IF(m.EQ.0) GOTO 50 - iw=0 - DO 20 j=1,m - DO 10 i=1,n - iw=iw+1 - 10 w(iw)=g(j,i) - iw=iw+1 - 20 w(iw)=h(j) - IF=iw+1 - DO 30 i=1,n - iw=iw+1 - 30 w(iw)=ZERO - w(iw+1)=one - n1=n+1 - iz=iw+2 - iy=iz+n1 - iwdual=iy+m - -C SOLVE DUAL PROBLEM - - CALL nnls (w,n1,n1,m,w(IF),w(iy),rnorm,w(iwdual),w(iz),INDEX,mode) - - IF(mode.NE.1) GOTO 50 - mode=4 - IF(rnorm.LE.ZERO) GOTO 50 - -C COMPUTE SOLUTION OF PRIMAL PROBLEM - - fac=one-ddot_sl(m,h,1,w(iy),1) - IF(diff(one+fac,one).LE.ZERO) GOTO 50 - mode=1 - fac=one/fac - DO 40 j=1,n - 40 x(j)=fac*ddot_sl(m,g(1,j),1,w(iy),1) - xnorm=dnrm2_(n,x,1) - -C COMPUTE LAGRANGE MULTIPLIERS FOR PRIMAL PROBLEM - - w(1)=ZERO - CALL dcopy_(m,w(1),0,w,1) - CALL daxpy_sl(m,fac,w(iy),1,w,1) - -C END OF SUBROUTINE LDP - - 50 END - - - SUBROUTINE nnls (a, mda, m, n, b, x, rnorm, w, z, INDEX, mode) - -C C.L.LAWSON AND R.J.HANSON, JET PROPULSION LABORATORY: -C 'SOLVING LEAST SQUARES PROBLEMS'. 
PRENTICE-HALL.1974 - -C ********** NONNEGATIVE LEAST SQUARES ********** - -C GIVEN AN M BY N MATRIX, A, AND AN M-VECTOR, B, COMPUTE AN -C N-VECTOR, X, WHICH SOLVES THE LEAST SQUARES PROBLEM - -C A*X = B SUBJECT TO X >= 0 - -C A(),MDA,M,N -C MDA IS THE FIRST DIMENSIONING PARAMETER FOR THE ARRAY,A(). -C ON ENTRY A() CONTAINS THE M BY N MATRIX,A. -C ON EXIT A() CONTAINS THE PRODUCT Q*A, -C WHERE Q IS AN M BY M ORTHOGONAL MATRIX GENERATED -C IMPLICITLY BY THIS SUBROUTINE. -C EITHER M>=N OR M= M. EITHER M >= N OR M < N IS PERMITTED. -C THERE IS NO RESTRICTION ON THE RANK OF A. -C THE MATRIX A WILL BE MODIFIED BY THE SUBROUTINE. -C B(*,*),MDB,NB IF NB = 0 THE SUBROUTINE WILL MAKE NO REFERENCE -C TO THE ARRAY B. IF NB > 0 THE ARRAY B() MUST -C INITIALLY CONTAIN THE M x NB MATRIX B OF THE -C THE LEAST SQUARES PROBLEM AX = B AND ON RETURN -C THE ARRAY B() WILL CONTAIN THE N x NB SOLUTION X. -C IF NB>1 THE ARRAY B() MUST BE DOUBLE SUBSCRIPTED -C WITH FIRST DIMENSIONING PARAMETER MDB>=MAX(M,N), -C IF NB=1 THE ARRAY B() MAY BE EITHER SINGLE OR -C DOUBLE SUBSCRIPTED. -C TAU ABSOLUTE TOLERANCE PARAMETER FOR PSEUDORANK -C DETERMINATION, PROVIDED BY THE USER. -C KRANK PSEUDORANK OF A, SET BY THE SUBROUTINE. -C RNORM ON EXIT, RNORM(J) WILL CONTAIN THE EUCLIDIAN -C NORM OF THE RESIDUAL VECTOR FOR THE PROBLEM -C DEFINED BY THE J-TH COLUMN VECTOR OF THE ARRAY B. -C H(), G() ARRAYS OF WORKING SPACE OF LENGTH >= N. -C IP() INTEGER ARRAY OF WORKING SPACE OF LENGTH >= N -C RECORDING PERMUTATION INDICES OF COLUMN VECTORS - - INTEGER i,j,jb,k,kp1,krank,l,ldiag,lmax,m, - . mda,mdb,n,nb,ip(n) - DOUBLE PRECISION a(mda,n),b(mdb,nb),h(n),g(n),rnorm(nb),factor, - . tau,ZERO,hmax,diff,tmp,ddot_sl,dnrm2_,u,v - diff(u,v)= u-v - DATA ZERO/0.0d0/, factor/1.0d-3/ - - k=0 - ldiag=MIN(m,n) - IF(ldiag.LE.0) GOTO 270 - -C COMPUTE LMAX - - DO 80 j=1,ldiag - IF(j.EQ.1) GOTO 20 - lmax=j - DO 10 l=j,n - h(l)=h(l)-a(j-1,l)**2 - 10 IF(h(l).GT.h(lmax)) lmax=l - IF(diff(hmax+factor*h(lmax),hmax).GT.ZERO) - . 
GOTO 50 - 20 lmax=j - DO 40 l=j,n - h(l)=ZERO - DO 30 i=j,m - 30 h(l)=h(l)+a(i,l)**2 - 40 IF(h(l).GT.h(lmax)) lmax=l - hmax=h(lmax) - -C COLUMN INTERCHANGES IF NEEDED - - 50 ip(j)=lmax - IF(ip(j).EQ.j) GOTO 70 - DO 60 i=1,m - tmp=a(i,j) - a(i,j)=a(i,lmax) - 60 a(i,lmax)=tmp - h(lmax)=h(j) - -C J-TH TRANSFORMATION AND APPLICATION TO A AND B - - 70 i=MIN(j+1,n) - CALL h12(1,j,j+1,m,a(1,j),1,h(j),a(1,i),1,mda,n-j) - 80 CALL h12(2,j,j+1,m,a(1,j),1,h(j),b,1,mdb,nb) - -C DETERMINE PSEUDORANK - - DO 90 j=1,ldiag - 90 IF(ABS(a(j,j)).LE.tau) GOTO 100 - k=ldiag - GOTO 110 - 100 k=j-1 - 110 kp1=k+1 - -C NORM OF RESIDUALS - - DO 130 jb=1,nb - 130 rnorm(jb)=dnrm2_(m-k,b(kp1,jb),1) - IF(k.GT.0) GOTO 160 - DO 150 jb=1,nb - DO 150 i=1,n - 150 b(i,jb)=ZERO - GOTO 270 - 160 IF(k.EQ.n) GOTO 180 - -C HOUSEHOLDER DECOMPOSITION OF FIRST K ROWS - - DO 170 i=k,1,-1 - 170 CALL h12(1,i,kp1,n,a(i,1),mda,g(i),a,mda,1,i-1) - 180 DO 250 jb=1,nb - -C SOLVE K*K TRIANGULAR SYSTEM - - DO 210 i=k,1,-1 - j=MIN(i+1,n) - 210 b(i,jb)=(b(i,jb)-ddot_sl(k-i,a(i,j),mda,b(j,jb),1))/a(i,i) - -C COMPLETE SOLUTION VECTOR - - IF(k.EQ.n) GOTO 240 - DO 220 j=kp1,n - 220 b(j,jb)=ZERO - DO 230 i=1,k - 230 CALL h12(2,i,kp1,n,a(i,1),mda,g(i),b(1,jb),1,mdb,1) - -C REORDER SOLUTION ACCORDING TO PREVIOUS COLUMN INTERCHANGES - - 240 DO 250 j=ldiag,1,-1 - IF(ip(j).EQ.j) GOTO 250 - l=ip(j) - tmp=b(l,jb) - b(l,jb)=b(j,jb) - b(j,jb)=tmp - 250 CONTINUE - 270 krank=k - END - - SUBROUTINE h12 (mode,lpivot,l1,m,u,iue,up,c,ice,icv,ncv) - -C C.L.LAWSON AND R.J.HANSON, JET PROPULSION LABORATORY, 1973 JUN 12 -C TO APPEAR IN 'SOLVING LEAST SQUARES PROBLEMS', PRENTICE-HALL, 1974 - -C CONSTRUCTION AND/OR APPLICATION OF A SINGLE -C HOUSEHOLDER TRANSFORMATION Q = I + U*(U**T)/B - -C MODE = 1 OR 2 TO SELECT ALGORITHM H1 OR H2 . -C LPIVOT IS THE INDEX OF THE PIVOT ELEMENT. -C L1,M IF L1 <= M THE TRANSFORMATION WILL BE CONSTRUCTED TO -C ZERO ELEMENTS INDEXED FROM L1 THROUGH M. -C IF L1 > M THE SUBROUTINE DOES AN IDENTITY TRANSFORMATION. 
-C U(),IUE,UP -C ON ENTRY TO H1 U() STORES THE PIVOT VECTOR. -C IUE IS THE STORAGE INCREMENT BETWEEN ELEMENTS. -C ON EXIT FROM H1 U() AND UP STORE QUANTITIES DEFINING -C THE VECTOR U OF THE HOUSEHOLDER TRANSFORMATION. -C ON ENTRY TO H2 U() AND UP -C SHOULD STORE QUANTITIES PREVIOUSLY COMPUTED BY H1. -C THESE WILL NOT BE MODIFIED BY H2. -C C() ON ENTRY TO H1 OR H2 C() STORES A MATRIX WHICH WILL BE -C REGARDED AS A SET OF VECTORS TO WHICH THE HOUSEHOLDER -C TRANSFORMATION IS TO BE APPLIED. -C ON EXIT C() STORES THE SET OF TRANSFORMED VECTORS. -C ICE STORAGE INCREMENT BETWEEN ELEMENTS OF VECTORS IN C(). -C ICV STORAGE INCREMENT BETWEEN VECTORS IN C(). -C NCV NUMBER OF VECTORS IN C() TO BE TRANSFORMED. -C IF NCV <= 0 NO OPERATIONS WILL BE DONE ON C(). - - INTEGER incr, ice, icv, iue, lpivot, l1, mode, ncv - INTEGER i, i2, i3, i4, j, m - DOUBLE PRECISION u,up,c,cl,clinv,b,sm,one,ZERO - DIMENSION u(iue,*), c(*) - DATA one/1.0d+00/, ZERO/0.0d+00/ - - IF (0.GE.lpivot.OR.lpivot.GE.l1.OR.l1.GT.m) GOTO 80 - cl=ABS(u(1,lpivot)) - IF (mode.EQ.2) GOTO 30 - -C ****** CONSTRUCT THE TRANSFORMATION ****** - - DO 10 j=l1,m - sm=ABS(u(1,j)) - 10 cl=MAX(sm,cl) - IF (cl.LE.ZERO) GOTO 80 - clinv=one/cl - sm=(u(1,lpivot)*clinv)**2 - DO 20 j=l1,m - 20 sm=sm+(u(1,j)*clinv)**2 - cl=cl*SQRT(sm) - IF (u(1,lpivot).GT.ZERO) cl=-cl - up=u(1,lpivot)-cl - u(1,lpivot)=cl - GOTO 40 -C ****** APPLY THE TRANSFORMATION I+U*(U**T)/B TO C ****** - - 30 IF (cl.LE.ZERO) GOTO 80 - 40 IF (ncv.LE.0) GOTO 80 - b=up*u(1,lpivot) - IF (b.GE.ZERO) GOTO 80 - b=one/b - i2=1-icv+ice*(lpivot-1) - incr=ice*(l1-lpivot) - DO 70 j=1,ncv - i2=i2+icv - i3=i2+incr - i4=i3 - sm=c(i2)*up - DO 50 i=l1,m - sm=sm+c(i3)*u(1,i) - 50 i3=i3+ice - IF (sm.EQ.ZERO) GOTO 70 - sm=sm*b - c(i2)=c(i2)+sm*up - DO 60 i=l1,m - c(i4)=c(i4)+sm*u(1,i) - 60 i4=i4+ice - 70 CONTINUE - 80 END - - SUBROUTINE ldl (n,a,z,sigma,w) -C LDL LDL' - RANK-ONE - UPDATE - -C PURPOSE: -C UPDATES THE LDL' FACTORS OF MATRIX A BY RANK-ONE MATRIX -C SIGMA*Z*Z' - -C 
INPUT ARGUMENTS: (* MEANS PARAMETERS ARE CHANGED DURING EXECUTION) -C N : ORDER OF THE COEFFICIENT MATRIX A -C * A : POSITIVE DEFINITE MATRIX OF DIMENSION N; -C ONLY THE LOWER TRIANGLE IS USED AND IS STORED COLUMN BY -C COLUMN AS ONE DIMENSIONAL ARRAY OF DIMENSION N*(N+1)/2. -C * Z : VECTOR OF DIMENSION N OF UPDATING ELEMENTS -C SIGMA : SCALAR FACTOR BY WHICH THE MODIFYING DYADE Z*Z' IS -C MULTIPLIED - -C OUTPUT ARGUMENTS: -C A : UPDATED LDL' FACTORS - -C WORKING ARRAY: -C W : VECTOR OP DIMENSION N (USED ONLY IF SIGMA .LT. ZERO) - -C METHOD: -C THAT OF FLETCHER AND POWELL AS DESCRIBED IN : -C FLETCHER,R.,(1974) ON THE MODIFICATION OF LDL' FACTORIZATION. -C POWELL,M.J.D. MATH.COMPUTATION 28, 1067-1078. - -C IMPLEMENTED BY: -C KRAFT,D., DFVLR - INSTITUT FUER DYNAMIK DER FLUGSYSTEME -C D-8031 OBERPFAFFENHOFEN - -C STATUS: 15. JANUARY 1980 - -C SUBROUTINES REQUIRED: NONE - - INTEGER i, ij, j, n - DOUBLE PRECISION a(*), t, v, w(*), z(*), u, tp, one, beta, four, - * ZERO, alpha, delta, gamma, sigma, epmach - DATA ZERO, one, four, epmach /0.0d0, 1.0d0, 4.0d0, 2.22d-16/ - - IF(sigma.EQ.ZERO) GOTO 280 - ij=1 - t=one/sigma - IF(sigma.GT.ZERO) GOTO 220 -C PREPARE NEGATIVE UPDATE - DO 150 i=1,n - 150 w(i)=z(i) - DO 170 i=1,n - v=w(i) - t=t+v*v/a(ij) - DO 160 j=i+1,n - ij=ij+1 - 160 w(j)=w(j)-v*a(ij) - 170 ij=ij+1 - IF(t.GE.ZERO) t=epmach/sigma - DO 210 i=1,n - j=n+1-i - ij=ij-i - u=w(j) - w(j)=t - 210 t=t-u*u/a(ij) - 220 CONTINUE -C HERE UPDATING BEGINS - DO 270 i=1,n - v=z(i) - delta=v/a(ij) - IF(sigma.LT.ZERO) tp=w(i) - IF(sigma.GT.ZERO) tp=t+delta*v - alpha=tp/t - a(ij)=alpha*a(ij) - IF(i.EQ.n) GOTO 280 - beta=delta/tp - IF(alpha.GT.four) GOTO 240 - DO 230 j=i+1,n - ij=ij+1 - z(j)=z(j)-v*a(ij) - 230 a(ij)=a(ij)+beta*z(j) - GOTO 260 - 240 gamma=t/tp - DO 250 j=i+1,n - ij=ij+1 - u=a(ij) - a(ij)=gamma*u+beta*z(j) - 250 z(j)=z(j)-v*u - 260 ij=ij+1 - 270 t=tp - 280 RETURN -C END OF LDL - END - - DOUBLE PRECISION FUNCTION linmin (mode, ax, bx, f, tol) -C LINMIN LINESEARCH WITHOUT 
DERIVATIVES - -C PURPOSE: - -C TO FIND THE ARGUMENT LINMIN WHERE THE FUNCTION F TAKES IT'S MINIMUM -C ON THE INTERVAL AX, BX. -C COMBINATION OF GOLDEN SECTION AND SUCCESSIVE QUADRATIC INTERPOLATION. - -C INPUT ARGUMENTS: (* MEANS PARAMETERS ARE CHANGED DURING EXECUTION) - -C *MODE SEE OUTPUT ARGUMENTS -C AX LEFT ENDPOINT OF INITIAL INTERVAL -C BX RIGHT ENDPOINT OF INITIAL INTERVAL -C F FUNCTION VALUE AT LINMIN WHICH IS TO BE BROUGHT IN BY -C REVERSE COMMUNICATION CONTROLLED BY MODE -C TOL DESIRED LENGTH OF INTERVAL OF UNCERTAINTY OF FINAL RESULT - -C OUTPUT ARGUMENTS: - -C LINMIN ABSCISSA APPROXIMATING THE POINT WHERE F ATTAINS A MINIMUM -C MODE CONTROLS REVERSE COMMUNICATION -C MUST BE SET TO 0 INITIALLY, RETURNS WITH INTERMEDIATE -C VALUES 1 AND 2 WHICH MUST NOT BE CHANGED BY THE USER, -C ENDS WITH CONVERGENCE WITH VALUE 3. - -C WORKING ARRAY: - -C NONE - -C METHOD: - -C THIS FUNCTION SUBPROGRAM IS A SLIGHTLY MODIFIED VERSION OF THE -C ALGOL 60 PROCEDURE LOCALMIN GIVEN IN -C R.P. BRENT: ALGORITHMS FOR MINIMIZATION WITHOUT DERIVATIVES, -C PRENTICE-HALL (1973). - -C IMPLEMENTED BY: - -C KRAFT, D., DFVLR - INSTITUT FUER DYNAMIK DER FLUGSYSTEME -C D-8031 OBERPFAFFENHOFEN - -C STATUS: 31. AUGUST 1984 - -C SUBROUTINES REQUIRED: NONE - - INTEGER mode - DOUBLE PRECISION f, tol, a, b, c, d, e, p, q, r, u, v, w, x, m, - & fu, fv, fw, fx, eps, tol1, tol2, ZERO, ax, bx - DATA c /0.381966011d0/, eps /1.5d-8/, ZERO /0.0d0/ - -C EPS = SQUARE - ROOT OF MACHINE PRECISION -C C = GOLDEN SECTION RATIO = (3-SQRT(5))/2 - - GOTO (10, 55), mode - -C INITIALIZATION - - a = ax - b = bx - e = ZERO - v = a + c*(b - a) - w = v - x = w - linmin = x - mode = 1 - GOTO 100 - -C MAIN LOOP STARTS HERE - - 10 fx = f - fv = fx - fw = fv - 20 m = 0.5d0*(a + b) - tol1 = eps*ABS(x) + tol - tol2 = tol1 + tol1 - -C TEST CONVERGENCE - - IF (ABS(x - m) .LE. tol2 - 0.5d0*(b - a)) GOTO 90 - r = ZERO - q = r - p = q - IF (ABS(e) .LE. 
tol1) GOTO 30 - -C FIT PARABOLA - - r = (x - w)*(fx - fv) - q = (x - v)*(fx - fw) - p = (x - v)*q - (x - w)*r - q = q - r - q = q + q - IF (q .GT. ZERO) p = -p - IF (q .LT. ZERO) q = -q - r = e - e = d - -C IS PARABOLA ACCEPTABLE - - 30 IF (ABS(p) .GE. 0.5d0*ABS(q*r) .OR. - & p .LE. q*(a - x) .OR. p .GE. q*(b-x)) GOTO 40 - -C PARABOLIC INTERPOLATION STEP - - d = p/q - -C F MUST NOT BE EVALUATED TOO CLOSE TO A OR B - - IF (u - a .LT. tol2) d = SIGN(tol1, m - x) - IF (b - u .LT. tol2) d = SIGN(tol1, m - x) - GOTO 50 - -C GOLDEN SECTION STEP - - 40 IF (x .GE. m) e = a - x - IF (x .LT. m) e = b - x - d = c*e - -C F MUST NOT BE EVALUATED TOO CLOSE TO X - - 50 IF (ABS(d) .LT. tol1) d = SIGN(tol1, d) - u = x + d - linmin = u - mode = 2 - GOTO 100 - 55 fu = f - -C UPDATE A, B, V, W, AND X - - IF (fu .GT. fx) GOTO 60 - IF (u .GE. x) a = x - IF (u .LT. x) b = x - v = w - fv = fw - w = x - fw = fx - x = u - fx = fu - GOTO 85 - 60 IF (u .LT. x) a = u - IF (u .GE. x) b = u - IF (fu .LE. fw .OR. w .EQ. x) GOTO 70 - IF (fu .LE. fv .OR. v .EQ. x .OR. v .EQ. w) GOTO 80 - GOTO 85 - 70 v = w - fv = fw - w = u - fw = fu - GOTO 85 - 80 v = u - fv = fu - 85 GOTO 20 - -C END OF MAIN LOOP - - 90 linmin = x - mode = 3 - 100 RETURN - -C END OF LINMIN - - END - -C## Following a selection from BLAS Level 1 - - SUBROUTINE daxpy_sl(n,da,dx,incx,dy,incy) - -C CONSTANT TIMES A VECTOR PLUS A VECTOR. -C USES UNROLLED LOOPS FOR INCREMENTS EQUAL TO ONE. -C JACK DONGARRA, LINPACK, 3/11/78. - - DOUBLE PRECISION dx(*),dy(*),da - INTEGER i,incx,incy,ix,iy,m,mp1,n - - IF(n.LE.0)RETURN - IF(da.EQ.0.0d0)RETURN - IF(incx.EQ.1.AND.incy.EQ.1)GO TO 20 - -C CODE FOR UNEQUAL INCREMENTS OR EQUAL INCREMENTS -C NOT EQUAL TO 1 - - ix = 1 - iy = 1 - IF(incx.LT.0)ix = (-n+1)*incx + 1 - IF(incy.LT.0)iy = (-n+1)*incy + 1 - DO 10 i = 1,n - dy(iy) = dy(iy) + da*dx(ix) - ix = ix + incx - iy = iy + incy - 10 CONTINUE - RETURN - -C CODE FOR BOTH INCREMENTS EQUAL TO 1 - -C CLEAN-UP LOOP - - 20 m = MOD(n,4) - IF( m .EQ. 
0 ) GO TO 40 - DO 30 i = 1,m - dy(i) = dy(i) + da*dx(i) - 30 CONTINUE - IF( n .LT. 4 ) RETURN - 40 mp1 = m + 1 - DO 50 i = mp1,n,4 - dy(i) = dy(i) + da*dx(i) - dy(i + 1) = dy(i + 1) + da*dx(i + 1) - dy(i + 2) = dy(i + 2) + da*dx(i + 2) - dy(i + 3) = dy(i + 3) + da*dx(i + 3) - 50 CONTINUE - RETURN - END - - SUBROUTINE dcopy_(n,dx,incx,dy,incy) - -C COPIES A VECTOR, X, TO A VECTOR, Y. -C USES UNROLLED LOOPS FOR INCREMENTS EQUAL TO ONE. -C JACK DONGARRA, LINPACK, 3/11/78. - - DOUBLE PRECISION dx(*),dy(*) - INTEGER i,incx,incy,ix,iy,m,mp1,n - - IF(n.LE.0)RETURN - IF(incx.EQ.1.AND.incy.EQ.1)GO TO 20 - -C CODE FOR UNEQUAL INCREMENTS OR EQUAL INCREMENTS -C NOT EQUAL TO 1 - - ix = 1 - iy = 1 - IF(incx.LT.0)ix = (-n+1)*incx + 1 - IF(incy.LT.0)iy = (-n+1)*incy + 1 - DO 10 i = 1,n - dy(iy) = dx(ix) - ix = ix + incx - iy = iy + incy - 10 CONTINUE - RETURN - -C CODE FOR BOTH INCREMENTS EQUAL TO 1 - -C CLEAN-UP LOOP - - 20 m = MOD(n,7) - IF( m .EQ. 0 ) GO TO 40 - DO 30 i = 1,m - dy(i) = dx(i) - 30 CONTINUE - IF( n .LT. 7 ) RETURN - 40 mp1 = m + 1 - DO 50 i = mp1,n,7 - dy(i) = dx(i) - dy(i + 1) = dx(i + 1) - dy(i + 2) = dx(i + 2) - dy(i + 3) = dx(i + 3) - dy(i + 4) = dx(i + 4) - dy(i + 5) = dx(i + 5) - dy(i + 6) = dx(i + 6) - 50 CONTINUE - RETURN - END - - DOUBLE PRECISION FUNCTION ddot_sl(n,dx,incx,dy,incy) - -C FORMS THE DOT PRODUCT OF TWO VECTORS. -C USES UNROLLED LOOPS FOR INCREMENTS EQUAL TO ONE. -C JACK DONGARRA, LINPACK, 3/11/78. - - DOUBLE PRECISION dx(*),dy(*),dtemp - INTEGER i,incx,incy,ix,iy,m,mp1,n - - ddot_sl = 0.0d0 - dtemp = 0.0d0 - IF(n.LE.0)RETURN - IF(incx.EQ.1.AND.incy.EQ.1)GO TO 20 - -C CODE FOR UNEQUAL INCREMENTS OR EQUAL INCREMENTS -C NOT EQUAL TO 1 - - ix = 1 - iy = 1 - IF(incx.LT.0)ix = (-n+1)*incx + 1 - IF(incy.LT.0)iy = (-n+1)*incy + 1 - DO 10 i = 1,n - dtemp = dtemp + dx(ix)*dy(iy) - ix = ix + incx - iy = iy + incy - 10 CONTINUE - ddot_sl = dtemp - RETURN - -C CODE FOR BOTH INCREMENTS EQUAL TO 1 - -C CLEAN-UP LOOP - - 20 m = MOD(n,5) - IF( m .EQ. 
0 ) GO TO 40 - DO 30 i = 1,m - dtemp = dtemp + dx(i)*dy(i) - 30 CONTINUE - IF( n .LT. 5 ) GO TO 60 - 40 mp1 = m + 1 - DO 50 i = mp1,n,5 - dtemp = dtemp + dx(i)*dy(i) + dx(i + 1)*dy(i + 1) + - * dx(i + 2)*dy(i + 2) + dx(i + 3)*dy(i + 3) + dx(i + 4)*dy(i + 4) - 50 CONTINUE - 60 ddot_sl = dtemp - RETURN - END - - DOUBLE PRECISION FUNCTION dnrm1(n,x,i,j) - INTEGER n, i, j, k - DOUBLE PRECISION snormx, sum, x(n), ZERO, one, scale, temp - DATA ZERO/0.0d0/, one/1.0d0/ - -C DNRM1 - COMPUTES THE I-NORM OF A VECTOR -C BETWEEN THE ITH AND THE JTH ELEMENTS - -C INPUT - -C N LENGTH OF VECTOR -C X VECTOR OF LENGTH N -C I INITIAL ELEMENT OF VECTOR TO BE USED -C J FINAL ELEMENT TO USE - -C OUTPUT - -C DNRM1 NORM - - snormx=ZERO - DO 10 k=i,j - 10 snormx=MAX(snormx,ABS(x(k))) - dnrm1 = snormx - IF (snormx.EQ.ZERO) RETURN - scale = snormx - IF (snormx.GE.one) scale=SQRT(snormx) - sum=ZERO - DO 20 k=i,j - temp=ZERO - IF (ABS(x(k))+scale .NE. scale) temp = x(k)/snormx - IF (one+temp.NE.one) sum = sum+temp*temp - 20 CONTINUE - sum=SQRT(sum) - dnrm1=snormx*sum - RETURN - END - - DOUBLE PRECISION FUNCTION dnrm2_ ( n, dx, incx) - INTEGER n, i, j, nn, next, incx - DOUBLE PRECISION dx(*), cutlo, cuthi, hitest, sum, xmax, ZERO, one - DATA ZERO, one /0.0d0, 1.0d0/ - -C EUCLIDEAN NORM OF THE N-VECTOR STORED IN DX() WITH STORAGE -C INCREMENT INCX . -C IF N .LE. 0 RETURN WITH RESULT = 0. -C IF N .GE. 1 THEN INCX MUST BE .GE. 1 - -C C.L.LAWSON, 1978 JAN 08 - -C FOUR PHASE METHOD USING TWO BUILT-IN CONSTANTS THAT ARE -C HOPEFULLY APPLICABLE TO ALL MACHINES. -C CUTLO = MAXIMUM OF SQRT(U/EPS) OVER ALL KNOWN MACHINES. -C CUTHI = MINIMUM OF SQRT(V) OVER ALL KNOWN MACHINES. -C WHERE -C EPS = SMALLEST NO. SUCH THAT EPS + 1. .GT. 1. -C U = SMALLEST POSITIVE NO. (UNDERFLOW LIMIT) -C V = LARGEST NO. (OVERFLOW LIMIT) - -C BRIEF OUTLINE OF ALGORITHM.. - -C PHASE 1 SCANS ZERO COMPONENTS. -C MOVE TO PHASE 2 WHEN A COMPONENT IS NONZERO AND .LE. CUTLO -C MOVE TO PHASE 3 WHEN A COMPONENT IS .GT. 
CUTLO -C MOVE TO PHASE 4 WHEN A COMPONENT IS .GE. CUTHI/M -C WHERE M = N FOR X() REAL AND M = 2*N FOR COMPLEX. - -C VALUES FOR CUTLO AND CUTHI.. -C FROM THE ENVIRONMENTAL PARAMETERS LISTED IN THE IMSL CONVERTER -C DOCUMENT THE LIMITING VALUES ARE AS FOLLOWS.. -C CUTLO, S.P. U/EPS = 2**(-102) FOR HONEYWELL. CLOSE SECONDS ARE -C UNIVAC AND DEC AT 2**(-103) -C THUS CUTLO = 2**(-51) = 4.44089E-16 -C CUTHI, S.P. V = 2**127 FOR UNIVAC, HONEYWELL, AND DEC. -C THUS CUTHI = 2**(63.5) = 1.30438E19 -C CUTLO, D.P. U/EPS = 2**(-67) FOR HONEYWELL AND DEC. -C THUS CUTLO = 2**(-33.5) = 8.23181D-11 -C CUTHI, D.P. SAME AS S.P. CUTHI = 1.30438D19 -C DATA CUTLO, CUTHI / 8.232D-11, 1.304D19 / -C DATA CUTLO, CUTHI / 4.441E-16, 1.304E19 / - DATA cutlo, cuthi / 8.232d-11, 1.304d19 / - - IF(n .GT. 0) GO TO 10 - dnrm2_ = ZERO - GO TO 300 - - 10 assign 30 to next - sum = ZERO - nn = n * incx -C BEGIN MAIN LOOP - i = 1 - 20 GO TO next,(30, 50, 70, 110) - 30 IF( ABS(dx(i)) .GT. cutlo) GO TO 85 - assign 50 to next - xmax = ZERO - -C PHASE 1. SUM IS ZERO - - 50 IF( dx(i) .EQ. ZERO) GO TO 200 - IF( ABS(dx(i)) .GT. cutlo) GO TO 85 - -C PREPARE FOR PHASE 2. - - assign 70 to next - GO TO 105 - -C PREPARE FOR PHASE 4. - - 100 i = j - assign 110 to next - sum = (sum / dx(i)) / dx(i) - 105 xmax = ABS(dx(i)) - GO TO 115 - -C PHASE 2. SUM IS SMALL. -C SCALE TO AVOID DESTRUCTIVE UNDERFLOW. - - 70 IF( ABS(dx(i)) .GT. cutlo ) GO TO 75 - -C COMMON CODE FOR PHASES 2 AND 4. -C IN PHASE 4 SUM IS LARGE. SCALE TO AVOID OVERFLOW. - - 110 IF( ABS(dx(i)) .LE. xmax ) GO TO 115 - sum = one + sum * (xmax / dx(i))**2 - xmax = ABS(dx(i)) - GO TO 200 - - 115 sum = sum + (dx(i)/xmax)**2 - GO TO 200 - -C PREPARE FOR PHASE 3. - - 75 sum = (sum * xmax) * xmax - -C FOR REAL OR D.P. SET HITEST = CUTHI/N -C FOR COMPLEX SET HITEST = CUTHI/(2*N) - - 85 hitest = cuthi/float( n ) - -C PHASE 3. SUM IS MID-RANGE. NO SCALING. - - DO 95 j =i,nn,incx - IF(ABS(dx(j)) .GE. 
hitest) GO TO 100 - 95 sum = sum + dx(j)**2 - dnrm2_ = SQRT( sum ) - GO TO 300 - - 200 CONTINUE - i = i + incx - IF ( i .LE. nn ) GO TO 20 - -C END OF MAIN LOOP. - -C COMPUTE SQUARE ROOT AND ADJUST FOR SCALING. - - dnrm2_ = xmax * SQRT(sum) - 300 CONTINUE - RETURN - END - - SUBROUTINE dsrot (n,dx,incx,dy,incy,c,s) - -C APPLIES A PLANE ROTATION. -C JACK DONGARRA, LINPACK, 3/11/78. - - DOUBLE PRECISION dx(*),dy(*),dtemp,c,s - INTEGER i,incx,incy,ix,iy,n - - IF(n.LE.0)RETURN - IF(incx.EQ.1.AND.incy.EQ.1)GO TO 20 - -C CODE FOR UNEQUAL INCREMENTS OR EQUAL INCREMENTS NOT EQUAL -C TO 1 - - ix = 1 - iy = 1 - IF(incx.LT.0)ix = (-n+1)*incx + 1 - IF(incy.LT.0)iy = (-n+1)*incy + 1 - DO 10 i = 1,n - dtemp = c*dx(ix) + s*dy(iy) - dy(iy) = c*dy(iy) - s*dx(ix) - dx(ix) = dtemp - ix = ix + incx - iy = iy + incy - 10 CONTINUE - RETURN - -C CODE FOR BOTH INCREMENTS EQUAL TO 1 - - 20 DO 30 i = 1,n - dtemp = c*dx(i) + s*dy(i) - dy(i) = c*dy(i) - s*dx(i) - dx(i) = dtemp - 30 CONTINUE - RETURN - END - - SUBROUTINE dsrotg(da,db,c,s) - -C CONSTRUCT GIVENS PLANE ROTATION. -C JACK DONGARRA, LINPACK, 3/11/78. -C MODIFIED 9/27/86. - - DOUBLE PRECISION da,db,c,s,roe,scale,r,z,one,ZERO - DATA one, ZERO /1.0d+00, 0.0d+00/ - - roe = db - IF( ABS(da) .GT. ABS(db) ) roe = da - scale = ABS(da) + ABS(db) - IF( scale .NE. ZERO ) GO TO 10 - c = one - s = ZERO - r = ZERO - GO TO 20 - 10 r = scale*SQRT((da/scale)**2 + (db/scale)**2) - r = SIGN(one,roe)*r - c = da/r - s = db/r - 20 z = s - IF( ABS(c) .GT. ZERO .AND. ABS(c) .LE. s ) z = one/c - da = r - db = z - RETURN - END - - SUBROUTINE dscal_sl(n,da,dx,incx) - -C SCALES A VECTOR BY A CONSTANT. -C USES UNROLLED LOOPS FOR INCREMENT EQUAL TO ONE. -C JACK DONGARRA, LINPACK, 3/11/78. 
- - DOUBLE PRECISION da,dx(*) - INTEGER i,incx,m,mp1,n,nincx - - IF(n.LE.0)RETURN - IF(incx.EQ.1)GO TO 20 - - -C CODE FOR INCREMENT NOT EQUAL TO 1 - - nincx = n*incx - DO 10 i = 1,nincx,incx - dx(i) = da*dx(i) - 10 CONTINUE - RETURN - -C CODE FOR INCREMENT EQUAL TO 1 - -C CLEAN-UP LOOP - - 20 m = MOD(n,5) - IF( m .EQ. 0 ) GO TO 40 - DO 30 i = 1,m - dx(i) = da*dx(i) - 30 CONTINUE - IF( n .LT. 5 ) RETURN - 40 mp1 = m + 1 - DO 50 i = mp1,n,5 - dx(i) = da*dx(i) - dx(i + 1) = da*dx(i + 1) - dx(i + 2) = da*dx(i + 2) - dx(i + 3) = da*dx(i + 3) - dx(i + 4) = da*dx(i + 4) - 50 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/optimize/tests/test_cobyla.py b/scipy-0.10.1/scipy/optimize/tests/test_cobyla.py deleted file mode 100644 index 8c124aee8a..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_cobyla.py +++ /dev/null @@ -1,21 +0,0 @@ -import math - -from numpy.testing import assert_almost_equal, TestCase, run_module_suite - -from scipy.optimize import cobyla as co - -class TestCobyla(TestCase): - def test_simple(self): - - function = lambda x: x[0]**2 + abs(x[1])**3 - con1 = lambda x: x[0]**2 + x[1]**2 - 25 - con2 = lambda x: -con1(x) - - x = co.fmin_cobyla(function, [4.95,0.66], [con1, con2], rhobeg=1, - rhoend=1e-5, iprint=0, maxfun=100) - x1 = 2.0/3 - x0 = math.sqrt(25-x1*x1) - assert_almost_equal(x, [x0, x1], decimal=5) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_linesearch.py b/scipy-0.10.1/scipy/optimize/tests/test_linesearch.py deleted file mode 100644 index e405bbfb24..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_linesearch.py +++ /dev/null @@ -1,243 +0,0 @@ -""" -Tests for line search routines -""" - -from numpy.testing import assert_, assert_equal, \ - assert_array_almost_equal, assert_array_almost_equal_nulp -import scipy.optimize.linesearch as ls -import numpy as np - -def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): - """ - Check that strong Wolfe conditions apply 
- """ - phi1 = phi(s) - phi0 = phi(0) - derphi0 = derphi(0) - derphi1 = derphi(s) - msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % ( - s, phi0, phi1, derphi0, derphi1, err_msg) - - assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: "+ msg) - assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: "+ msg) - -def assert_armijo(s, phi, c1=1e-4, err_msg=""): - """ - Check that Armijo condition applies - """ - phi1 = phi(s) - phi0 = phi(0) - msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg) - assert_(phi1 <= (1 - c1*s)*phi0, msg) - -def assert_line_wolfe(x, p, s, f, fprime, **kw): - assert_wolfe(s, phi=lambda sp: f(x + p*sp), - derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) - -def assert_line_armijo(x, p, s, f, **kw): - assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) - -def assert_fp_equal(x, y, err_msg="", nulp=50): - """Assert two arrays are equal, up to some floating-point rounding error""" - try: - assert_array_almost_equal_nulp(x, y, nulp) - except AssertionError, e: - raise AssertionError("%s\n%s" % (e, err_msg)) - -class TestLineSearch(object): - # -- scalar functions; must have dphi(0.) < 0 - def _scalar_func_1(self, s): - self.fcount += 1 - p = -s - s**3 + s**4 - dp = -1 - 3*s**2 + 4*s**3 - return p, dp - - def _scalar_func_2(self, s): - self.fcount += 1 - p = np.exp(-4*s) + s**2 - dp = -4*np.exp(-4*s) + 2*s - return p, dp - - def _scalar_func_3(self, s): - self.fcount += 1 - p = -np.sin(10*s) - dp = -10*np.cos(10*s) - return p, dp - - # -- n-d functions - - def _line_func_1(self, x): - self.fcount += 1 - f = np.dot(x, x) - df = 2*x - return f, df - - def _line_func_2(self, x): - self.fcount += 1 - f = np.dot(x, np.dot(self.A, x)) + 1 - df = np.dot(self.A + self.A.T, x) - return f, df - - # -- - - def __init__(self): - self.scalar_funcs = [] - self.line_funcs = [] - self.N = 20 - self.fcount = 0 - - def bind_index(func, idx): - # Remember Python's closure semantics! 
- return lambda *a, **kw: func(*a, **kw)[idx] - - for name in sorted(dir(self)): - if name.startswith('_scalar_func_'): - value = getattr(self, name) - self.scalar_funcs.append( - (name, bind_index(value, 0), bind_index(value, 1))) - elif name.startswith('_line_func_'): - value = getattr(self, name) - self.line_funcs.append( - (name, bind_index(value, 0), bind_index(value, 1))) - - def setUp(self): - np.random.seed(1234) - self.A = np.random.randn(self.N, self.N) - - def scalar_iter(self): - for name, phi, derphi in self.scalar_funcs: - for old_phi0 in np.random.randn(3): - yield name, phi, derphi, old_phi0 - - def line_iter(self): - for name, f, fprime in self.line_funcs: - k = 0 - while k < 9: - x = np.random.randn(self.N) - p = np.random.randn(self.N) - if np.dot(p, fprime(x)) >= 0: - # always pick a descent direction - continue - k += 1 - old_fv = float(np.random.randn()) - yield name, f, fprime, x, p, old_fv - - # -- Generic scalar searches - - def test_scalar_search_wolfe1(self): - c = 0 - for name, phi, derphi, old_phi0 in self.scalar_iter(): - c += 1 - s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), - old_phi0, derphi(0)) - assert_fp_equal(phi0, phi(0), name) - assert_fp_equal(phi1, phi(s), name) - assert_wolfe(s, phi, derphi, err_msg=name) - - assert_(c > 3) # check that the iterator really works... 
- - def test_scalar_search_wolfe2(self): - for name, phi, derphi, old_phi0 in self.scalar_iter(): - s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( - phi, derphi, phi(0), old_phi0, derphi(0)) - assert_fp_equal(phi0, phi(0), name) - assert_fp_equal(phi1, phi(s), name) - if derphi1 is not None: - assert_fp_equal(derphi1, derphi(s), name) - assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0)) - - def test_scalar_search_armijo(self): - for name, phi, derphi, old_phi0 in self.scalar_iter(): - s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) - assert_fp_equal(phi1, phi(s), name) - assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0)) - - # -- Generic line searches - - def test_line_search_wolfe1(self): - c = 0 - smax = 100 - for name, f, fprime, x, p, old_f in self.line_iter(): - f0 = f(x) - g0 = fprime(x) - self.fcount = 0 - s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, - g0, f0, old_f, - amax=smax) - assert_equal(self.fcount, fc+gc) - assert_fp_equal(ofv, f(x)) - if s is None: - continue - assert_fp_equal(fv, f(x + s*p)) - assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) - if s < smax: - c += 1 - assert_line_wolfe(x, p, s, f, fprime, err_msg=name) - - assert_(c > 3) # check that the iterator really works... - - def test_line_search_wolfe2(self): - c = 0 - smax = 100 - for name, f, fprime, x, p, old_f in self.line_iter(): - f0 = f(x) - g0 = fprime(x) - self.fcount = 0 - s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, - g0, f0, old_f, - amax=smax) - assert_equal(self.fcount, fc+gc) - assert_fp_equal(ofv, f(x)) - assert_fp_equal(fv, f(x + s*p)) - if gv is not None: - assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) - if s < smax: - c += 1 - assert_line_wolfe(x, p, s, f, fprime, err_msg=name) - assert_(c > 3) # check that the iterator really works... 
- - def test_line_search_armijo(self): - c = 0 - for name, f, fprime, x, p, old_f in self.line_iter(): - f0 = f(x) - g0 = fprime(x) - self.fcount = 0 - s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) - c += 1 - assert_equal(self.fcount, fc) - assert_fp_equal(fv, f(x + s*p)) - assert_line_armijo(x, p, s, f, err_msg=name) - assert_(c >= 9) - - # -- More specific tests - - def test_armijo_terminate_1(self): - # Armijo should evaluate the function only once if the trial step - # is already suitable - count = [0] - def phi(s): - count[0] += 1 - return -s + 0.01*s**2 - s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) - assert_equal(s, 1) - assert_equal(count[0], 2) - assert_armijo(s, phi) - - def test_wolfe_terminate(self): - # wolfe1 and wolfe2 should also evaluate the function only a few - # times if the trial step is already suitable - - def phi(s): - count[0] += 1 - return -s + 0.05*s**2 - - def derphi(s): - count[0] += 1 - return -1 + 0.05*2*s - - for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: - count = [0] - r = func(phi, derphi, phi(0), None, derphi(0)) - assert_(r[0] is not None, (r, func)) - assert_(count[0] <= 2 + 2, (count, func)) - assert_wolfe(r[0], phi, derphi, err_msg=str(func)) diff --git a/scipy-0.10.1/scipy/optimize/tests/test_minpack.py b/scipy-0.10.1/scipy/optimize/tests/test_minpack.py deleted file mode 100644 index 72b9c23f90..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_minpack.py +++ /dev/null @@ -1,304 +0,0 @@ -""" -Unit tests for optimization routines from minpack.py. -""" - -from numpy.testing import assert_, assert_almost_equal, assert_array_equal, \ - assert_array_almost_equal, TestCase, run_module_suite, assert_raises -import numpy as np -from numpy import array, float64 - -from scipy import optimize -from scipy.optimize.minpack import leastsq, curve_fit, fixed_point - - -class ReturnShape(object): - """This class exists to create a callable that does not have a 'func_name' attribute. 
- - __init__ takes the argument 'shape', which should be a tuple of ints. When an instance - it called with a single argument 'x', it returns numpy.ones(shape). - """ - def __init__(self, shape): - self.shape = shape - - def __call__(self, x): - return np.ones(self.shape) - -def dummy_func(x, shape): - """A function that returns an array of ones of the given shape. - `x` is ignored. - """ - return np.ones(shape) - - -class TestFSolve(object): - def pressure_network(self, flow_rates, Qtot, k): - """Evaluate non-linear equation system representing - the pressures and flows in a system of n parallel pipes:: - - f_i = P_i - P_0, for i = 1..n - f_0 = sum(Q_i) - Qtot - - Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe. - Pressure is modeled as a P=kQ**2 where k is a valve coefficient and - Q is the flow rate. - - Parameters - ---------- - flow_rates : float - A 1D array of n flow rates [kg/s]. - k : float - A 1D array of n valve coefficients [1/kg m]. - Qtot : float - A scalar, the total input flow rate [kg/s]. - - Returns - ------- - F : float - A 1D array, F[i] == f_i. - - """ - P = k * flow_rates**2 - F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot)) - return F - - def pressure_network_jacobian(self, flow_rates, Qtot, k): - """Return the jacobian of the equation system F(flow_rates) - computed by `pressure_network` with respect to - *flow_rates*. See `pressure_network` for the detailed - description of parrameters. 
- - Returns - ------- - jac : float - *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)`` - and *f_i* and *Q_i* are described in the doc for `pressure_network` - """ - n = len(flow_rates) - pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0]) - - jac = np.empty((n, n)) - jac[:n-1, :n-1] = pdiff - jac[:n-1, n-1] = 0 - jac[n-1, :] = np.ones(n) - - return jac - - def test_pressure_network_no_gradient(self): - """fsolve without gradient, equal pipes -> equal flows""" - k = np.ones(4) * 0.5 - Qtot = 4 - initial_guess = array([2., 0., 2., 0.]) - final_flows = optimize.fsolve( - self.pressure_network, initial_guess, args=(Qtot, k)) - assert_array_almost_equal(final_flows, np.ones(4)) - - def test_pressure_network_with_gradient(self): - """fsolve with gradient, equal pipes -> equal flows""" - k = np.ones(4) * 0.5 - Qtot = 4 - initial_guess = array([2., 0., 2., 0.]) - final_flows = optimize.fsolve( - self.pressure_network, initial_guess, args=(Qtot, k), - fprime=self.pressure_network_jacobian) - assert_array_almost_equal(final_flows, np.ones(4)) - - def test_wrong_shape_func_callable(self): - """The callable 'func' has no 'func_name' attribute.""" - func = ReturnShape(1) - # x0 is a list of two elements, but func will return an array with - # length 1, so this should result in a TypeError. - x0 = [1.5, 2.0] - assert_raises(TypeError, optimize.fsolve, func, x0) - - def test_wrong_shape_func_function(self): - # x0 is a list of two elements, but func will return an array with - # length 1, so this should result in a TypeError. 
- x0 = [1.5, 2.0] - assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),)) - - def test_wrong_shape_fprime_callable(self): - """The callables 'func' and 'deriv_func' have no 'func_name' attribute.""" - func = ReturnShape(1) - deriv_func = ReturnShape((2,2)) - assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) - - def test_wrong_shape_fprime_function(self): - func = lambda x: dummy_func(x, (2,)) - deriv_func = lambda x: dummy_func(x, (3,3)) - assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) - - -class TestLeastSq(TestCase): - def setUp(self): - x = np.linspace(0, 10, 40) - a,b,c = 3.1, 42, -304.2 - self.x = x - self.abc = a,b,c - y_true = a*x**2 + b*x + c - np.random.seed(0) - self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape) - - def residuals(self, p, y, x): - a,b,c = p - err = y-(a*x**2 + b*x + c) - return err - - def test_basic(self): - p0 = array([0,0,0]) - params_fit, ier = leastsq(self.residuals, p0, - args=(self.y_meas, self.x)) - assert_(ier in (1,2,3,4), 'solution not found (ier=%d)'%ier) - # low precision due to random - assert_array_almost_equal(params_fit, self.abc, decimal=2) - - def test_full_output(self): - p0 = array([0,0,0]) - full_output = leastsq(self.residuals, p0, - args=(self.y_meas, self.x), - full_output=True) - params_fit, cov_x, infodict, mesg, ier = full_output - assert_(ier in (1,2,3,4), 'solution not found: %s'%mesg) - - def test_input_untouched(self): - p0 = array([0,0,0],dtype=float64) - p0_copy = array(p0, copy=True) - full_output = leastsq(self.residuals, p0, - args=(self.y_meas, self.x), - full_output=True) - params_fit, cov_x, infodict, mesg, ier = full_output - assert_(ier in (1,2,3,4), 'solution not found: %s'%mesg) - assert_array_equal(p0, p0_copy) - - def test_wrong_shape_func_callable(self): - """The callable 'func' has no 'func_name' attribute.""" - func = ReturnShape(1) - # x0 is a list of two elements, but func will return an array with 
- # length 1, so this should result in a TypeError. - x0 = [1.5, 2.0] - assert_raises(TypeError, optimize.leastsq, func, x0) - - def test_wrong_shape_func_function(self): - # x0 is a list of two elements, but func will return an array with - # length 1, so this should result in a TypeError. - x0 = [1.5, 2.0] - assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),)) - - def test_wrong_shape_Dfun_callable(self): - """The callables 'func' and 'deriv_func' have no 'func_name' attribute.""" - func = ReturnShape(1) - deriv_func = ReturnShape((2,2)) - assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) - - def test_wrong_shape_Dfun_function(self): - func = lambda x: dummy_func(x, (2,)) - deriv_func = lambda x: dummy_func(x, (3,3)) - assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) - - -class TestCurveFit(TestCase): - def setUp(self): - self.y = array([1.0, 3.2, 9.5, 13.7]) - self.x = array([1.0, 2.0, 3.0, 4.0]) - - def test_one_argument(self): - def func(x,a): - return x**a - popt, pcov = curve_fit(func, self.x, self.y) - assert_(len(popt) == 1) - assert_(pcov.shape == (1,1)) - assert_almost_equal(popt[0], 1.9149, decimal=4) - assert_almost_equal(pcov[0,0], 0.0016, decimal=4) - - # Test if we get the same with full_output. Regression test for #1415. - res = curve_fit(func, self.x, self.y, full_output=1) - (popt2, pcov2, infodict, errmsg, ier) = res - assert_array_almost_equal(popt, popt2) - - def test_two_argument(self): - def func(x, a, b): - return b*x**a - popt, pcov = curve_fit(func, self.x, self.y) - assert_(len(popt) == 2) - assert_(pcov.shape == (2,2)) - assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) - assert_array_almost_equal(pcov, [[0.0852, -0.1260],[-0.1260, 0.1912]], - decimal=4) - - def test_func_is_classmethod(self): - class test_self(object): - """This class tests if curve_fit passes the correct number of - arguments when the model function is a class instance method. 
- """ - def func(self, x, a, b): - return b * x**a - - test_self_inst = test_self() - popt, pcov = curve_fit(test_self_inst.func, self.x, self.y) - assert_(pcov.shape == (2,2)) - assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) - assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], - decimal=4) - - -class TestFixedPoint(TestCase): - - def test_scalar_trivial(self): - """f(x) = 2x; fixed point should be x=0""" - def func(x): - return 2.0*x - x0 = 1.0 - x = fixed_point(func, x0) - assert_almost_equal(x, 0.0) - - def test_scalar_basic1(self): - """f(x) = x**2; x0=1.05; fixed point should be x=1""" - def func(x): - return x**2 - x0 = 1.05 - x = fixed_point(func, x0) - assert_almost_equal(x, 1.0) - - def test_scalar_basic2(self): - """f(x) = x**0.5; x0=1.05; fixed point should be x=1""" - def func(x): - return x**0.5 - x0 = 1.05 - x = fixed_point(func, x0) - assert_almost_equal(x, 1.0) - - def test_array_trivial(self): - def func(x): - return 2.0*x - x0 = [0.3, 0.15] - olderr = np.seterr(all='ignore') - try: - x = fixed_point(func, x0) - finally: - np.seterr(**olderr) - assert_almost_equal(x, [0.0, 0.0]) - - def test_array_basic1(self): - """f(x) = c * x**2; fixed point should be x=1/c""" - def func(x, c): - return c * x**2 - c = array([0.75, 1.0, 1.25]) - x0 = [1.1, 1.15, 0.9] - olderr = np.seterr(all='ignore') - try: - x = fixed_point(func, x0, args=(c,)) - finally: - np.seterr(**olderr) - assert_almost_equal(x, 1.0/c) - - def test_array_basic2(self): - """f(x) = c * x**0.5; fixed point should be x=c**2""" - def func(x, c): - return c * x**0.5 - c = array([0.75, 1.0, 1.25]) - x0 = [0.8, 1.1, 1.1] - x = fixed_point(func, x0, args=(c,)) - assert_almost_equal(x, c**2) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_nnls.py b/scipy-0.10.1/scipy/optimize/tests/test_nnls.py deleted file mode 100644 index 6ff1fd7ebc..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_nnls.py +++ 
/dev/null @@ -1,24 +0,0 @@ -""" Unit tests for nonnegative least squares -Author: Uwe Schmitt -Sep 2008 -""" - -from numpy.testing import assert_, TestCase, run_module_suite - -from scipy.optimize import nnls -from numpy import arange, dot -from numpy.linalg import norm - - -class TestNNLS(TestCase): - - def test_nnls(self): - a = arange(25.0).reshape(-1,5) - x = arange(5.0) - y = dot(a,x) - x, res = nnls(a,y) - assert_(res < 1e-7) - assert_(norm(dot(a,x)-y) < 1e-7) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_nonlin.py b/scipy-0.10.1/scipy/optimize/tests/test_nonlin.py deleted file mode 100644 index a5be7b9254..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_nonlin.py +++ /dev/null @@ -1,345 +0,0 @@ -""" Unit tests for nonlinear solvers -Author: Ondrej Certik -May 2007 -""" - -from numpy.testing import assert_, dec, TestCase, run_module_suite - -from scipy.optimize import nonlin -from numpy import matrix, diag, dot -from numpy.linalg import inv -import numpy as np - -SOLVERS = [nonlin.anderson, nonlin.diagbroyden, nonlin.linearmixing, - nonlin.excitingmixing, nonlin.broyden1, nonlin.broyden2, - nonlin.newton_krylov] -MUST_WORK = [nonlin.anderson, nonlin.broyden1, nonlin.broyden2, - nonlin.newton_krylov] - -#------------------------------------------------------------------------------- -# Test problems -#------------------------------------------------------------------------------- - -def F(x): - x = np.asmatrix(x).T - d = matrix(diag([3,2,1.5,1,0.5])) - c = 0.01 - f = -d*x - c*float(x.T*x)*x - return f -F.xin = [1,1,1,1,1] -F.KNOWN_BAD = [] - -def F2(x): - return x -F2.xin = [1,2,3,4,5,6] -F2.KNOWN_BAD = [nonlin.linearmixing, nonlin.excitingmixing] - -def F3(x): - A = np.mat('-2 1 0; 1 -2 1; 0 1 -2') - b = np.mat('1 2 3') - return np.dot(A, x) - b -F3.xin = [1,2,3] -F3.KNOWN_BAD = [] - -def F4_powell(x): - A = 1e4 - return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)] -F4_powell.xin 
= [-1, -2] -F4_powell.KNOWN_BAD = [nonlin.linearmixing, nonlin.excitingmixing, - nonlin.diagbroyden] - -from test_minpack import TestFSolve as F5_class -F5_object = F5_class() -def F5(x): - return F5_object.pressure_network(x, 4, np.array([.5, .5, .5, .5])) -F5.xin = [2., 0, 2, 0] -F5.KNOWN_BAD = [nonlin.excitingmixing, nonlin.linearmixing, nonlin.diagbroyden] - -def F6(x): - x1, x2 = x - J0 = np.array([[ -4.256 , 14.7 ], - [ 0.8394989 , 0.59964207]]) - v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6, - np.sin(x2 * np.exp(x1) - 1)]) - return -np.linalg.solve(J0, v) -F6.xin = [-0.5, 1.4] -F6.KNOWN_BAD = [nonlin.excitingmixing, nonlin.linearmixing, nonlin.diagbroyden] - -#------------------------------------------------------------------------------- -# Tests -#------------------------------------------------------------------------------- - -class TestNonlin(object): - """ - Check the Broyden methods for a few test problems. - - broyden1, broyden2, and newton_krylov must succeed for - all functions. Some of the others don't -- tests in KNOWN_BAD are skipped. 
- - """ - - def _check_func(self, f, func, f_tol=1e-2): - x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0) - assert_(np.absolute(f(x)).max() < f_tol) - - @dec.knownfailureif(True) - def _check_func_fail(self, *a, **kw): - pass - - def test_problem(self): - for f in [F, F2, F3, F4_powell, F5, F6]: - for func in SOLVERS: - if func in f.KNOWN_BAD: - if func in MUST_WORK: - yield self._check_func_fail, f, func - continue - yield self._check_func, f, func - - -class TestSecant(TestCase): - """Check that some Jacobian approximations satisfy the secant condition""" - - xs = [np.array([1,2,3,4,5], float), - np.array([2,3,4,5,1], float), - np.array([3,4,5,1,2], float), - np.array([4,5,1,2,3], float), - np.array([9,1,9,1,3], float), - np.array([0,1,9,1,3], float), - np.array([5,5,7,1,1], float), - np.array([1,2,7,5,1], float),] - fs = [x**2 - 1 for x in xs] - - def _check_secant(self, jac_cls, npoints=1, **kw): - """ - Check that the given Jacobian approximation satisfies secant - conditions for last `npoints` points. 
- """ - jac = jac_cls(**kw) - jac.setup(self.xs[0], self.fs[0], None) - for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): - jac.update(x, f) - - for k in xrange(min(npoints, j+1)): - dx = self.xs[j-k+1] - self.xs[j-k] - df = self.fs[j-k+1] - self.fs[j-k] - assert_(np.allclose(dx, jac.solve(df))) - - # Check that the `npoints` secant bound is strict - if j >= npoints: - dx = self.xs[j-npoints+1] - self.xs[j-npoints] - df = self.fs[j-npoints+1] - self.fs[j-npoints] - assert_(not np.allclose(dx, jac.solve(df))) - - def test_broyden1(self): - self._check_secant(nonlin.BroydenFirst) - - def test_broyden2(self): - self._check_secant(nonlin.BroydenSecond) - - def test_broyden1_update(self): - # Check that BroydenFirst update works as for a dense matrix - jac = nonlin.BroydenFirst(alpha=0.1) - jac.setup(self.xs[0], self.fs[0], None) - - B = np.identity(5) * (-1/0.1) - - for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): - df = f - self.fs[last_j] - dx = x - self.xs[last_j] - B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx) - jac.update(x, f) - assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13)) - - def test_broyden2_update(self): - # Check that BroydenSecond update works as for a dense matrix - jac = nonlin.BroydenSecond(alpha=0.1) - jac.setup(self.xs[0], self.fs[0], None) - - H = np.identity(5) * (-0.1) - - for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): - df = f - self.fs[last_j] - dx = x - self.xs[last_j] - H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df) - jac.update(x, f) - assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13)) - - def test_anderson(self): - # Anderson mixing (with w0=0) satisfies secant conditions - # for the last M iterates, see [Ey]_ - # - # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
- self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3) - -class TestLinear(TestCase): - """Solve a linear equation; - some methods find the exact solution in a finite number of steps""" - - def _check(self, jac, N, maxiter, complex=False, **kw): - np.random.seed(123) - - A = np.random.randn(N, N) - if complex: - A = A + 1j*np.random.randn(N, N) - b = np.random.randn(N) - if complex: - b = b + 1j*np.random.randn(N) - - def func(x): - return dot(A, x) - b - - sol = nonlin.nonlin_solve(func, b*0, jac, maxiter=maxiter, - f_tol=1e-6, line_search=None, verbose=0) - assert_(np.allclose(dot(A, sol), b, atol=1e-6)) - - def test_broyden1(self): - # Broyden methods solve linear systems exactly in 2*N steps - self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False) - self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True) - - def test_broyden2(self): - # Broyden methods solve linear systems exactly in 2*N steps - self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False) - self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True) - - def test_anderson(self): - # Anderson is rather similar to Broyden, if given enough storage space - self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False) - self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True) - - def test_krylov(self): - # Krylov methods solve linear systems exactly in N inner steps - self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10) - self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10) - - -class TestJacobianDotSolve(object): - """Check that solve/dot methods in Jacobian approximations are consistent""" - - def _func(self, x): - return x**2 - 1 + np.dot(self.A, x) - - def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw): - np.random.seed(123) - - N = 7 - def rand(*a): - q = np.random.rand(*a) - if complex: - q = q + 1j*np.random.rand(*a) - return q - - def assert_close(a, b, msg): - d = abs(a - b).max() - f = tol + abs(b).max()*tol - if d > f: - raise AssertionError('%s: err %g' 
% (msg, d)) - - self.A = rand(N, N) - - # initialize - x0 = np.random.rand(N) - jac = jac_cls(**kw) - jac.setup(x0, self._func(x0), self._func) - - # check consistency - for k in xrange(2*N): - v = rand(N) - - if hasattr(jac, '__array__'): - Jd = np.array(jac) - if hasattr(jac, 'solve'): - Gv = jac.solve(v) - Gv2 = np.linalg.solve(Jd, v) - assert_close(Gv, Gv2, 'solve vs array') - if hasattr(jac, 'rsolve'): - Gv = jac.rsolve(v) - Gv2 = np.linalg.solve(Jd.T.conj(), v) - assert_close(Gv, Gv2, 'rsolve vs array') - if hasattr(jac, 'matvec'): - Jv = jac.matvec(v) - Jv2 = np.dot(Jd, v) - assert_close(Jv, Jv2, 'dot vs array') - if hasattr(jac, 'rmatvec'): - Jv = jac.rmatvec(v) - Jv2 = np.dot(Jd.T.conj(), v) - assert_close(Jv, Jv2, 'rmatvec vs array') - - if hasattr(jac, 'matvec') and hasattr(jac, 'solve'): - Jv = jac.matvec(v) - Jv2 = jac.solve(jac.matvec(Jv)) - assert_close(Jv, Jv2, 'dot vs solve') - - if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'): - Jv = jac.rmatvec(v) - Jv2 = jac.rmatvec(jac.rsolve(Jv)) - assert_close(Jv, Jv2, 'rmatvec vs rsolve') - - x = rand(N) - jac.update(x, self._func(x)) - - def test_broyden1(self): - self._check_dot(nonlin.BroydenFirst, complex=False) - self._check_dot(nonlin.BroydenFirst, complex=True) - - def test_broyden2(self): - self._check_dot(nonlin.BroydenSecond, complex=False) - self._check_dot(nonlin.BroydenSecond, complex=True) - - def test_anderson(self): - self._check_dot(nonlin.Anderson, complex=False) - self._check_dot(nonlin.Anderson, complex=True) - - def test_diagbroyden(self): - self._check_dot(nonlin.DiagBroyden, complex=False) - self._check_dot(nonlin.DiagBroyden, complex=True) - - def test_linearmixing(self): - self._check_dot(nonlin.LinearMixing, complex=False) - self._check_dot(nonlin.LinearMixing, complex=True) - - def test_excitingmixing(self): - self._check_dot(nonlin.ExcitingMixing, complex=False) - self._check_dot(nonlin.ExcitingMixing, complex=True) - - def test_krylov(self): - 
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4) - self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4) - -class TestNonlinOldTests(TestCase): - """ Test case for a simple constrained entropy maximization problem - (the machine translation example of Berger et al in - Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) - """ - - def test_broyden1(self): - x= nonlin.broyden1(F,F.xin,iter=12,alpha=1) - assert_(nonlin.norm(x) < 1e-9) - assert_(nonlin.norm(F(x)) < 1e-9) - - def test_broyden2(self): - x= nonlin.broyden2(F,F.xin,iter=12,alpha=1) - assert_(nonlin.norm(x) < 1e-9) - assert_(nonlin.norm(F(x)) < 1e-9) - - def test_anderson(self): - x= nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5) - assert_(nonlin.norm(x) < 0.33) - - def test_linearmixing(self): - x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5) - assert_(nonlin.norm(x) < 1e-7) - assert_(nonlin.norm(F(x)) < 1e-7) - - def test_exciting(self): - x= nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5) - assert_(nonlin.norm(x) < 1e-5) - assert_(nonlin.norm(F(x)) < 1e-5) - - def test_diagbroyden(self): - x= nonlin.diagbroyden(F,F.xin,iter=11,alpha=1) - assert_(nonlin.norm(x) < 1e-8) - assert_(nonlin.norm(F(x)) < 1e-8) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_optimize.py b/scipy-0.10.1/scipy/optimize/tests/test_optimize.py deleted file mode 100644 index 56af7bfcf5..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_optimize.py +++ /dev/null @@ -1,381 +0,0 @@ -""" -Unit tests for optimization routines from optimize.py and tnc.py - -Authors: - Ed Schofield, Nov 2005 - Andrew Straw, April 2008 - -To run it in its simplest form:: - nosetests test_optimize.py - -""" - -from numpy.testing import assert_raises, assert_almost_equal, \ - assert_equal, assert_, TestCase, run_module_suite - -from scipy import optimize -from numpy import array, zeros, float64, dot, log, exp, inf, sin, cos -import numpy as np -from 
scipy.optimize.tnc import RCSTRINGS, MSG_NONE -import numpy.random -from math import pow - -class TestOptimize(TestCase): - """ Test case for a simple constrained entropy maximization problem - (the machine translation example of Berger et al in - Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) - """ - def setUp(self): - self.F = array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]]) - self.K = array([1., 0.3, 0.5]) - self.startparams = zeros(3, float64) - self.solution = array([0., -0.524869316, 0.487525860]) - self.maxiter = 1000 - self.funccalls = 0 - self.gradcalls = 0 - self.trace = [] - - - def func(self, x): - self.funccalls += 1 - if self.funccalls > 6000: - raise RuntimeError("too many iterations in optimization routine") - log_pdot = dot(self.F, x) - logZ = log(sum(exp(log_pdot))) - f = logZ - dot(self.K, x) - self.trace.append(x) - return f - - - def grad(self, x): - self.gradcalls += 1 - log_pdot = dot(self.F, x) - logZ = log(sum(exp(log_pdot))) - p = exp(log_pdot - logZ) - return dot(self.F.transpose(), p) - self.K - - - def test_cg(self): - """ conjugate gradient optimization routine - """ - retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (), \ - maxiter=self.maxiter, \ - full_output=True, disp=False, retall=False) - - (params, fopt, func_calls, grad_calls, warnflag) = retval - - err = abs(self.func(params) - self.func(self.solution)) - #print "CG: Difference is: " + str(err) - assert_(err < 1e-6) - - # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. 
- assert_(self.funccalls == 9, self.funccalls) - assert_(self.gradcalls == 7, self.gradcalls) - - # Ensure that the function behaves the same; this is from Scipy 0.7.0 - assert_(np.allclose(self.trace[2:4], - [[0, -0.5, 0.5], - [0, -5.05700028e-01, 4.95985862e-01]], - atol=1e-14, rtol=1e-7), self.trace[2:4]) - - - def test_bfgs(self): - """ Broyden-Fletcher-Goldfarb-Shanno optimization routine - """ - retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, \ - args=(), maxiter=self.maxiter, \ - full_output=True, disp=False, retall=False) - - (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval - - err = abs(self.func(params) - self.func(self.solution)) - #print "BFGS: Difference is: " + str(err) - assert_(err < 1e-6) - - # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. - assert_(self.funccalls == 10, self.funccalls) - assert_(self.gradcalls == 8, self.gradcalls) - - # Ensure that the function behaves the same; this is from Scipy 0.7.0 - assert_(np.allclose(self.trace[6:8], - [[0, -5.25060743e-01, 4.87748473e-01], - [0, -5.24885582e-01, 4.87530347e-01]], - atol=1e-14, rtol=1e-7), self.trace[6:8]) - - - def test_bfgs_infinite(self): - """Test corner case where -Inf is the minimum. 
See #1494.""" - func = lambda x: -np.e**-x - fprime = lambda x: -func(x) - x0 = [0] - olderr = np.seterr(over='ignore') - try: - x = optimize.fmin_bfgs(func, x0, fprime, disp=False) - assert_(not np.isfinite(func(x))) - finally: - np.seterr(**olderr) - - - def test_powell(self): - """ Powell (direction set) optimization routine - """ - retval = optimize.fmin_powell(self.func, self.startparams, \ - args=(), maxiter=self.maxiter, \ - full_output=True, disp=False, retall=False) - - (params, fopt, direc, numiter, func_calls, warnflag) = retval - - err = abs(self.func(params) - self.func(self.solution)) - #print "Powell: Difference is: " + str(err) - assert_(err < 1e-6) - - # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. - # - # However, some leeway must be added: the exact evaluation - # count is sensitive to numerical error, and floating-point - # computations are not bit-for-bit reproducible across - # machines, and when using e.g. MKL, data alignment - # etc. affect the rounding error. 
- # - assert_(self.funccalls <= 116 + 20, self.funccalls) - assert_(self.gradcalls == 0, self.gradcalls) - - # Ensure that the function behaves the same; this is from Scipy 0.7.0 - assert_(np.allclose(self.trace[34:39], - [[ 0.72949016, -0.44156936, 0.47100962], - [ 0.72949016, -0.44156936, 0.48052496], - [ 1.45898031, -0.88313872, 0.95153458], - [ 0.72949016, -0.44156936, 0.47576729], - [ 1.72949016, -0.44156936, 0.47576729]], - atol=1e-14, rtol=1e-7), self.trace[34:39]) - - def test_neldermead(self): - """ Nelder-Mead simplex algorithm - """ - retval = optimize.fmin(self.func, self.startparams, \ - args=(), maxiter=self.maxiter, \ - full_output=True, disp=False, retall=False) - - (params, fopt, numiter, func_calls, warnflag) = retval - - err = abs(self.func(params) - self.func(self.solution)) - #print "Nelder-Mead: Difference is: " + str(err) - assert_(err < 1e-6) - - # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. - assert_(self.funccalls == 167, self.funccalls) - assert_(self.gradcalls == 0, self.gradcalls) - - # Ensure that the function behaves the same; this is from Scipy 0.7.0 - assert_(np.allclose(self.trace[76:78], - [[0.1928968 , -0.62780447, 0.35166118], - [0.19572515, -0.63648426, 0.35838135]], - atol=1e-14, rtol=1e-7), self.trace[76:78]) - - def test_ncg(self): - """ line-search Newton conjugate gradient optimization routine - """ - retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, - args=(), maxiter=self.maxiter, - full_output=False, disp=False, - retall=False) - - params = retval - - err = abs(self.func(params) - self.func(self.solution)) - #print "NCG: Difference is: " + str(err) - assert_(err < 1e-6) - - # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. 
- assert_(self.funccalls == 7, self.funccalls) - assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 - #assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 - #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 - - # Ensure that the function behaves the same; this is from Scipy 0.7.0 - assert_(np.allclose(self.trace[3:5], - [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], - [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], - atol=1e-6, rtol=1e-7), self.trace[:5]) - - - def test_l_bfgs_b(self): - """ limited-memory bound-constrained BFGS algorithm - """ - retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, - self.grad, args=(), - maxfun=self.maxiter) - - (params, fopt, d) = retval - - err = abs(self.func(params) - self.func(self.solution)) - #print "LBFGSB: Difference is: " + str(err) - assert_(err < 1e-6) - - # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. - assert_(self.funccalls == 7, self.funccalls) - assert_(self.gradcalls == 5, self.gradcalls) - - # Ensure that the function behaves the same; this is from Scipy 0.7.0 - assert_(np.allclose(self.trace[3:5], - [[0. , -0.52489628, 0.48753042], - [0. 
, -0.52489628, 0.48753042]], - atol=1e-14, rtol=1e-7), self.trace[3:5]) - - def test_brent(self): - """ brent algorithm - """ - x = optimize.brent(lambda x: (x-1.5)**2-0.8) - err1 = abs(x - 1.5) - x = optimize.brent(lambda x: (x-1.5)**2-0.8, brack = (-3,-2)) - err2 = abs(x - 1.5) - x = optimize.brent(lambda x: (x-1.5)**2-0.8, full_output=True) - err3 = abs(x[0] - 1.5) - x = optimize.brent(lambda x: (x-1.5)**2-0.8, brack = (-15,-1,15)) - err4 = abs(x - 1.5) - - assert_(max((err1,err2,err3,err4)) < 1e-6) - - - def test_fminbound(self): - """Test fminbound - """ - x = optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8, 0, 1) - assert_(abs(x - 1) < 1e-5) - x = optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8, 1, 5) - assert_(abs(x - 1.5) < 1e-6) - x = optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8, - numpy.array([1]), numpy.array([5])) - assert_(abs(x - 1.5) < 1e-6) - assert_raises(ValueError, - optimize.fminbound, lambda x: (x - 1.5)**2 - 0.8, 5, 1) - - def test_fminbound_scalar(self): - assert_raises(ValueError, - optimize.fminbound, lambda x: (x - 1.5)**2 - 0.8, - np.zeros(2), 1) - - assert_almost_equal( - optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8, 1, np.array(5)), - 1.5) - - -class TestTnc(TestCase): - """TNC non-linear optimization. - - These tests are taken from Prof. K. Schittkowski's test examples - for constrained non-linear programming. 
- - http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm - - """ - tests = [] - - def setUp(self): - def test1fg(x): - f = 100.0*pow((x[1]-pow(x[0],2)),2)+pow(1.0-x[0],2) - dif = [0,0] - dif[1] = 200.0*(x[1]-pow(x[0],2)) - dif[0] = -2.0*(x[0]*(dif[1]-1.0)+1.0) - return f, dif - self.tests.append((test1fg, [-2,1], ([-inf,None],[-1.5,None]), - [1,1])) - def test2fg(x): - f = 100.0*pow((x[1]-pow(x[0],2)),2)+pow(1.0-x[0],2) - dif = [0,0] - dif[1] = 200.0*(x[1]-pow(x[0],2)) - dif[0] = -2.0*(x[0]*(dif[1]-1.0)+1.0) - return f, dif - self.tests.append((test2fg, [-2,1], [(-inf,None),(1.5,None)], - [-1.2210262419616387,1.5])) - - def test3fg(x): - f = x[1]+pow(x[1]-x[0],2)*1.0e-5 - dif = [0,0] - dif[0] = -2.0*(x[1]-x[0])*1.0e-5 - dif[1] = 1.0-dif[0] - return f, dif - self.tests.append((test3fg, [10,1], [(-inf,None),(0.0, None)], - [0,0])) - - def test4fg(x): - f = pow(x[0]+1.0,3)/3.0+x[1] - dif = [0,0] - dif[0] = pow(x[0]+1.0,2) - dif[1] = 1.0 - return f, dif - self.tests.append((test4fg, [1.125,0.125], [(1, None),(0, None)], - [1,0])) - - def test5fg(x): - f = sin(x[0]+x[1])+pow(x[0]-x[1],2)-1.5*x[0]+2.5*x[1]+1.0 - dif = [0,0] - v1 = cos(x[0]+x[1]) - v2 = 2.0*(x[0]-x[1]) - - dif[0] = v1+v2-1.5 - dif[1] = v1-v2+2.5 - return f, dif - self.tests.append((test5fg, [0,0], [(-1.5, 4),(-3,3)], - [-0.54719755119659763, -1.5471975511965976])) - - def test38fg(x): - f = (100.0*pow(x[1]-pow(x[0],2),2) + \ - pow(1.0-x[0],2)+90.0*pow(x[3]-pow(x[2],2),2) + \ - pow(1.0-x[2],2)+10.1*(pow(x[1]-1.0,2)+pow(x[3]-1.0,2)) + \ - 19.8*(x[1]-1.0)*(x[3]-1.0))*1.0e-5 - dif = [0,0,0,0] - dif[0] = (-400.0*x[0]*(x[1]-pow(x[0],2))-2.0*(1.0-x[0]))*1.0e-5 - dif[1] = (200.0*(x[1]-pow(x[0],2))+20.2 \ - *(x[1]-1.0)+19.8*(x[3]-1.0))*1.0e-5 - dif[2] = (-360.0*x[2]*(x[3]-pow(x[2],2))-2.0\ - *(1.0-x[2]))*1.0e-5 - dif[3] = (180.0*(x[3]-pow(x[2],2))+20.2\ - *(x[3]-1.0)+19.8*(x[1]-1.0))*1.0e-5 - return f, dif - self.tests.append((test38fg, array([-3,-1,-3,-1]), [(-10,10)]*4, [1]*4)) - - def 
test45fg(x): - f = 2.0-x[0]*x[1]*x[2]*x[3]*x[4]/120.0 - dif = [0]*5 - dif[0] = -x[1]*x[2]*x[3]*x[4]/120.0 - dif[1] = -x[0]*x[2]*x[3]*x[4]/120.0 - dif[2] = -x[0]*x[1]*x[3]*x[4]/120.0 - dif[3] = -x[0]*x[1]*x[2]*x[4]/120.0 - dif[4] = -x[0]*x[1]*x[2]*x[3]/120.0 - return f, dif - self.tests.append((test45fg, [2]*5, [(0,1),(0,2),(0,3),(0,4),(0,5)], - [1,2,3,4,5])) - - def test_tnc(self): - for fg, x, bounds, xopt in self.tests: - x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, - messages=MSG_NONE, maxfun=200) - err = "Failed optimization of %s.\n" \ - "After %d function evaluations, TNC returned: %s.""" % \ - (fg.__name__, nf, RCSTRINGS[rc]) - - ef = abs(fg(xopt)[0] - fg(x)[0]) - if ef > 1e-8: - raise err - - -class TestRosen(TestCase): - - def test_hess(self): - """Compare rosen_hess(x) times p with rosen_hess_prod(x,p) (ticket #1248)""" - x = array([3, 4, 5]) - p = array([2, 2, 2]) - hp = optimize.rosen_hess_prod(x, p) - dothp = np.dot(optimize.rosen_hess(x), p) - assert_equal(hp, dothp) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_regression.py b/scipy-0.10.1/scipy/optimize/tests/test_regression.py deleted file mode 100644 index aa078fb028..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_regression.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Regression tests for optimize. 
- -""" - -from numpy.testing import TestCase, run_module_suite, assert_almost_equal -import scipy.optimize - -class TestRegression(TestCase): - - def test_newton_x0_is_0(self): - """Ticket #1074""" - - tgt = 1 - res = scipy.optimize.newton(lambda x: x - 1, 0) - assert_almost_equal(res, tgt) - - def test_newton_integers(self): - """Ticket #1214""" - root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2, - fprime=lambda x: 2*x) - assert_almost_equal(root, 1.0) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_slsqp.py b/scipy-0.10.1/scipy/optimize/tests/test_slsqp.py deleted file mode 100644 index efc7980ada..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_slsqp.py +++ /dev/null @@ -1,88 +0,0 @@ -from numpy.testing import assert_array_almost_equal, TestCase, run_module_suite -import numpy as np - -from scipy.optimize import fmin_slsqp - - -class TestSLSQP(TestCase): - """Test fmin_slsqp using Example 14.4 from Numerical Methods for - Engineers by Steven Chapra and Raymond Canale. This example - maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2, which - has a maximum at x=2,y=1. - - """ - - def _testfunc(self,d,*args): - """ - Arguments: - d - A list of two elements, where d[0] represents x and d[1] represents y - in the following equation. - sign - A multiplier for f. Since we want to optimize it, and the scipy - optimizers can only minimize functions, we need to multiply it by - -1 to achieve the desired solution - Returns: - 2*x*y + 2*x - x**2 - 2*y**2 - - """ - try: - sign = args[0] - except: - sign = 1.0 - x = d[0] - y = d[1] - return sign*(2*x*y + 2*x - x**2 - 2*y**2) - - def _testfunc_deriv(self,d,*args): - """ - This is the derivative of testfunc, returning a numpy array - representing df/dx and df/dy. 
- - """ - try: - sign = args[0] - except: - sign = 1.0 - x = d[0] - y = d[1] - dfdx = sign*(-2*x + 2*y + 2) - dfdy = sign*(2*x - 4*y) - return np.array([ dfdx, dfdy ],float) - - def test_unbounded_approximated(self): - res = fmin_slsqp(self._testfunc, [-1.0,1.0], args = (-1.0,), - iprint = 0, full_output = 1) - x,fx,its,imode,smode = res - assert_array_almost_equal(x,[2,1]) - - def test_unbounded_given(self): - res = fmin_slsqp(self._testfunc,[-1.0,1.0], args = (-1.0,), - iprint = 0, full_output = 1) - x,fx,its,imode,smode = res - assert_array_almost_equal(x,[2,1]) - - def test_bound_approximated(self): - res = fmin_slsqp(self._testfunc,[-1.0,1.0], args = (-1.0,), - eqcons = [lambda x, y: x[0]-x[1] ], - iprint = 0, full_output = 1) - x,fx,its,imode,smode = res - assert_array_almost_equal(x,[1,1]) - - def test_bound_equality_given(self): - res = fmin_slsqp(self._testfunc,[-1.0,1.0], - fprime = self._testfunc_deriv, - args = (-1.0,), eqcons = [lambda x, y: x[0]-x[1] ], - iprint = 0, full_output = 1) - x,fx,its,imode,smode = res - assert_array_almost_equal(x,[1,1]) - - def test_bound_equality_inequality_given(self): - res = fmin_slsqp(self._testfunc,[-1.0,1.0], - fprime = self._testfunc_deriv, - args = (-1.0,), - ieqcons = [lambda x, y: x[0]-x[1]-1.0], - iprint=0, full_output=1) - x,fx,its,imode,smode = res - assert_array_almost_equal(x,[2,1],decimal=3) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tests/test_zeros.py b/scipy-0.10.1/scipy/optimize/tests/test_zeros.py deleted file mode 100644 index 7b218d2586..0000000000 --- a/scipy-0.10.1/scipy/optimize/tests/test_zeros.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python - -from math import sqrt - -from numpy.testing import TestCase, assert_almost_equal, assert_warns, \ - assert_, run_module_suite - -from scipy.optimize import zeros as cc - -# Import testing parameters -from scipy.optimize._tstutils import functions, fstrings - -class TestBasic(TestCase) : - def 
run_check(self, method, name): - a = .5 - b = sqrt(3) - for function, fname in zip(functions, fstrings): - zero, r = method(function, a, b, xtol=0.1e-12, full_output=True) - assert_(r.converged) - assert_almost_equal(zero, 1.0, decimal=12, - err_msg='method %s, function %s' % (name, fname)) - - def test_bisect(self): - self.run_check(cc.bisect, 'bisect') - def test_ridder(self): - self.run_check(cc.ridder, 'ridder') - def test_brentq(self): - self.run_check(cc.brentq, 'brentq') - def test_brenth(self): - self.run_check(cc.brenth, 'brenth') - - def test_deriv_zero_warning(self): - func = lambda x: x**2 - dfunc = lambda x: 2*x - assert_warns(RuntimeWarning, cc.newton, func, 0.0, dfunc) - -if __name__ == '__main__' : - run_module_suite() diff --git a/scipy-0.10.1/scipy/optimize/tnc.py b/scipy-0.10.1/scipy/optimize/tnc.py deleted file mode 100644 index 67bd0b2474..0000000000 --- a/scipy-0.10.1/scipy/optimize/tnc.py +++ /dev/null @@ -1,309 +0,0 @@ -# TNC Python interface -# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ - -# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) - -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: - -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -""" -TNC: A python interface to the TNC non-linear optimizer - -TNC is a non-linear optimizer. To use it, you must provide a function to -minimize. The function must take one argument: the list of coordinates where to -evaluate the function; and it must return either a tuple, whose first element is the -value of the function, and whose second argument is the gradient of the function -(as a list of values); or None, to abort the minimization. -""" -from scipy.optimize import moduleTNC -from numpy import asarray, inf, array - -__all__ = ['fmin_tnc'] - - -MSG_NONE = 0 # No messages -MSG_ITER = 1 # One line per iteration -MSG_INFO = 2 # Informational messages -MSG_VERS = 4 # Version info -MSG_EXIT = 8 # Exit reasons -MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT - -MSGS = { - MSG_NONE : "No messages", - MSG_ITER : "One line per iteration", - MSG_INFO : "Informational messages", - MSG_VERS : "Version info", - MSG_EXIT : "Exit reasons", - MSG_ALL : "All messages" -} - -INFEASIBLE = -1 # Infeasible (low > up) -LOCALMINIMUM = 0 # Local minima reach (|pg| ~= 0) -FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) -XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) -MAXFUN = 3 # Max. number of function evaluations reach -LSFAIL = 4 # Linear search failed -CONSTANT = 5 # All lower bounds are equal to the upper bounds -NOPROGRESS = 6 # Unable to progress -USERABORT = 7 # User requested end of minimization - -RCSTRINGS = { - INFEASIBLE : "Infeasible (low > up)", - LOCALMINIMUM : "Local minima reach (|pg| ~= 0)", - FCONVERGED : "Converged (|f_n-f_(n-1)| ~= 0)", - XCONVERGED : "Converged (|x_n-x_(n-1)| ~= 0)", - MAXFUN : "Max. 
number of function evaluations reach", - LSFAIL : "Linear search failed", - CONSTANT : "All lower bounds are equal to the upper bounds", - NOPROGRESS : "Unable to progress", - USERABORT : "User requested end of minimization" -} - -# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in -# SciPy - -import optimize -approx_fprime = optimize.approx_fprime - -def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, - bounds=None, epsilon=1e-8, scale=None, offset=None, - messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, - stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, - rescale=-1, disp=None): - """ - Minimize a function with variables subject to bounds, using - gradient information in a truncated Newton algorithm. This - method wraps a C implementation of the algorithm. - - Parameters - ---------- - func : callable ``func(x, *args)`` - Function to minimize. Must do one of - 1. Return f and g, where f is - the value of the function and g its gradient (a list of - floats). - 2. Return the function value but supply gradient function - seperately as fprime - 3. Return the function value and set approx_grad=True. - If the function returns None, the minimization - is aborted. - x0 : list of floats - Initial estimate of minimum. - fprime : callable ``fprime(x, *args)`` - Gradient of func. If None, then either func must return the - function value and the gradient (``f,g = func(x, *args)``) - or approx_grad must be True. - args : tuple - Arguments to pass to function. - approx_grad : bool - If true, approximate the gradient numerically. - bounds : list - (min, max) pairs for each element in x0, defining the - bounds on that parameter. Use None or +/-inf for one of - min or max when there is no bound in that direction. - epsilon: float - Used if approx_grad is True. The stepsize in a finite - difference approximation for fprime. - scale : list of floats - Scaling factors to apply to each variable. 
If None, the - factors are up-low for interval bounded variables and - 1+|x] fo the others. Defaults to None - offset : float - Value to substract from each variable. If None, the - offsets are (up+low)/2 for interval bounded variables - and x for the others. - messages : - Bit mask used to select messages display during - minimization values defined in the MSGS dict. Defaults to - MGS_ALL. - disp : int - Integer interface to messages. 0 = no message, 5 = all messages - maxCGit : int - Maximum number of hessian*vector evaluations per main - iteration. If maxCGit == 0, the direction chosen is - -gradient if maxCGit < 0, maxCGit is set to - max(1,min(50,n/2)). Defaults to -1. - maxfun : int - Maximum number of function evaluation. if None, maxfun is - set to max(100, 10*len(x0)). Defaults to None. - eta : float - Severity of the line search. if < 0 or > 1, set to 0.25. - Defaults to -1. - stepmx : float - Maximum step for the line search. May be increased during - call. If too small, it will be set to 10.0. Defaults to 0. - accuracy : float - Relative precision for finite difference calculations. If - <= machine_precision, set to sqrt(machine_precision). - Defaults to 0. - fmin : float - Minimum function value estimate. Defaults to 0. - ftol : float - Precision goal for the value of f in the stoping criterion. - If ftol < 0.0, ftol is set to 0.0 defaults to -1. - xtol : float - Precision goal for the value of x in the stopping - criterion (after applying x scaling factors). If xtol < - 0.0, xtol is set to sqrt(machine_precision). Defaults to - -1. - pgtol : float - Precision goal for the value of the projected gradient in - the stopping criterion (after applying x scaling factors). - If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). - Setting it to 0.0 is not recommended. Defaults to -1. - rescale : float - Scaling factor (in log10) used to trigger f value - rescaling. If 0, rescale at each iteration. If a large - value, never rescale. 
If < 0, rescale is set to 1.3. - - Returns - ------- - x : list of floats - The solution. - nfeval : int - The number of function evaluations. - rc : int - Return code as defined in the RCSTRINGS dict. - - - Notes - ----- - The underlying algorithm is truncated Newton, also called - Newton Conjugate-Gradient. This method differs from - scipy.optimize.fmin_ncg in that - - 1. It wraps a C implementation of the algorithm - 2. It allows each variable to be given an upper and lower bound. - - - The algorithm incoporates the bound constraints by determining - the descent direction as in an unconstrained truncated Newton, - but never taking a step-size large enough to leave the space - of feasible x's. The algorithm keeps track of a set of - currently active constraints, and ignores them when computing - the minimum allowable step size. (The x's associated with the - active constraint are kept fixed.) If the maximum allowable - step size is zero then a new constraint is added. At the end - of each iteration one of the constraints may be deemed no - longer active and removed. A constraint is considered - no longer active is if it is currently active - but the gradient for that variable points inward from the - constraint. The specific constraint removed is the one - associated with the variable of largest index whose - constraint is no longer active. - - - References - ---------- - Wright S., Nocedal J. (2006), 'Numerical Optimization' - - Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", - SIAM Journal of Numerical Analysis 21, pp. 
770-778 - - - """ - x0 = asarray(x0, dtype=float).tolist() - n = len(x0) - - if bounds is None: - bounds = [(None,None)] * n - if len(bounds) != n: - raise ValueError('length of x0 != length of bounds') - - if disp is not None: - messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, - 4:MSG_EXIT, 5:MSG_ALL}.get(disp, MSG_ALL) - - if approx_grad: - def func_and_grad(x): - x = asarray(x) - f = func(x, *args) - g = approx_fprime(x, func, epsilon, *args) - return f, list(g) - elif fprime is None: - def func_and_grad(x): - x = asarray(x) - f, g = func(x, *args) - return f, list(g) - else: - def func_and_grad(x): - x = asarray(x) - f = func(x, *args) - g = fprime(x, *args) - return f, list(g) - - """ - low, up : the bounds (lists of floats) - if low is None, the lower bounds are removed. - if up is None, the upper bounds are removed. - low and up defaults to None - """ - low = [0]*n - up = [0]*n - for i in range(n): - if bounds[i] is None: l, u = -inf, inf - else: - l,u = bounds[i] - if l is None: - low[i] = -inf - else: - low[i] = l - if u is None: - up[i] = inf - else: - up[i] = u - - if scale is None: - scale = [] - - if offset is None: - offset = [] - - if maxfun is None: - maxfun = max(100, 10*len(x0)) - - rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset, - messages, maxCGit, maxfun, eta, stepmx, accuracy, - fmin, ftol, xtol, pgtol, rescale) - return array(x), nf, rc - -if __name__ == '__main__': - # Examples for TNC - - def example(): - print "Example" - # A function to minimize - def function(x): - f = pow(x[0],2.0)+pow(abs(x[1]),3.0) - g = [0,0] - g[0] = 2.0*x[0] - g[1] = 3.0*pow(abs(x[1]),2.0) - if x[1]<0: - g[1] = -g[1] - return f, g - - # Optimizer call - x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10])) - - print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc] - print "x =", x - print "exact value = [0, 1]" - print - - example() diff --git a/scipy-0.10.1/scipy/optimize/tnc/moduleTNC.c 
b/scipy-0.10.1/scipy/optimize/tnc/moduleTNC.c deleted file mode 100644 index 60b746cbd6..0000000000 --- a/scipy-0.10.1/scipy/optimize/tnc/moduleTNC.c +++ /dev/null @@ -1,329 +0,0 @@ -/* Python TNC module */ - -/* - * Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -static char const rcsid[] = - "@(#) $Jeannot: moduleTNC.c,v 1.12 2005/01/28 18:27:31 js Exp $"; - -#include "Python.h" -#include -#include -#include - -#include "tnc.h" - -typedef struct _pytnc_state -{ - PyObject *py_function; - int n; - int failed; -} pytnc_state; - -static tnc_function function; -static PyObject *moduleTNC_minimize(PyObject *self, PyObject *args); -static int PyObject_AsDouble(PyObject *py_obj, double *x); -static double *PyList_AsDoubleArray(PyObject *py_list, int *size); -static PyObject *PyDoubleArray_AsList(int size, double *x); -static int PyList_IntoDoubleArray(PyObject *py_list, double *x, int size); - -int PyObject_AsDouble(PyObject *py_obj, double *x) -{ - PyObject *py_float; - - py_float = PyNumber_Float(py_obj); - - if (py_float == NULL) return -1; - - *x = PyFloat_AsDouble(py_float); - - Py_DECREF(py_float); - return 0; -} - -double *PyList_AsDoubleArray(PyObject *py_list, int *size) -{ - int i; - double *x; - - if (!PyList_Check(py_list)) - { - *size = -1; - return NULL; - } - - *size = PyList_Size(py_list); - if (*size <= 0) return NULL; - x = malloc((*size)*sizeof(*x)); - if (x == NULL) return NULL; - - for (i=0; i<(*size); i++) - { - PyObject *py_float = PyList_GetItem(py_list, i); - if (py_float == NULL || PyObject_AsDouble(py_float, &(x[i]))) - { - free(x); - return NULL; - } - } - - return x; -} - -int PyList_IntoDoubleArray(PyObject *py_list, double *x, int size) -{ - int i; - - if (py_list == NULL) return 1; - - if (!PyList_Check(py_list)) return 1; - - if (size != PyList_Size(py_list)) return 1; - - for (i=0; in, x); - if (py_list == NULL) - { - PyErr_SetString(PyExc_MemoryError, "tnc: memory allocation failed."); - goto failure; - } - - arglist = Py_BuildValue("(N)", py_list); - result = PyEval_CallObject(py_state->py_function, arglist); - Py_DECREF(arglist); - - if (result == NULL) - goto failure; - - if (result == Py_None) - { - Py_DECREF(result); - return 1; - } - - if (!PyArg_ParseTuple(result, "dO!", f, 
&PyList_Type, &py_grad)) - { - PyErr_SetString(PyExc_ValueError, - "tnc: invalid return value from minimized function."); - goto failure; - } - - if (PyList_IntoDoubleArray(py_grad, g, py_state->n)) - goto failure; - - Py_DECREF(result); - - return 0; - -failure: - py_state->failed = 1; - Py_XDECREF(result); - return 1; -} - -PyObject *moduleTNC_minimize(PyObject *self, PyObject *args) -{ - PyObject *py_x0, *py_low, *py_up, *py_list, *py_scale, *py_offset; - PyObject *py_function = NULL; - pytnc_state py_state; - int n, n1, n2, n3, n4; - - int rc, msg, maxCGit, maxnfeval, nfeval = 0; - double *x, *low, *up, *scale = NULL, *offset = NULL; - double f, eta, stepmx, accuracy, fmin, ftol, xtol, pgtol, rescale; - - if (!PyArg_ParseTuple(args, "OO!O!O!O!O!iiidddddddd", - &py_function, - &PyList_Type, &py_x0, - &PyList_Type, &py_low, - &PyList_Type, &py_up, - &PyList_Type, &py_scale, - &PyList_Type, &py_offset, - &msg, &maxCGit, &maxnfeval, &eta, &stepmx, &accuracy, &fmin, &ftol, - &xtol, &pgtol, - &rescale - )) - return NULL; - - if (!PyCallable_Check(py_function)) - { - PyErr_SetString(PyExc_TypeError, "tnc: function must be callable"); - return NULL; - } - - scale = PyList_AsDoubleArray(py_scale, &n3); - if (n3 != 0 && scale == NULL) - { - PyErr_SetString(PyExc_ValueError, "tnc: invalid scaling parameters."); - return NULL; - } - - offset = PyList_AsDoubleArray(py_offset, &n4); - if (n4 != 0 && offset == NULL) - { - PyErr_SetString(PyExc_ValueError, "tnc: invalid offset parameters."); - return NULL; - } - - x = PyList_AsDoubleArray(py_x0, &n); - if (n != 0 && x == NULL) - { - if (scale) free(scale); - - PyErr_SetString(PyExc_ValueError, "tnc: invalid initial vector."); - return NULL; - } - - low = PyList_AsDoubleArray(py_low, &n1); - up = PyList_AsDoubleArray(py_up, &n2); - - if ((n1 != 0 && low == NULL) || (n2 != 0 && up == NULL)) - { - if (scale) free(scale); - if (x) free(x); - if (low) free(low); - if (up) free(up); - - PyErr_SetString(PyExc_ValueError, "tnc: 
invalid bounds."); - return NULL; - } - - if (n1 != n2 || n != n1 || (scale != NULL && n != n3) - || (offset != NULL && n != n4)) - { - if (scale) free(scale); - if (offset) free(offset); - if (x) free(x); - if (low) free(low); - if (up) free(up); - - PyErr_SetString(PyExc_ValueError, "tnc: vector sizes must be equal."); - return NULL; - } - - py_state.py_function = py_function; - py_state.n = n; - py_state.failed = 0; - - Py_INCREF(py_function); - - rc = tnc(n, x, &f, NULL, function, &py_state, low, up, scale, offset, msg, - maxCGit, maxnfeval, eta, stepmx, accuracy, fmin, ftol, xtol, pgtol, rescale, - &nfeval); - - Py_DECREF(py_function); - - if (low) free(low); - if (up) free(up); - if (scale) free(scale); - if (offset) free(offset); - - if (py_state.failed) - { - if (x) free(x); - return NULL; - } - - if (rc == TNC_ENOMEM) - { - PyErr_SetString(PyExc_MemoryError, "tnc: memory allocation failed."); - if (x) free(x); - return NULL; - } - - py_list = PyDoubleArray_AsList(n, x); - if (x) free(x); - if (py_list == NULL) - { - PyErr_SetString(PyExc_MemoryError, "tnc: memory allocation failed."); - return NULL; - } - - return Py_BuildValue("(iiN)", rc, nfeval, py_list);; -} - -static PyMethodDef moduleTNC_methods[] = -{ - {"minimize", moduleTNC_minimize, METH_VARARGS}, - {NULL, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "moduleTNC", - NULL, - -1, - moduleTNC_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit_moduleTNC(void) -{ - return PyModule_Create(&moduledef); -} -#else -PyMODINIT_FUNC initmoduleTNC(void) -{ - (void) Py_InitModule("moduleTNC", moduleTNC_methods); -} -#endif diff --git a/scipy-0.10.1/scipy/optimize/tnc/tnc.c b/scipy-0.10.1/scipy/optimize/tnc/tnc.c deleted file mode 100644 index dfe1b412c5..0000000000 --- a/scipy-0.10.1/scipy/optimize/tnc/tnc.c +++ /dev/null @@ -1,1927 +0,0 @@ -/* tnc : truncated newton bound constrained minimization - using gradient information, in 
C */ - -/* - * Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * This software is a C implementation of TNBC, a truncated newton minimization - * package originally developed by Stephen G. Nash in Fortran. - * - * The original source code can be found at : - * http://iris.gmu.edu/~snash/nash/software/software.html - * - * Copyright for the original TNBC fortran routines: - * - * TRUNCATED-NEWTON METHOD: SUBROUTINES - * WRITTEN BY: STEPHEN G. 
NASH - * SCHOOL OF INFORMATION TECHNOLOGY & ENGINEERING - * GEORGE MASON UNIVERSITY - * FAIRFAX, VA 22030 - */ - -/* - * Conversion into C by Elisabeth Nguyen & Jean-Sebastien Roy - * Modifications by Jean-Sebastien Roy, 2001-2002 - */ - -static char const rcsid[] = - "@(#) $Jeannot: tnc.c,v 1.205 2005/01/28 18:27:31 js Exp $"; - -static char const copyright[] = - "(c) 2002-2003, Jean-Sebastien Roy (js@jeannot.org)"; - -#include -#include -#include - -#include "tnc.h" - -typedef enum -{ - TNC_FALSE = 0, - TNC_TRUE -} logical; - -/* - * Return code strings - */ - -char *tnc_rc_string[11] = -{ - "Memory allocation failed", - "Invalid parameters (n<0)", - "Infeasible (low bound > up bound)", - "Local minima reach (|pg| ~= 0)", - "Converged (|f_n-f_(n-1)| ~= 0)", - "Converged (|x_n-x_(n-1)| ~= 0)", - "Maximum number of function evaluations reached", - "Linear search failed", - "All lower bounds are equal to the upper bounds", - "Unable to progress", - "User requested end of minimization" -}; - -/* - * getptc return codes - */ -typedef enum -{ - GETPTC_OK = 0, /* Suitable point found */ - GETPTC_EVAL = 1, /* Function evaluation required */ - GETPTC_EINVAL = 2, /* Bad input values */ - GETPTC_FAIL = 3 /* No suitable point found */ -} getptc_rc; - -/* - * linearSearch return codes - */ -typedef enum -{ - LS_OK = 0, /* Suitable point found */ - LS_MAXFUN = 1, /* Max. 
number of function evaluations reach */ - LS_FAIL = 2, /* No suitable point found */ - LS_USERABORT = 3, /* User requested end of minimization */ - LS_ENOMEM = 4 /* Memory allocation failed */ -} ls_rc; - -/* - * Prototypes - */ -static tnc_rc tnc_minimize(int n, double x[], double *f, double g[], - tnc_function *function, void *state, - double xscale[], double xoffset[], double *fscale, - double low[], double up[], tnc_message messages, - int maxCGit, int maxnfeval, int *nfeval, - double eta, double stepmx, double accuracy, - double fmin, double ftol, double xtol, double pgtol, double rescale); - -static getptc_rc getptcInit(double *reltol, double *abstol, double tnytol, - double eta, double rmu, double xbnd, - double *u, double *fu, double *gu, double *xmin, - double *fmin, double *gmin, double *xw, double *fw, - double *gw, double *a, double *b, double *oldf, - double *b1, double *scxbnd, double *e, double *step, - double *factor, logical *braktd, double *gtest1, - double *gtest2, double *tol); - -static getptc_rc getptcIter(double big, double - rtsmll, double *reltol, double *abstol, double tnytol, - double fpresn, double xbnd, - double *u, double *fu, double *gu, double *xmin, - double *fmin, double *gmin, double *xw, double *fw, - double *gw, double *a, double *b, double *oldf, - double *b1, double *scxbnd, double *e, double *step, - double *factor, logical *braktd, double *gtest1, - double *gtest2, double *tol); - -static void printCurrentIteration(int n, double f, double g[], int niter, - int nfeval, int pivot[]); - -static double initialStep(double fnew, double fmin, double gtp, double smax); - -static ls_rc linearSearch(int n, tnc_function *function, void *state, - double low[], double up[], - double xscale[], double xoffset[], double fscale, int pivot[], - double eta, double ftol, double xbnd, - double p[], double x[], double *f, - double *alpha, double gfull[], int maxnfeval, int *nfeval); - -static int tnc_direction(double *zsol, double *diagb, - 
double *x, double *g, int n, - int maxCGit, int maxnfeval, int *nfeval, - logical upd1, double yksk, double yrsr, - double *sk, double *yk, double *sr, double *yr, - logical lreset, tnc_function *function, void *state, - double xscale[], double xoffset[], double fscale, - int *pivot, double accuracy, - double gnorm, double xnorm, double *low, double *up); - -static double stepMax(double step, int n, double x[], double p[], int pivot[], - double low[], double up[], double xscale[], double xoffset[]); - -/* Active set of constraints */ -static void setConstraints(int n, double x[], int pivot[], double xscale[], - double xoffset[], double low[], double up[]); - -static logical addConstraint(int n, double x[], double p[], int pivot[], - double low[], double up[], double xscale[], double xoffset[]); - -static logical removeConstraint(double gtpnew, double gnorm, double pgtolfs, - double f, double fLastConstraint, double g[], int pivot[], int n); - -static void project(int n, double x[], int pivot[]); - -static int hessianTimesVector(double v[], double gv[], int n, - double x[], double g[], tnc_function *function, void *state, - double xscale[], double xoffset[], double fscale, - double accuracy, double xnorm, double low[], double up[]); - -static int msolve(double g[], double *y, int n, - double sk[], double yk[], double diagb[], double sr[], - double yr[], logical upd1, double yksk, double yrsr, - logical lreset); - -static void diagonalScaling(int n, double e[], double v[], double gv[], - double r[]); - -static void ssbfgs(int n, double gamma, double sj[], double *hjv, - double hjyj[], double yjsj, - double yjhyj, double vsj, double vhyj, double hjp1v[]); - -static int initPreconditioner(double diagb[], double emat[], int n, - logical lreset, double yksk, double yrsr, - double sk[], double yk[], double sr[], double yr[], - logical upd1); - -/* Scaling */ -static void coercex(int n, double x[], double low[], double up[]); -static void unscalex(int n, double x[], double 
xscale[], double xoffset[]); -static void scaleg(int n, double g[], double xscale[], double fscale); -static void scalex(int n, double x[], double xscale[], double xoffset[]); -static void projectConstants(int n, double x[], double xscale[]); - -/* Machine precision */ -static double mchpr1(void); - -/* Special blas for incx=incy=1 */ -static double ddot1(int n, double dx[], double dy[]); -static void dxpy1(int n, double dx[], double dy[]); -static void daxpy1(int n, double da, double dx[], double dy[]); -static void dcopy1(int n, double dx[], double dy[]); -static double dnrm21(int n, double dx[]); - -/* additionnal blas-like functions */ -static void dneg1(int n, double v[]); - -/* - * This routine solves the optimization problem - * - * minimize f(x) - * x - * subject to low <= x <= up - * - * where x is a vector of n real variables. The method used is - * a truncated-newton algorithm (see "newton-type minimization via - * the lanczos method" by s.g. nash (siam j. numer. anal. 21 (1984), - * pp. 770-778). this algorithm finds a local minimum of f(x). It does - * not assume that the function f is convex (and so cannot guarantee a - * global solution), but does assume that the function is bounded below. - * it can solve problems having any number of variables, but it is - * especially useful when the number of variables (n) is large. 
- * - */ -extern int tnc(int n, double x[], double *f, double g[], tnc_function *function, - void *state, double low[], double up[], double scale[], double offset[], - int messages, int maxCGit, int maxnfeval, double eta, double stepmx, - double accuracy, double fmin, double ftol, double xtol, double pgtol, - double rescale, int *nfeval) -{ - int rc, frc, i, nc, nfeval_local, - free_low = TNC_FALSE, free_up = TNC_FALSE, - free_g = TNC_FALSE; - double *xscale = NULL, fscale, epsmch, rteps, *xoffset = NULL; - - if(nfeval==NULL) - { - /* Ignore nfeval */ - nfeval = &nfeval_local; - } - *nfeval = 0; - - /* Version info */ - if (messages & TNC_MSG_VERS) - { - fprintf(stderr, "tnc: Version %s, %s\n",TNC_VERSION,copyright); - fprintf(stderr, "tnc: RCS ID: %s\n",rcsid); - } - - /* Check for errors in the input parameters */ - if (n == 0) - { - rc = TNC_CONSTANT; - goto cleanup; - } - - if (n < 0) - { - rc = TNC_EINVAL; - goto cleanup; - } - - /* Check bounds arrays */ - if (low == NULL) - { - low = malloc(n*sizeof(*low)); - if (low == NULL) - { - rc = TNC_ENOMEM; - goto cleanup; - } - free_low = TNC_TRUE; - for (i = 0 ; i < n ; i++) low[i] = -HUGE_VAL; - } - if (up == NULL) - { - up = malloc(n*sizeof(*up)); - if (up == NULL) - { - rc = TNC_ENOMEM; - goto cleanup; - } - free_up = TNC_TRUE; - for (i = 0 ; i < n ; i++) up[i] = HUGE_VAL; - } - - /* Coherency check */ - for (i = 0 ; i < n ; i++) - { - if (low[i] > up [i]) - { - rc = TNC_INFEASIBLE; - goto cleanup; - } - } - - /* Coerce x into bounds */ - coercex(n, x, low, up); - - if (maxnfeval < 1) - { - rc = TNC_MAXFUN; - goto cleanup; - } - - /* Allocate g if necessary */ - if(g == NULL) - { - g = malloc(n*sizeof(*g)); - if (g == NULL) - { - rc = TNC_ENOMEM; - goto cleanup; - } - free_g = TNC_TRUE; - } - - /* Initial function evaluation */ - frc = function(x, f, g, state); - (*nfeval) ++; - if (frc) - { - rc = TNC_USERABORT; - goto cleanup; - } - - /* Constant problem ? 
*/ - for (nc = 0, i = 0 ; i < n ; i++) - if ((low[i] == up[i]) || (scale != NULL && scale[i] == 0.0)) - nc ++; - - if (nc == n) - { - rc = TNC_CONSTANT; - goto cleanup; - } - - /* Scaling parameters */ - xscale = malloc(sizeof(*xscale)*n); - if (xscale == NULL) - { - rc = TNC_ENOMEM; - goto cleanup; - } - xoffset = malloc(sizeof(*xoffset)*n); - if (xoffset == NULL) - { - rc = TNC_ENOMEM; - goto cleanup; - } - fscale = 1.0; - - for (i = 0 ; i < n ; i++) - { - if (scale != NULL) - { - xscale[i] = fabs(scale[i]); - if (xscale[i] == 0.0) - xoffset[i] = low[i] = up[i] = x[i]; - } - else if (low[i] != -HUGE_VAL && up[i] != HUGE_VAL) - { - xscale[i] = up[i] - low[i]; - xoffset[i] = (up[i]+low[i])*0.5; - } - else - { - xscale[i] = 1.0+fabs(x[i]); - xoffset[i] = x[i]; - } - if (offset != NULL) - xoffset[i] = offset[i]; - } - - /* Default values for parameters */ - epsmch = mchpr1(); - rteps = sqrt(epsmch); - - if (stepmx < rteps * 10.0) stepmx = 1.0e1; - if (eta < 0.0 || eta >= 1.0) eta = 0.25; - if (rescale < 0) rescale = 1.3; - if (maxCGit < 0) /* maxCGit == 0 is valid */ - { - maxCGit = n / 2; - if (maxCGit < 1) maxCGit = 1; - else if (maxCGit > 50) maxCGit = 50; - } - if (maxCGit > n) maxCGit = n; - if (accuracy <= epsmch) accuracy = rteps; - if (ftol < 0.0) ftol = accuracy; - if (pgtol < 0.0) pgtol = 1e-2 * sqrt(accuracy); - if (xtol < 0.0) xtol = rteps; - - /* Optimisation */ - rc = tnc_minimize(n, x, f, g, function, state, - xscale, xoffset, &fscale, low, up, messages, - maxCGit, maxnfeval, nfeval, eta, stepmx, accuracy, fmin, ftol, xtol, pgtol, - rescale); - -cleanup: - if (messages & TNC_MSG_EXIT) - fprintf(stderr, "tnc: %s\n", tnc_rc_string[rc - TNC_MINRC]); - - if (xscale) free(xscale); - if (free_low) free(low); - if (free_up) free(up); - if (free_g) free(g); - if (xoffset) free(xoffset); - - return rc; -} - -/* Coerce x into bounds */ -static void coercex(int n, double x[], double low[], double up[]) -{ - int i; - - for (i = 0 ; i < n ; i++) - { - if 
(x[i]up[i]) x[i] = up[i]; - } -} - -/* Unscale x */ -static void unscalex(int n, double x[], double xscale[], double xoffset[]) -{ - int i; - for (i = 0 ; i < n ; i++) - x[i] = x[i]*xscale[i]+xoffset[i]; -} - -/* Scale x */ -static void scalex(int n, double x[], double xscale[], double xoffset[]) -{ - int i; - for (i = 0 ; i < n ; i++) - if (xscale[i]>0.0) - x[i] = (x[i]-xoffset[i])/xscale[i]; -} - -/* Scale g */ -static void scaleg(int n, double g[], double xscale[], double fscale) -{ - int i; - for (i = 0 ; i < n ; i++) - g[i] *= xscale[i]*fscale; -} - -/* Caculate the pivot vector */ -static void setConstraints(int n, double x[], int pivot[], double xscale[], - double xoffset[], double low[], double up[]) -{ - int i; - double epsmch; - - epsmch = mchpr1(); - - for (i = 0; i < n; i++) - { - /* tolerances should be better ajusted */ - if (xscale[i] == 0.0) - { - pivot[i] = 2; - } - else - { - if (low[i] != - HUGE_VAL && - (x[i]*xscale[i]+xoffset[i] - low[i] <= epsmch * 10.0 * (fabs(low[i]) + 1.0))) - pivot[i] = -1; - else - { - if (up[i] != HUGE_VAL && - (x[i]*xscale[i]+xoffset[i] - up[i] >= epsmch * 10.0 * (fabs(up[i]) + 1.0))) - pivot[i] = 1; - else - pivot[i] = 0; - } - } - } -} - -/* - * This routine is a bounds-constrained truncated-newton method. - * the truncated-newton method is preconditioned by a limited-memory - * quasi-newton method (this preconditioning strategy is developed - * in this routine) with a further diagonal scaling - * (see routine diagonalscaling). 
- */ -static tnc_rc tnc_minimize(int n, double x[], - double *f, double gfull[], tnc_function *function, void *state, - double xscale[], double xoffset[], double *fscale, - double low[], double up[], tnc_message messages, - int maxCGit, int maxnfeval, int *nfeval, double eta, double stepmx, - double accuracy, double fmin, double ftol, double xtol, double pgtol, - double rescale) -{ - double fLastReset, difnew, epsmch, epsred, oldgtp, - difold, oldf, xnorm, newscale, - gnorm, ustpmax, fLastConstraint, spe, yrsr, yksk, - *temp = NULL, *sk = NULL, *yk = NULL, *diagb = NULL, *sr = NULL, - *yr = NULL, *oldg = NULL, *pk = NULL, *g = NULL; - double alpha = 0.0; /* Default unused value */ - int i, icycle, niter = 0, oldnfeval, *pivot = NULL, frc; - logical lreset, newcon, upd1, remcon; - tnc_rc rc = TNC_ENOMEM; /* Default error */ - - /* Allocate temporary vectors */ - oldg = malloc(sizeof(*oldg)*n); - if (oldg == NULL) goto cleanup; - g = malloc(sizeof(*g)*n); - if (g == NULL) goto cleanup; - temp = malloc(sizeof(*temp)*n); - if (temp == NULL) goto cleanup; - diagb = malloc(sizeof(*diagb)*n); - if (diagb == NULL) goto cleanup; - pk = malloc(sizeof(*pk)*n); - if (pk == NULL) goto cleanup; - - sk = malloc(sizeof(*sk)*n); - if (sk == NULL) goto cleanup; - yk = malloc(sizeof(*yk)*n); - if (yk == NULL) goto cleanup; - sr = malloc(sizeof(*sr)*n); - if (sr == NULL) goto cleanup; - yr = malloc(sizeof(*yr)*n); - if (yr == NULL) goto cleanup; - - pivot = malloc(sizeof(*pivot)*n); - if (pivot == NULL) goto cleanup; - - /* Initialize variables */ - epsmch = mchpr1(); - - difnew = 0.0; - epsred = 0.05; - upd1 = TNC_TRUE; - icycle = n - 1; - newcon = TNC_TRUE; - - /* Uneeded initialisations */ - lreset = TNC_FALSE; - yrsr = 0.0; - yksk = 0.0; - - /* Initial scaling */ - scalex(n, x, xscale, xoffset); - (*f) *= *fscale; - - /* initial pivot calculation */ - setConstraints(n, x, pivot, xscale, xoffset, low, up); - - dcopy1(n, gfull, g); - scaleg(n, g, xscale, *fscale); - - /* Test the 
lagrange multipliers to see if they are non-negative. */ - for (i = 0; i < n; i++) - if (-pivot[i] * g[i] < 0.0) - pivot[i] = 0; - - project(n, g, pivot); - - /* Set initial values to other parameters */ - gnorm = dnrm21(n, g); - - fLastConstraint = *f; /* Value at last constraint */ - fLastReset = *f; /* Value at last reset */ - - if (messages & TNC_MSG_ITER) fprintf(stderr, - " NIT NF F GTG\n"); - if (messages & TNC_MSG_ITER) printCurrentIteration(n, *f / *fscale, gfull, - niter, *nfeval, pivot); - - /* Set the diagonal of the approximate hessian to unity. */ - for (i = 0; i < n; i++) diagb[i] = 1.0; - - /* Start of main iterative loop */ - while(TNC_TRUE) - { - /* Local minimum test */ - if (dnrm21(n, g) <= pgtol * (*fscale)) - { - /* |PG| == 0.0 => local minimum */ - dcopy1(n, gfull, g); - project(n, g, pivot); - if (messages & TNC_MSG_INFO) fprintf(stderr, - "tnc: |pg| = %g -> local minimum\n", dnrm21(n, g) / (*fscale)); - rc = TNC_LOCALMINIMUM; - break; - } - - /* Terminate if more than maxnfeval evaluations have been made */ - if (*nfeval >= maxnfeval) - { - rc = TNC_MAXFUN; - break; - } - - /* Rescale function if necessary */ - newscale = dnrm21(n, g); - if ((newscale > epsmch) && (fabs(log10(newscale)) > rescale)) - { - newscale = 1.0/newscale; - - *f *= newscale; - *fscale *= newscale; - gnorm *= newscale; - fLastConstraint *= newscale; - fLastReset *= newscale; - difnew *= newscale; - - for (i = 0; i < n; i++) g[i] *= newscale; - for (i = 0; i < n; i++) diagb[i] = 1.0; - - upd1 = TNC_TRUE; - icycle = n - 1; - newcon = TNC_TRUE; - - if (messages & TNC_MSG_INFO) fprintf(stderr, - "tnc: fscale = %g\n", *fscale); - } - - dcopy1(n, x, temp); - project(n, temp, pivot); - xnorm = dnrm21(n, temp); - oldnfeval = *nfeval; - - /* Compute the new search direction */ - frc = tnc_direction(pk, diagb, x, g, n, maxCGit, maxnfeval, nfeval, - upd1, yksk, yrsr, sk, yk, sr, yr, - lreset, function, state, xscale, xoffset, *fscale, - pivot, accuracy, gnorm, xnorm, low, up); - 
- if (frc == -1) - { - rc = TNC_ENOMEM; - break; - } - - if (frc) - { - rc = TNC_USERABORT; - break; - } - - if (!newcon) - { - if (!lreset) - { - /* Compute the accumulated step and its corresponding gradient - difference. */ - dxpy1(n, sk, sr); - dxpy1(n, yk, yr); - icycle++; - } - else - { - /* Initialize the sum of all the changes */ - dcopy1(n, sk, sr); - dcopy1(n, yk, yr); - fLastReset = *f; - icycle = 1; - } - } - - dcopy1(n, g, oldg); - oldf = *f; - oldgtp = ddot1(n, pk, g); - - /* Maximum unconstrained step length */ - ustpmax = stepmx / (dnrm21(n, pk) + epsmch); - - /* Maximum constrained step length */ - spe = stepMax(ustpmax, n, x, pk, pivot, low, up, xscale, xoffset); - - if (spe > 0.0) - { - ls_rc lsrc; - /* Set the initial step length */ - alpha = initialStep(*f, fmin / (*fscale), oldgtp, spe); - - /* Perform the linear search */ - lsrc = linearSearch(n, function, state, low, up, - xscale, xoffset, *fscale, pivot, - eta, ftol, spe, pk, x, f, &alpha, gfull, maxnfeval, nfeval); - - if (lsrc == LS_ENOMEM) - { - rc = TNC_ENOMEM; - break; - } - - if (lsrc == LS_USERABORT) - { - rc = TNC_USERABORT; - break; - } - - if (lsrc == LS_FAIL) - { - rc = TNC_LSFAIL; - break; - } - - /* If we went up to the maximum unconstrained step, increase it */ - if (alpha >= 0.9 * ustpmax) - { - stepmx *= 1e2; - if (messages & TNC_MSG_INFO) fprintf(stderr, - "tnc: stepmx = %g\n", stepmx); - } - - /* If we went up to the maximum constrained step, - a new constraint was encountered */ - if (alpha - spe >= -epsmch * 10.0) - { - newcon = TNC_TRUE; - } - else - { - /* Break if the linear search has failed to find a lower point */ - if (lsrc != LS_OK) - { - if (lsrc == LS_MAXFUN) rc = TNC_MAXFUN; - else rc = TNC_LSFAIL; - break; - } - newcon = TNC_FALSE; - } - } - else - { - /* Maximum constrained step == 0.0 => new constraint */ - newcon = TNC_TRUE; - } - - if (newcon) - { - if(!addConstraint(n, x, pk, pivot, low, up, xscale, xoffset)) - { - if(*nfeval == oldnfeval) - { - rc = 
TNC_NOPROGRESS; - break; - } - } - - fLastConstraint = *f; - } - - niter++; - - /* Set up parameters used in convergence and resetting tests */ - difold = difnew; - difnew = oldf - *f; - - /* If this is the first iteration of a new cycle, compute the - percentage reduction factor for the resetting test */ - if (icycle == 1) - { - if (difnew > difold * 2.0) epsred += epsred; - if (difnew < difold * 0.5) epsred *= 0.5; - } - - dcopy1(n, gfull, g); - scaleg(n, g, xscale, *fscale); - - dcopy1(n, g, temp); - project(n, temp, pivot); - gnorm = dnrm21(n, temp); - - /* Reset pivot */ - remcon = removeConstraint(oldgtp, gnorm, pgtol * (*fscale), *f, - fLastConstraint, g, pivot, n); - - /* If a constraint is removed */ - if (remcon) - { - /* Recalculate gnorm and reset fLastConstraint */ - dcopy1(n, g, temp); - project(n, temp, pivot); - gnorm = dnrm21(n, temp); - fLastConstraint = *f; - } - - if (!remcon && !newcon) - { - /* No constraint removed & no new constraint : tests for convergence */ - if (fabs(difnew) <= ftol * (*fscale)) - { - if (messages & TNC_MSG_INFO) fprintf(stderr, - "tnc: |fn-fn-1] = %g -> convergence\n", fabs(difnew) / (*fscale)); - rc = TNC_FCONVERGED; - break; - } - if (alpha * dnrm21(n, pk) <= xtol) - { - if (messages & TNC_MSG_INFO) fprintf(stderr, - "tnc: |xn-xn-1] = %g -> convergence\n", alpha * dnrm21(n, pk)); - rc = TNC_XCONVERGED; - break; - } - } - - project(n, g, pivot); - - if (messages & TNC_MSG_ITER) printCurrentIteration(n, *f / *fscale, gfull, - niter, *nfeval, pivot); - - /* Compute the change in the iterates and the corresponding change in the - gradients */ - if (!newcon) - { - for (i = 0; i < n; i++) - { - yk[i] = g[i] - oldg[i]; - sk[i] = alpha * pk[i]; - } - - /* Set up parameters used in updating the preconditioning strategy */ - yksk = ddot1(n, yk, sk); - - if (icycle == (n - 1) || difnew < epsred * (fLastReset - *f)) - lreset = TNC_TRUE; - else - { - yrsr = ddot1(n, yr, sr); - if (yrsr <= 0.0) lreset = TNC_TRUE; - else lreset = 
TNC_FALSE; - } - upd1 = TNC_FALSE; - } - } - - if (messages & TNC_MSG_ITER) printCurrentIteration(n, *f / *fscale, gfull, - niter, *nfeval, pivot); - - /* Unscaling */ - unscalex(n, x, xscale, xoffset); - coercex(n, x, low, up); - (*f) /= *fscale; - -cleanup: - if (oldg) free(oldg); - if (g) free(g); - if (temp) free(temp); - if (diagb) free(diagb); - if (pk) free(pk); - - if (sk) free(sk); - if (yk) free(yk); - if (sr) free(sr); - if (yr) free(yr); - - if (pivot) free(pivot); - - return rc; -} - -/* Print the results of the current iteration */ -static void printCurrentIteration(int n, double f, double g[], int niter, - int nfeval, int pivot[]) -{ - int i; - double gtg; - - gtg = 0.0; - for (i = 0; i < n; i++) - if (pivot[i] == 0) - gtg += g[i] * g[i]; - - fprintf(stderr, " %4d %4d %22.15E %15.8E\n", niter, nfeval, f, gtg); -} - -/* - * Set x[i] = 0.0 if direction i is currently constrained - */ -static void project(int n, double x[], int pivot[]) -{ - int i; - for (i = 0; i < n; i++) - if (pivot[i] != 0) - x[i] = 0.0; -} - -/* - * Set x[i] = 0.0 if direction i is constant - */ -static void projectConstants(int n, double x[], double xscale[]) -{ - int i; - for (i = 0; i < n; i++) - if (xscale[i] == 0.0) - x[i] = 0.0; -} - -/* - * Compute the maximum allowable step length - */ -static double stepMax(double step, int n, double x[], double dir[], - int pivot[], double low[], double up[], double xscale[], double xoffset[]) -{ - int i; - double t; - - /* Constrained maximum step */ - for (i = 0; i < n; i++) - { - if ((pivot[i] == 0) && (dir[i] != 0.0)) - { - if (dir[i] < 0.0) - { - t = (low[i]-xoffset[i])/xscale[i] - x[i]; - if (t > step * dir[i]) step = t / dir[i]; - } - else - { - t = (up[i]-xoffset[i])/xscale[i] - x[i]; - if (t < step * dir[i]) step = t / dir[i]; - } - } - } - - return step; -} - -/* - * Update the constraint vector pivot if a new constraint is encountered - */ -static logical addConstraint(int n, double x[], double p[], int pivot[], - double low[], 
double up[], double xscale[], double xoffset[]) -{ - int i, newcon = TNC_FALSE; - double tol, epsmch; - - epsmch = mchpr1(); - - for (i = 0; i < n; i++) - { - if ((pivot[i] == 0) && (p[i] != 0.0)) - { - if (p[i] < 0.0 && low[i] != - HUGE_VAL) - { - tol = epsmch * 10.0 * (fabs(low[i]) + 1.0); - if (x[i]*xscale[i]+xoffset[i] - low[i] <= tol) - { - pivot[i] = -1; - x[i] = (low[i]-xoffset[i])/xscale[i]; - newcon = TNC_TRUE; - } - } - else if (up[i] != HUGE_VAL) - { - tol = epsmch * 10.0 * (fabs(up[i]) + 1.0); - if (up[i] - (x[i]*xscale[i]+xoffset[i]) <= tol) - { - pivot[i] = 1; - x[i] = (up[i]-xoffset[i])/xscale[i]; - newcon = TNC_TRUE; - } - } - } - } - return newcon; -} - -/* - * Check if a constraint is no more active - */ -static logical removeConstraint(double gtpnew, double gnorm, double pgtolfs, - double f, double fLastConstraint, double g[], int pivot[], int n) -{ - double cmax, t; - int imax, i; - - if (((fLastConstraint - f) <= (gtpnew * -0.5)) && (gnorm > pgtolfs)) - return TNC_FALSE; - - imax = -1; - cmax = 0.0; - - for (i = 0; i < n; i++) - { - if (pivot[i] == 2) - continue; - t = -pivot[i] * g[i]; - if (t < cmax) - { - cmax = t; - imax = i; - } - } - - if (imax != -1) - { - pivot[imax] = 0; - return TNC_TRUE; - } - else - return TNC_FALSE; - -/* - * For details, see gill, murray, and wright (1981, p. 308) and - * fletcher (1981, p. 116). The multiplier tests (here, testing - * the sign of the components of the gradient) may still need to - * modified to incorporate tolerances for zero. - */ -} - -/* - * This routine performs a preconditioned conjugate-gradient - * iteration in order to solve the newton equations for a search - * direction for a truncated-newton algorithm. - * When the value of the quadratic model is sufficiently reduced, - * the iteration is terminated. 
- */ -static int tnc_direction(double *zsol, double *diagb, - double *x, double g[], int n, - int maxCGit, int maxnfeval, int *nfeval, - logical upd1, double yksk, double yrsr, - double *sk, double *yk, double *sr, double *yr, - logical lreset, tnc_function *function, void *state, - double xscale[], double xoffset[], double fscale, - int *pivot, double accuracy, - double gnorm, double xnorm, double low[], double up[]) -{ - double alpha, beta, qold, qnew, rhsnrm, tol, vgv, rz, rzold, qtest, pr, gtp; - int i, k, frc; - /* Temporary vectors */ - double *r = NULL, *zk = NULL, *v = NULL, *emat = NULL, *gv = NULL; - - /* No CG it. => dir = -grad */ - if (maxCGit == 0) - { - dcopy1(n, g, zsol); - dneg1(n, zsol); - project(n, zsol, pivot); - return 0; - } - - /* General initialization */ - rhsnrm = gnorm; - tol = 1e-12; - qold = 0.0; - rzold = 0.0; /* Uneeded */ - - frc = -1; /* ENOMEM here */ - r = malloc(sizeof(*r)*n); /* Residual */ - if (r == NULL) goto cleanup; - v = malloc(sizeof(*v)*n); - if (v == NULL) goto cleanup; - zk = malloc(sizeof(*zk)*n); - if (zk == NULL) goto cleanup; - emat = malloc(sizeof(*emat)*n); /* Diagonal preconditoning matrix */ - if (emat == NULL) goto cleanup; - gv = malloc(sizeof(*gv)*n); /* hessian times v */ - if (gv == NULL) goto cleanup; - - /* Initialization for preconditioned conjugate-gradient algorithm */ - frc = initPreconditioner(diagb, emat, n, lreset, yksk, yrsr, sk, yk, sr, yr, - upd1); - if (frc) goto cleanup; - - for (i = 0; i < n; i++) - { - r[i] = -g[i]; - v[i] = 0.0; - zsol[i] = 0.0; /* Computed search direction */ - } - - /* Main iteration */ - for (k = 0; k < maxCGit; k++) - { - /* CG iteration to solve system of equations */ - project(n, r, pivot); - frc = msolve(r, zk, n, sk, yk, diagb, sr, yr, upd1, yksk, yrsr, lreset); - if (frc) goto cleanup; - project(n, zk, pivot); - rz = ddot1(n, r, zk); - - if ((rz / rhsnrm < tol) || ((*nfeval) >= (maxnfeval-1))) - { - /* Truncate algorithm in case of an emergency - or too many 
function evaluations */ - if (k == 0) - { - dcopy1(n, g, zsol); - dneg1(n, zsol); - project(n, zsol, pivot); - } - break; - } - if (k == 0) beta = 0.0; - else beta = rz / rzold; - - for (i = 0; i < n; i++) - v[i] = zk[i] + beta * v[i]; - - project(n, v, pivot); - frc = hessianTimesVector(v, gv, n, x, g, function, state, - xscale, xoffset, fscale, accuracy, xnorm, low, up); - ++(*nfeval); - if (frc) goto cleanup; - project(n, gv, pivot); - - vgv = ddot1(n, v, gv); - if (vgv / rhsnrm < tol) - { - /* Truncate algorithm in case of an emergency */ - if (k == 0) - { - frc = msolve(g, zsol, n, sk, yk, diagb, sr, yr, upd1, yksk, yrsr, - lreset); - if (frc) goto cleanup; - dneg1(n, zsol); - project(n, zsol, pivot); - } - break; - } - diagonalScaling(n, emat, v, gv, r); - - /* Compute linear step length */ - alpha = rz / vgv; - - /* Compute current solution and related vectors */ - daxpy1(n, alpha, v, zsol); - daxpy1(n, -alpha, gv, r); - - /* Test for convergence */ - gtp = ddot1(n, zsol, g); - pr = ddot1(n, r, zsol); - qnew = (gtp + pr) * 0.5; - qtest = (k + 1) * (1.0 - qold / qnew); - if (qtest <= 0.5) break; - - /* Perform cautionary test */ - if (gtp > 0.0) - { - /* Truncate algorithm in case of an emergency */ - daxpy1(n, -alpha, v, zsol); - break; - } - - qold = qnew; - rzold = rz; - } - - /* Terminate algorithm */ - /* Store (or restore) diagonal preconditioning */ - dcopy1(n, emat, diagb); - -cleanup: - if (r) free(r); - if (v) free(v); - if (zk) free(zk); - if (emat) free(emat); - if (gv) free(gv); - return frc; -} - -/* - * Update the preconditioning matrix based on a diagonal version - * of the bfgs quasi-newton update. 
- */ -static void diagonalScaling(int n, double e[], double v[], double gv[], - double r[]) -{ - int i; - double vr, vgv; - - vr = 1.0/ddot1(n, v, r); - vgv = 1.0/ddot1(n, v, gv); - for (i = 0; i < n; i++) - { - e[i] += - r[i]*r[i]*vr + gv[i]*gv[i]*vgv; - if (e[i] <= 1e-6) e[i] = 1.0; - } -} - -/* - * Returns the length of the initial step to be taken along the - * vector p in the next linear search. - */ -static double initialStep(double fnew, double fmin, double gtp, double smax) -{ - double d, alpha; - - d = fabs(fnew - fmin); - alpha = 1.0; - if (d * 2.0 <= -(gtp) && d >= mchpr1()) alpha = d * -2.0 / gtp; - if (alpha >= smax) alpha = smax; - - return alpha; -} - -/* - * Hessian vector product through finite differences - */ -static int hessianTimesVector(double v[], double gv[], int n, - double x[], double g[], tnc_function *function, void *state, - double xscale[], double xoffset[], double fscale, - double accuracy, double xnorm, double low[], double up[]) -{ - double dinv, f, delta, *xv; - int i, frc; - - xv = malloc(sizeof(*xv)*n); - if (xv == NULL) return -1; - - delta = accuracy * (xnorm + 1.0); - for (i = 0; i < n; i++) - xv[i] = x[i] + delta * v[i]; - - unscalex(n, xv, xscale, xoffset); - coercex(n, xv, low, up); - frc = function(xv, &f, gv, state); - free(xv); - if (frc) return 1; - scaleg(n, gv, xscale, fscale); - - dinv = 1.0 / delta; - for (i = 0; i < n; i++) - gv[i] = (gv[i] - g[i]) * dinv; - - projectConstants(n, gv, xscale); - - return 0; -} - -/* - * This routine acts as a preconditioning step for the - * linear conjugate-gradient routine. It is also the - * method of computing the search direction from the - * gradient for the non-linear conjugate-gradient code. - * It represents a two-step self-scaled bfgs formula. 
- */ -static int msolve(double g[], double y[], int n, - double sk[], double yk[], double diagb[], double sr[], - double yr[], logical upd1, double yksk, double yrsr, - logical lreset) -{ - double ghyk, ghyr, yksr, ykhyk, ykhyr, yrhyr, rdiagb, gsr, gsk; - int i, frc; - double *hg = NULL, *hyk = NULL, *hyr = NULL; - - if (upd1) - { - for (i = 0; i < n; i++) y[i] = g[i] / diagb[i]; - return 0; - } - - frc = -1; - gsk = ddot1(n, g, sk); - hg = malloc(sizeof(*hg)*n); - if (hg == NULL) goto cleanup; - hyr = malloc(sizeof(*hyr)*n); - if (hyr == NULL) goto cleanup; - hyk = malloc(sizeof(*hyk)*n); - if (hyk == NULL) goto cleanup; - frc = 0; - - /* Compute gh and hy where h is the inverse of the diagonals */ - if (lreset) - { - for (i = 0; i < n; i++) - { - rdiagb = 1.0 / diagb[i]; - hg[i] = g[i] * rdiagb; - hyk[i] = yk[i] * rdiagb; - } - ykhyk = ddot1(n, yk, hyk); - ghyk = ddot1(n, g, hyk); - ssbfgs(n, 1.0, sk, hg, hyk, yksk, ykhyk, gsk, ghyk, y); - } - else - { - for (i = 0; i < n; i++) - { - rdiagb = 1.0 / diagb[i]; - hg[i] = g[i] * rdiagb; - hyk[i] = yk[i] * rdiagb; - hyr[i] = yr[i] * rdiagb; - } - gsr = ddot1(n, g, sr); - ghyr = ddot1(n, g, hyr); - yrhyr = ddot1(n, yr, hyr); - ssbfgs(n, 1.0, sr, hg, hyr, yrsr, yrhyr, gsr, ghyr, hg); - yksr = ddot1(n, yk, sr); - ykhyr = ddot1(n, yk, hyr); - ssbfgs(n, 1.0, sr, hyk, hyr, yrsr, yrhyr, yksr, ykhyr, hyk); - ykhyk = ddot1(n, hyk, yk); - ghyk = ddot1(n, hyk, g); - ssbfgs(n, 1.0, sk, hg, hyk, yksk, ykhyk, gsk, ghyk, y); - } - -cleanup: - if (hg) free(hg); - if (hyk) free(hyk); - if (hyr) free(hyr); - - return frc; -} - -/* - * Self-scaled BFGS - */ -static void ssbfgs(int n, double gamma, double sj[], double hjv[], - double hjyj[], double yjsj, - double yjhyj, double vsj, double vhyj, double hjp1v[]) -{ - double beta, delta; - int i; - - if (yjsj == 0.0) - { - delta = 0.0; - beta = 0.0; - } - else - { - delta = (gamma * yjhyj / yjsj + 1.0) * vsj / yjsj - gamma * vhyj / yjsj; - beta = -gamma * vsj / yjsj; - } - - for (i = 0; i < 
n; i++) - hjp1v[i] = gamma * hjv[i] + delta * sj[i] + beta * hjyj[i]; -} - -/* - * Initialize the preconditioner - */ -static int initPreconditioner(double diagb[], double emat[], int n, - logical lreset, double yksk, double yrsr, - double sk[], double yk[], double sr[], double yr[], - logical upd1) -{ - double srds, yrsk, td, sds; - int i; - double *bsk; - - if (upd1) - { - dcopy1(n, diagb, emat); - return 0; - } - - bsk = malloc(sizeof(*bsk)*n); - if (bsk == NULL) return -1; - - if (lreset) - { - for (i = 0; i < n; i++) bsk[i] = diagb[i] * sk[i]; - sds = ddot1(n, sk, bsk); - if (yksk == 0.0) yksk = 1.0; - if (sds == 0.0) sds = 1.0; - for (i = 0; i < n; i++) - { - td = diagb[i]; - emat[i] = td - td * td * sk[i] * sk[i] / sds + yk[i] * yk[i] / yksk; - } - } - else - { - for (i = 0; i < n; i++) bsk[i] = diagb[i] * sr[i]; - sds = ddot1(n, sr, bsk); - srds = ddot1(n, sk, bsk); - yrsk = ddot1(n, yr, sk); - if (yrsr == 0.0) yrsr = 1.0; - if (sds == 0.0) sds = 1.0; - for (i = 0; i < n; i++) - { - td = diagb[i]; - bsk[i] = td * sk[i] - bsk[i] * srds / sds + yr[i] * yrsk / yrsr; - emat[i] = td - td * td * sr[i] * sr[i] / sds + yr[i] * yr[i] / yrsr; - } - sds = ddot1(n, sk, bsk); - if (yksk == 0.0) yksk = 1.0; - if (sds == 0.0) sds = 1.0; - for (i = 0; i < n; i++) - emat[i] = emat[i] - bsk[i] * bsk[i] / sds + yk[i] * yk[i] / yksk; - } - - free(bsk); - return 0; -} - - -/* - * Line search algorithm of gill and murray - */ -static ls_rc linearSearch(int n, tnc_function *function, void *state, - double low[], double up[], - double xscale[], double xoffset[], double fscale, int pivot[], - double eta, double ftol, double xbnd, - double p[], double x[], double *f, - double *alpha, double gfull[], int maxnfeval, int *nfeval) -{ - double b1, big, tol, rmu, fpresn, fu, gu, fw, gw, gtest1, gtest2, - oldf, fmin, gmin, rtsmll, step, a, b, e, u, ualpha, factor, scxbnd, xw, - epsmch, reltol, abstol, tnytol, pe, xnorm, rteps; - double *temp = NULL, *tempgfull = NULL, *newgfull = NULL; - 
int maxlsit = 64, i, itcnt, frc; - ls_rc rc; - getptc_rc itest; - logical braktd; - - rc = LS_ENOMEM; - temp = malloc(sizeof(*temp)*n); - if (temp == NULL) goto cleanup; - tempgfull = malloc(sizeof(*tempgfull)*n); - if (tempgfull == NULL) goto cleanup; - newgfull = malloc(sizeof(*newgfull)*n); - if (newgfull == NULL) goto cleanup; - - dcopy1(n, gfull, temp); - scaleg(n, temp, xscale, fscale); - gu = ddot1(n, temp, p); - - dcopy1(n, x, temp); - project(n, temp, pivot); - xnorm = dnrm21(n, temp); - - /* Compute the absolute and relative tolerances for the linear search */ - epsmch = mchpr1(); - rteps = sqrt(epsmch); - pe = dnrm21(n, p) + epsmch; - reltol = rteps * (xnorm + 1.0) / pe; - abstol = -epsmch * (1.0 + fabs(*f)) / (gu - epsmch); - - /* Compute the smallest allowable spacing between points in the linear - search */ - tnytol = epsmch * (xnorm + 1.0) / pe; - - rtsmll = epsmch; - big = 1.0 / (epsmch * epsmch); - itcnt = 0; - - /* Set the estimated relative precision in f(x). */ - fpresn = ftol; - - u = *alpha; - fu = *f; - fmin = *f; - rmu = 1e-4; - - /* Setup */ - itest = getptcInit(&reltol, &abstol, tnytol, eta, rmu, - xbnd, &u, &fu, &gu, alpha, &fmin, &gmin, &xw, &fw, &gw, &a, &b, - &oldf, &b1, &scxbnd, &e, &step, &factor, &braktd, >est1, >est2, &tol); - - /* If itest == GETPTC_EVAL, the algorithm requires the function value to be - calculated */ - while(itest == GETPTC_EVAL) - { - /* Test for too many iterations or too many function evals */ - if ((++itcnt > maxlsit) || ((*nfeval) >= maxnfeval)) break; - - ualpha = *alpha + u; - for (i = 0; i < n; i++) - temp[i] = x[i] + ualpha * p[i]; - - /* Function evaluation */ - unscalex(n, temp, xscale, xoffset); - coercex(n, temp, low, up); - - frc = function(temp, &fu, tempgfull, state); - ++(*nfeval); - if (frc) - { - rc = LS_USERABORT; - goto cleanup; - } - - fu *= fscale; - - dcopy1(n, tempgfull, temp); - scaleg(n, temp, xscale, fscale); - gu = ddot1(n, temp, p); - - itest = getptcIter(big, rtsmll, &reltol, 
&abstol, tnytol, fpresn, - xbnd, &u, &fu, &gu, alpha, &fmin, &gmin, &xw, &fw, &gw, &a, &b, - &oldf, &b1, &scxbnd, &e, &step, &factor, &braktd, >est1, >est2, &tol); - - /* New best point ? */ - if (*alpha == ualpha) - dcopy1(n, tempgfull, newgfull); - } - - if (itest == GETPTC_OK) - { - /* A successful search has been made */ - *f = fmin; - daxpy1(n, *alpha, p, x); - dcopy1(n, newgfull, gfull); - rc = LS_OK; - } - /* Too many iterations ? */ - else if (itcnt > maxlsit) rc = LS_FAIL; - /* If itest=GETPTC_FAIL or GETPTC_EINVAL a lower point could not be found */ - else if (itest != GETPTC_EVAL) rc = LS_FAIL; - /* Too many function evaluations */ - else rc = LS_MAXFUN; - -cleanup: - if (temp) free(temp); - if (tempgfull) free(tempgfull); - if (newgfull) free(newgfull); - - return rc; -} - -/* - * getptc, an algorithm for finding a steplength, called repeatedly by - * routines which require a step length to be computed using cubic - * interpolation. The parameters contain information about the interval - * in which a lower point is to be found and from this getptc computes a - * point at which the function can be evaluated by the calling program. - */ -static getptc_rc getptcInit(double *reltol, double *abstol, double tnytol, - double eta, double rmu, double xbnd, - double *u, double *fu, double *gu, double *xmin, - double *fmin, double *gmin, double *xw, double *fw, - double *gw, double *a, double *b, double *oldf, - double *b1, double *scxbnd, double *e, double *step, - double *factor, logical *braktd, double *gtest1, - double *gtest2, double *tol) -{ - /* Check input parameters */ - if (*u <= 0.0 || xbnd <= tnytol || *gu > 0.0) - return GETPTC_EINVAL; - if (xbnd < *abstol) *abstol = xbnd; - *tol = *abstol; - - /* a and b define the interval of uncertainty, x and xw are points */ - /* with lowest and second lowest function values so far obtained. 
*/ - /* initialize a,smin,xw at origin and corresponding values of */ - /* function and projection of the gradient along direction of search */ - /* at values for latest estimate at minimum. */ - - *a = 0.0; - *xw = 0.0; - *xmin = 0.0; - *oldf = *fu; - *fmin = *fu; - *fw = *fu; - *gw = *gu; - *gmin = *gu; - *step = *u; - *factor = 5.0; - - /* The minimum has not yet been bracketed. */ - *braktd = TNC_FALSE; - - /* Set up xbnd as a bound on the step to be taken. (xbnd is not computed */ - /* explicitly but scxbnd is its scaled value.) Set the upper bound */ - /* on the interval of uncertainty initially to xbnd + tol(xbnd). */ - *scxbnd = xbnd; - *b = *scxbnd + *reltol * fabs(*scxbnd) + *abstol; - *e = *b + *b; - *b1 = *b; - - /* Compute the constants required for the two convergence criteria. */ - *gtest1 = -rmu * *gu; - *gtest2 = -eta * *gu; - - /* If the step is too large, replace by the scaled bound (so as to */ - /* compute the new point on the boundary). */ - if (*step >= *scxbnd) - { - *step = *scxbnd; - /* Move sxbd to the left so that sbnd + tol(xbnd) = xbnd. 
*/ - *scxbnd -= (*reltol * fabs(xbnd) + *abstol) / (1.0 + *reltol); - } - *u = *step; - if (fabs(*step) < *tol && *step < 0.0) *u = -(*tol); - if (fabs(*step) < *tol && *step >= 0.0) *u = *tol; - return GETPTC_EVAL; -} - -static getptc_rc getptcIter(double big, double - rtsmll, double *reltol, double *abstol, double tnytol, - double fpresn, double xbnd, - double *u, double *fu, double *gu, double *xmin, - double *fmin, double *gmin, double *xw, double *fw, - double *gw, double *a, double *b, double *oldf, - double *b1, double *scxbnd, double *e, double *step, - double *factor, logical *braktd, double *gtest1, - double *gtest2, double *tol) -{ - double abgw, absr, p, q, r, s, scale, denom, - a1, d1, d2, sumsq, abgmin, chordm, chordu, - xmidpt, twotol; - logical convrg; - - /* Update a,b,xw, and xmin */ - if (*fu <= *fmin) - { - /* If function value not increased, new point becomes next */ - /* origin and other points are scaled accordingly. */ - chordu = *oldf - (*xmin + *u) * *gtest1; - if (*fu > chordu) - { - /* The new function value does not satisfy the sufficient decrease */ - /* criterion. prepare to move the upper bound to this point and */ - /* force the interpolation scheme to either bisect the interval of */ - /* uncertainty or take the linear interpolation step which estimates */ - /* the root of f(alpha)=chord(alpha). 
*/ - - chordm = *oldf - *xmin * *gtest1; - *gu = -(*gmin); - denom = chordm - *fmin; - if (fabs(denom) < 1e-15) - { - denom = 1e-15; - if (chordm - *fmin < 0.0) denom = -denom; - } - if (*xmin != 0.0) *gu = *gmin * (chordu - *fu) / denom; - *fu = 0.5 * *u * (*gmin + *gu) + *fmin; - if (*fu < *fmin) *fu = *fmin; - } - else - { - *fw = *fmin; - *fmin = *fu; - *gw = *gmin; - *gmin = *gu; - *xmin += *u; - *a -= *u; - *b -= *u; - *xw = -(*u); - *scxbnd -= *u; - if (*gu <= 0.0) - { - *a = 0.0; - } - else - { - *b = 0.0; - *braktd = TNC_TRUE; - } - *tol = fabs(*xmin) * *reltol + *abstol; - goto ConvergenceCheck; - } - } - - /* If function value increased, origin remains unchanged */ - /* but new point may now qualify as w. */ - if (*u < 0.0) - *a = *u; - else - { - *b = *u; - *braktd = TNC_TRUE; - } - *xw = *u; - *fw = *fu; - *gw = *gu; - -ConvergenceCheck: - twotol = *tol + *tol; - xmidpt = 0.5 * (*a + *b); - - /* Check termination criteria */ - convrg = (fabs(xmidpt) <= twotol - 0.5 * (*b - *a)) || - (fabs(*gmin) <= *gtest2 && *fmin < *oldf && ((fabs(*xmin - xbnd) > *tol) || - (! (*braktd)))); - if (convrg) - { - if (*xmin != 0.0) return GETPTC_OK; - - /* - * If the function has not been reduced, check to see that the relative - * change in f(x) is consistent with the estimate of the delta- - * unimodality constant, tol. If the change in f(x) is larger than - * expected, reduce the value of tol. - */ - if (fabs(*oldf - *fw) <= fpresn) - return GETPTC_FAIL; - *tol = 0.1 * *tol; - if (*tol < tnytol) return GETPTC_FAIL; - *reltol = 0.1 * *reltol; - *abstol = 0.1 * *abstol; - twotol = 0.1 * twotol; - } - - /* Continue with the computation of a trial step length */ - r = 0.0; - q = 0.0; - s = 0.0; - if (fabs(*e) > *tol) - { - /* Fit cubic through xmin and xw */ - r = 3.0 * (*fmin - *fw) / *xw + *gmin + *gw; - absr = fabs(r); - q = absr; - if (*gw != 0.0 && *gmin != 0.0) - { - /* Compute the square root of (r*r - gmin*gw) in a way - which avoids underflow and overflow. 
*/ - abgw = fabs(*gw); - abgmin = fabs(*gmin); - s = sqrt(abgmin) * sqrt(abgw); - if (*gw / abgw * *gmin > 0.0) - { - if (r >= s || r <= -s) - { - /* Compute the square root of r*r - s*s */ - q = sqrt(fabs(r + s)) * sqrt(fabs(r - s)); - } - else - { - r = 0.0; - q = 0.0; - goto MinimumFound; - } - } - else - { - /* Compute the square root of r*r + s*s. */ - sumsq = 1.0; - p = 0.0; - if (absr >= s) - { - /* There is a possibility of underflow. */ - if (absr > rtsmll) p = absr * rtsmll; - if (s >= p) - { - double value = s / absr; - sumsq = 1.0 + value * value; - } - scale = absr; - } - else - { - /* There is a possibility of overflow. */ - if (s > rtsmll) p = s * rtsmll; - if (absr >= p) - { - double value = absr / s; - sumsq = 1.0 + value * value; - } - scale = s; - } - sumsq = sqrt(sumsq); - q = big; - if (scale < big / sumsq) q = scale * sumsq; - } - } - - /* Compute the minimum of fitted cubic */ - if (*xw < 0.0) q = -q; - s = *xw * (*gmin - r - q); - q = *gw - *gmin + q + q; - if (q > 0.0) s = -s; - if (q <= 0.0) q = -q; - r = *e; - if (*b1 != *step || *braktd) *e = *step; - } - -MinimumFound: - /* Construct an artificial bound on the estimated steplength */ - a1 = *a; - *b1 = *b; - *step = xmidpt; - if ( (! *braktd) || ((*a == 0.0 && *xw < 0.0) || (*b == 0.0 && *xw > 0.0)) ) - { - if (*braktd) - { - /* If the minimum is not bracketed by 0 and xw the step must lie - within (a1,b1). 
*/ - d1 = *xw; - d2 = *a; - if (*a == 0.0) d2 = *b; - /* This line might be : */ - /* if (*a == 0.0) d2 = *e */ - *u = -d1 / d2; - *step = 5.0 * d2 * (0.1 + 1.0 / *u) / 11.0; - if (*u < 1.0) *step = 0.5 * d2 * sqrt(*u); - } - else - { - *step = -(*factor) * *xw; - if (*step > *scxbnd) *step = *scxbnd; - if (*step != *scxbnd) *factor = 5.0 * *factor; - } - /* If the minimum is bracketed by 0 and xw the step must lie within (a,b) */ - if (*step <= 0.0) a1 = *step; - if (*step > 0.0) *b1 = *step; - } - -/* - * Reject the step obtained by interpolation if it lies outside the - * required interval or it is greater than half the step obtained - * during the last-but-one iteration. - */ - if (fabs(s) <= fabs(0.5 * q * r) || s <= q * a1 || s >= q * *b1) - *e = *b - *a; - else - { - /* A cubic interpolation step */ - *step = s / q; - - /* The function must not be evaluated too close to a or b. */ - if (*step - *a < twotol || *b - *step < twotol) - { - if (xmidpt <= 0.0) - *step = -(*tol); - else - *step = *tol; - } - } - - /* If the step is too large, replace by the scaled bound (so as to */ - /* compute the new point on the boundary). */ - if (*step >= *scxbnd) - { - *step = *scxbnd; - /* Move sxbd to the left so that sbnd + tol(xbnd) = xbnd. 
*/ - *scxbnd -= (*reltol * fabs(xbnd) + *abstol) / (1.0 + *reltol); - } - *u = *step; - if (fabs(*step) < *tol && *step < 0.0) *u = -(*tol); - if (fabs(*step) < *tol && *step >= 0.0) *u = *tol; - return GETPTC_EVAL; -} - -/* - * Return epsmch, where epsmch is the smallest possible - * power of 2 such that 1.0 + epsmch > 1.0 - */ -static double mchpr1(void) -{ - static double epsmch = 0.0; - - if (epsmch == 0.0) - { - double eps = 1.0; - while((1.0 + (eps*0.5)) > 1.0) - eps *= 0.5; - epsmch = eps; - } - - return epsmch; -} - -/* Blas like routines */ - -/* dy+=dx */ -static void dxpy1(int n, double dx[], double dy[]) -{ - int i; - for (i = 0; i < n; i++) - dy[i] += dx[i]; -} - -/* dy+=da*dx */ -static void daxpy1(int n, double da, double dx[], double dy[]) -{ - int i; - for (i = 0; i < n; i++) - dy[i] += da*dx[i]; -} - -/* Copy dx -> dy */ -/* Could use memcpy */ -static void dcopy1(int n, double dx[], double dy[]) -{ - int i; - for (i = 0; i < n; i++) - dy[i] = dx[i]; -} - -/* Negate */ -static void dneg1(int n, double v[]) -{ - int i; - for (i = 0; i < n; i++) - v[i] = -v[i]; -} - -/* Dot product */ -static double ddot1(int n, double dx[], double dy[]) -{ - int i; - double dtemp = 0.0; - for (i = 0; i < n; i++) - dtemp += dy[i]*dx[i]; - return dtemp; -} - -/* Euclidian norm */ -static double dnrm21(int n, double dx[]) -{ - int i; - double dssq = 1.0, dscale = 0.0; - - for (i = 0; i < n; i++) - { - if (dx[i] != 0.0) - { - double dabsxi = fabs(dx[i]); - if (dscale up bound) */ - TNC_LOCALMINIMUM = 0, /* Local minima reach (|pg| ~= 0) */ - TNC_FCONVERGED = 1, /* Converged (|f_n-f_(n-1)| ~= 0) */ - TNC_XCONVERGED = 2, /* Converged (|x_n-x_(n-1)| ~= 0) */ - TNC_MAXFUN = 3, /* Max. 
number of function evaluations reach */ - TNC_LSFAIL = 4, /* Linear search failed */ - TNC_CONSTANT = 5, /* All lower bounds are equal to the upper bounds */ - TNC_NOPROGRESS = 6, /* Unable to progress */ - TNC_USERABORT = 7 /* User requested end of minization */ -} tnc_rc; - -/* - * Return code strings - * use tnc_rc_string[rc - TNC_MINRC] to get the message associated with - * return code rc. - */ - -extern char *tnc_rc_string[11]; - -/* - * A function as required by tnc - * state is a void pointer provided to the function at each call - * - * x : on input, then vector of variables (should not be modified) - * f : on output, the value of the function - * g : on output, the value of the gradient - * state : on input, the value of the state variable as provided to tnc - * - * must returns 0 if no error occurs or 1 to immediately end the minimization. - * - */ -typedef int tnc_function(double x[], double *f, double g[], void *state); - -/* - * tnc : minimize a function with variables subject to bounds, using - * gradient information. - * - * n : number of variables (must be >= 0) - * x : on input, initial estimate ; on output, the solution - * f : on output, the function value at the solution - * g : on output, the gradient value at the solution - * g should be an allocated vector of size n or NULL, - * in which case the gradient value is not returned. - * function : the function to minimize (see tnc_function) - * state : used by function (see tnc_function) - * low, up : the bounds - * set low[i] to -HUGE_VAL to remove the lower bound - * set up[i] to HUGE_VAL to remove the upper bound - * if low == NULL, the lower bounds are removed. - * if up == NULL, the upper bounds are removed. - * scale : scaling factors to apply to each variable - * if NULL, the factors are up-low for interval bounded variables - * and 1+|x] for the others. 
- * offset : constant to substract to each variable - * if NULL, the constant are (up+low)/2 for interval bounded - * variables and x for the others. - * messages : see the tnc_message enum - * maxCGit : max. number of hessian*vector evaluation per main iteration - * if maxCGit == 0, the direction chosen is -gradient - * if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)) - * maxnfeval : max. number of function evaluation - * eta : severity of the line search. if < 0 or > 1, set to 0.25 - * stepmx : maximum step for the line search. may be increased during call - * if too small, will be set to 10.0 - * accuracy : relative precision for finite difference calculations - * if <= machine_precision, set to sqrt(machine_precision) - * fmin : minimum function value estimate - * ftol : precision goal for the value of f in the stoping criterion - * if ftol < 0.0, ftol is set to accuracy - * xtol : precision goal for the value of x in the stopping criterion - * (after applying x scaling factors) - * if xtol < 0.0, xtol is set to sqrt(machine_precision) - * pgtol : precision goal for the value of the projected gradient in the - * stopping criterion (after applying x scaling factors) - * if pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy) - * setting it to 0.0 is not recommended - * rescale : f scaling factor (in log10) used to trigger f value rescaling - * if 0, rescale at each iteration - * if a big value, never rescale - * if < 0, rescale is set to 1.3 - * nfeval : on output, the number of function evaluations. - * ignored if nfeval==NULL. - * - * The tnc function returns a code defined in the tnc_rc enum. - * On output, x, f and g may be very slightly out of sync because of scaling. 
- * - */ -extern int tnc(int n, double x[], double *f, double g[], - tnc_function *function, void *state, - double low[], double up[], double scale[], double offset[], - int messages, int maxCGit, int maxnfeval, double eta, double stepmx, - double accuracy, double fmin, double ftol, double xtol, double pgtol, - double rescale, int *nfeval); - -#ifdef __cplusplus -} -#endif - -#endif /* _TNC_ */ diff --git a/scipy-0.10.1/scipy/optimize/zeros.c b/scipy-0.10.1/scipy/optimize/zeros.c deleted file mode 100644 index 3bfef890a1..0000000000 --- a/scipy-0.10.1/scipy/optimize/zeros.c +++ /dev/null @@ -1,219 +0,0 @@ - -/* Written by Charles Harris charles.harris@sdl.usu.edu */ - -/* Modifications by Travis Oliphant to separate Python code from C - routines */ - -#include "Python.h" -#include - -typedef struct { - int funcalls; - int iterations; - int error_num; - PyObject *function; - PyObject *args; - jmp_buf env; -} scipy_zeros_parameters; - -/* - * Storage for the relative precision of doubles. This is computed when the module - * is initialized. 
- */ - -#include "Zeros/zeros.h" - -#define SIGNERR -1 -#define CONVERR -2 - -static double scipy_zeros_rtol=0; - -double -scipy_zeros_functions_func(double x, void *params) -{ - scipy_zeros_parameters *myparams = params; - PyObject *args, *f, *retval=NULL; - double val; - - args = myparams->args; - f = myparams->function; - PyTuple_SetItem(args, 0, Py_BuildValue("d",x)); - retval=PyObject_CallObject(f,args); - if (retval == NULL) { - longjmp(myparams->env, 1); - } - val = PyFloat_AsDouble(retval); - Py_XDECREF(retval); - return val; -} - -/* - * Helper function that calls a Python function with extended arguments - */ - -static PyObject * -call_solver(solver_type solver, PyObject *self, PyObject *args) -{ - double a,b,xtol,zero; - int iter,i, len, fulloutput, disp=1, flag=0; - scipy_zeros_parameters params; - jmp_buf env; - PyObject *f, *xargs, *item, *fargs=NULL; - - if (!PyArg_ParseTuple(args, "OdddiOi|i", - &f, &a, &b, &xtol, &iter, &xargs, &fulloutput, &disp)) - { - PyErr_SetString(PyExc_RuntimeError, "Unable to parse arguments"); - return NULL; - } - if (xtol < 0) { - PyErr_SetString(PyExc_ValueError, "xtol must be >= 0"); - return NULL; - } - if (iter < 0) { - PyErr_SetString(PyExc_ValueError, "maxiter should be > 0"); - return NULL; - } - - len = PyTuple_Size(xargs); - /* Make room for the double as first argument */ - fargs = PyTuple_New(len + 1); - if (fargs == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "Failed to allocate argument tuple"); - return NULL; - } - - for (i = 0; i < len; i++) { - item = PyTuple_GetItem(xargs, i); - if (item == NULL) { - Py_DECREF(fargs); - return NULL; - } - Py_INCREF(item); - PyTuple_SET_ITEM(fargs, i+1, item); - } - - params.function = f; - params.args = fargs; - - if (!setjmp(env)) { - /* direct return */ - memcpy(params.env, env, sizeof(jmp_buf)); - params.error_num = 0; - zero = solver(scipy_zeros_functions_func, a, b, - xtol, scipy_zeros_rtol, iter, (default_parameters*)¶ms); - Py_DECREF(fargs); - if 
(params.error_num != 0) { - if (params.error_num == SIGNERR) { - PyErr_SetString(PyExc_ValueError, - "f(a) and f(b) must have different signs"); - return NULL; - } - if (params.error_num == CONVERR) { - if (disp) { - char msg[100]; - PyOS_snprintf(msg, sizeof(msg), - "Failed to converge after %d iterations.", - params.iterations); - PyErr_SetString(PyExc_RuntimeError, msg); - flag = 1; - return NULL; - } - } - } - if (fulloutput) { - return Py_BuildValue("diii", - zero, params.funcalls, params.iterations, flag); - } - else { - return Py_BuildValue("d", zero); - } - } - else { - /* error return from Python function */ - Py_DECREF(fargs); - return NULL; - } -} - -/* - * These routines interface with the solvers through call_solver - */ - -static PyObject * -_bisect(PyObject *self, PyObject *args) -{ - return call_solver(bisect,self,args); -} - -static PyObject * -_ridder(PyObject *self, PyObject *args) -{ - return call_solver(ridder,self,args); -} - -static PyObject * -_brenth(PyObject *self, PyObject *args) -{ - return call_solver(brenth,self,args); -} - -static PyObject * -_brentq(PyObject *self, PyObject *args) -{ - return call_solver(brentq,self,args); -} - -/* - * Standard Python module inteface - */ - -static PyMethodDef -Zerosmethods[] = { - {"_bisect", _bisect, METH_VARARGS, "a"}, - {"_ridder", _ridder, METH_VARARGS, "a"}, - {"_brenth", _brenth, METH_VARARGS, "a"}, - {"_brentq", _brentq, METH_VARARGS, "a"}, - {NULL, NULL} -}; - -static double __compute_relative_precision() -{ - double tol; - - /* Determine relative precision of doubles, assumes binary */ - for(tol = 1; tol + 1 != 1; tol /= 2); - return 2*tol; -} - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_zeros", - NULL, - -1, - Zerosmethods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__zeros(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - - scipy_zeros_rtol = __compute_relative_precision(); - - return m; -} -#else 
-PyMODINIT_FUNC init_zeros(void) -{ - Py_InitModule("_zeros", Zerosmethods); - scipy_zeros_rtol = __compute_relative_precision(); -} -#endif diff --git a/scipy-0.10.1/scipy/optimize/zeros.py b/scipy-0.10.1/scipy/optimize/zeros.py deleted file mode 100644 index de775c9e12..0000000000 --- a/scipy-0.10.1/scipy/optimize/zeros.py +++ /dev/null @@ -1,440 +0,0 @@ - -import warnings - -import _zeros -from numpy import finfo - -_iter = 100 -_xtol = 1e-12 -# not actually used at the moment -_rtol = finfo(float).eps * 2 - -__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth'] - -CONVERGED = 'converged' -SIGNERR = 'sign error' -CONVERR = 'convergence error' -flag_map = {0 : CONVERGED, -1 : SIGNERR, -2 : CONVERR} - - -class RootResults(object): - def __init__(self, root, iterations, function_calls, flag): - self.root = root - self.iterations = iterations - self.function_calls = function_calls - self.converged = flag == 0 - try: - self.flag = flag_map[flag] - except KeyError: - self.flag = 'unknown error %d' % (flag,) - - -def results_c(full_output, r): - if full_output: - x, funcalls, iterations, flag = r - results = RootResults(root=x, - iterations=iterations, - function_calls=funcalls, - flag=flag) - return x, results - else: - return r - - -# Newton-Raphson method -def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50): - """ - Find a zero using the Newton-Raphson or secant method. - - Find a zero of the function `func` given a nearby starting point `x0`. - The Newton-Raphson method is used if the derivative `fprime` of `func` - is provided, otherwise the secant method is used. - - Parameters - ---------- - func : function - The function whose zero is wanted. It must be a function of a - single variable of the form f(x,a,b,c...), where a,b,c... are extra - arguments that can be passed in the `args` parameter. - x0 : float - An initial estimate of the zero that should be somewhere near the - actual zero. 
- fprime : function, optional - The derivative of the function when available and convenient. If it - is None (default), then the secant method is used. - args : tuple, optional - Extra arguments to be used in the function call. - tol : float, optional - The allowable error of the zero value. - maxiter : int, optional - Maximum number of iterations. - - Returns - ------- - zero : float - Estimated location where function is zero. - - See Also - -------- - brentq, brenth, ridder, bisect : find zeroes in one dimension. - fsolve : find zeroes in n dimensions. - - Notes - ----- - The convergence rate of the Newton-Raphson method is quadratic while - that of the secant method is somewhat less. This means that if the - function is well behaved the actual error in the estimated zero is - approximately the square of the requested tolerance up to roundoff - error. However, the stopping criterion used here is the step size and - there is no guarantee that a zero has been found. Consequently the - result should be verified. Safer algorithms are brentq, brenth, ridder, - and bisect, but they all require that the root first be bracketed in an - interval where the function changes sign. The brentq algorithm is - recommended for general use in one dimensional problems when such an - interval has been found. - - """ - if fprime is not None: - # Newton-Rapheson method - # Multiply by 1.0 to convert to floating point. We don't use float(x0) - # so it still works if x0 is complex. - p0 = 1.0 * x0 - for iter in range(maxiter): - myargs = (p0,) + args - fder = fprime(*myargs) - if fder == 0: - msg = "derivative was zero." 
- warnings.warn(msg, RuntimeWarning) - return p0 - p = p0 - func(*myargs) / fder - if abs(p - p0) < tol: - return p - p0 = p - else: - # Secant method - p0 = x0 - if x0 >= 0: - p1 = x0*(1 + 1e-4) + 1e-4 - else: - p1 = x0*(1 + 1e-4) - 1e-4 - q0 = func(*((p0,) + args)) - q1 = func(*((p1,) + args)) - for iter in range(maxiter): - if q1 == q0: - if p1 != p0: - msg = "Tolerance of %s reached" % (p1 - p0) - warnings.warn(msg, RuntimeWarning) - return (p1 + p0)/2.0 - else: - p = p1 - q1*(p1 - p0)/(q1 - q0) - if abs(p - p1) < tol: - return p - p0 = p1 - q0 = q1 - p1 = p - q1 = func(*((p1,) + args)) - msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) - raise RuntimeError(msg) - - -def bisect(f, a, b, args=(), - xtol=_xtol, rtol=_rtol, maxiter=_iter, - full_output=False, disp=True): - """Find root of f in [a,b]. - - Basic bisection routine to find a zero of the function f between the - arguments a and b. f(a) and f(b) can not have the same signs. Slow but - sure. - - Parameters - ---------- - f : function - Python function returning a number. f must be continuous, and f(a) and - f(b) must have opposite signs. - a : number - One end of the bracketing interval [a,b]. - b : number - The other end of the bracketing interval [a,b]. - xtol : number, optional - The routine converges when a root is known to lie within xtol of the - value return. Should be >= 0. The routine modifies this to take into - account the relative precision of doubles. - maxiter : number, optional - if convergence is not achieved in maxiter iterations, and error is - raised. Must be >= 0. - args : tuple, optional - containing extra arguments for the function `f`. - `f` is called by ``apply(f, (x)+args)``. - full_output : bool, optional - If `full_output` is False, the root is returned. If `full_output` is - True, the return value is ``(x, r)``, where `x` is the root, and `r` is - a RootResults object. 
- disp : bool, optional - If True, raise RuntimeError if the algorithm didn't converge. - - Returns - ------- - x0 : float - Zero of `f` between `a` and `b`. - r : RootResults (present if ``full_output = True``) - Object containing information about the convergence. In particular, - ``r.converged`` is True if the routine converged. - - See Also - -------- - brentq, brenth, bisect, newton : one-dimensional root-finding - fixed_point : scalar fixed-point finder - fsolve : n-dimensional root-finding - - """ - if type(args) != type(()) : - args = (args,) - r = _zeros._bisect(f,a,b,xtol,maxiter,args,full_output,disp) - return results_c(full_output, r) - - -def ridder(f, a, b, args=(), - xtol=_xtol, rtol=_rtol, maxiter=_iter, - full_output=False, disp=True): - """ - Find a root of a function in an interval. - - Parameters - ---------- - f : function - Python function returning a number. f must be continuous, and f(a) and - f(b) must have opposite signs. - a : number - One end of the bracketing interval [a,b]. - b : number - The other end of the bracketing interval [a,b]. - xtol : number, optional - The routine converges when a root is known to lie within xtol of the - value return. Should be >= 0. The routine modifies this to take into - account the relative precision of doubles. - maxiter : number, optional - if convergence is not achieved in maxiter iterations, and error is - raised. Must be >= 0. - args : tuple, optional - containing extra arguments for the function `f`. - `f` is called by ``apply(f, (x)+args)``. - full_output : bool, optional - If `full_output` is False, the root is returned. If `full_output` is - True, the return value is ``(x, r)``, where `x` is the root, and `r` is - a RootResults object. - disp : bool, optional - If True, raise RuntimeError if the algorithm didn't converge. - - Returns - ------- - x0 : float - Zero of `f` between `a` and `b`. 
- r : RootResults (present if ``full_output = True``) - Object containing information about the convergence. - In particular, ``r.converged`` is True if the routine converged. - - See Also - -------- - brentq, brenth, bisect, newton : one-dimensional root-finding - fixed_point : scalar fixed-point finder - - Notes - ----- - Uses [Ridders1979]_ method to find a zero of the function `f` between the - arguments `a` and `b`. Ridders' method is faster than bisection, but not - generally as fast as the Brent rountines. [Ridders1979]_ provides the - classic description and source of the algorithm. A description can also be - found in any recent edition of Numerical Recipes. - - The routine used here diverges slightly from standard presentations in - order to be a bit more careful of tolerance. - - References - ---------- - .. [Ridders1979] - Ridders, C. F. J. "A New Algorithm for Computing a - Single Root of a Real Continuous Function." - IEEE Trans. Circuits Systems 26, 979-980, 1979. - - """ - if type(args) != type(()) : - args = (args,) - r = _zeros._ridder(f,a,b,xtol,maxiter,args,full_output,disp) - return results_c(full_output, r) - - -def brentq(f, a, b, args=(), - xtol=_xtol, rtol=_rtol, maxiter=_iter, - full_output=False, disp=True): - """ - Find a root of a function in given interval. - - Return float, a zero of `f` between `a` and `b`. `f` must be a continuous - function, and [a,b] must be a sign changing interval. - - Description: - Uses the classic Brent (1973) method to find a zero of the function `f` on - the sign changing interval [a , b]. Generally considered the best of the - rootfinding routines here. It is a safe version of the secant method that - uses inverse quadratic extrapolation. Brent's method combines root - bracketing, interval bisection, and inverse quadratic interpolation. It is - sometimes known as the van Wijngaarden-Deker-Brent method. Brent (1973) - claims convergence is guaranteed for functions computable within [a,b]. 
- - [Brent1973]_ provides the classic description of the algorithm. Another - description can be found in a recent edition of Numerical Recipes, including - [PressEtal1992]_. Another description is at - http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to - understand the algorithm just by reading our code. Our code diverges a bit - from standard presentations: we choose a different formula for the - extrapolation step. - - Parameters - ---------- - f : function - Python function returning a number. f must be continuous, and f(a) and - f(b) must have opposite signs. - a : number - One end of the bracketing interval [a,b]. - b : number - The other end of the bracketing interval [a,b]. - xtol : number, optional - The routine converges when a root is known to lie within xtol of the - value return. Should be >= 0. The routine modifies this to take into - account the relative precision of doubles. - maxiter : number, optional - if convergence is not achieved in maxiter iterations, and error is - raised. Must be >= 0. - args : tuple, optional - containing extra arguments for the function `f`. - `f` is called by ``apply(f, (x)+args)``. - full_output : bool, optional - If `full_output` is False, the root is returned. If `full_output` is - True, the return value is ``(x, r)``, where `x` is the root, and `r` is - a RootResults object. - disp : bool, optional - If True, raise RuntimeError if the algorithm didn't converge. - - Returns - ------- - x0 : float - Zero of `f` between `a` and `b`. - r : RootResults (present if ``full_output = True``) - Object containing information about the convergence. In particular, - ``r.converged`` is True if the routine converged. 
- - See Also - -------- - multivariate local optimizers - `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` - nonlinear least squares minimizer - `leastsq` - constrained multivariate optimizers - `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` - global optimizers - `anneal`, `brute` - local scalar minimizers - `fminbound`, `brent`, `golden`, `bracket` - n-dimensional root-finding - `fsolve` - one-dimensional root-finding - `brentq`, `brenth`, `ridder`, `bisect`, `newton` - scalar fixed-point finder - `fixed_point` - - Notes - ----- - `f` must be continuous. f(a) and f(b) must have opposite signs. - - - References - ---------- - .. [Brent1973] - Brent, R. P., - *Algorithms for Minimization Without Derivatives*. - Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. - - .. [PressEtal1992] - Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. - *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. - Cambridge, England: Cambridge University Press, pp. 352-355, 1992. - Section 9.3: "Van Wijngaarden-Dekker-Brent Method." - - """ - if type(args) != type(()) : - args = (args,) - r = _zeros._brentq(f,a,b,xtol,maxiter,args,full_output,disp) - return results_c(full_output, r) - - -def brenth(f, a, b, args=(), - xtol=_xtol, rtol=_rtol, maxiter=_iter, - full_output=False, disp=True): - """Find root of f in [a,b]. - - A variation on the classic Brent routine to find a zero of the function f - between the arguments a and b that uses hyperbolic extrapolation instead of - inverse quadratic extrapolation. There was a paper back in the 1980's ... - f(a) and f(b) can not have the same signs. Generally on a par with the - brent routine, but not as heavily tested. It is a safe version of the - secant method that uses hyperbolic extrapolation. The version here is by - Chuck Harris. - - Parameters - ---------- - f : function - Python function returning a number. f must be continuous, and f(a) and - f(b) must have opposite signs. 
- a : number - One end of the bracketing interval [a,b]. - b : number - The other end of the bracketing interval [a,b]. - xtol : number, optional - The routine converges when a root is known to lie within xtol of the - value return. Should be >= 0. The routine modifies this to take into - account the relative precision of doubles. - maxiter : number, optional - if convergence is not achieved in maxiter iterations, and error is - raised. Must be >= 0. - args : tuple, optional - containing extra arguments for the function `f`. - `f` is called by ``apply(f, (x)+args)``. - full_output : bool, optional - If `full_output` is False, the root is returned. If `full_output` is - True, the return value is ``(x, r)``, where `x` is the root, and `r` is - a RootResults object. - disp : bool, optional - If True, raise RuntimeError if the algorithm didn't converge. - - Returns - ------- - x0 : float - Zero of `f` between `a` and `b`. - r : RootResults (present if ``full_output = True``) - Object containing information about the convergence. In particular, - ``r.converged`` is True if the routine converged. 
- - See Also - -------- - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg : multivariate local optimizers - - leastsq : nonlinear least squares minimizer - - fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers - - anneal, brute : global optimizers - - fminbound, brent, golden, bracket : local scalar minimizers - - fsolve : n-dimensional root-finding - - brentq, brenth, ridder, bisect, newton : one-dimensional root-finding - - fixed_point : scalar fixed-point finder - - """ - if type(args) != type(()) : - args = (args,) - r = _zeros._brenth(f,a, b, xtol, maxiter, args, full_output, disp) - return results_c(full_output, r) diff --git a/scipy-0.10.1/scipy/setup.py b/scipy-0.10.1/scipy/setup.py deleted file mode 100644 index fbf46fdb7d..0000000000 --- a/scipy-0.10.1/scipy/setup.py +++ /dev/null @@ -1,29 +0,0 @@ - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('scipy',parent_package,top_path) - config.add_subpackage('cluster') - config.add_subpackage('constants') - config.add_subpackage('fftpack') - config.add_subpackage('integrate') - config.add_subpackage('interpolate') - config.add_subpackage('io') - config.add_subpackage('lib') - config.add_subpackage('linalg') - config.add_subpackage('maxentropy') - config.add_subpackage('misc') - config.add_subpackage('odr') - config.add_subpackage('optimize') - config.add_subpackage('signal') - config.add_subpackage('sparse') - config.add_subpackage('spatial') - config.add_subpackage('special') - config.add_subpackage('stats') - config.add_subpackage('ndimage') - config.add_subpackage('weave') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/setupscons.py b/scipy-0.10.1/scipy/setupscons.py deleted file mode 100644 index 2c536f3c49..0000000000 --- a/scipy-0.10.1/scipy/setupscons.py 
+++ /dev/null @@ -1,44 +0,0 @@ -from os.path import join as pjoin - -def configuration(parent_package='', top_path=None, setup_name='setupscons.py'): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.misc_util import scons_generate_config_py - - pkgname = 'scipy' - config = Configuration(pkgname, parent_package, top_path, - setup_name = 'setupscons.py') - config.add_subpackage('cluster') - config.add_subpackage('constants') - config.add_subpackage('fftpack') - config.add_subpackage('integrate') - config.add_subpackage('interpolate') - config.add_subpackage('io') - config.add_subpackage('lib') - config.add_subpackage('linalg') - config.add_subpackage('maxentropy') - config.add_subpackage('misc') - config.add_subpackage('odr') - config.add_subpackage('optimize') - config.add_subpackage('signal') - config.add_subpackage('sparse') - config.add_subpackage('spatial') - config.add_subpackage('special') - config.add_subpackage('stats') - config.add_subpackage('ndimage') - config.add_subpackage('weave') - - def add_config(*args, **kw): - # Generate __config__, handle inplace issues. 
- if kw['scons_cmd'].inplace: - target = pjoin(kw['pkg_name'], '__config__.py') - else: - target = pjoin(kw['scons_cmd'].build_lib, kw['pkg_name'], - '__config__.py') - scons_generate_config_py(target) - config.add_sconscript(None, post_hook = add_config) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/signal/C_bspline_util.c b/scipy-0.10.1/scipy/signal/C_bspline_util.c deleted file mode 100644 index 05c54cfaef..0000000000 --- a/scipy-0.10.1/scipy/signal/C_bspline_util.c +++ /dev/null @@ -1,307 +0,0 @@ -#include "Python.h" -#include -#include -#include -#include -#include -#define NO_IMPORT_ARRAY -#include "numpy/arrayobject.h" - -void compute_root_from_lambda(double, double *, double *); - - - - - -#define CONJ(a) (~(a)) -#define ABSQ(a) (__real__ (a*CONJ(a))) -#ifdef __GNUC__ - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] */ -/* with a given starting value loaded into the array */ - -void C_IIR_order1 (__complex__ float,__complex__ float,__complex__ float*,__complex__ float*,int,int,int); -void C_IIR_order2 (__complex__ float,__complex__ float,__complex__ float,__complex__ float*,__complex__ float*,int,int,int); -void C_IIR_order2_cascade (__complex__ float,__complex__ float,__complex__ float,__complex__ float,__complex__ float*,__complex__ float*,int,int,int); -int C_IIR_forback1(__complex__ float,__complex__ float,__complex__ float*,__complex__ float*,int,int,int,float); -void C_FIR_mirror_symmetric(__complex__ float*,__complex__ float*,int,__complex__ float*,int,int,int); -int C_separable_2Dconvolve_mirror(__complex__ float*,__complex__ float*,int,int,__complex__ float*,__complex__ float*,int,int,npy_intp*,npy_intp*); - -void -C_IIR_order1 (a1, a2, x, y, N, stridex, stridey) - __complex__ float a1; - __complex__ float a2; - __complex__ float *x; - __complex__ float *y; - int N, stridex, stridey; -{ - 
__complex__ float *yvec = y+stridey; - __complex__ float *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] + a3 * y[n-2] */ -/* with two starting values loaded into the array */ -void -C_IIR_order2 (a1, a2, a3, x, y, N, stridex, stridey) - __complex__ float a1; - __complex__ float a2; - __complex__ float a3; - __complex__ float *x; - __complex__ float *y; - int N, stridex, stridey; -{ - __complex__ float *yvec = y+2*stridey; - __complex__ float *xvec = x+2*stridex; - int n; - - for (n=2; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2 + *(yvec-2*stridey) * a3; - yvec += stridey; - xvec += stridex; - } -} - -/* Implement a second order IIR difference equation using a cascade - of first order sections. Suppose the transfer function is - cs - H(z) = ------------------- - (1-z1/z) ( 1-z2/z) - - then the following pair is implemented with one starting value loaded in - the output array and the starting value for the intermediate array - passed in as yp0. - - y1[n] = x[n] + z1 y1[n-1] - yp[n] = cs y1[n] + z2 yp[n-1] - -*/ - -void -C_IIR_order2_cascade (cs, z1, z2, y1_0, x, yp, N, stridex, stridey) - __complex__ float cs; - __complex__ float z1; - __complex__ float z2; - __complex__ float y1_0; - __complex__ float *x; - __complex__ float *yp; - int N, stridex, stridey; -{ - __complex__ float *yvec = yp+stridey; - __complex__ float *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - y1_0 = *xvec + y1_0 * z1; - *yvec = cs * y1_0 + *(yvec-stridey) * z2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement a smoothing IIR filter with mirror-symmetric boundary conditions - using a cascade of first-order sections. The second section uses a - reversed sequence. 
This implements the following transfer function: - c0 - H(z) = --------------------------- - (1-z1/z) (1 - z1 z) - - with the following difference equations: - - yp[n] = x[n] + z1 yp[n-1] - with starting condition: - yp[0] = x[0] + Sum(z1^(k+1) x[k],k=0..Infinity) - - and - - y[n] = z1 y[n+1] + c0 yp[n] - with starting condition: - y[N-1] = z1 / (z1-1) yp[N-1] - - The resulting signal will have mirror symmetric boundary conditions as well. - - If memory could not be allocated for the temporary vector yp, the - function returns -1 otherwise it returns 0. - - z1 should be less than 1; - -*/ - -int -C_IIR_forback1 (c0, z1, x, y, N, stridex, stridey, precision) - __complex__ float c0; - __complex__ float z1; - __complex__ float *x; - __complex__ float *y; - int N, stridex, stridey; - float precision; -{ - __complex__ float *yp = NULL; - __complex__ float *xptr = x; - __complex__ float yp0; - __complex__ float powz1; - __complex__ float diff; - float err; - int k; - - if (ABSQ(z1) >= 1.0) return -2; /* z1 not less than 1 */ - - /* Initialize memory for loop */ - if ((yp = malloc(N*sizeof(__complex__ float)))==NULL) return -1; - - /* Fix starting value assuming mirror-symmetric boundary conditions. 
*/ - yp0 = x[0]; - powz1 = 1.0; - k = 0; - precision *= precision; - do { - yp[0] = yp0; - powz1 *= z1; - yp0 += powz1 * (*xptr); - diff = powz1; - err = ABSQ(diff); - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) return -3; /* sum did not converge */ - yp[0] = yp0; - - C_IIR_order1(1.0, z1, x, yp, N, stridex, 1); - - *(y + (N-1)*stridey) = -c0 / (z1 - 1.0) * yp[N-1]; - - C_IIR_order1(c0, z1, yp+N-1, y+(N-1)*stridey, N, -1, -stridey); - - free(yp); - return 0; -} - - -/* h must be odd length */ -/* strides in units of sizeof(__complex__ float) bytes */ -void -C_FIR_mirror_symmetric (in, out, N, h, Nh, instride, outstride) - __complex__ float *in; - __complex__ float *out; - int N, Nh; - __complex__ float *h; - int instride, outstride; -{ - int n, k; - int Nhdiv2 = Nh >> 1; - __complex__ float *outptr; - __complex__ float *inptr; - __complex__ float *hptr; - - /* first part boundary conditions */ - outptr = out; - for (n=0; n < Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - inptr += instride; - for (k=n+1; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - outptr += outstride; - } - - /* middle section */ - outptr = out + Nhdiv2*outstride; - for (n=Nhdiv2; n < N-Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - - /* end boundary conditions */ - outptr = out + (N-Nhdiv2)*outstride; - for (n=N-Nhdiv2; n < N; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (2*N-1-n-Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n-N; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - inptr -= instride; - for (k=n+1-N; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - -} - -int 
-C_separable_2Dconvolve_mirror(in, out, M, N, hr, hc, Nhr, - Nhc, instrides, outstrides) - __complex__ float *in; - __complex__ float *out; - int M, N; - __complex__ float *hr, *hc; - int Nhr, Nhc; - npy_intp *instrides, *outstrides; -{ - int m, n; - __complex__ float *tmpmem; - __complex__ float *inptr=NULL, *outptr=NULL; - - tmpmem = malloc(M*N*sizeof(__complex__ float)); - if (tmpmem == NULL) return -1; - - if (Nhr > 0) { - /* filter across rows */ - inptr = in; - outptr = tmpmem; - for (m = 0; m < M; m++) { - C_FIR_mirror_symmetric (inptr, outptr, N, hr, Nhr, instrides[1], 1); - inptr += instrides[0]; - outptr += N; - } - } - else - memmove(tmpmem, inptr, M*N*sizeof(__complex__ float)); - - if (Nhc > 0) { - /* filter down columns */ - inptr = tmpmem; - outptr = out; - for (n = 0; n < N; n++) { - C_FIR_mirror_symmetric (inptr, outptr, M, hc, Nhc, N, outstrides[0]); - outptr += outstrides[1]; - inptr += 1; - } - } - else - memmove(outptr, tmpmem, M*N*sizeof(__complex__ float)); - - free(tmpmem); - return 0; -} -#endif - - - - diff --git a/scipy-0.10.1/scipy/signal/D_bspline_util.c b/scipy-0.10.1/scipy/signal/D_bspline_util.c deleted file mode 100644 index e07c395576..0000000000 --- a/scipy-0.10.1/scipy/signal/D_bspline_util.c +++ /dev/null @@ -1,646 +0,0 @@ -#include "Python.h" -#include -#include -#include -#include -#include -#define NO_IMPORT_ARRAY -#include "numpy/arrayobject.h" - -void compute_root_from_lambda(double, double *, double *); - -#ifndef M_PI -#define M_PI 3.14159265358979323846 /* pi */ -#endif - -#define CONJ(a) ((a)) -#define ABSQ(a) ( (a*CONJ(a))) - -void D_IIR_order1(double,double,double*,double*,int,int,int); -void D_IIR_order2(double,double,double,double*,double*,int,int,int); -void D_IIR_order2_cascade(double,double,double,double,double*,double*,int,int,int); -int D_IIR_forback1(double,double,double*,double*,int,int,int,double); -void D_FIR_mirror_symmetric(double*,double*,int,double*,int,int,int); -int 
D_separable_2Dconvolve_mirror(double*,double*,int,int,double*,double*,int,int,npy_intp*,npy_intp*); -int D_IIR_forback2(double,double,double*,double*,int,int,int,double); -int D_cubic_spline2D(double*,double*,int,int,double,npy_intp*,npy_intp*,double); -int D_quadratic_spline2D(double*,double*,int,int,double,npy_intp*,npy_intp*,double); - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] */ -/* with a given starting value loaded into the array */ - -void -D_IIR_order1 (a1, a2, x, y, N, stridex, stridey) - double a1; - double a2; - double *x; - double *y; - int N, stridex, stridey; -{ - double *yvec = y+stridey; - double *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] + a3 * y[n-2] */ -/* with two starting values loaded into the array */ -void -D_IIR_order2 (a1, a2, a3, x, y, N, stridex, stridey) - double a1; - double a2; - double a3; - double *x; - double *y; - int N, stridex, stridey; -{ - double *yvec = y+2*stridey; - double *xvec = x+2*stridex; - int n; - - for (n=2; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2 + *(yvec-2*stridey) * a3; - yvec += stridey; - xvec += stridex; - } -} - -/* Implement a second order IIR difference equation using a cascade - of first order sections. Suppose the transfer function is - cs - H(z) = ------------------- - (1-z1/z) ( 1-z2/z) - - then the following pair is implemented with one starting value loaded in - the output array and the starting value for the intermediate array - passed in as yp0. 
- - y1[n] = x[n] + z1 y1[n-1] - yp[n] = cs y1[n] + z2 yp[n-1] - -*/ - -void -D_IIR_order2_cascade (cs, z1, z2, y1_0, x, yp, N, stridex, stridey) - double cs; - double z1; - double z2; - double y1_0; - double *x; - double *yp; - int N, stridex, stridey; -{ - double *yvec = yp+stridey; - double *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - y1_0 = *xvec + y1_0 * z1; - *yvec = cs * y1_0 + *(yvec-stridey) * z2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement a smoothing IIR filter with mirror-symmetric boundary conditions - using a cascade of first-order sections. The second section uses a - reversed sequence. This implements the following transfer function: - c0 - H(z) = --------------------------- - (1-z1/z) (1 - z1 z) - - with the following difference equations: - - yp[n] = x[n] + z1 yp[n-1] - with starting condition: - yp[0] = x[0] + Sum(z1^(k+1) x[k],k=0..Infinity) - - and - - y[n] = z1 y[n+1] + c0 yp[n] - with starting condition: - y[N-1] = z1 / (z1-1) yp[N-1] - - The resulting signal will have mirror symmetric boundary conditions as well. - - If memory could not be allocated for the temporary vector yp, the - function returns -1 otherwise it returns 0. - - z1 should be less than 1; - -*/ - -int -D_IIR_forback1 (c0, z1, x, y, N, stridex, stridey, precision) - double c0; - double z1; - double *x; - double *y; - int N, stridex, stridey; - double precision; -{ - double *yp = NULL; - double *xptr = x; - double yp0; - double powz1; - double diff; - double err; - int k; - - if (ABSQ(z1) >= 1.0) return -2; /* z1 not less than 1 */ - - /* Initialize memory for loop */ - if ((yp = malloc(N*sizeof(double)))==NULL) return -1; - - /* Fix starting value assuming mirror-symmetric boundary conditions. 
*/ - yp0 = x[0]; - powz1 = 1.0; - k = 0; - precision *= precision; - do { - yp[0] = yp0; - powz1 *= z1; - yp0 += powz1 * (*xptr); - diff = powz1; - err = ABSQ(diff); - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) return -3; /* sum did not converge */ - yp[0] = yp0; - - D_IIR_order1(1.0, z1, x, yp, N, stridex, 1); - - *(y + (N-1)*stridey) = -c0 / (z1 - 1.0) * yp[N-1]; - - D_IIR_order1(c0, z1, yp+N-1, y+(N-1)*stridey, N, -1, -stridey); - - free(yp); - return 0; -} - - -/* h must be odd length */ -/* strides in units of sizeof(double) bytes */ -void -D_FIR_mirror_symmetric (in, out, N, h, Nh, instride, outstride) - double *in; - double *out; - int N, Nh; - double *h; - int instride, outstride; -{ - int n, k; - int Nhdiv2 = Nh >> 1; - double *outptr; - double *inptr; - double *hptr; - - /* first part boundary conditions */ - outptr = out; - for (n=0; n < Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - inptr += instride; - for (k=n+1; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - outptr += outstride; - } - - /* middle section */ - outptr = out + Nhdiv2*outstride; - for (n=Nhdiv2; n < N-Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - - /* end boundary conditions */ - outptr = out + (N-Nhdiv2)*outstride; - for (n=N-Nhdiv2; n < N; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (2*N-1-n-Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n-N; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - inptr -= instride; - for (k=n+1-N; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - -} - -int -D_separable_2Dconvolve_mirror(in, out, M, N, hr, hc, Nhr, - Nhc, instrides, outstrides) - 
double *in; - double *out; - int M, N; - double *hr, *hc; - int Nhr, Nhc; - npy_intp *instrides, *outstrides; -{ - int m, n; - double *tmpmem; - double *inptr=0, *outptr=0; - - tmpmem = malloc(M*N*sizeof(double)); - if (tmpmem == NULL) return -1; - - if (Nhr > 0) { - /* filter across rows */ - inptr = in; - outptr = tmpmem; - for (m = 0; m < M; m++) { - D_FIR_mirror_symmetric (inptr, outptr, N, hr, Nhr, instrides[1], 1); - inptr += instrides[0]; - outptr += N; - } - } - else - memmove(tmpmem, inptr, M*N*sizeof(double)); - - if (Nhc > 0) { - /* filter down columns */ - inptr = tmpmem; - outptr = out; - for (n = 0; n < N; n++) { - D_FIR_mirror_symmetric (inptr, outptr, M, hc, Nhc, N, outstrides[0]); - outptr += outstrides[1]; - inptr += 1; - } - } - else - memmove(outptr, tmpmem, M*N*sizeof(double)); - - free(tmpmem); - return 0; -} - - -static double D_hc(int,double,double,double); -static double D_hs(int,double,double,double); - -double -D_hc(k, cs, r, omega) - int k; - double cs; - double r, omega; -{ - if (k < 0) return 0.0; - if (omega == 0.0) - return cs * pow(r, (double )k) * (k+1); - else if (omega == M_PI) - return cs * pow(r, (double )k) * (k+1) * (1 - 2*(k % 2)); - return cs * pow(r, (double) k) * sin(omega * (k+1)) / sin(omega); -} - -double -D_hs(k, cs, rsq, omega) - int k; - double cs; - double rsq, omega; -{ - double cssq; - double c0; - double gamma, rsupk; - - cssq = cs * cs; - k = abs(k); - rsupk = pow(rsq, ((double ) k) / 2.0); - if (omega == 0.0) { - c0 = (1+rsq)/ ((1-rsq)*(1-rsq)*(1-rsq)) * cssq; - gamma = (1-rsq) / (1+rsq); - return c0 * rsupk * (1 + gamma * k); - } - if (omega == M_PI) { - c0 = (1+rsq)/ ((1-rsq)*(1-rsq)*(1-rsq)) * cssq; - gamma = (1-rsq) / (1+rsq) * (1 - 2 * (k % 2)); - return c0 * rsupk * (1 + gamma * k); - } - c0 = cssq * (1.0+rsq)/(1.0-rsq) / (1-2*rsq*cos(2*omega) + rsq*rsq); - gamma = (1.0 - rsq)/ (1.0+rsq) / tan(omega); - return c0 * rsupk * (cos(omega*k) + gamma * sin(omega * k)); -} - - -/* Implement a smoothing IIR 
filter with mirror-symmetric boundary conditions - using a cascade of second-order sections. The second section uses a - reversed sequence. This implements the following transfer function: - - cs^2 - H(z) = -------------------------------------- - (1 - a2/z - a3/z^2) (1 - a2 z -a3 z^2 ) - - where a2 = (2 r cos omega) - a3 = - r^2 - cs = 1 - 2 r cos omega + r^2 - - with the following difference equations: - - yp[n] = cs*x[n] - b1 yp[n-1] - b2 yp[n-2] - with starting conditions: - yp[0] = hc[0] x[0] + Sum(hc[k+1]*x[k],k=0..Infinity) - yp[1] = hc[0] x[1] + hc[1] x[0] + Sum(hc[k+2] x[k], k=0..Infinity) - - and - - y[n] = cs*yp[n] - b1 y[n+1] -b2 y[n+2] - with starting conditions: - y[N-1] = Sum((hs[k] + hs[k+1])x[N-1-k],k=0..Infinity) - y[N-2] = Sum((hs[k-1] + hs[k+2])x[N-1-k],k=0..Infinity) - - The resulting signal will have mirror symmetric boundary conditions as well. - - If memory could not be allocated for the temporary vector yp, the - function returns -1 otherwise it returns 0. - - z1 should be less than 1; - -*/ - -int -D_IIR_forback2 (r, omega, x, y, N, stridex, stridey, precision) - double r,omega; - double *x; - double *y; - int N, stridex, stridey; - double precision; -{ - double cs; - double *yp = NULL; - double *yptr; - double *xptr; - double yp0; - double yp1; - double rsq; - double diff; - double err; - double a2, a3; - int k; - - if (r >= 1.0) return -2; /* z1 not less than 1 */ - - /* Initialize memory for loop */ - if ((yp = malloc(N*sizeof(double)))==NULL) return -1; - - rsq = r * r; - a2 = 2 * r * cos(omega); - a3 = -rsq; - cs = 1 - 2 * r * cos(omega) + rsq; - - /* Fix starting values assuming mirror-symmetric boundary conditions. 
*/ - yp0 = D_hc(0, cs, r, omega) * x[0]; - k = 0; - precision *= precision; - xptr = x; - do { - yp[0] = yp0; - diff = D_hc(k+1, cs, r, omega); - yp0 += diff * (*xptr); - err = diff * diff; - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - yp[0] = yp0; - - yp1 = D_hc(0, cs, r, omega) * (*(x+stridex)); - yp1 += D_hc(1, cs, r, omega) * x[0]; - k = 0; - xptr = x; - do { - yp[1] = yp1; - diff = D_hc(k+2, cs, r, omega); - yp1 += diff * (*xptr); - err = diff * diff; - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - yp[1] = yp1; - - D_IIR_order2(cs, a2, a3, x, yp, N, stridex, 1); - - /* Fix starting values assuming mirror-symmetric boundary conditions. */ - yp0 = 0.0; - k = 0; - yptr = y + (N-1)*stridey; - xptr = x + (N-1)*stridex; - do { - *yptr = yp0; - diff = (D_hs(k, cs, rsq, omega) + D_hs(k+1, cs, rsq, omega)); - yp0 += diff * (*xptr); - err = diff * diff; - xptr -= stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - *yptr = yp0; - - yp1 = 0.0; - k = 0; - yptr -= stridey; /* Initialize in next-to-last slot in output array */ - xptr = x + (N-1)*stridex; - do { - *yptr = yp1; - diff = (D_hs(k-1, cs, rsq, omega) + D_hs(k+2, cs, rsq, omega)); - yp1 += diff * (*xptr); - err = diff * diff; - xptr -= stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - *yptr = yp1; - - D_IIR_order2(cs, a2, a3, yp+N-1, yptr+stridey, N, -1, -stridey); - - free(yp); - return 0; -} - -/* Find the cubic spline coefficients of an image - image is M rows by N columns stored rowise in memory (vary column number - first). It will be replaced with the spline coefficients. 
- lambda is a smoothing parameter (lambda = 100 approximately corresponds - to a cutoff frequency of 0.1*(sample freq)) - strides is an integer array [rowstride, colstride] - telling how much memory in units of sizeof(double) bytes to skip - to get to the next element. -*/ - -/* to get the (smoothed) image back mirror-symmetric convolve with a length - three separable FIR filter [1.0, 4.0, 1.0]/ 6.0 -*/ - -int -D_cubic_spline2D(image, coeffs, M, N, lambda, strides, cstrides, precision) - double *image; - double *coeffs; - int M, N; - double lambda; - npy_intp *strides, *cstrides; - double precision; -{ - double r, omega; - double *inptr; - double *coptr; - double *tmpmem; - double *tptr; - int m,n, retval=0; - - tmpmem = malloc(N*M*sizeof(double)); - if (tmpmem == NULL) return -1; - - if (lambda <= 1.0 / 144.0) { - /* normal cubic spline */ - r = -2 + sqrt(3.0); - - /* Loop over rows */ - inptr = image; - tptr = tmpmem; - for (m = 0; m < M; m++) { - retval = D_IIR_forback1 (-r*6.0, r, inptr, tptr, N, strides[1], 1, precision); - if (retval < 0) break; - inptr += strides[0]; - tptr += N; - } - - if (retval >=0) { - /* Loop over columns */ - tptr = tmpmem; - coptr = coeffs; - for (n = 0; n < N; n++) { - retval = D_IIR_forback1 (-r*6.0, r, tptr, coptr, M, N, cstrides[0], precision); - if (retval < 0) break; - coptr += cstrides[1]; - tptr += 1; - } - } - free(tmpmem); - return retval; - } - - /* Smoothing spline */ - - /* Compute r and omega from lambda */ - compute_root_from_lambda(lambda, &r, &omega); - - /* Loop over rows */ - inptr = image; - tptr = tmpmem; - for (m = 0; m < M; m++) { - retval = D_IIR_forback2 (r, omega, inptr, tptr, N, strides[1], - 1, precision); - if (retval < 0) break; - inptr += strides[0]; - tptr += N; - } - /* Loop over columns */ - tptr = tmpmem; - coptr = coeffs; - for (n = 0; n < N; n++) { - retval = D_IIR_forback2 (r, omega, tptr, coptr, M, N, - cstrides[0], precision); - if (retval < 0) break; - coptr += cstrides[1]; - tptr += 1; - } - 
- free(tmpmem); - return retval; -} - -/* Find the quadratic spline coefficients of an image - image is M rows by N columns stored rowise in memory (vary column number - first). It will be replaced with the spline coefficients. - lambda is a smoothing parameter (lambda = 100 approximately corresponds - to a cutoff frequency of 0.1*(sample freq)) - must be zero for now. - strides is an integer array [rowstride, colstride] - telling how much memory in units of sizeof(double) bytes to skip - to get to the next element. -*/ - -/* to get the (smoothed) image back mirror-symmetric convolve with a length - three separable FIR filter [1.0, 6.0, 1.0]/ 8.0 -*/ - -int -D_quadratic_spline2D(image, coeffs, M, N, lambda, strides, cstrides, precision) - double *image; - double *coeffs; - int M, N; - double lambda; - npy_intp *strides, *cstrides; - double precision; -{ - double r; - double *inptr; - double *coptr; - double *tmpmem; - double *tptr; - int m,n, retval=0; - - tmpmem = malloc(N*M*sizeof(double)); - if (tmpmem == NULL) return -1; - - if (lambda > 0) return -2; - /* normal quadratic spline */ - r = -3 + 2*sqrt(2.0); - - /* Loop over rows */ - inptr = image; - tptr = tmpmem; - for (m = 0; m < M; m++) { - retval = D_IIR_forback1 (-r*8.0, r, inptr, tptr, N, strides[1], 1, precision); - if (retval < 0) break; - inptr += strides[0]; - tptr += N; - } - - if (retval >=0) { - /* Loop over columns */ - tptr = tmpmem; - coptr = coeffs; - for (n = 0; n < N; n++) { - retval = D_IIR_forback1 (-r*8.0, r, tptr, coptr, M, N, cstrides[0], precision); - if (retval < 0) break; - coptr += cstrides[1]; - tptr += 1; - } - } - free(tmpmem); - return retval; -} - - - - - diff --git a/scipy-0.10.1/scipy/signal/SConscript b/scipy-0.10.1/scipy/signal/SConscript deleted file mode 100644 index a3600765af..0000000000 --- a/scipy-0.10.1/scipy/signal/SConscript +++ /dev/null @@ -1,21 +0,0 @@ -# Last Change: Mon Apr 20 04:00 PM 2009 J -# vim:syntax=python -from os.path import join - -from numscons 
import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -src = env.FromCTemplate("lfilter.c.src") -src += env.FromCTemplate("correlate_nd.c.src") -env.NumpyPythonExtension('sigtools', - source = src + ['sigtoolsmodule.c',\ - 'firfilter.c', \ - 'medianfilter.c']) - -env.NumpyPythonExtension('spectral', source='spectral.c') - -env.NumpyPythonExtension('spline', - source = ['splinemodule.c', 'S_bspline_util.c', - 'D_bspline_util.c', 'C_bspline_util.c', - 'Z_bspline_util.c','bspline_util.c']) diff --git a/scipy-0.10.1/scipy/signal/SConstruct b/scipy-0.10.1/scipy/signal/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/signal/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/signal/S_bspline_util.c b/scipy-0.10.1/scipy/signal/S_bspline_util.c deleted file mode 100644 index a6c7f8ea2b..0000000000 --- a/scipy-0.10.1/scipy/signal/S_bspline_util.c +++ /dev/null @@ -1,587 +0,0 @@ -#include "Python.h" -#include -#include -#include -#include -#include -#define NO_IMPORT_ARRAY -#include "numpy/noprefix.h" - -void compute_root_from_lambda(double, double *, double *); - -#ifndef M_PI -#define M_PI 3.14159265358979323846 /* pi */ -#endif - -#define CONJ(a) ((a)) -#define ABSQ(a) ( (a*CONJ(a))) - -void S_IIR_order1(float,float,float*,float*,int,int,int); -void S_IIR_order2(float,float,float,float*,float*,int,int,int); -void S_IIR_order2_cascade(float,float,float,float,float*,float*,int,int,int); -int S_IIR_forback1(float,float,float*,float*,int,int,int,float); -void S_FIR_mirror_symmetric(float*,float*,int,float*,int,int,int); -int S_separable_2Dconvolve_mirror(float*,float*,int,int,float*,float*,int,int,intp*,intp*); -int S_IIR_forback2(double,double,float*,float*,int,int,int,float); -int S_cubic_spline2D(float*,float*,int,int,double,intp*,intp*,float); -int 
S_quadratic_spline2D(float*,float*,int,int,double,intp*,intp*,float); - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] */ -/* with a given starting value loaded into the array */ - -void -S_IIR_order1 (float a1, float a2, float *x, float *y, - int N, int stridex, int stridey) { - float *yvec = y+stridey; - float *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] + a3 * y[n-2] */ -/* with two starting values loaded into the array */ -void -S_IIR_order2 (float a1, float a2, float a3, float *x, float *y, - int N, int stridex, int stridey) { - float *yvec = y+2*stridey; - float *xvec = x+2*stridex; - int n; - - for (n=2; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2 + *(yvec-2*stridey) * a3; - yvec += stridey; - xvec += stridex; - } -} - -/* Implement a second order IIR difference equation using a cascade - of first order sections. Suppose the transfer function is - cs - H(z) = ------------------- - (1-z1/z) ( 1-z2/z) - - then the following pair is implemented with one starting value loaded in - the output array and the starting value for the intermediate array - passed in as yp0. - - y1[n] = x[n] + z1 y1[n-1] - yp[n] = cs y1[n] + z2 yp[n-1] - -*/ - -void -S_IIR_order2_cascade (float cs, float z1, float z2, float y1_0, - float *x, float *yp, int N, int stridex, int stridey) { - float *yvec = yp+stridey; - float *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - y1_0 = *xvec + y1_0 * z1; - *yvec = cs * y1_0 + *(yvec-stridey) * z2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement a smoothing IIR filter with mirror-symmetric boundary conditions - using a cascade of first-order sections. The second section uses a - reversed sequence. 
This implements the following transfer function: - c0 - H(z) = --------------------------- - (1-z1/z) (1 - z1 z) - - with the following difference equations: - - yp[n] = x[n] + z1 yp[n-1] - with starting condition: - yp[0] = x[0] + Sum(z1^(k+1) x[k],k=0..Infinity) - - and - - y[n] = z1 y[n+1] + c0 yp[n] - with starting condition: - y[N-1] = z1 / (z1-1) yp[N-1] - - The resulting signal will have mirror symmetric boundary conditions as well. - - If memory could not be allocated for the temporary vector yp, the - function returns -1 otherwise it returns 0. - - z1 should be less than 1; - -*/ - -int -S_IIR_forback1 (float c0, float z1, float *x, float *y, - int N, int stridex, int stridey, float precision) { - float *yp = NULL; - float *xptr = x; - float yp0; - float powz1; - float diff; - float err; - int k; - - if (ABSQ(z1) >= 1.0) return -2; /* z1 not less than 1 */ - - /* Initialize memory for loop */ - if ((yp = malloc(N*sizeof(float)))==NULL) return -1; - - /* Fix starting value assuming mirror-symmetric boundary conditions. 
*/ - yp0 = x[0]; - powz1 = 1.0; - k = 0; - precision *= precision; - do { - yp[0] = yp0; - powz1 *= z1; - yp0 += powz1 * (*xptr); - diff = powz1; - err = ABSQ(diff); - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) return -3; /* sum did not converge */ - yp[0] = yp0; - - S_IIR_order1(1.0, z1, x, yp, N, stridex, 1); - - *(y + (N-1)*stridey) = -c0 / (z1 - 1.0) * yp[N-1]; - - S_IIR_order1(c0, z1, yp+N-1, y+(N-1)*stridey, N, -1, -stridey); - - free(yp); - return 0; -} - - -/* h must be odd length */ -/* strides in units of sizeof(float) bytes */ - -void -S_FIR_mirror_symmetric (float *in, float *out, int N, float *h, int Nh, - int instride, int outstride) { - int n, k; - int Nhdiv2 = Nh >> 1; - float *outptr; - float *inptr; - float *hptr; - - /* first part boundary conditions */ - outptr = out; - for (n=0; n < Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - inptr += instride; - for (k=n+1; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - outptr += outstride; - } - - /* middle section */ - outptr = out + Nhdiv2*outstride; - for (n=Nhdiv2; n < N-Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - - /* end boundary conditions */ - outptr = out + (N-Nhdiv2)*outstride; - for (n=N-Nhdiv2; n < N; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (2*N-1-n-Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n-N; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - inptr -= instride; - for (k=n+1-N; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - -} - -int -S_separable_2Dconvolve_mirror(float *in, float *out, int M, int N, - float *hr, float *hc, int Nhr, - int Nhc, intp *instrides, intp 
*outstrides) { - int m, n; - float *tmpmem; - float *inptr=NULL, *outptr=NULL; - - tmpmem = malloc(M*N*sizeof(float)); - if (tmpmem == NULL) return -1; - - if (Nhr > 0) { - /* filter across rows */ - inptr = in; - outptr = tmpmem; - for (m = 0; m < M; m++) { - S_FIR_mirror_symmetric (inptr, outptr, N, hr, Nhr, instrides[1], 1); - inptr += instrides[0]; - outptr += N; - } - } - else - memmove(tmpmem, inptr, M*N*sizeof(float)); - - if (Nhc > 0) { - /* filter down columns */ - inptr = tmpmem; - outptr = out; - for (n = 0; n < N; n++) { - S_FIR_mirror_symmetric (inptr, outptr, M, hc, Nhc, N, outstrides[0]); - outptr += outstrides[1]; - inptr += 1; - } - } - else - memmove(outptr, tmpmem, M*N*sizeof(float)); - - free(tmpmem); - return 0; -} - - -static float S_hc(int,float,double,double); -static float S_hs(int,float,double,double); - -float -S_hc(int k, float cs, double r, double omega) { - if (k < 0) return 0.0; - if (omega == 0.0) - return cs * pow(r, (double )k) * (k+1); - else if (omega == M_PI) - return cs * pow(r, (double )k) * (k+1) * (1 - 2*(k % 2)); - return cs * pow(r, (double) k) * sin(omega * (k+1)) / sin(omega); -} - -float -S_hs(int k, float cs, double rsq, double omega) { - float cssq; - float c0; - double gamma, rsupk; - - cssq = cs * cs; - k = abs(k); - rsupk = pow(rsq, ((double ) k) / 2.0); - if (omega == 0.0) { - c0 = (1+rsq)/ ((1-rsq)*(1-rsq)*(1-rsq)) * cssq; - gamma = (1-rsq) / (1+rsq); - return c0 * rsupk * (1 + gamma * k); - } - if (omega == M_PI) { - c0 = (1+rsq)/ ((1-rsq)*(1-rsq)*(1-rsq)) * cssq; - gamma = (1-rsq) / (1+rsq) * (1 - 2 * (k % 2)); - return c0 * rsupk * (1 + gamma * k); - } - c0 = cssq * (1.0+rsq)/(1.0-rsq) / (1-2*rsq*cos(2*omega) + rsq*rsq); - gamma = (1.0 - rsq)/ (1.0+rsq) / tan(omega); - return c0 * rsupk * (cos(omega*k) + gamma * sin(omega * k)); -} - - -/* Implement a smoothing IIR filter with mirror-symmetric boundary conditions - using a cascade of second-order sections. The second section uses a - reversed sequence. 
This implements the following transfer function: - - cs^2 - H(z) = -------------------------------------- - (1 - a2/z - a3/z^2) (1 - a2 z -a3 z^2 ) - - where a2 = (2 r cos omega) - a3 = - r^2 - cs = 1 - 2 r cos omega + r^2 - - with the following difference equations: - - yp[n] = cs*x[n] - b1 yp[n-1] - b2 yp[n-2] - with starting conditions: - yp[0] = hc[0] x[0] + Sum(hc[k+1]*x[k],k=0..Infinity) - yp[1] = hc[0] x[1] + hc[1] x[0] + Sum(hc[k+2] x[k], k=0..Infinity) - - and - - y[n] = cs*yp[n] - b1 y[n+1] -b2 y[n+2] - with starting conditions: - y[N-1] = Sum((hs[k] + hs[k+1])x[N-1-k],k=0..Infinity) - y[N-2] = Sum((hs[k-1] + hs[k+2])x[N-1-k],k=0..Infinity) - - The resulting signal will have mirror symmetric boundary conditions as well. - - If memory could not be allocated for the temporary vector yp, the - function returns -1 otherwise it returns 0. - - z1 should be less than 1; - -*/ - -int -S_IIR_forback2 (double r, double omega, float *x, float *y, - int N, int stridex, int stridey, float precision) { - float cs; - float *yp = NULL; - float *yptr; - float *xptr; - float yp0; - float yp1; - double rsq; - float diff; - float err; - float a2, a3; - int k; - - if (r >= 1.0) return -2; /* z1 not less than 1 */ - - /* Initialize memory for loop */ - if ((yp = malloc(N*sizeof(float)))==NULL) return -1; - - rsq = r * r; - a2 = 2 * r * cos(omega); - a3 = -rsq; - cs = 1 - 2 * r * cos(omega) + rsq; - - /* Fix starting values assuming mirror-symmetric boundary conditions. 
*/ - yp0 = S_hc(0, cs, r, omega) * x[0]; - k = 0; - precision *= precision; - xptr = x; - do { - yp[0] = yp0; - diff = S_hc(k+1, cs, r, omega); - yp0 += diff * (*xptr); - err = diff * diff; - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - yp[0] = yp0; - - yp1 = S_hc(0, cs, r, omega) * (*(x+stridex)); - yp1 += S_hc(1, cs, r, omega) * x[0]; - k = 0; - xptr = x; - do { - yp[1] = yp1; - diff = S_hc(k+2, cs, r, omega); - yp1 += diff * (*xptr); - err = diff * diff; - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - yp[1] = yp1; - - S_IIR_order2(cs, a2, a3, x, yp, N, stridex, 1); - - /* Fix starting values assuming mirror-symmetric boundary conditions. */ - yp0 = 0.0; - k = 0; - yptr = y + (N-1)*stridey; - xptr = x + (N-1)*stridex; - do { - *yptr = yp0; - diff = (S_hs(k, cs, rsq, omega) + S_hs(k+1, cs, rsq, omega)); - yp0 += diff * (*xptr); - err = diff * diff; - xptr -= stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - *yptr = yp0; - - yp1 = 0.0; - k = 0; - yptr -= stridey; /* Initialize in next-to-last slot in output array */ - xptr = x + (N-1)*stridex; - do { - *yptr = yp1; - diff = (S_hs(k-1, cs, rsq, omega) + S_hs(k+2, cs, rsq, omega)); - yp1 += diff * (*xptr); - err = diff * diff; - xptr -= stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) {free(yp); return -3;} /* sum did not converge */ - *yptr = yp1; - - S_IIR_order2(cs, a2, a3, yp+N-1, yptr+stridey, N, -1, -stridey); - - free(yp); - return 0; -} - -/* Find the cubic spline coefficients of an image - image is M rows by N columns stored rowise in memory (vary column number - first). It will be replaced with the spline coefficients. 
- lambda is a smoothing parameter (lambda = 100 approximately corresponds - to a cutoff frequency of 0.1*(sample freq)) - strides is an integer array [rowstride, colstride] - telling how much memory in units of sizeof(float) bytes to skip - to get to the next element. -*/ - -/* to get the (smoothed) image back mirror-symmetric convolve with a length - three separable FIR filter [1.0, 4.0, 1.0]/ 6.0 -*/ - -int -S_cubic_spline2D(float *image, float *coeffs, int M, int N, double lambda, - intp *strides, intp *cstrides, float precision) { - double r, omega; - float *inptr; - float *coptr; - float *tmpmem; - float *tptr; - int m,n, retval=0; - - tmpmem = malloc(N*M*sizeof(float)); - if (tmpmem == NULL) return -1; - - if (lambda <= 1.0 / 144.0) { - /* normal cubic spline */ - r = -2 + sqrt(3.0); - - /* Loop over rows */ - inptr = image; - tptr = tmpmem; - for (m = 0; m < M; m++) { - retval = S_IIR_forback1 (-r*6.0, r, inptr, tptr, N, strides[1], 1, precision); - if (retval < 0) break; - inptr += strides[0]; - tptr += N; - } - - if (retval >=0) { - /* Loop over columns */ - tptr = tmpmem; - coptr = coeffs; - for (n = 0; n < N; n++) { - retval = S_IIR_forback1 (-r*6.0, r, tptr, coptr, M, N, cstrides[0], precision); - if (retval < 0) break; - coptr += cstrides[1]; - tptr += 1; - } - } - free(tmpmem); - return retval; - } - - /* Smoothing spline */ - - /* Compute r and omega from lambda */ - compute_root_from_lambda(lambda, &r, &omega); - - /* Loop over rows */ - inptr = image; - tptr = tmpmem; - for (m = 0; m < M; m++) { - retval = S_IIR_forback2 (r, omega, inptr, tptr, N, strides[1], - 1, precision); - if (retval < 0) break; - inptr += strides[0]; - tptr += N; - } - /* Loop over columns */ - tptr = tmpmem; - coptr = coeffs; - for (n = 0; n < N; n++) { - retval = S_IIR_forback2 (r, omega, tptr, coptr, M, N, - cstrides[0], precision); - if (retval < 0) break; - coptr += cstrides[1]; - tptr += 1; - } - - free(tmpmem); - return retval; -} - -/* Find the quadratic spline 
coefficients of an image - image is M rows by N columns stored rowise in memory (vary column number - first). It will be replaced with the spline coefficients. - lambda is a smoothing parameter (lambda = 100 approximately corresponds - to a cutoff frequency of 0.1*(sample freq)) - must be zero for now. - strides is an integer array [rowstride, colstride] - telling how much memory in units of sizeof(float) bytes to skip - to get to the next element. -*/ - -/* to get the (smoothed) image back mirror-symmetric convolve with a length - three separable FIR filter [1.0, 6.0, 1.0]/ 8.0 -*/ - -int -S_quadratic_spline2D(float *image, float *coeffs, int M, int N, double lambda, - intp *strides, intp *cstrides, float precision) { - double r; - float *inptr; - float *coptr; - float *tmpmem; - float *tptr; - int m,n, retval=0; - - tmpmem = malloc(N*M*sizeof(float)); - if (tmpmem == NULL) return -1; - - if (lambda > 0) return -2; - /* normal quadratic spline */ - r = -3 + 2*sqrt(2.0); - - /* Loop over rows */ - inptr = image; - tptr = tmpmem; - for (m = 0; m < M; m++) { - retval = S_IIR_forback1 (-r*8.0, r, inptr, tptr, N, strides[1], 1, precision); - if (retval < 0) break; - inptr += strides[0]; - tptr += N; - } - - if (retval >=0) { - /* Loop over columns */ - tptr = tmpmem; - coptr = coeffs; - for (n = 0; n < N; n++) { - retval = S_IIR_forback1 (-r*8.0, r, tptr, coptr, M, N, cstrides[0], precision); - if (retval < 0) break; - coptr += cstrides[1]; - tptr += 1; - } - } - free(tmpmem); - return retval; -} - - - - - diff --git a/scipy-0.10.1/scipy/signal/Z_bspline_util.c b/scipy-0.10.1/scipy/signal/Z_bspline_util.c deleted file mode 100644 index 95a4af4eb0..0000000000 --- a/scipy-0.10.1/scipy/signal/Z_bspline_util.c +++ /dev/null @@ -1,301 +0,0 @@ -#include "Python.h" -#include -#include -#include -#include -#include -#define NO_IMPORT_ARRAY -#include "numpy/arrayobject.h" - -void compute_root_from_lambda(double, double *, double *); - -#define CONJ(a) (~(a)) -#define ABSQ(a) 
(__real__ (a*CONJ(a))) -#ifdef __GNUC__ - -void Z_IIR_order1 (__complex__ double,__complex__ double,__complex__ double*,__complex__ double*,int,int,int); -void Z_IIR_order2 (__complex__ double,__complex__ double,__complex__ double,__complex__ double*,__complex__ double*,int,int,int); -void Z_IIR_order2_cascade (__complex__ double,__complex__ double,__complex__ double,__complex__ double,__complex__ double*,__complex__ double*,int,int,int); -int Z_IIR_forback1(__complex__ double,__complex__ double,__complex__ double*,__complex__ double*,int,int,int,double); -void Z_FIR_mirror_symmetric(__complex__ double*,__complex__ double*,int,__complex__ double*,int,int,int); -int Z_separable_2Dconvolve_mirror(__complex__ double*,__complex__ double*,int,int,__complex__ double*,__complex__ double*,int,int,npy_intp*,npy_intp*); - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] */ -/* with a given starting value loaded into the array */ - -void -Z_IIR_order1 (a1, a2, x, y, N, stridex, stridey) - __complex__ double a1; - __complex__ double a2; - __complex__ double *x; - __complex__ double *y; - int N, stridex, stridey; -{ - __complex__ double *yvec = y+stridey; - __complex__ double *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement the following difference equation */ -/* y[n] = a1 * x[n] + a2 * y[n-1] + a3 * y[n-2] */ -/* with two starting values loaded into the array */ - -void -Z_IIR_order2 (a1, a2, a3, x, y, N, stridex, stridey) - __complex__ double a1; - __complex__ double a2; - __complex__ double a3; - __complex__ double *x; - __complex__ double *y; - int N, stridex, stridey; -{ - __complex__ double *yvec = y+2*stridey; - __complex__ double *xvec = x+2*stridex; - int n; - - for (n=2; n < N; n++) { - *yvec = *xvec * a1 + *(yvec-stridey) * a2 + *(yvec-2*stridey) * a3; - yvec += stridey; - xvec += stridex; - } -} - -/* Implement a 
second order IIR difference equation using a cascade - of first order sections. Suppose the transfer function is - cs - H(z) = ------------------- - (1-z1/z) ( 1-z2/z) - - then the following pair is implemented with one starting value loaded in - the output array and the starting value for the intermediate array - passed in as yp0. - - y1[n] = x[n] + z1 y1[n-1] - yp[n] = cs y1[n] + z2 yp[n-1] - -*/ - -void -Z_IIR_order2_cascade (cs, z1, z2, y1_0, x, yp, N, stridex, stridey) - __complex__ double cs; - __complex__ double z1; - __complex__ double z2; - __complex__ double y1_0; - __complex__ double *x; - __complex__ double *yp; - int N, stridex, stridey; -{ - __complex__ double *yvec = yp+stridey; - __complex__ double *xvec = x+stridex; - int n; - - for (n=1; n < N; n++) { - y1_0 = *xvec + y1_0 * z1; - *yvec = cs * y1_0 + *(yvec-stridey) * z2; - yvec += stridey; - xvec += stridex; - } -} - - -/* Implement a smoothing IIR filter with mirror-symmetric boundary conditions - using a cascade of first-order sections. The second section uses a - reversed sequence. This implements the following transfer function: - c0 - H(z) = --------------------------- - (1-z1/z) (1 - z1 z) - - with the following difference equations: - - yp[n] = x[n] + z1 yp[n-1] - with starting condition: - yp[0] = x[0] + Sum(z1^(k+1) x[k],k=0..Infinity) - - and - - y[n] = z1 y[n+1] + c0 yp[n] - with starting condition: - y[N-1] = z1 / (z1-1) yp[N-1] - - The resulting signal will have mirror symmetric boundary conditions as well. - - If memory could not be allocated for the temporary vector yp, the - function returns -1 otherwise it returns 0. 
- - z1 should be less than 1; - -*/ - -int -Z_IIR_forback1 (c0, z1, x, y, N, stridex, stridey, precision) - __complex__ double c0; - __complex__ double z1; - __complex__ double *x; - __complex__ double *y; - int N, stridex, stridey; - double precision; -{ - __complex__ double *yp = NULL; - __complex__ double *xptr = x; - __complex__ double yp0; - __complex__ double powz1; - __complex__ double diff; - double err; - int k; - - if (ABSQ(z1) >= 1.0) return -2; /* z1 not less than 1 */ - - /* Initialize memory for loop */ - if ((yp = malloc(N*sizeof(__complex__ double)))==NULL) return -1; - - /* Fix starting value assuming mirror-symmetric boundary conditions. */ - yp0 = x[0]; - powz1 = 1.0; - k = 0; - precision *= precision; - do { - yp[0] = yp0; - powz1 *= z1; - yp0 += powz1 * (*xptr); - diff = powz1; - err = ABSQ(diff); - xptr += stridex; - k++; - } while((err > precision) && (k < N)); - if (k >= N) return -3; /* sum did not converge */ - yp[0] = yp0; - - Z_IIR_order1(1.0, z1, x, yp, N, stridex, 1); - - *(y + (N-1)*stridey) = -c0 / (z1 - 1.0) * yp[N-1]; - - Z_IIR_order1(c0, z1, yp+N-1, y+(N-1)*stridey, N, -1, -stridey); - - free(yp); - return 0; -} - - -/* h must be odd length */ -/* strides in units of sizeof(__complex__ double) bytes */ - -void -Z_FIR_mirror_symmetric (in, out, N, h, Nh, instride, outstride) - __complex__ double *in; - __complex__ double *out; - int N, Nh; - __complex__ double *h; - int instride, outstride; -{ - int n, k; - int Nhdiv2 = Nh >> 1; - __complex__ double *outptr; - __complex__ double *inptr; - __complex__ double *hptr; - - /* first part boundary conditions */ - outptr = out; - for (n=0; n < Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - inptr += instride; - for (k=n+1; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - outptr += outstride; - } - - /* middle section */ - outptr = out + 
Nhdiv2*outstride; - for (n=Nhdiv2; n < N-Nhdiv2; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (n+Nhdiv2)*instride; - for (k=-Nhdiv2; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - - /* end boundary conditions */ - outptr = out + (N-Nhdiv2)*outstride; - for (n=N-Nhdiv2; n < N; n++) { - *outptr = 0.0; - hptr = h; - inptr = in + (2*N-1-n-Nhdiv2)*instride; - for (k=-Nhdiv2; k <= n-N; k++) { - *outptr += *hptr++ * *inptr; - inptr += instride; - } - inptr -= instride; - for (k=n+1-N; k <= Nhdiv2; k++) { - *outptr += *hptr++ * *inptr; - inptr -= instride; - } - outptr += outstride; - } - -} - -int -Z_separable_2Dconvolve_mirror(in, out, M, N, hr, hc, Nhr, - Nhc, instrides, outstrides) - __complex__ double *in; - __complex__ double *out; - int M, N; - __complex__ double *hr, *hc; - int Nhr, Nhc; - npy_intp *instrides, *outstrides; -{ - int m, n; - __complex__ double *tmpmem; - __complex__ double *inptr=NULL, *outptr=NULL; - - tmpmem = malloc(M*N*sizeof(__complex__ double)); - if (tmpmem == NULL) return -1; - - if (Nhr > 0) { - /* filter across rows */ - inptr = in; - outptr = tmpmem; - for (m = 0; m < M; m++) { - Z_FIR_mirror_symmetric (inptr, outptr, N, hr, Nhr, instrides[1], 1); - inptr += instrides[0]; - outptr += N; - } - } - else - memmove(tmpmem, inptr, M*N*sizeof(__complex__ double)); - - if (Nhc > 0) { - /* filter down columns */ - inptr = tmpmem; - outptr = out; - for (n = 0; n < N; n++) { - Z_FIR_mirror_symmetric (inptr, outptr, M, hc, Nhc, N, outstrides[0]); - outptr += outstrides[1]; - inptr += 1; - } - } - else - memmove(outptr, tmpmem, M*N*sizeof(__complex__ double)); - - free(tmpmem); - return 0; -} -#endif diff --git a/scipy-0.10.1/scipy/signal/__init__.py b/scipy-0.10.1/scipy/signal/__init__.py deleted file mode 100644 index 24ab2c8b55..0000000000 --- a/scipy-0.10.1/scipy/signal/__init__.py +++ /dev/null @@ -1,213 +0,0 @@ -""" -======================================= -Signal processing 
(:mod:`scipy.signal`) -======================================= - -.. module:: scipy.signal - -Convolution -=========== - -.. autosummary:: - :toctree: generated/ - - convolve -- N-dimensional convolution. - correlate -- N-dimensional correlation. - fftconvolve -- N-dimensional convolution using the FFT. - convolve2d -- 2-dimensional convolution (more options). - correlate2d -- 2-dimensional correlation (more options). - sepfir2d -- Convolve with a 2-D separable FIR filter. - -B-splines -========= - -.. autosummary:: - :toctree: generated/ - - bspline -- B-spline basis function of order n. - gauss_spline -- Gaussian approximation to the B-spline basis function. - cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline. - qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline. - cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline. - qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline. - spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array. - -Filtering -========= - -.. autosummary:: - :toctree: generated/ - - order_filter -- N-dimensional order filter. - medfilt -- N-dimensional median filter. - medfilt2d -- 2-dimensional median filter (faster). - wiener -- N-dimensional wiener filter. - - symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems). - symiirorder2 -- 4th-order IIR filter (cascade of second-order systems). - lfilter -- 1-dimensional FIR and IIR digital linear filtering. - lfiltic -- Construct initial conditions for `lfilter`. - lfilter_zi -- Compute an initial state zi for the lfilter function that - -- corresponds to the steady state of the step response. - filtfilt -- A forward-backward filter. - - deconvolve -- 1-d deconvolution using lfilter. - - hilbert -- Compute the analytic signal of a 1-d signal. - get_window -- Create FIR window. - - decimate -- Downsample a signal. - detrend -- Remove linear and/or constant trends from data. - resample -- Resample using Fourier method. 
- -Filter design -============= - -.. autosummary:: - :toctree: generated/ - - bilinear -- Digital filter from an analog filter using - -- the bilinear transform. - firwin -- Windowed FIR filter design, with frequency response - -- defined as pass and stop bands. - firwin2 -- Windowed FIR filter design, with arbitrary frequency - -- response. - freqs -- Analog filter frequency response. - freqz -- Digital filter frequency response. - iirdesign -- IIR filter design given bands and gains. - iirfilter -- IIR filter design given order and critical frequencies. - kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given - -- the number of taps and the transition width at - -- discontinuities in the frequency response. - kaiser_beta -- Compute the Kaiser parameter beta, given the desired - -- FIR filter attenuation. - kaiserord -- Design a Kaiser window to limit ripple and width of - -- transition region. - remez -- Optimal FIR filter design. - - unique_roots -- Unique roots and their multiplicities. - residue -- Partial fraction expansion of b(s) / a(s). - residuez -- Partial fraction expansion of b(z) / a(z). - invres -- Inverse partial fraction expansion. - -Matlab-style IIR filter design -============================== - -.. autosummary:: - :toctree: generated/ - - butter -- Butterworth - buttord - cheby1 -- Chebyshev Type I - cheb1ord - cheby2 -- Chebyshev Type II - cheb2ord - ellip -- Elliptic (Cauer) - ellipord - bessel -- Bessel (no order selection available -- try butterod) - -Continuous-Time Linear Systems -============================== - -.. autosummary:: - :toctree: generated/ - - lti -- linear time invariant system object. - lsim -- continuous-time simulation of output to linear system. - lsim2 -- like lsim, but `scipy.integrate.odeint` is used. - impulse -- impulse response of linear, time-invariant (LTI) system. - impulse2 -- like impulse, but `scipy.integrate.odeint` is used. - step -- step response of continous-time LTI system. 
- step2 -- like step, but `scipy.integrate.odeint` is used. - -Discrete-Time Linear Systems -============================ - dlsim -- simulation of output to a discrete-time linear system. - dimpulse -- impulse response of a discrete-time LTI system. - dstep -- step response of a discrete-time LTI system. - -LTI Representations -=================== - -.. autosummary:: - :toctree: generated/ - - tf2zpk -- transfer function to zero-pole-gain. - zpk2tf -- zero-pole-gain to transfer function. - tf2ss -- transfer function to state-space. - ss2tf -- state-pace to transfer function. - zpk2ss -- zero-pole-gain to state-space. - ss2zpk -- state-space to pole-zero-gain. - cont2discrete -- continuous-time to discrete-time LTI conversion. - -Waveforms -========= - -.. autosummary:: - :toctree: generated/ - - chirp -- Frequency swept cosine signal, with several freq functions. - gausspulse -- Gaussian modulated sinusoid - sawtooth -- Periodic sawtooth - square -- Square wave - sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial - -Window functions -================ - -.. autosummary:: - :toctree: generated/ - - get_window -- Return a window of a given length and type. - barthann -- Bartlett-Hann window - bartlett -- Bartlett window - blackman -- Blackman window - blackmanharris -- Minimum 4-term Blackman-Harris window - bohman -- Bohman window - boxcar -- Boxcar window - chebwin -- Dolph-Chebyshev window - flattop -- Flat top window - gaussian -- Gaussian window - general_gaussian -- Generalized Gaussian window - hamming -- Hamming window - hann -- Hann window - kaiser -- Kaiser window - nuttall -- Nuttall's minimum 4-term Blackman-Harris window - parzen -- Parzen window - slepian -- Slepian window - triang -- Triangular window - -Wavelets -======== - -.. autosummary:: - :toctree: generated/ - - cascade -- compute scaling function and wavelet from coefficients - daub -- return low-pass - morlet -- Complex Morlet wavelet. 
- qmf -- return quadrature mirror filter from low-pass - -""" - -import sigtools -from waveforms import * - -# The spline module (a C extension) provides: -# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2 -from spline import * - -from bsplines import * -from cont2discrete import * -from dltisys import * -from filter_design import * -from fir_filter_design import * -from ltisys import * -from windows import * -from signaltools import * -from spectral import * -from wavelets import * - -__all__ = filter(lambda s: not s.startswith('_'), dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/signal/_arraytools.py b/scipy-0.10.1/scipy/signal/_arraytools.py deleted file mode 100644 index a9a34a2b12..0000000000 --- a/scipy-0.10.1/scipy/signal/_arraytools.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -Functions for acting on a axis of an array. -""" - -import numpy as np - - -def axis_slice(a, start=None, stop=None, step=None, axis=-1): - """Take a slice along axis 'axis' from 'a'. - - Parameters - ---------- - a : numpy.ndarray - The array to be sliced. - start, stop, step : int or None - The slice parameters. - axis : int - The axis of `a` to be sliced. - - Examples - -------- - >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - >>> axis_slice(a, start=0, stop=1, axis=1) - array([[1], - [4], - [7]]) - >>> axis_slice(a, start=1, axis=0) - array([[4, 5, 6], - [7, 8, 9]]) - - Notes - ----- - The keyword arguments start, stop and step are used by calling - slice(start, stop, step). This implies axis_slice() does not - handle its arguments the exacty the same as indexing. To select - a single index k, for example, use - axis_slice(a, start=k, stop=k+1) - In this case, the length of the axis 'axis' in the result will - be 1; the trivial dimension is not removed. (Use numpy.squeeze() - to remove trivial axes.) 
- """ - a_slice = [slice(None)] * a.ndim - a_slice[axis] = slice(start, stop, step) - b = a[a_slice] - return b - - -def axis_reverse(a, axis=-1): - """Reverse the 1-d slices of `a` along axis `axis`. - - Returns axis_slice(a, step=-1, axis=axis). - """ - return axis_slice(a, step=-1, axis=axis) - - -def odd_ext(x, n, axis=-1): - """Generate a new ndarray by making an odd extension of x along an axis. - - Parameters - ---------- - x : ndarray - The array to be extended. - n : int - The number of elements by which to extend x at each end of the axis. - axis : int - The axis along which to extend x. Default is -1. - - Examples - -------- - >>> a = array([[1.0,2.0,3.0,4.0,5.0], [0.0, 1.0, 4.0, 9.0, 16.0]]) - >>> _odd_ext(a, 2) - array([[-1., 0., 1., 2., 3., 4., 5., 6., 7.], - [-4., -1, 0., 1., 4., 9., 16., 23., 28.]]) - """ - if n < 1: - return x - if n > x.shape[axis] - 1: - raise ValueError(("The extension length n (%d) is too big. " + - "It must not exceed x.shape[axis]-1, which is %d.") - % (n, x.shape[axis] - 1)) - left_end = axis_slice(x, start=0, stop=1, axis=axis) - left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) - right_end = axis_slice(x, start=-1, axis=axis) - right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) - ext = np.concatenate((2 * left_end - left_ext, - x, - 2 * right_end - right_ext), - axis=axis) - return ext - - -def even_ext(x, n, axis=-1): - """Create an ndarray that is an even extension of x along an axis. - - Parameters - ---------- - x : ndarray - The array to be extended. - n : int - The number of elements by which to extend x at each end of the axis. - axis : int - The axis along which to extend x. Default is -1. 
- - Examples - -------- - >>> a = array([[1.0,2.0,3.0,4.0,5.0], [0.0, 1.0, 4.0, 9.0, 16.0]]) - >>> _even_ext(a, 2) - array([[ 3., 2., 1., 2., 3., 4., 5., 4., 3.], - [ 4., 1., 0., 1., 4., 9., 16., 9., 4.]]) - """ - if n < 1: - return x - if n > x.shape[axis] - 1: - raise ValueError(("The extension length n (%d) is too big. " + - "It must not exceed x.shape[axis]-1, which is %d.") - % (n, x.shape[axis] - 1)) - left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) - right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) - ext = np.concatenate((left_ext, - x, - right_ext), - axis=axis) - return ext - - -def const_ext(x, n, axis=-1): - """Create an ndarray that is a constant extension of x along an axis. - - The extension repeats the values at the first and last element of - the axis. - - Parameters - ---------- - x : ndarray - The array to be extended. - n : int - The number of elements by which to extend x at each end of the axis. - axis : int - The axis along which to extend x. Default is -1. 
- - Examples - -------- - >>> a = array([[1.0,2.0,3.0,4.0,5.0], [0.0, 1.0, 4.0, 9.0, 16.0]]) - >>> _const_ext(a, 2) - array([[ 1., 1., 1., 2., 3., 4., 5., 5., 5.], - [ 0., 0., 0., 1., 4., 9., 16., 16., 16.]]) - """ - if n < 1: - return x - left_end = axis_slice(x, start=0, stop=1, axis=axis) - ones_shape = [1] * x.ndim - ones_shape[axis] = n - ones = np.ones(ones_shape, dtype=x.dtype) - left_ext = ones * left_end - right_end = axis_slice(x, start=-1, axis=axis) - right_ext = ones * right_end - ext = np.concatenate((left_ext, - x, - right_ext), - axis=axis) - return ext diff --git a/scipy-0.10.1/scipy/signal/bento.info b/scipy-0.10.1/scipy/signal/bento.info deleted file mode 100644 index b321e80bd7..0000000000 --- a/scipy-0.10.1/scipy/signal/bento.info +++ /dev/null @@ -1,20 +0,0 @@ -HookFile: bscript - -Library: - Extension: sigtools - Sources: - lfilter.c.src, - correlate_nd.c.src, - sigtoolsmodule.c, - firfilter.c, - medianfilter.c - Extension: spectral - Sources: spectral.c - Extension: spline - Sources: - splinemodule.c, - S_bspline_util.c, - D_bspline_util.c, - C_bspline_util.c, - Z_bspline_util.c, - bspline_util.c diff --git a/scipy-0.10.1/scipy/signal/bscript b/scipy-0.10.1/scipy/signal/bscript deleted file mode 100644 index d22fbe5d77..0000000000 --- a/scipy-0.10.1/scipy/signal/bscript +++ /dev/null @@ -1,6 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - context.register_builder("sigtools", lambda e: default_builder(e, includes=".")) diff --git a/scipy-0.10.1/scipy/signal/bspline_util.c b/scipy-0.10.1/scipy/signal/bspline_util.c deleted file mode 100644 index 6a2526638f..0000000000 --- a/scipy-0.10.1/scipy/signal/bspline_util.c +++ /dev/null @@ -1,26 +0,0 @@ -#include -#include -#include -#include -#include - -void compute_root_from_lambda(double, double *, double *); - - -void -compute_root_from_lambda(lambda, r, omega) - double lambda; - double *r; - double *omega; -{ - 
double xi; - double tmp, tmp2; - - tmp = sqrt(3 + 144*lambda); - xi = 1 - 96*lambda + 24*lambda * tmp; - *omega = atan(sqrt((144*lambda - 1.0)/xi)); - tmp2 = sqrt(xi); - *r = (24*lambda - 1 - tmp2)/(24*lambda) \ - * sqrt((48*lambda + 24*lambda*tmp))/tmp2; - return; -} diff --git a/scipy-0.10.1/scipy/signal/bsplines.py b/scipy-0.10.1/scipy/signal/bsplines.py deleted file mode 100644 index 512a28dd0c..0000000000 --- a/scipy-0.10.1/scipy/signal/bsplines.py +++ /dev/null @@ -1,384 +0,0 @@ - -import scipy.special -from numpy import logical_and, asarray, pi, zeros_like, \ - piecewise, array, arctan2, tan, zeros, arange, floor -from numpy.core.umath import sqrt, exp, greater, less, cos, add, sin, \ - less_equal, greater_equal - -# From splinemodule.c -from spline import cspline2d, sepfir2d - -from scipy.misc import comb - -__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic', - 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval'] - -gamma = scipy.special.gamma - - -def factorial(n): - return gamma(n + 1) - - -def spline_filter(Iin, lmbda=5.0): - """Smoothing spline (cubic) filtering of a rank-2 array. - - Filter an input data set, `Iin`, using a (cubic) smoothing spline of - fall-off `lmbda`. - """ - intype = Iin.dtype.char - hcol = array([1.0, 4.0, 1.0], 'f') / 6.0 - if intype in ['F', 'D']: - Iin = Iin.astype('F') - ckr = cspline2d(Iin.real, lmbda) - cki = cspline2d(Iin.imag, lmbda) - outr = sepfir2d(ckr, hcol, hcol) - outi = sepfir2d(cki, hcol, hcol) - out = (outr + 1j * outi).astype(intype) - elif intype in ['f', 'd']: - ckr = cspline2d(Iin, lmbda) - out = sepfir2d(ckr, hcol, hcol) - out = out.astype(intype) - else: - raise TypeError("Invalid data type for Iin") - return out - - -_splinefunc_cache = {} - - -def _bspline_piecefunctions(order): - """Returns the function defined over the left-side pieces for a bspline of - a given order. - - The 0th piece is the first one less than 0. 
The last piece is a function - identical to 0 (returned as the constant 0). (There are order//2 + 2 total - pieces). - - Also returns the condition functions that when evaluated return boolean - arrays for use with `numpy.piecewise`. - """ - try: - return _splinefunc_cache[order] - except KeyError: - pass - - def condfuncgen(num, val1, val2): - if num == 0: - return lambda x: logical_and(less_equal(x, val1), - greater_equal(x, val2)) - elif num == 2: - return lambda x: less_equal(x, val2) - else: - return lambda x: logical_and(less(x, val1), - greater_equal(x, val2)) - - last = order // 2 + 2 - if order % 2: - startbound = -1.0 - else: - startbound = -0.5 - condfuncs = [condfuncgen(0, 0, startbound)] - bound = startbound - for num in xrange(1, last - 1): - condfuncs.append(condfuncgen(1, bound, bound - 1)) - bound = bound - 1 - condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0)) - - # final value of bound is used in piecefuncgen below - - # the functions to evaluate are taken from the left-hand-side - # in the general expression derived from the central difference - # operator (because they involve fewer terms). - - fval = factorial(order) - - def piecefuncgen(num): - Mk = order // 2 - num - if (Mk < 0): - return 0 # final function is 0 - coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval - for k in xrange(Mk + 1)] - shifts = [-bound - k for k in xrange(Mk + 1)] - #print "Adding piece number %d with coeffs %s and shifts %s" \ - # % (num, str(coeffs), str(shifts)) - - def thefunc(x): - res = 0.0 - for k in range(Mk + 1): - res += coeffs[k] * (x + shifts[k]) ** order - return res - return thefunc - - funclist = [piecefuncgen(k) for k in xrange(last)] - - _splinefunc_cache[order] = (funclist, condfuncs) - - return funclist, condfuncs - - -def bspline(x, n): - """B-spline basis function of order n. - - Notes - ----- - Uses numpy.piecewise and automatic function-generator. 
- - """ - ax = -abs(asarray(x)) - # number of pieces on the left-side is (n+1)/2 - funclist, condfuncs = _bspline_piecefunctions(n) - condlist = [func(ax) for func in condfuncs] - return piecewise(ax, condlist, funclist) - - -def gauss_spline(x, n): - """Gaussian approximation to B-spline basis function of order n. - """ - signsq = (n + 1) / 12.0 - return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq) - - -def cubic(x): - """A cubic B-spline. - - This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``. - """ - ax = abs(asarray(x)) - res = zeros_like(ax) - cond1 = less(ax, 1) - if cond1.any(): - ax1 = ax[cond1] - res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1) - cond2 = ~cond1 & less(ax, 2) - if cond2.any(): - ax2 = ax[cond2] - res[cond2] = 1.0 / 6 * (2 - ax2) ** 3 - return res - - -def quadratic(x): - """A quadratic B-spline. - - This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``. - """ - ax = abs(asarray(x)) - res = zeros_like(ax) - cond1 = less(ax, 0.5) - if cond1.any(): - ax1 = ax[cond1] - res[cond1] = 0.75 - ax1 ** 2 - cond2 = ~cond1 & less(ax, 1.5) - if cond2.any(): - ax2 = ax[cond2] - res[cond2] = (ax2 - 1.5) ** 2 / 2.0 - return res - - -def _coeff_smooth(lam): - xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam) - omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi)) - rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam) - rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi) - return rho, omeg - - -def _hc(k, cs, rho, omega): - return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) * - greater(k, -1)) - - -def _hs(k, cs, rho, omega): - c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) / - (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4)) - gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega) - ak = abs(k) - return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak)) - - -def _cubic_smooth_coeff(signal, lamb): - rho, omega = _coeff_smooth(lamb) - cs = 1 - 2 * rho * cos(omega) + rho * rho - K = 
len(signal) - yp = zeros((K,), signal.dtype.char) - k = arange(K) - yp[0] = (_hc(0, cs, rho, omega) * signal[0] + - add.reduce(_hc(k + 1, cs, rho, omega) * signal)) - - yp[1] = (_hc(0, cs, rho, omega) * signal[0] + - _hc(1, cs, rho, omega) * signal[1] + - add.reduce(_hc(k + 2, cs, rho, omega) * signal)) - - for n in range(2, K): - yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] - - rho * rho * yp[n - 2]) - - y = zeros((K,), signal.dtype.char) - - y[K - 1] = add.reduce((_hs(k, cs, rho, omega) + - _hs(k + 1, cs, rho, omega)) * signal[::-1]) - y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) + - _hs(k + 2, cs, rho, omega)) * signal[::-1]) - - for n in range(K - 3, -1, -1): - y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] - - rho * rho * y[n + 2]) - - return y - - -def _cubic_coeff(signal): - zi = -2 + sqrt(3) - K = len(signal) - yplus = zeros((K,), signal.dtype.char) - powers = zi ** arange(K) - yplus[0] = signal[0] + zi * add.reduce(powers * signal) - for k in range(1, K): - yplus[k] = signal[k] + zi * yplus[k - 1] - output = zeros((K,), signal.dtype) - output[K - 1] = zi / (zi - 1) * yplus[K - 1] - for k in range(K - 2, -1, -1): - output[k] = zi * (output[k + 1] - yplus[k]) - return output * 6.0 - - -def _quadratic_coeff(signal): - zi = -3 + 2 * sqrt(2.0) - K = len(signal) - yplus = zeros((K,), signal.dtype.char) - powers = zi ** arange(K) - yplus[0] = signal[0] + zi * add.reduce(powers * signal) - for k in range(1, K): - yplus[k] = signal[k] + zi * yplus[k - 1] - output = zeros((K,), signal.dtype.char) - output[K - 1] = zi / (zi - 1) * yplus[K - 1] - for k in range(K - 2, -1, -1): - output[k] = zi * (output[k + 1] - yplus[k]) - return output * 8.0 - - -def cspline1d(signal, lamb=0.0): - """ - Compute cubic spline coefficients for rank-1 array. - - Find the cubic spline coefficients for a 1-D signal assuming - mirror-symmetric boundary conditions. 
To obtain the signal back from the - spline representation mirror-symmetric-convolve these coefficients with a - length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 . - - Parameters - ---------- - signal : ndarray - A rank-1 array representing samples of a signal. - lamb : float, optional - Smoothing coefficient, default is 0.0. - - Returns - ------- - c : ndarray - Cubic spline coefficients. - - """ - if lamb != 0.0: - return _cubic_smooth_coeff(signal, lamb) - else: - return _cubic_coeff(signal) - - -def qspline1d(signal, lamb=0.0): - """Compute quadratic spline coefficients for rank-1 array. - - Find the quadratic spline coefficients for a 1-D signal assuming - mirror-symmetric boundary conditions. To obtain the signal back from the - spline representation mirror-symmetric-convolve these coefficients with a - length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . - - Parameters - ---------- - signal : ndarray - A rank-1 array representing samples of a signal. - lamb : float, optional - Smoothing coefficient (must be zero for now). - - Returns - ------- - c : ndarray - Cubic spline coefficients. - - """ - if lamb != 0.0: - raise ValueError("Smoothing quadratic splines not supported yet.") - else: - return _quadratic_coeff(signal) - - -def cspline1d_eval(cj, newx, dx=1.0, x0=0): - """Evaluate a spline at the new set of points. - - `dx` is the old sample-spacing while `x0` was the old origin. In - other-words the old-sample points (knot-points) for which the `cj` - represent spline coefficients were at equally-spaced points of: - - oldx = x0 + j*dx j=0...N-1, with N=len(cj) - - Edges are handled using mirror-symmetric boundary conditions. 
- - """ - newx = (asarray(newx) - x0) / float(dx) - res = zeros_like(newx) - if res.size == 0: - return res - N = len(cj) - cond1 = newx < 0 - cond2 = newx > (N - 1) - cond3 = ~(cond1 | cond2) - # handle general mirror-symmetry - res[cond1] = cspline1d_eval(cj, -newx[cond1]) - res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) - newx = newx[cond3] - if newx.size == 0: - return res - result = zeros_like(newx) - jlower = floor(newx - 2).astype(int) + 1 - for i in range(4): - thisj = jlower + i - indj = thisj.clip(0, N - 1) # handle edge cases - result += cj[indj] * cubic(newx - thisj) - res[cond3] = result - return res - - -def qspline1d_eval(cj, newx, dx=1.0, x0=0): - """Evaluate a quadratic spline at the new set of points. - - `dx` is the old sample-spacing while `x0` was the old origin. In - other-words the old-sample points (knot-points) for which the `cj` - represent spline coefficients were at equally-spaced points of:: - - oldx = x0 + j*dx j=0...N-1, with N=len(cj) - - Edges are handled using mirror-symmetric boundary conditions. 
- - """ - newx = (asarray(newx) - x0) / dx - res = zeros_like(newx) - if res.size == 0: - return res - N = len(cj) - cond1 = newx < 0 - cond2 = newx > (N - 1) - cond3 = ~(cond1 | cond2) - # handle general mirror-symmetry - res[cond1] = qspline1d_eval(cj, -newx[cond1]) - res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) - newx = newx[cond3] - if newx.size == 0: - return res - result = zeros_like(newx) - jlower = floor(newx - 1.5).astype(int) + 1 - for i in range(3): - thisj = jlower + i - indj = thisj.clip(0, N - 1) # handle edge cases - result += cj[indj] * quadratic(newx - thisj) - res[cond3] = result - return res diff --git a/scipy-0.10.1/scipy/signal/cont2discrete.py b/scipy-0.10.1/scipy/signal/cont2discrete.py deleted file mode 100644 index 9b4d3ad4a8..0000000000 --- a/scipy-0.10.1/scipy/signal/cont2discrete.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -Continuous to discrete transformations for state-space and transfer function. -""" - -# Author: Jeffrey Armstrong -# March 29, 2011 - -import numpy as np -from scipy import linalg - -from ltisys import tf2ss, ss2tf, zpk2ss, ss2zpk - -__all__ = ['cont2discrete'] - - -def cont2discrete(sys, dt, method="zoh", alpha=None): - """Transform a continuous to a discrete state-space system. - - Parameters - ----------- - sys : a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation: - * 2: (num, den) - * 3: (zeros, poles, gain) - * 4: (A, B, C, D) - - dt : float - The discretization time step. - method : {"gbt", "bilinear", "euler", "backward_diff", "zoh"} - Which method to use: - * gbt: generalized bilinear transformation - * bilinear: Tustin's approximation ("gbt" with alpha=0.5) - * euler: Euler (or forward differencing) method ("gbt" with - alpha=0) - * backward_diff: Backwards differencing ("gbt" with alpha=1.0) - * zoh: zero-order hold (default). 
- alpha : float within [0, 1] - The generalized bilinear transformation weighting parameter, which - should only be specified with method="gbt", and is ignored otherwise - - Returns - ------- - sysd : tuple containing the discrete system - Based on the input type, the output will be of the form - - (num, den, dt) for transfer function input - (zeros, poles, gain, dt) for zeros-poles-gain input - (A, B, C, D, dt) for state-space system input - - Notes - ----- - By default, the routine uses a Zero-Order Hold (zoh) method to perform - the transformation. Alternatively, a generalized bilinear transformation - may be used, which includes the common Tustin's bilinear approximation, - an Euler's method technique, or a backwards differencing technique. - - The Zero-Order Hold (zoh) method is based on: - http://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models - - Generalize bilinear approximation is based on: - http://techteach.no/publications/discretetime_signals_systems/discrete.pdf - and - G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized bilinear - transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, 2009. 
- (http://www.ece.ualberta.ca/~gfzhang/research/ZCC07_preprint.pdf) - - """ - if len(sys) == 2: - sysd = cont2discrete(tf2ss(sys[0], sys[1]), dt, method=method, - alpha=alpha) - return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) - elif len(sys) == 3: - sysd = cont2discrete(zpk2ss(sys[0], sys[1], sys[2]), dt, method=method, - alpha=alpha) - return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) - elif len(sys) == 4: - a, b, c, d = sys - else: - raise ValueError("First argument must either be a tuple of 2 (tf), " - "3 (zpk), or 4 (ss) arrays.") - - if method == 'gbt': - if alpha is None: - raise ValueError("Alpha parameter must be specified for the " - "generalized bilinear transform (gbt) method") - elif alpha < 0 or alpha > 1: - raise ValueError("Alpha parameter must be within the interval " - "[0,1] for the gbt method") - - if method == 'gbt': - # This parameter is used repeatedly - compute once here - ima = np.eye(a.shape[0]) - alpha*dt*a - ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) - bd = linalg.solve(ima, dt*b) - - # Similarly solve for the output equation matrices - cd = linalg.solve(ima.transpose(), c.transpose()) - cd = cd.transpose() - dd = d + alpha*np.dot(c, bd) - - elif method == 'bilinear' or method == 'tustin': - return cont2discrete(sys, dt, method="gbt", alpha=0.5) - - elif method == 'euler' or method == 'forward_diff': - return cont2discrete(sys, dt, method="gbt", alpha=0.0) - - elif method == 'backward_diff': - return cont2discrete(sys, dt, method="gbt", alpha=1.0) - - elif method == 'zoh': - # Build an exponential matrix - em_upper = np.hstack((a, b)) - - # Need to stack zeros under the a and b matrices - em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), - np.zeros((b.shape[1], b.shape[1])) )) - - em = np.vstack((em_upper, em_lower)) - ms = linalg.expm(dt * em) - - # Dispose of the lower rows - ms = ms[:a.shape[0], :] - - ad = ms[:, 0:a.shape[1]] - bd = ms[:, a.shape[1]:] - - cd = c - dd = d - - else: - raise 
ValueError("Unknown transformation method '%s'" % method) - - return ad, bd, cd, dd, dt - diff --git a/scipy-0.10.1/scipy/signal/correlate_nd.c.src b/scipy-0.10.1/scipy/signal/correlate_nd.c.src deleted file mode 100644 index f3254afd08..0000000000 --- a/scipy-0.10.1/scipy/signal/correlate_nd.c.src +++ /dev/null @@ -1,328 +0,0 @@ -/* - * vim:syntax=c - * vim:sw=4 - */ -#include -#define PY_ARRAY_UNIQUE_SYMBOL _scipy_signal_ARRAY_API -#define NO_IMPORT_ARRAY -#include - -#include "sigtools.h" - -enum { - CORR_MODE_VALID=0, - CORR_MODE_SAME, - CORR_MODE_FULL -}; - -static int _correlate_nd_imp(PyArrayIterObject* x, PyArrayIterObject *y, - PyArrayIterObject *z, int typenum, int mode); - -PyObject * -scipy_signal_sigtools_correlateND(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *x, *y, *out; - PyArrayObject *ax, *ay, *aout; - PyArrayIterObject *itx, *ity, *itz; - int mode, typenum, st; - - if (!PyArg_ParseTuple(args, "OOOi", &x, &y, &out, &mode)) { - return NULL; - } - - typenum = PyArray_ObjectType(x, 0); - typenum = PyArray_ObjectType(y, typenum); - typenum = PyArray_ObjectType(out, typenum); - - ax = (PyArrayObject *)PyArray_FromObject(x, typenum, 0, 0); - if (ax == NULL) { - return NULL; - } - - ay = (PyArrayObject *)PyArray_FromObject(y, typenum, 0, 0); - if (ay == NULL) { - goto clean_ax; - } - - aout = (PyArrayObject *)PyArray_FromObject(out, typenum, 0, 0); - if (aout == NULL) { - goto clean_ay; - } - - if (ax->nd != ay->nd) { - PyErr_SetString(PyExc_ValueError, - "Arrays must have the same number of dimensions."); - goto clean_aout; - } - - if (ax->nd == 0) { - PyErr_SetString(PyExc_ValueError, "Cannot convolve zero-dimensional arrays."); - goto clean_aout; - } - - itx = (PyArrayIterObject*)PyArray_IterNew((PyObject*)ax); - if (itx == NULL) { - goto clean_aout; - } - ity = (PyArrayIterObject*)PyArray_IterNew((PyObject*)ay); - if (ity == NULL) { - goto clean_itx; - } - itz = (PyArrayIterObject*)PyArray_IterNew((PyObject*)aout); - if (itz == NULL) 
{ - goto clean_ity; - } - - st = _correlate_nd_imp(itx, ity, itz, typenum, mode); - if (st) { - goto clean_itz; - } - - Py_DECREF(itz); - Py_DECREF(ity); - Py_DECREF(itx); - - Py_DECREF(ax); - Py_DECREF(ay); - - return PyArray_Return(aout); - -clean_itz: - Py_DECREF(itz); -clean_ity: - Py_DECREF(ity); -clean_itx: - Py_DECREF(itx); -clean_aout: - Py_DECREF(aout); -clean_ay: - Py_DECREF(ay); -clean_ax: - Py_DECREF(ax); - return NULL; -} - -/* - * Implementation of the type-specific correlation 'kernels' - */ - -/**begin repeat - * #fsuf = ubyte, byte, ushort, short, uint, int, ulong, long, ulonglong, - * longlong, float, double, longdouble# - * #type = ubyte, byte, ushort, short, uint, int, ulong, long, ulonglong, - * longlong, float, double, npy_longdouble# - */ - -static int _imp_correlate_nd_@fsuf@(PyArrayNeighborhoodIterObject *curx, - PyArrayNeighborhoodIterObject *curneighx, PyArrayIterObject *ity, - PyArrayIterObject *itz) -{ - npy_intp i, j; - @type@ acc; - - for(i = 0; i < curx->size; ++i) { - acc = 0; - PyArrayNeighborhoodIter_Reset(curneighx); - for(j = 0; j < curneighx->size; ++j) { - acc += *((@type@*)(curneighx->dataptr)) * *((@type@*)(ity->dataptr)); - - PyArrayNeighborhoodIter_Next(curneighx); - PyArray_ITER_NEXT(ity); - } - PyArrayNeighborhoodIter_Next(curx); - - *((@type@*)(itz->dataptr)) = acc; - PyArray_ITER_NEXT(itz); - - PyArray_ITER_RESET(ity); - } - - return 0; -} - -/**end repeat**/ - -/**begin repeat - * #fsuf = float, double, longdouble# - * #type = float, double, npy_longdouble# - */ - -static int _imp_correlate_nd_c@fsuf@(PyArrayNeighborhoodIterObject *curx, - PyArrayNeighborhoodIterObject *curneighx, PyArrayIterObject *ity, - PyArrayIterObject *itz) -{ - int i, j; - @type@ racc, iacc; - @type@ *ptr1, *ptr2; - - for(i = 0; i < curx->size; ++i) { - racc = 0; - iacc = 0; - PyArrayNeighborhoodIter_Reset(curneighx); - for(j = 0; j < curneighx->size; ++j) { - ptr1 = ((@type@*)(curneighx->dataptr)); - ptr2 = ((@type@*)(ity->dataptr)); - racc += 
ptr1[0] * ptr2[0] + ptr1[1] * ptr2[1]; - iacc += ptr1[1] * ptr2[0] - ptr1[0] * ptr2[1]; - - PyArrayNeighborhoodIter_Next(curneighx); - PyArray_ITER_NEXT(ity); - } - PyArrayNeighborhoodIter_Next(curx); - - ((@type@*)(itz->dataptr))[0] = racc; - ((@type@*)(itz->dataptr))[1] = iacc; - PyArray_ITER_NEXT(itz); - - PyArray_ITER_RESET(ity); - } - - return 0; -} - -/**end repeat**/ - -static int _imp_correlate_nd_object(PyArrayNeighborhoodIterObject *curx, - PyArrayNeighborhoodIterObject *curneighx, PyArrayIterObject *ity, - PyArrayIterObject *itz) -{ - int i, j; - PyObject *tmp, *tmp2; - char *zero; - PyArray_CopySwapFunc *copyswap = curx->ao->descr->f->copyswap; - - zero = PyArray_Zero(curx->ao); - - for(i = 0; i < curx->size; ++i) { - PyArrayNeighborhoodIter_Reset(curneighx); - copyswap(itz->dataptr, zero, 0, NULL); - - for(j = 0; j < curneighx->size; ++j) { - /* - * compute tmp2 = acc + x * y. Not all objects supporting the - * number protocol support inplace operations, so we do it the most - * straightfoward way. 
- */ - tmp = PyNumber_Multiply(*((PyObject**)curneighx->dataptr), - *((PyObject**)ity->dataptr)); - tmp2 = PyNumber_Add(*((PyObject**)itz->dataptr), tmp); - Py_DECREF(tmp); - - /* Update current output item (acc) */ - Py_DECREF(*((PyObject**)itz->dataptr)); - *((PyObject**)itz->dataptr) = tmp2; - - PyArrayNeighborhoodIter_Next(curneighx); - PyArray_ITER_NEXT(ity); - } - - PyArrayNeighborhoodIter_Next(curx); - - PyArray_ITER_NEXT(itz); - - PyArray_ITER_RESET(ity); - } - - PyDataMem_FREE(zero); - - return 0; -} - -static int _correlate_nd_imp(PyArrayIterObject* itx, PyArrayIterObject *ity, - PyArrayIterObject *itz, int typenum, int mode) -{ - PyArrayNeighborhoodIterObject *curneighx, *curx; - npy_intp i, nz, nx; - npy_intp bounds[NPY_MAXDIMS*2]; - - /* Compute boundaries for the neighborhood iterator curx: curx is used to - * traverse x directly, such as each point of the output is the - * innerproduct of y with the neighborhood around curx */ - switch(mode) { - case CORR_MODE_VALID: - /* Only walk through the input points such as the correponding - * output will not depend on 0 padding */ - for(i = 0; i < itx->ao->nd; ++i) { - bounds[2*i] = ity->ao->dimensions[i] - 1; - bounds[2*i+1] = itx->ao->dimensions[i] - 1; - } - break; - case CORR_MODE_SAME: - /* Only walk through the input such as the output will be centered - relatively to the output as computed in the full mode */ - for(i = 0; i < itx->ao->nd; ++i) { - nz = itx->ao->dimensions[i]; - /* Recover 'original' nx, before it was zero-padded */ - nx = nz - ity->ao->dimensions[i] + 1; - if ((nz - nx) % 2 == 0) { - bounds[2*i] = (nz - nx) / 2; - } else { - bounds[2*i] = (nz - nx - 1) / 2; - } - bounds[2*i+1] = bounds[2*i] + nx - 1; - } - break; - case CORR_MODE_FULL: - for(i = 0; i < itx->ao->nd; ++i) { - bounds[2*i] = 0; - bounds[2*i+1] = itx->ao->dimensions[i] - 1; - } - break; - default: - PyErr_BadInternalCall(); - return -1; - } - - curx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(itx, - 
bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL); - if (curx == NULL) { - PyErr_SetString(PyExc_SystemError, "Could not create curx ?"); - return -1; - } - - /* Compute boundaries for the neighborhood iterator: the neighborhood for x - should have the same dimensions as y */ - for(i = 0; i < ity->ao->nd; ++i) { - bounds[2*i] = -ity->ao->dimensions[i] + 1; - bounds[2*i+1] = 0; - } - - curneighx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew( - (PyArrayIterObject*)curx, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL); - if (curneighx == NULL) { - goto clean_curx; - } - - switch(typenum) { -/**begin repeat - * #TYPE = UBYTE, BYTE, USHORT, SHORT, UINT, INT, ULONG, LONG, ULONGLONG, - * LONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# - * #type = ubyte, byte, ushort, short, uint, int, ulong, long, ulonglong, - * longlong, float, double, longdouble, cfloat, cdouble, clongdouble# - */ - case PyArray_@TYPE@: - _imp_correlate_nd_@type@(curx, curneighx, ity, itz); - break; -/**end repeat**/ - - /* The object array case does not worth being optimized, since most of - the cost is numerical operations, not iterators moving in this case ? 
*/ - case PyArray_OBJECT: - _imp_correlate_nd_object(curx, curneighx, ity, itz); - break; - default: - PyErr_SetString(PyExc_ValueError, "Unsupported type"); - goto clean_curneighx; - } - - Py_DECREF((PyArrayIterObject*)curx); - Py_DECREF((PyArrayIterObject*)curneighx); - - return 0; - -clean_curneighx: - Py_DECREF((PyArrayIterObject*)curneighx); -clean_curx: - Py_DECREF((PyArrayIterObject*)curx); - return -1; -} diff --git a/scipy-0.10.1/scipy/signal/dltisys.py b/scipy-0.10.1/scipy/signal/dltisys.py deleted file mode 100644 index 475e295338..0000000000 --- a/scipy-0.10.1/scipy/signal/dltisys.py +++ /dev/null @@ -1,269 +0,0 @@ -""" -dltisys - Code related to discrete linear time-invariant systems -""" - -# Author: Jeffrey Armstrong -# April 4, 2011 - -import numpy as np -from scipy.interpolate import interp1d -from ltisys import tf2ss, zpk2ss - -__all__ = ['dlsim', 'dstep', 'dimpulse'] - - -def dlsim(system, u, t=None, x0=None): - """ - Simulate output of a discrete-time linear system. - - Parameters - ---------- - system : class instance or tuple - An instance of the LTI class, or a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation: - - - 3: (num, den, dt) - - 4: (zeros, poles, gain, dt) - - 5: (A, B, C, D, dt) - - u : array_like - An input array describing the input at each time `t` (interpolation is - assumed between given times). If there are multiple inputs, then each - column of the rank-2 array represents an input. - t : array_like, optional - The time steps at which the input is defined. If `t` is given, the - final value in `t` determines the number of steps returned in the - output. - x0 : arry_like, optional - The initial conditions on the state vector (zero by default). - - Returns - ------- - tout : ndarray - Time values for the output, as a 1-D array. - yout : ndarray - System response, as a 1-D array. - xout : ndarray, optional - Time-evolution of the state-vector. 
Only generated if the input is a - state-space systems. - - See Also - -------- - lsim, dstep, dimpulse, cont2discrete - - Examples - -------- - A simple integrator transfer function with a discrete time step of 1.0 - could be implemented as: - - >>> from import signal - >>> tf = ([1.0,], [1.0, -1.0], 1.0) - >>> t_in = [0.0, 1.0, 2.0, 3.0] - >>> u = np.asarray([0.0, 0.0, 1.0, 1.0]) - >>> t_out, y = signal.dlsim(tf, u, t=t_in) - >>> y - array([ 0., 0., 0., 1.]) - - """ - if len(system) == 3: - a, b, c, d = tf2ss(system[0], system[1]) - dt = system[2] - elif len(system) == 4: - a, b, c, d = zpk2ss(system[0], system[1], system[2]) - dt = system[3] - elif len(system) == 5: - a, b, c, d, dt = system - else: - raise ValueError("System argument should be a discrete transfer " + - "function, zeros-poles-gain specification, or " + - "state-space system") - - if t is None: - out_samples = max(u.shape) - stoptime = (out_samples - 1) * dt - else: - stoptime = t[-1] - out_samples = int(np.floor(stoptime / dt)) + 1 - - # Pre-build output arrays - xout = np.zeros((out_samples, a.shape[0])) - yout = np.zeros((out_samples, c.shape[0])) - tout = np.linspace(0.0, stoptime, num=out_samples) - - # Check initial condition - if x0 is None: - xout[0,:] = np.zeros((a.shape[1],)) - else: - xout[0,:] = np.asarray(x0) - - # Pre-interpolate inputs into the desired time steps - if t is None: - u_dt = u - else: - if len(u.shape) == 1: - u = u[:, np.newaxis] - - u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True) - u_dt = u_dt_interp(tout).transpose() - - # Simulate the system - for i in range(0, out_samples - 1): - xout[i+1,:] = np.dot(a, xout[i,:]) + np.dot(b, u_dt[i,:]) - yout[i,:] = np.dot(c, xout[i,:]) + np.dot(d, u_dt[i,:]) - - # Last point - yout[out_samples-1,:] = np.dot(c, xout[out_samples-1,:]) + \ - np.dot(d, u_dt[out_samples-1,:]) - - if len(system) == 5: - return tout, yout, xout - else: - return tout, yout - - -def dimpulse(system, x0=None, t=None, n=None): - 
"""Impulse response of discrete-time system. - - Parameters - ---------- - system : tuple - The following gives the number of elements in the tuple and - the interpretation. - * 3: (num, den, dt) - * 4: (zeros, poles, gain, dt) - * 5: (A, B, C, D, dt) - x0 : array_like, optional - Initial state-vector. Defaults to zero. - t : array_like, optional - Time points. Computed if not given. - n : int, optional - The number of time points to compute (if `t` is not given). - - Returns - ------- - t : ndarray - A 1-D array of time points. - yout : tuple of array_like - Step response of system. Each element of the tuple represents - the output of the system based on an impulse in each input. - - See Also - -------- - impulse, dstep, dlsim, cont2discrete - - """ - # Determine the system type and set number of inputs and time steps - if len(system) == 3: - n_inputs = 1 - dt = system[2] - elif len(system) == 4: - n_inputs = 1 - dt = system[3] - elif len(system) == 5: - n_inputs = system[1].shape[1] - dt = system[4] - else: - raise ValueError("System argument should be a discrete transfer " + - "function, zeros-poles-gain specification, or " + - "state-space system") - - # Default to 100 samples if unspecified - if n is None: - n = 100 - - # If time is not specified, use the number of samples - # and system dt - if t is None: - t = np.arange(0, n * dt, dt) - - # For each input, implement a step change - yout = None - for i in range(0, n_inputs): - u = np.zeros((t.shape[0], n_inputs)) - u[0,i] = 1.0 - - one_output = dlsim(system, u, t=t, x0=x0) - - if yout is None: - yout = (one_output[1],) - else: - yout = yout + (one_output[1],) - - tout = one_output[0] - - return tout, yout - - -def dstep(system, x0=None, t=None, n=None): - """Step response of discrete-time system. - - Parameters - ---------- - system : a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation. 
- * 3: (num, den, dt) - * 4: (zeros, poles, gain, dt) - * 5: (A, B, C, D, dt) - x0 : array_like, optional - Initial state-vector (default is zero). - t : array_like, optional - Time points (computed if not given). - n : int, optional - Number of time points to compute if `t` is not given. - - Returns - ------- - t : ndarray - Output time points, as a 1-D array. - yout : tuple of array_like - Step response of system. Each element of the tuple represents - the output of the system based on a step response to each input. - - See Also - -------- - step, dimpulse, dlsim, cont2discrete - - """ - # Determine the system type and set number of inputs and time steps - if len(system) == 3: - n_inputs = 1 - dt = system[2] - elif len(system) == 4: - n_inputs = 1 - dt = system[3] - elif len(system) == 5: - n_inputs = system[1].shape[1] - dt = system[4] - else: - raise ValueError("System argument should be a discrete transfer " + - "function, zeros-poles-gain specification, or " + - "state-space system") - - # Default to 100 samples if unspecified - if n is None: - n = 100 - - # If time is not specified, use the number of samples - # and system dt - if t is None: - t = np.arange(0, n * dt, dt) - - # For each input, implement a step change - yout = None - for i in range(0, n_inputs): - u = np.zeros((t.shape[0], n_inputs)) - u[:,i] = np.ones((t.shape[0],)) - - one_output = dlsim(system, u, t=t, x0=x0) - - if yout is None: - yout = (one_output[1],) - else: - yout = yout + (one_output[1],) - - tout = one_output[0] - - return tout, yout - diff --git a/scipy-0.10.1/scipy/signal/filter_design.py b/scipy-0.10.1/scipy/signal/filter_design.py deleted file mode 100644 index 42cb0aca3b..0000000000 --- a/scipy-0.10.1/scipy/signal/filter_design.py +++ /dev/null @@ -1,1711 +0,0 @@ -"""Filter design. 
-""" - -import types -import warnings - -import numpy -from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \ - resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \ - cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh -from numpy import mintypecode -from scipy import special, optimize -from scipy.misc import comb - -__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', - 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', - 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', - 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', - 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', - 'filter_dict', 'band_dict', 'BadCoefficients'] - - -class BadCoefficients(UserWarning): - pass - -abs = absolute - - -def findfreqs(num, den, N): - ep = atleast_1d(roots(den)) + 0j - tz = atleast_1d(roots(num)) + 0j - - if len(ep) == 0: - ep = atleast_1d(-1000) + 0j - - ez = r_['-1', - numpy.compress(ep.imag >= 0, ep, axis=-1), - numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)] - - integ = abs(ez) < 1e-10 - hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) + - 1.5 * ez.imag)) + 0.5) - lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) + - 2 * ez.imag)) - 0.5) - - w = logspace(lfreq, hfreq, N) - return w - - -def freqs(b, a, worN=None, plot=None): - """ - Compute frequency response of analog filter. - - Given the numerator (b) and denominator (a) of a filter compute its - frequency response:: - - b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1] - H(w) = ------------------------------------------------------- - a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1] - - Parameters - ---------- - b : ndarray - Numerator of a linear filter. - a : ndarray - Denominator of a linear filter. 
- worN : {None, int}, optional - If None, then compute at 200 frequencies around the interesting parts - of the response curve (determined by pole-zero locations). If a single - integer, the compute at that many frequencies. Otherwise, compute the - response at frequencies given in worN. - plot : callable - A callable that takes two arguments. If given, the return parameters - `w` and `h` are passed to plot. Useful for plotting the frequency - response inside `freqs`. - - Returns - ------- - w : ndarray - The frequencies at which h was computed. - h : ndarray - The frequency response. - - See Also - -------- - freqz : Compute the frequency response of a digital filter. - - Notes - ----- - Using Matplotlib's "plot" function as the callable for `plot` produces - unexpected results, this plots the real part of the complex transfer - function, not the magnitude. - - """ - if worN is None: - w = findfreqs(b, a, 200) - elif isinstance(worN, types.IntType): - N = worN - w = findfreqs(b, a, N) - else: - w = worN - w = atleast_1d(w) - s = 1j * w - h = polyval(b, s) / polyval(a, s) - if not plot is None: - plot(w, h) - return w, h - - -def freqz(b, a=1, worN=None, whole=0, plot=None): - """ - Compute the frequency response of a digital filter. - - Given the numerator ``b`` and denominator ``a`` of a digital filter compute - its frequency response:: - - jw -jw -jmw - jw B(e) b[0] + b[1]e + .... + b[m]e - H(e) = ---- = ------------------------------------ - jw -jw -jnw - A(e) a[0] + a[1]e + .... + a[n]e - - Parameters - ---------- - b : ndarray - numerator of a linear filter - a : ndarray - denominator of a linear filter - worN : {None, int}, optional - If None, then compute at 512 frequencies around the unit circle. - If a single integer, the compute at that many frequencies. - Otherwise, compute the response at frequencies given in worN - whole : bool, optional - Normally, frequencies are computed from 0 to pi (upper-half of - unit-circle. 
If whole is True, compute frequencies from 0 to 2*pi. - plot : callable - A callable that takes two arguments. If given, the return parameters - `w` and `h` are passed to plot. Useful for plotting the frequency - response inside `freqz`. - - Returns - ------- - w : ndarray - The frequencies at which h was computed. - h : ndarray - The frequency response. - - Notes - ----- - Using Matplotlib's "plot" function as the callable for `plot` produces - unexpected results, this plots the real part of the complex transfer - function, not the magnitude. - - Examples - -------- - >>> import scipy.signal - >>> b = sp.signal.firwin(80, 0.5, window=('kaiser', 8)) - >>> h, w = sp.signal.freqz(b) - - >>> import matplotlib.pyplot as plt - >>> fig = plt.figure() - >>> plt.title('Digital filter frequency response') - >>> ax1 = fig.add_subplot(111) - - >>> plt.semilogy(h, np.abs(w), 'b') - >>> plt.ylabel('Amplitude (dB)', color='b') - >>> plt.xlabel('Frequency (rad/sample)') - >>> plt.grid() - >>> plt.legend() - - >>> ax2 = ax1.twinx() - >>> angles = np.unwrap(np.angle(w)) - >>> plt.plot(h, angles, 'g') - >>> plt.ylabel('Angle (radians)', color='g') - >>> plt.show() - - """ - b, a = map(atleast_1d, (b, a)) - if whole: - lastpoint = 2 * pi - else: - lastpoint = pi - if worN is None: - N = 512 - w = numpy.linspace(0, lastpoint, N, endpoint=False) - elif isinstance(worN, types.IntType): - N = worN - w = numpy.linspace(0, lastpoint, N, endpoint=False) - else: - w = worN - w = atleast_1d(w) - zm1 = exp(-1j * w) - h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1) - if not plot is None: - plot(w, h) - return w, h - - -def tf2zpk(b, a): - """Return zero, pole, gain (z,p,k) representation from a numerator, - denominator representation of a linear filter. - - Parameters - ---------- - b : ndarray - Numerator polynomial. - a : ndarray - Denominator polynomial. - - Returns - ------- - z : ndarray - Zeros of the transfer function. - p : ndarray - Poles of the transfer function. 
- k : float - System gain. - - If some values of b are too close to 0, they are removed. In that case, a - BadCoefficients warning is emitted. - """ - b, a = normalize(b, a) - b = (b + 0.0) / a[0] - a = (a + 0.0) / a[0] - k = b[0] - b /= b[0] - z = roots(b) - p = roots(a) - return z, p, k - - -def zpk2tf(z, p, k): - """Return polynomial transfer function representation from zeros - and poles - - Parameters - ---------- - z : ndarray - Zeros of the transfer function. - p : ndarray - Poles of the transfer function. - k : float - System gain. - - Returns - ------- - b : ndarray - Numerator polynomial. - a : ndarray - Denominator polynomial. - - """ - z = atleast_1d(z) - k = atleast_1d(k) - if len(z.shape) > 1: - temp = poly(z[0]) - b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char) - if len(k) == 1: - k = [k[0]] * z.shape[0] - for i in range(z.shape[0]): - b[i] = k[i] * poly(z[i]) - else: - b = k * poly(z) - a = atleast_1d(poly(p)) - return b, a - - -def normalize(b, a): - """Normalize polynomial representation of a transfer function. - - If values of b are too close to 0, they are removed. In that case, a - BadCoefficients warning is emitted. 
- """ - b, a = map(atleast_1d, (b, a)) - if len(a.shape) != 1: - raise ValueError("Denominator polynomial must be rank-1 array.") - if len(b.shape) > 2: - raise ValueError("Numerator polynomial must be rank-1 or" - " rank-2 array.") - if len(b.shape) == 1: - b = asarray([b], b.dtype.char) - while a[0] == 0.0 and len(a) > 1: - a = a[1:] - outb = b * (1.0) / a[0] - outa = a * (1.0) / a[0] - if allclose(outb[:, 0], 0, rtol=1e-14): - warnings.warn("Badly conditioned filter coefficients (numerator): the " - "results may be meaningless", BadCoefficients) - while allclose(outb[:, 0], 0, rtol=1e-14) and (outb.shape[-1] > 1): - outb = outb[:, 1:] - if outb.shape[0] == 1: - outb = outb[0] - return outb, outa - - -def lp2lp(b, a, wo=1.0): - """Return a low-pass filter with cutoff frequency `wo` - from a low-pass filter prototype with unity cutoff frequency. - """ - a, b = map(atleast_1d, (a, b)) - try: - wo = float(wo) - except TypeError: - wo = float(wo[0]) - d = len(a) - n = len(b) - M = max((d, n)) - pwo = pow(wo, numpy.arange(M - 1, -1, -1)) - start1 = max((n - d, 0)) - start2 = max((d - n, 0)) - b = b * pwo[start1] / pwo[start2:] - a = a * pwo[start1] / pwo[start1:] - return normalize(b, a) - - -def lp2hp(b, a, wo=1.0): - """Return a high-pass filter with cutoff frequency `wo` - from a low-pass filter prototype with unity cutoff frequency. 
- """ - a, b = map(atleast_1d, (a, b)) - try: - wo = float(wo) - except TypeError: - wo = float(wo[0]) - d = len(a) - n = len(b) - if wo != 1: - pwo = pow(wo, numpy.arange(max((d, n)))) - else: - pwo = numpy.ones(max((d, n)), b.dtype.char) - if d >= n: - outa = a[::-1] * pwo - outb = resize(b, (d,)) - outb[n:] = 0.0 - outb[:n] = b[::-1] * pwo[:n] - else: - outb = b[::-1] * pwo - outa = resize(a, (n,)) - outa[d:] = 0.0 - outa[:d] = a[::-1] * pwo[:d] - - return normalize(outb, outa) - - -def lp2bp(b, a, wo=1.0, bw=1.0): - """Return a band-pass filter with center frequency `wo` and bandwidth `bw` - from a low-pass filter prototype with unity cutoff frequency. - """ - a, b = map(atleast_1d, (a, b)) - D = len(a) - 1 - N = len(b) - 1 - artype = mintypecode((a, b)) - ma = max([N, D]) - Np = N + ma - Dp = D + ma - bprime = numpy.zeros(Np + 1, artype) - aprime = numpy.zeros(Dp + 1, artype) - wosq = wo * wo - for j in range(Np + 1): - val = 0.0 - for i in range(0, N + 1): - for k in range(0, i + 1): - if ma - i + 2 * k == j: - val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i - bprime[Np - j] = val - for j in range(Dp + 1): - val = 0.0 - for i in range(0, D + 1): - for k in range(0, i + 1): - if ma - i + 2 * k == j: - val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i - aprime[Dp - j] = val - - return normalize(bprime, aprime) - - -def lp2bs(b, a, wo=1, bw=1): - """Return a band-stop filter with center frequency `wo` and bandwidth `bw` - from a low-pass filter prototype with unity cutoff frequency. 
- """ - a, b = map(atleast_1d, (a, b)) - D = len(a) - 1 - N = len(b) - 1 - artype = mintypecode((a, b)) - M = max([N, D]) - Np = M + M - Dp = M + M - bprime = numpy.zeros(Np + 1, artype) - aprime = numpy.zeros(Dp + 1, artype) - wosq = wo * wo - for j in range(Np + 1): - val = 0.0 - for i in range(0, N + 1): - for k in range(0, M - i + 1): - if i + 2 * k == j: - val += (comb(M - i, k) * b[N - i] * - (wosq) ** (M - i - k) * bw ** i) - bprime[Np - j] = val - for j in range(Dp + 1): - val = 0.0 - for i in range(0, D + 1): - for k in range(0, M - i + 1): - if i + 2 * k == j: - val += (comb(M - i, k) * a[D - i] * - (wosq) ** (M - i - k) * bw ** i) - aprime[Dp - j] = val - - return normalize(bprime, aprime) - - -def bilinear(b, a, fs=1.0): - """Return a digital filter from an analog one using a bilinear transform. - - The bilinear transform substitutes ``(z-1) / (z+1``) for ``s``. - """ - fs = float(fs) - a, b = map(atleast_1d, (a, b)) - D = len(a) - 1 - N = len(b) - 1 - artype = float - M = max([N, D]) - Np = M - Dp = M - bprime = numpy.zeros(Np + 1, artype) - aprime = numpy.zeros(Dp + 1, artype) - for j in range(Np + 1): - val = 0.0 - for i in range(N + 1): - for k in range(i + 1): - for l in range(M - i + 1): - if k + l == j: - val += (comb(i, k) * comb(M - i, l) * b[N - i] * - pow(2 * fs, i) * (-1) ** k) - bprime[j] = real(val) - for j in range(Dp + 1): - val = 0.0 - for i in range(D + 1): - for k in range(i + 1): - for l in range(M - i + 1): - if k + l == j: - val += (comb(i, k) * comb(M - i, l) * a[D - i] * - pow(2 * fs, i) * (-1) ** k) - aprime[j] = real(val) - - return normalize(bprime, aprime) - - -def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'): - """Complete IIR digital and analog filter design. - - Given passband and stopband frequencies and gains construct an analog or - digital IIR filter of minimum order for a given basic type. Return the - output in numerator, denominator ('ba') or pole-zero ('zpk') form. 
- - Parameters - ---------- - wp, ws : float - Passband and stopband edge frequencies, normalized from 0 to 1 (1 - corresponds to pi radians / sample). For example: - - - Lowpass: wp = 0.2, ws = 0.3 - - Highpass: wp = 0.3, ws = 0.2 - - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] - - gpass : float - The maximum loss in the passband (dB). - gstop : float - The minimum attenuation in the stopband (dB). - analog : int, optional - Non-zero to design an analog filter (in this case `wp` and `ws` are in - radians / second). - ftype : str, optional - The type of IIR filter to design: - - - elliptic : 'ellip' - - Butterworth : 'butter', - - Chebyshev I : 'cheby1', - - Chebyshev II: 'cheby2', - - Bessel : 'bessel' - - output : ['ba', 'zpk'], optional - Type of output: numerator/denominator ('ba') or pole-zero ('zpk'). - Default is 'ba'. - - Returns - ------- - b, a : - Numerator and denominator of the IIR filter. Only returned if - ``output='ba'``. - z, p, k : Zeros, poles, and gain of the IIR filter. Only returned if - ``output='zpk'``. - - """ - - try: - ordfunc = filter_dict[ftype][1] - except KeyError: - raise ValueError("Invalid IIR filter type: %s" % ftype) - except IndexError: - raise ValueError(("%s does not have order selection use " - "iirfilter function.") % ftype) - - wp = atleast_1d(wp) - ws = atleast_1d(ws) - band_type = 2 * (len(wp) - 1) - band_type += 1 - if wp[0] >= ws[0]: - band_type += 1 - - btype = {1: 'lowpass', 2: 'highpass', - 3: 'bandstop', 4: 'bandpass'}[band_type] - - N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog) - return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, - ftype=ftype, output=output) - - -def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, - ftype='butter', output='ba'): - """IIR digital and analog filter design given order and critical points. 
- - Design an Nth order lowpass digital or analog filter and return the filter - coefficients in (B,A) (numerator, denominator) or (Z,P,K) form. - - Parameters - ---------- - N : int - The order of the filter. - Wn : array_like - A scalar or length-2 sequence giving the critical frequencies. - rp : float, optional - For Chebyshev and elliptic filters provides the maximum ripple - in the passband. - rs : float, optional - For chebyshev and elliptic filters provides the minimum attenuation in - the stop band. - btype : str, optional - The type of filter (lowpass, highpass, bandpass, bandstop). - Default is bandpass. - analog : int, optional - Non-zero to return an analog filter, otherwise a digital filter is - returned. - ftype : str, optional - The type of IIR filter to design: - - - elliptic : 'ellip' - - Butterworth : 'butter', - - Chebyshev I : 'cheby1', - - Chebyshev II: 'cheby2', - - Bessel : 'bessel' - - output : ['ba', 'zpk'], optional - Type of output: numerator/denominator ('ba') or pole-zero ('zpk'). - Default is 'ba'. - - See Also - -------- - butterord, cheb1ord, cheb2ord, ellipord - """ - - ftype, btype, output = [x.lower() for x in (ftype, btype, output)] - Wn = asarray(Wn) - try: - btype = band_dict[btype] - except KeyError: - raise ValueError("%s is an invalid bandtype for filter." % btype) - - try: - typefunc = filter_dict[ftype][0] - except KeyError: - raise ValueError("%s is not a valid basic iir filter." % ftype) - - if output not in ['ba', 'zpk']: - raise ValueError("%s is not a valid output form." 
% output) - - # pre-warp frequencies for digital filter design - if not analog: - fs = 2.0 - warped = 2 * fs * tan(pi * Wn / fs) - else: - warped = Wn - - # convert to low-pass prototype - if btype in ['lowpass', 'highpass']: - wo = warped - else: - bw = warped[1] - warped[0] - wo = sqrt(warped[0] * warped[1]) - - # Get analog lowpass prototype - if typefunc in [buttap, besselap]: - z, p, k = typefunc(N) - elif typefunc == cheb1ap: - if rp is None: - raise ValueError("passband ripple (rp) must be provided to " - "design a Chebyshev I filter.") - z, p, k = typefunc(N, rp) - elif typefunc == cheb2ap: - if rs is None: - raise ValueError("stopband atteunatuion (rs) must be provided to " - "design an Chebyshev II filter.") - z, p, k = typefunc(N, rs) - else: # Elliptic filters - if rs is None or rp is None: - raise ValueError("Both rp and rs must be provided to design an " - "elliptic filter.") - z, p, k = typefunc(N, rp, rs) - - b, a = zpk2tf(z, p, k) - - # transform to lowpass, bandpass, highpass, or bandstop - if btype == 'lowpass': - b, a = lp2lp(b, a, wo=wo) - elif btype == 'highpass': - b, a = lp2hp(b, a, wo=wo) - elif btype == 'bandpass': - b, a = lp2bp(b, a, wo=wo, bw=bw) - else: # 'bandstop' - b, a = lp2bs(b, a, wo=wo, bw=bw) - - # Find discrete equivalent if necessary - if not analog: - b, a = bilinear(b, a, fs=fs) - - # Transform to proper out type (pole-zero, state-space, numer-denom) - if output == 'zpk': - return tf2zpk(b, a) - else: - return b, a - - -def butter(N, Wn, btype='low', analog=0, output='ba'): - """Butterworth digital and analog filter design. - - Design an Nth order lowpass digital or analog Butterworth filter and return - the filter coefficients in (B,A) or (Z,P,K) form. - - See also - -------- - buttord. - """ - return iirfilter(N, Wn, btype=btype, analog=analog, - output=output, ftype='butter') - - -def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'): - """Chebyshev type I digital and analog filter design. 
- - Design an Nth order lowpass digital or analog Chebyshev type I filter and - return the filter coefficients in (B,A) or (Z,P,K) form. - - See also - -------- - cheb1ord. - """ - return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, - output=output, ftype='cheby1') - - -def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'): - """Chebyshev type I digital and analog filter design. - - Design an Nth order lowpass digital or analog Chebyshev type I filter and - return the filter coefficients in (B,A) or (Z,P,K) form. - - See also - -------- - cheb2ord. - """ - return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, - output=output, ftype='cheby2') - - -def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'): - """Elliptic (Cauer) digital and analog filter design. - - Design an Nth order lowpass digital or analog elliptic filter and return - the filter coefficients in (B,A) or (Z,P,K) form. - - See also - -------- - ellipord. - """ - return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, - output=output, ftype='elliptic') - - -def bessel(N, Wn, btype='low', analog=0, output='ba'): - """Bessel digital and analog filter design. - - Design an Nth order lowpass digital or analog Bessel filter and return the - filter coefficients in (B,A) or (Z,P,K) form. - - """ - return iirfilter(N, Wn, btype=btype, analog=analog, - output=output, ftype='bessel') - - -def maxflat(): - pass - - -def yulewalk(): - pass - - -def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): - """Band Stop Objective Function for order minimization. - - Returns the non-integer order for an analog band stop filter. - - Parameters - ---------- - wp : - Edge of passband `passb`. - ind : int - Index specifying which `passb` edge to vary (0 or 1). - passb : array_like - Two element sequence of fixed passband edges. - stopb : array_like - Two element sequence of fixed stopband edges. - gstop : float - Amount of attenuation in stopband in dB. 
- gpass : float - Amount of ripple in the passband in dB. - type : ['butter', 'cheby', 'ellip'] - Type of filter. - - Returns - ------- - n : scalar - Filter order (possibly non-integer). - """ - - passbC = passb.copy() - passbC[ind] = wp - nat = (stopb * (passbC[0] - passbC[1]) / - (stopb ** 2 - passbC[0] * passbC[1])) - nat = min(abs(nat)) - - if type == 'butter': - GSTOP = 10 ** (0.1 * abs(gstop)) - GPASS = 10 ** (0.1 * abs(gpass)) - n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))) - elif type == 'cheby': - GSTOP = 10 ** (0.1 * abs(gstop)) - GPASS = 10 ** (0.1 * abs(gpass)) - n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) - elif type == 'ellip': - GSTOP = 10 ** (0.1 * gstop) - GPASS = 10 ** (0.1 * gpass) - arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) - arg0 = 1.0 / nat - d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) - d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) - n = (d0[0] * d1[1] / (d0[1] * d1[0])) - else: - raise ValueError("Incorrect type: %s" % type) - return n - - -def buttord(wp, ws, gpass, gstop, analog=0): - """Butterworth filter order selection. - - Return the order of the lowest order digital Butterworth filter that loses - no more than `gpass` dB in the passband and has at least `gstop` dB - attenuation in the stopband. - - Parameters - ---------- - wp, ws : float - Passband and stopband edge frequencies, normalized from 0 to 1 (1 - corresponds to pi radians / sample). For example: - - - Lowpass: wp = 0.2, ws = 0.3 - - Highpass: wp = 0.3, ws = 0.2 - - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] - - gpass : float - The maximum loss in the passband (dB). - gstop : float - The minimum attenuation in the stopband (dB). - analog : int, optional - Non-zero to design an analog filter (in this case `wp` and `ws` are in - radians / second). - - Returns - ------- - ord : int - The lowest order for a Butterworth filter which meets specs. 
- wn : ndarray or float - The Butterworth natural frequency (i.e. the "3dB frequency"). Should - be used with `butter` to give filter results. - - """ - - wp = atleast_1d(wp) - ws = atleast_1d(ws) - filter_type = 2 * (len(wp) - 1) - filter_type += 1 - if wp[0] >= ws[0]: - filter_type += 1 - - # Pre-warp frequencies - if not analog: - passb = tan(wp * pi / 2.0) - stopb = tan(ws * pi / 2.0) - else: - passb = wp * 1.0 - stopb = ws * 1.0 - - if filter_type == 1: # low - nat = stopb / passb - elif filter_type == 2: # high - nat = passb / stopb - elif filter_type == 3: # stop - wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, - args=(0, passb, stopb, gpass, gstop, - 'butter'), - disp=0) - passb[0] = wp0 - wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], - args=(1, passb, stopb, gpass, gstop, - 'butter'), - disp=0) - passb[1] = wp1 - nat = ((stopb * (passb[0] - passb[1])) / - (stopb ** 2 - passb[0] * passb[1])) - elif filter_type == 4: # pass - nat = ((stopb ** 2 - passb[0] * passb[1]) / - (stopb * (passb[0] - passb[1]))) - - nat = min(abs(nat)) - - GSTOP = 10 ** (0.1 * abs(gstop)) - GPASS = 10 ** (0.1 * abs(gpass)) - ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))) - - # Find the butterworth natural frequency W0 (or the "3dB" frequency") - # to give exactly gstop at nat. W0 will be between 1 and nat - try: - W0 = nat / ((10 ** (0.1 * abs(gstop)) - 1) ** (1.0 / (2.0 * ord))) - except ZeroDivisionError: - W0 = nat - print "Warning, order is zero...check input parametegstop." 
- - # now convert this frequency back from lowpass prototype - # to the original analog filter - - if filter_type == 1: # low - WN = W0 * passb - elif filter_type == 2: # high - WN = passb / W0 - elif filter_type == 3: # stop - WN = numpy.zeros(2, float) - discr = sqrt((passb[1] - passb[0]) ** 2 + - 4 * W0 ** 2 * passb[0] * passb[1]) - WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) - WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) - WN = numpy.sort(abs(WN)) - elif filter_type == 4: # pass - W0 = numpy.array([-W0, W0], float) - WN = (-W0 * (passb[1] - passb[0]) / 2.0 + - sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + - passb[0] * passb[1])) - WN = numpy.sort(abs(WN)) - else: - raise ValueError("Bad type: %s" % filter_type) - - if not analog: - wn = (2.0 / pi) * arctan(WN) - else: - wn = WN - - if len(wn) == 1: - wn = wn[0] - return ord, wn - - -def cheb1ord(wp, ws, gpass, gstop, analog=0): - """Chebyshev type I filter order selection. - - Return the order of the lowest order digital Chebyshev Type I filter that - loses no more than `gpass` dB in the passband and has at least `gstop` dB - attenuation in the stopband. - - Parameters - ---------- - wp, ws : float - Passband and stopband edge frequencies, normalized from 0 to 1 (1 - corresponds to pi radians / sample). For example: - - - Lowpass: wp = 0.2, ws = 0.3 - - Highpass: wp = 0.3, ws = 0.2 - - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] - - gpass : float - The maximum loss in the passband (dB). - gstop : float - The minimum attenuation in the stopband (dB). - analog : int, optional - Non-zero to design an analog filter (in this case `wp` and `ws` are in - radians / second). - - Returns - ------- - ord : int - The lowest order for a Chebyshev type I filter that meets specs. - wn : ndarray or float - The Chebyshev natural frequency (the "3dB frequency") for use with - `cheby1` to give filter results. 
- - """ - wp = atleast_1d(wp) - ws = atleast_1d(ws) - filter_type = 2 * (len(wp) - 1) - if wp[0] < ws[0]: - filter_type += 1 - else: - filter_type += 2 - - # Pre-wagpass frequencies - if not analog: - passb = tan(pi * wp / 2.) - stopb = tan(pi * ws / 2.) - else: - passb = wp * 1.0 - stopb = ws * 1.0 - - if filter_type == 1: # low - nat = stopb / passb - elif filter_type == 2: # high - nat = passb / stopb - elif filter_type == 3: # stop - wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, - args=(0, passb, stopb, gpass, gstop, 'cheby'), - disp=0) - passb[0] = wp0 - wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], - args=(1, passb, stopb, gpass, gstop, 'cheby'), - disp=0) - passb[1] = wp1 - nat = ((stopb * (passb[0] - passb[1])) / - (stopb ** 2 - passb[0] * passb[1])) - elif filter_type == 4: # pass - nat = ((stopb ** 2 - passb[0] * passb[1]) / - (stopb * (passb[0] - passb[1]))) - - nat = min(abs(nat)) - - GSTOP = 10 ** (0.1 * abs(gstop)) - GPASS = 10 ** (0.1 * abs(gpass)) - ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / - arccosh(nat))) - - # Natural frequencies are just the passband edges - if not analog: - wn = (2.0 / pi) * arctan(passb) - else: - wn = passb - - if len(wn) == 1: - wn = wn[0] - return ord, wn - - -def cheb2ord(wp, ws, gpass, gstop, analog=0): - """Chebyshev type II filter order selection. - - Description: - - Return the order of the lowest order digital Chebyshev Type II filter - that loses no more than gpass dB in the passband and has at least - gstop dB attenuation in the stopband. - - Parameters - ---------- - wp, ws : float - Passband and stopband edge frequencies, normalized from 0 to 1 (1 - corresponds to pi radians / sample). For example: - - - Lowpass: wp = 0.2, ws = 0.3 - - Highpass: wp = 0.3, ws = 0.2 - - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] - - gpass : float - The maximum loss in the passband (dB). 
- gstop : float - The minimum attenuation in the stopband (dB). - analog : int, optional - Non-zero to design an analog filter (in this case `wp` and `ws` are in - radians / second). - - Returns - ------- - ord : int - The lowest order for a Chebyshev type II filter that meets specs. - wn : ndarray or float - The Chebyshev natural frequency (the "3dB frequency") for use with - `cheby2` to give filter results. - - """ - wp = atleast_1d(wp) - ws = atleast_1d(ws) - filter_type = 2 * (len(wp) - 1) - if wp[0] < ws[0]: - filter_type += 1 - else: - filter_type += 2 - - # Pre-wagpass frequencies - if not analog: - passb = tan(pi * wp / 2.0) - stopb = tan(pi * ws / 2.0) - else: - passb = wp * 1.0 - stopb = ws * 1.0 - - if filter_type == 1: # low - nat = stopb / passb - elif filter_type == 2: # high - nat = passb / stopb - elif filter_type == 3: # stop - wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, - args=(0, passb, stopb, gpass, gstop, 'cheby'), - disp=0) - passb[0] = wp0 - wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], - args=(1, passb, stopb, gpass, gstop, 'cheby'), - disp=0) - passb[1] = wp1 - nat = ((stopb * (passb[0] - passb[1])) / - (stopb ** 2 - passb[0] * passb[1])) - elif filter_type == 4: # pass - nat = ((stopb ** 2 - passb[0] * passb[1]) / - (stopb * (passb[0] - passb[1]))) - - nat = min(abs(nat)) - - GSTOP = 10 ** (0.1 * abs(gstop)) - GPASS = 10 ** (0.1 * abs(gpass)) - ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / - arccosh(nat))) - - # Find frequency where analog response is -gpass dB. - # Then convert back from low-pass prototype to the original filter. 
- - new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0)))) - new_freq = 1.0 / new_freq - - if filter_type == 1: - nat = passb / new_freq - elif filter_type == 2: - nat = passb * new_freq - elif filter_type == 3: - nat = numpy.zeros(2, float) - nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) + - sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 + - passb[1] * passb[0])) - nat[1] = passb[1] * passb[0] / nat[0] - elif filter_type == 4: - nat = numpy.zeros(2, float) - nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) + - sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) + - passb[1] * passb[0])) - nat[1] = passb[0] * passb[1] / nat[0] - - if not analog: - wn = (2.0 / pi) * arctan(nat) - else: - wn = nat - - if len(wn) == 1: - wn = wn[0] - return ord, wn - - -def ellipord(wp, ws, gpass, gstop, analog=0): - """Elliptic (Cauer) filter order selection. - - Return the order of the lowest order digital elliptic filter that loses no - more than gpass dB in the passband and has at least gstop dB attenuation in - the stopband. - - Parameters - ---------- - wp, ws : float - Passband and stopband edge frequencies, normalized from 0 to 1 (1 - corresponds to pi radians / sample). For example: - - - Lowpass: wp = 0.2, ws = 0.3 - - Highpass: wp = 0.3, ws = 0.2 - - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] - - gpass : float - The maximum loss in the passband (dB). - gstop : float - The minimum attenuation in the stopband (dB). - analog : int, optional - Non-zero to design an analog filter (in this case `wp` and `ws` are in - radians / second). - - Returns - ------ - ord : int - The lowest order for an Elliptic (Cauer) filter that meets specs. 
- wn : ndarray or float - The Chebyshev natural frequency (the "3dB frequency") for use with - `ellip` to give filter results.- - - """ - wp = atleast_1d(wp) - ws = atleast_1d(ws) - filter_type = 2 * (len(wp) - 1) - filter_type += 1 - if wp[0] >= ws[0]: - filter_type += 1 - - # Pre-wagpass frequencies - if analog: - passb = wp * 1.0 - stopb = ws * 1.0 - else: - passb = tan(wp * pi / 2.0) - stopb = tan(ws * pi / 2.0) - - if filter_type == 1: # low - nat = stopb / passb - elif filter_type == 2: # high - nat = passb / stopb - elif filter_type == 3: # stop - wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, - args=(0, passb, stopb, gpass, gstop, 'ellip'), - disp=0) - passb[0] = wp0 - wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], - args=(1, passb, stopb, gpass, gstop, 'ellip'), - disp=0) - passb[1] = wp1 - nat = ((stopb * (passb[0] - passb[1])) / - (stopb ** 2 - passb[0] * passb[1])) - elif filter_type == 4: # pass - nat = ((stopb ** 2 - passb[0] * passb[1]) / - (stopb * (passb[0] - passb[1]))) - - nat = min(abs(nat)) - - GSTOP = 10 ** (0.1 * gstop) - GPASS = 10 ** (0.1 * gpass) - arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) - arg0 = 1.0 / nat - d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) - d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) - ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) - - if not analog: - wn = arctan(passb) * 2.0 / pi - else: - wn = passb - - if len(wn) == 1: - wn = wn[0] - return ord, wn - - -def buttap(N): - """Return (z,p,k) zero, pole, gain for analog prototype of an Nth - order Butterworth filter.""" - z = [] - n = numpy.arange(1, N + 1) - p = numpy.exp(1j * (2 * n - 1) / (2.0 * N) * pi) * 1j - k = 1 - return z, p, k - - -def cheb1ap(N, rp): - """Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass - analog filter prototype with `rp` decibels of ripple in the passband. 
- """ - z = [] - eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0) - n = numpy.arange(1, N + 1) - mu = 1.0 / N * numpy.log((1.0 + numpy.sqrt(1 + eps * eps)) / eps) - theta = pi / 2.0 * (2 * n - 1.0) / N - p = (-numpy.sinh(mu) * numpy.sin(theta) + - 1j * numpy.cosh(mu) * numpy.cos(theta)) - k = numpy.prod(-p, axis=0).real - if N % 2 == 0: - k = k / sqrt((1 + eps * eps)) - return z, p, k - - -def cheb2ap(N, rs): - """Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass - analog filter prototype with `rs` decibels of ripple in the stopband. - """ - de = 1.0 / sqrt(10 ** (0.1 * rs) - 1) - mu = arcsinh(1.0 / de) / N - - if N % 2: - m = N - 1 - n = numpy.concatenate((numpy.arange(1, N - 1, 2), - numpy.arange(N + 2, 2 * N, 2))) - else: - m = N - n = numpy.arange(1, 2 * N, 2) - - z = conjugate(1j / cos(n * pi / (2.0 * N))) - p = exp(1j * (pi * numpy.arange(1, 2 * N, 2) / (2.0 * N) + pi / 2.0)) - p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag - p = 1.0 / p - k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real - return z, p, k - - -EPSILON = 2e-16 - - -def _vratio(u, ineps, mp): - [s, c, d, phi] = special.ellipj(u, mp) - ret = abs(ineps - s / c) - return ret - - -def _kratio(m, k_ratio): - m = float(m) - if m < 0: - m = 0.0 - if m > 1: - m = 1.0 - if abs(m) > EPSILON and (abs(m) + EPSILON) < 1: - k = special.ellipk([m, 1 - m]) - r = k[0] / k[1] - k_ratio - elif abs(m) > EPSILON: - r = -k_ratio - else: - r = 1e20 - return abs(r) - - -def ellipap(N, rp, rs): - """Return (z,p,k) zeros, poles, and gain of an Nth order normalized - prototype elliptic analog lowpass filter with `rp` decibels of ripple in - the passband and a stopband `rs` decibels down. - - References - ---------- - Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5 - and 12. 
- - """ - if N == 1: - p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0)) - k = -p - z = [] - return z, p, k - - eps = numpy.sqrt(10 ** (0.1 * rp) - 1) - ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1) - ck1p = numpy.sqrt(1 - ck1 * ck1) - if ck1p == 1: - raise ValueError("Cannot design a filter with given rp and rs" - " specifications.") - - wp = 1 - val = special.ellipk([ck1 * ck1, ck1p * ck1p]) - if abs(1 - ck1p * ck1p) < EPSILON: - krat = 0 - else: - krat = N * val[0] / val[1] - - m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250, - disp=0) - if m < 0 or m > 1: - m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250, - maxiter=250, disp=0) - - capk = special.ellipk(m) - ws = wp / sqrt(m) - m1 = 1 - m - - j = numpy.arange(1 - N % 2, N, 2) - jj = len(j) - - [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) - snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) - z = 1.0 / (sqrt(m) * snew) - z = 1j * z - z = numpy.concatenate((z, conjugate(z))) - - r = optimize.fmin(_vratio, special.ellipk(m), args=(1. 
/ eps, ck1p * ck1p), - maxfun=250, maxiter=250, disp=0) - v0 = capk * r / (N * val[0]) - - [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) - p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) - - if N % 2: - newp = numpy.compress(abs(p.imag) > EPSILON * - numpy.sqrt(numpy.sum(p * numpy.conjugate(p), - axis=0).real), - p, axis=-1) - p = numpy.concatenate((p, conjugate(newp))) - else: - p = numpy.concatenate((p, conjugate(p))) - - k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real - if N % 2 == 0: - k = k / numpy.sqrt((1 + eps * eps)) - - return z, p, k - - -def besselap(N): - """Return (z,p,k) zero, pole, gain for analog prototype of an Nth order - Bessel filter.""" - z = [] - k = 1 - if N == 0: - p = [] - elif N == 1: - p = [-1] - elif N == 2: - p = [-.8660254037844386467637229 + .4999999999999999999999996j, - -.8660254037844386467637229 - .4999999999999999999999996j] - elif N == 3: - p = [-.9416000265332067855971980, - -.7456403858480766441810907 - .7113666249728352680992154j, - -.7456403858480766441810907 + .7113666249728352680992154j] - elif N == 4: - p = [-.6572111716718829545787781 - .8301614350048733772399715j, - -.6572111716718829545787788 + .8301614350048733772399715j, - -.9047587967882449459642637 - .2709187330038746636700923j, - -.9047587967882449459642624 + .2709187330038746636700926j] - elif N == 5: - p = [-.9264420773877602247196260, - -.8515536193688395541722677 - .4427174639443327209850002j, - -.8515536193688395541722677 + .4427174639443327209850002j, - -.5905759446119191779319432 - .9072067564574549539291747j, - -.5905759446119191779319432 + .9072067564574549539291747j] - elif N == 6: - p = [-.9093906830472271808050953 - .1856964396793046769246397j, - -.9093906830472271808050953 + .1856964396793046769246397j, - -.7996541858328288520243325 - .5621717346937317988594118j, - -.7996541858328288520243325 + .5621717346937317988594118j, - -.5385526816693109683073792 - .9616876881954277199245657j, - -.5385526816693109683073792 + 
.9616876881954277199245657j] - elif N == 7: - p = [-.9194871556490290014311619, - -.8800029341523374639772340 - .3216652762307739398381830j, - -.8800029341523374639772340 + .3216652762307739398381830j, - -.7527355434093214462291616 - .6504696305522550699212995j, - -.7527355434093214462291616 + .6504696305522550699212995j, - -.4966917256672316755024763 - 1.002508508454420401230220j, - -.4966917256672316755024763 + 1.002508508454420401230220j] - elif N == 8: - p = [-.9096831546652910216327629 - .1412437976671422927888150j, - -.9096831546652910216327629 + .1412437976671422927888150j, - -.8473250802359334320103023 - .4259017538272934994996429j, - -.8473250802359334320103023 + .4259017538272934994996429j, - -.7111381808485399250796172 - .7186517314108401705762571j, - -.7111381808485399250796172 + .7186517314108401705762571j, - -.4621740412532122027072175 - 1.034388681126901058116589j, - -.4621740412532122027072175 + 1.034388681126901058116589j] - elif N == 9: - p = [-.9154957797499037686769223, - -.8911217017079759323183848 - .2526580934582164192308115j, - -.8911217017079759323183848 + .2526580934582164192308115j, - -.8148021112269012975514135 - .5085815689631499483745341j, - -.8148021112269012975514135 + .5085815689631499483745341j, - -.6743622686854761980403401 - .7730546212691183706919682j, - -.6743622686854761980403401 + .7730546212691183706919682j, - -.4331415561553618854685942 - 1.060073670135929666774323j, - -.4331415561553618854685942 + 1.060073670135929666774323j] - elif N == 10: - p = [-.9091347320900502436826431 - .1139583137335511169927714j, - -.9091347320900502436826431 + .1139583137335511169927714j, - -.8688459641284764527921864 - .3430008233766309973110589j, - -.8688459641284764527921864 + .3430008233766309973110589j, - -.7837694413101441082655890 - .5759147538499947070009852j, - -.7837694413101441082655890 + .5759147538499947070009852j, - -.6417513866988316136190854 - .8175836167191017226233947j, - -.6417513866988316136190854 + 
.8175836167191017226233947j, - -.4083220732868861566219785 - 1.081274842819124562037210j, - -.4083220732868861566219785 + 1.081274842819124562037210j] - elif N == 11: - p = [-.9129067244518981934637318, - -.8963656705721166099815744 - .2080480375071031919692341j, - -.8963656705721166099815744 + .2080480375071031919692341j, - -.8453044014712962954184557 - .4178696917801248292797448j, - -.8453044014712962954184557 + .4178696917801248292797448j, - -.7546938934722303128102142 - .6319150050721846494520941j, - -.7546938934722303128102142 + .6319150050721846494520941j, - -.6126871554915194054182909 - .8547813893314764631518509j, - -.6126871554915194054182909 + .8547813893314764631518509j, - -.3868149510055090879155425 - 1.099117466763120928733632j, - -.3868149510055090879155425 + 1.099117466763120928733632j] - elif N == 12: - p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j, - -.9084478234140682638817772 + 95506365213450398415258360.0e-27j, - -.8802534342016826507901575 - .2871779503524226723615457j, - -.8802534342016826507901575 + .2871779503524226723615457j, - -.8217296939939077285792834 - .4810212115100676440620548j, - -.8217296939939077285792834 + .4810212115100676440620548j, - -.7276681615395159454547013 - .6792961178764694160048987j, - -.7276681615395159454547013 + .6792961178764694160048987j, - -.5866369321861477207528215 - .8863772751320727026622149j, - -.5866369321861477207528215 + .8863772751320727026622149j, - -.3679640085526312839425808 - 1.114373575641546257595657j, - -.3679640085526312839425808 + 1.114373575641546257595657j] - elif N == 13: - p = [-.9110914665984182781070663, - -.8991314665475196220910718 - .1768342956161043620980863j, - -.8991314665475196220910718 + .1768342956161043620980863j, - -.8625094198260548711573628 - .3547413731172988997754038j, - -.8625094198260548711573628 + .3547413731172988997754038j, - -.7987460692470972510394686 - .5350752120696801938272504j, - -.7987460692470972510394686 + .5350752120696801938272504j, - 
-.7026234675721275653944062 - .7199611890171304131266374j, - -.7026234675721275653944062 + .7199611890171304131266374j, - -.5631559842430199266325818 - .9135900338325109684927731j, - -.5631559842430199266325818 + .9135900338325109684927731j, - -.3512792323389821669401925 - 1.127591548317705678613239j, - -.3512792323389821669401925 + 1.127591548317705678613239j] - elif N == 14: - p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j, - -.9077932138396487614720659 + 82196399419401501888968130.0e-27j, - -.8869506674916445312089167 - .2470079178765333183201435j, - -.8869506674916445312089167 + .2470079178765333183201435j, - -.8441199160909851197897667 - .4131653825102692595237260j, - -.8441199160909851197897667 + .4131653825102692595237260j, - -.7766591387063623897344648 - .5819170677377608590492434j, - -.7766591387063623897344648 + .5819170677377608590492434j, - -.6794256425119233117869491 - .7552857305042033418417492j, - -.6794256425119233117869491 + .7552857305042033418417492j, - -.5418766775112297376541293 - .9373043683516919569183099j, - -.5418766775112297376541293 + .9373043683516919569183099j, - -.3363868224902037330610040 - 1.139172297839859991370924j, - -.3363868224902037330610040 + 1.139172297839859991370924j] - elif N == 15: - p = [-.9097482363849064167228581, - -.9006981694176978324932918 - .1537681197278439351298882j, - -.9006981694176978324932918 + .1537681197278439351298882j, - -.8731264620834984978337843 - .3082352470564267657715883j, - -.8731264620834984978337843 + .3082352470564267657715883j, - -.8256631452587146506294553 - .4642348752734325631275134j, - -.8256631452587146506294553 + .4642348752734325631275134j, - -.7556027168970728127850416 - .6229396358758267198938604j, - -.7556027168970728127850416 + .6229396358758267198938604j, - -.6579196593110998676999362 - .7862895503722515897065645j, - -.6579196593110998676999362 + .7862895503722515897065645j, - -.5224954069658330616875186 - .9581787261092526478889345j, - 
-.5224954069658330616875186 + .9581787261092526478889345j, - -.3229963059766444287113517 - 1.149416154583629539665297j, - -.3229963059766444287113517 + 1.149416154583629539665297j] - elif N == 16: - p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j, - -.9072099595087001356491337 + 72142113041117326028823950.0e-27j, - -.8911723070323647674780132 - .2167089659900576449410059j, - -.8911723070323647674780132 + .2167089659900576449410059j, - -.8584264231521330481755780 - .3621697271802065647661080j, - -.8584264231521330481755780 + .3621697271802065647661080j, - -.8074790293236003885306146 - .5092933751171800179676218j, - -.8074790293236003885306146 + .5092933751171800179676218j, - -.7356166304713115980927279 - .6591950877860393745845254j, - -.7356166304713115980927279 + .6591950877860393745845254j, - -.6379502514039066715773828 - .8137453537108761895522580j, - -.6379502514039066715773828 + .8137453537108761895522580j, - -.5047606444424766743309967 - .9767137477799090692947061j, - -.5047606444424766743309967 + .9767137477799090692947061j, - -.3108782755645387813283867 - 1.158552841199330479412225j, - -.3108782755645387813283867 + 1.158552841199330479412225j] - elif N == 17: - p = [-.9087141161336397432860029, - -.9016273850787285964692844 - .1360267995173024591237303j, - -.9016273850787285964692844 + .1360267995173024591237303j, - -.8801100704438627158492165 - .2725347156478803885651973j, - -.8801100704438627158492165 + .2725347156478803885651973j, - -.8433414495836129204455491 - .4100759282910021624185986j, - -.8433414495836129204455491 + .4100759282910021624185986j, - -.7897644147799708220288138 - .5493724405281088674296232j, - -.7897644147799708220288138 + .5493724405281088674296232j, - -.7166893842372349049842743 - .6914936286393609433305754j, - -.7166893842372349049842743 + .6914936286393609433305754j, - -.6193710717342144521602448 - .8382497252826992979368621j, - -.6193710717342144521602448 + .8382497252826992979368621j, - 
-.4884629337672704194973683 - .9932971956316781632345466j, - -.4884629337672704194973683 + .9932971956316781632345466j, - -.2998489459990082015466971 - 1.166761272925668786676672j, - -.2998489459990082015466971 + 1.166761272925668786676672j] - elif N == 18: - p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j, - -.9067004324162775554189031 + 64279241063930693839360680.0e-27j, - -.8939764278132455733032155 - .1930374640894758606940586j, - -.8939764278132455733032155 + .1930374640894758606940586j, - -.8681095503628830078317207 - .3224204925163257604931634j, - -.8681095503628830078317207 + .3224204925163257604931634j, - -.8281885016242836608829018 - .4529385697815916950149364j, - -.8281885016242836608829018 + .4529385697815916950149364j, - -.7726285030739558780127746 - .5852778162086640620016316j, - -.7726285030739558780127746 + .5852778162086640620016316j, - -.6987821445005273020051878 - .7204696509726630531663123j, - -.6987821445005273020051878 + .7204696509726630531663123j, - -.6020482668090644386627299 - .8602708961893664447167418j, - -.6020482668090644386627299 + .8602708961893664447167418j, - -.4734268069916151511140032 - 1.008234300314801077034158j, - -.4734268069916151511140032 + 1.008234300314801077034158j, - -.2897592029880489845789953 - 1.174183010600059128532230j, - -.2897592029880489845789953 + 1.174183010600059128532230j] - elif N == 19: - p = [-.9078934217899404528985092, - -.9021937639390660668922536 - .1219568381872026517578164j, - -.9021937639390660668922536 + .1219568381872026517578164j, - -.8849290585034385274001112 - .2442590757549818229026280j, - -.8849290585034385274001112 + .2442590757549818229026280j, - -.8555768765618421591093993 - .3672925896399872304734923j, - -.8555768765618421591093993 + .3672925896399872304734923j, - -.8131725551578197705476160 - .4915365035562459055630005j, - -.8131725551578197705476160 + .4915365035562459055630005j, - -.7561260971541629355231897 - .6176483917970178919174173j, - 
-.7561260971541629355231897 + .6176483917970178919174173j, - -.6818424412912442033411634 - .7466272357947761283262338j, - -.6818424412912442033411634 + .7466272357947761283262338j, - -.5858613321217832644813602 - .8801817131014566284786759j, - -.5858613321217832644813602 + .8801817131014566284786759j, - -.4595043449730988600785456 - 1.021768776912671221830298j, - -.4595043449730988600785456 + 1.021768776912671221830298j, - -.2804866851439370027628724 - 1.180931628453291873626003j, - -.2804866851439370027628724 + 1.180931628453291873626003j] - elif N == 20: - p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j, - -.9062570115576771146523497 + 57961780277849516990208850.0e-27j, - -.8959150941925768608568248 - .1740317175918705058595844j, - -.8959150941925768608568248 + .1740317175918705058595844j, - -.8749560316673332850673214 - .2905559296567908031706902j, - -.8749560316673332850673214 + .2905559296567908031706902j, - -.8427907479956670633544106 - .4078917326291934082132821j, - -.8427907479956670633544106 + .4078917326291934082132821j, - -.7984251191290606875799876 - .5264942388817132427317659j, - -.7984251191290606875799876 + .5264942388817132427317659j, - -.7402780309646768991232610 - .6469975237605228320268752j, - -.7402780309646768991232610 + .6469975237605228320268752j, - -.6658120544829934193890626 - .7703721701100763015154510j, - -.6658120544829934193890626 + .7703721701100763015154510j, - -.5707026806915714094398061 - .8982829066468255593407161j, - -.5707026806915714094398061 + .8982829066468255593407161j, - -.4465700698205149555701841 - 1.034097702560842962315411j, - -.4465700698205149555701841 + 1.034097702560842962315411j, - -.2719299580251652601727704 - 1.187099379810885886139638j, - -.2719299580251652601727704 + 1.187099379810885886139638j] - elif N == 21: - p = [-.9072262653142957028884077, - -.9025428073192696303995083 - .1105252572789856480992275j, - -.9025428073192696303995083 + .1105252572789856480992275j, - 
-.8883808106664449854431605 - .2213069215084350419975358j, - -.8883808106664449854431605 + .2213069215084350419975358j, - -.8643915813643204553970169 - .3326258512522187083009453j, - -.8643915813643204553970169 + .3326258512522187083009453j, - -.8299435470674444100273463 - .4448177739407956609694059j, - -.8299435470674444100273463 + .4448177739407956609694059j, - -.7840287980408341576100581 - .5583186348022854707564856j, - -.7840287980408341576100581 + .5583186348022854707564856j, - -.7250839687106612822281339 - .6737426063024382240549898j, - -.7250839687106612822281339 + .6737426063024382240549898j, - -.6506315378609463397807996 - .7920349342629491368548074j, - -.6506315378609463397807996 + .7920349342629491368548074j, - -.5564766488918562465935297 - .9148198405846724121600860j, - -.5564766488918562465935297 + .9148198405846724121600860j, - -.4345168906815271799687308 - 1.045382255856986531461592j, - -.4345168906815271799687308 + 1.045382255856986531461592j, - -.2640041595834031147954813 - 1.192762031948052470183960j, - -.2640041595834031147954813 + 1.192762031948052470183960j] - elif N == 22: - p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j, - -.9058702269930872551848625 + 52774908289999045189007100.0e-27j, - -.8972983138153530955952835 - .1584351912289865608659759j, - -.8972983138153530955952835 + .1584351912289865608659759j, - -.8799661455640176154025352 - .2644363039201535049656450j, - -.8799661455640176154025352 + .2644363039201535049656450j, - -.8534754036851687233084587 - .3710389319482319823405321j, - -.8534754036851687233084587 + .3710389319482319823405321j, - -.8171682088462720394344996 - .4785619492202780899653575j, - -.8171682088462720394344996 + .4785619492202780899653575j, - -.7700332930556816872932937 - .5874255426351153211965601j, - -.7700332930556816872932937 + .5874255426351153211965601j, - -.7105305456418785989070935 - .6982266265924524000098548j, - -.7105305456418785989070935 + .6982266265924524000098548j, - 
-.6362427683267827226840153 - .8118875040246347267248508j, - -.6362427683267827226840153 + .8118875040246347267248508j, - -.5430983056306302779658129 - .9299947824439872998916657j, - -.5430983056306302779658129 + .9299947824439872998916657j, - -.4232528745642628461715044 - 1.055755605227545931204656j, - -.4232528745642628461715044 + 1.055755605227545931204656j, - -.2566376987939318038016012 - 1.197982433555213008346532j, - -.2566376987939318038016012 + 1.197982433555213008346532j] - elif N == 23: - p = [-.9066732476324988168207439, - -.9027564979912504609412993 - .1010534335314045013252480j, - -.9027564979912504609412993 + .1010534335314045013252480j, - -.8909283242471251458653994 - .2023024699381223418195228j, - -.8909283242471251458653994 + .2023024699381223418195228j, - -.8709469395587416239596874 - .3039581993950041588888925j, - -.8709469395587416239596874 + .3039581993950041588888925j, - -.8423805948021127057054288 - .4062657948237602726779246j, - -.8423805948021127057054288 + .4062657948237602726779246j, - -.8045561642053176205623187 - .5095305912227258268309528j, - -.8045561642053176205623187 + .5095305912227258268309528j, - -.7564660146829880581478138 - .6141594859476032127216463j, - -.7564660146829880581478138 + .6141594859476032127216463j, - -.6965966033912705387505040 - .7207341374753046970247055j, - -.6965966033912705387505040 + .7207341374753046970247055j, - -.6225903228771341778273152 - .8301558302812980678845563j, - -.6225903228771341778273152 + .8301558302812980678845563j, - -.5304922463810191698502226 - .9439760364018300083750242j, - -.5304922463810191698502226 + .9439760364018300083750242j, - -.4126986617510148836149955 - 1.065328794475513585531053j, - -.4126986617510148836149955 + 1.065328794475513585531053j, - -.2497697202208956030229911 - 1.202813187870697831365338j, - -.2497697202208956030229911 + 1.202813187870697831365338j] - elif N == 24: - p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j, - -.9055312363372773709269407 + 
48440066540478700874836350.0e-27j, - -.8983105104397872954053307 - .1454056133873610120105857j, - -.8983105104397872954053307 + .1454056133873610120105857j, - -.8837358034555706623131950 - .2426335234401383076544239j, - -.8837358034555706623131950 + .2426335234401383076544239j, - -.8615278304016353651120610 - .3403202112618624773397257j, - -.8615278304016353651120610 + .3403202112618624773397257j, - -.8312326466813240652679563 - .4386985933597305434577492j, - -.8312326466813240652679563 + .4386985933597305434577492j, - -.7921695462343492518845446 - .5380628490968016700338001j, - -.7921695462343492518845446 + .5380628490968016700338001j, - -.7433392285088529449175873 - .6388084216222567930378296j, - -.7433392285088529449175873 + .6388084216222567930378296j, - -.6832565803536521302816011 - .7415032695091650806797753j, - -.6832565803536521302816011 + .7415032695091650806797753j, - -.6096221567378335562589532 - .8470292433077202380020454j, - -.6096221567378335562589532 + .8470292433077202380020454j, - -.5185914574820317343536707 - .9569048385259054576937721j, - -.5185914574820317343536707 + .9569048385259054576937721j, - -.4027853855197518014786978 - 1.074195196518674765143729j, - -.4027853855197518014786978 + 1.074195196518674765143729j, - -.2433481337524869675825448 - 1.207298683731972524975429j, - -.2433481337524869675825448 + 1.207298683731972524975429j] - elif N == 25: - p = [-.9062073871811708652496104, - -.9028833390228020537142561 - 93077131185102967450643820.0e-27j, - -.9028833390228020537142561 + 93077131185102967450643820.0e-27j, - -.8928551459883548836774529 - .1863068969804300712287138j, - -.8928551459883548836774529 + .1863068969804300712287138j, - -.8759497989677857803656239 - .2798521321771408719327250j, - -.8759497989677857803656239 + .2798521321771408719327250j, - -.8518616886554019782346493 - .3738977875907595009446142j, - -.8518616886554019782346493 + .3738977875907595009446142j, - -.8201226043936880253962552 - .4686668574656966589020580j, - 
-.8201226043936880253962552 + .4686668574656966589020580j, - -.7800496278186497225905443 - .5644441210349710332887354j, - -.7800496278186497225905443 + .5644441210349710332887354j, - -.7306549271849967721596735 - .6616149647357748681460822j, - -.7306549271849967721596735 + .6616149647357748681460822j, - -.6704827128029559528610523 - .7607348858167839877987008j, - -.6704827128029559528610523 + .7607348858167839877987008j, - -.5972898661335557242320528 - .8626676330388028512598538j, - -.5972898661335557242320528 + .8626676330388028512598538j, - -.5073362861078468845461362 - .9689006305344868494672405j, - -.5073362861078468845461362 + .9689006305344868494672405j, - -.3934529878191079606023847 - 1.082433927173831581956863j, - -.3934529878191079606023847 + 1.082433927173831581956863j, - -.2373280669322028974199184 - 1.211476658382565356579418j, - -.2373280669322028974199184 + 1.211476658382565356579418j] - else: - raise ValueError("Bessel Filter not supported for order %d" % N) - - return z, p, k - -filter_dict = {'butter': [buttap, buttord], - 'butterworth': [buttap, buttord], - 'cauer': [ellipap, ellipord], - 'elliptic': [ellipap, ellipord], - 'ellip': [ellipap, ellipord], - 'bessel': [besselap], - 'cheby1': [cheb1ap, cheb1ord], - 'chebyshev1': [cheb1ap, cheb1ord], - 'chebyshevi': [cheb1ap, cheb1ord], - 'cheby2': [cheb2ap, cheb2ord], - 'chebyshev2': [cheb2ap, cheb2ord], - 'chebyshevii': [cheb2ap, cheb2ord] - } - -band_dict = {'band': 'bandpass', - 'bandpass': 'bandpass', - 'pass': 'bandpass', - 'bp': 'bandpass', - 'bs': 'bandstop', - 'bandstop': 'bandstop', - 'bands': 'bandstop', - 'stop': 'bandstop', - 'l': 'lowpass', - 'low': 'lowpass', - 'lowpass': 'lowpass', - 'high': 'highpass', - 'highpass': 'highpass', - 'h': 'highpass' - } - -warnings.simplefilter("always", BadCoefficients) diff --git a/scipy-0.10.1/scipy/signal/fir_filter_design.py b/scipy-0.10.1/scipy/signal/fir_filter_design.py deleted file mode 100644 index decd01c005..0000000000 --- 
a/scipy-0.10.1/scipy/signal/fir_filter_design.py +++ /dev/null @@ -1,588 +0,0 @@ -"""Functions for FIR filter design.""" - -from math import ceil, log -import numpy as np -from numpy.fft import irfft -from scipy.special import sinc -import sigtools - -__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', - 'firwin', 'firwin2', 'remez'] - - -# Some notes on function parameters: -# -# `cutoff` and `width` are given as a numbers between 0 and 1. These -# are relative frequencies, expressed as a fraction of the Nyquist rate. -# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width -# of 300 Hz. -# -# The `order` of a FIR filter is one less than the number of taps. -# This is a potential source of confusion, so in the following code, -# we will always use the number of taps as the parameterization of -# the 'size' of the filter. The "number of taps" means the number -# of coefficients, which is the same as the length of the impulse -# response of the filter. - - -def kaiser_beta(a): - """Compute the Kaiser parameter `beta`, given the attenuation `a`. - - Parameters - ---------- - a : float - The desired attenuation in the stopband and maximum ripple in - the passband, in dB. This should be a *positive* number. - - Returns - ------- - beta : float - The `beta` parameter to be used in the formula for a Kaiser window. - - References - ---------- - Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. - """ - if a > 50: - beta = 0.1102 * (a - 8.7) - elif a > 21: - beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) - else: - beta = 0.0 - return beta - - -def kaiser_atten(numtaps, width): - """Compute the attenuation of a Kaiser FIR filter. - - Given the number of taps `N` and the transition width `width`, compute the - attenuation `a` in dB, given by Kaiser's formula: - - a = 2.285 * (N - 1) * pi * width + 7.95 - - Parameters - ---------- - N : int - The number of taps in the FIR filter. 
- width : float - The desired width of the transition region between passband and - stopband (or, in general, at any discontinuity) for the filter. - - Returns - ------- - a : float - The attenuation of the ripple, in dB. - - See Also - -------- - kaiserord, kaiser_beta - """ - a = 2.285 * (numtaps - 1) * np.pi * width + 7.95 - return a - - -def kaiserord(ripple, width): - """Design a Kaiser window to limit ripple and width of transition region. - - Parameters - ---------- - ripple : float - Positive number specifying maximum ripple in passband (dB) and minimum - ripple in stopband. - width : float - Width of transition region (normalized so that 1 corresponds to pi - radians / sample). - - Returns - ------- - numtaps : int - The length of the kaiser window. - beta : - The beta parameter for the kaiser window. - - Notes - ----- - There are several ways to obtain the Kaiser window: - - signal.kaiser(numtaps, beta, sym=0) - signal.get_window(beta, numtaps) - signal.get_window(('kaiser', beta), numtaps) - - The empirical equations discovered by Kaiser are used. - - See Also - -------- - kaiser_beta, kaiser_atten - - References - ---------- - Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. - - """ - A = abs(ripple) # in case somebody is confused as to what's meant - if A < 8: - # Formula for N is not valid in this range. - raise ValueError("Requested maximum ripple attentuation %f is too " - "small for the Kaiser formula." % A) - beta = kaiser_beta(A) - - # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter - # order, so we have to add 1 to get the number of taps. - numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1 - - return int(ceil(numtaps)), beta - - -def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True, - scale=True, nyq=1.0): - """ - FIR filter design using the window method. - - This function computes the coefficients of a finite impulse response - filter. 
The filter will have linear phase; it will be Type I if - `numtaps` is odd and Type II if `numtaps` is even. - - Type II filters always have zero response at the Nyquist rate, so a - ValueError exception is raised if firwin is called with `numtaps` even and - having a passband whose right end is at the Nyquist rate. - - Parameters - ---------- - numtaps : int - Length of the filter (number of coefficients, i.e. the filter - order + 1). `numtaps` must be even if a passband includes the - Nyquist frequency. - - cutoff : float or 1D array_like - Cutoff frequency of filter (expressed in the same units as `nyq`) - OR an array of cutoff frequencies (that is, band edges). In the - latter case, the frequencies in `cutoff` should be positive and - monotonically increasing between 0 and `nyq`. The values 0 and - `nyq` must not be included in `cutoff`. - - width : float or None - If `width` is not None, then assume it is the approximate width - of the transition region (expressed in the same units as `nyq`) - for use in Kaiser FIR filter design. In this case, the `window` - argument is ignored. - - window : string or tuple of string and parameter values - Desired window to use. See `scipy.signal.get_window` for a list - of windows and required parameters. - - pass_zero : bool - If True, the gain at the frequency 0 (i.e. the "DC gain") is 1. - Otherwise the DC gain is 0. - - scale : bool - Set to True to scale the coefficients so that the frequency - response is exactly unity at a certain frequency. - That frequency is either: - 0 (DC) if the first passband starts at 0 (i.e. pass_zero - is True); - `nyq` (the Nyquist rate) if the first passband ends at - `nyq` (i.e the filter is a single band highpass filter); - center of first passband otherwise. - - nyq : float - Nyquist frequency. Each frequency in `cutoff` must be between 0 - and `nyq`. - - Returns - ------- - h : 1D ndarray - Coefficients of length `numtaps` FIR filter. 
- - Raises - ------ - ValueError - If any value in `cutoff` is less than or equal to 0 or greater - than or equal to `nyq`, if the values in `cutoff` are not strictly - monotonically increasing, or if `numtaps` is even but a passband - includes the Nyquist frequency. - - Examples - -------- - - Low-pass from 0 to f:: - - >>> firwin(numtaps, f) - - Use a specific window function:: - - >>> firwin(numtaps, f, window='nuttall') - - High-pass ('stop' from 0 to f):: - - >>> firwin(numtaps, f, pass_zero=False) - - Band-pass:: - - >>> firwin(numtaps, [f1, f2], pass_zero=False) - - Band-stop:: - - >>> firwin(numtaps, [f1, f2]) - - Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):: - - >>>firwin(numtaps, [f1, f2, f3, f4]) - - Multi-band (passbands are [f1, f2] and [f3,f4]):: - - >>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) - - See also - -------- - scipy.signal.firwin2 - - """ - - # The major enhancements to this function added in November 2010 were - # developed by Tom Krauss (see ticket #902). - - cutoff = np.atleast_1d(cutoff) / float(nyq) - - # Check for invalid input. - if cutoff.ndim > 1: - raise ValueError("The cutoff argument must be at most " - "one-dimensional.") - if cutoff.size == 0: - raise ValueError("At least one cutoff frequency must be given.") - if cutoff.min() <= 0 or cutoff.max() >= 1: - raise ValueError("Invalid cutoff frequency: frequencies must be " - "greater than 0 and less than nyq.") - if np.any(np.diff(cutoff) <= 0): - raise ValueError("Invalid cutoff frequencies: the frequencies " - "must be strictly increasing.") - - if width is not None: - # A width was given. Find the beta parameter of the Kaiser window - # and set `window`. This overrides the value of `window` passed in. 
- atten = kaiser_atten(numtaps, float(width) / nyq) - beta = kaiser_beta(atten) - window = ('kaiser', beta) - - pass_nyquist = bool(cutoff.size & 1) ^ pass_zero - if pass_nyquist and numtaps % 2 == 0: - raise ValueError("A filter with an even number of coefficients must " - "have zero response at the Nyquist rate.") - - # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff - # is even, and each pair in cutoff corresponds to passband. - cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) - - # `bands` is a 2D array; each row gives the left and right edges of - # a passband. - bands = cutoff.reshape(-1, 2) - - # Build up the coefficients. - alpha = 0.5 * (numtaps - 1) - m = np.arange(0, numtaps) - alpha - h = 0 - for left, right in bands: - h += right * sinc(right * m) - h -= left * sinc(left * m) - - # Get and apply the window function. - from signaltools import get_window - win = get_window(window, numtaps, fftbins=False) - h *= win - - # Now handle scaling if desired. - if scale: - # Get the first passband. - left, right = bands[0] - if left == 0: - scale_frequency = 0.0 - elif right == 1: - scale_frequency = 1.0 - else: - scale_frequency = 0.5 * (left + right) - c = np.cos(np.pi * m * scale_frequency) - s = np.sum(h * c) - h /= s - - return h - - -# Original version of firwin2 from scipy ticket #457, submitted by "tash". -# -# Rewritten by Warren Weckesser, 2010. - -def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0, antisymmetric=False): - """FIR filter design using the window method. - - From the given frequencies `freq` and corresponding gains `gain`, - this function constructs an FIR filter with linear phase and - (approximately) the given frequency response. - - Parameters - ---------- - numtaps : int - The number of taps in the FIR filter. `numtaps` must be less than - `nfreqs`. - - freq : array-like, 1D - The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being - Nyquist. 
The Nyquist frequency can be redefined with the argument - `nyq`. - - The values in `freq` must be nondecreasing. A value can be repeated - once to implement a discontinuity. The first value in `freq` must - be 0, and the last value must be `nyq`. - - gain : array-like - The filter gains at the frequency sampling points. Certain - constraints to gain values, depending on the filter type, are applied, - see Notes for details. - - nfreqs : int, optional - The size of the interpolation mesh used to construct the filter. - For most efficient behavior, this should be a power of 2 plus 1 - (e.g, 129, 257, etc). The default is one more than the smallest - power of 2 that is not less than `numtaps`. `nfreqs` must be greater - than `numtaps`. - - window : string or (string, float) or float, or None, optional - Window function to use. Default is "hamming". See - `scipy.signal.get_window` for the complete list of possible values. - If None, no window function is applied. - - nyq : float - Nyquist frequency. Each frequency in `freq` must be between 0 and - `nyq` (inclusive). - - antisymmetric : bool - Flag setting wither resulting impulse responce is symmetric/antisymmetric. - See Notes for more details. - - Returns - ------- - taps : numpy 1D array of length `numtaps` - The filter coefficients of the FIR filter. - - Examples - -------- - A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and - that decreases linearly on [0.5, 1.0] from 1 to 0: - - >>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) - >>> print(taps[72:78]) - [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961] - - See also - -------- - scipy.signal.firwin - - Notes - ----- - - From the given set of frequencies and gains, the desired response is - constructed in the frequency domain. The inverse FFT is applied to the - desired response to create the associated convolution kernel, and the - first `numtaps` coefficients of this kernel, scaled by `window`, are - returned. 
- - The FIR filter will have linear phase. The type of filter is determined by - the value of 'numtaps` and `antisymmetric` flag. - There are four possible combinations: - - odd `numtaps`, `antisymmetric` is False, type I filter is produced - - even `numtaps`, `antisymmetric` is False, type II filter is produced - - odd `numtaps`, `antisymmetric` is True, type III filter is produced - - even `numtaps`, `antisymmetric` is True, type IV filter is produced - - Magnitude response of all but type I filters are subjects to following - constraints: - - type II -- zero at the Nyquist frequency - - type III -- zero at zero and Nyquist frequencies - - type IV -- zero at zero frequency - - .. versionadded:: 0.9.0 - - References - ---------- - .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal - Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989). - (See, for example, Section 7.4.) - - .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital - Signal Processing", Ch. 17. 
http://www.dspguide.com/ch17/1.htm - - """ - - if len(freq) != len(gain): - raise ValueError('freq and gain must be of same length.') - - if nfreqs is not None and numtaps >= nfreqs: - raise ValueError(('ntaps must be less than nfreqs, but firwin2 was ' - 'called with ntaps=%d and nfreqs=%s') % - (numtaps, nfreqs)) - - if freq[0] != 0 or freq[-1] != nyq: - raise ValueError('freq must start with 0 and end with `nyq`.') - d = np.diff(freq) - if (d < 0).any(): - raise ValueError('The values in freq must be nondecreasing.') - d2 = d[:-1] + d[1:] - if (d2 == 0).any(): - raise ValueError('A value in freq must not occur more than twice.') - - if antisymmetric: - if numtaps % 2 == 0: - ftype = 4 - else: - ftype = 3 - else: - if numtaps % 2 == 0: - ftype = 2 - else: - ftype = 1 - - if ftype == 2 and gain[-1] != 0.0: - raise ValueError("A Type II filter must have zero gain at the Nyquist rate.") - elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0): - raise ValueError("A Type III filter must have zero gain at zero and Nyquist rates.") - elif ftype == 4 and gain[0] != 0.0: - raise ValueError("A Type IV filter must have zero gain at zero rate.") - - if nfreqs is None: - nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2))) - - # Tweak any repeated values in freq so that interp works. - eps = np.finfo(float).eps - for k in range(len(freq)): - if k < len(freq) - 1 and freq[k] == freq[k + 1]: - freq[k] = freq[k] - eps - freq[k + 1] = freq[k + 1] + eps - - # Linearly interpolate the desired response on a uniform mesh `x`. - x = np.linspace(0.0, nyq, nfreqs) - fx = np.interp(x, freq, gain) - - # Adjust the phases of the coefficients so that the first `ntaps` of the - # inverse FFT are the desired filter coefficients. - shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq) - if ftype > 2: - shift *= 1j - - fx2 = fx * shift - - - # Use irfft to compute the inverse FFT. - out_full = irfft(fx2) - - if window is not None: - # Create the window to apply to the filter coefficients. 
- from signaltools import get_window - wind = get_window(window, numtaps, fftbins=False) - else: - wind = 1 - - # Keep only the first `numtaps` coefficients in `out`, and multiply by - # the window. - out = out_full[:numtaps] * wind - - if ftype == 3: - out[out.size // 2] = 0.0 - - return out - - -def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass', - maxiter=25, grid_density=16): - """ - Calculate the minimax optimal filter using the Remez exchange algorithm. - - Calculate the filter-coefficients for the finite impulse response - (FIR) filter whose transfer function minimizes the maximum error - between the desired gain and the realized gain in the specified - frequency bands using the Remez exchange algorithm. - - Parameters - ---------- - numtaps : int - The desired number of taps in the filter. The number of taps is - the number of terms in the filter, or the filter order plus one. - bands : array_like - A monotonic sequence containing the band edges in Hz. - All elements must be non-negative and less than half the sampling - frequency as given by `Hz`. - desired : array_like - A sequence half the size of bands containing the desired gain - in each of the specified bands. - weight : array_like, optional - A relative weighting to give to each band region. The length of - `weight` has to be half the length of `bands`. - Hz : scalar, optional - The sampling frequency in Hz. Default is 1. - type : {'bandpass', 'differentiator', 'hilbert'}, optional - The type of filter: - - 'bandpass' : flat response in bands. This is the default. - - 'differentiator' : frequency proportional response in bands. - - 'hilbert' : filter with odd symmetry, that is, type III - (for even order) or type IV (for odd order) - linear phase filters. - - maxiter : int, optional - Maximum number of iterations of the algorithm. Default is 25. - grid_density : int, optional - Grid density. The dense grid used in `remez` is of size - ``(numtaps + 1) * grid_density``. 
Default is 16. - - Returns - ------- - out : ndarray - A rank-1 array containing the coefficients of the optimal - (in a minimax sense) filter. - - See Also - -------- - freqz : Compute the frequency response of a digital filter. - - References - ---------- - .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the - design of optimum FIR linear phase digital filters", - IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973. - .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer - Program for Designing Optimum FIR Linear Phase Digital - Filters", IEEE Trans. Audio Electroacoust., vol. AU-21, - pp. 506-525, 1973. - - Examples - -------- - We want to construct a filter with a passband at 0.2-0.4 Hz, and - stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the - behavior in the frequency ranges between those bands is unspecified and - may overshoot. - - >>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0]) - >>> freq, response = sp.signal.freqz(bpass) - >>> ampl = np.abs(response) - - >>> import matplotlib.pyplot as plt - >>> fig = plt.figure() - >>> ax1 = fig.add_subplot(111) - >>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz - [] - >>> plt.show() - - """ - # Convert type - try: - tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type] - except KeyError: - raise ValueError("Type must be 'bandpass', 'differentiator', " - "or 'hilbert'") - - # Convert weight - if weight is None: - weight = [1] * len(desired) - - bands = np.asarray(bands).copy() - return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz, - maxiter, grid_density) diff --git a/scipy-0.10.1/scipy/signal/firfilter.c b/scipy-0.10.1/scipy/signal/firfilter.c deleted file mode 100644 index 77a25b1ab4..0000000000 --- a/scipy-0.10.1/scipy/signal/firfilter.c +++ /dev/null @@ -1,193 +0,0 @@ -#define NO_IMPORT_ARRAY -#include "sigtools.h" - -static int elsizes[] = {sizeof(Bool), - sizeof(byte), - sizeof(ubyte), - sizeof(short), - 
sizeof(ushort), - sizeof(int), - sizeof(uint), - sizeof(long), - sizeof(ulong), - sizeof(longlong), - sizeof(ulonglong), - sizeof(float), - sizeof(double), - sizeof(longdouble), - sizeof(cfloat), - sizeof(cdouble), - sizeof(clongdouble), - sizeof(void *), - 0,0,0,0}; - -typedef void (OneMultAddFunction) (char *, char *, char *); - -#define MAKE_ONEMULTADD(fname, type) \ -static void fname ## _onemultadd(char *sum, char *term1, char *term2) { \ - (*((type *) sum)) += (*((type *) term1)) * \ - (*((type *) term2)); return; } - -MAKE_ONEMULTADD(UBYTE, ubyte) -MAKE_ONEMULTADD(USHORT, ushort) -MAKE_ONEMULTADD(UINT, uint) -MAKE_ONEMULTADD(ULONG, ulong) -MAKE_ONEMULTADD(ULONGLONG, ulonglong) - -MAKE_ONEMULTADD(BYTE, byte) -MAKE_ONEMULTADD(SHORT, short) -MAKE_ONEMULTADD(INT, int) -MAKE_ONEMULTADD(LONG, long) -MAKE_ONEMULTADD(LONGLONG, longlong) - -MAKE_ONEMULTADD(FLOAT, float) -MAKE_ONEMULTADD(DOUBLE, double) -MAKE_ONEMULTADD(LONGDOUBLE, longdouble) - -#ifdef __GNUC__ -MAKE_ONEMULTADD(CFLOAT, __complex__ float) -MAKE_ONEMULTADD(CDOUBLE, __complex__ double) -MAKE_ONEMULTADD(CLONGDOUBLE, __complex__ long double) -#else -#define MAKE_C_ONEMULTADD(fname, type) \ -static void fname ## _onemultadd(char *sum, char *term1, char *term2) { \ - ((type *) sum)[0] += ((type *) term1)[0] * ((type *) term2)[0] \ - - ((type *) term1)[1] * ((type *) term2)[1]; \ - ((type *) sum)[1] += ((type *) term1)[0] * ((type *) term2)[1] \ - + ((type *) term1)[1] * ((type *) term2)[0]; \ - return; } -MAKE_C_ONEMULTADD(CFLOAT, float) -MAKE_C_ONEMULTADD(CDOUBLE, double) -MAKE_C_ONEMULTADD(CLONGDOUBLE, longdouble) -#endif /* __GNUC__ */ - -static OneMultAddFunction *OneMultAdd[]={NULL, - BYTE_onemultadd, - UBYTE_onemultadd, - SHORT_onemultadd, - USHORT_onemultadd, - INT_onemultadd, - UINT_onemultadd, - LONG_onemultadd, - ULONG_onemultadd, - LONGLONG_onemultadd, - ULONGLONG_onemultadd, - FLOAT_onemultadd, - DOUBLE_onemultadd, - LONGDOUBLE_onemultadd, - CFLOAT_onemultadd, - CDOUBLE_onemultadd, - 
CLONGDOUBLE_onemultadd, - NULL, NULL, NULL, NULL}; - - -/* This could definitely be more optimized... */ - -int pylab_convolve_2d (char *in, /* Input data Ns[0] x Ns[1] */ - intp *instr, /* Input strides */ - char *out, /* Output data */ - intp *outstr, /* Ouput strides */ - char *hvals, /* coefficients in filter */ - intp *hstr, /* coefficients strides */ - intp *Nwin, /* Size of kernel Nwin[0] x Nwin[1] */ - intp *Ns, /* Size of image Ns[0] x Ns[1] */ - int flag, /* convolution parameters */ - char *fillvalue) /* fill value */ -{ - int bounds_pad_flag = 0; - int m, n, j, k, ind0, ind1; - int Os[2]; - char *sum=NULL, *value=NULL; - int new_m, new_n, ind0_memory=0; - int boundary, outsize, convolve, type_num, type_size; - OneMultAddFunction *mult_and_add; - - boundary = flag & BOUNDARY_MASK; /* flag can be fill, reflecting, circular */ - outsize = flag & OUTSIZE_MASK; - convolve = flag & FLIP_MASK; - type_num = (flag & TYPE_MASK) >> TYPE_SHIFT; - /*type_size*/ - - mult_and_add = OneMultAdd[type_num]; - if (mult_and_add == NULL) return -5; /* Not available for this type */ - - if (type_num < 0 || type_num > MAXTYPES) return -4; /* Invalid type */ - type_size = elsizes[type_num]; - - if ((sum = calloc(type_size,2))==NULL) return -3; /* No memory */ - value = sum + type_size; - - if (outsize == FULL) {Os[0] = Ns[0]+Nwin[0]-1; Os[1] = Ns[1]+Nwin[1]-1;} - else if (outsize == SAME) {Os[0] = Ns[0]; Os[1] = Ns[1];} - else if (outsize == VALID) {Os[0] = Ns[0]-Nwin[0]+1; Os[1] = Ns[1]-Nwin[1]+1;} - else return -1; /* Invalid output flag */ - - if ((boundary != PAD) && (boundary != REFLECT) && (boundary != CIRCULAR)) - return -2; /* Invalid boundary flag */ - - /* Speed this up by not doing any if statements in the for loop. Need 3*3*2=18 different - loops executed for different conditions */ - - for (m=0; m < Os[0]; m++) { - /* Reposition index into input image based on requested output size */ - if (outsize == FULL) new_m = convolve ? 
m : (m-Nwin[0]+1); - else if (outsize == SAME) new_m = convolve ? (m+((Nwin[0]-1)>>1)) : (m-((Nwin[0]-1) >> 1)); - else new_m = convolve ? (m+Nwin[0]-1) : m; /* VALID */ - - for (n=0; n < Os[1]; n++) { /* loop over columns */ - memset(sum, 0, type_size); /* sum = 0.0; */ - - if (outsize == FULL) new_n = convolve ? n : (n-Nwin[1]+1); - else if (outsize == SAME) new_n = convolve ? (n+((Nwin[1]-1)>>1)) : (n-((Nwin[1]-1) >> 1)); - else new_n = convolve ? (n+Nwin[1]-1) : n; - - /* Sum over kernel, if index into image is out of bounds - handle it according to boundary flag */ - for (j=0; j < Nwin[0]; j++) { - ind0 = convolve ? (new_m-j): (new_m+j); - bounds_pad_flag = 0; - - if (ind0 < 0) { - if (boundary == REFLECT) ind0 = -1-ind0; - else if (boundary == CIRCULAR) ind0 = Ns[0] + ind0; - else bounds_pad_flag = 1; - } - else if (ind0 >= Ns[0]) { - if (boundary == REFLECT) ind0 = Ns[0]+Ns[0]-1-ind0; - else if (boundary == CIRCULAR) ind0 = ind0 - Ns[0]; - else bounds_pad_flag = 1; - } - - if (!bounds_pad_flag) ind0_memory = ind0*instr[0]; - - for (k=0; k < Nwin[1]; k++) { - if (bounds_pad_flag) memcpy(value,fillvalue,type_size); - else { - ind1 = convolve ? 
(new_n-k) : (new_n+k); - if (ind1 < 0) { - if (boundary == REFLECT) ind1 = -1-ind1; - else if (boundary == CIRCULAR) ind1 = Ns[1] + ind1; - else bounds_pad_flag = 1; - } - else if (ind1 >= Ns[1]) { - if (boundary == REFLECT) ind1 = Ns[1]+Ns[1]-1-ind1; - else if (boundary == CIRCULAR) ind1 = ind1 - Ns[1]; - else bounds_pad_flag = 1; - } - - if (bounds_pad_flag) memcpy(value, fillvalue, type_size); - else memcpy(value, in+ind0_memory+ind1*instr[1], type_size); - bounds_pad_flag = 0; - } - mult_and_add(sum, hvals+j*hstr[0]+k*hstr[1], value); - } - memcpy(out+m*outstr[0]+n*outstr[1], sum, type_size); - } - } - } - free(sum); - return 0; -} - - - diff --git a/scipy-0.10.1/scipy/signal/lfilter.c.src b/scipy-0.10.1/scipy/signal/lfilter.c.src deleted file mode 100644 index 07c3dbcf49..0000000000 --- a/scipy-0.10.1/scipy/signal/lfilter.c.src +++ /dev/null @@ -1,590 +0,0 @@ -/* - * vim:syntax=c - * vim:sw=4 - */ -#include -#define PY_ARRAY_UNIQUE_SYMBOL _scipy_signal_ARRAY_API -#define NO_IMPORT_ARRAY -#include - -#if PY_VERSION_HEX >= 0x03000000 -#define PyNumber_Divide PyNumber_TrueDivide -#endif - -#include "sigtools.h" - -static void FLOAT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); -static void DOUBLE_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); -static void EXTENDED_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); -static void CFLOAT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); -static void CDOUBLE_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); -static void CEXTENDED_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); -static void OBJECT_filt(char *b, char *a, char *x, char *y, char *Z, - intp 
len_b, uintp len_x, intp stride_X, - intp stride_Y); - -typedef void (BasicFilterFunction) (char *, char *, char *, char *, char *, intp, uintp, intp, intp); - -static BasicFilterFunction *BasicFilterFunctions[256]; - -void -scipy_signal_sigtools_linear_filter_module_init() -{ - int k; - for (k = 0; k < 256; ++k) { - BasicFilterFunctions[k] = NULL; - } - BasicFilterFunctions[NPY_FLOAT] = FLOAT_filt; - BasicFilterFunctions[NPY_DOUBLE] = DOUBLE_filt; - BasicFilterFunctions[NPY_LONGDOUBLE] = EXTENDED_filt; - BasicFilterFunctions[NPY_CFLOAT] = CFLOAT_filt; - BasicFilterFunctions[NPY_CDOUBLE] = CDOUBLE_filt; - BasicFilterFunctions[NPY_CLONGDOUBLE] = CEXTENDED_filt; - BasicFilterFunctions[NPY_OBJECT] = OBJECT_filt; -} - -/* There is the start of an OBJECT_filt, but it may need work */ - -static int -RawFilter(const PyArrayObject * b, const PyArrayObject * a, - const PyArrayObject * x, const PyArrayObject * zi, - const PyArrayObject * zf, PyArrayObject * y, int axis, - BasicFilterFunction * filter_func); - -/* - * XXX: Error checking not done yet - */ -PyObject* -scipy_signal_sigtools_linear_filter(PyObject * NPY_UNUSED(dummy), PyObject * args) -{ - PyObject *b, *a, *X, *Vi; - PyArrayObject *arY, *arb, *ara, *arX, *arVi, *arVf; - int axis, typenum, theaxis, st; - char *ara_ptr, input_flag = 0, *azero; - intp na, nb, nal; - BasicFilterFunction *basic_filter; - - axis = -1; - Vi = NULL; - if (!PyArg_ParseTuple(args, "OOO|iO", &b, &a, &X, &axis, &Vi)) { - return NULL; - } - - typenum = PyArray_ObjectType(b, 0); - typenum = PyArray_ObjectType(a, typenum); - typenum = PyArray_ObjectType(X, typenum); - if (Vi != NULL) { - typenum = PyArray_ObjectType(Vi, typenum); - } - - arY = arVf = arVi = NULL; - ara = (PyArrayObject *) PyArray_ContiguousFromObject(a, typenum, 1, 1); - arb = (PyArrayObject *) PyArray_ContiguousFromObject(b, typenum, 1, 1); - arX = (PyArrayObject *) PyArray_FromObject(X, typenum, 0, 0); - /* XXX: fix failure handling here */ - if (ara == NULL || arb == NULL 
|| arX == NULL) { - goto fail; - } - - if (axis < -arX->nd || axis > arX->nd - 1) { - PyErr_SetString(PyExc_ValueError, "selected axis is out of range"); - goto fail; - } - if (axis < 0) { - theaxis = arX->nd + axis; - } else { - theaxis = axis; - } - - if (Vi != NULL) { - arVi = (PyArrayObject *) PyArray_FromObject(Vi, typenum, - arX->nd, arX->nd); - if (arVi == NULL) - goto fail; - - input_flag = 1; - } - - arY = (PyArrayObject *) PyArray_SimpleNew(arX->nd, - arX->dimensions, typenum); - if (arY == NULL) { - goto fail; - } - - if (input_flag) { - arVf = (PyArrayObject *) PyArray_SimpleNew(arVi->nd, - arVi->dimensions, - typenum); - } - - if (arX->descr->type_num < 256) { - basic_filter = BasicFilterFunctions[(int) (arX->descr->type_num)]; - } - else { - basic_filter = NULL; - } - if (basic_filter == NULL) { - PyObject *msg, *str; - char *s; - - str = PyObject_Str((PyObject*)arX->descr); - if (str == NULL) { - goto fail; - } - s = PyString_AsString(str); - msg = PyString_FromFormat( - "input type '%s' not supported\n", s); - Py_DECREF(str); - if (msg == NULL) { - goto fail; - } - PyErr_SetObject(PyExc_NotImplementedError, msg); - Py_DECREF(msg); - goto fail; - } - - /* Skip over leading zeros in vector representing denominator (a) */ - /* XXX: handle this correctly */ - azero = PyArray_Zero(ara); - ara_ptr = ara->data; - nal = PyArray_ITEMSIZE(ara); - if (memcmp(ara_ptr, azero, nal) == 0) { - PyErr_SetString(PyExc_ValueError, - "BUG: filter coefficient a[0] == 0 not supported yet"); - goto fail; - } - PyDataMem_FREE(azero); - - na = PyArray_SIZE(ara); - nb = PyArray_SIZE(arb); - if (input_flag) { - if (arVi->dimensions[theaxis] != (na > nb ? 
na : nb) - 1) { - PyErr_SetString(PyExc_ValueError, - "The number of initial conditions must be max([len(a),len(b)]) - 1"); - goto fail; - } - } - - st = RawFilter(arb, ara, arX, arVi, arVf, arY, theaxis, basic_filter); - if (st) { - goto fail; - } - - Py_XDECREF(ara); - Py_XDECREF(arb); - Py_XDECREF(arX); - Py_XDECREF(arVi); - - if (!input_flag) { - return PyArray_Return(arY); - } else { - return Py_BuildValue("(NN)", arY, arVf); - } - - - fail: - Py_XDECREF(ara); - Py_XDECREF(arb); - Py_XDECREF(arX); - Py_XDECREF(arVi); - Py_XDECREF(arVf); - Py_XDECREF(arY); - return NULL; -} - -/* - * Copy the first nxzfilled items of x into xzfilled , and fill the rest with - * 0s - */ -static int -zfill(const PyArrayObject * x, intp nx, char *xzfilled, intp nxzfilled) -{ - char *xzero; - intp i, nxl; - PyArray_CopySwapFunc *copyswap = x->descr->f->copyswap; - - nxl = PyArray_ITEMSIZE(x); - - /* PyArray_Zero does not take const pointer, hence the cast */ - xzero = PyArray_Zero((PyArrayObject *) x); - - if (nx > 0) { - for (i = 0; i < nx; ++i) { - copyswap(xzfilled + i * nxl, x->data + i * nxl, 0, NULL); - } - } - for (i = nx; i < nxzfilled; ++i) { - copyswap(xzfilled + i * nxl, xzero, 0, NULL); - } - - PyDataMem_FREE(xzero); - - return 0; -} - -/* - * a and b assumed to be contiguous - * - * XXX: this code is very conservative, and could be considerably sped up for - * the usual cases (like contiguity). 
- * - * XXX: the code should be refactored (at least with/without initial - * condition), some code is wasteful here - */ -static int -RawFilter(const PyArrayObject * b, const PyArrayObject * a, - const PyArrayObject * x, const PyArrayObject * zi, - const PyArrayObject * zf, PyArrayObject * y, int axis, - BasicFilterFunction * filter_func) -{ - PyArrayIterObject *itx, *ity, *itzi, *itzf; - intp nitx, i, nxl, nzfl, j; - intp na, nb, nal, nbl; - intp nfilt; - char *azfilled, *bzfilled, *zfzfilled, *yoyo; - PyArray_CopySwapFunc *copyswap = x->descr->f->copyswap; - - itx = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *) x, - &axis); - if (itx == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create itx"); - goto fail; - } - nitx = itx->size; - - ity = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *) y, - &axis); - if (ity == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create ity"); - goto clean_itx; - } - - if (zi != NULL) { - itzi = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *) - zi, &axis); - if (itzi == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create itzi"); - goto clean_ity; - } - - itzf = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *) - zf, &axis); - if (itzf == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create itzf"); - goto clean_itzi; - } - } - - na = PyArray_SIZE(a); - nal = PyArray_ITEMSIZE(a); - nb = PyArray_SIZE(b); - nbl = PyArray_ITEMSIZE(b); - - nfilt = na > nb ? 
na : nb; - - azfilled = malloc(nal * nfilt); - if (azfilled == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create azfilled"); - goto clean_itzf; - } - bzfilled = malloc(nbl * nfilt); - if (bzfilled == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create bzfilled"); - goto clean_azfilled; - } - - nxl = PyArray_ITEMSIZE(x); - zfzfilled = malloc(nxl * (nfilt - 1)); - if (zfzfilled == NULL) { - PyErr_SetString(PyExc_MemoryError, "Could not create zfzfilled"); - goto clean_bzfilled; - } - /* Initialize zero filled buffers to 0, so that we can use - * Py_XINCREF/Py_XDECREF on it for object arrays (necessary for - * copyswap to work correctly). Stricly speaking, it is not needed for - * fundamental types (as values are copied instead of pointers, without - * refcounts), but oh well... - */ - memset(azfilled, 0, nal * nfilt); - memset(bzfilled, 0, nbl * nfilt); - memset(zfzfilled, 0, nxl * (nfilt - 1)); - - zfill(a, na, azfilled, nfilt); - zfill(b, nb, bzfilled, nfilt); - - /* XXX: Check that zf and zi have same type ? 
*/ - if (zf != NULL) { - nzfl = PyArray_ITEMSIZE(zf); - } else { - nzfl = 0; - } - - /* Iterate over the input array */ - for (i = 0; i < nitx; ++i) { - if (zi != NULL) { - yoyo = itzi->dataptr; - /* Copy initial conditions zi in zfzfilled buffer */ - for (j = 0; j < nfilt - 1; ++j) { - copyswap(zfzfilled + j * nzfl, yoyo, 0, NULL); - yoyo += itzi->strides[axis]; - } - PyArray_ITER_NEXT(itzi); - } else { - zfill(x, 0, zfzfilled, nfilt - 1); - } - - filter_func(bzfilled, azfilled, - itx->dataptr, ity->dataptr, zfzfilled, - nfilt, PyArray_DIM(x, axis), itx->strides[axis], - ity->strides[axis]); - PyArray_ITER_NEXT(itx); - PyArray_ITER_NEXT(ity); - - /* Copy tmp buffer fo final values back into zf output array */ - if (zi != NULL) { - yoyo = itzf->dataptr; - for (j = 0; j < nfilt - 1; ++j) { - copyswap(yoyo, zfzfilled + j * nzfl, 0, NULL); - yoyo += itzf->strides[axis]; - } - PyArray_ITER_NEXT(itzf); - } - } - - /* Free up allocated memory */ - free(zfzfilled); - free(bzfilled); - free(azfilled); - - if (zi != NULL) { - Py_DECREF(itzf); - Py_DECREF(itzi); - } - Py_DECREF(ity); - Py_DECREF(itx); - - return 0; - -clean_bzfilled: - free(bzfilled); -clean_azfilled: - free(azfilled); -clean_itzf: - if (zf != NULL) { - Py_DECREF(itzf); - } -clean_itzi: - if (zi != NULL) { - Py_DECREF(itzi); - } -clean_ity: - Py_DECREF(ity); -clean_itx: - Py_DECREF(itx); -fail: - return -1; -} - -/***************************************************************** - * This is code for a 1-D linear-filter along an arbitrary * - * dimension of an N-D array. 
* - *****************************************************************/ - -/**begin repeat - * #type = float, double, npy_longdouble# - * #NAME = FLOAT, DOUBLE, EXTENDED# - */ -static void @NAME@_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y) -{ - char *ptr_x = x, *ptr_y = y; - @type@ *ptr_Z, *ptr_b; - @type@ *ptr_a; - @type@ *xn, *yn; - const @type@ a0 = *((@type@ *) a); - intp n; - uintp k; - - for (k = 0; k < len_x; k++) { - ptr_b = (@type@ *) b; /* Reset a and b pointers */ - ptr_a = (@type@ *) a; - xn = (@type@ *) ptr_x; - yn = (@type@ *) ptr_y; - if (len_b > 1) { - ptr_Z = ((@type@ *) Z); - *yn = *ptr_Z + *ptr_b / a0 * *xn; /* Calculate first delay (output) */ - ptr_b++; - ptr_a++; - /* Fill in middle delays */ - for (n = 0; n < len_b - 2; n++) { - *ptr_Z = - ptr_Z[1] + *xn * (*ptr_b / a0) - *yn * (*ptr_a / a0); - ptr_b++; - ptr_a++; - ptr_Z++; - } - /* Calculate last delay */ - *ptr_Z = *xn * (*ptr_b / a0) - *yn * (*ptr_a / a0); - } else { - *yn = *xn * (*ptr_b / a0); - } - - ptr_y += stride_Y; /* Move to next input/output point */ - ptr_x += stride_X; - } -} - -static void C@NAME@_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y) -{ - char *ptr_x = x, *ptr_y = y; - @type@ *ptr_Z, *ptr_b; - @type@ *ptr_a; - @type@ *xn, *yn; - @type@ a0r = ((@type@ *) a)[0]; - @type@ a0i = ((@type@ *) a)[1]; - @type@ a0_mag, tmpr, tmpi; - intp n; - uintp k; - - a0_mag = a0r * a0r + a0i * a0i; - for (k = 0; k < len_x; k++) { - ptr_b = (@type@ *) b; /* Reset a and b pointers */ - ptr_a = (@type@ *) a; - xn = (@type@ *) ptr_x; - yn = (@type@ *) ptr_y; - if (len_b > 1) { - ptr_Z = ((@type@ *) Z); - tmpr = ptr_b[0] * a0r + ptr_b[1] * a0i; - tmpi = ptr_b[1] * a0r - ptr_b[0] * a0i; - /* Calculate first delay (output) */ - yn[0] = ptr_Z[0] + (tmpr * xn[0] - tmpi * xn[1]) / a0_mag; - yn[1] = ptr_Z[1] + (tmpi * xn[0] + tmpr * xn[1]) / a0_mag; - ptr_b += 2; - ptr_a += 
2; - /* Fill in middle delays */ - for (n = 0; n < len_b - 2; n++) { - tmpr = ptr_b[0] * a0r + ptr_b[1] * a0i; - tmpi = ptr_b[1] * a0r - ptr_b[0] * a0i; - ptr_Z[0] = - ptr_Z[2] + (tmpr * xn[0] - tmpi * xn[1]) / a0_mag; - ptr_Z[1] = - ptr_Z[3] + (tmpi * xn[0] + tmpr * xn[1]) / a0_mag; - tmpr = ptr_a[0] * a0r + ptr_a[1] * a0i; - tmpi = ptr_a[1] * a0r - ptr_a[0] * a0i; - ptr_Z[0] -= (tmpr * yn[0] - tmpi * yn[1]) / a0_mag; - ptr_Z[1] -= (tmpi * yn[0] + tmpr * yn[1]) / a0_mag; - ptr_b += 2; - ptr_a += 2; - ptr_Z += 2; - } - /* Calculate last delay */ - - tmpr = ptr_b[0] * a0r + ptr_b[1] * a0i; - tmpi = ptr_b[1] * a0r - ptr_b[0] * a0i; - ptr_Z[0] = (tmpr * xn[0] - tmpi * xn[1]) / a0_mag; - ptr_Z[1] = (tmpi * xn[0] + tmpr * xn[1]) / a0_mag; - tmpr = ptr_a[0] * a0r + ptr_a[1] * a0i; - tmpi = ptr_a[1] * a0r - ptr_a[0] * a0i; - ptr_Z[0] -= (tmpr * yn[0] - tmpi * yn[1]) / a0_mag; - ptr_Z[1] -= (tmpi * yn[0] + tmpr * yn[1]) / a0_mag; - } else { - tmpr = ptr_b[0] * a0r + ptr_b[1] * a0i; - tmpi = ptr_b[1] * a0r - ptr_b[0] * a0i; - yn[0] = (tmpr * xn[0] - tmpi * xn[1]) / a0_mag; - yn[1] = (tmpi * xn[0] + tmpr * xn[1]) / a0_mag; - } - - ptr_y += stride_Y; /* Move to next input/output point */ - ptr_x += stride_X; - - } -} -/**end repeat**/ - -static void OBJECT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y) -{ - char *ptr_x = x, *ptr_y = y; - PyObject **ptr_Z, **ptr_b; - PyObject **ptr_a; - PyObject **xn, **yn; - PyObject **a0 = (PyObject **) a; - PyObject *tmp1, *tmp2, *tmp3; - intp n; - uintp k; - - /* My reference counting might not be right */ - for (k = 0; k < len_x; k++) { - ptr_b = (PyObject **) b; /* Reset a and b pointers */ - ptr_a = (PyObject **) a; - xn = (PyObject **) ptr_x; - yn = (PyObject **) ptr_y; - if (len_b > 1) { - ptr_Z = ((PyObject **) Z); - /* Calculate first delay (output) */ - tmp1 = PyNumber_Multiply(*ptr_b, *xn); - tmp2 = PyNumber_Divide(tmp1, *a0); - tmp3 = PyNumber_Add(tmp2, *ptr_Z); - 
Py_XDECREF(*yn); - *yn = tmp3; - Py_DECREF(tmp1); - Py_DECREF(tmp2); - ptr_b++; - ptr_a++; - - /* Fill in middle delays */ - for (n = 0; n < len_b - 2; n++) { - tmp1 = PyNumber_Multiply(*xn, *ptr_b); - tmp2 = PyNumber_Divide(tmp1, *a0); - tmp3 = PyNumber_Add(tmp2, ptr_Z[1]); - Py_DECREF(tmp1); - Py_DECREF(tmp2); - tmp1 = PyNumber_Multiply(*yn, *ptr_a); - tmp2 = PyNumber_Divide(tmp1, *a0); - Py_DECREF(tmp1); - Py_XDECREF(*ptr_Z); - *ptr_Z = PyNumber_Subtract(tmp3, tmp2); - Py_DECREF(tmp2); - Py_DECREF(tmp3); - ptr_b++; - ptr_a++; - ptr_Z++; - } - /* Calculate last delay */ - tmp1 = PyNumber_Multiply(*xn, *ptr_b); - tmp3 = PyNumber_Divide(tmp1, *a0); - Py_DECREF(tmp1); - tmp1 = PyNumber_Multiply(*yn, *ptr_a); - tmp2 = PyNumber_Divide(tmp1, *a0); - Py_DECREF(tmp1); - Py_XDECREF(*ptr_Z); - *ptr_Z = PyNumber_Subtract(tmp3, tmp2); - Py_DECREF(tmp2); - Py_DECREF(tmp3); - } else { - tmp1 = PyNumber_Multiply(*xn, *ptr_b); - Py_XDECREF(*yn); - *yn = PyNumber_Divide(tmp1, *a0); - Py_DECREF(tmp1); - } - - ptr_y += stride_Y; /* Move to next input/output point */ - ptr_x += stride_X; - } -} diff --git a/scipy-0.10.1/scipy/signal/ltisys.py b/scipy-0.10.1/scipy/signal/ltisys.py deleted file mode 100644 index 60020b1cdb..0000000000 --- a/scipy-0.10.1/scipy/signal/ltisys.py +++ /dev/null @@ -1,773 +0,0 @@ -""" -ltisys -- a collection of classes and functions for modeling linear -time invariant systems. -""" - -# -# Author: Travis Oliphant 2001 -# -# Feb 2010: Warren Weckesser -# Rewrote lsim2 and added impulse2. 
-# - -from filter_design import tf2zpk, zpk2tf, normalize -import numpy -from numpy import product, zeros, array, dot, transpose, ones, \ - nan_to_num, zeros_like, linspace -import scipy.interpolate as interpolate -import scipy.integrate as integrate -import scipy.linalg as linalg -from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \ - squeeze, diag, asarray - -__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti', - 'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2'] - - -def tf2ss(num, den): - """Transfer function to state-space representation. - - Parameters - ---------- - num, den : array_like - Sequences representing the numerator and denominator polynomials. - The denominator needs to be at least as long as the numerator. - - Returns - ------- - A, B, C, D : ndarray - State space representation of the system. - - """ - # Controller canonical state-space representation. - # if M+1 = len(num) and K+1 = len(den) then we must have M <= K - # states are found by asserting that X(s) = U(s) / D(s) - # then Y(s) = N(s) * X(s) - # - # A, B, C, and D follow quite naturally. - # - num, den = normalize(num, den) # Strips zeros, checks arrays - nn = len(num.shape) - if nn == 1: - num = asarray([num], num.dtype) - M = num.shape[1] - K = len(den) - if M > K: - msg = "Improper transfer function. `num` is longer than `den`." 
- raise ValueError(msg) - if M == 0 or K == 0: # Null system - return array([], float), array([], float), array([], float), \ - array([], float) - - # pad numerator to have same number of columns has denominator - num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num] - - if num.shape[-1] > 0: - D = num[:, 0] - else: - D = array([], float) - - if K == 1: - return array([], float), array([], float), array([], float), D - - frow = -array([den[1:]]) - A = r_[frow, eye(K - 2, K - 1)] - B = eye(K - 1, 1) - C = num[:, 1:] - num[:, 0] * den[1:] - return A, B, C, D - - -def _none_to_empty(arg): - if arg is None: - return [] - else: - return arg - - -def abcd_normalize(A=None, B=None, C=None, D=None): - """Check state-space matrices and ensure they are rank-2. - - """ - A, B, C, D = map(_none_to_empty, (A, B, C, D)) - A, B, C, D = map(atleast_2d, (A, B, C, D)) - - if ((len(A.shape) > 2) or (len(B.shape) > 2) or \ - (len(C.shape) > 2) or (len(D.shape) > 2)): - raise ValueError("A, B, C, D arrays can be no larger than rank-2.") - - MA, NA = A.shape - MB, NB = B.shape - MC, NC = C.shape - MD, ND = D.shape - - if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0): - MC, NC = MD, NA - C = zeros((MC, NC)) - if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0): - MB, NB = MA, ND - B = zeros(MB, NB) - if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0): - MD, ND = MC, NB - D = zeros(MD, ND) - if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0): - MA, NA = MB, NC - A = zeros(MA, NA) - - if MA != NA: - raise ValueError("A must be square.") - if MA != MB: - raise ValueError("A and B must have the same number of rows.") - if NA != NC: - raise ValueError("A and C must have the same number of columns.") - if MD != MC: - raise ValueError("C and D must have the same number of rows.") - if ND != NB: - raise ValueError("B and D must have the same number of columns.") - - return A, B, C, D - - -def ss2tf(A, B, C, D, input=0): - """State-space to transfer function. 
- - Parameters - ---------- - A, B, C, D : ndarray - State-space representation of linear system. - input : int, optional - For multiple-input systems, the input to use. - - Returns - ------- - num, den : 1D ndarray - Numerator and denominator polynomials (as sequences) - respectively. - - """ - # transfer function is C (sI - A)**(-1) B + D - A, B, C, D = map(asarray, (A, B, C, D)) - # Check consistency and - # make them all rank-2 arrays - A, B, C, D = abcd_normalize(A, B, C, D) - - nout, nin = D.shape - if input >= nin: - raise ValueError("System does not have the input specified.") - - # make MOSI from possibly MOMI system. - if B.shape[-1] != 0: - B = B[:, input] - B.shape = (B.shape[0], 1) - if D.shape[-1] != 0: - D = D[:, input] - - try: - den = poly(A) - except ValueError: - den = 1 - - if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0): - num = numpy.ravel(D) - if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0): - den = [] - return num, den - - num_states = A.shape[0] - type_test = A[:, 0] + B[:, 0] + C[0, :] + D - num = numpy.zeros((nout, num_states + 1), type_test.dtype) - for k in range(nout): - Ck = atleast_2d(C[k, :]) - num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den - - return num, den - - -def zpk2ss(z, p, k): - """Zero-pole-gain representation to state-space representation - - Parameters - ---------- - z, p : sequence - Zeros and poles. - k : float - System gain. - - Returns - ------- - A, B, C, D : ndarray - State-space matrices. - - """ - return tf2ss(*zpk2tf(z, p, k)) - - -def ss2zpk(A, B, C, D, input=0): - """State-space representation to zero-pole-gain representation. - - Parameters - ---------- - A, B, C, D : ndarray - State-space representation of linear system. - input : int, optional - For multiple-input systems, the input to use. - - Returns - ------- - z, p : sequence - Zeros and poles. - k : float - System gain. 
- - """ - return tf2zpk(*ss2tf(A, B, C, D, input=input)) - - -class lti(object): - """Linear Time Invariant class which simplifies representation. - """ - def __init__(self, *args, **kwords): - """Initialize the LTI system using either: - (numerator, denominator) - (zeros, poles, gain) - (A, B, C, D) -- state-space. - """ - N = len(args) - if N == 2: # Numerator denominator transfer function input - self.__dict__['num'], self.__dict__['den'] = normalize(*args) - self.__dict__['zeros'], self.__dict__['poles'], \ - self.__dict__['gain'] = tf2zpk(*args) - self.__dict__['A'], self.__dict__['B'], \ - self.__dict__['C'], \ - self.__dict__['D'] = tf2ss(*args) - self.inputs = 1 - if len(self.num.shape) > 1: - self.outputs = self.num.shape[0] - else: - self.outputs = 1 - elif N == 3: # Zero-pole-gain form - self.__dict__['zeros'], self.__dict__['poles'], \ - self.__dict__['gain'] = args - self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args) - self.__dict__['A'], self.__dict__['B'], \ - self.__dict__['C'], \ - self.__dict__['D'] = zpk2ss(*args) - self.inputs = 1 - if len(self.zeros.shape) > 1: - self.outputs = self.zeros.shape[0] - else: - self.outputs = 1 - elif N == 4: # State-space form - self.__dict__['A'], self.__dict__['B'], \ - self.__dict__['C'], \ - self.__dict__['D'] = abcd_normalize(*args) - self.__dict__['zeros'], self.__dict__['poles'], \ - self.__dict__['gain'] = ss2zpk(*args) - self.__dict__['num'], self.__dict__['den'] = ss2tf(*args) - self.inputs = self.B.shape[-1] - self.outputs = self.C.shape[0] - else: - raise ValueError("Needs 2, 3, or 4 arguments.") - - def __setattr__(self, attr, val): - if attr in ['num', 'den']: - self.__dict__[attr] = val - self.__dict__['zeros'], self.__dict__['poles'], \ - self.__dict__['gain'] = \ - tf2zpk(self.num, self.den) - self.__dict__['A'], self.__dict__['B'], \ - self.__dict__['C'], \ - self.__dict__['D'] = \ - tf2ss(self.num, self.den) - elif attr in ['zeros', 'poles', 'gain']: - self.__dict__[attr] = val - 
self.__dict__['num'], self.__dict__['den'] = \ - zpk2tf(self.zeros, - self.poles, self.gain) - self.__dict__['A'], self.__dict__['B'], \ - self.__dict__['C'], \ - self.__dict__['D'] = \ - zpk2ss(self.zeros, - self.poles, self.gain) - elif attr in ['A', 'B', 'C', 'D']: - self.__dict__[attr] = val - self.__dict__['zeros'], self.__dict__['poles'], \ - self.__dict__['gain'] = \ - ss2zpk(self.A, self.B, - self.C, self.D) - self.__dict__['num'], self.__dict__['den'] = \ - ss2tf(self.A, self.B, - self.C, self.D) - else: - self.__dict__[attr] = val - - def impulse(self, X0=None, T=None, N=None): - return impulse(self, X0=X0, T=T, N=N) - - def step(self, X0=None, T=None, N=None): - return step(self, X0=X0, T=T, N=N) - - def output(self, U, T, X0=None): - return lsim(self, U, T, X0=X0) - - -def lsim2(system, U=None, T=None, X0=None, **kwargs): - """ - Simulate output of a continuous-time linear system, by using - the ODE solver `scipy.integrate.odeint`. - - Parameters - ---------- - system : an instance of the LTI class or a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation: - - * 2: (num, den) - * 3: (zeros, poles, gain) - * 4: (A, B, C, D) - - U : array_like (1D or 2D), optional - An input array describing the input at each time T. Linear - interpolation is used between given times. If there are - multiple inputs, then each column of the rank-2 array - represents an input. If U is not given, the input is assumed - to be zero. - T : array_like (1D or 2D), optional - The time steps at which the input is defined and at which the - output is desired. The default is 101 evenly spaced points on - the interval [0,10.0]. - X0 : array_like (1D), optional - The initial condition of the state vector. If `X0` is not - given, the initial conditions are assumed to be 0. - kwargs : dict - Additional keyword arguments are passed on to the function - odeint. See the notes below for more details. 
- - Returns - ------- - T : 1D ndarray - The time values for the output. - yout : ndarray - The response of the system. - xout : ndarray - The time-evolution of the state-vector. - - Notes - ----- - This function uses :func:`scipy.integrate.odeint` to solve the - system's differential equations. Additional keyword arguments - given to `lsim2` are passed on to `odeint`. See the documentation - for :func:`scipy.integrate.odeint` for the full list of arguments. - - """ - if isinstance(system, lti): - sys = system - else: - sys = lti(*system) - - if X0 is None: - X0 = zeros(sys.B.shape[0], sys.A.dtype) - - if T is None: - # XXX T should really be a required argument, but U was - # changed from a required positional argument to a keyword, - # and T is after U in the argument list. So we either: change - # the API and move T in front of U; check here for T being - # None and raise an excpetion; or assign a default value to T - # here. This code implements the latter. - T = linspace(0, 10.0, 101) - - T = atleast_1d(T) - if len(T.shape) != 1: - raise ValueError("T must be a rank-1 array.") - - if U is not None: - U = atleast_1d(U) - if len(U.shape) == 1: - U = U.reshape(-1, 1) - sU = U.shape - if sU[0] != len(T): - raise ValueError("U must have the same number of rows " - "as elements in T.") - - if sU[1] != sys.inputs: - raise ValueError("The number of inputs in U (%d) is not " - "compatible with the number of system " - "inputs (%d)" % (sU[1], sys.inputs)) - # Create a callable that uses linear interpolation to - # calculate the input at any time. 
- ufunc = interpolate.interp1d(T, U, kind='linear', - axis=0, bounds_error=False) - - def fprime(x, t, sys, ufunc): - """The vector field of the linear system.""" - return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t])))) - xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs) - yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U)) - else: - def fprime(x, t, sys): - """The vector field of the linear system.""" - return dot(sys.A, x) - xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs) - yout = dot(sys.C, transpose(xout)) - - return T, squeeze(transpose(yout)), xout - - -def lsim(system, U, T, X0=None, interp=1): - """ - Simulate output of a continuous-time linear system. - - Parameters - ---------- - system : an instance of the LTI class or a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation: - - * 2: (num, den) - * 3: (zeros, poles, gain) - * 4: (A, B, C, D) - - U : array_like - An input array describing the input at each time `T` - (interpolation is assumed between given times). If there are - multiple inputs, then each column of the rank-2 array - represents an input. - T : array_like - The time steps at which the input is defined and at which the - output is desired. - X0 : - The initial conditions on the state vector (zero by default). - interp : {1, 0} - Whether to use linear (1) or zero-order hold (0) interpolation. - - Returns - ------- - T : 1D ndarray - Time values for the output. - yout : 1D ndarray - System response. - xout : ndarray - Time-evolution of the state-vector. 
- - """ - # system is an lti system or a sequence - # with 2 (num, den) - # 3 (zeros, poles, gain) - # 4 (A, B, C, D) - # describing the system - # U is an input vector at times T - # if system describes multiple inputs - # then U can be a rank-2 array with the number of columns - # being the number of inputs - if isinstance(system, lti): - sys = system - else: - sys = lti(*system) - U = atleast_1d(U) - T = atleast_1d(T) - if len(U.shape) == 1: - U = U.reshape((U.shape[0], 1)) - sU = U.shape - if len(T.shape) != 1: - raise ValueError("T must be a rank-1 array.") - if sU[0] != len(T): - raise ValueError("U must have the same number of rows " - "as elements in T.") - if sU[1] != sys.inputs: - raise ValueError("System does not define that many inputs.") - - if X0 is None: - X0 = zeros(sys.B.shape[0], sys.A.dtype) - - xout = zeros((len(T), sys.B.shape[0]), sys.A.dtype) - xout[0] = X0 - A = sys.A - AT, BT = transpose(sys.A), transpose(sys.B) - dt = T[1] - T[0] - lam, v = linalg.eig(A) - vt = transpose(v) - vti = linalg.inv(vt) - GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt).astype(xout.dtype) - ATm1 = linalg.inv(AT) - ATm2 = dot(ATm1, ATm1) - I = eye(A.shape[0], dtype=A.dtype) - GTmI = GT - I - F1T = dot(dot(BT, GTmI), ATm1) - if interp: - F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1) - - for k in xrange(1, len(T)): - dt1 = T[k] - T[k - 1] - if dt1 != dt: - dt = dt1 - GT = dot(dot(vti, diag(numpy.exp(dt * lam))), - vt).astype(xout.dtype) - GTmI = GT - I - F1T = dot(dot(BT, GTmI), ATm1) - if interp: - F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1) - - xout[k] = dot(xout[k - 1], GT) + dot(U[k - 1], F1T) - if interp: - xout[k] = xout[k] + dot((U[k] - U[k - 1]), F2T) - - yout = (squeeze(dot(U, transpose(sys.D))) + - squeeze(dot(xout, transpose(sys.C)))) - return T, squeeze(yout), squeeze(xout) - - -def _default_response_times(A, n): - """Compute a reasonable set of time samples for the response time. 
- - This function is used by `impulse`, `impulse2`, `step` and `step2` - to compute the response time when the `T` argument to the function - is None. - - Parameters - ---------- - A : ndarray - The system matrix, which is square. - n : int - The number of time samples to generate. - - Returns - ------- - t : ndarray - The 1-D array of length `n` of time samples at which the response - is to be computed. - """ - # Create a reasonable time interval. This could use some more work. - # For example, what is expected when the system is unstable? - vals = linalg.eigvals(A) - r = min(abs(real(vals))) - if r == 0.0: - r = 1.0 - tc = 1.0 / r - t = linspace(0.0, 7 * tc, n) - return t - - -def impulse(system, X0=None, T=None, N=None): - """Impulse response of continuous-time system. - - Parameters - ---------- - system : LTI class or tuple - If specified as a tuple, the system is described as - ``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``. - X0 : array_like, optional - Initial state-vector. Defaults to zero. - T : array_like, optional - Time points. Computed if not given. - N : int, optional - The number of time points to compute (if `T` is not given). - - Returns - ------- - T : ndarray - A 1-D array of time points. - yout : ndarray - A 1-D array containing the impulse response of the system (except for - singularities at zero). - - """ - if isinstance(system, lti): - sys = system - else: - sys = lti(*system) - if X0 is None: - B = sys.B - else: - B = sys.B + X0 - if N is None: - N = 100 - if T is None: - T = _default_response_times(sys.A, N) - h = zeros(T.shape, sys.A.dtype) - s, v = linalg.eig(sys.A) - vi = linalg.inv(v) - C = sys.C - for k in range(len(h)): - es = diag(numpy.exp(s * T[k])) - eA = (dot(dot(v, es), vi)).astype(h.dtype) - h[k] = squeeze(dot(dot(C, eA), B)) - return T, h - - -def impulse2(system, X0=None, T=None, N=None, **kwargs): - """ - Impulse response of a single-input, continuous-time linear system. 
- - - Parameters - ---------- - system : an instance of the LTI class or a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation: - 2 (num, den) - 3 (zeros, poles, gain) - 4 (A, B, C, D) - T : 1-D array_like, optional - The time steps at which the input is defined and at which the - output is desired. If `T` is not given, the function will - generate a set of time samples automatically. - X0 : 1-D array_like, optional - The initial condition of the state vector. Default: 0 (the - zero vector). - N : int, optional - Number of time points to compute. Default: 100. - kwargs : various types - Additional keyword arguments are passed on to the function - `scipy.signal.lsim2`, which in turn passes them on to - `scipy.integrate.odeint`; see the latter's documentation for - information about these arguments. - - Returns - ------- - T : ndarray - The time values for the output. - yout : ndarray - The output response of the system. - - See Also - -------- - impulse, lsim2, integrate.odeint - - Notes - ----- - The solution is generated by calling `scipy.signal.lsim2`, which uses - the differential equation solver `scipy.integrate.odeint`. - - .. versionadded:: 0.8.0 - - - Examples - -------- - - Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) - - >>> import scipy.signal - >>> system = ([1.0], [1.0, 2.0, 1.0]) - >>> t, y = sp.signal.impulse2(system) - >>> import matplotlib.pyplot as plt - >>> plt.plot(t, y) - - """ - if isinstance(system, lti): - sys = system - else: - sys = lti(*system) - B = sys.B - if B.shape[-1] != 1: - raise ValueError("impulse2() requires a single-input system.") - B = B.squeeze() - if X0 is None: - X0 = zeros_like(B) - if N is None: - N = 100 - if T is None: - T = _default_response_times(sys.A, N) - # Move the impulse in the input to the initial conditions, and then - # solve using lsim2(). 
- U = zeros_like(T) - ic = B + X0 - Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs) - return Tr, Yr - - -def step(system, X0=None, T=None, N=None): - """Step response of continuous-time system. - - Parameters - ---------- - system : an instance of the LTI class or a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation. - 2 (num, den) - 3 (zeros, poles, gain) - 4 (A, B, C, D) - X0 : array_like, optional - Initial state-vector (default is zero). - T : array_like, optional - Time points (computed if not given). - N : int - Number of time points to compute if `T` is not given. - - Returns - ------- - T : 1D ndarray - Output time points. - yout : 1D ndarray - Step response of system. - - See also - -------- - scipy.signal.step2 - """ - if isinstance(system, lti): - sys = system - else: - sys = lti(*system) - if N is None: - N = 100 - if T is None: - T = _default_response_times(sys.A, N) - U = ones(T.shape, sys.A.dtype) - vals = lsim(sys, U, T, X0=X0) - return vals[0], vals[1] - - -def step2(system, X0=None, T=None, N=None, **kwargs): - """Step response of continuous-time system. - - This function is functionally the same as `scipy.signal.step`, but - it uses the function `scipy.signal.lsim2` to compute the step - response. - - Parameters - ---------- - system : an instance of the LTI class or a tuple describing the system. - The following gives the number of elements in the tuple and - the interpretation. - 2 (num, den) - 3 (zeros, poles, gain) - 4 (A, B, C, D) - X0 : array_like, optional - Initial state-vector (default is zero). - T : array_like, optional - Time points (computed if not given). - N : int - Number of time points to compute if `T` is not given. - **kwargs : - Additional keyword arguments are passed on the function - `scipy.signal.lsim2`, which in turn passes them on to - :func:`scipy.integrate.odeint`. See the documentation for - :func:`scipy.integrate.odeint` for information about these - arguments. 
- - Returns - ------- - T : 1D ndarray - Output time points. - yout : 1D ndarray - Step response of system. - - See also - -------- - scipy.signal.step - - Notes - ----- - .. versionadded:: 0.8.0 - """ - if isinstance(system, lti): - sys = system - else: - sys = lti(*system) - if N is None: - N = 100 - if T is None: - T = _default_response_times(sys.A, N) - U = ones(T.shape, sys.A.dtype) - vals = lsim2(sys, U, T, X0=X0, **kwargs) - return vals[0], vals[1] diff --git a/scipy-0.10.1/scipy/signal/medianfilter.c b/scipy-0.10.1/scipy/signal/medianfilter.c deleted file mode 100644 index ad90ca9533..0000000000 --- a/scipy-0.10.1/scipy/signal/medianfilter.c +++ /dev/null @@ -1,127 +0,0 @@ - -/*--------------------------------------------------------------------*/ - -#include "Python.h" -#define NO_IMPORT_ARRAY -#include "numpy/noprefix.h" - - -/* defined below */ -void f_medfilt2(float*,float*,intp*,intp*); -void d_medfilt2(double*,double*,intp*,intp*); -void b_medfilt2(unsigned char*,unsigned char*,intp*,intp*); -extern char *check_malloc (int); - - -/* The QUICK_SELECT routine is based on Hoare's Quickselect algorithm, - * with unrolled recursion. - * Author: Thouis R. Jones, 2008 - */ - -#define ELEM_SWAP(t, a, x, y) {register t temp = (a)[x]; (a)[x] = (a)[y]; (a)[y] = temp;} -#define FIRST_LOWEST(x, y, z) (((x) < (y)) && ((x) < (z))) -#define FIRST_HIGHEST(x, y, z) (((x) > (y)) && ((x) > (z))) -#define LOWEST_IDX(a, x, y) (((a)[x] < (a)[y]) ? (x) : (y)) -#define HIGHEST_IDX(a, x, y) (((a)[x] > (a)[y]) ? (x) : (y)) - -/* if (l is index of lowest) {return lower of mid,hi} else if (l is index of highest) {return higher of mid,hi} else return l */ -#define MEDIAN_IDX(a, l, m, h) (FIRST_LOWEST((a)[l], (a)[m], (a)[h]) ? LOWEST_IDX(a, m, h) : (FIRST_HIGHEST((a)[l], (a)[m], (a)[h]) ? 
HIGHEST_IDX(a, m, h) : (l))) - -#define QUICK_SELECT(NAME, TYPE) \ -TYPE NAME(TYPE arr[], int n) \ -{ \ - int lo, hi, mid, md; \ - int median_idx; \ - int ll, hh; \ - TYPE piv; \ - \ - lo = 0; hi = n-1; \ - median_idx = (n - 1) / 2; /* lower of middle values for even-length arrays */ \ - \ - while (1) { \ - if ((hi - lo) < 2) { \ - if (arr[hi] < arr[lo]) ELEM_SWAP(TYPE, arr, lo, hi); \ - return arr[median_idx]; \ - } \ - \ - mid = (hi + lo) / 2; \ - /* put the median of lo,mid,hi at position lo - this will be the pivot */ \ - md = MEDIAN_IDX(arr, lo, mid, hi); \ - ELEM_SWAP(TYPE, arr, lo, md); \ - \ - /* Nibble from each end towards middle, swapping misordered items */ \ - piv = arr[lo]; \ - for (ll = lo+1, hh = hi;; ll++, hh--) { \ - while (arr[ll] < piv) ll++; \ - while (arr[hh] > piv) hh--; \ - if (hh < ll) break; \ - ELEM_SWAP(TYPE, arr, ll, hh); \ - } \ - /* move pivot to top of lower partition */ \ - ELEM_SWAP(TYPE, arr, hh, lo); \ - /* set lo, hi for new range to search */ \ - if (hh < median_idx) /* search upper partition */ \ - lo = hh+1; \ - else if (hh > median_idx) /* search lower partition */ \ - hi = hh-1; \ - else \ - return piv; \ - } \ -} - - -/* 2-D median filter with zero-padding on edges. 
*/ -#define MEDIAN_FILTER_2D(NAME, TYPE, SELECT) \ -void NAME(TYPE* in, TYPE* out, intp* Nwin, intp* Ns) \ -{ \ - int nx, ny, hN[2]; \ - int pre_x, pre_y, pos_x, pos_y; \ - int subx, suby, k, totN; \ - TYPE *myvals, *fptr1, *fptr2, *ptr1, *ptr2; \ - \ - totN = Nwin[0] * Nwin[1]; \ - myvals = (TYPE *) check_malloc( totN * sizeof(TYPE)); \ - \ - hN[0] = Nwin[0] >> 1; \ - hN[1] = Nwin[1] >> 1; \ - ptr1 = in; \ - fptr1 = out; \ - for (ny = 0; ny < Ns[0]; ny++) \ - for (nx = 0; nx < Ns[1]; nx++) { \ - pre_x = hN[1]; \ - pre_y = hN[0]; \ - pos_x = hN[1]; \ - pos_y = hN[0]; \ - if (nx < hN[1]) pre_x = nx; \ - if (nx >= Ns[1] - hN[1]) pos_x = Ns[1] - nx - 1; \ - if (ny < hN[0]) pre_y = ny; \ - if (ny >= Ns[0] - hN[0]) pos_y = Ns[0] - ny - 1; \ - fptr2 = myvals; \ - ptr2 = ptr1 - pre_x - pre_y*Ns[1]; \ - for (suby = -pre_y; suby <= pos_y; suby++) { \ - for (subx = -pre_x; subx <= pos_x; subx++) \ - *fptr2++ = *ptr2++; \ - ptr2 += Ns[1] - (pre_x + pos_x + 1); \ - } \ - ptr1++; \ - \ - /* Zero pad */ \ - for (k = (pre_x + pos_x + 1)*(pre_y + pos_y + 1); k < totN; k++) \ - *fptr2++ = 0.0; \ - \ - /* *fptr1++ = median(myvals,totN); */ \ - *fptr1++ = SELECT(myvals,totN); \ - } \ - free(myvals); \ -} - - -/* define quick_select for floats, doubles, and unsigned characters */ -QUICK_SELECT(f_quick_select, float) -QUICK_SELECT(d_quick_select, double) -QUICK_SELECT(b_quick_select, unsigned char) - -/* define medfilt for floats, doubles, and unsigned characters */ -MEDIAN_FILTER_2D(f_medfilt2, float, f_quick_select) -MEDIAN_FILTER_2D(d_medfilt2, double, d_quick_select) -MEDIAN_FILTER_2D(b_medfilt2, unsigned char, b_quick_select) diff --git a/scipy-0.10.1/scipy/signal/setup.py b/scipy-0.10.1/scipy/signal/setup.py deleted file mode 100755 index 34f84e30cd..0000000000 --- a/scipy-0.10.1/scipy/signal/setup.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python - - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - - config = 
Configuration('signal', parent_package, top_path) - - config.add_data_dir('tests') - - config.add_extension('sigtools', - sources=['sigtoolsmodule.c', 'firfilter.c', - 'medianfilter.c', 'lfilter.c.src', - 'correlate_nd.c.src'], - depends=['sigtools.h'], - include_dirs=['.'] - ) - - config.add_extension('spectral', sources=['spectral.c']) - - config.add_extension('spline', - sources=['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c', - 'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c'], - ) - - return config - - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/signal/setupscons.py b/scipy-0.10.1/scipy/signal/setupscons.py deleted file mode 100755 index 0a3009c2f5..0000000000 --- a/scipy-0.10.1/scipy/signal/setupscons.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python - - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('signal', parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/signal/signaltools.py b/scipy-0.10.1/scipy/signal/signaltools.py deleted file mode 100644 index 4d0a6f0a42..0000000000 --- a/scipy-0.10.1/scipy/signal/signaltools.py +++ /dev/null @@ -1,1602 +0,0 @@ -# Author: Travis Oliphant -# 1999 -- 2002 - - -import sigtools -from scipy import linalg -from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \ - ifftn, fftfreq -from numpy import polyadd, polymul, polydiv, polysub, roots, \ - poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \ - ones, real_if_close, zeros, array, arange, where, rank, \ - newaxis, product, ravel, sum, r_, iscomplexobj, take, \ - argsort, allclose, expand_dims, unique, prod, sort, reshape, \ 
- transpose, dot, mean, ndarray, atleast_2d -import numpy as np -from scipy.misc import factorial -from windows import get_window -from _arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext - -__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d', - 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', - 'lfiltic', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort', - 'unique_roots', 'invres', 'invresz', 'residue', 'residuez', - 'resample', 'detrend', 'lfilter_zi', 'filtfilt', 'decimate'] - - -_modedict = {'valid': 0, 'same': 1, 'full': 2} - -_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, - 'symmetric': 1, 'reflect': 4} - - -def _valfrommode(mode): - try: - val = _modedict[mode] - except KeyError: - if mode not in [0, 1, 2]: - raise ValueError("Acceptable mode flags are 'valid' (0)," - " 'same' (1), or 'full' (2).") - val = mode - return val - - -def _bvalfromboundary(boundary): - try: - val = _boundarydict[boundary] << 2 - except KeyError: - if val not in [0, 1, 2]: - raise ValueError("Acceptable boundary flags are 'fill', 'wrap'" - " (or 'circular'), \n and 'symm' (or 'symmetric').") - val = boundary << 2 - return val - - -def correlate(in1, in2, mode='full'): - """ - Cross-correlate two N-dimensional arrays. - - Cross-correlate in1 and in2 with the output size determined by the mode - argument. - - Parameters - ---------- - in1: array - first input. - in2: array - second input. Should have the same number of dimensions as in1. - mode: str {'valid', 'same', 'full'} - a string indicating the size of the output: - - 'valid': the output consists only of those elements that do not - rely on the zero-padding. - - 'same': the output is the same size as ``in1`` centered - with respect to the 'full' output. - - 'full': the output is the full discrete linear cross-correlation - of the inputs. 
(Default) - - Returns - ------- - out: array - an N-dimensional array containing a subset of the discrete linear - cross-correlation of in1 with in2. - - Notes - ----- - The correlation z of two arrays x and y of rank d is defined as - - z[...,k,...] = sum[..., i_l, ...] - x[..., i_l,...] * conj(y[..., i_l + k,...]) - - """ - val = _valfrommode(mode) - - if mode == 'valid': - ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] - out = np.empty(ps, in1.dtype) - for i in range(len(ps)): - if ps[i] <= 0: - raise ValueError("Dimension of x(%d) < y(%d) " \ - "not compatible with valid mode" % \ - (in1.shape[i], in2.shape[i])) - - z = sigtools._correlateND(in1, in2, out, val) - else: - ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] - # zero pad input - in1zpadded = np.zeros(ps, in1.dtype) - sc = [slice(0, i) for i in in1.shape] - in1zpadded[sc] = in1.copy() - - if mode == 'full': - out = np.empty(ps, in1.dtype) - z = sigtools._correlateND(in1zpadded, in2, out, val) - elif mode == 'same': - out = np.empty(in1.shape, in1.dtype) - - z = sigtools._correlateND(in1zpadded, in2, out, val) - else: - raise ValueError("Uknown mode %s" % mode) - - return z - - -def _centered(arr, newsize): - # Return the center newsize portion of the array. - newsize = asarray(newsize) - currsize = array(arr.shape) - startind = (currsize - newsize) / 2 - endind = startind + newsize - myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] - return arr[tuple(myslice)] - - -def fftconvolve(in1, in2, mode="full"): - """Convolve two N-dimensional arrays using FFT. See convolve. 
- - """ - s1 = array(in1.shape) - s2 = array(in2.shape) - complex_result = (np.issubdtype(in1.dtype, np.complex) or - np.issubdtype(in2.dtype, np.complex)) - size = s1 + s2 - 1 - - # Always use 2**n-sized FFT - fsize = 2 ** np.ceil(np.log2(size)) - IN1 = fftn(in1, fsize) - IN1 *= fftn(in2, fsize) - fslice = tuple([slice(0, int(sz)) for sz in size]) - ret = ifftn(IN1)[fslice].copy() - del IN1 - if not complex_result: - ret = ret.real - if mode == "full": - return ret - elif mode == "same": - if product(s1, axis=0) > product(s2, axis=0): - osize = s1 - else: - osize = s2 - return _centered(ret, osize) - elif mode == "valid": - return _centered(ret, abs(s2 - s1) + 1) - - -def convolve(in1, in2, mode='full'): - """ - Convolve two N-dimensional arrays. - - Convolve in1 and in2 with output size determined by mode. - - Parameters - ---------- - in1: array - first input. - in2: array - second input. Should have the same number of dimensions as in1. - mode: str {'valid', 'same', 'full'} - a string indicating the size of the output: - - ``valid`` : the output consists only of those elements that do not - rely on the zero-padding. - - ``same`` : the output is the same size as ``in1`` centered - with respect to the 'full' output. - - ``full`` : the output is the full discrete linear cross-correlation - of the inputs. (Default) - - - Returns - ------- - out: array - an N-dimensional array containing a subset of the discrete linear - cross-correlation of in1 with in2. 
- - """ - volume = asarray(in1) - kernel = asarray(in2) - - if rank(volume) == rank(kernel) == 0: - return volume * kernel - elif not volume.ndim == kernel.ndim: - raise ValueError("in1 and in2 should have the same rank") - - slice_obj = [slice(None, None, -1)] * len(kernel.shape) - - if mode == 'valid': - for d1, d2 in zip(volume.shape, kernel.shape): - if not d1 >= d2: - raise ValueError( - "in1 should have at least as many items as in2 in " \ - "every dimension for valid mode.") - if np.iscomplexobj(kernel): - return correlate(volume, kernel[slice_obj].conj(), mode) - else: - return correlate(volume, kernel[slice_obj], mode) - - -def order_filter(a, domain, rank): - """ - Perform an order filter on an N-dimensional array. - - Perform an order filter on the array in. The domain argument acts as a - mask centered over each pixel. The non-zero elements of domain are - used to select elements surrounding each input pixel which are placed - in a list. The list is sorted, and the output for that pixel is the - element corresponding to rank in the sorted list. - - Parameters - ---------- - a : ndarray - The N-dimensional input array. - domain : array_like - A mask array with the same number of dimensions as `in`. - Each dimension should have an odd number of elements. - rank : int - A non-negative integer which selects the element from the - sorted list (0 corresponds to the smallest element, 1 is the - next smallest element, etc.). - - Returns - ------- - out : ndarray - The results of the order filter in an array with the same - shape as `in`. 
- - Examples - -------- - >>> import scipy.signal - >>> x = np.arange(25).reshape(5, 5) - >>> domain = np.identity(3) - >>> x - array([[ 0, 1, 2, 3, 4], - [ 5, 6, 7, 8, 9], - [10, 11, 12, 13, 14], - [15, 16, 17, 18, 19], - [20, 21, 22, 23, 24]]) - >>> sp.signal.order_filter(x, domain, 0) - array([[ 0., 0., 0., 0., 0.], - [ 0., 0., 1., 2., 0.], - [ 0., 5., 6., 7., 0.], - [ 0., 10., 11., 12., 0.], - [ 0., 0., 0., 0., 0.]]) - >>> sp.signal.order_filter(x, domain, 2) - array([[ 6., 7., 8., 9., 4.], - [ 11., 12., 13., 14., 9.], - [ 16., 17., 18., 19., 14.], - [ 21., 22., 23., 24., 19.], - [ 20., 21., 22., 23., 24.]]) - - """ - domain = asarray(domain) - size = domain.shape - for k in range(len(size)): - if (size[k] % 2) != 1: - raise ValueError("Each dimension of domain argument " - " should have an odd number of elements.") - return sigtools._order_filterND(a, domain, rank) - - -def medfilt(volume, kernel_size=None): - """ - Perform a median filter on an N-dimensional array. - - Apply a median filter to the input array using a local window-size - given by kernel_size. - - Parameters - ---------- - volume : array_like - An N-dimensional input array. - kernel_size : array_like, optional - A scalar or an N-length list giving the size of the median filter - window in each dimension. Elements of `kernel_size` should be odd. - If `kernel_size` is a scalar, then this scalar is used as the size in - each dimension. Default size is 3 for each dimension. - - Returns - ------- - out : ndarray - An array the same size as input containing the median filtered - result. 
- - """ - volume = atleast_1d(volume) - if kernel_size is None: - kernel_size = [3] * len(volume.shape) - kernel_size = asarray(kernel_size) - if len(kernel_size.shape) == 0: - kernel_size = [kernel_size.item()] * len(volume.shape) - kernel_size = asarray(kernel_size) - - for k in range(len(volume.shape)): - if (kernel_size[k] % 2) != 1: - raise ValueError("Each element of kernel_size should be odd.") - - domain = ones(kernel_size) - - numels = product(kernel_size, axis=0) - order = int(numels / 2) - return sigtools._order_filterND(volume, domain, order) - - -def wiener(im, mysize=None, noise=None): - """ - Perform a Wiener filter on an N-dimensional array. - - Apply a Wiener filter to the N-dimensional array `im`. - - Parameters - ---------- - im : ndarray - An N-dimensional array. - mysize : int or arraylike, optional - A scalar or an N-length list giving the size of the Wiener filter - window in each dimension. Elements of mysize should be odd. - If mysize is a scalar, then this scalar is used as the size - in each dimension. - noise : float, optional - The noise-power to use. If None, then noise is estimated as the - average of the local variance of the input. - - Returns - ------- - out : ndarray - Wiener filtered result with the same shape as `im`. - - """ - im = asarray(im) - if mysize is None: - mysize = [3] * len(im.shape) - mysize = asarray(mysize) - - # Estimate the local mean - lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0) - - # Estimate the local variance - lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0) - - lMean ** 2) - - # Estimate the noise power if needed. - if noise == None: - noise = mean(ravel(lVar), axis=0) - - res = (im - lMean) - res *= (1 - noise / lVar) - res += lMean - out = where(lVar < noise, lMean, res) - - return out - - -def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): - """Convolve two 2-dimensional arrays. 
- - Convolve `in1` and `in2` with output size determined by mode and boundary - conditions determined by `boundary` and `fillvalue`. - - Parameters - ---------- - in1, in2 : ndarray - Two-dimensional input arrays to be convolved. - mode: str, optional - A string indicating the size of the output: - - ``valid`` : the output consists only of those elements that do not - rely on the zero-padding. - - ``same`` : the output is the same size as ``in1`` centered - with respect to the 'full' output. - - ``full`` : the output is the full discrete linear cross-correlation - of the inputs. (Default) - - boundary : str, optional - A flag indicating how to handle boundaries: - - - 'fill' : pad input arrays with fillvalue. (default) - - 'wrap' : circular boundary conditions. - - 'symm' : symmetrical boundary conditions. - - fillvalue : scalar, optional - Value to fill pad input arrays with. Default is 0. - - Returns - ------- - out : ndarray - A 2-dimensional array containing a subset of the discrete linear - convolution of `in1` with `in2`. - - """ - if mode == 'valid': - for d1, d2 in zip(np.shape(in1), np.shape(in2)): - if not d1 >= d2: - raise ValueError( - "in1 should have at least as many items as in2 in " \ - "every dimension for valid mode.") - - val = _valfrommode(mode) - bval = _bvalfromboundary(boundary) - - return sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue) - - -def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): - """Cross-correlate two 2-dimensional arrays. - - Cross correlate in1 and in2 with output size determined by mode and - boundary conditions determined by `boundary` and `fillvalue`. - - Parameters - ---------- - in1, in2 : ndarray - Two-dimensional input arrays to be convolved. - mode: str, optional - A string indicating the size of the output: - - ``valid`` : the output consists only of those elements that do not - rely on the zero-padding. 
- - ``same`` : the output is the same size as ``in1`` centered - with respect to the 'full' output. - - ``full`` : the output is the full discrete linear cross-correlation - of the inputs. (Default) - - boundary : str, optional - A flag indicating how to handle boundaries: - - - 'fill' : pad input arrays with fillvalue. (default) - - 'wrap' : circular boundary conditions. - - 'symm' : symmetrical boundary conditions. - - fillvalue : scalar, optional - Value to fill pad input arrays with. Default is 0. - - Returns - ------- - out : ndarray - A 2-dimensional array containing a subset of the discrete linear - cross-correlation of `in1` with `in2`. - - """ - val = _valfrommode(mode) - bval = _bvalfromboundary(boundary) - - return sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue) - - -def medfilt2d(input, kernel_size=3): - """ - Median filter a 2-dimensional array. - - Apply a median filter to the input array using a local window-size - given by `kernel_size` (must be odd). - - Parameters - ---------- - input : array_like - A 2-dimensional input array. - kernel_size : array_like, optional - A scalar or a list of length 2, giving the size of the - median filter window in each dimension. Elements of - `kernel_size` should be odd. If `kernel_size` is a scalar, - then this scalar is used as the size in each dimension. - Default is a kernel of size (3, 3). - - Returns - ------- - out : ndarray - An array the same size as input containing the median filtered - result. - - """ - image = asarray(input) - if kernel_size is None: - kernel_size = [3] * 2 - kernel_size = asarray(kernel_size) - if len(kernel_size.shape) == 0: - kernel_size = [kernel_size.item()] * 2 - kernel_size = asarray(kernel_size) - - for size in kernel_size: - if (size % 2) != 1: - raise ValueError("Each element of kernel_size should be odd.") - - return sigtools._medfilt2d(image, kernel_size) - - -def lfilter(b, a, x, axis=-1, zi=None): - """ - Filter data along one-dimension with an IIR or FIR filter. 
- - Filter a data sequence, x, using a digital filter. This works for many - fundamental data types (including Object type). The filter is a direct - form II transposed implementation of the standard difference equation - (see Notes). - - Parameters - ---------- - b : array_like - The numerator coefficient vector in a 1-D sequence. - a : array_like - The denominator coefficient vector in a 1-D sequence. If a[0] - is not 1, then both a and b are normalized by a[0]. - x : array_like - An N-dimensional input array. - axis : int - The axis of the input data array along which to apply the - linear filter. The filter is applied to each subarray along - this axis (*Default* = -1) - zi : array_like (optional) - Initial conditions for the filter delays. It is a vector - (or array of vectors for an N-dimensional input) of length - max(len(a),len(b))-1. If zi=None or is not given then initial - rest is assumed. SEE signal.lfiltic for more information. - - Returns - ------- - y : array - The output of the digital filter. - zf : array (optional) - If zi is None, this is not returned, otherwise, zf holds the - final filter delay values. - - Notes - ----- - The filter function is implemented as a direct II transposed structure. - This means that the filter implements - - :: - - a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb] - - a[1]*y[n-1] - ... - a[na]*y[n-na] - - using the following difference equations:: - - y[m] = b[0]*x[m] + z[0,m-1] - z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m] - ... - z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m] - z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m] - - where m is the output sample number and n=max(len(a),len(b)) is the - model order. - - The rational transfer function describing this filter in the - z-transform domain is:: - - -1 -nb - b[0] + b[1]z + ... + b[nb] z - Y(z) = ---------------------------------- X(z) - -1 -na - a[0] + a[1]z + ... 
+ a[na] z - - """ - if isscalar(a): - a = [a] - if zi is None: - return sigtools._linear_filter(b, a, x, axis) - else: - return sigtools._linear_filter(b, a, x, axis, zi) - - -def lfiltic(b, a, y, x=None): - """ - Construct initial conditions for lfilter - - Given a linear filter (b,a) and initial conditions on the output y - and the input x, return the inital conditions on the state vector zi - which is used by lfilter to generate the output given the input. - - If M=len(b)-1 and N=len(a)-1. Then, the initial conditions are given - in the vectors x and y as:: - - x = {x[-1],x[-2],...,x[-M]} - y = {y[-1],y[-2],...,y[-N]} - - If x is not given, its inital conditions are assumed zero. - If either vector is too short, then zeros are added - to achieve the proper length. - - The output vector zi contains:: - - zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]} where K=max(M,N). - - """ - N = np.size(a) - 1 - M = np.size(b) - 1 - K = max(M, N) - y = asarray(y) - zi = zeros(K, y.dtype.char) - if x is None: - x = zeros(M, y.dtype.char) - else: - x = asarray(x) - L = np.size(x) - if L < M: - x = r_[x, zeros(M - L)] - L = np.size(y) - if L < N: - y = r_[y, zeros(N - L)] - - for m in range(M): - zi[m] = sum(b[m + 1:] * x[:M - m], axis=0) - - for m in range(N): - zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0) - - return zi - - -def deconvolve(signal, divisor): - """Deconvolves divisor out of signal. - - """ - num = atleast_1d(signal) - den = atleast_1d(divisor) - N = len(num) - D = len(den) - if D > N: - quot = [] - rem = num - else: - input = ones(N - D + 1, float) - input[1:] = 0 - quot = lfilter(num, den, input) - rem = num - convolve(den, quot, mode='full') - return quot, rem - - -def hilbert(x, N=None, axis=-1): - """ - Compute the analytic signal. - - The transformation is done along the last axis by default. - - Parameters - ---------- - x : array_like - Signal data - N : int, optional - Number of Fourier components. 
Default: ``x.shape[axis]`` - axis : int, optional - Axis along which to do the transformation. Default: -1. - - Returns - ------- - xa : ndarray - Analytic signal of `x`, of each 1-D array along `axis` - - Notes - ----- - The analytic signal `x_a(t)` of `x(t)` is:: - - x_a = F^{-1}(F(x) 2U) = x + i y - - where ``F`` is the Fourier transform, ``U`` the unit step function, - and ``y`` the Hilbert transform of ``x``. [1]_ - - `axis` argument is new in scipy 0.8.0. - - References - ---------- - .. [1] Wikipedia, "Analytic signal". - http://en.wikipedia.org/wiki/Analytic_signal - - """ - x = asarray(x) - if iscomplexobj(x): - raise ValueError("x must be real.") - if N is None: - N = x.shape[axis] - if N <= 0: - raise ValueError("N must be positive.") - - Xf = fft(x, N, axis=axis) - h = zeros(N) - if N % 2 == 0: - h[0] = h[N / 2] = 1 - h[1:N / 2] = 2 - else: - h[0] = 1 - h[1:(N + 1) / 2] = 2 - - if len(x.shape) > 1: - ind = [newaxis] * x.ndim - ind[axis] = slice(None) - h = h[ind] - x = ifft(Xf * h, axis=axis) - return x - - -def hilbert2(x, N=None): - """ - Compute the '2-D' analytic signal of `x` - - Parameters - ---------- - x : array_like - 2-D signal data. - N : int or tuple of two ints, optional - Number of Fourier components. Default is ``x.shape`` - - Returns - ------- - xa : ndarray - Analytic signal of `x` taken along axes (0,1). - - References - ---------- - .. 
[1] Wikipedia, "Analytic signal", - http://en.wikipedia.org/wiki/Analytic_signal - - """ - x = atleast_2d(x) - if len(x.shape) > 2: - raise ValueError("x must be rank 2.") - if iscomplexobj(x): - raise ValueError("x must be real.") - if N is None: - N = x.shape - elif isinstance(N, int): - if N <= 0: - raise ValueError("N must be positive.") - N = (N, N) - elif len(N) != 2 or np.any(np.asarray(N) <= 0): - raise ValueError("When given as a tuple, N must hold exactly " - "two positive integers") - - Xf = fft2(x, N, axes=(0, 1)) - h1 = zeros(N[0], 'd') - h2 = zeros(N[1], 'd') - for p in range(2): - h = eval("h%d" % (p + 1)) - N1 = N[p] - if N1 % 2 == 0: - h[0] = h[N1 / 2] = 1 - h[1:N1 / 2] = 2 - else: - h[0] = 1 - h[1:(N1 + 1) / 2] = 2 - exec("h%d = h" % (p + 1), globals(), locals()) - - h = h1[:, newaxis] * h2[newaxis, :] - k = len(x.shape) - while k > 2: - h = h[:, newaxis] - k -= 1 - x = ifft2(Xf * h, axes=(0, 1)) - return x - - -def cmplx_sort(p): - "sort roots based on magnitude." - p = asarray(p) - if iscomplexobj(p): - indx = argsort(abs(p)) - else: - indx = argsort(p) - return take(p, indx, 0), indx - - -def unique_roots(p, tol=1e-3, rtype='min'): - """ - Determine unique roots and their multiplicities from a list of roots. - - Parameters - ---------- - p : array_like - The list of roots. - tol : float, optional - The tolerance for two roots to be considered equal. Default is 1e-3. - rtype : {'max', 'min, 'avg'}, optional - How to determine the returned root if multiple roots are within - `tol` of each other. - - - 'max': pick the maximum of those roots. - - 'min': pick the minimum of those roots. - - 'avg': take the average of those roots. - - Returns - ------- - pout : ndarray - The list of unique roots, sorted from low to high. - mult : ndarray - The multiplicity of each root. - - Notes - ----- - This utility function is not specific to roots but can be used for any - sequence of values for which uniqueness and multiplicity has to be - determined. 
For a more general routine, see `numpy.unique`. - - Examples - -------- - >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3] - >>> uniq, mult = sp.signal.unique_roots(vals, tol=2e-2, rtype='avg') - - Check which roots have multiplicity larger than 1: - - >>> uniq[mult > 1] - array([ 1.305]) - - """ - if rtype in ['max', 'maximum']: - comproot = np.maximum - elif rtype in ['min', 'minimum']: - comproot = np.minimum - elif rtype in ['avg', 'mean']: - comproot = np.mean - p = asarray(p) * 1.0 - tol = abs(tol) - p, indx = cmplx_sort(p) - pout = [] - mult = [] - indx = -1 - curp = p[0] + 5 * tol - sameroots = [] - for k in range(len(p)): - tr = p[k] - if abs(tr - curp) < tol: - sameroots.append(tr) - curp = comproot(sameroots) - pout[indx] = curp - mult[indx] += 1 - else: - pout.append(tr) - curp = tr - sameroots = [tr] - indx += 1 - mult.append(1) - return array(pout), array(mult) - - -def invres(r, p, k, tol=1e-3, rtype='avg'): - """Compute b(s) and a(s) from partial fraction expansion: r,p,k - - If M = len(b) and N = len(a) - - b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1] - H(s) = ------ = ---------------------------------------------- - a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1] - - r[0] r[1] r[-1] - = -------- + -------- + ... + --------- + k(s) - (s-p[0]) (s-p[1]) (s-p[-1]) - - If there are any repeated roots (closer than tol), then the partial - fraction expansion has terms like - - r[i] r[i+1] r[i+n-1] - -------- + ----------- + ... 
+ ----------- - (s-p[i]) (s-p[i])**2 (s-p[i])**n - - See Also - -------- - residue, poly, polyval, unique_roots - - """ - extra = k - p, indx = cmplx_sort(p) - r = take(r, indx, 0) - pout, mult = unique_roots(p, tol=tol, rtype=rtype) - p = [] - for k in range(len(pout)): - p.extend([pout[k]] * mult[k]) - a = atleast_1d(poly(p)) - if len(extra) > 0: - b = polymul(extra, a) - else: - b = [0] - indx = 0 - for k in range(len(pout)): - temp = [] - for l in range(len(pout)): - if l != k: - temp.extend([pout[l]] * mult[l]) - for m in range(mult[k]): - t2 = temp[:] - t2.extend([pout[k]] * (mult[k] - m - 1)) - b = polyadd(b, r[indx] * poly(t2)) - indx += 1 - b = real_if_close(b) - while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1): - b = b[1:] - return b, a - - -def residue(b, a, tol=1e-3, rtype='avg'): - """ - Compute partial-fraction expansion of b(s) / a(s). - - If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction - expansion H(s) is defined as:: - - b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1] - H(s) = ------ = ---------------------------------------------- - a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1] - - r[0] r[1] r[-1] - = -------- + -------- + ... + --------- + k(s) - (s-p[0]) (s-p[1]) (s-p[-1]) - - If there are any repeated roots (closer together than `tol`), then H(s) - has terms like:: - - r[i] r[i+1] r[i+n-1] - -------- + ----------- + ... + ----------- - (s-p[i]) (s-p[i])**2 (s-p[i])**n - - Returns - ------- - r : ndarray - Residues. - p : ndarray - Poles. - k : ndarray - Coefficients of the direct polynomial term. 
- - See Also - -------- - invres, numpy.poly, unique_roots - - """ - - b, a = map(asarray, (b, a)) - rscale = a[0] - k, b = polydiv(b, a) - p = roots(a) - r = p * 0.0 - pout, mult = unique_roots(p, tol=tol, rtype=rtype) - p = [] - for n in range(len(pout)): - p.extend([pout[n]] * mult[n]) - p = asarray(p) - # Compute the residue from the general formula - indx = 0 - for n in range(len(pout)): - bn = b.copy() - pn = [] - for l in range(len(pout)): - if l != n: - pn.extend([pout[l]] * mult[l]) - an = atleast_1d(poly(pn)) - # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is - # multiplicity of pole at po[n] - sig = mult[n] - for m in range(sig, 0, -1): - if sig > m: - # compute next derivative of bn(s) / an(s) - term1 = polymul(polyder(bn, 1), an) - term2 = polymul(bn, polyder(an, 1)) - bn = polysub(term1, term2) - an = polymul(an, an) - r[indx + m - 1] = polyval(bn, pout[n]) / polyval(an, pout[n]) \ - / factorial(sig - m) - indx += sig - return r / rscale, p, k - - -def residuez(b, a, tol=1e-3, rtype='avg'): - """Compute partial-fraction expansion of b(z) / a(z). - - If M = len(b) and N = len(a) - - b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1) - H(z) = ------ = ---------------------------------------------- - a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1) - - r[0] r[-1] - = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... - (1-p[0]z**(-1)) (1-p[-1]z**(-1)) - - If there are any repeated roots (closer than tol), then the partial - fraction expansion has terms like - - r[i] r[i+1] r[i+n-1] - -------------- + ------------------ + ... 
+ ------------------ - (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n - - See also - -------- - invresz, poly, polyval, unique_roots - - """ - b, a = map(asarray, (b, a)) - gain = a[0] - brev, arev = b[::-1], a[::-1] - krev, brev = polydiv(brev, arev) - if krev == []: - k = [] - else: - k = krev[::-1] - b = brev[::-1] - p = roots(a) - r = p * 0.0 - pout, mult = unique_roots(p, tol=tol, rtype=rtype) - p = [] - for n in range(len(pout)): - p.extend([pout[n]] * mult[n]) - p = asarray(p) - # Compute the residue from the general formula (for discrete-time) - # the polynomial is in z**(-1) and the multiplication is by terms - # like this (1-p[i] z**(-1))**mult[i]. After differentiation, - # we must divide by (-p[i])**(m-k) as well as (m-k)! - indx = 0 - for n in range(len(pout)): - bn = brev.copy() - pn = [] - for l in range(len(pout)): - if l != n: - pn.extend([pout[l]] * mult[l]) - an = atleast_1d(poly(pn))[::-1] - # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is - # multiplicity of pole at po[n] and b(z) and a(z) are polynomials. - sig = mult[n] - for m in range(sig, 0, -1): - if sig > m: - # compute next derivative of bn(s) / an(s) - term1 = polymul(polyder(bn, 1), an) - term2 = polymul(bn, polyder(an, 1)) - bn = polysub(term1, term2) - an = polymul(an, an) - r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) / - polyval(an, 1.0 / pout[n]) / - factorial(sig - m) / (-pout[n]) ** (sig - m)) - indx += sig - return r / gain, p, k - - -def invresz(r, p, k, tol=1e-3, rtype='avg'): - """Compute b(z) and a(z) from partial fraction expansion: r,p,k - - If M = len(b) and N = len(a) - - b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1) - H(z) = ------ = ---------------------------------------------- - a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1) - - r[0] r[-1] - = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... 
- (1-p[0]z**(-1)) (1-p[-1]z**(-1)) - - If there are any repeated roots (closer than tol), then the partial - fraction expansion has terms like - - r[i] r[i+1] r[i+n-1] - -------------- + ------------------ + ... + ------------------ - (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n - - See also - -------- - residuez, poly, polyval, unique_roots - - """ - extra = asarray(k) - p, indx = cmplx_sort(p) - r = take(r, indx, 0) - pout, mult = unique_roots(p, tol=tol, rtype=rtype) - p = [] - for k in range(len(pout)): - p.extend([pout[k]] * mult[k]) - a = atleast_1d(poly(p)) - if len(extra) > 0: - b = polymul(extra, a) - else: - b = [0] - indx = 0 - brev = asarray(b)[::-1] - for k in range(len(pout)): - temp = [] - # Construct polynomial which does not include any of this root - for l in range(len(pout)): - if l != k: - temp.extend([pout[l]] * mult[l]) - for m in range(mult[k]): - t2 = temp[:] - t2.extend([pout[k]] * (mult[k] - m - 1)) - brev = polyadd(brev, (r[indx] * poly(t2))[::-1]) - indx += 1 - b = real_if_close(brev[::-1]) - return b, a - - -def resample(x, num, t=None, axis=0, window=None): - """ - Resample `x` to `num` samples using Fourier method along the given axis. - - The resampled signal starts at the same value as `x` but is sampled - with a spacing of ``len(x) / num * (spacing of x)``. Because a - Fourier method is used, the signal is assumed to be periodic. - - Parameters - ---------- - x : array_like - The data to be resampled. - num : int - The number of samples in the resampled signal. - t : array_like, optional - If `t` is given, it is assumed to be the sample positions - associated with the signal data in `x`. - axis : int, optional - The axis of `x` that is resampled. Default is 0. - window : array_like, callable, string, float, or tuple, optional - Specifies the window applied to the signal in the Fourier - domain. See below for details. 
- - Returns - ------- - resampled_x or (resampled_x, resampled_t) - Either the resampled array, or, if `t` was given, a tuple - containing the resampled array and the corresponding resampled - positions. - - Notes - ----- - The argument `window` controls a Fourier-domain window that tapers - the Fourier spectrum before zero-padding to alleviate ringing in - the resampled values for sampled signals you didn't intend to be - interpreted as band-limited. - - If `window` is a function, then it is called with a vector of inputs - indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). - - If `window` is an array of the same length as `x.shape[axis]` it is - assumed to be the window to be applied directly in the Fourier - domain (with dc and low-frequency first). - - For any other type of `window`, the function `scipy.signal.get_window` - is called to generate the window. - - The first sample of the returned vector is the same as the first - sample of the input vector. The spacing between samples is changed - from dx to: - - dx * len(x) / num - - If `t` is not None, then it represents the old sample positions, - and the new sample positions will be returned as well as the new - samples. 
- - """ - x = asarray(x) - X = fft(x, axis=axis) - Nx = x.shape[axis] - if window is not None: - if callable(window): - W = window(fftfreq(Nx)) - elif isinstance(window, ndarray) and window.shape == (Nx,): - W = window - else: - W = ifftshift(get_window(window, Nx)) - newshape = ones(len(x.shape)) - newshape[axis] = len(W) - W.shape = newshape - X = X * W - sl = [slice(None)] * len(x.shape) - newshape = list(x.shape) - newshape[axis] = num - N = int(np.minimum(num, Nx)) - Y = zeros(newshape, 'D') - sl[axis] = slice(0, (N + 1) / 2) - Y[sl] = X[sl] - sl[axis] = slice(-(N - 1) / 2, None) - Y[sl] = X[sl] - y = ifft(Y, axis=axis) * (float(num) / float(Nx)) - - if x.dtype.char not in ['F', 'D']: - y = y.real - - if t is None: - return y - else: - new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] - return y, new_t - - -def detrend(data, axis=-1, type='linear', bp=0): - """ - Remove linear trend along axis from data. - - Parameters - ---------- - data : array_like - The input data. - axis : int, optional - The axis along which to detrend the data. By default this is the - last axis (-1). - type : {'linear', 'constant'}, optional - The type of detrending. If ``type == 'linear'`` (default), - the result of a linear least-squares fit to `data` is subtracted - from `data`. - If ``type == 'constant'``, only the mean of `data` is subtracted. - bp : array_like of ints, optional - A sequence of break points. If given, an individual linear fit is - performed for each part of `data` between two break points. - Break points are specified as indices into `data`. - - Returns - ------- - ret : ndarray - The detrended input data. 
- - Examples - -------- - >>> randgen = np.random.RandomState(9) - >>> npoints = 1e3 - >>> noise = randgen.randn(npoints) - >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise - >>> (sp.signal.detrend(x) - noise).max() < 0.01 - True - - """ - if type not in ['linear', 'l', 'constant', 'c']: - raise ValueError("Trend type must be 'linear' or 'constant'.") - data = asarray(data) - dtype = data.dtype.char - if dtype not in 'dfDF': - dtype = 'd' - if type in ['constant', 'c']: - ret = data - expand_dims(mean(data, axis), axis) - return ret - else: - dshape = data.shape - N = dshape[axis] - bp = sort(unique(r_[0, bp, N])) - if np.any(bp > N): - raise ValueError("Breakpoints must be less than length " - "of data along given axis.") - Nreg = len(bp) - 1 - # Restructure data so that axis is along first dimension and - # all other dimensions are collapsed into second dimension - rnk = len(dshape) - if axis < 0: - axis = axis + rnk - newdims = r_[axis, 0:axis, axis + 1:rnk] - newdata = reshape(transpose(data, tuple(newdims)), - (N, prod(dshape, axis=0) / N)) - newdata = newdata.copy() # make sure we have a copy - if newdata.dtype.char not in 'dfDF': - newdata = newdata.astype(dtype) - # Find leastsq fit and remove it for each piece - for m in range(Nreg): - Npts = bp[m + 1] - bp[m] - A = ones((Npts, 2), dtype) - A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts) - sl = slice(bp[m], bp[m + 1]) - coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) - newdata[sl] = newdata[sl] - dot(A, coef) - # Put data back in original shape. - tdshape = take(dshape, newdims, 0) - ret = reshape(newdata, tuple(tdshape)) - vals = range(1, rnk) - olddims = vals[:axis] + [0] + vals[axis:] - ret = transpose(ret, tuple(olddims)) - return ret - - -def lfilter_zi(b, a): - """ - Compute an initial state `zi` for the lfilter function that corresponds - to the steady state of the step response. 
- - A typical use of this function is to set the initial state so that the - output of the filter starts at the same value as the first element of - the signal to be filtered. - - Parameters - ---------- - b, a : array_like (1-D) - The IIR filter coefficients. See `scipy.signal.lfilter` for more - information. - - Returns - ------- - zi : 1-D ndarray - The initial state for the filter. - - Notes - ----- - A linear filter with order m has a state space representation (A, B, C, D), - for which the output y of the filter can be expressed as:: - - z(n+1) = A*z(n) + B*x(n) - y(n) = C*z(n) + D*x(n) - - where z(n) is a vector of length m, A has shape (m, m), B has shape - (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is - a scalar). lfilter_zi solves:: - - zi = A*zi + B - - In other words, it finds the initial condition for which the response - to an input of all ones is a constant. - - Given the filter coefficients `a` and `b`, the state space matrices - for the transposed direct form II implementation of the linear filter, - which is the implementation used by scipy.signal.lfilter, are:: - - A = scipy.linalg.companion(a).T - B = b[1:] - a[1:]*b[0] - - assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first - divided by a[0]. - - Examples - -------- - The following code creates a lowpass Butterworth filter. Then it - applies that filter to an array whose values are all 1.0; the - output is also all 1.0, as expected for a lowpass filter. If the - `zi` argument of `lfilter` had not been given, the output would have - shown the transient signal. 
- - >>> from numpy import array, ones - >>> from scipy.signal import lfilter, lfilter_zi, butter - >>> b, a = butter(5, 0.25) - >>> zi = lfilter_zi(b, a) - >>> y, zo = lfilter(b, a, ones(10), zi=zi) - >>> y - array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) - - Another example: - - >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) - >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) - >>> y - array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, - 0.44399389, 0.35505241]) - - Note that the `zi` argument to `lfilter` was computed using - `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no - transient until the input drops from 0.5 to 0.0. - - """ - - # FIXME: Can this function be replaced with an appropriate - # use of lfiltic? For example, when b,a = butter(N,Wn), - # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). - # - - # We could use scipy.signal.normalize, but it uses warnings in - # cases where a ValueError is more appropriate, and it allows - # b to be 2D. - b = np.atleast_1d(b) - if b.ndim != 1: - raise ValueError("Numerator b must be rank 1.") - a = np.atleast_1d(a) - if a.ndim != 1: - raise ValueError("Denominator a must be rank 1.") - - while len(a) > 1 and a[0] == 0.0: - a = a[1:] - if a.size < 1: - raise ValueError("There must be at least one nonzero `a` coefficient.") - - if a[0] != 1.0: - # Normalize the coefficients so a[0] == 1. - a = a / a[0] - b = b / a[0] - - n = max(len(a), len(b)) - - # Pad a or b with zeros so they are the same length. 
- if len(a) < n: - a = np.r_[a, np.zeros(n - len(a))] - elif len(b) < n: - b = np.r_[b, np.zeros(n - len(b))] - - IminusA = np.eye(n - 1) - linalg.companion(a).T - B = b[1:] - a[1:] * b[0] - # Solve zi = A*zi + B - zi = np.linalg.solve(IminusA, B) - - # For future reference: we could also use the following - # explicit formulas to solve the linear system: - # - # zi = np.zeros(n - 1) - # zi[0] = B.sum() / IminusA[:,0].sum() - # asum = 1.0 - # csum = 0.0 - # for k in range(1,n-1): - # asum += a[k] - # csum += b[k] - a[k]*b[0] - # zi[k] = asum*zi[0] - csum - - return zi - - -def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None): - """A forward-backward filter. - - This function applies a linear filter twice, once forward - and once backwards. The combined filter has linear phase. - - Before applying the filter, the function can pad the data along the - given axis in one of three ways: odd, even or constant. The odd - and even extensions have the corresponding symmetry about the end point - of the data. The constant extension extends the data with the values - at end points. On both the forward and backwards passes, the - initial condition of the filter is found by using lfilter_zi and - scaling it by the end point of the extended data. - - Parameters - ---------- - b : array_like, 1-D - The numerator coefficient vector of the filter. - a : array_like, 1-D - The denominator coefficient vector of the filter. If a[0] - is not 1, then both a and b are normalized by a[0]. - x : array_like - The array of data to be filtered. - axis : int, optional - The axis of `x` to which the filter is applied. - Default is -1. - padtype : str or None, optional - Must be 'odd', 'even', 'constant', or None. This determines the - type of extension to use for the padded signal to which the filter - is applied. If `padtype` is None, no padding is used. The default - is 'odd'. 
- padlen : int or None, optional - The number of elements by which to extend `x` at both ends of - `axis` before applying the filter. This value must be less than - `x.shape[axis]-1`. `padlen=0` implies no padding. - The default value is 3*max(len(a),len(b)). - - Returns - ------- - y : ndarray - The filtered output, an array of type numpy.float64 with the same - shape as `x`. - - See Also - -------- - lfilter_zi - lfilter - - Examples - -------- - First we create a one second signal that is the sum of two pure sine - waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz. - - >>> t = np.linspace(0, 1.0, 2001) - >>> xlow = np.sin(2 * np.pi * 5 * t) - >>> xhigh = np.sin(2 * np.pi * 250 * t) - >>> x = xlow + xhigh - - Now create a lowpass Butterworth filter with a cutoff of 0.125 times - the Nyquist rate, or 125 Hz, and apply it to x with filtfilt. The - result should be approximately xlow, with no phase shift. - - >>> from scipy.signal import butter - >>> b, a = butter(8, 0.125) - >>> y = filtfilt(b, a, x, padlen=150) - >>> np.abs(y - xlow).max() - 9.1086182074789912e-06 - - We get a fairly clean result for this artificial example because - the odd extension is exact, and with the moderately long padding, - the filter's transients have dissipated by the time the actual data - is reached. In general, transient effects at the edges are - unavoidable. - """ - - if padtype not in ['even', 'odd', 'constant', None]: - raise ValueError(("Unknown value '%s' given to padtype. padtype must " - "be 'even', 'odd', 'constant', or None.") % - padtype) - - b = np.asarray(b) - a = np.asarray(a) - x = np.asarray(x) - - ntaps = max(len(a), len(b)) - - if padtype is None: - padlen = 0 - - if padlen is None: - # Original padding; preserved for backwards compatibility. - edge = ntaps * 3 - else: - edge = padlen - - # x's 'axis' dimension must be bigger than edge. 
- if x.shape[axis] <= edge: - raise ValueError("The length of the input vector x must be at least " - "padlen, which is %d." % edge) - - if padtype is not None and edge > 0: - # Make an extension of length `edge` at each - # end of the input array. - if padtype == 'even': - ext = even_ext(x, edge, axis=axis) - elif padtype == 'odd': - ext = odd_ext(x, edge, axis=axis) - else: - ext = const_ext(x, edge, axis=axis) - else: - ext = x - - # Get the steady state of the filter's step response. - zi = lfilter_zi(b, a) - - # Reshape zi and create x0 so that zi*x0 broadcasts - # to the correct value for the 'zi' keyword argument - # to lfilter. - zi_shape = [1] * x.ndim - zi_shape[axis] = zi.size - zi = np.reshape(zi, zi_shape) - x0 = axis_slice(ext, stop=1, axis=axis) - - # Forward filter. - (y, zf) = lfilter(b, a, ext, zi=zi * x0) - - # Backward filter. - # Create y0 so zi*y0 broadcasts appropriately. - y0 = axis_slice(y, start=-1, axis=axis) - (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), zi=zi * y0) - - # Reverse y. - y = axis_reverse(y, axis=axis) - - if edge > 0: - # Slice the actual signal from the extended signal. - y = axis_slice(y, start=edge, stop=-edge, axis=axis) - - return y - - -from scipy.signal.filter_design import cheby1 -from scipy.signal.fir_filter_design import firwin - - -def decimate(x, q, n=None, ftype='iir', axis=-1): - """Downsample the signal x by an integer factor q, using an order n filter. - - By default an order 8 Chebyshev type I filter is used. A 30 point FIR - filter with hamming window is used if ftype is 'fir'. 
- - Parameters - ---------- - x : N-d array - the signal to be downsampled - q : int - the downsampling factor - n : int or None - the order of the filter (1 less than the length for 'fir') - ftype : {'iir' or 'fir'} - the type of the lowpass filter - axis : int - the axis along which to decimate - - Returns - ------- - y : N-d array - the down-sampled signal - - See also - -------- - resample - """ - - if not isinstance(q, int): - raise TypeError("q must be an integer") - - if n is None: - if ftype == 'fir': - n = 30 - else: - n = 8 - - if ftype == 'fir': - b = firwin(n + 1, 1. / q, window='hamming') - a = 1. - else: - b, a = cheby1(n, 0.05, 0.8 / q) - - y = lfilter(b, a, x, axis=axis) - - sl = [slice(None)] * y.ndim - sl[axis] = slice(None, None, q) - return y[sl] diff --git a/scipy-0.10.1/scipy/signal/sigtools.h b/scipy-0.10.1/scipy/signal/sigtools.h deleted file mode 100644 index a6ace22aec..0000000000 --- a/scipy-0.10.1/scipy/signal/sigtools.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _SCIPY_PRIVATE_SIGNAL_SIGTOOLS_H_ -#define _SCIPY_PRIVATE_SIGNAL_SIGTOOLS_H_ - -#include "Python.h" - -#if PY_VERSION_HEX >= 0x03000000 - #define PyString_AsString PyBytes_AsString - #define PyString_FromFormat PyBytes_FromFormat -#endif - -#include "numpy/noprefix.h" - -#define BOUNDARY_MASK 12 -#define OUTSIZE_MASK 3 -#define FLIP_MASK 16 -#define TYPE_MASK (32+64+128+256+512) -#define TYPE_SHIFT 5 - -#define FULL 2 -#define SAME 1 -#define VALID 0 - -#define CIRCULAR 8 -#define REFLECT 4 -#define PAD 0 - -#define MAXTYPES 21 - - -/* Generally useful structures for passing data into and out of - subroutines. 
Used in the generic routines instead of the - Python Specific structures so that the routines can be easily - grabbed and used in another scripting language */ - -typedef struct { - char *data; - int elsize; -} Generic_ptr; - -typedef struct { - char *data; - intp numels; - int elsize; - char *zero; /* Pointer to Representation of zero */ -} Generic_Vector; - -typedef struct { - char *data; - int nd; - intp *dimensions; - int elsize; - intp *strides; - char *zero; /* Pointer to Representation of zero */ -} Generic_Array; - -typedef void (MultAddFunction) (char *, intp, char *, intp, char *, intp *, intp *, int, intp, int, intp *, intp *, uintp *); - -PyObject* -scipy_signal_sigtools_linear_filter(PyObject * NPY_UNUSED(dummy), PyObject * args); - -PyObject* -scipy_signal_sigtools_correlateND(PyObject *NPY_UNUSED(dummy), PyObject *args); - -void -scipy_signal_sigtools_linear_filter_module_init(); - -/* -static int index_out_of_bounds(int *, int *, int ); -static long compute_offsets (unsigned long *, long *, int *, int *, int *, int *, int); -static int increment(int *, int, int *); -static void convolveND(Generic_Array *, Generic_Array *, Generic_Array *, MultAddFunction *, int); -static void RawFilter(Generic_Vector, Generic_Vector, Generic_Array, Generic_Array, Generic_Array *, Generic_Array *, BasicFilterFunction *, int); -*/ - -#endif diff --git a/scipy-0.10.1/scipy/signal/sigtoolsmodule.c b/scipy-0.10.1/scipy/signal/sigtoolsmodule.c deleted file mode 100644 index 1525082ff2..0000000000 --- a/scipy-0.10.1/scipy/signal/sigtoolsmodule.c +++ /dev/null @@ -1,1373 +0,0 @@ -/* SIGTOOLS module by Travis Oliphant - -Copyright 2005 Travis Oliphant -Permission to use, copy, modify, and distribute this software without fee -is granted under the SciPy License. 
-*/ -#include - -#define PY_ARRAY_UNIQUE_SYMBOL _scipy_signal_ARRAY_API -#include - -#include "sigtools.h" -#include - -#define PYERR(message) {PyErr_SetString(PyExc_ValueError, message); goto fail;} - -#define DATA(arr) ((arr)->data) -#define DIMS(arr) ((arr)->dimensions) -#define STRIDES(arr) ((arr)->strides) -#define ELSIZE(arr) ((arr)->descr->elsize) -#define OBJECTTYPE(arr) ((arr)->descr->type_num) -#define BASEOBJ(arr) ((PyArrayObject *)((arr)->base)) -#define RANK(arr) ((arr)->nd) -#define ISCONTIGUOUS(m) ((m)->flags & CONTIGUOUS) - - -jmp_buf MALLOC_FAIL; - -char *check_malloc (int); - -char *check_malloc (size) - int size; -{ - char *the_block; - - the_block = (char *)malloc(size); - if (the_block == NULL) - { - printf("\nERROR: unable to allocate %d bytes!\n", size); - longjmp(MALLOC_FAIL,-1); - } - return(the_block); -} - - -/************************************************************************ - * Start of portable, non-python specific routines. * - ************************************************************************/ - -/* Some core routines are written -in a portable way so that they could be used in other applications. The -order filtering, however uses python-specific constructs in its guts -and is therefore Python dependent. This could be changed in a -straightforward way but I haven't done it for lack of time.*/ - -static int index_out_of_bounds(intp *indices, intp *max_indices, int ndims) { - int bad_index = 0, k = 0; - - while (!bad_index && (k++ < ndims)) { - bad_index = ((*(indices) >= *(max_indices++)) || (*(indices) < 0)); - indices++; - } - return bad_index; -} - -/* This maybe could be redone with stride information so it could be - * called with non-contiguous arrays: I think offsets is related to - * the difference between the strides. I'm not sure about init_offset - * just yet. 
I think it needs to be calculated because of mode_dep - * but probably with dim1 being the size of the "original, unsliced" array - */ - -static intp compute_offsets (uintp *offsets, intp *offsets2, intp *dim1, intp *dim2, intp *dim3, intp *mode_dep, int nd) { - int k,i; - intp init_offset = 0; - - for (k = 0; k < nd - 1; k++) - { - init_offset += mode_dep[k]; - init_offset *= dim1[k+1]; - } - init_offset += mode_dep[k] - 2; - - k = nd; - while(k--) { - offsets[k] = 0; - offsets2[k] = 0; - for (i = k + 1; i < nd - 1; i++) { - offsets[k] += dim1[i] - dim2[i]; - offsets[k] *= dim1[i+1]; - - offsets2[k] += dim1[i] - dim3[i]; - offsets2[k] *= dim1[i+1]; - } - - if (k < nd - 1) { - offsets[k] += dim1[i] - dim2[i]; - offsets2[k] += dim1[i] - dim3[i]; - } - offsets[k] += 1; - offsets2[k] += 1; - } - return init_offset; -} - -/* increment by 1 the index into an N-D array, doing the necessary - carrying when the index reaches the dimension along that axis */ -static int increment(intp *ret_ind, int nd, intp *max_ind) { - int k, incr = 1; - - k = nd - 1; - if (++ret_ind[k] >= max_ind[k]) { - while (k >= 0 && (ret_ind[k] >= max_ind[k]-1)) { - incr++; - ret_ind[k--] = 0; - } - if (k >= 0) ret_ind[k]++; - } - return incr; -} - -/******************************************************** - * - * Code taken from remez.c by Erik Kvaleberg which was - * converted from an original FORTRAN by - * - * AUTHORS: JAMES H. MCCLELLAN - * - * DEPARTMENT OF ELECTRICAL ENGINEERING AND COMPUTER SCIENCE - * MASSACHUSETTS INSTITUTE OF TECHNOLOGY - * CAMBRIDGE, MASS. 02139 - * - * THOMAS W. PARKS - * DEPARTMENT OF ELECTRICAL ENGINEERING - * RICE UNIVERSITY - * HOUSTON, TEXAS 77001 - * - * LAWRENCE R. 
RABINER - * BELL LABORATORIES - * MURRAY HILL, NEW JERSEY 07974 - * - * - * Adaptation to C by - * egil kvaleberg - * husebybakken 14a - * 0379 oslo, norway - * Email: - * egil@kvaleberg.no - * Web: - * http://www.kvaleberg.com/ - * - * - *********************************************************/ - - -#define BANDPASS 1 -#define DIFFERENTIATOR 2 -#define HILBERT 3 - -#define GOBACK goto -#define DOloop(a,from,to) for ( (a) = (from); (a) <= (to); ++(a)) -#define PI 3.14159265358979323846 -#define TWOPI (PI+PI) - -/* - *----------------------------------------------------------------------- - * FUNCTION: lagrange_interp (d) - * FUNCTION TO CALCULATE THE LAGRANGE INTERPOLATION - * COEFFICIENTS FOR USE IN THE FUNCTION gee. - *----------------------------------------------------------------------- - */ -static double lagrange_interp(int k, int n, int m, double *x) -{ - int j, l; - double q, retval; - - retval = 1.0; - q = x[k]; - DOloop(l,1,m) { - for (j = l; j <= n; j += m) { - if (j != k) - retval *= 2.0 * (q - x[j]); - } - } - return 1.0 / retval; -} - -/* - *----------------------------------------------------------------------- - * FUNCTION: freq_eval (gee) - * FUNCTION TO EVALUATE THE FREQUENCY RESPONSE USING THE - * LAGRANGE INTERPOLATION FORMULA IN THE BARYCENTRIC FORM - *----------------------------------------------------------------------- - */ -static double freq_eval(int k, int n, double *grid, double *x, double *y, double *ad) -{ - int j; - double p,c,d,xf; - - d = 0.0; - p = 0.0; - xf = cos(TWOPI * grid[k]); - - DOloop(j,1,n) { - c = ad[j] / (xf - x[j]); - d += c; - p += c * y[j]; - } - - return p/d; -} - - -/* - *----------------------------------------------------------------------- - * SUBROUTINE: remez - * THIS SUBROUTINE IMPLEMENTS THE REMEZ EXCHANGE ALGORITHM - * FOR THE WEIGHTED CHEBYSHEV APPROXIMATION OF A CONTINUOUS - * FUNCTION WITH A SUM OF COSINES. 
INPUTS TO THE SUBROUTINE - * ARE A DENSE GRID WHICH REPLACES THE FREQUENCY AXIS, THE - * DESIRED FUNCTION ON THIS GRID, THE WEIGHT FUNCTION ON THE - * GRID, THE NUMBER OF COSINES, AND AN INITIAL GUESS OF THE - * EXTREMAL FREQUENCIES. THE PROGRAM MINIMIZES THE CHEBYSHEV - * ERROR BY DETERMINING THE BSMINEST LOCATION OF THE EXTREMAL - * FREQUENCIES (POINTS OF MAXIMUM ERROR) AND THEN CALCULATES - * THE COEFFICIENTS OF THE BEST APPROXIMATION. - *----------------------------------------------------------------------- - */ -static int remez(double *dev, double des[], double grid[], double edge[], - double wt[], int ngrid, int nbands, int iext[], double alpha[], - int nfcns, int itrmax, double *work, int dimsize) - /* dev, iext, alpha are output types */ - /* des, grid, edge, wt, ngrid, nbands, nfcns are input types */ -{ - int k, k1, kkk, kn, knz, klow, kup, nz, nzz, nm1; - int cn; - int j, jchnge, jet, jm1, jp1; - int l, luck=0, nu, nut, nut1=0, niter; - - double ynz=0.0, comp=0.0, devl, gtemp, fsh, y1=0.0, err, dtemp, delf, dnum, dden; - double aa=0.0, bb=0.0, ft, xe, xt; - - static double *a, *p, *q; - static double *ad, *x, *y; - - a = work; p = a + dimsize+1; q = p + dimsize+1; - ad = q + dimsize+1; x = ad + dimsize+1; y = x + dimsize+1; - devl = -1.0; - nz = nfcns+1; - nzz = nfcns+2; - niter = 0; - - do { - L100: - iext[nzz] = ngrid + 1; - ++niter; - - if (niter > itrmax) break; - - /* printf("ITERATION %2d: ",niter); */ - - DOloop(j,1,nz) { - x[j] = cos(grid[iext[j]]*TWOPI); - } - jet = (nfcns-1) / 15 + 1; - - DOloop(j,1,nz) { - ad[j] = lagrange_interp(j,nz,jet,x); - } - - dnum = 0.0; - dden = 0.0; - k = 1; - - DOloop(j,1,nz) { - l = iext[j]; - dnum += ad[j] * des[l]; - dden += (double)k * ad[j] / wt[l]; - k = -k; - } - *dev = dnum / dden; - - /* printf("DEVIATION = %lg\n",*dev); */ - - nu = 1; - if ( (*dev) > 0.0 ) nu = -1; - (*dev) = -(double)nu * (*dev); - k = nu; - DOloop(j,1,nz) { - l = iext[j]; - y[j] = des[l] + (double)k * (*dev) / wt[l]; - k = -k; - } - if 
( (*dev) <= devl ) { - /* finished */ - return -1; - } - devl = (*dev); - jchnge = 0; - k1 = iext[1]; - knz = iext[nz]; - klow = 0; - nut = -nu; - j = 1; - - /* - * SEARCH FOR THE EXTREMAL FREQUENCIES OF THE BEST APPROXIMATION - */ - - L200: - if (j == nzz) ynz = comp; - if (j >= nzz) goto L300; - kup = iext[j+1]; - l = iext[j]+1; - nut = -nut; - if (j == 2) y1 = comp; - comp = (*dev); - if (l >= kup) goto L220; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) <= 0.0) goto L220; - comp = (double)nut * err; - L210: - if (++l >= kup) goto L215; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) <= 0.0) goto L215; - comp = (double)nut * err; - GOBACK L210; - - L215: - iext[j++] = l - 1; - klow = l - 1; - ++jchnge; - GOBACK L200; - - L220: - --l; - L225: - if (--l <= klow) goto L250; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) > 0.0) goto L230; - if (jchnge <= 0) goto L225; - goto L260; - - L230: - comp = (double)nut * err; - L235: - if (--l <= klow) goto L240; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) <= 0.0) goto L240; - comp = (double)nut * err; - GOBACK L235; - L240: - klow = iext[j]; - iext[j] = l+1; - ++j; - ++jchnge; - GOBACK L200; - - L250: - l = iext[j]+1; - if (jchnge > 0) GOBACK L215; - - L255: - if (++l >= kup) goto L260; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) <= 0.0) GOBACK L255; - comp = (double)nut * err; - - GOBACK L210; - L260: - klow = iext[j++]; - GOBACK L200; - - L300: - if (j > nzz) goto L320; - if (k1 > iext[1] ) k1 = iext[1]; - if (knz < iext[nz]) knz = iext[nz]; - nut1 = nut; - nut = -nu; - l = 0; - kup = k1; - comp = ynz*(1.00001); - luck = 1; - L310: - if (++l >= kup) goto L315; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) <= 0.0) GOBACK L310; - comp = (double) nut * err; - j = nzz; - GOBACK L210; - - L315: - luck = 6; 
- goto L325; - - L320: - if (luck > 9) goto L350; - if (comp > y1) y1 = comp; - k1 = iext[nzz]; - L325: - l = ngrid+1; - klow = knz; - nut = -nut1; - comp = y1*(1.00001); - L330: - if (--l <= klow) goto L340; - err = (freq_eval(l,nz,grid,x,y,ad)-des[l]) * wt[l]; - if (((double)nut*err-comp) <= 0.0) GOBACK L330; - j = nzz; - comp = (double) nut * err; - luck = luck + 10; - GOBACK L235; - L340: - if (luck == 6) goto L370; - DOloop(j,1,nfcns) { - iext[nzz-j] = iext[nz-j]; - } - iext[1] = k1; - GOBACK L100; - L350: - kn = iext[nzz]; - DOloop(j,1,nfcns) iext[j] = iext[j+1]; - iext[nz] = kn; - - GOBACK L100; - L370: - ; - } while (jchnge > 0); - -/* - * CALCULATION OF THE COEFFICIENTS OF THE BEST APPROXIMATION - * USING THE INVERSE DISCRETE FOURIER TRANSFORM - */ - nm1 = nfcns - 1; - fsh = 1.0e-06; - gtemp = grid[1]; - x[nzz] = -2.0; - cn = 2*nfcns - 1; - delf = 1.0/cn; - l = 1; - kkk = 0; - - if (edge[1] == 0.0 && edge[2*nbands] == 0.5) kkk = 1; - - if (nfcns <= 3) kkk = 1; - if (kkk != 1) { - dtemp = cos(TWOPI*grid[1]); - dnum = cos(TWOPI*grid[ngrid]); - aa = 2.0/(dtemp-dnum); - bb = -(dtemp+dnum)/(dtemp-dnum); - } - - DOloop(j,1,nfcns) { - ft = (j - 1) * delf; - xt = cos(TWOPI*ft); - if (kkk != 1) { - xt = (xt-bb)/aa; -#if 0 - /*XX* ckeck up !! 
*/ - xt1 = sqrt(1.0-xt*xt); - ft = atan2(xt1,xt)/TWOPI; -#else - ft = acos(xt)/TWOPI; -#endif - } -L410: - xe = x[l]; - if (xt > xe) goto L420; - if ((xe-xt) < fsh) goto L415; - ++l; - GOBACK L410; -L415: - a[j] = y[l]; - goto L425; -L420: - if ((xt-xe) < fsh) GOBACK L415; - grid[1] = ft; - a[j] = freq_eval(1,nz,grid,x,y,ad); -L425: - if (l > 1) l = l-1; - } - - grid[1] = gtemp; - dden = TWOPI / cn; - DOloop (j,1,nfcns) { - dtemp = 0.0; - dnum = (j-1) * dden; - if (nm1 >= 1) { - DOloop(k,1,nm1) { - dtemp += a[k+1] * cos(dnum*k); - } - } - alpha[j] = 2.0 * dtemp + a[1]; - } - - DOloop(j,2,nfcns) alpha[j] *= 2.0 / cn; - alpha[1] /= cn; - - if (kkk != 1) { - p[1] = 2.0*alpha[nfcns]*bb+alpha[nm1]; - p[2] = 2.0*aa*alpha[nfcns]; - q[1] = alpha[nfcns-2]-alpha[nfcns]; - DOloop(j,2,nm1) { - if (j >= nm1) { - aa *= 0.5; - bb *= 0.5; - } - p[j+1] = 0.0; - DOloop(k,1,j) { - a[k] = p[k]; - p[k] = 2.0 * bb * a[k]; - } - p[2] += a[1] * 2.0 *aa; - jm1 = j - 1; - DOloop(k,1,jm1) p[k] += q[k] + aa * a[k+1]; - jp1 = j + 1; - DOloop(k,3,jp1) p[k] += aa * a[k-1]; - - if (j != nm1) { - DOloop(k,1,j) q[k] = -a[k]; - q[1] += alpha[nfcns - 1 - j]; - } - } - DOloop(j,1,nfcns) alpha[j] = p[j]; - } - - if (nfcns <= 3) { - alpha[nfcns+1] = alpha[nfcns+2] = 0.0; - } - return 0; -} - - -/* - *----------------------------------------------------------------------- - * FUNCTION: eff - * FUNCTION TO CALCULATE THE DESIRED MAGNITUDE RESPONSE - * AS A FUNCTION OF FREQUENCY. - * AN ARBITRARY FUNCTION OF FREQUENCY CAN BE - * APPROXIMATED IF THE USER REPLACES THIS FUNCTION - * WITH THE APPROPRIATE CODE TO EVALUATE THE IDEAL - * MAGNITUDE. NOTE THAT THE PARAMETER FREQ IS THE - * VALUE OF NORMALIZED FREQUENCY NEEDED FOR EVALUATION. 
- *----------------------------------------------------------------------- - */ -static double eff(double freq, double *fx, int lband, int jtype) -{ - if (jtype != 2) return fx[lband]; - else return fx[lband] * freq; -} - -/* - *----------------------------------------------------------------------- - * FUNCTION: wate - * FUNCTION TO CALCULATE THE WEIGHT FUNCTION AS A FUNCTION - * OF FREQUENCY. SIMILAR TO THE FUNCTION eff, THIS FUNCTION CAN - * BE REPLACED BY A USER-WRITTEN ROUTINE TO CALCULATE ANY - * DESIRED WEIGHTING FUNCTION. - *----------------------------------------------------------------------- - */ -static double wate(double freq, double *fx, double *wtx, int lband, int jtype) -{ - if (jtype != 2) return wtx[lband]; - if (fx[lband] >= 0.0001) return wtx[lband] / freq; - return wtx[lband]; -} - -/*********************************************************/ - -/* This routine accepts basic input information and puts it in - * the form expected by remez. - - * Adpated from main() by Travis Oliphant - */ - -static int pre_remez(double *h2, int numtaps, int numbands, double *bands, double *response, double *weight, int type, int maxiter, int grid_density) { - - int jtype, nbands, nfilt, lgrid, nz; - int neg, nodd, nm1; - int j, k, l, lband, dimsize; - double delf, change, fup, temp; - double *tempstor, *edge, *h, *fx, *wtx; - double *des, *grid, *wt, *alpha, *work; - double dev; - int ngrid; - int *iext; - int nfcns, wrksize, total_dsize, total_isize; - - lgrid = grid_density; - dimsize = (int) ceil(numtaps/2.0 + 2); - wrksize = grid_density * dimsize; - nfilt = numtaps; - jtype = type; nbands = numbands; - /* Note: code assumes these arrays start at 1 */ - edge = bands-1; - h = h2 - 1; - fx = response - 1; - wtx = weight - 1; - - total_dsize = (dimsize+1)*7 + 3*(wrksize+1); - total_isize = (dimsize+1); - /* Need space for: (all arrays ignore the first element). 
- - des (wrksize+1) - grid (wrksize+1) - wt (wrksize+1) - iext (dimsize+1) (integer) - alpha (dimsize+1) - work (dimsize+1)*6 - - */ - tempstor = malloc((total_dsize)*sizeof(double)+(total_isize)*sizeof(int)); - if (tempstor == NULL) return -2; - - des = tempstor; grid = des + wrksize+1; - wt = grid + wrksize+1; alpha = wt + wrksize+1; - work = alpha + dimsize+1; iext = (int *)(work + (dimsize+1)*6); - - /* Set up problem on dense_grid */ - - neg = 1; - if (jtype == 1) neg = 0; - nodd = nfilt % 2; - nfcns = nfilt / 2; - if (nodd == 1 && neg == 0) nfcns = nfcns + 1; - - /* - * SET UP THE DENSE GRID. THE NUMBER OF POINTS IN THE GRID - * IS (FILTER LENGTH + 1)*GRID DENSITY/2 - */ - grid[1] = edge[1]; - delf = lgrid * nfcns; - delf = 0.5 / delf; - if (neg != 0) { - if (edge[1] < delf) grid[1] = delf; - } - j = 1; - l = 1; - lband = 1; - - /* - * CALCULATE THE DESIRED MAGNITUDE RESPONSE AND THE WEIGHT - * FUNCTION ON THE GRID - */ - for (;;) { - fup = edge[l + 1]; - do { - temp = grid[j]; - des[j] = eff(temp,fx,lband,jtype); - wt[j] = wate(temp,fx,wtx,lband,jtype); - if (++j > wrksize) { free(tempstor); return -1;} /* too many points, or too dense grid */ - grid[j] = temp + delf; - } while (grid[j] <= fup); - - grid[j-1] = fup; - des[j-1] = eff(fup,fx,lband,jtype); - wt[j-1] = wate(fup,fx,wtx,lband,jtype); - ++lband; - l += 2; - if (lband > nbands) break; - grid[j] = edge[l]; - } - - ngrid = j - 1; - if (neg == nodd) { - if (grid[ngrid] > (0.5-delf)) --ngrid; - } - - /* - * SET UP A NEW APPROXIMATION PROBLEM WHICH IS EQUIVALENT - * TO THE ORIGINAL PROBLEM - */ - if (neg <= 0) { - if (nodd != 1) { - DOloop(j,1,ngrid) { - change = cos(PI*grid[j]); - des[j] = des[j] / change; - wt[j] = wt[j] * change; - } - } - } else { - if (nodd != 1) { - DOloop(j,1,ngrid) { - change = sin(PI*grid[j]); - des[j] = des[j] / change; - wt[j] = wt[j] * change; - } - } else { - DOloop(j,1,ngrid) { - change = sin(TWOPI * grid[j]); - des[j] = des[j] / change; - wt[j] = wt[j] * change; - } - } - 
} - - /*XX*/ - temp = (double)(ngrid-1) / (double)nfcns; - DOloop(j,1,nfcns) { - iext[j] = (int)((j-1)*temp) + 1; /* round? !! */ - } - iext[nfcns+1] = ngrid; - nm1 = nfcns - 1; - nz = nfcns + 1; - - if (remez(&dev, des, grid, edge, wt, ngrid, numbands, iext, alpha, nfcns, maxiter, work, dimsize) < 0) { free(tempstor); return -1; } - - /* - * CALCULATE THE IMPULSE RESPONSE. - */ - if (neg <= 0) { - - if (nodd != 0) { - DOloop(j,1,nm1) { - h[j] = 0.5 * alpha[nz-j]; - } - h[nfcns] = alpha[1]; - } else { - h[1] = 0.25 * alpha[nfcns]; - DOloop(j,2,nm1) { - h[j] = 0.25 * (alpha[nz-j] + alpha[nfcns+2-j]); - } - h[nfcns] = 0.5*alpha[1] + 0.25*alpha[2]; - } - } else { - if (nodd != 0) { - h[1] = 0.25 * alpha[nfcns]; - h[2] = 0.25 * alpha[nm1]; - DOloop(j,3,nm1) { - h[j] = 0.25 * (alpha[nz-j] - alpha[nfcns+3-j]); - } - h[nfcns] = 0.5 * alpha[1] - 0.25 * alpha[3]; - h[nz] = 0.0; - } else { - h[1] = 0.25 * alpha[nfcns]; - DOloop(j,2,nm1) { - h[j] = 0.25 * (alpha[nz-j] - alpha[nfcns+2-j]); - } - h[nfcns] = 0.5 * alpha[1] - 0.25 * alpha[2]; - } - } - - DOloop(j,1,nfcns){ - k = nfilt + 1 - j; - if (neg == 0) - h[k] = h[j]; - else - h[k] = -h[j]; - } - if (neg == 1 && nodd == 1) h[nz] = 0.0; - - free(tempstor); - return 0; - -} - -/************************************************************** - * End of remez routines - **************************************************************/ - - -/****************************************************/ -/* End of python-independent routines */ -/****************************************************/ - -/************************/ -/* N-D Order Filtering. 
*/ - - -static void fill_buffer(char *ip1, PyArrayObject *ap1, PyArrayObject *ap2, char *sort_buffer, int nels2, int check, intp *loop_ind, intp *temp_ind, uintp *offset){ - int i, k, incr = 1; - int ndims = ap1->nd; - intp *dims2 = ap2->dimensions; - intp *dims1 = ap1->dimensions; - intp is1 = ap1->strides[ndims-1]; - intp is2 = ap2->strides[ndims-1]; - char *ip2 = ap2->data; - int elsize = ap1->descr->elsize; - char *ptr; - - i = nels2; - ptr = PyArray_Zero(ap2); - temp_ind[ndims-1]--; - while (i--) { - /* Adjust index array and move ptr1 to right place */ - k = ndims - 1; - while(--incr) { - temp_ind[k] -= dims2[k] - 1; /* Return to start for these dimensions */ - k--; - } - ip1 += offset[k]*is1; /* Precomputed offset array */ - temp_ind[k]++; - - if (!(check && index_out_of_bounds(temp_ind,dims1,ndims)) && \ - memcmp(ip2, ptr, ap2->descr->elsize)) { - memcpy(sort_buffer, ip1, elsize); - sort_buffer += elsize; - } - incr = increment(loop_ind, ndims, dims2); /* Returns number of N-D indices incremented. */ - ip2 += is2; - - } - PyDataMem_FREE(ptr); - return; -} - -#define COMPARE(fname, type) \ -int fname(type *ip1, type *ip2) { return *ip1 < *ip2 ? -1 : *ip1 == *ip2 ? 
0 : 1; } - -COMPARE(DOUBLE_compare, double) -COMPARE(FLOAT_compare, float) -COMPARE(LONGDOUBLE_compare, longdouble) -COMPARE(BYTE_compare, byte) -COMPARE(SHORT_compare, short) -COMPARE(INT_compare, int) -COMPARE(LONG_compare, long) -COMPARE(LONGLONG_compare, longlong) -COMPARE(UBYTE_compare, ubyte) -COMPARE(USHORT_compare, ushort) -COMPARE(UINT_compare, uint) -COMPARE(ULONG_compare, ulong) -COMPARE(ULONGLONG_compare, ulonglong) - - -int OBJECT_compare(PyObject **ip1, PyObject **ip2) { - /*return PyObject_Compare(*ip1, *ip2); */ - return PyObject_RichCompareBool(*ip1, *ip2, Py_EQ) != 1; -} - -typedef int (*CompareFunction)(const void *, const void *); - -CompareFunction compare_functions[] = \ - {NULL, (CompareFunction)BYTE_compare,(CompareFunction)UBYTE_compare,\ - (CompareFunction)SHORT_compare,(CompareFunction)USHORT_compare, \ - (CompareFunction)INT_compare,(CompareFunction)UINT_compare, \ - (CompareFunction)LONG_compare,(CompareFunction)ULONG_compare, \ - (CompareFunction)LONGLONG_compare,(CompareFunction)ULONGLONG_compare, - (CompareFunction)FLOAT_compare,(CompareFunction)DOUBLE_compare, - (CompareFunction)LONGDOUBLE_compare, NULL, NULL, NULL, - (CompareFunction)OBJECT_compare, NULL, NULL, NULL}; - -PyObject *PyArray_OrderFilterND(PyObject *op1, PyObject *op2, int order) { - PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL; - intp *a_ind, *b_ind, *temp_ind, *mode_dep, *check_ind; - uintp *offsets, offset1; - intp *offsets2; - int i, n2, n2_nonzero, k, check, incr = 1; - int typenum, bytes_in_array; - int is1, os; - char *op, *ap1_ptr, *ap2_ptr, *sort_buffer; - intp *ret_ind; - CompareFunction compare_func; - char *zptr=NULL; - - /* Get Array objects from input */ - typenum = PyArray_ObjectType(op1, 0); - typenum = PyArray_ObjectType(op2, typenum); - - ap1 = (PyArrayObject *)PyArray_ContiguousFromObject(op1, typenum, 0, 0); - if (ap1 == NULL) return NULL; - ap2 = (PyArrayObject *)PyArray_ContiguousFromObject(op2, typenum, 0, 0); - if (ap2 == NULL) goto fail; - - 
if (ap1->nd != ap2->nd) { - PyErr_SetString(PyExc_ValueError, "All input arrays must have the same number of dimensions."); - goto fail; - } - - n2 = PyArray_Size((PyObject *)ap2); - n2_nonzero = 0; - ap2_ptr = ap2->data; - /* Find out the number of non-zero entries in domain (allows for - * different shapped rank-filters to be used besides just rectangles) - */ - zptr = PyArray_Zero(ap2); - if (zptr == NULL) goto fail; - for (k=0; k < n2; k++) { - n2_nonzero += (memcmp(ap2_ptr,zptr,ap2->descr->elsize) != 0); - ap2_ptr += ap2->descr->elsize; - } - - if ((order >= n2_nonzero) || (order < 0)) { - PyErr_SetString(PyExc_ValueError, "Order must be non-negative and less than number of nonzero elements in domain."); - goto fail; - } - - ret = (PyArrayObject *)PyArray_SimpleNew(ap1->nd, ap1->dimensions, typenum); - if (ret == NULL) goto fail; - - compare_func = compare_functions[ap1->descr->type_num]; - if (compare_func == NULL) { - PyErr_SetString(PyExc_ValueError, - "order_filterND not available for this type"); - goto fail; - } - - is1 = ap1->descr->elsize; - - if (!(sort_buffer = malloc(n2_nonzero*is1))) goto fail; - - op = ret->data; os = ret->descr->elsize; - - op = ret->data; - - bytes_in_array = ap1->nd*sizeof(intp); - mode_dep = malloc(bytes_in_array); - for (k = 0; k < ap1->nd; k++) { - mode_dep[k] = -((ap2->dimensions[k]-1) >> 1); - } - - b_ind = (intp *)malloc(bytes_in_array); /* loop variables */ - memset(b_ind,0,bytes_in_array); - a_ind = (intp *)malloc(bytes_in_array); - ret_ind = (intp *)malloc(bytes_in_array); - memset(ret_ind,0,bytes_in_array); - temp_ind = (intp *)malloc(bytes_in_array); - check_ind = (intp*)malloc(bytes_in_array); - offsets = (uintp *)malloc(ap1->nd*sizeof(uintp)); - offsets2 = (intp *)malloc(ap1->nd*sizeof(intp)); - offset1 = compute_offsets(offsets,offsets2,ap1->dimensions,ap2->dimensions,ret->dimensions,mode_dep,ap1->nd); - /* The filtering proceeds by looping through the output array - and for each value filling a buffer from the - 
element-by-element product of the two input arrays. The buffer - is then sorted and the order_th element is kept as output. Index - counters are used for book-keeping in the area so that we - can tell where we are in all of the arrays and be sure that - we are not trying to access areas outside the arrays definition. - - The inner loop is implemented separately but equivalently for each - datatype. The outer loop is similar in structure and form to - to the inner loop. - */ - /* Need to keep track of a ptr to place in big (first) input - array where we start the multiplication (we pass over it in the - inner loop (and not dereferenced) - if it is pointing outside dataspace) - */ - /* Calculate it once and the just move it around appropriately */ - PyDataMem_FREE(zptr); - zptr = PyArray_Zero(ap1); - if (zptr == NULL) goto fail; - ap1_ptr = ap1->data + offset1*is1; - for (k=0; k < ap1->nd; k++) {a_ind[k] = mode_dep[k]; check_ind[k] = ap1->dimensions[k] - ap2->dimensions[k] - mode_dep[k] - 1;} - a_ind[ap1->nd-1]--; - i = PyArray_Size((PyObject *)ret); - while (i--) { - /* Zero out the sort_buffer (has effect of zero-padding - on boundaries). 
Treat object arrays right.*/ - ap2_ptr = sort_buffer; - for (k=0; k < n2_nonzero; k++) { - memcpy(ap2_ptr,zptr,is1); - ap2_ptr += is1; - } - - k = ap1->nd - 1; - while(--incr) { - a_ind[k] -= ret->dimensions[k] - 1; /* Return to start */ - k--; - } - ap1_ptr += offsets2[k]*is1; - a_ind[k]++; - memcpy(temp_ind, a_ind, bytes_in_array); - - check = 0; k = -1; - while(!check && (++k < ap1->nd)) - check = check || (ret_ind[k] < -mode_dep[k]) || (ret_ind[k] > check_ind[k]); - - fill_buffer(ap1_ptr,ap1,ap2,sort_buffer,n2,check,b_ind,temp_ind,offsets); - qsort(sort_buffer, n2_nonzero, is1, compare_func); - memcpy(op, sort_buffer + order*is1, os); - - incr = increment(ret_ind,ret->nd,ret->dimensions); /* increment index counter */ - op += os; /* increment to next output index */ - - } - free(b_ind); free(a_ind); free(ret_ind); - free(offsets); free(offsets2); free(temp_ind); - free(check_ind); free(mode_dep); - free(sort_buffer); - - PyDataMem_FREE(zptr); - Py_DECREF(ap1); - Py_DECREF(ap2); - - return PyArray_Return(ret); - -fail: - if (zptr) PyDataMem_FREE(zptr); - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} - - -/******************************************/ - -static char doc_correlateND[] = "out = _correlateND(a,kernel,mode) \n\n mode = 0 - 'valid', 1 - 'same', \n 2 - 'full' (default)"; - -/*******************************************************************/ - -static char doc_convolve2d[] = "out = _convolve2d(in1, in2, flip, mode, boundary, fillvalue)"; - -extern int pylab_convolve_2d(char*,intp*,char*,intp*,char*,intp*,intp*,intp*,int,char*); - -static PyObject *sigtools_convolve2d(PyObject *NPY_UNUSED(dummy), PyObject *args) { - - PyObject *in1=NULL, *in2=NULL, *fill_value=NULL; - int mode=2, boundary=0, typenum, flag, flip=1, ret; - intp *aout_dimens=NULL, *dims=NULL; - char zeros[32]; /* Zeros */ - int n1, n2, i; - PyArrayObject *ain1=NULL, *ain2=NULL, *aout=NULL; - PyArrayObject *afill=NULL, *newfill=NULL; - - if (!PyArg_ParseTuple(args, 
"OO|iiiO", &in1, &in2, &flip, &mode, &boundary, &fill_value)) { - return NULL; - } - - typenum = PyArray_ObjectType(in1, 0); - typenum = PyArray_ObjectType(in2, typenum); - ain1 = (PyArrayObject *)PyArray_FromObject(in1, typenum, 2, 2); - if (ain1 == NULL) goto fail; - ain2 = (PyArrayObject *)PyArray_FromObject(in2, typenum, 2, 2); - if (ain2 == NULL) goto fail; - - if ((boundary != PAD) && (boundary != REFLECT) && (boundary != CIRCULAR)) - PYERR("Incorrect boundary value."); - if (boundary == PAD) { - if (fill_value == NULL) { - newfill = (PyArrayObject *)PyArray_SimpleNewFromData(0, dims, typenum, zeros); - } - else { - afill = (PyArrayObject *)PyArray_FromObject(fill_value, PyArray_CDOUBLE, 0, 0); - if (afill == NULL) goto fail; - newfill = (PyArrayObject *)PyArray_Cast(afill, typenum); - } - if (newfill == NULL) goto fail; - } - else { - newfill = (PyArrayObject *)PyArray_SimpleNewFromData(0, dims, typenum, zeros); - if (newfill == NULL) goto fail; - } - - n1 = PyArray_Size((PyObject *)ain1); - n2 = PyArray_Size((PyObject *)ain2); - - aout_dimens = malloc(ain1->nd*sizeof(intp)); - switch(mode & OUTSIZE_MASK) { - case VALID: - for (i = 0; i < ain1->nd; i++) { - aout_dimens[i] = ain1->dimensions[i] - ain2->dimensions[i] + 1; - if (aout_dimens[i] < 0) { - PyErr_SetString(PyExc_ValueError, "no part of the output is valid, use option 1 (same) or 2 (full) for third argument"); - goto fail; - } - } - break; - case SAME: - for (i = 0; i < ain1->nd; i++) { aout_dimens[i] = ain1->dimensions[i];} - break; - case FULL: - for (i = 0; i < ain1->nd; i++) { aout_dimens[i] = ain1->dimensions[i] + ain2->dimensions[i] - 1;} - break; - default: - PyErr_SetString(PyExc_ValueError, - "mode must be 0 (valid), 1 (same), or 2 (full)"); - goto fail; - } - - aout = (PyArrayObject *)PyArray_SimpleNew(ain1->nd, aout_dimens, typenum); - if (aout == NULL) goto fail; - - flag = mode + boundary + (typenum << TYPE_SHIFT) + \ - (flip != 0) * FLIP_MASK; - - ret = pylab_convolve_2d (DATA(ain1), /* 
Input data Ns[0] x Ns[1] */ - STRIDES(ain1), /* Input strides */ - DATA(aout), /* Output data */ - STRIDES(aout), /* Ouput strides */ - DATA(ain2), /* coefficients in filter */ - STRIDES(ain2), /* coefficients strides */ - DIMS(ain2), /* Size of kernel Nwin[2] */ - DIMS(ain1), /* Size of image Ns[0] x Ns[1] */ - flag, /* convolution parameters */ - DATA(newfill)); /* fill value */ - - - switch (ret) { - case 0: - Py_DECREF(ain1); - Py_DECREF(ain2); - Py_XDECREF(afill); - Py_XDECREF(newfill); - return (PyObject *)aout; - break; - case -5: - case -4: - PyErr_SetString(PyExc_ValueError, - "convolve2d not available for this type."); - goto fail; - case -3: - PyErr_NoMemory(); - goto fail; - case -2: - PyErr_SetString(PyExc_ValueError, - "Invalid boundary type."); - goto fail; - case -1: - PyErr_SetString(PyExc_ValueError, - "Invalid output flag."); - goto fail; - } - -fail: - free(aout_dimens); - Py_XDECREF(ain1); - Py_XDECREF(ain2); - Py_XDECREF(aout); - Py_XDECREF(afill); - Py_XDECREF(newfill); - return NULL; -} - -/*******************************************************************/ - -static char doc_order_filterND[] = "out = _order_filterND(a,domain,order)"; - -static PyObject *sigtools_order_filterND(PyObject *NPY_UNUSED(dummy), PyObject *args) { - PyObject *domain, *a0; - int order=0; - - if (!PyArg_ParseTuple(args, "OO|i", &a0, &domain, &order)) return NULL; - - return PyArray_OrderFilterND(a0, domain, order); -} - - - -static char doc_remez[] = "h = _remez(numtaps, bands, des, weight, type, Hz, maxiter, grid_density) \n returns the optimal (in the Chebyshev/minimax sense) FIR filter impulse \n response given a set of band edges, the desired response on those bands,\n and the weight given to the error in those bands. 
Bands is a monotonic\n vector with band edges given in frequency domain where Hz is the sampling\n frequency."; - -static PyObject *sigtools_remez(PyObject *NPY_UNUSED(dummy), PyObject *args) { - PyObject *bands, *des, *weight; - int k, numtaps, numbands, type = BANDPASS, err; - PyArrayObject *a_bands=NULL, *a_des=NULL, *a_weight=NULL; - PyArrayObject *h=NULL; - intp ret_dimens; int maxiter = 25, grid_density = 16; - double oldvalue, *dptr, Hz = 1.0; - char mystr[255]; - - - - if (!PyArg_ParseTuple(args, "iOOO|idii", &numtaps, &bands, &des, &weight, &type, &Hz, &maxiter, &grid_density)) - return NULL; - - if (type != BANDPASS && type != DIFFERENTIATOR && type != HILBERT) { - PyErr_SetString(PyExc_ValueError, - "The type must be BANDPASS, DIFFERENTIATOR, or HILBERT."); - return NULL; - } - - if (numtaps < 2) { - PyErr_SetString(PyExc_ValueError, - "The number of taps must be greater than 1."); - return NULL; - } - - - a_bands = (PyArrayObject *)PyArray_ContiguousFromObject(bands, PyArray_DOUBLE,1,1); - if (a_bands == NULL) goto fail; - a_des = (PyArrayObject *)PyArray_ContiguousFromObject(des, PyArray_DOUBLE,1,1); - if (a_des == NULL) goto fail; - a_weight = (PyArrayObject *)PyArray_ContiguousFromObject(weight, PyArray_DOUBLE,1,1); - if (a_weight == NULL) goto fail; - - - numbands = a_des->dimensions[0]; - if ((a_bands->dimensions[0] != 2*numbands) || (a_weight->dimensions[0] != numbands)) { - PyErr_SetString(PyExc_ValueError, - "The inputs desired and weight must have same length.\n The input bands must have twice this length."); - goto fail; - } - - /* Check the bands input to see if it is monotonic, divide by - Hz to take from range 0 to 0.5 and check to see if in that range */ - - dptr = (double *)a_bands->data; - oldvalue = 0; - for (k=0; k < 2*numbands; k++) { - if (*dptr < oldvalue) { - PyErr_SetString(PyExc_ValueError, - "Bands must be monotonic starting at zero."); - goto fail; - } - if (*dptr * 2 > Hz) { - PyErr_SetString(PyExc_ValueError, - "Band edges 
should be less than 1/2 the sampling frequency"); - goto fail; - } - oldvalue = *dptr; - *dptr = oldvalue / Hz; /* Change so that sampling frequency is 1.0 */ - dptr++; - } - - ret_dimens = numtaps; - h = (PyArrayObject *)PyArray_SimpleNew(1, &ret_dimens, PyArray_DOUBLE); - if (h == NULL) goto fail; - - err=pre_remez((double *)h->data, numtaps, numbands, (double *)a_bands->data, (double *)a_des->data, (double *)a_weight->data, type, maxiter, grid_density); - if (err < 0) { - if (err == -1) { - sprintf(mystr,"Failure to converge after %d iterations.\n Design may still be correct.",maxiter); - PyErr_SetString(PyExc_ValueError, mystr); - goto fail; - } - else if (err == -2) { - PyErr_NoMemory(); - goto fail; - } - } - - Py_DECREF(a_bands); - Py_DECREF(a_des); - Py_DECREF(a_weight); - - return PyArray_Return(h); - - fail: - Py_XDECREF(a_bands); - Py_XDECREF(a_des); - Py_XDECREF(a_weight); - Py_XDECREF(h); - return NULL; -} - -static char doc_median2d[] = "filt = _median2d(data, size)"; - -extern void f_medfilt2(float*,float*,intp*,intp*); -extern void d_medfilt2(double*,double*,intp*,intp*); -extern void b_medfilt2(unsigned char*,unsigned char*,intp*,intp*); - -static PyObject *sigtools_median2d(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *image=NULL, *size=NULL; - int typenum; - PyArrayObject *a_image=NULL, *a_size=NULL; - PyArrayObject *a_out=NULL; - intp Nwin[2] = {3,3}; - - if (!PyArg_ParseTuple(args, "O|O", &image, &size)) return NULL; - - typenum = PyArray_ObjectType(image, 0); - a_image = (PyArrayObject *)PyArray_ContiguousFromObject(image, typenum, 2, 2); - if (a_image == NULL) goto fail; - - if (size != NULL) { - a_size = (PyArrayObject *)PyArray_ContiguousFromObject(size, NPY_INTP, 1, 1); - if (a_size == NULL) goto fail; - if ((RANK(a_size) != 1) || (DIMS(a_size)[0] < 2)) - PYERR("Size must be a length two sequence"); - Nwin[0] = ((intp *)DATA(a_size))[0]; - Nwin[1] = ((intp *)DATA(a_size))[1]; - } - - a_out = (PyArrayObject 
*)PyArray_SimpleNew(2,DIMS(a_image),typenum); - if (a_out == NULL) goto fail; - - if (setjmp(MALLOC_FAIL)) { - PYERR("Memory allocation error."); - } - else { - switch (typenum) { - case PyArray_UBYTE: - b_medfilt2((unsigned char *)DATA(a_image), (unsigned char *)DATA(a_out), Nwin, DIMS(a_image)); - break; - case PyArray_FLOAT: - f_medfilt2((float *)DATA(a_image), (float *)DATA(a_out), Nwin, DIMS(a_image)); - break; - case PyArray_DOUBLE: - d_medfilt2((double *)DATA(a_image), (double *)DATA(a_out), Nwin, DIMS(a_image)); - break; - default: - PYERR("2D median filter only supports Int8, Float32, and Float64."); - } - } - - Py_DECREF(a_image); - Py_XDECREF(a_size); - - return PyArray_Return(a_out); - - fail: - Py_XDECREF(a_image); - Py_XDECREF(a_size); - Py_XDECREF(a_out); - return NULL; - -} - -static char doc_linear_filter[] = - "(y,Vf) = _linear_filter(b,a,X,Dim=-1,Vi=None) " \ - "implemented using Direct Form II transposed flow " \ - "diagram. If Vi is not given, Vf is not returned."; - -static struct PyMethodDef toolbox_module_methods[] = { - {"_correlateND", scipy_signal_sigtools_correlateND, METH_VARARGS, doc_correlateND}, - {"_convolve2d", sigtools_convolve2d, METH_VARARGS, doc_convolve2d}, - {"_order_filterND", sigtools_order_filterND, METH_VARARGS, doc_order_filterND}, - {"_linear_filter", scipy_signal_sigtools_linear_filter, METH_VARARGS, doc_linear_filter}, - {"_remez",sigtools_remez, METH_VARARGS, doc_remez}, - {"_medfilt2d", sigtools_median2d, METH_VARARGS, doc_median2d}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "sigtools", - NULL, - -1, - toolbox_module_methods, - NULL, - NULL, - NULL, - NULL -}; -PyObject *PyInit_sigtools(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - - scipy_signal_sigtools_linear_filter_module_init(); - - return m; -} -#else -/* Initialization function for the module (*must* be called 
initsigtools) */ - -PyMODINIT_FUNC initsigtools(void) { - PyObject *m, *d; - - /* Create the module and add the functions */ - m = Py_InitModule("sigtools", toolbox_module_methods); - - /* Import the C API function pointers for the Array Object*/ - import_array(); - - /* Make sure the multiarraymodule is loaded so that the zero - and one objects are defined */ - /* XXX: This should be updated for scipy. I think it's pulling in - Numeric's multiarray. */ - PyImport_ImportModule("numpy.core.multiarray"); - /* { PyObject *multi = PyImport_ImportModule("multiarray"); } */ - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - - /* PyDict_SetItemString(d,"BANDPASS", PyInt_FromLong((long) BANDPASS)); - PyDict_SetItemString(d,"DIFFERENTIATOR", PyInt_FromLong((long) DIFFERENTIATOR)); - PyDict_SetItemString(d,"HILBERT", PyInt_FromLong((long) HILBERT)); - */ - - scipy_signal_sigtools_linear_filter_module_init(); - - /* Check for errors */ - if (PyErr_Occurred()) { - PyErr_Print(); - Py_FatalError("can't initialize module array"); - } -} -#endif diff --git a/scipy-0.10.1/scipy/signal/spectral.c b/scipy-0.10.1/scipy/signal/spectral.c deleted file mode 100644 index 637110d0a1..0000000000 --- a/scipy-0.10.1/scipy/signal/spectral.c +++ /dev/null @@ -1,5598 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:19:21 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. 
-#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - 
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) 
PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__signal__spectral -#define __PYX_HAVE_API__scipy__signal__spectral -#include "stdio.h" -#include "stdlib.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#include "math.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef 
PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... */ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... */ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "spectral.pyx", - "numpy.pxd", -}; - -/* "numpy.pxd":719 - * # in Cython to enable them only on the right systems. 
- * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "numpy.pxd":720 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "numpy.pxd":721 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "numpy.pxd":722 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "numpy.pxd":726 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "numpy.pxd":727 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "numpy.pxd":728 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* "numpy.pxd":729 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "numpy.pxd":733 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t 
- */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "numpy.pxd":734 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "numpy.pxd":743 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "numpy.pxd":744 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "numpy.pxd":745 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong __pyx_t_5numpy_longlong_t; - -/* "numpy.pxd":747 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "numpy.pxd":748 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "numpy.pxd":749 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "numpy.pxd":751 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "numpy.pxd":752 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # 
<<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "numpy.pxd":754 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "numpy.pxd":755 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* "numpy.pxd":756 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -/*--- Type declarations ---*/ - -/* "numpy.pxd":758 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "numpy.pxd":759 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "numpy.pxd":760 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; 
- -/* "numpy.pxd":762 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; - - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define 
__Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact); /*proto*/ - -/* Run-time type information about structs used with buffers */ -struct __Pyx_StructField_; - -typedef struct { - const char* name; /* for error messages only */ - struct __Pyx_StructField_* fields; - size_t size; /* sizeof(type) */ - char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */ -} __Pyx_TypeInfo; - -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; - -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; - - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ -#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * 
s0) - -#define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_SetItemInt_Fast(o, i, v) : \ - __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) - -static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { - int r; - if (!j) return -1; - r = PyObject_SetItem(o, j, v); - Py_DECREF(j); - return r; -} - -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - Py_INCREF(v); - Py_DECREF(PyList_GET_ITEM(o, i)); - PyList_SET_ITEM(o, i, v); - return 1; - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && (likely(i >= 0))) - return PySequence_SetItem(o, i, v); - else { - PyObject *j = PyInt_FromSsize_t(i); - return __Pyx_SetItemInt_Generic(o, j, v); - } -} - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); -static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else -#define __Pyx_GetBuffer PyObject_GetBuffer -#define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - -Py_ssize_t __Pyx_zeros[] = {0}; -Py_ssize_t __Pyx_minusones[] = {-1}; - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define 
__Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define __Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int 
__Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static int __Pyx_check_binary_version(void); - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - -static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'libc.stdlib' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject 
*__pyx_ptype_5numpy_ufunc = 0; -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ - -/* Module declarations from 'cython.cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'scipy.signal.spectral' */ -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t = { "float64_t", NULL, sizeof(__pyx_t_5numpy_float64_t), 'R' }; -#define __Pyx_MODULE_NAME "scipy.signal.spectral" -int __pyx_module_is_main_scipy__signal__spectral = 0; - -/* Implementation of 'scipy.signal.spectral' */ -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_RuntimeError; -static char __pyx_k_1[] = "Input arrays do not have the same size."; -static char __pyx_k_3[] = "ndarray is not C contiguous"; -static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; -static char __pyx_k_7[] = "Non-native byte order not supported"; -static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; -static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; -static char __pyx_k_13[] = "Format string allocated too short."; -static char __pyx_k_15[] = "Tools 
for spectral analysis of unequally sampled signals."; -static char __pyx_k_16[] = "scipy.signal.spectral"; -static char __pyx_k_17[] = "lombscargle (line 19)"; -static char __pyx_k_18[] = "Computes the Lomb-Scargle periodogram.\n \n The Lomb-Scargle periodogram was developed by Lomb [1]_ and further\n extended by Scargle [2]_ to find, and test the significance of weak\n periodic signals with uneven temporal sampling.\n\n The computed periodogram is unnormalized, it takes the value\n ``(A**2) * N/4`` for a harmonic signal with amplitude A for sufficiently\n large N.\n\n Parameters\n ----------\n x : array_like\n Sample times.\n y : array_like\n Measurement values.\n freqs : array_like\n Angular frequencies for output periodogram.\n\n Returns\n -------\n pgram : array_like\n Lomb-Scargle periodogram.\n\n Raises\n ------\n ValueError\n If the input arrays `x` and `y` do not have the same shape.\n\n Notes\n -----\n This subroutine calculates the periodogram using a slightly\n modified algorithm due to Townsend [3]_ which allows the\n periodogram to be calculated using only a single pass through\n the input arrays for each frequency.\n\n The algorithm running time scales roughly as O(x * freqs) or O(N^2)\n for a large number of samples and frequencies.\n\n References\n ----------\n .. [1] N.R. Lomb \"Least-squares frequency analysis of unequally spaced\n data\", Astrophysics and Space Science, vol 39, pp. 447-462, 1976\n\n .. [2] J.D. Scargle \"Studies in astronomical time series analysis. II - \n Statistical aspects of spectral analysis of unevenly spaced data\",\n The Astrophysical Journal, vol 263, pp. 835-853, 1982\n\n .. [3] R.H.D. Townsend, \"Fast calculation of the Lomb-Scargle\n periodogram using graphics processing units.\", The Astrophysical\n Journal Supplement Series, vol 191, pp. 
247-253, 2010\n\n Examples\n --------\n >>> import scipy.signal\n\n First define some input parameters for the signal:\n\n >>> A = 2.\n >>> w = 1.\n >>> phi = 0.5 * np.pi\n >>> nin = 1000\n >>> nout ="" 100000\n >>> frac_points = 0.9 # Fraction of points to select\n \n Randomly select a fraction of an array with timesteps:\n\n >>> r = np.random.rand(nin)\n >>> x = np.linspace(0.01, 10*np.pi, nin)\n >>> x = x[r >= frac_points]\n >>> normval = x.shape[0] # For normalization of the periodogram\n \n Plot a sine wave for the selected times:\n\n >>> y = A * np.sin(w*x+phi)\n\n Define the array of frequencies for which to compute the periodogram:\n \n >>> f = np.linspace(0.01, 10, nout)\n \n Calculate Lomb-Scargle periodogram:\n\n >>> pgram = sp.signal.lombscargle(x, y, f)\n\n Now make a plot of the input data:\n\n >>> plt.subplot(2, 1, 1)\n \n >>> plt.plot(x, y, 'b+')\n []\n\n Then plot the normalized periodogram:\n\n >>> plt.subplot(2, 1, 2)\n \n >>> plt.plot(f, np.sqrt(4*(pgram/normval)))\n []\n >>> plt.show()\n \n "; -static char __pyx_k__B[] = "B"; -static char __pyx_k__H[] = "H"; -static char __pyx_k__I[] = "I"; -static char __pyx_k__L[] = "L"; -static char __pyx_k__O[] = "O"; -static char __pyx_k__Q[] = "Q"; -static char __pyx_k__b[] = "b"; -static char __pyx_k__d[] = "d"; -static char __pyx_k__f[] = "f"; -static char __pyx_k__g[] = "g"; -static char __pyx_k__h[] = "h"; -static char __pyx_k__i[] = "i"; -static char __pyx_k__l[] = "l"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__x[] = "x"; -static char __pyx_k__y[] = "y"; -static char __pyx_k__Zd[] = "Zd"; -static char __pyx_k__Zf[] = "Zf"; -static char __pyx_k__Zg[] = "Zg"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__dtype[] = "dtype"; -static char __pyx_k__empty[] = "empty"; -static char __pyx_k__freqs[] = "freqs"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__range[] = "range"; -static char __pyx_k____all__[] = "__all__"; -static char __pyx_k__float64[] = "float64"; 
-static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k__lombscargle[] = "lombscargle"; -static char __pyx_k__RuntimeError[] = "RuntimeError"; -static PyObject *__pyx_kp_s_1; -static PyObject *__pyx_kp_u_10; -static PyObject *__pyx_kp_u_13; -static PyObject *__pyx_n_s_16; -static PyObject *__pyx_kp_u_17; -static PyObject *__pyx_kp_u_18; -static PyObject *__pyx_kp_u_3; -static PyObject *__pyx_kp_u_5; -static PyObject *__pyx_kp_u_7; -static PyObject *__pyx_kp_u_9; -static PyObject *__pyx_n_s__RuntimeError; -static PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s____all__; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s__dtype; -static PyObject *__pyx_n_s__empty; -static PyObject *__pyx_n_s__float64; -static PyObject *__pyx_n_s__freqs; -static PyObject *__pyx_n_s__lombscargle; -static PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__x; -static PyObject *__pyx_n_s__y; -static PyObject *__pyx_int_15; -static PyObject *__pyx_k_tuple_2; -static PyObject *__pyx_k_tuple_4; -static PyObject *__pyx_k_tuple_6; -static PyObject *__pyx_k_tuple_8; -static PyObject *__pyx_k_tuple_11; -static PyObject *__pyx_k_tuple_12; -static PyObject *__pyx_k_tuple_14; - -/* "scipy/signal/spectral.pyx":19 - * - * @cython.boundscheck(False) - * def lombscargle(np.ndarray[np.float64_t, ndim=1] x, # <<<<<<<<<<<<<< - * np.ndarray[np.float64_t, ndim=1] y, - * np.ndarray[np.float64_t, ndim=1] freqs): - */ - -static PyObject *__pyx_pf_5scipy_6signal_8spectral_lombscargle(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_6signal_8spectral_lombscargle[] = "Computes the Lomb-Scargle periodogram.\n \n The Lomb-Scargle periodogram was developed by Lomb [1]_ and further\n extended by Scargle [2]_ to 
find, and test the significance of weak\n periodic signals with uneven temporal sampling.\n\n The computed periodogram is unnormalized, it takes the value\n ``(A**2) * N/4`` for a harmonic signal with amplitude A for sufficiently\n large N.\n\n Parameters\n ----------\n x : array_like\n Sample times.\n y : array_like\n Measurement values.\n freqs : array_like\n Angular frequencies for output periodogram.\n\n Returns\n -------\n pgram : array_like\n Lomb-Scargle periodogram.\n\n Raises\n ------\n ValueError\n If the input arrays `x` and `y` do not have the same shape.\n\n Notes\n -----\n This subroutine calculates the periodogram using a slightly\n modified algorithm due to Townsend [3]_ which allows the\n periodogram to be calculated using only a single pass through\n the input arrays for each frequency.\n\n The algorithm running time scales roughly as O(x * freqs) or O(N^2)\n for a large number of samples and frequencies.\n\n References\n ----------\n .. [1] N.R. Lomb \"Least-squares frequency analysis of unequally spaced\n data\", Astrophysics and Space Science, vol 39, pp. 447-462, 1976\n\n .. [2] J.D. Scargle \"Studies in astronomical time series analysis. II - \n Statistical aspects of spectral analysis of unevenly spaced data\",\n The Astrophysical Journal, vol 263, pp. 835-853, 1982\n\n .. [3] R.H.D. Townsend, \"Fast calculation of the Lomb-Scargle\n periodogram using graphics processing units.\", The Astrophysical\n Journal Supplement Series, vol 191, pp. 
247-253, 2010\n\n Examples\n --------\n >>> import scipy.signal\n\n First define some input parameters for the signal:\n\n >>> A = 2.\n >>> w = 1.\n >>> phi = 0.5 * np.pi\n >>> nin = 1000\n >>> nout ="" 100000\n >>> frac_points = 0.9 # Fraction of points to select\n \n Randomly select a fraction of an array with timesteps:\n\n >>> r = np.random.rand(nin)\n >>> x = np.linspace(0.01, 10*np.pi, nin)\n >>> x = x[r >= frac_points]\n >>> normval = x.shape[0] # For normalization of the periodogram\n \n Plot a sine wave for the selected times:\n\n >>> y = A * np.sin(w*x+phi)\n\n Define the array of frequencies for which to compute the periodogram:\n \n >>> f = np.linspace(0.01, 10, nout)\n \n Calculate Lomb-Scargle periodogram:\n\n >>> pgram = sp.signal.lombscargle(x, y, f)\n\n Now make a plot of the input data:\n\n >>> plt.subplot(2, 1, 1)\n \n >>> plt.plot(x, y, 'b+')\n []\n\n Then plot the normalized periodogram:\n\n >>> plt.subplot(2, 1, 2)\n \n >>> plt.plot(f, np.sqrt(4*(pgram/normval)))\n []\n >>> plt.show()\n \n "; -static PyMethodDef __pyx_mdef_5scipy_6signal_8spectral_lombscargle = {__Pyx_NAMESTR("lombscargle"), (PyCFunction)__pyx_pf_5scipy_6signal_8spectral_lombscargle, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_6signal_8spectral_lombscargle)}; -static PyObject *__pyx_pf_5scipy_6signal_8spectral_lombscargle(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_x = 0; - PyArrayObject *__pyx_v_y = 0; - PyArrayObject *__pyx_v_freqs = 0; - PyObject *__pyx_v_pgram = NULL; - Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_j; - double __pyx_v_c; - double __pyx_v_s; - double __pyx_v_xc; - double __pyx_v_xs; - double __pyx_v_cc; - double __pyx_v_ss; - double __pyx_v_cs; - double __pyx_v_tau; - double __pyx_v_c_tau; - double __pyx_v_s_tau; - double __pyx_v_c_tau2; - double __pyx_v_s_tau2; - double __pyx_v_cs_tau; - Py_buffer __pyx_bstruct_freqs; - Py_ssize_t __pyx_bstride_0_freqs = 0; - Py_ssize_t __pyx_bshape_0_freqs = 0; - 
Py_buffer __pyx_bstruct_y; - Py_ssize_t __pyx_bstride_0_y = 0; - Py_ssize_t __pyx_bshape_0_y = 0; - Py_buffer __pyx_bstruct_x; - Py_ssize_t __pyx_bstride_0_x = 0; - Py_ssize_t __pyx_bshape_0_x = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - npy_intp __pyx_t_7; - Py_ssize_t __pyx_t_8; - npy_intp __pyx_t_9; - Py_ssize_t __pyx_t_10; - Py_ssize_t __pyx_t_11; - Py_ssize_t __pyx_t_12; - Py_ssize_t __pyx_t_13; - Py_ssize_t __pyx_t_14; - Py_ssize_t __pyx_t_15; - Py_ssize_t __pyx_t_16; - double __pyx_t_17; - double __pyx_t_18; - double __pyx_t_19; - __pyx_t_5numpy_float64_t __pyx_t_20; - Py_ssize_t __pyx_t_21; - Py_ssize_t __pyx_t_22; - double __pyx_t_23; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__y,&__pyx_n_s__freqs,0}; - __Pyx_RefNannySetupContext("lombscargle"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__y); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("lombscargle", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__freqs); - if 
(likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("lombscargle", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "lombscargle") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_x = ((PyArrayObject *)values[0]); - __pyx_v_y = ((PyArrayObject *)values[1]); - __pyx_v_freqs = ((PyArrayObject *)values[2]); - } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_x = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 0)); - __pyx_v_y = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 1)); - __pyx_v_freqs = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 2)); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("lombscargle", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.signal.spectral.lombscargle", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_x.buf = NULL; - __pyx_bstruct_y.buf = NULL; - __pyx_bstruct_freqs.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_freqs), __pyx_ptype_5numpy_ndarray, 1, "freqs", 0))) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_x = __pyx_bstruct_x.strides[0]; - __pyx_bshape_0_x = __pyx_bstruct_x.shape[0]; - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_y, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_y = __pyx_bstruct_y.strides[0]; - __pyx_bshape_0_y = __pyx_bstruct_y.shape[0]; - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_freqs, (PyObject*)__pyx_v_freqs, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_freqs = __pyx_bstruct_freqs.strides[0]; - __pyx_bshape_0_freqs = __pyx_bstruct_freqs.shape[0]; - - /* "scipy/signal/spectral.pyx":124 - * - * # Check input sizes - * if x.shape[0] != y.shape[0]: # <<<<<<<<<<<<<< - * raise ValueError("Input arrays do not have the same size.") - * - */ - __pyx_t_1 = ((__pyx_v_x->dimensions[0]) != (__pyx_v_y->dimensions[0])); - if (__pyx_t_1) { - - /* "scipy/signal/spectral.pyx":125 - * # Check input sizes - * if x.shape[0] != y.shape[0]: - * raise ValueError("Input arrays do not have the same size.") # <<<<<<<<<<<<<< - * - * # Create empty array for output periodogram - */ - __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "scipy/signal/spectral.pyx":128 - * - * # Create empty array for output periodogram - * pgram = np.empty(freqs.shape[0], dtype=np.float64) # <<<<<<<<<<<<<< - * - * # Local variables - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_freqs->dimensions[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__float64); if 
(unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_v_pgram = __pyx_t_6; - __pyx_t_6 = 0; - - /* "scipy/signal/spectral.pyx":135 - * cdef double tau, c_tau, s_tau, c_tau2, s_tau2, cs_tau - * - * for i in range(freqs.shape[0]): # <<<<<<<<<<<<<< - * - * xc = 0. - */ - __pyx_t_7 = (__pyx_v_freqs->dimensions[0]); - for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { - __pyx_v_i = __pyx_t_8; - - /* "scipy/signal/spectral.pyx":137 - * for i in range(freqs.shape[0]): - * - * xc = 0. # <<<<<<<<<<<<<< - * xs = 0. - * cc = 0. - */ - __pyx_v_xc = 0.; - - /* "scipy/signal/spectral.pyx":138 - * - * xc = 0. - * xs = 0. # <<<<<<<<<<<<<< - * cc = 0. - * ss = 0. - */ - __pyx_v_xs = 0.; - - /* "scipy/signal/spectral.pyx":139 - * xc = 0. - * xs = 0. - * cc = 0. # <<<<<<<<<<<<<< - * ss = 0. - * cs = 0. - */ - __pyx_v_cc = 0.; - - /* "scipy/signal/spectral.pyx":140 - * xs = 0. - * cc = 0. - * ss = 0. # <<<<<<<<<<<<<< - * cs = 0. - * - */ - __pyx_v_ss = 0.; - - /* "scipy/signal/spectral.pyx":141 - * cc = 0. - * ss = 0. - * cs = 0. # <<<<<<<<<<<<<< - * - * for j in range(x.shape[0]): - */ - __pyx_v_cs = 0.; - - /* "scipy/signal/spectral.pyx":143 - * cs = 0. 
- * - * for j in range(x.shape[0]): # <<<<<<<<<<<<<< - * - * c = cos(freqs[i] * x[j]) - */ - __pyx_t_9 = (__pyx_v_x->dimensions[0]); - for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { - __pyx_v_j = __pyx_t_10; - - /* "scipy/signal/spectral.pyx":145 - * for j in range(x.shape[0]): - * - * c = cos(freqs[i] * x[j]) # <<<<<<<<<<<<<< - * s = sin(freqs[i] * x[j]) - * - */ - __pyx_t_11 = __pyx_v_i; - if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_0_freqs; - __pyx_t_12 = __pyx_v_j; - if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_0_x; - __pyx_v_c = cos(((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_freqs.buf, __pyx_t_11, __pyx_bstride_0_freqs)) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_x.buf, __pyx_t_12, __pyx_bstride_0_x)))); - - /* "scipy/signal/spectral.pyx":146 - * - * c = cos(freqs[i] * x[j]) - * s = sin(freqs[i] * x[j]) # <<<<<<<<<<<<<< - * - * xc += y[j] * c - */ - __pyx_t_13 = __pyx_v_i; - if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_freqs; - __pyx_t_14 = __pyx_v_j; - if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_0_x; - __pyx_v_s = sin(((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_freqs.buf, __pyx_t_13, __pyx_bstride_0_freqs)) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_x.buf, __pyx_t_14, __pyx_bstride_0_x)))); - - /* "scipy/signal/spectral.pyx":148 - * s = sin(freqs[i] * x[j]) - * - * xc += y[j] * c # <<<<<<<<<<<<<< - * xs += y[j] * s - * cc += c * c - */ - __pyx_t_15 = __pyx_v_j; - if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_y; - __pyx_v_xc = (__pyx_v_xc + ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_y.buf, __pyx_t_15, __pyx_bstride_0_y)) * __pyx_v_c)); - - /* "scipy/signal/spectral.pyx":149 - * - * xc += y[j] * c - * xs += y[j] * s # <<<<<<<<<<<<<< - * cc += c * c - * ss += s * s - */ - __pyx_t_16 = __pyx_v_j; - if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_0_y; - __pyx_v_xs = (__pyx_v_xs + 
((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_y.buf, __pyx_t_16, __pyx_bstride_0_y)) * __pyx_v_s)); - - /* "scipy/signal/spectral.pyx":150 - * xc += y[j] * c - * xs += y[j] * s - * cc += c * c # <<<<<<<<<<<<<< - * ss += s * s - * cs += c * s - */ - __pyx_v_cc = (__pyx_v_cc + (__pyx_v_c * __pyx_v_c)); - - /* "scipy/signal/spectral.pyx":151 - * xs += y[j] * s - * cc += c * c - * ss += s * s # <<<<<<<<<<<<<< - * cs += c * s - * - */ - __pyx_v_ss = (__pyx_v_ss + (__pyx_v_s * __pyx_v_s)); - - /* "scipy/signal/spectral.pyx":152 - * cc += c * c - * ss += s * s - * cs += c * s # <<<<<<<<<<<<<< - * - * tau = atan(2 * cs / (cc - ss)) / (2 * freqs[i]) - */ - __pyx_v_cs = (__pyx_v_cs + (__pyx_v_c * __pyx_v_s)); - } - - /* "scipy/signal/spectral.pyx":154 - * cs += c * s - * - * tau = atan(2 * cs / (cc - ss)) / (2 * freqs[i]) # <<<<<<<<<<<<<< - * c_tau = cos(freqs[i] * tau) - * s_tau = sin(freqs[i] * tau) - */ - __pyx_t_17 = (2.0 * __pyx_v_cs); - __pyx_t_18 = (__pyx_v_cc - __pyx_v_ss); - if (unlikely(__pyx_t_18 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_19 = atan((__pyx_t_17 / __pyx_t_18)); - __pyx_t_10 = __pyx_v_i; - if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_bshape_0_freqs; - __pyx_t_20 = (2.0 * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_freqs.buf, __pyx_t_10, __pyx_bstride_0_freqs))); - if (unlikely(__pyx_t_20 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_tau = (__pyx_t_19 / __pyx_t_20); - - /* "scipy/signal/spectral.pyx":155 - * - * tau = atan(2 * cs / (cc - ss)) / (2 * freqs[i]) - * c_tau = cos(freqs[i] * tau) # <<<<<<<<<<<<<< - * s_tau = sin(freqs[i] * tau) - * c_tau2 = c_tau * c_tau - */ - __pyx_t_21 = __pyx_v_i; - if (__pyx_t_21 < 0) __pyx_t_21 += 
__pyx_bshape_0_freqs; - __pyx_v_c_tau = cos(((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_freqs.buf, __pyx_t_21, __pyx_bstride_0_freqs)) * __pyx_v_tau)); - - /* "scipy/signal/spectral.pyx":156 - * tau = atan(2 * cs / (cc - ss)) / (2 * freqs[i]) - * c_tau = cos(freqs[i] * tau) - * s_tau = sin(freqs[i] * tau) # <<<<<<<<<<<<<< - * c_tau2 = c_tau * c_tau - * s_tau2 = s_tau * s_tau - */ - __pyx_t_22 = __pyx_v_i; - if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_bshape_0_freqs; - __pyx_v_s_tau = sin(((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_bstruct_freqs.buf, __pyx_t_22, __pyx_bstride_0_freqs)) * __pyx_v_tau)); - - /* "scipy/signal/spectral.pyx":157 - * c_tau = cos(freqs[i] * tau) - * s_tau = sin(freqs[i] * tau) - * c_tau2 = c_tau * c_tau # <<<<<<<<<<<<<< - * s_tau2 = s_tau * s_tau - * cs_tau = 2 * c_tau * s_tau - */ - __pyx_v_c_tau2 = (__pyx_v_c_tau * __pyx_v_c_tau); - - /* "scipy/signal/spectral.pyx":158 - * s_tau = sin(freqs[i] * tau) - * c_tau2 = c_tau * c_tau - * s_tau2 = s_tau * s_tau # <<<<<<<<<<<<<< - * cs_tau = 2 * c_tau * s_tau - * - */ - __pyx_v_s_tau2 = (__pyx_v_s_tau * __pyx_v_s_tau); - - /* "scipy/signal/spectral.pyx":159 - * c_tau2 = c_tau * c_tau - * s_tau2 = s_tau * s_tau - * cs_tau = 2 * c_tau * s_tau # <<<<<<<<<<<<<< - * - * pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ - */ - __pyx_v_cs_tau = ((2.0 * __pyx_v_c_tau) * __pyx_v_s_tau); - - /* "scipy/signal/spectral.pyx":161 - * cs_tau = 2 * c_tau * s_tau - * - * pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ # <<<<<<<<<<<<<< - * (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ - * ((c_tau * xs - s_tau * xc)**2 / \ - */ - __pyx_t_19 = pow(((__pyx_v_c_tau * __pyx_v_xc) + (__pyx_v_s_tau * __pyx_v_xs)), 2.0); - - /* "scipy/signal/spectral.pyx":162 - * - * pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ - * (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ # <<<<<<<<<<<<<< - * ((c_tau * xs - s_tau * xc)**2 / \ - * (c_tau2 * ss - cs_tau * cs + s_tau2 * cc))) - */ - 
__pyx_t_18 = (((__pyx_v_c_tau2 * __pyx_v_cc) + (__pyx_v_cs_tau * __pyx_v_cs)) + (__pyx_v_s_tau2 * __pyx_v_ss)); - if (unlikely(__pyx_t_18 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - - /* "scipy/signal/spectral.pyx":163 - * pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ - * (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ - * ((c_tau * xs - s_tau * xc)**2 / \ # <<<<<<<<<<<<<< - * (c_tau2 * ss - cs_tau * cs + s_tau2 * cc))) - * - */ - __pyx_t_17 = pow(((__pyx_v_c_tau * __pyx_v_xs) - (__pyx_v_s_tau * __pyx_v_xc)), 2.0); - - /* "scipy/signal/spectral.pyx":164 - * (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ - * ((c_tau * xs - s_tau * xc)**2 / \ - * (c_tau2 * ss - cs_tau * cs + s_tau2 * cc))) # <<<<<<<<<<<<<< - * - * return pgram - */ - __pyx_t_23 = (((__pyx_v_c_tau2 * __pyx_v_ss) - (__pyx_v_cs_tau * __pyx_v_cs)) + (__pyx_v_s_tau2 * __pyx_v_cc)); - if (unlikely(__pyx_t_23 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_6 = PyFloat_FromDouble((0.5 * ((__pyx_t_19 / __pyx_t_18) + (__pyx_t_17 / __pyx_t_23)))); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - - /* "scipy/signal/spectral.pyx":161 - * cs_tau = 2 * c_tau * s_tau - * - * pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ # <<<<<<<<<<<<<< - * (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ - * ((c_tau * xs - s_tau * xc)**2 / \ - */ - if (__Pyx_SetItemInt(__pyx_v_pgram, __pyx_v_i, __pyx_t_6, sizeof(Py_ssize_t), PyInt_FromSsize_t) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - - /* "scipy/signal/spectral.pyx":166 - * (c_tau2 * ss - cs_tau * cs + 
s_tau2 * cc))) - * - * return pgram # <<<<<<<<<<<<<< - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_pgram); - __pyx_r = __pyx_v_pgram; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_freqs); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_y); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.signal.spectral.lombscargle", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_freqs); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_y); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __pyx_L2:; - __Pyx_XDECREF(__pyx_v_pgram); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":190 - * # experimental exception made for __getbuffer__ and __releasebuffer__ - * # -- the details of this may change. - * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< - * # This implementation of getbuffer is geared towards Cython - * # requirements, and does not yet fullfill the PEP. 
- */ - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int __pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - /* "numpy.pxd":196 - * # of flags - * - * if info == NULL: return # <<<<<<<<<<<<<< - * - * cdef int copy_shape, i, ndim - */ - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":199 - * - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":200 - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * - * ndim = PyArray_NDIM(self) - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":202 - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject 
*)__pyx_v_self)); - - /* "numpy.pxd":204 - * ndim = PyArray_NDIM(self) - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * copy_shape = 1 - * else: - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":205 - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * copy_shape = 1 # <<<<<<<<<<<<<< - * else: - * copy_shape = 0 - */ - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - /*else*/ { - - /* "numpy.pxd":207 - * copy_shape = 1 - * else: - * copy_shape = 0 # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - */ - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - /* "numpy.pxd":209 - * copy_shape = 0 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - /* "numpy.pxd":210 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not C contiguous") - * - */ - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "numpy.pxd":213 - * raise ValueError(u"ndarray is not C contiguous") - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") - */ - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - /* "numpy.pxd":214 - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not Fortran contiguous") - * - */ - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (__pyx_t_2) { - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - /* "numpy.pxd":217 - * raise ValueError(u"ndarray is not Fortran contiguous") - * - * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< - * info.ndim = ndim - * if copy_shape: - */ - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":218 - * - * info.buf = PyArray_DATA(self) - 
* info.ndim = ndim # <<<<<<<<<<<<<< - * if copy_shape: - * # Allocate new buffer for strides and shape info. - */ - __pyx_v_info->ndim = __pyx_v_ndim; - - /* "numpy.pxd":219 - * info.buf = PyArray_DATA(self) - * info.ndim = ndim - * if copy_shape: # <<<<<<<<<<<<<< - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - */ - if (__pyx_v_copy_shape) { - - /* "numpy.pxd":222 - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< - * info.shape = info.strides + ndim - * for i in range(ndim): - */ - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - /* "numpy.pxd":223 - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim # <<<<<<<<<<<<<< - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - */ - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - /* "numpy.pxd":224 - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim - * for i in range(ndim): # <<<<<<<<<<<<<< - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] - */ - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "numpy.pxd":225 - * info.shape = info.strides + ndim - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - */ - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - /* "numpy.pxd":226 - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< - * else: - * info.strides = 
PyArray_STRIDES(self) - */ - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":228 - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - */ - __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":229 - * else: - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - */ - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - /* "numpy.pxd":230 - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) - */ - __pyx_v_info->suboffsets = NULL; - - /* "numpy.pxd":231 - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< - * info.readonly = not PyArray_ISWRITEABLE(self) - * - */ - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":232 - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< - * - * cdef int t - */ - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":235 - * - * cdef int t - * cdef char* f = NULL # <<<<<<<<<<<<<< - * cdef dtype descr = self.descr - * cdef list stack - */ - __pyx_v_f = NULL; - - /* "numpy.pxd":236 - * cdef int t - * cdef char* f = NULL - * cdef dtype descr = self.descr # <<<<<<<<<<<<<< - * cdef list stack - * cdef int offset - */ - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = 
((PyArrayObject *)__pyx_v_self)->descr; - - /* "numpy.pxd":240 - * cdef int offset - * - * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< - * - * if not hasfields and not copy_shape: - */ - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - /* "numpy.pxd":242 - * cdef bint hasfields = PyDataType_HASFIELDS(descr) - * - * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< - * # do not call releasebuffer - * info.obj = None - */ - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":244 - * if not hasfields and not copy_shape: - * # do not call releasebuffer - * info.obj = None # <<<<<<<<<<<<<< - * else: - * # need to call releasebuffer - */ - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - /*else*/ { - - /* "numpy.pxd":247 - * else: - * # need to call releasebuffer - * info.obj = self # <<<<<<<<<<<<<< - * - * if not hasfields: - */ - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - /* "numpy.pxd":249 - * info.obj = self - * - * if not hasfields: # <<<<<<<<<<<<<< - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - */ - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - /* "numpy.pxd":250 - * - * if not hasfields: - * t = descr.type_num # <<<<<<<<<<<<<< - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - */ - __pyx_v_t = __pyx_v_descr->type_num; - - /* "numpy.pxd":251 - * if not hasfields: - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (descr.byteorder == '<' and not little_endian)): - * raise 
ValueError(u"Non-native byte order not supported") - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - /* "numpy.pxd":252 - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - /* "numpy.pxd":254 - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - */ - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - /* "numpy.pxd":255 - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" # 
<<<<<<<<<<<<<< - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - /* "numpy.pxd":256 - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - */ - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - /* "numpy.pxd":257 - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - */ - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - /* "numpy.pxd":258 - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - */ - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - /* "numpy.pxd":259 - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - - /* "numpy.pxd":260 - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - /* "numpy.pxd":261 - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - */ - __pyx_t_1 = 
(__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - /* "numpy.pxd":262 - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - /* "numpy.pxd":263 - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - /* "numpy.pxd":264 - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - */ - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - /* "numpy.pxd":265 - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - */ - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - /* "numpy.pxd":266 - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - /* "numpy.pxd":267 - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - */ - __pyx_t_1 = (__pyx_v_t == 
NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - /* "numpy.pxd":268 - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - /* "numpy.pxd":269 - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f = "O" - * else: - */ - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - /* "numpy.pxd":270 - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - /*else*/ { - - /* "numpy.pxd":272 - * elif t == NPY_OBJECT: f = "O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * info.format = f - * return - */ - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - /* "numpy.pxd":273 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f # <<<<<<<<<<<<<< - * return - * else: - */ - __pyx_v_info->format = __pyx_v_f; - - /* "numpy.pxd":274 - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f - * return # <<<<<<<<<<<<<< - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - */ - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - /*else*/ { - - /* "numpy.pxd":276 - * return - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 - */ - __pyx_v_info->format = ((char *)malloc(255)); - - /* "numpy.pxd":277 - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< - * offset = 0 - * f = _util_dtypestring(descr, info.format + 1, - */ - (__pyx_v_info->format[0]) = '^'; - - /* "numpy.pxd":278 - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 # <<<<<<<<<<<<<< - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - */ - __pyx_v_offset = 0; - - /* "numpy.pxd":281 - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - * &offset) # <<<<<<<<<<<<<< - * 
f[0] = 0 # Terminate format string - * - */ - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - /* "numpy.pxd":282 - * info.format + _buffer_format_string_len, - * &offset) - * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - */ - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":284 - * f[0] = 0 # Terminate format string - * - * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - */ - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - /* "numpy.pxd":285 - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< - * stdlib.free(info.format) - * if sizeof(npy_intp) != 
sizeof(Py_ssize_t): - */ - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - /* "numpy.pxd":286 - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) # <<<<<<<<<<<<<< - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) - */ - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":287 - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * stdlib.free(info.strides) - * # info.shape was stored after info.strides in the same block - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":288 - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) # <<<<<<<<<<<<<< - * # info.shape was stored after info.strides in the same block - * - */ - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":764 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - /* "numpy.pxd":765 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r 
= __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":767 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - /* "numpy.pxd":768 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":770 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject 
*__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - /* "numpy.pxd":771 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":773 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - /* "numpy.pxd":774 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = 
PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":776 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - /* "numpy.pxd":777 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":779 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< - * # Recursive utility function used in __getbuffer__ to get format - * # string. The new location in the format string is returned. - */ - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - /* "numpy.pxd":786 - * cdef int delta_offset - * cdef tuple i - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * cdef tuple fields - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":787 - * cdef tuple i - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * cdef tuple fields - * - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":790 - * cdef tuple fields - * - * for childname in descr.names: # <<<<<<<<<<<<<< - * fields = 
descr.fields[childname] - * child, new_offset = fields - */ - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - __Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":791 - * - * for childname in descr.names: - * fields = descr.fields[childname] # <<<<<<<<<<<<<< - * child, new_offset = fields - * - */ - __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "numpy.pxd":792 - * for childname in descr.names: - * fields = descr.fields[childname] - * child, new_offset = fields # <<<<<<<<<<<<<< - * - * if (end - f) - (new_offset - offset[0]) < 15: - */ - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - 
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - /* "numpy.pxd":794 - * child, new_offset = fields - * - * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - */ - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":797 - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - /* "numpy.pxd":798 - * - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * # One could encode it in the format string and have Cython - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - 
__pyx_t_9 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "numpy.pxd":809 - * - * # Output padding bytes - * while offset[0] < new_offset: # <<<<<<<<<<<<<< - * f[0] = 120 # "x"; pad byte - * f += 1 - */ - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - /* "numpy.pxd":810 - * # Output padding bytes - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< - * f += 1 - * offset[0] += 1 - */ - (__pyx_v_f[0]) = 120; - - /* "numpy.pxd":811 
- * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte - * f += 1 # <<<<<<<<<<<<<< - * offset[0] += 1 - * - */ - __pyx_v_f = (__pyx_v_f + 1); - - /* "numpy.pxd":812 - * f[0] = 120 # "x"; pad byte - * f += 1 - * offset[0] += 1 # <<<<<<<<<<<<<< - * - * offset[0] += child.itemsize - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - /* "numpy.pxd":814 - * offset[0] += 1 - * - * offset[0] += child.itemsize # <<<<<<<<<<<<<< - * - * if not PyDataType_HASFIELDS(child): - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - /* "numpy.pxd":816 - * offset[0] += child.itemsize - * - * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< - * t = child.type_num - * if end - f < 5: - */ - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - /* "numpy.pxd":817 - * - * if not PyDataType_HASFIELDS(child): - * t = child.type_num # <<<<<<<<<<<<<< - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") - */ - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":818 - * if not PyDataType_HASFIELDS(child): - * t = child.type_num - * if end - f < 5: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short.") - * - */ - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - /* "numpy.pxd":822 - * - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - */ - __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - /* "numpy.pxd":823 - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - /* "numpy.pxd":824 - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - */ - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - /* "numpy.pxd":825 - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - */ - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if 
(unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - /* "numpy.pxd":826 - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - */ - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - /* "numpy.pxd":827 - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - /* "numpy.pxd":828 - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - /* "numpy.pxd":829 - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - /* "numpy.pxd":830 - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - /* "numpy.pxd":831 - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - /* "numpy.pxd":832 - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - */ - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - /* "numpy.pxd":833 - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - */ - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - /* "numpy.pxd":834 - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - /* "numpy.pxd":835 - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - */ - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":836 - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" - */ - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":837 - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - */ - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":838 - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - /*else*/ { - - /* "numpy.pxd":840 - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * f += 1 - * else: - */ - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = 
PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - /* "numpy.pxd":841 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * f += 1 # <<<<<<<<<<<<<< - * else: - * # Cython ignores struct boundary information ("T{...}"), - */ - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":845 - * # Cython ignores struct boundary information ("T{...}"), - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< - * return f - * - */ - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "numpy.pxd":846 - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) - * return f # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":961 - * - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * cdef PyObject* baseptr - * if base is None: - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - /* "numpy.pxd":963 - * cdef inline void set_array_base(ndarray arr, object base): - * cdef PyObject* baseptr - * if base is None: # <<<<<<<<<<<<<< - * baseptr = NULL - * else: - */ - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - /* "numpy.pxd":964 - * cdef PyObject* baseptr - * if base is None: - * baseptr = NULL # <<<<<<<<<<<<<< - * else: - * Py_INCREF(base) # important to do this before decref below! - */ - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":966 - * baseptr = NULL - * else: - * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< - * baseptr = base - * Py_XDECREF(arr.base) - */ - Py_INCREF(__pyx_v_base); - - /* "numpy.pxd":967 - * else: - * Py_INCREF(base) # important to do this before decref below! - * baseptr = base # <<<<<<<<<<<<<< - * Py_XDECREF(arr.base) - * arr.base = baseptr - */ - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - /* "numpy.pxd":968 - * Py_INCREF(base) # important to do this before decref below! 
- * baseptr = base - * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< - * arr.base = baseptr - * - */ - Py_XDECREF(__pyx_v_arr->base); - - /* "numpy.pxd":969 - * baseptr = base - * Py_XDECREF(arr.base) - * arr.base = baseptr # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - /* "numpy.pxd":972 - * - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: # <<<<<<<<<<<<<< - * return None - * else: - */ - __pyx_t_1 = (__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - /* "numpy.pxd":973 - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: - * return None # <<<<<<<<<<<<<< - * else: - * return arr.base - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":975 - * return None - * else: - * return arr.base # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("spectral"), - __Pyx_DOCSTR(__pyx_k_15), /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - 
NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, - {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, - {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, - {&__pyx_n_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 1}, - {&__pyx_kp_u_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 1, 0, 0}, - {&__pyx_kp_u_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 1, 0, 0}, - {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, - {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, - {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, - {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____all__, __pyx_k____all__, sizeof(__pyx_k____all__), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1}, - {&__pyx_n_s__float64, __pyx_k__float64, sizeof(__pyx_k__float64), 0, 0, 1, 1}, - {&__pyx_n_s__freqs, __pyx_k__freqs, sizeof(__pyx_k__freqs), 0, 0, 1, 1}, - {&__pyx_n_s__lombscargle, __pyx_k__lombscargle, sizeof(__pyx_k__lombscargle), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, - {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_ValueError = 
__Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "scipy/signal/spectral.pyx":125 - * # Check input sizes - * if x.shape[0] != y.shape[0]: - * raise ValueError("Input arrays do not have the same size.") # <<<<<<<<<<<<<< - * - * # Create empty array for output periodogram - */ - __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); - 
PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); - PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_8)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); - PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11)); 
- __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); - PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); - PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_14)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); - PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initspectral(void); /*proto*/ -PyMODINIT_FUNC initspectral(void) -#else -PyMODINIT_FUNC PyInit_spectral(void); /*proto*/ -PyMODINIT_FUNC PyInit_spectral(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_spectral(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? 
*/ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("spectral"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_15), 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. ---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__signal__spectral) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - /*--- Type import code ---*/ - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/signal/spectral.pyx":6 - * """Tools for spectral analysis of unequally sampled signals.""" - * - * import numpy as np # <<<<<<<<<<<<<< - * cimport numpy as np - * cimport cython - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/signal/spectral.pyx":10 - * cimport cython - * - * __all__ = ['lombscargle'] # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - 
__Pyx_INCREF(((PyObject *)__pyx_n_s__lombscargle)); - PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__lombscargle)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__lombscargle)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____all__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - /* "scipy/signal/spectral.pyx":19 - * - * @cython.boundscheck(False) - * def lombscargle(np.ndarray[np.float64_t, ndim=1] x, # <<<<<<<<<<<<<< - * np.ndarray[np.float64_t, ndim=1] y, - * np.ndarray[np.float64_t, ndim=1] freqs): - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_6signal_8spectral_lombscargle, NULL, __pyx_n_s_16); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__lombscargle, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/signal/spectral.pyx":1 - * # Author: Pim Schellart # <<<<<<<<<<<<<< - * # 2010 - 2011 - * - */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_17), ((PyObject *)__pyx_kp_u_18)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - /* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None 
- */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.signal.spectral", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.signal.spectral"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto 
invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact) -{ - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (none_allowed && obj == Py_None) return 1; - else if (exact) { - if (Py_TYPE(obj) == type) return 1; - } - else { - if (PyObject_TypeCheck(obj, type)) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { - unsigned int n = 1; - return *(unsigned char*)(&n) != 0; -} - -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; -} __Pyx_BufFmt_Context; - -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - while (type->typegroup == 'S') { - ++ctx->head; - 
ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} - -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t < '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} - -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} - -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case 'b': return "'char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 0: return "end"; - default: return "unparseable format string"; - } -} - -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 
16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif - -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return 
sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; - case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; - case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); - case 'O': return 'O'; - case 'P': return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} - -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset; - if (ctx->enc_type == 0) return 0; - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode 
== '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - } - - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - /* special case -- treat as struct rather than complex number */ - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - - ctx->fmt_offset += size; - - --ctx->enc_count; /* Consume from buffer string */ - - /* Done checking, move to next field, pushing or popping struct stack if needed */ - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; /* breaks both loops as ctx->enc_count == 0 */ - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; /* empty struct */ - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} - -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) 
{ - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case 10: - case 13: - ++ts; - break; - case '<': - if (!__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': /* substruct */ - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - } - break; - case '}': /* end of substruct; either repeat or move on */ - ++ts; - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } /* fall through */ - case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': - if (ctx->enc_type == *ts && got_Z == 
ctx->is_complex && - ctx->enc_packmode == ctx->new_packmode) { - /* Continue pooling same type */ - ctx->enc_count += ctx->new_count; - } else { - /* New type */ - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - } - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - default: - { - int number = __Pyx_BufFmt_ParseNumber(&ts); - if (number == -1) { /* First char was not a digit */ - PyErr_Format(PyExc_ValueError, - "Does not understand character buffer dtype format string ('%c')", *ts); - return NULL; - } - ctx->new_count = (size_t)number; - } - } - } -} - -static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { - if (obj == Py_None || obj == NULL) { - __Pyx_ZeroBuffer(buf); - return 0; - } - buf->buf = NULL; - if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; - if (buf->ndim != nd) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if ((unsigned)buf->itemsize != dtype->size) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_ZeroBuffer(buf); - return -1; -} - -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (info->buf == NULL) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. */ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. 
*/ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - 
"BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? "" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); - else { - 
PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; - } -} - -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject* obj = view->obj; - if (obj) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); - Py_DECREF(obj); - view->obj = NULL; - } -} - -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { - const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(Py_intptr_t) == sizeof(char)) || - (sizeof(Py_intptr_t) == sizeof(short))) { - return 
PyInt_FromLong((long)val); - } else if ((sizeof(Py_intptr_t) == sizeof(int)) || - (sizeof(Py_intptr_t) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), - little, !is_unsigned); - } -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + 
a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - 
z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int 
__Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = 
neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; 
-#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > 
const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = 
PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); - Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - 
#endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = 
PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int 
little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/signal/splinemodule.c b/scipy-0.10.1/scipy/signal/splinemodule.c deleted file mode 100644 index 45384b3353..0000000000 --- a/scipy-0.10.1/scipy/signal/splinemodule.c +++ /dev/null @@ -1,526 +0,0 @@ -#include "Python.h" -#include "numpy/arrayobject.h" -#include - - -#define PYERR(message) do {PyErr_SetString(PyExc_ValueError, message); goto fail;} while(0) -#define DATA(arr) ((arr)->data) -#define DIMS(arr) ((arr)->dimensions) -#define STRIDES(arr) ((arr)->strides) -#define ELSIZE(arr) ((arr)->descr->elsize) -#define OBJECTTYPE(arr) ((arr)->descr->type_num) -#define BASEOBJ(arr) ((PyArrayObject *)((arr)->base)) -#define RANK(arr) ((arr)->nd) -#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS) - -static void convert_strides(npy_intp*,npy_intp*,int,int); - -extern int S_cubic_spline2D(float*,float*,int,int,double,npy_intp*,npy_intp*,float); -extern int S_quadratic_spline2D(float*,float*,int,int,double,npy_intp*,npy_intp*,float); -extern int S_IIR_forback1(float,float,float*,float*,int,int,int,float); -extern int S_IIR_forback2(double,double,float*,float*,int,int,int,float); -extern int S_separable_2Dconvolve_mirror(float*,float*,int,int,float*,float*,int,int,npy_intp*,npy_intp*); - -extern int D_cubic_spline2D(double*,double*,int,int,double,npy_intp*,npy_intp*,double); -extern int 
D_quadratic_spline2D(double*,double*,int,int,double,npy_intp*,npy_intp*,double); -extern int D_IIR_forback1(double,double,double*,double*,int,int,int,double); -extern int D_IIR_forback2(double,double,double*,double*,int,int,int,double); -extern int D_separable_2Dconvolve_mirror(double*,double*,int,int,double*,double*,int,int,npy_intp*,npy_intp*); - -#ifdef __GNUC__ -extern int C_IIR_forback1(__complex__ float,__complex__ float,__complex__ float*,__complex__ float*,int,int,int,float); -extern int C_separable_2Dconvolve_mirror(__complex__ float*,__complex__ float*,int,int,__complex__ float*,__complex__ float*,int,int,npy_intp*,npy_intp*); -extern int Z_IIR_forback1(__complex__ double,__complex__ double,__complex__ double*,__complex__ double*,int,int,int,double); -extern int Z_separable_2Dconvolve_mirror(__complex__ double*,__complex__ double*,int,int,__complex__ double*,__complex__ double*,int,int,npy_intp*,npy_intp*); -#endif - -static void -convert_strides(npy_intp* instrides,npy_intp* convstrides,int size,int N) -{ - int n; npy_intp bitshift; - - bitshift = -1; - - while (size != 0) { - size >>= 1; - bitshift++; - } - for (n = 0; n < N; n++) { - convstrides[n] = instrides[n] >> bitshift; - } -} - - -static char doc_cspline2d[] = "cspline2d(input {, lambda, precision}) -> ck\n" -"\n" -" Description:\n" -"\n" -" Return the third-order B-spline coefficients over a regularly spacedi\n" -" input grid for the two-dimensional input image. The lambda argument\n" -" specifies the amount of smoothing. 
The precision argument allows specifying\n" -" the precision used when computing the infinite sum needed to apply mirror-\n" -" symmetric boundary conditions.\n"; - - -static PyObject *cspline2d(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *image=NULL; - PyArrayObject *a_image=NULL, *ck=NULL; - double lambda = 0.0; - double precision = -1.0; - int thetype, M, N, retval=0; - npy_intp outstrides[2], instrides[2]; - - if (!PyArg_ParseTuple(args, "O|dd", &image, &lambda, &precision)) return NULL; - - thetype = PyArray_ObjectType(image, PyArray_FLOAT); - thetype = NPY_MIN(thetype, PyArray_DOUBLE); - a_image = (PyArrayObject *)PyArray_FromObject(image, thetype, 2, 2); - if (a_image == NULL) goto fail; - - ck = (PyArrayObject *)PyArray_SimpleNew(2,DIMS(a_image),thetype); - if (ck == NULL) goto fail; - M = DIMS(a_image)[0]; - N = DIMS(a_image)[1]; - - convert_strides(STRIDES(a_image), instrides, ELSIZE(a_image), 2); - outstrides[0] = N; - outstrides[1] = 1; - - if (thetype == PyArray_FLOAT) { - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-3; - retval = S_cubic_spline2D((float *)DATA(a_image), (float *)DATA(ck), M, N, lambda, instrides, outstrides, precision); - } - else if (thetype == PyArray_DOUBLE) { - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-6; - retval = D_cubic_spline2D((double *)DATA(a_image), (double *)DATA(ck), M, N, lambda, instrides, outstrides, precision); - } - - if (retval == -3) PYERR("Precision too high. Error did not converge."); - if (retval < 0) PYERR("Problem occurred inside routine"); - - Py_DECREF(a_image); - return PyArray_Return(ck); - - fail: - Py_XDECREF(a_image); - Py_XDECREF(ck); - return NULL; - -} - -static char doc_qspline2d[] = "qspline2d(input {, lambda, precision}) -> qk\n" -"\n" -" Description:\n" -"\n" -" Return the second-order B-spline coefficients over a regularly spaced\n" -" input grid for the two-dimensional input image. The lambda argument\n" -" specifies the amount of smoothing. 
The precision argument allows specifying\n" -" the precision used when computing the infinite sum needed to apply mirror-\n" -" symmetric boundary conditions.\n"; - -static PyObject *qspline2d(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *image=NULL; - PyArrayObject *a_image=NULL, *ck=NULL; - double lambda = 0.0; - double precision = -1.0; - int thetype, M, N, retval=0; - npy_intp outstrides[2], instrides[2]; - - if (!PyArg_ParseTuple(args, "O|dd", &image, &lambda, &precision)) return NULL; - - if (lambda != 0.0) PYERR("Smoothing spline not yet implemented."); - - thetype = PyArray_ObjectType(image, PyArray_FLOAT); - thetype = NPY_MIN(thetype, PyArray_DOUBLE); - a_image = (PyArrayObject *)PyArray_FromObject(image, thetype, 2, 2); - if (a_image == NULL) goto fail; - - ck = (PyArrayObject *)PyArray_SimpleNew(2,DIMS(a_image),thetype); - if (ck == NULL) goto fail; - M = DIMS(a_image)[0]; - N = DIMS(a_image)[1]; - - convert_strides(STRIDES(a_image), instrides, ELSIZE(a_image), 2); - outstrides[0] = N; - outstrides[1] = 1; - - if (thetype == PyArray_FLOAT) { - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-3; - retval = S_quadratic_spline2D((float *)DATA(a_image), (float *)DATA(ck), M, N, lambda, instrides, outstrides, precision); - } - else if (thetype == PyArray_DOUBLE) { - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-6; - retval = D_quadratic_spline2D((double *)DATA(a_image), (double *)DATA(ck), M, N, lambda, instrides, outstrides, precision); - } - - if (retval == -3) PYERR("Precision too high. Error did not converge."); - if (retval < 0) PYERR("Problem occurred inside routine"); - - Py_DECREF(a_image); - return PyArray_Return(ck); - - fail: - Py_XDECREF(a_image); - Py_XDECREF(ck); - return NULL; - -} - -static char doc_FIRsepsym2d[] = " sepfir2d(input, hrow, hcol) -> output\n" -"\n" -" Description:\n" -"\n" -" Convolve the rank-2 input array with the separable filter defined by the\n" -" rank-1 arrays hrow, and hcol. 
Mirror symmetric boundary conditions are\n" -" assumed. This function can be used to find an image given its B-spline\n" -" representation."; - -static PyObject *FIRsepsym2d(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *image=NULL, *hrow=NULL, *hcol=NULL; - PyArrayObject *a_image=NULL, *a_hrow=NULL, *a_hcol=NULL, *out=NULL; - int thetype, M, N, ret; - npy_intp outstrides[2], instrides[2]; - - if (!PyArg_ParseTuple(args, "OOO", &image, &hrow, &hcol)) return NULL; - - thetype = PyArray_ObjectType(image, PyArray_FLOAT); - thetype = NPY_MIN(thetype, PyArray_CDOUBLE); - a_image = (PyArrayObject *)PyArray_FromObject(image, thetype, 2, 2); - a_hrow = (PyArrayObject *)PyArray_ContiguousFromObject(hrow, thetype, 1, 1); - a_hcol = (PyArrayObject *)PyArray_ContiguousFromObject(hcol, thetype, 1, 1); - - if ((a_image == NULL) || (a_hrow == NULL) || (a_hcol==NULL)) goto fail; - - out = (PyArrayObject *)PyArray_SimpleNew(2,DIMS(a_image),thetype); - if (out == NULL) goto fail; - M = DIMS(a_image)[0]; - N = DIMS(a_image)[1]; - - convert_strides(STRIDES(a_image), instrides, ELSIZE(a_image), 2); - outstrides[0] = N; - outstrides[1] = 1; - - switch (thetype) { - case PyArray_FLOAT: - ret = S_separable_2Dconvolve_mirror((float *)DATA(a_image), - (float *)DATA(out), M, N, - (float *)DATA(a_hrow), - (float *)DATA(a_hcol), - DIMS(a_hrow)[0], DIMS(a_hcol)[0], - instrides, outstrides); - break; - case PyArray_DOUBLE: - ret = D_separable_2Dconvolve_mirror((double *)DATA(a_image), - (double *)DATA(out), M, N, - (double *)DATA(a_hrow), - (double *)DATA(a_hcol), - DIMS(a_hrow)[0], DIMS(a_hcol)[0], - instrides, outstrides); - break; -#ifdef __GNUC__ - case PyArray_CFLOAT: - ret = C_separable_2Dconvolve_mirror((__complex__ float *)DATA(a_image), - (__complex__ float *)DATA(out), M, N, - (__complex__ float *)DATA(a_hrow), - (__complex__ float *)DATA(a_hcol), - DIMS(a_hrow)[0], DIMS(a_hcol)[0], - instrides, outstrides); - break; - case PyArray_CDOUBLE: - ret = 
Z_separable_2Dconvolve_mirror((__complex__ double *)DATA(a_image), - (__complex__ double *)DATA(out), M, N, - (__complex__ double *)DATA(a_hrow), - (__complex__ double *)DATA(a_hcol), - DIMS(a_hrow)[0], DIMS(a_hcol)[0], - instrides, outstrides); - break; -#endif - default: - PYERR("Incorrect type."); - } - - if (ret < 0) PYERR("Problem occurred inside routine."); - - Py_DECREF(a_image); - Py_DECREF(a_hrow); - Py_DECREF(a_hcol); - return PyArray_Return(out); - - fail: - Py_XDECREF(a_image); - Py_XDECREF(a_hrow); - Py_XDECREF(a_hcol); - Py_XDECREF(out); - return NULL; - -} - -static char doc_IIRsymorder1[] = " symiirorder1(input, c0, z1 {, precision}) -> output\n" -"\n" -" Description:\n" -"\n" -" Implement a smoothing IIR filter with mirror-symmetric boundary conditions\n" -" using a cascade of first-order sections. The second section uses a\n" -" reversed sequence. This implements a system with the following\n" -" transfer function and mirror-symmetric boundary conditions.\n" -"\n" -" c0 \n" -" H(z) = --------------------- \n" -" (1-z1/z) (1 - z1 z) \n" -"\n" -" The resulting signal will have mirror symmetric boundary conditions as well.\n" -"\n" -" Inputs:\n" -"\n" -" input -- the input signal.\n" -" c0, z1 -- parameters in the transfer function.\n" -" precision -- specifies the precision for calculating initial conditions\n" -" of the recursive filter based on mirror-symmetric input.\n" -"\n" -" Output:\n" -"\n" -" output -- filtered signal."; - -static PyObject *IIRsymorder1(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *sig=NULL; - PyArrayObject *a_sig=NULL, *out=NULL; - Py_complex c0, z1; - double precision = -1.0; - int thetype, N, ret; - npy_intp outstrides, instrides; - - if (!PyArg_ParseTuple(args, "ODD|d", &sig, &c0, &z1, &precision)) - return NULL; - - thetype = PyArray_ObjectType(sig, PyArray_FLOAT); - thetype = NPY_MIN(thetype, PyArray_CDOUBLE); - a_sig = (PyArrayObject *)PyArray_FromObject(sig, thetype, 1, 1); - - if ((a_sig == NULL)) 
goto fail; - - out = (PyArrayObject *)PyArray_SimpleNew(1,DIMS(a_sig),thetype); - if (out == NULL) goto fail; - N = DIMS(a_sig)[0]; - - convert_strides(STRIDES(a_sig), &instrides, ELSIZE(a_sig), 1); - outstrides = 1; - - switch (thetype) { - case PyArray_FLOAT: - { - float rc0 = c0.real; - float rz1 = z1.real; - - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-6; - ret = S_IIR_forback1 (rc0, rz1, (float *)DATA(a_sig), - (float *)DATA(out), N, - instrides, outstrides, (float )precision); - } - break; - case PyArray_DOUBLE: - { - double rc0 = c0.real; - double rz1 = z1.real; - - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-11; - ret = D_IIR_forback1 (rc0, rz1, (double *)DATA(a_sig), - (double *)DATA(out), N, - instrides, outstrides, precision); - } - break; -#ifdef __GNUC__ - case PyArray_CFLOAT: - { - __complex__ float zc0 = c0.real + 1.0i*c0.imag; - __complex__ float zz1 = z1.real + 1.0i*z1.imag; - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-6; - ret = C_IIR_forback1 (zc0, zz1, (__complex__ float *)DATA(a_sig), - (__complex__ float *)DATA(out), N, - instrides, outstrides, (float )precision); - } - break; - case PyArray_CDOUBLE: - { - __complex__ double zc0 = c0.real + 1.0i*c0.imag; - __complex__ double zz1 = z1.real + 1.0i*z1.imag; - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-11; - ret = Z_IIR_forback1 (zc0, zz1, (__complex__ double *)DATA(a_sig), - (__complex__ double *)DATA(out), N, - instrides, outstrides, precision); - } - break; -#endif - default: - PYERR("Incorrect type."); - } - - if (ret == 0) { - Py_DECREF(a_sig); - return PyArray_Return(out); - } - - if (ret == -1) PYERR("Could not allocate enough memory."); - if (ret == -2) PYERR("|z1| must be less than 1.0"); - if (ret == -3) PYERR("Sum to find symmetric boundary conditions did not converge."); - - PYERR("Unknown error."); - - - fail: - Py_XDECREF(a_sig); - Py_XDECREF(out); - return NULL; - -} - -static char doc_IIRsymorder2[] = " 
symiirorder2(input, r, omega {, precision}) -> output\n" -"\n" -" Description:\n" -"\n" -" Implement a smoothing IIR filter with mirror-symmetric boundary conditions\n" -" using a cascade of second-order sections. The second section uses a\n" -" reversed sequence. This implements the following transfer function:\n" -"\n" -" cs^2\n" -" H(z) = ---------------------------------------\n" -" (1 - a2/z - a3/z^2) (1 - a2 z - a3 z^2 )\n" -"\n" -" where a2 = (2 r cos omega)\n" -" a3 = - r^2\n" -" cs = 1 - 2 r cos omega + r^2\n" -"\n" -" Inputs:\n" -"\n" -" input -- the input signal.\n" -" r, omega -- parameters in the transfer function.\n" -" precision -- specifies the precision for calculating initial conditions\n" -" of the recursive filter based on mirror-symmetric input.\n" -"\n" -" Output:\n" -"\n" -" output -- filtered signal.\n"; - -static PyObject *IIRsymorder2(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *sig=NULL; - PyArrayObject *a_sig=NULL, *out=NULL; - double r, omega; - double precision = -1.0; - int thetype, N, ret; - npy_intp outstrides, instrides; - - if (!PyArg_ParseTuple(args, "Odd|d", &sig, &r, &omega, &precision)) - return NULL; - - thetype = PyArray_ObjectType(sig, PyArray_FLOAT); - thetype = NPY_MIN(thetype, PyArray_DOUBLE); - a_sig = (PyArrayObject *)PyArray_FromObject(sig, thetype, 1, 1); - - if ((a_sig == NULL)) goto fail; - - out = (PyArrayObject *)PyArray_SimpleNew(1,DIMS(a_sig),thetype); - if (out == NULL) goto fail; - N = DIMS(a_sig)[0]; - - convert_strides(STRIDES(a_sig), &instrides, ELSIZE(a_sig), 1); - outstrides = 1; - - switch (thetype) { - case PyArray_FLOAT: - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-6; - ret = S_IIR_forback2 (r, omega, (float *)DATA(a_sig), - (float *)DATA(out), N, - instrides, outstrides, precision); - break; - case PyArray_DOUBLE: - if ((precision <= 0.0) || (precision > 1.0)) precision = 1e-11; - ret = D_IIR_forback2 (r, omega, (double *)DATA(a_sig), - (double *)DATA(out), N, - 
instrides, outstrides, precision); - break; - default: - PYERR("Incorrect type."); - } - - if (ret < 0) PYERR("Problem occurred inside routine."); - - Py_DECREF(a_sig); - return PyArray_Return(out); - - fail: - Py_XDECREF(a_sig); - Py_XDECREF(out); - return NULL; - -} - - -static struct PyMethodDef toolbox_module_methods[] = { - {"cspline2d", cspline2d, METH_VARARGS, doc_cspline2d}, - {"qspline2d", qspline2d, METH_VARARGS, doc_qspline2d}, - {"sepfir2d", FIRsepsym2d, METH_VARARGS, doc_FIRsepsym2d}, - {"symiirorder1", IIRsymorder1, METH_VARARGS, doc_IIRsymorder1}, - {"symiirorder2", IIRsymorder2, METH_VARARGS, doc_IIRsymorder2}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -/* Initialization function for the module (*must* be called initXXXXX) */ -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "spline", - NULL, - -1, - toolbox_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit_spline(void) -{ - PyObject *m, *d, *s; - - m = PyModule_Create(&moduledef); - import_array(); - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - - s = PyUnicode_FromString("0.2"); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - - /* Check for errors */ - if (PyErr_Occurred()) { - Py_FatalError("can't initialize module array"); - } - return m; -} -#else -PyMODINIT_FUNC initspline(void) { - PyObject *m, *d, *s; - - /* Create the module and add the functions */ - m = Py_InitModule("spline", toolbox_module_methods); - - /* Import the C API function pointers for the Array Object*/ - import_array(); - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - - s = PyString_FromString("0.2"); - PyDict_SetItemString(d, "__version__", s); - Py_DECREF(s); - - /* Check for errors */ - if (PyErr_Occurred()) - Py_FatalError("can't initialize module array"); -} -#endif diff --git a/scipy-0.10.1/scipy/signal/tests/test_array_tools.py 
b/scipy-0.10.1/scipy/signal/tests/test_array_tools.py deleted file mode 100644 index 93c45db837..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_array_tools.py +++ /dev/null @@ -1,99 +0,0 @@ - -import numpy as np - -from numpy.testing import TestCase, run_module_suite, \ - assert_array_equal, assert_raises - -from scipy.signal._arraytools import axis_slice, axis_reverse, \ - odd_ext, even_ext, const_ext - - -class TestArrayTools(TestCase): - - def test_axis_slice(self): - a = np.arange(12).reshape(3, 4) - - s = axis_slice(a, start=0, stop=1, axis=0) - assert_array_equal(s, a[0:1, :]) - - s = axis_slice(a, start=-1, axis=0) - assert_array_equal(s, a[-1:, :]) - - s = axis_slice(a, start=0, stop=1, axis=1) - assert_array_equal(s, a[:, 0:1]) - - s = axis_slice(a, start=-1, axis=1) - assert_array_equal(s, a[:, -1:]) - - s = axis_slice(a, start=0, step=2, axis=0) - assert_array_equal(s, a[::2, :]) - - s = axis_slice(a, start=0, step=2, axis=1) - assert_array_equal(s, a[:, ::2]) - - def test_axis_reverse(self): - a = np.arange(12).reshape(3, 4) - - r = axis_reverse(a, axis=0) - assert_array_equal(r, a[::-1, :]) - - r = axis_reverse(a, axis=1) - assert_array_equal(r, a[:, ::-1]) - - def test_odd_ext(self): - a = np.array([[1, 2, 3, 4, 5], - [9, 8, 7, 6, 5]]) - - odd = odd_ext(a, 2, axis=1) - expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], - [11, 10, 9, 8, 7, 6, 5, 4, 3]]) - assert_array_equal(odd, expected) - - odd = odd_ext(a, 1, axis=0) - expected = np.array([[-7, -4, -1, 2, 5], - [ 1, 2, 3, 4, 5], - [ 9, 8, 7, 6, 5], - [17, 14, 11, 8, 5]]) - assert_array_equal(odd, expected) - - assert_raises(ValueError, odd_ext, a, 2, axis=0) - assert_raises(ValueError, odd_ext, a, 5, axis=1) - - def test_even_ext(self): - a = np.array([[1, 2, 3, 4, 5], - [9, 8, 7, 6, 5]]) - - even = even_ext(a, 2, axis=1) - expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3], - [7, 8, 9, 8, 7, 6, 5, 6, 7]]) - assert_array_equal(even, expected) - - even = even_ext(a, 1, axis=0) - expected = 
np.array([[ 9, 8, 7, 6, 5], - [ 1, 2, 3, 4, 5], - [ 9, 8, 7, 6, 5], - [ 1, 2, 3, 4, 5]]) - assert_array_equal(even, expected) - - assert_raises(ValueError, even_ext, a, 2, axis=0) - assert_raises(ValueError, even_ext, a, 5, axis=1) - - def test_const_ext(self): - a = np.array([[1, 2, 3, 4, 5], - [9, 8, 7, 6, 5]]) - - const = const_ext(a, 2, axis=1) - expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5], - [9, 9, 9, 8, 7, 6, 5, 5, 5]]) - assert_array_equal(const, expected) - - const = const_ext(a, 1, axis=0) - expected = np.array([[ 1, 2, 3, 4, 5], - [ 1, 2, 3, 4, 5], - [ 9, 8, 7, 6, 5], - [ 9, 8, 7, 6, 5]]) - assert_array_equal(const, expected) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_cont2discrete.py b/scipy-0.10.1/scipy/signal/tests/test_cont2discrete.py deleted file mode 100644 index 13429077d0..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_cont2discrete.py +++ /dev/null @@ -1,272 +0,0 @@ -import numpy as np -from numpy.testing import TestCase, run_module_suite, \ - assert_array_almost_equal, assert_almost_equal, \ - assert_allclose - -from scipy.signal import cont2discrete as c2d -from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2 - -# Author: Jeffrey Armstrong -# March 29, 2011 - - -class TestC2D(TestCase): - def test_zoh(self): - ac = np.eye(2) - bc = 0.5 * np.ones((2, 1)) - cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) - dc = np.array([[0.0], [0.0], [-0.33]]) - - ad_truth = 1.648721270700128 * np.eye(2) - bd_truth = 0.324360635350064 * np.ones((2, 1)) - # c and d in discrete should be equal to their continuous counterparts - dt_requested = 0.5 - - ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh') - - assert_array_almost_equal(ad_truth, ad) - assert_array_almost_equal(bd_truth, bd) - assert_array_almost_equal(cc, cd) - assert_array_almost_equal(dc, dd) - assert_almost_equal(dt_requested, dt) - - def test_gbt(self): - ac = np.eye(2) - bc = 0.5 * np.ones((2, 1)) - 
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) - dc = np.array([[0.0], [0.0], [-0.33]]) - - dt_requested = 0.5 - alpha = 1.0 / 3.0 - - ad_truth = 1.6 * np.eye(2) - bd_truth = 0.3 * np.ones((2, 1)) - cd_truth = np.array([[0.9, 1.2], - [1.2, 1.2], - [1.2, 0.3]]) - dd_truth = np.array([[0.175], - [0.2], - [-0.205]]) - - ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, - method='gbt', alpha=alpha) - - assert_array_almost_equal(ad_truth, ad) - assert_array_almost_equal(bd_truth, bd) - assert_array_almost_equal(cd_truth, cd) - assert_array_almost_equal(dd_truth, dd) - - def test_euler(self): - ac = np.eye(2) - bc = 0.5 * np.ones((2, 1)) - cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) - dc = np.array([[0.0], [0.0], [-0.33]]) - - dt_requested = 0.5 - - ad_truth = 1.5 * np.eye(2) - bd_truth = 0.25 * np.ones((2, 1)) - cd_truth = np.array([[0.75, 1.0], - [1.0, 1.0], - [1.0, 0.25]]) - dd_truth = dc - - ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, - method='euler') - - assert_array_almost_equal(ad_truth, ad) - assert_array_almost_equal(bd_truth, bd) - assert_array_almost_equal(cd_truth, cd) - assert_array_almost_equal(dd_truth, dd) - assert_almost_equal(dt_requested, dt) - - def test_backward_diff(self): - ac = np.eye(2) - bc = 0.5 * np.ones((2, 1)) - cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) - dc = np.array([[0.0], [0.0], [-0.33]]) - - dt_requested = 0.5 - - ad_truth = 2.0 * np.eye(2) - bd_truth = 0.5 * np.ones((2, 1)) - cd_truth = np.array([[1.5, 2.0], - [2.0, 2.0], - [2.0, 0.5]]) - dd_truth = np.array([[0.875], - [1.0], - [0.295]]) - - ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, - method='backward_diff') - - assert_array_almost_equal(ad_truth, ad) - assert_array_almost_equal(bd_truth, bd) - assert_array_almost_equal(cd_truth, cd) - assert_array_almost_equal(dd_truth, dd) - - def test_bilinear(self): - ac = np.eye(2) - bc = 0.5 * np.ones((2, 1)) - cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) - dc = 
np.array([[0.0], [0.0], [-0.33]]) - - dt_requested = 0.5 - - ad_truth = (5.0 / 3.0) * np.eye(2) - bd_truth = (1.0 / 3.0) * np.ones((2, 1)) - cd_truth = np.array([[1.0, 4.0 / 3.0], - [4.0 / 3.0, 4.0 / 3.0], - [4.0 / 3.0, 1.0 / 3.0]]) - dd_truth = np.array([[0.291666666666667], - [1.0 / 3.0], - [-0.121666666666667]]) - - ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, - method='bilinear') - - assert_array_almost_equal(ad_truth, ad) - assert_array_almost_equal(bd_truth, bd) - assert_array_almost_equal(cd_truth, cd) - assert_array_almost_equal(dd_truth, dd) - assert_almost_equal(dt_requested, dt) - - # Same continuous system again, but change sampling rate - - ad_truth = 1.4 * np.eye(2) - bd_truth = 0.2 * np.ones((2, 1)) - cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) - dd_truth = np.array([[0.175], [0.2], [-0.205]]) - - dt_requested = 1.0 / 3.0 - - ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, - method='bilinear') - - assert_array_almost_equal(ad_truth, ad) - assert_array_almost_equal(bd_truth, bd) - assert_array_almost_equal(cd_truth, cd) - assert_array_almost_equal(dd_truth, dd) - assert_almost_equal(dt_requested, dt) - - def test_transferfunction(self): - numc = np.array([0.25, 0.25, 0.5]) - denc = np.array([0.75, 0.75, 1.0]) - - numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]]) - dend = np.array([1.0, -1.351394049721225, 0.606530659712634]) - - dt_requested = 0.5 - - num, den, dt = c2d((numc, denc), dt_requested, method='zoh') - - assert_array_almost_equal(numd, num) - assert_array_almost_equal(dend, den) - assert_almost_equal(dt_requested, dt) - - def test_zerospolesgain(self): - zeros_c = np.array([0.5, -0.5]) - poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) - k_c = 1.0 - - zeros_d = [1.23371727305860, 0.735356894461267] - polls_d = [0.938148335039729 + 0.346233593780536j, - 0.938148335039729 - 0.346233593780536j] - k_d = 1.0 - - dt_requested = 0.5 - - zeros, poles, k, dt = c2d((zeros_c, poles_c, 
k_c), dt_requested, - method='zoh') - - assert_array_almost_equal(zeros_d, zeros) - assert_array_almost_equal(polls_d, poles) - assert_almost_equal(k_d, k) - assert_almost_equal(dt_requested, dt) - - def test_gbt_with_sio_tf_and_zpk(self): - """Test method='gbt' with alpha=0.25 for tf and zpk cases.""" - # State space coefficients for the continuous SIO system. - A = -1.0 - B = 1.0 - C = 1.0 - D = 0.5 - - # The continuous transfer function coefficients. - cnum, cden = ss2tf(A, B, C, D) - - # Continuous zpk representation - cz, cp, ck = ss2zpk(A, B, C, D) - - h = 1.0 - alpha = 0.25 - - # Explicit formulas, in the scalar case. - Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A) - Bd = h * B / (1 - alpha * h * A) - Cd = C / (1 - alpha * h * A) - Dd = D + alpha * C * Bd - - # Convert the explicit solution to tf - dnum, dden = ss2tf(Ad, Bd, Cd, Dd) - - # Compute the discrete tf using cont2discrete. - c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha) - - assert_allclose(dnum, c2dnum) - assert_allclose(dden, c2dden) - - # Convert explicit solution to zpk. - dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd) - - # Compute the discrete zpk using cont2discrete. - c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha) - - assert_allclose(dz, c2dz) - assert_allclose(dp, c2dp) - assert_allclose(dk, c2dk) - - def test_discrete_approx(self): - """ - Test that the solution to the discrete approximation of a continuous - system actually approximates the solution to the continuous sytem. - This is an indirect test of the correctness of the implementation - of cont2discrete. - """ - - def u(t): - return np.sin(2.5 * t) - - a = np.array([[-0.01]]) - b = np.array([[1.0]]) - c = np.array([[1.0]]) - d = np.array([[0.2]]) - x0 = 1.0 - - t = np.linspace(0, 10.0, 101) - dt = t[1] - t[0] - u1 = u(t) - - # Use lsim2 to compute the solution to the continuous system. 
- t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0, - rtol=1e-9, atol=1e-11) - - # Convert the continuous system to a discrete approximation. - dsys = c2d((a, b, c, d), dt, method='bilinear') - - # Use dlsim with the pairwise averaged input to compute the output - # of the discrete system. - u2 = 0.5 * (u1[:-1] + u1[1:]) - t2 = t[:-1] - td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0) - - # ymid is the average of consecutive terms of the "exact" output - # computed by lsim2. This is what the discrete approximation - # actually approximates. - ymid = 0.5 * (yout[:-1] + yout[1:]) - - assert_allclose(yd2.ravel(), ymid, rtol=1e-4) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_dltisys.py b/scipy-0.10.1/scipy/signal/tests/test_dltisys.py deleted file mode 100644 index 22c601b5c4..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_dltisys.py +++ /dev/null @@ -1,258 +0,0 @@ - -# Author: Jeffrey Armstrong -# April 4, 2011 - -import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_array_almost_equal, assert_array_equal, \ - assert_allclose -from scipy.signal import dlsim, dstep, dimpulse, tf2zpk - - -class TestDLTI(TestCase): - - def test_dlsim(self): - - a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) - b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) - c = np.asarray([[0.1, 0.3]]) - d = np.asarray([[0.0, -0.1, 0.0]]) - dt = 0.5 - - # Create an input matrix with inputs down the columns (3 cols) and its - # respective time input vector - u = np.hstack((np.asmatrix(np.linspace(0, 4.0, num=5)).transpose(), - 0.01 * np.ones((5, 1)), - -0.002 * np.ones((5, 1)))) - t_in = np.linspace(0, 2.0, num=5) - - # Define the known result - yout_truth = np.asmatrix([-0.001, - -0.00073, - 0.039446, - 0.0915387, - 0.13195948]).transpose() - xout_truth = np.asarray([[0, 0], - [0.0012, 0.0005], - [0.40233, 0.00071], - [1.163368, -0.079327], - [2.2402985, -0.3035679]]) - - tout, 
yout, xout = dlsim((a, b, c, d, dt), u, t_in) - - assert_array_almost_equal(yout_truth, yout) - assert_array_almost_equal(xout_truth, xout) - assert_array_almost_equal(t_in, tout) - - # Interpolated control - inputs should have different time steps - # than the discrete model uses internally - u_sparse = u[[0, 4], :] - t_sparse = np.asarray([0.0, 2.0]) - - tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse) - - assert_array_almost_equal(yout_truth, yout) - assert_array_almost_equal(xout_truth, xout) - assert_equal(len(tout), yout.shape[0]) - - # Transfer functions (assume dt = 0.5) - num = np.asarray([1.0, -0.1]) - den = np.asarray([0.3, 1.0, 0.2]) - yout_truth = np.asmatrix([0.0, - 0.0, - 3.33333333333333, - -4.77777777777778, - 23.0370370370370]).transpose() - - # Assume use of the first column of the control input built earlier - tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in) - - assert_array_almost_equal(yout, yout_truth) - assert_array_almost_equal(t_in, tout) - - # Retest the same with a 1-D input vector - uflat = np.asarray(u[:, 0]) - uflat = uflat.reshape((5,)) - tout, yout = dlsim((num, den, 0.5), uflat, t_in) - - assert_array_almost_equal(yout, yout_truth) - assert_array_almost_equal(t_in, tout) - - # zeros-poles-gain representation - zd = np.array([0.5, -0.5]) - pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) - k = 1.0 - yout_truth = np.asmatrix([0.0, 1.0, 2.0, 2.25, 2.5]).transpose() - - tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in) - - assert_array_almost_equal(yout, yout_truth) - assert_array_almost_equal(t_in, tout) - - def test_dstep(self): - - a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) - b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) - c = np.asarray([[0.1, 0.3]]) - d = np.asarray([[0.0, -0.1, 0.0]]) - dt = 0.5 - - # Because b.shape[1] == 3, dstep should result in a tuple of three - # result vectors - yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956, - -0.036324, -0.093318, -0.15782348, - -0.226628324, 
-0.2969374948]), - np.asarray([-0.1, -0.075, -0.058, -0.04815, - -0.04453, -0.0461895, -0.0521812, - -0.061588875, -0.073549579, - -0.08727047595]), - np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239, - 0.009081, 0.0233295, 0.03945587, - 0.056657081, 0.0742343737])) - - tout, yout = dstep((a, b, c, d, dt), n=10) - - assert_equal(len(yout), 3) - - for i in range(0, len(yout)): - assert_equal(yout[i].shape[0], 10) - assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i]) - - # Check that the other two inputs (tf, zpk) will work as well - tfin = ([1.0], [1.0, 1.0], 0.5) - yout_tfstep = np.asarray([0.0, 1.0, 0.0]) - tout, yout = dstep(tfin, n=3) - assert_equal(len(yout), 1) - assert_array_almost_equal(yout[0].flatten(), yout_tfstep) - - zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) - tout, yout = dstep(zpkin, n=3) - assert_equal(len(yout), 1) - assert_array_almost_equal(yout[0].flatten(), yout_tfstep) - - def test_dimpulse(self): - - a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) - b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) - c = np.asarray([[0.1, 0.3]]) - d = np.asarray([[0.0, -0.1, 0.0]]) - dt = 0.5 - - # Because b.shape[1] == 3, dimpulse should result in a tuple of three - # result vectors - yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084, - -0.045884, -0.056994, -0.06450548, - -0.068804844, -0.0703091708]), - np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362, - -0.0016595, -0.0059917, -0.009407675, - -0.011960704, -0.01372089695]), - np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771, - 0.011471, 0.0142485, 0.01612637, - 0.017201211, 0.0175772927])) - - tout, yout = dimpulse((a, b, c, d, dt), n=10) - - assert_equal(len(yout), 3) - - for i in range(0, len(yout)): - assert_equal(yout[i].shape[0], 10) - assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i]) - - # Check that the other two inputs (tf, zpk) will work as well - tfin = ([1.0], [1.0, 1.0], 0.5) - yout_tfimpulse = np.asarray([0.0, 1.0, -1.0]) - tout, yout = dimpulse(tfin, 
n=3) - assert_equal(len(yout), 1) - assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) - - zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) - tout, yout = dimpulse(zpkin, n=3) - assert_equal(len(yout), 1) - assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) - - def test_dlsim_trivial(self): - a = np.array([[0.0]]) - b = np.array([[0.0]]) - c = np.array([[0.0]]) - d = np.array([[0.0]]) - n = 5 - u = np.zeros(n).reshape(-1, 1) - tout, yout, xout = dlsim((a, b, c, d, 1), u) - assert_array_equal(tout, np.arange(float(n))) - assert_array_equal(yout, np.zeros((n, 1))) - assert_array_equal(xout, np.zeros((n, 1))) - - def test_dlsim_simple1d(self): - a = np.array([[0.5]]) - b = np.array([[0.0]]) - c = np.array([[1.0]]) - d = np.array([[0.0]]) - n = 5 - u = np.zeros(n).reshape(-1, 1) - tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) - assert_array_equal(tout, np.arange(float(n))) - expected = (0.5 ** np.arange(float(n))).reshape(-1, 1) - assert_array_equal(yout, expected) - assert_array_equal(xout, expected) - - def test_dlsim_simple2d(self): - lambda1 = 0.5 - lambda2 = 0.25 - a = np.array([[lambda1, 0.0], - [0.0, lambda2]]) - b = np.array([[0.0], - [0.0]]) - c = np.array([[1.0, 0.0], - [0.0, 1.0]]) - d = np.array([[0.0], - [0.0]]) - n = 5 - u = np.zeros(n).reshape(-1, 1) - tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) - assert_array_equal(tout, np.arange(float(n))) - # The analytical solution: - expected = (np.array([lambda1, lambda2]) ** - np.arange(float(n)).reshape(-1, 1)) - assert_array_equal(yout, expected) - assert_array_equal(xout, expected) - - def test_more_step_and_impulse(self): - lambda1 = 0.5 - lambda2 = 0.75 - a = np.array([[lambda1, 0.0], - [0.0, lambda2]]) - b = np.array([[1.0, 0.0], - [0.0, 1.0]]) - c = np.array([[1.0, 1.0]]) - d = np.array([[0.0, 0.0]]) - - n = 10 - - # Check a step response. - ts, ys = dstep((a, b, c, d, 1), n=n) - - # Create the exact step response. 
- stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n)) - stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n)) - - assert_allclose(ys[0][:, 0], stp0) - assert_allclose(ys[1][:, 0], stp1) - - # Check an impulse response with an initial condition. - x0 = np.array([1.0, 1.0]) - ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0) - - # Create the exact impulse response. - imp = (np.array([lambda1, lambda2]) ** - np.arange(-1, n + 1).reshape(-1, 1)) - imp[0, :] = 0.0 - # Analytical solution to impulse response - y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0) - y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0) - - assert_allclose(yi[0][:, 0], y0) - assert_allclose(yi[1][:, 0], y1) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_filter_design.py b/scipy-0.10.1/scipy/signal/tests/test_filter_design.py deleted file mode 100644 index d90547d3d6..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_filter_design.py +++ /dev/null @@ -1,91 +0,0 @@ -import warnings - -import numpy as np -from numpy.testing import TestCase, assert_array_almost_equal, \ - assert_array_equal, assert_raises, assert_equal, assert_, \ - run_module_suite - -from scipy.signal import tf2zpk, zpk2tf, BadCoefficients, freqz - - -class TestTf2zpk(TestCase): - - def test_simple(self): - z_r = np.array([0.5, -0.5]) - p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) - # Sort the zeros/poles so that we don't fail the test if the order - # changes - z_r.sort() - p_r.sort() - b = np.poly(z_r) - a = np.poly(p_r) - - z, p, k = tf2zpk(b, a) - z.sort() - p.sort() - assert_array_almost_equal(z, z_r) - assert_array_almost_equal(p, p_r) - - def test_bad_filter(self): - """Regression test for #651: better handling of badly conditioned - filter coefficients.""" - warnings.simplefilter("error", BadCoefficients) - try: - assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0]) - finally: - warnings.simplefilter("always", BadCoefficients) - - -class 
TestZpk2Tf(TestCase): - - def test_identity(self): - """Test the identity transfer function.""" - z = [] - p = [] - k = 1. - b, a = zpk2tf(z, p, k) - b_r = np.array([1.]) # desired result - a_r = np.array([1.]) # desired result - # The test for the *type* of the return values is a regression - # test for ticket #1095. In the case p=[], zpk2tf used to - # return the scalar 1.0 instead of array([1.0]). - assert_array_equal(b, b_r) - assert_(isinstance(b, np.ndarray)) - assert_array_equal(a, a_r) - assert_(isinstance(a, np.ndarray)) - - -class TestFreqz(TestCase): - - def test_ticket1441(self): - """Regression test for ticket 1441.""" - # Because freqz previously used arange instead of linspace, - # when N was large, it would return one more point than - # requested. - N = 100000 - w, h = freqz([1.0], worN=N) - assert_equal(w.shape, (N,)) - - def test_basic(self): - w, h = freqz([1.0], worN=8) - assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) - assert_array_almost_equal(h, np.ones(8)) - - def test_basic_whole(self): - w, h = freqz([1.0], worN=8, whole=True) - assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) - assert_array_almost_equal(h, np.ones(8)) - - def test_plot(self): - - def plot(w, h): - assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) - assert_array_almost_equal(h, np.ones(8)) - - assert_raises(ZeroDivisionError, - freqz, [1.0], worN=8, plot=lambda w, h: 1 / 0) - freqz([1.0], worN=8, plot=plot) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_fir_filter_design.py b/scipy-0.10.1/scipy/signal/tests/test_fir_filter_design.py deleted file mode 100644 index 7cab1fa0c1..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_fir_filter_design.py +++ /dev/null @@ -1,397 +0,0 @@ - -import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_raises, \ - assert_almost_equal, assert_array_almost_equal, assert_equal, \ - assert_ -from scipy.special import sinc - -from 
scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \ - firwin, firwin2, freqz, remez - - -def test_kaiser_beta(): - b = kaiser_beta(58.7) - assert_almost_equal(b, 0.1102 * 50.0) - b = kaiser_beta(22.0) - assert_almost_equal(b, 0.5842 + 0.07886) - b = kaiser_beta(21.0) - assert_equal(b, 0.0) - b = kaiser_beta(10.0) - assert_equal(b, 0.0) - - -def test_kaiser_atten(): - a = kaiser_atten(1, 1.0) - assert_equal(a, 7.95) - a = kaiser_atten(2, 1/np.pi) - assert_equal(a, 2.285 + 7.95) - - -def test_kaiserord(): - assert_raises(ValueError, kaiserord, 1.0, 1.0) - numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi) - assert_equal((numtaps, beta), (2, 0.0)) - - -class TestFirwin(TestCase): - - def check_response(self, h, expected_response, tol=.05): - N = len(h) - alpha = 0.5 * (N-1) - m = np.arange(0,N) - alpha # time indices of taps - for freq, expected in expected_response: - actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq))) - mse = abs(actual-expected)**2 - self.assertTrue(mse < tol, 'response not as expected, mse=%g > %g'\ - %(mse, tol)) - - def test_response(self): - N = 51 - f = .5 - # increase length just to try even/odd - h = firwin(N, f) # low-pass from 0 to f - self.check_response(h, [(.25,1), (.75,0)]) - - h = firwin(N+1, f, window='nuttall') # specific window - self.check_response(h, [(.25,1), (.75,0)]) - - h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass - self.check_response(h, [(.25,0), (.75,1)]) - - f1, f2, f3, f4 = .2, .4, .6, .8 - h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter - self.check_response(h, [(.1,0), (.3,1), (.5,0)]) - - h = firwin(N+4, [f1, f2]) # band-stop filter - self.check_response(h, [(.1,1), (.3,0), (.5,1)]) - - h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False) - self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)]) - - h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter - self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)]) - - h = firwin(N+7, 0.1, 
width=.03) # low-pass - self.check_response(h, [(.05,1), (.75,0)]) - - h = firwin(N+8, 0.1, pass_zero=False) # high-pass - self.check_response(h, [(.05,0), (.75,1)]) - - def mse(self, h, bands): - """Compute mean squared error versus ideal response across frequency - band. - h -- coefficients - bands -- list of (left, right) tuples relative to 1==Nyquist of - passbands - """ - w, H = freqz(h, worN=1024) - f = w/np.pi - passIndicator = np.zeros(len(w), bool) - for left, right in bands: - passIndicator |= (f>=left) & (f a) * (f < 0.5-a) - assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity") - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_ltisys.py b/scipy-0.10.1/scipy/signal/tests/test_ltisys.py deleted file mode 100644 index e2fb95c8de..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_ltisys.py +++ /dev/null @@ -1,221 +0,0 @@ - -import warnings - -import numpy as np -from numpy.testing import assert_almost_equal, assert_equal, run_module_suite - -from scipy.signal.ltisys import ss2tf, lsim2, impulse2, step2, lti -from scipy.signal.filter_design import BadCoefficients - -class TestSS2TF: - def tst_matrix_shapes(self, p, q, r): - ss2tf(np.zeros((p, p)), - np.zeros((p, q)), - np.zeros((r, p)), - np.zeros((r, q)), 0) - - def test_basic(self): - for p, q, r in [ - (3, 3, 3), - (1, 3, 3), - (1, 1, 1)]: - yield self.tst_matrix_shapes, p, q, r - - -class Test_lsim2(object): - - def test_01(self): - t = np.linspace(0,10,1001) - u = np.zeros_like(t) - # First order system: x'(t) + x(t) = u(t), x(0) = 1. - # Exact solution is x(t) = exp(-t). 
- system = ([1.0],[1.0,1.0]) - tout, y, x = lsim2(system, u, t, X0=[1.0]) - expected_x = np.exp(-tout) - assert_almost_equal(x[:,0], expected_x) - - def test_02(self): - t = np.array([0.0, 1.0, 1.0, 3.0]) - u = np.array([0.0, 0.0, 1.0, 1.0]) - # Simple integrator: x'(t) = u(t) - system = ([1.0],[1.0,0.0]) - tout, y, x = lsim2(system, u, t, X0=[1.0]) - expected_x = np.maximum(1.0, tout) - assert_almost_equal(x[:,0], expected_x) - - def test_03(self): - t = np.array([0.0, 1.0, 1.0, 1.1, 1.1, 2.0]) - u = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]) - # Simple integrator: x'(t) = u(t) - system = ([1.0],[1.0, 0.0]) - tout, y, x = lsim2(system, u, t, hmax=0.01) - expected_x = np.array([0.0, 0.0, 0.0, 0.1, 0.1, 0.1]) - assert_almost_equal(x[:,0], expected_x) - - def test_04(self): - t = np.linspace(0, 10, 1001) - u = np.zeros_like(t) - # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. - # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution - # is (1-t)*exp(-t). - system = ([1.0], [1.0, 2.0, 1.0]) - tout, y, x = lsim2(system, u, t, X0=[1.0, 0.0]) - expected_x = (1.0 - tout) * np.exp(-tout) - assert_almost_equal(x[:,0], expected_x) - - def test_05(self): - # The call to lsim2 triggers a "BadCoefficients" warning from - # scipy.signal.filter_design, but the test passes. I think the warning - # is related to the incomplete handling of multi-input systems in - # scipy.signal. - - # A system with two state variables, two inputs, and one output. 
- A = np.array([[-1.0, 0.0], [0.0, -2.0]]) - B = np.array([[1.0, 0.0], [0.0, 1.0]]) - C = np.array([1.0, 0.0]) - D = np.zeros((1,2)) - - t = np.linspace(0, 10.0, 101) - warnings.simplefilter("ignore", BadCoefficients) - try: - tout, y, x = lsim2((A,B,C,D), T=t, X0=[1.0, 1.0]) - finally: - del warnings.filters[0] - expected_y = np.exp(-tout) - expected_x0 = np.exp(-tout) - expected_x1 = np.exp(-2.0*tout) - assert_almost_equal(y, expected_y) - assert_almost_equal(x[:,0], expected_x0) - assert_almost_equal(x[:,1], expected_x1) - - def test_06(self): - """Test use of the default values of the arguments `T` and `U`.""" - # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. - # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution - # is (1-t)*exp(-t). - system = ([1.0], [1.0, 2.0, 1.0]) - tout, y, x = lsim2(system, X0=[1.0, 0.0]) - expected_x = (1.0 - tout) * np.exp(-tout) - assert_almost_equal(x[:,0], expected_x) - -class Test_impulse2(object): - - def test_01(self): - # First order system: x'(t) + x(t) = u(t) - # Exact impulse response is x(t) = exp(-t). - system = ([1.0],[1.0,1.0]) - tout, y = impulse2(system) - expected_y = np.exp(-tout) - assert_almost_equal(y, expected_y) - - def test_02(self): - """Specify the desired time values for the output.""" - - # First order system: x'(t) + x(t) = u(t) - # Exact impulse response is x(t) = exp(-t). - system = ([1.0],[1.0,1.0]) - n = 21 - t = np.linspace(0, 2.0, n) - tout, y = impulse2(system, T=t) - assert_equal(tout.shape, (n,)) - assert_almost_equal(tout, t) - expected_y = np.exp(-t) - assert_almost_equal(y, expected_y) - - def test_03(self): - """Specify an initial condition as a scalar.""" - - # First order system: x'(t) + x(t) = u(t), x(0)=3.0 - # Exact impulse response is x(t) = 4*exp(-t). 
- system = ([1.0],[1.0,1.0]) - tout, y = impulse2(system, X0=3.0) - expected_y = 4.0*np.exp(-tout) - assert_almost_equal(y, expected_y) - - def test_04(self): - """Specify an initial condition as a list.""" - - # First order system: x'(t) + x(t) = u(t), x(0)=3.0 - # Exact impulse response is x(t) = 4*exp(-t). - system = ([1.0],[1.0,1.0]) - tout, y = impulse2(system, X0=[3.0]) - expected_y = 4.0*np.exp(-tout) - assert_almost_equal(y, expected_y) - - def test_05(self): - # Simple integrator: x'(t) = u(t) - system = ([1.0],[1.0,0.0]) - tout, y = impulse2(system) - expected_y = np.ones_like(tout) - assert_almost_equal(y, expected_y) - - def test_06(self): - # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) - # The exact impulse response is t*exp(-t). - system = ([1.0], [1.0, 2.0, 1.0]) - tout, y = impulse2(system) - expected_y = tout * np.exp(-tout) - assert_almost_equal(y, expected_y) - -class Test_step2(object): - - def test_01(self): - # First order system: x'(t) + x(t) = u(t) - # Exact step response is x(t) = 1 - exp(-t). - system = ([1.0],[1.0,1.0]) - tout, y = step2(system) - expected_y = 1.0 - np.exp(-tout) - assert_almost_equal(y, expected_y) - - def test_02(self): - """Specify the desired time values for the output.""" - - # First order system: x'(t) + x(t) = u(t) - # Exact step response is x(t) = 1 - exp(-t). - system = ([1.0],[1.0,1.0]) - n = 21 - t = np.linspace(0, 2.0, n) - tout, y = step2(system, T=t) - assert_equal(tout.shape, (n,)) - assert_almost_equal(tout, t) - expected_y = 1 - np.exp(-t) - assert_almost_equal(y, expected_y) - - def test_03(self): - """Specify an initial condition as a scalar.""" - - # First order system: x'(t) + x(t) = u(t), x(0)=3.0 - # Exact step response is x(t) = 1 + 2*exp(-t). 
- system = ([1.0],[1.0,1.0]) - tout, y = step2(system, X0=3.0) - expected_y = 1 + 2.0*np.exp(-tout) - assert_almost_equal(y, expected_y) - - def test_04(self): - """Specify an initial condition as a list.""" - - # First order system: x'(t) + x(t) = u(t), x(0)=3.0 - # Exact step response is x(t) = 1 + 2*exp(-t). - system = ([1.0],[1.0,1.0]) - tout, y = step2(system, X0=[3.0]) - expected_y = 1 + 2.0*np.exp(-tout) - assert_almost_equal(y, expected_y) - - def test_05(self): - # Simple integrator: x'(t) = u(t) - # Exact step response is x(t) = t. - system = ([1.0],[1.0,0.0]) - tout, y = step2(system, atol=1e-10, rtol=1e-8) - expected_y = tout - assert_almost_equal(y, expected_y) - - def test_06(self): - # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) - # The exact step response is 1 - (1 + t)*exp(-t). - system = ([1.0], [1.0, 2.0, 1.0]) - tout, y = step2(system, atol=1e-10, rtol=1e-8) - expected_y = 1 - (1 + tout) * np.exp(-tout) - assert_almost_equal(y, expected_y) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_signaltools.py b/scipy-0.10.1/scipy/signal/tests/test_signaltools.py deleted file mode 100644 index 39e25543f3..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_signaltools.py +++ /dev/null @@ -1,713 +0,0 @@ -import sys -import platform -from decimal import Decimal - -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_almost_equal, assert_array_equal, assert_array_almost_equal, \ - assert_raises, assert_, dec - -import scipy.signal as signal -from scipy.signal import correlate, convolve, convolve2d, \ - hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, tf2zpk - - -from numpy import array, arange -import numpy as np - - -# check if we're on 64-bit Linux, there a medfilt test fails. 
-if sys.platform == 'linux2' and platform.architecture()[0] == '64bit': - _linux64bit = True -else: - _linux64bit = False - - -class _TestConvolve(TestCase): - def test_basic(self): - a = [3,4,5,6,5,4] - b = [1,2,3] - c = convolve(a,b) - assert_array_equal(c,array([3,10,22,28,32,32,23,12])) - - def test_complex(self): - x = array([1+1j, 2+1j, 3+1j]) - y = array([1+1j, 2+1j]) - z = convolve(x, y) - assert_array_equal(z, array([2j, 2+6j, 5+8j, 5+5j])) - - def test_zero_order(self): - a = 1289 - b = 4567 - c = convolve(a,b) - assert_array_equal(c,a*b) - - def test_2d_arrays(self): - a = [[1,2,3],[3,4,5]] - b = [[2,3,4],[4,5,6]] - c = convolve(a,b) - d = array( [[2 ,7 ,16,17,12],\ - [10,30,62,58,38],\ - [12,31,58,49,30]]) - assert_array_equal(c,d) - - def test_valid_mode(self): - a = [1,2,3,6,5,3] - b = [2,3,4,5,3,4,2,2,1] - c = convolve(a,b,'valid') - assert_array_equal(c,array([70,78,73,65])) - - -class TestConvolve(_TestConvolve): - def test_valid_mode(self): - # 'valid' mode if b.size > a.size does not make sense with the new - # behavior - a = [1,2,3,6,5,3] - b = [2,3,4,5,3,4,2,2,1] - def _test(): - convolve(a,b,'valid') - self.assertRaises(ValueError, _test) - - def test_same_mode(self): - a = [1,2,3,3,1,2] - b = [1,4,3,4,5,6,7,4,3,2,1,1,3] - c = convolve(a,b,'same') - d = array([57,61,63,57,45,36]) - assert_array_equal(c,d) - -class _TestConvolve2d(TestCase): - def test_2d_arrays(self): - a = [[1,2,3],[3,4,5]] - b = [[2,3,4],[4,5,6]] - d = array( [[2 ,7 ,16,17,12],\ - [10,30,62,58,38],\ - [12,31,58,49,30]]) - e = convolve2d(a,b) - assert_array_equal(e,d) - - def test_valid_mode(self): - e = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]] - f = [[1,2,3],[3,4,5]] - g = convolve2d(e,f,'valid') - h = array([[62,80,98,116,134]]) - assert_array_equal(g,h) - - def test_fillvalue(self): - a = [[1,2,3],[3,4,5]] - b = [[2,3,4],[4,5,6]] - fillval = 1 - c = convolve2d(a,b,'full','fill',fillval) - d = array([[24,26,31,34,32],\ - [28,40,62,64,52],\ - [32,46,67,62,48]]) - 
assert_array_equal(c,d) - - def test_wrap_boundary(self): - a = [[1,2,3],[3,4,5]] - b = [[2,3,4],[4,5,6]] - c = convolve2d(a,b,'full','wrap') - d = array([[80,80,74,80,80],\ - [68,68,62,68,68],\ - [80,80,74,80,80]]) - assert_array_equal(c,d) - - def test_sym_boundary(self): - a = [[1,2,3],[3,4,5]] - b = [[2,3,4],[4,5,6]] - c = convolve2d(a,b,'full','symm') - d = array([[34,30,44, 62, 66],\ - [52,48,62, 80, 84],\ - [82,78,92,110,114]]) - assert_array_equal(c,d) - - -#class TestConvolve2d(_TestConvolve2d): -# def test_same_mode(self): -# e = [[1,2,3],[3,4,5]] -# f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]] -# g = convolve2d(e,f,'same') -# h = array([[80,98,116],\ -# [70,82,94]]) -# assert_array_equal(g,h) -# -# def test_valid_mode2(self): -# # Test when in2.size > in1.size -# e = [[1,2,3],[3,4,5]] -# f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]] -# def _test(): -# convolve2d(e,f,'valid') -# self.assertRaises(ValueError, _test) - -class TestFFTConvolve(TestCase): - def test_real(self): - x = array([1,2,3]) - assert_array_almost_equal(signal.fftconvolve(x,x), [1,4,10,12,9.]) - - def test_complex(self): - x = array([1+1j,2+2j,3+3j]) - assert_array_almost_equal(signal.fftconvolve(x,x), - [0+2.0j, 0+8j, 0+20j, 0+24j, 0+18j]) - - def test_2d_real_same(self): - a = array([[1,2,3],[4,5,6]]) - assert_array_almost_equal(signal.fftconvolve(a,a),\ - array([[1,4,10,12,9],\ - [8,26,56,54,36],\ - [16,40,73,60,36]])) - - def test_2d_complex_same(self): - a = array([[1+2j,3+4j,5+6j],[2+1j,4+3j,6+5j]]) - c = signal.fftconvolve(a,a) - d = array([[-3+4j,-10+20j,-21+56j,-18+76j,-11+60j],\ - [10j,44j,118j,156j,122j],\ - [3+4j,10+20j,21+56j,18+76j,11+60j]]) - assert_array_almost_equal(c,d) - - def test_real_same_mode(self): - a = array([1,2,3]) - b = array([3,3,5,6,8,7,9,0,1]) - c = signal.fftconvolve(a,b,'same') - d = array([9.,20.,25.,35.,41.,47.,39.,28.,2.]) - assert_array_almost_equal(c,d) - - def test_real_valid_mode(self): - a = array([3,2,1]) - b = array([3,3,5,6,8,7,9,0,1]) - c = 
signal.fftconvolve(a,b,'valid') - d = array([24.,31.,41.,43.,49.,25.,12.]) - assert_array_almost_equal(c,d) - - def test_zero_order(self): - a = array([4967]) - b = array([3920]) - c = signal.fftconvolve(a,b) - d = a*b - assert_equal(c,d) - - def test_random_data(self): - np.random.seed(1234) - a = np.random.rand(1233) + 1j*np.random.rand(1233) - b = np.random.rand(1321) + 1j*np.random.rand(1321) - c = signal.fftconvolve(a, b, 'full') - d = np.convolve(a, b, 'full') - assert_(np.allclose(c, d, rtol=1e-10)) - -class TestMedFilt(TestCase): - @dec.knownfailureif(_linux64bit, - "Currently fails intermittently on 64-bit Linux") - def test_basic(self): - f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], - [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], - [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], - [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], - [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], - [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], - [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], - [ 3, 33, 53, 67, 1, 78, 74, 55, 12, 83], - [ 7, 11, 46, 70, 60, 47, 24, 43, 61, 26], - [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] - - d = signal.medfilt(f, [7, 3]) - e = signal.medfilt2d(np.array(f, np.float), [7, 3]) - assert_array_equal(d, [[ 0, 50, 50, 50, 42, 15, 15, 18, 27, 0], - [ 0, 50, 50, 50, 50, 42, 19, 21, 29, 0], - [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], - [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], - [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], - [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], - [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], - [ 7, 46, 50, 50, 47, 46, 46, 43, 45, 21], - [ 0, 32, 33, 39, 32, 32, 43, 43, 43, 0], - [ 0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) - assert_array_equal(d, e) - - def test_none(self): - """Ticket #1124. 
Ensure this does not segfault.""" - try: - signal.medfilt(None) - except: - pass - -class TestWiener(TestCase): - def test_basic(self): - g = array([[5,6,4,3],[3,5,6,2],[2,3,5,6],[1,6,9,7]],'d') - correct = array([[2.16374269,3.2222222222, 2.8888888889, 1.6666666667], - [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], - [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], - [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) - h = signal.wiener(g) - assert_array_almost_equal(h,correct,decimal=6) - -class TestCSpline1DEval(TestCase): - def test_basic(self): - y=array([1,2,3,4,3,2,1,2,3.0]) - x=arange(len(y)) - dx=x[1]-x[0] - cj = signal.cspline1d(y) - - x2=arange(len(y)*10.0)/10.0 - y2=signal.cspline1d_eval(cj, x2, dx=dx,x0=x[0]) - - # make sure interpolated values are on knot points - assert_array_almost_equal(y2[::10], y, decimal=5) - -class TestOrderFilt(TestCase): - def test_basic(self): - assert_array_equal(signal.order_filter([1,2,3],[1,0,1],1), - [2,3,2]) - - -class _TestLinearFilter(TestCase): - dt = None - def test_rank1(self): - x = np.linspace(0, 5, 6).astype(self.dt) - b = np.array([1, -1]).astype(self.dt) - a = np.array([0.5, -0.5]).astype(self.dt) - - # Test simple IIR - y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) - assert_array_almost_equal(lfilter(b, a, x), y_r) - - # Test simple FIR - b = np.array([1, 1]).astype(self.dt) - a = np.array([1]).astype(self.dt) - y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) - assert_array_almost_equal(lfilter(b, a, x), y_r) - - # Test IIR with initial conditions - b = np.array([1, 1]).astype(self.dt) - a = np.array([1]).astype(self.dt) - zi = np.array([1]).astype(self.dt) - y_r = np.array([1, 1, 3, 5, 7, 9.]).astype(self.dt) - zf_r = np.array([5]).astype(self.dt) - y, zf = lfilter(b, a, x, zi=zi) - assert_array_almost_equal(y, y_r) - assert_array_almost_equal(zf, zf_r) - - b = np.array([1, 1, 1]).astype(self.dt) - a = np.array([1]).astype(self.dt) - zi = np.array([1, 
1]).astype(self.dt) - y_r = np.array([1, 2, 3, 6, 9, 12.]).astype(self.dt) - zf_r = np.array([9, 5]).astype(self.dt) - y, zf = lfilter(b, a, x, zi=zi) - assert_array_almost_equal(y, y_r) - assert_array_almost_equal(zf, zf_r) - - def test_rank2(self): - shape = (4, 3) - x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) - x = x.astype(self.dt) - - b = np.array([1, -1]).astype(self.dt) - a = np.array([0.5, 0.5]).astype(self.dt) - - y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6 ,4 ,2]], - dtype=self.dt) - - y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], - [18, -16, 18]], dtype=self.dt) - - y = lfilter(b, a, x, axis = 0) - assert_array_almost_equal(y_r2_a0, y) - - y = lfilter(b, a, x, axis = 1) - assert_array_almost_equal(y_r2_a1, y) - - def test_rank2_init_cond_a1(self): - # Test initial condition handling along axis 1 - shape = (4, 3) - x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) - x = x.astype(self.dt) - - b = np.array([1, -1]).astype(self.dt) - a = np.array([0.5, 0.5]).astype(self.dt) - - y_r2_a0_1 = np.array([[1, 1, 1], [7, -5, 7], [13, -11, 13], - [19, -17, 19]], dtype=self.dt) - zf_r = np.array([-5, -17, -29, -41])[:, np.newaxis].astype(self.dt) - y, zf = lfilter(b, a, x, axis = 1, zi = np.ones((4, 1))) - assert_array_almost_equal(y_r2_a0_1, y) - assert_array_almost_equal(zf, zf_r) - - def test_rank2_init_cond_a0(self): - # Test initial condition handling along axis 0 - shape = (4, 3) - x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) - x = x.astype(self.dt) - - b = np.array([1, -1]).astype(self.dt) - a = np.array([0.5, 0.5]).astype(self.dt) - - y_r2_a0_0 = np.array([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5 ,3 ,1]], - dtype=self.dt) - zf_r = np.array([[-23, -23, -23]], dtype=self.dt) - y, zf = lfilter(b, a, x, axis = 0, zi = np.ones((1, 3))) - assert_array_almost_equal(y_r2_a0_0, y) - assert_array_almost_equal(zf, zf_r) - - def test_rank3(self): - shape = (4, 3, 2) - x = 
np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) - - b = np.array([1, -1]).astype(self.dt) - a = np.array([0.5, 0.5]).astype(self.dt) - - # Test last axis - y = lfilter(b, a, x) - for i in range(x.shape[0]): - for j in range(x.shape[1]): - assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) - - def test_empty_zi(self): - """Regression test for #880: empty array for zi crashes.""" - a = np.ones(1).astype(self.dt) - b = np.ones(1).astype(self.dt) - x = np.arange(5).astype(self.dt) - zi = np.ones(0).astype(self.dt) - y, zf = lfilter(b, a, x, zi=zi) - assert_array_almost_equal(y, x) - self.assertTrue(zf.dtype == self.dt) - self.assertTrue(zf.size == 0) - -class TestLinearFilterFloat32(_TestLinearFilter): - dt = np.float32 - -class TestLinearFilterFloat64(_TestLinearFilter): - dt = np.float64 - -class TestLinearFilterFloatExtended(_TestLinearFilter): - dt = np.longdouble - -class TestLinearFilterComplex64(_TestLinearFilter): - dt = np.complex64 - -class TestLinearFilterComplex128(_TestLinearFilter): - dt = np.complex128 - -class TestLinearFilterComplexxxiExtended28(_TestLinearFilter): - dt = np.longcomplex - -class TestLinearFilterDecimal(_TestLinearFilter): - dt = np.dtype(Decimal) - - -class _TestCorrelateReal(TestCase): - - dt = None - - def _setup_rank1(self): - # a.size should be greated than b.size for the tests - a = np.linspace(0, 3, 4).astype(self.dt) - b = np.linspace(1, 2, 2).astype(self.dt) - - y_r = np.array([0, 2, 5, 8, 3]).astype(self.dt) - return a, b, y_r - - def test_rank1_valid(self): - a, b, y_r = self._setup_rank1() - y = correlate(a, b, 'valid') - assert_array_almost_equal(y, y_r[1:4]) - self.assertTrue(y.dtype == self.dt) - - def test_rank1_same(self): - a, b, y_r = self._setup_rank1() - y = correlate(a, b, 'same') - assert_array_almost_equal(y, y_r[:-1]) - self.assertTrue(y.dtype == self.dt) - - def test_rank1_full(self): - a, b, y_r = self._setup_rank1() - y = correlate(a, b, 'full') - assert_array_almost_equal(y, y_r) - 
self.assertTrue(y.dtype == self.dt) - - def _setup_rank3(self): - a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(self.dt) - b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(self.dt) - - y_r = array([[[ 0., 184., 504., 912., 1360., 888., 472., 160.,], - [ 46., 432., 1062., 1840., 2672., 1698., 864., 266.,], - [ 134., 736., 1662., 2768., 3920., 2418., 1168., 314.,], - [ 260., 952., 1932., 3056., 4208., 2580., 1240., 332.,] , - [ 202., 664., 1290., 1984., 2688., 1590., 712., 150.,] , - [ 114., 344., 642., 960., 1280., 726., 296., 38.,]], - - [[ 23., 400., 1035., 1832., 2696., 1737., 904., 293.,], - [ 134., 920., 2166., 3680., 5280., 3306., 1640., 474.,], - [ 325., 1544., 3369., 5512., 7720., 4683., 2192., 535.,], - [ 571., 1964., 3891., 6064., 8272., 4989., 2324., 565.,], - [ 434., 1360., 2586., 3920., 5264., 3054., 1312., 230.,], - [ 241., 700., 1281., 1888., 2496., 1383., 532., 39.,]], - - [[ 22., 214., 528., 916., 1332., 846., 430., 132.,], - [ 86., 484., 1098., 1832., 2600., 1602., 772., 206.,], - [ 188., 802., 1698., 2732., 3788., 2256., 1018., 218.,], - [ 308., 1006., 1950., 2996., 4052., 2400., 1078., 230.,], - [ 230., 692., 1290., 1928., 2568., 1458., 596., 78.,], - [ 126., 354., 636., 924., 1212., 654., 234., 0.,]]], - dtype=self.dt) - - return a, b, y_r - - def test_rank3_valid(self): - a, b, y_r = self._setup_rank3() - y = correlate(a, b, "valid") - assert_array_almost_equal(y, y_r[1:2,2:4,3:5]) - self.assertTrue(y.dtype == self.dt) - - def test_rank3_same(self): - a, b, y_r = self._setup_rank3() - y = correlate(a, b, "same") - assert_array_almost_equal(y, y_r[0:-1,1:-1,1:-2]) - self.assertTrue(y.dtype == self.dt) - - def test_rank3_all(self): - a, b, y_r = self._setup_rank3() - y = correlate(a, b) - assert_array_almost_equal(y, y_r) - self.assertTrue(y.dtype == self.dt) - - -def _get_testcorrelate_class(datatype, base): - class TestCorrelateX(base): - dt = datatype - TestCorrelateX.__name__ = "TestCorrelate%s" % 
datatype.__name__.title() - return TestCorrelateX - -for datatype in [np.ubyte, np.byte, np.ushort, np.short, np.uint, np.int, - np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, - Decimal]: - cls = _get_testcorrelate_class(datatype, _TestCorrelateReal) - globals()[cls.__name__] = cls - - -class _TestCorrelateComplex(TestCase): - - # The numpy data type to use. - dt = None - - # The decimal precision to be used for comparing results. - # This value will be passed as the 'decimal' keyword argument of - # assert_array_almost_equal(). - decimal = None - - def _setup_rank1(self, mode): - np.random.seed(9) - a = np.random.randn(10).astype(self.dt) - a += 1j * np.random.randn(10).astype(self.dt) - b = np.random.randn(8).astype(self.dt) - b += 1j * np.random.randn(8).astype(self.dt) - - y_r = (correlate(a.real, b.real, mode=mode) + - correlate(a.imag, b.imag, mode=mode)).astype(self.dt) - y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + - correlate(a.imag, b.real, mode=mode)) - return a, b, y_r - - def test_rank1_valid(self): - a, b, y_r = self._setup_rank1('valid') - y = correlate(a, b, 'valid') - assert_array_almost_equal(y, y_r, decimal=self.decimal) - self.assertTrue(y.dtype == self.dt) - - def test_rank1_same(self): - a, b, y_r = self._setup_rank1('same') - y = correlate(a, b, 'same') - assert_array_almost_equal(y, y_r, decimal=self.decimal) - self.assertTrue(y.dtype == self.dt) - - def test_rank1_full(self): - a, b, y_r = self._setup_rank1('full') - y = correlate(a, b, 'full') - assert_array_almost_equal(y, y_r, decimal=self.decimal) - self.assertTrue(y.dtype == self.dt) - - def test_rank3(self): - a = np.random.randn(10, 8, 6).astype(self.dt) - a += 1j * np.random.randn(10, 8, 6).astype(self.dt) - b = np.random.randn(8, 6, 4).astype(self.dt) - b += 1j * np.random.randn(8, 6, 4).astype(self.dt) - - y_r = (correlate(a.real, b.real) - + correlate(a.imag, b.imag)).astype(self.dt) - y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, 
b.real)) - - y = correlate(a, b, 'full') - assert_array_almost_equal(y, y_r, decimal=self.decimal-1) - self.assertTrue(y.dtype == self.dt) - - -# Create three classes, one for each complex data type. The actual class -# name will be TestCorrelateComplex###, where ### is the number of bits. -for datatype in [np.csingle, np.cdouble, np.clongdouble]: - cls = _get_testcorrelate_class(datatype, _TestCorrelateComplex) - cls.decimal = int(2 * np.finfo(datatype).precision / 3) - globals()[cls.__name__] = cls - - -class TestLFilterZI(TestCase): - - def test_basic(self): - a = np.array([1.0, -1.0, 0.5]) - b = np.array([1.0, 0.0, 2.0]) - zi_expected = np.array([5.0, -1.0]) - zi = lfilter_zi(b, a) - assert_array_almost_equal(zi, zi_expected) - - -class TestFiltFilt(TestCase): - - def test_basic(self): - out = signal.filtfilt([1, 2, 3], [1, 2, 3], np.arange(12)) - assert_equal(out, arange(12)) - - def test_sine(self): - rate = 2000 - t = np.linspace(0, 1.0, rate + 1) - # A signal with low frequency and a high frequency. - xlow = np.sin(5 * 2 * np.pi * t) - xhigh = np.sin(250 * 2 * np.pi * t) - x = xlow + xhigh - - b, a = butter(8, 0.125) - z, p, k = tf2zpk(b, a) - # r is the magnitude of the largest pole. - r = np.abs(p).max() - eps = 1e-5 - # n estimates the number of steps for the - # transient to decay by a factor of eps. - n = int(np.ceil(np.log(eps) / np.log(r))) - - # High order lowpass filter... - y = filtfilt(b, a, x, padlen=n) - # Result should be just xlow. - err = np.abs(y - xlow).max() - assert_(err < 1e-4) - - # A 2D case. 
- x2d = np.vstack([xlow, xlow + xhigh]) - y2d = filtfilt(b, a, x2d, padlen=n, axis=1) - assert_equal(y2d.shape, x2d.shape) - err = np.abs(y2d - xlow).max() - assert_(err < 1e-4) - - -class TestDecimate: - - def test_basic(self): - x = np.arange(6) - assert_array_equal(signal.decimate(x, 2, n=1).round(), x[::2]) - - def test_shape(self): - """Regression test for ticket #1480.""" - z = np.zeros((10, 10)) - d0 = signal.decimate(z, 2, axis=0) - assert_equal(d0.shape, (5, 10)) - d1 = signal.decimate(z, 2, axis=1) - assert_equal(d1.shape, (10, 5)) - - -class TestHilbert(object): - - def test_bad_args(self): - x = np.array([1.0+0.0j]) - assert_raises(ValueError, hilbert, x) - x = np.arange(8.0) - assert_raises(ValueError, hilbert, x, N=0) - - def test_hilbert_theoretical(self): - #test cases by Ariel Rokem - decimal = 14 - - pi = np.pi - t = np.arange(0, 2*pi, pi/256) - a0 = np.sin(t) - a1 = np.cos(t) - a2 = np.sin(2*t) - a3 = np.cos(2*t) - a = np.vstack([a0,a1,a2,a3]) - - h = hilbert(a) - h_abs = np.abs(h) - h_angle = np.angle(h) - h_real = np.real(h) - - #The real part should be equal to the original signals: - assert_almost_equal(h_real, a, decimal) - #The absolute value should be one everywhere, for this input: - assert_almost_equal(h_abs, np.ones(a.shape), decimal) - #For the 'slow' sine - the phase should go from -pi/2 to pi/2 in - #the first 256 bins: - assert_almost_equal(h_angle[0,:256], np.arange(-pi/2,pi/2,pi/256), - decimal) - #For the 'slow' cosine - the phase should go from 0 to pi in the - #same interval: - assert_almost_equal(h_angle[1,:256], np.arange(0,pi,pi/256), decimal) - #The 'fast' sine should make this phase transition in half the time: - assert_almost_equal(h_angle[2,:128], np.arange(-pi/2,pi/2,pi/128), - decimal) - #Ditto for the 'fast' cosine: - assert_almost_equal(h_angle[3,:128], np.arange(0,pi,pi/128), decimal) - - #The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia - assert_almost_equal(h[1].imag, a0, decimal) - - def 
test_hilbert_axisN(self): - # tests for axis and N arguments - a = np.arange(18).reshape(3,6) - # test axis - aa = hilbert(a, axis=-1) - yield assert_equal, hilbert(a.T, axis=0), aa.T - # test 1d - yield assert_equal, hilbert(a[0]), aa[0] - - # test N - aan = hilbert(a, N=20, axis=-1) - yield assert_equal, aan.shape, [3,20] - yield assert_equal, hilbert(a.T, N=20, axis=0).shape, [20,3] - #the next test is just a regression test, - #no idea whether numbers make sense - a0hilb = np.array( - [ 0.000000000000000e+00-1.72015830311905j , - 1.000000000000000e+00-2.047794505137069j, - 1.999999999999999e+00-2.244055555687583j, - 3.000000000000000e+00-1.262750302935009j, - 4.000000000000000e+00-1.066489252384493j, - 5.000000000000000e+00+2.918022706971047j, - 8.881784197001253e-17+3.845658908989067j, - -9.444121133484362e-17+0.985044202202061j, - -1.776356839400251e-16+1.332257797702019j, - -3.996802888650564e-16+0.501905089898885j, - 1.332267629550188e-16+0.668696078880782j, - -1.192678053963799e-16+0.235487067862679j, - -1.776356839400251e-16+0.286439612812121j, - 3.108624468950438e-16+0.031676888064907j, - 1.332267629550188e-16-0.019275656884536j, - -2.360035624836702e-16-0.1652588660287j , - 0.000000000000000e+00-0.332049855010597j, - 3.552713678800501e-16-0.403810179797771j, - 8.881784197001253e-17-0.751023775297729j, - 9.444121133484362e-17-0.79252210110103j ]) - yield assert_almost_equal, aan[0], a0hilb, 14, 'N regression' - - -class TestHilbert2(object): - - def test_bad_args(self): - - # x must be real. - x = np.array([[1.0 + 0.0j]]) - assert_raises(ValueError, hilbert2, x) - - # x must be rank 2. - x = np.arange(24).reshape(2, 3, 4) - assert_raises(ValueError, hilbert2, x) - - # Bad value for N. 
- x = np.arange(16).reshape(4, 4) - assert_raises(ValueError, hilbert2, x, N=0) - assert_raises(ValueError, hilbert2, x, N=(2,0)) - assert_raises(ValueError, hilbert2, x, N=(2,)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_spectral.py b/scipy-0.10.1/scipy/signal/tests/test_spectral.py deleted file mode 100644 index 4dd1d597ea..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_spectral.py +++ /dev/null @@ -1,94 +0,0 @@ -"""Unit tests for Lomb Scargle routines. -""" - -import numpy as np -from numpy.testing import dec, assert_raises, assert_equal, \ - assert_almost_equal, assert_array_equal, \ - assert_array_almost_equal, assert_approx_equal, \ - assert_, run_module_suite - -from scipy.signal.spectral import lombscargle - - -class TestLombscargle: - def test_frequency(self): - """Test if frequency location of peak corresponds to frequency of - generated input signal. - """ - - # Input parameters - ampl = 2. - w = 1. - phi = 0.5 * np.pi - nin = 100 - nout = 1000 - p = 0.7 # Fraction of points to select - - # Randomly select a fraction of an array with timesteps - np.random.seed(2353425) - r = np.random.rand(nin) - t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] - - # Plot a sine wave for the selected times - x = ampl * np.sin(w*t + phi) - - # Define the array of frequencies for which to compute the periodogram - f = np.linspace(0.01, 10., nout) - - # Calculate Lomb-Scargle periodogram - P = lombscargle(t, x, f) - - # Check if difference between found frequency maximum and input - # frequency is less than accuracy - delta = f[1] - f[0] - assert_(w - f[np.argmax(P)] < (delta/2.)) - - def test_amplitude(self): - """Test if height of peak in normalized Lomb-Scargle periodogram - corresponds to amplitude of the generated input signal. - """ - - # Input parameters - ampl = 2. - w = 1. 
- phi = 0.5 * np.pi - nin = 100 - nout = 1000 - p = 0.7 # Fraction of points to select - - # Randomly select a fraction of an array with timesteps - np.random.seed(2353425) - r = np.random.rand(nin) - t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] - - # Plot a sine wave for the selected times - x = ampl * np.sin(w*t + phi) - - # Define the array of frequencies for which to compute the periodogram - f = np.linspace(0.01, 10., nout) - - # Calculate Lomb-Scargle periodogram - pgram = lombscargle(t, x, f) - - # Normalize - pgram = np.sqrt(4 * pgram / t.shape[0]) - - # Check if difference between found frequency maximum and input - # frequency is less than accuracy - assert_approx_equal(np.max(pgram), ampl, significant=2) - - def test_wrong_shape(self): - t = np.linspace(0, 1, 1) - x = np.linspace(0, 1, 2) - f = np.linspace(0, 1, 3) - assert_raises(ValueError, lombscargle, t, x, f) - - def test_zero_division(self): - t = np.zeros(1) - x = np.zeros(1) - f = np.zeros(1) - assert_raises(ZeroDivisionError, lombscargle, t, x, f) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_waveforms.py b/scipy-0.10.1/scipy/signal/tests/test_waveforms.py deleted file mode 100644 index 7a00e5ad6a..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_waveforms.py +++ /dev/null @@ -1,316 +0,0 @@ - -import numpy as np -from numpy.testing import TestCase, assert_almost_equal, assert_equal, assert_, \ - assert_raises, run_module_suite - -import scipy.signal.waveforms as waveforms - - -# These chirp_* functions are the instantaneous frequencies of the signals -# returned by chirp(). 
- -def chirp_linear(t, f0, f1, t1): - f = f0 + (f1 - f0) * t / t1 - return f - -def chirp_quadratic(t, f0, f1, t1, vertex_zero=True): - if vertex_zero: - f = f0 + (f1 - f0) * t**2 / t1**2 - else: - f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2 - return f - -def chirp_geometric(t, f0, f1, t1): - f = f0 * (f1/f0)**(t/t1) - return f - -def chirp_hyperbolic(t, f0, f1, t1): - f = f0*f1*t1 / ((f0 - f1)*t + f1*t1) - return f - - -def compute_frequency(t, theta): - """Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).""" - # Assume theta and t are 1D numpy arrays. - # Assume that t is uniformly spaced. - dt = t[1] - t[0] - f = np.diff(theta)/(2*np.pi) / dt - tf = 0.5*(t[1:] + t[:-1]) - return tf, f - - -class TestChirp(TestCase): - - def test_linear_at_zero(self): - w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear') - assert_almost_equal(w, 1.0) - - def test_linear_freq_01(self): - method = 'linear' - f0 = 1.0 - f1 = 2.0 - t1 = 1.0 - t = np.linspace(0, t1, 100) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_linear_freq_02(self): - method = 'linear' - f0 = 200.0 - f1 = 100.0 - t1 = 10.0 - t = np.linspace(0, t1, 100) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_quadratic_at_zero(self): - w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic') - assert_almost_equal(w, 1.0) - - def test_quadratic_at_zero2(self): - w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic', - vertex_zero=False) - assert_almost_equal(w, 1.0) - - def test_quadratic_freq_01(self): - method = 'quadratic' - f0 = 1.0 - f1 = 2.0 - t1 = 1.0 - t = np.linspace(0, t1, 2000) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) 
- abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_quadratic_freq_02(self): - method = 'quadratic' - f0 = 20.0 - f1 = 10.0 - t1 = 10.0 - t = np.linspace(0, t1, 2000) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_logarithmic_at_zero(self): - w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic') - assert_almost_equal(w, 1.0) - - def test_logarithmic_freq_01(self): - method = 'logarithmic' - f0 = 1.0 - f1 = 2.0 - t1 = 1.0 - t = np.linspace(0, t1, 10000) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_logarithmic_freq_02(self): - method = 'logarithmic' - f0 = 200.0 - f1 = 100.0 - t1 = 10.0 - t = np.linspace(0, t1, 10000) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_logarithmic_freq_03(self): - method = 'logarithmic' - f0 = 100.0 - f1 = 100.0 - t1 = 10.0 - t = np.linspace(0, t1, 10000) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_hyperbolic_at_zero(self): - w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic') - assert_almost_equal(w, 1.0) - - def test_hyperbolic_freq_01(self): - method = 'hyperbolic' - f0 = 10.0 - f1 = 1.0 - t1 = 1.0 - t = np.linspace(0, t1, 10000) - phase = waveforms._chirp_phase(t, f0, t1, f1, method) - tf, f = compute_frequency(t, phase) - abserr = np.max(np.abs(f - chirp_hyperbolic(tf, f0, f1, t1))) - assert_(abserr < 1e-6) - - def test_hyperbolic_freq_02(self): - method = 
'hyperbolic' - f0 = 10.0 - f1 = 100.0 - t1 = 1.0 - t = np.linspace(0, t1, 10) - assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) - - def test_hyperbolic_freq_03(self): - method = 'hyperbolic' - f0 = -10.0 - f1 = 0.0 - t1 = 1.0 - t = np.linspace(0, t1, 10) - assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) - - def test_unknown_method(self): - method = "foo" - f0 = 10.0 - f1 = 20.0 - t1 = 1.0 - t = np.linspace(0, t1, 10) - assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) - - def test_integer_t1(self): - f0 = 10.0 - f1 = 20.0 - t = np.linspace(-1, 1, 11) - t1 = 3.0 - float_result = waveforms.chirp(t, f0, t1, f1) - t1 = 3 - int_result = waveforms.chirp(t, f0, t1, f1) - err_msg = "Integer input 't1=3' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - def test_integer_f0(self): - f1 = 20.0 - t1 = 3.0 - t = np.linspace(-1, 1, 11) - f0 = 10.0 - float_result = waveforms.chirp(t, f0, t1, f1) - f0 = 10 - int_result = waveforms.chirp(t, f0, t1, f1) - err_msg = "Integer input 'f0=10' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - def test_integer_f1(self): - f0 = 10.0 - t1 = 3.0 - t = np.linspace(-1, 1, 11) - f1 = 20.0 - float_result = waveforms.chirp(t, f0, t1, f1) - f1 = 20 - int_result = waveforms.chirp(t, f0, t1, f1) - err_msg = "Integer input 'f1=20' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - def test_integer_all(self): - f0 = 10 - t1 = 3 - f1 = 20 - t = np.linspace(-1, 1, 11) - float_result = waveforms.chirp(t, float(f0), float(t1), float(f1)) - int_result = waveforms.chirp(t, f0, t1, f1) - err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - -class TestSweepPoly(TestCase): - - def test_sweep_poly_quad1(self): - p = np.poly1d([1.0, 0.0, 1.0]) - t = np.linspace(0, 3.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = 
compute_frequency(t, phase) - expected = p(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - def test_sweep_poly_const(self): - p = np.poly1d(2.0) - t = np.linspace(0, 3.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = compute_frequency(t, phase) - expected = p(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - def test_sweep_poly_linear(self): - p = np.poly1d([-1.0, 10.0]) - t = np.linspace(0, 3.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = compute_frequency(t, phase) - expected = p(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - def test_sweep_poly_quad2(self): - p = np.poly1d([1.0, 0.0, -2.0]) - t = np.linspace(0, 3.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = compute_frequency(t, phase) - expected = p(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - def test_sweep_poly_cubic(self): - p = np.poly1d([2.0, 1.0, 0.0, -2.0]) - t = np.linspace(0, 2.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = compute_frequency(t, phase) - expected = p(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - def test_sweep_poly_cubic2(self): - """Use an array of coefficients instead of a poly1d.""" - p = np.array([2.0, 1.0, 0.0, -2.0]) - t = np.linspace(0, 2.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = compute_frequency(t, phase) - expected = np.poly1d(p)(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - def test_sweep_poly_cubic3(self): - """Use a list of coefficients instead of a poly1d.""" - p = [2.0, 1.0, 0.0, -2.0] - t = np.linspace(0, 2.0, 10000) - phase = waveforms._sweep_poly_phase(t, p) - tf, f = compute_frequency(t, phase) - expected = np.poly1d(p)(tf) - abserr = np.max(np.abs(f - expected)) - assert_(abserr < 1e-6) - - -class TestGaussPulse(TestCase): - - def test_integer_fc(self): - float_result = waveforms.gausspulse('cutoff', fc=1000.0) - int_result = 
waveforms.gausspulse('cutoff', fc=1000) - err_msg = "Integer input 'fc=1000' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - def test_integer_bw(self): - float_result = waveforms.gausspulse('cutoff', bw=1.0) - int_result = waveforms.gausspulse('cutoff', bw=1) - err_msg = "Integer input 'bw=1' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - def test_integer_bwr(self): - float_result = waveforms.gausspulse('cutoff', bwr=-6.0) - int_result = waveforms.gausspulse('cutoff', bwr=-6) - err_msg = "Integer input 'bwr=-6' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - def test_integer_tpr(self): - float_result = waveforms.gausspulse('cutoff', tpr=-60.0) - int_result = waveforms.gausspulse('cutoff', tpr=-60) - err_msg = "Integer input 'tpr=-60' gives wrong result" - assert_equal(int_result, float_result, err_msg=err_msg) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/signal/tests/test_wavelets.py b/scipy-0.10.1/scipy/signal/tests/test_wavelets.py deleted file mode 100644 index fbca1dc9e9..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_wavelets.py +++ /dev/null @@ -1,80 +0,0 @@ -import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_array_equal, assert_array_almost_equal, assert_array_less, assert_ - -from scipy.signal import wavelets - - -class TestWavelets(TestCase): - def test_qmf(self): - assert_array_equal(wavelets.qmf([1,1]),[1,-1]) - - def test_daub(self): - for i in xrange(1,15): - assert_equal(len(wavelets.daub(i)),i*2) - - def test_cascade(self): - for J in xrange(1,7): - for i in xrange(1,5): - lpcoef = wavelets.daub(i) - k = len(lpcoef) - x,phi,psi = wavelets.cascade(lpcoef,J) - assert_(len(x) == len(phi) == len(psi)) - assert_equal(len(x),(k-1)*2**J) - - def test_morlet(self): - x = wavelets.morlet(50,4.1,complete=True) - y = wavelets.morlet(50,4.1,complete=False) - # Test if 
complete and incomplete wavelet have same lengths: - assert_equal(len(x),len(y)) - # Test if complete wavelet is less than incomplete wavelet: - assert_array_less(x,y) - - x = wavelets.morlet(10,50,complete=False) - y = wavelets.morlet(10,50,complete=True) - # For large widths complete and incomplete wavelets should be - # identical within numerical precision: - assert_equal(x,y) - - # miscellaneous tests: - x = np.array([1.73752399e-09 +9.84327394e-25j, - 6.49471756e-01 +0.00000000e+00j, - 1.73752399e-09 -9.84327394e-25j]) - y = wavelets.morlet(3,w=2,complete=True) - assert_array_almost_equal(x,y) - - x = np.array([2.00947715e-09 +9.84327394e-25j, - 7.51125544e-01 +0.00000000e+00j, - 2.00947715e-09 -9.84327394e-25j]) - y = wavelets.morlet(3,w=2,complete=False) - assert_array_almost_equal(x,y,decimal=2) - - x = wavelets.morlet(10000,s=4,complete=True) - y = wavelets.morlet(20000,s=8,complete=True)[5000:15000] - assert_array_almost_equal(x,y,decimal=2) - - x = wavelets.morlet(10000,s=4,complete=False) - assert_array_almost_equal(y,x,decimal=2) - y = wavelets.morlet(20000,s=8,complete=False)[5000:15000] - assert_array_almost_equal(x,y,decimal=2) - - x = wavelets.morlet(10000,w=3,s=5,complete=True) - y = wavelets.morlet(20000,w=3,s=10,complete=True)[5000:15000] - assert_array_almost_equal(x,y,decimal=2) - - x = wavelets.morlet(10000,w=3,s=5,complete=False) - assert_array_almost_equal(y,x,decimal=2) - y = wavelets.morlet(20000,w=3,s=10,complete=False)[5000:15000] - assert_array_almost_equal(x,y,decimal=2) - - x = wavelets.morlet(10000,w=7,s=10,complete=True) - y = wavelets.morlet(20000,w=7,s=20,complete=True)[5000:15000] - assert_array_almost_equal(x,y,decimal=2) - - x = wavelets.morlet(10000,w=7,s=10,complete=False) - assert_array_almost_equal(x,y,decimal=2) - y = wavelets.morlet(20000,w=7,s=20,complete=False)[5000:15000] - assert_array_almost_equal(x,y,decimal=2) - -if __name__ == "__main__": - run_module_suite() diff --git 
a/scipy-0.10.1/scipy/signal/tests/test_windows.py b/scipy-0.10.1/scipy/signal/tests/test_windows.py deleted file mode 100644 index 4dfd76173d..0000000000 --- a/scipy-0.10.1/scipy/signal/tests/test_windows.py +++ /dev/null @@ -1,65 +0,0 @@ - -from numpy import array, ones_like -from numpy.testing import assert_array_almost_equal, assert_array_equal -from scipy import signal - - -cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, - 0.198891, 0.235450, 0.274846, 0.316836, - 0.361119, 0.407338, 0.455079, 0.503883, - 0.553248, 0.602637, 0.651489, 0.699227, - 0.745266, 0.789028, 0.829947, 0.867485, - 0.901138, 0.930448, 0.955010, 0.974482, - 0.988591, 0.997138, 1.000000, 0.997138, - 0.988591, 0.974482, 0.955010, 0.930448, - 0.901138, 0.867485, 0.829947, 0.789028, - 0.745266, 0.699227, 0.651489, 0.602637, - 0.553248, 0.503883, 0.455079, 0.407338, - 0.361119, 0.316836, 0.274846, 0.235450, - 0.198891, 0.165348, 0.134941, 0.107729, - 0.200938]) - -cheb_even_true = array([0.203894, 0.107279, 0.133904, - 0.163608, 0.196338, 0.231986, - 0.270385, 0.311313, 0.354493, - 0.399594, 0.446233, 0.493983, - 0.542378, 0.590916, 0.639071, - 0.686302, 0.732055, 0.775783, - 0.816944, 0.855021, 0.889525, - 0.920006, 0.946060, 0.967339, - 0.983557, 0.994494, 1.000000, - 1.000000, 0.994494, 0.983557, - 0.967339, 0.946060, 0.920006, - 0.889525, 0.855021, 0.816944, - 0.775783, 0.732055, 0.686302, - 0.639071, 0.590916, 0.542378, - 0.493983, 0.446233, 0.399594, - 0.354493, 0.311313, 0.270385, - 0.231986, 0.196338, 0.163608, - 0.133904, 0.107279, 0.203894]) - - -class TestChebWin(object): - - def test_cheb_odd(self): - cheb_odd = signal.chebwin(53, at=-40) - assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) - - def test_cheb_even(self): - cheb_even = signal.chebwin(54, at=-40) - assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) - - -class TestGetWindow(object): - - def test_boxcar(self): - w = signal.get_window('boxcar', 12) - assert_array_equal(w, 
ones_like(w)) - - def test_cheb_odd(self): - w = signal.get_window(('chebwin', -40), 53, fftbins=False) - assert_array_almost_equal(w, cheb_odd_true, decimal=4) - - def test_cheb_even(self): - w = signal.get_window(('chebwin', -40), 54, fftbins=False) - assert_array_almost_equal(w, cheb_even_true, decimal=4) diff --git a/scipy-0.10.1/scipy/signal/waveforms.py b/scipy-0.10.1/scipy/signal/waveforms.py deleted file mode 100644 index 30597a67b0..0000000000 --- a/scipy-0.10.1/scipy/signal/waveforms.py +++ /dev/null @@ -1,399 +0,0 @@ -# Author: Travis Oliphant -# 2003 -# -# Feb. 2010: Updated by Warren Weckesser: -# Rewrote much of chirp() -# Added sweep_poly() - -from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ - exp, cos, sin, polyval, polyint - -__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly'] - - -def sawtooth(t, width=1): - """ - Return a periodic sawtooth waveform. - - The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the - interval 0 to width*2*pi and drops from 1 to -1 on the interval - width*2*pi to 2*pi. `width` must be in the interval [0,1]. - - Parameters - ---------- - t : array_like - Time. - width : float, optional - Width of the waveform. Default is 1. - - Returns - ------- - y : ndarray - Output array containing the sawtooth waveform. 
- - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(0, 20*np.pi, 500) - >>> plt.plot(x, sp.signal.sawtooth(x)) - - """ - t, w = asarray(t), asarray(width) - w = asarray(w + (t - t)) - t = asarray(t + (w - w)) - if t.dtype.char in ['fFdD']: - ytype = t.dtype.char - else: - ytype = 'd' - y = zeros(t.shape, ytype) - - # width must be between 0 and 1 inclusive - mask1 = (w > 1) | (w < 0) - place(y, mask1, nan) - - # take t modulo 2*pi - tmod = mod(t, 2 * pi) - - # on the interval 0 to width*2*pi function is - # tmod / (pi*w) - 1 - mask2 = (1 - mask1) & (tmod < w * 2 * pi) - tsub = extract(mask2, tmod) - wsub = extract(mask2, w) - place(y, mask2, tsub / (pi * wsub) - 1) - - # on the interval width*2*pi to 2*pi function is - # (pi*(w+1)-tmod) / (pi*(1-w)) - - mask3 = (1 - mask1) & (1 - mask2) - tsub = extract(mask3, tmod) - wsub = extract(mask3, w) - place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) - return y - - -def square(t, duty=0.5): - """ - Return a periodic square-wave waveform. - - The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty - and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1]. - - Parameters - ---------- - t : array_like - The input time array. - duty : float, optional - Duty cycle. - - Returns - ------- - y : array_like - The output square wave. 
- - """ - t, w = asarray(t), asarray(duty) - w = asarray(w + (t - t)) - t = asarray(t + (w - w)) - if t.dtype.char in ['fFdD']: - ytype = t.dtype.char - else: - ytype = 'd' - y = zeros(t.shape, ytype) - - # width must be between 0 and 1 inclusive - mask1 = (w > 1) | (w < 0) - place(y, mask1, nan) - - # take t modulo 2*pi - tmod = mod(t, 2 * pi) - - # on the interval 0 to duty*2*pi function is - # 1 - mask2 = (1 - mask1) & (tmod < w * 2 * pi) - tsub = extract(mask2, tmod) - wsub = extract(mask2, w) - place(y, mask2, 1) - - # on the interval duty*2*pi to 2*pi function is - # (pi*(w+1)-tmod) / (pi*(1-w)) - - mask3 = (1 - mask1) & (1 - mask2) - tsub = extract(mask3, tmod) - wsub = extract(mask3, w) - place(y, mask3, -1) - return y - - -def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, - retenv=False): - """ - Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t). - - If `retquad` is True, then return the real and imaginary parts - (in-phase and quadrature). - If `retenv` is True, then return the envelope (unmodulated signal). - Otherwise, return the real part of the modulated sinusoid. - - Parameters - ---------- - t : ndarray, or the string 'cutoff' - Input array. - fc : int, optional - Center frequency (Hz). Default is 1000. - bw : float, optional - Fractional bandwidth in frequency domain of pulse (Hz). - Default is 0.5. - bwr: float, optional - Reference level at which fractional bandwidth is calculated (dB). - Default is -6. - tpr : float, optional - If `t` is 'cutoff', then the function returns the cutoff - time for when the pulse amplitude falls below `tpr` (in dB). - Default is -60. - retquad : bool, optional - If True, return the quadrature (imaginary) as well as the real part - of the signal. Default is False. - retenv : bool, optional - If True, return the envelope of the signal. Default is False. - - """ - if fc < 0: - raise ValueError("Center frequency (fc=%.2f) must be >=0." 
% fc) - if bw <= 0: - raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw) - if bwr >= 0: - raise ValueError("Reference level for bandwidth (bwr=%.2f) must " - "be < 0 dB" % bwr) - - # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) - - ref = pow(10.0, bwr / 20.0) - # fdel = fc*bw/2: g(fdel) = ref --- solve this for a - # - # pi^2/a * fc^2 * bw^2 /4=-log(ref) - a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) - - if t == 'cutoff': # compute cut_off point - # Solve exp(-a tc**2) = tref for tc - # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) - if tpr >= 0: - raise ValueError("Reference level for time cutoff must be < 0 dB") - tref = pow(10.0, tpr / 20.0) - return sqrt(-log(tref) / a) - - yenv = exp(-a * t * t) - yI = yenv * cos(2 * pi * fc * t) - yQ = yenv * sin(2 * pi * fc * t) - if not retquad and not retenv: - return yI - if not retquad and retenv: - return yI, yenv - if retquad and not retenv: - return yI, yQ - if retquad and retenv: - return yI, yQ, yenv - - -def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True): - """Frequency-swept cosine generator. - - In the following, 'Hz' should be interpreted as 'cycles per time unit'; - there is no assumption here that the time unit is one second. The - important distinction is that the units of rotation are cycles, not - radians. - - Parameters - ---------- - t : ndarray - Times at which to evaluate the waveform. - f0 : float - Frequency (in Hz) at time t=0. - t1 : float - Time at which `f1` is specified. - f1 : float - Frequency (in Hz) of the waveform at time `t1`. - method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional - Kind of frequency sweep. If not given, `linear` is assumed. See - Notes below for more details. - phi : float, optional - Phase offset, in degrees. Default is 0. - vertex_zero : bool, optional - This parameter is only used when `method` is 'quadratic'. 
- It determines whether the vertex of the parabola that is the graph - of the frequency is at t=0 or t=t1. - - Returns - ------- - A numpy array containing the signal evaluated at 't' with the requested - time-varying frequency. More precisely, the function returns: - - ``cos(phase + (pi/180)*phi)`` - - where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``. - ``f(t)`` is defined below. - - See Also - -------- - scipy.signal.waveforms.sweep_poly - - Notes - ----- - There are four options for the `method`. The following formulas give - the instantaneous frequency (in Hz) of the signal generated by - `chirp()`. For convenience, the shorter names shown below may also be - used. - - linear, lin, li: - - ``f(t) = f0 + (f1 - f0) * t / t1`` - - quadratic, quad, q: - - The graph of the frequency f(t) is a parabola through (0, f0) and - (t1, f1). By default, the vertex of the parabola is at (0, f0). - If `vertex_zero` is False, then the vertex is at (t1, f1). The - formula is: - - if vertex_zero is True: - - ``f(t) = f0 + (f1 - f0) * t**2 / t1**2`` - - else: - - ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2`` - - To use a more general quadratic function, or an arbitrary - polynomial, use the function `scipy.signal.waveforms.sweep_poly`. - - logarithmic, log, lo: - - ``f(t) = f0 * (f1/f0)**(t/t1)`` - - f0 and f1 must be nonzero and have the same sign. - - This signal is also known as a geometric or exponential chirp. - - hyperbolic, hyp: - - ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` - - f1 must be positive, and f0 must be greater than f1. - - """ - - # 'phase' is computed in _chirp_phase, to make testing easier. - phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) - # Convert phi to radians. - phi *= pi / 180 - return cos(phase + phi) - - -def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): - """ - Calculate the phase used by chirp_phase to generate its output. - - See `chirp_phase` for a description of the arguments. 
- - """ - f0 = float(f0) - t1 = float(t1) - f1 = float(f1) - if method in ['linear', 'lin', 'li']: - beta = (f1 - f0) / t1 - phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) - - elif method in ['quadratic', 'quad', 'q']: - beta = (f1 - f0) / (t1 ** 2) - if vertex_zero: - phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) - else: - phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) - - elif method in ['logarithmic', 'log', 'lo']: - if f0 * f1 <= 0.0: - raise ValueError("For a geometric chirp, f0 and f1 must be " - "nonzero and have the same sign.") - if f0 == f1: - phase = 2 * pi * f0 * t - else: - beta = t1 / log(f1 / f0) - phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) - - elif method in ['hyperbolic', 'hyp']: - if f1 <= 0.0 or f0 <= f1: - raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.") - c = f1 * t1 - df = f0 - f1 - phase = 2 * pi * (f0 * c / df) * log((df * t + c) / c) - - else: - raise ValueError("method must be 'linear', 'quadratic', 'logarithmic'," - " or 'hyperbolic', but a value of %r was given." % method) - - return phase - - -def sweep_poly(t, poly, phi=0): - """Frequency-swept cosine generator, with a time-dependent frequency - specified as a polynomial. - - This function generates a sinusoidal function whose instantaneous - frequency varies with time. The frequency at time `t` is given by - the polynomial `poly`. - - Parameters - ---------- - t : ndarray - Times at which to evaluate the waveform. - poly : 1D ndarray (or array-like), or instance of numpy.poly1d - The desired frequency expressed as a polynomial. If `poly` is - a list or ndarray of length n, then the elements of `poly` are - the coefficients of the polynomial, and the instantaneous - frequency is - - ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` - - If `poly` is an instance of numpy.poly1d, then the - instantaneous frequency is - - ``f(t) = poly(t)`` - - phi : float, optional - Phase offset, in degrees. Default is 0. 
- - Returns - ------- - A numpy array containing the signal evaluated at 't' with the requested - time-varying frequency. More precisely, the function returns - - ``cos(phase + (pi/180)*phi)`` - - where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``; - ``f(t)`` is defined above. - - See Also - -------- - scipy.signal.waveforms.chirp - - Notes - ----- - .. versionadded:: 0.8.0 - """ - # 'phase' is computed in _sweep_poly_phase, to make testing easier. - phase = _sweep_poly_phase(t, poly) - # Convert to radians. - phi *= pi / 180 - return cos(phase + phi) - - -def _sweep_poly_phase(t, poly): - """ - Calculate the phase used by sweep_poly to generate its output. - - See `sweep_poly` for a description of the arguments. - - """ - # polyint handles lists, ndarrays and instances of poly1d automatically. - intpoly = polyint(poly) - phase = 2 * pi * polyval(intpoly, t) - return phase diff --git a/scipy-0.10.1/scipy/signal/wavelets.py b/scipy-0.10.1/scipy/signal/wavelets.py deleted file mode 100644 index 3db2412154..0000000000 --- a/scipy-0.10.1/scipy/signal/wavelets.py +++ /dev/null @@ -1,248 +0,0 @@ -import numpy as np -from numpy.dual import eig -from scipy.misc import comb -from scipy import linspace, pi, exp - -__all__ = ['daub', 'qmf', 'cascade', 'morlet'] - - -def daub(p): - """ - The coefficients for the FIR low-pass filter producing Daubechies wavelets. - - p>=1 gives the order of the zero at f=1/2. - There are 2p filter coefficients. - - Parameters - ---------- - p : int - Order of the zero at f=1/2, can have values from 1 to 34. 
- - """ - sqrt = np.sqrt - if p < 1: - raise ValueError("p must be at least 1.") - if p == 1: - c = 1 / sqrt(2) - return np.array([c, c]) - elif p == 2: - f = sqrt(2) / 8 - c = sqrt(3) - return f * np.array([1 + c, 3 + c, 3 - c, 1 - c]) - elif p == 3: - tmp = 12 * sqrt(10) - z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6 - z1c = np.conj(z1) - f = sqrt(2) / 8 - d0 = np.real((1 - z1) * (1 - z1c)) - a0 = np.real(z1 * z1c) - a1 = 2 * np.real(z1) - return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1, - a0 - 3 * a1 + 3, 3 - a1, 1]) - elif p < 35: - # construct polynomial and factor it - if p < 35: - P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1] - yj = np.roots(P) - else: # try different polynomial --- needs work - P = [comb(p - 1 + k, k, exact=1) / 4.0 ** k - for k in range(p)][::-1] - yj = np.roots(P) / 4 - # for each root, compute two z roots, select the one with |z|>1 - # Build up final polynomial - c = np.poly1d([1, 1]) ** p - q = np.poly1d([1]) - for k in range(p - 1): - yval = yj[k] - part = 2 * sqrt(yval * (yval - 1)) - const = 1 - 2 * yval - z1 = const + part - if (abs(z1)) < 1: - z1 = const - part - q = q * [1, -z1] - - q = c * np.real(q) - # Normalize result - q = q / np.sum(q) * sqrt(2) - return q.c[::-1] - else: - raise ValueError("Polynomial factorization does not work " - "well for p too large.") - - -def qmf(hk): - """Return high-pass qmf filter from low-pass - """ - N = len(hk) - 1 - asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)] - return hk[::-1] * np.array(asgn) - - -def wavedec(amn, hk): - gk = qmf(hk) - return NotImplemented - - -def cascade(hk, J=7): - """ - Return (x, phi, psi) at dyadic points K/2**J from filter coefficients. - - Parameters - ---------- - hk : - Coefficients of low-pass filter. - J : int. optional - Values will be computed at grid points ``K/2**J``. 
- - Returns - ------- - x : - The dyadic points K/2**J for ``K=0...N * (2**J)-1`` where - ``len(hk) = len(gk) = N+1`` - phi : - The scaling function ``phi(x)`` at `x`: - - N - phi(x) = sum hk * phi(2x-k) - k=0 - - psi : - The wavelet function ``psi(x)`` at `x`: - - N - phi(x) = sum gk * phi(2x-k) - k=0 - - `psi` is only returned if `gk` is not None. - - Notes - ----- - The algorithm uses the vector cascade algorithm described by Strang and - Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values - and slices for quick reuse. Then inserts vectors into final vector at the - end. - - """ - - N = len(hk) - 1 - - if (J > 30 - np.log2(N + 1)): - raise ValueError("Too many levels.") - if (J < 1): - raise ValueError("Too few levels.") - - # construct matrices needed - nn, kk = np.ogrid[:N, :N] - s2 = np.sqrt(2) - # append a zero so that take works - thk = np.r_[hk, 0] - gk = qmf(hk) - tgk = np.r_[gk, 0] - - indx1 = np.clip(2 * nn - kk, -1, N + 1) - indx2 = np.clip(2 * nn - kk + 1, -1, N + 1) - m = np.zeros((2, 2, N, N), 'd') - m[0, 0] = np.take(thk, indx1, 0) - m[0, 1] = np.take(thk, indx2, 0) - m[1, 0] = np.take(tgk, indx1, 0) - m[1, 1] = np.take(tgk, indx2, 0) - m *= s2 - - # construct the grid of points - x = np.arange(0, N * (1 << J), dtype=np.float) / (1 << J) - phi = 0 * x - - psi = 0 * x - - # find phi0, and phi1 - lam, v = eig(m[0, 0]) - ind = np.argmin(np.absolute(lam - 1)) - # a dictionary with a binary representation of the - # evaluation points x < 1 -- i.e. 
position is 0.xxxx - v = np.real(v[:, ind]) - # need scaling function to integrate to 1 so find - # eigenvector normalized to sum(v,axis=0)=1 - sm = np.sum(v) - if sm < 0: # need scaling function to integrate to 1 - v = -v - sm = -sm - bitdic = {} - bitdic['0'] = v / sm - bitdic['1'] = np.dot(m[0, 1], bitdic['0']) - step = 1 << J - phi[::step] = bitdic['0'] - phi[(1 << (J - 1))::step] = bitdic['1'] - psi[::step] = np.dot(m[1, 0], bitdic['0']) - psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0']) - # descend down the levels inserting more and more values - # into bitdic -- store the values in the correct location once we - # have computed them -- stored in the dictionary - # for quicker use later. - prevkeys = ['1'] - for level in range(2, J + 1): - newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys] - fac = 1 << (J - level) - for key in newkeys: - # convert key to number - num = 0 - for pos in range(level): - if key[pos] == '1': - num += (1 << (level - 1 - pos)) - pastphi = bitdic[key[1:]] - ii = int(key[0]) - temp = np.dot(m[0, ii], pastphi) - bitdic[key] = temp - phi[num * fac::step] = temp - psi[num * fac::step] = np.dot(m[1, ii], pastphi) - prevkeys = newkeys - - return x, phi, psi - - -def morlet(M, w=5.0, s=1.0, complete=True): - """ - Complex Morlet wavelet. - - Parameters - ---------- - M : int - Length of the wavelet. - w : float - Omega0 - s : float - Scaling factor, windowed from -s*2*pi to +s*2*pi. - complete : bool - Whether to use the complete or the standard version. - - Notes - ----- - The standard version: - pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) - - This commonly used wavelet is often referred to simply as the - Morlet wavelet. Note that, this simplified version can cause - admissibility problems at low values of w. - - The complete version: - pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) - - The complete version of the Morlet wavelet, with a correction - term to improve admissibility. 
For w greater than 5, the - correction term is negligible. - - Note that the energy of the return wavelet is not normalised - according to s. - - The fundamental frequency of this wavelet in Hz is given - by f = 2*s*w*r / M where r is the sampling rate. - - """ - x = linspace(-s * 2 * pi, s * 2 * pi, M) - output = exp(1j * w * x) - - if complete: - output -= exp(-0.5 * (w ** 2)) - - output *= exp(-0.5 * (x ** 2)) * pi ** (-0.25) - - return output diff --git a/scipy-0.10.1/scipy/signal/windows.py b/scipy-0.10.1/scipy/signal/windows.py deleted file mode 100644 index 4fb3e4c4e7..0000000000 --- a/scipy-0.10.1/scipy/signal/windows.py +++ /dev/null @@ -1,505 +0,0 @@ -"""The suite of window functions.""" - -import numpy as np -from scipy import special, linalg -from scipy.fftpack import fft - -__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', - 'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann', - 'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin', - 'slepian', 'hann', 'get_window'] - - -def boxcar(M, sym=True): - """The M-point boxcar window. - - """ - return np.ones(M, float) - - -def triang(M, sym=True): - """The M-point triangular window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(1, int((M + 1) / 2) + 1) - if M % 2 == 0: - w = (2 * n - 1.0) / M - w = np.r_[w, w[::-1]] - else: - w = 2 * n / (M + 1.0) - w = np.r_[w, w[-2::-1]] - - if not sym and not odd: - w = w[:-1] - return w - - -def parzen(M, sym=True): - """The M-point Parzen window. 
- - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0) - na = np.extract(n < -(M - 1) / 4.0, n) - nb = np.extract(abs(n) <= (M - 1) / 4.0, n) - wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0 - wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 + - 6 * (np.abs(nb) / (M / 2.0)) ** 3.0) - w = np.r_[wa, wb, wa[::-1]] - if not sym and not odd: - w = w[:-1] - return w - - -def bohman(M, sym=True): - """The M-point Bohman window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - fac = np.abs(np.linspace(-1, 1, M)[1:-1]) - w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac) - w = np.r_[0, w, 0] - if not sym and not odd: - w = w[:-1] - return w - - -def blackman(M, sym=True): - """The M-point Blackman window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) + - 0.08 * np.cos(4.0 * np.pi * n / (M - 1))) - if not sym and not odd: - w = w[:-1] - return w - - -def nuttall(M, sym=True): - """A minimum 4-term Blackman-Harris window according to Nuttall. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - a = [0.3635819, 0.4891775, 0.1365995, 0.0106411] - n = np.arange(0, M) - fac = n * 2 * np.pi / (M - 1.0) - w = (a[0] - a[1] * np.cos(fac) + - a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac)) - if not sym and not odd: - w = w[:-1] - return w - - -def blackmanharris(M, sym=True): - """The M-point minimum 4-term Blackman-Harris window. 
- - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - a = [0.35875, 0.48829, 0.14128, 0.01168] - n = np.arange(0, M) - fac = n * 2 * np.pi / (M - 1.0) - w = (a[0] - a[1] * np.cos(fac) + - a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac)) - if not sym and not odd: - w = w[:-1] - return w - - -def flattop(M, sym=True): - """The M-point Flat top window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069] - n = np.arange(0, M) - fac = n * 2 * np.pi / (M - 1.0) - w = (a[0] - a[1] * np.cos(fac) + - a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) + - a[4] * np.cos(4 * fac)) - if not sym and not odd: - w = w[:-1] - return w - - -def bartlett(M, sym=True): - """The M-point Bartlett window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - w = np.where(np.less_equal(n, (M - 1) / 2.0), - 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1)) - if not sym and not odd: - w = w[:-1] - return w - - -def hanning(M, sym=True): - """The M-point Hanning window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) - if not sym and not odd: - w = w[:-1] - return w - -hann = hanning - - -def barthann(M, sym=True): - """Return the M-point modified Bartlett-Hann window. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - fac = np.abs(n / (M - 1.0) - 0.5) - w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac) - if not sym and not odd: - w = w[:-1] - return w - - -def hamming(M, sym=True): - """The M-point Hamming window. 
- - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1)) - if not sym and not odd: - w = w[:-1] - return w - - -def kaiser(M, beta, sym=True): - """Return a Kaiser window of length M with shape parameter beta. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - alpha = (M - 1) / 2.0 - w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) / - special.i0(beta)) - if not sym and not odd: - w = w[:-1] - return w - - -def gaussian(M, std, sym=True): - """Return a Gaussian window of length M with standard-deviation std. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - (M - 1.0) / 2.0 - sig2 = 2 * std * std - w = np.exp(-n ** 2 / sig2) - if not sym and not odd: - w = w[:-1] - return w - - -def general_gaussian(M, p, sig, sym=True): - """Return a window with a generalized Gaussian shape. - - The Gaussian shape is defined as ``exp(-0.5*(x/sig)**(2*p))``, the - half-power point is at ``(2*log(2)))**(1/(2*p)) * sig``. - - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - n = np.arange(0, M) - (M - 1.0) / 2.0 - w = np.exp(-0.5 * (n / sig) ** (2 * p)) - if not sym and not odd: - w = w[:-1] - return w - - -# `chebwin` contributed by Kumar Appaiah. - -def chebwin(M, at, sym=True): - """Dolph-Chebyshev window. - - Parameters - ---------- - M : int - Window size. - at : float - Attenuation (in dB). - sym : bool - Generates symmetric window if True. 
- - """ - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - - odd = M % 2 - if not sym and not odd: - M = M + 1 - - # compute the parameter beta - order = M - 1.0 - beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.))) - k = np.r_[0:M] * 1.0 - x = beta * np.cos(np.pi * k / M) - # Find the window's DFT coefficients - # Use analytic definition of Chebyshev polynomial instead of expansion - # from scipy.special. Using the expansion in scipy.special leads to errors. - p = np.zeros(x.shape) - p[x > 1] = np.cosh(order * np.arccosh(x[x > 1])) - p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1])) - p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1])) - - # Appropriate IDFT and filling up - # depending on even/odd M - if M % 2: - w = np.real(fft(p)) - n = (M + 1) / 2 - w = w[:n] / w[0] - w = np.concatenate((w[n - 1:0:-1], w)) - else: - p = p * np.exp(1.j * np.pi / M * np.r_[0:M]) - w = np.real(fft(p)) - n = M / 2 + 1 - w = w / w[1] - w = np.concatenate((w[n - 1:0:-1], w[1:n])) - if not sym and not odd: - w = w[:-1] - return w - - -def slepian(M, width, sym=True): - """Return the M-point slepian window. - - """ - if (M * width > 27.38): - raise ValueError("Cannot reliably obtain slepian sequences for" - " M*width > 27.38.") - if M < 1: - return np.array([]) - if M == 1: - return np.ones(1, 'd') - odd = M % 2 - if not sym and not odd: - M = M + 1 - - twoF = width / 2.0 - alpha = (M - 1) / 2.0 - m = np.arange(0, M) - alpha - n = m[:, np.newaxis] - k = m[np.newaxis, :] - AF = twoF * special.sinc(twoF * (n - k)) - [lam, vec] = linalg.eig(AF) - ind = np.argmax(abs(lam), axis=-1) - w = np.abs(vec[:, ind]) - w = w / max(w) - - if not sym and not odd: - w = w[:-1] - return w - - -def get_window(window, Nx, fftbins=True): - """ - Return a window of length `Nx` and type `window`. - - Parameters - ---------- - window : string, float, or tuple - The type of window to create. See below for more details. 
- Nx : int - The number of samples in the window. - fftbins : bool, optional - If True, create a "periodic" window ready to use with ifftshift - and be multiplied by the result of an fft (SEE ALSO fftfreq). - - Notes - ----- - Window types: - - boxcar, triang, blackman, hamming, hanning, bartlett, - parzen, bohman, blackmanharris, nuttall, barthann, - kaiser (needs beta), gaussian (needs std), - general_gaussian (needs power, width), - slepian (needs width), chebwin (needs attenuation) - - - If the window requires no parameters, then `window` can be a string. - - If the window requires parameters, then `window` must be a tuple - with the first argument the string name of the window, and the next - arguments the needed parameters. - - If `window` is a floating point number, it is interpreted as the beta - parameter of the kaiser window. - - Each of the window types listed above is also the name of - a function that can be called directly to create a window of - that type. - - Examples - -------- - >>> get_window('triang', 7) - array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25]) - >>> get_window(('kaiser', 4.0), 9) - array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. , - 0.89640418, 0.63343178, 0.32578323, 0.08848053]) - >>> get_window(4.0, 9) - array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. 
, - 0.89640418, 0.63343178, 0.32578323, 0.08848053]) - - """ - - sym = not fftbins - try: - beta = float(window) - except (TypeError, ValueError): - args = () - if isinstance(window, tuple): - winstr = window[0] - if len(window) > 1: - args = window[1:] - elif isinstance(window, str): - if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss', - 'general gaussian', 'general_gaussian', - 'general gauss', 'general_gauss', 'ggs', - 'slepian', 'optimal', 'slep', 'dss', - 'chebwin', 'cheb']: - raise ValueError("The '" + window + "' window needs one or " - "more parameters -- pass a tuple.") - else: - winstr = window - - if winstr in ['blackman', 'black', 'blk']: - winfunc = blackman - elif winstr in ['triangle', 'triang', 'tri']: - winfunc = triang - elif winstr in ['hamming', 'hamm', 'ham']: - winfunc = hamming - elif winstr in ['bartlett', 'bart', 'brt']: - winfunc = bartlett - elif winstr in ['hanning', 'hann', 'han']: - winfunc = hanning - elif winstr in ['blackmanharris', 'blackharr', 'bkh']: - winfunc = blackmanharris - elif winstr in ['parzen', 'parz', 'par']: - winfunc = parzen - elif winstr in ['bohman', 'bman', 'bmn']: - winfunc = bohman - elif winstr in ['nuttall', 'nutl', 'nut']: - winfunc = nuttall - elif winstr in ['barthann', 'brthan', 'bth']: - winfunc = barthann - elif winstr in ['flattop', 'flat', 'flt']: - winfunc = flattop - elif winstr in ['kaiser', 'ksr']: - winfunc = kaiser - elif winstr in ['gaussian', 'gauss', 'gss']: - winfunc = gaussian - elif winstr in ['general gaussian', 'general_gaussian', - 'general gauss', 'general_gauss', 'ggs']: - winfunc = general_gaussian - elif winstr in ['boxcar', 'box', 'ones']: - winfunc = boxcar - elif winstr in ['slepian', 'slep', 'optimal', 'dss']: - winfunc = slepian - elif winstr in ['chebwin', 'cheb']: - winfunc = chebwin - else: - raise ValueError("Unknown window type.") - - params = (Nx,) + args + (sym,) - else: - winfunc = kaiser - params = (Nx, beta, sym) - - return winfunc(*params) diff --git 
a/scipy-0.10.1/scipy/sparse/__init__.py b/scipy-0.10.1/scipy/sparse/__init__.py deleted file mode 100644 index e2ecb154bb..0000000000 --- a/scipy-0.10.1/scipy/sparse/__init__.py +++ /dev/null @@ -1,192 +0,0 @@ -""" -===================================== -Sparse matrices (:mod:`scipy.sparse`) -===================================== - -.. currentmodule:: scipy.sparse - -SciPy 2-D sparse matrix package. - -Contents -======== - -Sparse matrix classes ---------------------- - -.. autosummary:: - :toctree: generated/ - - bsr_matrix - Block Sparse Row matrix - coo_matrix - A sparse matrix in COOrdinate format - csc_matrix - Compressed Sparse Column matrix - csr_matrix - Compressed Sparse Row matrix - dia_matrix - Sparse matrix with DIAgonal storage - dok_matrix - Dictionary Of Keys based sparse matrix - lil_matrix - Row-based linked list sparse matrix - -Functions ---------- - -Building sparse matrices: - -.. autosummary:: - :toctree: generated/ - - eye - Sparse MxN matrix whose k-th diagonal is all ones - identity - Identity matrix in sparse format - kron - kronecker product of two sparse matrices - kronsum - kronecker sum of sparse matrices - spdiags - Return a sparse matrix from diagonals - tril - Lower triangular portion of a matrix in sparse format - triu - Upper triangular portion of a matrix in sparse format - bmat - Build a sparse matrix from sparse sub-blocks - hstack - Stack sparse matrices horizontally (column wise) - vstack - Stack sparse matrices vertically (row wise) - rand - Random values in a given shape - -Identifying sparse matrices: - -.. autosummary:: - :toctree: generated/ - - issparse - isspmatrix - isspmatrix_csc - isspmatrix_csr - isspmatrix_bsr - isspmatrix_lil - isspmatrix_dok - isspmatrix_coo - isspmatrix_dia - -Graph algorithms: - -.. autosummary:: - :toctree: generated/ - - cs_graph_components -- Determine connected components of a graph - -Exceptions ----------- - -.. 
autosummary:: - :toctree: generated/ - - SparseEfficiencyWarning - SparseWarning - - -Usage information -================= - -There are seven available sparse matrix types: - - 1. csc_matrix: Compressed Sparse Column format - 2. csr_matrix: Compressed Sparse Row format - 3. bsr_matrix: Block Sparse Row format - 4. lil_matrix: List of Lists format - 5. dok_matrix: Dictionary of Keys format - 6. coo_matrix: COOrdinate format (aka IJV, triplet format) - 7. dia_matrix: DIAgonal format - -To construct a matrix efficiently, use either lil_matrix (recommended) or -dok_matrix. The lil_matrix class supports basic slicing and fancy -indexing with a similar syntax to NumPy arrays. As illustrated below, -the COO format may also be used to efficiently construct matrices. - -To perform manipulations such as multiplication or inversion, first -convert the matrix to either CSC or CSR format. The lil_matrix format is -row-based, so conversion to CSR is efficient, whereas conversion to CSC -is less so. - -All conversions among the CSR, CSC, and COO formats are efficient, -linear-time operations. 
- -Example 1 ---------- -Construct a 1000x1000 lil_matrix and add some values to it: - ->>> from scipy.sparse import lil_matrix ->>> from scipy.sparse.linalg import spsolve ->>> from numpy.linalg import solve, norm ->>> from numpy.random import rand - ->>> A = lil_matrix((1000, 1000)) ->>> A[0, :100] = rand(100) ->>> A[1, 100:200] = A[0, :100] ->>> A.setdiag(rand(1000)) - -Now convert it to CSR format and solve A x = b for x: - ->>> A = A.tocsr() ->>> b = rand(1000) ->>> x = spsolve(A, b) - -Convert it to a dense matrix and solve, and check that the result -is the same: - ->>> x_ = solve(A.todense(), b) - -Now we can compute norm of the error with: - ->>> err = norm(x-x_) ->>> err < 1e-10 -True - -It should be small :) - - -Example 2 ---------- - -Construct a matrix in COO format: - ->>> from scipy import sparse ->>> from numpy import array ->>> I = array([0,3,1,0]) ->>> J = array([0,3,1,2]) ->>> V = array([4,5,7,9]) ->>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4)) - -Notice that the indices do not need to be sorted. - -Duplicate (i,j) entries are summed when converting to CSR or CSC. - ->>> I = array([0,0,1,3,1,0,0]) ->>> J = array([0,2,1,3,1,0,0]) ->>> V = array([1,1,1,1,1,1,1]) ->>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr() - -This is useful for constructing finite-element stiffness and mass matrices. - -Further Details ---------------- - -CSR column indices are not necessarily sorted. Likewise for CSC row -indices. Use the .sorted_indices() and .sort_indices() methods when -sorted indices are required (e.g. when passing data to other libraries). - -""" - -# Original code by Travis Oliphant. -# Modified and extended by Ed Schofield, Robert Cimrman, and Nathan Bell. 
- -from base import * -from csr import * -from csc import * -from lil import * -from dok import * -from coo import * -from dia import * -from bsr import * -from csgraph import * - -from construct import * -from extract import * - -#from spfuncs import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/sparse/base.py b/scipy-0.10.1/scipy/sparse/base.py deleted file mode 100644 index 5b2076d2f4..0000000000 --- a/scipy-0.10.1/scipy/sparse/base.py +++ /dev/null @@ -1,556 +0,0 @@ -"""Base class for sparse matrices""" - -__all__ = ['spmatrix', 'isspmatrix', 'issparse', - 'SparseWarning','SparseEfficiencyWarning'] - -from warnings import warn - -import numpy as np - -from sputils import isdense, isscalarlike, isintlike - - -class SparseWarning(Warning): pass -class SparseFormatWarning(SparseWarning): pass -class SparseEfficiencyWarning(SparseWarning): pass - - -# The formats that we might potentially understand. -_formats = {'csc':[0, "Compressed Sparse Column"], - 'csr':[1, "Compressed Sparse Row"], - 'dok':[2, "Dictionary Of Keys"], - 'lil':[3, "LInked List"], - 'dod':[4, "Dictionary of Dictionaries"], - 'sss':[5, "Symmetric Sparse Skyline"], - 'coo':[6, "COOrdinate"], - 'lba':[7, "Linpack BAnded"], - 'egd':[8, "Ellpack-itpack Generalized Diagonal"], - 'dia':[9, "DIAgonal"], - 'bsr':[10, "Block Sparse Row"], - 'msr':[11, "Modified compressed Sparse Row"], - 'bsc':[12, "Block Sparse Column"], - 'msc':[13, "Modified compressed Sparse Column"], - 'ssk':[14, "Symmetric SKyline"], - 'nsk':[15, "Nonsymmetric SKyline"], - 'jad':[16, "JAgged Diagonal"], - 'uss':[17, "Unsymmetric Sparse Skyline"], - 'vbr':[18, "Variable Block Row"], - 'und':[19, "Undefined"] - } - - -MAXPRINT = 50 - -class spmatrix(object): - """ This class provides a base class for all sparse matrices. It - cannot be instantiated. Most of the work is provided by subclasses. 
- """ - - __array_priority__ = 10.1 - ndim = 2 - def __init__(self, maxprint=MAXPRINT): - self.format = self.__class__.__name__[:3] - self._shape = None - if self.format == 'spm': - raise ValueError("This class is not intended" - " to be instantiated directly.") - self.maxprint = maxprint - - def set_shape(self,shape): - shape = tuple(shape) - - if len(shape) != 2: - raise ValueError("Only two-dimensional sparse arrays " - "are supported.") - try: - shape = int(shape[0]),int(shape[1]) #floats, other weirdness - except: - raise TypeError('invalid shape') - - if not (shape[0] >= 1 and shape[1] >= 1): - raise ValueError('invalid shape') - - if (self._shape != shape) and (self._shape is not None): - try: - self = self.reshape(shape) - except NotImplementedError: - raise NotImplementedError("Reshaping not implemented for %s." % - self.__class__.__name__) - self._shape = shape - - def get_shape(self): - return self._shape - - shape = property(fget=get_shape, fset=set_shape) - - def reshape(self,shape): - raise NotImplementedError - - def astype(self, t): - return self.tocsr().astype(t).asformat(self.format) - - def asfptype(self): - """Upcast matrix to a floating point format (if necessary)""" - - fp_types = ['f','d','F','D'] - - if self.dtype.char in fp_types: - return self - else: - for fp_type in fp_types: - if self.dtype <= np.dtype(fp_type): - return self.astype(fp_type) - - raise TypeError('cannot upcast [%s] to a floating ' - 'point format' % self.dtype.name) - - def __iter__(self): - for r in xrange(self.shape[0]): - yield self[r,:] - - def getmaxprint(self): - try: - maxprint = self.maxprint - except AttributeError: - maxprint = MAXPRINT - return maxprint - - #def typecode(self): - # try: - # typ = self.dtype.char - # except AttributeError: - # typ = None - # return typ - - def getnnz(self): - try: - return self.nnz - except AttributeError: - raise AttributeError("nnz not defined") - - def getformat(self): - try: - format = self.format - except AttributeError: - 
format = 'und' - return format - - def __repr__(self): - nnz = self.getnnz() - format = self.getformat() - return "<%dx%d sparse matrix of type '%s'\n" \ - "\twith %d stored elements in %s format>" % \ - (self.shape + (self.dtype.type, nnz, _formats[format][1])) - - def __str__(self): - maxprint = self.getmaxprint() - - A = self.tocoo() - nnz = self.getnnz() - - # helper function, outputs "(i,j) v" - def tostr(row,col,data): - triples = zip(zip(row,col),data) - return '\n'.join( [ (' %s\t%s' % t) for t in triples] ) - - if nnz > maxprint: - half = maxprint // 2 - out = tostr(A.row[:half], A.col[:half], A.data[:half]) - out += "\n :\t:\n" - half = maxprint - maxprint//2 - out += tostr(A.row[-half:], A.col[-half:], A.data[-half:]) - else: - out = tostr(A.row, A.col, A.data) - - return out - - def __nonzero__(self): # Simple -- other ideas? - return self.getnnz() > 0 - - # What should len(sparse) return? For consistency with dense matrices, - # perhaps it should be the number of rows? But for some uses the number of - # non-zeros is more important. For now, raise an exception! - def __len__(self): - # return self.getnnz() - raise TypeError("sparse matrix length is ambiguous; use getnnz()" - " or shape[0]") - - def asformat(self, format): - """Return this matrix in a given sparse format - - Parameters - ---------- - format : {string, None} - desired sparse matrix format - - None for no format conversion - - "csr" for csr_matrix format - - "csc" for csc_matrix format - - "lil" for lil_matrix format - - "dok" for dok_matrix format and so on - - """ - - if format is None or format == self.format: - return self - else: - return getattr(self,'to' + format)() - - ################################################################### - # NOTE: All arithmetic operations use csr_matrix by default. - # Therefore a new sparse matrix format just needs to define a - # .tocsr() method to provide arithmetic support. Any of these - # methods can be overridden for efficiency. 
- #################################################################### - - def multiply(self, other): - """Point-wise multiplication by another matrix - """ - return self.tocsr().multiply(other) - - def dot(self, other): - return self * other - - def __abs__(self): - return abs(self.tocsr()) - - def __add__(self, other): # self + other - return self.tocsr().__add__(other) - - def __radd__(self, other): # other + self - return self.tocsr().__radd__(other) - - def __sub__(self, other): # self - other - #note: this can't be replaced by self + (-other) for unsigned types - return self.tocsr().__sub__(other) - - def __rsub__(self, other): # other - self - return self.tocsr().__rsub__(other) - - def __mul__(self, other): - """interpret other and call one of the following - - self._mul_scalar() - self._mul_vector() - self._mul_multivector() - self._mul_sparse_matrix() - """ - - M,N = self.shape - - if isscalarlike(other): - # scalar value - return self._mul_scalar(other) - - if issparse(other): - if self.shape[1] != other.shape[0]: - raise ValueError('dimension mismatch') - return self._mul_sparse_matrix(other) - - try: - other.shape - except AttributeError: - # If it's a list or whatever, treat it like a matrix - other = np.asanyarray(other) - - other = np.asanyarray(other) - - if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1: - # dense row or column vector - if other.shape != (N,) and other.shape != (N,1): - raise ValueError('dimension mismatch') - - result = self._mul_vector(np.ravel(other)) - - if isinstance(other, np.matrix): - result = np.asmatrix(result) - - if other.ndim == 2 and other.shape[1] == 1: - # If 'other' was an (nx1) column vector, reshape the result - result = result.reshape(-1,1) - - return result - - elif other.ndim == 2: - ## - # dense 2D array or matrix ("multivector") - - if other.shape[0] != self.shape[1]: - raise ValueError('dimension mismatch') - - result = self._mul_multivector(np.asarray(other)) - - if isinstance(other, 
np.matrix): - result = np.asmatrix(result) - - return result - else: - raise ValueError('could not interpret dimensions') - - # by default, use CSR for __mul__ handlers - def _mul_scalar(self, other): - return self.tocsr()._mul_scalar(other) - - def _mul_vector(self, other): - return self.tocsr()._mul_vector(other) - - def _mul_multivector(self, other): - return self.tocsr()._mul_multivector(other) - - def _mul_sparse_matrix(self, other): - return self.tocsr()._mul_sparse_matrix(other) - - def __rmul__(self, other): # other * self - if isscalarlike(other): - return self.__mul__(other) - else: - # Don't use asarray unless we have to - try: - tr = other.transpose() - except AttributeError: - tr = np.asarray(other).transpose() - return (self.transpose() * tr).transpose() - - #################### - # Other Arithmetic # - #################### - - def __truediv__(self, other): - if isscalarlike(other): - return self * (1./other) - else: - return self.tocsr().__truediv__(other) - - def __div__(self, other): - # Always do true division - return self.__truediv__(other) - - def __neg__(self): - return -self.tocsr() - - def __iadd__(self, other): - raise NotImplementedError - - def __isub__(self, other): - raise NotImplementedError - - def __imul__(self, other): - raise NotImplementedError - - def __idiv__(self, other): - return self.__itruediv__(other) - - def __itruediv__(self, other): - raise NotImplementedError - - def __pow__(self, other): - if self.shape[0] != self.shape[1]: - raise TypeError('matrix is not square') - - if isintlike(other): - other = int(other) - if other < 0: - raise ValueError('exponent must be >= 0') - - if other == 0: - from construct import identity - return identity( self.shape[0], dtype=self.dtype ) - elif other == 1: - return self.copy() - else: - result = self - for i in range(1,other): - result = result*self - return result - elif isscalarlike(other): - raise ValueError('exponent must be an integer') - else: - raise NotImplementedError - - - 
def __getattr__(self, attr): - if attr == 'A': - return self.toarray() - elif attr == 'T': - return self.transpose() - elif attr == 'H': - return self.getH() - elif attr == 'real': - return self._real() - elif attr == 'imag': - return self._imag() - elif attr == 'size': - return self.getnnz() - else: - raise AttributeError(attr + " not found") - - def transpose(self): - return self.tocsr().transpose() - - def conj(self): - return self.tocsr().conj() - - def conjugate(self): - return self.conj() - - # Renamed conjtranspose() -> getH() for compatibility with dense matrices - def getH(self): - return self.transpose().conj() - - def _real(self): - return self.tocsr()._real() - - def _imag(self): - return self.tocsr()._imag() - - - def nonzero(self): - """nonzero indices - - Returns a tuple of arrays (row,col) containing the indices - of the non-zero elements of the matrix. - - Examples - -------- - >>> from scipy.sparse import csr_matrix - >>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]]) - >>> A.nonzero() - (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2])) - - """ - - # convert to COOrdinate format - A = self.tocoo() - nz_mask = A.data != 0 - return (A.row[nz_mask],A.col[nz_mask]) - - - def getcol(self, j): - """Returns a copy of column j of the matrix, as an (m x 1) sparse - matrix (column vector). - """ - # Spmatrix subclasses should override this method for efficiency. - # Post-multiply by a (n x 1) column vector 'a' containing all zeros - # except for a_j = 1 - from csc import csc_matrix - n = self.shape[1] - if j < 0: - j += n - if j < 0 or j >= n: - raise IndexError("index out of bounds") - col_selector = csc_matrix(([1], [[j], [0]]), shape=(n,1), dtype=self.dtype) - return self * col_selector - - def getrow(self, i): - """Returns a copy of row i of the matrix, as a (1 x n) sparse - matrix (row vector). - """ - # Spmatrix subclasses should override this method for efficiency. 
- # Pre-multiply by a (1 x m) row vector 'a' containing all zeros - # except for a_i = 1 - from csr import csr_matrix - m = self.shape[0] - if i < 0: - i += m - if i < 0 or i >= m: - raise IndexError("index out of bounds") - row_selector = csr_matrix(([1], [[0], [i]]), shape=(1,m), dtype=self.dtype) - return row_selector * self - - #def __array__(self): - # return self.toarray() - - def todense(self): - return np.asmatrix(self.toarray()) - - def toarray(self): - return self.tocoo().toarray() - - def todok(self): - return self.tocoo().todok() - - def tocoo(self): - return self.tocsr().tocoo() - - def tolil(self): - return self.tocsr().tolil() - - def todia(self): - return self.tocoo().todia() - - def tobsr(self, blocksize=None): - return self.tocsr().tobsr(blocksize=blocksize) - - def copy(self): - return self.__class__(self,copy=True) - - def sum(self, axis=None): - """Sum the matrix over the given axis. If the axis is None, sum - over both rows and columns, returning a scalar. - """ - # We use multiplication by an array of ones to achieve this. - # For some sparse matrix formats more efficient methods are - # possible -- these should override this function. - m, n = self.shape - if axis == 0: - # sum over columns - return np.asmatrix(np.ones((1, m), dtype=self.dtype)) * self - elif axis == 1: - # sum over rows - return self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) - elif axis is None: - # sum over rows and columns - return ( self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) ).sum() - else: - raise ValueError("axis out of bounds") - - def mean(self, axis=None): - """Average the matrix over the given axis. If the axis is None, - average over both rows and columns, returning a scalar. 
- """ - if axis == 0: - mean = self.sum(0) - mean *= 1.0 / self.shape[0] - return mean - elif axis == 1: - mean = self.sum(1) - mean *= 1.0 / self.shape[1] - return mean - elif axis is None: - return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1]) - else: - raise ValueError("axis out of bounds") - - def diagonal(self): - """Returns the main diagonal of the matrix - """ - #TODO support k != 0 - return self.tocsr().diagonal() - - def setdiag(self, values, k=0): - """Fills the diagonal elements {a_ii} with the values from the - given sequence. If k != 0, fills the off-diagonal elements - {a_{i,i+k}} instead. - - values may have any length. If the diagonal is longer than values, - then the remaining diagonal entries will not be set. If values if - longer than the diagonal, then the remaining values are ignored. - """ - M, N = self.shape - if (k > 0 and k >= N) or (k < 0 and -k >= M): - raise ValueError("k exceedes matrix dimensions") - if k < 0: - max_index = min(M+k, N, len(values)) - for i,v in enumerate(values[:max_index]): - self[i - k, i] = v - else: - max_index = min(M, N-k, len(values)) - for i,v in enumerate(values[:max_index]): - self[i, i + k] = v - - -from sputils import _isinstance - -def isspmatrix(x): - return _isinstance(x, spmatrix) - -issparse = isspmatrix diff --git a/scipy-0.10.1/scipy/sparse/benchmarks/bench_sparse.py b/scipy-0.10.1/scipy/sparse/benchmarks/bench_sparse.py deleted file mode 100644 index 412b3c34aa..0000000000 --- a/scipy-0.10.1/scipy/sparse/benchmarks/bench_sparse.py +++ /dev/null @@ -1,320 +0,0 @@ -"""general tests and simple benchmarks for the sparse module""" - -import time - -import numpy -from numpy import ones, array, asarray, empty - -from numpy.testing import * - -from scipy import sparse -from scipy.sparse import csr_matrix, coo_matrix, dia_matrix, lil_matrix, \ - dok_matrix - - -def random_sparse(m,n,nnz_per_row): - rows = numpy.arange(m).repeat(nnz_per_row) - cols = 
numpy.random.random_integers(low=0,high=n-1,size=nnz_per_row*m) - vals = numpy.random.random_sample(m*nnz_per_row) - return coo_matrix((vals,(rows,cols)),(m,n)).tocsr() - - -#TODO move this to a matrix gallery and add unittests -def poisson2d(N,dtype='d',format=None): - """ - Return a sparse matrix for the 2d poisson problem - with standard 5-point finite difference stencil on a - square N-by-N grid. - """ - if N == 1: - diags = asarray( [[4]],dtype=dtype) - return dia_matrix((diags,[0]), shape=(1,1)).asformat(format) - - offsets = array([0,-N,N,-1,1]) - - diags = empty((5,N**2),dtype=dtype) - - diags[0] = 4 #main diagonal - diags[1:] = -1 #all offdiagonals - - diags[3,N-1::N] = 0 #first lower diagonal - diags[4,N::N] = 0 #first upper diagonal - - return dia_matrix((diags,offsets),shape=(N**2,N**2)).asformat(format) - -class BenchmarkSparse(TestCase): - """Simple benchmarks for sparse matrix module""" - - def bench_arithmetic(self): - matrices = [] - #matrices.append( ('A','Identity', sparse.identity(500**2,format='csr')) ) - matrices.append( ('A','Poisson5pt', poisson2d(250,format='csr')) ) - matrices.append( ('B','Poisson5pt^2', poisson2d(250,format='csr')**2) ) - - print - print ' Sparse Matrix Arithmetic' - print '====================================================================' - print ' var | name | shape | dtype | nnz ' - print '--------------------------------------------------------------------' - fmt = ' %1s | %14s | %20s | %9s | %8d ' - - for var,name,mat in matrices: - name = name.center(14) - shape = ("%s" % (mat.shape,)).center(20) - dtype = mat.dtype.name.center(9) - print fmt % (var,name,shape,dtype,mat.nnz) - - space = ' ' * 10 - print - print space+' Timings' - print space+'==========================================' - print space+' format | operation | time (msec) ' - print space+'------------------------------------------' - fmt = space+' %3s | %17s | %7.1f ' - - for format in ['csr']: - vars = dict( [(var,mat.asformat(format)) for 
(var,name,mat) in matrices ] ) - for X,Y in [ ('A','A'),('A','B'),('B','A'),('B','B') ]: - x,y = vars[X],vars[Y] - for op in ['__add__','__sub__','multiply','__div__','__mul__']: - fn = getattr(x,op) - fn(y) #warmup - - start = time.clock() - iter = 0 - while iter < 3 or time.clock() < start + 0.5: - fn(y) - iter += 1 - end = time.clock() - - msec_per_it = 1000*(end - start)/float(iter) - operation = (X + '.' + op + '(' + Y + ')').center(17) - print fmt % (format,operation,msec_per_it) - - - def bench_sort(self): - """sort CSR column indices""" - matrices = [] - matrices.append( ('Rand10', 1e4, 10) ) - matrices.append( ('Rand25', 1e4, 25) ) - matrices.append( ('Rand50', 1e4, 50) ) - matrices.append( ('Rand100', 1e4, 100) ) - matrices.append( ('Rand200', 1e4, 200) ) - - print - print ' Sparse Matrix Index Sorting' - print '=====================================================================' - print ' type | name | shape | nnz | time (msec) ' - print '---------------------------------------------------------------------' - fmt = ' %3s | %12s | %20s | %8d | %6.2f ' - - for name,N,K in matrices: - N = int(N) - A = random_sparse(N,N,K) - - start = time.clock() - iter = 0 - while iter < 5 and time.clock() - start < 1: - A.has_sorted_indices = False - A.indices[:2] = 2,1 - A.sort_indices() - iter += 1 - end = time.clock() - - name = name.center(12) - shape = ("%s" % (A.shape,)).center(20) - - print fmt % (A.format,name,shape,A.nnz,1e3*(end-start)/float(iter) ) - - def bench_matvec(self): - matrices = [] - matrices.append(('Identity', sparse.identity(10**4,format='dia'))) - matrices.append(('Identity', sparse.identity(10**4,format='csr'))) - matrices.append(('Poisson5pt', poisson2d(300,format='lil'))) - matrices.append(('Poisson5pt', poisson2d(300,format='dok'))) - matrices.append(('Poisson5pt', poisson2d(300,format='dia'))) - matrices.append(('Poisson5pt', poisson2d(300,format='coo'))) - matrices.append(('Poisson5pt', poisson2d(300,format='csr'))) - 
matrices.append(('Poisson5pt', poisson2d(300,format='csc'))) - matrices.append(('Poisson5pt', poisson2d(300,format='bsr'))) - - A = sparse.kron(poisson2d(150),ones((2,2))).tobsr(blocksize=(2,2)) - matrices.append( ('Block2x2', A.tocsr()) ) - matrices.append( ('Block2x2', A) ) - - A = sparse.kron(poisson2d(100),ones((3,3))).tobsr(blocksize=(3,3)) - matrices.append( ('Block3x3', A.tocsr()) ) - matrices.append( ('Block3x3', A) ) - - print - print ' Sparse Matrix Vector Product' - print '==================================================================' - print ' type | name | shape | nnz | MFLOPs ' - print '------------------------------------------------------------------' - fmt = ' %3s | %12s | %20s | %8d | %6.1f ' - - for name,A in matrices: - x = ones(A.shape[1],dtype=A.dtype) - - y = A*x #warmup - - start = time.clock() - iter = 0 - while iter < 5 or time.clock() < start + 1: - y = A*x - iter += 1 - end = time.clock() - - del y - - name = name.center(12) - shape = ("%s" % (A.shape,)).center(20) - MFLOPs = (2*A.nnz*iter/(end-start))/float(1e6) - - print fmt % (A.format,name,shape,A.nnz,MFLOPs) - - def bench_matvecs(self): - matrices = [] - matrices.append(('Poisson5pt', poisson2d(300,format='dia'))) - matrices.append(('Poisson5pt', poisson2d(300,format='coo'))) - matrices.append(('Poisson5pt', poisson2d(300,format='csr'))) - matrices.append(('Poisson5pt', poisson2d(300,format='csc'))) - matrices.append(('Poisson5pt', poisson2d(300,format='bsr'))) - - - n_vecs = 10 - - print - print ' Sparse Matrix (Block) Vector Product' - print ' Blocksize = %d' % (n_vecs,) - print '==================================================================' - print ' type | name | shape | nnz | MFLOPs ' - print '------------------------------------------------------------------' - fmt = ' %3s | %12s | %20s | %8d | %6.1f ' - - for name,A in matrices: - x = ones((A.shape[1],10),dtype=A.dtype) - - y = A*x #warmup - - start = time.clock() - iter = 0 - while iter < 5 or time.clock() < start 
+ 1: - y = A*x - iter += 1 - end = time.clock() - - del y - - name = name.center(12) - shape = ("%s" % (A.shape,)).center(20) - MFLOPs = (2*n_vecs*A.nnz*iter/(end-start))/float(1e6) - - print fmt % (A.format,name,shape,A.nnz,MFLOPs) - - - def bench_construction(self): - """build matrices by inserting single values""" - matrices = [] - matrices.append( ('Empty',csr_matrix((10000,10000))) ) - matrices.append( ('Identity',sparse.identity(10000)) ) - matrices.append( ('Poisson5pt', poisson2d(100)) ) - - print - print ' Sparse Matrix Construction' - print '====================================================================' - print ' type | name | shape | nnz | time (sec) ' - print '--------------------------------------------------------------------' - fmt = ' %3s | %12s | %20s | %8d | %6.4f ' - - for name,A in matrices: - A = A.tocoo() - - for format in ['lil','dok']: - - start = time.clock() - - iter = 0 - while time.clock() < start + 0.5: - T = eval(format + '_matrix')(A.shape) - for i,j,v in zip(A.row,A.col,A.data): - T[i,j] = v - iter += 1 - end = time.clock() - - del T - name = name.center(12) - shape = ("%s" % (A.shape,)).center(20) - - print fmt % (format,name,shape,A.nnz,(end-start)/float(iter)) - - def bench_conversion(self): - A = poisson2d(100) - - formats = ['csr','csc','coo','dia','lil','dok'] - - print - print ' Sparse Matrix Conversion' - print '====================================================================' - print ' format | tocsr() | tocsc() | tocoo() | todia() | tolil() | todok() ' - print '--------------------------------------------------------------------' - - for fromfmt in formats: - base = getattr(A,'to' + fromfmt)() - - times = [] - - for tofmt in formats: - try: - fn = getattr(base,'to' + tofmt) - except: - times.append(None) - else: - x = fn() #warmup - start = time.clock() - iter = 0 - while time.clock() < start + 0.2: - x = fn() - iter += 1 - end = time.clock() - del x - times.append( (end - start)/float(iter)) - - output = " %3s " 
% fromfmt - for t in times: - if t is None: - output += '| n/a ' - else: - output += '| %5.1fms ' % (1000*t) - print output - - -#class TestLarge(TestCase): -# def bench_large(self): -# # Create a 100x100 matrix with 100 non-zero elements -# # and play around with it -# #TODO move this out of Common since it doesn't use spmatrix -# random.seed(0) -# A = dok_matrix((100,100)) -# for k in range(100): -# i = random.randrange(100) -# j = random.randrange(100) -# A[i,j] = 1. -# csr = A.tocsr() -# csc = A.tocsc() -# csc2 = csr.tocsc() -# coo = A.tocoo() -# csr2 = coo.tocsr() -# assert_array_equal(A.transpose().todense(), csr.transpose().todense()) -# assert_array_equal(csc.todense(), csr.todense()) -# assert_array_equal(csr.todense(), csr2.todense()) -# assert_array_equal(csr2.todense().transpose(), coo.transpose().todense()) -# assert_array_equal(csr2.todense(), csc2.todense()) -# csr_plus_csc = csr + csc -# csc_plus_csr = csc + csr -# assert_array_equal(csr_plus_csc.todense(), (2*A).todense()) -# assert_array_equal(csr_plus_csc.todense(), csc_plus_csr.todense()) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/bento.info b/scipy-0.10.1/scipy/sparse/bento.info deleted file mode 100644 index 991bcf7de5..0000000000 --- a/scipy-0.10.1/scipy/sparse/bento.info +++ /dev/null @@ -1,8 +0,0 @@ -Recurse: - linalg, - sparsetools - -Library: - Packages: - linalg, - sparsetools diff --git a/scipy-0.10.1/scipy/sparse/bsr.py b/scipy-0.10.1/scipy/sparse/bsr.py deleted file mode 100644 index bbcdb40116..0000000000 --- a/scipy-0.10.1/scipy/sparse/bsr.py +++ /dev/null @@ -1,574 +0,0 @@ -"""Compressed Block Sparse Row matrix format""" - -__docformat__ = "restructuredtext en" - -__all__ = ['bsr_matrix', 'isspmatrix_bsr'] - -from warnings import warn - -import numpy as np - -from data import _data_matrix -from compressed import _cs_matrix -from base import isspmatrix, _formats -from sputils import isshape, getdtype, to_native, upcast -import 
sparsetools -from sparsetools import bsr_matvec, bsr_matvecs, csr_matmat_pass1, \ - bsr_matmat_pass2, bsr_transpose, bsr_sort_indices - -class bsr_matrix(_cs_matrix): - """Block Sparse Row matrix - - This can be instantiated in several ways: - bsr_matrix(D, [blocksize=(R,C)]) - with a dense matrix or rank-2 ndarray D - - bsr_matrix(S, [blocksize=(R,C)]) - with another sparse matrix S (equivalent to S.tobsr()) - - bsr_matrix((M, N), [blocksize=(R,C), dtype]) - to construct an empty matrix with shape (M, N) - dtype is optional, defaulting to dtype='d'. - - bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)]) - where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` - - bsr_matrix((data, indices, indptr), [shape=(M, N)]) - is the standard BSR representation where the block column - indices for row i are stored in ``indices[indptr[i]:indices[i+1]]`` - and their corresponding block values are stored in - ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not - supplied, the matrix dimensions are inferred from the index arrays. - - Attributes - ---------- - dtype : dtype - Data type of the matrix - shape : 2-tuple - Shape of the matrix - ndim : int - Number of dimensions (this is always 2) - nnz - Number of nonzero elements - data - Data array of the matrix - indices - BSR format index array - indptr - BSR format index pointer array - blocksize - Block size of the matrix - has_sorted_indices - Whether indices are sorted - - Notes - ----- - - Sparse matrices can be used in arithmetic operations: they support - addition, subtraction, multiplication, division, and matrix power. - - Summary of BSR format: - - - The Block Compressed Row (BSR) format is very similar to the - Compressed Sparse Row (CSR) format. BSR is appropriate for - sparse matrices with dense sub matrices like the last example - below. Block matrices often arise in vector-valued finite - element discretizations. 
In such cases, BSR is considerably - more efficient than CSR and CSC for many sparse arithmetic - operations. - - Blocksize - - The blocksize (R,C) must evenly divide the shape of - the matrix (M,N). That is, R and C must satisfy the - relationship M % R = 0 and N % C = 0. - - If no blocksize is specified, a simple heuristic is applied - to determine an appropriate blocksize. - - - - Examples - -------- - - >>> from scipy.sparse import * - >>> from scipy import * - >>> bsr_matrix( (3,4), dtype=int8 ).todense() - matrix([[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], dtype=int8) - - >>> row = array([0,0,1,2,2,2]) - >>> col = array([0,2,2,0,1,2]) - >>> data = array([1,2,3,4,5,6]) - >>> bsr_matrix( (data,(row,col)), shape=(3,3) ).todense() - matrix([[1, 0, 2], - [0, 0, 3], - [4, 5, 6]]) - - >>> indptr = array([0,2,3,6]) - >>> indices = array([0,2,2,0,1,2]) - >>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2) - >>> bsr_matrix( (data,indices,indptr), shape=(6,6) ).todense() - matrix([[1, 1, 0, 0, 2, 2], - [1, 1, 0, 0, 2, 2], - [0, 0, 0, 0, 3, 3], - [0, 0, 0, 0, 3, 3], - [4, 4, 5, 5, 6, 6], - [4, 4, 5, 5, 6, 6]]) - - """ - def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): - _data_matrix.__init__(self) - - - if isspmatrix(arg1): - if isspmatrix_bsr(arg1) and copy: - arg1 = arg1.copy() - else: - arg1 = arg1.tobsr(blocksize=blocksize) - self._set_self( arg1 ) - - elif isinstance(arg1,tuple): - if isshape(arg1): - #it's a tuple of matrix dimensions (M,N) - self.shape = arg1 - M,N = self.shape - #process blocksize - if blocksize is None: - blocksize = (1,1) - else: - if not isshape(blocksize): - raise ValueError('invalid blocksize=%s' % blocksize) - blocksize = tuple(blocksize) - self.data = np.zeros( (0,) + blocksize, getdtype(dtype, default=float) ) - self.indices = np.zeros( 0, dtype=np.intc ) - - R,C = blocksize - if (M % R) != 0 or (N % C) != 0: - raise ValueError('shape must be multiple of blocksize') - - self.indptr = np.zeros(M//R + 
1, dtype=np.intc ) - - elif len(arg1) == 2: - # (data,(row,col)) format - from coo import coo_matrix - self._set_self( coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize) ) - - elif len(arg1) == 3: - # (data,indices,indptr) format - (data, indices, indptr) = arg1 - self.indices = np.array(indices, copy=copy) - self.indptr = np.array(indptr, copy=copy) - self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data)) - else: - raise ValueError('unrecognized bsr_matrix constructor usage') - else: - #must be dense - try: - arg1 = np.asarray(arg1) - except: - raise ValueError("unrecognized form for" \ - " %s_matrix constructor" % self.format) - from coo import coo_matrix - arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize) - self._set_self( arg1 ) - - if shape is not None: - self.shape = shape # spmatrix will check for errors - else: - if self.shape is None: - # shape not already set, try to infer dimensions - try: - M = len(self.indptr) - 1 - N = self.indices.max() + 1 - except: - raise ValueError('unable to infer matrix dimensions') - else: - R,C = self.blocksize - self.shape = (M*R,N*C) - - if self.shape is None: - if shape is None: - #TODO infer shape here - raise ValueError('need to infer shape') - else: - self.shape = shape - - if dtype is not None: - self.data = self.data.astype(dtype) - - self.check_format(full_check=False) - - def check_format(self, full_check=True): - """check whether the matrix format is valid - - *Parameters*: - full_check: - True - rigorous check, O(N) operations : default - False - basic check, O(1) operations - - """ - M,N = self.shape - R,C = self.blocksize - - # index arrays should have integer data types - if self.indptr.dtype.kind != 'i': - warn("indptr array has non-integer dtype (%s)" \ - % self.indptr.dtype.name ) - if self.indices.dtype.kind != 'i': - warn("indices array has non-integer dtype (%s)" \ - % self.indices.dtype.name ) - - # only support 32-bit ints for now - self.indptr = np.asarray(self.indptr, 
np.intc) - self.indices = np.asarray(self.indices, np.intc) - self.data = to_native(self.data) - - # check array shapes - if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1: - raise ValueError("indices, and indptr should be rank 1") - if np.rank(self.data) != 3: - raise ValueError("data should be rank 3") - - # check index pointer - if (len(self.indptr) != M//R + 1 ): - raise ValueError("index pointer size (%d) should be (%d)" % - (len(self.indptr), M//R + 1)) - if (self.indptr[0] != 0): - raise ValueError("index pointer should start with 0") - - # check index and data arrays - if (len(self.indices) != len(self.data)): - raise ValueError("indices and data should have the same size") - if (self.indptr[-1] > len(self.indices)): - raise ValueError("Last value of index pointer should be less than " - "the size of index and data arrays") - - self.prune() - - if full_check: - #check format validity (more expensive) - if self.nnz > 0: - if self.indices.max() >= N//C: - print "max index",self.indices.max() - raise ValueError("column index values must be < %d" % (N//C)) - if self.indices.min() < 0: - raise ValueError("column index values must be >= 0") - if np.diff(self.indptr).min() < 0: - raise ValueError("index pointer values must form a " - "non-decreasing sequence") - - #if not self.has_sorted_indices(): - # warn('Indices were not in sorted order. 
Sorting indices.') - # self.sort_indices(check_first=False) - - def _get_blocksize(self): - return self.data.shape[1:] - blocksize = property(fget=_get_blocksize) - - def getnnz(self): - R,C = self.blocksize - return self.indptr[-1] * R * C - nnz = property(fget=getnnz) - - def __repr__(self): - nnz = self.getnnz() - format = self.getformat() - return "<%dx%d sparse matrix of type '%s'\n" \ - "\twith %d stored elements (blocksize = %dx%d) in %s format>" % \ - ( self.shape + (self.dtype.type, nnz) + self.blocksize + \ - (_formats[format][1],) ) - - - def diagonal(self): - """Returns the main diagonal of the matrix - """ - M,N = self.shape - R,C = self.blocksize - y = np.empty(min(M,N), dtype=upcast(self.dtype)) - sparsetools.bsr_diagonal(M//R, N//C, R, C, \ - self.indptr, self.indices, np.ravel(self.data), y) - return y - - ########################## - # NotImplemented methods # - ########################## - - def getdata(self,ind): - raise NotImplementedError - - def __getitem__(self,key): - raise NotImplementedError - - def __setitem__(self,key,val): - raise NotImplementedError - - ###################### - # Arithmetic methods # - ###################### - - def matvec(self, other): - return self * other - - def matmat(self, other): - return self * other - - def _mul_vector(self, other): - M,N = self.shape - R,C = self.blocksize - - result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) - - bsr_matvec(M//R, N//C, R, C, \ - self.indptr, self.indices, self.data.ravel(), - other, result) - - return result - - def _mul_multivector(self,other): - R,C = self.blocksize - M,N = self.shape - n_vecs = other.shape[1] #number of column vectors - - result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype)) - - bsr_matvecs(M//R, N//C, n_vecs, R, C, \ - self.indptr, self.indices, self.data.ravel(), \ - other.ravel(), result.ravel()) - - return result - - def _mul_sparse_matrix(self, other): - M, K1 = self.shape - K2, N = other.shape - - indptr = 
np.empty_like( self.indptr ) - - R,n = self.blocksize - - #convert to this format - if isspmatrix_bsr(other): - C = other.blocksize[1] - else: - C = 1 - - from csr import isspmatrix_csr - - if isspmatrix_csr(other) and n == 1: - other = other.tobsr(blocksize=(n,C), copy=False) #lightweight conversion - else: - other = other.tobsr(blocksize=(n,C)) - - csr_matmat_pass1( M//R, N//C, \ - self.indptr, self.indices, \ - other.indptr, other.indices, \ - indptr) - - bnnz = indptr[-1] - indices = np.empty(bnnz, dtype=np.intc) - data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype)) - - bsr_matmat_pass2( M//R, N//C, R, C, n, \ - self.indptr, self.indices, np.ravel(self.data), \ - other.indptr, other.indices, np.ravel(other.data), \ - indptr, indices, data) - - data = data.reshape(-1,R,C) - - #TODO eliminate zeros - - return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C)) - - - - - ###################### - # Conversion methods # - ###################### - - def tobsr(self,blocksize=None,copy=False): - if blocksize not in [None, self.blocksize]: - return self.tocsr().tobsr(blocksize=blocksize) - if copy: - return self.copy() - else: - return self - - def tocsr(self): - return self.tocoo(copy=False).tocsr() - #TODO make this more efficient - - def tocsc(self): - return self.tocoo(copy=False).tocsc() - - def tocoo(self,copy=True): - """Convert this matrix to COOrdinate format. - - When copy=False the data array will be shared between - this matrix and the resultant coo_matrix. 
- """ - - M,N = self.shape - R,C = self.blocksize - - row = (R * np.arange(M//R)).repeat(np.diff(self.indptr)) - row = row.repeat(R*C).reshape(-1,R,C) - row += np.tile(np.arange(R).reshape(-1,1), (1,C)) - row = row.reshape(-1) - - col = (C * self.indices).repeat(R*C).reshape(-1,R,C) - col += np.tile(np.arange(C), (R,1)) - col = col.reshape(-1) - - data = self.data.reshape(-1) - - if copy: - data = data.copy() - - from coo import coo_matrix - return coo_matrix((data,(row,col)), shape=self.shape) - - - def transpose(self): - - R,C = self.blocksize - M,N = self.shape - NBLK = self.nnz//(R*C) - - if self.nnz == 0: - return bsr_matrix((N,M), blocksize=(C,R)) - - indptr = np.empty( N//C + 1, dtype=self.indptr.dtype) - indices = np.empty( NBLK, dtype=self.indices.dtype) - data = np.empty( (NBLK,C,R), dtype=self.data.dtype) - - bsr_transpose(M//R, N//C, R, C, \ - self.indptr, self.indices, self.data.ravel(), \ - indptr, indices, data.ravel()) - - return bsr_matrix((data,indices,indptr), shape=(N,M)) - - - ############################################################## - # methods that examine or modify the internal data structure # - ############################################################## - - def eliminate_zeros(self): - R,C = self.blocksize - M,N = self.shape - - mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) #nonzero blocks - - nonzero_blocks = mask.nonzero()[0] - - if len(nonzero_blocks) == 0: - return #nothing to do - - self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] - - from csr import csr_matrix - - # modifies self.indptr and self.indices *in place* - proxy = csr_matrix((mask,self.indices,self.indptr),shape=(M//R,N//C)) - proxy.eliminate_zeros() - - self.prune() - - - def sum_duplicates(self): - raise NotImplementedError - - def sort_indices(self): - """Sort the indices of this matrix *in place* - """ - if self.has_sorted_indices: - return - - R,C = self.blocksize - M,N = self.shape - - bsr_sort_indices(M//R, N//C, R, C, self.indptr, 
self.indices, self.data.ravel()) - - self.has_sorted_indices = True - - def prune(self): - """ Remove empty space after all non-zero elements. - """ - - R,C = self.blocksize - M,N = self.shape - - if len(self.indptr) != M//R + 1: - raise ValueError("index pointer has invalid length") - - bnnz = self.indptr[-1] - - if len(self.indices) < bnnz: - raise ValueError("indices array has too few elements") - if len(self.data) < bnnz: - raise ValueError("data array has too few elements") - - self.data = self.data[:bnnz] - self.indices = self.indices[:bnnz] - - # utility functions - def _binopt(self, other, op, in_shape=None, out_shape=None): - """apply the binary operation fn to two sparse matrices""" - - # ideally we'd take the GCDs of the blocksize dimensions - # and explode self and other to match - other = self.__class__(other, blocksize=self.blocksize) - - # e.g. bsr_plus_bsr, etc. - fn = getattr(sparsetools, self.format + op + self.format) - - R,C = self.blocksize - - max_bnnz = len(self.data) + len(other.data) - indptr = np.empty_like(self.indptr) - indices = np.empty(max_bnnz, dtype=np.intc) - data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) - - fn(self.shape[0]//R, self.shape[1]//C, R, C, - self.indptr, self.indices, np.ravel(self.data), - other.indptr, other.indices, np.ravel(other.data), - indptr, indices, data) - - actual_bnnz = indptr[-1] - indices = indices[:actual_bnnz] - data = data[:R*C*actual_bnnz] - - if actual_bnnz < max_bnnz/2: - indices = indices.copy() - data = data.copy() - - data = data.reshape(-1,R,C) - - return self.__class__((data, indices, indptr), shape=self.shape) - - # needed by _data_matrix - def _with_data(self,data,copy=True): - """Returns a matrix with the same sparsity structure as self, - but with different data. By default the structure arrays - (i.e. .indptr and .indices) are copied. 
- """ - if copy: - return self.__class__((data,self.indices.copy(),self.indptr.copy()), \ - shape=self.shape,dtype=data.dtype) - else: - return self.__class__((data,self.indices,self.indptr), \ - shape=self.shape,dtype=data.dtype) - - - -# # these functions are used by the parent class -# # to remove redudancy between bsc_matrix and bsr_matrix -# def _swap(self,x): -# """swap the members of x if this is a column-oriented matrix -# """ -# return (x[0],x[1]) - - -from sputils import _isinstance - -def isspmatrix_bsr(x): - return _isinstance(x, bsr_matrix) diff --git a/scipy-0.10.1/scipy/sparse/compressed.py b/scipy-0.10.1/scipy/sparse/compressed.py deleted file mode 100644 index d56d2a7326..0000000000 --- a/scipy-0.10.1/scipy/sparse/compressed.py +++ /dev/null @@ -1,682 +0,0 @@ -"""Base class for sparse matrix formats using compressed storage -""" - -__all__ = [] - -from warnings import warn - -import numpy as np - -from base import spmatrix, isspmatrix, SparseEfficiencyWarning -from data import _data_matrix -import sparsetools -from sputils import upcast, to_native, isdense, isshape, getdtype, \ - isscalarlike, isintlike - - -class _cs_matrix(_data_matrix): - """base matrix class for compressed row and column oriented matrices""" - - def __init__(self, arg1, shape=None, dtype=None, copy=False): - _data_matrix.__init__(self) - - - if isspmatrix(arg1): - if arg1.format == self.format and copy: - arg1 = arg1.copy() - else: - arg1 = arg1.asformat(self.format) - self._set_self( arg1 ) - - elif isinstance(arg1, tuple): - if isshape(arg1): - # It's a tuple of matrix dimensions (M, N) - # create empty matrix - self.shape = arg1 #spmatrix checks for errors here - M, N = self.shape - self.data = np.zeros(0, getdtype(dtype, default=float)) - self.indices = np.zeros(0, np.intc) - self.indptr = np.zeros(self._swap((M,N))[0] + 1, dtype=np.intc) - else: - if len(arg1) == 2: - # (data, ij) format - from coo import coo_matrix - other = self.__class__( coo_matrix(arg1, shape=shape) ) 
- self._set_self( other ) - elif len(arg1) == 3: - # (data, indices, indptr) format - (data, indices, indptr) = arg1 - self.indices = np.array(indices, copy=copy) - self.indptr = np.array(indptr, copy=copy) - self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data)) - else: - raise ValueError("unrecognized %s_matrix constructor usage" % - self.format) - - else: - #must be dense - try: - arg1 = np.asarray(arg1) - except: - raise ValueError("unrecognized %s_matrix constructor usage" % - self.format) - from coo import coo_matrix - self._set_self( self.__class__(coo_matrix(arg1, dtype=dtype)) ) - - # Read matrix dimensions given, if any - if shape is not None: - self.shape = shape # spmatrix will check for errors - else: - if self.shape is None: - # shape not already set, try to infer dimensions - try: - major_dim = len(self.indptr) - 1 - minor_dim = self.indices.max() + 1 - except: - raise ValueError('unable to infer matrix dimensions') - else: - self.shape = self._swap((major_dim,minor_dim)) - - if dtype is not None: - self.data = self.data.astype(dtype) - - self.check_format(full_check=False) - - def getnnz(self): - return self.indptr[-1] - nnz = property(fget=getnnz) - - - def _set_self(self, other, copy=False): - """take the member variables of other and assign them to self""" - - if copy: - other = other.copy() - - self.data = other.data - self.indices = other.indices - self.indptr = other.indptr - self.shape = other.shape - - def check_format(self, full_check=True): - """check whether the matrix format is valid - - Parameters - ========== - - - full_check : {bool} - - True - rigorous check, O(N) operations : default - - False - basic check, O(1) operations - - """ - #use _swap to determine proper bounds - major_name,minor_name = self._swap(('row','column')) - major_dim,minor_dim = self._swap(self.shape) - - # index arrays should have integer data types - if self.indptr.dtype.kind != 'i': - warn("indptr array has non-integer dtype (%s)" \ - % 
self.indptr.dtype.name ) - if self.indices.dtype.kind != 'i': - warn("indices array has non-integer dtype (%s)" \ - % self.indices.dtype.name ) - - # only support 32-bit ints for now - self.indptr = np.asarray(self.indptr, dtype=np.intc) - self.indices = np.asarray(self.indices, dtype=np.intc) - self.data = to_native(self.data) - - # check array shapes - if np.rank(self.data) != 1 or np.rank(self.indices) != 1 or np.rank(self.indptr) != 1: - raise ValueError('data, indices, and indptr should be rank 1') - - # check index pointer - if (len(self.indptr) != major_dim + 1 ): - raise ValueError("index pointer size (%d) should be (%d)" % - (len(self.indptr), major_dim + 1)) - if (self.indptr[0] != 0): - raise ValueError("index pointer should start with 0") - - # check index and data arrays - if (len(self.indices) != len(self.data)): - raise ValueError("indices and data should have the same size") - if (self.indptr[-1] > len(self.indices)): - raise ValueError("Last value of index pointer should be less than " - "the size of index and data arrays") - - self.prune() - - if full_check: - #check format validity (more expensive) - if self.nnz > 0: - if self.indices.max() >= minor_dim: - raise ValueError("%s index values must be < %d" % - (minor_name,minor_dim)) - if self.indices.min() < 0: - raise ValueError("%s index values must be >= 0" % - minor_name) - if np.diff(self.indptr).min() < 0: - raise ValueError("index pointer values must form a " - "non-decreasing sequence") - - #if not self.has_sorted_indices(): - # warn('Indices were not in sorted order. Sorting indices.') - # self.sort_indices() - # assert(self.has_sorted_indices()) - #TODO check for duplicates? - - - def __add__(self,other): - # First check if argument is a scalar - if isscalarlike(other): - # Now we would add this scalar to every element. 
- raise NotImplementedError('adding a scalar to a CSC or CSR ' - 'matrix is not supported') - elif isspmatrix(other): - if (other.shape != self.shape): - raise ValueError("inconsistent shapes") - - return self._binopt(other,'_plus_') - elif isdense(other): - # Convert this matrix to a dense matrix and add them - return self.todense() + other - else: - raise NotImplementedError - - def __radd__(self,other): - return self.__add__(other) - - def __sub__(self,other): - # First check if argument is a scalar - if isscalarlike(other): - # Now we would add this scalar to every element. - raise NotImplementedError('adding a scalar to a sparse ' - 'matrix is not supported') - elif isspmatrix(other): - if (other.shape != self.shape): - raise ValueError("inconsistent shapes") - - return self._binopt(other,'_minus_') - elif isdense(other): - # Convert this matrix to a dense matrix and subtract them - return self.todense() - other - else: - raise NotImplementedError - - def __rsub__(self,other): # other - self - #note: this can't be replaced by other + (-self) for unsigned types - if isscalarlike(other): - # Now we would add this scalar to every element. 
- raise NotImplementedError('adding a scalar to a sparse ' - 'matrix is not supported') - elif isdense(other): - # Convert this matrix to a dense matrix and subtract them - return other - self.todense() - else: - raise NotImplementedError - - - def __truediv__(self,other): - if isscalarlike(other): - return self * (1./other) - - elif isspmatrix(other): - if other.shape != self.shape: - raise ValueError('inconsistent shapes') - - return self._binopt(other,'_eldiv_') - - else: - raise NotImplementedError - - - def multiply(self, other): - """Point-wise multiplication by another matrix - """ - if other.shape != self.shape: - raise ValueError('inconsistent shapes') - - if isdense(other): - return np.multiply(self.todense(),other) - else: - other = self.__class__(other) - return self._binopt(other,'_elmul_') - - - ########################### - # Multiplication handlers # - ########################### - - def _mul_vector(self, other): - M,N = self.shape - - #output array - result = np.zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) ) - - # csr_matvec or csc_matvec - fn = getattr(sparsetools,self.format + '_matvec') - fn(M, N, self.indptr, self.indices, self.data, other, result) - - return result - - - def _mul_multivector(self, other): - M,N = self.shape - n_vecs = other.shape[1] #number of column vectors - - result = np.zeros( (M,n_vecs), dtype=upcast(self.dtype,other.dtype) ) - - # csr_matvecs or csc_matvecs - fn = getattr(sparsetools,self.format + '_matvecs') - fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel()) - - return result - - - def _mul_sparse_matrix(self, other): - M, K1 = self.shape - K2, N = other.shape - - major_axis = self._swap((M,N))[0] - indptr = np.empty(major_axis + 1, dtype=np.intc) - - other = self.__class__(other) #convert to this format - fn = getattr(sparsetools, self.format + '_matmat_pass1') - fn( M, N, self.indptr, self.indices, \ - other.indptr, other.indices, \ - indptr) - - nnz = indptr[-1] - 
indices = np.empty(nnz, dtype=np.intc) - data = np.empty(nnz, dtype=upcast(self.dtype,other.dtype)) - - fn = getattr(sparsetools, self.format + '_matmat_pass2') - fn( M, N, self.indptr, self.indices, self.data, \ - other.indptr, other.indices, other.data, \ - indptr, indices, data) - - return self.__class__((data,indices,indptr),shape=(M,N)) - - - def diagonal(self): - """Returns the main diagonal of the matrix - """ - #TODO support k-th diagonal - fn = getattr(sparsetools, self.format + "_diagonal") - y = np.empty( min(self.shape), dtype=upcast(self.dtype) ) - fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y) - return y - - def sum(self, axis=None): - """Sum the matrix over the given axis. If the axis is None, sum - over both rows and columns, returning a scalar. - """ - # The spmatrix base class already does axis=0 and axis=1 efficiently - # so we only do the case axis=None here - if axis is None: - return self.data.sum() - else: - return spmatrix.sum(self,axis) - raise ValueError("axis out of bounds") - - ####################### - # Getting and Setting # - ####################### - - def __getitem__(self, key): - if isinstance(key, tuple): - row = key[0] - col = key[1] - - #TODO implement CSR[ [1,2,3], X ] with sparse matmat - #TODO make use of sorted indices - - if isintlike(row) and isintlike(col): - return self._get_single_element(row,col) - else: - major,minor = self._swap((row,col)) - if isintlike(major) and isinstance(minor,slice): - minor_shape = self._swap(self.shape)[1] - start, stop, stride = minor.indices(minor_shape) - out_shape = self._swap( (1, stop-start) ) - return self._get_slice( major, start, stop, stride, out_shape) - elif isinstance( row, slice) or isinstance(col, slice): - return self._get_submatrix( row, col ) - else: - raise NotImplementedError - - elif isintlike(key): - return self[key, :] - else: - raise IndexError("invalid index") - - - def _get_single_element(self,row,col): - M, N = self.shape - if (row < 0): - 
row += M - if (col < 0): - col += N - if not (0<=row= 1") - - #TODO make [i,:] faster - #TODO implement [i,x:y:z] - - indices = [] - - for ind in xrange(self.indptr[i], self.indptr[i+1]): - if self.indices[ind] >= start and self.indices[ind] < stop: - indices.append(ind) - - index = self.indices[indices] - start - data = self.data[indices] - indptr = np.array([0, len(indices)]) - return self.__class__((data, index, indptr), shape=shape, \ - dtype=self.dtype) - - def _get_submatrix( self, slice0, slice1 ): - """Return a submatrix of this matrix (new matrix is created).""" - - slice0, slice1 = self._swap((slice0,slice1)) - shape0, shape1 = self._swap(self.shape) - def _process_slice( sl, num ): - if isinstance( sl, slice ): - i0, i1 = sl.start, sl.stop - if i0 is None: - i0 = 0 - elif i0 < 0: - i0 = num + i0 - - if i1 is None: - i1 = num - elif i1 < 0: - i1 = num + i1 - - return i0, i1 - - elif np.isscalar( sl ): - if sl < 0: - sl += num - - return sl, sl + 1 - - else: - return sl[0], sl[1] - - def _in_bounds( i0, i1, num ): - if not (0<=i0 0 the k-th upper diagonal - - k < 0 the k-th lower diagonal - m, n : int - shape of the result - format : format of the result (e.g. "csr") - By default (format=None) an appropriate sparse matrix - format is returned. This choice is subject to change. - - See Also - -------- - dia_matrix : the sparse DIAgonal format. - - Examples - -------- - >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]]) - >>> diags = array([0,-1,2]) - >>> spdiags(data, diags, 4, 4).todense() - matrix([[1, 0, 3, 0], - [1, 2, 0, 4], - [0, 2, 3, 0], - [0, 0, 3, 4]]) - - """ - return dia_matrix((data, diags), shape=(m,n)).asformat(format) - -def identity(n, dtype='d', format=None): - """Identity matrix in sparse format - - Returns an identity matrix with shape (n,n) using a given - sparse format and dtype. - - Parameters - ---------- - n : integer - Shape of the identity matrix. 
- dtype : - Data type of the matrix - format : string - Sparse format of the result, e.g. format="csr", etc. - - Examples - -------- - >>> identity(3).todense() - matrix([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> identity(3, dtype='int8', format='dia') - <3x3 sparse matrix of type '' - with 3 stored elements (1 diagonals) in DIAgonal format> - - """ - - if format in ['csr','csc']: - indptr = np.arange(n+1, dtype=np.intc) - indices = np.arange(n, dtype=np.intc) - data = np.ones(n, dtype=dtype) - cls = eval('%s_matrix' % format) - return cls((data,indices,indptr),(n,n)) - elif format == 'coo': - row = np.arange(n, dtype=np.intc) - col = np.arange(n, dtype=np.intc) - data = np.ones(n, dtype=dtype) - return coo_matrix((data,(row,col)),(n,n)) - elif format == 'dia': - data = np.ones(n, dtype=dtype) - diags = [0] - return dia_matrix((data,diags), shape=(n,n)) - else: - return identity(n, dtype=dtype, format='csr').asformat(format) - - -def eye(m, n, k=0, dtype='d', format=None): - """eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal - is all ones and everything else is zeros. - """ - m,n = int(m),int(n) - diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype) - return spdiags(diags, k, m, n).asformat(format) - - -def kron(A, B, format=None): - """kronecker product of sparse matrices A and B - - Parameters - ---------- - A : sparse or dense matrix - first matrix of the product - B : sparse or dense matrix - second matrix of the product - format : string - format of the result (e.g. 
"csr") - - Returns - ------- - kronecker product in a sparse matrix format - - - Examples - -------- - >>> A = csr_matrix(array([[0,2],[5,0]])) - >>> B = csr_matrix(array([[1,2],[3,4]])) - >>> kron(A,B).todense() - matrix([[ 0, 0, 2, 4], - [ 0, 0, 6, 8], - [ 5, 10, 0, 0], - [15, 20, 0, 0]]) - - >>> kron(A,[[1,2],[3,4]]).todense() - matrix([[ 0, 0, 2, 4], - [ 0, 0, 6, 8], - [ 5, 10, 0, 0], - [15, 20, 0, 0]]) - - """ - B = coo_matrix(B) - - if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]: - #B is fairly dense, use BSR - A = csr_matrix(A,copy=True) - - output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) - - if A.nnz == 0 or B.nnz == 0: - # kronecker product is the zero matrix - return coo_matrix( output_shape ) - - B = B.toarray() - data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1]) - data = data * B - - return bsr_matrix((data,A.indices,A.indptr), shape=output_shape) - else: - #use COO - A = coo_matrix(A) - output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) - - if A.nnz == 0 or B.nnz == 0: - # kronecker product is the zero matrix - return coo_matrix( output_shape ) - - # expand entries of a into blocks - row = A.row.repeat(B.nnz) - col = A.col.repeat(B.nnz) - data = A.data.repeat(B.nnz) - - row *= B.shape[0] - col *= B.shape[1] - - # increment block indices - row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz) - row += B.row - col += B.col - row,col = row.reshape(-1),col.reshape(-1) - - # compute block entries - data = data.reshape(-1,B.nnz) * B.data - data = data.reshape(-1) - - return coo_matrix((data,(row,col)), shape=output_shape).asformat(format) - -def kronsum(A, B, format=None): - """kronecker sum of sparse matrices A and B - - Kronecker sum of two sparse matrices is a sum of two Kronecker - products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) - and B has shape (n,n) and I_m and I_n are identity matrices - of shape (m,m) and (n,n) respectively. 
- - Parameters - ---------- - A - square matrix - B - square matrix - format : string - format of the result (e.g. "csr") - - Returns - ------- - kronecker sum in a sparse matrix format - - Examples - -------- - - - """ - A = coo_matrix(A) - B = coo_matrix(B) - - if A.shape[0] != A.shape[1]: - raise ValueError('A is not square') - - if B.shape[0] != B.shape[1]: - raise ValueError('B is not square') - - dtype = upcast(A.dtype, B.dtype) - - L = kron(identity(B.shape[0],dtype=dtype), A, format=format) - R = kron(B, identity(A.shape[0],dtype=dtype), format=format) - - return (L+R).asformat(format) #since L + R is not always same format - - -def hstack(blocks, format=None, dtype=None): - """ - Stack sparse matrices horizontally (column wise) - - Parameters - ---------- - blocks - sequence of sparse matrices with compatible shapes - format : string - sparse format of the result (e.g. "csr") - by default an appropriate sparse matrix format is returned. - This choice is subject to change. - - See Also - -------- - vstack : stack sparse matrices vertically (row wise) - - Examples - -------- - >>> from scipy.sparse import coo_matrix, vstack - >>> A = coo_matrix([[1,2],[3,4]]) - >>> B = coo_matrix([[5],[6]]) - >>> hstack( [A,B] ).todense() - matrix([[1, 2, 5], - [3, 4, 6]]) - - """ - return bmat([blocks], format=format, dtype=dtype) - -def vstack(blocks, format=None, dtype=None): - """ - Stack sparse matrices vertically (row wise) - - Parameters - ---------- - blocks - sequence of sparse matrices with compatible shapes - format : string - sparse format of the result (e.g. "csr") - by default an appropriate sparse matrix format is returned. - This choice is subject to change. 
- - See Also - -------- - hstack : stack sparse matrices horizontally (column wise) - - Examples - -------- - >>> from scipy.sparse import coo_matrix, vstack - >>> A = coo_matrix([[1,2],[3,4]]) - >>> B = coo_matrix([[5,6]]) - >>> vstack( [A,B] ).todense() - matrix([[1, 2], - [3, 4], - [5, 6]]) - - """ - return bmat([ [b] for b in blocks ], format=format, dtype=dtype) - -def bmat(blocks, format=None, dtype=None): - """ - Build a sparse matrix from sparse sub-blocks - - Parameters - ---------- - blocks - grid of sparse matrices with compatible shapes - an entry of None implies an all-zero matrix - format : sparse format of the result (e.g. "csr") - by default an appropriate sparse matrix format is returned. - This choice is subject to change. - - Examples - -------- - >>> from scipy.sparse import coo_matrix, bmat - >>> A = coo_matrix([[1,2],[3,4]]) - >>> B = coo_matrix([[5],[6]]) - >>> C = coo_matrix([[7]]) - >>> bmat( [[A,B],[None,C]] ).todense() - matrix([[1, 2, 5], - [3, 4, 6], - [0, 0, 7]]) - - >>> bmat( [[A,None],[None,C]] ).todense() - matrix([[1, 2, 0], - [3, 4, 0], - [0, 0, 7]]) - - """ - - blocks = np.asarray(blocks, dtype='object') - - if np.rank(blocks) != 2: - raise ValueError('blocks must have rank 2') - - M,N = blocks.shape - - block_mask = np.zeros(blocks.shape, dtype=np.bool) - brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc) - bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc) - - # convert everything to COO format - for i in range(M): - for j in range(N): - if blocks[i,j] is not None: - A = coo_matrix(blocks[i,j]) - blocks[i,j] = A - block_mask[i,j] = True - - if brow_lengths[i] == 0: - brow_lengths[i] = A.shape[0] - else: - if brow_lengths[i] != A.shape[0]: - raise ValueError('blocks[%d,:] has incompatible row dimensions' % i) - - if bcol_lengths[j] == 0: - bcol_lengths[j] = A.shape[1] - else: - if bcol_lengths[j] != A.shape[1]: - raise ValueError('blocks[:,%d] has incompatible column dimensions' % j) - - - # ensure that at least one 
value in each row and col is not None - if brow_lengths.min() == 0: - raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() ) - if bcol_lengths.min() == 0: - raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() ) - - nnz = sum([ A.nnz for A in blocks[block_mask] ]) - if dtype is None: - dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) ) - - row_offsets = np.concatenate(([0], np.cumsum(brow_lengths))) - col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths))) - - data = np.empty(nnz, dtype=dtype) - row = np.empty(nnz, dtype=np.intc) - col = np.empty(nnz, dtype=np.intc) - - nnz = 0 - for i in range(M): - for j in range(N): - if blocks[i,j] is not None: - A = blocks[i,j] - data[nnz:nnz + A.nnz] = A.data - row[nnz:nnz + A.nnz] = A.row - col[nnz:nnz + A.nnz] = A.col - - row[nnz:nnz + A.nnz] += row_offsets[i] - col[nnz:nnz + A.nnz] += col_offsets[j] - - nnz += A.nnz - - shape = (np.sum(brow_lengths), np.sum(bcol_lengths)) - return coo_matrix((data, (row, col)), shape=shape).asformat(format) - -def rand(m, n, density=0.01, format="coo", dtype=None): - """Generate a sparse matrix of the given shape and density with uniformely - distributed values. - - Parameters - ---------- - m, n: int - shape of the matrix - density: real - density of the generated matrix: density equal to one means a full - matrix, density of 0 means a matrix with no non-zero items. - format: str - sparse matrix format. - dtype: dtype - type of the returned matrix values. - - Notes - ----- - Only float types are supported for now. - """ - if density < 0 or density > 1: - raise ValueError("density expected to be 0 <= density <= 1") - if dtype and not dtype in [np.float32, np.float64, np.longdouble]: - raise NotImplementedError("type %s not supported" % dtype) - - mn = m * n - - # XXX: sparse uses intc instead of intp... 
- tp = np.intp - if mn > np.iinfo(tp).max: - msg = """\ -Trying to generate a random sparse matrix such as the product of dimensions is -greater than %d - this is not supported on this machine -""" - raise ValueError(msg % np.iinfo(tp).max) - - # Number of non zero values - k = long(density * m * n) - - # Generate a few more values than k so that we can get unique values - # afterwards. - # XXX: one could be smarter here - mlow = 5 - fac = 1.02 - gk = min(k + mlow, fac * k) - - def _gen_unique_rand(_gk): - id = np.random.rand(_gk) - return np.unique(np.floor(id * mn))[:k] - - id = _gen_unique_rand(gk) - while id.size < k: - gk *= 1.05 - id = _gen_unique_rand(gk) - - j = np.floor(id * 1. / m).astype(tp) - i = (id - j * m).astype(tp) - vals = np.random.rand(k).astype(dtype) - return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format) diff --git a/scipy-0.10.1/scipy/sparse/coo.py b/scipy-0.10.1/scipy/sparse/coo.py deleted file mode 100644 index 4f3b254cf6..0000000000 --- a/scipy-0.10.1/scipy/sparse/coo.py +++ /dev/null @@ -1,382 +0,0 @@ -""" A sparse matrix in COOrdinate or 'triplet' format""" - -__docformat__ = "restructuredtext en" - -__all__ = ['coo_matrix', 'isspmatrix_coo'] - -from warnings import warn - -import numpy as np - -from sparsetools import coo_tocsr, coo_todense, coo_matvec -from base import isspmatrix -from data import _data_matrix -from sputils import upcast, to_native, isshape, getdtype, isintlike - -class coo_matrix(_data_matrix): - """ - A sparse matrix in COOrdinate format. - - Also known as the 'ijv' or 'triplet' format. - - This can be instantiated in several ways: - coo_matrix(D) - with a dense matrix D - - coo_matrix(S) - with another sparse matrix S (equivalent to S.tocoo()) - - coo_matrix((M, N), [dtype]) - to construct an empty matrix with shape (M, N) - dtype is optional, defaulting to dtype='d'. - - coo_matrix((data, ij), [shape=(M, N)]) - The arguments 'data' and 'ij' represent three arrays: - 1. 
data[:] the entries of the matrix, in any order - 2. ij[0][:] the row indices of the matrix entries - 3. ij[1][:] the column indices of the matrix entries - - Where ``A[ij[0][k], ij[1][k] = data[k]``. When shape is - not specified, it is inferred from the index arrays - - Attributes - ---------- - dtype : dtype - Data type of the matrix - shape : 2-tuple - Shape of the matrix - ndim : int - Number of dimensions (this is always 2) - nnz - Number of nonzero elements - data - COO format data array of the matrix - row - COO format row index array of the matrix - col - COO format column index array of the matrix - - Notes - ----- - - Sparse matrices can be used in arithmetic operations: they support - addition, subtraction, multiplication, division, and matrix power. - - Advantages of the COO format - - facilitates fast conversion among sparse formats - - permits duplicate entries (see example) - - very fast conversion to and from CSR/CSC formats - - Disadvantages of the COO format - - does not directly support: - + arithmetic operations - + slicing - - Intended Usage - - COO is a fast format for constructing sparse matrices - - Once a matrix has been constructed, convert to CSR or - CSC format for fast arithmetic and matrix vector operations - - By default when converting to CSR or CSC format, duplicate (i,j) - entries will be summed together. This facilitates efficient - construction of finite element matrices and the like. 
(see example) - - - Examples - -------- - - >>> from scipy.sparse import * - >>> from scipy import * - >>> coo_matrix( (3,4), dtype=int8 ).todense() - matrix([[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], dtype=int8) - - >>> row = array([0,3,1,0]) - >>> col = array([0,3,1,2]) - >>> data = array([4,5,7,9]) - >>> coo_matrix( (data,(row,col)), shape=(4,4) ).todense() - matrix([[4, 0, 9, 0], - [0, 7, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 5]]) - - >>> # example with duplicates - >>> row = array([0,0,1,3,1,0,0]) - >>> col = array([0,2,1,3,1,0,0]) - >>> data = array([1,1,1,1,1,1,1]) - >>> coo_matrix( (data,(row,col)), shape=(4,4)).todense() - matrix([[3, 0, 1, 0], - [0, 2, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 1]]) - - """ - - def __init__(self, arg1, shape=None, dtype=None, copy=False): - _data_matrix.__init__(self) - - if isinstance(arg1, tuple): - if isshape(arg1): - M, N = arg1 - self.shape = (M,N) - self.row = np.array([], dtype=np.intc) - self.col = np.array([], dtype=np.intc) - self.data = np.array([], getdtype(dtype, default=float)) - else: - try: - obj, ij = arg1 - except: - raise TypeError('invalid input format') - - try: - if len(ij) != 2: - raise TypeError - except TypeError: - raise TypeError('invalid input format') - - self.row = np.array(ij[0], copy=copy, dtype=np.intc) - self.col = np.array(ij[1], copy=copy, dtype=np.intc) - self.data = np.array( obj, copy=copy) - - if shape is None: - if len(self.row) == 0 or len(self.col) == 0: - raise ValueError('cannot infer dimensions from zero sized index arrays') - M = self.row.max() + 1 - N = self.col.max() + 1 - self.shape = (M, N) - else: - # Use 2 steps to ensure shape has length 2. - M, N = shape - self.shape = (M, N) - - elif arg1 is None: - # Initialize an empty matrix. 
- if not isinstance(shape, tuple) or not isintlike(shape[0]): - raise TypeError('dimensions not understood') - warn('coo_matrix(None, shape=(M,N)) is deprecated, ' \ - 'use coo_matrix( (M,N) ) instead', DeprecationWarning) - self.shape = shape - self.data = np.array([], getdtype(dtype, default=float)) - self.row = np.array([], dtype=np.intc) - self.col = np.array([], dtype=np.intc) - else: - if isspmatrix(arg1): - if isspmatrix_coo(arg1) and copy: - self.row = arg1.row.copy() - self.col = arg1.col.copy() - self.data = arg1.data.copy() - self.shape = arg1.shape - else: - coo = arg1.tocoo() - self.row = coo.row - self.col = coo.col - self.data = coo.data - self.shape = coo.shape - else: - #dense argument - try: - M = np.atleast_2d(np.asarray(arg1)) - except: - raise TypeError('invalid input format') - - if np.rank(M) != 2: - raise TypeError('expected rank <= 2 array or matrix') - self.shape = M.shape - self.row,self.col = (M != 0).nonzero() - self.data = M[self.row,self.col] - - if dtype is not None: - self.data = self.data.astype(dtype) - - - self._check() - - def getnnz(self): - nnz = len(self.data) - if nnz != len(self.row) or nnz != len(self.col): - raise ValueError('row, column, and data array must all be the same length') - - if np.rank(self.data) != 1 or np.rank(self.row) != 1 or np.rank(self.col) != 1: - raise ValueError('row, column, and data arrays must have rank 1') - - return nnz - nnz = property(fget=getnnz) - - def _check(self): - """ Checks data structure for consistency """ - nnz = self.nnz - - # index arrays should have integer data types - if self.row.dtype.kind != 'i': - warn("row index array has non-integer dtype (%s) " \ - % self.row.dtype.name ) - if self.col.dtype.kind != 'i': - warn("col index array has non-integer dtype (%s) " \ - % self.col.dtype.name ) - - # only support 32-bit ints for now - self.row = np.asarray(self.row, dtype=np.intc) - self.col = np.asarray(self.col, dtype=np.intc) - self.data = to_native(self.data) - - if nnz > 0: - 
if self.row.max() >= self.shape[0]: - raise ValueError('row index exceedes matrix dimensions') - if self.col.max() >= self.shape[1]: - raise ValueError('column index exceedes matrix dimensions') - if self.row.min() < 0: - raise ValueError('negative row index found') - if self.col.min() < 0: - raise ValueError('negative column index found') - - - def transpose(self, copy=False): - M,N = self.shape - return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy) - - def toarray(self): - B = np.zeros(self.shape, dtype=self.dtype) - M,N = self.shape - coo_todense(M, N, self.nnz, self.row, self.col, self.data, B.ravel()) - return B - - def tocsc(self): - """Return a copy of this matrix in Compressed Sparse Column format - - Duplicate entries will be summed together. - - Examples - -------- - >>> from numpy import array - >>> from scipy.sparse import coo_matrix - >>> row = array([0,0,1,3,1,0,0]) - >>> col = array([0,2,1,3,1,0,0]) - >>> data = array([1,1,1,1,1,1,1]) - >>> A = coo_matrix( (data,(row,col)), shape=(4,4)).tocsc() - >>> A.todense() - matrix([[3, 0, 1, 0], - [0, 2, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 1]]) - - """ - from csc import csc_matrix - if self.nnz == 0: - return csc_matrix(self.shape, dtype=self.dtype) - else: - M,N = self.shape - indptr = np.empty(N + 1, dtype=np.intc) - indices = np.empty(self.nnz, dtype=np.intc) - data = np.empty(self.nnz, dtype=upcast(self.dtype)) - - coo_tocsr(N, M, self.nnz, \ - self.col, self.row, self.data, \ - indptr, indices, data) - - A = csc_matrix((data, indices, indptr), shape=self.shape) - A.sum_duplicates() - - return A - - def tocsr(self): - """Return a copy of this matrix in Compressed Sparse Row format - - Duplicate entries will be summed together. 
- - Examples - -------- - >>> from numpy import array - >>> from scipy.sparse import coo_matrix - >>> row = array([0,0,1,3,1,0,0]) - >>> col = array([0,2,1,3,1,0,0]) - >>> data = array([1,1,1,1,1,1,1]) - >>> A = coo_matrix( (data,(row,col)), shape=(4,4)).tocsr() - >>> A.todense() - matrix([[3, 0, 1, 0], - [0, 2, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 1]]) - - """ - from csr import csr_matrix - if self.nnz == 0: - return csr_matrix(self.shape, dtype=self.dtype) - else: - M,N = self.shape - indptr = np.empty(M + 1, dtype=np.intc) - indices = np.empty(self.nnz, dtype=np.intc) - data = np.empty(self.nnz, dtype=upcast(self.dtype)) - - coo_tocsr(M, N, self.nnz, \ - self.row, self.col, self.data, \ - indptr, indices, data) - - A = csr_matrix((data, indices, indptr), shape=self.shape) - A.sum_duplicates() - - return A - - def tocoo(self, copy=False): - if copy: - return self.copy() - else: - return self - - def todia(self): - from dia import dia_matrix - - ks = self.col - self.row #the diagonal for each nonzero - diags = np.unique(ks) - - if len(diags) > 100: - #probably undesired, should we do something? - #should todia() have a maxdiags parameter? - pass - - #initialize and fill in data array - data = np.zeros( (len(diags), self.col.max()+1), dtype=self.dtype) - data[ np.searchsorted(diags,ks), self.col ] = self.data - - return dia_matrix((data,diags), shape=self.shape) - - def todok(self): - from itertools import izip - from dok import dok_matrix - - dok = dok_matrix((self.shape), dtype=self.dtype) - - dok.update( izip(izip(self.row,self.col),self.data) ) - - return dok - - - # needed by _data_matrix - def _with_data(self,data,copy=True): - """Returns a matrix with the same sparsity structure as self, - but with different data. By default the index arrays - (i.e. .row and .col) are copied. 
- """ - if copy: - return coo_matrix( (data, (self.row.copy(), self.col.copy()) ), \ - shape=self.shape, dtype=data.dtype) - else: - return coo_matrix( (data, (self.row, self.col) ), \ - shape=self.shape, dtype=data.dtype) - - ########################### - # Multiplication handlers # - ########################### - - def _mul_vector(self, other): - #output array - result = np.zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) ) - coo_matvec(self.nnz, self.row, self.col, self.data, other, result) - return result - - def _mul_multivector(self, other): - return np.hstack( [ self._mul_vector(col).reshape(-1,1) for col in other.T ] ) - -from sputils import _isinstance - -def isspmatrix_coo( x ): - return _isinstance(x, coo_matrix) diff --git a/scipy-0.10.1/scipy/sparse/csc.py b/scipy-0.10.1/scipy/sparse/csc.py deleted file mode 100644 index 1d1d312671..0000000000 --- a/scipy-0.10.1/scipy/sparse/csc.py +++ /dev/null @@ -1,182 +0,0 @@ -"""Compressed Sparse Column matrix format""" - -__docformat__ = "restructuredtext en" - -__all__ = ['csc_matrix', 'isspmatrix_csc'] - -from warnings import warn - -import numpy as np - -from sparsetools import csc_tocsr -from sputils import upcast, isintlike - -from compressed import _cs_matrix - - -class csc_matrix(_cs_matrix): - """ - Compressed Sparse Column matrix - - This can be instantiated in several ways: - - csc_matrix(D) - with a dense matrix or rank-2 ndarray D - - csc_matrix(S) - with another sparse matrix S (equivalent to S.tocsc()) - - csc_matrix((M, N), [dtype]) - to construct an empty matrix with shape (M, N) - dtype is optional, defaulting to dtype='d'. 
- - csc_matrix((data, ij), [shape=(M, N)]) - where ``data`` and ``ij`` satisfy the relationship - ``a[ij[0, k], ij[1, k]] = data[k]`` - - csc_matrix((data, indices, indptr), [shape=(M, N)]) - is the standard CSC representation where the row indices for - column i are stored in ``indices[indptr[i]:indices[i+1]]`` - and their corresponding values are stored in - ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is - not supplied, the matrix dimensions are inferred from - the index arrays. - - Attributes - ---------- - dtype : dtype - Data type of the matrix - shape : 2-tuple - Shape of the matrix - ndim : int - Number of dimensions (this is always 2) - nnz - Number of nonzero elements - data - Data array of the matrix - indices - CSC format index array - indptr - CSC format index pointer array - has_sorted_indices - Whether indices are sorted - - Notes - ----- - - Sparse matrices can be used in arithmetic operations: they support - addition, subtraction, multiplication, division, and matrix power. - - Advantages of the CSC format - - efficient arithmetic operations CSC + CSC, CSC * CSC, etc. 
- - efficient column slicing - - fast matrix vector products (CSR, BSR may be faster) - - Disadvantages of the CSC format - - slow row slicing operations (consider CSR) - - changes to the sparsity structure are expensive (consider LIL or DOK) - - - Examples - -------- - - >>> from scipy.sparse import * - >>> from scipy import * - >>> csc_matrix( (3,4), dtype=int8 ).todense() - matrix([[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], dtype=int8) - - >>> row = array([0,2,2,0,1,2]) - >>> col = array([0,0,1,2,2,2]) - >>> data = array([1,2,3,4,5,6]) - >>> csc_matrix( (data,(row,col)), shape=(3,3) ).todense() - matrix([[1, 0, 4], - [0, 0, 5], - [2, 3, 6]]) - - >>> indptr = array([0,2,3,6]) - >>> indices = array([0,2,2,0,1,2]) - >>> data = array([1,2,3,4,5,6]) - >>> csc_matrix( (data,indices,indptr), shape=(3,3) ).todense() - matrix([[1, 0, 4], - [0, 0, 5], - [2, 3, 6]]) - - """ - - def transpose(self, copy=False): - from csr import csr_matrix - M,N = self.shape - return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy) - - def __iter__(self): - csr = self.tocsr() - for r in xrange(self.shape[0]): - yield csr[r,:] - - def tocsc(self, copy=False): - if copy: - return self.copy() - else: - return self - - def tocsr(self): - M,N = self.shape - indptr = np.empty(M + 1, dtype=np.intc) - indices = np.empty(self.nnz, dtype=np.intc) - data = np.empty(self.nnz, dtype=upcast(self.dtype)) - - csc_tocsr(M, N, \ - self.indptr, self.indices, self.data, \ - indptr, indices, data) - - from csr import csr_matrix - A = csr_matrix((data, indices, indptr), shape=self.shape) - A.has_sorted_indices = True - return A - - - def __getitem__(self, key): - # use CSR to implement fancy indexing - if isinstance(key, tuple): - row = key[0] - col = key[1] - - if isintlike(row) or isinstance(row, slice): - return self.T[col,row].T - else: - #[[1,2],??] or [[[1],[2]],??] 
- if isintlike(col) or isinstance(col,slice): - return self.T[col,row].T - else: - row = np.asarray(row, dtype=np.intc) - col = np.asarray(col, dtype=np.intc) - if len(row.shape) == 1: - return self.T[col,row] - elif len(row.shape) == 2: - row = row.reshape(-1) - col = col.reshape(-1,1) - return self.T[col,row].T - else: - raise NotImplementedError('unsupported indexing') - - return self.T[col,row].T - elif isintlike(key) or isinstance(key,slice): - return self.T[:,key].T #[i] or [1:2] - else: - return self.T[:,key].T #[[1,2]] - - - # these functions are used by the parent class (_cs_matrix) - # to remove redudancy between csc_matrix and csr_matrix - def _swap(self,x): - """swap the members of x if this is a column-oriented matrix - """ - return (x[1],x[0]) - - -from sputils import _isinstance - -def isspmatrix_csc(x): - return _isinstance(x, csc_matrix) diff --git a/scipy-0.10.1/scipy/sparse/csgraph.py b/scipy-0.10.1/scipy/sparse/csgraph.py deleted file mode 100644 index dbc88b1bdc..0000000000 --- a/scipy-0.10.1/scipy/sparse/csgraph.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Compressed Sparse graph algorithms""" - -__docformat__ = "restructuredtext en" - -__all__ = ['cs_graph_components'] - -import numpy as np - -from sparsetools import cs_graph_components as _cs_graph_components - -from csr import csr_matrix -from base import isspmatrix - -_msg0 = 'x must be a symmetric square matrix!' -_msg1 = _msg0 + '(has shape %s)' - -def cs_graph_components(x): - """ - Determine connected components of a graph stored as a compressed - sparse row or column matrix. - - For speed reasons, the symmetry of the matrix x is not checked. A - nonzero at index `(i, j)` means that node `i` is connected to node - `j` by an edge. The number of rows/columns of the matrix thus - corresponds to the number of nodes in the graph. - - Parameters - ----------- - x: ndarray-like, 2 dimensions, or sparse matrix - The adjacency matrix of the graph. Only the upper triangular part - is used. 
- - Returns - -------- - n_comp: int - The number of connected components. - label: ndarray (ints, 1 dimension): - The label array of each connected component (-2 is used to - indicate empty rows in the matrix: 0 everywhere, including - diagonal). This array has the length of the number of nodes, - i.e. one label for each node of the graph. Nodes having the same - label belong to the same connected component. - - Notes - ------ - The matrix is assumed to be symmetric and the upper triangular part - of the matrix is used. The matrix is converted to a CSR matrix unless - it is already a CSR. - - Examples - -------- - >>> from scipy.sparse import cs_graph_components - >>> import numpy as np - >>> D = np.eye(4) - >>> D[0,1] = D[1,0] = 1 - >>> cs_graph_components(D) - (3, array([0, 0, 1, 2])) - >>> from scipy.sparse import dok_matrix - >>> cs_graph_components(dok_matrix(D)) - (3, array([0, 0, 1, 2])) - - """ - try: - shape = x.shape - except AttributeError: - raise ValueError(_msg0) - - if not ((len(x.shape) == 2) and (x.shape[0] == x.shape[1])): - raise ValueError(_msg1 % x.shape) - - if isspmatrix(x): - x = x.tocsr() - else: - x = csr_matrix(x) - - label = np.empty((shape[0],), dtype=x.indptr.dtype) - - n_comp = _cs_graph_components(shape[0], x.indptr, x.indices, label) - - return n_comp, label diff --git a/scipy-0.10.1/scipy/sparse/csr.py b/scipy-0.10.1/scipy/sparse/csr.py deleted file mode 100644 index 23ebcdf6c9..0000000000 --- a/scipy-0.10.1/scipy/sparse/csr.py +++ /dev/null @@ -1,396 +0,0 @@ -"""Compressed Sparse Row matrix format""" - -__docformat__ = "restructuredtext en" - -__all__ = ['csr_matrix', 'isspmatrix_csr'] - - -from warnings import warn - -import numpy as np - -from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ - get_csr_submatrix, csr_sample_values -from sputils import upcast, isintlike - - -from compressed import _cs_matrix - -class csr_matrix(_cs_matrix): - """ - Compressed Sparse Row matrix - - This can be instantiated in several 
ways: - csr_matrix(D) - with a dense matrix or rank-2 ndarray D - - csr_matrix(S) - with another sparse matrix S (equivalent to S.tocsr()) - - csr_matrix((M, N), [dtype]) - to construct an empty matrix with shape (M, N) - dtype is optional, defaulting to dtype='d'. - - csr_matrix((data, ij), [shape=(M, N)]) - where ``data`` and ``ij`` satisfy the relationship - ``a[ij[0, k], ij[1, k]] = data[k]`` - - csr_matrix((data, indices, indptr), [shape=(M, N)]) - is the standard CSR representation where the column indices for - row i are stored in ``indices[indptr[i]:indices[i+1]]`` and their - corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. - If the shape parameter is not supplied, the matrix dimensions - are inferred from the index arrays. - - Attributes - ---------- - dtype : dtype - Data type of the matrix - shape : 2-tuple - Shape of the matrix - ndim : int - Number of dimensions (this is always 2) - nnz - Number of nonzero elements - data - CSR format data array of the matrix - indices - CSR format index array of the matrix - indptr - CSR format index pointer array of the matrix - has_sorted_indices - Whether indices are sorted - - Notes - ----- - - Sparse matrices can be used in arithmetic operations: they support - addition, subtraction, multiplication, division, and matrix power. - - Advantages of the CSR format - - efficient arithmetic operations CSR + CSR, CSR * CSR, etc. 
- - efficient row slicing - - fast matrix vector products - - Disadvantages of the CSR format - - slow column slicing operations (consider CSC) - - changes to the sparsity structure are expensive (consider LIL or DOK) - - Examples - -------- - - >>> from scipy.sparse import * - >>> from scipy import * - >>> csr_matrix( (3,4), dtype=int8 ).todense() - matrix([[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], dtype=int8) - - >>> row = array([0,0,1,2,2,2]) - >>> col = array([0,2,2,0,1,2]) - >>> data = array([1,2,3,4,5,6]) - >>> csr_matrix( (data,(row,col)), shape=(3,3) ).todense() - matrix([[1, 0, 2], - [0, 0, 3], - [4, 5, 6]]) - - >>> indptr = array([0,2,3,6]) - >>> indices = array([0,2,2,0,1,2]) - >>> data = array([1,2,3,4,5,6]) - >>> csr_matrix( (data,indices,indptr), shape=(3,3) ).todense() - matrix([[1, 0, 2], - [0, 0, 3], - [4, 5, 6]]) - - """ - - def transpose(self, copy=False): - from csc import csc_matrix - M,N = self.shape - return csc_matrix((self.data,self.indices,self.indptr), shape=(N,M), copy=copy) - - def tolil(self): - from lil import lil_matrix - lil = lil_matrix(self.shape,dtype=self.dtype) - - self.sort_indices() #lil_matrix needs sorted column indices - - ptr,ind,dat = self.indptr,self.indices,self.data - rows, data = lil.rows, lil.data - - for n in xrange(self.shape[0]): - start = ptr[n] - end = ptr[n+1] - rows[n] = ind[start:end].tolist() - data[n] = dat[start:end].tolist() - - return lil - - def tocsr(self, copy=False): - if copy: - return self.copy() - else: - return self - - def tocsc(self): - indptr = np.empty(self.shape[1] + 1, dtype=np.intc) - indices = np.empty(self.nnz, dtype=np.intc) - data = np.empty(self.nnz, dtype=upcast(self.dtype)) - - csr_tocsc(self.shape[0], self.shape[1], \ - self.indptr, self.indices, self.data, \ - indptr, indices, data) - - from csc import csc_matrix - A = csc_matrix((data, indices, indptr), shape=self.shape) - A.has_sorted_indices = True - return A - - def tobsr(self, blocksize=None, copy=True): - from bsr 
import bsr_matrix - - if blocksize is None: - from spfuncs import estimate_blocksize - return self.tobsr(blocksize=estimate_blocksize(self)) - - elif blocksize == (1,1): - arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr) - return bsr_matrix(arg1, shape=self.shape, copy=copy ) - - else: - R,C = blocksize - M,N = self.shape - - if R < 1 or C < 1 or M % R != 0 or N % C != 0: - raise ValueError('invalid blocksize %s' % blocksize) - - blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices) - - indptr = np.empty(M//R + 1, dtype=np.intc) - indices = np.empty(blks, dtype=np.intc) - data = np.zeros((blks,R,C), dtype=self.dtype) - - csr_tobsr(M, N, R, C, self.indptr, self.indices, self.data, \ - indptr, indices, data.ravel() ) - - return bsr_matrix((data,indices,indptr), shape=self.shape) - - # these functions are used by the parent class (_cs_matrix) - # to remove redudancy between csc_matrix and csr_matrix - def _swap(self,x): - """swap the members of x if this is a column-oriented matrix - """ - return (x[0],x[1]) - - - def __getitem__(self, key): - def asindices(x): - try: - x = np.asarray(x, dtype=np.intc) - except: - raise IndexError('invalid index') - else: - return x - def check_bounds(indices,N): - max_indx = indices.max() - if max_indx >= N: - raise IndexError('index (%d) out of range' % max_indx) - - min_indx = indices.min() - if min_indx < -N: - raise IndexError('index (%d) out of range' % (N + min_indx)) - - return (min_indx,max_indx) - - def extractor(indices,N): - """Return a sparse matrix P so that P*self implements - slicing of the form self[[1,2,3],:] - """ - indices = asindices(indices) - - (min_indx,max_indx) = check_bounds(indices,N) - - if min_indx < 0: - indices = indices.copy() - indices[indices < 0] += N - - indptr = np.arange(len(indices) + 1, dtype=np.intc) - data = np.ones(len(indices), dtype=self.dtype) - shape = (len(indices),N) - - return csr_matrix((data,indices,indptr), shape=shape) - - - if isinstance(key, tuple): - row = key[0] 
- col = key[1] - - if isintlike(row): - #[1,??] - if isintlike(col): - return self._get_single_element(row, col) #[i,j] - elif isinstance(col, slice): - return self._get_row_slice(row, col) #[i,1:2] - else: - P = extractor(col,self.shape[1]).T #[i,[1,2]] - return self[row,:]*P - - elif isinstance(row, slice): - #[1:2,??] - if isintlike(col) or isinstance(col, slice): - return self._get_submatrix(row, col) #[1:2,j] - else: - P = extractor(col,self.shape[1]).T #[1:2,[1,2]] - return self[row,:]*P - - else: - #[[1,2],??] or [[[1],[2]],??] - if isintlike(col) or isinstance(col,slice): - P = extractor(row, self.shape[0]) #[[1,2],j] or [[1,2],1:2] - return (P*self)[:,col] - - else: - row = asindices(row) - col = asindices(col) - if len(row.shape) == 1: - if len(row) != len(col): #[[1,2],[1,2]] - raise IndexError('number of row and column indices differ') - - check_bounds(row, self.shape[0]) - check_bounds(col, self.shape[1]) - - num_samples = len(row) - val = np.empty(num_samples, dtype=self.dtype) - csr_sample_values(self.shape[0], self.shape[1], - self.indptr, self.indices, self.data, - num_samples, row, col, val) - #val = [] - #for i,j in zip(row,col): - # val.append(self._get_single_element(i,j)) - return np.asmatrix(val) - - elif len(row.shape) == 2: - row = np.ravel(row) #[[[1],[2]],[1,2]] - P = extractor(row, self.shape[0]) - return (P*self)[:,col] - - else: - raise NotImplementedError('unsupported indexing') - - elif isintlike(key) or isinstance(key,slice): - return self[key,:] #[i] or [1:2] - else: - return self[asindices(key),:] #[[1,2]] - - - def _get_single_element(self,row,col): - """Returns the single element self[row, col] - """ - M, N = self.shape - if (row < 0): - row += M - if (col < 0): - col += N - if not (0<=row= self.shape[0]: - raise IndexError('index (%d) out of range' % i ) - - start, stop, stride = cslice.indices(self.shape[1]) - - if stride != 1: - raise ValueError("slicing with step != 1 not supported") - if stop <= start: - raise 
ValueError("slice width must be >= 1") - - #TODO make [i,:] faster - #TODO implement [i,x:y:z] - - indices = [] - - for ind in xrange(self.indptr[i], self.indptr[i+1]): - if self.indices[ind] >= start and self.indices[ind] < stop: - indices.append(ind) - - index = self.indices[indices] - start - data = self.data[indices] - indptr = np.array([0, len(indices)]) - return csr_matrix( (data, index, indptr), shape=(1, stop-start) ) - - def _get_submatrix( self, row_slice, col_slice ): - """Return a submatrix of this matrix (new matrix is created).""" - - M,N = self.shape - - def process_slice( sl, num ): - if isinstance( sl, slice ): - i0, i1 = sl.start, sl.stop - if i0 is None: - i0 = 0 - elif i0 < 0: - i0 = num + i0 - - if i1 is None: - i1 = num - elif i1 < 0: - i1 = num + i1 - - return i0, i1 - - elif isintlike( sl ): - if sl < 0: - sl += num - - return sl, sl + 1 - - else: - raise TypeError('expected slice or scalar') - - def check_bounds( i0, i1, num ): - if not (0<=i0>> from scipy.sparse import * - >>> from scipy import * - >>> dia_matrix( (3,4), dtype=int8).todense() - matrix([[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], dtype=int8) - - >>> data = array([[1,2,3,4]]).repeat(3,axis=0) - >>> offsets = array([0,-1,2]) - >>> dia_matrix( (data,offsets), shape=(4,4)).todense() - matrix([[1, 0, 3, 0], - [1, 2, 0, 4], - [0, 2, 3, 0], - [0, 0, 3, 4]]) - - """ - - def __init__(self, arg1, shape=None, dtype=None, copy=False): - _data_matrix.__init__(self) - - if isspmatrix_dia(arg1): - if copy: - arg1 = arg1.copy() - self.data = arg1.data - self.offsets = arg1.offsets - self.shape = arg1.shape - elif isspmatrix(arg1): - if isspmatrix_dia(arg1) and copy: - A = arg1.copy() - else: - A = arg1.todia() - self.data = A.data - self.offsets = A.offsets - self.shape = A.shape - elif isinstance(arg1, tuple): - if isshape(arg1): - # It's a tuple of matrix dimensions (M, N) - # create empty matrix - self.shape = arg1 #spmatrix checks for errors here - self.data = np.zeros( (0,0), 
getdtype(dtype, default=float)) - self.offsets = np.zeros( (0), dtype=np.intc) - else: - try: - # Try interpreting it as (data, offsets) - data, offsets = arg1 - except: - raise ValueError('unrecognized form for dia_matrix constructor') - else: - if shape is None: - raise ValueError('expected a shape argument') - self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy)) - self.offsets = np.atleast_1d(np.array(arg1[1], dtype=np.intc, copy=copy)) - self.shape = shape - else: - #must be dense, convert to COO first, then to DIA - try: - arg1 = np.asarray(arg1) - except: - raise ValueError("unrecognized form for" \ - " %s_matrix constructor" % self.format) - from coo import coo_matrix - A = coo_matrix(arg1, dtype=dtype).todia() - self.data = A.data - self.offsets = A.offsets - self.shape = A.shape - - - if dtype is not None: - self.data = self.data.astype(dtype) - - #check format - if self.offsets.ndim != 1: - raise ValueError('offsets array must have rank 1') - - if self.data.ndim != 2: - raise ValueError('data array must have rank 2') - - if self.data.shape[0] != len(self.offsets): - raise ValueError('number of diagonals (%d) ' \ - 'does not match the number of offsets (%d)' \ - % (self.data.shape[0], len(self.offsets))) - - if len(np.unique(self.offsets)) != len(self.offsets): - raise ValueError('offset array contains duplicate values') - - def __repr__(self): - nnz = self.getnnz() - format = self.getformat() - return "<%dx%d sparse matrix of type '%s'\n" \ - "\twith %d stored elements (%d diagonals) in %s format>" % \ - ( self.shape + (self.dtype.type, nnz, self.data.shape[0], \ - _formats[format][1],) ) - - def getnnz(self): - """number of nonzero values - - explicit zero values are included in this number - """ - M,N = self.shape - nnz = 0 - for k in self.offsets: - if k > 0: - nnz += min(M,N-k) - else: - nnz += min(M+k,N) - return nnz - - nnz = property(fget=getnnz) - - def _mul_vector(self, other): - x = other - - y = np.zeros( self.shape[0], 
dtype=upcast(self.dtype,x.dtype)) - - L = self.data.shape[1] - - M,N = self.shape - - dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel()) - - return y - - def _mul_multimatrix(self, other): - return np.hstack( [ self._mul_vector(col).reshape(-1,1) for col in other.T ] ) - - def todia(self,copy=False): - if copy: - return self.copy() - else: - return self - - def tocsr(self): - #this could be faster - return self.tocoo().tocsr() - - def tocsc(self): - #this could be faster - return self.tocoo().tocsc() - - def tocoo(self): - num_data = len(self.data) - len_data = self.data.shape[1] - - row = np.arange(len_data).reshape(1,-1).repeat(num_data,axis=0) - col = row.copy() - - for i,k in enumerate(self.offsets): - row[i,:] -= k - - row,col,data = row.ravel(),col.ravel(),self.data.ravel() - - mask = (row >= 0) - mask &= (row < self.shape[0]) - mask &= (col < self.shape[1]) - mask &= data != 0 - row,col,data = row[mask],col[mask],data[mask] - - from coo import coo_matrix - return coo_matrix((data,(row,col)), shape=self.shape) - - # needed by _data_matrix - def _with_data(self, data, copy=True): - """Returns a matrix with the same sparsity structure as self, - but with different data. By default the structure arrays are copied. 
- """ - if copy: - return dia_matrix( (data, self.offsets.copy()), shape=self.shape) - else: - return dia_matrix( (data,self.offsets), shape=self.shape) - - -from sputils import _isinstance - -def isspmatrix_dia(x): - return _isinstance(x, dia_matrix) diff --git a/scipy-0.10.1/scipy/sparse/dok.py b/scipy-0.10.1/scipy/sparse/dok.py deleted file mode 100644 index 390002f6c1..0000000000 --- a/scipy-0.10.1/scipy/sparse/dok.py +++ /dev/null @@ -1,568 +0,0 @@ -"""Dictionary Of Keys based matrix""" - -__docformat__ = "restructuredtext en" - -__all__ = ['dok_matrix', 'isspmatrix_dok'] - -from itertools import izip - -import numpy as np - -from base import spmatrix, isspmatrix -from sputils import isdense, getdtype, isshape, isintlike, isscalarlike, upcast - -try: - from operator import isSequenceType as _is_sequence -except ImportError: - def _is_sequence(x): - return (hasattr(x, '__len__') or hasattr(x, '__next__') - or hasattr(x, 'next')) - -class dok_matrix(spmatrix, dict): - """ - Dictionary Of Keys based sparse matrix. - - This is an efficient structure for constructing sparse - matrices incrementally. - - This can be instantiated in several ways: - dok_matrix(D) - with a dense matrix, D - - dok_matrix(S) - with a sparse matrix, S - - dok_matrix((M,N), [dtype]) - create the matrix with initial shape (M,N) - dtype is optional, defaulting to dtype='d' - - Attributes - ---------- - dtype : dtype - Data type of the matrix - shape : 2-tuple - Shape of the matrix - ndim : int - Number of dimensions (this is always 2) - nnz - Number of nonzero elements - - Notes - ----- - - Sparse matrices can be used in arithmetic operations: they support - addition, subtraction, multiplication, division, and matrix power. - - Allows for efficient O(1) access of individual elements. - Duplicates are not allowed. - Can be efficiently converted to a coo_matrix once constructed. 
- - Examples - -------- - >>> from scipy.sparse import * - >>> from scipy import * - >>> S = dok_matrix((5,5), dtype=float32) - >>> for i in range(5): - >>> for j in range(5): - >>> S[i,j] = i+j # Update element - - """ - - def __init__(self, arg1, shape=None, dtype=None, copy=False): - dict.__init__(self) - spmatrix.__init__(self) - - self.dtype = getdtype(dtype, default=float) - if isinstance(arg1, tuple) and isshape(arg1): # (M,N) - M, N = arg1 - self.shape = (M, N) - elif isspmatrix(arg1): # Sparse ctor - if isspmatrix_dok(arg1) and copy: - arg1 = arg1.copy() - else: - arg1 = arg1.todok() - - if dtype is not None: - arg1 = arg1.astype(dtype) - - self.update(arg1) - self.shape = arg1.shape - self.dtype = arg1.dtype - else: # Dense ctor - try: - arg1 = np.asarray(arg1) - except: - raise TypeError('invalid input format') - - if len(arg1.shape)!=2: - raise TypeError('expected rank <=2 dense array or matrix') - - from coo import coo_matrix - self.update( coo_matrix(arg1, dtype=dtype).todok() ) - self.shape = arg1.shape - self.dtype = arg1.dtype - - def getnnz(self): - return dict.__len__(self) - nnz = property(fget=getnnz) - - def __len__(self): - return dict.__len__(self) - - def get(self, key, default=0.): - """This overrides the dict.get method, providing type checking - but otherwise equivalent functionality. - """ - try: - i, j = key - assert isintlike(i) and isintlike(j) - except (AssertionError, TypeError, ValueError): - raise IndexError('index must be a pair of integers') - if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]): - raise IndexError('index out of bounds') - return dict.get(self, key, default) - - def __getitem__(self, key): - """If key=(i,j) is a pair of integers, return the corresponding - element. If either i or j is a slice or sequence, return a new sparse - matrix with just these elements. 
- """ - try: - i, j = key - except (ValueError, TypeError): - raise TypeError('index must be a pair of integers or slices') - - - # Bounds checking - if isintlike(i): - if i < 0: - i += self.shape[0] - if i < 0 or i >= self.shape[0]: - raise IndexError('index out of bounds') - - if isintlike(j): - if j < 0: - j += self.shape[1] - if j < 0 or j >= self.shape[1]: - raise IndexError('index out of bounds') - - # First deal with the case where both i and j are integers - if isintlike(i) and isintlike(j): - return dict.get(self, (i,j), 0.) - else: - # Either i or j is a slice, sequence, or invalid. If i is a slice - # or sequence, unfold it first and call __getitem__ recursively. - - if isinstance(i, slice): - # Is there an easier way to do this? - seq = xrange(i.start or 0, i.stop or self.shape[0], i.step or 1) - elif _is_sequence(i): - seq = i - else: - # Make sure i is an integer. (But allow it to be a subclass of int). - if not isintlike(i): - raise TypeError('index must be a pair of integers or slices') - seq = None - if seq is not None: - # i is a seq - if isintlike(j): - # Create a new matrix of the correct dimensions - first = seq[0] - last = seq[-1] - if first < 0 or first >= self.shape[0] or last < 0 \ - or last >= self.shape[0]: - raise IndexError('index out of bounds') - newshape = (last-first+1, 1) - new = dok_matrix(newshape) - # ** This uses linear time in the size m of dimension 0: - # new[0:seq[-1]-seq[0]+1, 0] = \ - # [self.get((element, j), 0) for element in seq] - # ** Instead just add the non-zero elements. This uses - # ** linear time in the number of non-zeros: - for (ii, jj) in self.keys(): - if jj == j and ii >= first and ii <= last: - dict.__setitem__(new, (ii-first, 0), \ - dict.__getitem__(self, (ii,jj))) - else: - ################################### - # We should reshape the new matrix here! 
- ################################### - raise NotImplementedError("fancy indexing supported over" - " one axis only") - return new - - # Below here, j is a sequence, but i is an integer - if isinstance(j, slice): - # Is there an easier way to do this? - seq = xrange(j.start or 0, j.stop or self.shape[1], j.step or 1) - elif _is_sequence(j): - seq = j - else: - # j is not an integer - raise TypeError("index must be a pair of integers or slices") - - # Create a new matrix of the correct dimensions - first = seq[0] - last = seq[-1] - if first < 0 or first >= self.shape[1] or last < 0 \ - or last >= self.shape[1]: - raise IndexError("index out of bounds") - newshape = (1, last-first+1) - new = dok_matrix(newshape) - # ** This uses linear time in the size n of dimension 1: - # new[0, 0:seq[-1]-seq[0]+1] = \ - # [self.get((i, element), 0) for element in seq] - # ** Instead loop over the non-zero elements. This is slower - # ** if there are many non-zeros - for (ii, jj) in self.keys(): - if ii == i and jj >= first and jj <= last: - dict.__setitem__(new, (0, jj-first), \ - dict.__getitem__(self, (ii,jj))) - return new - - - def __setitem__(self, key, value): - try: - i, j = key - except (ValueError, TypeError): - raise TypeError("index must be a pair of integers or slices") - - # First deal with the case where both i and j are integers - if isintlike(i) and isintlike(j): - if i < 0: - i += self.shape[0] - if j < 0: - j += self.shape[1] - - if i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]: - raise IndexError("index out of bounds") - - if np.isscalar(value): - if value == 0: - if self.has_key((i,j)): - del self[(i,j)] - else: - dict.__setitem__(self, (i,j), self.dtype.type(value)) - else: - raise ValueError('setting an array element with a sequence') - - else: - # Either i or j is a slice, sequence, or invalid. If i is a slice - # or sequence, unfold it first and call __setitem__ recursively. - if isinstance(i, slice): - # Is there an easier way to do this? 
- seq = xrange(i.start or 0, i.stop or self.shape[0], i.step or 1) - elif _is_sequence(i): - seq = i - else: - # Make sure i is an integer. (But allow it to be a subclass of int). - if not isintlike(i): - raise TypeError("index must be a pair of integers or slices") - seq = None - if seq is not None: - # First see if 'value' is another dok_matrix of the appropriate - # dimensions - if isinstance(value, dok_matrix): - if value.shape[1] == 1: - for element in seq: - self[element, j] = value[element, 0] - else: - raise NotImplementedError("setting a 2-d slice of" - " a dok_matrix is not yet supported") - elif np.isscalar(value): - for element in seq: - self[element, j] = value - else: - # See if value is a sequence - try: - if len(seq) != len(value): - raise ValueError("index and value ranges must" - " have the same length") - except TypeError: - # Not a sequence - raise TypeError("unsupported type for" - " dok_matrix.__setitem__") - - # Value is a sequence - for element, val in izip(seq, value): - self[element, j] = val # don't use dict.__setitem__ - # here, since we still want to be able to delete - # 0-valued keys, do type checking on 'val' (e.g. if - # it's a rank-1 dense array), etc. 
- else: - # Process j - if isinstance(j, slice): - seq = xrange(j.start or 0, j.stop or self.shape[1], j.step or 1) - elif _is_sequence(j): - seq = j - else: - # j is not an integer - raise TypeError("index must be a pair of integers or slices") - - # First see if 'value' is another dok_matrix of the appropriate - # dimensions - if isinstance(value, dok_matrix): - if value.shape[0] == 1: - for element in seq: - self[i, element] = value[0, element] - else: - raise NotImplementedError("setting a 2-d slice of" - " a dok_matrix is not yet supported") - elif np.isscalar(value): - for element in seq: - self[i, element] = value - else: - # See if value is a sequence - try: - if len(seq) != len(value): - raise ValueError("index and value ranges must have" - " the same length") - except TypeError: - # Not a sequence - raise TypeError("unsupported type for dok_matrix.__setitem__") - else: - for element, val in izip(seq, value): - self[i, element] = val - - - def __add__(self, other): - # First check if argument is a scalar - if isscalarlike(other): - new = dok_matrix(self.shape, dtype=self.dtype) - # Add this scalar to every element. - M, N = self.shape - for i in xrange(M): - for j in xrange(N): - aij = self.get((i, j), 0) + other - if aij != 0: - new[i, j] = aij - #new.dtype.char = self.dtype.char - elif isinstance(other, dok_matrix): - if other.shape != self.shape: - raise ValueError("matrix dimensions are not equal") - # We could alternatively set the dimensions to the the largest of - # the two matrices to be summed. Would this be a good idea? 
- new = dok_matrix(self.shape, dtype=self.dtype) - new.update(self) - for key in other.keys(): - new[key] += other[key] - elif isspmatrix(other): - csc = self.tocsc() - new = csc + other - elif isdense(other): - new = self.todense() + other - else: - raise TypeError("data type not understood") - return new - - def __radd__(self, other): - # First check if argument is a scalar - if isscalarlike(other): - new = dok_matrix(self.shape, dtype=self.dtype) - # Add this scalar to every element. - M, N = self.shape - for i in xrange(M): - for j in xrange(N): - aij = self.get((i, j), 0) + other - if aij != 0: - new[i, j] = aij - elif isinstance(other, dok_matrix): - if other.shape != self.shape: - raise ValueError("matrix dimensions are not equal") - new = dok_matrix(self.shape, dtype=self.dtype) - new.update(self) - for key in other: - new[key] += other[key] - elif isspmatrix(other): - csc = self.tocsc() - new = csc + other - elif isdense(other): - new = other + self.todense() - else: - raise TypeError("data type not understood") - return new - - def __neg__(self): - new = dok_matrix(self.shape, dtype=self.dtype) - for key in self.keys(): - new[key] = -self[key] - return new - - def _mul_scalar(self, other): - # Multiply this scalar by every element. - new = dok_matrix(self.shape, dtype=self.dtype) - for (key, val) in self.iteritems(): - new[key] = val * other - return new - - def _mul_vector(self, other): - #matrix * vector - result = np.zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) ) - for (i,j),v in self.iteritems(): - result[i] += v * other[j] - return result - - def _mul_multivector(self, other): - #matrix * multivector - M,N = self.shape - n_vecs = other.shape[1] #number of column vectors - result = np.zeros( (M,n_vecs), dtype=upcast(self.dtype,other.dtype) ) - for (i,j),v in self.iteritems(): - result[i,:] += v * other[j,:] - return result - - def __imul__(self, other): - if isscalarlike(other): - # Multiply this scalar by every element. 
- for (key, val) in self.iteritems(): - self[key] = val * other - #new.dtype.char = self.dtype.char - return self - else: - return NotImplementedError - - - def __truediv__(self, other): - if isscalarlike(other): - new = dok_matrix(self.shape, dtype=self.dtype) - # Multiply this scalar by every element. - for (key, val) in self.iteritems(): - new[key] = val / other - #new.dtype.char = self.dtype.char - return new - else: - return self.tocsr() / other - - - def __itruediv__(self, other): - if isscalarlike(other): - # Multiply this scalar by every element. - for (key, val) in self.iteritems(): - self[key] = val / other - return self - else: - return NotImplementedError - - # What should len(sparse) return? For consistency with dense matrices, - # perhaps it should be the number of rows? For now it returns the number - # of non-zeros. - - def transpose(self): - """ Return the transpose - """ - M, N = self.shape - new = dok_matrix((N, M), dtype=self.dtype) - for key, value in self.iteritems(): - new[key[1], key[0]] = value - return new - - def conjtransp(self): - """ Return the conjugate transpose - """ - M, N = self.shape - new = dok_matrix((N, M), dtype=self.dtype) - for key, value in self.iteritems(): - new[key[1], key[0]] = np.conj(value) - return new - - def copy(self): - new = dok_matrix(self.shape, dtype=self.dtype) - new.update(self) - return new - - def take(self, cols_or_rows, columns=1): - # Extract columns or rows as indictated from matrix - # assume cols_or_rows is sorted - new = dok_matrix(dtype=self.dtype) # what should the dimensions be ?! 
- indx = int((columns == 1)) - N = len(cols_or_rows) - if indx: # columns - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[1]) - if num < N: - newkey = (key[0], num) - new[newkey] = self[key] - else: - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[0]) - if num < N: - newkey = (num, key[1]) - new[newkey] = self[key] - return new - - def split(self, cols_or_rows, columns=1): - # Similar to take but returns two arrays, the extracted columns plus - # the resulting array. Assumes cols_or_rows is sorted - base = dok_matrix() - ext = dok_matrix() - indx = int((columns == 1)) - if indx: - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[1]) - if cols_or_rows[num] == key[1]: - newkey = (key[0], num) - ext[newkey] = self[key] - else: - newkey = (key[0], key[1]-num) - base[newkey] = self[key] - else: - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[0]) - if cols_or_rows[num] == key[0]: - newkey = (num, key[1]) - ext[newkey] = self[key] - else: - newkey = (key[0]-num, key[1]) - base[newkey] = self[key] - return base, ext - - def tocoo(self): - """ Return a copy of this matrix in COOrdinate format""" - from coo import coo_matrix - if self.nnz == 0: - return coo_matrix(self.shape, dtype=self.dtype) - else: - data = np.asarray(self.values(), dtype=self.dtype) - indices = np.asarray(self.keys(), dtype=np.intc).T - return coo_matrix((data,indices), shape=self.shape, dtype=self.dtype) - - def todok(self,copy=False): - if copy: - return self.copy() - else: - return self - - def tocsr(self): - """ Return a copy of this matrix in Compressed Sparse Row format""" - return self.tocoo().tocsr() - - def tocsc(self): - """ Return a copy of this matrix in Compressed Sparse Column format""" - return self.tocoo().tocsc() - - def toarray(self): - return self.tocoo().toarray() - - def resize(self, shape): - """ Resize the matrix in-place to dimensions given by 'shape'. 
- - Any non-zero elements that lie outside the new shape are removed. - """ - if not isshape(shape): - raise TypeError("dimensions must be a 2-tuple of positive" - " integers") - newM, newN = shape - M, N = self.shape - if newM < M or newN < N: - # Remove all elements outside new dimensions - for (i, j) in self.keys(): - if i >= newM or j >= newN: - del self[i, j] - self._shape = shape - - - -from sputils import _isinstance - -def isspmatrix_dok(x): - return _isinstance(x, dok_matrix) diff --git a/scipy-0.10.1/scipy/sparse/extract.py b/scipy-0.10.1/scipy/sparse/extract.py deleted file mode 100644 index 2f24b76059..0000000000 --- a/scipy-0.10.1/scipy/sparse/extract.py +++ /dev/null @@ -1,172 +0,0 @@ -"""Functions to extract parts of sparse matrices -""" - -__docformat__ = "restructuredtext en" - -__all__ = ['find', 'tril', 'triu'] - - -from coo import coo_matrix - -def find(A): - """Return the indices and values of the nonzero elements of a matrix - - Parameters - ---------- - A : dense or sparse matrix - Matrix whose nonzero elements are desired. - - Returns - ------- - (I,J,V) : tuple of arrays - I,J, and V contain the row indices, column indices, and values - of the nonzero matrix entries. - - - Examples - -------- - >>> from scipy.sparse import csr_matrix - >>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]]) - >>> find(A) - (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7., 8., 9.])) - - """ - - A = coo_matrix(A).tocsr() #sums duplicates - A.eliminate_zeros() #removes explicit zeros - A = A.tocoo(copy=False) #(cheaply) convert to COO - - return A.row,A.col,A.data - - - -def tril(A, k=0, format=None): - """Return the lower triangular portion of a matrix in sparse format - - Returns the elements on or below the k-th diagonal of the matrix A. 
- - k = 0 corresponds to the main diagonal - - k > 0 is above the main diagonal - - k < 0 is below the main diagonal - - Parameters - ---------- - A : dense or sparse matrix - Matrix whose lower trianglar portion is desired. - k : integer : optional - The top-most diagonal of the lower triangle. - format : string - Sparse format of the result, e.g. format="csr", etc. - - Returns - ------- - L : sparse matrix - Lower triangular portion of A in sparse format. - - See Also - -------- - triu : upper triangle in sparse format - - Examples - -------- - >>> from scipy.sparse import csr_matrix - >>> A = csr_matrix( [[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]], dtype='int32' ) - >>> A.todense() - matrix([[1, 2, 0, 0, 3], - [4, 5, 0, 6, 7], - [0, 0, 8, 9, 0]]) - >>> tril(A).todense() - matrix([[1, 0, 0, 0, 0], - [4, 5, 0, 0, 0], - [0, 0, 8, 0, 0]]) - >>> tril(A).nnz - 4 - >>> tril(A, k=1).todense() - matrix([[1, 2, 0, 0, 0], - [4, 5, 0, 0, 0], - [0, 0, 8, 9, 0]]) - >>> tril(A, k=-1).todense() - matrix([[0, 0, 0, 0, 0], - [4, 0, 0, 0, 0], - [0, 0, 0, 0, 0]]) - >>> tril(A, format='csc') - <3x5 sparse matrix of type '' - with 4 stored elements in Compressed Sparse Column format> - - """ - - # convert to COOrdinate format where things are easy - A = coo_matrix(A, copy=False) - - mask = A.row + k >= A.col - - row = A.row[mask] - col = A.col[mask] - data = A.data[mask] - - return coo_matrix( (data,(row,col)), shape=A.shape ).asformat(format) - - -def triu(A, k=0, format=None): - """Return the upper triangular portion of a matrix in sparse format - - Returns the elements on or above the k-th diagonal of the matrix A. - - k = 0 corresponds to the main diagonal - - k > 0 is above the main diagonal - - k < 0 is below the main diagonal - - Parameters - ---------- - A : dense or sparse matrix - Matrix whose upper trianglar portion is desired. - k : integer : optional - The bottom-most diagonal of the upper triangle. - format : string - Sparse format of the result, e.g. format="csr", etc. 
- - Returns - ------- - L : sparse matrix - Upper triangular portion of A in sparse format. - - See Also - -------- - tril : lower triangle in sparse format - - Examples - -------- - >>> from scipy.sparse import csr_matrix - >>> A = csr_matrix( [[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]], dtype='int32' ) - >>> A.todense() - matrix([[1, 2, 0, 0, 3], - [4, 5, 0, 6, 7], - [0, 0, 8, 9, 0]]) - >>> triu(A).todense() - matrix([[1, 2, 0, 0, 3], - [0, 5, 0, 6, 7], - [0, 0, 8, 9, 0]]) - >>> triu(A).nnz - 8 - >>> triu(A, k=1).todense() - matrix([[0, 2, 0, 0, 3], - [0, 0, 0, 6, 7], - [0, 0, 0, 9, 0]]) - >>> triu(A, k=-1).todense() - matrix([[1, 2, 0, 0, 3], - [4, 5, 0, 6, 7], - [0, 0, 8, 9, 0]]) - >>> triu(A, format='csc') - <3x5 sparse matrix of type '' - with 8 stored elements in Compressed Sparse Column format> - - """ - - # convert to COOrdinate format where things are easy - A = coo_matrix(A, copy=False) - - mask = A.row + k <= A.col - - row = A.row[mask] - col = A.col[mask] - data = A.data[mask] - - return coo_matrix( (data,(row,col)), shape=A.shape ).asformat(format) diff --git a/scipy-0.10.1/scipy/sparse/lil.py b/scipy-0.10.1/scipy/sparse/lil.py deleted file mode 100644 index c00414ed45..0000000000 --- a/scipy-0.10.1/scipy/sparse/lil.py +++ /dev/null @@ -1,472 +0,0 @@ -"""LInked List sparse matrix class -""" - -__docformat__ = "restructuredtext en" - -__all__ = ['lil_matrix','isspmatrix_lil'] - -from bisect import bisect_left - -import numpy as np - -from base import spmatrix, isspmatrix -from sputils import getdtype, isshape, issequence, isscalarlike - -class lil_matrix(spmatrix): - """Row-based linked list sparse matrix - - This is an efficient structure for constructing sparse - matrices incrementally. 
- - This can be instantiated in several ways: - lil_matrix(D) - with a dense matrix or rank-2 ndarray D - - lil_matrix(S) - with another sparse matrix S (equivalent to S.tocsc()) - - lil_matrix((M, N), [dtype]) - to construct an empty matrix with shape (M, N) - dtype is optional, defaulting to dtype='d'. - - Attributes - ---------- - dtype : dtype - Data type of the matrix - shape : 2-tuple - Shape of the matrix - ndim : int - Number of dimensions (this is always 2) - nnz - Number of nonzero elements - data - LIL format data array of the matrix - rows - LIL format row index array of the matrix - - Notes - ----- - - Sparse matrices can be used in arithmetic operations: they support - addition, subtraction, multiplication, division, and matrix power. - - Advantages of the LIL format - - supports flexible slicing - - changes to the matrix sparsity structure are efficient - - Disadvantages of the LIL format - - arithmetic operations LIL + LIL are slow (consider CSR or CSC) - - slow column slicing (consider CSC) - - slow matrix vector products (consider CSR or CSC) - - Intended Usage - - LIL is a convenient format for constructing sparse matrices - - once a matrix has been constructed, convert to CSR or - CSC format for fast arithmetic and matrix vector operations - - consider using the COO format when constructing large matrices - - Data Structure - - An array (``self.rows``) of rows, each of which is a sorted - list of column indices of non-zero elements. - - The corresponding nonzero values are stored in similar - fashion in ``self.data``. 
- - - """ - - def __init__(self, arg1, shape=None, dtype=None, copy=False): - spmatrix.__init__(self) - self.dtype = getdtype(dtype, arg1, default=float) - - # First get the shape - if isspmatrix(arg1): - if isspmatrix_lil(arg1) and copy: - A = arg1.copy() - else: - A = arg1.tolil() - - if dtype is not None: - A = A.astype(dtype) - - self.shape = A.shape - self.dtype = A.dtype - self.rows = A.rows - self.data = A.data - elif isinstance(arg1,tuple): - if isshape(arg1): - if shape is not None: - raise ValueError('invalid use of shape parameter') - M, N = arg1 - self.shape = (M,N) - self.rows = np.empty((M,), dtype=object) - self.data = np.empty((M,), dtype=object) - for i in range(M): - self.rows[i] = [] - self.data[i] = [] - else: - raise TypeError('unrecognized lil_matrix constructor usage') - else: - #assume A is dense - try: - A = np.asmatrix(arg1) - except TypeError: - raise TypeError('unsupported matrix type') - else: - from csr import csr_matrix - A = csr_matrix(A, dtype=dtype).tolil() - - self.shape = A.shape - self.dtype = A.dtype - self.rows = A.rows - self.data = A.data - - def __iadd__(self,other): - self[:,:] = self + other - return self - - def __isub__(self,other): - self[:,:] = self - other - return self - - def __imul__(self,other): - if isscalarlike(other): - self[:,:] = self * other - return self - else: - raise NotImplementedError - - def __itruediv__(self,other): - if isscalarlike(other): - self[:,:] = self / other - return self - else: - raise NotImplementedError - - # Whenever the dimensions change, empty lists should be created for each - # row - - def getnnz(self): - return sum([len(rowvals) for rowvals in self.data]) - nnz = property(fget=getnnz) - - def __str__(self): - val = '' - for i, row in enumerate(self.rows): - for pos, j in enumerate(row): - val += " %s\t%s\n" % (str((i, j)), str(self.data[i][pos])) - return val[:-1] - - def getrowview(self, i): - """Returns a view of the 'i'th row (without copying). 
- """ - new = lil_matrix((1, self.shape[1]), dtype=self.dtype) - new.rows[0] = self.rows[i] - new.data[0] = self.data[i] - return new - - def getrow(self, i): - """Returns a copy of the 'i'th row. - """ - new = lil_matrix((1, self.shape[1]), dtype=self.dtype) - new.rows[0] = self.rows[i][:] - new.data[0] = self.data[i][:] - return new - - def _get1(self, i, j): - - if i < 0: - i += self.shape[0] - if i < 0 or i >= self.shape[0]: - raise IndexError('row index out of bounds') - - if j < 0: - j += self.shape[1] - if j < 0 or j >= self.shape[1]: - raise IndexError('column index out of bounds') - - row = self.rows[i] - data = self.data[i] - - pos = bisect_left(row, j) - if pos != len(data) and row[pos] == j: - return data[pos] - else: - return 0 - - def _slicetoseq(self, j, shape): - if j.start is not None and j.start < 0: - start = shape + j.start - elif j.start is None: - start = 0 - else: - start = j.start - if j.stop is not None and j.stop < 0: - stop = shape + j.stop - elif j.stop is None: - stop = shape - else: - stop = j.stop - j = range(start, stop, j.step or 1) - return j - - - def __getitem__(self, index): - """Return the element(s) index=(i, j), where j may be a slice. - This always returns a copy for consistency, since slices into - Python lists return copies. 
- """ - try: - i, j = index - except (AssertionError, TypeError): - raise IndexError('invalid index') - - if np.isscalar(i): - if np.isscalar(j): - return self._get1(i, j) - if isinstance(j, slice): - j = self._slicetoseq(j, self.shape[1]) - if issequence(j): - return self.__class__([[self._get1(i, jj) for jj in j]]) - elif issequence(i) and issequence(j): - return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]]) - elif issequence(i) or isinstance(i, slice): - if isinstance(i, slice): - i = self._slicetoseq(i, self.shape[0]) - if np.isscalar(j): - return self.__class__([[self._get1(ii, j)] for ii in i]) - if isinstance(j, slice): - j = self._slicetoseq(j, self.shape[1]) - if issequence(j): - return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i]) - else: - raise IndexError - - def _insertat2(self, row, data, j, x): - """ helper for __setitem__: insert a value in the given row/data at - column j. """ - - if j < 0: #handle negative column indices - j += self.shape[1] - - if j < 0 or j >= self.shape[1]: - raise IndexError('column index out of bounds') - - if not np.isscalar(x): - raise ValueError('setting an array element with a sequence') - - try: - x = self.dtype.type(x) - except: - raise TypeError('Unable to convert value (%s) to dtype [%s]' % (x,self.dtype.name)) - - pos = bisect_left(row, j) - if x != 0: - if pos == len(row): - row.append(j) - data.append(x) - elif row[pos] != j: - row.insert(pos, j) - data.insert(pos, x) - else: - data[pos] = x - else: - if pos < len(row) and row[pos] == j: - del row[pos] - del data[pos] - - def _setitem_setrow(self, row, data, j, xrow, xdata, xcols): - if isinstance(j, slice): - j = self._slicetoseq(j, self.shape[1]) - if issequence(j): - if xcols == len(j): - for jj, xi in zip(j, xrange(xcols)): - pos = bisect_left(xrow, xi) - if pos != len(xdata) and xrow[pos] == xi: - self._insertat2(row, data, jj, xdata[pos]) - else: - self._insertat2(row, data, jj, 0) - elif xcols == 1: # OK, broadcast across 
row - if len(xdata) > 0 and xrow[0] == 0: - val = xdata[0] - else: - val = 0 - for jj in j: - self._insertat2(row, data, jj,val) - else: - raise IndexError('invalid index') - elif np.isscalar(j): - if not xcols == 1: - raise ValueError('array dimensions are not compatible for copy') - if len(xdata) > 0 and xrow[0] == 0: - self._insertat2(row, data, j, xdata[0]) - else: - self._insertat2(row, data, j, 0) - else: - raise ValueError('invalid column value: %s' % str(j)) - - def __setitem__(self, index, x): - try: - i, j = index - except (ValueError, TypeError): - raise IndexError('invalid index') - - # shortcut for common case of single entry assign: - if np.isscalar(x) and np.isscalar(i) and np.isscalar(j): - self._insertat2(self.rows[i], self.data[i], j, x) - return - - # shortcut for common case of full matrix assign: - if isspmatrix(x): - if isinstance(i, slice) and i == slice(None) and \ - isinstance(j, slice) and j == slice(None): - x = lil_matrix(x) - self.rows = x.rows - self.data = x.data - return - - if isinstance(i, tuple): # can't index lists with tuple - i = list(i) - - if np.isscalar(i): - rows = [self.rows[i]] - datas = [self.data[i]] - else: - rows = self.rows[i] - datas = self.data[i] - - x = lil_matrix(x, copy=False) - xrows, xcols = x.shape - if xrows == len(rows): # normal rectangular copy - for row, data, xrow, xdata in zip(rows, datas, x.rows, x.data): - self._setitem_setrow(row, data, j, xrow, xdata, xcols) - elif xrows == 1: # OK, broadcast down column - for row, data in zip(rows, datas): - self._setitem_setrow(row, data, j, x.rows[0], x.data[0], xcols) - - # needed to pass 'test_lil_sequence_assignement' unit test: - # -- set row from column of entries -- - elif xcols == len(rows): - x = x.T - for row, data, xrow, xdata in zip(rows, datas, x.rows, x.data): - self._setitem_setrow(row, data, j, xrow, xdata, xrows) - else: - raise IndexError('invalid index') - - def _mul_scalar(self, other): - if other == 0: - # Multiply by zero: return the zero 
matrix - new = lil_matrix(self.shape, dtype=self.dtype) - else: - new = self.copy() - # Multiply this scalar by every element. - new.data = np.array([[val*other for val in rowvals] for - rowvals in new.data], dtype=object) - return new - - def __truediv__(self, other): # self / other - if isscalarlike(other): - new = self.copy() - # Divide every element by this scalar - new.data = np.array([[val/other for val in rowvals] for - rowvals in new.data], dtype=object) - return new - else: - return self.tocsr() / other - -## This code doesn't work with complex matrices -# def multiply(self, other): -# """Point-wise multiplication by another lil_matrix. -# -# """ -# if np.isscalar(other): -# return self.__mul__(other) -# -# if isspmatrix_lil(other): -# reference,target = self,other -# -# if reference.shape != target.shape: -# raise ValueError("Dimensions do not match.") -# -# if len(reference.data) > len(target.data): -# reference,target = target,reference -# -# new = lil_matrix(reference.shape) -# for r,row in enumerate(reference.rows): -# tr = target.rows[r] -# td = target.data[r] -# rd = reference.data[r] -# L = len(tr) -# for c,column in enumerate(row): -# ix = bisect_left(tr,column) -# if ix < L and tr[ix] == column: -# new.rows[r].append(column) -# new.data[r].append(rd[c] * td[ix]) -# return new -# else: -# raise ValueError("Point-wise multiplication only allowed " -# "with another lil_matrix.") - - def copy(self): - from copy import deepcopy - new = lil_matrix(self.shape, dtype=self.dtype) - new.data = deepcopy(self.data) - new.rows = deepcopy(self.rows) - return new - - def reshape(self,shape): - new = lil_matrix(shape, dtype=self.dtype) - j_max = self.shape[1] - for i,row in enumerate(self.rows): - for col,j in enumerate(row): - new_r,new_c = np.unravel_index(i*j_max + j,shape) - new[new_r,new_c] = self[i,j] - return new - - def toarray(self): - d = np.zeros(self.shape, dtype=self.dtype) - for i, row in enumerate(self.rows): - for pos, j in enumerate(row): - d[i, 
j] = self.data[i][pos] - return d - - def transpose(self): - return self.tocsr().transpose().tolil() - - def tolil(self, copy=False): - if copy: - return self.copy() - else: - return self - - def tocsr(self): - """ Return Compressed Sparse Row format arrays for this matrix. - """ - - indptr = np.asarray([len(x) for x in self.rows], dtype=np.intc) - indptr = np.concatenate( (np.array([0], dtype=np.intc), np.cumsum(indptr)) ) - - nnz = indptr[-1] - - indices = [] - for x in self.rows: - indices.extend(x) - indices = np.asarray(indices, dtype=np.intc) - - data = [] - for x in self.data: - data.extend(x) - data = np.asarray(data, dtype=self.dtype) - - from csr import csr_matrix - return csr_matrix((data, indices, indptr), shape=self.shape) - - def tocsc(self): - """ Return Compressed Sparse Column format arrays for this matrix. - """ - return self.tocsr().tocsc() - - -from sputils import _isinstance - -def isspmatrix_lil( x ): - return _isinstance(x, lil_matrix) diff --git a/scipy-0.10.1/scipy/sparse/linalg/__init__.py b/scipy-0.10.1/scipy/sparse/linalg/__init__.py deleted file mode 100644 index 61f1be1b26..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -"Sparse Linear Algebra routines" - -from info import __doc__ - -from isolve import * -from dsolve import * -from interface import * -from eigen import * - - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/sparse/linalg/bento.info b/scipy-0.10.1/scipy/sparse/linalg/bento.info deleted file mode 100644 index faabf37afd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/bento.info +++ /dev/null @@ -1,7 +0,0 @@ -Recurse: isolve, dsolve, eigen - -Library: - Packages: - dsolve, - eigen, - isolve diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SConscript b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SConscript deleted file mode 100644 index c32e787df4..0000000000 
--- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SConscript +++ /dev/null @@ -1,51 +0,0 @@ -import os -import glob -import sys - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK -from numscons import write_info -from numscons.core.misc import built_with_mstools, built_with_gnu_f77 - -env = GetNumpyEnvironment(ARGUMENTS) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLapack' : CheckF77LAPACK}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckLapack() -if not st: - raise RuntimeError("no lapack found, necessary for dsolve module") - -config.Finish() -write_info(env) - -# Build superlu lib -superlu_env = env.Clone() -superlu_def = [] -if sys.platform == 'win32': - superlu_def.append((('NO_TIMER'), 1)) -superlu_def.append((('USE_VENDOR_BLAS'), 2)) -superlu_env.Append(CPPDEFINES=superlu_def) -superlu_env.Append(CPPPATH=[os.path.join('SuperLU', 'SRC')]) - -superlu_src = env.Glob(os.path.join('SuperLU', 'SRC', "*.c")) - -# XXX: we should detect whether lsame is already defined in BLAS/LAPACK. 
Here, -# when using MSVC + MKL, lsame is already in MKL -if not (built_with_mstools(env) and (not built_with_gnu_f77(env))): - superlu_src.append(os.path.join("SuperLU", "SRC", "lsame.c")) -superlu = superlu_env.DistutilsStaticExtLibrary('superlu_src', source=superlu_src) - -# Build python extensions -pyenv = env.Clone() -pyenv.Append(CPPPATH=[os.path.join('SuperLU', 'SRC')]) -pyenv.Prepend(LIBPATH=["."]) -pyenv.Prepend(LIBS=["superlu_src"]) -common_src = ['_superlu_utils.c', '_superluobject.c'] - -pyenv.NumpyPythonExtension('_superlu', source=common_src + ['_superlumodule.c']) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SConstruct b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccolumn_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccolumn_bmod.c deleted file mode 100644 index d20e95c54f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccolumn_bmod.c +++ /dev/null @@ -1,365 +0,0 @@ - -/*! @file ccolumn_bmod.c - * \brief performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - *  Permission is hereby granted to use or copy this program for any
    - *  purpose, provided the above notices are retained on all copies.
    - *  Permission to modify the code and to distribute modified code is
    - *  granted, provided the above notices are retained, and a notice that
    - *  the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include -#include -#include "slu_cdefs.h" - -/* - * Function prototypes - */ -void cusolve(int, int, complex*, complex*); -void clsolve(int, int, complex*, complex*); -void cmatvec(int, int, int, complex*, complex*, complex*); - - - -/*! \brief - * - *
    - * Purpose:
    - * ========
    - * Performs numeric block updates (sup-col) in topological order.
    - * It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - * Special processing on the supernodal portion of L\U[*,j]
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -ccolumn_bmod ( - const int jcol, /* in */ - const int nseg, /* in */ - complex *dense, /* in */ - complex *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in */ - int fpanelc, /* in -- first column in the current panel */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - complex alpha, beta; - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * nsupc = no of columns in supernode - * nsupr = no of rows in supernode (used as leading dimension) - * luptr = location of supernodal LU-block in storage - * kfnz = first nonz in the k-th supernodal segment - * no_zeros = no of leading zeros in a supernodal U-segment - */ - complex ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int fsupc, nsupc, nsupr, segsze; - int nrow; /* No of rows in the matrix of matrix-vector */ - int jcolp1, jsupno, k, ksub, krep, krep_ind, ksupno; - register int lptr, kfnz, isub, irow, i; - register int no_zeros, new_next; - int ufirst, nextlu; - int fst_col; /* First column within small LU update */ - int d_fsupc; /* Distance between the first column of the current - panel and the first column of the current snode. 
*/ - int *xsup, *supno; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - int nzlumax; - complex *tempv1; - complex zero = {0.0, 0.0}; - complex one = {1.0, 0.0}; - complex none = {-1.0, 0.0}; - complex comp_temp, comp_temp1; - int mem_error; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - nzlumax = Glu->nzlumax; - jcolp1 = jcol + 1; - jsupno = supno[jcol]; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - - krep = segrep[k]; - k--; - ksupno = supno[krep]; - if ( jsupno != ksupno ) { /* Outside the rectangular supernode */ - - fsupc = xsup[ksupno]; - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - /* Distance from the current supernode to the current panel; - d_fsupc=0 if fsupc > fpanelc. */ - d_fsupc = fst_col - fsupc; - - luptr = xlusup[fst_col] + d_fsupc; - lptr = xlsub[fsupc] + d_fsupc; - - kfnz = repfnz[krep]; - kfnz = SUPERLU_MAX ( kfnz, fpanelc ); - - segsze = krep - kfnz + 1; - nsupc = krep - fst_col + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nrow = nsupr - d_fsupc - nsupc; - krep_ind = lptr + nsupc - 1; - - - - - /* - * Case 1: Update U-segment of size 1 -- col-col update - */ - if ( segsze == 1 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - c_sub(&dense[irow], &dense[irow], &comp_temp); - luptr++; - } - - } else if ( segsze <= 3 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { /* Case 2: 2cols-col update */ - cc_mult(&comp_temp, &ukj1, &lusup[luptr1]); - c_sub(&ukj, &ukj, &comp_temp); - dense[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = 
lsub[i]; - luptr++; - luptr1++; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - cc_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&dense[irow], &dense[irow], &comp_temp); - } - } else { /* Case 3: 3cols-col update */ - ukj2 = dense[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - cc_mult(&comp_temp, &ukj2, &lusup[luptr2-1]); - c_sub(&ukj1, &ukj1, &comp_temp); - - cc_mult(&comp_temp, &ukj1, &lusup[luptr1]); - cc_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&ukj, &ukj, &comp_temp); - - dense[lsub[krep_ind]] = ukj; - dense[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - luptr2++; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - cc_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - cc_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&dense[irow], &dense[irow], &comp_temp); - } - } - - - } else { - /* - * Case: sup-col update - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense - */ - - no_zeros = kfnz - fst_col; - - /* Copy U[*,j] segment from dense[*] to tempv[*] */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - tempv[i] = dense[irow]; - ++isub; - } - - /* Dense triangular solve -- start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - ctrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - CGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - cgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - 
&nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - clsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - cmatvec (nsupr, nrow , segsze, &lusup[luptr], tempv, tempv1); -#endif - - - /* Scatter tempv[] into SPA dense[] as a temporary storage */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense[irow] = tempv[i]; - tempv[i] = zero; - ++isub; - } - - /* Scatter tempv1[] into SPA dense[] */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - c_sub(&dense[irow], &dense[irow], &tempv1[i]); - tempv1[i] = zero; - ++isub; - } - } - - } /* if jsupno ... */ - - } /* for each segment... */ - - /* - * Process the supernodal portion of L\U[*,j] - */ - nextlu = xlusup[jcol]; - fsupc = xsup[jsupno]; - - /* Copy the SPA dense into L\U[*,j] */ - new_next = nextlu + xlsub[fsupc+1] - xlsub[fsupc]; - while ( new_next > nzlumax ) { - if (mem_error = cLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, Glu)) - return (mem_error); - lusup = Glu->lusup; - lsub = Glu->lsub; - } - - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = zero; - ++nextlu; - } - - xlusup[jcolp1] = nextlu; /* Close L\U[*,jcol] */ - - /* For more updates within the panel (also within the current supernode), - * should start from the first column of the panel, or the first column - * of the supernode, whichever is bigger. There are 2 cases: - * 1) fsupc < fpanelc, then fst_col := fpanelc - * 2) fsupc >= fpanelc, then fst_col := fsupc - */ - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - if ( fst_col < jcol ) { - - /* Distance between the current supernode and the current panel. - d_fsupc=0 if fsupc >= fpanelc. 
*/ - d_fsupc = fst_col - fsupc; - - lptr = xlsub[fsupc] + d_fsupc; - luptr = xlusup[fst_col] + d_fsupc; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nsupc = jcol - fst_col; /* Excluding jcol */ - nrow = nsupr - d_fsupc - nsupc; - - /* Points to the beginning of jcol in snode L\U(jsupno) */ - ufirst = xlusup[jcol] + d_fsupc; - - ops[TRSV] += 4 * nsupc * (nsupc - 1); - ops[GEMV] += 8 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#else - ctrsv_( "L", "N", "U", &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#endif - - alpha = none; beta = one; /* y := beta*y + alpha*A*x */ - -#ifdef _CRAY - CGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - cgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - clsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - - cmatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], tempv ); - - /* Copy updates from tempv[*] into lusup[*] */ - isub = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - c_sub(&lusup[isub], &lusup[isub], &tempv[i]); - tempv[i] = zero; - ++isub; - } - -#endif - - - } /* if fst_col < jcol ... */ - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccolumn_dfs.c deleted file mode 100644 index 58940efb54..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccolumn_dfs.c +++ /dev/null @@ -1,275 +0,0 @@ - -/*! @file ccolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include "slu_cdefs.h" - -/*! \brief What type of supernodes we want */ -#define T2_SUPER - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   CCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS 
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal 
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -ccolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *xprune, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom, istop; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonz */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ( mem_error = cLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } 
else { - /* krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( mem_error = - cLUMemXpand(jcol,nextl,LSUB,&nzlmax,Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - -#ifdef T2_SUPER - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; -#endif - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first and last columns of - * a supernode. 
(first for num values, last for pruning) - */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1-1) ) { /* >= 3 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - istop = ito + jptr - jm1ptr; - xprune[jcolm1] = istop; /* Initialize xprune[jcol-1] */ - xlsub[jcol] = istop; - for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; /* = istop + length(jcol) */ - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xprune[jcol] = nextl; /* Initialize upper bound for pruning */ - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccopy_to_ucol.c deleted file mode 100644 index 3e4b39674b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ccopy_to_ucol.c +++ /dev/null @@ -1,103 +0,0 @@ - -/*! @file ccopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_cdefs.h" - -int -ccopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - complex *dense, /* modified - reset to zero on return */ - GlobalLU_t *Glu /* modified */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - complex *ucol; - int *usub, *xusub; - int nzumax; - complex zero = {0.0, 0.0}; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 1; - - new_next = nextu + segsze; - while ( new_next > nzumax ) { - if (mem_error = cLUMemXpand(jcol, nextu, UCOL, &nzumax, Glu)) - return (mem_error); - ucol = Glu->ucol; - if (mem_error = cLUMemXpand(jcol, nextu, USUB, &nzumax, Glu)) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - dense[irow] = zero; - nextu++; - isub++; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cdiagonal.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cdiagonal.c deleted file mode 100644 index 3ecc549fe7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cdiagonal.c +++ /dev/null @@ -1,133 +0,0 @@ - -/*! 
@file cdiagonal.c - * \brief Auxiliary routines to work with diagonal elements - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_cdefs.h" - -int cfill_diag(int n, NCformat *Astore) -/* fill explicit zeros on the diagonal entries, so that the matrix is not - structurally singular. */ -{ - complex *nzval = (complex *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - complex *nzval_new; - complex zero = {1.0, 0.0}; - int *rowind_new; - int i, j, diag; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = complexMalloc(nnz + fill); - rowind_new = intMalloc(nnz + fill); - fill = 0; - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - nzval_new[j + fill] = nzval[j]; - } - if (diag < 0) - { - rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill] = zero; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - Astore->nnz += fill; - return fill; -} - -int cdominate(int n, NCformat *Astore) -/* make the matrix diagonally dominant */ -{ - complex *nzval = (complex *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - complex *nzval_new; - int *rowind_new; - int i, j, diag; - double s; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = complexMalloc(nnz + fill); - rowind_new = intMalloc(nnz+ fill); - fill = 0; - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - nzval_new[j + fill] = nzval[j]; - s += slu_c_abs1(&nzval_new[j + fill]); - } - if (diag >= 0) 
{ - nzval_new[diag+fill].r = s * 3.0; - nzval_new[diag+fill].i = 0.0; - } else { - rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill].r = s * 3.0; - nzval_new[colptr[i + 1] + fill].i = 0.0; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - else - { - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - { - if (rowind[j] == i) diag = j; - s += slu_c_abs1(&nzval[j]); - } - nzval[diag].r = s * 3.0; - nzval[diag].i = 0.0; - } - } - Astore->nnz += fill; - return fill; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgscon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgscon.c deleted file mode 100644 index e17532d087..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgscon.c +++ /dev/null @@ -1,154 +0,0 @@ - -/*! @file cgscon.c - * \brief Estimates reciprocal of the condition number of a general matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routines CGECON.
    - * 
    - */ - -/* - * File name: cgscon.c - * History: Modified from lapack routines CGECON. - */ -#include -#include "slu_cdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   CGSCON estimates the reciprocal of the condition number of a general 
    - *   real matrix A, in either the 1-norm or the infinity-norm, using   
    - *   the LU factorization computed by CGETRF.   *
    - *
    - *   An estimate is obtained for norm(inv(A)), and the reciprocal of the   
    - *   condition number is computed as   
    - *      RCOND = 1 / ( norm(A) * norm(inv(A)) ).   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - *   Arguments   
    - *   =========   
    - *
    - *    NORM    (input) char*
    - *            Specifies whether the 1-norm condition number or the   
    - *            infinity-norm condition number is required:   
    - *            = '1' or 'O':  1-norm;   
    - *            = 'I':         Infinity-norm.
    - *	    
    - *    L       (input) SuperMatrix*
    - *            The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *            cgstrf(). Use compressed row subscripts storage for supernodes,
    - *            i.e., L has types: Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - * 
    - *    U       (input) SuperMatrix*
    - *            The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *            cgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *            Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_TRU.
    - *	    
    - *    ANORM   (input) float
    - *            If NORM = '1' or 'O', the 1-norm of the original matrix A.   
    - *            If NORM = 'I', the infinity-norm of the original matrix A.
    - *	    
    - *    RCOND   (output) float*
    - *           The reciprocal of the condition number of the matrix A,   
    - *           computed as RCOND = 1/(norm(A) * norm(inv(A))).
    - *	    
    - *    INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    ===================================================================== 
    - * 
    - */ - -void -cgscon(char *norm, SuperMatrix *L, SuperMatrix *U, - float anorm, float *rcond, SuperLUStat_t *stat, int *info) -{ - - - /* Local variables */ - int kase, kase1, onenrm, i; - float ainvnm; - complex *work; - extern int crscl_(int *, complex *, complex *, int *); - - extern int clacon_(int *, complex *, complex *, float *, int *); - - - /* Test the input parameters. */ - *info = 0; - onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O"); - if (! onenrm && ! lsame_(norm, "I")) *info = -1; - else if (L->nrow < 0 || L->nrow != L->ncol || - L->Stype != SLU_SC || L->Dtype != SLU_C || L->Mtype != SLU_TRLU) - *info = -2; - else if (U->nrow < 0 || U->nrow != U->ncol || - U->Stype != SLU_NC || U->Dtype != SLU_C || U->Mtype != SLU_TRU) - *info = -3; - if (*info != 0) { - i = -(*info); - xerbla_("cgscon", &i); - return; - } - - /* Quick return if possible */ - *rcond = 0.; - if ( L->nrow == 0 || U->nrow == 0) { - *rcond = 1.; - return; - } - - work = complexCalloc( 3*L->nrow ); - - - if ( !work ) - ABORT("Malloc fails for work arrays in cgscon."); - - /* Estimate the norm of inv(A). */ - ainvnm = 0.; - if ( onenrm ) kase1 = 1; - else kase1 = 2; - kase = 0; - - do { - clacon_(&L->nrow, &work[L->nrow], &work[0], &ainvnm, &kase); - - if (kase == 0) break; - - if (kase == kase1) { - /* Multiply by inv(L). */ - sp_ctrsv("L", "No trans", "Unit", L, U, &work[0], stat, info); - - /* Multiply by inv(U). */ - sp_ctrsv("U", "No trans", "Non-unit", L, U, &work[0], stat, info); - - } else { - - /* Multiply by inv(U'). */ - sp_ctrsv("U", "Transpose", "Non-unit", L, U, &work[0], stat, info); - - /* Multiply by inv(L'). */ - sp_ctrsv("L", "Transpose", "Unit", L, U, &work[0], stat, info); - - } - - } while ( kase != 0 ); - - /* Compute the estimate of the reciprocal condition number. */ - if (ainvnm != 0.) *rcond = (1. 
/ ainvnm) / anorm; - - SUPERLU_FREE (work); - return; - -} /* cgscon */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsequ.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsequ.c deleted file mode 100644 index d3913bb1fc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsequ.c +++ /dev/null @@ -1,195 +0,0 @@ - -/*! @file cgsequ.c - * \brief Computes row and column scalings - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from LAPACK routine CGEEQU
    - * 
    - */ -/* - * File name: cgsequ.c - * History: Modified from LAPACK routine CGEEQU - */ -#include -#include "slu_cdefs.h" - - - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   CGSEQU computes row and column scalings intended to equilibrate an   
    - *   M-by-N sparse matrix A and reduce its condition number. R returns the row
    - *   scale factors and C the column scale factors, chosen to try to make   
    - *   the largest element in each row and column of the matrix B with   
    - *   elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1.   
    - *
    - *   R(i) and C(j) are restricted to be between SMLNUM = smallest safe   
    - *   number and BIGNUM = largest safe number.  Use of these scaling   
    - *   factors is not guaranteed to reduce the condition number of A but   
    - *   works well in practice.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input) SuperMatrix*
    - *           The matrix of dimension (A->nrow, A->ncol) whose equilibration
    - *           factors are to be computed. The type of A can be:
    - *           Stype = SLU_NC; Dtype = SLU_C; Mtype = SLU_GE.
    - *	    
    - *   R       (output) float*, size A->nrow
    - *           If INFO = 0 or INFO > M, R contains the row scale factors   
    - *           for A.
    - *	    
    - *   C       (output) float*, size A->ncol
    - *           If INFO = 0,  C contains the column scale factors for A.
    - *	    
    - *   ROWCND  (output) float*
    - *           If INFO = 0 or INFO > M, ROWCND contains the ratio of the   
    - *           smallest R(i) to the largest R(i).  If ROWCND >= 0.1 and   
    - *           AMAX is neither too large nor too small, it is not worth   
    - *           scaling by R.
    - *	    
    - *   COLCND  (output) float*
    - *           If INFO = 0, COLCND contains the ratio of the smallest   
    - *           C(i) to the largest C(i).  If COLCND >= 0.1, it is not   
    - *           worth scaling by C.
    - *	    
    - *   AMAX    (output) float*
    - *           Absolute value of largest matrix element.  If AMAX is very   
    - *           close to overflow or very close to underflow, the matrix   
    - *           should be scaled.
    - *	    
    - *   INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *           > 0:  if INFO = i,  and i is   
    - *                 <= A->nrow:  the i-th row of A is exactly zero   
    - *                 >  A->ncol:  the (i-M)-th column of A is exactly zero   
    - *
    - *   ===================================================================== 
    - * 
    - */ -void -cgsequ(SuperMatrix *A, float *r, float *c, float *rowcnd, - float *colcnd, float *amax, int *info) -{ - - - /* Local variables */ - NCformat *Astore; - complex *Aval; - int i, j, irow; - float rcmin, rcmax; - float bignum, smlnum; - extern double slamch_(char *); - - /* Test the input parameters. */ - *info = 0; - if ( A->nrow < 0 || A->ncol < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_C || A->Mtype != SLU_GE ) - *info = -1; - if (*info != 0) { - i = -(*info); - xerbla_("cgsequ", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || A->ncol == 0 ) { - *rowcnd = 1.; - *colcnd = 1.; - *amax = 0.; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Get machine constants. */ - smlnum = slamch_("S"); - bignum = 1. / smlnum; - - /* Compute row scale factors. */ - for (i = 0; i < A->nrow; ++i) r[i] = 0.; - - /* Find the maximum element in each row. */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - r[irow] = SUPERLU_MAX( r[irow], slu_c_abs1(&Aval[i]) ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (i = 0; i < A->nrow; ++i) { - rcmax = SUPERLU_MAX(rcmax, r[i]); - rcmin = SUPERLU_MIN(rcmin, r[i]); - } - *amax = rcmax; - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (i = 0; i < A->nrow; ++i) - if (r[i] == 0.) { - *info = i + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (i = 0; i < A->nrow; ++i) - r[i] = 1. / SUPERLU_MIN( SUPERLU_MAX( r[i], smlnum ), bignum ); - /* Compute ROWCND = min(R(I)) / max(R(I)) */ - *rowcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - /* Compute column scale factors */ - for (j = 0; j < A->ncol; ++j) c[j] = 0.; - - /* Find the maximum element in each column, assuming the row - scalings computed above. 
*/ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - c[j] = SUPERLU_MAX( c[j], slu_c_abs1(&Aval[i]) * r[irow] ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->ncol; ++j) { - rcmax = SUPERLU_MAX(rcmax, c[j]); - rcmin = SUPERLU_MIN(rcmin, c[j]); - } - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (j = 0; j < A->ncol; ++j) - if ( c[j] == 0. ) { - *info = A->nrow + j + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (j = 0; j < A->ncol; ++j) - c[j] = 1. / SUPERLU_MIN( SUPERLU_MAX( c[j], smlnum ), bignum); - /* Compute COLCND = min(C(J)) / max(C(J)) */ - *colcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - return; - -} /* cgsequ */ - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsisx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsisx.c deleted file mode 100644 index cf8c55244e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsisx.c +++ /dev/null @@ -1,693 +0,0 @@ - -/*! @file cgsisx.c - * \brief Gives the approximate solutions of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * CGSISX gives the approximate solutions of linear equations A*X=B or A'*X=B,
    - * using the ILU factorization from cgsitrf(). An estimation of
    - * the condition number is provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *	1.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A is
    - *	     overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *	     (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *	     = TRANS or CONJ).
    - *
    - *	1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *	     matrix that usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the matrix A (after equilibration if options->Equil = YES)
    - *	     as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *	1.4. Compute the reciprocal pivot growth factor.
    - *
    - *	1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n),
    - *	     and info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	1.6. The system of equations is solved for X using the factored form
    - *	     of A.
    - *
    - *	1.7. options->IterRefine is not used
    - *
    - *	1.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *	1.9. options for ILU only
    - *	     1) If options->RowPerm = LargeDiag, MC64 is used to scale and
    - *		permute the matrix to an I-matrix, that is Pr*Dr*A*Dc has
    - *		entries of modulus 1 on the diagonal and off-diagonal entries
    - *		of modulus at most 1. If MC64 fails, dgsequ() is used to
    - *		equilibrate the system.
    - *	     2) options->ILU_DropTol = tau is the threshold for dropping.
    - *		For L, it is used directly (for the whole row in a supernode);
    - *		For U, ||A(:,i)||_oo * tau is used as the threshold
    - *	        for the	i-th column.
    - *		If a secondary dropping rule is required, tau will
    - *	        also be used to compute the second threshold.
    - *	     3) options->ILU_FillFactor = gamma, used as the initial guess
    - *		of memory growth.
    - *		If a secondary dropping rule is required, it will also
    - *              be used as an upper bound of the memory.
    - *	     4) options->ILU_DropRule specifies the dropping rule.
    - *		Option		Explanation
    - *		======		===========
    - *		DROP_BASIC:	Basic dropping rule, supernodal based ILU.
    - *		DROP_PROWS:	Supernodal based ILUTP, p = gamma * nnz(A) / n.
    - *		DROP_COLUMN:	Variation of ILUTP, for j-th column,
    - *				p = gamma * nnz(A(:,j)).
    - *		DROP_AREA;	Variation of ILUTP, for j-th column, use
    - *				nnz(F(:,1:j)) / nnz(A(:,1:j)) to control the
    - *				memory.
    - *		DROP_DYNAMIC:	Modify the threshold tau during the
    - *				factorizaion.
    - *				If nnz(L(:,1:j)) / nnz(A(:,1:j)) < gamma
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				Otherwise
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				tau_U(j) uses the similar rule.
    - *				NOTE: the thresholds used by L and U are
    - *				indenpendent.
    - *		DROP_INTERP:	Compute the second dropping threshold by
    - *				interpolation instead of sorting (default).
    - *				In this case, the actual fill ratio is not
    - *				guaranteed smaller than gamma.
    - *		DROP_PROWS, DROP_COLUMN and DROP_AREA are mutually exclusive.
    - *		( The default option is DROP_BASIC | DROP_AREA. )
    - *	     5) options->ILU_Norm is the criterion of computing the average
    - *		value of a row in L.
    - *		options->ILU_Norm	average(x[1:n])
    - *		=================	===============
    - *		ONE_NORM		||x||_1 / n
    - *		TWO_NORM		||x||_2 / sqrt(n)
    - *		INF_NORM		max{|x[i]|}
    - *	     6) options->ILU_MILU specifies the type of MILU's variation.
    - *		= SILU (default): do not perform MILU;
    - *		= SMILU_1 (not recommended):
    - *		    U(i,i) := U(i,i) + sum(dropped entries);
    - *		= SMILU_2:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(dropped entries);
    - *		= SMILU_3:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(|dropped entries|);
    - *		NOTE: Even SMILU_1 does not preserve the column sum because of
    - *		late dropping.
    - *	     7) options->ILU_FillTol is used as the perturbation when
    - *		encountering zero pivots. If some U(i,i) = 0, so that U is
    - *		exactly singular, then
    - *		   U(i,i) := ||A(:,i)|| * options->ILU_FillTol ** (1 - i / n).
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *	to the transpose of A:
    - *
    - *	2.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A' is
    - *	     overwritten by diag(R)*A'*diag(C) and B by diag(R)*B
    - *	     (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *	2.2. Permute columns of transpose(A) (rows of A),
    - *	     forming transpose(A)*Pc, where Pc is a permutation matrix that
    - *	     usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the transpose(A) (after equilibration if
    - *	     options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *	     permutation Pr determined by partial pivoting.
    - *
    - *	2.4. Compute the reciprocal pivot growth factor.
    - *
    - *	2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		 U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n).
    - *	     And info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	2.6. The system of equations is solved for X using the factored form
    - *	     of transpose(A).
    - *
    - *	2.7. If options->IterRefine is not used.
    - *
    - *	2.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the LU decomposition will be performed and how the
    - *	   system will be solved.
    - *
    - * A	   (input/output) SuperMatrix*
    - *	   Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *	   of the linear equations is A->nrow. Currently, the type of A can be:
    - *	   Stype = SLU_NC or SLU_NR, Dtype = SLU_C, Mtype = SLU_GE.
    - *	   In the future, more general A may be handled.
    - *
    - *	   On entry, If options->Fact = FACTORED and equed is not 'N',
    - *	   then A must have been equilibrated by the scaling factors in
    - *	   R and/or C.
    - *	   On exit, A is not modified if options->Equil = NO, or if
    - *	   options->Equil = YES but equed = 'N' on exit.
    - *	   Otherwise, if options->Equil = YES and equed is not 'N',
    - *	   A is scaled as follows:
    - *	   If A->Stype = SLU_NC:
    - *	     equed = 'R':  A := diag(R) * A
    - *	     equed = 'C':  A := A * diag(C)
    - *	     equed = 'B':  A := diag(R) * A * diag(C).
    - *	   If A->Stype = SLU_NR:
    - *	     equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *	     equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *	     equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *	   which defines the permutation matrix Pc; perm_c[i] = j means
    - *	   column i of A is in position j in A*Pc.
    - *	   On exit, perm_c may be overwritten by the product of the input
    - *	   perm_c and a permutation that postorders the elimination tree
    - *	   of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *	   is already in postorder.
    - *
    - *	   If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *	   which describes permutation of columns of transpose(A) 
    - *	   (rows of A) as described above.
    - *
    - * perm_r  (input/output) int*
    - *	   If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *	   which defines the permutation matrix Pr, and is determined
    - *	   by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *	   position j in Pr*A.
    - *
    - *	   If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *	   determines permutation of rows of transpose(A)
    - *	   (columns of A) as described above.
    - *
    - *	   If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	   will try to use the input perm_r, unless a certain threshold
    - *	   criterion is violated. In that case, perm_r is overwritten by a
    - *	   new permutation determined by partial pivoting or diagonal
    - *	   threshold pivoting.
    - *	   Otherwise, perm_r is output argument.
    - *
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *	   Elimination tree of Pc'*A'*A*Pc.
    - *	   If options->Fact != FACTORED and options->Fact != DOFACT,
    - *	   etree is an input argument, otherwise it is an output argument.
    - *	   Note: etree is a vector of parent pointers for a forest whose
    - *	   vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *	   Specifies the form of equilibration that was done.
    - *	   = 'N': No equilibration.
    - *	   = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *	   = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *	   = 'B': Both row and column equilibration, i.e., A was replaced 
    - *		  by diag(R)*A*diag(C).
    - *	   If options->Fact = FACTORED, equed is an input argument,
    - *	   otherwise it is an output argument.
    - *
    - * R	   (input/output) float*, dimension (A->nrow)
    - *	   The row scale factors for A or transpose(A).
    - *	   If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *	   If equed = 'N' or 'C', R is not accessed.
    - *	   If options->Fact = FACTORED, R is an input argument,
    - *	       otherwise, R is output.
    - *	   If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *	       of R must be positive.
    - *
    - * C	   (input/output) float*, dimension (A->ncol)
    - *	   The column scale factors for A or transpose(A).
    - *	   If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *	   If equed = 'N' or 'R', C is not accessed.
    - *	   If options->Fact = FACTORED, C is an input argument,
    - *	       otherwise, C is output.
    - *	   If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *	       of C must be positive.
    - *
    - * L	   (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype SLU_= NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses compressed row subscripts storage for supernodes, i.e.,
    - *	   L has types: Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - *
    - * U	   (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype = SLU_NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses column-wise storage scheme, i.e., U has types:
    - *	   Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *	   User supplied workspace, should be large enough
    - *	   to hold data structures for factors L and U.
    - *	   On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 mem_usage->total_needed; no other side effects.
    - *
    - *	   See argument 'mem_usage' for memory usage statistics.
    - *
    - * B	   (input/output) SuperMatrix*
    - *	   B has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *	   On entry, the right hand side matrix.
    - *	   If B->ncol = 0, only LU decomposition is performed, the triangular
    - *			   solve is skipped.
    - *	   On exit,
    - *	      if equed = 'N', B is not modified; otherwise
    - *	      if A->Stype = SLU_NC:
    - *		 if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *		    B is overwritten by diag(R)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *		    B is overwritten by diag(C)*B;
    - *	      if A->Stype = SLU_NR:
    - *		 if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *		    B is overwritten by diag(C)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *		    B is overwritten by diag(R)*B.
    - *
    - * X	   (output) SuperMatrix*
    - *	   X has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *	   If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *	   to the original system of equations. Note that A and B are modified
    - *	   on exit if equed is not 'N', and the solution to the equilibrated
    - *	   system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *	   equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *	   and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) float*
    - *	   The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *	   The infinity norm is used. If recip_pivot_growth is much less
    - *	   than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) float*
    - *	   The estimate of the reciprocal condition number of the matrix A
    - *	   after equilibration (if done). If rcond is less than the machine
    - *	   precision (in particular, if rcond = 0), the matrix is singular
    - *	   to working precision. This condition is indicated by a return
    - *	   code of info > 0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *	   Record the memory usage statistics, consisting of following fields:
    - *	   - for_lu (float)
    - *	     The amount of space used in bytes for L\U data structures.
    - *	   - total_needed (float)
    - *	     The amount of space needed in bytes to perform factorization.
    - *	   - expansions (int)
    - *	     The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *	  Record the statistics on runtime and floating-point operation count.
    - *	  See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - *	   > 0: if info = i, and i is
    - *		<= A->ncol: number of zero pivots. They are replaced by small
    - *		      entries due to options->ILU_FillTol.
    - *		= A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *		      precision, meaning that the matrix is singular to
    - *		      working precision. Nevertheless, the solution and
    - *		      error bounds are computed because there are a number
    - *		      of situations where the computed solution can be more
    - *		      accurate than the value of RCOND would suggest.
    - *		> A->ncol+1: number of bytes allocated when memory allocation
    - *		      failure occurred, plus A->ncol.
    - * 
    - */ - -void -cgsisx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, float *R, float *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, - float *recip_pivot_growth, float *rcond, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info) -{ - - DNformat *Bstore, *Xstore; - complex *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec, mc64; - trans_t trant; - char norm[1]; - int i, j, info1; - float amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - float diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - int *perm = NULL; - - /* External functions */ - extern float clangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - mc64 = (options->RowPerm == LargeDiag); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = slamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_C || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_C || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_C || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("cgsisx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - cCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact ) { - register int i, j; - NCformat *Astore = AA->Store; - int nnz = Astore->nnz; - int *colptr = Astore->colptr; - int *rowind = Astore->rowind; - complex *nzval = (complex *)Astore->nzval; - int n = AA->nrow; - - if ( mc64 ) { - *equed = 'B'; - rowequ = colequ = 1; - t0 = SuperLU_timer_(); - if ((perm = intMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for perm[]"); - - info1 = cldperm(5, n, nnz, colptr, rowind, nzval, perm, R, C); - - if (info1 > 0) { /* MC64 fails, call cgsequ() later */ - mc64 = 0; - SUPERLU_FREE(perm); - perm = NULL; - } else { - for (i = 0; i < n; i++) { - R[i] = exp(R[i]); - C[i] = exp(C[i]); - } - /* permute and scale the matrix */ - for (j = 0; j < n; j++) { - for (i = colptr[j]; i < colptr[j + 1]; i++) { - cs_mult(&nzval[i], &nzval[i], R[rowind[i]] * C[j]); - rowind[i] = perm[rowind[i]]; - } - } - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - if ( !mc64 & equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - cgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - claqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. 
*/ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], R[i]); - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], C[i]); - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t0; - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - cgsitrf(options, &AC, relax, panel_size, etree, work, lwork, - perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) return; - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = cPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = clangs(norm, AA); - cgscon(norm, L, U, anorm, rcond, stat, &info1); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. 
*/ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - cgstrs (trant, L, U, perm_c, perm_r, X, stat, &info1); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original - system. */ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], C[i]); - } - } - } else { - if ( rowequ ) { - if (perm) { - complex *tmp; - int n = A->nrow; - - if ((tmp = complexMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for tmp[]"); - for (j = 0; j < nrhs; j++) { - for (i = 0; i < n; i++) - tmp[i] = Xmat[i + j * ldx]; /*dcopy*/ - for (i = 0; i < n; i++) - cs_mult(&Xmat[i+j*ldx], &tmp[perm[i]], R[i]); - } - SUPERLU_FREE(tmp); - } else { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], R[i]); - } - } - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < slamch_("E") && *info == 0) *info = A->ncol + 1; - } - - if (perm) SUPERLU_FREE(perm); - - if ( nofact ) { - ilu_cQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsitrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsitrf.c deleted file mode 100644 index fcb68e20d5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsitrf.c +++ /dev/null @@ -1,628 +0,0 @@ - -/*! @file cgsitf.c - * \brief Computes an ILU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_cdefs.h" - -#ifdef DEBUG -int num_drop_L; -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * CGSITRF computes an ILU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the ILU decomposition will be performed.
    - *
    - * A	    (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *	    (A->nrow, A->ncol). The type of A can be:
    - *	    Stype = SLU_NCP; Dtype = SLU_C; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *	    To control degree of relaxing supernodes. If the number
    - *	    of nodes (columns) in a subtree of the elimination tree is less
    - *	    than relax, this subtree is considered as one supernode,
    - *	    regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *	    A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *	    Elimination tree of A'*A.
    - *	    Note: etree is a vector of parent pointers for a forest whose
    - *	    vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *	    On input, the columns of A should be permuted so that the
    - *	    etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *	    User-supplied work space and space for the output data structures.
    - *	    Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the
    - *	    permutation matrix Pc; perm_c[i] = j means column i of A is
    - *	    in position j in A*Pc.
    - *	    When searching for diagonal, perm_c[*] is applied to the
    - *	    row subscripts of A, so that diagonal threshold pivoting
    - *	    can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *	    Row permutation vector which defines the permutation matrix Pr,
    - *	    perm_r[i] = j means row i of A is in position j in Pr*A.
    - *	    If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	       will try to use the input perm_r, unless a certain threshold
    - *	       criterion is violated. In that case, perm_r is overwritten by
    - *	       a new permutation determined by partial pivoting or diagonal
    - *	       threshold pivoting.
    - *	    Otherwise, perm_r is output argument;
    - *
    - * L	    (output) SuperMatrix*
    - *	    The factor L from the factorization Pr*A=L*U; use compressed row
    - *	    subscripts storage for supernodes, i.e., L has type:
    - *	    Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - *
    - * U	    (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *	    storage scheme, i.e., U has types: Stype = SLU_NC,
    - *	    Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *	    Record the statistics on runtime and floating-point operation count.
    - *	    See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *	    = 0: successful exit
    - *	    < 0: if info = -i, the i-th argument had an illegal value
    - *	    > 0: if info = i, and i is
    - *	       <= A->ncol: number of zero pivots. They are replaced by small
    - *		  entries according to options->ILU_FillTol.
    - *	       > A->ncol: number of bytes allocated when memory allocation
    - *		  failure occurred, plus A->ncol. If lwork = -1, it is
    - *		  the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays:
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 4 of them:
    - *	      marker/marker1 are used for panel dfs, see (ilu_)dpanel_dfs.c;
    - *	      marker2 is used for inner-factorization, see (ilu)_dcolumn_dfs.c;
    - *	      marker_relax(has its own space) is used for relaxed supernodes.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *	Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs)
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the
    - *	last column of a supernode.
    - *	The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a
    - *	supernodal representative r, repfnz[r] is the location of the first
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel.
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below
    - *	the panel diagonal. These are filled in during dpanel_dfs(), and are
    - *	used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *		   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_util.h.
    - *	It is also used by the dropping routine ilu_ddrop_row().
    - * 
    - */ - -void -cgsitrf(superlu_options_t *options, SuperMatrix *A, int relax, int panel_size, - int *etree, void *work, int lwork, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *swap, *iswap; /* swap is used to store the row permutation - during the factorization. Initially, it is set - to iperm_c (row indeces of Pc*A*Pc'). - iswap is the inverse of swap. After the - factorization, it is equal to perm_r. */ - int *iwork; - complex *cwork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *marker, *marker_relax; - complex *dense, *tempv; - float *stempv; - int *relax_end, *relax_fsupc; - complex *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - float *amax; - complex drop_sum; - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. 
*/ - int *iwork2; /* used by the second dropping rule */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - double drop_tol = options->ILU_DropTol; /* tau */ - double fill_ini = options->ILU_FillTol; /* tau^hat */ - double gamma = options->ILU_FillFactor; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - double fill_tol; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - int last_drop;/* the last column which the dropping rules applied */ - int quota; - int nnzAj; /* number of nonzeros in A(:,1:j) */ - int nnzLj, nnzUj; - double tol_L = drop_tol, tol_U = drop_tol; - complex zero = {0.0, 0.0}; - - /* Executable */ - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = cLUMemInit(fact, work, lwork, m, n, Astore->nnz, panel_size, - gamma, L, U, &Glu, &iwork, &cwork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; - xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &marker_relax, &marker); - cSetRWork(m, panel_size, cwork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - 
for (k = 0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - swap = (int *)intMalloc(n); - for (k = 0; k < n; k++) swap[k] = iperm_c[k]; - iswap = (int *)intMalloc(n); - for (k = 0; k < n; k++) iswap[k] = perm_c[k]; - amax = (float *) floatMalloc(panel_size); - if (drop_rule & DROP_SECONDARY) - iwork2 = (int *)intMalloc(n); - else - iwork2 = NULL; - - nnzAj = 0; - nnzLj = 0; - nnzUj = 0; - last_drop = SUPERLU_MAX(min_mn - 2 * sp_ienv(3), (int)(min_mn * 0.95)); - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - relax_fsupc = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) - ilu_heap_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - else - ilu_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* Mark the rows used by relaxed supernodes */ - ifill (marker_relax, m, EMPTY); - i = mark_relax(m, relax_end, relax_fsupc, xa_begin, xa_end, - asub, marker_relax); -#if ( PRNTlevel >= 1) - printf("%d relaxed supernodes.\n", i); -#endif - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* Drop small rows in the previous supernode. 
*/ - if (jcol > 0 && jcol < last_drop) { - int first = xsup[supno[jcol - 1]]; - int last = jcol - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / min_mn); - - /* Drop small rows */ - stempv = (float *) tempv; - i = ilu_cdrop_row(options, first, last, tol_L, quota, &nnzLj, - &fill_tol, &Glu, stempv, iwork2, 0); - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = ilu_csnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ((*info = cLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu))) - return; - } - - for (icol = jcol; icol <= kcol; icol++) { - xusub[icol+1] = nextu; - - amax[0] = 0.0; - /* Scatter into SPA dense[*] */ - for (k = xa_begin[icol]; k < xa_end[icol]; k++) { - register float tmp = slu_c_abs1 (&a[k]); - if (tmp > amax[0]) amax[0] = tmp; - dense[asub[k]] = a[k]; - } - 
nnzAj += xa_end[icol] - xa_begin[icol]; - if (amax[0] == 0.0) { - amax[0] = fill_ini; -#if ( PRNTlevel >= 1) - printf("Column %d is entirely zero!\n", icol); - fflush(stdout); -#endif - } - - /* Numeric update within the snode */ - csnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if (usepr) pivrow = iperm_r[icol]; - fill_tol = pow(fill_ini, 1.0 - (double)icol / (double)min_mn); - if ( (*info = ilu_cpivotL(icol, diag_pivot_thresh, &usepr, - perm_r, iperm_c[icol], swap, iswap, - marker_relax, &pivrow, - amax[0] * fill_tol, milu, zero, - &Glu, stat)) ) { - iinfo++; - marker[pivrow] = kcol; - } - - } - - jcol = kcol + 1; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - ilu_cpanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, amax, panel_lsub, segrep, repfnz, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - cpanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for (jj = jcol; jj < jcol + panel_size; jj++) { - - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - nnzAj += xa_end[jj] - xa_begin[jj]; - - if ((*info = ilu_ccolumn_dfs(m, jj, perm_r, &nseg, - &panel_lsub[k], segrep, &repfnz[k], - marker, parent, xplore, &Glu))) - return; - - /* Numeric updates */ - if ((*info = ccolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Make a fill-in position if the column is entirely zero */ - if 
(xlsub[jj + 1] == xlsub[jj]) { - register int i, row; - int nextl; - int nzlmax = Glu.nzlmax; - int *lsub = Glu.lsub; - int *marker2 = marker + 2 * m; - - /* Allocate memory */ - nextl = xlsub[jj] + 1; - if (nextl >= nzlmax) { - int error = cLUMemXpand(jj, nextl, LSUB, &nzlmax, &Glu); - if (error) { *info = error; return; } - lsub = Glu.lsub; - } - xlsub[jj + 1]++; - assert(xlusup[jj]==xlusup[jj+1]); - xlusup[jj + 1]++; - Glu.lusup[xlusup[jj]] = zero; - - /* Choose a row index (pivrow) for fill-in */ - for (i = jj; i < n; i++) - if (marker_relax[swap[i]] <= jj) break; - row = swap[i]; - marker2[row] = jj; - lsub[xlsub[jj]] = row; -#ifdef DEBUG - printf("Fill col %d.\n", jj); - fflush(stdout); -#endif - } - - /* Computer the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * jj / m; - else if (drop_rule & DROP_COLUMN) - quota = gamma * (xa_end[jj] - xa_begin[jj]) * - (jj + 1) / m; - else if (drop_rule & DROP_AREA) - quota = gamma * 0.9 * nnzAj * 0.5 - nnzUj; - else - quota = m; - - /* Copy the U-segments to ucol[*] and drop small entries */ - if ((*info = ilu_ccopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], drop_rule, - milu, amax[jj - jcol] * tol_U, - quota, &drop_sum, &nnzUj, &Glu, - iwork2)) != 0) - return; - - /* Reset the dropping threshold if required */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * 0.9 * nnzAj * 0.5 < nnzLj) - tol_U = SUPERLU_MIN(1.0, tol_U * 2.0); - else - tol_U = SUPERLU_MAX(drop_tol, tol_U * 0.5); - } - - cs_mult(&drop_sum, &drop_sum, MILU_ALPHA); - if (usepr) pivrow = iperm_r[jj]; - fill_tol = pow(fill_ini, 1.0 - (double)jj / (double)min_mn); - if ( (*info = ilu_cpivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_c[jj], swap, iswap, - marker_relax, &pivrow, - amax[jj - jcol] * fill_tol, milu, - drop_sum, &Glu, stat)) ) { - iinfo++; - marker[m + pivrow] = jj; - marker[2 * m + pivrow] = jj; - } - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - - /* Start a new 
supernode, drop the previous one */ - if (jj > 0 && supno[jj] > supno[jj - 1] && jj < last_drop) { - int first = xsup[supno[jj - 1]]; - int last = jj - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) - / m) - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / - (double)min_mn); - - /* Drop small rows */ - stempv = (float *) tempv; - i = ilu_cdrop_row(options, first, last, tol_L, quota, - &nnzLj, &fill_tol, &Glu, stempv, iwork2, - 1); - - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } /* if start a new supernode */ - - } /* for */ - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - ilu_countnz(min_mn, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - cLUWorkFree(iwork, cwork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. 
- There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - cCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_C, SLU_TRLU); - cCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_C, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - SUPERLU_FREE (swap); - SUPERLU_FREE (iswap); - SUPERLU_FREE (relax_fsupc); - SUPERLU_FREE (amax); - if ( iwork2 ) SUPERLU_FREE (iwork2); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsrfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsrfs.c deleted file mode 100644 index 2a1e9c499c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgsrfs.c +++ /dev/null @@ -1,460 +0,0 @@ - -/*! @file cgsrfs.c - * \brief Improves computed solution to a system of inear equations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routine CGERFS
    - * 
    - */ -/* - * File name: cgsrfs.c - * History: Modified from lapack routine CGERFS - */ -#include -#include "slu_cdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   CGSRFS improves the computed solution to a system of linear   
    - *   equations and provides error bounds and backward error estimates for 
    - *   the solution.   
    - *
    - *   If equilibration was performed, the system becomes:
    - *           (diag(R)*A_original*diag(C)) * X = diag(R)*B_original.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *   
    - *   A       (input) SuperMatrix*
    - *           The original matrix A in the system, or the scaled A if
    - *           equilibration was done. The type of A can be:
    - *           Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_GE.
    - *    
    - *   L       (input) SuperMatrix*
    - *	     The factor L from the factorization Pr*A*Pc=L*U. Use
    - *           compressed row subscripts storage for supernodes, 
    - *           i.e., L has types: Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - * 
    - *   U       (input) SuperMatrix*
    - *           The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *           cgstrf(). Use column-wise storage scheme, 
    - *           i.e., U has types: Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - *   perm_c  (input) int*, dimension (A->ncol)
    - *	     Column permutation vector, which defines the 
    - *           permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *           in position j in A*Pc.
    - *
    - *   perm_r  (input) int*, dimension (A->nrow)
    - *           Row permutation vector, which defines the permutation matrix Pr;
    - *           perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - *   equed   (input) Specifies the form of equilibration that was done.
    - *           = 'N': No equilibration.
    - *           = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *           = 'C': Column equilibration, i.e., A was postmultiplied by
    - *                  diag(C).
    - *           = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                  by diag(R)*A*diag(C).
    - *
    - *   R       (input) float*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *           If equed = 'R' or 'B', A is premultiplied by diag(R).
    - *           If equed = 'N' or 'C', R is not accessed.
    - * 
    - *   C       (input) float*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *           If equed = 'C' or 'B', A is postmultiplied by diag(C).
    - *           If equed = 'N' or 'R', C is not accessed.
    - *
    - *   B       (input) SuperMatrix*
    - *           B has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *           The right hand side matrix B.
    - *           if equed = 'R' or 'B', B is premultiplied by diag(R).
    - *
    - *   X       (input/output) SuperMatrix*
    - *           X has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *           On entry, the solution matrix X, as computed by cgstrs().
    - *           On exit, the improved solution matrix X.
    - *           if *equed = 'C' or 'B', X should be premultiplied by diag(C)
    - *               in order to obtain the solution to the original system.
    - *
    - *   FERR    (output) float*, dimension (B->ncol)   
    - *           The estimated forward error bound for each solution vector   
    - *           X(j) (the j-th column of the solution matrix X).   
    - *           If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *           is an estimated upper bound for the magnitude of the largest 
    - *           element in (X(j) - XTRUE) divided by the magnitude of the   
    - *           largest element in X(j).  The estimate is as reliable as   
    - *           the estimate for RCOND, and is almost always a slight   
    - *           overestimate of the true error.
    - *
    - *   BERR    (output) float*, dimension (B->ncol)   
    - *           The componentwise relative backward error of each solution   
    - *           vector X(j) (i.e., the smallest relative change in   
    - *           any element of A or B that makes X(j) an exact solution).
    - *
    - *   stat     (output) SuperLUStat_t*
    - *            Record the statistics on runtime and floating-point operation count.
    - *            See util.h for the definition of 'SuperLUStat_t'.
    - *
    - *   info    (output) int*   
    - *           = 0:  successful exit   
    - *            < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    Internal Parameters   
    - *    ===================   
    - *
    - *    ITMAX is the maximum number of steps of iterative refinement.   
    - *
    - * 
    - */ -void -cgsrfs(trans_t trans, SuperMatrix *A, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, char *equed, float *R, float *C, - SuperMatrix *B, SuperMatrix *X, float *ferr, float *berr, - SuperLUStat_t *stat, int *info) -{ - - -#define ITMAX 5 - - /* Table of constant values */ - int ione = 1; - complex ndone = {-1., 0.}; - complex done = {1., 0.}; - - /* Local variables */ - NCformat *Astore; - complex *Aval; - SuperMatrix Bjcol; - DNformat *Bstore, *Xstore, *Bjcol_store; - complex *Bmat, *Xmat, *Bptr, *Xptr; - int kase; - float safe1, safe2; - int i, j, k, irow, nz, count, notran, rowequ, colequ; - int ldb, ldx, nrhs; - float s, xk, lstres, eps, safmin; - char transc[1]; - trans_t transt; - complex *work; - float *rwork; - int *iwork; - extern double slamch_(char *); - extern int clacon_(int *, complex *, complex *, float *, int *); -#ifdef _CRAY - extern int CCOPY(int *, complex *, int *, complex *, int *); - extern int CSAXPY(int *, complex *, complex *, int *, complex *, int *); -#else - extern int ccopy_(int *, complex *, int *, complex *, int *); - extern int caxpy_(int *, complex *, complex *, int *, complex *, int *); -#endif - - Astore = A->Store; - Aval = Astore->nzval; - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - /* Test the input parameters */ - *info = 0; - notran = (trans == NOTRANS); - if ( !notran && trans != TRANS && trans != CONJ ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_C || A->Mtype != SLU_GE ) - *info = -2; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_C || L->Mtype != SLU_TRLU ) - *info = -3; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_C || U->Mtype != SLU_TRU ) - *info = -4; - else if ( ldb < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_C || 
B->Mtype != SLU_GE ) - *info = -10; - else if ( ldx < SUPERLU_MAX(0, A->nrow) || - X->Stype != SLU_DN || X->Dtype != SLU_C || X->Mtype != SLU_GE ) - *info = -11; - if (*info != 0) { - i = -(*info); - xerbla_("cgsrfs", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || nrhs == 0) { - for (j = 0; j < nrhs; ++j) { - ferr[j] = 0.; - berr[j] = 0.; - } - return; - } - - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - - /* Allocate working space */ - work = complexMalloc(2*A->nrow); - rwork = (float *) SUPERLU_MALLOC( A->nrow * sizeof(float) ); - iwork = intMalloc(A->nrow); - if ( !work || !rwork || !iwork ) - ABORT("Malloc fails for work/rwork/iwork."); - - if ( notran ) { - *(unsigned char *)transc = 'N'; - transt = TRANS; - } else { - *(unsigned char *)transc = 'T'; - transt = NOTRANS; - } - - /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ - nz = A->ncol + 1; - eps = slamch_("Epsilon"); - safmin = slamch_("Safe minimum"); - /* Set SAFE1 essentially to be the underflow threshold times the - number of additions in each row. */ - safe1 = nz * safmin; - safe2 = safe1 / eps; - - /* Compute the number of nonzeros in each row (or column) of A */ - for (i = 0; i < A->nrow; ++i) iwork[i] = 0; - if ( notran ) { - for (k = 0; k < A->ncol; ++k) - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - ++iwork[Astore->rowind[i]]; - } else { - for (k = 0; k < A->ncol; ++k) - iwork[k] = Astore->colptr[k+1] - Astore->colptr[k]; - } - - /* Copy one column of RHS B into Bjcol. */ - Bjcol.Stype = B->Stype; - Bjcol.Dtype = B->Dtype; - Bjcol.Mtype = B->Mtype; - Bjcol.nrow = B->nrow; - Bjcol.ncol = 1; - Bjcol.Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !Bjcol.Store ) ABORT("SUPERLU_MALLOC fails for Bjcol.Store"); - Bjcol_store = Bjcol.Store; - Bjcol_store->lda = ldb; - Bjcol_store->nzval = work; /* address aliasing */ - - /* Do for each right hand side ... 
*/ - for (j = 0; j < nrhs; ++j) { - count = 0; - lstres = 3.; - Bptr = &Bmat[j*ldb]; - Xptr = &Xmat[j*ldx]; - - while (1) { /* Loop until stopping criterion is satisfied. */ - - /* Compute residual R = B - op(A) * X, - where op(A) = A, A**T, or A**H, depending on TRANS. */ - -#ifdef _CRAY - CCOPY(&A->nrow, Bptr, &ione, work, &ione); -#else - ccopy_(&A->nrow, Bptr, &ione, work, &ione); -#endif - sp_cgemv(transc, ndone, A, Xptr, ione, done, work, ione); - - /* Compute componentwise relative backward error from formula - max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) - where abs(Z) is the componentwise absolute value of the matrix - or vector Z. If the i-th component of the denominator is less - than SAFE2, then SAFE1 is added to the i-th component of the - numerator before dividing. */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = slu_c_abs1( &Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). */ - if (notran) { - for (k = 0; k < A->ncol; ++k) { - xk = slu_c_abs1( &Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += slu_c_abs1(&Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - s += slu_c_abs1(&Aval[i]) * slu_c_abs1(&Xptr[irow]); - } - rwork[k] += s; - } - } - s = 0.; - for (i = 0; i < A->nrow; ++i) { - if (rwork[i] > safe2) { - s = SUPERLU_MAX( s, slu_c_abs1(&work[i]) / rwork[i] ); - } else if ( rwork[i] != 0.0 ) { - s = SUPERLU_MAX( s, (slu_c_abs1(&work[i]) + safe1) / rwork[i] ); - } - /* If rwork[i] is exactly 0.0, then we know the true - residual also must be exactly 0.0. */ - } - berr[j] = s; - - /* Test stopping criterion. Continue iterating if - 1) The residual BERR(J) is larger than machine epsilon, and - 2) BERR(J) decreased by at least a factor of 2 during the - last iteration, and - 3) At most ITMAX iterations tried. */ - - if (berr[j] > eps && berr[j] * 2. 
<= lstres && count < ITMAX) { - /* Update solution and try again. */ - cgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - -#ifdef _CRAY - CAXPY(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#else - caxpy_(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#endif - lstres = berr[j]; - ++count; - } else { - break; - } - - } /* end while */ - - stat->RefineSteps = count; - - /* Bound error from formula: - norm(X - XTRUE) / norm(X) .le. FERR = norm( abs(inv(op(A)))* - ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) - where - norm(Z) is the magnitude of the largest component of Z - inv(op(A)) is the inverse of op(A) - abs(Z) is the componentwise absolute value of the matrix or - vector Z - NZ is the maximum number of nonzeros in any row of A, plus 1 - EPS is machine epsilon - - The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) - is incremented by SAFE1 if the i-th component of - abs(op(A))*abs(X) + abs(B) is less than SAFE2. - - Use CLACON to estimate the infinity-norm of the matrix - inv(op(A)) * diag(W), - where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = slu_c_abs1( &Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). 
*/ - if ( notran ) { - for (k = 0; k < A->ncol; ++k) { - xk = slu_c_abs1( &Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += slu_c_abs1(&Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - xk = slu_c_abs1( &Xptr[irow] ); - s += slu_c_abs1(&Aval[i]) * xk; - } - rwork[k] += s; - } - } - - for (i = 0; i < A->nrow; ++i) - if (rwork[i] > safe2) - rwork[i] = slu_c_abs(&work[i]) + (iwork[i]+1)*eps*rwork[i]; - else - rwork[i] = slu_c_abs(&work[i])+(iwork[i]+1)*eps*rwork[i]+safe1; - kase = 0; - - do { - clacon_(&A->nrow, &work[A->nrow], work, - &ferr[j], &kase); - if (kase == 0) break; - - if (kase == 1) { - /* Multiply by diag(W)*inv(op(A)**T)*(diag(C) or diag(R)). */ - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) { - cs_mult(&work[i], &work[i], C[i]); - } - else if ( !notran && rowequ ) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&work[i], &work[i], R[i]); - } - - cgstrs (transt, L, U, perm_c, perm_r, &Bjcol, stat, info); - - for (i = 0; i < A->nrow; ++i) { - cs_mult(&work[i], &work[i], rwork[i]); - } - } else { - /* Multiply by (diag(C) or diag(R))*inv(op(A))*diag(W). */ - for (i = 0; i < A->nrow; ++i) { - cs_mult(&work[i], &work[i], rwork[i]); - } - - cgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) { - cs_mult(&work[i], &work[i], C[i]); - } - else if ( !notran && rowequ ) - for (i = 0; i < A->ncol; ++i) { - cs_mult(&work[i], &work[i], R[i]); - } - } - - } while ( kase != 0 ); - - /* Normalize error. 
*/ - lstres = 0.; - if ( notran && colequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, C[i] * slu_c_abs1( &Xptr[i]) ); - } else if ( !notran && rowequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, R[i] * slu_c_abs1( &Xptr[i]) ); - } else { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, slu_c_abs1( &Xptr[i]) ); - } - if ( lstres != 0. ) - ferr[j] /= lstres; - - } /* for each RHS j ... */ - - SUPERLU_FREE(work); - SUPERLU_FREE(rwork); - SUPERLU_FREE(iwork); - SUPERLU_FREE(Bjcol.Store); - - return; - -} /* cgsrfs */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgssv.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgssv.c deleted file mode 100644 index 07f4784a82..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgssv.c +++ /dev/null @@ -1,227 +0,0 @@ - -/*! @file cgssv.c - * \brief Solves the system of linear equations A*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * CGSSV solves the system of linear equations A*X=B, using the
    - * LU factorization from CGSTRF. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *
    - *      1.1. Permute the columns of A, forming A*Pc, where Pc
    - *           is a permutation matrix. For more details of this step, 
    - *           see sp_preorder.c.
    - *
    - *      1.2. Factor A as Pr*A*Pc=L*U with the permutation Pr determined
    - *           by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      1.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the
    - *      above algorithm to the transpose of A:
    - *
    - *      2.1. Permute columns of transpose(A) (rows of A),
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix. 
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.2. Factor A as Pr*transpose(A)*Pc=L*U with the permutation Pr
    - *           determined by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      2.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR; Dtype = SLU_C; Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - * perm_c  (input/output) int*
    - *         If A->Stype = SLU_NC, column permutation vector of size A->ncol
    - *         which defines the permutation matrix Pc; perm_c[i] = j means 
    - *         column i of A is in position j in A*Pc.
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - *         If options->ColPerm = MY_PERMC or options->Fact = SamePattern or
    - *            options->Fact = SamePattern_SameRowPerm, it is an input argument.
    - *            On exit, perm_c may be overwritten by the product of the input
    - *            perm_c and a permutation that postorders the elimination tree
    - *            of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *            is already in postorder.
    - *         Otherwise, it is an output argument.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined 
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->RowPerm = MY_PERMR or
    - *            options->Fact = SamePattern_SameRowPerm, perm_r is an
    - *            input argument.
    - *         otherwise it is an output argument.
    - *
    - * L       (output) SuperMatrix*
    - *         The factor L from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - *         
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *         > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                so the solution could not be computed.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol.
    - * 
    - */ - -void -cgssv(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperMatrix *B, - SuperLUStat_t *stat, int *info ) -{ - - DNformat *Bstore; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int lwork = 0, *etree, i; - - /* Set default values for some parameters */ - int panel_size; /* panel size */ - int relax; /* no of columns in a relaxed snodes */ - int permc_spec; - trans_t trans = NOTRANS; - double *utime; - double t; /* Temporary time */ - - /* Test the input parameters ... */ - *info = 0; - Bstore = B->Store; - if ( options->Fact != DOFACT ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_C || A->Mtype != SLU_GE ) - *info = -2; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_C || B->Mtype != SLU_GE ) - *info = -7; - if ( *info != 0 ) { - i = -(*info); - xerbla_("cgssv", &i); - return; - } - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - cCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - trans = TRANS; - } else { - if ( A->Stype == SLU_NC ) AA = A; - } - - t = SuperLU_timer_(); - /* - * Get column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t; - - etree = intMalloc(A->ncol); - - t = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t; - - panel_size = sp_ienv(1); - relax = sp_ienv(2); - - /*printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4));*/ - t = SuperLU_timer_(); - /* Compute the LU factorization of A. */ - cgstrf(options, &AC, relax, panel_size, etree, - NULL, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t; - - t = SuperLU_timer_(); - if ( *info == 0 ) { - /* Solve the system A*X=B, overwriting B with X. 
*/ - cgstrs (trans, L, U, perm_c, perm_r, B, stat, info); - } - utime[SOLVE] = SuperLU_timer_() - t; - - SUPERLU_FREE (etree); - Destroy_CompCol_Permuted(&AC); - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgssvx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgssvx.c deleted file mode 100644 index 428ecb0be8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgssvx.c +++ /dev/null @@ -1,619 +0,0 @@ - -/*! @file cgssvx.c - * \brief Solves the system of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * CGSSVX solves the system of linear equations A*X=B or A'*X=B, using
    - * the LU factorization from cgstrf(). Error bounds on the solution and
    - * a condition estimate are also provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *      1.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A is
    - *           overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *           (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *           = TRANS or CONJ).
    - *
    - *      1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *           matrix that usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the matrix A (after equilibration if options->Equil = YES)
    - *           as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *      1.4. Compute the reciprocal pivot growth factor.
    - *
    - *      1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form of 
    - *           A is used to estimate the condition number of the matrix A. If
    - *           the reciprocal of the condition number is less than machine
    - *           precision, info = A->ncol+1 is returned as a warning, but the
    - *           routine still goes on to solve for X and computes error bounds
    - *           as described below.
    - *
    - *      1.6. The system of equations is solved for X using the factored form
    - *           of A.
    - *
    - *      1.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      1.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *      to the transpose of A:
    - *
    - *      2.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A' is
    - *           overwritten by diag(R)*A'*diag(C) and B by diag(R)*B 
    - *           (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *      2.2. Permute columns of transpose(A) (rows of A), 
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix that 
    - *           usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the transpose(A) (after equilibration if 
    - *           options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *           permutation Pr determined by partial pivoting.
    - *
    - *      2.4. Compute the reciprocal pivot growth factor.
    - *
    - *      2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form 
    - *           of transpose(A) is used to estimate the condition number of the
    - *           matrix A. If the reciprocal of the condition number
    - *           is less than machine precision, info = A->nrow+1 is returned as
    - *           a warning, but the routine still goes on to solve for X and
    - *           computes error bounds as described below.
    - *
    - *      2.6. The system of equations is solved for X using the factored form
    - *           of transpose(A).
    - *
    - *      2.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      2.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R) 
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input/output) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of the linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR, Dtype = SLU_D, Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - *         On entry, If options->Fact = FACTORED and equed is not 'N', 
    - *         then A must have been equilibrated by the scaling factors in
    - *         R and/or C.  
    - *         On exit, A is not modified if options->Equil = NO, or if 
    - *         options->Equil = YES but equed = 'N' on exit.
    - *         Otherwise, if options->Equil = YES and equed is not 'N',
    - *         A is scaled as follows:
    - *         If A->Stype = SLU_NC:
    - *           equed = 'R':  A := diag(R) * A
    - *           equed = 'C':  A := A * diag(C)
    - *           equed = 'B':  A := diag(R) * A * diag(C).
    - *         If A->Stype = SLU_NR:
    - *           equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *           equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *           equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *         which defines the permutation matrix Pc; perm_c[i] = j means
    - *         column i of A is in position j in A*Pc.
    - *         On exit, perm_c may be overwritten by the product of the input
    - *         perm_c and a permutation that postorders the elimination tree
    - *         of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *         is already in postorder.
    - *
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *         will try to use the input perm_r, unless a certain threshold
    - *         criterion is violated. In that case, perm_r is overwritten by a
    - *         new permutation determined by partial pivoting or diagonal
    - *         threshold pivoting.
    - *         Otherwise, perm_r is output argument.
    - * 
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *         Elimination tree of Pc'*A'*A*Pc.
    - *         If options->Fact != FACTORED and options->Fact != DOFACT,
    - *         etree is an input argument, otherwise it is an output argument.
    - *         Note: etree is a vector of parent pointers for a forest whose
    - *         vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *         Specifies the form of equilibration that was done.
    - *         = 'N': No equilibration.
    - *         = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *         = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *         = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                by diag(R)*A*diag(C).
    - *         If options->Fact = FACTORED, equed is an input argument,
    - *         otherwise it is an output argument.
    - *
    - * R       (input/output) float*, dimension (A->nrow)
    - *         The row scale factors for A or transpose(A).
    - *         If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *         If equed = 'N' or 'C', R is not accessed.
    - *         If options->Fact = FACTORED, R is an input argument,
    - *             otherwise, R is output.
    - *         If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *             of R must be positive.
    - * 
    - * C       (input/output) float*, dimension (A->ncol)
    - *         The column scale factors for A or transpose(A).
    - *         If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *         If equed = 'N' or 'R', C is not accessed.
    - *         If options->Fact = FACTORED, C is an input argument,
    - *             otherwise, C is output.
    - *         If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *             of C must be positive.
    - *         
    - * L       (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype SLU_= NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - *
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *         User supplied workspace, should be large enough
    - *         to hold data structures for factors L and U.
    - *         On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               mem_usage->total_needed; no other side effects.
    - *
    - *         See argument 'mem_usage' for memory usage statistics.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         If B->ncol = 0, only LU decomposition is performed, the triangular
    - *                         solve is skipped.
    - *         On exit,
    - *            if equed = 'N', B is not modified; otherwise
    - *            if A->Stype = SLU_NC:
    - *               if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *                  B is overwritten by diag(R)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *                  B is overwritten by diag(C)*B;
    - *            if A->Stype = SLU_NR:
    - *               if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *                  B is overwritten by diag(C)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *                  B is overwritten by diag(R)*B.
    - *
    - * X       (output) SuperMatrix*
    - *         X has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE. 
    - *         If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *         to the original system of equations. Note that A and B are modified
    - *         on exit if equed is not 'N', and the solution to the equilibrated
    - *         system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *         equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *         and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) float*
    - *         The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *         The infinity norm is used. If recip_pivot_growth is much less
    - *         than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) float*
    - *         The estimate of the reciprocal condition number of the matrix A
    - *         after equilibration (if done). If rcond is less than the machine
    - *         precision (in particular, if rcond = 0), the matrix is singular
    - *         to working precision. This condition is indicated by a return
    - *         code of info > 0.
    - *
    - * FERR    (output) float*, dimension (B->ncol)   
    - *         The estimated forward error bound for each solution vector   
    - *         X(j) (the j-th column of the solution matrix X).   
    - *         If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *         is an estimated upper bound for the magnitude of the largest 
    - *         element in (X(j) - XTRUE) divided by the magnitude of the   
    - *         largest element in X(j).  The estimate is as reliable as   
    - *         the estimate for RCOND, and is almost always a slight   
    - *         overestimate of the true error.
    - *         If options->IterRefine = NOREFINE, ferr = 1.0.
    - *
    - * BERR    (output) float*, dimension (B->ncol)
    - *         The componentwise relative backward error of each solution   
    - *         vector X(j) (i.e., the smallest relative change in   
    - *         any element of A or B that makes X(j) an exact solution).
    - *         If options->IterRefine = NOREFINE, berr = 1.0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *         Record the memory usage statistics, consisting of following fields:
    - *         - for_lu (float)
    - *           The amount of space used in bytes for L\U data structures.
    - *         - total_needed (float)
    - *           The amount of space needed in bytes to perform factorization.
    - *         - expansions (int)
    - *           The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *         = 0: successful exit   
    - *         < 0: if info = -i, the i-th argument had an illegal value   
    - *         > 0: if info = i, and i is   
    - *              <= A->ncol: U(i,i) is exactly zero. The factorization has   
    - *                    been completed, but the factor U is exactly   
    - *                    singular, so the solution and error bounds   
    - *                    could not be computed.   
    - *              = A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *                    precision, meaning that the matrix is singular to
    - *                    working precision. Nevertheless, the solution and
    - *                    error bounds are computed because there are a number
    - *                    of situations where the computed solution can be more
    - *                    accurate than the value of RCOND would suggest.   
    - *              > A->ncol+1: number of bytes allocated when memory allocation
    - *                    failure occurred, plus A->ncol.
    - * 
    - */ - -void -cgssvx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, float *R, float *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, float *recip_pivot_growth, - float *rcond, float *ferr, float *berr, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info ) -{ - - - DNformat *Bstore, *Xstore; - complex *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec; - trans_t trant; - char norm[1]; - int i, j, info1; - float amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - float diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - /* External functions */ - extern float clangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = slamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - -#if 0 -printf("dgssvx: Fact=%4d, Trans=%4d, equed=%c\n", - options->Fact, options->Trans, *equed); -#endif - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_C || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_C || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_C || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("cgssvx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - cCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact && equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - cgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - claqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. */ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], R[i]); - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], C[i]); - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - 
utime[ETREE] = SuperLU_timer_() - t0; - -/* printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4)); - fflush(stdout); */ - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - cgstrf(options, &AC, relax, panel_size, etree, - work, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) { - if ( *info <= A->ncol ) { - /* Compute the reciprocal pivot growth factor of the leading - rank-deficient *info columns of A. */ - *recip_pivot_growth = cPivotGrowth(*info, AA, perm_c, L, U); - } - return; - } - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = cPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = clangs(norm, AA); - cgscon(norm, L, U, anorm, rcond, stat, info); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. */ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - cgstrs (trant, L, U, perm_c, perm_r, X, stat, info); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Use iterative refinement to improve the computed solution and compute - error bounds and backward error estimates for it. 
*/ - t0 = SuperLU_timer_(); - if ( options->IterRefine != NOREFINE ) { - cgsrfs(trant, AA, L, U, perm_c, perm_r, equed, R, C, B, - X, ferr, berr, stat, info); - } else { - for (j = 0; j < nrhs; ++j) ferr[j] = berr[j] = 1.0; - } - utime[REFINE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original system. */ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], C[i]); - } - } - } else if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - cs_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], R[i]); - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < slamch_("E") ) *info = A->ncol + 1; - } - - if ( nofact ) { - cQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgstrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgstrf.c deleted file mode 100644 index d3c23daaf3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgstrf.c +++ /dev/null @@ -1,436 +0,0 @@ - -/*! @file cgstrf.c - * \brief Computes an LU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * CGSTRF computes an LU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper 
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = SLU_NCP; Dtype = SLU_C; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *          To control degree of relaxing supernodes. If the number
    - *          of nodes (columns) in a subtree of the elimination tree is less
    - *          than relax, this subtree is considered as one supernode,
    - *          regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *          A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *          Elimination tree of A'*A.
    - *          Note: etree is a vector of parent pointers for a forest whose
    - *          vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *          On input, the columns of A should be permuted so that the
    - *          etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *          User-supplied work space and space for the output data structures.
    - *          Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the 
    - *          permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *          in position j in A*Pc.
    - *          When searching for diagonal, perm_c[*] is applied to the
    - *          row subscripts of A, so that diagonal threshold pivoting
    - *          can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *          Row permutation vector which defines the permutation matrix Pr,
    - *          perm_r[i] = j means row i of A is in position j in Pr*A.
    - *          If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *             will try to use the input perm_r, unless a certain threshold
    - *             criterion is violated. In that case, perm_r is overwritten by
    - *             a new permutation determined by partial pivoting or diagonal
    - *             threshold pivoting.
    - *          Otherwise, perm_r is output argument;
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = SLU_NC, 
    - *          Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *          = 0: successful exit
    - *          < 0: if info = -i, the i-th argument had an illegal value
    - *          > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                and division by zero will occur if it is used to solve a
    - *                system of equations.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol. If lwork = -1, it is
    - *                the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays: 
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   xprune[0:n-1]: xprune[*] points to locations in subscript 
    - *	vector lsub[*]. For column i, xprune[i] denotes the point where 
    - *	structural pruning begins. I.e. only xlsub[i],..,xprune[i]-1 need 
    - *	to be traversed for symbolic factorization.
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been 
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 3 of them: marker/marker1 are used for panel dfs, 
    - *	      see cpanel_dfs.c; marker2 is used for inner-factorization,
    - *            see ccolumn_dfs.c.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *      Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs) 
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the 
    - *	last column of a supernode.
    - *      The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a 
    - *	supernodal representative r, repfnz[r] is the location of the first 
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel. 
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below 
    - *      the panel diagonal. These are filled in during cpanel_dfs(), and are
    - *      used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *	    	   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_cdefs.h.
    - * 
    - */ - -void -cgstrf (superlu_options_t *options, SuperMatrix *A, - int relax, int panel_size, int *etree, void *work, int lwork, - int *perm_c, int *perm_r, SuperMatrix *L, SuperMatrix *U, - SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *iwork; - complex *cwork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *xprune; - int *marker; - complex *dense, *tempv; - int *relax_end; - complex *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - float fill_ratio = sp_ienv(6); /* estimated fill ratio */ - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = cLUMemInit(fact, work, lwork, m, n, Astore->nnz, - panel_size, fill_ratio, L, U, &Glu, &iwork, &cwork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; 
- xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &xprune, &marker); - cSetRWork(m, panel_size, cwork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - for (k = 0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) { - heap_relax_snode(n, etree, relax, marker, relax_end); - } else { - relax_snode(n, etree, relax, marker, relax_end); - } - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = csnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - xprune, marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ( (*info = cLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu)) ) - return; - } - - for (icol = jcol; icol<= kcol; icol++) { - xusub[icol+1] = nextu; - - /* Scatter into SPA dense[*] */ 
- for (k = xa_begin[icol]; k < xa_end[icol]; k++) - dense[asub[k]] = a[k]; - - /* Numeric update within the snode */ - csnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if ( (*info = cpivotL(icol, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - -#ifdef DEBUG - cprint_lu_col("[1]: ", icol, pivrow, xprune, &Glu); -#endif - - } - - jcol = icol; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - cpanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, panel_lsub, segrep, repfnz, xprune, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - cpanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for ( jj = jcol; jj < jcol + panel_size; jj++) { - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - if ((*info = ccolumn_dfs(m, jj, perm_r, &nseg, &panel_lsub[k], - segrep, &repfnz[k], xprune, marker, - parent, xplore, &Glu)) != 0) return; - - /* Numeric updates */ - if ((*info = ccolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Copy the U-segments to ucol[*] */ - if ((*info = ccopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], &Glu)) != 0) - return; - - if ( (*info = cpivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - - /* Prune columns (0:jj-1) using 
column jj */ - cpruneL(jj, perm_r, pivrow, nseg, segrep, - &repfnz[k], xprune, &Glu); - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - -#ifdef DEBUG - cprint_lu_col("[2]: ", jj, pivrow, xprune, &Glu); -#endif - - } - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - countnz(min_mn, xprune, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - cLUWorkFree(iwork, cwork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. - There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - cCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_C, SLU_TRLU); - cCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_C, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - stat->expansions = --(Glu.num_expansions); - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgstrs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgstrs.c deleted file mode 100644 index 
d7f5a3f955..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cgstrs.c +++ /dev/null @@ -1,350 +0,0 @@ - -/*! @file cgstrs.c - * \brief Solves a system using LU factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_cdefs.h" - - -/* - * Function prototypes - */ -void cusolve(int, int, complex*, complex*); -void clsolve(int, int, complex*, complex*); -void cmatvec(int, int, int, complex*, complex*, complex*); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * CGSTRS solves a system of linear equations A*X=B or A'*X=B
    - * with A sparse and B dense, using the LU factorization computed by
    - * CGSTRF.
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *
    - * L       (input) SuperMatrix*
    - *         The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *         cgstrf(). Use compressed row subscripts storage for supernodes,
    - *         i.e., L has types: Stype = SLU_SC, Dtype = SLU_C, Mtype = SLU_TRLU.
    - *
    - * U       (input) SuperMatrix*
    - *         The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *         cgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_TRU.
    - *
    - * perm_c  (input) int*, dimension (L->ncol)
    - *	   Column permutation vector, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - *
    - * perm_r  (input) int*, dimension (L->nrow)
    - *         Row permutation vector, which defines the permutation matrix Pr; 
    - *         perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - * 	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - * 
    - */ - -void -cgstrs (trans_t trans, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, SuperMatrix *B, - SuperLUStat_t *stat, int *info) -{ - -#ifdef _CRAY - _fcd ftcs1, ftcs2, ftcs3, ftcs4; -#endif - int incx = 1, incy = 1; -#ifdef USE_VENDOR_BLAS - complex alpha = {1.0, 0.0}, beta = {1.0, 0.0}; - complex *work_col; -#endif - complex temp_comp; - DNformat *Bstore; - complex *Bmat; - SCformat *Lstore; - NCformat *Ustore; - complex *Lval, *Uval; - int fsupc, nrow, nsupr, nsupc, luptr, istart, irow; - int i, j, k, iptr, jcol, n, ldb, nrhs; - complex *work, *rhs_work, *soln; - flops_t solve_ops; - void cprint_soln(); - - /* Test input parameters ... */ - *info = 0; - Bstore = B->Store; - ldb = Bstore->lda; - nrhs = B->ncol; - if ( trans != NOTRANS && trans != TRANS && trans != CONJ ) *info = -1; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_C || L->Mtype != SLU_TRLU ) - *info = -2; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_C || U->Mtype != SLU_TRU ) - *info = -3; - else if ( ldb < SUPERLU_MAX(0, L->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_C || B->Mtype != SLU_GE ) - *info = -6; - if ( *info ) { - i = -(*info); - xerbla_("cgstrs", &i); - return; - } - - n = L->nrow; - work = complexCalloc(n * nrhs); - if ( !work ) ABORT("Malloc fails for local work[]."); - soln = complexMalloc(n); - if ( !soln ) ABORT("Malloc fails for local soln[]."); - - Bmat = Bstore->nzval; - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( trans == NOTRANS ) { - /* Permute right hand sides to form Pr*B */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_r[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - /* Forward solve PLy=Pb. 
*/ - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - nrow = nsupr - nsupc; - - solve_ops += 4 * nsupc * (nsupc - 1) * nrhs; - solve_ops += 8 * nrow * nsupc * nrhs; - - if ( nsupc == 1 ) { - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - luptr = L_NZ_START(fsupc); - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); iptr++){ - irow = L_SUB(iptr); - ++luptr; - cc_mult(&temp_comp, &rhs_work[fsupc], &Lval[luptr]); - c_sub(&rhs_work[irow], &rhs_work[irow], &temp_comp); - } - } - } else { - luptr = L_NZ_START(fsupc); -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("N", strlen("N")); - ftcs3 = _cptofcd("U", strlen("U")); - CTRSM( ftcs1, ftcs1, ftcs2, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - CGEMM( ftcs2, ftcs2, &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#else - ctrsm_("L", "L", "N", "U", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - cgemm_( "N", "N", &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#endif - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - work_col = &work[j*n]; - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - c_sub(&rhs_work[irow], &rhs_work[irow], &work_col[i]); - work_col[i].r = 0.0; - work_col[i].i = 0.0; - iptr++; - } - } -#else - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - clsolve (nsupr, nsupc, &Lval[luptr], &rhs_work[fsupc]); - cmatvec (nsupr, nrow, nsupc, &Lval[luptr+nsupc], - &rhs_work[fsupc], &work[0] ); - - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - c_sub(&rhs_work[irow], &rhs_work[irow], &work[i]); - work[i].r = 0.; - work[i].i = 0.; - iptr++; - } - } -#endif - } /* else ... 
*/ - } /* for L-solve */ - -#ifdef DEBUG - printf("After L-solve: y=\n"); - cprint_soln(n, nrhs, Bmat); -#endif - - /* - * Back solve Ux=y. - */ - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 4 * nsupc * (nsupc + 1) * nrhs; - - if ( nsupc == 1 ) { - rhs_work = &Bmat[0]; - for (j = 0; j < nrhs; j++) { - c_div(&rhs_work[fsupc], &rhs_work[fsupc], &Lval[luptr]); - rhs_work += ldb; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("U", strlen("U")); - ftcs3 = _cptofcd("N", strlen("N")); - CTRSM( ftcs1, ftcs2, ftcs3, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#else - ctrsm_("L", "U", "N", "N", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#endif -#else - for (j = 0; j < nrhs; j++) - cusolve ( nsupr, nsupc, &Lval[luptr], &Bmat[fsupc+j*ldb] ); -#endif - } - - for (j = 0; j < nrhs; ++j) { - rhs_work = &Bmat[j*ldb]; - for (jcol = fsupc; jcol < fsupc + nsupc; jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++ ){ - irow = U_SUB(i); - cc_mult(&temp_comp, &rhs_work[jcol], &Uval[i]); - c_sub(&rhs_work[irow], &rhs_work[irow], &temp_comp); - } - } - } - - } /* for U-solve */ - -#ifdef DEBUG - printf("After U-solve: x=\n"); - cprint_soln(n, nrhs, Bmat); -#endif - - /* Compute the final solution X := Pc*X. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_c[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = solve_ops; - - } else { /* Solve A'*X=B or CONJ(A)*X=B */ - /* Permute right hand sides to form Pc'*B. 
*/ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_c[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = 0; - if (trans == TRANS) { - for (k = 0; k < nrhs; ++k) { - /* Multiply by inv(U'). */ - sp_ctrsv("U", "T", "N", L, U, &Bmat[k*ldb], stat, info); - - /* Multiply by inv(L'). */ - sp_ctrsv("L", "T", "U", L, U, &Bmat[k*ldb], stat, info); - } - } else { /* trans == CONJ */ - for (k = 0; k < nrhs; ++k) { - /* Multiply by conj(inv(U')). */ - sp_ctrsv("U", "C", "N", L, U, &Bmat[k*ldb], stat, info); - - /* Multiply by conj(inv(L')). */ - sp_ctrsv("L", "C", "U", L, U, &Bmat[k*ldb], stat, info); - } - } - /* Compute the final solution X := Pr'*X (=inv(Pr)*X) */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_r[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - } - - SUPERLU_FREE(work); - SUPERLU_FREE(soln); -} - -/* - * Diagnostic print of the solution vector - */ -void -cprint_soln(int n, int nrhs, complex *soln) -{ - int i; - - for (i = 0; i < n; i++) - printf("\t%d: %.4f\n", i, soln[i]); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/clacon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/clacon.c deleted file mode 100644 index 0acecebcdc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/clacon.c +++ /dev/null @@ -1,221 +0,0 @@ - -/*! @file clacon.c - * \brief Estimates the 1-norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_Cnames.h" -#include "slu_scomplex.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   CLACON estimates the 1-norm of a square matrix A.   
    - *   Reverse communication is used for evaluating matrix-vector products. 
    - * 
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   N      (input) INT
    - *          The order of the matrix.  N >= 1.   
    - *
    - *   V      (workspace) COMPLEX PRECISION array, dimension (N)   
    - *          On the final return, V = A*W,  where  EST = norm(V)/norm(W)   
    - *          (W is not returned).   
    - *
    - *   X      (input/output) COMPLEX PRECISION array, dimension (N)   
    - *          On an intermediate return, X should be overwritten by   
    - *                A * X,   if KASE=1,   
    - *                A' * X,  if KASE=2,
    - *          where A' is the conjugate transpose of A,
    - *         and CLACON must be re-called with all the other parameters   
    - *          unchanged.   
    - *
    - *
    - *   EST    (output) FLOAT PRECISION   
    - *          An estimate (a lower bound) for norm(A).   
    - *
    - *   KASE   (input/output) INT
    - *          On the initial call to CLACON, KASE should be 0.   
    - *          On an intermediate return, KASE will be 1 or 2, indicating   
    - *          whether X should be overwritten by A * X  or A' * X.   
    - *          On the final return from CLACON, KASE will again be 0.   
    - *
    - *   Further Details   
    - *   ======= =======   
    - *
    - *   Contributed by Nick Higham, University of Manchester.   
    - *   Originally named CONEST, dated March 16, 1988.   
    - *
    - *   Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of 
    - *   a real or complex matrix, with applications to condition estimation", 
    - *   ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988.   
    - *   ===================================================================== 
    - * 
    - */ - -int -clacon_(int *n, complex *v, complex *x, float *est, int *kase) - -{ - - - /* Table of constant values */ - int c__1 = 1; - complex zero = {0.0, 0.0}; - complex one = {1.0, 0.0}; - - /* System generated locals */ - float d__1; - - /* Local variables */ - static int iter; - static int jump, jlast; - static float altsgn, estold; - static int i, j; - float temp; - float safmin; - extern double slamch_(char *); - extern int icmax1_(int *, complex *, int *); - extern double scsum1_(int *, complex *, int *); - - safmin = slamch_("Safe minimum"); - if ( *kase == 0 ) { - for (i = 0; i < *n; ++i) { - x[i].r = 1. / (float) (*n); - x[i].i = 0.; - } - *kase = 1; - jump = 1; - return 0; - } - - switch (jump) { - case 1: goto L20; - case 2: goto L40; - case 3: goto L70; - case 4: goto L110; - case 5: goto L140; - } - - /* ................ ENTRY (JUMP = 1) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */ - L20: - if (*n == 1) { - v[0] = x[0]; - *est = slu_c_abs(&v[0]); - /* ... QUIT */ - goto L150; - } - *est = scsum1_(n, x, &c__1); - - for (i = 0; i < *n; ++i) { - d__1 = slu_c_abs(&x[i]); - if (d__1 > safmin) { - d__1 = 1 / d__1; - x[i].r *= d__1; - x[i].i *= d__1; - } else { - x[i] = one; - } - } - *kase = 2; - jump = 2; - return 0; - - /* ................ ENTRY (JUMP = 2) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */ -L40: - j = icmax1_(n, &x[0], &c__1); - --j; - iter = 2; - - /* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */ -L50: - for (i = 0; i < *n; ++i) x[i] = zero; - x[j] = one; - *kase = 1; - jump = 3; - return 0; - - /* ................ ENTRY (JUMP = 3) - X HAS BEEN OVERWRITTEN BY A*X. */ -L70: -#ifdef _CRAY - CCOPY(n, x, &c__1, v, &c__1); -#else - ccopy_(n, x, &c__1, v, &c__1); -#endif - estold = *est; - *est = scsum1_(n, v, &c__1); - - -L90: - /* TEST FOR CYCLING. 
*/ - if (*est <= estold) goto L120; - - for (i = 0; i < *n; ++i) { - d__1 = slu_c_abs(&x[i]); - if (d__1 > safmin) { - d__1 = 1 / d__1; - x[i].r *= d__1; - x[i].i *= d__1; - } else { - x[i] = one; - } - } - *kase = 2; - jump = 4; - return 0; - - /* ................ ENTRY (JUMP = 4) - X HAS BEEN OVERWRITTEN BY TRANDPOSE(A)*X. */ -L110: - jlast = j; - j = icmax1_(n, &x[0], &c__1); - --j; - if (x[jlast].r != (d__1 = x[j].r, fabs(d__1)) && iter < 5) { - ++iter; - goto L50; - } - - /* ITERATION COMPLETE. FINAL STAGE. */ -L120: - altsgn = 1.; - for (i = 1; i <= *n; ++i) { - x[i-1].r = altsgn * ((float)(i - 1) / (float)(*n - 1) + 1.); - x[i-1].i = 0.; - altsgn = -altsgn; - } - *kase = 1; - jump = 5; - return 0; - - /* ................ ENTRY (JUMP = 5) - X HAS BEEN OVERWRITTEN BY A*X. */ -L140: - temp = scsum1_(n, x, &c__1) / (float)(*n * 3) * 2.; - if (temp > *est) { -#ifdef _CRAY - CCOPY(n, &x[0], &c__1, &v[0], &c__1); -#else - ccopy_(n, &x[0], &c__1, &v[0], &c__1); -#endif - *est = temp; - } - -L150: - *kase = 0; - return 0; - -} /* clacon_ */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/clangs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/clangs.c deleted file mode 100644 index ffa3934d6e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/clangs.c +++ /dev/null @@ -1,119 +0,0 @@ - -/*! @file clangs.c - * \brief Returns the value of the one norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from lapack routine CLANGE 
    - * 
    - */ -/* - * File name: clangs.c - * History: Modified from lapack routine CLANGE - */ -#include -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   CLANGS returns the value of the one norm, or the Frobenius norm, or 
    - *   the infinity norm, or the element of largest absolute value of a 
    - *   real matrix A.   
    - *
    - *   Description   
    - *   ===========   
    - *
    - *   CLANGE returns the value   
    - *
    - *      CLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'   
    - *               (   
    - *               ( norm1(A),         NORM = '1', 'O' or 'o'   
    - *               (   
    - *               ( normI(A),         NORM = 'I' or 'i'   
    - *               (   
    - *               ( normF(A),         NORM = 'F', 'f', 'E' or 'e'   
    - *
    - *   where  norm1  denotes the  one norm of a matrix (maximum column sum), 
    - *   normI  denotes the  infinity norm  of a matrix  (maximum row sum) and 
    - *   normF  denotes the  Frobenius norm of a matrix (square root of sum of 
    - *   squares).  Note that  max(abs(A(i,j)))  is not a  matrix norm.   
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   NORM    (input) CHARACTER*1   
    - *           Specifies the value to be returned in CLANGE as described above.   
    - *   A       (input) SuperMatrix*
    - *           The M by N sparse matrix A. 
    - *
    - *  =====================================================================
    - * 
    - */ - -float clangs(char *norm, SuperMatrix *A) -{ - - /* Local variables */ - NCformat *Astore; - complex *Aval; - int i, j, irow; - float value, sum; - float *rwork; - - Astore = A->Store; - Aval = Astore->nzval; - - if ( SUPERLU_MIN(A->nrow, A->ncol) == 0) { - value = 0.; - - } else if (lsame_(norm, "M")) { - /* Find max(abs(A(i,j))). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - value = SUPERLU_MAX( value, slu_c_abs( &Aval[i]) ); - - } else if (lsame_(norm, "O") || *(unsigned char *)norm == '1') { - /* Find norm1(A). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) { - sum = 0.; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - sum += slu_c_abs( &Aval[i] ); - value = SUPERLU_MAX(value,sum); - } - - } else if (lsame_(norm, "I")) { - /* Find normI(A). */ - if ( !(rwork = (float *) SUPERLU_MALLOC(A->nrow * sizeof(float))) ) - ABORT("SUPERLU_MALLOC fails for rwork."); - for (i = 0; i < A->nrow; ++i) rwork[i] = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) { - irow = Astore->rowind[i]; - rwork[irow] += slu_c_abs( &Aval[i] ); - } - value = 0.; - for (i = 0; i < A->nrow; ++i) - value = SUPERLU_MAX(value, rwork[i]); - - SUPERLU_FREE (rwork); - - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - /* Find normF(A). */ - ABORT("Not implemented."); - } else - ABORT("Illegal norm specified."); - - return (value); - -} /* clangs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/claqgs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/claqgs.c deleted file mode 100644 index 1ef52609e4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/claqgs.c +++ /dev/null @@ -1,148 +0,0 @@ - -/*! @file claqgs.c - * \brief Equlibrates a general sprase matrix - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - * Modified from LAPACK routine CLAQGE
    - * 
    - */ -/* - * File name: claqgs.c - * History: Modified from LAPACK routine CLAQGE - */ -#include -#include "slu_cdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   CLAQGS equilibrates a general sparse M by N matrix A using the row and   
    - *   scaling factors in the vectors R and C.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input/output) SuperMatrix*
    - *           On exit, the equilibrated matrix.  See EQUED for the form of 
    - *           the equilibrated matrix. The type of A can be:
    - *	    Stype = NC; Dtype = SLU_C; Mtype = GE.
    - *	    
    - *   R       (input) float*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *	    
    - *   C       (input) float*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *	    
    - *   ROWCND  (input) float
    - *           Ratio of the smallest R(i) to the largest R(i).
    - *	    
    - *   COLCND  (input) float
    - *           Ratio of the smallest C(i) to the largest C(i).
    - *	    
    - *   AMAX    (input) float
    - *           Absolute value of largest matrix entry.
    - *	    
    - *   EQUED   (output) char*
    - *           Specifies the form of equilibration that was done.   
    - *           = 'N':  No equilibration   
    - *           = 'R':  Row equilibration, i.e., A has been premultiplied by  
    - *                   diag(R).   
    - *           = 'C':  Column equilibration, i.e., A has been postmultiplied  
    - *                   by diag(C).   
    - *           = 'B':  Both row and column equilibration, i.e., A has been
    - *                   replaced by diag(R) * A * diag(C).   
    - *
    - *   Internal Parameters   
    - *   ===================   
    - *
    - *   THRESH is a threshold value used to decide if row or column scaling   
    - *   should be done based on the ratio of the row or column scaling   
    - *   factors.  If ROWCND < THRESH, row scaling is done, and if   
    - *   COLCND < THRESH, column scaling is done.   
    - *
    - *   LARGE and SMALL are threshold values used to decide if row scaling   
    - *   should be done based on the absolute size of the largest matrix   
    - *   element.  If AMAX > LARGE or AMAX < SMALL, row scaling is done.   
    - *
    - *   ===================================================================== 
    - * 
    - */ - -void -claqgs(SuperMatrix *A, float *r, float *c, - float rowcnd, float colcnd, float amax, char *equed) -{ - - -#define THRESH (0.1) - - /* Local variables */ - NCformat *Astore; - complex *Aval; - int i, j, irow; - float large, small, cj; - extern double slamch_(char *); - float temp; - - - /* Quick return if possible */ - if (A->nrow <= 0 || A->ncol <= 0) { - *(unsigned char *)equed = 'N'; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Initialize LARGE and SMALL. */ - small = slamch_("Safe minimum") / slamch_("Precision"); - large = 1. / small; - - if (rowcnd >= THRESH && amax >= small && amax <= large) { - if (colcnd >= THRESH) - *(unsigned char *)equed = 'N'; - else { - /* Column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - cs_mult(&Aval[i], &Aval[i], cj); - } - } - *(unsigned char *)equed = 'C'; - } - } else if (colcnd >= THRESH) { - /* Row scaling, no column scaling */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - cs_mult(&Aval[i], &Aval[i], r[irow]); - } - *(unsigned char *)equed = 'R'; - } else { - /* Row and column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - temp = cj * r[irow]; - cs_mult(&Aval[i], &Aval[i], temp); - } - } - *(unsigned char *)equed = 'B'; - } - - return; - -} /* claqgs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cldperm.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cldperm.c deleted file mode 100644 index fada2c61e3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cldperm.c +++ /dev/null @@ -1,168 +0,0 @@ - -/*! @file - * \brief Finds a row permutation so that the matrix has large entries on the diagonal - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_cdefs.h" - -extern void mc64id_(int_t*); -extern void mc64ad_(int_t*, int_t*, int_t*, int_t [], int_t [], double [], - int_t*, int_t [], int_t*, int_t[], int_t*, double [], - int_t [], int_t []); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   CLDPERM finds a row permutation so that the matrix has large
    - *   entries on the diagonal.
    - *
    - * Arguments
    - * =========
    - *
    - * job    (input) int
    - *        Control the action. Possible values for JOB are:
    - *        = 1 : Compute a row permutation of the matrix so that the
    - *              permuted matrix has as many entries on its diagonal as
    - *              possible. The values on the diagonal are of arbitrary size.
    - *              HSL subroutine MC21A/AD is used for this.
    - *        = 2 : Compute a row permutation of the matrix so that the smallest 
    - *              value on the diagonal of the permuted matrix is maximized.
    - *        = 3 : Compute a row permutation of the matrix so that the smallest
    - *              value on the diagonal of the permuted matrix is maximized.
    - *              The algorithm differs from the one used for JOB = 2 and may
    - *              have quite a different performance.
    - *        = 4 : Compute a row permutation of the matrix so that the sum
    - *              of the diagonal entries of the permuted matrix is maximized.
    - *        = 5 : Compute a row permutation of the matrix so that the product
    - *              of the diagonal entries of the permuted matrix is maximized
    - *              and vectors to scale the matrix so that the nonzero diagonal 
    - *              entries of the permuted matrix are one in absolute value and 
    - *              all the off-diagonal entries are less than or equal to one in 
    - *              absolute value.
    - *        Restriction: 1 <= JOB <= 5.
    - *
    - * n      (input) int
    - *        The order of the matrix.
    - *
    - * nnz    (input) int
    - *        The number of nonzeros in the matrix.
    - *
    - * adjncy (input) int*, of size nnz
    - *        The adjacency structure of the matrix, which contains the row
    - *        indices of the nonzeros.
    - *
    - * colptr (input) int*, of size n+1
    - *        The pointers to the beginning of each column in ADJNCY.
    - *
    - * nzval  (input) complex*, of size nnz
    - *        The nonzero values of the matrix. nzval[k] is the value of
    - *        the entry corresponding to adjncy[k].
    - *        It is not used if job = 1.
    - *
    - * perm   (output) int*, of size n
    - *        The permutation vector. perm[i] = j means row i in the
    - *        original matrix is in row j of the permuted matrix.
    - *
    - * u      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the row scaling factors. 
    - *
    - * v      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the column scaling factors. 
    - *        The scaled matrix B has entries b_ij = a_ij * exp(u_i + v_j).
    - * 
    - */ - -int -cldperm(int_t job, int_t n, int_t nnz, int_t colptr[], int_t adjncy[], - complex nzval[], int_t *perm, float u[], float v[]) -{ - int_t i, liw, ldw, num; - int_t *iw, icntl[10], info[10]; - double *dw; - double *nzval_d = (double *) SUPERLU_MALLOC(nnz * sizeof(double)); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Enter cldperm()"); -#endif - liw = 5*n; - if ( job == 3 ) liw = 10*n + nnz; - if ( !(iw = intMalloc(liw)) ) ABORT("Malloc fails for iw[]"); - ldw = 3*n + nnz; - if ( !(dw = (double*) SUPERLU_MALLOC(ldw * sizeof(double))) ) - ABORT("Malloc fails for dw[]"); - - /* Increment one to get 1-based indexing. */ - for (i = 0; i <= n; ++i) ++colptr[i]; - for (i = 0; i < nnz; ++i) ++adjncy[i]; -#if ( DEBUGlevel>=2 ) - printf("LDPERM(): n %d, nnz %d\n", n, nnz); - slu_PrintInt10("colptr", n+1, colptr); - slu_PrintInt10("adjncy", nnz, adjncy); -#endif - - /* - * NOTE: - * ===== - * - * MC64AD assumes that column permutation vector is defined as: - * perm(i) = j means column i of permuted A is in column j of original A. - * - * Since a symmetric permutation preserves the diagonal entries. Then - * by the following relation: - * P'(A*P')P = P'A - * we can apply inverse(perm) to rows of A to get large diagonal entries. - * But, since 'perm' defined in MC64AD happens to be the reverse of - * SuperLU's definition of permutation vector, therefore, it is already - * an inverse for our purpose. We will thus use it directly. - * - */ - mc64id_(icntl); -#if 0 - /* Suppress error and warning messages. */ - icntl[0] = -1; - icntl[1] = -1; -#endif - - for (i = 0; i < nnz; ++i) nzval_d[i] = slu_c_abs1(&nzval[i]); - mc64ad_(&job, &n, &nnz, colptr, adjncy, nzval_d, &num, perm, - &liw, iw, &ldw, dw, icntl, info); - -#if ( DEBUGlevel>=2 ) - slu_PrintInt10("perm", n, perm); - printf(".. After MC64AD info %d\tsize of matching %d\n", info[0], num); -#endif - if ( info[0] == 1 ) { /* Structurally singular */ - printf(".. 
The last %d permutations:\n", n-num); - slu_PrintInt10("perm", n-num, &perm[num]); - } - - /* Restore to 0-based indexing. */ - for (i = 0; i <= n; ++i) --colptr[i]; - for (i = 0; i < nnz; ++i) --adjncy[i]; - for (i = 0; i < n; ++i) --perm[i]; - - if ( job == 5 ) - for (i = 0; i < n; ++i) { - u[i] = dw[i]; - v[i] = dw[n+i]; - } - - SUPERLU_FREE(iw); - SUPERLU_FREE(dw); - SUPERLU_FREE(nzval_d); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Exit cldperm()"); -#endif - - return info[0]; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cmemory.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cmemory.c deleted file mode 100644 index 076f4aed85..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cmemory.c +++ /dev/null @@ -1,701 +0,0 @@ - -/*! @file cmemory.c - * \brief Memory details - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_cdefs.h" - - -/* Internal prototypes */ -void *cexpand (int *, MemType,int, int, GlobalLU_t *); -int cLUWorkInit (int, int, int, int **, complex **, GlobalLU_t *); -void copy_mem_complex (int, void *, void *); -void cStackCompress (GlobalLU_t *); -void cSetupSpace (void *, int, GlobalLU_t *); -void *cuser_malloc (int, int, GlobalLU_t *); -void cuser_free (int, int, GlobalLU_t *); - -/* External prototypes (in memory.c - prec-independent) */ -extern void copy_mem_int (int, void *, void *); -extern void user_bcopy (char *, char *, int); - - -/* Macros to manipulate stack */ -#define StackFull(x) ( x + Glu->stack.used >= Glu->stack.size ) -#define NotDoubleAlign(addr) ( (long int)addr & 7 ) -#define DoubleAlign(addr) ( ((long int)addr + 7) & ~7L ) -#define TempSpace(m, w) ( (2*w + 4 + NO_MARKER) * m * sizeof(int) + \ - (w + 1) * m * sizeof(complex) ) -#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */ - - - - -/*! \brief Setup the memory model to be used for factorization. - * - * lwork = 0: use system malloc; - * lwork > 0: use user-supplied work[] space. 
- */ -void cSetupSpace(void *work, int lwork, GlobalLU_t *Glu) -{ - if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; /* malloc/free */ - } else if ( lwork > 0 ) { - Glu->MemModel = USER; /* user provided space */ - Glu->stack.used = 0; - Glu->stack.top1 = 0; - Glu->stack.top2 = (lwork/4)*4; /* must be word addressable */ - Glu->stack.size = Glu->stack.top2; - Glu->stack.array = (void *) work; - } -} - - - -void *cuser_malloc(int bytes, int which_end, GlobalLU_t *Glu) -{ - void *buf; - - if ( StackFull(bytes) ) return (NULL); - - if ( which_end == HEAD ) { - buf = (char*) Glu->stack.array + Glu->stack.top1; - Glu->stack.top1 += bytes; - } else { - Glu->stack.top2 -= bytes; - buf = (char*) Glu->stack.array + Glu->stack.top2; - } - - Glu->stack.used += bytes; - return buf; -} - - -void cuser_free(int bytes, int which_end, GlobalLU_t *Glu) -{ - if ( which_end == HEAD ) { - Glu->stack.top1 -= bytes; - } else { - Glu->stack.top2 += bytes; - } - Glu->stack.used -= bytes; -} - - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int cQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, iword, dword, panel_size = sp_ienv(1); - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(complex); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0*n + 3.0) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0 * panel_size + 4.0 + NO_MARKER) * n * iword + - (panel_size + 1.0) * n * dword ); - - return 0; -} /* cQuerySpace */ - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int ilu_cQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, panel_size = sp_ienv(1); - register float iword, dword; - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(double); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0f * n + 3.0f) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0f) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization. - ILU needs 5*n more integers than LU */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0f * panel_size + 9.0f + NO_MARKER) * n * iword + - (panel_size + 1.0f) * n * dword ); - - return 0; -} /* ilu_cQuerySpace */ - - -/*! \brief Allocate storage for the data structures common to all factor routines. - * - *
    - * For those unpredictable size, estimate as fill_ratio * nnz(A).
    - * Return value:
    - *     If lwork = -1, return the estimated amount of space required, plus n;
    - *     otherwise, return the amount of space actually allocated when
    - *     memory allocation failure occurred.
    - * 
    - */ -int -cLUMemInit(fact_t fact, void *work, int lwork, int m, int n, int annz, - int panel_size, float fill_ratio, SuperMatrix *L, SuperMatrix *U, - GlobalLU_t *Glu, int **iwork, complex **dwork) -{ - int info, iword, dword; - SCformat *Lstore; - NCformat *Ustore; - int *xsup, *supno; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - complex *ucol; - int *usub, *xusub; - int nzlmax, nzumax, nzlumax; - - iword = sizeof(int); - dword = sizeof(complex); - Glu->n = n; - Glu->num_expansions = 0; - - if ( !Glu->expanders ) - Glu->expanders = (ExpHeader*)SUPERLU_MALLOC( NO_MEMTYPE * - sizeof(ExpHeader) ); - if ( !Glu->expanders ) ABORT("SUPERLU_MALLOC fails for expanders"); - - if ( fact != SamePattern_SameRowPerm ) { - /* Guess for L\U factors */ - nzumax = nzlumax = fill_ratio * annz; - nzlmax = SUPERLU_MAX(1, fill_ratio/4.) * annz; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else { - cSetupSpace(work, lwork, Glu); - } - -#if ( PRNTlevel >= 1 ) - printf("cLUMemInit() called: fill_ratio %ld, nzlmax %ld, nzumax %ld\n", - fill_ratio, nzlmax, nzumax); - fflush(stdout); -#endif - - /* Integer pointers for L\U factors */ - if ( Glu->MemModel == SYSTEM ) { - xsup = intMalloc(n+1); - supno = intMalloc(n+1); - xlsub = intMalloc(n+1); - xlusup = intMalloc(n+1); - xusub = intMalloc(n+1); - } else { - xsup = (int *)cuser_malloc((n+1) * iword, HEAD, Glu); - supno = (int *)cuser_malloc((n+1) * iword, HEAD, Glu); - xlsub = (int *)cuser_malloc((n+1) * iword, HEAD, Glu); - xlusup = (int *)cuser_malloc((n+1) * iword, HEAD, Glu); - xusub = (int *)cuser_malloc((n+1) * iword, HEAD, Glu); - } - - lusup = (complex *) cexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (complex *) cexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) cexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) cexpand( &nzumax, USUB, 0, 1, Glu ); - - while ( !lusup || !ucol || !lsub || !usub ) { - if ( 
Glu->MemModel == SYSTEM ) { - SUPERLU_FREE(lusup); - SUPERLU_FREE(ucol); - SUPERLU_FREE(lsub); - SUPERLU_FREE(usub); - } else { - cuser_free((nzlumax+nzumax)*dword+(nzlmax+nzumax)*iword, - HEAD, Glu); - } - nzlumax /= 2; - nzumax /= 2; - nzlmax /= 2; - if ( nzlumax < annz ) { - printf("Not enough memory to perform factorization.\n"); - return (cmemory_usage(nzlmax, nzumax, nzlumax, n) + n); - } -#if ( PRNTlevel >= 1) - printf("cLUMemInit() reduce size: nzlmax %ld, nzumax %ld\n", - nzlmax, nzumax); - fflush(stdout); -#endif - lusup = (complex *) cexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (complex *) cexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) cexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) cexpand( &nzumax, USUB, 0, 1, Glu ); - } - - } else { - /* fact == SamePattern_SameRowPerm */ - Lstore = L->Store; - Ustore = U->Store; - xsup = Lstore->sup_to_col; - supno = Lstore->col_to_sup; - xlsub = Lstore->rowind_colptr; - xlusup = Lstore->nzval_colptr; - xusub = Ustore->colptr; - nzlmax = Glu->nzlmax; /* max from previous factorization */ - nzumax = Glu->nzumax; - nzlumax = Glu->nzlumax; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; - } else { - Glu->MemModel = USER; - Glu->stack.top2 = (lwork/4)*4; /* must be word-addressable */ - Glu->stack.size = Glu->stack.top2; - } - - lsub = Glu->expanders[LSUB].mem = Lstore->rowind; - lusup = Glu->expanders[LUSUP].mem = Lstore->nzval; - usub = Glu->expanders[USUB].mem = Ustore->rowind; - ucol = Glu->expanders[UCOL].mem = Ustore->nzval;; - Glu->expanders[LSUB].size = nzlmax; - Glu->expanders[LUSUP].size = nzlumax; - Glu->expanders[USUB].size = nzumax; - Glu->expanders[UCOL].size = nzumax; - } - - Glu->xsup = xsup; - Glu->supno = supno; - Glu->lsub = lsub; - Glu->xlsub = xlsub; - Glu->lusup = lusup; - Glu->xlusup = xlusup; - Glu->ucol = ucol; - Glu->usub = usub; - 
Glu->xusub = xusub; - Glu->nzlmax = nzlmax; - Glu->nzumax = nzumax; - Glu->nzlumax = nzlumax; - - info = cLUWorkInit(m, n, panel_size, iwork, dwork, Glu); - if ( info ) - return ( info + cmemory_usage(nzlmax, nzumax, nzlumax, n) + n); - - ++Glu->num_expansions; - return 0; - -} /* cLUMemInit */ - -/*! \brief Allocate known working storage. Returns 0 if success, otherwise - returns the number of bytes allocated so far when failure occurred. */ -int -cLUWorkInit(int m, int n, int panel_size, int **iworkptr, - complex **dworkptr, GlobalLU_t *Glu) -{ - int isize, dsize, extra; - complex *old_ptr; - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - - isize = ( (2 * panel_size + 3 + NO_MARKER ) * m + n ) * sizeof(int); - dsize = (m * panel_size + - NUM_TEMPV(m,panel_size,maxsuper,rowblk)) * sizeof(complex); - - if ( Glu->MemModel == SYSTEM ) - *iworkptr = (int *) intCalloc(isize/sizeof(int)); - else - *iworkptr = (int *) cuser_malloc(isize, TAIL, Glu); - if ( ! *iworkptr ) { - fprintf(stderr, "cLUWorkInit: malloc fails for local iworkptr[]\n"); - return (isize + n); - } - - if ( Glu->MemModel == SYSTEM ) - *dworkptr = (complex *) SUPERLU_MALLOC(dsize); - else { - *dworkptr = (complex *) cuser_malloc(dsize, TAIL, Glu); - if ( NotDoubleAlign(*dworkptr) ) { - old_ptr = *dworkptr; - *dworkptr = (complex*) DoubleAlign(*dworkptr); - *dworkptr = (complex*) ((double*)*dworkptr - 1); - extra = (char*)old_ptr - (char*)*dworkptr; -#ifdef DEBUG - printf("cLUWorkInit: not aligned, extra %d\n", extra); -#endif - Glu->stack.top2 -= extra; - Glu->stack.used += extra; - } - } - if ( ! *dworkptr ) { - fprintf(stderr, "malloc fails for local dworkptr[]."); - return (isize + dsize + n); - } - - return 0; -} - - -/*! \brief Set up pointers for real working arrays. 
- */ -void -cSetRWork(int m, int panel_size, complex *dworkptr, - complex **dense, complex **tempv) -{ - complex zero = {0.0, 0.0}; - - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - *dense = dworkptr; - *tempv = *dense + panel_size*m; - cfill (*dense, m * panel_size, zero); - cfill (*tempv, NUM_TEMPV(m,panel_size,maxsuper,rowblk), zero); -} - -/*! \brief Free the working storage used by factor routines. - */ -void cLUWorkFree(int *iwork, complex *dwork, GlobalLU_t *Glu) -{ - if ( Glu->MemModel == SYSTEM ) { - SUPERLU_FREE (iwork); - SUPERLU_FREE (dwork); - } else { - Glu->stack.used -= (Glu->stack.size - Glu->stack.top2); - Glu->stack.top2 = Glu->stack.size; -/* cStackCompress(Glu); */ - } - - SUPERLU_FREE (Glu->expanders); - Glu->expanders = NULL; -} - -/*! \brief Expand the data structures for L and U during the factorization. - * - *
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -cLUMemXpand(int jcol, - int next, /* number of elements currently in the factors */ - MemType mem_type, /* which type of memory to expand */ - int *maxlen, /* modified - maximum length of a data structure */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - void *new_mem; - -#ifdef DEBUG - printf("cLUMemXpand(): jcol %d, next %d, maxlen %d, MemType %d\n", - jcol, next, *maxlen, mem_type); -#endif - - if (mem_type == USUB) - new_mem = cexpand(maxlen, mem_type, next, 1, Glu); - else - new_mem = cexpand(maxlen, mem_type, next, 0, Glu); - - if ( !new_mem ) { - int nzlmax = Glu->nzlmax; - int nzumax = Glu->nzumax; - int nzlumax = Glu->nzlumax; - fprintf(stderr, "Can't expand MemType %d: jcol %d\n", mem_type, jcol); - return (cmemory_usage(nzlmax, nzumax, nzlumax, Glu->n) + Glu->n); - } - - switch ( mem_type ) { - case LUSUP: - Glu->lusup = (complex *) new_mem; - Glu->nzlumax = *maxlen; - break; - case UCOL: - Glu->ucol = (complex *) new_mem; - Glu->nzumax = *maxlen; - break; - case LSUB: - Glu->lsub = (int *) new_mem; - Glu->nzlmax = *maxlen; - break; - case USUB: - Glu->usub = (int *) new_mem; - Glu->nzumax = *maxlen; - break; - } - - return 0; - -} - - - -void -copy_mem_complex(int howmany, void *old, void *new) -{ - register int i; - complex *dold = old; - complex *dnew = new; - for (i = 0; i < howmany; i++) dnew[i] = dold[i]; -} - -/*! \brief Expand the existing storage to accommodate more fill-ins. 
- */ -void -*cexpand ( - int *prev_len, /* length used from previous call */ - MemType type, /* which part of the memory to expand */ - int len_to_copy, /* size of the memory to be copied to new store */ - int keep_prev, /* = 1: use prev_len; - = 0: compute new_len to expand */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - float EXPAND = 1.5; - float alpha; - void *new_mem, *old_mem; - int new_len, tries, lword, extra, bytes_to_copy; - ExpHeader *expanders = Glu->expanders; /* Array of 4 types of memory */ - - alpha = EXPAND; - - if ( Glu->num_expansions == 0 || keep_prev ) { - /* First time allocate requested */ - new_len = *prev_len; - } else { - new_len = alpha * *prev_len; - } - - if ( type == LSUB || type == USUB ) lword = sizeof(int); - else lword = sizeof(complex); - - if ( Glu->MemModel == SYSTEM ) { - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - if ( Glu->num_expansions != 0 ) { - tries = 0; - if ( keep_prev ) { - if ( !new_mem ) return (NULL); - } else { - while ( !new_mem ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - } - } - if ( type == LSUB || type == USUB ) { - copy_mem_int(len_to_copy, expanders[type].mem, new_mem); - } else { - copy_mem_complex(len_to_copy, expanders[type].mem, new_mem); - } - SUPERLU_FREE (expanders[type].mem); - } - expanders[type].mem = (void *) new_mem; - - } else { /* MemModel == USER */ - if ( Glu->num_expansions == 0 ) { - new_mem = cuser_malloc(new_len * lword, HEAD, Glu); - if ( NotDoubleAlign(new_mem) && - (type == LUSUP || type == UCOL) ) { - old_mem = new_mem; - new_mem = (void *)DoubleAlign(new_mem); - extra = (char*)new_mem - (char*)old_mem; -#ifdef DEBUG - printf("expand(): not aligned, extra %d\n", extra); -#endif - Glu->stack.top1 += extra; - Glu->stack.used += extra; - } - expanders[type].mem = (void *) new_mem; - } else { - tries = 0; - extra = (new_len - 
*prev_len) * lword; - if ( keep_prev ) { - if ( StackFull(extra) ) return (NULL); - } else { - while ( StackFull(extra) ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - extra = (new_len - *prev_len) * lword; - } - } - - if ( type != USUB ) { - new_mem = (void*)((char*)expanders[type + 1].mem + extra); - bytes_to_copy = (char*)Glu->stack.array + Glu->stack.top1 - - (char*)expanders[type + 1].mem; - user_bcopy(expanders[type+1].mem, new_mem, bytes_to_copy); - - if ( type < USUB ) { - Glu->usub = expanders[USUB].mem = - (void*)((char*)expanders[USUB].mem + extra); - } - if ( type < LSUB ) { - Glu->lsub = expanders[LSUB].mem = - (void*)((char*)expanders[LSUB].mem + extra); - } - if ( type < UCOL ) { - Glu->ucol = expanders[UCOL].mem = - (void*)((char*)expanders[UCOL].mem + extra); - } - Glu->stack.top1 += extra; - Glu->stack.used += extra; - if ( type == UCOL ) { - Glu->stack.top1 += extra; /* Add same amount for USUB */ - Glu->stack.used += extra; - } - - } /* if ... */ - - } /* else ... */ - } - - expanders[type].size = new_len; - *prev_len = new_len; - if ( Glu->num_expansions ) ++Glu->num_expansions; - - return (void *) expanders[type].mem; - -} /* cexpand */ - - -/*! \brief Compress the work[] array to remove fragmentation. 
- */ -void -cStackCompress(GlobalLU_t *Glu) -{ - register int iword, dword, ndim; - char *last, *fragment; - int *ifrom, *ito; - complex *dfrom, *dto; - int *xlsub, *lsub, *xusub, *usub, *xlusup; - complex *ucol, *lusup; - - iword = sizeof(int); - dword = sizeof(complex); - ndim = Glu->n; - - xlsub = Glu->xlsub; - lsub = Glu->lsub; - xusub = Glu->xusub; - usub = Glu->usub; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - lusup = Glu->lusup; - - dfrom = ucol; - dto = (complex *)((char*)lusup + xlusup[ndim] * dword); - copy_mem_complex(xusub[ndim], dfrom, dto); - ucol = dto; - - ifrom = lsub; - ito = (int *) ((char*)ucol + xusub[ndim] * iword); - copy_mem_int(xlsub[ndim], ifrom, ito); - lsub = ito; - - ifrom = usub; - ito = (int *) ((char*)lsub + xlsub[ndim] * iword); - copy_mem_int(xusub[ndim], ifrom, ito); - usub = ito; - - last = (char*)usub + xusub[ndim] * iword; - fragment = (char*) (((char*)Glu->stack.array + Glu->stack.top1) - last); - Glu->stack.used -= (long int) fragment; - Glu->stack.top1 -= (long int) fragment; - - Glu->ucol = ucol; - Glu->lsub = lsub; - Glu->usub = usub; - -#ifdef DEBUG - printf("cStackCompress: fragment %d\n", fragment); - /* for (last = 0; last < ndim; ++last) - print_lu_col("After compress:", last, 0);*/ -#endif - -} - -/*! 
\brief Allocate storage for original matrix A - */ -void -callocateA(int n, int nnz, complex **a, int **asub, int **xa) -{ - *a = (complex *) complexMalloc(nnz); - *asub = (int *) intMalloc(nnz); - *xa = (int *) intMalloc(n+1); -} - - -complex *complexMalloc(int n) -{ - complex *buf; - buf = (complex *) SUPERLU_MALLOC((size_t)n * sizeof(complex)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in complexMalloc()\n"); - } - return (buf); -} - -complex *complexCalloc(int n) -{ - complex *buf; - register int i; - complex zero = {0.0, 0.0}; - buf = (complex *) SUPERLU_MALLOC((size_t)n * sizeof(complex)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in complexCalloc()\n"); - } - for (i = 0; i < n; ++i) buf[i] = zero; - return (buf); -} - - -int cmemory_usage(const int nzlmax, const int nzumax, - const int nzlumax, const int n) -{ - register int iword, dword; - - iword = sizeof(int); - dword = sizeof(complex); - - return (10 * n * iword + - nzlmax * iword + nzumax * (iword + dword) + nzlumax * dword); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/colamd.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/colamd.c deleted file mode 100644 index 72c4390dc0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/colamd.c +++ /dev/null @@ -1,3414 +0,0 @@ -/*! @file colamd.c - *\brief A sparse matrix column ordering algorithm - -
    -    ========================================================================== 
    -    === colamd/symamd - a sparse matrix column ordering algorithm ============ 
    -    ========================================================================== 
    -
    -
    -    colamd:  an approximate minimum degree column ordering algorithm,
    -    	for LU factorization of symmetric or unsymmetric matrices,
    -	QR factorization, least squares, interior point methods for
    -	linear programming problems, and other related problems.
    -
    -    symamd:  an approximate minimum degree ordering algorithm for Cholesky
    -    	factorization of symmetric matrices.
    -
    -    Purpose:
    -
    -	Colamd computes a permutation Q such that the Cholesky factorization of
    -	(AQ)'(AQ) has less fill-in and requires fewer floating point operations
    -	than A'A.  This also provides a good ordering for sparse partial
    -	pivoting methods, P(AQ) = LU, where Q is computed prior to numerical
    -	factorization, and P is computed during numerical factorization via
    -	conventional partial pivoting with row interchanges.  Colamd is the
    -	column ordering method used in SuperLU, part of the ScaLAPACK library.
    -	It is also available as built-in function in MATLAB Version 6,
    -	available from MathWorks, Inc. (http://www.mathworks.com).  This
    -	routine can be used in place of colmmd in MATLAB.
    -
    -    	Symamd computes a permutation P of a symmetric matrix A such that the
    -	Cholesky factorization of PAP' has less fill-in and requires fewer
    -	floating point operations than A.  Symamd constructs a matrix M such
    -	that M'M has the same nonzero pattern of A, and then orders the columns
    -	of M using colmmd.  The column ordering of M is then returned as the
    -	row and column ordering P of A. 
    -
    -    Authors:
    -
    -	The authors of the code itself are Stefan I. Larimore and Timothy A.
    -	Davis (davis@cise.ufl.edu), University of Florida.  The algorithm was
    -	developed in collaboration with John Gilbert, Xerox PARC, and Esmond
    -	Ng, Oak Ridge National Laboratory.
    -
    -    Date:
    -
    -	September 8, 2003.  Version 2.3.
    -
    -    Acknowledgements:
    -
    -	This work was supported by the National Science Foundation, under
    -	grants DMS-9504974 and DMS-9803599.
    -
    -    Copyright and License:
    -
    -	Copyright (c) 1998-2003 by the University of Florida.
    -	All Rights Reserved.
    -
    -	THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    -	EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    -
    -	Permission is hereby granted to use, copy, modify, and/or distribute
    -	this program, provided that the Copyright, this License, and the
    -	Availability of the original version is retained on all copies and made
    -	accessible to the end-user of any code or package that includes COLAMD
    -	or any modified version of COLAMD. 
    -
    -    Availability:
    -
    -	The colamd/symamd library is available at
    -
    -	    http://www.cise.ufl.edu/research/sparse/colamd/
    -
    -	This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.c
    -	file.  It requires the colamd.h file.  It is required by the colamdmex.c
    -	and symamdmex.c files, for the MATLAB interface to colamd and symamd.
    -
    -    See the ChangeLog file for changes since Version 1.0.
    -
    -    ========================================================================== 
    -    === Description of user-callable routines ================================ 
    -    ========================================================================== 
    -
    -
    -    ----------------------------------------------------------------------------
    -    colamd_recommended:
    -    ----------------------------------------------------------------------------
    -
    -	C syntax:
    -
    -	    #include "colamd.h"
    -	    int colamd_recommended (int nnz, int n_row, int n_col) ;
    -
    -	    or as a C macro
    -
    -	    #include "colamd.h"
    -	    Alen = COLAMD_RECOMMENDED (int nnz, int n_row, int n_col) ;
    -
    -	Purpose:
    -
    -	    Returns recommended value of Alen for use by colamd.  Returns -1
    -	    if any input argument is negative.  The use of this routine
    -	    or macro is optional.  Note that the macro uses its arguments
    -	    more than once, so be careful for side effects, if you pass
    -	    expressions as arguments to COLAMD_RECOMMENDED.  Not needed for
    -	    symamd, which dynamically allocates its own memory.
    -
    -	Arguments (all input arguments):
    -
    -	    int nnz ;		Number of nonzeros in the matrix A.  This must
    -				be the same value as p [n_col] in the call to
    -				colamd - otherwise you will get a wrong value
    -				of the recommended memory to use.
    -
    -	    int n_row ;		Number of rows in the matrix A.
    -
    -	    int n_col ;		Number of columns in the matrix A.
    -
    -    ----------------------------------------------------------------------------
    -    colamd_set_defaults:
    -    ----------------------------------------------------------------------------
    -
    -	C syntax:
    -
    -	    #include "colamd.h"
    -	    colamd_set_defaults (double knobs [COLAMD_KNOBS]) ;
    -
    -	Purpose:
    -
    -	    Sets the default parameters.  The use of this routine is optional.
    -
    -	Arguments:
    -
    -	    double knobs [COLAMD_KNOBS] ;	Output only.
    -
    -		Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)
    -		entries are removed prior to ordering.  Columns with more than
    -		(knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to
    -		ordering, and placed last in the output column ordering. 
    -
    -		Symamd: uses only knobs [COLAMD_DENSE_ROW], which is knobs [0].
    -		Rows and columns with more than (knobs [COLAMD_DENSE_ROW] * n)
    -		entries are removed prior to ordering, and placed last in the
    -		output ordering.
    -
    -		COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
    -		respectively, in colamd.h.  Default values of these two knobs
    -		are both 0.5.  Currently, only knobs [0] and knobs [1] are
    -		used, but future versions may use more knobs.  If so, they will
    -		be properly set to their defaults by the future version of
    -		colamd_set_defaults, so that the code that calls colamd will
    -		not need to change, assuming that you either use
    -		colamd_set_defaults, or pass a (double *) NULL pointer as the
    -		knobs array to colamd or symamd.
    -
    -    ----------------------------------------------------------------------------
    -    colamd:
    -    ----------------------------------------------------------------------------
    -
    -	C syntax:
    -
    -	    #include "colamd.h"
    -	    int colamd (int n_row, int n_col, int Alen, int *A, int *p,
    -	    	double knobs [COLAMD_KNOBS], int stats [COLAMD_STATS]) ;
    -
    -	Purpose:
    -
    -	    Computes a column ordering (Q) of A such that P(AQ)=LU or
    -	    (AQ)'AQ=LL' have less fill-in and require fewer floating point
    -	    operations than factorizing the unpermuted matrix A or A'A,
    -	    respectively.
    -	    
    -	Returns:
    -
    -	    TRUE (1) if successful, FALSE (0) otherwise.
    -
    -	Arguments:
    -
    -	    int n_row ;		Input argument.
    -
    -		Number of rows in the matrix A.
    -		Restriction:  n_row >= 0.
    -		Colamd returns FALSE if n_row is negative.
    -
    -	    int n_col ;		Input argument.
    -
    -		Number of columns in the matrix A.
    -		Restriction:  n_col >= 0.
    -		Colamd returns FALSE if n_col is negative.
    -
    -	    int Alen ;		Input argument.
    -
    -		Restriction (see note):
    -		Alen >= 2*nnz + 6*(n_col+1) + 4*(n_row+1) + n_col
    -		Colamd returns FALSE if these conditions are not met.
    -
    -		Note:  this restriction makes an modest assumption regarding
    -		the size of the two typedef's structures in colamd.h.
    -		We do, however, guarantee that
    -
    -			Alen >= colamd_recommended (nnz, n_row, n_col)
    -		
    -		or equivalently as a C preprocessor macro: 
    -
    -			Alen >= COLAMD_RECOMMENDED (nnz, n_row, n_col)
    -
    -		will be sufficient.
    -
    -	    int A [Alen] ;	Input argument, undefined on output.
    -
    -		A is an integer array of size Alen.  Alen must be at least as
    -		large as the bare minimum value given above, but this is very
    -		low, and can result in excessive run time.  For best
    -		performance, we recommend that Alen be greater than or equal to
    -		colamd_recommended (nnz, n_row, n_col), which adds
    -		nnz/5 to the bare minimum value given above.
    -
    -		On input, the row indices of the entries in column c of the
    -		matrix are held in A [(p [c]) ... (p [c+1]-1)].  The row indices
    -		in a given column c need not be in ascending order, and
    -		duplicate row indices may be be present.  However, colamd will
    -		work a little faster if both of these conditions are met
    -		(Colamd puts the matrix into this format, if it finds that the
    -		the conditions are not met).
    -
    -		The matrix is 0-based.  That is, rows are in the range 0 to
    -		n_row-1, and columns are in the range 0 to n_col-1.  Colamd
    -		returns FALSE if any row index is out of range.
    -
    -		The contents of A are modified during ordering, and are
    -		undefined on output.
    -
    -	    int p [n_col+1] ;	Both input and output argument.
    -
    -		p is an integer array of size n_col+1.  On input, it holds the
    -		"pointers" for the column form of the matrix A.  Column c of
    -		the matrix A is held in A [(p [c]) ... (p [c+1]-1)].  The first
    -		entry, p [0], must be zero, and p [c] <= p [c+1] must hold
    -		for all c in the range 0 to n_col-1.  The value p [n_col] is
    -		thus the total number of entries in the pattern of the matrix A.
    -		Colamd returns FALSE if these conditions are not met.
    -
    -		On output, if colamd returns TRUE, the array p holds the column
    -		permutation (Q, for P(AQ)=LU or (AQ)'(AQ)=LL'), where p [0] is
    -		the first column index in the new ordering, and p [n_col-1] is
    -		the last.  That is, p [k] = j means that column j of A is the
    -		kth pivot column, in AQ, where k is in the range 0 to n_col-1
    -		(p [0] = j means that column j of A is the first column in AQ).
    -
    -		If colamd returns FALSE, then no permutation is returned, and
    -		p is undefined on output.
    -
    -	    double knobs [COLAMD_KNOBS] ;	Input argument.
    -
    -		See colamd_set_defaults for a description.
    -
    -	    int stats [COLAMD_STATS] ;		Output argument.
    -
    -		Statistics on the ordering, and error status.
    -		See colamd.h for related definitions.
    -		Colamd returns FALSE if stats is not present.
    -
    -		stats [0]:  number of dense or empty rows ignored.
    -
    -		stats [1]:  number of dense or empty columns ignored (and
    -				ordered last in the output permutation p)
    -				Note that a row can become "empty" if it
    -				contains only "dense" and/or "empty" columns,
    -				and similarly a column can become "empty" if it
    -				only contains "dense" and/or "empty" rows.
    -
    -		stats [2]:  number of garbage collections performed.
    -				This can be excessively high if Alen is close
    -				to the minimum required value.
    -
    -		stats [3]:  status code.  < 0 is an error code.
    -			    > 1 is a warning or notice.
    -
    -			0	OK.  Each column of the input matrix contained
    -				row indices in increasing order, with no
    -				duplicates.
    -
    -			1	OK, but columns of input matrix were jumbled
    -				(unsorted columns or duplicate entries).  Colamd
    -				had to do some extra work to sort the matrix
    -				first and remove duplicate entries, but it
    -				still was able to return a valid permutation
    -				(return value of colamd was TRUE).
    -
    -					stats [4]: highest numbered column that
    -						is unsorted or has duplicate
    -						entries.
    -					stats [5]: last seen duplicate or
    -						unsorted row index.
    -					stats [6]: number of duplicate or
    -						unsorted row indices.
    -
    -			-1	A is a null pointer
    -
    -			-2	p is a null pointer
    -
    -			-3 	n_row is negative
    -
    -					stats [4]: n_row
    -
    -			-4	n_col is negative
    -
    -					stats [4]: n_col
    -
    -			-5	number of nonzeros in matrix is negative
    -
    -					stats [4]: number of nonzeros, p [n_col]
    -
    -			-6	p [0] is nonzero
    -
    -					stats [4]: p [0]
    -
    -			-7	A is too small
    -
    -					stats [4]: required size
    -					stats [5]: actual size (Alen)
    -
    -			-8	a column has a negative number of entries
    -
    -					stats [4]: column with < 0 entries
    -					stats [5]: number of entries in col
    -
    -			-9	a row index is out of bounds
    -
    -					stats [4]: column with bad row index
    -					stats [5]: bad row index
    -					stats [6]: n_row, # of rows of matrx
    -
    -			-10	(unused; see symamd.c)
    -
    -			-999	(unused; see symamd.c)
    -
    -		Future versions may return more statistics in the stats array.
    -
    -	Example:
    -	
    -	    See http://www.cise.ufl.edu/research/sparse/colamd/example.c
    -	    for a complete example.
    -
    -	    To order the columns of a 5-by-4 matrix with 11 nonzero entries in
    -	    the following nonzero pattern
    -
    -	    	x 0 x 0
    -		x 0 x x
    -		0 x x 0
    -		0 0 x x
    -		x x 0 0
    -
    -	    with default knobs and no output statistics, do the following:
    -
    -		#include "colamd.h"
    -		#define ALEN COLAMD_RECOMMENDED (11, 5, 4)
    -		int A [ALEN] = {1, 2, 5, 3, 5, 1, 2, 3, 4, 2, 4} ;
    -		int p [ ] = {0, 3, 5, 9, 11} ;
    -		int stats [COLAMD_STATS] ;
    -		colamd (5, 4, ALEN, A, p, (double *) NULL, stats) ;
    -
    -	    The permutation is returned in the array p, and A is destroyed.
    -
    -    ----------------------------------------------------------------------------
    -    symamd:
    -    ----------------------------------------------------------------------------
    -
    -	C syntax:
    -
    -	    #include "colamd.h"
    -	    int symamd (int n, int *A, int *p, int *perm,
    -	    	double knobs [COLAMD_KNOBS], int stats [COLAMD_STATS],
    -		void (*allocate) (size_t, size_t), void (*release) (void *)) ;
    -
    -	Purpose:
    -
    -    	    The symamd routine computes an ordering P of a symmetric sparse
    -	    matrix A such that the Cholesky factorization PAP' = LL' remains
    -	    sparse.  It is based on a column ordering of a matrix M constructed
    -	    so that the nonzero pattern of M'M is the same as A.  The matrix A
    -	    is assumed to be symmetric; only the strictly lower triangular part
    -	    is accessed.  You must pass your selected memory allocator (usually
    -	    calloc/free or mxCalloc/mxFree) to symamd, for it to allocate
    -	    memory for the temporary matrix M.
    -
    -	Returns:
    -
    -	    TRUE (1) if successful, FALSE (0) otherwise.
    -
    -	Arguments:
    -
    -	    int n ;		Input argument.
    -
    -	    	Number of rows and columns in the symmetrix matrix A.
    -		Restriction:  n >= 0.
    -		Symamd returns FALSE if n is negative.
    -
    -	    int A [nnz] ;	Input argument.
    -
    -	    	A is an integer array of size nnz, where nnz = p [n].
    -		
    -		The row indices of the entries in column c of the matrix are
    -		held in A [(p [c]) ... (p [c+1]-1)].  The row indices in a
    -		given column c need not be in ascending order, and duplicate
    -		row indices may be present.  However, symamd will run faster
    -		if the columns are in sorted order with no duplicate entries. 
    -
    -		The matrix is 0-based.  That is, rows are in the range 0 to
    -		n-1, and columns are in the range 0 to n-1.  Symamd
    -		returns FALSE if any row index is out of range.
    -
    -		The contents of A are not modified.
    -
    -	    int p [n+1] ;   	Input argument.
    -
    -		p is an integer array of size n+1.  On input, it holds the
    -		"pointers" for the column form of the matrix A.  Column c of
    -		the matrix A is held in A [(p [c]) ... (p [c+1]-1)].  The first
    -		entry, p [0], must be zero, and p [c] <= p [c+1] must hold
    -		for all c in the range 0 to n-1.  The value p [n] is
    -		thus the total number of entries in the pattern of the matrix A.
    -		Symamd returns FALSE if these conditions are not met.
    -
    -		The contents of p are not modified.
    -
    -	    int perm [n+1] ;   	Output argument.
    -
    -		On output, if symamd returns TRUE, the array perm holds the
    -		permutation P, where perm [0] is the first index in the new
    -		ordering, and perm [n-1] is the last.  That is, perm [k] = j
    -		means that row and column j of A is the kth column in PAP',
    -		where k is in the range 0 to n-1 (perm [0] = j means
    -		that row and column j of A are the first row and column in
    -		PAP').  The array is used as a workspace during the ordering,
    -		which is why it must be of length n+1, not just n.
    -
    -	    double knobs [COLAMD_KNOBS] ;	Input argument.
    -
    -		See colamd_set_defaults for a description.
    -
    -	    int stats [COLAMD_STATS] ;		Output argument.
    -
    -		Statistics on the ordering, and error status.
    -		See colamd.h for related definitions.
    -		Symamd returns FALSE if stats is not present.
    -
    -		stats [0]:  number of dense or empty row and columns ignored
    -				(and ordered last in the output permutation 
    -				perm).  Note that a row/column can become
    -				"empty" if it contains only "dense" and/or
    -				"empty" columns/rows.
    -
    -		stats [1]:  (same as stats [0])
    -
    -		stats [2]:  number of garbage collections performed.
    -
    -		stats [3]:  status code.  < 0 is an error code.
    -			    > 1 is a warning or notice.
    -
    -			0	OK.  Each column of the input matrix contained
    -				row indices in increasing order, with no
    -				duplicates.
    -
    -			1	OK, but columns of input matrix were jumbled
    -				(unsorted columns or duplicate entries).  Symamd
    -				had to do some extra work to sort the matrix
    -				first and remove duplicate entries, but it
    -				still was able to return a valid permutation
    -				(return value of symamd was TRUE).
    -
    -					stats [4]: highest numbered column that
    -						is unsorted or has duplicate
    -						entries.
    -					stats [5]: last seen duplicate or
    -						unsorted row index.
    -					stats [6]: number of duplicate or
    -						unsorted row indices.
    -
    -			-1	A is a null pointer
    -
    -			-2	p is a null pointer
    -
    -			-3	(unused, see colamd.c)
    -
    -			-4 	n is negative
    -
    -					stats [4]: n
    -
    -			-5	number of nonzeros in matrix is negative
    -
    -					stats [4]: # of nonzeros (p [n]).
    -
    -			-6	p [0] is nonzero
    -
    -					stats [4]: p [0]
    -
    -			-7	(unused)
    -
    -			-8	a column has a negative number of entries
    -
    -					stats [4]: column with < 0 entries
    -					stats [5]: number of entries in col
    -
    -			-9	a row index is out of bounds
    -
    -					stats [4]: column with bad row index
    -					stats [5]: bad row index
    -					stats [6]: n_row, # of rows of matrx
    -
    -			-10	out of memory (unable to allocate temporary
    -				workspace for M or count arrays using the
    -				"allocate" routine passed into symamd).
    -
    -			-999	internal error.  colamd failed to order the
    -				matrix M, when it should have succeeded.  This
    -				indicates a bug.  If this (and *only* this)
    -				error code occurs, please contact the authors.
    -				Don't contact the authors if you get any other
    -				error code.
    -
    -		Future versions may return more statistics in the stats array.
    -
    -	    void * (*allocate) (size_t, size_t)
    -
    -	    	A pointer to a function providing memory allocation.  The
    -		allocated memory must be returned initialized to zero.  For a
    -		C application, this argument should normally be a pointer to
    -		calloc.  For a MATLAB mexFunction, the routine mxCalloc is
    -		passed instead.
    -
    -	    void (*release) (size_t, size_t)
    -
    -	    	A pointer to a function that frees memory allocated by the
    -		memory allocation routine above.  For a C application, this
    -		argument should normally be a pointer to free.  For a MATLAB
    -		mexFunction, the routine mxFree is passed instead.
    -
    -
    -    ----------------------------------------------------------------------------
    -    colamd_report:
    -    ----------------------------------------------------------------------------
    -
    -	C syntax:
    -
    -	    #include "colamd.h"
    -	    colamd_report (int stats [COLAMD_STATS]) ;
    -
    -	Purpose:
    -
    -	    Prints the error status and statistics recorded in the stats
    -	    array on the standard error output (for a standard C routine)
    -	    or on the MATLAB output (for a mexFunction).
    -
    -	Arguments:
    -
    -	    int stats [COLAMD_STATS] ;	Input only.  Statistics from colamd.
    -
    -
    -    ----------------------------------------------------------------------------
    -    symamd_report:
    -    ----------------------------------------------------------------------------
    -
    -	C syntax:
    -
    -	    #include "colamd.h"
    -	    symamd_report (int stats [COLAMD_STATS]) ;
    -
    -	Purpose:
    -
    -	    Prints the error status and statistics recorded in the stats
    -	    array on the standard error output (for a standard C routine)
    -	    or on the MATLAB output (for a mexFunction).
    -
    -	Arguments:
    -
    -	    int stats [COLAMD_STATS] ;	Input only.  Statistics from symamd.
    -
    - 
    -*/ - -/* ========================================================================== */ -/* === Scaffolding code definitions ======================================== */ -/* ========================================================================== */ - -/* Ensure that debugging is turned off: */ -#ifndef NDEBUG -#define NDEBUG -#endif /* NDEBUG */ - -/* - Our "scaffolding code" philosophy: In our opinion, well-written library - code should keep its "debugging" code, and just normally have it turned off - by the compiler so as not to interfere with performance. This serves - several purposes: - - (1) assertions act as comments to the reader, telling you what the code - expects at that point. All assertions will always be true (unless - there really is a bug, of course). - - (2) leaving in the scaffolding code assists anyone who would like to modify - the code, or understand the algorithm (by reading the debugging output, - one can get a glimpse into what the code is doing). - - (3) (gasp!) for actually finding bugs. This code has been heavily tested - and "should" be fully functional and bug-free ... but you never know... - - To enable debugging, comment out the "#define NDEBUG" above. For a MATLAB - mexFunction, you will also need to modify mexopts.sh to remove the -DNDEBUG - definition. The code will become outrageously slow when debugging is - enabled. To control the level of debugging output, set an environment - variable D to 0 (little), 1 (some), 2, 3, or 4 (lots). When debugging, - you should see the following message on the standard output: - - colamd: debug version, D = 1 (THIS WILL BE SLOW!) - - or a similar message for symamd. If you don't, then debugging has not - been enabled. 
- -*/ - -/* ========================================================================== */ -/* === Include files ======================================================== */ -/* ========================================================================== */ - -#include "colamd.h" -#include - -#ifdef MATLAB_MEX_FILE -#include "mex.h" -#include "matrix.h" -#else -#include -#include -#endif /* MATLAB_MEX_FILE */ - -/* ========================================================================== */ -/* === Definitions ========================================================== */ -/* ========================================================================== */ - -/* Routines are either PUBLIC (user-callable) or PRIVATE (not user-callable) */ -#define PUBLIC -#define PRIVATE static - -#define MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define MIN(a,b) (((a) < (b)) ? (a) : (b)) - -#define ONES_COMPLEMENT(r) (-(r)-1) - -/* -------------------------------------------------------------------------- */ -/* Change for version 2.1: define TRUE and FALSE only if not yet defined */ -/* -------------------------------------------------------------------------- */ - -#ifndef TRUE -#define TRUE (1) -#endif - -#ifndef FALSE -#define FALSE (0) -#endif - -/* -------------------------------------------------------------------------- */ - -#define EMPTY (-1) - -/* Row and column status */ -#define ALIVE (0) -#define DEAD (-1) - -/* Column status */ -#define DEAD_PRINCIPAL (-1) -#define DEAD_NON_PRINCIPAL (-2) - -/* Macros for row and column status update and checking. 
*/ -#define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark) -#define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE) -#define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE) -#define COL_IS_DEAD(c) (Col [c].start < ALIVE) -#define COL_IS_ALIVE(c) (Col [c].start >= ALIVE) -#define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL) -#define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; } -#define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; } -#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; } - -/* ========================================================================== */ -/* === Colamd reporting mechanism =========================================== */ -/* ========================================================================== */ - -#ifdef MATLAB_MEX_FILE - -/* use mexPrintf in a MATLAB mexFunction, for debugging and statistics output */ -#define PRINTF mexPrintf - -/* In MATLAB, matrices are 1-based to the user, but 0-based internally */ -#define INDEX(i) ((i)+1) - -#else - -/* Use printf in standard C environment, for debugging and statistics output. */ -/* Output is generated only if debugging is enabled at compile time, or if */ -/* the caller explicitly calls colamd_report or symamd_report. 
*/ -#define PRINTF printf - -/* In C, matrices are 0-based and indices are reported as such in *_report */ -#define INDEX(i) (i) - -#endif /* MATLAB_MEX_FILE */ - -/* ========================================================================== */ -/* === Prototypes of PRIVATE routines ======================================= */ -/* ========================================================================== */ - -PRIVATE int init_rows_cols -( - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [], - int p [], - int stats [COLAMD_STATS] -) ; - -PRIVATE void init_scoring -( - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [], - int head [], - double knobs [COLAMD_KNOBS], - int *p_n_row2, - int *p_n_col2, - int *p_max_deg -) ; - -PRIVATE int find_ordering -( - int n_row, - int n_col, - int Alen, - Colamd_Row Row [], - Colamd_Col Col [], - int A [], - int head [], - int n_col2, - int max_deg, - int pfree -) ; - -PRIVATE void order_children -( - int n_col, - Colamd_Col Col [], - int p [] -) ; - -PRIVATE void detect_super_cols -( - -#ifndef NDEBUG - int n_col, - Colamd_Row Row [], -#endif /* NDEBUG */ - - Colamd_Col Col [], - int A [], - int head [], - int row_start, - int row_length -) ; - -PRIVATE int garbage_collection -( - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [], - int *pfree -) ; - -PRIVATE int clear_mark -( - int n_row, - Colamd_Row Row [] -) ; - -PRIVATE void print_report -( - char *method, - int stats [COLAMD_STATS] -) ; - -/* ========================================================================== */ -/* === Debugging prototypes and definitions ================================= */ -/* ========================================================================== */ - -#ifndef NDEBUG - -/* colamd_debug is the *ONLY* global variable, and is only */ -/* present when debugging */ - -PRIVATE int colamd_debug ; /* debug print level */ - -#define DEBUG0(params) { (void) PRINTF params ; } 
-#define DEBUG1(params) { if (colamd_debug >= 1) (void) PRINTF params ; } -#define DEBUG2(params) { if (colamd_debug >= 2) (void) PRINTF params ; } -#define DEBUG3(params) { if (colamd_debug >= 3) (void) PRINTF params ; } -#define DEBUG4(params) { if (colamd_debug >= 4) (void) PRINTF params ; } - -#ifdef MATLAB_MEX_FILE -#define ASSERT(expression) (mxAssert ((expression), "")) -#else -#define ASSERT(expression) (assert (expression)) -#endif /* MATLAB_MEX_FILE */ - -PRIVATE void colamd_get_debug /* gets the debug print level from getenv */ -( - char *method -) ; - -PRIVATE void debug_deg_lists -( - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int head [], - int min_score, - int should, - int max_deg -) ; - -PRIVATE void debug_mark -( - int n_row, - Colamd_Row Row [], - int tag_mark, - int max_mark -) ; - -PRIVATE void debug_matrix -( - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [] -) ; - -PRIVATE void debug_structures -( - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [], - int n_col2 -) ; - -#else /* NDEBUG */ - -/* === No debugging ========================================================= */ - -#define DEBUG0(params) ; -#define DEBUG1(params) ; -#define DEBUG2(params) ; -#define DEBUG3(params) ; -#define DEBUG4(params) ; - -#define ASSERT(expression) ((void) 0) - -#endif /* NDEBUG */ - -/* ========================================================================== */ - - - -/* ========================================================================== */ -/* === USER-CALLABLE ROUTINES: ============================================== */ -/* ========================================================================== */ - - -/* ========================================================================== */ -/* === colamd_recommended =================================================== */ -/* ========================================================================== */ - -/* - The 
colamd_recommended routine returns the suggested size for Alen. This - value has been determined to provide good balance between the number of - garbage collections and the memory requirements for colamd. If any - argument is negative, a -1 is returned as an error condition. This - function is also available as a macro defined in colamd.h, so that you - can use it for a statically-allocated array size. -*/ - -PUBLIC int colamd_recommended /* returns recommended value of Alen. */ -( - /* === Parameters ======================================================= */ - - int nnz, /* number of nonzeros in A */ - int n_row, /* number of rows in A */ - int n_col /* number of columns in A */ -) -{ - return (COLAMD_RECOMMENDED (nnz, n_row, n_col)) ; -} - - -/* ========================================================================== */ -/* === colamd_set_defaults ================================================== */ -/* ========================================================================== */ - -/* - The colamd_set_defaults routine sets the default values of the user- - controllable parameters for colamd: - - knobs [0] rows with knobs[0]*n_col entries or more are removed - prior to ordering in colamd. Rows and columns with - knobs[0]*n_col entries or more are removed prior to - ordering in symamd and placed last in the output - ordering. - - knobs [1] columns with knobs[1]*n_row entries or more are removed - prior to ordering in colamd, and placed last in the - column permutation. Symamd ignores this knob. 
- - knobs [2..19] unused, but future versions might use this -*/ - -PUBLIC void colamd_set_defaults -( - /* === Parameters ======================================================= */ - - double knobs [COLAMD_KNOBS] /* knob array */ -) -{ - /* === Local variables ================================================== */ - - int i ; - - if (!knobs) - { - return ; /* no knobs to initialize */ - } - for (i = 0 ; i < COLAMD_KNOBS ; i++) - { - knobs [i] = 0 ; - } - knobs [COLAMD_DENSE_ROW] = 0.5 ; /* ignore rows over 50% dense */ - knobs [COLAMD_DENSE_COL] = 0.5 ; /* ignore columns over 50% dense */ -} - - -/* ========================================================================== */ -/* === symamd =============================================================== */ -/* ========================================================================== */ - -PUBLIC int symamd /* return TRUE if OK, FALSE otherwise */ -( - /* === Parameters ======================================================= */ - - int n, /* number of rows and columns of A */ - int A [], /* row indices of A */ - int p [], /* column pointers of A */ - int perm [], /* output permutation, size n+1 */ - double knobs [COLAMD_KNOBS], /* parameters (uses defaults if NULL) */ - int stats [COLAMD_STATS], /* output statistics and error codes */ - void * (*allocate) (size_t, size_t), - /* pointer to calloc (ANSI C) or */ - /* mxCalloc (for MATLAB mexFunction) */ - void (*release) (void *) - /* pointer to free (ANSI C) or */ - /* mxFree (for MATLAB mexFunction) */ -) -{ - /* === Local variables ================================================== */ - - int *count ; /* length of each column of M, and col pointer*/ - int *mark ; /* mark array for finding duplicate entries */ - int *M ; /* row indices of matrix M */ - int Mlen ; /* length of M */ - int n_row ; /* number of rows in M */ - int nnz ; /* number of entries in A */ - int i ; /* row index of A */ - int j ; /* column index of A */ - int k ; /* row index of M */ - int mnz 
; /* number of nonzeros in M */ - int pp ; /* index into a column of A */ - int last_row ; /* last row seen in the current column */ - int length ; /* number of nonzeros in a column */ - - double cknobs [COLAMD_KNOBS] ; /* knobs for colamd */ - double default_knobs [COLAMD_KNOBS] ; /* default knobs for colamd */ - int cstats [COLAMD_STATS] ; /* colamd stats */ - -#ifndef NDEBUG - colamd_get_debug ("symamd") ; -#endif /* NDEBUG */ - - /* === Check the input arguments ======================================== */ - - if (!stats) - { - DEBUG0 (("symamd: stats not present\n")) ; - return (FALSE) ; - } - for (i = 0 ; i < COLAMD_STATS ; i++) - { - stats [i] = 0 ; - } - stats [COLAMD_STATUS] = COLAMD_OK ; - stats [COLAMD_INFO1] = -1 ; - stats [COLAMD_INFO2] = -1 ; - - if (!A) - { - stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ; - DEBUG0 (("symamd: A not present\n")) ; - return (FALSE) ; - } - - if (!p) /* p is not present */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ; - DEBUG0 (("symamd: p not present\n")) ; - return (FALSE) ; - } - - if (n < 0) /* n must be >= 0 */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ; - stats [COLAMD_INFO1] = n ; - DEBUG0 (("symamd: n negative %d\n", n)) ; - return (FALSE) ; - } - - nnz = p [n] ; - if (nnz < 0) /* nnz must be >= 0 */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ; - stats [COLAMD_INFO1] = nnz ; - DEBUG0 (("symamd: number of entries negative %d\n", nnz)) ; - return (FALSE) ; - } - - if (p [0] != 0) - { - stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ; - stats [COLAMD_INFO1] = p [0] ; - DEBUG0 (("symamd: p[0] not zero %d\n", p [0])) ; - return (FALSE) ; - } - - /* === If no knobs, set default knobs =================================== */ - - if (!knobs) - { - colamd_set_defaults (default_knobs) ; - knobs = default_knobs ; - } - - /* === Allocate count and mark ========================================== */ - - count = (int *) ((*allocate) (n+1, sizeof (int))) ; - if (!count) - { - stats 
[COLAMD_STATUS] = COLAMD_ERROR_out_of_memory ; - DEBUG0 (("symamd: allocate count (size %d) failed\n", n+1)) ; - return (FALSE) ; - } - - mark = (int *) ((*allocate) (n+1, sizeof (int))) ; - if (!mark) - { - stats [COLAMD_STATUS] = COLAMD_ERROR_out_of_memory ; - (*release) ((void *) count) ; - DEBUG0 (("symamd: allocate mark (size %d) failed\n", n+1)) ; - return (FALSE) ; - } - - /* === Compute column counts of M, check if A is valid ================== */ - - stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/ - - for (i = 0 ; i < n ; i++) - { - mark [i] = -1 ; - } - - for (j = 0 ; j < n ; j++) - { - last_row = -1 ; - - length = p [j+1] - p [j] ; - if (length < 0) - { - /* column pointers must be non-decreasing */ - stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ; - stats [COLAMD_INFO1] = j ; - stats [COLAMD_INFO2] = length ; - (*release) ((void *) count) ; - (*release) ((void *) mark) ; - DEBUG0 (("symamd: col %d negative length %d\n", j, length)) ; - return (FALSE) ; - } - - for (pp = p [j] ; pp < p [j+1] ; pp++) - { - i = A [pp] ; - if (i < 0 || i >= n) - { - /* row index i, in column j, is out of bounds */ - stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ; - stats [COLAMD_INFO1] = j ; - stats [COLAMD_INFO2] = i ; - stats [COLAMD_INFO3] = n ; - (*release) ((void *) count) ; - (*release) ((void *) mark) ; - DEBUG0 (("symamd: row %d col %d out of bounds\n", i, j)) ; - return (FALSE) ; - } - - if (i <= last_row || mark [i] == j) - { - /* row index is unsorted or repeated (or both), thus col */ - /* is jumbled. This is a notice, not an error condition. 
*/ - stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ; - stats [COLAMD_INFO1] = j ; - stats [COLAMD_INFO2] = i ; - (stats [COLAMD_INFO3]) ++ ; - DEBUG1 (("symamd: row %d col %d unsorted/duplicate\n", i, j)) ; - } - - if (i > j && mark [i] != j) - { - /* row k of M will contain column indices i and j */ - count [i]++ ; - count [j]++ ; - } - - /* mark the row as having been seen in this column */ - mark [i] = j ; - - last_row = i ; - } - } - - if (stats [COLAMD_STATUS] == COLAMD_OK) - { - /* if there are no duplicate entries, then mark is no longer needed */ - (*release) ((void *) mark) ; - } - - /* === Compute column pointers of M ===================================== */ - - /* use output permutation, perm, for column pointers of M */ - perm [0] = 0 ; - for (j = 1 ; j <= n ; j++) - { - perm [j] = perm [j-1] + count [j-1] ; - } - for (j = 0 ; j < n ; j++) - { - count [j] = perm [j] ; - } - - /* === Construct M ====================================================== */ - - mnz = perm [n] ; - n_row = mnz / 2 ; - Mlen = colamd_recommended (mnz, n_row, n) ; - M = (int *) ((*allocate) (Mlen, sizeof (int))) ; - DEBUG0 (("symamd: M is %d-by-%d with %d entries, Mlen = %d\n", - n_row, n, mnz, Mlen)) ; - - if (!M) - { - stats [COLAMD_STATUS] = COLAMD_ERROR_out_of_memory ; - (*release) ((void *) count) ; - (*release) ((void *) mark) ; - DEBUG0 (("symamd: allocate M (size %d) failed\n", Mlen)) ; - return (FALSE) ; - } - - k = 0 ; - - if (stats [COLAMD_STATUS] == COLAMD_OK) - { - /* Matrix is OK */ - for (j = 0 ; j < n ; j++) - { - ASSERT (p [j+1] - p [j] >= 0) ; - for (pp = p [j] ; pp < p [j+1] ; pp++) - { - i = A [pp] ; - ASSERT (i >= 0 && i < n) ; - if (i > j) - { - /* row k of M contains column indices i and j */ - M [count [i]++] = k ; - M [count [j]++] = k ; - k++ ; - } - } - } - } - else - { - /* Matrix is jumbled. Do not add duplicates to M. Unsorted cols OK. 
*/ - DEBUG0 (("symamd: Duplicates in A.\n")) ; - for (i = 0 ; i < n ; i++) - { - mark [i] = -1 ; - } - for (j = 0 ; j < n ; j++) - { - ASSERT (p [j+1] - p [j] >= 0) ; - for (pp = p [j] ; pp < p [j+1] ; pp++) - { - i = A [pp] ; - ASSERT (i >= 0 && i < n) ; - if (i > j && mark [i] != j) - { - /* row k of M contains column indices i and j */ - M [count [i]++] = k ; - M [count [j]++] = k ; - k++ ; - mark [i] = j ; - } - } - } - (*release) ((void *) mark) ; - } - - /* count and mark no longer needed */ - (*release) ((void *) count) ; - ASSERT (k == n_row) ; - - /* === Adjust the knobs for M =========================================== */ - - for (i = 0 ; i < COLAMD_KNOBS ; i++) - { - cknobs [i] = knobs [i] ; - } - - /* there are no dense rows in M */ - cknobs [COLAMD_DENSE_ROW] = 1.0 ; - - if (n_row != 0 && n < n_row) - { - /* On input, the knob is a fraction of 1..n, the number of rows of A. */ - /* Convert it to a fraction of 1..n_row, of the number of rows of M. */ - cknobs [COLAMD_DENSE_COL] = (knobs [COLAMD_DENSE_ROW] * n) / n_row ; - } - else - { - /* no dense columns in M */ - cknobs [COLAMD_DENSE_COL] = 1.0 ; - } - - DEBUG0 (("symamd: dense col knob for M: %g\n", cknobs [COLAMD_DENSE_COL])) ; - - /* === Order the columns of M =========================================== */ - - if (!colamd (n_row, n, Mlen, M, perm, cknobs, cstats)) - { - /* This "cannot" happen, unless there is a bug in the code. 
*/ - stats [COLAMD_STATUS] = COLAMD_ERROR_internal_error ; - (*release) ((void *) M) ; - DEBUG0 (("symamd: internal error!\n")) ; - return (FALSE) ; - } - - /* Note that the output permutation is now in perm */ - - /* === get the statistics for symamd from colamd ======================== */ - - /* note that a dense column in colamd means a dense row and col in symamd */ - stats [COLAMD_DENSE_ROW] = cstats [COLAMD_DENSE_COL] ; - stats [COLAMD_DENSE_COL] = cstats [COLAMD_DENSE_COL] ; - stats [COLAMD_DEFRAG_COUNT] = cstats [COLAMD_DEFRAG_COUNT] ; - - /* === Free M =========================================================== */ - - (*release) ((void *) M) ; - DEBUG0 (("symamd: done.\n")) ; - return (TRUE) ; - -} - -/* ========================================================================== */ -/* === colamd =============================================================== */ -/* ========================================================================== */ - -/* - The colamd routine computes a column ordering Q of a sparse matrix - A such that the LU factorization P(AQ) = LU remains sparse, where P is - selected via partial pivoting. The routine can also be viewed as - providing a permutation Q such that the Cholesky factorization - (AQ)'(AQ) = LL' remains sparse. 
-*/ - -PUBLIC int colamd /* returns TRUE if successful, FALSE otherwise*/ -( - /* === Parameters ======================================================= */ - - int n_row, /* number of rows in A */ - int n_col, /* number of columns in A */ - int Alen, /* length of A */ - int A [], /* row indices of A */ - int p [], /* pointers to columns in A */ - double knobs [COLAMD_KNOBS],/* parameters (uses defaults if NULL) */ - int stats [COLAMD_STATS] /* output statistics and error codes */ -) -{ - /* === Local variables ================================================== */ - - int i ; /* loop index */ - int nnz ; /* nonzeros in A */ - int Row_size ; /* size of Row [], in integers */ - int Col_size ; /* size of Col [], in integers */ - int need ; /* minimum required length of A */ - Colamd_Row *Row ; /* pointer into A of Row [0..n_row] array */ - Colamd_Col *Col ; /* pointer into A of Col [0..n_col] array */ - int n_col2 ; /* number of non-dense, non-empty columns */ - int n_row2 ; /* number of non-dense, non-empty rows */ - int ngarbage ; /* number of garbage collections performed */ - int max_deg ; /* maximum row degree */ - double default_knobs [COLAMD_KNOBS] ; /* default knobs array */ - -#ifndef NDEBUG - colamd_get_debug ("colamd") ; -#endif /* NDEBUG */ - - /* === Check the input arguments ======================================== */ - - if (!stats) - { - DEBUG0 (("colamd: stats not present\n")) ; - return (FALSE) ; - } - for (i = 0 ; i < COLAMD_STATS ; i++) - { - stats [i] = 0 ; - } - stats [COLAMD_STATUS] = COLAMD_OK ; - stats [COLAMD_INFO1] = -1 ; - stats [COLAMD_INFO2] = -1 ; - - if (!A) /* A is not present */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ; - DEBUG0 (("colamd: A not present\n")) ; - return (FALSE) ; - } - - if (!p) /* p is not present */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ; - DEBUG0 (("colamd: p not present\n")) ; - return (FALSE) ; - } - - if (n_row < 0) /* n_row must be >= 0 */ - { - stats [COLAMD_STATUS] = 
COLAMD_ERROR_nrow_negative ; - stats [COLAMD_INFO1] = n_row ; - DEBUG0 (("colamd: nrow negative %d\n", n_row)) ; - return (FALSE) ; - } - - if (n_col < 0) /* n_col must be >= 0 */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ; - stats [COLAMD_INFO1] = n_col ; - DEBUG0 (("colamd: ncol negative %d\n", n_col)) ; - return (FALSE) ; - } - - nnz = p [n_col] ; - if (nnz < 0) /* nnz must be >= 0 */ - { - stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ; - stats [COLAMD_INFO1] = nnz ; - DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ; - return (FALSE) ; - } - - if (p [0] != 0) - { - stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ; - stats [COLAMD_INFO1] = p [0] ; - DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ; - return (FALSE) ; - } - - /* === If no knobs, set default knobs =================================== */ - - if (!knobs) - { - colamd_set_defaults (default_knobs) ; - knobs = default_knobs ; - } - - /* === Allocate the Row and Col arrays from array A ===================== */ - - Col_size = COLAMD_C (n_col) ; - Row_size = COLAMD_R (n_row) ; - need = 2*nnz + n_col + Col_size + Row_size ; - - if (need > Alen) - { - /* not enough space in array A to perform the ordering */ - stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ; - stats [COLAMD_INFO1] = need ; - stats [COLAMD_INFO2] = Alen ; - DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen)); - return (FALSE) ; - } - - Alen -= Col_size + Row_size ; - Col = (Colamd_Col *) &A [Alen] ; - Row = (Colamd_Row *) &A [Alen + Col_size] ; - - /* === Construct the row and column data structures ===================== */ - - if (!init_rows_cols (n_row, n_col, Row, Col, A, p, stats)) - { - /* input matrix is invalid */ - DEBUG0 (("colamd: Matrix invalid\n")) ; - return (FALSE) ; - } - - /* === Initialize scores, kill dense rows/columns ======================= */ - - init_scoring (n_row, n_col, Row, Col, A, p, knobs, - &n_row2, &n_col2, &max_deg) ; - - /* === Order the supercolumns 
=========================================== */ - - ngarbage = find_ordering (n_row, n_col, Alen, Row, Col, A, p, - n_col2, max_deg, 2*nnz) ; - - /* === Order the non-principal columns ================================== */ - - order_children (n_col, Col, p) ; - - /* === Return statistics in stats ======================================= */ - - stats [COLAMD_DENSE_ROW] = n_row - n_row2 ; - stats [COLAMD_DENSE_COL] = n_col - n_col2 ; - stats [COLAMD_DEFRAG_COUNT] = ngarbage ; - DEBUG0 (("colamd: done.\n")) ; - return (TRUE) ; -} - - -/* ========================================================================== */ -/* === colamd_report ======================================================== */ -/* ========================================================================== */ - -PUBLIC void colamd_report -( - int stats [COLAMD_STATS] -) -{ - print_report ("colamd", stats) ; -} - - -/* ========================================================================== */ -/* === symamd_report ======================================================== */ -/* ========================================================================== */ - -PUBLIC void symamd_report -( - int stats [COLAMD_STATS] -) -{ - print_report ("symamd", stats) ; -} - - - -/* ========================================================================== */ -/* === NON-USER-CALLABLE ROUTINES: ========================================== */ -/* ========================================================================== */ - -/* There are no user-callable routines beyond this point in the file */ - - -/* ========================================================================== */ -/* === init_rows_cols ======================================================= */ -/* ========================================================================== */ - -/* - Takes the column form of the matrix in A and creates the row form of the - matrix. Also, row and column attributes are stored in the Col and Row - structs. 
If the columns are un-sorted or contain duplicate row indices, - this routine will also sort and remove duplicate row indices from the - column form of the matrix. Returns FALSE if the matrix is invalid, - TRUE otherwise. Not user-callable. -*/ - -PRIVATE int init_rows_cols /* returns TRUE if OK, or FALSE otherwise */ -( - /* === Parameters ======================================================= */ - - int n_row, /* number of rows of A */ - int n_col, /* number of columns of A */ - Colamd_Row Row [], /* of size n_row+1 */ - Colamd_Col Col [], /* of size n_col+1 */ - int A [], /* row indices of A, of size Alen */ - int p [], /* pointers to columns in A, of size n_col+1 */ - int stats [COLAMD_STATS] /* colamd statistics */ -) -{ - /* === Local variables ================================================== */ - - int col ; /* a column index */ - int row ; /* a row index */ - int *cp ; /* a column pointer */ - int *cp_end ; /* a pointer to the end of a column */ - int *rp ; /* a row pointer */ - int *rp_end ; /* a pointer to the end of a row */ - int last_row ; /* previous row */ - - /* === Initialize columns, and check column pointers ==================== */ - - for (col = 0 ; col < n_col ; col++) - { - Col [col].start = p [col] ; - Col [col].length = p [col+1] - p [col] ; - - if (Col [col].length < 0) - { - /* column pointers must be non-decreasing */ - stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ; - stats [COLAMD_INFO1] = col ; - stats [COLAMD_INFO2] = Col [col].length ; - DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ; - return (FALSE) ; - } - - Col [col].shared1.thickness = 1 ; - Col [col].shared2.score = 0 ; - Col [col].shared3.prev = EMPTY ; - Col [col].shared4.degree_next = EMPTY ; - } - - /* p [0..n_col] no longer needed, used as "head" in subsequent routines */ - - /* === Scan columns, compute row degrees, and check row indices ========= */ - - stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/ - - for 
(row = 0 ; row < n_row ; row++) - { - Row [row].length = 0 ; - Row [row].shared2.mark = -1 ; - } - - for (col = 0 ; col < n_col ; col++) - { - last_row = -1 ; - - cp = &A [p [col]] ; - cp_end = &A [p [col+1]] ; - - while (cp < cp_end) - { - row = *cp++ ; - - /* make sure row indices within range */ - if (row < 0 || row >= n_row) - { - stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ; - stats [COLAMD_INFO1] = col ; - stats [COLAMD_INFO2] = row ; - stats [COLAMD_INFO3] = n_row ; - DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ; - return (FALSE) ; - } - - if (row <= last_row || Row [row].shared2.mark == col) - { - /* row index are unsorted or repeated (or both), thus col */ - /* is jumbled. This is a notice, not an error condition. */ - stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ; - stats [COLAMD_INFO1] = col ; - stats [COLAMD_INFO2] = row ; - (stats [COLAMD_INFO3]) ++ ; - DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col)); - } - - if (Row [row].shared2.mark != col) - { - Row [row].length++ ; - } - else - { - /* this is a repeated entry in the column, */ - /* it will be removed */ - Col [col].length-- ; - } - - /* mark the row as having been seen in this column */ - Row [row].shared2.mark = col ; - - last_row = row ; - } - } - - /* === Compute row pointers ============================================= */ - - /* row form of the matrix starts directly after the column */ - /* form of matrix in A */ - Row [0].start = p [n_col] ; - Row [0].shared1.p = Row [0].start ; - Row [0].shared2.mark = -1 ; - for (row = 1 ; row < n_row ; row++) - { - Row [row].start = Row [row-1].start + Row [row-1].length ; - Row [row].shared1.p = Row [row].start ; - Row [row].shared2.mark = -1 ; - } - - /* === Create row form ================================================== */ - - if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED) - { - /* if cols jumbled, watch for repeated row indices */ - for (col = 0 ; col < n_col ; col++) - { - cp = &A [p [col]] 
; - cp_end = &A [p [col+1]] ; - while (cp < cp_end) - { - row = *cp++ ; - if (Row [row].shared2.mark != col) - { - A [(Row [row].shared1.p)++] = col ; - Row [row].shared2.mark = col ; - } - } - } - } - else - { - /* if cols not jumbled, we don't need the mark (this is faster) */ - for (col = 0 ; col < n_col ; col++) - { - cp = &A [p [col]] ; - cp_end = &A [p [col+1]] ; - while (cp < cp_end) - { - A [(Row [*cp++].shared1.p)++] = col ; - } - } - } - - /* === Clear the row marks and set row degrees ========================== */ - - for (row = 0 ; row < n_row ; row++) - { - Row [row].shared2.mark = 0 ; - Row [row].shared1.degree = Row [row].length ; - } - - /* === See if we need to re-create columns ============================== */ - - if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED) - { - DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ; - -#ifndef NDEBUG - /* make sure column lengths are correct */ - for (col = 0 ; col < n_col ; col++) - { - p [col] = Col [col].length ; - } - for (row = 0 ; row < n_row ; row++) - { - rp = &A [Row [row].start] ; - rp_end = rp + Row [row].length ; - while (rp < rp_end) - { - p [*rp++]-- ; - } - } - for (col = 0 ; col < n_col ; col++) - { - ASSERT (p [col] == 0) ; - } - /* now p is all zero (different than when debugging is turned off) */ -#endif /* NDEBUG */ - - /* === Compute col pointers ========================================= */ - - /* col form of the matrix starts at A [0]. */ - /* Note, we may have a gap between the col form and the row */ - /* form if there were duplicate entries, if so, it will be */ - /* removed upon the first garbage collection */ - Col [0].start = 0 ; - p [0] = Col [0].start ; - for (col = 1 ; col < n_col ; col++) - { - /* note that the lengths here are for pruned columns, i.e. 
*/ - /* no duplicate row indices will exist for these columns */ - Col [col].start = Col [col-1].start + Col [col-1].length ; - p [col] = Col [col].start ; - } - - /* === Re-create col form =========================================== */ - - for (row = 0 ; row < n_row ; row++) - { - rp = &A [Row [row].start] ; - rp_end = rp + Row [row].length ; - while (rp < rp_end) - { - A [(p [*rp++])++] = row ; - } - } - } - - /* === Done. Matrix is not (or no longer) jumbled ====================== */ - - return (TRUE) ; -} - - -/* ========================================================================== */ -/* === init_scoring ========================================================= */ -/* ========================================================================== */ - -/* - Kills dense or empty columns and rows, calculates an initial score for - each column, and places all columns in the degree lists. Not user-callable. -*/ - -PRIVATE void init_scoring -( - /* === Parameters ======================================================= */ - - int n_row, /* number of rows of A */ - int n_col, /* number of columns of A */ - Colamd_Row Row [], /* of size n_row+1 */ - Colamd_Col Col [], /* of size n_col+1 */ - int A [], /* column form and row form of A */ - int head [], /* of size n_col+1 */ - double knobs [COLAMD_KNOBS],/* parameters */ - int *p_n_row2, /* number of non-dense, non-empty rows */ - int *p_n_col2, /* number of non-dense, non-empty columns */ - int *p_max_deg /* maximum row degree */ -) -{ - /* === Local variables ================================================== */ - - int c ; /* a column index */ - int r, row ; /* a row index */ - int *cp ; /* a column pointer */ - int deg ; /* degree of a row or column */ - int *cp_end ; /* a pointer to the end of a column */ - int *new_cp ; /* new column pointer */ - int col_length ; /* length of pruned column */ - int score ; /* current column score */ - int n_col2 ; /* number of non-dense, non-empty columns */ - int n_row2 ; /* 
number of non-dense, non-empty rows */ - int dense_row_count ; /* remove rows with more entries than this */ - int dense_col_count ; /* remove cols with more entries than this */ - int min_score ; /* smallest column score */ - int max_deg ; /* maximum row degree */ - int next_col ; /* Used to add to degree list.*/ - -#ifndef NDEBUG - int debug_count ; /* debug only. */ -#endif /* NDEBUG */ - - /* === Extract knobs ==================================================== */ - - dense_row_count = MAX (0, MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ; - dense_col_count = MAX (0, MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ; - DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ; - max_deg = 0 ; - n_col2 = n_col ; - n_row2 = n_row ; - - /* === Kill empty columns =============================================== */ - - /* Put the empty columns at the end in their natural order, so that LU */ - /* factorization can proceed as far as possible. */ - for (c = n_col-1 ; c >= 0 ; c--) - { - deg = Col [c].length ; - if (deg == 0) - { - /* this is a empty column, kill and order it last */ - Col [c].shared2.order = --n_col2 ; - KILL_PRINCIPAL_COL (c) ; - } - } - DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ; - - /* === Kill dense columns =============================================== */ - - /* Put the dense columns at the end, in their natural order */ - for (c = n_col-1 ; c >= 0 ; c--) - { - /* skip any dead columns */ - if (COL_IS_DEAD (c)) - { - continue ; - } - deg = Col [c].length ; - if (deg > dense_col_count) - { - /* this is a dense column, kill and order it last */ - Col [c].shared2.order = --n_col2 ; - /* decrement the row degrees */ - cp = &A [Col [c].start] ; - cp_end = cp + Col [c].length ; - while (cp < cp_end) - { - Row [*cp++].shared1.degree-- ; - } - KILL_PRINCIPAL_COL (c) ; - } - } - DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ; - - /* === Kill dense and empty rows 
======================================== */ - - for (r = 0 ; r < n_row ; r++) - { - deg = Row [r].shared1.degree ; - ASSERT (deg >= 0 && deg <= n_col) ; - if (deg > dense_row_count || deg == 0) - { - /* kill a dense or empty row */ - KILL_ROW (r) ; - --n_row2 ; - } - else - { - /* keep track of max degree of remaining rows */ - max_deg = MAX (max_deg, deg) ; - } - } - DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ; - - /* === Compute initial column scores ==================================== */ - - /* At this point the row degrees are accurate. They reflect the number */ - /* of "live" (non-dense) columns in each row. No empty rows exist. */ - /* Some "live" columns may contain only dead rows, however. These are */ - /* pruned in the code below. */ - - /* now find the initial matlab score for each column */ - for (c = n_col-1 ; c >= 0 ; c--) - { - /* skip dead column */ - if (COL_IS_DEAD (c)) - { - continue ; - } - score = 0 ; - cp = &A [Col [c].start] ; - new_cp = cp ; - cp_end = cp + Col [c].length ; - while (cp < cp_end) - { - /* get a row */ - row = *cp++ ; - /* skip if dead */ - if (ROW_IS_DEAD (row)) - { - continue ; - } - /* compact the column */ - *new_cp++ = row ; - /* add row's external degree */ - score += Row [row].shared1.degree - 1 ; - /* guard against integer overflow */ - score = MIN (score, n_col) ; - } - /* determine pruned column length */ - col_length = (int) (new_cp - &A [Col [c].start]) ; - if (col_length == 0) - { - /* a newly-made null column (all rows in this col are "dense" */ - /* and have already been killed) */ - DEBUG2 (("Newly null killed: %d\n", c)) ; - Col [c].shared2.order = --n_col2 ; - KILL_PRINCIPAL_COL (c) ; - } - else - { - /* set column length and set score */ - ASSERT (score >= 0) ; - ASSERT (score <= n_col) ; - Col [c].length = col_length ; - Col [c].shared2.score = score ; - } - } - DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n", - n_col-n_col2)) ; - - /* At this point, all 
empty rows and columns are dead. All live columns */ - /* are "clean" (containing no dead rows) and simplicial (no supercolumns */ - /* yet). Rows may contain dead columns, but all live rows contain at */ - /* least one live column. */ - -#ifndef NDEBUG - debug_structures (n_row, n_col, Row, Col, A, n_col2) ; -#endif /* NDEBUG */ - - /* === Initialize degree lists ========================================== */ - -#ifndef NDEBUG - debug_count = 0 ; -#endif /* NDEBUG */ - - /* clear the hash buckets */ - for (c = 0 ; c <= n_col ; c++) - { - head [c] = EMPTY ; - } - min_score = n_col ; - /* place in reverse order, so low column indices are at the front */ - /* of the lists. This is to encourage natural tie-breaking */ - for (c = n_col-1 ; c >= 0 ; c--) - { - /* only add principal columns to degree lists */ - if (COL_IS_ALIVE (c)) - { - DEBUG4 (("place %d score %d minscore %d ncol %d\n", - c, Col [c].shared2.score, min_score, n_col)) ; - - /* === Add columns score to DList =============================== */ - - score = Col [c].shared2.score ; - - ASSERT (min_score >= 0) ; - ASSERT (min_score <= n_col) ; - ASSERT (score >= 0) ; - ASSERT (score <= n_col) ; - ASSERT (head [score] >= EMPTY) ; - - /* now add this column to dList at proper score location */ - next_col = head [score] ; - Col [c].shared3.prev = EMPTY ; - Col [c].shared4.degree_next = next_col ; - - /* if there already was a column with the same score, set its */ - /* previous pointer to this new column */ - if (next_col != EMPTY) - { - Col [next_col].shared3.prev = c ; - } - head [score] = c ; - - /* see if this score is less than current min */ - min_score = MIN (min_score, score) ; - -#ifndef NDEBUG - debug_count++ ; -#endif /* NDEBUG */ - - } - } - -#ifndef NDEBUG - DEBUG1 (("colamd: Live cols %d out of %d, non-princ: %d\n", - debug_count, n_col, n_col-debug_count)) ; - ASSERT (debug_count == n_col2) ; - debug_deg_lists (n_row, n_col, Row, Col, head, min_score, n_col2, max_deg) ; -#endif /* NDEBUG */ - - /* 
=== Return number of remaining columns, and max row degree =========== */ - - *p_n_col2 = n_col2 ; - *p_n_row2 = n_row2 ; - *p_max_deg = max_deg ; -} - - -/* ========================================================================== */ -/* === find_ordering ======================================================== */ -/* ========================================================================== */ - -/* - Order the principal columns of the supercolumn form of the matrix - (no supercolumns on input). Uses a minimum approximate column minimum - degree ordering method. Not user-callable. -*/ - -PRIVATE int find_ordering /* return the number of garbage collections */ -( - /* === Parameters ======================================================= */ - - int n_row, /* number of rows of A */ - int n_col, /* number of columns of A */ - int Alen, /* size of A, 2*nnz + n_col or larger */ - Colamd_Row Row [], /* of size n_row+1 */ - Colamd_Col Col [], /* of size n_col+1 */ - int A [], /* column form and row form of A */ - int head [], /* of size n_col+1 */ - int n_col2, /* Remaining columns to order */ - int max_deg, /* Maximum row degree */ - int pfree /* index of first free slot (2*nnz on entry) */ -) -{ - /* === Local variables ================================================== */ - - int k ; /* current pivot ordering step */ - int pivot_col ; /* current pivot column */ - int *cp ; /* a column pointer */ - int *rp ; /* a row pointer */ - int pivot_row ; /* current pivot row */ - int *new_cp ; /* modified column pointer */ - int *new_rp ; /* modified row pointer */ - int pivot_row_start ; /* pointer to start of pivot row */ - int pivot_row_degree ; /* number of columns in pivot row */ - int pivot_row_length ; /* number of supercolumns in pivot row */ - int pivot_col_score ; /* score of pivot column */ - int needed_memory ; /* free space needed for pivot row */ - int *cp_end ; /* pointer to the end of a column */ - int *rp_end ; /* pointer to the end of a row */ - int row ; /* 
a row index */ - int col ; /* a column index */ - int max_score ; /* maximum possible score */ - int cur_score ; /* score of current column */ - unsigned int hash ; /* hash value for supernode detection */ - int head_column ; /* head of hash bucket */ - int first_col ; /* first column in hash bucket */ - int tag_mark ; /* marker value for mark array */ - int row_mark ; /* Row [row].shared2.mark */ - int set_difference ; /* set difference size of row with pivot row */ - int min_score ; /* smallest column score */ - int col_thickness ; /* "thickness" (no. of columns in a supercol) */ - int max_mark ; /* maximum value of tag_mark */ - int pivot_col_thickness ; /* number of columns represented by pivot col */ - int prev_col ; /* Used by Dlist operations. */ - int next_col ; /* Used by Dlist operations. */ - int ngarbage ; /* number of garbage collections performed */ - -#ifndef NDEBUG - int debug_d ; /* debug loop counter */ - int debug_step = 0 ; /* debug loop counter */ -#endif /* NDEBUG */ - - /* === Initialization and clear mark ==================================== */ - - max_mark = INT_MAX - n_col ; /* INT_MAX defined in */ - tag_mark = clear_mark (n_row, Row) ; - min_score = 0 ; - ngarbage = 0 ; - DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ; - - /* === Order the columns ================================================ */ - - for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */) - { - -#ifndef NDEBUG - if (debug_step % 100 == 0) - { - DEBUG2 (("\n... 
Step k: %d out of n_col2: %d\n", k, n_col2)) ; - } - else - { - DEBUG3 (("\n----------Step k: %d out of n_col2: %d\n", k, n_col2)) ; - } - debug_step++ ; - debug_deg_lists (n_row, n_col, Row, Col, head, - min_score, n_col2-k, max_deg) ; - debug_matrix (n_row, n_col, Row, Col, A) ; -#endif /* NDEBUG */ - - /* === Select pivot column, and order it ============================ */ - - /* make sure degree list isn't empty */ - ASSERT (min_score >= 0) ; - ASSERT (min_score <= n_col) ; - ASSERT (head [min_score] >= EMPTY) ; - -#ifndef NDEBUG - for (debug_d = 0 ; debug_d < min_score ; debug_d++) - { - ASSERT (head [debug_d] == EMPTY) ; - } -#endif /* NDEBUG */ - - /* get pivot column from head of minimum degree list */ - while (head [min_score] == EMPTY && min_score < n_col) - { - min_score++ ; - } - pivot_col = head [min_score] ; - ASSERT (pivot_col >= 0 && pivot_col <= n_col) ; - next_col = Col [pivot_col].shared4.degree_next ; - head [min_score] = next_col ; - if (next_col != EMPTY) - { - Col [next_col].shared3.prev = EMPTY ; - } - - ASSERT (COL_IS_ALIVE (pivot_col)) ; - DEBUG3 (("Pivot col: %d\n", pivot_col)) ; - - /* remember score for defrag check */ - pivot_col_score = Col [pivot_col].shared2.score ; - - /* the pivot column is the kth column in the pivot order */ - Col [pivot_col].shared2.order = k ; - - /* increment order count by column thickness */ - pivot_col_thickness = Col [pivot_col].shared1.thickness ; - k += pivot_col_thickness ; - ASSERT (pivot_col_thickness > 0) ; - - /* === Garbage_collection, if necessary ============================= */ - - needed_memory = MIN (pivot_col_score, n_col - k) ; - if (pfree + needed_memory >= Alen) - { - pfree = garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ; - ngarbage++ ; - /* after garbage collection we will have enough */ - ASSERT (pfree + needed_memory < Alen) ; - /* garbage collection has wiped out the Row[].shared2.mark array */ - tag_mark = clear_mark (n_row, Row) ; - -#ifndef NDEBUG - debug_matrix 
(n_row, n_col, Row, Col, A) ; -#endif /* NDEBUG */ - } - - /* === Compute pivot row pattern ==================================== */ - - /* get starting location for this new merged row */ - pivot_row_start = pfree ; - - /* initialize new row counts to zero */ - pivot_row_degree = 0 ; - - /* tag pivot column as having been visited so it isn't included */ - /* in merged pivot row */ - Col [pivot_col].shared1.thickness = -pivot_col_thickness ; - - /* pivot row is the union of all rows in the pivot column pattern */ - cp = &A [Col [pivot_col].start] ; - cp_end = cp + Col [pivot_col].length ; - while (cp < cp_end) - { - /* get a row */ - row = *cp++ ; - DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ; - /* skip if row is dead */ - if (ROW_IS_DEAD (row)) - { - continue ; - } - rp = &A [Row [row].start] ; - rp_end = rp + Row [row].length ; - while (rp < rp_end) - { - /* get a column */ - col = *rp++ ; - /* add the column, if alive and untagged */ - col_thickness = Col [col].shared1.thickness ; - if (col_thickness > 0 && COL_IS_ALIVE (col)) - { - /* tag column in pivot row */ - Col [col].shared1.thickness = -col_thickness ; - ASSERT (pfree < Alen) ; - /* place column in pivot row */ - A [pfree++] = col ; - pivot_row_degree += col_thickness ; - } - } - } - - /* clear tag on pivot column */ - Col [pivot_col].shared1.thickness = pivot_col_thickness ; - max_deg = MAX (max_deg, pivot_row_degree) ; - -#ifndef NDEBUG - DEBUG3 (("check2\n")) ; - debug_mark (n_row, Row, tag_mark, max_mark) ; -#endif /* NDEBUG */ - - /* === Kill all rows used to construct pivot row ==================== */ - - /* also kill pivot row, temporarily */ - cp = &A [Col [pivot_col].start] ; - cp_end = cp + Col [pivot_col].length ; - while (cp < cp_end) - { - /* may be killing an already dead row */ - row = *cp++ ; - DEBUG3 (("Kill row in pivot col: %d\n", row)) ; - KILL_ROW (row) ; - } - - /* === Select a row index to use as the new pivot row =============== */ - - pivot_row_length = pfree 
- pivot_row_start ; - if (pivot_row_length > 0) - { - /* pick the "pivot" row arbitrarily (first row in col) */ - pivot_row = A [Col [pivot_col].start] ; - DEBUG3 (("Pivotal row is %d\n", pivot_row)) ; - } - else - { - /* there is no pivot row, since it is of zero length */ - pivot_row = EMPTY ; - ASSERT (pivot_row_length == 0) ; - } - ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ; - - /* === Approximate degree computation =============================== */ - - /* Here begins the computation of the approximate degree. The column */ - /* score is the sum of the pivot row "length", plus the size of the */ - /* set differences of each row in the column minus the pattern of the */ - /* pivot row itself. The column ("thickness") itself is also */ - /* excluded from the column score (we thus use an approximate */ - /* external degree). */ - - /* The time taken by the following code (compute set differences, and */ - /* add them up) is proportional to the size of the data structure */ - /* being scanned - that is, the sum of the sizes of each column in */ - /* the pivot row. Thus, the amortized time to compute a column score */ - /* is proportional to the size of that column (where size, in this */ - /* context, is the column "length", or the number of row indices */ - /* in that column). The number of row indices in a column is */ - /* monotonically non-decreasing, from the length of the original */ - /* column on input to colamd. */ - - /* === Compute set differences ====================================== */ - - DEBUG3 (("** Computing set differences phase. **\n")) ; - - /* pivot row is currently dead - it will be revived later. 
*/ - - DEBUG3 (("Pivot row: ")) ; - /* for each column in pivot row */ - rp = &A [pivot_row_start] ; - rp_end = rp + pivot_row_length ; - while (rp < rp_end) - { - col = *rp++ ; - ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ; - DEBUG3 (("Col: %d\n", col)) ; - - /* clear tags used to construct pivot row pattern */ - col_thickness = -Col [col].shared1.thickness ; - ASSERT (col_thickness > 0) ; - Col [col].shared1.thickness = col_thickness ; - - /* === Remove column from degree list =========================== */ - - cur_score = Col [col].shared2.score ; - prev_col = Col [col].shared3.prev ; - next_col = Col [col].shared4.degree_next ; - ASSERT (cur_score >= 0) ; - ASSERT (cur_score <= n_col) ; - ASSERT (cur_score >= EMPTY) ; - if (prev_col == EMPTY) - { - head [cur_score] = next_col ; - } - else - { - Col [prev_col].shared4.degree_next = next_col ; - } - if (next_col != EMPTY) - { - Col [next_col].shared3.prev = prev_col ; - } - - /* === Scan the column ========================================== */ - - cp = &A [Col [col].start] ; - cp_end = cp + Col [col].length ; - while (cp < cp_end) - { - /* get a row */ - row = *cp++ ; - row_mark = Row [row].shared2.mark ; - /* skip if dead */ - if (ROW_IS_MARKED_DEAD (row_mark)) - { - continue ; - } - ASSERT (row != pivot_row) ; - set_difference = row_mark - tag_mark ; - /* check if the row has been seen yet */ - if (set_difference < 0) - { - ASSERT (Row [row].shared1.degree <= max_deg) ; - set_difference = Row [row].shared1.degree ; - } - /* subtract column thickness from this row's set difference */ - set_difference -= col_thickness ; - ASSERT (set_difference >= 0) ; - /* absorb this row if the set difference becomes zero */ - if (set_difference == 0) - { - DEBUG3 (("aggressive absorption. 
Row: %d\n", row)) ; - KILL_ROW (row) ; - } - else - { - /* save the new mark */ - Row [row].shared2.mark = set_difference + tag_mark ; - } - } - } - -#ifndef NDEBUG - debug_deg_lists (n_row, n_col, Row, Col, head, - min_score, n_col2-k-pivot_row_degree, max_deg) ; -#endif /* NDEBUG */ - - /* === Add up set differences for each column ======================= */ - - DEBUG3 (("** Adding set differences phase. **\n")) ; - - /* for each column in pivot row */ - rp = &A [pivot_row_start] ; - rp_end = rp + pivot_row_length ; - while (rp < rp_end) - { - /* get a column */ - col = *rp++ ; - ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ; - hash = 0 ; - cur_score = 0 ; - cp = &A [Col [col].start] ; - /* compact the column */ - new_cp = cp ; - cp_end = cp + Col [col].length ; - - DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ; - - while (cp < cp_end) - { - /* get a row */ - row = *cp++ ; - ASSERT(row >= 0 && row < n_row) ; - row_mark = Row [row].shared2.mark ; - /* skip if dead */ - if (ROW_IS_MARKED_DEAD (row_mark)) - { - continue ; - } - ASSERT (row_mark > tag_mark) ; - /* compact the column */ - *new_cp++ = row ; - /* compute hash function */ - hash += row ; - /* add set difference */ - cur_score += row_mark - tag_mark ; - /* integer overflow... */ - cur_score = MIN (cur_score, n_col) ; - } - - /* recompute the column's length */ - Col [col].length = (int) (new_cp - &A [Col [col].start]) ; - - /* === Further mass elimination ================================= */ - - if (Col [col].length == 0) - { - DEBUG4 (("further mass elimination. 
Col: %d\n", col)) ; - /* nothing left but the pivot row in this column */ - KILL_PRINCIPAL_COL (col) ; - pivot_row_degree -= Col [col].shared1.thickness ; - ASSERT (pivot_row_degree >= 0) ; - /* order it */ - Col [col].shared2.order = k ; - /* increment order count by column thickness */ - k += Col [col].shared1.thickness ; - } - else - { - /* === Prepare for supercolumn detection ==================== */ - - DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ; - - /* save score so far */ - Col [col].shared2.score = cur_score ; - - /* add column to hash table, for supercolumn detection */ - hash %= n_col + 1 ; - - DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ; - ASSERT (hash <= n_col) ; - - head_column = head [hash] ; - if (head_column > EMPTY) - { - /* degree list "hash" is non-empty, use prev (shared3) of */ - /* first column in degree list as head of hash bucket */ - first_col = Col [head_column].shared3.headhash ; - Col [head_column].shared3.headhash = col ; - } - else - { - /* degree list "hash" is empty, use head as hash bucket */ - first_col = - (head_column + 2) ; - head [hash] = - (col + 2) ; - } - Col [col].shared4.hash_next = first_col ; - - /* save hash function in Col [col].shared3.hash */ - Col [col].shared3.hash = (int) hash ; - ASSERT (COL_IS_ALIVE (col)) ; - } - } - - /* The approximate external column degree is now computed. */ - - /* === Supercolumn detection ======================================== */ - - DEBUG3 (("** Supercolumn detection phase. 
**\n")) ; - - detect_super_cols ( - -#ifndef NDEBUG - n_col, Row, -#endif /* NDEBUG */ - - Col, A, head, pivot_row_start, pivot_row_length) ; - - /* === Kill the pivotal column ====================================== */ - - KILL_PRINCIPAL_COL (pivot_col) ; - - /* === Clear mark =================================================== */ - - tag_mark += (max_deg + 1) ; - if (tag_mark >= max_mark) - { - DEBUG2 (("clearing tag_mark\n")) ; - tag_mark = clear_mark (n_row, Row) ; - } - -#ifndef NDEBUG - DEBUG3 (("check3\n")) ; - debug_mark (n_row, Row, tag_mark, max_mark) ; -#endif /* NDEBUG */ - - /* === Finalize the new pivot row, and column scores ================ */ - - DEBUG3 (("** Finalize scores phase. **\n")) ; - - /* for each column in pivot row */ - rp = &A [pivot_row_start] ; - /* compact the pivot row */ - new_rp = rp ; - rp_end = rp + pivot_row_length ; - while (rp < rp_end) - { - col = *rp++ ; - /* skip dead columns */ - if (COL_IS_DEAD (col)) - { - continue ; - } - *new_rp++ = col ; - /* add new pivot row to column */ - A [Col [col].start + (Col [col].length++)] = pivot_row ; - - /* retrieve score so far and add on pivot row's degree. */ - /* (we wait until here for this in case the pivot */ - /* row's degree was reduced due to mass elimination). 
*/ - cur_score = Col [col].shared2.score + pivot_row_degree ; - - /* calculate the max possible score as the number of */ - /* external columns minus the 'k' value minus the */ - /* columns thickness */ - max_score = n_col - k - Col [col].shared1.thickness ; - - /* make the score the external degree of the union-of-rows */ - cur_score -= Col [col].shared1.thickness ; - - /* make sure score is less or equal than the max score */ - cur_score = MIN (cur_score, max_score) ; - ASSERT (cur_score >= 0) ; - - /* store updated score */ - Col [col].shared2.score = cur_score ; - - /* === Place column back in degree list ========================= */ - - ASSERT (min_score >= 0) ; - ASSERT (min_score <= n_col) ; - ASSERT (cur_score >= 0) ; - ASSERT (cur_score <= n_col) ; - ASSERT (head [cur_score] >= EMPTY) ; - next_col = head [cur_score] ; - Col [col].shared4.degree_next = next_col ; - Col [col].shared3.prev = EMPTY ; - if (next_col != EMPTY) - { - Col [next_col].shared3.prev = col ; - } - head [cur_score] = col ; - - /* see if this score is less than current min */ - min_score = MIN (min_score, cur_score) ; - - } - -#ifndef NDEBUG - debug_deg_lists (n_row, n_col, Row, Col, head, - min_score, n_col2-k, max_deg) ; -#endif /* NDEBUG */ - - /* === Resurrect the new pivot row ================================== */ - - if (pivot_row_degree > 0) - { - /* update pivot row length to reflect any cols that were killed */ - /* during super-col detection and mass elimination */ - Row [pivot_row].start = pivot_row_start ; - Row [pivot_row].length = (int) (new_rp - &A[pivot_row_start]) ; - Row [pivot_row].shared1.degree = pivot_row_degree ; - Row [pivot_row].shared2.mark = 0 ; - /* pivot row is no longer dead */ - } - } - - /* === All principal columns have now been ordered ====================== */ - - return (ngarbage) ; -} - - -/* ========================================================================== */ -/* === order_children ======================================================= */ 
-/* ========================================================================== */ - -/* - The find_ordering routine has ordered all of the principal columns (the - representatives of the supercolumns). The non-principal columns have not - yet been ordered. This routine orders those columns by walking up the - parent tree (a column is a child of the column which absorbed it). The - final permutation vector is then placed in p [0 ... n_col-1], with p [0] - being the first column, and p [n_col-1] being the last. It doesn't look - like it at first glance, but be assured that this routine takes time linear - in the number of columns. Although not immediately obvious, the time - taken by this routine is O (n_col), that is, linear in the number of - columns. Not user-callable. -*/ - -PRIVATE void order_children -( - /* === Parameters ======================================================= */ - - int n_col, /* number of columns of A */ - Colamd_Col Col [], /* of size n_col+1 */ - int p [] /* p [0 ... n_col-1] is the column permutation*/ -) -{ - /* === Local variables ================================================== */ - - int i ; /* loop counter for all columns */ - int c ; /* column index */ - int parent ; /* index of column's parent */ - int order ; /* column's order */ - - /* === Order each non-principal column ================================== */ - - for (i = 0 ; i < n_col ; i++) - { - /* find an un-ordered non-principal column */ - ASSERT (COL_IS_DEAD (i)) ; - if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == EMPTY) - { - parent = i ; - /* once found, find its principal parent */ - do - { - parent = Col [parent].shared1.parent ; - } while (!COL_IS_DEAD_PRINCIPAL (parent)) ; - - /* now, order all un-ordered non-principal columns along path */ - /* to this parent. 
collapse tree at the same time */ - c = i ; - /* get order of parent */ - order = Col [parent].shared2.order ; - - do - { - ASSERT (Col [c].shared2.order == EMPTY) ; - - /* order this column */ - Col [c].shared2.order = order++ ; - /* collaps tree */ - Col [c].shared1.parent = parent ; - - /* get immediate parent of this column */ - c = Col [c].shared1.parent ; - - /* continue until we hit an ordered column. There are */ - /* guarranteed not to be anymore unordered columns */ - /* above an ordered column */ - } while (Col [c].shared2.order == EMPTY) ; - - /* re-order the super_col parent to largest order for this group */ - Col [parent].shared2.order = order ; - } - } - - /* === Generate the permutation ========================================= */ - - for (c = 0 ; c < n_col ; c++) - { - p [Col [c].shared2.order] = c ; - } -} - - -/* ========================================================================== */ -/* === detect_super_cols ==================================================== */ -/* ========================================================================== */ - -/* - Detects supercolumns by finding matches between columns in the hash buckets. - Check amongst columns in the set A [row_start ... row_start + row_length-1]. - The columns under consideration are currently *not* in the degree lists, - and have already been placed in the hash buckets. - - The hash bucket for columns whose hash function is equal to h is stored - as follows: - - if head [h] is >= 0, then head [h] contains a degree list, so: - - head [h] is the first column in degree bucket h. - Col [head [h]].headhash gives the first column in hash bucket h. - - otherwise, the degree list is empty, and: - - -(head [h] + 2) is the first column in hash bucket h. - - For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous - column" pointer. Col [c].shared3.hash is used instead as the hash number - for that column. 
The value of Col [c].shared4.hash_next is the next column - in the same hash bucket. - - Assuming no, or "few" hash collisions, the time taken by this routine is - linear in the sum of the sizes (lengths) of each column whose score has - just been computed in the approximate degree computation. - Not user-callable. -*/ - -PRIVATE void detect_super_cols -( - /* === Parameters ======================================================= */ - -#ifndef NDEBUG - /* these two parameters are only needed when debugging is enabled: */ - int n_col, /* number of columns of A */ - Colamd_Row Row [], /* of size n_row+1 */ -#endif /* NDEBUG */ - - Colamd_Col Col [], /* of size n_col+1 */ - int A [], /* row indices of A */ - int head [], /* head of degree lists and hash buckets */ - int row_start, /* pointer to set of columns to check */ - int row_length /* number of columns to check */ -) -{ - /* === Local variables ================================================== */ - - int hash ; /* hash value for a column */ - int *rp ; /* pointer to a row */ - int c ; /* a column index */ - int super_c ; /* column index of the column to absorb into */ - int *cp1 ; /* column pointer for column super_c */ - int *cp2 ; /* column pointer for column c */ - int length ; /* length of column super_c */ - int prev_c ; /* column preceding c in hash bucket */ - int i ; /* loop counter */ - int *rp_end ; /* pointer to the end of the row */ - int col ; /* a column index in the row to check */ - int head_column ; /* first column in hash bucket or degree list */ - int first_col ; /* first column in hash bucket */ - - /* === Consider each column in the row ================================== */ - - rp = &A [row_start] ; - rp_end = rp + row_length ; - while (rp < rp_end) - { - col = *rp++ ; - if (COL_IS_DEAD (col)) - { - continue ; - } - - /* get hash number for this column */ - hash = Col [col].shared3.hash ; - ASSERT (hash <= n_col) ; - - /* === Get the first column in this hash bucket ===================== */ 
- - head_column = head [hash] ; - if (head_column > EMPTY) - { - first_col = Col [head_column].shared3.headhash ; - } - else - { - first_col = - (head_column + 2) ; - } - - /* === Consider each column in the hash bucket ====================== */ - - for (super_c = first_col ; super_c != EMPTY ; - super_c = Col [super_c].shared4.hash_next) - { - ASSERT (COL_IS_ALIVE (super_c)) ; - ASSERT (Col [super_c].shared3.hash == hash) ; - length = Col [super_c].length ; - - /* prev_c is the column preceding column c in the hash bucket */ - prev_c = super_c ; - - /* === Compare super_c with all columns after it ================ */ - - for (c = Col [super_c].shared4.hash_next ; - c != EMPTY ; c = Col [c].shared4.hash_next) - { - ASSERT (c != super_c) ; - ASSERT (COL_IS_ALIVE (c)) ; - ASSERT (Col [c].shared3.hash == hash) ; - - /* not identical if lengths or scores are different */ - if (Col [c].length != length || - Col [c].shared2.score != Col [super_c].shared2.score) - { - prev_c = c ; - continue ; - } - - /* compare the two columns */ - cp1 = &A [Col [super_c].start] ; - cp2 = &A [Col [c].start] ; - - for (i = 0 ; i < length ; i++) - { - /* the columns are "clean" (no dead rows) */ - ASSERT (ROW_IS_ALIVE (*cp1)) ; - ASSERT (ROW_IS_ALIVE (*cp2)) ; - /* row indices will same order for both supercols, */ - /* no gather scatter nessasary */ - if (*cp1++ != *cp2++) - { - break ; - } - } - - /* the two columns are different if the for-loop "broke" */ - if (i != length) - { - prev_c = c ; - continue ; - } - - /* === Got it! 
two columns are identical =================== */ - - ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ; - - Col [super_c].shared1.thickness += Col [c].shared1.thickness ; - Col [c].shared1.parent = super_c ; - KILL_NON_PRINCIPAL_COL (c) ; - /* order c later, in order_children() */ - Col [c].shared2.order = EMPTY ; - /* remove c from hash bucket */ - Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ; - } - } - - /* === Empty this hash bucket ======================================= */ - - if (head_column > EMPTY) - { - /* corresponding degree list "hash" is not empty */ - Col [head_column].shared3.headhash = EMPTY ; - } - else - { - /* corresponding degree list "hash" is empty */ - head [hash] = EMPTY ; - } - } -} - - -/* ========================================================================== */ -/* === garbage_collection =================================================== */ -/* ========================================================================== */ - -/* - Defragments and compacts columns and rows in the workspace A. Used when - all avaliable memory has been used while performing row merging. Returns - the index of the first free position in A, after garbage collection. The - time taken by this routine is linear is the size of the array A, which is - itself linear in the number of nonzeros in the input matrix. - Not user-callable. -*/ - -PRIVATE int garbage_collection /* returns the new value of pfree */ -( - /* === Parameters ======================================================= */ - - int n_row, /* number of rows */ - int n_col, /* number of columns */ - Colamd_Row Row [], /* row info */ - Colamd_Col Col [], /* column info */ - int A [], /* A [0 ... Alen-1] holds the matrix */ - int *pfree /* &A [0] ... 
pfree is in use */ -) -{ - /* === Local variables ================================================== */ - - int *psrc ; /* source pointer */ - int *pdest ; /* destination pointer */ - int j ; /* counter */ - int r ; /* a row index */ - int c ; /* a column index */ - int length ; /* length of a row or column */ - -#ifndef NDEBUG - int debug_rows ; - DEBUG2 (("Defrag..\n")) ; - for (psrc = &A[0] ; psrc < pfree ; psrc++) ASSERT (*psrc >= 0) ; - debug_rows = 0 ; -#endif /* NDEBUG */ - - /* === Defragment the columns =========================================== */ - - pdest = &A[0] ; - for (c = 0 ; c < n_col ; c++) - { - if (COL_IS_ALIVE (c)) - { - psrc = &A [Col [c].start] ; - - /* move and compact the column */ - ASSERT (pdest <= psrc) ; - Col [c].start = (int) (pdest - &A [0]) ; - length = Col [c].length ; - for (j = 0 ; j < length ; j++) - { - r = *psrc++ ; - if (ROW_IS_ALIVE (r)) - { - *pdest++ = r ; - } - } - Col [c].length = (int) (pdest - &A [Col [c].start]) ; - } - } - - /* === Prepare to defragment the rows =================================== */ - - for (r = 0 ; r < n_row ; r++) - { - if (ROW_IS_ALIVE (r)) - { - if (Row [r].length == 0) - { - /* this row is of zero length. cannot compact it, so kill it */ - DEBUG3 (("Defrag row kill\n")) ; - KILL_ROW (r) ; - } - else - { - /* save first column index in Row [r].shared2.first_column */ - psrc = &A [Row [r].start] ; - Row [r].shared2.first_column = *psrc ; - ASSERT (ROW_IS_ALIVE (r)) ; - /* flag the start of the row with the one's complement of row */ - *psrc = ONES_COMPLEMENT (r) ; - -#ifndef NDEBUG - debug_rows++ ; -#endif /* NDEBUG */ - - } - } - } - - /* === Defragment the rows ============================================== */ - - psrc = pdest ; - while (psrc < pfree) - { - /* find a negative number ... 
the start of a row */ - if (*psrc++ < 0) - { - psrc-- ; - /* get the row index */ - r = ONES_COMPLEMENT (*psrc) ; - ASSERT (r >= 0 && r < n_row) ; - /* restore first column index */ - *psrc = Row [r].shared2.first_column ; - ASSERT (ROW_IS_ALIVE (r)) ; - - /* move and compact the row */ - ASSERT (pdest <= psrc) ; - Row [r].start = (int) (pdest - &A [0]) ; - length = Row [r].length ; - for (j = 0 ; j < length ; j++) - { - c = *psrc++ ; - if (COL_IS_ALIVE (c)) - { - *pdest++ = c ; - } - } - Row [r].length = (int) (pdest - &A [Row [r].start]) ; - -#ifndef NDEBUG - debug_rows-- ; -#endif /* NDEBUG */ - - } - } - /* ensure we found all the rows */ - ASSERT (debug_rows == 0) ; - - /* === Return the new value of pfree ==================================== */ - - return ((int) (pdest - &A [0])) ; -} - - -/* ========================================================================== */ -/* === clear_mark =========================================================== */ -/* ========================================================================== */ - -/* - Clears the Row [].shared2.mark array, and returns the new tag_mark. - Return value is the new tag_mark. Not user-callable. -*/ - -PRIVATE int clear_mark /* return the new value for tag_mark */ -( - /* === Parameters ======================================================= */ - - int n_row, /* number of rows in A */ - Colamd_Row Row [] /* Row [0 ... 
n_row-1].shared2.mark is set to zero */ -) -{ - /* === Local variables ================================================== */ - - int r ; - - for (r = 0 ; r < n_row ; r++) - { - if (ROW_IS_ALIVE (r)) - { - Row [r].shared2.mark = 0 ; - } - } - return (1) ; -} - - -/* ========================================================================== */ -/* === print_report ========================================================= */ -/* ========================================================================== */ - -PRIVATE void print_report -( - char *method, - int stats [COLAMD_STATS] -) -{ - - int i1, i2, i3 ; - - if (!stats) - { - PRINTF ("%s: No statistics available.\n", method) ; - return ; - } - - i1 = stats [COLAMD_INFO1] ; - i2 = stats [COLAMD_INFO2] ; - i3 = stats [COLAMD_INFO3] ; - - if (stats [COLAMD_STATUS] >= 0) - { - PRINTF ("%s: OK. ", method) ; - } - else - { - PRINTF ("%s: ERROR. ", method) ; - } - - switch (stats [COLAMD_STATUS]) - { - - case COLAMD_OK_BUT_JUMBLED: - - PRINTF ("Matrix has unsorted or duplicate row indices.\n") ; - - PRINTF ("%s: number of duplicate or out-of-order row indices: %d\n", - method, i3) ; - - PRINTF ("%s: last seen duplicate or out-of-order row index: %d\n", - method, INDEX (i2)) ; - - PRINTF ("%s: last seen in column: %d", - method, INDEX (i1)) ; - - /* no break - fall through to next case instead */ - - case COLAMD_OK: - - PRINTF ("\n") ; - - PRINTF ("%s: number of dense or empty rows ignored: %d\n", - method, stats [COLAMD_DENSE_ROW]) ; - - PRINTF ("%s: number of dense or empty columns ignored: %d\n", - method, stats [COLAMD_DENSE_COL]) ; - - PRINTF ("%s: number of garbage collections performed: %d\n", - method, stats [COLAMD_DEFRAG_COUNT]) ; - break ; - - case COLAMD_ERROR_A_not_present: - - PRINTF ("Array A (row indices of matrix) not present.\n") ; - break ; - - case COLAMD_ERROR_p_not_present: - - PRINTF ("Array p (column pointers for matrix) not present.\n") ; - break ; - - case COLAMD_ERROR_nrow_negative: - - PRINTF 
("Invalid number of rows (%d).\n", i1) ; - break ; - - case COLAMD_ERROR_ncol_negative: - - PRINTF ("Invalid number of columns (%d).\n", i1) ; - break ; - - case COLAMD_ERROR_nnz_negative: - - PRINTF ("Invalid number of nonzero entries (%d).\n", i1) ; - break ; - - case COLAMD_ERROR_p0_nonzero: - - PRINTF ("Invalid column pointer, p [0] = %d, must be zero.\n", i1) ; - break ; - - case COLAMD_ERROR_A_too_small: - - PRINTF ("Array A too small.\n") ; - PRINTF (" Need Alen >= %d, but given only Alen = %d.\n", - i1, i2) ; - break ; - - case COLAMD_ERROR_col_length_negative: - - PRINTF - ("Column %d has a negative number of nonzero entries (%d).\n", - INDEX (i1), i2) ; - break ; - - case COLAMD_ERROR_row_index_out_of_bounds: - - PRINTF - ("Row index (row %d) out of bounds (%d to %d) in column %d.\n", - INDEX (i2), INDEX (0), INDEX (i3-1), INDEX (i1)) ; - break ; - - case COLAMD_ERROR_out_of_memory: - - PRINTF ("Out of memory.\n") ; - break ; - - case COLAMD_ERROR_internal_error: - - /* if this happens, there is a bug in the code */ - PRINTF - ("Internal error! Please contact authors (davis@cise.ufl.edu).\n") ; - break ; - } -} - - - - -/* ========================================================================== */ -/* === colamd debugging routines ============================================ */ -/* ========================================================================== */ - -/* When debugging is disabled, the remainder of this file is ignored. */ - -#ifndef NDEBUG - - -/* ========================================================================== */ -/* === debug_structures ===================================================== */ -/* ========================================================================== */ - -/* - At this point, all empty rows and columns are dead. All live columns - are "clean" (containing no dead rows) and simplicial (no supercolumns - yet). Rows may contain dead columns, but all live rows contain at - least one live column. 
-*/ - -PRIVATE void debug_structures -( - /* === Parameters ======================================================= */ - - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [], - int n_col2 -) -{ - /* === Local variables ================================================== */ - - int i ; - int c ; - int *cp ; - int *cp_end ; - int len ; - int score ; - int r ; - int *rp ; - int *rp_end ; - int deg ; - - /* === Check A, Row, and Col ============================================ */ - - for (c = 0 ; c < n_col ; c++) - { - if (COL_IS_ALIVE (c)) - { - len = Col [c].length ; - score = Col [c].shared2.score ; - DEBUG4 (("initial live col %5d %5d %5d\n", c, len, score)) ; - ASSERT (len > 0) ; - ASSERT (score >= 0) ; - ASSERT (Col [c].shared1.thickness == 1) ; - cp = &A [Col [c].start] ; - cp_end = cp + len ; - while (cp < cp_end) - { - r = *cp++ ; - ASSERT (ROW_IS_ALIVE (r)) ; - } - } - else - { - i = Col [c].shared2.order ; - ASSERT (i >= n_col2 && i < n_col) ; - } - } - - for (r = 0 ; r < n_row ; r++) - { - if (ROW_IS_ALIVE (r)) - { - i = 0 ; - len = Row [r].length ; - deg = Row [r].shared1.degree ; - ASSERT (len > 0) ; - ASSERT (deg > 0) ; - rp = &A [Row [r].start] ; - rp_end = rp + len ; - while (rp < rp_end) - { - c = *rp++ ; - if (COL_IS_ALIVE (c)) - { - i++ ; - } - } - ASSERT (i > 0) ; - } - } -} - - -/* ========================================================================== */ -/* === debug_deg_lists ====================================================== */ -/* ========================================================================== */ - -/* - Prints the contents of the degree lists. Counts the number of columns - in the degree list and compares it to the total it should have. Also - checks the row degrees. 
-*/ - -PRIVATE void debug_deg_lists -( - /* === Parameters ======================================================= */ - - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int head [], - int min_score, - int should, - int max_deg -) -{ - /* === Local variables ================================================== */ - - int deg ; - int col ; - int have ; - int row ; - - /* === Check the degree lists =========================================== */ - - if (n_col > 10000 && colamd_debug <= 0) - { - return ; - } - have = 0 ; - DEBUG4 (("Degree lists: %d\n", min_score)) ; - for (deg = 0 ; deg <= n_col ; deg++) - { - col = head [deg] ; - if (col == EMPTY) - { - continue ; - } - DEBUG4 (("%d:", deg)) ; - while (col != EMPTY) - { - DEBUG4 ((" %d", col)) ; - have += Col [col].shared1.thickness ; - ASSERT (COL_IS_ALIVE (col)) ; - col = Col [col].shared4.degree_next ; - } - DEBUG4 (("\n")) ; - } - DEBUG4 (("should %d have %d\n", should, have)) ; - ASSERT (should == have) ; - - /* === Check the row degrees ============================================ */ - - if (n_row > 10000 && colamd_debug <= 0) - { - return ; - } - for (row = 0 ; row < n_row ; row++) - { - if (ROW_IS_ALIVE (row)) - { - ASSERT (Row [row].shared1.degree <= max_deg) ; - } - } -} - - -/* ========================================================================== */ -/* === debug_mark =========================================================== */ -/* ========================================================================== */ - -/* - Ensures that the tag_mark is less that the maximum and also ensures that - each entry in the mark array is less than the tag mark. 
-*/ - -PRIVATE void debug_mark -( - /* === Parameters ======================================================= */ - - int n_row, - Colamd_Row Row [], - int tag_mark, - int max_mark -) -{ - /* === Local variables ================================================== */ - - int r ; - - /* === Check the Row marks ============================================== */ - - ASSERT (tag_mark > 0 && tag_mark <= max_mark) ; - if (n_row > 10000 && colamd_debug <= 0) - { - return ; - } - for (r = 0 ; r < n_row ; r++) - { - ASSERT (Row [r].shared2.mark < tag_mark) ; - } -} - - -/* ========================================================================== */ -/* === debug_matrix ========================================================= */ -/* ========================================================================== */ - -/* - Prints out the contents of the columns and the rows. -*/ - -PRIVATE void debug_matrix -( - /* === Parameters ======================================================= */ - - int n_row, - int n_col, - Colamd_Row Row [], - Colamd_Col Col [], - int A [] -) -{ - /* === Local variables ================================================== */ - - int r ; - int c ; - int *rp ; - int *rp_end ; - int *cp ; - int *cp_end ; - - /* === Dump the rows and columns of the matrix ========================== */ - - if (colamd_debug < 3) - { - return ; - } - DEBUG3 (("DUMP MATRIX:\n")) ; - for (r = 0 ; r < n_row ; r++) - { - DEBUG3 (("Row %d alive? %d\n", r, ROW_IS_ALIVE (r))) ; - if (ROW_IS_DEAD (r)) - { - continue ; - } - DEBUG3 (("start %d length %d degree %d\n", - Row [r].start, Row [r].length, Row [r].shared1.degree)) ; - rp = &A [Row [r].start] ; - rp_end = rp + Row [r].length ; - while (rp < rp_end) - { - c = *rp++ ; - DEBUG4 ((" %d col %d\n", COL_IS_ALIVE (c), c)) ; - } - } - - for (c = 0 ; c < n_col ; c++) - { - DEBUG3 (("Col %d alive? 
%d\n", c, COL_IS_ALIVE (c))) ; - if (COL_IS_DEAD (c)) - { - continue ; - } - DEBUG3 (("start %d length %d shared1 %d shared2 %d\n", - Col [c].start, Col [c].length, - Col [c].shared1.thickness, Col [c].shared2.score)) ; - cp = &A [Col [c].start] ; - cp_end = cp + Col [c].length ; - while (cp < cp_end) - { - r = *cp++ ; - DEBUG4 ((" %d row %d\n", ROW_IS_ALIVE (r), r)) ; - } - } -} - -PRIVATE void colamd_get_debug -( - char *method -) -{ - colamd_debug = 0 ; /* no debug printing */ - - /* get "D" environment variable, which gives the debug printing level */ - if (getenv ("D")) - { - colamd_debug = atoi (getenv ("D")) ; - } - - DEBUG0 (("%s: debug version, D = %d (THIS WILL BE SLOW!)\n", - method, colamd_debug)) ; -} - -#endif /* NDEBUG */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/colamd.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/colamd.h deleted file mode 100644 index aacbd3f529..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/colamd.h +++ /dev/null @@ -1,249 +0,0 @@ -/*! @file colamd.h - \brief Colamd prototypes and definitions - -
     
    -    ==========================================================================
    -    === colamd/symamd prototypes and definitions =============================
    -    ==========================================================================
    -
    -    You must include this file (colamd.h) in any routine that uses colamd,
    -    symamd, or the related macros and definitions.
    -
    -    Authors:
    -
    -	The authors of the code itself are Stefan I. Larimore and Timothy A.
    -	Davis (davis@cise.ufl.edu), University of Florida.  The algorithm was
    -	developed in collaboration with John Gilbert, Xerox PARC, and Esmond
    -	Ng, Oak Ridge National Laboratory.
    -
    -    Date:
    -
    -	September 8, 2003.  Version 2.3.
    -
    -    Acknowledgements:
    -
    -	This work was supported by the National Science Foundation, under
    -	grants DMS-9504974 and DMS-9803599.
    -
    -    Notice:
    -
    -	Copyright (c) 1998-2003 by the University of Florida.
    -	All Rights Reserved.
    -
    -	THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    -	EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    -
    -	Permission is hereby granted to use, copy, modify, and/or distribute
    -	this program, provided that the Copyright, this License, and the
    -	Availability of the original version is retained on all copies and made
    -	accessible to the end-user of any code or package that includes COLAMD
    -	or any modified version of COLAMD. 
    -
    -    Availability:
    -
    -	The colamd/symamd library is available at
    -
    -	    http://www.cise.ufl.edu/research/sparse/colamd/
    -
    -	This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
    -	file.  It is required by the colamd.c, colamdmex.c, and symamdmex.c
    -	files, and by any C code that calls the routines whose prototypes are
    -	listed below, or that uses the colamd/symamd definitions listed below.
    - 
    -*/ - -#ifndef COLAMD_H -#define COLAMD_H - -/* ========================================================================== */ -/* === Include files ======================================================== */ -/* ========================================================================== */ - -#include - -/* ========================================================================== */ -/* === Knob and statistics definitions ====================================== */ -/* ========================================================================== */ - -/* size of the knobs [ ] array. Only knobs [0..1] are currently used. */ -#define COLAMD_KNOBS 20 - -/* number of output statistics. Only stats [0..6] are currently used. */ -#define COLAMD_STATS 20 - -/* knobs [0] and stats [0]: dense row knob and output statistic. */ -#define COLAMD_DENSE_ROW 0 - -/* knobs [1] and stats [1]: dense column knob and output statistic. */ -#define COLAMD_DENSE_COL 1 - -/* stats [2]: memory defragmentation count output statistic */ -#define COLAMD_DEFRAG_COUNT 2 - -/* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */ -#define COLAMD_STATUS 3 - -/* stats [4..6]: error info, or info on jumbled columns */ -#define COLAMD_INFO1 4 -#define COLAMD_INFO2 5 -#define COLAMD_INFO3 6 - -/* error codes returned in stats [3]: */ -#define COLAMD_OK (0) -#define COLAMD_OK_BUT_JUMBLED (1) -#define COLAMD_ERROR_A_not_present (-1) -#define COLAMD_ERROR_p_not_present (-2) -#define COLAMD_ERROR_nrow_negative (-3) -#define COLAMD_ERROR_ncol_negative (-4) -#define COLAMD_ERROR_nnz_negative (-5) -#define COLAMD_ERROR_p0_nonzero (-6) -#define COLAMD_ERROR_A_too_small (-7) -#define COLAMD_ERROR_col_length_negative (-8) -#define COLAMD_ERROR_row_index_out_of_bounds (-9) -#define COLAMD_ERROR_out_of_memory (-10) -#define COLAMD_ERROR_internal_error (-999) - -/* ========================================================================== */ -/* === Row and Column structures 
============================================ */ -/* ========================================================================== */ - -/* User code that makes use of the colamd/symamd routines need not directly */ -/* reference these structures. They are used only for the COLAMD_RECOMMENDED */ -/* macro. */ - -typedef struct Colamd_Col_struct -{ - int start ; /* index for A of first row in this column, or DEAD */ - /* if column is dead */ - int length ; /* number of rows in this column */ - union - { - int thickness ; /* number of original columns represented by this */ - /* col, if the column is alive */ - int parent ; /* parent in parent tree super-column structure, if */ - /* the column is dead */ - } shared1 ; - union - { - int score ; /* the score used to maintain heap, if col is alive */ - int order ; /* pivot ordering of this column, if col is dead */ - } shared2 ; - union - { - int headhash ; /* head of a hash bucket, if col is at the head of */ - /* a degree list */ - int hash ; /* hash value, if col is not in a degree list */ - int prev ; /* previous column in degree list, if col is in a */ - /* degree list (but not at the head of a degree list) */ - } shared3 ; - union - { - int degree_next ; /* next column, if col is in a degree list */ - int hash_next ; /* next column, if col is in a hash list */ - } shared4 ; - -} Colamd_Col ; - -typedef struct Colamd_Row_struct -{ - int start ; /* index for A of first col in this row */ - int length ; /* number of principal columns in this row */ - union - { - int degree ; /* number of principal & non-principal columns in row */ - int p ; /* used as a row pointer in init_rows_cols () */ - } shared1 ; - union - { - int mark ; /* for computing set differences and marking dead rows*/ - int first_column ;/* first column in row (used in garbage collection) */ - } shared2 ; - -} Colamd_Row ; - -/* ========================================================================== */ -/* === Colamd recommended memory size 
======================================= */ -/* ========================================================================== */ - -/* - The recommended length Alen of the array A passed to colamd is given by - the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro. It returns -1 if any - argument is negative. 2*nnz space is required for the row and column - indices of the matrix. COLAMD_C (n_col) + COLAMD_R (n_row) space is - required for the Col and Row arrays, respectively, which are internal to - colamd. An additional n_col space is the minimal amount of "elbow room", - and nnz/5 more space is recommended for run time efficiency. - - This macro is not needed when using symamd. - - Explicit typecast to int added Sept. 23, 2002, COLAMD version 2.2, to avoid - gcc -pedantic warning messages. -*/ - -#define COLAMD_C(n_col) ((int) (((n_col) + 1) * sizeof (Colamd_Col) / sizeof (int))) -#define COLAMD_R(n_row) ((int) (((n_row) + 1) * sizeof (Colamd_Row) / sizeof (int))) - -#define COLAMD_RECOMMENDED(nnz, n_row, n_col) \ -( \ -((nnz) < 0 || (n_row) < 0 || (n_col) < 0) \ -? 
\ - (-1) \ -: \ - (2 * (nnz) + COLAMD_C (n_col) + COLAMD_R (n_row) + (n_col) + ((nnz) / 5)) \ -) - -/* ========================================================================== */ -/* === Prototypes of user-callable routines ================================= */ -/* ========================================================================== */ - -int colamd_recommended /* returns recommended value of Alen, */ - /* or (-1) if input arguments are erroneous */ -( - int nnz, /* nonzeros in A */ - int n_row, /* number of rows in A */ - int n_col /* number of columns in A */ -) ; - -void colamd_set_defaults /* sets default parameters */ -( /* knobs argument is modified on output */ - double knobs [COLAMD_KNOBS] /* parameter settings for colamd */ -) ; - -int colamd /* returns (1) if successful, (0) otherwise*/ -( /* A and p arguments are modified on output */ - int n_row, /* number of rows in A */ - int n_col, /* number of columns in A */ - int Alen, /* size of the array A */ - int A [], /* row indices of A, of size Alen */ - int p [], /* column pointers of A, of size n_col+1 */ - double knobs [COLAMD_KNOBS],/* parameter settings for colamd */ - int stats [COLAMD_STATS] /* colamd output statistics and error codes */ -) ; - -int symamd /* return (1) if OK, (0) otherwise */ -( - int n, /* number of rows and columns of A */ - int A [], /* row indices of A */ - int p [], /* column pointers of A */ - int perm [], /* output permutation, size n_col+1 */ - double knobs [COLAMD_KNOBS], /* parameters (uses defaults if NULL) */ - int stats [COLAMD_STATS], /* output statistics and error codes */ - void * (*allocate) (size_t, size_t), - /* pointer to calloc (ANSI C) or */ - /* mxCalloc (for MATLAB mexFunction) */ - void (*release) (void *) - /* pointer to free (ANSI C) or */ - /* mxFree (for MATLAB mexFunction) */ -) ; - -void colamd_report -( - int stats [COLAMD_STATS] -) ; - -void symamd_report -( - int stats [COLAMD_STATS] -) ; - -#endif /* COLAMD_H */ diff --git 
a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpanel_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpanel_bmod.c deleted file mode 100644 index 7432c2ba04..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpanel_bmod.c +++ /dev/null @@ -1,487 +0,0 @@ - -/*! @file cpanel_bmod.c - * \brief Performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ -/* - -*/ - -#include -#include -#include "slu_cdefs.h" - -/* - * Function prototypes - */ -void clsolve(int, int, complex *, complex *); -void cmatvec(int, int, int, complex *, complex *, complex *); -extern void ccheck_tempv(); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *    Performs numeric block updates (sup-panel) in topological order.
    - *    It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - *    Special processing on the supernodal portion of L\U[*,j]
    - *
    - *    Before entering this routine, the original nonzeros in the panel 
    - *    were already copied into the spa[m,w].
    - *
    - *    Updated/Output parameters-
    - *    dense[0:m-1,w]: L[*,j:j+w-1] and U[*,j:j+w-1] are returned 
    - *    collectively in the m-by-w vector dense[*]. 
    - * 
    - */ - -void -cpanel_bmod ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - const int nseg, /* in */ - complex *dense, /* out, of size n by w */ - complex *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in, of size n by w */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - complex alpha, beta; -#endif - - register int k, ksub; - int fsupc, nsupc, nsupr, nrow; - int krep, krep_ind; - complex ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int segsze; - int block_nrow; /* no of rows in a block row */ - register int lptr; /* Points to the row subscripts of a supernode */ - int kfnz, irow, no_zeros; - register int isub, isub1, i; - register int jj; /* Index through each column in the panel */ - int *xsup, *supno; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - int *repfnz_col; /* repfnz[] for a column in the panel */ - complex *dense_col; /* dense[] for a column in the panel */ - complex *tempv1; /* Used in 1-D update */ - complex *TriTmp, *MatvecTmp; /* used in 2-D update */ - complex zero = {0.0, 0.0}; - complex one = {1.0, 0.0}; - complex comp_temp, comp_temp1; - register int ldaTmp; - register int r_ind, r_hi; - static int first = 1, maxsuper, rowblk, colblk; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - if ( first ) { - maxsuper = sp_ienv(3); - rowblk = sp_ienv(4); - colblk = sp_ienv(5); - first = 0; - } - ldaTmp = maxsuper + rowblk; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { /* for each updating supernode */ - - /* krep = representative of 
current k-th supernode - * fsupc = first supernodal column - * nsupc = no of columns in a supernode - * nsupr = no of rows in a supernode - */ - krep = segrep[k--]; - fsupc = xsup[supno[krep]]; - nsupc = krep - fsupc + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nrow = nsupr - nsupc; - lptr = xlsub[fsupc]; - krep_ind = lptr + nsupc - 1; - - repfnz_col = repfnz; - dense_col = dense; - - if ( nsupc >= colblk && nrow > rowblk ) { /* 2-D block update */ - - TriTmp = tempv; - - /* Sequence through each column in panel -- triangular solves */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp ) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += 4 * segsze * (segsze - 1); - ops[GEMV] += 8 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - c_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - cc_mult(&comp_temp, &ukj1, &lusup[luptr1]); - c_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - cc_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - cc_mult(&comp_temp, &ukj2, &lusup[luptr2-1]); - c_sub(&ukj1, &ukj1, &comp_temp); - - cc_mult(&comp_temp, &ukj1, 
&lusup[luptr1]); - cc_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; luptr2++; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - cc_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - cc_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } - - } else { /* segsze >= 4 */ - - /* Copy U[*,j] segment from dense[*] to TriTmp[*], which - holds the result of triangular solves. */ - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - TriTmp[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#else - ctrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#endif -#else - clsolve ( nsupr, segsze, &lusup[luptr], TriTmp ); -#endif - - - } /* else ... */ - - } /* for jj ... 
end tri-solves */ - - /* Block row updates; push all the way into dense[*] block */ - for ( r_ind = 0; r_ind < nrow; r_ind += rowblk ) { - - r_hi = SUPERLU_MIN(nrow, r_ind + rowblk); - block_nrow = SUPERLU_MIN(rowblk, r_hi - r_ind); - luptr = xlusup[fsupc] + nsupc + r_ind; - isub1 = lptr + nsupc + r_ind; - - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - /* Sequence through each column in panel -- matrix-vector */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - /* Perform a block update, and scatter the result of - matrix-vector to dense[]. */ - no_zeros = kfnz - fsupc; - luptr1 = luptr + nsupr * no_zeros; - MatvecTmp = &TriTmp[maxsuper]; - -#ifdef USE_VENDOR_BLAS - alpha = one; - beta = zero; -#ifdef _CRAY - CGEMV(ftcs2, &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#else - cgemv_("N", &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#endif -#else - cmatvec(nsupr, block_nrow, segsze, &lusup[luptr1], - TriTmp, MatvecTmp); -#endif - - /* Scatter MatvecTmp[*] into SPA dense[*] temporarily - * such that MatvecTmp[*] can be re-used for the - * the next blok row update. dense[] will be copied into - * global store after the whole panel has been finished. - */ - isub = isub1; - for (i = 0; i < block_nrow; i++) { - irow = lsub[isub]; - c_sub(&dense_col[irow], &dense_col[irow], - &MatvecTmp[i]); - MatvecTmp[i] = zero; - ++isub; - } - - } /* for jj ... */ - - } /* for each block row ... 
*/ - - /* Scatter the triangular solves into SPA dense[*] */ - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = TriTmp[i]; - TriTmp[i] = zero; - ++isub; - } - - } /* for jj ... */ - - } else { /* 1-D block modification */ - - - /* Sequence through each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += 4 * segsze * (segsze - 1); - ops[GEMV] += 8 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - c_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - cc_mult(&comp_temp, &ukj1, &lusup[luptr1]); - c_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - cc_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 
- nsupr; - cc_mult(&comp_temp, &ukj2, &lusup[luptr2-1]); - c_sub(&ukj1, &ukj1, &comp_temp); - - cc_mult(&comp_temp, &ukj1, &lusup[luptr1]); - cc_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; ++luptr2; - cc_mult(&comp_temp, &ukj, &lusup[luptr]); - cc_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - cc_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - c_add(&comp_temp, &comp_temp, &comp_temp1); - c_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } - - } else { /* segsze >= 4 */ - /* - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense[]. - */ - no_zeros = kfnz - fsupc; - - /* Copy U[*,j] segment from dense[*] to tempv[*]: - * The result of triangular solve is in tempv[*]; - * The result of matrix vector update is in dense_col[*] - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - tempv[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - ctrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - CGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - cgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - clsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - cmatvec (nsupr, nrow, 
segsze, &lusup[luptr], tempv, tempv1); -#endif - - /* Scatter tempv[*] into SPA dense[*] temporarily, such - * that tempv[*] can be used for the triangular solve of - * the next column of the panel. They will be copied into - * ucol[*] after the whole panel has been finished. - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = tempv[i]; - tempv[i] = zero; - isub++; - } - - /* Scatter the update from tempv1[*] into SPA dense[*] */ - /* Start dense rectangular L */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - c_sub(&dense_col[irow], &dense_col[irow], &tempv1[i]); - tempv1[i] = zero; - ++isub; - } - - } /* else segsze>=4 ... */ - - } /* for each column in the panel... */ - - } /* else 1-D update ... */ - - } /* for each updating supernode ... */ - -} - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpanel_dfs.c deleted file mode 100644 index 57a603ef8f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpanel_dfs.c +++ /dev/null @@ -1,254 +0,0 @@ - -/*! @file cpanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel. 
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a 
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ - -void -cpanel_dfs ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - complex *dense, /* out */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *xprune, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - NCPformat *Astore; - complex *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - complex *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - - /* For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - 
panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... 
*/ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpivotL.c deleted file mode 100644 index c7cc88e195..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpivotL.c +++ /dev/null @@ -1,196 +0,0 @@ - -/*! @file cpivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include -#include "slu_cdefs.h" - -#undef DEBUG - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *           pivot row = k;
    - *       ELSE IF abs(A_jj) >= thresh THEN
    - *           pivot row = j;
    - *       ELSE
    - *           pivot row = m;
    - * 
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0      success;
    - *                 i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -cpivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int *iperm_r, /* in - inverse of perm_r */ - int *iperm_c, /* in - used to find diagonal of Pc*A*Pc' */ - int *pivrow, /* out */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - complex one = {1.0, 0.0}; - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - int pivptr, old_pivptr, diag, diagind; - float pivmax, rtemp, thresh; - complex temp; - complex *lu_sup_ptr; - complex *lu_col_ptr; - int *lsub_ptr; - int isub, icol, k, itemp; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - /* Initialize pointers */ - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - -#ifdef DEBUG -if ( jcol == MIN_COL ) { - printf("Before cdiv: col %d\n", jcol); - for (k = nsupc; k < nsupr; k++) - printf(" lu[%d] %f\n", lsub_ptr[k], lu_col_ptr[k]); -} -#endif - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, and diagonal element. 
*/ - if ( *usepr ) *pivrow = iperm_r[jcol]; - diagind = iperm_c[jcol]; -#ifdef SCIPY_SPECIFIC_FIX - pivmax = -1.0; -#else - pivmax = 0.0; -#endif - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - for (isub = nsupc; isub < nsupr; ++isub) { - rtemp = slu_c_abs1 (&lu_col_ptr[isub]); - if ( rtemp > pivmax ) { - pivmax = rtemp; - pivptr = isub; - } - if ( *usepr && lsub_ptr[isub] == *pivrow ) old_pivptr = isub; - if ( lsub_ptr[isub] == diagind ) diag = isub; - } - - /* Test for singularity */ -#ifdef SCIPY_SPECIFIC_FIX - if (pivmax < 0.0) { - perm_r[diagind] = jcol; - *usepr = 0; - return (jcol+1); - } -#endif - if ( pivmax == 0.0 ) { -#if 1 - *pivrow = lsub_ptr[pivptr]; - perm_r[*pivrow] = jcol; -#else - perm_r[diagind] = jcol; -#endif - *usepr = 0; - return (jcol+1); - } - - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. */ - if ( *usepr ) { - rtemp = slu_c_abs1 (&lu_col_ptr[old_pivptr]); - if ( rtemp != 0.0 && rtemp >= thresh ) - pivptr = old_pivptr; - else - *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - rtemp = slu_c_abs1 (&lu_col_ptr[diag]); - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += 10 * (nsupr - nsupc); - - c_div(&temp, &one, &lu_col_ptr[nsupc]); - for (k = nsupc+1; k < nsupr; k++) - cc_mult(&lu_col_ptr[k], &lu_col_ptr[k], &temp); - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpivotgrowth.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpivotgrowth.c deleted file mode 100644 index b465f6db77..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpivotgrowth.c +++ /dev/null @@ -1,115 +0,0 @@ - -/*! @file cpivotgrowth.c - * \brief Computes the reciprocal pivot growth factor - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * Compute the reciprocal pivot growth factor of the leading ncols columns
    - * of the matrix, using the formula:
    - *     min_j ( max_i(abs(A_ij)) / max_i(abs(U_ij)) )
    - *
    - * Arguments
    - * =========
    - *
    - * ncols    (input) int
    - *          The number of columns of matrices A, L and U.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = NC; Dtype = SLU_C; Mtype = GE.
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SC; Dtype = SLU_C; Mtype = TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = NC;
    - *          Dtype = SLU_C; Mtype = TRU.
    - * 
    - */ - -float -cPivotGrowth(int ncols, SuperMatrix *A, int *perm_c, - SuperMatrix *L, SuperMatrix *U) -{ - - NCformat *Astore; - SCformat *Lstore; - NCformat *Ustore; - complex *Aval, *Lval, *Uval; - int fsupc, nsupr, luptr, nz_in_U; - int i, j, k, oldcol; - int *inv_perm_c; - float rpg, maxaj, maxuj; - extern double slamch_(char *); - float smlnum; - complex *luval; - complex temp_comp; - - /* Get machine constants. */ - smlnum = slamch_("S"); - rpg = 1. / smlnum; - - Astore = A->Store; - Lstore = L->Store; - Ustore = U->Store; - Aval = Astore->nzval; - Lval = Lstore->nzval; - Uval = Ustore->nzval; - - inv_perm_c = (int *) SUPERLU_MALLOC(A->ncol*sizeof(int)); - for (j = 0; j < A->ncol; ++j) inv_perm_c[perm_c[j]] = j; - - for (k = 0; k <= Lstore->nsuper; ++k) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - luptr = L_NZ_START(fsupc); - luval = &Lval[luptr]; - nz_in_U = 1; - - for (j = fsupc; j < L_FST_SUPC(k+1) && j < ncols; ++j) { - maxaj = 0.; - oldcol = inv_perm_c[j]; - for (i = Astore->colptr[oldcol]; i < Astore->colptr[oldcol+1]; ++i) - maxaj = SUPERLU_MAX( maxaj, slu_c_abs1( &Aval[i]) ); - - maxuj = 0.; - for (i = Ustore->colptr[j]; i < Ustore->colptr[j+1]; i++) - maxuj = SUPERLU_MAX( maxuj, slu_c_abs1( &Uval[i]) ); - - /* Supernode */ - for (i = 0; i < nz_in_U; ++i) - maxuj = SUPERLU_MAX( maxuj, slu_c_abs1( &luval[i]) ); - - ++nz_in_U; - luval += nsupr; - - if ( maxuj == 0. ) - rpg = SUPERLU_MIN( rpg, 1.); - else - rpg = SUPERLU_MIN( rpg, maxaj / maxuj ); - } - - if ( j >= ncols ) break; - } - - SUPERLU_FREE(inv_perm_c); - return (rpg); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpruneL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpruneL.c deleted file mode 100644 index 22a9ff5245..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cpruneL.c +++ /dev/null @@ -1,154 +0,0 @@ - -/*! @file cpruneL.c - * \brief Prunes the L-structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - *
    - */ - - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Prunes the L-structure of supernodes whose L-structure
    - *   contains the current pivot row "pivrow"
    - * 
    - */ - -void -cpruneL( - const int jcol, /* in */ - const int *perm_r, /* in */ - const int pivrow, /* in */ - const int nseg, /* in */ - const int *segrep, /* in */ - const int *repfnz, /* in */ - int *xprune, /* out */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - - complex utemp; - int jsupno, irep, irep1, kmin, kmax, krow, movnum; - int i, ktemp, minloc, maxloc; - int do_prune; /* logical variable */ - int *xsup, *supno; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - /* - * For each supernode-rep irep in U[*,j] - */ - jsupno = supno[jcol]; - for (i = 0; i < nseg; i++) { - - irep = segrep[i]; - irep1 = irep + 1; - do_prune = FALSE; - - /* Don't prune with a zero U-segment */ - if ( repfnz[irep] == EMPTY ) - continue; - - /* If a snode overlaps with the next panel, then the U-segment - * is fragmented into two parts -- irep and irep1. We should let - * pruning occur at the rep-column in irep1's snode. - */ - if ( supno[irep] == supno[irep1] ) /* Don't prune */ - continue; - - /* - * If it has not been pruned & it has a nonz in row L[pivrow,i] - */ - if ( supno[irep] != jsupno ) { - if ( xprune[irep] >= xlsub[irep1] ) { - kmin = xlsub[irep]; - kmax = xlsub[irep1] - 1; - for (krow = kmin; krow <= kmax; krow++) - if ( lsub[krow] == pivrow ) { - do_prune = TRUE; - break; - } - } - - if ( do_prune ) { - - /* Do a quicksort-type partition - * movnum=TRUE means that the num values have to be exchanged. 
- */ - movnum = FALSE; - if ( irep == xsup[supno[irep]] ) /* Snode of size 1 */ - movnum = TRUE; - - while ( kmin <= kmax ) { - - if ( perm_r[lsub[kmax]] == EMPTY ) - kmax--; - else if ( perm_r[lsub[kmin]] != EMPTY ) - kmin++; - else { /* kmin below pivrow (not yet pivoted), and kmax - * above pivrow: interchange the two subscripts - */ - ktemp = lsub[kmin]; - lsub[kmin] = lsub[kmax]; - lsub[kmax] = ktemp; - - /* If the supernode has only one column, then we - * only keep one set of subscripts. For any subscript - * interchange performed, similar interchange must be - * done on the numerical values. - */ - if ( movnum ) { - minloc = xlusup[irep] + (kmin - xlsub[irep]); - maxloc = xlusup[irep] + (kmax - xlsub[irep]); - utemp = lusup[minloc]; - lusup[minloc] = lusup[maxloc]; - lusup[maxloc] = utemp; - } - - kmin++; - kmax--; - - } - - } /* while */ - - xprune[irep] = kmin; /* Pruning */ - -#ifdef CHK_PRUNE - printf(" After cpruneL(),using col %d: xprune[%d] = %d\n", - jcol, irep, kmin); -#endif - } /* if do_prune */ - - } /* if */ - - } /* for each U-segment... */ -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/creadhb.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/creadhb.c deleted file mode 100644 index a01bdc588a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/creadhb.c +++ /dev/null @@ -1,267 +0,0 @@ - -/*! @file creadhb.c - * \brief Read a matrix stored in Harwell-Boeing format - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Purpose
    - * =======
    - * 
    - * Read a COMPLEX PRECISION matrix stored in Harwell-Boeing format 
    - * as described below.
    - * 
    - * Line 1 (A72,A8) 
    - *  	Col. 1 - 72   Title (TITLE) 
    - *	Col. 73 - 80  Key (KEY) 
    - * 
    - * Line 2 (5I14) 
    - * 	Col. 1 - 14   Total number of lines excluding header (TOTCRD) 
    - * 	Col. 15 - 28  Number of lines for pointers (PTRCRD) 
    - * 	Col. 29 - 42  Number of lines for row (or variable) indices (INDCRD) 
    - * 	Col. 43 - 56  Number of lines for numerical values (VALCRD) 
    - *	Col. 57 - 70  Number of lines for right-hand sides (RHSCRD) 
    - *                    (including starting guesses and solution vectors 
    - *		       if present) 
    - *           	      (zero indicates no right-hand side data is present) 
    - *
    - * Line 3 (A3, 11X, 4I14) 
    - *   	Col. 1 - 3    Matrix type (see below) (MXTYPE) 
    - * 	Col. 15 - 28  Number of rows (or variables) (NROW) 
    - * 	Col. 29 - 42  Number of columns (or elements) (NCOL) 
    - *	Col. 43 - 56  Number of row (or variable) indices (NNZERO) 
    - *	              (equal to number of entries for assembled matrices) 
    - * 	Col. 57 - 70  Number of elemental matrix entries (NELTVL) 
    - *	              (zero in the case of assembled matrices) 
    - * Line 4 (2A16, 2A20) 
    - * 	Col. 1 - 16   Format for pointers (PTRFMT) 
    - *	Col. 17 - 32  Format for row (or variable) indices (INDFMT) 
    - *	Col. 33 - 52  Format for numerical values of coefficient matrix (VALFMT) 
    - * 	Col. 53 - 72 Format for numerical values of right-hand sides (RHSFMT) 
    - *
    - * Line 5 (A3, 11X, 2I14) Only present if there are right-hand sides present 
    - *    	Col. 1 	      Right-hand side type: 
    - *	         	  F for full storage or M for same format as matrix 
    - *    	Col. 2        G if a starting vector(s) (Guess) is supplied. (RHSTYP) 
    - *    	Col. 3        X if an exact solution vector(s) is supplied. 
    - *	Col. 15 - 28  Number of right-hand sides (NRHS) 
    - *	Col. 29 - 42  Number of row indices (NRHSIX) 
    - *          	      (ignored in case of unassembled matrices) 
    - *
    - * The three character type field on line 3 describes the matrix type. 
    - * The following table lists the permitted values for each of the three 
    - * characters. As an example of the type field, RSA denotes that the matrix 
    - * is real, symmetric, and assembled. 
    - *
    - * First Character: 
    - *	R Real matrix 
    - *	C Complex matrix 
    - *	P Pattern only (no numerical values supplied) 
    - *
    - * Second Character: 
    - *	S Symmetric 
    - *	U Unsymmetric 
    - *	H Hermitian 
    - *	Z Skew symmetric 
    - *	R Rectangular 
    - *
    - * Third Character: 
    - *	A Assembled 
    - *	E Elemental matrices (unassembled) 
    - *
    - * 
    - */ -#include -#include -#include "slu_cdefs.h" - - -/*! \brief Eat up the rest of the current line */ -int cDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -int cParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -int cParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - * - * - * Purpose - * ======= - * - * Read a COMPLEX PRECISION matrix stored in Rutherford-Boeing format - * as described below. - * - * Line 1 (A72, A8) - * Col. 1 - 72 Title (TITLE) - * Col. 73 - 80 Matrix name / identifier (MTRXID) - * - * Line 2 (I14, 3(1X, I13)) - * Col. 1 - 14 Total number of lines excluding header (TOTCRD) - * Col. 16 - 28 Number of lines for pointers (PTRCRD) - * Col. 30 - 42 Number of lines for row (or variable) indices (INDCRD) - * Col. 
44 - 56 Number of lines for numerical values (VALCRD) - * - * Line 3 (A3, 11X, 4(1X, I13)) - * Col. 1 - 3 Matrix type (see below) (MXTYPE) - * Col. 15 - 28 Compressed Column: Number of rows (NROW) - * Elemental: Largest integer used to index variable (MVAR) - * Col. 30 - 42 Compressed Column: Number of columns (NCOL) - * Elemental: Number of element matrices (NELT) - * Col. 44 - 56 Compressed Column: Number of entries (NNZERO) - * Elemental: Number of variable indeces (NVARIX) - * Col. 58 - 70 Compressed Column: Unused, explicitly zero - * Elemental: Number of elemental matrix entries (NELTVL) - * - * Line 4 (2A16, A20) - * Col. 1 - 16 Fortran format for pointers (PTRFMT) - * Col. 17 - 32 Fortran format for row (or variable) indices (INDFMT) - * Col. 33 - 52 Fortran format for numerical values of coefficient matrix - * (VALFMT) - * (blank in the case of matrix patterns) - * - * The three character type field on line 3 describes the matrix type. - * The following table lists the permitted values for each of the three - * characters. As an example of the type field, RSA denotes that the matrix - * is real, symmetric, and assembled. - * - * First Character: - * R Real matrix - * C Complex matrix - * I integer matrix - * P Pattern only (no numerical values supplied) - * Q Pattern only (numerical values supplied in associated auxiliary value - * file) - * - * Second Character: - * S Symmetric - * U Unsymmetric - * H Hermitian - * Z Skew symmetric - * R Rectangular - * - * Third Character: - * A Compressed column form - * E Elemental form - * - * - */ - -#include "slu_cdefs.h" - - -/*! 
\brief Eat up the rest of the current line */ -static int cDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -static int cParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -static int cParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - * - */ - -#include "slu_cdefs.h" - - -void -creadtriple(int *m, int *n, int *nonz, - complex **nzval, int **rowind, int **colptr) -{ -/* - * Output parameters - * ================= - * (a,asub,xa): asub[*] contains the row subscripts of nonzeros - * in columns of matrix A; a[*] the numerical values; - * row i of A is given by a[k],k=xa[i],...,xa[i+1]-1. 
- * - */ - int j, k, jsize, nnz, nz; - complex *a, *val; - int *asub, *xa, *row, *col; - int zero_base = 0; - - /* Matrix format: - * First line: #rows, #cols, #non-zero - * Triplet in the rest of lines: - * row, col, value - */ - - scanf("%d%d", n, nonz); - *m = *n; - printf("m %d, n %d, nonz %d\n", *m, *n, *nonz); - callocateA(*n, *nonz, nzval, rowind, colptr); /* Allocate storage */ - a = *nzval; - asub = *rowind; - xa = *colptr; - - val = (complex *) SUPERLU_MALLOC(*nonz * sizeof(complex)); - row = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - col = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - - for (j = 0; j < *n; ++j) xa[j] = 0; - - /* Read into the triplet array from a file */ - for (nnz = 0, nz = 0; nnz < *nonz; ++nnz) { - scanf("%d%d%f%f\n", &row[nz], &col[nz], &val[nz].r, &val[nz].i); - - if ( nnz == 0 ) { /* first nonzero */ - if ( row[0] == 0 || col[0] == 0 ) { - zero_base = 1; - printf("triplet file: row/col indices are zero-based.\n"); - } else - printf("triplet file: row/col indices are one-based.\n"); - } - - if ( !zero_base ) { - /* Change to 0-based indexing. 
*/ - --row[nz]; - --col[nz]; - } - - if (row[nz] < 0 || row[nz] >= *m || col[nz] < 0 || col[nz] >= *n - /*|| val[nz] == 0.*/) { - fprintf(stderr, "nz %d, (%d, %d) = (%e,%e) out of bound, removed\n", - nz, row[nz], col[nz], val[nz].r, val[nz].i); - exit(-1); - } else { - ++xa[col[nz]]; - ++nz; - } - } - - *nonz = nz; - - /* Initialize the array of column pointers */ - k = 0; - jsize = xa[0]; - xa[0] = 0; - for (j = 1; j < *n; ++j) { - k += jsize; - jsize = xa[j]; - xa[j] = k; - } - - /* Copy the triplets into the column oriented storage */ - for (nz = 0; nz < *nonz; ++nz) { - j = col[nz]; - k = xa[j]; - asub[k] = row[nz]; - a[k] = val[nz]; - ++xa[j]; - } - - /* Reset the column pointers to the beginning of each column */ - for (j = *n; j > 0; --j) - xa[j] = xa[j-1]; - xa[0] = 0; - - SUPERLU_FREE(val); - SUPERLU_FREE(row); - SUPERLU_FREE(col); - -#ifdef CHK_INPUT - { - int i; - for (i = 0; i < *n; i++) { - printf("Col %d, xa %d\n", i, xa[i]); - for (k = xa[i]; k < xa[i+1]; k++) - printf("%d\t%16.10f\n", asub[k], a[k]); - } - } -#endif - -} - - -void creadrhs(int m, complex *b) -{ - FILE *fp, *fopen(); - int i; - /*int j;*/ - - if ( !(fp = fopen("b.dat", "r")) ) { - fprintf(stderr, "dreadrhs: file does not exist\n"); - exit(-1); - } - for (i = 0; i < m; ++i) - fscanf(fp, "%f%f\n", &b[i].r, &b[i].i); - - /* readpair_(j, &b[i]);*/ - fclose(fp); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csnode_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csnode_bmod.c deleted file mode 100644 index 40c21f51cf..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csnode_bmod.c +++ /dev/null @@ -1,120 +0,0 @@ - -/*! @file csnode_bmod.c - * \brief Performs numeric block updates within the relaxed snode. - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_cdefs.h" - - -/*! \brief Performs numeric block updates within the relaxed snode. - */ -int -csnode_bmod ( - const int jcol, /* in */ - const int jsupno, /* in */ - const int fsupc, /* in */ - complex *dense, /* in */ - complex *tempv, /* working array */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - complex alpha = {-1.0, 0.0}, beta = {1.0, 0.0}; -#endif - - complex comp_zero = {0.0, 0.0}; - int luptr, nsupc, nsupr, nrow; - int isub, irow, i, iptr; - register int ufirst, nextlu; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - nextlu = xlusup[jcol]; - - /* - * Process the supernodal portion of L\U[*,j] - */ - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = comp_zero; - ++nextlu; - } - - xlusup[jcol + 1] = nextlu; /* Initialize xlusup for next column */ - - if ( fsupc < jcol ) { - - luptr = xlusup[fsupc]; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nsupc = jcol - fsupc; /* Excluding jcol */ - ufirst = xlusup[jcol]; /* Points to the beginning of column - jcol in supernode L\U(jsupno). 
*/ - nrow = nsupr - nsupc; - - ops[TRSV] += 4 * nsupc * (nsupc - 1); - ops[GEMV] += 8 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - CGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - ctrsv_( "L", "N", "U", &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - cgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - clsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - cmatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], &tempv[0] ); - - /* Scatter tempv[*] into lusup[*] */ - iptr = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - c_sub(&lusup[iptr], &lusup[iptr], &tempv[i]); - ++iptr; - tempv[i] = comp_zero; - } -#endif - - } - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csnode_dfs.c deleted file mode 100644 index 175da0973a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csnode_dfs.c +++ /dev/null @@ -1,112 +0,0 @@ - -/*! @file csnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    csnode_dfs() - Determine the union of the row structures of those 
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore, 
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -csnode_dfs ( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *xprune, /* out */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, ifrom, ito, nextl, new_next; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) { - if ( mem_error = cLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1, then make a copy of the subscripts for pruning */ - if ( jcol < kcol ) { - new_next = nextl + (nextl - xlsub[jcol]); - while ( new_next > nzlmax ) { - if ( mem_error = cLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - ito = nextl; - for (ifrom = xlsub[jcol]; ifrom < nextl; ) - lsub[ito++] = lsub[ifrom++]; - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - nextl = ito; - } - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xprune[kcol] = nextl; - xlsub[kcol+1] = nextl; - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csp_blas2.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csp_blas2.c deleted file mode 100644 index 2ab271d068..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csp_blas2.c +++ /dev/null @@ -1,573 +0,0 @@ - -/*! 
@file csp_blas2.c - * \brief Sparse BLAS 2, using some dense BLAS 2 operations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -/* - * File name: csp_blas2.c - * Purpose: Sparse BLAS 2, using some dense BLAS 2 operations. - */ - -#include "slu_cdefs.h" - -/* - * Function prototypes - */ -void cusolve(int, int, complex*, complex*); -void clsolve(int, int, complex*, complex*); -void cmatvec(int, int, int, complex*, complex*, complex*); - -/*! \brief Solves one of the systems of equations A*x = b, or A'*x = b - * - *
    - *   Purpose
    - *   =======
    - *
    - *   sp_ctrsv() solves one of the systems of equations   
    - *       A*x = b,   or   A'*x = b,
    - *   where b and x are n element vectors and A is a sparse unit , or   
    - *   non-unit, upper or lower triangular matrix.   
    - *   No test for singularity or near-singularity is included in this   
    - *   routine. Such tests must be performed before calling this routine.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   uplo   - (input) char*
    - *            On entry, uplo specifies whether the matrix is an upper or   
    - *             lower triangular matrix as follows:   
    - *                uplo = 'U' or 'u'   A is an upper triangular matrix.   
    - *                uplo = 'L' or 'l'   A is a lower triangular matrix.   
    - *
    - *   trans  - (input) char*
    - *             On entry, trans specifies the equations to be solved as   
    - *             follows:   
    - *                trans = 'N' or 'n'   A*x = b.   
    - *                trans = 'T' or 't'   A'*x = b.
    - *                trans = 'C' or 'c'   A^H*x = b.   
    - *
    - *   diag   - (input) char*
    - *             On entry, diag specifies whether or not A is unit   
    - *             triangular as follows:   
    - *                diag = 'U' or 'u'   A is assumed to be unit triangular.   
    - *                diag = 'N' or 'n'   A is not assumed to be unit   
    - *                                    triangular.   
    - *	     
    - *   L       - (input) SuperMatrix*
    - *	       The factor L from the factorization Pr*A*Pc=L*U. Use
    - *             compressed row subscripts storage for supernodes,
    - *             i.e., L has types: Stype = SC, Dtype = SLU_C, Mtype = TRLU.
    - *
    - *   U       - (input) SuperMatrix*
    - *	        The factor U from the factorization Pr*A*Pc=L*U.
    - *	        U has types: Stype = NC, Dtype = SLU_C, Mtype = TRU.
    - *    
    - *   x       - (input/output) complex*
    - *             Before entry, the incremented array X must contain the n   
    - *             element right-hand side vector b. On exit, X is overwritten 
    - *             with the solution vector x.
    - *
    - *   info    - (output) int*
    - *             If *info = -i, the i-th argument had an illegal value.
    - * 
    - */ -int -sp_ctrsv(char *uplo, char *trans, char *diag, SuperMatrix *L, - SuperMatrix *U, complex *x, SuperLUStat_t *stat, int *info) -{ -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - SCformat *Lstore; - NCformat *Ustore; - complex *Lval, *Uval; - int incx = 1, incy = 1; - complex temp; - complex alpha = {1.0, 0.0}, beta = {1.0, 0.0}; - complex comp_zero = {0.0, 0.0}; - int nrow; - int fsupc, nsupr, nsupc, luptr, istart, irow; - int i, k, iptr, jcol; - complex *work; - flops_t solve_ops; - - /* Test the input parameters */ - *info = 0; - if ( !lsame_(uplo,"L") && !lsame_(uplo, "U") ) *info = -1; - else if ( !lsame_(trans, "N") && !lsame_(trans, "T") && - !lsame_(trans, "C")) *info = -2; - else if ( !lsame_(diag, "U") && !lsame_(diag, "N") ) *info = -3; - else if ( L->nrow != L->ncol || L->nrow < 0 ) *info = -4; - else if ( U->nrow != U->ncol || U->nrow < 0 ) *info = -5; - if ( *info ) { - i = -(*info); - xerbla_("sp_ctrsv", &i); - return 0; - } - - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( !(work = complexCalloc(L->nrow)) ) - ABORT("Malloc fails for work in sp_ctrsv()."); - - if ( lsame_(trans, "N") ) { /* Form x := inv(A)*x. 
*/ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L)*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - nrow = nsupr - nsupc; - - /* 1 c_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc - 1) + 10 * nsupc; - solve_ops += 8 * nrow * nsupc; - - if ( nsupc == 1 ) { - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); ++iptr) { - irow = L_SUB(iptr); - ++luptr; - cc_mult(&comp_zero, &x[fsupc], &Lval[luptr]); - c_sub(&x[irow], &x[irow], &comp_zero); - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - CGEMV(ftcs2, &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#else - ctrsv_("L", "N", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - cgemv_("N", &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#endif -#else - clsolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc]); - - cmatvec ( nsupr, nsupr-nsupc, nsupc, &Lval[luptr+nsupc], - &x[fsupc], &work[0] ); -#endif - - iptr = istart + nsupc; - for (i = 0; i < nrow; ++i, ++iptr) { - irow = L_SUB(iptr); - c_sub(&x[irow], &x[irow], &work[i]); /* Scatter */ - work[i] = comp_zero; - - } - } - } /* for k ... 
*/ - - } else { - /* Form x := inv(U)*x */ - - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - /* 1 c_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc + 1) + 10 * nsupc; - - if ( nsupc == 1 ) { - c_div(&x[fsupc], &x[fsupc], &Lval[luptr]); - for (i = U_NZ_START(fsupc); i < U_NZ_START(fsupc+1); ++i) { - irow = U_SUB(i); - cc_mult(&comp_zero, &x[fsupc], &Uval[i]); - c_sub(&x[irow], &x[irow], &comp_zero); - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV(ftcs3, ftcs2, ftcs2, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ctrsv_("U", "N", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif -#else - cusolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc] ); -#endif - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); - i++) { - irow = U_SUB(i); - cc_mult(&comp_zero, &x[jcol], &Uval[i]); - c_sub(&x[irow], &x[irow], &comp_zero); - } - } - } - } /* for k ... 
*/ - - } - } else if ( lsame_(trans, "T") ) { /* Form x := inv(A')*x */ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L')*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; --k) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 8 * (nsupr - nsupc) * nsupc; - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - iptr = istart + nsupc; - for (i = L_NZ_START(jcol) + nsupc; - i < L_NZ_START(jcol+1); i++) { - irow = L_SUB(iptr); - cc_mult(&comp_zero, &x[irow], &Lval[i]); - c_sub(&x[jcol], &x[jcol], &comp_zero); - iptr++; - } - } - - if ( nsupc > 1 ) { - solve_ops += 4 * nsupc * (nsupc - 1); -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("U", strlen("U")); - CTRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ctrsv_("L", "T", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } - } else { - /* Form x := inv(U')*x */ - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++) { - irow = U_SUB(i); - cc_mult(&comp_zero, &x[irow], &Uval[i]); - c_sub(&x[jcol], &x[jcol], &comp_zero); - } - } - - /* 1 c_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc + 1) + 10 * nsupc; - - if ( nsupc == 1 ) { - c_div(&x[fsupc], &x[fsupc], &Lval[luptr]); - } else { -#ifdef _CRAY - ftcs1 = _cptofcd("U", strlen("U")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("N", strlen("N")); - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - 
&x[fsupc], &incx); -#else - ctrsv_("U", "T", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } /* for k ... */ - } - } else { /* Form x := conj(inv(A'))*x */ - - if ( lsame_(uplo, "L") ) { - /* Form x := conj(inv(L'))*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; --k) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 8 * (nsupr - nsupc) * nsupc; - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - iptr = istart + nsupc; - for (i = L_NZ_START(jcol) + nsupc; - i < L_NZ_START(jcol+1); i++) { - irow = L_SUB(iptr); - cc_conj(&temp, &Lval[i]); - cc_mult(&comp_zero, &x[irow], &temp); - c_sub(&x[jcol], &x[jcol], &comp_zero); - iptr++; - } - } - - if ( nsupc > 1 ) { - solve_ops += 4 * nsupc * (nsupc - 1); -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd(trans, strlen("T")); - ftcs3 = _cptofcd("U", strlen("U")); - CTRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ctrsv_("L", trans, "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } - } else { - /* Form x := conj(inv(U'))*x */ - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++) { - irow = U_SUB(i); - cc_conj(&temp, &Uval[i]); - cc_mult(&comp_zero, &x[irow], &temp); - c_sub(&x[jcol], &x[jcol], &comp_zero); - } - } - - /* 1 c_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc + 1) + 10 * nsupc; - - if ( nsupc == 1 ) { - cc_conj(&temp, &Lval[luptr]); - c_div(&x[fsupc], &x[fsupc], &temp); - } 
else { -#ifdef _CRAY - ftcs1 = _cptofcd("U", strlen("U")); - ftcs2 = _cptofcd(trans, strlen("T")); - ftcs3 = _cptofcd("N", strlen("N")); - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ctrsv_("U", trans, "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } /* for k ... */ - } - } - - stat->ops[SOLVE] += solve_ops; - SUPERLU_FREE(work); - return 0; -} - - - -/*! \brief Performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y - * - *
      
    - *   Purpose   
    - *   =======   
    - *
    - *   sp_cgemv()  performs one of the matrix-vector operations   
    - *      y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,   
    - *   where alpha and beta are scalars, x and y are vectors and A is a
    - *   sparse A->nrow by A->ncol matrix.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   TRANS  - (input) char*
    - *            On entry, TRANS specifies the operation to be performed as   
    - *            follows:   
    - *               TRANS = 'N' or 'n'   y := alpha*A*x + beta*y.   
    - *               TRANS = 'T' or 't'   y := alpha*A'*x + beta*y.   
    - *               TRANS = 'C' or 'c'   y := alpha*A'*x + beta*y.   
    - *
    - *   ALPHA  - (input) complex
    - *            On entry, ALPHA specifies the scalar alpha.   
    - *
    - *   A      - (input) SuperMatrix*
    - *            Before entry, the leading m by n part of the array A must   
    - *            contain the matrix of coefficients.   
    - *
    - *   X      - (input) complex*, array of DIMENSION at least   
    - *            ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'   
    - *           and at least   
    - *            ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.   
    - *            Before entry, the incremented array X must contain the   
    - *            vector x.   
    - * 
    - *   INCX   - (input) int
    - *            On entry, INCX specifies the increment for the elements of   
    - *            X. INCX must not be zero.   
    - *
    - *   BETA   - (input) complex
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then Y need not be set on input.   
    - *
    - *   Y      - (output) complex*,  array of DIMENSION at least   
    - *            ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'   
    - *            and at least   
    - *            ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.   
    - *            Before entry with BETA non-zero, the incremented array Y   
    - *            must contain the vector y. On exit, Y is overwritten by the 
    - *            updated vector y.
    - *	      
    - *   INCY   - (input) int
    - *            On entry, INCY specifies the increment for the elements of   
    - *            Y. INCY must not be zero.   
    - *
    - *    ==== Sparse Level 2 Blas routine.   
    - * 
    -*/ -int -sp_cgemv(char *trans, complex alpha, SuperMatrix *A, complex *x, - int incx, complex beta, complex *y, int incy) -{ - - /* Local variables */ - NCformat *Astore; - complex *Aval; - int info; - complex temp, temp1; - int lenx, leny, i, j, irow; - int iy, jx, jy, kx, ky; - int notran; - complex comp_zero = {0.0, 0.0}; - complex comp_one = {1.0, 0.0}; - - notran = lsame_(trans, "N"); - Astore = A->Store; - Aval = Astore->nzval; - - /* Test the input parameters */ - info = 0; - if ( !notran && !lsame_(trans, "T") && !lsame_(trans, "C")) info = 1; - else if ( A->nrow < 0 || A->ncol < 0 ) info = 3; - else if (incx == 0) info = 5; - else if (incy == 0) info = 8; - if (info != 0) { - xerbla_("sp_cgemv ", &info); - return 0; - } - - /* Quick return if possible. */ - if (A->nrow == 0 || A->ncol == 0 || - c_eq(&alpha, &comp_zero) && - c_eq(&beta, &comp_one)) - return 0; - - - /* Set LENX and LENY, the lengths of the vectors x and y, and set - up the start points in X and Y. */ - if (lsame_(trans, "N")) { - lenx = A->ncol; - leny = A->nrow; - } else { - lenx = A->nrow; - leny = A->ncol; - } - if (incx > 0) kx = 0; - else kx = - (lenx - 1) * incx; - if (incy > 0) ky = 0; - else ky = - (leny - 1) * incy; - - /* Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. */ - /* First form y := beta*y. */ - if ( !c_eq(&beta, &comp_one) ) { - if (incy == 1) { - if ( c_eq(&beta, &comp_zero) ) - for (i = 0; i < leny; ++i) y[i] = comp_zero; - else - for (i = 0; i < leny; ++i) - cc_mult(&y[i], &beta, &y[i]); - } else { - iy = ky; - if ( c_eq(&beta, &comp_zero) ) - for (i = 0; i < leny; ++i) { - y[iy] = comp_zero; - iy += incy; - } - else - for (i = 0; i < leny; ++i) { - cc_mult(&y[iy], &beta, &y[iy]); - iy += incy; - } - } - } - - if ( c_eq(&alpha, &comp_zero) ) return 0; - - if ( notran ) { - /* Form y := alpha*A*x + y. 
*/ - jx = kx; - if (incy == 1) { - for (j = 0; j < A->ncol; ++j) { - if ( !c_eq(&x[jx], &comp_zero) ) { - cc_mult(&temp, &alpha, &x[jx]); - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - cc_mult(&temp1, &temp, &Aval[i]); - c_add(&y[irow], &y[irow], &temp1); - } - } - jx += incx; - } - } else { - ABORT("Not implemented."); - } - } else { - /* Form y := alpha*A'*x + y. */ - jy = ky; - if (incx == 1) { - for (j = 0; j < A->ncol; ++j) { - temp = comp_zero; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - cc_mult(&temp1, &Aval[i], &x[irow]); - c_add(&temp, &temp, &temp1); - } - cc_mult(&temp1, &alpha, &temp); - c_add(&y[jy], &y[jy], &temp1); - jy += incy; - } - } else { - ABORT("Not implemented."); - } - } - return 0; -} /* sp_cgemv */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csp_blas3.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csp_blas3.c deleted file mode 100644 index 84ff940ae5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/csp_blas3.c +++ /dev/null @@ -1,127 +0,0 @@ - -/*! @file csp_blas3.c - * \brief Sparse BLAS3, using some dense BLAS3 operations - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -/* - * File name: sp_blas3.c - * Purpose: Sparse BLAS3, using some dense BLAS3 operations. - */ - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - * 
    - *   sp_c performs one of the matrix-matrix operations   
    - * 
    - *      C := alpha*op( A )*op( B ) + beta*C,   
    - * 
    - *   where  op( X ) is one of 
    - * 
    - *      op( X ) = X   or   op( X ) = X'   or   op( X ) = conjg( X' ),
    - * 
    - *   alpha and beta are scalars, and A, B and C are matrices, with op( A ) 
    - *   an m by k matrix,  op( B )  a  k by n matrix and  C an m by n matrix. 
    - *   
    - * 
    - *   Parameters   
    - *   ==========   
    - * 
    - *   TRANSA - (input) char*
    - *            On entry, TRANSA specifies the form of op( A ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSA = 'N' or 'n',  op( A ) = A.   
    - *               TRANSA = 'T' or 't',  op( A ) = A'.   
    - *               TRANSA = 'C' or 'c',  op( A ) = conjg( A' ).   
    - *            Unchanged on exit.   
    - * 
    - *   TRANSB - (input) char*
    - *            On entry, TRANSB specifies the form of op( B ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSB = 'N' or 'n',  op( B ) = B.   
    - *               TRANSB = 'T' or 't',  op( B ) = B'.   
    - *               TRANSB = 'C' or 'c',  op( B ) = conjg( B' ).   
    - *            Unchanged on exit.   
    - * 
    - *   M      - (input) int   
    - *            On entry,  M  specifies  the number of rows of the matrix 
    - *	     op( A ) and of the matrix C.  M must be at least zero. 
    - *	     Unchanged on exit.   
    - * 
    - *   N      - (input) int
    - *            On entry,  N specifies the number of columns of the matrix 
    - *	     op( B ) and the number of columns of the matrix C. N must be 
    - *	     at least zero.
    - *	     Unchanged on exit.   
    - * 
    - *   K      - (input) int
    - *            On entry, K specifies the number of columns of the matrix 
    - *	     op( A ) and the number of rows of the matrix op( B ). K must 
    - *	     be at least  zero.   
    - *           Unchanged on exit.
    - *      
    - *   ALPHA  - (input) complex
    - *            On entry, ALPHA specifies the scalar alpha.   
    - * 
    - *   A      - (input) SuperMatrix*
    - *            Matrix A with a sparse format, of dimension (A->nrow, A->ncol).
    - *            Currently, the type of A can be:
    - *                Stype = NC or NCP; Dtype = SLU_C; Mtype = GE. 
    - *            In the future, more general A can be handled.
    - * 
    - *   B      - COMPLEX PRECISION array of DIMENSION ( LDB, kb ), where kb is 
    - *            n when TRANSB = 'N' or 'n',  and is  k otherwise.   
    - *            Before entry with  TRANSB = 'N' or 'n',  the leading k by n 
    - *            part of the array B must contain the matrix B, otherwise 
    - *            the leading n by k part of the array B must contain the 
    - *            matrix B.   
    - *            Unchanged on exit.   
    - * 
    - *   LDB    - (input) int
    - *            On entry, LDB specifies the first dimension of B as declared 
    - *            in the calling (sub) program. LDB must be at least max( 1, n ).  
    - *            Unchanged on exit.   
    - * 
    - *   BETA   - (input) complex
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then C need not be set on input.   
    - *  
    - *   C      - COMPLEX PRECISION array of DIMENSION ( LDC, n ).   
    - *            Before entry, the leading m by n part of the array C must 
    - *            contain the matrix C,  except when beta is zero, in which 
    - *            case C need not be set on entry.   
    - *            On exit, the array C is overwritten by the m by n matrix 
    - *	     ( alpha*op( A )*B + beta*C ).   
    - *  
    - *   LDC    - (input) int
    - *            On entry, LDC specifies the first dimension of C as declared 
    - *            in the calling (sub)program. LDC must be at least max(1,m).   
    - *            Unchanged on exit.   
    - *  
    - *   ==== Sparse Level 3 Blas routine.   
    - * 
    - */ - -int -sp_cgemm(char *transa, char *transb, int m, int n, int k, - complex alpha, SuperMatrix *A, complex *b, int ldb, - complex beta, complex *c, int ldc) -{ - int incx = 1, incy = 1; - int j; - - for (j = 0; j < n; ++j) { - sp_cgemv(transa, alpha, A, &b[ldb*j], incx, beta, &c[ldc*j], incy); - } - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cutil.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cutil.c deleted file mode 100644 index 8b6a7f220c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/cutil.c +++ /dev/null @@ -1,475 +0,0 @@ - -/*! @file cutil.c - * \brief Matrix utility functions - * - *
    - * -- SuperLU routine (version 3.1) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * August 1, 2008
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include "slu_cdefs.h" - -void -cCreate_CompCol_Matrix(SuperMatrix *A, int m, int n, int nnz, - complex *nzval, int *rowind, int *colptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NCformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NCformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->rowind = rowind; - Astore->colptr = colptr; -} - -void -cCreate_CompRow_Matrix(SuperMatrix *A, int m, int n, int nnz, - complex *nzval, int *colind, int *rowptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NRformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NRformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->colind = colind; - Astore->rowptr = rowptr; -} - -/*! \brief Copy matrix A into matrix B. 
*/ -void -cCopy_CompCol_Matrix(SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore, *Bstore; - int ncol, nnz, i; - - B->Stype = A->Stype; - B->Dtype = A->Dtype; - B->Mtype = A->Mtype; - B->nrow = A->nrow;; - B->ncol = ncol = A->ncol; - Astore = (NCformat *) A->Store; - Bstore = (NCformat *) B->Store; - Bstore->nnz = nnz = Astore->nnz; - for (i = 0; i < nnz; ++i) - ((complex *)Bstore->nzval)[i] = ((complex *)Astore->nzval)[i]; - for (i = 0; i < nnz; ++i) Bstore->rowind[i] = Astore->rowind[i]; - for (i = 0; i <= ncol; ++i) Bstore->colptr[i] = Astore->colptr[i]; -} - - -void -cCreate_Dense_Matrix(SuperMatrix *X, int m, int n, complex *x, int ldx, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - DNformat *Xstore; - - X->Stype = stype; - X->Dtype = dtype; - X->Mtype = mtype; - X->nrow = m; - X->ncol = n; - X->Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !(X->Store) ) ABORT("SUPERLU_MALLOC fails for X->Store"); - Xstore = (DNformat *) X->Store; - Xstore->lda = ldx; - Xstore->nzval = (complex *) x; -} - -void -cCopy_Dense_Matrix(int M, int N, complex *X, int ldx, - complex *Y, int ldy) -{ -/*! \brief Copies a two-dimensional matrix X to another matrix Y. 
- */ - int i, j; - - for (j = 0; j < N; ++j) - for (i = 0; i < M; ++i) - Y[i + j*ldy] = X[i + j*ldx]; -} - -void -cCreate_SuperNode_Matrix(SuperMatrix *L, int m, int n, int nnz, - complex *nzval, int *nzval_colptr, int *rowind, - int *rowind_colptr, int *col_to_sup, int *sup_to_col, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - SCformat *Lstore; - - L->Stype = stype; - L->Dtype = dtype; - L->Mtype = mtype; - L->nrow = m; - L->ncol = n; - L->Store = (void *) SUPERLU_MALLOC( sizeof(SCformat) ); - if ( !(L->Store) ) ABORT("SUPERLU_MALLOC fails for L->Store"); - Lstore = L->Store; - Lstore->nnz = nnz; - Lstore->nsuper = col_to_sup[n]; - Lstore->nzval = nzval; - Lstore->nzval_colptr = nzval_colptr; - Lstore->rowind = rowind; - Lstore->rowind_colptr = rowind_colptr; - Lstore->col_to_sup = col_to_sup; - Lstore->sup_to_col = sup_to_col; - -} - - -/*! \brief Convert a row compressed storage into a column compressed storage. - */ -void -cCompRow_to_CompCol(int m, int n, int nnz, - complex *a, int *colind, int *rowptr, - complex **at, int **rowind, int **colptr) -{ - register int i, j, col, relpos; - int *marker; - - /* Allocate storage for another copy of the matrix. */ - *at = (complex *) complexMalloc(nnz); - *rowind = (int *) intMalloc(nnz); - *colptr = (int *) intMalloc(n+1); - marker = (int *) intCalloc(n); - - /* Get counts of each column of A, and set up column pointers */ - for (i = 0; i < m; ++i) - for (j = rowptr[i]; j < rowptr[i+1]; ++j) ++marker[colind[j]]; - (*colptr)[0] = 0; - for (j = 0; j < n; ++j) { - (*colptr)[j+1] = (*colptr)[j] + marker[j]; - marker[j] = (*colptr)[j]; - } - - /* Transfer the matrix into the compressed column storage. 
*/ - for (i = 0; i < m; ++i) { - for (j = rowptr[i]; j < rowptr[i+1]; ++j) { - col = colind[j]; - relpos = marker[col]; - (*rowind)[relpos] = i; - (*at)[relpos] = a[j]; - ++marker[col]; - } - } - - SUPERLU_FREE(marker); -} - - -void -cPrint_CompCol_Matrix(char *what, SuperMatrix *A) -{ - NCformat *Astore; - register int i,n; - float *dp; - - printf("\nCompCol matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (NCformat *) A->Store; - dp = (float *) Astore->nzval; - printf("nrow %d, ncol %d, nnz %d\n", A->nrow,A->ncol,Astore->nnz); - printf("nzval: "); - for (i = 0; i < 2*Astore->colptr[n]; ++i) printf("%f ", dp[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->colptr[n]; ++i) printf("%d ", Astore->rowind[i]); - printf("\ncolptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->colptr[i]); - printf("\n"); - fflush(stdout); -} - -void -cPrint_SuperNode_Matrix(char *what, SuperMatrix *A) -{ - SCformat *Astore; - register int i, j, k, c, d, n, nsup; - float *dp; - int *col_to_sup, *sup_to_col, *rowind, *rowind_colptr; - - printf("\nSuperNode matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (SCformat *) A->Store; - dp = (float *) Astore->nzval; - col_to_sup = Astore->col_to_sup; - sup_to_col = Astore->sup_to_col; - rowind_colptr = Astore->rowind_colptr; - rowind = Astore->rowind; - printf("nrow %d, ncol %d, nnz %d, nsuper %d\n", - A->nrow,A->ncol,Astore->nnz,Astore->nsuper); - printf("nzval:\n"); - for (k = 0; k <= Astore->nsuper; ++k) { - c = sup_to_col[k]; - nsup = sup_to_col[k+1] - c; - for (j = c; j < c + nsup; ++j) { - d = Astore->nzval_colptr[j]; - for (i = rowind_colptr[c]; i < rowind_colptr[c+1]; ++i) { - printf("%d\t%d\t%e\t%e\n", rowind[i], j, dp[d], dp[d+1]); - d += 2; - } - } - } -#if 0 - for (i = 0; i < 2*Astore->nzval_colptr[n]; ++i) printf("%f ", dp[i]); -#endif - printf("\nnzval_colptr: "); - for (i = 0; i 
<= n; ++i) printf("%d ", Astore->nzval_colptr[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->rowind_colptr[n]; ++i) - printf("%d ", Astore->rowind[i]); - printf("\nrowind_colptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->rowind_colptr[i]); - printf("\ncol_to_sup: "); - for (i = 0; i < n; ++i) printf("%d ", col_to_sup[i]); - printf("\nsup_to_col: "); - for (i = 0; i <= Astore->nsuper+1; ++i) - printf("%d ", sup_to_col[i]); - printf("\n"); - fflush(stdout); -} - -void -cPrint_Dense_Matrix(char *what, SuperMatrix *A) -{ - DNformat *Astore = (DNformat *) A->Store; - register int i, j, lda = Astore->lda; - float *dp; - - printf("\nDense matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - dp = (float *) Astore->nzval; - printf("nrow %d, ncol %d, lda %d\n", A->nrow,A->ncol,lda); - printf("\nnzval: "); - for (j = 0; j < A->ncol; ++j) { - for (i = 0; i < 2*A->nrow; ++i) printf("%f ", dp[i + j*2*lda]); - printf("\n"); - } - printf("\n"); - fflush(stdout); -} - -/*! \brief Diagnostic print of column "jcol" in the U/L factor. 
- */ -void -cprint_lu_col(char *msg, int jcol, int pivrow, int *xprune, GlobalLU_t *Glu) -{ - int i, k, fsupc; - int *xsup, *supno; - int *xlsub, *lsub; - complex *lusup; - int *xlusup; - complex *ucol; - int *usub, *xusub; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - - printf("%s", msg); - printf("col %d: pivrow %d, supno %d, xprune %d\n", - jcol, pivrow, supno[jcol], xprune[jcol]); - - printf("\tU-col:\n"); - for (i = xusub[jcol]; i < xusub[jcol+1]; i++) - printf("\t%d%10.4f, %10.4f\n", usub[i], ucol[i].r, ucol[i].i); - printf("\tL-col in rectangular snode:\n"); - fsupc = xsup[supno[jcol]]; /* first col of the snode */ - i = xlsub[fsupc]; - k = xlusup[jcol]; - while ( i < xlsub[fsupc+1] && k < xlusup[jcol+1] ) { - printf("\t%d\t%10.4f, %10.4f\n", lsub[i], lusup[k].r, lusup[k].i); - i++; k++; - } - fflush(stdout); -} - - -/*! \brief Check whether tempv[] == 0. This should be true before and after calling any numeric routines, i.e., "panel_bmod" and "column_bmod". - */ -void ccheck_tempv(int n, complex *tempv) -{ - int i; - - for (i = 0; i < n; i++) { - if ((tempv[i].r != 0.0) || (tempv[i].i != 0.0)) - { - fprintf(stderr,"tempv[%d] = {%f, %f}\n", i, tempv[i].r, tempv[i].i); - ABORT("ccheck_tempv"); - } - } -} - - -void -cGenXtrue(int n, int nrhs, complex *x, int ldx) -{ - int i, j; - for (j = 0; j < nrhs; ++j) - for (i = 0; i < n; ++i) { - x[i + j*ldx].r = 1.0; - x[i + j*ldx].i = 0.0; - } -} - -/*! 
\brief Let rhs[i] = sum of i-th row of A, so the solution vector is all 1's - */ -void -cFillRHS(trans_t trans, int nrhs, complex *x, int ldx, - SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore; - complex *Aval; - DNformat *Bstore; - complex *rhs; - complex one = {1.0, 0.0}; - complex zero = {0.0, 0.0}; - int ldc; - char transc[1]; - - Astore = A->Store; - Aval = (complex *) Astore->nzval; - Bstore = B->Store; - rhs = Bstore->nzval; - ldc = Bstore->lda; - - if ( trans == NOTRANS ) *(unsigned char *)transc = 'N'; - else *(unsigned char *)transc = 'T'; - - sp_cgemm(transc, "N", A->nrow, nrhs, A->ncol, one, A, - x, ldx, zero, rhs, ldc); - -} - -/*! \brief Fills a complex precision array with a given value. - */ -void -cfill(complex *a, int alen, complex dval) -{ - register int i; - for (i = 0; i < alen; i++) a[i] = dval; -} - - - -/*! \brief Check the inf-norm of the error vector - */ -void cinf_norm_error(int nrhs, SuperMatrix *X, complex *xtrue) -{ - DNformat *Xstore; - float err, xnorm; - complex *Xmat, *soln_work; - complex temp; - int i, j; - - Xstore = X->Store; - Xmat = Xstore->nzval; - - for (j = 0; j < nrhs; j++) { - soln_work = &Xmat[j*Xstore->lda]; - err = xnorm = 0.0; - for (i = 0; i < X->nrow; i++) { - c_sub(&temp, &soln_work[i], &xtrue[i]); - err = SUPERLU_MAX(err, slu_c_abs(&temp)); - xnorm = SUPERLU_MAX(xnorm, slu_c_abs(&soln_work[i])); - } - err = err / xnorm; - printf("||X - Xtrue||/||X|| = %e\n", err); - } -} - - - -/*! \brief Print performance of the code. */ -void -cPrintPerf(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage, - float rpg, float rcond, float *ferr, - float *berr, char *equed, SuperLUStat_t *stat) -{ - SCformat *Lstore; - NCformat *Ustore; - double *utime; - flops_t *ops; - - utime = stat->utime; - ops = stat->ops; - - if ( utime[FACT] != 0. ) - printf("Factor flops = %e\tMflops = %8.2f\n", ops[FACT], - ops[FACT]*1e-6/utime[FACT]); - printf("Identify relaxed snodes = %8.2f\n", utime[RELAX]); - if ( utime[SOLVE] != 0. 
) - printf("Solve flops = %.0f, Mflops = %8.2f\n", ops[SOLVE], - ops[SOLVE]*1e-6/utime[SOLVE]); - - Lstore = (SCformat *) L->Store; - Ustore = (NCformat *) U->Store; - printf("\tNo of nonzeros in factor L = %d\n", Lstore->nnz); - printf("\tNo of nonzeros in factor U = %d\n", Ustore->nnz); - printf("\tNo of nonzeros in L+U = %d\n", Lstore->nnz + Ustore->nnz); - - printf("L\\U MB %.3f\ttotal MB needed %.3f\n", - mem_usage->for_lu/1e6, mem_usage->total_needed/1e6); - printf("Number of memory expansions: %d\n", stat->expansions); - - printf("\tFactor\tMflops\tSolve\tMflops\tEtree\tEquil\tRcond\tRefine\n"); - printf("PERF:%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f\n", - utime[FACT], ops[FACT]*1e-6/utime[FACT], - utime[SOLVE], ops[SOLVE]*1e-6/utime[SOLVE], - utime[ETREE], utime[EQUIL], utime[RCOND], utime[REFINE]); - - printf("\tRpg\t\tRcond\t\tFerr\t\tBerr\t\tEquil?\n"); - printf("NUM:\t%e\t%e\t%e\t%e\t%s\n", - rpg, rcond, ferr[0], berr[0], equed); - -} - - - - -print_complex_vec(char *what, int n, complex *vec) -{ - int i; - printf("%s: n %d\n", what, n); - for (i = 0; i < n; ++i) printf("%d\t%f%f\n", i, vec[i].r, vec[i].i); - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dGetDiagU.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dGetDiagU.c deleted file mode 100644 index 41f1f2706d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dGetDiagU.c +++ /dev/null @@ -1,58 +0,0 @@ -/*! @file dGetDiagU.c - * \brief Extracts main diagonal of matrix - * - *
     
    - * -- Auxiliary routine in SuperLU (version 2.0) --
    - * Lawrence Berkeley National Lab, Univ. of California Berkeley.
    - * Xiaoye S. Li
    - * September 11, 2003
    - *
    - *  Purpose
    - * =======
    - *
    - * GetDiagU extracts the main diagonal of matrix U of the LU factorization.
    - *  
    - * Arguments
    - * =========
    - *
    - * L      (input) SuperMatrix*
    - *        The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *        dgstrf(). Use compressed row subscripts storage for supernodes,
    - *        i.e., L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * diagU  (output) double*, dimension (n)
    - *        The main diagonal of matrix U.
    - *
    - * Note
    - * ====
    - * The diagonal blocks of the L and U matrices are stored in the L
    - * data structures.
    - * 
    -*/ -#include "slu_ddefs.h" - -void dGetDiagU(SuperMatrix *L, double *diagU) -{ - int_t i, k, nsupers; - int_t fsupc, nsupr, nsupc, luptr; - double *dblock, *Lval; - SCformat *Lstore; - - Lstore = L->Store; - Lval = Lstore->nzval; - nsupers = Lstore->nsuper + 1; - - for (k = 0; k < nsupers; ++k) { - fsupc = L_FST_SUPC(k); - nsupc = L_FST_SUPC(k+1) - fsupc; - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - luptr = L_NZ_START(fsupc); - - dblock = &diagU[fsupc]; - for (i = 0; i < nsupc; ++i) { - dblock[i] = Lval[luptr]; - luptr += nsupr + 1; - } - } -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcolumn_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcolumn_bmod.c deleted file mode 100644 index 0eb2386f02..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcolumn_bmod.c +++ /dev/null @@ -1,352 +0,0 @@ - -/*! @file dcolumn_bmod.c - * \brief performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - *  Permission is hereby granted to use or copy this program for any
    - *  purpose, provided the above notices are retained on all copies.
    - *  Permission to modify the code and to distribute modified code is
    - *  granted, provided the above notices are retained, and a notice that
    - *  the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include -#include -#include "slu_ddefs.h" - -/* - * Function prototypes - */ -void dusolve(int, int, double*, double*); -void dlsolve(int, int, double*, double*); -void dmatvec(int, int, int, double*, double*, double*); - - - -/*! \brief - * - *
    - * Purpose:
    - * ========
    - * Performs numeric block updates (sup-col) in topological order.
    - * It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - * Special processing on the supernodal portion of L\U[*,j]
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -dcolumn_bmod ( - const int jcol, /* in */ - const int nseg, /* in */ - double *dense, /* in */ - double *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in */ - int fpanelc, /* in -- first column in the current panel */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - double alpha, beta; - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * nsupc = no of columns in supernode - * nsupr = no of rows in supernode (used as leading dimension) - * luptr = location of supernodal LU-block in storage - * kfnz = first nonz in the k-th supernodal segment - * no_zeros = no of leading zeros in a supernodal U-segment - */ - double ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int fsupc, nsupc, nsupr, segsze; - int nrow; /* No of rows in the matrix of matrix-vector */ - int jcolp1, jsupno, k, ksub, krep, krep_ind, ksupno; - register int lptr, kfnz, isub, irow, i; - register int no_zeros, new_next; - int ufirst, nextlu; - int fst_col; /* First column within small LU update */ - int d_fsupc; /* Distance between the first column of the current - panel and the first column of the current snode. 
*/ - int *xsup, *supno; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - int nzlumax; - double *tempv1; - double zero = 0.0; - double one = 1.0; - double none = -1.0; - int mem_error; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - nzlumax = Glu->nzlumax; - jcolp1 = jcol + 1; - jsupno = supno[jcol]; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - - krep = segrep[k]; - k--; - ksupno = supno[krep]; - if ( jsupno != ksupno ) { /* Outside the rectangular supernode */ - - fsupc = xsup[ksupno]; - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - /* Distance from the current supernode to the current panel; - d_fsupc=0 if fsupc > fpanelc. */ - d_fsupc = fst_col - fsupc; - - luptr = xlusup[fst_col] + d_fsupc; - lptr = xlsub[fsupc] + d_fsupc; - - kfnz = repfnz[krep]; - kfnz = SUPERLU_MAX ( kfnz, fpanelc ); - - segsze = krep - kfnz + 1; - nsupc = krep - fst_col + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nrow = nsupr - d_fsupc - nsupc; - krep_ind = lptr + nsupc - 1; - - ops[TRSV] += segsze * (segsze - 1); - ops[GEMV] += 2 * nrow * segsze; - - - /* - * Case 1: Update U-segment of size 1 -- col-col update - */ - if ( segsze == 1 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - dense[irow] -= ukj*lusup[luptr]; - luptr++; - } - - } else if ( segsze <= 3 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { /* Case 2: 2cols-col update */ - ukj -= ukj1 * lusup[luptr1]; - dense[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - dense[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] ); - } 
- } else { /* Case 3: 3cols-col update */ - ukj2 = dense[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - ukj1 -= ukj2 * lusup[luptr2-1]; - ukj = ukj - ukj1*lusup[luptr1] - ukj2*lusup[luptr2]; - dense[lsub[krep_ind]] = ukj; - dense[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - luptr2++; - dense[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] + ukj2*lusup[luptr2] ); - } - } - - - - } else { - /* - * Case: sup-col update - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense - */ - - no_zeros = kfnz - fst_col; - - /* Copy U[*,j] segment from dense[*] to tempv[*] */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - tempv[i] = dense[irow]; - ++isub; - } - - /* Dense triangular solve -- start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - dtrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - SGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - dgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - dlsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - dmatvec (nsupr, nrow , segsze, &lusup[luptr], tempv, tempv1); -#endif - - - /* Scatter tempv[] into SPA dense[] as a temporary storage */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense[irow] = tempv[i]; - tempv[i] = zero; - ++isub; - } - - /* Scatter tempv1[] into SPA dense[] */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - dense[irow] -= tempv1[i]; 
- tempv1[i] = zero; - ++isub; - } - } - - } /* if jsupno ... */ - - } /* for each segment... */ - - /* - * Process the supernodal portion of L\U[*,j] - */ - nextlu = xlusup[jcol]; - fsupc = xsup[jsupno]; - - /* Copy the SPA dense into L\U[*,j] */ - new_next = nextlu + xlsub[fsupc+1] - xlsub[fsupc]; - while ( new_next > nzlumax ) { - if (mem_error = dLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, Glu)) - return (mem_error); - lusup = Glu->lusup; - lsub = Glu->lsub; - } - - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = zero; - ++nextlu; - } - - xlusup[jcolp1] = nextlu; /* Close L\U[*,jcol] */ - - /* For more updates within the panel (also within the current supernode), - * should start from the first column of the panel, or the first column - * of the supernode, whichever is bigger. There are 2 cases: - * 1) fsupc < fpanelc, then fst_col := fpanelc - * 2) fsupc >= fpanelc, then fst_col := fsupc - */ - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - if ( fst_col < jcol ) { - - /* Distance between the current supernode and the current panel. - d_fsupc=0 if fsupc >= fpanelc. 
*/ - d_fsupc = fst_col - fsupc; - - lptr = xlsub[fsupc] + d_fsupc; - luptr = xlusup[fst_col] + d_fsupc; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nsupc = jcol - fst_col; /* Excluding jcol */ - nrow = nsupr - d_fsupc - nsupc; - - /* Points to the beginning of jcol in snode L\U(jsupno) */ - ufirst = xlusup[jcol] + d_fsupc; - - ops[TRSV] += nsupc * (nsupc - 1); - ops[GEMV] += 2 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#else - dtrsv_( "L", "N", "U", &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#endif - - alpha = none; beta = one; /* y := beta*y + alpha*A*x */ - -#ifdef _CRAY - SGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - dgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - dlsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - - dmatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], tempv ); - - /* Copy updates from tempv[*] into lusup[*] */ - isub = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - lusup[isub] -= tempv[i]; - tempv[i] = 0.0; - ++isub; - } - -#endif - - - } /* if fst_col < jcol ... */ - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcolumn_dfs.c deleted file mode 100644 index deebf948e3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcolumn_dfs.c +++ /dev/null @@ -1,275 +0,0 @@ - -/*! @file dcolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include "slu_ddefs.h" - -/*! \brief What type of supernodes we want */ -#define T2_SUPER - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   DCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS 
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal 
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -dcolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *xprune, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom, istop; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonz */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ( mem_error = dLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } 
else { - /* krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( mem_error = - dLUMemXpand(jcol,nextl,LSUB,&nzlmax,Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - -#ifdef T2_SUPER - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; -#endif - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first and last columns of - * a supernode. 
(first for num values, last for pruning) - */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1-1) ) { /* >= 3 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - istop = ito + jptr - jm1ptr; - xprune[jcolm1] = istop; /* Initialize xprune[jcol-1] */ - xlsub[jcol] = istop; - for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; /* = istop + length(jcol) */ - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xprune[jcol] = nextl; /* Initialize upper bound for pruning */ - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcomplex.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcomplex.c deleted file mode 100644 index 42496a6b09..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcomplex.c +++ /dev/null @@ -1,147 +0,0 @@ - -/*! @file dcomplex.c - * \brief Common arithmetic for complex type - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * This file defines common arithmetic operations for complex type.
    - * 
    - */ - -#include -#include -#include -#include "slu_dcomplex.h" - - -/*! \brief Complex Division c = a/b */ -void z_div(doublecomplex *c, doublecomplex *a, doublecomplex *b) -{ - double ratio, den; - double abr, abi, cr, ci; - - if( (abr = b->r) < 0.) - abr = - abr; - if( (abi = b->i) < 0.) - abi = - abi; - if( abr <= abi ) { - if (abi == 0) { - fprintf(stderr, "z_div.c: division by zero\n"); - exit(-1); - } - ratio = b->r / b->i ; - den = b->i * (1 + ratio*ratio); - cr = (a->r*ratio + a->i) / den; - ci = (a->i*ratio - a->r) / den; - } else { - ratio = b->i / b->r ; - den = b->r * (1 + ratio*ratio); - cr = (a->r + a->i*ratio) / den; - ci = (a->i - a->r*ratio) / den; - } - c->r = cr; - c->i = ci; -} - - -/*! \brief Returns sqrt(z.r^2 + z.i^2) */ -double z_abs(doublecomplex *z) -{ - double temp; - double real = z->r; - double imag = z->i; - - if (real < 0) real = -real; - if (imag < 0) imag = -imag; - if (imag > real) { - temp = real; - real = imag; - imag = temp; - } - if ((real+imag) == real) return(real); - - temp = imag/real; - temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/ - return (temp); -} - - -/*! \brief Approximates the abs. Returns abs(z.r) + abs(z.i) */ -double z_abs1(doublecomplex *z) -{ - double real = z->r; - double imag = z->i; - - if (real < 0) real = -real; - if (imag < 0) imag = -imag; - - return (real + imag); -} - -/*! \brief Return the exponentiation */ -void z_exp(doublecomplex *r, doublecomplex *z) -{ - double expx; - - expx = exp(z->r); - r->r = expx * cos(z->i); - r->i = expx * sin(z->i); -} - -/*! \brief Return the complex conjugate */ -void d_cnjg(doublecomplex *r, doublecomplex *z) -{ - r->r = z->r; - r->i = -z->i; -} - -/*! \brief Return the imaginary part */ -double d_imag(doublecomplex *z) -{ - return (z->i); -} - - -/*! \brief SIGN functions for complex number. 
Returns z/abs(z) */ -doublecomplex z_sgn(doublecomplex *z) -{ - register double t = z_abs(z); - register doublecomplex retval; - - if (t == 0.0) { - retval.r = 1.0, retval.i = 0.0; - } else { - retval.r = z->r / t, retval.i = z->i / t; - } - - return retval; -} - -/*! \brief Square-root of a complex number. */ -doublecomplex z_sqrt(doublecomplex *z) -{ - doublecomplex retval; - register double cr, ci, real, imag; - - real = z->r; - imag = z->i; - - if ( imag == 0.0 ) { - retval.r = sqrt(real); - retval.i = 0.0; - } else { - ci = (sqrt(real*real + imag*imag) - real) / 2.0; - ci = sqrt(ci); - cr = imag / (2.0 * ci); - retval.r = cr; - retval.i = ci; - } - - return retval; -} - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcopy_to_ucol.c deleted file mode 100644 index 5a1d7b75cd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dcopy_to_ucol.c +++ /dev/null @@ -1,103 +0,0 @@ - -/*! @file dcopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_ddefs.h" - -int -dcopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - double *dense, /* modified - reset to zero on return */ - GlobalLU_t *Glu /* modified */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - double *ucol; - int *usub, *xusub; - int nzumax; - double zero = 0.0; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 1; - - new_next = nextu + segsze; - while ( new_next > nzumax ) { - if (mem_error = dLUMemXpand(jcol, nextu, UCOL, &nzumax, Glu)) - return (mem_error); - ucol = Glu->ucol; - if (mem_error = dLUMemXpand(jcol, nextu, USUB, &nzumax, Glu)) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - dense[irow] = zero; - nextu++; - isub++; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ddiagonal.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ddiagonal.c deleted file mode 100644 index 60c7aa103f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ddiagonal.c +++ /dev/null @@ -1,129 +0,0 @@ - -/*! 
@file ddiagonal.c - * \brief Auxiliary routines to work with diagonal elements - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -int dfill_diag(int n, NCformat *Astore) -/* fill explicit zeros on the diagonal entries, so that the matrix is not - structurally singular. */ -{ - double *nzval = (double *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - double *nzval_new; - double zero = 0.0; - int *rowind_new; - int i, j, diag; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = doubleMalloc(nnz + fill); - rowind_new = intMalloc(nnz + fill); - fill = 0; - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - nzval_new[j + fill] = nzval[j]; - } - if (diag < 0) - { - rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill] = zero; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - Astore->nnz += fill; - return fill; -} - -int ddominate(int n, NCformat *Astore) -/* make the matrix diagonally dominant */ -{ - double *nzval = (double *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - double *nzval_new; - int *rowind_new; - int i, j, diag; - double s; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = doubleMalloc(nnz + fill); - rowind_new = intMalloc(nnz+ fill); - fill = 0; - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - s += fabs(nzval_new[j + fill] = nzval[j]); - } - if (diag >= 0) { - nzval_new[diag+fill] = s * 3.0; - } else { 
- rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill] = s * 3.0; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - else - { - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - { - if (rowind[j] == i) diag = j; - s += fabs(nzval[j]); - } - nzval[diag] = s * 3.0; - } - } - Astore->nnz += fill; - return fill; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgscon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgscon.c deleted file mode 100644 index d91474a2b4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgscon.c +++ /dev/null @@ -1,157 +0,0 @@ - -/*! @file dgscon.c - * \brief Estimates reciprocal of the condition number of a general matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routines DGECON.
    - * 
    - */ - -/* - * File name: dgscon.c - * History: Modified from lapack routines DGECON. - */ -#include -#include "slu_ddefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   DGSCON estimates the reciprocal of the condition number of a general 
    - *   real matrix A, in either the 1-norm or the infinity-norm, using   
    - *   the LU factorization computed by DGETRF.   *
    - *
    - *   An estimate is obtained for norm(inv(A)), and the reciprocal of the   
    - *   condition number is computed as   
    - *      RCOND = 1 / ( norm(A) * norm(inv(A)) ).   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - *   Arguments   
    - *   =========   
    - *
    - *    NORM    (input) char*
    - *            Specifies whether the 1-norm condition number or the   
    - *            infinity-norm condition number is required:   
    - *            = '1' or 'O':  1-norm;   
    - *            = 'I':         Infinity-norm.
    - *	    
    - *    L       (input) SuperMatrix*
    - *            The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *            dgstrf(). Use compressed row subscripts storage for supernodes,
    - *            i.e., L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - * 
    - *    U       (input) SuperMatrix*
    - *            The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *            dgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *            Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *	    
    - *    ANORM   (input) double
    - *            If NORM = '1' or 'O', the 1-norm of the original matrix A.   
    - *            If NORM = 'I', the infinity-norm of the original matrix A.
    - *	    
    - *    RCOND   (output) double*
    - *           The reciprocal of the condition number of the matrix A,   
    - *           computed as RCOND = 1/(norm(A) * norm(inv(A))).
    - *	    
    - *    INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    ===================================================================== 
    - * 
    - */ - -void -dgscon(char *norm, SuperMatrix *L, SuperMatrix *U, - double anorm, double *rcond, SuperLUStat_t *stat, int *info) -{ - - - /* Local variables */ - int kase, kase1, onenrm, i; - double ainvnm; - double *work; - int *iwork; - extern int drscl_(int *, double *, double *, int *); - - extern int dlacon_(int *, double *, double *, int *, double *, int *); - - - /* Test the input parameters. */ - *info = 0; - onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O"); - if (! onenrm && ! lsame_(norm, "I")) *info = -1; - else if (L->nrow < 0 || L->nrow != L->ncol || - L->Stype != SLU_SC || L->Dtype != SLU_D || L->Mtype != SLU_TRLU) - *info = -2; - else if (U->nrow < 0 || U->nrow != U->ncol || - U->Stype != SLU_NC || U->Dtype != SLU_D || U->Mtype != SLU_TRU) - *info = -3; - if (*info != 0) { - i = -(*info); - xerbla_("dgscon", &i); - return; - } - - /* Quick return if possible */ - *rcond = 0.; - if ( L->nrow == 0 || U->nrow == 0) { - *rcond = 1.; - return; - } - - work = doubleCalloc( 3*L->nrow ); - iwork = intMalloc( L->nrow ); - - - if ( !work || !iwork ) - ABORT("Malloc fails for work arrays in dgscon."); - - /* Estimate the norm of inv(A). */ - ainvnm = 0.; - if ( onenrm ) kase1 = 1; - else kase1 = 2; - kase = 0; - - do { - dlacon_(&L->nrow, &work[L->nrow], &work[0], &iwork[0], &ainvnm, &kase); - - if (kase == 0) break; - - if (kase == kase1) { - /* Multiply by inv(L). */ - sp_dtrsv("L", "No trans", "Unit", L, U, &work[0], stat, info); - - /* Multiply by inv(U). */ - sp_dtrsv("U", "No trans", "Non-unit", L, U, &work[0], stat, info); - - } else { - - /* Multiply by inv(U'). */ - sp_dtrsv("U", "Transpose", "Non-unit", L, U, &work[0], stat, info); - - /* Multiply by inv(L'). */ - sp_dtrsv("L", "Transpose", "Unit", L, U, &work[0], stat, info); - - } - - } while ( kase != 0 ); - - /* Compute the estimate of the reciprocal condition number. */ - if (ainvnm != 0.) *rcond = (1. 
/ ainvnm) / anorm; - - SUPERLU_FREE (work); - SUPERLU_FREE (iwork); - return; - -} /* dgscon */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsequ.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsequ.c deleted file mode 100644 index 73870d460a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsequ.c +++ /dev/null @@ -1,195 +0,0 @@ - -/*! @file dgsequ.c - * \brief Computes row and column scalings - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from LAPACK routine DGEEQU
    - * 
    - */ -/* - * File name: dgsequ.c - * History: Modified from LAPACK routine DGEEQU - */ -#include -#include "slu_ddefs.h" - - - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   DGSEQU computes row and column scalings intended to equilibrate an   
    - *   M-by-N sparse matrix A and reduce its condition number. R returns the row
    - *   scale factors and C the column scale factors, chosen to try to make   
    - *   the largest element in each row and column of the matrix B with   
    - *   elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1.   
    - *
    - *   R(i) and C(j) are restricted to be between SMLNUM = smallest safe   
    - *   number and BIGNUM = largest safe number.  Use of these scaling   
    - *   factors is not guaranteed to reduce the condition number of A but   
    - *   works well in practice.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input) SuperMatrix*
    - *           The matrix of dimension (A->nrow, A->ncol) whose equilibration
    - *           factors are to be computed. The type of A can be:
    - *           Stype = SLU_NC; Dtype = SLU_D; Mtype = SLU_GE.
    - *	    
    - *   R       (output) double*, size A->nrow
    - *           If INFO = 0 or INFO > M, R contains the row scale factors   
    - *           for A.
    - *	    
    - *   C       (output) double*, size A->ncol
    - *           If INFO = 0,  C contains the column scale factors for A.
    - *	    
    - *   ROWCND  (output) double*
    - *           If INFO = 0 or INFO > M, ROWCND contains the ratio of the   
    - *           smallest R(i) to the largest R(i).  If ROWCND >= 0.1 and   
    - *           AMAX is neither too large nor too small, it is not worth   
    - *           scaling by R.
    - *	    
    - *   COLCND  (output) double*
    - *           If INFO = 0, COLCND contains the ratio of the smallest   
    - *           C(i) to the largest C(i).  If COLCND >= 0.1, it is not   
    - *           worth scaling by C.
    - *	    
    - *   AMAX    (output) double*
    - *           Absolute value of largest matrix element.  If AMAX is very   
    - *           close to overflow or very close to underflow, the matrix   
    - *           should be scaled.
    - *	    
    - *   INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *           > 0:  if INFO = i,  and i is   
    - *                 <= A->nrow:  the i-th row of A is exactly zero   
    - *                 >  A->ncol:  the (i-M)-th column of A is exactly zero   
    - *
    - *   ===================================================================== 
    - * 
    - */ -void -dgsequ(SuperMatrix *A, double *r, double *c, double *rowcnd, - double *colcnd, double *amax, int *info) -{ - - - /* Local variables */ - NCformat *Astore; - double *Aval; - int i, j, irow; - double rcmin, rcmax; - double bignum, smlnum; - extern double dlamch_(char *); - - /* Test the input parameters. */ - *info = 0; - if ( A->nrow < 0 || A->ncol < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_D || A->Mtype != SLU_GE ) - *info = -1; - if (*info != 0) { - i = -(*info); - xerbla_("dgsequ", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || A->ncol == 0 ) { - *rowcnd = 1.; - *colcnd = 1.; - *amax = 0.; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Get machine constants. */ - smlnum = dlamch_("S"); - bignum = 1. / smlnum; - - /* Compute row scale factors. */ - for (i = 0; i < A->nrow; ++i) r[i] = 0.; - - /* Find the maximum element in each row. */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - r[irow] = SUPERLU_MAX( r[irow], fabs(Aval[i]) ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (i = 0; i < A->nrow; ++i) { - rcmax = SUPERLU_MAX(rcmax, r[i]); - rcmin = SUPERLU_MIN(rcmin, r[i]); - } - *amax = rcmax; - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (i = 0; i < A->nrow; ++i) - if (r[i] == 0.) { - *info = i + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (i = 0; i < A->nrow; ++i) - r[i] = 1. / SUPERLU_MIN( SUPERLU_MAX( r[i], smlnum ), bignum ); - /* Compute ROWCND = min(R(I)) / max(R(I)) */ - *rowcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - /* Compute column scale factors */ - for (j = 0; j < A->ncol; ++j) c[j] = 0.; - - /* Find the maximum element in each column, assuming the row - scalings computed above. 
*/ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - c[j] = SUPERLU_MAX( c[j], fabs(Aval[i]) * r[irow] ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->ncol; ++j) { - rcmax = SUPERLU_MAX(rcmax, c[j]); - rcmin = SUPERLU_MIN(rcmin, c[j]); - } - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (j = 0; j < A->ncol; ++j) - if ( c[j] == 0. ) { - *info = A->nrow + j + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (j = 0; j < A->ncol; ++j) - c[j] = 1. / SUPERLU_MIN( SUPERLU_MAX( c[j], smlnum ), bignum); - /* Compute COLCND = min(C(J)) / max(C(J)) */ - *colcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - return; - -} /* dgsequ */ - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsisx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsisx.c deleted file mode 100644 index 546b0755ca..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsisx.c +++ /dev/null @@ -1,693 +0,0 @@ - -/*! @file dgsisx.c - * \brief Gives the approximate solutions of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * DGSISX gives the approximate solutions of linear equations A*X=B or A'*X=B,
    - * using the ILU factorization from dgsitrf(). An estimation of
    - * the condition number is provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *	1.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A is
    - *	     overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *	     (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *	     = TRANS or CONJ).
    - *
    - *	1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *	     matrix that usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the matrix A (after equilibration if options->Equil = YES)
    - *	     as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *	1.4. Compute the reciprocal pivot growth factor.
    - *
    - *	1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n),
    - *	     and info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	1.6. The system of equations is solved for X using the factored form
    - *	     of A.
    - *
    - *	1.7. options->IterRefine is not used
    - *
    - *	1.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *	1.9. options for ILU only
    - *	     1) If options->RowPerm = LargeDiag, MC64 is used to scale and
    - *		permute the matrix to an I-matrix, that is Pr*Dr*A*Dc has
    - *		entries of modulus 1 on the diagonal and off-diagonal entries
    - *		of modulus at most 1. If MC64 fails, dgsequ() is used to
    - *		equilibrate the system.
    - *	     2) options->ILU_DropTol = tau is the threshold for dropping.
    - *		For L, it is used directly (for the whole row in a supernode);
    - *		For U, ||A(:,i)||_oo * tau is used as the threshold
    - *	        for the	i-th column.
    - *		If a secondary dropping rule is required, tau will
    - *	        also be used to compute the second threshold.
    - *	     3) options->ILU_FillFactor = gamma, used as the initial guess
    - *		of memory growth.
    - *		If a secondary dropping rule is required, it will also
    - *              be used as an upper bound of the memory.
    - *	     4) options->ILU_DropRule specifies the dropping rule.
    - *		Option		Explanation
    - *		======		===========
    - *		DROP_BASIC:	Basic dropping rule, supernodal based ILU.
    - *		DROP_PROWS:	Supernodal based ILUTP, p = gamma * nnz(A) / n.
    - *		DROP_COLUMN:	Variation of ILUTP, for j-th column,
    - *				p = gamma * nnz(A(:,j)).
    - *		DROP_AREA;	Variation of ILUTP, for j-th column, use
    - *				nnz(F(:,1:j)) / nnz(A(:,1:j)) to control the
    - *				memory.
    - *		DROP_DYNAMIC:	Modify the threshold tau during the
    - *				factorizaion.
    - *				If nnz(L(:,1:j)) / nnz(A(:,1:j)) < gamma
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				Otherwise
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				tau_U(j) uses the similar rule.
    - *				NOTE: the thresholds used by L and U are
    - *				indenpendent.
    - *		DROP_INTERP:	Compute the second dropping threshold by
    - *				interpolation instead of sorting (default).
    - *				In this case, the actual fill ratio is not
    - *				guaranteed smaller than gamma.
    - *		DROP_PROWS, DROP_COLUMN and DROP_AREA are mutually exclusive.
    - *		( The default option is DROP_BASIC | DROP_AREA. )
    - *	     5) options->ILU_Norm is the criterion of computing the average
    - *		value of a row in L.
    - *		options->ILU_Norm	average(x[1:n])
    - *		=================	===============
    - *		ONE_NORM		||x||_1 / n
    - *		TWO_NORM		||x||_2 / sqrt(n)
    - *		INF_NORM		max{|x[i]|}
    - *	     6) options->ILU_MILU specifies the type of MILU's variation.
    - *		= SILU (default): do not perform MILU;
    - *		= SMILU_1 (not recommended):
    - *		    U(i,i) := U(i,i) + sum(dropped entries);
    - *		= SMILU_2:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(dropped entries);
    - *		= SMILU_3:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(|dropped entries|);
    - *		NOTE: Even SMILU_1 does not preserve the column sum because of
    - *		late dropping.
    - *	     7) options->ILU_FillTol is used as the perturbation when
    - *		encountering zero pivots. If some U(i,i) = 0, so that U is
    - *		exactly singular, then
    - *		   U(i,i) := ||A(:,i)|| * options->ILU_FillTol ** (1 - i / n).
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *	to the transpose of A:
    - *
    - *	2.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A' is
    - *	     overwritten by diag(R)*A'*diag(C) and B by diag(R)*B
    - *	     (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *	2.2. Permute columns of transpose(A) (rows of A),
    - *	     forming transpose(A)*Pc, where Pc is a permutation matrix that
    - *	     usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the transpose(A) (after equilibration if
    - *	     options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *	     permutation Pr determined by partial pivoting.
    - *
    - *	2.4. Compute the reciprocal pivot growth factor.
    - *
    - *	2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		 U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n).
    - *	     And info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	2.6. The system of equations is solved for X using the factored form
    - *	     of transpose(A).
    - *
    - *	2.7. If options->IterRefine is not used.
    - *
    - *	2.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the LU decomposition will be performed and how the
    - *	   system will be solved.
    - *
    - * A	   (input/output) SuperMatrix*
    - *	   Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *	   of the linear equations is A->nrow. Currently, the type of A can be:
    - *	   Stype = SLU_NC or SLU_NR, Dtype = SLU_D, Mtype = SLU_GE.
    - *	   In the future, more general A may be handled.
    - *
    - *	   On entry, If options->Fact = FACTORED and equed is not 'N',
    - *	   then A must have been equilibrated by the scaling factors in
    - *	   R and/or C.
    - *	   On exit, A is not modified if options->Equil = NO, or if
    - *	   options->Equil = YES but equed = 'N' on exit.
    - *	   Otherwise, if options->Equil = YES and equed is not 'N',
    - *	   A is scaled as follows:
    - *	   If A->Stype = SLU_NC:
    - *	     equed = 'R':  A := diag(R) * A
    - *	     equed = 'C':  A := A * diag(C)
    - *	     equed = 'B':  A := diag(R) * A * diag(C).
    - *	   If A->Stype = SLU_NR:
    - *	     equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *	     equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *	     equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *	   which defines the permutation matrix Pc; perm_c[i] = j means
    - *	   column i of A is in position j in A*Pc.
    - *	   On exit, perm_c may be overwritten by the product of the input
    - *	   perm_c and a permutation that postorders the elimination tree
    - *	   of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *	   is already in postorder.
    - *
    - *	   If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *	   which describes permutation of columns of transpose(A) 
    - *	   (rows of A) as described above.
    - *
    - * perm_r  (input/output) int*
    - *	   If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *	   which defines the permutation matrix Pr, and is determined
    - *	   by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *	   position j in Pr*A.
    - *
    - *	   If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *	   determines permutation of rows of transpose(A)
    - *	   (columns of A) as described above.
    - *
    - *	   If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	   will try to use the input perm_r, unless a certain threshold
    - *	   criterion is violated. In that case, perm_r is overwritten by a
    - *	   new permutation determined by partial pivoting or diagonal
    - *	   threshold pivoting.
    - *	   Otherwise, perm_r is output argument.
    - *
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *	   Elimination tree of Pc'*A'*A*Pc.
    - *	   If options->Fact != FACTORED and options->Fact != DOFACT,
    - *	   etree is an input argument, otherwise it is an output argument.
    - *	   Note: etree is a vector of parent pointers for a forest whose
    - *	   vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *	   Specifies the form of equilibration that was done.
    - *	   = 'N': No equilibration.
    - *	   = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *	   = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *	   = 'B': Both row and column equilibration, i.e., A was replaced 
    - *		  by diag(R)*A*diag(C).
    - *	   If options->Fact = FACTORED, equed is an input argument,
    - *	   otherwise it is an output argument.
    - *
    - * R	   (input/output) double*, dimension (A->nrow)
    - *	   The row scale factors for A or transpose(A).
    - *	   If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *	   If equed = 'N' or 'C', R is not accessed.
    - *	   If options->Fact = FACTORED, R is an input argument,
    - *	       otherwise, R is output.
    - *	   If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *	       of R must be positive.
    - *
    - * C	   (input/output) double*, dimension (A->ncol)
    - *	   The column scale factors for A or transpose(A).
    - *	   If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *	   If equed = 'N' or 'R', C is not accessed.
    - *	   If options->Fact = FACTORED, C is an input argument,
    - *	       otherwise, C is output.
    - *	   If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *	       of C must be positive.
    - *
    - * L	   (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype SLU_= NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses compressed row subscripts storage for supernodes, i.e.,
    - *	   L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U	   (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype = SLU_NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses column-wise storage scheme, i.e., U has types:
    - *	   Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *	   User supplied workspace, should be large enough
    - *	   to hold data structures for factors L and U.
    - *	   On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 mem_usage->total_needed; no other side effects.
    - *
    - *	   See argument 'mem_usage' for memory usage statistics.
    - *
    - * B	   (input/output) SuperMatrix*
    - *	   B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *	   On entry, the right hand side matrix.
    - *	   If B->ncol = 0, only LU decomposition is performed, the triangular
    - *			   solve is skipped.
    - *	   On exit,
    - *	      if equed = 'N', B is not modified; otherwise
    - *	      if A->Stype = SLU_NC:
    - *		 if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *		    B is overwritten by diag(R)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *		    B is overwritten by diag(C)*B;
    - *	      if A->Stype = SLU_NR:
    - *		 if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *		    B is overwritten by diag(C)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *		    B is overwritten by diag(R)*B.
    - *
    - * X	   (output) SuperMatrix*
    - *	   X has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *	   If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *	   to the original system of equations. Note that A and B are modified
    - *	   on exit if equed is not 'N', and the solution to the equilibrated
    - *	   system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *	   equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *	   and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) double*
    - *	   The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *	   The infinity norm is used. If recip_pivot_growth is much less
    - *	   than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) double*
    - *	   The estimate of the reciprocal condition number of the matrix A
    - *	   after equilibration (if done). If rcond is less than the machine
    - *	   precision (in particular, if rcond = 0), the matrix is singular
    - *	   to working precision. This condition is indicated by a return
    - *	   code of info > 0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *	   Record the memory usage statistics, consisting of following fields:
    - *	   - for_lu (float)
    - *	     The amount of space used in bytes for L\U data structures.
    - *	   - total_needed (float)
    - *	     The amount of space needed in bytes to perform factorization.
    - *	   - expansions (int)
    - *	     The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *	  Record the statistics on runtime and floating-point operation count.
    - *	  See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - *	   > 0: if info = i, and i is
    - *		<= A->ncol: number of zero pivots. They are replaced by small
    - *		      entries due to options->ILU_FillTol.
    - *		= A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *		      precision, meaning that the matrix is singular to
    - *		      working precision. Nevertheless, the solution and
    - *		      error bounds are computed because there are a number
    - *		      of situations where the computed solution can be more
    - *		      accurate than the value of RCOND would suggest.
    - *		> A->ncol+1: number of bytes allocated when memory allocation
    - *		      failure occurred, plus A->ncol.
    - * 
    - */ - -void -dgsisx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, double *R, double *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, - double *recip_pivot_growth, double *rcond, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info) -{ - - DNformat *Bstore, *Xstore; - double *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec, mc64; - trans_t trant; - char norm[1]; - int i, j, info1; - double amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - double diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - int *perm = NULL; - - /* External functions */ - extern double dlangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - mc64 = (options->RowPerm == LargeDiag); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = dlamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_D || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_D || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("dgsisx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - dCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact ) { - register int i, j; - NCformat *Astore = AA->Store; - int nnz = Astore->nnz; - int *colptr = Astore->colptr; - int *rowind = Astore->rowind; - double *nzval = (double *)Astore->nzval; - int n = AA->nrow; - - if ( mc64 ) { - *equed = 'B'; - rowequ = colequ = 1; - t0 = SuperLU_timer_(); - if ((perm = intMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for perm[]"); - - info1 = dldperm(5, n, nnz, colptr, rowind, nzval, perm, R, C); - - if (info1 > 0) { /* MC64 fails, call dgsequ() later */ - mc64 = 0; - SUPERLU_FREE(perm); - perm = NULL; - } else { - for (i = 0; i < n; i++) { - R[i] = exp(R[i]); - C[i] = exp(C[i]); - } - /* permute and scale the matrix */ - for (j = 0; j < n; j++) { - for (i = colptr[j]; i < colptr[j + 1]; i++) { - nzval[i] *= R[rowind[i]] * C[j]; - rowind[i] = perm[rowind[i]]; - } - } - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - if ( !mc64 & equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - dgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - dlaqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. 
*/ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= R[i]; - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= C[i]; - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t0; - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - dgsitrf(options, &AC, relax, panel_size, etree, work, lwork, - perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) return; - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = dPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = dlangs(norm, AA); - dgscon(norm, L, U, anorm, rcond, stat, &info1); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. 
*/ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - dgstrs (trant, L, U, perm_c, perm_r, X, stat, &info1); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original - system. */ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= C[i]; - } - } - } else { - if ( rowequ ) { - if (perm) { - double *tmp; - int n = A->nrow; - - if ((tmp = doubleMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for tmp[]"); - for (j = 0; j < nrhs; j++) { - for (i = 0; i < n; i++) - tmp[i] = Xmat[i + j * ldx]; /*dcopy*/ - for (i = 0; i < n; i++) - Xmat[i + j * ldx] = R[i] * tmp[perm[i]]; - } - SUPERLU_FREE(tmp); - } else { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= R[i]; - } - } - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < dlamch_("E") && *info == 0) *info = A->ncol + 1; - } - - if (perm) SUPERLU_FREE(perm); - - if ( nofact ) { - ilu_dQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsitrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsitrf.c deleted file mode 100644 index dadeb4d7e0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsitrf.c +++ /dev/null @@ -1,625 +0,0 @@ - -/*! @file dgsitf.c - * \brief Computes an ILU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -#ifdef DEBUG -int num_drop_L; -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * DGSITRF computes an ILU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the ILU decomposition will be performed.
    - *
    - * A	    (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *	    (A->nrow, A->ncol). The type of A can be:
    - *	    Stype = SLU_NCP; Dtype = SLU_D; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *	    To control degree of relaxing supernodes. If the number
    - *	    of nodes (columns) in a subtree of the elimination tree is less
    - *	    than relax, this subtree is considered as one supernode,
    - *	    regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *	    A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *	    Elimination tree of A'*A.
    - *	    Note: etree is a vector of parent pointers for a forest whose
    - *	    vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *	    On input, the columns of A should be permuted so that the
    - *	    etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *	    User-supplied work space and space for the output data structures.
    - *	    Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the
    - *	    permutation matrix Pc; perm_c[i] = j means column i of A is
    - *	    in position j in A*Pc.
    - *	    When searching for diagonal, perm_c[*] is applied to the
    - *	    row subscripts of A, so that diagonal threshold pivoting
    - *	    can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *	    Row permutation vector which defines the permutation matrix Pr,
    - *	    perm_r[i] = j means row i of A is in position j in Pr*A.
    - *	    If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	       will try to use the input perm_r, unless a certain threshold
    - *	       criterion is violated. In that case, perm_r is overwritten by
    - *	       a new permutation determined by partial pivoting or diagonal
    - *	       threshold pivoting.
    - *	    Otherwise, perm_r is output argument;
    - *
    - * L	    (output) SuperMatrix*
    - *	    The factor L from the factorization Pr*A=L*U; use compressed row
    - *	    subscripts storage for supernodes, i.e., L has type:
    - *	    Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U	    (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *	    storage scheme, i.e., U has types: Stype = SLU_NC,
    - *	    Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *	    Record the statistics on runtime and floating-point operation count.
    - *	    See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *	    = 0: successful exit
    - *	    < 0: if info = -i, the i-th argument had an illegal value
    - *	    > 0: if info = i, and i is
    - *	       <= A->ncol: number of zero pivots. They are replaced by small
    - *		  entries according to options->ILU_FillTol.
    - *	       > A->ncol: number of bytes allocated when memory allocation
    - *		  failure occurred, plus A->ncol. If lwork = -1, it is
    - *		  the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays:
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 4 of them:
    - *	      marker/marker1 are used for panel dfs, see (ilu_)dpanel_dfs.c;
    - *	      marker2 is used for inner-factorization, see (ilu)_dcolumn_dfs.c;
    - *	      marker_relax(has its own space) is used for relaxed supernodes.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *	Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs)
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the
    - *	last column of a supernode.
    - *	The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a
    - *	supernodal representative r, repfnz[r] is the location of the first
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel.
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below
    - *	the panel diagonal. These are filled in during dpanel_dfs(), and are
    - *	used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *		   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_util.h.
    - *	It is also used by the dropping routine ilu_ddrop_row().
    - * 
    - */ - -void -dgsitrf(superlu_options_t *options, SuperMatrix *A, int relax, int panel_size, - int *etree, void *work, int lwork, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *swap, *iswap; /* swap is used to store the row permutation - during the factorization. Initially, it is set - to iperm_c (row indeces of Pc*A*Pc'). - iswap is the inverse of swap. After the - factorization, it is equal to perm_r. */ - int *iwork; - double *dwork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *marker, *marker_relax; - double *dense, *tempv; - int *relax_end, *relax_fsupc; - double *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - double *amax; - double drop_sum; - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. 
*/ - int *iwork2; /* used by the second dropping rule */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - double drop_tol = options->ILU_DropTol; /* tau */ - double fill_ini = options->ILU_FillTol; /* tau^hat */ - double gamma = options->ILU_FillFactor; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - double fill_tol; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - int last_drop;/* the last column which the dropping rules applied */ - int quota; - int nnzAj; /* number of nonzeros in A(:,1:j) */ - int nnzLj, nnzUj; - double tol_L = drop_tol, tol_U = drop_tol; - double zero = 0.0; - - /* Executable */ - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = dLUMemInit(fact, work, lwork, m, n, Astore->nnz, panel_size, - gamma, L, U, &Glu, &iwork, &dwork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; - xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &marker_relax, &marker); - dSetRWork(m, panel_size, dwork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - for (k = 
0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - swap = (int *)intMalloc(n); - for (k = 0; k < n; k++) swap[k] = iperm_c[k]; - iswap = (int *)intMalloc(n); - for (k = 0; k < n; k++) iswap[k] = perm_c[k]; - amax = (double *) doubleMalloc(panel_size); - if (drop_rule & DROP_SECONDARY) - iwork2 = (int *)intMalloc(n); - else - iwork2 = NULL; - - nnzAj = 0; - nnzLj = 0; - nnzUj = 0; - last_drop = SUPERLU_MAX(min_mn - 2 * sp_ienv(3), (int)(min_mn * 0.95)); - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - relax_fsupc = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) - ilu_heap_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - else - ilu_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* Mark the rows used by relaxed supernodes */ - ifill (marker_relax, m, EMPTY); - i = mark_relax(m, relax_end, relax_fsupc, xa_begin, xa_end, - asub, marker_relax); -#if ( PRNTlevel >= 1) - printf("%d relaxed supernodes.\n", i); -#endif - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* Drop small rows in the previous supernode. 
*/ - if (jcol > 0 && jcol < last_drop) { - int first = xsup[supno[jcol - 1]]; - int last = jcol - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / min_mn); - - /* Drop small rows */ - i = ilu_ddrop_row(options, first, last, tol_L, quota, &nnzLj, - &fill_tol, &Glu, tempv, iwork2, 0); - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = ilu_dsnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ((*info = dLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu))) - return; - } - - for (icol = jcol; icol <= kcol; icol++) { - xusub[icol+1] = nextu; - - amax[0] = 0.0; - /* Scatter into SPA dense[*] */ - for (k = xa_begin[icol]; k < xa_end[icol]; k++) { - register double tmp = fabs(a[k]); - if (tmp > amax[0]) amax[0] = tmp; - dense[asub[k]] = a[k]; - } - nnzAj += xa_end[icol] - 
xa_begin[icol]; - if (amax[0] == 0.0) { - amax[0] = fill_ini; -#if ( PRNTlevel >= 1) - printf("Column %d is entirely zero!\n", icol); - fflush(stdout); -#endif - } - - /* Numeric update within the snode */ - dsnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if (usepr) pivrow = iperm_r[icol]; - fill_tol = pow(fill_ini, 1.0 - (double)icol / (double)min_mn); - if ( (*info = ilu_dpivotL(icol, diag_pivot_thresh, &usepr, - perm_r, iperm_c[icol], swap, iswap, - marker_relax, &pivrow, - amax[0] * fill_tol, milu, zero, - &Glu, stat)) ) { - iinfo++; - marker[pivrow] = kcol; - } - - } - - jcol = kcol + 1; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - ilu_dpanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, amax, panel_lsub, segrep, repfnz, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - dpanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for (jj = jcol; jj < jcol + panel_size; jj++) { - - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - nnzAj += xa_end[jj] - xa_begin[jj]; - - if ((*info = ilu_dcolumn_dfs(m, jj, perm_r, &nseg, - &panel_lsub[k], segrep, &repfnz[k], - marker, parent, xplore, &Glu))) - return; - - /* Numeric updates */ - if ((*info = dcolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Make a fill-in position if the column is entirely zero */ - if (xlsub[jj + 1] == xlsub[jj]) 
{ - register int i, row; - int nextl; - int nzlmax = Glu.nzlmax; - int *lsub = Glu.lsub; - int *marker2 = marker + 2 * m; - - /* Allocate memory */ - nextl = xlsub[jj] + 1; - if (nextl >= nzlmax) { - int error = dLUMemXpand(jj, nextl, LSUB, &nzlmax, &Glu); - if (error) { *info = error; return; } - lsub = Glu.lsub; - } - xlsub[jj + 1]++; - assert(xlusup[jj]==xlusup[jj+1]); - xlusup[jj + 1]++; - Glu.lusup[xlusup[jj]] = zero; - - /* Choose a row index (pivrow) for fill-in */ - for (i = jj; i < n; i++) - if (marker_relax[swap[i]] <= jj) break; - row = swap[i]; - marker2[row] = jj; - lsub[xlsub[jj]] = row; -#ifdef DEBUG - printf("Fill col %d.\n", jj); - fflush(stdout); -#endif - } - - /* Computer the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * jj / m; - else if (drop_rule & DROP_COLUMN) - quota = gamma * (xa_end[jj] - xa_begin[jj]) * - (jj + 1) / m; - else if (drop_rule & DROP_AREA) - quota = gamma * 0.9 * nnzAj * 0.5 - nnzUj; - else - quota = m; - - /* Copy the U-segments to ucol[*] and drop small entries */ - if ((*info = ilu_dcopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], drop_rule, - milu, amax[jj - jcol] * tol_U, - quota, &drop_sum, &nnzUj, &Glu, - iwork2)) != 0) - return; - - /* Reset the dropping threshold if required */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * 0.9 * nnzAj * 0.5 < nnzLj) - tol_U = SUPERLU_MIN(1.0, tol_U * 2.0); - else - tol_U = SUPERLU_MAX(drop_tol, tol_U * 0.5); - } - - drop_sum *= MILU_ALPHA; - if (usepr) pivrow = iperm_r[jj]; - fill_tol = pow(fill_ini, 1.0 - (double)jj / (double)min_mn); - if ( (*info = ilu_dpivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_c[jj], swap, iswap, - marker_relax, &pivrow, - amax[jj - jcol] * fill_tol, milu, - drop_sum, &Glu, stat)) ) { - iinfo++; - marker[m + pivrow] = jj; - marker[2 * m + pivrow] = jj; - } - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - - /* Start a new supernode, drop the previous one */ - if (jj > 0 
&& supno[jj] > supno[jj - 1] && jj < last_drop) { - int first = xsup[supno[jj - 1]]; - int last = jj - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) - / m) - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / - (double)min_mn); - - /* Drop small rows */ - i = ilu_ddrop_row(options, first, last, tol_L, quota, - &nnzLj, &fill_tol, &Glu, tempv, iwork2, - 1); - - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } /* if start a new supernode */ - - } /* for */ - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - ilu_countnz(min_mn, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - dLUWorkFree(iwork, dwork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. 
- There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - dCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_D, SLU_TRLU); - dCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_D, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - SUPERLU_FREE (swap); - SUPERLU_FREE (iswap); - SUPERLU_FREE (relax_fsupc); - SUPERLU_FREE (amax); - if ( iwork2 ) SUPERLU_FREE (iwork2); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsrfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsrfs.c deleted file mode 100644 index 052285b56c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgsrfs.c +++ /dev/null @@ -1,452 +0,0 @@ - -/*! @file dgsrfs.c - * \brief Improves computed solution to a system of inear equations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routine DGERFS
    - * 
    - */ -/* - * File name: dgsrfs.c - * History: Modified from lapack routine DGERFS - */ -#include -#include "slu_ddefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   DGSRFS improves the computed solution to a system of linear   
    - *   equations and provides error bounds and backward error estimates for 
    - *   the solution.   
    - *
    - *   If equilibration was performed, the system becomes:
    - *           (diag(R)*A_original*diag(C)) * X = diag(R)*B_original.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *   
    - *   A       (input) SuperMatrix*
    - *           The original matrix A in the system, or the scaled A if
    - *           equilibration was done. The type of A can be:
    - *           Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_GE.
    - *    
    - *   L       (input) SuperMatrix*
    - *	     The factor L from the factorization Pr*A*Pc=L*U. Use
    - *           compressed row subscripts storage for supernodes, 
    - *           i.e., L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - * 
    - *   U       (input) SuperMatrix*
    - *           The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *           dgstrf(). Use column-wise storage scheme, 
    - *           i.e., U has types: Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - *   perm_c  (input) int*, dimension (A->ncol)
    - *	     Column permutation vector, which defines the 
    - *           permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *           in position j in A*Pc.
    - *
    - *   perm_r  (input) int*, dimension (A->nrow)
    - *           Row permutation vector, which defines the permutation matrix Pr;
    - *           perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - *   equed   (input) Specifies the form of equilibration that was done.
    - *           = 'N': No equilibration.
    - *           = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *           = 'C': Column equilibration, i.e., A was postmultiplied by
    - *                  diag(C).
    - *           = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                  by diag(R)*A*diag(C).
    - *
    - *   R       (input) double*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *           If equed = 'R' or 'B', A is premultiplied by diag(R).
    - *           If equed = 'N' or 'C', R is not accessed.
    - * 
    - *   C       (input) double*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *           If equed = 'C' or 'B', A is postmultiplied by diag(C).
    - *           If equed = 'N' or 'R', C is not accessed.
    - *
    - *   B       (input) SuperMatrix*
    - *           B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *           The right hand side matrix B.
    - *           if equed = 'R' or 'B', B is premultiplied by diag(R).
    - *
    - *   X       (input/output) SuperMatrix*
    - *           X has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *           On entry, the solution matrix X, as computed by dgstrs().
    - *           On exit, the improved solution matrix X.
    - *           if *equed = 'C' or 'B', X should be premultiplied by diag(C)
    - *               in order to obtain the solution to the original system.
    - *
    - *   FERR    (output) double*, dimension (B->ncol)   
    - *           The estimated forward error bound for each solution vector   
    - *           X(j) (the j-th column of the solution matrix X).   
    - *           If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *           is an estimated upper bound for the magnitude of the largest 
    - *           element in (X(j) - XTRUE) divided by the magnitude of the   
    - *           largest element in X(j).  The estimate is as reliable as   
    - *           the estimate for RCOND, and is almost always a slight   
    - *           overestimate of the true error.
    - *
    - *   BERR    (output) double*, dimension (B->ncol)   
    - *           The componentwise relative backward error of each solution   
    - *           vector X(j) (i.e., the smallest relative change in   
    - *           any element of A or B that makes X(j) an exact solution).
    - *
    - *   stat     (output) SuperLUStat_t*
    - *            Record the statistics on runtime and floating-point operation count.
    - *            See util.h for the definition of 'SuperLUStat_t'.
    - *
    - *   info    (output) int*   
    - *           = 0:  successful exit   
    - *            < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    Internal Parameters   
    - *    ===================   
    - *
    - *    ITMAX is the maximum number of steps of iterative refinement.   
    - *
    - * 
    - */ -void -dgsrfs(trans_t trans, SuperMatrix *A, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, char *equed, double *R, double *C, - SuperMatrix *B, SuperMatrix *X, double *ferr, double *berr, - SuperLUStat_t *stat, int *info) -{ - - -#define ITMAX 5 - - /* Table of constant values */ - int ione = 1; - double ndone = -1.; - double done = 1.; - - /* Local variables */ - NCformat *Astore; - double *Aval; - SuperMatrix Bjcol; - DNformat *Bstore, *Xstore, *Bjcol_store; - double *Bmat, *Xmat, *Bptr, *Xptr; - int kase; - double safe1, safe2; - int i, j, k, irow, nz, count, notran, rowequ, colequ; - int ldb, ldx, nrhs; - double s, xk, lstres, eps, safmin; - char transc[1]; - trans_t transt; - double *work; - double *rwork; - int *iwork; - extern double dlamch_(char *); - extern int dlacon_(int *, double *, double *, int *, double *, int *); -#ifdef _CRAY - extern int SCOPY(int *, double *, int *, double *, int *); - extern int SSAXPY(int *, double *, double *, int *, double *, int *); -#else - extern int dcopy_(int *, double *, int *, double *, int *); - extern int daxpy_(int *, double *, double *, int *, double *, int *); -#endif - - Astore = A->Store; - Aval = Astore->nzval; - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - /* Test the input parameters */ - *info = 0; - notran = (trans == NOTRANS); - if ( !notran && trans != TRANS && trans != CONJ ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_D || A->Mtype != SLU_GE ) - *info = -2; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_D || L->Mtype != SLU_TRLU ) - *info = -3; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_D || U->Mtype != SLU_TRU ) - *info = -4; - else if ( ldb < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || B->Mtype != SLU_GE 
) - *info = -10; - else if ( ldx < SUPERLU_MAX(0, A->nrow) || - X->Stype != SLU_DN || X->Dtype != SLU_D || X->Mtype != SLU_GE ) - *info = -11; - if (*info != 0) { - i = -(*info); - xerbla_("dgsrfs", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || nrhs == 0) { - for (j = 0; j < nrhs; ++j) { - ferr[j] = 0.; - berr[j] = 0.; - } - return; - } - - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - - /* Allocate working space */ - work = doubleMalloc(2*A->nrow); - rwork = (double *) SUPERLU_MALLOC( A->nrow * sizeof(double) ); - iwork = intMalloc(2*A->nrow); - if ( !work || !rwork || !iwork ) - ABORT("Malloc fails for work/rwork/iwork."); - - if ( notran ) { - *(unsigned char *)transc = 'N'; - transt = TRANS; - } else { - *(unsigned char *)transc = 'T'; - transt = NOTRANS; - } - - /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ - nz = A->ncol + 1; - eps = dlamch_("Epsilon"); - safmin = dlamch_("Safe minimum"); - /* Set SAFE1 essentially to be the underflow threshold times the - number of additions in each row. */ - safe1 = nz * safmin; - safe2 = safe1 / eps; - - /* Compute the number of nonzeros in each row (or column) of A */ - for (i = 0; i < A->nrow; ++i) iwork[i] = 0; - if ( notran ) { - for (k = 0; k < A->ncol; ++k) - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - ++iwork[Astore->rowind[i]]; - } else { - for (k = 0; k < A->ncol; ++k) - iwork[k] = Astore->colptr[k+1] - Astore->colptr[k]; - } - - /* Copy one column of RHS B into Bjcol. */ - Bjcol.Stype = B->Stype; - Bjcol.Dtype = B->Dtype; - Bjcol.Mtype = B->Mtype; - Bjcol.nrow = B->nrow; - Bjcol.ncol = 1; - Bjcol.Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !Bjcol.Store ) ABORT("SUPERLU_MALLOC fails for Bjcol.Store"); - Bjcol_store = Bjcol.Store; - Bjcol_store->lda = ldb; - Bjcol_store->nzval = work; /* address aliasing */ - - /* Do for each right hand side ... 
*/ - for (j = 0; j < nrhs; ++j) { - count = 0; - lstres = 3.; - Bptr = &Bmat[j*ldb]; - Xptr = &Xmat[j*ldx]; - - while (1) { /* Loop until stopping criterion is satisfied. */ - - /* Compute residual R = B - op(A) * X, - where op(A) = A, A**T, or A**H, depending on TRANS. */ - -#ifdef _CRAY - SCOPY(&A->nrow, Bptr, &ione, work, &ione); -#else - dcopy_(&A->nrow, Bptr, &ione, work, &ione); -#endif - sp_dgemv(transc, ndone, A, Xptr, ione, done, work, ione); - - /* Compute componentwise relative backward error from formula - max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) - where abs(Z) is the componentwise absolute value of the matrix - or vector Z. If the i-th component of the denominator is less - than SAFE2, then SAFE1 is added to the i-th component of the - numerator before dividing. */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = fabs( Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). */ - if (notran) { - for (k = 0; k < A->ncol; ++k) { - xk = fabs( Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += fabs(Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - s += fabs(Aval[i]) * fabs(Xptr[irow]); - } - rwork[k] += s; - } - } - s = 0.; - for (i = 0; i < A->nrow; ++i) { - if (rwork[i] > safe2) { - s = SUPERLU_MAX( s, fabs(work[i]) / rwork[i] ); - } else if ( rwork[i] != 0.0 ) { - /* Adding SAFE1 to the numerator guards against - spuriously zero residuals (underflow). */ - s = SUPERLU_MAX( s, (safe1 + fabs(work[i])) / rwork[i] ); - } - /* If rwork[i] is exactly 0.0, then we know the true - residual also must be exactly 0.0. */ - } - berr[j] = s; - - /* Test stopping criterion. Continue iterating if - 1) The residual BERR(J) is larger than machine epsilon, and - 2) BERR(J) decreased by at least a factor of 2 during the - last iteration, and - 3) At most ITMAX iterations tried. 
*/ - - if (berr[j] > eps && berr[j] * 2. <= lstres && count < ITMAX) { - /* Update solution and try again. */ - dgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - -#ifdef _CRAY - SAXPY(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#else - daxpy_(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#endif - lstres = berr[j]; - ++count; - } else { - break; - } - - } /* end while */ - - stat->RefineSteps = count; - - /* Bound error from formula: - norm(X - XTRUE) / norm(X) .le. FERR = norm( abs(inv(op(A)))* - ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) - where - norm(Z) is the magnitude of the largest component of Z - inv(op(A)) is the inverse of op(A) - abs(Z) is the componentwise absolute value of the matrix or - vector Z - NZ is the maximum number of nonzeros in any row of A, plus 1 - EPS is machine epsilon - - The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) - is incremented by SAFE1 if the i-th component of - abs(op(A))*abs(X) + abs(B) is less than SAFE2. - - Use DLACON to estimate the infinity-norm of the matrix - inv(op(A)) * diag(W), - where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = fabs( Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). 
*/ - if ( notran ) { - for (k = 0; k < A->ncol; ++k) { - xk = fabs( Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += fabs(Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - xk = fabs( Xptr[irow] ); - s += fabs(Aval[i]) * xk; - } - rwork[k] += s; - } - } - - for (i = 0; i < A->nrow; ++i) - if (rwork[i] > safe2) - rwork[i] = fabs(work[i]) + (iwork[i]+1)*eps*rwork[i]; - else - rwork[i] = fabs(work[i])+(iwork[i]+1)*eps*rwork[i]+safe1; - - kase = 0; - - do { - dlacon_(&A->nrow, &work[A->nrow], work, - &iwork[A->nrow], &ferr[j], &kase); - if (kase == 0) break; - - if (kase == 1) { - /* Multiply by diag(W)*inv(op(A)**T)*(diag(C) or diag(R)). */ - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) work[i] *= C[i]; - else if ( !notran && rowequ ) - for (i = 0; i < A->nrow; ++i) work[i] *= R[i]; - - dgstrs (transt, L, U, perm_c, perm_r, &Bjcol, stat, info); - - for (i = 0; i < A->nrow; ++i) work[i] *= rwork[i]; - } else { - /* Multiply by (diag(C) or diag(R))*inv(op(A))*diag(W). */ - for (i = 0; i < A->nrow; ++i) work[i] *= rwork[i]; - - dgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) work[i] *= C[i]; - else if ( !notran && rowequ ) - for (i = 0; i < A->ncol; ++i) work[i] *= R[i]; - } - - } while ( kase != 0 ); - - - /* Normalize error. */ - lstres = 0.; - if ( notran && colequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, C[i] * fabs( Xptr[i]) ); - } else if ( !notran && rowequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, R[i] * fabs( Xptr[i]) ); - } else { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, fabs( Xptr[i]) ); - } - if ( lstres != 0. ) - ferr[j] /= lstres; - - } /* for each RHS j ... 
*/ - - SUPERLU_FREE(work); - SUPERLU_FREE(rwork); - SUPERLU_FREE(iwork); - SUPERLU_FREE(Bjcol.Store); - - return; - -} /* dgsrfs */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgssv.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgssv.c deleted file mode 100644 index 5baeda0928..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgssv.c +++ /dev/null @@ -1,227 +0,0 @@ - -/*! @file dgssv.c - * \brief Solves the system of linear equations A*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * DGSSV solves the system of linear equations A*X=B, using the
    - * LU factorization from DGSTRF. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *
    - *      1.1. Permute the columns of A, forming A*Pc, where Pc
    - *           is a permutation matrix. For more details of this step, 
    - *           see sp_preorder.c.
    - *
    - *      1.2. Factor A as Pr*A*Pc=L*U with the permutation Pr determined
    - *           by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      1.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the
    - *      above algorithm to the transpose of A:
    - *
    - *      2.1. Permute columns of transpose(A) (rows of A),
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix. 
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.2. Factor A as Pr*transpose(A)*Pc=L*U with the permutation Pr
    - *           determined by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      2.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR; Dtype = SLU_D; Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - * perm_c  (input/output) int*
    - *         If A->Stype = SLU_NC, column permutation vector of size A->ncol
    - *         which defines the permutation matrix Pc; perm_c[i] = j means 
    - *         column i of A is in position j in A*Pc.
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - *         If options->ColPerm = MY_PERMC or options->Fact = SamePattern or
    - *            options->Fact = SamePattern_SameRowPerm, it is an input argument.
    - *            On exit, perm_c may be overwritten by the product of the input
    - *            perm_c and a permutation that postorders the elimination tree
    - *            of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *            is already in postorder.
    - *         Otherwise, it is an output argument.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined 
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->RowPerm = MY_PERMR or
    - *            options->Fact = SamePattern_SameRowPerm, perm_r is an
    - *            input argument.
    - *         otherwise it is an output argument.
    - *
    - * L       (output) SuperMatrix*
    - *         The factor L from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *         
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *         > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                so the solution could not be computed.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol.
    - * 
    - */ - -void -dgssv(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperMatrix *B, - SuperLUStat_t *stat, int *info ) -{ - - DNformat *Bstore; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int lwork = 0, *etree, i; - - /* Set default values for some parameters */ - int panel_size; /* panel size */ - int relax; /* no of columns in a relaxed snodes */ - int permc_spec; - trans_t trans = NOTRANS; - double *utime; - double t; /* Temporary time */ - - /* Test the input parameters ... */ - *info = 0; - Bstore = B->Store; - if ( options->Fact != DOFACT ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_D || A->Mtype != SLU_GE ) - *info = -2; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || B->Mtype != SLU_GE ) - *info = -7; - if ( *info != 0 ) { - i = -(*info); - xerbla_("dgssv", &i); - return; - } - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - dCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - trans = TRANS; - } else { - if ( A->Stype == SLU_NC ) AA = A; - } - - t = SuperLU_timer_(); - /* - * Get column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t; - - etree = intMalloc(A->ncol); - - t = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t; - - panel_size = sp_ienv(1); - relax = sp_ienv(2); - - /*printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4));*/ - t = SuperLU_timer_(); - /* Compute the LU factorization of A. */ - dgstrf(options, &AC, relax, panel_size, etree, - NULL, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t; - - t = SuperLU_timer_(); - if ( *info == 0 ) { - /* Solve the system A*X=B, overwriting B with X. 
*/ - dgstrs (trans, L, U, perm_c, perm_r, B, stat, info); - } - utime[SOLVE] = SuperLU_timer_() - t; - - SUPERLU_FREE (etree); - Destroy_CompCol_Permuted(&AC); - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgssvx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgssvx.c deleted file mode 100644 index b698addea3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgssvx.c +++ /dev/null @@ -1,619 +0,0 @@ - -/*! @file dgssvx.c - * \brief Solves the system of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * DGSSVX solves the system of linear equations A*X=B or A'*X=B, using
    - * the LU factorization from dgstrf(). Error bounds on the solution and
    - * a condition estimate are also provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *      1.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A is
    - *           overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *           (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *           = TRANS or CONJ).
    - *
    - *      1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *           matrix that usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the matrix A (after equilibration if options->Equil = YES)
    - *           as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *      1.4. Compute the reciprocal pivot growth factor.
    - *
    - *      1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form of 
    - *           A is used to estimate the condition number of the matrix A. If
    - *           the reciprocal of the condition number is less than machine
    - *           precision, info = A->ncol+1 is returned as a warning, but the
    - *           routine still goes on to solve for X and computes error bounds
    - *           as described below.
    - *
    - *      1.6. The system of equations is solved for X using the factored form
    - *           of A.
    - *
    - *      1.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      1.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *      to the transpose of A:
    - *
    - *      2.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A' is
    - *           overwritten by diag(R)*A'*diag(C) and B by diag(R)*B 
    - *           (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *      2.2. Permute columns of transpose(A) (rows of A), 
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix that 
    - *           usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the transpose(A) (after equilibration if 
    - *           options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *           permutation Pr determined by partial pivoting.
    - *
    - *      2.4. Compute the reciprocal pivot growth factor.
    - *
    - *      2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form 
    - *           of transpose(A) is used to estimate the condition number of the
    - *           matrix A. If the reciprocal of the condition number
    - *           is less than machine precision, info = A->nrow+1 is returned as
    - *           a warning, but the routine still goes on to solve for X and
    - *           computes error bounds as described below.
    - *
    - *      2.6. The system of equations is solved for X using the factored form
    - *           of transpose(A).
    - *
    - *      2.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      2.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R) 
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input/output) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of the linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR, Dtype = SLU_D, Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - *         On entry, If options->Fact = FACTORED and equed is not 'N', 
    - *         then A must have been equilibrated by the scaling factors in
    - *         R and/or C.  
    - *         On exit, A is not modified if options->Equil = NO, or if 
    - *         options->Equil = YES but equed = 'N' on exit.
    - *         Otherwise, if options->Equil = YES and equed is not 'N',
    - *         A is scaled as follows:
    - *         If A->Stype = SLU_NC:
    - *           equed = 'R':  A := diag(R) * A
    - *           equed = 'C':  A := A * diag(C)
    - *           equed = 'B':  A := diag(R) * A * diag(C).
    - *         If A->Stype = SLU_NR:
    - *           equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *           equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *           equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *         which defines the permutation matrix Pc; perm_c[i] = j means
    - *         column i of A is in position j in A*Pc.
    - *         On exit, perm_c may be overwritten by the product of the input
    - *         perm_c and a permutation that postorders the elimination tree
    - *         of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *         is already in postorder.
    - *
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *         will try to use the input perm_r, unless a certain threshold
    - *         criterion is violated. In that case, perm_r is overwritten by a
    - *         new permutation determined by partial pivoting or diagonal
    - *         threshold pivoting.
    - *         Otherwise, perm_r is output argument.
    - * 
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *         Elimination tree of Pc'*A'*A*Pc.
    - *         If options->Fact != FACTORED and options->Fact != DOFACT,
    - *         etree is an input argument, otherwise it is an output argument.
    - *         Note: etree is a vector of parent pointers for a forest whose
    - *         vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *         Specifies the form of equilibration that was done.
    - *         = 'N': No equilibration.
    - *         = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *         = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *         = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                by diag(R)*A*diag(C).
    - *         If options->Fact = FACTORED, equed is an input argument,
    - *         otherwise it is an output argument.
    - *
    - * R       (input/output) double*, dimension (A->nrow)
    - *         The row scale factors for A or transpose(A).
    - *         If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *         If equed = 'N' or 'C', R is not accessed.
    - *         If options->Fact = FACTORED, R is an input argument,
    - *             otherwise, R is output.
    - *         If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *             of R must be positive.
    - * 
    - * C       (input/output) double*, dimension (A->ncol)
    - *         The column scale factors for A or transpose(A).
    - *         If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *         If equed = 'N' or 'R', C is not accessed.
    - *         If options->Fact = FACTORED, C is an input argument,
    - *             otherwise, C is output.
    - *         If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *             of C must be positive.
    - *         
    - * L       (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype SLU_= NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *         User supplied workspace, should be large enough
    - *         to hold data structures for factors L and U.
    - *         On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               mem_usage->total_needed; no other side effects.
    - *
    - *         See argument 'mem_usage' for memory usage statistics.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         If B->ncol = 0, only LU decomposition is performed, the triangular
    - *                         solve is skipped.
    - *         On exit,
    - *            if equed = 'N', B is not modified; otherwise
    - *            if A->Stype = SLU_NC:
    - *               if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *                  B is overwritten by diag(R)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *                  B is overwritten by diag(C)*B;
    - *            if A->Stype = SLU_NR:
    - *               if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *                  B is overwritten by diag(C)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *                  B is overwritten by diag(R)*B.
    - *
    - * X       (output) SuperMatrix*
    - *         X has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE. 
    - *         If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *         to the original system of equations. Note that A and B are modified
    - *         on exit if equed is not 'N', and the solution to the equilibrated
    - *         system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *         equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *         and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) double*
    - *         The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *         The infinity norm is used. If recip_pivot_growth is much less
    - *         than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) double*
    - *         The estimate of the reciprocal condition number of the matrix A
    - *         after equilibration (if done). If rcond is less than the machine
    - *         precision (in particular, if rcond = 0), the matrix is singular
    - *         to working precision. This condition is indicated by a return
    - *         code of info > 0.
    - *
    - * FERR    (output) double*, dimension (B->ncol)   
    - *         The estimated forward error bound for each solution vector   
    - *         X(j) (the j-th column of the solution matrix X).   
    - *         If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *         is an estimated upper bound for the magnitude of the largest 
    - *         element in (X(j) - XTRUE) divided by the magnitude of the   
    - *         largest element in X(j).  The estimate is as reliable as   
    - *         the estimate for RCOND, and is almost always a slight   
    - *         overestimate of the true error.
    - *         If options->IterRefine = NOREFINE, ferr = 1.0.
    - *
    - * BERR    (output) double*, dimension (B->ncol)
    - *         The componentwise relative backward error of each solution   
    - *         vector X(j) (i.e., the smallest relative change in   
    - *         any element of A or B that makes X(j) an exact solution).
    - *         If options->IterRefine = NOREFINE, berr = 1.0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *         Record the memory usage statistics, consisting of following fields:
    - *         - for_lu (float)
    - *           The amount of space used in bytes for L\U data structures.
    - *         - total_needed (float)
    - *           The amount of space needed in bytes to perform factorization.
    - *         - expansions (int)
    - *           The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *         = 0: successful exit   
    - *         < 0: if info = -i, the i-th argument had an illegal value   
    - *         > 0: if info = i, and i is   
    - *              <= A->ncol: U(i,i) is exactly zero. The factorization has   
    - *                    been completed, but the factor U is exactly   
    - *                    singular, so the solution and error bounds   
    - *                    could not be computed.   
    - *              = A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *                    precision, meaning that the matrix is singular to
    - *                    working precision. Nevertheless, the solution and
    - *                    error bounds are computed because there are a number
    - *                    of situations where the computed solution can be more
    - *                    accurate than the value of RCOND would suggest.   
    - *              > A->ncol+1: number of bytes allocated when memory allocation
    - *                    failure occurred, plus A->ncol.
    - * 
    - */ - -void -dgssvx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, double *R, double *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, double *recip_pivot_growth, - double *rcond, double *ferr, double *berr, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info ) -{ - - - DNformat *Bstore, *Xstore; - double *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec; - trans_t trant; - char norm[1]; - int i, j, info1; - double amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - double diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - /* External functions */ - extern double dlangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = dlamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - -#if 0 -printf("dgssvx: Fact=%4d, Trans=%4d, equed=%c\n", - options->Fact, options->Trans, *equed); -#endif - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_D || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_D || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("dgssvx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - dCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact && equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - dgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - dlaqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. */ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= R[i]; - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= C[i]; - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t0; - -/* 
printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4)); - fflush(stdout); */ - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - dgstrf(options, &AC, relax, panel_size, etree, - work, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) { - if ( *info <= A->ncol ) { - /* Compute the reciprocal pivot growth factor of the leading - rank-deficient *info columns of A. */ - *recip_pivot_growth = dPivotGrowth(*info, AA, perm_c, L, U); - } - return; - } - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = dPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = dlangs(norm, AA); - dgscon(norm, L, U, anorm, rcond, stat, info); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. */ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - dgstrs (trant, L, U, perm_c, perm_r, X, stat, info); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Use iterative refinement to improve the computed solution and compute - error bounds and backward error estimates for it. */ - t0 = SuperLU_timer_(); - if ( options->IterRefine != NOREFINE ) { - dgsrfs(trant, AA, L, U, perm_c, perm_r, equed, R, C, B, - X, ferr, berr, stat, info); - } else { - for (j = 0; j < nrhs; ++j) ferr[j] = berr[j] = 1.0; - } - utime[REFINE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original system. 
*/ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= C[i]; - } - } - } else if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= R[i]; - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < dlamch_("E") ) *info = A->ncol + 1; - } - - if ( nofact ) { - dQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrf.c deleted file mode 100644 index 47f129061e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrf.c +++ /dev/null @@ -1,436 +0,0 @@ - -/*! @file dgstrf.c - * \brief Computes an LU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * DGSTRF computes an LU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper 
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = SLU_NCP; Dtype = SLU_D; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *          To control degree of relaxing supernodes. If the number
    - *          of nodes (columns) in a subtree of the elimination tree is less
    - *          than relax, this subtree is considered as one supernode,
    - *          regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *          A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *          Elimination tree of A'*A.
    - *          Note: etree is a vector of parent pointers for a forest whose
    - *          vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *          On input, the columns of A should be permuted so that the
    - *          etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *          User-supplied work space and space for the output data structures.
    - *          Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the 
    - *          permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *          in position j in A*Pc.
    - *          When searching for diagonal, perm_c[*] is applied to the
    - *          row subscripts of A, so that diagonal threshold pivoting
    - *          can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *          Row permutation vector which defines the permutation matrix Pr,
    - *          perm_r[i] = j means row i of A is in position j in Pr*A.
    - *          If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *             will try to use the input perm_r, unless a certain threshold
    - *             criterion is violated. In that case, perm_r is overwritten by
    - *             a new permutation determined by partial pivoting or diagonal
    - *             threshold pivoting.
    - *          Otherwise, perm_r is output argument;
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = SLU_NC, 
    - *          Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *          = 0: successful exit
    - *          < 0: if info = -i, the i-th argument had an illegal value
    - *          > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                and division by zero will occur if it is used to solve a
    - *                system of equations.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol. If lwork = -1, it is
    - *                the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays: 
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   xprune[0:n-1]: xprune[*] points to locations in subscript 
    - *	vector lsub[*]. For column i, xprune[i] denotes the point where 
    - *	structural pruning begins. I.e. only xlsub[i],..,xprune[i]-1 need 
    - *	to be traversed for symbolic factorization.
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been 
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 3 of them: marker/marker1 are used for panel dfs, 
    - *	      see dpanel_dfs.c; marker2 is used for inner-factorization,
    - *            see dcolumn_dfs.c.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *      Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs) 
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the 
    - *	last column of a supernode.
    - *      The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a 
    - *	supernodal representative r, repfnz[r] is the location of the first 
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel. 
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below 
    - *      the panel diagonal. These are filled in during dpanel_dfs(), and are
    - *      used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *	    	   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_ddefs.h.
    - * 
    - */ - -void -dgstrf (superlu_options_t *options, SuperMatrix *A, - int relax, int panel_size, int *etree, void *work, int lwork, - int *perm_c, int *perm_r, SuperMatrix *L, SuperMatrix *U, - SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *iwork; - double *dwork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *xprune; - int *marker; - double *dense, *tempv; - int *relax_end; - double *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - double fill_ratio = sp_ienv(6); /* estimated fill ratio */ - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = dLUMemInit(fact, work, lwork, m, n, Astore->nnz, - panel_size, fill_ratio, L, U, &Glu, &iwork, &dwork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; - 
xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &xprune, &marker); - dSetRWork(m, panel_size, dwork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - for (k = 0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) { - heap_relax_snode(n, etree, relax, marker, relax_end); - } else { - relax_snode(n, etree, relax, marker, relax_end); - } - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = dsnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - xprune, marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ( (*info = dLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu)) ) - return; - } - - for (icol = jcol; icol<= kcol; icol++) { - xusub[icol+1] = nextu; - - /* Scatter into SPA dense[*] */ - 
for (k = xa_begin[icol]; k < xa_end[icol]; k++) - dense[asub[k]] = a[k]; - - /* Numeric update within the snode */ - dsnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if ( (*info = dpivotL(icol, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - -#ifdef DEBUG - dprint_lu_col("[1]: ", icol, pivrow, xprune, &Glu); -#endif - - } - - jcol = icol; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - dpanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, panel_lsub, segrep, repfnz, xprune, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - dpanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for ( jj = jcol; jj < jcol + panel_size; jj++) { - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - if ((*info = dcolumn_dfs(m, jj, perm_r, &nseg, &panel_lsub[k], - segrep, &repfnz[k], xprune, marker, - parent, xplore, &Glu)) != 0) return; - - /* Numeric updates */ - if ((*info = dcolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Copy the U-segments to ucol[*] */ - if ((*info = dcopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], &Glu)) != 0) - return; - - if ( (*info = dpivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - - /* Prune columns (0:jj-1) using 
column jj */ - dpruneL(jj, perm_r, pivrow, nseg, segrep, - &repfnz[k], xprune, &Glu); - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - -#ifdef DEBUG - dprint_lu_col("[2]: ", jj, pivrow, xprune, &Glu); -#endif - - } - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - countnz(min_mn, xprune, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - dLUWorkFree(iwork, dwork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. - There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - dCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_D, SLU_TRLU); - dCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_D, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - stat->expansions = --(Glu.num_expansions); - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrs.c deleted file mode 100644 index 
4e0247be22..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrs.c +++ /dev/null @@ -1,337 +0,0 @@ - -/*! @file dgstrs.c - * \brief Solves a system using LU factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_ddefs.h" - - -/* - * Function prototypes - */ -void dusolve(int, int, double*, double*); -void dlsolve(int, int, double*, double*); -void dmatvec(int, int, int, double*, double*, double*); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * DGSTRS solves a system of linear equations A*X=B or A'*X=B
    - * with A sparse and B dense, using the LU factorization computed by
    - * DGSTRF.
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *
    - * L       (input) SuperMatrix*
    - *         The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *         dgstrf(). Use compressed row subscripts storage for supernodes,
    - *         i.e., L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U       (input) SuperMatrix*
    - *         The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *         dgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * perm_c  (input) int*, dimension (L->ncol)
    - *	   Column permutation vector, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - *
    - * perm_r  (input) int*, dimension (L->nrow)
    - *         Row permutation vector, which defines the permutation matrix Pr; 
    - *         perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - * 	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - * 
    - */ - -void -dgstrs (trans_t trans, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, SuperMatrix *B, - SuperLUStat_t *stat, int *info) -{ - -#ifdef _CRAY - _fcd ftcs1, ftcs2, ftcs3, ftcs4; -#endif - int incx = 1, incy = 1; -#ifdef USE_VENDOR_BLAS - double alpha = 1.0, beta = 1.0; - double *work_col; -#endif - DNformat *Bstore; - double *Bmat; - SCformat *Lstore; - NCformat *Ustore; - double *Lval, *Uval; - int fsupc, nrow, nsupr, nsupc, luptr, istart, irow; - int i, j, k, iptr, jcol, n, ldb, nrhs; - double *work, *rhs_work, *soln; - flops_t solve_ops; - void dprint_soln(); - - /* Test input parameters ... */ - *info = 0; - Bstore = B->Store; - ldb = Bstore->lda; - nrhs = B->ncol; - if ( trans != NOTRANS && trans != TRANS && trans != CONJ ) *info = -1; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_D || L->Mtype != SLU_TRLU ) - *info = -2; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_D || U->Mtype != SLU_TRU ) - *info = -3; - else if ( ldb < SUPERLU_MAX(0, L->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || B->Mtype != SLU_GE ) - *info = -6; - if ( *info ) { - i = -(*info); - xerbla_("dgstrs", &i); - return; - } - - n = L->nrow; - work = doubleCalloc(n * nrhs); - if ( !work ) ABORT("Malloc fails for local work[]."); - soln = doubleMalloc(n); - if ( !soln ) ABORT("Malloc fails for local soln[]."); - - Bmat = Bstore->nzval; - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( trans == NOTRANS ) { - /* Permute right hand sides to form Pr*B */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_r[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - /* Forward solve PLy=Pb. 
*/ - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - nrow = nsupr - nsupc; - - solve_ops += nsupc * (nsupc - 1) * nrhs; - solve_ops += 2 * nrow * nsupc * nrhs; - - if ( nsupc == 1 ) { - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - luptr = L_NZ_START(fsupc); - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); iptr++){ - irow = L_SUB(iptr); - ++luptr; - rhs_work[irow] -= rhs_work[fsupc] * Lval[luptr]; - } - } - } else { - luptr = L_NZ_START(fsupc); -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("N", strlen("N")); - ftcs3 = _cptofcd("U", strlen("U")); - STRSM( ftcs1, ftcs1, ftcs2, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - SGEMM( ftcs2, ftcs2, &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#else - dtrsm_("L", "L", "N", "U", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - dgemm_( "N", "N", &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#endif - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - work_col = &work[j*n]; - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - rhs_work[irow] -= work_col[i]; /* Scatter */ - work_col[i] = 0.0; - iptr++; - } - } -#else - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - dlsolve (nsupr, nsupc, &Lval[luptr], &rhs_work[fsupc]); - dmatvec (nsupr, nrow, nsupc, &Lval[luptr+nsupc], - &rhs_work[fsupc], &work[0] ); - - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - rhs_work[irow] -= work[i]; - work[i] = 0.0; - iptr++; - } - } -#endif - } /* else ... */ - } /* for L-solve */ - -#ifdef DEBUG - printf("After L-solve: y=\n"); - dprint_soln(n, nrhs, Bmat); -#endif - - /* - * Back solve Ux=y. 
- */ - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += nsupc * (nsupc + 1) * nrhs; - - if ( nsupc == 1 ) { - rhs_work = &Bmat[0]; - for (j = 0; j < nrhs; j++) { - rhs_work[fsupc] /= Lval[luptr]; - rhs_work += ldb; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("U", strlen("U")); - ftcs3 = _cptofcd("N", strlen("N")); - STRSM( ftcs1, ftcs2, ftcs3, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#else - dtrsm_("L", "U", "N", "N", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#endif -#else - for (j = 0; j < nrhs; j++) - dusolve ( nsupr, nsupc, &Lval[luptr], &Bmat[fsupc+j*ldb] ); -#endif - } - - for (j = 0; j < nrhs; ++j) { - rhs_work = &Bmat[j*ldb]; - for (jcol = fsupc; jcol < fsupc + nsupc; jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++ ){ - irow = U_SUB(i); - rhs_work[irow] -= rhs_work[jcol] * Uval[i]; - } - } - } - - } /* for U-solve */ - -#ifdef DEBUG - printf("After U-solve: x=\n"); - dprint_soln(n, nrhs, Bmat); -#endif - - /* Compute the final solution X := Pc*X. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_c[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = solve_ops; - - } else { /* Solve A'*X=B or CONJ(A)*X=B */ - /* Permute right hand sides to form Pc'*B. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_c[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = 0; - for (k = 0; k < nrhs; ++k) { - - /* Multiply by inv(U'). */ - sp_dtrsv("U", "T", "N", L, U, &Bmat[k*ldb], stat, info); - - /* Multiply by inv(L'). 
*/ - sp_dtrsv("L", "T", "U", L, U, &Bmat[k*ldb], stat, info); - - } - /* Compute the final solution X := Pr'*X (=inv(Pr)*X) */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_r[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - } - - SUPERLU_FREE(work); - SUPERLU_FREE(soln); -} - -/* - * Diagnostic print of the solution vector - */ -void -dprint_soln(int n, int nrhs, double *soln) -{ - int i; - - for (i = 0; i < n; i++) - printf("\t%d: %.4f\n", i, soln[i]); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrsL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrsL.c deleted file mode 100644 index b10d754411..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrsL.c +++ /dev/null @@ -1,234 +0,0 @@ -/*! @file dgstrsL.c - * \brief Performs the L-solve using the LU factorization computed by DGSTRF - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * September 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_ddefs.h" -#include "slu_util.h" - - -/* - * Function prototypes - */ -void dusolve(int, int, double*, double*); -void dlsolve(int, int, double*, double*); -void dmatvec(int, int, int, double*, double*, double*); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * dgstrsL only performs the L-solve using the LU factorization computed
    - * by DGSTRF.
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * trans   (input) char*
    - *          Specifies the form of the system of equations:
    - *          = 'N':  A * X = B  (No transpose)
    - *          = 'T':  A'* X = B  (Transpose)
    - *          = 'C':  A**H * X = B  (Conjugate transpose)
    - *
    - * L       (input) SuperMatrix*
    - *         The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *         dgstrf(). Use compressed row subscripts storage for supernodes,
    - *         i.e., L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U       (input) SuperMatrix*
    - *         The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *         dgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * perm_r  (input) int*, dimension (L->nrow)
    - *         Row permutation vector, which defines the permutation matrix Pr; 
    - *         perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * info    (output) int*
    - * 	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - * 
    - */ -void -dgstrsL(char *trans, SuperMatrix *L, int *perm_r, SuperMatrix *B, int *info) -{ -#ifdef _CRAY - _fcd ftcs1, ftcs2, ftcs3, ftcs4; -#endif - int incx = 1, incy = 1; - double alpha = 1.0, beta = 1.0; - DNformat *Bstore; - double *Bmat; - SCformat *Lstore; - double *Lval, *Uval; - int nrow, notran; - int fsupc, nsupr, nsupc, luptr, istart, irow; - int i, j, k, iptr, jcol, n, ldb, nrhs; - double *work, *work_col, *rhs_work, *soln; - flops_t solve_ops; - extern SuperLUStat_t SuperLUStat; - void dprint_soln(); - - /* Test input parameters ... */ - *info = 0; - Bstore = B->Store; - ldb = Bstore->lda; - nrhs = B->ncol; - notran = lsame_(trans, "N"); - if ( !notran && !lsame_(trans, "T") && !lsame_(trans, "C") ) *info = -1; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_D || L->Mtype != SLU_TRLU ) - *info = -2; - else if ( ldb < SUPERLU_MAX(0, L->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || B->Mtype != SLU_GE ) - *info = -4; - if ( *info ) { - i = -(*info); - xerbla_("dgstrsL", &i); - return; - } - - n = L->nrow; - work = doubleCalloc(n * nrhs); - if ( !work ) ABORT("Malloc fails for local work[]."); - soln = doubleMalloc(n); - if ( !soln ) ABORT("Malloc fails for local soln[]."); - - Bmat = Bstore->nzval; - Lstore = L->Store; - Lval = Lstore->nzval; - solve_ops = 0; - - if ( notran ) { - /* Permute right hand sides to form Pr*B */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_r[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - /* Forward solve PLy=Pb. 
*/ - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - nrow = nsupr - nsupc; - - solve_ops += nsupc * (nsupc - 1) * nrhs; - solve_ops += 2 * nrow * nsupc * nrhs; - - if ( nsupc == 1 ) { - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - luptr = L_NZ_START(fsupc); - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); iptr++){ - irow = L_SUB(iptr); - ++luptr; - rhs_work[irow] -= rhs_work[fsupc] * Lval[luptr]; - } - } - } else { - luptr = L_NZ_START(fsupc); -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("N", strlen("N")); - ftcs3 = _cptofcd("U", strlen("U")); - STRSM( ftcs1, ftcs1, ftcs2, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - SGEMM( ftcs2, ftcs2, &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#else - dtrsm_("L", "L", "N", "U", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - dgemm_( "N", "N", &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#endif - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - work_col = &work[j*n]; - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - rhs_work[irow] -= work_col[i]; /* Scatter */ - work_col[i] = 0.0; - iptr++; - } - } -#else - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - dlsolve (nsupr, nsupc, &Lval[luptr], &rhs_work[fsupc]); - dmatvec (nsupr, nrow, nsupc, &Lval[luptr+nsupc], - &rhs_work[fsupc], &work[0] ); - - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - rhs_work[irow] -= work[i]; - work[i] = 0.0; - iptr++; - } - } -#endif - } /* else ... 
*/ - } /* for L-solve */ - -#ifdef DEBUG - printf("After L-solve: y=\n"); - dprint_soln(n, nrhs, Bmat); -#endif - - SuperLUStat.ops[SOLVE] = solve_ops; - - } else { - printf("Transposed solve not implemented.\n"); - exit(0); - } - - SUPERLU_FREE(work); - SUPERLU_FREE(soln); -} - -/* - * Diagnostic print of the solution vector - */ -void -dprint_soln(int n, int nrhs, double *soln) -{ - int i; - - for (i = 0; i < n; i++) - printf("\t%d: %.4f\n", i, soln[i]); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrsU.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrsU.c deleted file mode 100644 index 4b3921ce08..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dgstrsU.c +++ /dev/null @@ -1,224 +0,0 @@ -/*! @file dgstrsU.c - * \brief Performs the U-solve using the LU factorization computed by DGSTRF - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_ddefs.h" - - -/* - * Function prototypes - */ -void dusolve(int, int, double*, double*); -void dlsolve(int, int, double*, double*); -void dmatvec(int, int, int, double*, double*, double*); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * dgstrsU only performs the U-solve using the LU factorization computed
    - * by DGSTRF.
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *
    - * L       (input) SuperMatrix*
    - *         The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *         dgstrf(). Use compressed row subscripts storage for supernodes,
    - *         i.e., L has types: Stype = SLU_SC, Dtype = SLU_D, Mtype = SLU_TRLU.
    - *
    - * U       (input) SuperMatrix*
    - *         The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *         dgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_D, Mtype = SLU_TRU.
    - *
    - * perm_c  (input) int*, dimension (L->ncol)
    - *	   Column permutation vector, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - *
    - * perm_r  (input) int*, dimension (L->nrow)
    - *         Row permutation vector, which defines the permutation matrix Pr; 
    - *         perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_D, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - * 	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - * 
    - */ -void -dgstrsU(trans_t trans, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, SuperMatrix *B, - SuperLUStat_t *stat, int *info) -{ -#ifdef _CRAY - _fcd ftcs1, ftcs2, ftcs3, ftcs4; -#endif - int incx = 1, incy = 1; -#ifdef USE_VENDOR_BLAS - double alpha = 1.0, beta = 1.0; - double *work_col; -#endif - DNformat *Bstore; - double *Bmat; - SCformat *Lstore; - NCformat *Ustore; - double *Lval, *Uval; - int fsupc, nrow, nsupr, nsupc, luptr, istart, irow; - int i, j, k, iptr, jcol, n, ldb, nrhs; - double *rhs_work, *soln; - flops_t solve_ops; - void dprint_soln(); - - /* Test input parameters ... */ - *info = 0; - Bstore = B->Store; - ldb = Bstore->lda; - nrhs = B->ncol; - if ( trans != NOTRANS && trans != TRANS && trans != CONJ ) *info = -1; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_D || L->Mtype != SLU_TRLU ) - *info = -2; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_D || U->Mtype != SLU_TRU ) - *info = -3; - else if ( ldb < SUPERLU_MAX(0, L->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_D || B->Mtype != SLU_GE ) - *info = -6; - if ( *info ) { - i = -(*info); - xerbla_("dgstrs", &i); - return; - } - - n = L->nrow; - soln = doubleMalloc(n); - if ( !soln ) ABORT("Malloc fails for local soln[]."); - - Bmat = Bstore->nzval; - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( trans == NOTRANS ) { - /* - * Back solve Ux=y. 
- */ - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += nsupc * (nsupc + 1) * nrhs; - - if ( nsupc == 1 ) { - rhs_work = &Bmat[0]; - for (j = 0; j < nrhs; j++) { - rhs_work[fsupc] /= Lval[luptr]; - rhs_work += ldb; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("U", strlen("U")); - ftcs3 = _cptofcd("N", strlen("N")); - STRSM( ftcs1, ftcs2, ftcs3, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#else - dtrsm_("L", "U", "N", "N", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#endif -#else - for (j = 0; j < nrhs; j++) - dusolve ( nsupr, nsupc, &Lval[luptr], &Bmat[fsupc+j*ldb] ); -#endif - } - - for (j = 0; j < nrhs; ++j) { - rhs_work = &Bmat[j*ldb]; - for (jcol = fsupc; jcol < fsupc + nsupc; jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++ ){ - irow = U_SUB(i); - rhs_work[irow] -= rhs_work[jcol] * Uval[i]; - } - } - } - - } /* for U-solve */ - -#ifdef DEBUG - printf("After U-solve: x=\n"); - dprint_soln(n, nrhs, Bmat); -#endif - - /* Compute the final solution X := Pc*X. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_c[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = solve_ops; - - } else { /* Solve U'x = b */ - /* Permute right hand sides to form Pc'*B. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_c[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - for (k = 0; k < nrhs; ++k) { - /* Multiply by inv(U'). 
*/ - sp_dtrsv("U", "T", "N", L, U, &Bmat[k*ldb], stat, info); - } - - } - - SUPERLU_FREE(soln); -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlacon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlacon.c deleted file mode 100644 index 951fe7a206..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlacon.c +++ /dev/null @@ -1,236 +0,0 @@ - -/*! @file dlacon.c - * \brief Estimates the 1-norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_Cnames.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   DLACON estimates the 1-norm of a square matrix A.   
    - *   Reverse communication is used for evaluating matrix-vector products. 
    - * 
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   N      (input) INT
    - *          The order of the matrix.  N >= 1.   
    - *
    - *   V      (workspace) DOUBLE PRECISION array, dimension (N)   
    - *          On the final return, V = A*W,  where  EST = norm(V)/norm(W)   
    - *          (W is not returned).   
    - *
    - *   X      (input/output) DOUBLE PRECISION array, dimension (N)   
    - *          On an intermediate return, X should be overwritten by   
    - *                A * X,   if KASE=1,   
    - *                A' * X,  if KASE=2,
    - *         and DLACON must be re-called with all the other parameters   
    - *          unchanged.   
    - *
    - *   ISGN   (workspace) INT array, dimension (N)
    - *
    - *   EST    (output) DOUBLE PRECISION   
    - *          An estimate (a lower bound) for norm(A).   
    - *
    - *   KASE   (input/output) INT
    - *          On the initial call to DLACON, KASE should be 0.   
    - *          On an intermediate return, KASE will be 1 or 2, indicating   
    - *          whether X should be overwritten by A * X  or A' * X.   
    - *          On the final return from DLACON, KASE will again be 0.   
    - *
    - *   Further Details   
    - *   ======= =======   
    - *
    - *   Contributed by Nick Higham, University of Manchester.   
    - *   Originally named CONEST, dated March 16, 1988.   
    - *
    - *   Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of 
    - *   a real or complex matrix, with applications to condition estimation", 
    - *   ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988.   
    - *   ===================================================================== 
    - * 
    - */ - -int -dlacon_(int *n, double *v, double *x, int *isgn, double *est, int *kase) - -{ - - - /* Table of constant values */ - int c__1 = 1; - double zero = 0.0; - double one = 1.0; - - /* Local variables */ - static int iter; - static int jump, jlast; - static double altsgn, estold; - static int i, j; - double temp; -#ifdef _CRAY - extern int ISAMAX(int *, double *, int *); - extern double SASUM(int *, double *, int *); - extern int SCOPY(int *, double *, int *, double *, int *); -#else - extern int idamax_(int *, double *, int *); - extern double dasum_(int *, double *, int *); - extern int dcopy_(int *, double *, int *, double *, int *); -#endif -#define d_sign(a, b) (b >= 0 ? fabs(a) : -fabs(a)) /* Copy sign */ -#define i_dnnt(a) \ - ( a>=0 ? floor(a+.5) : -floor(.5-a) ) /* Round to nearest integer */ - - if ( *kase == 0 ) { - for (i = 0; i < *n; ++i) { - x[i] = 1. / (double) (*n); - } - *kase = 1; - jump = 1; - return 0; - } - - switch (jump) { - case 1: goto L20; - case 2: goto L40; - case 3: goto L70; - case 4: goto L110; - case 5: goto L140; - } - - /* ................ ENTRY (JUMP = 1) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */ - L20: - if (*n == 1) { - v[0] = x[0]; - *est = fabs(v[0]); - /* ... QUIT */ - goto L150; - } -#ifdef _CRAY - *est = SASUM(n, x, &c__1); -#else - *est = dasum_(n, x, &c__1); -#endif - - for (i = 0; i < *n; ++i) { - x[i] = d_sign(one, x[i]); - isgn[i] = i_dnnt(x[i]); - } - *kase = 2; - jump = 2; - return 0; - - /* ................ ENTRY (JUMP = 2) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */ -L40: -#ifdef _CRAY - j = ISAMAX(n, &x[0], &c__1); -#else - j = idamax_(n, &x[0], &c__1); -#endif - --j; - iter = 2; - - /* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */ -L50: - for (i = 0; i < *n; ++i) x[i] = zero; - x[j] = one; - *kase = 1; - jump = 3; - return 0; - - /* ................ ENTRY (JUMP = 3) - X HAS BEEN OVERWRITTEN BY A*X. 
*/ -L70: -#ifdef _CRAY - SCOPY(n, x, &c__1, v, &c__1); -#else - dcopy_(n, x, &c__1, v, &c__1); -#endif - estold = *est; -#ifdef _CRAY - *est = SASUM(n, v, &c__1); -#else - *est = dasum_(n, v, &c__1); -#endif - - for (i = 0; i < *n; ++i) - if (i_dnnt(d_sign(one, x[i])) != isgn[i]) - goto L90; - - /* REPEATED SIGN VECTOR DETECTED, HENCE ALGORITHM HAS CONVERGED. */ - goto L120; - -L90: - /* TEST FOR CYCLING. */ - if (*est <= estold) goto L120; - - for (i = 0; i < *n; ++i) { - x[i] = d_sign(one, x[i]); - isgn[i] = i_dnnt(x[i]); - } - *kase = 2; - jump = 4; - return 0; - - /* ................ ENTRY (JUMP = 4) - X HAS BEEN OVERWRITTEN BY TRANDPOSE(A)*X. */ -L110: - jlast = j; -#ifdef _CRAY - j = ISAMAX(n, &x[0], &c__1); -#else - j = idamax_(n, &x[0], &c__1); -#endif - --j; - if (x[jlast] != fabs(x[j]) && iter < 5) { - ++iter; - goto L50; - } - - /* ITERATION COMPLETE. FINAL STAGE. */ -L120: - altsgn = 1.; - for (i = 1; i <= *n; ++i) { - x[i-1] = altsgn * ((double)(i - 1) / (double)(*n - 1) + 1.); - altsgn = -altsgn; - } - *kase = 1; - jump = 5; - return 0; - - /* ................ ENTRY (JUMP = 5) - X HAS BEEN OVERWRITTEN BY A*X. */ -L140: -#ifdef _CRAY - temp = SASUM(n, x, &c__1) / (double)(*n * 3) * 2.; -#else - temp = dasum_(n, x, &c__1) / (double)(*n * 3) * 2.; -#endif - if (temp > *est) { -#ifdef _CRAY - SCOPY(n, &x[0], &c__1, &v[0], &c__1); -#else - dcopy_(n, &x[0], &c__1, &v[0], &c__1); -#endif - *est = temp; - } - -L150: - *kase = 0; - return 0; - -} /* dlacon_ */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlamch.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlamch.c deleted file mode 100644 index e1179158fc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlamch.c +++ /dev/null @@ -1,975 +0,0 @@ -/*! @file dlamch.c - * \brief Determines double precision machine parameters - * - *
    - *       -- LAPACK auxiliary routine (version 2.0) --   
    - *       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *       Courant Institute, Argonne National Lab, and Rice University   
    - *       October 31, 1992   
    - * 
    - */ -#include -#include "slu_Cnames.h" - -#define TRUE_ (1) -#define FALSE_ (0) -#define abs(x) ((x) >= 0 ? (x) : -(x)) -#define min(a,b) ((a) <= (b) ? (a) : (b)) -#define max(a,b) ((a) >= (b) ? (a) : (b)) - - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    DLAMCH determines double precision machine parameters.   
    -
    -    Arguments   
    -    =========   
    -
    -    CMACH   (input) CHARACTER*1   
    -            Specifies the value to be returned by DLAMCH:   
    -            = 'E' or 'e',   DLAMCH := eps   
    -            = 'S' or 's ,   DLAMCH := sfmin   
    -            = 'B' or 'b',   DLAMCH := base   
    -            = 'P' or 'p',   DLAMCH := eps*base   
    -            = 'N' or 'n',   DLAMCH := t   
    -            = 'R' or 'r',   DLAMCH := rnd   
    -            = 'M' or 'm',   DLAMCH := emin   
    -            = 'U' or 'u',   DLAMCH := rmin   
    -            = 'L' or 'l',   DLAMCH := emax   
    -            = 'O' or 'o',   DLAMCH := rmax   
    -
    -            where   
    -
    -            eps   = relative machine precision   
    -            sfmin = safe minimum, such that 1/sfmin does not overflow   
    -            base  = base of the machine   
    -            prec  = eps*base   
    -            t     = number of (base) digits in the mantissa   
    -            rnd   = 1.0 when rounding occurs in addition, 0.0 otherwise   
    -            emin  = minimum exponent before (gradual) underflow   
    -            rmin  = underflow threshold - base**(emin-1)   
    -            emax  = largest exponent before overflow   
    -            rmax  = overflow threshold  - (base**emax)*(1-eps)   
    -
    -   ===================================================================== 
    -
    -*/ -double dlamch_(char *cmach) -{ - - - static int first = TRUE_; - - /* System generated locals */ - int i__1; - double ret_val; - /* Builtin functions */ - double pow_di(double *, int *); - /* Local variables */ - static double base; - static int beta; - static double emin, prec, emax; - static int imin, imax; - static int lrnd; - static double rmin, rmax, t, rmach; - extern int lsame_(char *, char *); - static double small, sfmin; - extern /* Subroutine */ int dlamc2_(int *, int *, int *, - double *, int *, double *, int *, double *); - static int it; - static double rnd, eps; - - if (first) { - first = FALSE_; - dlamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax); - base = (double) beta; - t = (double) it; - if (lrnd) { - rnd = 1.; - i__1 = 1 - it; - eps = pow_di(&base, &i__1) / 2; - } else { - rnd = 0.; - i__1 = 1 - it; - eps = pow_di(&base, &i__1); - } - prec = eps * base; - emin = (double) imin; - emax = (double) imax; - sfmin = rmin; - small = 1. / rmax; - if (small >= sfmin) { - - /* Use SMALL plus a bit, to avoid the possibility of rounding - causing overflow when computing 1/sfmin. */ - sfmin = small * (eps + 1.); - } - } - - if (lsame_(cmach, "E")) { - rmach = eps; - } else if (lsame_(cmach, "S")) { - rmach = sfmin; - } else if (lsame_(cmach, "B")) { - rmach = base; - } else if (lsame_(cmach, "P")) { - rmach = prec; - } else if (lsame_(cmach, "N")) { - rmach = t; - } else if (lsame_(cmach, "R")) { - rmach = rnd; - } else if (lsame_(cmach, "M")) { - rmach = emin; - } else if (lsame_(cmach, "U")) { - rmach = rmin; - } else if (lsame_(cmach, "L")) { - rmach = emax; - } else if (lsame_(cmach, "O")) { - rmach = rmax; - } - - ret_val = rmach; - return ret_val; - -/* End of DLAMCH */ - -} /* dlamch_ */ -/* Subroutine */ -/*! \brief - -
    - Purpose   
    -    =======   
    -
    -    DLAMC1 determines the machine parameters given by BETA, T, RND, and   
    -    IEEE1.   
    -
    -    Arguments   
    -    =========   
    -
    -    BETA    (output) INT   
    -            The base of the machine.   
    -
    -    T       (output) INT   
    -            The number of ( BETA ) digits in the mantissa.   
    -
    -    RND     (output) INT   
    -            Specifies whether proper rounding  ( RND = .TRUE. )  or   
    -            chopping  ( RND = .FALSE. )  occurs in addition. This may not 
    -  
    -            be a reliable guide to the way in which the machine performs 
    -  
    -            its arithmetic.   
    -
    -    IEEE1   (output) INT   
    -            Specifies whether rounding appears to be done in the IEEE   
    -            'round to nearest' style.   
    -
    -    Further Details   
    -    ===============   
    -
    -    The routine is based on the routine  ENVRON  by Malcolm and   
    -    incorporates suggestions by Gentleman and Marovich. See   
    -
    -       Malcolm M. A. (1972) Algorithms to reveal properties of   
    -          floating-point arithmetic. Comms. of the ACM, 15, 949-951.   
    -
    -       Gentleman W. M. and Marovich S. B. (1974) More on algorithms   
    -          that reveal properties of floating point arithmetic units.   
    -          Comms. of the ACM, 17, 276-277.   
    -
    -   ===================================================================== 
    -
    -*/ -int dlamc1_(int *beta, int *t, int *rnd, int - *ieee1) -{ - /* Initialized data */ - static int first = TRUE_; - /* System generated locals */ - double d__1, d__2; - /* Local variables */ - static int lrnd; - static double a, b, c, f; - static int lbeta; - static double savec; - extern double dlamc3_(double *, double *); - static int lieee1; - static double t1, t2; - static int lt; - static double one, qtr; - - if (first) { - first = FALSE_; - one = 1.; - -/* LBETA, LIEEE1, LT and LRND are the local values of BE -TA, - IEEE1, T and RND. - - Throughout this routine we use the function DLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. - - Compute a = 2.0**m with the smallest positive integer m s -uch - that - - fl( a + 1.0 ) = a. */ - - a = 1.; - c = 1.; - -/* + WHILE( C.EQ.ONE )LOOP */ -L10: - if (c == one) { - a *= 2; - c = dlamc3_(&a, &one); - d__1 = -a; - c = dlamc3_(&c, &d__1); - goto L10; - } -/* + END WHILE - - Now compute b = 2.0**m with the smallest positive integer -m - such that - - fl( a + b ) .gt. a. */ - - b = 1.; - c = dlamc3_(&a, &b); - -/* + WHILE( C.EQ.A )LOOP */ -L20: - if (c == a) { - b *= 2; - c = dlamc3_(&a, &b); - goto L20; - } -/* + END WHILE - - Now compute the base. a and c are neighbouring floating po -int - numbers in the interval ( beta**t, beta**( t + 1 ) ) and - so - their difference is beta. Adding 0.25 to c is to ensure that - it - is truncated to beta and not ( beta - 1 ). */ - - qtr = one / 4; - savec = c; - d__1 = -a; - c = dlamc3_(&c, &d__1); - lbeta = (int) (c + qtr); - -/* Now determine whether rounding or chopping occurs, by addin -g a - bit less than beta/2 and a bit more than beta/2 to - a. 
*/ - - b = (double) lbeta; - d__1 = b / 2; - d__2 = -b / 100; - f = dlamc3_(&d__1, &d__2); - c = dlamc3_(&f, &a); - if (c == a) { - lrnd = TRUE_; - } else { - lrnd = FALSE_; - } - d__1 = b / 2; - d__2 = b / 100; - f = dlamc3_(&d__1, &d__2); - c = dlamc3_(&f, &a); - if (lrnd && c == a) { - lrnd = FALSE_; - } - -/* Try and decide whether rounding is done in the IEEE 'round - to - nearest' style. B/2 is half a unit in the last place of the -two - numbers A and SAVEC. Furthermore, A is even, i.e. has last -bit - zero, and SAVEC is odd. Thus adding B/2 to A should not cha -nge - A, but adding B/2 to SAVEC should change SAVEC. */ - - d__1 = b / 2; - t1 = dlamc3_(&d__1, &a); - d__1 = b / 2; - t2 = dlamc3_(&d__1, &savec); - lieee1 = t1 == a && t2 > savec && lrnd; - -/* Now find the mantissa, t. It should be the integer part - of - log to the base beta of a, however it is safer to determine - t - by powering. So we find t as the smallest positive integer -for - which - - fl( beta**t + 1.0 ) = 1.0. */ - - lt = 0; - a = 1.; - c = 1.; - -/* + WHILE( C.EQ.ONE )LOOP */ -L30: - if (c == one) { - ++lt; - a *= lbeta; - c = dlamc3_(&a, &one); - d__1 = -a; - c = dlamc3_(&c, &d__1); - goto L30; - } -/* + END WHILE */ - - } - - *beta = lbeta; - *t = lt; - *rnd = lrnd; - *ieee1 = lieee1; - return 0; - -/* End of DLAMC1 */ - -} /* dlamc1_ */ - - -/* Subroutine */ -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    DLAMC2 determines the machine parameters specified in its argument   
    -    list.   
    -
    -    Arguments   
    -    =========   
    -
    -    BETA    (output) INT   
    -            The base of the machine.   
    -
    -    T       (output) INT   
    -            The number of ( BETA ) digits in the mantissa.   
    -
    -    RND     (output) INT   
    -            Specifies whether proper rounding  ( RND = .TRUE. )  or   
    -            chopping  ( RND = .FALSE. )  occurs in addition. This may not 
    -  
    -            be a reliable guide to the way in which the machine performs 
    -  
    -            its arithmetic.   
    -
    -    EPS     (output) DOUBLE PRECISION   
    -            The smallest positive number such that   
    -
    -               fl( 1.0 - EPS ) .LT. 1.0,   
    -
    -            where fl denotes the computed value.   
    -
    -    EMIN    (output) INT   
    -            The minimum exponent before (gradual) underflow occurs.   
    -
    -    RMIN    (output) DOUBLE PRECISION   
    -            The smallest normalized number for the machine, given by   
    -            BASE**( EMIN - 1 ), where  BASE  is the floating point value 
    -  
    -            of BETA.   
    -
    -    EMAX    (output) INT   
    -            The maximum exponent before overflow occurs.   
    -
    -    RMAX    (output) DOUBLE PRECISION   
    -            The largest positive number for the machine, given by   
    -            BASE**EMAX * ( 1 - EPS ), where  BASE  is the floating point 
    -  
    -            value of BETA.   
    -
    -    Further Details   
    -    ===============   
    -
    -    The computation of  EPS  is based on a routine PARANOIA by   
    -    W. Kahan of the University of California at Berkeley.   
    -
    -   ===================================================================== 
    -
    -*/ -int dlamc2_(int *beta, int *t, int *rnd, - double *eps, int *emin, double *rmin, int *emax, - double *rmax) -{ - - /* Table of constant values */ - static int c__1 = 1; - - /* Initialized data */ - static int first = TRUE_; - static int iwarn = FALSE_; - /* System generated locals */ - int i__1; - double d__1, d__2, d__3, d__4, d__5; - /* Builtin functions */ - double pow_di(double *, int *); - /* Local variables */ - static int ieee; - static double half; - static int lrnd; - static double leps, zero, a, b, c; - static int i, lbeta; - static double rbase; - static int lemin, lemax, gnmin; - static double small; - static int gpmin; - static double third, lrmin, lrmax, sixth; - extern /* Subroutine */ int dlamc1_(int *, int *, int *, - int *); - extern double dlamc3_(double *, double *); - static int lieee1; - extern /* Subroutine */ int dlamc4_(int *, double *, int *), - dlamc5_(int *, int *, int *, int *, int *, - double *); - static int lt, ngnmin, ngpmin; - static double one, two; - - if (first) { - first = FALSE_; - zero = 0.; - one = 1.; - two = 2.; - -/* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values - of - BETA, T, RND, EPS, EMIN and RMIN. - - Throughout this routine we use the function DLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. - - DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. -*/ - - dlamc1_(&lbeta, <, &lrnd, &lieee1); - -/* Start to find EPS. */ - - b = (double) lbeta; - i__1 = -lt; - a = pow_di(&b, &i__1); - leps = a; - -/* Try some tricks to see whether or not this is the correct E -PS. 
*/ - - b = two / 3; - half = one / 2; - d__1 = -half; - sixth = dlamc3_(&b, &d__1); - third = dlamc3_(&sixth, &sixth); - d__1 = -half; - b = dlamc3_(&third, &d__1); - b = dlamc3_(&b, &sixth); - b = abs(b); - if (b < leps) { - b = leps; - } - - leps = 1.; - -/* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */ -L10: - if (leps > b && b > zero) { - leps = b; - d__1 = half * leps; -/* Computing 5th power */ - d__3 = two, d__4 = d__3, d__3 *= d__3; -/* Computing 2nd power */ - d__5 = leps; - d__2 = d__4 * (d__3 * d__3) * (d__5 * d__5); - c = dlamc3_(&d__1, &d__2); - d__1 = -c; - c = dlamc3_(&half, &d__1); - b = dlamc3_(&half, &c); - d__1 = -b; - c = dlamc3_(&half, &d__1); - b = dlamc3_(&half, &c); - goto L10; - } -/* + END WHILE */ - - if (a < leps) { - leps = a; - } - -/* Computation of EPS complete. - - Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3 -)). - Keep dividing A by BETA until (gradual) underflow occurs. T -his - is detected when we cannot recover the previous A. */ - - rbase = one / lbeta; - small = one; - for (i = 1; i <= 3; ++i) { - d__1 = small * rbase; - small = dlamc3_(&d__1, &zero); -/* L20: */ - } - a = dlamc3_(&one, &small); - dlamc4_(&ngpmin, &one, &lbeta); - d__1 = -one; - dlamc4_(&ngnmin, &d__1, &lbeta); - dlamc4_(&gpmin, &a, &lbeta); - d__1 = -a; - dlamc4_(&gnmin, &d__1, &lbeta); - ieee = FALSE_; - - if (ngpmin == ngnmin && gpmin == gnmin) { - if (ngpmin == gpmin) { - lemin = ngpmin; -/* ( Non twos-complement machines, no gradual under -flow; - e.g., VAX ) */ - } else if (gpmin - ngpmin == 3) { - lemin = ngpmin - 1 + lt; - ieee = TRUE_; -/* ( Non twos-complement machines, with gradual und -erflow; - e.g., IEEE standard followers ) */ - } else { - lemin = min(ngpmin,gpmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else if (ngpmin == gpmin && ngnmin == gnmin) { - if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) { - lemin = max(ngpmin,ngnmin); -/* ( Twos-complement machines, no gradual underflow -; - e.g., CYBER 205 
) */ - } else { - lemin = min(ngpmin,ngnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin) - { - if (gpmin - min(ngpmin,ngnmin) == 3) { - lemin = max(ngpmin,ngnmin) - 1 + lt; -/* ( Twos-complement machines with gradual underflo -w; - no known machine ) */ - } else { - lemin = min(ngpmin,ngnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else { -/* Computing MIN */ - i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin); - lemin = min(i__1,gnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } -/* ** - Comment out this if block if EMIN is ok */ - if (iwarn) { - first = TRUE_; - printf("\n\n WARNING. The value EMIN may be incorrect:- "); - printf("EMIN = %8i\n",lemin); - printf("If, after inspection, the value EMIN looks acceptable"); - printf("please comment out \n the IF block as marked within the"); - printf("code of routine DLAMC2, \n otherwise supply EMIN"); - printf("explicitly.\n"); - } -/* ** - - Assume IEEE arithmetic if we found denormalised numbers abo -ve, - or if arithmetic seems to round in the IEEE style, determi -ned - in routine DLAMC1. A true IEEE machine should have both thi -ngs - true; however, faulty machines may have one or the other. */ - - ieee = ieee || lieee1; - -/* Compute RMIN by successive division by BETA. We could comp -ute - RMIN as BASE**( EMIN - 1 ), but some machines underflow dur -ing - this computation. */ - - lrmin = 1.; - i__1 = 1 - lemin; - for (i = 1; i <= 1-lemin; ++i) { - d__1 = lrmin * rbase; - lrmin = dlamc3_(&d__1, &zero); -/* L30: */ - } - -/* Finally, call DLAMC5 to compute EMAX and RMAX. */ - - dlamc5_(&lbeta, <, &lemin, &ieee, &lemax, &lrmax); - } - - *beta = lbeta; - *t = lt; - *rnd = lrnd; - *eps = leps; - *emin = lemin; - *rmin = lrmin; - *emax = lemax; - *rmax = lrmax; - - return 0; - - -/* End of DLAMC2 */ - -} /* dlamc2_ */ - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    DLAMC3  is intended to force  A  and  B  to be stored prior to doing 
    -  
    -    the addition of  A  and  B ,  for use in situations where optimizers 
    -  
    -    might hold one of these in a register.   
    -
    -    Arguments   
    -    =========   
    -
    -    A, B    (input) DOUBLE PRECISION   
    -            The values A and B.   
    -
    -   ===================================================================== 
    -
    -*/ -double dlamc3_(double *a, double *b) -{ -/* >>Start of File<< - System generated locals */ - volatile double ret_val; - volatile double x; - volatile double y; - - x = *a; - y = *b; - ret_val = x + y; - - return ret_val; - -/* End of DLAMC3 */ - -} /* dlamc3_ */ - - -/* Subroutine */ -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    DLAMC4 is a service routine for DLAMC2.   
    -
    -    Arguments   
    -    =========   
    -
    -    EMIN    (output) EMIN   
    -            The minimum exponent before (gradual) underflow, computed by 
    -  
    -            setting A = START and dividing by BASE until the previous A   
    -            can not be recovered.   
    -
    -    START   (input) DOUBLE PRECISION   
    -            The starting point for determining EMIN.   
    -
    -    BASE    (input) INT   
    -            The base of the machine.   
    -
    -   ===================================================================== 
    -
    -*/ - -int dlamc4_(int *emin, double *start, int *base) -{ - /* System generated locals */ - int i__1; - double d__1; - /* Local variables */ - static double zero, a; - static int i; - static double rbase, b1, b2, c1, c2, d1, d2; - extern double dlamc3_(double *, double *); - static double one; - - a = *start; - one = 1.; - rbase = one / *base; - zero = 0.; - *emin = 1; - d__1 = a * rbase; - b1 = dlamc3_(&d__1, &zero); - c1 = a; - c2 = a; - d1 = a; - d2 = a; -/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. - $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */ -L10: - if (c1 == a && c2 == a && d1 == a && d2 == a) { - --(*emin); - a = b1; - d__1 = a / *base; - b1 = dlamc3_(&d__1, &zero); - d__1 = b1 * *base; - c1 = dlamc3_(&d__1, &zero); - d1 = zero; - i__1 = *base; - for (i = 1; i <= *base; ++i) { - d1 += b1; -/* L20: */ - } - d__1 = a * rbase; - b2 = dlamc3_(&d__1, &zero); - d__1 = b2 / rbase; - c2 = dlamc3_(&d__1, &zero); - d2 = zero; - i__1 = *base; - for (i = 1; i <= *base; ++i) { - d2 += b2; -/* L30: */ - } - goto L10; - } -/* + END WHILE */ - - return 0; - -/* End of DLAMC4 */ - -} /* dlamc4_ */ - - -/* Subroutine */ -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    DLAMC5 attempts to compute RMAX, the largest machine floating-point   
    -    number, without overflow.  It assumes that EMAX + abs(EMIN) sum   
    -    approximately to a power of 2.  It will fail on machines where this   
    -    assumption does not hold, for example, the Cyber 205 (EMIN = -28625, 
    -  
    -    EMAX = 28718).  It will also fail if the value supplied for EMIN is   
    -    too large (i.e. too close to zero), probably with overflow.   
    -
    -    Arguments   
    -    =========   
    -
    -    BETA    (input) INT   
    -            The base of floating-point arithmetic.   
    -
    -    P       (input) INT   
    -            The number of base BETA digits in the mantissa of a   
    -            floating-point value.   
    -
    -    EMIN    (input) INT   
    -            The minimum exponent before (gradual) underflow.   
    -
    -    IEEE    (input) INT   
    -            A int flag specifying whether or not the arithmetic   
    -            system is thought to comply with the IEEE standard.   
    -
    -    EMAX    (output) INT   
    -            The largest exponent before overflow   
    -
    -    RMAX    (output) DOUBLE PRECISION   
    -            The largest machine floating-point number.   
    -
    -   ===================================================================== 
    -  
    -
    -
    -       First compute LEXP and UEXP, two powers of 2 that bound   
    -       abs(EMIN). We then assume that EMAX + abs(EMIN) will sum   
    -       approximately to the bound that is closest to abs(EMIN).   
    -       (EMAX is the exponent of the required number RMAX).
    -
    -*/ -int dlamc5_(int *beta, int *p, int *emin, - int *ieee, int *emax, double *rmax) -{ - - /* Table of constant values */ - static double c_b5 = 0.; - - /* System generated locals */ - int i__1; - double d__1; - /* Local variables */ - static int lexp; - static double oldy; - static int uexp, i; - static double y, z; - static int nbits; - extern double dlamc3_(double *, double *); - static double recbas; - static int exbits, expsum, try__; - - - - lexp = 1; - exbits = 1; -L10: - try__ = lexp << 1; - if (try__ <= -(*emin)) { - lexp = try__; - ++exbits; - goto L10; - } - if (lexp == -(*emin)) { - uexp = lexp; - } else { - uexp = try__; - ++exbits; - } - -/* Now -LEXP is less than or equal to EMIN, and -UEXP is greater - than or equal to EMIN. EXBITS is the number of bits needed to - store the exponent. */ - - if (uexp + *emin > -lexp - *emin) { - expsum = lexp << 1; - } else { - expsum = uexp << 1; - } - -/* EXPSUM is the exponent range, approximately equal to - EMAX - EMIN + 1 . */ - - *emax = expsum + *emin - 1; - nbits = exbits + 1 + *p; - -/* NBITS is the total number of bits needed to store a - floating-point number. */ - - if (nbits % 2 == 1 && *beta == 2) { - -/* Either there are an odd number of bits used to store a - floating-point number, which is unlikely, or some bits are - - not used in the representation of numbers, which is possible -, - (e.g. Cray machines) or the mantissa has an implicit bit, - (e.g. IEEE machines, Dec Vax machines), which is perhaps the - - most likely. We have to assume the last alternative. - If this is true, then we need to reduce EMAX by one because - - there must be some way of representing zero in an implicit-b -it - system. On machines like Cray, we are reducing EMAX by one - - unnecessarily. */ - - --(*emax); - } - - if (*ieee) { - -/* Assume we are on an IEEE machine which reserves one exponent - - for infinity and NaN. 
*/ - - --(*emax); - } - -/* Now create RMAX, the largest machine number, which should - be equal to (1.0 - BETA**(-P)) * BETA**EMAX . - - First compute 1.0 - BETA**(-P), being careful that the - result is less than 1.0 . */ - - recbas = 1. / *beta; - z = *beta - 1.; - y = 0.; - i__1 = *p; - for (i = 1; i <= *p; ++i) { - z *= recbas; - if (y < 1.) { - oldy = y; - } - y = dlamc3_(&y, &z); -/* L20: */ - } - if (y >= 1.) { - y = oldy; - } - -/* Now multiply by BETA**EMAX to get RMAX. */ - - i__1 = *emax; - for (i = 1; i <= *emax; ++i) { - d__1 = y * *beta; - y = dlamc3_(&d__1, &c_b5); -/* L30: */ - } - - *rmax = y; - return 0; - -/* End of DLAMC5 */ - -} /* dlamc5_ */ - -double pow_di(double *ap, int *bp) -{ - double pow, x; - int n; - - pow = 1; - x = *ap; - n = *bp; - - if(n != 0){ - if(n < 0) { - n = -n; - x = 1/x; - } - for( ; ; ) { - if(n & 01) pow *= x; - if(n >>= 1) x *= x; - else break; - } - } - return(pow); -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlangs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlangs.c deleted file mode 100644 index 871cd33f75..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlangs.c +++ /dev/null @@ -1,119 +0,0 @@ - -/*! @file dlangs.c - * \brief Returns the value of the one norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from lapack routine DLANGE 
    - * 
    - */ -/* - * File name: dlangs.c - * History: Modified from lapack routine DLANGE - */ -#include -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   DLANGS returns the value of the one norm, or the Frobenius norm, or 
    - *   the infinity norm, or the element of largest absolute value of a 
    - *   real matrix A.   
    - *
    - *   Description   
    - *   ===========   
    - *
    - *   DLANGE returns the value   
    - *
    - *      DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'   
    - *               (   
    - *               ( norm1(A),         NORM = '1', 'O' or 'o'   
    - *               (   
    - *               ( normI(A),         NORM = 'I' or 'i'   
    - *               (   
    - *               ( normF(A),         NORM = 'F', 'f', 'E' or 'e'   
    - *
    - *   where  norm1  denotes the  one norm of a matrix (maximum column sum), 
    - *   normI  denotes the  infinity norm  of a matrix  (maximum row sum) and 
    - *   normF  denotes the  Frobenius norm of a matrix (square root of sum of 
    - *   squares).  Note that  max(abs(A(i,j)))  is not a  matrix norm.   
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   NORM    (input) CHARACTER*1   
    - *           Specifies the value to be returned in DLANGE as described above.   
    - *   A       (input) SuperMatrix*
    - *           The M by N sparse matrix A. 
    - *
    - *  =====================================================================
    - * 
    - */ - -double dlangs(char *norm, SuperMatrix *A) -{ - - /* Local variables */ - NCformat *Astore; - double *Aval; - int i, j, irow; - double value, sum; - double *rwork; - - Astore = A->Store; - Aval = Astore->nzval; - - if ( SUPERLU_MIN(A->nrow, A->ncol) == 0) { - value = 0.; - - } else if (lsame_(norm, "M")) { - /* Find max(abs(A(i,j))). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - value = SUPERLU_MAX( value, fabs( Aval[i]) ); - - } else if (lsame_(norm, "O") || *(unsigned char *)norm == '1') { - /* Find norm1(A). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) { - sum = 0.; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - sum += fabs(Aval[i]); - value = SUPERLU_MAX(value,sum); - } - - } else if (lsame_(norm, "I")) { - /* Find normI(A). */ - if ( !(rwork = (double *) SUPERLU_MALLOC(A->nrow * sizeof(double))) ) - ABORT("SUPERLU_MALLOC fails for rwork."); - for (i = 0; i < A->nrow; ++i) rwork[i] = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) { - irow = Astore->rowind[i]; - rwork[irow] += fabs(Aval[i]); - } - value = 0.; - for (i = 0; i < A->nrow; ++i) - value = SUPERLU_MAX(value, rwork[i]); - - SUPERLU_FREE (rwork); - - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - /* Find normF(A). */ - ABORT("Not implemented."); - } else - ABORT("Illegal norm specified."); - - return (value); - -} /* dlangs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlaqgs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlaqgs.c deleted file mode 100644 index c5023d98b6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dlaqgs.c +++ /dev/null @@ -1,146 +0,0 @@ - -/*! @file dlaqgs.c - * \brief Equlibrates a general sprase matrix - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - * Modified from LAPACK routine DLAQGE
    - * 
    - */ -/* - * File name: dlaqgs.c - * History: Modified from LAPACK routine DLAQGE - */ -#include -#include "slu_ddefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   DLAQGS equilibrates a general sparse M by N matrix A using the row and   
    - *   scaling factors in the vectors R and C.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input/output) SuperMatrix*
    - *           On exit, the equilibrated matrix.  See EQUED for the form of 
    - *           the equilibrated matrix. The type of A can be:
    - *	    Stype = NC; Dtype = SLU_D; Mtype = GE.
    - *	    
    - *   R       (input) double*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *	    
    - *   C       (input) double*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *	    
    - *   ROWCND  (input) double
    - *           Ratio of the smallest R(i) to the largest R(i).
    - *	    
    - *   COLCND  (input) double
    - *           Ratio of the smallest C(i) to the largest C(i).
    - *	    
    - *   AMAX    (input) double
    - *           Absolute value of largest matrix entry.
    - *	    
    - *   EQUED   (output) char*
    - *           Specifies the form of equilibration that was done.   
    - *           = 'N':  No equilibration   
    - *           = 'R':  Row equilibration, i.e., A has been premultiplied by  
    - *                   diag(R).   
    - *           = 'C':  Column equilibration, i.e., A has been postmultiplied  
    - *                   by diag(C).   
    - *           = 'B':  Both row and column equilibration, i.e., A has been
    - *                   replaced by diag(R) * A * diag(C).   
    - *
    - *   Internal Parameters   
    - *   ===================   
    - *
    - *   THRESH is a threshold value used to decide if row or column scaling   
    - *   should be done based on the ratio of the row or column scaling   
    - *   factors.  If ROWCND < THRESH, row scaling is done, and if   
    - *   COLCND < THRESH, column scaling is done.   
    - *
    - *   LARGE and SMALL are threshold values used to decide if row scaling   
    - *   should be done based on the absolute size of the largest matrix   
    - *   element.  If AMAX > LARGE or AMAX < SMALL, row scaling is done.   
    - *
    - *   ===================================================================== 
    - * 
    - */ - -void -dlaqgs(SuperMatrix *A, double *r, double *c, - double rowcnd, double colcnd, double amax, char *equed) -{ - - -#define THRESH (0.1) - - /* Local variables */ - NCformat *Astore; - double *Aval; - int i, j, irow; - double large, small, cj; - extern double dlamch_(char *); - - - /* Quick return if possible */ - if (A->nrow <= 0 || A->ncol <= 0) { - *(unsigned char *)equed = 'N'; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Initialize LARGE and SMALL. */ - small = dlamch_("Safe minimum") / dlamch_("Precision"); - large = 1. / small; - - if (rowcnd >= THRESH && amax >= small && amax <= large) { - if (colcnd >= THRESH) - *(unsigned char *)equed = 'N'; - else { - /* Column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - Aval[i] *= cj; - } - } - *(unsigned char *)equed = 'C'; - } - } else if (colcnd >= THRESH) { - /* Row scaling, no column scaling */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - Aval[i] *= r[irow]; - } - *(unsigned char *)equed = 'R'; - } else { - /* Row and column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - Aval[i] *= cj * r[irow]; - } - } - *(unsigned char *)equed = 'B'; - } - - return; - -} /* dlaqgs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dldperm.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dldperm.c deleted file mode 100644 index 3b224abdd6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dldperm.c +++ /dev/null @@ -1,165 +0,0 @@ - -/*! @file - * \brief Finds a row permutation so that the matrix has large entries on the diagonal - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -extern void mc64id_(int_t*); -extern void mc64ad_(int_t*, int_t*, int_t*, int_t [], int_t [], double [], - int_t*, int_t [], int_t*, int_t[], int_t*, double [], - int_t [], int_t []); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   DLDPERM finds a row permutation so that the matrix has large
    - *   entries on the diagonal.
    - *
    - * Arguments
    - * =========
    - *
    - * job    (input) int
    - *        Control the action. Possible values for JOB are:
    - *        = 1 : Compute a row permutation of the matrix so that the
    - *              permuted matrix has as many entries on its diagonal as
    - *              possible. The values on the diagonal are of arbitrary size.
    - *              HSL subroutine MC21A/AD is used for this.
    - *        = 2 : Compute a row permutation of the matrix so that the smallest 
    - *              value on the diagonal of the permuted matrix is maximized.
    - *        = 3 : Compute a row permutation of the matrix so that the smallest
    - *              value on the diagonal of the permuted matrix is maximized.
    - *              The algorithm differs from the one used for JOB = 2 and may
    - *              have quite a different performance.
    - *        = 4 : Compute a row permutation of the matrix so that the sum
    - *              of the diagonal entries of the permuted matrix is maximized.
    - *        = 5 : Compute a row permutation of the matrix so that the product
    - *              of the diagonal entries of the permuted matrix is maximized
    - *              and vectors to scale the matrix so that the nonzero diagonal 
    - *              entries of the permuted matrix are one in absolute value and 
    - *              all the off-diagonal entries are less than or equal to one in 
    - *              absolute value.
    - *        Restriction: 1 <= JOB <= 5.
    - *
    - * n      (input) int
    - *        The order of the matrix.
    - *
    - * nnz    (input) int
    - *        The number of nonzeros in the matrix.
    - *
    - * adjncy (input) int*, of size nnz
    - *        The adjacency structure of the matrix, which contains the row
    - *        indices of the nonzeros.
    - *
    - * colptr (input) int*, of size n+1
    - *        The pointers to the beginning of each column in ADJNCY.
    - *
    - * nzval  (input) double*, of size nnz
    - *        The nonzero values of the matrix. nzval[k] is the value of
    - *        the entry corresponding to adjncy[k].
    - *        It is not used if job = 1.
    - *
    - * perm   (output) int*, of size n
    - *        The permutation vector. perm[i] = j means row i in the
    - *        original matrix is in row j of the permuted matrix.
    - *
    - * u      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the row scaling factors. 
    - *
    - * v      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the column scaling factors. 
    - *        The scaled matrix B has entries b_ij = a_ij * exp(u_i + v_j).
    - * 
    - */ - -int -dldperm(int_t job, int_t n, int_t nnz, int_t colptr[], int_t adjncy[], - double nzval[], int_t *perm, double u[], double v[]) -{ - int_t i, liw, ldw, num; - int_t *iw, icntl[10], info[10]; - double *dw; - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Enter dldperm()"); -#endif - liw = 5*n; - if ( job == 3 ) liw = 10*n + nnz; - if ( !(iw = intMalloc(liw)) ) ABORT("Malloc fails for iw[]"); - ldw = 3*n + nnz; - if ( !(dw = (double*) SUPERLU_MALLOC(ldw * sizeof(double))) ) - ABORT("Malloc fails for dw[]"); - - /* Increment one to get 1-based indexing. */ - for (i = 0; i <= n; ++i) ++colptr[i]; - for (i = 0; i < nnz; ++i) ++adjncy[i]; -#if ( DEBUGlevel>=2 ) - printf("LDPERM(): n %d, nnz %d\n", n, nnz); - slu_PrintInt10("colptr", n+1, colptr); - slu_PrintInt10("adjncy", nnz, adjncy); -#endif - - /* - * NOTE: - * ===== - * - * MC64AD assumes that column permutation vector is defined as: - * perm(i) = j means column i of permuted A is in column j of original A. - * - * Since a symmetric permutation preserves the diagonal entries. Then - * by the following relation: - * P'(A*P')P = P'A - * we can apply inverse(perm) to rows of A to get large diagonal entries. - * But, since 'perm' defined in MC64AD happens to be the reverse of - * SuperLU's definition of permutation vector, therefore, it is already - * an inverse for our purpose. We will thus use it directly. - * - */ - mc64id_(icntl); -#if 0 - /* Suppress error and warning messages. */ - icntl[0] = -1; - icntl[1] = -1; -#endif - - mc64ad_(&job, &n, &nnz, colptr, adjncy, nzval, &num, perm, - &liw, iw, &ldw, dw, icntl, info); - -#if ( DEBUGlevel>=2 ) - slu_PrintInt10("perm", n, perm); - printf(".. After MC64AD info %d\tsize of matching %d\n", info[0], num); -#endif - if ( info[0] == 1 ) { /* Structurally singular */ - printf(".. The last %d permutations:\n", n-num); - slu_PrintInt10("perm", n-num, &perm[num]); - } - - /* Restore to 0-based indexing. 
*/ - for (i = 0; i <= n; ++i) --colptr[i]; - for (i = 0; i < nnz; ++i) --adjncy[i]; - for (i = 0; i < n; ++i) --perm[i]; - - if ( job == 5 ) - for (i = 0; i < n; ++i) { - u[i] = dw[i]; - v[i] = dw[n+i]; - } - - SUPERLU_FREE(iw); - SUPERLU_FREE(dw); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Exit dldperm()"); -#endif - - return info[0]; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dmemory.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dmemory.c deleted file mode 100644 index 27f3556cdd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dmemory.c +++ /dev/null @@ -1,701 +0,0 @@ - -/*! @file dmemory.c - * \brief Memory details - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_ddefs.h" - - -/* Internal prototypes */ -void *dexpand (int *, MemType,int, int, GlobalLU_t *); -int dLUWorkInit (int, int, int, int **, double **, GlobalLU_t *); -void copy_mem_double (int, void *, void *); -void dStackCompress (GlobalLU_t *); -void dSetupSpace (void *, int, GlobalLU_t *); -void *duser_malloc (int, int, GlobalLU_t *); -void duser_free (int, int, GlobalLU_t *); - -/* External prototypes (in memory.c - prec-independent) */ -extern void copy_mem_int (int, void *, void *); -extern void user_bcopy (char *, char *, int); - - -/* Macros to manipulate stack */ -#define StackFull(x) ( x + Glu->stack.used >= Glu->stack.size ) -#define NotDoubleAlign(addr) ( (long int)addr & 7 ) -#define DoubleAlign(addr) ( ((long int)addr + 7) & ~7L ) -#define TempSpace(m, w) ( (2*w + 4 + NO_MARKER) * m * sizeof(int) + \ - (w + 1) * m * sizeof(double) ) -#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */ - - - - -/*! \brief Setup the memory model to be used for factorization. - * - * lwork = 0: use system malloc; - * lwork > 0: use user-supplied work[] space. 
- */ -void dSetupSpace(void *work, int lwork, GlobalLU_t *Glu) -{ - if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; /* malloc/free */ - } else if ( lwork > 0 ) { - Glu->MemModel = USER; /* user provided space */ - Glu->stack.used = 0; - Glu->stack.top1 = 0; - Glu->stack.top2 = (lwork/4)*4; /* must be word addressable */ - Glu->stack.size = Glu->stack.top2; - Glu->stack.array = (void *) work; - } -} - - - -void *duser_malloc(int bytes, int which_end, GlobalLU_t *Glu) -{ - void *buf; - - if ( StackFull(bytes) ) return (NULL); - - if ( which_end == HEAD ) { - buf = (char*) Glu->stack.array + Glu->stack.top1; - Glu->stack.top1 += bytes; - } else { - Glu->stack.top2 -= bytes; - buf = (char*) Glu->stack.array + Glu->stack.top2; - } - - Glu->stack.used += bytes; - return buf; -} - - -void duser_free(int bytes, int which_end, GlobalLU_t *Glu) -{ - if ( which_end == HEAD ) { - Glu->stack.top1 -= bytes; - } else { - Glu->stack.top2 += bytes; - } - Glu->stack.used -= bytes; -} - - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int dQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, iword, dword, panel_size = sp_ienv(1); - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(double); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0*n + 3.0) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0 * panel_size + 4.0 + NO_MARKER) * n * iword + - (panel_size + 1.0) * n * dword ); - - return 0; -} /* dQuerySpace */ - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int ilu_dQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, panel_size = sp_ienv(1); - register float iword, dword; - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(double); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0f * n + 3.0f) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0f) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization. - ILU needs 5*n more integers than LU */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0f * panel_size + 9.0f + NO_MARKER) * n * iword + - (panel_size + 1.0f) * n * dword ); - - return 0; -} /* ilu_dQuerySpace */ - - -/*! \brief Allocate storage for the data structures common to all factor routines. - * - *
    - * For those unpredictable size, estimate as fill_ratio * nnz(A).
    - * Return value:
    - *     If lwork = -1, return the estimated amount of space required, plus n;
    - *     otherwise, return the amount of space actually allocated when
    - *     memory allocation failure occurred.
    - * 
    - */ -int -dLUMemInit(fact_t fact, void *work, int lwork, int m, int n, int annz, - int panel_size, double fill_ratio, SuperMatrix *L, SuperMatrix *U, - GlobalLU_t *Glu, int **iwork, double **dwork) -{ - int info, iword, dword; - SCformat *Lstore; - NCformat *Ustore; - int *xsup, *supno; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - double *ucol; - int *usub, *xusub; - int nzlmax, nzumax, nzlumax; - - iword = sizeof(int); - dword = sizeof(double); - Glu->n = n; - Glu->num_expansions = 0; - - if ( !Glu->expanders ) - Glu->expanders = (ExpHeader*)SUPERLU_MALLOC( NO_MEMTYPE * - sizeof(ExpHeader) ); - if ( !Glu->expanders ) ABORT("SUPERLU_MALLOC fails for expanders"); - - if ( fact != SamePattern_SameRowPerm ) { - /* Guess for L\U factors */ - nzumax = nzlumax = fill_ratio * annz; - nzlmax = SUPERLU_MAX(1, fill_ratio/4.) * annz; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else { - dSetupSpace(work, lwork, Glu); - } - -#if ( PRNTlevel >= 1 ) - printf("dLUMemInit() called: fill_ratio %ld, nzlmax %ld, nzumax %ld\n", - fill_ratio, nzlmax, nzumax); - fflush(stdout); -#endif - - /* Integer pointers for L\U factors */ - if ( Glu->MemModel == SYSTEM ) { - xsup = intMalloc(n+1); - supno = intMalloc(n+1); - xlsub = intMalloc(n+1); - xlusup = intMalloc(n+1); - xusub = intMalloc(n+1); - } else { - xsup = (int *)duser_malloc((n+1) * iword, HEAD, Glu); - supno = (int *)duser_malloc((n+1) * iword, HEAD, Glu); - xlsub = (int *)duser_malloc((n+1) * iword, HEAD, Glu); - xlusup = (int *)duser_malloc((n+1) * iword, HEAD, Glu); - xusub = (int *)duser_malloc((n+1) * iword, HEAD, Glu); - } - - lusup = (double *) dexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (double *) dexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) dexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) dexpand( &nzumax, USUB, 0, 1, Glu ); - - while ( !lusup || !ucol || !lsub || !usub ) { - if ( 
Glu->MemModel == SYSTEM ) { - SUPERLU_FREE(lusup); - SUPERLU_FREE(ucol); - SUPERLU_FREE(lsub); - SUPERLU_FREE(usub); - } else { - duser_free((nzlumax+nzumax)*dword+(nzlmax+nzumax)*iword, - HEAD, Glu); - } - nzlumax /= 2; - nzumax /= 2; - nzlmax /= 2; - if ( nzlumax < annz ) { - printf("Not enough memory to perform factorization.\n"); - return (dmemory_usage(nzlmax, nzumax, nzlumax, n) + n); - } -#if ( PRNTlevel >= 1) - printf("dLUMemInit() reduce size: nzlmax %ld, nzumax %ld\n", - nzlmax, nzumax); - fflush(stdout); -#endif - lusup = (double *) dexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (double *) dexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) dexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) dexpand( &nzumax, USUB, 0, 1, Glu ); - } - - } else { - /* fact == SamePattern_SameRowPerm */ - Lstore = L->Store; - Ustore = U->Store; - xsup = Lstore->sup_to_col; - supno = Lstore->col_to_sup; - xlsub = Lstore->rowind_colptr; - xlusup = Lstore->nzval_colptr; - xusub = Ustore->colptr; - nzlmax = Glu->nzlmax; /* max from previous factorization */ - nzumax = Glu->nzumax; - nzlumax = Glu->nzlumax; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; - } else { - Glu->MemModel = USER; - Glu->stack.top2 = (lwork/4)*4; /* must be word-addressable */ - Glu->stack.size = Glu->stack.top2; - } - - lsub = Glu->expanders[LSUB].mem = Lstore->rowind; - lusup = Glu->expanders[LUSUP].mem = Lstore->nzval; - usub = Glu->expanders[USUB].mem = Ustore->rowind; - ucol = Glu->expanders[UCOL].mem = Ustore->nzval;; - Glu->expanders[LSUB].size = nzlmax; - Glu->expanders[LUSUP].size = nzlumax; - Glu->expanders[USUB].size = nzumax; - Glu->expanders[UCOL].size = nzumax; - } - - Glu->xsup = xsup; - Glu->supno = supno; - Glu->lsub = lsub; - Glu->xlsub = xlsub; - Glu->lusup = lusup; - Glu->xlusup = xlusup; - Glu->ucol = ucol; - Glu->usub = usub; - 
Glu->xusub = xusub; - Glu->nzlmax = nzlmax; - Glu->nzumax = nzumax; - Glu->nzlumax = nzlumax; - - info = dLUWorkInit(m, n, panel_size, iwork, dwork, Glu); - if ( info ) - return ( info + dmemory_usage(nzlmax, nzumax, nzlumax, n) + n); - - ++Glu->num_expansions; - return 0; - -} /* dLUMemInit */ - -/*! \brief Allocate known working storage. Returns 0 if success, otherwise - returns the number of bytes allocated so far when failure occurred. */ -int -dLUWorkInit(int m, int n, int panel_size, int **iworkptr, - double **dworkptr, GlobalLU_t *Glu) -{ - int isize, dsize, extra; - double *old_ptr; - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - - isize = ( (2 * panel_size + 3 + NO_MARKER ) * m + n ) * sizeof(int); - dsize = (m * panel_size + - NUM_TEMPV(m,panel_size,maxsuper,rowblk)) * sizeof(double); - - if ( Glu->MemModel == SYSTEM ) - *iworkptr = (int *) intCalloc(isize/sizeof(int)); - else - *iworkptr = (int *) duser_malloc(isize, TAIL, Glu); - if ( ! *iworkptr ) { - fprintf(stderr, "dLUWorkInit: malloc fails for local iworkptr[]\n"); - return (isize + n); - } - - if ( Glu->MemModel == SYSTEM ) - *dworkptr = (double *) SUPERLU_MALLOC(dsize); - else { - *dworkptr = (double *) duser_malloc(dsize, TAIL, Glu); - if ( NotDoubleAlign(*dworkptr) ) { - old_ptr = *dworkptr; - *dworkptr = (double*) DoubleAlign(*dworkptr); - *dworkptr = (double*) ((double*)*dworkptr - 1); - extra = (char*)old_ptr - (char*)*dworkptr; -#ifdef DEBUG - printf("dLUWorkInit: not aligned, extra %d\n", extra); -#endif - Glu->stack.top2 -= extra; - Glu->stack.used += extra; - } - } - if ( ! *dworkptr ) { - fprintf(stderr, "malloc fails for local dworkptr[]."); - return (isize + dsize + n); - } - - return 0; -} - - -/*! \brief Set up pointers for real working arrays. 
- */ -void -dSetRWork(int m, int panel_size, double *dworkptr, - double **dense, double **tempv) -{ - double zero = 0.0; - - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - *dense = dworkptr; - *tempv = *dense + panel_size*m; - dfill (*dense, m * panel_size, zero); - dfill (*tempv, NUM_TEMPV(m,panel_size,maxsuper,rowblk), zero); -} - -/*! \brief Free the working storage used by factor routines. - */ -void dLUWorkFree(int *iwork, double *dwork, GlobalLU_t *Glu) -{ - if ( Glu->MemModel == SYSTEM ) { - SUPERLU_FREE (iwork); - SUPERLU_FREE (dwork); - } else { - Glu->stack.used -= (Glu->stack.size - Glu->stack.top2); - Glu->stack.top2 = Glu->stack.size; -/* dStackCompress(Glu); */ - } - - SUPERLU_FREE (Glu->expanders); - Glu->expanders = NULL; -} - -/*! \brief Expand the data structures for L and U during the factorization. - * - *
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -dLUMemXpand(int jcol, - int next, /* number of elements currently in the factors */ - MemType mem_type, /* which type of memory to expand */ - int *maxlen, /* modified - maximum length of a data structure */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - void *new_mem; - -#ifdef DEBUG - printf("dLUMemXpand(): jcol %d, next %d, maxlen %d, MemType %d\n", - jcol, next, *maxlen, mem_type); -#endif - - if (mem_type == USUB) - new_mem = dexpand(maxlen, mem_type, next, 1, Glu); - else - new_mem = dexpand(maxlen, mem_type, next, 0, Glu); - - if ( !new_mem ) { - int nzlmax = Glu->nzlmax; - int nzumax = Glu->nzumax; - int nzlumax = Glu->nzlumax; - fprintf(stderr, "Can't expand MemType %d: jcol %d\n", mem_type, jcol); - return (dmemory_usage(nzlmax, nzumax, nzlumax, Glu->n) + Glu->n); - } - - switch ( mem_type ) { - case LUSUP: - Glu->lusup = (double *) new_mem; - Glu->nzlumax = *maxlen; - break; - case UCOL: - Glu->ucol = (double *) new_mem; - Glu->nzumax = *maxlen; - break; - case LSUB: - Glu->lsub = (int *) new_mem; - Glu->nzlmax = *maxlen; - break; - case USUB: - Glu->usub = (int *) new_mem; - Glu->nzumax = *maxlen; - break; - } - - return 0; - -} - - - -void -copy_mem_double(int howmany, void *old, void *new) -{ - register int i; - double *dold = old; - double *dnew = new; - for (i = 0; i < howmany; i++) dnew[i] = dold[i]; -} - -/*! \brief Expand the existing storage to accommodate more fill-ins. 
- */ -void -*dexpand ( - int *prev_len, /* length used from previous call */ - MemType type, /* which part of the memory to expand */ - int len_to_copy, /* size of the memory to be copied to new store */ - int keep_prev, /* = 1: use prev_len; - = 0: compute new_len to expand */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - float EXPAND = 1.5; - float alpha; - void *new_mem, *old_mem; - int new_len, tries, lword, extra, bytes_to_copy; - ExpHeader *expanders = Glu->expanders; /* Array of 4 types of memory */ - - alpha = EXPAND; - - if ( Glu->num_expansions == 0 || keep_prev ) { - /* First time allocate requested */ - new_len = *prev_len; - } else { - new_len = alpha * *prev_len; - } - - if ( type == LSUB || type == USUB ) lword = sizeof(int); - else lword = sizeof(double); - - if ( Glu->MemModel == SYSTEM ) { - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - if ( Glu->num_expansions != 0 ) { - tries = 0; - if ( keep_prev ) { - if ( !new_mem ) return (NULL); - } else { - while ( !new_mem ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - } - } - if ( type == LSUB || type == USUB ) { - copy_mem_int(len_to_copy, expanders[type].mem, new_mem); - } else { - copy_mem_double(len_to_copy, expanders[type].mem, new_mem); - } - SUPERLU_FREE (expanders[type].mem); - } - expanders[type].mem = (void *) new_mem; - - } else { /* MemModel == USER */ - if ( Glu->num_expansions == 0 ) { - new_mem = duser_malloc(new_len * lword, HEAD, Glu); - if ( NotDoubleAlign(new_mem) && - (type == LUSUP || type == UCOL) ) { - old_mem = new_mem; - new_mem = (void *)DoubleAlign(new_mem); - extra = (char*)new_mem - (char*)old_mem; -#ifdef DEBUG - printf("expand(): not aligned, extra %d\n", extra); -#endif - Glu->stack.top1 += extra; - Glu->stack.used += extra; - } - expanders[type].mem = (void *) new_mem; - } else { - tries = 0; - extra = (new_len - 
*prev_len) * lword; - if ( keep_prev ) { - if ( StackFull(extra) ) return (NULL); - } else { - while ( StackFull(extra) ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - extra = (new_len - *prev_len) * lword; - } - } - - if ( type != USUB ) { - new_mem = (void*)((char*)expanders[type + 1].mem + extra); - bytes_to_copy = (char*)Glu->stack.array + Glu->stack.top1 - - (char*)expanders[type + 1].mem; - user_bcopy(expanders[type+1].mem, new_mem, bytes_to_copy); - - if ( type < USUB ) { - Glu->usub = expanders[USUB].mem = - (void*)((char*)expanders[USUB].mem + extra); - } - if ( type < LSUB ) { - Glu->lsub = expanders[LSUB].mem = - (void*)((char*)expanders[LSUB].mem + extra); - } - if ( type < UCOL ) { - Glu->ucol = expanders[UCOL].mem = - (void*)((char*)expanders[UCOL].mem + extra); - } - Glu->stack.top1 += extra; - Glu->stack.used += extra; - if ( type == UCOL ) { - Glu->stack.top1 += extra; /* Add same amount for USUB */ - Glu->stack.used += extra; - } - - } /* if ... */ - - } /* else ... */ - } - - expanders[type].size = new_len; - *prev_len = new_len; - if ( Glu->num_expansions ) ++Glu->num_expansions; - - return (void *) expanders[type].mem; - -} /* dexpand */ - - -/*! \brief Compress the work[] array to remove fragmentation. 
- */ -void -dStackCompress(GlobalLU_t *Glu) -{ - register int iword, dword, ndim; - char *last, *fragment; - int *ifrom, *ito; - double *dfrom, *dto; - int *xlsub, *lsub, *xusub, *usub, *xlusup; - double *ucol, *lusup; - - iword = sizeof(int); - dword = sizeof(double); - ndim = Glu->n; - - xlsub = Glu->xlsub; - lsub = Glu->lsub; - xusub = Glu->xusub; - usub = Glu->usub; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - lusup = Glu->lusup; - - dfrom = ucol; - dto = (double *)((char*)lusup + xlusup[ndim] * dword); - copy_mem_double(xusub[ndim], dfrom, dto); - ucol = dto; - - ifrom = lsub; - ito = (int *) ((char*)ucol + xusub[ndim] * iword); - copy_mem_int(xlsub[ndim], ifrom, ito); - lsub = ito; - - ifrom = usub; - ito = (int *) ((char*)lsub + xlsub[ndim] * iword); - copy_mem_int(xusub[ndim], ifrom, ito); - usub = ito; - - last = (char*)usub + xusub[ndim] * iword; - fragment = (char*) (((char*)Glu->stack.array + Glu->stack.top1) - last); - Glu->stack.used -= (long int) fragment; - Glu->stack.top1 -= (long int) fragment; - - Glu->ucol = ucol; - Glu->lsub = lsub; - Glu->usub = usub; - -#ifdef DEBUG - printf("dStackCompress: fragment %d\n", fragment); - /* for (last = 0; last < ndim; ++last) - print_lu_col("After compress:", last, 0);*/ -#endif - -} - -/*! 
\brief Allocate storage for original matrix A - */ -void -dallocateA(int n, int nnz, double **a, int **asub, int **xa) -{ - *a = (double *) doubleMalloc(nnz); - *asub = (int *) intMalloc(nnz); - *xa = (int *) intMalloc(n+1); -} - - -double *doubleMalloc(int n) -{ - double *buf; - buf = (double *) SUPERLU_MALLOC((size_t)n * sizeof(double)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in doubleMalloc()\n"); - } - return (buf); -} - -double *doubleCalloc(int n) -{ - double *buf; - register int i; - double zero = 0.0; - buf = (double *) SUPERLU_MALLOC((size_t)n * sizeof(double)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in doubleCalloc()\n"); - } - for (i = 0; i < n; ++i) buf[i] = zero; - return (buf); -} - - -int dmemory_usage(const int nzlmax, const int nzumax, - const int nzlumax, const int n) -{ - register int iword, dword; - - iword = sizeof(int); - dword = sizeof(double); - - return (10 * n * iword + - nzlmax * iword + nzumax * (iword + dword) + nzlumax * dword); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpanel_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpanel_bmod.c deleted file mode 100644 index de048b6e7f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpanel_bmod.c +++ /dev/null @@ -1,459 +0,0 @@ - -/*! @file dpanel_bmod.c - * \brief Performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ -/* - -*/ - -#include -#include -#include "slu_ddefs.h" - -/* - * Function prototypes - */ -void dlsolve(int, int, double *, double *); -void dmatvec(int, int, int, double *, double *, double *); -extern void dcheck_tempv(); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *    Performs numeric block updates (sup-panel) in topological order.
    - *    It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - *    Special processing on the supernodal portion of L\U[*,j]
    - *
    - *    Before entering this routine, the original nonzeros in the panel 
    - *    were already copied into the spa[m,w].
    - *
    - *    Updated/Output parameters-
    - *    dense[0:m-1,w]: L[*,j:j+w-1] and U[*,j:j+w-1] are returned 
    - *    collectively in the m-by-w vector dense[*]. 
    - * 
    - */ - -void -dpanel_bmod ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - const int nseg, /* in */ - double *dense, /* out, of size n by w */ - double *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in, of size n by w */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - double alpha, beta; -#endif - - register int k, ksub; - int fsupc, nsupc, nsupr, nrow; - int krep, krep_ind; - double ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int segsze; - int block_nrow; /* no of rows in a block row */ - register int lptr; /* Points to the row subscripts of a supernode */ - int kfnz, irow, no_zeros; - register int isub, isub1, i; - register int jj; /* Index through each column in the panel */ - int *xsup, *supno; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - int *repfnz_col; /* repfnz[] for a column in the panel */ - double *dense_col; /* dense[] for a column in the panel */ - double *tempv1; /* Used in 1-D update */ - double *TriTmp, *MatvecTmp; /* used in 2-D update */ - double zero = 0.0; - double one = 1.0; - register int ldaTmp; - register int r_ind, r_hi; - static int first = 1, maxsuper, rowblk, colblk; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - if ( first ) { - maxsuper = sp_ienv(3); - rowblk = sp_ienv(4); - colblk = sp_ienv(5); - first = 0; - } - ldaTmp = maxsuper + rowblk; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { /* for each updating supernode */ - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * 
nsupc = no of columns in a supernode - * nsupr = no of rows in a supernode - */ - krep = segrep[k--]; - fsupc = xsup[supno[krep]]; - nsupc = krep - fsupc + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nrow = nsupr - nsupc; - lptr = xlsub[fsupc]; - krep_ind = lptr + nsupc - 1; - - repfnz_col = repfnz; - dense_col = dense; - - if ( nsupc >= colblk && nrow > rowblk ) { /* 2-D block update */ - - TriTmp = tempv; - - /* Sequence through each column in panel -- triangular solves */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp ) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += segsze * (segsze - 1); - ops[GEMV] += 2 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - dense_col[irow] -= ukj * lusup[luptr]; - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - ukj -= ukj1 * lusup[luptr1]; - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; - dense_col[irow] -= (ukj*lusup[luptr] - + ukj1*lusup[luptr1]); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - ukj1 -= ukj2 * lusup[luptr2-1]; - ukj = ukj - ukj1*lusup[luptr1] - ukj2*lusup[luptr2]; - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; luptr2++; - dense_col[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] + ukj2*lusup[luptr2] ); - } - } - - } else { /* segsze >= 4 */ - - /* Copy U[*,j] segment 
from dense[*] to TriTmp[*], which - holds the result of triangular solves. */ - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - TriTmp[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#else - dtrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#endif -#else - dlsolve ( nsupr, segsze, &lusup[luptr], TriTmp ); -#endif - - - } /* else ... */ - - } /* for jj ... end tri-solves */ - - /* Block row updates; push all the way into dense[*] block */ - for ( r_ind = 0; r_ind < nrow; r_ind += rowblk ) { - - r_hi = SUPERLU_MIN(nrow, r_ind + rowblk); - block_nrow = SUPERLU_MIN(rowblk, r_hi - r_ind); - luptr = xlusup[fsupc] + nsupc + r_ind; - isub1 = lptr + nsupc + r_ind; - - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - /* Sequence through each column in panel -- matrix-vector */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - /* Perform a block update, and scatter the result of - matrix-vector to dense[]. 
*/ - no_zeros = kfnz - fsupc; - luptr1 = luptr + nsupr * no_zeros; - MatvecTmp = &TriTmp[maxsuper]; - -#ifdef USE_VENDOR_BLAS - alpha = one; - beta = zero; -#ifdef _CRAY - SGEMV(ftcs2, &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#else - dgemv_("N", &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#endif -#else - dmatvec(nsupr, block_nrow, segsze, &lusup[luptr1], - TriTmp, MatvecTmp); -#endif - - /* Scatter MatvecTmp[*] into SPA dense[*] temporarily - * such that MatvecTmp[*] can be re-used for the - * the next blok row update. dense[] will be copied into - * global store after the whole panel has been finished. - */ - isub = isub1; - for (i = 0; i < block_nrow; i++) { - irow = lsub[isub]; - dense_col[irow] -= MatvecTmp[i]; - MatvecTmp[i] = zero; - ++isub; - } - - } /* for jj ... */ - - } /* for each block row ... */ - - /* Scatter the triangular solves into SPA dense[*] */ - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = TriTmp[i]; - TriTmp[i] = zero; - ++isub; - } - - } /* for jj ... 
*/ - - } else { /* 1-D block modification */ - - - /* Sequence through each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += segsze * (segsze - 1); - ops[GEMV] += 2 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - dense_col[irow] -= ukj * lusup[luptr]; - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - ukj -= ukj1 * lusup[luptr1]; - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; - dense_col[irow] -= (ukj*lusup[luptr] - + ukj1*lusup[luptr1]); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - ukj1 -= ukj2 * lusup[luptr2-1]; - ukj = ukj - ukj1*lusup[luptr1] - ukj2*lusup[luptr2]; - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; ++luptr2; - dense_col[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] + ukj2*lusup[luptr2] ); - } - } - - } else { /* segsze >= 4 */ - /* - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense[]. 
- */ - no_zeros = kfnz - fsupc; - - /* Copy U[*,j] segment from dense[*] to tempv[*]: - * The result of triangular solve is in tempv[*]; - * The result of matrix vector update is in dense_col[*] - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - tempv[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - dtrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - SGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - dgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - dlsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - dmatvec (nsupr, nrow, segsze, &lusup[luptr], tempv, tempv1); -#endif - - /* Scatter tempv[*] into SPA dense[*] temporarily, such - * that tempv[*] can be used for the triangular solve of - * the next column of the panel. They will be copied into - * ucol[*] after the whole panel has been finished. - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = tempv[i]; - tempv[i] = zero; - isub++; - } - - /* Scatter the update from tempv1[*] into SPA dense[*] */ - /* Start dense rectangular L */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - dense_col[irow] -= tempv1[i]; - tempv1[i] = zero; - ++isub; - } - - } /* else segsze>=4 ... */ - - } /* for each column in the panel... */ - - } /* else 1-D update ... */ - - } /* for each updating supernode ... 
*/ - -} - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpanel_dfs.c deleted file mode 100644 index 73343e5af9..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpanel_dfs.c +++ /dev/null @@ -1,254 +0,0 @@ - -/*! @file dpanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel. 
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a 
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ - -void -dpanel_dfs ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - double *dense, /* out */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *xprune, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - NCPformat *Astore; - double *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - double *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - - /* For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - 
panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... 
*/ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpivotL.c deleted file mode 100644 index 1a1110bf5b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpivotL.c +++ /dev/null @@ -1,195 +0,0 @@ - -/*! @file dpivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include -#include "slu_ddefs.h" - -#undef DEBUG - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *           pivot row = k;
    - *       ELSE IF abs(A_jj) >= thresh THEN
    - *           pivot row = j;
    - *       ELSE
    - *           pivot row = m;
    - * 
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0      success;
    - *                 i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -dpivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int *iperm_r, /* in - inverse of perm_r */ - int *iperm_c, /* in - used to find diagonal of Pc*A*Pc' */ - int *pivrow, /* out */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - int pivptr, old_pivptr, diag, diagind; - double pivmax, rtemp, thresh; - double temp; - double *lu_sup_ptr; - double *lu_col_ptr; - int *lsub_ptr; - int isub, icol, k, itemp; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - /* Initialize pointers */ - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - -#ifdef DEBUG -if ( jcol == MIN_COL ) { - printf("Before cdiv: col %d\n", jcol); - for (k = nsupc; k < nsupr; k++) - printf(" lu[%d] %f\n", lsub_ptr[k], lu_col_ptr[k]); -} -#endif - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, and diagonal element. 
*/ - if ( *usepr ) *pivrow = iperm_r[jcol]; - diagind = iperm_c[jcol]; -#ifdef SCIPY_SPECIFIC_FIX - pivmax = -1.0; -#else - pivmax = 0.0; -#endif - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - for (isub = nsupc; isub < nsupr; ++isub) { - rtemp = fabs (lu_col_ptr[isub]); - if ( rtemp > pivmax ) { - pivmax = rtemp; - pivptr = isub; - } - if ( *usepr && lsub_ptr[isub] == *pivrow ) old_pivptr = isub; - if ( lsub_ptr[isub] == diagind ) diag = isub; - } - - /* Test for singularity */ -#ifdef SCIPY_SPECIFIC_FIX - if (pivmax < 0.0) { - perm_r[diagind] = jcol; - *usepr = 0; - return (jcol+1); - } -#endif - if ( pivmax == 0.0 ) { -#if 1 - *pivrow = lsub_ptr[pivptr]; - perm_r[*pivrow] = jcol; -#else - perm_r[diagind] = jcol; -#endif - *usepr = 0; - return (jcol+1); - } - - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. */ - if ( *usepr ) { - rtemp = fabs (lu_col_ptr[old_pivptr]); - if ( rtemp != 0.0 && rtemp >= thresh ) - pivptr = old_pivptr; - else - *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - rtemp = fabs (lu_col_ptr[diag]); - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += nsupr - nsupc; - - temp = 1.0 / lu_col_ptr[nsupc]; - for (k = nsupc+1; k < nsupr; k++) - lu_col_ptr[k] *= temp; - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpivotgrowth.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpivotgrowth.c deleted file mode 100644 index 76225b97c1..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpivotgrowth.c +++ /dev/null @@ -1,114 +0,0 @@ - -/*! @file dpivotgrowth.c - * \brief Computes the reciprocal pivot growth factor - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * Compute the reciprocal pivot growth factor of the leading ncols columns
    - * of the matrix, using the formula:
    - *     min_j ( max_i(abs(A_ij)) / max_i(abs(U_ij)) )
    - *
    - * Arguments
    - * =========
    - *
    - * ncols    (input) int
    - *          The number of columns of matrices A, L and U.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = NC; Dtype = SLU_D; Mtype = GE.
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SC; Dtype = SLU_D; Mtype = TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = NC;
    - *          Dtype = SLU_D; Mtype = TRU.
    - * 
    - */ - -double -dPivotGrowth(int ncols, SuperMatrix *A, int *perm_c, - SuperMatrix *L, SuperMatrix *U) -{ - - NCformat *Astore; - SCformat *Lstore; - NCformat *Ustore; - double *Aval, *Lval, *Uval; - int fsupc, nsupr, luptr, nz_in_U; - int i, j, k, oldcol; - int *inv_perm_c; - double rpg, maxaj, maxuj; - extern double dlamch_(char *); - double smlnum; - double *luval; - - /* Get machine constants. */ - smlnum = dlamch_("S"); - rpg = 1. / smlnum; - - Astore = A->Store; - Lstore = L->Store; - Ustore = U->Store; - Aval = Astore->nzval; - Lval = Lstore->nzval; - Uval = Ustore->nzval; - - inv_perm_c = (int *) SUPERLU_MALLOC(A->ncol*sizeof(int)); - for (j = 0; j < A->ncol; ++j) inv_perm_c[perm_c[j]] = j; - - for (k = 0; k <= Lstore->nsuper; ++k) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - luptr = L_NZ_START(fsupc); - luval = &Lval[luptr]; - nz_in_U = 1; - - for (j = fsupc; j < L_FST_SUPC(k+1) && j < ncols; ++j) { - maxaj = 0.; - oldcol = inv_perm_c[j]; - for (i = Astore->colptr[oldcol]; i < Astore->colptr[oldcol+1]; ++i) - maxaj = SUPERLU_MAX( maxaj, fabs(Aval[i]) ); - - maxuj = 0.; - for (i = Ustore->colptr[j]; i < Ustore->colptr[j+1]; i++) - maxuj = SUPERLU_MAX( maxuj, fabs(Uval[i]) ); - - /* Supernode */ - for (i = 0; i < nz_in_U; ++i) - maxuj = SUPERLU_MAX( maxuj, fabs(luval[i]) ); - - ++nz_in_U; - luval += nsupr; - - if ( maxuj == 0. ) - rpg = SUPERLU_MIN( rpg, 1.); - else - rpg = SUPERLU_MIN( rpg, maxaj / maxuj ); - } - - if ( j >= ncols ) break; - } - - SUPERLU_FREE(inv_perm_c); - return (rpg); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpruneL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpruneL.c deleted file mode 100644 index 62b8099014..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dpruneL.c +++ /dev/null @@ -1,154 +0,0 @@ - -/*! @file dpruneL.c - * \brief Prunes the L-structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - *
    - */ - - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Prunes the L-structure of supernodes whose L-structure
    - *   contains the current pivot row "pivrow"
    - * 
    - */ - -void -dpruneL( - const int jcol, /* in */ - const int *perm_r, /* in */ - const int pivrow, /* in */ - const int nseg, /* in */ - const int *segrep, /* in */ - const int *repfnz, /* in */ - int *xprune, /* out */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - - double utemp; - int jsupno, irep, irep1, kmin, kmax, krow, movnum; - int i, ktemp, minloc, maxloc; - int do_prune; /* logical variable */ - int *xsup, *supno; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - /* - * For each supernode-rep irep in U[*,j] - */ - jsupno = supno[jcol]; - for (i = 0; i < nseg; i++) { - - irep = segrep[i]; - irep1 = irep + 1; - do_prune = FALSE; - - /* Don't prune with a zero U-segment */ - if ( repfnz[irep] == EMPTY ) - continue; - - /* If a snode overlaps with the next panel, then the U-segment - * is fragmented into two parts -- irep and irep1. We should let - * pruning occur at the rep-column in irep1's snode. - */ - if ( supno[irep] == supno[irep1] ) /* Don't prune */ - continue; - - /* - * If it has not been pruned & it has a nonz in row L[pivrow,i] - */ - if ( supno[irep] != jsupno ) { - if ( xprune[irep] >= xlsub[irep1] ) { - kmin = xlsub[irep]; - kmax = xlsub[irep1] - 1; - for (krow = kmin; krow <= kmax; krow++) - if ( lsub[krow] == pivrow ) { - do_prune = TRUE; - break; - } - } - - if ( do_prune ) { - - /* Do a quicksort-type partition - * movnum=TRUE means that the num values have to be exchanged. 
- */ - movnum = FALSE; - if ( irep == xsup[supno[irep]] ) /* Snode of size 1 */ - movnum = TRUE; - - while ( kmin <= kmax ) { - - if ( perm_r[lsub[kmax]] == EMPTY ) - kmax--; - else if ( perm_r[lsub[kmin]] != EMPTY ) - kmin++; - else { /* kmin below pivrow (not yet pivoted), and kmax - * above pivrow: interchange the two subscripts - */ - ktemp = lsub[kmin]; - lsub[kmin] = lsub[kmax]; - lsub[kmax] = ktemp; - - /* If the supernode has only one column, then we - * only keep one set of subscripts. For any subscript - * interchange performed, similar interchange must be - * done on the numerical values. - */ - if ( movnum ) { - minloc = xlusup[irep] + (kmin - xlsub[irep]); - maxloc = xlusup[irep] + (kmax - xlsub[irep]); - utemp = lusup[minloc]; - lusup[minloc] = lusup[maxloc]; - lusup[maxloc] = utemp; - } - - kmin++; - kmax--; - - } - - } /* while */ - - xprune[irep] = kmin; /* Pruning */ - -#ifdef CHK_PRUNE - printf(" After dpruneL(),using col %d: xprune[%d] = %d\n", - jcol, irep, kmin); -#endif - } /* if do_prune */ - - } /* if */ - - } /* for each U-segment... */ -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dreadhb.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dreadhb.c deleted file mode 100644 index e2aa3d6f61..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dreadhb.c +++ /dev/null @@ -1,257 +0,0 @@ - -/*! @file dreadhb.c - * \brief Read a matrix stored in Harwell-Boeing format - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Purpose
    - * =======
    - * 
    - * Read a DOUBLE PRECISION matrix stored in Harwell-Boeing format 
    - * as described below.
    - * 
    - * Line 1 (A72,A8) 
    - *  	Col. 1 - 72   Title (TITLE) 
    - *	Col. 73 - 80  Key (KEY) 
    - * 
    - * Line 2 (5I14) 
    - * 	Col. 1 - 14   Total number of lines excluding header (TOTCRD) 
    - * 	Col. 15 - 28  Number of lines for pointers (PTRCRD) 
    - * 	Col. 29 - 42  Number of lines for row (or variable) indices (INDCRD) 
    - * 	Col. 43 - 56  Number of lines for numerical values (VALCRD) 
    - *	Col. 57 - 70  Number of lines for right-hand sides (RHSCRD) 
    - *                    (including starting guesses and solution vectors 
    - *		       if present) 
    - *           	      (zero indicates no right-hand side data is present) 
    - *
    - * Line 3 (A3, 11X, 4I14) 
    - *   	Col. 1 - 3    Matrix type (see below) (MXTYPE) 
    - * 	Col. 15 - 28  Number of rows (or variables) (NROW) 
    - * 	Col. 29 - 42  Number of columns (or elements) (NCOL) 
    - *	Col. 43 - 56  Number of row (or variable) indices (NNZERO) 
    - *	              (equal to number of entries for assembled matrices) 
    - * 	Col. 57 - 70  Number of elemental matrix entries (NELTVL) 
    - *	              (zero in the case of assembled matrices) 
    - * Line 4 (2A16, 2A20) 
    - * 	Col. 1 - 16   Format for pointers (PTRFMT) 
    - *	Col. 17 - 32  Format for row (or variable) indices (INDFMT) 
    - *	Col. 33 - 52  Format for numerical values of coefficient matrix (VALFMT) 
    - * 	Col. 53 - 72 Format for numerical values of right-hand sides (RHSFMT) 
    - *
    - * Line 5 (A3, 11X, 2I14) Only present if there are right-hand sides present 
    - *    	Col. 1 	      Right-hand side type: 
    - *	         	  F for full storage or M for same format as matrix 
    - *    	Col. 2        G if a starting vector(s) (Guess) is supplied. (RHSTYP) 
    - *    	Col. 3        X if an exact solution vector(s) is supplied. 
    - *	Col. 15 - 28  Number of right-hand sides (NRHS) 
    - *	Col. 29 - 42  Number of row indices (NRHSIX) 
    - *          	      (ignored in case of unassembled matrices) 
    - *
    - * The three character type field on line 3 describes the matrix type. 
    - * The following table lists the permitted values for each of the three 
    - * characters. As an example of the type field, RSA denotes that the matrix 
    - * is real, symmetric, and assembled. 
    - *
    - * First Character: 
    - *	R Real matrix 
    - *	C Complex matrix 
    - *	P Pattern only (no numerical values supplied) 
    - *
    - * Second Character: 
    - *	S Symmetric 
    - *	U Unsymmetric 
    - *	H Hermitian 
    - *	Z Skew symmetric 
    - *	R Rectangular 
    - *
    - * Third Character: 
    - *	A Assembled 
    - *	E Elemental matrices (unassembled) 
    - *
    - * 
    - */ -#include -#include -#include "slu_ddefs.h" - - -/*! \brief Eat up the rest of the current line */ -int dDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -int dParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -int dParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - * - * - * Purpose - * ======= - * - * Read a DOUBLE PRECISION matrix stored in Rutherford-Boeing format - * as described below. - * - * Line 1 (A72, A8) - * Col. 1 - 72 Title (TITLE) - * Col. 73 - 80 Matrix name / identifier (MTRXID) - * - * Line 2 (I14, 3(1X, I13)) - * Col. 1 - 14 Total number of lines excluding header (TOTCRD) - * Col. 16 - 28 Number of lines for pointers (PTRCRD) - * Col. 30 - 42 Number of lines for row (or variable) indices (INDCRD) - * Col. 
44 - 56 Number of lines for numerical values (VALCRD) - * - * Line 3 (A3, 11X, 4(1X, I13)) - * Col. 1 - 3 Matrix type (see below) (MXTYPE) - * Col. 15 - 28 Compressed Column: Number of rows (NROW) - * Elemental: Largest integer used to index variable (MVAR) - * Col. 30 - 42 Compressed Column: Number of columns (NCOL) - * Elemental: Number of element matrices (NELT) - * Col. 44 - 56 Compressed Column: Number of entries (NNZERO) - * Elemental: Number of variable indeces (NVARIX) - * Col. 58 - 70 Compressed Column: Unused, explicitly zero - * Elemental: Number of elemental matrix entries (NELTVL) - * - * Line 4 (2A16, A20) - * Col. 1 - 16 Fortran format for pointers (PTRFMT) - * Col. 17 - 32 Fortran format for row (or variable) indices (INDFMT) - * Col. 33 - 52 Fortran format for numerical values of coefficient matrix - * (VALFMT) - * (blank in the case of matrix patterns) - * - * The three character type field on line 3 describes the matrix type. - * The following table lists the permitted values for each of the three - * characters. As an example of the type field, RSA denotes that the matrix - * is real, symmetric, and assembled. - * - * First Character: - * R Real matrix - * C Complex matrix - * I integer matrix - * P Pattern only (no numerical values supplied) - * Q Pattern only (numerical values supplied in associated auxiliary value - * file) - * - * Second Character: - * S Symmetric - * U Unsymmetric - * H Hermitian - * Z Skew symmetric - * R Rectangular - * - * Third Character: - * A Compressed column form - * E Elemental form - * - * - */ - -#include "slu_ddefs.h" - - -/*! 
\brief Eat up the rest of the current line */ -static int dDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -static int dParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -static int dParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - * - */ - -#include "slu_ddefs.h" - - -void -dreadtriple(int *m, int *n, int *nonz, - double **nzval, int **rowind, int **colptr) -{ -/* - * Output parameters - * ================= - * (a,asub,xa): asub[*] contains the row subscripts of nonzeros - * in columns of matrix A; a[*] the numerical values; - * row i of A is given by a[k],k=xa[i],...,xa[i+1]-1. 
- * - */ - int j, k, jsize, nnz, nz; - double *a, *val; - int *asub, *xa, *row, *col; - int zero_base = 0; - - /* Matrix format: - * First line: #rows, #cols, #non-zero - * Triplet in the rest of lines: - * row, col, value - */ - - scanf("%d%d", n, nonz); - *m = *n; - printf("m %d, n %d, nonz %d\n", *m, *n, *nonz); - dallocateA(*n, *nonz, nzval, rowind, colptr); /* Allocate storage */ - a = *nzval; - asub = *rowind; - xa = *colptr; - - val = (double *) SUPERLU_MALLOC(*nonz * sizeof(double)); - row = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - col = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - - for (j = 0; j < *n; ++j) xa[j] = 0; - - /* Read into the triplet array from a file */ - for (nnz = 0, nz = 0; nnz < *nonz; ++nnz) { - scanf("%d%d%lf\n", &row[nz], &col[nz], &val[nz]); - - if ( nnz == 0 ) { /* first nonzero */ - if ( row[0] == 0 || col[0] == 0 ) { - zero_base = 1; - printf("triplet file: row/col indices are zero-based.\n"); - } else - printf("triplet file: row/col indices are one-based.\n"); - } - - if ( !zero_base ) { - /* Change to 0-based indexing. 
*/ - --row[nz]; - --col[nz]; - } - - if (row[nz] < 0 || row[nz] >= *m || col[nz] < 0 || col[nz] >= *n - /*|| val[nz] == 0.*/) { - fprintf(stderr, "nz %d, (%d, %d) = %e out of bound, removed\n", - nz, row[nz], col[nz], val[nz]); - exit(-1); - } else { - ++xa[col[nz]]; - ++nz; - } - } - - *nonz = nz; - - /* Initialize the array of column pointers */ - k = 0; - jsize = xa[0]; - xa[0] = 0; - for (j = 1; j < *n; ++j) { - k += jsize; - jsize = xa[j]; - xa[j] = k; - } - - /* Copy the triplets into the column oriented storage */ - for (nz = 0; nz < *nonz; ++nz) { - j = col[nz]; - k = xa[j]; - asub[k] = row[nz]; - a[k] = val[nz]; - ++xa[j]; - } - - /* Reset the column pointers to the beginning of each column */ - for (j = *n; j > 0; --j) - xa[j] = xa[j-1]; - xa[0] = 0; - - SUPERLU_FREE(val); - SUPERLU_FREE(row); - SUPERLU_FREE(col); - -#ifdef CHK_INPUT - { - int i; - for (i = 0; i < *n; i++) { - printf("Col %d, xa %d\n", i, xa[i]); - for (k = xa[i]; k < xa[i+1]; k++) - printf("%d\t%16.10f\n", asub[k], a[k]); - } - } -#endif - -} - - -void dreadrhs(int m, double *b) -{ - FILE *fp, *fopen(); - int i; - /*int j;*/ - - if ( !(fp = fopen("b.dat", "r")) ) { - fprintf(stderr, "dreadrhs: file does not exist\n"); - exit(-1); - } - for (i = 0; i < m; ++i) - fscanf(fp, "%lf\n", &b[i]); - - /* readpair_(j, &b[i]);*/ - fclose(fp); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsnode_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsnode_bmod.c deleted file mode 100644 index 8d70a996b2..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsnode_bmod.c +++ /dev/null @@ -1,118 +0,0 @@ - -/*! @file dsnode_bmod.c - * \brief Performs numeric block updates within the relaxed snode. - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_ddefs.h" - - -/*! \brief Performs numeric block updates within the relaxed snode. - */ -int -dsnode_bmod ( - const int jcol, /* in */ - const int jsupno, /* in */ - const int fsupc, /* in */ - double *dense, /* in */ - double *tempv, /* working array */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - double alpha = -1.0, beta = 1.0; -#endif - - int luptr, nsupc, nsupr, nrow; - int isub, irow, i, iptr; - register int ufirst, nextlu; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - nextlu = xlusup[jcol]; - - /* - * Process the supernodal portion of L\U[*,j] - */ - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = 0; - ++nextlu; - } - - xlusup[jcol + 1] = nextlu; /* Initialize xlusup for next column */ - - if ( fsupc < jcol ) { - - luptr = xlusup[fsupc]; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nsupc = jcol - fsupc; /* Excluding jcol */ - ufirst = xlusup[jcol]; /* Points to the beginning of column - jcol in supernode L\U(jsupno). 
*/ - nrow = nsupr - nsupc; - - ops[TRSV] += nsupc * (nsupc - 1); - ops[GEMV] += 2 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - SGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - dtrsv_( "L", "N", "U", &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - dgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - dlsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - dmatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], &tempv[0] ); - - /* Scatter tempv[*] into lusup[*] */ - iptr = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - lusup[iptr++] -= tempv[i]; - tempv[i] = 0.0; - } -#endif - - } - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsnode_dfs.c deleted file mode 100644 index 9672fe66ce..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsnode_dfs.c +++ /dev/null @@ -1,112 +0,0 @@ - -/*! @file dsnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    dsnode_dfs() - Determine the union of the row structures of those 
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore, 
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -dsnode_dfs ( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *xprune, /* out */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, ifrom, ito, nextl, new_next; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) { - if ( mem_error = dLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1, then make a copy of the subscripts for pruning */ - if ( jcol < kcol ) { - new_next = nextl + (nextl - xlsub[jcol]); - while ( new_next > nzlmax ) { - if ( mem_error = dLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - ito = nextl; - for (ifrom = xlsub[jcol]; ifrom < nextl; ) - lsub[ito++] = lsub[ifrom++]; - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - nextl = ito; - } - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xprune[kcol] = nextl; - xlsub[kcol+1] = nextl; - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsp_blas2.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsp_blas2.c deleted file mode 100644 index 59d6d40b64..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsp_blas2.c +++ /dev/null @@ -1,477 +0,0 @@ - -/*! 
@file dsp_blas2.c - * \brief Sparse BLAS 2, using some dense BLAS 2 operations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -/* - * File name: dsp_blas2.c - * Purpose: Sparse BLAS 2, using some dense BLAS 2 operations. - */ - -#include "slu_ddefs.h" - -/* - * Function prototypes - */ -void dusolve(int, int, double*, double*); -void dlsolve(int, int, double*, double*); -void dmatvec(int, int, int, double*, double*, double*); - -/*! \brief Solves one of the systems of equations A*x = b, or A'*x = b - * - *
    - *   Purpose
    - *   =======
    - *
    - *   sp_dtrsv() solves one of the systems of equations   
    - *       A*x = b,   or   A'*x = b,
    - *   where b and x are n element vectors and A is a sparse unit , or   
    - *   non-unit, upper or lower triangular matrix.   
    - *   No test for singularity or near-singularity is included in this   
    - *   routine. Such tests must be performed before calling this routine.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   uplo   - (input) char*
    - *            On entry, uplo specifies whether the matrix is an upper or   
    - *             lower triangular matrix as follows:   
    - *                uplo = 'U' or 'u'   A is an upper triangular matrix.   
    - *                uplo = 'L' or 'l'   A is a lower triangular matrix.   
    - *
    - *   trans  - (input) char*
    - *             On entry, trans specifies the equations to be solved as   
    - *             follows:   
    - *                trans = 'N' or 'n'   A*x = b.   
    - *                trans = 'T' or 't'   A'*x = b.
    - *                trans = 'C' or 'c'   A'*x = b.   
    - *
    - *   diag   - (input) char*
    - *             On entry, diag specifies whether or not A is unit   
    - *             triangular as follows:   
    - *                diag = 'U' or 'u'   A is assumed to be unit triangular.   
    - *                diag = 'N' or 'n'   A is not assumed to be unit   
    - *                                    triangular.   
    - *	     
    - *   L       - (input) SuperMatrix*
    - *	       The factor L from the factorization Pr*A*Pc=L*U. Use
    - *             compressed row subscripts storage for supernodes,
    - *             i.e., L has types: Stype = SC, Dtype = SLU_D, Mtype = TRLU.
    - *
    - *   U       - (input) SuperMatrix*
    - *	        The factor U from the factorization Pr*A*Pc=L*U.
    - *	        U has types: Stype = NC, Dtype = SLU_D, Mtype = TRU.
    - *    
    - *   x       - (input/output) double*
    - *             Before entry, the incremented array X must contain the n   
    - *             element right-hand side vector b. On exit, X is overwritten 
    - *             with the solution vector x.
    - *
    - *   info    - (output) int*
    - *             If *info = -i, the i-th argument had an illegal value.
    - * 
    - */ -int -sp_dtrsv(char *uplo, char *trans, char *diag, SuperMatrix *L, - SuperMatrix *U, double *x, SuperLUStat_t *stat, int *info) -{ -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - SCformat *Lstore; - NCformat *Ustore; - double *Lval, *Uval; - int incx = 1, incy = 1; - double alpha = 1.0, beta = 1.0; - int nrow; - int fsupc, nsupr, nsupc, luptr, istart, irow; - int i, k, iptr, jcol; - double *work; - flops_t solve_ops; - - /* Test the input parameters */ - *info = 0; - if ( !lsame_(uplo,"L") && !lsame_(uplo, "U") ) *info = -1; - else if ( !lsame_(trans, "N") && !lsame_(trans, "T") && - !lsame_(trans, "C")) *info = -2; - else if ( !lsame_(diag, "U") && !lsame_(diag, "N") ) *info = -3; - else if ( L->nrow != L->ncol || L->nrow < 0 ) *info = -4; - else if ( U->nrow != U->ncol || U->nrow < 0 ) *info = -5; - if ( *info ) { - i = -(*info); - xerbla_("sp_dtrsv", &i); - return 0; - } - - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( !(work = doubleCalloc(L->nrow)) ) - ABORT("Malloc fails for work in sp_dtrsv()."); - - if ( lsame_(trans, "N") ) { /* Form x := inv(A)*x. 
*/ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L)*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - nrow = nsupr - nsupc; - - solve_ops += nsupc * (nsupc - 1); - solve_ops += 2 * nrow * nsupc; - - if ( nsupc == 1 ) { - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); ++iptr) { - irow = L_SUB(iptr); - ++luptr; - x[irow] -= x[fsupc] * Lval[luptr]; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - SGEMV(ftcs2, &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#else - dtrsv_("L", "N", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - dgemv_("N", &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#endif -#else - dlsolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc]); - - dmatvec ( nsupr, nsupr-nsupc, nsupc, &Lval[luptr+nsupc], - &x[fsupc], &work[0] ); -#endif - - iptr = istart + nsupc; - for (i = 0; i < nrow; ++i, ++iptr) { - irow = L_SUB(iptr); - x[irow] -= work[i]; /* Scatter */ - work[i] = 0.0; - - } - } - } /* for k ... 
*/ - - } else { - /* Form x := inv(U)*x */ - - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += nsupc * (nsupc + 1); - - if ( nsupc == 1 ) { - x[fsupc] /= Lval[luptr]; - for (i = U_NZ_START(fsupc); i < U_NZ_START(fsupc+1); ++i) { - irow = U_SUB(i); - x[irow] -= x[fsupc] * Uval[i]; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV(ftcs3, ftcs2, ftcs2, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - dtrsv_("U", "N", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif -#else - dusolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc] ); -#endif - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); - i++) { - irow = U_SUB(i); - x[irow] -= x[jcol] * Uval[i]; - } - } - } - } /* for k ... 
*/ - - } - } else { /* Form x := inv(A')*x */ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L')*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; --k) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 2 * (nsupr - nsupc) * nsupc; - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - iptr = istart + nsupc; - for (i = L_NZ_START(jcol) + nsupc; - i < L_NZ_START(jcol+1); i++) { - irow = L_SUB(iptr); - x[jcol] -= x[irow] * Lval[i]; - iptr++; - } - } - - if ( nsupc > 1 ) { - solve_ops += nsupc * (nsupc - 1); -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("U", strlen("U")); - STRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - dtrsv_("L", "T", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } - } else { - /* Form x := inv(U')*x */ - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++) { - irow = U_SUB(i); - x[jcol] -= x[irow] * Uval[i]; - } - } - - solve_ops += nsupc * (nsupc + 1); - - if ( nsupc == 1 ) { - x[fsupc] /= Lval[luptr]; - } else { -#ifdef _CRAY - ftcs1 = _cptofcd("U", strlen("U")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("N", strlen("N")); - STRSV( ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - dtrsv_("U", "T", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } /* for k ... 
*/ - } - } - - stat->ops[SOLVE] += solve_ops; - SUPERLU_FREE(work); - return 0; -} - - - -/*! \brief Performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   sp_dgemv()  performs one of the matrix-vector operations   
    - *      y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,   
    - *   where alpha and beta are scalars, x and y are vectors and A is a
    - *   sparse A->nrow by A->ncol matrix.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   TRANS  - (input) char*
    - *            On entry, TRANS specifies the operation to be performed as   
    - *            follows:   
    - *               TRANS = 'N' or 'n'   y := alpha*A*x + beta*y.   
    - *               TRANS = 'T' or 't'   y := alpha*A'*x + beta*y.   
    - *               TRANS = 'C' or 'c'   y := alpha*A'*x + beta*y.   
    - *
    - *   ALPHA  - (input) double
    - *            On entry, ALPHA specifies the scalar alpha.   
    - *
    - *   A      - (input) SuperMatrix*
    - *            Matrix A with a sparse format, of dimension (A->nrow, A->ncol).
    - *            Currently, the type of A can be:
    - *                Stype = NC or NCP; Dtype = SLU_D; Mtype = GE. 
    - *            In the future, more general A can be handled.
    - *
    - *   X      - (input) double*, array of DIMENSION at least   
    - *            ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'   
    - *            and at least   
    - *            ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.   
    - *            Before entry, the incremented array X must contain the   
    - *            vector x.   
    - *
    - *   INCX   - (input) int
    - *            On entry, INCX specifies the increment for the elements of   
    - *            X. INCX must not be zero.   
    - *
    - *   BETA   - (input) double
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then Y need not be set on input.   
    - *
    - *   Y      - (output) double*,  array of DIMENSION at least   
    - *            ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'   
    - *            and at least   
    - *            ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.   
    - *            Before entry with BETA non-zero, the incremented array Y   
    - *            must contain the vector y. On exit, Y is overwritten by the 
    - *            updated vector y.
    - *	     
    - *   INCY   - (input) int
    - *            On entry, INCY specifies the increment for the elements of   
    - *            Y. INCY must not be zero.   
    - *
    - *   ==== Sparse Level 2 Blas routine.   
    - * 
    - */ - -int -sp_dgemv(char *trans, double alpha, SuperMatrix *A, double *x, - int incx, double beta, double *y, int incy) -{ - /* Local variables */ - NCformat *Astore; - double *Aval; - int info; - double temp; - int lenx, leny, i, j, irow; - int iy, jx, jy, kx, ky; - int notran; - - notran = lsame_(trans, "N"); - Astore = A->Store; - Aval = Astore->nzval; - - /* Test the input parameters */ - info = 0; - if ( !notran && !lsame_(trans, "T") && !lsame_(trans, "C")) info = 1; - else if ( A->nrow < 0 || A->ncol < 0 ) info = 3; - else if (incx == 0) info = 5; - else if (incy == 0) info = 8; - if (info != 0) { - xerbla_("sp_dgemv ", &info); - return 0; - } - - /* Quick return if possible. */ - if (A->nrow == 0 || A->ncol == 0 || (alpha == 0. && beta == 1.)) - return 0; - - /* Set LENX and LENY, the lengths of the vectors x and y, and set - up the start points in X and Y. */ - if (lsame_(trans, "N")) { - lenx = A->ncol; - leny = A->nrow; - } else { - lenx = A->nrow; - leny = A->ncol; - } - if (incx > 0) kx = 0; - else kx = - (lenx - 1) * incx; - if (incy > 0) ky = 0; - else ky = - (leny - 1) * incy; - - /* Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. */ - /* First form y := beta*y. */ - if (beta != 1.) { - if (incy == 1) { - if (beta == 0.) - for (i = 0; i < leny; ++i) y[i] = 0.; - else - for (i = 0; i < leny; ++i) y[i] = beta * y[i]; - } else { - iy = ky; - if (beta == 0.) - for (i = 0; i < leny; ++i) { - y[iy] = 0.; - iy += incy; - } - else - for (i = 0; i < leny; ++i) { - y[iy] = beta * y[iy]; - iy += incy; - } - } - } - - if (alpha == 0.) return 0; - - if ( notran ) { - /* Form y := alpha*A*x + y. */ - jx = kx; - if (incy == 1) { - for (j = 0; j < A->ncol; ++j) { - if (x[jx] != 0.) 
{ - temp = alpha * x[jx]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - y[irow] += temp * Aval[i]; - } - } - jx += incx; - } - } else { - ABORT("Not implemented."); - } - } else { - /* Form y := alpha*A'*x + y. */ - jy = ky; - if (incx == 1) { - for (j = 0; j < A->ncol; ++j) { - temp = 0.; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - temp += Aval[i] * x[irow]; - } - y[jy] += alpha * temp; - jy += incy; - } - } else { - ABORT("Not implemented."); - } - } - return 0; -} /* sp_dgemv */ - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsp_blas3.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsp_blas3.c deleted file mode 100644 index f0c7233e6b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dsp_blas3.c +++ /dev/null @@ -1,127 +0,0 @@ - -/*! @file dsp_blas3.c - * \brief Sparse BLAS3, using some dense BLAS3 operations - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -/* - * File name: sp_blas3.c - * Purpose: Sparse BLAS3, using some dense BLAS3 operations. - */ - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - * 
    - *   sp_d performs one of the matrix-matrix operations   
    - * 
    - *      C := alpha*op( A )*op( B ) + beta*C,   
    - * 
    - *   where  op( X ) is one of 
    - * 
    - *      op( X ) = X   or   op( X ) = X'   or   op( X ) = conjg( X' ),
    - * 
    - *   alpha and beta are scalars, and A, B and C are matrices, with op( A ) 
    - *   an m by k matrix,  op( B )  a  k by n matrix and  C an m by n matrix. 
    - *   
    - * 
    - *   Parameters   
    - *   ==========   
    - * 
    - *   TRANSA - (input) char*
    - *            On entry, TRANSA specifies the form of op( A ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSA = 'N' or 'n',  op( A ) = A.   
    - *               TRANSA = 'T' or 't',  op( A ) = A'.   
    - *               TRANSA = 'C' or 'c',  op( A ) = conjg( A' ).   
    - *            Unchanged on exit.   
    - * 
    - *   TRANSB - (input) char*
    - *            On entry, TRANSB specifies the form of op( B ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSB = 'N' or 'n',  op( B ) = B.   
    - *               TRANSB = 'T' or 't',  op( B ) = B'.   
    - *               TRANSB = 'C' or 'c',  op( B ) = conjg( B' ).   
    - *            Unchanged on exit.   
    - * 
    - *   M      - (input) int   
    - *            On entry,  M  specifies  the number of rows of the matrix 
    - *	     op( A ) and of the matrix C.  M must be at least zero. 
    - *	     Unchanged on exit.   
    - * 
    - *   N      - (input) int
    - *            On entry,  N specifies the number of columns of the matrix 
    - *	     op( B ) and the number of columns of the matrix C. N must be 
    - *	     at least zero.
    - *	     Unchanged on exit.   
    - * 
    - *   K      - (input) int
    - *            On entry, K specifies the number of columns of the matrix 
    - *	     op( A ) and the number of rows of the matrix op( B ). K must 
    - *	     be at least  zero.   
    - *           Unchanged on exit.
    - *      
    - *   ALPHA  - (input) double
    - *            On entry, ALPHA specifies the scalar alpha.   
    - * 
    - *   A      - (input) SuperMatrix*
    - *            Matrix A with a sparse format, of dimension (A->nrow, A->ncol).
    - *            Currently, the type of A can be:
    - *                Stype = NC or NCP; Dtype = SLU_D; Mtype = GE. 
    - *            In the future, more general A can be handled.
    - * 
    - *   B      - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is 
    - *            n when TRANSB = 'N' or 'n',  and is  k otherwise.   
    - *            Before entry with  TRANSB = 'N' or 'n',  the leading k by n 
    - *            part of the array B must contain the matrix B, otherwise 
    - *            the leading n by k part of the array B must contain the 
    - *            matrix B.   
    - *            Unchanged on exit.   
    - * 
    - *   LDB    - (input) int
    - *            On entry, LDB specifies the first dimension of B as declared 
    - *            in the calling (sub) program. LDB must be at least max( 1, n ).  
    - *            Unchanged on exit.   
    - * 
    - *   BETA   - (input) double
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then C need not be set on input.   
    - *  
    - *   C      - DOUBLE PRECISION array of DIMENSION ( LDC, n ).   
    - *            Before entry, the leading m by n part of the array C must 
    - *            contain the matrix C,  except when beta is zero, in which 
    - *            case C need not be set on entry.   
    - *            On exit, the array C is overwritten by the m by n matrix 
    - *	     ( alpha*op( A )*B + beta*C ).   
    - *  
    - *   LDC    - (input) int
    - *            On entry, LDC specifies the first dimension of C as declared 
    - *            in the calling (sub)program. LDC must be at least max(1,m).   
    - *            Unchanged on exit.   
    - *  
    - *   ==== Sparse Level 3 Blas routine.   
    - * 
    - */ - -int -sp_dgemm(char *transa, char *transb, int m, int n, int k, - double alpha, SuperMatrix *A, double *b, int ldb, - double beta, double *c, int ldc) -{ - int incx = 1, incy = 1; - int j; - - for (j = 0; j < n; ++j) { - sp_dgemv(transa, alpha, A, &b[ldb*j], incx, beta, &c[ldc*j], incy); - } - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dutil.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dutil.c deleted file mode 100644 index 807ff2fb12..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dutil.c +++ /dev/null @@ -1,471 +0,0 @@ - -/*! @file dutil.c - * \brief Matrix utility functions - * - *
    - * -- SuperLU routine (version 3.1) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * August 1, 2008
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include "slu_ddefs.h" - -void -dCreate_CompCol_Matrix(SuperMatrix *A, int m, int n, int nnz, - double *nzval, int *rowind, int *colptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NCformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NCformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->rowind = rowind; - Astore->colptr = colptr; -} - -void -dCreate_CompRow_Matrix(SuperMatrix *A, int m, int n, int nnz, - double *nzval, int *colind, int *rowptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NRformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NRformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->colind = colind; - Astore->rowptr = rowptr; -} - -/*! \brief Copy matrix A into matrix B. 
*/ -void -dCopy_CompCol_Matrix(SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore, *Bstore; - int ncol, nnz, i; - - B->Stype = A->Stype; - B->Dtype = A->Dtype; - B->Mtype = A->Mtype; - B->nrow = A->nrow;; - B->ncol = ncol = A->ncol; - Astore = (NCformat *) A->Store; - Bstore = (NCformat *) B->Store; - Bstore->nnz = nnz = Astore->nnz; - for (i = 0; i < nnz; ++i) - ((double *)Bstore->nzval)[i] = ((double *)Astore->nzval)[i]; - for (i = 0; i < nnz; ++i) Bstore->rowind[i] = Astore->rowind[i]; - for (i = 0; i <= ncol; ++i) Bstore->colptr[i] = Astore->colptr[i]; -} - - -void -dCreate_Dense_Matrix(SuperMatrix *X, int m, int n, double *x, int ldx, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - DNformat *Xstore; - - X->Stype = stype; - X->Dtype = dtype; - X->Mtype = mtype; - X->nrow = m; - X->ncol = n; - X->Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !(X->Store) ) ABORT("SUPERLU_MALLOC fails for X->Store"); - Xstore = (DNformat *) X->Store; - Xstore->lda = ldx; - Xstore->nzval = (double *) x; -} - -void -dCopy_Dense_Matrix(int M, int N, double *X, int ldx, - double *Y, int ldy) -{ -/*! \brief Copies a two-dimensional matrix X to another matrix Y. 
- */ - int i, j; - - for (j = 0; j < N; ++j) - for (i = 0; i < M; ++i) - Y[i + j*ldy] = X[i + j*ldx]; -} - -void -dCreate_SuperNode_Matrix(SuperMatrix *L, int m, int n, int nnz, - double *nzval, int *nzval_colptr, int *rowind, - int *rowind_colptr, int *col_to_sup, int *sup_to_col, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - SCformat *Lstore; - - L->Stype = stype; - L->Dtype = dtype; - L->Mtype = mtype; - L->nrow = m; - L->ncol = n; - L->Store = (void *) SUPERLU_MALLOC( sizeof(SCformat) ); - if ( !(L->Store) ) ABORT("SUPERLU_MALLOC fails for L->Store"); - Lstore = L->Store; - Lstore->nnz = nnz; - Lstore->nsuper = col_to_sup[n]; - Lstore->nzval = nzval; - Lstore->nzval_colptr = nzval_colptr; - Lstore->rowind = rowind; - Lstore->rowind_colptr = rowind_colptr; - Lstore->col_to_sup = col_to_sup; - Lstore->sup_to_col = sup_to_col; - -} - - -/*! \brief Convert a row compressed storage into a column compressed storage. - */ -void -dCompRow_to_CompCol(int m, int n, int nnz, - double *a, int *colind, int *rowptr, - double **at, int **rowind, int **colptr) -{ - register int i, j, col, relpos; - int *marker; - - /* Allocate storage for another copy of the matrix. */ - *at = (double *) doubleMalloc(nnz); - *rowind = (int *) intMalloc(nnz); - *colptr = (int *) intMalloc(n+1); - marker = (int *) intCalloc(n); - - /* Get counts of each column of A, and set up column pointers */ - for (i = 0; i < m; ++i) - for (j = rowptr[i]; j < rowptr[i+1]; ++j) ++marker[colind[j]]; - (*colptr)[0] = 0; - for (j = 0; j < n; ++j) { - (*colptr)[j+1] = (*colptr)[j] + marker[j]; - marker[j] = (*colptr)[j]; - } - - /* Transfer the matrix into the compressed column storage. 
*/ - for (i = 0; i < m; ++i) { - for (j = rowptr[i]; j < rowptr[i+1]; ++j) { - col = colind[j]; - relpos = marker[col]; - (*rowind)[relpos] = i; - (*at)[relpos] = a[j]; - ++marker[col]; - } - } - - SUPERLU_FREE(marker); -} - - -void -dPrint_CompCol_Matrix(char *what, SuperMatrix *A) -{ - NCformat *Astore; - register int i,n; - double *dp; - - printf("\nCompCol matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (NCformat *) A->Store; - dp = (double *) Astore->nzval; - printf("nrow %d, ncol %d, nnz %d\n", A->nrow,A->ncol,Astore->nnz); - printf("nzval: "); - for (i = 0; i < Astore->colptr[n]; ++i) printf("%f ", dp[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->colptr[n]; ++i) printf("%d ", Astore->rowind[i]); - printf("\ncolptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->colptr[i]); - printf("\n"); - fflush(stdout); -} - -void -dPrint_SuperNode_Matrix(char *what, SuperMatrix *A) -{ - SCformat *Astore; - register int i, j, k, c, d, n, nsup; - double *dp; - int *col_to_sup, *sup_to_col, *rowind, *rowind_colptr; - - printf("\nSuperNode matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (SCformat *) A->Store; - dp = (double *) Astore->nzval; - col_to_sup = Astore->col_to_sup; - sup_to_col = Astore->sup_to_col; - rowind_colptr = Astore->rowind_colptr; - rowind = Astore->rowind; - printf("nrow %d, ncol %d, nnz %d, nsuper %d\n", - A->nrow,A->ncol,Astore->nnz,Astore->nsuper); - printf("nzval:\n"); - for (k = 0; k <= Astore->nsuper; ++k) { - c = sup_to_col[k]; - nsup = sup_to_col[k+1] - c; - for (j = c; j < c + nsup; ++j) { - d = Astore->nzval_colptr[j]; - for (i = rowind_colptr[c]; i < rowind_colptr[c+1]; ++i) { - printf("%d\t%d\t%e\n", rowind[i], j, dp[d++]); - } - } - } -#if 0 - for (i = 0; i < Astore->nzval_colptr[n]; ++i) printf("%f ", dp[i]); -#endif - printf("\nnzval_colptr: "); - for (i = 0; i <= n; ++i) printf("%d 
", Astore->nzval_colptr[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->rowind_colptr[n]; ++i) - printf("%d ", Astore->rowind[i]); - printf("\nrowind_colptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->rowind_colptr[i]); - printf("\ncol_to_sup: "); - for (i = 0; i < n; ++i) printf("%d ", col_to_sup[i]); - printf("\nsup_to_col: "); - for (i = 0; i <= Astore->nsuper+1; ++i) - printf("%d ", sup_to_col[i]); - printf("\n"); - fflush(stdout); -} - -void -dPrint_Dense_Matrix(char *what, SuperMatrix *A) -{ - DNformat *Astore = (DNformat *) A->Store; - register int i, j, lda = Astore->lda; - double *dp; - - printf("\nDense matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - dp = (double *) Astore->nzval; - printf("nrow %d, ncol %d, lda %d\n", A->nrow,A->ncol,lda); - printf("\nnzval: "); - for (j = 0; j < A->ncol; ++j) { - for (i = 0; i < A->nrow; ++i) printf("%f ", dp[i + j*lda]); - printf("\n"); - } - printf("\n"); - fflush(stdout); -} - -/*! \brief Diagnostic print of column "jcol" in the U/L factor. 
- */ -void -dprint_lu_col(char *msg, int jcol, int pivrow, int *xprune, GlobalLU_t *Glu) -{ - int i, k, fsupc; - int *xsup, *supno; - int *xlsub, *lsub; - double *lusup; - int *xlusup; - double *ucol; - int *usub, *xusub; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - - printf("%s", msg); - printf("col %d: pivrow %d, supno %d, xprune %d\n", - jcol, pivrow, supno[jcol], xprune[jcol]); - - printf("\tU-col:\n"); - for (i = xusub[jcol]; i < xusub[jcol+1]; i++) - printf("\t%d%10.4f\n", usub[i], ucol[i]); - printf("\tL-col in rectangular snode:\n"); - fsupc = xsup[supno[jcol]]; /* first col of the snode */ - i = xlsub[fsupc]; - k = xlusup[jcol]; - while ( i < xlsub[fsupc+1] && k < xlusup[jcol+1] ) { - printf("\t%d\t%10.4f\n", lsub[i], lusup[k]); - i++; k++; - } - fflush(stdout); -} - - -/*! \brief Check whether tempv[] == 0. This should be true before and after calling any numeric routines, i.e., "panel_bmod" and "column_bmod". - */ -void dcheck_tempv(int n, double *tempv) -{ - int i; - - for (i = 0; i < n; i++) { - if (tempv[i] != 0.0) - { - fprintf(stderr,"tempv[%d] = %f\n", i,tempv[i]); - ABORT("dcheck_tempv"); - } - } -} - - -void -dGenXtrue(int n, int nrhs, double *x, int ldx) -{ - int i, j; - for (j = 0; j < nrhs; ++j) - for (i = 0; i < n; ++i) { - x[i + j*ldx] = 1.0;/* + (double)(i+1.)/n;*/ - } -} - -/*! 
\brief Let rhs[i] = sum of i-th row of A, so the solution vector is all 1's - */ -void -dFillRHS(trans_t trans, int nrhs, double *x, int ldx, - SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore; - double *Aval; - DNformat *Bstore; - double *rhs; - double one = 1.0; - double zero = 0.0; - int ldc; - char transc[1]; - - Astore = A->Store; - Aval = (double *) Astore->nzval; - Bstore = B->Store; - rhs = Bstore->nzval; - ldc = Bstore->lda; - - if ( trans == NOTRANS ) *(unsigned char *)transc = 'N'; - else *(unsigned char *)transc = 'T'; - - sp_dgemm(transc, "N", A->nrow, nrhs, A->ncol, one, A, - x, ldx, zero, rhs, ldc); - -} - -/*! \brief Fills a double precision array with a given value. - */ -void -dfill(double *a, int alen, double dval) -{ - register int i; - for (i = 0; i < alen; i++) a[i] = dval; -} - - - -/*! \brief Check the inf-norm of the error vector - */ -void dinf_norm_error(int nrhs, SuperMatrix *X, double *xtrue) -{ - DNformat *Xstore; - double err, xnorm; - double *Xmat, *soln_work; - int i, j; - - Xstore = X->Store; - Xmat = Xstore->nzval; - - for (j = 0; j < nrhs; j++) { - soln_work = &Xmat[j*Xstore->lda]; - err = xnorm = 0.0; - for (i = 0; i < X->nrow; i++) { - err = SUPERLU_MAX(err, fabs(soln_work[i] - xtrue[i])); - xnorm = SUPERLU_MAX(xnorm, fabs(soln_work[i])); - } - err = err / xnorm; - printf("||X - Xtrue||/||X|| = %e\n", err); - } -} - - - -/*! \brief Print performance of the code. */ -void -dPrintPerf(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage, - double rpg, double rcond, double *ferr, - double *berr, char *equed, SuperLUStat_t *stat) -{ - SCformat *Lstore; - NCformat *Ustore; - double *utime; - flops_t *ops; - - utime = stat->utime; - ops = stat->ops; - - if ( utime[FACT] != 0. ) - printf("Factor flops = %e\tMflops = %8.2f\n", ops[FACT], - ops[FACT]*1e-6/utime[FACT]); - printf("Identify relaxed snodes = %8.2f\n", utime[RELAX]); - if ( utime[SOLVE] != 0. 
) - printf("Solve flops = %.0f, Mflops = %8.2f\n", ops[SOLVE], - ops[SOLVE]*1e-6/utime[SOLVE]); - - Lstore = (SCformat *) L->Store; - Ustore = (NCformat *) U->Store; - printf("\tNo of nonzeros in factor L = %d\n", Lstore->nnz); - printf("\tNo of nonzeros in factor U = %d\n", Ustore->nnz); - printf("\tNo of nonzeros in L+U = %d\n", Lstore->nnz + Ustore->nnz); - - printf("L\\U MB %.3f\ttotal MB needed %.3f\n", - mem_usage->for_lu/1e6, mem_usage->total_needed/1e6); - printf("Number of memory expansions: %d\n", stat->expansions); - - printf("\tFactor\tMflops\tSolve\tMflops\tEtree\tEquil\tRcond\tRefine\n"); - printf("PERF:%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f\n", - utime[FACT], ops[FACT]*1e-6/utime[FACT], - utime[SOLVE], ops[SOLVE]*1e-6/utime[SOLVE], - utime[ETREE], utime[EQUIL], utime[RCOND], utime[REFINE]); - - printf("\tRpg\t\tRcond\t\tFerr\t\tBerr\t\tEquil?\n"); - printf("NUM:\t%e\t%e\t%e\t%e\t%s\n", - rpg, rcond, ferr[0], berr[0], equed); - -} - - - - -print_double_vec(char *what, int n, double *vec) -{ - int i; - printf("%s: n %d\n", what, n); - for (i = 0; i < n; ++i) printf("%d\t%f\n", i, vec[i]); - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dzsum1.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dzsum1.c deleted file mode 100644 index ffaac7a692..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/dzsum1.c +++ /dev/null @@ -1,94 +0,0 @@ -/*! @file dzsum1.c - * \brief Takes sum of the absolute values of a complex vector and returns a double precision result - * - *
    - *     -- LAPACK auxiliary routine (version 2.0) --   
    - *     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *     Courant Institute, Argonne National Lab, and Rice University   
    - *     October 31, 1992   
    - * 
    - */ - -#include "slu_dcomplex.h" -#include "slu_Cnames.h" - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    DZSUM1 takes the sum of the absolute values of a complex   
    -    vector and returns a double precision result.   
    -
    -    Based on DZASUM from the Level 1 BLAS.   
    -    The change is to use the 'genuine' absolute value.   
    -
    -    Contributed by Nick Higham for use with ZLACON.   
    -
    -    Arguments   
    -    =========   
    -
    -    N       (input) INT   
    -            The number of elements in the vector CX.   
    -
    -    CX      (input) COMPLEX*16 array, dimension (N)   
    -            The vector whose elements will be summed.   
    -
    -    INCX    (input) INT   
    -            The spacing between successive values of CX.  INCX > 0.   
    -
    -    ===================================================================== 
    -
    -*/ -double dzsum1_(int *n, doublecomplex *cx, int *incx) -{ - - /* Builtin functions */ - double z_abs(doublecomplex *); - - /* Local variables */ - int i, nincx; - double stemp; - - -#define CX(I) cx[(I)-1] - - stemp = 0.; - if (*n <= 0) { - return stemp; - } - if (*incx == 1) { - goto L20; - } - - /* CODE FOR INCREMENT NOT EQUAL TO 1 */ - - nincx = *n * *incx; - for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) { - - /* NEXT LINE MODIFIED. */ - - stemp += z_abs(&CX(i)); -/* L10: */ - } - - return stemp; - - /* CODE FOR INCREMENT EQUAL TO 1 */ - -L20: - for (i = 1; i <= *n; ++i) { - - /* NEXT LINE MODIFIED. */ - - stemp += z_abs(&CX(i)); -/* L30: */ - } - - return stemp; - - /* End of DZSUM1 */ - -} /* dzsum1_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/get_perm_c.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/get_perm_c.c deleted file mode 100644 index 7688773443..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/get_perm_c.c +++ /dev/null @@ -1,458 +0,0 @@ -/*! @file get_perm_c.c - * \brief Matrix permutation operations - * - *
    - * -- SuperLU routine (version 3.1) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * August 1, 2008
    - * 
    - */ -#include "slu_ddefs.h" -#include "colamd.h" - -extern int genmmd_(int *, int *, int *, int *, int *, int *, int *, - int *, int *, int *, int *, int *); - -void -get_colamd( - const int m, /* number of rows in matrix A. */ - const int n, /* number of columns in matrix A. */ - const int nnz,/* number of nonzeros in matrix A. */ - int *colptr, /* column pointer of size n+1 for matrix A. */ - int *rowind, /* row indices of size nz for matrix A. */ - int *perm_c /* out - the column permutation vector. */ - ) -{ - int Alen, *A, i, info, *p; - double knobs[COLAMD_KNOBS]; - int stats[COLAMD_STATS]; - - Alen = colamd_recommended(nnz, m, n); - - colamd_set_defaults(knobs); - - if (!(A = (int *) SUPERLU_MALLOC(Alen * sizeof(int))) ) - ABORT("Malloc fails for A[]"); - if (!(p = (int *) SUPERLU_MALLOC((n+1) * sizeof(int))) ) - ABORT("Malloc fails for p[]"); - for (i = 0; i <= n; ++i) p[i] = colptr[i]; - for (i = 0; i < nnz; ++i) A[i] = rowind[i]; - info = colamd(m, n, Alen, A, p, knobs, stats); - if ( info == FALSE ) ABORT("COLAMD failed"); - - for (i = 0; i < n; ++i) perm_c[p[i]] = i; - - SUPERLU_FREE(A); - SUPERLU_FREE(p); -} -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * Form the structure of A'*A. A is an m-by-n matrix in column oriented
    - * format represented by (colptr, rowind). The output A'*A is in column
    - * oriented format (symmetrically, also row oriented), represented by
    - * (ata_colptr, ata_rowind).
    - *
    - * This routine is modified from GETATA routine by Tim Davis.
    - * The complexity of this algorithm is: SUM_{i=1,m} r(i)^2,
    - * i.e., the sum of the square of the row counts.
    - *
    - * Questions
    - * =========
    - *     o  Do I need to withhold the *dense* rows?
    - *     o  How do I know the number of nonzeros in A'*A?
    - * 
    - */ -void -getata( - const int m, /* number of rows in matrix A. */ - const int n, /* number of columns in matrix A. */ - const int nz, /* number of nonzeros in matrix A */ - int *colptr, /* column pointer of size n+1 for matrix A. */ - int *rowind, /* row indices of size nz for matrix A. */ - int *atanz, /* out - on exit, returns the actual number of - nonzeros in matrix A'*A. */ - int **ata_colptr, /* out - size n+1 */ - int **ata_rowind /* out - size *atanz */ - ) -{ - register int i, j, k, col, num_nz, ti, trow; - int *marker, *b_colptr, *b_rowind; - int *t_colptr, *t_rowind; /* a column oriented form of T = A' */ - - if ( !(marker = (int*) SUPERLU_MALLOC((SUPERLU_MAX(m,n)+1)*sizeof(int))) ) - ABORT("SUPERLU_MALLOC fails for marker[]"); - if ( !(t_colptr = (int*) SUPERLU_MALLOC((m+1) * sizeof(int))) ) - ABORT("SUPERLU_MALLOC t_colptr[]"); - if ( !(t_rowind = (int*) SUPERLU_MALLOC(nz * sizeof(int))) ) - ABORT("SUPERLU_MALLOC fails for t_rowind[]"); - - - /* Get counts of each column of T, and set up column pointers */ - for (i = 0; i < m; ++i) marker[i] = 0; - for (j = 0; j < n; ++j) { - for (i = colptr[j]; i < colptr[j+1]; ++i) - ++marker[rowind[i]]; - } - t_colptr[0] = 0; - for (i = 0; i < m; ++i) { - t_colptr[i+1] = t_colptr[i] + marker[i]; - marker[i] = t_colptr[i]; - } - - /* Transpose the matrix from A to T */ - for (j = 0; j < n; ++j) - for (i = colptr[j]; i < colptr[j+1]; ++i) { - col = rowind[i]; - t_rowind[marker[col]] = j; - ++marker[col]; - } - - - /* ---------------------------------------------------------------- - compute B = T * A, where column j of B is: - - Struct (B_*j) = UNION ( Struct (T_*k) ) - A_kj != 0 - - do not include the diagonal entry - - ( Partition A as: A = (A_*1, ..., A_*n) - Then B = T * A = (T * A_*1, ..., T * A_*n), where - T * A_*j = (T_*1, ..., T_*m) * A_*j. 
) - ---------------------------------------------------------------- */ - - /* Zero the diagonal flag */ - for (i = 0; i < n; ++i) marker[i] = -1; - - /* First pass determines number of nonzeros in B */ - num_nz = 0; - for (j = 0; j < n; ++j) { - /* Flag the diagonal so it's not included in the B matrix */ - marker[j] = j; - - for (i = colptr[j]; i < colptr[j+1]; ++i) { - /* A_kj is nonzero, add pattern of column T_*k to B_*j */ - k = rowind[i]; - for (ti = t_colptr[k]; ti < t_colptr[k+1]; ++ti) { - trow = t_rowind[ti]; - if ( marker[trow] != j ) { - marker[trow] = j; - num_nz++; - } - } - } - } - *atanz = num_nz; - - /* Allocate storage for A'*A */ - if ( !(*ata_colptr = (int*) SUPERLU_MALLOC( (n+1) * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails for ata_colptr[]"); - if ( *atanz ) { - if ( !(*ata_rowind = (int*) SUPERLU_MALLOC( *atanz * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails for ata_rowind[]"); - } - b_colptr = *ata_colptr; /* aliasing */ - b_rowind = *ata_rowind; - - /* Zero the diagonal flag */ - for (i = 0; i < n; ++i) marker[i] = -1; - - /* Compute each column of B, one at a time */ - num_nz = 0; - for (j = 0; j < n; ++j) { - b_colptr[j] = num_nz; - - /* Flag the diagonal so it's not included in the B matrix */ - marker[j] = j; - - for (i = colptr[j]; i < colptr[j+1]; ++i) { - /* A_kj is nonzero, add pattern of column T_*k to B_*j */ - k = rowind[i]; - for (ti = t_colptr[k]; ti < t_colptr[k+1]; ++ti) { - trow = t_rowind[ti]; - if ( marker[trow] != j ) { - marker[trow] = j; - b_rowind[num_nz++] = trow; - } - } - } - } - b_colptr[n] = num_nz; - - SUPERLU_FREE(marker); - SUPERLU_FREE(t_colptr); - SUPERLU_FREE(t_rowind); -} - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * Form the structure of A'+A. A is an n-by-n matrix in column oriented
    - * format represented by (colptr, rowind). The output A'+A is in column
    - * oriented format (symmetrically, also row oriented), represented by
    - * (b_colptr, b_rowind).
    - * 
    - */ -void -at_plus_a( - const int n, /* number of columns in matrix A. */ - const int nz, /* number of nonzeros in matrix A */ - int *colptr, /* column pointer of size n+1 for matrix A. */ - int *rowind, /* row indices of size nz for matrix A. */ - int *bnz, /* out - on exit, returns the actual number of - nonzeros in matrix A'*A. */ - int **b_colptr, /* out - size n+1 */ - int **b_rowind /* out - size *bnz */ - ) -{ - register int i, j, k, col, num_nz; - int *t_colptr, *t_rowind; /* a column oriented form of T = A' */ - int *marker; - - if ( !(marker = (int*) SUPERLU_MALLOC( n * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails for marker[]"); - if ( !(t_colptr = (int*) SUPERLU_MALLOC( (n+1) * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails for t_colptr[]"); - if ( !(t_rowind = (int*) SUPERLU_MALLOC( nz * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails t_rowind[]"); - - - /* Get counts of each column of T, and set up column pointers */ - for (i = 0; i < n; ++i) marker[i] = 0; - for (j = 0; j < n; ++j) { - for (i = colptr[j]; i < colptr[j+1]; ++i) - ++marker[rowind[i]]; - } - t_colptr[0] = 0; - for (i = 0; i < n; ++i) { - t_colptr[i+1] = t_colptr[i] + marker[i]; - marker[i] = t_colptr[i]; - } - - /* Transpose the matrix from A to T */ - for (j = 0; j < n; ++j) - for (i = colptr[j]; i < colptr[j+1]; ++i) { - col = rowind[i]; - t_rowind[marker[col]] = j; - ++marker[col]; - } - - - /* ---------------------------------------------------------------- - compute B = A + T, where column j of B is: - - Struct (B_*j) = Struct (A_*k) UNION Struct (T_*k) - - do not include the diagonal entry - ---------------------------------------------------------------- */ - - /* Zero the diagonal flag */ - for (i = 0; i < n; ++i) marker[i] = -1; - - /* First pass determines number of nonzeros in B */ - num_nz = 0; - for (j = 0; j < n; ++j) { - /* Flag the diagonal so it's not included in the B matrix */ - marker[j] = j; - - /* Add pattern of column A_*k to B_*j */ - for (i = colptr[j]; i 
< colptr[j+1]; ++i) { - k = rowind[i]; - if ( marker[k] != j ) { - marker[k] = j; - ++num_nz; - } - } - - /* Add pattern of column T_*k to B_*j */ - for (i = t_colptr[j]; i < t_colptr[j+1]; ++i) { - k = t_rowind[i]; - if ( marker[k] != j ) { - marker[k] = j; - ++num_nz; - } - } - } - *bnz = num_nz; - - /* Allocate storage for A+A' */ - if ( !(*b_colptr = (int*) SUPERLU_MALLOC( (n+1) * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails for b_colptr[]"); - if ( *bnz) { - if ( !(*b_rowind = (int*) SUPERLU_MALLOC( *bnz * sizeof(int)) ) ) - ABORT("SUPERLU_MALLOC fails for b_rowind[]"); - } - - /* Zero the diagonal flag */ - for (i = 0; i < n; ++i) marker[i] = -1; - - /* Compute each column of B, one at a time */ - num_nz = 0; - for (j = 0; j < n; ++j) { - (*b_colptr)[j] = num_nz; - - /* Flag the diagonal so it's not included in the B matrix */ - marker[j] = j; - - /* Add pattern of column A_*k to B_*j */ - for (i = colptr[j]; i < colptr[j+1]; ++i) { - k = rowind[i]; - if ( marker[k] != j ) { - marker[k] = j; - (*b_rowind)[num_nz++] = k; - } - } - - /* Add pattern of column T_*k to B_*j */ - for (i = t_colptr[j]; i < t_colptr[j+1]; ++i) { - k = t_rowind[i]; - if ( marker[k] != j ) { - marker[k] = j; - (*b_rowind)[num_nz++] = k; - } - } - } - (*b_colptr)[n] = num_nz; - - SUPERLU_FREE(marker); - SUPERLU_FREE(t_colptr); - SUPERLU_FREE(t_rowind); -} - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * GET_PERM_C obtains a permutation matrix Pc, by applying the multiple
    - * minimum degree ordering code by Joseph Liu to matrix A'*A or A+A'.
    - * or using approximate minimum degree column ordering by Davis et. al.
    - * The LU factorization of A*Pc tends to have less fill than the LU 
    - * factorization of A.
    - *
    - * Arguments
    - * =========
    - *
    - * ispec   (input) int
    - *         Specifies the type of column ordering to reduce fill:
    - *         = 1: minimum degree on the structure of A^T * A
    - *         = 2: minimum degree on the structure of A^T + A
    - *         = 3: approximate minimum degree for unsymmetric matrices
    - *         If ispec == 0, the natural ordering (i.e., Pc = I) is returned.
    - * 
    - * A       (input) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of the linear equations is A->nrow. Currently, the type of A 
    - *         can be: Stype = NC; Dtype = _D; Mtype = GE. In the future,
    - *         more general A can be handled.
    - *
    - * perm_c  (output) int*
    - *	   Column permutation vector of size A->ncol, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - * 
    - */ -void -get_perm_c(int ispec, SuperMatrix *A, int *perm_c) -{ - NCformat *Astore = A->Store; - int m, n, bnz = 0, *b_colptr, i; - int delta, maxint, nofsub, *invp; - int *b_rowind, *dhead, *qsize, *llist, *marker; - double t, SuperLU_timer_(); - - m = A->nrow; - n = A->ncol; - - t = SuperLU_timer_(); - switch ( ispec ) { - case 0: /* Natural ordering */ - for (i = 0; i < n; ++i) perm_c[i] = i; -#if ( PRNTlevel>=1 ) - printf("Use natural column ordering.\n"); -#endif - return; - case 1: /* Minimum degree ordering on A'*A */ - getata(m, n, Astore->nnz, Astore->colptr, Astore->rowind, - &bnz, &b_colptr, &b_rowind); -#if ( PRNTlevel>=1 ) - printf("Use minimum degree ordering on A'*A.\n"); -#endif - t = SuperLU_timer_() - t; - /*printf("Form A'*A time = %8.3f\n", t);*/ - break; - case 2: /* Minimum degree ordering on A'+A */ - if ( m != n ) ABORT("Matrix is not square"); - at_plus_a(n, Astore->nnz, Astore->colptr, Astore->rowind, - &bnz, &b_colptr, &b_rowind); -#if ( PRNTlevel>=1 ) - printf("Use minimum degree ordering on A'+A.\n"); -#endif - t = SuperLU_timer_() - t; - /*printf("Form A'+A time = %8.3f\n", t);*/ - break; - case 3: /* Approximate minimum degree column ordering. */ - get_colamd(m, n, Astore->nnz, Astore->colptr, Astore->rowind, - perm_c); -#if ( PRNTlevel>=1 ) - printf(".. Use approximate minimum degree column ordering.\n"); -#endif - return; - default: - ABORT("Invalid ISPEC"); - } - - if ( bnz != 0 ) { - t = SuperLU_timer_(); - - /* Initialize and allocate storage for GENMMD. */ - delta = 0; /* DELTA is a parameter to allow the choice of nodes - whose degree <= min-degree + DELTA. 
*/ - maxint = 2147483647; /* 2**31 - 1 */ - invp = (int *) SUPERLU_MALLOC((n+delta)*sizeof(int)); - if ( !invp ) ABORT("SUPERLU_MALLOC fails for invp."); - dhead = (int *) SUPERLU_MALLOC((n+delta)*sizeof(int)); - if ( !dhead ) ABORT("SUPERLU_MALLOC fails for dhead."); - qsize = (int *) SUPERLU_MALLOC((n+delta)*sizeof(int)); - if ( !qsize ) ABORT("SUPERLU_MALLOC fails for qsize."); - llist = (int *) SUPERLU_MALLOC(n*sizeof(int)); - if ( !llist ) ABORT("SUPERLU_MALLOC fails for llist."); - marker = (int *) SUPERLU_MALLOC(n*sizeof(int)); - if ( !marker ) ABORT("SUPERLU_MALLOC fails for marker."); - - /* Transform adjacency list into 1-based indexing required by GENMMD.*/ - for (i = 0; i <= n; ++i) ++b_colptr[i]; - for (i = 0; i < bnz; ++i) ++b_rowind[i]; - - genmmd_(&n, b_colptr, b_rowind, perm_c, invp, &delta, dhead, - qsize, llist, marker, &maxint, &nofsub); - - /* Transform perm_c into 0-based indexing. */ - for (i = 0; i < n; ++i) --perm_c[i]; - - SUPERLU_FREE(invp); - SUPERLU_FREE(dhead); - SUPERLU_FREE(qsize); - SUPERLU_FREE(llist); - SUPERLU_FREE(marker); - SUPERLU_FREE(b_rowind); - - t = SuperLU_timer_() - t; - /* printf("call GENMMD time = %8.3f\n", t);*/ - - } else { /* Empty adjacency structure */ - for (i = 0; i < n; ++i) perm_c[i] = i; - } - - SUPERLU_FREE(b_colptr); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/heap_relax_snode.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/heap_relax_snode.c deleted file mode 100644 index 1dafd82d89..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/heap_relax_snode.c +++ /dev/null @@ -1,124 +0,0 @@ -/*! @file heap_relax_snode.c - * \brief Identify the initial relaxed supernodes - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    relax_snode() - Identify the initial relaxed supernodes, assuming that 
    - *    the matrix has been reordered according to the postorder of the etree.
    - * 
    - */ - -void -heap_relax_snode ( - const int n, - int *et, /* column elimination tree */ - const int relax_columns, /* max no of columns allowed in a - relaxed snode */ - int *descendants, /* no of descendants of each node - in the etree */ - int *relax_end /* last column in a supernode */ - ) -{ - register int i, j, k, l, parent; - register int snode_start; /* beginning of a snode */ - int *et_save, *post, *inv_post, *iwork; - int nsuper_et = 0, nsuper_et_post = 0; - - /* The etree may not be postordered, but is heap ordered. */ - - iwork = (int*) intMalloc(3*n+2); - if ( !iwork ) ABORT("SUPERLU_MALLOC fails for iwork[]"); - inv_post = iwork + n+1; - et_save = inv_post + n+1; - - /* Post order etree */ - post = (int *) TreePostorder(n, et); - for (i = 0; i < n+1; ++i) inv_post[post[i]] = i; - - /* Renumber etree in postorder */ - for (i = 0; i < n; ++i) { - iwork[post[i]] = post[et[i]]; - et_save[i] = et[i]; /* Save the original etree */ - } - for (i = 0; i < n; ++i) et[i] = iwork[i]; - - /* Compute the number of descendants of each node in the etree */ - ifill (relax_end, n, EMPTY); - for (j = 0; j < n; j++) descendants[j] = 0; - for (j = 0; j < n; j++) { - parent = et[j]; - if ( parent != n ) /* not the dummy root */ - descendants[parent] += descendants[j] + 1; - } - - /* Identify the relaxed supernodes by postorder traversal of the etree. */ - for (j = 0; j < n; ) { - parent = et[j]; - snode_start = j; - while ( parent != n && descendants[parent] < relax_columns ) { - j = parent; - parent = et[j]; - } - /* Found a supernode in postordered etree; j is the last column. 
*/ - ++nsuper_et_post; - k = n; - for (i = snode_start; i <= j; ++i) - k = SUPERLU_MIN(k, inv_post[i]); - l = inv_post[j]; - if ( (l - k) == (j - snode_start) ) { - /* It's also a supernode in the original etree */ - relax_end[k] = l; /* Last column is recorded */ - ++nsuper_et; - } else { - for (i = snode_start; i <= j; ++i) { - l = inv_post[i]; - if ( descendants[i] == 0 ) { - relax_end[l] = l; - ++nsuper_et; - } - } - } - j++; - /* Search for a new leaf */ - while ( descendants[j] != 0 && j < n ) j++; - } - -#if ( PRNTlevel>=1 ) - printf(".. heap_snode_relax:\n" - "\tNo of relaxed snodes in postordered etree:\t%d\n" - "\tNo of relaxed snodes in original etree:\t%d\n", - nsuper_et_post, nsuper_et); -#endif - - /* Recover the original etree */ - for (i = 0; i < n; ++i) et[i] = et_save[i]; - - SUPERLU_FREE(post); - SUPERLU_FREE(iwork); -} - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/html_mainpage.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/html_mainpage.h deleted file mode 100644 index 5e25e642e0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/html_mainpage.h +++ /dev/null @@ -1,9 +0,0 @@ -/*! \mainpage SuperLU Documentation - - SuperLU is a sequential library for the direct solution of large, - sparse, nonsymmetric systems of linear equations on high performance - machines. It also provides threshold-based ILU factorization - preconditioner. The library is written in C and is callable from either - C or Fortran. - - */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/icmax1.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/icmax1.c deleted file mode 100644 index 419c728fb4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/icmax1.c +++ /dev/null @@ -1,116 +0,0 @@ -/*! @file icmax1.c - * \brief Finds the index of the element whose real part has maximum absolute value - * - *
    - *     -- LAPACK auxiliary routine (version 2.0) --   
    - *     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *     Courant Institute, Argonne National Lab, and Rice University   
    - *     October 31, 1992   
    - * 
    - */ -#include -#include "slu_scomplex.h" -#include "slu_Cnames.h" - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    ICMAX1 finds the index of the element whose real part has maximum   
    -    absolute value.   
    -
    -    Based on ICAMAX from Level 1 BLAS.   
    -    The change is to use the 'genuine' absolute value.   
    -
    -    Contributed by Nick Higham for use with CLACON.   
    -
    -    Arguments   
    -    =========   
    -
    -    N       (input) INT   
    -            The number of elements in the vector CX.   
    -
    -    CX      (input) COMPLEX array, dimension (N)   
    -            The vector whose elements will be summed.   
    -
    -    INCX    (input) INT   
    -            The spacing between successive values of CX.  INCX >= 1.   
    -
    -   ===================================================================== 
    -  
    -*/ -int icmax1_(int *n, complex *cx, int *incx) -{ -/* - NEXT LINE IS THE ONLY MODIFICATION. - - - Parameter adjustments - Function Body */ - /* System generated locals */ - int ret_val, i__1, i__2; - float r__1; - /* Local variables */ - static float smax; - static int i, ix; - - -#define CX(I) cx[(I)-1] - - - ret_val = 0; - if (*n < 1) { - return ret_val; - } - ret_val = 1; - if (*n == 1) { - return ret_val; - } - if (*incx == 1) { - goto L30; - } - -/* CODE FOR INCREMENT NOT EQUAL TO 1 */ - - ix = 1; - smax = (r__1 = CX(1).r, fabs(r__1)); - ix += *incx; - i__1 = *n; - for (i = 2; i <= *n; ++i) { - i__2 = ix; - if ((r__1 = CX(ix).r, fabs(r__1)) <= smax) { - goto L10; - } - ret_val = i; - i__2 = ix; - smax = (r__1 = CX(ix).r, fabs(r__1)); -L10: - ix += *incx; -/* L20: */ - } - return ret_val; - -/* CODE FOR INCREMENT EQUAL TO 1 */ - -L30: - smax = (r__1 = CX(1).r, fabs(r__1)); - i__1 = *n; - for (i = 2; i <= *n; ++i) { - i__2 = i; - if ((r__1 = CX(i).r, fabs(r__1)) <= smax) { - goto L40; - } - ret_val = i; - i__2 = i; - smax = (r__1 = CX(i).r, fabs(r__1)); -L40: - ; - } - return ret_val; - -/* End of ICMAX1 */ - -} /* icmax1_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ccolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ccolumn_dfs.c deleted file mode 100644 index 8dd2289932..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ccolumn_dfs.c +++ /dev/null @@ -1,258 +0,0 @@ - -/*! @file ilu_ccolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    -*/ - -#include "slu_cdefs.h" - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   ILU_CCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -ilu_ccolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the - dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonzero */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ((mem_error = cLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu))) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } else { - /* krow is in U: if 
its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( (mem_error = cLUMemXpand(jcol,nextl, - LSUB,&nzlmax,Glu)) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; - - /* Always start a new supernode for a singular column */ - if ( nextl == jptr ) jsuper = EMPTY; - - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first columns of the supernode. 
- */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1) ) { /* >= 2 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - xlsub[jcol] = ito; - for (ifrom = jptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ccopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ccopy_to_ucol.c deleted file mode 100644 index 7593fe11cc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ccopy_to_ucol.c +++ /dev/null @@ -1,202 +0,0 @@ - -/*! @file ilu_ccopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * and drop some small entries - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_cdefs.h" - -#ifdef DEBUG -int num_drop_U; -#endif - -static complex *A; /* used in _compare_ only */ -static int _compare_(const void *a, const void *b) -{ - register int *x = (int *)a, *y = (int *)b; - register float xx = slu_c_abs1(&A[*x]), yy = slu_c_abs1(&A[*y]); - if (xx > yy) return -1; - else if (xx < yy) return 1; - else return 0; -} - - -int -ilu_ccopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - complex *dense, /* modified - reset to zero on return */ - int drop_rule,/* in */ - milu_t milu, /* in */ - double drop_tol, /* in */ - int quota, /* maximum nonzero entries allowed */ - complex *sum, /* out - the sum of dropped entries */ - int *nnzUj, /* in - out */ - GlobalLU_t *Glu, /* modified */ - int *work /* working space with minimum size n, - * used by the second dropping rule */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - complex *ucol; - int *usub, *xusub; - int nzumax; - int m; /* number of entries in the nonzero U-segments */ - register float d_max = 0.0, d_min = 1.0 / dlamch_("Safe minimum"); - register double tmp; - complex zero = {0.0, 0.0}; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - *sum = zero; - if (drop_rule == NODROP) { - drop_tol = -1.0, quota = Glu->n; - } - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 
1; - - new_next = nextu + segsze; - while ( new_next > nzumax ) { - if ((mem_error = cLUMemXpand(jcol, nextu, UCOL, &nzumax, - Glu)) != 0) - return (mem_error); - ucol = Glu->ucol; - if ((mem_error = cLUMemXpand(jcol, nextu, USUB, &nzumax, - Glu)) != 0) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub++]; - tmp = slu_c_abs1(&dense[irow]); - - /* first dropping rule */ - if (quota > 0 && tmp >= drop_tol) { - if (tmp > d_max) d_max = tmp; - if (tmp < d_min) d_min = tmp; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - nextu++; - } else { - switch (milu) { - case SMILU_1: - case SMILU_2: - c_add(sum, sum, &dense[irow]); - break; - case SMILU_3: - /* *sum += fabs(dense[irow]);*/ - sum->r += tmp; - break; - case SILU: - default: - break; - } -#ifdef DEBUG - num_drop_U++; -#endif - } - dense[irow] = zero; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - m = xusub[jcol + 1] - xusub[jcol]; - - /* second dropping rule */ - if (drop_rule & DROP_SECONDARY && m > quota) { - register double tol = d_max; - register int m0 = xusub[jcol] + m - 1; - - if (quota > 0) { - if (drop_rule & DROP_INTERP) { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / m); - } else { - A = &ucol[xusub[jcol]]; - for (i = 0; i < m; i++) work[i] = i; - qsort(work, m, sizeof(int), _compare_); - tol = fabs(usub[xusub[jcol] + work[quota]]); - } - } - for (i = xusub[jcol]; i <= m0; ) { - if (slu_c_abs1(&ucol[i]) <= tol) { - switch (milu) { - case SMILU_1: - case SMILU_2: - c_add(sum, sum, &ucol[i]); - break; - case SMILU_3: - sum->r += tmp; - break; - case SILU: - default: - break; - } - ucol[i] = ucol[m0]; - usub[i] = usub[m0]; - m0--; - m--; -#ifdef DEBUG - num_drop_U++; -#endif - xusub[jcol + 1]--; - continue; - } - i++; - } - } - - if (milu == SMILU_2) { - sum->r = slu_c_abs1(sum); sum->i = 0.0; - } - if (milu == SMILU_3) sum->i 
= 0.0; - - *nnzUj += m; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cdrop_row.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cdrop_row.c deleted file mode 100644 index 50428629cc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cdrop_row.c +++ /dev/null @@ -1,321 +0,0 @@ - -/*! @file ilu_cdrop_row.c - * \brief Drop small rows from L - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * <\pre>
    - */
    -
    -#include 
    -#include 
    -#include "slu_cdefs.h"
    -
    -extern void cswap_(int *, complex [], int *, complex [], int *);
    -extern void caxpy_(int *, complex *, complex [], int *, complex [], int *);
    -
    -static float *A;  /* used in _compare_ only */
    -static int _compare_(const void *a, const void *b)
    -{
    -    register int *x = (int *)a, *y = (int *)b;
    -    if (A[*x] - A[*y] > 0.0) return -1;
    -    else if (A[*x] - A[*y] < 0.0) return 1;
    -    else return 0;
    -}
    -
    -/*! \brief
    - * 
    - * Purpose
    - * =======
    - *    ilu_cdrop_row() - Drop some small rows from the previous 
    - *    supernode (L-part only).
    - * 
    - */ -int ilu_cdrop_row( - superlu_options_t *options, /* options */ - int first, /* index of the first column in the supernode */ - int last, /* index of the last column in the supernode */ - double drop_tol, /* dropping parameter */ - int quota, /* maximum nonzero entries allowed */ - int *nnzLj, /* in/out number of nonzeros in L(:, 1:last) */ - double *fill_tol, /* in/out - on exit, fill_tol=-num_zero_pivots, - * does not change if options->ILU_MILU != SMILU1 */ - GlobalLU_t *Glu, /* modified */ - float swork[], /* working space with minimum size last-first+1 */ - int iwork[], /* working space with minimum size m - n, - * used by the second dropping rule */ - int lastc /* if lastc == 0, there is nothing after the - * working supernode [first:last]; - * if lastc == 1, there is one more column after - * the working supernode. */ ) -{ - register int i, j, k, m1; - register int nzlc; /* number of nonzeros in column last+1 */ - register int xlusup_first, xlsub_first; - int m, n; /* m x n is the size of the supernode */ - int r = 0; /* number of dropped rows */ - register float *temp; - register complex *lusup = Glu->lusup; - register int *lsub = Glu->lsub; - register int *xlsub = Glu->xlsub; - register int *xlusup = Glu->xlusup; - register float d_max = 0.0, d_min = 1.0; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - norm_t nrm = options->ILU_Norm; - complex zero = {0.0, 0.0}; - complex one = {1.0, 0.0}; - complex none = {-1.0, 0.0}; - int inc_diag; /* inc_diag = m + 1 */ - int nzp = 0; /* number of zero pivots */ - - xlusup_first = xlusup[first]; - xlsub_first = xlsub[first]; - m = xlusup[first + 1] - xlusup_first; - n = last - first + 1; - m1 = m - 1; - inc_diag = m + 1; - nzlc = lastc ? (xlusup[last + 2] - xlusup[last + 1]) : 0; - temp = swork - n; - - /* Quick return if nothing to do. 
*/ - if (m == 0 || m == n || drop_rule == NODROP) - { - *nnzLj += m * n; - return 0; - } - - /* basic dropping: ILU(tau) */ - for (i = n; i <= m1; ) - { - /* the average abs value of ith row */ - switch (nrm) - { - case ONE_NORM: - temp[i] = scasum_(&n, &lusup[xlusup_first + i], &m) / (double)n; - break; - case TWO_NORM: - temp[i] = scnrm2_(&n, &lusup[xlusup_first + i], &m) - / sqrt((double)n); - break; - case INF_NORM: - default: - k = icamax_(&n, &lusup[xlusup_first + i], &m) - 1; - temp[i] = slu_c_abs1(&lusup[xlusup_first + i + m * k]); - break; - } - - /* drop small entries due to drop_tol */ - if (drop_rule & DROP_BASIC && temp[i] < drop_tol) - { - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - caxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m].r += - slu_c_abs1(&lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - ccopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - cswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m].r = - slu_c_abs1(&lusup[xlusup_first + m1 + j * m]); - lusup[xlusup_first + m1 + j * m].i = 0.0; - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - continue; - } /* if dropping */ - else - { - if (temp[i] > d_max) d_max = temp[i]; - if (temp[i] < d_min) d_min = temp[i]; - } - i++; - } /* for */ - - /* Secondary dropping: drop more rows according to the quota. 
*/ - quota = ceil((double)quota / (double)n); - if (drop_rule & DROP_SECONDARY && m - r > quota) - { - register double tol = d_max; - - /* Calculate the second dropping tolerance */ - if (quota > n) - { - if (drop_rule & DROP_INTERP) /* by interpolation */ - { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / (m - n - r)); - } - else /* by quick sort */ - { - register int *itemp = iwork - n; - A = temp; - for (i = n; i <= m1; i++) itemp[i] = i; - qsort(iwork, m1 - n + 1, sizeof(int), _compare_); - tol = temp[iwork[quota]]; - } - } - - for (i = n; i <= m1; ) - { - if (temp[i] <= tol) - { - register int j; - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - caxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m].r += - slu_c_abs1(&lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - ccopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - cswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m].r = - slu_c_abs1(&lusup[xlusup_first + m1 + j * m]); - lusup[xlusup_first + m1 + j * m].i = 0.0; - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - temp[i] = temp[m1]; - - continue; - } - i++; - - } /* for */ - - } /* if secondary dropping */ - - for (i = n; i < m; i++) temp[i] = 0.0; - - if (r == 0) - { - *nnzLj += m * n; - return 0; - } - - /* add dropped entries to the diagnal */ - if (milu != SILU) - { - register int j; - complex t; - for (j = 0; j < n; j++) - { - cs_mult(&t, &lusup[xlusup_first + (m - 1) + j * m], - MILU_ALPHA); - switch 
(milu) - { - case SMILU_1: - if ( !(c_eq(&t, &none)) ) { - c_add(&t, &t, &one); - cc_mult(&lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - &t); - } - else - { - cs_mult( - &lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - *fill_tol); -#ifdef DEBUG - printf("[1] ZERO PIVOT: FILL col %d.\n", first + j); - fflush(stdout); -#endif - nzp++; - } - break; - case SMILU_2: - cs_mult(&lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - 1.0 + slu_c_abs1(&t)); - break; - case SMILU_3: - c_add(&t, &t, &one); - cc_mult(&lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - &t); - break; - case SILU: - default: - break; - } - } - if (nzp > 0) *fill_tol = -nzp; - } - - /* Remove dropped entries from the memory and fix the pointers. */ - m1 = m - r; - for (j = 1; j < n; j++) - { - register int tmp1, tmp2; - tmp1 = xlusup_first + j * m1; - tmp2 = xlusup_first + j * m; - for (i = 0; i < m1; i++) - lusup[i + tmp1] = lusup[i + tmp2]; - } - for (i = 0; i < nzlc; i++) - lusup[xlusup_first + i + n * m1] = lusup[xlusup_first + i + n * m]; - for (i = 0; i < nzlc; i++) - lsub[xlsub[last + 1] - r + i] = lsub[xlsub[last + 1] + i]; - for (i = first + 1; i <= last + 1; i++) - { - xlusup[i] -= r * (i - first); - xlsub[i] -= r; - } - if (lastc) - { - xlusup[last + 2] -= r * n; - xlsub[last + 2] -= r; - } - - *nnzLj += (m - r) * n; - return r; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cpanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cpanel_dfs.c deleted file mode 100644 index 6b2ae3a5a0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cpanel_dfs.c +++ /dev/null @@ -1,248 +0,0 @@ - -/*! @file ilu_cpanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols and - * record the entries with maximum absolute value in each column - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel.
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ -void -ilu_cpanel_dfs( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - complex *dense, /* out */ - float *amax, /* out - max. abs. value of each column in panel */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ -) -{ - - NCPformat *Astore; - complex *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - complex *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - float *amax_col; - register double tmp; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - amax_col = amax; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - *amax_col = 0.0; - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - tmp = slu_c_abs1(&a[k]); - if (tmp > *amax_col) *amax_col = tmp; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - 
- /* For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. 
dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... */ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - amax_col++; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cpivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cpivotL.c deleted file mode 100644 index 4a9cc3db05..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_cpivotL.c +++ /dev/null @@ -1,282 +0,0 @@ - -/*! @file ilu_cpivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - - -#include -#include -#include "slu_cdefs.h" - -#ifndef SGN -#define SGN(x) ((x)>=0?1:-1) -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *	     pivot row = k;
    - *	 ELSE IF abs(A_jj) >= thresh THEN
    - *	     pivot row = j;
    - *	 ELSE
    - *	     pivot row = m;
    - *
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0	  success;
    - *		   i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -ilu_cpivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by - * perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int diagind, /* diagonal of Pc*A*Pc' */ - int *swap, /* in/out record the row permutation */ - int *iswap, /* in/out inverse of swap, it is the same as - perm_r after the factorization */ - int *marker, /* in */ - int *pivrow, /* in/out, as an input if *usepr!=0 */ - double fill_tol, /* in - fill tolerance of current column - * used for a singular column */ - milu_t milu, /* in */ - complex drop_sum, /* in - computed in ilu_ccopy_to_ucol() - (MILU only) */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - int n; /* number of columns */ - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - register int pivptr; - int old_pivptr, diag, ptr0; - register float pivmax, rtemp; - float thresh; - complex temp; - complex *lu_sup_ptr; - complex *lu_col_ptr; - int *lsub_ptr; - register int isub, icol, k, itemp; - int *lsub, *xlsub; - complex *lusup; - int *xlusup; - flops_t *ops = stat->ops; - int info; - complex one = {1.0, 0.0}; - - /* Initialize pointers */ - n = Glu->n; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, 
and diagonal element. */ - pivmax = -1.0; - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - ptr0 = EMPTY; - for (isub = nsupc; isub < nsupr; ++isub) { - if (marker[lsub_ptr[isub]] > jcol) - continue; /* do not overlap with a later relaxed supernode */ - - switch (milu) { - case SMILU_1: - c_add(&temp, &lu_col_ptr[isub], &drop_sum); - rtemp = slu_c_abs1(&temp); - break; - case SMILU_2: - case SMILU_3: - /* In this case, drop_sum contains the sum of the abs. value */ - rtemp = slu_c_abs1(&lu_col_ptr[isub]); - break; - case SILU: - default: - rtemp = slu_c_abs1(&lu_col_ptr[isub]); - break; - } - if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; } - if (*usepr && lsub_ptr[isub] == *pivrow) old_pivptr = isub; - if (lsub_ptr[isub] == diagind) diag = isub; - if (ptr0 == EMPTY) ptr0 = isub; - } - - if (milu == SMILU_2 || milu == SMILU_3) pivmax += drop_sum.r; - - /* Test for singularity */ - if (pivmax < 0.0) { -#if SCIPY_SPECIFIC_FIX - ABORT("[0]: matrix is singular"); -#else - fprintf(stderr, "[0]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - if ( pivmax == 0.0 ) { - if (diag != EMPTY) - *pivrow = lsub_ptr[pivptr = diag]; - else if (ptr0 != EMPTY) - *pivrow = lsub_ptr[pivptr = ptr0]; - else { - /* look for the first row which does not - belong to any later supernodes */ - for (icol = jcol; icol < n; icol++) - if (marker[swap[icol]] <= jcol) break; - if (icol >= n) { -#if SCIPY_SPECIFIC_FIX - ABORT("[1]: matrix is singular"); -#else - fprintf(stderr, "[1]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - - *pivrow = swap[icol]; - - /* pick up the pivot row */ - for (isub = nsupc; isub < nsupr; ++isub) - if ( lsub_ptr[isub] == *pivrow ) { pivptr = isub; break; } - } - pivmax = fill_tol; - lu_col_ptr[pivptr].r = pivmax; - lu_col_ptr[pivptr].i = 0.0; - *usepr = 0; -#ifdef DEBUG - printf("[0] ZERO PIVOT: FILL (%d, %d).\n", *pivrow, jcol); - fflush(stdout); -#endif - info =jcol + 1; - } /* if (*pivrow == 0.0) 
*/ - else { - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. */ - if ( *usepr ) { - switch (milu) { - case SMILU_1: - c_add(&temp, &lu_col_ptr[old_pivptr], &drop_sum); - rtemp = slu_c_abs1(&temp); - break; - case SMILU_2: - case SMILU_3: - rtemp = slu_c_abs1(&lu_col_ptr[old_pivptr]) + drop_sum.r; - break; - case SILU: - default: - rtemp = slu_c_abs1(&lu_col_ptr[old_pivptr]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = old_pivptr; - else *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - switch (milu) { - case SMILU_1: - c_add(&temp, &lu_col_ptr[diag], &drop_sum); - rtemp = slu_c_abs1(&temp); - break; - case SMILU_2: - case SMILU_3: - rtemp = slu_c_abs1(&lu_col_ptr[diag]) + drop_sum.r; - break; - case SILU: - default: - rtemp = slu_c_abs1(&lu_col_ptr[diag]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - info = 0; - - /* Reset the diagonal */ - switch (milu) { - case SMILU_1: - c_add(&lu_col_ptr[pivptr], &lu_col_ptr[pivptr], &drop_sum); - break; - case SMILU_2: - case SMILU_3: - temp = c_sgn(&lu_col_ptr[pivptr]); - cc_mult(&temp, &temp, &drop_sum); - c_add(&lu_col_ptr[pivptr], &lu_col_ptr[pivptr], &drop_sum); - break; - case SILU: - default: - break; - } - - } /* else */ - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - if (jcol < n - 1) { - register int t1, t2, t; - t1 = iswap[*pivrow]; t2 = jcol; - if (t1 != t2) { - t = swap[t1]; swap[t1] = swap[t2]; swap[t2] = t; - t1 = swap[t1]; t2 = t; - t = iswap[t1]; iswap[t1] = iswap[t2]; iswap[t2] = t; - } - } /* if (jcol < n - 1) */ - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += 10 * (nsupr - nsupc); - c_div(&temp, &one, &lu_col_ptr[nsupc]); - for (k = nsupc+1; k < nsupr; k++) - cc_mult(&lu_col_ptr[k], &lu_col_ptr[k], &temp); - - return info; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_csnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_csnode_dfs.c deleted file mode 100644 index 161d154615..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_csnode_dfs.c +++ /dev/null @@ -1,90 +0,0 @@ - -/*! @file ilu_csnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_cdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ilu_csnode_dfs() - Determine the union of the row structures of those
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore,
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -ilu_csnode_dfs( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, nextl; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) - { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) - { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) - { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) - { - if ( (mem_error = cLUMemXpand(jcol, nextl, LSUB, &nzlmax, - Glu)) != 0) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1 */ - if ( jcol < kcol ) - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xlsub[kcol+1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dcolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dcolumn_dfs.c deleted file mode 100644 index a53cd09d64..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dcolumn_dfs.c +++ /dev/null @@ -1,258 +0,0 @@ - -/*! @file ilu_dcolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    -*/ - -#include "slu_ddefs.h" - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   ILU_DCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -ilu_dcolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the - dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonzero */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ((mem_error = dLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu))) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } else { - /* krow is in U: if 
its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( (mem_error = dLUMemXpand(jcol,nextl, - LSUB,&nzlmax,Glu)) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; - - /* Always start a new supernode for a singular column */ - if ( nextl == jptr ) jsuper = EMPTY; - - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first columns of the supernode. 
- */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1) ) { /* >= 2 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - xlsub[jcol] = ito; - for (ifrom = jptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dcopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dcopy_to_ucol.c deleted file mode 100644 index a27a1484a7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dcopy_to_ucol.c +++ /dev/null @@ -1,199 +0,0 @@ - -/*! @file ilu_dcopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * and drop some small entries - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -#ifdef DEBUG -int num_drop_U; -#endif - -static double *A; /* used in _compare_ only */ -static int _compare_(const void *a, const void *b) -{ - register int *x = (int *)a, *y = (int *)b; - register double xx = fabs(A[*x]), yy = fabs(A[*y]); - if (xx > yy) return -1; - else if (xx < yy) return 1; - else return 0; -} - - -int -ilu_dcopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - double *dense, /* modified - reset to zero on return */ - int drop_rule,/* in */ - milu_t milu, /* in */ - double drop_tol, /* in */ - int quota, /* maximum nonzero entries allowed */ - double *sum, /* out - the sum of dropped entries */ - int *nnzUj, /* in - out */ - GlobalLU_t *Glu, /* modified */ - int *work /* working space with minimum size n, - * used by the second dropping rule */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - double *ucol; - int *usub, *xusub; - int nzumax; - int m; /* number of entries in the nonzero U-segments */ - register double d_max = 0.0, d_min = 1.0 / dlamch_("Safe minimum"); - register double tmp; - double zero = 0.0; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - *sum = zero; - if (drop_rule == NODROP) { - drop_tol = -1.0, quota = Glu->n; - } - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 1; - - new_next = nextu + 
segsze; - while ( new_next > nzumax ) { - if ((mem_error = dLUMemXpand(jcol, nextu, UCOL, &nzumax, - Glu)) != 0) - return (mem_error); - ucol = Glu->ucol; - if ((mem_error = dLUMemXpand(jcol, nextu, USUB, &nzumax, - Glu)) != 0) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub++]; - tmp = fabs(dense[irow]); - - /* first dropping rule */ - if (quota > 0 && tmp >= drop_tol) { - if (tmp > d_max) d_max = tmp; - if (tmp < d_min) d_min = tmp; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - nextu++; - } else { - switch (milu) { - case SMILU_1: - case SMILU_2: - *sum += dense[irow]; - break; - case SMILU_3: - /* *sum += fabs(dense[irow]);*/ - *sum += tmp; - break; - case SILU: - default: - break; - } -#ifdef DEBUG - num_drop_U++; -#endif - } - dense[irow] = zero; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - m = xusub[jcol + 1] - xusub[jcol]; - - /* second dropping rule */ - if (drop_rule & DROP_SECONDARY && m > quota) { - register double tol = d_max; - register int m0 = xusub[jcol] + m - 1; - - if (quota > 0) { - if (drop_rule & DROP_INTERP) { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / m); - } else { - A = &ucol[xusub[jcol]]; - for (i = 0; i < m; i++) work[i] = i; - qsort(work, m, sizeof(int), _compare_); - tol = fabs(usub[xusub[jcol] + work[quota]]); - } - } - for (i = xusub[jcol]; i <= m0; ) { - if (fabs(ucol[i]) <= tol) { - switch (milu) { - case SMILU_1: - case SMILU_2: - *sum += ucol[i]; - break; - case SMILU_3: - *sum += fabs(ucol[i]); - break; - case SILU: - default: - break; - } - ucol[i] = ucol[m0]; - usub[i] = usub[m0]; - m0--; - m--; -#ifdef DEBUG - num_drop_U++; -#endif - xusub[jcol + 1]--; - continue; - } - i++; - } - } - - if (milu == SMILU_2) *sum = fabs(*sum); - - *nnzUj += m; - - return 0; -} diff --git 
a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ddrop_row.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ddrop_row.c deleted file mode 100644 index f493a89864..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ddrop_row.c +++ /dev/null @@ -1,307 +0,0 @@ - -/*! @file ilu_ddrop_row.c - * \brief Drop small rows from L - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * <\pre>
    - */
    -
    -#include 
    -#include 
    -#include "slu_ddefs.h"
    -
    -extern void dswap_(int *, double [], int *, double [], int *);
    -extern void daxpy_(int *, double *, double [], int *, double [], int *);
    -
    -static double *A;  /* used in _compare_ only */
    -static int _compare_(const void *a, const void *b)
    -{
    -    register int *x = (int *)a, *y = (int *)b;
    -    if (A[*x] - A[*y] > 0.0) return -1;
    -    else if (A[*x] - A[*y] < 0.0) return 1;
    -    else return 0;
    -}
    -
    -/*! \brief
    - * 
    - * Purpose
    - * =======
    - *    ilu_ddrop_row() - Drop some small rows from the previous 
    - *    supernode (L-part only).
    - * 
    - */ -int ilu_ddrop_row( - superlu_options_t *options, /* options */ - int first, /* index of the first column in the supernode */ - int last, /* index of the last column in the supernode */ - double drop_tol, /* dropping parameter */ - int quota, /* maximum nonzero entries allowed */ - int *nnzLj, /* in/out number of nonzeros in L(:, 1:last) */ - double *fill_tol, /* in/out - on exit, fill_tol=-num_zero_pivots, - * does not change if options->ILU_MILU != SMILU1 */ - GlobalLU_t *Glu, /* modified */ - double dwork[], /* working space with minimum size last-first+1 */ - int iwork[], /* working space with minimum size m - n, - * used by the second dropping rule */ - int lastc /* if lastc == 0, there is nothing after the - * working supernode [first:last]; - * if lastc == 1, there is one more column after - * the working supernode. */ ) -{ - register int i, j, k, m1; - register int nzlc; /* number of nonzeros in column last+1 */ - register int xlusup_first, xlsub_first; - int m, n; /* m x n is the size of the supernode */ - int r = 0; /* number of dropped rows */ - register double *temp; - register double *lusup = Glu->lusup; - register int *lsub = Glu->lsub; - register int *xlsub = Glu->xlsub; - register int *xlusup = Glu->xlusup; - register double d_max = 0.0, d_min = 1.0; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - norm_t nrm = options->ILU_Norm; - double zero = 0.0; - double one = 1.0; - double none = -1.0; - int inc_diag; /* inc_diag = m + 1 */ - int nzp = 0; /* number of zero pivots */ - - xlusup_first = xlusup[first]; - xlsub_first = xlsub[first]; - m = xlusup[first + 1] - xlusup_first; - n = last - first + 1; - m1 = m - 1; - inc_diag = m + 1; - nzlc = lastc ? (xlusup[last + 2] - xlusup[last + 1]) : 0; - temp = dwork - n; - - /* Quick return if nothing to do. 
*/ - if (m == 0 || m == n || drop_rule == NODROP) - { - *nnzLj += m * n; - return 0; - } - - /* basic dropping: ILU(tau) */ - for (i = n; i <= m1; ) - { - /* the average abs value of ith row */ - switch (nrm) - { - case ONE_NORM: - temp[i] = dasum_(&n, &lusup[xlusup_first + i], &m) / (double)n; - break; - case TWO_NORM: - temp[i] = dnrm2_(&n, &lusup[xlusup_first + i], &m) - / sqrt((double)n); - break; - case INF_NORM: - default: - k = idamax_(&n, &lusup[xlusup_first + i], &m) - 1; - temp[i] = fabs(lusup[xlusup_first + i + m * k]); - break; - } - - /* drop small entries due to drop_tol */ - if (drop_rule & DROP_BASIC && temp[i] < drop_tol) - { - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - daxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m] += - fabs(lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - dcopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - dswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m] = - fabs(lusup[xlusup_first + m1 + j * m]); - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - continue; - } /* if dropping */ - else - { - if (temp[i] > d_max) d_max = temp[i]; - if (temp[i] < d_min) d_min = temp[i]; - } - i++; - } /* for */ - - /* Secondary dropping: drop more rows according to the quota. 
*/ - quota = ceil((double)quota / (double)n); - if (drop_rule & DROP_SECONDARY && m - r > quota) - { - register double tol = d_max; - - /* Calculate the second dropping tolerance */ - if (quota > n) - { - if (drop_rule & DROP_INTERP) /* by interpolation */ - { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / (m - n - r)); - } - else /* by quick sort */ - { - register int *itemp = iwork - n; - A = temp; - for (i = n; i <= m1; i++) itemp[i] = i; - qsort(iwork, m1 - n + 1, sizeof(int), _compare_); - tol = temp[iwork[quota]]; - } - } - - for (i = n; i <= m1; ) - { - if (temp[i] <= tol) - { - register int j; - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - daxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m] += - fabs(lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - dcopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - dswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m] = - fabs(lusup[xlusup_first + m1 + j * m]); - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - temp[i] = temp[m1]; - - continue; - } - i++; - - } /* for */ - - } /* if secondary dropping */ - - for (i = n; i < m; i++) temp[i] = 0.0; - - if (r == 0) - { - *nnzLj += m * n; - return 0; - } - - /* add dropped entries to the diagnal */ - if (milu != SILU) - { - register int j; - double t; - for (j = 0; j < n; j++) - { - t = lusup[xlusup_first + (m - 1) + j * m] * MILU_ALPHA; - switch (milu) - { - case SMILU_1: - if (t != none) { - lusup[xlusup_first + j * 
inc_diag] *= (one + t); - } - else - { - lusup[xlusup_first + j * inc_diag] *= *fill_tol; -#ifdef DEBUG - printf("[1] ZERO PIVOT: FILL col %d.\n", first + j); - fflush(stdout); -#endif - nzp++; - } - break; - case SMILU_2: - lusup[xlusup_first + j * inc_diag] *= (1.0 + fabs(t)); - break; - case SMILU_3: - lusup[xlusup_first + j * inc_diag] *= (one + t); - break; - case SILU: - default: - break; - } - } - if (nzp > 0) *fill_tol = -nzp; - } - - /* Remove dropped entries from the memory and fix the pointers. */ - m1 = m - r; - for (j = 1; j < n; j++) - { - register int tmp1, tmp2; - tmp1 = xlusup_first + j * m1; - tmp2 = xlusup_first + j * m; - for (i = 0; i < m1; i++) - lusup[i + tmp1] = lusup[i + tmp2]; - } - for (i = 0; i < nzlc; i++) - lusup[xlusup_first + i + n * m1] = lusup[xlusup_first + i + n * m]; - for (i = 0; i < nzlc; i++) - lsub[xlsub[last + 1] - r + i] = lsub[xlsub[last + 1] + i]; - for (i = first + 1; i <= last + 1; i++) - { - xlusup[i] -= r * (i - first); - xlsub[i] -= r; - } - if (lastc) - { - xlusup[last + 2] -= r * n; - xlsub[last + 2] -= r; - } - - *nnzLj += (m - r) * n; - return r; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dpanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dpanel_dfs.c deleted file mode 100644 index 5aae050b68..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dpanel_dfs.c +++ /dev/null @@ -1,248 +0,0 @@ - -/*! @file ilu_dpanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols and - * record the entries with maximum absolute value in each column - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel.
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ -void -ilu_dpanel_dfs( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - double *dense, /* out */ - double *amax, /* out - max. abs. value of each column in panel */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ -) -{ - - NCPformat *Astore; - double *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - double *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - double *amax_col; - register double tmp; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - amax_col = amax; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - *amax_col = 0.0; - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - tmp = fabs(a[k]); - if (tmp > *amax_col) *amax_col = tmp; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - - /* 
For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. 
dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... */ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - amax_col++; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dpivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dpivotL.c deleted file mode 100644 index 9735ac748b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dpivotL.c +++ /dev/null @@ -1,274 +0,0 @@ - -/*! @file ilu_dpivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - - -#include -#include -#include "slu_ddefs.h" - -#ifndef SGN -#define SGN(x) ((x)>=0?1:-1) -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *	     pivot row = k;
    - *	 ELSE IF abs(A_jj) >= thresh THEN
    - *	     pivot row = j;
    - *	 ELSE
    - *	     pivot row = m;
    - *
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0	  success;
    - *		   i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -ilu_dpivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by - * perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int diagind, /* diagonal of Pc*A*Pc' */ - int *swap, /* in/out record the row permutation */ - int *iswap, /* in/out inverse of swap, it is the same as - perm_r after the factorization */ - int *marker, /* in */ - int *pivrow, /* in/out, as an input if *usepr!=0 */ - double fill_tol, /* in - fill tolerance of current column - * used for a singular column */ - milu_t milu, /* in */ - double drop_sum, /* in - computed in ilu_dcopy_to_ucol() - (MILU only) */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - int n; /* number of columns */ - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - register int pivptr; - int old_pivptr, diag, ptr0; - register double pivmax, rtemp; - double thresh; - double temp; - double *lu_sup_ptr; - double *lu_col_ptr; - int *lsub_ptr; - register int isub, icol, k, itemp; - int *lsub, *xlsub; - double *lusup; - int *xlusup; - flops_t *ops = stat->ops; - int info; - - /* Initialize pointers */ - n = Glu->n; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, and diagonal element. 
*/ - pivmax = -1.0; - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - ptr0 = EMPTY; - for (isub = nsupc; isub < nsupr; ++isub) { - if (marker[lsub_ptr[isub]] > jcol) - continue; /* do not overlap with a later relaxed supernode */ - - switch (milu) { - case SMILU_1: - rtemp = fabs(lu_col_ptr[isub] + drop_sum); - break; - case SMILU_2: - case SMILU_3: - /* In this case, drop_sum contains the sum of the abs. value */ - rtemp = fabs(lu_col_ptr[isub]); - break; - case SILU: - default: - rtemp = fabs(lu_col_ptr[isub]); - break; - } - if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; } - if (*usepr && lsub_ptr[isub] == *pivrow) old_pivptr = isub; - if (lsub_ptr[isub] == diagind) diag = isub; - if (ptr0 == EMPTY) ptr0 = isub; - } - - if (milu == SMILU_2 || milu == SMILU_3) pivmax += drop_sum; - - /* Test for singularity */ - if (pivmax < 0.0) { -#if SCIPY_SPECIFIC_FIX - ABORT("[0]: matrix is singular"); -#else - fprintf(stderr, "[0]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - if ( pivmax == 0.0 ) { - if (diag != EMPTY) - *pivrow = lsub_ptr[pivptr = diag]; - else if (ptr0 != EMPTY) - *pivrow = lsub_ptr[pivptr = ptr0]; - else { - /* look for the first row which does not - belong to any later supernodes */ - for (icol = jcol; icol < n; icol++) - if (marker[swap[icol]] <= jcol) break; - if (icol >= n) { -#if SCIPY_SPECIFIC_FIX - ABORT("[1]: matrix is singular"); -#else - fprintf(stderr, "[1]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - - *pivrow = swap[icol]; - - /* pick up the pivot row */ - for (isub = nsupc; isub < nsupr; ++isub) - if ( lsub_ptr[isub] == *pivrow ) { pivptr = isub; break; } - } - pivmax = fill_tol; - lu_col_ptr[pivptr] = pivmax; - *usepr = 0; -#ifdef DEBUG - printf("[0] ZERO PIVOT: FILL (%d, %d).\n", *pivrow, jcol); - fflush(stdout); -#endif - info =jcol + 1; - } /* if (*pivrow == 0.0) */ - else { - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. 
*/ - if ( *usepr ) { - switch (milu) { - case SMILU_1: - rtemp = fabs(lu_col_ptr[old_pivptr] + drop_sum); - break; - case SMILU_2: - case SMILU_3: - rtemp = fabs(lu_col_ptr[old_pivptr]) + drop_sum; - break; - case SILU: - default: - rtemp = fabs(lu_col_ptr[old_pivptr]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = old_pivptr; - else *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - switch (milu) { - case SMILU_1: - rtemp = fabs(lu_col_ptr[diag] + drop_sum); - break; - case SMILU_2: - case SMILU_3: - rtemp = fabs(lu_col_ptr[diag]) + drop_sum; - break; - case SILU: - default: - rtemp = fabs(lu_col_ptr[diag]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - info = 0; - - /* Reset the diagonal */ - switch (milu) { - case SMILU_1: - lu_col_ptr[pivptr] += drop_sum; - break; - case SMILU_2: - case SMILU_3: - lu_col_ptr[pivptr] += SGN(lu_col_ptr[pivptr]) * drop_sum; - break; - case SILU: - default: - break; - } - - } /* else */ - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - if (jcol < n - 1) { - register int t1, t2, t; - t1 = iswap[*pivrow]; t2 = jcol; - if (t1 != t2) { - t = swap[t1]; swap[t1] = swap[t2]; swap[t2] = t; - t1 = swap[t1]; t2 = t; - t = iswap[t1]; iswap[t1] = iswap[t2]; iswap[t2] = t; - } - } /* if (jcol < n - 1) */ - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += nsupr - nsupc; - temp = 1.0 / lu_col_ptr[nsupc]; - for (k = nsupc+1; k < nsupr; k++) lu_col_ptr[k] *= temp; - - return info; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dsnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dsnode_dfs.c deleted file mode 100644 index 5251134981..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_dsnode_dfs.c +++ /dev/null @@ -1,90 +0,0 @@ - -/*! @file ilu_dsnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ilu_dsnode_dfs() - Determine the union of the row structures of those
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore,
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -ilu_dsnode_dfs( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, nextl; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) - { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) - { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) - { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) - { - if ( (mem_error = dLUMemXpand(jcol, nextl, LSUB, &nzlmax, - Glu)) != 0) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1 */ - if ( jcol < kcol ) - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xlsub[kcol+1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_heap_relax_snode.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_heap_relax_snode.c deleted file mode 100644 index d7a98bce7f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_heap_relax_snode.c +++ /dev/null @@ -1,120 +0,0 @@ -/*! @file ilu_heap_relax_snode.c - * \brief Identify the initial relaxed supernodes - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 1, 2009
    - * 
    - */ - -#include "slu_ddefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ilu_heap_relax_snode() - Identify the initial relaxed supernodes,
    - *    assuming that the matrix has been reordered according to the postorder
    - *    of the etree.
    - * 
    - */ - -void -ilu_heap_relax_snode ( - const int n, - int *et, /* column elimination tree */ - const int relax_columns, /* max no of columns allowed in a - relaxed snode */ - int *descendants, /* no of descendants of each node - in the etree */ - int *relax_end, /* last column in a supernode - * if j-th column starts a relaxed - * supernode, relax_end[j] represents - * the last column of this supernode */ - int *relax_fsupc /* first column in a supernode - * relax_fsupc[j] represents the first - * column of j-th supernode */ - ) -{ - register int i, j, k, l, f, parent; - register int snode_start; /* beginning of a snode */ - int *et_save, *post, *inv_post, *iwork; - int nsuper_et = 0, nsuper_et_post = 0; - - /* The etree may not be postordered, but is heap ordered. */ - - iwork = (int*) intMalloc(3*n+2); - if ( !iwork ) ABORT("SUPERLU_MALLOC fails for iwork[]"); - inv_post = iwork + n+1; - et_save = inv_post + n+1; - - /* Post order etree */ - post = (int *) TreePostorder(n, et); - for (i = 0; i < n+1; ++i) inv_post[post[i]] = i; - - /* Renumber etree in postorder */ - for (i = 0; i < n; ++i) { - iwork[post[i]] = post[et[i]]; - et_save[i] = et[i]; /* Save the original etree */ - } - for (i = 0; i < n; ++i) et[i] = iwork[i]; - - /* Compute the number of descendants of each node in the etree */ - ifill (relax_end, n, EMPTY); - ifill (relax_fsupc, n, EMPTY); - for (j = 0; j < n; j++) descendants[j] = 0; - for (j = 0; j < n; j++) { - parent = et[j]; - if ( parent != n ) /* not the dummy root */ - descendants[parent] += descendants[j] + 1; - } - - /* Identify the relaxed supernodes by postorder traversal of the etree. */ - for ( f = j = 0; j < n; ) { - parent = et[j]; - snode_start = j; - while ( parent != n && descendants[parent] < relax_columns ) { - j = parent; - parent = et[j]; - } - /* Found a supernode in postordered etree; j is the last column. 
*/ - ++nsuper_et_post; - k = n; - for (i = snode_start; i <= j; ++i) - k = SUPERLU_MIN(k, inv_post[i]); - l = inv_post[j]; - if ( (l - k) == (j - snode_start) ) { - /* It's also a supernode in the original etree */ - relax_end[k] = l; /* Last column is recorded */ - relax_fsupc[f++] = k; - ++nsuper_et; - } else { - for (i = snode_start; i <= j; ++i) { - l = inv_post[i]; - if ( descendants[i] == 0 ) { - relax_end[l] = l; - relax_fsupc[f++] = l; - ++nsuper_et; - } - } - } - j++; - /* Search for a new leaf */ - while ( descendants[j] != 0 && j < n ) j++; - } - -#if ( PRNTlevel>=1 ) - printf(".. heap_snode_relax:\n" - "\tNo of relaxed snodes in postordered etree:\t%d\n" - "\tNo of relaxed snodes in original etree:\t%d\n", - nsuper_et_post, nsuper_et); -#endif - - /* Recover the original etree */ - for (i = 0; i < n; ++i) et[i] = et_save[i]; - - SUPERLU_FREE(post); - SUPERLU_FREE(iwork); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_relax_snode.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_relax_snode.c deleted file mode 100644 index 124101a7da..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_relax_snode.c +++ /dev/null @@ -1,69 +0,0 @@ -/*! @file ilu_relax_snode.c - * \brief Identify initial relaxed supernodes - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 1, 2009
    - * 
    - */ - -#include "slu_ddefs.h" -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ilu_relax_snode() - Identify the initial relaxed supernodes, assuming
    - *    that the matrix has been reordered according to the postorder of the
    - *    etree.
    - * 
    - */ -void -ilu_relax_snode ( - const int n, - int *et, /* column elimination tree */ - const int relax_columns, /* max no of columns allowed in a - relaxed snode */ - int *descendants, /* no of descendants of each node - in the etree */ - int *relax_end, /* last column in a supernode - * if j-th column starts a relaxed - * supernode, relax_end[j] represents - * the last column of this supernode */ - int *relax_fsupc /* first column in a supernode - * relax_fsupc[j] represents the first - * column of j-th supernode */ - ) -{ - - register int j, f, parent; - register int snode_start; /* beginning of a snode */ - - ifill (relax_end, n, EMPTY); - ifill (relax_fsupc, n, EMPTY); - for (j = 0; j < n; j++) descendants[j] = 0; - - /* Compute the number of descendants of each node in the etree */ - for (j = 0; j < n; j++) { - parent = et[j]; - if ( parent != n ) /* not the dummy root */ - descendants[parent] += descendants[j] + 1; - } - - /* Identify the relaxed supernodes by postorder traversal of the etree. */ - for (j = f = 0; j < n; ) { - parent = et[j]; - snode_start = j; - while ( parent != n && descendants[parent] < relax_columns ) { - j = parent; - parent = et[j]; - } - /* Found a supernode with j being the last column. */ - relax_end[snode_start] = j; /* Last column is recorded */ - j++; - relax_fsupc[f++] = snode_start; - /* Search for a new leaf */ - while ( descendants[j] != 0 && j < n ) j++; - } -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_scolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_scolumn_dfs.c deleted file mode 100644 index 35d0323215..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_scolumn_dfs.c +++ /dev/null @@ -1,258 +0,0 @@ - -/*! @file ilu_scolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    -*/ - -#include "slu_sdefs.h" - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   ILU_SCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -ilu_scolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the - dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonzero */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ((mem_error = sLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu))) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } else { - /* krow is in U: if 
its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( (mem_error = sLUMemXpand(jcol,nextl, - LSUB,&nzlmax,Glu)) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; - - /* Always start a new supernode for a singular column */ - if ( nextl == jptr ) jsuper = EMPTY; - - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first columns of the supernode. 
- */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1) ) { /* >= 2 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - xlsub[jcol] = ito; - for (ifrom = jptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_scopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_scopy_to_ucol.c deleted file mode 100644 index 2b3bc7074f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_scopy_to_ucol.c +++ /dev/null @@ -1,199 +0,0 @@ - -/*! @file ilu_scopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * and drop some small entries - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_sdefs.h" - -#ifdef DEBUG -int num_drop_U; -#endif - -static float *A; /* used in _compare_ only */ -static int _compare_(const void *a, const void *b) -{ - register int *x = (int *)a, *y = (int *)b; - register double xx = fabs(A[*x]), yy = fabs(A[*y]); - if (xx > yy) return -1; - else if (xx < yy) return 1; - else return 0; -} - - -int -ilu_scopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - float *dense, /* modified - reset to zero on return */ - int drop_rule,/* in */ - milu_t milu, /* in */ - double drop_tol, /* in */ - int quota, /* maximum nonzero entries allowed */ - float *sum, /* out - the sum of dropped entries */ - int *nnzUj, /* in - out */ - GlobalLU_t *Glu, /* modified */ - int *work /* working space with minimum size n, - * used by the second dropping rule */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - float *ucol; - int *usub, *xusub; - int nzumax; - int m; /* number of entries in the nonzero U-segments */ - register float d_max = 0.0, d_min = 1.0 / dlamch_("Safe minimum"); - register double tmp; - float zero = 0.0; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - *sum = zero; - if (drop_rule == NODROP) { - drop_tol = -1.0, quota = Glu->n; - } - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 1; - - new_next = nextu + 
segsze; - while ( new_next > nzumax ) { - if ((mem_error = sLUMemXpand(jcol, nextu, UCOL, &nzumax, - Glu)) != 0) - return (mem_error); - ucol = Glu->ucol; - if ((mem_error = sLUMemXpand(jcol, nextu, USUB, &nzumax, - Glu)) != 0) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub++]; - tmp = fabs(dense[irow]); - - /* first dropping rule */ - if (quota > 0 && tmp >= drop_tol) { - if (tmp > d_max) d_max = tmp; - if (tmp < d_min) d_min = tmp; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - nextu++; - } else { - switch (milu) { - case SMILU_1: - case SMILU_2: - *sum += dense[irow]; - break; - case SMILU_3: - /* *sum += fabs(dense[irow]);*/ - *sum += tmp; - break; - case SILU: - default: - break; - } -#ifdef DEBUG - num_drop_U++; -#endif - } - dense[irow] = zero; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - m = xusub[jcol + 1] - xusub[jcol]; - - /* second dropping rule */ - if (drop_rule & DROP_SECONDARY && m > quota) { - register double tol = d_max; - register int m0 = xusub[jcol] + m - 1; - - if (quota > 0) { - if (drop_rule & DROP_INTERP) { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / m); - } else { - A = &ucol[xusub[jcol]]; - for (i = 0; i < m; i++) work[i] = i; - qsort(work, m, sizeof(int), _compare_); - tol = fabs(usub[xusub[jcol] + work[quota]]); - } - } - for (i = xusub[jcol]; i <= m0; ) { - if (fabs(ucol[i]) <= tol) { - switch (milu) { - case SMILU_1: - case SMILU_2: - *sum += ucol[i]; - break; - case SMILU_3: - *sum += fabs(ucol[i]); - break; - case SILU: - default: - break; - } - ucol[i] = ucol[m0]; - usub[i] = usub[m0]; - m0--; - m--; -#ifdef DEBUG - num_drop_U++; -#endif - xusub[jcol + 1]--; - continue; - } - i++; - } - } - - if (milu == SMILU_2) *sum = fabs(*sum); - - *nnzUj += m; - - return 0; -} diff --git 
a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_sdrop_row.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_sdrop_row.c deleted file mode 100644 index a68fe32b99..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_sdrop_row.c +++ /dev/null @@ -1,307 +0,0 @@ - -/*! @file ilu_sdrop_row.c - * \brief Drop small rows from L - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * <\pre>
    - */
    -
    -#include 
    -#include 
    -#include "slu_sdefs.h"
    -
    -extern void sswap_(int *, float [], int *, float [], int *);
    -extern void saxpy_(int *, float *, float [], int *, float [], int *);
    -
    -static float *A;  /* used in _compare_ only */
    -static int _compare_(const void *a, const void *b)
    -{
    -    register int *x = (int *)a, *y = (int *)b;
    -    if (A[*x] - A[*y] > 0.0) return -1;
    -    else if (A[*x] - A[*y] < 0.0) return 1;
    -    else return 0;
    -}
    -
    -/*! \brief
    - * 
    - * Purpose
    - * =======
    - *    ilu_sdrop_row() - Drop some small rows from the previous 
    - *    supernode (L-part only).
    - * 
    - */ -int ilu_sdrop_row( - superlu_options_t *options, /* options */ - int first, /* index of the first column in the supernode */ - int last, /* index of the last column in the supernode */ - double drop_tol, /* dropping parameter */ - int quota, /* maximum nonzero entries allowed */ - int *nnzLj, /* in/out number of nonzeros in L(:, 1:last) */ - double *fill_tol, /* in/out - on exit, fill_tol=-num_zero_pivots, - * does not change if options->ILU_MILU != SMILU1 */ - GlobalLU_t *Glu, /* modified */ - float swork[], /* working space with minimum size last-first+1 */ - int iwork[], /* working space with minimum size m - n, - * used by the second dropping rule */ - int lastc /* if lastc == 0, there is nothing after the - * working supernode [first:last]; - * if lastc == 1, there is one more column after - * the working supernode. */ ) -{ - register int i, j, k, m1; - register int nzlc; /* number of nonzeros in column last+1 */ - register int xlusup_first, xlsub_first; - int m, n; /* m x n is the size of the supernode */ - int r = 0; /* number of dropped rows */ - register float *temp; - register float *lusup = Glu->lusup; - register int *lsub = Glu->lsub; - register int *xlsub = Glu->xlsub; - register int *xlusup = Glu->xlusup; - register float d_max = 0.0, d_min = 1.0; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - norm_t nrm = options->ILU_Norm; - float zero = 0.0; - float one = 1.0; - float none = -1.0; - int inc_diag; /* inc_diag = m + 1 */ - int nzp = 0; /* number of zero pivots */ - - xlusup_first = xlusup[first]; - xlsub_first = xlsub[first]; - m = xlusup[first + 1] - xlusup_first; - n = last - first + 1; - m1 = m - 1; - inc_diag = m + 1; - nzlc = lastc ? (xlusup[last + 2] - xlusup[last + 1]) : 0; - temp = swork - n; - - /* Quick return if nothing to do. 
*/ - if (m == 0 || m == n || drop_rule == NODROP) - { - *nnzLj += m * n; - return 0; - } - - /* basic dropping: ILU(tau) */ - for (i = n; i <= m1; ) - { - /* the average abs value of ith row */ - switch (nrm) - { - case ONE_NORM: - temp[i] = sasum_(&n, &lusup[xlusup_first + i], &m) / (double)n; - break; - case TWO_NORM: - temp[i] = snrm2_(&n, &lusup[xlusup_first + i], &m) - / sqrt((double)n); - break; - case INF_NORM: - default: - k = isamax_(&n, &lusup[xlusup_first + i], &m) - 1; - temp[i] = fabs(lusup[xlusup_first + i + m * k]); - break; - } - - /* drop small entries due to drop_tol */ - if (drop_rule & DROP_BASIC && temp[i] < drop_tol) - { - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - saxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m] += - fabs(lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - scopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - sswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m] = - fabs(lusup[xlusup_first + m1 + j * m]); - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - continue; - } /* if dropping */ - else - { - if (temp[i] > d_max) d_max = temp[i]; - if (temp[i] < d_min) d_min = temp[i]; - } - i++; - } /* for */ - - /* Secondary dropping: drop more rows according to the quota. 
*/ - quota = ceil((double)quota / (double)n); - if (drop_rule & DROP_SECONDARY && m - r > quota) - { - register double tol = d_max; - - /* Calculate the second dropping tolerance */ - if (quota > n) - { - if (drop_rule & DROP_INTERP) /* by interpolation */ - { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / (m - n - r)); - } - else /* by quick sort */ - { - register int *itemp = iwork - n; - A = temp; - for (i = n; i <= m1; i++) itemp[i] = i; - qsort(iwork, m1 - n + 1, sizeof(int), _compare_); - tol = temp[iwork[quota]]; - } - } - - for (i = n; i <= m1; ) - { - if (temp[i] <= tol) - { - register int j; - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - saxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m] += - fabs(lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - scopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - sswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m] = - fabs(lusup[xlusup_first + m1 + j * m]); - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - temp[i] = temp[m1]; - - continue; - } - i++; - - } /* for */ - - } /* if secondary dropping */ - - for (i = n; i < m; i++) temp[i] = 0.0; - - if (r == 0) - { - *nnzLj += m * n; - return 0; - } - - /* add dropped entries to the diagnal */ - if (milu != SILU) - { - register int j; - float t; - for (j = 0; j < n; j++) - { - t = lusup[xlusup_first + (m - 1) + j * m] * MILU_ALPHA; - switch (milu) - { - case SMILU_1: - if (t != none) { - lusup[xlusup_first + j * 
inc_diag] *= (one + t); - } - else - { - lusup[xlusup_first + j * inc_diag] *= *fill_tol; -#ifdef DEBUG - printf("[1] ZERO PIVOT: FILL col %d.\n", first + j); - fflush(stdout); -#endif - nzp++; - } - break; - case SMILU_2: - lusup[xlusup_first + j * inc_diag] *= (1.0 + fabs(t)); - break; - case SMILU_3: - lusup[xlusup_first + j * inc_diag] *= (one + t); - break; - case SILU: - default: - break; - } - } - if (nzp > 0) *fill_tol = -nzp; - } - - /* Remove dropped entries from the memory and fix the pointers. */ - m1 = m - r; - for (j = 1; j < n; j++) - { - register int tmp1, tmp2; - tmp1 = xlusup_first + j * m1; - tmp2 = xlusup_first + j * m; - for (i = 0; i < m1; i++) - lusup[i + tmp1] = lusup[i + tmp2]; - } - for (i = 0; i < nzlc; i++) - lusup[xlusup_first + i + n * m1] = lusup[xlusup_first + i + n * m]; - for (i = 0; i < nzlc; i++) - lsub[xlsub[last + 1] - r + i] = lsub[xlsub[last + 1] + i]; - for (i = first + 1; i <= last + 1; i++) - { - xlusup[i] -= r * (i - first); - xlsub[i] -= r; - } - if (lastc) - { - xlusup[last + 2] -= r * n; - xlsub[last + 2] -= r; - } - - *nnzLj += (m - r) * n; - return r; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_spanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_spanel_dfs.c deleted file mode 100644 index a741846a37..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_spanel_dfs.c +++ /dev/null @@ -1,248 +0,0 @@ - -/*! @file ilu_spanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols and - * record the entries with maximum absolute value in each column - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel.
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ -void -ilu_spanel_dfs( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - float *dense, /* out */ - float *amax, /* out - max. abs. value of each column in panel */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ -) -{ - - NCPformat *Astore; - float *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - float *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - float *amax_col; - register double tmp; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - amax_col = amax; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - *amax_col = 0.0; - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - tmp = fabs(a[k]); - if (tmp > *amax_col) *amax_col = tmp; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - - /* For 
each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. 
dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... */ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - amax_col++; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_spivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_spivotL.c deleted file mode 100644 index 0ee014dd48..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_spivotL.c +++ /dev/null @@ -1,274 +0,0 @@ - -/*! @file ilu_spivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - - -#include -#include -#include "slu_sdefs.h" - -#ifndef SGN -#define SGN(x) ((x)>=0?1:-1) -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *	     pivot row = k;
    - *	 ELSE IF abs(A_jj) >= thresh THEN
    - *	     pivot row = j;
    - *	 ELSE
    - *	     pivot row = m;
    - *
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0	  success;
    - *		   i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -ilu_spivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by - * perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int diagind, /* diagonal of Pc*A*Pc' */ - int *swap, /* in/out record the row permutation */ - int *iswap, /* in/out inverse of swap, it is the same as - perm_r after the factorization */ - int *marker, /* in */ - int *pivrow, /* in/out, as an input if *usepr!=0 */ - double fill_tol, /* in - fill tolerance of current column - * used for a singular column */ - milu_t milu, /* in */ - float drop_sum, /* in - computed in ilu_scopy_to_ucol() - (MILU only) */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - int n; /* number of columns */ - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - register int pivptr; - int old_pivptr, diag, ptr0; - register float pivmax, rtemp; - float thresh; - float temp; - float *lu_sup_ptr; - float *lu_col_ptr; - int *lsub_ptr; - register int isub, icol, k, itemp; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - flops_t *ops = stat->ops; - int info; - - /* Initialize pointers */ - n = Glu->n; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, and diagonal element. 
*/ - pivmax = -1.0; - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - ptr0 = EMPTY; - for (isub = nsupc; isub < nsupr; ++isub) { - if (marker[lsub_ptr[isub]] > jcol) - continue; /* do not overlap with a later relaxed supernode */ - - switch (milu) { - case SMILU_1: - rtemp = fabs(lu_col_ptr[isub] + drop_sum); - break; - case SMILU_2: - case SMILU_3: - /* In this case, drop_sum contains the sum of the abs. value */ - rtemp = fabs(lu_col_ptr[isub]); - break; - case SILU: - default: - rtemp = fabs(lu_col_ptr[isub]); - break; - } - if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; } - if (*usepr && lsub_ptr[isub] == *pivrow) old_pivptr = isub; - if (lsub_ptr[isub] == diagind) diag = isub; - if (ptr0 == EMPTY) ptr0 = isub; - } - - if (milu == SMILU_2 || milu == SMILU_3) pivmax += drop_sum; - - /* Test for singularity */ - if (pivmax < 0.0) { -#if SCIPY_SPECIFIC_FIX - ABORT("[0]: matrix is singular"); -#else - fprintf(stderr, "[0]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - if ( pivmax == 0.0 ) { - if (diag != EMPTY) - *pivrow = lsub_ptr[pivptr = diag]; - else if (ptr0 != EMPTY) - *pivrow = lsub_ptr[pivptr = ptr0]; - else { - /* look for the first row which does not - belong to any later supernodes */ - for (icol = jcol; icol < n; icol++) - if (marker[swap[icol]] <= jcol) break; - if (icol >= n) { -#if SCIPY_SPECIFIC_FIX - ABORT("[1]: matrix is singular"); -#else - fprintf(stderr, "[1]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - - *pivrow = swap[icol]; - - /* pick up the pivot row */ - for (isub = nsupc; isub < nsupr; ++isub) - if ( lsub_ptr[isub] == *pivrow ) { pivptr = isub; break; } - } - pivmax = fill_tol; - lu_col_ptr[pivptr] = pivmax; - *usepr = 0; -#ifdef DEBUG - printf("[0] ZERO PIVOT: FILL (%d, %d).\n", *pivrow, jcol); - fflush(stdout); -#endif - info =jcol + 1; - } /* if (*pivrow == 0.0) */ - else { - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. 
*/ - if ( *usepr ) { - switch (milu) { - case SMILU_1: - rtemp = fabs(lu_col_ptr[old_pivptr] + drop_sum); - break; - case SMILU_2: - case SMILU_3: - rtemp = fabs(lu_col_ptr[old_pivptr]) + drop_sum; - break; - case SILU: - default: - rtemp = fabs(lu_col_ptr[old_pivptr]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = old_pivptr; - else *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - switch (milu) { - case SMILU_1: - rtemp = fabs(lu_col_ptr[diag] + drop_sum); - break; - case SMILU_2: - case SMILU_3: - rtemp = fabs(lu_col_ptr[diag]) + drop_sum; - break; - case SILU: - default: - rtemp = fabs(lu_col_ptr[diag]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - info = 0; - - /* Reset the diagonal */ - switch (milu) { - case SMILU_1: - lu_col_ptr[pivptr] += drop_sum; - break; - case SMILU_2: - case SMILU_3: - lu_col_ptr[pivptr] += SGN(lu_col_ptr[pivptr]) * drop_sum; - break; - case SILU: - default: - break; - } - - } /* else */ - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - if (jcol < n - 1) { - register int t1, t2, t; - t1 = iswap[*pivrow]; t2 = jcol; - if (t1 != t2) { - t = swap[t1]; swap[t1] = swap[t2]; swap[t2] = t; - t1 = swap[t1]; t2 = t; - t = iswap[t1]; iswap[t1] = iswap[t2]; iswap[t2] = t; - } - } /* if (jcol < n - 1) */ - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += nsupr - nsupc; - temp = 1.0 / lu_col_ptr[nsupc]; - for (k = nsupc+1; k < nsupr; k++) lu_col_ptr[k] *= temp; - - return info; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ssnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ssnode_dfs.c deleted file mode 100644 index 22ae22606e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_ssnode_dfs.c +++ /dev/null @@ -1,90 +0,0 @@ - -/*! @file ilu_ssnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ilu_ssnode_dfs() - Determine the union of the row structures of those
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore,
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -ilu_ssnode_dfs( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, nextl; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) - { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) - { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) - { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) - { - if ( (mem_error = sLUMemXpand(jcol, nextl, LSUB, &nzlmax, - Glu)) != 0) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1 */ - if ( jcol < kcol ) - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xlsub[kcol+1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zcolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zcolumn_dfs.c deleted file mode 100644 index c5ce87d7a8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zcolumn_dfs.c +++ /dev/null @@ -1,258 +0,0 @@ - -/*! @file ilu_zcolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    -*/ - -#include "slu_zdefs.h" - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   ILU_ZCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -ilu_zcolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the - dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonzero */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ((mem_error = zLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu))) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } else { - /* krow is in U: if 
its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( (mem_error = zLUMemXpand(jcol,nextl, - LSUB,&nzlmax,Glu)) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; - - /* Always start a new supernode for a singular column */ - if ( nextl == jptr ) jsuper = EMPTY; - - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first columns of the supernode. 
- */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1) ) { /* >= 2 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - xlsub[jcol] = ito; - for (ifrom = jptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zcopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zcopy_to_ucol.c deleted file mode 100644 index 9859c2fa58..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zcopy_to_ucol.c +++ /dev/null @@ -1,202 +0,0 @@ - -/*! @file ilu_zcopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * and drop some small entries - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_zdefs.h" - -#ifdef DEBUG -int num_drop_U; -#endif - -static doublecomplex *A; /* used in _compare_ only */ -static int _compare_(const void *a, const void *b) -{ - register int *x = (int *)a, *y = (int *)b; - register double xx = z_abs1(&A[*x]), yy = z_abs1(&A[*y]); - if (xx > yy) return -1; - else if (xx < yy) return 1; - else return 0; -} - - -int -ilu_zcopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - doublecomplex *dense, /* modified - reset to zero on return */ - int drop_rule,/* in */ - milu_t milu, /* in */ - double drop_tol, /* in */ - int quota, /* maximum nonzero entries allowed */ - doublecomplex *sum, /* out - the sum of dropped entries */ - int *nnzUj, /* in - out */ - GlobalLU_t *Glu, /* modified */ - int *work /* working space with minimum size n, - * used by the second dropping rule */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - doublecomplex *ucol; - int *usub, *xusub; - int nzumax; - int m; /* number of entries in the nonzero U-segments */ - register double d_max = 0.0, d_min = 1.0 / dlamch_("Safe minimum"); - register double tmp; - doublecomplex zero = {0.0, 0.0}; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - *sum = zero; - if (drop_rule == NODROP) { - drop_tol = -1.0, quota = Glu->n; - } - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - 
segsze = krep - kfnz + 1; - - new_next = nextu + segsze; - while ( new_next > nzumax ) { - if ((mem_error = zLUMemXpand(jcol, nextu, UCOL, &nzumax, - Glu)) != 0) - return (mem_error); - ucol = Glu->ucol; - if ((mem_error = zLUMemXpand(jcol, nextu, USUB, &nzumax, - Glu)) != 0) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub++]; - tmp = z_abs1(&dense[irow]); - - /* first dropping rule */ - if (quota > 0 && tmp >= drop_tol) { - if (tmp > d_max) d_max = tmp; - if (tmp < d_min) d_min = tmp; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - nextu++; - } else { - switch (milu) { - case SMILU_1: - case SMILU_2: - z_add(sum, sum, &dense[irow]); - break; - case SMILU_3: - /* *sum += fabs(dense[irow]);*/ - sum->r += tmp; - break; - case SILU: - default: - break; - } -#ifdef DEBUG - num_drop_U++; -#endif - } - dense[irow] = zero; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - m = xusub[jcol + 1] - xusub[jcol]; - - /* second dropping rule */ - if (drop_rule & DROP_SECONDARY && m > quota) { - register double tol = d_max; - register int m0 = xusub[jcol] + m - 1; - - if (quota > 0) { - if (drop_rule & DROP_INTERP) { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / m); - } else { - A = &ucol[xusub[jcol]]; - for (i = 0; i < m; i++) work[i] = i; - qsort(work, m, sizeof(int), _compare_); - tol = fabs(usub[xusub[jcol] + work[quota]]); - } - } - for (i = xusub[jcol]; i <= m0; ) { - if (z_abs1(&ucol[i]) <= tol) { - switch (milu) { - case SMILU_1: - case SMILU_2: - z_add(sum, sum, &ucol[i]); - break; - case SMILU_3: - sum->r += tmp; - break; - case SILU: - default: - break; - } - ucol[i] = ucol[m0]; - usub[i] = usub[m0]; - m0--; - m--; -#ifdef DEBUG - num_drop_U++; -#endif - xusub[jcol + 1]--; - continue; - } - i++; - } - } - - if (milu == SMILU_2) { - sum->r = z_abs1(sum); sum->i = 0.0; - } - if (milu == 
SMILU_3) sum->i = 0.0; - - *nnzUj += m; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zdrop_row.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zdrop_row.c deleted file mode 100644 index 680cfa3a83..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zdrop_row.c +++ /dev/null @@ -1,321 +0,0 @@ - -/*! @file ilu_zdrop_row.c - * \brief Drop small rows from L - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * <\pre>
    - */
    -
    -#include 
    -#include 
    -#include "slu_zdefs.h"
    -
    -extern void zswap_(int *, doublecomplex [], int *, doublecomplex [], int *);
    -extern void zaxpy_(int *, doublecomplex *, doublecomplex [], int *, doublecomplex [], int *);
    -
    -static double *A;  /* used in _compare_ only */
    -static int _compare_(const void *a, const void *b)
    -{
    -    register int *x = (int *)a, *y = (int *)b;
    -    if (A[*x] - A[*y] > 0.0) return -1;
    -    else if (A[*x] - A[*y] < 0.0) return 1;
    -    else return 0;
    -}
    -
    -/*! \brief
    - * 
    - * Purpose
    - * =======
    - *    ilu_zdrop_row() - Drop some small rows from the previous 
    - *    supernode (L-part only).
    - * 
    - */ -int ilu_zdrop_row( - superlu_options_t *options, /* options */ - int first, /* index of the first column in the supernode */ - int last, /* index of the last column in the supernode */ - double drop_tol, /* dropping parameter */ - int quota, /* maximum nonzero entries allowed */ - int *nnzLj, /* in/out number of nonzeros in L(:, 1:last) */ - double *fill_tol, /* in/out - on exit, fill_tol=-num_zero_pivots, - * does not change if options->ILU_MILU != SMILU1 */ - GlobalLU_t *Glu, /* modified */ - double dwork[], /* working space with minimum size last-first+1 */ - int iwork[], /* working space with minimum size m - n, - * used by the second dropping rule */ - int lastc /* if lastc == 0, there is nothing after the - * working supernode [first:last]; - * if lastc == 1, there is one more column after - * the working supernode. */ ) -{ - register int i, j, k, m1; - register int nzlc; /* number of nonzeros in column last+1 */ - register int xlusup_first, xlsub_first; - int m, n; /* m x n is the size of the supernode */ - int r = 0; /* number of dropped rows */ - register double *temp; - register doublecomplex *lusup = Glu->lusup; - register int *lsub = Glu->lsub; - register int *xlsub = Glu->xlsub; - register int *xlusup = Glu->xlusup; - register double d_max = 0.0, d_min = 1.0; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - norm_t nrm = options->ILU_Norm; - doublecomplex zero = {0.0, 0.0}; - doublecomplex one = {1.0, 0.0}; - doublecomplex none = {-1.0, 0.0}; - int inc_diag; /* inc_diag = m + 1 */ - int nzp = 0; /* number of zero pivots */ - - xlusup_first = xlusup[first]; - xlsub_first = xlsub[first]; - m = xlusup[first + 1] - xlusup_first; - n = last - first + 1; - m1 = m - 1; - inc_diag = m + 1; - nzlc = lastc ? (xlusup[last + 2] - xlusup[last + 1]) : 0; - temp = dwork - n; - - /* Quick return if nothing to do. 
*/ - if (m == 0 || m == n || drop_rule == NODROP) - { - *nnzLj += m * n; - return 0; - } - - /* basic dropping: ILU(tau) */ - for (i = n; i <= m1; ) - { - /* the average abs value of ith row */ - switch (nrm) - { - case ONE_NORM: - temp[i] = dzasum_(&n, &lusup[xlusup_first + i], &m) / (double)n; - break; - case TWO_NORM: - temp[i] = dznrm2_(&n, &lusup[xlusup_first + i], &m) - / sqrt((double)n); - break; - case INF_NORM: - default: - k = izamax_(&n, &lusup[xlusup_first + i], &m) - 1; - temp[i] = z_abs1(&lusup[xlusup_first + i + m * k]); - break; - } - - /* drop small entries due to drop_tol */ - if (drop_rule & DROP_BASIC && temp[i] < drop_tol) - { - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - zaxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m].r += - z_abs1(&lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - zcopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - zswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m].r = - z_abs1(&lusup[xlusup_first + m1 + j * m]); - lusup[xlusup_first + m1 + j * m].i = 0.0; - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - continue; - } /* if dropping */ - else - { - if (temp[i] > d_max) d_max = temp[i]; - if (temp[i] < d_min) d_min = temp[i]; - } - i++; - } /* for */ - - /* Secondary dropping: drop more rows according to the quota. 
*/ - quota = ceil((double)quota / (double)n); - if (drop_rule & DROP_SECONDARY && m - r > quota) - { - register double tol = d_max; - - /* Calculate the second dropping tolerance */ - if (quota > n) - { - if (drop_rule & DROP_INTERP) /* by interpolation */ - { - d_max = 1.0 / d_max; d_min = 1.0 / d_min; - tol = 1.0 / (d_max + (d_min - d_max) * quota / (m - n - r)); - } - else /* by quick sort */ - { - register int *itemp = iwork - n; - A = temp; - for (i = n; i <= m1; i++) itemp[i] = i; - qsort(iwork, m1 - n + 1, sizeof(int), _compare_); - tol = temp[iwork[quota]]; - } - } - - for (i = n; i <= m1; ) - { - if (temp[i] <= tol) - { - register int j; - r++; - /* drop the current row and move the last undropped row here */ - if (r > 1) /* add to last row */ - { - /* accumulate the sum (for MILU) */ - switch (milu) - { - case SMILU_1: - case SMILU_2: - zaxpy_(&n, &one, &lusup[xlusup_first + i], &m, - &lusup[xlusup_first + m - 1], &m); - break; - case SMILU_3: - for (j = 0; j < n; j++) - lusup[xlusup_first + (m - 1) + j * m].r += - z_abs1(&lusup[xlusup_first + i + j * m]); - break; - case SILU: - default: - break; - } - zcopy_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - } /* if (r > 1) */ - else /* move to last row */ - { - zswap_(&n, &lusup[xlusup_first + m1], &m, - &lusup[xlusup_first + i], &m); - if (milu == SMILU_3) - for (j = 0; j < n; j++) { - lusup[xlusup_first + m1 + j * m].r = - z_abs1(&lusup[xlusup_first + m1 + j * m]); - lusup[xlusup_first + m1 + j * m].i = 0.0; - } - } - lsub[xlsub_first + i] = lsub[xlsub_first + m1]; - m1--; - temp[i] = temp[m1]; - - continue; - } - i++; - - } /* for */ - - } /* if secondary dropping */ - - for (i = n; i < m; i++) temp[i] = 0.0; - - if (r == 0) - { - *nnzLj += m * n; - return 0; - } - - /* add dropped entries to the diagnal */ - if (milu != SILU) - { - register int j; - doublecomplex t; - for (j = 0; j < n; j++) - { - zd_mult(&t, &lusup[xlusup_first + (m - 1) + j * m], - MILU_ALPHA); - switch (milu) 
- { - case SMILU_1: - if ( !(z_eq(&t, &none)) ) { - z_add(&t, &t, &one); - zz_mult(&lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - &t); - } - else - { - zd_mult( - &lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - *fill_tol); -#ifdef DEBUG - printf("[1] ZERO PIVOT: FILL col %d.\n", first + j); - fflush(stdout); -#endif - nzp++; - } - break; - case SMILU_2: - zd_mult(&lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - 1.0 + z_abs1(&t)); - break; - case SMILU_3: - z_add(&t, &t, &one); - zz_mult(&lusup[xlusup_first + j * inc_diag], - &lusup[xlusup_first + j * inc_diag], - &t); - break; - case SILU: - default: - break; - } - } - if (nzp > 0) *fill_tol = -nzp; - } - - /* Remove dropped entries from the memory and fix the pointers. */ - m1 = m - r; - for (j = 1; j < n; j++) - { - register int tmp1, tmp2; - tmp1 = xlusup_first + j * m1; - tmp2 = xlusup_first + j * m; - for (i = 0; i < m1; i++) - lusup[i + tmp1] = lusup[i + tmp2]; - } - for (i = 0; i < nzlc; i++) - lusup[xlusup_first + i + n * m1] = lusup[xlusup_first + i + n * m]; - for (i = 0; i < nzlc; i++) - lsub[xlsub[last + 1] - r + i] = lsub[xlsub[last + 1] + i]; - for (i = first + 1; i <= last + 1; i++) - { - xlusup[i] -= r * (i - first); - xlsub[i] -= r; - } - if (lastc) - { - xlusup[last + 2] -= r * n; - xlsub[last + 2] -= r; - } - - *nnzLj += (m - r) * n; - return r; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zpanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zpanel_dfs.c deleted file mode 100644 index 3f5a0819fa..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zpanel_dfs.c +++ /dev/null @@ -1,248 +0,0 @@ - -/*! @file ilu_zpanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols and - * record the entries with maximum absolute value in each column - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel.
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ -void -ilu_zpanel_dfs( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - doublecomplex *dense, /* out */ - double *amax, /* out - max. abs. value of each column in panel */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ -) -{ - - NCPformat *Astore; - doublecomplex *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - doublecomplex *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - double *amax_col; - register double tmp; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - amax_col = amax; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - *amax_col = 0.0; - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - tmp = z_abs1(&a[k]); - if (tmp > *amax_col) *amax_col = tmp; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the 
next nonzero */ - - /* For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. 
dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[xsup[supno[krep]]]; - maxdfs = xlsub[krep + 1]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... */ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xlsub[krep + 1]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - amax_col++; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zpivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zpivotL.c deleted file mode 100644 index f44bfbf0b4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zpivotL.c +++ /dev/null @@ -1,282 +0,0 @@ - -/*! @file ilu_zpivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - - -#include -#include -#include "slu_zdefs.h" - -#ifndef SGN -#define SGN(x) ((x)>=0?1:-1) -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *	     pivot row = k;
    - *	 ELSE IF abs(A_jj) >= thresh THEN
    - *	     pivot row = j;
    - *	 ELSE
    - *	     pivot row = m;
    - *
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0	  success;
    - *		   i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -ilu_zpivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by - * perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int diagind, /* diagonal of Pc*A*Pc' */ - int *swap, /* in/out record the row permutation */ - int *iswap, /* in/out inverse of swap, it is the same as - perm_r after the factorization */ - int *marker, /* in */ - int *pivrow, /* in/out, as an input if *usepr!=0 */ - double fill_tol, /* in - fill tolerance of current column - * used for a singular column */ - milu_t milu, /* in */ - doublecomplex drop_sum, /* in - computed in ilu_zcopy_to_ucol() - (MILU only) */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - int n; /* number of columns */ - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - register int pivptr; - int old_pivptr, diag, ptr0; - register double pivmax, rtemp; - double thresh; - doublecomplex temp; - doublecomplex *lu_sup_ptr; - doublecomplex *lu_col_ptr; - int *lsub_ptr; - register int isub, icol, k, itemp; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - flops_t *ops = stat->ops; - int info; - doublecomplex one = {1.0, 0.0}; - - /* Initialize pointers */ - n = Glu->n; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - - /* Determine the largest abs numerical value for partial pivoting; - 
Also search for user-specified pivot, and diagonal element. */ - pivmax = -1.0; - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - ptr0 = EMPTY; - for (isub = nsupc; isub < nsupr; ++isub) { - if (marker[lsub_ptr[isub]] > jcol) - continue; /* do not overlap with a later relaxed supernode */ - - switch (milu) { - case SMILU_1: - z_add(&temp, &lu_col_ptr[isub], &drop_sum); - rtemp = z_abs1(&temp); - break; - case SMILU_2: - case SMILU_3: - /* In this case, drop_sum contains the sum of the abs. value */ - rtemp = z_abs1(&lu_col_ptr[isub]); - break; - case SILU: - default: - rtemp = z_abs1(&lu_col_ptr[isub]); - break; - } - if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; } - if (*usepr && lsub_ptr[isub] == *pivrow) old_pivptr = isub; - if (lsub_ptr[isub] == diagind) diag = isub; - if (ptr0 == EMPTY) ptr0 = isub; - } - - if (milu == SMILU_2 || milu == SMILU_3) pivmax += drop_sum.r; - - /* Test for singularity */ - if (pivmax < 0.0) { -#if SCIPY_SPECIFIC_FIX - ABORT("[0]: matrix is singular"); -#else - fprintf(stderr, "[0]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - if ( pivmax == 0.0 ) { - if (diag != EMPTY) - *pivrow = lsub_ptr[pivptr = diag]; - else if (ptr0 != EMPTY) - *pivrow = lsub_ptr[pivptr = ptr0]; - else { - /* look for the first row which does not - belong to any later supernodes */ - for (icol = jcol; icol < n; icol++) - if (marker[swap[icol]] <= jcol) break; - if (icol >= n) { -#if SCIPY_SPECIFIC_FIX - ABORT("[1]: matrix is singular"); -#else - fprintf(stderr, "[1]: jcol=%d, SINGULAR!!!\n", jcol); - fflush(stderr); - exit(1); -#endif - } - - *pivrow = swap[icol]; - - /* pick up the pivot row */ - for (isub = nsupc; isub < nsupr; ++isub) - if ( lsub_ptr[isub] == *pivrow ) { pivptr = isub; break; } - } - pivmax = fill_tol; - lu_col_ptr[pivptr].r = pivmax; - lu_col_ptr[pivptr].i = 0.0; - *usepr = 0; -#ifdef DEBUG - printf("[0] ZERO PIVOT: FILL (%d, %d).\n", *pivrow, jcol); - fflush(stdout); -#endif - info =jcol + 1; - 
} /* if (*pivrow == 0.0) */ - else { - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. */ - if ( *usepr ) { - switch (milu) { - case SMILU_1: - z_add(&temp, &lu_col_ptr[old_pivptr], &drop_sum); - rtemp = z_abs1(&temp); - break; - case SMILU_2: - case SMILU_3: - rtemp = z_abs1(&lu_col_ptr[old_pivptr]) + drop_sum.r; - break; - case SILU: - default: - rtemp = z_abs1(&lu_col_ptr[old_pivptr]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = old_pivptr; - else *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - switch (milu) { - case SMILU_1: - z_add(&temp, &lu_col_ptr[diag], &drop_sum); - rtemp = z_abs1(&temp); - break; - case SMILU_2: - case SMILU_3: - rtemp = z_abs1(&lu_col_ptr[diag]) + drop_sum.r; - break; - case SILU: - default: - rtemp = z_abs1(&lu_col_ptr[diag]); - break; - } - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - info = 0; - - /* Reset the diagonal */ - switch (milu) { - case SMILU_1: - z_add(&lu_col_ptr[pivptr], &lu_col_ptr[pivptr], &drop_sum); - break; - case SMILU_2: - case SMILU_3: - temp = z_sgn(&lu_col_ptr[pivptr]); - zz_mult(&temp, &temp, &drop_sum); - z_add(&lu_col_ptr[pivptr], &lu_col_ptr[pivptr], &drop_sum); - break; - case SILU: - default: - break; - } - - } /* else */ - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - if (jcol < n - 1) { - register int t1, t2, t; - t1 = iswap[*pivrow]; t2 = jcol; - if (t1 != t2) { - t = swap[t1]; swap[t1] = swap[t2]; swap[t2] = t; - t1 = swap[t1]; t2 = t; - t = iswap[t1]; iswap[t1] = iswap[t2]; iswap[t2] = t; - } - } /* if (jcol < n - 1) */ - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += 10 * (nsupr - nsupc); - z_div(&temp, &one, &lu_col_ptr[nsupc]); - for (k = nsupc+1; k < nsupr; k++) - zz_mult(&lu_col_ptr[k], &lu_col_ptr[k], &temp); - - return info; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zsnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zsnode_dfs.c deleted file mode 100644 index e7a357de85..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ilu_zsnode_dfs.c +++ /dev/null @@ -1,90 +0,0 @@ - -/*! @file ilu_zsnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ilu_zsnode_dfs() - Determine the union of the row structures of those
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore,
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -ilu_zsnode_dfs( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, nextl; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) - { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) - { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) - { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) - { - if ( (mem_error = zLUMemXpand(jcol, nextl, LSUB, &nzlmax, - Glu)) != 0) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1 */ - if ( jcol < kcol ) - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xlsub[kcol+1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/izmax1.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/izmax1.c deleted file mode 100644 index d0154abdbc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/izmax1.c +++ /dev/null @@ -1,113 +0,0 @@ -/*! @file izmax1.c - * \brief Finds the index of the element whose real part has maximum absolute value - * - *
    - *     -- LAPACK auxiliary routine (version 2.0) --   
    - *     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *     Courant Institute, Argonne National Lab, and Rice University   
    - *     October 31, 1992   
    - * 
    - */ -#include -#include "slu_dcomplex.h" -#include "slu_Cnames.h" - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    IZMAX1 finds the index of the element whose real part has maximum   
    -    absolute value.   
    -
    -    Based on IZAMAX from Level 1 BLAS.   
    -    The change is to use the 'genuine' absolute value.   
    -
    -    Contributed by Nick Higham for use with ZLACON.   
    -
    -    Arguments   
    -    =========   
    -
    -    N       (input) INT   
    -            The number of elements in the vector CX.   
    -
    -    CX      (input) COMPLEX*16 array, dimension (N)   
    -            The vector whose elements will be summed.   
    -
    -    INCX    (input) INT   
    -            The spacing between successive values of CX.  INCX >= 1.   
    -
    -   ===================================================================== 
    -
    -*/ - -int -izmax1_(int *n, doublecomplex *cx, int *incx) -{ - - - /* System generated locals */ - int ret_val, i__1, i__2; - double d__1; - - /* Local variables */ - double smax; - int i, ix; - -#define CX(I) cx[(I)-1] - - ret_val = 0; - if (*n < 1) { - return ret_val; - } - ret_val = 1; - if (*n == 1) { - return ret_val; - } - if (*incx == 1) { - goto L30; - } - -/* CODE FOR INCREMENT NOT EQUAL TO 1 */ - - ix = 1; - smax = (d__1 = CX(1).r, fabs(d__1)); - ix += *incx; - i__1 = *n; - for (i = 2; i <= *n; ++i) { - i__2 = ix; - if ((d__1 = CX(ix).r, fabs(d__1)) <= smax) { - goto L10; - } - ret_val = i; - i__2 = ix; - smax = (d__1 = CX(ix).r, fabs(d__1)); -L10: - ix += *incx; -/* L20: */ - } - return ret_val; - -/* CODE FOR INCREMENT EQUAL TO 1 */ - -L30: - smax = (d__1 = CX(1).r, fabs(d__1)); - i__1 = *n; - for (i = 2; i <= *n; ++i) { - i__2 = i; - if ((d__1 = CX(i).r, fabs(d__1)) <= smax) { - goto L40; - } - ret_val = i; - i__2 = i; - smax = (d__1 = CX(i).r, fabs(d__1)); -L40: - ; - } - return ret_val; - -/* End of IZMAX1 */ - -} /* izmax1_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/lsame.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/lsame.c deleted file mode 100644 index 073a551173..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/lsame.c +++ /dev/null @@ -1,83 +0,0 @@ -/*! @file lsame.c - * \brief Check if CA is the same letter as CB regardless of case. - * - *
    - * -- LAPACK auxiliary routine (version 2.0) --   
    - *      Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *      Courant Institute, Argonne National Lab, and Rice University   
    - *      September 30, 1994   
    - * 
    - */ -#include "slu_Cnames.h" - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    LSAME returns .TRUE. if CA is the same letter as CB regardless of case.   
    -
    -    Arguments   
    -    =========   
    -
    -    CA      (input) CHARACTER*1   
    -    CB      (input) CHARACTER*1   
    -            CA and CB specify the single characters to be compared.   
    -
    -   ===================================================================== 
    -
    -*/ - -int lsame_(char *ca, char *cb) -{ - - - /* System generated locals */ - int ret_val; - - /* Local variables */ - int inta, intb, zcode; - - ret_val = *(unsigned char *)ca == *(unsigned char *)cb; - if (ret_val) { - return ret_val; - } - - /* Now test for equivalence if both characters are alphabetic. */ - - zcode = 'Z'; - - /* Use 'Z' rather than 'A' so that ASCII can be detected on Prime - machines, on which ICHAR returns a value with bit 8 set. - ICHAR('A') on Prime machines returns 193 which is the same as - ICHAR('A') on an EBCDIC machine. */ - - inta = *(unsigned char *)ca; - intb = *(unsigned char *)cb; - - if (zcode == 90 || zcode == 122) { - /* ASCII is assumed - ZCODE is the ASCII code of either lower or - upper case 'Z'. */ - if (inta >= 97 && inta <= 122) inta += -32; - if (intb >= 97 && intb <= 122) intb += -32; - - } else if (zcode == 233 || zcode == 169) { - /* EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or - upper case 'Z'. */ - if (inta >= 129 && inta <= 137 || inta >= 145 && inta <= 153 || inta - >= 162 && inta <= 169) - inta += 64; - if (intb >= 129 && intb <= 137 || intb >= 145 && intb <= 153 || intb - >= 162 && intb <= 169) - intb += 64; - } else if (zcode == 218 || zcode == 250) { - /* ASCII is assumed, on Prime machines - ZCODE is the ASCII code - plus 128 of either lower or upper case 'Z'. */ - if (inta >= 225 && inta <= 250) inta += -32; - if (intb >= 225 && intb <= 250) intb += -32; - } - ret_val = inta == intb; - return ret_val; - -} /* lsame_ */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/mark_relax.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/mark_relax.c deleted file mode 100644 index b2338aa85e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/mark_relax.c +++ /dev/null @@ -1,47 +0,0 @@ -/*! @file mark_relax.c - * \brief Record the rows pivoted by the relaxed supernodes. - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 1, 2009
    - * <\pre>
    - */
    -#include "slu_ddefs.h"
    -
    -/*! \brief
    - *
    - * 
    - * Purpose
    - * =======
    - *    mark_relax() - record the rows used by the relaxed supernodes.
    - * 
    - */ -int mark_relax( - int n, /* order of the matrix A */ - int *relax_end, /* last column in a relaxed supernode. - * if j-th column starts a relaxed supernode, - * relax_end[j] represents the last column of - * this supernode. */ - int *relax_fsupc, /* first column in a relaxed supernode. - * relax_fsupc[j] represents the first column of - * j-th supernode. */ - int *xa_begin, /* Astore->colbeg */ - int *xa_end, /* Astore->colend */ - int *asub, /* row index of A */ - int *marker /* marker[j] is the maximum column index if j-th - * row belongs to a relaxed supernode. */ ) -{ - register int jcol, kcol; - register int i, j, k; - - for (i = 0; i < n && relax_fsupc[i] != EMPTY; i++) - { - jcol = relax_fsupc[i]; /* first column */ - kcol = relax_end[jcol]; /* last column */ - for (j = jcol; j <= kcol; j++) - for (k = xa_begin[j]; k < xa_end[j]; k++) - marker[asub[k]] = jcol; - } - return i; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/memory.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/memory.c deleted file mode 100644 index 6706784f59..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/memory.c +++ /dev/null @@ -1,210 +0,0 @@ -/*! @file memory.c - * \brief Precision-independent memory-related routines - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -/** Precision-independent memory-related routines. - (Shared by [sdcz]memory.c) **/ - -#include "slu_ddefs.h" - - -#if ( DEBUGlevel>=1 ) /* Debug malloc/free. */ -int superlu_malloc_total = 0; - -#define PAD_FACTOR 2 -#define DWORD (sizeof(double)) /* Be sure it's no smaller than double. */ -/* size_t is usually defined as 'unsigned long' */ - -void *superlu_malloc(size_t size) -{ - char *buf; - - buf = (char *) malloc(size + DWORD); - if ( !buf ) { - printf("superlu_malloc fails: malloc_total %.0f MB, size %ld\n", - superlu_malloc_total*1e-6, size); - ABORT("superlu_malloc: out of memory"); - } - - ((int_t *) buf)[0] = size; -#if 0 - superlu_malloc_total += size + DWORD; -#else - superlu_malloc_total += size; -#endif - return (void *) (buf + DWORD); -} - -void superlu_free(void *addr) -{ - char *p = ((char *) addr) - DWORD; - - if ( !addr ) - ABORT("superlu_free: tried to free NULL pointer"); - - if ( !p ) - ABORT("superlu_free: tried to free NULL+DWORD pointer"); - - { - int_t n = ((int_t *) p)[0]; - - if ( !n ) - ABORT("superlu_free: tried to free a freed pointer"); - *((int_t *) p) = 0; /* Set to zero to detect duplicate free's. */ -#if 0 - superlu_malloc_total -= (n + DWORD); -#else - superlu_malloc_total -= n; -#endif - - if ( superlu_malloc_total < 0 ) - ABORT("superlu_malloc_total went negative!"); - - /*free (addr);*/ - free (p); - } - -} - -#else /* production mode */ - -void *superlu_malloc(size_t size) -{ - void *buf; - buf = (void *) malloc(size); - return (buf); -} - -void superlu_free(void *addr) -{ - free (addr); -} - -#endif - - -/*! \brief Set up pointers for integer working arrays. 
- */ -void -SetIWork(int m, int n, int panel_size, int *iworkptr, int **segrep, - int **parent, int **xplore, int **repfnz, int **panel_lsub, - int **xprune, int **marker) -{ - *segrep = iworkptr; - *parent = iworkptr + m; - *xplore = *parent + m; - *repfnz = *xplore + m; - *panel_lsub = *repfnz + panel_size * m; - *xprune = *panel_lsub + panel_size * m; - *marker = *xprune + n; - ifill (*repfnz, m * panel_size, EMPTY); - ifill (*panel_lsub, m * panel_size, EMPTY); -} - - -void -copy_mem_int(int howmany, void *old, void *new) -{ - register int i; - int *iold = old; - int *inew = new; - for (i = 0; i < howmany; i++) inew[i] = iold[i]; -} - - -void -user_bcopy(char *src, char *dest, int bytes) -{ - char *s_ptr, *d_ptr; - - s_ptr = src + bytes - 1; - d_ptr = dest + bytes - 1; - for (; d_ptr >= dest; --s_ptr, --d_ptr ) *d_ptr = *s_ptr; -} - - - -int *intMalloc(int n) -{ - int *buf; - buf = (int *) SUPERLU_MALLOC(n * sizeof(int)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC fails for buf in intMalloc()"); - } - return (buf); -} - -int *intCalloc(int n) -{ - int *buf; - register int i; - buf = (int *) SUPERLU_MALLOC(n * sizeof(int)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC fails for buf in intCalloc()"); - } - for (i = 0; i < n; ++i) buf[i] = 0; - return (buf); -} - - - -#if 0 -check_expanders() -{ - int p; - printf("Check expanders:\n"); - for (p = 0; p < NO_MEMTYPE; p++) { - printf("type %d, size %d, mem %d\n", - p, expanders[p].size, (int)expanders[p].mem); - } - - return 0; -} - - -StackInfo() -{ - printf("Stack: size %d, used %d, top1 %d, top2 %d\n", - stack.size, stack.used, stack.top1, stack.top2); - return 0; -} - - - -PrintStack(char *msg, GlobalLU_t *Glu) -{ - int i; - int *xlsub, *lsub, *xusub, *usub; - - xlsub = Glu->xlsub; - lsub = Glu->lsub; - xusub = Glu->xusub; - usub = Glu->usub; - - printf("%s\n", msg); - -/* printf("\nUCOL: "); - for (i = 0; i < xusub[ndim]; ++i) - printf("%f ", ucol[i]); - - printf("\nLSUB: "); - for (i = 0; i < xlsub[ndim]; ++i) - 
printf("%d ", lsub[i]); - - printf("\nUSUB: "); - for (i = 0; i < xusub[ndim]; ++i) - printf("%d ", usub[i]); - - printf("\n");*/ - return 0; -} -#endif - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/mmd.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/mmd.c deleted file mode 100644 index 05f26ce099..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/mmd.c +++ /dev/null @@ -1,1012 +0,0 @@ - -typedef int shortint; - -/* *************************************************************** */ -/* *************************************************************** */ -/* **** GENMMD ..... MULTIPLE MINIMUM EXTERNAL DEGREE **** */ -/* *************************************************************** */ -/* *************************************************************** */ - -/* AUTHOR - JOSEPH W.H. LIU */ -/* DEPT OF COMPUTER SCIENCE, YORK UNIVERSITY. */ - -/* PURPOSE - THIS ROUTINE IMPLEMENTS THE MINIMUM DEGREE */ -/* ALGORITHM. IT MAKES USE OF THE IMPLICIT REPRESENTATION */ -/* OF ELIMINATION GRAPHS BY QUOTIENT GRAPHS, AND THE */ -/* NOTION OF INDISTINGUISHABLE NODES. IT ALSO IMPLEMENTS */ -/* THE MODIFICATIONS BY MULTIPLE ELIMINATION AND MINIMUM */ -/* EXTERNAL DEGREE. */ -/* --------------------------------------------- */ -/* CAUTION - THE ADJACENCY VECTOR ADJNCY WILL BE */ -/* DESTROYED. */ -/* --------------------------------------------- */ - -/* INPUT PARAMETERS - */ -/* NEQNS - NUMBER OF EQUATIONS. */ -/* (XADJ,ADJNCY) - THE ADJACENCY STRUCTURE. */ -/* DELTA - TOLERANCE VALUE FOR MULTIPLE ELIMINATION. */ -/* MAXINT - MAXIMUM MACHINE REPRESENTABLE (SHORT) INTEGER */ -/* (ANY SMALLER ESTIMATE WILL DO) FOR MARKING */ -/* NODES. */ - -/* OUTPUT PARAMETERS - */ -/* PERM - THE MINIMUM DEGREE ORDERING. */ -/* INVP - THE INVERSE OF PERM. */ -/* NOFSUB - AN UPPER BOUND ON THE NUMBER OF NONZERO */ -/* SUBSCRIPTS FOR THE COMPRESSED STORAGE SCHEME. */ - -/* WORKING PARAMETERS - */ -/* DHEAD - VECTOR FOR HEAD OF DEGREE LISTS. 
*/ -/* INVP - USED TEMPORARILY FOR DEGREE FORWARD LINK. */ -/* PERM - USED TEMPORARILY FOR DEGREE BACKWARD LINK. */ -/* QSIZE - VECTOR FOR SIZE OF SUPERNODES. */ -/* LLIST - VECTOR FOR TEMPORARY LINKED LISTS. */ -/* MARKER - A TEMPORARY MARKER VECTOR. */ - -/* PROGRAM SUBROUTINES - */ -/* MMDELM, MMDINT, MMDNUM, MMDUPD. */ - -/* *************************************************************** */ - -/* Subroutine */ int genmmd_(int *neqns, int *xadj, shortint *adjncy, - shortint *invp, shortint *perm, int *delta, shortint *dhead, - shortint *qsize, shortint *llist, shortint *marker, int *maxint, - int *nofsub) -{ - /* System generated locals */ - int i__1; - - /* Local variables */ - static int mdeg, ehead, i, mdlmt, mdnode; - extern /* Subroutine */ int mmdelm_(int *, int *, shortint *, - shortint *, shortint *, shortint *, shortint *, shortint *, - shortint *, int *, int *), mmdupd_(int *, int *, - int *, shortint *, int *, int *, shortint *, shortint - *, shortint *, shortint *, shortint *, shortint *, int *, - int *), mmdint_(int *, int *, shortint *, shortint *, - shortint *, shortint *, shortint *, shortint *, shortint *), - mmdnum_(int *, shortint *, shortint *, shortint *); - static int nextmd, tag, num; - - -/* *************************************************************** */ - - -/* *************************************************************** */ - - /* Parameter adjustments */ - --marker; - --llist; - --qsize; - --dhead; - --perm; - --invp; - --adjncy; - --xadj; - - /* Function Body */ - if (*neqns <= 0) { - return 0; - } - -/* ------------------------------------------------ */ -/* INITIALIZATION FOR THE MINIMUM DEGREE ALGORITHM. */ -/* ------------------------------------------------ */ - *nofsub = 0; - mmdint_(neqns, &xadj[1], &adjncy[1], &dhead[1], &invp[1], &perm[1], & - qsize[1], &llist[1], &marker[1]); - -/* ---------------------------------------------- */ -/* NUM COUNTS THE NUMBER OF ORDERED NODES PLUS 1. 
*/ -/* ---------------------------------------------- */ - num = 1; - -/* ----------------------------- */ -/* ELIMINATE ALL ISOLATED NODES. */ -/* ----------------------------- */ - nextmd = dhead[1]; -L100: - if (nextmd <= 0) { - goto L200; - } - mdnode = nextmd; - nextmd = invp[mdnode]; - marker[mdnode] = *maxint; - invp[mdnode] = -num; - ++num; - goto L100; - -L200: -/* ---------------------------------------- */ -/* SEARCH FOR NODE OF THE MINIMUM DEGREE. */ -/* MDEG IS THE CURRENT MINIMUM DEGREE; */ -/* TAG IS USED TO FACILITATE MARKING NODES. */ -/* ---------------------------------------- */ - if (num > *neqns) { - goto L1000; - } - tag = 1; - dhead[1] = 0; - mdeg = 2; -L300: - if (dhead[mdeg] > 0) { - goto L400; - } - ++mdeg; - goto L300; -L400: -/* ------------------------------------------------- */ -/* USE VALUE OF DELTA TO SET UP MDLMT, WHICH GOVERNS */ -/* WHEN A DEGREE UPDATE IS TO BE PERFORMED. */ -/* ------------------------------------------------- */ - mdlmt = mdeg + *delta; - ehead = 0; - -L500: - mdnode = dhead[mdeg]; - if (mdnode > 0) { - goto L600; - } - ++mdeg; - if (mdeg > mdlmt) { - goto L900; - } - goto L500; -L600: -/* ---------------------------------------- */ -/* REMOVE MDNODE FROM THE DEGREE STRUCTURE. */ -/* ---------------------------------------- */ - nextmd = invp[mdnode]; - dhead[mdeg] = nextmd; - if (nextmd > 0) { - perm[nextmd] = -mdeg; - } - invp[mdnode] = -num; - *nofsub = *nofsub + mdeg + qsize[mdnode] - 2; - if (num + qsize[mdnode] > *neqns) { - goto L1000; - } -/* ---------------------------------------------- */ -/* ELIMINATE MDNODE AND PERFORM QUOTIENT GRAPH */ -/* TRANSFORMATION. RESET TAG VALUE IF NECESSARY. 
*/ -/* ---------------------------------------------- */ - ++tag; - if (tag < *maxint) { - goto L800; - } - tag = 1; - i__1 = *neqns; - for (i = 1; i <= i__1; ++i) { - if (marker[i] < *maxint) { - marker[i] = 0; - } -/* L700: */ - } -L800: - mmdelm_(&mdnode, &xadj[1], &adjncy[1], &dhead[1], &invp[1], &perm[1], & - qsize[1], &llist[1], &marker[1], maxint, &tag); - num += qsize[mdnode]; - llist[mdnode] = ehead; - ehead = mdnode; - if (*delta >= 0) { - goto L500; - } -L900: -/* ------------------------------------------- */ -/* UPDATE DEGREES OF THE NODES INVOLVED IN THE */ -/* MINIMUM DEGREE NODES ELIMINATION. */ -/* ------------------------------------------- */ - if (num > *neqns) { - goto L1000; - } - mmdupd_(&ehead, neqns, &xadj[1], &adjncy[1], delta, &mdeg, &dhead[1], & - invp[1], &perm[1], &qsize[1], &llist[1], &marker[1], maxint, &tag) - ; - goto L300; - -L1000: - mmdnum_(neqns, &perm[1], &invp[1], &qsize[1]); - return 0; - -} /* genmmd_ */ - -/* *************************************************************** */ -/* *************************************************************** */ -/* *** MMDINT ..... MULT MINIMUM DEGREE INITIALIZATION *** */ -/* *************************************************************** */ -/* *************************************************************** */ - -/* AUTHOR - JOSEPH W.H. LIU */ -/* DEPT OF COMPUTER SCIENCE, YORK UNIVERSITY. */ - -/* PURPOSE - THIS ROUTINE PERFORMS INITIALIZATION FOR THE */ -/* MULTIPLE ELIMINATION VERSION OF THE MINIMUM DEGREE */ -/* ALGORITHM. */ - -/* INPUT PARAMETERS - */ -/* NEQNS - NUMBER OF EQUATIONS. */ -/* (XADJ,ADJNCY) - ADJACENCY STRUCTURE. */ - -/* OUTPUT PARAMETERS - */ -/* (DHEAD,DFORW,DBAKW) - DEGREE DOUBLY LINKED STRUCTURE. */ -/* QSIZE - SIZE OF SUPERNODE (INITIALIZED TO ONE). */ -/* LLIST - LINKED LIST. */ -/* MARKER - MARKER VECTOR. 
*/ - -/* *************************************************************** */ - -/* Subroutine */ int mmdint_(int *neqns, int *xadj, shortint *adjncy, - shortint *dhead, shortint *dforw, shortint *dbakw, shortint *qsize, - shortint *llist, shortint *marker) -{ - /* System generated locals */ - int i__1; - - /* Local variables */ - static int ndeg, node, fnode; - - -/* *************************************************************** */ - - -/* *************************************************************** */ - - /* Parameter adjustments */ - --marker; - --llist; - --qsize; - --dbakw; - --dforw; - --dhead; - --adjncy; - --xadj; - - /* Function Body */ - i__1 = *neqns; - for (node = 1; node <= i__1; ++node) { - dhead[node] = 0; - qsize[node] = 1; - marker[node] = 0; - llist[node] = 0; -/* L100: */ - } -/* ------------------------------------------ */ -/* INITIALIZE THE DEGREE DOUBLY LINKED LISTS. */ -/* ------------------------------------------ */ - i__1 = *neqns; - for (node = 1; node <= i__1; ++node) { - ndeg = xadj[node + 1] - xadj[node] + 1; - fnode = dhead[ndeg]; - dforw[node] = fnode; - dhead[ndeg] = node; - if (fnode > 0) { - dbakw[fnode] = node; - } - dbakw[node] = -ndeg; -/* L200: */ - } - return 0; - -} /* mmdint_ */ - -/* *************************************************************** */ -/* *************************************************************** */ -/* ** MMDELM ..... MULTIPLE MINIMUM DEGREE ELIMINATION *** */ -/* *************************************************************** */ -/* *************************************************************** */ - -/* AUTHOR - JOSEPH W.H. LIU */ -/* DEPT OF COMPUTER SCIENCE, YORK UNIVERSITY. */ - -/* PURPOSE - THIS ROUTINE ELIMINATES THE NODE MDNODE OF */ -/* MINIMUM DEGREE FROM THE ADJACENCY STRUCTURE, WHICH */ -/* IS STORED IN THE QUOTIENT GRAPH FORMAT. IT ALSO */ -/* TRANSFORMS THE QUOTIENT GRAPH REPRESENTATION OF THE */ -/* ELIMINATION GRAPH. 
*/ - -/* INPUT PARAMETERS - */ -/* MDNODE - NODE OF MINIMUM DEGREE. */ -/* MAXINT - ESTIMATE OF MAXIMUM REPRESENTABLE (SHORT) */ -/* INT. */ -/* TAG - TAG VALUE. */ - -/* UPDATED PARAMETERS - */ -/* (XADJ,ADJNCY) - UPDATED ADJACENCY STRUCTURE. */ -/* (DHEAD,DFORW,DBAKW) - DEGREE DOUBLY LINKED STRUCTURE. */ -/* QSIZE - SIZE OF SUPERNODE. */ -/* MARKER - MARKER VECTOR. */ -/* LLIST - TEMPORARY LINKED LIST OF ELIMINATED NABORS. */ - -/* *************************************************************** */ - -/* Subroutine */ int mmdelm_(int *mdnode, int *xadj, shortint *adjncy, - shortint *dhead, shortint *dforw, shortint *dbakw, shortint *qsize, - shortint *llist, shortint *marker, int *maxint, int *tag) -{ - /* System generated locals */ - int i__1, i__2; - - /* Local variables */ - static int node, link, rloc, rlmt, i, j, nabor, rnode, elmnt, xqnbr, - istop, jstop, istrt, jstrt, nxnode, pvnode, nqnbrs, npv; - - -/* *************************************************************** */ - - -/* *************************************************************** */ - -/* ----------------------------------------------- */ -/* FIND REACHABLE SET AND PLACE IN DATA STRUCTURE. */ -/* ----------------------------------------------- */ - /* Parameter adjustments */ - --marker; - --llist; - --qsize; - --dbakw; - --dforw; - --dhead; - --adjncy; - --xadj; - - /* Function Body */ - marker[*mdnode] = *tag; - istrt = xadj[*mdnode]; - istop = xadj[*mdnode + 1] - 1; -/* ------------------------------------------------------- */ -/* ELMNT POINTS TO THE BEGINNING OF THE LIST OF ELIMINATED */ -/* NABORS OF MDNODE, AND RLOC GIVES THE STORAGE LOCATION */ -/* FOR THE NEXT REACHABLE NODE. 
*/ -/* ------------------------------------------------------- */ - elmnt = 0; - rloc = istrt; - rlmt = istop; - i__1 = istop; - for (i = istrt; i <= i__1; ++i) { - nabor = adjncy[i]; - if (nabor == 0) { - goto L300; - } - if (marker[nabor] >= *tag) { - goto L200; - } - marker[nabor] = *tag; - if (dforw[nabor] < 0) { - goto L100; - } - adjncy[rloc] = nabor; - ++rloc; - goto L200; -L100: - llist[nabor] = elmnt; - elmnt = nabor; -L200: - ; - } -L300: -/* ----------------------------------------------------- */ -/* MERGE WITH REACHABLE NODES FROM GENERALIZED ELEMENTS. */ -/* ----------------------------------------------------- */ - if (elmnt <= 0) { - goto L1000; - } - adjncy[rlmt] = -elmnt; - link = elmnt; -L400: - jstrt = xadj[link]; - jstop = xadj[link + 1] - 1; - i__1 = jstop; - for (j = jstrt; j <= i__1; ++j) { - node = adjncy[j]; - link = -node; - if (node < 0) { - goto L400; - } else if (node == 0) { - goto L900; - } else { - goto L500; - } -L500: - if (marker[node] >= *tag || dforw[node] < 0) { - goto L800; - } - marker[node] = *tag; -/* --------------------------------- */ -/* USE STORAGE FROM ELIMINATED NODES */ -/* IF NECESSARY. */ -/* --------------------------------- */ -L600: - if (rloc < rlmt) { - goto L700; - } - link = -adjncy[rlmt]; - rloc = xadj[link]; - rlmt = xadj[link + 1] - 1; - goto L600; -L700: - adjncy[rloc] = node; - ++rloc; -L800: - ; - } -L900: - elmnt = llist[elmnt]; - goto L300; -L1000: - if (rloc <= rlmt) { - adjncy[rloc] = 0; - } -/* -------------------------------------------------------- */ -/* FOR EACH NODE IN THE REACHABLE SET, DO THE FOLLOWING ... 
*/ -/* -------------------------------------------------------- */ - link = *mdnode; -L1100: - istrt = xadj[link]; - istop = xadj[link + 1] - 1; - i__1 = istop; - for (i = istrt; i <= i__1; ++i) { - rnode = adjncy[i]; - link = -rnode; - if (rnode < 0) { - goto L1100; - } else if (rnode == 0) { - goto L1800; - } else { - goto L1200; - } -L1200: -/* -------------------------------------------- */ -/* IF RNODE IS IN THE DEGREE LIST STRUCTURE ... */ -/* -------------------------------------------- */ - pvnode = dbakw[rnode]; - if (pvnode == 0 || pvnode == -(*maxint)) { - goto L1300; - } -/* ------------------------------------- */ -/* THEN REMOVE RNODE FROM THE STRUCTURE. */ -/* ------------------------------------- */ - nxnode = dforw[rnode]; - if (nxnode > 0) { - dbakw[nxnode] = pvnode; - } - if (pvnode > 0) { - dforw[pvnode] = nxnode; - } - npv = -pvnode; - if (pvnode < 0) { - dhead[npv] = nxnode; - } -L1300: -/* ---------------------------------------- */ -/* PURGE INACTIVE QUOTIENT NABORS OF RNODE. */ -/* ---------------------------------------- */ - jstrt = xadj[rnode]; - jstop = xadj[rnode + 1] - 1; - xqnbr = jstrt; - i__2 = jstop; - for (j = jstrt; j <= i__2; ++j) { - nabor = adjncy[j]; - if (nabor == 0) { - goto L1500; - } - if (marker[nabor] >= *tag) { - goto L1400; - } - adjncy[xqnbr] = nabor; - ++xqnbr; -L1400: - ; - } -L1500: -/* ---------------------------------------- */ -/* IF NO ACTIVE NABOR AFTER THE PURGING ... */ -/* ---------------------------------------- */ - nqnbrs = xqnbr - jstrt; - if (nqnbrs > 0) { - goto L1600; - } -/* ----------------------------- */ -/* THEN MERGE RNODE WITH MDNODE. */ -/* ----------------------------- */ - qsize[*mdnode] += qsize[rnode]; - qsize[rnode] = 0; - marker[rnode] = *maxint; - dforw[rnode] = -(*mdnode); - dbakw[rnode] = -(*maxint); - goto L1700; -L1600: -/* -------------------------------------- */ -/* ELSE FLAG RNODE FOR DEGREE UPDATE, AND */ -/* ADD MDNODE AS A NABOR OF RNODE. 
*/ -/* -------------------------------------- */ - dforw[rnode] = nqnbrs + 1; - dbakw[rnode] = 0; - adjncy[xqnbr] = *mdnode; - ++xqnbr; - if (xqnbr <= jstop) { - adjncy[xqnbr] = 0; - } - -L1700: - ; - } -L1800: - return 0; - -} /* mmdelm_ */ - -/* *************************************************************** */ -/* *************************************************************** */ -/* ***** MMDUPD ..... MULTIPLE MINIMUM DEGREE UPDATE ***** */ -/* *************************************************************** */ -/* *************************************************************** */ - -/* AUTHOR - JOSEPH W.H. LIU */ -/* DEPT OF COMPUTER SCIENCE, YORK UNIVERSITY. */ - -/* PURPOSE - THIS ROUTINE UPDATES THE DEGREES OF NODES */ -/* AFTER A MULTIPLE ELIMINATION STEP. */ - -/* INPUT PARAMETERS - */ -/* EHEAD - THE BEGINNING OF THE LIST OF ELIMINATED */ -/* NODES (I.E., NEWLY FORMED ELEMENTS). */ -/* NEQNS - NUMBER OF EQUATIONS. */ -/* (XADJ,ADJNCY) - ADJACENCY STRUCTURE. */ -/* DELTA - TOLERANCE VALUE FOR MULTIPLE ELIMINATION. */ -/* MAXINT - MAXIMUM MACHINE REPRESENTABLE (SHORT) */ -/* INTEGER. */ - -/* UPDATED PARAMETERS - */ -/* MDEG - NEW MINIMUM DEGREE AFTER DEGREE UPDATE. */ -/* (DHEAD,DFORW,DBAKW) - DEGREE DOUBLY LINKED STRUCTURE. */ -/* QSIZE - SIZE OF SUPERNODE. */ -/* LLIST - WORKING LINKED LIST. */ -/* MARKER - MARKER VECTOR FOR DEGREE UPDATE. */ -/* TAG - TAG VALUE. 
*/ - -/* *************************************************************** */ - -/* Subroutine */ int mmdupd_(int *ehead, int *neqns, int *xadj, - shortint *adjncy, int *delta, int *mdeg, shortint *dhead, - shortint *dforw, shortint *dbakw, shortint *qsize, shortint *llist, - shortint *marker, int *maxint, int *tag) -{ - /* System generated locals */ - int i__1, i__2; - - /* Local variables */ - static int node, mtag, link, mdeg0, i, j, enode, fnode, nabor, elmnt, - istop, jstop, q2head, istrt, jstrt, qxhead, iq2, deg, deg0; - - -/* *************************************************************** */ - - -/* *************************************************************** */ - - /* Parameter adjustments */ - --marker; - --llist; - --qsize; - --dbakw; - --dforw; - --dhead; - --adjncy; - --xadj; - - /* Function Body */ - mdeg0 = *mdeg + *delta; - elmnt = *ehead; -L100: -/* ------------------------------------------------------- */ -/* FOR EACH OF THE NEWLY FORMED ELEMENT, DO THE FOLLOWING. */ -/* (RESET TAG VALUE IF NECESSARY.) */ -/* ------------------------------------------------------- */ - if (elmnt <= 0) { - return 0; - } - mtag = *tag + mdeg0; - if (mtag < *maxint) { - goto L300; - } - *tag = 1; - i__1 = *neqns; - for (i = 1; i <= i__1; ++i) { - if (marker[i] < *maxint) { - marker[i] = 0; - } -/* L200: */ - } - mtag = *tag + mdeg0; -L300: -/* --------------------------------------------- */ -/* CREATE TWO LINKED LISTS FROM NODES ASSOCIATED */ -/* WITH ELMNT: ONE WITH TWO NABORS (Q2HEAD) IN */ -/* ADJACENCY STRUCTURE, AND THE OTHER WITH MORE */ -/* THAN TWO NABORS (QXHEAD). ALSO COMPUTE DEG0, */ -/* NUMBER OF NODES IN THIS ELEMENT. 
*/ -/* --------------------------------------------- */ - q2head = 0; - qxhead = 0; - deg0 = 0; - link = elmnt; -L400: - istrt = xadj[link]; - istop = xadj[link + 1] - 1; - i__1 = istop; - for (i = istrt; i <= i__1; ++i) { - enode = adjncy[i]; - link = -enode; - if (enode < 0) { - goto L400; - } else if (enode == 0) { - goto L800; - } else { - goto L500; - } - -L500: - if (qsize[enode] == 0) { - goto L700; - } - deg0 += qsize[enode]; - marker[enode] = mtag; -/* ---------------------------------- */ -/* IF ENODE REQUIRES A DEGREE UPDATE, */ -/* THEN DO THE FOLLOWING. */ -/* ---------------------------------- */ - if (dbakw[enode] != 0) { - goto L700; - } -/* --------------------------------------- -*/ -/* PLACE EITHER IN QXHEAD OR Q2HEAD LISTS. -*/ -/* --------------------------------------- -*/ - if (dforw[enode] == 2) { - goto L600; - } - llist[enode] = qxhead; - qxhead = enode; - goto L700; -L600: - llist[enode] = q2head; - q2head = enode; -L700: - ; - } -L800: -/* -------------------------------------------- */ -/* FOR EACH ENODE IN Q2 LIST, DO THE FOLLOWING. */ -/* -------------------------------------------- */ - enode = q2head; - iq2 = 1; -L900: - if (enode <= 0) { - goto L1500; - } - if (dbakw[enode] != 0) { - goto L2200; - } - ++(*tag); - deg = deg0; -/* ------------------------------------------ */ -/* IDENTIFY THE OTHER ADJACENT ELEMENT NABOR. */ -/* ------------------------------------------ */ - istrt = xadj[enode]; - nabor = adjncy[istrt]; - if (nabor == elmnt) { - nabor = adjncy[istrt + 1]; - } -/* ------------------------------------------------ */ -/* IF NABOR IS UNELIMINATED, INCREASE DEGREE COUNT. */ -/* ------------------------------------------------ */ - link = nabor; - if (dforw[nabor] < 0) { - goto L1000; - } - deg += qsize[nabor]; - goto L2100; -L1000: -/* -------------------------------------------- */ -/* OTHERWISE, FOR EACH NODE IN THE 2ND ELEMENT, */ -/* DO THE FOLLOWING. 
*/ -/* -------------------------------------------- */ - istrt = xadj[link]; - istop = xadj[link + 1] - 1; - i__1 = istop; - for (i = istrt; i <= i__1; ++i) { - node = adjncy[i]; - link = -node; - if (node == enode) { - goto L1400; - } - if (node < 0) { - goto L1000; - } else if (node == 0) { - goto L2100; - } else { - goto L1100; - } - -L1100: - if (qsize[node] == 0) { - goto L1400; - } - if (marker[node] >= *tag) { - goto L1200; - } -/* ----------------------------------- --- */ -/* CASE WHEN NODE IS NOT YET CONSIDERED -. */ -/* ----------------------------------- --- */ - marker[node] = *tag; - deg += qsize[node]; - goto L1400; -L1200: -/* ---------------------------------------- - */ -/* CASE WHEN NODE IS INDISTINGUISHABLE FROM - */ -/* ENODE. MERGE THEM INTO A NEW SUPERNODE. - */ -/* ---------------------------------------- - */ - if (dbakw[node] != 0) { - goto L1400; - } - if (dforw[node] != 2) { - goto L1300; - } - qsize[enode] += qsize[node]; - qsize[node] = 0; - marker[node] = *maxint; - dforw[node] = -enode; - dbakw[node] = -(*maxint); - goto L1400; -L1300: -/* -------------------------------------- -*/ -/* CASE WHEN NODE IS OUTMATCHED BY ENODE. -*/ -/* -------------------------------------- -*/ - if (dbakw[node] == 0) { - dbakw[node] = -(*maxint); - } -L1400: - ; - } - goto L2100; -L1500: -/* ------------------------------------------------ */ -/* FOR EACH ENODE IN THE QX LIST, DO THE FOLLOWING. */ -/* ------------------------------------------------ */ - enode = qxhead; - iq2 = 0; -L1600: - if (enode <= 0) { - goto L2300; - } - if (dbakw[enode] != 0) { - goto L2200; - } - ++(*tag); - deg = deg0; -/* --------------------------------- */ -/* FOR EACH UNMARKED NABOR OF ENODE, */ -/* DO THE FOLLOWING. 
*/ -/* --------------------------------- */ - istrt = xadj[enode]; - istop = xadj[enode + 1] - 1; - i__1 = istop; - for (i = istrt; i <= i__1; ++i) { - nabor = adjncy[i]; - if (nabor == 0) { - goto L2100; - } - if (marker[nabor] >= *tag) { - goto L2000; - } - marker[nabor] = *tag; - link = nabor; -/* ------------------------------ */ -/* IF UNELIMINATED, INCLUDE IT IN */ -/* DEG COUNT. */ -/* ------------------------------ */ - if (dforw[nabor] < 0) { - goto L1700; - } - deg += qsize[nabor]; - goto L2000; -L1700: -/* ------------------------------- -*/ -/* IF ELIMINATED, INCLUDE UNMARKED -*/ -/* NODES IN THIS ELEMENT INTO THE -*/ -/* DEGREE COUNT. */ -/* ------------------------------- -*/ - jstrt = xadj[link]; - jstop = xadj[link + 1] - 1; - i__2 = jstop; - for (j = jstrt; j <= i__2; ++j) { - node = adjncy[j]; - link = -node; - if (node < 0) { - goto L1700; - } else if (node == 0) { - goto L2000; - } else { - goto L1800; - } - -L1800: - if (marker[node] >= *tag) { - goto L1900; - } - marker[node] = *tag; - deg += qsize[node]; -L1900: - ; - } -L2000: - ; - } -L2100: -/* ------------------------------------------- */ -/* UPDATE EXTERNAL DEGREE OF ENODE IN DEGREE */ -/* STRUCTURE, AND MDEG (MIN DEG) IF NECESSARY. */ -/* ------------------------------------------- */ - deg = deg - qsize[enode] + 1; - fnode = dhead[deg]; - dforw[enode] = fnode; - dbakw[enode] = -deg; - if (fnode > 0) { - dbakw[fnode] = enode; - } - dhead[deg] = enode; - if (deg < *mdeg) { - *mdeg = deg; - } -L2200: -/* ---------------------------------- */ -/* GET NEXT ENODE IN CURRENT ELEMENT. */ -/* ---------------------------------- */ - enode = llist[enode]; - if (iq2 == 1) { - goto L900; - } - goto L1600; -L2300: -/* ----------------------------- */ -/* GET NEXT ELEMENT IN THE LIST. 
*/ -/* ----------------------------- */ - *tag = mtag; - elmnt = llist[elmnt]; - goto L100; - -} /* mmdupd_ */ - -/* *************************************************************** */ -/* *************************************************************** */ -/* ***** MMDNUM ..... MULTI MINIMUM DEGREE NUMBERING ***** */ -/* *************************************************************** */ -/* *************************************************************** */ - -/* AUTHOR - JOSEPH W.H. LIU */ -/* DEPT OF COMPUTER SCIENCE, YORK UNIVERSITY. */ - -/* PURPOSE - THIS ROUTINE PERFORMS THE FINAL STEP IN */ -/* PRODUCING THE PERMUTATION AND INVERSE PERMUTATION */ -/* VECTORS IN THE MULTIPLE ELIMINATION VERSION OF THE */ -/* MINIMUM DEGREE ORDERING ALGORITHM. */ - -/* INPUT PARAMETERS - */ -/* NEQNS - NUMBER OF EQUATIONS. */ -/* QSIZE - SIZE OF SUPERNODES AT ELIMINATION. */ - -/* UPDATED PARAMETERS - */ -/* INVP - INVERSE PERMUTATION VECTOR. ON INPUT, */ -/* IF QSIZE(NODE)=0, THEN NODE HAS BEEN MERGED */ -/* INTO THE NODE -INVP(NODE); OTHERWISE, */ -/* -INVP(NODE) IS ITS INVERSE LABELLING. */ - -/* OUTPUT PARAMETERS - */ -/* PERM - THE PERMUTATION VECTOR. 
*/ - -/* *************************************************************** */ - -/* Subroutine */ int mmdnum_(int *neqns, shortint *perm, shortint *invp, - shortint *qsize) -{ - /* System generated locals */ - int i__1; - - /* Local variables */ - static int node, root, nextf, father, nqsize, num; - - -/* *************************************************************** */ - - -/* *************************************************************** */ - - /* Parameter adjustments */ - --qsize; - --invp; - --perm; - - /* Function Body */ - i__1 = *neqns; - for (node = 1; node <= i__1; ++node) { - nqsize = qsize[node]; - if (nqsize <= 0) { - perm[node] = invp[node]; - } - if (nqsize > 0) { - perm[node] = -invp[node]; - } -/* L100: */ - } -/* ------------------------------------------------------ */ -/* FOR EACH NODE WHICH HAS BEEN MERGED, DO THE FOLLOWING. */ -/* ------------------------------------------------------ */ - i__1 = *neqns; - for (node = 1; node <= i__1; ++node) { - if (perm[node] > 0) { - goto L500; - } -/* ----------------------------------------- */ -/* TRACE THE MERGED TREE UNTIL ONE WHICH HAS */ -/* NOT BEEN MERGED, CALL IT ROOT. */ -/* ----------------------------------------- */ - father = node; -L200: - if (perm[father] > 0) { - goto L300; - } - father = -perm[father]; - goto L200; -L300: -/* ----------------------- */ -/* NUMBER NODE AFTER ROOT. */ -/* ----------------------- */ - root = father; - num = perm[root] + 1; - invp[node] = -num; - perm[root] = num; -/* ------------------------ */ -/* SHORTEN THE MERGED TREE. */ -/* ------------------------ */ - father = node; -L400: - nextf = -perm[father]; - if (nextf <= 0) { - goto L500; - } - perm[father] = -root; - father = nextf; - goto L400; -L500: - ; - } -/* ---------------------- */ -/* READY TO COMPUTE PERM. 
*/ -/* ---------------------- */ - i__1 = *neqns; - for (node = 1; node <= i__1; ++node) { - num = -invp[node]; - invp[node] = num; - perm[num] = node; -/* L600: */ - } - return 0; - -} /* mmdnum_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/relax_snode.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/relax_snode.c deleted file mode 100644 index f666b6e7a5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/relax_snode.c +++ /dev/null @@ -1,75 +0,0 @@ -/*! @file relax_snode.c - * \brief Identify initial relaxed supernodes - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_ddefs.h" -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    relax_snode() - Identify the initial relaxed supernodes, assuming that 
    - *    the matrix has been reordered according to the postorder of the etree.
    - * 
    - */ -void -relax_snode ( - const int n, - int *et, /* column elimination tree */ - const int relax_columns, /* max no of columns allowed in a - relaxed snode */ - int *descendants, /* no of descendants of each node - in the etree */ - int *relax_end /* last column in a supernode */ - ) -{ - - register int j, parent; - register int snode_start; /* beginning of a snode */ - - ifill (relax_end, n, EMPTY); - for (j = 0; j < n; j++) descendants[j] = 0; - - /* Compute the number of descendants of each node in the etree */ - for (j = 0; j < n; j++) { - parent = et[j]; - if ( parent != n ) /* not the dummy root */ - descendants[parent] += descendants[j] + 1; - } - - /* Identify the relaxed supernodes by postorder traversal of the etree. */ - for (j = 0; j < n; ) { - parent = et[j]; - snode_start = j; - while ( parent != n && descendants[parent] < relax_columns ) { - j = parent; - parent = et[j]; - } - /* Found a supernode with j being the last column. */ - relax_end[snode_start] = j; /* Last column is recorded */ - j++; - /* Search for a new leaf */ - while ( descendants[j] != 0 && j < n ) j++; - } - - /*printf("No of relaxed snodes: %d; relaxed columns: %d\n", - nsuper, no_relaxed_col); */ -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scipy_slu_config.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scipy_slu_config.h deleted file mode 100644 index 11ba5e3914..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scipy_slu_config.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef SCIPY_SLU_CONFIG_H -#define SCIPY_SLU_CONFIG_H - -#include - -/* - * Support routines - */ -void superlu_python_module_abort(char *msg); -void *superlu_python_module_malloc(size_t size); -void superlu_python_module_free(void *ptr); - -#define USER_ABORT superlu_python_module_abort -#define USER_MALLOC superlu_python_module_malloc -#define USER_FREE superlu_python_module_free - -#define SCIPY_SPECIFIC_FIX 1 - -/* - * Fortran configuration - */ -#if 
defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define UpCase 1 -#else -#define NoChange 1 -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#error Uppercase and trailing slash in Fortran names not supported -#else -#define Add_ 1 -#endif -#endif - -#endif diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scolumn_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scolumn_bmod.c deleted file mode 100644 index c1839d37fc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scolumn_bmod.c +++ /dev/null @@ -1,352 +0,0 @@ - -/*! @file scolumn_bmod.c - * \brief performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - *  Permission is hereby granted to use or copy this program for any
    - *  purpose, provided the above notices are retained on all copies.
    - *  Permission to modify the code and to distribute modified code is
    - *  granted, provided the above notices are retained, and a notice that
    - *  the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include -#include -#include "slu_sdefs.h" - -/* - * Function prototypes - */ -void susolve(int, int, float*, float*); -void slsolve(int, int, float*, float*); -void smatvec(int, int, int, float*, float*, float*); - - - -/*! \brief - * - *
    - * Purpose:
    - * ========
    - * Performs numeric block updates (sup-col) in topological order.
    - * It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - * Special processing on the supernodal portion of L\U[*,j]
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -scolumn_bmod ( - const int jcol, /* in */ - const int nseg, /* in */ - float *dense, /* in */ - float *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in */ - int fpanelc, /* in -- first column in the current panel */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - float alpha, beta; - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * nsupc = no of columns in supernode - * nsupr = no of rows in supernode (used as leading dimension) - * luptr = location of supernodal LU-block in storage - * kfnz = first nonz in the k-th supernodal segment - * no_zeros = no of leading zeros in a supernodal U-segment - */ - float ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int fsupc, nsupc, nsupr, segsze; - int nrow; /* No of rows in the matrix of matrix-vector */ - int jcolp1, jsupno, k, ksub, krep, krep_ind, ksupno; - register int lptr, kfnz, isub, irow, i; - register int no_zeros, new_next; - int ufirst, nextlu; - int fst_col; /* First column within small LU update */ - int d_fsupc; /* Distance between the first column of the current - panel and the first column of the current snode. 
*/ - int *xsup, *supno; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - int nzlumax; - float *tempv1; - float zero = 0.0; - float one = 1.0; - float none = -1.0; - int mem_error; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - nzlumax = Glu->nzlumax; - jcolp1 = jcol + 1; - jsupno = supno[jcol]; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - - krep = segrep[k]; - k--; - ksupno = supno[krep]; - if ( jsupno != ksupno ) { /* Outside the rectangular supernode */ - - fsupc = xsup[ksupno]; - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - /* Distance from the current supernode to the current panel; - d_fsupc=0 if fsupc > fpanelc. */ - d_fsupc = fst_col - fsupc; - - luptr = xlusup[fst_col] + d_fsupc; - lptr = xlsub[fsupc] + d_fsupc; - - kfnz = repfnz[krep]; - kfnz = SUPERLU_MAX ( kfnz, fpanelc ); - - segsze = krep - kfnz + 1; - nsupc = krep - fst_col + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nrow = nsupr - d_fsupc - nsupc; - krep_ind = lptr + nsupc - 1; - - ops[TRSV] += segsze * (segsze - 1); - ops[GEMV] += 2 * nrow * segsze; - - - /* - * Case 1: Update U-segment of size 1 -- col-col update - */ - if ( segsze == 1 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - dense[irow] -= ukj*lusup[luptr]; - luptr++; - } - - } else if ( segsze <= 3 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { /* Case 2: 2cols-col update */ - ukj -= ukj1 * lusup[luptr1]; - dense[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - dense[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] ); - } - } 
else { /* Case 3: 3cols-col update */ - ukj2 = dense[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - ukj1 -= ukj2 * lusup[luptr2-1]; - ukj = ukj - ukj1*lusup[luptr1] - ukj2*lusup[luptr2]; - dense[lsub[krep_ind]] = ukj; - dense[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - luptr2++; - dense[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] + ukj2*lusup[luptr2] ); - } - } - - - - } else { - /* - * Case: sup-col update - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense - */ - - no_zeros = kfnz - fst_col; - - /* Copy U[*,j] segment from dense[*] to tempv[*] */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - tempv[i] = dense[irow]; - ++isub; - } - - /* Dense triangular solve -- start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - strsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - SGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - sgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - slsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - smatvec (nsupr, nrow , segsze, &lusup[luptr], tempv, tempv1); -#endif - - - /* Scatter tempv[] into SPA dense[] as a temporary storage */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense[irow] = tempv[i]; - tempv[i] = zero; - ++isub; - } - - /* Scatter tempv1[] into SPA dense[] */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - dense[irow] -= tempv1[i]; - 
tempv1[i] = zero; - ++isub; - } - } - - } /* if jsupno ... */ - - } /* for each segment... */ - - /* - * Process the supernodal portion of L\U[*,j] - */ - nextlu = xlusup[jcol]; - fsupc = xsup[jsupno]; - - /* Copy the SPA dense into L\U[*,j] */ - new_next = nextlu + xlsub[fsupc+1] - xlsub[fsupc]; - while ( new_next > nzlumax ) { - if (mem_error = sLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, Glu)) - return (mem_error); - lusup = Glu->lusup; - lsub = Glu->lsub; - } - - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = zero; - ++nextlu; - } - - xlusup[jcolp1] = nextlu; /* Close L\U[*,jcol] */ - - /* For more updates within the panel (also within the current supernode), - * should start from the first column of the panel, or the first column - * of the supernode, whichever is bigger. There are 2 cases: - * 1) fsupc < fpanelc, then fst_col := fpanelc - * 2) fsupc >= fpanelc, then fst_col := fsupc - */ - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - if ( fst_col < jcol ) { - - /* Distance between the current supernode and the current panel. - d_fsupc=0 if fsupc >= fpanelc. 
*/ - d_fsupc = fst_col - fsupc; - - lptr = xlsub[fsupc] + d_fsupc; - luptr = xlusup[fst_col] + d_fsupc; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nsupc = jcol - fst_col; /* Excluding jcol */ - nrow = nsupr - d_fsupc - nsupc; - - /* Points to the beginning of jcol in snode L\U(jsupno) */ - ufirst = xlusup[jcol] + d_fsupc; - - ops[TRSV] += nsupc * (nsupc - 1); - ops[GEMV] += 2 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#else - strsv_( "L", "N", "U", &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#endif - - alpha = none; beta = one; /* y := beta*y + alpha*A*x */ - -#ifdef _CRAY - SGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - sgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - slsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - - smatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], tempv ); - - /* Copy updates from tempv[*] into lusup[*] */ - isub = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - lusup[isub] -= tempv[i]; - tempv[i] = 0.0; - ++isub; - } - -#endif - - - } /* if fst_col < jcol ... */ - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scolumn_dfs.c deleted file mode 100644 index 4a412ab9f4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scolumn_dfs.c +++ /dev/null @@ -1,275 +0,0 @@ - -/*! @file scolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include "slu_sdefs.h" - -/*! \brief What type of supernodes we want */ -#define T2_SUPER - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   SCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS 
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal 
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -scolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *xprune, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom, istop; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonz */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ( mem_error = sLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } 
else { - /* krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( mem_error = - sLUMemXpand(jcol,nextl,LSUB,&nzlmax,Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - -#ifdef T2_SUPER - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; -#endif - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first and last columns of - * a supernode. 
(first for num values, last for pruning) - */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1-1) ) { /* >= 3 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - istop = ito + jptr - jm1ptr; - xprune[jcolm1] = istop; /* Initialize xprune[jcol-1] */ - xlsub[jcol] = istop; - for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; /* = istop + length(jcol) */ - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xprune[jcol] = nextl; /* Initialize upper bound for pruning */ - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scomplex.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scomplex.c deleted file mode 100644 index 5114db48bf..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scomplex.c +++ /dev/null @@ -1,147 +0,0 @@ - -/*! @file scomplex.c - * \brief Common arithmetic for complex type - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * This file defines common arithmetic operations for complex type.
    - * 
    - */ - -#include -#include -#include -#include "slu_scomplex.h" - - -/*! \brief Complex Division c = a/b */ -void c_div(complex *c, complex *a, complex *b) -{ - float ratio, den; - float abr, abi, cr, ci; - - if( (abr = b->r) < 0.) - abr = - abr; - if( (abi = b->i) < 0.) - abi = - abi; - if( abr <= abi ) { - if (abi == 0) { - fprintf(stderr, "z_div.c: division by zero\n"); - exit(-1); - } - ratio = b->r / b->i ; - den = b->i * (1 + ratio*ratio); - cr = (a->r*ratio + a->i) / den; - ci = (a->i*ratio - a->r) / den; - } else { - ratio = b->i / b->r ; - den = b->r * (1 + ratio*ratio); - cr = (a->r + a->i*ratio) / den; - ci = (a->i - a->r*ratio) / den; - } - c->r = cr; - c->i = ci; -} - - -/*! \brief Returns sqrt(z.r^2 + z.i^2) */ -double slu_c_abs(complex *z) -{ - float temp; - float real = z->r; - float imag = z->i; - - if (real < 0) real = -real; - if (imag < 0) imag = -imag; - if (imag > real) { - temp = real; - real = imag; - imag = temp; - } - if ((real+imag) == real) return(real); - - temp = imag/real; - temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/ - return (temp); -} - - -/*! \brief Approximates the abs. Returns abs(z.r) + abs(z.i) */ -double slu_c_abs1(complex *z) -{ - float real = z->r; - float imag = z->i; - - if (real < 0) real = -real; - if (imag < 0) imag = -imag; - - return (real + imag); -} - -/*! \brief Return the exponentiation */ -void c_exp(complex *r, complex *z) -{ - float expx; - - expx = exp(z->r); - r->r = expx * cos(z->i); - r->i = expx * sin(z->i); -} - -/*! \brief Return the complex conjugate */ -void r_cnjg(complex *r, complex *z) -{ - r->r = z->r; - r->i = -z->i; -} - -/*! \brief Return the imaginary part */ -double r_imag(complex *z) -{ - return (z->i); -} - - -/*! \brief SIGN functions for complex number. 
Returns z/abs(z) */ -complex c_sgn(complex *z) -{ - register float t = slu_c_abs(z); - register complex retval; - - if (t == 0.0) { - retval.r = 1.0, retval.i = 0.0; - } else { - retval.r = z->r / t, retval.i = z->i / t; - } - - return retval; -} - -/*! \brief Square-root of a complex number. */ -complex c_sqrt(complex *z) -{ - complex retval; - register float cr, ci, real, imag; - - real = z->r; - imag = z->i; - - if ( imag == 0.0 ) { - retval.r = sqrt(real); - retval.i = 0.0; - } else { - ci = (sqrt(real*real + imag*imag) - real) / 2.0; - ci = sqrt(ci); - cr = imag / (2.0 * ci); - retval.r = cr; - retval.i = ci; - } - - return retval; -} - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scopy_to_ucol.c deleted file mode 100644 index 2f6399fe56..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scopy_to_ucol.c +++ /dev/null @@ -1,103 +0,0 @@ - -/*! @file scopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_sdefs.h" - -int -scopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - float *dense, /* modified - reset to zero on return */ - GlobalLU_t *Glu /* modified */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - float *ucol; - int *usub, *xusub; - int nzumax; - float zero = 0.0; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 1; - - new_next = nextu + segsze; - while ( new_next > nzumax ) { - if (mem_error = sLUMemXpand(jcol, nextu, UCOL, &nzumax, Glu)) - return (mem_error); - ucol = Glu->ucol; - if (mem_error = sLUMemXpand(jcol, nextu, USUB, &nzumax, Glu)) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - dense[irow] = zero; - nextu++; - isub++; - } - - } - - } - - } /* for each segment... */ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scsum1.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scsum1.c deleted file mode 100644 index 46afa76105..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/scsum1.c +++ /dev/null @@ -1,99 +0,0 @@ -/*! 
@file scsum1.c - * \brief Takes sum of the absolute values of a complex vector and returns a single precision result - * - *
    - *     -- LAPACK auxiliary routine (version 2.0) --   
    - *     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *     Courant Institute, Argonne National Lab, and Rice University   
    - *     October 31, 1992   
    - * 
    - */ -#include "slu_scomplex.h" -#include "slu_Cnames.h" - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    SCSUM1 takes the sum of the absolute values of a complex   
    -    vector and returns a single precision result.   
    -
    -    Based on SCASUM from the Level 1 BLAS.   
    -    The change is to use the 'genuine' absolute value.   
    -
    -    Contributed by Nick Higham for use with CLACON.   
    -
    -    Arguments   
    -    =========   
    -
    -    N       (input) INT
    -            The number of elements in the vector CX.   
    -
    -    CX      (input) COMPLEX array, dimension (N)   
    -            The vector whose elements will be summed.   
    -
    -    INCX    (input) INT
    -            The spacing between successive values of CX.  INCX > 0.   
    -
    -    ===================================================================== 
    -
    -*/ -double scsum1_(int *n, complex *cx, int *incx) -{ - /* System generated locals */ - int i__1, i__2; - float ret_val; - /* Builtin functions */ - double slu_c_abs(complex *); - /* Local variables */ - static int i, nincx; - static float stemp; - - -#define CX(I) cx[(I)-1] - - - ret_val = 0.f; - stemp = 0.f; - if (*n <= 0) { - return ret_val; - } - if (*incx == 1) { - goto L20; - } - -/* CODE FOR INCREMENT NOT EQUAL TO 1 */ - - nincx = *n * *incx; - i__1 = nincx; - i__2 = *incx; - for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) { - -/* NEXT LINE MODIFIED. */ - - stemp += slu_c_abs(&CX(i)); -/* L10: */ - } - ret_val = stemp; - return ret_val; - -/* CODE FOR INCREMENT EQUAL TO 1 */ - -L20: - i__2 = *n; - for (i = 1; i <= *n; ++i) { - -/* NEXT LINE MODIFIED. */ - - stemp += slu_c_abs(&CX(i)); -/* L30: */ - } - ret_val = stemp; - return ret_val; - -/* End of SCSUM1 */ - -} /* scsum1_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sdiagonal.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sdiagonal.c deleted file mode 100644 index 54a56c8a48..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sdiagonal.c +++ /dev/null @@ -1,129 +0,0 @@ - -/*! @file sdiagonal.c - * \brief Auxiliary routines to work with diagonal elements - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_sdefs.h" - -int sfill_diag(int n, NCformat *Astore) -/* fill explicit zeros on the diagonal entries, so that the matrix is not - structurally singular. */ -{ - float *nzval = (float *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - float *nzval_new; - float zero = 0.0; - int *rowind_new; - int i, j, diag; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = floatMalloc(nnz + fill); - rowind_new = intMalloc(nnz + fill); - fill = 0; - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - nzval_new[j + fill] = nzval[j]; - } - if (diag < 0) - { - rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill] = zero; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - Astore->nnz += fill; - return fill; -} - -int sdominate(int n, NCformat *Astore) -/* make the matrix diagonally dominant */ -{ - float *nzval = (float *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - float *nzval_new; - int *rowind_new; - int i, j, diag; - double s; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = floatMalloc(nnz + fill); - rowind_new = intMalloc(nnz+ fill); - fill = 0; - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - s += fabs(nzval_new[j + fill] = nzval[j]); - } - if (diag >= 0) { - nzval_new[diag+fill] = s * 3.0; - } else { - 
rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill] = s * 3.0; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - else - { - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - { - if (rowind[j] == i) diag = j; - s += fabs(nzval[j]); - } - nzval[diag] = s * 3.0; - } - } - Astore->nnz += fill; - return fill; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgscon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgscon.c deleted file mode 100644 index a4749673eb..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgscon.c +++ /dev/null @@ -1,157 +0,0 @@ - -/*! @file sgscon.c - * \brief Estimates reciprocal of the condition number of a general matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routines SGECON.
    - * 
    - */ - -/* - * File name: sgscon.c - * History: Modified from lapack routines SGECON. - */ -#include -#include "slu_sdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   SGSCON estimates the reciprocal of the condition number of a general 
    - *   real matrix A, in either the 1-norm or the infinity-norm, using   
    - *   the LU factorization computed by SGETRF.   *
    - *
    - *   An estimate is obtained for norm(inv(A)), and the reciprocal of the   
    - *   condition number is computed as   
    - *      RCOND = 1 / ( norm(A) * norm(inv(A)) ).   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - *   Arguments   
    - *   =========   
    - *
    - *    NORM    (input) char*
    - *            Specifies whether the 1-norm condition number or the   
    - *            infinity-norm condition number is required:   
    - *            = '1' or 'O':  1-norm;   
    - *            = 'I':         Infinity-norm.
    - *	    
    - *    L       (input) SuperMatrix*
    - *            The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *            sgstrf(). Use compressed row subscripts storage for supernodes,
    - *            i.e., L has types: Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - * 
    - *    U       (input) SuperMatrix*
    - *            The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *            sgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *            Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_TRU.
    - *	    
    - *    ANORM   (input) float
    - *            If NORM = '1' or 'O', the 1-norm of the original matrix A.   
    - *            If NORM = 'I', the infinity-norm of the original matrix A.
    - *	    
    - *    RCOND   (output) float*
    - *           The reciprocal of the condition number of the matrix A,   
    - *           computed as RCOND = 1/(norm(A) * norm(inv(A))).
    - *	    
    - *    INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    ===================================================================== 
    - * 
    - */ - -void -sgscon(char *norm, SuperMatrix *L, SuperMatrix *U, - float anorm, float *rcond, SuperLUStat_t *stat, int *info) -{ - - - /* Local variables */ - int kase, kase1, onenrm, i; - float ainvnm; - float *work; - int *iwork; - extern int srscl_(int *, float *, float *, int *); - - extern int slacon_(int *, float *, float *, int *, float *, int *); - - - /* Test the input parameters. */ - *info = 0; - onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O"); - if (! onenrm && ! lsame_(norm, "I")) *info = -1; - else if (L->nrow < 0 || L->nrow != L->ncol || - L->Stype != SLU_SC || L->Dtype != SLU_S || L->Mtype != SLU_TRLU) - *info = -2; - else if (U->nrow < 0 || U->nrow != U->ncol || - U->Stype != SLU_NC || U->Dtype != SLU_S || U->Mtype != SLU_TRU) - *info = -3; - if (*info != 0) { - i = -(*info); - xerbla_("sgscon", &i); - return; - } - - /* Quick return if possible */ - *rcond = 0.; - if ( L->nrow == 0 || U->nrow == 0) { - *rcond = 1.; - return; - } - - work = floatCalloc( 3*L->nrow ); - iwork = intMalloc( L->nrow ); - - - if ( !work || !iwork ) - ABORT("Malloc fails for work arrays in sgscon."); - - /* Estimate the norm of inv(A). */ - ainvnm = 0.; - if ( onenrm ) kase1 = 1; - else kase1 = 2; - kase = 0; - - do { - slacon_(&L->nrow, &work[L->nrow], &work[0], &iwork[0], &ainvnm, &kase); - - if (kase == 0) break; - - if (kase == kase1) { - /* Multiply by inv(L). */ - sp_strsv("L", "No trans", "Unit", L, U, &work[0], stat, info); - - /* Multiply by inv(U). */ - sp_strsv("U", "No trans", "Non-unit", L, U, &work[0], stat, info); - - } else { - - /* Multiply by inv(U'). */ - sp_strsv("U", "Transpose", "Non-unit", L, U, &work[0], stat, info); - - /* Multiply by inv(L'). */ - sp_strsv("L", "Transpose", "Unit", L, U, &work[0], stat, info); - - } - - } while ( kase != 0 ); - - /* Compute the estimate of the reciprocal condition number. */ - if (ainvnm != 0.) *rcond = (1. 
/ ainvnm) / anorm; - - SUPERLU_FREE (work); - SUPERLU_FREE (iwork); - return; - -} /* sgscon */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsequ.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsequ.c deleted file mode 100644 index dd7730d1ed..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsequ.c +++ /dev/null @@ -1,195 +0,0 @@ - -/*! @file sgsequ.c - * \brief Computes row and column scalings - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from LAPACK routine SGEEQU
    - * 
    - */ -/* - * File name: sgsequ.c - * History: Modified from LAPACK routine SGEEQU - */ -#include -#include "slu_sdefs.h" - - - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   SGSEQU computes row and column scalings intended to equilibrate an   
    - *   M-by-N sparse matrix A and reduce its condition number. R returns the row
    - *   scale factors and C the column scale factors, chosen to try to make   
    - *   the largest element in each row and column of the matrix B with   
    - *   elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1.   
    - *
    - *   R(i) and C(j) are restricted to be between SMLNUM = smallest safe   
    - *   number and BIGNUM = largest safe number.  Use of these scaling   
    - *   factors is not guaranteed to reduce the condition number of A but   
    - *   works well in practice.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input) SuperMatrix*
    - *           The matrix of dimension (A->nrow, A->ncol) whose equilibration
    - *           factors are to be computed. The type of A can be:
    - *           Stype = SLU_NC; Dtype = SLU_S; Mtype = SLU_GE.
    - *	    
    - *   R       (output) float*, size A->nrow
    - *           If INFO = 0 or INFO > M, R contains the row scale factors   
    - *           for A.
    - *	    
    - *   C       (output) float*, size A->ncol
    - *           If INFO = 0,  C contains the column scale factors for A.
    - *	    
    - *   ROWCND  (output) float*
    - *           If INFO = 0 or INFO > M, ROWCND contains the ratio of the   
    - *           smallest R(i) to the largest R(i).  If ROWCND >= 0.1 and   
    - *           AMAX is neither too large nor too small, it is not worth   
    - *           scaling by R.
    - *	    
    - *   COLCND  (output) float*
    - *           If INFO = 0, COLCND contains the ratio of the smallest   
    - *           C(i) to the largest C(i).  If COLCND >= 0.1, it is not   
    - *           worth scaling by C.
    - *	    
    - *   AMAX    (output) float*
    - *           Absolute value of largest matrix element.  If AMAX is very   
    - *           close to overflow or very close to underflow, the matrix   
    - *           should be scaled.
    - *	    
    - *   INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *           > 0:  if INFO = i,  and i is   
    - *                 <= A->nrow:  the i-th row of A is exactly zero   
    - *                 >  A->ncol:  the (i-M)-th column of A is exactly zero   
    - *
    - *   ===================================================================== 
    - * 
    - */ -void -sgsequ(SuperMatrix *A, float *r, float *c, float *rowcnd, - float *colcnd, float *amax, int *info) -{ - - - /* Local variables */ - NCformat *Astore; - float *Aval; - int i, j, irow; - float rcmin, rcmax; - float bignum, smlnum; - extern double slamch_(char *); - - /* Test the input parameters. */ - *info = 0; - if ( A->nrow < 0 || A->ncol < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_S || A->Mtype != SLU_GE ) - *info = -1; - if (*info != 0) { - i = -(*info); - xerbla_("sgsequ", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || A->ncol == 0 ) { - *rowcnd = 1.; - *colcnd = 1.; - *amax = 0.; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Get machine constants. */ - smlnum = slamch_("S"); - bignum = 1. / smlnum; - - /* Compute row scale factors. */ - for (i = 0; i < A->nrow; ++i) r[i] = 0.; - - /* Find the maximum element in each row. */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - r[irow] = SUPERLU_MAX( r[irow], fabs(Aval[i]) ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (i = 0; i < A->nrow; ++i) { - rcmax = SUPERLU_MAX(rcmax, r[i]); - rcmin = SUPERLU_MIN(rcmin, r[i]); - } - *amax = rcmax; - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (i = 0; i < A->nrow; ++i) - if (r[i] == 0.) { - *info = i + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (i = 0; i < A->nrow; ++i) - r[i] = 1. / SUPERLU_MIN( SUPERLU_MAX( r[i], smlnum ), bignum ); - /* Compute ROWCND = min(R(I)) / max(R(I)) */ - *rowcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - /* Compute column scale factors */ - for (j = 0; j < A->ncol; ++j) c[j] = 0.; - - /* Find the maximum element in each column, assuming the row - scalings computed above. 
*/ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - c[j] = SUPERLU_MAX( c[j], fabs(Aval[i]) * r[irow] ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->ncol; ++j) { - rcmax = SUPERLU_MAX(rcmax, c[j]); - rcmin = SUPERLU_MIN(rcmin, c[j]); - } - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (j = 0; j < A->ncol; ++j) - if ( c[j] == 0. ) { - *info = A->nrow + j + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (j = 0; j < A->ncol; ++j) - c[j] = 1. / SUPERLU_MIN( SUPERLU_MAX( c[j], smlnum ), bignum); - /* Compute COLCND = min(C(J)) / max(C(J)) */ - *colcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - return; - -} /* sgsequ */ - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsisx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsisx.c deleted file mode 100644 index 91301cd3e8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsisx.c +++ /dev/null @@ -1,693 +0,0 @@ - -/*! @file sgsisx.c - * \brief Gives the approximate solutions of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * SGSISX gives the approximate solutions of linear equations A*X=B or A'*X=B,
    - * using the ILU factorization from sgsitrf(). An estimation of
    - * the condition number is provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *	1.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A is
    - *	     overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *	     (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *	     = TRANS or CONJ).
    - *
    - *	1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *	     matrix that usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the matrix A (after equilibration if options->Equil = YES)
    - *	     as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *	1.4. Compute the reciprocal pivot growth factor.
    - *
    - *	1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n),
    - *	     and info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	1.6. The system of equations is solved for X using the factored form
    - *	     of A.
    - *
    - *	1.7. options->IterRefine is not used
    - *
    - *	1.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *	1.9. options for ILU only
    - *	     1) If options->RowPerm = LargeDiag, MC64 is used to scale and
    - *		permute the matrix to an I-matrix, that is Pr*Dr*A*Dc has
    - *		entries of modulus 1 on the diagonal and off-diagonal entries
    - *		of modulus at most 1. If MC64 fails, dgsequ() is used to
    - *		equilibrate the system.
    - *	     2) options->ILU_DropTol = tau is the threshold for dropping.
    - *		For L, it is used directly (for the whole row in a supernode);
    - *		For U, ||A(:,i)||_oo * tau is used as the threshold
    - *	        for the	i-th column.
    - *		If a secondary dropping rule is required, tau will
    - *	        also be used to compute the second threshold.
    - *	     3) options->ILU_FillFactor = gamma, used as the initial guess
    - *		of memory growth.
    - *		If a secondary dropping rule is required, it will also
    - *              be used as an upper bound of the memory.
    - *	     4) options->ILU_DropRule specifies the dropping rule.
    - *		Option		Explanation
    - *		======		===========
    - *		DROP_BASIC:	Basic dropping rule, supernodal based ILU.
    - *		DROP_PROWS:	Supernodal based ILUTP, p = gamma * nnz(A) / n.
    - *		DROP_COLUMN:	Variation of ILUTP, for j-th column,
    - *				p = gamma * nnz(A(:,j)).
    - *		DROP_AREA;	Variation of ILUTP, for j-th column, use
    - *				nnz(F(:,1:j)) / nnz(A(:,1:j)) to control the
    - *				memory.
    - *		DROP_DYNAMIC:	Modify the threshold tau during the
    - *				factorizaion.
    - *				If nnz(L(:,1:j)) / nnz(A(:,1:j)) < gamma
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				Otherwise
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				tau_U(j) uses the similar rule.
    - *				NOTE: the thresholds used by L and U are
    - *				indenpendent.
    - *		DROP_INTERP:	Compute the second dropping threshold by
    - *				interpolation instead of sorting (default).
    - *				In this case, the actual fill ratio is not
    - *				guaranteed smaller than gamma.
    - *		DROP_PROWS, DROP_COLUMN and DROP_AREA are mutually exclusive.
    - *		( The default option is DROP_BASIC | DROP_AREA. )
    - *	     5) options->ILU_Norm is the criterion of computing the average
    - *		value of a row in L.
    - *		options->ILU_Norm	average(x[1:n])
    - *		=================	===============
    - *		ONE_NORM		||x||_1 / n
    - *		TWO_NORM		||x||_2 / sqrt(n)
    - *		INF_NORM		max{|x[i]|}
    - *	     6) options->ILU_MILU specifies the type of MILU's variation.
    - *		= SILU (default): do not perform MILU;
    - *		= SMILU_1 (not recommended):
    - *		    U(i,i) := U(i,i) + sum(dropped entries);
    - *		= SMILU_2:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(dropped entries);
    - *		= SMILU_3:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(|dropped entries|);
    - *		NOTE: Even SMILU_1 does not preserve the column sum because of
    - *		late dropping.
    - *	     7) options->ILU_FillTol is used as the perturbation when
    - *		encountering zero pivots. If some U(i,i) = 0, so that U is
    - *		exactly singular, then
    - *		   U(i,i) := ||A(:,i)|| * options->ILU_FillTol ** (1 - i / n).
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *	to the transpose of A:
    - *
    - *	2.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A' is
    - *	     overwritten by diag(R)*A'*diag(C) and B by diag(R)*B
    - *	     (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *	2.2. Permute columns of transpose(A) (rows of A),
    - *	     forming transpose(A)*Pc, where Pc is a permutation matrix that
    - *	     usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the transpose(A) (after equilibration if
    - *	     options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *	     permutation Pr determined by partial pivoting.
    - *
    - *	2.4. Compute the reciprocal pivot growth factor.
    - *
    - *	2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		 U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n).
    - *	     And info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	2.6. The system of equations is solved for X using the factored form
    - *	     of transpose(A).
    - *
    - *	2.7. If options->IterRefine is not used.
    - *
    - *	2.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the LU decomposition will be performed and how the
    - *	   system will be solved.
    - *
    - * A	   (input/output) SuperMatrix*
    - *	   Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *	   of the linear equations is A->nrow. Currently, the type of A can be:
    - *	   Stype = SLU_NC or SLU_NR, Dtype = SLU_S, Mtype = SLU_GE.
    - *	   In the future, more general A may be handled.
    - *
    - *	   On entry, If options->Fact = FACTORED and equed is not 'N',
    - *	   then A must have been equilibrated by the scaling factors in
    - *	   R and/or C.
    - *	   On exit, A is not modified if options->Equil = NO, or if
    - *	   options->Equil = YES but equed = 'N' on exit.
    - *	   Otherwise, if options->Equil = YES and equed is not 'N',
    - *	   A is scaled as follows:
    - *	   If A->Stype = SLU_NC:
    - *	     equed = 'R':  A := diag(R) * A
    - *	     equed = 'C':  A := A * diag(C)
    - *	     equed = 'B':  A := diag(R) * A * diag(C).
    - *	   If A->Stype = SLU_NR:
    - *	     equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *	     equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *	     equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *	   which defines the permutation matrix Pc; perm_c[i] = j means
    - *	   column i of A is in position j in A*Pc.
    - *	   On exit, perm_c may be overwritten by the product of the input
    - *	   perm_c and a permutation that postorders the elimination tree
    - *	   of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *	   is already in postorder.
    - *
    - *	   If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *	   which describes permutation of columns of transpose(A) 
    - *	   (rows of A) as described above.
    - *
    - * perm_r  (input/output) int*
    - *	   If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *	   which defines the permutation matrix Pr, and is determined
    - *	   by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *	   position j in Pr*A.
    - *
    - *	   If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *	   determines permutation of rows of transpose(A)
    - *	   (columns of A) as described above.
    - *
    - *	   If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	   will try to use the input perm_r, unless a certain threshold
    - *	   criterion is violated. In that case, perm_r is overwritten by a
    - *	   new permutation determined by partial pivoting or diagonal
    - *	   threshold pivoting.
    - *	   Otherwise, perm_r is output argument.
    - *
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *	   Elimination tree of Pc'*A'*A*Pc.
    - *	   If options->Fact != FACTORED and options->Fact != DOFACT,
    - *	   etree is an input argument, otherwise it is an output argument.
    - *	   Note: etree is a vector of parent pointers for a forest whose
    - *	   vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *	   Specifies the form of equilibration that was done.
    - *	   = 'N': No equilibration.
    - *	   = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *	   = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *	   = 'B': Both row and column equilibration, i.e., A was replaced 
    - *		  by diag(R)*A*diag(C).
    - *	   If options->Fact = FACTORED, equed is an input argument,
    - *	   otherwise it is an output argument.
    - *
    - * R	   (input/output) float*, dimension (A->nrow)
    - *	   The row scale factors for A or transpose(A).
    - *	   If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *	   If equed = 'N' or 'C', R is not accessed.
    - *	   If options->Fact = FACTORED, R is an input argument,
    - *	       otherwise, R is output.
    - *	   If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *	       of R must be positive.
    - *
    - * C	   (input/output) float*, dimension (A->ncol)
    - *	   The column scale factors for A or transpose(A).
    - *	   If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *	   If equed = 'N' or 'R', C is not accessed.
    - *	   If options->Fact = FACTORED, C is an input argument,
    - *	       otherwise, C is output.
    - *	   If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *	       of C must be positive.
    - *
    - * L	   (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype SLU_= NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses compressed row subscripts storage for supernodes, i.e.,
    - *	   L has types: Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - *
    - * U	   (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype = SLU_NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses column-wise storage scheme, i.e., U has types:
    - *	   Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *	   User supplied workspace, should be large enough
    - *	   to hold data structures for factors L and U.
    - *	   On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 mem_usage->total_needed; no other side effects.
    - *
    - *	   See argument 'mem_usage' for memory usage statistics.
    - *
    - * B	   (input/output) SuperMatrix*
    - *	   B has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *	   On entry, the right hand side matrix.
    - *	   If B->ncol = 0, only LU decomposition is performed, the triangular
    - *			   solve is skipped.
    - *	   On exit,
    - *	      if equed = 'N', B is not modified; otherwise
    - *	      if A->Stype = SLU_NC:
    - *		 if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *		    B is overwritten by diag(R)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *		    B is overwritten by diag(C)*B;
    - *	      if A->Stype = SLU_NR:
    - *		 if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *		    B is overwritten by diag(C)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *		    B is overwritten by diag(R)*B.
    - *
    - * X	   (output) SuperMatrix*
    - *	   X has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *	   If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *	   to the original system of equations. Note that A and B are modified
    - *	   on exit if equed is not 'N', and the solution to the equilibrated
    - *	   system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *	   equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *	   and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) float*
    - *	   The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *	   The infinity norm is used. If recip_pivot_growth is much less
    - *	   than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) float*
    - *	   The estimate of the reciprocal condition number of the matrix A
    - *	   after equilibration (if done). If rcond is less than the machine
    - *	   precision (in particular, if rcond = 0), the matrix is singular
    - *	   to working precision. This condition is indicated by a return
    - *	   code of info > 0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *	   Record the memory usage statistics, consisting of following fields:
    - *	   - for_lu (float)
    - *	     The amount of space used in bytes for L\U data structures.
    - *	   - total_needed (float)
    - *	     The amount of space needed in bytes to perform factorization.
    - *	   - expansions (int)
    - *	     The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *	  Record the statistics on runtime and floating-point operation count.
    - *	  See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - *	   > 0: if info = i, and i is
    - *		<= A->ncol: number of zero pivots. They are replaced by small
    - *		      entries due to options->ILU_FillTol.
    - *		= A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *		      precision, meaning that the matrix is singular to
    - *		      working precision. Nevertheless, the solution and
    - *		      error bounds are computed because there are a number
    - *		      of situations where the computed solution can be more
    - *		      accurate than the value of RCOND would suggest.
    - *		> A->ncol+1: number of bytes allocated when memory allocation
    - *		      failure occurred, plus A->ncol.
    - * 
    - */ - -void -sgsisx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, float *R, float *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, - float *recip_pivot_growth, float *rcond, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info) -{ - - DNformat *Bstore, *Xstore; - float *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec, mc64; - trans_t trant; - char norm[1]; - int i, j, info1; - float amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - float diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - int *perm = NULL; - - /* External functions */ - extern float slangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - mc64 = (options->RowPerm == LargeDiag); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = slamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_S || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_S || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_S || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("sgsisx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - sCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact ) { - register int i, j; - NCformat *Astore = AA->Store; - int nnz = Astore->nnz; - int *colptr = Astore->colptr; - int *rowind = Astore->rowind; - float *nzval = (float *)Astore->nzval; - int n = AA->nrow; - - if ( mc64 ) { - *equed = 'B'; - rowequ = colequ = 1; - t0 = SuperLU_timer_(); - if ((perm = intMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for perm[]"); - - info1 = sldperm(5, n, nnz, colptr, rowind, nzval, perm, R, C); - - if (info1 > 0) { /* MC64 fails, call sgsequ() later */ - mc64 = 0; - SUPERLU_FREE(perm); - perm = NULL; - } else { - for (i = 0; i < n; i++) { - R[i] = exp(R[i]); - C[i] = exp(C[i]); - } - /* permute and scale the matrix */ - for (j = 0; j < n; j++) { - for (i = colptr[j]; i < colptr[j + 1]; i++) { - nzval[i] *= R[rowind[i]] * C[j]; - rowind[i] = perm[rowind[i]]; - } - } - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - if ( !mc64 & equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - sgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - slaqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. 
*/ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= R[i]; - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= C[i]; - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t0; - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - sgsitrf(options, &AC, relax, panel_size, etree, work, lwork, - perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) return; - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = sPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = slangs(norm, AA); - sgscon(norm, L, U, anorm, rcond, stat, &info1); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. 
*/ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - sgstrs (trant, L, U, perm_c, perm_r, X, stat, &info1); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original - system. */ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= C[i]; - } - } - } else { - if ( rowequ ) { - if (perm) { - float *tmp; - int n = A->nrow; - - if ((tmp = floatMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for tmp[]"); - for (j = 0; j < nrhs; j++) { - for (i = 0; i < n; i++) - tmp[i] = Xmat[i + j * ldx]; /*dcopy*/ - for (i = 0; i < n; i++) - Xmat[i + j * ldx] = R[i] * tmp[perm[i]]; - } - SUPERLU_FREE(tmp); - } else { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= R[i]; - } - } - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < slamch_("E") && *info == 0) *info = A->ncol + 1; - } - - if (perm) SUPERLU_FREE(perm); - - if ( nofact ) { - ilu_sQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsitrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsitrf.c deleted file mode 100644 index d48de25c67..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsitrf.c +++ /dev/null @@ -1,625 +0,0 @@ - -/*! @file sgsitf.c - * \brief Computes an ILU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_sdefs.h" - -#ifdef DEBUG -int num_drop_L; -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * SGSITRF computes an ILU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the ILU decomposition will be performed.
    - *
    - * A	    (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *	    (A->nrow, A->ncol). The type of A can be:
    - *	    Stype = SLU_NCP; Dtype = SLU_S; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *	    To control degree of relaxing supernodes. If the number
    - *	    of nodes (columns) in a subtree of the elimination tree is less
    - *	    than relax, this subtree is considered as one supernode,
    - *	    regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *	    A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *	    Elimination tree of A'*A.
    - *	    Note: etree is a vector of parent pointers for a forest whose
    - *	    vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *	    On input, the columns of A should be permuted so that the
    - *	    etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *	    User-supplied work space and space for the output data structures.
    - *	    Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the
    - *	    permutation matrix Pc; perm_c[i] = j means column i of A is
    - *	    in position j in A*Pc.
    - *	    When searching for diagonal, perm_c[*] is applied to the
    - *	    row subscripts of A, so that diagonal threshold pivoting
    - *	    can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *	    Row permutation vector which defines the permutation matrix Pr,
    - *	    perm_r[i] = j means row i of A is in position j in Pr*A.
    - *	    If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	       will try to use the input perm_r, unless a certain threshold
    - *	       criterion is violated. In that case, perm_r is overwritten by
    - *	       a new permutation determined by partial pivoting or diagonal
    - *	       threshold pivoting.
    - *	    Otherwise, perm_r is output argument;
    - *
    - * L	    (output) SuperMatrix*
    - *	    The factor L from the factorization Pr*A=L*U; use compressed row
    - *	    subscripts storage for supernodes, i.e., L has type:
    - *	    Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - *
    - * U	    (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *	    storage scheme, i.e., U has types: Stype = SLU_NC,
    - *	    Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *	    Record the statistics on runtime and floating-point operation count.
    - *	    See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *	    = 0: successful exit
    - *	    < 0: if info = -i, the i-th argument had an illegal value
    - *	    > 0: if info = i, and i is
    - *	       <= A->ncol: number of zero pivots. They are replaced by small
    - *		  entries according to options->ILU_FillTol.
    - *	       > A->ncol: number of bytes allocated when memory allocation
    - *		  failure occurred, plus A->ncol. If lwork = -1, it is
    - *		  the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays:
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 4 of them:
    - *	      marker/marker1 are used for panel dfs, see (ilu_)dpanel_dfs.c;
    - *	      marker2 is used for inner-factorization, see (ilu)_dcolumn_dfs.c;
    - *	      marker_relax(has its own space) is used for relaxed supernodes.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *	Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs)
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the
    - *	last column of a supernode.
    - *	The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a
    - *	supernodal representative r, repfnz[r] is the location of the first
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel.
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below
    - *	the panel diagonal. These are filled in during dpanel_dfs(), and are
    - *	used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *		   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_util.h.
    - *	It is also used by the dropping routine ilu_ddrop_row().
    - * 
    - */ - -void -sgsitrf(superlu_options_t *options, SuperMatrix *A, int relax, int panel_size, - int *etree, void *work, int lwork, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *swap, *iswap; /* swap is used to store the row permutation - during the factorization. Initially, it is set - to iperm_c (row indeces of Pc*A*Pc'). - iswap is the inverse of swap. After the - factorization, it is equal to perm_r. */ - int *iwork; - float *swork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *marker, *marker_relax; - float *dense, *tempv; - int *relax_end, *relax_fsupc; - float *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - float *amax; - float drop_sum; - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. 
*/ - int *iwork2; /* used by the second dropping rule */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - double drop_tol = options->ILU_DropTol; /* tau */ - double fill_ini = options->ILU_FillTol; /* tau^hat */ - double gamma = options->ILU_FillFactor; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - double fill_tol; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - int last_drop;/* the last column which the dropping rules applied */ - int quota; - int nnzAj; /* number of nonzeros in A(:,1:j) */ - int nnzLj, nnzUj; - double tol_L = drop_tol, tol_U = drop_tol; - float zero = 0.0; - - /* Executable */ - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = sLUMemInit(fact, work, lwork, m, n, Astore->nnz, panel_size, - gamma, L, U, &Glu, &iwork, &swork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; - xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &marker_relax, &marker); - sSetRWork(m, panel_size, swork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - for (k = 
0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - swap = (int *)intMalloc(n); - for (k = 0; k < n; k++) swap[k] = iperm_c[k]; - iswap = (int *)intMalloc(n); - for (k = 0; k < n; k++) iswap[k] = perm_c[k]; - amax = (float *) floatMalloc(panel_size); - if (drop_rule & DROP_SECONDARY) - iwork2 = (int *)intMalloc(n); - else - iwork2 = NULL; - - nnzAj = 0; - nnzLj = 0; - nnzUj = 0; - last_drop = SUPERLU_MAX(min_mn - 2 * sp_ienv(3), (int)(min_mn * 0.95)); - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - relax_fsupc = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) - ilu_heap_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - else - ilu_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* Mark the rows used by relaxed supernodes */ - ifill (marker_relax, m, EMPTY); - i = mark_relax(m, relax_end, relax_fsupc, xa_begin, xa_end, - asub, marker_relax); -#if ( PRNTlevel >= 1) - printf("%d relaxed supernodes.\n", i); -#endif - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* Drop small rows in the previous supernode. 
*/ - if (jcol > 0 && jcol < last_drop) { - int first = xsup[supno[jcol - 1]]; - int last = jcol - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / min_mn); - - /* Drop small rows */ - i = ilu_sdrop_row(options, first, last, tol_L, quota, &nnzLj, - &fill_tol, &Glu, tempv, iwork2, 0); - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = ilu_ssnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ((*info = sLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu))) - return; - } - - for (icol = jcol; icol <= kcol; icol++) { - xusub[icol+1] = nextu; - - amax[0] = 0.0; - /* Scatter into SPA dense[*] */ - for (k = xa_begin[icol]; k < xa_end[icol]; k++) { - register float tmp = fabs(a[k]); - if (tmp > amax[0]) amax[0] = tmp; - dense[asub[k]] = a[k]; - } - nnzAj += xa_end[icol] - xa_begin[icol]; 
- if (amax[0] == 0.0) { - amax[0] = fill_ini; -#if ( PRNTlevel >= 1) - printf("Column %d is entirely zero!\n", icol); - fflush(stdout); -#endif - } - - /* Numeric update within the snode */ - ssnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if (usepr) pivrow = iperm_r[icol]; - fill_tol = pow(fill_ini, 1.0 - (double)icol / (double)min_mn); - if ( (*info = ilu_spivotL(icol, diag_pivot_thresh, &usepr, - perm_r, iperm_c[icol], swap, iswap, - marker_relax, &pivrow, - amax[0] * fill_tol, milu, zero, - &Glu, stat)) ) { - iinfo++; - marker[pivrow] = kcol; - } - - } - - jcol = kcol + 1; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - ilu_spanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, amax, panel_lsub, segrep, repfnz, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - spanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for (jj = jcol; jj < jcol + panel_size; jj++) { - - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - nnzAj += xa_end[jj] - xa_begin[jj]; - - if ((*info = ilu_scolumn_dfs(m, jj, perm_r, &nseg, - &panel_lsub[k], segrep, &repfnz[k], - marker, parent, xplore, &Glu))) - return; - - /* Numeric updates */ - if ((*info = scolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Make a fill-in position if the column is entirely zero */ - if (xlsub[jj + 1] == xlsub[jj]) { - register int 
i, row; - int nextl; - int nzlmax = Glu.nzlmax; - int *lsub = Glu.lsub; - int *marker2 = marker + 2 * m; - - /* Allocate memory */ - nextl = xlsub[jj] + 1; - if (nextl >= nzlmax) { - int error = sLUMemXpand(jj, nextl, LSUB, &nzlmax, &Glu); - if (error) { *info = error; return; } - lsub = Glu.lsub; - } - xlsub[jj + 1]++; - assert(xlusup[jj]==xlusup[jj+1]); - xlusup[jj + 1]++; - Glu.lusup[xlusup[jj]] = zero; - - /* Choose a row index (pivrow) for fill-in */ - for (i = jj; i < n; i++) - if (marker_relax[swap[i]] <= jj) break; - row = swap[i]; - marker2[row] = jj; - lsub[xlsub[jj]] = row; -#ifdef DEBUG - printf("Fill col %d.\n", jj); - fflush(stdout); -#endif - } - - /* Computer the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * jj / m; - else if (drop_rule & DROP_COLUMN) - quota = gamma * (xa_end[jj] - xa_begin[jj]) * - (jj + 1) / m; - else if (drop_rule & DROP_AREA) - quota = gamma * 0.9 * nnzAj * 0.5 - nnzUj; - else - quota = m; - - /* Copy the U-segments to ucol[*] and drop small entries */ - if ((*info = ilu_scopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], drop_rule, - milu, amax[jj - jcol] * tol_U, - quota, &drop_sum, &nnzUj, &Glu, - iwork2)) != 0) - return; - - /* Reset the dropping threshold if required */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * 0.9 * nnzAj * 0.5 < nnzLj) - tol_U = SUPERLU_MIN(1.0, tol_U * 2.0); - else - tol_U = SUPERLU_MAX(drop_tol, tol_U * 0.5); - } - - drop_sum *= MILU_ALPHA; - if (usepr) pivrow = iperm_r[jj]; - fill_tol = pow(fill_ini, 1.0 - (double)jj / (double)min_mn); - if ( (*info = ilu_spivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_c[jj], swap, iswap, - marker_relax, &pivrow, - amax[jj - jcol] * fill_tol, milu, - drop_sum, &Glu, stat)) ) { - iinfo++; - marker[m + pivrow] = jj; - marker[2 * m + pivrow] = jj; - } - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - - /* Start a new supernode, drop the previous one */ - if (jj > 0 && supno[jj] > 
supno[jj - 1] && jj < last_drop) { - int first = xsup[supno[jj - 1]]; - int last = jj - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) - / m) - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / - (double)min_mn); - - /* Drop small rows */ - i = ilu_sdrop_row(options, first, last, tol_L, quota, - &nnzLj, &fill_tol, &Glu, tempv, iwork2, - 1); - - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } /* if start a new supernode */ - - } /* for */ - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - ilu_countnz(min_mn, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - sLUWorkFree(iwork, swork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. 
- There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - sCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_S, SLU_TRLU); - sCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_S, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - SUPERLU_FREE (swap); - SUPERLU_FREE (iswap); - SUPERLU_FREE (relax_fsupc); - SUPERLU_FREE (amax); - if ( iwork2 ) SUPERLU_FREE (iwork2); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsrfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsrfs.c deleted file mode 100644 index c63b83f379..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgsrfs.c +++ /dev/null @@ -1,452 +0,0 @@ - -/*! @file sgsrfs.c - * \brief Improves computed solution to a system of inear equations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routine SGERFS
    - * 
    - */ -/* - * File name: sgsrfs.c - * History: Modified from lapack routine SGERFS - */ -#include -#include "slu_sdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   SGSRFS improves the computed solution to a system of linear   
    - *   equations and provides error bounds and backward error estimates for 
    - *   the solution.   
    - *
    - *   If equilibration was performed, the system becomes:
    - *           (diag(R)*A_original*diag(C)) * X = diag(R)*B_original.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *   
    - *   A       (input) SuperMatrix*
    - *           The original matrix A in the system, or the scaled A if
    - *           equilibration was done. The type of A can be:
    - *           Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_GE.
    - *    
    - *   L       (input) SuperMatrix*
    - *	     The factor L from the factorization Pr*A*Pc=L*U. Use
    - *           compressed row subscripts storage for supernodes, 
    - *           i.e., L has types: Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - * 
    - *   U       (input) SuperMatrix*
    - *           The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *           sgstrf(). Use column-wise storage scheme, 
    - *           i.e., U has types: Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - *   perm_c  (input) int*, dimension (A->ncol)
    - *	     Column permutation vector, which defines the 
    - *           permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *           in position j in A*Pc.
    - *
    - *   perm_r  (input) int*, dimension (A->nrow)
    - *           Row permutation vector, which defines the permutation matrix Pr;
    - *           perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - *   equed   (input) Specifies the form of equilibration that was done.
    - *           = 'N': No equilibration.
    - *           = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *           = 'C': Column equilibration, i.e., A was postmultiplied by
    - *                  diag(C).
    - *           = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                  by diag(R)*A*diag(C).
    - *
    - *   R       (input) float*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *           If equed = 'R' or 'B', A is premultiplied by diag(R).
    - *           If equed = 'N' or 'C', R is not accessed.
    - * 
    - *   C       (input) float*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *           If equed = 'C' or 'B', A is postmultiplied by diag(C).
    - *           If equed = 'N' or 'R', C is not accessed.
    - *
    - *   B       (input) SuperMatrix*
    - *           B has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *           The right hand side matrix B.
    - *           if equed = 'R' or 'B', B is premultiplied by diag(R).
    - *
    - *   X       (input/output) SuperMatrix*
    - *           X has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *           On entry, the solution matrix X, as computed by sgstrs().
    - *           On exit, the improved solution matrix X.
    - *           if *equed = 'C' or 'B', X should be premultiplied by diag(C)
    - *               in order to obtain the solution to the original system.
    - *
    - *   FERR    (output) float*, dimension (B->ncol)   
    - *           The estimated forward error bound for each solution vector   
    - *           X(j) (the j-th column of the solution matrix X).   
    - *           If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *           is an estimated upper bound for the magnitude of the largest 
    - *           element in (X(j) - XTRUE) divided by the magnitude of the   
    - *           largest element in X(j).  The estimate is as reliable as   
    - *           the estimate for RCOND, and is almost always a slight   
    - *           overestimate of the true error.
    - *
    - *   BERR    (output) float*, dimension (B->ncol)   
    - *           The componentwise relative backward error of each solution   
    - *           vector X(j) (i.e., the smallest relative change in   
    - *           any element of A or B that makes X(j) an exact solution).
    - *
    - *   stat     (output) SuperLUStat_t*
    - *            Record the statistics on runtime and floating-point operation count.
    - *            See util.h for the definition of 'SuperLUStat_t'.
    - *
    - *   info    (output) int*   
    - *           = 0:  successful exit   
    - *            < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    Internal Parameters   
    - *    ===================   
    - *
    - *    ITMAX is the maximum number of steps of iterative refinement.   
    - *
    - * 
    - */ -void -sgsrfs(trans_t trans, SuperMatrix *A, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, char *equed, float *R, float *C, - SuperMatrix *B, SuperMatrix *X, float *ferr, float *berr, - SuperLUStat_t *stat, int *info) -{ - - -#define ITMAX 5 - - /* Table of constant values */ - int ione = 1; - float ndone = -1.; - float done = 1.; - - /* Local variables */ - NCformat *Astore; - float *Aval; - SuperMatrix Bjcol; - DNformat *Bstore, *Xstore, *Bjcol_store; - float *Bmat, *Xmat, *Bptr, *Xptr; - int kase; - float safe1, safe2; - int i, j, k, irow, nz, count, notran, rowequ, colequ; - int ldb, ldx, nrhs; - float s, xk, lstres, eps, safmin; - char transc[1]; - trans_t transt; - float *work; - float *rwork; - int *iwork; - extern double slamch_(char *); - extern int slacon_(int *, float *, float *, int *, float *, int *); -#ifdef _CRAY - extern int SCOPY(int *, float *, int *, float *, int *); - extern int SSAXPY(int *, float *, float *, int *, float *, int *); -#else - extern int scopy_(int *, float *, int *, float *, int *); - extern int saxpy_(int *, float *, float *, int *, float *, int *); -#endif - - Astore = A->Store; - Aval = Astore->nzval; - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - /* Test the input parameters */ - *info = 0; - notran = (trans == NOTRANS); - if ( !notran && trans != TRANS && trans != CONJ ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_S || A->Mtype != SLU_GE ) - *info = -2; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_S || L->Mtype != SLU_TRLU ) - *info = -3; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_S || U->Mtype != SLU_TRU ) - *info = -4; - else if ( ldb < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_S || B->Mtype != SLU_GE ) - *info = -10; - else 
if ( ldx < SUPERLU_MAX(0, A->nrow) || - X->Stype != SLU_DN || X->Dtype != SLU_S || X->Mtype != SLU_GE ) - *info = -11; - if (*info != 0) { - i = -(*info); - xerbla_("sgsrfs", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || nrhs == 0) { - for (j = 0; j < nrhs; ++j) { - ferr[j] = 0.; - berr[j] = 0.; - } - return; - } - - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - - /* Allocate working space */ - work = floatMalloc(2*A->nrow); - rwork = (float *) SUPERLU_MALLOC( A->nrow * sizeof(float) ); - iwork = intMalloc(2*A->nrow); - if ( !work || !rwork || !iwork ) - ABORT("Malloc fails for work/rwork/iwork."); - - if ( notran ) { - *(unsigned char *)transc = 'N'; - transt = TRANS; - } else { - *(unsigned char *)transc = 'T'; - transt = NOTRANS; - } - - /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ - nz = A->ncol + 1; - eps = slamch_("Epsilon"); - safmin = slamch_("Safe minimum"); - /* Set SAFE1 essentially to be the underflow threshold times the - number of additions in each row. */ - safe1 = nz * safmin; - safe2 = safe1 / eps; - - /* Compute the number of nonzeros in each row (or column) of A */ - for (i = 0; i < A->nrow; ++i) iwork[i] = 0; - if ( notran ) { - for (k = 0; k < A->ncol; ++k) - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - ++iwork[Astore->rowind[i]]; - } else { - for (k = 0; k < A->ncol; ++k) - iwork[k] = Astore->colptr[k+1] - Astore->colptr[k]; - } - - /* Copy one column of RHS B into Bjcol. */ - Bjcol.Stype = B->Stype; - Bjcol.Dtype = B->Dtype; - Bjcol.Mtype = B->Mtype; - Bjcol.nrow = B->nrow; - Bjcol.ncol = 1; - Bjcol.Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !Bjcol.Store ) ABORT("SUPERLU_MALLOC fails for Bjcol.Store"); - Bjcol_store = Bjcol.Store; - Bjcol_store->lda = ldb; - Bjcol_store->nzval = work; /* address aliasing */ - - /* Do for each right hand side ... 
*/ - for (j = 0; j < nrhs; ++j) { - count = 0; - lstres = 3.; - Bptr = &Bmat[j*ldb]; - Xptr = &Xmat[j*ldx]; - - while (1) { /* Loop until stopping criterion is satisfied. */ - - /* Compute residual R = B - op(A) * X, - where op(A) = A, A**T, or A**H, depending on TRANS. */ - -#ifdef _CRAY - SCOPY(&A->nrow, Bptr, &ione, work, &ione); -#else - scopy_(&A->nrow, Bptr, &ione, work, &ione); -#endif - sp_sgemv(transc, ndone, A, Xptr, ione, done, work, ione); - - /* Compute componentwise relative backward error from formula - max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) - where abs(Z) is the componentwise absolute value of the matrix - or vector Z. If the i-th component of the denominator is less - than SAFE2, then SAFE1 is added to the i-th component of the - numerator before dividing. */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = fabs( Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). */ - if (notran) { - for (k = 0; k < A->ncol; ++k) { - xk = fabs( Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += fabs(Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - s += fabs(Aval[i]) * fabs(Xptr[irow]); - } - rwork[k] += s; - } - } - s = 0.; - for (i = 0; i < A->nrow; ++i) { - if (rwork[i] > safe2) { - s = SUPERLU_MAX( s, fabs(work[i]) / rwork[i] ); - } else if ( rwork[i] != 0.0 ) { - /* Adding SAFE1 to the numerator guards against - spuriously zero residuals (underflow). */ - s = SUPERLU_MAX( s, (safe1 + fabs(work[i])) / rwork[i] ); - } - /* If rwork[i] is exactly 0.0, then we know the true - residual also must be exactly 0.0. */ - } - berr[j] = s; - - /* Test stopping criterion. Continue iterating if - 1) The residual BERR(J) is larger than machine epsilon, and - 2) BERR(J) decreased by at least a factor of 2 during the - last iteration, and - 3) At most ITMAX iterations tried. 
*/ - - if (berr[j] > eps && berr[j] * 2. <= lstres && count < ITMAX) { - /* Update solution and try again. */ - sgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - -#ifdef _CRAY - SAXPY(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#else - saxpy_(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#endif - lstres = berr[j]; - ++count; - } else { - break; - } - - } /* end while */ - - stat->RefineSteps = count; - - /* Bound error from formula: - norm(X - XTRUE) / norm(X) .le. FERR = norm( abs(inv(op(A)))* - ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) - where - norm(Z) is the magnitude of the largest component of Z - inv(op(A)) is the inverse of op(A) - abs(Z) is the componentwise absolute value of the matrix or - vector Z - NZ is the maximum number of nonzeros in any row of A, plus 1 - EPS is machine epsilon - - The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) - is incremented by SAFE1 if the i-th component of - abs(op(A))*abs(X) + abs(B) is less than SAFE2. - - Use SLACON to estimate the infinity-norm of the matrix - inv(op(A)) * diag(W), - where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = fabs( Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). 
*/ - if ( notran ) { - for (k = 0; k < A->ncol; ++k) { - xk = fabs( Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += fabs(Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - xk = fabs( Xptr[irow] ); - s += fabs(Aval[i]) * xk; - } - rwork[k] += s; - } - } - - for (i = 0; i < A->nrow; ++i) - if (rwork[i] > safe2) - rwork[i] = fabs(work[i]) + (iwork[i]+1)*eps*rwork[i]; - else - rwork[i] = fabs(work[i])+(iwork[i]+1)*eps*rwork[i]+safe1; - - kase = 0; - - do { - slacon_(&A->nrow, &work[A->nrow], work, - &iwork[A->nrow], &ferr[j], &kase); - if (kase == 0) break; - - if (kase == 1) { - /* Multiply by diag(W)*inv(op(A)**T)*(diag(C) or diag(R)). */ - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) work[i] *= C[i]; - else if ( !notran && rowequ ) - for (i = 0; i < A->nrow; ++i) work[i] *= R[i]; - - sgstrs (transt, L, U, perm_c, perm_r, &Bjcol, stat, info); - - for (i = 0; i < A->nrow; ++i) work[i] *= rwork[i]; - } else { - /* Multiply by (diag(C) or diag(R))*inv(op(A))*diag(W). */ - for (i = 0; i < A->nrow; ++i) work[i] *= rwork[i]; - - sgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) work[i] *= C[i]; - else if ( !notran && rowequ ) - for (i = 0; i < A->ncol; ++i) work[i] *= R[i]; - } - - } while ( kase != 0 ); - - - /* Normalize error. */ - lstres = 0.; - if ( notran && colequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, C[i] * fabs( Xptr[i]) ); - } else if ( !notran && rowequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, R[i] * fabs( Xptr[i]) ); - } else { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, fabs( Xptr[i]) ); - } - if ( lstres != 0. ) - ferr[j] /= lstres; - - } /* for each RHS j ... 
*/ - - SUPERLU_FREE(work); - SUPERLU_FREE(rwork); - SUPERLU_FREE(iwork); - SUPERLU_FREE(Bjcol.Store); - - return; - -} /* sgsrfs */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgssv.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgssv.c deleted file mode 100644 index c567daa966..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgssv.c +++ /dev/null @@ -1,227 +0,0 @@ - -/*! @file sgssv.c - * \brief Solves the system of linear equations A*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * SGSSV solves the system of linear equations A*X=B, using the
    - * LU factorization from SGSTRF. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *
    - *      1.1. Permute the columns of A, forming A*Pc, where Pc
    - *           is a permutation matrix. For more details of this step, 
    - *           see sp_preorder.c.
    - *
    - *      1.2. Factor A as Pr*A*Pc=L*U with the permutation Pr determined
    - *           by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      1.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the
    - *      above algorithm to the transpose of A:
    - *
    - *      2.1. Permute columns of transpose(A) (rows of A),
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix. 
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.2. Factor A as Pr*transpose(A)*Pc=L*U with the permutation Pr
    - *           determined by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      2.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR; Dtype = SLU_S; Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - * perm_c  (input/output) int*
    - *         If A->Stype = SLU_NC, column permutation vector of size A->ncol
    - *         which defines the permutation matrix Pc; perm_c[i] = j means 
    - *         column i of A is in position j in A*Pc.
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - *         If options->ColPerm = MY_PERMC or options->Fact = SamePattern or
    - *            options->Fact = SamePattern_SameRowPerm, it is an input argument.
    - *            On exit, perm_c may be overwritten by the product of the input
    - *            perm_c and a permutation that postorders the elimination tree
    - *            of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *            is already in postorder.
    - *         Otherwise, it is an output argument.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined 
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->RowPerm = MY_PERMR or
    - *            options->Fact = SamePattern_SameRowPerm, perm_r is an
    - *            input argument.
    - *         otherwise it is an output argument.
    - *
    - * L       (output) SuperMatrix*
    - *         The factor L from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - *         
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *         > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                so the solution could not be computed.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol.
    - * 
    - */ - -void -sgssv(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperMatrix *B, - SuperLUStat_t *stat, int *info ) -{ - - DNformat *Bstore; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int lwork = 0, *etree, i; - - /* Set default values for some parameters */ - int panel_size; /* panel size */ - int relax; /* no of columns in a relaxed snodes */ - int permc_spec; - trans_t trans = NOTRANS; - double *utime; - double t; /* Temporary time */ - - /* Test the input parameters ... */ - *info = 0; - Bstore = B->Store; - if ( options->Fact != DOFACT ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_S || A->Mtype != SLU_GE ) - *info = -2; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_S || B->Mtype != SLU_GE ) - *info = -7; - if ( *info != 0 ) { - i = -(*info); - xerbla_("sgssv", &i); - return; - } - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - sCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - trans = TRANS; - } else { - if ( A->Stype == SLU_NC ) AA = A; - } - - t = SuperLU_timer_(); - /* - * Get column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t; - - etree = intMalloc(A->ncol); - - t = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t; - - panel_size = sp_ienv(1); - relax = sp_ienv(2); - - /*printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4));*/ - t = SuperLU_timer_(); - /* Compute the LU factorization of A. */ - sgstrf(options, &AC, relax, panel_size, etree, - NULL, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t; - - t = SuperLU_timer_(); - if ( *info == 0 ) { - /* Solve the system A*X=B, overwriting B with X. 
*/ - sgstrs (trans, L, U, perm_c, perm_r, B, stat, info); - } - utime[SOLVE] = SuperLU_timer_() - t; - - SUPERLU_FREE (etree); - Destroy_CompCol_Permuted(&AC); - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgssvx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgssvx.c deleted file mode 100644 index 116429dadd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgssvx.c +++ /dev/null @@ -1,619 +0,0 @@ - -/*! @file sgssvx.c - * \brief Solves the system of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * SGSSVX solves the system of linear equations A*X=B or A'*X=B, using
    - * the LU factorization from sgstrf(). Error bounds on the solution and
    - * a condition estimate are also provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *      1.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A is
    - *           overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *           (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *           = TRANS or CONJ).
    - *
    - *      1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *           matrix that usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the matrix A (after equilibration if options->Equil = YES)
    - *           as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *      1.4. Compute the reciprocal pivot growth factor.
    - *
    - *      1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form of 
    - *           A is used to estimate the condition number of the matrix A. If
    - *           the reciprocal of the condition number is less than machine
    - *           precision, info = A->ncol+1 is returned as a warning, but the
    - *           routine still goes on to solve for X and computes error bounds
    - *           as described below.
    - *
    - *      1.6. The system of equations is solved for X using the factored form
    - *           of A.
    - *
    - *      1.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      1.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *      to the transpose of A:
    - *
    - *      2.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A' is
    - *           overwritten by diag(R)*A'*diag(C) and B by diag(R)*B 
    - *           (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *      2.2. Permute columns of transpose(A) (rows of A), 
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix that 
    - *           usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the transpose(A) (after equilibration if 
    - *           options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *           permutation Pr determined by partial pivoting.
    - *
    - *      2.4. Compute the reciprocal pivot growth factor.
    - *
    - *      2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form 
    - *           of transpose(A) is used to estimate the condition number of the
    - *           matrix A. If the reciprocal of the condition number
    - *           is less than machine precision, info = A->nrow+1 is returned as
    - *           a warning, but the routine still goes on to solve for X and
    - *           computes error bounds as described below.
    - *
    - *      2.6. The system of equations is solved for X using the factored form
    - *           of transpose(A).
    - *
    - *      2.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      2.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R) 
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input/output) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of the linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR, Dtype = SLU_D, Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - *         On entry, If options->Fact = FACTORED and equed is not 'N', 
    - *         then A must have been equilibrated by the scaling factors in
    - *         R and/or C.  
    - *         On exit, A is not modified if options->Equil = NO, or if 
    - *         options->Equil = YES but equed = 'N' on exit.
    - *         Otherwise, if options->Equil = YES and equed is not 'N',
    - *         A is scaled as follows:
    - *         If A->Stype = SLU_NC:
    - *           equed = 'R':  A := diag(R) * A
    - *           equed = 'C':  A := A * diag(C)
    - *           equed = 'B':  A := diag(R) * A * diag(C).
    - *         If A->Stype = SLU_NR:
    - *           equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *           equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *           equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *         which defines the permutation matrix Pc; perm_c[i] = j means
    - *         column i of A is in position j in A*Pc.
    - *         On exit, perm_c may be overwritten by the product of the input
    - *         perm_c and a permutation that postorders the elimination tree
    - *         of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *         is already in postorder.
    - *
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *         will try to use the input perm_r, unless a certain threshold
    - *         criterion is violated. In that case, perm_r is overwritten by a
    - *         new permutation determined by partial pivoting or diagonal
    - *         threshold pivoting.
    - *         Otherwise, perm_r is output argument.
    - * 
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *         Elimination tree of Pc'*A'*A*Pc.
    - *         If options->Fact != FACTORED and options->Fact != DOFACT,
    - *         etree is an input argument, otherwise it is an output argument.
    - *         Note: etree is a vector of parent pointers for a forest whose
    - *         vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *         Specifies the form of equilibration that was done.
    - *         = 'N': No equilibration.
    - *         = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *         = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *         = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                by diag(R)*A*diag(C).
    - *         If options->Fact = FACTORED, equed is an input argument,
    - *         otherwise it is an output argument.
    - *
    - * R       (input/output) float*, dimension (A->nrow)
    - *         The row scale factors for A or transpose(A).
    - *         If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *         If equed = 'N' or 'C', R is not accessed.
    - *         If options->Fact = FACTORED, R is an input argument,
    - *             otherwise, R is output.
    - *         If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *             of R must be positive.
    - * 
    - * C       (input/output) float*, dimension (A->ncol)
    - *         The column scale factors for A or transpose(A).
    - *         If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *         If equed = 'N' or 'R', C is not accessed.
    - *         If options->Fact = FACTORED, C is an input argument,
    - *             otherwise, C is output.
    - *         If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *             of C must be positive.
    - *         
    - * L       (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype SLU_= NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - *
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *         User supplied workspace, should be large enough
    - *         to hold data structures for factors L and U.
    - *         On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               mem_usage->total_needed; no other side effects.
    - *
    - *         See argument 'mem_usage' for memory usage statistics.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         If B->ncol = 0, only LU decomposition is performed, the triangular
    - *                         solve is skipped.
    - *         On exit,
    - *            if equed = 'N', B is not modified; otherwise
    - *            if A->Stype = SLU_NC:
    - *               if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *                  B is overwritten by diag(R)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *                  B is overwritten by diag(C)*B;
    - *            if A->Stype = SLU_NR:
    - *               if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *                  B is overwritten by diag(C)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *                  B is overwritten by diag(R)*B.
    - *
    - * X       (output) SuperMatrix*
    - *         X has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE. 
    - *         If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *         to the original system of equations. Note that A and B are modified
    - *         on exit if equed is not 'N', and the solution to the equilibrated
    - *         system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *         equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *         and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) float*
    - *         The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *         The infinity norm is used. If recip_pivot_growth is much less
    - *         than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) float*
    - *         The estimate of the reciprocal condition number of the matrix A
    - *         after equilibration (if done). If rcond is less than the machine
    - *         precision (in particular, if rcond = 0), the matrix is singular
    - *         to working precision. This condition is indicated by a return
    - *         code of info > 0.
    - *
    - * FERR    (output) float*, dimension (B->ncol)   
    - *         The estimated forward error bound for each solution vector   
    - *         X(j) (the j-th column of the solution matrix X).   
    - *         If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *         is an estimated upper bound for the magnitude of the largest 
    - *         element in (X(j) - XTRUE) divided by the magnitude of the   
    - *         largest element in X(j).  The estimate is as reliable as   
    - *         the estimate for RCOND, and is almost always a slight   
    - *         overestimate of the true error.
    - *         If options->IterRefine = NOREFINE, ferr = 1.0.
    - *
    - * BERR    (output) float*, dimension (B->ncol)
    - *         The componentwise relative backward error of each solution   
    - *         vector X(j) (i.e., the smallest relative change in   
    - *         any element of A or B that makes X(j) an exact solution).
    - *         If options->IterRefine = NOREFINE, berr = 1.0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *         Record the memory usage statistics, consisting of following fields:
    - *         - for_lu (float)
    - *           The amount of space used in bytes for L\U data structures.
    - *         - total_needed (float)
    - *           The amount of space needed in bytes to perform factorization.
    - *         - expansions (int)
    - *           The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *         = 0: successful exit   
    - *         < 0: if info = -i, the i-th argument had an illegal value   
    - *         > 0: if info = i, and i is   
    - *              <= A->ncol: U(i,i) is exactly zero. The factorization has   
    - *                    been completed, but the factor U is exactly   
    - *                    singular, so the solution and error bounds   
    - *                    could not be computed.   
    - *              = A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *                    precision, meaning that the matrix is singular to
    - *                    working precision. Nevertheless, the solution and
    - *                    error bounds are computed because there are a number
    - *                    of situations where the computed solution can be more
    - *                    accurate than the value of RCOND would suggest.   
    - *              > A->ncol+1: number of bytes allocated when memory allocation
    - *                    failure occurred, plus A->ncol.
    - * 
    - */ - -void -sgssvx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, float *R, float *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, float *recip_pivot_growth, - float *rcond, float *ferr, float *berr, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info ) -{ - - - DNformat *Bstore, *Xstore; - float *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec; - trans_t trant; - char norm[1]; - int i, j, info1; - float amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - float diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - /* External functions */ - extern float slangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = slamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - -#if 0 -printf("dgssvx: Fact=%4d, Trans=%4d, equed=%c\n", - options->Fact, options->Trans, *equed); -#endif - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_S || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_S || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_S || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("sgssvx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - sCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact && equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - sgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - slaqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. */ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= R[i]; - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Bmat[i + j*ldb] *= C[i]; - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t0; - -/* 
printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4)); - fflush(stdout); */ - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - sgstrf(options, &AC, relax, panel_size, etree, - work, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) { - if ( *info <= A->ncol ) { - /* Compute the reciprocal pivot growth factor of the leading - rank-deficient *info columns of A. */ - *recip_pivot_growth = sPivotGrowth(*info, AA, perm_c, L, U); - } - return; - } - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = sPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = slangs(norm, AA); - sgscon(norm, L, U, anorm, rcond, stat, info); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. */ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - sgstrs (trant, L, U, perm_c, perm_r, X, stat, info); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Use iterative refinement to improve the computed solution and compute - error bounds and backward error estimates for it. */ - t0 = SuperLU_timer_(); - if ( options->IterRefine != NOREFINE ) { - sgsrfs(trant, AA, L, U, perm_c, perm_r, equed, R, C, B, - X, ferr, berr, stat, info); - } else { - for (j = 0; j < nrhs; ++j) ferr[j] = berr[j] = 1.0; - } - utime[REFINE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original system. 
*/ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= C[i]; - } - } - } else if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - Xmat[i + j*ldx] *= R[i]; - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < slamch_("E") ) *info = A->ncol + 1; - } - - if ( nofact ) { - sQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgstrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgstrf.c deleted file mode 100644 index 109f0bbdc9..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgstrf.c +++ /dev/null @@ -1,436 +0,0 @@ - -/*! @file sgstrf.c - * \brief Computes an LU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * SGSTRF computes an LU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper 
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = SLU_NCP; Dtype = SLU_S; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *          To control degree of relaxing supernodes. If the number
    - *          of nodes (columns) in a subtree of the elimination tree is less
    - *          than relax, this subtree is considered as one supernode,
    - *          regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *          A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *          Elimination tree of A'*A.
    - *          Note: etree is a vector of parent pointers for a forest whose
    - *          vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *          On input, the columns of A should be permuted so that the
    - *          etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *          User-supplied work space and space for the output data structures.
    - *          Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the 
    - *          permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *          in position j in A*Pc.
    - *          When searching for diagonal, perm_c[*] is applied to the
    - *          row subscripts of A, so that diagonal threshold pivoting
    - *          can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *          Row permutation vector which defines the permutation matrix Pr,
    - *          perm_r[i] = j means row i of A is in position j in Pr*A.
    - *          If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *             will try to use the input perm_r, unless a certain threshold
    - *             criterion is violated. In that case, perm_r is overwritten by
    - *             a new permutation determined by partial pivoting or diagonal
    - *             threshold pivoting.
    - *          Otherwise, perm_r is output argument;
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = SLU_NC, 
    - *          Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *          = 0: successful exit
    - *          < 0: if info = -i, the i-th argument had an illegal value
    - *          > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                and division by zero will occur if it is used to solve a
    - *                system of equations.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol. If lwork = -1, it is
    - *                the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays: 
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   xprune[0:n-1]: xprune[*] points to locations in subscript 
    - *	vector lsub[*]. For column i, xprune[i] denotes the point where 
    - *	structural pruning begins. I.e. only xlsub[i],..,xprune[i]-1 need 
    - *	to be traversed for symbolic factorization.
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been 
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 3 of them: marker/marker1 are used for panel dfs, 
    - *	      see spanel_dfs.c; marker2 is used for inner-factorization,
    - *            see scolumn_dfs.c.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *      Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs) 
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the 
    - *	last column of a supernode.
    - *      The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a 
    - *	supernodal representative r, repfnz[r] is the location of the first 
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel. 
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below 
    - *      the panel diagonal. These are filled in during spanel_dfs(), and are
    - *      used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *	    	   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_sdefs.h.
    - * 
    - */ - -void -sgstrf (superlu_options_t *options, SuperMatrix *A, - int relax, int panel_size, int *etree, void *work, int lwork, - int *perm_c, int *perm_r, SuperMatrix *L, SuperMatrix *U, - SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *iwork; - float *swork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *xprune; - int *marker; - float *dense, *tempv; - int *relax_end; - float *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - float fill_ratio = sp_ienv(6); /* estimated fill ratio */ - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = sLUMemInit(fact, work, lwork, m, n, Astore->nnz, - panel_size, fill_ratio, L, U, &Glu, &iwork, &swork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; - 
xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &xprune, &marker); - sSetRWork(m, panel_size, swork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - for (k = 0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) { - heap_relax_snode(n, etree, relax, marker, relax_end); - } else { - relax_snode(n, etree, relax, marker, relax_end); - } - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = ssnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - xprune, marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ( (*info = sLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu)) ) - return; - } - - for (icol = jcol; icol<= kcol; icol++) { - xusub[icol+1] = nextu; - - /* Scatter into SPA dense[*] */ - 
for (k = xa_begin[icol]; k < xa_end[icol]; k++) - dense[asub[k]] = a[k]; - - /* Numeric update within the snode */ - ssnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if ( (*info = spivotL(icol, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - -#ifdef DEBUG - sprint_lu_col("[1]: ", icol, pivrow, xprune, &Glu); -#endif - - } - - jcol = icol; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - spanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, panel_lsub, segrep, repfnz, xprune, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - spanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for ( jj = jcol; jj < jcol + panel_size; jj++) { - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - if ((*info = scolumn_dfs(m, jj, perm_r, &nseg, &panel_lsub[k], - segrep, &repfnz[k], xprune, marker, - parent, xplore, &Glu)) != 0) return; - - /* Numeric updates */ - if ((*info = scolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Copy the U-segments to ucol[*] */ - if ((*info = scopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], &Glu)) != 0) - return; - - if ( (*info = spivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - - /* Prune columns (0:jj-1) using 
column jj */ - spruneL(jj, perm_r, pivrow, nseg, segrep, - &repfnz[k], xprune, &Glu); - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - -#ifdef DEBUG - sprint_lu_col("[2]: ", jj, pivrow, xprune, &Glu); -#endif - - } - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - countnz(min_mn, xprune, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - sLUWorkFree(iwork, swork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. - There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - sCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_S, SLU_TRLU); - sCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_S, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - stat->expansions = --(Glu.num_expansions); - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgstrs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgstrs.c deleted file mode 100644 index 
13b9bb450d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sgstrs.c +++ /dev/null @@ -1,337 +0,0 @@ - -/*! @file sgstrs.c - * \brief Solves a system using LU factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_sdefs.h" - - -/* - * Function prototypes - */ -void susolve(int, int, float*, float*); -void slsolve(int, int, float*, float*); -void smatvec(int, int, int, float*, float*, float*); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * SGSTRS solves a system of linear equations A*X=B or A'*X=B
    - * with A sparse and B dense, using the LU factorization computed by
    - * SGSTRF.
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *
    - * L       (input) SuperMatrix*
    - *         The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *         sgstrf(). Use compressed row subscripts storage for supernodes,
    - *         i.e., L has types: Stype = SLU_SC, Dtype = SLU_S, Mtype = SLU_TRLU.
    - *
    - * U       (input) SuperMatrix*
    - *         The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *         sgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_S, Mtype = SLU_TRU.
    - *
    - * perm_c  (input) int*, dimension (L->ncol)
    - *	   Column permutation vector, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - *
    - * perm_r  (input) int*, dimension (L->nrow)
    - *         Row permutation vector, which defines the permutation matrix Pr; 
    - *         perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_S, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - * 	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - * 
    - */ - -void -sgstrs (trans_t trans, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, SuperMatrix *B, - SuperLUStat_t *stat, int *info) -{ - -#ifdef _CRAY - _fcd ftcs1, ftcs2, ftcs3, ftcs4; -#endif - int incx = 1, incy = 1; -#ifdef USE_VENDOR_BLAS - float alpha = 1.0, beta = 1.0; - float *work_col; -#endif - DNformat *Bstore; - float *Bmat; - SCformat *Lstore; - NCformat *Ustore; - float *Lval, *Uval; - int fsupc, nrow, nsupr, nsupc, luptr, istart, irow; - int i, j, k, iptr, jcol, n, ldb, nrhs; - float *work, *rhs_work, *soln; - flops_t solve_ops; - void sprint_soln(); - - /* Test input parameters ... */ - *info = 0; - Bstore = B->Store; - ldb = Bstore->lda; - nrhs = B->ncol; - if ( trans != NOTRANS && trans != TRANS && trans != CONJ ) *info = -1; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_S || L->Mtype != SLU_TRLU ) - *info = -2; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_S || U->Mtype != SLU_TRU ) - *info = -3; - else if ( ldb < SUPERLU_MAX(0, L->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_S || B->Mtype != SLU_GE ) - *info = -6; - if ( *info ) { - i = -(*info); - xerbla_("sgstrs", &i); - return; - } - - n = L->nrow; - work = floatCalloc(n * nrhs); - if ( !work ) ABORT("Malloc fails for local work[]."); - soln = floatMalloc(n); - if ( !soln ) ABORT("Malloc fails for local soln[]."); - - Bmat = Bstore->nzval; - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( trans == NOTRANS ) { - /* Permute right hand sides to form Pr*B */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_r[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - /* Forward solve PLy=Pb. 
*/ - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - nrow = nsupr - nsupc; - - solve_ops += nsupc * (nsupc - 1) * nrhs; - solve_ops += 2 * nrow * nsupc * nrhs; - - if ( nsupc == 1 ) { - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - luptr = L_NZ_START(fsupc); - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); iptr++){ - irow = L_SUB(iptr); - ++luptr; - rhs_work[irow] -= rhs_work[fsupc] * Lval[luptr]; - } - } - } else { - luptr = L_NZ_START(fsupc); -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("N", strlen("N")); - ftcs3 = _cptofcd("U", strlen("U")); - STRSM( ftcs1, ftcs1, ftcs2, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - SGEMM( ftcs2, ftcs2, &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#else - strsm_("L", "L", "N", "U", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - sgemm_( "N", "N", &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#endif - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - work_col = &work[j*n]; - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - rhs_work[irow] -= work_col[i]; /* Scatter */ - work_col[i] = 0.0; - iptr++; - } - } -#else - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - slsolve (nsupr, nsupc, &Lval[luptr], &rhs_work[fsupc]); - smatvec (nsupr, nrow, nsupc, &Lval[luptr+nsupc], - &rhs_work[fsupc], &work[0] ); - - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - rhs_work[irow] -= work[i]; - work[i] = 0.0; - iptr++; - } - } -#endif - } /* else ... */ - } /* for L-solve */ - -#ifdef DEBUG - printf("After L-solve: y=\n"); - sprint_soln(n, nrhs, Bmat); -#endif - - /* - * Back solve Ux=y. 
- */ - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += nsupc * (nsupc + 1) * nrhs; - - if ( nsupc == 1 ) { - rhs_work = &Bmat[0]; - for (j = 0; j < nrhs; j++) { - rhs_work[fsupc] /= Lval[luptr]; - rhs_work += ldb; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("U", strlen("U")); - ftcs3 = _cptofcd("N", strlen("N")); - STRSM( ftcs1, ftcs2, ftcs3, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#else - strsm_("L", "U", "N", "N", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#endif -#else - for (j = 0; j < nrhs; j++) - susolve ( nsupr, nsupc, &Lval[luptr], &Bmat[fsupc+j*ldb] ); -#endif - } - - for (j = 0; j < nrhs; ++j) { - rhs_work = &Bmat[j*ldb]; - for (jcol = fsupc; jcol < fsupc + nsupc; jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++ ){ - irow = U_SUB(i); - rhs_work[irow] -= rhs_work[jcol] * Uval[i]; - } - } - } - - } /* for U-solve */ - -#ifdef DEBUG - printf("After U-solve: x=\n"); - sprint_soln(n, nrhs, Bmat); -#endif - - /* Compute the final solution X := Pc*X. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_c[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = solve_ops; - - } else { /* Solve A'*X=B or CONJ(A)*X=B */ - /* Permute right hand sides to form Pc'*B. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_c[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = 0; - for (k = 0; k < nrhs; ++k) { - - /* Multiply by inv(U'). */ - sp_strsv("U", "T", "N", L, U, &Bmat[k*ldb], stat, info); - - /* Multiply by inv(L'). 
*/ - sp_strsv("L", "T", "U", L, U, &Bmat[k*ldb], stat, info); - - } - /* Compute the final solution X := Pr'*X (=inv(Pr)*X) */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_r[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - } - - SUPERLU_FREE(work); - SUPERLU_FREE(soln); -} - -/* - * Diagnostic print of the solution vector - */ -void -sprint_soln(int n, int nrhs, float *soln) -{ - int i; - - for (i = 0; i < n; i++) - printf("\t%d: %.4f\n", i, soln[i]); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slacon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slacon.c deleted file mode 100644 index 4e02fdc2a5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slacon.c +++ /dev/null @@ -1,236 +0,0 @@ - -/*! @file slacon.c - * \brief Estimates the 1-norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_Cnames.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   SLACON estimates the 1-norm of a square matrix A.   
    - *   Reverse communication is used for evaluating matrix-vector products. 
    - * 
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   N      (input) INT
    - *          The order of the matrix.  N >= 1.   
    - *
    - *   V      (workspace) FLOAT PRECISION array, dimension (N)   
    - *          On the final return, V = A*W,  where  EST = norm(V)/norm(W)   
    - *          (W is not returned).   
    - *
    - *   X      (input/output) FLOAT PRECISION array, dimension (N)   
    - *          On an intermediate return, X should be overwritten by   
    - *                A * X,   if KASE=1,   
    - *                A' * X,  if KASE=2,
    - *         and SLACON must be re-called with all the other parameters   
    - *          unchanged.   
    - *
    - *   ISGN   (workspace) INT array, dimension (N)
    - *
    - *   EST    (output) FLOAT PRECISION   
    - *          An estimate (a lower bound) for norm(A).   
    - *
    - *   KASE   (input/output) INT
    - *          On the initial call to SLACON, KASE should be 0.   
    - *          On an intermediate return, KASE will be 1 or 2, indicating   
    - *          whether X should be overwritten by A * X  or A' * X.   
    - *          On the final return from SLACON, KASE will again be 0.   
    - *
    - *   Further Details   
    - *   ======= =======   
    - *
    - *   Contributed by Nick Higham, University of Manchester.   
    - *   Originally named CONEST, dated March 16, 1988.   
    - *
    - *   Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of 
    - *   a real or complex matrix, with applications to condition estimation", 
    - *   ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988.   
    - *   ===================================================================== 
    - * 
    - */ - -int -slacon_(int *n, float *v, float *x, int *isgn, float *est, int *kase) - -{ - - - /* Table of constant values */ - int c__1 = 1; - float zero = 0.0; - float one = 1.0; - - /* Local variables */ - static int iter; - static int jump, jlast; - static float altsgn, estold; - static int i, j; - float temp; -#ifdef _CRAY - extern int ISAMAX(int *, float *, int *); - extern float SASUM(int *, float *, int *); - extern int SCOPY(int *, float *, int *, float *, int *); -#else - extern int isamax_(int *, float *, int *); - extern float sasum_(int *, float *, int *); - extern int scopy_(int *, float *, int *, float *, int *); -#endif -#define d_sign(a, b) (b >= 0 ? fabs(a) : -fabs(a)) /* Copy sign */ -#define i_dnnt(a) \ - ( a>=0 ? floor(a+.5) : -floor(.5-a) ) /* Round to nearest integer */ - - if ( *kase == 0 ) { - for (i = 0; i < *n; ++i) { - x[i] = 1. / (float) (*n); - } - *kase = 1; - jump = 1; - return 0; - } - - switch (jump) { - case 1: goto L20; - case 2: goto L40; - case 3: goto L70; - case 4: goto L110; - case 5: goto L140; - } - - /* ................ ENTRY (JUMP = 1) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */ - L20: - if (*n == 1) { - v[0] = x[0]; - *est = fabs(v[0]); - /* ... QUIT */ - goto L150; - } -#ifdef _CRAY - *est = SASUM(n, x, &c__1); -#else - *est = sasum_(n, x, &c__1); -#endif - - for (i = 0; i < *n; ++i) { - x[i] = d_sign(one, x[i]); - isgn[i] = i_dnnt(x[i]); - } - *kase = 2; - jump = 2; - return 0; - - /* ................ ENTRY (JUMP = 2) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */ -L40: -#ifdef _CRAY - j = ISAMAX(n, &x[0], &c__1); -#else - j = isamax_(n, &x[0], &c__1); -#endif - --j; - iter = 2; - - /* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */ -L50: - for (i = 0; i < *n; ++i) x[i] = zero; - x[j] = one; - *kase = 1; - jump = 3; - return 0; - - /* ................ ENTRY (JUMP = 3) - X HAS BEEN OVERWRITTEN BY A*X. 
*/ -L70: -#ifdef _CRAY - SCOPY(n, x, &c__1, v, &c__1); -#else - scopy_(n, x, &c__1, v, &c__1); -#endif - estold = *est; -#ifdef _CRAY - *est = SASUM(n, v, &c__1); -#else - *est = sasum_(n, v, &c__1); -#endif - - for (i = 0; i < *n; ++i) - if (i_dnnt(d_sign(one, x[i])) != isgn[i]) - goto L90; - - /* REPEATED SIGN VECTOR DETECTED, HENCE ALGORITHM HAS CONVERGED. */ - goto L120; - -L90: - /* TEST FOR CYCLING. */ - if (*est <= estold) goto L120; - - for (i = 0; i < *n; ++i) { - x[i] = d_sign(one, x[i]); - isgn[i] = i_dnnt(x[i]); - } - *kase = 2; - jump = 4; - return 0; - - /* ................ ENTRY (JUMP = 4) - X HAS BEEN OVERWRITTEN BY TRANDPOSE(A)*X. */ -L110: - jlast = j; -#ifdef _CRAY - j = ISAMAX(n, &x[0], &c__1); -#else - j = isamax_(n, &x[0], &c__1); -#endif - --j; - if (x[jlast] != fabs(x[j]) && iter < 5) { - ++iter; - goto L50; - } - - /* ITERATION COMPLETE. FINAL STAGE. */ -L120: - altsgn = 1.; - for (i = 1; i <= *n; ++i) { - x[i-1] = altsgn * ((float)(i - 1) / (float)(*n - 1) + 1.); - altsgn = -altsgn; - } - *kase = 1; - jump = 5; - return 0; - - /* ................ ENTRY (JUMP = 5) - X HAS BEEN OVERWRITTEN BY A*X. */ -L140: -#ifdef _CRAY - temp = SASUM(n, x, &c__1) / (float)(*n * 3) * 2.; -#else - temp = sasum_(n, x, &c__1) / (float)(*n * 3) * 2.; -#endif - if (temp > *est) { -#ifdef _CRAY - SCOPY(n, &x[0], &c__1, &v[0], &c__1); -#else - scopy_(n, &x[0], &c__1, &v[0], &c__1); -#endif - *est = temp; - } - -L150: - *kase = 0; - return 0; - -} /* slacon_ */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slamch.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slamch.c deleted file mode 100644 index c06c3574ee..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slamch.c +++ /dev/null @@ -1,995 +0,0 @@ -/*! @file slamch.c - * \brief Determines single precision machine parameters and other service routines - * - *
    - *   -- LAPACK auxiliary routine (version 2.0) --   
    - *      Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
    - *      Courant Institute, Argonne National Lab, and Rice University   
    - *      October 31, 1992   
    - * 
    - */ -#include -#include "slu_Cnames.h" - -#define TRUE_ (1) -#define FALSE_ (0) -#define min(a,b) ((a) <= (b) ? (a) : (b)) -#define max(a,b) ((a) >= (b) ? (a) : (b)) -#define abs(x) ((x) >= 0 ? (x) : -(x)) -#define dabs(x) (double)abs(x) - -/*! \brief - -
    - Purpose   
    -    =======   
    -
    -    SLAMCH determines single precision machine parameters.   
    -
    -    Arguments   
    -    =========   
    -
    -    CMACH   (input) CHARACTER*1   
    -            Specifies the value to be returned by SLAMCH:   
    -            = 'E' or 'e',   SLAMCH := eps   
    -            = 'S' or 's ,   SLAMCH := sfmin   
    -            = 'B' or 'b',   SLAMCH := base   
    -            = 'P' or 'p',   SLAMCH := eps*base   
    -            = 'N' or 'n',   SLAMCH := t   
    -            = 'R' or 'r',   SLAMCH := rnd   
    -            = 'M' or 'm',   SLAMCH := emin   
    -            = 'U' or 'u',   SLAMCH := rmin   
    -            = 'L' or 'l',   SLAMCH := emax   
    -            = 'O' or 'o',   SLAMCH := rmax   
    -
    -            where   
    -
    -            eps   = relative machine precision   
    -            sfmin = safe minimum, such that 1/sfmin does not overflow   
    -            base  = base of the machine   
    -            prec  = eps*base   
    -            t     = number of (base) digits in the mantissa   
    -            rnd   = 1.0 when rounding occurs in addition, 0.0 otherwise   
    -            emin  = minimum exponent before (gradual) underflow   
    -            rmin  = underflow threshold - base**(emin-1)   
    -            emax  = largest exponent before overflow   
    -            rmax  = overflow threshold  - (base**emax)*(1-eps)   
    -
    -   ===================================================================== 
    -
    -*/ -double slamch_(char *cmach) -{ -/* >>Start of File<< - Initialized data */ - static int first = TRUE_; - /* System generated locals */ - int i__1; - float ret_val; - /* Builtin functions */ - double pow_ri(float *, int *); - /* Local variables */ - static float base; - static int beta; - static float emin, prec, emax; - static int imin, imax; - static int lrnd; - static float rmin, rmax, t, rmach; - extern int lsame_(char *, char *); - static float small, sfmin; - extern /* Subroutine */ int slamc2_(int *, int *, int *, float - *, int *, float *, int *, float *); - static int it; - static float rnd, eps; - - - - if (first) { - first = FALSE_; - slamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax); - base = (float) beta; - t = (float) it; - if (lrnd) { - rnd = 1.f; - i__1 = 1 - it; - eps = pow_ri(&base, &i__1) / 2; - } else { - rnd = 0.f; - i__1 = 1 - it; - eps = pow_ri(&base, &i__1); - } - prec = eps * base; - emin = (float) imin; - emax = (float) imax; - sfmin = rmin; - small = 1.f / rmax; - if (small >= sfmin) { - -/* Use SMALL plus a bit, to avoid the possibility of rou -nding - causing overflow when computing 1/sfmin. */ - - sfmin = small * (eps + 1.f); - } - } - - if (lsame_(cmach, "E")) { - rmach = eps; - } else if (lsame_(cmach, "S")) { - rmach = sfmin; - } else if (lsame_(cmach, "B")) { - rmach = base; - } else if (lsame_(cmach, "P")) { - rmach = prec; - } else if (lsame_(cmach, "N")) { - rmach = t; - } else if (lsame_(cmach, "R")) { - rmach = rnd; - } else if (lsame_(cmach, "M")) { - rmach = emin; - } else if (lsame_(cmach, "U")) { - rmach = rmin; - } else if (lsame_(cmach, "L")) { - rmach = emax; - } else if (lsame_(cmach, "O")) { - rmach = rmax; - } - - ret_val = rmach; - return ret_val; - -/* End of SLAMCH */ - -} /* slamch_ */ - - -/* Subroutine */ -/*! \brief - -
    - Purpose   
    -    =======   
    -
    -    SLAMC1 determines the machine parameters given by BETA, T, RND, and   
    -    IEEE1.   
    -
    -    Arguments   
    -    =========   
    -
    -    BETA    (output) INT   
    -            The base of the machine.   
    -
    -    T       (output) INT   
    -            The number of ( BETA ) digits in the mantissa.   
    -
    -    RND     (output) INT   
    -            Specifies whether proper rounding  ( RND = .TRUE. )  or   
    -            chopping  ( RND = .FALSE. )  occurs in addition. This may not 
    -  
    -            be a reliable guide to the way in which the machine performs 
    -  
    -            its arithmetic.   
    -
    -    IEEE1   (output) INT   
    -            Specifies whether rounding appears to be done in the IEEE   
    -            'round to nearest' style.   
    -
    -    Further Details   
    -    ===============   
    -
    -    The routine is based on the routine  ENVRON  by Malcolm and   
    -    incorporates suggestions by Gentleman and Marovich. See   
    -
    -       Malcolm M. A. (1972) Algorithms to reveal properties of   
    -          floating-point arithmetic. Comms. of the ACM, 15, 949-951.   
    -
    -       Gentleman W. M. and Marovich S. B. (1974) More on algorithms   
    -          that reveal properties of floating point arithmetic units.   
    -          Comms. of the ACM, 17, 276-277.   
    -
    -   ===================================================================== 
    -
    -*/ - -int slamc1_(int *beta, int *t, int *rnd, int - *ieee1) -{ - /* Initialized data */ - static int first = TRUE_; - /* System generated locals */ - float r__1, r__2; - /* Local variables */ - static int lrnd; - static float a, b, c, f; - static int lbeta; - static float savec; - static int lieee1; - static float t1, t2; - extern double slamc3_(float *, float *); - static int lt; - static float one, qtr; - - - - if (first) { - first = FALSE_; - one = 1.f; - -/* LBETA, LIEEE1, LT and LRND are the local values of BE -TA, - IEEE1, T and RND. - - Throughout this routine we use the function SLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. - - Compute a = 2.0**m with the smallest positive integer m s -uch - that - - fl( a + 1.0 ) = a. */ - - a = 1.f; - c = 1.f; - -/* + WHILE( C.EQ.ONE )LOOP */ -L10: - if (c == one) { - a *= 2; - c = slamc3_(&a, &one); - r__1 = -(double)a; - c = slamc3_(&c, &r__1); - goto L10; - } -/* + END WHILE - - Now compute b = 2.0**m with the smallest positive integer -m - such that - - fl( a + b ) .gt. a. */ - - b = 1.f; - c = slamc3_(&a, &b); - -/* + WHILE( C.EQ.A )LOOP */ -L20: - if (c == a) { - b *= 2; - c = slamc3_(&a, &b); - goto L20; - } -/* + END WHILE - - Now compute the base. a and c are neighbouring floating po -int - numbers in the interval ( beta**t, beta**( t + 1 ) ) and - so - their difference is beta. Adding 0.25 to c is to ensure that - it - is truncated to beta and not ( beta - 1 ). */ - - qtr = one / 4; - savec = c; - r__1 = -(double)a; - c = slamc3_(&c, &r__1); - lbeta = c + qtr; - -/* Now determine whether rounding or chopping occurs, by addin -g a - bit less than beta/2 and a bit more than beta/2 to - a. 
*/ - - b = (float) lbeta; - r__1 = b / 2; - r__2 = -(double)b / 100; - f = slamc3_(&r__1, &r__2); - c = slamc3_(&f, &a); - if (c == a) { - lrnd = TRUE_; - } else { - lrnd = FALSE_; - } - r__1 = b / 2; - r__2 = b / 100; - f = slamc3_(&r__1, &r__2); - c = slamc3_(&f, &a); - if (lrnd && c == a) { - lrnd = FALSE_; - } - -/* Try and decide whether rounding is done in the IEEE 'round - to - nearest' style. B/2 is half a unit in the last place of the -two - numbers A and SAVEC. Furthermore, A is even, i.e. has last -bit - zero, and SAVEC is odd. Thus adding B/2 to A should not cha -nge - A, but adding B/2 to SAVEC should change SAVEC. */ - - r__1 = b / 2; - t1 = slamc3_(&r__1, &a); - r__1 = b / 2; - t2 = slamc3_(&r__1, &savec); - lieee1 = t1 == a && t2 > savec && lrnd; - -/* Now find the mantissa, t. It should be the integer part - of - log to the base beta of a, however it is safer to determine - t - by powering. So we find t as the smallest positive integer -for - which - - fl( beta**t + 1.0 ) = 1.0. */ - - lt = 0; - a = 1.f; - c = 1.f; - -/* + WHILE( C.EQ.ONE )LOOP */ -L30: - if (c == one) { - ++lt; - a *= lbeta; - c = slamc3_(&a, &one); - r__1 = -(double)a; - c = slamc3_(&c, &r__1); - goto L30; - } -/* + END WHILE */ - - } - - *beta = lbeta; - *t = lt; - *rnd = lrnd; - *ieee1 = lieee1; - return 0; - -/* End of SLAMC1 */ - -} /* slamc1_ */ - - -/* Subroutine */ - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    SLAMC2 determines the machine parameters specified in its argument   
    -    list.   
    -
    -    Arguments   
    -    =========   
    -
    -    BETA    (output) INT   
    -            The base of the machine.   
    -
    -    T       (output) INT   
    -            The number of ( BETA ) digits in the mantissa.   
    -
    -    RND     (output) INT   
    -            Specifies whether proper rounding  ( RND = .TRUE. )  or   
    -            chopping  ( RND = .FALSE. )  occurs in addition. This may not 
    -  
    -            be a reliable guide to the way in which the machine performs 
    -  
    -            its arithmetic.   
    -
    -    EPS     (output) FLOAT   
    -            The smallest positive number such that   
    -
    -               fl( 1.0 - EPS ) .LT. 1.0,   
    -
    -            where fl denotes the computed value.   
    -
    -    EMIN    (output) INT   
    -            The minimum exponent before (gradual) underflow occurs.   
    -
    -    RMIN    (output) FLOAT   
    -            The smallest normalized number for the machine, given by   
    -            BASE**( EMIN - 1 ), where  BASE  is the floating point value 
    -  
    -            of BETA.   
    -
    -    EMAX    (output) INT   
    -            The maximum exponent before overflow occurs.   
    -
    -    RMAX    (output) FLOAT   
    -            The largest positive number for the machine, given by   
    -            BASE**EMAX * ( 1 - EPS ), where  BASE  is the floating point 
    -  
    -            value of BETA.   
    -
    -    Further Details   
    -    ===============   
    -
    -    The computation of  EPS  is based on a routine PARANOIA by   
    -    W. Kahan of the University of California at Berkeley.   
    -
    -   ===================================================================== 
    -
    -*/ -int slamc2_(int *beta, int *t, int *rnd, float * - eps, int *emin, float *rmin, int *emax, float *rmax) -{ - /* Table of constant values */ - static int c__1 = 1; - - /* Initialized data */ - static int first = TRUE_; - static int iwarn = FALSE_; - /* System generated locals */ - int i__1; - float r__1, r__2, r__3, r__4, r__5; - /* Builtin functions */ - double pow_ri(float *, int *); - /* Local variables */ - static int ieee; - static float half; - static int lrnd; - static float leps, zero, a, b, c; - static int i, lbeta; - static float rbase; - static int lemin, lemax, gnmin; - static float small; - static int gpmin; - static float third, lrmin, lrmax, sixth; - static int lieee1; - extern /* Subroutine */ int slamc1_(int *, int *, int *, - int *); - extern double slamc3_(float *, float *); - extern /* Subroutine */ int slamc4_(int *, float *, int *), - slamc5_(int *, int *, int *, int *, int *, - float *); - static int lt, ngnmin, ngpmin; - static float one, two; - - - - if (first) { - first = FALSE_; - zero = 0.f; - one = 1.f; - two = 2.f; - -/* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values - of - BETA, T, RND, EPS, EMIN and RMIN. - - Throughout this routine we use the function SLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. - - SLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. -*/ - - slamc1_(&lbeta, <, &lrnd, &lieee1); - -/* Start to find EPS. */ - - b = (float) lbeta; - i__1 = -lt; - a = pow_ri(&b, &i__1); - leps = a; - -/* Try some tricks to see whether or not this is the correct E -PS. 
*/ - - b = two / 3; - half = one / 2; - r__1 = -(double)half; - sixth = slamc3_(&b, &r__1); - third = slamc3_(&sixth, &sixth); - r__1 = -(double)half; - b = slamc3_(&third, &r__1); - b = slamc3_(&b, &sixth); - b = dabs(b); - if (b < leps) { - b = leps; - } - - leps = 1.f; - -/* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */ -L10: - if (leps > b && b > zero) { - leps = b; - r__1 = half * leps; -/* Computing 5th power */ - r__3 = two, r__4 = r__3, r__3 *= r__3; -/* Computing 2nd power */ - r__5 = leps; - r__2 = r__4 * (r__3 * r__3) * (r__5 * r__5); - c = slamc3_(&r__1, &r__2); - r__1 = -(double)c; - c = slamc3_(&half, &r__1); - b = slamc3_(&half, &c); - r__1 = -(double)b; - c = slamc3_(&half, &r__1); - b = slamc3_(&half, &c); - goto L10; - } -/* + END WHILE */ - - if (a < leps) { - leps = a; - } - -/* Computation of EPS complete. - - Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3 -)). - Keep dividing A by BETA until (gradual) underflow occurs. T -his - is detected when we cannot recover the previous A. 
*/ - - rbase = one / lbeta; - small = one; - for (i = 1; i <= 3; ++i) { - r__1 = small * rbase; - small = slamc3_(&r__1, &zero); -/* L20: */ - } - a = slamc3_(&one, &small); - slamc4_(&ngpmin, &one, &lbeta); - r__1 = -(double)one; - slamc4_(&ngnmin, &r__1, &lbeta); - slamc4_(&gpmin, &a, &lbeta); - r__1 = -(double)a; - slamc4_(&gnmin, &r__1, &lbeta); - ieee = FALSE_; - - if (ngpmin == ngnmin && gpmin == gnmin) { - if (ngpmin == gpmin) { - lemin = ngpmin; -/* ( Non twos-complement machines, no gradual under -flow; - e.g., VAX ) */ - } else if (gpmin - ngpmin == 3) { - lemin = ngpmin - 1 + lt; - ieee = TRUE_; -/* ( Non twos-complement machines, with gradual und -erflow; - e.g., IEEE standard followers ) */ - } else { - lemin = min(ngpmin,gpmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else if (ngpmin == gpmin && ngnmin == gnmin) { - if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) { - lemin = max(ngpmin,ngnmin); -/* ( Twos-complement machines, no gradual underflow -; - e.g., CYBER 205 ) */ - } else { - lemin = min(ngpmin,ngnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin) - { - if (gpmin - min(ngpmin,ngnmin) == 3) { - lemin = max(ngpmin,ngnmin) - 1 + lt; -/* ( Twos-complement machines with gradual underflo -w; - no known machine ) */ - } else { - lemin = min(ngpmin,ngnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } - - } else { -/* Computing MIN */ - i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin); - lemin = min(i__1,gnmin); -/* ( A guess; no known machine ) */ - iwarn = TRUE_; - } -/* ** - Comment out this if block if EMIN is ok */ - if (iwarn) { - first = TRUE_; - printf("\n\n WARNING. 
The value EMIN may be incorrect:- "); - printf("EMIN = %8i\n",lemin); - printf("If, after inspection, the value EMIN looks acceptable"); - printf("please comment out \n the IF block as marked within the"); - printf("code of routine SLAMC2, \n otherwise supply EMIN"); - printf("explicitly.\n"); - } -/* ** - - Assume IEEE arithmetic if we found denormalised numbers abo -ve, - or if arithmetic seems to round in the IEEE style, determi -ned - in routine SLAMC1. A true IEEE machine should have both thi -ngs - true; however, faulty machines may have one or the other. */ - - ieee = ieee || lieee1; - -/* Compute RMIN by successive division by BETA. We could comp -ute - RMIN as BASE**( EMIN - 1 ), but some machines underflow dur -ing - this computation. */ - - lrmin = 1.f; - i__1 = 1 - lemin; - for (i = 1; i <= 1-lemin; ++i) { - r__1 = lrmin * rbase; - lrmin = slamc3_(&r__1, &zero); -/* L30: */ - } - -/* Finally, call SLAMC5 to compute EMAX and RMAX. */ - - slamc5_(&lbeta, <, &lemin, &ieee, &lemax, &lrmax); - } - - *beta = lbeta; - *t = lt; - *rnd = lrnd; - *eps = leps; - *emin = lemin; - *rmin = lrmin; - *emax = lemax; - *rmax = lrmax; - - return 0; - - -/* End of SLAMC2 */ - -} /* slamc2_ */ - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    SLAMC3  is intended to force  A  and  B  to be stored prior to doing 
    -  
    -    the addition of  A  and  B ,  for use in situations where optimizers 
    -  
    -    might hold one of these in a register.   
    -
    -    Arguments   
    -    =========   
    -
    -    A, B    (input) FLOAT   
    -            The values A and B.   
    -
    -   ===================================================================== 
    -
    -*/ - -double slamc3_(float *a, float *b) -{ - -/* >>Start of File<< - System generated locals */ - volatile float ret_val; - volatile float x; - volatile float y; - - x = *a; - y = *b; - ret_val = x + y; - - return ret_val; - -/* End of SLAMC3 */ - -} /* slamc3_ */ - - -/* Subroutine */ - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    SLAMC4 is a service routine for SLAMC2.   
    -
    -    Arguments   
    -    =========   
    -
    -    EMIN    (output) EMIN   
    -            The minimum exponent before (gradual) underflow, computed by 
    -  
    -            setting A = START and dividing by BASE until the previous A   
    -            can not be recovered.   
    -
    -    START   (input) FLOAT   
    -            The starting point for determining EMIN.   
    -
    -    BASE    (input) INT   
    -            The base of the machine.   
    -
    -   ===================================================================== 
    -
    -*/ - -int slamc4_(int *emin, float *start, int *base) -{ - /* System generated locals */ - int i__1; - float r__1; - /* Local variables */ - static float zero, a; - static int i; - static float rbase, b1, b2, c1, c2, d1, d2; - extern double slamc3_(float *, float *); - static float one; - - - - a = *start; - one = 1.f; - rbase = one / *base; - zero = 0.f; - *emin = 1; - r__1 = a * rbase; - b1 = slamc3_(&r__1, &zero); - c1 = a; - c2 = a; - d1 = a; - d2 = a; -/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. - $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */ -L10: - if (c1 == a && c2 == a && d1 == a && d2 == a) { - --(*emin); - a = b1; - r__1 = a / *base; - b1 = slamc3_(&r__1, &zero); - r__1 = b1 * *base; - c1 = slamc3_(&r__1, &zero); - d1 = zero; - i__1 = *base; - for (i = 1; i <= *base; ++i) { - d1 += b1; -/* L20: */ - } - r__1 = a * rbase; - b2 = slamc3_(&r__1, &zero); - r__1 = b2 / rbase; - c2 = slamc3_(&r__1, &zero); - d2 = zero; - i__1 = *base; - for (i = 1; i <= *base; ++i) { - d2 += b2; -/* L30: */ - } - goto L10; - } -/* + END WHILE */ - - return 0; - -/* End of SLAMC4 */ - -} /* slamc4_ */ - - -/* Subroutine */ -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    SLAMC5 attempts to compute RMAX, the largest machine floating-point   
    -    number, without overflow.  It assumes that EMAX + abs(EMIN) sum   
    -    approximately to a power of 2.  It will fail on machines where this   
    -    assumption does not hold, for example, the Cyber 205 (EMIN = -28625, 
    -  
    -    EMAX = 28718).  It will also fail if the value supplied for EMIN is   
    -    too large (i.e. too close to zero), probably with overflow.   
    -
    -    Arguments   
    -    =========   
    -
    -    BETA    (input) INT   
    -            The base of floating-point arithmetic.   
    -
    -    P       (input) INT   
    -            The number of base BETA digits in the mantissa of a   
    -            floating-point value.   
    -
    -    EMIN    (input) INT   
    -            The minimum exponent before (gradual) underflow.   
    -
    -    IEEE    (input) INT   
    -            A logical flag specifying whether or not the arithmetic   
    -            system is thought to comply with the IEEE standard.   
    -
    -    EMAX    (output) INT   
    -            The largest exponent before overflow   
    -
    -    RMAX    (output) FLOAT   
    -            The largest machine floating-point number.   
    -
    -   ===================================================================== 
    -  
    -
    -
    -       First compute LEXP and UEXP, two powers of 2 that bound   
    -       abs(EMIN). We then assume that EMAX + abs(EMIN) will sum   
    -       approximately to the bound that is closest to abs(EMIN).   
    -       (EMAX is the exponent of the required number RMAX). 
    -
    -*/ - -int slamc5_(int *beta, int *p, int *emin, - int *ieee, int *emax, float *rmax) -{ - /* Table of constant values */ - static float c_b5 = 0.f; - - /* System generated locals */ - int i__1; - float r__1; - /* Local variables */ - static int lexp; - static float oldy; - static int uexp, i; - static float y, z; - static int nbits; - extern double slamc3_(float *, float *); - static float recbas; - static int exbits, expsum, try__; - - - - lexp = 1; - exbits = 1; -L10: - try__ = lexp << 1; - if (try__ <= -(*emin)) { - lexp = try__; - ++exbits; - goto L10; - } - if (lexp == -(*emin)) { - uexp = lexp; - } else { - uexp = try__; - ++exbits; - } - -/* Now -LEXP is less than or equal to EMIN, and -UEXP is greater - than or equal to EMIN. EXBITS is the number of bits needed to - store the exponent. */ - - if (uexp + *emin > -lexp - *emin) { - expsum = lexp << 1; - } else { - expsum = uexp << 1; - } - -/* EXPSUM is the exponent range, approximately equal to - EMAX - EMIN + 1 . */ - - *emax = expsum + *emin - 1; - nbits = exbits + 1 + *p; - -/* NBITS is the total number of bits needed to store a - floating-point number. */ - - if (nbits % 2 == 1 && *beta == 2) { - -/* Either there are an odd number of bits used to store a - floating-point number, which is unlikely, or some bits are - - not used in the representation of numbers, which is possible -, - (e.g. Cray machines) or the mantissa has an implicit bit, - (e.g. IEEE machines, Dec Vax machines), which is perhaps the - - most likely. We have to assume the last alternative. - If this is true, then we need to reduce EMAX by one because - - there must be some way of representing zero in an implicit-b -it - system. On machines like Cray, we are reducing EMAX by one - - unnecessarily. */ - - --(*emax); - } - - if (*ieee) { - -/* Assume we are on an IEEE machine which reserves one exponent - - for infinity and NaN. 
*/ - - --(*emax); - } - -/* Now create RMAX, the largest machine number, which should - be equal to (1.0 - BETA**(-P)) * BETA**EMAX . - - First compute 1.0 - BETA**(-P), being careful that the - result is less than 1.0 . */ - - recbas = 1.f / *beta; - z = *beta - 1.f; - y = 0.f; - i__1 = *p; - for (i = 1; i <= *p; ++i) { - z *= recbas; - if (y < 1.f) { - oldy = y; - } - y = slamc3_(&y, &z); -/* L20: */ - } - if (y >= 1.f) { - y = oldy; - } - -/* Now multiply by BETA**EMAX to get RMAX. */ - - i__1 = *emax; - for (i = 1; i <= *emax; ++i) { - r__1 = y * *beta; - y = slamc3_(&r__1, &c_b5); -/* L30: */ - } - - *rmax = y; - return 0; - -/* End of SLAMC5 */ - -} /* slamc5_ */ - - -double pow_ri(float *ap, int *bp) -{ -double pow, x; -int n; - -pow = 1; -x = *ap; -n = *bp; - -if(n != 0) - { - if(n < 0) - { - n = -n; - x = 1/x; - } - for( ; ; ) - { - if(n & 01) - pow *= x; - if(n >>= 1) - x *= x; - else - break; - } - } -return(pow); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slangs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slangs.c deleted file mode 100644 index d765f2d08f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slangs.c +++ /dev/null @@ -1,119 +0,0 @@ - -/*! @file slangs.c - * \brief Returns the value of the one norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from lapack routine SLANGE 
    - * 
    - */ -/* - * File name: slangs.c - * History: Modified from lapack routine SLANGE - */ -#include -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   SLANGS returns the value of the one norm, or the Frobenius norm, or 
    - *   the infinity norm, or the element of largest absolute value of a 
    - *   real matrix A.   
    - *
    - *   Description   
    - *   ===========   
    - *
    - *   SLANGE returns the value   
    - *
    - *      SLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'   
    - *               (   
    - *               ( norm1(A),         NORM = '1', 'O' or 'o'   
    - *               (   
    - *               ( normI(A),         NORM = 'I' or 'i'   
    - *               (   
    - *               ( normF(A),         NORM = 'F', 'f', 'E' or 'e'   
    - *
    - *   where  norm1  denotes the  one norm of a matrix (maximum column sum), 
    - *   normI  denotes the  infinity norm  of a matrix  (maximum row sum) and 
    - *   normF  denotes the  Frobenius norm of a matrix (square root of sum of 
    - *   squares).  Note that  max(abs(A(i,j)))  is not a  matrix norm.   
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   NORM    (input) CHARACTER*1   
    - *           Specifies the value to be returned in SLANGE as described above.   
    - *   A       (input) SuperMatrix*
    - *           The M by N sparse matrix A. 
    - *
    - *  =====================================================================
    - * 
    - */ - -float slangs(char *norm, SuperMatrix *A) -{ - - /* Local variables */ - NCformat *Astore; - float *Aval; - int i, j, irow; - float value, sum; - float *rwork; - - Astore = A->Store; - Aval = Astore->nzval; - - if ( SUPERLU_MIN(A->nrow, A->ncol) == 0) { - value = 0.; - - } else if (lsame_(norm, "M")) { - /* Find max(abs(A(i,j))). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - value = SUPERLU_MAX( value, fabs( Aval[i]) ); - - } else if (lsame_(norm, "O") || *(unsigned char *)norm == '1') { - /* Find norm1(A). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) { - sum = 0.; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - sum += fabs(Aval[i]); - value = SUPERLU_MAX(value,sum); - } - - } else if (lsame_(norm, "I")) { - /* Find normI(A). */ - if ( !(rwork = (float *) SUPERLU_MALLOC(A->nrow * sizeof(float))) ) - ABORT("SUPERLU_MALLOC fails for rwork."); - for (i = 0; i < A->nrow; ++i) rwork[i] = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) { - irow = Astore->rowind[i]; - rwork[irow] += fabs(Aval[i]); - } - value = 0.; - for (i = 0; i < A->nrow; ++i) - value = SUPERLU_MAX(value, rwork[i]); - - SUPERLU_FREE (rwork); - - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - /* Find normF(A). */ - ABORT("Not implemented."); - } else - ABORT("Illegal norm specified."); - - return (value); - -} /* slangs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slaqgs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slaqgs.c deleted file mode 100644 index d619f07a93..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slaqgs.c +++ /dev/null @@ -1,146 +0,0 @@ - -/*! @file slaqgs.c - * \brief Equlibrates a general sprase matrix - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - * Modified from LAPACK routine SLAQGE
    - * 
    - */ -/* - * File name: slaqgs.c - * History: Modified from LAPACK routine SLAQGE - */ -#include -#include "slu_sdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   SLAQGS equilibrates a general sparse M by N matrix A using the row and   
    - *   scaling factors in the vectors R and C.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input/output) SuperMatrix*
    - *           On exit, the equilibrated matrix.  See EQUED for the form of 
    - *           the equilibrated matrix. The type of A can be:
    - *	    Stype = NC; Dtype = SLU_S; Mtype = GE.
    - *	    
    - *   R       (input) float*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *	    
    - *   C       (input) float*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *	    
    - *   ROWCND  (input) float
    - *           Ratio of the smallest R(i) to the largest R(i).
    - *	    
    - *   COLCND  (input) float
    - *           Ratio of the smallest C(i) to the largest C(i).
    - *	    
    - *   AMAX    (input) float
    - *           Absolute value of largest matrix entry.
    - *	    
    - *   EQUED   (output) char*
    - *           Specifies the form of equilibration that was done.   
    - *           = 'N':  No equilibration   
    - *           = 'R':  Row equilibration, i.e., A has been premultiplied by  
    - *                   diag(R).   
    - *           = 'C':  Column equilibration, i.e., A has been postmultiplied  
    - *                   by diag(C).   
    - *           = 'B':  Both row and column equilibration, i.e., A has been
    - *                   replaced by diag(R) * A * diag(C).   
    - *
    - *   Internal Parameters   
    - *   ===================   
    - *
    - *   THRESH is a threshold value used to decide if row or column scaling   
    - *   should be done based on the ratio of the row or column scaling   
    - *   factors.  If ROWCND < THRESH, row scaling is done, and if   
    - *   COLCND < THRESH, column scaling is done.   
    - *
    - *   LARGE and SMALL are threshold values used to decide if row scaling   
    - *   should be done based on the absolute size of the largest matrix   
    - *   element.  If AMAX > LARGE or AMAX < SMALL, row scaling is done.   
    - *
    - *   ===================================================================== 
    - * 
    - */ - -void -slaqgs(SuperMatrix *A, float *r, float *c, - float rowcnd, float colcnd, float amax, char *equed) -{ - - -#define THRESH (0.1) - - /* Local variables */ - NCformat *Astore; - float *Aval; - int i, j, irow; - float large, small, cj; - extern double slamch_(char *); - - - /* Quick return if possible */ - if (A->nrow <= 0 || A->ncol <= 0) { - *(unsigned char *)equed = 'N'; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Initialize LARGE and SMALL. */ - small = slamch_("Safe minimum") / slamch_("Precision"); - large = 1. / small; - - if (rowcnd >= THRESH && amax >= small && amax <= large) { - if (colcnd >= THRESH) - *(unsigned char *)equed = 'N'; - else { - /* Column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - Aval[i] *= cj; - } - } - *(unsigned char *)equed = 'C'; - } - } else if (colcnd >= THRESH) { - /* Row scaling, no column scaling */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - Aval[i] *= r[irow]; - } - *(unsigned char *)equed = 'R'; - } else { - /* Row and column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - Aval[i] *= cj * r[irow]; - } - } - *(unsigned char *)equed = 'B'; - } - - return; - -} /* slaqgs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sldperm.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sldperm.c deleted file mode 100644 index e4c7059d9b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sldperm.c +++ /dev/null @@ -1,168 +0,0 @@ - -/*! @file - * \brief Finds a row permutation so that the matrix has large entries on the diagonal - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_sdefs.h" - -extern void mc64id_(int_t*); -extern void mc64ad_(int_t*, int_t*, int_t*, int_t [], int_t [], double [], - int_t*, int_t [], int_t*, int_t[], int_t*, double [], - int_t [], int_t []); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   SLDPERM finds a row permutation so that the matrix has large
    - *   entries on the diagonal.
    - *
    - * Arguments
    - * =========
    - *
    - * job    (input) int
    - *        Control the action. Possible values for JOB are:
    - *        = 1 : Compute a row permutation of the matrix so that the
    - *              permuted matrix has as many entries on its diagonal as
    - *              possible. The values on the diagonal are of arbitrary size.
    - *              HSL subroutine MC21A/AD is used for this.
    - *        = 2 : Compute a row permutation of the matrix so that the smallest 
    - *              value on the diagonal of the permuted matrix is maximized.
    - *        = 3 : Compute a row permutation of the matrix so that the smallest
    - *              value on the diagonal of the permuted matrix is maximized.
    - *              The algorithm differs from the one used for JOB = 2 and may
    - *              have quite a different performance.
    - *        = 4 : Compute a row permutation of the matrix so that the sum
    - *              of the diagonal entries of the permuted matrix is maximized.
    - *        = 5 : Compute a row permutation of the matrix so that the product
    - *              of the diagonal entries of the permuted matrix is maximized
    - *              and vectors to scale the matrix so that the nonzero diagonal 
    - *              entries of the permuted matrix are one in absolute value and 
    - *              all the off-diagonal entries are less than or equal to one in 
    - *              absolute value.
    - *        Restriction: 1 <= JOB <= 5.
    - *
    - * n      (input) int
    - *        The order of the matrix.
    - *
    - * nnz    (input) int
    - *        The number of nonzeros in the matrix.
    - *
    - * adjncy (input) int*, of size nnz
    - *        The adjacency structure of the matrix, which contains the row
    - *        indices of the nonzeros.
    - *
    - * colptr (input) int*, of size n+1
    - *        The pointers to the beginning of each column in ADJNCY.
    - *
    - * nzval  (input) float*, of size nnz
    - *        The nonzero values of the matrix. nzval[k] is the value of
    - *        the entry corresponding to adjncy[k].
    - *        It is not used if job = 1.
    - *
    - * perm   (output) int*, of size n
    - *        The permutation vector. perm[i] = j means row i in the
    - *        original matrix is in row j of the permuted matrix.
    - *
    - * u      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the row scaling factors. 
    - *
    - * v      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the column scaling factors. 
    - *        The scaled matrix B has entries b_ij = a_ij * exp(u_i + v_j).
    - * 
    - */ - -int -sldperm(int_t job, int_t n, int_t nnz, int_t colptr[], int_t adjncy[], - float nzval[], int_t *perm, float u[], float v[]) -{ - int_t i, liw, ldw, num; - int_t *iw, icntl[10], info[10]; - double *dw; - double *nzval_d = (double *) SUPERLU_MALLOC(nnz * sizeof(double)); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Enter sldperm()"); -#endif - liw = 5*n; - if ( job == 3 ) liw = 10*n + nnz; - if ( !(iw = intMalloc(liw)) ) ABORT("Malloc fails for iw[]"); - ldw = 3*n + nnz; - if ( !(dw = (double*) SUPERLU_MALLOC(ldw * sizeof(double))) ) - ABORT("Malloc fails for dw[]"); - - /* Increment one to get 1-based indexing. */ - for (i = 0; i <= n; ++i) ++colptr[i]; - for (i = 0; i < nnz; ++i) ++adjncy[i]; -#if ( DEBUGlevel>=2 ) - printf("LDPERM(): n %d, nnz %d\n", n, nnz); - slu_PrintInt10("colptr", n+1, colptr); - slu_PrintInt10("adjncy", nnz, adjncy); -#endif - - /* - * NOTE: - * ===== - * - * MC64AD assumes that column permutation vector is defined as: - * perm(i) = j means column i of permuted A is in column j of original A. - * - * Since a symmetric permutation preserves the diagonal entries. Then - * by the following relation: - * P'(A*P')P = P'A - * we can apply inverse(perm) to rows of A to get large diagonal entries. - * But, since 'perm' defined in MC64AD happens to be the reverse of - * SuperLU's definition of permutation vector, therefore, it is already - * an inverse for our purpose. We will thus use it directly. - * - */ - mc64id_(icntl); -#if 0 - /* Suppress error and warning messages. */ - icntl[0] = -1; - icntl[1] = -1; -#endif - - for (i = 0; i < nnz; ++i) nzval_d[i] = nzval[i]; - mc64ad_(&job, &n, &nnz, colptr, adjncy, nzval_d, &num, perm, - &liw, iw, &ldw, dw, icntl, info); - -#if ( DEBUGlevel>=2 ) - slu_PrintInt10("perm", n, perm); - printf(".. After MC64AD info %d\tsize of matching %d\n", info[0], num); -#endif - if ( info[0] == 1 ) { /* Structurally singular */ - printf(".. 
The last %d permutations:\n", n-num); - slu_PrintInt10("perm", n-num, &perm[num]); - } - - /* Restore to 0-based indexing. */ - for (i = 0; i <= n; ++i) --colptr[i]; - for (i = 0; i < nnz; ++i) --adjncy[i]; - for (i = 0; i < n; ++i) --perm[i]; - - if ( job == 5 ) - for (i = 0; i < n; ++i) { - u[i] = dw[i]; - v[i] = dw[n+i]; - } - - SUPERLU_FREE(iw); - SUPERLU_FREE(dw); - SUPERLU_FREE(nzval_d); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Exit sldperm()"); -#endif - - return info[0]; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_Cnames.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_Cnames.h deleted file mode 100644 index 528b9e5f51..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_Cnames.h +++ /dev/null @@ -1,362 +0,0 @@ -/*! @file slu_Cnames.h - * \brief Macros defining how C routines will be called - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 1, 1997
    - *
    - * These macros define how C routines will be called.  ADD_ assumes that
    - * they will be called by fortran, which expects C routines to have an
    - * underscore postfixed to the name (Suns, and the Intel expect this).
    - * NOCHANGE indicates that fortran will be calling, and that it expects
    - * the name called by fortran to be identical to that compiled by the C
    - * (RS6K's do this).  UPCASE says it expects C routines called by fortran
    - * to be in all upcase (CRAY wants this). 
    - * 
    - */ -#ifndef __SUPERLU_CNAMES /* allow multiple inclusions */ -#define __SUPERLU_CNAMES - -#include "scipy_slu_config.h" - -#define ADD_ 0 -#define ADD__ 1 -#define NOCHANGE 2 -#define UPCASE 3 -#define C_CALL 4 - -#ifdef UpCase -#define F77_CALL_C UPCASE -#endif - -#ifdef NoChange -#define F77_CALL_C NOCHANGE -#endif - -#ifdef Add_ -#define F77_CALL_C ADD_ -#endif - -#ifdef Add__ -#define F77_CALL_C ADD__ -#endif - -/* Default */ -#ifndef F77_CALL_C -#define F77_CALL_C ADD_ -#endif - - -#if (F77_CALL_C == ADD_) -/* - * These defines set up the naming scheme required to have a fortran 77 - * routine call a C routine - * No redefinition necessary to have following Fortran to C interface: - * FORTRAN CALL C DECLARATION - * call dgemm(...) void dgemm_(...) - * - * This is the default. - */ - -#endif - -#if (F77_CALL_C == ADD__) -/* - * These defines set up the naming scheme required to have a fortran 77 - * routine call a C routine - * for following Fortran to C interface: - * FORTRAN CALL C DECLARATION - * call dgemm(...) void dgemm__(...) 
- */ -/* BLAS */ -#define sswap_ sswap__ -#define saxpy_ saxpy__ -#define sasum_ sasum__ -#define isamax_ isamax__ -#define scopy_ scopy__ -#define sscal_ sscal__ -#define sger_ sger__ -#define snrm2_ snrm2__ -#define ssymv_ ssymv__ -#define sdot_ sdot__ -#define saxpy_ saxpy__ -#define ssyr2_ ssyr2__ -#define srot_ srot__ -#define sgemv_ sgemv__ -#define strsv_ strsv__ -#define sgemm_ sgemm__ -#define strsm_ strsm__ - -#define dswap_ dswap__ -#define daxpy_ daxpy__ -#define dasum_ dasum__ -#define idamax_ idamax__ -#define dcopy_ dcopy__ -#define dscal_ dscal__ -#define dger_ dger__ -#define dnrm2_ dnrm2__ -#define dsymv_ dsymv__ -#define ddot_ ddot__ -#define daxpy_ daxpy__ -#define dsyr2_ dsyr2__ -#define drot_ drot__ -#define dgemv_ dgemv__ -#define dtrsv_ dtrsv__ -#define dgemm_ dgemm__ -#define dtrsm_ dtrsm__ - -#define cswap_ cswap__ -#define caxpy_ caxpy__ -#define scasum_ scasum__ -#define icamax_ icamax__ -#define ccopy_ ccopy__ -#define cscal_ cscal__ -#define scnrm2_ scnrm2__ -#define caxpy_ caxpy__ -#define cgemv_ cgemv__ -#define ctrsv_ ctrsv__ -#define cgemm_ cgemm__ -#define ctrsm_ ctrsm__ -#define cgerc_ cgerc__ -#define chemv_ chemv__ -#define cher2_ cher2__ - -#define zswap_ zswap__ -#define zaxpy_ zaxpy__ -#define dzasum_ dzasum__ -#define izamax_ izamax__ -#define zcopy_ zcopy__ -#define zscal_ zscal__ -#define dznrm2_ dznrm2__ -#define zaxpy_ zaxpy__ -#define zgemv_ zgemv__ -#define ztrsv_ ztrsv__ -#define zgemm_ zgemm__ -#define ztrsm_ ztrsm__ -#define zgerc_ zgerc__ -#define zhemv_ zhemv__ -#define zher2_ zher2__ - -/* LAPACK */ -#define dlamch_ dlamch__ -#define slamch_ slamch__ -#define xerbla_ xerbla__ -#define lsame_ lsame__ -#define dlacon_ dlacon__ -#define slacon_ slacon__ -#define icmax1_ icmax1__ -#define scsum1_ scsum1__ -#define clacon_ clacon__ -#define dzsum1_ dzsum1__ -#define izmax1_ izmax1__ -#define zlacon_ zlacon__ - -/* Fortran interface */ -#define c_bridge_dgssv_ c_bridge_dgssv__ -#define c_fortran_sgssv_ 
c_fortran_sgssv__ -#define c_fortran_dgssv_ c_fortran_dgssv__ -#define c_fortran_cgssv_ c_fortran_cgssv__ -#define c_fortran_zgssv_ c_fortran_zgssv__ -#endif - -#if (F77_CALL_C == UPCASE) -/* - * These defines set up the naming scheme required to have a fortran 77 - * routine call a C routine - * following Fortran to C interface: - * FORTRAN CALL C DECLARATION - * call dgemm(...) void DGEMM(...) - */ -/* BLAS */ -#define sswap_ SSWAP -#define saxpy_ SAXPY -#define sasum_ SASUM -#define isamax_ ISAMAX -#define scopy_ SCOPY -#define sscal_ SSCAL -#define sger_ SGER -#define snrm2_ SNRM2 -#define ssymv_ SSYMV -#define sdot_ SDOT -#define saxpy_ SAXPY -#define ssyr2_ SSYR2 -#define srot_ SROT -#define sgemv_ SGEMV -#define strsv_ STRSV -#define sgemm_ SGEMM -#define strsm_ STRSM - -#define dswap_ DSWAP -#define daxpy_ DAXPY -#define dasum_ SASUM -#define idamax_ ISAMAX -#define dcopy_ SCOPY -#define dscal_ SSCAL -#define dger_ SGER -#define dnrm2_ SNRM2 -#define dsymv_ SSYMV -#define ddot_ SDOT -#define daxpy_ SAXPY -#define dsyr2_ SSYR2 -#define drot_ SROT -#define dgemv_ SGEMV -#define dtrsv_ STRSV -#define dgemm_ SGEMM -#define dtrsm_ STRSM - -#define cswap_ CSWAP -#define caxpy_ CAXPY -#define scasum_ SCASUM -#define icamax_ ICAMAX -#define ccopy_ CCOPY -#define cscal_ CSCAL -#define scnrm2_ SCNRM2 -#define caxpy_ CAXPY -#define cgemv_ CGEMV -#define ctrsv_ CTRSV -#define cgemm_ CGEMM -#define ctrsm_ CTRSM -#define cgerc_ CGERC -#define chemv_ CHEMV -#define cher2_ CHER2 - -#define zswap_ ZSWAP -#define zaxpy_ ZAXPY -#define dzasum_ DZASUM -#define izamax_ IZAMAX -#define zcopy_ ZCOPY -#define zscal_ ZSCAL -#define dznrm2_ DZNRM2 -#define zaxpy_ ZAXPY -#define zgemv_ ZGEMV -#define ztrsv_ ZTRSV -#define zgemm_ ZGEMM -#define ztrsm_ ZTRSM -#define zgerc_ ZGERC -#define zhemv_ ZHEMV -#define zher2_ ZHER2 - -/* LAPACK */ -#define dlamch_ DLAMCH -#define slamch_ SLAMCH -#define xerbla_ XERBLA -#define lsame_ LSAME -#define dlacon_ DLACON -#define slacon_ SLACON 
-#define icmax1_ ICMAX1 -#define scsum1_ SCSUM1 -#define clacon_ CLACON -#define dzsum1_ DZSUM1 -#define izmax1_ IZMAX1 -#define zlacon_ ZLACON - -/* Fortran interface */ -#define c_bridge_dgssv_ C_BRIDGE_DGSSV -#define c_fortran_sgssv_ C_FORTRAN_SGSSV -#define c_fortran_dgssv_ C_FORTRAN_DGSSV -#define c_fortran_cgssv_ C_FORTRAN_CGSSV -#define c_fortran_zgssv_ C_FORTRAN_ZGSSV -#endif - -#if (F77_CALL_C == NOCHANGE) -/* - * These defines set up the naming scheme required to have a fortran 77 - * routine call a C routine - * for following Fortran to C interface: - * FORTRAN CALL C DECLARATION - * call dgemm(...) void dgemm(...) - */ -/* BLAS */ -#define sswap_ sswap -#define saxpy_ saxpy -#define sasum_ sasum -#define isamax_ isamax -#define scopy_ scopy -#define sscal_ sscal -#define sger_ sger -#define snrm2_ snrm2 -#define ssymv_ ssymv -#define sdot_ sdot -#define saxpy_ saxpy -#define ssyr2_ ssyr2 -#define srot_ srot -#define sgemv_ sgemv -#define strsv_ strsv -#define sgemm_ sgemm -#define strsm_ strsm - -#define dswap_ dswap -#define daxpy_ daxpy -#define dasum_ dasum -#define idamax_ idamax -#define dcopy_ dcopy -#define dscal_ dscal -#define dger_ dger -#define dnrm2_ dnrm2 -#define dsymv_ dsymv -#define ddot_ ddot -#define daxpy_ daxpy -#define dsyr2_ dsyr2 -#define drot_ drot -#define dgemv_ dgemv -#define dtrsv_ dtrsv -#define dgemm_ dgemm -#define dtrsm_ dtrsm - -#define cswap_ cswap -#define caxpy_ caxpy -#define scasum_ scasum -#define icamax_ icamax -#define ccopy_ ccopy -#define cscal_ cscal -#define scnrm2_ scnrm2 -#define caxpy_ caxpy -#define cgemv_ cgemv -#define ctrsv_ ctrsv -#define cgemm_ cgemm -#define ctrsm_ ctrsm -#define cgerc_ cgerc -#define chemv_ chemv -#define cher2_ cher2 - -#define zswap_ zswap -#define zaxpy_ zaxpy -#define dzasum_ dzasum -#define izamax_ izamax -#define zcopy_ zcopy -#define zscal_ zscal -#define dznrm2_ dznrm2 -#define zaxpy_ zaxpy -#define zgemv_ zgemv -#define ztrsv_ ztrsv -#define zgemm_ zgemm -#define ztrsm_ 
ztrsm -#define zgerc_ zgerc -#define zhemv_ zhemv -#define zher2_ zher2 - -/* LAPACK */ -#define dlamch_ dlamch -#define slamch_ slamch -#define xerbla_ xerbla -#define lsame_ lsame -#define dlacon_ dlacon -#define slacon_ slacon -#define icmax1_ icmax1 -#define scsum1_ scsum1 -#define clacon_ clacon -#define dzsum1_ dzsum1 -#define izmax1_ izmax1 -#define zlacon_ zlacon - -/* Fortran interface */ -#define c_bridge_dgssv_ c_bridge_dgssv -#define c_fortran_sgssv_ c_fortran_sgssv -#define c_fortran_dgssv_ c_fortran_dgssv -#define c_fortran_cgssv_ c_fortran_cgssv -#define c_fortran_zgssv_ c_fortran_zgssv -#endif - -#endif /* __SUPERLU_CNAMES */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_cdefs.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_cdefs.h deleted file mode 100644 index c96f10f619..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_cdefs.h +++ /dev/null @@ -1,282 +0,0 @@ - -/*! @file slu_cdefs.h - * \brief Header file for real operations - * - *
     
    - * -- SuperLU routine (version 4.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * June 30, 2009
    - * 
    - * Global data structures used in LU factorization -
    - * 
    - *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
    - *   (xsup,supno): supno[i] is the supernode no to which i belongs;
    - *	xsup(s) points to the beginning of the s-th supernode.
    - *	e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)
    - *	        xsup 0 1 2 4 7 12
    - *	Note: dfs will be performed on supernode rep. relative to the new 
    - *	      row pivoting ordering
    - *
    - *   (xlsub,lsub): lsub[*] contains the compressed subscript of
    - *	rectangular supernodes; xlsub[j] points to the starting
    - *	location of the j-th column in lsub[*]. Note that xlsub 
    - *	is indexed by column.
    - *	Storage: original row subscripts
    - *
    - *      During the course of sparse LU factorization, we also use
    - *	(xlsub,lsub) for the purpose of symmetric pruning. For each
    - *	supernode {s,s+1,...,t=s+r} with first column s and last
    - *	column t, the subscript set
    - *		lsub[j], j=xlsub[s], .., xlsub[s+1]-1
    - *	is the structure of column s (i.e. structure of this supernode).
    - *	It is used for the storage of numerical values.
    - *	Furthermore,
    - *		lsub[j], j=xlsub[t], .., xlsub[t+1]-1
    - *	is the structure of the last column t of this supernode.
    - *	It is for the purpose of symmetric pruning. Therefore, the
    - *	structural subscripts can be rearranged without making physical
    - *	interchanges among the numerical values.
    - *
    - *	However, if the supernode has only one column, then we
    - *	only keep one set of subscripts. For any subscript interchange
    - *	performed, similar interchange must be done on the numerical
    - *	values.
    - *
    - *	The last column structures (for pruning) will be removed
    - *	after the numercial LU factorization phase.
    - *
    - *   (xlusup,lusup): lusup[*] contains the numerical values of the
    - *	rectangular supernodes; xlusup[j] points to the starting
    - *	location of the j-th column in storage vector lusup[*]
    - *	Note: xlusup is indexed by column.
    - *	Each rectangular supernode is stored by column-major
    - *	scheme, consistent with Fortran 2-dim array storage.
    - *
    - *   (xusub,ucol,usub): ucol[*] stores the numerical values of
    - *	U-columns outside the rectangular supernodes. The row
    - *	subscript of nonzero ucol[k] is stored in usub[k].
    - *	xusub[i] points to the starting location of column i in ucol.
    - *	Storage: new row subscripts; that is subscripts of PA.
    - * 
    - */ -#ifndef __SUPERLU_cSP_DEFS /* allow multiple inclusions */ -#define __SUPERLU_cSP_DEFS - -/* - * File name: csp_defs.h - * Purpose: Sparse matrix types and function prototypes - * History: - */ - -#ifdef _CRAY -#include -#include -#endif - -/* Define my integer type int_t */ -typedef int int_t; /* default */ - -#include -#include -#include "slu_Cnames.h" -#include "supermatrix.h" -#include "slu_util.h" -#include "slu_scomplex.h" - - - -typedef struct { - int *xsup; /* supernode and column mapping */ - int *supno; - int *lsub; /* compressed L subscripts */ - int *xlsub; - complex *lusup; /* L supernodes */ - int *xlusup; - complex *ucol; /* U columns */ - int *usub; - int *xusub; - int nzlmax; /* current max size of lsub */ - int nzumax; /* " " " ucol */ - int nzlumax; /* " " " lusup */ - int n; /* number of columns in the matrix */ - LU_space_t MemModel; /* 0 - system malloc'd; 1 - user provided */ - int num_expansions; - ExpHeader *expanders; /* Array of pointers to 4 types of memory */ - LU_stack_t stack; /* use user supplied memory */ -} GlobalLU_t; - - -/* -------- Prototypes -------- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/*! 
\brief Driver routines */ -extern void -cgssv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -cgssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, float *, float *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, - float *, float *, float *, float *, - mem_usage_t *, SuperLUStat_t *, int *); - /* ILU */ -extern void -cgsisv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -cgsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, float *, float *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, float *, float *, - mem_usage_t *, SuperLUStat_t *, int *); - - -/*! \brief Supernodal LU factor related */ -extern void -cCreate_CompCol_Matrix(SuperMatrix *, int, int, int, complex *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -cCreate_CompRow_Matrix(SuperMatrix *, int, int, int, complex *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -cCopy_CompCol_Matrix(SuperMatrix *, SuperMatrix *); -extern void -cCreate_Dense_Matrix(SuperMatrix *, int, int, complex *, int, - Stype_t, Dtype_t, Mtype_t); -extern void -cCreate_SuperNode_Matrix(SuperMatrix *, int, int, int, complex *, - int *, int *, int *, int *, int *, - Stype_t, Dtype_t, Mtype_t); -extern void -cCopy_Dense_Matrix(int, int, complex *, int, complex *, int); - -extern void countnz (const int, int *, int *, int *, GlobalLU_t *); -extern void ilu_countnz (const int, int *, int *, GlobalLU_t *); -extern void fixupL (const int, const int *, GlobalLU_t *); - -extern void callocateA (int, int, complex **, int **, int **); -extern void cgstrf (superlu_options_t*, SuperMatrix*, - int, int, int*, void *, int, int *, int *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t*, int *); -extern int csnode_dfs (const int, const int, const int *, const int 
*, - const int *, int *, int *, GlobalLU_t *); -extern int csnode_bmod (const int, const int, const int, complex *, - complex *, GlobalLU_t *, SuperLUStat_t*); -extern void cpanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, complex *, int *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern void cpanel_bmod (const int, const int, const int, const int, - complex *, complex *, int *, int *, - GlobalLU_t *, SuperLUStat_t*); -extern int ccolumn_dfs (const int, const int, int *, int *, int *, int *, - int *, int *, int *, int *, int *, GlobalLU_t *); -extern int ccolumn_bmod (const int, const int, complex *, - complex *, int *, int *, int, - GlobalLU_t *, SuperLUStat_t*); -extern int ccopy_to_ucol (int, int, int *, int *, int *, - complex *, GlobalLU_t *); -extern int cpivotL (const int, const double, int *, int *, - int *, int *, int *, GlobalLU_t *, SuperLUStat_t*); -extern void cpruneL (const int, const int *, const int, const int, - const int *, const int *, int *, GlobalLU_t *); -extern void creadmt (int *, int *, int *, complex **, int **, int **); -extern void cGenXtrue (int, int, complex *, int); -extern void cFillRHS (trans_t, int, complex *, int, SuperMatrix *, - SuperMatrix *); -extern void cgstrs (trans_t, SuperMatrix *, SuperMatrix *, int *, int *, - SuperMatrix *, SuperLUStat_t*, int *); -/* ILU */ -extern void cgsitrf (superlu_options_t*, SuperMatrix*, int, int, int*, - void *, int, int *, int *, SuperMatrix *, SuperMatrix *, - SuperLUStat_t*, int *); -extern int cldperm(int, int, int, int [], int [], complex [], - int [], float [], float []); -extern int ilu_csnode_dfs (const int, const int, const int *, const int *, - const int *, int *, GlobalLU_t *); -extern void ilu_cpanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, complex *, float *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern int ilu_ccolumn_dfs (const int, const int, int *, int *, int *, - int *, int *, int 
*, int *, int *, - GlobalLU_t *); -extern int ilu_ccopy_to_ucol (int, int, int *, int *, int *, - complex *, int, milu_t, double, int, - complex *, int *, GlobalLU_t *, int *); -extern int ilu_cpivotL (const int, const double, int *, int *, int, int *, - int *, int *, int *, double, milu_t, - complex, GlobalLU_t *, SuperLUStat_t*); -extern int ilu_cdrop_row (superlu_options_t *, int, int, double, - int, int *, double *, GlobalLU_t *, - float *, int *, int); - - -/*! \brief Driver related */ - -extern void cgsequ (SuperMatrix *, float *, float *, float *, - float *, float *, int *); -extern void claqgs (SuperMatrix *, float *, float *, float, - float, float, char *); -extern void cgscon (char *, SuperMatrix *, SuperMatrix *, - float, float *, SuperLUStat_t*, int *); -extern float cPivotGrowth(int, SuperMatrix *, int *, - SuperMatrix *, SuperMatrix *); -extern void cgsrfs (trans_t, SuperMatrix *, SuperMatrix *, - SuperMatrix *, int *, int *, char *, float *, - float *, SuperMatrix *, SuperMatrix *, - float *, float *, SuperLUStat_t*, int *); - -extern int sp_ctrsv (char *, char *, char *, SuperMatrix *, - SuperMatrix *, complex *, SuperLUStat_t*, int *); -extern int sp_cgemv (char *, complex, SuperMatrix *, complex *, - int, complex, complex *, int); - -extern int sp_cgemm (char *, char *, int, int, int, complex, - SuperMatrix *, complex *, int, complex, - complex *, int); -extern double slamch_(char *); - - -/*! 
\brief Memory-related */ -extern int cLUMemInit (fact_t, void *, int, int, int, int, int, - float, SuperMatrix *, SuperMatrix *, - GlobalLU_t *, int **, complex **); -extern void cSetRWork (int, int, complex *, complex **, complex **); -extern void cLUWorkFree (int *, complex *, GlobalLU_t *); -extern int cLUMemXpand (int, int, MemType, int *, GlobalLU_t *); - -extern complex *complexMalloc(int); -extern complex *complexCalloc(int); -extern float *floatMalloc(int); -extern float *floatCalloc(int); -extern int cmemory_usage(const int, const int, const int, const int); -extern int cQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); -extern int ilu_cQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); - -/*! \brief Auxiliary routines */ -extern void creadhb(int *, int *, int *, complex **, int **, int **); -extern void creadrb(int *, int *, int *, complex **, int **, int **); -extern void creadtriple(int *, int *, int *, complex **, int **, int **); -extern void cCompRow_to_CompCol(int, int, int, complex*, int*, int*, - complex **, int **, int **); -extern void cfill (complex *, int, complex); -extern void cinf_norm_error (int, SuperMatrix *, complex *); -extern void PrintPerf (SuperMatrix *, SuperMatrix *, mem_usage_t *, - complex, complex, complex *, complex *, char *); - -/*! 
\brief Routines for debugging */ -extern void cPrint_CompCol_Matrix(char *, SuperMatrix *); -extern void cPrint_SuperNode_Matrix(char *, SuperMatrix *); -extern void cPrint_Dense_Matrix(char *, SuperMatrix *); -extern void cprint_lu_col(char *, int, int, int *, GlobalLU_t *); -extern int print_double_vec(char *, int, double *); -extern void check_tempv(int, complex *); - -#ifdef __cplusplus - } -#endif - -#endif /* __SUPERLU_cSP_DEFS */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_dcomplex.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_dcomplex.h deleted file mode 100644 index 386ad68938..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_dcomplex.h +++ /dev/null @@ -1,78 +0,0 @@ - -/*! @file slu_dcomplex.h - * \brief Header file for complex operations - *
     
    - *  -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Contains definitions for various complex operations.
    - * This header file is to be included in source files z*.c
    - * 
    - */ -#ifndef __SUPERLU_DCOMPLEX /* allow multiple inclusions */ -#define __SUPERLU_DCOMPLEX - - -#ifndef DCOMPLEX_INCLUDE -#define DCOMPLEX_INCLUDE - -typedef struct { double r, i; } doublecomplex; - - -/* Macro definitions */ - -/*! \brief Complex Addition c = a + b */ -#define z_add(c, a, b) { (c)->r = (a)->r + (b)->r; \ - (c)->i = (a)->i + (b)->i; } - -/*! \brief Complex Subtraction c = a - b */ -#define z_sub(c, a, b) { (c)->r = (a)->r - (b)->r; \ - (c)->i = (a)->i - (b)->i; } - -/*! \brief Complex-Double Multiplication */ -#define zd_mult(c, a, b) { (c)->r = (a)->r * (b); \ - (c)->i = (a)->i * (b); } - -/*! \brief Complex-Complex Multiplication */ -#define zz_mult(c, a, b) { \ - double cr, ci; \ - cr = (a)->r * (b)->r - (a)->i * (b)->i; \ - ci = (a)->i * (b)->r + (a)->r * (b)->i; \ - (c)->r = cr; \ - (c)->i = ci; \ - } - -#define zz_conj(a, b) { \ - (a)->r = (b)->r; \ - (a)->i = -((b)->i); \ - } - -/*! \brief Complex equality testing */ -#define z_eq(a, b) ( (a)->r == (b)->r && (a)->i == (b)->i ) - - -#ifdef __cplusplus -extern "C" { -#endif - -/* Prototypes for functions in dcomplex.c */ -void z_div(doublecomplex *, doublecomplex *, doublecomplex *); -double z_abs(doublecomplex *); /* exact */ -double z_abs1(doublecomplex *); /* approximate */ -void z_exp(doublecomplex *, doublecomplex *); -void d_cnjg(doublecomplex *r, doublecomplex *z); -double d_imag(doublecomplex *); -doublecomplex z_sgn(doublecomplex *); -doublecomplex z_sqrt(doublecomplex *); - - - -#ifdef __cplusplus - } -#endif - -#endif - -#endif /* __SUPERLU_DCOMPLEX */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_ddefs.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_ddefs.h deleted file mode 100644 index c427f44ce9..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_ddefs.h +++ /dev/null @@ -1,279 +0,0 @@ - -/*! @file slu_ddefs.h - * \brief Header file for real operations - * - *
     
    - * -- SuperLU routine (version 4.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * June 30, 2009
    - * 
    - * Global data structures used in LU factorization -
    - * 
    - *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
    - *   (xsup,supno): supno[i] is the supernode no to which i belongs;
    - *	xsup(s) points to the beginning of the s-th supernode.
    - *	e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)
    - *	        xsup 0 1 2 4 7 12
    - *	Note: dfs will be performed on supernode rep. relative to the new 
    - *	      row pivoting ordering
    - *
    - *   (xlsub,lsub): lsub[*] contains the compressed subscript of
    - *	rectangular supernodes; xlsub[j] points to the starting
    - *	location of the j-th column in lsub[*]. Note that xlsub 
    - *	is indexed by column.
    - *	Storage: original row subscripts
    - *
    - *      During the course of sparse LU factorization, we also use
    - *	(xlsub,lsub) for the purpose of symmetric pruning. For each
    - *	supernode {s,s+1,...,t=s+r} with first column s and last
    - *	column t, the subscript set
    - *		lsub[j], j=xlsub[s], .., xlsub[s+1]-1
    - *	is the structure of column s (i.e. structure of this supernode).
    - *	It is used for the storage of numerical values.
    - *	Furthermore,
    - *		lsub[j], j=xlsub[t], .., xlsub[t+1]-1
    - *	is the structure of the last column t of this supernode.
    - *	It is for the purpose of symmetric pruning. Therefore, the
    - *	structural subscripts can be rearranged without making physical
    - *	interchanges among the numerical values.
    - *
    - *	However, if the supernode has only one column, then we
    - *	only keep one set of subscripts. For any subscript interchange
    - *	performed, similar interchange must be done on the numerical
    - *	values.
    - *
    - *	The last column structures (for pruning) will be removed
    - *	after the numercial LU factorization phase.
    - *
    - *   (xlusup,lusup): lusup[*] contains the numerical values of the
    - *	rectangular supernodes; xlusup[j] points to the starting
    - *	location of the j-th column in storage vector lusup[*]
    - *	Note: xlusup is indexed by column.
    - *	Each rectangular supernode is stored by column-major
    - *	scheme, consistent with Fortran 2-dim array storage.
    - *
    - *   (xusub,ucol,usub): ucol[*] stores the numerical values of
    - *	U-columns outside the rectangular supernodes. The row
    - *	subscript of nonzero ucol[k] is stored in usub[k].
    - *	xusub[i] points to the starting location of column i in ucol.
    - *	Storage: new row subscripts; that is subscripts of PA.
    - * 
    - */ -#ifndef __SUPERLU_dSP_DEFS /* allow multiple inclusions */ -#define __SUPERLU_dSP_DEFS - -/* - * File name: dsp_defs.h - * Purpose: Sparse matrix types and function prototypes - * History: - */ - -#ifdef _CRAY -#include -#include -#endif - -/* Define my integer type int_t */ -typedef int int_t; /* default */ - -#include -#include -#include "slu_Cnames.h" -#include "supermatrix.h" -#include "slu_util.h" - - - -typedef struct { - int *xsup; /* supernode and column mapping */ - int *supno; - int *lsub; /* compressed L subscripts */ - int *xlsub; - double *lusup; /* L supernodes */ - int *xlusup; - double *ucol; /* U columns */ - int *usub; - int *xusub; - int nzlmax; /* current max size of lsub */ - int nzumax; /* " " " ucol */ - int nzlumax; /* " " " lusup */ - int n; /* number of columns in the matrix */ - LU_space_t MemModel; /* 0 - system malloc'd; 1 - user provided */ - int num_expansions; - ExpHeader *expanders; /* Array of pointers to 4 types of memory */ - LU_stack_t stack; /* use user supplied memory */ -} GlobalLU_t; - - -/* -------- Prototypes -------- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/*! \brief Driver routines */ -extern void -dgssv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -dgssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, double *, double *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, - double *, double *, double *, double *, - mem_usage_t *, SuperLUStat_t *, int *); - /* ILU */ -extern void -dgsisv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -dgsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, double *, double *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, double *, double *, - mem_usage_t *, SuperLUStat_t *, int *); - - -/*! 
\brief Supernodal LU factor related */ -extern void -dCreate_CompCol_Matrix(SuperMatrix *, int, int, int, double *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -dCreate_CompRow_Matrix(SuperMatrix *, int, int, int, double *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -dCopy_CompCol_Matrix(SuperMatrix *, SuperMatrix *); -extern void -dCreate_Dense_Matrix(SuperMatrix *, int, int, double *, int, - Stype_t, Dtype_t, Mtype_t); -extern void -dCreate_SuperNode_Matrix(SuperMatrix *, int, int, int, double *, - int *, int *, int *, int *, int *, - Stype_t, Dtype_t, Mtype_t); -extern void -dCopy_Dense_Matrix(int, int, double *, int, double *, int); - -extern void countnz (const int, int *, int *, int *, GlobalLU_t *); -extern void ilu_countnz (const int, int *, int *, GlobalLU_t *); -extern void fixupL (const int, const int *, GlobalLU_t *); - -extern void dallocateA (int, int, double **, int **, int **); -extern void dgstrf (superlu_options_t*, SuperMatrix*, - int, int, int*, void *, int, int *, int *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t*, int *); -extern int dsnode_dfs (const int, const int, const int *, const int *, - const int *, int *, int *, GlobalLU_t *); -extern int dsnode_bmod (const int, const int, const int, double *, - double *, GlobalLU_t *, SuperLUStat_t*); -extern void dpanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, double *, int *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern void dpanel_bmod (const int, const int, const int, const int, - double *, double *, int *, int *, - GlobalLU_t *, SuperLUStat_t*); -extern int dcolumn_dfs (const int, const int, int *, int *, int *, int *, - int *, int *, int *, int *, int *, GlobalLU_t *); -extern int dcolumn_bmod (const int, const int, double *, - double *, int *, int *, int, - GlobalLU_t *, SuperLUStat_t*); -extern int dcopy_to_ucol (int, int, int *, int *, int *, - double *, GlobalLU_t *); -extern int dpivotL (const int, const 
double, int *, int *, - int *, int *, int *, GlobalLU_t *, SuperLUStat_t*); -extern void dpruneL (const int, const int *, const int, const int, - const int *, const int *, int *, GlobalLU_t *); -extern void dreadmt (int *, int *, int *, double **, int **, int **); -extern void dGenXtrue (int, int, double *, int); -extern void dFillRHS (trans_t, int, double *, int, SuperMatrix *, - SuperMatrix *); -extern void dgstrs (trans_t, SuperMatrix *, SuperMatrix *, int *, int *, - SuperMatrix *, SuperLUStat_t*, int *); -/* ILU */ -extern void dgsitrf (superlu_options_t*, SuperMatrix*, int, int, int*, - void *, int, int *, int *, SuperMatrix *, SuperMatrix *, - SuperLUStat_t*, int *); -extern int dldperm(int, int, int, int [], int [], double [], - int [], double [], double []); -extern int ilu_dsnode_dfs (const int, const int, const int *, const int *, - const int *, int *, GlobalLU_t *); -extern void ilu_dpanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, double *, double *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern int ilu_dcolumn_dfs (const int, const int, int *, int *, int *, - int *, int *, int *, int *, int *, - GlobalLU_t *); -extern int ilu_dcopy_to_ucol (int, int, int *, int *, int *, - double *, int, milu_t, double, int, - double *, int *, GlobalLU_t *, int *); -extern int ilu_dpivotL (const int, const double, int *, int *, int, int *, - int *, int *, int *, double, milu_t, - double, GlobalLU_t *, SuperLUStat_t*); -extern int ilu_ddrop_row (superlu_options_t *, int, int, double, - int, int *, double *, GlobalLU_t *, - double *, int *, int); - - -/*! 
\brief Driver related */ - -extern void dgsequ (SuperMatrix *, double *, double *, double *, - double *, double *, int *); -extern void dlaqgs (SuperMatrix *, double *, double *, double, - double, double, char *); -extern void dgscon (char *, SuperMatrix *, SuperMatrix *, - double, double *, SuperLUStat_t*, int *); -extern double dPivotGrowth(int, SuperMatrix *, int *, - SuperMatrix *, SuperMatrix *); -extern void dgsrfs (trans_t, SuperMatrix *, SuperMatrix *, - SuperMatrix *, int *, int *, char *, double *, - double *, SuperMatrix *, SuperMatrix *, - double *, double *, SuperLUStat_t*, int *); - -extern int sp_dtrsv (char *, char *, char *, SuperMatrix *, - SuperMatrix *, double *, SuperLUStat_t*, int *); -extern int sp_dgemv (char *, double, SuperMatrix *, double *, - int, double, double *, int); - -extern int sp_dgemm (char *, char *, int, int, int, double, - SuperMatrix *, double *, int, double, - double *, int); -extern double dlamch_(char *); - - -/*! \brief Memory-related */ -extern int dLUMemInit (fact_t, void *, int, int, int, int, int, - double, SuperMatrix *, SuperMatrix *, - GlobalLU_t *, int **, double **); -extern void dSetRWork (int, int, double *, double **, double **); -extern void dLUWorkFree (int *, double *, GlobalLU_t *); -extern int dLUMemXpand (int, int, MemType, int *, GlobalLU_t *); - -extern double *doubleMalloc(int); -extern double *doubleCalloc(int); -extern int dmemory_usage(const int, const int, const int, const int); -extern int dQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); -extern int ilu_dQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); - -/*! 
\brief Auxiliary routines */ -extern void dreadhb(int *, int *, int *, double **, int **, int **); -extern void dreadrb(int *, int *, int *, double **, int **, int **); -extern void dreadtriple(int *, int *, int *, double **, int **, int **); -extern void dCompRow_to_CompCol(int, int, int, double*, int*, int*, - double **, int **, int **); -extern void dfill (double *, int, double); -extern void dinf_norm_error (int, SuperMatrix *, double *); -extern void PrintPerf (SuperMatrix *, SuperMatrix *, mem_usage_t *, - double, double, double *, double *, char *); - -/*! \brief Routines for debugging */ -extern void dPrint_CompCol_Matrix(char *, SuperMatrix *); -extern void dPrint_SuperNode_Matrix(char *, SuperMatrix *); -extern void dPrint_Dense_Matrix(char *, SuperMatrix *); -extern void dprint_lu_col(char *, int, int, int *, GlobalLU_t *); -extern int print_double_vec(char *, int, double *); -extern void check_tempv(int, double *); - -#ifdef __cplusplus - } -#endif - -#endif /* __SUPERLU_dSP_DEFS */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_scomplex.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_scomplex.h deleted file mode 100644 index 5fa2186f74..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_scomplex.h +++ /dev/null @@ -1,78 +0,0 @@ - -/*! @file slu_scomplex.h - * \brief Header file for complex operations - *
     
    - *  -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Contains definitions for various complex operations.
    - * This header file is to be included in source files c*.c
    - * 
    - */ -#ifndef __SUPERLU_SCOMPLEX /* allow multiple inclusions */ -#define __SUPERLU_SCOMPLEX - - -#ifndef SCOMPLEX_INCLUDE -#define SCOMPLEX_INCLUDE - -typedef struct { float r, i; } complex; - - -/* Macro definitions */ - -/*! \brief Complex Addition c = a + b */ -#define c_add(c, a, b) { (c)->r = (a)->r + (b)->r; \ - (c)->i = (a)->i + (b)->i; } - -/*! \brief Complex Subtraction c = a - b */ -#define c_sub(c, a, b) { (c)->r = (a)->r - (b)->r; \ - (c)->i = (a)->i - (b)->i; } - -/*! \brief Complex-Double Multiplication */ -#define cs_mult(c, a, b) { (c)->r = (a)->r * (b); \ - (c)->i = (a)->i * (b); } - -/*! \brief Complex-Complex Multiplication */ -#define cc_mult(c, a, b) { \ - float cr, ci; \ - cr = (a)->r * (b)->r - (a)->i * (b)->i; \ - ci = (a)->i * (b)->r + (a)->r * (b)->i; \ - (c)->r = cr; \ - (c)->i = ci; \ - } - -#define cc_conj(a, b) { \ - (a)->r = (b)->r; \ - (a)->i = -((b)->i); \ - } - -/*! \brief Complex equality testing */ -#define c_eq(a, b) ( (a)->r == (b)->r && (a)->i == (b)->i ) - - -#ifdef __cplusplus -extern "C" { -#endif - -/* Prototypes for functions in scomplex.c */ -void c_div(complex *, complex *, complex *); -double slu_c_abs(complex *); /* exact */ -double slu_c_abs1(complex *); /* approximate */ -void c_exp(complex *, complex *); -void r_cnjg(complex *, complex *); -double r_imag(complex *); -complex c_sgn(complex *); -complex c_sqrt(complex *); - - - -#ifdef __cplusplus - } -#endif - -#endif - -#endif /* __SUPERLU_SCOMPLEX */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_sdefs.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_sdefs.h deleted file mode 100644 index 1dadad8b49..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_sdefs.h +++ /dev/null @@ -1,279 +0,0 @@ - -/*! @file slu_sdefs.h - * \brief Header file for real operations - * - *
     
    - * -- SuperLU routine (version 4.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * June 30, 2009
    - * 
    - * Global data structures used in LU factorization -
    - * 
    - *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
    - *   (xsup,supno): supno[i] is the supernode no to which i belongs;
    - *	xsup(s) points to the beginning of the s-th supernode.
    - *	e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)
    - *	        xsup 0 1 2 4 7 12
    - *	Note: dfs will be performed on supernode rep. relative to the new 
    - *	      row pivoting ordering
    - *
    - *   (xlsub,lsub): lsub[*] contains the compressed subscript of
    - *	rectangular supernodes; xlsub[j] points to the starting
    - *	location of the j-th column in lsub[*]. Note that xlsub 
    - *	is indexed by column.
    - *	Storage: original row subscripts
    - *
    - *      During the course of sparse LU factorization, we also use
    - *	(xlsub,lsub) for the purpose of symmetric pruning. For each
    - *	supernode {s,s+1,...,t=s+r} with first column s and last
    - *	column t, the subscript set
    - *		lsub[j], j=xlsub[s], .., xlsub[s+1]-1
    - *	is the structure of column s (i.e. structure of this supernode).
    - *	It is used for the storage of numerical values.
    - *	Furthermore,
    - *		lsub[j], j=xlsub[t], .., xlsub[t+1]-1
    - *	is the structure of the last column t of this supernode.
    - *	It is for the purpose of symmetric pruning. Therefore, the
    - *	structural subscripts can be rearranged without making physical
    - *	interchanges among the numerical values.
    - *
    - *	However, if the supernode has only one column, then we
    - *	only keep one set of subscripts. For any subscript interchange
    - *	performed, similar interchange must be done on the numerical
    - *	values.
    - *
    - *	The last column structures (for pruning) will be removed
    - *	after the numercial LU factorization phase.
    - *
    - *   (xlusup,lusup): lusup[*] contains the numerical values of the
    - *	rectangular supernodes; xlusup[j] points to the starting
    - *	location of the j-th column in storage vector lusup[*]
    - *	Note: xlusup is indexed by column.
    - *	Each rectangular supernode is stored by column-major
    - *	scheme, consistent with Fortran 2-dim array storage.
    - *
    - *   (xusub,ucol,usub): ucol[*] stores the numerical values of
    - *	U-columns outside the rectangular supernodes. The row
    - *	subscript of nonzero ucol[k] is stored in usub[k].
    - *	xusub[i] points to the starting location of column i in ucol.
    - *	Storage: new row subscripts; that is subscripts of PA.
    - * 
    - */ -#ifndef __SUPERLU_sSP_DEFS /* allow multiple inclusions */ -#define __SUPERLU_sSP_DEFS - -/* - * File name: ssp_defs.h - * Purpose: Sparse matrix types and function prototypes - * History: - */ - -#ifdef _CRAY -#include -#include -#endif - -/* Define my integer type int_t */ -typedef int int_t; /* default */ - -#include -#include -#include "slu_Cnames.h" -#include "supermatrix.h" -#include "slu_util.h" - - - -typedef struct { - int *xsup; /* supernode and column mapping */ - int *supno; - int *lsub; /* compressed L subscripts */ - int *xlsub; - float *lusup; /* L supernodes */ - int *xlusup; - float *ucol; /* U columns */ - int *usub; - int *xusub; - int nzlmax; /* current max size of lsub */ - int nzumax; /* " " " ucol */ - int nzlumax; /* " " " lusup */ - int n; /* number of columns in the matrix */ - LU_space_t MemModel; /* 0 - system malloc'd; 1 - user provided */ - int num_expansions; - ExpHeader *expanders; /* Array of pointers to 4 types of memory */ - LU_stack_t stack; /* use user supplied memory */ -} GlobalLU_t; - - -/* -------- Prototypes -------- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/*! \brief Driver routines */ -extern void -sgssv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -sgssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, float *, float *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, - float *, float *, float *, float *, - mem_usage_t *, SuperLUStat_t *, int *); - /* ILU */ -extern void -sgsisv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -sgsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, float *, float *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, float *, float *, - mem_usage_t *, SuperLUStat_t *, int *); - - -/*! 
\brief Supernodal LU factor related */ -extern void -sCreate_CompCol_Matrix(SuperMatrix *, int, int, int, float *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -sCreate_CompRow_Matrix(SuperMatrix *, int, int, int, float *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -sCopy_CompCol_Matrix(SuperMatrix *, SuperMatrix *); -extern void -sCreate_Dense_Matrix(SuperMatrix *, int, int, float *, int, - Stype_t, Dtype_t, Mtype_t); -extern void -sCreate_SuperNode_Matrix(SuperMatrix *, int, int, int, float *, - int *, int *, int *, int *, int *, - Stype_t, Dtype_t, Mtype_t); -extern void -sCopy_Dense_Matrix(int, int, float *, int, float *, int); - -extern void countnz (const int, int *, int *, int *, GlobalLU_t *); -extern void ilu_countnz (const int, int *, int *, GlobalLU_t *); -extern void fixupL (const int, const int *, GlobalLU_t *); - -extern void sallocateA (int, int, float **, int **, int **); -extern void sgstrf (superlu_options_t*, SuperMatrix*, - int, int, int*, void *, int, int *, int *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t*, int *); -extern int ssnode_dfs (const int, const int, const int *, const int *, - const int *, int *, int *, GlobalLU_t *); -extern int ssnode_bmod (const int, const int, const int, float *, - float *, GlobalLU_t *, SuperLUStat_t*); -extern void spanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, float *, int *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern void spanel_bmod (const int, const int, const int, const int, - float *, float *, int *, int *, - GlobalLU_t *, SuperLUStat_t*); -extern int scolumn_dfs (const int, const int, int *, int *, int *, int *, - int *, int *, int *, int *, int *, GlobalLU_t *); -extern int scolumn_bmod (const int, const int, float *, - float *, int *, int *, int, - GlobalLU_t *, SuperLUStat_t*); -extern int scopy_to_ucol (int, int, int *, int *, int *, - float *, GlobalLU_t *); -extern int spivotL (const int, const double, int *, int *, 
- int *, int *, int *, GlobalLU_t *, SuperLUStat_t*); -extern void spruneL (const int, const int *, const int, const int, - const int *, const int *, int *, GlobalLU_t *); -extern void sreadmt (int *, int *, int *, float **, int **, int **); -extern void sGenXtrue (int, int, float *, int); -extern void sFillRHS (trans_t, int, float *, int, SuperMatrix *, - SuperMatrix *); -extern void sgstrs (trans_t, SuperMatrix *, SuperMatrix *, int *, int *, - SuperMatrix *, SuperLUStat_t*, int *); -/* ILU */ -extern void sgsitrf (superlu_options_t*, SuperMatrix*, int, int, int*, - void *, int, int *, int *, SuperMatrix *, SuperMatrix *, - SuperLUStat_t*, int *); -extern int sldperm(int, int, int, int [], int [], float [], - int [], float [], float []); -extern int ilu_ssnode_dfs (const int, const int, const int *, const int *, - const int *, int *, GlobalLU_t *); -extern void ilu_spanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, float *, float *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern int ilu_scolumn_dfs (const int, const int, int *, int *, int *, - int *, int *, int *, int *, int *, - GlobalLU_t *); -extern int ilu_scopy_to_ucol (int, int, int *, int *, int *, - float *, int, milu_t, double, int, - float *, int *, GlobalLU_t *, int *); -extern int ilu_spivotL (const int, const double, int *, int *, int, int *, - int *, int *, int *, double, milu_t, - float, GlobalLU_t *, SuperLUStat_t*); -extern int ilu_sdrop_row (superlu_options_t *, int, int, double, - int, int *, double *, GlobalLU_t *, - float *, int *, int); - - -/*! 
\brief Driver related */ - -extern void sgsequ (SuperMatrix *, float *, float *, float *, - float *, float *, int *); -extern void slaqgs (SuperMatrix *, float *, float *, float, - float, float, char *); -extern void sgscon (char *, SuperMatrix *, SuperMatrix *, - float, float *, SuperLUStat_t*, int *); -extern float sPivotGrowth(int, SuperMatrix *, int *, - SuperMatrix *, SuperMatrix *); -extern void sgsrfs (trans_t, SuperMatrix *, SuperMatrix *, - SuperMatrix *, int *, int *, char *, float *, - float *, SuperMatrix *, SuperMatrix *, - float *, float *, SuperLUStat_t*, int *); - -extern int sp_strsv (char *, char *, char *, SuperMatrix *, - SuperMatrix *, float *, SuperLUStat_t*, int *); -extern int sp_sgemv (char *, float, SuperMatrix *, float *, - int, float, float *, int); - -extern int sp_sgemm (char *, char *, int, int, int, float, - SuperMatrix *, float *, int, float, - float *, int); -extern double slamch_(char *); - - -/*! \brief Memory-related */ -extern int sLUMemInit (fact_t, void *, int, int, int, int, int, - float, SuperMatrix *, SuperMatrix *, - GlobalLU_t *, int **, float **); -extern void sSetRWork (int, int, float *, float **, float **); -extern void sLUWorkFree (int *, float *, GlobalLU_t *); -extern int sLUMemXpand (int, int, MemType, int *, GlobalLU_t *); - -extern float *floatMalloc(int); -extern float *floatCalloc(int); -extern int smemory_usage(const int, const int, const int, const int); -extern int sQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); -extern int ilu_sQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); - -/*! 
\brief Auxiliary routines */ -extern void sreadhb(int *, int *, int *, float **, int **, int **); -extern void sreadrb(int *, int *, int *, float **, int **, int **); -extern void sreadtriple(int *, int *, int *, float **, int **, int **); -extern void sCompRow_to_CompCol(int, int, int, float*, int*, int*, - float **, int **, int **); -extern void sfill (float *, int, float); -extern void sinf_norm_error (int, SuperMatrix *, float *); -extern void PrintPerf (SuperMatrix *, SuperMatrix *, mem_usage_t *, - float, float, float *, float *, char *); - -/*! \brief Routines for debugging */ -extern void sPrint_CompCol_Matrix(char *, SuperMatrix *); -extern void sPrint_SuperNode_Matrix(char *, SuperMatrix *); -extern void sPrint_Dense_Matrix(char *, SuperMatrix *); -extern void sprint_lu_col(char *, int, int, int *, GlobalLU_t *); -extern int print_double_vec(char *, int, double *); -extern void check_tempv(int, float *); - -#ifdef __cplusplus - } -#endif - -#endif /* __SUPERLU_sSP_DEFS */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_util.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_util.h deleted file mode 100644 index f95788a9d8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_util.h +++ /dev/null @@ -1,369 +0,0 @@ -/** @file slu_util.h - * \brief Utility header file - * - * -- SuperLU routine (version 3.1) -- - * Univ. of California Berkeley, Xerox Palo Alto Research Center, - * and Lawrence Berkeley National Lab. 
- * August 1, 2008 - * - */ - -#ifndef __SUPERLU_UTIL /* allow multiple inclusions */ -#define __SUPERLU_UTIL - -#include -#include -#include -/* -#ifndef __STDC__ -#include -#endif -*/ -#include - -#include "scipy_slu_config.h" - -/*********************************************************************** - * Macros - ***********************************************************************/ -#define FIRSTCOL_OF_SNODE(i) (xsup[i]) -/* No of marker arrays used in the symbolic factorization, - each of size n */ -#define NO_MARKER 3 -#define NUM_TEMPV(m,w,t,b) ( SUPERLU_MAX(m, (t + b)*w) ) - -#ifndef USER_ABORT -#define USER_ABORT(msg) superlu_abort_and_exit(msg) -#endif - -#define ABORT(err_msg) \ - { char msg[256];\ - sprintf(msg,"%s at line %d in file %s\n",err_msg,__LINE__, __FILE__);\ - USER_ABORT(msg); } - - -#ifndef USER_MALLOC -#if 1 -#define USER_MALLOC(size) superlu_malloc(size) -#else -/* The following may check out some uninitialized data */ -#define USER_MALLOC(size) memset (superlu_malloc(size), '\x0F', size) -#endif -#endif - -#define SUPERLU_MALLOC(size) USER_MALLOC(size) - -#ifndef USER_FREE -#define USER_FREE(addr) superlu_free(addr) -#endif - -#define SUPERLU_FREE(addr) USER_FREE(addr) - -#define CHECK_MALLOC(where) { \ - extern int superlu_malloc_total; \ - printf("%s: malloc_total %d Bytes\n", \ - where, superlu_malloc_total); \ -} - -#define SUPERLU_MAX(x, y) ( (x) > (y) ? (x) : (y) ) -#define SUPERLU_MIN(x, y) ( (x) < (y) ? (x) : (y) ) - -/********************************************************* - * Macros used for easy access of sparse matrix entries. 
* - *********************************************************/ -#define L_SUB_START(col) ( Lstore->rowind_colptr[col] ) -#define L_SUB(ptr) ( Lstore->rowind[ptr] ) -#define L_NZ_START(col) ( Lstore->nzval_colptr[col] ) -#define L_FST_SUPC(superno) ( Lstore->sup_to_col[superno] ) -#define U_NZ_START(col) ( Ustore->colptr[col] ) -#define U_SUB(ptr) ( Ustore->rowind[ptr] ) - - -/*********************************************************************** - * Constants - ***********************************************************************/ -#define EMPTY (-1) -/*#define NO (-1)*/ -#define FALSE 0 -#define TRUE 1 - -#define NO_MEMTYPE 4 /* 0: lusup; - 1: ucol; - 2: lsub; - 3: usub */ - -#define GluIntArray(n) (5 * (n) + 5) - -/* Dropping rules */ -#define NODROP ( 0x0000 ) -#define DROP_BASIC ( 0x0001 ) /* ILU(tau) */ -#define DROP_PROWS ( 0x0002 ) /* ILUTP: keep p maximum rows */ -#define DROP_COLUMN ( 0x0004 ) /* ILUTP: for j-th column, - p = gamma * nnz(A(:,j)) */ -#define DROP_AREA ( 0x0008 ) /* ILUTP: for j-th column, use - nnz(F(:,1:j)) / nnz(A(:,1:j)) - to limit memory growth */ -#define DROP_SECONDARY ( 0x000E ) /* PROWS | COLUMN | AREA */ -#define DROP_DYNAMIC ( 0x0010 ) /* adaptive tau */ -#define DROP_INTERP ( 0x0100 ) /* use interpolation */ - - -#if 1 -#define MILU_ALPHA (1.0e-2) /* multiple of drop_sum to be added to diagonal */ -#else -#define MILU_ALPHA 1.0 /* multiple of drop_sum to be added to diagonal */ -#endif - - -/*********************************************************************** - * Enumerate types - ***********************************************************************/ -typedef enum {NO, YES} yes_no_t; -typedef enum {DOFACT, SamePattern, SamePattern_SameRowPerm, FACTORED} fact_t; -typedef enum {NOROWPERM, LargeDiag, MY_PERMR} rowperm_t; -typedef enum {NATURAL, MMD_ATA, MMD_AT_PLUS_A, COLAMD, MY_PERMC}colperm_t; -typedef enum {NOTRANS, TRANS, CONJ} trans_t; -typedef enum {NOEQUIL, ROW, COL, BOTH} DiagScale_t; -typedef enum {NOREFINE, 
SINGLE=1, DOUBLE, EXTRA} IterRefine_t; -typedef enum {LUSUP, UCOL, LSUB, USUB} MemType; -typedef enum {HEAD, TAIL} stack_end_t; -typedef enum {SYSTEM, USER} LU_space_t; -typedef enum {ONE_NORM, TWO_NORM, INF_NORM} norm_t; -typedef enum {SILU, SMILU_1, SMILU_2, SMILU_3} milu_t; -#if 0 -typedef enum {NODROP = 0x0000, - DROP_BASIC = 0x0001, /* ILU(tau) */ - DROP_PROWS = 0x0002, /* ILUTP: keep p maximum rows */ - DROP_COLUMN = 0x0004, /* ILUTP: for j-th column, - p = gamma * nnz(A(:,j)) */ - DROP_AREA = 0x0008, /* ILUTP: for j-th column, use - nnz(F(:,1:j)) / nnz(A(:,1:j)) - to limit memory growth */ - DROP_SECONDARY = 0x000E, /* PROWS | COLUMN | AREA */ - DROP_DYNAMIC = 0x0010, - DROP_INTERP = 0x0100} rule_t; -#endif - - -/* - * The following enumerate type is used by the statistics variable - * to keep track of flop count and time spent at various stages. - * - * Note that not all of the fields are disjoint. - */ -typedef enum { - COLPERM, /* find a column ordering that minimizes fills */ - RELAX, /* find artificial supernodes */ - ETREE, /* compute column etree */ - EQUIL, /* equilibrate the original matrix */ - FACT, /* perform LU factorization */ - RCOND, /* estimate reciprocal condition number */ - SOLVE, /* forward and back solves */ - REFINE, /* perform iterative refinement */ - TRSV, /* fraction of FACT spent in xTRSV */ - GEMV, /* fraction of FACT spent in xGEMV */ - FERR, /* estimate error bounds after iterative refinement */ - NPHASES /* total number of phases */ -} PhaseType; - - -/*********************************************************************** - * Type definitions - ***********************************************************************/ -typedef float flops_t; -typedef unsigned char Logical; - -/* - *-- This contains the options used to control the solve process. - * - * Fact (fact_t) - * Specifies whether or not the factored form of the matrix - * A is supplied on entry, and if not, how the matrix A should - * be factorizaed. 
- * = DOFACT: The matrix A will be factorized from scratch, and the - * factors will be stored in L and U. - * = SamePattern: The matrix A will be factorized assuming - * that a factorization of a matrix with the same sparsity - * pattern was performed prior to this one. Therefore, this - * factorization will reuse column permutation vector - * ScalePermstruct->perm_c and the column elimination tree - * LUstruct->etree. - * = SamePattern_SameRowPerm: The matrix A will be factorized - * assuming that a factorization of a matrix with the same - * sparsity pattern and similar numerical values was performed - * prior to this one. Therefore, this factorization will reuse - * both row and column scaling factors R and C, both row and - * column permutation vectors perm_r and perm_c, and the - * data structure set up from the previous symbolic factorization. - * = FACTORED: On entry, L, U, perm_r and perm_c contain the - * factored form of A. If DiagScale is not NOEQUIL, the matrix - * A has been equilibrated with scaling factors R and C. - * - * Equil (yes_no_t) - * Specifies whether to equilibrate the system (scale A's row and - * columns to have unit norm). - * - * ColPerm (colperm_t) - * Specifies what type of column permutation to use to reduce fill. - * = NATURAL: use the natural ordering - * = MMD_ATA: use minimum degree ordering on structure of A'*A - * = MMD_AT_PLUS_A: use minimum degree ordering on structure of A'+A - * = COLAMD: use approximate minimum degree column ordering - * = MY_PERMC: use the ordering specified in ScalePermstruct->perm_c[] - * - * Trans (trans_t) - * Specifies the form of the system of equations: - * = NOTRANS: A * X = B (No transpose) - * = TRANS: A**T * X = B (Transpose) - * = CONJ: A**H * X = B (Transpose) - * - * IterRefine (IterRefine_t) - * Specifies whether to perform iterative refinement. 
- * = NO: no iterative refinement - * = WorkingPrec: perform iterative refinement in working precision - * = ExtraPrec: perform iterative refinement in extra precision - * - * DiagPivotThresh (double, in [0.0, 1.0]) (only for sequential SuperLU) - * Specifies the threshold used for a diagonal entry to be an - * acceptable pivot. - * - * PivotGrowth (yes_no_t) - * Specifies whether to compute the reciprocal pivot growth. - * - * ConditionNumber (ues_no_t) - * Specifies whether to compute the reciprocal condition number. - * - * RowPerm (rowperm_t) (only for SuperLU_DIST or ILU) - * Specifies whether to permute rows of the original matrix. - * = NO: not to permute the rows - * = LargeDiag: make the diagonal large relative to the off-diagonal - * = MY_PERMR: use the permutation given in ScalePermstruct->perm_r[] - * - * SymmetricMode (yest_no_t) - * Specifies whether to use symmetric mode. - * - * PrintStat (yes_no_t) - * Specifies whether to print the solver's statistics. - * - * ReplaceTinyPivot (yes_no_t) (only for SuperLU_DIST) - * Specifies whether to replace the tiny diagonals by - * sqrt(epsilon)*||A|| during LU factorization. - * - * SolveInitialized (yes_no_t) (only for SuperLU_DIST) - * Specifies whether the initialization has been performed to the - * triangular solve. - * - * RefineInitialized (yes_no_t) (only for SuperLU_DIST) - * Specifies whether the initialization has been performed to the - * sparse matrix-vector multiplication routine needed in iterative - * refinement. 
- */ -typedef struct { - fact_t Fact; - yes_no_t Equil; - colperm_t ColPerm; - trans_t Trans; - IterRefine_t IterRefine; - double DiagPivotThresh; - yes_no_t PivotGrowth; - yes_no_t ConditionNumber; - rowperm_t RowPerm; - yes_no_t SymmetricMode; - yes_no_t PrintStat; - yes_no_t ReplaceTinyPivot; - yes_no_t SolveInitialized; - yes_no_t RefineInitialized; - double ILU_DropTol; /* threshold for dropping */ - double ILU_FillTol; /* threshold for zero pivot perturbation */ - double ILU_FillFactor; /* gamma in the secondary dropping */ - int ILU_DropRule; - norm_t ILU_Norm; - milu_t ILU_MILU; -} superlu_options_t; - -/*! \brief Headers for 4 types of dynamatically managed memory */ -typedef struct e_node { - int size; /* length of the memory that has been used */ - void *mem; /* pointer to the new malloc'd store */ -} ExpHeader; - -typedef struct { - int size; - int used; - int top1; /* grow upward, relative to &array[0] */ - int top2; /* grow downward */ - void *array; -} LU_stack_t; - -typedef struct { - int *panel_histo; /* histogram of panel size distribution */ - double *utime; /* running time at various phases */ - flops_t *ops; /* operation count at various phases */ - int TinyPivots; /* number of tiny pivots */ - int RefineSteps; /* number of iterative refinement steps */ - int expansions; /* number of memory expansions */ -} SuperLUStat_t; - -typedef struct { - float for_lu; - float total_needed; -} mem_usage_t; - - -/*********************************************************************** - * Prototypes - ***********************************************************************/ -#ifdef __cplusplus -extern "C" { -#endif - -extern void Destroy_SuperMatrix_Store(SuperMatrix *); -extern void Destroy_CompCol_Matrix(SuperMatrix *); -extern void Destroy_CompRow_Matrix(SuperMatrix *); -extern void Destroy_SuperNode_Matrix(SuperMatrix *); -extern void Destroy_CompCol_Permuted(SuperMatrix *); -extern void Destroy_Dense_Matrix(SuperMatrix *); -extern void get_perm_c(int, 
SuperMatrix *, int *); -extern void set_default_options(superlu_options_t *options); -extern void ilu_set_default_options(superlu_options_t *options); -extern void sp_preorder (superlu_options_t *, SuperMatrix*, int*, int*, - SuperMatrix*); -extern void superlu_abort_and_exit(char*); -extern void *superlu_malloc (size_t); -extern int *intMalloc (int); -extern int *intCalloc (int); -extern void superlu_free (void*); -extern void SetIWork (int, int, int, int *, int **, int **, int **, - int **, int **, int **, int **); -extern int sp_coletree (int *, int *, int *, int, int, int *); -extern void relax_snode (const int, int *, const int, int *, int *); -extern void heap_relax_snode (const int, int *, const int, int *, int *); -extern int mark_relax(int, int *, int *, int *, int *, int *, int *); -extern void ilu_relax_snode (const int, int *, const int, int *, - int *, int *); -extern void ilu_heap_relax_snode (const int, int *, const int, int *, - int *, int*); -extern void resetrep_col (const int, const int *, int *); -extern int spcoletree (int *, int *, int *, int, int, int *); -extern int *TreePostorder (int, int *); -extern double SuperLU_timer_ (); -extern int sp_ienv (int); -extern int lsame_ (char *, char *); -extern int xerbla_ (char *, int *); -extern void ifill (int *, int, int); -extern void snode_profile (int, int *); -extern void super_stats (int, int *); -extern void check_repfnz(int, int, int, int *); -extern void PrintSumm (char *, int, int, int); -extern void StatInit(SuperLUStat_t *); -extern void StatPrint (SuperLUStat_t *); -extern void StatFree(SuperLUStat_t *); -extern void print_panel_seg(int, int, int, int, int *, int *); -extern int print_int_vec(char *,int, int *); -extern int slu_PrintInt10(char *, int, int *); - -#ifdef __cplusplus - } -#endif - -#endif /* __SUPERLU_UTIL */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_zdefs.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_zdefs.h deleted file mode 100644 
index 78101adf21..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/slu_zdefs.h +++ /dev/null @@ -1,282 +0,0 @@ - -/*! @file slu_zdefs.h - * \brief Header file for real operations - * - *
     
    - * -- SuperLU routine (version 4.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * June 30, 2009
    - * 
    - * Global data structures used in LU factorization -
    - * 
    - *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
    - *   (xsup,supno): supno[i] is the supernode no to which i belongs;
    - *	xsup(s) points to the beginning of the s-th supernode.
    - *	e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)
    - *	        xsup 0 1 2 4 7 12
    - *	Note: dfs will be performed on supernode rep. relative to the new 
    - *	      row pivoting ordering
    - *
    - *   (xlsub,lsub): lsub[*] contains the compressed subscript of
    - *	rectangular supernodes; xlsub[j] points to the starting
    - *	location of the j-th column in lsub[*]. Note that xlsub 
    - *	is indexed by column.
    - *	Storage: original row subscripts
    - *
    - *      During the course of sparse LU factorization, we also use
    - *	(xlsub,lsub) for the purpose of symmetric pruning. For each
    - *	supernode {s,s+1,...,t=s+r} with first column s and last
    - *	column t, the subscript set
    - *		lsub[j], j=xlsub[s], .., xlsub[s+1]-1
    - *	is the structure of column s (i.e. structure of this supernode).
    - *	It is used for the storage of numerical values.
    - *	Furthermore,
    - *		lsub[j], j=xlsub[t], .., xlsub[t+1]-1
    - *	is the structure of the last column t of this supernode.
    - *	It is for the purpose of symmetric pruning. Therefore, the
    - *	structural subscripts can be rearranged without making physical
    - *	interchanges among the numerical values.
    - *
    - *	However, if the supernode has only one column, then we
    - *	only keep one set of subscripts. For any subscript interchange
    - *	performed, similar interchange must be done on the numerical
    - *	values.
    - *
    - *	The last column structures (for pruning) will be removed
    - *	after the numercial LU factorization phase.
    - *
    - *   (xlusup,lusup): lusup[*] contains the numerical values of the
    - *	rectangular supernodes; xlusup[j] points to the starting
    - *	location of the j-th column in storage vector lusup[*]
    - *	Note: xlusup is indexed by column.
    - *	Each rectangular supernode is stored by column-major
    - *	scheme, consistent with Fortran 2-dim array storage.
    - *
    - *   (xusub,ucol,usub): ucol[*] stores the numerical values of
    - *	U-columns outside the rectangular supernodes. The row
    - *	subscript of nonzero ucol[k] is stored in usub[k].
    - *	xusub[i] points to the starting location of column i in ucol.
    - *	Storage: new row subscripts; that is subscripts of PA.
    - * 
    - */ -#ifndef __SUPERLU_zSP_DEFS /* allow multiple inclusions */ -#define __SUPERLU_zSP_DEFS - -/* - * File name: zsp_defs.h - * Purpose: Sparse matrix types and function prototypes - * History: - */ - -#ifdef _CRAY -#include -#include -#endif - -/* Define my integer type int_t */ -typedef int int_t; /* default */ - -#include -#include -#include "slu_Cnames.h" -#include "supermatrix.h" -#include "slu_util.h" -#include "slu_dcomplex.h" - - - -typedef struct { - int *xsup; /* supernode and column mapping */ - int *supno; - int *lsub; /* compressed L subscripts */ - int *xlsub; - doublecomplex *lusup; /* L supernodes */ - int *xlusup; - doublecomplex *ucol; /* U columns */ - int *usub; - int *xusub; - int nzlmax; /* current max size of lsub */ - int nzumax; /* " " " ucol */ - int nzlumax; /* " " " lusup */ - int n; /* number of columns in the matrix */ - LU_space_t MemModel; /* 0 - system malloc'd; 1 - user provided */ - int num_expansions; - ExpHeader *expanders; /* Array of pointers to 4 types of memory */ - LU_stack_t stack; /* use user supplied memory */ -} GlobalLU_t; - - -/* -------- Prototypes -------- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/*! 
\brief Driver routines */ -extern void -zgssv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -zgssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, double *, double *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, - double *, double *, double *, double *, - mem_usage_t *, SuperLUStat_t *, int *); - /* ILU */ -extern void -zgsisv(superlu_options_t *, SuperMatrix *, int *, int *, SuperMatrix *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *); -extern void -zgsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, - char *, double *, double *, SuperMatrix *, SuperMatrix *, - void *, int, SuperMatrix *, SuperMatrix *, double *, double *, - mem_usage_t *, SuperLUStat_t *, int *); - - -/*! \brief Supernodal LU factor related */ -extern void -zCreate_CompCol_Matrix(SuperMatrix *, int, int, int, doublecomplex *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -zCreate_CompRow_Matrix(SuperMatrix *, int, int, int, doublecomplex *, - int *, int *, Stype_t, Dtype_t, Mtype_t); -extern void -zCopy_CompCol_Matrix(SuperMatrix *, SuperMatrix *); -extern void -zCreate_Dense_Matrix(SuperMatrix *, int, int, doublecomplex *, int, - Stype_t, Dtype_t, Mtype_t); -extern void -zCreate_SuperNode_Matrix(SuperMatrix *, int, int, int, doublecomplex *, - int *, int *, int *, int *, int *, - Stype_t, Dtype_t, Mtype_t); -extern void -zCopy_Dense_Matrix(int, int, doublecomplex *, int, doublecomplex *, int); - -extern void countnz (const int, int *, int *, int *, GlobalLU_t *); -extern void ilu_countnz (const int, int *, int *, GlobalLU_t *); -extern void fixupL (const int, const int *, GlobalLU_t *); - -extern void zallocateA (int, int, doublecomplex **, int **, int **); -extern void zgstrf (superlu_options_t*, SuperMatrix*, - int, int, int*, void *, int, int *, int *, - SuperMatrix *, SuperMatrix *, SuperLUStat_t*, int *); -extern int 
zsnode_dfs (const int, const int, const int *, const int *, - const int *, int *, int *, GlobalLU_t *); -extern int zsnode_bmod (const int, const int, const int, doublecomplex *, - doublecomplex *, GlobalLU_t *, SuperLUStat_t*); -extern void zpanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, doublecomplex *, int *, int *, int *, - int *, int *, int *, int *, GlobalLU_t *); -extern void zpanel_bmod (const int, const int, const int, const int, - doublecomplex *, doublecomplex *, int *, int *, - GlobalLU_t *, SuperLUStat_t*); -extern int zcolumn_dfs (const int, const int, int *, int *, int *, int *, - int *, int *, int *, int *, int *, GlobalLU_t *); -extern int zcolumn_bmod (const int, const int, doublecomplex *, - doublecomplex *, int *, int *, int, - GlobalLU_t *, SuperLUStat_t*); -extern int zcopy_to_ucol (int, int, int *, int *, int *, - doublecomplex *, GlobalLU_t *); -extern int zpivotL (const int, const double, int *, int *, - int *, int *, int *, GlobalLU_t *, SuperLUStat_t*); -extern void zpruneL (const int, const int *, const int, const int, - const int *, const int *, int *, GlobalLU_t *); -extern void zreadmt (int *, int *, int *, doublecomplex **, int **, int **); -extern void zGenXtrue (int, int, doublecomplex *, int); -extern void zFillRHS (trans_t, int, doublecomplex *, int, SuperMatrix *, - SuperMatrix *); -extern void zgstrs (trans_t, SuperMatrix *, SuperMatrix *, int *, int *, - SuperMatrix *, SuperLUStat_t*, int *); -/* ILU */ -extern void zgsitrf (superlu_options_t*, SuperMatrix*, int, int, int*, - void *, int, int *, int *, SuperMatrix *, SuperMatrix *, - SuperLUStat_t*, int *); -extern int zldperm(int, int, int, int [], int [], doublecomplex [], - int [], double [], double []); -extern int ilu_zsnode_dfs (const int, const int, const int *, const int *, - const int *, int *, GlobalLU_t *); -extern void ilu_zpanel_dfs (const int, const int, const int, SuperMatrix *, - int *, int *, doublecomplex *, double *, int *, int *, 
- int *, int *, int *, int *, GlobalLU_t *); -extern int ilu_zcolumn_dfs (const int, const int, int *, int *, int *, - int *, int *, int *, int *, int *, - GlobalLU_t *); -extern int ilu_zcopy_to_ucol (int, int, int *, int *, int *, - doublecomplex *, int, milu_t, double, int, - doublecomplex *, int *, GlobalLU_t *, int *); -extern int ilu_zpivotL (const int, const double, int *, int *, int, int *, - int *, int *, int *, double, milu_t, - doublecomplex, GlobalLU_t *, SuperLUStat_t*); -extern int ilu_zdrop_row (superlu_options_t *, int, int, double, - int, int *, double *, GlobalLU_t *, - double *, int *, int); - - -/*! \brief Driver related */ - -extern void zgsequ (SuperMatrix *, double *, double *, double *, - double *, double *, int *); -extern void zlaqgs (SuperMatrix *, double *, double *, double, - double, double, char *); -extern void zgscon (char *, SuperMatrix *, SuperMatrix *, - double, double *, SuperLUStat_t*, int *); -extern double zPivotGrowth(int, SuperMatrix *, int *, - SuperMatrix *, SuperMatrix *); -extern void zgsrfs (trans_t, SuperMatrix *, SuperMatrix *, - SuperMatrix *, int *, int *, char *, double *, - double *, SuperMatrix *, SuperMatrix *, - double *, double *, SuperLUStat_t*, int *); - -extern int sp_ztrsv (char *, char *, char *, SuperMatrix *, - SuperMatrix *, doublecomplex *, SuperLUStat_t*, int *); -extern int sp_zgemv (char *, doublecomplex, SuperMatrix *, doublecomplex *, - int, doublecomplex, doublecomplex *, int); - -extern int sp_zgemm (char *, char *, int, int, int, doublecomplex, - SuperMatrix *, doublecomplex *, int, doublecomplex, - doublecomplex *, int); -extern double dlamch_(char *); - - -/*! 
\brief Memory-related */ -extern int zLUMemInit (fact_t, void *, int, int, int, int, int, - double, SuperMatrix *, SuperMatrix *, - GlobalLU_t *, int **, doublecomplex **); -extern void zSetRWork (int, int, doublecomplex *, doublecomplex **, doublecomplex **); -extern void zLUWorkFree (int *, doublecomplex *, GlobalLU_t *); -extern int zLUMemXpand (int, int, MemType, int *, GlobalLU_t *); - -extern doublecomplex *doublecomplexMalloc(int); -extern doublecomplex *doublecomplexCalloc(int); -extern double *doubleMalloc(int); -extern double *doubleCalloc(int); -extern int zmemory_usage(const int, const int, const int, const int); -extern int zQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); -extern int ilu_zQuerySpace (SuperMatrix *, SuperMatrix *, mem_usage_t *); - -/*! \brief Auxiliary routines */ -extern void zreadhb(int *, int *, int *, doublecomplex **, int **, int **); -extern void zreadrb(int *, int *, int *, doublecomplex **, int **, int **); -extern void zreadtriple(int *, int *, int *, doublecomplex **, int **, int **); -extern void zCompRow_to_CompCol(int, int, int, doublecomplex*, int*, int*, - doublecomplex **, int **, int **); -extern void zfill (doublecomplex *, int, doublecomplex); -extern void zinf_norm_error (int, SuperMatrix *, doublecomplex *); -extern void PrintPerf (SuperMatrix *, SuperMatrix *, mem_usage_t *, - doublecomplex, doublecomplex, doublecomplex *, doublecomplex *, char *); - -/*! 
\brief Routines for debugging */ -extern void zPrint_CompCol_Matrix(char *, SuperMatrix *); -extern void zPrint_SuperNode_Matrix(char *, SuperMatrix *); -extern void zPrint_Dense_Matrix(char *, SuperMatrix *); -extern void zprint_lu_col(char *, int, int, int *, GlobalLU_t *); -extern int print_double_vec(char *, int, double *); -extern void check_tempv(int, doublecomplex *); - -#ifdef __cplusplus - } -#endif - -#endif /* __SUPERLU_zSP_DEFS */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/smemory.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/smemory.c deleted file mode 100644 index 30d9b624c7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/smemory.c +++ /dev/null @@ -1,701 +0,0 @@ - -/*! @file smemory.c - * \brief Memory details - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_sdefs.h" - - -/* Internal prototypes */ -void *sexpand (int *, MemType,int, int, GlobalLU_t *); -int sLUWorkInit (int, int, int, int **, float **, GlobalLU_t *); -void copy_mem_float (int, void *, void *); -void sStackCompress (GlobalLU_t *); -void sSetupSpace (void *, int, GlobalLU_t *); -void *suser_malloc (int, int, GlobalLU_t *); -void suser_free (int, int, GlobalLU_t *); - -/* External prototypes (in memory.c - prec-independent) */ -extern void copy_mem_int (int, void *, void *); -extern void user_bcopy (char *, char *, int); - - -/* Macros to manipulate stack */ -#define StackFull(x) ( x + Glu->stack.used >= Glu->stack.size ) -#define NotDoubleAlign(addr) ( (long int)addr & 7 ) -#define DoubleAlign(addr) ( ((long int)addr + 7) & ~7L ) -#define TempSpace(m, w) ( (2*w + 4 + NO_MARKER) * m * sizeof(int) + \ - (w + 1) * m * sizeof(float) ) -#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */ - - - - -/*! \brief Setup the memory model to be used for factorization. - * - * lwork = 0: use system malloc; - * lwork > 0: use user-supplied work[] space. 
- */ -void sSetupSpace(void *work, int lwork, GlobalLU_t *Glu) -{ - if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; /* malloc/free */ - } else if ( lwork > 0 ) { - Glu->MemModel = USER; /* user provided space */ - Glu->stack.used = 0; - Glu->stack.top1 = 0; - Glu->stack.top2 = (lwork/4)*4; /* must be word addressable */ - Glu->stack.size = Glu->stack.top2; - Glu->stack.array = (void *) work; - } -} - - - -void *suser_malloc(int bytes, int which_end, GlobalLU_t *Glu) -{ - void *buf; - - if ( StackFull(bytes) ) return (NULL); - - if ( which_end == HEAD ) { - buf = (char*) Glu->stack.array + Glu->stack.top1; - Glu->stack.top1 += bytes; - } else { - Glu->stack.top2 -= bytes; - buf = (char*) Glu->stack.array + Glu->stack.top2; - } - - Glu->stack.used += bytes; - return buf; -} - - -void suser_free(int bytes, int which_end, GlobalLU_t *Glu) -{ - if ( which_end == HEAD ) { - Glu->stack.top1 -= bytes; - } else { - Glu->stack.top2 += bytes; - } - Glu->stack.used -= bytes; -} - - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int sQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, iword, dword, panel_size = sp_ienv(1); - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(float); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0*n + 3.0) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0 * panel_size + 4.0 + NO_MARKER) * n * iword + - (panel_size + 1.0) * n * dword ); - - return 0; -} /* sQuerySpace */ - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int ilu_sQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, panel_size = sp_ienv(1); - register float iword, dword; - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(double); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0f * n + 3.0f) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0f) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization. - ILU needs 5*n more integers than LU */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0f * panel_size + 9.0f + NO_MARKER) * n * iword + - (panel_size + 1.0f) * n * dword ); - - return 0; -} /* ilu_sQuerySpace */ - - -/*! \brief Allocate storage for the data structures common to all factor routines. - * - *
    - * For those unpredictable size, estimate as fill_ratio * nnz(A).
    - * Return value:
    - *     If lwork = -1, return the estimated amount of space required, plus n;
    - *     otherwise, return the amount of space actually allocated when
    - *     memory allocation failure occurred.
    - * 
    - */ -int -sLUMemInit(fact_t fact, void *work, int lwork, int m, int n, int annz, - int panel_size, float fill_ratio, SuperMatrix *L, SuperMatrix *U, - GlobalLU_t *Glu, int **iwork, float **dwork) -{ - int info, iword, dword; - SCformat *Lstore; - NCformat *Ustore; - int *xsup, *supno; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - float *ucol; - int *usub, *xusub; - int nzlmax, nzumax, nzlumax; - - iword = sizeof(int); - dword = sizeof(float); - Glu->n = n; - Glu->num_expansions = 0; - - if ( !Glu->expanders ) - Glu->expanders = (ExpHeader*)SUPERLU_MALLOC( NO_MEMTYPE * - sizeof(ExpHeader) ); - if ( !Glu->expanders ) ABORT("SUPERLU_MALLOC fails for expanders"); - - if ( fact != SamePattern_SameRowPerm ) { - /* Guess for L\U factors */ - nzumax = nzlumax = fill_ratio * annz; - nzlmax = SUPERLU_MAX(1, fill_ratio/4.) * annz; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else { - sSetupSpace(work, lwork, Glu); - } - -#if ( PRNTlevel >= 1 ) - printf("sLUMemInit() called: fill_ratio %ld, nzlmax %ld, nzumax %ld\n", - fill_ratio, nzlmax, nzumax); - fflush(stdout); -#endif - - /* Integer pointers for L\U factors */ - if ( Glu->MemModel == SYSTEM ) { - xsup = intMalloc(n+1); - supno = intMalloc(n+1); - xlsub = intMalloc(n+1); - xlusup = intMalloc(n+1); - xusub = intMalloc(n+1); - } else { - xsup = (int *)suser_malloc((n+1) * iword, HEAD, Glu); - supno = (int *)suser_malloc((n+1) * iword, HEAD, Glu); - xlsub = (int *)suser_malloc((n+1) * iword, HEAD, Glu); - xlusup = (int *)suser_malloc((n+1) * iword, HEAD, Glu); - xusub = (int *)suser_malloc((n+1) * iword, HEAD, Glu); - } - - lusup = (float *) sexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (float *) sexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) sexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) sexpand( &nzumax, USUB, 0, 1, Glu ); - - while ( !lusup || !ucol || !lsub || !usub ) { - if ( Glu->MemModel 
== SYSTEM ) { - SUPERLU_FREE(lusup); - SUPERLU_FREE(ucol); - SUPERLU_FREE(lsub); - SUPERLU_FREE(usub); - } else { - suser_free((nzlumax+nzumax)*dword+(nzlmax+nzumax)*iword, - HEAD, Glu); - } - nzlumax /= 2; - nzumax /= 2; - nzlmax /= 2; - if ( nzlumax < annz ) { - printf("Not enough memory to perform factorization.\n"); - return (smemory_usage(nzlmax, nzumax, nzlumax, n) + n); - } -#if ( PRNTlevel >= 1) - printf("sLUMemInit() reduce size: nzlmax %ld, nzumax %ld\n", - nzlmax, nzumax); - fflush(stdout); -#endif - lusup = (float *) sexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (float *) sexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) sexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) sexpand( &nzumax, USUB, 0, 1, Glu ); - } - - } else { - /* fact == SamePattern_SameRowPerm */ - Lstore = L->Store; - Ustore = U->Store; - xsup = Lstore->sup_to_col; - supno = Lstore->col_to_sup; - xlsub = Lstore->rowind_colptr; - xlusup = Lstore->nzval_colptr; - xusub = Ustore->colptr; - nzlmax = Glu->nzlmax; /* max from previous factorization */ - nzumax = Glu->nzumax; - nzlumax = Glu->nzlumax; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; - } else { - Glu->MemModel = USER; - Glu->stack.top2 = (lwork/4)*4; /* must be word-addressable */ - Glu->stack.size = Glu->stack.top2; - } - - lsub = Glu->expanders[LSUB].mem = Lstore->rowind; - lusup = Glu->expanders[LUSUP].mem = Lstore->nzval; - usub = Glu->expanders[USUB].mem = Ustore->rowind; - ucol = Glu->expanders[UCOL].mem = Ustore->nzval;; - Glu->expanders[LSUB].size = nzlmax; - Glu->expanders[LUSUP].size = nzlumax; - Glu->expanders[USUB].size = nzumax; - Glu->expanders[UCOL].size = nzumax; - } - - Glu->xsup = xsup; - Glu->supno = supno; - Glu->lsub = lsub; - Glu->xlsub = xlsub; - Glu->lusup = lusup; - Glu->xlusup = xlusup; - Glu->ucol = ucol; - Glu->usub = usub; - Glu->xusub = 
xusub; - Glu->nzlmax = nzlmax; - Glu->nzumax = nzumax; - Glu->nzlumax = nzlumax; - - info = sLUWorkInit(m, n, panel_size, iwork, dwork, Glu); - if ( info ) - return ( info + smemory_usage(nzlmax, nzumax, nzlumax, n) + n); - - ++Glu->num_expansions; - return 0; - -} /* sLUMemInit */ - -/*! \brief Allocate known working storage. Returns 0 if success, otherwise - returns the number of bytes allocated so far when failure occurred. */ -int -sLUWorkInit(int m, int n, int panel_size, int **iworkptr, - float **dworkptr, GlobalLU_t *Glu) -{ - int isize, dsize, extra; - float *old_ptr; - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - - isize = ( (2 * panel_size + 3 + NO_MARKER ) * m + n ) * sizeof(int); - dsize = (m * panel_size + - NUM_TEMPV(m,panel_size,maxsuper,rowblk)) * sizeof(float); - - if ( Glu->MemModel == SYSTEM ) - *iworkptr = (int *) intCalloc(isize/sizeof(int)); - else - *iworkptr = (int *) suser_malloc(isize, TAIL, Glu); - if ( ! *iworkptr ) { - fprintf(stderr, "sLUWorkInit: malloc fails for local iworkptr[]\n"); - return (isize + n); - } - - if ( Glu->MemModel == SYSTEM ) - *dworkptr = (float *) SUPERLU_MALLOC(dsize); - else { - *dworkptr = (float *) suser_malloc(dsize, TAIL, Glu); - if ( NotDoubleAlign(*dworkptr) ) { - old_ptr = *dworkptr; - *dworkptr = (float*) DoubleAlign(*dworkptr); - *dworkptr = (float*) ((double*)*dworkptr - 1); - extra = (char*)old_ptr - (char*)*dworkptr; -#ifdef DEBUG - printf("sLUWorkInit: not aligned, extra %d\n", extra); -#endif - Glu->stack.top2 -= extra; - Glu->stack.used += extra; - } - } - if ( ! *dworkptr ) { - fprintf(stderr, "malloc fails for local dworkptr[]."); - return (isize + dsize + n); - } - - return 0; -} - - -/*! \brief Set up pointers for real working arrays. 
- */ -void -sSetRWork(int m, int panel_size, float *dworkptr, - float **dense, float **tempv) -{ - float zero = 0.0; - - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - *dense = dworkptr; - *tempv = *dense + panel_size*m; - sfill (*dense, m * panel_size, zero); - sfill (*tempv, NUM_TEMPV(m,panel_size,maxsuper,rowblk), zero); -} - -/*! \brief Free the working storage used by factor routines. - */ -void sLUWorkFree(int *iwork, float *dwork, GlobalLU_t *Glu) -{ - if ( Glu->MemModel == SYSTEM ) { - SUPERLU_FREE (iwork); - SUPERLU_FREE (dwork); - } else { - Glu->stack.used -= (Glu->stack.size - Glu->stack.top2); - Glu->stack.top2 = Glu->stack.size; -/* sStackCompress(Glu); */ - } - - SUPERLU_FREE (Glu->expanders); - Glu->expanders = NULL; -} - -/*! \brief Expand the data structures for L and U during the factorization. - * - *
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -sLUMemXpand(int jcol, - int next, /* number of elements currently in the factors */ - MemType mem_type, /* which type of memory to expand */ - int *maxlen, /* modified - maximum length of a data structure */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - void *new_mem; - -#ifdef DEBUG - printf("sLUMemXpand(): jcol %d, next %d, maxlen %d, MemType %d\n", - jcol, next, *maxlen, mem_type); -#endif - - if (mem_type == USUB) - new_mem = sexpand(maxlen, mem_type, next, 1, Glu); - else - new_mem = sexpand(maxlen, mem_type, next, 0, Glu); - - if ( !new_mem ) { - int nzlmax = Glu->nzlmax; - int nzumax = Glu->nzumax; - int nzlumax = Glu->nzlumax; - fprintf(stderr, "Can't expand MemType %d: jcol %d\n", mem_type, jcol); - return (smemory_usage(nzlmax, nzumax, nzlumax, Glu->n) + Glu->n); - } - - switch ( mem_type ) { - case LUSUP: - Glu->lusup = (float *) new_mem; - Glu->nzlumax = *maxlen; - break; - case UCOL: - Glu->ucol = (float *) new_mem; - Glu->nzumax = *maxlen; - break; - case LSUB: - Glu->lsub = (int *) new_mem; - Glu->nzlmax = *maxlen; - break; - case USUB: - Glu->usub = (int *) new_mem; - Glu->nzumax = *maxlen; - break; - } - - return 0; - -} - - - -void -copy_mem_float(int howmany, void *old, void *new) -{ - register int i; - float *dold = old; - float *dnew = new; - for (i = 0; i < howmany; i++) dnew[i] = dold[i]; -} - -/*! \brief Expand the existing storage to accommodate more fill-ins. 
- */ -void -*sexpand ( - int *prev_len, /* length used from previous call */ - MemType type, /* which part of the memory to expand */ - int len_to_copy, /* size of the memory to be copied to new store */ - int keep_prev, /* = 1: use prev_len; - = 0: compute new_len to expand */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - float EXPAND = 1.5; - float alpha; - void *new_mem, *old_mem; - int new_len, tries, lword, extra, bytes_to_copy; - ExpHeader *expanders = Glu->expanders; /* Array of 4 types of memory */ - - alpha = EXPAND; - - if ( Glu->num_expansions == 0 || keep_prev ) { - /* First time allocate requested */ - new_len = *prev_len; - } else { - new_len = alpha * *prev_len; - } - - if ( type == LSUB || type == USUB ) lword = sizeof(int); - else lword = sizeof(float); - - if ( Glu->MemModel == SYSTEM ) { - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - if ( Glu->num_expansions != 0 ) { - tries = 0; - if ( keep_prev ) { - if ( !new_mem ) return (NULL); - } else { - while ( !new_mem ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - } - } - if ( type == LSUB || type == USUB ) { - copy_mem_int(len_to_copy, expanders[type].mem, new_mem); - } else { - copy_mem_float(len_to_copy, expanders[type].mem, new_mem); - } - SUPERLU_FREE (expanders[type].mem); - } - expanders[type].mem = (void *) new_mem; - - } else { /* MemModel == USER */ - if ( Glu->num_expansions == 0 ) { - new_mem = suser_malloc(new_len * lword, HEAD, Glu); - if ( NotDoubleAlign(new_mem) && - (type == LUSUP || type == UCOL) ) { - old_mem = new_mem; - new_mem = (void *)DoubleAlign(new_mem); - extra = (char*)new_mem - (char*)old_mem; -#ifdef DEBUG - printf("expand(): not aligned, extra %d\n", extra); -#endif - Glu->stack.top1 += extra; - Glu->stack.used += extra; - } - expanders[type].mem = (void *) new_mem; - } else { - tries = 0; - extra = (new_len - 
*prev_len) * lword; - if ( keep_prev ) { - if ( StackFull(extra) ) return (NULL); - } else { - while ( StackFull(extra) ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - extra = (new_len - *prev_len) * lword; - } - } - - if ( type != USUB ) { - new_mem = (void*)((char*)expanders[type + 1].mem + extra); - bytes_to_copy = (char*)Glu->stack.array + Glu->stack.top1 - - (char*)expanders[type + 1].mem; - user_bcopy(expanders[type+1].mem, new_mem, bytes_to_copy); - - if ( type < USUB ) { - Glu->usub = expanders[USUB].mem = - (void*)((char*)expanders[USUB].mem + extra); - } - if ( type < LSUB ) { - Glu->lsub = expanders[LSUB].mem = - (void*)((char*)expanders[LSUB].mem + extra); - } - if ( type < UCOL ) { - Glu->ucol = expanders[UCOL].mem = - (void*)((char*)expanders[UCOL].mem + extra); - } - Glu->stack.top1 += extra; - Glu->stack.used += extra; - if ( type == UCOL ) { - Glu->stack.top1 += extra; /* Add same amount for USUB */ - Glu->stack.used += extra; - } - - } /* if ... */ - - } /* else ... */ - } - - expanders[type].size = new_len; - *prev_len = new_len; - if ( Glu->num_expansions ) ++Glu->num_expansions; - - return (void *) expanders[type].mem; - -} /* sexpand */ - - -/*! \brief Compress the work[] array to remove fragmentation. 
- */ -void -sStackCompress(GlobalLU_t *Glu) -{ - register int iword, dword, ndim; - char *last, *fragment; - int *ifrom, *ito; - float *dfrom, *dto; - int *xlsub, *lsub, *xusub, *usub, *xlusup; - float *ucol, *lusup; - - iword = sizeof(int); - dword = sizeof(float); - ndim = Glu->n; - - xlsub = Glu->xlsub; - lsub = Glu->lsub; - xusub = Glu->xusub; - usub = Glu->usub; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - lusup = Glu->lusup; - - dfrom = ucol; - dto = (float *)((char*)lusup + xlusup[ndim] * dword); - copy_mem_float(xusub[ndim], dfrom, dto); - ucol = dto; - - ifrom = lsub; - ito = (int *) ((char*)ucol + xusub[ndim] * iword); - copy_mem_int(xlsub[ndim], ifrom, ito); - lsub = ito; - - ifrom = usub; - ito = (int *) ((char*)lsub + xlsub[ndim] * iword); - copy_mem_int(xusub[ndim], ifrom, ito); - usub = ito; - - last = (char*)usub + xusub[ndim] * iword; - fragment = (char*) (((char*)Glu->stack.array + Glu->stack.top1) - last); - Glu->stack.used -= (long int) fragment; - Glu->stack.top1 -= (long int) fragment; - - Glu->ucol = ucol; - Glu->lsub = lsub; - Glu->usub = usub; - -#ifdef DEBUG - printf("sStackCompress: fragment %d\n", fragment); - /* for (last = 0; last < ndim; ++last) - print_lu_col("After compress:", last, 0);*/ -#endif - -} - -/*! 
\brief Allocate storage for original matrix A - */ -void -sallocateA(int n, int nnz, float **a, int **asub, int **xa) -{ - *a = (float *) floatMalloc(nnz); - *asub = (int *) intMalloc(nnz); - *xa = (int *) intMalloc(n+1); -} - - -float *floatMalloc(int n) -{ - float *buf; - buf = (float *) SUPERLU_MALLOC((size_t)n * sizeof(float)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in floatMalloc()\n"); - } - return (buf); -} - -float *floatCalloc(int n) -{ - float *buf; - register int i; - float zero = 0.0; - buf = (float *) SUPERLU_MALLOC((size_t)n * sizeof(float)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in floatCalloc()\n"); - } - for (i = 0; i < n; ++i) buf[i] = zero; - return (buf); -} - - -int smemory_usage(const int nzlmax, const int nzumax, - const int nzlumax, const int n) -{ - register int iword, dword; - - iword = sizeof(int); - dword = sizeof(float); - - return (10 * n * iword + - nzlmax * iword + nzumax * (iword + dword) + nzlumax * dword); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_coletree.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_coletree.c deleted file mode 100644 index 5d845b0389..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_coletree.c +++ /dev/null @@ -1,419 +0,0 @@ -/*! @file sp_coletree.c - * \brief Tree layout and computation routines - * - *
    - * -- SuperLU routine (version 3.1) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * August 1, 2008
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    -*/ - -/* Elimination tree computation and layout routines */ - -#include -#include -#include "slu_ddefs.h" - -/* - * Implementation of disjoint set union routines. - * Elements are integers in 0..n-1, and the - * names of the sets themselves are of type int. - * - * Calls are: - * initialize_disjoint_sets (n) initial call. - * s = make_set (i) returns a set containing only i. - * s = link (t, u) returns s = t union u, destroying t and u. - * s = find (i) return name of set containing i. - * finalize_disjoint_sets final call. - * - * This implementation uses path compression but not weighted union. - * See Tarjan's book for details. - * John Gilbert, CMI, 1987. - * - * Implemented path-halving by XSL 07/05/95. - */ - - -static -int *mxCallocInt(int n) -{ - register int i; - int *buf; - - buf = (int *) SUPERLU_MALLOC( n * sizeof(int) ); - if ( !buf ) { - ABORT("SUPERLU_MALLOC fails for buf in mxCallocInt()"); - } - for (i = 0; i < n; i++) buf[i] = 0; - return (buf); -} - -static -void initialize_disjoint_sets ( - int n, - int **pp - ) -{ - (*pp) = mxCallocInt(n); -} - - -static -int make_set ( - int i, - int *pp - ) -{ - pp[i] = i; - return i; -} - - -static -int link ( - int s, - int t, - int *pp - ) -{ - pp[s] = t; - return t; -} - - -/* PATH HALVING */ -static -int find ( - int i, - int *pp - ) -{ - register int p, gp; - - p = pp[i]; - gp = pp[p]; - while (gp != p) { - pp[i] = gp; - i = gp; - p = pp[i]; - gp = pp[p]; - } - return (p); -} - -#if 0 -/* PATH COMPRESSION */ -static -int find ( - int i - ) -{ - if (pp[i] != i) - pp[i] = find (pp[i]); - return pp[i]; -} -#endif - -static -void finalize_disjoint_sets ( - int *pp - ) -{ - SUPERLU_FREE(pp); -} - - -/* - * Find the elimination tree for A'*A. - * This uses something similar to Liu's algorithm. - * It runs in time O(nz(A)*log n) and does not form A'*A. - * - * Input: - * Sparse matrix A. Numeric values are ignored, so any - * explicit zeros are treated as nonzero. 
- * Output: - * Integer array of parents representing the elimination - * tree of the symbolic product A'*A. Each vertex is a - * column of A, and nc means a root of the elimination forest. - * - * John R. Gilbert, Xerox, 10 Dec 1990 - * Based on code by JRG dated 1987, 1988, and 1990. - */ - -/* - * Nonsymmetric elimination tree - */ -int -sp_coletree( - int *acolst, int *acolend, /* column start and end past 1 */ - int *arow, /* row indices of A */ - int nr, int nc, /* dimension of A */ - int *parent /* parent in elim tree */ - ) -{ - int *root; /* root of subtee of etree */ - int *firstcol; /* first nonzero col in each row*/ - int rset, cset; - int row, col; - int rroot; - int p; - int *pp; - - root = mxCallocInt (nc); - initialize_disjoint_sets (nc, &pp); - - /* Compute firstcol[row] = first nonzero column in row */ - - firstcol = mxCallocInt (nr); - for (row = 0; row < nr; firstcol[row++] = nc); - for (col = 0; col < nc; col++) - for (p = acolst[col]; p < acolend[col]; p++) { - row = arow[p]; - firstcol[row] = SUPERLU_MIN(firstcol[row], col); - } - - /* Compute etree by Liu's algorithm for symmetric matrices, - except use (firstcol[r],c) in place of an edge (r,c) of A. - Thus each row clique in A'*A is replaced by a star - centered at its first vertex, which has the same fill. */ - - for (col = 0; col < nc; col++) { - cset = make_set (col, pp); - root[cset] = col; - parent[col] = nc; /* Matlab */ - for (p = acolst[col]; p < acolend[col]; p++) { - row = firstcol[arow[p]]; - if (row >= col) continue; - rset = find (row, pp); - rroot = root[rset]; - if (rroot != col) { - parent[rroot] = col; - cset = link (cset, rset, pp); - root[cset] = col; - } - } - } - - SUPERLU_FREE (root); - SUPERLU_FREE (firstcol); - finalize_disjoint_sets (pp); - return 0; -} - -/* - * q = TreePostorder (n, p); - * - * Postorder a tree. - * Input: - * p is a vector of parent pointers for a forest whose - * vertices are the integers 0 to n-1; p[root]==n. 
- * Output: - * q is a vector indexed by 0..n-1 such that q[i] is the - * i-th vertex in a postorder numbering of the tree. - * - * ( 2/7/95 modified by X.Li: - * q is a vector indexed by 0:n-1 such that vertex i is the - * q[i]-th vertex in a postorder numbering of the tree. - * That is, this is the inverse of the previous q. ) - * - * In the child structure, lower-numbered children are represented - * first, so that a tree which is already numbered in postorder - * will not have its order changed. - * - * Written by John Gilbert, Xerox, 10 Dec 1990. - * Based on code written by John Gilbert at CMI in 1987. - */ - -static -/* - * Depth-first search from vertex v. - */ -void etdfs ( - int v, - int first_kid[], - int next_kid[], - int post[], - int *postnum - ) -{ - int w; - - for (w = first_kid[v]; w != -1; w = next_kid[w]) { - etdfs (w, first_kid, next_kid, post, postnum); - } - /* post[postnum++] = v; in Matlab */ - post[v] = (*postnum)++; /* Modified by X. Li on 08/10/07 */ -} - - -static -/* - * Depth-first search from vertex n. No recursion. - * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. 
- */ -void nr_etdfs (int n, int *parent, - int *first_kid, int *next_kid, - int *post, int postnum) -{ - int current = n, first, next; - - while (postnum != n){ - - /* no kid for the current node */ - first = first_kid[current]; - - /* no first kid for the current node */ - if (first == -1){ - - /* numbering this node because it has no kid */ - post[current] = postnum++; - - /* looking for the next kid */ - next = next_kid[current]; - - while (next == -1){ - - /* no more kids : back to the parent node */ - current = parent[current]; - - /* numbering the parent node */ - post[current] = postnum++; - - /* get the next kid */ - next = next_kid[current]; - } - - /* stopping criterion */ - if (postnum==n+1) return; - - /* updating current node */ - current = next; - } - /* updating current node */ - else { - current = first; - } - } -} - -/* - * Post order a tree - */ -int *TreePostorder( - int n, - int *parent - ) -{ - int *first_kid, *next_kid; /* Linked list of children. */ - int *post, postnum; - int v, dad; - - /* Allocate storage for working arrays and results */ - first_kid = mxCallocInt (n+1); - next_kid = mxCallocInt (n+1); - post = mxCallocInt (n+1); - - /* Set up structure describing children */ - for (v = 0; v <= n; first_kid[v++] = -1); - for (v = n-1; v >= 0; v--) { - dad = parent[v]; - next_kid[v] = first_kid[dad]; - first_kid[dad] = v; - } - - /* Depth-first search from dummy root vertex #n */ - postnum = 0; -#if 0 - /* recursion */ - etdfs (n, first_kid, next_kid, post, &postnum); -#else - /* no recursion */ - nr_etdfs(n, parent, first_kid, next_kid, post, postnum); -#endif - - SUPERLU_FREE (first_kid); - SUPERLU_FREE (next_kid); - return post; -} - - -/* - * p = spsymetree (A); - * - * Find the elimination tree for symmetric matrix A. - * This uses Liu's algorithm, and runs in time O(nz*log n). - * - * Input: - * Square sparse matrix A. No check is made for symmetry; - * elements below and on the diagonal are ignored. 
- * Numeric values are ignored, so any explicit zeros are - * treated as nonzero. - * Output: - * Integer array of parents representing the etree, with n - * meaning a root of the elimination forest. - * Note: - * This routine uses only the upper triangle, while sparse - * Cholesky (as in spchol.c) uses only the lower. Matlab's - * dense Cholesky uses only the upper. This routine could - * be modified to use the lower triangle either by transposing - * the matrix or by traversing it by rows with auxiliary - * pointer and link arrays. - * - * John R. Gilbert, Xerox, 10 Dec 1990 - * Based on code by JRG dated 1987, 1988, and 1990. - * Modified by X.S. Li, November 1999. - */ - -/* - * Symmetric elimination tree - */ -int -sp_symetree( - int *acolst, int *acolend, /* column starts and ends past 1 */ - int *arow, /* row indices of A */ - int n, /* dimension of A */ - int *parent /* parent in elim tree */ - ) -{ - int *root; /* root of subtree of etree */ - int rset, cset; - int row, col; - int rroot; - int p; - int *pp; - - root = mxCallocInt (n); - initialize_disjoint_sets (n, &pp); - - for (col = 0; col < n; col++) { - cset = make_set (col, pp); - root[cset] = col; - parent[col] = n; /* Matlab */ - for (p = acolst[col]; p < acolend[col]; p++) { - row = arow[p]; - if (row >= col) continue; - rset = find (row, pp); - rroot = root[rset]; - if (rroot != col) { - parent[rroot] = col; - cset = link (cset, rset, pp); - root[cset] = col; - } - } - } - SUPERLU_FREE (root); - finalize_disjoint_sets (pp); - return 0; -} /* SP_SYMETREE */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_ienv.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_ienv.c deleted file mode 100644 index 86c45d6975..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_ienv.c +++ /dev/null @@ -1,70 +0,0 @@ -/*! 
@file sp_ienv.c - * \brief Chooses machine-dependent parameters for the local environment -*/ - -/* - * File name: sp_ienv.c - * History: Modified from lapack routine ILAENV - */ -#include "slu_Cnames.h" - -/*! \brief - -
    -    Purpose   
    -    =======   
    -
    -    sp_ienv() is inquired to choose machine-dependent parameters for the
    -    local environment. See ISPEC for a description of the parameters.   
    -
    -    This version provides a set of parameters which should give good,   
    -    but not optimal, performance on many of the currently available   
    -    computers.  Users are encouraged to modify this subroutine to set   
    -    the tuning parameters for their particular machine using the option   
    -    and problem size information in the arguments.   
    -
    -    Arguments   
    -    =========   
    -
    -    ISPEC   (input) int
    -            Specifies the parameter to be returned as the value of SP_IENV.   
    -            = 1: the panel size w; a panel consists of w consecutive
    -	         columns of matrix A in the process of Gaussian elimination.
    -		 The best value depends on machine's cache characters.
    -            = 2: the relaxation parameter relax; if the number of
    -	         nodes (columns) in a subtree of the elimination tree is less
    -		 than relax, this subtree is considered as one supernode,
    -		 regardless of their row structures.
    -            = 3: the maximum size for a supernode;
    -	    = 4: the minimum row dimension for 2-D blocking to be used;
    -	    = 5: the minimum column dimension for 2-D blocking to be used;
    -	    = 6: the estimated fills factor for L and U, compared with A;
    -	    
    -   (SP_IENV) (output) int
    -            >= 0: the value of the parameter specified by ISPEC   
    -            < 0:  if SP_IENV = -k, the k-th argument had an illegal value. 
    -  
    -    ===================================================================== 
    -
    -*/ -int -sp_ienv(int ispec) -{ - int i; - - switch (ispec) { - case 1: return (10); - case 2: return (5); - case 3: return (100); - case 4: return (200); - case 5: return (40); - case 6: return (20); - } - - /* Invalid value for ISPEC */ - i = 1; - xerbla_("sp_ienv", &i); - return 0; - -} /* sp_ienv_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_preorder.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_preorder.c deleted file mode 100644 index abee61944d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sp_preorder.c +++ /dev/null @@ -1,208 +0,0 @@ -/*! @file sp_preorder.c - * \brief Permute and performs functions on columns of orginal matrix - */ -#include "slu_ddefs.h" - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * sp_preorder() permutes the columns of the original matrix. It performs
    - * the following steps:
    - *
    - *    1. Apply column permutation perm_c[] to A's column pointers to form AC;
    - *
    - *    2. If options->Fact = DOFACT, then
    - *       (1) Compute column elimination tree etree[] of AC'AC;
    - *       (2) Post order etree[] to get a postordered elimination tree etree[],
    - *           and a postorder permutation post[];
    - *       (3) Apply post[] permutation to columns of AC;
    - *       (4) Overwrite perm_c[] with the product perm_c * post.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         Specifies whether or not the elimination tree will be re-used.
    - *         If options->Fact == DOFACT, this means first time factor A, 
    - *         etree is computed, postered, and output.
    - *         Otherwise, re-factor A, etree is input, unchanged on exit.
    - *
    - * A       (input) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of the linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = NC or SLU_NCP; Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - * perm_c  (input/output) int*
    - *	   Column permutation vector of size A->ncol, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - *         If options->Fact == DOFACT, perm_c is both input and output.
    - *         On output, it is changed according to a postorder of etree.
    - *         Otherwise, perm_c is input.
    - *
    - * etree   (input/output) int*
    - *         Elimination tree of Pc'*A'*A*Pc, dimension A->ncol.
    - *         If options->Fact == DOFACT, etree is an output argument,
    - *         otherwise it is an input argument.
    - *         Note: etree is a vector of parent pointers for a forest whose
    - *         vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * AC      (output) SuperMatrix*
    - *         The resulting matrix after applied the column permutation
    - *         perm_c[] to matrix A. The type of AC can be:
    - *         Stype = SLU_NCP; Dtype = A->Dtype; Mtype = SLU_GE.
    - * 
    - */ -void -sp_preorder(superlu_options_t *options, SuperMatrix *A, int *perm_c, - int *etree, SuperMatrix *AC) -{ - NCformat *Astore; - NCPformat *ACstore; - int *iwork, *post; - register int n, i; - - n = A->ncol; - - /* Apply column permutation perm_c to A's column pointers so to - obtain NCP format in AC = A*Pc. */ - AC->Stype = SLU_NCP; - AC->Dtype = A->Dtype; - AC->Mtype = A->Mtype; - AC->nrow = A->nrow; - AC->ncol = A->ncol; - Astore = A->Store; - ACstore = AC->Store = (void *) SUPERLU_MALLOC( sizeof(NCPformat) ); - if ( !ACstore ) ABORT("SUPERLU_MALLOC fails for ACstore"); - ACstore->nnz = Astore->nnz; - ACstore->nzval = Astore->nzval; - ACstore->rowind = Astore->rowind; - ACstore->colbeg = (int*) SUPERLU_MALLOC(n*sizeof(int)); - if ( !(ACstore->colbeg) ) ABORT("SUPERLU_MALLOC fails for ACstore->colbeg"); - ACstore->colend = (int*) SUPERLU_MALLOC(n*sizeof(int)); - if ( !(ACstore->colend) ) ABORT("SUPERLU_MALLOC fails for ACstore->colend"); - -#ifdef DEBUG - print_int_vec("pre_order:", n, perm_c); - check_perm("Initial perm_c", n, perm_c); -#endif - - for (i = 0; i < n; i++) { - ACstore->colbeg[perm_c[i]] = Astore->colptr[i]; - ACstore->colend[perm_c[i]] = Astore->colptr[i+1]; - } - - if ( options->Fact == DOFACT ) { -#undef ETREE_ATplusA -#ifdef ETREE_ATplusA - /*-------------------------------------------- - COMPUTE THE ETREE OF Pc*(A'+A)*Pc'. - --------------------------------------------*/ - int *b_colptr, *b_rowind, bnz, j; - int *c_colbeg, *c_colend; - - /*printf("Use etree(A'+A)\n");*/ - - /* Form B = A + A'. */ - at_plus_a(n, Astore->nnz, Astore->colptr, Astore->rowind, - &bnz, &b_colptr, &b_rowind); - - /* Form C = Pc*B*Pc'. 
*/ - c_colbeg = (int*) SUPERLU_MALLOC(2*n*sizeof(int)); - c_colend = c_colbeg + n; - if (!c_colbeg ) ABORT("SUPERLU_MALLOC fails for c_colbeg/c_colend"); - for (i = 0; i < n; i++) { - c_colbeg[perm_c[i]] = b_colptr[i]; - c_colend[perm_c[i]] = b_colptr[i+1]; - } - for (j = 0; j < n; ++j) { - for (i = c_colbeg[j]; i < c_colend[j]; ++i) { - b_rowind[i] = perm_c[b_rowind[i]]; - } - } - - /* Compute etree of C. */ - sp_symetree(c_colbeg, c_colend, b_rowind, n, etree); - - SUPERLU_FREE(b_colptr); - if ( bnz ) SUPERLU_FREE(b_rowind); - SUPERLU_FREE(c_colbeg); - -#else - /*-------------------------------------------- - COMPUTE THE COLUMN ELIMINATION TREE. - --------------------------------------------*/ - sp_coletree(ACstore->colbeg, ACstore->colend, ACstore->rowind, - A->nrow, A->ncol, etree); -#endif -#ifdef DEBUG - print_int_vec("etree:", n, etree); -#endif - - /* In symmetric mode, do not do postorder here. */ - if ( options->SymmetricMode == NO ) { - /* Post order etree */ - post = (int *) TreePostorder(n, etree); - /* for (i = 0; i < n+1; ++i) inv_post[post[i]] = i; - iwork = post; */ - -#ifdef DEBUG - print_int_vec("post:", n+1, post); - check_perm("post", n, post); -#endif - iwork = (int*) SUPERLU_MALLOC((n+1)*sizeof(int)); - if ( !iwork ) ABORT("SUPERLU_MALLOC fails for iwork[]"); - - /* Renumber etree in postorder */ - for (i = 0; i < n; ++i) iwork[post[i]] = post[etree[i]]; - for (i = 0; i < n; ++i) etree[i] = iwork[i]; - -#ifdef DEBUG - print_int_vec("postorder etree:", n, etree); -#endif - - /* Postmultiply A*Pc by post[] */ - for (i = 0; i < n; ++i) iwork[post[i]] = ACstore->colbeg[i]; - for (i = 0; i < n; ++i) ACstore->colbeg[i] = iwork[i]; - for (i = 0; i < n; ++i) iwork[post[i]] = ACstore->colend[i]; - for (i = 0; i < n; ++i) ACstore->colend[i] = iwork[i]; - - for (i = 0; i < n; ++i) - iwork[i] = post[perm_c[i]]; /* product of perm_c and post */ - for (i = 0; i < n; ++i) perm_c[i] = iwork[i]; - -#ifdef DEBUG - print_int_vec("Pc*post:", n, perm_c); - 
check_perm("final perm_c", n, perm_c); -#endif - SUPERLU_FREE (post); - SUPERLU_FREE (iwork); - } /* end postordering */ - - } /* if options->Fact == DOFACT ... */ - -} - -int check_perm(char *what, int n, int *perm) -{ - register int i; - int *marker; - marker = (int *) calloc(n, sizeof(int)); - - for (i = 0; i < n; ++i) { - if ( marker[perm[i]] == 1 || perm[i] >= n ) { - printf("%s: Not a valid PERM[%d] = %d\n", what, i, perm[i]); - ABORT("check_perm"); - } else { - marker[perm[i]] = 1; - } - } - - SUPERLU_FREE(marker); - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spanel_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spanel_bmod.c deleted file mode 100644 index 53fe4ac14b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spanel_bmod.c +++ /dev/null @@ -1,459 +0,0 @@ - -/*! @file spanel_bmod.c - * \brief Performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ -/* - -*/ - -#include -#include -#include "slu_sdefs.h" - -/* - * Function prototypes - */ -void slsolve(int, int, float *, float *); -void smatvec(int, int, int, float *, float *, float *); -extern void scheck_tempv(); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *    Performs numeric block updates (sup-panel) in topological order.
    - *    It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - *    Special processing on the supernodal portion of L\U[*,j]
    - *
    - *    Before entering this routine, the original nonzeros in the panel 
    - *    were already copied into the spa[m,w].
    - *
    - *    Updated/Output parameters-
    - *    dense[0:m-1,w]: L[*,j:j+w-1] and U[*,j:j+w-1] are returned 
    - *    collectively in the m-by-w vector dense[*]. 
    - * 
    - */ - -void -spanel_bmod ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - const int nseg, /* in */ - float *dense, /* out, of size n by w */ - float *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in, of size n by w */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - float alpha, beta; -#endif - - register int k, ksub; - int fsupc, nsupc, nsupr, nrow; - int krep, krep_ind; - float ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int segsze; - int block_nrow; /* no of rows in a block row */ - register int lptr; /* Points to the row subscripts of a supernode */ - int kfnz, irow, no_zeros; - register int isub, isub1, i; - register int jj; /* Index through each column in the panel */ - int *xsup, *supno; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - int *repfnz_col; /* repfnz[] for a column in the panel */ - float *dense_col; /* dense[] for a column in the panel */ - float *tempv1; /* Used in 1-D update */ - float *TriTmp, *MatvecTmp; /* used in 2-D update */ - float zero = 0.0; - float one = 1.0; - register int ldaTmp; - register int r_ind, r_hi; - static int first = 1, maxsuper, rowblk, colblk; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - if ( first ) { - maxsuper = sp_ienv(3); - rowblk = sp_ienv(4); - colblk = sp_ienv(5); - first = 0; - } - ldaTmp = maxsuper + rowblk; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { /* for each updating supernode */ - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * nsupc = 
no of columns in a supernode - * nsupr = no of rows in a supernode - */ - krep = segrep[k--]; - fsupc = xsup[supno[krep]]; - nsupc = krep - fsupc + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nrow = nsupr - nsupc; - lptr = xlsub[fsupc]; - krep_ind = lptr + nsupc - 1; - - repfnz_col = repfnz; - dense_col = dense; - - if ( nsupc >= colblk && nrow > rowblk ) { /* 2-D block update */ - - TriTmp = tempv; - - /* Sequence through each column in panel -- triangular solves */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp ) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += segsze * (segsze - 1); - ops[GEMV] += 2 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - dense_col[irow] -= ukj * lusup[luptr]; - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - ukj -= ukj1 * lusup[luptr1]; - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; - dense_col[irow] -= (ukj*lusup[luptr] - + ukj1*lusup[luptr1]); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - ukj1 -= ukj2 * lusup[luptr2-1]; - ukj = ukj - ukj1*lusup[luptr1] - ukj2*lusup[luptr2]; - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; luptr2++; - dense_col[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] + ukj2*lusup[luptr2] ); - } - } - - } else { /* segsze >= 4 */ - - /* Copy U[*,j] segment from 
dense[*] to TriTmp[*], which - holds the result of triangular solves. */ - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - TriTmp[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#else - strsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#endif -#else - slsolve ( nsupr, segsze, &lusup[luptr], TriTmp ); -#endif - - - } /* else ... */ - - } /* for jj ... end tri-solves */ - - /* Block row updates; push all the way into dense[*] block */ - for ( r_ind = 0; r_ind < nrow; r_ind += rowblk ) { - - r_hi = SUPERLU_MIN(nrow, r_ind + rowblk); - block_nrow = SUPERLU_MIN(rowblk, r_hi - r_ind); - luptr = xlusup[fsupc] + nsupc + r_ind; - isub1 = lptr + nsupc + r_ind; - - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - /* Sequence through each column in panel -- matrix-vector */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - /* Perform a block update, and scatter the result of - matrix-vector to dense[]. 
*/ - no_zeros = kfnz - fsupc; - luptr1 = luptr + nsupr * no_zeros; - MatvecTmp = &TriTmp[maxsuper]; - -#ifdef USE_VENDOR_BLAS - alpha = one; - beta = zero; -#ifdef _CRAY - SGEMV(ftcs2, &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#else - sgemv_("N", &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#endif -#else - smatvec(nsupr, block_nrow, segsze, &lusup[luptr1], - TriTmp, MatvecTmp); -#endif - - /* Scatter MatvecTmp[*] into SPA dense[*] temporarily - * such that MatvecTmp[*] can be re-used for the - * the next blok row update. dense[] will be copied into - * global store after the whole panel has been finished. - */ - isub = isub1; - for (i = 0; i < block_nrow; i++) { - irow = lsub[isub]; - dense_col[irow] -= MatvecTmp[i]; - MatvecTmp[i] = zero; - ++isub; - } - - } /* for jj ... */ - - } /* for each block row ... */ - - /* Scatter the triangular solves into SPA dense[*] */ - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = TriTmp[i]; - TriTmp[i] = zero; - ++isub; - } - - } /* for jj ... 
*/ - - } else { /* 1-D block modification */ - - - /* Sequence through each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += segsze * (segsze - 1); - ops[GEMV] += 2 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - dense_col[irow] -= ukj * lusup[luptr]; - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - ukj -= ukj1 * lusup[luptr1]; - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; - dense_col[irow] -= (ukj*lusup[luptr] - + ukj1*lusup[luptr1]); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - ukj1 -= ukj2 * lusup[luptr2-1]; - ukj = ukj - ukj1*lusup[luptr1] - ukj2*lusup[luptr2]; - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; ++luptr2; - dense_col[irow] -= ( ukj*lusup[luptr] - + ukj1*lusup[luptr1] + ukj2*lusup[luptr2] ); - } - } - - } else { /* segsze >= 4 */ - /* - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense[]. 
- */ - no_zeros = kfnz - fsupc; - - /* Copy U[*,j] segment from dense[*] to tempv[*]: - * The result of triangular solve is in tempv[*]; - * The result of matrix vector update is in dense_col[*] - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - tempv[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - strsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - SGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - sgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - slsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - smatvec (nsupr, nrow, segsze, &lusup[luptr], tempv, tempv1); -#endif - - /* Scatter tempv[*] into SPA dense[*] temporarily, such - * that tempv[*] can be used for the triangular solve of - * the next column of the panel. They will be copied into - * ucol[*] after the whole panel has been finished. - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = tempv[i]; - tempv[i] = zero; - isub++; - } - - /* Scatter the update from tempv1[*] into SPA dense[*] */ - /* Start dense rectangular L */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - dense_col[irow] -= tempv1[i]; - tempv1[i] = zero; - ++isub; - } - - } /* else segsze>=4 ... */ - - } /* for each column in the panel... */ - - } /* else 1-D update ... */ - - } /* for each updating supernode ... 
*/ - -} - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spanel_dfs.c deleted file mode 100644 index 3f5bb46100..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spanel_dfs.c +++ /dev/null @@ -1,254 +0,0 @@ - -/*! @file spanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel. 
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a 
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ - -void -spanel_dfs ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - float *dense, /* out */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *xprune, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - NCPformat *Astore; - float *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - float *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - - /* For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - panel_lsub[nextl_col++] = 
krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... 
*/ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spivotL.c deleted file mode 100644 index 8a32f2fe0d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spivotL.c +++ /dev/null @@ -1,195 +0,0 @@ - -/*! @file spivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include -#include "slu_sdefs.h" - -#undef DEBUG - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *           pivot row = k;
    - *       ELSE IF abs(A_jj) >= thresh THEN
    - *           pivot row = j;
    - *       ELSE
    - *           pivot row = m;
    - * 
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0      success;
    - *                 i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -spivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int *iperm_r, /* in - inverse of perm_r */ - int *iperm_c, /* in - used to find diagonal of Pc*A*Pc' */ - int *pivrow, /* out */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - int pivptr, old_pivptr, diag, diagind; - float pivmax, rtemp, thresh; - float temp; - float *lu_sup_ptr; - float *lu_col_ptr; - int *lsub_ptr; - int isub, icol, k, itemp; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - /* Initialize pointers */ - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - -#ifdef DEBUG -if ( jcol == MIN_COL ) { - printf("Before cdiv: col %d\n", jcol); - for (k = nsupc; k < nsupr; k++) - printf(" lu[%d] %f\n", lsub_ptr[k], lu_col_ptr[k]); -} -#endif - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, and diagonal element. 
*/ - if ( *usepr ) *pivrow = iperm_r[jcol]; - diagind = iperm_c[jcol]; -#ifdef SCIPY_SPECIFIC_FIX - pivmax = -1.0; -#else - pivmax = 0.0; -#endif - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - for (isub = nsupc; isub < nsupr; ++isub) { - rtemp = fabs (lu_col_ptr[isub]); - if ( rtemp > pivmax ) { - pivmax = rtemp; - pivptr = isub; - } - if ( *usepr && lsub_ptr[isub] == *pivrow ) old_pivptr = isub; - if ( lsub_ptr[isub] == diagind ) diag = isub; - } - - /* Test for singularity */ -#ifdef SCIPY_SPECIFIC_FIX - if (pivmax < 0.0) { - perm_r[diagind] = jcol; - *usepr = 0; - return (jcol+1); - } -#endif - if ( pivmax == 0.0 ) { -#if 1 - *pivrow = lsub_ptr[pivptr]; - perm_r[*pivrow] = jcol; -#else - perm_r[diagind] = jcol; -#endif - *usepr = 0; - return (jcol+1); - } - - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. */ - if ( *usepr ) { - rtemp = fabs (lu_col_ptr[old_pivptr]); - if ( rtemp != 0.0 && rtemp >= thresh ) - pivptr = old_pivptr; - else - *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - rtemp = fabs (lu_col_ptr[diag]); - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += nsupr - nsupc; - - temp = 1.0 / lu_col_ptr[nsupc]; - for (k = nsupc+1; k < nsupr; k++) - lu_col_ptr[k] *= temp; - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spivotgrowth.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spivotgrowth.c deleted file mode 100644 index 6217b97c45..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spivotgrowth.c +++ /dev/null @@ -1,114 +0,0 @@ - -/*! @file spivotgrowth.c - * \brief Computes the reciprocal pivot growth factor - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * Compute the reciprocal pivot growth factor of the leading ncols columns
    - * of the matrix, using the formula:
    - *     min_j ( max_i(abs(A_ij)) / max_i(abs(U_ij)) )
    - *
    - * Arguments
    - * =========
    - *
    - * ncols    (input) int
    - *          The number of columns of matrices A, L and U.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = NC; Dtype = SLU_S; Mtype = GE.
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SC; Dtype = SLU_S; Mtype = TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = NC;
    - *          Dtype = SLU_S; Mtype = TRU.
    - * 
    - */ - -float -sPivotGrowth(int ncols, SuperMatrix *A, int *perm_c, - SuperMatrix *L, SuperMatrix *U) -{ - - NCformat *Astore; - SCformat *Lstore; - NCformat *Ustore; - float *Aval, *Lval, *Uval; - int fsupc, nsupr, luptr, nz_in_U; - int i, j, k, oldcol; - int *inv_perm_c; - float rpg, maxaj, maxuj; - extern double slamch_(char *); - float smlnum; - float *luval; - - /* Get machine constants. */ - smlnum = slamch_("S"); - rpg = 1. / smlnum; - - Astore = A->Store; - Lstore = L->Store; - Ustore = U->Store; - Aval = Astore->nzval; - Lval = Lstore->nzval; - Uval = Ustore->nzval; - - inv_perm_c = (int *) SUPERLU_MALLOC(A->ncol*sizeof(int)); - for (j = 0; j < A->ncol; ++j) inv_perm_c[perm_c[j]] = j; - - for (k = 0; k <= Lstore->nsuper; ++k) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - luptr = L_NZ_START(fsupc); - luval = &Lval[luptr]; - nz_in_U = 1; - - for (j = fsupc; j < L_FST_SUPC(k+1) && j < ncols; ++j) { - maxaj = 0.; - oldcol = inv_perm_c[j]; - for (i = Astore->colptr[oldcol]; i < Astore->colptr[oldcol+1]; ++i) - maxaj = SUPERLU_MAX( maxaj, fabs(Aval[i]) ); - - maxuj = 0.; - for (i = Ustore->colptr[j]; i < Ustore->colptr[j+1]; i++) - maxuj = SUPERLU_MAX( maxuj, fabs(Uval[i]) ); - - /* Supernode */ - for (i = 0; i < nz_in_U; ++i) - maxuj = SUPERLU_MAX( maxuj, fabs(luval[i]) ); - - ++nz_in_U; - luval += nsupr; - - if ( maxuj == 0. ) - rpg = SUPERLU_MIN( rpg, 1.); - else - rpg = SUPERLU_MIN( rpg, maxaj / maxuj ); - } - - if ( j >= ncols ) break; - } - - SUPERLU_FREE(inv_perm_c); - return (rpg); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spruneL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spruneL.c deleted file mode 100644 index 3301bfa57d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/spruneL.c +++ /dev/null @@ -1,154 +0,0 @@ - -/*! @file spruneL.c - * \brief Prunes the L-structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - *
    - */ - - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Prunes the L-structure of supernodes whose L-structure
    - *   contains the current pivot row "pivrow"
    - * 
    - */ - -void -spruneL( - const int jcol, /* in */ - const int *perm_r, /* in */ - const int pivrow, /* in */ - const int nseg, /* in */ - const int *segrep, /* in */ - const int *repfnz, /* in */ - int *xprune, /* out */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - - float utemp; - int jsupno, irep, irep1, kmin, kmax, krow, movnum; - int i, ktemp, minloc, maxloc; - int do_prune; /* logical variable */ - int *xsup, *supno; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - /* - * For each supernode-rep irep in U[*,j] - */ - jsupno = supno[jcol]; - for (i = 0; i < nseg; i++) { - - irep = segrep[i]; - irep1 = irep + 1; - do_prune = FALSE; - - /* Don't prune with a zero U-segment */ - if ( repfnz[irep] == EMPTY ) - continue; - - /* If a snode overlaps with the next panel, then the U-segment - * is fragmented into two parts -- irep and irep1. We should let - * pruning occur at the rep-column in irep1's snode. - */ - if ( supno[irep] == supno[irep1] ) /* Don't prune */ - continue; - - /* - * If it has not been pruned & it has a nonz in row L[pivrow,i] - */ - if ( supno[irep] != jsupno ) { - if ( xprune[irep] >= xlsub[irep1] ) { - kmin = xlsub[irep]; - kmax = xlsub[irep1] - 1; - for (krow = kmin; krow <= kmax; krow++) - if ( lsub[krow] == pivrow ) { - do_prune = TRUE; - break; - } - } - - if ( do_prune ) { - - /* Do a quicksort-type partition - * movnum=TRUE means that the num values have to be exchanged. 
- */ - movnum = FALSE; - if ( irep == xsup[supno[irep]] ) /* Snode of size 1 */ - movnum = TRUE; - - while ( kmin <= kmax ) { - - if ( perm_r[lsub[kmax]] == EMPTY ) - kmax--; - else if ( perm_r[lsub[kmin]] != EMPTY ) - kmin++; - else { /* kmin below pivrow (not yet pivoted), and kmax - * above pivrow: interchange the two subscripts - */ - ktemp = lsub[kmin]; - lsub[kmin] = lsub[kmax]; - lsub[kmax] = ktemp; - - /* If the supernode has only one column, then we - * only keep one set of subscripts. For any subscript - * interchange performed, similar interchange must be - * done on the numerical values. - */ - if ( movnum ) { - minloc = xlusup[irep] + (kmin - xlsub[irep]); - maxloc = xlusup[irep] + (kmax - xlsub[irep]); - utemp = lusup[minloc]; - lusup[minloc] = lusup[maxloc]; - lusup[maxloc] = utemp; - } - - kmin++; - kmax--; - - } - - } /* while */ - - xprune[irep] = kmin; /* Pruning */ - -#ifdef CHK_PRUNE - printf(" After spruneL(),using col %d: xprune[%d] = %d\n", - jcol, irep, kmin); -#endif - } /* if do_prune */ - - } /* if */ - - } /* for each U-segment... */ -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sreadhb.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sreadhb.c deleted file mode 100644 index 77f6f19e72..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sreadhb.c +++ /dev/null @@ -1,257 +0,0 @@ - -/*! @file sreadhb.c - * \brief Read a matrix stored in Harwell-Boeing format - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Purpose
    - * =======
    - * 
    - * Read a FLOAT PRECISION matrix stored in Harwell-Boeing format 
    - * as described below.
    - * 
    - * Line 1 (A72,A8) 
    - *  	Col. 1 - 72   Title (TITLE) 
    - *	Col. 73 - 80  Key (KEY) 
    - * 
    - * Line 2 (5I14) 
    - * 	Col. 1 - 14   Total number of lines excluding header (TOTCRD) 
    - * 	Col. 15 - 28  Number of lines for pointers (PTRCRD) 
    - * 	Col. 29 - 42  Number of lines for row (or variable) indices (INDCRD) 
    - * 	Col. 43 - 56  Number of lines for numerical values (VALCRD) 
    - *	Col. 57 - 70  Number of lines for right-hand sides (RHSCRD) 
    - *                    (including starting guesses and solution vectors 
    - *		       if present) 
    - *           	      (zero indicates no right-hand side data is present) 
    - *
    - * Line 3 (A3, 11X, 4I14) 
    - *   	Col. 1 - 3    Matrix type (see below) (MXTYPE) 
    - * 	Col. 15 - 28  Number of rows (or variables) (NROW) 
    - * 	Col. 29 - 42  Number of columns (or elements) (NCOL) 
    - *	Col. 43 - 56  Number of row (or variable) indices (NNZERO) 
    - *	              (equal to number of entries for assembled matrices) 
    - * 	Col. 57 - 70  Number of elemental matrix entries (NELTVL) 
    - *	              (zero in the case of assembled matrices) 
    - * Line 4 (2A16, 2A20) 
    - * 	Col. 1 - 16   Format for pointers (PTRFMT) 
    - *	Col. 17 - 32  Format for row (or variable) indices (INDFMT) 
    - *	Col. 33 - 52  Format for numerical values of coefficient matrix (VALFMT) 
    - * 	Col. 53 - 72 Format for numerical values of right-hand sides (RHSFMT) 
    - *
    - * Line 5 (A3, 11X, 2I14) Only present if there are right-hand sides present 
    - *    	Col. 1 	      Right-hand side type: 
    - *	         	  F for full storage or M for same format as matrix 
    - *    	Col. 2        G if a starting vector(s) (Guess) is supplied. (RHSTYP) 
    - *    	Col. 3        X if an exact solution vector(s) is supplied. 
    - *	Col. 15 - 28  Number of right-hand sides (NRHS) 
    - *	Col. 29 - 42  Number of row indices (NRHSIX) 
    - *          	      (ignored in case of unassembled matrices) 
    - *
    - * The three character type field on line 3 describes the matrix type. 
    - * The following table lists the permitted values for each of the three 
    - * characters. As an example of the type field, RSA denotes that the matrix 
    - * is real, symmetric, and assembled. 
    - *
    - * First Character: 
    - *	R Real matrix 
    - *	C Complex matrix 
    - *	P Pattern only (no numerical values supplied) 
    - *
    - * Second Character: 
    - *	S Symmetric 
    - *	U Unsymmetric 
    - *	H Hermitian 
    - *	Z Skew symmetric 
    - *	R Rectangular 
    - *
    - * Third Character: 
    - *	A Assembled 
    - *	E Elemental matrices (unassembled) 
    - *
    - * 
    - */ -#include -#include -#include "slu_sdefs.h" - - -/*! \brief Eat up the rest of the current line */ -int sDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -int sParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -int sParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - *
    - * - * Purpose - * ======= - * - * Read a FLOAT PRECISION matrix stored in Rutherford-Boeing format - * as described below. - * - * Line 1 (A72, A8) - * Col. 1 - 72 Title (TITLE) - * Col. 73 - 80 Matrix name / identifier (MTRXID) - * - * Line 2 (I14, 3(1X, I13)) - * Col. 1 - 14 Total number of lines excluding header (TOTCRD) - * Col. 16 - 28 Number of lines for pointers (PTRCRD) - * Col. 30 - 42 Number of lines for row (or variable) indices (INDCRD) - * Col. 44 - 56 Number of lines for numerical values (VALCRD) - * - * Line 3 (A3, 11X, 4(1X, I13)) - * Col. 1 - 3 Matrix type (see below) (MXTYPE) - * Col. 15 - 28 Compressed Column: Number of rows (NROW) - * Elemental: Largest integer used to index variable (MVAR) - * Col. 30 - 42 Compressed Column: Number of columns (NCOL) - * Elemental: Number of element matrices (NELT) - * Col. 44 - 56 Compressed Column: Number of entries (NNZERO) - * Elemental: Number of variable indeces (NVARIX) - * Col. 58 - 70 Compressed Column: Unused, explicitly zero - * Elemental: Number of elemental matrix entries (NELTVL) - * - * Line 4 (2A16, A20) - * Col. 1 - 16 Fortran format for pointers (PTRFMT) - * Col. 17 - 32 Fortran format for row (or variable) indices (INDFMT) - * Col. 33 - 52 Fortran format for numerical values of coefficient matrix - * (VALFMT) - * (blank in the case of matrix patterns) - * - * The three character type field on line 3 describes the matrix type. - * The following table lists the permitted values for each of the three - * characters. As an example of the type field, RSA denotes that the matrix - * is real, symmetric, and assembled. 
- * - * First Character: - * R Real matrix - * C Complex matrix - * I integer matrix - * P Pattern only (no numerical values supplied) - * Q Pattern only (numerical values supplied in associated auxiliary value - * file) - * - * Second Character: - * S Symmetric - * U Unsymmetric - * H Hermitian - * Z Skew symmetric - * R Rectangular - * - * Third Character: - * A Compressed column form - * E Elemental form - * - *
    - */ - -#include "slu_sdefs.h" - - -/*! \brief Eat up the rest of the current line */ -static int sDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -static int sParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -static int sParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - *
    - */ - -#include "slu_sdefs.h" - - -void -sreadtriple(int *m, int *n, int *nonz, - float **nzval, int **rowind, int **colptr) -{ -/* - * Output parameters - * ================= - * (a,asub,xa): asub[*] contains the row subscripts of nonzeros - * in columns of matrix A; a[*] the numerical values; - * row i of A is given by a[k],k=xa[i],...,xa[i+1]-1. - * - */ - int j, k, jsize, nnz, nz; - float *a, *val; - int *asub, *xa, *row, *col; - int zero_base = 0; - - /* Matrix format: - * First line: #rows, #cols, #non-zero - * Triplet in the rest of lines: - * row, col, value - */ - - scanf("%d%d", n, nonz); - *m = *n; - printf("m %d, n %d, nonz %d\n", *m, *n, *nonz); - sallocateA(*n, *nonz, nzval, rowind, colptr); /* Allocate storage */ - a = *nzval; - asub = *rowind; - xa = *colptr; - - val = (float *) SUPERLU_MALLOC(*nonz * sizeof(float)); - row = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - col = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - - for (j = 0; j < *n; ++j) xa[j] = 0; - - /* Read into the triplet array from a file */ - for (nnz = 0, nz = 0; nnz < *nonz; ++nnz) { - scanf("%d%d%f\n", &row[nz], &col[nz], &val[nz]); - - if ( nnz == 0 ) { /* first nonzero */ - if ( row[0] == 0 || col[0] == 0 ) { - zero_base = 1; - printf("triplet file: row/col indices are zero-based.\n"); - } else - printf("triplet file: row/col indices are one-based.\n"); - } - - if ( !zero_base ) { - /* Change to 0-based indexing. 
*/ - --row[nz]; - --col[nz]; - } - - if (row[nz] < 0 || row[nz] >= *m || col[nz] < 0 || col[nz] >= *n - /*|| val[nz] == 0.*/) { - fprintf(stderr, "nz %d, (%d, %d) = %e out of bound, removed\n", - nz, row[nz], col[nz], val[nz]); - exit(-1); - } else { - ++xa[col[nz]]; - ++nz; - } - } - - *nonz = nz; - - /* Initialize the array of column pointers */ - k = 0; - jsize = xa[0]; - xa[0] = 0; - for (j = 1; j < *n; ++j) { - k += jsize; - jsize = xa[j]; - xa[j] = k; - } - - /* Copy the triplets into the column oriented storage */ - for (nz = 0; nz < *nonz; ++nz) { - j = col[nz]; - k = xa[j]; - asub[k] = row[nz]; - a[k] = val[nz]; - ++xa[j]; - } - - /* Reset the column pointers to the beginning of each column */ - for (j = *n; j > 0; --j) - xa[j] = xa[j-1]; - xa[0] = 0; - - SUPERLU_FREE(val); - SUPERLU_FREE(row); - SUPERLU_FREE(col); - -#ifdef CHK_INPUT - { - int i; - for (i = 0; i < *n; i++) { - printf("Col %d, xa %d\n", i, xa[i]); - for (k = xa[i]; k < xa[i+1]; k++) - printf("%d\t%16.10f\n", asub[k], a[k]); - } - } -#endif - -} - - -void sreadrhs(int m, float *b) -{ - FILE *fp, *fopen(); - int i; - /*int j;*/ - - if ( !(fp = fopen("b.dat", "r")) ) { - fprintf(stderr, "dreadrhs: file does not exist\n"); - exit(-1); - } - for (i = 0; i < m; ++i) - fscanf(fp, "%f\n", &b[i]); - - /* readpair_(j, &b[i]);*/ - fclose(fp); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssnode_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssnode_bmod.c deleted file mode 100644 index c6813ddbd0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssnode_bmod.c +++ /dev/null @@ -1,118 +0,0 @@ - -/*! @file ssnode_bmod.c - * \brief Performs numeric block updates within the relaxed snode. - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_sdefs.h" - - -/*! \brief Performs numeric block updates within the relaxed snode. - */ -int -ssnode_bmod ( - const int jcol, /* in */ - const int jsupno, /* in */ - const int fsupc, /* in */ - float *dense, /* in */ - float *tempv, /* working array */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - float alpha = -1.0, beta = 1.0; -#endif - - int luptr, nsupc, nsupr, nrow; - int isub, irow, i, iptr; - register int ufirst, nextlu; - int *lsub, *xlsub; - float *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - nextlu = xlusup[jcol]; - - /* - * Process the supernodal portion of L\U[*,j] - */ - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = 0; - ++nextlu; - } - - xlusup[jcol + 1] = nextlu; /* Initialize xlusup for next column */ - - if ( fsupc < jcol ) { - - luptr = xlusup[fsupc]; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nsupc = jcol - fsupc; /* Excluding jcol */ - ufirst = xlusup[jcol]; /* Points to the beginning of column - jcol in supernode L\U(jsupno). 
*/ - nrow = nsupr - nsupc; - - ops[TRSV] += nsupc * (nsupc - 1); - ops[GEMV] += 2 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - SGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - strsv_( "L", "N", "U", &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - sgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - slsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - smatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], &tempv[0] ); - - /* Scatter tempv[*] into lusup[*] */ - iptr = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - lusup[iptr++] -= tempv[i]; - tempv[i] = 0.0; - } -#endif - - } - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssnode_dfs.c deleted file mode 100644 index a191bc4d53..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssnode_dfs.c +++ /dev/null @@ -1,112 +0,0 @@ - -/*! @file ssnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    ssnode_dfs() - Determine the union of the row structures of those 
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore, 
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -ssnode_dfs ( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *xprune, /* out */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, ifrom, ito, nextl, new_next; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) { - if ( mem_error = sLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1, then make a copy of the subscripts for pruning */ - if ( jcol < kcol ) { - new_next = nextl + (nextl - xlsub[jcol]); - while ( new_next > nzlmax ) { - if ( mem_error = sLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - ito = nextl; - for (ifrom = xlsub[jcol]; ifrom < nextl; ) - lsub[ito++] = lsub[ifrom++]; - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - nextl = ito; - } - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xprune[kcol] = nextl; - xlsub[kcol+1] = nextl; - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssp_blas2.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssp_blas2.c deleted file mode 100644 index 7824be6560..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssp_blas2.c +++ /dev/null @@ -1,477 +0,0 @@ - -/*! 
@file ssp_blas2.c - * \brief Sparse BLAS 2, using some dense BLAS 2 operations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -/* - * File name: ssp_blas2.c - * Purpose: Sparse BLAS 2, using some dense BLAS 2 operations. - */ - -#include "slu_sdefs.h" - -/* - * Function prototypes - */ -void susolve(int, int, float*, float*); -void slsolve(int, int, float*, float*); -void smatvec(int, int, int, float*, float*, float*); - -/*! \brief Solves one of the systems of equations A*x = b, or A'*x = b - * - *
    - *   Purpose
    - *   =======
    - *
    - *   sp_strsv() solves one of the systems of equations   
    - *       A*x = b,   or   A'*x = b,
    - *   where b and x are n element vectors and A is a sparse unit , or   
    - *   non-unit, upper or lower triangular matrix.   
    - *   No test for singularity or near-singularity is included in this   
    - *   routine. Such tests must be performed before calling this routine.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   uplo   - (input) char*
    - *            On entry, uplo specifies whether the matrix is an upper or   
    - *             lower triangular matrix as follows:   
    - *                uplo = 'U' or 'u'   A is an upper triangular matrix.   
    - *                uplo = 'L' or 'l'   A is a lower triangular matrix.   
    - *
    - *   trans  - (input) char*
    - *             On entry, trans specifies the equations to be solved as   
    - *             follows:   
    - *                trans = 'N' or 'n'   A*x = b.   
    - *                trans = 'T' or 't'   A'*x = b.
    - *                trans = 'C' or 'c'   A'*x = b.   
    - *
    - *   diag   - (input) char*
    - *             On entry, diag specifies whether or not A is unit   
    - *             triangular as follows:   
    - *                diag = 'U' or 'u'   A is assumed to be unit triangular.   
    - *                diag = 'N' or 'n'   A is not assumed to be unit   
    - *                                    triangular.   
    - *	     
    - *   L       - (input) SuperMatrix*
    - *	       The factor L from the factorization Pr*A*Pc=L*U. Use
    - *             compressed row subscripts storage for supernodes,
    - *             i.e., L has types: Stype = SC, Dtype = SLU_S, Mtype = TRLU.
    - *
    - *   U       - (input) SuperMatrix*
    - *	        The factor U from the factorization Pr*A*Pc=L*U.
    - *	        U has types: Stype = NC, Dtype = SLU_S, Mtype = TRU.
    - *    
    - *   x       - (input/output) float*
    - *             Before entry, the incremented array X must contain the n   
    - *             element right-hand side vector b. On exit, X is overwritten 
    - *             with the solution vector x.
    - *
    - *   info    - (output) int*
    - *             If *info = -i, the i-th argument had an illegal value.
    - * 
    - */ -int -sp_strsv(char *uplo, char *trans, char *diag, SuperMatrix *L, - SuperMatrix *U, float *x, SuperLUStat_t *stat, int *info) -{ -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - SCformat *Lstore; - NCformat *Ustore; - float *Lval, *Uval; - int incx = 1, incy = 1; - float alpha = 1.0, beta = 1.0; - int nrow; - int fsupc, nsupr, nsupc, luptr, istart, irow; - int i, k, iptr, jcol; - float *work; - flops_t solve_ops; - - /* Test the input parameters */ - *info = 0; - if ( !lsame_(uplo,"L") && !lsame_(uplo, "U") ) *info = -1; - else if ( !lsame_(trans, "N") && !lsame_(trans, "T") && - !lsame_(trans, "C")) *info = -2; - else if ( !lsame_(diag, "U") && !lsame_(diag, "N") ) *info = -3; - else if ( L->nrow != L->ncol || L->nrow < 0 ) *info = -4; - else if ( U->nrow != U->ncol || U->nrow < 0 ) *info = -5; - if ( *info ) { - i = -(*info); - xerbla_("sp_strsv", &i); - return 0; - } - - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( !(work = floatCalloc(L->nrow)) ) - ABORT("Malloc fails for work in sp_strsv()."); - - if ( lsame_(trans, "N") ) { /* Form x := inv(A)*x. 
*/ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L)*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - nrow = nsupr - nsupc; - - solve_ops += nsupc * (nsupc - 1); - solve_ops += 2 * nrow * nsupc; - - if ( nsupc == 1 ) { - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); ++iptr) { - irow = L_SUB(iptr); - ++luptr; - x[irow] -= x[fsupc] * Lval[luptr]; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - SGEMV(ftcs2, &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#else - strsv_("L", "N", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - sgemv_("N", &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#endif -#else - slsolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc]); - - smatvec ( nsupr, nsupr-nsupc, nsupc, &Lval[luptr+nsupc], - &x[fsupc], &work[0] ); -#endif - - iptr = istart + nsupc; - for (i = 0; i < nrow; ++i, ++iptr) { - irow = L_SUB(iptr); - x[irow] -= work[i]; /* Scatter */ - work[i] = 0.0; - - } - } - } /* for k ... 
*/ - - } else { - /* Form x := inv(U)*x */ - - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += nsupc * (nsupc + 1); - - if ( nsupc == 1 ) { - x[fsupc] /= Lval[luptr]; - for (i = U_NZ_START(fsupc); i < U_NZ_START(fsupc+1); ++i) { - irow = U_SUB(i); - x[irow] -= x[fsupc] * Uval[i]; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - STRSV(ftcs3, ftcs2, ftcs2, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - strsv_("U", "N", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif -#else - susolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc] ); -#endif - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); - i++) { - irow = U_SUB(i); - x[irow] -= x[jcol] * Uval[i]; - } - } - } - } /* for k ... 
*/ - - } - } else { /* Form x := inv(A')*x */ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L')*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; --k) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 2 * (nsupr - nsupc) * nsupc; - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - iptr = istart + nsupc; - for (i = L_NZ_START(jcol) + nsupc; - i < L_NZ_START(jcol+1); i++) { - irow = L_SUB(iptr); - x[jcol] -= x[irow] * Lval[i]; - iptr++; - } - } - - if ( nsupc > 1 ) { - solve_ops += nsupc * (nsupc - 1); -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("U", strlen("U")); - STRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - strsv_("L", "T", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } - } else { - /* Form x := inv(U')*x */ - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 2*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++) { - irow = U_SUB(i); - x[jcol] -= x[irow] * Uval[i]; - } - } - - solve_ops += nsupc * (nsupc + 1); - - if ( nsupc == 1 ) { - x[fsupc] /= Lval[luptr]; - } else { -#ifdef _CRAY - ftcs1 = _cptofcd("U", strlen("U")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("N", strlen("N")); - STRSV( ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - strsv_("U", "T", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } /* for k ... 
*/ - } - } - - stat->ops[SOLVE] += solve_ops; - SUPERLU_FREE(work); - return 0; -} - - - -/*! \brief Performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   sp_sgemv()  performs one of the matrix-vector operations   
    - *      y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,   
    - *   where alpha and beta are scalars, x and y are vectors and A is a
    - *   sparse A->nrow by A->ncol matrix.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   TRANS  - (input) char*
    - *            On entry, TRANS specifies the operation to be performed as   
    - *            follows:   
    - *               TRANS = 'N' or 'n'   y := alpha*A*x + beta*y.   
    - *               TRANS = 'T' or 't'   y := alpha*A'*x + beta*y.   
    - *               TRANS = 'C' or 'c'   y := alpha*A'*x + beta*y.   
    - *
    - *   ALPHA  - (input) float
    - *            On entry, ALPHA specifies the scalar alpha.   
    - *
    - *   A      - (input) SuperMatrix*
    - *            Matrix A with a sparse format, of dimension (A->nrow, A->ncol).
    - *            Currently, the type of A can be:
    - *                Stype = NC or NCP; Dtype = SLU_S; Mtype = GE. 
    - *            In the future, more general A can be handled.
    - *
    - *   X      - (input) float*, array of DIMENSION at least   
    - *            ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'   
    - *            and at least   
    - *            ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.   
    - *            Before entry, the incremented array X must contain the   
    - *            vector x.   
    - *
    - *   INCX   - (input) int
    - *            On entry, INCX specifies the increment for the elements of   
    - *            X. INCX must not be zero.   
    - *
    - *   BETA   - (input) float
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then Y need not be set on input.   
    - *
    - *   Y      - (output) float*,  array of DIMENSION at least   
    - *            ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'   
    - *            and at least   
    - *            ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.   
    - *            Before entry with BETA non-zero, the incremented array Y   
    - *            must contain the vector y. On exit, Y is overwritten by the 
    - *            updated vector y.
    - *	     
    - *   INCY   - (input) int
    - *            On entry, INCY specifies the increment for the elements of   
    - *            Y. INCY must not be zero.   
    - *
    - *   ==== Sparse Level 2 Blas routine.   
    - * 
    - */ - -int -sp_sgemv(char *trans, float alpha, SuperMatrix *A, float *x, - int incx, float beta, float *y, int incy) -{ - /* Local variables */ - NCformat *Astore; - float *Aval; - int info; - float temp; - int lenx, leny, i, j, irow; - int iy, jx, jy, kx, ky; - int notran; - - notran = lsame_(trans, "N"); - Astore = A->Store; - Aval = Astore->nzval; - - /* Test the input parameters */ - info = 0; - if ( !notran && !lsame_(trans, "T") && !lsame_(trans, "C")) info = 1; - else if ( A->nrow < 0 || A->ncol < 0 ) info = 3; - else if (incx == 0) info = 5; - else if (incy == 0) info = 8; - if (info != 0) { - xerbla_("sp_sgemv ", &info); - return 0; - } - - /* Quick return if possible. */ - if (A->nrow == 0 || A->ncol == 0 || (alpha == 0. && beta == 1.)) - return 0; - - /* Set LENX and LENY, the lengths of the vectors x and y, and set - up the start points in X and Y. */ - if (lsame_(trans, "N")) { - lenx = A->ncol; - leny = A->nrow; - } else { - lenx = A->nrow; - leny = A->ncol; - } - if (incx > 0) kx = 0; - else kx = - (lenx - 1) * incx; - if (incy > 0) ky = 0; - else ky = - (leny - 1) * incy; - - /* Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. */ - /* First form y := beta*y. */ - if (beta != 1.) { - if (incy == 1) { - if (beta == 0.) - for (i = 0; i < leny; ++i) y[i] = 0.; - else - for (i = 0; i < leny; ++i) y[i] = beta * y[i]; - } else { - iy = ky; - if (beta == 0.) - for (i = 0; i < leny; ++i) { - y[iy] = 0.; - iy += incy; - } - else - for (i = 0; i < leny; ++i) { - y[iy] = beta * y[iy]; - iy += incy; - } - } - } - - if (alpha == 0.) return 0; - - if ( notran ) { - /* Form y := alpha*A*x + y. */ - jx = kx; - if (incy == 1) { - for (j = 0; j < A->ncol; ++j) { - if (x[jx] != 0.) 
{ - temp = alpha * x[jx]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - y[irow] += temp * Aval[i]; - } - } - jx += incx; - } - } else { - ABORT("Not implemented."); - } - } else { - /* Form y := alpha*A'*x + y. */ - jy = ky; - if (incx == 1) { - for (j = 0; j < A->ncol; ++j) { - temp = 0.; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - temp += Aval[i] * x[irow]; - } - y[jy] += alpha * temp; - jy += incy; - } - } else { - ABORT("Not implemented."); - } - } - return 0; -} /* sp_sgemv */ - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssp_blas3.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssp_blas3.c deleted file mode 100644 index be3148b842..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/ssp_blas3.c +++ /dev/null @@ -1,127 +0,0 @@ - -/*! @file ssp_blas3.c - * \brief Sparse BLAS3, using some dense BLAS3 operations - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -/* - * File name: sp_blas3.c - * Purpose: Sparse BLAS3, using some dense BLAS3 operations. - */ - -#include "slu_sdefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - * 
    - *   sp_s performs one of the matrix-matrix operations   
    - * 
    - *      C := alpha*op( A )*op( B ) + beta*C,   
    - * 
    - *   where  op( X ) is one of 
    - * 
    - *      op( X ) = X   or   op( X ) = X'   or   op( X ) = conjg( X' ),
    - * 
    - *   alpha and beta are scalars, and A, B and C are matrices, with op( A ) 
    - *   an m by k matrix,  op( B )  a  k by n matrix and  C an m by n matrix. 
    - *   
    - * 
    - *   Parameters   
    - *   ==========   
    - * 
    - *   TRANSA - (input) char*
    - *            On entry, TRANSA specifies the form of op( A ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSA = 'N' or 'n',  op( A ) = A.   
    - *               TRANSA = 'T' or 't',  op( A ) = A'.   
    - *               TRANSA = 'C' or 'c',  op( A ) = conjg( A' ).   
    - *            Unchanged on exit.   
    - * 
    - *   TRANSB - (input) char*
    - *            On entry, TRANSB specifies the form of op( B ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSB = 'N' or 'n',  op( B ) = B.   
    - *               TRANSB = 'T' or 't',  op( B ) = B'.   
    - *               TRANSB = 'C' or 'c',  op( B ) = conjg( B' ).   
    - *            Unchanged on exit.   
    - * 
    - *   M      - (input) int   
    - *            On entry,  M  specifies  the number of rows of the matrix 
    - *	     op( A ) and of the matrix C.  M must be at least zero. 
    - *	     Unchanged on exit.   
    - * 
    - *   N      - (input) int
    - *            On entry,  N specifies the number of columns of the matrix 
    - *	     op( B ) and the number of columns of the matrix C. N must be 
    - *	     at least zero.
    - *	     Unchanged on exit.   
    - * 
    - *   K      - (input) int
    - *            On entry, K specifies the number of columns of the matrix 
    - *	     op( A ) and the number of rows of the matrix op( B ). K must 
    - *	     be at least  zero.   
    - *           Unchanged on exit.
    - *      
    - *   ALPHA  - (input) float
    - *            On entry, ALPHA specifies the scalar alpha.   
    - * 
    - *   A      - (input) SuperMatrix*
    - *            Matrix A with a sparse format, of dimension (A->nrow, A->ncol).
    - *            Currently, the type of A can be:
    - *                Stype = NC or NCP; Dtype = SLU_S; Mtype = GE. 
    - *            In the future, more general A can be handled.
    - * 
    - *   B      - FLOAT PRECISION array of DIMENSION ( LDB, kb ), where kb is 
    - *            n when TRANSB = 'N' or 'n',  and is  k otherwise.   
    - *            Before entry with  TRANSB = 'N' or 'n',  the leading k by n 
    - *            part of the array B must contain the matrix B, otherwise 
    - *            the leading n by k part of the array B must contain the 
    - *            matrix B.   
    - *            Unchanged on exit.   
    - * 
    - *   LDB    - (input) int
    - *            On entry, LDB specifies the first dimension of B as declared 
    - *            in the calling (sub) program. LDB must be at least max( 1, n ).  
    - *            Unchanged on exit.   
    - * 
    - *   BETA   - (input) float
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then C need not be set on input.   
    - *  
    - *   C      - FLOAT PRECISION array of DIMENSION ( LDC, n ).   
    - *            Before entry, the leading m by n part of the array C must 
    - *            contain the matrix C,  except when beta is zero, in which 
    - *            case C need not be set on entry.   
    - *            On exit, the array C is overwritten by the m by n matrix 
    - *	     ( alpha*op( A )*B + beta*C ).   
    - *  
    - *   LDC    - (input) int
    - *            On entry, LDC specifies the first dimension of C as declared 
    - *            in the calling (sub)program. LDC must be at least max(1,m).   
    - *            Unchanged on exit.   
    - *  
    - *   ==== Sparse Level 3 Blas routine.   
    - * 
    - */ - -int -sp_sgemm(char *transa, char *transb, int m, int n, int k, - float alpha, SuperMatrix *A, float *b, int ldb, - float beta, float *c, int ldc) -{ - int incx = 1, incy = 1; - int j; - - for (j = 0; j < n; ++j) { - sp_sgemv(transa, alpha, A, &b[ldb*j], incx, beta, &c[ldc*j], incy); - } - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/superlu_timer.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/superlu_timer.c deleted file mode 100644 index 5820bdb558..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/superlu_timer.c +++ /dev/null @@ -1,70 +0,0 @@ -/*! @file superlu_timer.c - * \brief Returns the time used - * - *
    - * Purpose
    - * ======= 
    - * 
    - * Returns the time in seconds used by the process.
    - *
    - * Note: the timer function call is machine dependent. Use conditional
    - *       compilation to choose the appropriate function.
    - * 
    - */ - - -#ifdef SUN -/* - * It uses the system call gethrtime(3C), which is accurate to - * nanoseconds. -*/ -#include - -double SuperLU_timer_() { - return ( (double)gethrtime() / 1e9 ); -} - -#elif _WIN32 - -#include - -double SuperLU_timer_() -{ - clock_t t; - t=clock(); - - return ((double)t)/CLOCKS_PER_SEC; -} - -#else - -#ifndef NO_TIMER -#include -#include -#include -#include -#endif - -#ifndef CLK_TCK -#define CLK_TCK 60 -#endif -/*! \brief Timer function - */ -double SuperLU_timer_() -{ -#ifdef NO_TIMER - /* no sys/times.h on WIN32 */ - double tmp; - tmp = 0.0; -#else - struct tms use; - double tmp; - times(&use); - tmp = use.tms_utime; - tmp += use.tms_stime; -#endif - return (double)(tmp) / CLK_TCK; -} - -#endif - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/supermatrix.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/supermatrix.h deleted file mode 100644 index 8b8e388c4d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/supermatrix.h +++ /dev/null @@ -1,180 +0,0 @@ -/*! @file supermatrix.h - * \brief Defines matrix types - */ -#ifndef __SUPERLU_SUPERMATRIX /* allow multiple inclusions */ -#define __SUPERLU_SUPERMATRIX - - -/******************************************** - * The matrix types are defined as follows. * - ********************************************/ -typedef enum { - SLU_NC, /* column-wise, no supernode */ - SLU_NCP, /* column-wise, column-permuted, no supernode - (The consecutive columns of nonzeros, after permutation, - may not be stored contiguously.) 
*/ - SLU_NR, /* row-wize, no supernode */ - SLU_SC, /* column-wise, supernode */ - SLU_SCP, /* supernode, column-wise, permuted */ - SLU_SR, /* row-wise, supernode */ - SLU_DN, /* Fortran style column-wise storage for dense matrix */ - SLU_NR_loc /* distributed compressed row format */ -} Stype_t; - -typedef enum { - SLU_S, /* single */ - SLU_D, /* double */ - SLU_C, /* single complex */ - SLU_Z /* double complex */ -} Dtype_t; - -typedef enum { - SLU_GE, /* general */ - SLU_TRLU, /* lower triangular, unit diagonal */ - SLU_TRUU, /* upper triangular, unit diagonal */ - SLU_TRL, /* lower triangular */ - SLU_TRU, /* upper triangular */ - SLU_SYL, /* symmetric, store lower half */ - SLU_SYU, /* symmetric, store upper half */ - SLU_HEL, /* Hermitian, store lower half */ - SLU_HEU /* Hermitian, store upper half */ -} Mtype_t; - -typedef struct { - Stype_t Stype; /* Storage type: interprets the storage structure - pointed to by *Store. */ - Dtype_t Dtype; /* Data type. */ - Mtype_t Mtype; /* Matrix type: describes the mathematical property of - the matrix. */ - int_t nrow; /* number of rows */ - int_t ncol; /* number of columns */ - void *Store; /* pointer to the actual storage of the matrix */ -} SuperMatrix; - -/*********************************************** - * The storage schemes are defined as follows. * - ***********************************************/ - -/* Stype == SLU_NC (Also known as Harwell-Boeing sparse matrix format) */ -typedef struct { - int_t nnz; /* number of nonzeros in the matrix */ - void *nzval; /* pointer to array of nonzero values, packed by column */ - int_t *rowind; /* pointer to array of row indices of the nonzeros */ - int_t *colptr; /* pointer to array of beginning of columns in nzval[] - and rowind[] */ - /* Note: - Zero-based indexing is used; - colptr[] has ncol+1 entries, the last one pointing - beyond the last column, so that colptr[ncol] = nnz. 
*/ -} NCformat; - -/* Stype == SLU_NR */ -typedef struct { - int_t nnz; /* number of nonzeros in the matrix */ - void *nzval; /* pointer to array of nonzero values, packed by raw */ - int_t *colind; /* pointer to array of columns indices of the nonzeros */ - int_t *rowptr; /* pointer to array of beginning of rows in nzval[] - and colind[] */ - /* Note: - Zero-based indexing is used; - rowptr[] has nrow+1 entries, the last one pointing - beyond the last row, so that rowptr[nrow] = nnz. */ -} NRformat; - -/* Stype == SLU_SC */ -typedef struct { - int_t nnz; /* number of nonzeros in the matrix */ - int_t nsuper; /* number of supernodes, minus 1 */ - void *nzval; /* pointer to array of nonzero values, packed by column */ - int_t *nzval_colptr;/* pointer to array of beginning of columns in nzval[] */ - int_t *rowind; /* pointer to array of compressed row indices of - rectangular supernodes */ - int_t *rowind_colptr;/* pointer to array of beginning of columns in rowind[] */ - int_t *col_to_sup; /* col_to_sup[j] is the supernode number to which column - j belongs; mapping from column to supernode number. */ - int_t *sup_to_col; /* sup_to_col[s] points to the start of the s-th - supernode; mapping from supernode number to column. - e.g.: col_to_sup: 0 1 2 2 3 3 3 4 4 4 4 4 4 (ncol=12) - sup_to_col: 0 1 2 4 7 12 (nsuper=4) */ - /* Note: - Zero-based indexing is used; - nzval_colptr[], rowind_colptr[], col_to_sup and - sup_to_col[] have ncol+1 entries, the last one - pointing beyond the last column. - For col_to_sup[], only the first ncol entries are - defined. For sup_to_col[], only the first nsuper+2 - entries are defined. 
*/ -} SCformat; - -/* Stype == SLU_SCP */ -typedef struct { - int_t nnz; /* number of nonzeros in the matrix */ - int_t nsuper; /* number of supernodes */ - void *nzval; /* pointer to array of nonzero values, packed by column */ - int_t *nzval_colbeg;/* nzval_colbeg[j] points to beginning of column j - in nzval[] */ - int_t *nzval_colend;/* nzval_colend[j] points to one past the last element - of column j in nzval[] */ - int_t *rowind; /* pointer to array of compressed row indices of - rectangular supernodes */ - int_t *rowind_colbeg;/* rowind_colbeg[j] points to beginning of column j - in rowind[] */ - int_t *rowind_colend;/* rowind_colend[j] points to one past the last element - of column j in rowind[] */ - int_t *col_to_sup; /* col_to_sup[j] is the supernode number to which column - j belongs; mapping from column to supernode. */ - int_t *sup_to_colbeg; /* sup_to_colbeg[s] points to the start of the s-th - supernode; mapping from supernode to column.*/ - int_t *sup_to_colend; /* sup_to_colend[s] points to one past the end of the - s-th supernode; mapping from supernode number to - column. - e.g.: col_to_sup: 0 1 2 2 3 3 3 4 4 4 4 4 4 (ncol=12) - sup_to_colbeg: 0 1 2 4 7 (nsuper=4) - sup_to_colend: 1 2 4 7 12 */ - /* Note: - Zero-based indexing is used; - nzval_colptr[], rowind_colptr[], col_to_sup and - sup_to_col[] have ncol+1 entries, the last one - pointing beyond the last column. 
*/ -} SCPformat; - -/* Stype == SLU_NCP */ -typedef struct { - int_t nnz; /* number of nonzeros in the matrix */ - void *nzval; /* pointer to array of nonzero values, packed by column */ - int_t *rowind;/* pointer to array of row indices of the nonzeros */ - /* Note: nzval[]/rowind[] always have the same length */ - int_t *colbeg;/* colbeg[j] points to the beginning of column j in nzval[] - and rowind[] */ - int_t *colend;/* colend[j] points to one past the last element of column - j in nzval[] and rowind[] */ - /* Note: - Zero-based indexing is used; - The consecutive columns of the nonzeros may not be - contiguous in storage, because the matrix has been - postmultiplied by a column permutation matrix. */ -} NCPformat; - -/* Stype == SLU_DN */ -typedef struct { - int_t lda; /* leading dimension */ - void *nzval; /* array of size lda*ncol to represent a dense matrix */ -} DNformat; - -/* Stype == SLU_NR_loc (Distributed Compressed Row Format) */ -typedef struct { - int_t nnz_loc; /* number of nonzeros in the local submatrix */ - int_t m_loc; /* number of rows local to this processor */ - int_t fst_row; /* global index of the first row */ - void *nzval; /* pointer to array of nonzero values, packed by row */ - int_t *rowptr; /* pointer to array of beginning of rows in nzval[] - and colind[] */ - int_t *colind; /* pointer to array of column indices of the nonzeros */ - /* Note: - Zero-based indexing is used; - rowptr[] has n_loc + 1 entries, the last one pointing - beyond the last row, so that rowptr[n_loc] = nnz_loc.*/ -} NRformat_loc; - - -#endif /* __SUPERLU_SUPERMATRIX */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sutil.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sutil.c deleted file mode 100644 index aab65adcc4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/sutil.c +++ /dev/null @@ -1,471 +0,0 @@ - -/*! @file sutil.c - * \brief Matrix utility functions - * - *
    - * -- SuperLU routine (version 3.1) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * August 1, 2008
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include "slu_sdefs.h" - -void -sCreate_CompCol_Matrix(SuperMatrix *A, int m, int n, int nnz, - float *nzval, int *rowind, int *colptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NCformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NCformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->rowind = rowind; - Astore->colptr = colptr; -} - -void -sCreate_CompRow_Matrix(SuperMatrix *A, int m, int n, int nnz, - float *nzval, int *colind, int *rowptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NRformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NRformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->colind = colind; - Astore->rowptr = rowptr; -} - -/*! \brief Copy matrix A into matrix B. 
*/ -void -sCopy_CompCol_Matrix(SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore, *Bstore; - int ncol, nnz, i; - - B->Stype = A->Stype; - B->Dtype = A->Dtype; - B->Mtype = A->Mtype; - B->nrow = A->nrow;; - B->ncol = ncol = A->ncol; - Astore = (NCformat *) A->Store; - Bstore = (NCformat *) B->Store; - Bstore->nnz = nnz = Astore->nnz; - for (i = 0; i < nnz; ++i) - ((float *)Bstore->nzval)[i] = ((float *)Astore->nzval)[i]; - for (i = 0; i < nnz; ++i) Bstore->rowind[i] = Astore->rowind[i]; - for (i = 0; i <= ncol; ++i) Bstore->colptr[i] = Astore->colptr[i]; -} - - -void -sCreate_Dense_Matrix(SuperMatrix *X, int m, int n, float *x, int ldx, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - DNformat *Xstore; - - X->Stype = stype; - X->Dtype = dtype; - X->Mtype = mtype; - X->nrow = m; - X->ncol = n; - X->Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !(X->Store) ) ABORT("SUPERLU_MALLOC fails for X->Store"); - Xstore = (DNformat *) X->Store; - Xstore->lda = ldx; - Xstore->nzval = (float *) x; -} - -void -sCopy_Dense_Matrix(int M, int N, float *X, int ldx, - float *Y, int ldy) -{ -/*! \brief Copies a two-dimensional matrix X to another matrix Y. 
- */ - int i, j; - - for (j = 0; j < N; ++j) - for (i = 0; i < M; ++i) - Y[i + j*ldy] = X[i + j*ldx]; -} - -void -sCreate_SuperNode_Matrix(SuperMatrix *L, int m, int n, int nnz, - float *nzval, int *nzval_colptr, int *rowind, - int *rowind_colptr, int *col_to_sup, int *sup_to_col, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - SCformat *Lstore; - - L->Stype = stype; - L->Dtype = dtype; - L->Mtype = mtype; - L->nrow = m; - L->ncol = n; - L->Store = (void *) SUPERLU_MALLOC( sizeof(SCformat) ); - if ( !(L->Store) ) ABORT("SUPERLU_MALLOC fails for L->Store"); - Lstore = L->Store; - Lstore->nnz = nnz; - Lstore->nsuper = col_to_sup[n]; - Lstore->nzval = nzval; - Lstore->nzval_colptr = nzval_colptr; - Lstore->rowind = rowind; - Lstore->rowind_colptr = rowind_colptr; - Lstore->col_to_sup = col_to_sup; - Lstore->sup_to_col = sup_to_col; - -} - - -/*! \brief Convert a row compressed storage into a column compressed storage. - */ -void -sCompRow_to_CompCol(int m, int n, int nnz, - float *a, int *colind, int *rowptr, - float **at, int **rowind, int **colptr) -{ - register int i, j, col, relpos; - int *marker; - - /* Allocate storage for another copy of the matrix. */ - *at = (float *) floatMalloc(nnz); - *rowind = (int *) intMalloc(nnz); - *colptr = (int *) intMalloc(n+1); - marker = (int *) intCalloc(n); - - /* Get counts of each column of A, and set up column pointers */ - for (i = 0; i < m; ++i) - for (j = rowptr[i]; j < rowptr[i+1]; ++j) ++marker[colind[j]]; - (*colptr)[0] = 0; - for (j = 0; j < n; ++j) { - (*colptr)[j+1] = (*colptr)[j] + marker[j]; - marker[j] = (*colptr)[j]; - } - - /* Transfer the matrix into the compressed column storage. 
*/ - for (i = 0; i < m; ++i) { - for (j = rowptr[i]; j < rowptr[i+1]; ++j) { - col = colind[j]; - relpos = marker[col]; - (*rowind)[relpos] = i; - (*at)[relpos] = a[j]; - ++marker[col]; - } - } - - SUPERLU_FREE(marker); -} - - -void -sPrint_CompCol_Matrix(char *what, SuperMatrix *A) -{ - NCformat *Astore; - register int i,n; - float *dp; - - printf("\nCompCol matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (NCformat *) A->Store; - dp = (float *) Astore->nzval; - printf("nrow %d, ncol %d, nnz %d\n", A->nrow,A->ncol,Astore->nnz); - printf("nzval: "); - for (i = 0; i < Astore->colptr[n]; ++i) printf("%f ", dp[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->colptr[n]; ++i) printf("%d ", Astore->rowind[i]); - printf("\ncolptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->colptr[i]); - printf("\n"); - fflush(stdout); -} - -void -sPrint_SuperNode_Matrix(char *what, SuperMatrix *A) -{ - SCformat *Astore; - register int i, j, k, c, d, n, nsup; - float *dp; - int *col_to_sup, *sup_to_col, *rowind, *rowind_colptr; - - printf("\nSuperNode matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (SCformat *) A->Store; - dp = (float *) Astore->nzval; - col_to_sup = Astore->col_to_sup; - sup_to_col = Astore->sup_to_col; - rowind_colptr = Astore->rowind_colptr; - rowind = Astore->rowind; - printf("nrow %d, ncol %d, nnz %d, nsuper %d\n", - A->nrow,A->ncol,Astore->nnz,Astore->nsuper); - printf("nzval:\n"); - for (k = 0; k <= Astore->nsuper; ++k) { - c = sup_to_col[k]; - nsup = sup_to_col[k+1] - c; - for (j = c; j < c + nsup; ++j) { - d = Astore->nzval_colptr[j]; - for (i = rowind_colptr[c]; i < rowind_colptr[c+1]; ++i) { - printf("%d\t%d\t%e\n", rowind[i], j, dp[d++]); - } - } - } -#if 0 - for (i = 0; i < Astore->nzval_colptr[n]; ++i) printf("%f ", dp[i]); -#endif - printf("\nnzval_colptr: "); - for (i = 0; i <= n; ++i) printf("%d ", 
Astore->nzval_colptr[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->rowind_colptr[n]; ++i) - printf("%d ", Astore->rowind[i]); - printf("\nrowind_colptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->rowind_colptr[i]); - printf("\ncol_to_sup: "); - for (i = 0; i < n; ++i) printf("%d ", col_to_sup[i]); - printf("\nsup_to_col: "); - for (i = 0; i <= Astore->nsuper+1; ++i) - printf("%d ", sup_to_col[i]); - printf("\n"); - fflush(stdout); -} - -void -sPrint_Dense_Matrix(char *what, SuperMatrix *A) -{ - DNformat *Astore = (DNformat *) A->Store; - register int i, j, lda = Astore->lda; - float *dp; - - printf("\nDense matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - dp = (float *) Astore->nzval; - printf("nrow %d, ncol %d, lda %d\n", A->nrow,A->ncol,lda); - printf("\nnzval: "); - for (j = 0; j < A->ncol; ++j) { - for (i = 0; i < A->nrow; ++i) printf("%f ", dp[i + j*lda]); - printf("\n"); - } - printf("\n"); - fflush(stdout); -} - -/*! \brief Diagnostic print of column "jcol" in the U/L factor. 
- */ -void -sprint_lu_col(char *msg, int jcol, int pivrow, int *xprune, GlobalLU_t *Glu) -{ - int i, k, fsupc; - int *xsup, *supno; - int *xlsub, *lsub; - float *lusup; - int *xlusup; - float *ucol; - int *usub, *xusub; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - - printf("%s", msg); - printf("col %d: pivrow %d, supno %d, xprune %d\n", - jcol, pivrow, supno[jcol], xprune[jcol]); - - printf("\tU-col:\n"); - for (i = xusub[jcol]; i < xusub[jcol+1]; i++) - printf("\t%d%10.4f\n", usub[i], ucol[i]); - printf("\tL-col in rectangular snode:\n"); - fsupc = xsup[supno[jcol]]; /* first col of the snode */ - i = xlsub[fsupc]; - k = xlusup[jcol]; - while ( i < xlsub[fsupc+1] && k < xlusup[jcol+1] ) { - printf("\t%d\t%10.4f\n", lsub[i], lusup[k]); - i++; k++; - } - fflush(stdout); -} - - -/*! \brief Check whether tempv[] == 0. This should be true before and after calling any numeric routines, i.e., "panel_bmod" and "column_bmod". - */ -void scheck_tempv(int n, float *tempv) -{ - int i; - - for (i = 0; i < n; i++) { - if (tempv[i] != 0.0) - { - fprintf(stderr,"tempv[%d] = %f\n", i,tempv[i]); - ABORT("scheck_tempv"); - } - } -} - - -void -sGenXtrue(int n, int nrhs, float *x, int ldx) -{ - int i, j; - for (j = 0; j < nrhs; ++j) - for (i = 0; i < n; ++i) { - x[i + j*ldx] = 1.0;/* + (float)(i+1.)/n;*/ - } -} - -/*! 
\brief Let rhs[i] = sum of i-th row of A, so the solution vector is all 1's - */ -void -sFillRHS(trans_t trans, int nrhs, float *x, int ldx, - SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore; - float *Aval; - DNformat *Bstore; - float *rhs; - float one = 1.0; - float zero = 0.0; - int ldc; - char transc[1]; - - Astore = A->Store; - Aval = (float *) Astore->nzval; - Bstore = B->Store; - rhs = Bstore->nzval; - ldc = Bstore->lda; - - if ( trans == NOTRANS ) *(unsigned char *)transc = 'N'; - else *(unsigned char *)transc = 'T'; - - sp_sgemm(transc, "N", A->nrow, nrhs, A->ncol, one, A, - x, ldx, zero, rhs, ldc); - -} - -/*! \brief Fills a float precision array with a given value. - */ -void -sfill(float *a, int alen, float dval) -{ - register int i; - for (i = 0; i < alen; i++) a[i] = dval; -} - - - -/*! \brief Check the inf-norm of the error vector - */ -void sinf_norm_error(int nrhs, SuperMatrix *X, float *xtrue) -{ - DNformat *Xstore; - float err, xnorm; - float *Xmat, *soln_work; - int i, j; - - Xstore = X->Store; - Xmat = Xstore->nzval; - - for (j = 0; j < nrhs; j++) { - soln_work = &Xmat[j*Xstore->lda]; - err = xnorm = 0.0; - for (i = 0; i < X->nrow; i++) { - err = SUPERLU_MAX(err, fabs(soln_work[i] - xtrue[i])); - xnorm = SUPERLU_MAX(xnorm, fabs(soln_work[i])); - } - err = err / xnorm; - printf("||X - Xtrue||/||X|| = %e\n", err); - } -} - - - -/*! \brief Print performance of the code. */ -void -sPrintPerf(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage, - float rpg, float rcond, float *ferr, - float *berr, char *equed, SuperLUStat_t *stat) -{ - SCformat *Lstore; - NCformat *Ustore; - double *utime; - flops_t *ops; - - utime = stat->utime; - ops = stat->ops; - - if ( utime[FACT] != 0. ) - printf("Factor flops = %e\tMflops = %8.2f\n", ops[FACT], - ops[FACT]*1e-6/utime[FACT]); - printf("Identify relaxed snodes = %8.2f\n", utime[RELAX]); - if ( utime[SOLVE] != 0. 
) - printf("Solve flops = %.0f, Mflops = %8.2f\n", ops[SOLVE], - ops[SOLVE]*1e-6/utime[SOLVE]); - - Lstore = (SCformat *) L->Store; - Ustore = (NCformat *) U->Store; - printf("\tNo of nonzeros in factor L = %d\n", Lstore->nnz); - printf("\tNo of nonzeros in factor U = %d\n", Ustore->nnz); - printf("\tNo of nonzeros in L+U = %d\n", Lstore->nnz + Ustore->nnz); - - printf("L\\U MB %.3f\ttotal MB needed %.3f\n", - mem_usage->for_lu/1e6, mem_usage->total_needed/1e6); - printf("Number of memory expansions: %d\n", stat->expansions); - - printf("\tFactor\tMflops\tSolve\tMflops\tEtree\tEquil\tRcond\tRefine\n"); - printf("PERF:%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f\n", - utime[FACT], ops[FACT]*1e-6/utime[FACT], - utime[SOLVE], ops[SOLVE]*1e-6/utime[SOLVE], - utime[ETREE], utime[EQUIL], utime[RCOND], utime[REFINE]); - - printf("\tRpg\t\tRcond\t\tFerr\t\tBerr\t\tEquil?\n"); - printf("NUM:\t%e\t%e\t%e\t%e\t%s\n", - rpg, rcond, ferr[0], berr[0], equed); - -} - - - - -print_float_vec(char *what, int n, float *vec) -{ - int i; - printf("%s: n %d\n", what, n); - for (i = 0; i < n; ++i) printf("%d\t%f\n", i, vec[i]); - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/util.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/util.c deleted file mode 100644 index 584d0407c2..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/util.c +++ /dev/null @@ -1,484 +0,0 @@ -/*! @file util.c - * \brief Utility functions - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include "slu_ddefs.h" - -/*! \brief Global statistics variale - */ - -void superlu_abort_and_exit(char* msg) -{ - fprintf(stderr, "%s\n", msg); - exit (-1); -} - -/*! \brief Set the default values for the options argument. - */ -void set_default_options(superlu_options_t *options) -{ - options->Fact = DOFACT; - options->Equil = YES; - options->ColPerm = COLAMD; - options->DiagPivotThresh = 1.0; - options->Trans = NOTRANS; - options->IterRefine = NOREFINE; - options->SymmetricMode = NO; - options->PivotGrowth = NO; - options->ConditionNumber = NO; - options->PrintStat = YES; -} - -/*! \brief Set the default values for the options argument for ILU. - */ -void ilu_set_default_options(superlu_options_t *options) -{ - set_default_options(options); - - /* further options for incomplete factorization */ - options->DiagPivotThresh = 0.1; - options->RowPerm = LargeDiag; - options->DiagPivotThresh = 0.1; - options->ILU_FillFactor = 10.0; - options->ILU_DropTol = 1e-4; - options->ILU_DropRule = DROP_BASIC | DROP_AREA; - options->ILU_Norm = INF_NORM; - options->ILU_MILU = SMILU_2; /* SILU */ - options->ILU_FillTol = 1e-2; -} - -/*! \brief Print the options setting. - */ -void print_options(superlu_options_t *options) -{ - printf(".. options:\n"); - printf("\tFact\t %8d\n", options->Fact); - printf("\tEquil\t %8d\n", options->Equil); - printf("\tColPerm\t %8d\n", options->ColPerm); - printf("\tDiagPivotThresh %8.4f\n", options->DiagPivotThresh); - printf("\tTrans\t %8d\n", options->Trans); - printf("\tIterRefine\t%4d\n", options->IterRefine); - printf("\tSymmetricMode\t%4d\n", options->SymmetricMode); - printf("\tPivotGrowth\t%4d\n", options->PivotGrowth); - printf("\tConditionNumber\t%4d\n", options->ConditionNumber); - printf("..\n"); -} - -/*! \brief Print the options setting. - */ -void print_ilu_options(superlu_options_t *options) -{ - printf(".. 
ILU options:\n"); - printf("\tDiagPivotThresh\t%6.2e\n", options->DiagPivotThresh); - printf("\ttau\t%6.2e\n", options->ILU_DropTol); - printf("\tgamma\t%6.2f\n", options->ILU_FillFactor); - printf("\tDropRule\t%0x\n", options->ILU_DropRule); - printf("\tMILU\t%d\n", options->ILU_MILU); - printf("\tMILU_ALPHA\t%6.2e\n", MILU_ALPHA); - printf("\tDiagFillTol\t%6.2e\n", options->ILU_FillTol); - printf("..\n"); -} - -/*! \brief Deallocate the structure pointing to the actual storage of the matrix. */ -void -Destroy_SuperMatrix_Store(SuperMatrix *A) -{ - SUPERLU_FREE ( A->Store ); -} - -void -Destroy_CompCol_Matrix(SuperMatrix *A) -{ - SUPERLU_FREE( ((NCformat *)A->Store)->rowind ); - SUPERLU_FREE( ((NCformat *)A->Store)->colptr ); - SUPERLU_FREE( ((NCformat *)A->Store)->nzval ); - SUPERLU_FREE( A->Store ); -} - -void -Destroy_CompRow_Matrix(SuperMatrix *A) -{ - SUPERLU_FREE( ((NRformat *)A->Store)->colind ); - SUPERLU_FREE( ((NRformat *)A->Store)->rowptr ); - SUPERLU_FREE( ((NRformat *)A->Store)->nzval ); - SUPERLU_FREE( A->Store ); -} - -void -Destroy_SuperNode_Matrix(SuperMatrix *A) -{ - SUPERLU_FREE ( ((SCformat *)A->Store)->rowind ); - SUPERLU_FREE ( ((SCformat *)A->Store)->rowind_colptr ); - SUPERLU_FREE ( ((SCformat *)A->Store)->nzval ); - SUPERLU_FREE ( ((SCformat *)A->Store)->nzval_colptr ); - SUPERLU_FREE ( ((SCformat *)A->Store)->col_to_sup ); - SUPERLU_FREE ( ((SCformat *)A->Store)->sup_to_col ); - SUPERLU_FREE ( A->Store ); -} - -/*! \brief A is of type Stype==NCP */ -void -Destroy_CompCol_Permuted(SuperMatrix *A) -{ - SUPERLU_FREE ( ((NCPformat *)A->Store)->colbeg ); - SUPERLU_FREE ( ((NCPformat *)A->Store)->colend ); - SUPERLU_FREE ( A->Store ); -} - -/*! \brief A is of type Stype==DN */ -void -Destroy_Dense_Matrix(SuperMatrix *A) -{ - DNformat* Astore = A->Store; - SUPERLU_FREE (Astore->nzval); - SUPERLU_FREE ( A->Store ); -} - -/*! 
\brief Reset repfnz[] for the current column - */ -void -resetrep_col (const int nseg, const int *segrep, int *repfnz) -{ - int i, irep; - - for (i = 0; i < nseg; i++) { - irep = segrep[i]; - repfnz[irep] = EMPTY; - } -} - - -/*! \brief Count the total number of nonzeros in factors L and U, and in the symmetrically reduced L. - */ -void -countnz(const int n, int *xprune, int *nnzL, int *nnzU, GlobalLU_t *Glu) -{ - int nsuper, fsupc, i, j; - int nnzL0, jlen, irep; - int *xsup, *xlsub; - - xsup = Glu->xsup; - xlsub = Glu->xlsub; - *nnzL = 0; - *nnzU = (Glu->xusub)[n]; - nnzL0 = 0; - nsuper = (Glu->supno)[n]; - - if ( n <= 0 ) return; - - /* - * For each supernode - */ - for (i = 0; i <= nsuper; i++) { - fsupc = xsup[i]; - jlen = xlsub[fsupc+1] - xlsub[fsupc]; - - for (j = fsupc; j < xsup[i+1]; j++) { - *nnzL += jlen; - *nnzU += j - fsupc + 1; - jlen--; - } - irep = xsup[i+1] - 1; - nnzL0 += xprune[irep] - xlsub[irep]; - } - - /* printf("\tNo of nonzeros in symm-reduced L = %d\n", nnzL0);*/ -} - -/*! \brief Count the total number of nonzeros in factors L and U. - */ -void -ilu_countnz(const int n, int *nnzL, int *nnzU, GlobalLU_t *Glu) -{ - int nsuper, fsupc, i, j; - int jlen, irep; - int *xsup, *xlsub; - - xsup = Glu->xsup; - xlsub = Glu->xlsub; - *nnzL = 0; - *nnzU = (Glu->xusub)[n]; - nsuper = (Glu->supno)[n]; - - if ( n <= 0 ) return; - - /* - * For each supernode - */ - for (i = 0; i <= nsuper; i++) { - fsupc = xsup[i]; - jlen = xlsub[fsupc+1] - xlsub[fsupc]; - - for (j = fsupc; j < xsup[i+1]; j++) { - *nnzL += jlen; - *nnzU += j - fsupc + 1; - jlen--; - } - irep = xsup[i+1] - 1; - } -} - - -/*! \brief Fix up the data storage lsub for L-subscripts. It removes the subscript sets for structural pruning, and applies permuation to the remaining subscripts. 
- */ -void -fixupL(const int n, const int *perm_r, GlobalLU_t *Glu) -{ - register int nsuper, fsupc, nextl, i, j, k, jstrt; - int *xsup, *lsub, *xlsub; - - if ( n <= 1 ) return; - - xsup = Glu->xsup; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nextl = 0; - nsuper = (Glu->supno)[n]; - - /* - * For each supernode ... - */ - for (i = 0; i <= nsuper; i++) { - fsupc = xsup[i]; - jstrt = xlsub[fsupc]; - xlsub[fsupc] = nextl; - for (j = jstrt; j < xlsub[fsupc+1]; j++) { - lsub[nextl] = perm_r[lsub[j]]; /* Now indexed into P*A */ - nextl++; - } - for (k = fsupc+1; k < xsup[i+1]; k++) - xlsub[k] = nextl; /* Other columns in supernode i */ - - } - - xlsub[n] = nextl; -} - - -/*! \brief Diagnostic print of segment info after panel_dfs(). - */ -void print_panel_seg(int n, int w, int jcol, int nseg, - int *segrep, int *repfnz) -{ - int j, k; - - for (j = jcol; j < jcol+w; j++) { - printf("\tcol %d:\n", j); - for (k = 0; k < nseg; k++) - printf("\t\tseg %d, segrep %d, repfnz %d\n", k, - segrep[k], repfnz[(j-jcol)*n + segrep[k]]); - } - -} - - -void -StatInit(SuperLUStat_t *stat) -{ - register int i, w, panel_size, relax; - - panel_size = sp_ienv(1); - relax = sp_ienv(2); - w = SUPERLU_MAX(panel_size, relax); - stat->panel_histo = intCalloc(w+1); - stat->utime = (double *) SUPERLU_MALLOC(NPHASES * sizeof(double)); - if (!stat->utime) ABORT("SUPERLU_MALLOC fails for stat->utime"); - stat->ops = (flops_t *) SUPERLU_MALLOC(NPHASES * sizeof(flops_t)); - if (!stat->ops) ABORT("SUPERLU_MALLOC fails for stat->ops"); - for (i = 0; i < NPHASES; ++i) { - stat->utime[i] = 0.; - stat->ops[i] = 0.; - } - stat->TinyPivots = 0; - stat->RefineSteps = 0; - stat->expansions = 0; -} - - -void -StatPrint(SuperLUStat_t *stat) -{ - double *utime; - flops_t *ops; - - utime = stat->utime; - ops = stat->ops; - printf("Factor time = %8.2f\n", utime[FACT]); - if ( utime[FACT] != 0.0 ) - printf("Factor flops = %e\tMflops = %8.2f\n", ops[FACT], - ops[FACT]*1e-6/utime[FACT]); - - printf("Solve time = 
%8.2f\n", utime[SOLVE]); - if ( utime[SOLVE] != 0.0 ) - printf("Solve flops = %e\tMflops = %8.2f\n", ops[SOLVE], - ops[SOLVE]*1e-6/utime[SOLVE]); - - printf("Number of memory expansions: %d\n", stat->expansions); - -} - - -void -StatFree(SuperLUStat_t *stat) -{ - SUPERLU_FREE(stat->panel_histo); - SUPERLU_FREE(stat->utime); - SUPERLU_FREE(stat->ops); -} - - -flops_t -LUFactFlops(SuperLUStat_t *stat) -{ - return (stat->ops[FACT]); -} - -flops_t -LUSolveFlops(SuperLUStat_t *stat) -{ - return (stat->ops[SOLVE]); -} - - - - - -/*! \brief Fills an integer array with a given value. - */ -void ifill(int *a, int alen, int ival) -{ - register int i; - for (i = 0; i < alen; i++) a[i] = ival; -} - - - -/*! \brief Get the statistics of the supernodes - */ -#define NBUCKS 10 -static int max_sup_size; - -void super_stats(int nsuper, int *xsup) -{ - register int nsup1 = 0; - int i, isize, whichb, bl, bh; - int bucket[NBUCKS]; - - max_sup_size = 0; - - for (i = 0; i <= nsuper; i++) { - isize = xsup[i+1] - xsup[i]; - if ( isize == 1 ) nsup1++; - if ( max_sup_size < isize ) max_sup_size = isize; - } - - printf(" Supernode statistics:\n\tno of super = %d\n", nsuper+1); - printf("\tmax supernode size = %d\n", max_sup_size); - printf("\tno of size 1 supernodes = %d\n", nsup1); - - /* Histogram of the supernode sizes */ - ifill (bucket, NBUCKS, 0); - - for (i = 0; i <= nsuper; i++) { - isize = xsup[i+1] - xsup[i]; - whichb = (float) isize / max_sup_size * NBUCKS; - if (whichb >= NBUCKS) whichb = NBUCKS - 1; - bucket[whichb]++; - } - - printf("\tHistogram of supernode sizes:\n"); - for (i = 0; i < NBUCKS; i++) { - bl = (float) i * max_sup_size / NBUCKS; - bh = (float) (i+1) * max_sup_size / NBUCKS; - printf("\tsnode: %d-%d\t\t%d\n", bl+1, bh, bucket[i]); - } - -} - - -float SpaSize(int n, int np, float sum_npw) -{ - return (sum_npw*8 + np*8 + n*4)/1024.; -} - -float DenseSize(int n, float sum_nw) -{ - return (sum_nw*8 + n*8)/1024.;; -} - - - -/*! 
\brief Check whether repfnz[] == EMPTY after reset. - */ -void check_repfnz(int n, int w, int jcol, int *repfnz) -{ - int jj, k; - - for (jj = jcol; jj < jcol+w; jj++) - for (k = 0; k < n; k++) - if ( repfnz[(jj-jcol)*n + k] != EMPTY ) { - fprintf(stderr, "col %d, repfnz_col[%d] = %d\n", jj, - k, repfnz[(jj-jcol)*n + k]); - ABORT("check_repfnz"); - } -} - - -/*! \brief Print a summary of the testing results. */ -void -PrintSumm(char *type, int nfail, int nrun, int nerrs) -{ - if ( nfail > 0 ) - printf("%3s driver: %d out of %d tests failed to pass the threshold\n", - type, nfail, nrun); - else - printf("All tests for %3s driver passed the threshold (%6d tests run)\n", type, nrun); - - if ( nerrs > 0 ) - printf("%6d error messages recorded\n", nerrs); -} - - -int print_int_vec(char *what, int n, int *vec) -{ - int i; - printf("%s\n", what); - for (i = 0; i < n; ++i) printf("%d\t%d\n", i, vec[i]); - return 0; -} - -int slu_PrintInt10(char *name, int len, int *x) -{ - register i; - - printf("%10s:", name); - for (i = 0; i < len; ++i) - { - if ( i % 10 == 0 ) printf("\n\t[%2d-%2d]", i, i + 9); - printf("%6d", x[i]); - } - printf("\n"); - return 0; -} - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/xerbla.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/xerbla.c deleted file mode 100644 index bffd66bd61..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/xerbla.c +++ /dev/null @@ -1,43 +0,0 @@ -#include -#include "slu_Cnames.h" - -/* Subroutine */ int xerbla_(char *srname, int *info) -{ -/* -- LAPACK auxiliary routine (version 2.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 - - - Purpose - ======= - - XERBLA is an error handler for the LAPACK routines. - It is called by an LAPACK routine if an input parameter has an - invalid value. A message is printed and execution stops. 
- - Installers may consider modifying the STOP statement in order to - call system-specific exception-handling facilities. - - Arguments - ========= - - SRNAME (input) CHARACTER*6 - The name of the routine which called XERBLA. - - INFO (input) INT - The position of the invalid parameter in the parameter list - - of the calling routine. - - ===================================================================== -*/ - - printf("** On entry to %6s, parameter number %2d had an illegal value\n", - srname, *info); - -/* End of XERBLA */ - - return 0; -} /* xerbla_ */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcolumn_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcolumn_bmod.c deleted file mode 100644 index c50e761f09..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcolumn_bmod.c +++ /dev/null @@ -1,367 +0,0 @@ - -/*! @file zcolumn_bmod.c - * \brief performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - *  Permission is hereby granted to use or copy this program for any
    - *  purpose, provided the above notices are retained on all copies.
    - *  Permission to modify the code and to distribute modified code is
    - *  granted, provided the above notices are retained, and a notice that
    - *  the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include -#include -#include "slu_zdefs.h" - -/* - * Function prototypes - */ -void zusolve(int, int, doublecomplex*, doublecomplex*); -void zlsolve(int, int, doublecomplex*, doublecomplex*); -void zmatvec(int, int, int, doublecomplex*, doublecomplex*, doublecomplex*); - - - -/*! \brief - * - *
    - * Purpose:
    - * ========
    - * Performs numeric block updates (sup-col) in topological order.
    - * It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - * Special processing on the supernodal portion of L\U[*,j]
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -zcolumn_bmod ( - const int jcol, /* in */ - const int nseg, /* in */ - doublecomplex *dense, /* in */ - doublecomplex *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in */ - int fpanelc, /* in -- first column in the current panel */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - doublecomplex alpha, beta; - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * nsupc = no of columns in supernode - * nsupr = no of rows in supernode (used as leading dimension) - * luptr = location of supernodal LU-block in storage - * kfnz = first nonz in the k-th supernodal segment - * no_zeros = no of leading zeros in a supernodal U-segment - */ - doublecomplex ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int fsupc, nsupc, nsupr, segsze; - int nrow; /* No of rows in the matrix of matrix-vector */ - int jcolp1, jsupno, k, ksub, krep, krep_ind, ksupno; - register int lptr, kfnz, isub, irow, i; - register int no_zeros, new_next; - int ufirst, nextlu; - int fst_col; /* First column within small LU update */ - int d_fsupc; /* Distance between the first column of the current - panel and the first column of the current snode. 
*/ - int *xsup, *supno; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - int nzlumax; - doublecomplex *tempv1; - doublecomplex zero = {0.0, 0.0}; - doublecomplex one = {1.0, 0.0}; - doublecomplex none = {-1.0, 0.0}; - doublecomplex comp_temp, comp_temp1; - int mem_error; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - nzlumax = Glu->nzlumax; - jcolp1 = jcol + 1; - jsupno = supno[jcol]; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - - krep = segrep[k]; - k--; - ksupno = supno[krep]; - if ( jsupno != ksupno ) { /* Outside the rectangular supernode */ - - fsupc = xsup[ksupno]; - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - /* Distance from the current supernode to the current panel; - d_fsupc=0 if fsupc > fpanelc. */ - d_fsupc = fst_col - fsupc; - - luptr = xlusup[fst_col] + d_fsupc; - lptr = xlsub[fsupc] + d_fsupc; - - kfnz = repfnz[krep]; - kfnz = SUPERLU_MAX ( kfnz, fpanelc ); - - segsze = krep - kfnz + 1; - nsupc = krep - fst_col + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nrow = nsupr - d_fsupc - nsupc; - krep_ind = lptr + nsupc - 1; - - ops[TRSV] += 4 * segsze * (segsze - 1); - ops[GEMV] += 8 * nrow * segsze; - - - - /* - * Case 1: Update U-segment of size 1 -- col-col update - */ - if ( segsze == 1 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - z_sub(&dense[irow], &dense[irow], &comp_temp); - luptr++; - } - - } else if ( segsze <= 3 ) { - ukj = dense[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { /* Case 2: 2cols-col update */ - zz_mult(&comp_temp, &ukj1, &lusup[luptr1]); - z_sub(&ukj, 
&ukj, &comp_temp); - dense[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - zz_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&dense[irow], &dense[irow], &comp_temp); - } - } else { /* Case 3: 3cols-col update */ - ukj2 = dense[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - zz_mult(&comp_temp, &ukj2, &lusup[luptr2-1]); - z_sub(&ukj1, &ukj1, &comp_temp); - - zz_mult(&comp_temp, &ukj1, &lusup[luptr1]); - zz_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&ukj, &ukj, &comp_temp); - - dense[lsub[krep_ind]] = ukj; - dense[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; - luptr1++; - luptr2++; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - zz_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - zz_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&dense[irow], &dense[irow], &comp_temp); - } - } - - - } else { - /* - * Case: sup-col update - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense - */ - - no_zeros = kfnz - fst_col; - - /* Copy U[*,j] segment from dense[*] to tempv[*] */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - tempv[i] = dense[irow]; - ++isub; - } - - /* Dense triangular solve -- start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - ztrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - CGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - 
&nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - zgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - zlsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - zmatvec (nsupr, nrow , segsze, &lusup[luptr], tempv, tempv1); -#endif - - - /* Scatter tempv[] into SPA dense[] as a temporary storage */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense[irow] = tempv[i]; - tempv[i] = zero; - ++isub; - } - - /* Scatter tempv1[] into SPA dense[] */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - z_sub(&dense[irow], &dense[irow], &tempv1[i]); - tempv1[i] = zero; - ++isub; - } - } - - } /* if jsupno ... */ - - } /* for each segment... */ - - /* - * Process the supernodal portion of L\U[*,j] - */ - nextlu = xlusup[jcol]; - fsupc = xsup[jsupno]; - - /* Copy the SPA dense into L\U[*,j] */ - new_next = nextlu + xlsub[fsupc+1] - xlsub[fsupc]; - while ( new_next > nzlumax ) { - if (mem_error = zLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, Glu)) - return (mem_error); - lusup = Glu->lusup; - lsub = Glu->lsub; - } - - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = zero; - ++nextlu; - } - - xlusup[jcolp1] = nextlu; /* Close L\U[*,jcol] */ - - /* For more updates within the panel (also within the current supernode), - * should start from the first column of the panel, or the first column - * of the supernode, whichever is bigger. There are 2 cases: - * 1) fsupc < fpanelc, then fst_col := fpanelc - * 2) fsupc >= fpanelc, then fst_col := fsupc - */ - fst_col = SUPERLU_MAX ( fsupc, fpanelc ); - - if ( fst_col < jcol ) { - - /* Distance between the current supernode and the current panel. - d_fsupc=0 if fsupc >= fpanelc. 
*/ - d_fsupc = fst_col - fsupc; - - lptr = xlsub[fsupc] + d_fsupc; - luptr = xlusup[fst_col] + d_fsupc; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; /* Leading dimension */ - nsupc = jcol - fst_col; /* Excluding jcol */ - nrow = nsupr - d_fsupc - nsupc; - - /* Points to the beginning of jcol in snode L\U(jsupno) */ - ufirst = xlusup[jcol] + d_fsupc; - - ops[TRSV] += 4 * nsupc * (nsupc - 1); - ops[GEMV] += 8 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#else - ztrsv_( "L", "N", "U", &nsupc, &lusup[luptr], - &nsupr, &lusup[ufirst], &incx ); -#endif - - alpha = none; beta = one; /* y := beta*y + alpha*A*x */ - -#ifdef _CRAY - CGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - zgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - zlsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - - zmatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], tempv ); - - /* Copy updates from tempv[*] into lusup[*] */ - isub = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - z_sub(&lusup[isub], &lusup[isub], &tempv[i]); - tempv[i] = zero; - ++isub; - } - -#endif - - - } /* if fst_col < jcol ... */ - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcolumn_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcolumn_dfs.c deleted file mode 100644 index 0f8c6ed38c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcolumn_dfs.c +++ /dev/null @@ -1,275 +0,0 @@ - -/*! @file zcolumn_dfs.c - * \brief Performs a symbolic factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    -*/ - -#include "slu_zdefs.h" - -/*! \brief What type of supernodes we want */ -#define T2_SUPER - - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   ZCOLUMN_DFS performs a symbolic factorization on column jcol, and
    - *   decide the supernode boundary.
    - *
    - *   This routine does not use numeric values, but only use the RHS 
    - *   row indices to start the dfs.
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives. The routine returns a list of such supernodal 
    - *   representatives in topological order of the dfs that generates them.
    - *   The location of the first nonzero in each such supernodal segment
    - *   (supernodal entry location) is also returned.
    - *
    - * Local parameters
    - * ================
    - *   nseg: no of segments in current U[*,j]
    - *   jsuper: jsuper=EMPTY if column j does not belong to the same
    - *	supernode as j-1. Otherwise, jsuper=nsuper.
    - *
    - *   marker2: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - *
    - * Return value
    - * ============
    - *     0  success;
    - *   > 0  number of bytes allocated when run out of space.
    - * 
    - */ -int -zcolumn_dfs( - const int m, /* in - number of rows in the matrix */ - const int jcol, /* in */ - int *perm_r, /* in */ - int *nseg, /* modified - with new segments appended */ - int *lsub_col, /* in - defines the RHS vector to start the dfs */ - int *segrep, /* modified - with new segments appended */ - int *repfnz, /* modified */ - int *xprune, /* modified */ - int *marker, /* modified */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - int jcolp1, jcolm1, jsuper, nsuper, nextl; - int k, krep, krow, kmark, kperm; - int *marker2; /* Used for small panel LU */ - int fsupc; /* First column of a snode */ - int myfnz; /* First nonz column of a U-segment */ - int chperm, chmark, chrep, kchild; - int xdfs, maxdfs, kpar, oldrep; - int jptr, jm1ptr; - int ito, ifrom, istop; /* Used to compress row subscripts */ - int mem_error; - int *xsup, *supno, *lsub, *xlsub; - int nzlmax; - static int first = 1, maxsuper; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - if ( first ) { - maxsuper = sp_ienv(3); - first = 0; - } - jcolp1 = jcol + 1; - jcolm1 = jcol - 1; - nsuper = supno[jcol]; - jsuper = nsuper; - nextl = xlsub[jcol]; - marker2 = &marker[2*m]; - - - /* For each nonzero in A[*,jcol] do dfs */ - for (k = 0; lsub_col[k] != EMPTY; k++) { - - krow = lsub_col[k]; - lsub_col[k] = EMPTY; - kmark = marker2[krow]; - - /* krow was visited before, go to the next nonz */ - if ( kmark == jcol ) continue; - - /* For each unmarked nbr krow of jcol - * krow is in L: place it in structure of L[*,jcol] - */ - marker2[krow] = jcol; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - lsub[nextl++] = krow; /* krow is indexed into A */ - if ( nextl >= nzlmax ) { - if ( mem_error = zLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( kmark != jcolm1 ) jsuper = EMPTY;/* Row index subset testing */ - } 
else { - /* krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz[krep]; - - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > kperm ) repfnz[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker2[kchild]; - - if ( chmark != jcol ) { /* Not reached yet */ - marker2[kchild] = jcol; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,k] */ - if ( chperm == EMPTY ) { - lsub[nextl++] = kchild; - if ( nextl >= nzlmax ) { - if ( mem_error = - zLUMemXpand(jcol,nextl,LSUB,&nzlmax,Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - if ( chmark != jcolm1 ) jsuper = EMPTY; - } else { - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz[chrep]; - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz[chrep] = chperm; - } else { - /* Continue dfs at super-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L^t) */ - parent[krep] = oldrep; - repfnz[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - } /* else */ - - } /* else */ - - } /* if */ - - } /* while */ - - /* krow has no more unexplored nbrs; - * place supernode-rep krep in postorder DFS. 
- * backtrack dfs to its parent - */ - segrep[*nseg] = krep; - ++(*nseg); - kpar = parent[krep]; /* Pop from stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - - } while ( kpar != EMPTY ); /* Until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonzero ... */ - - /* Check to see if j belongs in the same supernode as j-1 */ - if ( jcol == 0 ) { /* Do nothing for column 0 */ - nsuper = supno[0] = 0; - } else { - fsupc = xsup[nsuper]; - jptr = xlsub[jcol]; /* Not compressed yet */ - jm1ptr = xlsub[jcolm1]; - -#ifdef T2_SUPER - if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = EMPTY; -#endif - /* Make sure the number of columns in a supernode doesn't - exceed threshold. */ - if ( jcol - fsupc >= maxsuper ) jsuper = EMPTY; - - /* If jcol starts a new supernode, reclaim storage space in - * lsub from the previous supernode. Note we only store - * the subscript set of the first and last columns of - * a supernode. 
(first for num values, last for pruning) - */ - if ( jsuper == EMPTY ) { /* starts a new supernode */ - if ( (fsupc < jcolm1-1) ) { /* >= 3 columns in nsuper */ -#ifdef CHK_COMPRESS - printf(" Compress lsub[] at super %d-%d\n", fsupc, jcolm1); -#endif - ito = xlsub[fsupc+1]; - xlsub[jcolm1] = ito; - istop = ito + jptr - jm1ptr; - xprune[jcolm1] = istop; /* Initialize xprune[jcol-1] */ - xlsub[jcol] = istop; - for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) - lsub[ito] = lsub[ifrom]; - nextl = ito; /* = istop + length(jcol) */ - } - nsuper++; - supno[jcol] = nsuper; - } /* if a new supernode */ - - } /* else: jcol > 0 */ - - /* Tidy up the pointers before exit */ - xsup[nsuper+1] = jcolp1; - supno[jcolp1] = nsuper; - xprune[jcol] = nextl; /* Initialize upper bound for pruning */ - xlsub[jcolp1] = nextl; - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcopy_to_ucol.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcopy_to_ucol.c deleted file mode 100644 index 8f5a05165b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zcopy_to_ucol.c +++ /dev/null @@ -1,103 +0,0 @@ - -/*! @file zcopy_to_ucol.c - * \brief Copy a computed column of U to the compressed data structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_zdefs.h" - -int -zcopy_to_ucol( - int jcol, /* in */ - int nseg, /* in */ - int *segrep, /* in */ - int *repfnz, /* in */ - int *perm_r, /* in */ - doublecomplex *dense, /* modified - reset to zero on return */ - GlobalLU_t *Glu /* modified */ - ) -{ -/* - * Gather from SPA dense[*] to global ucol[*]. - */ - int ksub, krep, ksupno; - int i, k, kfnz, segsze; - int fsupc, isub, irow; - int jsupno, nextu; - int new_next, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - doublecomplex *ucol; - int *usub, *xusub; - int nzumax; - doublecomplex zero = {0.0, 0.0}; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - nzumax = Glu->nzumax; - - jsupno = supno[jcol]; - nextu = xusub[jcol]; - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { - krep = segrep[k--]; - ksupno = supno[krep]; - - if ( ksupno != jsupno ) { /* Should go into ucol[] */ - kfnz = repfnz[krep]; - if ( kfnz != EMPTY ) { /* Nonzero U-segment */ - - fsupc = xsup[ksupno]; - isub = xlsub[fsupc] + kfnz - fsupc; - segsze = krep - kfnz + 1; - - new_next = nextu + segsze; - while ( new_next > nzumax ) { - if (mem_error = zLUMemXpand(jcol, nextu, UCOL, &nzumax, Glu)) - return (mem_error); - ucol = Glu->ucol; - if (mem_error = zLUMemXpand(jcol, nextu, USUB, &nzumax, Glu)) - return (mem_error); - usub = Glu->usub; - lsub = Glu->lsub; - } - - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - usub[nextu] = perm_r[irow]; - ucol[nextu] = dense[irow]; - dense[irow] = zero; - nextu++; - isub++; - } - - } - - } - - } /* for each segment... 
*/ - - xusub[jcol + 1] = nextu; /* Close U[*,jcol] */ - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zdiagonal.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zdiagonal.c deleted file mode 100644 index ddd9c3260d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zdiagonal.c +++ /dev/null @@ -1,133 +0,0 @@ - -/*! @file zdiagonal.c - * \brief Auxiliary routines to work with diagonal elements - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory
    - * June 30, 2009
    - * 
    - */ - -#include "slu_zdefs.h" - -int zfill_diag(int n, NCformat *Astore) -/* fill explicit zeros on the diagonal entries, so that the matrix is not - structurally singular. */ -{ - doublecomplex *nzval = (doublecomplex *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - doublecomplex *nzval_new; - doublecomplex zero = {1.0, 0.0}; - int *rowind_new; - int i, j, diag; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = doublecomplexMalloc(nnz + fill); - rowind_new = intMalloc(nnz + fill); - fill = 0; - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - nzval_new[j + fill] = nzval[j]; - } - if (diag < 0) - { - rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill] = zero; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - Astore->nnz += fill; - return fill; -} - -int zdominate(int n, NCformat *Astore) -/* make the matrix diagonally dominant */ -{ - doublecomplex *nzval = (doublecomplex *)Astore->nzval; - int *rowind = Astore->rowind; - int *colptr = Astore->colptr; - int nnz = colptr[n]; - int fill = 0; - doublecomplex *nzval_new; - int *rowind_new; - int i, j, diag; - double s; - - for (i = 0; i < n; i++) - { - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - if (rowind[j] == i) diag = j; - if (diag < 0) fill++; - } - if (fill) - { - nzval_new = doublecomplexMalloc(nnz + fill); - rowind_new = intMalloc(nnz+ fill); - fill = 0; - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i] - fill; j < colptr[i + 1]; j++) - { - if ((rowind_new[j + fill] = rowind[j]) == i) diag = j; - nzval_new[j + fill] = nzval[j]; - s += 
z_abs1(&nzval_new[j + fill]); - } - if (diag >= 0) { - nzval_new[diag+fill].r = s * 3.0; - nzval_new[diag+fill].i = 0.0; - } else { - rowind_new[colptr[i + 1] + fill] = i; - nzval_new[colptr[i + 1] + fill].r = s * 3.0; - nzval_new[colptr[i + 1] + fill].i = 0.0; - fill++; - } - colptr[i + 1] += fill; - } - Astore->nzval = nzval_new; - Astore->rowind = rowind_new; - SUPERLU_FREE(nzval); - SUPERLU_FREE(rowind); - } - else - { - for (i = 0; i < n; i++) - { - s = 1e-6; - diag = -1; - for (j = colptr[i]; j < colptr[i + 1]; j++) - { - if (rowind[j] == i) diag = j; - s += z_abs1(&nzval[j]); - } - nzval[diag].r = s * 3.0; - nzval[diag].i = 0.0; - } - } - Astore->nnz += fill; - return fill; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgscon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgscon.c deleted file mode 100644 index 8bb95aa3db..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgscon.c +++ /dev/null @@ -1,154 +0,0 @@ - -/*! @file zgscon.c - * \brief Estimates reciprocal of the condition number of a general matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routines ZGECON.
    - * 
    - */ - -/* - * File name: zgscon.c - * History: Modified from lapack routines ZGECON. - */ -#include -#include "slu_zdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   ZGSCON estimates the reciprocal of the condition number of a general 
    - *   real matrix A, in either the 1-norm or the infinity-norm, using   
    - *   the LU factorization computed by ZGETRF.   *
    - *
    - *   An estimate is obtained for norm(inv(A)), and the reciprocal of the   
    - *   condition number is computed as   
    - *      RCOND = 1 / ( norm(A) * norm(inv(A)) ).   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - *   Arguments   
    - *   =========   
    - *
    - *    NORM    (input) char*
    - *            Specifies whether the 1-norm condition number or the   
    - *            infinity-norm condition number is required:   
    - *            = '1' or 'O':  1-norm;   
    - *            = 'I':         Infinity-norm.
    - *	    
    - *    L       (input) SuperMatrix*
    - *            The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *            zgstrf(). Use compressed row subscripts storage for supernodes,
    - *            i.e., L has types: Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - * 
    - *    U       (input) SuperMatrix*
    - *            The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *            zgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *            Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_TRU.
    - *	    
    - *    ANORM   (input) double
    - *            If NORM = '1' or 'O', the 1-norm of the original matrix A.   
    - *            If NORM = 'I', the infinity-norm of the original matrix A.
    - *	    
    - *    RCOND   (output) double*
    - *           The reciprocal of the condition number of the matrix A,   
    - *           computed as RCOND = 1/(norm(A) * norm(inv(A))).
    - *	    
    - *    INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    ===================================================================== 
    - * 
    - */ - -void -zgscon(char *norm, SuperMatrix *L, SuperMatrix *U, - double anorm, double *rcond, SuperLUStat_t *stat, int *info) -{ - - - /* Local variables */ - int kase, kase1, onenrm, i; - double ainvnm; - doublecomplex *work; - extern int zrscl_(int *, doublecomplex *, doublecomplex *, int *); - - extern int zlacon_(int *, doublecomplex *, doublecomplex *, double *, int *); - - - /* Test the input parameters. */ - *info = 0; - onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O"); - if (! onenrm && ! lsame_(norm, "I")) *info = -1; - else if (L->nrow < 0 || L->nrow != L->ncol || - L->Stype != SLU_SC || L->Dtype != SLU_Z || L->Mtype != SLU_TRLU) - *info = -2; - else if (U->nrow < 0 || U->nrow != U->ncol || - U->Stype != SLU_NC || U->Dtype != SLU_Z || U->Mtype != SLU_TRU) - *info = -3; - if (*info != 0) { - i = -(*info); - xerbla_("zgscon", &i); - return; - } - - /* Quick return if possible */ - *rcond = 0.; - if ( L->nrow == 0 || U->nrow == 0) { - *rcond = 1.; - return; - } - - work = doublecomplexCalloc( 3*L->nrow ); - - - if ( !work ) - ABORT("Malloc fails for work arrays in zgscon."); - - /* Estimate the norm of inv(A). */ - ainvnm = 0.; - if ( onenrm ) kase1 = 1; - else kase1 = 2; - kase = 0; - - do { - zlacon_(&L->nrow, &work[L->nrow], &work[0], &ainvnm, &kase); - - if (kase == 0) break; - - if (kase == kase1) { - /* Multiply by inv(L). */ - sp_ztrsv("L", "No trans", "Unit", L, U, &work[0], stat, info); - - /* Multiply by inv(U). */ - sp_ztrsv("U", "No trans", "Non-unit", L, U, &work[0], stat, info); - - } else { - - /* Multiply by inv(U'). */ - sp_ztrsv("U", "Transpose", "Non-unit", L, U, &work[0], stat, info); - - /* Multiply by inv(L'). */ - sp_ztrsv("L", "Transpose", "Unit", L, U, &work[0], stat, info); - - } - - } while ( kase != 0 ); - - /* Compute the estimate of the reciprocal condition number. */ - if (ainvnm != 0.) *rcond = (1. 
/ ainvnm) / anorm; - - SUPERLU_FREE (work); - return; - -} /* zgscon */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsequ.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsequ.c deleted file mode 100644 index 657637dee1..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsequ.c +++ /dev/null @@ -1,195 +0,0 @@ - -/*! @file zgsequ.c - * \brief Computes row and column scalings - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from LAPACK routine ZGEEQU
    - * 
    - */ -/* - * File name: zgsequ.c - * History: Modified from LAPACK routine ZGEEQU - */ -#include -#include "slu_zdefs.h" - - - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   ZGSEQU computes row and column scalings intended to equilibrate an   
    - *   M-by-N sparse matrix A and reduce its condition number. R returns the row
    - *   scale factors and C the column scale factors, chosen to try to make   
    - *   the largest element in each row and column of the matrix B with   
    - *   elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1.   
    - *
    - *   R(i) and C(j) are restricted to be between SMLNUM = smallest safe   
    - *   number and BIGNUM = largest safe number.  Use of these scaling   
    - *   factors is not guaranteed to reduce the condition number of A but   
    - *   works well in practice.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input) SuperMatrix*
    - *           The matrix of dimension (A->nrow, A->ncol) whose equilibration
    - *           factors are to be computed. The type of A can be:
    - *           Stype = SLU_NC; Dtype = SLU_Z; Mtype = SLU_GE.
    - *	    
    - *   R       (output) double*, size A->nrow
    - *           If INFO = 0 or INFO > M, R contains the row scale factors   
    - *           for A.
    - *	    
    - *   C       (output) double*, size A->ncol
    - *           If INFO = 0,  C contains the column scale factors for A.
    - *	    
    - *   ROWCND  (output) double*
    - *           If INFO = 0 or INFO > M, ROWCND contains the ratio of the   
    - *           smallest R(i) to the largest R(i).  If ROWCND >= 0.1 and   
    - *           AMAX is neither too large nor too small, it is not worth   
    - *           scaling by R.
    - *	    
    - *   COLCND  (output) double*
    - *           If INFO = 0, COLCND contains the ratio of the smallest   
    - *           C(i) to the largest C(i).  If COLCND >= 0.1, it is not   
    - *           worth scaling by C.
    - *	    
    - *   AMAX    (output) double*
    - *           Absolute value of largest matrix element.  If AMAX is very   
    - *           close to overflow or very close to underflow, the matrix   
    - *           should be scaled.
    - *	    
    - *   INFO    (output) int*
    - *           = 0:  successful exit   
    - *           < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *           > 0:  if INFO = i,  and i is   
    - *                 <= A->nrow:  the i-th row of A is exactly zero   
    - *                 >  A->ncol:  the (i-M)-th column of A is exactly zero   
    - *
    - *   ===================================================================== 
    - * 
    - */ -void -zgsequ(SuperMatrix *A, double *r, double *c, double *rowcnd, - double *colcnd, double *amax, int *info) -{ - - - /* Local variables */ - NCformat *Astore; - doublecomplex *Aval; - int i, j, irow; - double rcmin, rcmax; - double bignum, smlnum; - extern double dlamch_(char *); - - /* Test the input parameters. */ - *info = 0; - if ( A->nrow < 0 || A->ncol < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_Z || A->Mtype != SLU_GE ) - *info = -1; - if (*info != 0) { - i = -(*info); - xerbla_("zgsequ", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || A->ncol == 0 ) { - *rowcnd = 1.; - *colcnd = 1.; - *amax = 0.; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Get machine constants. */ - smlnum = dlamch_("S"); - bignum = 1. / smlnum; - - /* Compute row scale factors. */ - for (i = 0; i < A->nrow; ++i) r[i] = 0.; - - /* Find the maximum element in each row. */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - r[irow] = SUPERLU_MAX( r[irow], z_abs1(&Aval[i]) ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (i = 0; i < A->nrow; ++i) { - rcmax = SUPERLU_MAX(rcmax, r[i]); - rcmin = SUPERLU_MIN(rcmin, r[i]); - } - *amax = rcmax; - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (i = 0; i < A->nrow; ++i) - if (r[i] == 0.) { - *info = i + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (i = 0; i < A->nrow; ++i) - r[i] = 1. / SUPERLU_MIN( SUPERLU_MAX( r[i], smlnum ), bignum ); - /* Compute ROWCND = min(R(I)) / max(R(I)) */ - *rowcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - /* Compute column scale factors */ - for (j = 0; j < A->ncol; ++j) c[j] = 0.; - - /* Find the maximum element in each column, assuming the row - scalings computed above. 
*/ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - c[j] = SUPERLU_MAX( c[j], z_abs1(&Aval[i]) * r[irow] ); - } - - /* Find the maximum and minimum scale factors. */ - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->ncol; ++j) { - rcmax = SUPERLU_MAX(rcmax, c[j]); - rcmin = SUPERLU_MIN(rcmin, c[j]); - } - - if (rcmin == 0.) { - /* Find the first zero scale factor and return an error code. */ - for (j = 0; j < A->ncol; ++j) - if ( c[j] == 0. ) { - *info = A->nrow + j + 1; - return; - } - } else { - /* Invert the scale factors. */ - for (j = 0; j < A->ncol; ++j) - c[j] = 1. / SUPERLU_MIN( SUPERLU_MAX( c[j], smlnum ), bignum); - /* Compute COLCND = min(C(J)) / max(C(J)) */ - *colcnd = SUPERLU_MAX( rcmin, smlnum ) / SUPERLU_MIN( rcmax, bignum ); - } - - return; - -} /* zgsequ */ - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsisx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsisx.c deleted file mode 100644 index 5d4d785fe3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsisx.c +++ /dev/null @@ -1,693 +0,0 @@ - -/*! @file zgsisx.c - * \brief Gives the approximate solutions of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * ZGSISX gives the approximate solutions of linear equations A*X=B or A'*X=B,
    - * using the ILU factorization from zgsitrf(). An estimation of
    - * the condition number is provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *	1.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A is
    - *	     overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *	     (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *	     = TRANS or CONJ).
    - *
    - *	1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *	     matrix that usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the matrix A (after equilibration if options->Equil = YES)
    - *	     as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *	1.4. Compute the reciprocal pivot growth factor.
    - *
    - *	1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n),
    - *	     and info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	1.6. The system of equations is solved for X using the factored form
    - *	     of A.
    - *
    - *	1.7. options->IterRefine is not used
    - *
    - *	1.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *	1.9. options for ILU only
    - *	     1) If options->RowPerm = LargeDiag, MC64 is used to scale and
    - *		permute the matrix to an I-matrix, that is Pr*Dr*A*Dc has
    - *		entries of modulus 1 on the diagonal and off-diagonal entries
    - *		of modulus at most 1. If MC64 fails, dgsequ() is used to
    - *		equilibrate the system.
    - *	     2) options->ILU_DropTol = tau is the threshold for dropping.
    - *		For L, it is used directly (for the whole row in a supernode);
    - *		For U, ||A(:,i)||_oo * tau is used as the threshold
    - *	        for the	i-th column.
    - *		If a secondary dropping rule is required, tau will
    - *	        also be used to compute the second threshold.
    - *	     3) options->ILU_FillFactor = gamma, used as the initial guess
    - *		of memory growth.
    - *		If a secondary dropping rule is required, it will also
    - *              be used as an upper bound of the memory.
    - *	     4) options->ILU_DropRule specifies the dropping rule.
    - *		Option		Explanation
    - *		======		===========
    - *		DROP_BASIC:	Basic dropping rule, supernodal based ILU.
    - *		DROP_PROWS:	Supernodal based ILUTP, p = gamma * nnz(A) / n.
    - *		DROP_COLUMN:	Variation of ILUTP, for j-th column,
    - *				p = gamma * nnz(A(:,j)).
    - *		DROP_AREA;	Variation of ILUTP, for j-th column, use
    - *				nnz(F(:,1:j)) / nnz(A(:,1:j)) to control the
    - *				memory.
    - *		DROP_DYNAMIC:	Modify the threshold tau during the
    - *				factorizaion.
    - *				If nnz(L(:,1:j)) / nnz(A(:,1:j)) < gamma
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				Otherwise
    - *				    tau_L(j) := MIN(1, tau_L(j-1) * 2);
    - *				tau_U(j) uses the similar rule.
    - *				NOTE: the thresholds used by L and U are
    - *				indenpendent.
    - *		DROP_INTERP:	Compute the second dropping threshold by
    - *				interpolation instead of sorting (default).
    - *				In this case, the actual fill ratio is not
    - *				guaranteed smaller than gamma.
    - *		DROP_PROWS, DROP_COLUMN and DROP_AREA are mutually exclusive.
    - *		( The default option is DROP_BASIC | DROP_AREA. )
    - *	     5) options->ILU_Norm is the criterion of computing the average
    - *		value of a row in L.
    - *		options->ILU_Norm	average(x[1:n])
    - *		=================	===============
    - *		ONE_NORM		||x||_1 / n
    - *		TWO_NORM		||x||_2 / sqrt(n)
    - *		INF_NORM		max{|x[i]|}
    - *	     6) options->ILU_MILU specifies the type of MILU's variation.
    - *		= SILU (default): do not perform MILU;
    - *		= SMILU_1 (not recommended):
    - *		    U(i,i) := U(i,i) + sum(dropped entries);
    - *		= SMILU_2:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(dropped entries);
    - *		= SMILU_3:
    - *		    U(i,i) := U(i,i) + SGN(U(i,i)) * sum(|dropped entries|);
    - *		NOTE: Even SMILU_1 does not preserve the column sum because of
    - *		late dropping.
    - *	     7) options->ILU_FillTol is used as the perturbation when
    - *		encountering zero pivots. If some U(i,i) = 0, so that U is
    - *		exactly singular, then
    - *		   U(i,i) := ||A(:,i)|| * options->ILU_FillTol ** (1 - i / n).
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *	to the transpose of A:
    - *
    - *	2.1. If options->Equil = YES or options->RowPerm = LargeDiag, scaling
    - *	     factors are computed to equilibrate the system:
    - *	     options->Trans = NOTRANS:
    - *		 diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *	     options->Trans = TRANS:
    - *		 (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *	     options->Trans = CONJ:
    - *		 (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *	     Whether or not the system will be equilibrated depends on the
    - *	     scaling of the matrix A, but if equilibration is used, A' is
    - *	     overwritten by diag(R)*A'*diag(C) and B by diag(R)*B
    - *	     (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *	2.2. Permute columns of transpose(A) (rows of A),
    - *	     forming transpose(A)*Pc, where Pc is a permutation matrix that
    - *	     usually preserves sparsity.
    - *	     For more details of this step, see sp_preorder.c.
    - *
    - *	2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *	     factor the transpose(A) (after equilibration if
    - *	     options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *	     permutation Pr determined by partial pivoting.
    - *
    - *	2.4. Compute the reciprocal pivot growth factor.
    - *
    - *	2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *	     routine fills a small number on the diagonal entry, that is
    - *		 U(i,i) = ||A(:,i)||_oo * options->ILU_FillTol ** (1 - i / n).
    - *	     And info will be increased by 1. The factored form of A is used
    - *	     to estimate the condition number of the preconditioner. If the
    - *	     reciprocal of the condition number is less than machine precision,
    - *	     info = A->ncol+1 is returned as a warning, but the routine still
    - *	     goes on to solve for X.
    - *
    - *	2.6. The system of equations is solved for X using the factored form
    - *	     of transpose(A).
    - *
    - *	2.7. If options->IterRefine is not used.
    - *
    - *	2.8. If equilibration was used, the matrix X is premultiplied by
    - *	     diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *	     (if options->Trans = TRANS or CONJ) so that it solves the
    - *	     original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the LU decomposition will be performed and how the
    - *	   system will be solved.
    - *
    - * A	   (input/output) SuperMatrix*
    - *	   Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *	   of the linear equations is A->nrow. Currently, the type of A can be:
    - *	   Stype = SLU_NC or SLU_NR, Dtype = SLU_Z, Mtype = SLU_GE.
    - *	   In the future, more general A may be handled.
    - *
    - *	   On entry, If options->Fact = FACTORED and equed is not 'N',
    - *	   then A must have been equilibrated by the scaling factors in
    - *	   R and/or C.
    - *	   On exit, A is not modified if options->Equil = NO, or if
    - *	   options->Equil = YES but equed = 'N' on exit.
    - *	   Otherwise, if options->Equil = YES and equed is not 'N',
    - *	   A is scaled as follows:
    - *	   If A->Stype = SLU_NC:
    - *	     equed = 'R':  A := diag(R) * A
    - *	     equed = 'C':  A := A * diag(C)
    - *	     equed = 'B':  A := diag(R) * A * diag(C).
    - *	   If A->Stype = SLU_NR:
    - *	     equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *	     equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *	     equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *	   which defines the permutation matrix Pc; perm_c[i] = j means
    - *	   column i of A is in position j in A*Pc.
    - *	   On exit, perm_c may be overwritten by the product of the input
    - *	   perm_c and a permutation that postorders the elimination tree
    - *	   of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *	   is already in postorder.
    - *
    - *	   If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *	   which describes permutation of columns of transpose(A) 
    - *	   (rows of A) as described above.
    - *
    - * perm_r  (input/output) int*
    - *	   If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *	   which defines the permutation matrix Pr, and is determined
    - *	   by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *	   position j in Pr*A.
    - *
    - *	   If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *	   determines permutation of rows of transpose(A)
    - *	   (columns of A) as described above.
    - *
    - *	   If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	   will try to use the input perm_r, unless a certain threshold
    - *	   criterion is violated. In that case, perm_r is overwritten by a
    - *	   new permutation determined by partial pivoting or diagonal
    - *	   threshold pivoting.
    - *	   Otherwise, perm_r is output argument.
    - *
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *	   Elimination tree of Pc'*A'*A*Pc.
    - *	   If options->Fact != FACTORED and options->Fact != DOFACT,
    - *	   etree is an input argument, otherwise it is an output argument.
    - *	   Note: etree is a vector of parent pointers for a forest whose
    - *	   vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *	   Specifies the form of equilibration that was done.
    - *	   = 'N': No equilibration.
    - *	   = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *	   = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *	   = 'B': Both row and column equilibration, i.e., A was replaced 
    - *		  by diag(R)*A*diag(C).
    - *	   If options->Fact = FACTORED, equed is an input argument,
    - *	   otherwise it is an output argument.
    - *
    - * R	   (input/output) double*, dimension (A->nrow)
    - *	   The row scale factors for A or transpose(A).
    - *	   If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *	   If equed = 'N' or 'C', R is not accessed.
    - *	   If options->Fact = FACTORED, R is an input argument,
    - *	       otherwise, R is output.
    - *	   If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *	       of R must be positive.
    - *
    - * C	   (input/output) double*, dimension (A->ncol)
    - *	   The column scale factors for A or transpose(A).
    - *	   If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *	       (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *	   If equed = 'N' or 'R', C is not accessed.
    - *	   If options->Fact = FACTORED, C is an input argument,
    - *	       otherwise, C is output.
    - *	   If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *	       of C must be positive.
    - *
    - * L	   (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype SLU_= NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses compressed row subscripts storage for supernodes, i.e.,
    - *	   L has types: Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - *
    - * U	   (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *	       Pr*A*Pc=L*U		(if A->Stype = SLU_NC) or
    - *	       Pr*transpose(A)*Pc=L*U	(if A->Stype = SLU_NR).
    - *	   Uses column-wise storage scheme, i.e., U has types:
    - *	   Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *	   User supplied workspace, should be large enough
    - *	   to hold data structures for factors L and U.
    - *	   On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 mem_usage->total_needed; no other side effects.
    - *
    - *	   See argument 'mem_usage' for memory usage statistics.
    - *
    - * B	   (input/output) SuperMatrix*
    - *	   B has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *	   On entry, the right hand side matrix.
    - *	   If B->ncol = 0, only LU decomposition is performed, the triangular
    - *			   solve is skipped.
    - *	   On exit,
    - *	      if equed = 'N', B is not modified; otherwise
    - *	      if A->Stype = SLU_NC:
    - *		 if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *		    B is overwritten by diag(R)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *		    B is overwritten by diag(C)*B;
    - *	      if A->Stype = SLU_NR:
    - *		 if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *		    B is overwritten by diag(C)*B;
    - *		 if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *		    B is overwritten by diag(R)*B.
    - *
    - * X	   (output) SuperMatrix*
    - *	   X has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *	   If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *	   to the original system of equations. Note that A and B are modified
    - *	   on exit if equed is not 'N', and the solution to the equilibrated
    - *	   system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *	   equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *	   and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) double*
    - *	   The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *	   The infinity norm is used. If recip_pivot_growth is much less
    - *	   than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) double*
    - *	   The estimate of the reciprocal condition number of the matrix A
    - *	   after equilibration (if done). If rcond is less than the machine
    - *	   precision (in particular, if rcond = 0), the matrix is singular
    - *	   to working precision. This condition is indicated by a return
    - *	   code of info > 0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *	   Record the memory usage statistics, consisting of following fields:
    - *	   - for_lu (float)
    - *	     The amount of space used in bytes for L\U data structures.
    - *	   - total_needed (float)
    - *	     The amount of space needed in bytes to perform factorization.
    - *	   - expansions (int)
    - *	     The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *	  Record the statistics on runtime and floating-point operation count.
    - *	  See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - *	   > 0: if info = i, and i is
    - *		<= A->ncol: number of zero pivots. They are replaced by small
    - *		      entries due to options->ILU_FillTol.
    - *		= A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *		      precision, meaning that the matrix is singular to
    - *		      working precision. Nevertheless, the solution and
    - *		      error bounds are computed because there are a number
    - *		      of situations where the computed solution can be more
    - *		      accurate than the value of RCOND would suggest.
    - *		> A->ncol+1: number of bytes allocated when memory allocation
    - *		      failure occurred, plus A->ncol.
    - * 
    - */ - -void -zgsisx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, double *R, double *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, - double *recip_pivot_growth, double *rcond, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info) -{ - - DNformat *Bstore, *Xstore; - doublecomplex *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec, mc64; - trans_t trant; - char norm[1]; - int i, j, info1; - double amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - double diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - int *perm = NULL; - - /* External functions */ - extern double zlangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - mc64 = (options->RowPerm == LargeDiag); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = dlamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_Z || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_Z || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_Z || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("zgsisx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - zCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact ) { - register int i, j; - NCformat *Astore = AA->Store; - int nnz = Astore->nnz; - int *colptr = Astore->colptr; - int *rowind = Astore->rowind; - doublecomplex *nzval = (doublecomplex *)Astore->nzval; - int n = AA->nrow; - - if ( mc64 ) { - *equed = 'B'; - rowequ = colequ = 1; - t0 = SuperLU_timer_(); - if ((perm = intMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for perm[]"); - - info1 = zldperm(5, n, nnz, colptr, rowind, nzval, perm, R, C); - - if (info1 > 0) { /* MC64 fails, call zgsequ() later */ - mc64 = 0; - SUPERLU_FREE(perm); - perm = NULL; - } else { - for (i = 0; i < n; i++) { - R[i] = exp(R[i]); - C[i] = exp(C[i]); - } - /* permute and scale the matrix */ - for (j = 0; j < n; j++) { - for (i = colptr[j]; i < colptr[j + 1]; i++) { - zd_mult(&nzval[i], &nzval[i], R[rowind[i]] * C[j]); - rowind[i] = perm[rowind[i]]; - } - } - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - if ( !mc64 & equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - zgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - zlaqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. 
*/ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], R[i]); - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], C[i]); - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t0; - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - zgsitrf(options, &AC, relax, panel_size, etree, work, lwork, - perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) return; - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = zPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = zlangs(norm, AA); - zgscon(norm, L, U, anorm, rcond, stat, &info1); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. 
*/ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - zgstrs (trant, L, U, perm_c, perm_r, X, stat, &info1); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original - system. */ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], C[i]); - } - } - } else { - if ( rowequ ) { - if (perm) { - doublecomplex *tmp; - int n = A->nrow; - - if ((tmp = doublecomplexMalloc(n)) == NULL) - ABORT("SUPERLU_MALLOC fails for tmp[]"); - for (j = 0; j < nrhs; j++) { - for (i = 0; i < n; i++) - tmp[i] = Xmat[i + j * ldx]; /*dcopy*/ - for (i = 0; i < n; i++) - zd_mult(&Xmat[i+j*ldx], &tmp[perm[i]], R[i]); - } - SUPERLU_FREE(tmp); - } else { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], R[i]); - } - } - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < dlamch_("E") && *info == 0) *info = A->ncol + 1; - } - - if (perm) SUPERLU_FREE(perm); - - if ( nofact ) { - ilu_zQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsitrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsitrf.c deleted file mode 100644 index 409aff2769..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsitrf.c +++ /dev/null @@ -1,628 +0,0 @@ - -/*! @file zgsitf.c - * \brief Computes an ILU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_zdefs.h" - -#ifdef DEBUG -int num_drop_L; -#endif - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * ZGSITRF computes an ILU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *	   The structure defines the input parameters to control
    - *	   how the ILU decomposition will be performed.
    - *
    - * A	    (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *	    (A->nrow, A->ncol). The type of A can be:
    - *	    Stype = SLU_NCP; Dtype = SLU_Z; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *	    To control degree of relaxing supernodes. If the number
    - *	    of nodes (columns) in a subtree of the elimination tree is less
    - *	    than relax, this subtree is considered as one supernode,
    - *	    regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *	    A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *	    Elimination tree of A'*A.
    - *	    Note: etree is a vector of parent pointers for a forest whose
    - *	    vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *	    On input, the columns of A should be permuted so that the
    - *	    etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *	    User-supplied work space and space for the output data structures.
    - *	    Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *	   Specifies the size of work array in bytes.
    - *	   = 0:  allocate space internally by system malloc;
    - *	   > 0:  use user-supplied work array of length lwork in bytes,
    - *		 returns error if space runs out.
    - *	   = -1: the routine guesses the amount of space needed without
    - *		 performing the factorization, and returns it in
    - *		 *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the
    - *	    permutation matrix Pc; perm_c[i] = j means column i of A is
    - *	    in position j in A*Pc.
    - *	    When searching for diagonal, perm_c[*] is applied to the
    - *	    row subscripts of A, so that diagonal threshold pivoting
    - *	    can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *	    Row permutation vector which defines the permutation matrix Pr,
    - *	    perm_r[i] = j means row i of A is in position j in Pr*A.
    - *	    If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *	       will try to use the input perm_r, unless a certain threshold
    - *	       criterion is violated. In that case, perm_r is overwritten by
    - *	       a new permutation determined by partial pivoting or diagonal
    - *	       threshold pivoting.
    - *	    Otherwise, perm_r is output argument;
    - *
    - * L	    (output) SuperMatrix*
    - *	    The factor L from the factorization Pr*A=L*U; use compressed row
    - *	    subscripts storage for supernodes, i.e., L has type:
    - *	    Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - *
    - * U	    (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *	    storage scheme, i.e., U has types: Stype = SLU_NC,
    - *	    Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *	    Record the statistics on runtime and floating-point operation count.
    - *	    See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *	    = 0: successful exit
    - *	    < 0: if info = -i, the i-th argument had an illegal value
    - *	    > 0: if info = i, and i is
    - *	       <= A->ncol: number of zero pivots. They are replaced by small
    - *		  entries according to options->ILU_FillTol.
    - *	       > A->ncol: number of bytes allocated when memory allocation
    - *		  failure occurred, plus A->ncol. If lwork = -1, it is
    - *		  the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays:
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 4 of them:
    - *	      marker/marker1 are used for panel dfs, see (ilu_)dpanel_dfs.c;
    - *	      marker2 is used for inner-factorization, see (ilu)_dcolumn_dfs.c;
    - *	      marker_relax(has its own space) is used for relaxed supernodes.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *	Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs)
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the
    - *	last column of a supernode.
    - *	The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a
    - *	supernodal representative r, repfnz[r] is the location of the first
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel.
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below
    - *	the panel diagonal. These are filled in during dpanel_dfs(), and are
    - *	used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *		   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_util.h.
    - *	It is also used by the dropping routine ilu_ddrop_row().
    - * 
    - */ - -void -zgsitrf(superlu_options_t *options, SuperMatrix *A, int relax, int panel_size, - int *etree, void *work, int lwork, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *swap, *iswap; /* swap is used to store the row permutation - during the factorization. Initially, it is set - to iperm_c (row indeces of Pc*A*Pc'). - iswap is the inverse of swap. After the - factorization, it is equal to perm_r. */ - int *iwork; - doublecomplex *zwork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *marker, *marker_relax; - doublecomplex *dense, *tempv; - double *dtempv; - int *relax_end, *relax_fsupc; - doublecomplex *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - double *amax; - doublecomplex drop_sum; - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. 
*/ - int *iwork2; /* used by the second dropping rule */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - double drop_tol = options->ILU_DropTol; /* tau */ - double fill_ini = options->ILU_FillTol; /* tau^hat */ - double gamma = options->ILU_FillFactor; - int drop_rule = options->ILU_DropRule; - milu_t milu = options->ILU_MILU; - double fill_tol; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - int last_drop;/* the last column which the dropping rules applied */ - int quota; - int nnzAj; /* number of nonzeros in A(:,1:j) */ - int nnzLj, nnzUj; - double tol_L = drop_tol, tol_U = drop_tol; - doublecomplex zero = {0.0, 0.0}; - - /* Executable */ - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = zLUMemInit(fact, work, lwork, m, n, Astore->nnz, panel_size, - gamma, L, U, &Glu, &iwork, &zwork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - xlsub = Glu.xlsub; - xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &marker_relax, &marker); - zSetRWork(m, panel_size, zwork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) 
intMalloc(m); - for (k = 0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - swap = (int *)intMalloc(n); - for (k = 0; k < n; k++) swap[k] = iperm_c[k]; - iswap = (int *)intMalloc(n); - for (k = 0; k < n; k++) iswap[k] = perm_c[k]; - amax = (double *) doubleMalloc(panel_size); - if (drop_rule & DROP_SECONDARY) - iwork2 = (int *)intMalloc(n); - else - iwork2 = NULL; - - nnzAj = 0; - nnzLj = 0; - nnzUj = 0; - last_drop = SUPERLU_MAX(min_mn - 2 * sp_ienv(3), (int)(min_mn * 0.95)); - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - relax_fsupc = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) - ilu_heap_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - else - ilu_relax_snode(n, etree, relax, marker, relax_end, relax_fsupc); - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* Mark the rows used by relaxed supernodes */ - ifill (marker_relax, m, EMPTY); - i = mark_relax(m, relax_end, relax_fsupc, xa_begin, xa_end, - asub, marker_relax); -#if ( PRNTlevel >= 1) - printf("%d relaxed supernodes.\n", i); -#endif - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* Drop small rows in the previous supernode. 
*/ - if (jcol > 0 && jcol < last_drop) { - int first = xsup[supno[jcol - 1]]; - int last = jcol - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / min_mn); - - /* Drop small rows */ - dtempv = (double *) tempv; - i = ilu_zdrop_row(options, first, last, tol_L, quota, &nnzLj, - &fill_tol, &Glu, dtempv, iwork2, 0); - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = ilu_zsnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ((*info = zLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu))) - return; - } - - for (icol = jcol; icol <= kcol; icol++) { - xusub[icol+1] = nextu; - - amax[0] = 0.0; - /* Scatter into SPA dense[*] */ - for (k = xa_begin[icol]; k < xa_end[icol]; k++) { - register double tmp = z_abs1 (&a[k]); - if (tmp > amax[0]) amax[0] = tmp; - dense[asub[k]] = a[k]; - } - 
nnzAj += xa_end[icol] - xa_begin[icol]; - if (amax[0] == 0.0) { - amax[0] = fill_ini; -#if ( PRNTlevel >= 1) - printf("Column %d is entirely zero!\n", icol); - fflush(stdout); -#endif - } - - /* Numeric update within the snode */ - zsnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if (usepr) pivrow = iperm_r[icol]; - fill_tol = pow(fill_ini, 1.0 - (double)icol / (double)min_mn); - if ( (*info = ilu_zpivotL(icol, diag_pivot_thresh, &usepr, - perm_r, iperm_c[icol], swap, iswap, - marker_relax, &pivrow, - amax[0] * fill_tol, milu, zero, - &Glu, stat)) ) { - iinfo++; - marker[pivrow] = kcol; - } - - } - - jcol = kcol + 1; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - ilu_zpanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, amax, panel_lsub, segrep, repfnz, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - zpanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for (jj = jcol; jj < jcol + panel_size; jj++) { - - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - nnzAj += xa_end[jj] - xa_begin[jj]; - - if ((*info = ilu_zcolumn_dfs(m, jj, perm_r, &nseg, - &panel_lsub[k], segrep, &repfnz[k], - marker, parent, xplore, &Glu))) - return; - - /* Numeric updates */ - if ((*info = zcolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Make a fill-in position if the column is entirely zero */ - if 
(xlsub[jj + 1] == xlsub[jj]) { - register int i, row; - int nextl; - int nzlmax = Glu.nzlmax; - int *lsub = Glu.lsub; - int *marker2 = marker + 2 * m; - - /* Allocate memory */ - nextl = xlsub[jj] + 1; - if (nextl >= nzlmax) { - int error = zLUMemXpand(jj, nextl, LSUB, &nzlmax, &Glu); - if (error) { *info = error; return; } - lsub = Glu.lsub; - } - xlsub[jj + 1]++; - assert(xlusup[jj]==xlusup[jj+1]); - xlusup[jj + 1]++; - Glu.lusup[xlusup[jj]] = zero; - - /* Choose a row index (pivrow) for fill-in */ - for (i = jj; i < n; i++) - if (marker_relax[swap[i]] <= jj) break; - row = swap[i]; - marker2[row] = jj; - lsub[xlsub[jj]] = row; -#ifdef DEBUG - printf("Fill col %d.\n", jj); - fflush(stdout); -#endif - } - - /* Computer the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * jj / m; - else if (drop_rule & DROP_COLUMN) - quota = gamma * (xa_end[jj] - xa_begin[jj]) * - (jj + 1) / m; - else if (drop_rule & DROP_AREA) - quota = gamma * 0.9 * nnzAj * 0.5 - nnzUj; - else - quota = m; - - /* Copy the U-segments to ucol[*] and drop small entries */ - if ((*info = ilu_zcopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], drop_rule, - milu, amax[jj - jcol] * tol_U, - quota, &drop_sum, &nnzUj, &Glu, - iwork2)) != 0) - return; - - /* Reset the dropping threshold if required */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * 0.9 * nnzAj * 0.5 < nnzLj) - tol_U = SUPERLU_MIN(1.0, tol_U * 2.0); - else - tol_U = SUPERLU_MAX(drop_tol, tol_U * 0.5); - } - - zd_mult(&drop_sum, &drop_sum, MILU_ALPHA); - if (usepr) pivrow = iperm_r[jj]; - fill_tol = pow(fill_ini, 1.0 - (double)jj / (double)min_mn); - if ( (*info = ilu_zpivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_c[jj], swap, iswap, - marker_relax, &pivrow, - amax[jj - jcol] * fill_tol, milu, - drop_sum, &Glu, stat)) ) { - iinfo++; - marker[m + pivrow] = jj; - marker[2 * m + pivrow] = jj; - } - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - - /* Start a new 
supernode, drop the previous one */ - if (jj > 0 && supno[jj] > supno[jj - 1] && jj < last_drop) { - int first = xsup[supno[jj - 1]]; - int last = jj - 1; - int quota; - - /* Compute the quota */ - if (drop_rule & DROP_PROWS) - quota = gamma * Astore->nnz / m * (m - first) / m - * (last - first + 1); - else if (drop_rule & DROP_COLUMN) { - int i; - quota = 0; - for (i = first; i <= last; i++) - quota += xa_end[i] - xa_begin[i]; - quota = gamma * quota * (m - first) / m; - } else if (drop_rule & DROP_AREA) - quota = gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) - / m) - nnzLj; - else - quota = m * n; - fill_tol = pow(fill_ini, 1.0 - 0.5 * (first + last) / - (double)min_mn); - - /* Drop small rows */ - dtempv = (double *) tempv; - i = ilu_zdrop_row(options, first, last, tol_L, quota, - &nnzLj, &fill_tol, &Glu, dtempv, iwork2, - 1); - - /* Reset the parameters */ - if (drop_rule & DROP_DYNAMIC) { - if (gamma * nnzAj * (1.0 - 0.5 * (last + 1.0) / m) - < nnzLj) - tol_L = SUPERLU_MIN(1.0, tol_L * 2.0); - else - tol_L = SUPERLU_MAX(drop_tol, tol_L * 0.5); - } - if (fill_tol < 0) iinfo -= (int)fill_tol; -#ifdef DEBUG - num_drop_L += i * (last - first + 1); -#endif - } /* if start a new supernode */ - - } /* for */ - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - ilu_countnz(min_mn, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - zLUWorkFree(iwork, zwork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. 
- There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - zCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_Z, SLU_TRLU); - zCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_Z, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - SUPERLU_FREE (swap); - SUPERLU_FREE (iswap); - SUPERLU_FREE (relax_fsupc); - SUPERLU_FREE (amax); - if ( iwork2 ) SUPERLU_FREE (iwork2); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsrfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsrfs.c deleted file mode 100644 index c1a79d90dc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgsrfs.c +++ /dev/null @@ -1,460 +0,0 @@ - -/*! @file zgsrfs.c - * \brief Improves computed solution to a system of inear equations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Modified from lapack routine ZGERFS
    - * 
    - */ -/* - * File name: zgsrfs.c - * History: Modified from lapack routine ZGERFS - */ -#include -#include "slu_zdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   ZGSRFS improves the computed solution to a system of linear   
    - *   equations and provides error bounds and backward error estimates for 
    - *   the solution.   
    - *
    - *   If equilibration was performed, the system becomes:
    - *           (diag(R)*A_original*diag(C)) * X = diag(R)*B_original.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *   
    - *   A       (input) SuperMatrix*
    - *           The original matrix A in the system, or the scaled A if
    - *           equilibration was done. The type of A can be:
    - *           Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_GE.
    - *    
    - *   L       (input) SuperMatrix*
    - *	     The factor L from the factorization Pr*A*Pc=L*U. Use
    - *           compressed row subscripts storage for supernodes, 
    - *           i.e., L has types: Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - * 
    - *   U       (input) SuperMatrix*
    - *           The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *           zgstrf(). Use column-wise storage scheme, 
    - *           i.e., U has types: Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - *   perm_c  (input) int*, dimension (A->ncol)
    - *	     Column permutation vector, which defines the 
    - *           permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *           in position j in A*Pc.
    - *
    - *   perm_r  (input) int*, dimension (A->nrow)
    - *           Row permutation vector, which defines the permutation matrix Pr;
    - *           perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - *   equed   (input) Specifies the form of equilibration that was done.
    - *           = 'N': No equilibration.
    - *           = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *           = 'C': Column equilibration, i.e., A was postmultiplied by
    - *                  diag(C).
    - *           = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                  by diag(R)*A*diag(C).
    - *
    - *   R       (input) double*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *           If equed = 'R' or 'B', A is premultiplied by diag(R).
    - *           If equed = 'N' or 'C', R is not accessed.
    - * 
    - *   C       (input) double*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *           If equed = 'C' or 'B', A is postmultiplied by diag(C).
    - *           If equed = 'N' or 'R', C is not accessed.
    - *
    - *   B       (input) SuperMatrix*
    - *           B has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *           The right hand side matrix B.
    - *           if equed = 'R' or 'B', B is premultiplied by diag(R).
    - *
    - *   X       (input/output) SuperMatrix*
    - *           X has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *           On entry, the solution matrix X, as computed by zgstrs().
    - *           On exit, the improved solution matrix X.
    - *           if *equed = 'C' or 'B', X should be premultiplied by diag(C)
    - *               in order to obtain the solution to the original system.
    - *
    - *   FERR    (output) double*, dimension (B->ncol)   
    - *           The estimated forward error bound for each solution vector   
    - *           X(j) (the j-th column of the solution matrix X).   
    - *           If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *           is an estimated upper bound for the magnitude of the largest 
    - *           element in (X(j) - XTRUE) divided by the magnitude of the   
    - *           largest element in X(j).  The estimate is as reliable as   
    - *           the estimate for RCOND, and is almost always a slight   
    - *           overestimate of the true error.
    - *
    - *   BERR    (output) double*, dimension (B->ncol)   
    - *           The componentwise relative backward error of each solution   
    - *           vector X(j) (i.e., the smallest relative change in   
    - *           any element of A or B that makes X(j) an exact solution).
    - *
    - *   stat     (output) SuperLUStat_t*
    - *            Record the statistics on runtime and floating-point operation count.
    - *            See util.h for the definition of 'SuperLUStat_t'.
    - *
    - *   info    (output) int*   
    - *           = 0:  successful exit   
    - *            < 0:  if INFO = -i, the i-th argument had an illegal value   
    - *
    - *    Internal Parameters   
    - *    ===================   
    - *
    - *    ITMAX is the maximum number of steps of iterative refinement.   
    - *
    - * 
    - */ -void -zgsrfs(trans_t trans, SuperMatrix *A, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, char *equed, double *R, double *C, - SuperMatrix *B, SuperMatrix *X, double *ferr, double *berr, - SuperLUStat_t *stat, int *info) -{ - - -#define ITMAX 5 - - /* Table of constant values */ - int ione = 1; - doublecomplex ndone = {-1., 0.}; - doublecomplex done = {1., 0.}; - - /* Local variables */ - NCformat *Astore; - doublecomplex *Aval; - SuperMatrix Bjcol; - DNformat *Bstore, *Xstore, *Bjcol_store; - doublecomplex *Bmat, *Xmat, *Bptr, *Xptr; - int kase; - double safe1, safe2; - int i, j, k, irow, nz, count, notran, rowequ, colequ; - int ldb, ldx, nrhs; - double s, xk, lstres, eps, safmin; - char transc[1]; - trans_t transt; - doublecomplex *work; - double *rwork; - int *iwork; - extern double dlamch_(char *); - extern int zlacon_(int *, doublecomplex *, doublecomplex *, double *, int *); -#ifdef _CRAY - extern int CCOPY(int *, doublecomplex *, int *, doublecomplex *, int *); - extern int CSAXPY(int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *); -#else - extern int zcopy_(int *, doublecomplex *, int *, doublecomplex *, int *); - extern int zaxpy_(int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *); -#endif - - Astore = A->Store; - Aval = Astore->nzval; - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - /* Test the input parameters */ - *info = 0; - notran = (trans == NOTRANS); - if ( !notran && trans != TRANS && trans != CONJ ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - A->Stype != SLU_NC || A->Dtype != SLU_Z || A->Mtype != SLU_GE ) - *info = -2; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_Z || L->Mtype != SLU_TRLU ) - *info = -3; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_Z || U->Mtype != SLU_TRU 
) - *info = -4; - else if ( ldb < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_Z || B->Mtype != SLU_GE ) - *info = -10; - else if ( ldx < SUPERLU_MAX(0, A->nrow) || - X->Stype != SLU_DN || X->Dtype != SLU_Z || X->Mtype != SLU_GE ) - *info = -11; - if (*info != 0) { - i = -(*info); - xerbla_("zgsrfs", &i); - return; - } - - /* Quick return if possible */ - if ( A->nrow == 0 || nrhs == 0) { - for (j = 0; j < nrhs; ++j) { - ferr[j] = 0.; - berr[j] = 0.; - } - return; - } - - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - - /* Allocate working space */ - work = doublecomplexMalloc(2*A->nrow); - rwork = (double *) SUPERLU_MALLOC( A->nrow * sizeof(double) ); - iwork = intMalloc(A->nrow); - if ( !work || !rwork || !iwork ) - ABORT("Malloc fails for work/rwork/iwork."); - - if ( notran ) { - *(unsigned char *)transc = 'N'; - transt = TRANS; - } else { - *(unsigned char *)transc = 'T'; - transt = NOTRANS; - } - - /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ - nz = A->ncol + 1; - eps = dlamch_("Epsilon"); - safmin = dlamch_("Safe minimum"); - /* Set SAFE1 essentially to be the underflow threshold times the - number of additions in each row. */ - safe1 = nz * safmin; - safe2 = safe1 / eps; - - /* Compute the number of nonzeros in each row (or column) of A */ - for (i = 0; i < A->nrow; ++i) iwork[i] = 0; - if ( notran ) { - for (k = 0; k < A->ncol; ++k) - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - ++iwork[Astore->rowind[i]]; - } else { - for (k = 0; k < A->ncol; ++k) - iwork[k] = Astore->colptr[k+1] - Astore->colptr[k]; - } - - /* Copy one column of RHS B into Bjcol. 
*/ - Bjcol.Stype = B->Stype; - Bjcol.Dtype = B->Dtype; - Bjcol.Mtype = B->Mtype; - Bjcol.nrow = B->nrow; - Bjcol.ncol = 1; - Bjcol.Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !Bjcol.Store ) ABORT("SUPERLU_MALLOC fails for Bjcol.Store"); - Bjcol_store = Bjcol.Store; - Bjcol_store->lda = ldb; - Bjcol_store->nzval = work; /* address aliasing */ - - /* Do for each right hand side ... */ - for (j = 0; j < nrhs; ++j) { - count = 0; - lstres = 3.; - Bptr = &Bmat[j*ldb]; - Xptr = &Xmat[j*ldx]; - - while (1) { /* Loop until stopping criterion is satisfied. */ - - /* Compute residual R = B - op(A) * X, - where op(A) = A, A**T, or A**H, depending on TRANS. */ - -#ifdef _CRAY - CCOPY(&A->nrow, Bptr, &ione, work, &ione); -#else - zcopy_(&A->nrow, Bptr, &ione, work, &ione); -#endif - sp_zgemv(transc, ndone, A, Xptr, ione, done, work, ione); - - /* Compute componentwise relative backward error from formula - max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) - where abs(Z) is the componentwise absolute value of the matrix - or vector Z. If the i-th component of the denominator is less - than SAFE2, then SAFE1 is added to the i-th component of the - numerator before dividing. */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = z_abs1( &Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). 
*/ - if (notran) { - for (k = 0; k < A->ncol; ++k) { - xk = z_abs1( &Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += z_abs1(&Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - s += z_abs1(&Aval[i]) * z_abs1(&Xptr[irow]); - } - rwork[k] += s; - } - } - s = 0.; - for (i = 0; i < A->nrow; ++i) { - if (rwork[i] > safe2) { - s = SUPERLU_MAX( s, z_abs1(&work[i]) / rwork[i] ); - } else if ( rwork[i] != 0.0 ) { - s = SUPERLU_MAX( s, (z_abs1(&work[i]) + safe1) / rwork[i] ); - } - /* If rwork[i] is exactly 0.0, then we know the true - residual also must be exactly 0.0. */ - } - berr[j] = s; - - /* Test stopping criterion. Continue iterating if - 1) The residual BERR(J) is larger than machine epsilon, and - 2) BERR(J) decreased by at least a factor of 2 during the - last iteration, and - 3) At most ITMAX iterations tried. */ - - if (berr[j] > eps && berr[j] * 2. <= lstres && count < ITMAX) { - /* Update solution and try again. */ - zgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - -#ifdef _CRAY - CAXPY(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#else - zaxpy_(&A->nrow, &done, work, &ione, - &Xmat[j*ldx], &ione); -#endif - lstres = berr[j]; - ++count; - } else { - break; - } - - } /* end while */ - - stat->RefineSteps = count; - - /* Bound error from formula: - norm(X - XTRUE) / norm(X) .le. 
FERR = norm( abs(inv(op(A)))* - ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) - where - norm(Z) is the magnitude of the largest component of Z - inv(op(A)) is the inverse of op(A) - abs(Z) is the componentwise absolute value of the matrix or - vector Z - NZ is the maximum number of nonzeros in any row of A, plus 1 - EPS is machine epsilon - - The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) - is incremented by SAFE1 if the i-th component of - abs(op(A))*abs(X) + abs(B) is less than SAFE2. - - Use ZLACON to estimate the infinity-norm of the matrix - inv(op(A)) * diag(W), - where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */ - - for (i = 0; i < A->nrow; ++i) rwork[i] = z_abs1( &Bptr[i] ); - - /* Compute abs(op(A))*abs(X) + abs(B). */ - if ( notran ) { - for (k = 0; k < A->ncol; ++k) { - xk = z_abs1( &Xptr[k] ); - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) - rwork[Astore->rowind[i]] += z_abs1(&Aval[i]) * xk; - } - } else { - for (k = 0; k < A->ncol; ++k) { - s = 0.; - for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) { - irow = Astore->rowind[i]; - xk = z_abs1( &Xptr[irow] ); - s += z_abs1(&Aval[i]) * xk; - } - rwork[k] += s; - } - } - - for (i = 0; i < A->nrow; ++i) - if (rwork[i] > safe2) - rwork[i] = z_abs(&work[i]) + (iwork[i]+1)*eps*rwork[i]; - else - rwork[i] = z_abs(&work[i])+(iwork[i]+1)*eps*rwork[i]+safe1; - kase = 0; - - do { - zlacon_(&A->nrow, &work[A->nrow], work, - &ferr[j], &kase); - if (kase == 0) break; - - if (kase == 1) { - /* Multiply by diag(W)*inv(op(A)**T)*(diag(C) or diag(R)). 
*/ - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) { - zd_mult(&work[i], &work[i], C[i]); - } - else if ( !notran && rowequ ) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&work[i], &work[i], R[i]); - } - - zgstrs (transt, L, U, perm_c, perm_r, &Bjcol, stat, info); - - for (i = 0; i < A->nrow; ++i) { - zd_mult(&work[i], &work[i], rwork[i]); - } - } else { - /* Multiply by (diag(C) or diag(R))*inv(op(A))*diag(W). */ - for (i = 0; i < A->nrow; ++i) { - zd_mult(&work[i], &work[i], rwork[i]); - } - - zgstrs (trans, L, U, perm_c, perm_r, &Bjcol, stat, info); - - if ( notran && colequ ) - for (i = 0; i < A->ncol; ++i) { - zd_mult(&work[i], &work[i], C[i]); - } - else if ( !notran && rowequ ) - for (i = 0; i < A->ncol; ++i) { - zd_mult(&work[i], &work[i], R[i]); - } - } - - } while ( kase != 0 ); - - /* Normalize error. */ - lstres = 0.; - if ( notran && colequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, C[i] * z_abs1( &Xptr[i]) ); - } else if ( !notran && rowequ ) { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, R[i] * z_abs1( &Xptr[i]) ); - } else { - for (i = 0; i < A->nrow; ++i) - lstres = SUPERLU_MAX( lstres, z_abs1( &Xptr[i]) ); - } - if ( lstres != 0. ) - ferr[j] /= lstres; - - } /* for each RHS j ... */ - - SUPERLU_FREE(work); - SUPERLU_FREE(rwork); - SUPERLU_FREE(iwork); - SUPERLU_FREE(Bjcol.Store); - - return; - -} /* zgsrfs */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgssv.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgssv.c deleted file mode 100644 index aceb10da4d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgssv.c +++ /dev/null @@ -1,227 +0,0 @@ - -/*! @file zgssv.c - * \brief Solves the system of linear equations A*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * ZGSSV solves the system of linear equations A*X=B, using the
    - * LU factorization from ZGSTRF. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *
    - *      1.1. Permute the columns of A, forming A*Pc, where Pc
    - *           is a permutation matrix. For more details of this step, 
    - *           see sp_preorder.c.
    - *
    - *      1.2. Factor A as Pr*A*Pc=L*U with the permutation Pr determined
    - *           by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      1.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the
    - *      above algorithm to the transpose of A:
    - *
    - *      2.1. Permute columns of transpose(A) (rows of A),
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix. 
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.2. Factor A as Pr*transpose(A)*Pc=L*U with the permutation Pr
    - *           determined by Gaussian elimination with partial pivoting.
    - *           L is unit lower triangular with offdiagonal entries
    - *           bounded by 1 in magnitude, and U is upper triangular.
    - *
    - *      2.3. Solve the system of equations A*X=B using the factored
    - *           form of A.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - * 
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR; Dtype = SLU_Z; Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - * perm_c  (input/output) int*
    - *         If A->Stype = SLU_NC, column permutation vector of size A->ncol
    - *         which defines the permutation matrix Pc; perm_c[i] = j means 
    - *         column i of A is in position j in A*Pc.
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - *         If options->ColPerm = MY_PERMC or options->Fact = SamePattern or
    - *            options->Fact = SamePattern_SameRowPerm, it is an input argument.
    - *            On exit, perm_c may be overwritten by the product of the input
    - *            perm_c and a permutation that postorders the elimination tree
    - *            of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *            is already in postorder.
    - *         Otherwise, it is an output argument.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined 
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->RowPerm = MY_PERMR or
    - *            options->Fact = SamePattern_SameRowPerm, perm_r is an
    - *            input argument.
    - *         otherwise it is an output argument.
    - *
    - * L       (output) SuperMatrix*
    - *         The factor L from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - *         
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization 
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *	   = 0: successful exit
    - *         > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                so the solution could not be computed.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol.
    - * 
    - */ - -void -zgssv(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - SuperMatrix *L, SuperMatrix *U, SuperMatrix *B, - SuperLUStat_t *stat, int *info ) -{ - - DNformat *Bstore; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int lwork = 0, *etree, i; - - /* Set default values for some parameters */ - int panel_size; /* panel size */ - int relax; /* no of columns in a relaxed snodes */ - int permc_spec; - trans_t trans = NOTRANS; - double *utime; - double t; /* Temporary time */ - - /* Test the input parameters ... */ - *info = 0; - Bstore = B->Store; - if ( options->Fact != DOFACT ) *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_Z || A->Mtype != SLU_GE ) - *info = -2; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_Z || B->Mtype != SLU_GE ) - *info = -7; - if ( *info != 0 ) { - i = -(*info); - xerbla_("zgssv", &i); - return; - } - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - zCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - trans = TRANS; - } else { - if ( A->Stype == SLU_NC ) AA = A; - } - - t = SuperLU_timer_(); - /* - * Get column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t; - - etree = intMalloc(A->ncol); - - t = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - utime[ETREE] = SuperLU_timer_() - t; - - panel_size = sp_ienv(1); - relax = sp_ienv(2); - - /*printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4));*/ - t = SuperLU_timer_(); - /* Compute the LU factorization of A. */ - zgstrf(options, &AC, relax, panel_size, etree, - NULL, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t; - - t = SuperLU_timer_(); - if ( *info == 0 ) { - /* Solve the system A*X=B, overwriting B with X. 
*/ - zgstrs (trans, L, U, perm_c, perm_r, B, stat, info); - } - utime[SOLVE] = SuperLU_timer_() - t; - - SUPERLU_FREE (etree); - Destroy_CompCol_Permuted(&AC); - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgssvx.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgssvx.c deleted file mode 100644 index 4681a38870..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgssvx.c +++ /dev/null @@ -1,619 +0,0 @@ - -/*! @file zgssvx.c - * \brief Solves the system of linear equations A*X=B or A'*X=B - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * ZGSSVX solves the system of linear equations A*X=B or A'*X=B, using
    - * the LU factorization from zgstrf(). Error bounds on the solution and
    - * a condition estimate are also provided. It performs the following steps:
    - *
    - *   1. If A is stored column-wise (A->Stype = SLU_NC):
    - *  
    - *      1.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A is
    - *           overwritten by diag(R)*A*diag(C) and B by diag(R)*B
    - *           (if options->Trans=NOTRANS) or diag(C)*B (if options->Trans
    - *           = TRANS or CONJ).
    - *
    - *      1.2. Permute columns of A, forming A*Pc, where Pc is a permutation
    - *           matrix that usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      1.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the matrix A (after equilibration if options->Equil = YES)
    - *           as Pr*A*Pc = L*U, with Pr determined by partial pivoting.
    - *
    - *      1.4. Compute the reciprocal pivot growth factor.
    - *
    - *      1.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form of 
    - *           A is used to estimate the condition number of the matrix A. If
    - *           the reciprocal of the condition number is less than machine
    - *           precision, info = A->ncol+1 is returned as a warning, but the
    - *           routine still goes on to solve for X and computes error bounds
    - *           as described below.
    - *
    - *      1.6. The system of equations is solved for X using the factored form
    - *           of A.
    - *
    - *      1.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      1.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R)
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   2. If A is stored row-wise (A->Stype = SLU_NR), apply the above algorithm
    - *      to the transpose of A:
    - *
    - *      2.1. If options->Equil = YES, scaling factors are computed to
    - *           equilibrate the system:
    - *           options->Trans = NOTRANS:
    - *               diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B
    - *           options->Trans = TRANS:
    - *               (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B
    - *           options->Trans = CONJ:
    - *               (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B
    - *           Whether or not the system will be equilibrated depends on the
    - *           scaling of the matrix A, but if equilibration is used, A' is
    - *           overwritten by diag(R)*A'*diag(C) and B by diag(R)*B 
    - *           (if trans='N') or diag(C)*B (if trans = 'T' or 'C').
    - *
    - *      2.2. Permute columns of transpose(A) (rows of A), 
    - *           forming transpose(A)*Pc, where Pc is a permutation matrix that 
    - *           usually preserves sparsity.
    - *           For more details of this step, see sp_preorder.c.
    - *
    - *      2.3. If options->Fact != FACTORED, the LU decomposition is used to
    - *           factor the transpose(A) (after equilibration if 
    - *           options->Fact = YES) as Pr*transpose(A)*Pc = L*U with the
    - *           permutation Pr determined by partial pivoting.
    - *
    - *      2.4. Compute the reciprocal pivot growth factor.
    - *
    - *      2.5. If some U(i,i) = 0, so that U is exactly singular, then the
    - *           routine returns with info = i. Otherwise, the factored form 
    - *           of transpose(A) is used to estimate the condition number of the
    - *           matrix A. If the reciprocal of the condition number
    - *           is less than machine precision, info = A->nrow+1 is returned as
    - *           a warning, but the routine still goes on to solve for X and
    - *           computes error bounds as described below.
    - *
    - *      2.6. The system of equations is solved for X using the factored form
    - *           of transpose(A).
    - *
    - *      2.7. If options->IterRefine != NOREFINE, iterative refinement is
    - *           applied to improve the computed solution matrix and calculate
    - *           error bounds and backward error estimates for it.
    - *
    - *      2.8. If equilibration was used, the matrix X is premultiplied by
    - *           diag(C) (if options->Trans = NOTRANS) or diag(R) 
    - *           (if options->Trans = TRANS or CONJ) so that it solves the
    - *           original system before equilibration.
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed and how the
    - *         system will be solved.
    - *
    - * A       (input/output) SuperMatrix*
    - *         Matrix A in A*X=B, of dimension (A->nrow, A->ncol). The number
    - *         of the linear equations is A->nrow. Currently, the type of A can be:
    - *         Stype = SLU_NC or SLU_NR, Dtype = SLU_D, Mtype = SLU_GE.
    - *         In the future, more general A may be handled.
    - *
    - *         On entry, If options->Fact = FACTORED and equed is not 'N', 
    - *         then A must have been equilibrated by the scaling factors in
    - *         R and/or C.  
    - *         On exit, A is not modified if options->Equil = NO, or if 
    - *         options->Equil = YES but equed = 'N' on exit.
    - *         Otherwise, if options->Equil = YES and equed is not 'N',
    - *         A is scaled as follows:
    - *         If A->Stype = SLU_NC:
    - *           equed = 'R':  A := diag(R) * A
    - *           equed = 'C':  A := A * diag(C)
    - *           equed = 'B':  A := diag(R) * A * diag(C).
    - *         If A->Stype = SLU_NR:
    - *           equed = 'R':  transpose(A) := diag(R) * transpose(A)
    - *           equed = 'C':  transpose(A) := transpose(A) * diag(C)
    - *           equed = 'B':  transpose(A) := diag(R) * transpose(A) * diag(C).
    - *
    - * perm_c  (input/output) int*
    - *	   If A->Stype = SLU_NC, Column permutation vector of size A->ncol,
    - *         which defines the permutation matrix Pc; perm_c[i] = j means
    - *         column i of A is in position j in A*Pc.
    - *         On exit, perm_c may be overwritten by the product of the input
    - *         perm_c and a permutation that postorders the elimination tree
    - *         of Pc'*A'*A*Pc; perm_c is not changed if the elimination tree
    - *         is already in postorder.
    - *
    - *         If A->Stype = SLU_NR, column permutation vector of size A->nrow,
    - *         which describes permutation of columns of transpose(A) 
    - *         (rows of A) as described above.
    - * 
    - * perm_r  (input/output) int*
    - *         If A->Stype = SLU_NC, row permutation vector of size A->nrow, 
    - *         which defines the permutation matrix Pr, and is determined
    - *         by partial pivoting.  perm_r[i] = j means row i of A is in 
    - *         position j in Pr*A.
    - *
    - *         If A->Stype = SLU_NR, permutation vector of size A->ncol, which
    - *         determines permutation of rows of transpose(A)
    - *         (columns of A) as described above.
    - *
    - *         If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *         will try to use the input perm_r, unless a certain threshold
    - *         criterion is violated. In that case, perm_r is overwritten by a
    - *         new permutation determined by partial pivoting or diagonal
    - *         threshold pivoting.
    - *         Otherwise, perm_r is output argument.
    - * 
    - * etree   (input/output) int*,  dimension (A->ncol)
    - *         Elimination tree of Pc'*A'*A*Pc.
    - *         If options->Fact != FACTORED and options->Fact != DOFACT,
    - *         etree is an input argument, otherwise it is an output argument.
    - *         Note: etree is a vector of parent pointers for a forest whose
    - *         vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *
    - * equed   (input/output) char*
    - *         Specifies the form of equilibration that was done.
    - *         = 'N': No equilibration.
    - *         = 'R': Row equilibration, i.e., A was premultiplied by diag(R).
    - *         = 'C': Column equilibration, i.e., A was postmultiplied by diag(C).
    - *         = 'B': Both row and column equilibration, i.e., A was replaced 
    - *                by diag(R)*A*diag(C).
    - *         If options->Fact = FACTORED, equed is an input argument,
    - *         otherwise it is an output argument.
    - *
    - * R       (input/output) double*, dimension (A->nrow)
    - *         The row scale factors for A or transpose(A).
    - *         If equed = 'R' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the left by diag(R).
    - *         If equed = 'N' or 'C', R is not accessed.
    - *         If options->Fact = FACTORED, R is an input argument,
    - *             otherwise, R is output.
    - *         If options->zFact = FACTORED and equed = 'R' or 'B', each element
    - *             of R must be positive.
    - * 
    - * C       (input/output) double*, dimension (A->ncol)
    - *         The column scale factors for A or transpose(A).
    - *         If equed = 'C' or 'B', A (if A->Stype = SLU_NC) or transpose(A)
    - *             (if A->Stype = SLU_NR) is multiplied on the right by diag(C).
    - *         If equed = 'N' or 'R', C is not accessed.
    - *         If options->Fact = FACTORED, C is an input argument,
    - *             otherwise, C is output.
    - *         If options->Fact = FACTORED and equed = 'C' or 'B', each element
    - *             of C must be positive.
    - *         
    - * L       (output) SuperMatrix*
    - *	   The factor L from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype SLU_= NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses compressed row subscripts storage for supernodes, i.e.,
    - *         L has types: Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - *
    - * U       (output) SuperMatrix*
    - *	   The factor U from the factorization
    - *             Pr*A*Pc=L*U              (if A->Stype = SLU_NC) or
    - *             Pr*transpose(A)*Pc=L*U   (if A->Stype = SLU_NR).
    - *         Uses column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - * work    (workspace/output) void*, size (lwork) (in bytes)
    - *         User supplied workspace, should be large enough
    - *         to hold data structures for factors L and U.
    - *         On exit, if fact is not 'F', L and U point to this array.
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               mem_usage->total_needed; no other side effects.
    - *
    - *         See argument 'mem_usage' for memory usage statistics.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         If B->ncol = 0, only LU decomposition is performed, the triangular
    - *                         solve is skipped.
    - *         On exit,
    - *            if equed = 'N', B is not modified; otherwise
    - *            if A->Stype = SLU_NC:
    - *               if options->Trans = NOTRANS and equed = 'R' or 'B',
    - *                  B is overwritten by diag(R)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'C' of 'B',
    - *                  B is overwritten by diag(C)*B;
    - *            if A->Stype = SLU_NR:
    - *               if options->Trans = NOTRANS and equed = 'C' or 'B',
    - *                  B is overwritten by diag(C)*B;
    - *               if options->Trans = TRANS or CONJ and equed = 'R' of 'B',
    - *                  B is overwritten by diag(R)*B.
    - *
    - * X       (output) SuperMatrix*
    - *         X has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE. 
    - *         If info = 0 or info = A->ncol+1, X contains the solution matrix
    - *         to the original system of equations. Note that A and B are modified
    - *         on exit if equed is not 'N', and the solution to the equilibrated
    - *         system is inv(diag(C))*X if options->Trans = NOTRANS and
    - *         equed = 'C' or 'B', or inv(diag(R))*X if options->Trans = 'T' or 'C'
    - *         and equed = 'R' or 'B'.
    - *
    - * recip_pivot_growth (output) double*
    - *         The reciprocal pivot growth factor max_j( norm(A_j)/norm(U_j) ).
    - *         The infinity norm is used. If recip_pivot_growth is much less
    - *         than 1, the stability of the LU factorization could be poor.
    - *
    - * rcond   (output) double*
    - *         The estimate of the reciprocal condition number of the matrix A
    - *         after equilibration (if done). If rcond is less than the machine
    - *         precision (in particular, if rcond = 0), the matrix is singular
    - *         to working precision. This condition is indicated by a return
    - *         code of info > 0.
    - *
    - * FERR    (output) double*, dimension (B->ncol)   
    - *         The estimated forward error bound for each solution vector   
    - *         X(j) (the j-th column of the solution matrix X).   
    - *         If XTRUE is the true solution corresponding to X(j), FERR(j) 
    - *         is an estimated upper bound for the magnitude of the largest 
    - *         element in (X(j) - XTRUE) divided by the magnitude of the   
    - *         largest element in X(j).  The estimate is as reliable as   
    - *         the estimate for RCOND, and is almost always a slight   
    - *         overestimate of the true error.
    - *         If options->IterRefine = NOREFINE, ferr = 1.0.
    - *
    - * BERR    (output) double*, dimension (B->ncol)
    - *         The componentwise relative backward error of each solution   
    - *         vector X(j) (i.e., the smallest relative change in   
    - *         any element of A or B that makes X(j) an exact solution).
    - *         If options->IterRefine = NOREFINE, berr = 1.0.
    - *
    - * mem_usage (output) mem_usage_t*
    - *         Record the memory usage statistics, consisting of following fields:
    - *         - for_lu (float)
    - *           The amount of space used in bytes for L\U data structures.
    - *         - total_needed (float)
    - *           The amount of space needed in bytes to perform factorization.
    - *         - expansions (int)
    - *           The number of memory expansions during the LU factorization.
    - *
    - * stat   (output) SuperLUStat_t*
    - *        Record the statistics on runtime and floating-point operation count.
    - *        See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - *         = 0: successful exit   
    - *         < 0: if info = -i, the i-th argument had an illegal value   
    - *         > 0: if info = i, and i is   
    - *              <= A->ncol: U(i,i) is exactly zero. The factorization has   
    - *                    been completed, but the factor U is exactly   
    - *                    singular, so the solution and error bounds   
    - *                    could not be computed.   
    - *              = A->ncol+1: U is nonsingular, but RCOND is less than machine
    - *                    precision, meaning that the matrix is singular to
    - *                    working precision. Nevertheless, the solution and
    - *                    error bounds are computed because there are a number
    - *                    of situations where the computed solution can be more
    - *                    accurate than the value of RCOND would suggest.   
    - *              > A->ncol+1: number of bytes allocated when memory allocation
    - *                    failure occurred, plus A->ncol.
    - * 
    - */ - -void -zgssvx(superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r, - int *etree, char *equed, double *R, double *C, - SuperMatrix *L, SuperMatrix *U, void *work, int lwork, - SuperMatrix *B, SuperMatrix *X, double *recip_pivot_growth, - double *rcond, double *ferr, double *berr, - mem_usage_t *mem_usage, SuperLUStat_t *stat, int *info ) -{ - - - DNformat *Bstore, *Xstore; - doublecomplex *Bmat, *Xmat; - int ldb, ldx, nrhs; - SuperMatrix *AA;/* A in SLU_NC format used by the factorization routine.*/ - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int colequ, equil, nofact, notran, rowequ, permc_spec; - trans_t trant; - char norm[1]; - int i, j, info1; - double amax, anorm, bignum, smlnum, colcnd, rowcnd, rcmax, rcmin; - int relax, panel_size; - double diag_pivot_thresh; - double t0; /* temporary time */ - double *utime; - - /* External functions */ - extern double zlangs(char *, SuperMatrix *); - - Bstore = B->Store; - Xstore = X->Store; - Bmat = Bstore->nzval; - Xmat = Xstore->nzval; - ldb = Bstore->lda; - ldx = Xstore->lda; - nrhs = B->ncol; - - *info = 0; - nofact = (options->Fact != FACTORED); - equil = (options->Equil == YES); - notran = (options->Trans == NOTRANS); - if ( nofact ) { - *(unsigned char *)equed = 'N'; - rowequ = FALSE; - colequ = FALSE; - } else { - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - smlnum = dlamch_("Safe minimum"); - bignum = 1. 
/ smlnum; - } - -#if 0 -printf("dgssvx: Fact=%4d, Trans=%4d, equed=%c\n", - options->Fact, options->Trans, *equed); -#endif - - /* Test the input parameters */ - if (!nofact && options->Fact != DOFACT && options->Fact != SamePattern && - options->Fact != SamePattern_SameRowPerm && - !notran && options->Trans != TRANS && options->Trans != CONJ && - !equil && options->Equil != NO) - *info = -1; - else if ( A->nrow != A->ncol || A->nrow < 0 || - (A->Stype != SLU_NC && A->Stype != SLU_NR) || - A->Dtype != SLU_Z || A->Mtype != SLU_GE ) - *info = -2; - else if (options->Fact == FACTORED && - !(rowequ || colequ || lsame_(equed, "N"))) - *info = -6; - else { - if (rowequ) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, R[j]); - rcmax = SUPERLU_MAX(rcmax, R[j]); - } - if (rcmin <= 0.) *info = -7; - else if ( A->nrow > 0) - rowcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else rowcnd = 1.; - } - if (colequ && *info == 0) { - rcmin = bignum; - rcmax = 0.; - for (j = 0; j < A->nrow; ++j) { - rcmin = SUPERLU_MIN(rcmin, C[j]); - rcmax = SUPERLU_MAX(rcmax, C[j]); - } - if (rcmin <= 0.) *info = -8; - else if (A->nrow > 0) - colcnd = SUPERLU_MAX(rcmin,smlnum) / SUPERLU_MIN(rcmax,bignum); - else colcnd = 1.; - } - if (*info == 0) { - if ( lwork < -1 ) *info = -12; - else if ( B->ncol < 0 || Bstore->lda < SUPERLU_MAX(0, A->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_Z || - B->Mtype != SLU_GE ) - *info = -13; - else if ( X->ncol < 0 || Xstore->lda < SUPERLU_MAX(0, A->nrow) || - (B->ncol != 0 && B->ncol != X->ncol) || - X->Stype != SLU_DN || - X->Dtype != SLU_Z || X->Mtype != SLU_GE ) - *info = -14; - } - } - if (*info != 0) { - i = -(*info); - xerbla_("zgssvx", &i); - return; - } - - /* Initialization for factor parameters */ - panel_size = sp_ienv(1); - relax = sp_ienv(2); - diag_pivot_thresh = options->DiagPivotThresh; - - utime = stat->utime; - - /* Convert A to SLU_NC format when necessary. 
*/ - if ( A->Stype == SLU_NR ) { - NRformat *Astore = A->Store; - AA = (SuperMatrix *) SUPERLU_MALLOC( sizeof(SuperMatrix) ); - zCreate_CompCol_Matrix(AA, A->ncol, A->nrow, Astore->nnz, - Astore->nzval, Astore->colind, Astore->rowptr, - SLU_NC, A->Dtype, A->Mtype); - if ( notran ) { /* Reverse the transpose argument. */ - trant = TRANS; - notran = 0; - } else { - trant = NOTRANS; - notran = 1; - } - } else { /* A->Stype == SLU_NC */ - trant = options->Trans; - AA = A; - } - - if ( nofact && equil ) { - t0 = SuperLU_timer_(); - /* Compute row and column scalings to equilibrate the matrix A. */ - zgsequ(AA, R, C, &rowcnd, &colcnd, &amax, &info1); - - if ( info1 == 0 ) { - /* Equilibrate matrix A. */ - zlaqgs(AA, R, C, rowcnd, colcnd, amax, equed); - rowequ = lsame_(equed, "R") || lsame_(equed, "B"); - colequ = lsame_(equed, "C") || lsame_(equed, "B"); - } - utime[EQUIL] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Scale the right hand side if equilibration was performed. */ - if ( notran ) { - if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], R[i]); - } - } - } else if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Bmat[i+j*ldb], &Bmat[i+j*ldb], C[i]); - } - } - } - - if ( nofact ) { - - t0 = SuperLU_timer_(); - /* - * Gnet column permutation vector perm_c[], according to permc_spec: - * permc_spec = NATURAL: natural ordering - * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A - * permc_spec = MMD_ATA: minimum degree on structure of A'*A - * permc_spec = COLAMD: approximate minimum degree column ordering - * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] - */ - permc_spec = options->ColPerm; - if ( permc_spec != MY_PERMC && options->Fact == DOFACT ) - get_perm_c(permc_spec, AA, perm_c); - utime[COLPERM] = SuperLU_timer_() - t0; - - t0 = SuperLU_timer_(); - sp_preorder(options, AA, perm_c, etree, &AC); - 
utime[ETREE] = SuperLU_timer_() - t0; - -/* printf("Factor PA = LU ... relax %d\tw %d\tmaxsuper %d\trowblk %d\n", - relax, panel_size, sp_ienv(3), sp_ienv(4)); - fflush(stdout); */ - - /* Compute the LU factorization of A*Pc. */ - t0 = SuperLU_timer_(); - zgstrf(options, &AC, relax, panel_size, etree, - work, lwork, perm_c, perm_r, L, U, stat, info); - utime[FACT] = SuperLU_timer_() - t0; - - if ( lwork == -1 ) { - mem_usage->total_needed = *info - A->ncol; - return; - } - } - - if ( options->PivotGrowth ) { - if ( *info > 0 ) { - if ( *info <= A->ncol ) { - /* Compute the reciprocal pivot growth factor of the leading - rank-deficient *info columns of A. */ - *recip_pivot_growth = zPivotGrowth(*info, AA, perm_c, L, U); - } - return; - } - - /* Compute the reciprocal pivot growth factor *recip_pivot_growth. */ - *recip_pivot_growth = zPivotGrowth(A->ncol, AA, perm_c, L, U); - } - - if ( options->ConditionNumber ) { - /* Estimate the reciprocal of the condition number of A. */ - t0 = SuperLU_timer_(); - if ( notran ) { - *(unsigned char *)norm = '1'; - } else { - *(unsigned char *)norm = 'I'; - } - anorm = zlangs(norm, AA); - zgscon(norm, L, U, anorm, rcond, stat, info); - utime[RCOND] = SuperLU_timer_() - t0; - } - - if ( nrhs > 0 ) { - /* Compute the solution matrix X. */ - for (j = 0; j < nrhs; j++) /* Save a copy of the right hand sides */ - for (i = 0; i < B->nrow; i++) - Xmat[i + j*ldx] = Bmat[i + j*ldb]; - - t0 = SuperLU_timer_(); - zgstrs (trant, L, U, perm_c, perm_r, X, stat, info); - utime[SOLVE] = SuperLU_timer_() - t0; - - /* Use iterative refinement to improve the computed solution and compute - error bounds and backward error estimates for it. 
*/ - t0 = SuperLU_timer_(); - if ( options->IterRefine != NOREFINE ) { - zgsrfs(trant, AA, L, U, perm_c, perm_r, equed, R, C, B, - X, ferr, berr, stat, info); - } else { - for (j = 0; j < nrhs; ++j) ferr[j] = berr[j] = 1.0; - } - utime[REFINE] = SuperLU_timer_() - t0; - - /* Transform the solution matrix X to a solution of the original system. */ - if ( notran ) { - if ( colequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], C[i]); - } - } - } else if ( rowequ ) { - for (j = 0; j < nrhs; ++j) - for (i = 0; i < A->nrow; ++i) { - zd_mult(&Xmat[i+j*ldx], &Xmat[i+j*ldx], R[i]); - } - } - } /* end if nrhs > 0 */ - - if ( options->ConditionNumber ) { - /* Set INFO = A->ncol+1 if the matrix is singular to working precision. */ - if ( *rcond < dlamch_("E") ) *info = A->ncol + 1; - } - - if ( nofact ) { - zQuerySpace(L, U, mem_usage); - Destroy_CompCol_Permuted(&AC); - } - if ( A->Stype == SLU_NR ) { - Destroy_SuperMatrix_Store(AA); - SUPERLU_FREE(AA); - } - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgstrf.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgstrf.c deleted file mode 100644 index e3c22f0081..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgstrf.c +++ /dev/null @@ -1,436 +0,0 @@ - -/*! @file zgstrf.c - * \brief Computes an LU factorization of a general sparse matrix - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * ZGSTRF computes an LU factorization of a general sparse m-by-n
    - * matrix A using partial pivoting with row interchanges.
    - * The factorization has the form
    - *     Pr * A = L * U
    - * where Pr is a row permutation matrix, L is lower triangular with unit
    - * diagonal elements (lower trapezoidal if A->nrow > A->ncol), and U is upper 
    - * triangular (upper trapezoidal if A->nrow < A->ncol).
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * options (input) superlu_options_t*
    - *         The structure defines the input parameters to control
    - *         how the LU decomposition will be performed.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = SLU_NCP; Dtype = SLU_Z; Mtype = SLU_GE.
    - *
    - * relax    (input) int
    - *          To control degree of relaxing supernodes. If the number
    - *          of nodes (columns) in a subtree of the elimination tree is less
    - *          than relax, this subtree is considered as one supernode,
    - *          regardless of the row structures of those columns.
    - *
    - * panel_size (input) int
    - *          A panel consists of at most panel_size consecutive columns.
    - *
    - * etree    (input) int*, dimension (A->ncol)
    - *          Elimination tree of A'*A.
    - *          Note: etree is a vector of parent pointers for a forest whose
    - *          vertices are the integers 0 to A->ncol-1; etree[root]==A->ncol.
    - *          On input, the columns of A should be permuted so that the
    - *          etree is in a certain postorder.
    - *
    - * work     (input/output) void*, size (lwork) (in bytes)
    - *          User-supplied work space and space for the output data structures.
    - *          Not referenced if lwork = 0;
    - *
    - * lwork   (input) int
    - *         Specifies the size of work array in bytes.
    - *         = 0:  allocate space internally by system malloc;
    - *         > 0:  use user-supplied work array of length lwork in bytes,
    - *               returns error if space runs out.
    - *         = -1: the routine guesses the amount of space needed without
    - *               performing the factorization, and returns it in
    - *               *info; no other side effects.
    - *
    - * perm_c   (input) int*, dimension (A->ncol)
    - *	    Column permutation vector, which defines the 
    - *          permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *          in position j in A*Pc.
    - *          When searching for diagonal, perm_c[*] is applied to the
    - *          row subscripts of A, so that diagonal threshold pivoting
    - *          can find the diagonal of A, rather than that of A*Pc.
    - *
    - * perm_r   (input/output) int*, dimension (A->nrow)
    - *          Row permutation vector which defines the permutation matrix Pr,
    - *          perm_r[i] = j means row i of A is in position j in Pr*A.
    - *          If options->Fact = SamePattern_SameRowPerm, the pivoting routine
    - *             will try to use the input perm_r, unless a certain threshold
    - *             criterion is violated. In that case, perm_r is overwritten by
    - *             a new permutation determined by partial pivoting or diagonal
    - *             threshold pivoting.
    - *          Otherwise, perm_r is output argument;
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = SLU_NC, 
    - *          Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See slu_util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info     (output) int*
    - *          = 0: successful exit
    - *          < 0: if info = -i, the i-th argument had an illegal value
    - *          > 0: if info = i, and i is
    - *             <= A->ncol: U(i,i) is exactly zero. The factorization has
    - *                been completed, but the factor U is exactly singular,
    - *                and division by zero will occur if it is used to solve a
    - *                system of equations.
    - *             > A->ncol: number of bytes allocated when memory allocation
    - *                failure occurred, plus A->ncol. If lwork = -1, it is
    - *                the estimated amount of space needed, plus A->ncol.
    - *
    - * ======================================================================
    - *
    - * Local Working Arrays: 
    - * ======================
    - *   m = number of rows in the matrix
    - *   n = number of columns in the matrix
    - *
    - *   xprune[0:n-1]: xprune[*] points to locations in subscript 
    - *	vector lsub[*]. For column i, xprune[i] denotes the point where 
    - *	structural pruning begins. I.e. only xlsub[i],..,xprune[i]-1 need 
    - *	to be traversed for symbolic factorization.
    - *
    - *   marker[0:3*m-1]: marker[i] = j means that node i has been 
    - *	reached when working on column j.
    - *	Storage: relative to original row subscripts
    - *	NOTE: There are 3 of them: marker/marker1 are used for panel dfs, 
    - *	      see zpanel_dfs.c; marker2 is used for inner-factorization,
    - *            see zcolumn_dfs.c.
    - *
    - *   parent[0:m-1]: parent vector used during dfs
    - *      Storage: relative to new row subscripts
    - *
    - *   xplore[0:m-1]: xplore[i] gives the location of the next (dfs) 
    - *	unexplored neighbor of i in lsub[*]
    - *
    - *   segrep[0:nseg-1]: contains the list of supernodal representatives
    - *	in topological order of the dfs. A supernode representative is the 
    - *	last column of a supernode.
    - *      The maximum size of segrep[] is n.
    - *
    - *   repfnz[0:W*m-1]: for a nonzero segment U[*,j] that ends at a 
    - *	supernodal representative r, repfnz[r] is the location of the first 
    - *	nonzero in this segment.  It is also used during the dfs: repfnz[r]>0
    - *	indicates the supernode r has been explored.
    - *	NOTE: There are W of them, each used for one column of a panel. 
    - *
    - *   panel_lsub[0:W*m-1]: temporary for the nonzeros row indices below 
    - *      the panel diagonal. These are filled in during zpanel_dfs(), and are
    - *      used later in the inner LU factorization within the panel.
    - *	panel_lsub[]/dense[] pair forms the SPA data structure.
    - *	NOTE: There are W of them.
    - *
    - *   dense[0:W*m-1]: sparse accumulating (SPA) vector for intermediate values;
    - *	    	   NOTE: there are W of them.
    - *
    - *   tempv[0:*]: real temporary used for dense numeric kernels;
    - *	The size of this array is defined by NUM_TEMPV() in slu_zdefs.h.
    - * 
    - */ - -void -zgstrf (superlu_options_t *options, SuperMatrix *A, - int relax, int panel_size, int *etree, void *work, int lwork, - int *perm_c, int *perm_r, SuperMatrix *L, SuperMatrix *U, - SuperLUStat_t *stat, int *info) -{ - /* Local working arrays */ - NCPformat *Astore; - int *iperm_r = NULL; /* inverse of perm_r; used when - options->Fact == SamePattern_SameRowPerm */ - int *iperm_c; /* inverse of perm_c */ - int *iwork; - doublecomplex *zwork; - int *segrep, *repfnz, *parent, *xplore; - int *panel_lsub; /* dense[]/panel_lsub[] pair forms a w-wide SPA */ - int *xprune; - int *marker; - doublecomplex *dense, *tempv; - int *relax_end; - doublecomplex *a; - int *asub; - int *xa_begin, *xa_end; - int *xsup, *supno; - int *xlsub, *xlusup, *xusub; - int nzlumax; - double fill_ratio = sp_ienv(6); /* estimated fill ratio */ - static GlobalLU_t Glu; /* persistent to facilitate multiple factors. */ - - /* Local scalars */ - fact_t fact = options->Fact; - double diag_pivot_thresh = options->DiagPivotThresh; - int pivrow; /* pivotal row number in the original matrix A */ - int nseg1; /* no of segments in U-column above panel row jcol */ - int nseg; /* no of segments in each U-column */ - register int jcol; - register int kcol; /* end column of a relaxed snode */ - register int icol; - register int i, k, jj, new_next, iinfo; - int m, n, min_mn, jsupno, fsupc, nextlu, nextu; - int w_def; /* upper bound on panel width */ - int usepr, iperm_r_allocated = 0; - int nnzL, nnzU; - int *panel_histo = stat->panel_histo; - flops_t *ops = stat->ops; - - iinfo = 0; - m = A->nrow; - n = A->ncol; - min_mn = SUPERLU_MIN(m, n); - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - - /* Allocate storage common to the factor routines */ - *info = zLUMemInit(fact, work, lwork, m, n, Astore->nnz, - panel_size, fill_ratio, L, U, &Glu, &iwork, &zwork); - if ( *info ) return; - - xsup = Glu.xsup; - supno = Glu.supno; - 
xlsub = Glu.xlsub; - xlusup = Glu.xlusup; - xusub = Glu.xusub; - - SetIWork(m, n, panel_size, iwork, &segrep, &parent, &xplore, - &repfnz, &panel_lsub, &xprune, &marker); - zSetRWork(m, panel_size, zwork, &dense, &tempv); - - usepr = (fact == SamePattern_SameRowPerm); - if ( usepr ) { - /* Compute the inverse of perm_r */ - iperm_r = (int *) intMalloc(m); - for (k = 0; k < m; ++k) iperm_r[perm_r[k]] = k; - iperm_r_allocated = 1; - } - iperm_c = (int *) intMalloc(n); - for (k = 0; k < n; ++k) iperm_c[perm_c[k]] = k; - - /* Identify relaxed snodes */ - relax_end = (int *) intMalloc(n); - if ( options->SymmetricMode == YES ) { - heap_relax_snode(n, etree, relax, marker, relax_end); - } else { - relax_snode(n, etree, relax, marker, relax_end); - } - - ifill (perm_r, m, EMPTY); - ifill (marker, m * NO_MARKER, EMPTY); - supno[0] = -1; - xsup[0] = xlsub[0] = xusub[0] = xlusup[0] = 0; - w_def = panel_size; - - /* - * Work on one "panel" at a time. A panel is one of the following: - * (a) a relaxed supernode at the bottom of the etree, or - * (b) panel_size contiguous columns, defined by the user - */ - for (jcol = 0; jcol < min_mn; ) { - - if ( relax_end[jcol] != EMPTY ) { /* start of a relaxed snode */ - kcol = relax_end[jcol]; /* end of the relaxed snode */ - panel_histo[kcol-jcol+1]++; - - /* -------------------------------------- - * Factorize the relaxed supernode(jcol:kcol) - * -------------------------------------- */ - /* Determine the union of the row structure of the snode */ - if ( (*info = zsnode_dfs(jcol, kcol, asub, xa_begin, xa_end, - xprune, marker, &Glu)) != 0 ) - return; - - nextu = xusub[jcol]; - nextlu = xlusup[jcol]; - jsupno = supno[jcol]; - fsupc = xsup[jsupno]; - new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1); - nzlumax = Glu.nzlumax; - while ( new_next > nzlumax ) { - if ( (*info = zLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu)) ) - return; - } - - for (icol = jcol; icol<= kcol; icol++) { - xusub[icol+1] = nextu; - - /* Scatter 
into SPA dense[*] */ - for (k = xa_begin[icol]; k < xa_end[icol]; k++) - dense[asub[k]] = a[k]; - - /* Numeric update within the snode */ - zsnode_bmod(icol, jsupno, fsupc, dense, tempv, &Glu, stat); - - if ( (*info = zpivotL(icol, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - -#ifdef DEBUG - zprint_lu_col("[1]: ", icol, pivrow, xprune, &Glu); -#endif - - } - - jcol = icol; - - } else { /* Work on one panel of panel_size columns */ - - /* Adjust panel_size so that a panel won't overlap with the next - * relaxed snode. - */ - panel_size = w_def; - for (k = jcol + 1; k < SUPERLU_MIN(jcol+panel_size, min_mn); k++) - if ( relax_end[k] != EMPTY ) { - panel_size = k - jcol; - break; - } - if ( k == min_mn ) panel_size = min_mn - jcol; - panel_histo[panel_size]++; - - /* symbolic factor on a panel of columns */ - zpanel_dfs(m, panel_size, jcol, A, perm_r, &nseg1, - dense, panel_lsub, segrep, repfnz, xprune, - marker, parent, xplore, &Glu); - - /* numeric sup-panel updates in topological order */ - zpanel_bmod(m, panel_size, jcol, nseg1, dense, - tempv, segrep, repfnz, &Glu, stat); - - /* Sparse LU within the panel, and below panel diagonal */ - for ( jj = jcol; jj < jcol + panel_size; jj++) { - k = (jj - jcol) * m; /* column index for w-wide arrays */ - - nseg = nseg1; /* Begin after all the panel segments */ - - if ((*info = zcolumn_dfs(m, jj, perm_r, &nseg, &panel_lsub[k], - segrep, &repfnz[k], xprune, marker, - parent, xplore, &Glu)) != 0) return; - - /* Numeric updates */ - if ((*info = zcolumn_bmod(jj, (nseg - nseg1), &dense[k], - tempv, &segrep[nseg1], &repfnz[k], - jcol, &Glu, stat)) != 0) return; - - /* Copy the U-segments to ucol[*] */ - if ((*info = zcopy_to_ucol(jj, nseg, segrep, &repfnz[k], - perm_r, &dense[k], &Glu)) != 0) - return; - - if ( (*info = zpivotL(jj, diag_pivot_thresh, &usepr, perm_r, - iperm_r, iperm_c, &pivrow, &Glu, stat)) ) - if ( iinfo == 0 ) iinfo = *info; - - /* Prune 
columns (0:jj-1) using column jj */ - zpruneL(jj, perm_r, pivrow, nseg, segrep, - &repfnz[k], xprune, &Glu); - - /* Reset repfnz[] for this column */ - resetrep_col (nseg, segrep, &repfnz[k]); - -#ifdef DEBUG - zprint_lu_col("[2]: ", jj, pivrow, xprune, &Glu); -#endif - - } - - jcol += panel_size; /* Move to the next panel */ - - } /* else */ - - } /* for */ - - *info = iinfo; - - if ( m > n ) { - k = 0; - for (i = 0; i < m; ++i) - if ( perm_r[i] == EMPTY ) { - perm_r[i] = n + k; - ++k; - } - } - - countnz(min_mn, xprune, &nnzL, &nnzU, &Glu); - fixupL(min_mn, perm_r, &Glu); - - zLUWorkFree(iwork, zwork, &Glu); /* Free work space and compress storage */ - - if ( fact == SamePattern_SameRowPerm ) { - /* L and U structures may have changed due to possibly different - pivoting, even though the storage is available. - There could also be memory expansions, so the array locations - may have changed, */ - ((SCformat *)L->Store)->nnz = nnzL; - ((SCformat *)L->Store)->nsuper = Glu.supno[n]; - ((SCformat *)L->Store)->nzval = Glu.lusup; - ((SCformat *)L->Store)->nzval_colptr = Glu.xlusup; - ((SCformat *)L->Store)->rowind = Glu.lsub; - ((SCformat *)L->Store)->rowind_colptr = Glu.xlsub; - ((NCformat *)U->Store)->nnz = nnzU; - ((NCformat *)U->Store)->nzval = Glu.ucol; - ((NCformat *)U->Store)->rowind = Glu.usub; - ((NCformat *)U->Store)->colptr = Glu.xusub; - } else { - zCreate_SuperNode_Matrix(L, A->nrow, min_mn, nnzL, Glu.lusup, - Glu.xlusup, Glu.lsub, Glu.xlsub, Glu.supno, - Glu.xsup, SLU_SC, SLU_Z, SLU_TRLU); - zCreate_CompCol_Matrix(U, min_mn, min_mn, nnzU, Glu.ucol, - Glu.usub, Glu.xusub, SLU_NC, SLU_Z, SLU_TRU); - } - - ops[FACT] += ops[TRSV] + ops[GEMV]; - stat->expansions = --(Glu.num_expansions); - - if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r); - SUPERLU_FREE (iperm_c); - SUPERLU_FREE (relax_end); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgstrs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgstrs.c deleted file mode 100644 
index 0a86e604cd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zgstrs.c +++ /dev/null @@ -1,350 +0,0 @@ - -/*! @file zgstrs.c - * \brief Solves a system using LU factorization - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - *
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - -#include "slu_zdefs.h" - - -/* - * Function prototypes - */ -void zusolve(int, int, doublecomplex*, doublecomplex*); -void zlsolve(int, int, doublecomplex*, doublecomplex*); -void zmatvec(int, int, int, doublecomplex*, doublecomplex*, doublecomplex*); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * ZGSTRS solves a system of linear equations A*X=B or A'*X=B
    - * with A sparse and B dense, using the LU factorization computed by
    - * ZGSTRF.
    - *
    - * See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - * Arguments
    - * =========
    - *
    - * trans   (input) trans_t
    - *          Specifies the form of the system of equations:
    - *          = NOTRANS: A * X = B  (No transpose)
    - *          = TRANS:   A'* X = B  (Transpose)
    - *          = CONJ:    A**H * X = B  (Conjugate transpose)
    - *
    - * L       (input) SuperMatrix*
    - *         The factor L from the factorization Pr*A*Pc=L*U as computed by
    - *         zgstrf(). Use compressed row subscripts storage for supernodes,
    - *         i.e., L has types: Stype = SLU_SC, Dtype = SLU_Z, Mtype = SLU_TRLU.
    - *
    - * U       (input) SuperMatrix*
    - *         The factor U from the factorization Pr*A*Pc=L*U as computed by
    - *         zgstrf(). Use column-wise storage scheme, i.e., U has types:
    - *         Stype = SLU_NC, Dtype = SLU_Z, Mtype = SLU_TRU.
    - *
    - * perm_c  (input) int*, dimension (L->ncol)
    - *	   Column permutation vector, which defines the 
    - *         permutation matrix Pc; perm_c[i] = j means column i of A is 
    - *         in position j in A*Pc.
    - *
    - * perm_r  (input) int*, dimension (L->nrow)
    - *         Row permutation vector, which defines the permutation matrix Pr; 
    - *         perm_r[i] = j means row i of A is in position j in Pr*A.
    - *
    - * B       (input/output) SuperMatrix*
    - *         B has types: Stype = SLU_DN, Dtype = SLU_Z, Mtype = SLU_GE.
    - *         On entry, the right hand side matrix.
    - *         On exit, the solution matrix if info = 0;
    - *
    - * stat     (output) SuperLUStat_t*
    - *          Record the statistics on runtime and floating-point operation count.
    - *          See util.h for the definition of 'SuperLUStat_t'.
    - *
    - * info    (output) int*
    - * 	   = 0: successful exit
    - *	   < 0: if info = -i, the i-th argument had an illegal value
    - * 
    - */ - -void -zgstrs (trans_t trans, SuperMatrix *L, SuperMatrix *U, - int *perm_c, int *perm_r, SuperMatrix *B, - SuperLUStat_t *stat, int *info) -{ - -#ifdef _CRAY - _fcd ftcs1, ftcs2, ftcs3, ftcs4; -#endif - int incx = 1, incy = 1; -#ifdef USE_VENDOR_BLAS - doublecomplex alpha = {1.0, 0.0}, beta = {1.0, 0.0}; - doublecomplex *work_col; -#endif - doublecomplex temp_comp; - DNformat *Bstore; - doublecomplex *Bmat; - SCformat *Lstore; - NCformat *Ustore; - doublecomplex *Lval, *Uval; - int fsupc, nrow, nsupr, nsupc, luptr, istart, irow; - int i, j, k, iptr, jcol, n, ldb, nrhs; - doublecomplex *work, *rhs_work, *soln; - flops_t solve_ops; - void zprint_soln(); - - /* Test input parameters ... */ - *info = 0; - Bstore = B->Store; - ldb = Bstore->lda; - nrhs = B->ncol; - if ( trans != NOTRANS && trans != TRANS && trans != CONJ ) *info = -1; - else if ( L->nrow != L->ncol || L->nrow < 0 || - L->Stype != SLU_SC || L->Dtype != SLU_Z || L->Mtype != SLU_TRLU ) - *info = -2; - else if ( U->nrow != U->ncol || U->nrow < 0 || - U->Stype != SLU_NC || U->Dtype != SLU_Z || U->Mtype != SLU_TRU ) - *info = -3; - else if ( ldb < SUPERLU_MAX(0, L->nrow) || - B->Stype != SLU_DN || B->Dtype != SLU_Z || B->Mtype != SLU_GE ) - *info = -6; - if ( *info ) { - i = -(*info); - xerbla_("zgstrs", &i); - return; - } - - n = L->nrow; - work = doublecomplexCalloc(n * nrhs); - if ( !work ) ABORT("Malloc fails for local work[]."); - soln = doublecomplexMalloc(n); - if ( !soln ) ABORT("Malloc fails for local soln[]."); - - Bmat = Bstore->nzval; - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( trans == NOTRANS ) { - /* Permute right hand sides to form Pr*B */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_r[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - /* Forward solve PLy=Pb. 
*/ - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - nrow = nsupr - nsupc; - - solve_ops += 4 * nsupc * (nsupc - 1) * nrhs; - solve_ops += 8 * nrow * nsupc * nrhs; - - if ( nsupc == 1 ) { - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - luptr = L_NZ_START(fsupc); - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); iptr++){ - irow = L_SUB(iptr); - ++luptr; - zz_mult(&temp_comp, &rhs_work[fsupc], &Lval[luptr]); - z_sub(&rhs_work[irow], &rhs_work[irow], &temp_comp); - } - } - } else { - luptr = L_NZ_START(fsupc); -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("N", strlen("N")); - ftcs3 = _cptofcd("U", strlen("U")); - CTRSM( ftcs1, ftcs1, ftcs2, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - CGEMM( ftcs2, ftcs2, &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#else - ztrsm_("L", "L", "N", "U", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); - - zgemm_( "N", "N", &nrow, &nrhs, &nsupc, &alpha, - &Lval[luptr+nsupc], &nsupr, &Bmat[fsupc], &ldb, - &beta, &work[0], &n ); -#endif - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - work_col = &work[j*n]; - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - z_sub(&rhs_work[irow], &rhs_work[irow], &work_col[i]); - work_col[i].r = 0.0; - work_col[i].i = 0.0; - iptr++; - } - } -#else - for (j = 0; j < nrhs; j++) { - rhs_work = &Bmat[j*ldb]; - zlsolve (nsupr, nsupc, &Lval[luptr], &rhs_work[fsupc]); - zmatvec (nsupr, nrow, nsupc, &Lval[luptr+nsupc], - &rhs_work[fsupc], &work[0] ); - - iptr = istart + nsupc; - for (i = 0; i < nrow; i++) { - irow = L_SUB(iptr); - z_sub(&rhs_work[irow], &rhs_work[irow], &work[i]); - work[i].r = 0.; - work[i].i = 0.; - iptr++; - } - } -#endif - } /* else ... 
*/ - } /* for L-solve */ - -#ifdef DEBUG - printf("After L-solve: y=\n"); - zprint_soln(n, nrhs, Bmat); -#endif - - /* - * Back solve Ux=y. - */ - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 4 * nsupc * (nsupc + 1) * nrhs; - - if ( nsupc == 1 ) { - rhs_work = &Bmat[0]; - for (j = 0; j < nrhs; j++) { - z_div(&rhs_work[fsupc], &rhs_work[fsupc], &Lval[luptr]); - rhs_work += ldb; - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("U", strlen("U")); - ftcs3 = _cptofcd("N", strlen("N")); - CTRSM( ftcs1, ftcs2, ftcs3, ftcs3, &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#else - ztrsm_("L", "U", "N", "N", &nsupc, &nrhs, &alpha, - &Lval[luptr], &nsupr, &Bmat[fsupc], &ldb); -#endif -#else - for (j = 0; j < nrhs; j++) - zusolve ( nsupr, nsupc, &Lval[luptr], &Bmat[fsupc+j*ldb] ); -#endif - } - - for (j = 0; j < nrhs; ++j) { - rhs_work = &Bmat[j*ldb]; - for (jcol = fsupc; jcol < fsupc + nsupc; jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++ ){ - irow = U_SUB(i); - zz_mult(&temp_comp, &rhs_work[jcol], &Uval[i]); - z_sub(&rhs_work[irow], &rhs_work[irow], &temp_comp); - } - } - } - - } /* for U-solve */ - -#ifdef DEBUG - printf("After U-solve: x=\n"); - zprint_soln(n, nrhs, Bmat); -#endif - - /* Compute the final solution X := Pc*X. */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_c[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = solve_ops; - - } else { /* Solve A'*X=B or CONJ(A)*X=B */ - /* Permute right hand sides to form Pc'*B. 
*/ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[perm_c[k]] = rhs_work[k]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - stat->ops[SOLVE] = 0; - if (trans == TRANS) { - for (k = 0; k < nrhs; ++k) { - /* Multiply by inv(U'). */ - sp_ztrsv("U", "T", "N", L, U, &Bmat[k*ldb], stat, info); - - /* Multiply by inv(L'). */ - sp_ztrsv("L", "T", "U", L, U, &Bmat[k*ldb], stat, info); - } - } else { /* trans == CONJ */ - for (k = 0; k < nrhs; ++k) { - /* Multiply by conj(inv(U')). */ - sp_ztrsv("U", "C", "N", L, U, &Bmat[k*ldb], stat, info); - - /* Multiply by conj(inv(L')). */ - sp_ztrsv("L", "C", "U", L, U, &Bmat[k*ldb], stat, info); - } - } - /* Compute the final solution X := Pr'*X (=inv(Pr)*X) */ - for (i = 0; i < nrhs; i++) { - rhs_work = &Bmat[i*ldb]; - for (k = 0; k < n; k++) soln[k] = rhs_work[perm_r[k]]; - for (k = 0; k < n; k++) rhs_work[k] = soln[k]; - } - - } - - SUPERLU_FREE(work); - SUPERLU_FREE(soln); -} - -/* - * Diagnostic print of the solution vector - */ -void -zprint_soln(int n, int nrhs, doublecomplex *soln) -{ - int i; - - for (i = 0; i < n; i++) - printf("\t%d: %.4f\n", i, soln[i]); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlacon.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlacon.c deleted file mode 100644 index b2cd1ede74..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlacon.c +++ /dev/null @@ -1,221 +0,0 @@ - -/*! @file zlacon.c - * \brief Estimates the 1-norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_Cnames.h" -#include "slu_dcomplex.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   ZLACON estimates the 1-norm of a square matrix A.   
    - *   Reverse communication is used for evaluating matrix-vector products. 
    - * 
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   N      (input) INT
    - *          The order of the matrix.  N >= 1.   
    - *
    - *   V      (workspace) DOUBLE COMPLEX PRECISION array, dimension (N)   
    - *          On the final return, V = A*W,  where  EST = norm(V)/norm(W)   
    - *          (W is not returned).   
    - *
    - *   X      (input/output) DOUBLE COMPLEX PRECISION array, dimension (N)   
    - *          On an intermediate return, X should be overwritten by   
    - *                A * X,   if KASE=1,   
    - *                A' * X,  if KASE=2,
    - *          where A' is the conjugate transpose of A,
    - *         and ZLACON must be re-called with all the other parameters   
    - *          unchanged.   
    - *
    - *
    - *   EST    (output) DOUBLE PRECISION   
    - *          An estimate (a lower bound) for norm(A).   
    - *
    - *   KASE   (input/output) INT
    - *          On the initial call to ZLACON, KASE should be 0.   
    - *          On an intermediate return, KASE will be 1 or 2, indicating   
    - *          whether X should be overwritten by A * X  or A' * X.   
    - *          On the final return from ZLACON, KASE will again be 0.   
    - *
    - *   Further Details   
    - *   ======= =======   
    - *
    - *   Contributed by Nick Higham, University of Manchester.   
    - *   Originally named CONEST, dated March 16, 1988.   
    - *
    - *   Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of 
    - *   a real or complex matrix, with applications to condition estimation", 
    - *   ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988.   
    - *   ===================================================================== 
    - * 
    - */ - -int -zlacon_(int *n, doublecomplex *v, doublecomplex *x, double *est, int *kase) - -{ - - - /* Table of constant values */ - int c__1 = 1; - doublecomplex zero = {0.0, 0.0}; - doublecomplex one = {1.0, 0.0}; - - /* System generated locals */ - double d__1; - - /* Local variables */ - static int iter; - static int jump, jlast; - static double altsgn, estold; - static int i, j; - double temp; - double safmin; - extern double dlamch_(char *); - extern int izmax1_(int *, doublecomplex *, int *); - extern double dzsum1_(int *, doublecomplex *, int *); - - safmin = dlamch_("Safe minimum"); - if ( *kase == 0 ) { - for (i = 0; i < *n; ++i) { - x[i].r = 1. / (double) (*n); - x[i].i = 0.; - } - *kase = 1; - jump = 1; - return 0; - } - - switch (jump) { - case 1: goto L20; - case 2: goto L40; - case 3: goto L70; - case 4: goto L110; - case 5: goto L140; - } - - /* ................ ENTRY (JUMP = 1) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */ - L20: - if (*n == 1) { - v[0] = x[0]; - *est = z_abs(&v[0]); - /* ... QUIT */ - goto L150; - } - *est = dzsum1_(n, x, &c__1); - - for (i = 0; i < *n; ++i) { - d__1 = z_abs(&x[i]); - if (d__1 > safmin) { - d__1 = 1 / d__1; - x[i].r *= d__1; - x[i].i *= d__1; - } else { - x[i] = one; - } - } - *kase = 2; - jump = 2; - return 0; - - /* ................ ENTRY (JUMP = 2) - FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */ -L40: - j = izmax1_(n, &x[0], &c__1); - --j; - iter = 2; - - /* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */ -L50: - for (i = 0; i < *n; ++i) x[i] = zero; - x[j] = one; - *kase = 1; - jump = 3; - return 0; - - /* ................ ENTRY (JUMP = 3) - X HAS BEEN OVERWRITTEN BY A*X. */ -L70: -#ifdef _CRAY - CCOPY(n, x, &c__1, v, &c__1); -#else - zcopy_(n, x, &c__1, v, &c__1); -#endif - estold = *est; - *est = dzsum1_(n, v, &c__1); - - -L90: - /* TEST FOR CYCLING. 
*/ - if (*est <= estold) goto L120; - - for (i = 0; i < *n; ++i) { - d__1 = z_abs(&x[i]); - if (d__1 > safmin) { - d__1 = 1 / d__1; - x[i].r *= d__1; - x[i].i *= d__1; - } else { - x[i] = one; - } - } - *kase = 2; - jump = 4; - return 0; - - /* ................ ENTRY (JUMP = 4) - X HAS BEEN OVERWRITTEN BY TRANDPOSE(A)*X. */ -L110: - jlast = j; - j = izmax1_(n, &x[0], &c__1); - --j; - if (x[jlast].r != (d__1 = x[j].r, fabs(d__1)) && iter < 5) { - ++iter; - goto L50; - } - - /* ITERATION COMPLETE. FINAL STAGE. */ -L120: - altsgn = 1.; - for (i = 1; i <= *n; ++i) { - x[i-1].r = altsgn * ((double)(i - 1) / (double)(*n - 1) + 1.); - x[i-1].i = 0.; - altsgn = -altsgn; - } - *kase = 1; - jump = 5; - return 0; - - /* ................ ENTRY (JUMP = 5) - X HAS BEEN OVERWRITTEN BY A*X. */ -L140: - temp = dzsum1_(n, x, &c__1) / (double)(*n * 3) * 2.; - if (temp > *est) { -#ifdef _CRAY - CCOPY(n, &x[0], &c__1, &v[0], &c__1); -#else - zcopy_(n, &x[0], &c__1, &v[0], &c__1); -#endif - *est = temp; - } - -L150: - *kase = 0; - return 0; - -} /* zlacon_ */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlangs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlangs.c deleted file mode 100644 index b86ddaa8f5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlangs.c +++ /dev/null @@ -1,119 +0,0 @@ - -/*! @file zlangs.c - * \brief Returns the value of the one norm - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Modified from lapack routine ZLANGE 
    - * 
    - */ -/* - * File name: zlangs.c - * History: Modified from lapack routine ZLANGE - */ -#include -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - *
    - *   ZLANGS returns the value of the one norm, or the Frobenius norm, or 
    - *   the infinity norm, or the element of largest absolute value of a 
    - *   real matrix A.   
    - *
    - *   Description   
    - *   ===========   
    - *
    - *   ZLANGE returns the value   
    - *
    - *      ZLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'   
    - *               (   
    - *               ( norm1(A),         NORM = '1', 'O' or 'o'   
    - *               (   
    - *               ( normI(A),         NORM = 'I' or 'i'   
    - *               (   
    - *               ( normF(A),         NORM = 'F', 'f', 'E' or 'e'   
    - *
    - *   where  norm1  denotes the  one norm of a matrix (maximum column sum), 
    - *   normI  denotes the  infinity norm  of a matrix  (maximum row sum) and 
    - *   normF  denotes the  Frobenius norm of a matrix (square root of sum of 
    - *   squares).  Note that  max(abs(A(i,j)))  is not a  matrix norm.   
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   NORM    (input) CHARACTER*1   
    - *           Specifies the value to be returned in ZLANGE as described above.   
    - *   A       (input) SuperMatrix*
    - *           The M by N sparse matrix A. 
    - *
    - *  =====================================================================
    - * 
    - */ - -double zlangs(char *norm, SuperMatrix *A) -{ - - /* Local variables */ - NCformat *Astore; - doublecomplex *Aval; - int i, j, irow; - double value, sum; - double *rwork; - - Astore = A->Store; - Aval = Astore->nzval; - - if ( SUPERLU_MIN(A->nrow, A->ncol) == 0) { - value = 0.; - - } else if (lsame_(norm, "M")) { - /* Find max(abs(A(i,j))). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - value = SUPERLU_MAX( value, z_abs( &Aval[i]) ); - - } else if (lsame_(norm, "O") || *(unsigned char *)norm == '1') { - /* Find norm1(A). */ - value = 0.; - for (j = 0; j < A->ncol; ++j) { - sum = 0.; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) - sum += z_abs( &Aval[i] ); - value = SUPERLU_MAX(value,sum); - } - - } else if (lsame_(norm, "I")) { - /* Find normI(A). */ - if ( !(rwork = (double *) SUPERLU_MALLOC(A->nrow * sizeof(double))) ) - ABORT("SUPERLU_MALLOC fails for rwork."); - for (i = 0; i < A->nrow; ++i) rwork[i] = 0.; - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) { - irow = Astore->rowind[i]; - rwork[irow] += z_abs( &Aval[i] ); - } - value = 0.; - for (i = 0; i < A->nrow; ++i) - value = SUPERLU_MAX(value, rwork[i]); - - SUPERLU_FREE (rwork); - - } else if (lsame_(norm, "F") || lsame_(norm, "E")) { - /* Find normF(A). */ - ABORT("Not implemented."); - } else - ABORT("Illegal norm specified."); - - return (value); - -} /* zlangs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlaqgs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlaqgs.c deleted file mode 100644 index 0423dd85e3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zlaqgs.c +++ /dev/null @@ -1,148 +0,0 @@ - -/*! @file zlaqgs.c - * \brief Equlibrates a general sprase matrix - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - * Modified from LAPACK routine ZLAQGE
    - * 
    - */ -/* - * File name: zlaqgs.c - * History: Modified from LAPACK routine ZLAQGE - */ -#include -#include "slu_zdefs.h" - -/*! \brief - * - *
    - *   Purpose   
    - *   =======   
    - *
    - *   ZLAQGS equilibrates a general sparse M by N matrix A using the row and   
    - *   scaling factors in the vectors R and C.   
    - *
    - *   See supermatrix.h for the definition of 'SuperMatrix' structure.
    - *
    - *   Arguments   
    - *   =========   
    - *
    - *   A       (input/output) SuperMatrix*
    - *           On exit, the equilibrated matrix.  See EQUED for the form of 
    - *           the equilibrated matrix. The type of A can be:
    - *	    Stype = NC; Dtype = SLU_Z; Mtype = GE.
    - *	    
    - *   R       (input) double*, dimension (A->nrow)
    - *           The row scale factors for A.
    - *	    
    - *   C       (input) double*, dimension (A->ncol)
    - *           The column scale factors for A.
    - *	    
    - *   ROWCND  (input) double
    - *           Ratio of the smallest R(i) to the largest R(i).
    - *	    
    - *   COLCND  (input) double
    - *           Ratio of the smallest C(i) to the largest C(i).
    - *	    
    - *   AMAX    (input) double
    - *           Absolute value of largest matrix entry.
    - *	    
    - *   EQUED   (output) char*
    - *           Specifies the form of equilibration that was done.   
    - *           = 'N':  No equilibration   
    - *           = 'R':  Row equilibration, i.e., A has been premultiplied by  
    - *                   diag(R).   
    - *           = 'C':  Column equilibration, i.e., A has been postmultiplied  
    - *                   by diag(C).   
    - *           = 'B':  Both row and column equilibration, i.e., A has been
    - *                   replaced by diag(R) * A * diag(C).   
    - *
    - *   Internal Parameters   
    - *   ===================   
    - *
    - *   THRESH is a threshold value used to decide if row or column scaling   
    - *   should be done based on the ratio of the row or column scaling   
    - *   factors.  If ROWCND < THRESH, row scaling is done, and if   
    - *   COLCND < THRESH, column scaling is done.   
    - *
    - *   LARGE and SMALL are threshold values used to decide if row scaling   
    - *   should be done based on the absolute size of the largest matrix   
    - *   element.  If AMAX > LARGE or AMAX < SMALL, row scaling is done.   
    - *
    - *   ===================================================================== 
    - * 
    - */ - -void -zlaqgs(SuperMatrix *A, double *r, double *c, - double rowcnd, double colcnd, double amax, char *equed) -{ - - -#define THRESH (0.1) - - /* Local variables */ - NCformat *Astore; - doublecomplex *Aval; - int i, j, irow; - double large, small, cj; - extern double dlamch_(char *); - double temp; - - - /* Quick return if possible */ - if (A->nrow <= 0 || A->ncol <= 0) { - *(unsigned char *)equed = 'N'; - return; - } - - Astore = A->Store; - Aval = Astore->nzval; - - /* Initialize LARGE and SMALL. */ - small = dlamch_("Safe minimum") / dlamch_("Precision"); - large = 1. / small; - - if (rowcnd >= THRESH && amax >= small && amax <= large) { - if (colcnd >= THRESH) - *(unsigned char *)equed = 'N'; - else { - /* Column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - zd_mult(&Aval[i], &Aval[i], cj); - } - } - *(unsigned char *)equed = 'C'; - } - } else if (colcnd >= THRESH) { - /* Row scaling, no column scaling */ - for (j = 0; j < A->ncol; ++j) - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - zd_mult(&Aval[i], &Aval[i], r[irow]); - } - *(unsigned char *)equed = 'R'; - } else { - /* Row and column scaling */ - for (j = 0; j < A->ncol; ++j) { - cj = c[j]; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - temp = cj * r[irow]; - zd_mult(&Aval[i], &Aval[i], temp); - } - } - *(unsigned char *)equed = 'B'; - } - - return; - -} /* zlaqgs */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zldperm.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zldperm.c deleted file mode 100644 index 4d1af27921..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zldperm.c +++ /dev/null @@ -1,168 +0,0 @@ - -/*! @file - * \brief Finds a row permutation so that the matrix has large entries on the diagonal - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ - -#include "slu_zdefs.h" - -extern void mc64id_(int_t*); -extern void mc64ad_(int_t*, int_t*, int_t*, int_t [], int_t [], double [], - int_t*, int_t [], int_t*, int_t[], int_t*, double [], - int_t [], int_t []); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   ZLDPERM finds a row permutation so that the matrix has large
    - *   entries on the diagonal.
    - *
    - * Arguments
    - * =========
    - *
    - * job    (input) int
    - *        Control the action. Possible values for JOB are:
    - *        = 1 : Compute a row permutation of the matrix so that the
    - *              permuted matrix has as many entries on its diagonal as
    - *              possible. The values on the diagonal are of arbitrary size.
    - *              HSL subroutine MC21A/AD is used for this.
    - *        = 2 : Compute a row permutation of the matrix so that the smallest 
    - *              value on the diagonal of the permuted matrix is maximized.
    - *        = 3 : Compute a row permutation of the matrix so that the smallest
    - *              value on the diagonal of the permuted matrix is maximized.
    - *              The algorithm differs from the one used for JOB = 2 and may
    - *              have quite a different performance.
    - *        = 4 : Compute a row permutation of the matrix so that the sum
    - *              of the diagonal entries of the permuted matrix is maximized.
    - *        = 5 : Compute a row permutation of the matrix so that the product
    - *              of the diagonal entries of the permuted matrix is maximized
    - *              and vectors to scale the matrix so that the nonzero diagonal 
    - *              entries of the permuted matrix are one in absolute value and 
    - *              all the off-diagonal entries are less than or equal to one in 
    - *              absolute value.
    - *        Restriction: 1 <= JOB <= 5.
    - *
    - * n      (input) int
    - *        The order of the matrix.
    - *
    - * nnz    (input) int
    - *        The number of nonzeros in the matrix.
    - *
    - * adjncy (input) int*, of size nnz
    - *        The adjacency structure of the matrix, which contains the row
    - *        indices of the nonzeros.
    - *
    - * colptr (input) int*, of size n+1
    - *        The pointers to the beginning of each column in ADJNCY.
    - *
    - * nzval  (input) doublecomplex*, of size nnz
    - *        The nonzero values of the matrix. nzval[k] is the value of
    - *        the entry corresponding to adjncy[k].
    - *        It is not used if job = 1.
    - *
    - * perm   (output) int*, of size n
    - *        The permutation vector. perm[i] = j means row i in the
    - *        original matrix is in row j of the permuted matrix.
    - *
    - * u      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the row scaling factors. 
    - *
    - * v      (output) double*, of size n
    - *        If job = 5, the natural logarithms of the column scaling factors. 
    - *        The scaled matrix B has entries b_ij = a_ij * exp(u_i + v_j).
    - * 
    - */ - -int -zldperm(int_t job, int_t n, int_t nnz, int_t colptr[], int_t adjncy[], - doublecomplex nzval[], int_t *perm, double u[], double v[]) -{ - int_t i, liw, ldw, num; - int_t *iw, icntl[10], info[10]; - double *dw; - double *nzval_d = (double *) SUPERLU_MALLOC(nnz * sizeof(double)); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Enter zldperm()"); -#endif - liw = 5*n; - if ( job == 3 ) liw = 10*n + nnz; - if ( !(iw = intMalloc(liw)) ) ABORT("Malloc fails for iw[]"); - ldw = 3*n + nnz; - if ( !(dw = (double*) SUPERLU_MALLOC(ldw * sizeof(double))) ) - ABORT("Malloc fails for dw[]"); - - /* Increment one to get 1-based indexing. */ - for (i = 0; i <= n; ++i) ++colptr[i]; - for (i = 0; i < nnz; ++i) ++adjncy[i]; -#if ( DEBUGlevel>=2 ) - printf("LDPERM(): n %d, nnz %d\n", n, nnz); - slu_PrintInt10("colptr", n+1, colptr); - slu_PrintInt10("adjncy", nnz, adjncy); -#endif - - /* - * NOTE: - * ===== - * - * MC64AD assumes that column permutation vector is defined as: - * perm(i) = j means column i of permuted A is in column j of original A. - * - * Since a symmetric permutation preserves the diagonal entries. Then - * by the following relation: - * P'(A*P')P = P'A - * we can apply inverse(perm) to rows of A to get large diagonal entries. - * But, since 'perm' defined in MC64AD happens to be the reverse of - * SuperLU's definition of permutation vector, therefore, it is already - * an inverse for our purpose. We will thus use it directly. - * - */ - mc64id_(icntl); -#if 0 - /* Suppress error and warning messages. */ - icntl[0] = -1; - icntl[1] = -1; -#endif - - for (i = 0; i < nnz; ++i) nzval_d[i] = z_abs1(&nzval[i]); - mc64ad_(&job, &n, &nnz, colptr, adjncy, nzval_d, &num, perm, - &liw, iw, &ldw, dw, icntl, info); - -#if ( DEBUGlevel>=2 ) - slu_PrintInt10("perm", n, perm); - printf(".. After MC64AD info %d\tsize of matching %d\n", info[0], num); -#endif - if ( info[0] == 1 ) { /* Structurally singular */ - printf(".. 
The last %d permutations:\n", n-num); - slu_PrintInt10("perm", n-num, &perm[num]); - } - - /* Restore to 0-based indexing. */ - for (i = 0; i <= n; ++i) --colptr[i]; - for (i = 0; i < nnz; ++i) --adjncy[i]; - for (i = 0; i < n; ++i) --perm[i]; - - if ( job == 5 ) - for (i = 0; i < n; ++i) { - u[i] = dw[i]; - v[i] = dw[n+i]; - } - - SUPERLU_FREE(iw); - SUPERLU_FREE(dw); - SUPERLU_FREE(nzval_d); - -#if ( DEBUGlevel>=1 ) - CHECK_MALLOC(0, "Exit zldperm()"); -#endif - - return info[0]; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zmemory.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zmemory.c deleted file mode 100644 index b741e45b85..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zmemory.c +++ /dev/null @@ -1,701 +0,0 @@ - -/*! @file zmemory.c - * \brief Memory details - * - *
    - * -- SuperLU routine (version 4.0) --
    - * Lawrence Berkeley National Laboratory.
    - * June 30, 2009
    - * 
    - */ -#include "slu_zdefs.h" - - -/* Internal prototypes */ -void *zexpand (int *, MemType,int, int, GlobalLU_t *); -int zLUWorkInit (int, int, int, int **, doublecomplex **, GlobalLU_t *); -void copy_mem_doublecomplex (int, void *, void *); -void zStackCompress (GlobalLU_t *); -void zSetupSpace (void *, int, GlobalLU_t *); -void *zuser_malloc (int, int, GlobalLU_t *); -void zuser_free (int, int, GlobalLU_t *); - -/* External prototypes (in memory.c - prec-independent) */ -extern void copy_mem_int (int, void *, void *); -extern void user_bcopy (char *, char *, int); - - -/* Macros to manipulate stack */ -#define StackFull(x) ( x + Glu->stack.used >= Glu->stack.size ) -#define NotDoubleAlign(addr) ( (long int)addr & 7 ) -#define DoubleAlign(addr) ( ((long int)addr + 7) & ~7L ) -#define TempSpace(m, w) ( (2*w + 4 + NO_MARKER) * m * sizeof(int) + \ - (w + 1) * m * sizeof(doublecomplex) ) -#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */ - - - - -/*! \brief Setup the memory model to be used for factorization. - * - * lwork = 0: use system malloc; - * lwork > 0: use user-supplied work[] space. 
- */ -void zSetupSpace(void *work, int lwork, GlobalLU_t *Glu) -{ - if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; /* malloc/free */ - } else if ( lwork > 0 ) { - Glu->MemModel = USER; /* user provided space */ - Glu->stack.used = 0; - Glu->stack.top1 = 0; - Glu->stack.top2 = (lwork/4)*4; /* must be word addressable */ - Glu->stack.size = Glu->stack.top2; - Glu->stack.array = (void *) work; - } -} - - - -void *zuser_malloc(int bytes, int which_end, GlobalLU_t *Glu) -{ - void *buf; - - if ( StackFull(bytes) ) return (NULL); - - if ( which_end == HEAD ) { - buf = (char*) Glu->stack.array + Glu->stack.top1; - Glu->stack.top1 += bytes; - } else { - Glu->stack.top2 -= bytes; - buf = (char*) Glu->stack.array + Glu->stack.top2; - } - - Glu->stack.used += bytes; - return buf; -} - - -void zuser_free(int bytes, int which_end, GlobalLU_t *Glu) -{ - if ( which_end == HEAD ) { - Glu->stack.top1 -= bytes; - } else { - Glu->stack.top2 += bytes; - } - Glu->stack.used -= bytes; -} - - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int zQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, iword, dword, panel_size = sp_ienv(1); - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(doublecomplex); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0*n + 3.0) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0 * panel_size + 4.0 + NO_MARKER) * n * iword + - (panel_size + 1.0) * n * dword ); - - return 0; -} /* zQuerySpace */ - - -/*! \brief - * - *
    - * mem_usage consists of the following fields:
    - *    - for_lu (float)
    - *      The amount of space used in bytes for the L\U data structures.
    - *    - total_needed (float)
    - *      The amount of space needed in bytes to perform factorization.
    - * 
    - */ -int ilu_zQuerySpace(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage) -{ - SCformat *Lstore; - NCformat *Ustore; - register int n, panel_size = sp_ienv(1); - register float iword, dword; - - Lstore = L->Store; - Ustore = U->Store; - n = L->ncol; - iword = sizeof(int); - dword = sizeof(double); - - /* For LU factors */ - mem_usage->for_lu = (float)( (4.0f * n + 3.0f) * iword + - Lstore->nzval_colptr[n] * dword + - Lstore->rowind_colptr[n] * iword ); - mem_usage->for_lu += (float)( (n + 1.0f) * iword + - Ustore->colptr[n] * (dword + iword) ); - - /* Working storage to support factorization. - ILU needs 5*n more integers than LU */ - mem_usage->total_needed = mem_usage->for_lu + - (float)( (2.0f * panel_size + 9.0f + NO_MARKER) * n * iword + - (panel_size + 1.0f) * n * dword ); - - return 0; -} /* ilu_zQuerySpace */ - - -/*! \brief Allocate storage for the data structures common to all factor routines. - * - *
    - * For those unpredictable size, estimate as fill_ratio * nnz(A).
    - * Return value:
    - *     If lwork = -1, return the estimated amount of space required, plus n;
    - *     otherwise, return the amount of space actually allocated when
    - *     memory allocation failure occurred.
    - * 
    - */ -int -zLUMemInit(fact_t fact, void *work, int lwork, int m, int n, int annz, - int panel_size, double fill_ratio, SuperMatrix *L, SuperMatrix *U, - GlobalLU_t *Glu, int **iwork, doublecomplex **dwork) -{ - int info, iword, dword; - SCformat *Lstore; - NCformat *Ustore; - int *xsup, *supno; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - doublecomplex *ucol; - int *usub, *xusub; - int nzlmax, nzumax, nzlumax; - - iword = sizeof(int); - dword = sizeof(doublecomplex); - Glu->n = n; - Glu->num_expansions = 0; - - if ( !Glu->expanders ) - Glu->expanders = (ExpHeader*)SUPERLU_MALLOC( NO_MEMTYPE * - sizeof(ExpHeader) ); - if ( !Glu->expanders ) ABORT("SUPERLU_MALLOC fails for expanders"); - - if ( fact != SamePattern_SameRowPerm ) { - /* Guess for L\U factors */ - nzumax = nzlumax = fill_ratio * annz; - nzlmax = SUPERLU_MAX(1, fill_ratio/4.) * annz; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else { - zSetupSpace(work, lwork, Glu); - } - -#if ( PRNTlevel >= 1 ) - printf("zLUMemInit() called: fill_ratio %ld, nzlmax %ld, nzumax %ld\n", - fill_ratio, nzlmax, nzumax); - fflush(stdout); -#endif - - /* Integer pointers for L\U factors */ - if ( Glu->MemModel == SYSTEM ) { - xsup = intMalloc(n+1); - supno = intMalloc(n+1); - xlsub = intMalloc(n+1); - xlusup = intMalloc(n+1); - xusub = intMalloc(n+1); - } else { - xsup = (int *)zuser_malloc((n+1) * iword, HEAD, Glu); - supno = (int *)zuser_malloc((n+1) * iword, HEAD, Glu); - xlsub = (int *)zuser_malloc((n+1) * iword, HEAD, Glu); - xlusup = (int *)zuser_malloc((n+1) * iword, HEAD, Glu); - xusub = (int *)zuser_malloc((n+1) * iword, HEAD, Glu); - } - - lusup = (doublecomplex *) zexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (doublecomplex *) zexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) zexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) zexpand( &nzumax, USUB, 0, 1, Glu ); - - while ( !lusup || 
!ucol || !lsub || !usub ) { - if ( Glu->MemModel == SYSTEM ) { - SUPERLU_FREE(lusup); - SUPERLU_FREE(ucol); - SUPERLU_FREE(lsub); - SUPERLU_FREE(usub); - } else { - zuser_free((nzlumax+nzumax)*dword+(nzlmax+nzumax)*iword, - HEAD, Glu); - } - nzlumax /= 2; - nzumax /= 2; - nzlmax /= 2; - if ( nzlumax < annz ) { - printf("Not enough memory to perform factorization.\n"); - return (zmemory_usage(nzlmax, nzumax, nzlumax, n) + n); - } -#if ( PRNTlevel >= 1) - printf("zLUMemInit() reduce size: nzlmax %ld, nzumax %ld\n", - nzlmax, nzumax); - fflush(stdout); -#endif - lusup = (doublecomplex *) zexpand( &nzlumax, LUSUP, 0, 0, Glu ); - ucol = (doublecomplex *) zexpand( &nzumax, UCOL, 0, 0, Glu ); - lsub = (int *) zexpand( &nzlmax, LSUB, 0, 0, Glu ); - usub = (int *) zexpand( &nzumax, USUB, 0, 1, Glu ); - } - - } else { - /* fact == SamePattern_SameRowPerm */ - Lstore = L->Store; - Ustore = U->Store; - xsup = Lstore->sup_to_col; - supno = Lstore->col_to_sup; - xlsub = Lstore->rowind_colptr; - xlusup = Lstore->nzval_colptr; - xusub = Ustore->colptr; - nzlmax = Glu->nzlmax; /* max from previous factorization */ - nzumax = Glu->nzumax; - nzlumax = Glu->nzlumax; - - if ( lwork == -1 ) { - return ( GluIntArray(n) * iword + TempSpace(m, panel_size) - + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword + n ); - } else if ( lwork == 0 ) { - Glu->MemModel = SYSTEM; - } else { - Glu->MemModel = USER; - Glu->stack.top2 = (lwork/4)*4; /* must be word-addressable */ - Glu->stack.size = Glu->stack.top2; - } - - lsub = Glu->expanders[LSUB].mem = Lstore->rowind; - lusup = Glu->expanders[LUSUP].mem = Lstore->nzval; - usub = Glu->expanders[USUB].mem = Ustore->rowind; - ucol = Glu->expanders[UCOL].mem = Ustore->nzval;; - Glu->expanders[LSUB].size = nzlmax; - Glu->expanders[LUSUP].size = nzlumax; - Glu->expanders[USUB].size = nzumax; - Glu->expanders[UCOL].size = nzumax; - } - - Glu->xsup = xsup; - Glu->supno = supno; - Glu->lsub = lsub; - Glu->xlsub = xlsub; - Glu->lusup = lusup; - Glu->xlusup = 
xlusup; - Glu->ucol = ucol; - Glu->usub = usub; - Glu->xusub = xusub; - Glu->nzlmax = nzlmax; - Glu->nzumax = nzumax; - Glu->nzlumax = nzlumax; - - info = zLUWorkInit(m, n, panel_size, iwork, dwork, Glu); - if ( info ) - return ( info + zmemory_usage(nzlmax, nzumax, nzlumax, n) + n); - - ++Glu->num_expansions; - return 0; - -} /* zLUMemInit */ - -/*! \brief Allocate known working storage. Returns 0 if success, otherwise - returns the number of bytes allocated so far when failure occurred. */ -int -zLUWorkInit(int m, int n, int panel_size, int **iworkptr, - doublecomplex **dworkptr, GlobalLU_t *Glu) -{ - int isize, dsize, extra; - doublecomplex *old_ptr; - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - - isize = ( (2 * panel_size + 3 + NO_MARKER ) * m + n ) * sizeof(int); - dsize = (m * panel_size + - NUM_TEMPV(m,panel_size,maxsuper,rowblk)) * sizeof(doublecomplex); - - if ( Glu->MemModel == SYSTEM ) - *iworkptr = (int *) intCalloc(isize/sizeof(int)); - else - *iworkptr = (int *) zuser_malloc(isize, TAIL, Glu); - if ( ! *iworkptr ) { - fprintf(stderr, "zLUWorkInit: malloc fails for local iworkptr[]\n"); - return (isize + n); - } - - if ( Glu->MemModel == SYSTEM ) - *dworkptr = (doublecomplex *) SUPERLU_MALLOC(dsize); - else { - *dworkptr = (doublecomplex *) zuser_malloc(dsize, TAIL, Glu); - if ( NotDoubleAlign(*dworkptr) ) { - old_ptr = *dworkptr; - *dworkptr = (doublecomplex*) DoubleAlign(*dworkptr); - *dworkptr = (doublecomplex*) ((double*)*dworkptr - 1); - extra = (char*)old_ptr - (char*)*dworkptr; -#ifdef DEBUG - printf("zLUWorkInit: not aligned, extra %d\n", extra); -#endif - Glu->stack.top2 -= extra; - Glu->stack.used += extra; - } - } - if ( ! *dworkptr ) { - fprintf(stderr, "malloc fails for local dworkptr[]."); - return (isize + dsize + n); - } - - return 0; -} - - -/*! \brief Set up pointers for real working arrays. 
- */ -void -zSetRWork(int m, int panel_size, doublecomplex *dworkptr, - doublecomplex **dense, doublecomplex **tempv) -{ - doublecomplex zero = {0.0, 0.0}; - - int maxsuper = sp_ienv(3), - rowblk = sp_ienv(4); - *dense = dworkptr; - *tempv = *dense + panel_size*m; - zfill (*dense, m * panel_size, zero); - zfill (*tempv, NUM_TEMPV(m,panel_size,maxsuper,rowblk), zero); -} - -/*! \brief Free the working storage used by factor routines. - */ -void zLUWorkFree(int *iwork, doublecomplex *dwork, GlobalLU_t *Glu) -{ - if ( Glu->MemModel == SYSTEM ) { - SUPERLU_FREE (iwork); - SUPERLU_FREE (dwork); - } else { - Glu->stack.used -= (Glu->stack.size - Glu->stack.top2); - Glu->stack.top2 = Glu->stack.size; -/* zStackCompress(Glu); */ - } - - SUPERLU_FREE (Glu->expanders); - Glu->expanders = NULL; -} - -/*! \brief Expand the data structures for L and U during the factorization. - * - *
    - * Return value:   0 - successful return
    - *               > 0 - number of bytes allocated when run out of space
    - * 
    - */ -int -zLUMemXpand(int jcol, - int next, /* number of elements currently in the factors */ - MemType mem_type, /* which type of memory to expand */ - int *maxlen, /* modified - maximum length of a data structure */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - void *new_mem; - -#ifdef DEBUG - printf("zLUMemXpand(): jcol %d, next %d, maxlen %d, MemType %d\n", - jcol, next, *maxlen, mem_type); -#endif - - if (mem_type == USUB) - new_mem = zexpand(maxlen, mem_type, next, 1, Glu); - else - new_mem = zexpand(maxlen, mem_type, next, 0, Glu); - - if ( !new_mem ) { - int nzlmax = Glu->nzlmax; - int nzumax = Glu->nzumax; - int nzlumax = Glu->nzlumax; - fprintf(stderr, "Can't expand MemType %d: jcol %d\n", mem_type, jcol); - return (zmemory_usage(nzlmax, nzumax, nzlumax, Glu->n) + Glu->n); - } - - switch ( mem_type ) { - case LUSUP: - Glu->lusup = (doublecomplex *) new_mem; - Glu->nzlumax = *maxlen; - break; - case UCOL: - Glu->ucol = (doublecomplex *) new_mem; - Glu->nzumax = *maxlen; - break; - case LSUB: - Glu->lsub = (int *) new_mem; - Glu->nzlmax = *maxlen; - break; - case USUB: - Glu->usub = (int *) new_mem; - Glu->nzumax = *maxlen; - break; - } - - return 0; - -} - - - -void -copy_mem_doublecomplex(int howmany, void *old, void *new) -{ - register int i; - doublecomplex *dold = old; - doublecomplex *dnew = new; - for (i = 0; i < howmany; i++) dnew[i] = dold[i]; -} - -/*! \brief Expand the existing storage to accommodate more fill-ins. 
- */ -void -*zexpand ( - int *prev_len, /* length used from previous call */ - MemType type, /* which part of the memory to expand */ - int len_to_copy, /* size of the memory to be copied to new store */ - int keep_prev, /* = 1: use prev_len; - = 0: compute new_len to expand */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - float EXPAND = 1.5; - float alpha; - void *new_mem, *old_mem; - int new_len, tries, lword, extra, bytes_to_copy; - ExpHeader *expanders = Glu->expanders; /* Array of 4 types of memory */ - - alpha = EXPAND; - - if ( Glu->num_expansions == 0 || keep_prev ) { - /* First time allocate requested */ - new_len = *prev_len; - } else { - new_len = alpha * *prev_len; - } - - if ( type == LSUB || type == USUB ) lword = sizeof(int); - else lword = sizeof(doublecomplex); - - if ( Glu->MemModel == SYSTEM ) { - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - if ( Glu->num_expansions != 0 ) { - tries = 0; - if ( keep_prev ) { - if ( !new_mem ) return (NULL); - } else { - while ( !new_mem ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - new_mem = (void *) SUPERLU_MALLOC((size_t)new_len * lword); - } - } - if ( type == LSUB || type == USUB ) { - copy_mem_int(len_to_copy, expanders[type].mem, new_mem); - } else { - copy_mem_doublecomplex(len_to_copy, expanders[type].mem, new_mem); - } - SUPERLU_FREE (expanders[type].mem); - } - expanders[type].mem = (void *) new_mem; - - } else { /* MemModel == USER */ - if ( Glu->num_expansions == 0 ) { - new_mem = zuser_malloc(new_len * lword, HEAD, Glu); - if ( NotDoubleAlign(new_mem) && - (type == LUSUP || type == UCOL) ) { - old_mem = new_mem; - new_mem = (void *)DoubleAlign(new_mem); - extra = (char*)new_mem - (char*)old_mem; -#ifdef DEBUG - printf("expand(): not aligned, extra %d\n", extra); -#endif - Glu->stack.top1 += extra; - Glu->stack.used += extra; - } - expanders[type].mem = (void *) new_mem; - } else { - tries = 0; - extra = 
(new_len - *prev_len) * lword; - if ( keep_prev ) { - if ( StackFull(extra) ) return (NULL); - } else { - while ( StackFull(extra) ) { - if ( ++tries > 10 ) return (NULL); - alpha = Reduce(alpha); - new_len = alpha * *prev_len; - extra = (new_len - *prev_len) * lword; - } - } - - if ( type != USUB ) { - new_mem = (void*)((char*)expanders[type + 1].mem + extra); - bytes_to_copy = (char*)Glu->stack.array + Glu->stack.top1 - - (char*)expanders[type + 1].mem; - user_bcopy(expanders[type+1].mem, new_mem, bytes_to_copy); - - if ( type < USUB ) { - Glu->usub = expanders[USUB].mem = - (void*)((char*)expanders[USUB].mem + extra); - } - if ( type < LSUB ) { - Glu->lsub = expanders[LSUB].mem = - (void*)((char*)expanders[LSUB].mem + extra); - } - if ( type < UCOL ) { - Glu->ucol = expanders[UCOL].mem = - (void*)((char*)expanders[UCOL].mem + extra); - } - Glu->stack.top1 += extra; - Glu->stack.used += extra; - if ( type == UCOL ) { - Glu->stack.top1 += extra; /* Add same amount for USUB */ - Glu->stack.used += extra; - } - - } /* if ... */ - - } /* else ... */ - } - - expanders[type].size = new_len; - *prev_len = new_len; - if ( Glu->num_expansions ) ++Glu->num_expansions; - - return (void *) expanders[type].mem; - -} /* zexpand */ - - -/*! \brief Compress the work[] array to remove fragmentation. 
- */ -void -zStackCompress(GlobalLU_t *Glu) -{ - register int iword, dword, ndim; - char *last, *fragment; - int *ifrom, *ito; - doublecomplex *dfrom, *dto; - int *xlsub, *lsub, *xusub, *usub, *xlusup; - doublecomplex *ucol, *lusup; - - iword = sizeof(int); - dword = sizeof(doublecomplex); - ndim = Glu->n; - - xlsub = Glu->xlsub; - lsub = Glu->lsub; - xusub = Glu->xusub; - usub = Glu->usub; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - lusup = Glu->lusup; - - dfrom = ucol; - dto = (doublecomplex *)((char*)lusup + xlusup[ndim] * dword); - copy_mem_doublecomplex(xusub[ndim], dfrom, dto); - ucol = dto; - - ifrom = lsub; - ito = (int *) ((char*)ucol + xusub[ndim] * iword); - copy_mem_int(xlsub[ndim], ifrom, ito); - lsub = ito; - - ifrom = usub; - ito = (int *) ((char*)lsub + xlsub[ndim] * iword); - copy_mem_int(xusub[ndim], ifrom, ito); - usub = ito; - - last = (char*)usub + xusub[ndim] * iword; - fragment = (char*) (((char*)Glu->stack.array + Glu->stack.top1) - last); - Glu->stack.used -= (long int) fragment; - Glu->stack.top1 -= (long int) fragment; - - Glu->ucol = ucol; - Glu->lsub = lsub; - Glu->usub = usub; - -#ifdef DEBUG - printf("zStackCompress: fragment %d\n", fragment); - /* for (last = 0; last < ndim; ++last) - print_lu_col("After compress:", last, 0);*/ -#endif - -} - -/*! 
\brief Allocate storage for original matrix A - */ -void -zallocateA(int n, int nnz, doublecomplex **a, int **asub, int **xa) -{ - *a = (doublecomplex *) doublecomplexMalloc(nnz); - *asub = (int *) intMalloc(nnz); - *xa = (int *) intMalloc(n+1); -} - - -doublecomplex *doublecomplexMalloc(int n) -{ - doublecomplex *buf; - buf = (doublecomplex *) SUPERLU_MALLOC((size_t)n * sizeof(doublecomplex)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in doublecomplexMalloc()\n"); - } - return (buf); -} - -doublecomplex *doublecomplexCalloc(int n) -{ - doublecomplex *buf; - register int i; - doublecomplex zero = {0.0, 0.0}; - buf = (doublecomplex *) SUPERLU_MALLOC((size_t)n * sizeof(doublecomplex)); - if ( !buf ) { - ABORT("SUPERLU_MALLOC failed for buf in doublecomplexCalloc()\n"); - } - for (i = 0; i < n; ++i) buf[i] = zero; - return (buf); -} - - -int zmemory_usage(const int nzlmax, const int nzumax, - const int nzlumax, const int n) -{ - register int iword, dword; - - iword = sizeof(int); - dword = sizeof(doublecomplex); - - return (10 * n * iword + - nzlmax * iword + nzumax * (iword + dword) + nzlumax * dword); - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpanel_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpanel_bmod.c deleted file mode 100644 index 7edbc82c67..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpanel_bmod.c +++ /dev/null @@ -1,487 +0,0 @@ - -/*! @file zpanel_bmod.c - * \brief Performs numeric block updates - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ -/* - -*/ - -#include -#include -#include "slu_zdefs.h" - -/* - * Function prototypes - */ -void zlsolve(int, int, doublecomplex *, doublecomplex *); -void zmatvec(int, int, int, doublecomplex *, doublecomplex *, doublecomplex *); -extern void zcheck_tempv(); - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *    Performs numeric block updates (sup-panel) in topological order.
    - *    It features: col-col, 2cols-col, 3cols-col, and sup-col updates.
    - *    Special processing on the supernodal portion of L\U[*,j]
    - *
    - *    Before entering this routine, the original nonzeros in the panel 
    - *    were already copied into the spa[m,w].
    - *
    - *    Updated/Output parameters-
    - *    dense[0:m-1,w]: L[*,j:j+w-1] and U[*,j:j+w-1] are returned 
    - *    collectively in the m-by-w vector dense[*]. 
    - * 
    - */ - -void -zpanel_bmod ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - const int nseg, /* in */ - doublecomplex *dense, /* out, of size n by w */ - doublecomplex *tempv, /* working array */ - int *segrep, /* in */ - int *repfnz, /* in, of size n by w */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ - - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - doublecomplex alpha, beta; -#endif - - register int k, ksub; - int fsupc, nsupc, nsupr, nrow; - int krep, krep_ind; - doublecomplex ukj, ukj1, ukj2; - int luptr, luptr1, luptr2; - int segsze; - int block_nrow; /* no of rows in a block row */ - register int lptr; /* Points to the row subscripts of a supernode */ - int kfnz, irow, no_zeros; - register int isub, isub1, i; - register int jj; /* Index through each column in the panel */ - int *xsup, *supno; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - int *repfnz_col; /* repfnz[] for a column in the panel */ - doublecomplex *dense_col; /* dense[] for a column in the panel */ - doublecomplex *tempv1; /* Used in 1-D update */ - doublecomplex *TriTmp, *MatvecTmp; /* used in 2-D update */ - doublecomplex zero = {0.0, 0.0}; - doublecomplex one = {1.0, 0.0}; - doublecomplex comp_temp, comp_temp1; - register int ldaTmp; - register int r_ind, r_hi; - static int first = 1, maxsuper, rowblk, colblk; - flops_t *ops = stat->ops; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - if ( first ) { - maxsuper = sp_ienv(3); - rowblk = sp_ienv(4); - colblk = sp_ienv(5); - first = 0; - } - ldaTmp = maxsuper + rowblk; - - /* - * For each nonz supernode segment of U[*,j] in topological order - */ - k = nseg - 1; - for (ksub = 0; ksub < nseg; ksub++) { /* 
for each updating supernode */ - - /* krep = representative of current k-th supernode - * fsupc = first supernodal column - * nsupc = no of columns in a supernode - * nsupr = no of rows in a supernode - */ - krep = segrep[k--]; - fsupc = xsup[supno[krep]]; - nsupc = krep - fsupc + 1; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nrow = nsupr - nsupc; - lptr = xlsub[fsupc]; - krep_ind = lptr + nsupc - 1; - - repfnz_col = repfnz; - dense_col = dense; - - if ( nsupc >= colblk && nrow > rowblk ) { /* 2-D block update */ - - TriTmp = tempv; - - /* Sequence through each column in panel -- triangular solves */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp ) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += 4 * segsze * (segsze - 1); - ops[GEMV] += 8 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - z_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - zz_mult(&comp_temp, &ukj1, &lusup[luptr1]); - z_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - zz_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 - nsupr; - zz_mult(&comp_temp, &ukj2, &lusup[luptr2-1]); - 
z_sub(&ukj1, &ukj1, &comp_temp); - - zz_mult(&comp_temp, &ukj1, &lusup[luptr1]); - zz_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - luptr++; luptr1++; luptr2++; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - zz_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - zz_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } - - } else { /* segsze >= 4 */ - - /* Copy U[*,j] segment from dense[*] to TriTmp[*], which - holds the result of triangular solves. */ - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - TriTmp[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#else - ztrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, TriTmp, &incx ); -#endif -#else - zlsolve ( nsupr, segsze, &lusup[luptr], TriTmp ); -#endif - - - } /* else ... */ - - } /* for jj ... 
end tri-solves */ - - /* Block row updates; push all the way into dense[*] block */ - for ( r_ind = 0; r_ind < nrow; r_ind += rowblk ) { - - r_hi = SUPERLU_MIN(nrow, r_ind + rowblk); - block_nrow = SUPERLU_MIN(rowblk, r_hi - r_ind); - luptr = xlusup[fsupc] + nsupc + r_ind; - isub1 = lptr + nsupc + r_ind; - - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - /* Sequence through each column in panel -- matrix-vector */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - /* Perform a block update, and scatter the result of - matrix-vector to dense[]. */ - no_zeros = kfnz - fsupc; - luptr1 = luptr + nsupr * no_zeros; - MatvecTmp = &TriTmp[maxsuper]; - -#ifdef USE_VENDOR_BLAS - alpha = one; - beta = zero; -#ifdef _CRAY - CGEMV(ftcs2, &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#else - zgemv_("N", &block_nrow, &segsze, &alpha, &lusup[luptr1], - &nsupr, TriTmp, &incx, &beta, MatvecTmp, &incy); -#endif -#else - zmatvec(nsupr, block_nrow, segsze, &lusup[luptr1], - TriTmp, MatvecTmp); -#endif - - /* Scatter MatvecTmp[*] into SPA dense[*] temporarily - * such that MatvecTmp[*] can be re-used for the - * the next blok row update. dense[] will be copied into - * global store after the whole panel has been finished. - */ - isub = isub1; - for (i = 0; i < block_nrow; i++) { - irow = lsub[isub]; - z_sub(&dense_col[irow], &dense_col[irow], - &MatvecTmp[i]); - MatvecTmp[i] = zero; - ++isub; - } - - } /* for jj ... */ - - } /* for each block row ... 
*/ - - /* Scatter the triangular solves into SPA dense[*] */ - repfnz_col = repfnz; - TriTmp = tempv; - dense_col = dense; - - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m, TriTmp += ldaTmp) { - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - if ( segsze <= 3 ) continue; /* skip unrolled cases */ - - no_zeros = kfnz - fsupc; - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = TriTmp[i]; - TriTmp[i] = zero; - ++isub; - } - - } /* for jj ... */ - - } else { /* 1-D block modification */ - - - /* Sequence through each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++, - repfnz_col += m, dense_col += m) { - - kfnz = repfnz_col[krep]; - if ( kfnz == EMPTY ) continue; /* Skip any zero segment */ - - segsze = krep - kfnz + 1; - luptr = xlusup[fsupc]; - - ops[TRSV] += 4 * segsze * (segsze - 1); - ops[GEMV] += 8 * nrow * segsze; - - /* Case 1: Update U-segment of size 1 -- col-col update */ - if ( segsze == 1 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc; - - for (i = lptr + nsupc; i < xlsub[fsupc+1]; i++) { - irow = lsub[i]; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - z_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - ++luptr; - } - - } else if ( segsze <= 3 ) { - ukj = dense_col[lsub[krep_ind]]; - luptr += nsupr*(nsupc-1) + nsupc-1; - ukj1 = dense_col[lsub[krep_ind - 1]]; - luptr1 = luptr - nsupr; - - if ( segsze == 2 ) { - zz_mult(&comp_temp, &ukj1, &lusup[luptr1]); - z_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - zz_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } else { - ukj2 = dense_col[lsub[krep_ind - 2]]; - luptr2 = luptr1 
- nsupr; - zz_mult(&comp_temp, &ukj2, &lusup[luptr2-1]); - z_sub(&ukj1, &ukj1, &comp_temp); - - zz_mult(&comp_temp, &ukj1, &lusup[luptr1]); - zz_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&ukj, &ukj, &comp_temp); - dense_col[lsub[krep_ind]] = ukj; - dense_col[lsub[krep_ind-1]] = ukj1; - for (i = lptr + nsupc; i < xlsub[fsupc+1]; ++i) { - irow = lsub[i]; - ++luptr; ++luptr1; ++luptr2; - zz_mult(&comp_temp, &ukj, &lusup[luptr]); - zz_mult(&comp_temp1, &ukj1, &lusup[luptr1]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - zz_mult(&comp_temp1, &ukj2, &lusup[luptr2]); - z_add(&comp_temp, &comp_temp, &comp_temp1); - z_sub(&dense_col[irow], &dense_col[irow], &comp_temp); - } - } - - } else { /* segsze >= 4 */ - /* - * Perform a triangular solve and block update, - * then scatter the result of sup-col update to dense[]. - */ - no_zeros = kfnz - fsupc; - - /* Copy U[*,j] segment from dense[*] to tempv[*]: - * The result of triangular solve is in tempv[*]; - * The result of matrix vector update is in dense_col[*] - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; ++i) { - irow = lsub[isub]; - tempv[i] = dense_col[irow]; /* Gather */ - ++isub; - } - - /* start effective triangle */ - luptr += nsupr * no_zeros + no_zeros; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#else - ztrsv_( "L", "N", "U", &segsze, &lusup[luptr], - &nsupr, tempv, &incx ); -#endif - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - alpha = one; - beta = zero; -#ifdef _CRAY - CGEMV( ftcs2, &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#else - zgemv_( "N", &nrow, &segsze, &alpha, &lusup[luptr], - &nsupr, tempv, &incx, &beta, tempv1, &incy ); -#endif -#else - zlsolve ( nsupr, segsze, &lusup[luptr], tempv ); - - luptr += segsze; /* Dense matrix-vector */ - tempv1 = &tempv[segsze]; - zmatvec (nsupr, nrow, 
segsze, &lusup[luptr], tempv, tempv1); -#endif - - /* Scatter tempv[*] into SPA dense[*] temporarily, such - * that tempv[*] can be used for the triangular solve of - * the next column of the panel. They will be copied into - * ucol[*] after the whole panel has been finished. - */ - isub = lptr + no_zeros; - for (i = 0; i < segsze; i++) { - irow = lsub[isub]; - dense_col[irow] = tempv[i]; - tempv[i] = zero; - isub++; - } - - /* Scatter the update from tempv1[*] into SPA dense[*] */ - /* Start dense rectangular L */ - for (i = 0; i < nrow; i++) { - irow = lsub[isub]; - z_sub(&dense_col[irow], &dense_col[irow], &tempv1[i]); - tempv1[i] = zero; - ++isub; - } - - } /* else segsze>=4 ... */ - - } /* for each column in the panel... */ - - } /* else 1-D update ... */ - - } /* for each updating supernode ... */ - -} - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpanel_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpanel_dfs.c deleted file mode 100644 index e05766fb92..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpanel_dfs.c +++ /dev/null @@ -1,254 +0,0 @@ - -/*! @file zpanel_dfs.c - * \brief Peforms a symbolic factorization on a panel of symbols - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - *   Performs a symbolic factorization on a panel of columns [jcol, jcol+w).
    - *
    - *   A supernode representative is the last column of a supernode.
    - *   The nonzeros in U[*,j] are segments that end at supernodal
    - *   representatives.
    - *
    - *   The routine returns one list of the supernodal representatives
    - *   in topological order of the dfs that generates them. This list is
    - *   a superset of the topological order of each individual column within
    - *   the panel. 
    - *   The location of the first nonzero in each supernodal segment
    - *   (supernodal entry location) is also returned. Each column has a 
    - *   separate list for this purpose.
    - *
    - *   Two marker arrays are used for dfs:
    - *     marker[i] == jj, if i was visited during dfs of current column jj;
    - *     marker1[i] >= jcol, if i was visited by earlier columns in this panel;
    - *
    - *   marker: A-row --> A-row/col (0/1)
    - *   repfnz: SuperA-col --> PA-row
    - *   parent: SuperA-col --> SuperA-col
    - *   xplore: SuperA-col --> index to L-structure
    - * 
    - */ - -void -zpanel_dfs ( - const int m, /* in - number of rows in the matrix */ - const int w, /* in */ - const int jcol, /* in */ - SuperMatrix *A, /* in - original matrix */ - int *perm_r, /* in */ - int *nseg, /* out */ - doublecomplex *dense, /* out */ - int *panel_lsub, /* out */ - int *segrep, /* out */ - int *repfnz, /* out */ - int *xprune, /* out */ - int *marker, /* out */ - int *parent, /* working array */ - int *xplore, /* working array */ - GlobalLU_t *Glu /* modified */ - ) -{ - - NCPformat *Astore; - doublecomplex *a; - int *asub; - int *xa_begin, *xa_end; - int krep, chperm, chmark, chrep, oldrep, kchild, myfnz; - int k, krow, kmark, kperm; - int xdfs, maxdfs, kpar; - int jj; /* index through each column in the panel */ - int *marker1; /* marker1[jj] >= jcol if vertex jj was visited - by a previous column within this panel. */ - int *repfnz_col; /* start of each column in the panel */ - doublecomplex *dense_col; /* start of each column in the panel */ - int nextl_col; /* next available position in panel_lsub[*,jj] */ - int *xsup, *supno; - int *lsub, *xlsub; - - /* Initialize pointers */ - Astore = A->Store; - a = Astore->nzval; - asub = Astore->rowind; - xa_begin = Astore->colbeg; - xa_end = Astore->colend; - marker1 = marker + m; - repfnz_col = repfnz; - dense_col = dense; - *nseg = 0; - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - - /* For each column in the panel */ - for (jj = jcol; jj < jcol + w; jj++) { - nextl_col = (jj - jcol) * m; - -#ifdef CHK_DFS - printf("\npanel col %d: ", jj); -#endif - - /* For each nonz in A[*,jj] do dfs */ - for (k = xa_begin[jj]; k < xa_end[jj]; k++) { - krow = asub[k]; - dense_col[krow] = a[k]; - kmark = marker[krow]; - if ( kmark == jj ) - continue; /* krow visited before, go to the next nonzero */ - - /* For each unmarked nbr krow of jj - * krow is in L: place it in structure of L[*,jj] - */ - marker[krow] = jj; - kperm = perm_r[krow]; - - if ( kperm == EMPTY ) { - 
panel_lsub[nextl_col++] = krow; /* krow is indexed into A */ - } - /* - * krow is in U: if its supernode-rep krep - * has been explored, update repfnz[*] - */ - else { - - krep = xsup[supno[kperm]+1] - 1; - myfnz = repfnz_col[krep]; - -#ifdef CHK_DFS - printf("krep %d, myfnz %d, perm_r[%d] %d\n", krep, myfnz, krow, kperm); -#endif - if ( myfnz != EMPTY ) { /* Representative visited before */ - if ( myfnz > kperm ) repfnz_col[krep] = kperm; - /* continue; */ - } - else { - /* Otherwise, perform dfs starting at krep */ - oldrep = EMPTY; - parent[krep] = oldrep; - repfnz_col[krep] = kperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - do { - /* - * For each unmarked kchild of krep - */ - while ( xdfs < maxdfs ) { - - kchild = lsub[xdfs]; - xdfs++; - chmark = marker[kchild]; - - if ( chmark != jj ) { /* Not reached yet */ - marker[kchild] = jj; - chperm = perm_r[kchild]; - - /* Case kchild is in L: place it in L[*,j] */ - if ( chperm == EMPTY ) { - panel_lsub[nextl_col++] = kchild; - } - /* Case kchild is in U: - * chrep = its supernode-rep. If its rep has - * been explored, update its repfnz[*] - */ - else { - - chrep = xsup[supno[chperm]+1] - 1; - myfnz = repfnz_col[chrep]; -#ifdef CHK_DFS - printf("chrep %d,myfnz %d,perm_r[%d] %d\n",chrep,myfnz,kchild,chperm); -#endif - if ( myfnz != EMPTY ) { /* Visited before */ - if ( myfnz > chperm ) - repfnz_col[chrep] = chperm; - } - else { - /* Cont. dfs at snode-rep of kchild */ - xplore[krep] = xdfs; - oldrep = krep; - krep = chrep; /* Go deeper down G(L) */ - parent[krep] = oldrep; - repfnz_col[krep] = chperm; - xdfs = xlsub[krep]; - maxdfs = xprune[krep]; -#ifdef CHK_DFS - printf(" xdfs %d, maxdfs %d: ", xdfs, maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } /* else */ - - } /* else */ - - } /* if... 
*/ - - } /* while xdfs < maxdfs */ - - /* krow has no more unexplored nbrs: - * Place snode-rep krep in postorder DFS, if this - * segment is seen for the first time. (Note that - * "repfnz[krep]" may change later.) - * Backtrack dfs to its parent. - */ - if ( marker1[krep] < jcol ) { - segrep[*nseg] = krep; - ++(*nseg); - marker1[krep] = jj; - } - - kpar = parent[krep]; /* Pop stack, mimic recursion */ - if ( kpar == EMPTY ) break; /* dfs done */ - krep = kpar; - xdfs = xplore[krep]; - maxdfs = xprune[krep]; - -#ifdef CHK_DFS - printf(" pop stack: krep %d,xdfs %d,maxdfs %d: ", krep,xdfs,maxdfs); - for (i = xdfs; i < maxdfs; i++) printf(" %d", lsub[i]); - printf("\n"); -#endif - } while ( kpar != EMPTY ); /* do-while - until empty stack */ - - } /* else */ - - } /* else */ - - } /* for each nonz in A[*,jj] */ - - repfnz_col += m; /* Move to next column */ - dense_col += m; - - } /* for jj ... */ - -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpivotL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpivotL.c deleted file mode 100644 index 96724848a1..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpivotL.c +++ /dev/null @@ -1,196 +0,0 @@ - -/*! @file zpivotL.c - * \brief Performs numerical pivoting - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include -#include "slu_zdefs.h" - -#undef DEBUG - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Performs the numerical pivoting on the current column of L,
    - *   and the CDIV operation.
    - *
    - *   Pivot policy:
    - *   (1) Compute thresh = u * max_(i>=j) abs(A_ij);
    - *   (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
    - *           pivot row = k;
    - *       ELSE IF abs(A_jj) >= thresh THEN
    - *           pivot row = j;
    - *       ELSE
    - *           pivot row = m;
    - * 
    - *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
    - *
    - *   Return value: 0      success;
    - *                 i > 0  U(i,i) is exactly zero.
    - * 
    - */ - -int -zpivotL( - const int jcol, /* in */ - const double u, /* in - diagonal pivoting threshold */ - int *usepr, /* re-use the pivot sequence given by perm_r/iperm_r */ - int *perm_r, /* may be modified */ - int *iperm_r, /* in - inverse of perm_r */ - int *iperm_c, /* in - used to find diagonal of Pc*A*Pc' */ - int *pivrow, /* out */ - GlobalLU_t *Glu, /* modified - global LU data structures */ - SuperLUStat_t *stat /* output */ - ) -{ - - doublecomplex one = {1.0, 0.0}; - int fsupc; /* first column in the supernode */ - int nsupc; /* no of columns in the supernode */ - int nsupr; /* no of rows in the supernode */ - int lptr; /* points to the starting subscript of the supernode */ - int pivptr, old_pivptr, diag, diagind; - double pivmax, rtemp, thresh; - doublecomplex temp; - doublecomplex *lu_sup_ptr; - doublecomplex *lu_col_ptr; - int *lsub_ptr; - int isub, icol, k, itemp; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - /* Initialize pointers */ - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; - nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ - lptr = xlsub[fsupc]; - nsupr = xlsub[fsupc+1] - lptr; - lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ - lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ - lsub_ptr = &lsub[lptr]; /* start of row indices of the supernode */ - -#ifdef DEBUG -if ( jcol == MIN_COL ) { - printf("Before cdiv: col %d\n", jcol); - for (k = nsupc; k < nsupr; k++) - printf(" lu[%d] %f\n", lsub_ptr[k], lu_col_ptr[k]); -} -#endif - - /* Determine the largest abs numerical value for partial pivoting; - Also search for user-specified pivot, and diagonal element. 
*/ - if ( *usepr ) *pivrow = iperm_r[jcol]; - diagind = iperm_c[jcol]; -#ifdef SCIPY_SPECIFIC_FIX - pivmax = -1.0; -#else - pivmax = 0.0; -#endif - pivptr = nsupc; - diag = EMPTY; - old_pivptr = nsupc; - for (isub = nsupc; isub < nsupr; ++isub) { - rtemp = z_abs1 (&lu_col_ptr[isub]); - if ( rtemp > pivmax ) { - pivmax = rtemp; - pivptr = isub; - } - if ( *usepr && lsub_ptr[isub] == *pivrow ) old_pivptr = isub; - if ( lsub_ptr[isub] == diagind ) diag = isub; - } - - /* Test for singularity */ -#ifdef SCIPY_SPECIFIC_FIX - if (pivmax < 0.0) { - perm_r[diagind] = jcol; - *usepr = 0; - return (jcol+1); - } -#endif - if ( pivmax == 0.0 ) { -#if 1 - *pivrow = lsub_ptr[pivptr]; - perm_r[*pivrow] = jcol; -#else - perm_r[diagind] = jcol; -#endif - *usepr = 0; - return (jcol+1); - } - - thresh = u * pivmax; - - /* Choose appropriate pivotal element by our policy. */ - if ( *usepr ) { - rtemp = z_abs1 (&lu_col_ptr[old_pivptr]); - if ( rtemp != 0.0 && rtemp >= thresh ) - pivptr = old_pivptr; - else - *usepr = 0; - } - if ( *usepr == 0 ) { - /* Use diagonal pivot? */ - if ( diag >= 0 ) { /* diagonal exists */ - rtemp = z_abs1 (&lu_col_ptr[diag]); - if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; - } - *pivrow = lsub_ptr[pivptr]; - } - - /* Record pivot row */ - perm_r[*pivrow] = jcol; - - /* Interchange row subscripts */ - if ( pivptr != nsupc ) { - itemp = lsub_ptr[pivptr]; - lsub_ptr[pivptr] = lsub_ptr[nsupc]; - lsub_ptr[nsupc] = itemp; - - /* Interchange numerical values as well, for the whole snode, such - * that L is indexed the same way as A. 
- */ - for (icol = 0; icol <= nsupc; icol++) { - itemp = pivptr + icol * nsupr; - temp = lu_sup_ptr[itemp]; - lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; - lu_sup_ptr[nsupc + icol*nsupr] = temp; - } - } /* if */ - - /* cdiv operation */ - ops[FACT] += 10 * (nsupr - nsupc); - - z_div(&temp, &one, &lu_col_ptr[nsupc]); - for (k = nsupc+1; k < nsupr; k++) - zz_mult(&lu_col_ptr[k], &lu_col_ptr[k], &temp); - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpivotgrowth.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpivotgrowth.c deleted file mode 100644 index ddfc3772b8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpivotgrowth.c +++ /dev/null @@ -1,115 +0,0 @@ - -/*! @file zpivotgrowth.c - * \brief Computes the reciprocal pivot growth factor - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -#include -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *
    - * Compute the reciprocal pivot growth factor of the leading ncols columns
    - * of the matrix, using the formula:
    - *     min_j ( max_i(abs(A_ij)) / max_i(abs(U_ij)) )
    - *
    - * Arguments
    - * =========
    - *
    - * ncols    (input) int
    - *          The number of columns of matrices A, L and U.
    - *
    - * A        (input) SuperMatrix*
    - *	    Original matrix A, permuted by columns, of dimension
    - *          (A->nrow, A->ncol). The type of A can be:
    - *          Stype = NC; Dtype = SLU_Z; Mtype = GE.
    - *
    - * L        (output) SuperMatrix*
    - *          The factor L from the factorization Pr*A=L*U; use compressed row 
    - *          subscripts storage for supernodes, i.e., L has type: 
    - *          Stype = SC; Dtype = SLU_Z; Mtype = TRLU.
    - *
    - * U        (output) SuperMatrix*
    - *	    The factor U from the factorization Pr*A*Pc=L*U. Use column-wise
    - *          storage scheme, i.e., U has types: Stype = NC;
    - *          Dtype = SLU_Z; Mtype = TRU.
    - * 
    - */ - -double -zPivotGrowth(int ncols, SuperMatrix *A, int *perm_c, - SuperMatrix *L, SuperMatrix *U) -{ - - NCformat *Astore; - SCformat *Lstore; - NCformat *Ustore; - doublecomplex *Aval, *Lval, *Uval; - int fsupc, nsupr, luptr, nz_in_U; - int i, j, k, oldcol; - int *inv_perm_c; - double rpg, maxaj, maxuj; - extern double dlamch_(char *); - double smlnum; - doublecomplex *luval; - doublecomplex temp_comp; - - /* Get machine constants. */ - smlnum = dlamch_("S"); - rpg = 1. / smlnum; - - Astore = A->Store; - Lstore = L->Store; - Ustore = U->Store; - Aval = Astore->nzval; - Lval = Lstore->nzval; - Uval = Ustore->nzval; - - inv_perm_c = (int *) SUPERLU_MALLOC(A->ncol*sizeof(int)); - for (j = 0; j < A->ncol; ++j) inv_perm_c[perm_c[j]] = j; - - for (k = 0; k <= Lstore->nsuper; ++k) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - luptr = L_NZ_START(fsupc); - luval = &Lval[luptr]; - nz_in_U = 1; - - for (j = fsupc; j < L_FST_SUPC(k+1) && j < ncols; ++j) { - maxaj = 0.; - oldcol = inv_perm_c[j]; - for (i = Astore->colptr[oldcol]; i < Astore->colptr[oldcol+1]; ++i) - maxaj = SUPERLU_MAX( maxaj, z_abs1( &Aval[i]) ); - - maxuj = 0.; - for (i = Ustore->colptr[j]; i < Ustore->colptr[j+1]; i++) - maxuj = SUPERLU_MAX( maxuj, z_abs1( &Uval[i]) ); - - /* Supernode */ - for (i = 0; i < nz_in_U; ++i) - maxuj = SUPERLU_MAX( maxuj, z_abs1( &luval[i]) ); - - ++nz_in_U; - luval += nsupr; - - if ( maxuj == 0. ) - rpg = SUPERLU_MIN( rpg, 1.); - else - rpg = SUPERLU_MIN( rpg, maxaj / maxuj ); - } - - if ( j >= ncols ) break; - } - - SUPERLU_FREE(inv_perm_c); - return (rpg); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpruneL.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpruneL.c deleted file mode 100644 index 8aa708196a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zpruneL.c +++ /dev/null @@ -1,154 +0,0 @@ - -/*! @file zpruneL.c - * \brief Prunes the L-structure - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - *
    - */ - - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *   Prunes the L-structure of supernodes whose L-structure
    - *   contains the current pivot row "pivrow"
    - * 
    - */ - -void -zpruneL( - const int jcol, /* in */ - const int *perm_r, /* in */ - const int pivrow, /* in */ - const int nseg, /* in */ - const int *segrep, /* in */ - const int *repfnz, /* in */ - int *xprune, /* out */ - GlobalLU_t *Glu /* modified - global LU data structures */ - ) -{ - - doublecomplex utemp; - int jsupno, irep, irep1, kmin, kmax, krow, movnum; - int i, ktemp, minloc, maxloc; - int do_prune; /* logical variable */ - int *xsup, *supno; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - /* - * For each supernode-rep irep in U[*,j] - */ - jsupno = supno[jcol]; - for (i = 0; i < nseg; i++) { - - irep = segrep[i]; - irep1 = irep + 1; - do_prune = FALSE; - - /* Don't prune with a zero U-segment */ - if ( repfnz[irep] == EMPTY ) - continue; - - /* If a snode overlaps with the next panel, then the U-segment - * is fragmented into two parts -- irep and irep1. We should let - * pruning occur at the rep-column in irep1's snode. - */ - if ( supno[irep] == supno[irep1] ) /* Don't prune */ - continue; - - /* - * If it has not been pruned & it has a nonz in row L[pivrow,i] - */ - if ( supno[irep] != jsupno ) { - if ( xprune[irep] >= xlsub[irep1] ) { - kmin = xlsub[irep]; - kmax = xlsub[irep1] - 1; - for (krow = kmin; krow <= kmax; krow++) - if ( lsub[krow] == pivrow ) { - do_prune = TRUE; - break; - } - } - - if ( do_prune ) { - - /* Do a quicksort-type partition - * movnum=TRUE means that the num values have to be exchanged. 
- */ - movnum = FALSE; - if ( irep == xsup[supno[irep]] ) /* Snode of size 1 */ - movnum = TRUE; - - while ( kmin <= kmax ) { - - if ( perm_r[lsub[kmax]] == EMPTY ) - kmax--; - else if ( perm_r[lsub[kmin]] != EMPTY ) - kmin++; - else { /* kmin below pivrow (not yet pivoted), and kmax - * above pivrow: interchange the two subscripts - */ - ktemp = lsub[kmin]; - lsub[kmin] = lsub[kmax]; - lsub[kmax] = ktemp; - - /* If the supernode has only one column, then we - * only keep one set of subscripts. For any subscript - * interchange performed, similar interchange must be - * done on the numerical values. - */ - if ( movnum ) { - minloc = xlusup[irep] + (kmin - xlsub[irep]); - maxloc = xlusup[irep] + (kmax - xlsub[irep]); - utemp = lusup[minloc]; - lusup[minloc] = lusup[maxloc]; - lusup[maxloc] = utemp; - } - - kmin++; - kmax--; - - } - - } /* while */ - - xprune[irep] = kmin; /* Pruning */ - -#ifdef CHK_PRUNE - printf(" After zpruneL(),using col %d: xprune[%d] = %d\n", - jcol, irep, kmin); -#endif - } /* if do_prune */ - - } /* if */ - - } /* for each U-segment... */ -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zreadhb.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zreadhb.c deleted file mode 100644 index 31282285ed..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zreadhb.c +++ /dev/null @@ -1,267 +0,0 @@ - -/*! @file zreadhb.c - * \brief Read a matrix stored in Harwell-Boeing format - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Purpose
    - * =======
    - * 
    - * Read a DOUBLE COMPLEX PRECISION matrix stored in Harwell-Boeing format 
    - * as described below.
    - * 
    - * Line 1 (A72,A8) 
    - *  	Col. 1 - 72   Title (TITLE) 
    - *	Col. 73 - 80  Key (KEY) 
    - * 
    - * Line 2 (5I14) 
    - * 	Col. 1 - 14   Total number of lines excluding header (TOTCRD) 
    - * 	Col. 15 - 28  Number of lines for pointers (PTRCRD) 
    - * 	Col. 29 - 42  Number of lines for row (or variable) indices (INDCRD) 
    - * 	Col. 43 - 56  Number of lines for numerical values (VALCRD) 
    - *	Col. 57 - 70  Number of lines for right-hand sides (RHSCRD) 
    - *                    (including starting guesses and solution vectors 
    - *		       if present) 
    - *           	      (zero indicates no right-hand side data is present) 
    - *
    - * Line 3 (A3, 11X, 4I14) 
    - *   	Col. 1 - 3    Matrix type (see below) (MXTYPE) 
    - * 	Col. 15 - 28  Number of rows (or variables) (NROW) 
    - * 	Col. 29 - 42  Number of columns (or elements) (NCOL) 
    - *	Col. 43 - 56  Number of row (or variable) indices (NNZERO) 
    - *	              (equal to number of entries for assembled matrices) 
    - * 	Col. 57 - 70  Number of elemental matrix entries (NELTVL) 
    - *	              (zero in the case of assembled matrices) 
    - * Line 4 (2A16, 2A20) 
    - * 	Col. 1 - 16   Format for pointers (PTRFMT) 
    - *	Col. 17 - 32  Format for row (or variable) indices (INDFMT) 
    - *	Col. 33 - 52  Format for numerical values of coefficient matrix (VALFMT) 
    - * 	Col. 53 - 72 Format for numerical values of right-hand sides (RHSFMT) 
    - *
    - * Line 5 (A3, 11X, 2I14) Only present if there are right-hand sides present 
    - *    	Col. 1 	      Right-hand side type: 
    - *	         	  F for full storage or M for same format as matrix 
    - *    	Col. 2        G if a starting vector(s) (Guess) is supplied. (RHSTYP) 
    - *    	Col. 3        X if an exact solution vector(s) is supplied. 
    - *	Col. 15 - 28  Number of right-hand sides (NRHS) 
    - *	Col. 29 - 42  Number of row indices (NRHSIX) 
    - *          	      (ignored in case of unassembled matrices) 
    - *
    - * The three character type field on line 3 describes the matrix type. 
    - * The following table lists the permitted values for each of the three 
    - * characters. As an example of the type field, RSA denotes that the matrix 
    - * is real, symmetric, and assembled. 
    - *
    - * First Character: 
    - *	R Real matrix 
    - *	C Complex matrix 
    - *	P Pattern only (no numerical values supplied) 
    - *
    - * Second Character: 
    - *	S Symmetric 
    - *	U Unsymmetric 
    - *	H Hermitian 
    - *	Z Skew symmetric 
    - *	R Rectangular 
    - *
    - * Third Character: 
    - *	A Assembled 
    - *	E Elemental matrices (unassembled) 
    - *
    - * 
    - */ -#include -#include -#include "slu_zdefs.h" - - -/*! \brief Eat up the rest of the current line */ -int zDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -int zParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -int zParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - *
    - * - * Purpose - * ======= - * - * Read a DOUBLE COMPLEX PRECISION matrix stored in Rutherford-Boeing format - * as described below. - * - * Line 1 (A72, A8) - * Col. 1 - 72 Title (TITLE) - * Col. 73 - 80 Matrix name / identifier (MTRXID) - * - * Line 2 (I14, 3(1X, I13)) - * Col. 1 - 14 Total number of lines excluding header (TOTCRD) - * Col. 16 - 28 Number of lines for pointers (PTRCRD) - * Col. 30 - 42 Number of lines for row (or variable) indices (INDCRD) - * Col. 44 - 56 Number of lines for numerical values (VALCRD) - * - * Line 3 (A3, 11X, 4(1X, I13)) - * Col. 1 - 3 Matrix type (see below) (MXTYPE) - * Col. 15 - 28 Compressed Column: Number of rows (NROW) - * Elemental: Largest integer used to index variable (MVAR) - * Col. 30 - 42 Compressed Column: Number of columns (NCOL) - * Elemental: Number of element matrices (NELT) - * Col. 44 - 56 Compressed Column: Number of entries (NNZERO) - * Elemental: Number of variable indeces (NVARIX) - * Col. 58 - 70 Compressed Column: Unused, explicitly zero - * Elemental: Number of elemental matrix entries (NELTVL) - * - * Line 4 (2A16, A20) - * Col. 1 - 16 Fortran format for pointers (PTRFMT) - * Col. 17 - 32 Fortran format for row (or variable) indices (INDFMT) - * Col. 33 - 52 Fortran format for numerical values of coefficient matrix - * (VALFMT) - * (blank in the case of matrix patterns) - * - * The three character type field on line 3 describes the matrix type. - * The following table lists the permitted values for each of the three - * characters. As an example of the type field, RSA denotes that the matrix - * is real, symmetric, and assembled. 
- * - * First Character: - * R Real matrix - * C Complex matrix - * I integer matrix - * P Pattern only (no numerical values supplied) - * Q Pattern only (numerical values supplied in associated auxiliary value - * file) - * - * Second Character: - * S Symmetric - * U Unsymmetric - * H Hermitian - * Z Skew symmetric - * R Rectangular - * - * Third Character: - * A Compressed column form - * E Elemental form - * - *
    - */ - -#include "slu_zdefs.h" - - -/*! \brief Eat up the rest of the current line */ -static int zDumpLine(FILE *fp) -{ - register int c; - while ((c = fgetc(fp)) != '\n') ; - return 0; -} - -static int zParseIntFormat(char *buf, int *num, int *size) -{ - char *tmp; - - tmp = buf; - while (*tmp++ != '(') ; - sscanf(tmp, "%d", num); - while (*tmp != 'I' && *tmp != 'i') ++tmp; - ++tmp; - sscanf(tmp, "%d", size); - return 0; -} - -static int zParseFloatFormat(char *buf, int *num, int *size) -{ - char *tmp, *period; - - tmp = buf; - while (*tmp++ != '(') ; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - while (*tmp != 'E' && *tmp != 'e' && *tmp != 'D' && *tmp != 'd' - && *tmp != 'F' && *tmp != 'f') { - /* May find kP before nE/nD/nF, like (1P6F13.6). In this case the - num picked up refers to P, which should be skipped. */ - if (*tmp=='p' || *tmp=='P') { - ++tmp; - *num = atoi(tmp); /*sscanf(tmp, "%d", num);*/ - } else { - ++tmp; - } - } - ++tmp; - period = tmp; - while (*period != '.' && *period != ')') ++period ; - *period = '\0'; - *size = atoi(tmp); /*sscanf(tmp, "%2d", size);*/ - - return 0; -} - -static int ReadVector(FILE *fp, int n, int *where, int perline, int persize) -{ - register int i, j, item; - char tmp, buf[100]; - - i = 0; - while (i < n) { - fgets(buf, 100, fp); /* read a line at a time */ - for (j=0; j - * -- SuperLU routine (version 4.0) -- - * Lawrence Berkeley National Laboratory. - * June 30, 2009 - * - */ - -#include "slu_zdefs.h" - - -void -zreadtriple(int *m, int *n, int *nonz, - doublecomplex **nzval, int **rowind, int **colptr) -{ -/* - * Output parameters - * ================= - * (a,asub,xa): asub[*] contains the row subscripts of nonzeros - * in columns of matrix A; a[*] the numerical values; - * row i of A is given by a[k],k=xa[i],...,xa[i+1]-1. 
- * - */ - int j, k, jsize, nnz, nz; - doublecomplex *a, *val; - int *asub, *xa, *row, *col; - int zero_base = 0; - - /* Matrix format: - * First line: #rows, #cols, #non-zero - * Triplet in the rest of lines: - * row, col, value - */ - - scanf("%d%d", n, nonz); - *m = *n; - printf("m %d, n %d, nonz %d\n", *m, *n, *nonz); - zallocateA(*n, *nonz, nzval, rowind, colptr); /* Allocate storage */ - a = *nzval; - asub = *rowind; - xa = *colptr; - - val = (doublecomplex *) SUPERLU_MALLOC(*nonz * sizeof(doublecomplex)); - row = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - col = (int *) SUPERLU_MALLOC(*nonz * sizeof(int)); - - for (j = 0; j < *n; ++j) xa[j] = 0; - - /* Read into the triplet array from a file */ - for (nnz = 0, nz = 0; nnz < *nonz; ++nnz) { - scanf("%d%d%lf%lf\n", &row[nz], &col[nz], &val[nz].r, &val[nz].i); - - if ( nnz == 0 ) { /* first nonzero */ - if ( row[0] == 0 || col[0] == 0 ) { - zero_base = 1; - printf("triplet file: row/col indices are zero-based.\n"); - } else - printf("triplet file: row/col indices are one-based.\n"); - } - - if ( !zero_base ) { - /* Change to 0-based indexing. 
*/ - --row[nz]; - --col[nz]; - } - - if (row[nz] < 0 || row[nz] >= *m || col[nz] < 0 || col[nz] >= *n - /*|| val[nz] == 0.*/) { - fprintf(stderr, "nz %d, (%d, %d) = (%e,%e) out of bound, removed\n", - nz, row[nz], col[nz], val[nz].r, val[nz].i); - exit(-1); - } else { - ++xa[col[nz]]; - ++nz; - } - } - - *nonz = nz; - - /* Initialize the array of column pointers */ - k = 0; - jsize = xa[0]; - xa[0] = 0; - for (j = 1; j < *n; ++j) { - k += jsize; - jsize = xa[j]; - xa[j] = k; - } - - /* Copy the triplets into the column oriented storage */ - for (nz = 0; nz < *nonz; ++nz) { - j = col[nz]; - k = xa[j]; - asub[k] = row[nz]; - a[k] = val[nz]; - ++xa[j]; - } - - /* Reset the column pointers to the beginning of each column */ - for (j = *n; j > 0; --j) - xa[j] = xa[j-1]; - xa[0] = 0; - - SUPERLU_FREE(val); - SUPERLU_FREE(row); - SUPERLU_FREE(col); - -#ifdef CHK_INPUT - { - int i; - for (i = 0; i < *n; i++) { - printf("Col %d, xa %d\n", i, xa[i]); - for (k = xa[i]; k < xa[i+1]; k++) - printf("%d\t%16.10f\n", asub[k], a[k]); - } - } -#endif - -} - - -void zreadrhs(int m, doublecomplex *b) -{ - FILE *fp, *fopen(); - int i; - /*int j;*/ - - if ( !(fp = fopen("b.dat", "r")) ) { - fprintf(stderr, "dreadrhs: file does not exist\n"); - exit(-1); - } - for (i = 0; i < m; ++i) - fscanf(fp, "%lf%lf\n", &b[i].r, &b[i].i); - - /* readpair_(j, &b[i]);*/ - fclose(fp); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsnode_bmod.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsnode_bmod.c deleted file mode 100644 index f760517e0f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsnode_bmod.c +++ /dev/null @@ -1,120 +0,0 @@ - -/*! @file zsnode_bmod.c - * \brief Performs numeric block updates within the relaxed snode. - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_zdefs.h" - - -/*! \brief Performs numeric block updates within the relaxed snode. - */ -int -zsnode_bmod ( - const int jcol, /* in */ - const int jsupno, /* in */ - const int fsupc, /* in */ - doublecomplex *dense, /* in */ - doublecomplex *tempv, /* working array */ - GlobalLU_t *Glu, /* modified */ - SuperLUStat_t *stat /* output */ - ) -{ -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - int incx = 1, incy = 1; - doublecomplex alpha = {-1.0, 0.0}, beta = {1.0, 0.0}; -#endif - - doublecomplex comp_zero = {0.0, 0.0}; - int luptr, nsupc, nsupr, nrow; - int isub, irow, i, iptr; - register int ufirst, nextlu; - int *lsub, *xlsub; - doublecomplex *lusup; - int *xlusup; - flops_t *ops = stat->ops; - - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - - nextlu = xlusup[jcol]; - - /* - * Process the supernodal portion of L\U[*,j] - */ - for (isub = xlsub[fsupc]; isub < xlsub[fsupc+1]; isub++) { - irow = lsub[isub]; - lusup[nextlu] = dense[irow]; - dense[irow] = comp_zero; - ++nextlu; - } - - xlusup[jcol + 1] = nextlu; /* Initialize xlusup for next column */ - - if ( fsupc < jcol ) { - - luptr = xlusup[fsupc]; - nsupr = xlsub[fsupc+1] - xlsub[fsupc]; - nsupc = jcol - fsupc; /* Excluding jcol */ - ufirst = xlusup[jcol]; /* Points to the beginning of column - jcol in supernode L\U(jsupno). 
*/ - nrow = nsupr - nsupc; - - ops[TRSV] += 4 * nsupc * (nsupc - 1); - ops[GEMV] += 8 * nrow * nsupc; - -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - CGEMV( ftcs2, &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#else - ztrsv_( "L", "N", "U", &nsupc, &lusup[luptr], &nsupr, - &lusup[ufirst], &incx ); - zgemv_( "N", &nrow, &nsupc, &alpha, &lusup[luptr+nsupc], &nsupr, - &lusup[ufirst], &incx, &beta, &lusup[ufirst+nsupc], &incy ); -#endif -#else - zlsolve ( nsupr, nsupc, &lusup[luptr], &lusup[ufirst] ); - zmatvec ( nsupr, nrow, nsupc, &lusup[luptr+nsupc], - &lusup[ufirst], &tempv[0] ); - - /* Scatter tempv[*] into lusup[*] */ - iptr = ufirst + nsupc; - for (i = 0; i < nrow; i++) { - z_sub(&lusup[iptr], &lusup[iptr], &tempv[i]); - ++iptr; - tempv[i] = comp_zero; - } -#endif - - } - - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsnode_dfs.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsnode_dfs.c deleted file mode 100644 index 247b1a4333..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsnode_dfs.c +++ /dev/null @@ -1,112 +0,0 @@ - -/*! @file zsnode_dfs.c - * \brief Determines the union of row structures of columns within the relaxed node - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose
    - * =======
    - *    zsnode_dfs() - Determine the union of the row structures of those 
    - *    columns within the relaxed snode.
    - *    Note: The relaxed snodes are leaves of the supernodal etree, therefore, 
    - *    the portion outside the rectangular supernode must be zero.
    - *
    - * Return value
    - * ============
    - *     0   success;
    - *    >0   number of bytes allocated when run out of memory.
    - * 
    - */ - -int -zsnode_dfs ( - const int jcol, /* in - start of the supernode */ - const int kcol, /* in - end of the supernode */ - const int *asub, /* in */ - const int *xa_begin, /* in */ - const int *xa_end, /* in */ - int *xprune, /* out */ - int *marker, /* modified */ - GlobalLU_t *Glu /* modified */ - ) -{ - - register int i, k, ifrom, ito, nextl, new_next; - int nsuper, krow, kmark, mem_error; - int *xsup, *supno; - int *lsub, *xlsub; - int nzlmax; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - nzlmax = Glu->nzlmax; - - nsuper = ++supno[jcol]; /* Next available supernode number */ - nextl = xlsub[jcol]; - - for (i = jcol; i <= kcol; i++) { - /* For each nonzero in A[*,i] */ - for (k = xa_begin[i]; k < xa_end[i]; k++) { - krow = asub[k]; - kmark = marker[krow]; - if ( kmark != kcol ) { /* First time visit krow */ - marker[krow] = kcol; - lsub[nextl++] = krow; - if ( nextl >= nzlmax ) { - if ( mem_error = zLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - } - } - supno[i] = nsuper; - } - - /* Supernode > 1, then make a copy of the subscripts for pruning */ - if ( jcol < kcol ) { - new_next = nextl + (nextl - xlsub[jcol]); - while ( new_next > nzlmax ) { - if ( mem_error = zLUMemXpand(jcol, nextl, LSUB, &nzlmax, Glu) ) - return (mem_error); - lsub = Glu->lsub; - } - ito = nextl; - for (ifrom = xlsub[jcol]; ifrom < nextl; ) - lsub[ito++] = lsub[ifrom++]; - for (i = jcol+1; i <= kcol; i++) xlsub[i] = nextl; - nextl = ito; - } - - xsup[nsuper+1] = kcol + 1; - supno[kcol+1] = nsuper; - xprune[kcol] = nextl; - xlsub[kcol+1] = nextl; - - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsp_blas2.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsp_blas2.c deleted file mode 100644 index 58500fd592..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsp_blas2.c +++ /dev/null @@ -1,573 +0,0 @@ - -/*! 
@file zsp_blas2.c - * \brief Sparse BLAS 2, using some dense BLAS 2 operations - * - *
    - * -- SuperLU routine (version 3.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * October 15, 2003
    - * 
    - */ -/* - * File name: zsp_blas2.c - * Purpose: Sparse BLAS 2, using some dense BLAS 2 operations. - */ - -#include "slu_zdefs.h" - -/* - * Function prototypes - */ -void zusolve(int, int, doublecomplex*, doublecomplex*); -void zlsolve(int, int, doublecomplex*, doublecomplex*); -void zmatvec(int, int, int, doublecomplex*, doublecomplex*, doublecomplex*); - -/*! \brief Solves one of the systems of equations A*x = b, or A'*x = b - * - *
    - *   Purpose
    - *   =======
    - *
    - *   sp_ztrsv() solves one of the systems of equations   
    - *       A*x = b,   or   A'*x = b,
    - *   where b and x are n element vectors and A is a sparse unit , or   
    - *   non-unit, upper or lower triangular matrix.   
    - *   No test for singularity or near-singularity is included in this   
    - *   routine. Such tests must be performed before calling this routine.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   uplo   - (input) char*
    - *            On entry, uplo specifies whether the matrix is an upper or   
    - *             lower triangular matrix as follows:   
    - *                uplo = 'U' or 'u'   A is an upper triangular matrix.   
    - *                uplo = 'L' or 'l'   A is a lower triangular matrix.   
    - *
    - *   trans  - (input) char*
    - *             On entry, trans specifies the equations to be solved as   
    - *             follows:   
    - *                trans = 'N' or 'n'   A*x = b.   
    - *                trans = 'T' or 't'   A'*x = b.
    - *                trans = 'C' or 'c'   A^H*x = b.   
    - *
    - *   diag   - (input) char*
    - *             On entry, diag specifies whether or not A is unit   
    - *             triangular as follows:   
    - *                diag = 'U' or 'u'   A is assumed to be unit triangular.   
    - *                diag = 'N' or 'n'   A is not assumed to be unit   
    - *                                    triangular.   
    - *	     
    - *   L       - (input) SuperMatrix*
    - *	       The factor L from the factorization Pr*A*Pc=L*U. Use
    - *             compressed row subscripts storage for supernodes,
    - *             i.e., L has types: Stype = SC, Dtype = SLU_Z, Mtype = TRLU.
    - *
    - *   U       - (input) SuperMatrix*
    - *	        The factor U from the factorization Pr*A*Pc=L*U.
    - *	        U has types: Stype = NC, Dtype = SLU_Z, Mtype = TRU.
    - *    
    - *   x       - (input/output) doublecomplex*
    - *             Before entry, the incremented array X must contain the n   
    - *             element right-hand side vector b. On exit, X is overwritten 
    - *             with the solution vector x.
    - *
    - *   info    - (output) int*
    - *             If *info = -i, the i-th argument had an illegal value.
    - * 
    - */ -int -sp_ztrsv(char *uplo, char *trans, char *diag, SuperMatrix *L, - SuperMatrix *U, doublecomplex *x, SuperLUStat_t *stat, int *info) -{ -#ifdef _CRAY - _fcd ftcs1 = _cptofcd("L", strlen("L")), - ftcs2 = _cptofcd("N", strlen("N")), - ftcs3 = _cptofcd("U", strlen("U")); -#endif - SCformat *Lstore; - NCformat *Ustore; - doublecomplex *Lval, *Uval; - int incx = 1, incy = 1; - doublecomplex temp; - doublecomplex alpha = {1.0, 0.0}, beta = {1.0, 0.0}; - doublecomplex comp_zero = {0.0, 0.0}; - int nrow; - int fsupc, nsupr, nsupc, luptr, istart, irow; - int i, k, iptr, jcol; - doublecomplex *work; - flops_t solve_ops; - - /* Test the input parameters */ - *info = 0; - if ( !lsame_(uplo,"L") && !lsame_(uplo, "U") ) *info = -1; - else if ( !lsame_(trans, "N") && !lsame_(trans, "T") && - !lsame_(trans, "C")) *info = -2; - else if ( !lsame_(diag, "U") && !lsame_(diag, "N") ) *info = -3; - else if ( L->nrow != L->ncol || L->nrow < 0 ) *info = -4; - else if ( U->nrow != U->ncol || U->nrow < 0 ) *info = -5; - if ( *info ) { - i = -(*info); - xerbla_("sp_ztrsv", &i); - return 0; - } - - Lstore = L->Store; - Lval = Lstore->nzval; - Ustore = U->Store; - Uval = Ustore->nzval; - solve_ops = 0; - - if ( !(work = doublecomplexCalloc(L->nrow)) ) - ABORT("Malloc fails for work in sp_ztrsv()."); - - if ( lsame_(trans, "N") ) { /* Form x := inv(A)*x. 
*/ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L)*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - nrow = nsupr - nsupc; - - /* 1 z_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc - 1) + 10 * nsupc; - solve_ops += 8 * nrow * nsupc; - - if ( nsupc == 1 ) { - for (iptr=istart+1; iptr < L_SUB_START(fsupc+1); ++iptr) { - irow = L_SUB(iptr); - ++luptr; - zz_mult(&comp_zero, &x[fsupc], &Lval[luptr]); - z_sub(&x[irow], &x[irow], &comp_zero); - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - CGEMV(ftcs2, &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#else - ztrsv_("L", "N", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); - - zgemv_("N", &nrow, &nsupc, &alpha, &Lval[luptr+nsupc], - &nsupr, &x[fsupc], &incx, &beta, &work[0], &incy); -#endif -#else - zlsolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc]); - - zmatvec ( nsupr, nsupr-nsupc, nsupc, &Lval[luptr+nsupc], - &x[fsupc], &work[0] ); -#endif - - iptr = istart + nsupc; - for (i = 0; i < nrow; ++i, ++iptr) { - irow = L_SUB(iptr); - z_sub(&x[irow], &x[irow], &work[i]); /* Scatter */ - work[i] = comp_zero; - - } - } - } /* for k ... 
*/ - - } else { - /* Form x := inv(U)*x */ - - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; k--) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - /* 1 z_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc + 1) + 10 * nsupc; - - if ( nsupc == 1 ) { - z_div(&x[fsupc], &x[fsupc], &Lval[luptr]); - for (i = U_NZ_START(fsupc); i < U_NZ_START(fsupc+1); ++i) { - irow = U_SUB(i); - zz_mult(&comp_zero, &x[fsupc], &Uval[i]); - z_sub(&x[irow], &x[irow], &comp_zero); - } - } else { -#ifdef USE_VENDOR_BLAS -#ifdef _CRAY - CTRSV(ftcs3, ftcs2, ftcs2, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ztrsv_("U", "N", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif -#else - zusolve ( nsupr, nsupc, &Lval[luptr], &x[fsupc] ); -#endif - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); - i++) { - irow = U_SUB(i); - zz_mult(&comp_zero, &x[jcol], &Uval[i]); - z_sub(&x[irow], &x[irow], &comp_zero); - } - } - } - } /* for k ... 
*/ - - } - } else if ( lsame_(trans, "T") ) { /* Form x := inv(A')*x */ - - if ( lsame_(uplo, "L") ) { - /* Form x := inv(L')*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; --k) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 8 * (nsupr - nsupc) * nsupc; - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - iptr = istart + nsupc; - for (i = L_NZ_START(jcol) + nsupc; - i < L_NZ_START(jcol+1); i++) { - irow = L_SUB(iptr); - zz_mult(&comp_zero, &x[irow], &Lval[i]); - z_sub(&x[jcol], &x[jcol], &comp_zero); - iptr++; - } - } - - if ( nsupc > 1 ) { - solve_ops += 4 * nsupc * (nsupc - 1); -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("U", strlen("U")); - CTRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ztrsv_("L", "T", "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } - } else { - /* Form x := inv(U')*x */ - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++) { - irow = U_SUB(i); - zz_mult(&comp_zero, &x[irow], &Uval[i]); - z_sub(&x[jcol], &x[jcol], &comp_zero); - } - } - - /* 1 z_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc + 1) + 10 * nsupc; - - if ( nsupc == 1 ) { - z_div(&x[fsupc], &x[fsupc], &Lval[luptr]); - } else { -#ifdef _CRAY - ftcs1 = _cptofcd("U", strlen("U")); - ftcs2 = _cptofcd("T", strlen("T")); - ftcs3 = _cptofcd("N", strlen("N")); - CTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - 
&x[fsupc], &incx); -#else - ztrsv_("U", "T", "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } /* for k ... */ - } - } else { /* Form x := conj(inv(A'))*x */ - - if ( lsame_(uplo, "L") ) { - /* Form x := conj(inv(L'))*x */ - if ( L->nrow == 0 ) return 0; /* Quick return */ - - for (k = Lstore->nsuper; k >= 0; --k) { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - solve_ops += 8 * (nsupr - nsupc) * nsupc; - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - iptr = istart + nsupc; - for (i = L_NZ_START(jcol) + nsupc; - i < L_NZ_START(jcol+1); i++) { - irow = L_SUB(iptr); - zz_conj(&temp, &Lval[i]); - zz_mult(&comp_zero, &x[irow], &temp); - z_sub(&x[jcol], &x[jcol], &comp_zero); - iptr++; - } - } - - if ( nsupc > 1 ) { - solve_ops += 4 * nsupc * (nsupc - 1); -#ifdef _CRAY - ftcs1 = _cptofcd("L", strlen("L")); - ftcs2 = _cptofcd(trans, strlen("T")); - ftcs3 = _cptofcd("U", strlen("U")); - ZTRSV(ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ztrsv_("L", trans, "U", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } - } else { - /* Form x := conj(inv(U'))*x */ - if ( U->nrow == 0 ) return 0; /* Quick return */ - - for (k = 0; k <= Lstore->nsuper; k++) { - fsupc = L_FST_SUPC(k); - nsupr = L_SUB_START(fsupc+1) - L_SUB_START(fsupc); - nsupc = L_FST_SUPC(k+1) - fsupc; - luptr = L_NZ_START(fsupc); - - for (jcol = fsupc; jcol < L_FST_SUPC(k+1); jcol++) { - solve_ops += 8*(U_NZ_START(jcol+1) - U_NZ_START(jcol)); - for (i = U_NZ_START(jcol); i < U_NZ_START(jcol+1); i++) { - irow = U_SUB(i); - zz_conj(&temp, &Uval[i]); - zz_mult(&comp_zero, &x[irow], &temp); - z_sub(&x[jcol], &x[jcol], &comp_zero); - } - } - - /* 1 z_div costs 10 flops */ - solve_ops += 4 * nsupc * (nsupc + 1) + 10 * nsupc; - - if ( nsupc == 1 ) { - zz_conj(&temp, &Lval[luptr]); - z_div(&x[fsupc], &x[fsupc], &temp); - } 
else { -#ifdef _CRAY - ftcs1 = _cptofcd("U", strlen("U")); - ftcs2 = _cptofcd(trans, strlen("T")); - ftcs3 = _cptofcd("N", strlen("N")); - ZTRSV( ftcs1, ftcs2, ftcs3, &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#else - ztrsv_("U", trans, "N", &nsupc, &Lval[luptr], &nsupr, - &x[fsupc], &incx); -#endif - } - } /* for k ... */ - } - } - - stat->ops[SOLVE] += solve_ops; - SUPERLU_FREE(work); - return 0; -} - - - -/*! \brief Performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y - * - *
      
    - *   Purpose   
    - *   =======   
    - *
    - *   sp_zgemv()  performs one of the matrix-vector operations   
    - *      y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,   
    - *   where alpha and beta are scalars, x and y are vectors and A is a
    - *   sparse A->nrow by A->ncol matrix.   
    - *
    - *   Parameters   
    - *   ==========   
    - *
    - *   TRANS  - (input) char*
    - *            On entry, TRANS specifies the operation to be performed as   
    - *            follows:   
    - *               TRANS = 'N' or 'n'   y := alpha*A*x + beta*y.   
    - *               TRANS = 'T' or 't'   y := alpha*A'*x + beta*y.   
    - *               TRANS = 'C' or 'c'   y := alpha*A'*x + beta*y.   
    - *
    - *   ALPHA  - (input) doublecomplex
    - *            On entry, ALPHA specifies the scalar alpha.   
    - *
    - *   A      - (input) SuperMatrix*
    - *            Before entry, the leading m by n part of the array A must   
    - *            contain the matrix of coefficients.   
    - *
    - *   X      - (input) doublecomplex*, array of DIMENSION at least   
    - *            ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'   
    - *           and at least   
    - *            ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.   
    - *            Before entry, the incremented array X must contain the   
    - *            vector x.   
    - * 
    - *   INCX   - (input) int
    - *            On entry, INCX specifies the increment for the elements of   
    - *            X. INCX must not be zero.   
    - *
    - *   BETA   - (input) doublecomplex
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then Y need not be set on input.   
    - *
    - *   Y      - (output) doublecomplex*,  array of DIMENSION at least   
    - *            ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'   
    - *            and at least   
    - *            ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.   
    - *            Before entry with BETA non-zero, the incremented array Y   
    - *            must contain the vector y. On exit, Y is overwritten by the 
    - *            updated vector y.
    - *	      
    - *   INCY   - (input) int
    - *            On entry, INCY specifies the increment for the elements of   
    - *            Y. INCY must not be zero.   
    - *
    - *    ==== Sparse Level 2 Blas routine.   
    - * 
    -*/ -int -sp_zgemv(char *trans, doublecomplex alpha, SuperMatrix *A, doublecomplex *x, - int incx, doublecomplex beta, doublecomplex *y, int incy) -{ - - /* Local variables */ - NCformat *Astore; - doublecomplex *Aval; - int info; - doublecomplex temp, temp1; - int lenx, leny, i, j, irow; - int iy, jx, jy, kx, ky; - int notran; - doublecomplex comp_zero = {0.0, 0.0}; - doublecomplex comp_one = {1.0, 0.0}; - - notran = lsame_(trans, "N"); - Astore = A->Store; - Aval = Astore->nzval; - - /* Test the input parameters */ - info = 0; - if ( !notran && !lsame_(trans, "T") && !lsame_(trans, "C")) info = 1; - else if ( A->nrow < 0 || A->ncol < 0 ) info = 3; - else if (incx == 0) info = 5; - else if (incy == 0) info = 8; - if (info != 0) { - xerbla_("sp_zgemv ", &info); - return 0; - } - - /* Quick return if possible. */ - if (A->nrow == 0 || A->ncol == 0 || - z_eq(&alpha, &comp_zero) && - z_eq(&beta, &comp_one)) - return 0; - - - /* Set LENX and LENY, the lengths of the vectors x and y, and set - up the start points in X and Y. */ - if (lsame_(trans, "N")) { - lenx = A->ncol; - leny = A->nrow; - } else { - lenx = A->nrow; - leny = A->ncol; - } - if (incx > 0) kx = 0; - else kx = - (lenx - 1) * incx; - if (incy > 0) ky = 0; - else ky = - (leny - 1) * incy; - - /* Start the operations. In this version the elements of A are - accessed sequentially with one pass through A. */ - /* First form y := beta*y. */ - if ( !z_eq(&beta, &comp_one) ) { - if (incy == 1) { - if ( z_eq(&beta, &comp_zero) ) - for (i = 0; i < leny; ++i) y[i] = comp_zero; - else - for (i = 0; i < leny; ++i) - zz_mult(&y[i], &beta, &y[i]); - } else { - iy = ky; - if ( z_eq(&beta, &comp_zero) ) - for (i = 0; i < leny; ++i) { - y[iy] = comp_zero; - iy += incy; - } - else - for (i = 0; i < leny; ++i) { - zz_mult(&y[iy], &beta, &y[iy]); - iy += incy; - } - } - } - - if ( z_eq(&alpha, &comp_zero) ) return 0; - - if ( notran ) { - /* Form y := alpha*A*x + y. 
*/ - jx = kx; - if (incy == 1) { - for (j = 0; j < A->ncol; ++j) { - if ( !z_eq(&x[jx], &comp_zero) ) { - zz_mult(&temp, &alpha, &x[jx]); - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - zz_mult(&temp1, &temp, &Aval[i]); - z_add(&y[irow], &y[irow], &temp1); - } - } - jx += incx; - } - } else { - ABORT("Not implemented."); - } - } else { - /* Form y := alpha*A'*x + y. */ - jy = ky; - if (incx == 1) { - for (j = 0; j < A->ncol; ++j) { - temp = comp_zero; - for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; ++i) { - irow = Astore->rowind[i]; - zz_mult(&temp1, &Aval[i], &x[irow]); - z_add(&temp, &temp, &temp1); - } - zz_mult(&temp1, &alpha, &temp); - z_add(&y[jy], &y[jy], &temp1); - jy += incy; - } - } else { - ABORT("Not implemented."); - } - } - return 0; -} /* sp_zgemv */ - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsp_blas3.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsp_blas3.c deleted file mode 100644 index 0e1a5c2f51..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zsp_blas3.c +++ /dev/null @@ -1,127 +0,0 @@ - -/*! @file zsp_blas3.c - * \brief Sparse BLAS3, using some dense BLAS3 operations - * - *
    - * -- SuperLU routine (version 2.0) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * November 15, 1997
    - * 
    - */ -/* - * File name: sp_blas3.c - * Purpose: Sparse BLAS3, using some dense BLAS3 operations. - */ - -#include "slu_zdefs.h" - -/*! \brief - * - *
    - * Purpose   
    - *   =======   
    - * 
    - *   sp_z performs one of the matrix-matrix operations   
    - * 
    - *      C := alpha*op( A )*op( B ) + beta*C,   
    - * 
    - *   where  op( X ) is one of 
    - * 
    - *      op( X ) = X   or   op( X ) = X'   or   op( X ) = conjg( X' ),
    - * 
    - *   alpha and beta are scalars, and A, B and C are matrices, with op( A ) 
    - *   an m by k matrix,  op( B )  a  k by n matrix and  C an m by n matrix. 
    - *   
    - * 
    - *   Parameters   
    - *   ==========   
    - * 
    - *   TRANSA - (input) char*
    - *            On entry, TRANSA specifies the form of op( A ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSA = 'N' or 'n',  op( A ) = A.   
    - *               TRANSA = 'T' or 't',  op( A ) = A'.   
    - *               TRANSA = 'C' or 'c',  op( A ) = conjg( A' ).   
    - *            Unchanged on exit.   
    - * 
    - *   TRANSB - (input) char*
    - *            On entry, TRANSB specifies the form of op( B ) to be used in 
    - *            the matrix multiplication as follows:   
    - *               TRANSB = 'N' or 'n',  op( B ) = B.   
    - *               TRANSB = 'T' or 't',  op( B ) = B'.   
    - *               TRANSB = 'C' or 'c',  op( B ) = conjg( B' ).   
    - *            Unchanged on exit.   
    - * 
    - *   M      - (input) int   
    - *            On entry,  M  specifies  the number of rows of the matrix 
    - *	     op( A ) and of the matrix C.  M must be at least zero. 
    - *	     Unchanged on exit.   
    - * 
    - *   N      - (input) int
    - *            On entry,  N specifies the number of columns of the matrix 
    - *	     op( B ) and the number of columns of the matrix C. N must be 
    - *	     at least zero.
    - *	     Unchanged on exit.   
    - * 
    - *   K      - (input) int
    - *            On entry, K specifies the number of columns of the matrix 
    - *	     op( A ) and the number of rows of the matrix op( B ). K must 
    - *	     be at least  zero.   
    - *           Unchanged on exit.
    - *      
    - *   ALPHA  - (input) doublecomplex
    - *            On entry, ALPHA specifies the scalar alpha.   
    - * 
    - *   A      - (input) SuperMatrix*
    - *            Matrix A with a sparse format, of dimension (A->nrow, A->ncol).
    - *            Currently, the type of A can be:
    - *                Stype = NC or NCP; Dtype = SLU_Z; Mtype = GE. 
    - *            In the future, more general A can be handled.
    - * 
    - *   B      - DOUBLE COMPLEX PRECISION array of DIMENSION ( LDB, kb ), where kb is 
    - *            n when TRANSB = 'N' or 'n',  and is  k otherwise.   
    - *            Before entry with  TRANSB = 'N' or 'n',  the leading k by n 
    - *            part of the array B must contain the matrix B, otherwise 
    - *            the leading n by k part of the array B must contain the 
    - *            matrix B.   
    - *            Unchanged on exit.   
    - * 
    - *   LDB    - (input) int
    - *            On entry, LDB specifies the first dimension of B as declared 
    - *            in the calling (sub) program. LDB must be at least max( 1, n ).  
    - *            Unchanged on exit.   
    - * 
    - *   BETA   - (input) doublecomplex
    - *            On entry, BETA specifies the scalar beta. When BETA is   
    - *            supplied as zero then C need not be set on input.   
    - *  
    - *   C      - DOUBLE COMPLEX PRECISION array of DIMENSION ( LDC, n ).   
    - *            Before entry, the leading m by n part of the array C must 
    - *            contain the matrix C,  except when beta is zero, in which 
    - *            case C need not be set on entry.   
    - *            On exit, the array C is overwritten by the m by n matrix 
    - *	     ( alpha*op( A )*B + beta*C ).   
    - *  
    - *   LDC    - (input) int
    - *            On entry, LDC specifies the first dimension of C as declared 
    - *            in the calling (sub)program. LDC must be at least max(1,m).   
    - *            Unchanged on exit.   
    - *  
    - *   ==== Sparse Level 3 Blas routine.   
    - * 
    - */ - -int -sp_zgemm(char *transa, char *transb, int m, int n, int k, - doublecomplex alpha, SuperMatrix *A, doublecomplex *b, int ldb, - doublecomplex beta, doublecomplex *c, int ldc) -{ - int incx = 1, incy = 1; - int j; - - for (j = 0; j < n; ++j) { - sp_zgemv(transa, alpha, A, &b[ldb*j], incx, beta, &c[ldc*j], incy); - } - return 0; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zutil.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zutil.c deleted file mode 100644 index f3c06cb5ee..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/SuperLU/SRC/zutil.c +++ /dev/null @@ -1,475 +0,0 @@ - -/*! @file zutil.c - * \brief Matrix utility functions - * - *
    - * -- SuperLU routine (version 3.1) --
    - * Univ. of California Berkeley, Xerox Palo Alto Research Center,
    - * and Lawrence Berkeley National Lab.
    - * August 1, 2008
    - *
    - * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
    - *
    - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
    - * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
    - * 
    - * Permission is hereby granted to use or copy this program for any
    - * purpose, provided the above notices are retained on all copies.
    - * Permission to modify the code and to distribute modified code is
    - * granted, provided the above notices are retained, and a notice that
    - * the code was modified is included with the above copyright notice.
    - * 
    - */ - - -#include -#include "slu_zdefs.h" - -void -zCreate_CompCol_Matrix(SuperMatrix *A, int m, int n, int nnz, - doublecomplex *nzval, int *rowind, int *colptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NCformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NCformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->rowind = rowind; - Astore->colptr = colptr; -} - -void -zCreate_CompRow_Matrix(SuperMatrix *A, int m, int n, int nnz, - doublecomplex *nzval, int *colind, int *rowptr, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - NRformat *Astore; - - A->Stype = stype; - A->Dtype = dtype; - A->Mtype = mtype; - A->nrow = m; - A->ncol = n; - A->Store = (void *) SUPERLU_MALLOC( sizeof(NRformat) ); - if ( !(A->Store) ) ABORT("SUPERLU_MALLOC fails for A->Store"); - Astore = A->Store; - Astore->nnz = nnz; - Astore->nzval = nzval; - Astore->colind = colind; - Astore->rowptr = rowptr; -} - -/*! \brief Copy matrix A into matrix B. 
*/ -void -zCopy_CompCol_Matrix(SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore, *Bstore; - int ncol, nnz, i; - - B->Stype = A->Stype; - B->Dtype = A->Dtype; - B->Mtype = A->Mtype; - B->nrow = A->nrow;; - B->ncol = ncol = A->ncol; - Astore = (NCformat *) A->Store; - Bstore = (NCformat *) B->Store; - Bstore->nnz = nnz = Astore->nnz; - for (i = 0; i < nnz; ++i) - ((doublecomplex *)Bstore->nzval)[i] = ((doublecomplex *)Astore->nzval)[i]; - for (i = 0; i < nnz; ++i) Bstore->rowind[i] = Astore->rowind[i]; - for (i = 0; i <= ncol; ++i) Bstore->colptr[i] = Astore->colptr[i]; -} - - -void -zCreate_Dense_Matrix(SuperMatrix *X, int m, int n, doublecomplex *x, int ldx, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - DNformat *Xstore; - - X->Stype = stype; - X->Dtype = dtype; - X->Mtype = mtype; - X->nrow = m; - X->ncol = n; - X->Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) ); - if ( !(X->Store) ) ABORT("SUPERLU_MALLOC fails for X->Store"); - Xstore = (DNformat *) X->Store; - Xstore->lda = ldx; - Xstore->nzval = (doublecomplex *) x; -} - -void -zCopy_Dense_Matrix(int M, int N, doublecomplex *X, int ldx, - doublecomplex *Y, int ldy) -{ -/*! \brief Copies a two-dimensional matrix X to another matrix Y. 
- */ - int i, j; - - for (j = 0; j < N; ++j) - for (i = 0; i < M; ++i) - Y[i + j*ldy] = X[i + j*ldx]; -} - -void -zCreate_SuperNode_Matrix(SuperMatrix *L, int m, int n, int nnz, - doublecomplex *nzval, int *nzval_colptr, int *rowind, - int *rowind_colptr, int *col_to_sup, int *sup_to_col, - Stype_t stype, Dtype_t dtype, Mtype_t mtype) -{ - SCformat *Lstore; - - L->Stype = stype; - L->Dtype = dtype; - L->Mtype = mtype; - L->nrow = m; - L->ncol = n; - L->Store = (void *) SUPERLU_MALLOC( sizeof(SCformat) ); - if ( !(L->Store) ) ABORT("SUPERLU_MALLOC fails for L->Store"); - Lstore = L->Store; - Lstore->nnz = nnz; - Lstore->nsuper = col_to_sup[n]; - Lstore->nzval = nzval; - Lstore->nzval_colptr = nzval_colptr; - Lstore->rowind = rowind; - Lstore->rowind_colptr = rowind_colptr; - Lstore->col_to_sup = col_to_sup; - Lstore->sup_to_col = sup_to_col; - -} - - -/*! \brief Convert a row compressed storage into a column compressed storage. - */ -void -zCompRow_to_CompCol(int m, int n, int nnz, - doublecomplex *a, int *colind, int *rowptr, - doublecomplex **at, int **rowind, int **colptr) -{ - register int i, j, col, relpos; - int *marker; - - /* Allocate storage for another copy of the matrix. */ - *at = (doublecomplex *) doublecomplexMalloc(nnz); - *rowind = (int *) intMalloc(nnz); - *colptr = (int *) intMalloc(n+1); - marker = (int *) intCalloc(n); - - /* Get counts of each column of A, and set up column pointers */ - for (i = 0; i < m; ++i) - for (j = rowptr[i]; j < rowptr[i+1]; ++j) ++marker[colind[j]]; - (*colptr)[0] = 0; - for (j = 0; j < n; ++j) { - (*colptr)[j+1] = (*colptr)[j] + marker[j]; - marker[j] = (*colptr)[j]; - } - - /* Transfer the matrix into the compressed column storage. 
*/ - for (i = 0; i < m; ++i) { - for (j = rowptr[i]; j < rowptr[i+1]; ++j) { - col = colind[j]; - relpos = marker[col]; - (*rowind)[relpos] = i; - (*at)[relpos] = a[j]; - ++marker[col]; - } - } - - SUPERLU_FREE(marker); -} - - -void -zPrint_CompCol_Matrix(char *what, SuperMatrix *A) -{ - NCformat *Astore; - register int i,n; - double *dp; - - printf("\nCompCol matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (NCformat *) A->Store; - dp = (double *) Astore->nzval; - printf("nrow %d, ncol %d, nnz %d\n", A->nrow,A->ncol,Astore->nnz); - printf("nzval: "); - for (i = 0; i < 2*Astore->colptr[n]; ++i) printf("%f ", dp[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->colptr[n]; ++i) printf("%d ", Astore->rowind[i]); - printf("\ncolptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->colptr[i]); - printf("\n"); - fflush(stdout); -} - -void -zPrint_SuperNode_Matrix(char *what, SuperMatrix *A) -{ - SCformat *Astore; - register int i, j, k, c, d, n, nsup; - double *dp; - int *col_to_sup, *sup_to_col, *rowind, *rowind_colptr; - - printf("\nSuperNode matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - n = A->ncol; - Astore = (SCformat *) A->Store; - dp = (double *) Astore->nzval; - col_to_sup = Astore->col_to_sup; - sup_to_col = Astore->sup_to_col; - rowind_colptr = Astore->rowind_colptr; - rowind = Astore->rowind; - printf("nrow %d, ncol %d, nnz %d, nsuper %d\n", - A->nrow,A->ncol,Astore->nnz,Astore->nsuper); - printf("nzval:\n"); - for (k = 0; k <= Astore->nsuper; ++k) { - c = sup_to_col[k]; - nsup = sup_to_col[k+1] - c; - for (j = c; j < c + nsup; ++j) { - d = Astore->nzval_colptr[j]; - for (i = rowind_colptr[c]; i < rowind_colptr[c+1]; ++i) { - printf("%d\t%d\t%e\t%e\n", rowind[i], j, dp[d], dp[d+1]); - d += 2; - } - } - } -#if 0 - for (i = 0; i < 2*Astore->nzval_colptr[n]; ++i) printf("%f ", dp[i]); -#endif - printf("\nnzval_colptr: "); - for (i = 0; 
i <= n; ++i) printf("%d ", Astore->nzval_colptr[i]); - printf("\nrowind: "); - for (i = 0; i < Astore->rowind_colptr[n]; ++i) - printf("%d ", Astore->rowind[i]); - printf("\nrowind_colptr: "); - for (i = 0; i <= n; ++i) printf("%d ", Astore->rowind_colptr[i]); - printf("\ncol_to_sup: "); - for (i = 0; i < n; ++i) printf("%d ", col_to_sup[i]); - printf("\nsup_to_col: "); - for (i = 0; i <= Astore->nsuper+1; ++i) - printf("%d ", sup_to_col[i]); - printf("\n"); - fflush(stdout); -} - -void -zPrint_Dense_Matrix(char *what, SuperMatrix *A) -{ - DNformat *Astore = (DNformat *) A->Store; - register int i, j, lda = Astore->lda; - double *dp; - - printf("\nDense matrix %s:\n", what); - printf("Stype %d, Dtype %d, Mtype %d\n", A->Stype,A->Dtype,A->Mtype); - dp = (double *) Astore->nzval; - printf("nrow %d, ncol %d, lda %d\n", A->nrow,A->ncol,lda); - printf("\nnzval: "); - for (j = 0; j < A->ncol; ++j) { - for (i = 0; i < 2*A->nrow; ++i) printf("%f ", dp[i + j*2*lda]); - printf("\n"); - } - printf("\n"); - fflush(stdout); -} - -/*! \brief Diagnostic print of column "jcol" in the U/L factor. 
- */ -void -zprint_lu_col(char *msg, int jcol, int pivrow, int *xprune, GlobalLU_t *Glu) -{ - int i, k, fsupc; - int *xsup, *supno; - int *xlsub, *lsub; - doublecomplex *lusup; - int *xlusup; - doublecomplex *ucol; - int *usub, *xusub; - - xsup = Glu->xsup; - supno = Glu->supno; - lsub = Glu->lsub; - xlsub = Glu->xlsub; - lusup = Glu->lusup; - xlusup = Glu->xlusup; - ucol = Glu->ucol; - usub = Glu->usub; - xusub = Glu->xusub; - - printf("%s", msg); - printf("col %d: pivrow %d, supno %d, xprune %d\n", - jcol, pivrow, supno[jcol], xprune[jcol]); - - printf("\tU-col:\n"); - for (i = xusub[jcol]; i < xusub[jcol+1]; i++) - printf("\t%d%10.4f, %10.4f\n", usub[i], ucol[i].r, ucol[i].i); - printf("\tL-col in rectangular snode:\n"); - fsupc = xsup[supno[jcol]]; /* first col of the snode */ - i = xlsub[fsupc]; - k = xlusup[jcol]; - while ( i < xlsub[fsupc+1] && k < xlusup[jcol+1] ) { - printf("\t%d\t%10.4f, %10.4f\n", lsub[i], lusup[k].r, lusup[k].i); - i++; k++; - } - fflush(stdout); -} - - -/*! \brief Check whether tempv[] == 0. This should be true before and after calling any numeric routines, i.e., "panel_bmod" and "column_bmod". - */ -void zcheck_tempv(int n, doublecomplex *tempv) -{ - int i; - - for (i = 0; i < n; i++) { - if ((tempv[i].r != 0.0) || (tempv[i].i != 0.0)) - { - fprintf(stderr,"tempv[%d] = {%f, %f}\n", i, tempv[i].r, tempv[i].i); - ABORT("zcheck_tempv"); - } - } -} - - -void -zGenXtrue(int n, int nrhs, doublecomplex *x, int ldx) -{ - int i, j; - for (j = 0; j < nrhs; ++j) - for (i = 0; i < n; ++i) { - x[i + j*ldx].r = 1.0; - x[i + j*ldx].i = 0.0; - } -} - -/*! 
\brief Let rhs[i] = sum of i-th row of A, so the solution vector is all 1's - */ -void -zFillRHS(trans_t trans, int nrhs, doublecomplex *x, int ldx, - SuperMatrix *A, SuperMatrix *B) -{ - NCformat *Astore; - doublecomplex *Aval; - DNformat *Bstore; - doublecomplex *rhs; - doublecomplex one = {1.0, 0.0}; - doublecomplex zero = {0.0, 0.0}; - int ldc; - char transc[1]; - - Astore = A->Store; - Aval = (doublecomplex *) Astore->nzval; - Bstore = B->Store; - rhs = Bstore->nzval; - ldc = Bstore->lda; - - if ( trans == NOTRANS ) *(unsigned char *)transc = 'N'; - else *(unsigned char *)transc = 'T'; - - sp_zgemm(transc, "N", A->nrow, nrhs, A->ncol, one, A, - x, ldx, zero, rhs, ldc); - -} - -/*! \brief Fills a doublecomplex precision array with a given value. - */ -void -zfill(doublecomplex *a, int alen, doublecomplex dval) -{ - register int i; - for (i = 0; i < alen; i++) a[i] = dval; -} - - - -/*! \brief Check the inf-norm of the error vector - */ -void zinf_norm_error(int nrhs, SuperMatrix *X, doublecomplex *xtrue) -{ - DNformat *Xstore; - double err, xnorm; - doublecomplex *Xmat, *soln_work; - doublecomplex temp; - int i, j; - - Xstore = X->Store; - Xmat = Xstore->nzval; - - for (j = 0; j < nrhs; j++) { - soln_work = &Xmat[j*Xstore->lda]; - err = xnorm = 0.0; - for (i = 0; i < X->nrow; i++) { - z_sub(&temp, &soln_work[i], &xtrue[i]); - err = SUPERLU_MAX(err, z_abs(&temp)); - xnorm = SUPERLU_MAX(xnorm, z_abs(&soln_work[i])); - } - err = err / xnorm; - printf("||X - Xtrue||/||X|| = %e\n", err); - } -} - - - -/*! \brief Print performance of the code. */ -void -zPrintPerf(SuperMatrix *L, SuperMatrix *U, mem_usage_t *mem_usage, - double rpg, double rcond, double *ferr, - double *berr, char *equed, SuperLUStat_t *stat) -{ - SCformat *Lstore; - NCformat *Ustore; - double *utime; - flops_t *ops; - - utime = stat->utime; - ops = stat->ops; - - if ( utime[FACT] != 0. 
) - printf("Factor flops = %e\tMflops = %8.2f\n", ops[FACT], - ops[FACT]*1e-6/utime[FACT]); - printf("Identify relaxed snodes = %8.2f\n", utime[RELAX]); - if ( utime[SOLVE] != 0. ) - printf("Solve flops = %.0f, Mflops = %8.2f\n", ops[SOLVE], - ops[SOLVE]*1e-6/utime[SOLVE]); - - Lstore = (SCformat *) L->Store; - Ustore = (NCformat *) U->Store; - printf("\tNo of nonzeros in factor L = %d\n", Lstore->nnz); - printf("\tNo of nonzeros in factor U = %d\n", Ustore->nnz); - printf("\tNo of nonzeros in L+U = %d\n", Lstore->nnz + Ustore->nnz); - - printf("L\\U MB %.3f\ttotal MB needed %.3f\n", - mem_usage->for_lu/1e6, mem_usage->total_needed/1e6); - printf("Number of memory expansions: %d\n", stat->expansions); - - printf("\tFactor\tMflops\tSolve\tMflops\tEtree\tEquil\tRcond\tRefine\n"); - printf("PERF:%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f\n", - utime[FACT], ops[FACT]*1e-6/utime[FACT], - utime[SOLVE], ops[SOLVE]*1e-6/utime[SOLVE], - utime[ETREE], utime[EQUIL], utime[RCOND], utime[REFINE]); - - printf("\tRpg\t\tRcond\t\tFerr\t\tBerr\t\tEquil?\n"); - printf("NUM:\t%e\t%e\t%e\t%e\t%s\n", - rpg, rcond, ferr[0], berr[0], equed); - -} - - - - -print_doublecomplex_vec(char *what, int n, doublecomplex *vec) -{ - int i; - printf("%s: n %d\n", what, n); - for (i = 0; i < n; ++i) printf("%d\t%f%f\n", i, vec[i].r, vec[i].i); - return 0; -} - diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/__init__.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/__init__.py deleted file mode 100644 index 4898fa6c27..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Linear Solvers -============== - -The default solver is SuperLU (included in the scipy distribution), -which can solve real or complex linear systems in both single and -double precisions. It is automatically replaced by UMFPACK, if -available. 
Note that UMFPACK works in double precision only, so -switch it off by - ->>> use_solver( useUmfpack = False ) - -to solve in the single precision. See also use_solver documentation. - -Example session: - ->>> from scipy.sparse import csc_matrix, spdiags ->>> from numpy import array ->>> from scipy.sparse.linalg import spsolve, use_solver ->>> ->>> print "Inverting a sparse linear system:" ->>> print "The sparse matrix (constructed from diagonals):" ->>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) ->>> b = array([1, 2, 3, 4, 5]) ->>> print "Solve: single precision complex:" ->>> use_solver( useUmfpack = False ) ->>> a = a.astype('F') ->>> x = spsolve(a, b) ->>> print x ->>> print "Error: ", a*x-b ->>> ->>> print "Solve: double precision complex:" ->>> use_solver( useUmfpack = True ) ->>> a = a.astype('D') ->>> x = spsolve(a, b) ->>> print x ->>> print "Error: ", a*x-b ->>> ->>> print "Solve: double precision:" ->>> a = a.astype('d') ->>> x = spsolve(a, b) ->>> print x ->>> print "Error: ", a*x-b ->>> ->>> print "Solve: single precision:" ->>> use_solver( useUmfpack = False ) ->>> a = a.astype('f') ->>> x = spsolve(a, b.astype('f')) ->>> print x ->>> print "Error: ", a*x-b - -""" - -from info import __doc__ - -#import umfpack -#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) -#del umfpack - -from linsolve import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superlu_utils.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superlu_utils.c deleted file mode 100644 index c876754656..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superlu_utils.c +++ /dev/null @@ -1,87 +0,0 @@ -/* Should be imported before Python.h */ - -#include - -#define NO_IMPORT_ARRAY -#define PY_ARRAY_UNIQUE_SYMBOL _scipy_sparse_superlu_ARRAY_API - -#include "_superluobject.h" -#include "numpy/npy_3kcompat.h" -#include - -jmp_buf _superlu_py_jmpbuf; 
-PyObject *_superlumodule_memory_dict=NULL; - -/* Abort to be used inside the superlu module so that memory allocation - errors don't exit Python and memory allocated internal to SuperLU is freed. - Calling program should deallocate (using SUPERLU_FREE) all memory that could have - been allocated. (It's ok to FREE unallocated memory)---will be ignored. -*/ - -void superlu_python_module_abort(char *msg) -{ - PyErr_SetString(PyExc_RuntimeError, msg); - longjmp(_superlu_py_jmpbuf, -1); -} - -void *superlu_python_module_malloc(size_t size) -{ - PyObject *key=NULL; - void *mem_ptr; - - if (_superlumodule_memory_dict == NULL) { - _superlumodule_memory_dict = PyDict_New(); - } - mem_ptr = malloc(size); - if (mem_ptr == NULL) return NULL; - key = PyLong_FromVoidPtr(mem_ptr); - if (key == NULL) goto fail; - if (PyDict_SetItem(_superlumodule_memory_dict, key, Py_None)) goto fail; - Py_DECREF(key); - return mem_ptr; - - fail: - Py_XDECREF(key); - free(mem_ptr); - superlu_python_module_abort("superlu_malloc: Cannot set dictionary key value in malloc."); - return NULL; - -} - -void superlu_python_module_free(void *ptr) -{ - PyObject *key; - PyObject *ptype, *pvalue, *ptraceback; - - if (ptr == NULL) return; - PyErr_Fetch(&ptype, &pvalue, &ptraceback); - key = PyLong_FromVoidPtr(ptr); - /* This will only free the pointer if it could find it in the dictionary - of already allocated pointers --- thus after abort, the module can free all - the memory that "might" have been allocated to avoid memory leaks on abort - calls. - */ - if (_superlumodule_memory_dict && \ - !(PyDict_DelItem(_superlumodule_memory_dict, key))) { - free(ptr); - } - Py_DECREF(key); - PyErr_Restore(ptype, pvalue, ptraceback); - return; -} - -/* - * Stubs for Harwell Subroutine Library functions that SuperLU tries to call. 
- */ - -void mc64id_(int *a) -{ - superlu_python_module_abort("chosen functionality not available"); -} - -void mc64ad_(int *a, int *b, int *c, int d[], int e[], double f[], - int *g, int h[], int *i, int j[], int *k, double l[], - int m[], int n[]) -{ - superlu_python_module_abort("chosen functionality not available"); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superlumodule.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superlumodule.c deleted file mode 100644 index d26cb204d4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superlumodule.c +++ /dev/null @@ -1,304 +0,0 @@ -/* -*-c-*- */ -/* - * _superlu module - * - * Python interface to SuperLU decompositions. - */ - -/* Copyright 1999 Travis Oliphant - * - * Permision to copy and modified this file is granted under - * the revised BSD license. No warranty is expressed or IMPLIED - */ - -#include -#include - -#define PY_ARRAY_UNIQUE_SYMBOL _scipy_sparse_superlu_ARRAY_API -#include - -#include "_superluobject.h" -#include "numpy/npy_3kcompat.h" - -extern jmp_buf _superlu_py_jmpbuf; - -/* - * Data-type dependent implementations for Xgssv and Xgstrf; - * - * These have to included from separate files because of SuperLU include - * structure. 
- */ - -static PyObject * -Py_gssv(PyObject *self, PyObject *args, PyObject *kwdict) -{ - PyObject *Py_B=NULL, *Py_X=NULL; - PyArrayObject *nzvals=NULL; - PyArrayObject *colind=NULL, *rowptr=NULL; - int N, nnz; - int info; - int csc=0; - int *perm_r=NULL, *perm_c=NULL; - SuperMatrix A, B, L, U; - superlu_options_t options; - SuperLUStat_t stat; - PyObject *option_dict = NULL; - int type; - int ssv_finished = 0; - - static char *kwlist[] = {"N","nnz","nzvals","colind","rowptr","B", "csc", - "options",NULL}; - - /* Get input arguments */ - if (!PyArg_ParseTupleAndKeywords(args, kwdict, "iiO!O!O!O|iO", kwlist, - &N, &nnz, &PyArray_Type, &nzvals, - &PyArray_Type, &colind, &PyArray_Type, - &rowptr, &Py_B, &csc, &option_dict)) { - return NULL; - } - - if (!_CHECK_INTEGER(colind) || !_CHECK_INTEGER(rowptr)) { - PyErr_SetString(PyExc_TypeError, - "colind and rowptr must be of type cint"); - return NULL; - } - - type = PyArray_TYPE(nzvals); - if (!CHECK_SLU_TYPE(type)) { - PyErr_SetString(PyExc_TypeError, - "nzvals is not of a type supported by SuperLU"); - return NULL; - } - - if (!set_superlu_options_from_dict(&options, 0, option_dict, NULL, NULL)) { - return NULL; - } - - /* Create Space for output */ - Py_X = PyArray_CopyFromObject(Py_B, type, 1, 2); - if (Py_X == NULL) return NULL; - - if (csc) { - if (NCFormat_from_spMatrix(&A, N, N, nnz, nzvals, colind, rowptr, - type)) { - Py_DECREF(Py_X); - return NULL; - } - } - else { - if (NRFormat_from_spMatrix(&A, N, N, nnz, nzvals, colind, rowptr, - type)) { - Py_DECREF(Py_X); - return NULL; - } - } - - if (DenseSuper_from_Numeric(&B, Py_X)) { - Destroy_SuperMatrix_Store(&A); - Py_DECREF(Py_X); - return NULL; - } - - /* B and Py_X share same data now but Py_X "owns" it */ - - /* Setup options */ - - if (setjmp(_superlu_py_jmpbuf)) { - goto fail; - } - else { - perm_c = intMalloc(N); - perm_r = intMalloc(N); - StatInit(&stat); - - /* Compute direct inverse of sparse Matrix */ - gssv(type, &options, &A, perm_c, perm_r, &L, &U, 
&B, &stat, &info); - } - ssv_finished = 1; - - SUPERLU_FREE(perm_r); - SUPERLU_FREE(perm_c); - Destroy_SuperMatrix_Store(&A); /* holds just a pointer to the data */ - Destroy_SuperMatrix_Store(&B); - Destroy_SuperNode_Matrix(&L); - Destroy_CompCol_Matrix(&U); - StatFree(&stat); - - return Py_BuildValue("Ni", Py_X, info); - -fail: - SUPERLU_FREE(perm_r); - SUPERLU_FREE(perm_c); - Destroy_SuperMatrix_Store(&A); /* holds just a pointer to the data */ - Destroy_SuperMatrix_Store(&B); - if (ssv_finished) { - /* Avoid trying to free partially initialized matrices; - might leak some memory, but avoids a crash */ - Destroy_SuperNode_Matrix(&L); - Destroy_CompCol_Matrix(&U); - } - StatFree(&stat); - Py_XDECREF(Py_X); - return NULL; -} - -static PyObject * -Py_gstrf(PyObject *self, PyObject *args, PyObject *keywds) -{ - /* default value for SuperLU parameters*/ - int N, nnz; - PyArrayObject *rowind, *colptr, *nzvals; - SuperMatrix A; - PyObject *result; - PyObject *option_dict = NULL; - int type; - int ilu = 0; - - static char *kwlist[] = {"N","nnz","nzvals","colind","rowptr", - "options", "ilu", - NULL}; - - int res = PyArg_ParseTupleAndKeywords( - args, keywds, "iiO!O!O!|Oi", kwlist, - &N, &nnz, - &PyArray_Type, &nzvals, - &PyArray_Type, &rowind, - &PyArray_Type, &colptr, - &option_dict, - &ilu); - - if (!res) - return NULL; - - if (!_CHECK_INTEGER(colptr) || !_CHECK_INTEGER(rowind)) { - PyErr_SetString(PyExc_TypeError, - "rowind and colptr must be of type cint"); - return NULL; - } - - type = PyArray_TYPE(nzvals); - if (!CHECK_SLU_TYPE(type)) { - PyErr_SetString(PyExc_TypeError, - "nzvals is not of a type supported by SuperLU"); - return NULL; - } - - if (NCFormat_from_spMatrix(&A, N, N, nnz, nzvals, rowind, colptr, - type)) { - goto fail; - } - - result = newSciPyLUObject(&A, option_dict, type, ilu); - if (result == NULL) { - goto fail; - } - - /* arrays of input matrix will not be freed */ - Destroy_SuperMatrix_Store(&A); - return result; - -fail: - /* arrays of input 
matrix will not be freed */ - Destroy_SuperMatrix_Store(&A); - return NULL; -} - -static char gssv_doc[] = "Direct inversion of sparse matrix.\n\nX = gssv(A,B) solves A*X = B for X."; - -static char gstrf_doc[] = "gstrf(A, ...)\n\ -\n\ -performs a factorization of the sparse matrix A=*(N,nnz,nzvals,rowind,colptr) and \n\ -returns a factored_lu object.\n\ -\n\ -arguments\n\ ----------\n\ -\n\ -Matrix to be factorized is represented as N,nnz,nzvals,rowind,colptr\n\ - as separate arguments. This is compressed sparse column representation.\n\ -\n\ -N number of rows and columns \n\ -nnz number of non-zero elements\n\ -nzvals non-zero values \n\ -rowind row-index for this column (same size as nzvals)\n\ -colptr index into rowind for first non-zero value in this column\n\ - size is (N+1). Last value should be nnz. \n\ -\n\ -additional keyword arguments:\n\ ------------------------------\n\ -options specifies additional options for SuperLU\n\ - (same keys and values as in superlu_options_t C structure,\n\ - and additionally 'Relax' and 'PanelSize')\n\ -\n\ -ilu whether to perform an incomplete LU decomposition\n\ - (default: false)\n\ -"; - - -/* - * Main SuperLU module - */ - -static PyMethodDef SuperLU_Methods[] = { - {"gssv", (PyCFunction)Py_gssv, METH_VARARGS|METH_KEYWORDS, gssv_doc}, - {"gstrf", (PyCFunction)Py_gstrf, METH_VARARGS|METH_KEYWORDS, gstrf_doc}, - {NULL, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 - -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_superlu", - NULL, - -1, - SuperLU_Methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__superlu(void) -{ - PyObject *m, *d; - - import_array(); - - if (PyType_Ready(&SciPySuperLUType) < 0) { - return; - } - - m = PyModule_Create(&moduledef); - d = PyModule_GetDict(m); - - Py_INCREF(&PyArrayFlags_Type); - PyDict_SetItemString(d, "SciPyLUType", (PyObject *)&SciPySuperLUType); - - if (PyErr_Occurred()) - Py_FatalError("can't initialize module _superlu"); - - return m; -} - -#else - 
-PyMODINIT_FUNC -init_superlu(void) -{ - PyObject *m, *d; - - import_array(); - - SciPySuperLUType.ob_type = &PyType_Type; - if (PyType_Ready(&SciPySuperLUType) < 0) { - return; - } - - m = Py_InitModule("_superlu", SuperLU_Methods); - d = PyModule_GetDict(m); - - Py_INCREF(&PyArrayFlags_Type); - PyDict_SetItemString(d, "SciPyLUType", (PyObject *)&SciPySuperLUType); -} - -#endif diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superluobject.c b/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superluobject.c deleted file mode 100644 index e9ec0bd6e6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superluobject.c +++ /dev/null @@ -1,781 +0,0 @@ -/* -*-c-*- */ -/* - * _superlu object - * - * Python object representing SuperLU factorization + some utility functions. - */ - -#include - -#define NO_IMPORT_ARRAY -#define PY_ARRAY_UNIQUE_SYMBOL _scipy_sparse_superlu_ARRAY_API - -#include "_superluobject.h" -#include "numpy/npy_3kcompat.h" -#include -#include - -extern jmp_buf _superlu_py_jmpbuf; - - -/*********************************************************************** - * SciPyLUObject methods - */ - -static char solve_doc[] = "x = self.solve(b, trans)\n\ -\n\ -solves linear system of equations with one or sereral right hand sides.\n\ -\n\ -parameters\n\ -----------\n\ -\n\ -b array, right hand side(s) of equation\n\ -x array, solution vector(s)\n\ -trans 'N': solve A * x == b\n\ - 'T': solve A^T * x == b\n\ - 'H': solve A^H * x == b\n\ - (optional, default value 'N')\n\ -"; - -static PyObject * -SciPyLU_solve(SciPyLUObject *self, PyObject *args, PyObject *kwds) { - PyArrayObject *b, *x=NULL; - SuperMatrix B; -#ifndef NPY_PY3K - char itrans = 'N'; -#else - int itrans = 'N'; -#endif - int info; - trans_t trans; - SuperLUStat_t stat; - - static char *kwlist[] = {"rhs","trans",NULL}; - - if (!CHECK_SLU_TYPE(self->type)) { - PyErr_SetString(PyExc_ValueError, "unsupported data type"); - return NULL; - } - -#ifndef NPY_PY3K - if (!PyArg_ParseTupleAndKeywords(args, 
kwds, "O!|c", kwlist, -#else - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|C", kwlist, -#endif - &PyArray_Type, &b, - &itrans)) - return NULL; - - /* solve transposed system: matrix was passed row-wise instead of - * column-wise */ - if (itrans == 'n' || itrans == 'N') - trans = NOTRANS; - else if (itrans == 't' || itrans == 'T') - trans = TRANS; - else if (itrans == 'h' || itrans == 'H') - trans = CONJ; - else { - PyErr_SetString(PyExc_ValueError, "trans must be N, T, or H"); - return NULL; - } - - if ((x = (PyArrayObject *) \ - PyArray_CopyFromObject((PyObject *)b,self->type,1,2))==NULL) return NULL; - - if (b->dimensions[0] != self->n) goto fail; - - - if (setjmp(_superlu_py_jmpbuf)) goto fail; - - if (DenseSuper_from_Numeric(&B, (PyObject *)x)) goto fail; - - StatInit(&stat); - - /* Solve the system, overwriting vector x. */ - gstrs(self->type, - trans, &self->L, &self->U, self->perm_c, self->perm_r, &B, - &stat, &info); - - if (info) { - PyErr_SetString(PyExc_SystemError, - "gstrs was called with invalid arguments"); - goto fail; - } - - /* free memory */ - Destroy_SuperMatrix_Store(&B); - StatFree(&stat); - return (PyObject *)x; - -fail: - Destroy_SuperMatrix_Store(&B); - StatFree(&stat); - Py_XDECREF(x); - return NULL; -} - -/** table of object methods - */ -PyMethodDef SciPyLU_methods[] = { - {"solve", (PyCFunction)SciPyLU_solve, METH_VARARGS|METH_KEYWORDS, solve_doc}, - {NULL, NULL} /* sentinel */ -}; - - -/*********************************************************************** - * SciPySuperLUType methods - */ - -static void -SciPyLU_dealloc(SciPyLUObject *self) -{ - SUPERLU_FREE(self->perm_r); - SUPERLU_FREE(self->perm_c); - if (self->L.Store != NULL) { - Destroy_SuperNode_Matrix(&self->L); - } - if (self->U.Store != NULL) { - Destroy_CompCol_Matrix(&self->U); - } - PyObject_Del(self); -} - -static PyObject * -SciPyLU_getattr(SciPyLUObject *self, char *name) -{ - if (strcmp(name, "shape") == 0) - return Py_BuildValue("(i,i)", self->m, self->n); - if 
(strcmp(name, "nnz") == 0) - return Py_BuildValue("i", ((SCformat *)self->L.Store)->nnz + ((SCformat *)self->U.Store)->nnz); - if (strcmp(name, "perm_r") == 0) { - PyArrayObject* perm_r = PyArray_SimpleNewFromData(1, (npy_intp*) (&self->n), NPY_INT, (void*)self->perm_r); - /* For ref counting of the memory */ - PyArray_BASE(perm_r) = self; - Py_INCREF(self); - return perm_r ; - } - if (strcmp(name, "perm_c") == 0) { - PyArrayObject* perm_c = PyArray_SimpleNewFromData(1, (npy_intp*) (&self->n), NPY_INT, (void*)self->perm_c); - /* For ref counting of the memory */ - PyArray_BASE(perm_c) = self; - Py_INCREF(self); - return perm_c ; - } - if (strcmp(name, "__members__") == 0) { - char *members[] = {"shape", "nnz", "perm_r", "perm_c"}; - int i; - - PyObject *list = PyList_New(sizeof(members)/sizeof(char *)); - if (list != NULL) { - for (i = 0; i < sizeof(members)/sizeof(char *); i ++) - PyList_SetItem(list, i, PyUString_FromString(members[i])); - if (PyErr_Occurred()) { - Py_DECREF(list); - list = NULL; - } - } - return list; - } -#if PY_VERSION_HEX >= 0x03000000 - if (1) { - PyObject *str, *ret; - str = PyUnicode_FromString(name); - ret = PyObject_GenericGetAttr((PyObject *)self, str); - Py_DECREF(str); - return ret; - } -#else - return Py_FindMethod(SciPyLU_methods, (PyObject *)self, name); -#endif -} - - -/*********************************************************************** - * SciPySuperLUType structure - */ -static char factored_lu_doc[] = "\ -Object resulting from a factorization of a sparse matrix\n\ -\n\ -Attributes\n\ ------------\n\ -\n\ -shape : 2-tuple\n\ - the shape of the orginal matrix factored\n\ -nnz : int\n\ - the number of non-zero elements in the matrix\n\ -perm_c\n\ - the permutation applied to the colums of the matrix for the LU factorization\n\ -perm_r\n\ - the permutation applied to the rows of the matrix for the LU factorization\n\ -\n\ -Methods\n\ --------\n\ -solve\n\ - solves the system for a given right hand side vector\n \ -\n\ -"; - 
-PyTypeObject SciPySuperLUType = { -#if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, -#endif - "factored_lu", - sizeof(SciPyLUObject), - 0, - (destructor)SciPyLU_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - (getattrfunc)SciPyLU_getattr, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare / tp_reserved */ - 0, /* tp_repr */ - 0, /* tp_as_number*/ - 0, /* tp_as_sequence*/ - 0, /* tp_as_mapping*/ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - factored_lu_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - SciPyLU_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ -#if PY_VERSION_HEX >= 0x02060000 - 0, /* tp_version_tag */ -#endif -}; - - -int DenseSuper_from_Numeric(SuperMatrix *X, PyObject *PyX) -{ - int m, n, ldx, nd; - PyArrayObject *aX; - - if (!PyArray_Check(PyX)) { - PyErr_SetString(PyExc_TypeError, "dgssv: Second argument is not an array."); - return -1; - } - - aX = (PyArrayObject *)PyX; - nd = aX->nd; - - if (nd == 1) { - m = aX->dimensions[0]; - n = 1; - ldx = m; - } - else { /* nd == 2 */ - m = aX->dimensions[1]; - n = aX->dimensions[0]; - ldx = m; - } - - if (setjmp(_superlu_py_jmpbuf)) - return -1; - else { - if (!CHECK_SLU_TYPE(aX->descr->type_num)) { - PyErr_SetString(PyExc_ValueError, "unsupported data type"); - return -1; - } - Create_Dense_Matrix(aX->descr->type_num, X, m, n, - aX->data, ldx, SLU_DN, - 
NPY_TYPECODE_TO_SLU(aX->descr->type_num), SLU_GE); - } - return 0; -} - -/* Natively handles Compressed Sparse Row and CSC */ - -int NRFormat_from_spMatrix(SuperMatrix *A, int m, int n, int nnz, - PyArrayObject *nzvals, PyArrayObject *colind, - PyArrayObject *rowptr, int typenum) -{ - int err = 0; - - err = (nzvals->descr->type_num != typenum); - err += (nzvals->nd != 1); - err += (nnz > nzvals->dimensions[0]); - if (err) { - PyErr_SetString(PyExc_TypeError, "Fourth argument must be a 1-D array at least as big as third argument."); - return -1; - } - - if (setjmp(_superlu_py_jmpbuf)) - return -1; - else { - if (!CHECK_SLU_TYPE(nzvals->descr->type_num)) { - PyErr_SetString(PyExc_TypeError, "Invalid type for array."); - return -1; - } - Create_CompRow_Matrix(nzvals->descr->type_num, - A, m, n, nnz, nzvals->data, (int *)colind->data, - (int *)rowptr->data, SLU_NR, - NPY_TYPECODE_TO_SLU(nzvals->descr->type_num), - SLU_GE); - } - - return 0; -} - -int NCFormat_from_spMatrix(SuperMatrix *A, int m, int n, int nnz, - PyArrayObject *nzvals, PyArrayObject *rowind, - PyArrayObject *colptr, int typenum) -{ - int err=0; - - err = (nzvals->descr->type_num != typenum); - err += (nzvals->nd != 1); - err += (nnz > nzvals->dimensions[0]); - if (err) { - PyErr_SetString(PyExc_TypeError, "Fifth argument must be a 1-D array at least as big as fourth argument."); - return -1; - } - - - if (setjmp(_superlu_py_jmpbuf)) - return -1; - else { - if (!CHECK_SLU_TYPE(nzvals->descr->type_num)) { - PyErr_SetString(PyExc_TypeError, "Invalid type for array."); - return -1; - } - Create_CompCol_Matrix(nzvals->descr->type_num, - A, m, n, nnz, nzvals->data, (int *)rowind->data, - (int *)colptr->data, SLU_NC, - NPY_TYPECODE_TO_SLU(nzvals->descr->type_num), - SLU_GE); - } - - return 0; -} - -PyObject * -newSciPyLUObject(SuperMatrix *A, PyObject *option_dict, int intype, int ilu) -{ - - /* A must be in SLU_NC format used by the factorization routine. 
*/ - SciPyLUObject *self; - SuperMatrix AC; /* Matrix postmultiplied by Pc */ - int lwork = 0; - int *etree=NULL; - int info; - int n; - superlu_options_t options; - SuperLUStat_t stat; - int panel_size, relax; - int trf_finished = 0; - - n = A->ncol; - - if (!set_superlu_options_from_dict(&options, ilu, option_dict, - &panel_size, &relax)) { - return NULL; - } - - /* Create SciPyLUObject */ - self = PyObject_New(SciPyLUObject, &SciPySuperLUType); - if (self == NULL) - return PyErr_NoMemory(); - self->m = A->nrow; - self->n = n; - self->perm_r = NULL; - self->perm_c = NULL; - self->type = intype; - - if (setjmp(_superlu_py_jmpbuf)) goto fail; - - /* Calculate and apply minimum degree ordering*/ - etree = intMalloc(n); - self->perm_r = intMalloc(n); - self->perm_c = intMalloc(n); - StatInit(&stat); - - get_perm_c(options.ColPerm, A, self->perm_c); /* calc column permutation */ - sp_preorder(&options, A, self->perm_c, etree, &AC); /* apply column - * permutation */ - - /* Perform factorization */ - if (!CHECK_SLU_TYPE(SLU_TYPECODE_TO_NPY(A->Dtype))) { - PyErr_SetString(PyExc_ValueError, "Invalid type in SuperMatrix."); - goto fail; - } - if (ilu) { - gsitrf(SLU_TYPECODE_TO_NPY(A->Dtype), - &options, &AC, relax, panel_size, - etree, NULL, lwork, self->perm_c, self->perm_r, - &self->L, &self->U, &stat, &info); - } - else { - gstrf(SLU_TYPECODE_TO_NPY(A->Dtype), - &options, &AC, relax, panel_size, - etree, NULL, lwork, self->perm_c, self->perm_r, - &self->L, &self->U, &stat, &info); - } - trf_finished = 1; - - if (info) { - if (info < 0) - PyErr_SetString(PyExc_SystemError, - "gstrf was called with invalid arguments"); - else { - if (info <= n) - PyErr_SetString(PyExc_RuntimeError, "Factor is exactly singular"); - else - PyErr_NoMemory(); - } - goto fail; - } - - /* free memory */ - SUPERLU_FREE(etree); - Destroy_CompCol_Permuted(&AC); - StatFree(&stat); - - return (PyObject *)self; - -fail: - if (!trf_finished) { - /* Avoid trying to free partially initialized 
matrices; - might leak some memory, but avoids a crash */ - self->L.Store = NULL; - self->U.Store = NULL; - } - SUPERLU_FREE(etree); - Destroy_CompCol_Permuted(&AC); - StatFree(&stat); - SciPyLU_dealloc(self); - return NULL; -} - - -/*********************************************************************** - * Preparing superlu_options_t - */ - -#define ENUM_CHECK_INIT \ - long i = -1; \ - char *s = ""; \ - PyObject *tmpobj = NULL; \ - if (input == Py_None) return 1; \ - if (PyString_Check(input)) { \ - s = PyString_AS_STRING(input); \ - } \ - else if (PyUnicode_Check(input)) { \ - tmpobj = PyUnicode_AsASCIIString(input);\ - if (tmpobj == NULL) return 0; \ - s = PyString_AS_STRING(tmpobj); \ - } \ - else if (PyInt_Check(input)) { \ - i = PyInt_AsLong(input); \ - } - -#define ENUM_CHECK_FINISH(message) \ - Py_XDECREF(tmpobj); \ - PyErr_SetString(PyExc_ValueError, message); \ - return 0; - -#define ENUM_CHECK(name) \ - if (my_strxcmp(s, #name) == 0 || i == (long)name) { \ - *value = name; \ - Py_XDECREF(tmpobj); \ - return 1; \ - } - -/* - * Compare strings ignoring case, underscores and whitespace - */ -static int my_strxcmp(const char *a, const char *b) -{ - int c; - while (*a != '\0' && *b != '\0') { - while (*a == '_' || isspace(*a)) ++a; - while (*b == '_' || isspace(*b)) ++b; - c = (int)tolower(*a) - (int)tolower(*b); - if (c != 0) { - return c; - } - ++a; - ++b; - } - return (int)tolower(*a) - (int)tolower(*b); -} - -static int yes_no_cvt(PyObject *input, yes_no_t *value) -{ - if (input == Py_None) { - return 1; - } - else if (input == Py_True) { - *value = YES; - } else if (input == Py_False) { - *value = NO; - } else { - PyErr_SetString(PyExc_ValueError, "value not a boolean"); - return 0; - } - return 1; -} - -static int fact_cvt(PyObject *input, fact_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(DOFACT); - ENUM_CHECK(SamePattern); - ENUM_CHECK(SamePattern_SameRowPerm); - ENUM_CHECK(FACTORED); - ENUM_CHECK_FINISH("invalid value for 'Fact' parameter"); -} - 
-static int rowperm_cvt(PyObject *input, rowperm_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(NOROWPERM); - ENUM_CHECK(LargeDiag); - ENUM_CHECK(MY_PERMR); - ENUM_CHECK_FINISH("invalid value for 'RowPerm' parameter"); -} - -static int colperm_cvt(PyObject *input, colperm_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(NATURAL); - ENUM_CHECK(MMD_ATA); - ENUM_CHECK(MMD_AT_PLUS_A); - ENUM_CHECK(COLAMD); - ENUM_CHECK(MY_PERMC); - ENUM_CHECK_FINISH("invalid value for 'ColPerm' parameter"); -} - -static int trans_cvt(PyObject *input, trans_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(NOTRANS); - ENUM_CHECK(TRANS); - ENUM_CHECK(CONJ); - if (my_strxcmp(s, "N") == 0) { *value = NOTRANS; return 1; } - if (my_strxcmp(s, "T") == 0) { *value = TRANS; return 1; } - if (my_strxcmp(s, "H") == 0) { *value = CONJ; return 1; } - ENUM_CHECK_FINISH("invalid value for 'Trans' parameter"); -} - -static int iterrefine_cvt(PyObject *input, IterRefine_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(NOREFINE); - ENUM_CHECK(SINGLE); - ENUM_CHECK(DOUBLE); - ENUM_CHECK(EXTRA); - ENUM_CHECK_FINISH("invalid value for 'IterRefine' parameter"); -} - -static int norm_cvt(PyObject *input, norm_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(ONE_NORM); - ENUM_CHECK(TWO_NORM); - ENUM_CHECK(INF_NORM); - ENUM_CHECK_FINISH("invalid value for 'ILU_Norm' parameter"); -} - -static int milu_cvt(PyObject *input, milu_t *value) -{ - ENUM_CHECK_INIT; - ENUM_CHECK(SILU); - ENUM_CHECK(SMILU_1); - ENUM_CHECK(SMILU_2); - ENUM_CHECK(SMILU_3); - ENUM_CHECK_FINISH("invalid value for 'ILU_MILU' parameter"); -} - -static int droprule_one_cvt(PyObject *input, int *value) -{ - ENUM_CHECK_INIT; - if (my_strxcmp(s, "BASIC") == 0) { *value = DROP_BASIC; return 1; } - if (my_strxcmp(s, "PROWS") == 0) { *value = DROP_PROWS; return 1; } - if (my_strxcmp(s, "COLUMN") == 0) { *value = DROP_COLUMN; return 1; } - if (my_strxcmp(s, "AREA") == 0) { *value = DROP_AREA; return 1; } - if (my_strxcmp(s, "SECONDARY") == 0) { *value = DROP_SECONDARY; 
return 1; } - if (my_strxcmp(s, "DYNAMIC") == 0) { *value = DROP_DYNAMIC; return 1; } - if (my_strxcmp(s, "INTERP") == 0) { *value = DROP_INTERP; return 1; } - ENUM_CHECK_FINISH("invalid value for 'ILU_DropRule' parameter"); -} - -static int droprule_cvt(PyObject *input, int *value) -{ - PyObject *seq = NULL; - int i; - int rule = 0; - - if (input == Py_None) { - /* Leave as default */ - return 1; - } - else if (PyInt_Check(input)) { - *value = PyInt_AsLong(input); - return 1; - } - else if (PyString_Check(input)) { - /* Comma-separated string */ - seq = PyObject_CallMethod(input, "split", "s", ","); - if (seq == NULL || !PySequence_Check(seq)) - goto fail; - } - else if (PyUnicode_Check(input)) { - /* Comma-separated string */ - PyObject *s; - int ret; - s = PyUnicode_AsASCIIString(input); - if (s == NULL) { - goto fail; - } - ret = droprule_cvt(s, value); - Py_DECREF(s); - return ret; - } - else if (PySequence_Check(input)) { - /* Sequence of strings or integers */ - seq = input; - Py_INCREF(seq); - } - else { - PyErr_SetString(PyExc_ValueError, "invalid value for drop rule"); - goto fail; - } - - /* OR multiple values together */ - for (i = 0; i < PySequence_Size(seq); ++i) { - PyObject *item; - int one_value; - item = PySequence_ITEM(seq, i); - if (item == NULL) { - goto fail; - } - if (!droprule_one_cvt(item, &one_value)) { - Py_DECREF(item); - goto fail; - } - Py_DECREF(item); - rule |= one_value; - } - Py_DECREF(seq); - - *value = rule; - return 1; - -fail: - Py_XDECREF(seq); - return 0; -} - -static int double_cvt(PyObject *input, double *value) -{ - if (input == Py_None) return 1; - *value = PyFloat_AsDouble(input); - if (PyErr_Occurred()) return 0; - return 1; -} - -static int int_cvt(PyObject *input, int *value) -{ - if (input == Py_None) return 1; - *value = PyInt_AsLong(input); - if (PyErr_Occurred()) return 0; - return 1; -} - -int set_superlu_options_from_dict(superlu_options_t *options, - int ilu, PyObject *option_dict, - int *panel_size, int 
*relax) -{ - PyObject *args; - int ret; - int _relax, _panel_size; - - static char *kwlist[] = { - "Fact", "Equil", "ColPerm", "Trans", "IterRefine", - "DiagPivotThresh", "PivotGrowth", "ConditionNumber", - "RowPerm", "SymmetricMode", "PrintStat", "ReplaceTinyPivot", - "SolveInitialized", "RefineInitialized", "ILU_Norm", - "ILU_MILU", "ILU_DropTol", "ILU_FillTol", "ILU_FillFactor", - "ILU_DropRule", "PanelSize", "Relax", NULL - }; - - if (ilu) { - ilu_set_default_options(options); - } - else { - set_default_options(options); - } - - _panel_size = sp_ienv(1); - _relax = sp_ienv(2); - - if (option_dict == NULL) { - return 0; - } - - args = PyTuple_New(0); - ret = PyArg_ParseTupleAndKeywords( - args, option_dict, - "|O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&O&", kwlist, - fact_cvt, &options->Fact, - yes_no_cvt, &options->Equil, - colperm_cvt, &options->ColPerm, - trans_cvt, &options->Trans, - iterrefine_cvt, &options->IterRefine, - double_cvt, &options->DiagPivotThresh, - yes_no_cvt, &options->PivotGrowth, - yes_no_cvt, &options->ConditionNumber, - rowperm_cvt, &options->RowPerm, - yes_no_cvt, &options->SymmetricMode, - yes_no_cvt, &options->PrintStat, - yes_no_cvt, &options->ReplaceTinyPivot, - yes_no_cvt, &options->SolveInitialized, - yes_no_cvt, &options->RefineInitialized, - norm_cvt, &options->ILU_Norm, - milu_cvt, &options->ILU_MILU, - double_cvt, &options->ILU_DropTol, - double_cvt, &options->ILU_FillTol, - double_cvt, &options->ILU_FillFactor, - droprule_cvt, &options->ILU_DropRule, - int_cvt, &_panel_size, - int_cvt, &_relax - ); - Py_DECREF(args); - - if (panel_size != NULL) { - *panel_size = _panel_size; - } - if (relax != NULL) { - *relax = _relax; - } - - return ret; -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superluobject.h b/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superluobject.h deleted file mode 100644 index 182821d090..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/_superluobject.h +++ /dev/null @@ -1,126 +0,0 @@ -/* -*-c-*- 
*/ -/* - * _superlu object - * - * Python object representing SuperLU factorization + some utility functions. - */ - -#ifndef __SUPERLU_OBJECT -#define __SUPERLU_OBJECT - -#include "Python.h" -#include "SuperLU/SRC/slu_zdefs.h" -#include "numpy/arrayobject.h" -#include "SuperLU/SRC/slu_util.h" -#include "SuperLU/SRC/slu_dcomplex.h" -#include "SuperLU/SRC/slu_scomplex.h" - - -#define _CHECK_INTEGER(x) (PyArray_ISINTEGER(x) && (x)->descr->elsize == sizeof(int)) - -/* - * SuperLUObject definition - */ -typedef struct { - PyObject_HEAD - npy_intp m,n; - SuperMatrix L; - SuperMatrix U; - int *perm_r; - int *perm_c; - int type; -} SciPyLUObject; - -extern PyTypeObject SciPySuperLUType; - -int DenseSuper_from_Numeric(SuperMatrix *, PyObject *); -int NRFormat_from_spMatrix(SuperMatrix *, int, int, int, PyArrayObject *, - PyArrayObject *, PyArrayObject *, int); -int NCFormat_from_spMatrix(SuperMatrix *, int, int, int, PyArrayObject *, - PyArrayObject *, PyArrayObject *, int); -colperm_t superlu_module_getpermc(int); -PyObject *newSciPyLUObject(SuperMatrix *, PyObject*, int, int); -int set_superlu_options_from_dict(superlu_options_t *options, - int ilu, PyObject *option_dict, - int *panel_size, int *relax); - -/* - * Definitions for other SuperLU data types than Z, - * and type-generic definitions. - */ - -#define CHECK_SLU_TYPE(type) \ - (type == NPY_FLOAT || type == NPY_DOUBLE || type == NPY_CFLOAT || type == NPY_CDOUBLE) - -#define TYPE_GENERIC_FUNC(name, returntype) \ - returntype s##name(name##_ARGS); \ - returntype d##name(name##_ARGS); \ - returntype c##name(name##_ARGS); \ - static returntype name(int type, name##_ARGS) \ - { \ - switch(type) { \ - case NPY_FLOAT: s##name(name##_ARGS_REF); break; \ - case NPY_DOUBLE: d##name(name##_ARGS_REF); break; \ - case NPY_CFLOAT: c##name(name##_ARGS_REF); break; \ - case NPY_CDOUBLE: z##name(name##_ARGS_REF); break; \ - default: return; \ - } \ - } - -#define SLU_TYPECODE_TO_NPY(s) \ - ( ((s) == SLU_S) ? 
NPY_FLOAT : \ - ((s) == SLU_D) ? NPY_DOUBLE : \ - ((s) == SLU_C) ? NPY_CFLOAT : \ - ((s) == SLU_Z) ? NPY_CDOUBLE : -1) - -#define NPY_TYPECODE_TO_SLU(s) \ - ( ((s) == NPY_FLOAT) ? SLU_S : \ - ((s) == NPY_DOUBLE) ? SLU_D : \ - ((s) == NPY_CFLOAT) ? SLU_C : \ - ((s) == NPY_CDOUBLE) ? SLU_Z : -1) - -#define gstrf_ARGS \ - superlu_options_t *a, SuperMatrix *b, \ - int c, int d, int *e, void *f, int g, \ - int *h, int *i, SuperMatrix *j, SuperMatrix *k, \ - SuperLUStat_t *l, int *m -#define gstrf_ARGS_REF a,b,c,d,e,f,g,h,i,j,k,l,m - -#define gsitrf_ARGS gstrf_ARGS -#define gsitrf_ARGS_REF gstrf_ARGS_REF - -#define gstrs_ARGS \ - trans_t a, SuperMatrix *b, SuperMatrix *c, \ - int *d, int *e, SuperMatrix *f, \ - SuperLUStat_t *g, int *h -#define gstrs_ARGS_REF a,b,c,d,e,f,g,h - -#define gssv_ARGS \ - superlu_options_t *a, SuperMatrix *b, int *c, int *d, \ - SuperMatrix *e, SuperMatrix *f, SuperMatrix *g, \ - SuperLUStat_t *h, int *i -#define gssv_ARGS_REF a,b,c,d,e,f,g,h,i - -#define Create_Dense_Matrix_ARGS \ - SuperMatrix *a, int b, int c, void *d, int e, \ - Stype_t f, Dtype_t g, Mtype_t h -#define Create_Dense_Matrix_ARGS_REF a,b,c,d,e,f,g,h - -#define Create_CompRow_Matrix_ARGS \ - SuperMatrix *a, int b, int c, int d, \ - void *e, int *f, int *g, \ - Stype_t h, Dtype_t i, Mtype_t j -#define Create_CompRow_Matrix_ARGS_REF a,b,c,d,e,f,g,h,i,j - -#define Create_CompCol_Matrix_ARGS Create_CompRow_Matrix_ARGS -#define Create_CompCol_Matrix_ARGS_REF Create_CompRow_Matrix_ARGS_REF - -TYPE_GENERIC_FUNC(gstrf, void); -TYPE_GENERIC_FUNC(gsitrf, void); -TYPE_GENERIC_FUNC(gstrs, void); -TYPE_GENERIC_FUNC(gssv, void); -TYPE_GENERIC_FUNC(Create_Dense_Matrix, void); -TYPE_GENERIC_FUNC(Create_CompRow_Matrix, void); -TYPE_GENERIC_FUNC(Create_CompCol_Matrix, void); - -#endif /* __SUPERLU_OBJECT */ diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/bento.info b/scipy-0.10.1/scipy/sparse/linalg/dsolve/bento.info deleted file mode 100644 index 06008f0983..0000000000 --- 
a/scipy-0.10.1/scipy/sparse/linalg/dsolve/bento.info +++ /dev/null @@ -1,14 +0,0 @@ -HookFile: bscript - -Recurse: umfpack - -Library: - Packages: umfpack - CompiledLibrary: superlu_src - Sources: - SuperLU/SRC/*.c - Extension: _superlu - Sources: - _superlu_utils.c, - _superluobject.c, - _superlumodule.c diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/bscript b/scipy-0.10.1/scipy/sparse/linalg/dsolve/bscript deleted file mode 100644 index 267295fe96..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/bscript +++ /dev/null @@ -1,17 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - bld = context.waf_context - default_builder = context.default_builder - - def builder(extension): - return default_builder(extension, - use="superlu_src FLAPACK") - context.register_builder("_superlu", builder) - - def st_builder(extension): - return default_builder(extension, - features="c bento cstlib", - defines=["USE_VENDOR_BLAS=2"]) - context.register_compiled_library_builder("superlu_src", st_builder) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/info.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/info.py deleted file mode 100644 index 1fe210cec6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/info.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Linear Solvers -============== - -The default solver is SuperLU (included in the scipy distribution), which can -solve real or complex linear systems in both single and double precisions. It -is automatically replaced by UMFPACK, if available. Note that UMFPACK works in -double precision only, so switch it off by ->>> use_solver( useUmfpack = False ) -to solve in the single precision. See also use_solver documentation. 
- -Example session: - ->>> from scipy.sparse import csc_matrix, spdiags ->>> from numpy import array ->>> from scipy.sparse.linalg import spsolve, use_solver ->>> ->>> print "Inverting a sparse linear system:" ->>> print "The sparse matrix (constructed from diagonals):" ->>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) ->>> b = array([1, 2, 3, 4, 5]) ->>> print "Solve: single precision complex:" ->>> use_solver( useUmfpack = False ) ->>> a = a.astype('F') ->>> x = spsolve(a, b) ->>> print x ->>> print "Error: ", a*x-b ->>> ->>> print "Solve: double precision complex:" ->>> use_solver( useUmfpack = True ) ->>> a = a.astype('D') ->>> x = spsolve(a, b) ->>> print x ->>> print "Error: ", a*x-b ->>> ->>> print "Solve: double precision:" ->>> a = a.astype('d') ->>> x = spsolve(a, b) ->>> print x ->>> print "Error: ", a*x-b ->>> ->>> print "Solve: single precision:" ->>> use_solver( useUmfpack = False ) ->>> a = a.astype('f') ->>> x = spsolve(a, b.astype('f')) ->>> print x ->>> print "Error: ", a*x-b - - -""" -postpone_import = 1 diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/linsolve.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/linsolve.py deleted file mode 100644 index 6c5b29093c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/linsolve.py +++ /dev/null @@ -1,283 +0,0 @@ -from warnings import warn - -from numpy import asarray -from scipy.sparse import isspmatrix_csc, isspmatrix_csr, isspmatrix, \ - SparseEfficiencyWarning, csc_matrix - -import _superlu - -noScikit = False -try: - import scikits.umfpack as umfpack -except ImportError: - import umfpack - noScikit = True - -isUmfpack = hasattr( umfpack, 'UMFPACK_OK' ) - -useUmfpack = True - - -__all__ = [ 'use_solver', 'spsolve', 'splu', 'spilu', 'factorized' ] - -def use_solver( **kwargs ): - """ - Valid keyword arguments with defaults (other ignored): - useUmfpack = True - assumeSortedIndices = False - - The default sparse solver is umfpack when available. 
This can be changed by - passing useUmfpack = False, which then causes the always present SuperLU - based solver to be used. - - Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If - sure that the matrix fulfills this, pass assumeSortedIndices=True - to gain some speed. - """ - if 'useUmfpack' in kwargs: - globals()['useUmfpack'] = kwargs['useUmfpack'] - - if isUmfpack: - umfpack.configure( **kwargs ) - - -def spsolve(A, b, permc_spec=None, use_umfpack=True): - """Solve the sparse linear system Ax=b - """ - if isspmatrix( b ): - b = b.toarray() - - if b.ndim > 1: - if max( b.shape ) == b.size: - b = b.squeeze() - else: - raise ValueError("rhs must be a vector (has shape %s)" % (b.shape,)) - - if not (isspmatrix_csc(A) or isspmatrix_csr(A)): - A = csc_matrix(A) - warn('spsolve requires CSC or CSR matrix format', SparseEfficiencyWarning) - - A.sort_indices() - A = A.asfptype() #upcast to a floating point format - - M, N = A.shape - if (M != N): - raise ValueError("matrix must be square (has shape %s)" % ((M, N),)) - if M != b.size: - raise ValueError("matrix - rhs size mismatch (%s - %s)" - % (A.shape, b.size)) - - use_umfpack = use_umfpack and useUmfpack - - if isUmfpack and use_umfpack: - if noScikit: - warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,' - ' install scikits.umfpack instead', DeprecationWarning ) - if A.dtype.char not in 'dD': - raise ValueError("convert matrix data to double, please, using" - " .astype(), or set linsolve.useUmfpack = False") - - b = asarray(b, dtype=A.dtype).reshape(-1) - - family = {'d' : 'di', 'D' : 'zi'} - umf = umfpack.UmfpackContext( family[A.dtype.char] ) - return umf.linsolve( umfpack.UMFPACK_A, A, b, - autoTranspose = True ) - - else: - if isspmatrix_csc(A): - flag = 1 # CSC format - elif isspmatrix_csr(A): - flag = 0 # CSR format - else: - A = csc_matrix(A) - flag = 1 - - b = asarray(b, dtype=A.dtype) - options = dict(ColPerm=permc_spec) - return _superlu.gssv(N, A.nnz, A.data, A.indices, 
A.indptr, b, flag, - options=options)[0] - -def splu(A, permc_spec=None, diag_pivot_thresh=None, - drop_tol=None, relax=None, panel_size=None, options=dict()): - """ - Compute the LU decomposition of a sparse, square matrix. - - Parameters - ---------- - A - Sparse matrix to factorize. Should be in CSR or CSC format. - - permc_spec : str, optional - How to permute the columns of the matrix for sparsity preservation. - (default: 'COLAMD') - - - ``NATURAL``: natural ordering. - - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - - ``COLAMD``: approximate minimum degree column ordering - - diag_pivot_thresh : float, optional - Threshold used for a diagonal entry to be an acceptable pivot. - See SuperLU user's guide for details [SLU]_ - drop_tol : float, optional - (deprecated) No effect. - relax : int, optional - Expert option for customizing the degree of relaxing supernodes. - See SuperLU user's guide for details [SLU]_ - panel_size : int, optional - Expert option for customizing the panel size. - See SuperLU user's guide for details [SLU]_ - options : dict, optional - Dictionary containing additional expert options to SuperLU. - See SuperLU user guide [SLU]_ (section 2.4 on the 'Options' argument) - for more details. For example, you can specify - ``options=dict(Equil=False, IterRefine='SINGLE'))`` - to turn equilibration off and perform a single iterative refinement. - - Returns - ------- - invA : scipy.sparse.linalg.dsolve._superlu.SciPyLUType - Object, which has a ``solve`` method. - - See also - -------- - spilu : incomplete LU decomposition - - Notes - ----- - This function uses the SuperLU library. - - References - ---------- - .. 
[SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/ - - """ - - if not isspmatrix_csc(A): - A = csc_matrix(A) - warn('splu requires CSC matrix format', SparseEfficiencyWarning) - - A.sort_indices() - A = A.asfptype() #upcast to a floating point format - - M, N = A.shape - if (M != N): - raise ValueError("can only factor square matrices") #is this true? - - _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, - PanelSize=panel_size, Relax=relax) - if options is not None: - _options.update(options) - return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr, - ilu=False, options=_options) - -def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None, - diag_pivot_thresh=None, relax=None, panel_size=None, options=None): - """ - Compute an incomplete LU decomposition for a sparse, square matrix A. - - The resulting object is an approximation to the inverse of A. - - Parameters - ---------- - A - Sparse matrix to factorize - - drop_tol : float, optional - Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition. - (default: 1e-4) - fill_factor : float, optional - Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10) - drop_rule : str, optional - Comma-separated string of drop rules to use. - Available rules: ``basic``, ``prows``, ``column``, ``area``, - ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``) - - See SuperLU documentation for details. - milu : str, optional - Which version of modified ILU to use. (Choices: ``silu``, - ``smilu_1``, ``smilu_2`` (default), ``smilu_3``.) - - Remaining other options - Same as for `splu` - - Returns - ------- - invA_approx : scipy.sparse.linalg.dsolve._superlu.SciPyLUType - Object, which has a ``solve`` method. - - See also - -------- - splu : complete LU decomposition - - Notes - ----- - To improve the better approximation to the inverse, you may need to - increase ``fill_factor`` AND decrease ``drop_tol``. - - This function uses the SuperLU library. 
- - References - ---------- - .. [SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/ - - """ - - if not isspmatrix_csc(A): - A = csc_matrix(A) - warn('splu requires CSC matrix format', SparseEfficiencyWarning) - - A.sort_indices() - A = A.asfptype() #upcast to a floating point format - - M, N = A.shape - if (M != N): - raise ValueError("can only factor square matrices") #is this true? - - _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol, - ILU_FillFactor=fill_factor, - DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, - PanelSize=panel_size, Relax=relax) - if options is not None: - _options.update(options) - return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr, - ilu=True, options=_options) - -def factorized( A ): - """ - Return a fuction for solving a sparse linear system, with A pre-factorized. - - Example: - solve = factorized( A ) # Makes LU decomposition. - x1 = solve( rhs1 ) # Uses the LU factors. - x2 = solve( rhs2 ) # Uses again the LU factors. - """ - if isUmfpack and useUmfpack: - if noScikit: - warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,' - ' install scikits.umfpack instead', DeprecationWarning ) - - if not isspmatrix_csc(A): - A = csc_matrix(A) - warn('splu requires CSC matrix format', SparseEfficiencyWarning) - - A.sort_indices() - A = A.asfptype() #upcast to a floating point format - - if A.dtype.char not in 'dD': - raise ValueError("convert matrix data to double, please, using" - " .astype(), or set linsolve.useUmfpack = False") - - family = {'d' : 'di', 'D' : 'zi'} - umf = umfpack.UmfpackContext( family[A.dtype.char] ) - - # Make LU decomposition. 
- umf.numeric( A ) - - def solve( b ): - return umf.solve( umfpack.UMFPACK_A, A, b, autoTranspose = True ) - - return solve - else: - return splu( A ).solve diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/setup.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/setup.py deleted file mode 100755 index e7d63daa60..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/setup.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -from os.path import join, dirname -import sys -import os -import glob - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('dsolve',parent_package,top_path) - config.add_data_dir('tests') - - lapack_opt = get_info('lapack_opt',notfound_action=2) - if sys.platform=='win32': - superlu_defs = [('NO_TIMER',1)] - else: - superlu_defs = [] - superlu_defs.append(('USE_VENDOR_BLAS',1)) - - superlu_src = join(dirname(__file__), 'SuperLU', 'SRC') - - sources = list(glob.glob(join(superlu_src, '*.c'))) - if os.name == 'nt' and 'FPATH' in os.environ: - # when using MSVC + MKL, lsame is already in MKL - sources.remove(join(superlu_src, 'lsame.c')) - - config.add_library('superlu_src', - sources = sources, - macros = superlu_defs, - include_dirs=[superlu_src], - ) - - # Extension - config.add_extension('_superlu', - sources = ['_superlumodule.c', - '_superlu_utils.c', - '_superluobject.c'], - libraries = ['superlu_src'], - extra_info = lapack_opt, - ) - - config.add_subpackage('umfpack') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/setupscons.py deleted file mode 100755 index 330d0c981e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/setupscons.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -from os.path 
import join -import sys - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('dsolve',parent_package,top_path, - setup_name = 'setupscons.py') - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - config.add_subpackage('umfpack') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/tests/test_linsolve.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/tests/test_linsolve.py deleted file mode 100644 index 1fb91634af..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/tests/test_linsolve.py +++ /dev/null @@ -1,157 +0,0 @@ -import warnings - -from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix -import numpy.random as random -from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal, \ - assert_raises, assert_almost_equal, assert_equal, assert_array_equal, assert_ - -from scipy.linalg import norm, inv -from scipy.sparse import spdiags, SparseEfficiencyWarning, csc_matrix -from scipy.sparse.linalg.dsolve import spsolve, use_solver, splu, spilu - -warnings.simplefilter('ignore',SparseEfficiencyWarning) - -#TODO add more comprehensive tests -use_solver( useUmfpack = False ) - -class TestLinsolve(TestCase): - def test_singular(self): - A = csc_matrix( (5,5), dtype='d' ) - b = array([1, 2, 3, 4, 5],dtype='d') - x = spsolve(A, b, use_umfpack=False) - - def test_twodiags(self): - A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) - b = array([1, 2, 3, 4, 5]) - - # condition number of A - cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2) - - - for t in ['f','d','F','D']: - eps = finfo(t).eps #floating point epsilon - b = b.astype(t) - - for format in ['csc','csr']: - Asp = A.astype(t).asformat(format) - - x = spsolve(Asp,b) - - 
assert_( norm(b - Asp*x) < 10 * cond_A * eps ) - - def test_smoketest(self): - Adense = matrix([[ 0., 1., 1.], - [ 1., 0., 1.], - [ 0., 0., 1.]]) - As = csc_matrix(Adense) - random.seed(1234) - x = random.randn(3) - b = As*x - x2 = spsolve(As, b) - - assert_array_almost_equal(x, x2) - - def test_non_square(self): - # A is not square. - A = ones((3, 4)) - b = ones((4, 1)) - assert_raises(ValueError, spsolve, A, b) - # A2 and b2 have incompatible shapes. - A2 = csc_matrix(eye(3)) - b2 = array([1.0, 2.0]) - assert_raises(ValueError, spsolve, A2, b2) - -class TestSplu(object): - def setUp(self): - n = 40 - d = arange(n) + 1 - self.n = n - self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) - random.seed(1234) - - def test_splu_smoketest(self): - # Check that splu works at all - x = random.rand(self.n) - lu = splu(self.A) - r = self.A*lu.solve(x) - assert_(abs(x - r).max() < 1e-13) - - def test_spilu_smoketest(self): - # Check that spilu works at all - x = random.rand(self.n) - lu = spilu(self.A, drop_tol=1e-2, fill_factor=5) - r = self.A*lu.solve(x) - assert_(abs(x - r).max() < 1e-2) - assert_(abs(x - r).max() > 1e-5) - - def test_splu_nnz0(self): - A = csc_matrix( (5,5), dtype='d' ) - assert_raises(RuntimeError, splu, A) - - def test_spilu_nnz0(self): - A = csc_matrix( (5,5), dtype='d' ) - assert_raises(RuntimeError, spilu, A) - - def test_splu_basic(self): - # Test basic splu functionality. - n = 30 - a = random.random((n, n)) - a[a < 0.95] = 0 - # First test with a singular matrix - a[:, 0] = 0 - a_ = csc_matrix(a) - # Matrix is exactly singular - assert_raises(RuntimeError, splu, a_) - - # Make a diagonal dominant, to make sure it is not singular - a += 4*eye(n) - a_ = csc_matrix(a) - lu = splu(a_) - b = ones(n) - x = lu.solve(b) - assert_almost_equal(dot(a, x), b) - - def test_splu_perm(self): - # Test the permutation vectors exposed by splu. 
- n = 30 - a = random.random((n, n)) - a[a < 0.95] = 0 - # Make a diagonal dominant, to make sure it is not singular - a += 4*eye(n) - a_ = csc_matrix(a) - lu = splu(a_) - # Check that the permutation indices do belong to [0, n-1]. - for perm in (lu.perm_r, lu.perm_c): - assert_(all(perm > -1)) - assert_(all(perm < n)) - assert_equal(len(unique(perm)), len(perm)) - - # Now make a symmetric, and test that the two permutation vectors are - # the same - a += a.T - a_ = csc_matrix(a) - lu = splu(a_) - assert_array_equal(lu.perm_r, lu.perm_c) - - def test_lu_refcount(self): - # Test that we are keeping track of the reference count with splu. - n = 30 - a = random.random((n, n)) - a[a < 0.95] = 0 - # Make a diagonal dominant, to make sure it is not singular - a += 4*eye(n) - a_ = csc_matrix(a) - lu = splu(a_) - - # And now test that we don't have a refcount bug - import gc, sys - rc = sys.getrefcount(lu) - for attr in ('perm_r', 'perm_c'): - perm = getattr(lu, attr) - assert_equal(sys.getrefcount(lu), rc + 1) - del perm - assert_equal(sys.getrefcount(lu), rc) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/SConscript b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/SConscript deleted file mode 100644 index 95aec6c8df..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/SConscript +++ /dev/null @@ -1,32 +0,0 @@ -from os.path import join as pjoin - -from numscons import GetNumpyEnvironment -from numscons import CheckF77BLAS, CheckF77Clib, NumpyCheckLibAndHeader -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = - {'CheckBLAS' : CheckF77BLAS, - 'CheckF77Clib' : CheckF77Clib, - 'NumpyCheckLibAndHeader' : NumpyCheckLibAndHeader}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckBLAS() -if not st: - raise RuntimeError("no 
blas found, necessary for umfpack module") - -has_umfpack = config.NumpyCheckLibAndHeader( - 'umfpack', None, 'umfpack.h', section = 'umfpack', autoadd = 1) -config.Finish() -write_info(env) - -if has_umfpack: - env.Append(SWIGFLAGS = '-python') - env.Append(SWIGFLAGS = '$_CPPINCFLAGS') - env.NumpyPythonExtension('__umfpack', source = 'umfpack.i') diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/SConstruct b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/__init__.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/__init__.py deleted file mode 100644 index 60aa09896e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from info import __doc__ - -from umfpack import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/bento.info b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/bento.info deleted file mode 100644 index cb722461da..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/bento.info +++ /dev/null @@ -1,5 +0,0 @@ -HookFile: bscript - -Library: - Extension: __umfpack - Sources: umfpack.i diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/bscript b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/bscript deleted file mode 100644 index 9c577294d5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/bscript +++ /dev/null @@ -1,11 +0,0 @@ -import sys - -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - bld = context.waf_context - - def builder(extension): - print "UMFPACK DISABLED" - 
context.register_builder("__umfpack", builder) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/info.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/info.py deleted file mode 100644 index dc3117677b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/info.py +++ /dev/null @@ -1,170 +0,0 @@ -""" -:Interface to the UMFPACK library: -================================== - -:Contains: UmfpackContext class - -:Description: -------------- -Routines for symbolic and numeric LU factorization of sparse -matrices and for solving systems of linear equations with sparse matrices. - -Tested with UMFPACK V4.4 (Jan. 28, 2005), V5.0 (May 5, 2006) -Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. -UMFPACK homepage: http://www.cise.ufl.edu/research/sparse/umfpack - -Use 'print UmfpackContext().funs' to see all UMFPACK library functions the -module exposes, if you need something not covered by the examples below. - -:Installation: --------------- - -Example site.cfg entry: - - -UMFPACK v4.4 in : - -[amd] -library_dirs = /UMFPACK/AMD/Lib -include_dirs = /UMFPACK/AMD/Include -amd_libs = amd - -[umfpack] -library_dirs = /UMFPACK/UMFPACK/Lib -include_dirs = /UMFPACK/UMFPACK/Include -umfpack_libs = umfpack - -UMFPACK v5.0 (as part of UFsparse package) in : - -[amd] -library_dirs = /UFsparse/AMD/Lib -include_dirs = /UFsparse/AMD/Include, /UFsparse/UFconfig -amd_libs = amd - -[umfpack] -library_dirs = /UFsparse/UMFPACK/Lib -include_dirs = /UFsparse/UMFPACK/Include, /UFsparse/UFconfig -umfpack_libs = umfpack - - -:Examples: ----------- - -Assuming this module imported as um (import scipy.sparse.linalg.dsolve.umfpack as um) - -Sparse matrix in CSR or CSC format: mtx -Righthand-side: rhs -Solution: sol - - -# Contruct the solver. -umfpack = um.UmfpackContext() # Use default 'di' family of UMFPACK routines. - -# One-shot solution. 
-sol = umfpack( um.UMFPACK_A, mtx, rhs, autoTranspose = True ) -# same as: -sol = umfpack.linsolve( um.UMFPACK_A, mtx, rhs, autoTranspose = True ) - - --or- - - -# Make LU decomposition. -umfpack.numeric( mtx ) -... -# Use already LU-decomposed matrix. -sol1 = umfpack( um.UMFPACK_A, mtx, rhs1, autoTranspose = True ) -sol2 = umfpack( um.UMFPACK_A, mtx, rhs2, autoTranspose = True ) -# same as: -sol1 = umfpack.solve( um.UMFPACK_A, mtx, rhs1, autoTranspose = True ) -sol2 = umfpack.solve( um.UMFPACK_A, mtx, rhs2, autoTranspose = True ) - - --or- - - -# Make symbolic decomposition. -umfpack.symbolic( mtx0 ) -# Print statistics. -umfpack.report_symbolic() - -# ... - -# Make LU decomposition of mtx1 which has same structure as mtx0. -umfpack.numeric( mtx1 ) -# Print statistics. -umfpack.report_numeric() - -# Use already LU-decomposed matrix. -sol1 = umfpack( um.UMFPACK_A, mtx1, rhs1, autoTranspose = True ) - -# ... - -# Make LU decomposition of mtx2 which has same structure as mtx0. -umfpack.numeric( mtx2 ) -sol2 = umfpack.solve( um.UMFPACK_A, mtx2, rhs2, autoTranspose = True ) - -# Print all statistics. -umfpack.report_info() - - --or- - - -# Get LU factors and permutation matrices of a matrix. 
-L, U, P, Q, R, do_recip = umfpack.lu( mtx ) - - -:Returns: - - `L` : Lower triangular m-by-min(m,n) CSR matrix - - `U` : Upper triangular min(m,n)-by-n CSC matrix - - `P` : Vector of row permuations - - `Q` : Vector of column permuations - - `R` : Vector of diagonal row scalings - - `do_recip` : boolean - -:Note: - For a given matrix A, the decomposition satisfies: - $LU = PRAQ$ when do_recip is true, - $LU = P(R^{-1})AQ$ when do_recip is false - -:UmfpackContext solution methods: ---------------------------------- - -umfpack(), umfpack.linsolve(), umfpack.solve() - -:Parameters: - - `sys` : constant, - one of UMFPACK system description constants, like - UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs - - `mtx` : sparse matrix (CSR or CSC) - - `rhs` : right hand side vector - - `autoTranspose` : bool - automatically changes 'sys' to the transposed type, if 'mtx' is in CSR, - since UMFPACK assumes CSC internally - -:Setting control parameters: ----------------------------- - -Assuming this module imported as um: - -List of control parameter names is accessible as 'um.umfControls' - their -meaning and possible values are described in the UMFPACK documentation. -To each name corresponds an attribute of the 'um' module, such as, -for example 'um.UMFPACK_PRL' (controlling the verbosity of umfpack report -functions). These attributes are in fact indices into the control array -- to set the corresponding control array value, just do the following: - - -umfpack = um.UmfpackContext() -umfpack.control[um.UMFPACK_PRL] = 4 # Let's be more verbose. 
- - --- -:Author: Robert Cimrman - -:Other contributors: Nathan Bell (lu() method wrappers) -""" - -postpone_import = 1 -global_symbols = ['UmfpackContext'] diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/setup.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/setup.py deleted file mode 100644 index a7ac4891c6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/setup.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python -# 05.12.2005, c -# last change: 27.03.2006 -def configuration(parent_package='',top_path=None): - import numpy - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info, dict_append - - config = Configuration( 'umfpack', parent_package, top_path ) - config.add_data_dir('tests') - - umf_info = get_info( 'umfpack', notfound_action = 1 ) - - umfpack_i_file = config.paths('umfpack.i')[0] - def umfpack_i(ext, build_dir): - if umf_info: - return umfpack_i_file - - blas_info = get_info('blas_opt') - build_info = {} - dict_append(build_info, **umf_info) - dict_append(build_info, **blas_info) - - config.add_extension( '__umfpack', - sources = [umfpack_i], - depends = ['umfpack.i'], - **build_info) - - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/setupscons.py deleted file mode 100644 index d44fe14877..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/setupscons.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# 05.12.2005, c -# last change: 27.03.2006 -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration( 'umfpack', parent_package, top_path ) - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - -# umf_info = get_info( 'umfpack', notfound_action = 1 ) 
-# -# umfpack_i_file = config.paths('umfpack.i')[0] -# def umfpack_i(ext, build_dir): -# if umf_info: -# return umfpack_i_file -# -# blas_info = get_info('blas_opt') -# build_info = {} -# dict_append(build_info, **umf_info) -# dict_append(build_info, **blas_info) -# -# config.add_extension( '__umfpack', -# sources = [umfpack_i], -# depends = ['umfpack.i'], -# **build_info) -# - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py deleted file mode 100644 index 6bf4b963e1..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python -# - -""" Test functions for UMFPACK wrappers - -""" - -import warnings -import random - -from numpy.testing import assert_array_almost_equal, dec, \ - decorate_methods -from numpy.testing.utils import WarningManager - -from scipy import rand, matrix, diag, eye -from scipy.sparse import csc_matrix, spdiags, SparseEfficiencyWarning -from scipy.sparse.linalg import linsolve - -warnings.simplefilter('ignore',SparseEfficiencyWarning) - -import numpy as np -try: - import scipy.sparse.linalg.dsolve.umfpack as um -except (ImportError, AttributeError): - _have_umfpack = False -else: - _have_umfpack = um.umfpack._um is not None - -# Allow disabling of nose tests if umfpack not present -# See end of file for application -_umfpack_skip = dec.skipif(not _have_umfpack, - 'UMFPACK appears not to be compiled') - -class _DeprecationAccept: - def setUp(self): - self.mgr = WarningManager() - self.mgr.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - def tearDown(self): - self.mgr.__exit__() - - -class TestSolvers(_DeprecationAccept): - """Tests inverting a sparse linear system""" - - def 
test_solve_complex_without_umfpack(self): - """Solve: single precision complex""" - linsolve.use_solver( useUmfpack = False ) - a = self.a.astype('F') - b = self.b - x = linsolve.spsolve(a, b) - #print x - #print "Error: ", a*x-b - assert_array_almost_equal(a*x, b, decimal=4) - - - def test_solve_without_umfpack(self): - """Solve: single precision""" - linsolve.use_solver( useUmfpack = False ) - a = self.a.astype('f') - b = self.b - x = linsolve.spsolve(a, b.astype('f')) - #print x - #print "Error: ", a*x-b - assert_array_almost_equal(a*x, b, decimal=4) - - - def test_solve_complex_umfpack(self): - """Solve with UMFPACK: double precision complex""" - linsolve.use_solver( useUmfpack = True ) - a = self.a.astype('D') - b = self.b - x = linsolve.spsolve(a, b) - #print x - #print "Error: ", a*x-b - assert_array_almost_equal(a*x, b) - - def test_solve_umfpack(self): - """Solve with UMFPACK: double precision""" - linsolve.use_solver( useUmfpack = True ) - a = self.a.astype('d') - b = self.b - x = linsolve.spsolve(a, b) - #print x - #print "Error: ", a*x-b - assert_array_almost_equal(a*x, b) - - def test_solve_sparse_rhs(self): - """Solve with UMFPACK: double precision, sparse rhs""" - linsolve.use_solver( useUmfpack = True ) - a = self.a.astype('d') - b = csc_matrix( self.b ) - x = linsolve.spsolve(a, b) - #print x - #print "Error: ", a*x-b - assert_array_almost_equal(a*x, self.b) - - def test_factorized_umfpack(self): - """Prefactorize (with UMFPACK) matrix for solving with multiple rhs""" - linsolve.use_solver( useUmfpack = True ) - a = self.a.astype('d') - solve = linsolve.factorized( a ) - - x1 = solve( self.b ) - assert_array_almost_equal(a*x1, self.b) - x2 = solve( self.b2 ) - assert_array_almost_equal(a*x2, self.b2) - - def test_factorized_without_umfpack(self): - """Prefactorize matrix for solving with multiple rhs""" - linsolve.use_solver( useUmfpack = False ) - a = self.a.astype('d') - solve = linsolve.factorized( a ) - - x1 = solve( self.b ) - 
assert_array_almost_equal(a*x1, self.b) - x2 = solve( self.b2 ) - assert_array_almost_equal(a*x2, self.b2) - - def setUp(self): - self.a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) - #print "The sparse matrix (constructed from diagonals):" - #print self.a - self.b = np.array([1, 2, 3, 4, 5]) - self.b2 = np.array([5, 4, 3, 2, 1]) - - _DeprecationAccept.setUp(self) - -class TestFactorization(_DeprecationAccept): - """Tests factorizing a sparse linear system""" - - def test_complex_lu(self): - """Getting factors of complex matrix""" - umfpack = um.UmfpackContext("zi") - - for A in self.complex_matrices: - umfpack.numeric(A) - - (L,U,P,Q,R,do_recip) = umfpack.lu(A) - - L = L.todense() - U = U.todense() - A = A.todense() - if not do_recip: R = 1.0/R - R = matrix(diag(R)) - P = eye(A.shape[0])[P,:] - Q = eye(A.shape[1])[:,Q] - - assert_array_almost_equal(P*R*A*Q,L*U) - - def test_real_lu(self): - """Getting factors of real matrix""" - umfpack = um.UmfpackContext("di") - - for A in self.real_matrices: - umfpack.numeric(A) - - (L,U,P,Q,R,do_recip) = umfpack.lu(A) - - L = L.todense() - U = U.todense() - A = A.todense() - if not do_recip: R = 1.0/R - R = matrix(diag(R)) - P = eye(A.shape[0])[P,:] - Q = eye(A.shape[1])[:,Q] - - assert_array_almost_equal(P*R*A*Q,L*U) - - - def setUp(self): - random.seed(0) #make tests repeatable - self.real_matrices = [] - self.real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], - [0, 1], 5, 5) ) - self.real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], - [0, 1], 4, 5) ) - self.real_matrices.append(spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], - [0, 2], 5, 5) ) - self.real_matrices.append(rand(3,3)) - self.real_matrices.append(rand(5,4)) - self.real_matrices.append(rand(4,5)) - - self.real_matrices = [csc_matrix(x).astype('d') for x \ - in self.real_matrices] - self.complex_matrices = [x.astype(np.complex128) - for x in self.real_matrices] - - _DeprecationAccept.setUp(self) - -# Skip methods if 
umfpack not present -for cls in [TestSolvers, TestFactorization]: - decorate_methods(cls, _umfpack_skip) - -if __name__ == "__main__": - import nose - nose.run(argv=['', __file__]) diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py deleted file mode 100644 index 6f7cd7acdb..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/env python -# Created by: Robert Cimrman, 05.12.2005 - -"""Benchamrks for umfpack module""" - -from optparse import OptionParser -import time -import urllib -import gzip - -import numpy as np - -import scipy.sparse as sp -import scipy.sparse.linalg.dsolve.umfpack as um -import scipy.linalg as nla - -defaultURL = 'http://www.cise.ufl.edu/research/sparse/HBformat/' - -usage = """%%prog [options] [, ...] - - can be a local or distant (gzipped) file - -default url is: - %s - -supported formats are: - triplet .. [nRow, nCol, nItem] followed by 'nItem' * [ir, ic, value] - hb .. 
Harwell-Boeing format N/A -""" % defaultURL - - -## -# 05.12.2005, c -def read_triplet( fd ): - nRow, nCol = map( int, fd.readline().split() ) - nItem = int( fd.readline() ) - - ij = np.zeros( (nItem,2), np.int32 ) - val = np.zeros( (nItem,), np.float64 ) - for ii, row in enumerate( fd.readlines() ): - aux = row.split() - ij[ii] = int( aux[0] ), int( aux[1] ) - val[ii] = float( aux[2] ) - - mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem ) - - return mtx - -## -# 06.12.2005, c -def read_triplet2( fd ): - nRow, nCol = map( int, fd.readline().split() ) - nItem = int( fd.readline() ) - - ij, val = io.read_array( fd, - columns = [(0,1), (2,)], - atype = (np.int32, np.float64), - rowsize = nItem ) - - mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem ) - - return mtx - - -formatMap = {'triplet' : read_triplet} -## -# 05.12.2005, c -def readMatrix( matrixName, options ): - - if options.default_url: - matrixName = defaultURL + matrixName - - print 'url:', matrixName - - if matrixName[:7] == 'http://': - fileName, status = urllib.urlretrieve( matrixName ) -## print status - else: - fileName = matrixName - - print 'file:', fileName - - try: - readMatrix = formatMap[options.format] - except: - raise ValueError('unsupported format: %s' % options.format) - - print 'format:', options.format - - print 'reading...' 
- if fileName.endswith('.gz'): - fd = gzip.open( fileName ) - else: - fd = open( fileName ) - - mtx = readMatrix( fd ) - - fd.close() - - print 'ok' - - return mtx - -## -# 05.12.2005, c -def main(): - parser = OptionParser( usage = usage ) - parser.add_option( "-c", "--compare", - action = "store_true", dest = "compare", - default = False, - help = "compare with default scipy.sparse solver [default: %default]" ) - parser.add_option( "-p", "--plot", - action = "store_true", dest = "plot", - default = False, - help = "plot time statistics [default: %default]" ) - parser.add_option( "-d", "--default-url", - action = "store_true", dest = "default_url", - default = False, - help = "use default url [default: %default]" ) - parser.add_option( "-f", "--format", type = type( '' ), - dest = "format", default = 'triplet', - help = "matrix format [default: %default]" ) - (options, args) = parser.parse_args() - - if (len( args ) >= 1): - matrixNames = args; - else: - parser.print_help(), - return - - sizes, nnzs, times, errors = [], [], [], [] - legends = ['umfpack', 'sparse.solve'] - for ii, matrixName in enumerate( matrixNames ): - - print '*' * 50 - mtx = readMatrix( matrixName, options ) - - sizes.append( mtx.shape ) - nnzs.append( mtx.nnz ) - tts = np.zeros( (2,), dtype = np.double ) - times.append( tts ) - err = np.zeros( (2,2), dtype = np.double ) - errors.append( err ) - - print 'size : %s (%d nnz)' % (mtx.shape, mtx.nnz) - - sol0 = np.ones( (mtx.shape[0],), dtype = np.double ) - rhs = mtx * sol0 - - umfpack = um.UmfpackContext() - - tt = time.clock() - sol = umfpack( um.UMFPACK_A, mtx, rhs, autoTranspose = True ) - tts[0] = time.clock() - tt - print "umfpack : %.2f s" % tts[0] - - error = mtx * sol - rhs - err[0,0] = nla.norm( error ) - print '||Ax-b|| :', err[0,0] - - error = sol0 - sol - err[0,1] = nla.norm( error ) - print '||x - x_{exact}|| :', err[0,1] - - if options.compare: - tt = time.clock() - sol = sp.solve( mtx, rhs ) - tts[1] = time.clock() - tt - print 
"sparse.solve : %.2f s" % tts[1] - - error = mtx * sol - rhs - err[1,0] = nla.norm( error ) - print '||Ax-b|| :', err[1,0] - - error = sol0 - sol - err[1,1] = nla.norm( error ) - print '||x - x_{exact}|| :', err[1,1] - - if options.plot: - try: - import pylab - except ImportError: - raise ImportError("could not import pylab") - times = np.array( times ) - print times - pylab.plot( times[:,0], 'b-o' ) - if options.compare: - pylab.plot( times[:,1], 'r-s' ) - else: - del legends[1] - - print legends - - ax = pylab.axis() - y2 = 0.5 * (ax[3] - ax[2]) - xrng = range( len( nnzs ) ) - for ii in xrng: - yy = y2 + 0.4 * (ax[3] - ax[2])\ - * np.sin( ii * 2 * np.pi / (len( xrng ) - 1) ) - - if options.compare: - pylab.text( ii+0.02, yy, - '%s\n%.2e err_umf\n%.2e err_sp' - % (sizes[ii], np.sum( errors[ii][0,:] ), - np.sum( errors[ii][1,:] )) ) - else: - pylab.text( ii+0.02, yy, - '%s\n%.2e err_umf' - % (sizes[ii], np.sum( errors[ii][0,:] )) ) - pylab.plot( [ii, ii], [ax[2], ax[3]], 'k:' ) - - pylab.xticks( xrng, ['%d' % (nnzs[ii] ) for ii in xrng] ) - pylab.xlabel( 'nnz' ) - pylab.ylabel( 'time [s]' ) - pylab.legend( legends ) - pylab.axis( [ax[0] - 0.05, ax[1] + 1, ax[2], ax[3]] ) - pylab.show() - -if __name__ == '__main__': - main() diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/umfpack.i b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/umfpack.i deleted file mode 100644 index 53086a0d82..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/umfpack.i +++ /dev/null @@ -1,274 +0,0 @@ -/* -*- C -*- */ -#ifdef SWIGPYTHON - -%module _umfpack - -/* - See umfpack.py for more information. - - Created by: Robert Cimrman -*/ - -%{ -#include -#include "numpy/arrayobject.h" -%} - -%feature("autodoc", "1"); - -#include - -%init %{ - import_array(); -%} - -%{ -/*! - Appends @a what to @a where. On input, @a where need not to be a tuple, but on - return it always is. 
- - @par Revision history: - - 17.02.2005, c -*/ -PyObject *helper_appendToTuple( PyObject *where, PyObject *what ) { - PyObject *o2, *o3; - - if ((!where) || (where == Py_None)) { - where = what; - } else { - if (!PyTuple_Check( where )) { - o2 = where; - where = PyTuple_New( 1 ); - PyTuple_SetItem( where, 0, o2 ); - } - o3 = PyTuple_New( 1 ); - PyTuple_SetItem( o3, 0, what ); - o2 = where; - where = PySequence_Concat( o2, o3 ); - Py_DECREF( o2 ); - Py_DECREF( o3 ); - } - return where; -} - -/*! - Gets PyArrayObject from a PyObject. - - @par Revision history: - - 22.02.2005, c - - 03.03.2005 - - 25.11.2005 - - 30.11.2005 - - 01.12.2005 -*/ -PyArrayObject *helper_getCArrayObject( PyObject *input, int type, - int minDim, int maxDim ) { - PyArrayObject *obj; - - if (PyArray_Check( input )) { - obj = (PyArrayObject *) input; - if (!PyArray_ISCARRAY( obj )) { - PyErr_SetString( PyExc_TypeError, "not a C array" ); - return NULL; - } - obj = (PyArrayObject *) - PyArray_ContiguousFromAny( input, type, minDim, maxDim ); - if (!obj) return NULL; - } else { - PyErr_SetString( PyExc_TypeError, "not an array" ); - return NULL; - } - return obj; -} -%} - -/*! - Use for arrays as input arguments. Could be also used for changing an array - in place. - - @a rtype ... return this C data type - @a ctype ... C data type of the C function - @a atype ... PyArray_* suffix - - @par Revision history: - - 30.11.2005, c -*/ -#define ARRAY_IN( rtype, ctype, atype ) \ -%typemap( in ) (ctype *array) { \ - PyArrayObject *obj; \ - obj = helper_getCArrayObject( $input, PyArray_##atype, 1, 1 ); \ - if (!obj) return NULL; \ - $1 = (rtype *) obj->data; \ - Py_DECREF( obj ); \ -}; - -/*! 
- @par Revision history: - - 30.11.2005, c -*/ -#define CONF_IN( arSize ) \ -%typemap( in ) (double conf [arSize]) { \ - PyArrayObject *obj; \ - obj = helper_getCArrayObject( $input, PyArray_DOUBLE, 1, 1 ); \ - if (!obj) return NULL; \ - if ((obj->nd != 1) || (obj->dimensions[0] != arSize)) { \ - PyErr_SetString( PyExc_ValueError, "wrong Control/Info array size" ); \ - Py_DECREF( obj ); \ - return NULL; \ - } \ - $1 = (double *) obj->data; \ - Py_DECREF( obj ); \ -}; - -/*! - @par Revision history: - - 01.12.2005, c - - 02.12.2005 -*/ -#define OPAQUE_ARGOUT( ttype ) \ -%typemap( in, numinputs=0 ) ttype* opaque_argout( ttype tmp ) { \ - $1 = &tmp; \ -}; \ -%typemap( argout ) ttype* opaque_argout { \ - PyObject *obj; \ - obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 0 ); \ - $result = helper_appendToTuple( $result, obj ); \ -}; - -/*! - @par Revision history: - - 02.12.2005, c -*/ -#define OPAQUE_ARGINOUT( ttype ) \ -%typemap( in ) ttype* opaque_arginout( ttype tmp ) { \ - if ((SWIG_ConvertPtr( $input,(void **) &tmp, $*1_descriptor, \ - SWIG_POINTER_EXCEPTION)) == -1) return NULL; \ - $1 = &tmp; \ -}; \ -%typemap( argout ) ttype* opaque_arginout { \ - PyObject *obj; \ - obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 0 ); \ - $result = helper_appendToTuple( $result, obj ); \ -}; - -ARRAY_IN( int, const int, INT ) -%apply const int *array { - const int Ap [ ], - const int Ai [ ] -}; - -ARRAY_IN( long, const long, LONG ) -%apply const long *array { - const long Ap [ ], - const long Ai [ ] -}; - -ARRAY_IN( double, const double, DOUBLE ) -%apply const double *array { - const double Ax [ ], - const double Az [ ], - const double B [ ], - const double Bx [ ], - const double Bz [ ] -}; - -ARRAY_IN( double, double, DOUBLE ) -%apply double *array { - double X [ ], - double Xx [ ], - double Xz [ ] -}; - -CONF_IN( UMFPACK_CONTROL ) -%apply (double conf [UMFPACK_CONTROL]) { - double Control [ANY] -}; - -CONF_IN( UMFPACK_INFO ) -%apply double conf 
[UMFPACK_INFO] { - double Info [ANY] -}; - -%include -%include -%include -%include -%include -%include -%include - -%include -%include -%include -%include - -/* - The order is important below! -*/ - -OPAQUE_ARGOUT( void * ) -%apply void ** opaque_argout { - void **Symbolic, - void **Numeric -} - -%include -%include - - -OPAQUE_ARGINOUT( void * ) -%apply void ** opaque_arginout { - void **Symbolic, - void **Numeric -} - -%include -%include - - - -/* - * wnbell - attempt to get L,U,P,Q out - */ -%include "typemaps.i" -%apply int *OUTPUT { - int *lnz, - int *unz, - int *n_row, - int *n_col, - int *nz_udiag -}; -%apply long *OUTPUT { - long *lnz, - long *unz, - long *n_row, - long *n_col, - long *nz_udiag -}; -%include - - -ARRAY_IN( double, double, DOUBLE ) -%apply double *array { - double Lx [ ], - double Lz [ ], - double Ux [ ], - double Uz [ ], - double Dx [ ], - double Dz [ ], - double Rs [ ] -}; - -ARRAY_IN( int, int, INT ) -%apply int *array { - int Lp [ ], - int Lj [ ], - int Up [ ], - int Ui [ ], - int P [ ], - int Q [ ] -}; -%apply int *OUTPUT { int *do_recip}; -%include - -#endif diff --git a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/umfpack.py b/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/umfpack.py deleted file mode 100644 index 8db7789211..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/dsolve/umfpack/umfpack.py +++ /dev/null @@ -1,715 +0,0 @@ -""" -Interface to the UMFPACK library. - --- -Author: Robert Cimrman -""" - - -#from base import Struct, pause -import numpy as np -import scipy.sparse as sp -import re -try: # Silence import error. - import _umfpack as _um -except: - _um = None - -assumeSortedIndices = False - -## -# 10.01.2006, c -def configure( **kwargs ): - """ - Valid keyword arguments with defaults (other ignored): - assumeSortedIndices = False - - Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If - sure that the matrix fulfills this, pass assumeSortedIndices = - True to gain some speed. 
- """ - if 'assumeSortedIndices' in kwargs: - globals()['assumeSortedIndices'] = kwargs['assumeSortedIndices'] - - -## -# 30.11.2005, c -def updateDictWithVars( adict, module, pattern, group = None ): - match = re.compile( pattern ).match - - for name in [ii for ii in vars( module ).keys() - if match( ii )]: - if group is not None: - outName = match( name ).group( group ) - else: - outName = name - - adict[outName] = module.__dict__[name] - - return adict - -## -# How to list these automagically? -umfControls = [ - 'UMFPACK_PRL', - 'UMFPACK_DENSE_ROW', - 'UMFPACK_DENSE_COL', - 'UMFPACK_BLOCK_SIZE', - 'UMFPACK_STRATEGY', - 'UMFPACK_2BY2_TOLERANCE', - 'UMFPACK_FIXQ', - 'UMFPACK_AMD_DENSE', - 'UMFPACK_AGGRESSIVE', - 'UMFPACK_PIVOT_TOLERANCE', - 'UMFPACK_ALLOC_INIT', - 'UMFPACK_SYM_PIVOT_TOLERANCE', - 'UMFPACK_SCALE', - 'UMFPACK_FRONT_ALLOC_INIT', - 'UMFPACK_DROPTOL', - 'UMFPACK_IRSTEP', - 'UMFPACK_COMPILED_WITH_BLAS', - 'UMFPACK_COMPILED_FOR_MATLAB', - 'UMFPACK_COMPILED_WITH_GETRUSAGE', - 'UMFPACK_COMPILED_IN_DEBUG_MODE', - 'UMFPACK_STRATEGY_AUTO', - 'UMFPACK_STRATEGY_UNSYMMETRIC', - 'UMFPACK_STRATEGY_2BY2', - 'UMFPACK_STRATEGY_SYMMETRIC', - 'UMFPACK_SCALE_NONE', - 'UMFPACK_SCALE_SUM', - 'UMFPACK_SCALE_MAX', -] - -umfInfo = [ - 'UMFPACK_STATUS', - 'UMFPACK_NROW', - 'UMFPACK_NCOL', - 'UMFPACK_NZ', - 'UMFPACK_SIZE_OF_UNIT', - 'UMFPACK_SIZE_OF_INT', - 'UMFPACK_SIZE_OF_LONG', - 'UMFPACK_SIZE_OF_POINTER', - 'UMFPACK_SIZE_OF_ENTRY', - 'UMFPACK_NDENSE_ROW', - 'UMFPACK_NEMPTY_ROW', - 'UMFPACK_NDENSE_COL', - 'UMFPACK_NEMPTY_COL', - 'UMFPACK_SYMBOLIC_DEFRAG', - 'UMFPACK_SYMBOLIC_PEAK_MEMORY', - 'UMFPACK_SYMBOLIC_SIZE', - 'UMFPACK_SYMBOLIC_TIME', - 'UMFPACK_SYMBOLIC_WALLTIME', - 'UMFPACK_STRATEGY_USED', - 'UMFPACK_ORDERING_USED', - 'UMFPACK_QFIXED', - 'UMFPACK_DIAG_PREFERRED', - 'UMFPACK_PATTERN_SYMMETRY', - 'UMFPACK_NZ_A_PLUS_AT', - 'UMFPACK_NZDIAG', - 'UMFPACK_SYMMETRIC_LUNZ', - 'UMFPACK_SYMMETRIC_FLOPS', - 'UMFPACK_SYMMETRIC_NDENSE', - 'UMFPACK_SYMMETRIC_DMAX', - 
'UMFPACK_2BY2_NWEAK', - 'UMFPACK_2BY2_UNMATCHED', - 'UMFPACK_2BY2_PATTERN_SYMMETRY', - 'UMFPACK_2BY2_NZ_PA_PLUS_PAT', - 'UMFPACK_2BY2_NZDIAG', - 'UMFPACK_COL_SINGLETONS', - 'UMFPACK_ROW_SINGLETONS', - 'UMFPACK_N2', - 'UMFPACK_S_SYMMETRIC', - 'UMFPACK_NUMERIC_SIZE_ESTIMATE', - 'UMFPACK_PEAK_MEMORY_ESTIMATE', - 'UMFPACK_FLOPS_ESTIMATE', - 'UMFPACK_LNZ_ESTIMATE', - 'UMFPACK_UNZ_ESTIMATE', - 'UMFPACK_VARIABLE_INIT_ESTIMATE', - 'UMFPACK_VARIABLE_PEAK_ESTIMATE', - 'UMFPACK_VARIABLE_FINAL_ESTIMATE', - 'UMFPACK_MAX_FRONT_SIZE_ESTIMATE', - 'UMFPACK_MAX_FRONT_NROWS_ESTIMATE', - 'UMFPACK_MAX_FRONT_NCOLS_ESTIMATE', - 'UMFPACK_NUMERIC_SIZE', - 'UMFPACK_PEAK_MEMORY', - 'UMFPACK_FLOPS', - 'UMFPACK_LNZ', - 'UMFPACK_UNZ', - 'UMFPACK_VARIABLE_INIT', - 'UMFPACK_VARIABLE_PEAK', - 'UMFPACK_VARIABLE_FINAL', - 'UMFPACK_MAX_FRONT_SIZE', - 'UMFPACK_MAX_FRONT_NROWS', - 'UMFPACK_MAX_FRONT_NCOLS', - 'UMFPACK_NUMERIC_DEFRAG', - 'UMFPACK_NUMERIC_REALLOC', - 'UMFPACK_NUMERIC_COSTLY_REALLOC', - 'UMFPACK_COMPRESSED_PATTERN', - 'UMFPACK_LU_ENTRIES', - 'UMFPACK_NUMERIC_TIME', - 'UMFPACK_UDIAG_NZ', - 'UMFPACK_RCOND', - 'UMFPACK_WAS_SCALED', - 'UMFPACK_RSMIN', - 'UMFPACK_RSMAX', - 'UMFPACK_UMIN', - 'UMFPACK_UMAX', - 'UMFPACK_ALLOC_INIT_USED', - 'UMFPACK_FORCED_UPDATES', - 'UMFPACK_NUMERIC_WALLTIME', - 'UMFPACK_NOFF_DIAG', - 'UMFPACK_ALL_LNZ', - 'UMFPACK_ALL_UNZ', - 'UMFPACK_NZDROPPED', - 'UMFPACK_IR_TAKEN', - 'UMFPACK_IR_ATTEMPTED', - 'UMFPACK_OMEGA1', - 'UMFPACK_OMEGA2', - 'UMFPACK_SOLVE_FLOPS', - 'UMFPACK_SOLVE_TIME', - 'UMFPACK_SOLVE_WALLTIME', - 'UMFPACK_ORDERING_COLAMD', - 'UMFPACK_ORDERING_AMD', - 'UMFPACK_ORDERING_GIVEN', -] - -if _um: - ## - # Export UMFPACK constants from _um. 
- umfDefines = updateDictWithVars( {}, _um, 'UMFPACK_.*' ) - locals().update( umfDefines ) - - - umfStatus = { - UMFPACK_OK : 'UMFPACK_OK', - UMFPACK_WARNING_singular_matrix : 'UMFPACK_WARNING_singular_matrix', - UMFPACK_WARNING_determinant_underflow : 'UMFPACK_WARNING_determinant_underflow', - UMFPACK_WARNING_determinant_overflow : 'UMFPACK_WARNING_determinant_overflow', - UMFPACK_ERROR_out_of_memory : 'UMFPACK_ERROR_out_of_memory', - UMFPACK_ERROR_invalid_Numeric_object : 'UMFPACK_ERROR_invalid_Numeric_object', - UMFPACK_ERROR_invalid_Symbolic_object : 'UMFPACK_ERROR_invalid_Symbolic_object', - UMFPACK_ERROR_argument_missing : 'UMFPACK_ERROR_argument_missing', - UMFPACK_ERROR_n_nonpositive : 'UMFPACK_ERROR_n_nonpositive', - UMFPACK_ERROR_invalid_matrix : 'UMFPACK_ERROR_invalid_matrix', - UMFPACK_ERROR_different_pattern : 'UMFPACK_ERROR_different_pattern', - UMFPACK_ERROR_invalid_system : 'UMFPACK_ERROR_invalid_system', - UMFPACK_ERROR_invalid_permutation : 'UMFPACK_ERROR_invalid_permutation', - UMFPACK_ERROR_internal_error : 'UMFPACK_ERROR_internal_error', - UMFPACK_ERROR_file_IO : 'UMFPACK_ERROR_file_IO', - } - - umfSys = [ - UMFPACK_A, - UMFPACK_At, - UMFPACK_Aat, - UMFPACK_Pt_L, - UMFPACK_L, - UMFPACK_Lt_P, - UMFPACK_Lat_P, - UMFPACK_Lt, - UMFPACK_U_Qt, - UMFPACK_U, - UMFPACK_Q_Ut, - UMFPACK_Q_Uat, - UMFPACK_Ut, - UMFPACK_Uat, - ] - - # Real, complex. 
- umfSys_transposeMap = [ - {UMFPACK_A : UMFPACK_At, - UMFPACK_At : UMFPACK_A, - UMFPACK_Aat : UMFPACK_A}, - {UMFPACK_A : UMFPACK_Aat, - UMFPACK_Aat : UMFPACK_A} - ] - -umfFamilyTypes = {'di' : int, 'dl' : long, 'zi' : int, 'zl' : long} -umfRealTypes = ('di', 'dl') -umfComplexTypes = ('zi', 'zl') - -## -# 02.01.2005 -class Struct( object ): - # 03.10.2005, c - # 26.10.2005 - def __init__( self, **kwargs ): - if kwargs: - self.__dict__.update( kwargs ) - - # 08.03.2005 - def __str__( self ): - ss = "%s\n" % self.__class__ - for key, val in self.__dict__.iteritems(): - if (issubclass( self.__dict__[key].__class__, Struct )): - ss += " %s:\n %s\n" % (key, self.__dict__[key].__class__) - else: - aux = "\n" + str( val ) - aux = aux.replace( "\n", "\n " ); - ss += " %s:\n%s\n" % (key, aux[1:]) - return( ss.rstrip() ) - -## -# 30.11.2005, c -class UmfpackContext( Struct ): - - ## - # 30.11.2005, c - # 01.12.2005 - # 21.12.2005 - # 01.03.2006 - def __init__( self, family = 'di', **kwargs ): - """ - Arguments: - - family .. family of UMFPACK functions ('di', 'dl', 'zi', 'zl') - - Keyword arguments: - - maxCond .. if extimated condition number is greater than maxCond, - a warning is printed (default: 1e12)""" - if _um is None: - raise ImportError('Scipy was built without UMFPACK support. ' - 'You need to install the UMFPACK library and ' - 'header files before building scipy.') - - self.maxCond = 1e12 - Struct.__init__( self, **kwargs ) - - if family not in umfFamilyTypes.keys(): - raise TypeError('wrong family: %s' % family) - - self.family = family - self.control = np.zeros( (UMFPACK_CONTROL, ), dtype = np.double ) - self.info = np.zeros( (UMFPACK_INFO, ), dtype = np.double ) - self._symbolic = None - self._numeric = None - self.mtx = None - self.isReal = self.family in umfRealTypes - - ## - # Functions corresponding to are stored in self.funs. 
- pattern = 'umfpack_' + family + '_(.*)' - fn = updateDictWithVars( {}, _um, pattern, group = 1 ) - self.funs = Struct( **fn ) - - self.funs.defaults( self.control ) - self.control[UMFPACK_PRL] = 3 - - def __del__(self): - self.free() - - ## - # 30.11.2005, c - def strControl( self ): - maxLen = max( [len( name ) for name in umfControls] ) - format = '%%-%ds : %%d' % maxLen - aux = [format % (name, self.control[umfDefines[name]]) - for name in umfControls if name in umfDefines] - return '\n'.join( aux ) - - ## - # 01.12.2005, c - def strInfo( self ): - maxLen = max( [len( name ) for name in umfInfo] ) - format = '%%-%ds : %%d' % maxLen - aux = [format % (name, self.info[umfDefines[name]]) - for name in umfInfo if name in umfDefines] - return '\n'.join( aux ) - - ## - # 30.11.2005, c - # 01.12.2005 - # 14.12.2005 - # 01.03.2006 - def _getIndx( self, mtx ): - - if sp.isspmatrix_csc( mtx ): - indx = mtx.indices - self.isCSR = 0 - elif sp.isspmatrix_csr( mtx ): - indx = mtx.indices - self.isCSR = 1 - else: - raise TypeError('must be a CSC/CSR matrix (is %s)' % mtx.__class__) - - ## - # Should check types of indices to correspond to familyTypes. - if self.family[1] == 'i': - if (indx.dtype != np.dtype('i')) \ - or mtx.indptr.dtype != np.dtype('i'): - raise ValueError('matrix must have int indices') - else: - if (indx.dtype != np.dtype('l')) \ - or mtx.indptr.dtype != np.dtype('l'): - raise ValueError('matrix must have long indices') - - if self.isReal: - if mtx.data.dtype != np.dtype('= 2: - raise RuntimeError('%s failed with %s' % (self.funs.numeric, - umfStatus[status])) - - ## - # 14.12.2005, c - def report_symbolic( self ): - """Print information about the symbolic object. Output depends on - self.control[UMFPACK_PRL].""" - self.funs.report_symbolic( self._symbolic, self.control ) - - ## - # 14.12.2005, c - def report_numeric( self ): - """Print information about the numeric object. 
Output depends on - self.control[UMFPACK_PRL].""" - self.funs.report_numeric( self._numeric, self.control ) - - ## - # 14.12.2005, c - def report_control( self ): - """Print control values.""" - self.funs.report_control( self.control ) - - ## - # 14.12.2005, c - def report_info( self ): - """Print all status information. Output depends on - self.control[UMFPACK_PRL].""" - self.funs.report_info( self.control, self.info ) - - ## - # 30.11.2005, c - # 01.12.2005 - def free_symbolic( self ): - if self._symbolic is not None: - self.funs.free_symbolic( self._symbolic ) - self._symbolic = None - self.mtx = None - - ## - # 30.11.2005, c - # 01.12.2005 - def free_numeric( self ): - if self._numeric is not None: - self.funs.free_numeric( self._numeric ) - self._numeric = None - self.free_symbolic() - - ## - # 30.11.2005, c - def free( self ): - self.free_symbolic() - self.free_numeric() - - ## - # 30.11.2005, c - # 01.12.2005 - # 02.12.2005 - # 21.12.2005 - # 01.03.2006 - def solve( self, sys, mtx, rhs, autoTranspose = False ): - """ - Solution of system of linear equation using the Numeric object. - - Arguments: - sys - one of UMFPACK system description constants, like - UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK - docs - mtx - sparse matrix (CSR or CSC) - rhs - right hand side vector - autoTranspose - automatically changes 'sys' to the - transposed type, if 'mtx' is in CSR, since UMFPACK - assumes CSC internally - """ - if sys not in umfSys: - raise ValueError('sys must be in' % umfSys) - - if autoTranspose and self.isCSR: - ## - # UMFPACK uses CSC internally... 
- if self.family in umfRealTypes: ii = 0 - else: ii = 1 - if sys in umfSys_transposeMap[ii]: - sys = umfSys_transposeMap[ii][sys] - else: - raise RuntimeError('autoTranspose ambiguous, switch it off') - - if self._numeric is not None: - if self.mtx is not mtx: - raise ValueError('must be called with same matrix as numeric()') - else: - raise RuntimeError('numeric() not called') - - indx = self._getIndx( mtx ) - - if self.isReal: - rhs = rhs.astype( np.float64 ) - sol = np.zeros( (mtx.shape[1],), dtype = np.float64 ) - status = self.funs.solve( sys, mtx.indptr, indx, mtx.data, sol, rhs, - self._numeric, self.control, self.info ) - else: - rhs = rhs.astype( np.complex128 ) - sol = np.zeros( (mtx.shape[1],), dtype = np.complex128 ) - mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy() - sreal, simag = sol.real.copy(), sol.imag.copy() - rreal, rimag = rhs.real.copy(), rhs.imag.copy() - status = self.funs.solve( sys, mtx.indptr, indx, - mreal, mimag, sreal, simag, rreal, rimag, - self._numeric, self.control, self.info ) - sol.real, sol.imag = sreal, simag - - #self.funs.report_info( self.control, self.info ) - #pause() - if status != UMFPACK_OK: - if status == UMFPACK_WARNING_singular_matrix: - ## Change inf, nan to zeros. - print 'zeroing nan and inf entries...' - sol[~np.isfinite( sol )] = 0.0 - else: - raise RuntimeError('%s failed with %s' % (self.funs.solve, - umfStatus[status])) - econd = 1.0 / self.info[UMFPACK_RCOND] - if econd > self.maxCond: - print 'warning: (almost) singular matrix! '\ - + '(estimated cond. number: %.2e)' % econd - - return sol - - ## - # 30.11.2005, c - # 01.12.2005 - def linsolve( self, sys, mtx, rhs, autoTranspose = False ): - """ - One-shot solution of system of linear equation. Reuses Numeric object - if possible. 
- - Arguments: - sys - one of UMFPACK system description constants, like - UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK - docs - mtx - sparse matrix (CSR or CSC) - rhs - right hand side vector - autoTranspose - automatically changes 'sys' to the - transposed type, if 'mtx' is in CSR, since UMFPACK - assumes CSC internally - """ - -# print self.family - if sys not in umfSys: - raise ValueError('sys must be in' % umfSys) - - if self._numeric is None: - self.numeric( mtx ) - else: - if self.mtx is not mtx: - self.numeric( mtx ) - - sol = self.solve( sys, mtx, rhs, autoTranspose ) - self.free_numeric() - - return sol - - ## - # 30.11.2005, c - # 01.12.2005 - def __call__( self, sys, mtx, rhs, autoTranspose = False ): - """ - Uses solve() or linsolve() depending on the presence of the Numeric - object. - - Arguments: - sys - one of UMFPACK system description constants, like - UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK - docs - mtx - sparse matrix (CSR or CSC) - rhs - right hand side vector - autoTranspose - automatically changes 'sys' to the - transposed type, if 'mtx' is in CSR, since UMFPACK - assumes CSC internally - """ - - if self._numeric is not None: - return self.solve( sys, mtx, rhs, autoTranspose ) - else: - return self.linsolve( sys, mtx, rhs, autoTranspose ) - - ## - # 21.09.2006, added by Nathan Bell - def lu( self, mtx ): - """ - Returns an LU decomposition of an m-by-n matrix in the form - (L, U, P, Q, R, do_recip): - - L - Lower triangular m-by-min(m,n) CSR matrix - U - Upper triangular min(m,n)-by-n CSC matrix - P - Vector of row permuations - Q - Vector of column permuations - R - Vector of diagonal row scalings - do_recip - boolean - - For a given matrix A, the decomposition satisfies: - LU = PRAQ when do_recip is true - LU = P(R^-1)AQ when do_recip is false - """ - - #this should probably be changed - mtx = mtx.tocsc() - self.numeric( mtx ) - - #first find out how much space to reserve - (status, lnz, unz, n_row, n_col, nz_udiag)\ - = 
self.funs.get_lunz( self._numeric ) - - if status != UMFPACK_OK: - raise RuntimeError('%s failed with %s' % (self.funs.get_lunz, - umfStatus[status])) - - #allocate storage for decomposition data - i_type = mtx.indptr.dtype - - Lp = np.zeros( (n_row+1,), dtype = i_type ) - Lj = np.zeros( (lnz,), dtype = i_type ) - Lx = np.zeros( (lnz,), dtype = np.double ) - - Up = np.zeros( (n_col+1,), dtype = i_type ) - Ui = np.zeros( (unz,), dtype = i_type ) - Ux = np.zeros( (unz,), dtype = np.double ) - - P = np.zeros( (n_row,), dtype = i_type ) - Q = np.zeros( (n_col,), dtype = i_type ) - - Dx = np.zeros( (min(n_row,n_col),), dtype = np.double ) - - Rs = np.zeros( (n_row,), dtype = np.double ) - - if self.isReal: - (status,do_recip) = self.funs.get_numeric( Lp,Lj,Lx,Up,Ui,Ux, - P,Q,Dx,Rs, - self._numeric ) - - if status != UMFPACK_OK: - raise RuntimeError('%s failed with %s' - % (self.funs.get_numeric, umfStatus[status])) - - L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col))) - U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col)) - R = Rs - - return (L,U,P,Q,R,bool(do_recip)) - - else: - #allocate additional storage for imaginary parts - Lz = np.zeros( (lnz,), dtype = np.double ) - Uz = np.zeros( (unz,), dtype = np.double ) - Dz = np.zeros( (min(n_row,n_col),), dtype = np.double ) - - (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Lz,Up,Ui,Ux,Uz, - P,Q,Dx,Dz,Rs, - self._numeric) - - if status != UMFPACK_OK: - raise RuntimeError('%s failed with %s' - % (self.funs.get_numeric, umfStatus[status])) - - - Lxz = np.zeros( (lnz,), dtype = np.complex128 ) - Uxz = np.zeros( (unz,), dtype = np.complex128 ) - Dxz = np.zeros( (min(n_row,n_col),), dtype = np.complex128 ) - - Lxz.real,Lxz.imag = Lx,Lz - Uxz.real,Uxz.imag = Ux,Uz - Dxz.real,Dxz.imag = Dx,Dz - - L = sp.csr_matrix((Lxz,Lj,Lp),(n_row,min(n_row,n_col))) - U = sp.csc_matrix((Uxz,Ui,Up),(min(n_row,n_col),n_col)) - R = Rs - - return (L,U,P,Q,R,bool(do_recip)) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/__init__.py 
b/scipy-0.10.1/scipy/sparse/linalg/eigen/__init__.py deleted file mode 100644 index 49c1e8c455..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -"Sparse eigenvalue solvers" - -from info import __doc__ - -from arpack import * -from lobpcg import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/dummy.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/dummy.f deleted file mode 100644 index 46a7148871..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/dummy.f +++ /dev/null @@ -1,57 +0,0 @@ - double complex function wzdotc(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z - double complex zdotc - integer n, incx, incy - - z = zdotc(n, zx, incx, zy, incy) - wzdotc = z - - end - - double complex function wzdotu(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z, zdotu - integer n, incx, incy - - z = zdotu(n, zx, incx, zy, incy) - wzdotu = z - - return - end - - complex function wcdotc(n, cx, incx, cy, incy) - complex cx(*), cy(*), c, cdotc - integer n, incx, incy - - c = cdotc(n, cx, incx, cy, incy) - wcdotc = c - - return - end - - complex function wcdotu(n, cx, incx, cy, incy) - complex cx(*), cy(*), c, cdotu - integer n, incx, incy - - c = cdotu(n, cx, incx, cy, incy) - wcdotu = c - - return - end - - complex function wcladiv(x, y) - complex x, y, z - complex cladiv - - z = cladiv(x, y) - wcladiv = z - return - end - - double complex function wzladiv(x, y) - double complex x, y, z - double complex zladiv - - z = zladiv(x, y) - wzladiv = z - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/veclib_cabi_c.c b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/veclib_cabi_c.c deleted file mode 100644 index 64565a28cd..0000000000 --- 
a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/veclib_cabi_c.c +++ /dev/null @@ -1,26 +0,0 @@ -#include -#include - -#define WRAP_F77(a) a##_ -void WRAP_F77(veclib_cdotc)(const int *N, const complex float *X, const int *incX, -const complex float *Y, const int *incY, complex float *dotc) -{ - cblas_cdotc_sub(*N, X, *incX, Y, *incY, dotc); -} - -void WRAP_F77(veclib_cdotu)(const int *N, const complex float *X, const int *incX, -const complex float *Y, const int *incY, complex float* dotu) -{ - cblas_cdotu_sub(*N, X, *incX, Y, *incY, dotu); -} - -void WRAP_F77(veclib_zdotc)(const int *N, const double complex *X, const int -*incX, const double complex *Y, const int *incY, double complex *dotu) -{ - cblas_zdotc_sub(*N, X, *incX, Y, *incY, dotu); -} -void WRAP_F77(veclib_zdotu)(const int *N, const double complex *X, const int -*incX, const double complex *Y, const int *incY, double complex *dotu) -{ - cblas_zdotu_sub(*N, X, *incX, Y, *incY, dotu); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/veclib_cabi_f.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/veclib_cabi_f.f deleted file mode 100644 index e050f8250e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/FWRAPPERS/veclib_cabi_f.f +++ /dev/null @@ -1,55 +0,0 @@ - double complex function wzdotc(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z - integer n, incx, incy - - call veclib_zdotc(n, zx, incx, zy, incy, z) - - wzdotc = z - return - end - - double complex function wzdotu(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z - integer n, incx, incy - - call veclib_zdotu(n, zx, incx, zy, incy, z) - - wzdotu = z - return - end - - complex function wcdotc(n, cx, incx, cy, incy) - complex cx(*), cy(*), c - integer n, incx, incy - - call veclib_cdotc(n, cx, incx, cy, incy, c) - - wcdotc = c - return - end - - complex function wcdotu(n, cx, incx, cy, incy) - complex cx(*), cy(*), c - integer n, incx, incy - - call 
veclib_cdotu(n, cx, incx, cy, incy, c) - - wcdotu = c - return - end - - complex function wcladiv(x, y) - complex x, y, z - - call cladiv(z, x, y) - wcladiv = z - return - end - - double complex function wzladiv(x, y) - double complex x, y, z - - call zladiv(z, x, y) - wzladiv = z - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/clahqr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/clahqr.f deleted file mode 100644 index c0b06e86cc..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/clahqr.f +++ /dev/null @@ -1,384 +0,0 @@ - SUBROUTINE CLAHQR( WANTT, WANTZ, N, ILO, IHI, H, LDH, W, ILOZ, - $ IHIZ, Z, LDZ, INFO ) -* -* -- LAPACK auxiliary routine (version 2.0) -- -* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., -* Courant Institute, Argonne National Lab, and Rice University -* September 30, 1994 -* -* .. Scalar Arguments .. - LOGICAL WANTT, WANTZ - INTEGER IHI, IHIZ, ILO, ILOZ, INFO, LDH, LDZ, N -* .. -* .. Array Arguments .. - COMPLEX H( LDH, * ), W( * ), Z( LDZ, * ) -* .. -* -* Purpose -* ======= -* -* CLAHQR is an auxiliary routine called by CHSEQR to update the -* eigenvalues and Schur decomposition already computed by CHSEQR, by -* dealing with the Hessenberg submatrix in rows and columns ILO to IHI. -* -* Arguments -* ========= -* -* WANTT (input) LOGICAL -* = .TRUE. : the full Schur form T is required; -* = .FALSE.: only eigenvalues are required. -* -* WANTZ (input) LOGICAL -* = .TRUE. : the matrix of Schur vectors Z is required; -* = .FALSE.: Schur vectors are not required. -* -* N (input) INTEGER -* The order of the matrix H. N >= 0. -* -* ILO (input) INTEGER -* IHI (input) INTEGER -* It is assumed that H is already upper triangular in rows and -* columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). -* CLAHQR works primarily with the Hessenberg submatrix in rows -* and columns ILO to IHI, but applies transformations to all of -* H if WANTT is .TRUE.. 
-* 1 <= ILO <= max(1,IHI); IHI <= N. -* -* H (input/output) COMPLEX array, dimension (LDH,N) -* On entry, the upper Hessenberg matrix H. -* On exit, if WANTT is .TRUE., H is upper triangular in rows -* and columns ILO:IHI, with any 2-by-2 diagonal blocks in -* standard form. If WANTT is .FALSE., the contents of H are -* unspecified on exit. -* -* LDH (input) INTEGER -* The leading dimension of the array H. LDH >= max(1,N). -* -* W (output) COMPLEX array, dimension (N) -* The computed eigenvalues ILO to IHI are stored in the -* corresponding elements of W. If WANTT is .TRUE., the -* eigenvalues are stored in the same order as on the diagonal -* of the Schur form returned in H, with W(i) = H(i,i). -* -* ILOZ (input) INTEGER -* IHIZ (input) INTEGER -* Specify the rows of Z to which transformations must be -* applied if WANTZ is .TRUE.. -* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. -* -* Z (input/output) COMPLEX array, dimension (LDZ,N) -* If WANTZ is .TRUE., on entry Z must contain the current -* matrix Z of transformations accumulated by CHSEQR, and on -* exit Z has been updated; transformations are applied only to -* the submatrix Z(ILOZ:IHIZ,ILO:IHI). -* If WANTZ is .FALSE., Z is not referenced. -* -* LDZ (input) INTEGER -* The leading dimension of the array Z. LDZ >= max(1,N). -* -* INFO (output) INTEGER -* = 0: successful exit -* > 0: if INFO = i, CLAHQR failed to compute all the -* eigenvalues ILO to IHI in a total of 30*(IHI-ILO+1) -* iterations; elements i+1:ihi of W contain those -* eigenvalues which have been successfully computed. -* -* ===================================================================== -* -* .. Parameters .. - COMPLEX ZERO, ONE - PARAMETER ( ZERO = ( 0.0E+0, 0.0E+0 ), - $ ONE = ( 1.0E+0, 0.0E+0 ) ) - REAL RZERO, HALF - PARAMETER ( RZERO = 0.0E+0, HALF = 0.5E+0 ) -* .. -* .. Local Scalars .. 
- INTEGER I, I1, I2, ITN, ITS, J, K, L, M, NH, NZ - REAL H10, H21, RTEMP, S, SMLNUM, T2, TST1, ULP - COMPLEX CDUM, H11, H11S, H22, SUM, T, T1, TEMP, U, V2, - $ X, Y -* .. -* .. Local Arrays .. - REAL RWORK( 1 ) - COMPLEX V( 2 ) -* .. -* .. External Functions .. - REAL CLANHS, SLAMCH - COMPLEX WCLADIV - EXTERNAL CLANHS, SLAMCH, WCLADIV -* .. -* .. External Subroutines .. - EXTERNAL CCOPY, CLARFG, CSCAL -* .. -* .. Intrinsic Functions .. - INTRINSIC ABS, AIMAG, CONJG, MAX, MIN, REAL, SQRT -* .. -* .. Statement Functions .. - REAL CABS1 -* .. -* .. Statement Function definitions .. - CABS1( CDUM ) = ABS( REAL( CDUM ) ) + ABS( AIMAG( CDUM ) ) -* .. -* .. Executable Statements .. -* - INFO = 0 -* -* Quick return if possible -* - IF( N.EQ.0 ) - $ RETURN - IF( ILO.EQ.IHI ) THEN - W( ILO ) = H( ILO, ILO ) - RETURN - END IF -* - NH = IHI - ILO + 1 - NZ = IHIZ - ILOZ + 1 -* -* Set machine-dependent constants for the stopping criterion. -* If norm(H) <= sqrt(OVFL), overflow should not occur. -* - ULP = SLAMCH( 'Precision' ) - SMLNUM = SLAMCH( 'Safe minimum' ) / ULP -* -* I1 and I2 are the indices of the first row and last column of H -* to which transformations must be applied. If eigenvalues only are -* being computed, I1 and I2 are set inside the main loop. -* - IF( WANTT ) THEN - I1 = 1 - I2 = N - END IF -* -* ITN is the total number of QR iterations allowed. -* - ITN = 30*NH -* -* The main loop begins here. I is the loop index and decreases from -* IHI to ILO in steps of 1. Each iteration of the loop works -* with the active submatrix in rows and columns L to I. -* Eigenvalues I+1 to IHI have already converged. Either L = ILO, or -* H(L,L-1) is negligible so that the matrix splits. -* - I = IHI - 10 CONTINUE - IF( I.LT.ILO ) - $ GO TO 130 -* -* Perform QR iterations on rows and columns ILO to I until a -* submatrix of order 1 splits off at the bottom because a -* subdiagonal element has become negligible. 
-* - L = ILO - DO 110 ITS = 0, ITN -* -* Look for a single small subdiagonal element. -* - DO 20 K = I, L + 1, -1 - TST1 = CABS1( H( K-1, K-1 ) ) + CABS1( H( K, K ) ) - IF( TST1.EQ.RZERO ) - $ TST1 = CLANHS( '1', I-L+1, H( L, L ), LDH, RWORK ) - IF( ABS( REAL( H( K, K-1 ) ) ).LE.MAX( ULP*TST1, SMLNUM ) ) - $ GO TO 30 - 20 CONTINUE - 30 CONTINUE - L = K - IF( L.GT.ILO ) THEN -* -* H(L,L-1) is negligible -* - H( L, L-1 ) = ZERO - END IF -* -* Exit from loop if a submatrix of order 1 has split off. -* - IF( L.GE.I ) - $ GO TO 120 -* -* Now the active submatrix is in rows and columns L to I. If -* eigenvalues only are being computed, only the active submatrix -* need be transformed. -* - IF( .NOT.WANTT ) THEN - I1 = L - I2 = I - END IF -* - IF( ITS.EQ.10 .OR. ITS.EQ.20 ) THEN -* -* Exceptional shift. -* - T = ABS( REAL( H( I, I-1 ) ) ) + - $ ABS( REAL( H( I-1, I-2 ) ) ) - ELSE -* -* Wilkinson's shift. -* - T = H( I, I ) - U = H( I-1, I )*REAL( H( I, I-1 ) ) - IF( U.NE.ZERO ) THEN - X = HALF*( H( I-1, I-1 )-T ) - Y = SQRT( X*X+U ) - IF( REAL( X )*REAL( Y )+AIMAG( X )*AIMAG( Y ).LT.RZERO ) - $ Y = -Y - T = T - WCLADIV( U, ( X+Y ) ) - END IF - END IF -* -* Look for two consecutive small subdiagonal elements. -* - DO 40 M = I - 1, L + 1, -1 -* -* Determine the effect of starting the single-shift QR -* iteration at row M, and see if this would make H(M,M-1) -* negligible. 
-* - H11 = H( M, M ) - H22 = H( M+1, M+1 ) - H11S = H11 - T - H21 = H( M+1, M ) - S = CABS1( H11S ) + ABS( H21 ) - H11S = H11S / S - H21 = H21 / S - V( 1 ) = H11S - V( 2 ) = H21 - H10 = H( M, M-1 ) - TST1 = CABS1( H11S )*( CABS1( H11 )+CABS1( H22 ) ) - IF( ABS( H10*H21 ).LE.ULP*TST1 ) - $ GO TO 50 - 40 CONTINUE - H11 = H( L, L ) - H22 = H( L+1, L+1 ) - H11S = H11 - T - H21 = H( L+1, L ) - S = CABS1( H11S ) + ABS( H21 ) - H11S = H11S / S - H21 = H21 / S - V( 1 ) = H11S - V( 2 ) = H21 - 50 CONTINUE -* -* Single-shift QR step -* - DO 100 K = M, I - 1 -* -* The first iteration of this loop determines a reflection G -* from the vector V and applies it from left and right to H, -* thus creating a nonzero bulge below the subdiagonal. -* -* Each subsequent iteration determines a reflection G to -* restore the Hessenberg form in the (K-1)th column, and thus -* chases the bulge one step toward the bottom of the active -* submatrix. -* -* V(2) is always real before the call to CLARFG, and hence -* after the call T2 ( = T1*V(2) ) is also real. -* - IF( K.GT.M ) - $ CALL CCOPY( 2, H( K, K-1 ), 1, V, 1 ) - CALL CLARFG( 2, V( 1 ), V( 2 ), 1, T1 ) - IF( K.GT.M ) THEN - H( K, K-1 ) = V( 1 ) - H( K+1, K-1 ) = ZERO - END IF - V2 = V( 2 ) - T2 = REAL( T1*V2 ) -* -* Apply G from the left to transform the rows of the matrix -* in columns K to I2. -* - DO 60 J = K, I2 - SUM = CONJG( T1 )*H( K, J ) + T2*H( K+1, J ) - H( K, J ) = H( K, J ) - SUM - H( K+1, J ) = H( K+1, J ) - SUM*V2 - 60 CONTINUE -* -* Apply G from the right to transform the columns of the -* matrix in rows I1 to min(K+2,I). 
-* - DO 70 J = I1, MIN( K+2, I ) - SUM = T1*H( J, K ) + T2*H( J, K+1 ) - H( J, K ) = H( J, K ) - SUM - H( J, K+1 ) = H( J, K+1 ) - SUM*CONJG( V2 ) - 70 CONTINUE -* - IF( WANTZ ) THEN -* -* Accumulate transformations in the matrix Z -* - DO 80 J = ILOZ, IHIZ - SUM = T1*Z( J, K ) + T2*Z( J, K+1 ) - Z( J, K ) = Z( J, K ) - SUM - Z( J, K+1 ) = Z( J, K+1 ) - SUM*CONJG( V2 ) - 80 CONTINUE - END IF -* - IF( K.EQ.M .AND. M.GT.L ) THEN -* -* If the QR step was started at row M > L because two -* consecutive small subdiagonals were found, then extra -* scaling must be performed to ensure that H(M,M-1) remains -* real. -* - TEMP = ONE - T1 - TEMP = TEMP / ABS( TEMP ) - H( M+1, M ) = H( M+1, M )*CONJG( TEMP ) - IF( M+2.LE.I ) - $ H( M+2, M+1 ) = H( M+2, M+1 )*TEMP - DO 90 J = M, I - IF( J.NE.M+1 ) THEN - IF( I2.GT.J ) - $ CALL CSCAL( I2-J, TEMP, H( J, J+1 ), LDH ) - CALL CSCAL( J-I1, CONJG( TEMP ), H( I1, J ), 1 ) - IF( WANTZ ) THEN - CALL CSCAL( NZ, CONJG( TEMP ), Z( ILOZ, J ), 1 ) - END IF - END IF - 90 CONTINUE - END IF - 100 CONTINUE -* -* Ensure that H(I,I-1) is real. -* - TEMP = H( I, I-1 ) - IF( AIMAG( TEMP ).NE.RZERO ) THEN - RTEMP = ABS( TEMP ) - H( I, I-1 ) = RTEMP - TEMP = TEMP / RTEMP - IF( I2.GT.I ) - $ CALL CSCAL( I2-I, CONJG( TEMP ), H( I, I+1 ), LDH ) - CALL CSCAL( I-I1, TEMP, H( I1, I ), 1 ) - IF( WANTZ ) THEN - CALL CSCAL( NZ, TEMP, Z( ILOZ, I ), 1 ) - END IF - END IF -* - 110 CONTINUE -* -* Failure to converge in remaining number of iterations -* - INFO = I - RETURN -* - 120 CONTINUE -* -* H(I,I-1) is negligible: one eigenvalue has converged. -* - W( I ) = H( I, I ) -* -* Decrement number of remaining iterations, and return to start of -* the main loop with new value of I. 
-* - ITN = ITN - ITS - I = L - 1 - GO TO 10 -* - 130 CONTINUE - RETURN -* -* End of CLAHQR -* - END - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/dlahqr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/dlahqr.f deleted file mode 100644 index 6833271986..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/dlahqr.f +++ /dev/null @@ -1,410 +0,0 @@ - SUBROUTINE DLAHQR( WANTT, WANTZ, N, ILO, IHI, H, LDH, WR, WI, - $ ILOZ, IHIZ, Z, LDZ, INFO ) -* -* -- LAPACK auxiliary routine (version 2.0) -- -* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., -* Courant Institute, Argonne National Lab, and Rice University -* October 31, 1992 -* -* .. Scalar Arguments .. - LOGICAL WANTT, WANTZ - INTEGER IHI, IHIZ, ILO, ILOZ, INFO, LDH, LDZ, N -* .. -* .. Array Arguments .. - DOUBLE PRECISION H( LDH, * ), WI( * ), WR( * ), Z( LDZ, * ) -* .. -* -* Purpose -* ======= -* -* DLAHQR is an auxiliary routine called by DHSEQR to update the -* eigenvalues and Schur decomposition already computed by DHSEQR, by -* dealing with the Hessenberg submatrix in rows and columns ILO to IHI. -* -* Arguments -* ========= -* -* WANTT (input) LOGICAL -* = .TRUE. : the full Schur form T is required; -* = .FALSE.: only eigenvalues are required. -* -* WANTZ (input) LOGICAL -* = .TRUE. : the matrix of Schur vectors Z is required; -* = .FALSE.: Schur vectors are not required. -* -* N (input) INTEGER -* The order of the matrix H. N >= 0. -* -* ILO (input) INTEGER -* IHI (input) INTEGER -* It is assumed that H is already upper quasi-triangular in -* rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless -* ILO = 1). DLAHQR works primarily with the Hessenberg -* submatrix in rows and columns ILO to IHI, but applies -* transformations to all of H if WANTT is .TRUE.. -* 1 <= ILO <= max(1,IHI); IHI <= N. -* -* H (input/output) DOUBLE PRECISION array, dimension (LDH,N) -* On entry, the upper Hessenberg matrix H. 
-* On exit, if WANTT is .TRUE., H is upper quasi-triangular in -* rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in -* standard form. If WANTT is .FALSE., the contents of H are -* unspecified on exit. -* -* LDH (input) INTEGER -* The leading dimension of the array H. LDH >= max(1,N). -* -* WR (output) DOUBLE PRECISION array, dimension (N) -* WI (output) DOUBLE PRECISION array, dimension (N) -* The real and imaginary parts, respectively, of the computed -* eigenvalues ILO to IHI are stored in the corresponding -* elements of WR and WI. If two eigenvalues are computed as a -* complex conjugate pair, they are stored in consecutive -* elements of WR and WI, say the i-th and (i+1)th, with -* WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the -* eigenvalues are stored in the same order as on the diagonal -* of the Schur form returned in H, with WR(i) = H(i,i), and, if -* H(i:i+1,i:i+1) is a 2-by-2 diagonal block, -* WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). -* -* ILOZ (input) INTEGER -* IHIZ (input) INTEGER -* Specify the rows of Z to which transformations must be -* applied if WANTZ is .TRUE.. -* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. -* -* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) -* If WANTZ is .TRUE., on entry Z must contain the current -* matrix Z of transformations accumulated by DHSEQR, and on -* exit Z has been updated; transformations are applied only to -* the submatrix Z(ILOZ:IHIZ,ILO:IHI). -* If WANTZ is .FALSE., Z is not referenced. -* -* LDZ (input) INTEGER -* The leading dimension of the array Z. LDZ >= max(1,N). -* -* INFO (output) INTEGER -* = 0: successful exit -* > 0: DLAHQR failed to compute all the eigenvalues ILO to IHI -* in a total of 30*(IHI-ILO+1) iterations; if INFO = i, -* elements i+1:ihi of WR and WI contain those eigenvalues -* which have been successfully computed. -* -* ===================================================================== -* -* .. Parameters .. 
- DOUBLE PRECISION ZERO, ONE - PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) - DOUBLE PRECISION DAT1, DAT2 - PARAMETER ( DAT1 = 0.75D+0, DAT2 = -0.4375D+0 ) -* .. -* .. Local Scalars .. - INTEGER I, I1, I2, ITN, ITS, J, K, L, M, NH, NR, NZ - DOUBLE PRECISION CS, H00, H10, H11, H12, H21, H22, H33, H33S, - $ H43H34, H44, H44S, OVFL, S, SMLNUM, SN, SUM, - $ T1, T2, T3, TST1, ULP, UNFL, V1, V2, V3 -* .. -* .. Local Arrays .. - DOUBLE PRECISION V( 3 ), WORK( 1 ) -* .. -* .. External Functions .. - DOUBLE PRECISION DLAMCH, DLANHS - EXTERNAL DLAMCH, DLANHS -* .. -* .. External Subroutines .. - EXTERNAL DCOPY, DLABAD, DLANV2, DLARFG, DROT -* .. -* .. Intrinsic Functions .. - INTRINSIC ABS, MAX, MIN -* .. -* .. Executable Statements .. -* - INFO = 0 -* -* Quick return if possible -* - IF( N.EQ.0 ) - $ RETURN - IF( ILO.EQ.IHI ) THEN - WR( ILO ) = H( ILO, ILO ) - WI( ILO ) = ZERO - RETURN - END IF -* - NH = IHI - ILO + 1 - NZ = IHIZ - ILOZ + 1 -* -* Set machine-dependent constants for the stopping criterion. -* If norm(H) <= sqrt(OVFL), overflow should not occur. -* - UNFL = DLAMCH( 'Safe minimum' ) - OVFL = ONE / UNFL - CALL DLABAD( UNFL, OVFL ) - ULP = DLAMCH( 'Precision' ) - SMLNUM = UNFL*( NH / ULP ) -* -* I1 and I2 are the indices of the first row and last column of H -* to which transformations must be applied. If eigenvalues only are -* being computed, I1 and I2 are set inside the main loop. -* - IF( WANTT ) THEN - I1 = 1 - I2 = N - END IF -* -* ITN is the total number of QR iterations allowed. -* - ITN = 30*NH -* -* The main loop begins here. I is the loop index and decreases from -* IHI to ILO in steps of 1 or 2. Each iteration of the loop works -* with the active submatrix in rows and columns L to I. -* Eigenvalues I+1 to IHI have already converged. Either L = ILO or -* H(L,L-1) is negligible so that the matrix splits. 
-* - I = IHI - 10 CONTINUE - L = ILO - IF( I.LT.ILO ) - $ GO TO 150 -* -* Perform QR iterations on rows and columns ILO to I until a -* submatrix of order 1 or 2 splits off at the bottom because a -* subdiagonal element has become negligible. -* - DO 130 ITS = 0, ITN -* -* Look for a single small subdiagonal element. -* - DO 20 K = I, L + 1, -1 - TST1 = ABS( H( K-1, K-1 ) ) + ABS( H( K, K ) ) - IF( TST1.EQ.ZERO ) - $ TST1 = DLANHS( '1', I-L+1, H( L, L ), LDH, WORK ) - IF( ABS( H( K, K-1 ) ).LE.MAX( ULP*TST1, SMLNUM ) ) - $ GO TO 30 - 20 CONTINUE - 30 CONTINUE - L = K - IF( L.GT.ILO ) THEN -* -* H(L,L-1) is negligible -* - H( L, L-1 ) = ZERO - END IF -* -* Exit from loop if a submatrix of order 1 or 2 has split off. -* - IF( L.GE.I-1 ) - $ GO TO 140 -* -* Now the active submatrix is in rows and columns L to I. If -* eigenvalues only are being computed, only the active submatrix -* need be transformed. -* - IF( .NOT.WANTT ) THEN - I1 = L - I2 = I - END IF -* - IF( ITS.EQ.10 .OR. ITS.EQ.20 ) THEN -* -* Exceptional shift. -* - S = ABS( H( I, I-1 ) ) + ABS( H( I-1, I-2 ) ) - H44 = DAT1*S - H33 = H44 - H43H34 = DAT2*S*S - ELSE -* -* Prepare to use Wilkinson's double shift -* - H44 = H( I, I ) - H33 = H( I-1, I-1 ) - H43H34 = H( I, I-1 )*H( I-1, I ) - END IF -* -* Look for two consecutive small subdiagonal elements. -* - DO 40 M = I - 2, L, -1 -* -* Determine the effect of starting the double-shift QR -* iteration at row M, and see if this would make H(M,M-1) -* negligible. 
-* - H11 = H( M, M ) - H22 = H( M+1, M+1 ) - H21 = H( M+1, M ) - H12 = H( M, M+1 ) - H44S = H44 - H11 - H33S = H33 - H11 - V1 = ( H33S*H44S-H43H34 ) / H21 + H12 - V2 = H22 - H11 - H33S - H44S - V3 = H( M+2, M+1 ) - S = ABS( V1 ) + ABS( V2 ) + ABS( V3 ) - V1 = V1 / S - V2 = V2 / S - V3 = V3 / S - V( 1 ) = V1 - V( 2 ) = V2 - V( 3 ) = V3 - IF( M.EQ.L ) - $ GO TO 50 - H00 = H( M-1, M-1 ) - H10 = H( M, M-1 ) - TST1 = ABS( V1 )*( ABS( H00 )+ABS( H11 )+ABS( H22 ) ) - IF( ABS( H10 )*( ABS( V2 )+ABS( V3 ) ).LE.ULP*TST1 ) - $ GO TO 50 - 40 CONTINUE - 50 CONTINUE -* -* Double-shift QR step -* - DO 120 K = M, I - 1 -* -* The first iteration of this loop determines a reflection G -* from the vector V and applies it from left and right to H, -* thus creating a nonzero bulge below the subdiagonal. -* -* Each subsequent iteration determines a reflection G to -* restore the Hessenberg form in the (K-1)th column, and thus -* chases the bulge one step toward the bottom of the active -* submatrix. NR is the order of G. -* - NR = MIN( 3, I-K+1 ) - IF( K.GT.M ) - $ CALL DCOPY( NR, H( K, K-1 ), 1, V, 1 ) - CALL DLARFG( NR, V( 1 ), V( 2 ), 1, T1 ) - IF( K.GT.M ) THEN - H( K, K-1 ) = V( 1 ) - H( K+1, K-1 ) = ZERO - IF( K.LT.I-1 ) - $ H( K+2, K-1 ) = ZERO - ELSE IF( M.GT.L ) THEN - H( K, K-1 ) = -H( K, K-1 ) - END IF - V2 = V( 2 ) - T2 = T1*V2 - IF( NR.EQ.3 ) THEN - V3 = V( 3 ) - T3 = T1*V3 -* -* Apply G from the left to transform the rows of the matrix -* in columns K to I2. -* - DO 60 J = K, I2 - SUM = H( K, J ) + V2*H( K+1, J ) + V3*H( K+2, J ) - H( K, J ) = H( K, J ) - SUM*T1 - H( K+1, J ) = H( K+1, J ) - SUM*T2 - H( K+2, J ) = H( K+2, J ) - SUM*T3 - 60 CONTINUE -* -* Apply G from the right to transform the columns of the -* matrix in rows I1 to min(K+3,I). 
-* - DO 70 J = I1, MIN( K+3, I ) - SUM = H( J, K ) + V2*H( J, K+1 ) + V3*H( J, K+2 ) - H( J, K ) = H( J, K ) - SUM*T1 - H( J, K+1 ) = H( J, K+1 ) - SUM*T2 - H( J, K+2 ) = H( J, K+2 ) - SUM*T3 - 70 CONTINUE -* - IF( WANTZ ) THEN -* -* Accumulate transformations in the matrix Z -* - DO 80 J = ILOZ, IHIZ - SUM = Z( J, K ) + V2*Z( J, K+1 ) + V3*Z( J, K+2 ) - Z( J, K ) = Z( J, K ) - SUM*T1 - Z( J, K+1 ) = Z( J, K+1 ) - SUM*T2 - Z( J, K+2 ) = Z( J, K+2 ) - SUM*T3 - 80 CONTINUE - END IF - ELSE IF( NR.EQ.2 ) THEN -* -* Apply G from the left to transform the rows of the matrix -* in columns K to I2. -* - DO 90 J = K, I2 - SUM = H( K, J ) + V2*H( K+1, J ) - H( K, J ) = H( K, J ) - SUM*T1 - H( K+1, J ) = H( K+1, J ) - SUM*T2 - 90 CONTINUE -* -* Apply G from the right to transform the columns of the -* matrix in rows I1 to min(K+3,I). -* - DO 100 J = I1, I - SUM = H( J, K ) + V2*H( J, K+1 ) - H( J, K ) = H( J, K ) - SUM*T1 - H( J, K+1 ) = H( J, K+1 ) - SUM*T2 - 100 CONTINUE -* - IF( WANTZ ) THEN -* -* Accumulate transformations in the matrix Z -* - DO 110 J = ILOZ, IHIZ - SUM = Z( J, K ) + V2*Z( J, K+1 ) - Z( J, K ) = Z( J, K ) - SUM*T1 - Z( J, K+1 ) = Z( J, K+1 ) - SUM*T2 - 110 CONTINUE - END IF - END IF - 120 CONTINUE -* - 130 CONTINUE -* -* Failure to converge in remaining number of iterations -* - INFO = I - RETURN -* - 140 CONTINUE -* - IF( L.EQ.I ) THEN -* -* H(I,I-1) is negligible: one eigenvalue has converged. -* - WR( I ) = H( I, I ) - WI( I ) = ZERO - ELSE IF( L.EQ.I-1 ) THEN -* -* H(I-1,I-2) is negligible: a pair of eigenvalues have converged. -* -* Transform the 2-by-2 submatrix to standard Schur form, -* and compute and store the eigenvalues. -* - CALL DLANV2( H( I-1, I-1 ), H( I-1, I ), H( I, I-1 ), - $ H( I, I ), WR( I-1 ), WI( I-1 ), WR( I ), WI( I ), - $ CS, SN ) -* - IF( WANTT ) THEN -* -* Apply the transformation to the rest of H. 
-* - IF( I2.GT.I ) - $ CALL DROT( I2-I, H( I-1, I+1 ), LDH, H( I, I+1 ), LDH, - $ CS, SN ) - CALL DROT( I-I1-1, H( I1, I-1 ), 1, H( I1, I ), 1, CS, SN ) - END IF - IF( WANTZ ) THEN -* -* Apply the transformation to Z. -* - CALL DROT( NZ, Z( ILOZ, I-1 ), 1, Z( ILOZ, I ), 1, CS, SN ) - END IF - END IF -* -* Decrement number of remaining iterations, and return to start of -* the main loop with new value of I. -* - ITN = ITN - ITS - I = L - 1 - GO TO 10 -* - 150 CONTINUE - RETURN -* -* End of DLAHQR -* - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/slahqr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/slahqr.f deleted file mode 100644 index 58e21546ec..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/slahqr.f +++ /dev/null @@ -1,410 +0,0 @@ - SUBROUTINE SLAHQR( WANTT, WANTZ, N, ILO, IHI, H, LDH, WR, WI, - $ ILOZ, IHIZ, Z, LDZ, INFO ) -* -* -- LAPACK auxiliary routine (version 2.0) -- -* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., -* Courant Institute, Argonne National Lab, and Rice University -* October 31, 1992 -* -* .. Scalar Arguments .. - LOGICAL WANTT, WANTZ - INTEGER IHI, IHIZ, ILO, ILOZ, INFO, LDH, LDZ, N -* .. -* .. Array Arguments .. - REAL H( LDH, * ), WI( * ), WR( * ), Z( LDZ, * ) -* .. -* -* Purpose -* ======= -* -* SLAHQR is an auxiliary routine called by SHSEQR to update the -* eigenvalues and Schur decomposition already computed by SHSEQR, by -* dealing with the Hessenberg submatrix in rows and columns ILO to IHI. -* -* Arguments -* ========= -* -* WANTT (input) LOGICAL -* = .TRUE. : the full Schur form T is required; -* = .FALSE.: only eigenvalues are required. -* -* WANTZ (input) LOGICAL -* = .TRUE. : the matrix of Schur vectors Z is required; -* = .FALSE.: Schur vectors are not required. -* -* N (input) INTEGER -* The order of the matrix H. N >= 0. 
-* -* ILO (input) INTEGER -* IHI (input) INTEGER -* It is assumed that H is already upper quasi-triangular in -* rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless -* ILO = 1). SLAHQR works primarily with the Hessenberg -* submatrix in rows and columns ILO to IHI, but applies -* transformations to all of H if WANTT is .TRUE.. -* 1 <= ILO <= max(1,IHI); IHI <= N. -* -* H (input/output) REAL array, dimension (LDH,N) -* On entry, the upper Hessenberg matrix H. -* On exit, if WANTT is .TRUE., H is upper quasi-triangular in -* rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in -* standard form. If WANTT is .FALSE., the contents of H are -* unspecified on exit. -* -* LDH (input) INTEGER -* The leading dimension of the array H. LDH >= max(1,N). -* -* WR (output) REAL array, dimension (N) -* WI (output) REAL array, dimension (N) -* The real and imaginary parts, respectively, of the computed -* eigenvalues ILO to IHI are stored in the corresponding -* elements of WR and WI. If two eigenvalues are computed as a -* complex conjugate pair, they are stored in consecutive -* elements of WR and WI, say the i-th and (i+1)th, with -* WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the -* eigenvalues are stored in the same order as on the diagonal -* of the Schur form returned in H, with WR(i) = H(i,i), and, if -* H(i:i+1,i:i+1) is a 2-by-2 diagonal block, -* WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). -* -* ILOZ (input) INTEGER -* IHIZ (input) INTEGER -* Specify the rows of Z to which transformations must be -* applied if WANTZ is .TRUE.. -* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. -* -* Z (input/output) REAL array, dimension (LDZ,N) -* If WANTZ is .TRUE., on entry Z must contain the current -* matrix Z of transformations accumulated by SHSEQR, and on -* exit Z has been updated; transformations are applied only to -* the submatrix Z(ILOZ:IHIZ,ILO:IHI). -* If WANTZ is .FALSE., Z is not referenced. 
-* -* LDZ (input) INTEGER -* The leading dimension of the array Z. LDZ >= max(1,N). -* -* INFO (output) INTEGER -* = 0: successful exit -* > 0: SLAHQR failed to compute all the eigenvalues ILO to IHI -* in a total of 30*(IHI-ILO+1) iterations; if INFO = i, -* elements i+1:ihi of WR and WI contain those eigenvalues -* which have been successfully computed. -* -* ===================================================================== -* -* .. Parameters .. - REAL ZERO, ONE - PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) - REAL DAT1, DAT2 - PARAMETER ( DAT1 = 0.75E+0, DAT2 = -0.4375E+0 ) -* .. -* .. Local Scalars .. - INTEGER I, I1, I2, ITN, ITS, J, K, L, M, NH, NR, NZ - REAL CS, H00, H10, H11, H12, H21, H22, H33, H33S, - $ H43H34, H44, H44S, OVFL, S, SMLNUM, SN, SUM, - $ T1, T2, T3, TST1, ULP, UNFL, V1, V2, V3 -* .. -* .. Local Arrays .. - REAL V( 3 ), WORK( 1 ) -* .. -* .. External Functions .. - REAL SLAMCH, SLANHS - EXTERNAL SLAMCH, SLANHS -* .. -* .. External Subroutines .. - EXTERNAL SCOPY, SLABAD, SLANV2, SLARFG, SROT -* .. -* .. Intrinsic Functions .. - INTRINSIC ABS, MAX, MIN -* .. -* .. Executable Statements .. -* - INFO = 0 -* -* Quick return if possible -* - IF( N.EQ.0 ) - $ RETURN - IF( ILO.EQ.IHI ) THEN - WR( ILO ) = H( ILO, ILO ) - WI( ILO ) = ZERO - RETURN - END IF -* - NH = IHI - ILO + 1 - NZ = IHIZ - ILOZ + 1 -* -* Set machine-dependent constants for the stopping criterion. -* If norm(H) <= sqrt(OVFL), overflow should not occur. -* - UNFL = SLAMCH( 'Safe minimum' ) - OVFL = ONE / UNFL - CALL SLABAD( UNFL, OVFL ) - ULP = SLAMCH( 'Precision' ) - SMLNUM = UNFL*( NH / ULP ) -* -* I1 and I2 are the indices of the first row and last column of H -* to which transformations must be applied. If eigenvalues only are -* being computed, I1 and I2 are set inside the main loop. -* - IF( WANTT ) THEN - I1 = 1 - I2 = N - END IF -* -* ITN is the total number of QR iterations allowed. -* - ITN = 30*NH -* -* The main loop begins here. 
I is the loop index and decreases from -* IHI to ILO in steps of 1 or 2. Each iteration of the loop works -* with the active submatrix in rows and columns L to I. -* Eigenvalues I+1 to IHI have already converged. Either L = ILO or -* H(L,L-1) is negligible so that the matrix splits. -* - I = IHI - 10 CONTINUE - L = ILO - IF( I.LT.ILO ) - $ GO TO 150 -* -* Perform QR iterations on rows and columns ILO to I until a -* submatrix of order 1 or 2 splits off at the bottom because a -* subdiagonal element has become negligible. -* - DO 130 ITS = 0, ITN -* -* Look for a single small subdiagonal element. -* - DO 20 K = I, L + 1, -1 - TST1 = ABS( H( K-1, K-1 ) ) + ABS( H( K, K ) ) - IF( TST1.EQ.ZERO ) - $ TST1 = SLANHS( '1', I-L+1, H( L, L ), LDH, WORK ) - IF( ABS( H( K, K-1 ) ).LE.MAX( ULP*TST1, SMLNUM ) ) - $ GO TO 30 - 20 CONTINUE - 30 CONTINUE - L = K - IF( L.GT.ILO ) THEN -* -* H(L,L-1) is negligible -* - H( L, L-1 ) = ZERO - END IF -* -* Exit from loop if a submatrix of order 1 or 2 has split off. -* - IF( L.GE.I-1 ) - $ GO TO 140 -* -* Now the active submatrix is in rows and columns L to I. If -* eigenvalues only are being computed, only the active submatrix -* need be transformed. -* - IF( .NOT.WANTT ) THEN - I1 = L - I2 = I - END IF -* - IF( ITS.EQ.10 .OR. ITS.EQ.20 ) THEN -* -* Exceptional shift. -* - S = ABS( H( I, I-1 ) ) + ABS( H( I-1, I-2 ) ) - H44 = DAT1*S - H33 = H44 - H43H34 = DAT2*S*S - ELSE -* -* Prepare to use Wilkinson's double shift -* - H44 = H( I, I ) - H33 = H( I-1, I-1 ) - H43H34 = H( I, I-1 )*H( I-1, I ) - END IF -* -* Look for two consecutive small subdiagonal elements. -* - DO 40 M = I - 2, L, -1 -* -* Determine the effect of starting the double-shift QR -* iteration at row M, and see if this would make H(M,M-1) -* negligible. 
-* - H11 = H( M, M ) - H22 = H( M+1, M+1 ) - H21 = H( M+1, M ) - H12 = H( M, M+1 ) - H44S = H44 - H11 - H33S = H33 - H11 - V1 = ( H33S*H44S-H43H34 ) / H21 + H12 - V2 = H22 - H11 - H33S - H44S - V3 = H( M+2, M+1 ) - S = ABS( V1 ) + ABS( V2 ) + ABS( V3 ) - V1 = V1 / S - V2 = V2 / S - V3 = V3 / S - V( 1 ) = V1 - V( 2 ) = V2 - V( 3 ) = V3 - IF( M.EQ.L ) - $ GO TO 50 - H00 = H( M-1, M-1 ) - H10 = H( M, M-1 ) - TST1 = ABS( V1 )*( ABS( H00 )+ABS( H11 )+ABS( H22 ) ) - IF( ABS( H10 )*( ABS( V2 )+ABS( V3 ) ).LE.ULP*TST1 ) - $ GO TO 50 - 40 CONTINUE - 50 CONTINUE -* -* Double-shift QR step -* - DO 120 K = M, I - 1 -* -* The first iteration of this loop determines a reflection G -* from the vector V and applies it from left and right to H, -* thus creating a nonzero bulge below the subdiagonal. -* -* Each subsequent iteration determines a reflection G to -* restore the Hessenberg form in the (K-1)th column, and thus -* chases the bulge one step toward the bottom of the active -* submatrix. NR is the order of G. -* - NR = MIN( 3, I-K+1 ) - IF( K.GT.M ) - $ CALL SCOPY( NR, H( K, K-1 ), 1, V, 1 ) - CALL SLARFG( NR, V( 1 ), V( 2 ), 1, T1 ) - IF( K.GT.M ) THEN - H( K, K-1 ) = V( 1 ) - H( K+1, K-1 ) = ZERO - IF( K.LT.I-1 ) - $ H( K+2, K-1 ) = ZERO - ELSE IF( M.GT.L ) THEN - H( K, K-1 ) = -H( K, K-1 ) - END IF - V2 = V( 2 ) - T2 = T1*V2 - IF( NR.EQ.3 ) THEN - V3 = V( 3 ) - T3 = T1*V3 -* -* Apply G from the left to transform the rows of the matrix -* in columns K to I2. -* - DO 60 J = K, I2 - SUM = H( K, J ) + V2*H( K+1, J ) + V3*H( K+2, J ) - H( K, J ) = H( K, J ) - SUM*T1 - H( K+1, J ) = H( K+1, J ) - SUM*T2 - H( K+2, J ) = H( K+2, J ) - SUM*T3 - 60 CONTINUE -* -* Apply G from the right to transform the columns of the -* matrix in rows I1 to min(K+3,I). 
-* - DO 70 J = I1, MIN( K+3, I ) - SUM = H( J, K ) + V2*H( J, K+1 ) + V3*H( J, K+2 ) - H( J, K ) = H( J, K ) - SUM*T1 - H( J, K+1 ) = H( J, K+1 ) - SUM*T2 - H( J, K+2 ) = H( J, K+2 ) - SUM*T3 - 70 CONTINUE -* - IF( WANTZ ) THEN -* -* Accumulate transformations in the matrix Z -* - DO 80 J = ILOZ, IHIZ - SUM = Z( J, K ) + V2*Z( J, K+1 ) + V3*Z( J, K+2 ) - Z( J, K ) = Z( J, K ) - SUM*T1 - Z( J, K+1 ) = Z( J, K+1 ) - SUM*T2 - Z( J, K+2 ) = Z( J, K+2 ) - SUM*T3 - 80 CONTINUE - END IF - ELSE IF( NR.EQ.2 ) THEN -* -* Apply G from the left to transform the rows of the matrix -* in columns K to I2. -* - DO 90 J = K, I2 - SUM = H( K, J ) + V2*H( K+1, J ) - H( K, J ) = H( K, J ) - SUM*T1 - H( K+1, J ) = H( K+1, J ) - SUM*T2 - 90 CONTINUE -* -* Apply G from the right to transform the columns of the -* matrix in rows I1 to min(K+3,I). -* - DO 100 J = I1, I - SUM = H( J, K ) + V2*H( J, K+1 ) - H( J, K ) = H( J, K ) - SUM*T1 - H( J, K+1 ) = H( J, K+1 ) - SUM*T2 - 100 CONTINUE -* - IF( WANTZ ) THEN -* -* Accumulate transformations in the matrix Z -* - DO 110 J = ILOZ, IHIZ - SUM = Z( J, K ) + V2*Z( J, K+1 ) - Z( J, K ) = Z( J, K ) - SUM*T1 - Z( J, K+1 ) = Z( J, K+1 ) - SUM*T2 - 110 CONTINUE - END IF - END IF - 120 CONTINUE -* - 130 CONTINUE -* -* Failure to converge in remaining number of iterations -* - INFO = I - RETURN -* - 140 CONTINUE -* - IF( L.EQ.I ) THEN -* -* H(I,I-1) is negligible: one eigenvalue has converged. -* - WR( I ) = H( I, I ) - WI( I ) = ZERO - ELSE IF( L.EQ.I-1 ) THEN -* -* H(I-1,I-2) is negligible: a pair of eigenvalues have converged. -* -* Transform the 2-by-2 submatrix to standard Schur form, -* and compute and store the eigenvalues. -* - CALL SLANV2( H( I-1, I-1 ), H( I-1, I ), H( I, I-1 ), - $ H( I, I ), WR( I-1 ), WI( I-1 ), WR( I ), WI( I ), - $ CS, SN ) -* - IF( WANTT ) THEN -* -* Apply the transformation to the rest of H. 
-* - IF( I2.GT.I ) - $ CALL SROT( I2-I, H( I-1, I+1 ), LDH, H( I, I+1 ), LDH, - $ CS, SN ) - CALL SROT( I-I1-1, H( I1, I-1 ), 1, H( I1, I ), 1, CS, SN ) - END IF - IF( WANTZ ) THEN -* -* Apply the transformation to Z. -* - CALL SROT( NZ, Z( ILOZ, I-1 ), 1, Z( ILOZ, I ), 1, CS, SN ) - END IF - END IF -* -* Decrement number of remaining iterations, and return to start of -* the main loop with new value of I. -* - ITN = ITN - ITS - I = L - 1 - GO TO 10 -* - 150 CONTINUE - RETURN -* -* End of SLAHQR -* - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/zlahqr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/zlahqr.f deleted file mode 100644 index cd856dd4d4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/LAPACK/zlahqr.f +++ /dev/null @@ -1,385 +0,0 @@ - SUBROUTINE ZLAHQR( WANTT, WANTZ, N, ILO, IHI, H, LDH, W, ILOZ, - $ IHIZ, Z, LDZ, INFO ) -* -* -- LAPACK auxiliary routine (version 2.0) -- -* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., -* Courant Institute, Argonne National Lab, and Rice University -* September 30, 1994 -* -* .. Scalar Arguments .. - LOGICAL WANTT, WANTZ - INTEGER IHI, IHIZ, ILO, ILOZ, INFO, LDH, LDZ, N -* .. -* .. Array Arguments .. - COMPLEX*16 H( LDH, * ), W( * ), Z( LDZ, * ) -* .. -* -* Purpose -* ======= -* -* ZLAHQR is an auxiliary routine called by CHSEQR to update the -* eigenvalues and Schur decomposition already computed by CHSEQR, by -* dealing with the Hessenberg submatrix in rows and columns ILO to IHI. -* -* Arguments -* ========= -* -* WANTT (input) LOGICAL -* = .TRUE. : the full Schur form T is required; -* = .FALSE.: only eigenvalues are required. -* -* WANTZ (input) LOGICAL -* = .TRUE. : the matrix of Schur vectors Z is required; -* = .FALSE.: Schur vectors are not required. -* -* N (input) INTEGER -* The order of the matrix H. N >= 0. 
-* -* ILO (input) INTEGER -* IHI (input) INTEGER -* It is assumed that H is already upper triangular in rows and -* columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). -* ZLAHQR works primarily with the Hessenberg submatrix in rows -* and columns ILO to IHI, but applies transformations to all of -* H if WANTT is .TRUE.. -* 1 <= ILO <= max(1,IHI); IHI <= N. -* -* H (input/output) COMPLEX*16 array, dimension (LDH,N) -* On entry, the upper Hessenberg matrix H. -* On exit, if WANTT is .TRUE., H is upper triangular in rows -* and columns ILO:IHI, with any 2-by-2 diagonal blocks in -* standard form. If WANTT is .FALSE., the contents of H are -* unspecified on exit. -* -* LDH (input) INTEGER -* The leading dimension of the array H. LDH >= max(1,N). -* -* W (output) COMPLEX*16 array, dimension (N) -* The computed eigenvalues ILO to IHI are stored in the -* corresponding elements of W. If WANTT is .TRUE., the -* eigenvalues are stored in the same order as on the diagonal -* of the Schur form returned in H, with W(i) = H(i,i). -* -* ILOZ (input) INTEGER -* IHIZ (input) INTEGER -* Specify the rows of Z to which transformations must be -* applied if WANTZ is .TRUE.. -* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. -* -* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) -* If WANTZ is .TRUE., on entry Z must contain the current -* matrix Z of transformations accumulated by CHSEQR, and on -* exit Z has been updated; transformations are applied only to -* the submatrix Z(ILOZ:IHIZ,ILO:IHI). -* If WANTZ is .FALSE., Z is not referenced. -* -* LDZ (input) INTEGER -* The leading dimension of the array Z. LDZ >= max(1,N). -* -* INFO (output) INTEGER -* = 0: successful exit -* > 0: if INFO = i, ZLAHQR failed to compute all the -* eigenvalues ILO to IHI in a total of 30*(IHI-ILO+1) -* iterations; elements i+1:ihi of W contain those -* eigenvalues which have been successfully computed. -* -* ===================================================================== -* -* .. Parameters .. 
- COMPLEX*16 ZERO, ONE - PARAMETER ( ZERO = ( 0.0D+0, 0.0D+0 ), - $ ONE = ( 1.0D+0, 0.0D+0 ) ) - DOUBLE PRECISION RZERO, HALF - PARAMETER ( RZERO = 0.0D+0, HALF = 0.5D+0 ) -* .. -* .. Local Scalars .. - INTEGER I, I1, I2, ITN, ITS, J, K, L, M, NH, NZ - DOUBLE PRECISION H10, H21, RTEMP, S, SMLNUM, T2, TST1, ULP - COMPLEX*16 CDUM, H11, H11S, H22, SUM, T, T1, TEMP, U, V2, - $ X, Y -* .. -* .. Local Arrays .. - DOUBLE PRECISION RWORK( 1 ) - COMPLEX*16 V( 2 ) -* .. -* .. External Functions .. - DOUBLE PRECISION ZLANHS, DLAMCH - COMPLEX*16 WZLADIV - EXTERNAL ZLANHS, DLAMCH, WZLADIV -* .. -* .. External Subroutines .. - EXTERNAL ZCOPY, ZLARFG, ZSCAL -* .. -* .. Intrinsic Functions .. - INTRINSIC ABS, DIMAG, DCONJG, MAX, MIN, DBLE, SQRT -* .. -* .. Statement Functions .. - DOUBLE PRECISION CABS1 -* .. -* .. Statement Function definitions .. - CABS1( CDUM ) = ABS( DBLE( CDUM ) ) + ABS( DIMAG( CDUM ) ) -* .. -* .. Executable Statements .. -* - INFO = 0 -* -* Quick return if possible -* - IF( N.EQ.0 ) - $ RETURN - IF( ILO.EQ.IHI ) THEN - W( ILO ) = H( ILO, ILO ) - RETURN - END IF -* - NH = IHI - ILO + 1 - NZ = IHIZ - ILOZ + 1 -* -* Set machine-dependent constants for the stopping criterion. -* If norm(H) <= sqrt(OVFL), overflow should not occur. -* - ULP = DLAMCH( 'Precision' ) - SMLNUM = DLAMCH( 'Safe minimum' ) / ULP -* -* I1 and I2 are the indices of the first row and last column of H -* to which transformations must be applied. If eigenvalues only are -* being computed, I1 and I2 are set inside the main loop. -* - IF( WANTT ) THEN - I1 = 1 - I2 = N - END IF -* -* ITN is the total number of QR iterations allowed. -* - ITN = 30*NH -* -* The main loop begins here. I is the loop index and decreases from -* IHI to ILO in steps of 1. Each iteration of the loop works -* with the active submatrix in rows and columns L to I. -* Eigenvalues I+1 to IHI have already converged. Either L = ILO, or -* H(L,L-1) is negligible so that the matrix splits. 
-* - I = IHI - 10 CONTINUE - IF( I.LT.ILO ) - $ GO TO 130 -* -* Perform QR iterations on rows and columns ILO to I until a -* submatrix of order 1 splits off at the bottom because a -* subdiagonal element has become negligible. -* - L = ILO - DO 110 ITS = 0, ITN -* -* Look for a single small subdiagonal element. -* - DO 20 K = I, L + 1, -1 - TST1 = CABS1( H( K-1, K-1 ) ) + CABS1( H( K, K ) ) - IF( TST1.EQ.RZERO ) - $ TST1 = ZLANHS( '1', I-L+1, H( L, L ), LDH, RWORK ) - IF( ABS( DBLE( H( K, K-1 ) ) ).LE.MAX( ULP*TST1, SMLNUM ) ) - $ GO TO 30 - 20 CONTINUE - 30 CONTINUE - L = K - IF( L.GT.ILO ) THEN -* -* H(L,L-1) is negligible -* - H( L, L-1 ) = ZERO - END IF -* -* Exit from loop if a submatrix of order 1 has split off. -* - IF( L.GE.I ) - $ GO TO 120 -* -* Now the active submatrix is in rows and columns L to I. If -* eigenvalues only are being computed, only the active submatrix -* need be transformed. -* - IF( .NOT.WANTT ) THEN - I1 = L - I2 = I - END IF -* - IF( ITS.EQ.10 .OR. ITS.EQ.20 ) THEN -* -* Exceptional shift. -* - T = ABS( DBLE( H( I, I-1 ) ) ) + - $ ABS( DBLE( H( I-1, I-2 ) ) ) - ELSE -* -* Wilkinson's shift. -* - T = H( I, I ) - U = H( I-1, I )*DBLE( H( I, I-1 ) ) - IF( U.NE.ZERO ) THEN - X = HALF*( H( I-1, I-1 )-T ) - Y = SQRT( X*X+U ) - IF( DBLE( X )*DBLE( Y )+DIMAG( X )*DIMAG( Y ).LT.RZERO ) - $ Y = -Y - T = T - WZLADIV( U, ( X+Y ) ) - END IF - END IF -* -* Look for two consecutive small subdiagonal elements. -* - DO 40 M = I - 1, L + 1, -1 -* -* Determine the effect of starting the single-shift QR -* iteration at row M, and see if this would make H(M,M-1) -* negligible. 
-* - H11 = H( M, M ) - H22 = H( M+1, M+1 ) - H11S = H11 - T - H21 = H( M+1, M ) - S = CABS1( H11S ) + ABS( H21 ) - H11S = H11S / S - H21 = H21 / S - V( 1 ) = H11S - V( 2 ) = H21 - H10 = H( M, M-1 ) - TST1 = CABS1( H11S )*( CABS1( H11 )+CABS1( H22 ) ) - IF( ABS( H10*H21 ).LE.ULP*TST1 ) - $ GO TO 50 - 40 CONTINUE - H11 = H( L, L ) - H22 = H( L+1, L+1 ) - H11S = H11 - T - H21 = H( L+1, L ) - S = CABS1( H11S ) + ABS( H21 ) - H11S = H11S / S - H21 = H21 / S - V( 1 ) = H11S - V( 2 ) = H21 - 50 CONTINUE -* -* Single-shift QR step -* - DO 100 K = M, I - 1 -* -* The first iteration of this loop determines a reflection G -* from the vector V and applies it from left and right to H, -* thus creating a nonzero bulge below the subdiagonal. -* -* Each subsequent iteration determines a reflection G to -* restore the Hessenberg form in the (K-1)th column, and thus -* chases the bulge one step toward the bottom of the active -* submatrix. -* -* V(2) is always real before the call to ZLARFG, and hence -* after the call T2 ( = T1*V(2) ) is also real. -* - IF( K.GT.M ) - $ CALL ZCOPY( 2, H( K, K-1 ), 1, V, 1 ) - CALL ZLARFG( 2, V( 1 ), V( 2 ), 1, T1 ) - IF( K.GT.M ) THEN - H( K, K-1 ) = V( 1 ) - H( K+1, K-1 ) = ZERO - END IF - V2 = V( 2 ) - T2 = DBLE( T1*V2 ) -* -* Apply G from the left to transform the rows of the matrix -* in columns K to I2. -* - DO 60 J = K, I2 - SUM = DCONJG( T1 )*H( K, J ) + T2*H( K+1, J ) - H( K, J ) = H( K, J ) - SUM - H( K+1, J ) = H( K+1, J ) - SUM*V2 - 60 CONTINUE -* -* Apply G from the right to transform the columns of the -* matrix in rows I1 to min(K+2,I). 
-* - DO 70 J = I1, MIN( K+2, I ) - SUM = T1*H( J, K ) + T2*H( J, K+1 ) - H( J, K ) = H( J, K ) - SUM - H( J, K+1 ) = H( J, K+1 ) - SUM*DCONJG( V2 ) - 70 CONTINUE -* - IF( WANTZ ) THEN -* -* Accumulate transformations in the matrix Z -* - DO 80 J = ILOZ, IHIZ - SUM = T1*Z( J, K ) + T2*Z( J, K+1 ) - Z( J, K ) = Z( J, K ) - SUM - Z( J, K+1 ) = Z( J, K+1 ) - SUM*DCONJG( V2 ) - 80 CONTINUE - END IF -* - IF( K.EQ.M .AND. M.GT.L ) THEN -* -* If the QR step was started at row M > L because two -* consecutive small subdiagonals were found, then extra -* scaling must be performed to ensure that H(M,M-1) remains -* real. -* - TEMP = ONE - T1 - TEMP = TEMP / ABS( TEMP ) - H( M+1, M ) = H( M+1, M )*DCONJG( TEMP ) - IF( M+2.LE.I ) - $ H( M+2, M+1 ) = H( M+2, M+1 )*TEMP - DO 90 J = M, I - IF( J.NE.M+1 ) THEN - IF( I2.GT.J ) - $ CALL ZSCAL( I2-J, TEMP, H( J, J+1 ), LDH ) - CALL ZSCAL( J-I1, DCONJG( TEMP ), H( I1, J ), 1 ) - IF( WANTZ ) THEN - CALL ZSCAL( NZ, DCONJG( TEMP ), - $ Z( ILOZ, J ), 1 ) - END IF - END IF - 90 CONTINUE - END IF - 100 CONTINUE -* -* Ensure that H(I,I-1) is real. -* - TEMP = H( I, I-1 ) - IF( DIMAG( TEMP ).NE.RZERO ) THEN - RTEMP = ABS( TEMP ) - H( I, I-1 ) = RTEMP - TEMP = TEMP / RTEMP - IF( I2.GT.I ) - $ CALL ZSCAL( I2-I, DCONJG( TEMP ), H( I, I+1 ), LDH ) - CALL ZSCAL( I-I1, TEMP, H( I1, I ), 1 ) - IF( WANTZ ) THEN - CALL ZSCAL( NZ, TEMP, Z( ILOZ, I ), 1 ) - END IF - END IF -* - 110 CONTINUE -* -* Failure to converge in remaining number of iterations -* - INFO = I - RETURN -* - 120 CONTINUE -* -* H(I,I-1) is negligible: one eigenvalue has converged. -* - W( I ) = H( I, I ) -* -* Decrement number of remaining iterations, and return to start of -* the main loop with new value of I. 
-* - ITN = ITN - ITS - I = L - 1 - GO TO 10 -* - 130 CONTINUE - RETURN -* -* End of ZLAHQR -* - END - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cgetv0.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cgetv0.f deleted file mode 100644 index bd35747553..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cgetv0.f +++ /dev/null @@ -1,414 +0,0 @@ -c\BeginDoc -c -c\Name: cgetv0 -c -c\Description: -c Generate a random initial residual vector for the Arnoldi process. -c Force the residual vector to be in the range of the operator OP. -c -c\Usage: -c call cgetv0 -c ( IDO, BMAT, ITRY, INITV, N, J, V, LDV, RESID, RNORM, -c IPNTR, WORKD, IERR ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to cgetv0. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B in the (generalized) -c eigenvalue problem A*x = lambda*B*x. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c ITRY Integer. (INPUT) -c ITRY counts the number of times that cgetv0 is called. -c It should be set to 1 on the initial call to cgetv0. -c -c INITV Logical variable. (INPUT) -c .TRUE. => the initial residual vector is given in RESID. -c .FALSE. => generate a random initial residual vector. -c -c N Integer. (INPUT) -c Dimension of the problem. 
-c -c J Integer. (INPUT) -c Index of the residual vector to be generated, with respect to -c the Arnoldi process. J > 1 in case of a "restart". -c -c V Complex N by J array. (INPUT) -c The first J-1 columns of V contain the current Arnoldi basis -c if this is a "restart". -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c RESID Complex array of length N. (INPUT/OUTPUT) -c Initial residual vector to be generated. If RESID is -c provided, force RESID into the range of the operator OP. -c -c RNORM Real scalar. (OUTPUT) -c B-norm of the generated residual. -c -c IPNTR Integer array of length 3. (OUTPUT) -c -c WORKD Complex work array of length 2*N. (REVERSE COMMUNICATION). -c On exit, WORK(1:N) = B*RESID to be used in SSAITR. -c -c IERR Integer. (OUTPUT) -c = 0: Normal exit. -c = -1: Cannot generate a nontrivial restarted residual vector -c in the range of the operator OP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c -c\Routines called: -c second ARPACK utility routine for timing. -c cvout ARPACK utility routine that prints vectors. -c clarnv LAPACK routine for generating a random vector. -c cgemv Level 2 BLAS routine for matrix vector multiplication. -c ccopy Level 1 BLAS that copies one vector to another. -c wcdotc Level 1 BLAS that computes the scalar product of two vectors. -c scnrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: getv0.F SID: 2.3 DATE OF SID: 08/27/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cgetv0 - & ( ido, bmat, itry, initv, n, j, v, ldv, resid, rnorm, - & ipntr, workd, ierr ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - logical initv - integer ido, ierr, itry, j, ldv, n - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Complex - & resid(n), v(ldv,j), workd(2*n) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - Real - & rzero - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0), - & rzero = 0.0E+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical first, inits, orth - integer idist, iseed(4), iter, msglvl, jj - Real - & rnorm0 - Complex - & cnorm - save first, iseed, inits, iter, msglvl, orth, rnorm0 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external ccopy, cgemv, clarnv, cvout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & scnrm2, slapy2 - Complex - & wcdotc - external wcdotc, scnrm2, slapy2 -c -c %-----------------% -c | Data Statements | -c %-----------------% -c - data inits /.true./ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c -c %-----------------------------------% -c | Initialize the seed of the LAPACK | -c | random number generator | -c %-----------------------------------% -c 
- if (inits) then - iseed(1) = 1 - iseed(2) = 3 - iseed(3) = 5 - iseed(4) = 7 - inits = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mgetv0 -c - ierr = 0 - iter = 0 - first = .FALSE. - orth = .FALSE. -c -c %-----------------------------------------------------% -c | Possibly generate a random starting vector in RESID | -c | Use a LAPACK random number generator used by the | -c | matrix generation routines. | -c | idist = 1: uniform (0,1) distribution; | -c | idist = 2: uniform (-1,1) distribution; | -c | idist = 3: normal (0,1) distribution; | -c %-----------------------------------------------------% -c - if (.not.initv) then - idist = 2 - call clarnv (idist, iseed, n, resid) - end if -c -c %----------------------------------------------------------% -c | Force the starting vector into the range of OP to handle | -c | the generalized problem when B is possibly (singular). | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nopx = nopx + 1 - ipntr(1) = 1 - ipntr(2) = n + 1 - call ccopy (n, resid, 1, workd, 1) - ido = -1 - go to 9000 - end if - end if -c -c %----------------------------------------% -c | Back from computing B*(initial-vector) | -c %----------------------------------------% -c - if (first) go to 20 -c -c %-----------------------------------------------% -c | Back from computing B*(orthogonalized-vector) | -c %-----------------------------------------------% -c - if (orth) go to 40 -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) -c -c %------------------------------------------------------% -c | Starting vector is now in the range of OP; r = OP*r; | -c | Compute B-norm of starting vector. | -c %------------------------------------------------------% -c - call second (t2) - first = .TRUE. - if (bmat .eq. 
'G') then - nbx = nbx + 1 - call ccopy (n, workd(n+1), 1, resid, 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 'I') then - call ccopy (n, resid, 1, workd, 1) - end if -c - 20 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - first = .FALSE. - if (bmat .eq. 'G') then - cnorm = wcdotc (n, resid, 1, workd, 1) - rnorm0 = sqrt(slapy2(real(cnorm),aimag(cnorm))) - else if (bmat .eq. 'I') then - rnorm0 = scnrm2(n, resid, 1) - end if - rnorm = rnorm0 -c -c %---------------------------------------------% -c | Exit if this is the very first Arnoldi step | -c %---------------------------------------------% -c - if (j .eq. 1) go to 50 -c -c %---------------------------------------------------------------- -c | Otherwise need to B-orthogonalize the starting vector against | -c | the current Arnoldi basis using Gram-Schmidt with iter. ref. | -c | This is the case where an invariant subspace is encountered | -c | in the middle of the Arnoldi factorization. | -c | | -c | s = V^{T}*B*r; r = r - V*s; | -c | | -c | Stopping criteria used for iter. ref. is discussed in | -c | Parlett's book, page 107 and in Gragg & Reichel TOMS paper. | -c %---------------------------------------------------------------% -c - orth = .TRUE. - 30 continue -c - call cgemv ('C', n, j-1, one, v, ldv, workd, 1, - & zero, workd(n+1), 1) - call cgemv ('N', n, j-1, -one, v, ldv, workd(n+1), 1, - & one, resid, 1) -c -c %----------------------------------------------------------% -c | Compute the B-norm of the orthogonalized starting vector | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call ccopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 'I') then - call ccopy (n, resid, 1, workd, 1) - end if -c - 40 continue -c - if (bmat .eq. 
'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - cnorm = wcdotc (n, resid, 1, workd, 1) - rnorm = sqrt(slapy2(real(cnorm),aimag(cnorm))) - else if (bmat .eq. 'I') then - rnorm = scnrm2(n, resid, 1) - end if -c -c %--------------------------------------% -c | Check for further orthogonalization. | -c %--------------------------------------% -c - if (msglvl .gt. 2) then - call svout (logfil, 1, rnorm0, ndigit, - & '_getv0: re-orthonalization ; rnorm0 is') - call svout (logfil, 1, rnorm, ndigit, - & '_getv0: re-orthonalization ; rnorm is') - end if -c - if (rnorm .gt. 0.717*rnorm0) go to 50 -c - iter = iter + 1 - if (iter .le. 1) then -c -c %-----------------------------------% -c | Perform iterative refinement step | -c %-----------------------------------% -c - rnorm0 = rnorm - go to 30 - else -c -c %------------------------------------% -c | Iterative refinement step "failed" | -c %------------------------------------% -c - do 45 jj = 1, n - resid(jj) = zero - 45 continue - rnorm = rzero - ierr = -1 - end if -c - 50 continue -c - if (msglvl .gt. 0) then - call svout (logfil, 1, rnorm, ndigit, - & '_getv0: B-norm of initial / restarted starting vector') - end if - if (msglvl .gt. 2) then - call cvout (logfil, n, resid, ndigit, - & '_getv0: initial / restarted starting vector') - end if - ido = 99 -c - call second (t1) - tgetv0 = tgetv0 + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of cgetv0 | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaitr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaitr.f deleted file mode 100644 index 7c911d75d2..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaitr.f +++ /dev/null @@ -1,850 +0,0 @@ -c\BeginDoc -c -c\Name: cnaitr -c -c\Description: -c Reverse communication interface for applying NP additional steps to -c a K step nonsymmetric Arnoldi factorization. 
-c -c Input: OP*V_{k} - V_{k}*H = r_{k}*e_{k}^T -c -c with (V_{k}^T)*B*V_{k} = I, (V_{k}^T)*B*r_{k} = 0. -c -c Output: OP*V_{k+p} - V_{k+p}*H = r_{k+p}*e_{k+p}^T -c -c with (V_{k+p}^T)*B*V_{k+p} = I, (V_{k+p}^T)*B*r_{k+p} = 0. -c -c where OP and B are as in cnaupd. The B-norm of r_{k+p} is also -c computed and returned. -c -c\Usage: -c call cnaitr -c ( IDO, BMAT, N, K, NP, NB, RESID, RNORM, V, LDV, H, LDH, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c This is for the restart phase to force the new -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y, -c IPNTR(3) is the pointer into WORK for B * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c When the routine is used in the "shift-and-invert" mode, the -c vector B * Q is already available and do not need to be -c recomputed in forming OP * Q. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. See cnaupd. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*M**x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c K Integer. (INPUT) -c Current size of V and H. -c -c NP Integer. (INPUT) -c Number of additional Arnoldi steps to take. -c -c NB Integer. (INPUT) -c Blocksize to be used in the recurrence. -c Only work for NB = 1 right now. 
The goal is to have a -c program that implement both the block and non-block method. -c -c RESID Complex array of length N. (INPUT/OUTPUT) -c On INPUT: RESID contains the residual vector r_{k}. -c On OUTPUT: RESID contains the residual vector r_{k+p}. -c -c RNORM Real scalar. (INPUT/OUTPUT) -c B-norm of the starting residual on input. -c B-norm of the updated residual r_{k+p} on output. -c -c V Complex N by K+NP array. (INPUT/OUTPUT) -c On INPUT: V contains the Arnoldi vectors in the first K -c columns. -c On OUTPUT: V contains the new NP Arnoldi vectors in the next -c NP columns. The first K columns are unchanged. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Complex (K+NP) by (K+NP) array. (INPUT/OUTPUT) -c H is used to store the generated upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORK for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Complex work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The calling program should not -c use WORKD as temporary workspace during the iteration !!!!!! -c On input, WORKD(1:N) = B*RESID and is used to save some -c computation at the first step. -c -c INFO Integer. (OUTPUT) -c = 0: Normal exit. -c > 0: Size of the spanning invariant subspace of OP found. 
-c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c cgetv0 ARPACK routine to generate the initial vector. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c cmout ARPACK utility routine that prints matrices -c cvout ARPACK utility routine that prints vectors. -c clanhs LAPACK routine that computes various norms of a matrix. -c clascl LAPACK routine for careful scaling of a matrix. -c slabad LAPACK routine for defining the underflow and overflow -c limits. -c slamch LAPACK routine that determines machine constants. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c cgemv Level 2 BLAS routine for matrix vector multiplication. -c caxpy Level 1 BLAS that computes a vector triad. -c ccopy Level 1 BLAS that copies one vector to another . -c wcdotc Level 1 BLAS that computes the scalar product of two vectors. -c cscal Level 1 BLAS that scales a vector. -c csscal Level 1 BLAS that scales a complex vector by a real number. -c scnrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naitr.F SID: 2.3 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c The algorithm implemented is: -c -c restart = .false. 
-c Given V_{k} = [v_{1}, ..., v_{k}], r_{k}; -c r_{k} contains the initial residual vector even for k = 0; -c Also assume that rnorm = || B*r_{k} || and B*r_{k} are already -c computed by the calling program. -c -c betaj = rnorm ; p_{k+1} = B*r_{k} ; -c For j = k+1, ..., k+np Do -c 1) if ( betaj < tol ) stop or restart depending on j. -c ( At present tol is zero ) -c if ( restart ) generate a new starting vector. -c 2) v_{j} = r(j-1)/betaj; V_{j} = [V_{j-1}, v_{j}]; -c p_{j} = p_{j}/betaj -c 3) r_{j} = OP*v_{j} where OP is defined as in cnaupd -c For shift-invert mode p_{j} = B*v_{j} is already available. -c wnorm = || OP*v_{j} || -c 4) Compute the j-th step residual vector. -c w_{j} = V_{j}^T * B * OP * v_{j} -c r_{j} = OP*v_{j} - V_{j} * w_{j} -c H(:,j) = w_{j}; -c H(j,j-1) = rnorm -c rnorm = || r_(j) || -c If (rnorm > 0.717*wnorm) accept step and go back to 1) -c 5) Re-orthogonalization step: -c s = V_{j}'*B*r_{j} -c r_{j} = r_{j} - V_{j}*s; rnorm1 = || r_{j} || -c alphaj = alphaj + s_{j}; -c 6) Iterative refinement step: -c If (rnorm1 > 0.717*rnorm) then -c rnorm = rnorm1 -c accept step and go back to 1) -c Else -c rnorm = rnorm1 -c If this is the first time in step 6), go to 5) -c Else r_{j} lies in the span of V_{j} numerically. 
-c Set r_{j} = 0 and rnorm = 0; go to 1) -c EndIf -c End Do -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cnaitr - & (ido, bmat, n, k, np, nb, resid, rnorm, v, ldv, h, ldh, - & ipntr, workd, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - integer ido, info, k, ldh, ldv, n, nb, np - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Complex - & h(ldh,k+np), resid(n), v(ldv,k+np), workd(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - Real - & rone, rzero - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0), - & rone = 1.0E+0, rzero = 0.0E+0) -c -c %--------------% -c | Local Arrays | -c %--------------% -c - Real - & rtemp(2) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical first, orth1, orth2, rstart, step3, step4 - integer ierr, i, infol, ipj, irj, ivj, iter, itry, j, msglvl, - & jj - Real - & ovfl, smlnum, tst1, ulp, unfl, betaj, - & temp1, rnorm1, wnorm - Complex - & cnorm -c - save first, orth1, orth2, rstart, step3, step4, - & ierr, ipj, irj, ivj, iter, itry, j, msglvl, ovfl, - & betaj, rnorm1, smlnum, ulp, unfl, wnorm -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external caxpy, ccopy, cscal, csscal, cgemv, cgetv0, - & slabad, cvout, cmout, ivout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Complex - & wcdotc - Real - & slamch, scnrm2, clanhs, slapy2 - external wcdotc, scnrm2, clanhs, slamch, slapy2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic 
aimag, real, max, sqrt -c -c %-----------------% -c | Data statements | -c %-----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------% -c | Set machine-dependent constants for the | -c | the splitting and deflation criterion. | -c | If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine clahqr | -c %-----------------------------------------% -c - unfl = slamch( 'safe minimum' ) - ovfl = real(one / unfl) - call slabad( unfl, ovfl ) - ulp = slamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mcaitr -c -c %------------------------------% -c | Initial call to this routine | -c %------------------------------% -c - info = 0 - step3 = .false. - step4 = .false. - rstart = .false. - orth1 = .false. - orth2 = .false. - j = k + 1 - ipj = 1 - irj = ipj + n - ivj = irj + n - end if -c -c %-------------------------------------------------% -c | When in reverse communication mode one of: | -c | STEP3, STEP4, ORTH1, ORTH2, RSTART | -c | will be .true. when .... | -c | STEP3: return from computing OP*v_{j}. | -c | STEP4: return from computing B-norm of OP*v_{j} | -c | ORTH1: return from computing B-norm of r_{j+1} | -c | ORTH2: return from computing B-norm of | -c | correction to the residual vector. | -c | RSTART: return from OP computations needed by | -c | cgetv0. 
| -c %-------------------------------------------------% -c - if (step3) go to 50 - if (step4) go to 60 - if (orth1) go to 70 - if (orth2) go to 90 - if (rstart) go to 30 -c -c %-----------------------------% -c | Else this is the first step | -c %-----------------------------% -c -c %--------------------------------------------------------------% -c | | -c | A R N O L D I I T E R A T I O N L O O P | -c | | -c | Note: B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | -c %--------------------------------------------------------------% - - 1000 continue -c - if (msglvl .gt. 1) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: generating Arnoldi vector number') - call svout (logfil, 1, rnorm, ndigit, - & '_naitr: B-norm of the current residual is') - end if -c -c %---------------------------------------------------% -c | STEP 1: Check if the B norm of j-th residual | -c | vector is zero. Equivalent to determine whether | -c | an exact j-step Arnoldi factorization is present. | -c %---------------------------------------------------% -c - betaj = rnorm - if (rnorm .gt. rzero) go to 40 -c -c %---------------------------------------------------% -c | Invariant subspace found, generate a new starting | -c | vector which is orthogonal to the current Arnoldi | -c | basis and continue the iteration. | -c %---------------------------------------------------% -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: ****** RESTART AT STEP ******') - end if -c -c %---------------------------------------------% -c | ITRY is the loop variable that controls the | -c | maximum amount of times that a restart is | -c | attempted. NRSTRT is used by stat.h | -c %---------------------------------------------% -c - betaj = rzero - nrstrt = nrstrt + 1 - itry = 1 - 20 continue - rstart = .true. - ido = 0 - 30 continue -c -c %--------------------------------------% -c | If in reverse communication mode and | -c | RSTART = .true. flow returns here. 
| -c %--------------------------------------% -c - call cgetv0 (ido, bmat, itry, .false., n, j, v, ldv, - & resid, rnorm, ipntr, workd, ierr) - if (ido .ne. 99) go to 9000 - if (ierr .lt. 0) then - itry = itry + 1 - if (itry .le. 3) go to 20 -c -c %------------------------------------------------% -c | Give up after several restart attempts. | -c | Set INFO to the size of the invariant subspace | -c | which spans OP and exit. | -c %------------------------------------------------% -c - info = j - 1 - call second (t1) - tcaitr = tcaitr + (t1 - t0) - ido = 99 - go to 9000 - end if -c - 40 continue -c -c %---------------------------------------------------------% -c | STEP 2: v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm | -c | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | -c | when reciprocating a small RNORM, test against lower | -c | machine bound. | -c %---------------------------------------------------------% -c - call ccopy (n, resid, 1, v(1,j), 1) - if ( rnorm .ge. unfl) then - temp1 = rone / rnorm - call csscal (n, temp1, v(1,j), 1) - call csscal (n, temp1, workd(ipj), 1) - else -c -c %-----------------------------------------% -c | To scale both v_{j} and p_{j} carefully | -c | use LAPACK routine clascl | -c %-----------------------------------------% -c - call clascl ('General', i, i, rnorm, rone, - & n, 1, v(1,j), n, infol) - call clascl ('General', i, i, rnorm, rone, - & n, 1, workd(ipj), n, infol) - end if -c -c %------------------------------------------------------% -c | STEP 3: r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | -c | Note that this is not quite yet r_{j}. See STEP 4 | -c %------------------------------------------------------% -c - step3 = .true. 
- nopx = nopx + 1 - call second (t2) - call ccopy (n, v(1,j), 1, workd(ivj), 1) - ipntr(1) = ivj - ipntr(2) = irj - ipntr(3) = ipj - ido = 1 -c -c %-----------------------------------% -c | Exit in order to compute OP*v_{j} | -c %-----------------------------------% -c - go to 9000 - 50 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IRJ:IRJ+N-1) := OP*v_{j} | -c | if step3 = .true. | -c %----------------------------------% -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) - - step3 = .false. -c -c %------------------------------------------% -c | Put another copy of OP*v_{j} into RESID. | -c %------------------------------------------% -c - call ccopy (n, workd(irj), 1, resid, 1) -c -c %---------------------------------------% -c | STEP 4: Finish extending the Arnoldi | -c | factorization to length j. | -c %---------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - step4 = .true. - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-------------------------------------% -c | Exit in order to compute B*OP*v_{j} | -c %-------------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call ccopy (n, resid, 1, workd(ipj), 1) - end if - 60 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j} | -c | if step4 = .true. | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - step4 = .false. -c -c %-------------------------------------% -c | The following is needed for STEP 5. | -c | Compute the B-norm of OP*v_{j}. | -c %-------------------------------------% -c - if (bmat .eq. 'G') then - cnorm = wcdotc (n, resid, 1, workd(ipj), 1) - wnorm = sqrt( slapy2(real(cnorm),aimag(cnorm)) ) - else if (bmat .eq. 
'I') then - wnorm = scnrm2(n, resid, 1) - end if -c -c %-----------------------------------------% -c | Compute the j-th residual corresponding | -c | to the j step factorization. | -c | Use Classical Gram Schmidt and compute: | -c | w_{j} <- V_{j}^T * B * OP * v_{j} | -c | r_{j} <- OP*v_{j} - V_{j} * w_{j} | -c %-----------------------------------------% -c -c -c %------------------------------------------% -c | Compute the j Fourier coefficients w_{j} | -c | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}. | -c %------------------------------------------% -c - call cgemv ('C', n, j, one, v, ldv, workd(ipj), 1, - & zero, h(1,j), 1) -c -c %--------------------------------------% -c | Orthogonalize r_{j} against V_{j}. | -c | RESID contains OP*v_{j}. See STEP 3. | -c %--------------------------------------% -c - call cgemv ('N', n, j, -one, v, ldv, h(1,j), 1, - & one, resid, 1) -c - if (j .gt. 1) h(j,j-1) = cmplx(betaj, rzero) -c - call second (t4) -c - orth1 = .true. -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call ccopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*r_{j} | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call ccopy (n, resid, 1, workd(ipj), 1) - end if - 70 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH1 = .true. | -c | WORKD(IPJ:IPJ+N-1) := B*r_{j}. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - orth1 = .false. -c -c %------------------------------% -c | Compute the B-norm of r_{j}. | -c %------------------------------% -c - if (bmat .eq. 'G') then - cnorm = wcdotc (n, resid, 1, workd(ipj), 1) - rnorm = sqrt( slapy2(real(cnorm),aimag(cnorm)) ) - else if (bmat .eq. 
'I') then - rnorm = scnrm2(n, resid, 1) - end if -c -c %-----------------------------------------------------------% -c | STEP 5: Re-orthogonalization / Iterative refinement phase | -c | Maximum NITER_ITREF tries. | -c | | -c | s = V_{j}^T * B * r_{j} | -c | r_{j} = r_{j} - V_{j}*s | -c | alphaj = alphaj + s_{j} | -c | | -c | The stopping criteria used for iterative refinement is | -c | discussed in Parlett's book SEP, page 107 and in Gragg & | -c | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990. | -c | Determine if we need to correct the residual. The goal is | -c | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} || | -c | The following test determines whether the sine of the | -c | angle between OP*x and the computed residual is less | -c | than or equal to 0.717. | -c %-----------------------------------------------------------% -c - if ( rnorm .gt. 0.717*wnorm ) go to 100 -c - iter = 0 - nrorth = nrorth + 1 -c -c %---------------------------------------------------% -c | Enter the Iterative refinement phase. If further | -c | refinement is necessary, loop back here. The loop | -c | variable is ITER. Perform a step of Classical | -c | Gram-Schmidt using all the Arnoldi vectors V_{j} | -c %---------------------------------------------------% -c - 80 continue -c - if (msglvl .gt. 2) then - rtemp(1) = wnorm - rtemp(2) = rnorm - call svout (logfil, 2, rtemp, ndigit, - & '_naitr: re-orthogonalization; wnorm and rnorm are') - call cvout (logfil, j, h(1,j), ndigit, - & '_naitr: j-th column of H') - end if -c -c %----------------------------------------------------% -c | Compute V_{j}^T * B * r_{j}. | -c | WORKD(IRJ:IRJ+J-1) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1). | -c %----------------------------------------------------% -c - call cgemv ('C', n, j, one, v, ldv, workd(ipj), 1, - & zero, workd(irj), 1) -c -c %---------------------------------------------% -c | Compute the correction to the residual: | -c | r_{j} = r_{j} - V_{j} * WORKD(IRJ:IRJ+J-1). 
| -c | The correction to H is v(:,1:J)*H(1:J,1:J) | -c | + v(:,1:J)*WORKD(IRJ:IRJ+J-1)*e'_j. | -c %---------------------------------------------% -c - call cgemv ('N', n, j, -one, v, ldv, workd(irj), 1, - & one, resid, 1) - call caxpy (j, one, workd(irj), 1, h(1,j), 1) -c - orth2 = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call ccopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-----------------------------------% -c | Exit in order to compute B*r_{j}. | -c | r_{j} is the corrected residual. | -c %-----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call ccopy (n, resid, 1, workd(ipj), 1) - end if - 90 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH2 = .true. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c -c %-----------------------------------------------------% -c | Compute the B-norm of the corrected residual r_{j}. | -c %-----------------------------------------------------% -c - if (bmat .eq. 'G') then - cnorm = wcdotc (n, resid, 1, workd(ipj), 1) - rnorm1 = sqrt( slapy2(real(cnorm),aimag(cnorm)) ) - else if (bmat .eq. 'I') then - rnorm1 = scnrm2(n, resid, 1) - end if -c - if (msglvl .gt. 0 .and. iter .gt. 0 ) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: Iterative refinement for Arnoldi residual') - if (msglvl .gt. 2) then - rtemp(1) = rnorm - rtemp(2) = rnorm1 - call svout (logfil, 2, rtemp, ndigit, - & '_naitr: iterative refinement ; rnorm and rnorm1 are') - end if - end if -c -c %-----------------------------------------% -c | Determine if we need to perform another | -c | step of re-orthogonalization. | -c %-----------------------------------------% -c - if ( rnorm1 .gt. 0.717*rnorm ) then -c -c %---------------------------------------% -c | No need for further refinement. 
| -c | The cosine of the angle between the | -c | corrected residual vector and the old | -c | residual vector is greater than 0.717 | -c | In other words the corrected residual | -c | and the old residual vector share an | -c | angle of less than arcCOS(0.717) | -c %---------------------------------------% -c - rnorm = rnorm1 -c - else -c -c %-------------------------------------------% -c | Another step of iterative refinement step | -c | is required. NITREF is used by stat.h | -c %-------------------------------------------% -c - nitref = nitref + 1 - rnorm = rnorm1 - iter = iter + 1 - if (iter .le. 1) go to 80 -c -c %-------------------------------------------------% -c | Otherwise RESID is numerically in the span of V | -c %-------------------------------------------------% -c - do 95 jj = 1, n - resid(jj) = zero - 95 continue - rnorm = rzero - end if -c -c %----------------------------------------------% -c | Branch here directly if iterative refinement | -c | wasn't necessary or after at most NITER_REF | -c | steps of iterative refinement. | -c %----------------------------------------------% -c - 100 continue -c - rstart = .false. - orth2 = .false. -c - call second (t5) - titref = titref + (t5 - t4) -c -c %------------------------------------% -c | STEP 6: Update j = j+1; Continue | -c %------------------------------------% -c - j = j + 1 - if (j .gt. k+np) then - call second (t1) - tcaitr = tcaitr + (t1 - t0) - ido = 99 - do 110 i = max(1,k), k+np-1 -c -c %--------------------------------------------% -c | Check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine clahqr | -c %--------------------------------------------% -c - tst1 = slapy2(real(h(i,i)),aimag(h(i,i))) - & + slapy2(real(h(i+1,i+1)), aimag(h(i+1,i+1))) - if( tst1.eq.real(zero) ) - & tst1 = clanhs( '1', k+np, h, ldh, workd(n+1) ) - if( slapy2(real(h(i+1,i)),aimag(h(i+1,i))) .le. 
- & max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 110 continue -c - if (msglvl .gt. 2) then - call cmout (logfil, k+np, k+np, h, ldh, ndigit, - & '_naitr: Final upper Hessenberg matrix H of order K+NP') - end if -c - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Loop back to extend the factorization by another step. | -c %--------------------------------------------------------% -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 9000 continue - return -c -c %---------------% -c | End of cnaitr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnapps.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnapps.f deleted file mode 100644 index 0c8c85b4d3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnapps.f +++ /dev/null @@ -1,507 +0,0 @@ -c\BeginDoc -c -c\Name: cnapps -c -c\Description: -c Given the Arnoldi factorization -c -c A*V_{k} - V_{k}*H_{k} = r_{k+p}*e_{k+p}^T, -c -c apply NP implicit shifts resulting in -c -c A*(V_{k}*Q) - (V_{k}*Q)*(Q^T* H_{k}*Q) = r_{k+p}*e_{k+p}^T * Q -c -c where Q is an orthogonal matrix which is the product of rotations -c and reflections resulting from the NP bulge change sweeps. -c The updated Arnoldi factorization becomes: -c -c A*VNEW_{k} - VNEW_{k}*HNEW_{k} = rnew_{k}*e_{k}^T. -c -c\Usage: -c call cnapps -c ( N, KEV, NP, SHIFT, V, LDV, H, LDH, RESID, Q, LDQ, -c WORKL, WORKD ) -c -c\Arguments -c N Integer. (INPUT) -c Problem size, i.e. size of matrix A. -c -c KEV Integer. (INPUT/OUTPUT) -c KEV+NP is the size of the input matrix H. -c KEV is the size of the updated matrix HNEW. -c -c NP Integer. (INPUT) -c Number of implicit shifts to be applied. -c -c SHIFT Complex array of length NP. (INPUT) -c The shifts to be applied. 
-c -c V Complex N by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, V contains the current KEV+NP Arnoldi vectors. -c On OUTPUT, V contains the updated KEV Arnoldi vectors -c in the first KEV columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Complex (KEV+NP) by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, H contains the current KEV+NP by KEV+NP upper -c Hessenberg matrix of the Arnoldi factorization. -c On OUTPUT, H contains the updated KEV by KEV upper Hessenberg -c matrix in the KEV leading submatrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RESID Complex array of length N. (INPUT/OUTPUT) -c On INPUT, RESID contains the the residual vector r_{k+p}. -c On OUTPUT, RESID is the update residual vector rnew_{k} -c in the first KEV locations. -c -c Q Complex KEV+NP by KEV+NP work array. (WORKSPACE) -c Work array used to accumulate the rotations and reflections -c during the bulge chase sweep. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Complex work array of length (KEV+NP). (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c WORKD Complex work array of length 2*N. (WORKSPACE) -c Distributed array used in the application of the accumulated -c orthogonal matrix Q. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c cmout ARPACK utility routine that prints matrices -c cvout ARPACK utility routine that prints vectors. 
-c clacpy LAPACK matrix copy routine. -c clanhs LAPACK routine that computes various norms of a matrix. -c clartg LAPACK Givens rotation construction routine. -c claset LAPACK matrix initialization routine. -c slabad LAPACK routine for defining the underflow and overflow -c limits. -c slamch LAPACK routine that determines machine constants. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c cgemv Level 2 BLAS routine for matrix vector multiplication. -c caxpy Level 1 BLAS that computes a vector triad. -c ccopy Level 1 BLAS that copies one vector to another. -c cscal Level 1 BLAS that scales a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: napps.F SID: 2.3 DATE OF SID: 3/28/97 RELEASE: 2 -c -c\Remarks -c 1. In this version, each shift is applied to all the sublocks of -c the Hessenberg matrix H and not just to the submatrix that it -c comes from. Deflation as in LAPACK routine clahqr (QR algorithm -c for upper Hessenberg matrices ) is used. -c Upon output, the subdiagonals of H are enforced to be non-negative -c real numbers. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cnapps - & ( n, kev, np, shift, v, ldv, h, ldh, resid, q, ldq, - & workl, workd ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer kev, ldh, ldq, ldv, n, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex - & h(ldh,kev+np), resid(n), shift(np), - & v(ldv,kev+np), q(ldq,kev+np), workd(2*n), workl(kev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - Real - & rzero - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0), - & rzero = 0.0E+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - integer i, iend, istart, j, jj, kplusp, msglvl - logical first - Complex - & cdum, f, g, h11, h21, r, s, sigma, t - Real - & c, ovfl, smlnum, ulp, unfl, tst1 - save first, ovfl, smlnum, ulp, unfl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external caxpy, ccopy, cgemv, cscal, clacpy, clartg, - & cvout, claset, slabad, cmout, second, ivout -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & clanhs, slamch, slapy2 - external clanhs, slamch, slapy2 -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs, aimag, conjg, cmplx, max, min, real -c -c %---------------------% -c | Statement Functions | -c %---------------------% -c - Real - & cabs1 - cabs1( cdum ) = abs( real( cdum ) ) + abs( aimag( cdum ) ) -c -c %----------------% -c | Data statments | -c %----------------% -c - data first / .true. 
/ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------------% -c | Set machine-dependent constants for the | -c | stopping criterion. If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine clahqr | -c %-----------------------------------------------% -c - unfl = slamch( 'safe minimum' ) - ovfl = real(one / unfl) - call slabad( unfl, ovfl ) - ulp = slamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mcapps -c - kplusp = kev + np -c -c %--------------------------------------------% -c | Initialize Q to the identity to accumulate | -c | the rotations and reflections | -c %--------------------------------------------% -c - call claset ('All', kplusp, kplusp, zero, one, q, ldq) -c -c %----------------------------------------------% -c | Quick return if there are no shifts to apply | -c %----------------------------------------------% -c - if (np .eq. 0) go to 9000 -c -c %----------------------------------------------% -c | Chase the bulge with the application of each | -c | implicit shift. Each shift is applied to the | -c | whole matrix including each block. | -c %----------------------------------------------% -c - do 110 jj = 1, np - sigma = shift(jj) -c - if (msglvl .gt. 2 ) then - call ivout (logfil, 1, jj, ndigit, - & '_napps: shift number.') - call cvout (logfil, 1, sigma, ndigit, - & '_napps: Value of the shift ') - end if -c - istart = 1 - 20 continue -c - do 30 i = istart, kplusp-1 -c -c %----------------------------------------% -c | Check for splitting and deflation. 
Use | -c | a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine clahqr | -c %----------------------------------------% -c - tst1 = cabs1( h( i, i ) ) + cabs1( h( i+1, i+1 ) ) - if( tst1.eq.rzero ) - & tst1 = clanhs( '1', kplusp-jj+1, h, ldh, workl ) - if ( abs(real(h(i+1,i))) - & .le. max(ulp*tst1, smlnum) ) then - if (msglvl .gt. 0) then - call ivout (logfil, 1, i, ndigit, - & '_napps: matrix splitting at row/column no.') - call ivout (logfil, 1, jj, ndigit, - & '_napps: matrix splitting with shift number.') - call cvout (logfil, 1, h(i+1,i), ndigit, - & '_napps: off diagonal element.') - end if - iend = i - h(i+1,i) = zero - go to 40 - end if - 30 continue - iend = kplusp - 40 continue -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, istart, ndigit, - & '_napps: Start of current block ') - call ivout (logfil, 1, iend, ndigit, - & '_napps: End of current block ') - end if -c -c %------------------------------------------------% -c | No reason to apply a shift to block of order 1 | -c | or if the current block starts after the point | -c | of compression since we'll discard this stuff | -c %------------------------------------------------% -c - if ( istart .eq. iend .or. istart .gt. kev) go to 100 -c - h11 = h(istart,istart) - h21 = h(istart+1,istart) - f = h11 - sigma - g = h21 -c - do 80 i = istart, iend-1 -c -c %------------------------------------------------------% -c | Construct the plane rotation G to zero out the bulge | -c %------------------------------------------------------% -c - call clartg (f, g, c, s, r) - if (i .gt. 
istart) then - h(i,i-1) = r - h(i+1,i-1) = zero - end if -c -c %---------------------------------------------% -c | Apply rotation to the left of H; H <- G'*H | -c %---------------------------------------------% -c - do 50 j = i, kplusp - t = c*h(i,j) + s*h(i+1,j) - h(i+1,j) = -conjg(s)*h(i,j) + c*h(i+1,j) - h(i,j) = t - 50 continue -c -c %---------------------------------------------% -c | Apply rotation to the right of H; H <- H*G | -c %---------------------------------------------% -c - do 60 j = 1, min(i+2,iend) - t = c*h(j,i) + conjg(s)*h(j,i+1) - h(j,i+1) = -s*h(j,i) + c*h(j,i+1) - h(j,i) = t - 60 continue -c -c %-----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G' | -c %-----------------------------------------------------% -c - do 70 j = 1, min(i+jj, kplusp) - t = c*q(j,i) + conjg(s)*q(j,i+1) - q(j,i+1) = - s*q(j,i) + c*q(j,i+1) - q(j,i) = t - 70 continue -c -c %---------------------------% -c | Prepare for next rotation | -c %---------------------------% -c - if (i .lt. iend-1) then - f = h(i+1,i) - g = h(i+2,i) - end if - 80 continue -c -c %-------------------------------% -c | Finished applying the shift. | -c %-------------------------------% -c - 100 continue -c -c %---------------------------------------------------------% -c | Apply the same shift to the next block if there is any. | -c %---------------------------------------------------------% -c - istart = iend + 1 - if (iend .lt. kplusp) go to 20 -c -c %---------------------------------------------% -c | Loop back to the top to get the next shift. | -c %---------------------------------------------% -c - 110 continue -c -c %---------------------------------------------------% -c | Perform a similarity transformation that makes | -c | sure that the compressed H will have non-negative | -c | real subdiagonal elements. | -c %---------------------------------------------------% -c - do 120 j=1,kev - if ( real( h(j+1,j) ) .lt. rzero .or. 
- & aimag( h(j+1,j) ) .ne. rzero ) then - t = h(j+1,j) / slapy2(real(h(j+1,j)),aimag(h(j+1,j))) - call cscal( kplusp-j+1, conjg(t), h(j+1,j), ldh ) - call cscal( min(j+2, kplusp), t, h(1,j+1), 1 ) - call cscal( min(j+np+1,kplusp), t, q(1,j+1), 1 ) - h(j+1,j) = cmplx( real( h(j+1,j) ), rzero ) - end if - 120 continue -c - do 130 i = 1, kev -c -c %--------------------------------------------% -c | Final check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine clahqr. | -c | Note: Since the subdiagonals of the | -c | compressed H are nonnegative real numbers, | -c | we take advantage of this. | -c %--------------------------------------------% -c - tst1 = cabs1( h( i, i ) ) + cabs1( h( i+1, i+1 ) ) - if( tst1 .eq. rzero ) - & tst1 = clanhs( '1', kev, h, ldh, workl ) - if( real( h( i+1,i ) ) .le. max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 130 continue -c -c %-------------------------------------------------% -c | Compute the (kev+1)-st column of (V*Q) and | -c | temporarily store the result in WORKD(N+1:2*N). | -c | This is needed in the residual update since we | -c | cannot GUARANTEE that the corresponding entry | -c | of H would be zero as in exact arithmetic. | -c %-------------------------------------------------% -c - if ( real( h(kev+1,kev) ) .gt. rzero ) - & call cgemv ('N', n, kplusp, one, v, ldv, q(1,kev+1), 1, zero, - & workd(n+1), 1) -c -c %----------------------------------------------------------% -c | Compute column 1 to kev of (V*Q) in backward order | -c | taking advantage of the upper Hessenberg structure of Q. | -c %----------------------------------------------------------% -c - do 140 i = 1, kev - call cgemv ('N', n, kplusp-i+1, one, v, ldv, - & q(1,kev-i+1), 1, zero, workd, 1) - call ccopy (n, workd, 1, v(1,kplusp-i+1), 1) - 140 continue -c -c %-------------------------------------------------% -c | Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). 
| -c %-------------------------------------------------% -c - call clacpy ('A', n, kev, v(1,kplusp-kev+1), ldv, v, ldv) -c -c %--------------------------------------------------------------% -c | Copy the (kev+1)-st column of (V*Q) in the appropriate place | -c %--------------------------------------------------------------% -c - if ( real( h(kev+1,kev) ) .gt. rzero ) - & call ccopy (n, workd(n+1), 1, v(1,kev+1), 1) -c -c %-------------------------------------% -c | Update the residual vector: | -c | r <- sigmak*r + betak*v(:,kev+1) | -c | where | -c | sigmak = (e_{kev+p}'*Q)*e_{kev} | -c | betak = e_{kev+1}'*H*e_{kev} | -c %-------------------------------------% -c - call cscal (n, q(kplusp,kev), resid, 1) - if ( real( h(kev+1,kev) ) .gt. rzero ) - & call caxpy (n, h(kev+1,kev), v(1,kev+1), 1, resid, 1) -c - if (msglvl .gt. 1) then - call cvout (logfil, 1, q(kplusp,kev), ndigit, - & '_napps: sigmak = (e_{kev+p}^T*Q)*e_{kev}') - call cvout (logfil, 1, h(kev+1,kev), ndigit, - & '_napps: betak = e_{kev+1}^T*H*e_{kev}') - call ivout (logfil, 1, kev, ndigit, - & '_napps: Order of the final Hessenberg matrix ') - if (msglvl .gt. 2) then - call cmout (logfil, kev, kev, h, ldh, ndigit, - & '_napps: updated Hessenberg matrix H for next iteration') - end if -c - end if -c - 9000 continue - call second (t1) - tcapps = tcapps + (t1 - t0) -c - return -c -c %---------------% -c | End of cnapps | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaup2.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaup2.f deleted file mode 100644 index da8ed1f5db..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaup2.f +++ /dev/null @@ -1,801 +0,0 @@ -c\BeginDoc -c -c\Name: cnaup2 -c -c\Description: -c Intermediate level interface called by cnaupd. 
-c -c\Usage: -c call cnaup2 -c ( IDO, BMAT, N, WHICH, NEV, NP, TOL, RESID, MODE, IUPD, -c ISHIFT, MXITER, V, LDV, H, LDH, RITZ, BOUNDS, -c Q, LDQ, WORKL, IPNTR, WORKD, RWORK, INFO ) -c -c\Arguments -c -c IDO, BMAT, N, WHICH, NEV, TOL, RESID: same as defined in cnaupd. -c MODE, ISHIFT, MXITER: see the definition of IPARAM in cnaupd. -c -c NP Integer. (INPUT/OUTPUT) -c Contains the number of implicit shifts to apply during -c each Arnoldi iteration. -c If ISHIFT=1, NP is adjusted dynamically at each iteration -c to accelerate convergence and prevent stagnation. -c This is also roughly equal to the number of matrix-vector -c products (involving the operator OP) per Arnoldi iteration. -c The logic for adjusting is contained within the current -c subroutine. -c If ISHIFT=0, NP is the number of shifts the user needs -c to provide via reverse comunication. 0 < NP < NCV-NEV. -c NP may be less than NCV-NEV since a leading block of the current -c upper Hessenberg matrix has split off and contains "unwanted" -c Ritz values. -c Upon termination of the IRA iteration, NP contains the number -c of "converged" wanted Ritz values. -c -c IUPD Integer. (INPUT) -c IUPD .EQ. 0: use explicit restart instead implicit update. -c IUPD .NE. 0: use implicit update. -c -c V Complex N by (NEV+NP) array. (INPUT/OUTPUT) -c The Arnoldi basis vectors are returned in the first NEV -c columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Complex (NEV+NP) by (NEV+NP) array. (OUTPUT) -c H is used to store the generated upper Hessenberg matrix -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZ Complex array of length NEV+NP. (OUTPUT) -c RITZ(1:NEV) contains the computed Ritz values of OP. -c -c BOUNDS Complex array of length NEV+NP. (OUTPUT) -c BOUNDS(1:NEV) contain the error bounds corresponding to -c the computed Ritz values. -c -c Q Complex (NEV+NP) by (NEV+NP) array. 
(WORKSPACE) -c Private (replicated) work array used to accumulate the -c rotation in the shift application step. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Complex work array of length at least -c (NEV+NP)**2 + 3*(NEV+NP). (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. It is used in shifts calculation, shifts -c application and convergence checking. -c -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORKD for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Complex work array of length 3*N. (WORKSPACE) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note in CNAUPD. -c -c RWORK Real work array of length NEV+NP ( WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal return. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. -c NP returns the number of converged Ritz values. -c = 2: No shifts could be applied. -c = -8: Error return from LAPACK eigenvalue calculation; -c This should never happen. -c = -9: Starting vector is zero. -c = -9999: Could not build an Arnoldi factorization. 
-c Size that was built in returned in NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c cgetv0 ARPACK initial vector generation routine. -c cnaitr ARPACK Arnoldi factorization routine. -c cnapps ARPACK application of implicit shifts routine. -c cneigh ARPACK compute Ritz values and error bounds routine. -c cngets ARPACK reorder Ritz values and error bounds routine. -c csortc ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c cmout ARPACK utility routine that prints matrices -c cvout ARPACK utility routine that prints vectors. -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c ccopy Level 1 BLAS that copies one vector to another . -c wcdotc Level 1 BLAS that computes the scalar product of two vectors. -c cswap Level 1 BLAS that swaps two vectors. -c scnrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice Universitya -c Chao Yang Houston, Texas -c Dept. of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naup2.F SID: 2.6 DATE OF SID: 06/01/00 RELEASE: 2 -c -c\Remarks -c 1. 
None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cnaup2 - & ( ido, bmat, n, which, nev, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, h, ldh, ritz, bounds, - & q, ldq, workl, ipntr, workd, rwork, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ishift, iupd, mode, ldh, ldq, ldv, mxiter, - & n, nev, np - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(13) - Complex - & bounds(nev+np), h(ldh,nev+np), q(ldq,nev+np), - & resid(n), ritz(nev+np), v(ldv,nev+np), - & workd(3*n), workl( (nev+np)*(nev+np+3) ) - Real - & rwork(nev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - Real - & rzero - parameter (one = (1.0E+0, 0.0E+0) , zero = (0.0E+0, 0.0E+0) , - & rzero = 0.0E+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical cnorm , getv0, initv , update, ushift - integer ierr , iter , kplusp, msglvl, nconv, - & nevbef, nev0 , np0 , nptemp, i , - & j - Complex - & cmpnorm - Real - & rnorm , eps23, rtemp - character wprime*2 -c - save cnorm, getv0, initv , update, ushift, - & rnorm, iter , kplusp, msglvl, nconv , - & nevbef, nev0 , np0 , eps23 -c -c -c %-----------------------% -c | Local array arguments | -c %-----------------------% -c - integer kp(3) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external ccopy, cgetv0, cnaitr, cneigh, cngets, cnapps, - & csortc, cswap, cmout, cvout, ivout, second -c -c %--------------------% -c | External functions | -c %--------------------% -c - Complex - & wcdotc - Real - & scnrm2, slamch, slapy2 - 
external wcdotc, scnrm2, slamch, slapy2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic aimag, real , min, max -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c - call second (t0) -c - msglvl = mcaup2 -c - nev0 = nev - np0 = np -c -c %-------------------------------------% -c | kplusp is the bound on the largest | -c | Lanczos factorization built. | -c | nconv is the current number of | -c | "converged" eigenvalues. | -c | iter is the counter on the current | -c | iteration step. | -c %-------------------------------------% -c - kplusp = nev + np - nconv = 0 - iter = 0 -c -c %---------------------------------% -c | Get machine dependent constant. | -c %---------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0 ) -c -c %---------------------------------------% -c | Set flags for computing the first NEV | -c | steps of the Arnoldi factorization. | -c %---------------------------------------% -c - getv0 = .true. - update = .false. - ushift = .false. - cnorm = .false. -c - if (info .ne. 0) then -c -c %--------------------------------------------% -c | User provides the initial residual vector. | -c %--------------------------------------------% -c - initv = .true. - info = 0 - else - initv = .false. - end if - end if -c -c %---------------------------------------------% -c | Get a possibly random starting vector and | -c | force it into the range of the operator OP. | -c %---------------------------------------------% -c - 10 continue -c - if (getv0) then - call cgetv0 (ido, bmat, 1, initv, n, 1, v, ldv, resid, rnorm, - & ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (rnorm .eq. rzero) then -c -c %-----------------------------------------% -c | The initial vector is zero. Error exit. | -c %-----------------------------------------% -c - info = -9 - go to 1100 - end if - getv0 = .false. 
- ido = 0 - end if -c -c %-----------------------------------% -c | Back from reverse communication : | -c | continue with update step | -c %-----------------------------------% -c - if (update) go to 20 -c -c %-------------------------------------------% -c | Back from computing user specified shifts | -c %-------------------------------------------% -c - if (ushift) go to 50 -c -c %-------------------------------------% -c | Back from computing residual norm | -c | at the end of the current iteration | -c %-------------------------------------% -c - if (cnorm) go to 100 -c -c %----------------------------------------------------------% -c | Compute the first NEV steps of the Arnoldi factorization | -c %----------------------------------------------------------% -c - call cnaitr (ido, bmat, n, 0, nev, mode, resid, rnorm, v, ldv, - & h, ldh, ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if -c -c %--------------------------------------------------------------% -c | | -c | M A I N ARNOLDI I T E R A T I O N L O O P | -c | Each iteration implicitly restarts the Arnoldi | -c | factorization in place. | -c | | -c %--------------------------------------------------------------% -c - 1000 continue -c - iter = iter + 1 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, iter, ndigit, - & '_naup2: **** Start of major iteration number ****') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c | Adjust NP since NEV might have been updated by last call | -c | to the shift application routine cnapps. | -c %-----------------------------------------------------------% -c - np = kplusp - nev -c - if (msglvl .gt. 
1) then - call ivout (logfil, 1, nev, ndigit, - & '_naup2: The length of the current Arnoldi factorization') - call ivout (logfil, 1, np, ndigit, - & '_naup2: Extend the Arnoldi factorization by') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c %-----------------------------------------------------------% -c - ido = 0 - 20 continue - update = .true. -c - call cnaitr(ido, bmat, n, nev, np, mode, resid, rnorm, - & v , ldv , h, ldh, ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if - update = .false. -c - if (msglvl .gt. 1) then - call svout (logfil, 1, rnorm, ndigit, - & '_naup2: Corresponding B-norm of the residual') - end if -c -c %--------------------------------------------------------% -c | Compute the eigenvalues and corresponding error bounds | -c | of the current upper Hessenberg matrix. | -c %--------------------------------------------------------% -c - call cneigh (rnorm, kplusp, h, ldh, ritz, bounds, - & q, ldq, workl, rwork, ierr) -c - if (ierr .ne. 0) then - info = -8 - go to 1200 - end if -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The wanted part of the spectrum and corresponding | -c | error bounds are in the last NEV loc. of RITZ, | -c | and BOUNDS respectively. | -c %---------------------------------------------------% -c - nev = nev0 - np = np0 -c -c %--------------------------------------------------% -c | Make a copy of Ritz values and the corresponding | -c | Ritz estimates obtained from cneigh. 
| -c %--------------------------------------------------% -c - call ccopy(kplusp,ritz,1,workl(kplusp**2+1),1) - call ccopy(kplusp,bounds,1,workl(kplusp**2+kplusp+1),1) -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The wanted part of the spectrum and corresponding | -c | bounds are in the last NEV loc. of RITZ | -c | BOUNDS respectively. | -c %---------------------------------------------------% -c - call cngets (ishift, which, nev, np, ritz, bounds) -c -c %------------------------------------------------------------% -c | Convergence test: currently we use the following criteria. | -c | The relative accuracy of a Ritz value is considered | -c | acceptable if: | -c | | -c | error_bounds(i) .le. tol*max(eps23, magnitude_of_ritz(i)). | -c | | -c %------------------------------------------------------------% -c - nconv = 0 -c - do 25 i = 1, nev - rtemp = max( eps23, slapy2( real (ritz(np+i)), - & aimag(ritz(np+i)) ) ) - if ( slapy2(real (bounds(np+i)),aimag(bounds(np+i))) - & .le. tol*rtemp ) then - nconv = nconv + 1 - end if - 25 continue -c - if (msglvl .gt. 2) then - kp(1) = nev - kp(2) = np - kp(3) = nconv - call ivout (logfil, 3, kp, ndigit, - & '_naup2: NEV, NP, NCONV are') - call cvout (logfil, kplusp, ritz, ndigit, - & '_naup2: The eigenvalues of H') - call cvout (logfil, kplusp, bounds, ndigit, - & '_naup2: Ritz estimates of the current NCV Ritz values') - end if -c -c %---------------------------------------------------------% -c | Count the number of unwanted Ritz values that have zero | -c | Ritz estimates. If any Ritz estimates are equal to zero | -c | then a leading block of H of order equal to at least | -c | the number of Ritz values with zero Ritz estimates has | -c | split off. None of these Ritz values may be removed by | -c | shifting. Decrease NP the number of shifts to apply. 
If | -c | no shifts may be applied, then prepare to exit | -c %---------------------------------------------------------% -c - nptemp = np - do 30 j=1, nptemp - if (bounds(j) .eq. zero) then - np = np - 1 - nev = nev + 1 - end if - 30 continue -c - if ( (nconv .ge. nev0) .or. - & (iter .gt. mxiter) .or. - & (np .eq. 0) ) then -c - if (msglvl .gt. 4) then - call cvout(logfil, kplusp, workl(kplusp**2+1), ndigit, - & '_naup2: Eigenvalues computed by _neigh:') - call cvout(logfil, kplusp, workl(kplusp**2+kplusp+1), - & ndigit, - & '_naup2: Ritz estimates computed by _neigh:') - end if -c -c %------------------------------------------------% -c | Prepare to exit. Put the converged Ritz values | -c | and corresponding bounds in RITZ(1:NCONV) and | -c | BOUNDS(1:NCONV) respectively. Then sort. Be | -c | careful when NCONV > NP | -c %------------------------------------------------% -c -c %------------------------------------------% -c | Use h( 3,1 ) as storage to communicate | -c | rnorm to cneupd if needed | -c %------------------------------------------% - - h(3,1) = cmplx(rnorm,rzero) -c -c %----------------------------------------------% -c | Sort Ritz values so that converged Ritz | -c | values appear within the first NEV locations | -c | of ritz and bounds, and the most desired one | -c | appears at the front. | -c %----------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SM' - if (which .eq. 'SM') wprime = 'LM' - if (which .eq. 'LR') wprime = 'SR' - if (which .eq. 'SR') wprime = 'LR' - if (which .eq. 'LI') wprime = 'SI' - if (which .eq. 'SI') wprime = 'LI' -c - call csortc(wprime, .true., kplusp, ritz, bounds) -c -c %--------------------------------------------------% -c | Scale the Ritz estimate of each Ritz value | -c | by 1 / max(eps23, magnitude of the Ritz value). 
| -c %--------------------------------------------------% -c - do 35 j = 1, nev0 - rtemp = max( eps23, slapy2( real (ritz(j)), - & aimag(ritz(j)) ) ) - bounds(j) = bounds(j)/rtemp - 35 continue -c -c %---------------------------------------------------% -c | Sort the Ritz values according to the scaled Ritz | -c | estimates. This will push all the converged ones | -c | towards the front of ritz, bounds (in the case | -c | when NCONV < NEV.) | -c %---------------------------------------------------% -c - wprime = 'LM' - call csortc(wprime, .true., nev0, bounds, ritz) -c -c %----------------------------------------------% -c | Scale the Ritz estimate back to its original | -c | value. | -c %----------------------------------------------% -c - do 40 j = 1, nev0 - rtemp = max( eps23, slapy2( real (ritz(j)), - & aimag(ritz(j)) ) ) - bounds(j) = bounds(j)*rtemp - 40 continue -c -c %-----------------------------------------------% -c | Sort the converged Ritz values again so that | -c | the "threshold" value appears at the front of | -c | ritz and bound. | -c %-----------------------------------------------% -c - call csortc(which, .true., nconv, ritz, bounds) -c - if (msglvl .gt. 1) then - call cvout (logfil, kplusp, ritz, ndigit, - & '_naup2: Sorted eigenvalues') - call cvout (logfil, kplusp, bounds, ndigit, - & '_naup2: Sorted ritz estimates.') - end if -c -c %------------------------------------% -c | Max iterations have been exceeded. | -c %------------------------------------% -c - if (iter .gt. mxiter .and. nconv .lt. nev0) info = 1 -c -c %---------------------% -c | No shifts to apply. | -c %---------------------% -c - if (np .eq. 0 .and. nconv .lt. nev0) info = 2 -c - np = nconv - go to 1100 -c - else if ( (nconv .lt. nev0) .and. (ishift .eq. 1) ) then -c -c %-------------------------------------------------% -c | Do not have all the requested eigenvalues yet. | -c | To prevent possible stagnation, adjust the size | -c | of NEV. 
| -c %-------------------------------------------------% -c - nevbef = nev - nev = nev + min(nconv, np/2) - if (nev .eq. 1 .and. kplusp .ge. 6) then - nev = kplusp / 2 - else if (nev .eq. 1 .and. kplusp .gt. 3) then - nev = 2 - end if - np = kplusp - nev -c -c %---------------------------------------% -c | If the size of NEV was just increased | -c | resort the eigenvalues. | -c %---------------------------------------% -c - if (nevbef .lt. nev) - & call cngets (ishift, which, nev, np, ritz, bounds) -c - end if -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, nconv, ndigit, - & '_naup2: no. of "converged" Ritz values at this iter.') - if (msglvl .gt. 1) then - kp(1) = nev - kp(2) = np - call ivout (logfil, 2, kp, ndigit, - & '_naup2: NEV and NP are') - call cvout (logfil, nev, ritz(np+1), ndigit, - & '_naup2: "wanted" Ritz values ') - call cvout (logfil, nev, bounds(np+1), ndigit, - & '_naup2: Ritz estimates of the "wanted" values ') - end if - end if -c - if (ishift .eq. 0) then -c -c %-------------------------------------------------------% -c | User specified shifts: pop back out to get the shifts | -c | and return them in the first 2*NP locations of WORKL. | -c %-------------------------------------------------------% -c - ushift = .true. - ido = 3 - go to 9000 - end if - 50 continue - ushift = .false. -c - if ( ishift .ne. 1 ) then -c -c %----------------------------------% -c | Move the NP shifts from WORKL to | -c | RITZ, to free up WORKL | -c | for non-exact shift case. | -c %----------------------------------% -c - call ccopy (np, workl, 1, ritz, 1) - end if -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, np, ndigit, - & '_naup2: The number of shifts to apply ') - call cvout (logfil, np, ritz, ndigit, - & '_naup2: values of the shifts') - if ( ishift .eq. 
1 ) - & call cvout (logfil, np, bounds, ndigit, - & '_naup2: Ritz estimates of the shifts') - end if -c -c %---------------------------------------------------------% -c | Apply the NP implicit shifts by QR bulge chasing. | -c | Each shift is applied to the whole upper Hessenberg | -c | matrix H. | -c | The first 2*N locations of WORKD are used as workspace. | -c %---------------------------------------------------------% -c - call cnapps (n, nev, np, ritz, v, ldv, - & h, ldh, resid, q, ldq, workl, workd) -c -c %---------------------------------------------% -c | Compute the B-norm of the updated residual. | -c | Keep B*RESID in WORKD(1:N) to be used in | -c | the first step of the next call to cnaitr. | -c %---------------------------------------------% -c - cnorm = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call ccopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*RESID | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call ccopy (n, resid, 1, workd, 1) - end if -c - 100 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(1:N) := B*RESID | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - cmpnorm = wcdotc (n, resid, 1, workd, 1) - rnorm = sqrt(slapy2(real (cmpnorm),aimag(cmpnorm))) - else if (bmat .eq. 'I') then - rnorm = scnrm2(n, resid, 1) - end if - cnorm = .false. -c - if (msglvl .gt. 
2) then - call svout (logfil, 1, rnorm, ndigit, - & '_naup2: B-norm of residual for compressed factorization') - call cmout (logfil, nev, nev, h, ldh, ndigit, - & '_naup2: Compressed upper Hessenberg matrix H') - end if -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 1100 continue -c - mxiter = iter - nev = nconv -c - 1200 continue - ido = 99 -c -c %------------% -c | Error Exit | -c %------------% -c - call second (t1) - tcaup2 = t1 - t0 -c - 9000 continue -c -c %---------------% -c | End of cnaup2 | -c %---------------% -c - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaupd.f deleted file mode 100644 index 1d15534d46..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cnaupd.f +++ /dev/null @@ -1,664 +0,0 @@ -c\BeginDoc -c -c\Name: cnaupd -c -c\Description: -c Reverse communication interface for the Implicitly Restarted Arnoldi -c iteration. This is intended to be used to find a few eigenpairs of a -c complex linear operator OP with respect to a semi-inner product defined -c by a hermitian positive semi-definite real matrix B. B may be the identity -c matrix. NOTE: if both OP and B are real, then ssaupd or snaupd should -c be used. -c -c -c The computed approximate eigenvalues are called Ritz values and -c the corresponding approximate eigenvectors are called Ritz vectors. -c -c cnaupd is usually called iteratively to solve one of the -c following problems: -c -c Mode 1: A*x = lambda*x. -c ===> OP = A and B = I. -c -c Mode 2: A*x = lambda*M*x, M hermitian positive definite -c ===> OP = inv[M]*A and B = M. -c ===> (If M can be factored see remark 3 below) -c -c Mode 3: A*x = lambda*M*x, M hermitian semi-definite -c ===> OP = inv[A - sigma*M]*M and B = M. 
-c ===> shift-and-invert mode -c If OP*x = amu*x, then lambda = sigma + 1/amu. -c -c -c NOTE: The action of w <- inv[A - sigma*M]*v or w <- inv[M]*v -c should be accomplished either by a direct method -c using a sparse matrix factorization and solving -c -c [A - sigma*M]*w = v or M*w = v, -c -c or through an iterative method for solving these -c systems. If an iterative method is used, the -c convergence test must be more stringent than -c the accuracy requirements for the eigenvalue -c approximations. -c -c\Usage: -c call cnaupd -c ( IDO, BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, -c IPNTR, WORKD, WORKL, LWORKL, RWORK, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to cnaupd. IDO will be set internally to -c indicate the type of operation to be performed. Control is -c then given back to the calling routine which has the -c responsibility to carry out the requested operation and call -c cnaupd with the result. The operand is given in -c WORKD(IPNTR(1)), the result must be put in WORKD(IPNTR(2)). -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c In mode 3, the vector B * X is already -c available in WORKD(ipntr(3)). It does not -c need to be recomputed in forming OP * X. -c IDO = 2: compute Y = M * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 3: compute and return the shifts in the first -c NP locations of WORKL. 
-c IDO = 99: done -c ------------------------------------------------------------- -c After the initialization phase, when the routine is used in -c the "shift-and-invert" mode, the vector M * X is already -c available and does not need to be recomputed in forming OP*X. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. -c BMAT = 'I' -> standard eigenvalue problem A*x = lambda*x -c BMAT = 'G' -> generalized eigenvalue problem A*x = lambda*M*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c WHICH Character*2. (INPUT) -c 'LM' -> want the NEV eigenvalues of largest magnitude. -c 'SM' -> want the NEV eigenvalues of smallest magnitude. -c 'LR' -> want the NEV eigenvalues of largest real part. -c 'SR' -> want the NEV eigenvalues of smallest real part. -c 'LI' -> want the NEV eigenvalues of largest imaginary part. -c 'SI' -> want the NEV eigenvalues of smallest imaginary part. -c -c NEV Integer. (INPUT) -c Number of eigenvalues of OP to be computed. 0 < NEV < N-1. -c -c TOL Real scalar. (INPUT) -c Stopping criteria: the relative accuracy of the Ritz value -c is considered acceptable if BOUNDS(I) .LE. TOL*ABS(RITZ(I)) -c where ABS(RITZ(I)) is the magnitude when RITZ(I) is complex. -c DEFAULT = slamch('EPS') (machine precision as computed -c by the LAPACK auxiliary subroutine slamch). -c -c RESID Complex array of length N. (INPUT/OUTPUT) -c On INPUT: -c If INFO .EQ. 0, a random initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c On OUTPUT: -c RESID contains the final residual vector. -c -c NCV Integer. (INPUT) -c Number of columns of the matrix V. NCV must satisfy the two -c inequalities 1 <= NCV-NEV and NCV <= N. -c This will indicate how many Arnoldi vectors are generated -c at each iteration. 
After the startup phase in which NEV -c Arnoldi vectors are generated, the algorithm generates -c approximately NCV-NEV Arnoldi vectors at each subsequent update -c iteration. Most of the cost in generating each Arnoldi vector is -c in the matrix-vector operation OP*x. (See remark 4 below.) -c -c V Complex array N by NCV. (OUTPUT) -c Contains the final set of Arnoldi basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling program. -c -c IPARAM Integer array of length 11. (INPUT/OUTPUT) -c IPARAM(1) = ISHIFT: method for selecting the implicit shifts. -c The shifts selected at each iteration are used to filter out -c the components of the unwanted eigenvector. -c ------------------------------------------------------------- -c ISHIFT = 0: the shifts are to be provided by the user via -c reverse communication. The NCV eigenvalues of -c the Hessenberg matrix H are returned in the part -c of WORKL array corresponding to RITZ. -c ISHIFT = 1: exact shifts with respect to the current -c Hessenberg matrix H. This is equivalent to -c restarting the iteration from the beginning -c after updating the starting vector with a linear -c combination of Ritz vectors associated with the -c "wanted" eigenvalues. -c ISHIFT = 2: other choice of internal shift to be defined. -c ------------------------------------------------------------- -c -c IPARAM(2) = No longer referenced -c -c IPARAM(3) = MXITER -c On INPUT: maximum number of Arnoldi update iterations allowed. -c On OUTPUT: actual number of Arnoldi update iterations taken. -c -c IPARAM(4) = NB: blocksize to be used in the recurrence. -c The code currently works only for NB = 1. -c -c IPARAM(5) = NCONV: number of "converged" Ritz values. -c This represents the number of Ritz values that satisfy -c the convergence criterion. -c -c IPARAM(6) = IUPD -c No longer referenced. Implicit restarting is ALWAYS used. 
-c -c IPARAM(7) = MODE -c On INPUT determines what type of eigenproblem is being solved. -c Must be 1,2,3; See under \Description of cnaupd for the -c four modes available. -c -c IPARAM(8) = NP -c When ido = 3 and the user provides shifts through reverse -c communication (IPARAM(1)=0), _naupd returns NP, the number -c of shifts the user is to provide. 0 < NP < NCV-NEV. -c -c IPARAM(9) = NUMOP, IPARAM(10) = NUMOPB, IPARAM(11) = NUMREO, -c OUTPUT: NUMOP = total number of OP*x operations, -c NUMOPB = total number of B*x operations if BMAT='G', -c NUMREO = total number of steps of re-orthogonalization. -c -c IPNTR Integer array of length 14. (OUTPUT) -c Pointer to mark the starting locations in the WORKD and WORKL -c arrays for matrices/vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X in WORKD. -c IPNTR(2): pointer to the current result vector Y in WORKD. -c IPNTR(3): pointer to the vector B * X in WORKD when used in -c the shift-and-invert mode. -c IPNTR(4): pointer to the next available location in WORKL -c that is untouched by the program. -c IPNTR(5): pointer to the NCV by NCV upper Hessenberg -c matrix H in WORKL. -c IPNTR(6): pointer to the ritz value array RITZ -c IPNTR(7): pointer to the (projected) ritz vector array Q -c IPNTR(8): pointer to the error BOUNDS array in WORKL. -c IPNTR(14): pointer to the NP shifts in WORKL. See Remark 5 below. -c -c Note: IPNTR(9:13) is only referenced by cneupd. See Remark 2 below. -c -c IPNTR(9): pointer to the NCV RITZ values of the -c original system. -c IPNTR(10): Not Used -c IPNTR(11): pointer to the NCV corresponding error bounds. -c IPNTR(12): pointer to the NCV by NCV upper triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c cneupd if RVEC = .TRUE. See Remark 2 below. 
-c -c ------------------------------------------------------------- -c -c WORKD Complex work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note below. -c -c WORKL Complex work array of length LWORKL. (OUTPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. See Data Distribution Note below. -c -c LWORKL Integer. (INPUT) -c LWORKL must be at least 3*NCV**2 + 5*NCV. -c -c RWORK Real work array of length NCV (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal exit. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. IPARAM(5) -c returns the number of wanted converged Ritz values. -c = 2: No longer an informational error. Deprecated starting -c with release 2 of ARPACK. -c = 3: No shifts could be applied during a cycle of the -c Implicitly restarted Arnoldi iteration. One possibility -c is to increase the size of NCV relative to NEV. -c See remark 4 below. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 1 and less than or equal to N. -c = -4: The maximum number of Arnoldi update iteration -c must be greater than zero. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work array is not sufficient. -c = -8: Error return from LAPACK eigenvalue calculation; -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. 
-c = -12: IPARAM(1) must be equal to 0 or 1. -c = -9999: Could not build an Arnoldi factorization. -c User input error highly likely. Please -c check actual array dimensions and layout. -c IPARAM(5) returns the size of the current Arnoldi -c factorization. -c -c\Remarks -c 1. The computed Ritz values are approximate eigenvalues of OP. The -c selection of WHICH should be made with this in mind when using -c Mode = 3. When operating in Mode = 3 setting WHICH = 'LM' will -c compute the NEV eigenvalues of the original problem that are -c closest to the shift SIGMA . After convergence, approximate eigenvalues -c of the original problem may be obtained with the ARPACK subroutine cneupd. -c -c 2. If a basis for the invariant subspace corresponding to the converged Ritz -c values is needed, the user must call cneupd immediately following -c completion of cnaupd. This is new starting with release 2 of ARPACK. -c -c 3. If M can be factored into a Cholesky factorization M = LL` -c then Mode = 2 should not be selected. Instead one should use -c Mode = 1 with OP = inv(L)*A*inv(L`). Appropriate triangular -c linear systems should be solved with L and L` rather -c than computing inverses. After convergence, an approximate -c eigenvector z of the original problem is recovered by solving -c L`z = x where x is a Ritz vector of OP. -c -c 4. At present there is no a-priori analysis to guide the selection -c of NCV relative to NEV. The only formal requirement is that NCV > NEV + 1. -c However, it is recommended that NCV .ge. 2*NEV. If many problems of -c the same type are to be solved, one should experiment with increasing -c NCV while keeping NEV fixed for a given test problem. This will -c usually decrease the required number of OP*x operations but it -c also increases the work and storage required to maintain the orthogonal -c basis vectors. The optimal "cross-over" with respect to CPU time -c is problem dependent and must be determined empirically. 
-c See Chapter 8 of Reference 2 for further information. -c -c 5. When IPARAM(1) = 0, and IDO = 3, the user needs to provide the -c NP = IPARAM(8) complex shifts in locations -c WORKL(IPNTR(14)), WORKL(IPNTR(14)+1), ... , WORKL(IPNTR(14)+NP). -c Eigenvalues of the current upper Hessenberg matrix are located in -c WORKL(IPNTR(6)) through WORKL(IPNTR(6)+NCV-1). They are ordered -c according to the order defined by WHICH. The associated Ritz estimates -c are located in WORKL(IPNTR(8)), WORKL(IPNTR(8)+1), ... , -c WORKL(IPNTR(8)+NCV-1). -c -c----------------------------------------------------------------------- -c -c\Data Distribution Note: -c -c Fortran-D syntax: -c ================ -c Complex resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c decompose d1(n), d2(n,ncv) -c align resid(i) with d1(i) -c align v(i,j) with d2(i,j) -c align workd(i) with d1(i) range (1:n) -c align workd(i) with d1(i-n) range (n+1:2*n) -c align workd(i) with d1(i-2*n) range (2*n+1:3*n) -c distribute d1(block), d2(block,:) -c replicated workl(lworkl) -c -c Cray MPP syntax: -c =============== -c Complex resid(n), v(ldv,ncv), workd(n,3), workl(lworkl) -c shared resid(block), v(block,:), workd(block,:) -c replicated workl(lworkl) -c -c CM2/CM5 syntax: -c ============== -c -c----------------------------------------------------------------------- -c -c include 'ex-nonsym.doc' -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett & Y. 
Saad, "_Complex_ Shift and Invert Strategies for -c Real Matrices", Linear Algebra and its Applications, vol 88/89, -c pp 575-595, (1987). -c -c\Routines called: -c cnaup2 ARPACK routine that implements the Implicitly Restarted -c Arnoldi Iteration. -c cstatn ARPACK routine that initializes the timing variables. -c ivout ARPACK utility routine that prints integers. -c cvout ARPACK utility routine that prints vectors. -c second ARPACK utility routine for timing. -c slamch LAPACK routine that determines machine constants. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naupd.F SID: 2.9 DATE OF SID: 07/21/02 RELEASE: 2 -c -c\Remarks -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cnaupd - & ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, - & ipntr, workd, workl, lworkl, rwork, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ldv, lworkl, n, ncv, nev - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - Complex - & resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) - Real - & rwork(ncv) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0)) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, levec, mode, msglvl, mxiter, nb, - & nev0, next, np, ritz, j - save bounds, ih, iq, ishift, iupd, iw, - 
& ldh, ldq, levec, mode, msglvl, mxiter, nb, - & nev0, next, np, ritz -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external cnaup2, cvout, ivout, second, cstatn -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slamch - external slamch -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call cstatn - call second (t0) - msglvl = mcaupd -c -c %----------------% -c | Error checking | -c %----------------% -c - ierr = 0 - ishift = iparam(1) -c levec = iparam(2) - mxiter = iparam(3) -c nb = iparam(4) - nb = 1 -c -c %--------------------------------------------% -c | Revision 2 performs only implicit restart. | -c %--------------------------------------------% -c - iupd = 1 - mode = iparam(7) -c - if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev .or. ncv .gt. n) then - ierr = -3 - else if (mxiter .le. 0) then - ierr = -4 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 5*ncv) then - ierr = -7 - else if (mode .lt. 1 .or. mode .gt. 3) then - ierr = -10 - else if (mode .eq. 1 .and. bmat .eq. 'G') then - ierr = -11 - end if -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - ido = 99 - go to 9000 - end if -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - if (nb .le. 0) nb = 1 - if (tol .le. 0.0E+0 ) tol = slamch('EpsMach') - if (ishift .ne. 0 .and. - & ishift .ne. 1 .and. - & ishift .ne. 
2) ishift = 1 -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c | NEV0 is the local variable designating the | -c | size of the invariant subspace desired. | -c %----------------------------------------------% -c - np = ncv - nev - nev0 = nev -c -c %-----------------------------% -c | Zero out internal workspace | -c %-----------------------------% -c - do 10 j = 1, 3*ncv**2 + 5*ncv - workl(j) = zero - 10 continue -c -c %-------------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+ncv) := the ritz values | -c | workl(ncv*ncv+ncv+1:ncv*ncv+2*ncv) := error bounds | -c | workl(ncv*ncv+2*ncv+1:2*ncv*ncv+2*ncv) := rotation matrix Q | -c | workl(2*ncv*ncv+2*ncv+1:3*ncv*ncv+5*ncv) := workspace | -c | The final workspace is needed by subroutine cneigh called | -c | by cnaup2. Subroutine cneigh calls LAPACK routines for | -c | calculating eigenvalues and the last row of the eigenvector | -c | matrix. | -c %-------------------------------------------------------------% -c - ldh = ncv - ldq = ncv - ih = 1 - ritz = ih + ldh*ncv - bounds = ritz + ncv - iq = bounds + ncv - iw = iq + ldq*ncv - next = iw + ncv**2 + 3*ncv -c - ipntr(4) = next - ipntr(5) = ih - ipntr(6) = ritz - ipntr(7) = iq - ipntr(8) = bounds - ipntr(14) = iw - end if -c -c %-------------------------------------------------------% -c | Carry out the Implicitly restarted Arnoldi Iteration. 
| -c %-------------------------------------------------------% -c - call cnaup2 - & ( ido, bmat, n, which, nev0, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, workl(ih), ldh, workl(ritz), - & workl(bounds), workl(iq), ldq, workl(iw), - & ipntr, workd, rwork, info ) -c -c %--------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP. | -c %--------------------------------------------------% -c - if (ido .eq. 3) iparam(8) = np - if (ido .ne. 99) go to 9000 -c - iparam(3) = mxiter - iparam(5) = np - iparam(9) = nopx - iparam(10) = nbx - iparam(11) = nrorth -c -c %------------------------------------% -c | Exit if there was an informational | -c | error within cnaup2. | -c %------------------------------------% -c - if (info .lt. 0) go to 9000 - if (info .eq. 2) info = 3 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, mxiter, ndigit, - & '_naupd: Number of update iterations taken') - call ivout (logfil, 1, np, ndigit, - & '_naupd: Number of wanted "converged" Ritz values') - call cvout (logfil, np, workl(ritz), ndigit, - & '_naupd: The final Ritz values') - call cvout (logfil, np, workl(bounds), ndigit, - & '_naupd: Associated Ritz estimates') - end if -c - call second (t1) - tcaupd = t1 - t0 -c - if (msglvl .gt. 
0) then -c -c %--------------------------------------------------------% -c | Version Number & Version Date are defined in version.h | -c %--------------------------------------------------------% -c - write (6,1000) - write (6,1100) mxiter, nopx, nbx, nrorth, nitref, nrstrt, - & tmvopx, tmvbx, tcaupd, tcaup2, tcaitr, titref, - & tgetv0, tceigh, tcgets, tcapps, tcconv, trvec - 1000 format (//, - & 5x, '=============================================',/ - & 5x, '= Complex implicit Arnoldi update code =',/ - & 5x, '= Version Number: ', ' 2.3', 21x, ' =',/ - & 5x, '= Version Date: ', ' 07/31/96', 16x, ' =',/ - & 5x, '=============================================',/ - & 5x, '= Summary of timing statistics =',/ - & 5x, '=============================================',//) - 1100 format ( - & 5x, 'Total number update iterations = ', i5,/ - & 5x, 'Total number of OP*x operations = ', i5,/ - & 5x, 'Total number of B*x operations = ', i5,/ - & 5x, 'Total number of reorthogonalization steps = ', i5,/ - & 5x, 'Total number of iterative refinement steps = ', i5,/ - & 5x, 'Total number of restart steps = ', i5,/ - & 5x, 'Total time in user OP*x operation = ', f12.6,/ - & 5x, 'Total time in user B*x operation = ', f12.6,/ - & 5x, 'Total time in Arnoldi update routine = ', f12.6,/ - & 5x, 'Total time in naup2 routine = ', f12.6,/ - & 5x, 'Total time in basic Arnoldi iteration loop = ', f12.6,/ - & 5x, 'Total time in reorthogonalization phase = ', f12.6,/ - & 5x, 'Total time in (re)start vector generation = ', f12.6,/ - & 5x, 'Total time in Hessenberg eig. 
subproblem = ', f12.6,/ - & 5x, 'Total time in getting the shifts = ', f12.6,/ - & 5x, 'Total time in applying the shifts = ', f12.6,/ - & 5x, 'Total time in convergence testing = ', f12.6,/ - & 5x, 'Total time in computing final Ritz vectors = ', f12.6/) - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of cnaupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cneigh.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cneigh.f deleted file mode 100644 index da476b675e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cneigh.f +++ /dev/null @@ -1,257 +0,0 @@ -c\BeginDoc -c -c\Name: cneigh -c -c\Description: -c Compute the eigenvalues of the current upper Hessenberg matrix -c and the corresponding Ritz estimates given the current residual norm. -c -c\Usage: -c call cneigh -c ( RNORM, N, H, LDH, RITZ, BOUNDS, Q, LDQ, WORKL, RWORK, IERR ) -c -c\Arguments -c RNORM Real scalar. (INPUT) -c Residual norm corresponding to the current upper Hessenberg -c matrix H. -c -c N Integer. (INPUT) -c Size of the matrix H. -c -c H Complex N by N array. (INPUT) -c H contains the current upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZ Complex array of length N. (OUTPUT) -c On output, RITZ(1:N) contains the eigenvalues of H. -c -c BOUNDS Complex array of length N. (OUTPUT) -c On output, BOUNDS contains the Ritz estimates associated with -c the eigenvalues held in RITZ. This is equal to RNORM -c times the last components of the eigenvectors corresponding -c to the eigenvalues in RITZ. -c -c Q Complex N by N array. (WORKSPACE) -c Workspace needed to store the eigenvectors of H. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Complex work array of length N**2 + 3*N. 
(WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. This is needed to keep the full Schur form -c of H and also in the calculation of the eigenvectors of H. -c -c RWORK Real work array of length N (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c IERR Integer. (OUTPUT) -c Error exit flag from clahqr or ctrevc. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c cmout ARPACK utility routine that prints matrices -c cvout ARPACK utility routine that prints vectors. -c svout ARPACK utility routine that prints vectors. -c clacpy LAPACK matrix copy routine. -c clahqr LAPACK routine to compute the Schur form of an -c upper Hessenberg matrix. -c claset LAPACK matrix initialization routine. -c ctrevc LAPACK routine to compute the eigenvectors of a matrix -c in upper triangular form -c ccopy Level 1 BLAS that copies one vector to another. -c csscal Level 1 BLAS that scales a complex vector by a real number. -c scnrm2 Level 1 BLAS that computes the norm of a vector. -c -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: neigh.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cneigh (rnorm, n, h, ldh, ritz, bounds, - & q, ldq, workl, rwork, ierr) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer ierr, n, ldh, ldq - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex - & bounds(n), h(ldh,n), q(ldq,n), ritz(n), - & workl(n*(n+3)) - Real - & rwork(n) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - Real - & rone - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0), - & rone = 1.0E+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical select(1) - integer j, msglvl - Complex - & vl(1) - Real - & temp -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external clacpy, clahqr, ctrevc, ccopy, - & csscal, cmout, cvout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & scnrm2 - external scnrm2 -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mceigh -c - if (msglvl .gt. 
2) then - call cmout (logfil, n, n, h, ldh, ndigit, - & '_neigh: Entering upper Hessenberg matrix H ') - end if -c -c %----------------------------------------------------------% -c | 1. Compute the eigenvalues, the last components of the | -c | corresponding Schur vectors and the full Schur form T | -c | of the current upper Hessenberg matrix H. | -c | clahqr returns the full Schur form of H | -c | in WORKL(1:N**2), and the Schur vectors in q. | -c %----------------------------------------------------------% -c - call clacpy ('All', n, n, h, ldh, workl, n) - call claset ('All', n, n, zero, one, q, ldq) - call clahqr (.true., .true., n, 1, n, workl, ldh, ritz, - & 1, n, q, ldq, ierr) - if (ierr .ne. 0) go to 9000 -c - call ccopy (n, q(n-1,1), ldq, bounds, 1) - if (msglvl .gt. 1) then - call cvout (logfil, n, bounds, ndigit, - & '_neigh: last row of the Schur matrix for H') - end if -c -c %----------------------------------------------------------% -c | 2. Compute the eigenvectors of the full Schur form T and | -c | apply the Schur vectors to get the corresponding | -c | eigenvectors. | -c %----------------------------------------------------------% -c - call ctrevc ('Right', 'Back', select, n, workl, n, vl, n, q, - & ldq, n, n, workl(n*n+1), rwork, ierr) -c - if (ierr .ne. 0) go to 9000 -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | Euclidean norms are all one. LAPACK subroutine | -c | ctrevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1; here the magnitude of a complex | -c | number (x,y) is taken to be |x| + |y|. | -c %------------------------------------------------% -c - do 10 j=1, n - temp = scnrm2( n, q(1,j), 1 ) - call csscal ( n, rone / temp, q(1,j), 1 ) - 10 continue -c - if (msglvl .gt. 
1) then - call ccopy(n, q(n,1), ldq, workl, 1) - call cvout (logfil, n, workl, ndigit, - & '_neigh: Last row of the eigenvector matrix for H') - end if -c -c %----------------------------% -c | Compute the Ritz estimates | -c %----------------------------% -c - call ccopy(n, q(n,1), n, bounds, 1) - call csscal(n, rnorm, bounds, 1) -c - if (msglvl .gt. 2) then - call cvout (logfil, n, ritz, ndigit, - & '_neigh: The eigenvalues of H') - call cvout (logfil, n, bounds, ndigit, - & '_neigh: Ritz estimates for the eigenvalues of H') - end if -c - call second(t1) - tceigh = tceigh + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of cneigh | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cneupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cneupd.f deleted file mode 100644 index aacd45925a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cneupd.f +++ /dev/null @@ -1,876 +0,0 @@ -c\BeginDoc -c -c\Name: cneupd -c -c\Description: -c This subroutine returns the converged approximations to eigenvalues -c of A*z = lambda*B*z and (optionally): -c -c (1) The corresponding approximate eigenvectors; -c -c (2) An orthonormal basis for the associated approximate -c invariant subspace; -c -c (3) Both. -c -c There is negligible additional cost to obtain eigenvectors. An orthonormal -c basis is always computed. There is an additional storage cost of n*nev -c if both are requested (in this case a separate array Z must be supplied). -c -c The approximate eigenvalues and eigenvectors of A*z = lambda*B*z -c are derived from approximate eigenvalues and eigenvectors of -c of the linear operator OP prescribed by the MODE selection in the -c call to CNAUPD. CNAUPD must be called before this routine is called. -c These approximate eigenvalues and vectors are commonly called Ritz -c values and Ritz vectors respectively. They are referred to as such -c in the comments that follow. 
The computed orthonormal basis for the -c invariant subspace corresponding to these Ritz values is referred to as a -c Schur basis. -c -c The definition of OP as well as other terms and the relation of computed -c Ritz values and vectors of OP with respect to the given problem -c A*z = lambda*B*z may be found in the header of CNAUPD. For a brief -c description, see definitions of IPARAM(7), MODE and WHICH in the -c documentation of CNAUPD. -c -c\Usage: -c call cneupd -c ( RVEC, HOWMNY, SELECT, D, Z, LDZ, SIGMA, WORKEV, BMAT, -c N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, WORKD, -c WORKL, LWORKL, RWORK, INFO ) -c -c\Arguments: -c RVEC LOGICAL (INPUT) -c Specifies whether a basis for the invariant subspace corresponding -c to the converged Ritz value approximations for the eigenproblem -c A*z = lambda*B*z is computed. -c -c RVEC = .FALSE. Compute Ritz values only. -c -c RVEC = .TRUE. Compute Ritz vectors or Schur vectors. -c See Remarks below. -c -c HOWMNY Character*1 (INPUT) -c Specifies the form of the basis for the invariant subspace -c corresponding to the converged Ritz values that is to be computed. -c -c = 'A': Compute NEV Ritz vectors; -c = 'P': Compute NEV Schur vectors; -c = 'S': compute some of the Ritz vectors, specified -c by the logical array SELECT. -c -c SELECT Logical array of dimension NCV. (INPUT) -c If HOWMNY = 'S', SELECT specifies the Ritz vectors to be -c computed. To select the Ritz vector corresponding to a -c Ritz value D(j), SELECT(j) must be set to .TRUE.. -c If HOWMNY = 'A' or 'P', SELECT need not be initialized -c but it is used as internal workspace. -c -c D Complex array of dimension NEV+1. (OUTPUT) -c On exit, D contains the Ritz approximations -c to the eigenvalues lambda for A*z = lambda*B*z. -c -c Z Complex N by NEV array (OUTPUT) -c On exit, if RVEC = .TRUE. 
and HOWMNY = 'A', then the columns of -c Z represents approximate eigenvectors (Ritz vectors) corresponding -c to the NCONV=IPARAM(5) Ritz values for eigensystem -c A*z = lambda*B*z. -c -c If RVEC = .FALSE. or HOWMNY = 'P', then Z is NOT REFERENCED. -c -c NOTE: If if RVEC = .TRUE. and a Schur basis is not required, -c the array Z may be set equal to first NEV+1 columns of the Arnoldi -c basis array V computed by CNAUPD. In this case the Arnoldi basis -c will be destroyed and overwritten with the eigenvector basis. -c -c LDZ Integer. (INPUT) -c The leading dimension of the array Z. If Ritz vectors are -c desired, then LDZ .ge. max( 1, N ) is required. -c In any case, LDZ .ge. 1 is required. -c -c SIGMA Complex (INPUT) -c If IPARAM(7) = 3 then SIGMA represents the shift. -c Not referenced if IPARAM(7) = 1 or 2. -c -c WORKEV Complex work array of dimension 2*NCV. (WORKSPACE) -c -c **** The remaining arguments MUST be the same as for the **** -c **** call to CNAUPD that was just completed. **** -c -c NOTE: The remaining arguments -c -c BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, -c WORKD, WORKL, LWORKL, RWORK, INFO -c -c must be passed directly to CNEUPD following the last call -c to CNAUPD. These arguments MUST NOT BE MODIFIED between -c the the last call to CNAUPD and the call to CNEUPD. -c -c Three of these parameters (V, WORKL and INFO) are also output parameters: -c -c V Complex N by NCV array. (INPUT/OUTPUT) -c -c Upon INPUT: the NCV columns of V contain the Arnoldi basis -c vectors for OP as constructed by CNAUPD . -c -c Upon OUTPUT: If RVEC = .TRUE. the first NCONV=IPARAM(5) columns -c contain approximate Schur vectors that span the -c desired invariant subspace. -c -c NOTE: If the array Z has been set equal to first NEV+1 columns -c of the array V and RVEC=.TRUE. and HOWMNY= 'A', then the -c Arnoldi basis held by V has been overwritten by the desired -c Ritz vectors. 
If a separate array Z has been passed then -c the first NCONV=IPARAM(5) columns of V will contain approximate -c Schur vectors that span the desired invariant subspace. -c -c WORKL Real work array of length LWORKL. (OUTPUT/WORKSPACE) -c WORKL(1:ncv*ncv+2*ncv) contains information obtained in -c cnaupd. They are not changed by cneupd. -c WORKL(ncv*ncv+2*ncv+1:3*ncv*ncv+4*ncv) holds the -c untransformed Ritz values, the untransformed error estimates of -c the Ritz values, the upper triangular matrix for H, and the -c associated matrix representation of the invariant subspace for H. -c -c Note: IPNTR(9:13) contains the pointer into WORKL for addresses -c of the above information computed by cneupd. -c ------------------------------------------------------------- -c IPNTR(9): pointer to the NCV RITZ values of the -c original system. -c IPNTR(10): Not used -c IPNTR(11): pointer to the NCV corresponding error estimates. -c IPNTR(12): pointer to the NCV by NCV upper triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c cneupd if RVEC = .TRUE. See Remark 2 below. -c ------------------------------------------------------------- -c -c INFO Integer. (OUTPUT) -c Error flag on output. -c = 0: Normal exit. -c -c = 1: The Schur form computed by LAPACK routine csheqr -c could not be reordered by LAPACK routine ctrsen. -c Re-enter subroutine cneupd with IPARAM(5)=NCV and -c increase the size of the array D to have -c dimension at least dimension NCV and allocate at least NCV -c columns for Z. NOTE: Not necessary if Z and V share -c the same space. Please notify the authors if this error -c occurs. -c -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 1 and less than or equal to N. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work WORKL array is not sufficient. 
-c = -8: Error return from LAPACK eigenvalue calculation. -c This should never happened. -c = -9: Error return from calculation of eigenvectors. -c Informational error from LAPACK routine ctrevc. -c = -10: IPARAM(7) must be 1,2,3 -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. -c = -12: HOWMNY = 'S' not yet implemented -c = -13: HOWMNY must be one of 'A' or 'P' if RVEC = .true. -c = -14: CNAUPD did not find any eigenvalues to sufficient -c accuracy. -c = -15: CNEUPD got a different count of the number of converged -c Ritz values than CNAUPD got. This indicates the user -c probably made an error in passing data from CNAUPD to -c CNEUPD or that the data was modified before entering -c CNEUPD -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B. Nour-Omid, B. N. Parlett, T. Ericsson and P. S. Jensen, -c "How to Implement the Spectral Transformation", Math Comp., -c Vol. 48, No. 178, April, 1987 pp. 664-673. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c cmout ARPACK utility routine that prints matrices -c cvout ARPACK utility routine that prints vectors. -c cgeqr2 LAPACK routine that computes the QR factorization of -c a matrix. -c clacpy LAPACK matrix copy routine. -c clahqr LAPACK routine that computes the Schur form of a -c upper Hessenberg matrix. -c claset LAPACK matrix initialization routine. -c ctrevc LAPACK routine to compute the eigenvectors of a matrix -c in upper triangular form. -c ctrsen LAPACK routine that re-orders the Schur form. -c cunm2r LAPACK routine that applies an orthogonal matrix in -c factored form. -c slamch LAPACK routine that determines machine constants. 
-c ctrmm Level 3 BLAS matrix times an upper triangular matrix. -c cgeru Level 2 BLAS rank one update to a matrix. -c ccopy Level 1 BLAS that copies one vector to another . -c cscal Level 1 BLAS that scales a vector. -c csscal Level 1 BLAS that scales a complex vector by a real number. -c scnrm2 Level 1 BLAS that computes the norm of a complex vector. -c -c\Remarks -c -c 1. Currently only HOWMNY = 'A' and 'P' are implemented. -c -c 2. Schur vectors are an orthogonal representation for the basis of -c Ritz vectors. Thus, their numerical properties are often superior. -c If RVEC = .true. then the relationship -c A * V(:,1:IPARAM(5)) = V(:,1:IPARAM(5)) * T, and -c transpose( V(:,1:IPARAM(5)) ) * V(:,1:IPARAM(5)) = I -c are approximately satisfied. -c Here T is the leading submatrix of order IPARAM(5) of the -c upper triangular matrix stored workl(ipntr(12)). -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Chao Yang Houston, Texas -c Dept. of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: neupd.F SID: 2.8 DATE OF SID: 07/21/02 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- - subroutine cneupd(rvec , howmny, select, d , - & z , ldz , sigma , workev, - & bmat , n , which , nev , - & tol , resid , ncv , v , - & ldv , iparam, ipntr , workd , - & workl, lworkl, rwork , info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat, howmny, which*2 - logical rvec - integer info, ldz, ldv, lworkl, n, ncv, nev - Complex - & sigma - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - logical 
select(ncv) - Real - & rwork(ncv) - Complex - & d(nev) , resid(n) , v(ldv,ncv), - & z(ldz, nev), - & workd(3*n) , workl(lworkl), workev(2*ncv) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0)) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character type*6 - integer bounds, ierr , ih , ihbds, iheig , nconv , - & invsub, iuptri, iwev , j , ldh , ldq , - & mode , msglvl, ritz , wr , k , irz , - & ibd , outncv, iq , np , numcnv, jj , - & ishift, nconv2 - Complex - & rnorm, temp, vl(1) - Real - & conds, sep, rtemp, eps23 - logical reord -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external ccopy , cgeru, cgeqr2, clacpy, cmout, - & cunm2r, ctrmm, cvout, ivout, - & clahqr -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & scnrm2, slamch, slapy2 - external scnrm2, slamch, slapy2 -c - Complex - & wcdotc - external wcdotc -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - msglvl = mceupd - mode = iparam(7) - nconv = iparam(5) - info = 0 -c -c -c %---------------------------------% -c | Get machine dependent constant. | -c %---------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0) -c -c %-------------------------------% -c | Quick return | -c | Check for incompatible input | -c %-------------------------------% -c - ierr = 0 -c - if (nconv .le. 0) then - ierr = -14 - else if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev .or. ncv .gt. n) then - ierr = -3 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 
'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 4*ncv) then - ierr = -7 - else if ( (howmny .ne. 'A' .and. - & howmny .ne. 'P' .and. - & howmny .ne. 'S') .and. rvec ) then - ierr = -13 - else if (howmny .eq. 'S' ) then - ierr = -12 - end if -c - if (mode .eq. 1 .or. mode .eq. 2) then - type = 'REGULR' - else if (mode .eq. 3 ) then - type = 'SHIFTI' - else - ierr = -10 - end if - if (mode .eq. 1 .and. bmat .eq. 'G') ierr = -11 -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, WORKEV, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+ncv) := ritz values | -c | workl(ncv*ncv+ncv+1:ncv*ncv+2*ncv) := error bounds | -c %--------------------------------------------------------% -c -c %-----------------------------------------------------------% -c | The following is used and set by CNEUPD. | -c | workl(ncv*ncv+2*ncv+1:ncv*ncv+3*ncv) := The untransformed | -c | Ritz values. | -c | workl(ncv*ncv+3*ncv+1:ncv*ncv+4*ncv) := The untransformed | -c | error bounds of | -c | the Ritz values | -c | workl(ncv*ncv+4*ncv+1:2*ncv*ncv+4*ncv) := Holds the upper | -c | triangular matrix | -c | for H. | -c | workl(2*ncv*ncv+4*ncv+1: 3*ncv*ncv+4*ncv) := Holds the | -c | associated matrix | -c | representation of | -c | the invariant | -c | subspace for H. | -c | GRAND total of NCV * ( 3 * NCV + 4 ) locations. 
| -c %-----------------------------------------------------------% -c - ih = ipntr(5) - ritz = ipntr(6) - iq = ipntr(7) - bounds = ipntr(8) - ldh = ncv - ldq = ncv - iheig = bounds + ldh - ihbds = iheig + ldh - iuptri = ihbds + ldh - invsub = iuptri + ldh*ncv - ipntr(9) = iheig - ipntr(11) = ihbds - ipntr(12) = iuptri - ipntr(13) = invsub - wr = 1 - iwev = wr + ncv -c -c %-----------------------------------------% -c | irz points to the Ritz values computed | -c | by _neigh before exiting _naup2. | -c | ibd points to the Ritz estimates | -c | computed by _neigh before exiting | -c | _naup2. | -c %-----------------------------------------% -c - irz = ipntr(14) + ncv*ncv - ibd = irz + ncv -c -c %------------------------------------% -c | RNORM is B-norm of the RESID(1:N). | -c %------------------------------------% -c - rnorm = workl(ih+2) - workl(ih+2) = zero -c - if (msglvl .gt. 2) then - call cvout(logfil, ncv, workl(irz), ndigit, - & '_neupd: Ritz values passed in from _NAUPD.') - call cvout(logfil, ncv, workl(ibd), ndigit, - & '_neupd: Ritz estimates passed in from _NAUPD.') - end if -c - if (rvec) then -c - reord = .false. -c -c %---------------------------------------------------% -c | Use the temporary bounds array to store indices | -c | These will be used to mark the select array later | -c %---------------------------------------------------% -c - do 10 j = 1,ncv - workl(bounds+j-1) = j - select(j) = .false. - 10 continue -c -c %-------------------------------------% -c | Select the wanted Ritz values. | -c | Sort the Ritz values so that the | -c | wanted ones appear at the tailing | -c | NEV positions of workl(irr) and | -c | workl(iri). Move the corresponding | -c | error estimates in workl(ibd) | -c | accordingly. | -c %-------------------------------------% -c - np = ncv - nev - ishift = 0 - call cngets(ishift, which , nev , - & np , workl(irz), workl(bounds)) -c - if (msglvl .gt. 
2) then - call cvout (logfil, ncv, workl(irz), ndigit, - & '_neupd: Ritz values after calling _NGETS.') - call cvout (logfil, ncv, workl(bounds), ndigit, - & '_neupd: Ritz value indices after calling _NGETS.') - end if -c -c %-----------------------------------------------------% -c | Record indices of the converged wanted Ritz values | -c | Mark the select array for possible reordering | -c %-----------------------------------------------------% -c - numcnv = 0 - do 11 j = 1,ncv - rtemp = max(eps23, - & slapy2 ( real(workl(irz+ncv-j)), - & aimag(workl(irz+ncv-j)) )) - jj = workl(bounds + ncv - j) - if (numcnv .lt. nconv .and. - & slapy2( real(workl(ibd+jj-1)), - & aimag(workl(ibd+jj-1)) ) - & .le. tol*rtemp) then - select(jj) = .true. - numcnv = numcnv + 1 - if (jj .gt. nev) reord = .true. - endif - 11 continue -c -c %-----------------------------------------------------------% -c | Check the count (numcnv) of converged Ritz values with | -c | the number (nconv) reported by dnaupd. If these two | -c | are different then there has probably been an error | -c | caused by incorrect passing of the dnaupd data. | -c %-----------------------------------------------------------% -c - if (msglvl .gt. 2) then - call ivout(logfil, 1, numcnv, ndigit, - & '_neupd: Number of specified eigenvalues') - call ivout(logfil, 1, nconv, ndigit, - & '_neupd: Number of "converged" eigenvalues') - end if -c - if (numcnv .ne. nconv) then - info = -15 - go to 9000 - end if -c -c %-------------------------------------------------------% -c | Call LAPACK routine clahqr to compute the Schur form | -c | of the upper Hessenberg matrix returned by CNAUPD. | -c | Make a copy of the upper Hessenberg matrix. | -c | Initialize the Schur vector matrix Q to the identity. | -c %-------------------------------------------------------% -c - call ccopy(ldh*ncv, workl(ih), 1, workl(iuptri), 1) - call claset('All', ncv, ncv , - & zero , one, workl(invsub), - & ldq) - call clahqr(.true., .true. 
, ncv , - & 1 , ncv , workl(iuptri), - & ldh , workl(iheig) , 1 , - & ncv , workl(invsub), ldq , - & ierr) - call ccopy(ncv , workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) -c - if (ierr .ne. 0) then - info = -8 - go to 9000 - end if -c - if (msglvl .gt. 1) then - call cvout (logfil, ncv, workl(iheig), ndigit, - & '_neupd: Eigenvalues of H') - call cvout (logfil, ncv, workl(ihbds), ndigit, - & '_neupd: Last row of the Schur vector matrix') - if (msglvl .gt. 3) then - call cmout (logfil , ncv, ncv , - & workl(iuptri), ldh, ndigit, - & '_neupd: The upper triangular matrix ') - end if - end if -c - if (reord) then -c -c %-----------------------------------------------% -c | Reorder the computed upper triangular matrix. | -c %-----------------------------------------------% -c - call ctrsen('None' , 'V' , select , - & ncv , workl(iuptri), ldh , - & workl(invsub), ldq , workl(iheig), - & nconv2 , conds , sep , - & workev , ncv , ierr) -c - if (nconv2 .lt. nconv) then - nconv = nconv2 - end if - - if (ierr .eq. 1) then - info = 1 - go to 9000 - end if -c - if (msglvl .gt. 2) then - call cvout (logfil, ncv, workl(iheig), ndigit, - & '_neupd: Eigenvalues of H--reordered') - if (msglvl .gt. 3) then - call cmout(logfil , ncv, ncv , - & workl(iuptri), ldq, ndigit, - & '_neupd: Triangular matrix after re-ordering') - end if - end if -c - end if -c -c %---------------------------------------------% -c | Copy the last row of the Schur basis matrix | -c | to workl(ihbds). This vector will be used | -c | to compute the Ritz estimates of converged | -c | Ritz values. | -c %---------------------------------------------% -c - call ccopy(ncv , workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) -c -c %--------------------------------------------% -c | Place the computed eigenvalues of H into D | -c | if a spectral transformation was not used. | -c %--------------------------------------------% -c - if (type .eq. 
'REGULR') then - call ccopy(nconv, workl(iheig), 1, d, 1) - end if -c -c %----------------------------------------------------------% -c | Compute the QR factorization of the matrix representing | -c | the wanted invariant subspace located in the first NCONV | -c | columns of workl(invsub,ldq). | -c %----------------------------------------------------------% -c - call cgeqr2(ncv , nconv , workl(invsub), - & ldq , workev, workev(ncv+1), - & ierr) -c -c %--------------------------------------------------------% -c | * Postmultiply V by Q using cunm2r. | -c | * Copy the first NCONV columns of VQ into Z. | -c | * Postmultiply Z by R. | -c | The N by NCONV matrix Z is now a matrix representation | -c | of the approximate invariant subspace associated with | -c | the Ritz values in workl(iheig). The first NCONV | -c | columns of V are now approximate Schur vectors | -c | associated with the upper triangular matrix of order | -c | NCONV in workl(iuptri). | -c %--------------------------------------------------------% -c - call cunm2r('Right', 'Notranspose', n , - & ncv , nconv , workl(invsub), - & ldq , workev , v , - & ldv , workd(n+1) , ierr) - call clacpy('All', n, nconv, v, ldv, z, ldz) -c - do 20 j=1, nconv -c -c %---------------------------------------------------% -c | Perform both a column and row scaling if the | -c | diagonal element of workl(invsub,ldq) is negative | -c | I'm lazy and don't take advantage of the upper | -c | triangular form of workl(iuptri,ldq). | -c | Note that since Q is orthogonal, R is a diagonal | -c | matrix consisting of plus or minus ones. | -c %---------------------------------------------------% -c - if ( real( workl(invsub+(j-1)*ldq+j-1) ) .lt. - & real(zero) ) then - call cscal(nconv, -one, workl(iuptri+j-1), ldq) - call cscal(nconv, -one, workl(iuptri+(j-1)*ldq), 1) - end if -c - 20 continue -c - if (howmny .eq. 
'A') then -c -c %--------------------------------------------% -c | Compute the NCONV wanted eigenvectors of T | -c | located in workl(iuptri,ldq). | -c %--------------------------------------------% -c - do 30 j=1, ncv - if (j .le. nconv) then - select(j) = .true. - else - select(j) = .false. - end if - 30 continue -c - call ctrevc('Right', 'Select' , select , - & ncv , workl(iuptri), ldq , - & vl , 1 , workl(invsub), - & ldq , ncv , outncv , - & workev , rwork , ierr) -c - if (ierr .ne. 0) then - info = -9 - go to 9000 - end if -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | Euclidean norms are all one. LAPACK subroutine | -c | ctrevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1. | -c %------------------------------------------------% -c - do 40 j=1, nconv - rtemp = scnrm2(ncv, workl(invsub+(j-1)*ldq), 1) - rtemp = real(one) / rtemp - call csscal ( ncv, rtemp, - & workl(invsub+(j-1)*ldq), 1 ) -c -c %------------------------------------------% -c | Ritz estimates can be obtained by taking | -c | the inner product of the last row of the | -c | Schur basis of H with eigenvectors of T. | -c | Note that the eigenvector matrix of T is | -c | upper triangular, thus the length of the | -c | inner product can be set to j. | -c %------------------------------------------% -c - workev(j) = wcdotc(j, workl(ihbds), 1, - & workl(invsub+(j-1)*ldq), 1) - 40 continue -c - if (msglvl .gt. 2) then - call ccopy(nconv, workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) - call cvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Last row of the eigenvector matrix for T') - if (msglvl .gt. 
3) then - call cmout(logfil , ncv, ncv , - & workl(invsub), ldq, ndigit, - & '_neupd: The eigenvector matrix for T') - end if - end if -c -c %---------------------------------------% -c | Copy Ritz estimates into workl(ihbds) | -c %---------------------------------------% -c - call ccopy(nconv, workev, 1, workl(ihbds), 1) -c -c %----------------------------------------------% -c | The eigenvector matrix Q of T is triangular. | -c | Form Z*Q. | -c %----------------------------------------------% -c - call ctrmm('Right' , 'Upper' , 'No transpose', - & 'Non-unit', n , nconv , - & one , workl(invsub), ldq , - & z , ldz) - end if -c - else -c -c %--------------------------------------------------% -c | An approximate invariant subspace is not needed. | -c | Place the Ritz values computed CNAUPD into D. | -c %--------------------------------------------------% -c - call ccopy(nconv, workl(ritz), 1, d, 1) - call ccopy(nconv, workl(ritz), 1, workl(iheig), 1) - call ccopy(nconv, workl(bounds), 1, workl(ihbds), 1) -c - end if -c -c %------------------------------------------------% -c | Transform the Ritz values and possibly vectors | -c | and corresponding error bounds of OP to those | -c | of A*x = lambda*B*x. | -c %------------------------------------------------% -c - if (type .eq. 'REGULR') then -c - if (rvec) - & call cscal(ncv, rnorm, workl(ihbds), 1) -c - else -c -c %---------------------------------------% -c | A spectral transformation was used. | -c | * Determine the Ritz estimates of the | -c | Ritz values in the original system. | -c %---------------------------------------% -c - if (rvec) - & call cscal(ncv, rnorm, workl(ihbds), 1) -c - do 50 k=1, ncv - temp = workl(iheig+k-1) - workl(ihbds+k-1) = workl(ihbds+k-1) / temp / temp - 50 continue -c - end if -c -c %-----------------------------------------------------------% -c | * Transform the Ritz values back to the original system. 
| -c | For TYPE = 'SHIFTI' the transformation is | -c | lambda = 1/theta + sigma | -c | NOTES: | -c | *The Ritz vectors are not affected by the transformation. | -c %-----------------------------------------------------------% -c - if (type .eq. 'SHIFTI') then - do 60 k=1, nconv - d(k) = one / workl(iheig+k-1) + sigma - 60 continue - end if -c - if (type .ne. 'REGULR' .and. msglvl .gt. 1) then - call cvout (logfil, nconv, d, ndigit, - & '_neupd: Untransformed Ritz values.') - call cvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Ritz estimates of the untransformed Ritz values.') - else if ( msglvl .gt. 1) then - call cvout (logfil, nconv, d, ndigit, - & '_neupd: Converged Ritz values.') - call cvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Associated Ritz estimates.') - end if -c -c %-------------------------------------------------% -c | Eigenvector Purification step. Formally perform | -c | one of inverse subspace iteration. Only used | -c | for MODE = 3. See reference 3. | -c %-------------------------------------------------% -c - if (rvec .and. howmny .eq. 'A' .and. type .eq. 'SHIFTI') then -c -c %------------------------------------------------% -c | Purify the computed Ritz vectors by adding a | -c | little bit of the residual vector: | -c | T | -c | resid(:)*( e s ) / theta | -c | NCV | -c | where H s = s theta. | -c %------------------------------------------------% -c - do 100 j=1, nconv - if (workl(iheig+j-1) .ne. zero) then - workev(j) = workl(invsub+(j-1)*ldq+ncv-1) / - & workl(iheig+j-1) - endif - 100 continue - -c %---------------------------------------% -c | Perform a rank one update to Z and | -c | purify all the Ritz vectors together. 
| -c %---------------------------------------% -c - call cgeru (n, nconv, one, resid, 1, workev, 1, z, ldz) -c - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of cneupd| -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cngets.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cngets.f deleted file mode 100644 index 8b817e3bdb..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cngets.f +++ /dev/null @@ -1,178 +0,0 @@ -c\BeginDoc -c -c\Name: cngets -c -c\Description: -c Given the eigenvalues of the upper Hessenberg matrix H, -c computes the NP shifts AMU that are zeros of the polynomial of -c degree NP which filters out components of the unwanted eigenvectors -c corresponding to the AMU's based on some given criteria. -c -c NOTE: call this even in the case of user specified shifts in order -c to sort the eigenvalues, and error bounds of H for later use. -c -c\Usage: -c call cngets -c ( ISHIFT, WHICH, KEV, NP, RITZ, BOUNDS ) -c -c\Arguments -c ISHIFT Integer. (INPUT) -c Method for selecting the implicit shifts at each iteration. -c ISHIFT = 0: user specified shifts -c ISHIFT = 1: exact shift with respect to the matrix H. -c -c WHICH Character*2. (INPUT) -c Shift selection criteria. -c 'LM' -> want the KEV eigenvalues of largest magnitude. -c 'SM' -> want the KEV eigenvalues of smallest magnitude. -c 'LR' -> want the KEV eigenvalues of largest REAL part. -c 'SR' -> want the KEV eigenvalues of smallest REAL part. -c 'LI' -> want the KEV eigenvalues of largest imaginary part. -c 'SI' -> want the KEV eigenvalues of smallest imaginary part. -c -c KEV Integer. (INPUT) -c The number of desired eigenvalues. -c -c NP Integer. (INPUT) -c The number of shifts to compute. -c -c RITZ Complex array of length KEV+NP. (INPUT/OUTPUT) -c On INPUT, RITZ contains the the eigenvalues of H. 
-c On OUTPUT, RITZ are sorted so that the unwanted -c eigenvalues are in the first NP locations and the wanted -c portion is in the last KEV locations. When exact shifts are -c selected, the unwanted part corresponds to the shifts to -c be applied. Also, if ISHIFT .eq. 1, the unwanted eigenvalues -c are further sorted so that the ones with largest Ritz values -c are first. -c -c BOUNDS Complex array of length KEV+NP. (INPUT/OUTPUT) -c Error bounds corresponding to the ordering in RITZ. -c -c -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex -c -c\Routines called: -c csortc ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c cvout ARPACK utility routine that prints vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: ngets.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c 1. This routine does not keep complex conjugate pairs of -c eigenvalues together. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine cngets ( ishift, which, kev, np, ritz, bounds) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - integer ishift, kev, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex - & bounds(kev+np), ritz(kev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Complex - & one, zero - parameter (one = (1.0E+0, 0.0E+0), zero = (0.0E+0, 0.0E+0)) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external cvout, csortc, second -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mcgets -c - call csortc (which, .true., kev+np, ritz, bounds) -c - if ( ishift .eq. 1 ) then -c -c %-------------------------------------------------------% -c | Sort the unwanted Ritz values used as shifts so that | -c | the ones with largest Ritz estimates are first | -c | This will tend to minimize the effects of the | -c | forward instability of the iteration when the shifts | -c | are applied in subroutine cnapps. | -c | Be careful and use 'SM' since we want to sort BOUNDS! | -c %-------------------------------------------------------% -c - call csortc ( 'SM', .true., np, bounds, ritz ) -c - end if -c - call second (t1) - tcgets = tcgets + (t1 - t0) -c - if (msglvl .gt. 
0) then - call ivout (logfil, 1, kev, ndigit, '_ngets: KEV is') - call ivout (logfil, 1, np, ndigit, '_ngets: NP is') - call cvout (logfil, kev+np, ritz, ndigit, - & '_ngets: Eigenvalues of current H matrix ') - call cvout (logfil, kev+np, bounds, ndigit, - & '_ngets: Ritz estimates of the current KEV+NP Ritz values') - end if -c - return -c -c %---------------% -c | End of cngets | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/csortc.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/csortc.f deleted file mode 100644 index 017c487f53..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/csortc.f +++ /dev/null @@ -1,322 +0,0 @@ -c\BeginDoc -c -c\Name: csortc -c -c\Description: -c Sorts the Complex array in X into the order -c specified by WHICH and optionally applies the permutation to the -c Real array Y. -c -c\Usage: -c call csortc -c ( WHICH, APPLY, N, X, Y ) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> sort X into increasing order of magnitude. -c 'SM' -> sort X into decreasing order of magnitude. -c 'LR' -> sort X with real(X) in increasing algebraic order -c 'SR' -> sort X with real(X) in decreasing algebraic order -c 'LI' -> sort X with imag(X) in increasing algebraic order -c 'SI' -> sort X with imag(X) in decreasing algebraic order -c -c APPLY Logical. (Input) -c APPLY = .TRUE. -> apply the sorted order to array Y. -c APPLY = .FALSE. -> do not apply the sorted order to array Y. -c -c N Integer. (INPUT) -c Size of the arrays. -c -c X Complex array of length N. (INPUT/OUTPUT) -c This is the array to be sorted. -c -c Y Complex array of length N. (INPUT/OUTPUT) -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Routines called: -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c Adapted from the sort routine in LANSO. -c -c\SCCS Information: @(#) -c FILE: sortc.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine csortc (which, apply, n, x, y) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex - & x(0:n-1), y(0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Complex - & temp - Real - & temp1, temp2 -c -c %--------------------% -c | External functions | -c %--------------------% -c - Real - & slapy2 -c -c %--------------------% -c | Intrinsic Functions | -c %--------------------% - Intrinsic - & real, aimag -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'LM') then -c -c %--------------------------------------------% -c | Sort X into increasing order of magnitude. | -c %--------------------------------------------% -c - 10 continue - if (igap .eq. 0) go to 9000 -c - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - temp1 = slapy2(real(x(j)),aimag(x(j))) - temp2 = slapy2(real(x(j+igap)),aimag(x(j+igap))) -c - if (temp1.gt.temp2) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 30 - end if - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c %--------------------------------------------% -c | Sort X into decreasing order of magnitude. | -c %--------------------------------------------% -c - 40 continue - if (igap .eq. 
0) go to 9000 -c - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j .lt. 0) go to 60 -c - temp1 = slapy2(real(x(j)),aimag(x(j))) - temp2 = slapy2(real(x(j+igap)),aimag(x(j+igap))) -c - if (temp1.lt.temp2) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LR') then -c -c %------------------------------------------------% -c | Sort XREAL into increasing order of algebraic. | -c %------------------------------------------------% -c - 70 continue - if (igap .eq. 0) go to 9000 -c - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (real(x(j)).gt.real(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'SR') then -c -c %------------------------------------------------% -c | Sort XREAL into decreasing order of algebraic. | -c %------------------------------------------------% -c - 100 continue - if (igap .eq. 0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (real(x(j)).lt.real(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 -c - else if (which .eq. 'LI') then -c -c %--------------------------------------------% -c | Sort XIMAG into increasing algebraic order | -c %--------------------------------------------% -c - 130 continue - if (igap .eq. 
0) go to 9000 - do 150 i = igap, n-1 - j = i-igap - 140 continue -c - if (j.lt.0) go to 150 -c - if (aimag(x(j)).gt.aimag(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 150 - endif - j = j-igap - go to 140 - 150 continue - igap = igap / 2 - go to 130 -c - else if (which .eq. 'SI') then -c -c %---------------------------------------------% -c | Sort XIMAG into decreasing algebraic order | -c %---------------------------------------------% -c - 160 continue - if (igap .eq. 0) go to 9000 - do 180 i = igap, n-1 - j = i-igap - 170 continue -c - if (j.lt.0) go to 180 -c - if (aimag(x(j)).lt.aimag(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 180 - endif - j = j-igap - go to 170 - 180 continue - igap = igap / 2 - go to 160 - end if -c - 9000 continue - return -c -c %---------------% -c | End of csortc | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cstatn.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cstatn.f deleted file mode 100644 index bfb740549c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/cstatn.f +++ /dev/null @@ -1,51 +0,0 @@ -c -c\SCCS Information: @(#) -c FILE: statn.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c %---------------------------------------------% -c | Initialize statistic and timing information | -c | for complex nonsymmetric Arnoldi code. 
| -c %---------------------------------------------% - - subroutine cstatn -c -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% -c - include 'stat.h' - -c %-----------------------% -c | Executable Statements | -c %-----------------------% - - nopx = 0 - nbx = 0 - nrorth = 0 - nitref = 0 - nrstrt = 0 - - tcaupd = 0.0E+0 - tcaup2 = 0.0E+0 - tcaitr = 0.0E+0 - tceigh = 0.0E+0 - tcgets = 0.0E+0 - tcapps = 0.0E+0 - tcconv = 0.0E+0 - titref = 0.0E+0 - tgetv0 = 0.0E+0 - trvec = 0.0E+0 - -c %----------------------------------------------------% -c | User time including reverse communication overhead | -c %----------------------------------------------------% - tmvopx = 0.0E+0 - tmvbx = 0.0E+0 - - return -c -c %---------------% -c | End of cstatn | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/debug.h b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/debug.h deleted file mode 100644 index 5eb0bb1b3d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/debug.h +++ /dev/null @@ -1,16 +0,0 @@ -c -c\SCCS Information: @(#) -c FILE: debug.h SID: 2.3 DATE OF SID: 11/16/95 RELEASE: 2 -c -c %---------------------------------% -c | See debug.doc for documentation | -c %---------------------------------% - integer logfil, ndigit, mgetv0, - & msaupd, msaup2, msaitr, mseigt, msapps, msgets, mseupd, - & mnaupd, mnaup2, mnaitr, mneigh, mnapps, mngets, mneupd, - & mcaupd, mcaup2, mcaitr, mceigh, mcapps, mcgets, mceupd - common /debug/ - & logfil, ndigit, mgetv0, - & msaupd, msaup2, msaitr, mseigt, msapps, msgets, mseupd, - & mnaupd, mnaup2, mnaitr, mneigh, mnapps, mngets, mneupd, - & mcaupd, mcaup2, mcaitr, mceigh, mcapps, mcgets, mceupd diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dgetv0.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dgetv0.f deleted file mode 100644 index 40d384e420..0000000000 --- 
a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dgetv0.f +++ /dev/null @@ -1,419 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dgetv0 -c -c\Description: -c Generate a random initial residual vector for the Arnoldi process. -c Force the residual vector to be in the range of the operator OP. -c -c\Usage: -c call dgetv0 -c ( IDO, BMAT, ITRY, INITV, N, J, V, LDV, RESID, RNORM, -c IPNTR, WORKD, IERR ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to dgetv0. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B in the (generalized) -c eigenvalue problem A*x = lambda*B*x. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c ITRY Integer. (INPUT) -c ITRY counts the number of times that dgetv0 is called. -c It should be set to 1 on the initial call to dgetv0. -c -c INITV Logical variable. (INPUT) -c .TRUE. => the initial residual vector is given in RESID. -c .FALSE. => generate a random initial residual vector. -c -c N Integer. (INPUT) -c Dimension of the problem. -c -c J Integer. (INPUT) -c Index of the residual vector to be generated, with respect to -c the Arnoldi process. J > 1 in case of a "restart". -c -c V Double precision N by J array. 
(INPUT) -c The first J-1 columns of V contain the current Arnoldi basis -c if this is a "restart". -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c RESID Double precision array of length N. (INPUT/OUTPUT) -c Initial residual vector to be generated. If RESID is -c provided, force RESID into the range of the operator OP. -c -c RNORM Double precision scalar. (OUTPUT) -c B-norm of the generated residual. -c -c IPNTR Integer array of length 3. (OUTPUT) -c -c WORKD Double precision work array of length 2*N. (REVERSE COMMUNICATION). -c On exit, WORK(1:N) = B*RESID to be used in SSAITR. -c -c IERR Integer. (OUTPUT) -c = 0: Normal exit. -c = -1: Cannot generate a nontrivial restarted residual vector -c in the range of the operator OP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine for vector output. -c dlarnv LAPACK routine for generating a random vector. -c dgemv Level 2 BLAS routine for matrix vector multiplication. -c dcopy Level 1 BLAS that copies one vector to another. -c ddot Level 1 BLAS that computes the scalar product of two vectors. -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: getv0.F SID: 2.7 DATE OF SID: 04/07/99 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dgetv0 - & ( ido, bmat, itry, initv, n, j, v, ldv, resid, rnorm, - & ipntr, workd, ierr ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - logical initv - integer ido, ierr, itry, j, ldv, n - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Double precision - & resid(n), v(ldv,j), workd(2*n) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical first, inits, orth - integer idist, iseed(4), iter, msglvl, jj - Double precision - & rnorm0 - save first, iseed, inits, iter, msglvl, orth, rnorm0 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dlarnv, dvout, dcopy, dgemv, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & ddot, dnrm2 - external ddot, dnrm2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs, sqrt -c -c %-----------------% -c | Data Statements | -c %-----------------% -c - data inits /.true./ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c -c %-----------------------------------% -c | Initialize the seed of the LAPACK | -c | random number 
generator | -c %-----------------------------------% -c - if (inits) then - iseed(1) = 1 - iseed(2) = 3 - iseed(3) = 5 - iseed(4) = 7 - inits = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mgetv0 -c - ierr = 0 - iter = 0 - first = .FALSE. - orth = .FALSE. -c -c %-----------------------------------------------------% -c | Possibly generate a random starting vector in RESID | -c | Use a LAPACK random number generator used by the | -c | matrix generation routines. | -c | idist = 1: uniform (0,1) distribution; | -c | idist = 2: uniform (-1,1) distribution; | -c | idist = 3: normal (0,1) distribution; | -c %-----------------------------------------------------% -c - if (.not.initv) then - idist = 2 - call dlarnv (idist, iseed, n, resid) - end if -c -c %----------------------------------------------------------% -c | Force the starting vector into the range of OP to handle | -c | the generalized problem when B is possibly (singular). | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nopx = nopx + 1 - ipntr(1) = 1 - ipntr(2) = n + 1 - call dcopy (n, resid, 1, workd, 1) - ido = -1 - go to 9000 - end if - end if -c -c %-----------------------------------------% -c | Back from computing OP*(initial-vector) | -c %-----------------------------------------% -c - if (first) go to 20 -c -c %-----------------------------------------------% -c | Back from computing B*(orthogonalized-vector) | -c %-----------------------------------------------% -c - if (orth) go to 40 -c - if (bmat .eq. 'G') then - call second (t3) - tmvopx = tmvopx + (t3 - t2) - end if -c -c %------------------------------------------------------% -c | Starting vector is now in the range of OP; r = OP*r; | -c | Compute B-norm of starting vector. 
| -c %------------------------------------------------------% -c - call second (t2) - first = .TRUE. - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, workd(n+1), 1, resid, 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd, 1) - end if -c - 20 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - first = .FALSE. - if (bmat .eq. 'G') then - rnorm0 = ddot (n, resid, 1, workd, 1) - rnorm0 = sqrt(abs(rnorm0)) - else if (bmat .eq. 'I') then - rnorm0 = dnrm2(n, resid, 1) - end if - rnorm = rnorm0 -c -c %---------------------------------------------% -c | Exit if this is the very first Arnoldi step | -c %---------------------------------------------% -c - if (j .eq. 1) go to 50 -c -c %---------------------------------------------------------------- -c | Otherwise need to B-orthogonalize the starting vector against | -c | the current Arnoldi basis using Gram-Schmidt with iter. ref. | -c | This is the case where an invariant subspace is encountered | -c | in the middle of the Arnoldi factorization. | -c | | -c | s = V^{T}*B*r; r = r - V*s; | -c | | -c | Stopping criteria used for iter. ref. is discussed in | -c | Parlett's book, page 107 and in Gragg & Reichel TOMS paper. | -c %---------------------------------------------------------------% -c - orth = .TRUE. - 30 continue -c - call dgemv ('T', n, j-1, one, v, ldv, workd, 1, - & zero, workd(n+1), 1) - call dgemv ('N', n, j-1, -one, v, ldv, workd(n+1), 1, - & one, resid, 1) -c -c %----------------------------------------------------------% -c | Compute the B-norm of the orthogonalized starting vector | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 
'I') then - call dcopy (n, resid, 1, workd, 1) - end if -c - 40 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - rnorm = ddot (n, resid, 1, workd, 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = dnrm2(n, resid, 1) - end if -c -c %--------------------------------------% -c | Check for further orthogonalization. | -c %--------------------------------------% -c - if (msglvl .gt. 2) then - call dvout (logfil, 1, rnorm0, ndigit, - & '_getv0: re-orthonalization ; rnorm0 is') - call dvout (logfil, 1, rnorm, ndigit, - & '_getv0: re-orthonalization ; rnorm is') - end if -c - if (rnorm .gt. 0.717*rnorm0) go to 50 -c - iter = iter + 1 - if (iter .le. 5) then -c -c %-----------------------------------% -c | Perform iterative refinement step | -c %-----------------------------------% -c - rnorm0 = rnorm - go to 30 - else -c -c %------------------------------------% -c | Iterative refinement step "failed" | -c %------------------------------------% -c - do 45 jj = 1, n - resid(jj) = zero - 45 continue - rnorm = zero - ierr = -1 - end if -c - 50 continue -c - if (msglvl .gt. 0) then - call dvout (logfil, 1, rnorm, ndigit, - & '_getv0: B-norm of initial / restarted starting vector') - end if - if (msglvl .gt. 
3) then - call dvout (logfil, n, resid, ndigit, - & '_getv0: initial / restarted starting vector') - end if - ido = 99 -c - call second (t1) - tgetv0 = tgetv0 + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of dgetv0 | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dlaqrb.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dlaqrb.f deleted file mode 100644 index d851b86361..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dlaqrb.f +++ /dev/null @@ -1,521 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dlaqrb -c -c\Description: -c Compute the eigenvalues and the Schur decomposition of an upper -c Hessenberg submatrix in rows and columns ILO to IHI. Only the -c last component of the Schur vectors are computed. -c -c This is mostly a modification of the LAPACK routine dlahqr. -c -c\Usage: -c call dlaqrb -c ( WANTT, N, ILO, IHI, H, LDH, WR, WI, Z, INFO ) -c -c\Arguments -c WANTT Logical variable. (INPUT) -c = .TRUE. : the full Schur form T is required; -c = .FALSE.: only eigenvalues are required. -c -c N Integer. (INPUT) -c The order of the matrix H. N >= 0. -c -c ILO Integer. (INPUT) -c IHI Integer. (INPUT) -c It is assumed that H is already upper quasi-triangular in -c rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless -c ILO = 1). SLAQRB works primarily with the Hessenberg -c submatrix in rows and columns ILO to IHI, but applies -c transformations to all of H if WANTT is .TRUE.. -c 1 <= ILO <= max(1,IHI); IHI <= N. -c -c H Double precision array, dimension (LDH,N). (INPUT/OUTPUT) -c On entry, the upper Hessenberg matrix H. -c On exit, if WANTT is .TRUE., H is upper quasi-triangular in -c rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in -c standard form. If WANTT is .FALSE., the contents of H are -c unspecified on exit. -c -c LDH Integer. 
(INPUT) -c The leading dimension of the array H. LDH >= max(1,N). -c -c WR Double precision array, dimension (N). (OUTPUT) -c WI Double precision array, dimension (N). (OUTPUT) -c The real and imaginary parts, respectively, of the computed -c eigenvalues ILO to IHI are stored in the corresponding -c elements of WR and WI. If two eigenvalues are computed as a -c complex conjugate pair, they are stored in consecutive -c elements of WR and WI, say the i-th and (i+1)th, with -c WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the -c eigenvalues are stored in the same order as on the diagonal -c of the Schur form returned in H, with WR(i) = H(i,i), and, if -c H(i:i+1,i:i+1) is a 2-by-2 diagonal block, -c WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). -c -c Z Double precision array, dimension (N). (OUTPUT) -c On exit Z contains the last components of the Schur vectors. -c -c INFO Integer. (OUPUT) -c = 0: successful exit -c > 0: SLAQRB failed to compute all the eigenvalues ILO to IHI -c in a total of 30*(IHI-ILO+1) iterations; if INFO = i, -c elements i+1:ihi of WR and WI contain those eigenvalues -c which have been successfully computed. -c -c\Remarks -c 1. None. -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c dlabad LAPACK routine that computes machine constants. -c dlamch LAPACK routine that determines machine constants. -c dlanhs LAPACK routine that computes various norms of a matrix. -c dlanv2 LAPACK routine that computes the Schur factorization of -c 2 by 2 nonsymmetric matrix in standard form. -c dlarfg LAPACK Householder reflection construction routine. -c dcopy Level 1 BLAS that copies one vector to another. -c drot Level 1 BLAS that applies a rotation to a 2 by 2 matrix. - -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c Modified from the LAPACK routine dlahqr so that only the -c last component of the Schur vectors are computed. -c -c\SCCS Information: @(#) -c FILE: laqrb.F SID: 2.2 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c 1. None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dlaqrb ( wantt, n, ilo, ihi, h, ldh, wr, wi, - & z, info ) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - logical wantt - integer ihi, ilo, info, ldh, n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & h( ldh, * ), wi( * ), wr( * ), z( * ) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & zero, one, dat1, dat2 - parameter (zero = 0.0D+0, one = 1.0D+0, dat1 = 7.5D-1, - & dat2 = -4.375D-1) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - integer i, i1, i2, itn, its, j, k, l, m, nh, nr - Double precision - & cs, h00, h10, h11, h12, h21, h22, h33, h33s, - & h43h34, h44, h44s, ovfl, s, smlnum, sn, sum, - & t1, t2, t3, tst1, ulp, unfl, v1, v2, v3 - Double precision - & v( 3 ), work( 1 ) -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlamch, dlanhs - external dlamch, dlanhs -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy, dlabad, dlanv2, dlarfg, drot -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - info = 0 -c -c %--------------------------% -c | Quick return if possible | -c %--------------------------% -c - if( n.eq.0 ) - & return - if( ilo.eq.ihi ) then - wr( ilo ) = h( ilo, ilo ) - wi( ilo ) = zero - return - end if -c -c %---------------------------------------------% -c | 
Initialize the vector of last components of | -c | the Schur vectors for accumulation. | -c %---------------------------------------------% -c - do 5 j = 1, n-1 - z(j) = zero - 5 continue - z(n) = one -c - nh = ihi - ilo + 1 -c -c %-------------------------------------------------------------% -c | Set machine-dependent constants for the stopping criterion. | -c | If norm(H) <= sqrt(OVFL), overflow should not occur. | -c %-------------------------------------------------------------% -c - unfl = dlamch( 'safe minimum' ) - ovfl = one / unfl - call dlabad( unfl, ovfl ) - ulp = dlamch( 'precision' ) - smlnum = unfl*( nh / ulp ) -c -c %---------------------------------------------------------------% -c | I1 and I2 are the indices of the first row and last column | -c | of H to which transformations must be applied. If eigenvalues | -c | only are computed, I1 and I2 are set inside the main loop. | -c | Zero out H(J+2,J) = ZERO for J=1:N if WANTT = .TRUE. | -c | else H(J+2,J) for J=ILO:IHI-ILO-1 if WANTT = .FALSE. | -c %---------------------------------------------------------------% -c - if( wantt ) then - i1 = 1 - i2 = n - do 8 i=1,i2-2 - h(i1+i+1,i) = zero - 8 continue - else - do 9 i=1, ihi-ilo-1 - h(ilo+i+1,ilo+i-1) = zero - 9 continue - end if -c -c %---------------------------------------------------% -c | ITN is the total number of QR iterations allowed. | -c %---------------------------------------------------% -c - itn = 30*nh -c -c ------------------------------------------------------------------ -c The main loop begins here. I is the loop index and decreases from -c IHI to ILO in steps of 1 or 2. Each iteration of the loop works -c with the active submatrix in rows and columns L to I. -c Eigenvalues I+1 to IHI have already converged. Either L = ILO or -c H(L,L-1) is negligible so that the matrix splits. 
-c ------------------------------------------------------------------ -c - i = ihi - 10 continue - l = ilo - if( i.lt.ilo ) - & go to 150 - -c %--------------------------------------------------------------% -c | Perform QR iterations on rows and columns ILO to I until a | -c | submatrix of order 1 or 2 splits off at the bottom because a | -c | subdiagonal element has become negligible. | -c %--------------------------------------------------------------% - - do 130 its = 0, itn -c -c %----------------------------------------------% -c | Look for a single small subdiagonal element. | -c %----------------------------------------------% -c - do 20 k = i, l + 1, -1 - tst1 = abs( h( k-1, k-1 ) ) + abs( h( k, k ) ) - if( tst1.eq.zero ) - & tst1 = dlanhs( '1', i-l+1, h( l, l ), ldh, work ) - if( abs( h( k, k-1 ) ).le.max( ulp*tst1, smlnum ) ) - & go to 30 - 20 continue - 30 continue - l = k - if( l.gt.ilo ) then -c -c %------------------------% -c | H(L,L-1) is negligible | -c %------------------------% -c - h( l, l-1 ) = zero - end if -c -c %-------------------------------------------------------------% -c | Exit from loop if a submatrix of order 1 or 2 has split off | -c %-------------------------------------------------------------% -c - if( l.ge.i-1 ) - & go to 140 -c -c %---------------------------------------------------------% -c | Now the active submatrix is in rows and columns L to I. | -c | If eigenvalues only are being computed, only the active | -c | submatrix need be transformed. | -c %---------------------------------------------------------% -c - if( .not.wantt ) then - i1 = l - i2 = i - end if -c - if( its.eq.10 .or. 
its.eq.20 ) then -c -c %-------------------% -c | Exceptional shift | -c %-------------------% -c - s = abs( h( i, i-1 ) ) + abs( h( i-1, i-2 ) ) - h44 = dat1*s - h33 = h44 - h43h34 = dat2*s*s -c - else -c -c %-----------------------------------------% -c | Prepare to use Wilkinson's double shift | -c %-----------------------------------------% -c - h44 = h( i, i ) - h33 = h( i-1, i-1 ) - h43h34 = h( i, i-1 )*h( i-1, i ) - end if -c -c %-----------------------------------------------------% -c | Look for two consecutive small subdiagonal elements | -c %-----------------------------------------------------% -c - do 40 m = i - 2, l, -1 -c -c %---------------------------------------------------------% -c | Determine the effect of starting the double-shift QR | -c | iteration at row M, and see if this would make H(M,M-1) | -c | negligible. | -c %---------------------------------------------------------% -c - h11 = h( m, m ) - h22 = h( m+1, m+1 ) - h21 = h( m+1, m ) - h12 = h( m, m+1 ) - h44s = h44 - h11 - h33s = h33 - h11 - v1 = ( h33s*h44s-h43h34 ) / h21 + h12 - v2 = h22 - h11 - h33s - h44s - v3 = h( m+2, m+1 ) - s = abs( v1 ) + abs( v2 ) + abs( v3 ) - v1 = v1 / s - v2 = v2 / s - v3 = v3 / s - v( 1 ) = v1 - v( 2 ) = v2 - v( 3 ) = v3 - if( m.eq.l ) - & go to 50 - h00 = h( m-1, m-1 ) - h10 = h( m, m-1 ) - tst1 = abs( v1 )*( abs( h00 )+abs( h11 )+abs( h22 ) ) - if( abs( h10 )*( abs( v2 )+abs( v3 ) ).le.ulp*tst1 ) - & go to 50 - 40 continue - 50 continue -c -c %----------------------% -c | Double-shift QR step | -c %----------------------% -c - do 120 k = m, i - 1 -c -c ------------------------------------------------------------ -c The first iteration of this loop determines a reflection G -c from the vector V and applies it from left and right to H, -c thus creating a nonzero bulge below the subdiagonal. 
-c -c Each subsequent iteration determines a reflection G to -c restore the Hessenberg form in the (K-1)th column, and thus -c chases the bulge one step toward the bottom of the active -c submatrix. NR is the order of G. -c ------------------------------------------------------------ -c - nr = min( 3, i-k+1 ) - if( k.gt.m ) - & call dcopy( nr, h( k, k-1 ), 1, v, 1 ) - call dlarfg( nr, v( 1 ), v( 2 ), 1, t1 ) - if( k.gt.m ) then - h( k, k-1 ) = v( 1 ) - h( k+1, k-1 ) = zero - if( k.lt.i-1 ) - & h( k+2, k-1 ) = zero - else if( m.gt.l ) then - h( k, k-1 ) = -h( k, k-1 ) - end if - v2 = v( 2 ) - t2 = t1*v2 - if( nr.eq.3 ) then - v3 = v( 3 ) - t3 = t1*v3 -c -c %------------------------------------------------% -c | Apply G from the left to transform the rows of | -c | the matrix in columns K to I2. | -c %------------------------------------------------% -c - do 60 j = k, i2 - sum = h( k, j ) + v2*h( k+1, j ) + v3*h( k+2, j ) - h( k, j ) = h( k, j ) - sum*t1 - h( k+1, j ) = h( k+1, j ) - sum*t2 - h( k+2, j ) = h( k+2, j ) - sum*t3 - 60 continue -c -c %----------------------------------------------------% -c | Apply G from the right to transform the columns of | -c | the matrix in rows I1 to min(K+3,I). | -c %----------------------------------------------------% -c - do 70 j = i1, min( k+3, i ) - sum = h( j, k ) + v2*h( j, k+1 ) + v3*h( j, k+2 ) - h( j, k ) = h( j, k ) - sum*t1 - h( j, k+1 ) = h( j, k+1 ) - sum*t2 - h( j, k+2 ) = h( j, k+2 ) - sum*t3 - 70 continue -c -c %----------------------------------% -c | Accumulate transformations for Z | -c %----------------------------------% -c - sum = z( k ) + v2*z( k+1 ) + v3*z( k+2 ) - z( k ) = z( k ) - sum*t1 - z( k+1 ) = z( k+1 ) - sum*t2 - z( k+2 ) = z( k+2 ) - sum*t3 - - else if( nr.eq.2 ) then -c -c %------------------------------------------------% -c | Apply G from the left to transform the rows of | -c | the matrix in columns K to I2. 
| -c %------------------------------------------------% -c - do 90 j = k, i2 - sum = h( k, j ) + v2*h( k+1, j ) - h( k, j ) = h( k, j ) - sum*t1 - h( k+1, j ) = h( k+1, j ) - sum*t2 - 90 continue -c -c %----------------------------------------------------% -c | Apply G from the right to transform the columns of | -c | the matrix in rows I1 to min(K+3,I). | -c %----------------------------------------------------% -c - do 100 j = i1, i - sum = h( j, k ) + v2*h( j, k+1 ) - h( j, k ) = h( j, k ) - sum*t1 - h( j, k+1 ) = h( j, k+1 ) - sum*t2 - 100 continue -c -c %----------------------------------% -c | Accumulate transformations for Z | -c %----------------------------------% -c - sum = z( k ) + v2*z( k+1 ) - z( k ) = z( k ) - sum*t1 - z( k+1 ) = z( k+1 ) - sum*t2 - end if - 120 continue - - 130 continue -c -c %-------------------------------------------------------% -c | Failure to converge in remaining number of iterations | -c %-------------------------------------------------------% -c - info = i - return - - 140 continue - - if( l.eq.i ) then -c -c %------------------------------------------------------% -c | H(I,I-1) is negligible: one eigenvalue has converged | -c %------------------------------------------------------% -c - wr( i ) = h( i, i ) - wi( i ) = zero - - else if( l.eq.i-1 ) then -c -c %--------------------------------------------------------% -c | H(I-1,I-2) is negligible; | -c | a pair of eigenvalues have converged. | -c | | -c | Transform the 2-by-2 submatrix to standard Schur form, | -c | and compute and store the eigenvalues. | -c %--------------------------------------------------------% -c - call dlanv2( h( i-1, i-1 ), h( i-1, i ), h( i, i-1 ), - & h( i, i ), wr( i-1 ), wi( i-1 ), wr( i ), wi( i ), - & cs, sn ) - - if( wantt ) then -c -c %-----------------------------------------------------% -c | Apply the transformation to the rest of H and to Z, | -c | as required. 
| -c %-----------------------------------------------------% -c - if( i2.gt.i ) - & call drot( i2-i, h( i-1, i+1 ), ldh, h( i, i+1 ), ldh, - & cs, sn ) - call drot( i-i1-1, h( i1, i-1 ), 1, h( i1, i ), 1, cs, sn ) - sum = cs*z( i-1 ) + sn*z( i ) - z( i ) = cs*z( i ) - sn*z( i-1 ) - z( i-1 ) = sum - end if - end if -c -c %---------------------------------------------------------% -c | Decrement number of remaining iterations, and return to | -c | start of the main loop with new value of I. | -c %---------------------------------------------------------% -c - itn = itn - its - i = l - 1 - go to 10 - - 150 continue - return -c -c %---------------% -c | End of dlaqrb | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaitr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaitr.f deleted file mode 100644 index 02c35c0a24..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaitr.f +++ /dev/null @@ -1,840 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dnaitr -c -c\Description: -c Reverse communication interface for applying NP additional steps to -c a K step nonsymmetric Arnoldi factorization. -c -c Input: OP*V_{k} - V_{k}*H = r_{k}*e_{k}^T -c -c with (V_{k}^T)*B*V_{k} = I, (V_{k}^T)*B*r_{k} = 0. -c -c Output: OP*V_{k+p} - V_{k+p}*H = r_{k+p}*e_{k+p}^T -c -c with (V_{k+p}^T)*B*V_{k+p} = I, (V_{k+p}^T)*B*r_{k+p} = 0. -c -c where OP and B are as in dnaupd. The B-norm of r_{k+p} is also -c computed and returned. -c -c\Usage: -c call dnaitr -c ( IDO, BMAT, N, K, NP, NB, RESID, RNORM, V, LDV, H, LDH, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. 
-c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c This is for the restart phase to force the new -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y, -c IPNTR(3) is the pointer into WORK for B * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c When the routine is used in the "shift-and-invert" mode, the -c vector B * Q is already available and do not need to be -c recompute in forming OP * Q. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. See dnaupd. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*M**x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c K Integer. (INPUT) -c Current size of V and H. -c -c NP Integer. (INPUT) -c Number of additional Arnoldi steps to take. -c -c NB Integer. (INPUT) -c Blocksize to be used in the recurrence. -c Only work for NB = 1 right now. The goal is to have a -c program that implement both the block and non-block method. -c -c RESID Double precision array of length N. (INPUT/OUTPUT) -c On INPUT: RESID contains the residual vector r_{k}. -c On OUTPUT: RESID contains the residual vector r_{k+p}. -c -c RNORM Double precision scalar. (INPUT/OUTPUT) -c B-norm of the starting residual on input. -c B-norm of the updated residual r_{k+p} on output. -c -c V Double precision N by K+NP array. (INPUT/OUTPUT) -c On INPUT: V contains the Arnoldi vectors in the first K -c columns. 
-c On OUTPUT: V contains the new NP Arnoldi vectors in the next -c NP columns. The first K columns are unchanged. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Double precision (K+NP) by (K+NP) array. (INPUT/OUTPUT) -c H is used to store the generated upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORK for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Double precision work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The calling program should not -c use WORKD as temporary workspace during the iteration !!!!!! -c On input, WORKD(1:N) = B*RESID and is used to save some -c computation at the first step. -c -c INFO Integer. (OUTPUT) -c = 0: Normal exit. -c > 0: Size of the spanning invariant subspace of OP found. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c dgetv0 ARPACK routine to generate the initial vector. 
-c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dmout ARPACK utility routine that prints matrices -c dvout ARPACK utility routine that prints vectors. -c dlabad LAPACK routine that computes machine constants. -c dlamch LAPACK routine that determines machine constants. -c dlascl LAPACK routine for careful scaling of a matrix. -c dlanhs LAPACK routine that computes various norms of a matrix. -c dgemv Level 2 BLAS routine for matrix vector multiplication. -c daxpy Level 1 BLAS that computes a vector triad. -c dscal Level 1 BLAS that scales a vector. -c dcopy Level 1 BLAS that copies one vector to another . -c ddot Level 1 BLAS that computes the scalar product of two vectors. -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: naitr.F SID: 2.4 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c The algorithm implemented is: -c -c restart = .false. -c Given V_{k} = [v_{1}, ..., v_{k}], r_{k}; -c r_{k} contains the initial residual vector even for k = 0; -c Also assume that rnorm = || B*r_{k} || and B*r_{k} are already -c computed by the calling program. -c -c betaj = rnorm ; p_{k+1} = B*r_{k} ; -c For j = k+1, ..., k+np Do -c 1) if ( betaj < tol ) stop or restart depending on j. -c ( At present tol is zero ) -c if ( restart ) generate a new starting vector. -c 2) v_{j} = r(j-1)/betaj; V_{j} = [V_{j-1}, v_{j}]; -c p_{j} = p_{j}/betaj -c 3) r_{j} = OP*v_{j} where OP is defined as in dnaupd -c For shift-invert mode p_{j} = B*v_{j} is already available. -c wnorm = || OP*v_{j} || -c 4) Compute the j-th step residual vector. 
-c w_{j} = V_{j}^T * B * OP * v_{j} -c r_{j} = OP*v_{j} - V_{j} * w_{j} -c H(:,j) = w_{j}; -c H(j,j-1) = rnorm -c rnorm = || r_(j) || -c If (rnorm > 0.717*wnorm) accept step and go back to 1) -c 5) Re-orthogonalization step: -c s = V_{j}'*B*r_{j} -c r_{j} = r_{j} - V_{j}*s; rnorm1 = || r_{j} || -c alphaj = alphaj + s_{j}; -c 6) Iterative refinement step: -c If (rnorm1 > 0.717*rnorm) then -c rnorm = rnorm1 -c accept step and go back to 1) -c Else -c rnorm = rnorm1 -c If this is the first time in step 6), go to 5) -c Else r_{j} lies in the span of V_{j} numerically. -c Set r_{j} = 0 and rnorm = 0; go to 1) -c EndIf -c End Do -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dnaitr - & (ido, bmat, n, k, np, nb, resid, rnorm, v, ldv, h, ldh, - & ipntr, workd, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - integer ido, info, k, ldh, ldv, n, nb, np - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Double precision - & h(ldh,k+np), resid(n), v(ldv,k+np), workd(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical first, orth1, orth2, rstart, step3, step4 - integer ierr, i, infol, ipj, irj, ivj, iter, itry, j, msglvl, - & jj - Double precision - & betaj, ovfl, temp1, rnorm1, smlnum, tst1, ulp, unfl, - & wnorm - save first, orth1, orth2, rstart, step3, step4, - & ierr, ipj, irj, ivj, iter, itry, j, msglvl, ovfl, - & betaj, rnorm1, smlnum, ulp, unfl, wnorm -c -c %-----------------------% -c | Local Array 
Arguments | -c %-----------------------% -c - Double precision - & xtemp(2) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external daxpy, dcopy, dscal, dgemv, dgetv0, dlabad, - & dvout, dmout, ivout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & ddot, dnrm2, dlanhs, dlamch - external ddot, dnrm2, dlanhs, dlamch -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs, sqrt -c -c %-----------------% -c | Data statements | -c %-----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------% -c | Set machine-dependent constants for the | -c | the splitting and deflation criterion. | -c | If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine dlahqr | -c %-----------------------------------------% -c - unfl = dlamch( 'safe minimum' ) - ovfl = one / unfl - call dlabad( unfl, ovfl ) - ulp = dlamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mnaitr -c -c %------------------------------% -c | Initial call to this routine | -c %------------------------------% -c - info = 0 - step3 = .false. - step4 = .false. - rstart = .false. - orth1 = .false. - orth2 = .false. - j = k + 1 - ipj = 1 - irj = ipj + n - ivj = irj + n - end if -c -c %-------------------------------------------------% -c | When in reverse communication mode one of: | -c | STEP3, STEP4, ORTH1, ORTH2, RSTART | -c | will be .true. when .... | -c | STEP3: return from computing OP*v_{j}. 
| -c | STEP4: return from computing B-norm of OP*v_{j} | -c | ORTH1: return from computing B-norm of r_{j+1} | -c | ORTH2: return from computing B-norm of | -c | correction to the residual vector. | -c | RSTART: return from OP computations needed by | -c | dgetv0. | -c %-------------------------------------------------% -c - if (step3) go to 50 - if (step4) go to 60 - if (orth1) go to 70 - if (orth2) go to 90 - if (rstart) go to 30 -c -c %-----------------------------% -c | Else this is the first step | -c %-----------------------------% -c -c %--------------------------------------------------------------% -c | | -c | A R N O L D I I T E R A T I O N L O O P | -c | | -c | Note: B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | -c %--------------------------------------------------------------% - - 1000 continue -c - if (msglvl .gt. 1) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: generating Arnoldi vector number') - call dvout (logfil, 1, rnorm, ndigit, - & '_naitr: B-norm of the current residual is') - end if -c -c %---------------------------------------------------% -c | STEP 1: Check if the B norm of j-th residual | -c | vector is zero. Equivalent to determing whether | -c | an exact j-step Arnoldi factorization is present. | -c %---------------------------------------------------% -c - betaj = rnorm - if (rnorm .gt. zero) go to 40 -c -c %---------------------------------------------------% -c | Invariant subspace found, generate a new starting | -c | vector which is orthogonal to the current Arnoldi | -c | basis and continue the iteration. | -c %---------------------------------------------------% -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: ****** RESTART AT STEP ******') - end if -c -c %---------------------------------------------% -c | ITRY is the loop variable that controls the | -c | maximum amount of times that a restart is | -c | attempted. 
NRSTRT is used by stat.h | -c %---------------------------------------------% -c - betaj = zero - nrstrt = nrstrt + 1 - itry = 1 - 20 continue - rstart = .true. - ido = 0 - 30 continue -c -c %--------------------------------------% -c | If in reverse communication mode and | -c | RSTART = .true. flow returns here. | -c %--------------------------------------% -c - call dgetv0 (ido, bmat, itry, .false., n, j, v, ldv, - & resid, rnorm, ipntr, workd, ierr) - if (ido .ne. 99) go to 9000 - if (ierr .lt. 0) then - itry = itry + 1 - if (itry .le. 3) go to 20 -c -c %------------------------------------------------% -c | Give up after several restart attempts. | -c | Set INFO to the size of the invariant subspace | -c | which spans OP and exit. | -c %------------------------------------------------% -c - info = j - 1 - call second (t1) - tnaitr = tnaitr + (t1 - t0) - ido = 99 - go to 9000 - end if -c - 40 continue -c -c %---------------------------------------------------------% -c | STEP 2: v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm | -c | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | -c | when reciprocating a small RNORM, test against lower | -c | machine bound. | -c %---------------------------------------------------------% -c - call dcopy (n, resid, 1, v(1,j), 1) - if (rnorm .ge. unfl) then - temp1 = one / rnorm - call dscal (n, temp1, v(1,j), 1) - call dscal (n, temp1, workd(ipj), 1) - else -c -c %-----------------------------------------% -c | To scale both v_{j} and p_{j} carefully | -c | use LAPACK routine SLASCL | -c %-----------------------------------------% -c - call dlascl ('General', i, i, rnorm, one, n, 1, - & v(1,j), n, infol) - call dlascl ('General', i, i, rnorm, one, n, 1, - & workd(ipj), n, infol) - end if -c -c %------------------------------------------------------% -c | STEP 3: r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | -c | Note that this is not quite yet r_{j}. 
See STEP 4 | -c %------------------------------------------------------% -c - step3 = .true. - nopx = nopx + 1 - call second (t2) - call dcopy (n, v(1,j), 1, workd(ivj), 1) - ipntr(1) = ivj - ipntr(2) = irj - ipntr(3) = ipj - ido = 1 -c -c %-----------------------------------% -c | Exit in order to compute OP*v_{j} | -c %-----------------------------------% -c - go to 9000 - 50 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IRJ:IRJ+N-1) := OP*v_{j} | -c | if step3 = .true. | -c %----------------------------------% -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) - - step3 = .false. -c -c %------------------------------------------% -c | Put another copy of OP*v_{j} into RESID. | -c %------------------------------------------% -c - call dcopy (n, workd(irj), 1, resid, 1) -c -c %---------------------------------------% -c | STEP 4: Finish extending the Arnoldi | -c | factorization to length j. | -c %---------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - step4 = .true. - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-------------------------------------% -c | Exit in order to compute B*OP*v_{j} | -c %-------------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd(ipj), 1) - end if - 60 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j} | -c | if step4 = .true. | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - step4 = .false. -c -c %-------------------------------------% -c | The following is needed for STEP 5. | -c | Compute the B-norm of OP*v_{j}. | -c %-------------------------------------% -c - if (bmat .eq. 'G') then - wnorm = ddot (n, resid, 1, workd(ipj), 1) - wnorm = sqrt(abs(wnorm)) - else if (bmat .eq. 
'I') then - wnorm = dnrm2(n, resid, 1) - end if -c -c %-----------------------------------------% -c | Compute the j-th residual corresponding | -c | to the j step factorization. | -c | Use Classical Gram Schmidt and compute: | -c | w_{j} <- V_{j}^T * B * OP * v_{j} | -c | r_{j} <- OP*v_{j} - V_{j} * w_{j} | -c %-----------------------------------------% -c -c -c %------------------------------------------% -c | Compute the j Fourier coefficients w_{j} | -c | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}. | -c %------------------------------------------% -c - call dgemv ('T', n, j, one, v, ldv, workd(ipj), 1, - & zero, h(1,j), 1) -c -c %--------------------------------------% -c | Orthogonalize r_{j} against V_{j}. | -c | RESID contains OP*v_{j}. See STEP 3. | -c %--------------------------------------% -c - call dgemv ('N', n, j, -one, v, ldv, h(1,j), 1, - & one, resid, 1) -c - if (j .gt. 1) h(j,j-1) = betaj -c - call second (t4) -c - orth1 = .true. -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*r_{j} | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd(ipj), 1) - end if - 70 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH1 = .true. | -c | WORKD(IPJ:IPJ+N-1) := B*r_{j}. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - orth1 = .false. -c -c %------------------------------% -c | Compute the B-norm of r_{j}. | -c %------------------------------% -c - if (bmat .eq. 'G') then - rnorm = ddot (n, resid, 1, workd(ipj), 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 
'I') then - rnorm = dnrm2(n, resid, 1) - end if -c -c %-----------------------------------------------------------% -c | STEP 5: Re-orthogonalization / Iterative refinement phase | -c | Maximum NITER_ITREF tries. | -c | | -c | s = V_{j}^T * B * r_{j} | -c | r_{j} = r_{j} - V_{j}*s | -c | alphaj = alphaj + s_{j} | -c | | -c | The stopping criteria used for iterative refinement is | -c | discussed in Parlett's book SEP, page 107 and in Gragg & | -c | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990. | -c | Determine if we need to correct the residual. The goal is | -c | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} || | -c | The following test determines whether the sine of the | -c | angle between OP*x and the computed residual is less | -c | than or equal to 0.717. | -c %-----------------------------------------------------------% -c - if (rnorm .gt. 0.717*wnorm) go to 100 - iter = 0 - nrorth = nrorth + 1 -c -c %---------------------------------------------------% -c | Enter the Iterative refinement phase. If further | -c | refinement is necessary, loop back here. The loop | -c | variable is ITER. Perform a step of Classical | -c | Gram-Schmidt using all the Arnoldi vectors V_{j} | -c %---------------------------------------------------% -c - 80 continue -c - if (msglvl .gt. 2) then - xtemp(1) = wnorm - xtemp(2) = rnorm - call dvout (logfil, 2, xtemp, ndigit, - & '_naitr: re-orthonalization; wnorm and rnorm are') - call dvout (logfil, j, h(1,j), ndigit, - & '_naitr: j-th column of H') - end if -c -c %----------------------------------------------------% -c | Compute V_{j}^T * B * r_{j}. | -c | WORKD(IRJ:IRJ+J-1) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1). | -c %----------------------------------------------------% -c - call dgemv ('T', n, j, one, v, ldv, workd(ipj), 1, - & zero, workd(irj), 1) -c -c %---------------------------------------------% -c | Compute the correction to the residual: | -c | r_{j} = r_{j} - V_{j} * WORKD(IRJ:IRJ+J-1). 
| -c | The correction to H is v(:,1:J)*H(1:J,1:J) | -c | + v(:,1:J)*WORKD(IRJ:IRJ+J-1)*e'_j. | -c %---------------------------------------------% -c - call dgemv ('N', n, j, -one, v, ldv, workd(irj), 1, - & one, resid, 1) - call daxpy (j, one, workd(irj), 1, h(1,j), 1) -c - orth2 = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-----------------------------------% -c | Exit in order to compute B*r_{j}. | -c | r_{j} is the corrected residual. | -c %-----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd(ipj), 1) - end if - 90 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH2 = .true. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c -c %-----------------------------------------------------% -c | Compute the B-norm of the corrected residual r_{j}. | -c %-----------------------------------------------------% -c - if (bmat .eq. 'G') then - rnorm1 = ddot (n, resid, 1, workd(ipj), 1) - rnorm1 = sqrt(abs(rnorm1)) - else if (bmat .eq. 'I') then - rnorm1 = dnrm2(n, resid, 1) - end if -c - if (msglvl .gt. 0 .and. iter .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: Iterative refinement for Arnoldi residual') - if (msglvl .gt. 2) then - xtemp(1) = rnorm - xtemp(2) = rnorm1 - call dvout (logfil, 2, xtemp, ndigit, - & '_naitr: iterative refinement ; rnorm and rnorm1 are') - end if - end if -c -c %-----------------------------------------% -c | Determine if we need to perform another | -c | step of re-orthogonalization. | -c %-----------------------------------------% -c - if (rnorm1 .gt. 0.717*rnorm) then -c -c %---------------------------------------% -c | No need for further refinement. 
| -c | The cosine of the angle between the | -c | corrected residual vector and the old | -c | residual vector is greater than 0.717 | -c | In other words the corrected residual | -c | and the old residual vector share an | -c | angle of less than arcCOS(0.717) | -c %---------------------------------------% -c - rnorm = rnorm1 -c - else -c -c %-------------------------------------------% -c | Another step of iterative refinement step | -c | is required. NITREF is used by stat.h | -c %-------------------------------------------% -c - nitref = nitref + 1 - rnorm = rnorm1 - iter = iter + 1 - if (iter .le. 1) go to 80 -c -c %-------------------------------------------------% -c | Otherwise RESID is numerically in the span of V | -c %-------------------------------------------------% -c - do 95 jj = 1, n - resid(jj) = zero - 95 continue - rnorm = zero - end if -c -c %----------------------------------------------% -c | Branch here directly if iterative refinement | -c | wasn't necessary or after at most NITER_REF | -c | steps of iterative refinement. | -c %----------------------------------------------% -c - 100 continue -c - rstart = .false. - orth2 = .false. -c - call second (t5) - titref = titref + (t5 - t4) -c -c %------------------------------------% -c | STEP 6: Update j = j+1; Continue | -c %------------------------------------% -c - j = j + 1 - if (j .gt. k+np) then - call second (t1) - tnaitr = tnaitr + (t1 - t0) - ido = 99 - do 110 i = max(1,k), k+np-1 -c -c %--------------------------------------------% -c | Check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine dlahqr | -c %--------------------------------------------% -c - tst1 = abs( h( i, i ) ) + abs( h( i+1, i+1 ) ) - if( tst1.eq.zero ) - & tst1 = dlanhs( '1', k+np, h, ldh, workd(n+1) ) - if( abs( h( i+1,i ) ).le.max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 110 continue -c - if (msglvl .gt. 
2) then - call dmout (logfil, k+np, k+np, h, ldh, ndigit, - & '_naitr: Final upper Hessenberg matrix H of order K+NP') - end if -c - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Loop back to extend the factorization by another step. | -c %--------------------------------------------------------% -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 9000 continue - return -c -c %---------------% -c | End of dnaitr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnapps.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnapps.f deleted file mode 100644 index 5385c1b95b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnapps.f +++ /dev/null @@ -1,647 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dnapps -c -c\Description: -c Given the Arnoldi factorization -c -c A*V_{k} - V_{k}*H_{k} = r_{k+p}*e_{k+p}^T, -c -c apply NP implicit shifts resulting in -c -c A*(V_{k}*Q) - (V_{k}*Q)*(Q^T* H_{k}*Q) = r_{k+p}*e_{k+p}^T * Q -c -c where Q is an orthogonal matrix which is the product of rotations -c and reflections resulting from the NP bulge chage sweeps. -c The updated Arnoldi factorization becomes: -c -c A*VNEW_{k} - VNEW_{k}*HNEW_{k} = rnew_{k}*e_{k}^T. -c -c\Usage: -c call dnapps -c ( N, KEV, NP, SHIFTR, SHIFTI, V, LDV, H, LDH, RESID, Q, LDQ, -c WORKL, WORKD ) -c -c\Arguments -c N Integer. (INPUT) -c Problem size, i.e. size of matrix A. -c -c KEV Integer. (INPUT/OUTPUT) -c KEV+NP is the size of the input matrix H. -c KEV is the size of the updated matrix HNEW. KEV is only -c updated on ouput when fewer than NP shifts are applied in -c order to keep the conjugate pair together. -c -c NP Integer. 
(INPUT) -c Number of implicit shifts to be applied. -c -c SHIFTR, Double precision array of length NP. (INPUT) -c SHIFTI Real and imaginary part of the shifts to be applied. -c Upon, entry to dnapps, the shifts must be sorted so that the -c conjugate pairs are in consecutive locations. -c -c V Double precision N by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, V contains the current KEV+NP Arnoldi vectors. -c On OUTPUT, V contains the updated KEV Arnoldi vectors -c in the first KEV columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Double precision (KEV+NP) by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, H contains the current KEV+NP by KEV+NP upper -c Hessenber matrix of the Arnoldi factorization. -c On OUTPUT, H contains the updated KEV by KEV upper Hessenberg -c matrix in the KEV leading submatrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RESID Double precision array of length N. (INPUT/OUTPUT) -c On INPUT, RESID contains the the residual vector r_{k+p}. -c On OUTPUT, RESID is the update residual vector rnew_{k} -c in the first KEV locations. -c -c Q Double precision KEV+NP by KEV+NP work array. (WORKSPACE) -c Work array used to accumulate the rotations and reflections -c during the bulge chase sweep. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Double precision work array of length (KEV+NP). (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c WORKD Double precision work array of length 2*N. (WORKSPACE) -c Distributed array used in the application of the accumulated -c orthogonal matrix Q. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. 
Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dmout ARPACK utility routine that prints matrices. -c dvout ARPACK utility routine that prints vectors. -c dlabad LAPACK routine that computes machine constants. -c dlacpy LAPACK matrix copy routine. -c dlamch LAPACK routine that determines machine constants. -c dlanhs LAPACK routine that computes various norms of a matrix. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c dlarf LAPACK routine that applies Householder reflection to -c a matrix. -c dlarfg LAPACK Householder reflection construction routine. -c dlartg LAPACK Givens rotation construction routine. -c dlaset LAPACK matrix initialization routine. -c dgemv Level 2 BLAS routine for matrix vector multiplication. -c daxpy Level 1 BLAS that computes a vector triad. -c dcopy Level 1 BLAS that copies one vector to another . -c dscal Level 1 BLAS that scales a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: napps.F SID: 2.4 DATE OF SID: 3/28/97 RELEASE: 2 -c -c\Remarks -c 1. In this version, each shift is applied to all the sublocks of -c the Hessenberg matrix H and not just to the submatrix that it -c comes from. Deflation as in LAPACK routine dlahqr (QR algorithm -c for upper Hessenberg matrices ) is used. -c The subdiagonals of H are enforced to be non-negative. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dnapps - & ( n, kev, np, shiftr, shifti, v, ldv, h, ldh, resid, q, ldq, - & workl, workd ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer kev, ldh, ldq, ldv, n, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & h(ldh,kev+np), resid(n), shifti(np), shiftr(np), - & v(ldv,kev+np), q(ldq,kev+np), workd(2*n), workl(kev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - integer i, iend, ir, istart, j, jj, kplusp, msglvl, nr - logical cconj, first - Double precision - & c, f, g, h11, h12, h21, h22, h32, ovfl, r, s, sigmai, - & sigmar, smlnum, ulp, unfl, u(3), t, tau, tst1 - save first, ovfl, smlnum, ulp, unfl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external daxpy, dcopy, dscal, dlacpy, dlarfg, dlarf, - & dlaset, dlabad, second, dlartg -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlamch, dlanhs, dlapy2 - external dlamch, dlanhs, dlapy2 -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs, max, min -c -c %----------------% -c | Data statments | -c %----------------% -c - data first / .true. 
/ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------------% -c | Set machine-dependent constants for the | -c | stopping criterion. If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine dlahqr | -c %-----------------------------------------------% -c - unfl = dlamch( 'safe minimum' ) - ovfl = one / unfl - call dlabad( unfl, ovfl ) - ulp = dlamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mnapps - kplusp = kev + np -c -c %--------------------------------------------% -c | Initialize Q to the identity to accumulate | -c | the rotations and reflections | -c %--------------------------------------------% -c - call dlaset ('All', kplusp, kplusp, zero, one, q, ldq) -c -c %----------------------------------------------% -c | Quick return if there are no shifts to apply | -c %----------------------------------------------% -c - if (np .eq. 0) go to 9000 -c -c %----------------------------------------------% -c | Chase the bulge with the application of each | -c | implicit shift. Each shift is applied to the | -c | whole matrix including each block. | -c %----------------------------------------------% -c - cconj = .false. - do 110 jj = 1, np - sigmar = shiftr(jj) - sigmai = shifti(jj) -c - if (msglvl .gt. 
2 ) then - call ivout (logfil, 1, jj, ndigit, - & '_napps: shift number.') - call dvout (logfil, 1, sigmar, ndigit, - & '_napps: The real part of the shift ') - call dvout (logfil, 1, sigmai, ndigit, - & '_napps: The imaginary part of the shift ') - end if -c -c %-------------------------------------------------% -c | The following set of conditionals is necessary | -c | in order that complex conjugate pairs of shifts | -c | are applied together or not at all. | -c %-------------------------------------------------% -c - if ( cconj ) then -c -c %-----------------------------------------% -c | cconj = .true. means the previous shift | -c | had non-zero imaginary part. | -c %-----------------------------------------% -c - cconj = .false. - go to 110 - else if ( jj .lt. np .and. abs( sigmai ) .gt. zero ) then -c -c %------------------------------------% -c | Start of a complex conjugate pair. | -c %------------------------------------% -c - cconj = .true. - else if ( jj .eq. np .and. abs( sigmai ) .gt. zero ) then -c -c %----------------------------------------------% -c | The last shift has a nonzero imaginary part. | -c | Don't apply it; thus the order of the | -c | compressed H is order KEV+1 since only np-1 | -c | were applied. | -c %----------------------------------------------% -c - kev = kev + 1 - go to 110 - end if - istart = 1 - 20 continue -c -c %--------------------------------------------------% -c | if sigmai = 0 then | -c | Apply the jj-th shift ... | -c | else | -c | Apply the jj-th and (jj+1)-th together ... | -c | (Note that jj < np at this point in the code) | -c | end | -c | to the current block of H. The next do loop | -c | determines the current block ; | -c %--------------------------------------------------% -c - do 30 i = istart, kplusp-1 -c -c %----------------------------------------% -c | Check for splitting and deflation. 
Use | -c | a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine dlahqr | -c %----------------------------------------% -c - tst1 = abs( h( i, i ) ) + abs( h( i+1, i+1 ) ) - if( tst1.eq.zero ) - & tst1 = dlanhs( '1', kplusp-jj+1, h, ldh, workl ) - if( abs( h( i+1,i ) ).le.max( ulp*tst1, smlnum ) ) then - if (msglvl .gt. 0) then - call ivout (logfil, 1, i, ndigit, - & '_napps: matrix splitting at row/column no.') - call ivout (logfil, 1, jj, ndigit, - & '_napps: matrix splitting with shift number.') - call dvout (logfil, 1, h(i+1,i), ndigit, - & '_napps: off diagonal element.') - end if - iend = i - h(i+1,i) = zero - go to 40 - end if - 30 continue - iend = kplusp - 40 continue -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, istart, ndigit, - & '_napps: Start of current block ') - call ivout (logfil, 1, iend, ndigit, - & '_napps: End of current block ') - end if -c -c %------------------------------------------------% -c | No reason to apply a shift to block of order 1 | -c %------------------------------------------------% -c - if ( istart .eq. iend ) go to 100 -c -c %------------------------------------------------------% -c | If istart + 1 = iend then no reason to apply a | -c | complex conjugate pair of shifts on a 2 by 2 matrix. | -c %------------------------------------------------------% -c - if ( istart + 1 .eq. iend .and. abs( sigmai ) .gt. zero ) - & go to 100 -c - h11 = h(istart,istart) - h21 = h(istart+1,istart) - if ( abs( sigmai ) .le. zero ) then -c -c %---------------------------------------------% -c | Real-valued shift ==> apply single shift QR | -c %---------------------------------------------% -c - f = h11 - sigmar - g = h21 -c - do 80 i = istart, iend-1 -c -c %-----------------------------------------------------% -c | Contruct the plane rotation G to zero out the bulge | -c %-----------------------------------------------------% -c - call dlartg (f, g, c, s, r) - if (i .gt. 
istart) then -c -c %-------------------------------------------% -c | The following ensures that h(1:iend-1,1), | -c | the first iend-2 off diagonal of elements | -c | H, remain non negative. | -c %-------------------------------------------% -c - if (r .lt. zero) then - r = -r - c = -c - s = -s - end if - h(i,i-1) = r - h(i+1,i-1) = zero - end if -c -c %---------------------------------------------% -c | Apply rotation to the left of H; H <- G'*H | -c %---------------------------------------------% -c - do 50 j = i, kplusp - t = c*h(i,j) + s*h(i+1,j) - h(i+1,j) = -s*h(i,j) + c*h(i+1,j) - h(i,j) = t - 50 continue -c -c %---------------------------------------------% -c | Apply rotation to the right of H; H <- H*G | -c %---------------------------------------------% -c - do 60 j = 1, min(i+2,iend) - t = c*h(j,i) + s*h(j,i+1) - h(j,i+1) = -s*h(j,i) + c*h(j,i+1) - h(j,i) = t - 60 continue -c -c %----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G | -c %----------------------------------------------------% -c - do 70 j = 1, min( i+jj, kplusp ) - t = c*q(j,i) + s*q(j,i+1) - q(j,i+1) = - s*q(j,i) + c*q(j,i+1) - q(j,i) = t - 70 continue -c -c %---------------------------% -c | Prepare for next rotation | -c %---------------------------% -c - if (i .lt. iend-1) then - f = h(i+1,i) - g = h(i+2,i) - end if - 80 continue -c -c %-----------------------------------% -c | Finished applying the real shift. 
| -c %-----------------------------------% -c - else -c -c %----------------------------------------------------% -c | Complex conjugate shifts ==> apply double shift QR | -c %----------------------------------------------------% -c - h12 = h(istart,istart+1) - h22 = h(istart+1,istart+1) - h32 = h(istart+2,istart+1) -c -c %---------------------------------------------------------% -c | Compute 1st column of (H - shift*I)*(H - conj(shift)*I) | -c %---------------------------------------------------------% -c - s = 2.0*sigmar - t = dlapy2 ( sigmar, sigmai ) - u(1) = ( h11 * (h11 - s) + t * t ) / h21 + h12 - u(2) = h11 + h22 - s - u(3) = h32 -c - do 90 i = istart, iend-1 -c - nr = min ( 3, iend-i+1 ) -c -c %-----------------------------------------------------% -c | Construct Householder reflector G to zero out u(1). | -c | G is of the form I - tau*( 1 u )' * ( 1 u' ). | -c %-----------------------------------------------------% -c - call dlarfg ( nr, u(1), u(2), 1, tau ) -c - if (i .gt. istart) then - h(i,i-1) = u(1) - h(i+1,i-1) = zero - if (i .lt. iend-1) h(i+2,i-1) = zero - end if - u(1) = one -c -c %--------------------------------------% -c | Apply the reflector to the left of H | -c %--------------------------------------% -c - call dlarf ('Left', nr, kplusp-i+1, u, 1, tau, - & h(i,i), ldh, workl) -c -c %---------------------------------------% -c | Apply the reflector to the right of H | -c %---------------------------------------% -c - ir = min ( i+3, iend ) - call dlarf ('Right', ir, nr, u, 1, tau, - & h(1,i), ldh, workl) -c -c %-----------------------------------------------------% -c | Accumulate the reflector in the matrix Q; Q <- Q*G | -c %-----------------------------------------------------% -c - call dlarf ('Right', kplusp, nr, u, 1, tau, - & q(1,i), ldq, workl) -c -c %----------------------------% -c | Prepare for next reflector | -c %----------------------------% -c - if (i .lt. iend-1) then - u(1) = h(i+1,i) - u(2) = h(i+2,i) - if (i .lt. 
iend-2) u(3) = h(i+3,i) - end if -c - 90 continue -c -c %--------------------------------------------% -c | Finished applying a complex pair of shifts | -c | to the current block | -c %--------------------------------------------% -c - end if -c - 100 continue -c -c %---------------------------------------------------------% -c | Apply the same shift to the next block if there is any. | -c %---------------------------------------------------------% -c - istart = iend + 1 - if (iend .lt. kplusp) go to 20 -c -c %---------------------------------------------% -c | Loop back to the top to get the next shift. | -c %---------------------------------------------% -c - 110 continue -c -c %--------------------------------------------------% -c | Perform a similarity transformation that makes | -c | sure that H will have non negative sub diagonals | -c %--------------------------------------------------% -c - do 120 j=1,kev - if ( h(j+1,j) .lt. zero ) then - call dscal( kplusp-j+1, -one, h(j+1,j), ldh ) - call dscal( min(j+2, kplusp), -one, h(1,j+1), 1 ) - call dscal( min(j+np+1,kplusp), -one, q(1,j+1), 1 ) - end if - 120 continue -c - do 130 i = 1, kev -c -c %--------------------------------------------% -c | Final check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine dlahqr | -c %--------------------------------------------% -c - tst1 = abs( h( i, i ) ) + abs( h( i+1, i+1 ) ) - if( tst1.eq.zero ) - & tst1 = dlanhs( '1', kev, h, ldh, workl ) - if( h( i+1,i ) .le. max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 130 continue -c -c %-------------------------------------------------% -c | Compute the (kev+1)-st column of (V*Q) and | -c | temporarily store the result in WORKD(N+1:2*N). | -c | This is needed in the residual update since we | -c | cannot GUARANTEE that the corresponding entry | -c | of H would be zero as in exact arithmetic. 
| -c %-------------------------------------------------% -c - if (h(kev+1,kev) .gt. zero) - & call dgemv ('N', n, kplusp, one, v, ldv, q(1,kev+1), 1, zero, - & workd(n+1), 1) -c -c %----------------------------------------------------------% -c | Compute column 1 to kev of (V*Q) in backward order | -c | taking advantage of the upper Hessenberg structure of Q. | -c %----------------------------------------------------------% -c - do 140 i = 1, kev - call dgemv ('N', n, kplusp-i+1, one, v, ldv, - & q(1,kev-i+1), 1, zero, workd, 1) - call dcopy (n, workd, 1, v(1,kplusp-i+1), 1) - 140 continue -c -c %-------------------------------------------------% -c | Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). | -c %-------------------------------------------------% -c - call dlacpy ('A', n, kev, v(1,kplusp-kev+1), ldv, v, ldv) -c -c %--------------------------------------------------------------% -c | Copy the (kev+1)-st column of (V*Q) in the appropriate place | -c %--------------------------------------------------------------% -c - if (h(kev+1,kev) .gt. zero) - & call dcopy (n, workd(n+1), 1, v(1,kev+1), 1) -c -c %-------------------------------------% -c | Update the residual vector: | -c | r <- sigmak*r + betak*v(:,kev+1) | -c | where | -c | sigmak = (e_{kplusp}'*Q)*e_{kev} | -c | betak = e_{kev+1}'*H*e_{kev} | -c %-------------------------------------% -c - call dscal (n, q(kplusp,kev), resid, 1) - if (h(kev+1,kev) .gt. zero) - & call daxpy (n, h(kev+1,kev), v(1,kev+1), 1, resid, 1) -c - if (msglvl .gt. 1) then - call dvout (logfil, 1, q(kplusp,kev), ndigit, - & '_napps: sigmak = (e_{kev+p}^T*Q)*e_{kev}') - call dvout (logfil, 1, h(kev+1,kev), ndigit, - & '_napps: betak = e_{kev+1}^T*H*e_{kev}') - call ivout (logfil, 1, kev, ndigit, - & '_napps: Order of the final Hessenberg matrix ') - if (msglvl .gt. 
2) then - call dmout (logfil, kev, kev, h, ldh, ndigit, - & '_napps: updated Hessenberg matrix H for next iteration') - end if -c - end if -c - 9000 continue - call second (t1) - tnapps = tnapps + (t1 - t0) -c - return -c -c %---------------% -c | End of dnapps | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaup2.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaup2.f deleted file mode 100644 index eb2e3205aa..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaup2.f +++ /dev/null @@ -1,847 +0,0 @@ -c\BeginDoc -c -c\Name: dnaup2 -c -c\Description: -c Intermediate level interface called by dnaupd. -c -c\Usage: -c call dnaup2 -c ( IDO, BMAT, N, WHICH, NEV, NP, TOL, RESID, MODE, IUPD, -c ISHIFT, MXITER, V, LDV, H, LDH, RITZR, RITZI, BOUNDS, -c Q, LDQ, WORKL, IPNTR, WORKD, INFO ) -c -c\Arguments -c -c IDO, BMAT, N, WHICH, NEV, TOL, RESID: same as defined in dnaupd. -c MODE, ISHIFT, MXITER: see the definition of IPARAM in dnaupd. -c -c NP Integer. (INPUT/OUTPUT) -c Contains the number of implicit shifts to apply during -c each Arnoldi iteration. -c If ISHIFT=1, NP is adjusted dynamically at each iteration -c to accelerate convergence and prevent stagnation. -c This is also roughly equal to the number of matrix-vector -c products (involving the operator OP) per Arnoldi iteration. -c The logic for adjusting is contained within the current -c subroutine. -c If ISHIFT=0, NP is the number of shifts the user needs -c to provide via reverse comunication. 0 < NP < NCV-NEV. -c NP may be less than NCV-NEV for two reasons. The first, is -c to keep complex conjugate pairs of "wanted" Ritz values -c together. The second, is that a leading block of the current -c upper Hessenberg matrix has split off and contains "unwanted" -c Ritz values. -c Upon termination of the IRA iteration, NP contains the number -c of "converged" wanted Ritz values. -c -c IUPD Integer. (INPUT) -c IUPD .EQ. 
0: use explicit restart instead implicit update. -c IUPD .NE. 0: use implicit update. -c -c V Double precision N by (NEV+NP) array. (INPUT/OUTPUT) -c The Arnoldi basis vectors are returned in the first NEV -c columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Double precision (NEV+NP) by (NEV+NP) array. (OUTPUT) -c H is used to store the generated upper Hessenberg matrix -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZR, Double precision arrays of length NEV+NP. (OUTPUT) -c RITZI RITZR(1:NEV) (resp. RITZI(1:NEV)) contains the real (resp. -c imaginary) part of the computed Ritz values of OP. -c -c BOUNDS Double precision array of length NEV+NP. (OUTPUT) -c BOUNDS(1:NEV) contain the error bounds corresponding to -c the computed Ritz values. -c -c Q Double precision (NEV+NP) by (NEV+NP) array. (WORKSPACE) -c Private (replicated) work array used to accumulate the -c rotation in the shift application step. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Double precision work array of length at least -c (NEV+NP)**2 + 3*(NEV+NP). (INPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. It is used in shifts calculation, shifts -c application and convergence checking. -c -c On exit, the last 3*(NEV+NP) locations of WORKL contain -c the Ritz values (real,imaginary) and associated Ritz -c estimates of the current Hessenberg matrix. They are -c listed in the same order as returned from dneigh. -c -c If ISHIFT .EQ. O and IDO .EQ. 3, the first 2*NP locations -c of WORKL are used in reverse communication to hold the user -c supplied shifts. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORKD for -c vectors used by the Arnoldi iteration. 
-c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Double precision work array of length 3*N. (WORKSPACE) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note in DNAUPD. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal return. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. -c NP returns the number of converged Ritz values. -c = 2: No shifts could be applied. -c = -8: Error return from LAPACK eigenvalue calculation; -c This should never happen. -c = -9: Starting vector is zero. -c = -9999: Could not build an Arnoldi factorization. -c Size that was built in returned in NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c dgetv0 ARPACK initial vector generation routine. -c dnaitr ARPACK Arnoldi factorization routine. -c dnapps ARPACK application of implicit shifts routine. 
-c dnconv ARPACK convergence of Ritz values routine. -c dneigh ARPACK compute Ritz values and error bounds routine. -c dngets ARPACK reorder Ritz values and error bounds routine. -c dsortc ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dmout ARPACK utility routine that prints matrices -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c dcopy Level 1 BLAS that copies one vector to another . -c ddot Level 1 BLAS that computes the scalar product of two vectors. -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c dswap Level 1 BLAS that swaps two vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naup2.F SID: 2.8 DATE OF SID: 10/17/00 RELEASE: 2 -c -c\Remarks -c 1. 
None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dnaup2 - & ( ido, bmat, n, which, nev, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, h, ldh, ritzr, ritzi, bounds, - & q, ldq, workl, ipntr, workd, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ishift, iupd, mode, ldh, ldq, ldv, mxiter, - & n, nev, np - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(13) - Double precision - & bounds(nev+np), h(ldh,nev+np), q(ldq,nev+np), resid(n), - & ritzi(nev+np), ritzr(nev+np), v(ldv,nev+np), - & workd(3*n), workl( (nev+np)*(nev+np+3) ) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character wprime*2 - logical cnorm , getv0, initv, update, ushift - integer ierr , iter , j , kplusp, msglvl, nconv, - & nevbef, nev0 , np0 , nptemp, numcnv - Double precision - & rnorm , temp , eps23 - save cnorm , getv0, initv, update, ushift, - & rnorm , iter , eps23, kplusp, msglvl, nconv , - & nevbef, nev0 , np0 , numcnv -c -c %-----------------------% -c | Local array arguments | -c %-----------------------% -c - integer kp(4) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy , dgetv0, dnaitr, dnconv, dneigh, - & dngets, dnapps, dvout , ivout , second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & ddot, dnrm2, dlapy2, dlamch - external ddot, dnrm2, dlapy2, dlamch -c -c 
%---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic min, max, abs, sqrt -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c - call second (t0) -c - msglvl = mnaup2 -c -c %-------------------------------------% -c | Get the machine dependent constant. | -c %-------------------------------------% -c - eps23 = dlamch('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0) -c - nev0 = nev - np0 = np -c -c %-------------------------------------% -c | kplusp is the bound on the largest | -c | Lanczos factorization built. | -c | nconv is the current number of | -c | "converged" eigenvlues. | -c | iter is the counter on the current | -c | iteration step. | -c %-------------------------------------% -c - kplusp = nev + np - nconv = 0 - iter = 0 -c -c %---------------------------------------% -c | Set flags for computing the first NEV | -c | steps of the Arnoldi factorization. | -c %---------------------------------------% -c - getv0 = .true. - update = .false. - ushift = .false. - cnorm = .false. -c - if (info .ne. 0) then -c -c %--------------------------------------------% -c | User provides the initial residual vector. | -c %--------------------------------------------% -c - initv = .true. - info = 0 - else - initv = .false. - end if - end if -c -c %---------------------------------------------% -c | Get a possibly random starting vector and | -c | force it into the range of the operator OP. | -c %---------------------------------------------% -c - 10 continue -c - if (getv0) then - call dgetv0 (ido, bmat, 1, initv, n, 1, v, ldv, resid, rnorm, - & ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (rnorm .eq. zero) then -c -c %-----------------------------------------% -c | The initial vector is zero. Error exit. | -c %-----------------------------------------% -c - info = -9 - go to 1100 - end if - getv0 = .false. 
- ido = 0 - end if -c -c %-----------------------------------% -c | Back from reverse communication : | -c | continue with update step | -c %-----------------------------------% -c - if (update) go to 20 -c -c %-------------------------------------------% -c | Back from computing user specified shifts | -c %-------------------------------------------% -c - if (ushift) go to 50 -c -c %-------------------------------------% -c | Back from computing residual norm | -c | at the end of the current iteration | -c %-------------------------------------% -c - if (cnorm) go to 100 -c -c %----------------------------------------------------------% -c | Compute the first NEV steps of the Arnoldi factorization | -c %----------------------------------------------------------% -c - call dnaitr (ido, bmat, n, 0, nev, mode, resid, rnorm, v, ldv, - & h, ldh, ipntr, workd, info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if -c -c %--------------------------------------------------------------% -c | | -c | M A I N ARNOLDI I T E R A T I O N L O O P | -c | Each iteration implicitly restarts the Arnoldi | -c | factorization in place. | -c | | -c %--------------------------------------------------------------% -c - 1000 continue -c - iter = iter + 1 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, iter, ndigit, - & '_naup2: **** Start of major iteration number ****') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c | Adjust NP since NEV might have been updated by last call | -c | to the shift application routine dnapps. 
| -c %-----------------------------------------------------------% -c - np = kplusp - nev -c - if (msglvl .gt. 1) then - call ivout (logfil, 1, nev, ndigit, - & '_naup2: The length of the current Arnoldi factorization') - call ivout (logfil, 1, np, ndigit, - & '_naup2: Extend the Arnoldi factorization by') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c %-----------------------------------------------------------% -c - ido = 0 - 20 continue - update = .true. -c - call dnaitr (ido , bmat, n , nev, np , mode , resid, - & rnorm, v , ldv, h , ldh, ipntr, workd, - & info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if - update = .false. -c - if (msglvl .gt. 1) then - call dvout (logfil, 1, rnorm, ndigit, - & '_naup2: Corresponding B-norm of the residual') - end if -c -c %--------------------------------------------------------% -c | Compute the eigenvalues and corresponding error bounds | -c | of the current upper Hessenberg matrix. | -c %--------------------------------------------------------% -c - call dneigh (rnorm, kplusp, h, ldh, ritzr, ritzi, bounds, - & q, ldq, workl, ierr) -c - if (ierr .ne. 0) then - info = -8 - go to 1200 - end if -c -c %----------------------------------------------------% -c | Make a copy of eigenvalues and corresponding error | -c | bounds obtained from dneigh. 
| -c %----------------------------------------------------% -c - call dcopy(kplusp, ritzr, 1, workl(kplusp**2+1), 1) - call dcopy(kplusp, ritzi, 1, workl(kplusp**2+kplusp+1), 1) - call dcopy(kplusp, bounds, 1, workl(kplusp**2+2*kplusp+1), 1) -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The wanted part of the spectrum and corresponding | -c | error bounds are in the last NEV loc. of RITZR, | -c | RITZI and BOUNDS respectively. The variables NEV | -c | and NP may be updated if the NEV-th wanted Ritz | -c | value has a non zero imaginary part. In this case | -c | NEV is increased by one and NP decreased by one. | -c | NOTE: The last two arguments of dngets are no | -c | longer used as of version 2.1. | -c %---------------------------------------------------% -c - nev = nev0 - np = np0 - numcnv = nev - call dngets (ishift, which, nev, np, ritzr, ritzi, - & bounds, workl, workl(np+1)) - if (nev .eq. nev0+1) numcnv = nev0+1 -c -c %-------------------% -c | Convergence test. | -c %-------------------% -c - call dcopy (nev, bounds(np+1), 1, workl(2*np+1), 1) - call dnconv (nev, ritzr(np+1), ritzi(np+1), workl(2*np+1), - & tol, nconv) -c - if (msglvl .gt. 2) then - kp(1) = nev - kp(2) = np - kp(3) = numcnv - kp(4) = nconv - call ivout (logfil, 4, kp, ndigit, - & '_naup2: NEV, NP, NUMCNV, NCONV are') - call dvout (logfil, kplusp, ritzr, ndigit, - & '_naup2: Real part of the eigenvalues of H') - call dvout (logfil, kplusp, ritzi, ndigit, - & '_naup2: Imaginary part of the eigenvalues of H') - call dvout (logfil, kplusp, bounds, ndigit, - & '_naup2: Ritz estimates of the current NCV Ritz values') - end if -c -c %---------------------------------------------------------% -c | Count the number of unwanted Ritz values that have zero | -c | Ritz estimates. 
If any Ritz estimates are equal to zero | -c | then a leading block of H of order equal to at least | -c | the number of Ritz values with zero Ritz estimates has | -c | split off. None of these Ritz values may be removed by | -c | shifting. Decrease NP the number of shifts to apply. If | -c | no shifts may be applied, then prepare to exit | -c %---------------------------------------------------------% -c - nptemp = np - do 30 j=1, nptemp - if (bounds(j) .eq. zero) then - np = np - 1 - nev = nev + 1 - end if - 30 continue -c - if ( (nconv .ge. numcnv) .or. - & (iter .gt. mxiter) .or. - & (np .eq. 0) ) then -c - if (msglvl .gt. 4) then - call dvout(logfil, kplusp, workl(kplusp**2+1), ndigit, - & '_naup2: Real part of the eig computed by _neigh:') - call dvout(logfil, kplusp, workl(kplusp**2+kplusp+1), - & ndigit, - & '_naup2: Imag part of the eig computed by _neigh:') - call dvout(logfil, kplusp, workl(kplusp**2+kplusp*2+1), - & ndigit, - & '_naup2: Ritz eistmates computed by _neigh:') - end if -c -c %------------------------------------------------% -c | Prepare to exit. Put the converged Ritz values | -c | and corresponding bounds in RITZ(1:NCONV) and | -c | BOUNDS(1:NCONV) respectively. Then sort. Be | -c | careful when NCONV > NP | -c %------------------------------------------------% -c -c %------------------------------------------% -c | Use h( 3,1 ) as storage to communicate | -c | rnorm to _neupd if needed | -c %------------------------------------------% - - h(3,1) = rnorm -c -c %----------------------------------------------% -c | To be consistent with dngets, we first do a | -c | pre-processing sort in order to keep complex | -c | conjugate pairs together. This is similar | -c | to the pre-processing sort used in dngets | -c | except that the sort is done in the opposite | -c | order. | -c %----------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SR' - if (which .eq. 'SM') wprime = 'LR' - if (which .eq. 
'LR') wprime = 'SM' - if (which .eq. 'SR') wprime = 'LM' - if (which .eq. 'LI') wprime = 'SM' - if (which .eq. 'SI') wprime = 'LM' -c - call dsortc (wprime, .true., kplusp, ritzr, ritzi, bounds) -c -c %----------------------------------------------% -c | Now sort Ritz values so that converged Ritz | -c | values appear within the first NEV locations | -c | of ritzr, ritzi and bounds, and the most | -c | desired one appears at the front. | -c %----------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SM' - if (which .eq. 'SM') wprime = 'LM' - if (which .eq. 'LR') wprime = 'SR' - if (which .eq. 'SR') wprime = 'LR' - if (which .eq. 'LI') wprime = 'SI' - if (which .eq. 'SI') wprime = 'LI' -c - call dsortc(wprime, .true., kplusp, ritzr, ritzi, bounds) -c -c %--------------------------------------------------% -c | Scale the Ritz estimate of each Ritz value | -c | by 1 / max(eps23,magnitude of the Ritz value). | -c %--------------------------------------------------% -c - do 35 j = 1, numcnv - temp = max(eps23,dlapy2(ritzr(j), - & ritzi(j))) - bounds(j) = bounds(j)/temp - 35 continue -c -c %----------------------------------------------------% -c | Sort the Ritz values according to the scaled Ritz | -c | esitmates. This will push all the converged ones | -c | towards the front of ritzr, ritzi, bounds | -c | (in the case when NCONV < NEV.) | -c %----------------------------------------------------% -c - wprime = 'LR' - call dsortc(wprime, .true., numcnv, bounds, ritzr, ritzi) -c -c %----------------------------------------------% -c | Scale the Ritz estimate back to its original | -c | value. 
| -c %----------------------------------------------% -c - do 40 j = 1, numcnv - temp = max(eps23, dlapy2(ritzr(j), - & ritzi(j))) - bounds(j) = bounds(j)*temp - 40 continue -c -c %------------------------------------------------% -c | Sort the converged Ritz values again so that | -c | the "threshold" value appears at the front of | -c | ritzr, ritzi and bound. | -c %------------------------------------------------% -c - call dsortc(which, .true., nconv, ritzr, ritzi, bounds) -c - if (msglvl .gt. 1) then - call dvout (logfil, kplusp, ritzr, ndigit, - & '_naup2: Sorted real part of the eigenvalues') - call dvout (logfil, kplusp, ritzi, ndigit, - & '_naup2: Sorted imaginary part of the eigenvalues') - call dvout (logfil, kplusp, bounds, ndigit, - & '_naup2: Sorted ritz estimates.') - end if -c -c %------------------------------------% -c | Max iterations have been exceeded. | -c %------------------------------------% -c - if (iter .gt. mxiter .and. nconv .lt. numcnv) info = 1 -c -c %---------------------% -c | No shifts to apply. | -c %---------------------% -c - if (np .eq. 0 .and. nconv .lt. numcnv) info = 2 -c - np = nconv - go to 1100 -c - else if ( (nconv .lt. numcnv) .and. (ishift .eq. 1) ) then -c -c %-------------------------------------------------% -c | Do not have all the requested eigenvalues yet. | -c | To prevent possible stagnation, adjust the size | -c | of NEV. | -c %-------------------------------------------------% -c - nevbef = nev - nev = nev + min(nconv, np/2) - if (nev .eq. 1 .and. kplusp .ge. 6) then - nev = kplusp / 2 - else if (nev .eq. 1 .and. kplusp .gt. 3) then - nev = 2 - end if -c -c %---- Scipy fix ------------------------------------------------ -c | We must keep nev below this value, as otherwise we can get -c | np == 0 (note that dngets below can bump nev by 1). If np == 0, -c | the next call to `dnaitr` will write out-of-bounds. -c | - if (nev .gt. 
kplusp - 2) then - nev = kplusp - 2 - end if -c | -c %---- Scipy fix end -------------------------------------------- -c - np = kplusp - nev -c -c %---------------------------------------% -c | If the size of NEV was just increased | -c | resort the eigenvalues. | -c %---------------------------------------% -c - if (nevbef .lt. nev) - & call dngets (ishift, which, nev, np, ritzr, ritzi, - & bounds, workl, workl(np+1)) -c - end if -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, nconv, ndigit, - & '_naup2: no. of "converged" Ritz values at this iter.') - if (msglvl .gt. 1) then - kp(1) = nev - kp(2) = np - call ivout (logfil, 2, kp, ndigit, - & '_naup2: NEV and NP are') - call dvout (logfil, nev, ritzr(np+1), ndigit, - & '_naup2: "wanted" Ritz values -- real part') - call dvout (logfil, nev, ritzi(np+1), ndigit, - & '_naup2: "wanted" Ritz values -- imag part') - call dvout (logfil, nev, bounds(np+1), ndigit, - & '_naup2: Ritz estimates of the "wanted" values ') - end if - end if -c - if (ishift .eq. 0) then -c -c %-------------------------------------------------------% -c | User specified shifts: reverse comminucation to | -c | compute the shifts. They are returned in the first | -c | 2*NP locations of WORKL. | -c %-------------------------------------------------------% -c - ushift = .true. - ido = 3 - go to 9000 - end if -c - 50 continue -c -c %------------------------------------% -c | Back from reverse communication; | -c | User specified shifts are returned | -c | in WORKL(1:2*NP) | -c %------------------------------------% -c - ushift = .false. -c - if ( ishift .eq. 0 ) then -c -c %----------------------------------% -c | Move the NP shifts from WORKL to | -c | RITZR, RITZI to free up WORKL | -c | for non-exact shift case. | -c %----------------------------------% -c - call dcopy (np, workl, 1, ritzr, 1) - call dcopy (np, workl(np+1), 1, ritzi, 1) - end if -c - if (msglvl .gt. 
2) then - call ivout (logfil, 1, np, ndigit, - & '_naup2: The number of shifts to apply ') - call dvout (logfil, np, ritzr, ndigit, - & '_naup2: Real part of the shifts') - call dvout (logfil, np, ritzi, ndigit, - & '_naup2: Imaginary part of the shifts') - if ( ishift .eq. 1 ) - & call dvout (logfil, np, bounds, ndigit, - & '_naup2: Ritz estimates of the shifts') - end if -c -c %---------------------------------------------------------% -c | Apply the NP implicit shifts by QR bulge chasing. | -c | Each shift is applied to the whole upper Hessenberg | -c | matrix H. | -c | The first 2*N locations of WORKD are used as workspace. | -c %---------------------------------------------------------% -c - call dnapps (n, nev, np, ritzr, ritzi, v, ldv, - & h, ldh, resid, q, ldq, workl, workd) -c -c %---------------------------------------------% -c | Compute the B-norm of the updated residual. | -c | Keep B*RESID in WORKD(1:N) to be used in | -c | the first step of the next call to dnaitr. | -c %---------------------------------------------% -c - cnorm = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*RESID | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd, 1) - end if -c - 100 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(1:N) := B*RESID | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - rnorm = ddot (n, resid, 1, workd, 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = dnrm2(n, resid, 1) - end if - cnorm = .false. -c - if (msglvl .gt. 
2) then - call dvout (logfil, 1, rnorm, ndigit, - & '_naup2: B-norm of residual for compressed factorization') - call dmout (logfil, nev, nev, h, ldh, ndigit, - & '_naup2: Compressed upper Hessenberg matrix H') - end if -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 1100 continue -c - mxiter = iter - nev = numcnv -c - 1200 continue - ido = 99 -c -c %------------% -c | Error Exit | -c %------------% -c - call second (t1) - tnaup2 = t1 - t0 -c - 9000 continue -c -c %---------------% -c | End of dnaup2 | -c %---------------% -c - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaupd.f deleted file mode 100644 index 3b7cc3e027..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnaupd.f +++ /dev/null @@ -1,693 +0,0 @@ -c\BeginDoc -c -c\Name: dnaupd -c -c\Description: -c Reverse communication interface for the Implicitly Restarted Arnoldi -c iteration. This subroutine computes approximations to a few eigenpairs -c of a linear operator "OP" with respect to a semi-inner product defined by -c a symmetric positive semi-definite real matrix B. B may be the identity -c matrix. NOTE: If the linear operator "OP" is real and symmetric -c with respect to the real positive semi-definite symmetric matrix B, -c i.e. B*OP = (OP`)*B, then subroutine dsaupd should be used instead. -c -c The computed approximate eigenvalues are called Ritz values and -c the corresponding approximate eigenvectors are called Ritz vectors. -c -c dnaupd is usually called iteratively to solve one of the -c following problems: -c -c Mode 1: A*x = lambda*x. -c ===> OP = A and B = I. -c -c Mode 2: A*x = lambda*M*x, M symmetric positive definite -c ===> OP = inv[M]*A and B = M. 
-c ===> (If M can be factored see remark 3 below) -c -c Mode 3: A*x = lambda*M*x, M symmetric semi-definite -c ===> OP = Real_Part{ inv[A - sigma*M]*M } and B = M. -c ===> shift-and-invert mode (in real arithmetic) -c If OP*x = amu*x, then -c amu = 1/2 * [ 1/(lambda-sigma) + 1/(lambda-conjg(sigma)) ]. -c Note: If sigma is real, i.e. imaginary part of sigma is zero; -c Real_Part{ inv[A - sigma*M]*M } == inv[A - sigma*M]*M -c amu == 1/(lambda-sigma). -c -c Mode 4: A*x = lambda*M*x, M symmetric semi-definite -c ===> OP = Imaginary_Part{ inv[A - sigma*M]*M } and B = M. -c ===> shift-and-invert mode (in real arithmetic) -c If OP*x = amu*x, then -c amu = 1/2i * [ 1/(lambda-sigma) - 1/(lambda-conjg(sigma)) ]. -c -c Both mode 3 and 4 give the same enhancement to eigenvalues close to -c the (complex) shift sigma. However, as lambda goes to infinity, -c the operator OP in mode 4 dampens the eigenvalues more strongly than -c does OP defined in mode 3. -c -c NOTE: The action of w <- inv[A - sigma*M]*v or w <- inv[M]*v -c should be accomplished either by a direct method -c using a sparse matrix factorization and solving -c -c [A - sigma*M]*w = v or M*w = v, -c -c or through an iterative method for solving these -c systems. If an iterative method is used, the -c convergence test must be more stringent than -c the accuracy requirements for the eigenvalue -c approximations. -c -c\Usage: -c call dnaupd -c ( IDO, BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, -c IPNTR, WORKD, WORKL, LWORKL, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to dnaupd. IDO will be set internally to -c indicate the type of operation to be performed. Control is -c then given back to the calling routine which has the -c responsibility to carry out the requested operation and call -c dnaupd with the result. The operand is given in -c WORKD(IPNTR(1)), the result must be put in WORKD(IPNTR(2)). 
-c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c In mode 3 and 4, the vector B * X is already -c available in WORKD(ipntr(3)). It does not -c need to be recomputed in forming OP * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 3: compute the IPARAM(8) real and imaginary parts -c of the shifts where INPTR(14) is the pointer -c into WORKL for placing the shifts. See Remark -c 5 below. -c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. -c BMAT = 'I' -> standard eigenvalue problem A*x = lambda*x -c BMAT = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c WHICH Character*2. (INPUT) -c 'LM' -> want the NEV eigenvalues of largest magnitude. -c 'SM' -> want the NEV eigenvalues of smallest magnitude. -c 'LR' -> want the NEV eigenvalues of largest real part. -c 'SR' -> want the NEV eigenvalues of smallest real part. -c 'LI' -> want the NEV eigenvalues of largest imaginary part. -c 'SI' -> want the NEV eigenvalues of smallest imaginary part. -c -c NEV Integer. (INPUT/OUTPUT) -c Number of eigenvalues of OP to be computed. 0 < NEV < N-1. -c -c TOL Double precision scalar. (INPUT) -c Stopping criterion: the relative accuracy of the Ritz value -c is considered acceptable if BOUNDS(I) .LE. 
TOL*ABS(RITZ(I)) -c where ABS(RITZ(I)) is the magnitude when RITZ(I) is complex. -c DEFAULT = DLAMCH('EPS') (machine precision as computed -c by the LAPACK auxiliary subroutine DLAMCH). -c -c RESID Double precision array of length N. (INPUT/OUTPUT) -c On INPUT: -c If INFO .EQ. 0, a random initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c On OUTPUT: -c RESID contains the final residual vector. -c -c NCV Integer. (INPUT) -c Number of columns of the matrix V. NCV must satisfy the two -c inequalities 2 <= NCV-NEV and NCV <= N. -c This will indicate how many Arnoldi vectors are generated -c at each iteration. After the startup phase in which NEV -c Arnoldi vectors are generated, the algorithm generates -c approximately NCV-NEV Arnoldi vectors at each subsequent update -c iteration. Most of the cost in generating each Arnoldi vector is -c in the matrix-vector operation OP*x. -c NOTE: 2 <= NCV-NEV in order that complex conjugate pairs of Ritz -c values are kept together. (See remark 4 below) -c -c V Double precision array N by NCV. (OUTPUT) -c Contains the final set of Arnoldi basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling program. -c -c IPARAM Integer array of length 11. (INPUT/OUTPUT) -c IPARAM(1) = ISHIFT: method for selecting the implicit shifts. -c The shifts selected at each iteration are used to restart -c the Arnoldi iteration in an implicit fashion. -c ------------------------------------------------------------- -c ISHIFT = 0: the shifts are provided by the user via -c reverse communication. The real and imaginary -c parts of the NCV eigenvalues of the Hessenberg -c matrix H are returned in the part of the WORKL -c array corresponding to RITZR and RITZI. See remark -c 5 below. -c ISHIFT = 1: exact shifts with respect to the current -c Hessenberg matrix H. 
This is equivalent to -c restarting the iteration with a starting vector -c that is a linear combination of approximate Schur -c vectors associated with the "wanted" Ritz values. -c ------------------------------------------------------------- -c -c IPARAM(2) = No longer referenced. -c -c IPARAM(3) = MXITER -c On INPUT: maximum number of Arnoldi update iterations allowed. -c On OUTPUT: actual number of Arnoldi update iterations taken. -c -c IPARAM(4) = NB: blocksize to be used in the recurrence. -c The code currently works only for NB = 1. -c -c IPARAM(5) = NCONV: number of "converged" Ritz values. -c This represents the number of Ritz values that satisfy -c the convergence criterion. -c -c IPARAM(6) = IUPD -c No longer referenced. Implicit restarting is ALWAYS used. -c -c IPARAM(7) = MODE -c On INPUT determines what type of eigenproblem is being solved. -c Must be 1,2,3,4; See under \Description of dnaupd for the -c four modes available. -c -c IPARAM(8) = NP -c When ido = 3 and the user provides shifts through reverse -c communication (IPARAM(1)=0), dnaupd returns NP, the number -c of shifts the user is to provide. 0 < NP <=NCV-NEV. See Remark -c 5 below. -c -c IPARAM(9) = NUMOP, IPARAM(10) = NUMOPB, IPARAM(11) = NUMREO, -c OUTPUT: NUMOP = total number of OP*x operations, -c NUMOPB = total number of B*x operations if BMAT='G', -c NUMREO = total number of steps of re-orthogonalization. -c -c IPNTR Integer array of length 14. (OUTPUT) -c Pointer to mark the starting locations in the WORKD and WORKL -c arrays for matrices/vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X in WORKD. -c IPNTR(2): pointer to the current result vector Y in WORKD. -c IPNTR(3): pointer to the vector B * X in WORKD when used in -c the shift-and-invert mode. -c IPNTR(4): pointer to the next available location in WORKL -c that is untouched by the program. 
-c IPNTR(5): pointer to the NCV by NCV upper Hessenberg matrix -c H in WORKL. -c IPNTR(6): pointer to the real part of the ritz value array -c RITZR in WORKL. -c IPNTR(7): pointer to the imaginary part of the ritz value array -c RITZI in WORKL. -c IPNTR(8): pointer to the Ritz estimates in array WORKL associated -c with the Ritz values located in RITZR and RITZI in WORKL. -c -c IPNTR(14): pointer to the NP shifts in WORKL. See Remark 5 below. -c -c Note: IPNTR(9:13) is only referenced by dneupd. See Remark 2 below. -c -c IPNTR(9): pointer to the real part of the NCV RITZ values of the -c original system. -c IPNTR(10): pointer to the imaginary part of the NCV RITZ values of -c the original system. -c IPNTR(11): pointer to the NCV corresponding error bounds. -c IPNTR(12): pointer to the NCV by NCV upper quasi-triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c dneupd if RVEC = .TRUE. See Remark 2 below. -c ------------------------------------------------------------- -c -c WORKD Double precision work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration. Upon termination -c WORKD(1:N) contains B*RESID(1:N). If an invariant subspace -c associated with the converged Ritz values is desired, see remark -c 2 below, subroutine dneupd uses this output. -c See Data Distribution Note below. -c -c WORKL Double precision work array of length LWORKL. (OUTPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. See Data Distribution Note below. -c -c LWORKL Integer. (INPUT) -c LWORKL must be at least 3*NCV**2 + 6*NCV. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 
0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal exit. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. IPARAM(5) -c returns the number of wanted converged Ritz values. -c = 2: No longer an informational error. Deprecated starting -c with release 2 of ARPACK. -c = 3: No shifts could be applied during a cycle of the -c Implicitly restarted Arnoldi iteration. One possibility -c is to increase the size of NCV relative to NEV. -c See remark 4 below. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 2 and less than or equal to N. -c = -4: The maximum number of Arnoldi update iteration -c must be greater than zero. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work array is not sufficient. -c = -8: Error return from LAPACK eigenvalue calculation; -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3,4. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatable. -c = -12: IPARAM(1) must be equal to 0 or 1. -c = -9999: Could not build an Arnoldi factorization. -c IPARAM(5) returns the size of the current Arnoldi -c factorization. -c -c\Remarks -c 1. The computed Ritz values are approximate eigenvalues of OP. The -c selection of WHICH should be made with this in mind when -c Mode = 3 and 4. After convergence, approximate eigenvalues of the -c original problem may be obtained with the ARPACK subroutine dneupd. -c -c 2. If a basis for the invariant subspace corresponding to the converged Ritz -c values is needed, the user must call dneupd immediately following -c completion of dnaupd. This is new starting with release 2 of ARPACK. -c -c 3. If M can be factored into a Cholesky factorization M = LL` -c then Mode = 2 should not be selected. Instead one should use -c Mode = 1 with OP = inv(L)*A*inv(L`). 
Appropriate triangular -c linear systems should be solved with L and L` rather -c than computing inverses. After convergence, an approximate -c eigenvector z of the original problem is recovered by solving -c L`z = x where x is a Ritz vector of OP. -c -c 4. At present there is no a-priori analysis to guide the selection -c of NCV relative to NEV. The only formal requrement is that NCV > NEV + 2. -c However, it is recommended that NCV .ge. 2*NEV+1. If many problems of -c the same type are to be solved, one should experiment with increasing -c NCV while keeping NEV fixed for a given test problem. This will -c usually decrease the required number of OP*x operations but it -c also increases the work and storage required to maintain the orthogonal -c basis vectors. The optimal "cross-over" with respect to CPU time -c is problem dependent and must be determined empirically. -c See Chapter 8 of Reference 2 for further information. -c -c 5. When IPARAM(1) = 0, and IDO = 3, the user needs to provide the -c NP = IPARAM(8) real and imaginary parts of the shifts in locations -c real part imaginary part -c ----------------------- -------------- -c 1 WORKL(IPNTR(14)) WORKL(IPNTR(14)+NP) -c 2 WORKL(IPNTR(14)+1) WORKL(IPNTR(14)+NP+1) -c . . -c . . -c . . -c NP WORKL(IPNTR(14)+NP-1) WORKL(IPNTR(14)+2*NP-1). -c -c Only complex conjugate pairs of shifts may be applied and the pairs -c must be placed in consecutive locations. The real part of the -c eigenvalues of the current upper Hessenberg matrix are located in -c WORKL(IPNTR(6)) through WORKL(IPNTR(6)+NCV-1) and the imaginary part -c in WORKL(IPNTR(7)) through WORKL(IPNTR(7)+NCV-1). They are ordered -c according to the order defined by WHICH. The complex conjugate -c pairs are kept together and the associated Ritz estimates are located in -c WORKL(IPNTR(8)), WORKL(IPNTR(8)+1), ... , WORKL(IPNTR(8)+NCV-1). 
-c -c----------------------------------------------------------------------- -c -c\Data Distribution Note: -c -c Fortran-D syntax: -c ================ -c Double precision resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c decompose d1(n), d2(n,ncv) -c align resid(i) with d1(i) -c align v(i,j) with d2(i,j) -c align workd(i) with d1(i) range (1:n) -c align workd(i) with d1(i-n) range (n+1:2*n) -c align workd(i) with d1(i-2*n) range (2*n+1:3*n) -c distribute d1(block), d2(block,:) -c replicated workl(lworkl) -c -c Cray MPP syntax: -c =============== -c Double precision resid(n), v(ldv,ncv), workd(n,3), workl(lworkl) -c shared resid(block), v(block,:), workd(block,:) -c replicated workl(lworkl) -c -c CM2/CM5 syntax: -c ============== -c -c----------------------------------------------------------------------- -c -c include 'ex-nonsym.doc' -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett & Y. Saad, "Complex Shift and Invert Strategies for -c Real Matrices", Linear Algebra and its Applications, vol 88/89, -c pp 575-595, (1987). -c -c\Routines called: -c dnaup2 ARPACK routine that implements the Implicitly Restarted -c Arnoldi Iteration. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/16/93: Version '1.1' -c -c\SCCS Information: @(#) -c FILE: naupd.F SID: 2.10 DATE OF SID: 08/23/02 RELEASE: 2 -c -c\Remarks -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dnaupd - & ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, - & ipntr, workd, workl, lworkl, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ldv, lworkl, n, ncv, nev - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - Double precision - & resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, levec, mode, msglvl, mxiter, nb, - & nev0, next, np, ritzi, ritzr, j - save bounds, ih, iq, ishift, iupd, iw, ldh, ldq, - & levec, mode, msglvl, mxiter, nb, nev0, next, - & np, ritzi, ritzr -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dnaup2, dvout, ivout, second, dstatn -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlamch - external dlamch -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 
0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call dstatn - call second (t0) - msglvl = mnaupd -c -c %----------------% -c | Error checking | -c %----------------% -c - ierr = 0 - ishift = iparam(1) -c levec = iparam(2) - mxiter = iparam(3) -c nb = iparam(4) - nb = 1 -c -c %--------------------------------------------% -c | Revision 2 performs only implicit restart. | -c %--------------------------------------------% -c - iupd = 1 - mode = iparam(7) -c - if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev+1 .or. ncv .gt. n) then - ierr = -3 - else if (mxiter .le. 0) then - ierr = 4 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 6*ncv) then - ierr = -7 - else if (mode .lt. 1 .or. mode .gt. 4) then - ierr = -10 - else if (mode .eq. 1 .and. bmat .eq. 'G') then - ierr = -11 - else if (ishift .lt. 0 .or. ishift .gt. 1) then - ierr = -12 - end if -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - ido = 99 - go to 9000 - end if -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - if (nb .le. 0) nb = 1 - if (tol .le. zero) tol = dlamch('EpsMach') -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c | NEV0 is the local variable designating the | -c | size of the invariant subspace desired. 
| -c %----------------------------------------------% -c - np = ncv - nev - nev0 = nev -c -c %-----------------------------% -c | Zero out internal workspace | -c %-----------------------------% -c - do 10 j = 1, 3*ncv**2 + 6*ncv - workl(j) = zero - 10 continue -c -c %-------------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+2*ncv) := real and imaginary | -c | parts of ritz values | -c | workl(ncv*ncv+2*ncv+1:ncv*ncv+3*ncv) := error bounds | -c | workl(ncv*ncv+3*ncv+1:2*ncv*ncv+3*ncv) := rotation matrix Q | -c | workl(2*ncv*ncv+3*ncv+1:3*ncv*ncv+6*ncv) := workspace | -c | The final workspace is needed by subroutine dneigh called | -c | by dnaup2. Subroutine dneigh calls LAPACK routines for | -c | calculating eigenvalues and the last row of the eigenvector | -c | matrix. | -c %-------------------------------------------------------------% -c - ldh = ncv - ldq = ncv - ih = 1 - ritzr = ih + ldh*ncv - ritzi = ritzr + ncv - bounds = ritzi + ncv - iq = bounds + ncv - iw = iq + ldq*ncv - next = iw + ncv**2 + 3*ncv -c - ipntr(4) = next - ipntr(5) = ih - ipntr(6) = ritzr - ipntr(7) = ritzi - ipntr(8) = bounds - ipntr(14) = iw -c - end if -c -c %-------------------------------------------------------% -c | Carry out the Implicitly restarted Arnoldi Iteration. | -c %-------------------------------------------------------% -c - call dnaup2 - & ( ido, bmat, n, which, nev0, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, workl(ih), ldh, workl(ritzr), - & workl(ritzi), workl(bounds), workl(iq), ldq, workl(iw), - & ipntr, workd, info ) -c -c %--------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP or shifts. 
| -c %--------------------------------------------------% -c - if (ido .eq. 3) iparam(8) = np - if (ido .ne. 99) go to 9000 -c - iparam(3) = mxiter - iparam(5) = np - iparam(9) = nopx - iparam(10) = nbx - iparam(11) = nrorth -c -c %------------------------------------% -c | Exit if there was an informational | -c | error within dnaup2. | -c %------------------------------------% -c - if (info .lt. 0) go to 9000 - if (info .eq. 2) info = 3 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, mxiter, ndigit, - & '_naupd: Number of update iterations taken') - call ivout (logfil, 1, np, ndigit, - & '_naupd: Number of wanted "converged" Ritz values') - call dvout (logfil, np, workl(ritzr), ndigit, - & '_naupd: Real part of the final Ritz values') - call dvout (logfil, np, workl(ritzi), ndigit, - & '_naupd: Imaginary part of the final Ritz values') - call dvout (logfil, np, workl(bounds), ndigit, - & '_naupd: Associated Ritz estimates') - end if -c - call second (t1) - tnaupd = t1 - t0 -c - if (msglvl .gt. 
0) then -c -c %--------------------------------------------------------% -c | Version Number & Version Date are defined in version.h | -c %--------------------------------------------------------% -c - write (6,1000) - write (6,1100) mxiter, nopx, nbx, nrorth, nitref, nrstrt, - & tmvopx, tmvbx, tnaupd, tnaup2, tnaitr, titref, - & tgetv0, tneigh, tngets, tnapps, tnconv, trvec - 1000 format (//, - & 5x, '=============================================',/ - & 5x, '= Nonsymmetric implicit Arnoldi update code =',/ - & 5x, '= Version Number: ', ' 2.4', 21x, ' =',/ - & 5x, '= Version Date: ', ' 07/31/96', 16x, ' =',/ - & 5x, '=============================================',/ - & 5x, '= Summary of timing statistics =',/ - & 5x, '=============================================',//) - 1100 format ( - & 5x, 'Total number update iterations = ', i5,/ - & 5x, 'Total number of OP*x operations = ', i5,/ - & 5x, 'Total number of B*x operations = ', i5,/ - & 5x, 'Total number of reorthogonalization steps = ', i5,/ - & 5x, 'Total number of iterative refinement steps = ', i5,/ - & 5x, 'Total number of restart steps = ', i5,/ - & 5x, 'Total time in user OP*x operation = ', f12.6,/ - & 5x, 'Total time in user B*x operation = ', f12.6,/ - & 5x, 'Total time in Arnoldi update routine = ', f12.6,/ - & 5x, 'Total time in naup2 routine = ', f12.6,/ - & 5x, 'Total time in basic Arnoldi iteration loop = ', f12.6,/ - & 5x, 'Total time in reorthogonalization phase = ', f12.6,/ - & 5x, 'Total time in (re)start vector generation = ', f12.6,/ - & 5x, 'Total time in Hessenberg eig. 
subproblem = ', f12.6,/ - & 5x, 'Total time in getting the shifts = ', f12.6,/ - & 5x, 'Total time in applying the shifts = ', f12.6,/ - & 5x, 'Total time in convergence testing = ', f12.6,/ - & 5x, 'Total time in computing final Ritz vectors = ', f12.6/) - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of dnaupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnconv.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnconv.f deleted file mode 100644 index 015ccffd84..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dnconv.f +++ /dev/null @@ -1,146 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dnconv -c -c\Description: -c Convergence testing for the nonsymmetric Arnoldi eigenvalue routine. -c -c\Usage: -c call dnconv -c ( N, RITZR, RITZI, BOUNDS, TOL, NCONV ) -c -c\Arguments -c N Integer. (INPUT) -c Number of Ritz values to check for convergence. -c -c RITZR, Double precision arrays of length N. (INPUT) -c RITZI Real and imaginary parts of the Ritz values to be checked -c for convergence. - -c BOUNDS Double precision array of length N. (INPUT) -c Ritz estimates for the Ritz values in RITZR and RITZI. -c -c TOL Double precision scalar. (INPUT) -c Desired backward error for a Ritz value to be considered -c "converged". -c -c NCONV Integer scalar. (OUTPUT) -c Number of "converged" Ritz values. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c second ARPACK utility routine for timing. -c dlamch LAPACK routine that determines machine constants. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: nconv.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c 1. xxxx -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dnconv (n, ritzr, ritzi, bounds, tol, nconv) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer n, nconv - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% - - Double precision - & ritzr(n), ritzi(n), bounds(n) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i - Double precision - & temp, eps23 -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlapy2, dlamch - external dlapy2, dlamch - -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------------------------------------% -c | Convergence test: unlike in the symmetric code, I am not | -c | using things like refined error bounds and gap condition | -c | because I don't know the exact equivalent concept. | -c | | -c | Instead the i-th Ritz value is considered "converged" when: | -c | | -c | bounds(i) .le. ( TOL * | ritz | ) | -c | | -c | for some appropriate choice of norm. | -c %-------------------------------------------------------------% -c - call second (t0) -c -c %---------------------------------% -c | Get machine dependent constant. 
| -c %---------------------------------% -c - eps23 = dlamch('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0) -c - nconv = 0 - do 20 i = 1, n - temp = max( eps23, dlapy2( ritzr(i), ritzi(i) ) ) - if (bounds(i) .le. tol*temp) nconv = nconv + 1 - 20 continue -c - call second (t1) - tnconv = tnconv + (t1 - t0) -c - return -c -c %---------------% -c | End of dnconv | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dneigh.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dneigh.f deleted file mode 100644 index 5a83a21bb3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dneigh.f +++ /dev/null @@ -1,314 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dneigh -c -c\Description: -c Compute the eigenvalues of the current upper Hessenberg matrix -c and the corresponding Ritz estimates given the current residual norm. -c -c\Usage: -c call dneigh -c ( RNORM, N, H, LDH, RITZR, RITZI, BOUNDS, Q, LDQ, WORKL, IERR ) -c -c\Arguments -c RNORM Double precision scalar. (INPUT) -c Residual norm corresponding to the current upper Hessenberg -c matrix H. -c -c N Integer. (INPUT) -c Size of the matrix H. -c -c H Double precision N by N array. (INPUT) -c H contains the current upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZR, Double precision arrays of length N. (OUTPUT) -c RITZI On output, RITZR(1:N) (resp. RITZI(1:N)) contains the real -c (respectively imaginary) parts of the eigenvalues of H. -c -c BOUNDS Double precision array of length N. (OUTPUT) -c On output, BOUNDS contains the Ritz estimates associated with -c the eigenvalues RITZR and RITZI. This is equal to RNORM -c times the last components of the eigenvectors corresponding -c to the eigenvalues in RITZR and RITZI. -c -c Q Double precision N by N array. 
(WORKSPACE) -c Workspace needed to store the eigenvectors of H. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Double precision work array of length N**2 + 3*N. (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. This is needed to keep the full Schur form -c of H and also in the calculation of the eigenvectors of H. -c -c IERR Integer. (OUTPUT) -c Error exit flag from dlaqrb or dtrevc. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c dlaqrb ARPACK routine to compute the real Schur form of an -c upper Hessenberg matrix and last row of the Schur vectors. -c second ARPACK utility routine for timing. -c dmout ARPACK utility routine that prints matrices -c dvout ARPACK utility routine that prints vectors. -c dlacpy LAPACK matrix copy routine. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c dtrevc LAPACK routine to compute the eigenvectors of a matrix -c in upper quasi-triangular form -c dgemv Level 2 BLAS routine for matrix vector multiplication. -c dcopy Level 1 BLAS that copies one vector to another . -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c dscal Level 1 BLAS that scales a vector. -c -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: neigh.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dneigh (rnorm, n, h, ldh, ritzr, ritzi, bounds, - & q, ldq, workl, ierr) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer ierr, n, ldh, ldq - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & bounds(n), h(ldh,n), q(ldq,n), ritzi(n), ritzr(n), - & workl(n*(n+3)) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical select(1) - integer i, iconj, msglvl - Double precision - & temp, vl(1) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy, dlacpy, dlaqrb, dtrevc, dvout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlapy2, dnrm2 - external dlapy2, dnrm2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mneigh -c - if (msglvl .gt. 
2) then - call dmout (logfil, n, n, h, ldh, ndigit, - & '_neigh: Entering upper Hessenberg matrix H ') - end if -c -c %-----------------------------------------------------------% -c | 1. Compute the eigenvalues, the last components of the | -c | corresponding Schur vectors and the full Schur form T | -c | of the current upper Hessenberg matrix H. | -c | dlaqrb returns the full Schur form of H in WORKL(1:N**2) | -c | and the last components of the Schur vectors in BOUNDS. | -c %-----------------------------------------------------------% -c - call dlacpy ('All', n, n, h, ldh, workl, n) - call dlaqrb (.true., n, 1, n, workl, n, ritzr, ritzi, bounds, - & ierr) - if (ierr .ne. 0) go to 9000 -c - if (msglvl .gt. 1) then - call dvout (logfil, n, bounds, ndigit, - & '_neigh: last row of the Schur matrix for H') - end if -c -c %-----------------------------------------------------------% -c | 2. Compute the eigenvectors of the full Schur form T and | -c | apply the last components of the Schur vectors to get | -c | the last components of the corresponding eigenvectors. | -c | Remember that if the i-th and (i+1)-st eigenvalues are | -c | complex conjugate pairs, then the real & imaginary part | -c | of the eigenvector components are split across adjacent | -c | columns of Q. | -c %-----------------------------------------------------------% -c - call dtrevc ('R', 'A', select, n, workl, n, vl, n, q, ldq, - & n, n, workl(n*n+1), ierr) -c - if (ierr .ne. 0) go to 9000 -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | euclidean norms are all one. LAPACK subroutine | -c | dtrevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1; here the magnitude of a complex | -c | number (x,y) is taken to be |x| + |y|. | -c %------------------------------------------------% -c - iconj = 0 - do 10 i=1, n - if ( abs( ritzi(i) ) .le. 
zero ) then -c -c %----------------------% -c | Real eigenvalue case | -c %----------------------% -c - temp = dnrm2( n, q(1,i), 1 ) - call dscal ( n, one / temp, q(1,i), 1 ) - else -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c | columns, we further normalize by the | -c | square root of two. | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - temp = dlapy2( dnrm2( n, q(1,i), 1 ), - & dnrm2( n, q(1,i+1), 1 ) ) - call dscal ( n, one / temp, q(1,i), 1 ) - call dscal ( n, one / temp, q(1,i+1), 1 ) - iconj = 1 - else - iconj = 0 - end if - end if - 10 continue -c - call dgemv ('T', n, n, one, q, ldq, bounds, 1, zero, workl, 1) -c - if (msglvl .gt. 1) then - call dvout (logfil, n, workl, ndigit, - & '_neigh: Last row of the eigenvector matrix for H') - end if -c -c %----------------------------% -c | Compute the Ritz estimates | -c %----------------------------% -c - iconj = 0 - do 20 i = 1, n - if ( abs( ritzi(i) ) .le. zero ) then -c -c %----------------------% -c | Real eigenvalue case | -c %----------------------% -c - bounds(i) = rnorm * abs( workl(i) ) - else -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c | columns, we need to take the magnitude | -c | of the last components of the two vectors | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - bounds(i) = rnorm * dlapy2( workl(i), workl(i+1) ) - bounds(i+1) = bounds(i) - iconj = 1 - else - iconj = 0 - end if - end if - 20 continue -c - if (msglvl .gt. 
2) then - call dvout (logfil, n, ritzr, ndigit, - & '_neigh: Real part of the eigenvalues of H') - call dvout (logfil, n, ritzi, ndigit, - & '_neigh: Imaginary part of the eigenvalues of H') - call dvout (logfil, n, bounds, ndigit, - & '_neigh: Ritz estimates for the eigenvalues of H') - end if -c - call second (t1) - tneigh = tneigh + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of dneigh | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dneupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dneupd.f deleted file mode 100644 index bfbac00b9a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dneupd.f +++ /dev/null @@ -1,1068 +0,0 @@ -c\BeginDoc -c -c\Name: dneupd -c -c\Description: -c -c This subroutine returns the converged approximations to eigenvalues -c of A*z = lambda*B*z and (optionally): -c -c (1) The corresponding approximate eigenvectors; -c -c (2) An orthonormal basis for the associated approximate -c invariant subspace; -c -c (3) Both. -c -c There is negligible additional cost to obtain eigenvectors. An orthonormal -c basis is always computed. There is an additional storage cost of n*nev -c if both are requested (in this case a separate array Z must be supplied). -c -c The approximate eigenvalues and eigenvectors of A*z = lambda*B*z -c are derived from approximate eigenvalues and eigenvectors of -c of the linear operator OP prescribed by the MODE selection in the -c call to DNAUPD . DNAUPD must be called before this routine is called. -c These approximate eigenvalues and vectors are commonly called Ritz -c values and Ritz vectors respectively. They are referred to as such -c in the comments that follow. The computed orthonormal basis for the -c invariant subspace corresponding to these Ritz values is referred to as a -c Schur basis. 
-c -c See documentation in the header of the subroutine DNAUPD for -c definition of OP as well as other terms and the relation of computed -c Ritz values and Ritz vectors of OP with respect to the given problem -c A*z = lambda*B*z. For a brief description, see definitions of -c IPARAM(7), MODE and WHICH in the documentation of DNAUPD . -c -c\Usage: -c call dneupd -c ( RVEC, HOWMNY, SELECT, DR, DI, Z, LDZ, SIGMAR, SIGMAI, WORKEV, BMAT, -c N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, WORKD, WORKL, -c LWORKL, INFO ) -c -c\Arguments: -c RVEC LOGICAL (INPUT) -c Specifies whether a basis for the invariant subspace corresponding -c to the converged Ritz value approximations for the eigenproblem -c A*z = lambda*B*z is computed. -c -c RVEC = .FALSE. Compute Ritz values only. -c -c RVEC = .TRUE. Compute the Ritz vectors or Schur vectors. -c See Remarks below. -c -c HOWMNY Character*1 (INPUT) -c Specifies the form of the basis for the invariant subspace -c corresponding to the converged Ritz values that is to be computed. -c -c = 'A': Compute NEV Ritz vectors; -c = 'P': Compute NEV Schur vectors; -c = 'S': compute some of the Ritz vectors, specified -c by the logical array SELECT. -c -c SELECT Logical array of dimension NCV. (INPUT) -c If HOWMNY = 'S', SELECT specifies the Ritz vectors to be -c computed. To select the Ritz vector corresponding to a -c Ritz value (DR(j), DI(j)), SELECT(j) must be set to .TRUE.. -c If HOWMNY = 'A' or 'P', SELECT is used as internal workspace. -c -c DR Double precision array of dimension NEV+1. (OUTPUT) -c If IPARAM(7) = 1,2 or 3 and SIGMAI=0.0 then on exit: DR contains -c the real part of the Ritz approximations to the eigenvalues of -c A*z = lambda*B*z. -c If IPARAM(7) = 3, 4 and SIGMAI is not equal to zero, then on exit: -c DR contains the real part of the Ritz values of OP computed by -c DNAUPD . 
A further computation must be performed by the user -c to transform the Ritz values computed for OP by DNAUPD to those -c of the original system A*z = lambda*B*z. See remark 3 below. -c -c DI Double precision array of dimension NEV+1. (OUTPUT) -c On exit, DI contains the imaginary part of the Ritz value -c approximations to the eigenvalues of A*z = lambda*B*z associated -c with DR. -c -c NOTE: When Ritz values are complex, they will come in complex -c conjugate pairs. If eigenvectors are requested, the -c corresponding Ritz vectors will also come in conjugate -c pairs and the real and imaginary parts of these are -c represented in two consecutive columns of the array Z -c (see below). -c -c Z Double precision N by NEV+1 array if RVEC = .TRUE. and HOWMNY = 'A'. (OUTPUT) -c On exit, if RVEC = .TRUE. and HOWMNY = 'A', then the columns of -c Z represent approximate eigenvectors (Ritz vectors) corresponding -c to the NCONV=IPARAM(5) Ritz values for eigensystem -c A*z = lambda*B*z. -c -c The complex Ritz vector associated with the Ritz value -c with positive imaginary part is stored in two consecutive -c columns. The first column holds the real part of the Ritz -c vector and the second column holds the imaginary part. The -c Ritz vector associated with the Ritz value with negative -c imaginary part is simply the complex conjugate of the Ritz vector -c associated with the positive imaginary part. -c -c If RVEC = .FALSE. or HOWMNY = 'P', then Z is not referenced. -c -c NOTE: If if RVEC = .TRUE. and a Schur basis is not required, -c the array Z may be set equal to first NEV+1 columns of the Arnoldi -c basis array V computed by DNAUPD . In this case the Arnoldi basis -c will be destroyed and overwritten with the eigenvector basis. -c -c LDZ Integer. (INPUT) -c The leading dimension of the array Z. If Ritz vectors are -c desired, then LDZ >= max( 1, N ). In any case, LDZ >= 1. 
-c -c SIGMAR Double precision (INPUT) -c If IPARAM(7) = 3 or 4, represents the real part of the shift. -c Not referenced if IPARAM(7) = 1 or 2. -c -c SIGMAI Double precision (INPUT) -c If IPARAM(7) = 3 or 4, represents the imaginary part of the shift. -c Not referenced if IPARAM(7) = 1 or 2. See remark 3 below. -c -c WORKEV Double precision work array of dimension 3*NCV. (WORKSPACE) -c -c **** The remaining arguments MUST be the same as for the **** -c **** call to DNAUPD that was just completed. **** -c -c NOTE: The remaining arguments -c -c BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, -c WORKD, WORKL, LWORKL, INFO -c -c must be passed directly to DNEUPD following the last call -c to DNAUPD . These arguments MUST NOT BE MODIFIED between -c the the last call to DNAUPD and the call to DNEUPD . -c -c Three of these parameters (V, WORKL, INFO) are also output parameters: -c -c V Double precision N by NCV array. (INPUT/OUTPUT) -c -c Upon INPUT: the NCV columns of V contain the Arnoldi basis -c vectors for OP as constructed by DNAUPD . -c -c Upon OUTPUT: If RVEC = .TRUE. the first NCONV=IPARAM(5) columns -c contain approximate Schur vectors that span the -c desired invariant subspace. See Remark 2 below. -c -c NOTE: If the array Z has been set equal to first NEV+1 columns -c of the array V and RVEC=.TRUE. and HOWMNY= 'A', then the -c Arnoldi basis held by V has been overwritten by the desired -c Ritz vectors. If a separate array Z has been passed then -c the first NCONV=IPARAM(5) columns of V will contain approximate -c Schur vectors that span the desired invariant subspace. -c -c WORKL Double precision work array of length LWORKL. (OUTPUT/WORKSPACE) -c WORKL(1:ncv*ncv+3*ncv) contains information obtained in -c dnaupd . They are not changed by dneupd . 
-c WORKL(ncv*ncv+3*ncv+1:3*ncv*ncv+6*ncv) holds the -c real and imaginary part of the untransformed Ritz values, -c the upper quasi-triangular matrix for H, and the -c associated matrix representation of the invariant subspace for H. -c -c Note: IPNTR(9:13) contains the pointer into WORKL for addresses -c of the above information computed by dneupd . -c ------------------------------------------------------------- -c IPNTR(9): pointer to the real part of the NCV RITZ values of the -c original system. -c IPNTR(10): pointer to the imaginary part of the NCV RITZ values of -c the original system. -c IPNTR(11): pointer to the NCV corresponding error bounds. -c IPNTR(12): pointer to the NCV by NCV upper quasi-triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c dneupd if RVEC = .TRUE. See Remark 2 below. -c ------------------------------------------------------------- -c -c INFO Integer. (OUTPUT) -c Error flag on output. -c -c = 0: Normal exit. -c -c = 1: The Schur form computed by LAPACK routine dlahqr -c could not be reordered by LAPACK routine dtrsen . -c Re-enter subroutine dneupd with IPARAM(5)=NCV and -c increase the size of the arrays DR and DI to have -c dimension at least dimension NCV and allocate at least NCV -c columns for Z. NOTE: Not necessary if Z and V share -c the same space. Please notify the authors if this error -c occurs. -c -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 2 and less than or equal to N. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work WORKL array is not sufficient. -c = -8: Error return from calculation of a real Schur form. -c Informational error from LAPACK routine dlahqr . -c = -9: Error return from calculation of eigenvectors. -c Informational error from LAPACK routine dtrevc . 
-c = -10: IPARAM(7) must be 1,2,3,4. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. -c = -12: HOWMNY = 'S' not yet implemented -c = -13: HOWMNY must be one of 'A' or 'P' if RVEC = .true. -c = -14: DNAUPD did not find any eigenvalues to sufficient -c accuracy. -c = -15: DNEUPD got a different count of the number of converged -c Ritz values than DNAUPD got. This indicates the user -c probably made an error in passing data from DNAUPD to -c DNEUPD or that the data was modified before entering -c DNEUPD -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett & Y. Saad, "Complex Shift and Invert Strategies for -c Real Matrices", Linear Algebra and its Applications, vol 88/89, -c pp 575-595, (1987). -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c dmout ARPACK utility routine that prints matrices -c dvout ARPACK utility routine that prints vectors. -c dgeqr2 LAPACK routine that computes the QR factorization of -c a matrix. -c dlacpy LAPACK matrix copy routine. -c dlahqr LAPACK routine to compute the real Schur form of an -c upper Hessenberg matrix. -c dlamch LAPACK routine that determines machine constants. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c dlaset LAPACK matrix initialization routine. -c dorm2r LAPACK routine that applies an orthogonal matrix in -c factored form. -c dtrevc LAPACK routine to compute the eigenvectors of a matrix -c in upper quasi-triangular form. -c dtrsen LAPACK routine that re-orders the Schur form. -c dtrmm Level 3 BLAS matrix times an upper triangular matrix. -c dger Level 2 BLAS rank one update to a matrix. 
-c dcopy Level 1 BLAS that copies one vector to another . -c ddot Level 1 BLAS that computes the scalar product of two vectors. -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c dscal Level 1 BLAS that scales a vector. -c -c\Remarks -c -c 1. Currently only HOWMNY = 'A' and 'P' are implemented. -c -c Let trans(X) denote the transpose of X. -c -c 2. Schur vectors are an orthogonal representation for the basis of -c Ritz vectors. Thus, their numerical properties are often superior. -c If RVEC = .TRUE. then the relationship -c A * V(:,1:IPARAM(5)) = V(:,1:IPARAM(5)) * T, and -c trans(V(:,1:IPARAM(5))) * V(:,1:IPARAM(5)) = I are approximately -c satisfied. Here T is the leading submatrix of order IPARAM(5) of the -c real upper quasi-triangular matrix stored workl(ipntr(12)). That is, -c T is block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; -c each 2-by-2 diagonal block has its diagonal elements equal and its -c off-diagonal elements of opposite sign. Corresponding to each 2-by-2 -c diagonal block is a complex conjugate pair of Ritz values. The real -c Ritz values are stored on the diagonal of T. -c -c 3. If IPARAM(7) = 3 or 4 and SIGMAI is not equal zero, then the user must -c form the IPARAM(5) Rayleigh quotients in order to transform the Ritz -c values computed by DNAUPD for OP to those of A*z = lambda*B*z. -c Set RVEC = .true. and HOWMNY = 'A', and -c compute -c trans(Z(:,I)) * A * Z(:,I) if DI(I) = 0. -c If DI(I) is not equal to zero and DI(I+1) = - D(I), -c then the desired real and imaginary parts of the Ritz value are -c trans(Z(:,I)) * A * Z(:,I) + trans(Z(:,I+1)) * A * Z(:,I+1), -c trans(Z(:,I)) * A * Z(:,I+1) - trans(Z(:,I+1)) * A * Z(:,I), -c respectively. -c Another possibility is to set RVEC = .true. and HOWMNY = 'P' and -c compute trans(V(:,1:IPARAM(5))) * A * V(:,1:IPARAM(5)) and then an upper -c quasi-triangular matrix of order IPARAM(5) is computed. See remark -c 2 above. 
-c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Chao Yang Houston, Texas -c Dept. of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: neupd.F SID: 2.7 DATE OF SID: 09/20/00 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- - subroutine dneupd (rvec , howmny, select, dr , di, - & z , ldz , sigmar, sigmai, workev, - & bmat , n , which , nev , tol, - & resid, ncv , v , ldv , iparam, - & ipntr, workd , workl , lworkl, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat, howmny, which*2 - logical rvec - integer info, ldz, ldv, lworkl, n, ncv, nev - Double precision - & sigmar, sigmai, tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - logical select(ncv) - Double precision - & dr(nev+1) , di(nev+1), resid(n) , - & v(ldv,ncv) , z(ldz,*) , workd(3*n), - & workl(lworkl), workev(3*ncv) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0 , zero = 0.0D+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character type*6 - integer bounds, ierr , ih , ihbds , - & iheigr, iheigi, iconj , nconv , - & invsub, iuptri, iwev , iwork(1), - & j , k , ldh , ldq , - & mode , msglvl, outncv, ritzr , - & ritzi , wri , wrr , irr , - & iri , ibd , ishift, numcnv , - & np , jj , nconv2 - logical reord - Double precision - & conds , rnorm, sep , temp, - & vl(1,1), temp1, eps23 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy , dger , dgeqr2 , dlacpy , - & dlahqr , dlaset , 
dmout , dorm2r , - & dtrevc , dtrmm , dtrsen , dscal , - & dvout , ivout -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlapy2 , dnrm2 , dlamch , ddot - external dlapy2 , dnrm2 , dlamch , ddot -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs, min, sqrt -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - msglvl = mneupd - mode = iparam(7) - nconv = iparam(5) - info = 0 -c -c %---------------------------------% -c | Get machine dependent constant. | -c %---------------------------------% -c - eps23 = dlamch ('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0 ) -c -c %--------------% -c | Quick return | -c %--------------% -c - ierr = 0 -c - if (nconv .le. 0) then - ierr = -14 - else if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev+1 .or. ncv .gt. n) then - ierr = -3 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 6*ncv) then - ierr = -7 - else if ( (howmny .ne. 'A' .and. - & howmny .ne. 'P' .and. - & howmny .ne. 'S') .and. rvec ) then - ierr = -13 - else if (howmny .eq. 'S' ) then - ierr = -12 - end if -c - if (mode .eq. 1 .or. mode .eq. 2) then - type = 'REGULR' - else if (mode .eq. 3 .and. sigmai .eq. zero) then - type = 'SHIFTI' - else if (mode .eq. 3 ) then - type = 'REALPT' - else if (mode .eq. 4 ) then - type = 'IMAGPT' - else - ierr = -10 - end if - if (mode .eq. 1 .and. bmat .eq. 'G') ierr = -11 -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 
0) then - info = ierr - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+2*ncv) := real and imaginary | -c | parts of ritz values | -c | workl(ncv*ncv+2*ncv+1:ncv*ncv+3*ncv) := error bounds | -c %--------------------------------------------------------% -c -c %-----------------------------------------------------------% -c | The following is used and set by DNEUPD . | -c | workl(ncv*ncv+3*ncv+1:ncv*ncv+4*ncv) := The untransformed | -c | real part of the Ritz values. | -c | workl(ncv*ncv+4*ncv+1:ncv*ncv+5*ncv) := The untransformed | -c | imaginary part of the Ritz values. | -c | workl(ncv*ncv+5*ncv+1:ncv*ncv+6*ncv) := The untransformed | -c | error bounds of the Ritz values | -c | workl(ncv*ncv+6*ncv+1:2*ncv*ncv+6*ncv) := Holds the upper | -c | quasi-triangular matrix for H | -c | workl(2*ncv*ncv+6*ncv+1: 3*ncv*ncv+6*ncv) := Holds the | -c | associated matrix representation of the invariant | -c | subspace for H. | -c | GRAND total of NCV * ( 3 * NCV + 6 ) locations. | -c %-----------------------------------------------------------% -c - ih = ipntr(5) - ritzr = ipntr(6) - ritzi = ipntr(7) - bounds = ipntr(8) - ldh = ncv - ldq = ncv - iheigr = bounds + ldh - iheigi = iheigr + ldh - ihbds = iheigi + ldh - iuptri = ihbds + ldh - invsub = iuptri + ldh*ncv - ipntr(9) = iheigr - ipntr(10) = iheigi - ipntr(11) = ihbds - ipntr(12) = iuptri - ipntr(13) = invsub - wrr = 1 - wri = ncv + 1 - iwev = wri + ncv -c -c %-----------------------------------------% -c | irr points to the REAL part of the Ritz | -c | values computed by _neigh before | -c | exiting _naup2. 
| -c | iri points to the IMAGINARY part of the | -c | Ritz values computed by _neigh | -c | before exiting _naup2. | -c | ibd points to the Ritz estimates | -c | computed by _neigh before exiting | -c | _naup2. | -c %-----------------------------------------% -c - irr = ipntr(14)+ncv*ncv - iri = irr+ncv - ibd = iri+ncv -c -c %------------------------------------% -c | RNORM is B-norm of the RESID(1:N). | -c %------------------------------------% -c - rnorm = workl(ih+2) - workl(ih+2) = zero -c - if (msglvl .gt. 2) then - call dvout (logfil, ncv, workl(irr), ndigit, - & '_neupd: Real part of Ritz values passed in from _NAUPD.') - call dvout (logfil, ncv, workl(iri), ndigit, - & '_neupd: Imag part of Ritz values passed in from _NAUPD.') - call dvout (logfil, ncv, workl(ibd), ndigit, - & '_neupd: Ritz estimates passed in from _NAUPD.') - end if -c - if (rvec) then -c - reord = .false. -c -c %---------------------------------------------------% -c | Use the temporary bounds array to store indices | -c | These will be used to mark the select array later | -c %---------------------------------------------------% -c - do 10 j = 1,ncv - workl(bounds+j-1) = j - select(j) = .false. - 10 continue -c -c %-------------------------------------% -c | Select the wanted Ritz values. | -c | Sort the Ritz values so that the | -c | wanted ones appear at the tailing | -c | NEV positions of workl(irr) and | -c | workl(iri). Move the corresponding | -c | error estimates in workl(bound) | -c | accordingly. | -c %-------------------------------------% -c - np = ncv - nev - ishift = 0 - call dngets (ishift , which , nev , - & np , workl(irr), workl(iri), - & workl(bounds), workl , workl(np+1)) -c - if (msglvl .gt. 
2) then - call dvout (logfil, ncv, workl(irr), ndigit, - & '_neupd: Real part of Ritz values after calling _NGETS.') - call dvout (logfil, ncv, workl(iri), ndigit, - & '_neupd: Imag part of Ritz values after calling _NGETS.') - call dvout (logfil, ncv, workl(bounds), ndigit, - & '_neupd: Ritz value indices after calling _NGETS.') - end if -c -c %-----------------------------------------------------% -c | Record indices of the converged wanted Ritz values | -c | Mark the select array for possible reordering | -c %-----------------------------------------------------% -c - numcnv = 0 - do 11 j = 1,ncv - temp1 = max(eps23, - & dlapy2 ( workl(irr+ncv-j), workl(iri+ncv-j) )) - jj = workl(bounds + ncv - j) - if (numcnv .lt. nconv .and. - & workl(ibd+jj-1) .le. tol*temp1) then - select(jj) = .true. - numcnv = numcnv + 1 - if (jj .gt. nev) reord = .true. - endif - 11 continue -c -c %-----------------------------------------------------------% -c | Check the count (numcnv) of converged Ritz values with | -c | the number (nconv) reported by dnaupd. If these two | -c | are different then there has probably been an error | -c | caused by incorrect passing of the dnaupd data. | -c %-----------------------------------------------------------% -c - if (msglvl .gt. 2) then - call ivout(logfil, 1, numcnv, ndigit, - & '_neupd: Number of specified eigenvalues') - call ivout(logfil, 1, nconv, ndigit, - & '_neupd: Number of "converged" eigenvalues') - end if -c - if (numcnv .ne. nconv) then - info = -15 - go to 9000 - end if -c -c %-----------------------------------------------------------% -c | Call LAPACK routine dlahqr to compute the real Schur form | -c | of the upper Hessenberg matrix returned by DNAUPD . | -c | Make a copy of the upper Hessenberg matrix. | -c | Initialize the Schur vector matrix Q to the identity. 
| -c %-----------------------------------------------------------% -c - call dcopy (ldh*ncv, workl(ih), 1, workl(iuptri), 1) - call dlaset ('All', ncv, ncv, - & zero , one, workl(invsub), - & ldq) - call dlahqr (.true., .true. , ncv, - & 1 , ncv , workl(iuptri), - & ldh , workl(iheigr), workl(iheigi), - & 1 , ncv , workl(invsub), - & ldq , ierr) - call dcopy (ncv , workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) -c - if (ierr .ne. 0) then - info = -8 - go to 9000 - end if -c - if (msglvl .gt. 1) then - call dvout (logfil, ncv, workl(iheigr), ndigit, - & '_neupd: Real part of the eigenvalues of H') - call dvout (logfil, ncv, workl(iheigi), ndigit, - & '_neupd: Imaginary part of the Eigenvalues of H') - call dvout (logfil, ncv, workl(ihbds), ndigit, - & '_neupd: Last row of the Schur vector matrix') - if (msglvl .gt. 3) then - call dmout (logfil , ncv, ncv , - & workl(iuptri), ldh, ndigit, - & '_neupd: The upper quasi-triangular matrix ') - end if - end if -c - if (reord) then -c -c %-----------------------------------------------------% -c | Reorder the computed upper quasi-triangular matrix. | -c %-----------------------------------------------------% -c - call dtrsen ('None' , 'V' , - & select , ncv , - & workl(iuptri), ldh , - & workl(invsub), ldq , - & workl(iheigr), workl(iheigi), - & nconv2 , conds , - & sep , workl(ihbds) , - & ncv , iwork , - & 1 , ierr) -c - if (nconv2 .lt. nconv) then - nconv = nconv2 - end if - - if (ierr .eq. 1) then - info = 1 - go to 9000 - end if -c - - if (msglvl .gt. 2) then - call dvout (logfil, ncv, workl(iheigr), ndigit, - & '_neupd: Real part of the eigenvalues of H--reordered') - call dvout (logfil, ncv, workl(iheigi), ndigit, - & '_neupd: Imag part of the eigenvalues of H--reordered') - if (msglvl .gt. 
3) then - call dmout (logfil , ncv, ncv , - & workl(iuptri), ldq, ndigit, - & '_neupd: Quasi-triangular matrix after re-ordering') - end if - end if -c - end if -c -c %---------------------------------------% -c | Copy the last row of the Schur vector | -c | into workl(ihbds). This will be used | -c | to compute the Ritz estimates of | -c | converged Ritz values. | -c %---------------------------------------% -c - call dcopy (ncv, workl(invsub+ncv-1), ldq, workl(ihbds), 1) -c -c %----------------------------------------------------% -c | Place the computed eigenvalues of H into DR and DI | -c | if a spectral transformation was not used. | -c %----------------------------------------------------% -c - if (type .eq. 'REGULR') then - call dcopy (nconv, workl(iheigr), 1, dr, 1) - call dcopy (nconv, workl(iheigi), 1, di, 1) - end if -c -c %----------------------------------------------------------% -c | Compute the QR factorization of the matrix representing | -c | the wanted invariant subspace located in the first NCONV | -c | columns of workl(invsub,ldq). | -c %----------------------------------------------------------% -c - call dgeqr2 (ncv, nconv , workl(invsub), - & ldq, workev, workev(ncv+1), - & ierr) -c -c %---------------------------------------------------------% -c | * Postmultiply V by Q using dorm2r . | -c | * Copy the first NCONV columns of VQ into Z. | -c | * Postmultiply Z by R. 
| -c | The N by NCONV matrix Z is now a matrix representation | -c | of the approximate invariant subspace associated with | -c | the Ritz values in workl(iheigr) and workl(iheigi) | -c | The first NCONV columns of V are now approximate Schur | -c | vectors associated with the real upper quasi-triangular | -c | matrix of order NCONV in workl(iuptri) | -c %---------------------------------------------------------% -c - call dorm2r ('Right', 'Notranspose', n , - & ncv , nconv , workl(invsub), - & ldq , workev , v , - & ldv , workd(n+1) , ierr) - call dlacpy ('All', n, nconv, v, ldv, z, ldz) -c - do 20 j=1, nconv -c -c %---------------------------------------------------% -c | Perform both a column and row scaling if the | -c | diagonal element of workl(invsub,ldq) is negative | -c | I'm lazy and don't take advantage of the upper | -c | quasi-triangular form of workl(iuptri,ldq) | -c | Note that since Q is orthogonal, R is a diagonal | -c | matrix consisting of plus or minus ones | -c %---------------------------------------------------% -c - if (workl(invsub+(j-1)*ldq+j-1) .lt. zero) then - call dscal (nconv, -one, workl(iuptri+j-1), ldq) - call dscal (nconv, -one, workl(iuptri+(j-1)*ldq), 1) - end if -c - 20 continue -c - if (howmny .eq. 'A') then -c -c %--------------------------------------------% -c | Compute the NCONV wanted eigenvectors of T | -c | located in workl(iuptri,ldq). | -c %--------------------------------------------% -c - do 30 j=1, ncv - if (j .le. nconv) then - select(j) = .true. - else - select(j) = .false. - end if - 30 continue -c - call dtrevc ('Right', 'Select' , select , - & ncv , workl(iuptri), ldq , - & vl , 1 , workl(invsub), - & ldq , ncv , outncv , - & workev , ierr) -c - if (ierr .ne. 0) then - info = -9 - go to 9000 - end if -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | Euclidean norms are all one. 
LAPACK subroutine | -c | dtrevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1; | -c %------------------------------------------------% -c - iconj = 0 - do 40 j=1, nconv -c - if ( workl(iheigi+j-1) .eq. zero ) then -c -c %----------------------% -c | real eigenvalue case | -c %----------------------% -c - temp = dnrm2 ( ncv, workl(invsub+(j-1)*ldq), 1 ) - call dscal ( ncv, one / temp, - & workl(invsub+(j-1)*ldq), 1 ) -c - else -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c | columns, we further normalize by the | -c | square root of two. | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - temp = dlapy2 (dnrm2 (ncv, - & workl(invsub+(j-1)*ldq), - & 1), - & dnrm2 (ncv, - & workl(invsub+j*ldq), - & 1)) - call dscal (ncv, one/temp, - & workl(invsub+(j-1)*ldq), 1 ) - call dscal (ncv, one/temp, - & workl(invsub+j*ldq), 1 ) - iconj = 1 - else - iconj = 0 - end if -c - end if -c - 40 continue -c - call dgemv ('T', ncv, nconv, one, workl(invsub), - & ldq, workl(ihbds), 1, zero, workev, 1) -c - iconj = 0 - do 45 j=1, nconv - if (workl(iheigi+j-1) .ne. zero) then -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - workev(j) = dlapy2 (workev(j), workev(j+1)) - workev(j+1) = workev(j) - iconj = 1 - else - iconj = 0 - end if - end if - 45 continue -c - if (msglvl .gt. 2) then - call dcopy (ncv, workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) - call dvout (logfil, ncv, workl(ihbds), ndigit, - & '_neupd: Last row of the eigenvector matrix for T') - if (msglvl .gt. 
3) then - call dmout (logfil, ncv, ncv, workl(invsub), ldq, - & ndigit, '_neupd: The eigenvector matrix for T') - end if - end if -c -c %---------------------------------------% -c | Copy Ritz estimates into workl(ihbds) | -c %---------------------------------------% -c - call dcopy (nconv, workev, 1, workl(ihbds), 1) -c -c %---------------------------------------------------------% -c | Compute the QR factorization of the eigenvector matrix | -c | associated with leading portion of T in the first NCONV | -c | columns of workl(invsub,ldq). | -c %---------------------------------------------------------% -c - call dgeqr2 (ncv, nconv , workl(invsub), - & ldq, workev, workev(ncv+1), - & ierr) -c -c %----------------------------------------------% -c | * Postmultiply Z by Q. | -c | * Postmultiply Z by R. | -c | The N by NCONV matrix Z is now contains the | -c | Ritz vectors associated with the Ritz values | -c | in workl(iheigr) and workl(iheigi). | -c %----------------------------------------------% -c - call dorm2r ('Right', 'Notranspose', n , - & ncv , nconv , workl(invsub), - & ldq , workev , z , - & ldz , workd(n+1) , ierr) -c - call dtrmm ('Right' , 'Upper' , 'No transpose', - & 'Non-unit', n , nconv , - & one , workl(invsub), ldq , - & z , ldz) -c - end if -c - else -c -c %------------------------------------------------------% -c | An approximate invariant subspace is not needed. | -c | Place the Ritz values computed DNAUPD into DR and DI | -c %------------------------------------------------------% -c - call dcopy (nconv, workl(ritzr), 1, dr, 1) - call dcopy (nconv, workl(ritzi), 1, di, 1) - call dcopy (nconv, workl(ritzr), 1, workl(iheigr), 1) - call dcopy (nconv, workl(ritzi), 1, workl(iheigi), 1) - call dcopy (nconv, workl(bounds), 1, workl(ihbds), 1) - end if -c -c %------------------------------------------------% -c | Transform the Ritz values and possibly vectors | -c | and corresponding error bounds of OP to those | -c | of A*x = lambda*B*x. 
| -c %------------------------------------------------% -c - if (type .eq. 'REGULR') then -c - if (rvec) - & call dscal (ncv, rnorm, workl(ihbds), 1) -c - else -c -c %---------------------------------------% -c | A spectral transformation was used. | -c | * Determine the Ritz estimates of the | -c | Ritz values in the original system. | -c %---------------------------------------% -c - if (type .eq. 'SHIFTI') then -c - if (rvec) - & call dscal (ncv, rnorm, workl(ihbds), 1) -c - do 50 k=1, ncv - temp = dlapy2 ( workl(iheigr+k-1), - & workl(iheigi+k-1) ) - workl(ihbds+k-1) = abs( workl(ihbds+k-1) ) - & / temp / temp - 50 continue -c - else if (type .eq. 'REALPT') then -c - do 60 k=1, ncv - 60 continue -c - else if (type .eq. 'IMAGPT') then -c - do 70 k=1, ncv - 70 continue -c - end if -c -c %-----------------------------------------------------------% -c | * Transform the Ritz values back to the original system. | -c | For TYPE = 'SHIFTI' the transformation is | -c | lambda = 1/theta + sigma | -c | For TYPE = 'REALPT' or 'IMAGPT' the user must from | -c | Rayleigh quotients or a projection. See remark 3 above.| -c | NOTES: | -c | *The Ritz vectors are not affected by the transformation. | -c %-----------------------------------------------------------% -c - if (type .eq. 'SHIFTI') then -c - do 80 k=1, ncv - temp = dlapy2 ( workl(iheigr+k-1), - & workl(iheigi+k-1) ) - workl(iheigr+k-1) = workl(iheigr+k-1)/temp/temp - & + sigmar - workl(iheigi+k-1) = -workl(iheigi+k-1)/temp/temp - & + sigmai - 80 continue -c - call dcopy (nconv, workl(iheigr), 1, dr, 1) - call dcopy (nconv, workl(iheigi), 1, di, 1) -c - else if (type .eq. 'REALPT' .or. type .eq. 'IMAGPT') then -c - call dcopy (nconv, workl(iheigr), 1, dr, 1) - call dcopy (nconv, workl(iheigi), 1, di, 1) -c - end if -c - end if -c - if (type .eq. 'SHIFTI' .and. msglvl .gt. 
1) then - call dvout (logfil, nconv, dr, ndigit, - & '_neupd: Untransformed real part of the Ritz valuess.') - call dvout (logfil, nconv, di, ndigit, - & '_neupd: Untransformed imag part of the Ritz valuess.') - call dvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Ritz estimates of untransformed Ritz values.') - else if (type .eq. 'REGULR' .and. msglvl .gt. 1) then - call dvout (logfil, nconv, dr, ndigit, - & '_neupd: Real parts of converged Ritz values.') - call dvout (logfil, nconv, di, ndigit, - & '_neupd: Imag parts of converged Ritz values.') - call dvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Associated Ritz estimates.') - end if -c -c %-------------------------------------------------% -c | Eigenvector Purification step. Formally perform | -c | one of inverse subspace iteration. Only used | -c | for MODE = 2. | -c %-------------------------------------------------% -c - if (rvec .and. howmny .eq. 'A' .and. type .eq. 'SHIFTI') then -c -c %------------------------------------------------% -c | Purify the computed Ritz vectors by adding a | -c | little bit of the residual vector: | -c | T | -c | resid(:)*( e s ) / theta | -c | NCV | -c | where H s = s theta. Remember that when theta | -c | has nonzero imaginary part, the corresponding | -c | Ritz vector is stored across two columns of Z. | -c %------------------------------------------------% -c - iconj = 0 - do 110 j=1, nconv - if (workl(iheigi+j-1) .eq. zero) then - workev(j) = workl(invsub+(j-1)*ldq+ncv-1) / - & workl(iheigr+j-1) - else if (iconj .eq. 
0) then - temp = dlapy2 ( workl(iheigr+j-1), workl(iheigi+j-1) ) - workev(j) = ( workl(invsub+(j-1)*ldq+ncv-1) * - & workl(iheigr+j-1) + - & workl(invsub+j*ldq+ncv-1) * - & workl(iheigi+j-1) ) / temp / temp - workev(j+1) = ( workl(invsub+j*ldq+ncv-1) * - & workl(iheigr+j-1) - - & workl(invsub+(j-1)*ldq+ncv-1) * - & workl(iheigi+j-1) ) / temp / temp - iconj = 1 - else - iconj = 0 - end if - 110 continue -c -c %---------------------------------------% -c | Perform a rank one update to Z and | -c | purify all the Ritz vectors together. | -c %---------------------------------------% -c - call dger (n, nconv, one, resid, 1, workev, 1, z, ldz) -c - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of DNEUPD | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dngets.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dngets.f deleted file mode 100644 index 2a0d9a6379..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dngets.f +++ /dev/null @@ -1,231 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dngets -c -c\Description: -c Given the eigenvalues of the upper Hessenberg matrix H, -c computes the NP shifts AMU that are zeros of the polynomial of -c degree NP which filters out components of the unwanted eigenvectors -c corresponding to the AMU's based on some given criteria. -c -c NOTE: call this even in the case of user specified shifts in order -c to sort the eigenvalues, and error bounds of H for later use. -c -c\Usage: -c call dngets -c ( ISHIFT, WHICH, KEV, NP, RITZR, RITZI, BOUNDS, SHIFTR, SHIFTI ) -c -c\Arguments -c ISHIFT Integer. (INPUT) -c Method for selecting the implicit shifts at each iteration. -c ISHIFT = 0: user specified shifts -c ISHIFT = 1: exact shift with respect to the matrix H. -c -c WHICH Character*2. (INPUT) -c Shift selection criteria. 
-c 'LM' -> want the KEV eigenvalues of largest magnitude. -c 'SM' -> want the KEV eigenvalues of smallest magnitude. -c 'LR' -> want the KEV eigenvalues of largest real part. -c 'SR' -> want the KEV eigenvalues of smallest real part. -c 'LI' -> want the KEV eigenvalues of largest imaginary part. -c 'SI' -> want the KEV eigenvalues of smallest imaginary part. -c -c KEV Integer. (INPUT/OUTPUT) -c INPUT: KEV+NP is the size of the matrix H. -c OUTPUT: Possibly increases KEV by one to keep complex conjugate -c pairs together. -c -c NP Integer. (INPUT/OUTPUT) -c Number of implicit shifts to be computed. -c OUTPUT: Possibly decreases NP by one to keep complex conjugate -c pairs together. -c -c RITZR, Double precision array of length KEV+NP. (INPUT/OUTPUT) -c RITZI On INPUT, RITZR and RITZI contain the real and imaginary -c parts of the eigenvalues of H. -c On OUTPUT, RITZR and RITZI are sorted so that the unwanted -c eigenvalues are in the first NP locations and the wanted -c portion is in the last KEV locations. When exact shifts are -c selected, the unwanted part corresponds to the shifts to -c be applied. Also, if ISHIFT .eq. 1, the unwanted eigenvalues -c are further sorted so that the ones with largest Ritz values -c are first. -c -c BOUNDS Double precision array of length KEV+NP. (INPUT/OUTPUT) -c Error bounds corresponding to the ordering in RITZ. -c -c SHIFTR, SHIFTI *** USE deprecated as of version 2.1. *** -c -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c dsortc ARPACK sorting routine. -c dcopy Level 1 BLAS that copies one vector to another . -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: ngets.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c 1. xxxx -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dngets ( ishift, which, kev, np, ritzr, ritzi, bounds, - & shiftr, shifti ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - integer ishift, kev, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & bounds(kev+np), ritzr(kev+np), ritzi(kev+np), - & shiftr(1), shifti(1) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0, zero = 0.0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy, dsortc, second -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mngets -c -c %----------------------------------------------------% -c | LM, SM, LR, SR, LI, SI case. | -c | Sort the eigenvalues of H into the desired order | -c | and apply the resulting order to BOUNDS. | -c | The eigenvalues are sorted so that the wanted part | -c | are always in the last KEV locations. 
| -c | We first do a pre-processing sort in order to keep | -c | complex conjugate pairs together | -c %----------------------------------------------------% -c - if (which .eq. 'LM') then - call dsortc ('LR', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'SM') then - call dsortc ('SR', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'LR') then - call dsortc ('LM', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'SR') then - call dsortc ('SM', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'LI') then - call dsortc ('LM', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'SI') then - call dsortc ('SM', .true., kev+np, ritzr, ritzi, bounds) - end if -c - call dsortc (which, .true., kev+np, ritzr, ritzi, bounds) -c -c %-------------------------------------------------------% -c | Increase KEV by one if the ( ritzr(np),ritzi(np) ) | -c | = ( ritzr(np+1),-ritzi(np+1) ) and ritz(np) .ne. zero | -c | Accordingly decrease NP by one. In other words keep | -c | complex conjugate pairs together. | -c %-------------------------------------------------------% -c - if ( ( ritzr(np+1) - ritzr(np) ) .eq. zero - & .and. ( ritzi(np+1) + ritzi(np) ) .eq. zero ) then - np = np - 1 - kev = kev + 1 - end if -c - if ( ishift .eq. 1 ) then -c -c %-------------------------------------------------------% -c | Sort the unwanted Ritz values used as shifts so that | -c | the ones with largest Ritz estimates are first | -c | This will tend to minimize the effects of the | -c | forward instability of the iteration when they shifts | -c | are applied in subroutine dnapps. | -c | Be careful and use 'SR' since we want to sort BOUNDS! | -c %-------------------------------------------------------% -c - call dsortc ( 'SR', .true., np, bounds, ritzr, ritzi ) - end if -c - call second (t1) - tngets = tngets + (t1 - t0) -c - if (msglvl .gt. 
0) then - call ivout (logfil, 1, kev, ndigit, '_ngets: KEV is') - call ivout (logfil, 1, np, ndigit, '_ngets: NP is') - call dvout (logfil, kev+np, ritzr, ndigit, - & '_ngets: Eigenvalues of current H matrix -- real part') - call dvout (logfil, kev+np, ritzi, ndigit, - & '_ngets: Eigenvalues of current H matrix -- imag part') - call dvout (logfil, kev+np, bounds, ndigit, - & '_ngets: Ritz estimates of the current KEV+NP Ritz values') - end if -c - return -c -c %---------------% -c | End of dngets | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaitr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaitr.f deleted file mode 100644 index 9ceb0453b0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaitr.f +++ /dev/null @@ -1,853 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsaitr -c -c\Description: -c Reverse communication interface for applying NP additional steps to -c a K step symmetric Arnoldi factorization. -c -c Input: OP*V_{k} - V_{k}*H = r_{k}*e_{k}^T -c -c with (V_{k}^T)*B*V_{k} = I, (V_{k}^T)*B*r_{k} = 0. -c -c Output: OP*V_{k+p} - V_{k+p}*H = r_{k+p}*e_{k+p}^T -c -c with (V_{k+p}^T)*B*V_{k+p} = I, (V_{k+p}^T)*B*r_{k+p} = 0. -c -c where OP and B are as in dsaupd. The B-norm of r_{k+p} is also -c computed and returned. -c -c\Usage: -c call dsaitr -c ( IDO, BMAT, N, K, NP, MODE, RESID, RNORM, V, LDV, H, LDH, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c This is for the restart phase to force the new -c starting vector into the range of OP. 
-c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y, -c IPNTR(3) is the pointer into WORK for B * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c When the routine is used in the "shift-and-invert" mode, the -c vector B * Q is already available and does not need to be -c recomputed in forming OP * Q. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of matrix B that defines the -c semi-inner product for the operator OP. See dsaupd. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*M*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c K Integer. (INPUT) -c Current order of H and the number of columns of V. -c -c NP Integer. (INPUT) -c Number of additional Arnoldi steps to take. -c -c MODE Integer. (INPUT) -c Signifies which form for "OP". If MODE=2 then -c a reduction in the number of B matrix vector multiplies -c is possible since the B-norm of OP*x is equivalent to -c the inv(B)-norm of A*x. -c -c RESID Double precision array of length N. (INPUT/OUTPUT) -c On INPUT: RESID contains the residual vector r_{k}. -c On OUTPUT: RESID contains the residual vector r_{k+p}. -c -c RNORM Double precision scalar. (INPUT/OUTPUT) -c On INPUT the B-norm of r_{k}. -c On OUTPUT the B-norm of the updated residual r_{k+p}. -c -c V Double precision N by K+NP array. (INPUT/OUTPUT) -c On INPUT: V contains the Arnoldi vectors in the first K -c columns. -c On OUTPUT: V contains the new NP Arnoldi vectors in the next -c NP columns. The first K columns are unchanged. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Double precision (K+NP) by 2 array. 
(INPUT/OUTPUT) -c H is used to store the generated symmetric tridiagonal matrix -c with the subdiagonal in the first column starting at H(2,1) -c and the main diagonal in the second column. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORK for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Double precision work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The calling program should not -c use WORKD as temporary workspace during the iteration !!!!!! -c On INPUT, WORKD(1:N) = B*RESID where RESID is associated -c with the K step Arnoldi factorization. Used to save some -c computation at the first step. -c On OUTPUT, WORKD(1:N) = B*RESID where RESID is associated -c with the K+NP step Arnoldi factorization. -c -c INFO Integer. (OUTPUT) -c = 0: Normal exit. -c > 0: Size of an invariant subspace of OP is found that is -c less than K + NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c dgetv0 ARPACK routine to generate the initial vector. -c ivout ARPACK utility routine that prints integers. -c dmout ARPACK utility routine that prints matrices. -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c dlascl LAPACK routine for careful scaling of a matrix. 
-c dgemv Level 2 BLAS routine for matrix vector multiplication. -c daxpy Level 1 BLAS that computes a vector triad. -c dscal Level 1 BLAS that scales a vector. -c dcopy Level 1 BLAS that copies one vector to another . -c ddot Level 1 BLAS that computes the scalar product of two vectors. -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/93: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: saitr.F SID: 2.6 DATE OF SID: 8/28/96 RELEASE: 2 -c -c\Remarks -c The algorithm implemented is: -c -c restart = .false. -c Given V_{k} = [v_{1}, ..., v_{k}], r_{k}; -c r_{k} contains the initial residual vector even for k = 0; -c Also assume that rnorm = || B*r_{k} || and B*r_{k} are already -c computed by the calling program. -c -c betaj = rnorm ; p_{k+1} = B*r_{k} ; -c For j = k+1, ..., k+np Do -c 1) if ( betaj < tol ) stop or restart depending on j. -c if ( restart ) generate a new starting vector. -c 2) v_{j} = r(j-1)/betaj; V_{j} = [V_{j-1}, v_{j}]; -c p_{j} = p_{j}/betaj -c 3) r_{j} = OP*v_{j} where OP is defined as in dsaupd -c For shift-invert mode p_{j} = B*v_{j} is already available. -c wnorm = || OP*v_{j} || -c 4) Compute the j-th step residual vector. -c w_{j} = V_{j}^T * B * OP * v_{j} -c r_{j} = OP*v_{j} - V_{j} * w_{j} -c alphaj <- j-th component of w_{j} -c rnorm = || r_{j} || -c betaj+1 = rnorm -c If (rnorm > 0.717*wnorm) accept step and go back to 1) -c 5) Re-orthogonalization step: -c s = V_{j}'*B*r_{j} -c r_{j} = r_{j} - V_{j}*s; rnorm1 = || r_{j} || -c alphaj = alphaj + s_{j}; -c 6) Iterative refinement step: -c If (rnorm1 > 0.717*rnorm) then -c rnorm = rnorm1 -c accept step and go back to 1) -c Else -c rnorm = rnorm1 -c If this is the first time in step 6), go to 5) -c Else r_{j} lies in the span of V_{j} numerically. 
-c Set r_{j} = 0 and rnorm = 0; go to 1) -c EndIf -c End Do -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsaitr - & (ido, bmat, n, k, np, mode, resid, rnorm, v, ldv, h, ldh, - & ipntr, workd, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - integer ido, info, k, ldh, ldv, n, mode, np - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Double precision - & h(ldh,2), resid(n), v(ldv,k+np), workd(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical first, orth1, orth2, rstart, step3, step4 - integer i, ierr, ipj, irj, ivj, iter, itry, j, msglvl, - & infol, jj - Double precision - & rnorm1, wnorm, safmin, temp1 - save orth1, orth2, rstart, step3, step4, - & ierr, ipj, irj, ivj, iter, itry, j, msglvl, - & rnorm1, safmin, wnorm -c -c %-----------------------% -c | Local Array Arguments | -c %-----------------------% -c - Double precision - & xtemp(2) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external daxpy, dcopy, dscal, dgemv, dgetv0, dvout, dmout, - & dlascl, ivout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & ddot, dnrm2, dlamch - external ddot, dnrm2, dlamch -c -c %-----------------% -c | Data statements | -c %-----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then - first = .false. 
-c -c %--------------------------------% -c | safmin = safe minimum is such | -c | that 1/sfmin does not overflow | -c %--------------------------------% -c - safmin = dlamch('safmin') - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msaitr -c -c %------------------------------% -c | Initial call to this routine | -c %------------------------------% -c - info = 0 - step3 = .false. - step4 = .false. - rstart = .false. - orth1 = .false. - orth2 = .false. -c -c %--------------------------------% -c | Pointer to the current step of | -c | the factorization to build | -c %--------------------------------% -c - j = k + 1 -c -c %------------------------------------------% -c | Pointers used for reverse communication | -c | when using WORKD. | -c %------------------------------------------% -c - ipj = 1 - irj = ipj + n - ivj = irj + n - end if -c -c %-------------------------------------------------% -c | When in reverse communication mode one of: | -c | STEP3, STEP4, ORTH1, ORTH2, RSTART | -c | will be .true. | -c | STEP3: return from computing OP*v_{j}. | -c | STEP4: return from computing B-norm of OP*v_{j} | -c | ORTH1: return from computing B-norm of r_{j+1} | -c | ORTH2: return from computing B-norm of | -c | correction to the residual vector. | -c | RSTART: return from OP computations needed by | -c | dgetv0. | -c %-------------------------------------------------% -c - if (step3) go to 50 - if (step4) go to 60 - if (orth1) go to 70 - if (orth2) go to 90 - if (rstart) go to 30 -c -c %------------------------------% -c | Else this is the first step. 
| -c %------------------------------% -c -c %--------------------------------------------------------------% -c | | -c | A R N O L D I I T E R A T I O N L O O P | -c | | -c | Note: B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | -c %--------------------------------------------------------------% -c - 1000 continue -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, j, ndigit, - & '_saitr: generating Arnoldi vector no.') - call dvout (logfil, 1, rnorm, ndigit, - & '_saitr: B-norm of the current residual =') - end if -c -c %---------------------------------------------------------% -c | Check for exact zero. Equivalent to determing whether a | -c | j-step Arnoldi factorization is present. | -c %---------------------------------------------------------% -c - if (rnorm .gt. zero) go to 40 -c -c %---------------------------------------------------% -c | Invariant subspace found, generate a new starting | -c | vector which is orthogonal to the current Arnoldi | -c | basis and continue the iteration. | -c %---------------------------------------------------% -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_saitr: ****** restart at step ******') - end if -c -c %---------------------------------------------% -c | ITRY is the loop variable that controls the | -c | maximum amount of times that a restart is | -c | attempted. NRSTRT is used by stat.h | -c %---------------------------------------------% -c - nrstrt = nrstrt + 1 - itry = 1 - 20 continue - rstart = .true. - ido = 0 - 30 continue -c -c %--------------------------------------% -c | If in reverse communication mode and | -c | RSTART = .true. flow returns here. | -c %--------------------------------------% -c - call dgetv0 (ido, bmat, itry, .false., n, j, v, ldv, - & resid, rnorm, ipntr, workd, ierr) - if (ido .ne. 99) go to 9000 - if (ierr .lt. 0) then - itry = itry + 1 - if (itry .le. 
3) go to 20 -c -c %------------------------------------------------% -c | Give up after several restart attempts. | -c | Set INFO to the size of the invariant subspace | -c | which spans OP and exit. | -c %------------------------------------------------% -c - info = j - 1 - call second (t1) - tsaitr = tsaitr + (t1 - t0) - ido = 99 - go to 9000 - end if -c - 40 continue -c -c %---------------------------------------------------------% -c | STEP 2: v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm | -c | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | -c | when reciprocating a small RNORM, test against lower | -c | machine bound. | -c %---------------------------------------------------------% -c - call dcopy (n, resid, 1, v(1,j), 1) - if (rnorm .ge. safmin) then - temp1 = one / rnorm - call dscal (n, temp1, v(1,j), 1) - call dscal (n, temp1, workd(ipj), 1) - else -c -c %-----------------------------------------% -c | To scale both v_{j} and p_{j} carefully | -c | use LAPACK routine SLASCL | -c %-----------------------------------------% -c - call dlascl ('General', i, i, rnorm, one, n, 1, - & v(1,j), n, infol) - call dlascl ('General', i, i, rnorm, one, n, 1, - & workd(ipj), n, infol) - end if -c -c %------------------------------------------------------% -c | STEP 3: r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | -c | Note that this is not quite yet r_{j}. See STEP 4 | -c %------------------------------------------------------% -c - step3 = .true. - nopx = nopx + 1 - call second (t2) - call dcopy (n, v(1,j), 1, workd(ivj), 1) - ipntr(1) = ivj - ipntr(2) = irj - ipntr(3) = ipj - ido = 1 -c -c %-----------------------------------% -c | Exit in order to compute OP*v_{j} | -c %-----------------------------------% -c - go to 9000 - 50 continue -c -c %-----------------------------------% -c | Back from reverse communication; | -c | WORKD(IRJ:IRJ+N-1) := OP*v_{j}. 
| -c %-----------------------------------% -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) -c - step3 = .false. -c -c %------------------------------------------% -c | Put another copy of OP*v_{j} into RESID. | -c %------------------------------------------% -c - call dcopy (n, workd(irj), 1, resid, 1) -c -c %-------------------------------------------% -c | STEP 4: Finish extending the symmetric | -c | Arnoldi to length j. If MODE = 2 | -c | then B*OP = B*inv(B)*A = A and | -c | we don't need to compute B*OP. | -c | NOTE: If MODE = 2 WORKD(IVJ:IVJ+N-1) is | -c | assumed to have A*v_{j}. | -c %-------------------------------------------% -c - if (mode .eq. 2) go to 65 - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - step4 = .true. - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-------------------------------------% -c | Exit in order to compute B*OP*v_{j} | -c %-------------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy(n, resid, 1 , workd(ipj), 1) - end if - 60 continue -c -c %-----------------------------------% -c | Back from reverse communication; | -c | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j}. | -c %-----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - step4 = .false. -c -c %-------------------------------------% -c | The following is needed for STEP 5. | -c | Compute the B-norm of OP*v_{j}. | -c %-------------------------------------% -c - 65 continue - if (mode .eq. 2) then -c -c %----------------------------------% -c | Note that the B-norm of OP*v_{j} | -c | is the inv(B)-norm of A*v_{j}. | -c %----------------------------------% -c - wnorm = ddot (n, resid, 1, workd(ivj), 1) - wnorm = sqrt(abs(wnorm)) - else if (bmat .eq. 'G') then - wnorm = ddot (n, resid, 1, workd(ipj), 1) - wnorm = sqrt(abs(wnorm)) - else if (bmat .eq. 
'I') then - wnorm = dnrm2(n, resid, 1) - end if -c -c %-----------------------------------------% -c | Compute the j-th residual corresponding | -c | to the j step factorization. | -c | Use Classical Gram Schmidt and compute: | -c | w_{j} <- V_{j}^T * B * OP * v_{j} | -c | r_{j} <- OP*v_{j} - V_{j} * w_{j} | -c %-----------------------------------------% -c -c -c %------------------------------------------% -c | Compute the j Fourier coefficients w_{j} | -c | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}. | -c %------------------------------------------% -c - if (mode .ne. 2 ) then - call dgemv('T', n, j, one, v, ldv, workd(ipj), 1, zero, - & workd(irj), 1) - else if (mode .eq. 2) then - call dgemv('T', n, j, one, v, ldv, workd(ivj), 1, zero, - & workd(irj), 1) - end if -c -c %--------------------------------------% -c | Orthgonalize r_{j} against V_{j}. | -c | RESID contains OP*v_{j}. See STEP 3. | -c %--------------------------------------% -c - call dgemv('N', n, j, -one, v, ldv, workd(irj), 1, one, - & resid, 1) -c -c %--------------------------------------% -c | Extend H to have j rows and columns. | -c %--------------------------------------% -c - h(j,2) = workd(irj + j - 1) - if (j .eq. 1 .or. rstart) then - h(j,1) = zero - else - h(j,1) = rnorm - end if - call second (t4) -c - orth1 = .true. - iter = 0 -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*r_{j} | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd(ipj), 1) - end if - 70 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH1 = .true. | -c | WORKD(IPJ:IPJ+N-1) := B*r_{j}. | -c %---------------------------------------------------% -c - if (bmat .eq. 
'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - orth1 = .false. -c -c %------------------------------% -c | Compute the B-norm of r_{j}. | -c %------------------------------% -c - if (bmat .eq. 'G') then - rnorm = ddot (n, resid, 1, workd(ipj), 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = dnrm2(n, resid, 1) - end if -c -c %-----------------------------------------------------------% -c | STEP 5: Re-orthogonalization / Iterative refinement phase | -c | Maximum NITER_ITREF tries. | -c | | -c | s = V_{j}^T * B * r_{j} | -c | r_{j} = r_{j} - V_{j}*s | -c | alphaj = alphaj + s_{j} | -c | | -c | The stopping criteria used for iterative refinement is | -c | discussed in Parlett's book SEP, page 107 and in Gragg & | -c | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990. | -c | Determine if we need to correct the residual. The goal is | -c | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} || | -c %-----------------------------------------------------------% -c - if (rnorm .gt. 0.717*wnorm) go to 100 - nrorth = nrorth + 1 -c -c %---------------------------------------------------% -c | Enter the Iterative refinement phase. If further | -c | refinement is necessary, loop back here. The loop | -c | variable is ITER. Perform a step of Classical | -c | Gram-Schmidt using all the Arnoldi vectors V_{j} | -c %---------------------------------------------------% -c - 80 continue -c - if (msglvl .gt. 2) then - xtemp(1) = wnorm - xtemp(2) = rnorm - call dvout (logfil, 2, xtemp, ndigit, - & '_saitr: re-orthonalization ; wnorm and rnorm are') - end if -c -c %----------------------------------------------------% -c | Compute V_{j}^T * B * r_{j}. | -c | WORKD(IRJ:IRJ+J-1) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1). 
| -c %----------------------------------------------------% -c - call dgemv ('T', n, j, one, v, ldv, workd(ipj), 1, - & zero, workd(irj), 1) -c -c %----------------------------------------------% -c | Compute the correction to the residual: | -c | r_{j} = r_{j} - V_{j} * WORKD(IRJ:IRJ+J-1). | -c | The correction to H is v(:,1:J)*H(1:J,1:J) + | -c | v(:,1:J)*WORKD(IRJ:IRJ+J-1)*e'_j, but only | -c | H(j,j) is updated. | -c %----------------------------------------------% -c - call dgemv ('N', n, j, -one, v, ldv, workd(irj), 1, - & one, resid, 1) -c - if (j .eq. 1 .or. rstart) h(j,1) = zero - h(j,2) = h(j,2) + workd(irj + j - 1) -c - orth2 = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-----------------------------------% -c | Exit in order to compute B*r_{j}. | -c | r_{j} is the corrected residual. | -c %-----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd(ipj), 1) - end if - 90 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH2 = .true. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c -c %-----------------------------------------------------% -c | Compute the B-norm of the corrected residual r_{j}. | -c %-----------------------------------------------------% -c - if (bmat .eq. 'G') then - rnorm1 = ddot (n, resid, 1, workd(ipj), 1) - rnorm1 = sqrt(abs(rnorm1)) - else if (bmat .eq. 'I') then - rnorm1 = dnrm2(n, resid, 1) - end if -c - if (msglvl .gt. 0 .and. iter .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_saitr: Iterative refinement for Arnoldi residual') - if (msglvl .gt. 
2) then - xtemp(1) = rnorm - xtemp(2) = rnorm1 - call dvout (logfil, 2, xtemp, ndigit, - & '_saitr: iterative refinement ; rnorm and rnorm1 are') - end if - end if -c -c %-----------------------------------------% -c | Determine if we need to perform another | -c | step of re-orthogonalization. | -c %-----------------------------------------% -c - if (rnorm1 .gt. 0.717*rnorm) then -c -c %--------------------------------% -c | No need for further refinement | -c %--------------------------------% -c - rnorm = rnorm1 -c - else -c -c %-------------------------------------------% -c | Another step of iterative refinement step | -c | is required. NITREF is used by stat.h | -c %-------------------------------------------% -c - nitref = nitref + 1 - rnorm = rnorm1 - iter = iter + 1 - if (iter .le. 1) go to 80 -c -c %-------------------------------------------------% -c | Otherwise RESID is numerically in the span of V | -c %-------------------------------------------------% -c - do 95 jj = 1, n - resid(jj) = zero - 95 continue - rnorm = zero - end if -c -c %----------------------------------------------% -c | Branch here directly if iterative refinement | -c | wasn't necessary or after at most NITER_REF | -c | steps of iterative refinement. | -c %----------------------------------------------% -c - 100 continue -c - rstart = .false. - orth2 = .false. -c - call second (t5) - titref = titref + (t5 - t4) -c -c %----------------------------------------------------------% -c | Make sure the last off-diagonal element is non negative | -c | If not perform a similarity transformation on H(1:j,1:j) | -c | and scale v(:,j) by -1. | -c %----------------------------------------------------------% -c - if (h(j,1) .lt. zero) then - h(j,1) = -h(j,1) - if ( j .lt. 
k+np) then - call dscal(n, -one, v(1,j+1), 1) - else - call dscal(n, -one, resid, 1) - end if - end if -c -c %------------------------------------% -c | STEP 6: Update j = j+1; Continue | -c %------------------------------------% -c - j = j + 1 - if (j .gt. k+np) then - call second (t1) - tsaitr = tsaitr + (t1 - t0) - ido = 99 -c - if (msglvl .gt. 1) then - call dvout (logfil, k+np, h(1,2), ndigit, - & '_saitr: main diagonal of matrix H of step K+NP.') - if (k+np .gt. 1) then - call dvout (logfil, k+np-1, h(2,1), ndigit, - & '_saitr: sub diagonal of matrix H of step K+NP.') - end if - end if -c - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Loop back to extend the factorization by another step. | -c %--------------------------------------------------------% -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 9000 continue - return -c -c %---------------% -c | End of dsaitr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsapps.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsapps.f deleted file mode 100644 index 5c9178055a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsapps.f +++ /dev/null @@ -1,516 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsapps -c -c\Description: -c Given the Arnoldi factorization -c -c A*V_{k} - V_{k}*H_{k} = r_{k+p}*e_{k+p}^T, -c -c apply NP shifts implicitly resulting in -c -c A*(V_{k}*Q) - (V_{k}*Q)*(Q^T* H_{k}*Q) = r_{k+p}*e_{k+p}^T * Q -c -c where Q is an orthogonal matrix of order KEV+NP. Q is the product of -c rotations resulting from the NP bulge chasing sweeps. 
The updated Arnoldi -c factorization becomes: -c -c A*VNEW_{k} - VNEW_{k}*HNEW_{k} = rnew_{k}*e_{k}^T. -c -c\Usage: -c call dsapps -c ( N, KEV, NP, SHIFT, V, LDV, H, LDH, RESID, Q, LDQ, WORKD ) -c -c\Arguments -c N Integer. (INPUT) -c Problem size, i.e. dimension of matrix A. -c -c KEV Integer. (INPUT) -c INPUT: KEV+NP is the size of the input matrix H. -c OUTPUT: KEV is the size of the updated matrix HNEW. -c -c NP Integer. (INPUT) -c Number of implicit shifts to be applied. -c -c SHIFT Double precision array of length NP. (INPUT) -c The shifts to be applied. -c -c V Double precision N by (KEV+NP) array. (INPUT/OUTPUT) -c INPUT: V contains the current KEV+NP Arnoldi vectors. -c OUTPUT: VNEW = V(1:n,1:KEV); the updated Arnoldi vectors -c are in the first KEV columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Double precision (KEV+NP) by 2 array. (INPUT/OUTPUT) -c INPUT: H contains the symmetric tridiagonal matrix of the -c Arnoldi factorization with the subdiagonal in the 1st column -c starting at H(2,1) and the main diagonal in the 2nd column. -c OUTPUT: H contains the updated tridiagonal matrix in the -c KEV leading submatrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RESID Double precision array of length (N). (INPUT/OUTPUT) -c INPUT: RESID contains the the residual vector r_{k+p}. -c OUTPUT: RESID is the updated residual vector rnew_{k}. -c -c Q Double precision KEV+NP by KEV+NP work array. (WORKSPACE) -c Work array used to accumulate the rotations during the bulge -c chase sweep. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKD Double precision work array of length 2*N. (WORKSPACE) -c Distributed array used in the application of the accumulated -c orthogonal matrix Q. 
-c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c dlartg LAPACK Givens rotation construction routine. -c dlacpy LAPACK matrix copy routine. -c dlaset LAPACK matrix initialization routine. -c dgemv Level 2 BLAS routine for matrix vector multiplication. -c daxpy Level 1 BLAS that computes a vector triad. -c dcopy Level 1 BLAS that copies one vector to another. -c dscal Level 1 BLAS that scales a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/16/93: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: sapps.F SID: 2.6 DATE OF SID: 3/28/97 RELEASE: 2 -c -c\Remarks -c 1. In this version, each shift is applied to all the subblocks of -c the tridiagonal matrix H and not just to the submatrix that it -c comes from. This routine assumes that the subdiagonal elements -c of H that are stored in h(1:kev+np,1) are nonegative upon input -c and enforce this condition upon output. This version incorporates -c deflation. See code for documentation. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsapps - & ( n, kev, np, shift, v, ldv, h, ldh, resid, q, ldq, workd ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer kev, ldh, ldq, ldv, n, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & h(ldh,2), q(ldq,kev+np), resid(n), shift(np), - & v(ldv,kev+np), workd(2*n) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, iend, istart, itop, j, jj, kplusp, msglvl - logical first - Double precision - & a1, a2, a3, a4, big, c, epsmch, f, g, r, s - save epsmch, first -c -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external daxpy, dcopy, dscal, dlacpy, dlartg, dlaset, dvout, - & ivout, second, dgemv -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlamch - external dlamch -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs -c -c %----------------% -c | Data statments | -c %----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then - epsmch = dlamch('Epsilon-Machine') - first = .false. 
- end if - itop = 1 -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msapps -c - kplusp = kev + np -c -c %----------------------------------------------% -c | Initialize Q to the identity matrix of order | -c | kplusp used to accumulate the rotations. | -c %----------------------------------------------% -c - call dlaset ('All', kplusp, kplusp, zero, one, q, ldq) -c -c %----------------------------------------------% -c | Quick return if there are no shifts to apply | -c %----------------------------------------------% -c - if (np .eq. 0) go to 9000 -c -c %----------------------------------------------------------% -c | Apply the np shifts implicitly. Apply each shift to the | -c | whole matrix and not just to the submatrix from which it | -c | comes. | -c %----------------------------------------------------------% -c - do 90 jj = 1, np -c - istart = itop -c -c %----------------------------------------------------------% -c | Check for splitting and deflation. Currently we consider | -c | an off-diagonal element h(i+1,1) negligible if | -c | h(i+1,1) .le. epsmch*( |h(i,2)| + |h(i+1,2)| ) | -c | for i=1:KEV+NP-1. | -c | If above condition tests true then we set h(i+1,1) = 0. | -c | Note that h(1:KEV+NP,1) are assumed to be non negative. | -c %----------------------------------------------------------% -c - 20 continue -c -c %------------------------------------------------% -c | The following loop exits early if we encounter | -c | a negligible off diagonal element. | -c %------------------------------------------------% -c - do 30 i = istart, kplusp-1 - big = abs(h(i,2)) + abs(h(i+1,2)) - if (h(i+1,1) .le. epsmch*big) then - if (msglvl .gt. 
0) then - call ivout (logfil, 1, i, ndigit, - & '_sapps: deflation at row/column no.') - call ivout (logfil, 1, jj, ndigit, - & '_sapps: occured before shift number.') - call dvout (logfil, 1, h(i+1,1), ndigit, - & '_sapps: the corresponding off diagonal element') - end if - h(i+1,1) = zero - iend = i - go to 40 - end if - 30 continue - iend = kplusp - 40 continue -c - if (istart .lt. iend) then -c -c %--------------------------------------------------------% -c | Construct the plane rotation G'(istart,istart+1,theta) | -c | that attempts to drive h(istart+1,1) to zero. | -c %--------------------------------------------------------% -c - f = h(istart,2) - shift(jj) - g = h(istart+1,1) - call dlartg (f, g, c, s, r) -c -c %-------------------------------------------------------% -c | Apply rotation to the left and right of H; | -c | H <- G' * H * G, where G = G(istart,istart+1,theta). | -c | This will create a "bulge". | -c %-------------------------------------------------------% -c - a1 = c*h(istart,2) + s*h(istart+1,1) - a2 = c*h(istart+1,1) + s*h(istart+1,2) - a4 = c*h(istart+1,2) - s*h(istart+1,1) - a3 = c*h(istart+1,1) - s*h(istart,2) - h(istart,2) = c*a1 + s*a2 - h(istart+1,2) = c*a4 - s*a3 - h(istart+1,1) = c*a3 + s*a4 -c -c %----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G | -c %----------------------------------------------------% -c - do 60 j = 1, min(istart+jj,kplusp) - a1 = c*q(j,istart) + s*q(j,istart+1) - q(j,istart+1) = - s*q(j,istart) + c*q(j,istart+1) - q(j,istart) = a1 - 60 continue -c -c -c %----------------------------------------------% -c | The following loop chases the bulge created. | -c | Note that the previous rotation may also be | -c | done within the following loop. But it is | -c | kept separate to make the distinction among | -c | the bulge chasing sweeps and the first plane | -c | rotation designed to drive h(istart+1,1) to | -c | zero. 
| -c %----------------------------------------------% -c - do 70 i = istart+1, iend-1 -c -c %----------------------------------------------% -c | Construct the plane rotation G'(i,i+1,theta) | -c | that zeros the i-th bulge that was created | -c | by G(i-1,i,theta). g represents the bulge. | -c %----------------------------------------------% -c - f = h(i,1) - g = s*h(i+1,1) -c -c %----------------------------------% -c | Final update with G(i-1,i,theta) | -c %----------------------------------% -c - h(i+1,1) = c*h(i+1,1) - call dlartg (f, g, c, s, r) -c -c %-------------------------------------------% -c | The following ensures that h(1:iend-1,1), | -c | the first iend-2 off diagonal of elements | -c | H, remain non negative. | -c %-------------------------------------------% -c - if (r .lt. zero) then - r = -r - c = -c - s = -s - end if -c -c %--------------------------------------------% -c | Apply rotation to the left and right of H; | -c | H <- G * H * G', where G = G(i,i+1,theta) | -c %--------------------------------------------% -c - h(i,1) = r -c - a1 = c*h(i,2) + s*h(i+1,1) - a2 = c*h(i+1,1) + s*h(i+1,2) - a3 = c*h(i+1,1) - s*h(i,2) - a4 = c*h(i+1,2) - s*h(i+1,1) -c - h(i,2) = c*a1 + s*a2 - h(i+1,2) = c*a4 - s*a3 - h(i+1,1) = c*a3 + s*a4 -c -c %----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G | -c %----------------------------------------------------% -c - do 50 j = 1, min( i+jj, kplusp ) - a1 = c*q(j,i) + s*q(j,i+1) - q(j,i+1) = - s*q(j,i) + c*q(j,i+1) - q(j,i) = a1 - 50 continue -c - 70 continue -c - end if -c -c %--------------------------% -c | Update the block pointer | -c %--------------------------% -c - istart = iend + 1 -c -c %------------------------------------------% -c | Make sure that h(iend,1) is non-negative | -c | If not then set h(iend,1) <-- -h(iend,1) | -c | and negate the last column of Q. 
| -c | We have effectively carried out a | -c | similarity on transformation H | -c %------------------------------------------% -c - if (h(iend,1) .lt. zero) then - h(iend,1) = -h(iend,1) - call dscal(kplusp, -one, q(1,iend), 1) - end if -c -c %--------------------------------------------------------% -c | Apply the same shift to the next block if there is any | -c %--------------------------------------------------------% -c - if (iend .lt. kplusp) go to 20 -c -c %-----------------------------------------------------% -c | Check if we can increase the the start of the block | -c %-----------------------------------------------------% -c - do 80 i = itop, kplusp-1 - if (h(i+1,1) .gt. zero) go to 90 - itop = itop + 1 - 80 continue -c -c %-----------------------------------% -c | Finished applying the jj-th shift | -c %-----------------------------------% -c - 90 continue -c -c %------------------------------------------% -c | All shifts have been applied. Check for | -c | more possible deflation that might occur | -c | after the last shift is applied. | -c %------------------------------------------% -c - do 100 i = itop, kplusp-1 - big = abs(h(i,2)) + abs(h(i+1,2)) - if (h(i+1,1) .le. epsmch*big) then - if (msglvl .gt. 0) then - call ivout (logfil, 1, i, ndigit, - & '_sapps: deflation at row/column no.') - call dvout (logfil, 1, h(i+1,1), ndigit, - & '_sapps: the corresponding off diagonal element') - end if - h(i+1,1) = zero - end if - 100 continue -c -c %-------------------------------------------------% -c | Compute the (kev+1)-st column of (V*Q) and | -c | temporarily store the result in WORKD(N+1:2*N). | -c | This is not necessary if h(kev+1,1) = 0. | -c %-------------------------------------------------% -c - if ( h(kev+1,1) .gt. 
zero ) - & call dgemv ('N', n, kplusp, one, v, ldv, - & q(1,kev+1), 1, zero, workd(n+1), 1) -c -c %-------------------------------------------------------% -c | Compute column 1 to kev of (V*Q) in backward order | -c | taking advantage that Q is an upper triangular matrix | -c | with lower bandwidth np. | -c | Place results in v(:,kplusp-kev:kplusp) temporarily. | -c %-------------------------------------------------------% -c - do 130 i = 1, kev - call dgemv ('N', n, kplusp-i+1, one, v, ldv, - & q(1,kev-i+1), 1, zero, workd, 1) - call dcopy (n, workd, 1, v(1,kplusp-i+1), 1) - 130 continue -c -c %-------------------------------------------------% -c | Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). | -c %-------------------------------------------------% -c - call dlacpy ('All', n, kev, v(1,np+1), ldv, v, ldv) -c -c %--------------------------------------------% -c | Copy the (kev+1)-st column of (V*Q) in the | -c | appropriate place if h(kev+1,1) .ne. zero. | -c %--------------------------------------------% -c - if ( h(kev+1,1) .gt. zero ) - & call dcopy (n, workd(n+1), 1, v(1,kev+1), 1) -c -c %-------------------------------------% -c | Update the residual vector: | -c | r <- sigmak*r + betak*v(:,kev+1) | -c | where | -c | sigmak = (e_{kev+p}'*Q)*e_{kev} | -c | betak = e_{kev+1}'*H*e_{kev} | -c %-------------------------------------% -c - call dscal (n, q(kplusp,kev), resid, 1) - if (h(kev+1,1) .gt. zero) - & call daxpy (n, h(kev+1,1), v(1,kev+1), 1, resid, 1) -c - if (msglvl .gt. 1) then - call dvout (logfil, 1, q(kplusp,kev), ndigit, - & '_sapps: sigmak of the updated residual vector') - call dvout (logfil, 1, h(kev+1,1), ndigit, - & '_sapps: betak of the updated residual vector') - call dvout (logfil, kev, h(1,2), ndigit, - & '_sapps: updated main diagonal of H for next iteration') - if (kev .gt. 
1) then - call dvout (logfil, kev-1, h(2,1), ndigit, - & '_sapps: updated sub diagonal of H for next iteration') - end if - end if -c - call second (t1) - tsapps = tsapps + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of dsapps | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaup2.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaup2.f deleted file mode 100644 index 0b5b5129ce..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaup2.f +++ /dev/null @@ -1,850 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsaup2 -c -c\Description: -c Intermediate level interface called by dsaupd. -c -c\Usage: -c call dsaup2 -c ( IDO, BMAT, N, WHICH, NEV, NP, TOL, RESID, MODE, IUPD, -c ISHIFT, MXITER, V, LDV, H, LDH, RITZ, BOUNDS, Q, LDQ, WORKL, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c -c IDO, BMAT, N, WHICH, NEV, TOL, RESID: same as defined in dsaupd. -c MODE, ISHIFT, MXITER: see the definition of IPARAM in dsaupd. -c -c NP Integer. (INPUT/OUTPUT) -c Contains the number of implicit shifts to apply during -c each Arnoldi/Lanczos iteration. -c If ISHIFT=1, NP is adjusted dynamically at each iteration -c to accelerate convergence and prevent stagnation. -c This is also roughly equal to the number of matrix-vector -c products (involving the operator OP) per Arnoldi iteration. -c The logic for adjusting is contained within the current -c subroutine. -c If ISHIFT=0, NP is the number of shifts the user needs -c to provide via reverse comunication. 0 < NP < NCV-NEV. -c NP may be less than NCV-NEV since a leading block of the current -c upper Tridiagonal matrix has split off and contains "unwanted" -c Ritz values. -c Upon termination of the IRA iteration, NP contains the number -c of "converged" wanted Ritz values. -c -c IUPD Integer. (INPUT) -c IUPD .EQ. 0: use explicit restart instead implicit update. 
-c IUPD .NE. 0: use implicit update. -c -c V Double precision N by (NEV+NP) array. (INPUT/OUTPUT) -c The Lanczos basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Double precision (NEV+NP) by 2 array. (OUTPUT) -c H is used to store the generated symmetric tridiagonal matrix -c The subdiagonal is stored in the first column of H starting -c at H(2,1). The main diagonal is stored in the second column -c of H starting at H(1,2). If dsaup2 converges store the -c B-norm of the final residual vector in H(1,1). -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZ Double precision array of length NEV+NP. (OUTPUT) -c RITZ(1:NEV) contains the computed Ritz values of OP. -c -c BOUNDS Double precision array of length NEV+NP. (OUTPUT) -c BOUNDS(1:NEV) contain the error bounds corresponding to RITZ. -c -c Q Double precision (NEV+NP) by (NEV+NP) array. (WORKSPACE) -c Private (replicated) work array used to accumulate the -c rotation in the shift application step. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Double precision array of length at least 3*(NEV+NP). (INPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. It is used in the computation of the -c tridiagonal eigenvalue problem, the calculation and -c application of the shifts and convergence checking. -c If ISHIFT .EQ. O and IDO .EQ. 3, the first NP locations -c of WORKL are used in reverse communication to hold the user -c supplied shifts. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORKD for -c vectors used by the Lanczos iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. 
-c IPNTR(3): pointer to the vector B * X when used in one of -c the spectral transformation modes. X is the current -c operand. -c ------------------------------------------------------------- -c -c WORKD Double precision work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Lanczos iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note in dsaupd. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal return. -c = 1: All possible eigenvalues of OP has been found. -c NP returns the size of the invariant subspace -c spanning the operator OP. -c = 2: No shifts could be applied. -c = -8: Error return from trid. eigenvalue calculation; -c This should never happen. -c = -9: Starting vector is zero. -c = -9999: Could not build an Lanczos factorization. -c Size that was built in returned in NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett, "The Symmetric Eigenvalue Problem". Prentice-Hall, -c 1980. -c 4. B.N. Parlett, B. Nour-Omid, "Towards a Black Box Lanczos Program", -c Computer Physics Communications, 53 (1989), pp 169-179. -c 5. B. Nour-Omid, B.N. Parlett, T. Ericson, P.S. Jensen, "How to -c Implement the Spectral Transformation", Math. Comp., 48 (1987), -c pp 663-673. -c 6. R.G. Grimes, J.G. Lewis and H.D. 
Simon, "A Shifted Block Lanczos -c Algorithm for Solving Sparse Symmetric Generalized Eigenproblems", -c SIAM J. Matr. Anal. Apps., January (1993). -c 7. L. Reichel, W.B. Gragg, "Algorithm 686: FORTRAN Subroutines -c for Updating the QR decomposition", ACM TOMS, December 1990, -c Volume 16 Number 4, pp 369-377. -c -c\Routines called: -c dgetv0 ARPACK initial vector generation routine. -c dsaitr ARPACK Lanczos factorization routine. -c dsapps ARPACK application of implicit shifts routine. -c dsconv ARPACK convergence of Ritz values routine. -c dseigt ARPACK compute Ritz values and error bounds routine. -c dsgets ARPACK reorder Ritz values and error bounds routine. -c dsortr ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c dcopy Level 1 BLAS that copies one vector to another. -c ddot Level 1 BLAS that computes the scalar product of two vectors. -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c dscal Level 1 BLAS that scales a vector. -c dswap Level 1 BLAS that swaps two vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.4' -c xx/xx/95: Version ' 2.4'. (R.B. 
Lehoucq) -c -c\SCCS Information: @(#) -c FILE: saup2.F SID: 2.7 DATE OF SID: 5/19/98 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsaup2 - & ( ido, bmat, n, which, nev, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, h, ldh, ritz, bounds, - & q, ldq, workl, ipntr, workd, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ishift, iupd, ldh, ldq, ldv, mxiter, - & n, mode, nev, np - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Double precision - & bounds(nev+np), h(ldh,2), q(ldq,nev+np), resid(n), - & ritz(nev+np), v(ldv,nev+np), workd(3*n), - & workl(3*(nev+np)) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character wprime*2 - logical cnorm, getv0, initv, update, ushift - integer ierr, iter, j, kplusp, msglvl, nconv, nevbef, nev0, - & np0, nptemp, nevd2, nevm2, kp(3) - Double precision - & rnorm, temp, eps23 - save cnorm, getv0, initv, update, ushift, - & iter, kplusp, msglvl, nconv, nev0, np0, - & rnorm, eps23 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy, dgetv0, dsaitr, dscal, dsconv, dseigt, dsgets, - & dsapps, dsortr, dvout, ivout, second, dswap -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & ddot, dnrm2, dlamch - external ddot, dnrm2, dlamch -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - 
intrinsic min -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msaup2 -c -c %---------------------------------% -c | Set machine dependent constant. | -c %---------------------------------% -c - eps23 = dlamch('Epsilon-Machine') - eps23 = eps23**(2.0D+0/3.0D+0) -c -c %-------------------------------------% -c | nev0 and np0 are integer variables | -c | hold the initial values of NEV & NP | -c %-------------------------------------% -c - nev0 = nev - np0 = np -c -c %-------------------------------------% -c | kplusp is the bound on the largest | -c | Lanczos factorization built. | -c | nconv is the current number of | -c | "converged" eigenvlues. | -c | iter is the counter on the current | -c | iteration step. | -c %-------------------------------------% -c - kplusp = nev0 + np0 - nconv = 0 - iter = 0 -c -c %--------------------------------------------% -c | Set flags for computing the first NEV steps | -c | of the Lanczos factorization. | -c %--------------------------------------------% -c - getv0 = .true. - update = .false. - ushift = .false. - cnorm = .false. -c - if (info .ne. 0) then -c -c %--------------------------------------------% -c | User provides the initial residual vector. | -c %--------------------------------------------% -c - initv = .true. - info = 0 - else - initv = .false. - end if - end if -c -c %---------------------------------------------% -c | Get a possibly random starting vector and | -c | force it into the range of the operator OP. | -c %---------------------------------------------% -c - 10 continue -c - if (getv0) then - call dgetv0 (ido, bmat, 1, initv, n, 1, v, ldv, resid, rnorm, - & ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (rnorm .eq. 
zero) then -c -c %-----------------------------------------% -c | The initial vector is zero. Error exit. | -c %-----------------------------------------% -c - info = -9 - go to 1200 - end if - getv0 = .false. - ido = 0 - end if -c -c %------------------------------------------------------------% -c | Back from reverse communication: continue with update step | -c %------------------------------------------------------------% -c - if (update) go to 20 -c -c %-------------------------------------------% -c | Back from computing user specified shifts | -c %-------------------------------------------% -c - if (ushift) go to 50 -c -c %-------------------------------------% -c | Back from computing residual norm | -c | at the end of the current iteration | -c %-------------------------------------% -c - if (cnorm) go to 100 -c -c %----------------------------------------------------------% -c | Compute the first NEV steps of the Lanczos factorization | -c %----------------------------------------------------------% -c - call dsaitr (ido, bmat, n, 0, nev0, mode, resid, rnorm, v, ldv, - & h, ldh, ipntr, workd, info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then -c -c %-----------------------------------------------------% -c | dsaitr was unable to build an Lanczos factorization | -c | of length NEV0. INFO is returned with the size of | -c | the factorization built. Exit main loop. | -c %-----------------------------------------------------% -c - np = info - mxiter = iter - info = -9999 - go to 1200 - end if -c -c %--------------------------------------------------------------% -c | | -c | M A I N LANCZOS I T E R A T I O N L O O P | -c | Each iteration implicitly restarts the Lanczos | -c | factorization in place. 
| -c | | -c %--------------------------------------------------------------% -c - 1000 continue -c - iter = iter + 1 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, iter, ndigit, - & '_saup2: **** Start of major iteration number ****') - end if - if (msglvl .gt. 1) then - call ivout (logfil, 1, nev, ndigit, - & '_saup2: The length of the current Lanczos factorization') - call ivout (logfil, 1, np, ndigit, - & '_saup2: Extend the Lanczos factorization by') - end if -c -c %------------------------------------------------------------% -c | Compute NP additional steps of the Lanczos factorization. | -c %------------------------------------------------------------% -c - ido = 0 - 20 continue - update = .true. -c - call dsaitr (ido, bmat, n, nev, np, mode, resid, rnorm, v, - & ldv, h, ldh, ipntr, workd, info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then -c -c %-----------------------------------------------------% -c | dsaitr was unable to build an Lanczos factorization | -c | of length NEV0+NP0. INFO is returned with the size | -c | of the factorization built. Exit main loop. | -c %-----------------------------------------------------% -c - np = info - mxiter = iter - info = -9999 - go to 1200 - end if - update = .false. -c - if (msglvl .gt. 1) then - call dvout (logfil, 1, rnorm, ndigit, - & '_saup2: Current B-norm of residual for factorization') - end if -c -c %--------------------------------------------------------% -c | Compute the eigenvalues and corresponding error bounds | -c | of the current symmetric tridiagonal matrix. | -c %--------------------------------------------------------% -c - call dseigt (rnorm, kplusp, h, ldh, ritz, bounds, workl, ierr) -c - if (ierr .ne. 
0) then - info = -8 - go to 1200 - end if -c -c %----------------------------------------------------% -c | Make a copy of eigenvalues and corresponding error | -c | bounds obtained from _seigt. | -c %----------------------------------------------------% -c - call dcopy(kplusp, ritz, 1, workl(kplusp+1), 1) - call dcopy(kplusp, bounds, 1, workl(2*kplusp+1), 1) -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The selection is based on the requested number of | -c | eigenvalues instead of the current NEV and NP to | -c | prevent possible misconvergence. | -c | * Wanted Ritz values := RITZ(NP+1:NEV+NP) | -c | * Shifts := RITZ(1:NP) := WORKL(1:NP) | -c %---------------------------------------------------% -c - nev = nev0 - np = np0 - call dsgets (ishift, which, nev, np, ritz, bounds, workl) -c -c %-------------------% -c | Convergence test. | -c %-------------------% -c - call dcopy (nev, bounds(np+1), 1, workl(np+1), 1) - call dsconv (nev, ritz(np+1), workl(np+1), tol, nconv) -c - if (msglvl .gt. 2) then - kp(1) = nev - kp(2) = np - kp(3) = nconv - call ivout (logfil, 3, kp, ndigit, - & '_saup2: NEV, NP, NCONV are') - call dvout (logfil, kplusp, ritz, ndigit, - & '_saup2: The eigenvalues of H') - call dvout (logfil, kplusp, bounds, ndigit, - & '_saup2: Ritz estimates of the current NCV Ritz values') - end if -c -c %---------------------------------------------------------% -c | Count the number of unwanted Ritz values that have zero | -c | Ritz estimates. If any Ritz estimates are equal to zero | -c | then a leading block of H of order equal to at least | -c | the number of Ritz values with zero Ritz estimates has | -c | split off. None of these Ritz values may be removed by | -c | shifting. Decrease NP the number of shifts to apply. 
If | -c | no shifts may be applied, then prepare to exit | -c %---------------------------------------------------------% -c - nptemp = np - do 30 j=1, nptemp - if (bounds(j) .eq. zero) then - np = np - 1 - nev = nev + 1 - end if - 30 continue -c - if ( (nconv .ge. nev0) .or. - & (iter .gt. mxiter) .or. - & (np .eq. 0) ) then -c -c %------------------------------------------------% -c | Prepare to exit. Put the converged Ritz values | -c | and corresponding bounds in RITZ(1:NCONV) and | -c | BOUNDS(1:NCONV) respectively. Then sort. Be | -c | careful when NCONV > NP since we don't want to | -c | swap overlapping locations. | -c %------------------------------------------------% -c - if (which .eq. 'BE') then -c -c %-----------------------------------------------------% -c | Both ends of the spectrum are requested. | -c | Sort the eigenvalues into algebraically decreasing | -c | order first then swap low end of the spectrum next | -c | to high end in appropriate locations. | -c | NOTE: when np < floor(nev/2) be careful not to swap | -c | overlapping locations. | -c %-----------------------------------------------------% -c - wprime = 'SA' - call dsortr (wprime, .true., kplusp, ritz, bounds) - nevd2 = nev0 / 2 - nevm2 = nev0 - nevd2 - if ( nev .gt. 1 ) then - call dswap ( min(nevd2,np), ritz(nevm2+1), 1, - & ritz( max(kplusp-nevd2+1,kplusp-np+1) ), 1) - call dswap ( min(nevd2,np), bounds(nevm2+1), 1, - & bounds( max(kplusp-nevd2+1,kplusp-np+1)), 1) - end if -c - else -c -c %--------------------------------------------------% -c | LM, SM, LA, SA case. | -c | Sort the eigenvalues of H into the an order that | -c | is opposite to WHICH, and apply the resulting | -c | order to BOUNDS. The eigenvalues are sorted so | -c | that the wanted part are always within the first | -c | NEV locations. | -c %--------------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SM' - if (which .eq. 'SM') wprime = 'LM' - if (which .eq. 
'LA') wprime = 'SA' - if (which .eq. 'SA') wprime = 'LA' -c - call dsortr (wprime, .true., kplusp, ritz, bounds) -c - end if -c -c %--------------------------------------------------% -c | Scale the Ritz estimate of each Ritz value | -c | by 1 / max(eps23,magnitude of the Ritz value). | -c %--------------------------------------------------% -c - do 35 j = 1, nev0 - temp = max( eps23, abs(ritz(j)) ) - bounds(j) = bounds(j)/temp - 35 continue -c -c %----------------------------------------------------% -c | Sort the Ritz values according to the scaled Ritz | -c | esitmates. This will push all the converged ones | -c | towards the front of ritzr, ritzi, bounds | -c | (in the case when NCONV < NEV.) | -c %----------------------------------------------------% -c - wprime = 'LA' - call dsortr(wprime, .true., nev0, bounds, ritz) -c -c %----------------------------------------------% -c | Scale the Ritz estimate back to its original | -c | value. | -c %----------------------------------------------% -c - do 40 j = 1, nev0 - temp = max( eps23, abs(ritz(j)) ) - bounds(j) = bounds(j)*temp - 40 continue -c -c %--------------------------------------------------% -c | Sort the "converged" Ritz values again so that | -c | the "threshold" values and their associated Ritz | -c | estimates appear at the appropriate position in | -c | ritz and bound. | -c %--------------------------------------------------% -c - if (which .eq. 'BE') then -c -c %------------------------------------------------% -c | Sort the "converged" Ritz values in increasing | -c | order. The "threshold" values are in the | -c | middle. | -c %------------------------------------------------% -c - wprime = 'LA' - call dsortr(wprime, .true., nconv, ritz, bounds) -c - else -c -c %----------------------------------------------% -c | In LM, SM, LA, SA case, sort the "converged" | -c | Ritz values according to WHICH so that the | -c | "threshold" value appears at the front of | -c | ritz. 
| -c %----------------------------------------------% - - call dsortr(which, .true., nconv, ritz, bounds) -c - end if -c -c %------------------------------------------% -c | Use h( 1,1 ) as storage to communicate | -c | rnorm to _seupd if needed | -c %------------------------------------------% -c - h(1,1) = rnorm -c - if (msglvl .gt. 1) then - call dvout (logfil, kplusp, ritz, ndigit, - & '_saup2: Sorted Ritz values.') - call dvout (logfil, kplusp, bounds, ndigit, - & '_saup2: Sorted ritz estimates.') - end if -c -c %------------------------------------% -c | Max iterations have been exceeded. | -c %------------------------------------% -c - if (iter .gt. mxiter .and. nconv .lt. nev) info = 1 -c -c %---------------------% -c | No shifts to apply. | -c %---------------------% -c - if (np .eq. 0 .and. nconv .lt. nev0) info = 2 -c - np = nconv - go to 1100 -c - else if (nconv .lt. nev .and. ishift .eq. 1) then -c -c %---------------------------------------------------% -c | Do not have all the requested eigenvalues yet. | -c | To prevent possible stagnation, adjust the number | -c | of Ritz values and the shifts. | -c %---------------------------------------------------% -c - nevbef = nev - nev = nev + min (nconv, np/2) - if (nev .eq. 1 .and. kplusp .ge. 6) then - nev = kplusp / 2 - else if (nev .eq. 1 .and. kplusp .gt. 2) then - nev = 2 - end if - np = kplusp - nev -c -c %---------------------------------------% -c | If the size of NEV was just increased | -c | resort the eigenvalues. | -c %---------------------------------------% -c - if (nevbef .lt. nev) - & call dsgets (ishift, which, nev, np, ritz, bounds, - & workl) -c - end if -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, nconv, ndigit, - & '_saup2: no. of "converged" Ritz values at this iter.') - if (msglvl .gt. 
1) then - kp(1) = nev - kp(2) = np - call ivout (logfil, 2, kp, ndigit, - & '_saup2: NEV and NP are') - call dvout (logfil, nev, ritz(np+1), ndigit, - & '_saup2: "wanted" Ritz values.') - call dvout (logfil, nev, bounds(np+1), ndigit, - & '_saup2: Ritz estimates of the "wanted" values ') - end if - end if - -c - if (ishift .eq. 0) then -c -c %-----------------------------------------------------% -c | User specified shifts: reverse communication to | -c | compute the shifts. They are returned in the first | -c | NP locations of WORKL. | -c %-----------------------------------------------------% -c - ushift = .true. - ido = 3 - go to 9000 - end if -c - 50 continue -c -c %------------------------------------% -c | Back from reverse communication; | -c | User specified shifts are returned | -c | in WORKL(1:*NP) | -c %------------------------------------% -c - ushift = .false. -c -c -c %---------------------------------------------------------% -c | Move the NP shifts to the first NP locations of RITZ to | -c | free up WORKL. This is for the non-exact shift case; | -c | in the exact shift case, dsgets already handles this. | -c %---------------------------------------------------------% -c - if (ishift .eq. 0) call dcopy (np, workl, 1, ritz, 1) -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, np, ndigit, - & '_saup2: The number of shifts to apply ') - call dvout (logfil, np, workl, ndigit, - & '_saup2: shifts selected') - if (ishift .eq. 1) then - call dvout (logfil, np, bounds, ndigit, - & '_saup2: corresponding Ritz estimates') - end if - end if -c -c %---------------------------------------------------------% -c | Apply the NP0 implicit shifts by QR bulge chasing. | -c | Each shift is applied to the entire tridiagonal matrix. | -c | The first 2*N locations of WORKD are used as workspace. | -c | After dsapps is done, we have a Lanczos | -c | factorization of length NEV. 
| -c %---------------------------------------------------------% -c - call dsapps (n, nev, np, ritz, v, ldv, h, ldh, resid, q, ldq, - & workd) -c -c %---------------------------------------------% -c | Compute the B-norm of the updated residual. | -c | Keep B*RESID in WORKD(1:N) to be used in | -c | the first step of the next call to dsaitr. | -c %---------------------------------------------% -c - cnorm = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call dcopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*RESID | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call dcopy (n, resid, 1, workd, 1) - end if -c - 100 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(1:N) := B*RESID | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - rnorm = ddot (n, resid, 1, workd, 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = dnrm2(n, resid, 1) - end if - cnorm = .false. - 130 continue -c - if (msglvl .gt. 
2) then - call dvout (logfil, 1, rnorm, ndigit, - & '_saup2: B-norm of residual for NEV factorization') - call dvout (logfil, nev, h(1,2), ndigit, - & '_saup2: main diagonal of compressed H matrix') - call dvout (logfil, nev-1, h(2,1), ndigit, - & '_saup2: subdiagonal of compressed H matrix') - end if -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 1100 continue -c - mxiter = iter - nev = nconv -c - 1200 continue - ido = 99 -c -c %------------% -c | Error exit | -c %------------% -c - call second (t1) - tsaup2 = t1 - t0 -c - 9000 continue - return -c -c %---------------% -c | End of dsaup2 | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaupd.f deleted file mode 100644 index c4272c1891..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsaupd.f +++ /dev/null @@ -1,690 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsaupd -c -c\Description: -c -c Reverse communication interface for the Implicitly Restarted Arnoldi -c Iteration. For symmetric problems this reduces to a variant of the Lanczos -c method. This method has been designed to compute approximations to a -c few eigenpairs of a linear operator OP that is real and symmetric -c with respect to a real positive semi-definite symmetric matrix B, -c i.e. -c -c B*OP = (OP`)*B. -c -c Another way to express this condition is -c -c < x,OPy > = < OPx,y > where < z,w > = z`Bw . -c -c In the standard eigenproblem B is the identity matrix. -c ( A` denotes transpose of A) -c -c The computed approximate eigenvalues are called Ritz values and -c the corresponding approximate eigenvectors are called Ritz vectors. 
-c -c dsaupd is usually called iteratively to solve one of the -c following problems: -c -c Mode 1: A*x = lambda*x, A symmetric -c ===> OP = A and B = I. -c -c Mode 2: A*x = lambda*M*x, A symmetric, M symmetric positive definite -c ===> OP = inv[M]*A and B = M. -c ===> (If M can be factored see remark 3 below) -c -c Mode 3: K*x = lambda*M*x, K symmetric, M symmetric positive semi-definite -c ===> OP = (inv[K - sigma*M])*M and B = M. -c ===> Shift-and-Invert mode -c -c Mode 4: K*x = lambda*KG*x, K symmetric positive semi-definite, -c KG symmetric indefinite -c ===> OP = (inv[K - sigma*KG])*K and B = K. -c ===> Buckling mode -c -c Mode 5: A*x = lambda*M*x, A symmetric, M symmetric positive semi-definite -c ===> OP = inv[A - sigma*M]*[A + sigma*M] and B = M. -c ===> Cayley transformed mode -c -c NOTE: The action of w <- inv[A - sigma*M]*v or w <- inv[M]*v -c should be accomplished either by a direct method -c using a sparse matrix factorization and solving -c -c [A - sigma*M]*w = v or M*w = v, -c -c or through an iterative method for solving these -c systems. If an iterative method is used, the -c convergence test must be more stringent than -c the accuracy requirements for the eigenvalue -c approximations. -c -c\Usage: -c call dsaupd -c ( IDO, BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, -c IPNTR, WORKD, WORKL, LWORKL, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to dsaupd . IDO will be set internally to -c indicate the type of operation to be performed. Control is -c then given back to the calling routine which has the -c responsibility to carry out the requested operation and call -c dsaupd with the result. The operand is given in -c WORKD(IPNTR(1)), the result must be put in WORKD(IPNTR(2)). 
-c (If Mode = 2 see remark 5 below) -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c In mode 3,4 and 5, the vector B * X is already -c available in WORKD(ipntr(3)). It does not -c need to be recomputed in forming OP * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 3: compute the IPARAM(8) shifts where -c IPNTR(11) is the pointer into WORKL for -c placing the shifts. See remark 6 below. -c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c WHICH Character*2. (INPUT) -c Specify which of the Ritz values of OP to compute. -c -c 'LA' - compute the NEV largest (algebraic) eigenvalues. -c 'SA' - compute the NEV smallest (algebraic) eigenvalues. -c 'LM' - compute the NEV largest (in magnitude) eigenvalues. -c 'SM' - compute the NEV smallest (in magnitude) eigenvalues. -c 'BE' - compute NEV eigenvalues, half from each end of the -c spectrum. When NEV is odd, compute one more from the -c high end than from the low end. -c (see remark 1 below) -c -c NEV Integer. (INPUT) -c Number of eigenvalues of OP to be computed. 0 < NEV < N. -c -c TOL Double precision scalar. 
(INPUT) -c Stopping criterion: the relative accuracy of the Ritz value -c is considered acceptable if BOUNDS(I) .LE. TOL*ABS(RITZ(I)). -c If TOL .LE. 0. is passed a default is set: -c DEFAULT = DLAMCH ('EPS') (machine precision as computed -c by the LAPACK auxiliary subroutine DLAMCH ). -c -c RESID Double precision array of length N. (INPUT/OUTPUT) -c On INPUT: -c If INFO .EQ. 0, a random initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c On OUTPUT: -c RESID contains the final residual vector. -c -c NCV Integer. (INPUT) -c Number of columns of the matrix V (less than or equal to N). -c This will indicate how many Lanczos vectors are generated -c at each iteration. After the startup phase in which NEV -c Lanczos vectors are generated, the algorithm generates -c NCV-NEV Lanczos vectors at each subsequent update iteration. -c Most of the cost in generating each Lanczos vector is in the -c matrix-vector product OP*x. (See remark 4 below). -c -c V Double precision N by NCV array. (OUTPUT) -c The NCV columns of V contain the Lanczos basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c IPARAM Integer array of length 11. (INPUT/OUTPUT) -c IPARAM(1) = ISHIFT: method for selecting the implicit shifts. -c The shifts selected at each iteration are used to restart -c the Arnoldi iteration in an implicit fashion. -c ------------------------------------------------------------- -c ISHIFT = 0: the shifts are provided by the user via -c reverse communication. The NCV eigenvalues of -c the current tridiagonal matrix T are returned in -c the part of WORKL array corresponding to RITZ. -c See remark 6 below. -c ISHIFT = 1: exact shifts with respect to the reduced -c tridiagonal matrix T. 
This is equivalent to -c restarting the iteration with a starting vector -c that is a linear combination of Ritz vectors -c associated with the "wanted" Ritz values. -c ------------------------------------------------------------- -c -c IPARAM(2) = LEVEC -c No longer referenced. See remark 2 below. -c -c IPARAM(3) = MXITER -c On INPUT: maximum number of Arnoldi update iterations allowed. -c On OUTPUT: actual number of Arnoldi update iterations taken. -c -c IPARAM(4) = NB: blocksize to be used in the recurrence. -c The code currently works only for NB = 1. -c -c IPARAM(5) = NCONV: number of "converged" Ritz values. -c This represents the number of Ritz values that satisfy -c the convergence criterion. -c -c IPARAM(6) = IUPD -c No longer referenced. Implicit restarting is ALWAYS used. -c -c IPARAM(7) = MODE -c On INPUT determines what type of eigenproblem is being solved. -c Must be 1,2,3,4,5; See under \Description of dsaupd for the -c five modes available. -c -c IPARAM(8) = NP -c When ido = 3 and the user provides shifts through reverse -c communication (IPARAM(1)=0), dsaupd returns NP, the number -c of shifts the user is to provide. 0 < NP <=NCV-NEV. See Remark -c 6 below. -c -c IPARAM(9) = NUMOP, IPARAM(10) = NUMOPB, IPARAM(11) = NUMREO, -c OUTPUT: NUMOP = total number of OP*x operations, -c NUMOPB = total number of B*x operations if BMAT='G', -c NUMREO = total number of steps of re-orthogonalization. -c -c IPNTR Integer array of length 11. (OUTPUT) -c Pointer to mark the starting locations in the WORKD and WORKL -c arrays for matrices/vectors used by the Lanczos iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X in WORKD. -c IPNTR(2): pointer to the current result vector Y in WORKD. -c IPNTR(3): pointer to the vector B * X in WORKD when used in -c the shift-and-invert mode. -c IPNTR(4): pointer to the next available location in WORKL -c that is untouched by the program. 
-c IPNTR(5): pointer to the NCV by 2 tridiagonal matrix T in WORKL. -c IPNTR(6): pointer to the NCV RITZ values array in WORKL. -c IPNTR(7): pointer to the Ritz estimates in array WORKL associated -c with the Ritz values located in RITZ in WORKL. -c IPNTR(11): pointer to the NP shifts in WORKL. See Remark 6 below. -c -c Note: IPNTR(8:10) is only referenced by dseupd . See Remark 2. -c IPNTR(8): pointer to the NCV RITZ values of the original system. -c IPNTR(9): pointer to the NCV corresponding error bounds. -c IPNTR(10): pointer to the NCV by NCV matrix of eigenvectors -c of the tridiagonal matrix T. Only referenced by -c dseupd if RVEC = .TRUE. See Remarks. -c ------------------------------------------------------------- -c -c WORKD Double precision work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration. Upon termination -c WORKD(1:N) contains B*RESID(1:N). If the Ritz vectors are desired -c subroutine dseupd uses this output. -c See Data Distribution Note below. -c -c WORKL Double precision work array of length LWORKL. (OUTPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. See Data Distribution Note below. -c -c LWORKL Integer. (INPUT) -c LWORKL must be at least NCV**2 + 8*NCV . -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal exit. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. IPARAM(5) -c returns the number of wanted converged Ritz values. -c = 2: No longer an informational error. Deprecated starting -c with release 2 of ARPACK. -c = 3: No shifts could be applied during a cycle of the -c Implicitly restarted Arnoldi iteration. 
One possibility -c is to increase the size of NCV relative to NEV. -c See remark 4 below. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV must be greater than NEV and less than or equal to N. -c = -4: The maximum number of Arnoldi update iterations allowed -c must be greater than zero. -c = -5: WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'. -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work array WORKL is not sufficient. -c = -8: Error return from trid. eigenvalue calculation; -c Informatinal error from LAPACK routine dsteqr . -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3,4,5. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatable. -c = -12: IPARAM(1) must be equal to 0 or 1. -c = -13: NEV and WHICH = 'BE' are incompatable. -c = -9999: Could not build an Arnoldi factorization. -c IPARAM(5) returns the size of the current Arnoldi -c factorization. The user is advised to check that -c enough workspace and array storage has been allocated. -c -c -c\Remarks -c 1. The converged Ritz values are always returned in ascending -c algebraic order. The computed Ritz values are approximate -c eigenvalues of OP. The selection of WHICH should be made -c with this in mind when Mode = 3,4,5. After convergence, -c approximate eigenvalues of the original problem may be obtained -c with the ARPACK subroutine dseupd . -c -c 2. If the Ritz vectors corresponding to the converged Ritz values -c are needed, the user must call dseupd immediately following completion -c of dsaupd . This is new starting with version 2.1 of ARPACK. -c -c 3. If M can be factored into a Cholesky factorization M = LL` -c then Mode = 2 should not be selected. Instead one should use -c Mode = 1 with OP = inv(L)*A*inv(L`). Appropriate triangular -c linear systems should be solved with L and L` rather -c than computing inverses. 
After convergence, an approximate -c eigenvector z of the original problem is recovered by solving -c L`z = x where x is a Ritz vector of OP. -c -c 4. At present there is no a-priori analysis to guide the selection -c of NCV relative to NEV. The only formal requrement is that NCV > NEV. -c However, it is recommended that NCV .ge. 2*NEV. If many problems of -c the same type are to be solved, one should experiment with increasing -c NCV while keeping NEV fixed for a given test problem. This will -c usually decrease the required number of OP*x operations but it -c also increases the work and storage required to maintain the orthogonal -c basis vectors. The optimal "cross-over" with respect to CPU time -c is problem dependent and must be determined empirically. -c -c 5. If IPARAM(7) = 2 then in the Reverse commuication interface the user -c must do the following. When IDO = 1, Y = OP * X is to be computed. -c When IPARAM(7) = 2 OP = inv(B)*A. After computing A*X the user -c must overwrite X with A*X. Y is then the solution to the linear set -c of equations B*Y = A*X. -c -c 6. When IPARAM(1) = 0, and IDO = 3, the user needs to provide the -c NP = IPARAM(8) shifts in locations: -c 1 WORKL(IPNTR(11)) -c 2 WORKL(IPNTR(11)+1) -c . -c . -c . -c NP WORKL(IPNTR(11)+NP-1). -c -c The eigenvalues of the current tridiagonal matrix are located in -c WORKL(IPNTR(6)) through WORKL(IPNTR(6)+NCV-1). They are in the -c order defined by WHICH. The associated Ritz estimates are located in -c WORKL(IPNTR(8)), WORKL(IPNTR(8)+1), ... , WORKL(IPNTR(8)+NCV-1). 
-c -c----------------------------------------------------------------------- -c -c\Data Distribution Note: -c -c Fortran-D syntax: -c ================ -c REAL RESID(N), V(LDV,NCV), WORKD(3*N), WORKL(LWORKL) -c DECOMPOSE D1(N), D2(N,NCV) -c ALIGN RESID(I) with D1(I) -c ALIGN V(I,J) with D2(I,J) -c ALIGN WORKD(I) with D1(I) range (1:N) -c ALIGN WORKD(I) with D1(I-N) range (N+1:2*N) -c ALIGN WORKD(I) with D1(I-2*N) range (2*N+1:3*N) -c DISTRIBUTE D1(BLOCK), D2(BLOCK,:) -c REPLICATED WORKL(LWORKL) -c -c Cray MPP syntax: -c =============== -c REAL RESID(N), V(LDV,NCV), WORKD(N,3), WORKL(LWORKL) -c SHARED RESID(BLOCK), V(BLOCK,:), WORKD(BLOCK,:) -c REPLICATED WORKL(LWORKL) -c -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett, "The Symmetric Eigenvalue Problem". Prentice-Hall, -c 1980. -c 4. B.N. Parlett, B. Nour-Omid, "Towards a Black Box Lanczos Program", -c Computer Physics Communications, 53 (1989), pp 169-179. -c 5. B. Nour-Omid, B.N. Parlett, T. Ericson, P.S. Jensen, "How to -c Implement the Spectral Transformation", Math. Comp., 48 (1987), -c pp 663-673. -c 6. R.G. Grimes, J.G. Lewis and H.D. Simon, "A Shifted Block Lanczos -c Algorithm for Solving Sparse Symmetric Generalized Eigenproblems", -c SIAM J. Matr. Anal. Apps., January (1993). -c 7. L. Reichel, W.B. Gragg, "Algorithm 686: FORTRAN Subroutines -c for Updating the QR decomposition", ACM TOMS, December 1990, -c Volume 16 Number 4, pp 369-377. -c 8. R.B. Lehoucq, D.C. Sorensen, "Implementation of Some Spectral -c Transformations in a k-Step Arnoldi Method". In Preparation. 
-c -c\Routines called: -c dsaup2 ARPACK routine that implements the Implicitly Restarted -c Arnoldi Iteration. -c dstats ARPACK routine that initialize timing and other statistics -c variables. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: saupd.F SID: 2.8 DATE OF SID: 04/10/01 RELEASE: 2 -c -c\Remarks -c 1. None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsaupd - & ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, - & ipntr, workd, workl, lworkl, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ldv, lworkl, n, ncv, nev - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(11) - Double precision - & resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0 , zero = 0.0D+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, msglvl, mxiter, mode, nb, - & nev0, next, np, ritz, j - save bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, msglvl, mxiter, mode, nb, - & nev0, next, np, ritz -c -c 
%----------------------% -c | External Subroutines | -c %----------------------% -c - external dsaup2 , dvout , ivout, second, dstats -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlamch - external dlamch -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call dstats - call second (t0) - msglvl = msaupd -c - ierr = 0 - ishift = iparam(1) - mxiter = iparam(3) -c nb = iparam(4) - nb = 1 -c -c %--------------------------------------------% -c | Revision 2 performs only implicit restart. | -c %--------------------------------------------% -c - iupd = 1 - mode = iparam(7) -c -c %----------------% -c | Error checking | -c %----------------% -c - if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev .or. ncv .gt. n) then - ierr = -3 - end if -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c %----------------------------------------------% -c - np = ncv - nev -c - if (mxiter .le. 0) ierr = -4 - if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LA' .and. - & which .ne. 'SA' .and. - & which .ne. 'BE') ierr = -5 - if (bmat .ne. 'I' .and. bmat .ne. 'G') ierr = -6 -c - if (lworkl .lt. ncv**2 + 8*ncv) ierr = -7 - if (mode .lt. 1 .or. mode .gt. 5) then - ierr = -10 - else if (mode .eq. 1 .and. bmat .eq. 'G') then - ierr = -11 - else if (ishift .lt. 0 .or. ishift .gt. 1) then - ierr = -12 - else if (nev .eq. 1 .and. which .eq. 'BE') then - ierr = -13 - end if -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 
0) then - info = ierr - ido = 99 - go to 9000 - end if -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - if (nb .le. 0) nb = 1 - if (tol .le. zero) tol = dlamch ('EpsMach') -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c | NEV0 is the local variable designating the | -c | size of the invariant subspace desired. | -c %----------------------------------------------% -c - np = ncv - nev - nev0 = nev -c -c %-----------------------------% -c | Zero out internal workspace | -c %-----------------------------% -c - do 10 j = 1, ncv**2 + 8*ncv - workl(j) = zero - 10 continue -c -c %-------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:2*ncv) := generated tridiagonal matrix | -c | workl(2*ncv+1:2*ncv+ncv) := ritz values | -c | workl(3*ncv+1:3*ncv+ncv) := computed error bounds | -c | workl(4*ncv+1:4*ncv+ncv*ncv) := rotation matrix Q | -c | workl(4*ncv+ncv*ncv+1:7*ncv+ncv*ncv) := workspace | -c %-------------------------------------------------------% -c - ldh = ncv - ldq = ncv - ih = 1 - ritz = ih + 2*ldh - bounds = ritz + ncv - iq = bounds + ncv - iw = iq + ncv**2 - next = iw + 3*ncv -c - ipntr(4) = next - ipntr(5) = ih - ipntr(6) = ritz - ipntr(7) = bounds - ipntr(11) = iw - end if -c -c %-------------------------------------------------------% -c | Carry out the Implicitly restarted Lanczos Iteration. 
| -c %-------------------------------------------------------% -c - call dsaup2 - & ( ido, bmat, n, which, nev0, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, workl(ih), ldh, workl(ritz), - & workl(bounds), workl(iq), ldq, workl(iw), ipntr, workd, - & info ) -c -c %--------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP or shifts. | -c %--------------------------------------------------% -c - if (ido .eq. 3) iparam(8) = np - if (ido .ne. 99) go to 9000 -c - iparam(3) = mxiter - iparam(5) = np - iparam(9) = nopx - iparam(10) = nbx - iparam(11) = nrorth -c -c %------------------------------------% -c | Exit if there was an informational | -c | error within dsaup2 . | -c %------------------------------------% -c - if (info .lt. 0) go to 9000 - if (info .eq. 2) info = 3 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, mxiter, ndigit, - & '_saupd: number of update iterations taken') - call ivout (logfil, 1, np, ndigit, - & '_saupd: number of "converged" Ritz values') - call dvout (logfil, np, workl(Ritz), ndigit, - & '_saupd: final Ritz values') - call dvout (logfil, np, workl(Bounds), ndigit, - & '_saupd: corresponding error bounds') - end if -c - call second (t1) - tsaupd = t1 - t0 -c - if (msglvl .gt. 
0) then -c -c %--------------------------------------------------------% -c | Version Number & Version Date are defined in version.h | -c %--------------------------------------------------------% -c - write (6,1000) - write (6,1100) mxiter, nopx, nbx, nrorth, nitref, nrstrt, - & tmvopx, tmvbx, tsaupd, tsaup2, tsaitr, titref, - & tgetv0, tseigt, tsgets, tsapps, tsconv - 1000 format (//, - & 5x, '==========================================',/ - & 5x, '= Symmetric implicit Arnoldi update code =',/ - & 5x, '= Version Number:', ' 2.4' , 19x, ' =',/ - & 5x, '= Version Date: ', ' 07/31/96' , 14x, ' =',/ - & 5x, '==========================================',/ - & 5x, '= Summary of timing statistics =',/ - & 5x, '==========================================',//) - 1100 format ( - & 5x, 'Total number update iterations = ', i5,/ - & 5x, 'Total number of OP*x operations = ', i5,/ - & 5x, 'Total number of B*x operations = ', i5,/ - & 5x, 'Total number of reorthogonalization steps = ', i5,/ - & 5x, 'Total number of iterative refinement steps = ', i5,/ - & 5x, 'Total number of restart steps = ', i5,/ - & 5x, 'Total time in user OP*x operation = ', f12.6,/ - & 5x, 'Total time in user B*x operation = ', f12.6,/ - & 5x, 'Total time in Arnoldi update routine = ', f12.6,/ - & 5x, 'Total time in saup2 routine = ', f12.6,/ - & 5x, 'Total time in basic Arnoldi iteration loop = ', f12.6,/ - & 5x, 'Total time in reorthogonalization phase = ', f12.6,/ - & 5x, 'Total time in (re)start vector generation = ', f12.6,/ - & 5x, 'Total time in trid eigenvalue subproblem = ', f12.6,/ - & 5x, 'Total time in getting the shifts = ', f12.6,/ - & 5x, 'Total time in applying the shifts = ', f12.6,/ - & 5x, 'Total time in convergence testing = ', f12.6) - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of dsaupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsconv.f 
b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsconv.f deleted file mode 100644 index 7e3d0a7bb6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsconv.f +++ /dev/null @@ -1,138 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsconv -c -c\Description: -c Convergence testing for the symmetric Arnoldi eigenvalue routine. -c -c\Usage: -c call dsconv -c ( N, RITZ, BOUNDS, TOL, NCONV ) -c -c\Arguments -c N Integer. (INPUT) -c Number of Ritz values to check for convergence. -c -c RITZ Double precision array of length N. (INPUT) -c The Ritz values to be checked for convergence. -c -c BOUNDS Double precision array of length N. (INPUT) -c Ritz estimates associated with the Ritz values in RITZ. -c -c TOL Double precision scalar. (INPUT) -c Desired relative accuracy for a Ritz value to be considered -c "converged". -c -c NCONV Integer scalar. (OUTPUT) -c Number of "converged" Ritz values. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Routines called: -c second ARPACK utility routine for timing. -c dlamch LAPACK routine that determines machine constants. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: sconv.F SID: 2.4 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\Remarks -c 1. Starting with version 2.4, this routine no longer uses the -c Parlett strategy using the gap conditions. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsconv (n, ritz, bounds, tol, nconv) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer n, nconv - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & ritz(n), bounds(n) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i - Double precision - & temp, eps23 -c -c %-------------------% -c | External routines | -c %-------------------% -c - Double precision - & dlamch - external dlamch - -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - call second (t0) -c - eps23 = dlamch('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0) -c - nconv = 0 - do 10 i = 1, n -c -c %-----------------------------------------------------% -c | The i-th Ritz value is considered "converged" | -c | when: bounds(i) .le. TOL*max(eps23, abs(ritz(i))) | -c %-----------------------------------------------------% -c - temp = max( eps23, abs(ritz(i)) ) - if ( bounds(i) .le. 
tol*temp ) then - nconv = nconv + 1 - end if -c - 10 continue -c - call second (t1) - tsconv = tsconv + (t1 - t0) -c - return -c -c %---------------% -c | End of dsconv | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dseigt.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dseigt.f deleted file mode 100644 index a6d68914cd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dseigt.f +++ /dev/null @@ -1,181 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dseigt -c -c\Description: -c Compute the eigenvalues of the current symmetric tridiagonal matrix -c and the corresponding error bounds given the current residual norm. -c -c\Usage: -c call dseigt -c ( RNORM, N, H, LDH, EIG, BOUNDS, WORKL, IERR ) -c -c\Arguments -c RNORM Double precision scalar. (INPUT) -c RNORM contains the residual norm corresponding to the current -c symmetric tridiagonal matrix H. -c -c N Integer. (INPUT) -c Size of the symmetric tridiagonal matrix H. -c -c H Double precision N by 2 array. (INPUT) -c H contains the symmetric tridiagonal matrix with the -c subdiagonal in the first column starting at H(2,1) and the -c main diagonal in second column. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c EIG Double precision array of length N. (OUTPUT) -c On output, EIG contains the N eigenvalues of H possibly -c unsorted. The BOUNDS arrays are returned in the -c same sorted order as EIG. -c -c BOUNDS Double precision array of length N. (OUTPUT) -c On output, BOUNDS contains the error estimates corresponding -c to the eigenvalues EIG. This is equal to RNORM times the -c last components of the eigenvectors corresponding to the -c eigenvalues in EIG. -c -c WORKL Double precision work array of length 3*N. (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. 
-c -c IERR Integer. (OUTPUT) -c Error exit flag from dstqrb. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c dstqrb ARPACK routine that computes the eigenvalues and the -c last components of the eigenvectors of a symmetric -c and tridiagonal matrix. -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine that prints vectors. -c dcopy Level 1 BLAS that copies one vector to another. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: seigt.F SID: 2.4 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dseigt - & ( rnorm, n, h, ldh, eig, bounds, workl, ierr ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer ierr, ldh, n - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & eig(n), bounds(n), h(ldh,2), workl(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & zero - parameter (zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, k, msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy, dstqrb, dvout, second -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | 
Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mseigt -c - if (msglvl .gt. 0) then - call dvout (logfil, n, h(1,2), ndigit, - & '_seigt: main diagonal of matrix H') - if (n .gt. 1) then - call dvout (logfil, n-1, h(2,1), ndigit, - & '_seigt: sub diagonal of matrix H') - end if - end if -c - call dcopy (n, h(1,2), 1, eig, 1) - call dcopy (n-1, h(2,1), 1, workl, 1) - call dstqrb (n, eig, workl, bounds, workl(n+1), ierr) - if (ierr .ne. 0) go to 9000 - if (msglvl .gt. 1) then - call dvout (logfil, n, bounds, ndigit, - & '_seigt: last row of the eigenvector matrix for H') - end if -c -c %-----------------------------------------------% -c | Finally determine the error bounds associated | -c | with the n Ritz values of H. | -c %-----------------------------------------------% -c - do 30 k = 1, n - bounds(k) = rnorm*abs(bounds(k)) - 30 continue -c - call second (t1) - tseigt = tseigt + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of dseigt | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsesrt.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsesrt.f deleted file mode 100644 index 2b4ca8cbc0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsesrt.f +++ /dev/null @@ -1,217 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsesrt -c -c\Description: -c Sort the array X in the order specified by WHICH and optionally -c apply the permutation to the columns of the matrix A. -c -c\Usage: -c call dsesrt -c ( WHICH, APPLY, N, X, NA, A, LDA) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> X is sorted into increasing order of magnitude. -c 'SM' -> X is sorted into decreasing order of magnitude. -c 'LA' -> X is sorted into increasing order of algebraic. -c 'SA' -> X is sorted into decreasing order of algebraic. 
-c -c APPLY Logical. (Input) -c APPLY = .TRUE. -> apply the sorted order to A. -c APPLY = .FALSE. -> do not apply the sorted order to A. -c -c N Integer. (INPUT) -c Dimension of the array X. -c -c X Double precision array of length N. (INPUT/OUTPUT) -c The array to be sorted. -c -c NA Integer. (INPUT) -c Number of rows of the matrix A. -c -c A Double precision array of length NA by N. (INPUT/OUTPUT) -c -c LDA Integer. (INPUT) -c Leading dimension of A. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Routines -c dswap Level 1 BLAS that swaps the contents of two vectors. -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.1'. -c Adapted from the sort routine in LANSO and -c the ARPACK code dsortr -c -c\SCCS Information: @(#) -c FILE: sesrt.F SID: 2.3 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsesrt (which, apply, n, x, na, a, lda) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer lda, n, na -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & x(0:n-1), a(lda, 0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Double precision - & temp -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dswap -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'SA') then -c -c X is sorted into decreasing order of algebraic. -c - 10 continue - if (igap .eq. 
0) go to 9000 - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - if (x(j).lt.x(j+igap)) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call dswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 30 - endif - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c X is sorted into decreasing order of magnitude. -c - 40 continue - if (igap .eq. 0) go to 9000 - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j.lt.0) go to 60 -c - if (abs(x(j)).lt.abs(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call dswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LA') then -c -c X is sorted into increasing order of algebraic. -c - 70 continue - if (igap .eq. 0) go to 9000 - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (x(j).gt.x(j+igap)) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call dswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'LM') then -c -c X is sorted into increasing order of magnitude. -c - 100 continue - if (igap .eq. 
0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (abs(x(j)).gt.abs(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call dswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 - end if -c - 9000 continue - return -c -c %---------------% -c | End of dsesrt | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dseupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dseupd.f deleted file mode 100644 index 2ed0fd6a61..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dseupd.f +++ /dev/null @@ -1,857 +0,0 @@ -c\BeginDoc -c -c\Name: dseupd -c -c\Description: -c -c This subroutine returns the converged approximations to eigenvalues -c of A*z = lambda*B*z and (optionally): -c -c (1) the corresponding approximate eigenvectors, -c -c (2) an orthonormal (Lanczos) basis for the associated approximate -c invariant subspace, -c -c (3) Both. -c -c There is negligible additional cost to obtain eigenvectors. An orthonormal -c (Lanczos) basis is always computed. There is an additional storage cost -c of n*nev if both are requested (in this case a separate array Z must be -c supplied). -c -c These quantities are obtained from the Lanczos factorization computed -c by DSAUPD for the linear operator OP prescribed by the MODE selection -c (see IPARAM(7) in DSAUPD documentation.) DSAUPD must be called before -c this routine is called. These approximate eigenvalues and vectors are -c commonly called Ritz values and Ritz vectors respectively. They are -c referred to as such in the comments that follow. The computed orthonormal -c basis for the invariant subspace corresponding to these Ritz values is -c referred to as a Lanczos basis. 
-c -c See documentation in the header of the subroutine DSAUPD for a definition -c of OP as well as other terms and the relation of computed Ritz values -c and vectors of OP with respect to the given problem A*z = lambda*B*z. -c -c The approximate eigenvalues of the original problem are returned in -c ascending algebraic order. The user may elect to call this routine -c once for each desired Ritz vector and store it peripherally if desired. -c There is also the option of computing a selected set of these vectors -c with a single call. -c -c\Usage: -c call dseupd -c ( RVEC, HOWMNY, SELECT, D, Z, LDZ, SIGMA, BMAT, N, WHICH, NEV, TOL, -c RESID, NCV, V, LDV, IPARAM, IPNTR, WORKD, WORKL, LWORKL, INFO ) -c -c RVEC LOGICAL (INPUT) -c Specifies whether Ritz vectors corresponding to the Ritz value -c approximations to the eigenproblem A*z = lambda*B*z are computed. -c -c RVEC = .FALSE. Compute Ritz values only. -c -c RVEC = .TRUE. Compute Ritz vectors. -c -c HOWMNY Character*1 (INPUT) -c Specifies how many Ritz vectors are wanted and the form of Z -c the matrix of Ritz vectors. See remark 1 below. -c = 'A': compute NEV Ritz vectors; -c = 'S': compute some of the Ritz vectors, specified -c by the logical array SELECT. -c -c SELECT Logical array of dimension NCV. (INPUT/WORKSPACE) -c If HOWMNY = 'S', SELECT specifies the Ritz vectors to be -c computed. To select the Ritz vector corresponding to a -c Ritz value D(j), SELECT(j) must be set to .TRUE.. -c If HOWMNY = 'A' , SELECT is used as a workspace for -c reordering the Ritz values. -c -c D Double precision array of dimension NEV. (OUTPUT) -c On exit, D contains the Ritz value approximations to the -c eigenvalues of A*z = lambda*B*z. The values are returned -c in ascending order. If IPARAM(7) = 3,4,5 then D represents -c the Ritz values of OP computed by dsaupd transformed to -c those of the original eigensystem A*z = lambda*B*z. 
If -c IPARAM(7) = 1,2 then the Ritz values of OP are the same -c as the those of A*z = lambda*B*z. -c -c Z Double precision N by NEV array if HOWMNY = 'A'. (OUTPUT) -c On exit, Z contains the B-orthonormal Ritz vectors of the -c eigensystem A*z = lambda*B*z corresponding to the Ritz -c value approximations. -c If RVEC = .FALSE. then Z is not referenced. -c NOTE: The array Z may be set equal to first NEV columns of the -c Arnoldi/Lanczos basis array V computed by DSAUPD . -c -c LDZ Integer. (INPUT) -c The leading dimension of the array Z. If Ritz vectors are -c desired, then LDZ .ge. max( 1, N ). In any case, LDZ .ge. 1. -c -c SIGMA Double precision (INPUT) -c If IPARAM(7) = 3,4,5 represents the shift. Not referenced if -c IPARAM(7) = 1 or 2. -c -c -c **** The remaining arguments MUST be the same as for the **** -c **** call to DSAUPD that was just completed. **** -c -c NOTE: The remaining arguments -c -c BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, -c WORKD, WORKL, LWORKL, INFO -c -c must be passed directly to DSEUPD following the last call -c to DSAUPD . These arguments MUST NOT BE MODIFIED between -c the the last call to DSAUPD and the call to DSEUPD . -c -c Two of these parameters (WORKL, INFO) are also output parameters: -c -c WORKL Double precision work array of length LWORKL. (OUTPUT/WORKSPACE) -c WORKL(1:4*ncv) contains information obtained in -c dsaupd . They are not changed by dseupd . -c WORKL(4*ncv+1:ncv*ncv+8*ncv) holds the -c untransformed Ritz values, the computed error estimates, -c and the associated eigenvector matrix of H. -c -c Note: IPNTR(8:10) contains the pointer into WORKL for addresses -c of the above information computed by dseupd . -c ------------------------------------------------------------- -c IPNTR(8): pointer to the NCV RITZ values of the original system. -c IPNTR(9): pointer to the NCV corresponding error bounds. -c IPNTR(10): pointer to the NCV by NCV matrix of eigenvectors -c of the tridiagonal matrix T. 
Only referenced by -c dseupd if RVEC = .TRUE. See Remarks. -c ------------------------------------------------------------- -c -c INFO Integer. (OUTPUT) -c Error flag on output. -c = 0: Normal exit. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV must be greater than NEV and less than or equal to N. -c = -5: WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'. -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work WORKL array is not sufficient. -c = -8: Error return from trid. eigenvalue calculation; -c Information error from LAPACK routine dsteqr . -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3,4,5. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. -c = -12: NEV and WHICH = 'BE' are incompatible. -c = -14: DSAUPD did not find any eigenvalues to sufficient -c accuracy. -c = -15: HOWMNY must be one of 'A' or 'S' if RVEC = .true. -c = -16: HOWMNY = 'S' not yet implemented -c = -17: DSEUPD got a different count of the number of converged -c Ritz values than DSAUPD got. This indicates the user -c probably made an error in passing data from DSAUPD to -c DSEUPD or that the data was modified before entering -c DSEUPD . -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett, "The Symmetric Eigenvalue Problem". Prentice-Hall, -c 1980. -c 4. B.N. Parlett, B. Nour-Omid, "Towards a Black Box Lanczos Program", -c Computer Physics Communications, 53 (1989), pp 169-179. -c 5. B. Nour-Omid, B.N. Parlett, T. Ericson, P.S. Jensen, "How to -c Implement the Spectral Transformation", Math. Comp., 48 (1987), -c pp 663-673. -c 6. R.G. Grimes, J.G. Lewis and H.D. 
Simon, "A Shifted Block Lanczos -c Algorithm for Solving Sparse Symmetric Generalized Eigenproblems", -c SIAM J. Matr. Anal. Apps., January (1993). -c 7. L. Reichel, W.B. Gragg, "Algorithm 686: FORTRAN Subroutines -c for Updating the QR decomposition", ACM TOMS, December 1990, -c Volume 16 Number 4, pp 369-377. -c -c\Remarks -c 1. The converged Ritz values are always returned in increasing -c (algebraic) order. -c -c 2. Currently only HOWMNY = 'A' is implemented. It is included at this -c stage for the user who wants to incorporate it. -c -c\Routines called: -c dsesrt ARPACK routine that sorts an array X, and applies the -c corresponding permutation to a matrix A. -c dsortr dsortr ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c dvout ARPACK utility routine that prints vectors. -c dgeqr2 LAPACK routine that computes the QR factorization of -c a matrix. -c dlacpy LAPACK matrix copy routine. -c dlamch LAPACK routine that determines machine constants. -c dorm2r LAPACK routine that applies an orthogonal matrix in -c factored form. -c dsteqr LAPACK routine that computes eigenvalues and eigenvectors -c of a tridiagonal matrix. -c dger Level 2 BLAS rank one update to a matrix. -c dcopy Level 1 BLAS that copies one vector to another . -c dnrm2 Level 1 BLAS that computes the norm of a vector. -c dscal Level 1 BLAS that scales a vector. -c dswap Level 1 BLAS that swaps the contents of two vectors. - -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Chao Yang Houston, Texas -c Dept. 
of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: seupd.F SID: 2.11 DATE OF SID: 04/10/01 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- - subroutine dseupd (rvec , howmny, select, d , - & z , ldz , sigma , bmat , - & n , which , nev , tol , - & resid , ncv , v , ldv , - & iparam, ipntr , workd , workl, - & lworkl, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat, howmny, which*2 - logical rvec - integer info, ldz, ldv, lworkl, n, ncv, nev - Double precision - & sigma, tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(7), ipntr(11) - logical select(ncv) - Double precision - & d(nev) , resid(n) , v(ldv,ncv), - & z(ldz, nev), workd(2*n), workl(lworkl) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0 , zero = 0.0D+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character type*6 - integer bounds , ierr , ih , ihb , ihd , - & iq , iw , j , k , ldh , - & ldq , mode , msglvl, nconv , next , - & ritz , irz , ibd , np , ishift, - & leftptr, rghtptr, numcnv, jj - Double precision - & bnorm2 , rnorm, temp, temp1, eps23 - logical reord -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dcopy , dger , dgeqr2 , dlacpy , dorm2r , dscal , - & dsesrt , dsteqr , dswap , dvout , ivout , dsortr -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dnrm2 , dlamch - external dnrm2 , dlamch -c -c 
%---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic min -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - msglvl = mseupd - mode = iparam(7) - nconv = iparam(5) - info = 0 -c -c %--------------% -c | Quick return | -c %--------------% -c - if (nconv .eq. 0) go to 9000 - ierr = 0 -c - if (nconv .le. 0) ierr = -14 - if (n .le. 0) ierr = -1 - if (nev .le. 0) ierr = -2 - if (ncv .le. nev .or. ncv .gt. n) ierr = -3 - if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LA' .and. - & which .ne. 'SA' .and. - & which .ne. 'BE') ierr = -5 - if (bmat .ne. 'I' .and. bmat .ne. 'G') ierr = -6 - if ( (howmny .ne. 'A' .and. - & howmny .ne. 'P' .and. - & howmny .ne. 'S') .and. rvec ) - & ierr = -15 - if (rvec .and. howmny .eq. 'S') ierr = -16 -c - if (rvec .and. lworkl .lt. ncv**2+8*ncv) ierr = -7 -c - if (mode .eq. 1 .or. mode .eq. 2) then - type = 'REGULR' - else if (mode .eq. 3 ) then - type = 'SHIFTI' - else if (mode .eq. 4 ) then - type = 'BUCKLE' - else if (mode .eq. 5 ) then - type = 'CAYLEY' - else - ierr = -10 - end if - if (mode .eq. 1 .and. bmat .eq. 'G') ierr = -11 - if (nev .eq. 1 .and. which .eq. 'BE') ierr = -12 -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - go to 9000 - end if -c -c %-------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:2*ncv) := generated tridiagonal matrix H | -c | The subdiagonal is stored in workl(2:ncv). | -c | The dead spot is workl(1) but upon exiting | -c | dsaupd stores the B-norm of the last residual | -c | vector in workl(1). We use this !!! 
| -c | workl(2*ncv+1:2*ncv+ncv) := ritz values | -c | The wanted values are in the first NCONV spots. | -c | workl(3*ncv+1:3*ncv+ncv) := computed Ritz estimates | -c | The wanted values are in the first NCONV spots. | -c | NOTE: workl(1:4*ncv) is set by dsaupd and is not | -c | modified by dseupd . | -c %-------------------------------------------------------% -c -c %-------------------------------------------------------% -c | The following is used and set by dseupd . | -c | workl(4*ncv+1:4*ncv+ncv) := used as workspace during | -c | computation of the eigenvectors of H. Stores | -c | the diagonal of H. Upon EXIT contains the NCV | -c | Ritz values of the original system. The first | -c | NCONV spots have the wanted values. If MODE = | -c | 1 or 2 then will equal workl(2*ncv+1:3*ncv). | -c | workl(5*ncv+1:5*ncv+ncv) := used as workspace during | -c | computation of the eigenvectors of H. Stores | -c | the subdiagonal of H. Upon EXIT contains the | -c | NCV corresponding Ritz estimates of the | -c | original system. The first NCONV spots have the | -c | wanted values. If MODE = 1,2 then will equal | -c | workl(3*ncv+1:4*ncv). | -c | workl(6*ncv+1:6*ncv+ncv*ncv) := orthogonal Q that is | -c | the eigenvector matrix for H as returned by | -c | dsteqr . Not referenced if RVEC = .False. | -c | Ordering follows that of workl(4*ncv+1:5*ncv) | -c | workl(6*ncv+ncv*ncv+1:6*ncv+ncv*ncv+2*ncv) := | -c | Workspace. Needed by dsteqr and by dseupd . | -c | GRAND total of NCV*(NCV+8) locations. | -c %-------------------------------------------------------% -c -c - ih = ipntr(5) - ritz = ipntr(6) - bounds = ipntr(7) - ldh = ncv - ldq = ncv - ihd = bounds + ldh - ihb = ihd + ldh - iq = ihb + ldh - iw = iq + ldh*ncv - next = iw + 2*ncv - ipntr(4) = next - ipntr(8) = ihd - ipntr(9) = ihb - ipntr(10) = iq -c -c %----------------------------------------% -c | irz points to the Ritz values computed | -c | by _seigt before exiting _saup2. 
| -c | ibd points to the Ritz estimates | -c | computed by _seigt before exiting | -c | _saup2. | -c %----------------------------------------% -c - irz = ipntr(11)+ncv - ibd = irz+ncv -c -c -c %---------------------------------% -c | Set machine dependent constant. | -c %---------------------------------% -c - eps23 = dlamch ('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0 ) -c -c %---------------------------------------% -c | RNORM is B-norm of the RESID(1:N). | -c | BNORM2 is the 2 norm of B*RESID(1:N). | -c | Upon exit of dsaupd WORKD(1:N) has | -c | B*RESID(1:N). | -c %---------------------------------------% -c - rnorm = workl(ih) - if (bmat .eq. 'I') then - bnorm2 = rnorm - else if (bmat .eq. 'G') then - bnorm2 = dnrm2 (n, workd, 1) - end if -c - if (msglvl .gt. 2) then - call dvout (logfil, ncv, workl(irz), ndigit, - & '_seupd: Ritz values passed in from _SAUPD.') - call dvout (logfil, ncv, workl(ibd), ndigit, - & '_seupd: Ritz estimates passed in from _SAUPD.') - end if -c - if (rvec) then -c - reord = .false. -c -c %---------------------------------------------------% -c | Use the temporary bounds array to store indices | -c | These will be used to mark the select array later | -c %---------------------------------------------------% -c - do 10 j = 1,ncv - workl(bounds+j-1) = j - select(j) = .false. - 10 continue -c -c %-------------------------------------% -c | Select the wanted Ritz values. | -c | Sort the Ritz values so that the | -c | wanted ones appear at the tailing | -c | NEV positions of workl(irr) and | -c | workl(iri). Move the corresponding | -c | error estimates in workl(bound) | -c | accordingly. | -c %-------------------------------------% -c - np = ncv - nev - ishift = 0 - call dsgets (ishift, which , nev , - & np , workl(irz) , workl(bounds), - & workl) -c - if (msglvl .gt. 
2) then - call dvout (logfil, ncv, workl(irz), ndigit, - & '_seupd: Ritz values after calling _SGETS.') - call dvout (logfil, ncv, workl(bounds), ndigit, - & '_seupd: Ritz value indices after calling _SGETS.') - end if -c -c %-----------------------------------------------------% -c | Record indices of the converged wanted Ritz values | -c | Mark the select array for possible reordering | -c %-----------------------------------------------------% -c - numcnv = 0 - do 11 j = 1,ncv - temp1 = max(eps23, abs(workl(irz+ncv-j)) ) - jj = workl(bounds + ncv - j) - if (numcnv .lt. nconv .and. - & workl(ibd+jj-1) .le. tol*temp1) then - select(jj) = .true. - numcnv = numcnv + 1 - if (jj .gt. nev) reord = .true. - endif - 11 continue -c -c %-----------------------------------------------------------% -c | Check the count (numcnv) of converged Ritz values with | -c | the number (nconv) reported by _saupd. If these two | -c | are different then there has probably been an error | -c | caused by incorrect passing of the _saupd data. | -c %-----------------------------------------------------------% -c - if (msglvl .gt. 2) then - call ivout(logfil, 1, numcnv, ndigit, - & '_seupd: Number of specified eigenvalues') - call ivout(logfil, 1, nconv, ndigit, - & '_seupd: Number of "converged" eigenvalues') - end if -c - if (numcnv .ne. nconv) then - info = -17 - go to 9000 - end if -c -c %-----------------------------------------------------------% -c | Call LAPACK routine _steqr to compute the eigenvalues and | -c | eigenvectors of the final symmetric tridiagonal matrix H. | -c | Initialize the eigenvector matrix Q to the identity. | -c %-----------------------------------------------------------% -c - call dcopy (ncv-1, workl(ih+1), 1, workl(ihb), 1) - call dcopy (ncv, workl(ih+ldh), 1, workl(ihd), 1) -c - call dsteqr ('Identity', ncv, workl(ihd), workl(ihb), - & workl(iq) , ldq, workl(iw), ierr) -c - if (ierr .ne. 0) then - info = -8 - go to 9000 - end if -c - if (msglvl .gt. 
1) then - call dcopy (ncv, workl(iq+ncv-1), ldq, workl(iw), 1) - call dvout (logfil, ncv, workl(ihd), ndigit, - & '_seupd: NCV Ritz values of the final H matrix') - call dvout (logfil, ncv, workl(iw), ndigit, - & '_seupd: last row of the eigenvector matrix for H') - end if -c - if (reord) then -c -c %---------------------------------------------% -c | Reordered the eigenvalues and eigenvectors | -c | computed by _steqr so that the "converged" | -c | eigenvalues appear in the first NCONV | -c | positions of workl(ihd), and the associated | -c | eigenvectors appear in the first NCONV | -c | columns. | -c %---------------------------------------------% -c - leftptr = 1 - rghtptr = ncv -c - if (ncv .eq. 1) go to 30 -c - 20 if (select(leftptr)) then -c -c %-------------------------------------------% -c | Search, from the left, for the first Ritz | -c | value that has not converged. | -c %-------------------------------------------% -c - leftptr = leftptr + 1 -c - else if ( .not. select(rghtptr)) then -c -c %----------------------------------------------% -c | Search, from the right, the first Ritz value | -c | that has converged. | -c %----------------------------------------------% -c - rghtptr = rghtptr - 1 -c - else -c -c %----------------------------------------------% -c | Swap the Ritz value on the left that has not | -c | converged with the Ritz value on the right | -c | that has converged. Swap the associated | -c | eigenvector of the tridiagonal matrix H as | -c | well. | -c %----------------------------------------------% -c - temp = workl(ihd+leftptr-1) - workl(ihd+leftptr-1) = workl(ihd+rghtptr-1) - workl(ihd+rghtptr-1) = temp - call dcopy (ncv, workl(iq+ncv*(leftptr-1)), 1, - & workl(iw), 1) - call dcopy (ncv, workl(iq+ncv*(rghtptr-1)), 1, - & workl(iq+ncv*(leftptr-1)), 1) - call dcopy (ncv, workl(iw), 1, - & workl(iq+ncv*(rghtptr-1)), 1) - leftptr = leftptr + 1 - rghtptr = rghtptr - 1 -c - end if -c - if (leftptr .lt. 
rghtptr) go to 20 -c - 30 end if -c - if (msglvl .gt. 2) then - call dvout (logfil, ncv, workl(ihd), ndigit, - & '_seupd: The eigenvalues of H--reordered') - end if -c -c %----------------------------------------% -c | Load the converged Ritz values into D. | -c %----------------------------------------% -c - call dcopy (nconv, workl(ihd), 1, d, 1) -c - else -c -c %-----------------------------------------------------% -c | Ritz vectors not required. Load Ritz values into D. | -c %-----------------------------------------------------% -c - call dcopy (nconv, workl(ritz), 1, d, 1) - call dcopy (ncv, workl(ritz), 1, workl(ihd), 1) -c - end if -c -c %------------------------------------------------------------------% -c | Transform the Ritz values and possibly vectors and corresponding | -c | Ritz estimates of OP to those of A*x=lambda*B*x. The Ritz values | -c | (and corresponding data) are returned in ascending order. | -c %------------------------------------------------------------------% -c - if (type .eq. 'REGULR') then -c -c %---------------------------------------------------------% -c | Ascending sort of wanted Ritz values, vectors and error | -c | bounds. Not necessary if only Ritz values are desired. | -c %---------------------------------------------------------% -c - if (rvec) then - call dsesrt ('LA', rvec , nconv, d, ncv, workl(iq), ldq) - else - call dcopy (ncv, workl(bounds), 1, workl(ihb), 1) - end if -c - else -c -c %-------------------------------------------------------------% -c | * Make a copy of all the Ritz values. | -c | * Transform the Ritz values back to the original system. | -c | For TYPE = 'SHIFTI' the transformation is | -c | lambda = 1/theta + sigma | -c | For TYPE = 'BUCKLE' the transformation is | -c | lambda = sigma * theta / ( theta - 1 ) | -c | For TYPE = 'CAYLEY' the transformation is | -c | lambda = sigma * (theta + 1) / (theta - 1 ) | -c | where the theta are the Ritz values returned by dsaupd . 
| -c | NOTES: | -c | *The Ritz vectors are not affected by the transformation. | -c | They are only reordered. | -c %-------------------------------------------------------------% -c - call dcopy (ncv, workl(ihd), 1, workl(iw), 1) - if (type .eq. 'SHIFTI') then - do 40 k=1, ncv - workl(ihd+k-1) = one / workl(ihd+k-1) + sigma - 40 continue - else if (type .eq. 'BUCKLE') then - do 50 k=1, ncv - workl(ihd+k-1) = sigma * workl(ihd+k-1) / - & (workl(ihd+k-1) - one) - 50 continue - else if (type .eq. 'CAYLEY') then - do 60 k=1, ncv - workl(ihd+k-1) = sigma * (workl(ihd+k-1) + one) / - & (workl(ihd+k-1) - one) - 60 continue - end if -c -c %-------------------------------------------------------------% -c | * Store the wanted NCONV lambda values into D. | -c | * Sort the NCONV wanted lambda in WORKL(IHD:IHD+NCONV-1) | -c | into ascending order and apply sort to the NCONV theta | -c | values in the transformed system. We will need this to | -c | compute Ritz estimates in the original system. | -c | * Finally sort the lambda`s into ascending order and apply | -c | to Ritz vectors if wanted. Else just sort lambda`s into | -c | ascending order. | -c | NOTES: | -c | *workl(iw:iw+ncv-1) contain the theta ordered so that they | -c | match the ordering of the lambda. We`ll use them again for | -c | Ritz vector purification. | -c %-------------------------------------------------------------% -c - call dcopy (nconv, workl(ihd), 1, d, 1) - call dsortr ('LA', .true., nconv, workl(ihd), workl(iw)) - if (rvec) then - call dsesrt ('LA', rvec , nconv, d, ncv, workl(iq), ldq) - else - call dcopy (ncv, workl(bounds), 1, workl(ihb), 1) - call dscal (ncv, bnorm2/rnorm, workl(ihb), 1) - call dsortr ('LA', .true., nconv, d, workl(ihb)) - end if -c - end if -c -c %------------------------------------------------% -c | Compute the Ritz vectors. Transform the wanted | -c | eigenvectors of the symmetric tridiagonal H by | -c | the Lanczos basis matrix V. 
| -c %------------------------------------------------% -c - if (rvec .and. howmny .eq. 'A') then -c -c %----------------------------------------------------------% -c | Compute the QR factorization of the matrix representing | -c | the wanted invariant subspace located in the first NCONV | -c | columns of workl(iq,ldq). | -c %----------------------------------------------------------% -c - call dgeqr2 (ncv, nconv , workl(iq) , - & ldq, workl(iw+ncv), workl(ihb), - & ierr) -c -c %--------------------------------------------------------% -c | * Postmultiply V by Q. | -c | * Copy the first NCONV columns of VQ into Z. | -c | The N by NCONV matrix Z is now a matrix representation | -c | of the approximate invariant subspace associated with | -c | the Ritz values in workl(ihd). | -c %--------------------------------------------------------% -c - call dorm2r ('Right', 'Notranspose', n , - & ncv , nconv , workl(iq), - & ldq , workl(iw+ncv), v , - & ldv , workd(n+1) , ierr) - call dlacpy ('All', n, nconv, v, ldv, z, ldz) -c -c %-----------------------------------------------------% -c | In order to compute the Ritz estimates for the Ritz | -c | values in both systems, need the last row of the | -c | eigenvector matrix. Remember, it`s in factored form | -c %-----------------------------------------------------% -c - do 65 j = 1, ncv-1 - workl(ihb+j-1) = zero - 65 continue - workl(ihb+ncv-1) = one - call dorm2r ('Left', 'Transpose' , ncv , - & 1 , nconv , workl(iq) , - & ldq , workl(iw+ncv), workl(ihb), - & ncv , temp , ierr) -c - else if (rvec .and. howmny .eq. 'S') then -c -c Not yet implemented. See remark 2 above. -c - end if -c - if (type .eq. 'REGULR' .and. rvec) then -c - do 70 j=1, ncv - workl(ihb+j-1) = rnorm * abs( workl(ihb+j-1) ) - 70 continue -c - else if (type .ne. 'REGULR' .and. rvec) then -c -c %-------------------------------------------------% -c | * Determine Ritz estimates of the theta. | -c | If RVEC = .true. 
then compute Ritz estimates | -c | of the theta. | -c | If RVEC = .false. then copy Ritz estimates | -c | as computed by dsaupd . | -c | * Determine Ritz estimates of the lambda. | -c %-------------------------------------------------% -c - call dscal (ncv, bnorm2, workl(ihb), 1) - if (type .eq. 'SHIFTI') then -c - do 80 k=1, ncv - workl(ihb+k-1) = abs( workl(ihb+k-1) ) - & / workl(iw+k-1)**2 - 80 continue -c - else if (type .eq. 'BUCKLE') then -c - do 90 k=1, ncv - workl(ihb+k-1) = sigma * abs( workl(ihb+k-1) ) - & / (workl(iw+k-1)-one )**2 - 90 continue -c - else if (type .eq. 'CAYLEY') then -c - do 100 k=1, ncv - workl(ihb+k-1) = abs( workl(ihb+k-1) - & / workl(iw+k-1)*(workl(iw+k-1)-one) ) - 100 continue -c - end if -c - end if -c - if (type .ne. 'REGULR' .and. msglvl .gt. 1) then - call dvout (logfil, nconv, d, ndigit, - & '_seupd: Untransformed converged Ritz values') - call dvout (logfil, nconv, workl(ihb), ndigit, - & '_seupd: Ritz estimates of the untransformed Ritz values') - else if (msglvl .gt. 1) then - call dvout (logfil, nconv, d, ndigit, - & '_seupd: Converged Ritz values') - call dvout (logfil, nconv, workl(ihb), ndigit, - & '_seupd: Associated Ritz estimates') - end if -c -c %-------------------------------------------------% -c | Ritz vector purification step. Formally perform | -c | one of inverse subspace iteration. Only used | -c | for MODE = 3,4,5. See reference 7 | -c %-------------------------------------------------% -c - if (rvec .and. (type .eq. 'SHIFTI' .or. type .eq. 'CAYLEY')) then -c - do 110 k=0, nconv-1 - workl(iw+k) = workl(iq+k*ldq+ncv-1) - & / workl(iw+k) - 110 continue -c - else if (rvec .and. type .eq. 'BUCKLE') then -c - do 120 k=0, nconv-1 - workl(iw+k) = workl(iq+k*ldq+ncv-1) - & / (workl(iw+k)-one) - 120 continue -c - end if -c - if (type .ne. 
'REGULR') - & call dger (n, nconv, one, resid, 1, workl(iw), 1, z, ldz) -c - 9000 continue -c - return -c -c %---------------% -c | End of dseupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsgets.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsgets.f deleted file mode 100644 index b4562e421c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsgets.f +++ /dev/null @@ -1,219 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsgets -c -c\Description: -c Given the eigenvalues of the symmetric tridiagonal matrix H, -c computes the NP shifts AMU that are zeros of the polynomial of -c degree NP which filters out components of the unwanted eigenvectors -c corresponding to the AMU's based on some given criteria. -c -c NOTE: This is called even in the case of user specified shifts in -c order to sort the eigenvalues, and error bounds of H for later use. -c -c\Usage: -c call dsgets -c ( ISHIFT, WHICH, KEV, NP, RITZ, BOUNDS, SHIFTS ) -c -c\Arguments -c ISHIFT Integer. (INPUT) -c Method for selecting the implicit shifts at each iteration. -c ISHIFT = 0: user specified shifts -c ISHIFT = 1: exact shift with respect to the matrix H. -c -c WHICH Character*2. (INPUT) -c Shift selection criteria. -c 'LM' -> KEV eigenvalues of largest magnitude are retained. -c 'SM' -> KEV eigenvalues of smallest magnitude are retained. -c 'LA' -> KEV eigenvalues of largest value are retained. -c 'SA' -> KEV eigenvalues of smallest value are retained. -c 'BE' -> KEV eigenvalues, half from each end of the spectrum. -c If KEV is odd, compute one more from the high end. -c -c KEV Integer. (INPUT) -c KEV+NP is the size of the matrix H. -c -c NP Integer. (INPUT) -c Number of implicit shifts to be computed. -c -c RITZ Double precision array of length KEV+NP. (INPUT/OUTPUT) -c On INPUT, RITZ contains the eigenvalues of H. 
-c On OUTPUT, RITZ are sorted so that the unwanted eigenvalues -c are in the first NP locations and the wanted part is in -c the last KEV locations. When exact shifts are selected, the -c unwanted part corresponds to the shifts to be applied. -c -c BOUNDS Double precision array of length KEV+NP. (INPUT/OUTPUT) -c Error bounds corresponding to the ordering in RITZ. -c -c SHIFTS Double precision array of length NP. (INPUT/OUTPUT) -c On INPUT: contains the user specified shifts if ISHIFT = 0. -c On OUTPUT: contains the shifts sorted into decreasing order -c of magnitude with respect to the Ritz estimates contained in -c BOUNDS. If ISHIFT = 0, SHIFTS is not modified on exit. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c dsortr ARPACK utility sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c dvout ARPACK utility routine that prints vectors. -c dcopy Level 1 BLAS that copies one vector to another. -c dswap Level 1 BLAS that swaps the contents of two vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/93: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: sgets.F SID: 2.4 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\Remarks -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsgets ( ishift, which, kev, np, ritz, bounds, shifts ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - integer ishift, kev, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & bounds(kev+np), ritz(kev+np), shifts(np) -c -c %------------% -c | Parameters | -c %------------% -c - Double precision - & one, zero - parameter (one = 1.0D+0, zero = 0.0D+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer kevd2, msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external dswap, dcopy, dsortr, second -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic max, min -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msgets -c - if (which .eq. 'BE') then -c -c %-----------------------------------------------------% -c | Both ends of the spectrum are requested. | -c | Sort the eigenvalues into algebraically increasing | -c | order first then swap high end of the spectrum next | -c | to low end in appropriate locations. 
| -c | NOTE: when np < floor(kev/2) be careful not to swap | -c | overlapping locations. | -c %-----------------------------------------------------% -c - call dsortr ('LA', .true., kev+np, ritz, bounds) - kevd2 = kev / 2 - if ( kev .gt. 1 ) then - call dswap ( min(kevd2,np), ritz, 1, - & ritz( max(kevd2,np)+1 ), 1) - call dswap ( min(kevd2,np), bounds, 1, - & bounds( max(kevd2,np)+1 ), 1) - end if -c - else -c -c %----------------------------------------------------% -c | LM, SM, LA, SA case. | -c | Sort the eigenvalues of H into the desired order | -c | and apply the resulting order to BOUNDS. | -c | The eigenvalues are sorted so that the wanted part | -c | are always in the last KEV locations. | -c %----------------------------------------------------% -c - call dsortr (which, .true., kev+np, ritz, bounds) - end if -c - if (ishift .eq. 1 .and. np .gt. 0) then -c -c %-------------------------------------------------------% -c | Sort the unwanted Ritz values used as shifts so that | -c | the ones with largest Ritz estimates are first. | -c | This will tend to minimize the effects of the | -c | forward instability of the iteration when the shifts | -c | are applied in subroutine dsapps. | -c %-------------------------------------------------------% -c - call dsortr ('SM', .true., np, bounds, ritz) - call dcopy (np, ritz, 1, shifts, 1) - end if -c - call second (t1) - tsgets = tsgets + (t1 - t0) -c - if (msglvl .gt. 
0) then - call ivout (logfil, 1, kev, ndigit, '_sgets: KEV is') - call ivout (logfil, 1, np, ndigit, '_sgets: NP is') - call dvout (logfil, kev+np, ritz, ndigit, - & '_sgets: Eigenvalues of current H matrix') - call dvout (logfil, kev+np, bounds, ndigit, - & '_sgets: Associated Ritz estimates') - end if -c - return -c -c %---------------% -c | End of dsgets | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsortc.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsortc.f deleted file mode 100644 index 91af30f8ae..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsortc.f +++ /dev/null @@ -1,344 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsortc -c -c\Description: -c Sorts the complex array in XREAL and XIMAG into the order -c specified by WHICH and optionally applies the permutation to the -c real array Y. It is assumed that if an element of XIMAG is -c nonzero, then its negative is also an element. In other words, -c both members of a complex conjugate pair are to be sorted and the -c pairs are kept adjacent to each other. -c -c\Usage: -c call dsortc -c ( WHICH, APPLY, N, XREAL, XIMAG, Y ) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> sort XREAL,XIMAG into increasing order of magnitude. -c 'SM' -> sort XREAL,XIMAG into decreasing order of magnitude. -c 'LR' -> sort XREAL into increasing order of algebraic. -c 'SR' -> sort XREAL into decreasing order of algebraic. -c 'LI' -> sort XIMAG into increasing order of magnitude. -c 'SI' -> sort XIMAG into decreasing order of magnitude. -c NOTE: If an element of XIMAG is non-zero, then its negative -c is also an element. -c -c APPLY Logical. (Input) -c APPLY = .TRUE. -> apply the sorted order to array Y. -c APPLY = .FALSE. -> do not apply the sorted order to array Y. -c -c N Integer. (INPUT) -c Size of the arrays. -c -c XREAL, Double precision array of length N. 
(INPUT/OUTPUT) -c XIMAG Real and imaginary part of the array to be sorted. -c -c Y Double precision array of length N. (INPUT/OUTPUT) -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c Adapted from the sort routine in LANSO. -c -c\SCCS Information: @(#) -c FILE: sortc.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsortc (which, apply, n, xreal, ximag, y) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & xreal(0:n-1), ximag(0:n-1), y(0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Double precision - & temp, temp1, temp2 -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlapy2 - external dlapy2 -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'LM') then -c -c %------------------------------------------------------% -c | Sort XREAL,XIMAG into increasing order of magnitude. | -c %------------------------------------------------------% -c - 10 continue - if (igap .eq. 
0) go to 9000 -c - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - temp1 = dlapy2(xreal(j),ximag(j)) - temp2 = dlapy2(xreal(j+igap),ximag(j+igap)) -c - if (temp1.gt.temp2) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 30 - end if - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c %------------------------------------------------------% -c | Sort XREAL,XIMAG into decreasing order of magnitude. | -c %------------------------------------------------------% -c - 40 continue - if (igap .eq. 0) go to 9000 -c - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j .lt. 0) go to 60 -c - temp1 = dlapy2(xreal(j),ximag(j)) - temp2 = dlapy2(xreal(j+igap),ximag(j+igap)) -c - if (temp1.lt.temp2) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LR') then -c -c %------------------------------------------------% -c | Sort XREAL into increasing order of algebraic. | -c %------------------------------------------------% -c - 70 continue - if (igap .eq. 
0) go to 9000 -c - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (xreal(j).gt.xreal(j+igap)) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'SR') then -c -c %------------------------------------------------% -c | Sort XREAL into decreasing order of algebraic. | -c %------------------------------------------------% -c - 100 continue - if (igap .eq. 0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (xreal(j).lt.xreal(j+igap)) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 -c - else if (which .eq. 'LI') then -c -c %------------------------------------------------% -c | Sort XIMAG into increasing order of magnitude. | -c %------------------------------------------------% -c - 130 continue - if (igap .eq. 0) go to 9000 - do 150 i = igap, n-1 - j = i-igap - 140 continue -c - if (j.lt.0) go to 150 -c - if (abs(ximag(j)).gt.abs(ximag(j+igap))) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 150 - endif - j = j-igap - go to 140 - 150 continue - igap = igap / 2 - go to 130 -c - else if (which .eq. 'SI') then -c -c %------------------------------------------------% -c | Sort XIMAG into decreasing order of magnitude. 
| -c %------------------------------------------------% -c - 160 continue - if (igap .eq. 0) go to 9000 - do 180 i = igap, n-1 - j = i-igap - 170 continue -c - if (j.lt.0) go to 180 -c - if (abs(ximag(j)).lt.abs(ximag(j+igap))) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 180 - endif - j = j-igap - go to 170 - 180 continue - igap = igap / 2 - go to 160 - end if -c - 9000 continue - return -c -c %---------------% -c | End of dsortc | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsortr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsortr.f deleted file mode 100644 index 3903b81c5a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dsortr.f +++ /dev/null @@ -1,218 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dsortr -c -c\Description: -c Sort the array X1 in the order specified by WHICH and optionally -c applies the permutation to the array X2. -c -c\Usage: -c call dsortr -c ( WHICH, APPLY, N, X1, X2 ) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> X1 is sorted into increasing order of magnitude. -c 'SM' -> X1 is sorted into decreasing order of magnitude. -c 'LA' -> X1 is sorted into increasing order of algebraic. -c 'SA' -> X1 is sorted into decreasing order of algebraic. -c -c APPLY Logical. (Input) -c APPLY = .TRUE. -> apply the sorted order to X2. -c APPLY = .FALSE. -> do not apply the sorted order to X2. -c -c N Integer. (INPUT) -c Size of the arrays. -c -c X1 Double precision array of length N. (INPUT/OUTPUT) -c The array to be sorted. -c -c X2 Double precision array of length N. (INPUT/OUTPUT) -c Only referenced if APPLY = .TRUE. 
-c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/16/93: Version ' 2.1'. -c Adapted from the sort routine in LANSO. -c -c\SCCS Information: @(#) -c FILE: sortr.F SID: 2.3 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dsortr (which, apply, n, x1, x2) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & x1(0:n-1), x2(0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Double precision - & temp -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'SA') then -c -c X1 is sorted into decreasing order of algebraic. -c - 10 continue - if (igap .eq. 0) go to 9000 - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - if (x1(j).lt.x1(j+igap)) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 30 - endif - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c X1 is sorted into decreasing order of magnitude. -c - 40 continue - if (igap .eq. 
0) go to 9000 - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j.lt.0) go to 60 -c - if (abs(x1(j)).lt.abs(x1(j+igap))) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LA') then -c -c X1 is sorted into increasing order of algebraic. -c - 70 continue - if (igap .eq. 0) go to 9000 - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (x1(j).gt.x1(j+igap)) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'LM') then -c -c X1 is sorted into increasing order of magnitude. -c - 100 continue - if (igap .eq. 0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (abs(x1(j)).gt.abs(x1(j+igap))) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 - end if -c - 9000 continue - return -c -c %---------------% -c | End of dsortr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstatn.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstatn.f deleted file mode 100644 index 244df1ae79..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstatn.f +++ /dev/null @@ -1,61 +0,0 @@ -c -c %---------------------------------------------% -c | Initialize statistic and timing information | -c | for nonsymmetric Arnoldi code. 
| -c %---------------------------------------------% -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: statn.F SID: 2.4 DATE OF SID: 4/20/96 RELEASE: 2 -c - subroutine dstatn -c -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% -c - include 'stat.h' -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - nopx = 0 - nbx = 0 - nrorth = 0 - nitref = 0 - nrstrt = 0 -c - tnaupd = 0.0D+0 - tnaup2 = 0.0D+0 - tnaitr = 0.0D+0 - tneigh = 0.0D+0 - tngets = 0.0D+0 - tnapps = 0.0D+0 - tnconv = 0.0D+0 - titref = 0.0D+0 - tgetv0 = 0.0D+0 - trvec = 0.0D+0 -c -c %----------------------------------------------------% -c | User time including reverse communication overhead | -c %----------------------------------------------------% -c - tmvopx = 0.0D+0 - tmvbx = 0.0D+0 -c - return -c -c -c %---------------% -c | End of dstatn | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstats.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstats.f deleted file mode 100644 index 84f74b473a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstats.f +++ /dev/null @@ -1,47 +0,0 @@ -c -c\SCCS Information: @(#) -c FILE: stats.F SID: 2.1 DATE OF SID: 4/19/96 RELEASE: 2 -c %---------------------------------------------% -c | Initialize statistic and timing information | -c | for symmetric Arnoldi code. 
| -c %---------------------------------------------% - - subroutine dstats - -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% - include 'stat.h' - -c %-----------------------% -c | Executable Statements | -c %-----------------------% - - nopx = 0 - nbx = 0 - nrorth = 0 - nitref = 0 - nrstrt = 0 - - tsaupd = 0.0D+0 - tsaup2 = 0.0D+0 - tsaitr = 0.0D+0 - tseigt = 0.0D+0 - tsgets = 0.0D+0 - tsapps = 0.0D+0 - tsconv = 0.0D+0 - titref = 0.0D+0 - tgetv0 = 0.0D+0 - trvec = 0.0D+0 - -c %----------------------------------------------------% -c | User time including reverse communication overhead | -c %----------------------------------------------------% - tmvopx = 0.0D+0 - tmvbx = 0.0D+0 - - return -c -c End of dstats -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstqrb.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstqrb.f deleted file mode 100644 index 9fef543ba7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/dstqrb.f +++ /dev/null @@ -1,594 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: dstqrb -c -c\Description: -c Computes all eigenvalues and the last component of the eigenvectors -c of a symmetric tridiagonal matrix using the implicit QL or QR method. -c -c This is mostly a modification of the LAPACK routine dsteqr. -c See Remarks. -c -c\Usage: -c call dstqrb -c ( N, D, E, Z, WORK, INFO ) -c -c\Arguments -c N Integer. (INPUT) -c The number of rows and columns in the matrix. N >= 0. -c -c D Double precision array, dimension (N). (INPUT/OUTPUT) -c On entry, D contains the diagonal elements of the -c tridiagonal matrix. -c On exit, D contains the eigenvalues, in ascending order. -c If an error exit is made, the eigenvalues are correct -c for indices 1,2,...,INFO-1, but they are unordered and -c may not be the smallest eigenvalues of the matrix. 
-c -c E Double precision array, dimension (N-1). (INPUT/OUTPUT) -c On entry, E contains the subdiagonal elements of the -c tridiagonal matrix in positions 1 through N-1. -c On exit, E has been destroyed. -c -c Z Double precision array, dimension (N). (OUTPUT) -c On exit, Z contains the last row of the orthonormal -c eigenvector matrix of the symmetric tridiagonal matrix. -c If an error exit is made, Z contains the last row of the -c eigenvector matrix associated with the stored eigenvalues. -c -c WORK Double precision array, dimension (max(1,2*N-2)). (WORKSPACE) -c Workspace used in accumulating the transformation for -c computing the last components of the eigenvectors. -c -c INFO Integer. (OUTPUT) -c = 0: normal return. -c < 0: if INFO = -i, the i-th argument had an illegal value. -c > 0: if INFO = +i, the i-th eigenvalue has not converged -c after a total of 30*N iterations. -c -c\Remarks -c 1. None. -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c daxpy Level 1 BLAS that computes a vector triad. -c dcopy Level 1 BLAS that copies one vector to another. -c dswap Level 1 BLAS that swaps the contents of two vectors. -c lsame LAPACK character comparison routine. -c dlae2 LAPACK routine that computes the eigenvalues of a 2-by-2 -c symmetric matrix. -c dlaev2 LAPACK routine that eigendecomposition of a 2-by-2 symmetric -c matrix. -c dlamch LAPACK routine that determines machine constants. -c dlanst LAPACK routine that computes the norm of a matrix. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c dlartg LAPACK Givens rotation construction routine. -c dlascl LAPACK routine for careful scaling of a matrix. -c dlaset LAPACK matrix initialization routine. -c dlasr LAPACK routine that applies an orthogonal transformation to -c a matrix. -c dlasrt LAPACK sorting routine. 
-c dsteqr LAPACK routine that computes eigenvalues and eigenvectors -c of a symmetric tridiagonal matrix. -c xerbla LAPACK error handler routine. -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: stqrb.F SID: 2.5 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c 1. Starting with version 2.5, this routine is a modified version -c of LAPACK version 2.0 subroutine SSTEQR. No lines are deleted, -c only commeted out and new lines inserted. -c All lines commented out have "c$$$" at the beginning. -c Note that the LAPACK version 1.0 subroutine SSTEQR contained -c bugs. -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine dstqrb ( n, d, e, z, work, info ) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer info, n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Double precision - & d( n ), e( n-1 ), z( n ), work( 2*n-2 ) -c -c .. parameters .. - Double precision - & zero, one, two, three - parameter ( zero = 0.0D+0, one = 1.0D+0, - & two = 2.0D+0, three = 3.0D+0 ) - integer maxit - parameter ( maxit = 30 ) -c .. -c .. local scalars .. - integer i, icompz, ii, iscale, j, jtot, k, l, l1, lend, - & lendm1, lendp1, lendsv, lm1, lsv, m, mm, mm1, - & nm1, nmaxit - Double precision - & anorm, b, c, eps, eps2, f, g, p, r, rt1, rt2, - & s, safmax, safmin, ssfmax, ssfmin, tst -c .. -c .. external functions .. - logical lsame - Double precision - & dlamch, dlanst, dlapy2 - external lsame, dlamch, dlanst, dlapy2 -c .. -c .. external subroutines .. - external dlae2, dlaev2, dlartg, dlascl, dlaset, dlasr, - & dlasrt, dswap, xerbla -c .. -c .. intrinsic functions .. - intrinsic abs, max, sign, sqrt -c .. -c .. executable statements .. -c -c test the input parameters. 
-c - info = 0 -c -c$$$ IF( LSAME( COMPZ, 'N' ) ) THEN -c$$$ ICOMPZ = 0 -c$$$ ELSE IF( LSAME( COMPZ, 'V' ) ) THEN -c$$$ ICOMPZ = 1 -c$$$ ELSE IF( LSAME( COMPZ, 'I' ) ) THEN -c$$$ ICOMPZ = 2 -c$$$ ELSE -c$$$ ICOMPZ = -1 -c$$$ END IF -c$$$ IF( ICOMPZ.LT.0 ) THEN -c$$$ INFO = -1 -c$$$ ELSE IF( N.LT.0 ) THEN -c$$$ INFO = -2 -c$$$ ELSE IF( ( LDZ.LT.1 ) .OR. ( ICOMPZ.GT.0 .AND. LDZ.LT.MAX( 1, -c$$$ $ N ) ) ) THEN -c$$$ INFO = -6 -c$$$ END IF -c$$$ IF( INFO.NE.0 ) THEN -c$$$ CALL XERBLA( 'SSTEQR', -INFO ) -c$$$ RETURN -c$$$ END IF -c -c *** New starting with version 2.5 *** -c - icompz = 2 -c ************************************* -c -c quick return if possible -c - if( n.eq.0 ) - $ return -c - if( n.eq.1 ) then - if( icompz.eq.2 ) z( 1 ) = one - return - end if -c -c determine the unit roundoff and over/underflow thresholds. -c - eps = dlamch( 'e' ) - eps2 = eps**2 - safmin = dlamch( 's' ) - safmax = one / safmin - ssfmax = sqrt( safmax ) / three - ssfmin = sqrt( safmin ) / eps2 -c -c compute the eigenvalues and eigenvectors of the tridiagonal -c matrix. -c -c$$ if( icompz.eq.2 ) -c$$$ $ call dlaset( 'full', n, n, zero, one, z, ldz ) -c -c *** New starting with version 2.5 *** -c - if ( icompz .eq. 2 ) then - do 5 j = 1, n-1 - z(j) = zero - 5 continue - z( n ) = one - end if -c ************************************* -c - nmaxit = n*maxit - jtot = 0 -c -c determine where the matrix splits and choose ql or qr iteration -c for each block, according to whether top or bottom diagonal -c element is smaller. 
-c - l1 = 1 - nm1 = n - 1 -c - 10 continue - if( l1.gt.n ) - $ go to 160 - if( l1.gt.1 ) - $ e( l1-1 ) = zero - if( l1.le.nm1 ) then - do 20 m = l1, nm1 - tst = abs( e( m ) ) - if( tst.eq.zero ) - $ go to 30 - if( tst.le.( sqrt( abs( d( m ) ) )*sqrt( abs( d( m+ - $ 1 ) ) ) )*eps ) then - e( m ) = zero - go to 30 - end if - 20 continue - end if - m = n -c - 30 continue - l = l1 - lsv = l - lend = m - lendsv = lend - l1 = m + 1 - if( lend.eq.l ) - $ go to 10 -c -c scale submatrix in rows and columns l to lend -c - anorm = dlanst( 'i', lend-l+1, d( l ), e( l ) ) - iscale = 0 - if( anorm.eq.zero ) - $ go to 10 - if( anorm.gt.ssfmax ) then - iscale = 1 - call dlascl( 'g', 0, 0, anorm, ssfmax, lend-l+1, 1, d( l ), n, - $ info ) - call dlascl( 'g', 0, 0, anorm, ssfmax, lend-l, 1, e( l ), n, - $ info ) - else if( anorm.lt.ssfmin ) then - iscale = 2 - call dlascl( 'g', 0, 0, anorm, ssfmin, lend-l+1, 1, d( l ), n, - $ info ) - call dlascl( 'g', 0, 0, anorm, ssfmin, lend-l, 1, e( l ), n, - $ info ) - end if -c -c choose between ql and qr iteration -c - if( abs( d( lend ) ).lt.abs( d( l ) ) ) then - lend = lsv - l = lendsv - end if -c - if( lend.gt.l ) then -c -c ql iteration -c -c look for small subdiagonal element. -c - 40 continue - if( l.ne.lend ) then - lendm1 = lend - 1 - do 50 m = l, lendm1 - tst = abs( e( m ) )**2 - if( tst.le.( eps2*abs( d( m ) ) )*abs( d( m+1 ) )+ - $ safmin )go to 60 - 50 continue - end if -c - m = lend -c - 60 continue - if( m.lt.lend ) - $ e( m ) = zero - p = d( l ) - if( m.eq.l ) - $ go to 80 -c -c if remaining matrix is 2-by-2, use dlae2 or dlaev2 -c to compute its eigensystem. 
-c - if( m.eq.l+1 ) then - if( icompz.gt.0 ) then - call dlaev2( d( l ), e( l ), d( l+1 ), rt1, rt2, c, s ) - work( l ) = c - work( n-1+l ) = s -c$$$ call dlasr( 'r', 'v', 'b', n, 2, work( l ), -c$$$ $ work( n-1+l ), z( 1, l ), ldz ) -c -c *** New starting with version 2.5 *** -c - tst = z(l+1) - z(l+1) = c*tst - s*z(l) - z(l) = s*tst + c*z(l) -c ************************************* - else - call dlae2( d( l ), e( l ), d( l+1 ), rt1, rt2 ) - end if - d( l ) = rt1 - d( l+1 ) = rt2 - e( l ) = zero - l = l + 2 - if( l.le.lend ) - $ go to 40 - go to 140 - end if -c - if( jtot.eq.nmaxit ) - $ go to 140 - jtot = jtot + 1 -c -c form shift. -c - g = ( d( l+1 )-p ) / ( two*e( l ) ) - r = dlapy2( g, one ) - g = d( m ) - p + ( e( l ) / ( g+sign( r, g ) ) ) -c - s = one - c = one - p = zero -c -c inner loop -c - mm1 = m - 1 - do 70 i = mm1, l, -1 - f = s*e( i ) - b = c*e( i ) - call dlartg( g, f, c, s, r ) - if( i.ne.m-1 ) - $ e( i+1 ) = r - g = d( i+1 ) - p - r = ( d( i )-g )*s + two*c*b - p = s*r - d( i+1 ) = g + p - g = c*r - b -c -c if eigenvectors are desired, then save rotations. -c - if( icompz.gt.0 ) then - work( i ) = c - work( n-1+i ) = -s - end if -c - 70 continue -c -c if eigenvectors are desired, then apply saved rotations. -c - if( icompz.gt.0 ) then - mm = m - l + 1 -c$$$ call dlasr( 'r', 'v', 'b', n, mm, work( l ), work( n-1+l ), -c$$$ $ z( 1, l ), ldz ) -c -c *** New starting with version 2.5 *** -c - call dlasr( 'r', 'v', 'b', 1, mm, work( l ), - & work( n-1+l ), z( l ), 1 ) -c ************************************* - end if -c - d( l ) = d( l ) - p - e( l ) = g - go to 40 -c -c eigenvalue found. -c - 80 continue - d( l ) = p -c - l = l + 1 - if( l.le.lend ) - $ go to 40 - go to 140 -c - else -c -c qr iteration -c -c look for small superdiagonal element. 
-c - 90 continue - if( l.ne.lend ) then - lendp1 = lend + 1 - do 100 m = l, lendp1, -1 - tst = abs( e( m-1 ) )**2 - if( tst.le.( eps2*abs( d( m ) ) )*abs( d( m-1 ) )+ - $ safmin )go to 110 - 100 continue - end if -c - m = lend -c - 110 continue - if( m.gt.lend ) - $ e( m-1 ) = zero - p = d( l ) - if( m.eq.l ) - $ go to 130 -c -c if remaining matrix is 2-by-2, use dlae2 or dlaev2 -c to compute its eigensystem. -c - if( m.eq.l-1 ) then - if( icompz.gt.0 ) then - call dlaev2( d( l-1 ), e( l-1 ), d( l ), rt1, rt2, c, s ) -c$$$ work( m ) = c -c$$$ work( n-1+m ) = s -c$$$ call dlasr( 'r', 'v', 'f', n, 2, work( m ), -c$$$ $ work( n-1+m ), z( 1, l-1 ), ldz ) -c -c *** New starting with version 2.5 *** -c - tst = z(l) - z(l) = c*tst - s*z(l-1) - z(l-1) = s*tst + c*z(l-1) -c ************************************* - else - call dlae2( d( l-1 ), e( l-1 ), d( l ), rt1, rt2 ) - end if - d( l-1 ) = rt1 - d( l ) = rt2 - e( l-1 ) = zero - l = l - 2 - if( l.ge.lend ) - $ go to 90 - go to 140 - end if -c - if( jtot.eq.nmaxit ) - $ go to 140 - jtot = jtot + 1 -c -c form shift. -c - g = ( d( l-1 )-p ) / ( two*e( l-1 ) ) - r = dlapy2( g, one ) - g = d( m ) - p + ( e( l-1 ) / ( g+sign( r, g ) ) ) -c - s = one - c = one - p = zero -c -c inner loop -c - lm1 = l - 1 - do 120 i = m, lm1 - f = s*e( i ) - b = c*e( i ) - call dlartg( g, f, c, s, r ) - if( i.ne.m ) - $ e( i-1 ) = r - g = d( i ) - p - r = ( d( i+1 )-g )*s + two*c*b - p = s*r - d( i ) = g + p - g = c*r - b -c -c if eigenvectors are desired, then save rotations. -c - if( icompz.gt.0 ) then - work( i ) = c - work( n-1+i ) = s - end if -c - 120 continue -c -c if eigenvectors are desired, then apply saved rotations. 
-c - if( icompz.gt.0 ) then - mm = l - m + 1 -c$$$ call dlasr( 'r', 'v', 'f', n, mm, work( m ), work( n-1+m ), -c$$$ $ z( 1, m ), ldz ) -c -c *** New starting with version 2.5 *** -c - call dlasr( 'r', 'v', 'f', 1, mm, work( m ), work( n-1+m ), - & z( m ), 1 ) -c ************************************* - end if -c - d( l ) = d( l ) - p - e( lm1 ) = g - go to 90 -c -c eigenvalue found. -c - 130 continue - d( l ) = p -c - l = l - 1 - if( l.ge.lend ) - $ go to 90 - go to 140 -c - end if -c -c undo scaling if necessary -c - 140 continue - if( iscale.eq.1 ) then - call dlascl( 'g', 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, - $ d( lsv ), n, info ) - call dlascl( 'g', 0, 0, ssfmax, anorm, lendsv-lsv, 1, e( lsv ), - $ n, info ) - else if( iscale.eq.2 ) then - call dlascl( 'g', 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, - $ d( lsv ), n, info ) - call dlascl( 'g', 0, 0, ssfmin, anorm, lendsv-lsv, 1, e( lsv ), - $ n, info ) - end if -c -c check for no convergence to an eigenvalue after a total -c of n*maxit iterations. -c - if( jtot.lt.nmaxit ) - $ go to 10 - do 150 i = 1, n - 1 - if( e( i ).ne.zero ) - $ info = info + 1 - 150 continue - go to 190 -c -c order eigenvalues and eigenvectors. 
-c - 160 continue - if( icompz.eq.0 ) then -c -c use quick sort -c - call dlasrt( 'i', n, d, info ) -c - else -c -c use selection sort to minimize swaps of eigenvectors -c - do 180 ii = 2, n - i = ii - 1 - k = i - p = d( i ) - do 170 j = ii, n - if( d( j ).lt.p ) then - k = j - p = d( j ) - end if - 170 continue - if( k.ne.i ) then - d( k ) = d( i ) - d( i ) = p -c$$$ call dswap( n, z( 1, i ), 1, z( 1, k ), 1 ) -c *** New starting with version 2.5 *** -c - p = z(k) - z(k) = z(i) - z(i) = p -c ************************************* - end if - 180 continue - end if -c - 190 continue - return -c -c %---------------% -c | End of dstqrb | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sgetv0.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sgetv0.f deleted file mode 100644 index 86a98c4a76..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sgetv0.f +++ /dev/null @@ -1,419 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: sgetv0 -c -c\Description: -c Generate a random initial residual vector for the Arnoldi process. -c Force the residual vector to be in the range of the operator OP. -c -c\Usage: -c call sgetv0 -c ( IDO, BMAT, ITRY, INITV, N, J, V, LDV, RESID, RNORM, -c IPNTR, WORKD, IERR ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to sgetv0. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. 
-c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B in the (generalized) -c eigenvalue problem A*x = lambda*B*x. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c ITRY Integer. (INPUT) -c ITRY counts the number of times that sgetv0 is called. -c It should be set to 1 on the initial call to sgetv0. -c -c INITV Logical variable. (INPUT) -c .TRUE. => the initial residual vector is given in RESID. -c .FALSE. => generate a random initial residual vector. -c -c N Integer. (INPUT) -c Dimension of the problem. -c -c J Integer. (INPUT) -c Index of the residual vector to be generated, with respect to -c the Arnoldi process. J > 1 in case of a "restart". -c -c V Real N by J array. (INPUT) -c The first J-1 columns of V contain the current Arnoldi basis -c if this is a "restart". -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c RESID Real array of length N. (INPUT/OUTPUT) -c Initial residual vector to be generated. If RESID is -c provided, force RESID into the range of the operator OP. -c -c RNORM Real scalar. (OUTPUT) -c B-norm of the generated residual. -c -c IPNTR Integer array of length 3. (OUTPUT) -c -c WORKD Real work array of length 2*N. (REVERSE COMMUNICATION). -c On exit, WORK(1:N) = B*RESID to be used in SSAITR. -c -c IERR Integer. (OUTPUT) -c = 0: Normal exit. -c = -1: Cannot generate a nontrivial restarted residual vector -c in the range of the operator OP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. 
Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c second ARPACK utility routine for timing. -c svout ARPACK utility routine for vector output. -c slarnv LAPACK routine for generating a random vector. -c sgemv Level 2 BLAS routine for matrix vector multiplication. -c scopy Level 1 BLAS that copies one vector to another. -c sdot Level 1 BLAS that computes the scalar product of two vectors. -c snrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: getv0.F SID: 2.7 DATE OF SID: 04/07/99 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine sgetv0 - & ( ido, bmat, itry, initv, n, j, v, ldv, resid, rnorm, - & ipntr, workd, ierr ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - logical initv - integer ido, ierr, itry, j, ldv, n - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Real - & resid(n), v(ldv,j), workd(2*n) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical first, inits, orth - integer idist, iseed(4), iter, msglvl, jj - Real - & rnorm0 - save first, iseed, inits, iter, msglvl, orth, rnorm0 -c -c %----------------------% 
-c | External Subroutines | -c %----------------------% -c - external slarnv, svout, scopy, sgemv, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & sdot, snrm2 - external sdot, snrm2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs, sqrt -c -c %-----------------% -c | Data Statements | -c %-----------------% -c - data inits /.true./ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c -c %-----------------------------------% -c | Initialize the seed of the LAPACK | -c | random number generator | -c %-----------------------------------% -c - if (inits) then - iseed(1) = 1 - iseed(2) = 3 - iseed(3) = 5 - iseed(4) = 7 - inits = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mgetv0 -c - ierr = 0 - iter = 0 - first = .FALSE. - orth = .FALSE. -c -c %-----------------------------------------------------% -c | Possibly generate a random starting vector in RESID | -c | Use a LAPACK random number generator used by the | -c | matrix generation routines. | -c | idist = 1: uniform (0,1) distribution; | -c | idist = 2: uniform (-1,1) distribution; | -c | idist = 3: normal (0,1) distribution; | -c %-----------------------------------------------------% -c - if (.not.initv) then - idist = 2 - call slarnv (idist, iseed, n, resid) - end if -c -c %----------------------------------------------------------% -c | Force the starting vector into the range of OP to handle | -c | the generalized problem when B is possibly (singular). | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 
'G') then - nopx = nopx + 1 - ipntr(1) = 1 - ipntr(2) = n + 1 - call scopy (n, resid, 1, workd, 1) - ido = -1 - go to 9000 - end if - end if -c -c %-----------------------------------------% -c | Back from computing OP*(initial-vector) | -c %-----------------------------------------% -c - if (first) go to 20 -c -c %-----------------------------------------------% -c | Back from computing B*(orthogonalized-vector) | -c %-----------------------------------------------% -c - if (orth) go to 40 -c - if (bmat .eq. 'G') then - call second (t3) - tmvopx = tmvopx + (t3 - t2) - end if -c -c %------------------------------------------------------% -c | Starting vector is now in the range of OP; r = OP*r; | -c | Compute B-norm of starting vector. | -c %------------------------------------------------------% -c - call second (t2) - first = .TRUE. - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, workd(n+1), 1, resid, 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd, 1) - end if -c - 20 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - first = .FALSE. - if (bmat .eq. 'G') then - rnorm0 = sdot (n, resid, 1, workd, 1) - rnorm0 = sqrt(abs(rnorm0)) - else if (bmat .eq. 'I') then - rnorm0 = snrm2(n, resid, 1) - end if - rnorm = rnorm0 -c -c %---------------------------------------------% -c | Exit if this is the very first Arnoldi step | -c %---------------------------------------------% -c - if (j .eq. 1) go to 50 -c -c %---------------------------------------------------------------- -c | Otherwise need to B-orthogonalize the starting vector against | -c | the current Arnoldi basis using Gram-Schmidt with iter. ref. | -c | This is the case where an invariant subspace is encountered | -c | in the middle of the Arnoldi factorization. | -c | | -c | s = V^{T}*B*r; r = r - V*s; | -c | | -c | Stopping criteria used for iter. ref. 
is discussed in | -c | Parlett's book, page 107 and in Gragg & Reichel TOMS paper. | -c %---------------------------------------------------------------% -c - orth = .TRUE. - 30 continue -c - call sgemv ('T', n, j-1, one, v, ldv, workd, 1, - & zero, workd(n+1), 1) - call sgemv ('N', n, j-1, -one, v, ldv, workd(n+1), 1, - & one, resid, 1) -c -c %----------------------------------------------------------% -c | Compute the B-norm of the orthogonalized starting vector | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd, 1) - end if -c - 40 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - rnorm = sdot (n, resid, 1, workd, 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = snrm2(n, resid, 1) - end if -c -c %--------------------------------------% -c | Check for further orthogonalization. | -c %--------------------------------------% -c - if (msglvl .gt. 2) then - call svout (logfil, 1, rnorm0, ndigit, - & '_getv0: re-orthonalization ; rnorm0 is') - call svout (logfil, 1, rnorm, ndigit, - & '_getv0: re-orthonalization ; rnorm is') - end if -c - if (rnorm .gt. 0.717*rnorm0) go to 50 -c - iter = iter + 1 - if (iter .le. 5) then -c -c %-----------------------------------% -c | Perform iterative refinement step | -c %-----------------------------------% -c - rnorm0 = rnorm - go to 30 - else -c -c %------------------------------------% -c | Iterative refinement step "failed" | -c %------------------------------------% -c - do 45 jj = 1, n - resid(jj) = zero - 45 continue - rnorm = zero - ierr = -1 - end if -c - 50 continue -c - if (msglvl .gt. 
0) then - call svout (logfil, 1, rnorm, ndigit, - & '_getv0: B-norm of initial / restarted starting vector') - end if - if (msglvl .gt. 3) then - call svout (logfil, n, resid, ndigit, - & '_getv0: initial / restarted starting vector') - end if - ido = 99 -c - call second (t1) - tgetv0 = tgetv0 + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of sgetv0 | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/slaqrb.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/slaqrb.f deleted file mode 100644 index e967b18e4c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/slaqrb.f +++ /dev/null @@ -1,521 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: slaqrb -c -c\Description: -c Compute the eigenvalues and the Schur decomposition of an upper -c Hessenberg submatrix in rows and columns ILO to IHI. Only the -c last component of the Schur vectors are computed. -c -c This is mostly a modification of the LAPACK routine slahqr. -c -c\Usage: -c call slaqrb -c ( WANTT, N, ILO, IHI, H, LDH, WR, WI, Z, INFO ) -c -c\Arguments -c WANTT Logical variable. (INPUT) -c = .TRUE. : the full Schur form T is required; -c = .FALSE.: only eigenvalues are required. -c -c N Integer. (INPUT) -c The order of the matrix H. N >= 0. -c -c ILO Integer. (INPUT) -c IHI Integer. (INPUT) -c It is assumed that H is already upper quasi-triangular in -c rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless -c ILO = 1). SLAQRB works primarily with the Hessenberg -c submatrix in rows and columns ILO to IHI, but applies -c transformations to all of H if WANTT is .TRUE.. -c 1 <= ILO <= max(1,IHI); IHI <= N. -c -c H Real array, dimension (LDH,N). (INPUT/OUTPUT) -c On entry, the upper Hessenberg matrix H. -c On exit, if WANTT is .TRUE., H is upper quasi-triangular in -c rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in -c standard form. 
If WANTT is .FALSE., the contents of H are -c unspecified on exit. -c -c LDH Integer. (INPUT) -c The leading dimension of the array H. LDH >= max(1,N). -c -c WR Real array, dimension (N). (OUTPUT) -c WI Real array, dimension (N). (OUTPUT) -c The real and imaginary parts, respectively, of the computed -c eigenvalues ILO to IHI are stored in the corresponding -c elements of WR and WI. If two eigenvalues are computed as a -c complex conjugate pair, they are stored in consecutive -c elements of WR and WI, say the i-th and (i+1)th, with -c WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the -c eigenvalues are stored in the same order as on the diagonal -c of the Schur form returned in H, with WR(i) = H(i,i), and, if -c H(i:i+1,i:i+1) is a 2-by-2 diagonal block, -c WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). -c -c Z Real array, dimension (N). (OUTPUT) -c On exit Z contains the last components of the Schur vectors. -c -c INFO Integer. (OUPUT) -c = 0: successful exit -c > 0: SLAQRB failed to compute all the eigenvalues ILO to IHI -c in a total of 30*(IHI-ILO+1) iterations; if INFO = i, -c elements i+1:ihi of WR and WI contain those eigenvalues -c which have been successfully computed. -c -c\Remarks -c 1. None. -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c slabad LAPACK routine that computes machine constants. -c slamch LAPACK routine that determines machine constants. -c slanhs LAPACK routine that computes various norms of a matrix. -c slanv2 LAPACK routine that computes the Schur factorization of -c 2 by 2 nonsymmetric matrix in standard form. -c slarfg LAPACK Householder reflection construction routine. -c scopy Level 1 BLAS that copies one vector to another. -c srot Level 1 BLAS that applies a rotation to a 2 by 2 matrix. - -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c Modified from the LAPACK routine slahqr so that only the -c last component of the Schur vectors are computed. -c -c\SCCS Information: @(#) -c FILE: laqrb.F SID: 2.2 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c 1. None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine slaqrb ( wantt, n, ilo, ihi, h, ldh, wr, wi, - & z, info ) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - logical wantt - integer ihi, ilo, info, ldh, n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & h( ldh, * ), wi( * ), wr( * ), z( * ) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & zero, one, dat1, dat2 - parameter (zero = 0.0E+0, one = 1.0E+0, dat1 = 7.5E-1, - & dat2 = -4.375E-1) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - integer i, i1, i2, itn, its, j, k, l, m, nh, nr - Real - & cs, h00, h10, h11, h12, h21, h22, h33, h33s, - & h43h34, h44, h44s, ovfl, s, smlnum, sn, sum, - & t1, t2, t3, tst1, ulp, unfl, v1, v2, v3 - Real - & v( 3 ), work( 1 ) -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slamch, slanhs - external slamch, slanhs -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy, slabad, slanv2, slarfg, srot -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - info = 0 -c -c %--------------------------% -c | Quick return if possible | -c %--------------------------% -c - if( n.eq.0 ) - & return - if( ilo.eq.ihi ) then - wr( ilo ) = h( ilo, ilo ) - wi( ilo ) = zero - return - end if -c -c %---------------------------------------------% -c | Initialize the vector of last components of | -c | the Schur 
vectors for accumulation. | -c %---------------------------------------------% -c - do 5 j = 1, n-1 - z(j) = zero - 5 continue - z(n) = one -c - nh = ihi - ilo + 1 -c -c %-------------------------------------------------------------% -c | Set machine-dependent constants for the stopping criterion. | -c | If norm(H) <= sqrt(OVFL), overflow should not occur. | -c %-------------------------------------------------------------% -c - unfl = slamch( 'safe minimum' ) - ovfl = one / unfl - call slabad( unfl, ovfl ) - ulp = slamch( 'precision' ) - smlnum = unfl*( nh / ulp ) -c -c %---------------------------------------------------------------% -c | I1 and I2 are the indices of the first row and last column | -c | of H to which transformations must be applied. If eigenvalues | -c | only are computed, I1 and I2 are set inside the main loop. | -c | Zero out H(J+2,J) = ZERO for J=1:N if WANTT = .TRUE. | -c | else H(J+2,J) for J=ILO:IHI-ILO-1 if WANTT = .FALSE. | -c %---------------------------------------------------------------% -c - if( wantt ) then - i1 = 1 - i2 = n - do 8 i=1,i2-2 - h(i1+i+1,i) = zero - 8 continue - else - do 9 i=1, ihi-ilo-1 - h(ilo+i+1,ilo+i-1) = zero - 9 continue - end if -c -c %---------------------------------------------------% -c | ITN is the total number of QR iterations allowed. | -c %---------------------------------------------------% -c - itn = 30*nh -c -c ------------------------------------------------------------------ -c The main loop begins here. I is the loop index and decreases from -c IHI to ILO in steps of 1 or 2. Each iteration of the loop works -c with the active submatrix in rows and columns L to I. -c Eigenvalues I+1 to IHI have already converged. Either L = ILO or -c H(L,L-1) is negligible so that the matrix splits. 
-c ------------------------------------------------------------------ -c - i = ihi - 10 continue - l = ilo - if( i.lt.ilo ) - & go to 150 - -c %--------------------------------------------------------------% -c | Perform QR iterations on rows and columns ILO to I until a | -c | submatrix of order 1 or 2 splits off at the bottom because a | -c | subdiagonal element has become negligible. | -c %--------------------------------------------------------------% - - do 130 its = 0, itn -c -c %----------------------------------------------% -c | Look for a single small subdiagonal element. | -c %----------------------------------------------% -c - do 20 k = i, l + 1, -1 - tst1 = abs( h( k-1, k-1 ) ) + abs( h( k, k ) ) - if( tst1.eq.zero ) - & tst1 = slanhs( '1', i-l+1, h( l, l ), ldh, work ) - if( abs( h( k, k-1 ) ).le.max( ulp*tst1, smlnum ) ) - & go to 30 - 20 continue - 30 continue - l = k - if( l.gt.ilo ) then -c -c %------------------------% -c | H(L,L-1) is negligible | -c %------------------------% -c - h( l, l-1 ) = zero - end if -c -c %-------------------------------------------------------------% -c | Exit from loop if a submatrix of order 1 or 2 has split off | -c %-------------------------------------------------------------% -c - if( l.ge.i-1 ) - & go to 140 -c -c %---------------------------------------------------------% -c | Now the active submatrix is in rows and columns L to I. | -c | If eigenvalues only are being computed, only the active | -c | submatrix need be transformed. | -c %---------------------------------------------------------% -c - if( .not.wantt ) then - i1 = l - i2 = i - end if -c - if( its.eq.10 .or. 
its.eq.20 ) then -c -c %-------------------% -c | Exceptional shift | -c %-------------------% -c - s = abs( h( i, i-1 ) ) + abs( h( i-1, i-2 ) ) - h44 = dat1*s - h33 = h44 - h43h34 = dat2*s*s -c - else -c -c %-----------------------------------------% -c | Prepare to use Wilkinson's double shift | -c %-----------------------------------------% -c - h44 = h( i, i ) - h33 = h( i-1, i-1 ) - h43h34 = h( i, i-1 )*h( i-1, i ) - end if -c -c %-----------------------------------------------------% -c | Look for two consecutive small subdiagonal elements | -c %-----------------------------------------------------% -c - do 40 m = i - 2, l, -1 -c -c %---------------------------------------------------------% -c | Determine the effect of starting the double-shift QR | -c | iteration at row M, and see if this would make H(M,M-1) | -c | negligible. | -c %---------------------------------------------------------% -c - h11 = h( m, m ) - h22 = h( m+1, m+1 ) - h21 = h( m+1, m ) - h12 = h( m, m+1 ) - h44s = h44 - h11 - h33s = h33 - h11 - v1 = ( h33s*h44s-h43h34 ) / h21 + h12 - v2 = h22 - h11 - h33s - h44s - v3 = h( m+2, m+1 ) - s = abs( v1 ) + abs( v2 ) + abs( v3 ) - v1 = v1 / s - v2 = v2 / s - v3 = v3 / s - v( 1 ) = v1 - v( 2 ) = v2 - v( 3 ) = v3 - if( m.eq.l ) - & go to 50 - h00 = h( m-1, m-1 ) - h10 = h( m, m-1 ) - tst1 = abs( v1 )*( abs( h00 )+abs( h11 )+abs( h22 ) ) - if( abs( h10 )*( abs( v2 )+abs( v3 ) ).le.ulp*tst1 ) - & go to 50 - 40 continue - 50 continue -c -c %----------------------% -c | Double-shift QR step | -c %----------------------% -c - do 120 k = m, i - 1 -c -c ------------------------------------------------------------ -c The first iteration of this loop determines a reflection G -c from the vector V and applies it from left and right to H, -c thus creating a nonzero bulge below the subdiagonal. 
-c -c Each subsequent iteration determines a reflection G to -c restore the Hessenberg form in the (K-1)th column, and thus -c chases the bulge one step toward the bottom of the active -c submatrix. NR is the order of G. -c ------------------------------------------------------------ -c - nr = min( 3, i-k+1 ) - if( k.gt.m ) - & call scopy( nr, h( k, k-1 ), 1, v, 1 ) - call slarfg( nr, v( 1 ), v( 2 ), 1, t1 ) - if( k.gt.m ) then - h( k, k-1 ) = v( 1 ) - h( k+1, k-1 ) = zero - if( k.lt.i-1 ) - & h( k+2, k-1 ) = zero - else if( m.gt.l ) then - h( k, k-1 ) = -h( k, k-1 ) - end if - v2 = v( 2 ) - t2 = t1*v2 - if( nr.eq.3 ) then - v3 = v( 3 ) - t3 = t1*v3 -c -c %------------------------------------------------% -c | Apply G from the left to transform the rows of | -c | the matrix in columns K to I2. | -c %------------------------------------------------% -c - do 60 j = k, i2 - sum = h( k, j ) + v2*h( k+1, j ) + v3*h( k+2, j ) - h( k, j ) = h( k, j ) - sum*t1 - h( k+1, j ) = h( k+1, j ) - sum*t2 - h( k+2, j ) = h( k+2, j ) - sum*t3 - 60 continue -c -c %----------------------------------------------------% -c | Apply G from the right to transform the columns of | -c | the matrix in rows I1 to min(K+3,I). | -c %----------------------------------------------------% -c - do 70 j = i1, min( k+3, i ) - sum = h( j, k ) + v2*h( j, k+1 ) + v3*h( j, k+2 ) - h( j, k ) = h( j, k ) - sum*t1 - h( j, k+1 ) = h( j, k+1 ) - sum*t2 - h( j, k+2 ) = h( j, k+2 ) - sum*t3 - 70 continue -c -c %----------------------------------% -c | Accumulate transformations for Z | -c %----------------------------------% -c - sum = z( k ) + v2*z( k+1 ) + v3*z( k+2 ) - z( k ) = z( k ) - sum*t1 - z( k+1 ) = z( k+1 ) - sum*t2 - z( k+2 ) = z( k+2 ) - sum*t3 - - else if( nr.eq.2 ) then -c -c %------------------------------------------------% -c | Apply G from the left to transform the rows of | -c | the matrix in columns K to I2. 
| -c %------------------------------------------------% -c - do 90 j = k, i2 - sum = h( k, j ) + v2*h( k+1, j ) - h( k, j ) = h( k, j ) - sum*t1 - h( k+1, j ) = h( k+1, j ) - sum*t2 - 90 continue -c -c %----------------------------------------------------% -c | Apply G from the right to transform the columns of | -c | the matrix in rows I1 to min(K+3,I). | -c %----------------------------------------------------% -c - do 100 j = i1, i - sum = h( j, k ) + v2*h( j, k+1 ) - h( j, k ) = h( j, k ) - sum*t1 - h( j, k+1 ) = h( j, k+1 ) - sum*t2 - 100 continue -c -c %----------------------------------% -c | Accumulate transformations for Z | -c %----------------------------------% -c - sum = z( k ) + v2*z( k+1 ) - z( k ) = z( k ) - sum*t1 - z( k+1 ) = z( k+1 ) - sum*t2 - end if - 120 continue - - 130 continue -c -c %-------------------------------------------------------% -c | Failure to converge in remaining number of iterations | -c %-------------------------------------------------------% -c - info = i - return - - 140 continue - - if( l.eq.i ) then -c -c %------------------------------------------------------% -c | H(I,I-1) is negligible: one eigenvalue has converged | -c %------------------------------------------------------% -c - wr( i ) = h( i, i ) - wi( i ) = zero - - else if( l.eq.i-1 ) then -c -c %--------------------------------------------------------% -c | H(I-1,I-2) is negligible; | -c | a pair of eigenvalues have converged. | -c | | -c | Transform the 2-by-2 submatrix to standard Schur form, | -c | and compute and store the eigenvalues. | -c %--------------------------------------------------------% -c - call slanv2( h( i-1, i-1 ), h( i-1, i ), h( i, i-1 ), - & h( i, i ), wr( i-1 ), wi( i-1 ), wr( i ), wi( i ), - & cs, sn ) - - if( wantt ) then -c -c %-----------------------------------------------------% -c | Apply the transformation to the rest of H and to Z, | -c | as required. 
| -c %-----------------------------------------------------% -c - if( i2.gt.i ) - & call srot( i2-i, h( i-1, i+1 ), ldh, h( i, i+1 ), ldh, - & cs, sn ) - call srot( i-i1-1, h( i1, i-1 ), 1, h( i1, i ), 1, cs, sn ) - sum = cs*z( i-1 ) + sn*z( i ) - z( i ) = cs*z( i ) - sn*z( i-1 ) - z( i-1 ) = sum - end if - end if -c -c %---------------------------------------------------------% -c | Decrement number of remaining iterations, and return to | -c | start of the main loop with new value of I. | -c %---------------------------------------------------------% -c - itn = itn - its - i = l - 1 - go to 10 - - 150 continue - return -c -c %---------------% -c | End of slaqrb | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaitr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaitr.f deleted file mode 100644 index 8d9b1ecfbf..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaitr.f +++ /dev/null @@ -1,840 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: snaitr -c -c\Description: -c Reverse communication interface for applying NP additional steps to -c a K step nonsymmetric Arnoldi factorization. -c -c Input: OP*V_{k} - V_{k}*H = r_{k}*e_{k}^T -c -c with (V_{k}^T)*B*V_{k} = I, (V_{k}^T)*B*r_{k} = 0. -c -c Output: OP*V_{k+p} - V_{k+p}*H = r_{k+p}*e_{k+p}^T -c -c with (V_{k+p}^T)*B*V_{k+p} = I, (V_{k+p}^T)*B*r_{k+p} = 0. -c -c where OP and B are as in snaupd. The B-norm of r_{k+p} is also -c computed and returned. -c -c\Usage: -c call snaitr -c ( IDO, BMAT, N, K, NP, NB, RESID, RNORM, V, LDV, H, LDH, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. 
-c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c This is for the restart phase to force the new -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y, -c IPNTR(3) is the pointer into WORK for B * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c When the routine is used in the "shift-and-invert" mode, the -c vector B * Q is already available and do not need to be -c recompute in forming OP * Q. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. See snaupd. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*M**x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c K Integer. (INPUT) -c Current size of V and H. -c -c NP Integer. (INPUT) -c Number of additional Arnoldi steps to take. -c -c NB Integer. (INPUT) -c Blocksize to be used in the recurrence. -c Only work for NB = 1 right now. The goal is to have a -c program that implement both the block and non-block method. -c -c RESID Real array of length N. (INPUT/OUTPUT) -c On INPUT: RESID contains the residual vector r_{k}. -c On OUTPUT: RESID contains the residual vector r_{k+p}. -c -c RNORM Real scalar. (INPUT/OUTPUT) -c B-norm of the starting residual on input. -c B-norm of the updated residual r_{k+p} on output. -c -c V Real N by K+NP array. (INPUT/OUTPUT) -c On INPUT: V contains the Arnoldi vectors in the first K -c columns. 
-c On OUTPUT: V contains the new NP Arnoldi vectors in the next -c NP columns. The first K columns are unchanged. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Real (K+NP) by (K+NP) array. (INPUT/OUTPUT) -c H is used to store the generated upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORK for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Real work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The calling program should not -c use WORKD as temporary workspace during the iteration !!!!!! -c On input, WORKD(1:N) = B*RESID and is used to save some -c computation at the first step. -c -c INFO Integer. (OUTPUT) -c = 0: Normal exit. -c > 0: Size of the spanning invariant subspace of OP found. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c sgetv0 ARPACK routine to generate the initial vector. 
-c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c smout ARPACK utility routine that prints matrices -c svout ARPACK utility routine that prints vectors. -c slabad LAPACK routine that computes machine constants. -c slamch LAPACK routine that determines machine constants. -c slascl LAPACK routine for careful scaling of a matrix. -c slanhs LAPACK routine that computes various norms of a matrix. -c sgemv Level 2 BLAS routine for matrix vector multiplication. -c saxpy Level 1 BLAS that computes a vector triad. -c sscal Level 1 BLAS that scales a vector. -c scopy Level 1 BLAS that copies one vector to another . -c sdot Level 1 BLAS that computes the scalar product of two vectors. -c snrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: naitr.F SID: 2.4 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c The algorithm implemented is: -c -c restart = .false. -c Given V_{k} = [v_{1}, ..., v_{k}], r_{k}; -c r_{k} contains the initial residual vector even for k = 0; -c Also assume that rnorm = || B*r_{k} || and B*r_{k} are already -c computed by the calling program. -c -c betaj = rnorm ; p_{k+1} = B*r_{k} ; -c For j = k+1, ..., k+np Do -c 1) if ( betaj < tol ) stop or restart depending on j. -c ( At present tol is zero ) -c if ( restart ) generate a new starting vector. -c 2) v_{j} = r(j-1)/betaj; V_{j} = [V_{j-1}, v_{j}]; -c p_{j} = p_{j}/betaj -c 3) r_{j} = OP*v_{j} where OP is defined as in snaupd -c For shift-invert mode p_{j} = B*v_{j} is already available. -c wnorm = || OP*v_{j} || -c 4) Compute the j-th step residual vector. 
-c w_{j} = V_{j}^T * B * OP * v_{j} -c r_{j} = OP*v_{j} - V_{j} * w_{j} -c H(:,j) = w_{j}; -c H(j,j-1) = rnorm -c rnorm = || r_(j) || -c If (rnorm > 0.717*wnorm) accept step and go back to 1) -c 5) Re-orthogonalization step: -c s = V_{j}'*B*r_{j} -c r_{j} = r_{j} - V_{j}*s; rnorm1 = || r_{j} || -c alphaj = alphaj + s_{j}; -c 6) Iterative refinement step: -c If (rnorm1 > 0.717*rnorm) then -c rnorm = rnorm1 -c accept step and go back to 1) -c Else -c rnorm = rnorm1 -c If this is the first time in step 6), go to 5) -c Else r_{j} lies in the span of V_{j} numerically. -c Set r_{j} = 0 and rnorm = 0; go to 1) -c EndIf -c End Do -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine snaitr - & (ido, bmat, n, k, np, nb, resid, rnorm, v, ldv, h, ldh, - & ipntr, workd, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - integer ido, info, k, ldh, ldv, n, nb, np - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Real - & h(ldh,k+np), resid(n), v(ldv,k+np), workd(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical first, orth1, orth2, rstart, step3, step4 - integer ierr, i, infol, ipj, irj, ivj, iter, itry, j, msglvl, - & jj - Real - & betaj, ovfl, temp1, rnorm1, smlnum, tst1, ulp, unfl, - & wnorm - save first, orth1, orth2, rstart, step3, step4, - & ierr, ipj, irj, ivj, iter, itry, j, msglvl, ovfl, - & betaj, rnorm1, smlnum, ulp, unfl, wnorm -c -c %-----------------------% -c | Local Array Arguments | -c %-----------------------% -c - Real - & 
xtemp(2) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external saxpy, scopy, sscal, sgemv, sgetv0, slabad, - & svout, smout, ivout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & sdot, snrm2, slanhs, slamch - external sdot, snrm2, slanhs, slamch -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs, sqrt -c -c %-----------------% -c | Data statements | -c %-----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------% -c | Set machine-dependent constants for the | -c | the splitting and deflation criterion. | -c | If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine slahqr | -c %-----------------------------------------% -c - unfl = slamch( 'safe minimum' ) - ovfl = one / unfl - call slabad( unfl, ovfl ) - ulp = slamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mnaitr -c -c %------------------------------% -c | Initial call to this routine | -c %------------------------------% -c - info = 0 - step3 = .false. - step4 = .false. - rstart = .false. - orth1 = .false. - orth2 = .false. - j = k + 1 - ipj = 1 - irj = ipj + n - ivj = irj + n - end if -c -c %-------------------------------------------------% -c | When in reverse communication mode one of: | -c | STEP3, STEP4, ORTH1, ORTH2, RSTART | -c | will be .true. when .... | -c | STEP3: return from computing OP*v_{j}. 
| -c | STEP4: return from computing B-norm of OP*v_{j} | -c | ORTH1: return from computing B-norm of r_{j+1} | -c | ORTH2: return from computing B-norm of | -c | correction to the residual vector. | -c | RSTART: return from OP computations needed by | -c | sgetv0. | -c %-------------------------------------------------% -c - if (step3) go to 50 - if (step4) go to 60 - if (orth1) go to 70 - if (orth2) go to 90 - if (rstart) go to 30 -c -c %-----------------------------% -c | Else this is the first step | -c %-----------------------------% -c -c %--------------------------------------------------------------% -c | | -c | A R N O L D I I T E R A T I O N L O O P | -c | | -c | Note: B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | -c %--------------------------------------------------------------% - - 1000 continue -c - if (msglvl .gt. 1) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: generating Arnoldi vector number') - call svout (logfil, 1, rnorm, ndigit, - & '_naitr: B-norm of the current residual is') - end if -c -c %---------------------------------------------------% -c | STEP 1: Check if the B norm of j-th residual | -c | vector is zero. Equivalent to determing whether | -c | an exact j-step Arnoldi factorization is present. | -c %---------------------------------------------------% -c - betaj = rnorm - if (rnorm .gt. zero) go to 40 -c -c %---------------------------------------------------% -c | Invariant subspace found, generate a new starting | -c | vector which is orthogonal to the current Arnoldi | -c | basis and continue the iteration. | -c %---------------------------------------------------% -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: ****** RESTART AT STEP ******') - end if -c -c %---------------------------------------------% -c | ITRY is the loop variable that controls the | -c | maximum amount of times that a restart is | -c | attempted. 
NRSTRT is used by stat.h | -c %---------------------------------------------% -c - betaj = zero - nrstrt = nrstrt + 1 - itry = 1 - 20 continue - rstart = .true. - ido = 0 - 30 continue -c -c %--------------------------------------% -c | If in reverse communication mode and | -c | RSTART = .true. flow returns here. | -c %--------------------------------------% -c - call sgetv0 (ido, bmat, itry, .false., n, j, v, ldv, - & resid, rnorm, ipntr, workd, ierr) - if (ido .ne. 99) go to 9000 - if (ierr .lt. 0) then - itry = itry + 1 - if (itry .le. 3) go to 20 -c -c %------------------------------------------------% -c | Give up after several restart attempts. | -c | Set INFO to the size of the invariant subspace | -c | which spans OP and exit. | -c %------------------------------------------------% -c - info = j - 1 - call second (t1) - tnaitr = tnaitr + (t1 - t0) - ido = 99 - go to 9000 - end if -c - 40 continue -c -c %---------------------------------------------------------% -c | STEP 2: v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm | -c | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | -c | when reciprocating a small RNORM, test against lower | -c | machine bound. | -c %---------------------------------------------------------% -c - call scopy (n, resid, 1, v(1,j), 1) - if (rnorm .ge. unfl) then - temp1 = one / rnorm - call sscal (n, temp1, v(1,j), 1) - call sscal (n, temp1, workd(ipj), 1) - else -c -c %-----------------------------------------% -c | To scale both v_{j} and p_{j} carefully | -c | use LAPACK routine SLASCL | -c %-----------------------------------------% -c - call slascl ('General', i, i, rnorm, one, n, 1, - & v(1,j), n, infol) - call slascl ('General', i, i, rnorm, one, n, 1, - & workd(ipj), n, infol) - end if -c -c %------------------------------------------------------% -c | STEP 3: r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | -c | Note that this is not quite yet r_{j}. 
See STEP 4 | -c %------------------------------------------------------% -c - step3 = .true. - nopx = nopx + 1 - call second (t2) - call scopy (n, v(1,j), 1, workd(ivj), 1) - ipntr(1) = ivj - ipntr(2) = irj - ipntr(3) = ipj - ido = 1 -c -c %-----------------------------------% -c | Exit in order to compute OP*v_{j} | -c %-----------------------------------% -c - go to 9000 - 50 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IRJ:IRJ+N-1) := OP*v_{j} | -c | if step3 = .true. | -c %----------------------------------% -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) - - step3 = .false. -c -c %------------------------------------------% -c | Put another copy of OP*v_{j} into RESID. | -c %------------------------------------------% -c - call scopy (n, workd(irj), 1, resid, 1) -c -c %---------------------------------------% -c | STEP 4: Finish extending the Arnoldi | -c | factorization to length j. | -c %---------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - step4 = .true. - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-------------------------------------% -c | Exit in order to compute B*OP*v_{j} | -c %-------------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd(ipj), 1) - end if - 60 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j} | -c | if step4 = .true. | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - step4 = .false. -c -c %-------------------------------------% -c | The following is needed for STEP 5. | -c | Compute the B-norm of OP*v_{j}. | -c %-------------------------------------% -c - if (bmat .eq. 'G') then - wnorm = sdot (n, resid, 1, workd(ipj), 1) - wnorm = sqrt(abs(wnorm)) - else if (bmat .eq. 
'I') then - wnorm = snrm2(n, resid, 1) - end if -c -c %-----------------------------------------% -c | Compute the j-th residual corresponding | -c | to the j step factorization. | -c | Use Classical Gram Schmidt and compute: | -c | w_{j} <- V_{j}^T * B * OP * v_{j} | -c | r_{j} <- OP*v_{j} - V_{j} * w_{j} | -c %-----------------------------------------% -c -c -c %------------------------------------------% -c | Compute the j Fourier coefficients w_{j} | -c | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}. | -c %------------------------------------------% -c - call sgemv ('T', n, j, one, v, ldv, workd(ipj), 1, - & zero, h(1,j), 1) -c -c %--------------------------------------% -c | Orthogonalize r_{j} against V_{j}. | -c | RESID contains OP*v_{j}. See STEP 3. | -c %--------------------------------------% -c - call sgemv ('N', n, j, -one, v, ldv, h(1,j), 1, - & one, resid, 1) -c - if (j .gt. 1) h(j,j-1) = betaj -c - call second (t4) -c - orth1 = .true. -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*r_{j} | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd(ipj), 1) - end if - 70 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH1 = .true. | -c | WORKD(IPJ:IPJ+N-1) := B*r_{j}. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - orth1 = .false. -c -c %------------------------------% -c | Compute the B-norm of r_{j}. | -c %------------------------------% -c - if (bmat .eq. 'G') then - rnorm = sdot (n, resid, 1, workd(ipj), 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 
'I') then - rnorm = snrm2(n, resid, 1) - end if -c -c %-----------------------------------------------------------% -c | STEP 5: Re-orthogonalization / Iterative refinement phase | -c | Maximum NITER_ITREF tries. | -c | | -c | s = V_{j}^T * B * r_{j} | -c | r_{j} = r_{j} - V_{j}*s | -c | alphaj = alphaj + s_{j} | -c | | -c | The stopping criteria used for iterative refinement is | -c | discussed in Parlett's book SEP, page 107 and in Gragg & | -c | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990. | -c | Determine if we need to correct the residual. The goal is | -c | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} || | -c | The following test determines whether the sine of the | -c | angle between OP*x and the computed residual is less | -c | than or equal to 0.717. | -c %-----------------------------------------------------------% -c - if (rnorm .gt. 0.717*wnorm) go to 100 - iter = 0 - nrorth = nrorth + 1 -c -c %---------------------------------------------------% -c | Enter the Iterative refinement phase. If further | -c | refinement is necessary, loop back here. The loop | -c | variable is ITER. Perform a step of Classical | -c | Gram-Schmidt using all the Arnoldi vectors V_{j} | -c %---------------------------------------------------% -c - 80 continue -c - if (msglvl .gt. 2) then - xtemp(1) = wnorm - xtemp(2) = rnorm - call svout (logfil, 2, xtemp, ndigit, - & '_naitr: re-orthonalization; wnorm and rnorm are') - call svout (logfil, j, h(1,j), ndigit, - & '_naitr: j-th column of H') - end if -c -c %----------------------------------------------------% -c | Compute V_{j}^T * B * r_{j}. | -c | WORKD(IRJ:IRJ+J-1) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1). | -c %----------------------------------------------------% -c - call sgemv ('T', n, j, one, v, ldv, workd(ipj), 1, - & zero, workd(irj), 1) -c -c %---------------------------------------------% -c | Compute the correction to the residual: | -c | r_{j} = r_{j} - V_{j} * WORKD(IRJ:IRJ+J-1). 
| -c | The correction to H is v(:,1:J)*H(1:J,1:J) | -c | + v(:,1:J)*WORKD(IRJ:IRJ+J-1)*e'_j. | -c %---------------------------------------------% -c - call sgemv ('N', n, j, -one, v, ldv, workd(irj), 1, - & one, resid, 1) - call saxpy (j, one, workd(irj), 1, h(1,j), 1) -c - orth2 = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-----------------------------------% -c | Exit in order to compute B*r_{j}. | -c | r_{j} is the corrected residual. | -c %-----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd(ipj), 1) - end if - 90 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH2 = .true. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c -c %-----------------------------------------------------% -c | Compute the B-norm of the corrected residual r_{j}. | -c %-----------------------------------------------------% -c - if (bmat .eq. 'G') then - rnorm1 = sdot (n, resid, 1, workd(ipj), 1) - rnorm1 = sqrt(abs(rnorm1)) - else if (bmat .eq. 'I') then - rnorm1 = snrm2(n, resid, 1) - end if -c - if (msglvl .gt. 0 .and. iter .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: Iterative refinement for Arnoldi residual') - if (msglvl .gt. 2) then - xtemp(1) = rnorm - xtemp(2) = rnorm1 - call svout (logfil, 2, xtemp, ndigit, - & '_naitr: iterative refinement ; rnorm and rnorm1 are') - end if - end if -c -c %-----------------------------------------% -c | Determine if we need to perform another | -c | step of re-orthogonalization. | -c %-----------------------------------------% -c - if (rnorm1 .gt. 0.717*rnorm) then -c -c %---------------------------------------% -c | No need for further refinement. 
| -c | The cosine of the angle between the | -c | corrected residual vector and the old | -c | residual vector is greater than 0.717 | -c | In other words the corrected residual | -c | and the old residual vector share an | -c | angle of less than arcCOS(0.717) | -c %---------------------------------------% -c - rnorm = rnorm1 -c - else -c -c %-------------------------------------------% -c | Another step of iterative refinement step | -c | is required. NITREF is used by stat.h | -c %-------------------------------------------% -c - nitref = nitref + 1 - rnorm = rnorm1 - iter = iter + 1 - if (iter .le. 1) go to 80 -c -c %-------------------------------------------------% -c | Otherwise RESID is numerically in the span of V | -c %-------------------------------------------------% -c - do 95 jj = 1, n - resid(jj) = zero - 95 continue - rnorm = zero - end if -c -c %----------------------------------------------% -c | Branch here directly if iterative refinement | -c | wasn't necessary or after at most NITER_REF | -c | steps of iterative refinement. | -c %----------------------------------------------% -c - 100 continue -c - rstart = .false. - orth2 = .false. -c - call second (t5) - titref = titref + (t5 - t4) -c -c %------------------------------------% -c | STEP 6: Update j = j+1; Continue | -c %------------------------------------% -c - j = j + 1 - if (j .gt. k+np) then - call second (t1) - tnaitr = tnaitr + (t1 - t0) - ido = 99 - do 110 i = max(1,k), k+np-1 -c -c %--------------------------------------------% -c | Check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine slahqr | -c %--------------------------------------------% -c - tst1 = abs( h( i, i ) ) + abs( h( i+1, i+1 ) ) - if( tst1.eq.zero ) - & tst1 = slanhs( '1', k+np, h, ldh, workd(n+1) ) - if( abs( h( i+1,i ) ).le.max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 110 continue -c - if (msglvl .gt. 
2) then - call smout (logfil, k+np, k+np, h, ldh, ndigit, - & '_naitr: Final upper Hessenberg matrix H of order K+NP') - end if -c - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Loop back to extend the factorization by another step. | -c %--------------------------------------------------------% -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 9000 continue - return -c -c %---------------% -c | End of snaitr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snapps.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snapps.f deleted file mode 100644 index 0ae94bf846..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snapps.f +++ /dev/null @@ -1,647 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: snapps -c -c\Description: -c Given the Arnoldi factorization -c -c A*V_{k} - V_{k}*H_{k} = r_{k+p}*e_{k+p}^T, -c -c apply NP implicit shifts resulting in -c -c A*(V_{k}*Q) - (V_{k}*Q)*(Q^T* H_{k}*Q) = r_{k+p}*e_{k+p}^T * Q -c -c where Q is an orthogonal matrix which is the product of rotations -c and reflections resulting from the NP bulge chage sweeps. -c The updated Arnoldi factorization becomes: -c -c A*VNEW_{k} - VNEW_{k}*HNEW_{k} = rnew_{k}*e_{k}^T. -c -c\Usage: -c call snapps -c ( N, KEV, NP, SHIFTR, SHIFTI, V, LDV, H, LDH, RESID, Q, LDQ, -c WORKL, WORKD ) -c -c\Arguments -c N Integer. (INPUT) -c Problem size, i.e. size of matrix A. -c -c KEV Integer. (INPUT/OUTPUT) -c KEV+NP is the size of the input matrix H. -c KEV is the size of the updated matrix HNEW. KEV is only -c updated on ouput when fewer than NP shifts are applied in -c order to keep the conjugate pair together. -c -c NP Integer. 
(INPUT) -c Number of implicit shifts to be applied. -c -c SHIFTR, Real array of length NP. (INPUT) -c SHIFTI Real and imaginary part of the shifts to be applied. -c Upon, entry to snapps, the shifts must be sorted so that the -c conjugate pairs are in consecutive locations. -c -c V Real N by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, V contains the current KEV+NP Arnoldi vectors. -c On OUTPUT, V contains the updated KEV Arnoldi vectors -c in the first KEV columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Real (KEV+NP) by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, H contains the current KEV+NP by KEV+NP upper -c Hessenber matrix of the Arnoldi factorization. -c On OUTPUT, H contains the updated KEV by KEV upper Hessenberg -c matrix in the KEV leading submatrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RESID Real array of length N. (INPUT/OUTPUT) -c On INPUT, RESID contains the the residual vector r_{k+p}. -c On OUTPUT, RESID is the update residual vector rnew_{k} -c in the first KEV locations. -c -c Q Real KEV+NP by KEV+NP work array. (WORKSPACE) -c Work array used to accumulate the rotations and reflections -c during the bulge chase sweep. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Real work array of length (KEV+NP). (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c WORKD Real work array of length 2*N. (WORKSPACE) -c Distributed array used in the application of the accumulated -c orthogonal matrix Q. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. 
-c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c smout ARPACK utility routine that prints matrices. -c svout ARPACK utility routine that prints vectors. -c slabad LAPACK routine that computes machine constants. -c slacpy LAPACK matrix copy routine. -c slamch LAPACK routine that determines machine constants. -c slanhs LAPACK routine that computes various norms of a matrix. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c slarf LAPACK routine that applies Householder reflection to -c a matrix. -c slarfg LAPACK Householder reflection construction routine. -c slartg LAPACK Givens rotation construction routine. -c slaset LAPACK matrix initialization routine. -c sgemv Level 2 BLAS routine for matrix vector multiplication. -c saxpy Level 1 BLAS that computes a vector triad. -c scopy Level 1 BLAS that copies one vector to another . -c sscal Level 1 BLAS that scales a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: napps.F SID: 2.4 DATE OF SID: 3/28/97 RELEASE: 2 -c -c\Remarks -c 1. In this version, each shift is applied to all the sublocks of -c the Hessenberg matrix H and not just to the submatrix that it -c comes from. Deflation as in LAPACK routine slahqr (QR algorithm -c for upper Hessenberg matrices ) is used. -c The subdiagonals of H are enforced to be non-negative. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine snapps - & ( n, kev, np, shiftr, shifti, v, ldv, h, ldh, resid, q, ldq, - & workl, workd ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer kev, ldh, ldq, ldv, n, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & h(ldh,kev+np), resid(n), shifti(np), shiftr(np), - & v(ldv,kev+np), q(ldq,kev+np), workd(2*n), workl(kev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - integer i, iend, ir, istart, j, jj, kplusp, msglvl, nr - logical cconj, first - Real - & c, f, g, h11, h12, h21, h22, h32, ovfl, r, s, sigmai, - & sigmar, smlnum, ulp, unfl, u(3), t, tau, tst1 - save first, ovfl, smlnum, ulp, unfl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external saxpy, scopy, sscal, slacpy, slarfg, slarf, - & slaset, slabad, second, slartg -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slamch, slanhs, slapy2 - external slamch, slanhs, slapy2 -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs, max, min -c -c %----------------% -c | Data statments | -c %----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------------% -c | Set machine-dependent constants for the | -c | stopping criterion. 
If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine slahqr | -c %-----------------------------------------------% -c - unfl = slamch( 'safe minimum' ) - ovfl = one / unfl - call slabad( unfl, ovfl ) - ulp = slamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mnapps - kplusp = kev + np -c -c %--------------------------------------------% -c | Initialize Q to the identity to accumulate | -c | the rotations and reflections | -c %--------------------------------------------% -c - call slaset ('All', kplusp, kplusp, zero, one, q, ldq) -c -c %----------------------------------------------% -c | Quick return if there are no shifts to apply | -c %----------------------------------------------% -c - if (np .eq. 0) go to 9000 -c -c %----------------------------------------------% -c | Chase the bulge with the application of each | -c | implicit shift. Each shift is applied to the | -c | whole matrix including each block. | -c %----------------------------------------------% -c - cconj = .false. - do 110 jj = 1, np - sigmar = shiftr(jj) - sigmai = shifti(jj) -c - if (msglvl .gt. 2 ) then - call ivout (logfil, 1, jj, ndigit, - & '_napps: shift number.') - call svout (logfil, 1, sigmar, ndigit, - & '_napps: The real part of the shift ') - call svout (logfil, 1, sigmai, ndigit, - & '_napps: The imaginary part of the shift ') - end if -c -c %-------------------------------------------------% -c | The following set of conditionals is necessary | -c | in order that complex conjugate pairs of shifts | -c | are applied together or not at all. | -c %-------------------------------------------------% -c - if ( cconj ) then -c -c %-----------------------------------------% -c | cconj = .true. 
means the previous shift | -c | had non-zero imaginary part. | -c %-----------------------------------------% -c - cconj = .false. - go to 110 - else if ( jj .lt. np .and. abs( sigmai ) .gt. zero ) then -c -c %------------------------------------% -c | Start of a complex conjugate pair. | -c %------------------------------------% -c - cconj = .true. - else if ( jj .eq. np .and. abs( sigmai ) .gt. zero ) then -c -c %----------------------------------------------% -c | The last shift has a nonzero imaginary part. | -c | Don't apply it; thus the order of the | -c | compressed H is order KEV+1 since only np-1 | -c | were applied. | -c %----------------------------------------------% -c - kev = kev + 1 - go to 110 - end if - istart = 1 - 20 continue -c -c %--------------------------------------------------% -c | if sigmai = 0 then | -c | Apply the jj-th shift ... | -c | else | -c | Apply the jj-th and (jj+1)-th together ... | -c | (Note that jj < np at this point in the code) | -c | end | -c | to the current block of H. The next do loop | -c | determines the current block ; | -c %--------------------------------------------------% -c - do 30 i = istart, kplusp-1 -c -c %----------------------------------------% -c | Check for splitting and deflation. Use | -c | a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine slahqr | -c %----------------------------------------% -c - tst1 = abs( h( i, i ) ) + abs( h( i+1, i+1 ) ) - if( tst1.eq.zero ) - & tst1 = slanhs( '1', kplusp-jj+1, h, ldh, workl ) - if( abs( h( i+1,i ) ).le.max( ulp*tst1, smlnum ) ) then - if (msglvl .gt. 0) then - call ivout (logfil, 1, i, ndigit, - & '_napps: matrix splitting at row/column no.') - call ivout (logfil, 1, jj, ndigit, - & '_napps: matrix splitting with shift number.') - call svout (logfil, 1, h(i+1,i), ndigit, - & '_napps: off diagonal element.') - end if - iend = i - h(i+1,i) = zero - go to 40 - end if - 30 continue - iend = kplusp - 40 continue -c - if (msglvl .gt. 
2) then - call ivout (logfil, 1, istart, ndigit, - & '_napps: Start of current block ') - call ivout (logfil, 1, iend, ndigit, - & '_napps: End of current block ') - end if -c -c %------------------------------------------------% -c | No reason to apply a shift to block of order 1 | -c %------------------------------------------------% -c - if ( istart .eq. iend ) go to 100 -c -c %------------------------------------------------------% -c | If istart + 1 = iend then no reason to apply a | -c | complex conjugate pair of shifts on a 2 by 2 matrix. | -c %------------------------------------------------------% -c - if ( istart + 1 .eq. iend .and. abs( sigmai ) .gt. zero ) - & go to 100 -c - h11 = h(istart,istart) - h21 = h(istart+1,istart) - if ( abs( sigmai ) .le. zero ) then -c -c %---------------------------------------------% -c | Real-valued shift ==> apply single shift QR | -c %---------------------------------------------% -c - f = h11 - sigmar - g = h21 -c - do 80 i = istart, iend-1 -c -c %-----------------------------------------------------% -c | Contruct the plane rotation G to zero out the bulge | -c %-----------------------------------------------------% -c - call slartg (f, g, c, s, r) - if (i .gt. istart) then -c -c %-------------------------------------------% -c | The following ensures that h(1:iend-1,1), | -c | the first iend-2 off diagonal of elements | -c | H, remain non negative. | -c %-------------------------------------------% -c - if (r .lt. 
zero) then - r = -r - c = -c - s = -s - end if - h(i,i-1) = r - h(i+1,i-1) = zero - end if -c -c %---------------------------------------------% -c | Apply rotation to the left of H; H <- G'*H | -c %---------------------------------------------% -c - do 50 j = i, kplusp - t = c*h(i,j) + s*h(i+1,j) - h(i+1,j) = -s*h(i,j) + c*h(i+1,j) - h(i,j) = t - 50 continue -c -c %---------------------------------------------% -c | Apply rotation to the right of H; H <- H*G | -c %---------------------------------------------% -c - do 60 j = 1, min(i+2,iend) - t = c*h(j,i) + s*h(j,i+1) - h(j,i+1) = -s*h(j,i) + c*h(j,i+1) - h(j,i) = t - 60 continue -c -c %----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G | -c %----------------------------------------------------% -c - do 70 j = 1, min( i+jj, kplusp ) - t = c*q(j,i) + s*q(j,i+1) - q(j,i+1) = - s*q(j,i) + c*q(j,i+1) - q(j,i) = t - 70 continue -c -c %---------------------------% -c | Prepare for next rotation | -c %---------------------------% -c - if (i .lt. iend-1) then - f = h(i+1,i) - g = h(i+2,i) - end if - 80 continue -c -c %-----------------------------------% -c | Finished applying the real shift. 
| -c %-----------------------------------% -c - else -c -c %----------------------------------------------------% -c | Complex conjugate shifts ==> apply double shift QR | -c %----------------------------------------------------% -c - h12 = h(istart,istart+1) - h22 = h(istart+1,istart+1) - h32 = h(istart+2,istart+1) -c -c %---------------------------------------------------------% -c | Compute 1st column of (H - shift*I)*(H - conj(shift)*I) | -c %---------------------------------------------------------% -c - s = 2.0*sigmar - t = slapy2 ( sigmar, sigmai ) - u(1) = ( h11 * (h11 - s) + t * t ) / h21 + h12 - u(2) = h11 + h22 - s - u(3) = h32 -c - do 90 i = istart, iend-1 -c - nr = min ( 3, iend-i+1 ) -c -c %-----------------------------------------------------% -c | Construct Householder reflector G to zero out u(1). | -c | G is of the form I - tau*( 1 u )' * ( 1 u' ). | -c %-----------------------------------------------------% -c - call slarfg ( nr, u(1), u(2), 1, tau ) -c - if (i .gt. istart) then - h(i,i-1) = u(1) - h(i+1,i-1) = zero - if (i .lt. iend-1) h(i+2,i-1) = zero - end if - u(1) = one -c -c %--------------------------------------% -c | Apply the reflector to the left of H | -c %--------------------------------------% -c - call slarf ('Left', nr, kplusp-i+1, u, 1, tau, - & h(i,i), ldh, workl) -c -c %---------------------------------------% -c | Apply the reflector to the right of H | -c %---------------------------------------% -c - ir = min ( i+3, iend ) - call slarf ('Right', ir, nr, u, 1, tau, - & h(1,i), ldh, workl) -c -c %-----------------------------------------------------% -c | Accumulate the reflector in the matrix Q; Q <- Q*G | -c %-----------------------------------------------------% -c - call slarf ('Right', kplusp, nr, u, 1, tau, - & q(1,i), ldq, workl) -c -c %----------------------------% -c | Prepare for next reflector | -c %----------------------------% -c - if (i .lt. iend-1) then - u(1) = h(i+1,i) - u(2) = h(i+2,i) - if (i .lt. 
iend-2) u(3) = h(i+3,i) - end if -c - 90 continue -c -c %--------------------------------------------% -c | Finished applying a complex pair of shifts | -c | to the current block | -c %--------------------------------------------% -c - end if -c - 100 continue -c -c %---------------------------------------------------------% -c | Apply the same shift to the next block if there is any. | -c %---------------------------------------------------------% -c - istart = iend + 1 - if (iend .lt. kplusp) go to 20 -c -c %---------------------------------------------% -c | Loop back to the top to get the next shift. | -c %---------------------------------------------% -c - 110 continue -c -c %--------------------------------------------------% -c | Perform a similarity transformation that makes | -c | sure that H will have non negative sub diagonals | -c %--------------------------------------------------% -c - do 120 j=1,kev - if ( h(j+1,j) .lt. zero ) then - call sscal( kplusp-j+1, -one, h(j+1,j), ldh ) - call sscal( min(j+2, kplusp), -one, h(1,j+1), 1 ) - call sscal( min(j+np+1,kplusp), -one, q(1,j+1), 1 ) - end if - 120 continue -c - do 130 i = 1, kev -c -c %--------------------------------------------% -c | Final check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine slahqr | -c %--------------------------------------------% -c - tst1 = abs( h( i, i ) ) + abs( h( i+1, i+1 ) ) - if( tst1.eq.zero ) - & tst1 = slanhs( '1', kev, h, ldh, workl ) - if( h( i+1,i ) .le. max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 130 continue -c -c %-------------------------------------------------% -c | Compute the (kev+1)-st column of (V*Q) and | -c | temporarily store the result in WORKD(N+1:2*N). | -c | This is needed in the residual update since we | -c | cannot GUARANTEE that the corresponding entry | -c | of H would be zero as in exact arithmetic. 
| -c %-------------------------------------------------% -c - if (h(kev+1,kev) .gt. zero) - & call sgemv ('N', n, kplusp, one, v, ldv, q(1,kev+1), 1, zero, - & workd(n+1), 1) -c -c %----------------------------------------------------------% -c | Compute column 1 to kev of (V*Q) in backward order | -c | taking advantage of the upper Hessenberg structure of Q. | -c %----------------------------------------------------------% -c - do 140 i = 1, kev - call sgemv ('N', n, kplusp-i+1, one, v, ldv, - & q(1,kev-i+1), 1, zero, workd, 1) - call scopy (n, workd, 1, v(1,kplusp-i+1), 1) - 140 continue -c -c %-------------------------------------------------% -c | Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). | -c %-------------------------------------------------% -c - call slacpy ('A', n, kev, v(1,kplusp-kev+1), ldv, v, ldv) -c -c %--------------------------------------------------------------% -c | Copy the (kev+1)-st column of (V*Q) in the appropriate place | -c %--------------------------------------------------------------% -c - if (h(kev+1,kev) .gt. zero) - & call scopy (n, workd(n+1), 1, v(1,kev+1), 1) -c -c %-------------------------------------% -c | Update the residual vector: | -c | r <- sigmak*r + betak*v(:,kev+1) | -c | where | -c | sigmak = (e_{kplusp}'*Q)*e_{kev} | -c | betak = e_{kev+1}'*H*e_{kev} | -c %-------------------------------------% -c - call sscal (n, q(kplusp,kev), resid, 1) - if (h(kev+1,kev) .gt. zero) - & call saxpy (n, h(kev+1,kev), v(1,kev+1), 1, resid, 1) -c - if (msglvl .gt. 1) then - call svout (logfil, 1, q(kplusp,kev), ndigit, - & '_napps: sigmak = (e_{kev+p}^T*Q)*e_{kev}') - call svout (logfil, 1, h(kev+1,kev), ndigit, - & '_napps: betak = e_{kev+1}^T*H*e_{kev}') - call ivout (logfil, 1, kev, ndigit, - & '_napps: Order of the final Hessenberg matrix ') - if (msglvl .gt. 
2) then - call smout (logfil, kev, kev, h, ldh, ndigit, - & '_napps: updated Hessenberg matrix H for next iteration') - end if -c - end if -c - 9000 continue - call second (t1) - tnapps = tnapps + (t1 - t0) -c - return -c -c %---------------% -c | End of snapps | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaup2.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaup2.f deleted file mode 100644 index 39b1283939..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaup2.f +++ /dev/null @@ -1,847 +0,0 @@ -c\BeginDoc -c -c\Name: snaup2 -c -c\Description: -c Intermediate level interface called by snaupd. -c -c\Usage: -c call snaup2 -c ( IDO, BMAT, N, WHICH, NEV, NP, TOL, RESID, MODE, IUPD, -c ISHIFT, MXITER, V, LDV, H, LDH, RITZR, RITZI, BOUNDS, -c Q, LDQ, WORKL, IPNTR, WORKD, INFO ) -c -c\Arguments -c -c IDO, BMAT, N, WHICH, NEV, TOL, RESID: same as defined in snaupd. -c MODE, ISHIFT, MXITER: see the definition of IPARAM in snaupd. -c -c NP Integer. (INPUT/OUTPUT) -c Contains the number of implicit shifts to apply during -c each Arnoldi iteration. -c If ISHIFT=1, NP is adjusted dynamically at each iteration -c to accelerate convergence and prevent stagnation. -c This is also roughly equal to the number of matrix-vector -c products (involving the operator OP) per Arnoldi iteration. -c The logic for adjusting is contained within the current -c subroutine. -c If ISHIFT=0, NP is the number of shifts the user needs -c to provide via reverse comunication. 0 < NP < NCV-NEV. -c NP may be less than NCV-NEV for two reasons. The first, is -c to keep complex conjugate pairs of "wanted" Ritz values -c together. The second, is that a leading block of the current -c upper Hessenberg matrix has split off and contains "unwanted" -c Ritz values. -c Upon termination of the IRA iteration, NP contains the number -c of "converged" wanted Ritz values. -c -c IUPD Integer. (INPUT) -c IUPD .EQ. 
0: use explicit restart instead implicit update. -c IUPD .NE. 0: use implicit update. -c -c V Real N by (NEV+NP) array. (INPUT/OUTPUT) -c The Arnoldi basis vectors are returned in the first NEV -c columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Real (NEV+NP) by (NEV+NP) array. (OUTPUT) -c H is used to store the generated upper Hessenberg matrix -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZR, Real arrays of length NEV+NP. (OUTPUT) -c RITZI RITZR(1:NEV) (resp. RITZI(1:NEV)) contains the real (resp. -c imaginary) part of the computed Ritz values of OP. -c -c BOUNDS Real array of length NEV+NP. (OUTPUT) -c BOUNDS(1:NEV) contain the error bounds corresponding to -c the computed Ritz values. -c -c Q Real (NEV+NP) by (NEV+NP) array. (WORKSPACE) -c Private (replicated) work array used to accumulate the -c rotation in the shift application step. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Real work array of length at least -c (NEV+NP)**2 + 3*(NEV+NP). (INPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. It is used in shifts calculation, shifts -c application and convergence checking. -c -c On exit, the last 3*(NEV+NP) locations of WORKL contain -c the Ritz values (real,imaginary) and associated Ritz -c estimates of the current Hessenberg matrix. They are -c listed in the same order as returned from sneigh. -c -c If ISHIFT .EQ. O and IDO .EQ. 3, the first 2*NP locations -c of WORKL are used in reverse communication to hold the user -c supplied shifts. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORKD for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. 
-c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Real work array of length 3*N. (WORKSPACE) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note in DNAUPD. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal return. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. -c NP returns the number of converged Ritz values. -c = 2: No shifts could be applied. -c = -8: Error return from LAPACK eigenvalue calculation; -c This should never happen. -c = -9: Starting vector is zero. -c = -9999: Could not build an Arnoldi factorization. -c Size that was built in returned in NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c sgetv0 ARPACK initial vector generation routine. -c snaitr ARPACK Arnoldi factorization routine. -c snapps ARPACK application of implicit shifts routine. -c snconv ARPACK convergence of Ritz values routine. -c sneigh ARPACK compute Ritz values and error bounds routine. 
-c sngets ARPACK reorder Ritz values and error bounds routine. -c ssortc ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c smout ARPACK utility routine that prints matrices -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c scopy Level 1 BLAS that copies one vector to another . -c sdot Level 1 BLAS that computes the scalar product of two vectors. -c snrm2 Level 1 BLAS that computes the norm of a vector. -c sswap Level 1 BLAS that swaps two vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naup2.F SID: 2.8 DATE OF SID: 10/17/00 RELEASE: 2 -c -c\Remarks -c 1. None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine snaup2 - & ( ido, bmat, n, which, nev, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, h, ldh, ritzr, ritzi, bounds, - & q, ldq, workl, ipntr, workd, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ishift, iupd, mode, ldh, ldq, ldv, mxiter, - & n, nev, np - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(13) - Real - & bounds(nev+np), h(ldh,nev+np), q(ldq,nev+np), resid(n), - & ritzi(nev+np), ritzr(nev+np), v(ldv,nev+np), - & workd(3*n), workl( (nev+np)*(nev+np+3) ) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one 
= 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character wprime*2 - logical cnorm , getv0, initv, update, ushift - integer ierr , iter , j , kplusp, msglvl, nconv, - & nevbef, nev0 , np0 , nptemp, numcnv - Real - & rnorm , temp , eps23 - save cnorm , getv0, initv, update, ushift, - & rnorm , iter , eps23, kplusp, msglvl, nconv , - & nevbef, nev0 , np0 , numcnv -c -c %-----------------------% -c | Local array arguments | -c %-----------------------% -c - integer kp(4) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy , sgetv0, snaitr, snconv, sneigh, - & sngets, snapps, svout , ivout , second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & sdot, snrm2, slapy2, slamch - external sdot, snrm2, slapy2, slamch -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic min, max, abs, sqrt -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c - call second (t0) -c - msglvl = mnaup2 -c -c %-------------------------------------% -c | Get the machine dependent constant. | -c %-------------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0) -c - nev0 = nev - np0 = np -c -c %-------------------------------------% -c | kplusp is the bound on the largest | -c | Lanczos factorization built. | -c | nconv is the current number of | -c | "converged" eigenvlues. | -c | iter is the counter on the current | -c | iteration step. | -c %-------------------------------------% -c - kplusp = nev + np - nconv = 0 - iter = 0 -c -c %---------------------------------------% -c | Set flags for computing the first NEV | -c | steps of the Arnoldi factorization. | -c %---------------------------------------% -c - getv0 = .true. - update = .false. - ushift = .false. - cnorm = .false. 
-c - if (info .ne. 0) then -c -c %--------------------------------------------% -c | User provides the initial residual vector. | -c %--------------------------------------------% -c - initv = .true. - info = 0 - else - initv = .false. - end if - end if -c -c %---------------------------------------------% -c | Get a possibly random starting vector and | -c | force it into the range of the operator OP. | -c %---------------------------------------------% -c - 10 continue -c - if (getv0) then - call sgetv0 (ido, bmat, 1, initv, n, 1, v, ldv, resid, rnorm, - & ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (rnorm .eq. zero) then -c -c %-----------------------------------------% -c | The initial vector is zero. Error exit. | -c %-----------------------------------------% -c - info = -9 - go to 1100 - end if - getv0 = .false. - ido = 0 - end if -c -c %-----------------------------------% -c | Back from reverse communication : | -c | continue with update step | -c %-----------------------------------% -c - if (update) go to 20 -c -c %-------------------------------------------% -c | Back from computing user specified shifts | -c %-------------------------------------------% -c - if (ushift) go to 50 -c -c %-------------------------------------% -c | Back from computing residual norm | -c | at the end of the current iteration | -c %-------------------------------------% -c - if (cnorm) go to 100 -c -c %----------------------------------------------------------% -c | Compute the first NEV steps of the Arnoldi factorization | -c %----------------------------------------------------------% -c - call snaitr (ido, bmat, n, 0, nev, mode, resid, rnorm, v, ldv, - & h, ldh, ipntr, workd, info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 
99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if -c -c %--------------------------------------------------------------% -c | | -c | M A I N ARNOLDI I T E R A T I O N L O O P | -c | Each iteration implicitly restarts the Arnoldi | -c | factorization in place. | -c | | -c %--------------------------------------------------------------% -c - 1000 continue -c - iter = iter + 1 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, iter, ndigit, - & '_naup2: **** Start of major iteration number ****') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c | Adjust NP since NEV might have been updated by last call | -c | to the shift application routine snapps. | -c %-----------------------------------------------------------% -c - np = kplusp - nev -c - if (msglvl .gt. 1) then - call ivout (logfil, 1, nev, ndigit, - & '_naup2: The length of the current Arnoldi factorization') - call ivout (logfil, 1, np, ndigit, - & '_naup2: Extend the Arnoldi factorization by') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c %-----------------------------------------------------------% -c - ido = 0 - 20 continue - update = .true. -c - call snaitr (ido , bmat, n , nev, np , mode , resid, - & rnorm, v , ldv, h , ldh, ipntr, workd, - & info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if - update = .false. -c - if (msglvl .gt. 
1) then - call svout (logfil, 1, rnorm, ndigit, - & '_naup2: Corresponding B-norm of the residual') - end if -c -c %--------------------------------------------------------% -c | Compute the eigenvalues and corresponding error bounds | -c | of the current upper Hessenberg matrix. | -c %--------------------------------------------------------% -c - call sneigh (rnorm, kplusp, h, ldh, ritzr, ritzi, bounds, - & q, ldq, workl, ierr) -c - if (ierr .ne. 0) then - info = -8 - go to 1200 - end if -c -c %----------------------------------------------------% -c | Make a copy of eigenvalues and corresponding error | -c | bounds obtained from sneigh. | -c %----------------------------------------------------% -c - call scopy(kplusp, ritzr, 1, workl(kplusp**2+1), 1) - call scopy(kplusp, ritzi, 1, workl(kplusp**2+kplusp+1), 1) - call scopy(kplusp, bounds, 1, workl(kplusp**2+2*kplusp+1), 1) -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The wanted part of the spectrum and corresponding | -c | error bounds are in the last NEV loc. of RITZR, | -c | RITZI and BOUNDS respectively. The variables NEV | -c | and NP may be updated if the NEV-th wanted Ritz | -c | value has a non zero imaginary part. In this case | -c | NEV is increased by one and NP decreased by one. | -c | NOTE: The last two arguments of sngets are no | -c | longer used as of version 2.1. | -c %---------------------------------------------------% -c - nev = nev0 - np = np0 - numcnv = nev - call sngets (ishift, which, nev, np, ritzr, ritzi, - & bounds, workl, workl(np+1)) - if (nev .eq. nev0+1) numcnv = nev0+1 -c -c %-------------------% -c | Convergence test. | -c %-------------------% -c - call scopy (nev, bounds(np+1), 1, workl(2*np+1), 1) - call snconv (nev, ritzr(np+1), ritzi(np+1), workl(2*np+1), - & tol, nconv) -c - if (msglvl .gt. 
2) then - kp(1) = nev - kp(2) = np - kp(3) = numcnv - kp(4) = nconv - call ivout (logfil, 4, kp, ndigit, - & '_naup2: NEV, NP, NUMCNV, NCONV are') - call svout (logfil, kplusp, ritzr, ndigit, - & '_naup2: Real part of the eigenvalues of H') - call svout (logfil, kplusp, ritzi, ndigit, - & '_naup2: Imaginary part of the eigenvalues of H') - call svout (logfil, kplusp, bounds, ndigit, - & '_naup2: Ritz estimates of the current NCV Ritz values') - end if -c -c %---------------------------------------------------------% -c | Count the number of unwanted Ritz values that have zero | -c | Ritz estimates. If any Ritz estimates are equal to zero | -c | then a leading block of H of order equal to at least | -c | the number of Ritz values with zero Ritz estimates has | -c | split off. None of these Ritz values may be removed by | -c | shifting. Decrease NP the number of shifts to apply. If | -c | no shifts may be applied, then prepare to exit | -c %---------------------------------------------------------% -c - nptemp = np - do 30 j=1, nptemp - if (bounds(j) .eq. zero) then - np = np - 1 - nev = nev + 1 - end if - 30 continue -c - if ( (nconv .ge. numcnv) .or. - & (iter .gt. mxiter) .or. - & (np .eq. 0) ) then -c - if (msglvl .gt. 4) then - call svout(logfil, kplusp, workl(kplusp**2+1), ndigit, - & '_naup2: Real part of the eig computed by _neigh:') - call svout(logfil, kplusp, workl(kplusp**2+kplusp+1), - & ndigit, - & '_naup2: Imag part of the eig computed by _neigh:') - call svout(logfil, kplusp, workl(kplusp**2+kplusp*2+1), - & ndigit, - & '_naup2: Ritz eistmates computed by _neigh:') - end if -c -c %------------------------------------------------% -c | Prepare to exit. Put the converged Ritz values | -c | and corresponding bounds in RITZ(1:NCONV) and | -c | BOUNDS(1:NCONV) respectively. Then sort. 
Be | -c | careful when NCONV > NP | -c %------------------------------------------------% -c -c %------------------------------------------% -c | Use h( 3,1 ) as storage to communicate | -c | rnorm to _neupd if needed | -c %------------------------------------------% - - h(3,1) = rnorm -c -c %----------------------------------------------% -c | To be consistent with sngets, we first do a | -c | pre-processing sort in order to keep complex | -c | conjugate pairs together. This is similar | -c | to the pre-processing sort used in sngets | -c | except that the sort is done in the opposite | -c | order. | -c %----------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SR' - if (which .eq. 'SM') wprime = 'LR' - if (which .eq. 'LR') wprime = 'SM' - if (which .eq. 'SR') wprime = 'LM' - if (which .eq. 'LI') wprime = 'SM' - if (which .eq. 'SI') wprime = 'LM' -c - call ssortc (wprime, .true., kplusp, ritzr, ritzi, bounds) -c -c %----------------------------------------------% -c | Now sort Ritz values so that converged Ritz | -c | values appear within the first NEV locations | -c | of ritzr, ritzi and bounds, and the most | -c | desired one appears at the front. | -c %----------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SM' - if (which .eq. 'SM') wprime = 'LM' - if (which .eq. 'LR') wprime = 'SR' - if (which .eq. 'SR') wprime = 'LR' - if (which .eq. 'LI') wprime = 'SI' - if (which .eq. 'SI') wprime = 'LI' -c - call ssortc(wprime, .true., kplusp, ritzr, ritzi, bounds) -c -c %--------------------------------------------------% -c | Scale the Ritz estimate of each Ritz value | -c | by 1 / max(eps23,magnitude of the Ritz value). 
| -c %--------------------------------------------------% -c - do 35 j = 1, numcnv - temp = max(eps23,slapy2(ritzr(j), - & ritzi(j))) - bounds(j) = bounds(j)/temp - 35 continue -c -c %----------------------------------------------------% -c | Sort the Ritz values according to the scaled Ritz | -c | esitmates. This will push all the converged ones | -c | towards the front of ritzr, ritzi, bounds | -c | (in the case when NCONV < NEV.) | -c %----------------------------------------------------% -c - wprime = 'LR' - call ssortc(wprime, .true., numcnv, bounds, ritzr, ritzi) -c -c %----------------------------------------------% -c | Scale the Ritz estimate back to its original | -c | value. | -c %----------------------------------------------% -c - do 40 j = 1, numcnv - temp = max(eps23, slapy2(ritzr(j), - & ritzi(j))) - bounds(j) = bounds(j)*temp - 40 continue -c -c %------------------------------------------------% -c | Sort the converged Ritz values again so that | -c | the "threshold" value appears at the front of | -c | ritzr, ritzi and bound. | -c %------------------------------------------------% -c - call ssortc(which, .true., nconv, ritzr, ritzi, bounds) -c - if (msglvl .gt. 1) then - call svout (logfil, kplusp, ritzr, ndigit, - & '_naup2: Sorted real part of the eigenvalues') - call svout (logfil, kplusp, ritzi, ndigit, - & '_naup2: Sorted imaginary part of the eigenvalues') - call svout (logfil, kplusp, bounds, ndigit, - & '_naup2: Sorted ritz estimates.') - end if -c -c %------------------------------------% -c | Max iterations have been exceeded. | -c %------------------------------------% -c - if (iter .gt. mxiter .and. nconv .lt. numcnv) info = 1 -c -c %---------------------% -c | No shifts to apply. | -c %---------------------% -c - if (np .eq. 0 .and. nconv .lt. numcnv) info = 2 -c - np = nconv - go to 1100 -c - else if ( (nconv .lt. numcnv) .and. (ishift .eq. 
1) ) then -c -c %-------------------------------------------------% -c | Do not have all the requested eigenvalues yet. | -c | To prevent possible stagnation, adjust the size | -c | of NEV. | -c %-------------------------------------------------% -c - nevbef = nev - nev = nev + min(nconv, np/2) - if (nev .eq. 1 .and. kplusp .ge. 6) then - nev = kplusp / 2 - else if (nev .eq. 1 .and. kplusp .gt. 3) then - nev = 2 - end if -c -c %---- Scipy fix ------------------------------------------------ -c | We must keep nev below this value, as otherwise we can get -c | np == 0 (note that sngets below can bump nev by 1). If np == 0, -c | the next call to `snaitr` will write out-of-bounds. -c | - if (nev .gt. kplusp - 2) then - nev = kplusp - 2 - end if -c | -c %---- Scipy fix end -------------------------------------------- -c - np = kplusp - nev -c -c %---------------------------------------% -c | If the size of NEV was just increased | -c | resort the eigenvalues. | -c %---------------------------------------% -c - if (nevbef .lt. nev) - & call sngets (ishift, which, nev, np, ritzr, ritzi, - & bounds, workl, workl(np+1)) -c - end if -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, nconv, ndigit, - & '_naup2: no. of "converged" Ritz values at this iter.') - if (msglvl .gt. 1) then - kp(1) = nev - kp(2) = np - call ivout (logfil, 2, kp, ndigit, - & '_naup2: NEV and NP are') - call svout (logfil, nev, ritzr(np+1), ndigit, - & '_naup2: "wanted" Ritz values -- real part') - call svout (logfil, nev, ritzi(np+1), ndigit, - & '_naup2: "wanted" Ritz values -- imag part') - call svout (logfil, nev, bounds(np+1), ndigit, - & '_naup2: Ritz estimates of the "wanted" values ') - end if - end if -c - if (ishift .eq. 0) then -c -c %-------------------------------------------------------% -c | User specified shifts: reverse comminucation to | -c | compute the shifts. They are returned in the first | -c | 2*NP locations of WORKL. 
| -c %-------------------------------------------------------% -c - ushift = .true. - ido = 3 - go to 9000 - end if -c - 50 continue -c -c %------------------------------------% -c | Back from reverse communication; | -c | User specified shifts are returned | -c | in WORKL(1:2*NP) | -c %------------------------------------% -c - ushift = .false. -c - if ( ishift .eq. 0 ) then -c -c %----------------------------------% -c | Move the NP shifts from WORKL to | -c | RITZR, RITZI to free up WORKL | -c | for non-exact shift case. | -c %----------------------------------% -c - call scopy (np, workl, 1, ritzr, 1) - call scopy (np, workl(np+1), 1, ritzi, 1) - end if -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, np, ndigit, - & '_naup2: The number of shifts to apply ') - call svout (logfil, np, ritzr, ndigit, - & '_naup2: Real part of the shifts') - call svout (logfil, np, ritzi, ndigit, - & '_naup2: Imaginary part of the shifts') - if ( ishift .eq. 1 ) - & call svout (logfil, np, bounds, ndigit, - & '_naup2: Ritz estimates of the shifts') - end if -c -c %---------------------------------------------------------% -c | Apply the NP implicit shifts by QR bulge chasing. | -c | Each shift is applied to the whole upper Hessenberg | -c | matrix H. | -c | The first 2*N locations of WORKD are used as workspace. | -c %---------------------------------------------------------% -c - call snapps (n, nev, np, ritzr, ritzi, v, ldv, - & h, ldh, resid, q, ldq, workl, workd) -c -c %---------------------------------------------% -c | Compute the B-norm of the updated residual. | -c | Keep B*RESID in WORKD(1:N) to be used in | -c | the first step of the next call to snaitr. | -c %---------------------------------------------% -c - cnorm = .true. - call second (t2) - if (bmat .eq. 
'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*RESID | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd, 1) - end if -c - 100 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(1:N) := B*RESID | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - rnorm = sdot (n, resid, 1, workd, 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = snrm2(n, resid, 1) - end if - cnorm = .false. -c - if (msglvl .gt. 2) then - call svout (logfil, 1, rnorm, ndigit, - & '_naup2: B-norm of residual for compressed factorization') - call smout (logfil, nev, nev, h, ldh, ndigit, - & '_naup2: Compressed upper Hessenberg matrix H') - end if -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 1100 continue -c - mxiter = iter - nev = numcnv -c - 1200 continue - ido = 99 -c -c %------------% -c | Error Exit | -c %------------% -c - call second (t1) - tnaup2 = t1 - t0 -c - 9000 continue -c -c %---------------% -c | End of snaup2 | -c %---------------% -c - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaupd.f deleted file mode 100644 index 68aad43ca8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snaupd.f +++ /dev/null @@ -1,693 +0,0 @@ -c\BeginDoc -c -c\Name: snaupd -c -c\Description: -c Reverse communication interface for the Implicitly Restarted Arnoldi -c iteration. 
This subroutine computes approximations to a few eigenpairs -c of a linear operator "OP" with respect to a semi-inner product defined by -c a symmetric positive semi-definite real matrix B. B may be the identity -c matrix. NOTE: If the linear operator "OP" is real and symmetric -c with respect to the real positive semi-definite symmetric matrix B, -c i.e. B*OP = (OP`)*B, then subroutine ssaupd should be used instead. -c -c The computed approximate eigenvalues are called Ritz values and -c the corresponding approximate eigenvectors are called Ritz vectors. -c -c snaupd is usually called iteratively to solve one of the -c following problems: -c -c Mode 1: A*x = lambda*x. -c ===> OP = A and B = I. -c -c Mode 2: A*x = lambda*M*x, M symmetric positive definite -c ===> OP = inv[M]*A and B = M. -c ===> (If M can be factored see remark 3 below) -c -c Mode 3: A*x = lambda*M*x, M symmetric semi-definite -c ===> OP = Real_Part{ inv[A - sigma*M]*M } and B = M. -c ===> shift-and-invert mode (in real arithmetic) -c If OP*x = amu*x, then -c amu = 1/2 * [ 1/(lambda-sigma) + 1/(lambda-conjg(sigma)) ]. -c Note: If sigma is real, i.e. imaginary part of sigma is zero; -c Real_Part{ inv[A - sigma*M]*M } == inv[A - sigma*M]*M -c amu == 1/(lambda-sigma). -c -c Mode 4: A*x = lambda*M*x, M symmetric semi-definite -c ===> OP = Imaginary_Part{ inv[A - sigma*M]*M } and B = M. -c ===> shift-and-invert mode (in real arithmetic) -c If OP*x = amu*x, then -c amu = 1/2i * [ 1/(lambda-sigma) - 1/(lambda-conjg(sigma)) ]. -c -c Both mode 3 and 4 give the same enhancement to eigenvalues close to -c the (complex) shift sigma. However, as lambda goes to infinity, -c the operator OP in mode 4 dampens the eigenvalues more strongly than -c does OP defined in mode 3. 
-c -c NOTE: The action of w <- inv[A - sigma*M]*v or w <- inv[M]*v -c should be accomplished either by a direct method -c using a sparse matrix factorization and solving -c -c [A - sigma*M]*w = v or M*w = v, -c -c or through an iterative method for solving these -c systems. If an iterative method is used, the -c convergence test must be more stringent than -c the accuracy requirements for the eigenvalue -c approximations. -c -c\Usage: -c call snaupd -c ( IDO, BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, -c IPNTR, WORKD, WORKL, LWORKL, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to snaupd. IDO will be set internally to -c indicate the type of operation to be performed. Control is -c then given back to the calling routine which has the -c responsibility to carry out the requested operation and call -c snaupd with the result. The operand is given in -c WORKD(IPNTR(1)), the result must be put in WORKD(IPNTR(2)). -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c In mode 3 and 4, the vector B * X is already -c available in WORKD(ipntr(3)). It does not -c need to be recomputed in forming OP * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 3: compute the IPARAM(8) real and imaginary parts -c of the shifts where INPTR(14) is the pointer -c into WORKL for placing the shifts. See Remark -c 5 below. 
-c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. -c BMAT = 'I' -> standard eigenvalue problem A*x = lambda*x -c BMAT = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c WHICH Character*2. (INPUT) -c 'LM' -> want the NEV eigenvalues of largest magnitude. -c 'SM' -> want the NEV eigenvalues of smallest magnitude. -c 'LR' -> want the NEV eigenvalues of largest real part. -c 'SR' -> want the NEV eigenvalues of smallest real part. -c 'LI' -> want the NEV eigenvalues of largest imaginary part. -c 'SI' -> want the NEV eigenvalues of smallest imaginary part. -c -c NEV Integer. (INPUT/OUTPUT) -c Number of eigenvalues of OP to be computed. 0 < NEV < N-1. -c -c TOL Real scalar. (INPUT) -c Stopping criterion: the relative accuracy of the Ritz value -c is considered acceptable if BOUNDS(I) .LE. TOL*ABS(RITZ(I)) -c where ABS(RITZ(I)) is the magnitude when RITZ(I) is complex. -c DEFAULT = SLAMCH('EPS') (machine precision as computed -c by the LAPACK auxiliary subroutine SLAMCH). -c -c RESID Real array of length N. (INPUT/OUTPUT) -c On INPUT: -c If INFO .EQ. 0, a random initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c On OUTPUT: -c RESID contains the final residual vector. -c -c NCV Integer. (INPUT) -c Number of columns of the matrix V. NCV must satisfy the two -c inequalities 2 <= NCV-NEV and NCV <= N. -c This will indicate how many Arnoldi vectors are generated -c at each iteration. After the startup phase in which NEV -c Arnoldi vectors are generated, the algorithm generates -c approximately NCV-NEV Arnoldi vectors at each subsequent update -c iteration. Most of the cost in generating each Arnoldi vector is -c in the matrix-vector operation OP*x. 
-c NOTE: 2 <= NCV-NEV in order that complex conjugate pairs of Ritz -c values are kept together. (See remark 4 below) -c -c V Real array N by NCV. (OUTPUT) -c Contains the final set of Arnoldi basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling program. -c -c IPARAM Integer array of length 11. (INPUT/OUTPUT) -c IPARAM(1) = ISHIFT: method for selecting the implicit shifts. -c The shifts selected at each iteration are used to restart -c the Arnoldi iteration in an implicit fashion. -c ------------------------------------------------------------- -c ISHIFT = 0: the shifts are provided by the user via -c reverse communication. The real and imaginary -c parts of the NCV eigenvalues of the Hessenberg -c matrix H are returned in the part of the WORKL -c array corresponding to RITZR and RITZI. See remark -c 5 below. -c ISHIFT = 1: exact shifts with respect to the current -c Hessenberg matrix H. This is equivalent to -c restarting the iteration with a starting vector -c that is a linear combination of approximate Schur -c vectors associated with the "wanted" Ritz values. -c ------------------------------------------------------------- -c -c IPARAM(2) = No longer referenced. -c -c IPARAM(3) = MXITER -c On INPUT: maximum number of Arnoldi update iterations allowed. -c On OUTPUT: actual number of Arnoldi update iterations taken. -c -c IPARAM(4) = NB: blocksize to be used in the recurrence. -c The code currently works only for NB = 1. -c -c IPARAM(5) = NCONV: number of "converged" Ritz values. -c This represents the number of Ritz values that satisfy -c the convergence criterion. -c -c IPARAM(6) = IUPD -c No longer referenced. Implicit restarting is ALWAYS used. -c -c IPARAM(7) = MODE -c On INPUT determines what type of eigenproblem is being solved. -c Must be 1,2,3,4; See under \Description of snaupd for the -c four modes available. 
-c -c IPARAM(8) = NP -c When ido = 3 and the user provides shifts through reverse -c communication (IPARAM(1)=0), snaupd returns NP, the number -c of shifts the user is to provide. 0 < NP <=NCV-NEV. See Remark -c 5 below. -c -c IPARAM(9) = NUMOP, IPARAM(10) = NUMOPB, IPARAM(11) = NUMREO, -c OUTPUT: NUMOP = total number of OP*x operations, -c NUMOPB = total number of B*x operations if BMAT='G', -c NUMREO = total number of steps of re-orthogonalization. -c -c IPNTR Integer array of length 14. (OUTPUT) -c Pointer to mark the starting locations in the WORKD and WORKL -c arrays for matrices/vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X in WORKD. -c IPNTR(2): pointer to the current result vector Y in WORKD. -c IPNTR(3): pointer to the vector B * X in WORKD when used in -c the shift-and-invert mode. -c IPNTR(4): pointer to the next available location in WORKL -c that is untouched by the program. -c IPNTR(5): pointer to the NCV by NCV upper Hessenberg matrix -c H in WORKL. -c IPNTR(6): pointer to the real part of the ritz value array -c RITZR in WORKL. -c IPNTR(7): pointer to the imaginary part of the ritz value array -c RITZI in WORKL. -c IPNTR(8): pointer to the Ritz estimates in array WORKL associated -c with the Ritz values located in RITZR and RITZI in WORKL. -c -c IPNTR(14): pointer to the NP shifts in WORKL. See Remark 5 below. -c -c Note: IPNTR(9:13) is only referenced by sneupd. See Remark 2 below. -c -c IPNTR(9): pointer to the real part of the NCV RITZ values of the -c original system. -c IPNTR(10): pointer to the imaginary part of the NCV RITZ values of -c the original system. -c IPNTR(11): pointer to the NCV corresponding error bounds. -c IPNTR(12): pointer to the NCV by NCV upper quasi-triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. 
Only referenced by -c sneupd if RVEC = .TRUE. See Remark 2 below. -c ------------------------------------------------------------- -c -c WORKD Real work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration. Upon termination -c WORKD(1:N) contains B*RESID(1:N). If an invariant subspace -c associated with the converged Ritz values is desired, see remark -c 2 below, subroutine sneupd uses this output. -c See Data Distribution Note below. -c -c WORKL Real work array of length LWORKL. (OUTPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. See Data Distribution Note below. -c -c LWORKL Integer. (INPUT) -c LWORKL must be at least 3*NCV**2 + 6*NCV. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal exit. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. IPARAM(5) -c returns the number of wanted converged Ritz values. -c = 2: No longer an informational error. Deprecated starting -c with release 2 of ARPACK. -c = 3: No shifts could be applied during a cycle of the -c Implicitly restarted Arnoldi iteration. One possibility -c is to increase the size of NCV relative to NEV. -c See remark 4 below. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 2 and less than or equal to N. -c = -4: The maximum number of Arnoldi update iteration -c must be greater than zero. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work array is not sufficient. -c = -8: Error return from LAPACK eigenvalue calculation; -c = -9: Starting vector is zero. 
-c = -10: IPARAM(7) must be 1,2,3,4. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatable. -c = -12: IPARAM(1) must be equal to 0 or 1. -c = -9999: Could not build an Arnoldi factorization. -c IPARAM(5) returns the size of the current Arnoldi -c factorization. -c -c\Remarks -c 1. The computed Ritz values are approximate eigenvalues of OP. The -c selection of WHICH should be made with this in mind when -c Mode = 3 and 4. After convergence, approximate eigenvalues of the -c original problem may be obtained with the ARPACK subroutine sneupd. -c -c 2. If a basis for the invariant subspace corresponding to the converged Ritz -c values is needed, the user must call sneupd immediately following -c completion of snaupd. This is new starting with release 2 of ARPACK. -c -c 3. If M can be factored into a Cholesky factorization M = LL` -c then Mode = 2 should not be selected. Instead one should use -c Mode = 1 with OP = inv(L)*A*inv(L`). Appropriate triangular -c linear systems should be solved with L and L` rather -c than computing inverses. After convergence, an approximate -c eigenvector z of the original problem is recovered by solving -c L`z = x where x is a Ritz vector of OP. -c -c 4. At present there is no a-priori analysis to guide the selection -c of NCV relative to NEV. The only formal requrement is that NCV > NEV + 2. -c However, it is recommended that NCV .ge. 2*NEV+1. If many problems of -c the same type are to be solved, one should experiment with increasing -c NCV while keeping NEV fixed for a given test problem. This will -c usually decrease the required number of OP*x operations but it -c also increases the work and storage required to maintain the orthogonal -c basis vectors. The optimal "cross-over" with respect to CPU time -c is problem dependent and must be determined empirically. -c See Chapter 8 of Reference 2 for further information. -c -c 5. 
When IPARAM(1) = 0, and IDO = 3, the user needs to provide the -c NP = IPARAM(8) real and imaginary parts of the shifts in locations -c real part imaginary part -c ----------------------- -------------- -c 1 WORKL(IPNTR(14)) WORKL(IPNTR(14)+NP) -c 2 WORKL(IPNTR(14)+1) WORKL(IPNTR(14)+NP+1) -c . . -c . . -c . . -c NP WORKL(IPNTR(14)+NP-1) WORKL(IPNTR(14)+2*NP-1). -c -c Only complex conjugate pairs of shifts may be applied and the pairs -c must be placed in consecutive locations. The real part of the -c eigenvalues of the current upper Hessenberg matrix are located in -c WORKL(IPNTR(6)) through WORKL(IPNTR(6)+NCV-1) and the imaginary part -c in WORKL(IPNTR(7)) through WORKL(IPNTR(7)+NCV-1). They are ordered -c according to the order defined by WHICH. The complex conjugate -c pairs are kept together and the associated Ritz estimates are located in -c WORKL(IPNTR(8)), WORKL(IPNTR(8)+1), ... , WORKL(IPNTR(8)+NCV-1). -c -c----------------------------------------------------------------------- -c -c\Data Distribution Note: -c -c Fortran-D syntax: -c ================ -c Real resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c decompose d1(n), d2(n,ncv) -c align resid(i) with d1(i) -c align v(i,j) with d2(i,j) -c align workd(i) with d1(i) range (1:n) -c align workd(i) with d1(i-n) range (n+1:2*n) -c align workd(i) with d1(i-2*n) range (2*n+1:3*n) -c distribute d1(block), d2(block,:) -c replicated workl(lworkl) -c -c Cray MPP syntax: -c =============== -c Real resid(n), v(ldv,ncv), workd(n,3), workl(lworkl) -c shared resid(block), v(block,:), workd(block,:) -c replicated workl(lworkl) -c -c CM2/CM5 syntax: -c ============== -c -c----------------------------------------------------------------------- -c -c include 'ex-nonsym.doc' -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. 
Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett & Y. Saad, "Complex Shift and Invert Strategies for -c Real Matrices", Linear Algebra and its Applications, vol 88/89, -c pp 575-595, (1987). -c -c\Routines called: -c snaup2 ARPACK routine that implements the Implicitly Restarted -c Arnoldi Iteration. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/16/93: Version '1.1' -c -c\SCCS Information: @(#) -c FILE: naupd.F SID: 2.10 DATE OF SID: 08/23/02 RELEASE: 2 -c -c\Remarks -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine snaupd - & ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, - & ipntr, workd, workl, lworkl, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ldv, lworkl, n, ncv, nev - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - Real - & resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c -c %------------% -c | Parameters | -c %------------% -c - Real - 
& one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, levec, mode, msglvl, mxiter, nb, - & nev0, next, np, ritzi, ritzr, j - save bounds, ih, iq, ishift, iupd, iw, ldh, ldq, - & levec, mode, msglvl, mxiter, nb, nev0, next, - & np, ritzi, ritzr -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external snaup2, svout, ivout, second, sstatn -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slamch - external slamch -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call sstatn - call second (t0) - msglvl = mnaupd -c -c %----------------% -c | Error checking | -c %----------------% -c - ierr = 0 - ishift = iparam(1) -c levec = iparam(2) - mxiter = iparam(3) -c nb = iparam(4) - nb = 1 -c -c %--------------------------------------------% -c | Revision 2 performs only implicit restart. | -c %--------------------------------------------% -c - iupd = 1 - mode = iparam(7) -c - if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev+1 .or. ncv .gt. n) then - ierr = -3 - else if (mxiter .le. 0) then - ierr = 4 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 6*ncv) then - ierr = -7 - else if (mode .lt. 1 .or. mode .gt. 4) then - ierr = -10 - else if (mode .eq. 1 .and. bmat .eq. 'G') then - ierr = -11 - else if (ishift .lt. 0 .or. ishift .gt. 
1) then - ierr = -12 - end if -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - ido = 99 - go to 9000 - end if -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - if (nb .le. 0) nb = 1 - if (tol .le. zero) tol = slamch('EpsMach') -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c | NEV0 is the local variable designating the | -c | size of the invariant subspace desired. | -c %----------------------------------------------% -c - np = ncv - nev - nev0 = nev -c -c %-----------------------------% -c | Zero out internal workspace | -c %-----------------------------% -c - do 10 j = 1, 3*ncv**2 + 6*ncv - workl(j) = zero - 10 continue -c -c %-------------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+2*ncv) := real and imaginary | -c | parts of ritz values | -c | workl(ncv*ncv+2*ncv+1:ncv*ncv+3*ncv) := error bounds | -c | workl(ncv*ncv+3*ncv+1:2*ncv*ncv+3*ncv) := rotation matrix Q | -c | workl(2*ncv*ncv+3*ncv+1:3*ncv*ncv+6*ncv) := workspace | -c | The final workspace is needed by subroutine sneigh called | -c | by snaup2. Subroutine sneigh calls LAPACK routines for | -c | calculating eigenvalues and the last row of the eigenvector | -c | matrix. 
| -c %-------------------------------------------------------------% -c - ldh = ncv - ldq = ncv - ih = 1 - ritzr = ih + ldh*ncv - ritzi = ritzr + ncv - bounds = ritzi + ncv - iq = bounds + ncv - iw = iq + ldq*ncv - next = iw + ncv**2 + 3*ncv -c - ipntr(4) = next - ipntr(5) = ih - ipntr(6) = ritzr - ipntr(7) = ritzi - ipntr(8) = bounds - ipntr(14) = iw -c - end if -c -c %-------------------------------------------------------% -c | Carry out the Implicitly restarted Arnoldi Iteration. | -c %-------------------------------------------------------% -c - call snaup2 - & ( ido, bmat, n, which, nev0, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, workl(ih), ldh, workl(ritzr), - & workl(ritzi), workl(bounds), workl(iq), ldq, workl(iw), - & ipntr, workd, info ) -c -c %--------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP or shifts. | -c %--------------------------------------------------% -c - if (ido .eq. 3) iparam(8) = np - if (ido .ne. 99) go to 9000 -c - iparam(3) = mxiter - iparam(5) = np - iparam(9) = nopx - iparam(10) = nbx - iparam(11) = nrorth -c -c %------------------------------------% -c | Exit if there was an informational | -c | error within snaup2. | -c %------------------------------------% -c - if (info .lt. 0) go to 9000 - if (info .eq. 2) info = 3 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, mxiter, ndigit, - & '_naupd: Number of update iterations taken') - call ivout (logfil, 1, np, ndigit, - & '_naupd: Number of wanted "converged" Ritz values') - call svout (logfil, np, workl(ritzr), ndigit, - & '_naupd: Real part of the final Ritz values') - call svout (logfil, np, workl(ritzi), ndigit, - & '_naupd: Imaginary part of the final Ritz values') - call svout (logfil, np, workl(bounds), ndigit, - & '_naupd: Associated Ritz estimates') - end if -c - call second (t1) - tnaupd = t1 - t0 -c - if (msglvl .gt. 
0) then -c -c %--------------------------------------------------------% -c | Version Number & Version Date are defined in version.h | -c %--------------------------------------------------------% -c - write (6,1000) - write (6,1100) mxiter, nopx, nbx, nrorth, nitref, nrstrt, - & tmvopx, tmvbx, tnaupd, tnaup2, tnaitr, titref, - & tgetv0, tneigh, tngets, tnapps, tnconv, trvec - 1000 format (//, - & 5x, '=============================================',/ - & 5x, '= Nonsymmetric implicit Arnoldi update code =',/ - & 5x, '= Version Number: ', ' 2.4', 21x, ' =',/ - & 5x, '= Version Date: ', ' 07/31/96', 16x, ' =',/ - & 5x, '=============================================',/ - & 5x, '= Summary of timing statistics =',/ - & 5x, '=============================================',//) - 1100 format ( - & 5x, 'Total number update iterations = ', i5,/ - & 5x, 'Total number of OP*x operations = ', i5,/ - & 5x, 'Total number of B*x operations = ', i5,/ - & 5x, 'Total number of reorthogonalization steps = ', i5,/ - & 5x, 'Total number of iterative refinement steps = ', i5,/ - & 5x, 'Total number of restart steps = ', i5,/ - & 5x, 'Total time in user OP*x operation = ', f12.6,/ - & 5x, 'Total time in user B*x operation = ', f12.6,/ - & 5x, 'Total time in Arnoldi update routine = ', f12.6,/ - & 5x, 'Total time in naup2 routine = ', f12.6,/ - & 5x, 'Total time in basic Arnoldi iteration loop = ', f12.6,/ - & 5x, 'Total time in reorthogonalization phase = ', f12.6,/ - & 5x, 'Total time in (re)start vector generation = ', f12.6,/ - & 5x, 'Total time in Hessenberg eig. 
subproblem = ', f12.6,/ - & 5x, 'Total time in getting the shifts = ', f12.6,/ - & 5x, 'Total time in applying the shifts = ', f12.6,/ - & 5x, 'Total time in convergence testing = ', f12.6,/ - & 5x, 'Total time in computing final Ritz vectors = ', f12.6/) - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of snaupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snconv.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snconv.f deleted file mode 100644 index 3094b1512e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/snconv.f +++ /dev/null @@ -1,146 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: snconv -c -c\Description: -c Convergence testing for the nonsymmetric Arnoldi eigenvalue routine. -c -c\Usage: -c call snconv -c ( N, RITZR, RITZI, BOUNDS, TOL, NCONV ) -c -c\Arguments -c N Integer. (INPUT) -c Number of Ritz values to check for convergence. -c -c RITZR, Real arrays of length N. (INPUT) -c RITZI Real and imaginary parts of the Ritz values to be checked -c for convergence. - -c BOUNDS Real array of length N. (INPUT) -c Ritz estimates for the Ritz values in RITZR and RITZI. -c -c TOL Real scalar. (INPUT) -c Desired backward error for a Ritz value to be considered -c "converged". -c -c NCONV Integer scalar. (OUTPUT) -c Number of "converged" Ritz values. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c second ARPACK utility routine for timing. -c slamch LAPACK routine that determines machine constants. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: nconv.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c 1. xxxx -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine snconv (n, ritzr, ritzi, bounds, tol, nconv) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer n, nconv - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% - - Real - & ritzr(n), ritzi(n), bounds(n) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i - Real - & temp, eps23 -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slapy2, slamch - external slapy2, slamch - -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------------------------------------% -c | Convergence test: unlike in the symmetric code, I am not | -c | using things like refined error bounds and gap condition | -c | because I don't know the exact equivalent concept. | -c | | -c | Instead the i-th Ritz value is considered "converged" when: | -c | | -c | bounds(i) .le. ( TOL * | ritz | ) | -c | | -c | for some appropriate choice of norm. | -c %-------------------------------------------------------------% -c - call second (t0) -c -c %---------------------------------% -c | Get machine dependent constant. 
| -c %---------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0) -c - nconv = 0 - do 20 i = 1, n - temp = max( eps23, slapy2( ritzr(i), ritzi(i) ) ) - if (bounds(i) .le. tol*temp) nconv = nconv + 1 - 20 continue -c - call second (t1) - tnconv = tnconv + (t1 - t0) -c - return -c -c %---------------% -c | End of snconv | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sneigh.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sneigh.f deleted file mode 100644 index 6dd9c90c79..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sneigh.f +++ /dev/null @@ -1,314 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: sneigh -c -c\Description: -c Compute the eigenvalues of the current upper Hessenberg matrix -c and the corresponding Ritz estimates given the current residual norm. -c -c\Usage: -c call sneigh -c ( RNORM, N, H, LDH, RITZR, RITZI, BOUNDS, Q, LDQ, WORKL, IERR ) -c -c\Arguments -c RNORM Real scalar. (INPUT) -c Residual norm corresponding to the current upper Hessenberg -c matrix H. -c -c N Integer. (INPUT) -c Size of the matrix H. -c -c H Real N by N array. (INPUT) -c H contains the current upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZR, Real arrays of length N. (OUTPUT) -c RITZI On output, RITZR(1:N) (resp. RITZI(1:N)) contains the real -c (respectively imaginary) parts of the eigenvalues of H. -c -c BOUNDS Real array of length N. (OUTPUT) -c On output, BOUNDS contains the Ritz estimates associated with -c the eigenvalues RITZR and RITZI. This is equal to RNORM -c times the last components of the eigenvectors corresponding -c to the eigenvalues in RITZR and RITZI. -c -c Q Real N by N array. (WORKSPACE) -c Workspace needed to store the eigenvectors of H. -c -c LDQ Integer. 
(INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Real work array of length N**2 + 3*N. (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. This is needed to keep the full Schur form -c of H and also in the calculation of the eigenvectors of H. -c -c IERR Integer. (OUTPUT) -c Error exit flag from slaqrb or strevc. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c slaqrb ARPACK routine to compute the real Schur form of an -c upper Hessenberg matrix and last row of the Schur vectors. -c second ARPACK utility routine for timing. -c smout ARPACK utility routine that prints matrices -c svout ARPACK utility routine that prints vectors. -c slacpy LAPACK matrix copy routine. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c strevc LAPACK routine to compute the eigenvectors of a matrix -c in upper quasi-triangular form -c sgemv Level 2 BLAS routine for matrix vector multiplication. -c scopy Level 1 BLAS that copies one vector to another . -c snrm2 Level 1 BLAS that computes the norm of a vector. -c sscal Level 1 BLAS that scales a vector. -c -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: neigh.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine sneigh (rnorm, n, h, ldh, ritzr, ritzi, bounds, - & q, ldq, workl, ierr) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer ierr, n, ldh, ldq - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & bounds(n), h(ldh,n), q(ldq,n), ritzi(n), ritzr(n), - & workl(n*(n+3)) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical select(1) - integer i, iconj, msglvl - Real - & temp, vl(1) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy, slacpy, slaqrb, strevc, svout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slapy2, snrm2 - external slapy2, snrm2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mneigh -c - if (msglvl .gt. 
2) then - call smout (logfil, n, n, h, ldh, ndigit, - & '_neigh: Entering upper Hessenberg matrix H ') - end if -c -c %-----------------------------------------------------------% -c | 1. Compute the eigenvalues, the last components of the | -c | corresponding Schur vectors and the full Schur form T | -c | of the current upper Hessenberg matrix H. | -c | slaqrb returns the full Schur form of H in WORKL(1:N**2) | -c | and the last components of the Schur vectors in BOUNDS. | -c %-----------------------------------------------------------% -c - call slacpy ('All', n, n, h, ldh, workl, n) - call slaqrb (.true., n, 1, n, workl, n, ritzr, ritzi, bounds, - & ierr) - if (ierr .ne. 0) go to 9000 -c - if (msglvl .gt. 1) then - call svout (logfil, n, bounds, ndigit, - & '_neigh: last row of the Schur matrix for H') - end if -c -c %-----------------------------------------------------------% -c | 2. Compute the eigenvectors of the full Schur form T and | -c | apply the last components of the Schur vectors to get | -c | the last components of the corresponding eigenvectors. | -c | Remember that if the i-th and (i+1)-st eigenvalues are | -c | complex conjugate pairs, then the real & imaginary part | -c | of the eigenvector components are split across adjacent | -c | columns of Q. | -c %-----------------------------------------------------------% -c - call strevc ('R', 'A', select, n, workl, n, vl, n, q, ldq, - & n, n, workl(n*n+1), ierr) -c - if (ierr .ne. 0) go to 9000 -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | euclidean norms are all one. LAPACK subroutine | -c | strevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1; here the magnitude of a complex | -c | number (x,y) is taken to be |x| + |y|. | -c %------------------------------------------------% -c - iconj = 0 - do 10 i=1, n - if ( abs( ritzi(i) ) .le. 
zero ) then -c -c %----------------------% -c | Real eigenvalue case | -c %----------------------% -c - temp = snrm2( n, q(1,i), 1 ) - call sscal ( n, one / temp, q(1,i), 1 ) - else -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c | columns, we further normalize by the | -c | square root of two. | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - temp = slapy2( snrm2( n, q(1,i), 1 ), - & snrm2( n, q(1,i+1), 1 ) ) - call sscal ( n, one / temp, q(1,i), 1 ) - call sscal ( n, one / temp, q(1,i+1), 1 ) - iconj = 1 - else - iconj = 0 - end if - end if - 10 continue -c - call sgemv ('T', n, n, one, q, ldq, bounds, 1, zero, workl, 1) -c - if (msglvl .gt. 1) then - call svout (logfil, n, workl, ndigit, - & '_neigh: Last row of the eigenvector matrix for H') - end if -c -c %----------------------------% -c | Compute the Ritz estimates | -c %----------------------------% -c - iconj = 0 - do 20 i = 1, n - if ( abs( ritzi(i) ) .le. zero ) then -c -c %----------------------% -c | Real eigenvalue case | -c %----------------------% -c - bounds(i) = rnorm * abs( workl(i) ) - else -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c | columns, we need to take the magnitude | -c | of the last components of the two vectors | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - bounds(i) = rnorm * slapy2( workl(i), workl(i+1) ) - bounds(i+1) = bounds(i) - iconj = 1 - else - iconj = 0 - end if - end if - 20 continue -c - if (msglvl .gt. 
2) then - call svout (logfil, n, ritzr, ndigit, - & '_neigh: Real part of the eigenvalues of H') - call svout (logfil, n, ritzi, ndigit, - & '_neigh: Imaginary part of the eigenvalues of H') - call svout (logfil, n, bounds, ndigit, - & '_neigh: Ritz estimates for the eigenvalues of H') - end if -c - call second (t1) - tneigh = tneigh + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of sneigh | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sneupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sneupd.f deleted file mode 100644 index 2ee0e60a92..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sneupd.f +++ /dev/null @@ -1,1067 +0,0 @@ -c\BeginDoc -c -c\Name: sneupd -c -c\Description: -c -c This subroutine returns the converged approximations to eigenvalues -c of A*z = lambda*B*z and (optionally): -c -c (1) The corresponding approximate eigenvectors; -c -c (2) An orthonormal basis for the associated approximate -c invariant subspace; -c -c (3) Both. -c -c There is negligible additional cost to obtain eigenvectors. An orthonormal -c basis is always computed. There is an additional storage cost of n*nev -c if both are requested (in this case a separate array Z must be supplied). -c -c The approximate eigenvalues and eigenvectors of A*z = lambda*B*z -c are derived from approximate eigenvalues and eigenvectors of -c of the linear operator OP prescribed by the MODE selection in the -c call to SNAUPD. SNAUPD must be called before this routine is called. -c These approximate eigenvalues and vectors are commonly called Ritz -c values and Ritz vectors respectively. They are referred to as such -c in the comments that follow. The computed orthonormal basis for the -c invariant subspace corresponding to these Ritz values is referred to as a -c Schur basis. 
-c -c See documentation in the header of the subroutine SNAUPD for -c definition of OP as well as other terms and the relation of computed -c Ritz values and Ritz vectors of OP with respect to the given problem -c A*z = lambda*B*z. For a brief description, see definitions of -c IPARAM(7), MODE and WHICH in the documentation of SNAUPD. -c -c\Usage: -c call sneupd -c ( RVEC, HOWMNY, SELECT, DR, DI, Z, LDZ, SIGMAR, SIGMAI, WORKEV, BMAT, -c N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, WORKD, WORKL, -c LWORKL, INFO ) -c -c\Arguments: -c RVEC LOGICAL (INPUT) -c Specifies whether a basis for the invariant subspace corresponding -c to the converged Ritz value approximations for the eigenproblem -c A*z = lambda*B*z is computed. -c -c RVEC = .FALSE. Compute Ritz values only. -c -c RVEC = .TRUE. Compute the Ritz vectors or Schur vectors. -c See Remarks below. -c -c HOWMNY Character*1 (INPUT) -c Specifies the form of the basis for the invariant subspace -c corresponding to the converged Ritz values that is to be computed. -c -c = 'A': Compute NEV Ritz vectors; -c = 'P': Compute NEV Schur vectors; -c = 'S': compute some of the Ritz vectors, specified -c by the logical array SELECT. -c -c SELECT Logical array of dimension NCV. (INPUT) -c If HOWMNY = 'S', SELECT specifies the Ritz vectors to be -c computed. To select the Ritz vector corresponding to a -c Ritz value (DR(j), DI(j)), SELECT(j) must be set to .TRUE.. -c If HOWMNY = 'A' or 'P', SELECT is used as internal workspace. -c -c DR Real array of dimension NEV+1. (OUTPUT) -c If IPARAM(7) = 1,2 or 3 and SIGMAI=0.0 then on exit: DR contains -c the real part of the Ritz approximations to the eigenvalues of -c A*z = lambda*B*z. -c If IPARAM(7) = 3, 4 and SIGMAI is not equal to zero, then on exit: -c DR contains the real part of the Ritz values of OP computed by -c SNAUPD. 
A further computation must be performed by the user -c to transform the Ritz values computed for OP by SNAUPD to those -c of the original system A*z = lambda*B*z. See remark 3 below. -c -c DI Real array of dimension NEV+1. (OUTPUT) -c On exit, DI contains the imaginary part of the Ritz value -c approximations to the eigenvalues of A*z = lambda*B*z associated -c with DR. -c -c NOTE: When Ritz values are complex, they will come in complex -c conjugate pairs. If eigenvectors are requested, the -c corresponding Ritz vectors will also come in conjugate -c pairs and the real and imaginary parts of these are -c represented in two consecutive columns of the array Z -c (see below). -c -c Z Real N by NEV+1 array if RVEC = .TRUE. and HOWMNY = 'A'. (OUTPUT) -c On exit, if RVEC = .TRUE. and HOWMNY = 'A', then the columns of -c Z represent approximate eigenvectors (Ritz vectors) corresponding -c to the NCONV=IPARAM(5) Ritz values for eigensystem -c A*z = lambda*B*z. -c -c The complex Ritz vector associated with the Ritz value -c with positive imaginary part is stored in two consecutive -c columns. The first column holds the real part of the Ritz -c vector and the second column holds the imaginary part. The -c Ritz vector associated with the Ritz value with negative -c imaginary part is simply the complex conjugate of the Ritz vector -c associated with the positive imaginary part. -c -c If RVEC = .FALSE. or HOWMNY = 'P', then Z is not referenced. -c -c NOTE: If if RVEC = .TRUE. and a Schur basis is not required, -c the array Z may be set equal to first NEV+1 columns of the Arnoldi -c basis array V computed by SNAUPD. In this case the Arnoldi basis -c will be destroyed and overwritten with the eigenvector basis. -c -c LDZ Integer. (INPUT) -c The leading dimension of the array Z. If Ritz vectors are -c desired, then LDZ >= max( 1, N ). In any case, LDZ >= 1. -c -c SIGMAR Real (INPUT) -c If IPARAM(7) = 3 or 4, represents the real part of the shift. 
-c Not referenced if IPARAM(7) = 1 or 2. -c -c SIGMAI Real (INPUT) -c If IPARAM(7) = 3 or 4, represents the imaginary part of the shift. -c Not referenced if IPARAM(7) = 1 or 2. See remark 3 below. -c -c WORKEV Real work array of dimension 3*NCV. (WORKSPACE) -c -c **** The remaining arguments MUST be the same as for the **** -c **** call to SNAUPD that was just completed. **** -c -c NOTE: The remaining arguments -c -c BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, -c WORKD, WORKL, LWORKL, INFO -c -c must be passed directly to SNEUPD following the last call -c to SNAUPD. These arguments MUST NOT BE MODIFIED between -c the the last call to SNAUPD and the call to SNEUPD. -c -c Three of these parameters (V, WORKL, INFO) are also output parameters: -c -c V Real N by NCV array. (INPUT/OUTPUT) -c -c Upon INPUT: the NCV columns of V contain the Arnoldi basis -c vectors for OP as constructed by SNAUPD . -c -c Upon OUTPUT: If RVEC = .TRUE. the first NCONV=IPARAM(5) columns -c contain approximate Schur vectors that span the -c desired invariant subspace. See Remark 2 below. -c -c NOTE: If the array Z has been set equal to first NEV+1 columns -c of the array V and RVEC=.TRUE. and HOWMNY= 'A', then the -c Arnoldi basis held by V has been overwritten by the desired -c Ritz vectors. If a separate array Z has been passed then -c the first NCONV=IPARAM(5) columns of V will contain approximate -c Schur vectors that span the desired invariant subspace. -c -c WORKL Real work array of length LWORKL. (OUTPUT/WORKSPACE) -c WORKL(1:ncv*ncv+3*ncv) contains information obtained in -c snaupd. They are not changed by sneupd. -c WORKL(ncv*ncv+3*ncv+1:3*ncv*ncv+6*ncv) holds the -c real and imaginary part of the untransformed Ritz values, -c the upper quasi-triangular matrix for H, and the -c associated matrix representation of the invariant subspace for H. -c -c Note: IPNTR(9:13) contains the pointer into WORKL for addresses -c of the above information computed by sneupd. 
-c ------------------------------------------------------------- -c IPNTR(9): pointer to the real part of the NCV RITZ values of the -c original system. -c IPNTR(10): pointer to the imaginary part of the NCV RITZ values of -c the original system. -c IPNTR(11): pointer to the NCV corresponding error bounds. -c IPNTR(12): pointer to the NCV by NCV upper quasi-triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c sneupd if RVEC = .TRUE. See Remark 2 below. -c ------------------------------------------------------------- -c -c INFO Integer. (OUTPUT) -c Error flag on output. -c -c = 0: Normal exit. -c -c = 1: The Schur form computed by LAPACK routine slahqr -c could not be reordered by LAPACK routine strsen. -c Re-enter subroutine sneupd with IPARAM(5)=NCV and -c increase the size of the arrays DR and DI to have -c dimension at least dimension NCV and allocate at least NCV -c columns for Z. NOTE: Not necessary if Z and V share -c the same space. Please notify the authors if this error -c occurs. -c -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 2 and less than or equal to N. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work WORKL array is not sufficient. -c = -8: Error return from calculation of a real Schur form. -c Informational error from LAPACK routine slahqr. -c = -9: Error return from calculation of eigenvectors. -c Informational error from LAPACK routine strevc. -c = -10: IPARAM(7) must be 1,2,3,4. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. -c = -12: HOWMNY = 'S' not yet implemented -c = -13: HOWMNY must be one of 'A' or 'P' if RVEC = .true. -c = -14: SNAUPD did not find any eigenvalues to sufficient -c accuracy. -c = -15: DNEUPD got a different count of the number of converged -c Ritz values than DNAUPD got. 
This indicates the user -c probably made an error in passing data from DNAUPD to -c DNEUPD or that the data was modified before entering -c DNEUPD -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett & Y. Saad, "Complex Shift and Invert Strategies for -c Real Matrices", Linear Algebra and its Applications, vol 88/89, -c pp 575-595, (1987). -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c smout ARPACK utility routine that prints matrices -c svout ARPACK utility routine that prints vectors. -c sgeqr2 LAPACK routine that computes the QR factorization of -c a matrix. -c slacpy LAPACK matrix copy routine. -c slahqr LAPACK routine to compute the real Schur form of an -c upper Hessenberg matrix. -c slamch LAPACK routine that determines machine constants. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c slaset LAPACK matrix initialization routine. -c sorm2r LAPACK routine that applies an orthogonal matrix in -c factored form. -c strevc LAPACK routine to compute the eigenvectors of a matrix -c in upper quasi-triangular form. -c strsen LAPACK routine that re-orders the Schur form. -c strmm Level 3 BLAS matrix times an upper triangular matrix. -c sger Level 2 BLAS rank one update to a matrix. -c scopy Level 1 BLAS that copies one vector to another . -c sdot Level 1 BLAS that computes the scalar product of two vectors. -c snrm2 Level 1 BLAS that computes the norm of a vector. -c sscal Level 1 BLAS that scales a vector. -c -c\Remarks -c -c 1. Currently only HOWMNY = 'A' and 'P' are implemented. -c -c Let trans(X) denote the transpose of X. -c -c 2. 
Schur vectors are an orthogonal representation for the basis of -c Ritz vectors. Thus, their numerical properties are often superior. -c If RVEC = .TRUE. then the relationship -c A * V(:,1:IPARAM(5)) = V(:,1:IPARAM(5)) * T, and -c trans(V(:,1:IPARAM(5))) * V(:,1:IPARAM(5)) = I are approximately -c satisfied. Here T is the leading submatrix of order IPARAM(5) of the -c real upper quasi-triangular matrix stored workl(ipntr(12)). That is, -c T is block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; -c each 2-by-2 diagonal block has its diagonal elements equal and its -c off-diagonal elements of opposite sign. Corresponding to each 2-by-2 -c diagonal block is a complex conjugate pair of Ritz values. The real -c Ritz values are stored on the diagonal of T. -c -c 3. If IPARAM(7) = 3 or 4 and SIGMAI is not equal zero, then the user must -c form the IPARAM(5) Rayleigh quotients in order to transform the Ritz -c values computed by SNAUPD for OP to those of A*z = lambda*B*z. -c Set RVEC = .true. and HOWMNY = 'A', and -c compute -c trans(Z(:,I)) * A * Z(:,I) if DI(I) = 0. -c If DI(I) is not equal to zero and DI(I+1) = - D(I), -c then the desired real and imaginary parts of the Ritz value are -c trans(Z(:,I)) * A * Z(:,I) + trans(Z(:,I+1)) * A * Z(:,I+1), -c trans(Z(:,I)) * A * Z(:,I+1) - trans(Z(:,I+1)) * A * Z(:,I), -c respectively. -c Another possibility is to set RVEC = .true. and HOWMNY = 'P' and -c compute trans(V(:,1:IPARAM(5))) * A * V(:,1:IPARAM(5)) and then an upper -c quasi-triangular matrix of order IPARAM(5) is computed. See remark -c 2 above. -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Chao Yang Houston, Texas -c Dept. 
of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: neupd.F SID: 2.7 DATE OF SID: 09/20/00 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- - subroutine sneupd(rvec , howmny, select, dr , di, - & z , ldz , sigmar, sigmai, workev, - & bmat , n , which , nev , tol, - & resid, ncv , v , ldv , iparam, - & ipntr, workd , workl , lworkl, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat, howmny, which*2 - logical rvec - integer info, ldz, ldv, lworkl, n, ncv, nev - Real - & sigmar, sigmai, tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - logical select(ncv) - Real - & dr(nev+1) , di(nev+1), resid(n) , - & v(ldv,ncv) , z(ldz,*) , workd(3*n), - & workl(lworkl), workev(3*ncv) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0 , zero = 0.0E+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character type*6 - integer bounds, ierr , ih , ihbds , - & iheigr, iheigi, iconj , nconv , - & invsub, iuptri, iwev , iwork(1), - & j , k , ldh , ldq , - & mode , msglvl, outncv, ritzr , - & ritzi , wri , wrr , irr , - & iri , ibd , ishift, numcnv , - & np , jj , nconv2 - logical reord - Real - & conds , rnorm, sep , temp, - & vl(1,1), temp1, eps23 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy , sger , sgeqr2, slacpy, - & slahqr, slaset, smout , sorm2r, - & strevc, strmm , strsen, sscal , - & svout , ivout -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slapy2, 
snrm2, slamch, sdot - external slapy2, snrm2, slamch, sdot -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs, min, sqrt -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - msglvl = mneupd - mode = iparam(7) - nconv = iparam(5) - info = 0 -c -c %---------------------------------% -c | Get machine dependent constant. | -c %---------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0 ) -c -c %--------------% -c | Quick return | -c %--------------% -c - ierr = 0 -c - if (nconv .le. 0) then - ierr = -14 - else if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev+1 .or. ncv .gt. n) then - ierr = -3 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 6*ncv) then - ierr = -7 - else if ( (howmny .ne. 'A' .and. - & howmny .ne. 'P' .and. - & howmny .ne. 'S') .and. rvec ) then - ierr = -13 - else if (howmny .eq. 'S' ) then - ierr = -12 - end if -c - if (mode .eq. 1 .or. mode .eq. 2) then - type = 'REGULR' - else if (mode .eq. 3 .and. sigmai .eq. zero) then - type = 'SHIFTI' - else if (mode .eq. 3 ) then - type = 'REALPT' - else if (mode .eq. 4 ) then - type = 'IMAGPT' - else - ierr = -10 - end if - if (mode .eq. 1 .and. bmat .eq. 'G') ierr = -11 -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. 
| -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+2*ncv) := real and imaginary | -c | parts of ritz values | -c | workl(ncv*ncv+2*ncv+1:ncv*ncv+3*ncv) := error bounds | -c %--------------------------------------------------------% -c -c %-----------------------------------------------------------% -c | The following is used and set by SNEUPD. | -c | workl(ncv*ncv+3*ncv+1:ncv*ncv+4*ncv) := The untransformed | -c | real part of the Ritz values. | -c | workl(ncv*ncv+4*ncv+1:ncv*ncv+5*ncv) := The untransformed | -c | imaginary part of the Ritz values. | -c | workl(ncv*ncv+5*ncv+1:ncv*ncv+6*ncv) := The untransformed | -c | error bounds of the Ritz values | -c | workl(ncv*ncv+6*ncv+1:2*ncv*ncv+6*ncv) := Holds the upper | -c | quasi-triangular matrix for H | -c | workl(2*ncv*ncv+6*ncv+1: 3*ncv*ncv+6*ncv) := Holds the | -c | associated matrix representation of the invariant | -c | subspace for H. | -c | GRAND total of NCV * ( 3 * NCV + 6 ) locations. | -c %-----------------------------------------------------------% -c - ih = ipntr(5) - ritzr = ipntr(6) - ritzi = ipntr(7) - bounds = ipntr(8) - ldh = ncv - ldq = ncv - iheigr = bounds + ldh - iheigi = iheigr + ldh - ihbds = iheigi + ldh - iuptri = ihbds + ldh - invsub = iuptri + ldh*ncv - ipntr(9) = iheigr - ipntr(10) = iheigi - ipntr(11) = ihbds - ipntr(12) = iuptri - ipntr(13) = invsub - wrr = 1 - wri = ncv + 1 - iwev = wri + ncv -c -c %-----------------------------------------% -c | irr points to the REAL part of the Ritz | -c | values computed by _neigh before | -c | exiting _naup2. | -c | iri points to the IMAGINARY part of the | -c | Ritz values computed by _neigh | -c | before exiting _naup2. | -c | ibd points to the Ritz estimates | -c | computed by _neigh before exiting | -c | _naup2. 
| -c %-----------------------------------------% -c - irr = ipntr(14)+ncv*ncv - iri = irr+ncv - ibd = iri+ncv -c -c %------------------------------------% -c | RNORM is B-norm of the RESID(1:N). | -c %------------------------------------% -c - rnorm = workl(ih+2) - workl(ih+2) = zero -c - if (msglvl .gt. 2) then - call svout(logfil, ncv, workl(irr), ndigit, - & '_neupd: Real part of Ritz values passed in from _NAUPD.') - call svout(logfil, ncv, workl(iri), ndigit, - & '_neupd: Imag part of Ritz values passed in from _NAUPD.') - call svout(logfil, ncv, workl(ibd), ndigit, - & '_neupd: Ritz estimates passed in from _NAUPD.') - end if -c - if (rvec) then -c - reord = .false. -c -c %---------------------------------------------------% -c | Use the temporary bounds array to store indices | -c | These will be used to mark the select array later | -c %---------------------------------------------------% -c - do 10 j = 1,ncv - workl(bounds+j-1) = j - select(j) = .false. - 10 continue -c -c %-------------------------------------% -c | Select the wanted Ritz values. | -c | Sort the Ritz values so that the | -c | wanted ones appear at the tailing | -c | NEV positions of workl(irr) and | -c | workl(iri). Move the corresponding | -c | error estimates in workl(bound) | -c | accordingly. | -c %-------------------------------------% -c - np = ncv - nev - ishift = 0 - call sngets(ishift , which , nev , - & np , workl(irr), workl(iri), - & workl(bounds), workl , workl(np+1)) -c - if (msglvl .gt. 
2) then - call svout(logfil, ncv, workl(irr), ndigit, - & '_neupd: Real part of Ritz values after calling _NGETS.') - call svout(logfil, ncv, workl(iri), ndigit, - & '_neupd: Imag part of Ritz values after calling _NGETS.') - call svout(logfil, ncv, workl(bounds), ndigit, - & '_neupd: Ritz value indices after calling _NGETS.') - end if -c -c %-----------------------------------------------------% -c | Record indices of the converged wanted Ritz values | -c | Mark the select array for possible reordering | -c %-----------------------------------------------------% -c - numcnv = 0 - do 11 j = 1,ncv - temp1 = max(eps23, - & slapy2( workl(irr+ncv-j), workl(iri+ncv-j) )) - jj = workl(bounds + ncv - j) - if (numcnv .lt. nconv .and. - & workl(ibd+jj-1) .le. tol*temp1) then - select(jj) = .true. - numcnv = numcnv + 1 - if (jj .gt. nev) reord = .true. - endif - 11 continue -c -c %-----------------------------------------------------------% -c | Check the count (numcnv) of converged Ritz values with | -c | the number (nconv) reported by dnaupd. If these two | -c | are different then there has probably been an error | -c | caused by incorrect passing of the dnaupd data. | -c %-----------------------------------------------------------% -c - if (msglvl .gt. 2) then - call ivout(logfil, 1, numcnv, ndigit, - & '_neupd: Number of specified eigenvalues') - call ivout(logfil, 1, nconv, ndigit, - & '_neupd: Number of "converged" eigenvalues') - end if -c - if (numcnv .ne. nconv) then - info = -15 - go to 9000 - end if -c -c %-----------------------------------------------------------% -c | Call LAPACK routine slahqr to compute the real Schur form | -c | of the upper Hessenberg matrix returned by SNAUPD. | -c | Make a copy of the upper Hessenberg matrix. | -c | Initialize the Schur vector matrix Q to the identity. 
| -c %-----------------------------------------------------------% -c - call scopy(ldh*ncv, workl(ih), 1, workl(iuptri), 1) - call slaset('All', ncv, ncv, - & zero , one, workl(invsub), - & ldq) - call slahqr(.true., .true. , ncv, - & 1 , ncv , workl(iuptri), - & ldh , workl(iheigr), workl(iheigi), - & 1 , ncv , workl(invsub), - & ldq , ierr) - call scopy(ncv , workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) -c - if (ierr .ne. 0) then - info = -8 - go to 9000 - end if -c - if (msglvl .gt. 1) then - call svout(logfil, ncv, workl(iheigr), ndigit, - & '_neupd: Real part of the eigenvalues of H') - call svout(logfil, ncv, workl(iheigi), ndigit, - & '_neupd: Imaginary part of the Eigenvalues of H') - call svout(logfil, ncv, workl(ihbds), ndigit, - & '_neupd: Last row of the Schur vector matrix') - if (msglvl .gt. 3) then - call smout(logfil , ncv, ncv , - & workl(iuptri), ldh, ndigit, - & '_neupd: The upper quasi-triangular matrix ') - end if - end if -c - if (reord) then -c -c %-----------------------------------------------------% -c | Reorder the computed upper quasi-triangular matrix. | -c %-----------------------------------------------------% -c - call strsen('None' , 'V' , - & select , ncv , - & workl(iuptri), ldh , - & workl(invsub), ldq , - & workl(iheigr), workl(iheigi), - & nconv2 , conds , - & sep , workl(ihbds) , - & ncv , iwork , - & 1 , ierr) -c - if (nconv2 .lt. nconv) then - nconv = nconv2 - end if - - if (ierr .eq. 1) then - info = 1 - go to 9000 - end if -c - if (msglvl .gt. 2) then - call svout(logfil, ncv, workl(iheigr), ndigit, - & '_neupd: Real part of the eigenvalues of H--reordered') - call svout(logfil, ncv, workl(iheigi), ndigit, - & '_neupd: Imag part of the eigenvalues of H--reordered') - if (msglvl .gt. 
3) then - call smout(logfil , ncv, ncv , - & workl(iuptri), ldq, ndigit, - & '_neupd: Quasi-triangular matrix after re-ordering') - end if - end if -c - end if -c -c %---------------------------------------% -c | Copy the last row of the Schur vector | -c | into workl(ihbds). This will be used | -c | to compute the Ritz estimates of | -c | converged Ritz values. | -c %---------------------------------------% -c - call scopy(ncv, workl(invsub+ncv-1), ldq, workl(ihbds), 1) -c -c %----------------------------------------------------% -c | Place the computed eigenvalues of H into DR and DI | -c | if a spectral transformation was not used. | -c %----------------------------------------------------% -c - if (type .eq. 'REGULR') then - call scopy(nconv, workl(iheigr), 1, dr, 1) - call scopy(nconv, workl(iheigi), 1, di, 1) - end if -c -c %----------------------------------------------------------% -c | Compute the QR factorization of the matrix representing | -c | the wanted invariant subspace located in the first NCONV | -c | columns of workl(invsub,ldq). | -c %----------------------------------------------------------% -c - call sgeqr2(ncv, nconv , workl(invsub), - & ldq, workev, workev(ncv+1), - & ierr) -c -c %---------------------------------------------------------% -c | * Postmultiply V by Q using sorm2r. | -c | * Copy the first NCONV columns of VQ into Z. | -c | * Postmultiply Z by R. 
| -c | The N by NCONV matrix Z is now a matrix representation | -c | of the approximate invariant subspace associated with | -c | the Ritz values in workl(iheigr) and workl(iheigi) | -c | The first NCONV columns of V are now approximate Schur | -c | vectors associated with the real upper quasi-triangular | -c | matrix of order NCONV in workl(iuptri) | -c %---------------------------------------------------------% -c - call sorm2r('Right', 'Notranspose', n , - & ncv , nconv , workl(invsub), - & ldq , workev , v , - & ldv , workd(n+1) , ierr) - call slacpy('All', n, nconv, v, ldv, z, ldz) -c - do 20 j=1, nconv -c -c %---------------------------------------------------% -c | Perform both a column and row scaling if the | -c | diagonal element of workl(invsub,ldq) is negative | -c | I'm lazy and don't take advantage of the upper | -c | quasi-triangular form of workl(iuptri,ldq) | -c | Note that since Q is orthogonal, R is a diagonal | -c | matrix consisting of plus or minus ones | -c %---------------------------------------------------% -c - if (workl(invsub+(j-1)*ldq+j-1) .lt. zero) then - call sscal(nconv, -one, workl(iuptri+j-1), ldq) - call sscal(nconv, -one, workl(iuptri+(j-1)*ldq), 1) - end if -c - 20 continue -c - if (howmny .eq. 'A') then -c -c %--------------------------------------------% -c | Compute the NCONV wanted eigenvectors of T | -c | located in workl(iuptri,ldq). | -c %--------------------------------------------% -c - do 30 j=1, ncv - if (j .le. nconv) then - select(j) = .true. - else - select(j) = .false. - end if - 30 continue -c - call strevc('Right', 'Select' , select , - & ncv , workl(iuptri), ldq , - & vl , 1 , workl(invsub), - & ldq , ncv , outncv , - & workev , ierr) -c - if (ierr .ne. 0) then - info = -9 - go to 9000 - end if -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | Euclidean norms are all one. 
LAPACK subroutine | -c | strevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1; | -c %------------------------------------------------% -c - iconj = 0 - do 40 j=1, nconv -c - if ( workl(iheigi+j-1) .eq. zero ) then -c -c %----------------------% -c | real eigenvalue case | -c %----------------------% -c - temp = snrm2( ncv, workl(invsub+(j-1)*ldq), 1 ) - call sscal( ncv, one / temp, - & workl(invsub+(j-1)*ldq), 1 ) -c - else -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c | columns, we further normalize by the | -c | square root of two. | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - temp = slapy2(snrm2(ncv, - & workl(invsub+(j-1)*ldq), - & 1), - & snrm2(ncv, - & workl(invsub+j*ldq), - & 1)) - call sscal(ncv, one/temp, - & workl(invsub+(j-1)*ldq), 1 ) - call sscal(ncv, one/temp, - & workl(invsub+j*ldq), 1 ) - iconj = 1 - else - iconj = 0 - end if -c - end if -c - 40 continue -c - call sgemv('T', ncv, nconv, one, workl(invsub), - & ldq, workl(ihbds), 1, zero, workev, 1) -c - iconj = 0 - do 45 j=1, nconv - if (workl(iheigi+j-1) .ne. zero) then -c -c %-------------------------------------------% -c | Complex conjugate pair case. Note that | -c | since the real and imaginary part of | -c | the eigenvector are stored in consecutive | -c %-------------------------------------------% -c - if (iconj .eq. 0) then - workev(j) = slapy2(workev(j), workev(j+1)) - workev(j+1) = workev(j) - iconj = 1 - else - iconj = 0 - end if - end if - 45 continue -c - if (msglvl .gt. 2) then - call scopy(ncv, workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) - call svout(logfil, ncv, workl(ihbds), ndigit, - & '_neupd: Last row of the eigenvector matrix for T') - if (msglvl .gt. 
3) then - call smout(logfil, ncv, ncv, workl(invsub), ldq, - & ndigit, '_neupd: The eigenvector matrix for T') - end if - end if -c -c %---------------------------------------% -c | Copy Ritz estimates into workl(ihbds) | -c %---------------------------------------% -c - call scopy(nconv, workev, 1, workl(ihbds), 1) -c -c %---------------------------------------------------------% -c | Compute the QR factorization of the eigenvector matrix | -c | associated with leading portion of T in the first NCONV | -c | columns of workl(invsub,ldq). | -c %---------------------------------------------------------% -c - call sgeqr2(ncv, nconv , workl(invsub), - & ldq, workev, workev(ncv+1), - & ierr) -c -c %----------------------------------------------% -c | * Postmultiply Z by Q. | -c | * Postmultiply Z by R. | -c | The N by NCONV matrix Z is now contains the | -c | Ritz vectors associated with the Ritz values | -c | in workl(iheigr) and workl(iheigi). | -c %----------------------------------------------% -c - call sorm2r('Right', 'Notranspose', n , - & ncv , nconv , workl(invsub), - & ldq , workev , z , - & ldz , workd(n+1) , ierr) -c - call strmm('Right' , 'Upper' , 'No transpose', - & 'Non-unit', n , nconv , - & one , workl(invsub), ldq , - & z , ldz) -c - end if -c - else -c -c %------------------------------------------------------% -c | An approximate invariant subspace is not needed. | -c | Place the Ritz values computed SNAUPD into DR and DI | -c %------------------------------------------------------% -c - call scopy(nconv, workl(ritzr), 1, dr, 1) - call scopy(nconv, workl(ritzi), 1, di, 1) - call scopy(nconv, workl(ritzr), 1, workl(iheigr), 1) - call scopy(nconv, workl(ritzi), 1, workl(iheigi), 1) - call scopy(nconv, workl(bounds), 1, workl(ihbds), 1) - end if -c -c %------------------------------------------------% -c | Transform the Ritz values and possibly vectors | -c | and corresponding error bounds of OP to those | -c | of A*x = lambda*B*x. 
| -c %------------------------------------------------% -c - if (type .eq. 'REGULR') then -c - if (rvec) - & call sscal(ncv, rnorm, workl(ihbds), 1) -c - else -c -c %---------------------------------------% -c | A spectral transformation was used. | -c | * Determine the Ritz estimates of the | -c | Ritz values in the original system. | -c %---------------------------------------% -c - if (type .eq. 'SHIFTI') then -c - if (rvec) - & call sscal(ncv, rnorm, workl(ihbds), 1) -c - do 50 k=1, ncv - temp = slapy2( workl(iheigr+k-1), - & workl(iheigi+k-1) ) - workl(ihbds+k-1) = abs( workl(ihbds+k-1) ) - & / temp / temp - 50 continue -c - else if (type .eq. 'REALPT') then -c - do 60 k=1, ncv - 60 continue -c - else if (type .eq. 'IMAGPT') then -c - do 70 k=1, ncv - 70 continue -c - end if -c -c %-----------------------------------------------------------% -c | * Transform the Ritz values back to the original system. | -c | For TYPE = 'SHIFTI' the transformation is | -c | lambda = 1/theta + sigma | -c | For TYPE = 'REALPT' or 'IMAGPT' the user must from | -c | Rayleigh quotients or a projection. See remark 3 above.| -c | NOTES: | -c | *The Ritz vectors are not affected by the transformation. | -c %-----------------------------------------------------------% -c - if (type .eq. 'SHIFTI') then -c - do 80 k=1, ncv - temp = slapy2( workl(iheigr+k-1), - & workl(iheigi+k-1) ) - workl(iheigr+k-1) = workl(iheigr+k-1)/temp/temp - & + sigmar - workl(iheigi+k-1) = -workl(iheigi+k-1)/temp/temp - & + sigmai - 80 continue -c - call scopy(nconv, workl(iheigr), 1, dr, 1) - call scopy(nconv, workl(iheigi), 1, di, 1) -c - else if (type .eq. 'REALPT' .or. type .eq. 'IMAGPT') then -c - call scopy(nconv, workl(iheigr), 1, dr, 1) - call scopy(nconv, workl(iheigi), 1, di, 1) -c - end if -c - end if -c - if (type .eq. 'SHIFTI' .and. msglvl .gt. 
1) then - call svout(logfil, nconv, dr, ndigit, - & '_neupd: Untransformed real part of the Ritz valuess.') - call svout (logfil, nconv, di, ndigit, - & '_neupd: Untransformed imag part of the Ritz valuess.') - call svout(logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Ritz estimates of untransformed Ritz values.') - else if (type .eq. 'REGULR' .and. msglvl .gt. 1) then - call svout(logfil, nconv, dr, ndigit, - & '_neupd: Real parts of converged Ritz values.') - call svout (logfil, nconv, di, ndigit, - & '_neupd: Imag parts of converged Ritz values.') - call svout(logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Associated Ritz estimates.') - end if -c -c %-------------------------------------------------% -c | Eigenvector Purification step. Formally perform | -c | one of inverse subspace iteration. Only used | -c | for MODE = 2. | -c %-------------------------------------------------% -c - if (rvec .and. howmny .eq. 'A' .and. type .eq. 'SHIFTI') then -c -c %------------------------------------------------% -c | Purify the computed Ritz vectors by adding a | -c | little bit of the residual vector: | -c | T | -c | resid(:)*( e s ) / theta | -c | NCV | -c | where H s = s theta. Remember that when theta | -c | has nonzero imaginary part, the corresponding | -c | Ritz vector is stored across two columns of Z. | -c %------------------------------------------------% -c - iconj = 0 - do 110 j=1, nconv - if (workl(iheigi+j-1) .eq. zero) then - workev(j) = workl(invsub+(j-1)*ldq+ncv-1) / - & workl(iheigr+j-1) - else if (iconj .eq. 
0) then - temp = slapy2( workl(iheigr+j-1), workl(iheigi+j-1) ) - workev(j) = ( workl(invsub+(j-1)*ldq+ncv-1) * - & workl(iheigr+j-1) + - & workl(invsub+j*ldq+ncv-1) * - & workl(iheigi+j-1) ) / temp / temp - workev(j+1) = ( workl(invsub+j*ldq+ncv-1) * - & workl(iheigr+j-1) - - & workl(invsub+(j-1)*ldq+ncv-1) * - & workl(iheigi+j-1) ) / temp / temp - iconj = 1 - else - iconj = 0 - end if - 110 continue -c -c %---------------------------------------% -c | Perform a rank one update to Z and | -c | purify all the Ritz vectors together. | -c %---------------------------------------% -c - call sger(n, nconv, one, resid, 1, workev, 1, z, ldz) -c - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of SNEUPD | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sngets.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sngets.f deleted file mode 100644 index b4d7c155e0..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sngets.f +++ /dev/null @@ -1,231 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: sngets -c -c\Description: -c Given the eigenvalues of the upper Hessenberg matrix H, -c computes the NP shifts AMU that are zeros of the polynomial of -c degree NP which filters out components of the unwanted eigenvectors -c corresponding to the AMU's based on some given criteria. -c -c NOTE: call this even in the case of user specified shifts in order -c to sort the eigenvalues, and error bounds of H for later use. -c -c\Usage: -c call sngets -c ( ISHIFT, WHICH, KEV, NP, RITZR, RITZI, BOUNDS, SHIFTR, SHIFTI ) -c -c\Arguments -c ISHIFT Integer. (INPUT) -c Method for selecting the implicit shifts at each iteration. -c ISHIFT = 0: user specified shifts -c ISHIFT = 1: exact shift with respect to the matrix H. -c -c WHICH Character*2. (INPUT) -c Shift selection criteria. 
-c 'LM' -> want the KEV eigenvalues of largest magnitude. -c 'SM' -> want the KEV eigenvalues of smallest magnitude. -c 'LR' -> want the KEV eigenvalues of largest real part. -c 'SR' -> want the KEV eigenvalues of smallest real part. -c 'LI' -> want the KEV eigenvalues of largest imaginary part. -c 'SI' -> want the KEV eigenvalues of smallest imaginary part. -c -c KEV Integer. (INPUT/OUTPUT) -c INPUT: KEV+NP is the size of the matrix H. -c OUTPUT: Possibly increases KEV by one to keep complex conjugate -c pairs together. -c -c NP Integer. (INPUT/OUTPUT) -c Number of implicit shifts to be computed. -c OUTPUT: Possibly decreases NP by one to keep complex conjugate -c pairs together. -c -c RITZR, Real array of length KEV+NP. (INPUT/OUTPUT) -c RITZI On INPUT, RITZR and RITZI contain the real and imaginary -c parts of the eigenvalues of H. -c On OUTPUT, RITZR and RITZI are sorted so that the unwanted -c eigenvalues are in the first NP locations and the wanted -c portion is in the last KEV locations. When exact shifts are -c selected, the unwanted part corresponds to the shifts to -c be applied. Also, if ISHIFT .eq. 1, the unwanted eigenvalues -c are further sorted so that the ones with largest Ritz values -c are first. -c -c BOUNDS Real array of length KEV+NP. (INPUT/OUTPUT) -c Error bounds corresponding to the ordering in RITZ. -c -c SHIFTR, SHIFTI *** USE deprecated as of version 2.1. *** -c -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c ssortc ARPACK sorting routine. -c scopy Level 1 BLAS that copies one vector to another . -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: ngets.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c 1. xxxx -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine sngets ( ishift, which, kev, np, ritzr, ritzi, bounds, - & shiftr, shifti ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - integer ishift, kev, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & bounds(kev+np), ritzr(kev+np), ritzi(kev+np), - & shiftr(1), shifti(1) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0, zero = 0.0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy, ssortc, second -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mngets -c -c %----------------------------------------------------% -c | LM, SM, LR, SR, LI, SI case. | -c | Sort the eigenvalues of H into the desired order | -c | and apply the resulting order to BOUNDS. | -c | The eigenvalues are sorted so that the wanted part | -c | are always in the last KEV locations. 
| -c | We first do a pre-processing sort in order to keep | -c | complex conjugate pairs together | -c %----------------------------------------------------% -c - if (which .eq. 'LM') then - call ssortc ('LR', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'SM') then - call ssortc ('SR', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'LR') then - call ssortc ('LM', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'SR') then - call ssortc ('SM', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'LI') then - call ssortc ('LM', .true., kev+np, ritzr, ritzi, bounds) - else if (which .eq. 'SI') then - call ssortc ('SM', .true., kev+np, ritzr, ritzi, bounds) - end if -c - call ssortc (which, .true., kev+np, ritzr, ritzi, bounds) -c -c %-------------------------------------------------------% -c | Increase KEV by one if the ( ritzr(np),ritzi(np) ) | -c | = ( ritzr(np+1),-ritzi(np+1) ) and ritz(np) .ne. zero | -c | Accordingly decrease NP by one. In other words keep | -c | complex conjugate pairs together. | -c %-------------------------------------------------------% -c - if ( ( ritzr(np+1) - ritzr(np) ) .eq. zero - & .and. ( ritzi(np+1) + ritzi(np) ) .eq. zero ) then - np = np - 1 - kev = kev + 1 - end if -c - if ( ishift .eq. 1 ) then -c -c %-------------------------------------------------------% -c | Sort the unwanted Ritz values used as shifts so that | -c | the ones with largest Ritz estimates are first | -c | This will tend to minimize the effects of the | -c | forward instability of the iteration when they shifts | -c | are applied in subroutine snapps. | -c | Be careful and use 'SR' since we want to sort BOUNDS! | -c %-------------------------------------------------------% -c - call ssortc ( 'SR', .true., np, bounds, ritzr, ritzi ) - end if -c - call second (t1) - tngets = tngets + (t1 - t0) -c - if (msglvl .gt. 
0) then - call ivout (logfil, 1, kev, ndigit, '_ngets: KEV is') - call ivout (logfil, 1, np, ndigit, '_ngets: NP is') - call svout (logfil, kev+np, ritzr, ndigit, - & '_ngets: Eigenvalues of current H matrix -- real part') - call svout (logfil, kev+np, ritzi, ndigit, - & '_ngets: Eigenvalues of current H matrix -- imag part') - call svout (logfil, kev+np, bounds, ndigit, - & '_ngets: Ritz estimates of the current KEV+NP Ritz values') - end if -c - return -c -c %---------------% -c | End of sngets | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaitr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaitr.f deleted file mode 100644 index a1c810e9f4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaitr.f +++ /dev/null @@ -1,853 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssaitr -c -c\Description: -c Reverse communication interface for applying NP additional steps to -c a K step symmetric Arnoldi factorization. -c -c Input: OP*V_{k} - V_{k}*H = r_{k}*e_{k}^T -c -c with (V_{k}^T)*B*V_{k} = I, (V_{k}^T)*B*r_{k} = 0. -c -c Output: OP*V_{k+p} - V_{k+p}*H = r_{k+p}*e_{k+p}^T -c -c with (V_{k+p}^T)*B*V_{k+p} = I, (V_{k+p}^T)*B*r_{k+p} = 0. -c -c where OP and B are as in ssaupd. The B-norm of r_{k+p} is also -c computed and returned. -c -c\Usage: -c call ssaitr -c ( IDO, BMAT, N, K, NP, MODE, RESID, RNORM, V, LDV, H, LDH, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c This is for the restart phase to force the new -c starting vector into the range of OP. 
-c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y, -c IPNTR(3) is the pointer into WORK for B * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c When the routine is used in the "shift-and-invert" mode, the -c vector B * Q is already available and does not need to be -c recomputed in forming OP * Q. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of matrix B that defines the -c semi-inner product for the operator OP. See ssaupd. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*M*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c K Integer. (INPUT) -c Current order of H and the number of columns of V. -c -c NP Integer. (INPUT) -c Number of additional Arnoldi steps to take. -c -c MODE Integer. (INPUT) -c Signifies which form for "OP". If MODE=2 then -c a reduction in the number of B matrix vector multiplies -c is possible since the B-norm of OP*x is equivalent to -c the inv(B)-norm of A*x. -c -c RESID Real array of length N. (INPUT/OUTPUT) -c On INPUT: RESID contains the residual vector r_{k}. -c On OUTPUT: RESID contains the residual vector r_{k+p}. -c -c RNORM Real scalar. (INPUT/OUTPUT) -c On INPUT the B-norm of r_{k}. -c On OUTPUT the B-norm of the updated residual r_{k+p}. -c -c V Real N by K+NP array. (INPUT/OUTPUT) -c On INPUT: V contains the Arnoldi vectors in the first K -c columns. -c On OUTPUT: V contains the new NP Arnoldi vectors in the next -c NP columns. The first K columns are unchanged. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Real (K+NP) by 2 array. 
(INPUT/OUTPUT) -c H is used to store the generated symmetric tridiagonal matrix -c with the subdiagonal in the first column starting at H(2,1) -c and the main diagonal in the second column. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORK for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Real work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The calling program should not -c use WORKD as temporary workspace during the iteration !!!!!! -c On INPUT, WORKD(1:N) = B*RESID where RESID is associated -c with the K step Arnoldi factorization. Used to save some -c computation at the first step. -c On OUTPUT, WORKD(1:N) = B*RESID where RESID is associated -c with the K+NP step Arnoldi factorization. -c -c INFO Integer. (OUTPUT) -c = 0: Normal exit. -c > 0: Size of an invariant subspace of OP is found that is -c less than K + NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c sgetv0 ARPACK routine to generate the initial vector. -c ivout ARPACK utility routine that prints integers. -c smout ARPACK utility routine that prints matrices. -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c slascl LAPACK routine for careful scaling of a matrix. -c sgemv Level 2 BLAS routine for matrix vector multiplication. 
-c saxpy Level 1 BLAS that computes a vector triad. -c sscal Level 1 BLAS that scales a vector. -c scopy Level 1 BLAS that copies one vector to another . -c sdot Level 1 BLAS that computes the scalar product of two vectors. -c snrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/93: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: saitr.F SID: 2.6 DATE OF SID: 8/28/96 RELEASE: 2 -c -c\Remarks -c The algorithm implemented is: -c -c restart = .false. -c Given V_{k} = [v_{1}, ..., v_{k}], r_{k}; -c r_{k} contains the initial residual vector even for k = 0; -c Also assume that rnorm = || B*r_{k} || and B*r_{k} are already -c computed by the calling program. -c -c betaj = rnorm ; p_{k+1} = B*r_{k} ; -c For j = k+1, ..., k+np Do -c 1) if ( betaj < tol ) stop or restart depending on j. -c if ( restart ) generate a new starting vector. -c 2) v_{j} = r(j-1)/betaj; V_{j} = [V_{j-1}, v_{j}]; -c p_{j} = p_{j}/betaj -c 3) r_{j} = OP*v_{j} where OP is defined as in ssaupd -c For shift-invert mode p_{j} = B*v_{j} is already available. -c wnorm = || OP*v_{j} || -c 4) Compute the j-th step residual vector. -c w_{j} = V_{j}^T * B * OP * v_{j} -c r_{j} = OP*v_{j} - V_{j} * w_{j} -c alphaj <- j-th component of w_{j} -c rnorm = || r_{j} || -c betaj+1 = rnorm -c If (rnorm > 0.717*wnorm) accept step and go back to 1) -c 5) Re-orthogonalization step: -c s = V_{j}'*B*r_{j} -c r_{j} = r_{j} - V_{j}*s; rnorm1 = || r_{j} || -c alphaj = alphaj + s_{j}; -c 6) Iterative refinement step: -c If (rnorm1 > 0.717*rnorm) then -c rnorm = rnorm1 -c accept step and go back to 1) -c Else -c rnorm = rnorm1 -c If this is the first time in step 6), go to 5) -c Else r_{j} lies in the span of V_{j} numerically. 
-c Set r_{j} = 0 and rnorm = 0; go to 1) -c EndIf -c End Do -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssaitr - & (ido, bmat, n, k, np, mode, resid, rnorm, v, ldv, h, ldh, - & ipntr, workd, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - integer ido, info, k, ldh, ldv, n, mode, np - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Real - & h(ldh,2), resid(n), v(ldv,k+np), workd(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical first, orth1, orth2, rstart, step3, step4 - integer i, ierr, ipj, irj, ivj, iter, itry, j, msglvl, - & infol, jj - Real - & rnorm1, wnorm, safmin, temp1 - save orth1, orth2, rstart, step3, step4, - & ierr, ipj, irj, ivj, iter, itry, j, msglvl, - & rnorm1, safmin, wnorm -c -c %-----------------------% -c | Local Array Arguments | -c %-----------------------% -c - Real - & xtemp(2) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external saxpy, scopy, sscal, sgemv, sgetv0, svout, smout, - & slascl, ivout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & sdot, snrm2, slamch - external sdot, snrm2, slamch -c -c %-----------------% -c | Data statements | -c %-----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then - first = .false. 
-c -c %--------------------------------% -c | safmin = safe minimum is such | -c | that 1/sfmin does not overflow | -c %--------------------------------% -c - safmin = slamch('safmin') - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msaitr -c -c %------------------------------% -c | Initial call to this routine | -c %------------------------------% -c - info = 0 - step3 = .false. - step4 = .false. - rstart = .false. - orth1 = .false. - orth2 = .false. -c -c %--------------------------------% -c | Pointer to the current step of | -c | the factorization to build | -c %--------------------------------% -c - j = k + 1 -c -c %------------------------------------------% -c | Pointers used for reverse communication | -c | when using WORKD. | -c %------------------------------------------% -c - ipj = 1 - irj = ipj + n - ivj = irj + n - end if -c -c %-------------------------------------------------% -c | When in reverse communication mode one of: | -c | STEP3, STEP4, ORTH1, ORTH2, RSTART | -c | will be .true. | -c | STEP3: return from computing OP*v_{j}. | -c | STEP4: return from computing B-norm of OP*v_{j} | -c | ORTH1: return from computing B-norm of r_{j+1} | -c | ORTH2: return from computing B-norm of | -c | correction to the residual vector. | -c | RSTART: return from OP computations needed by | -c | sgetv0. | -c %-------------------------------------------------% -c - if (step3) go to 50 - if (step4) go to 60 - if (orth1) go to 70 - if (orth2) go to 90 - if (rstart) go to 30 -c -c %------------------------------% -c | Else this is the first step. 
| -c %------------------------------% -c -c %--------------------------------------------------------------% -c | | -c | A R N O L D I I T E R A T I O N L O O P | -c | | -c | Note: B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | -c %--------------------------------------------------------------% -c - 1000 continue -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, j, ndigit, - & '_saitr: generating Arnoldi vector no.') - call svout (logfil, 1, rnorm, ndigit, - & '_saitr: B-norm of the current residual =') - end if -c -c %---------------------------------------------------------% -c | Check for exact zero. Equivalent to determing whether a | -c | j-step Arnoldi factorization is present. | -c %---------------------------------------------------------% -c - if (rnorm .gt. zero) go to 40 -c -c %---------------------------------------------------% -c | Invariant subspace found, generate a new starting | -c | vector which is orthogonal to the current Arnoldi | -c | basis and continue the iteration. | -c %---------------------------------------------------% -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_saitr: ****** restart at step ******') - end if -c -c %---------------------------------------------% -c | ITRY is the loop variable that controls the | -c | maximum amount of times that a restart is | -c | attempted. NRSTRT is used by stat.h | -c %---------------------------------------------% -c - nrstrt = nrstrt + 1 - itry = 1 - 20 continue - rstart = .true. - ido = 0 - 30 continue -c -c %--------------------------------------% -c | If in reverse communication mode and | -c | RSTART = .true. flow returns here. | -c %--------------------------------------% -c - call sgetv0 (ido, bmat, itry, .false., n, j, v, ldv, - & resid, rnorm, ipntr, workd, ierr) - if (ido .ne. 99) go to 9000 - if (ierr .lt. 0) then - itry = itry + 1 - if (itry .le. 
3) go to 20 -c -c %------------------------------------------------% -c | Give up after several restart attempts. | -c | Set INFO to the size of the invariant subspace | -c | which spans OP and exit. | -c %------------------------------------------------% -c - info = j - 1 - call second (t1) - tsaitr = tsaitr + (t1 - t0) - ido = 99 - go to 9000 - end if -c - 40 continue -c -c %---------------------------------------------------------% -c | STEP 2: v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm | -c | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | -c | when reciprocating a small RNORM, test against lower | -c | machine bound. | -c %---------------------------------------------------------% -c - call scopy (n, resid, 1, v(1,j), 1) - if (rnorm .ge. safmin) then - temp1 = one / rnorm - call sscal (n, temp1, v(1,j), 1) - call sscal (n, temp1, workd(ipj), 1) - else -c -c %-----------------------------------------% -c | To scale both v_{j} and p_{j} carefully | -c | use LAPACK routine SLASCL | -c %-----------------------------------------% -c - call slascl ('General', i, i, rnorm, one, n, 1, - & v(1,j), n, infol) - call slascl ('General', i, i, rnorm, one, n, 1, - & workd(ipj), n, infol) - end if -c -c %------------------------------------------------------% -c | STEP 3: r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | -c | Note that this is not quite yet r_{j}. See STEP 4 | -c %------------------------------------------------------% -c - step3 = .true. - nopx = nopx + 1 - call second (t2) - call scopy (n, v(1,j), 1, workd(ivj), 1) - ipntr(1) = ivj - ipntr(2) = irj - ipntr(3) = ipj - ido = 1 -c -c %-----------------------------------% -c | Exit in order to compute OP*v_{j} | -c %-----------------------------------% -c - go to 9000 - 50 continue -c -c %-----------------------------------% -c | Back from reverse communication; | -c | WORKD(IRJ:IRJ+N-1) := OP*v_{j}. 
| -c %-----------------------------------% -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) -c - step3 = .false. -c -c %------------------------------------------% -c | Put another copy of OP*v_{j} into RESID. | -c %------------------------------------------% -c - call scopy (n, workd(irj), 1, resid, 1) -c -c %-------------------------------------------% -c | STEP 4: Finish extending the symmetric | -c | Arnoldi to length j. If MODE = 2 | -c | then B*OP = B*inv(B)*A = A and | -c | we don't need to compute B*OP. | -c | NOTE: If MODE = 2 WORKD(IVJ:IVJ+N-1) is | -c | assumed to have A*v_{j}. | -c %-------------------------------------------% -c - if (mode .eq. 2) go to 65 - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - step4 = .true. - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-------------------------------------% -c | Exit in order to compute B*OP*v_{j} | -c %-------------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy(n, resid, 1 , workd(ipj), 1) - end if - 60 continue -c -c %-----------------------------------% -c | Back from reverse communication; | -c | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j}. | -c %-----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - step4 = .false. -c -c %-------------------------------------% -c | The following is needed for STEP 5. | -c | Compute the B-norm of OP*v_{j}. | -c %-------------------------------------% -c - 65 continue - if (mode .eq. 2) then -c -c %----------------------------------% -c | Note that the B-norm of OP*v_{j} | -c | is the inv(B)-norm of A*v_{j}. | -c %----------------------------------% -c - wnorm = sdot (n, resid, 1, workd(ivj), 1) - wnorm = sqrt(abs(wnorm)) - else if (bmat .eq. 'G') then - wnorm = sdot (n, resid, 1, workd(ipj), 1) - wnorm = sqrt(abs(wnorm)) - else if (bmat .eq. 
'I') then - wnorm = snrm2(n, resid, 1) - end if -c -c %-----------------------------------------% -c | Compute the j-th residual corresponding | -c | to the j step factorization. | -c | Use Classical Gram Schmidt and compute: | -c | w_{j} <- V_{j}^T * B * OP * v_{j} | -c | r_{j} <- OP*v_{j} - V_{j} * w_{j} | -c %-----------------------------------------% -c -c -c %------------------------------------------% -c | Compute the j Fourier coefficients w_{j} | -c | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}. | -c %------------------------------------------% -c - if (mode .ne. 2 ) then - call sgemv('T', n, j, one, v, ldv, workd(ipj), 1, zero, - & workd(irj), 1) - else if (mode .eq. 2) then - call sgemv('T', n, j, one, v, ldv, workd(ivj), 1, zero, - & workd(irj), 1) - end if -c -c %--------------------------------------% -c | Orthgonalize r_{j} against V_{j}. | -c | RESID contains OP*v_{j}. See STEP 3. | -c %--------------------------------------% -c - call sgemv('N', n, j, -one, v, ldv, workd(irj), 1, one, - & resid, 1) -c -c %--------------------------------------% -c | Extend H to have j rows and columns. | -c %--------------------------------------% -c - h(j,2) = workd(irj + j - 1) - if (j .eq. 1 .or. rstart) then - h(j,1) = zero - else - h(j,1) = rnorm - end if - call second (t4) -c - orth1 = .true. - iter = 0 -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*r_{j} | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd(ipj), 1) - end if - 70 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH1 = .true. | -c | WORKD(IPJ:IPJ+N-1) := B*r_{j}. | -c %---------------------------------------------------% -c - if (bmat .eq. 
'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - orth1 = .false. -c -c %------------------------------% -c | Compute the B-norm of r_{j}. | -c %------------------------------% -c - if (bmat .eq. 'G') then - rnorm = sdot (n, resid, 1, workd(ipj), 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = snrm2(n, resid, 1) - end if -c -c %-----------------------------------------------------------% -c | STEP 5: Re-orthogonalization / Iterative refinement phase | -c | Maximum NITER_ITREF tries. | -c | | -c | s = V_{j}^T * B * r_{j} | -c | r_{j} = r_{j} - V_{j}*s | -c | alphaj = alphaj + s_{j} | -c | | -c | The stopping criteria used for iterative refinement is | -c | discussed in Parlett's book SEP, page 107 and in Gragg & | -c | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990. | -c | Determine if we need to correct the residual. The goal is | -c | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} || | -c %-----------------------------------------------------------% -c - if (rnorm .gt. 0.717*wnorm) go to 100 - nrorth = nrorth + 1 -c -c %---------------------------------------------------% -c | Enter the Iterative refinement phase. If further | -c | refinement is necessary, loop back here. The loop | -c | variable is ITER. Perform a step of Classical | -c | Gram-Schmidt using all the Arnoldi vectors V_{j} | -c %---------------------------------------------------% -c - 80 continue -c - if (msglvl .gt. 2) then - xtemp(1) = wnorm - xtemp(2) = rnorm - call svout (logfil, 2, xtemp, ndigit, - & '_saitr: re-orthonalization ; wnorm and rnorm are') - end if -c -c %----------------------------------------------------% -c | Compute V_{j}^T * B * r_{j}. | -c | WORKD(IRJ:IRJ+J-1) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1). 
| -c %----------------------------------------------------% -c - call sgemv ('T', n, j, one, v, ldv, workd(ipj), 1, - & zero, workd(irj), 1) -c -c %----------------------------------------------% -c | Compute the correction to the residual: | -c | r_{j} = r_{j} - V_{j} * WORKD(IRJ:IRJ+J-1). | -c | The correction to H is v(:,1:J)*H(1:J,1:J) + | -c | v(:,1:J)*WORKD(IRJ:IRJ+J-1)*e'_j, but only | -c | H(j,j) is updated. | -c %----------------------------------------------% -c - call sgemv ('N', n, j, -one, v, ldv, workd(irj), 1, - & one, resid, 1) -c - if (j .eq. 1 .or. rstart) h(j,1) = zero - h(j,2) = h(j,2) + workd(irj + j - 1) -c - orth2 = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-----------------------------------% -c | Exit in order to compute B*r_{j}. | -c | r_{j} is the corrected residual. | -c %-----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd(ipj), 1) - end if - 90 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH2 = .true. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c -c %-----------------------------------------------------% -c | Compute the B-norm of the corrected residual r_{j}. | -c %-----------------------------------------------------% -c - if (bmat .eq. 'G') then - rnorm1 = sdot (n, resid, 1, workd(ipj), 1) - rnorm1 = sqrt(abs(rnorm1)) - else if (bmat .eq. 'I') then - rnorm1 = snrm2(n, resid, 1) - end if -c - if (msglvl .gt. 0 .and. iter .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_saitr: Iterative refinement for Arnoldi residual') - if (msglvl .gt. 
2) then - xtemp(1) = rnorm - xtemp(2) = rnorm1 - call svout (logfil, 2, xtemp, ndigit, - & '_saitr: iterative refinement ; rnorm and rnorm1 are') - end if - end if -c -c %-----------------------------------------% -c | Determine if we need to perform another | -c | step of re-orthogonalization. | -c %-----------------------------------------% -c - if (rnorm1 .gt. 0.717*rnorm) then -c -c %--------------------------------% -c | No need for further refinement | -c %--------------------------------% -c - rnorm = rnorm1 -c - else -c -c %-------------------------------------------% -c | Another step of iterative refinement step | -c | is required. NITREF is used by stat.h | -c %-------------------------------------------% -c - nitref = nitref + 1 - rnorm = rnorm1 - iter = iter + 1 - if (iter .le. 1) go to 80 -c -c %-------------------------------------------------% -c | Otherwise RESID is numerically in the span of V | -c %-------------------------------------------------% -c - do 95 jj = 1, n - resid(jj) = zero - 95 continue - rnorm = zero - end if -c -c %----------------------------------------------% -c | Branch here directly if iterative refinement | -c | wasn't necessary or after at most NITER_REF | -c | steps of iterative refinement. | -c %----------------------------------------------% -c - 100 continue -c - rstart = .false. - orth2 = .false. -c - call second (t5) - titref = titref + (t5 - t4) -c -c %----------------------------------------------------------% -c | Make sure the last off-diagonal element is non negative | -c | If not perform a similarity transformation on H(1:j,1:j) | -c | and scale v(:,j) by -1. | -c %----------------------------------------------------------% -c - if (h(j,1) .lt. zero) then - h(j,1) = -h(j,1) - if ( j .lt. 
k+np) then - call sscal(n, -one, v(1,j+1), 1) - else - call sscal(n, -one, resid, 1) - end if - end if -c -c %------------------------------------% -c | STEP 6: Update j = j+1; Continue | -c %------------------------------------% -c - j = j + 1 - if (j .gt. k+np) then - call second (t1) - tsaitr = tsaitr + (t1 - t0) - ido = 99 -c - if (msglvl .gt. 1) then - call svout (logfil, k+np, h(1,2), ndigit, - & '_saitr: main diagonal of matrix H of step K+NP.') - if (k+np .gt. 1) then - call svout (logfil, k+np-1, h(2,1), ndigit, - & '_saitr: sub diagonal of matrix H of step K+NP.') - end if - end if -c - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Loop back to extend the factorization by another step. | -c %--------------------------------------------------------% -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 9000 continue - return -c -c %---------------% -c | End of ssaitr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssapps.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssapps.f deleted file mode 100644 index b1eb5e343f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssapps.f +++ /dev/null @@ -1,516 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssapps -c -c\Description: -c Given the Arnoldi factorization -c -c A*V_{k} - V_{k}*H_{k} = r_{k+p}*e_{k+p}^T, -c -c apply NP shifts implicitly resulting in -c -c A*(V_{k}*Q) - (V_{k}*Q)*(Q^T* H_{k}*Q) = r_{k+p}*e_{k+p}^T * Q -c -c where Q is an orthogonal matrix of order KEV+NP. Q is the product of -c rotations resulting from the NP bulge chasing sweeps. 
The updated Arnoldi -c factorization becomes: -c -c A*VNEW_{k} - VNEW_{k}*HNEW_{k} = rnew_{k}*e_{k}^T. -c -c\Usage: -c call ssapps -c ( N, KEV, NP, SHIFT, V, LDV, H, LDH, RESID, Q, LDQ, WORKD ) -c -c\Arguments -c N Integer. (INPUT) -c Problem size, i.e. dimension of matrix A. -c -c KEV Integer. (INPUT) -c INPUT: KEV+NP is the size of the input matrix H. -c OUTPUT: KEV is the size of the updated matrix HNEW. -c -c NP Integer. (INPUT) -c Number of implicit shifts to be applied. -c -c SHIFT Real array of length NP. (INPUT) -c The shifts to be applied. -c -c V Real N by (KEV+NP) array. (INPUT/OUTPUT) -c INPUT: V contains the current KEV+NP Arnoldi vectors. -c OUTPUT: VNEW = V(1:n,1:KEV); the updated Arnoldi vectors -c are in the first KEV columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Real (KEV+NP) by 2 array. (INPUT/OUTPUT) -c INPUT: H contains the symmetric tridiagonal matrix of the -c Arnoldi factorization with the subdiagonal in the 1st column -c starting at H(2,1) and the main diagonal in the 2nd column. -c OUTPUT: H contains the updated tridiagonal matrix in the -c KEV leading submatrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RESID Real array of length (N). (INPUT/OUTPUT) -c INPUT: RESID contains the the residual vector r_{k+p}. -c OUTPUT: RESID is the updated residual vector rnew_{k}. -c -c Q Real KEV+NP by KEV+NP work array. (WORKSPACE) -c Work array used to accumulate the rotations during the bulge -c chase sweep. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKD Real work array of length 2*N. (WORKSPACE) -c Distributed array used in the application of the accumulated -c orthogonal matrix Q. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\References: -c 1. D.C. 
Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c slartg LAPACK Givens rotation construction routine. -c slacpy LAPACK matrix copy routine. -c slaset LAPACK matrix initialization routine. -c sgemv Level 2 BLAS routine for matrix vector multiplication. -c saxpy Level 1 BLAS that computes a vector triad. -c scopy Level 1 BLAS that copies one vector to another. -c sscal Level 1 BLAS that scales a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/16/93: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: sapps.F SID: 2.6 DATE OF SID: 3/28/97 RELEASE: 2 -c -c\Remarks -c 1. In this version, each shift is applied to all the subblocks of -c the tridiagonal matrix H and not just to the submatrix that it -c comes from. This routine assumes that the subdiagonal elements -c of H that are stored in h(1:kev+np,1) are nonegative upon input -c and enforce this condition upon output. This version incorporates -c deflation. See code for documentation. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssapps - & ( n, kev, np, shift, v, ldv, h, ldh, resid, q, ldq, workd ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer kev, ldh, ldq, ldv, n, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & h(ldh,2), q(ldq,kev+np), resid(n), shift(np), - & v(ldv,kev+np), workd(2*n) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, iend, istart, itop, j, jj, kplusp, msglvl - logical first - Real - & a1, a2, a3, a4, big, c, epsmch, f, g, r, s - save epsmch, first -c -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external saxpy, scopy, sscal, slacpy, slartg, slaset, svout, - & ivout, second, sgemv -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slamch - external slamch -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs -c -c %----------------% -c | Data statments | -c %----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then - epsmch = slamch('Epsilon-Machine') - first = .false. 
- end if - itop = 1 -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msapps -c - kplusp = kev + np -c -c %----------------------------------------------% -c | Initialize Q to the identity matrix of order | -c | kplusp used to accumulate the rotations. | -c %----------------------------------------------% -c - call slaset ('All', kplusp, kplusp, zero, one, q, ldq) -c -c %----------------------------------------------% -c | Quick return if there are no shifts to apply | -c %----------------------------------------------% -c - if (np .eq. 0) go to 9000 -c -c %----------------------------------------------------------% -c | Apply the np shifts implicitly. Apply each shift to the | -c | whole matrix and not just to the submatrix from which it | -c | comes. | -c %----------------------------------------------------------% -c - do 90 jj = 1, np -c - istart = itop -c -c %----------------------------------------------------------% -c | Check for splitting and deflation. Currently we consider | -c | an off-diagonal element h(i+1,1) negligible if | -c | h(i+1,1) .le. epsmch*( |h(i,2)| + |h(i+1,2)| ) | -c | for i=1:KEV+NP-1. | -c | If above condition tests true then we set h(i+1,1) = 0. | -c | Note that h(1:KEV+NP,1) are assumed to be non negative. | -c %----------------------------------------------------------% -c - 20 continue -c -c %------------------------------------------------% -c | The following loop exits early if we encounter | -c | a negligible off diagonal element. | -c %------------------------------------------------% -c - do 30 i = istart, kplusp-1 - big = abs(h(i,2)) + abs(h(i+1,2)) - if (h(i+1,1) .le. epsmch*big) then - if (msglvl .gt. 
0) then - call ivout (logfil, 1, i, ndigit, - & '_sapps: deflation at row/column no.') - call ivout (logfil, 1, jj, ndigit, - & '_sapps: occured before shift number.') - call svout (logfil, 1, h(i+1,1), ndigit, - & '_sapps: the corresponding off diagonal element') - end if - h(i+1,1) = zero - iend = i - go to 40 - end if - 30 continue - iend = kplusp - 40 continue -c - if (istart .lt. iend) then -c -c %--------------------------------------------------------% -c | Construct the plane rotation G'(istart,istart+1,theta) | -c | that attempts to drive h(istart+1,1) to zero. | -c %--------------------------------------------------------% -c - f = h(istart,2) - shift(jj) - g = h(istart+1,1) - call slartg (f, g, c, s, r) -c -c %-------------------------------------------------------% -c | Apply rotation to the left and right of H; | -c | H <- G' * H * G, where G = G(istart,istart+1,theta). | -c | This will create a "bulge". | -c %-------------------------------------------------------% -c - a1 = c*h(istart,2) + s*h(istart+1,1) - a2 = c*h(istart+1,1) + s*h(istart+1,2) - a4 = c*h(istart+1,2) - s*h(istart+1,1) - a3 = c*h(istart+1,1) - s*h(istart,2) - h(istart,2) = c*a1 + s*a2 - h(istart+1,2) = c*a4 - s*a3 - h(istart+1,1) = c*a3 + s*a4 -c -c %----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G | -c %----------------------------------------------------% -c - do 60 j = 1, min(istart+jj,kplusp) - a1 = c*q(j,istart) + s*q(j,istart+1) - q(j,istart+1) = - s*q(j,istart) + c*q(j,istart+1) - q(j,istart) = a1 - 60 continue -c -c -c %----------------------------------------------% -c | The following loop chases the bulge created. | -c | Note that the previous rotation may also be | -c | done within the following loop. But it is | -c | kept separate to make the distinction among | -c | the bulge chasing sweeps and the first plane | -c | rotation designed to drive h(istart+1,1) to | -c | zero. 
| -c %----------------------------------------------% -c - do 70 i = istart+1, iend-1 -c -c %----------------------------------------------% -c | Construct the plane rotation G'(i,i+1,theta) | -c | that zeros the i-th bulge that was created | -c | by G(i-1,i,theta). g represents the bulge. | -c %----------------------------------------------% -c - f = h(i,1) - g = s*h(i+1,1) -c -c %----------------------------------% -c | Final update with G(i-1,i,theta) | -c %----------------------------------% -c - h(i+1,1) = c*h(i+1,1) - call slartg (f, g, c, s, r) -c -c %-------------------------------------------% -c | The following ensures that h(1:iend-1,1), | -c | the first iend-2 off diagonal of elements | -c | H, remain non negative. | -c %-------------------------------------------% -c - if (r .lt. zero) then - r = -r - c = -c - s = -s - end if -c -c %--------------------------------------------% -c | Apply rotation to the left and right of H; | -c | H <- G * H * G', where G = G(i,i+1,theta) | -c %--------------------------------------------% -c - h(i,1) = r -c - a1 = c*h(i,2) + s*h(i+1,1) - a2 = c*h(i+1,1) + s*h(i+1,2) - a3 = c*h(i+1,1) - s*h(i,2) - a4 = c*h(i+1,2) - s*h(i+1,1) -c - h(i,2) = c*a1 + s*a2 - h(i+1,2) = c*a4 - s*a3 - h(i+1,1) = c*a3 + s*a4 -c -c %----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G | -c %----------------------------------------------------% -c - do 50 j = 1, min( i+jj, kplusp ) - a1 = c*q(j,i) + s*q(j,i+1) - q(j,i+1) = - s*q(j,i) + c*q(j,i+1) - q(j,i) = a1 - 50 continue -c - 70 continue -c - end if -c -c %--------------------------% -c | Update the block pointer | -c %--------------------------% -c - istart = iend + 1 -c -c %------------------------------------------% -c | Make sure that h(iend,1) is non-negative | -c | If not then set h(iend,1) <-- -h(iend,1) | -c | and negate the last column of Q. 
| -c | We have effectively carried out a | -c | similarity on transformation H | -c %------------------------------------------% -c - if (h(iend,1) .lt. zero) then - h(iend,1) = -h(iend,1) - call sscal(kplusp, -one, q(1,iend), 1) - end if -c -c %--------------------------------------------------------% -c | Apply the same shift to the next block if there is any | -c %--------------------------------------------------------% -c - if (iend .lt. kplusp) go to 20 -c -c %-----------------------------------------------------% -c | Check if we can increase the the start of the block | -c %-----------------------------------------------------% -c - do 80 i = itop, kplusp-1 - if (h(i+1,1) .gt. zero) go to 90 - itop = itop + 1 - 80 continue -c -c %-----------------------------------% -c | Finished applying the jj-th shift | -c %-----------------------------------% -c - 90 continue -c -c %------------------------------------------% -c | All shifts have been applied. Check for | -c | more possible deflation that might occur | -c | after the last shift is applied. | -c %------------------------------------------% -c - do 100 i = itop, kplusp-1 - big = abs(h(i,2)) + abs(h(i+1,2)) - if (h(i+1,1) .le. epsmch*big) then - if (msglvl .gt. 0) then - call ivout (logfil, 1, i, ndigit, - & '_sapps: deflation at row/column no.') - call svout (logfil, 1, h(i+1,1), ndigit, - & '_sapps: the corresponding off diagonal element') - end if - h(i+1,1) = zero - end if - 100 continue -c -c %-------------------------------------------------% -c | Compute the (kev+1)-st column of (V*Q) and | -c | temporarily store the result in WORKD(N+1:2*N). | -c | This is not necessary if h(kev+1,1) = 0. | -c %-------------------------------------------------% -c - if ( h(kev+1,1) .gt. 
zero ) - & call sgemv ('N', n, kplusp, one, v, ldv, - & q(1,kev+1), 1, zero, workd(n+1), 1) -c -c %-------------------------------------------------------% -c | Compute column 1 to kev of (V*Q) in backward order | -c | taking advantage that Q is an upper triangular matrix | -c | with lower bandwidth np. | -c | Place results in v(:,kplusp-kev:kplusp) temporarily. | -c %-------------------------------------------------------% -c - do 130 i = 1, kev - call sgemv ('N', n, kplusp-i+1, one, v, ldv, - & q(1,kev-i+1), 1, zero, workd, 1) - call scopy (n, workd, 1, v(1,kplusp-i+1), 1) - 130 continue -c -c %-------------------------------------------------% -c | Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). | -c %-------------------------------------------------% -c - call slacpy ('All', n, kev, v(1,np+1), ldv, v, ldv) -c -c %--------------------------------------------% -c | Copy the (kev+1)-st column of (V*Q) in the | -c | appropriate place if h(kev+1,1) .ne. zero. | -c %--------------------------------------------% -c - if ( h(kev+1,1) .gt. zero ) - & call scopy (n, workd(n+1), 1, v(1,kev+1), 1) -c -c %-------------------------------------% -c | Update the residual vector: | -c | r <- sigmak*r + betak*v(:,kev+1) | -c | where | -c | sigmak = (e_{kev+p}'*Q)*e_{kev} | -c | betak = e_{kev+1}'*H*e_{kev} | -c %-------------------------------------% -c - call sscal (n, q(kplusp,kev), resid, 1) - if (h(kev+1,1) .gt. zero) - & call saxpy (n, h(kev+1,1), v(1,kev+1), 1, resid, 1) -c - if (msglvl .gt. 1) then - call svout (logfil, 1, q(kplusp,kev), ndigit, - & '_sapps: sigmak of the updated residual vector') - call svout (logfil, 1, h(kev+1,1), ndigit, - & '_sapps: betak of the updated residual vector') - call svout (logfil, kev, h(1,2), ndigit, - & '_sapps: updated main diagonal of H for next iteration') - if (kev .gt. 
1) then - call svout (logfil, kev-1, h(2,1), ndigit, - & '_sapps: updated sub diagonal of H for next iteration') - end if - end if -c - call second (t1) - tsapps = tsapps + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of ssapps | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaup2.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaup2.f deleted file mode 100644 index 42fd768950..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaup2.f +++ /dev/null @@ -1,850 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssaup2 -c -c\Description: -c Intermediate level interface called by ssaupd. -c -c\Usage: -c call ssaup2 -c ( IDO, BMAT, N, WHICH, NEV, NP, TOL, RESID, MODE, IUPD, -c ISHIFT, MXITER, V, LDV, H, LDH, RITZ, BOUNDS, Q, LDQ, WORKL, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c -c IDO, BMAT, N, WHICH, NEV, TOL, RESID: same as defined in ssaupd. -c MODE, ISHIFT, MXITER: see the definition of IPARAM in ssaupd. -c -c NP Integer. (INPUT/OUTPUT) -c Contains the number of implicit shifts to apply during -c each Arnoldi/Lanczos iteration. -c If ISHIFT=1, NP is adjusted dynamically at each iteration -c to accelerate convergence and prevent stagnation. -c This is also roughly equal to the number of matrix-vector -c products (involving the operator OP) per Arnoldi iteration. -c The logic for adjusting is contained within the current -c subroutine. -c If ISHIFT=0, NP is the number of shifts the user needs -c to provide via reverse comunication. 0 < NP < NCV-NEV. -c NP may be less than NCV-NEV since a leading block of the current -c upper Tridiagonal matrix has split off and contains "unwanted" -c Ritz values. -c Upon termination of the IRA iteration, NP contains the number -c of "converged" wanted Ritz values. -c -c IUPD Integer. (INPUT) -c IUPD .EQ. 0: use explicit restart instead implicit update. 
-c IUPD .NE. 0: use implicit update. -c -c V Real N by (NEV+NP) array. (INPUT/OUTPUT) -c The Lanczos basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Real (NEV+NP) by 2 array. (OUTPUT) -c H is used to store the generated symmetric tridiagonal matrix -c The subdiagonal is stored in the first column of H starting -c at H(2,1). The main diagonal is stored in the second column -c of H starting at H(1,2). If ssaup2 converges store the -c B-norm of the final residual vector in H(1,1). -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZ Real array of length NEV+NP. (OUTPUT) -c RITZ(1:NEV) contains the computed Ritz values of OP. -c -c BOUNDS Real array of length NEV+NP. (OUTPUT) -c BOUNDS(1:NEV) contain the error bounds corresponding to RITZ. -c -c Q Real (NEV+NP) by (NEV+NP) array. (WORKSPACE) -c Private (replicated) work array used to accumulate the -c rotation in the shift application step. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Real array of length at least 3*(NEV+NP). (INPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. It is used in the computation of the -c tridiagonal eigenvalue problem, the calculation and -c application of the shifts and convergence checking. -c If ISHIFT .EQ. O and IDO .EQ. 3, the first NP locations -c of WORKL are used in reverse communication to hold the user -c supplied shifts. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORKD for -c vectors used by the Lanczos iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in one of -c the spectral transformation modes. 
X is the current -c operand. -c ------------------------------------------------------------- -c -c WORKD Real work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Lanczos iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note in ssaupd. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal return. -c = 1: All possible eigenvalues of OP has been found. -c NP returns the size of the invariant subspace -c spanning the operator OP. -c = 2: No shifts could be applied. -c = -8: Error return from trid. eigenvalue calculation; -c This should never happen. -c = -9: Starting vector is zero. -c = -9999: Could not build an Lanczos factorization. -c Size that was built in returned in NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett, "The Symmetric Eigenvalue Problem". Prentice-Hall, -c 1980. -c 4. B.N. Parlett, B. Nour-Omid, "Towards a Black Box Lanczos Program", -c Computer Physics Communications, 53 (1989), pp 169-179. -c 5. B. Nour-Omid, B.N. Parlett, T. Ericson, P.S. Jensen, "How to -c Implement the Spectral Transformation", Math. Comp., 48 (1987), -c pp 663-673. -c 6. R.G. Grimes, J.G. Lewis and H.D. Simon, "A Shifted Block Lanczos -c Algorithm for Solving Sparse Symmetric Generalized Eigenproblems", -c SIAM J. Matr. 
Anal. Apps., January (1993). -c 7. L. Reichel, W.B. Gragg, "Algorithm 686: FORTRAN Subroutines -c for Updating the QR decomposition", ACM TOMS, December 1990, -c Volume 16 Number 4, pp 369-377. -c -c\Routines called: -c sgetv0 ARPACK initial vector generation routine. -c ssaitr ARPACK Lanczos factorization routine. -c ssapps ARPACK application of implicit shifts routine. -c ssconv ARPACK convergence of Ritz values routine. -c sseigt ARPACK compute Ritz values and error bounds routine. -c ssgets ARPACK reorder Ritz values and error bounds routine. -c ssortr ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c scopy Level 1 BLAS that copies one vector to another. -c sdot Level 1 BLAS that computes the scalar product of two vectors. -c snrm2 Level 1 BLAS that computes the norm of a vector. -c sscal Level 1 BLAS that scales a vector. -c sswap Level 1 BLAS that swaps two vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.4' -c xx/xx/95: Version ' 2.4'. (R.B. 
Lehoucq) -c -c\SCCS Information: @(#) -c FILE: saup2.F SID: 2.7 DATE OF SID: 5/19/98 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssaup2 - & ( ido, bmat, n, which, nev, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, h, ldh, ritz, bounds, - & q, ldq, workl, ipntr, workd, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ishift, iupd, ldh, ldq, ldv, mxiter, - & n, mode, nev, np - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Real - & bounds(nev+np), h(ldh,2), q(ldq,nev+np), resid(n), - & ritz(nev+np), v(ldv,nev+np), workd(3*n), - & workl(3*(nev+np)) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character wprime*2 - logical cnorm, getv0, initv, update, ushift - integer ierr, iter, j, kplusp, msglvl, nconv, nevbef, nev0, - & np0, nptemp, nevd2, nevm2, kp(3) - Real - & rnorm, temp, eps23 - save cnorm, getv0, initv, update, ushift, - & iter, kplusp, msglvl, nconv, nev0, np0, - & rnorm, eps23 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy, sgetv0, ssaitr, sscal, ssconv, sseigt, ssgets, - & ssapps, ssortr, svout, ivout, second, sswap -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & sdot, snrm2, slamch - external sdot, snrm2, slamch -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic min -c -c %-----------------------% -c | Executable 
Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msaup2 -c -c %---------------------------------% -c | Set machine dependent constant. | -c %---------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0/3.0E+0) -c -c %-------------------------------------% -c | nev0 and np0 are integer variables | -c | hold the initial values of NEV & NP | -c %-------------------------------------% -c - nev0 = nev - np0 = np -c -c %-------------------------------------% -c | kplusp is the bound on the largest | -c | Lanczos factorization built. | -c | nconv is the current number of | -c | "converged" eigenvlues. | -c | iter is the counter on the current | -c | iteration step. | -c %-------------------------------------% -c - kplusp = nev0 + np0 - nconv = 0 - iter = 0 -c -c %--------------------------------------------% -c | Set flags for computing the first NEV steps | -c | of the Lanczos factorization. | -c %--------------------------------------------% -c - getv0 = .true. - update = .false. - ushift = .false. - cnorm = .false. -c - if (info .ne. 0) then -c -c %--------------------------------------------% -c | User provides the initial residual vector. | -c %--------------------------------------------% -c - initv = .true. - info = 0 - else - initv = .false. - end if - end if -c -c %---------------------------------------------% -c | Get a possibly random starting vector and | -c | force it into the range of the operator OP. | -c %---------------------------------------------% -c - 10 continue -c - if (getv0) then - call sgetv0 (ido, bmat, 1, initv, n, 1, v, ldv, resid, rnorm, - & ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (rnorm .eq. zero) then -c -c %-----------------------------------------% -c | The initial vector is zero. 
Error exit. | -c %-----------------------------------------% -c - info = -9 - go to 1200 - end if - getv0 = .false. - ido = 0 - end if -c -c %------------------------------------------------------------% -c | Back from reverse communication: continue with update step | -c %------------------------------------------------------------% -c - if (update) go to 20 -c -c %-------------------------------------------% -c | Back from computing user specified shifts | -c %-------------------------------------------% -c - if (ushift) go to 50 -c -c %-------------------------------------% -c | Back from computing residual norm | -c | at the end of the current iteration | -c %-------------------------------------% -c - if (cnorm) go to 100 -c -c %----------------------------------------------------------% -c | Compute the first NEV steps of the Lanczos factorization | -c %----------------------------------------------------------% -c - call ssaitr (ido, bmat, n, 0, nev0, mode, resid, rnorm, v, ldv, - & h, ldh, ipntr, workd, info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then -c -c %-----------------------------------------------------% -c | ssaitr was unable to build an Lanczos factorization | -c | of length NEV0. INFO is returned with the size of | -c | the factorization built. Exit main loop. | -c %-----------------------------------------------------% -c - np = info - mxiter = iter - info = -9999 - go to 1200 - end if -c -c %--------------------------------------------------------------% -c | | -c | M A I N LANCZOS I T E R A T I O N L O O P | -c | Each iteration implicitly restarts the Lanczos | -c | factorization in place. 
| -c | | -c %--------------------------------------------------------------% -c - 1000 continue -c - iter = iter + 1 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, iter, ndigit, - & '_saup2: **** Start of major iteration number ****') - end if - if (msglvl .gt. 1) then - call ivout (logfil, 1, nev, ndigit, - & '_saup2: The length of the current Lanczos factorization') - call ivout (logfil, 1, np, ndigit, - & '_saup2: Extend the Lanczos factorization by') - end if -c -c %------------------------------------------------------------% -c | Compute NP additional steps of the Lanczos factorization. | -c %------------------------------------------------------------% -c - ido = 0 - 20 continue - update = .true. -c - call ssaitr (ido, bmat, n, nev, np, mode, resid, rnorm, v, - & ldv, h, ldh, ipntr, workd, info) -c -c %---------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP and possibly B | -c %---------------------------------------------------% -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then -c -c %-----------------------------------------------------% -c | ssaitr was unable to build an Lanczos factorization | -c | of length NEV0+NP0. INFO is returned with the size | -c | of the factorization built. Exit main loop. | -c %-----------------------------------------------------% -c - np = info - mxiter = iter - info = -9999 - go to 1200 - end if - update = .false. -c - if (msglvl .gt. 1) then - call svout (logfil, 1, rnorm, ndigit, - & '_saup2: Current B-norm of residual for factorization') - end if -c -c %--------------------------------------------------------% -c | Compute the eigenvalues and corresponding error bounds | -c | of the current symmetric tridiagonal matrix. | -c %--------------------------------------------------------% -c - call sseigt (rnorm, kplusp, h, ldh, ritz, bounds, workl, ierr) -c - if (ierr .ne. 
0) then - info = -8 - go to 1200 - end if -c -c %----------------------------------------------------% -c | Make a copy of eigenvalues and corresponding error | -c | bounds obtained from _seigt. | -c %----------------------------------------------------% -c - call scopy(kplusp, ritz, 1, workl(kplusp+1), 1) - call scopy(kplusp, bounds, 1, workl(2*kplusp+1), 1) -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The selection is based on the requested number of | -c | eigenvalues instead of the current NEV and NP to | -c | prevent possible misconvergence. | -c | * Wanted Ritz values := RITZ(NP+1:NEV+NP) | -c | * Shifts := RITZ(1:NP) := WORKL(1:NP) | -c %---------------------------------------------------% -c - nev = nev0 - np = np0 - call ssgets (ishift, which, nev, np, ritz, bounds, workl) -c -c %-------------------% -c | Convergence test. | -c %-------------------% -c - call scopy (nev, bounds(np+1), 1, workl(np+1), 1) - call ssconv (nev, ritz(np+1), workl(np+1), tol, nconv) -c - if (msglvl .gt. 2) then - kp(1) = nev - kp(2) = np - kp(3) = nconv - call ivout (logfil, 3, kp, ndigit, - & '_saup2: NEV, NP, NCONV are') - call svout (logfil, kplusp, ritz, ndigit, - & '_saup2: The eigenvalues of H') - call svout (logfil, kplusp, bounds, ndigit, - & '_saup2: Ritz estimates of the current NCV Ritz values') - end if -c -c %---------------------------------------------------------% -c | Count the number of unwanted Ritz values that have zero | -c | Ritz estimates. If any Ritz estimates are equal to zero | -c | then a leading block of H of order equal to at least | -c | the number of Ritz values with zero Ritz estimates has | -c | split off. None of these Ritz values may be removed by | -c | shifting. Decrease NP the number of shifts to apply. 
If | -c | no shifts may be applied, then prepare to exit | -c %---------------------------------------------------------% -c - nptemp = np - do 30 j=1, nptemp - if (bounds(j) .eq. zero) then - np = np - 1 - nev = nev + 1 - end if - 30 continue -c - if ( (nconv .ge. nev0) .or. - & (iter .gt. mxiter) .or. - & (np .eq. 0) ) then -c -c %------------------------------------------------% -c | Prepare to exit. Put the converged Ritz values | -c | and corresponding bounds in RITZ(1:NCONV) and | -c | BOUNDS(1:NCONV) respectively. Then sort. Be | -c | careful when NCONV > NP since we don't want to | -c | swap overlapping locations. | -c %------------------------------------------------% -c - if (which .eq. 'BE') then -c -c %-----------------------------------------------------% -c | Both ends of the spectrum are requested. | -c | Sort the eigenvalues into algebraically decreasing | -c | order first then swap low end of the spectrum next | -c | to high end in appropriate locations. | -c | NOTE: when np < floor(nev/2) be careful not to swap | -c | overlapping locations. | -c %-----------------------------------------------------% -c - wprime = 'SA' - call ssortr (wprime, .true., kplusp, ritz, bounds) - nevd2 = nev0 / 2 - nevm2 = nev0 - nevd2 - if ( nev .gt. 1 ) then - call sswap ( min(nevd2,np), ritz(nevm2+1), 1, - & ritz( max(kplusp-nevd2+1,kplusp-np+1) ), 1) - call sswap ( min(nevd2,np), bounds(nevm2+1), 1, - & bounds( max(kplusp-nevd2+1,kplusp-np+1)), 1) - end if -c - else -c -c %--------------------------------------------------% -c | LM, SM, LA, SA case. | -c | Sort the eigenvalues of H into the an order that | -c | is opposite to WHICH, and apply the resulting | -c | order to BOUNDS. The eigenvalues are sorted so | -c | that the wanted part are always within the first | -c | NEV locations. | -c %--------------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SM' - if (which .eq. 'SM') wprime = 'LM' - if (which .eq. 
'LA') wprime = 'SA' - if (which .eq. 'SA') wprime = 'LA' -c - call ssortr (wprime, .true., kplusp, ritz, bounds) -c - end if -c -c %--------------------------------------------------% -c | Scale the Ritz estimate of each Ritz value | -c | by 1 / max(eps23,magnitude of the Ritz value). | -c %--------------------------------------------------% -c - do 35 j = 1, nev0 - temp = max( eps23, abs(ritz(j)) ) - bounds(j) = bounds(j)/temp - 35 continue -c -c %----------------------------------------------------% -c | Sort the Ritz values according to the scaled Ritz | -c | esitmates. This will push all the converged ones | -c | towards the front of ritzr, ritzi, bounds | -c | (in the case when NCONV < NEV.) | -c %----------------------------------------------------% -c - wprime = 'LA' - call ssortr(wprime, .true., nev0, bounds, ritz) -c -c %----------------------------------------------% -c | Scale the Ritz estimate back to its original | -c | value. | -c %----------------------------------------------% -c - do 40 j = 1, nev0 - temp = max( eps23, abs(ritz(j)) ) - bounds(j) = bounds(j)*temp - 40 continue -c -c %--------------------------------------------------% -c | Sort the "converged" Ritz values again so that | -c | the "threshold" values and their associated Ritz | -c | estimates appear at the appropriate position in | -c | ritz and bound. | -c %--------------------------------------------------% -c - if (which .eq. 'BE') then -c -c %------------------------------------------------% -c | Sort the "converged" Ritz values in increasing | -c | order. The "threshold" values are in the | -c | middle. | -c %------------------------------------------------% -c - wprime = 'LA' - call ssortr(wprime, .true., nconv, ritz, bounds) -c - else -c -c %----------------------------------------------% -c | In LM, SM, LA, SA case, sort the "converged" | -c | Ritz values according to WHICH so that the | -c | "threshold" value appears at the front of | -c | ritz. 
| -c %----------------------------------------------% - - call ssortr(which, .true., nconv, ritz, bounds) -c - end if -c -c %------------------------------------------% -c | Use h( 1,1 ) as storage to communicate | -c | rnorm to _seupd if needed | -c %------------------------------------------% -c - h(1,1) = rnorm -c - if (msglvl .gt. 1) then - call svout (logfil, kplusp, ritz, ndigit, - & '_saup2: Sorted Ritz values.') - call svout (logfil, kplusp, bounds, ndigit, - & '_saup2: Sorted ritz estimates.') - end if -c -c %------------------------------------% -c | Max iterations have been exceeded. | -c %------------------------------------% -c - if (iter .gt. mxiter .and. nconv .lt. nev) info = 1 -c -c %---------------------% -c | No shifts to apply. | -c %---------------------% -c - if (np .eq. 0 .and. nconv .lt. nev0) info = 2 -c - np = nconv - go to 1100 -c - else if (nconv .lt. nev .and. ishift .eq. 1) then -c -c %---------------------------------------------------% -c | Do not have all the requested eigenvalues yet. | -c | To prevent possible stagnation, adjust the number | -c | of Ritz values and the shifts. | -c %---------------------------------------------------% -c - nevbef = nev - nev = nev + min (nconv, np/2) - if (nev .eq. 1 .and. kplusp .ge. 6) then - nev = kplusp / 2 - else if (nev .eq. 1 .and. kplusp .gt. 2) then - nev = 2 - end if - np = kplusp - nev -c -c %---------------------------------------% -c | If the size of NEV was just increased | -c | resort the eigenvalues. | -c %---------------------------------------% -c - if (nevbef .lt. nev) - & call ssgets (ishift, which, nev, np, ritz, bounds, - & workl) -c - end if -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, nconv, ndigit, - & '_saup2: no. of "converged" Ritz values at this iter.') - if (msglvl .gt. 
1) then - kp(1) = nev - kp(2) = np - call ivout (logfil, 2, kp, ndigit, - & '_saup2: NEV and NP are') - call svout (logfil, nev, ritz(np+1), ndigit, - & '_saup2: "wanted" Ritz values.') - call svout (logfil, nev, bounds(np+1), ndigit, - & '_saup2: Ritz estimates of the "wanted" values ') - end if - end if - -c - if (ishift .eq. 0) then -c -c %-----------------------------------------------------% -c | User specified shifts: reverse communication to | -c | compute the shifts. They are returned in the first | -c | NP locations of WORKL. | -c %-----------------------------------------------------% -c - ushift = .true. - ido = 3 - go to 9000 - end if -c - 50 continue -c -c %------------------------------------% -c | Back from reverse communication; | -c | User specified shifts are returned | -c | in WORKL(1:*NP) | -c %------------------------------------% -c - ushift = .false. -c -c -c %---------------------------------------------------------% -c | Move the NP shifts to the first NP locations of RITZ to | -c | free up WORKL. This is for the non-exact shift case; | -c | in the exact shift case, ssgets already handles this. | -c %---------------------------------------------------------% -c - if (ishift .eq. 0) call scopy (np, workl, 1, ritz, 1) -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, np, ndigit, - & '_saup2: The number of shifts to apply ') - call svout (logfil, np, workl, ndigit, - & '_saup2: shifts selected') - if (ishift .eq. 1) then - call svout (logfil, np, bounds, ndigit, - & '_saup2: corresponding Ritz estimates') - end if - end if -c -c %---------------------------------------------------------% -c | Apply the NP0 implicit shifts by QR bulge chasing. | -c | Each shift is applied to the entire tridiagonal matrix. | -c | The first 2*N locations of WORKD are used as workspace. | -c | After ssapps is done, we have a Lanczos | -c | factorization of length NEV. 
| -c %---------------------------------------------------------% -c - call ssapps (n, nev, np, ritz, v, ldv, h, ldh, resid, q, ldq, - & workd) -c -c %---------------------------------------------% -c | Compute the B-norm of the updated residual. | -c | Keep B*RESID in WORKD(1:N) to be used in | -c | the first step of the next call to ssaitr. | -c %---------------------------------------------% -c - cnorm = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call scopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*RESID | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call scopy (n, resid, 1, workd, 1) - end if -c - 100 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(1:N) := B*RESID | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - rnorm = sdot (n, resid, 1, workd, 1) - rnorm = sqrt(abs(rnorm)) - else if (bmat .eq. 'I') then - rnorm = snrm2(n, resid, 1) - end if - cnorm = .false. - 130 continue -c - if (msglvl .gt. 
2) then - call svout (logfil, 1, rnorm, ndigit, - & '_saup2: B-norm of residual for NEV factorization') - call svout (logfil, nev, h(1,2), ndigit, - & '_saup2: main diagonal of compressed H matrix') - call svout (logfil, nev-1, h(2,1), ndigit, - & '_saup2: subdiagonal of compressed H matrix') - end if -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 1100 continue -c - mxiter = iter - nev = nconv -c - 1200 continue - ido = 99 -c -c %------------% -c | Error exit | -c %------------% -c - call second (t1) - tsaup2 = t1 - t0 -c - 9000 continue - return -c -c %---------------% -c | End of ssaup2 | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaupd.f deleted file mode 100644 index bd4184a108..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssaupd.f +++ /dev/null @@ -1,690 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssaupd -c -c\Description: -c -c Reverse communication interface for the Implicitly Restarted Arnoldi -c Iteration. For symmetric problems this reduces to a variant of the Lanczos -c method. This method has been designed to compute approximations to a -c few eigenpairs of a linear operator OP that is real and symmetric -c with respect to a real positive semi-definite symmetric matrix B, -c i.e. -c -c B*OP = (OP`)*B. -c -c Another way to express this condition is -c -c < x,OPy > = < OPx,y > where < z,w > = z`Bw . -c -c In the standard eigenproblem B is the identity matrix. -c ( A` denotes transpose of A) -c -c The computed approximate eigenvalues are called Ritz values and -c the corresponding approximate eigenvectors are called Ritz vectors. 
-c -c ssaupd is usually called iteratively to solve one of the -c following problems: -c -c Mode 1: A*x = lambda*x, A symmetric -c ===> OP = A and B = I. -c -c Mode 2: A*x = lambda*M*x, A symmetric, M symmetric positive definite -c ===> OP = inv[M]*A and B = M. -c ===> (If M can be factored see remark 3 below) -c -c Mode 3: K*x = lambda*M*x, K symmetric, M symmetric positive semi-definite -c ===> OP = (inv[K - sigma*M])*M and B = M. -c ===> Shift-and-Invert mode -c -c Mode 4: K*x = lambda*KG*x, K symmetric positive semi-definite, -c KG symmetric indefinite -c ===> OP = (inv[K - sigma*KG])*K and B = K. -c ===> Buckling mode -c -c Mode 5: A*x = lambda*M*x, A symmetric, M symmetric positive semi-definite -c ===> OP = inv[A - sigma*M]*[A + sigma*M] and B = M. -c ===> Cayley transformed mode -c -c NOTE: The action of w <- inv[A - sigma*M]*v or w <- inv[M]*v -c should be accomplished either by a direct method -c using a sparse matrix factorization and solving -c -c [A - sigma*M]*w = v or M*w = v, -c -c or through an iterative method for solving these -c systems. If an iterative method is used, the -c convergence test must be more stringent than -c the accuracy requirements for the eigenvalue -c approximations. -c -c\Usage: -c call ssaupd -c ( IDO, BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, -c IPNTR, WORKD, WORKL, LWORKL, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to ssaupd. IDO will be set internally to -c indicate the type of operation to be performed. Control is -c then given back to the calling routine which has the -c responsibility to carry out the requested operation and call -c ssaupd with the result. The operand is given in -c WORKD(IPNTR(1)), the result must be put in WORKD(IPNTR(2)). 
-c (If Mode = 2 see remark 5 below) -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c In mode 3,4 and 5, the vector B * X is already -c available in WORKD(ipntr(3)). It does not -c need to be recomputed in forming OP * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 3: compute the IPARAM(8) shifts where -c IPNTR(11) is the pointer into WORKL for -c placing the shifts. See remark 6 below. -c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c WHICH Character*2. (INPUT) -c Specify which of the Ritz values of OP to compute. -c -c 'LA' - compute the NEV largest (algebraic) eigenvalues. -c 'SA' - compute the NEV smallest (algebraic) eigenvalues. -c 'LM' - compute the NEV largest (in magnitude) eigenvalues. -c 'SM' - compute the NEV smallest (in magnitude) eigenvalues. -c 'BE' - compute NEV eigenvalues, half from each end of the -c spectrum. When NEV is odd, compute one more from the -c high end than from the low end. -c (see remark 1 below) -c -c NEV Integer. (INPUT) -c Number of eigenvalues of OP to be computed. 0 < NEV < N. -c -c TOL Real scalar. 
(INPUT) -c Stopping criterion: the relative accuracy of the Ritz value -c is considered acceptable if BOUNDS(I) .LE. TOL*ABS(RITZ(I)). -c If TOL .LE. 0. is passed a default is set: -c DEFAULT = SLAMCH('EPS') (machine precision as computed -c by the LAPACK auxiliary subroutine SLAMCH). -c -c RESID Real array of length N. (INPUT/OUTPUT) -c On INPUT: -c If INFO .EQ. 0, a random initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c On OUTPUT: -c RESID contains the final residual vector. -c -c NCV Integer. (INPUT) -c Number of columns of the matrix V (less than or equal to N). -c This will indicate how many Lanczos vectors are generated -c at each iteration. After the startup phase in which NEV -c Lanczos vectors are generated, the algorithm generates -c NCV-NEV Lanczos vectors at each subsequent update iteration. -c Most of the cost in generating each Lanczos vector is in the -c matrix-vector product OP*x. (See remark 4 below). -c -c V Real N by NCV array. (OUTPUT) -c The NCV columns of V contain the Lanczos basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c IPARAM Integer array of length 11. (INPUT/OUTPUT) -c IPARAM(1) = ISHIFT: method for selecting the implicit shifts. -c The shifts selected at each iteration are used to restart -c the Arnoldi iteration in an implicit fashion. -c ------------------------------------------------------------- -c ISHIFT = 0: the shifts are provided by the user via -c reverse communication. The NCV eigenvalues of -c the current tridiagonal matrix T are returned in -c the part of WORKL array corresponding to RITZ. -c See remark 6 below. -c ISHIFT = 1: exact shifts with respect to the reduced -c tridiagonal matrix T. This is equivalent to -c restarting the iteration with a starting vector -c that is a linear combination of Ritz vectors -c associated with the "wanted" Ritz values. 
-c ------------------------------------------------------------- -c -c IPARAM(2) = LEVEC -c No longer referenced. See remark 2 below. -c -c IPARAM(3) = MXITER -c On INPUT: maximum number of Arnoldi update iterations allowed. -c On OUTPUT: actual number of Arnoldi update iterations taken. -c -c IPARAM(4) = NB: blocksize to be used in the recurrence. -c The code currently works only for NB = 1. -c -c IPARAM(5) = NCONV: number of "converged" Ritz values. -c This represents the number of Ritz values that satisfy -c the convergence criterion. -c -c IPARAM(6) = IUPD -c No longer referenced. Implicit restarting is ALWAYS used. -c -c IPARAM(7) = MODE -c On INPUT determines what type of eigenproblem is being solved. -c Must be 1,2,3,4,5; See under \Description of ssaupd for the -c five modes available. -c -c IPARAM(8) = NP -c When ido = 3 and the user provides shifts through reverse -c communication (IPARAM(1)=0), ssaupd returns NP, the number -c of shifts the user is to provide. 0 < NP <=NCV-NEV. See Remark -c 6 below. -c -c IPARAM(9) = NUMOP, IPARAM(10) = NUMOPB, IPARAM(11) = NUMREO, -c OUTPUT: NUMOP = total number of OP*x operations, -c NUMOPB = total number of B*x operations if BMAT='G', -c NUMREO = total number of steps of re-orthogonalization. -c -c IPNTR Integer array of length 11. (OUTPUT) -c Pointer to mark the starting locations in the WORKD and WORKL -c arrays for matrices/vectors used by the Lanczos iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X in WORKD. -c IPNTR(2): pointer to the current result vector Y in WORKD. -c IPNTR(3): pointer to the vector B * X in WORKD when used in -c the shift-and-invert mode. -c IPNTR(4): pointer to the next available location in WORKL -c that is untouched by the program. -c IPNTR(5): pointer to the NCV by 2 tridiagonal matrix T in WORKL. -c IPNTR(6): pointer to the NCV RITZ values array in WORKL. 
-c IPNTR(7): pointer to the Ritz estimates in array WORKL associated -c with the Ritz values located in RITZ in WORKL. -c IPNTR(11): pointer to the NP shifts in WORKL. See Remark 6 below. -c -c Note: IPNTR(8:10) is only referenced by sseupd. See Remark 2. -c IPNTR(8): pointer to the NCV RITZ values of the original system. -c IPNTR(9): pointer to the NCV corresponding error bounds. -c IPNTR(10): pointer to the NCV by NCV matrix of eigenvectors -c of the tridiagonal matrix T. Only referenced by -c sseupd if RVEC = .TRUE. See Remarks. -c ------------------------------------------------------------- -c -c WORKD Real work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration. Upon termination -c WORKD(1:N) contains B*RESID(1:N). If the Ritz vectors are desired -c subroutine sseupd uses this output. -c See Data Distribution Note below. -c -c WORKL Real work array of length LWORKL. (OUTPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. See Data Distribution Note below. -c -c LWORKL Integer. (INPUT) -c LWORKL must be at least NCV**2 + 8*NCV . -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal exit. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. IPARAM(5) -c returns the number of wanted converged Ritz values. -c = 2: No longer an informational error. Deprecated starting -c with release 2 of ARPACK. -c = 3: No shifts could be applied during a cycle of the -c Implicitly restarted Arnoldi iteration. One possibility -c is to increase the size of NCV relative to NEV. -c See remark 4 below. -c = -1: N must be positive. -c = -2: NEV must be positive. 
-c = -3: NCV must be greater than NEV and less than or equal to N. -c = -4: The maximum number of Arnoldi update iterations allowed -c must be greater than zero. -c = -5: WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'. -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work array WORKL is not sufficient. -c = -8: Error return from trid. eigenvalue calculation; -c Informatinal error from LAPACK routine ssteqr. -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3,4,5. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatable. -c = -12: IPARAM(1) must be equal to 0 or 1. -c = -13: NEV and WHICH = 'BE' are incompatable. -c = -9999: Could not build an Arnoldi factorization. -c IPARAM(5) returns the size of the current Arnoldi -c factorization. The user is advised to check that -c enough workspace and array storage has been allocated. -c -c -c\Remarks -c 1. The converged Ritz values are always returned in ascending -c algebraic order. The computed Ritz values are approximate -c eigenvalues of OP. The selection of WHICH should be made -c with this in mind when Mode = 3,4,5. After convergence, -c approximate eigenvalues of the original problem may be obtained -c with the ARPACK subroutine sseupd. -c -c 2. If the Ritz vectors corresponding to the converged Ritz values -c are needed, the user must call sseupd immediately following completion -c of ssaupd. This is new starting with version 2.1 of ARPACK. -c -c 3. If M can be factored into a Cholesky factorization M = LL` -c then Mode = 2 should not be selected. Instead one should use -c Mode = 1 with OP = inv(L)*A*inv(L`). Appropriate triangular -c linear systems should be solved with L and L` rather -c than computing inverses. After convergence, an approximate -c eigenvector z of the original problem is recovered by solving -c L`z = x where x is a Ritz vector of OP. -c -c 4. At present there is no a-priori analysis to guide the selection -c of NCV relative to NEV. 
The only formal requrement is that NCV > NEV. -c However, it is recommended that NCV .ge. 2*NEV. If many problems of -c the same type are to be solved, one should experiment with increasing -c NCV while keeping NEV fixed for a given test problem. This will -c usually decrease the required number of OP*x operations but it -c also increases the work and storage required to maintain the orthogonal -c basis vectors. The optimal "cross-over" with respect to CPU time -c is problem dependent and must be determined empirically. -c -c 5. If IPARAM(7) = 2 then in the Reverse commuication interface the user -c must do the following. When IDO = 1, Y = OP * X is to be computed. -c When IPARAM(7) = 2 OP = inv(B)*A. After computing A*X the user -c must overwrite X with A*X. Y is then the solution to the linear set -c of equations B*Y = A*X. -c -c 6. When IPARAM(1) = 0, and IDO = 3, the user needs to provide the -c NP = IPARAM(8) shifts in locations: -c 1 WORKL(IPNTR(11)) -c 2 WORKL(IPNTR(11)+1) -c . -c . -c . -c NP WORKL(IPNTR(11)+NP-1). -c -c The eigenvalues of the current tridiagonal matrix are located in -c WORKL(IPNTR(6)) through WORKL(IPNTR(6)+NCV-1). They are in the -c order defined by WHICH. The associated Ritz estimates are located in -c WORKL(IPNTR(8)), WORKL(IPNTR(8)+1), ... , WORKL(IPNTR(8)+NCV-1). 
-c -c----------------------------------------------------------------------- -c -c\Data Distribution Note: -c -c Fortran-D syntax: -c ================ -c REAL RESID(N), V(LDV,NCV), WORKD(3*N), WORKL(LWORKL) -c DECOMPOSE D1(N), D2(N,NCV) -c ALIGN RESID(I) with D1(I) -c ALIGN V(I,J) with D2(I,J) -c ALIGN WORKD(I) with D1(I) range (1:N) -c ALIGN WORKD(I) with D1(I-N) range (N+1:2*N) -c ALIGN WORKD(I) with D1(I-2*N) range (2*N+1:3*N) -c DISTRIBUTE D1(BLOCK), D2(BLOCK,:) -c REPLICATED WORKL(LWORKL) -c -c Cray MPP syntax: -c =============== -c REAL RESID(N), V(LDV,NCV), WORKD(N,3), WORKL(LWORKL) -c SHARED RESID(BLOCK), V(BLOCK,:), WORKD(BLOCK,:) -c REPLICATED WORKL(LWORKL) -c -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett, "The Symmetric Eigenvalue Problem". Prentice-Hall, -c 1980. -c 4. B.N. Parlett, B. Nour-Omid, "Towards a Black Box Lanczos Program", -c Computer Physics Communications, 53 (1989), pp 169-179. -c 5. B. Nour-Omid, B.N. Parlett, T. Ericson, P.S. Jensen, "How to -c Implement the Spectral Transformation", Math. Comp., 48 (1987), -c pp 663-673. -c 6. R.G. Grimes, J.G. Lewis and H.D. Simon, "A Shifted Block Lanczos -c Algorithm for Solving Sparse Symmetric Generalized Eigenproblems", -c SIAM J. Matr. Anal. Apps., January (1993). -c 7. L. Reichel, W.B. Gragg, "Algorithm 686: FORTRAN Subroutines -c for Updating the QR decomposition", ACM TOMS, December 1990, -c Volume 16 Number 4, pp 369-377. -c 8. R.B. Lehoucq, D.C. Sorensen, "Implementation of Some Spectral -c Transformations in a k-Step Arnoldi Method". In Preparation. 
-c -c\Routines called: -c ssaup2 ARPACK routine that implements the Implicitly Restarted -c Arnoldi Iteration. -c sstats ARPACK routine that initialize timing and other statistics -c variables. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c svout ARPACK utility routine that prints vectors. -c slamch LAPACK routine that determines machine constants. -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: saupd.F SID: 2.8 DATE OF SID: 04/10/01 RELEASE: 2 -c -c\Remarks -c 1. None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssaupd - & ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, - & ipntr, workd, workl, lworkl, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ldv, lworkl, n, ncv, nev - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(11) - Real - & resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0 , zero = 0.0E+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, msglvl, mxiter, mode, nb, - & nev0, next, np, ritz, j - save bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, msglvl, mxiter, mode, nb, - & nev0, next, np, ritz -c -c %----------------------% -c | External 
Subroutines | -c %----------------------% -c - external ssaup2, svout, ivout, second, sstats -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slamch - external slamch -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call sstats - call second (t0) - msglvl = msaupd -c - ierr = 0 - ishift = iparam(1) - mxiter = iparam(3) -c nb = iparam(4) - nb = 1 -c -c %--------------------------------------------% -c | Revision 2 performs only implicit restart. | -c %--------------------------------------------% -c - iupd = 1 - mode = iparam(7) -c -c %----------------% -c | Error checking | -c %----------------% -c - if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev .or. ncv .gt. n) then - ierr = -3 - end if -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c %----------------------------------------------% -c - np = ncv - nev -c - if (mxiter .le. 0) ierr = -4 - if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LA' .and. - & which .ne. 'SA' .and. - & which .ne. 'BE') ierr = -5 - if (bmat .ne. 'I' .and. bmat .ne. 'G') ierr = -6 -c - if (lworkl .lt. ncv**2 + 8*ncv) ierr = -7 - if (mode .lt. 1 .or. mode .gt. 5) then - ierr = -10 - else if (mode .eq. 1 .and. bmat .eq. 'G') then - ierr = -11 - else if (ishift .lt. 0 .or. ishift .gt. 1) then - ierr = -12 - else if (nev .eq. 1 .and. which .eq. 'BE') then - ierr = -13 - end if -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 
0) then - info = ierr - ido = 99 - go to 9000 - end if -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - if (nb .le. 0) nb = 1 - if (tol .le. zero) tol = slamch('EpsMach') -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c | NEV0 is the local variable designating the | -c | size of the invariant subspace desired. | -c %----------------------------------------------% -c - np = ncv - nev - nev0 = nev -c -c %-----------------------------% -c | Zero out internal workspace | -c %-----------------------------% -c - do 10 j = 1, ncv**2 + 8*ncv - workl(j) = zero - 10 continue -c -c %-------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:2*ncv) := generated tridiagonal matrix | -c | workl(2*ncv+1:2*ncv+ncv) := ritz values | -c | workl(3*ncv+1:3*ncv+ncv) := computed error bounds | -c | workl(4*ncv+1:4*ncv+ncv*ncv) := rotation matrix Q | -c | workl(4*ncv+ncv*ncv+1:7*ncv+ncv*ncv) := workspace | -c %-------------------------------------------------------% -c - ldh = ncv - ldq = ncv - ih = 1 - ritz = ih + 2*ldh - bounds = ritz + ncv - iq = bounds + ncv - iw = iq + ncv**2 - next = iw + 3*ncv -c - ipntr(4) = next - ipntr(5) = ih - ipntr(6) = ritz - ipntr(7) = bounds - ipntr(11) = iw - end if -c -c %-------------------------------------------------------% -c | Carry out the Implicitly restarted Lanczos Iteration. 
| -c %-------------------------------------------------------% -c - call ssaup2 - & ( ido, bmat, n, which, nev0, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, workl(ih), ldh, workl(ritz), - & workl(bounds), workl(iq), ldq, workl(iw), ipntr, workd, - & info ) -c -c %--------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP or shifts. | -c %--------------------------------------------------% -c - if (ido .eq. 3) iparam(8) = np - if (ido .ne. 99) go to 9000 -c - iparam(3) = mxiter - iparam(5) = np - iparam(9) = nopx - iparam(10) = nbx - iparam(11) = nrorth -c -c %------------------------------------% -c | Exit if there was an informational | -c | error within ssaup2. | -c %------------------------------------% -c - if (info .lt. 0) go to 9000 - if (info .eq. 2) info = 3 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, mxiter, ndigit, - & '_saupd: number of update iterations taken') - call ivout (logfil, 1, np, ndigit, - & '_saupd: number of "converged" Ritz values') - call svout (logfil, np, workl(Ritz), ndigit, - & '_saupd: final Ritz values') - call svout (logfil, np, workl(Bounds), ndigit, - & '_saupd: corresponding error bounds') - end if -c - call second (t1) - tsaupd = t1 - t0 -c - if (msglvl .gt. 
0) then -c -c %--------------------------------------------------------% -c | Version Number & Version Date are defined in version.h | -c %--------------------------------------------------------% -c - write (6,1000) - write (6,1100) mxiter, nopx, nbx, nrorth, nitref, nrstrt, - & tmvopx, tmvbx, tsaupd, tsaup2, tsaitr, titref, - & tgetv0, tseigt, tsgets, tsapps, tsconv - 1000 format (//, - & 5x, '==========================================',/ - & 5x, '= Symmetric implicit Arnoldi update code =',/ - & 5x, '= Version Number:', ' 2.4' , 19x, ' =',/ - & 5x, '= Version Date: ', ' 07/31/96' , 14x, ' =',/ - & 5x, '==========================================',/ - & 5x, '= Summary of timing statistics =',/ - & 5x, '==========================================',//) - 1100 format ( - & 5x, 'Total number update iterations = ', i5,/ - & 5x, 'Total number of OP*x operations = ', i5,/ - & 5x, 'Total number of B*x operations = ', i5,/ - & 5x, 'Total number of reorthogonalization steps = ', i5,/ - & 5x, 'Total number of iterative refinement steps = ', i5,/ - & 5x, 'Total number of restart steps = ', i5,/ - & 5x, 'Total time in user OP*x operation = ', f12.6,/ - & 5x, 'Total time in user B*x operation = ', f12.6,/ - & 5x, 'Total time in Arnoldi update routine = ', f12.6,/ - & 5x, 'Total time in saup2 routine = ', f12.6,/ - & 5x, 'Total time in basic Arnoldi iteration loop = ', f12.6,/ - & 5x, 'Total time in reorthogonalization phase = ', f12.6,/ - & 5x, 'Total time in (re)start vector generation = ', f12.6,/ - & 5x, 'Total time in trid eigenvalue subproblem = ', f12.6,/ - & 5x, 'Total time in getting the shifts = ', f12.6,/ - & 5x, 'Total time in applying the shifts = ', f12.6,/ - & 5x, 'Total time in convergence testing = ', f12.6) - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of ssaupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssconv.f 
b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssconv.f deleted file mode 100644 index 36fe836407..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssconv.f +++ /dev/null @@ -1,138 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssconv -c -c\Description: -c Convergence testing for the symmetric Arnoldi eigenvalue routine. -c -c\Usage: -c call ssconv -c ( N, RITZ, BOUNDS, TOL, NCONV ) -c -c\Arguments -c N Integer. (INPUT) -c Number of Ritz values to check for convergence. -c -c RITZ Real array of length N. (INPUT) -c The Ritz values to be checked for convergence. -c -c BOUNDS Real array of length N. (INPUT) -c Ritz estimates associated with the Ritz values in RITZ. -c -c TOL Real scalar. (INPUT) -c Desired relative accuracy for a Ritz value to be considered -c "converged". -c -c NCONV Integer scalar. (OUTPUT) -c Number of "converged" Ritz values. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Routines called: -c second ARPACK utility routine for timing. -c slamch LAPACK routine that determines machine constants. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: sconv.F SID: 2.4 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\Remarks -c 1. Starting with version 2.4, this routine no longer uses the -c Parlett strategy using the gap conditions. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssconv (n, ritz, bounds, tol, nconv) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer n, nconv - Real - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & ritz(n), bounds(n) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i - Real - & temp, eps23 -c -c %-------------------% -c | External routines | -c %-------------------% -c - Real - & slamch - external slamch - -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic abs -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - call second (t0) -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0) -c - nconv = 0 - do 10 i = 1, n -c -c %-----------------------------------------------------% -c | The i-th Ritz value is considered "converged" | -c | when: bounds(i) .le. TOL*max(eps23, abs(ritz(i))) | -c %-----------------------------------------------------% -c - temp = max( eps23, abs(ritz(i)) ) - if ( bounds(i) .le. 
tol*temp ) then - nconv = nconv + 1 - end if -c - 10 continue -c - call second (t1) - tsconv = tsconv + (t1 - t0) -c - return -c -c %---------------% -c | End of ssconv | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sseigt.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sseigt.f deleted file mode 100644 index 208b672109..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sseigt.f +++ /dev/null @@ -1,181 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: sseigt -c -c\Description: -c Compute the eigenvalues of the current symmetric tridiagonal matrix -c and the corresponding error bounds given the current residual norm. -c -c\Usage: -c call sseigt -c ( RNORM, N, H, LDH, EIG, BOUNDS, WORKL, IERR ) -c -c\Arguments -c RNORM Real scalar. (INPUT) -c RNORM contains the residual norm corresponding to the current -c symmetric tridiagonal matrix H. -c -c N Integer. (INPUT) -c Size of the symmetric tridiagonal matrix H. -c -c H Real N by 2 array. (INPUT) -c H contains the symmetric tridiagonal matrix with the -c subdiagonal in the first column starting at H(2,1) and the -c main diagonal in second column. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c EIG Real array of length N. (OUTPUT) -c On output, EIG contains the N eigenvalues of H possibly -c unsorted. The BOUNDS arrays are returned in the -c same sorted order as EIG. -c -c BOUNDS Real array of length N. (OUTPUT) -c On output, BOUNDS contains the error estimates corresponding -c to the eigenvalues EIG. This is equal to RNORM times the -c last components of the eigenvectors corresponding to the -c eigenvalues in EIG. -c -c WORKL Real work array of length 3*N. (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c IERR Integer. (OUTPUT) -c Error exit flag from sstqrb. 
-c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c sstqrb ARPACK routine that computes the eigenvalues and the -c last components of the eigenvectors of a symmetric -c and tridiagonal matrix. -c second ARPACK utility routine for timing. -c svout ARPACK utility routine that prints vectors. -c scopy Level 1 BLAS that copies one vector to another. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.4' -c -c\SCCS Information: @(#) -c FILE: seigt.F SID: 2.4 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine sseigt - & ( rnorm, n, h, ldh, eig, bounds, workl, ierr ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer ierr, ldh, n - Real - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & eig(n), bounds(n), h(ldh,2), workl(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & zero - parameter (zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, k, msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy, sstqrb, svout, second -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c 
%-------------------------------% -c - call second (t0) - msglvl = mseigt -c - if (msglvl .gt. 0) then - call svout (logfil, n, h(1,2), ndigit, - & '_seigt: main diagonal of matrix H') - if (n .gt. 1) then - call svout (logfil, n-1, h(2,1), ndigit, - & '_seigt: sub diagonal of matrix H') - end if - end if -c - call scopy (n, h(1,2), 1, eig, 1) - call scopy (n-1, h(2,1), 1, workl, 1) - call sstqrb (n, eig, workl, bounds, workl(n+1), ierr) - if (ierr .ne. 0) go to 9000 - if (msglvl .gt. 1) then - call svout (logfil, n, bounds, ndigit, - & '_seigt: last row of the eigenvector matrix for H') - end if -c -c %-----------------------------------------------% -c | Finally determine the error bounds associated | -c | with the n Ritz values of H. | -c %-----------------------------------------------% -c - do 30 k = 1, n - bounds(k) = rnorm*abs(bounds(k)) - 30 continue -c - call second (t1) - tseigt = tseigt + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of sseigt | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssesrt.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssesrt.f deleted file mode 100644 index 36e8787e1c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssesrt.f +++ /dev/null @@ -1,217 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssesrt -c -c\Description: -c Sort the array X in the order specified by WHICH and optionally -c apply the permutation to the columns of the matrix A. -c -c\Usage: -c call ssesrt -c ( WHICH, APPLY, N, X, NA, A, LDA) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> X is sorted into increasing order of magnitude. -c 'SM' -> X is sorted into decreasing order of magnitude. -c 'LA' -> X is sorted into increasing order of algebraic. -c 'SA' -> X is sorted into decreasing order of algebraic. -c -c APPLY Logical. (Input) -c APPLY = .TRUE. 
-> apply the sorted order to A. -c APPLY = .FALSE. -> do not apply the sorted order to A. -c -c N Integer. (INPUT) -c Dimension of the array X. -c -c X Real array of length N. (INPUT/OUTPUT) -c The array to be sorted. -c -c NA Integer. (INPUT) -c Number of rows of the matrix A. -c -c A Real array of length NA by N. (INPUT/OUTPUT) -c -c LDA Integer. (INPUT) -c Leading dimension of A. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Routines -c sswap Level 1 BLAS that swaps the contents of two vectors. -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.1'. -c Adapted from the sort routine in LANSO and -c the ARPACK code ssortr -c -c\SCCS Information: @(#) -c FILE: sesrt.F SID: 2.3 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssesrt (which, apply, n, x, na, a, lda) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer lda, n, na -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & x(0:n-1), a(lda, 0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Real - & temp -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external sswap -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'SA') then -c -c X is sorted into decreasing order of algebraic. -c - 10 continue - if (igap .eq. 
0) go to 9000 - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - if (x(j).lt.x(j+igap)) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call sswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 30 - endif - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c X is sorted into decreasing order of magnitude. -c - 40 continue - if (igap .eq. 0) go to 9000 - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j.lt.0) go to 60 -c - if (abs(x(j)).lt.abs(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call sswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LA') then -c -c X is sorted into increasing order of algebraic. -c - 70 continue - if (igap .eq. 0) go to 9000 - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (x(j).gt.x(j+igap)) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call sswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'LM') then -c -c X is sorted into increasing order of magnitude. -c - 100 continue - if (igap .eq. 
0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (abs(x(j)).gt.abs(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp - if (apply) call sswap( na, a(1, j), 1, a(1,j+igap), 1) - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 - end if -c - 9000 continue - return -c -c %---------------% -c | End of ssesrt | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sseupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sseupd.f deleted file mode 100644 index 91443d725f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sseupd.f +++ /dev/null @@ -1,857 +0,0 @@ -c\BeginDoc -c -c\Name: sseupd -c -c\Description: -c -c This subroutine returns the converged approximations to eigenvalues -c of A*z = lambda*B*z and (optionally): -c -c (1) the corresponding approximate eigenvectors, -c -c (2) an orthonormal (Lanczos) basis for the associated approximate -c invariant subspace, -c -c (3) Both. -c -c There is negligible additional cost to obtain eigenvectors. An orthonormal -c (Lanczos) basis is always computed. There is an additional storage cost -c of n*nev if both are requested (in this case a separate array Z must be -c supplied). -c -c These quantities are obtained from the Lanczos factorization computed -c by SSAUPD for the linear operator OP prescribed by the MODE selection -c (see IPARAM(7) in SSAUPD documentation.) SSAUPD must be called before -c this routine is called. These approximate eigenvalues and vectors are -c commonly called Ritz values and Ritz vectors respectively. They are -c referred to as such in the comments that follow. The computed orthonormal -c basis for the invariant subspace corresponding to these Ritz values is -c referred to as a Lanczos basis. 
-c -c See documentation in the header of the subroutine SSAUPD for a definition -c of OP as well as other terms and the relation of computed Ritz values -c and vectors of OP with respect to the given problem A*z = lambda*B*z. -c -c The approximate eigenvalues of the original problem are returned in -c ascending algebraic order. The user may elect to call this routine -c once for each desired Ritz vector and store it peripherally if desired. -c There is also the option of computing a selected set of these vectors -c with a single call. -c -c\Usage: -c call sseupd -c ( RVEC, HOWMNY, SELECT, D, Z, LDZ, SIGMA, BMAT, N, WHICH, NEV, TOL, -c RESID, NCV, V, LDV, IPARAM, IPNTR, WORKD, WORKL, LWORKL, INFO ) -c -c RVEC LOGICAL (INPUT) -c Specifies whether Ritz vectors corresponding to the Ritz value -c approximations to the eigenproblem A*z = lambda*B*z are computed. -c -c RVEC = .FALSE. Compute Ritz values only. -c -c RVEC = .TRUE. Compute Ritz vectors. -c -c HOWMNY Character*1 (INPUT) -c Specifies how many Ritz vectors are wanted and the form of Z -c the matrix of Ritz vectors. See remark 1 below. -c = 'A': compute NEV Ritz vectors; -c = 'S': compute some of the Ritz vectors, specified -c by the logical array SELECT. -c -c SELECT Logical array of dimension NCV. (INPUT/WORKSPACE) -c If HOWMNY = 'S', SELECT specifies the Ritz vectors to be -c computed. To select the Ritz vector corresponding to a -c Ritz value D(j), SELECT(j) must be set to .TRUE.. -c If HOWMNY = 'A' , SELECT is used as a workspace for -c reordering the Ritz values. -c -c D Real array of dimension NEV. (OUTPUT) -c On exit, D contains the Ritz value approximations to the -c eigenvalues of A*z = lambda*B*z. The values are returned -c in ascending order. If IPARAM(7) = 3,4,5 then D represents -c the Ritz values of OP computed by ssaupd transformed to -c those of the original eigensystem A*z = lambda*B*z. If -c IPARAM(7) = 1,2 then the Ritz values of OP are the same -c as the those of A*z = lambda*B*z. 
-c -c Z Real N by NEV array if HOWMNY = 'A'. (OUTPUT) -c On exit, Z contains the B-orthonormal Ritz vectors of the -c eigensystem A*z = lambda*B*z corresponding to the Ritz -c value approximations. -c If RVEC = .FALSE. then Z is not referenced. -c NOTE: The array Z may be set equal to first NEV columns of the -c Arnoldi/Lanczos basis array V computed by SSAUPD. -c -c LDZ Integer. (INPUT) -c The leading dimension of the array Z. If Ritz vectors are -c desired, then LDZ .ge. max( 1, N ). In any case, LDZ .ge. 1. -c -c SIGMA Real (INPUT) -c If IPARAM(7) = 3,4,5 represents the shift. Not referenced if -c IPARAM(7) = 1 or 2. -c -c -c **** The remaining arguments MUST be the same as for the **** -c **** call to SSAUPD that was just completed. **** -c -c NOTE: The remaining arguments -c -c BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, -c WORKD, WORKL, LWORKL, INFO -c -c must be passed directly to SSEUPD following the last call -c to SSAUPD. These arguments MUST NOT BE MODIFIED between -c the the last call to SSAUPD and the call to SSEUPD. -c -c Two of these parameters (WORKL, INFO) are also output parameters: -c -c WORKL Real work array of length LWORKL. (OUTPUT/WORKSPACE) -c WORKL(1:4*ncv) contains information obtained in -c ssaupd. They are not changed by sseupd. -c WORKL(4*ncv+1:ncv*ncv+8*ncv) holds the -c untransformed Ritz values, the computed error estimates, -c and the associated eigenvector matrix of H. -c -c Note: IPNTR(8:10) contains the pointer into WORKL for addresses -c of the above information computed by sseupd. -c ------------------------------------------------------------- -c IPNTR(8): pointer to the NCV RITZ values of the original system. -c IPNTR(9): pointer to the NCV corresponding error bounds. -c IPNTR(10): pointer to the NCV by NCV matrix of eigenvectors -c of the tridiagonal matrix T. Only referenced by -c sseupd if RVEC = .TRUE. See Remarks. -c ------------------------------------------------------------- -c -c INFO Integer. 
(OUTPUT) -c Error flag on output. -c = 0: Normal exit. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV must be greater than NEV and less than or equal to N. -c = -5: WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'. -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work WORKL array is not sufficient. -c = -8: Error return from trid. eigenvalue calculation; -c Information error from LAPACK routine ssteqr. -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3,4,5. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. -c = -12: NEV and WHICH = 'BE' are incompatible. -c = -14: SSAUPD did not find any eigenvalues to sufficient -c accuracy. -c = -15: HOWMNY must be one of 'A' or 'S' if RVEC = .true. -c = -16: HOWMNY = 'S' not yet implemented -c = -17: SSEUPD got a different count of the number of converged -c Ritz values than SSAUPD got. This indicates the user -c probably made an error in passing data from SSAUPD to -c SSEUPD or that the data was modified before entering -c SSEUPD. -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett, "The Symmetric Eigenvalue Problem". Prentice-Hall, -c 1980. -c 4. B.N. Parlett, B. Nour-Omid, "Towards a Black Box Lanczos Program", -c Computer Physics Communications, 53 (1989), pp 169-179. -c 5. B. Nour-Omid, B.N. Parlett, T. Ericson, P.S. Jensen, "How to -c Implement the Spectral Transformation", Math. Comp., 48 (1987), -c pp 663-673. -c 6. R.G. Grimes, J.G. Lewis and H.D. Simon, "A Shifted Block Lanczos -c Algorithm for Solving Sparse Symmetric Generalized Eigenproblems", -c SIAM J. Matr. Anal. Apps., January (1993). -c 7. L. 
Reichel, W.B. Gragg, "Algorithm 686: FORTRAN Subroutines -c for Updating the QR decomposition", ACM TOMS, December 1990, -c Volume 16 Number 4, pp 369-377. -c -c\Remarks -c 1. The converged Ritz values are always returned in increasing -c (algebraic) order. -c -c 2. Currently only HOWMNY = 'A' is implemented. It is included at this -c stage for the user who wants to incorporate it. -c -c\Routines called: -c ssesrt ARPACK routine that sorts an array X, and applies the -c corresponding permutation to a matrix A. -c ssortr ssortr ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c svout ARPACK utility routine that prints vectors. -c sgeqr2 LAPACK routine that computes the QR factorization of -c a matrix. -c slacpy LAPACK matrix copy routine. -c slamch LAPACK routine that determines machine constants. -c sorm2r LAPACK routine that applies an orthogonal matrix in -c factored form. -c ssteqr LAPACK routine that computes eigenvalues and eigenvectors -c of a tridiagonal matrix. -c sger Level 2 BLAS rank one update to a matrix. -c scopy Level 1 BLAS that copies one vector to another . -c snrm2 Level 1 BLAS that computes the norm of a vector. -c sscal Level 1 BLAS that scales a vector. -c sswap Level 1 BLAS that swaps the contents of two vectors. - -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Chao Yang Houston, Texas -c Dept. 
of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/15/93: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: seupd.F SID: 2.11 DATE OF SID: 04/10/01 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- - subroutine sseupd(rvec , howmny, select, d , - & z , ldz , sigma , bmat , - & n , which , nev , tol , - & resid , ncv , v , ldv , - & iparam, ipntr , workd , workl, - & lworkl, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat, howmny, which*2 - logical rvec - integer info, ldz, ldv, lworkl, n, ncv, nev - Real - & sigma, tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(7), ipntr(11) - logical select(ncv) - Real - & d(nev) , resid(n) , v(ldv,ncv), - & z(ldz, nev), workd(2*n), workl(lworkl) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0 , zero = 0.0E+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character type*6 - integer bounds , ierr , ih , ihb , ihd , - & iq , iw , j , k , ldh , - & ldq , mode , msglvl, nconv , next , - & ritz , irz , ibd , np , ishift, - & leftptr, rghtptr, numcnv, jj - Real - & bnorm2 , rnorm, temp, temp1, eps23 - logical reord -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external scopy , sger , sgeqr2, slacpy, sorm2r, sscal, - & ssesrt, ssteqr, sswap , svout , ivout , ssortr -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & snrm2, slamch - external snrm2, slamch -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - 
intrinsic min -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - msglvl = mseupd - mode = iparam(7) - nconv = iparam(5) - info = 0 -c -c %--------------% -c | Quick return | -c %--------------% -c - if (nconv .eq. 0) go to 9000 - ierr = 0 -c - if (nconv .le. 0) ierr = -14 - if (n .le. 0) ierr = -1 - if (nev .le. 0) ierr = -2 - if (ncv .le. nev .or. ncv .gt. n) ierr = -3 - if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LA' .and. - & which .ne. 'SA' .and. - & which .ne. 'BE') ierr = -5 - if (bmat .ne. 'I' .and. bmat .ne. 'G') ierr = -6 - if ( (howmny .ne. 'A' .and. - & howmny .ne. 'P' .and. - & howmny .ne. 'S') .and. rvec ) - & ierr = -15 - if (rvec .and. howmny .eq. 'S') ierr = -16 -c - if (rvec .and. lworkl .lt. ncv**2+8*ncv) ierr = -7 -c - if (mode .eq. 1 .or. mode .eq. 2) then - type = 'REGULR' - else if (mode .eq. 3 ) then - type = 'SHIFTI' - else if (mode .eq. 4 ) then - type = 'BUCKLE' - else if (mode .eq. 5 ) then - type = 'CAYLEY' - else - ierr = -10 - end if - if (mode .eq. 1 .and. bmat .eq. 'G') ierr = -11 - if (nev .eq. 1 .and. which .eq. 'BE') ierr = -12 -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - go to 9000 - end if -c -c %-------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:2*ncv) := generated tridiagonal matrix H | -c | The subdiagonal is stored in workl(2:ncv). | -c | The dead spot is workl(1) but upon exiting | -c | ssaupd stores the B-norm of the last residual | -c | vector in workl(1). We use this !!! | -c | workl(2*ncv+1:2*ncv+ncv) := ritz values | -c | The wanted values are in the first NCONV spots. 
| -c | workl(3*ncv+1:3*ncv+ncv) := computed Ritz estimates | -c | The wanted values are in the first NCONV spots. | -c | NOTE: workl(1:4*ncv) is set by ssaupd and is not | -c | modified by sseupd. | -c %-------------------------------------------------------% -c -c %-------------------------------------------------------% -c | The following is used and set by sseupd. | -c | workl(4*ncv+1:4*ncv+ncv) := used as workspace during | -c | computation of the eigenvectors of H. Stores | -c | the diagonal of H. Upon EXIT contains the NCV | -c | Ritz values of the original system. The first | -c | NCONV spots have the wanted values. If MODE = | -c | 1 or 2 then will equal workl(2*ncv+1:3*ncv). | -c | workl(5*ncv+1:5*ncv+ncv) := used as workspace during | -c | computation of the eigenvectors of H. Stores | -c | the subdiagonal of H. Upon EXIT contains the | -c | NCV corresponding Ritz estimates of the | -c | original system. The first NCONV spots have the | -c | wanted values. If MODE = 1,2 then will equal | -c | workl(3*ncv+1:4*ncv). | -c | workl(6*ncv+1:6*ncv+ncv*ncv) := orthogonal Q that is | -c | the eigenvector matrix for H as returned by | -c | ssteqr. Not referenced if RVEC = .False. | -c | Ordering follows that of workl(4*ncv+1:5*ncv) | -c | workl(6*ncv+ncv*ncv+1:6*ncv+ncv*ncv+2*ncv) := | -c | Workspace. Needed by ssteqr and by sseupd. | -c | GRAND total of NCV*(NCV+8) locations. | -c %-------------------------------------------------------% -c -c - ih = ipntr(5) - ritz = ipntr(6) - bounds = ipntr(7) - ldh = ncv - ldq = ncv - ihd = bounds + ldh - ihb = ihd + ldh - iq = ihb + ldh - iw = iq + ldh*ncv - next = iw + 2*ncv - ipntr(4) = next - ipntr(8) = ihd - ipntr(9) = ihb - ipntr(10) = iq -c -c %----------------------------------------% -c | irz points to the Ritz values computed | -c | by _seigt before exiting _saup2. | -c | ibd points to the Ritz estimates | -c | computed by _seigt before exiting | -c | _saup2. 
| -c %----------------------------------------% -c - irz = ipntr(11)+ncv - ibd = irz+ncv -c -c -c %---------------------------------% -c | Set machine dependent constant. | -c %---------------------------------% -c - eps23 = slamch('Epsilon-Machine') - eps23 = eps23**(2.0E+0 / 3.0E+0 ) -c -c %---------------------------------------% -c | RNORM is B-norm of the RESID(1:N). | -c | BNORM2 is the 2 norm of B*RESID(1:N). | -c | Upon exit of ssaupd WORKD(1:N) has | -c | B*RESID(1:N). | -c %---------------------------------------% -c - rnorm = workl(ih) - if (bmat .eq. 'I') then - bnorm2 = rnorm - else if (bmat .eq. 'G') then - bnorm2 = snrm2(n, workd, 1) - end if -c - if (msglvl .gt. 2) then - call svout(logfil, ncv, workl(irz), ndigit, - & '_seupd: Ritz values passed in from _SAUPD.') - call svout(logfil, ncv, workl(ibd), ndigit, - & '_seupd: Ritz estimates passed in from _SAUPD.') - end if -c - if (rvec) then -c - reord = .false. -c -c %---------------------------------------------------% -c | Use the temporary bounds array to store indices | -c | These will be used to mark the select array later | -c %---------------------------------------------------% -c - do 10 j = 1,ncv - workl(bounds+j-1) = j - select(j) = .false. - 10 continue -c -c %-------------------------------------% -c | Select the wanted Ritz values. | -c | Sort the Ritz values so that the | -c | wanted ones appear at the tailing | -c | NEV positions of workl(irr) and | -c | workl(iri). Move the corresponding | -c | error estimates in workl(bound) | -c | accordingly. | -c %-------------------------------------% -c - np = ncv - nev - ishift = 0 - call ssgets(ishift, which , nev , - & np , workl(irz) , workl(bounds), - & workl) -c - if (msglvl .gt. 
2) then - call svout(logfil, ncv, workl(irz), ndigit, - & '_seupd: Ritz values after calling _SGETS.') - call svout(logfil, ncv, workl(bounds), ndigit, - & '_seupd: Ritz value indices after calling _SGETS.') - end if -c -c %-----------------------------------------------------% -c | Record indices of the converged wanted Ritz values | -c | Mark the select array for possible reordering | -c %-----------------------------------------------------% -c - numcnv = 0 - do 11 j = 1,ncv - temp1 = max(eps23, abs(workl(irz+ncv-j)) ) - jj = workl(bounds + ncv - j) - if (numcnv .lt. nconv .and. - & workl(ibd+jj-1) .le. tol*temp1) then - select(jj) = .true. - numcnv = numcnv + 1 - if (jj .gt. nev) reord = .true. - endif - 11 continue -c -c %-----------------------------------------------------------% -c | Check the count (numcnv) of converged Ritz values with | -c | the number (nconv) reported by _saupd. If these two | -c | are different then there has probably been an error | -c | caused by incorrect passing of the _saupd data. | -c %-----------------------------------------------------------% -c - if (msglvl .gt. 2) then - call ivout(logfil, 1, numcnv, ndigit, - & '_seupd: Number of specified eigenvalues') - call ivout(logfil, 1, nconv, ndigit, - & '_seupd: Number of "converged" eigenvalues') - end if -c - if (numcnv .ne. nconv) then - info = -17 - go to 9000 - end if -c -c %-----------------------------------------------------------% -c | Call LAPACK routine _steqr to compute the eigenvalues and | -c | eigenvectors of the final symmetric tridiagonal matrix H. | -c | Initialize the eigenvector matrix Q to the identity. | -c %-----------------------------------------------------------% -c - call scopy(ncv-1, workl(ih+1), 1, workl(ihb), 1) - call scopy(ncv, workl(ih+ldh), 1, workl(ihd), 1) -c - call ssteqr('Identity', ncv, workl(ihd), workl(ihb), - & workl(iq) , ldq, workl(iw), ierr) -c - if (ierr .ne. 0) then - info = -8 - go to 9000 - end if -c - if (msglvl .gt. 
1) then - call scopy(ncv, workl(iq+ncv-1), ldq, workl(iw), 1) - call svout(logfil, ncv, workl(ihd), ndigit, - & '_seupd: NCV Ritz values of the final H matrix') - call svout(logfil, ncv, workl(iw), ndigit, - & '_seupd: last row of the eigenvector matrix for H') - end if -c - if (reord) then -c -c %---------------------------------------------% -c | Reordered the eigenvalues and eigenvectors | -c | computed by _steqr so that the "converged" | -c | eigenvalues appear in the first NCONV | -c | positions of workl(ihd), and the associated | -c | eigenvectors appear in the first NCONV | -c | columns. | -c %---------------------------------------------% -c - leftptr = 1 - rghtptr = ncv -c - if (ncv .eq. 1) go to 30 -c - 20 if (select(leftptr)) then -c -c %-------------------------------------------% -c | Search, from the left, for the first Ritz | -c | value that has not converged. | -c %-------------------------------------------% -c - leftptr = leftptr + 1 -c - else if ( .not. select(rghtptr)) then -c -c %----------------------------------------------% -c | Search, from the right, the first Ritz value | -c | that has converged. | -c %----------------------------------------------% -c - rghtptr = rghtptr - 1 -c - else -c -c %----------------------------------------------% -c | Swap the Ritz value on the left that has not | -c | converged with the Ritz value on the right | -c | that has converged. Swap the associated | -c | eigenvector of the tridiagonal matrix H as | -c | well. | -c %----------------------------------------------% -c - temp = workl(ihd+leftptr-1) - workl(ihd+leftptr-1) = workl(ihd+rghtptr-1) - workl(ihd+rghtptr-1) = temp - call scopy(ncv, workl(iq+ncv*(leftptr-1)), 1, - & workl(iw), 1) - call scopy(ncv, workl(iq+ncv*(rghtptr-1)), 1, - & workl(iq+ncv*(leftptr-1)), 1) - call scopy(ncv, workl(iw), 1, - & workl(iq+ncv*(rghtptr-1)), 1) - leftptr = leftptr + 1 - rghtptr = rghtptr - 1 -c - end if -c - if (leftptr .lt. 
rghtptr) go to 20 -c - 30 end if -c - if (msglvl .gt. 2) then - call svout (logfil, ncv, workl(ihd), ndigit, - & '_seupd: The eigenvalues of H--reordered') - end if -c -c %----------------------------------------% -c | Load the converged Ritz values into D. | -c %----------------------------------------% -c - call scopy(nconv, workl(ihd), 1, d, 1) -c - else -c -c %-----------------------------------------------------% -c | Ritz vectors not required. Load Ritz values into D. | -c %-----------------------------------------------------% -c - call scopy(nconv, workl(ritz), 1, d, 1) - call scopy(ncv, workl(ritz), 1, workl(ihd), 1) -c - end if -c -c %------------------------------------------------------------------% -c | Transform the Ritz values and possibly vectors and corresponding | -c | Ritz estimates of OP to those of A*x=lambda*B*x. The Ritz values | -c | (and corresponding data) are returned in ascending order. | -c %------------------------------------------------------------------% -c - if (type .eq. 'REGULR') then -c -c %---------------------------------------------------------% -c | Ascending sort of wanted Ritz values, vectors and error | -c | bounds. Not necessary if only Ritz values are desired. | -c %---------------------------------------------------------% -c - if (rvec) then - call ssesrt('LA', rvec , nconv, d, ncv, workl(iq), ldq) - else - call scopy(ncv, workl(bounds), 1, workl(ihb), 1) - end if -c - else -c -c %-------------------------------------------------------------% -c | * Make a copy of all the Ritz values. | -c | * Transform the Ritz values back to the original system. | -c | For TYPE = 'SHIFTI' the transformation is | -c | lambda = 1/theta + sigma | -c | For TYPE = 'BUCKLE' the transformation is | -c | lambda = sigma * theta / ( theta - 1 ) | -c | For TYPE = 'CAYLEY' the transformation is | -c | lambda = sigma * (theta + 1) / (theta - 1 ) | -c | where the theta are the Ritz values returned by ssaupd. 
| -c | NOTES: | -c | *The Ritz vectors are not affected by the transformation. | -c | They are only reordered. | -c %-------------------------------------------------------------% -c - call scopy (ncv, workl(ihd), 1, workl(iw), 1) - if (type .eq. 'SHIFTI') then - do 40 k=1, ncv - workl(ihd+k-1) = one / workl(ihd+k-1) + sigma - 40 continue - else if (type .eq. 'BUCKLE') then - do 50 k=1, ncv - workl(ihd+k-1) = sigma * workl(ihd+k-1) / - & (workl(ihd+k-1) - one) - 50 continue - else if (type .eq. 'CAYLEY') then - do 60 k=1, ncv - workl(ihd+k-1) = sigma * (workl(ihd+k-1) + one) / - & (workl(ihd+k-1) - one) - 60 continue - end if -c -c %-------------------------------------------------------------% -c | * Store the wanted NCONV lambda values into D. | -c | * Sort the NCONV wanted lambda in WORKL(IHD:IHD+NCONV-1) | -c | into ascending order and apply sort to the NCONV theta | -c | values in the transformed system. We will need this to | -c | compute Ritz estimates in the original system. | -c | * Finally sort the lambda`s into ascending order and apply | -c | to Ritz vectors if wanted. Else just sort lambda`s into | -c | ascending order. | -c | NOTES: | -c | *workl(iw:iw+ncv-1) contain the theta ordered so that they | -c | match the ordering of the lambda. We`ll use them again for | -c | Ritz vector purification. | -c %-------------------------------------------------------------% -c - call scopy(nconv, workl(ihd), 1, d, 1) - call ssortr('LA', .true., nconv, workl(ihd), workl(iw)) - if (rvec) then - call ssesrt('LA', rvec , nconv, d, ncv, workl(iq), ldq) - else - call scopy(ncv, workl(bounds), 1, workl(ihb), 1) - call sscal(ncv, bnorm2/rnorm, workl(ihb), 1) - call ssortr('LA', .true., nconv, d, workl(ihb)) - end if -c - end if -c -c %------------------------------------------------% -c | Compute the Ritz vectors. Transform the wanted | -c | eigenvectors of the symmetric tridiagonal H by | -c | the Lanczos basis matrix V. 
| -c %------------------------------------------------% -c - if (rvec .and. howmny .eq. 'A') then -c -c %----------------------------------------------------------% -c | Compute the QR factorization of the matrix representing | -c | the wanted invariant subspace located in the first NCONV | -c | columns of workl(iq,ldq). | -c %----------------------------------------------------------% -c - call sgeqr2(ncv, nconv , workl(iq) , - & ldq, workl(iw+ncv), workl(ihb), - & ierr) -c -c %--------------------------------------------------------% -c | * Postmultiply V by Q. | -c | * Copy the first NCONV columns of VQ into Z. | -c | The N by NCONV matrix Z is now a matrix representation | -c | of the approximate invariant subspace associated with | -c | the Ritz values in workl(ihd). | -c %--------------------------------------------------------% -c - call sorm2r('Right', 'Notranspose', n , - & ncv , nconv , workl(iq), - & ldq , workl(iw+ncv), v , - & ldv , workd(n+1) , ierr) - call slacpy('All', n, nconv, v, ldv, z, ldz) -c -c %-----------------------------------------------------% -c | In order to compute the Ritz estimates for the Ritz | -c | values in both systems, need the last row of the | -c | eigenvector matrix. Remember, it`s in factored form | -c %-----------------------------------------------------% -c - do 65 j = 1, ncv-1 - workl(ihb+j-1) = zero - 65 continue - workl(ihb+ncv-1) = one - call sorm2r('Left', 'Transpose' , ncv , - & 1 , nconv , workl(iq) , - & ldq , workl(iw+ncv), workl(ihb), - & ncv , temp , ierr) -c - else if (rvec .and. howmny .eq. 'S') then -c -c Not yet implemented. See remark 2 above. -c - end if -c - if (type .eq. 'REGULR' .and. rvec) then -c - do 70 j=1, ncv - workl(ihb+j-1) = rnorm * abs( workl(ihb+j-1) ) - 70 continue -c - else if (type .ne. 'REGULR' .and. rvec) then -c -c %-------------------------------------------------% -c | * Determine Ritz estimates of the theta. | -c | If RVEC = .true. then compute Ritz estimates | -c | of the theta. 
| -c | If RVEC = .false. then copy Ritz estimates | -c | as computed by ssaupd. | -c | * Determine Ritz estimates of the lambda. | -c %-------------------------------------------------% -c - call sscal (ncv, bnorm2, workl(ihb), 1) - if (type .eq. 'SHIFTI') then -c - do 80 k=1, ncv - workl(ihb+k-1) = abs( workl(ihb+k-1) ) - & / workl(iw+k-1)**2 - 80 continue -c - else if (type .eq. 'BUCKLE') then -c - do 90 k=1, ncv - workl(ihb+k-1) = sigma * abs( workl(ihb+k-1) ) - & / (workl(iw+k-1)-one )**2 - 90 continue -c - else if (type .eq. 'CAYLEY') then -c - do 100 k=1, ncv - workl(ihb+k-1) = abs( workl(ihb+k-1) - & / workl(iw+k-1)*(workl(iw+k-1)-one) ) - 100 continue -c - end if -c - end if -c - if (type .ne. 'REGULR' .and. msglvl .gt. 1) then - call svout(logfil, nconv, d, ndigit, - & '_seupd: Untransformed converged Ritz values') - call svout(logfil, nconv, workl(ihb), ndigit, - & '_seupd: Ritz estimates of the untransformed Ritz values') - else if (msglvl .gt. 1) then - call svout(logfil, nconv, d, ndigit, - & '_seupd: Converged Ritz values') - call svout(logfil, nconv, workl(ihb), ndigit, - & '_seupd: Associated Ritz estimates') - end if -c -c %-------------------------------------------------% -c | Ritz vector purification step. Formally perform | -c | one of inverse subspace iteration. Only used | -c | for MODE = 3,4,5. See reference 7 | -c %-------------------------------------------------% -c - if (rvec .and. (type .eq. 'SHIFTI' .or. type .eq. 'CAYLEY')) then -c - do 110 k=0, nconv-1 - workl(iw+k) = workl(iq+k*ldq+ncv-1) - & / workl(iw+k) - 110 continue -c - else if (rvec .and. type .eq. 'BUCKLE') then -c - do 120 k=0, nconv-1 - workl(iw+k) = workl(iq+k*ldq+ncv-1) - & / (workl(iw+k)-one) - 120 continue -c - end if -c - if (type .ne. 
'REGULR') - & call sger (n, nconv, one, resid, 1, workl(iw), 1, z, ldz) -c - 9000 continue -c - return -c -c %---------------% -c | End of sseupd| -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssgets.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssgets.f deleted file mode 100644 index 8abb6ffaae..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssgets.f +++ /dev/null @@ -1,219 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssgets -c -c\Description: -c Given the eigenvalues of the symmetric tridiagonal matrix H, -c computes the NP shifts AMU that are zeros of the polynomial of -c degree NP which filters out components of the unwanted eigenvectors -c corresponding to the AMU's based on some given criteria. -c -c NOTE: This is called even in the case of user specified shifts in -c order to sort the eigenvalues, and error bounds of H for later use. -c -c\Usage: -c call ssgets -c ( ISHIFT, WHICH, KEV, NP, RITZ, BOUNDS, SHIFTS ) -c -c\Arguments -c ISHIFT Integer. (INPUT) -c Method for selecting the implicit shifts at each iteration. -c ISHIFT = 0: user specified shifts -c ISHIFT = 1: exact shift with respect to the matrix H. -c -c WHICH Character*2. (INPUT) -c Shift selection criteria. -c 'LM' -> KEV eigenvalues of largest magnitude are retained. -c 'SM' -> KEV eigenvalues of smallest magnitude are retained. -c 'LA' -> KEV eigenvalues of largest value are retained. -c 'SA' -> KEV eigenvalues of smallest value are retained. -c 'BE' -> KEV eigenvalues, half from each end of the spectrum. -c If KEV is odd, compute one more from the high end. -c -c KEV Integer. (INPUT) -c KEV+NP is the size of the matrix H. -c -c NP Integer. (INPUT) -c Number of implicit shifts to be computed. -c -c RITZ Real array of length KEV+NP. (INPUT/OUTPUT) -c On INPUT, RITZ contains the eigenvalues of H. 
-c On OUTPUT, RITZ are sorted so that the unwanted eigenvalues -c are in the first NP locations and the wanted part is in -c the last KEV locations. When exact shifts are selected, the -c unwanted part corresponds to the shifts to be applied. -c -c BOUNDS Real array of length KEV+NP. (INPUT/OUTPUT) -c Error bounds corresponding to the ordering in RITZ. -c -c SHIFTS Real array of length NP. (INPUT/OUTPUT) -c On INPUT: contains the user specified shifts if ISHIFT = 0. -c On OUTPUT: contains the shifts sorted into decreasing order -c of magnitude with respect to the Ritz estimates contained in -c BOUNDS. If ISHIFT = 0, SHIFTS is not modified on exit. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c ssortr ARPACK utility sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c svout ARPACK utility routine that prints vectors. -c scopy Level 1 BLAS that copies one vector to another. -c sswap Level 1 BLAS that swaps the contents of two vectors. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/93: Version ' 2.1' -c -c\SCCS Information: @(#) -c FILE: sgets.F SID: 2.4 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\Remarks -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssgets ( ishift, which, kev, np, ritz, bounds, shifts ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - integer ishift, kev, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & bounds(kev+np), ritz(kev+np), shifts(np) -c -c %------------% -c | Parameters | -c %------------% -c - Real - & one, zero - parameter (one = 1.0E+0, zero = 0.0E+0) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer kevd2, msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external sswap, scopy, ssortr, second -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic max, min -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = msgets -c - if (which .eq. 'BE') then -c -c %-----------------------------------------------------% -c | Both ends of the spectrum are requested. | -c | Sort the eigenvalues into algebraically increasing | -c | order first then swap high end of the spectrum next | -c | to low end in appropriate locations. 
| -c | NOTE: when np < floor(kev/2) be careful not to swap | -c | overlapping locations. | -c %-----------------------------------------------------% -c - call ssortr ('LA', .true., kev+np, ritz, bounds) - kevd2 = kev / 2 - if ( kev .gt. 1 ) then - call sswap ( min(kevd2,np), ritz, 1, - & ritz( max(kevd2,np)+1 ), 1) - call sswap ( min(kevd2,np), bounds, 1, - & bounds( max(kevd2,np)+1 ), 1) - end if -c - else -c -c %----------------------------------------------------% -c | LM, SM, LA, SA case. | -c | Sort the eigenvalues of H into the desired order | -c | and apply the resulting order to BOUNDS. | -c | The eigenvalues are sorted so that the wanted part | -c | are always in the last KEV locations. | -c %----------------------------------------------------% -c - call ssortr (which, .true., kev+np, ritz, bounds) - end if -c - if (ishift .eq. 1 .and. np .gt. 0) then -c -c %-------------------------------------------------------% -c | Sort the unwanted Ritz values used as shifts so that | -c | the ones with largest Ritz estimates are first. | -c | This will tend to minimize the effects of the | -c | forward instability of the iteration when the shifts | -c | are applied in subroutine ssapps. | -c %-------------------------------------------------------% -c - call ssortr ('SM', .true., np, bounds, ritz) - call scopy (np, ritz, 1, shifts, 1) - end if -c - call second (t1) - tsgets = tsgets + (t1 - t0) -c - if (msglvl .gt. 
0) then - call ivout (logfil, 1, kev, ndigit, '_sgets: KEV is') - call ivout (logfil, 1, np, ndigit, '_sgets: NP is') - call svout (logfil, kev+np, ritz, ndigit, - & '_sgets: Eigenvalues of current H matrix') - call svout (logfil, kev+np, bounds, ndigit, - & '_sgets: Associated Ritz estimates') - end if -c - return -c -c %---------------% -c | End of ssgets | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssortc.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssortc.f deleted file mode 100644 index dba628ff92..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssortc.f +++ /dev/null @@ -1,344 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssortc -c -c\Description: -c Sorts the complex array in XREAL and XIMAG into the order -c specified by WHICH and optionally applies the permutation to the -c real array Y. It is assumed that if an element of XIMAG is -c nonzero, then its negative is also an element. In other words, -c both members of a complex conjugate pair are to be sorted and the -c pairs are kept adjacent to each other. -c -c\Usage: -c call ssortc -c ( WHICH, APPLY, N, XREAL, XIMAG, Y ) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> sort XREAL,XIMAG into increasing order of magnitude. -c 'SM' -> sort XREAL,XIMAG into decreasing order of magnitude. -c 'LR' -> sort XREAL into increasing order of algebraic. -c 'SR' -> sort XREAL into decreasing order of algebraic. -c 'LI' -> sort XIMAG into increasing order of magnitude. -c 'SI' -> sort XIMAG into decreasing order of magnitude. -c NOTE: If an element of XIMAG is non-zero, then its negative -c is also an element. -c -c APPLY Logical. (Input) -c APPLY = .TRUE. -> apply the sorted order to array Y. -c APPLY = .FALSE. -> do not apply the sorted order to array Y. -c -c N Integer. (INPUT) -c Size of the arrays. -c -c XREAL, Real array of length N. 
(INPUT/OUTPUT) -c XIMAG Real and imaginary part of the array to be sorted. -c -c Y Real array of length N. (INPUT/OUTPUT) -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c xx/xx/92: Version ' 2.1' -c Adapted from the sort routine in LANSO. -c -c\SCCS Information: @(#) -c FILE: sortc.F SID: 2.3 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssortc (which, apply, n, xreal, ximag, y) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & xreal(0:n-1), ximag(0:n-1), y(0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Real - & temp, temp1, temp2 -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Real - & slapy2 - external slapy2 -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'LM') then -c -c %------------------------------------------------------% -c | Sort XREAL,XIMAG into increasing order of magnitude. | -c %------------------------------------------------------% -c - 10 continue - if (igap .eq. 
0) go to 9000 -c - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - temp1 = slapy2(xreal(j),ximag(j)) - temp2 = slapy2(xreal(j+igap),ximag(j+igap)) -c - if (temp1.gt.temp2) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 30 - end if - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c %------------------------------------------------------% -c | Sort XREAL,XIMAG into decreasing order of magnitude. | -c %------------------------------------------------------% -c - 40 continue - if (igap .eq. 0) go to 9000 -c - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j .lt. 0) go to 60 -c - temp1 = slapy2(xreal(j),ximag(j)) - temp2 = slapy2(xreal(j+igap),ximag(j+igap)) -c - if (temp1.lt.temp2) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LR') then -c -c %------------------------------------------------% -c | Sort XREAL into increasing order of algebraic. | -c %------------------------------------------------% -c - 70 continue - if (igap .eq. 
0) go to 9000 -c - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (xreal(j).gt.xreal(j+igap)) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'SR') then -c -c %------------------------------------------------% -c | Sort XREAL into decreasing order of algebraic. | -c %------------------------------------------------% -c - 100 continue - if (igap .eq. 0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (xreal(j).lt.xreal(j+igap)) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 -c - else if (which .eq. 'LI') then -c -c %------------------------------------------------% -c | Sort XIMAG into increasing order of magnitude. | -c %------------------------------------------------% -c - 130 continue - if (igap .eq. 0) go to 9000 - do 150 i = igap, n-1 - j = i-igap - 140 continue -c - if (j.lt.0) go to 150 -c - if (abs(ximag(j)).gt.abs(ximag(j+igap))) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 150 - endif - j = j-igap - go to 140 - 150 continue - igap = igap / 2 - go to 130 -c - else if (which .eq. 'SI') then -c -c %------------------------------------------------% -c | Sort XIMAG into decreasing order of magnitude. 
| -c %------------------------------------------------% -c - 160 continue - if (igap .eq. 0) go to 9000 - do 180 i = igap, n-1 - j = i-igap - 170 continue -c - if (j.lt.0) go to 180 -c - if (abs(ximag(j)).lt.abs(ximag(j+igap))) then - temp = xreal(j) - xreal(j) = xreal(j+igap) - xreal(j+igap) = temp -c - temp = ximag(j) - ximag(j) = ximag(j+igap) - ximag(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 180 - endif - j = j-igap - go to 170 - 180 continue - igap = igap / 2 - go to 160 - end if -c - 9000 continue - return -c -c %---------------% -c | End of ssortc | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssortr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssortr.f deleted file mode 100644 index 267b1251c1..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/ssortr.f +++ /dev/null @@ -1,218 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: ssortr -c -c\Description: -c Sort the array X1 in the order specified by WHICH and optionally -c applies the permutation to the array X2. -c -c\Usage: -c call ssortr -c ( WHICH, APPLY, N, X1, X2 ) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> X1 is sorted into increasing order of magnitude. -c 'SM' -> X1 is sorted into decreasing order of magnitude. -c 'LA' -> X1 is sorted into increasing order of algebraic. -c 'SA' -> X1 is sorted into decreasing order of algebraic. -c -c APPLY Logical. (Input) -c APPLY = .TRUE. -> apply the sorted order to X2. -c APPLY = .FALSE. -> do not apply the sorted order to X2. -c -c N Integer. (INPUT) -c Size of the arrays. -c -c X1 Real array of length N. (INPUT/OUTPUT) -c The array to be sorted. -c -c X2 Real array of length N. (INPUT/OUTPUT) -c Only referenced if APPLY = .TRUE. 
-c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\Revision history: -c 12/16/93: Version ' 2.1'. -c Adapted from the sort routine in LANSO. -c -c\SCCS Information: @(#) -c FILE: sortr.F SID: 2.3 DATE OF SID: 4/19/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine ssortr (which, apply, n, x1, x2) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & x1(0:n-1), x2(0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Real - & temp -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'SA') then -c -c X1 is sorted into decreasing order of algebraic. -c - 10 continue - if (igap .eq. 0) go to 9000 - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - if (x1(j).lt.x1(j+igap)) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 30 - endif - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c X1 is sorted into decreasing order of magnitude. -c - 40 continue - if (igap .eq. 
0) go to 9000 - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j.lt.0) go to 60 -c - if (abs(x1(j)).lt.abs(x1(j+igap))) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LA') then -c -c X1 is sorted into increasing order of algebraic. -c - 70 continue - if (igap .eq. 0) go to 9000 - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (x1(j).gt.x1(j+igap)) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'LM') then -c -c X1 is sorted into increasing order of magnitude. -c - 100 continue - if (igap .eq. 0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (abs(x1(j)).gt.abs(x1(j+igap))) then - temp = x1(j) - x1(j) = x1(j+igap) - x1(j+igap) = temp - if (apply) then - temp = x2(j) - x2(j) = x2(j+igap) - x2(j+igap) = temp - end if - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 - end if -c - 9000 continue - return -c -c %---------------% -c | End of ssortr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstatn.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstatn.f deleted file mode 100644 index cdf2883073..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstatn.f +++ /dev/null @@ -1,61 +0,0 @@ -c -c %---------------------------------------------% -c | Initialize statistic and timing information | -c | for nonsymmetric Arnoldi code. 
| -c %---------------------------------------------% -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: statn.F SID: 2.4 DATE OF SID: 4/20/96 RELEASE: 2 -c - subroutine sstatn -c -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% -c - include 'stat.h' -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - nopx = 0 - nbx = 0 - nrorth = 0 - nitref = 0 - nrstrt = 0 -c - tnaupd = 0.0E+0 - tnaup2 = 0.0E+0 - tnaitr = 0.0E+0 - tneigh = 0.0E+0 - tngets = 0.0E+0 - tnapps = 0.0E+0 - tnconv = 0.0E+0 - titref = 0.0E+0 - tgetv0 = 0.0E+0 - trvec = 0.0E+0 -c -c %----------------------------------------------------% -c | User time including reverse communication overhead | -c %----------------------------------------------------% -c - tmvopx = 0.0E+0 - tmvbx = 0.0E+0 -c - return -c -c -c %---------------% -c | End of sstatn | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstats.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstats.f deleted file mode 100644 index 86109dcb47..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstats.f +++ /dev/null @@ -1,47 +0,0 @@ -c -c\SCCS Information: @(#) -c FILE: stats.F SID: 2.1 DATE OF SID: 4/19/96 RELEASE: 2 -c %---------------------------------------------% -c | Initialize statistic and timing information | -c | for symmetric Arnoldi code. 
| -c %---------------------------------------------% - - subroutine sstats - -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% - include 'stat.h' - -c %-----------------------% -c | Executable Statements | -c %-----------------------% - - nopx = 0 - nbx = 0 - nrorth = 0 - nitref = 0 - nrstrt = 0 - - tsaupd = 0.0E+0 - tsaup2 = 0.0E+0 - tsaitr = 0.0E+0 - tseigt = 0.0E+0 - tsgets = 0.0E+0 - tsapps = 0.0E+0 - tsconv = 0.0E+0 - titref = 0.0E+0 - tgetv0 = 0.0E+0 - trvec = 0.0E+0 - -c %----------------------------------------------------% -c | User time including reverse communication overhead | -c %----------------------------------------------------% - tmvopx = 0.0E+0 - tmvbx = 0.0E+0 - - return -c -c End of sstats -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstqrb.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstqrb.f deleted file mode 100644 index 9fd1e19257..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/sstqrb.f +++ /dev/null @@ -1,594 +0,0 @@ -c----------------------------------------------------------------------- -c\BeginDoc -c -c\Name: sstqrb -c -c\Description: -c Computes all eigenvalues and the last component of the eigenvectors -c of a symmetric tridiagonal matrix using the implicit QL or QR method. -c -c This is mostly a modification of the LAPACK routine ssteqr. -c See Remarks. -c -c\Usage: -c call sstqrb -c ( N, D, E, Z, WORK, INFO ) -c -c\Arguments -c N Integer. (INPUT) -c The number of rows and columns in the matrix. N >= 0. -c -c D Real array, dimension (N). (INPUT/OUTPUT) -c On entry, D contains the diagonal elements of the -c tridiagonal matrix. -c On exit, D contains the eigenvalues, in ascending order. -c If an error exit is made, the eigenvalues are correct -c for indices 1,2,...,INFO-1, but they are unordered and -c may not be the smallest eigenvalues of the matrix. -c -c E Real array, dimension (N-1). 
(INPUT/OUTPUT) -c On entry, E contains the subdiagonal elements of the -c tridiagonal matrix in positions 1 through N-1. -c On exit, E has been destroyed. -c -c Z Real array, dimension (N). (OUTPUT) -c On exit, Z contains the last row of the orthonormal -c eigenvector matrix of the symmetric tridiagonal matrix. -c If an error exit is made, Z contains the last row of the -c eigenvector matrix associated with the stored eigenvalues. -c -c WORK Real array, dimension (max(1,2*N-2)). (WORKSPACE) -c Workspace used in accumulating the transformation for -c computing the last components of the eigenvectors. -c -c INFO Integer. (OUTPUT) -c = 0: normal return. -c < 0: if INFO = -i, the i-th argument had an illegal value. -c > 0: if INFO = +i, the i-th eigenvalue has not converged -c after a total of 30*N iterations. -c -c\Remarks -c 1. None. -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx real -c -c\Routines called: -c saxpy Level 1 BLAS that computes a vector triad. -c scopy Level 1 BLAS that copies one vector to another. -c sswap Level 1 BLAS that swaps the contents of two vectors. -c lsame LAPACK character comparison routine. -c slae2 LAPACK routine that computes the eigenvalues of a 2-by-2 -c symmetric matrix. -c slaev2 LAPACK routine that eigendecomposition of a 2-by-2 symmetric -c matrix. -c slamch LAPACK routine that determines machine constants. -c slanst LAPACK routine that computes the norm of a matrix. -c slapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c slartg LAPACK Givens rotation construction routine. -c slascl LAPACK routine for careful scaling of a matrix. -c slaset LAPACK matrix initialization routine. -c slasr LAPACK routine that applies an orthogonal transformation to -c a matrix. -c slasrt LAPACK sorting routine. -c ssteqr LAPACK routine that computes eigenvalues and eigenvectors -c of a symmetric tridiagonal matrix. -c xerbla LAPACK error handler routine. 
-c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: stqrb.F SID: 2.5 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c 1. Starting with version 2.5, this routine is a modified version -c of LAPACK version 2.0 subroutine SSTEQR. No lines are deleted, -c only commeted out and new lines inserted. -c All lines commented out have "c$$$" at the beginning. -c Note that the LAPACK version 1.0 subroutine SSTEQR contained -c bugs. -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine sstqrb ( n, d, e, z, work, info ) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer info, n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Real - & d( n ), e( n-1 ), z( n ), work( 2*n-2 ) -c -c .. parameters .. - Real - & zero, one, two, three - parameter ( zero = 0.0E+0, one = 1.0E+0, - & two = 2.0E+0, three = 3.0E+0 ) - integer maxit - parameter ( maxit = 30 ) -c .. -c .. local scalars .. - integer i, icompz, ii, iscale, j, jtot, k, l, l1, lend, - & lendm1, lendp1, lendsv, lm1, lsv, m, mm, mm1, - & nm1, nmaxit - Real - & anorm, b, c, eps, eps2, f, g, p, r, rt1, rt2, - & s, safmax, safmin, ssfmax, ssfmin, tst -c .. -c .. external functions .. - logical lsame - Real - & slamch, slanst, slapy2 - external lsame, slamch, slanst, slapy2 -c .. -c .. external subroutines .. - external slae2, slaev2, slartg, slascl, slaset, slasr, - & slasrt, sswap, xerbla -c .. -c .. intrinsic functions .. - intrinsic abs, max, sign, sqrt -c .. -c .. executable statements .. -c -c test the input parameters. 
-c - info = 0 -c -c$$$ IF( LSAME( COMPZ, 'N' ) ) THEN -c$$$ ICOMPZ = 0 -c$$$ ELSE IF( LSAME( COMPZ, 'V' ) ) THEN -c$$$ ICOMPZ = 1 -c$$$ ELSE IF( LSAME( COMPZ, 'I' ) ) THEN -c$$$ ICOMPZ = 2 -c$$$ ELSE -c$$$ ICOMPZ = -1 -c$$$ END IF -c$$$ IF( ICOMPZ.LT.0 ) THEN -c$$$ INFO = -1 -c$$$ ELSE IF( N.LT.0 ) THEN -c$$$ INFO = -2 -c$$$ ELSE IF( ( LDZ.LT.1 ) .OR. ( ICOMPZ.GT.0 .AND. LDZ.LT.MAX( 1, -c$$$ $ N ) ) ) THEN -c$$$ INFO = -6 -c$$$ END IF -c$$$ IF( INFO.NE.0 ) THEN -c$$$ CALL XERBLA( 'SSTEQR', -INFO ) -c$$$ RETURN -c$$$ END IF -c -c *** New starting with version 2.5 *** -c - icompz = 2 -c ************************************* -c -c quick return if possible -c - if( n.eq.0 ) - $ return -c - if( n.eq.1 ) then - if( icompz.eq.2 ) z( 1 ) = one - return - end if -c -c determine the unit roundoff and over/underflow thresholds. -c - eps = slamch( 'e' ) - eps2 = eps**2 - safmin = slamch( 's' ) - safmax = one / safmin - ssfmax = sqrt( safmax ) / three - ssfmin = sqrt( safmin ) / eps2 -c -c compute the eigenvalues and eigenvectors of the tridiagonal -c matrix. -c -c$$ if( icompz.eq.2 ) -c$$$ $ call slaset( 'full', n, n, zero, one, z, ldz ) -c -c *** New starting with version 2.5 *** -c - if ( icompz .eq. 2 ) then - do 5 j = 1, n-1 - z(j) = zero - 5 continue - z( n ) = one - end if -c ************************************* -c - nmaxit = n*maxit - jtot = 0 -c -c determine where the matrix splits and choose ql or qr iteration -c for each block, according to whether top or bottom diagonal -c element is smaller. 
-c - l1 = 1 - nm1 = n - 1 -c - 10 continue - if( l1.gt.n ) - $ go to 160 - if( l1.gt.1 ) - $ e( l1-1 ) = zero - if( l1.le.nm1 ) then - do 20 m = l1, nm1 - tst = abs( e( m ) ) - if( tst.eq.zero ) - $ go to 30 - if( tst.le.( sqrt( abs( d( m ) ) )*sqrt( abs( d( m+ - $ 1 ) ) ) )*eps ) then - e( m ) = zero - go to 30 - end if - 20 continue - end if - m = n -c - 30 continue - l = l1 - lsv = l - lend = m - lendsv = lend - l1 = m + 1 - if( lend.eq.l ) - $ go to 10 -c -c scale submatrix in rows and columns l to lend -c - anorm = slanst( 'i', lend-l+1, d( l ), e( l ) ) - iscale = 0 - if( anorm.eq.zero ) - $ go to 10 - if( anorm.gt.ssfmax ) then - iscale = 1 - call slascl( 'g', 0, 0, anorm, ssfmax, lend-l+1, 1, d( l ), n, - $ info ) - call slascl( 'g', 0, 0, anorm, ssfmax, lend-l, 1, e( l ), n, - $ info ) - else if( anorm.lt.ssfmin ) then - iscale = 2 - call slascl( 'g', 0, 0, anorm, ssfmin, lend-l+1, 1, d( l ), n, - $ info ) - call slascl( 'g', 0, 0, anorm, ssfmin, lend-l, 1, e( l ), n, - $ info ) - end if -c -c choose between ql and qr iteration -c - if( abs( d( lend ) ).lt.abs( d( l ) ) ) then - lend = lsv - l = lendsv - end if -c - if( lend.gt.l ) then -c -c ql iteration -c -c look for small subdiagonal element. -c - 40 continue - if( l.ne.lend ) then - lendm1 = lend - 1 - do 50 m = l, lendm1 - tst = abs( e( m ) )**2 - if( tst.le.( eps2*abs( d( m ) ) )*abs( d( m+1 ) )+ - $ safmin )go to 60 - 50 continue - end if -c - m = lend -c - 60 continue - if( m.lt.lend ) - $ e( m ) = zero - p = d( l ) - if( m.eq.l ) - $ go to 80 -c -c if remaining matrix is 2-by-2, use slae2 or slaev2 -c to compute its eigensystem. 
-c - if( m.eq.l+1 ) then - if( icompz.gt.0 ) then - call slaev2( d( l ), e( l ), d( l+1 ), rt1, rt2, c, s ) - work( l ) = c - work( n-1+l ) = s -c$$$ call slasr( 'r', 'v', 'b', n, 2, work( l ), -c$$$ $ work( n-1+l ), z( 1, l ), ldz ) -c -c *** New starting with version 2.5 *** -c - tst = z(l+1) - z(l+1) = c*tst - s*z(l) - z(l) = s*tst + c*z(l) -c ************************************* - else - call slae2( d( l ), e( l ), d( l+1 ), rt1, rt2 ) - end if - d( l ) = rt1 - d( l+1 ) = rt2 - e( l ) = zero - l = l + 2 - if( l.le.lend ) - $ go to 40 - go to 140 - end if -c - if( jtot.eq.nmaxit ) - $ go to 140 - jtot = jtot + 1 -c -c form shift. -c - g = ( d( l+1 )-p ) / ( two*e( l ) ) - r = slapy2( g, one ) - g = d( m ) - p + ( e( l ) / ( g+sign( r, g ) ) ) -c - s = one - c = one - p = zero -c -c inner loop -c - mm1 = m - 1 - do 70 i = mm1, l, -1 - f = s*e( i ) - b = c*e( i ) - call slartg( g, f, c, s, r ) - if( i.ne.m-1 ) - $ e( i+1 ) = r - g = d( i+1 ) - p - r = ( d( i )-g )*s + two*c*b - p = s*r - d( i+1 ) = g + p - g = c*r - b -c -c if eigenvectors are desired, then save rotations. -c - if( icompz.gt.0 ) then - work( i ) = c - work( n-1+i ) = -s - end if -c - 70 continue -c -c if eigenvectors are desired, then apply saved rotations. -c - if( icompz.gt.0 ) then - mm = m - l + 1 -c$$$ call slasr( 'r', 'v', 'b', n, mm, work( l ), work( n-1+l ), -c$$$ $ z( 1, l ), ldz ) -c -c *** New starting with version 2.5 *** -c - call slasr( 'r', 'v', 'b', 1, mm, work( l ), - & work( n-1+l ), z( l ), 1 ) -c ************************************* - end if -c - d( l ) = d( l ) - p - e( l ) = g - go to 40 -c -c eigenvalue found. -c - 80 continue - d( l ) = p -c - l = l + 1 - if( l.le.lend ) - $ go to 40 - go to 140 -c - else -c -c qr iteration -c -c look for small superdiagonal element. 
-c - 90 continue - if( l.ne.lend ) then - lendp1 = lend + 1 - do 100 m = l, lendp1, -1 - tst = abs( e( m-1 ) )**2 - if( tst.le.( eps2*abs( d( m ) ) )*abs( d( m-1 ) )+ - $ safmin )go to 110 - 100 continue - end if -c - m = lend -c - 110 continue - if( m.gt.lend ) - $ e( m-1 ) = zero - p = d( l ) - if( m.eq.l ) - $ go to 130 -c -c if remaining matrix is 2-by-2, use slae2 or slaev2 -c to compute its eigensystem. -c - if( m.eq.l-1 ) then - if( icompz.gt.0 ) then - call slaev2( d( l-1 ), e( l-1 ), d( l ), rt1, rt2, c, s ) -c$$$ work( m ) = c -c$$$ work( n-1+m ) = s -c$$$ call slasr( 'r', 'v', 'f', n, 2, work( m ), -c$$$ $ work( n-1+m ), z( 1, l-1 ), ldz ) -c -c *** New starting with version 2.5 *** -c - tst = z(l) - z(l) = c*tst - s*z(l-1) - z(l-1) = s*tst + c*z(l-1) -c ************************************* - else - call slae2( d( l-1 ), e( l-1 ), d( l ), rt1, rt2 ) - end if - d( l-1 ) = rt1 - d( l ) = rt2 - e( l-1 ) = zero - l = l - 2 - if( l.ge.lend ) - $ go to 90 - go to 140 - end if -c - if( jtot.eq.nmaxit ) - $ go to 140 - jtot = jtot + 1 -c -c form shift. -c - g = ( d( l-1 )-p ) / ( two*e( l-1 ) ) - r = slapy2( g, one ) - g = d( m ) - p + ( e( l-1 ) / ( g+sign( r, g ) ) ) -c - s = one - c = one - p = zero -c -c inner loop -c - lm1 = l - 1 - do 120 i = m, lm1 - f = s*e( i ) - b = c*e( i ) - call slartg( g, f, c, s, r ) - if( i.ne.m ) - $ e( i-1 ) = r - g = d( i ) - p - r = ( d( i+1 )-g )*s + two*c*b - p = s*r - d( i ) = g + p - g = c*r - b -c -c if eigenvectors are desired, then save rotations. -c - if( icompz.gt.0 ) then - work( i ) = c - work( n-1+i ) = s - end if -c - 120 continue -c -c if eigenvectors are desired, then apply saved rotations. 
-c - if( icompz.gt.0 ) then - mm = l - m + 1 -c$$$ call slasr( 'r', 'v', 'f', n, mm, work( m ), work( n-1+m ), -c$$$ $ z( 1, m ), ldz ) -c -c *** New starting with version 2.5 *** -c - call slasr( 'r', 'v', 'f', 1, mm, work( m ), work( n-1+m ), - & z( m ), 1 ) -c ************************************* - end if -c - d( l ) = d( l ) - p - e( lm1 ) = g - go to 90 -c -c eigenvalue found. -c - 130 continue - d( l ) = p -c - l = l - 1 - if( l.ge.lend ) - $ go to 90 - go to 140 -c - end if -c -c undo scaling if necessary -c - 140 continue - if( iscale.eq.1 ) then - call slascl( 'g', 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, - $ d( lsv ), n, info ) - call slascl( 'g', 0, 0, ssfmax, anorm, lendsv-lsv, 1, e( lsv ), - $ n, info ) - else if( iscale.eq.2 ) then - call slascl( 'g', 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, - $ d( lsv ), n, info ) - call slascl( 'g', 0, 0, ssfmin, anorm, lendsv-lsv, 1, e( lsv ), - $ n, info ) - end if -c -c check for no convergence to an eigenvalue after a total -c of n*maxit iterations. -c - if( jtot.lt.nmaxit ) - $ go to 10 - do 150 i = 1, n - 1 - if( e( i ).ne.zero ) - $ info = info + 1 - 150 continue - go to 190 -c -c order eigenvalues and eigenvectors. 
-c - 160 continue - if( icompz.eq.0 ) then -c -c use quick sort -c - call slasrt( 'i', n, d, info ) -c - else -c -c use selection sort to minimize swaps of eigenvectors -c - do 180 ii = 2, n - i = ii - 1 - k = i - p = d( i ) - do 170 j = ii, n - if( d( j ).lt.p ) then - k = j - p = d( j ) - end if - 170 continue - if( k.ne.i ) then - d( k ) = d( i ) - d( i ) = p -c$$$ call sswap( n, z( 1, i ), 1, z( 1, k ), 1 ) -c *** New starting with version 2.5 *** -c - p = z(k) - z(k) = z(i) - z(i) = p -c ************************************* - end if - 180 continue - end if -c - 190 continue - return -c -c %---------------% -c | End of sstqrb | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/stat.h b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/stat.h deleted file mode 100644 index 66a8e9f87f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/stat.h +++ /dev/null @@ -1,21 +0,0 @@ -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% -c -c\SCCS Information: @(#) -c FILE: stat.h SID: 2.2 DATE OF SID: 11/16/95 RELEASE: 2 -c - real t0, t1, t2, t3, t4, t5 - save t0, t1, t2, t3, t4, t5 -c - integer nopx, nbx, nrorth, nitref, nrstrt - real tsaupd, tsaup2, tsaitr, tseigt, tsgets, tsapps, tsconv, - & tnaupd, tnaup2, tnaitr, tneigh, tngets, tnapps, tnconv, - & tcaupd, tcaup2, tcaitr, tceigh, tcgets, tcapps, tcconv, - & tmvopx, tmvbx, tgetv0, titref, trvec - common /timing/ - & nopx, nbx, nrorth, nitref, nrstrt, - & tsaupd, tsaup2, tsaitr, tseigt, tsgets, tsapps, tsconv, - & tnaupd, tnaup2, tnaitr, tneigh, tngets, tnapps, tnconv, - & tcaupd, tcaup2, tcaitr, tceigh, tcgets, tcapps, tcconv, - & tmvopx, tmvbx, tgetv0, titref, trvec diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/version.h b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/version.h deleted file mode 100644 index ecdd9b3405..0000000000 --- 
a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/version.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - - In the current version, the parameter KAPPA in the Kahan's test - for orthogonality is set to 0.717, the same as used by Gragg & Reichel. - However computational experience indicates that this is a little too - strict and will frequently force reorthogonalization when it is not - necessary to do so. - - Also the "moving boundary" idea is not currently activated in the nonsymmetric - code since it is not conclusive that it's the right thing to do all the time. - Requires further investigation. - - As of 02/01/93 Richard Lehoucq assumes software control of the codes from - Phuong Vu. On 03/01/93 all the *.F files were migrated SCCS. The 1.1 version - of codes are those received from Phuong Vu. The frozen version of 07/08/92 - is now considered version 1.1. - - Version 2.1 contains two new symmetric routines, sesrt and seupd. - Changes as well as bug fixes for version 1.1 codes that were only corrected - for programming bugs are version 1.2. These 1.2 versions will also be in version 2.1. - Subroutine [d,s]saupd now requires slightly more workspace. See [d,s]saupd for the - details. - - \SCCS Information: @(#) - FILE: version.h SID: 2.3 DATE OF SID: 11/16/95 RELEASE: 2 - - */ - -#define VERSION_NUMBER ' 2.1' -#define VERSION_DATE ' 11/15/95' diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zgetv0.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zgetv0.f deleted file mode 100644 index 99ce0ea0f7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zgetv0.f +++ /dev/null @@ -1,414 +0,0 @@ -c\BeginDoc -c -c\Name: zgetv0 -c -c\Description: -c Generate a random initial residual vector for the Arnoldi process. -c Force the residual vector to be in the range of the operator OP. 
-c -c\Usage: -c call zgetv0 -c ( IDO, BMAT, ITRY, INITV, N, J, V, LDV, RESID, RNORM, -c IPNTR, WORKD, IERR ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to zgetv0. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 99: done -c ------------------------------------------------------------- -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B in the (generalized) -c eigenvalue problem A*x = lambda*B*x. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*B*x -c -c ITRY Integer. (INPUT) -c ITRY counts the number of times that zgetv0 is called. -c It should be set to 1 on the initial call to zgetv0. -c -c INITV Logical variable. (INPUT) -c .TRUE. => the initial residual vector is given in RESID. -c .FALSE. => generate a random initial residual vector. -c -c N Integer. (INPUT) -c Dimension of the problem. -c -c J Integer. (INPUT) -c Index of the residual vector to be generated, with respect to -c the Arnoldi process. J > 1 in case of a "restart". -c -c V Complex*16 N by J array. (INPUT) -c The first J-1 columns of V contain the current Arnoldi basis -c if this is a "restart". -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c RESID Complex*16 array of length N. (INPUT/OUTPUT) -c Initial residual vector to be generated. If RESID is -c provided, force RESID into the range of the operator OP. -c -c RNORM Double precision scalar. 
(OUTPUT) -c B-norm of the generated residual. -c -c IPNTR Integer array of length 3. (OUTPUT) -c -c WORKD Complex*16 work array of length 2*N. (REVERSE COMMUNICATION). -c On exit, WORK(1:N) = B*RESID to be used in SSAITR. -c -c IERR Integer. (OUTPUT) -c = 0: Normal exit. -c = -1: Cannot generate a nontrivial restarted residual vector -c in the range of the operator OP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c -c\Routines called: -c second ARPACK utility routine for timing. -c zvout ARPACK utility routine that prints vectors. -c zlarnv LAPACK routine for generating a random vector. -c zgemv Level 2 BLAS routine for matrix vector multiplication. -c zcopy Level 1 BLAS that copies one vector to another. -c wzdotc Level 1 BLAS that computes the scalar product of two vectors. -c dznrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: getv0.F SID: 2.3 DATE OF SID: 08/27/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine zgetv0 - & ( ido, bmat, itry, initv, n, j, v, ldv, resid, rnorm, - & ipntr, workd, ierr ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - logical initv - integer ido, ierr, itry, j, ldv, n - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Complex*16 - & resid(n), v(ldv,j), workd(2*n) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - Double precision - & rzero - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0), - & rzero = 0.0D+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical first, inits, orth - integer idist, iseed(4), iter, msglvl, jj - Double precision - & rnorm0 - Complex*16 - & cnorm - save first, iseed, inits, iter, msglvl, orth, rnorm0 -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zcopy, zgemv, zlarnv, zvout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dznrm2, dlapy2 - Complex*16 - & wzdotc - external wzdotc, dznrm2, dlapy2 -c -c %-----------------% -c | Data Statements | -c %-----------------% -c - data inits /.true./ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c -c %-----------------------------------% -c | Initialize the seed of the LAPACK | -c | random 
number generator | -c %-----------------------------------% -c - if (inits) then - iseed(1) = 1 - iseed(2) = 3 - iseed(3) = 5 - iseed(4) = 7 - inits = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mgetv0 -c - ierr = 0 - iter = 0 - first = .FALSE. - orth = .FALSE. -c -c %-----------------------------------------------------% -c | Possibly generate a random starting vector in RESID | -c | Use a LAPACK random number generator used by the | -c | matrix generation routines. | -c | idist = 1: uniform (0,1) distribution; | -c | idist = 2: uniform (-1,1) distribution; | -c | idist = 3: normal (0,1) distribution; | -c %-----------------------------------------------------% -c - if (.not.initv) then - idist = 2 - call zlarnv (idist, iseed, n, resid) - end if -c -c %----------------------------------------------------------% -c | Force the starting vector into the range of OP to handle | -c | the generalized problem when B is possibly (singular). | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nopx = nopx + 1 - ipntr(1) = 1 - ipntr(2) = n + 1 - call zcopy (n, resid, 1, workd, 1) - ido = -1 - go to 9000 - end if - end if -c -c %----------------------------------------% -c | Back from computing B*(initial-vector) | -c %----------------------------------------% -c - if (first) go to 20 -c -c %-----------------------------------------------% -c | Back from computing B*(orthogonalized-vector) | -c %-----------------------------------------------% -c - if (orth) go to 40 -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) -c -c %------------------------------------------------------% -c | Starting vector is now in the range of OP; r = OP*r; | -c | Compute B-norm of starting vector. 
| -c %------------------------------------------------------% -c - call second (t2) - first = .TRUE. - if (bmat .eq. 'G') then - nbx = nbx + 1 - call zcopy (n, workd(n+1), 1, resid, 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 'I') then - call zcopy (n, resid, 1, workd, 1) - end if -c - 20 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - first = .FALSE. - if (bmat .eq. 'G') then - cnorm = wzdotc (n, resid, 1, workd, 1) - rnorm0 = sqrt(dlapy2(dble(cnorm),dimag(cnorm))) - else if (bmat .eq. 'I') then - rnorm0 = dznrm2(n, resid, 1) - end if - rnorm = rnorm0 -c -c %---------------------------------------------% -c | Exit if this is the very first Arnoldi step | -c %---------------------------------------------% -c - if (j .eq. 1) go to 50 -c -c %---------------------------------------------------------------- -c | Otherwise need to B-orthogonalize the starting vector against | -c | the current Arnoldi basis using Gram-Schmidt with iter. ref. | -c | This is the case where an invariant subspace is encountered | -c | in the middle of the Arnoldi factorization. | -c | | -c | s = V^{T}*B*r; r = r - V*s; | -c | | -c | Stopping criteria used for iter. ref. is discussed in | -c | Parlett's book, page 107 and in Gragg & Reichel TOMS paper. | -c %---------------------------------------------------------------% -c - orth = .TRUE. - 30 continue -c - call zgemv ('C', n, j-1, one, v, ldv, workd, 1, - & zero, workd(n+1), 1) - call zgemv ('N', n, j-1, -one, v, ldv, workd(n+1), 1, - & one, resid, 1) -c -c %----------------------------------------------------------% -c | Compute the B-norm of the orthogonalized starting vector | -c %----------------------------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call zcopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 - go to 9000 - else if (bmat .eq. 
'I') then - call zcopy (n, resid, 1, workd, 1) - end if -c - 40 continue -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - cnorm = wzdotc (n, resid, 1, workd, 1) - rnorm = sqrt(dlapy2(dble(cnorm),dimag(cnorm))) - else if (bmat .eq. 'I') then - rnorm = dznrm2(n, resid, 1) - end if -c -c %--------------------------------------% -c | Check for further orthogonalization. | -c %--------------------------------------% -c - if (msglvl .gt. 2) then - call dvout (logfil, 1, rnorm0, ndigit, - & '_getv0: re-orthonalization ; rnorm0 is') - call dvout (logfil, 1, rnorm, ndigit, - & '_getv0: re-orthonalization ; rnorm is') - end if -c - if (rnorm .gt. 0.717*rnorm0) go to 50 -c - iter = iter + 1 - if (iter .le. 1) then -c -c %-----------------------------------% -c | Perform iterative refinement step | -c %-----------------------------------% -c - rnorm0 = rnorm - go to 30 - else -c -c %------------------------------------% -c | Iterative refinement step "failed" | -c %------------------------------------% -c - do 45 jj = 1, n - resid(jj) = zero - 45 continue - rnorm = rzero - ierr = -1 - end if -c - 50 continue -c - if (msglvl .gt. 0) then - call dvout (logfil, 1, rnorm, ndigit, - & '_getv0: B-norm of initial / restarted starting vector') - end if - if (msglvl .gt. 
2) then - call zvout (logfil, n, resid, ndigit, - & '_getv0: initial / restarted starting vector') - end if - ido = 99 -c - call second (t1) - tgetv0 = tgetv0 + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of zgetv0 | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaitr.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaitr.f deleted file mode 100644 index d43724b681..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaitr.f +++ /dev/null @@ -1,850 +0,0 @@ -c\BeginDoc -c -c\Name: znaitr -c -c\Description: -c Reverse communication interface for applying NP additional steps to -c a K step nonsymmetric Arnoldi factorization. -c -c Input: OP*V_{k} - V_{k}*H = r_{k}*e_{k}^T -c -c with (V_{k}^T)*B*V_{k} = I, (V_{k}^T)*B*r_{k} = 0. -c -c Output: OP*V_{k+p} - V_{k+p}*H = r_{k+p}*e_{k+p}^T -c -c with (V_{k+p}^T)*B*V_{k+p} = I, (V_{k+p}^T)*B*r_{k+p} = 0. -c -c where OP and B are as in znaupd. The B-norm of r_{k+p} is also -c computed and returned. -c -c\Usage: -c call znaitr -c ( IDO, BMAT, N, K, NP, NB, RESID, RNORM, V, LDV, H, LDH, -c IPNTR, WORKD, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. -c This is for the restart phase to force the new -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y, -c IPNTR(3) is the pointer into WORK for B * X. -c IDO = 2: compute Y = B * X where -c IPNTR(1) is the pointer into WORK for X, -c IPNTR(2) is the pointer into WORK for Y. 
-c IDO = 99: done -c ------------------------------------------------------------- -c When the routine is used in the "shift-and-invert" mode, the -c vector B * Q is already available and do not need to be -c recomputed in forming OP * Q. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. See znaupd. -c B = 'I' -> standard eigenvalue problem A*x = lambda*x -c B = 'G' -> generalized eigenvalue problem A*x = lambda*M**x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c K Integer. (INPUT) -c Current size of V and H. -c -c NP Integer. (INPUT) -c Number of additional Arnoldi steps to take. -c -c NB Integer. (INPUT) -c Blocksize to be used in the recurrence. -c Only work for NB = 1 right now. The goal is to have a -c program that implement both the block and non-block method. -c -c RESID Complex*16 array of length N. (INPUT/OUTPUT) -c On INPUT: RESID contains the residual vector r_{k}. -c On OUTPUT: RESID contains the residual vector r_{k+p}. -c -c RNORM Double precision scalar. (INPUT/OUTPUT) -c B-norm of the starting residual on input. -c B-norm of the updated residual r_{k+p} on output. -c -c V Complex*16 N by K+NP array. (INPUT/OUTPUT) -c On INPUT: V contains the Arnoldi vectors in the first K -c columns. -c On OUTPUT: V contains the new NP Arnoldi vectors in the next -c NP columns. The first K columns are unchanged. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Complex*16 (K+NP) by (K+NP) array. (INPUT/OUTPUT) -c H is used to store the generated upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORK for -c vectors used by the Arnoldi iteration. 
-c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Complex*16 work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The calling program should not -c use WORKD as temporary workspace during the iteration !!!!!! -c On input, WORKD(1:N) = B*RESID and is used to save some -c computation at the first step. -c -c INFO Integer. (OUTPUT) -c = 0: Normal exit. -c > 0: Size of the spanning invariant subspace of OP found. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c zgetv0 ARPACK routine to generate the initial vector. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c zmout ARPACK utility routine that prints matrices -c zvout ARPACK utility routine that prints vectors. -c zlanhs LAPACK routine that computes various norms of a matrix. -c zlascl LAPACK routine for careful scaling of a matrix. -c dlabad LAPACK routine for defining the underflow and overflow -c limits. -c dlamch LAPACK routine that determines machine constants. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c zgemv Level 2 BLAS routine for matrix vector multiplication. 
-c zaxpy Level 1 BLAS that computes a vector triad. -c zcopy Level 1 BLAS that copies one vector to another . -c wzdotc Level 1 BLAS that computes the scalar product of two vectors. -c zscal Level 1 BLAS that scales a vector. -c zdscal Level 1 BLAS that scales a complex vector by a real number. -c dznrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naitr.F SID: 2.3 DATE OF SID: 8/27/96 RELEASE: 2 -c -c\Remarks -c The algorithm implemented is: -c -c restart = .false. -c Given V_{k} = [v_{1}, ..., v_{k}], r_{k}; -c r_{k} contains the initial residual vector even for k = 0; -c Also assume that rnorm = || B*r_{k} || and B*r_{k} are already -c computed by the calling program. -c -c betaj = rnorm ; p_{k+1} = B*r_{k} ; -c For j = k+1, ..., k+np Do -c 1) if ( betaj < tol ) stop or restart depending on j. -c ( At present tol is zero ) -c if ( restart ) generate a new starting vector. -c 2) v_{j} = r(j-1)/betaj; V_{j} = [V_{j-1}, v_{j}]; -c p_{j} = p_{j}/betaj -c 3) r_{j} = OP*v_{j} where OP is defined as in znaupd -c For shift-invert mode p_{j} = B*v_{j} is already available. -c wnorm = || OP*v_{j} || -c 4) Compute the j-th step residual vector. -c w_{j} = V_{j}^T * B * OP * v_{j} -c r_{j} = OP*v_{j} - V_{j} * w_{j} -c H(:,j) = w_{j}; -c H(j,j-1) = rnorm -c rnorm = || r_(j) || -c If (rnorm > 0.717*wnorm) accept step and go back to 1) -c 5) Re-orthogonalization step: -c s = V_{j}'*B*r_{j} -c r_{j} = r_{j} - V_{j}*s; rnorm1 = || r_{j} || -c alphaj = alphaj + s_{j}; -c 6) Iterative refinement step: -c If (rnorm1 > 0.717*rnorm) then -c rnorm = rnorm1 -c accept step and go back to 1) -c Else -c rnorm = rnorm1 -c If this is the first time in step 6), go to 5) -c Else r_{j} lies in the span of V_{j} numerically. 
-c Set r_{j} = 0 and rnorm = 0; go to 1) -c EndIf -c End Do -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine znaitr - & (ido, bmat, n, k, np, nb, resid, rnorm, v, ldv, h, ldh, - & ipntr, workd, info) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1 - integer ido, info, k, ldh, ldv, n, nb, np - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(3) - Complex*16 - & h(ldh,k+np), resid(n), v(ldv,k+np), workd(3*n) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - Double precision - & rone, rzero - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0), - & rone = 1.0D+0, rzero = 0.0D+0) -c -c %--------------% -c | Local Arrays | -c %--------------% -c - Double precision - & rtemp(2) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical first, orth1, orth2, rstart, step3, step4 - integer ierr, i, infol, ipj, irj, ivj, iter, itry, j, msglvl, - & jj - Double precision - & ovfl, smlnum, tst1, ulp, unfl, betaj, - & temp1, rnorm1, wnorm - Complex*16 - & cnorm -c - save first, orth1, orth2, rstart, step3, step4, - & ierr, ipj, irj, ivj, iter, itry, j, msglvl, ovfl, - & betaj, rnorm1, smlnum, ulp, unfl, wnorm -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zaxpy, zcopy, zscal, zdscal, zgemv, zgetv0, - & dlabad, zvout, zmout, ivout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Complex*16 - & wzdotc - Double precision - & dlamch, dznrm2, zlanhs, dlapy2 - external wzdotc, dznrm2, zlanhs, dlamch, dlapy2 -c -c %---------------------% 
-c | Intrinsic Functions | -c %---------------------% -c - intrinsic dimag, dble, max, sqrt -c -c %-----------------% -c | Data statements | -c %-----------------% -c - data first / .true. / -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------% -c | Set machine-dependent constants for the | -c | the splitting and deflation criterion. | -c | If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine zlahqr | -c %-----------------------------------------% -c - unfl = dlamch( 'safe minimum' ) - ovfl = dble(one / unfl) - call dlabad( unfl, ovfl ) - ulp = dlamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mcaitr -c -c %------------------------------% -c | Initial call to this routine | -c %------------------------------% -c - info = 0 - step3 = .false. - step4 = .false. - rstart = .false. - orth1 = .false. - orth2 = .false. - j = k + 1 - ipj = 1 - irj = ipj + n - ivj = irj + n - end if -c -c %-------------------------------------------------% -c | When in reverse communication mode one of: | -c | STEP3, STEP4, ORTH1, ORTH2, RSTART | -c | will be .true. when .... | -c | STEP3: return from computing OP*v_{j}. | -c | STEP4: return from computing B-norm of OP*v_{j} | -c | ORTH1: return from computing B-norm of r_{j+1} | -c | ORTH2: return from computing B-norm of | -c | correction to the residual vector. | -c | RSTART: return from OP computations needed by | -c | zgetv0. 
| -c %-------------------------------------------------% -c - if (step3) go to 50 - if (step4) go to 60 - if (orth1) go to 70 - if (orth2) go to 90 - if (rstart) go to 30 -c -c %-----------------------------% -c | Else this is the first step | -c %-----------------------------% -c -c %--------------------------------------------------------------% -c | | -c | A R N O L D I I T E R A T I O N L O O P | -c | | -c | Note: B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | -c %--------------------------------------------------------------% - - 1000 continue -c - if (msglvl .gt. 1) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: generating Arnoldi vector number') - call dvout (logfil, 1, rnorm, ndigit, - & '_naitr: B-norm of the current residual is') - end if -c -c %---------------------------------------------------% -c | STEP 1: Check if the B norm of j-th residual | -c | vector is zero. Equivalent to determine whether | -c | an exact j-step Arnoldi factorization is present. | -c %---------------------------------------------------% -c - betaj = rnorm - if (rnorm .gt. rzero) go to 40 -c -c %---------------------------------------------------% -c | Invariant subspace found, generate a new starting | -c | vector which is orthogonal to the current Arnoldi | -c | basis and continue the iteration. | -c %---------------------------------------------------% -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: ****** RESTART AT STEP ******') - end if -c -c %---------------------------------------------% -c | ITRY is the loop variable that controls the | -c | maximum amount of times that a restart is | -c | attempted. NRSTRT is used by stat.h | -c %---------------------------------------------% -c - betaj = rzero - nrstrt = nrstrt + 1 - itry = 1 - 20 continue - rstart = .true. - ido = 0 - 30 continue -c -c %--------------------------------------% -c | If in reverse communication mode and | -c | RSTART = .true. flow returns here. 
| -c %--------------------------------------% -c - call zgetv0 (ido, bmat, itry, .false., n, j, v, ldv, - & resid, rnorm, ipntr, workd, ierr) - if (ido .ne. 99) go to 9000 - if (ierr .lt. 0) then - itry = itry + 1 - if (itry .le. 3) go to 20 -c -c %------------------------------------------------% -c | Give up after several restart attempts. | -c | Set INFO to the size of the invariant subspace | -c | which spans OP and exit. | -c %------------------------------------------------% -c - info = j - 1 - call second (t1) - tcaitr = tcaitr + (t1 - t0) - ido = 99 - go to 9000 - end if -c - 40 continue -c -c %---------------------------------------------------------% -c | STEP 2: v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm | -c | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | -c | when reciprocating a small RNORM, test against lower | -c | machine bound. | -c %---------------------------------------------------------% -c - call zcopy (n, resid, 1, v(1,j), 1) - if ( rnorm .ge. unfl) then - temp1 = rone / rnorm - call zdscal (n, temp1, v(1,j), 1) - call zdscal (n, temp1, workd(ipj), 1) - else -c -c %-----------------------------------------% -c | To scale both v_{j} and p_{j} carefully | -c | use LAPACK routine zlascl | -c %-----------------------------------------% -c - call zlascl ('General', i, i, rnorm, rone, - & n, 1, v(1,j), n, infol) - call zlascl ('General', i, i, rnorm, rone, - & n, 1, workd(ipj), n, infol) - end if -c -c %------------------------------------------------------% -c | STEP 3: r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | -c | Note that this is not quite yet r_{j}. See STEP 4 | -c %------------------------------------------------------% -c - step3 = .true. 
- nopx = nopx + 1 - call second (t2) - call zcopy (n, v(1,j), 1, workd(ivj), 1) - ipntr(1) = ivj - ipntr(2) = irj - ipntr(3) = ipj - ido = 1 -c -c %-----------------------------------% -c | Exit in order to compute OP*v_{j} | -c %-----------------------------------% -c - go to 9000 - 50 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IRJ:IRJ+N-1) := OP*v_{j} | -c | if step3 = .true. | -c %----------------------------------% -c - call second (t3) - tmvopx = tmvopx + (t3 - t2) - - step3 = .false. -c -c %------------------------------------------% -c | Put another copy of OP*v_{j} into RESID. | -c %------------------------------------------% -c - call zcopy (n, workd(irj), 1, resid, 1) -c -c %---------------------------------------% -c | STEP 4: Finish extending the Arnoldi | -c | factorization to length j. | -c %---------------------------------------% -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - step4 = .true. - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-------------------------------------% -c | Exit in order to compute B*OP*v_{j} | -c %-------------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call zcopy (n, resid, 1, workd(ipj), 1) - end if - 60 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j} | -c | if step4 = .true. | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - step4 = .false. -c -c %-------------------------------------% -c | The following is needed for STEP 5. | -c | Compute the B-norm of OP*v_{j}. | -c %-------------------------------------% -c - if (bmat .eq. 'G') then - cnorm = wzdotc (n, resid, 1, workd(ipj), 1) - wnorm = sqrt( dlapy2(dble(cnorm),dimag(cnorm)) ) - else if (bmat .eq. 
'I') then - wnorm = dznrm2(n, resid, 1) - end if -c -c %-----------------------------------------% -c | Compute the j-th residual corresponding | -c | to the j step factorization. | -c | Use Classical Gram Schmidt and compute: | -c | w_{j} <- V_{j}^T * B * OP * v_{j} | -c | r_{j} <- OP*v_{j} - V_{j} * w_{j} | -c %-----------------------------------------% -c -c -c %------------------------------------------% -c | Compute the j Fourier coefficients w_{j} | -c | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}. | -c %------------------------------------------% -c - call zgemv ('C', n, j, one, v, ldv, workd(ipj), 1, - & zero, h(1,j), 1) -c -c %--------------------------------------% -c | Orthogonalize r_{j} against V_{j}. | -c | RESID contains OP*v_{j}. See STEP 3. | -c %--------------------------------------% -c - call zgemv ('N', n, j, -one, v, ldv, h(1,j), 1, - & one, resid, 1) -c - if (j .gt. 1) h(j,j-1) = dcmplx(betaj, rzero) -c - call second (t4) -c - orth1 = .true. -c - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call zcopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*r_{j} | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call zcopy (n, resid, 1, workd(ipj), 1) - end if - 70 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH1 = .true. | -c | WORKD(IPJ:IPJ+N-1) := B*r_{j}. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - orth1 = .false. -c -c %------------------------------% -c | Compute the B-norm of r_{j}. | -c %------------------------------% -c - if (bmat .eq. 'G') then - cnorm = wzdotc (n, resid, 1, workd(ipj), 1) - rnorm = sqrt( dlapy2(dble(cnorm),dimag(cnorm)) ) - else if (bmat .eq. 
'I') then - rnorm = dznrm2(n, resid, 1) - end if -c -c %-----------------------------------------------------------% -c | STEP 5: Re-orthogonalization / Iterative refinement phase | -c | Maximum NITER_ITREF tries. | -c | | -c | s = V_{j}^T * B * r_{j} | -c | r_{j} = r_{j} - V_{j}*s | -c | alphaj = alphaj + s_{j} | -c | | -c | The stopping criteria used for iterative refinement is | -c | discussed in Parlett's book SEP, page 107 and in Gragg & | -c | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990. | -c | Determine if we need to correct the residual. The goal is | -c | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} || | -c | The following test determines whether the sine of the | -c | angle between OP*x and the computed residual is less | -c | than or equal to 0.717. | -c %-----------------------------------------------------------% -c - if ( rnorm .gt. 0.717*wnorm ) go to 100 -c - iter = 0 - nrorth = nrorth + 1 -c -c %---------------------------------------------------% -c | Enter the Iterative refinement phase. If further | -c | refinement is necessary, loop back here. The loop | -c | variable is ITER. Perform a step of Classical | -c | Gram-Schmidt using all the Arnoldi vectors V_{j} | -c %---------------------------------------------------% -c - 80 continue -c - if (msglvl .gt. 2) then - rtemp(1) = wnorm - rtemp(2) = rnorm - call dvout (logfil, 2, rtemp, ndigit, - & '_naitr: re-orthogonalization; wnorm and rnorm are') - call zvout (logfil, j, h(1,j), ndigit, - & '_naitr: j-th column of H') - end if -c -c %----------------------------------------------------% -c | Compute V_{j}^T * B * r_{j}. | -c | WORKD(IRJ:IRJ+J-1) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1). | -c %----------------------------------------------------% -c - call zgemv ('C', n, j, one, v, ldv, workd(ipj), 1, - & zero, workd(irj), 1) -c -c %---------------------------------------------% -c | Compute the correction to the residual: | -c | r_{j} = r_{j} - V_{j} * WORKD(IRJ:IRJ+J-1). 
| -c | The correction to H is v(:,1:J)*H(1:J,1:J) | -c | + v(:,1:J)*WORKD(IRJ:IRJ+J-1)*e'_j. | -c %---------------------------------------------% -c - call zgemv ('N', n, j, -one, v, ldv, workd(irj), 1, - & one, resid, 1) - call zaxpy (j, one, workd(irj), 1, h(1,j), 1) -c - orth2 = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call zcopy (n, resid, 1, workd(irj), 1) - ipntr(1) = irj - ipntr(2) = ipj - ido = 2 -c -c %-----------------------------------% -c | Exit in order to compute B*r_{j}. | -c | r_{j} is the corrected residual. | -c %-----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call zcopy (n, resid, 1, workd(ipj), 1) - end if - 90 continue -c -c %---------------------------------------------------% -c | Back from reverse communication if ORTH2 = .true. | -c %---------------------------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c -c %-----------------------------------------------------% -c | Compute the B-norm of the corrected residual r_{j}. | -c %-----------------------------------------------------% -c - if (bmat .eq. 'G') then - cnorm = wzdotc (n, resid, 1, workd(ipj), 1) - rnorm1 = sqrt( dlapy2(dble(cnorm),dimag(cnorm)) ) - else if (bmat .eq. 'I') then - rnorm1 = dznrm2(n, resid, 1) - end if -c - if (msglvl .gt. 0 .and. iter .gt. 0 ) then - call ivout (logfil, 1, j, ndigit, - & '_naitr: Iterative refinement for Arnoldi residual') - if (msglvl .gt. 2) then - rtemp(1) = rnorm - rtemp(2) = rnorm1 - call dvout (logfil, 2, rtemp, ndigit, - & '_naitr: iterative refinement ; rnorm and rnorm1 are') - end if - end if -c -c %-----------------------------------------% -c | Determine if we need to perform another | -c | step of re-orthogonalization. | -c %-----------------------------------------% -c - if ( rnorm1 .gt. 0.717*rnorm ) then -c -c %---------------------------------------% -c | No need for further refinement. 
| -c | The cosine of the angle between the | -c | corrected residual vector and the old | -c | residual vector is greater than 0.717 | -c | In other words the corrected residual | -c | and the old residual vector share an | -c | angle of less than arcCOS(0.717) | -c %---------------------------------------% -c - rnorm = rnorm1 -c - else -c -c %-------------------------------------------% -c | Another step of iterative refinement step | -c | is required. NITREF is used by stat.h | -c %-------------------------------------------% -c - nitref = nitref + 1 - rnorm = rnorm1 - iter = iter + 1 - if (iter .le. 1) go to 80 -c -c %-------------------------------------------------% -c | Otherwise RESID is numerically in the span of V | -c %-------------------------------------------------% -c - do 95 jj = 1, n - resid(jj) = zero - 95 continue - rnorm = rzero - end if -c -c %----------------------------------------------% -c | Branch here directly if iterative refinement | -c | wasn't necessary or after at most NITER_REF | -c | steps of iterative refinement. | -c %----------------------------------------------% -c - 100 continue -c - rstart = .false. - orth2 = .false. -c - call second (t5) - titref = titref + (t5 - t4) -c -c %------------------------------------% -c | STEP 6: Update j = j+1; Continue | -c %------------------------------------% -c - j = j + 1 - if (j .gt. k+np) then - call second (t1) - tcaitr = tcaitr + (t1 - t0) - ido = 99 - do 110 i = max(1,k), k+np-1 -c -c %--------------------------------------------% -c | Check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine zlahqr | -c %--------------------------------------------% -c - tst1 = dlapy2(dble(h(i,i)),dimag(h(i,i))) - & + dlapy2(dble(h(i+1,i+1)), dimag(h(i+1,i+1))) - if( tst1.eq.dble(zero) ) - & tst1 = zlanhs( '1', k+np, h, ldh, workd(n+1) ) - if( dlapy2(dble(h(i+1,i)),dimag(h(i+1,i))) .le. 
- & max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 110 continue -c - if (msglvl .gt. 2) then - call zmout (logfil, k+np, k+np, h, ldh, ndigit, - & '_naitr: Final upper Hessenberg matrix H of order K+NP') - end if -c - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Loop back to extend the factorization by another step. | -c %--------------------------------------------------------% -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 9000 continue - return -c -c %---------------% -c | End of znaitr | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znapps.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znapps.f deleted file mode 100644 index 95bbce4254..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znapps.f +++ /dev/null @@ -1,507 +0,0 @@ -c\BeginDoc -c -c\Name: znapps -c -c\Description: -c Given the Arnoldi factorization -c -c A*V_{k} - V_{k}*H_{k} = r_{k+p}*e_{k+p}^T, -c -c apply NP implicit shifts resulting in -c -c A*(V_{k}*Q) - (V_{k}*Q)*(Q^T* H_{k}*Q) = r_{k+p}*e_{k+p}^T * Q -c -c where Q is an orthogonal matrix which is the product of rotations -c and reflections resulting from the NP bulge change sweeps. -c The updated Arnoldi factorization becomes: -c -c A*VNEW_{k} - VNEW_{k}*HNEW_{k} = rnew_{k}*e_{k}^T. -c -c\Usage: -c call znapps -c ( N, KEV, NP, SHIFT, V, LDV, H, LDH, RESID, Q, LDQ, -c WORKL, WORKD ) -c -c\Arguments -c N Integer. (INPUT) -c Problem size, i.e. size of matrix A. -c -c KEV Integer. (INPUT/OUTPUT) -c KEV+NP is the size of the input matrix H. -c KEV is the size of the updated matrix HNEW. -c -c NP Integer. (INPUT) -c Number of implicit shifts to be applied. -c -c SHIFT Complex*16 array of length NP. (INPUT) -c The shifts to be applied. 
-c -c V Complex*16 N by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, V contains the current KEV+NP Arnoldi vectors. -c On OUTPUT, V contains the updated KEV Arnoldi vectors -c in the first KEV columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Complex*16 (KEV+NP) by (KEV+NP) array. (INPUT/OUTPUT) -c On INPUT, H contains the current KEV+NP by KEV+NP upper -c Hessenberg matrix of the Arnoldi factorization. -c On OUTPUT, H contains the updated KEV by KEV upper Hessenberg -c matrix in the KEV leading submatrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RESID Complex*16 array of length N. (INPUT/OUTPUT) -c On INPUT, RESID contains the the residual vector r_{k+p}. -c On OUTPUT, RESID is the update residual vector rnew_{k} -c in the first KEV locations. -c -c Q Complex*16 KEV+NP by KEV+NP work array. (WORKSPACE) -c Work array used to accumulate the rotations and reflections -c during the bulge chase sweep. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Complex*16 work array of length (KEV+NP). (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c WORKD Complex*16 work array of length 2*N. (WORKSPACE) -c Distributed array used in the application of the accumulated -c orthogonal matrix Q. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c zmout ARPACK utility routine that prints matrices -c zvout ARPACK utility routine that prints vectors. 
-c zlacpy LAPACK matrix copy routine. -c zlanhs LAPACK routine that computes various norms of a matrix. -c zlartg LAPACK Givens rotation construction routine. -c zlaset LAPACK matrix initialization routine. -c dlabad LAPACK routine for defining the underflow and overflow -c limits. -c dlamch LAPACK routine that determines machine constants. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c zgemv Level 2 BLAS routine for matrix vector multiplication. -c zaxpy Level 1 BLAS that computes a vector triad. -c zcopy Level 1 BLAS that copies one vector to another. -c zscal Level 1 BLAS that scales a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: napps.F SID: 2.3 DATE OF SID: 3/28/97 RELEASE: 2 -c -c\Remarks -c 1. In this version, each shift is applied to all the sublocks of -c the Hessenberg matrix H and not just to the submatrix that it -c comes from. Deflation as in LAPACK routine zlahqr (QR algorithm -c for upper Hessenberg matrices ) is used. -c Upon output, the subdiagonals of H are enforced to be non-negative -c real numbers. 
-c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine znapps - & ( n, kev, np, shift, v, ldv, h, ldh, resid, q, ldq, - & workl, workd ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer kev, ldh, ldq, ldv, n, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex*16 - & h(ldh,kev+np), resid(n), shift(np), - & v(ldv,kev+np), q(ldq,kev+np), workd(2*n), workl(kev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - Double precision - & rzero - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0), - & rzero = 0.0D+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - integer i, iend, istart, j, jj, kplusp, msglvl - logical first - Complex*16 - & cdum, f, g, h11, h21, r, s, sigma, t - Double precision - & c, ovfl, smlnum, ulp, unfl, tst1 - save first, ovfl, smlnum, ulp, unfl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zaxpy, zcopy, zgemv, zscal, zlacpy, zlartg, - & zvout, zlaset, dlabad, zmout, second, ivout -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & zlanhs, dlamch, dlapy2 - external zlanhs, dlamch, dlapy2 -c -c %----------------------% -c | Intrinsics Functions | -c %----------------------% -c - intrinsic abs, dimag, conjg, dcmplx, max, min, dble -c -c %---------------------% -c | Statement Functions | -c %---------------------% -c - Double precision - & zabs1 - zabs1( cdum ) = abs( dble( cdum ) ) + abs( dimag( cdum ) ) -c -c %----------------% -c | Data statments | -c %----------------% -c - data first / .true. 
/ -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (first) then -c -c %-----------------------------------------------% -c | Set machine-dependent constants for the | -c | stopping criterion. If norm(H) <= sqrt(OVFL), | -c | overflow should not occur. | -c | REFERENCE: LAPACK subroutine zlahqr | -c %-----------------------------------------------% -c - unfl = dlamch( 'safe minimum' ) - ovfl = dble(one / unfl) - call dlabad( unfl, ovfl ) - ulp = dlamch( 'precision' ) - smlnum = unfl*( n / ulp ) - first = .false. - end if -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mcapps -c - kplusp = kev + np -c -c %--------------------------------------------% -c | Initialize Q to the identity to accumulate | -c | the rotations and reflections | -c %--------------------------------------------% -c - call zlaset ('All', kplusp, kplusp, zero, one, q, ldq) -c -c %----------------------------------------------% -c | Quick return if there are no shifts to apply | -c %----------------------------------------------% -c - if (np .eq. 0) go to 9000 -c -c %----------------------------------------------% -c | Chase the bulge with the application of each | -c | implicit shift. Each shift is applied to the | -c | whole matrix including each block. | -c %----------------------------------------------% -c - do 110 jj = 1, np - sigma = shift(jj) -c - if (msglvl .gt. 2 ) then - call ivout (logfil, 1, jj, ndigit, - & '_napps: shift number.') - call zvout (logfil, 1, sigma, ndigit, - & '_napps: Value of the shift ') - end if -c - istart = 1 - 20 continue -c - do 30 i = istart, kplusp-1 -c -c %----------------------------------------% -c | Check for splitting and deflation. 
Use | -c | a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine zlahqr | -c %----------------------------------------% -c - tst1 = zabs1( h( i, i ) ) + zabs1( h( i+1, i+1 ) ) - if( tst1.eq.rzero ) - & tst1 = zlanhs( '1', kplusp-jj+1, h, ldh, workl ) - if ( abs(dble(h(i+1,i))) - & .le. max(ulp*tst1, smlnum) ) then - if (msglvl .gt. 0) then - call ivout (logfil, 1, i, ndigit, - & '_napps: matrix splitting at row/column no.') - call ivout (logfil, 1, jj, ndigit, - & '_napps: matrix splitting with shift number.') - call zvout (logfil, 1, h(i+1,i), ndigit, - & '_napps: off diagonal element.') - end if - iend = i - h(i+1,i) = zero - go to 40 - end if - 30 continue - iend = kplusp - 40 continue -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, istart, ndigit, - & '_napps: Start of current block ') - call ivout (logfil, 1, iend, ndigit, - & '_napps: End of current block ') - end if -c -c %------------------------------------------------% -c | No reason to apply a shift to block of order 1 | -c | or if the current block starts after the point | -c | of compression since we'll discard this stuff | -c %------------------------------------------------% -c - if ( istart .eq. iend .or. istart .gt. kev) go to 100 -c - h11 = h(istart,istart) - h21 = h(istart+1,istart) - f = h11 - sigma - g = h21 -c - do 80 i = istart, iend-1 -c -c %------------------------------------------------------% -c | Construct the plane rotation G to zero out the bulge | -c %------------------------------------------------------% -c - call zlartg (f, g, c, s, r) - if (i .gt. 
istart) then - h(i,i-1) = r - h(i+1,i-1) = zero - end if -c -c %---------------------------------------------% -c | Apply rotation to the left of H; H <- G'*H | -c %---------------------------------------------% -c - do 50 j = i, kplusp - t = c*h(i,j) + s*h(i+1,j) - h(i+1,j) = -conjg(s)*h(i,j) + c*h(i+1,j) - h(i,j) = t - 50 continue -c -c %---------------------------------------------% -c | Apply rotation to the right of H; H <- H*G | -c %---------------------------------------------% -c - do 60 j = 1, min(i+2,iend) - t = c*h(j,i) + conjg(s)*h(j,i+1) - h(j,i+1) = -s*h(j,i) + c*h(j,i+1) - h(j,i) = t - 60 continue -c -c %-----------------------------------------------------% -c | Accumulate the rotation in the matrix Q; Q <- Q*G' | -c %-----------------------------------------------------% -c - do 70 j = 1, min(i+jj, kplusp) - t = c*q(j,i) + conjg(s)*q(j,i+1) - q(j,i+1) = - s*q(j,i) + c*q(j,i+1) - q(j,i) = t - 70 continue -c -c %---------------------------% -c | Prepare for next rotation | -c %---------------------------% -c - if (i .lt. iend-1) then - f = h(i+1,i) - g = h(i+2,i) - end if - 80 continue -c -c %-------------------------------% -c | Finished applying the shift. | -c %-------------------------------% -c - 100 continue -c -c %---------------------------------------------------------% -c | Apply the same shift to the next block if there is any. | -c %---------------------------------------------------------% -c - istart = iend + 1 - if (iend .lt. kplusp) go to 20 -c -c %---------------------------------------------% -c | Loop back to the top to get the next shift. | -c %---------------------------------------------% -c - 110 continue -c -c %---------------------------------------------------% -c | Perform a similarity transformation that makes | -c | sure that the compressed H will have non-negative | -c | real subdiagonal elements. | -c %---------------------------------------------------% -c - do 120 j=1,kev - if ( dble( h(j+1,j) ) .lt. rzero .or. 
- & dimag( h(j+1,j) ) .ne. rzero ) then - t = h(j+1,j) / dlapy2(dble(h(j+1,j)),dimag(h(j+1,j))) - call zscal( kplusp-j+1, conjg(t), h(j+1,j), ldh ) - call zscal( min(j+2, kplusp), t, h(1,j+1), 1 ) - call zscal( min(j+np+1,kplusp), t, q(1,j+1), 1 ) - h(j+1,j) = dcmplx( dble( h(j+1,j) ), rzero ) - end if - 120 continue -c - do 130 i = 1, kev -c -c %--------------------------------------------% -c | Final check for splitting and deflation. | -c | Use a standard test as in the QR algorithm | -c | REFERENCE: LAPACK subroutine zlahqr. | -c | Note: Since the subdiagonals of the | -c | compressed H are nonnegative real numbers, | -c | we take advantage of this. | -c %--------------------------------------------% -c - tst1 = zabs1( h( i, i ) ) + zabs1( h( i+1, i+1 ) ) - if( tst1 .eq. rzero ) - & tst1 = zlanhs( '1', kev, h, ldh, workl ) - if( dble( h( i+1,i ) ) .le. max( ulp*tst1, smlnum ) ) - & h(i+1,i) = zero - 130 continue -c -c %-------------------------------------------------% -c | Compute the (kev+1)-st column of (V*Q) and | -c | temporarily store the result in WORKD(N+1:2*N). | -c | This is needed in the residual update since we | -c | cannot GUARANTEE that the corresponding entry | -c | of H would be zero as in exact arithmetic. | -c %-------------------------------------------------% -c - if ( dble( h(kev+1,kev) ) .gt. rzero ) - & call zgemv ('N', n, kplusp, one, v, ldv, q(1,kev+1), 1, zero, - & workd(n+1), 1) -c -c %----------------------------------------------------------% -c | Compute column 1 to kev of (V*Q) in backward order | -c | taking advantage of the upper Hessenberg structure of Q. | -c %----------------------------------------------------------% -c - do 140 i = 1, kev - call zgemv ('N', n, kplusp-i+1, one, v, ldv, - & q(1,kev-i+1), 1, zero, workd, 1) - call zcopy (n, workd, 1, v(1,kplusp-i+1), 1) - 140 continue -c -c %-------------------------------------------------% -c | Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). 
| -c %-------------------------------------------------% -c - call zlacpy ('A', n, kev, v(1,kplusp-kev+1), ldv, v, ldv) -c -c %--------------------------------------------------------------% -c | Copy the (kev+1)-st column of (V*Q) in the appropriate place | -c %--------------------------------------------------------------% -c - if ( dble( h(kev+1,kev) ) .gt. rzero ) - & call zcopy (n, workd(n+1), 1, v(1,kev+1), 1) -c -c %-------------------------------------% -c | Update the residual vector: | -c | r <- sigmak*r + betak*v(:,kev+1) | -c | where | -c | sigmak = (e_{kev+p}'*Q)*e_{kev} | -c | betak = e_{kev+1}'*H*e_{kev} | -c %-------------------------------------% -c - call zscal (n, q(kplusp,kev), resid, 1) - if ( dble( h(kev+1,kev) ) .gt. rzero ) - & call zaxpy (n, h(kev+1,kev), v(1,kev+1), 1, resid, 1) -c - if (msglvl .gt. 1) then - call zvout (logfil, 1, q(kplusp,kev), ndigit, - & '_napps: sigmak = (e_{kev+p}^T*Q)*e_{kev}') - call zvout (logfil, 1, h(kev+1,kev), ndigit, - & '_napps: betak = e_{kev+1}^T*H*e_{kev}') - call ivout (logfil, 1, kev, ndigit, - & '_napps: Order of the final Hessenberg matrix ') - if (msglvl .gt. 2) then - call zmout (logfil, kev, kev, h, ldh, ndigit, - & '_napps: updated Hessenberg matrix H for next iteration') - end if -c - end if -c - 9000 continue - call second (t1) - tcapps = tcapps + (t1 - t0) -c - return -c -c %---------------% -c | End of znapps | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaup2.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaup2.f deleted file mode 100644 index 43bbea50b5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaup2.f +++ /dev/null @@ -1,801 +0,0 @@ -c\BeginDoc -c -c\Name: znaup2 -c -c\Description: -c Intermediate level interface called by znaupd . 
-c -c\Usage: -c call znaup2 -c ( IDO, BMAT, N, WHICH, NEV, NP, TOL, RESID, MODE, IUPD, -c ISHIFT, MXITER, V, LDV, H, LDH, RITZ, BOUNDS, -c Q, LDQ, WORKL, IPNTR, WORKD, RWORK, INFO ) -c -c\Arguments -c -c IDO, BMAT, N, WHICH, NEV, TOL, RESID: same as defined in znaupd . -c MODE, ISHIFT, MXITER: see the definition of IPARAM in znaupd . -c -c NP Integer. (INPUT/OUTPUT) -c Contains the number of implicit shifts to apply during -c each Arnoldi iteration. -c If ISHIFT=1, NP is adjusted dynamically at each iteration -c to accelerate convergence and prevent stagnation. -c This is also roughly equal to the number of matrix-vector -c products (involving the operator OP) per Arnoldi iteration. -c The logic for adjusting is contained within the current -c subroutine. -c If ISHIFT=0, NP is the number of shifts the user needs -c to provide via reverse comunication. 0 < NP < NCV-NEV. -c NP may be less than NCV-NEV since a leading block of the current -c upper Hessenberg matrix has split off and contains "unwanted" -c Ritz values. -c Upon termination of the IRA iteration, NP contains the number -c of "converged" wanted Ritz values. -c -c IUPD Integer. (INPUT) -c IUPD .EQ. 0: use explicit restart instead implicit update. -c IUPD .NE. 0: use implicit update. -c -c V Complex*16 N by (NEV+NP) array. (INPUT/OUTPUT) -c The Arnoldi basis vectors are returned in the first NEV -c columns of V. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling -c program. -c -c H Complex*16 (NEV+NP) by (NEV+NP) array. (OUTPUT) -c H is used to store the generated upper Hessenberg matrix -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZ Complex*16 array of length NEV+NP. (OUTPUT) -c RITZ(1:NEV) contains the computed Ritz values of OP. -c -c BOUNDS Complex*16 array of length NEV+NP. (OUTPUT) -c BOUNDS(1:NEV) contain the error bounds corresponding to -c the computed Ritz values. 
-c -c Q Complex*16 (NEV+NP) by (NEV+NP) array. (WORKSPACE) -c Private (replicated) work array used to accumulate the -c rotation in the shift application step. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Complex*16 work array of length at least -c (NEV+NP)**2 + 3*(NEV+NP). (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. It is used in shifts calculation, shifts -c application and convergence checking. -c -c -c IPNTR Integer array of length 3. (OUTPUT) -c Pointer to mark the starting locations in the WORKD for -c vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X. -c IPNTR(2): pointer to the current result vector Y. -c IPNTR(3): pointer to the vector B * X when used in the -c shift-and-invert mode. X is the current operand. -c ------------------------------------------------------------- -c -c WORKD Complex*16 work array of length 3*N. (WORKSPACE) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note in ZNAUPD . -c -c RWORK Double precision work array of length NEV+NP ( WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal return. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. -c NP returns the number of converged Ritz values. -c = 2: No shifts could be applied. -c = -8: Error return from LAPACK eigenvalue calculation; -c This should never happen. -c = -9: Starting vector is zero. 
-c = -9999: Could not build an Arnoldi factorization. -c Size that was built in returned in NP. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c -c\Routines called: -c zgetv0 ARPACK initial vector generation routine. -c znaitr ARPACK Arnoldi factorization routine. -c znapps ARPACK application of implicit shifts routine. -c zneigh ARPACK compute Ritz values and error bounds routine. -c zngets ARPACK reorder Ritz values and error bounds routine. -c zsortc ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c zmout ARPACK utility routine that prints matrices -c zvout ARPACK utility routine that prints vectors. -c dvout ARPACK utility routine that prints vectors. -c dlamch LAPACK routine that determines machine constants. -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c zcopy Level 1 BLAS that copies one vector to another . -c wzdotc Level 1 BLAS that computes the scalar product of two vectors. -c zswap Level 1 BLAS that swaps two vectors. -c dznrm2 Level 1 BLAS that computes the norm of a vector. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice Universitya -c Chao Yang Houston, Texas -c Dept. of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naup2.F SID: 2.6 DATE OF SID: 06/01/00 RELEASE: 2 -c -c\Remarks -c 1. 
None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine znaup2 - & ( ido, bmat, n, which, nev, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, h, ldh, ritz, bounds, - & q, ldq, workl, ipntr, workd, rwork, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ishift, iupd, mode, ldh, ldq, ldv, mxiter, - & n, nev, np - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer ipntr(13) - Complex*16 - & bounds(nev+np), h(ldh,nev+np), q(ldq,nev+np), - & resid(n), ritz(nev+np), v(ldv,nev+np), - & workd(3*n), workl( (nev+np)*(nev+np+3) ) - Double precision - & rwork(nev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - Double precision - & rzero - parameter (one = (1.0D+0, 0.0D+0) , zero = (0.0D+0, 0.0D+0) , - & rzero = 0.0D+0 ) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - logical cnorm , getv0, initv , update, ushift - integer ierr , iter , kplusp, msglvl, nconv, - & nevbef, nev0 , np0 , nptemp, i , - & j - Complex*16 - & cmpnorm - Double precision - & rnorm , eps23, rtemp - character wprime*2 -c - save cnorm, getv0, initv , update, ushift, - & rnorm, iter , kplusp, msglvl, nconv , - & nevbef, nev0 , np0 , eps23 -c -c -c %-----------------------% -c | Local array arguments | -c %-----------------------% -c - integer kp(3) -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zcopy , zgetv0 , znaitr , zneigh , zngets , znapps , - & zsortc , zswap , zmout , zvout , ivout, second -c -c %--------------------% -c | External functions | -c 
%--------------------% -c - Complex*16 - & wzdotc - Double precision - & dznrm2 , dlamch , dlapy2 - external wzdotc , dznrm2 , dlamch , dlapy2 -c -c %---------------------% -c | Intrinsic Functions | -c %---------------------% -c - intrinsic dimag , dble , min, max -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c - call second (t0) -c - msglvl = mcaup2 -c - nev0 = nev - np0 = np -c -c %-------------------------------------% -c | kplusp is the bound on the largest | -c | Lanczos factorization built. | -c | nconv is the current number of | -c | "converged" eigenvalues. | -c | iter is the counter on the current | -c | iteration step. | -c %-------------------------------------% -c - kplusp = nev + np - nconv = 0 - iter = 0 -c -c %---------------------------------% -c | Get machine dependent constant. | -c %---------------------------------% -c - eps23 = dlamch ('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0 ) -c -c %---------------------------------------% -c | Set flags for computing the first NEV | -c | steps of the Arnoldi factorization. | -c %---------------------------------------% -c - getv0 = .true. - update = .false. - ushift = .false. - cnorm = .false. -c - if (info .ne. 0) then -c -c %--------------------------------------------% -c | User provides the initial residual vector. | -c %--------------------------------------------% -c - initv = .true. - info = 0 - else - initv = .false. - end if - end if -c -c %---------------------------------------------% -c | Get a possibly random starting vector and | -c | force it into the range of the operator OP. | -c %---------------------------------------------% -c - 10 continue -c - if (getv0) then - call zgetv0 (ido, bmat, 1, initv, n, 1, v, ldv, resid, rnorm, - & ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (rnorm .eq. rzero) then -c -c %-----------------------------------------% -c | The initial vector is zero. Error exit. 
| -c %-----------------------------------------% -c - info = -9 - go to 1100 - end if - getv0 = .false. - ido = 0 - end if -c -c %-----------------------------------% -c | Back from reverse communication : | -c | continue with update step | -c %-----------------------------------% -c - if (update) go to 20 -c -c %-------------------------------------------% -c | Back from computing user specified shifts | -c %-------------------------------------------% -c - if (ushift) go to 50 -c -c %-------------------------------------% -c | Back from computing residual norm | -c | at the end of the current iteration | -c %-------------------------------------% -c - if (cnorm) go to 100 -c -c %----------------------------------------------------------% -c | Compute the first NEV steps of the Arnoldi factorization | -c %----------------------------------------------------------% -c - call znaitr (ido, bmat, n, 0, nev, mode, resid, rnorm, v, ldv, - & h, ldh, ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if -c -c %--------------------------------------------------------------% -c | | -c | M A I N ARNOLDI I T E R A T I O N L O O P | -c | Each iteration implicitly restarts the Arnoldi | -c | factorization in place. | -c | | -c %--------------------------------------------------------------% -c - 1000 continue -c - iter = iter + 1 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, iter, ndigit, - & '_naup2: **** Start of major iteration number ****') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c | Adjust NP since NEV might have been updated by last call | -c | to the shift application routine znapps . | -c %-----------------------------------------------------------% -c - np = kplusp - nev -c - if (msglvl .gt. 
1) then - call ivout (logfil, 1, nev, ndigit, - & '_naup2: The length of the current Arnoldi factorization') - call ivout (logfil, 1, np, ndigit, - & '_naup2: Extend the Arnoldi factorization by') - end if -c -c %-----------------------------------------------------------% -c | Compute NP additional steps of the Arnoldi factorization. | -c %-----------------------------------------------------------% -c - ido = 0 - 20 continue - update = .true. -c - call znaitr (ido, bmat, n, nev, np, mode, resid, rnorm, - & v , ldv , h, ldh, ipntr, workd, info) -c - if (ido .ne. 99) go to 9000 -c - if (info .gt. 0) then - np = info - mxiter = iter - info = -9999 - go to 1200 - end if - update = .false. -c - if (msglvl .gt. 1) then - call dvout (logfil, 1, rnorm, ndigit, - & '_naup2: Corresponding B-norm of the residual') - end if -c -c %--------------------------------------------------------% -c | Compute the eigenvalues and corresponding error bounds | -c | of the current upper Hessenberg matrix. | -c %--------------------------------------------------------% -c - call zneigh (rnorm, kplusp, h, ldh, ritz, bounds, - & q, ldq, workl, rwork, ierr) -c - if (ierr .ne. 0) then - info = -8 - go to 1200 - end if -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The wanted part of the spectrum and corresponding | -c | error bounds are in the last NEV loc. of RITZ, | -c | and BOUNDS respectively. | -c %---------------------------------------------------% -c - nev = nev0 - np = np0 -c -c %--------------------------------------------------% -c | Make a copy of Ritz values and the corresponding | -c | Ritz estimates obtained from zneigh . 
| -c %--------------------------------------------------% -c - call zcopy (kplusp,ritz,1,workl(kplusp**2+1),1) - call zcopy (kplusp,bounds,1,workl(kplusp**2+kplusp+1),1) -c -c %---------------------------------------------------% -c | Select the wanted Ritz values and their bounds | -c | to be used in the convergence test. | -c | The wanted part of the spectrum and corresponding | -c | bounds are in the last NEV loc. of RITZ | -c | BOUNDS respectively. | -c %---------------------------------------------------% -c - call zngets (ishift, which, nev, np, ritz, bounds) -c -c %------------------------------------------------------------% -c | Convergence test: currently we use the following criteria. | -c | The relative accuracy of a Ritz value is considered | -c | acceptable if: | -c | | -c | error_bounds(i) .le. tol*max(eps23, magnitude_of_ritz(i)). | -c | | -c %------------------------------------------------------------% -c - nconv = 0 -c - do 25 i = 1, nev - rtemp = max( eps23, dlapy2 ( dble (ritz(np+i)), - & dimag (ritz(np+i)) ) ) - if ( dlapy2 (dble (bounds(np+i)),dimag (bounds(np+i))) - & .le. tol*rtemp ) then - nconv = nconv + 1 - end if - 25 continue -c - if (msglvl .gt. 2) then - kp(1) = nev - kp(2) = np - kp(3) = nconv - call ivout (logfil, 3, kp, ndigit, - & '_naup2: NEV, NP, NCONV are') - call zvout (logfil, kplusp, ritz, ndigit, - & '_naup2: The eigenvalues of H') - call zvout (logfil, kplusp, bounds, ndigit, - & '_naup2: Ritz estimates of the current NCV Ritz values') - end if -c -c %---------------------------------------------------------% -c | Count the number of unwanted Ritz values that have zero | -c | Ritz estimates. If any Ritz estimates are equal to zero | -c | then a leading block of H of order equal to at least | -c | the number of Ritz values with zero Ritz estimates has | -c | split off. None of these Ritz values may be removed by | -c | shifting. Decrease NP the number of shifts to apply. 
If | -c | no shifts may be applied, then prepare to exit | -c %---------------------------------------------------------% -c - nptemp = np - do 30 j=1, nptemp - if (bounds(j) .eq. zero) then - np = np - 1 - nev = nev + 1 - end if - 30 continue -c - if ( (nconv .ge. nev0) .or. - & (iter .gt. mxiter) .or. - & (np .eq. 0) ) then -c - if (msglvl .gt. 4) then - call zvout (logfil, kplusp, workl(kplusp**2+1), ndigit, - & '_naup2: Eigenvalues computed by _neigh:') - call zvout (logfil, kplusp, workl(kplusp**2+kplusp+1), - & ndigit, - & '_naup2: Ritz estimates computed by _neigh:') - end if -c -c %------------------------------------------------% -c | Prepare to exit. Put the converged Ritz values | -c | and corresponding bounds in RITZ(1:NCONV) and | -c | BOUNDS(1:NCONV) respectively. Then sort. Be | -c | careful when NCONV > NP | -c %------------------------------------------------% -c -c %------------------------------------------% -c | Use h( 3,1 ) as storage to communicate | -c | rnorm to zneupd if needed | -c %------------------------------------------% - - h(3,1) = dcmplx (rnorm,rzero) -c -c %----------------------------------------------% -c | Sort Ritz values so that converged Ritz | -c | values appear within the first NEV locations | -c | of ritz and bounds, and the most desired one | -c | appears at the front. | -c %----------------------------------------------% -c - if (which .eq. 'LM') wprime = 'SM' - if (which .eq. 'SM') wprime = 'LM' - if (which .eq. 'LR') wprime = 'SR' - if (which .eq. 'SR') wprime = 'LR' - if (which .eq. 'LI') wprime = 'SI' - if (which .eq. 'SI') wprime = 'LI' -c - call zsortc (wprime, .true., kplusp, ritz, bounds) -c -c %--------------------------------------------------% -c | Scale the Ritz estimate of each Ritz value | -c | by 1 / max(eps23, magnitude of the Ritz value). 
| -c %--------------------------------------------------% -c - do 35 j = 1, nev0 - rtemp = max( eps23, dlapy2 ( dble (ritz(j)), - & dimag (ritz(j)) ) ) - bounds(j) = bounds(j)/rtemp - 35 continue -c -c %---------------------------------------------------% -c | Sort the Ritz values according to the scaled Ritz | -c | estimates. This will push all the converged ones | -c | towards the front of ritz, bounds (in the case | -c | when NCONV < NEV.) | -c %---------------------------------------------------% -c - wprime = 'LM' - call zsortc (wprime, .true., nev0, bounds, ritz) -c -c %----------------------------------------------% -c | Scale the Ritz estimate back to its original | -c | value. | -c %----------------------------------------------% -c - do 40 j = 1, nev0 - rtemp = max( eps23, dlapy2 ( dble (ritz(j)), - & dimag (ritz(j)) ) ) - bounds(j) = bounds(j)*rtemp - 40 continue -c -c %-----------------------------------------------% -c | Sort the converged Ritz values again so that | -c | the "threshold" value appears at the front of | -c | ritz and bound. | -c %-----------------------------------------------% -c - call zsortc (which, .true., nconv, ritz, bounds) -c - if (msglvl .gt. 1) then - call zvout (logfil, kplusp, ritz, ndigit, - & '_naup2: Sorted eigenvalues') - call zvout (logfil, kplusp, bounds, ndigit, - & '_naup2: Sorted ritz estimates.') - end if -c -c %------------------------------------% -c | Max iterations have been exceeded. | -c %------------------------------------% -c - if (iter .gt. mxiter .and. nconv .lt. nev0) info = 1 -c -c %---------------------% -c | No shifts to apply. | -c %---------------------% -c - if (np .eq. 0 .and. nconv .lt. nev0) info = 2 -c - np = nconv - go to 1100 -c - else if ( (nconv .lt. nev0) .and. (ishift .eq. 1) ) then -c -c %-------------------------------------------------% -c | Do not have all the requested eigenvalues yet. | -c | To prevent possible stagnation, adjust the size | -c | of NEV. 
| -c %-------------------------------------------------% -c - nevbef = nev - nev = nev + min(nconv, np/2) - if (nev .eq. 1 .and. kplusp .ge. 6) then - nev = kplusp / 2 - else if (nev .eq. 1 .and. kplusp .gt. 3) then - nev = 2 - end if - np = kplusp - nev -c -c %---------------------------------------% -c | If the size of NEV was just increased | -c | resort the eigenvalues. | -c %---------------------------------------% -c - if (nevbef .lt. nev) - & call zngets (ishift, which, nev, np, ritz, bounds) -c - end if -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, nconv, ndigit, - & '_naup2: no. of "converged" Ritz values at this iter.') - if (msglvl .gt. 1) then - kp(1) = nev - kp(2) = np - call ivout (logfil, 2, kp, ndigit, - & '_naup2: NEV and NP are') - call zvout (logfil, nev, ritz(np+1), ndigit, - & '_naup2: "wanted" Ritz values ') - call zvout (logfil, nev, bounds(np+1), ndigit, - & '_naup2: Ritz estimates of the "wanted" values ') - end if - end if -c - if (ishift .eq. 0) then -c -c %-------------------------------------------------------% -c | User specified shifts: pop back out to get the shifts | -c | and return them in the first 2*NP locations of WORKL. | -c %-------------------------------------------------------% -c - ushift = .true. - ido = 3 - go to 9000 - end if - 50 continue - ushift = .false. -c - if ( ishift .ne. 1 ) then -c -c %----------------------------------% -c | Move the NP shifts from WORKL to | -c | RITZ, to free up WORKL | -c | for non-exact shift case. | -c %----------------------------------% -c - call zcopy (np, workl, 1, ritz, 1) - end if -c - if (msglvl .gt. 2) then - call ivout (logfil, 1, np, ndigit, - & '_naup2: The number of shifts to apply ') - call zvout (logfil, np, ritz, ndigit, - & '_naup2: values of the shifts') - if ( ishift .eq. 
1 ) - & call zvout (logfil, np, bounds, ndigit, - & '_naup2: Ritz estimates of the shifts') - end if -c -c %---------------------------------------------------------% -c | Apply the NP implicit shifts by QR bulge chasing. | -c | Each shift is applied to the whole upper Hessenberg | -c | matrix H. | -c | The first 2*N locations of WORKD are used as workspace. | -c %---------------------------------------------------------% -c - call znapps (n, nev, np, ritz, v, ldv, - & h, ldh, resid, q, ldq, workl, workd) -c -c %---------------------------------------------% -c | Compute the B-norm of the updated residual. | -c | Keep B*RESID in WORKD(1:N) to be used in | -c | the first step of the next call to znaitr . | -c %---------------------------------------------% -c - cnorm = .true. - call second (t2) - if (bmat .eq. 'G') then - nbx = nbx + 1 - call zcopy (n, resid, 1, workd(n+1), 1) - ipntr(1) = n + 1 - ipntr(2) = 1 - ido = 2 -c -c %----------------------------------% -c | Exit in order to compute B*RESID | -c %----------------------------------% -c - go to 9000 - else if (bmat .eq. 'I') then - call zcopy (n, resid, 1, workd, 1) - end if -c - 100 continue -c -c %----------------------------------% -c | Back from reverse communication; | -c | WORKD(1:N) := B*RESID | -c %----------------------------------% -c - if (bmat .eq. 'G') then - call second (t3) - tmvbx = tmvbx + (t3 - t2) - end if -c - if (bmat .eq. 'G') then - cmpnorm = wzdotc (n, resid, 1, workd, 1) - rnorm = sqrt(dlapy2 (dble (cmpnorm),dimag (cmpnorm))) - else if (bmat .eq. 'I') then - rnorm = dznrm2 (n, resid, 1) - end if - cnorm = .false. -c - if (msglvl .gt. 
2) then - call dvout (logfil, 1, rnorm, ndigit, - & '_naup2: B-norm of residual for compressed factorization') - call zmout (logfil, nev, nev, h, ldh, ndigit, - & '_naup2: Compressed upper Hessenberg matrix H') - end if -c - go to 1000 -c -c %---------------------------------------------------------------% -c | | -c | E N D O F M A I N I T E R A T I O N L O O P | -c | | -c %---------------------------------------------------------------% -c - 1100 continue -c - mxiter = iter - nev = nconv -c - 1200 continue - ido = 99 -c -c %------------% -c | Error Exit | -c %------------% -c - call second (t1) - tcaup2 = t1 - t0 -c - 9000 continue -c -c %---------------% -c | End of znaup2 | -c %---------------% -c - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaupd.f deleted file mode 100644 index ce107ccce4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/znaupd.f +++ /dev/null @@ -1,664 +0,0 @@ -c\BeginDoc -c -c\Name: znaupd -c -c\Description: -c Reverse communication interface for the Implicitly Restarted Arnoldi -c iteration. This is intended to be used to find a few eigenpairs of a -c complex linear operator OP with respect to a semi-inner product defined -c by a hermitian positive semi-definite real matrix B. B may be the identity -c matrix. NOTE: if both OP and B are real, then dsaupd or dnaupd should -c be used. -c -c -c The computed approximate eigenvalues are called Ritz values and -c the corresponding approximate eigenvectors are called Ritz vectors. -c -c znaupd is usually called iteratively to solve one of the -c following problems: -c -c Mode 1: A*x = lambda*x. -c ===> OP = A and B = I. -c -c Mode 2: A*x = lambda*M*x, M hermitian positive definite -c ===> OP = inv[M]*A and B = M. -c ===> (If M can be factored see remark 3 below) -c -c Mode 3: A*x = lambda*M*x, M hermitian semi-definite -c ===> OP = inv[A - sigma*M]*M and B = M. 
-c ===> shift-and-invert mode -c If OP*x = amu*x, then lambda = sigma + 1/amu. -c -c -c NOTE: The action of w <- inv[A - sigma*M]*v or w <- inv[M]*v -c should be accomplished either by a direct method -c using a sparse matrix factorization and solving -c -c [A - sigma*M]*w = v or M*w = v, -c -c or through an iterative method for solving these -c systems. If an iterative method is used, the -c convergence test must be more stringent than -c the accuracy requirements for the eigenvalue -c approximations. -c -c\Usage: -c call znaupd -c ( IDO, BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, -c IPNTR, WORKD, WORKL, LWORKL, RWORK, INFO ) -c -c\Arguments -c IDO Integer. (INPUT/OUTPUT) -c Reverse communication flag. IDO must be zero on the first -c call to znaupd. IDO will be set internally to -c indicate the type of operation to be performed. Control is -c then given back to the calling routine which has the -c responsibility to carry out the requested operation and call -c znaupd with the result. The operand is given in -c WORKD(IPNTR(1)), the result must be put in WORKD(IPNTR(2)). -c ------------------------------------------------------------- -c IDO = 0: first call to the reverse communication interface -c IDO = -1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c This is for the initialization phase to force the -c starting vector into the range of OP. -c IDO = 1: compute Y = OP * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c In mode 3, the vector B * X is already -c available in WORKD(ipntr(3)). It does not -c need to be recomputed in forming OP * X. -c IDO = 2: compute Y = M * X where -c IPNTR(1) is the pointer into WORKD for X, -c IPNTR(2) is the pointer into WORKD for Y. -c IDO = 3: compute and return the shifts in the first -c NP locations of WORKL. 
-c IDO = 99: done -c ------------------------------------------------------------- -c After the initialization phase, when the routine is used in -c the "shift-and-invert" mode, the vector M * X is already -c available and does not need to be recomputed in forming OP*X. -c -c BMAT Character*1. (INPUT) -c BMAT specifies the type of the matrix B that defines the -c semi-inner product for the operator OP. -c BMAT = 'I' -> standard eigenvalue problem A*x = lambda*x -c BMAT = 'G' -> generalized eigenvalue problem A*x = lambda*M*x -c -c N Integer. (INPUT) -c Dimension of the eigenproblem. -c -c WHICH Character*2. (INPUT) -c 'LM' -> want the NEV eigenvalues of largest magnitude. -c 'SM' -> want the NEV eigenvalues of smallest magnitude. -c 'LR' -> want the NEV eigenvalues of largest real part. -c 'SR' -> want the NEV eigenvalues of smallest real part. -c 'LI' -> want the NEV eigenvalues of largest imaginary part. -c 'SI' -> want the NEV eigenvalues of smallest imaginary part. -c -c NEV Integer. (INPUT) -c Number of eigenvalues of OP to be computed. 0 < NEV < N-1. -c -c TOL Double precision scalar. (INPUT) -c Stopping criteria: the relative accuracy of the Ritz value -c is considered acceptable if BOUNDS(I) .LE. TOL*ABS(RITZ(I)) -c where ABS(RITZ(I)) is the magnitude when RITZ(I) is complex. -c DEFAULT = dlamch('EPS') (machine precision as computed -c by the LAPACK auxiliary subroutine dlamch). -c -c RESID Complex*16 array of length N. (INPUT/OUTPUT) -c On INPUT: -c If INFO .EQ. 0, a random initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c On OUTPUT: -c RESID contains the final residual vector. -c -c NCV Integer. (INPUT) -c Number of columns of the matrix V. NCV must satisfy the two -c inequalities 1 <= NCV-NEV and NCV <= N. -c This will indicate how many Arnoldi vectors are generated -c at each iteration. 
After the startup phase in which NEV -c Arnoldi vectors are generated, the algorithm generates -c approximately NCV-NEV Arnoldi vectors at each subsequent update -c iteration. Most of the cost in generating each Arnoldi vector is -c in the matrix-vector operation OP*x. (See remark 4 below.) -c -c V Complex*16 array N by NCV. (OUTPUT) -c Contains the final set of Arnoldi basis vectors. -c -c LDV Integer. (INPUT) -c Leading dimension of V exactly as declared in the calling program. -c -c IPARAM Integer array of length 11. (INPUT/OUTPUT) -c IPARAM(1) = ISHIFT: method for selecting the implicit shifts. -c The shifts selected at each iteration are used to filter out -c the components of the unwanted eigenvector. -c ------------------------------------------------------------- -c ISHIFT = 0: the shifts are to be provided by the user via -c reverse communication. The NCV eigenvalues of -c the Hessenberg matrix H are returned in the part -c of WORKL array corresponding to RITZ. -c ISHIFT = 1: exact shifts with respect to the current -c Hessenberg matrix H. This is equivalent to -c restarting the iteration from the beginning -c after updating the starting vector with a linear -c combination of Ritz vectors associated with the -c "wanted" eigenvalues. -c ISHIFT = 2: other choice of internal shift to be defined. -c ------------------------------------------------------------- -c -c IPARAM(2) = No longer referenced -c -c IPARAM(3) = MXITER -c On INPUT: maximum number of Arnoldi update iterations allowed. -c On OUTPUT: actual number of Arnoldi update iterations taken. -c -c IPARAM(4) = NB: blocksize to be used in the recurrence. -c The code currently works only for NB = 1. -c -c IPARAM(5) = NCONV: number of "converged" Ritz values. -c This represents the number of Ritz values that satisfy -c the convergence criterion. -c -c IPARAM(6) = IUPD -c No longer referenced. Implicit restarting is ALWAYS used. 
-c -c IPARAM(7) = MODE -c On INPUT determines what type of eigenproblem is being solved. -c Must be 1,2,3; See under \Description of znaupd for the -c four modes available. -c -c IPARAM(8) = NP -c When ido = 3 and the user provides shifts through reverse -c communication (IPARAM(1)=0), _naupd returns NP, the number -c of shifts the user is to provide. 0 < NP < NCV-NEV. -c -c IPARAM(9) = NUMOP, IPARAM(10) = NUMOPB, IPARAM(11) = NUMREO, -c OUTPUT: NUMOP = total number of OP*x operations, -c NUMOPB = total number of B*x operations if BMAT='G', -c NUMREO = total number of steps of re-orthogonalization. -c -c IPNTR Integer array of length 14. (OUTPUT) -c Pointer to mark the starting locations in the WORKD and WORKL -c arrays for matrices/vectors used by the Arnoldi iteration. -c ------------------------------------------------------------- -c IPNTR(1): pointer to the current operand vector X in WORKD. -c IPNTR(2): pointer to the current result vector Y in WORKD. -c IPNTR(3): pointer to the vector B * X in WORKD when used in -c the shift-and-invert mode. -c IPNTR(4): pointer to the next available location in WORKL -c that is untouched by the program. -c IPNTR(5): pointer to the NCV by NCV upper Hessenberg -c matrix H in WORKL. -c IPNTR(6): pointer to the ritz value array RITZ -c IPNTR(7): pointer to the (projected) ritz vector array Q -c IPNTR(8): pointer to the error BOUNDS array in WORKL. -c IPNTR(14): pointer to the NP shifts in WORKL. See Remark 5 below. -c -c Note: IPNTR(9:13) is only referenced by zneupd. See Remark 2 below. -c -c IPNTR(9): pointer to the NCV RITZ values of the -c original system. -c IPNTR(10): Not Used -c IPNTR(11): pointer to the NCV corresponding error bounds. -c IPNTR(12): pointer to the NCV by NCV upper triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c zneupd if RVEC = .TRUE. See Remark 2 below. 
-c -c ------------------------------------------------------------- -c -c WORKD Complex*16 work array of length 3*N. (REVERSE COMMUNICATION) -c Distributed array to be used in the basic Arnoldi iteration -c for reverse communication. The user should not use WORKD -c as temporary workspace during the iteration !!!!!!!!!! -c See Data Distribution Note below. -c -c WORKL Complex*16 work array of length LWORKL. (OUTPUT/WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. See Data Distribution Note below. -c -c LWORKL Integer. (INPUT) -c LWORKL must be at least 3*NCV**2 + 5*NCV. -c -c RWORK Double precision work array of length NCV (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c -c INFO Integer. (INPUT/OUTPUT) -c If INFO .EQ. 0, a randomly initial residual vector is used. -c If INFO .NE. 0, RESID contains the initial residual vector, -c possibly from a previous run. -c Error flag on output. -c = 0: Normal exit. -c = 1: Maximum number of iterations taken. -c All possible eigenvalues of OP has been found. IPARAM(5) -c returns the number of wanted converged Ritz values. -c = 2: No longer an informational error. Deprecated starting -c with release 2 of ARPACK. -c = 3: No shifts could be applied during a cycle of the -c Implicitly restarted Arnoldi iteration. One possibility -c is to increase the size of NCV relative to NEV. -c See remark 4 below. -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 1 and less than or equal to N. -c = -4: The maximum number of Arnoldi update iteration -c must be greater than zero. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. -c = -7: Length of private work array is not sufficient. -c = -8: Error return from LAPACK eigenvalue calculation; -c = -9: Starting vector is zero. -c = -10: IPARAM(7) must be 1,2,3. -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. 
-c = -12: IPARAM(1) must be equal to 0 or 1. -c = -9999: Could not build an Arnoldi factorization. -c User input error highly likely. Please -c check actual array dimensions and layout. -c IPARAM(5) returns the size of the current Arnoldi -c factorization. -c -c\Remarks -c 1. The computed Ritz values are approximate eigenvalues of OP. The -c selection of WHICH should be made with this in mind when using -c Mode = 3. When operating in Mode = 3 setting WHICH = 'LM' will -c compute the NEV eigenvalues of the original problem that are -c closest to the shift SIGMA . After convergence, approximate eigenvalues -c of the original problem may be obtained with the ARPACK subroutine zneupd. -c -c 2. If a basis for the invariant subspace corresponding to the converged Ritz -c values is needed, the user must call zneupd immediately following -c completion of znaupd. This is new starting with release 2 of ARPACK. -c -c 3. If M can be factored into a Cholesky factorization M = LL` -c then Mode = 2 should not be selected. Instead one should use -c Mode = 1 with OP = inv(L)*A*inv(L`). Appropriate triangular -c linear systems should be solved with L and L` rather -c than computing inverses. After convergence, an approximate -c eigenvector z of the original problem is recovered by solving -c L`z = x where x is a Ritz vector of OP. -c -c 4. At present there is no a-priori analysis to guide the selection -c of NCV relative to NEV. The only formal requirement is that NCV > NEV + 1. -c However, it is recommended that NCV .ge. 2*NEV. If many problems of -c the same type are to be solved, one should experiment with increasing -c NCV while keeping NEV fixed for a given test problem. This will -c usually decrease the required number of OP*x operations but it -c also increases the work and storage required to maintain the orthogonal -c basis vectors. The optimal "cross-over" with respect to CPU time -c is problem dependent and must be determined empirically. 
-c See Chapter 8 of Reference 2 for further information. -c -c 5. When IPARAM(1) = 0, and IDO = 3, the user needs to provide the -c NP = IPARAM(8) complex shifts in locations -c WORKL(IPNTR(14)), WORKL(IPNTR(14)+1), ... , WORKL(IPNTR(14)+NP). -c Eigenvalues of the current upper Hessenberg matrix are located in -c WORKL(IPNTR(6)) through WORKL(IPNTR(6)+NCV-1). They are ordered -c according to the order defined by WHICH. The associated Ritz estimates -c are located in WORKL(IPNTR(8)), WORKL(IPNTR(8)+1), ... , -c WORKL(IPNTR(8)+NCV-1). -c -c----------------------------------------------------------------------- -c -c\Data Distribution Note: -c -c Fortran-D syntax: -c ================ -c Complex*16 resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) -c decompose d1(n), d2(n,ncv) -c align resid(i) with d1(i) -c align v(i,j) with d2(i,j) -c align workd(i) with d1(i) range (1:n) -c align workd(i) with d1(i-n) range (n+1:2*n) -c align workd(i) with d1(i-2*n) range (2*n+1:3*n) -c distribute d1(block), d2(block,:) -c replicated workl(lworkl) -c -c Cray MPP syntax: -c =============== -c Complex*16 resid(n), v(ldv,ncv), workd(n,3), workl(lworkl) -c shared resid(block), v(block,:), workd(block,:) -c replicated workl(lworkl) -c -c CM2/CM5 syntax: -c ============== -c -c----------------------------------------------------------------------- -c -c include 'ex-nonsym.doc' -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B.N. Parlett & Y. 
Saad, "_Complex_ Shift and Invert Strategies for -c Double precision Matrices", Linear Algebra and its Applications, vol 88/89, -c pp 575-595, (1987). -c -c\Routines called: -c znaup2 ARPACK routine that implements the Implicitly Restarted -c Arnoldi Iteration. -c zstatn ARPACK routine that initializes the timing variables. -c ivout ARPACK utility routine that prints integers. -c zvout ARPACK utility routine that prints vectors. -c second ARPACK utility routine for timing. -c dlamch LAPACK routine that determines machine constants. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: naupd.F SID: 2.9 DATE OF SID: 07/21/02 RELEASE: 2 -c -c\Remarks -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine znaupd - & ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, - & ipntr, workd, workl, lworkl, rwork, info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat*1, which*2 - integer ido, info, ldv, lworkl, n, ncv, nev - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - Complex*16 - & resid(n), v(ldv,ncv), workd(3*n), workl(lworkl) - Double precision - & rwork(ncv) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0)) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer bounds, ierr, ih, iq, ishift, iupd, iw, - & ldh, ldq, levec, mode, msglvl, mxiter, nb, - & nev0, next, np, ritz, j - 
save bounds, ih, iq, ishift, iupd, iw, - & ldh, ldq, levec, mode, msglvl, mxiter, nb, - & nev0, next, np, ritz -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external znaup2, zvout, ivout, second, zstatn -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dlamch - external dlamch -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - if (ido .eq. 0) then -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call zstatn - call second (t0) - msglvl = mcaupd -c -c %----------------% -c | Error checking | -c %----------------% -c - ierr = 0 - ishift = iparam(1) -c levec = iparam(2) - mxiter = iparam(3) -c nb = iparam(4) - nb = 1 -c -c %--------------------------------------------% -c | Revision 2 performs only implicit restart. | -c %--------------------------------------------% -c - iupd = 1 - mode = iparam(7) -c - if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev .or. ncv .gt. n) then - ierr = -3 - else if (mxiter .le. 0) then - ierr = -4 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 5*ncv) then - ierr = -7 - else if (mode .lt. 1 .or. mode .gt. 3) then - ierr = -10 - else if (mode .eq. 1 .and. bmat .eq. 'G') then - ierr = -11 - end if -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - ido = 99 - go to 9000 - end if -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - if (nb .le. 0) nb = 1 - if (tol .le. 0.0D+0 ) tol = dlamch('EpsMach') - if (ishift .ne. 0 .and. 
- & ishift .ne. 1 .and. - & ishift .ne. 2) ishift = 1 -c -c %----------------------------------------------% -c | NP is the number of additional steps to | -c | extend the length NEV Lanczos factorization. | -c | NEV0 is the local variable designating the | -c | size of the invariant subspace desired. | -c %----------------------------------------------% -c - np = ncv - nev - nev0 = nev -c -c %-----------------------------% -c | Zero out internal workspace | -c %-----------------------------% -c - do 10 j = 1, 3*ncv**2 + 5*ncv - workl(j) = zero - 10 continue -c -c %-------------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, BOUNDS, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. | -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+ncv) := the ritz values | -c | workl(ncv*ncv+ncv+1:ncv*ncv+2*ncv) := error bounds | -c | workl(ncv*ncv+2*ncv+1:2*ncv*ncv+2*ncv) := rotation matrix Q | -c | workl(2*ncv*ncv+2*ncv+1:3*ncv*ncv+5*ncv) := workspace | -c | The final workspace is needed by subroutine zneigh called | -c | by znaup2. Subroutine zneigh calls LAPACK routines for | -c | calculating eigenvalues and the last row of the eigenvector | -c | matrix. | -c %-------------------------------------------------------------% -c - ldh = ncv - ldq = ncv - ih = 1 - ritz = ih + ldh*ncv - bounds = ritz + ncv - iq = bounds + ncv - iw = iq + ldq*ncv - next = iw + ncv**2 + 3*ncv -c - ipntr(4) = next - ipntr(5) = ih - ipntr(6) = ritz - ipntr(7) = iq - ipntr(8) = bounds - ipntr(14) = iw - end if -c -c %-------------------------------------------------------% -c | Carry out the Implicitly restarted Arnoldi Iteration. 
| -c %-------------------------------------------------------% -c - call znaup2 - & ( ido, bmat, n, which, nev0, np, tol, resid, mode, iupd, - & ishift, mxiter, v, ldv, workl(ih), ldh, workl(ritz), - & workl(bounds), workl(iq), ldq, workl(iw), - & ipntr, workd, rwork, info ) -c -c %--------------------------------------------------% -c | ido .ne. 99 implies use of reverse communication | -c | to compute operations involving OP. | -c %--------------------------------------------------% -c - if (ido .eq. 3) iparam(8) = np - if (ido .ne. 99) go to 9000 -c - iparam(3) = mxiter - iparam(5) = np - iparam(9) = nopx - iparam(10) = nbx - iparam(11) = nrorth -c -c %------------------------------------% -c | Exit if there was an informational | -c | error within znaup2. | -c %------------------------------------% -c - if (info .lt. 0) go to 9000 - if (info .eq. 2) info = 3 -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, mxiter, ndigit, - & '_naupd: Number of update iterations taken') - call ivout (logfil, 1, np, ndigit, - & '_naupd: Number of wanted "converged" Ritz values') - call zvout (logfil, np, workl(ritz), ndigit, - & '_naupd: The final Ritz values') - call zvout (logfil, np, workl(bounds), ndigit, - & '_naupd: Associated Ritz estimates') - end if -c - call second (t1) - tcaupd = t1 - t0 -c - if (msglvl .gt. 
0) then -c -c %--------------------------------------------------------% -c | Version Number & Version Date are defined in version.h | -c %--------------------------------------------------------% -c - write (6,1000) - write (6,1100) mxiter, nopx, nbx, nrorth, nitref, nrstrt, - & tmvopx, tmvbx, tcaupd, tcaup2, tcaitr, titref, - & tgetv0, tceigh, tcgets, tcapps, tcconv, trvec - 1000 format (//, - & 5x, '=============================================',/ - & 5x, '= Complex implicit Arnoldi update code =',/ - & 5x, '= Version Number: ', ' 2.3', 21x, ' =',/ - & 5x, '= Version Date: ', ' 07/31/96', 16x, ' =',/ - & 5x, '=============================================',/ - & 5x, '= Summary of timing statistics =',/ - & 5x, '=============================================',//) - 1100 format ( - & 5x, 'Total number update iterations = ', i5,/ - & 5x, 'Total number of OP*x operations = ', i5,/ - & 5x, 'Total number of B*x operations = ', i5,/ - & 5x, 'Total number of reorthogonalization steps = ', i5,/ - & 5x, 'Total number of iterative refinement steps = ', i5,/ - & 5x, 'Total number of restart steps = ', i5,/ - & 5x, 'Total time in user OP*x operation = ', f12.6,/ - & 5x, 'Total time in user B*x operation = ', f12.6,/ - & 5x, 'Total time in Arnoldi update routine = ', f12.6,/ - & 5x, 'Total time in naup2 routine = ', f12.6,/ - & 5x, 'Total time in basic Arnoldi iteration loop = ', f12.6,/ - & 5x, 'Total time in reorthogonalization phase = ', f12.6,/ - & 5x, 'Total time in (re)start vector generation = ', f12.6,/ - & 5x, 'Total time in Hessenberg eig. 
subproblem = ', f12.6,/ - & 5x, 'Total time in getting the shifts = ', f12.6,/ - & 5x, 'Total time in applying the shifts = ', f12.6,/ - & 5x, 'Total time in convergence testing = ', f12.6,/ - & 5x, 'Total time in computing final Ritz vectors = ', f12.6/) - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of znaupd | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zneigh.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zneigh.f deleted file mode 100644 index 299a9cf313..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zneigh.f +++ /dev/null @@ -1,257 +0,0 @@ -c\BeginDoc -c -c\Name: zneigh -c -c\Description: -c Compute the eigenvalues of the current upper Hessenberg matrix -c and the corresponding Ritz estimates given the current residual norm. -c -c\Usage: -c call zneigh -c ( RNORM, N, H, LDH, RITZ, BOUNDS, Q, LDQ, WORKL, RWORK, IERR ) -c -c\Arguments -c RNORM Double precision scalar. (INPUT) -c Residual norm corresponding to the current upper Hessenberg -c matrix H. -c -c N Integer. (INPUT) -c Size of the matrix H. -c -c H Complex*16 N by N array. (INPUT) -c H contains the current upper Hessenberg matrix. -c -c LDH Integer. (INPUT) -c Leading dimension of H exactly as declared in the calling -c program. -c -c RITZ Complex*16 array of length N. (OUTPUT) -c On output, RITZ(1:N) contains the eigenvalues of H. -c -c BOUNDS Complex*16 array of length N. (OUTPUT) -c On output, BOUNDS contains the Ritz estimates associated with -c the eigenvalues held in RITZ. This is equal to RNORM -c times the last components of the eigenvectors corresponding -c to the eigenvalues in RITZ. -c -c Q Complex*16 N by N array. (WORKSPACE) -c Workspace needed to store the eigenvectors of H. -c -c LDQ Integer. (INPUT) -c Leading dimension of Q exactly as declared in the calling -c program. -c -c WORKL Complex*16 work array of length N**2 + 3*N. 
(WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. This is needed to keep the full Schur form -c of H and also in the calculation of the eigenvectors of H. -c -c RWORK Double precision work array of length N (WORKSPACE) -c Private (replicated) array on each PE or array allocated on -c the front end. -c -c IERR Integer. (OUTPUT) -c Error exit flag from zlahqr or ztrevc. -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c zmout ARPACK utility routine that prints matrices -c zvout ARPACK utility routine that prints vectors. -c dvout ARPACK utility routine that prints vectors. -c zlacpy LAPACK matrix copy routine. -c zlahqr LAPACK routine to compute the Schur form of an -c upper Hessenberg matrix. -c zlaset LAPACK matrix initialization routine. -c ztrevc LAPACK routine to compute the eigenvectors of a matrix -c in upper triangular form -c zcopy Level 1 BLAS that copies one vector to another. -c zdscal Level 1 BLAS that scales a complex vector by a real number. -c dznrm2 Level 1 BLAS that computes the norm of a vector. -c -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. 
of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: neigh.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c None -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine zneigh (rnorm, n, h, ldh, ritz, bounds, - & q, ldq, workl, rwork, ierr) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - integer ierr, n, ldh, ldq - Double precision - & rnorm -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex*16 - & bounds(n), h(ldh,n), q(ldq,n), ritz(n), - & workl(n*(n+3)) - Double precision - & rwork(n) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - Double precision - & rone - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0), - & rone = 1.0D+0) -c -c %------------------------% -c | Local Scalars & Arrays | -c %------------------------% -c - logical select(1) - integer j, msglvl - Complex*16 - & vl(1) - Double precision - & temp -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zlacpy, zlahqr, ztrevc, zcopy, - & zdscal, zmout, zvout, second -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & dznrm2 - external dznrm2 -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mceigh -c - if (msglvl .gt. 
2) then - call zmout (logfil, n, n, h, ldh, ndigit, - & '_neigh: Entering upper Hessenberg matrix H ') - end if -c -c %----------------------------------------------------------% -c | 1. Compute the eigenvalues, the last components of the | -c | corresponding Schur vectors and the full Schur form T | -c | of the current upper Hessenberg matrix H. | -c | zlahqr returns the full Schur form of H | -c | in WORKL(1:N**2), and the Schur vectors in q. | -c %----------------------------------------------------------% -c - call zlacpy ('All', n, n, h, ldh, workl, n) - call zlaset ('All', n, n, zero, one, q, ldq) - call zlahqr (.true., .true., n, 1, n, workl, ldh, ritz, - & 1, n, q, ldq, ierr) - if (ierr .ne. 0) go to 9000 -c - call zcopy (n, q(n-1,1), ldq, bounds, 1) - if (msglvl .gt. 1) then - call zvout (logfil, n, bounds, ndigit, - & '_neigh: last row of the Schur matrix for H') - end if -c -c %----------------------------------------------------------% -c | 2. Compute the eigenvectors of the full Schur form T and | -c | apply the Schur vectors to get the corresponding | -c | eigenvectors. | -c %----------------------------------------------------------% -c - call ztrevc ('Right', 'Back', select, n, workl, n, vl, n, q, - & ldq, n, n, workl(n*n+1), rwork, ierr) -c - if (ierr .ne. 0) go to 9000 -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | Euclidean norms are all one. LAPACK subroutine | -c | ztrevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1; here the magnitude of a complex | -c | number (x,y) is taken to be |x| + |y|. | -c %------------------------------------------------% -c - do 10 j=1, n - temp = dznrm2( n, q(1,j), 1 ) - call zdscal ( n, rone / temp, q(1,j), 1 ) - 10 continue -c - if (msglvl .gt. 
1) then - call zcopy(n, q(n,1), ldq, workl, 1) - call zvout (logfil, n, workl, ndigit, - & '_neigh: Last row of the eigenvector matrix for H') - end if -c -c %----------------------------% -c | Compute the Ritz estimates | -c %----------------------------% -c - call zcopy(n, q(n,1), n, bounds, 1) - call zdscal(n, rnorm, bounds, 1) -c - if (msglvl .gt. 2) then - call zvout (logfil, n, ritz, ndigit, - & '_neigh: The eigenvalues of H') - call zvout (logfil, n, bounds, ndigit, - & '_neigh: Ritz estimates for the eigenvalues of H') - end if -c - call second(t1) - tceigh = tceigh + (t1 - t0) -c - 9000 continue - return -c -c %---------------% -c | End of zneigh | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zneupd.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zneupd.f deleted file mode 100644 index 5fb566f5ce..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zneupd.f +++ /dev/null @@ -1,876 +0,0 @@ -c\BeginDoc -c -c\Name: zneupd -c -c\Description: -c This subroutine returns the converged approximations to eigenvalues -c of A*z = lambda*B*z and (optionally): -c -c (1) The corresponding approximate eigenvectors; -c -c (2) An orthonormal basis for the associated approximate -c invariant subspace; -c -c (3) Both. -c -c There is negligible additional cost to obtain eigenvectors. An orthonormal -c basis is always computed. There is an additional storage cost of n*nev -c if both are requested (in this case a separate array Z must be supplied). -c -c The approximate eigenvalues and eigenvectors of A*z = lambda*B*z -c are derived from approximate eigenvalues and eigenvectors of -c of the linear operator OP prescribed by the MODE selection in the -c call to ZNAUPD. ZNAUPD must be called before this routine is called. -c These approximate eigenvalues and vectors are commonly called Ritz -c values and Ritz vectors respectively. They are referred to as such -c in the comments that follow. 
The computed orthonormal basis for the -c invariant subspace corresponding to these Ritz values is referred to as a -c Schur basis. -c -c The definition of OP as well as other terms and the relation of computed -c Ritz values and vectors of OP with respect to the given problem -c A*z = lambda*B*z may be found in the header of ZNAUPD. For a brief -c description, see definitions of IPARAM(7), MODE and WHICH in the -c documentation of ZNAUPD. -c -c\Usage: -c call zneupd -c ( RVEC, HOWMNY, SELECT, D, Z, LDZ, SIGMA, WORKEV, BMAT, -c N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, WORKD, -c WORKL, LWORKL, RWORK, INFO ) -c -c\Arguments: -c RVEC LOGICAL (INPUT) -c Specifies whether a basis for the invariant subspace corresponding -c to the converged Ritz value approximations for the eigenproblem -c A*z = lambda*B*z is computed. -c -c RVEC = .FALSE. Compute Ritz values only. -c -c RVEC = .TRUE. Compute Ritz vectors or Schur vectors. -c See Remarks below. -c -c HOWMNY Character*1 (INPUT) -c Specifies the form of the basis for the invariant subspace -c corresponding to the converged Ritz values that is to be computed. -c -c = 'A': Compute NEV Ritz vectors; -c = 'P': Compute NEV Schur vectors; -c = 'S': compute some of the Ritz vectors, specified -c by the logical array SELECT. -c -c SELECT Logical array of dimension NCV. (INPUT) -c If HOWMNY = 'S', SELECT specifies the Ritz vectors to be -c computed. To select the Ritz vector corresponding to a -c Ritz value D(j), SELECT(j) must be set to .TRUE.. -c If HOWMNY = 'A' or 'P', SELECT need not be initialized -c but it is used as internal workspace. -c -c D Complex*16 array of dimension NEV+1. (OUTPUT) -c On exit, D contains the Ritz approximations -c to the eigenvalues lambda for A*z = lambda*B*z. -c -c Z Complex*16 N by NEV array (OUTPUT) -c On exit, if RVEC = .TRUE. 
and HOWMNY = 'A', then the columns of -c Z represents approximate eigenvectors (Ritz vectors) corresponding -c to the NCONV=IPARAM(5) Ritz values for eigensystem -c A*z = lambda*B*z. -c -c If RVEC = .FALSE. or HOWMNY = 'P', then Z is NOT REFERENCED. -c -c NOTE: If if RVEC = .TRUE. and a Schur basis is not required, -c the array Z may be set equal to first NEV+1 columns of the Arnoldi -c basis array V computed by ZNAUPD. In this case the Arnoldi basis -c will be destroyed and overwritten with the eigenvector basis. -c -c LDZ Integer. (INPUT) -c The leading dimension of the array Z. If Ritz vectors are -c desired, then LDZ .ge. max( 1, N ) is required. -c In any case, LDZ .ge. 1 is required. -c -c SIGMA Complex*16 (INPUT) -c If IPARAM(7) = 3 then SIGMA represents the shift. -c Not referenced if IPARAM(7) = 1 or 2. -c -c WORKEV Complex*16 work array of dimension 2*NCV. (WORKSPACE) -c -c **** The remaining arguments MUST be the same as for the **** -c **** call to ZNAUPD that was just completed. **** -c -c NOTE: The remaining arguments -c -c BMAT, N, WHICH, NEV, TOL, RESID, NCV, V, LDV, IPARAM, IPNTR, -c WORKD, WORKL, LWORKL, RWORK, INFO -c -c must be passed directly to ZNEUPD following the last call -c to ZNAUPD. These arguments MUST NOT BE MODIFIED between -c the the last call to ZNAUPD and the call to ZNEUPD. -c -c Three of these parameters (V, WORKL and INFO) are also output parameters: -c -c V Complex*16 N by NCV array. (INPUT/OUTPUT) -c -c Upon INPUT: the NCV columns of V contain the Arnoldi basis -c vectors for OP as constructed by ZNAUPD . -c -c Upon OUTPUT: If RVEC = .TRUE. the first NCONV=IPARAM(5) columns -c contain approximate Schur vectors that span the -c desired invariant subspace. -c -c NOTE: If the array Z has been set equal to first NEV+1 columns -c of the array V and RVEC=.TRUE. and HOWMNY= 'A', then the -c Arnoldi basis held by V has been overwritten by the desired -c Ritz vectors. 
If a separate array Z has been passed then -c the first NCONV=IPARAM(5) columns of V will contain approximate -c Schur vectors that span the desired invariant subspace. -c -c WORKL Double precision work array of length LWORKL. (OUTPUT/WORKSPACE) -c WORKL(1:ncv*ncv+2*ncv) contains information obtained in -c znaupd. They are not changed by zneupd. -c WORKL(ncv*ncv+2*ncv+1:3*ncv*ncv+4*ncv) holds the -c untransformed Ritz values, the untransformed error estimates of -c the Ritz values, the upper triangular matrix for H, and the -c associated matrix representation of the invariant subspace for H. -c -c Note: IPNTR(9:13) contains the pointer into WORKL for addresses -c of the above information computed by zneupd. -c ------------------------------------------------------------- -c IPNTR(9): pointer to the NCV RITZ values of the -c original system. -c IPNTR(10): Not used -c IPNTR(11): pointer to the NCV corresponding error estimates. -c IPNTR(12): pointer to the NCV by NCV upper triangular -c Schur matrix for H. -c IPNTR(13): pointer to the NCV by NCV matrix of eigenvectors -c of the upper Hessenberg matrix H. Only referenced by -c zneupd if RVEC = .TRUE. See Remark 2 below. -c ------------------------------------------------------------- -c -c INFO Integer. (OUTPUT) -c Error flag on output. -c = 0: Normal exit. -c -c = 1: The Schur form computed by LAPACK routine csheqr -c could not be reordered by LAPACK routine ztrsen. -c Re-enter subroutine zneupd with IPARAM(5)=NCV and -c increase the size of the array D to have -c dimension at least dimension NCV and allocate at least NCV -c columns for Z. NOTE: Not necessary if Z and V share -c the same space. Please notify the authors if this error -c occurs. -c -c = -1: N must be positive. -c = -2: NEV must be positive. -c = -3: NCV-NEV >= 1 and less than or equal to N. -c = -5: WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI' -c = -6: BMAT must be one of 'I' or 'G'. 
-c = -7: Length of private work WORKL array is not sufficient. -c = -8: Error return from LAPACK eigenvalue calculation. -c This should never happened. -c = -9: Error return from calculation of eigenvectors. -c Informational error from LAPACK routine ztrevc. -c = -10: IPARAM(7) must be 1,2,3 -c = -11: IPARAM(7) = 1 and BMAT = 'G' are incompatible. -c = -12: HOWMNY = 'S' not yet implemented -c = -13: HOWMNY must be one of 'A' or 'P' if RVEC = .true. -c = -14: ZNAUPD did not find any eigenvalues to sufficient -c accuracy. -c = -15: ZNEUPD got a different count of the number of converged -c Ritz values than ZNAUPD got. This indicates the user -c probably made an error in passing data from ZNAUPD to -c ZNEUPD or that the data was modified before entering -c ZNEUPD -c -c\BeginLib -c -c\References: -c 1. D.C. Sorensen, "Implicit Application of Polynomial Filters in -c a k-Step Arnoldi Method", SIAM J. Matr. Anal. Apps., 13 (1992), -c pp 357-385. -c 2. R.B. Lehoucq, "Analysis and Implementation of an Implicitly -c Restarted Arnoldi Iteration", Rice University Technical Report -c TR95-13, Department of Computational and Applied Mathematics. -c 3. B. Nour-Omid, B. N. Parlett, T. Ericsson and P. S. Jensen, -c "How to Implement the Spectral Transformation", Math Comp., -c Vol. 48, No. 178, April, 1987 pp. 664-673. -c -c\Routines called: -c ivout ARPACK utility routine that prints integers. -c zmout ARPACK utility routine that prints matrices -c zvout ARPACK utility routine that prints vectors. -c zgeqr2 LAPACK routine that computes the QR factorization of -c a matrix. -c zlacpy LAPACK matrix copy routine. -c zlahqr LAPACK routine that computes the Schur form of a -c upper Hessenberg matrix. -c zlaset LAPACK matrix initialization routine. -c ztrevc LAPACK routine to compute the eigenvectors of a matrix -c in upper triangular form. -c ztrsen LAPACK routine that re-orders the Schur form. -c zunm2r LAPACK routine that applies an orthogonal matrix in -c factored form. 
-c dlamch LAPACK routine that determines machine constants. -c ztrmm Level 3 BLAS matrix times an upper triangular matrix. -c zgeru Level 2 BLAS rank one update to a matrix. -c zcopy Level 1 BLAS that copies one vector to another . -c zscal Level 1 BLAS that scales a vector. -c zdscal Level 1 BLAS that scales a complex vector by a real number. -c dznrm2 Level 1 BLAS that computes the norm of a complex vector. -c -c\Remarks -c -c 1. Currently only HOWMNY = 'A' and 'P' are implemented. -c -c 2. Schur vectors are an orthogonal representation for the basis of -c Ritz vectors. Thus, their numerical properties are often superior. -c If RVEC = .true. then the relationship -c A * V(:,1:IPARAM(5)) = V(:,1:IPARAM(5)) * T, and -c transpose( V(:,1:IPARAM(5)) ) * V(:,1:IPARAM(5)) = I -c are approximately satisfied. -c Here T is the leading submatrix of order IPARAM(5) of the -c upper triangular matrix stored workl(ipntr(12)). -c -c\Authors -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Chao Yang Houston, Texas -c Dept. 
of Computational & -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: neupd.F SID: 2.8 DATE OF SID: 07/21/02 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- - subroutine zneupd(rvec , howmny, select, d , - & z , ldz , sigma , workev, - & bmat , n , which , nev , - & tol , resid , ncv , v , - & ldv , iparam, ipntr , workd , - & workl, lworkl, rwork , info ) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character bmat, howmny, which*2 - logical rvec - integer info, ldz, ldv, lworkl, n, ncv, nev - Complex*16 - & sigma - Double precision - & tol -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - integer iparam(11), ipntr(14) - logical select(ncv) - Double precision - & rwork(ncv) - Complex*16 - & d(nev) , resid(n) , v(ldv,ncv), - & z(ldz, nev), - & workd(3*n) , workl(lworkl), workev(2*ncv) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0)) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - character type*6 - integer bounds, ierr , ih , ihbds, iheig , nconv , - & invsub, iuptri, iwev , j , ldh , ldq , - & mode , msglvl, ritz , wr , k , irz , - & ibd , outncv, iq , np , numcnv, jj , - & ishift, nconv2 - Complex*16 - & rnorm, temp, vl(1) - Double precision - & conds, sep, rtemp, eps23 - logical reord -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zcopy , zgeru, zgeqr2, zlacpy, zmout, - & zunm2r, ztrmm, zvout, ivout, - & zlahqr -c -c %--------------------% -c | External Functions | -c %--------------------% -c - Double precision - & 
dznrm2, dlamch, dlapy2 - external dznrm2, dlamch, dlapy2 -c - Complex*16 - & wzdotc - external wzdotc -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %------------------------% -c | Set default parameters | -c %------------------------% -c - msglvl = mceupd - mode = iparam(7) - nconv = iparam(5) - info = 0 -c -c -c %---------------------------------% -c | Get machine dependent constant. | -c %---------------------------------% -c - eps23 = dlamch('Epsilon-Machine') - eps23 = eps23**(2.0D+0 / 3.0D+0) -c -c %-------------------------------% -c | Quick return | -c | Check for incompatible input | -c %-------------------------------% -c - ierr = 0 -c - if (nconv .le. 0) then - ierr = -14 - else if (n .le. 0) then - ierr = -1 - else if (nev .le. 0) then - ierr = -2 - else if (ncv .le. nev .or. ncv .gt. n) then - ierr = -3 - else if (which .ne. 'LM' .and. - & which .ne. 'SM' .and. - & which .ne. 'LR' .and. - & which .ne. 'SR' .and. - & which .ne. 'LI' .and. - & which .ne. 'SI') then - ierr = -5 - else if (bmat .ne. 'I' .and. bmat .ne. 'G') then - ierr = -6 - else if (lworkl .lt. 3*ncv**2 + 4*ncv) then - ierr = -7 - else if ( (howmny .ne. 'A' .and. - & howmny .ne. 'P' .and. - & howmny .ne. 'S') .and. rvec ) then - ierr = -13 - else if (howmny .eq. 'S' ) then - ierr = -12 - end if -c - if (mode .eq. 1 .or. mode .eq. 2) then - type = 'REGULR' - else if (mode .eq. 3 ) then - type = 'SHIFTI' - else - ierr = -10 - end if - if (mode .eq. 1 .and. bmat .eq. 'G') ierr = -11 -c -c %------------% -c | Error Exit | -c %------------% -c - if (ierr .ne. 0) then - info = ierr - go to 9000 - end if -c -c %--------------------------------------------------------% -c | Pointer into WORKL for address of H, RITZ, WORKEV, Q | -c | etc... and the remaining workspace. | -c | Also update pointer to be used on output. 
| -c | Memory is laid out as follows: | -c | workl(1:ncv*ncv) := generated Hessenberg matrix | -c | workl(ncv*ncv+1:ncv*ncv+ncv) := ritz values | -c | workl(ncv*ncv+ncv+1:ncv*ncv+2*ncv) := error bounds | -c %--------------------------------------------------------% -c -c %-----------------------------------------------------------% -c | The following is used and set by ZNEUPD. | -c | workl(ncv*ncv+2*ncv+1:ncv*ncv+3*ncv) := The untransformed | -c | Ritz values. | -c | workl(ncv*ncv+3*ncv+1:ncv*ncv+4*ncv) := The untransformed | -c | error bounds of | -c | the Ritz values | -c | workl(ncv*ncv+4*ncv+1:2*ncv*ncv+4*ncv) := Holds the upper | -c | triangular matrix | -c | for H. | -c | workl(2*ncv*ncv+4*ncv+1: 3*ncv*ncv+4*ncv) := Holds the | -c | associated matrix | -c | representation of | -c | the invariant | -c | subspace for H. | -c | GRAND total of NCV * ( 3 * NCV + 4 ) locations. | -c %-----------------------------------------------------------% -c - ih = ipntr(5) - ritz = ipntr(6) - iq = ipntr(7) - bounds = ipntr(8) - ldh = ncv - ldq = ncv - iheig = bounds + ldh - ihbds = iheig + ldh - iuptri = ihbds + ldh - invsub = iuptri + ldh*ncv - ipntr(9) = iheig - ipntr(11) = ihbds - ipntr(12) = iuptri - ipntr(13) = invsub - wr = 1 - iwev = wr + ncv -c -c %-----------------------------------------% -c | irz points to the Ritz values computed | -c | by _neigh before exiting _naup2. | -c | ibd points to the Ritz estimates | -c | computed by _neigh before exiting | -c | _naup2. | -c %-----------------------------------------% -c - irz = ipntr(14) + ncv*ncv - ibd = irz + ncv -c -c %------------------------------------% -c | RNORM is B-norm of the RESID(1:N). | -c %------------------------------------% -c - rnorm = workl(ih+2) - workl(ih+2) = zero -c - if (msglvl .gt. 
2) then - call zvout(logfil, ncv, workl(irz), ndigit, - & '_neupd: Ritz values passed in from _NAUPD.') - call zvout(logfil, ncv, workl(ibd), ndigit, - & '_neupd: Ritz estimates passed in from _NAUPD.') - end if -c - if (rvec) then -c - reord = .false. -c -c %---------------------------------------------------% -c | Use the temporary bounds array to store indices | -c | These will be used to mark the select array later | -c %---------------------------------------------------% -c - do 10 j = 1,ncv - workl(bounds+j-1) = j - select(j) = .false. - 10 continue -c -c %-------------------------------------% -c | Select the wanted Ritz values. | -c | Sort the Ritz values so that the | -c | wanted ones appear at the tailing | -c | NEV positions of workl(irr) and | -c | workl(iri). Move the corresponding | -c | error estimates in workl(ibd) | -c | accordingly. | -c %-------------------------------------% -c - np = ncv - nev - ishift = 0 - call zngets(ishift, which , nev , - & np , workl(irz), workl(bounds)) -c - if (msglvl .gt. 2) then - call zvout (logfil, ncv, workl(irz), ndigit, - & '_neupd: Ritz values after calling _NGETS.') - call zvout (logfil, ncv, workl(bounds), ndigit, - & '_neupd: Ritz value indices after calling _NGETS.') - end if -c -c %-----------------------------------------------------% -c | Record indices of the converged wanted Ritz values | -c | Mark the select array for possible reordering | -c %-----------------------------------------------------% -c - numcnv = 0 - do 11 j = 1,ncv - rtemp = max(eps23, - & dlapy2 ( dble(workl(irz+ncv-j)), - & dimag(workl(irz+ncv-j)) )) - jj = workl(bounds + ncv - j) - if (numcnv .lt. nconv .and. - & dlapy2( dble(workl(ibd+jj-1)), - & dimag(workl(ibd+jj-1)) ) - & .le. tol*rtemp) then - select(jj) = .true. - numcnv = numcnv + 1 - if (jj .gt. nev) reord = .true. 
- endif - 11 continue -c -c %-----------------------------------------------------------% -c | Check the count (numcnv) of converged Ritz values with | -c | the number (nconv) reported by dnaupd. If these two | -c | are different then there has probably been an error | -c | caused by incorrect passing of the dnaupd data. | -c %-----------------------------------------------------------% -c - if (msglvl .gt. 2) then - call ivout(logfil, 1, numcnv, ndigit, - & '_neupd: Number of specified eigenvalues') - call ivout(logfil, 1, nconv, ndigit, - & '_neupd: Number of "converged" eigenvalues') - end if -c - if (numcnv .ne. nconv) then - info = -15 - go to 9000 - end if -c -c %-------------------------------------------------------% -c | Call LAPACK routine zlahqr to compute the Schur form | -c | of the upper Hessenberg matrix returned by ZNAUPD. | -c | Make a copy of the upper Hessenberg matrix. | -c | Initialize the Schur vector matrix Q to the identity. | -c %-------------------------------------------------------% -c - call zcopy(ldh*ncv, workl(ih), 1, workl(iuptri), 1) - call zlaset('All', ncv, ncv , - & zero , one, workl(invsub), - & ldq) - call zlahqr(.true., .true. , ncv , - & 1 , ncv , workl(iuptri), - & ldh , workl(iheig) , 1 , - & ncv , workl(invsub), ldq , - & ierr) - call zcopy(ncv , workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) -c - if (ierr .ne. 0) then - info = -8 - go to 9000 - end if -c - if (msglvl .gt. 1) then - call zvout (logfil, ncv, workl(iheig), ndigit, - & '_neupd: Eigenvalues of H') - call zvout (logfil, ncv, workl(ihbds), ndigit, - & '_neupd: Last row of the Schur vector matrix') - if (msglvl .gt. 3) then - call zmout (logfil , ncv, ncv , - & workl(iuptri), ldh, ndigit, - & '_neupd: The upper triangular matrix ') - end if - end if -c - if (reord) then -c -c %-----------------------------------------------% -c | Reorder the computed upper triangular matrix. 
| -c %-----------------------------------------------% -c - call ztrsen('None' , 'V' , select , - & ncv , workl(iuptri), ldh , - & workl(invsub), ldq , workl(iheig), - & nconv2 , conds , sep , - & workev , ncv , ierr) -c - if (nconv2 .lt. nconv) then - nconv = nconv2 - end if - - if (ierr .eq. 1) then - info = 1 - go to 9000 - end if -c - if (msglvl .gt. 2) then - call zvout (logfil, ncv, workl(iheig), ndigit, - & '_neupd: Eigenvalues of H--reordered') - if (msglvl .gt. 3) then - call zmout(logfil , ncv, ncv , - & workl(iuptri), ldq, ndigit, - & '_neupd: Triangular matrix after re-ordering') - end if - end if -c - end if -c -c %---------------------------------------------% -c | Copy the last row of the Schur basis matrix | -c | to workl(ihbds). This vector will be used | -c | to compute the Ritz estimates of converged | -c | Ritz values. | -c %---------------------------------------------% -c - call zcopy(ncv , workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) -c -c %--------------------------------------------% -c | Place the computed eigenvalues of H into D | -c | if a spectral transformation was not used. | -c %--------------------------------------------% -c - if (type .eq. 'REGULR') then - call zcopy(nconv, workl(iheig), 1, d, 1) - end if -c -c %----------------------------------------------------------% -c | Compute the QR factorization of the matrix representing | -c | the wanted invariant subspace located in the first NCONV | -c | columns of workl(invsub,ldq). | -c %----------------------------------------------------------% -c - call zgeqr2(ncv , nconv , workl(invsub), - & ldq , workev, workev(ncv+1), - & ierr) -c -c %--------------------------------------------------------% -c | * Postmultiply V by Q using zunm2r. | -c | * Copy the first NCONV columns of VQ into Z. | -c | * Postmultiply Z by R. | -c | The N by NCONV matrix Z is now a matrix representation | -c | of the approximate invariant subspace associated with | -c | the Ritz values in workl(iheig). 
The first NCONV | -c | columns of V are now approximate Schur vectors | -c | associated with the upper triangular matrix of order | -c | NCONV in workl(iuptri). | -c %--------------------------------------------------------% -c - call zunm2r('Right', 'Notranspose', n , - & ncv , nconv , workl(invsub), - & ldq , workev , v , - & ldv , workd(n+1) , ierr) - call zlacpy('All', n, nconv, v, ldv, z, ldz) -c - do 20 j=1, nconv -c -c %---------------------------------------------------% -c | Perform both a column and row scaling if the | -c | diagonal element of workl(invsub,ldq) is negative | -c | I'm lazy and don't take advantage of the upper | -c | triangular form of workl(iuptri,ldq). | -c | Note that since Q is orthogonal, R is a diagonal | -c | matrix consisting of plus or minus ones. | -c %---------------------------------------------------% -c - if ( dble( workl(invsub+(j-1)*ldq+j-1) ) .lt. - & dble(zero) ) then - call zscal(nconv, -one, workl(iuptri+j-1), ldq) - call zscal(nconv, -one, workl(iuptri+(j-1)*ldq), 1) - end if -c - 20 continue -c - if (howmny .eq. 'A') then -c -c %--------------------------------------------% -c | Compute the NCONV wanted eigenvectors of T | -c | located in workl(iuptri,ldq). | -c %--------------------------------------------% -c - do 30 j=1, ncv - if (j .le. nconv) then - select(j) = .true. - else - select(j) = .false. - end if - 30 continue -c - call ztrevc('Right', 'Select' , select , - & ncv , workl(iuptri), ldq , - & vl , 1 , workl(invsub), - & ldq , ncv , outncv , - & workev , rwork , ierr) -c - if (ierr .ne. 0) then - info = -9 - go to 9000 - end if -c -c %------------------------------------------------% -c | Scale the returning eigenvectors so that their | -c | Euclidean norms are all one. LAPACK subroutine | -c | ztrevc returns each eigenvector normalized so | -c | that the element of largest magnitude has | -c | magnitude 1. 
| -c %------------------------------------------------% -c - do 40 j=1, nconv - rtemp = dznrm2(ncv, workl(invsub+(j-1)*ldq), 1) - rtemp = dble(one) / rtemp - call zdscal ( ncv, rtemp, - & workl(invsub+(j-1)*ldq), 1 ) -c -c %------------------------------------------% -c | Ritz estimates can be obtained by taking | -c | the inner product of the last row of the | -c | Schur basis of H with eigenvectors of T. | -c | Note that the eigenvector matrix of T is | -c | upper triangular, thus the length of the | -c | inner product can be set to j. | -c %------------------------------------------% -c - workev(j) = wzdotc(j, workl(ihbds), 1, - & workl(invsub+(j-1)*ldq), 1) - 40 continue -c - if (msglvl .gt. 2) then - call zcopy(nconv, workl(invsub+ncv-1), ldq, - & workl(ihbds), 1) - call zvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Last row of the eigenvector matrix for T') - if (msglvl .gt. 3) then - call zmout(logfil , ncv, ncv , - & workl(invsub), ldq, ndigit, - & '_neupd: The eigenvector matrix for T') - end if - end if -c -c %---------------------------------------% -c | Copy Ritz estimates into workl(ihbds) | -c %---------------------------------------% -c - call zcopy(nconv, workev, 1, workl(ihbds), 1) -c -c %----------------------------------------------% -c | The eigenvector matrix Q of T is triangular. | -c | Form Z*Q. | -c %----------------------------------------------% -c - call ztrmm('Right' , 'Upper' , 'No transpose', - & 'Non-unit', n , nconv , - & one , workl(invsub), ldq , - & z , ldz) - end if -c - else -c -c %--------------------------------------------------% -c | An approximate invariant subspace is not needed. | -c | Place the Ritz values computed ZNAUPD into D. 
| -c %--------------------------------------------------% -c - call zcopy(nconv, workl(ritz), 1, d, 1) - call zcopy(nconv, workl(ritz), 1, workl(iheig), 1) - call zcopy(nconv, workl(bounds), 1, workl(ihbds), 1) -c - end if -c -c %------------------------------------------------% -c | Transform the Ritz values and possibly vectors | -c | and corresponding error bounds of OP to those | -c | of A*x = lambda*B*x. | -c %------------------------------------------------% -c - if (type .eq. 'REGULR') then -c - if (rvec) - & call zscal(ncv, rnorm, workl(ihbds), 1) -c - else -c -c %---------------------------------------% -c | A spectral transformation was used. | -c | * Determine the Ritz estimates of the | -c | Ritz values in the original system. | -c %---------------------------------------% -c - if (rvec) - & call zscal(ncv, rnorm, workl(ihbds), 1) -c - do 50 k=1, ncv - temp = workl(iheig+k-1) - workl(ihbds+k-1) = workl(ihbds+k-1) / temp / temp - 50 continue -c - end if -c -c %-----------------------------------------------------------% -c | * Transform the Ritz values back to the original system. | -c | For TYPE = 'SHIFTI' the transformation is | -c | lambda = 1/theta + sigma | -c | NOTES: | -c | *The Ritz vectors are not affected by the transformation. | -c %-----------------------------------------------------------% -c - if (type .eq. 'SHIFTI') then - do 60 k=1, nconv - d(k) = one / workl(iheig+k-1) + sigma - 60 continue - end if -c - if (type .ne. 'REGULR' .and. msglvl .gt. 1) then - call zvout (logfil, nconv, d, ndigit, - & '_neupd: Untransformed Ritz values.') - call zvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Ritz estimates of the untransformed Ritz values.') - else if ( msglvl .gt. 
1) then - call zvout (logfil, nconv, d, ndigit, - & '_neupd: Converged Ritz values.') - call zvout (logfil, nconv, workl(ihbds), ndigit, - & '_neupd: Associated Ritz estimates.') - end if -c -c %-------------------------------------------------% -c | Eigenvector Purification step. Formally perform | -c | one of inverse subspace iteration. Only used | -c | for MODE = 3. See reference 3. | -c %-------------------------------------------------% -c - if (rvec .and. howmny .eq. 'A' .and. type .eq. 'SHIFTI') then -c -c %------------------------------------------------% -c | Purify the computed Ritz vectors by adding a | -c | little bit of the residual vector: | -c | T | -c | resid(:)*( e s ) / theta | -c | NCV | -c | where H s = s theta. | -c %------------------------------------------------% -c - do 100 j=1, nconv - if (workl(iheig+j-1) .ne. zero) then - workev(j) = workl(invsub+(j-1)*ldq+ncv-1) / - & workl(iheig+j-1) - endif - 100 continue - -c %---------------------------------------% -c | Perform a rank one update to Z and | -c | purify all the Ritz vectors together. | -c %---------------------------------------% -c - call zgeru (n, nconv, one, resid, 1, workev, 1, z, ldz) -c - end if -c - 9000 continue -c - return -c -c %---------------% -c | End of zneupd| -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zngets.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zngets.f deleted file mode 100644 index fb8c4ec149..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zngets.f +++ /dev/null @@ -1,178 +0,0 @@ -c\BeginDoc -c -c\Name: zngets -c -c\Description: -c Given the eigenvalues of the upper Hessenberg matrix H, -c computes the NP shifts AMU that are zeros of the polynomial of -c degree NP which filters out components of the unwanted eigenvectors -c corresponding to the AMU's based on some given criteria. 
-c -c NOTE: call this even in the case of user specified shifts in order -c to sort the eigenvalues, and error bounds of H for later use. -c -c\Usage: -c call zngets -c ( ISHIFT, WHICH, KEV, NP, RITZ, BOUNDS ) -c -c\Arguments -c ISHIFT Integer. (INPUT) -c Method for selecting the implicit shifts at each iteration. -c ISHIFT = 0: user specified shifts -c ISHIFT = 1: exact shift with respect to the matrix H. -c -c WHICH Character*2. (INPUT) -c Shift selection criteria. -c 'LM' -> want the KEV eigenvalues of largest magnitude. -c 'SM' -> want the KEV eigenvalues of smallest magnitude. -c 'LR' -> want the KEV eigenvalues of largest REAL part. -c 'SR' -> want the KEV eigenvalues of smallest REAL part. -c 'LI' -> want the KEV eigenvalues of largest imaginary part. -c 'SI' -> want the KEV eigenvalues of smallest imaginary part. -c -c KEV Integer. (INPUT) -c The number of desired eigenvalues. -c -c NP Integer. (INPUT) -c The number of shifts to compute. -c -c RITZ Complex*16 array of length KEV+NP. (INPUT/OUTPUT) -c On INPUT, RITZ contains the the eigenvalues of H. -c On OUTPUT, RITZ are sorted so that the unwanted -c eigenvalues are in the first NP locations and the wanted -c portion is in the last KEV locations. When exact shifts are -c selected, the unwanted part corresponds to the shifts to -c be applied. Also, if ISHIFT .eq. 1, the unwanted eigenvalues -c are further sorted so that the ones with largest Ritz values -c are first. -c -c BOUNDS Complex*16 array of length KEV+NP. (INPUT/OUTPUT) -c Error bounds corresponding to the ordering in RITZ. -c -c -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Local variables: -c xxxxxx Complex*16 -c -c\Routines called: -c zsortc ARPACK sorting routine. -c ivout ARPACK utility routine that prints integers. -c second ARPACK utility routine for timing. -c zvout ARPACK utility routine that prints vectors. 
-c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c\SCCS Information: @(#) -c FILE: ngets.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\Remarks -c 1. This routine does not keep complex conjugate pairs of -c eigenvalues together. -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine zngets ( ishift, which, kev, np, ritz, bounds) -c -c %----------------------------------------------------% -c | Include files for debugging and timing information | -c %----------------------------------------------------% -c - include 'debug.h' - include 'stat.h' -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - integer ishift, kev, np -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex*16 - & bounds(kev+np), ritz(kev+np) -c -c %------------% -c | Parameters | -c %------------% -c - Complex*16 - & one, zero - parameter (one = (1.0D+0, 0.0D+0), zero = (0.0D+0, 0.0D+0)) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer msglvl -c -c %----------------------% -c | External Subroutines | -c %----------------------% -c - external zvout, zsortc, second -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c -c %-------------------------------% -c | Initialize timing statistics | -c | & message level for debugging | -c %-------------------------------% -c - call second (t0) - msglvl = mcgets -c - call zsortc (which, .true., kev+np, ritz, bounds) -c - if ( ishift .eq. 
1 ) then -c -c %-------------------------------------------------------% -c | Sort the unwanted Ritz values used as shifts so that | -c | the ones with largest Ritz estimates are first | -c | This will tend to minimize the effects of the | -c | forward instability of the iteration when the shifts | -c | are applied in subroutine znapps. | -c | Be careful and use 'SM' since we want to sort BOUNDS! | -c %-------------------------------------------------------% -c - call zsortc ( 'SM', .true., np, bounds, ritz ) -c - end if -c - call second (t1) - tcgets = tcgets + (t1 - t0) -c - if (msglvl .gt. 0) then - call ivout (logfil, 1, kev, ndigit, '_ngets: KEV is') - call ivout (logfil, 1, np, ndigit, '_ngets: NP is') - call zvout (logfil, kev+np, ritz, ndigit, - & '_ngets: Eigenvalues of current H matrix ') - call zvout (logfil, kev+np, bounds, ndigit, - & '_ngets: Ritz estimates of the current KEV+NP Ritz values') - end if -c - return -c -c %---------------% -c | End of zngets | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zsortc.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zsortc.f deleted file mode 100644 index 7dc688a06f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zsortc.f +++ /dev/null @@ -1,322 +0,0 @@ -c\BeginDoc -c -c\Name: zsortc -c -c\Description: -c Sorts the Complex*16 array in X into the order -c specified by WHICH and optionally applies the permutation to the -c Double precision array Y. -c -c\Usage: -c call zsortc -c ( WHICH, APPLY, N, X, Y ) -c -c\Arguments -c WHICH Character*2. (Input) -c 'LM' -> sort X into increasing order of magnitude. -c 'SM' -> sort X into decreasing order of magnitude. -c 'LR' -> sort X with real(X) in increasing algebraic order -c 'SR' -> sort X with real(X) in decreasing algebraic order -c 'LI' -> sort X with imag(X) in increasing algebraic order -c 'SI' -> sort X with imag(X) in decreasing algebraic order -c -c APPLY Logical. 
(Input) -c APPLY = .TRUE. -> apply the sorted order to array Y. -c APPLY = .FALSE. -> do not apply the sorted order to array Y. -c -c N Integer. (INPUT) -c Size of the arrays. -c -c X Complex*16 array of length N. (INPUT/OUTPUT) -c This is the array to be sorted. -c -c Y Complex*16 array of length N. (INPUT/OUTPUT) -c -c\EndDoc -c -c----------------------------------------------------------------------- -c -c\BeginLib -c -c\Routines called: -c dlapy2 LAPACK routine to compute sqrt(x**2+y**2) carefully. -c -c\Author -c Danny Sorensen Phuong Vu -c Richard Lehoucq CRPC / Rice University -c Dept. of Computational & Houston, Texas -c Applied Mathematics -c Rice University -c Houston, Texas -c -c Adapted from the sort routine in LANSO. -c -c\SCCS Information: @(#) -c FILE: sortc.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c\EndLib -c -c----------------------------------------------------------------------- -c - subroutine zsortc (which, apply, n, x, y) -c -c %------------------% -c | Scalar Arguments | -c %------------------% -c - character*2 which - logical apply - integer n -c -c %-----------------% -c | Array Arguments | -c %-----------------% -c - Complex*16 - & x(0:n-1), y(0:n-1) -c -c %---------------% -c | Local Scalars | -c %---------------% -c - integer i, igap, j - Complex*16 - & temp - Double precision - & temp1, temp2 -c -c %--------------------% -c | External functions | -c %--------------------% -c - Double precision - & dlapy2 -c -c %--------------------% -c | Intrinsic Functions | -c %--------------------% - Intrinsic - & dble, dimag -c -c %-----------------------% -c | Executable Statements | -c %-----------------------% -c - igap = n / 2 -c - if (which .eq. 'LM') then -c -c %--------------------------------------------% -c | Sort X into increasing order of magnitude. | -c %--------------------------------------------% -c - 10 continue - if (igap .eq. 
0) go to 9000 -c - do 30 i = igap, n-1 - j = i-igap - 20 continue -c - if (j.lt.0) go to 30 -c - temp1 = dlapy2(dble(x(j)),dimag(x(j))) - temp2 = dlapy2(dble(x(j+igap)),dimag(x(j+igap))) -c - if (temp1.gt.temp2) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 30 - end if - j = j-igap - go to 20 - 30 continue - igap = igap / 2 - go to 10 -c - else if (which .eq. 'SM') then -c -c %--------------------------------------------% -c | Sort X into decreasing order of magnitude. | -c %--------------------------------------------% -c - 40 continue - if (igap .eq. 0) go to 9000 -c - do 60 i = igap, n-1 - j = i-igap - 50 continue -c - if (j .lt. 0) go to 60 -c - temp1 = dlapy2(dble(x(j)),dimag(x(j))) - temp2 = dlapy2(dble(x(j+igap)),dimag(x(j+igap))) -c - if (temp1.lt.temp2) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 60 - endif - j = j-igap - go to 50 - 60 continue - igap = igap / 2 - go to 40 -c - else if (which .eq. 'LR') then -c -c %------------------------------------------------% -c | Sort XREAL into increasing order of algebraic. | -c %------------------------------------------------% -c - 70 continue - if (igap .eq. 0) go to 9000 -c - do 90 i = igap, n-1 - j = i-igap - 80 continue -c - if (j.lt.0) go to 90 -c - if (dble(x(j)).gt.dble(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 90 - endif - j = j-igap - go to 80 - 90 continue - igap = igap / 2 - go to 70 -c - else if (which .eq. 'SR') then -c -c %------------------------------------------------% -c | Sort XREAL into decreasing order of algebraic. | -c %------------------------------------------------% -c - 100 continue - if (igap .eq. 
0) go to 9000 - do 120 i = igap, n-1 - j = i-igap - 110 continue -c - if (j.lt.0) go to 120 -c - if (dble(x(j)).lt.dble(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 120 - endif - j = j-igap - go to 110 - 120 continue - igap = igap / 2 - go to 100 -c - else if (which .eq. 'LI') then -c -c %--------------------------------------------% -c | Sort XIMAG into increasing algebraic order | -c %--------------------------------------------% -c - 130 continue - if (igap .eq. 0) go to 9000 - do 150 i = igap, n-1 - j = i-igap - 140 continue -c - if (j.lt.0) go to 150 -c - if (dimag(x(j)).gt.dimag(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 150 - endif - j = j-igap - go to 140 - 150 continue - igap = igap / 2 - go to 130 -c - else if (which .eq. 'SI') then -c -c %---------------------------------------------% -c | Sort XIMAG into decreasing algebraic order | -c %---------------------------------------------% -c - 160 continue - if (igap .eq. 
0) go to 9000 - do 180 i = igap, n-1 - j = i-igap - 170 continue -c - if (j.lt.0) go to 180 -c - if (dimag(x(j)).lt.dimag(x(j+igap))) then - temp = x(j) - x(j) = x(j+igap) - x(j+igap) = temp -c - if (apply) then - temp = y(j) - y(j) = y(j+igap) - y(j+igap) = temp - end if - else - go to 180 - endif - j = j-igap - go to 170 - 180 continue - igap = igap / 2 - go to 160 - end if -c - 9000 continue - return -c -c %---------------% -c | End of zsortc | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zstatn.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zstatn.f deleted file mode 100644 index 1cdf5b3dfa..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/SRC/zstatn.f +++ /dev/null @@ -1,51 +0,0 @@ -c -c\SCCS Information: @(#) -c FILE: statn.F SID: 2.2 DATE OF SID: 4/20/96 RELEASE: 2 -c -c %---------------------------------------------% -c | Initialize statistic and timing information | -c | for complex nonsymmetric Arnoldi code. 
| -c %---------------------------------------------% - - subroutine zstatn -c -c %--------------------------------% -c | See stat.doc for documentation | -c %--------------------------------% -c - include 'stat.h' - -c %-----------------------% -c | Executable Statements | -c %-----------------------% - - nopx = 0 - nbx = 0 - nrorth = 0 - nitref = 0 - nrstrt = 0 - - tcaupd = 0.0D+0 - tcaup2 = 0.0D+0 - tcaitr = 0.0D+0 - tceigh = 0.0D+0 - tcgets = 0.0D+0 - tcapps = 0.0D+0 - tcconv = 0.0D+0 - titref = 0.0D+0 - tgetv0 = 0.0D+0 - trvec = 0.0D+0 - -c %----------------------------------------------------% -c | User time including reverse communication overhead | -c %----------------------------------------------------% - tmvopx = 0.0D+0 - tmvbx = 0.0D+0 - - return -c -c %---------------% -c | End of zstatn | -c %---------------% -c - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/cmout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/cmout.f deleted file mode 100644 index 1cdaf33e90..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/cmout.f +++ /dev/null @@ -1,250 +0,0 @@ -* -* Routine: CMOUT -* -* Purpose: Complex matrix output routine. -* -* Usage: CALL CMOUT (LOUT, M, N, A, LDA, IDIGIT, IFMT) -* -* Arguments -* M - Number of rows of A. (Input) -* N - Number of columns of A. (Input) -* A - Complex M by N matrix to be printed. (Input) -* LDA - Leading dimension of A exactly as specified in the -* dimension statement of the calling program. (Input) -* IFMT - Format to be used in printing matrix A. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*\SCCS Information: @(#) -* FILE: cmout.f SID: 2.1 DATE OF SID: 11/16/95 RELEASE: 2 -* -*----------------------------------------------------------------------- -* - SUBROUTINE CMOUT( LOUT, M, N, A, LDA, IDIGIT, IFMT ) -* ... 
-* ... SPECIFICATIONS FOR ARGUMENTS - INTEGER M, N, IDIGIT, LDA, LOUT - Complex - & A( LDA, * ) - CHARACTER IFMT*( * ) -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, J, NDIGIT, K1, K2, LLL - CHARACTER*1 ICOL( 3 ) - CHARACTER*80 LINE -* ... -* ... SPECIFICATIONS INTRINSICS - INTRINSIC MIN -* - DATA ICOL( 1 ), ICOL( 2 ), ICOL( 3 ) / 'C', 'o', - $ 'l' / -* ... -* ... FIRST EXECUTABLE STATEMENT -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A / 1X, A ) -* - IF( M.LE.0 .OR. N.LE.0 .OR. LDA.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 40 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9998 )( ICOL, I, I = K1, K2 ) - DO 30 I = 1, M - IF (K1.NE.N) THEN - WRITE( LOUT, 9994 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9984 )I, ( A( I, J ), J = K1, K2 ) - END IF - 30 CONTINUE - 40 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 60 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9997 )( ICOL, I, I = K1, K2 ) - DO 50 I = 1, M - IF (K1.NE.N) THEN - WRITE( LOUT, 9993 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9983 )I, ( A( I, J ), J = K1, K2 ) - END IF - 50 CONTINUE - 60 CONTINUE -* - ELSE IF( NDIGIT.LE.8 ) THEN - DO 80 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9996 )( ICOL, I, I = K1, K2 ) - DO 70 I = 1, M - IF (K1.NE.N) THEN - WRITE( LOUT, 9992 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9982 )I, ( A( I, J ), J = K1, K2 ) - END IF - 70 CONTINUE - 80 CONTINUE -* - ELSE - DO 100 K1 = 1, N - WRITE( LOUT, 9995 ) ICOL, K1 - DO 90 I = 1, M - WRITE( LOUT, 9991 )I, A( I, K1 ) 
- 90 CONTINUE - 100 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 120 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - WRITE( LOUT, 9998 )( ICOL, I, I = K1, K2 ) - DO 110 I = 1, M - IF ((K1+3).LE.N) THEN - WRITE( LOUT, 9974 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+3-N).EQ.1) THEN - WRITE( LOUT, 9964 )I, ( A( I, J ), J = k1, K2 ) - ELSE IF ((K1+3-N).EQ.2) THEN - WRITE( LOUT, 9954 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+3-N).EQ.3) THEN - WRITE( LOUT, 9944 )I, ( A( I, J ), J = K1, K2 ) - END IF - 110 CONTINUE - 120 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 140 K1 = 1, N, 3 - K2 = MIN0( N, K1+ 2) - WRITE( LOUT, 9997 )( ICOL, I, I = K1, K2 ) - DO 130 I = 1, M - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9973 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.1) THEN - WRITE( LOUT, 9963 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.2) THEN - WRITE( LOUT, 9953 )I, ( A( I, J ), J = K1, K2 ) - END IF - 130 CONTINUE - 140 CONTINUE -* - ELSE IF( NDIGIT.LE.8 ) THEN - DO 160 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - WRITE( LOUT, 9996 )( ICOL, I, I = K1, K2 ) - DO 150 I = 1, M - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9972 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.1) THEN - WRITE( LOUT, 9962 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.2) THEN - WRITE( LOUT, 9952 )I, ( A( I, J ), J = K1, K2 ) - END IF - 150 CONTINUE - 160 CONTINUE -* - ELSE - DO 180 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9995 )( ICOL, I, I = K1, K2 ) - DO 170 I = 1, M - IF ((K1+1).LE.N) THEN - WRITE( LOUT, 9971 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9961 )I, ( A( I, J ), J = K1, K2 ) - END IF - 170 CONTINUE - 180 CONTINUE - END IF - END IF - WRITE( LOUT, 9990 ) -* - 9998 FORMAT( 11X, 4( 9X, 3A1, I4, 9X ) ) - 9997 FORMAT( 10X, 4( 11X, 3A1, I4, 11X ) ) - 9996 FORMAT( 
10X, 3( 13X, 3A1, I4, 13X ) ) - 9995 FORMAT( 12X, 2( 18x, 3A1, I4, 18X ) ) -* -*======================================================== -* FORMAT FOR 72 COLUMN -*======================================================== -* -* DISPLAY 4 SIGNIFICANT DIGITS -* - 9994 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',E10.3,',',E10.3,') ') ) - 9984 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E10.3,',',E10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGITS -* - 9993 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',E12.5,',',E12.5,') ') ) - 9983 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E12.5,',',E12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGITS -* - 9992 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',E14.7,',',E14.7,') ') ) - 9982 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E14.7,',',E14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGITS -* - 9991 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E20.13,',',E20.13,')') ) - 9990 FORMAT( 1X, ' ' ) -* -* -*======================================================== -* FORMAT FOR 132 COLUMN -*======================================================== -* -* DISPLAY 4 SIGNIFICANT DIGIT -* - 9974 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,4('(',E10.3,',',E10.3,') ') ) - 9964 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,3('(',E10.3,',',E10.3,') ') ) - 9954 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',E10.3,',',E10.3,') ') ) - 9944 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E10.3,',',E10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGIT -* - 9973 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,3('(',E12.5,',',E12.5,') ') ) - 9963 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',E12.5,',',E12.5,') ') ) - 9953 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E12.5,',',E12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGIT -* - 9972 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,3('(',E14.7,',',E14.7,') ') ) - 9962 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',E14.7,',',E14.7,') ') ) - 9952 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E14.7,',',E14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGIT -* - 9971 FORMAT( 1X, ' Row', I4, ':', 1X, 
1P,2('(',E20.13,',',E20.13, - & ') ')) - 9961 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',E20.13,',',E20.13, - & ') ')) - -* -* -* -* - RETURN - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/cvout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/cvout.f deleted file mode 100644 index 31c22fe029..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/cvout.f +++ /dev/null @@ -1,240 +0,0 @@ -c----------------------------------------------------------------------- -c -c\SCCS Information: @(#) -c FILE: cvout.f SID: 2.1 DATE OF SID: 11/16/95 RELEASE: 2 -c -*----------------------------------------------------------------------- -* Routine: CVOUT -* -* Purpose: Complex vector output routine. -* -* Usage: CALL CVOUT (LOUT, N, CX, IDIGIT, IFMT) -* -* Arguments -* N - Length of array CX. (Input) -* CX - Complex array to be printed. (Input) -* IFMT - Format to be used in printing array CX. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*----------------------------------------------------------------------- -* - SUBROUTINE CVOUT( LOUT, N, CX, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS - INTEGER N, IDIGIT, LOUT - Complex - & CX( * ) - CHARACTER IFMT*( * ) -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, NDIGIT, K1, K2, LLL - CHARACTER*80 LINE -* ... -* ... 
FIRST EXECUTABLE STATEMENT -* -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A / 1X, A ) -* - IF( N.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 30 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF (K1.NE.N) THEN - WRITE( LOUT, 9998 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE - WRITE( LOUT, 9997 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 30 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 40 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF (K1.NE.N) THEN - WRITE( LOUT, 9988 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE - WRITE( LOUT, 9987 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 40 CONTINUE - ELSE IF( NDIGIT.LE.8 ) THEN - DO 50 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF (K1.NE.N) THEN - WRITE( LOUT, 9978 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE - WRITE( LOUT, 9977 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 50 CONTINUE - ELSE - DO 60 K1 = 1, N - WRITE( LOUT, 9968 )K1, K1, CX( I ) - 60 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 70 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - IF ((K1+3).LE.N) THEN - WRITE( LOUT, 9958 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+3-N) .EQ. 1) THEN - WRITE( LOUT, 9957 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+3-N) .EQ. 2) THEN - WRITE( LOUT, 9956 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+3-N) .EQ. 
1) THEN - WRITE( LOUT, 9955 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 70 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 80 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9948 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 1) THEN - WRITE( LOUT, 9947 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 2) THEN - WRITE( LOUT, 9946 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 80 CONTINUE - ELSE IF( NDIGIT.LE.8 ) THEN - DO 90 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9938 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 1) THEN - WRITE( LOUT, 9937 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 2) THEN - WRITE( LOUT, 9936 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 90 CONTINUE - ELSE - DO 100 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9928 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 1) THEN - WRITE( LOUT, 9927 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 100 CONTINUE - END IF - END IF - WRITE( LOUT, 9994 ) - RETURN -* -*======================================================================= -* FORMAT FOR 72 COLUMNS -*======================================================================= -* -* DISPLAY 4 SIGNIFICANT DIGITS -* - 9998 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E10.3,',',E10.3,') ') ) - 9997 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E10.3,',',E10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGITS -* - 9988 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E12.5,',',E12.5,') ') ) - 9987 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E12.5,',',E12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGITS -* - 9978 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E14.7,',',E14.7,') ') ) - 9977 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E14.7,',',E14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGITS -* - 9968 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E20.13,',',E20.13,') ') ) -* 
-*========================================================================= -* FORMAT FOR 132 COLUMNS -*========================================================================= -* -* DISPLAY 4 SIGNIFICANT DIGITS -* - 9958 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,4('(',E10.3,',',E10.3,') ') ) - 9957 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,3('(',E10.3,',',E10.3,') ') ) - 9956 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E10.3,',',E10.3,') ') ) - 9955 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E10.3,',',E10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGITS -* - 9948 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,3('(',E12.5,',',E12.5,') ') ) - 9947 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E12.5,',',E12.5,') ') ) - 9946 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E12.5,',',E12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGITS -* - 9938 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,3('(',E14.7,',',E14.7,') ') ) - 9937 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E14.7,',',E14.7,') ') ) - 9936 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E14.7,',',E14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGITS -* - 9928 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',E20.13,',',E20.13,') ') ) - 9927 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',E20.13,',',E20.13,') ') ) -* -* -* - 9994 FORMAT( 1X, ' ' ) - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/dmout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/dmout.f deleted file mode 100644 index 72edc042fa..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/dmout.f +++ /dev/null @@ -1,167 +0,0 @@ -*----------------------------------------------------------------------- -* Routine: DMOUT -* -* Purpose: Real matrix output routine. -* -* Usage: CALL DMOUT (LOUT, M, N, A, LDA, IDIGIT, IFMT) -* -* Arguments -* M - Number of rows of A. (Input) -* N - Number of columns of A. (Input) -* A - Real M by N matrix to be printed. 
(Input) -* LDA - Leading dimension of A exactly as specified in the -* dimension statement of the calling program. (Input) -* IFMT - Format to be used in printing matrix A. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*----------------------------------------------------------------------- -* - SUBROUTINE DMOUT( LOUT, M, N, A, LDA, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES -* .. Scalar Arguments .. - CHARACTER*( * ) IFMT - INTEGER IDIGIT, LDA, LOUT, M, N -* .. -* .. Array Arguments .. - DOUBLE PRECISION A( LDA, * ) -* .. -* .. Local Scalars .. - CHARACTER*80 LINE - INTEGER I, J, K1, K2, LLL, NDIGIT -* .. -* .. Local Arrays .. - CHARACTER ICOL( 3 ) -* .. -* .. Intrinsic Functions .. - INTRINSIC LEN, MIN, MIN0 -* .. -* .. Data statements .. - DATA ICOL( 1 ), ICOL( 2 ), ICOL( 3 ) / 'C', 'o', - $ 'l' / -* .. -* .. Executable Statements .. -* ... -* ... FIRST EXECUTABLE STATEMENT -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, FMT = 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A, / 1X, A ) -* - IF( M.LE.0 .OR. N.LE.0 .OR. 
LDA.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 40 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, FMT = 9998 )( ICOL, I, I = K1, K2 ) - DO 30 I = 1, M - WRITE( LOUT, FMT = 9994 )I, ( A( I, J ), J = K1, K2 ) - 30 CONTINUE - 40 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 60 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - WRITE( LOUT, FMT = 9997 )( ICOL, I, I = K1, K2 ) - DO 50 I = 1, M - WRITE( LOUT, FMT = 9993 )I, ( A( I, J ), J = K1, K2 ) - 50 CONTINUE - 60 CONTINUE -* - ELSE IF( NDIGIT.LE.10 ) THEN - DO 80 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - WRITE( LOUT, FMT = 9996 )( ICOL, I, I = K1, K2 ) - DO 70 I = 1, M - WRITE( LOUT, FMT = 9992 )I, ( A( I, J ), J = K1, K2 ) - 70 CONTINUE - 80 CONTINUE -* - ELSE - DO 100 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, FMT = 9995 )( ICOL, I, I = K1, K2 ) - DO 90 I = 1, M - WRITE( LOUT, FMT = 9991 )I, ( A( I, J ), J = K1, K2 ) - 90 CONTINUE - 100 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 120 K1 = 1, N, 10 - K2 = MIN0( N, K1+9 ) - WRITE( LOUT, FMT = 9998 )( ICOL, I, I = K1, K2 ) - DO 110 I = 1, M - WRITE( LOUT, FMT = 9994 )I, ( A( I, J ), J = K1, K2 ) - 110 CONTINUE - 120 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 140 K1 = 1, N, 8 - K2 = MIN0( N, K1+7 ) - WRITE( LOUT, FMT = 9997 )( ICOL, I, I = K1, K2 ) - DO 130 I = 1, M - WRITE( LOUT, FMT = 9993 )I, ( A( I, J ), J = K1, K2 ) - 130 CONTINUE - 140 CONTINUE -* - ELSE IF( NDIGIT.LE.10 ) THEN - DO 160 K1 = 1, N, 6 - K2 = MIN0( N, K1+5 ) - WRITE( LOUT, FMT = 9996 )( ICOL, I, I = 
K1, K2 ) - DO 150 I = 1, M - WRITE( LOUT, FMT = 9992 )I, ( A( I, J ), J = K1, K2 ) - 150 CONTINUE - 160 CONTINUE -* - ELSE - DO 180 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, FMT = 9995 )( ICOL, I, I = K1, K2 ) - DO 170 I = 1, M - WRITE( LOUT, FMT = 9991 )I, ( A( I, J ), J = K1, K2 ) - 170 CONTINUE - 180 CONTINUE - END IF - END IF - WRITE( LOUT, FMT = 9990 ) -* - 9998 FORMAT( 10X, 10( 4X, 3A1, I4, 1X ) ) - 9997 FORMAT( 10X, 8( 5X, 3A1, I4, 2X ) ) - 9996 FORMAT( 10X, 6( 7X, 3A1, I4, 4X ) ) - 9995 FORMAT( 10X, 5( 9X, 3A1, I4, 6X ) ) - 9994 FORMAT( 1X, ' Row', I4, ':', 1X, 1P, 10D12.3 ) - 9993 FORMAT( 1X, ' Row', I4, ':', 1X, 1P, 8D14.5 ) - 9992 FORMAT( 1X, ' Row', I4, ':', 1X, 1P, 6D18.9 ) - 9991 FORMAT( 1X, ' Row', I4, ':', 1X, 1P, 5D22.13 ) - 9990 FORMAT( 1X, ' ' ) -* - RETURN - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/dvout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/dvout.f deleted file mode 100644 index 4138e52c6f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/dvout.f +++ /dev/null @@ -1,122 +0,0 @@ -*----------------------------------------------------------------------- -* Routine: DVOUT -* -* Purpose: Real vector output routine. -* -* Usage: CALL DVOUT (LOUT, N, SX, IDIGIT, IFMT) -* -* Arguments -* N - Length of array SX. (Input) -* SX - Real array to be printed. (Input) -* IFMT - Format to be used in printing array SX. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*----------------------------------------------------------------------- -* - SUBROUTINE DVOUT( LOUT, N, SX, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES -* .. Scalar Arguments .. - CHARACTER*( * ) IFMT - INTEGER IDIGIT, LOUT, N -* .. -* .. Array Arguments .. - DOUBLE PRECISION SX( * ) -* .. -* .. 
Local Scalars .. - CHARACTER*80 LINE - INTEGER I, K1, K2, LLL, NDIGIT -* .. -* .. Intrinsic Functions .. - INTRINSIC LEN, MIN, MIN0 -* .. -* .. Executable Statements .. -* ... -* ... FIRST EXECUTABLE STATEMENT -* -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, FMT = 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A, / 1X, A ) -* - IF( N.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 30 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, FMT = 9998 )K1, K2, ( SX( I ), I = K1, K2 ) - 30 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 40 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - WRITE( LOUT, FMT = 9997 )K1, K2, ( SX( I ), I = K1, K2 ) - 40 CONTINUE - ELSE IF( NDIGIT.LE.10 ) THEN - DO 50 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - WRITE( LOUT, FMT = 9996 )K1, K2, ( SX( I ), I = K1, K2 ) - 50 CONTINUE - ELSE - DO 60 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, FMT = 9995 )K1, K2, ( SX( I ), I = K1, K2 ) - 60 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 70 K1 = 1, N, 10 - K2 = MIN0( N, K1+9 ) - WRITE( LOUT, FMT = 9998 )K1, K2, ( SX( I ), I = K1, K2 ) - 70 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 80 K1 = 1, N, 8 - K2 = MIN0( N, K1+7 ) - WRITE( LOUT, FMT = 9997 )K1, K2, ( SX( I ), I = K1, K2 ) - 80 CONTINUE - ELSE IF( NDIGIT.LE.10 ) THEN - DO 90 K1 = 1, N, 6 - K2 = MIN0( N, K1+5 ) - WRITE( LOUT, FMT = 9996 )K1, K2, ( SX( I ), I = K1, K2 ) - 90 CONTINUE - ELSE - DO 100 
K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, FMT = 9995 )K1, K2, ( SX( I ), I = K1, K2 ) - 100 CONTINUE - END IF - END IF - WRITE( LOUT, FMT = 9994 ) - RETURN - 9998 FORMAT( 1X, I4, ' - ', I4, ':', 1P, 10D12.3 ) - 9997 FORMAT( 1X, I4, ' - ', I4, ':', 1X, 1P, 8D14.5 ) - 9996 FORMAT( 1X, I4, ' - ', I4, ':', 1X, 1P, 6D18.9 ) - 9995 FORMAT( 1X, I4, ' - ', I4, ':', 1X, 1P, 5D24.13 ) - 9994 FORMAT( 1X, ' ' ) - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/icnteq.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/icnteq.f deleted file mode 100644 index dc345f9bad..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/icnteq.f +++ /dev/null @@ -1,18 +0,0 @@ -c -c----------------------------------------------------------------------- -c -c Count the number of elements equal to a specified integer value. -c - integer function icnteq (n, array, value) -c - integer n, value - integer array(*) -c - k = 0 - do 10 i = 1, n - if (array(i) .eq. value) k = k + 1 - 10 continue - icnteq = k -c - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/icopy.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/icopy.f deleted file mode 100644 index f9e8c11003..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/icopy.f +++ /dev/null @@ -1,77 +0,0 @@ -*-------------------------------------------------------------------- -*\Documentation -* -*\Name: ICOPY -* -*\Description: -* ICOPY copies an integer vector lx to an integer vector ly. -* -*\Usage: -* call icopy ( n, lx, inc, ly, incy ) -* -*\Arguments: -* n integer (input) -* On entry, n is the number of elements of lx to be -c copied to ly. -* -* lx integer array (input) -* On entry, lx is the integer vector to be copied. -* -* incx integer (input) -* On entry, incx is the increment between elements of lx. -* -* ly integer array (input) -* On exit, ly is the integer vector that contains the -* copy of lx. 
-* -* incy integer (input) -* On entry, incy is the increment between elements of ly. -* -*\Enddoc -* -*-------------------------------------------------------------------- -* - subroutine icopy( n, lx, incx, ly, incy ) -* -* ---------------------------- -* Specifications for arguments -* ---------------------------- - integer incx, incy, n - integer lx( 1 ), ly( 1 ) -* -* ---------------------------------- -* Specifications for local variables -* ---------------------------------- - integer i, ix, iy -* -* -------------------------- -* First executable statement -* -------------------------- - if( n.le.0 ) - $ return - if( incx.eq.1 .and. incy.eq.1 ) - $ go to 20 -c -c.....code for unequal increments or equal increments -c not equal to 1 - ix = 1 - iy = 1 - if( incx.lt.0 ) - $ ix = ( -n+1 )*incx + 1 - if( incy.lt.0 ) - $ iy = ( -n+1 )*incy + 1 - do 10 i = 1, n - ly( iy ) = lx( ix ) - ix = ix + incx - iy = iy + incy - 10 continue - return -c -c.....code for both increments equal to 1 -c - 20 continue - do 30 i = 1, n - ly( i ) = lx( i ) - 30 continue - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/iset.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/iset.f deleted file mode 100644 index cb690bc3e9..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/iset.f +++ /dev/null @@ -1,16 +0,0 @@ -c -c----------------------------------------------------------------------- -c -c Only work with increment equal to 1 right now. 
-c - subroutine iset (n, value, array, inc) -c - integer n, value, inc - integer array(*) -c - do 10 i = 1, n - array(i) = value - 10 continue -c - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/iswap.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/iswap.f deleted file mode 100644 index 088798d007..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/iswap.f +++ /dev/null @@ -1,55 +0,0 @@ - subroutine iswap (n,sx,incx,sy,incy) -c -c interchanges two vectors. -c uses unrolled loops for increments equal to 1. -c jack dongarra, linpack, 3/11/78. -c - integer sx(1),sy(1),stemp - integer i,incx,incy,ix,iy,m,mp1,n -c - if(n.le.0)return - if(incx.eq.1.and.incy.eq.1)go to 20 -c -c code for unequal increments or equal increments not equal -c to 1 -c - ix = 1 - iy = 1 - if(incx.lt.0)ix = (-n+1)*incx + 1 - if(incy.lt.0)iy = (-n+1)*incy + 1 - do 10 i = 1,n - stemp = sx(ix) - sx(ix) = sy(iy) - sy(iy) = stemp - ix = ix + incx - iy = iy + incy - 10 continue - return -c -c code for both increments equal to 1 -c -c -c clean-up loop -c - 20 m = mod(n,3) - if( m .eq. 0 ) go to 40 - do 30 i = 1,m - stemp = sx(i) - sx(i) = sy(i) - sy(i) = stemp - 30 continue - if( n .lt. 3 ) return - 40 mp1 = m + 1 - do 50 i = mp1,n,3 - stemp = sx(i) - sx(i) = sy(i) - sy(i) = stemp - stemp = sx(i + 1) - sx(i + 1) = sy(i + 1) - sy(i + 1) = stemp - stemp = sx(i + 2) - sx(i + 2) = sy(i + 2) - sy(i + 2) = stemp - 50 continue - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/ivout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/ivout.f deleted file mode 100644 index e97118a86b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/ivout.f +++ /dev/null @@ -1,120 +0,0 @@ -C----------------------------------------------------------------------- -C Routine: IVOUT -C -C Purpose: Integer vector output routine. 
-C -C Usage: CALL IVOUT (LOUT, N, IX, IDIGIT, IFMT) -C -C Arguments -C N - Length of array IX. (Input) -C IX - Integer array to be printed. (Input) -C IFMT - Format to be used in printing array IX. (Input) -C IDIGIT - Print up to ABS(IDIGIT) decimal digits / number. (Input) -C If IDIGIT .LT. 0, printing is done with 72 columns. -C If IDIGIT .GT. 0, printing is done with 132 columns. -C -C----------------------------------------------------------------------- -C - SUBROUTINE IVOUT (LOUT, N, IX, IDIGIT, IFMT) -C ... -C ... SPECIFICATIONS FOR ARGUMENTS - INTEGER IX(*), N, IDIGIT, LOUT - CHARACTER IFMT*(*) -C ... -C ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, NDIGIT, K1, K2, LLL - CHARACTER*80 LINE -* ... -* ... SPECIFICATIONS INTRINSICS - INTRINSIC MIN -* -C - LLL = MIN ( LEN ( IFMT ), 80 ) - DO 1 I = 1, LLL - LINE(I:I) = '-' - 1 CONTINUE -C - DO 2 I = LLL+1, 80 - LINE(I:I) = ' ' - 2 CONTINUE -C - WRITE ( LOUT, 2000 ) IFMT, LINE(1:LLL) - 2000 FORMAT ( /1X, A /1X, A ) -C - IF (N .LE. 0) RETURN - NDIGIT = IDIGIT - IF (IDIGIT .EQ. 0) NDIGIT = 4 -C -C======================================================================= -C CODE FOR OUTPUT USING 72 COLUMNS FORMAT -C======================================================================= -C - IF (IDIGIT .LT. 0) THEN -C - NDIGIT = -IDIGIT - IF (NDIGIT .LE. 4) THEN - DO 10 K1 = 1, N, 10 - K2 = MIN0(N,K1+9) - WRITE(LOUT,1000) K1,K2,(IX(I),I=K1,K2) - 10 CONTINUE -C - ELSE IF (NDIGIT .LE. 6) THEN - DO 30 K1 = 1, N, 7 - K2 = MIN0(N,K1+6) - WRITE(LOUT,1001) K1,K2,(IX(I),I=K1,K2) - 30 CONTINUE -C - ELSE IF (NDIGIT .LE. 
10) THEN - DO 50 K1 = 1, N, 5 - K2 = MIN0(N,K1+4) - WRITE(LOUT,1002) K1,K2,(IX(I),I=K1,K2) - 50 CONTINUE -C - ELSE - DO 70 K1 = 1, N, 3 - K2 = MIN0(N,K1+2) - WRITE(LOUT,1003) K1,K2,(IX(I),I=K1,K2) - 70 CONTINUE - END IF -C -C======================================================================= -C CODE FOR OUTPUT USING 132 COLUMNS FORMAT -C======================================================================= -C - ELSE -C - IF (NDIGIT .LE. 4) THEN - DO 90 K1 = 1, N, 20 - K2 = MIN0(N,K1+19) - WRITE(LOUT,1000) K1,K2,(IX(I),I=K1,K2) - 90 CONTINUE -C - ELSE IF (NDIGIT .LE. 6) THEN - DO 110 K1 = 1, N, 15 - K2 = MIN0(N,K1+14) - WRITE(LOUT,1001) K1,K2,(IX(I),I=K1,K2) - 110 CONTINUE -C - ELSE IF (NDIGIT .LE. 10) THEN - DO 130 K1 = 1, N, 10 - K2 = MIN0(N,K1+9) - WRITE(LOUT,1002) K1,K2,(IX(I),I=K1,K2) - 130 CONTINUE -C - ELSE - DO 150 K1 = 1, N, 7 - K2 = MIN0(N,K1+6) - WRITE(LOUT,1003) K1,K2,(IX(I),I=K1,K2) - 150 CONTINUE - END IF - END IF - WRITE (LOUT,1004) -C - 1000 FORMAT(1X,I4,' - ',I4,':',20(1X,I5)) - 1001 FORMAT(1X,I4,' - ',I4,':',15(1X,I7)) - 1002 FORMAT(1X,I4,' - ',I4,':',10(1X,I11)) - 1003 FORMAT(1X,I4,' - ',I4,':',7(1X,I15)) - 1004 FORMAT(1X,' ') -C - RETURN - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/second.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/second.f deleted file mode 100644 index f75394c831..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/second.f +++ /dev/null @@ -1,23 +0,0 @@ - SUBROUTINE SECOND( T ) -* - REAL T -* -* -- LAPACK auxiliary routine (preliminary version) -- -* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., -* Courant Institute, Argonne National Lab, and Rice University -* July 26, 1991 -* -* Purpose -* ======= -* -* SECOND returns the user time for a process in seconds. -* This version gets the time from the system function CPU_TIME. 
-* - - CALL CPU_TIME(T) - - RETURN -* -* End of SECOND -* - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/smout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/smout.f deleted file mode 100644 index 8d90bf2099..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/smout.f +++ /dev/null @@ -1,157 +0,0 @@ -*----------------------------------------------------------------------- -* Routine: SMOUT -* -* Purpose: Real matrix output routine. -* -* Usage: CALL SMOUT (LOUT, M, N, A, LDA, IDIGIT, IFMT) -* -* Arguments -* M - Number of rows of A. (Input) -* N - Number of columns of A. (Input) -* A - Real M by N matrix to be printed. (Input) -* LDA - Leading dimension of A exactly as specified in the -* dimension statement of the calling program. (Input) -* IFMT - Format to be used in printing matrix A. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*----------------------------------------------------------------------- -* - SUBROUTINE SMOUT( LOUT, M, N, A, LDA, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS - INTEGER M, N, IDIGIT, LDA, LOUT - REAL A( LDA, * ) - CHARACTER IFMT*( * ) -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, J, NDIGIT, K1, K2, LLL - CHARACTER*1 ICOL( 3 ) - CHARACTER*80 LINE -* ... -* ... SPECIFICATIONS INTRINSICS - INTRINSIC MIN -* - DATA ICOL( 1 ), ICOL( 2 ), ICOL( 3 ) / 'C', 'o', - $ 'l' / -* ... -* ... FIRST EXECUTABLE STATEMENT -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A / 1X, A ) -* - IF( M.LE.0 .OR. N.LE.0 .OR. 
LDA.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 40 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, 9998 )( ICOL, I, I = K1, K2 ) - DO 30 I = 1, M - WRITE( LOUT, 9994 )I, ( A( I, J ), J = K1, K2 ) - 30 CONTINUE - 40 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 60 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - WRITE( LOUT, 9997 )( ICOL, I, I = K1, K2 ) - DO 50 I = 1, M - WRITE( LOUT, 9993 )I, ( A( I, J ), J = K1, K2 ) - 50 CONTINUE - 60 CONTINUE -* - ELSE IF( NDIGIT.LE.10 ) THEN - DO 80 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - WRITE( LOUT, 9996 )( ICOL, I, I = K1, K2 ) - DO 70 I = 1, M - WRITE( LOUT, 9992 )I, ( A( I, J ), J = K1, K2 ) - 70 CONTINUE - 80 CONTINUE -* - ELSE - DO 100 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9995 )( ICOL, I, I = K1, K2 ) - DO 90 I = 1, M - WRITE( LOUT, 9991 )I, ( A( I, J ), J = K1, K2 ) - 90 CONTINUE - 100 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 120 K1 = 1, N, 10 - K2 = MIN0( N, K1+9 ) - WRITE( LOUT, 9998 )( ICOL, I, I = K1, K2 ) - DO 110 I = 1, M - WRITE( LOUT, 9994 )I, ( A( I, J ), J = K1, K2 ) - 110 CONTINUE - 120 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 140 K1 = 1, N, 8 - K2 = MIN0( N, K1+7 ) - WRITE( LOUT, 9997 )( ICOL, I, I = K1, K2 ) - DO 130 I = 1, M - WRITE( LOUT, 9993 )I, ( A( I, J ), J = K1, K2 ) - 130 CONTINUE - 140 CONTINUE -* - ELSE IF( NDIGIT.LE.10 ) THEN - DO 160 K1 = 1, N, 6 - K2 = MIN0( N, K1+5 ) - WRITE( LOUT, 9996 )( ICOL, I, I = K1, K2 ) - DO 150 I = 1, M - WRITE( LOUT, 9992 )I, ( A( I, J ), J = K1, K2 ) - 
150 CONTINUE - 160 CONTINUE -* - ELSE - DO 180 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, 9995 )( ICOL, I, I = K1, K2 ) - DO 170 I = 1, M - WRITE( LOUT, 9991 )I, ( A( I, J ), J = K1, K2 ) - 170 CONTINUE - 180 CONTINUE - END IF - END IF - WRITE( LOUT, 9990 ) -* - 9998 FORMAT( 10X, 10( 4X, 3A1, I4, 1X ) ) - 9997 FORMAT( 10X, 8( 5X, 3A1, I4, 2X ) ) - 9996 FORMAT( 10X, 6( 7X, 3A1, I4, 4X ) ) - 9995 FORMAT( 10X, 5( 9X, 3A1, I4, 6X ) ) - 9994 FORMAT( 1X, ' Row', I4, ':', 1X, 1P10E12.3 ) - 9993 FORMAT( 1X, ' Row', I4, ':', 1X, 1P8E14.5 ) - 9992 FORMAT( 1X, ' Row', I4, ':', 1X, 1P6E18.9 ) - 9991 FORMAT( 1X, ' Row', I4, ':', 1X, 1P5E22.13 ) - 9990 FORMAT( 1X, ' ' ) -* - RETURN - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/svout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/svout.f deleted file mode 100644 index 4363b924b2..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/svout.f +++ /dev/null @@ -1,112 +0,0 @@ -*----------------------------------------------------------------------- -* Routine: SVOUT -* -* Purpose: Real vector output routine. -* -* Usage: CALL SVOUT (LOUT, N, SX, IDIGIT, IFMT) -* -* Arguments -* N - Length of array SX. (Input) -* SX - Real array to be printed. (Input) -* IFMT - Format to be used in printing array SX. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*----------------------------------------------------------------------- -* - SUBROUTINE SVOUT( LOUT, N, SX, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS - INTEGER N, IDIGIT, LOUT - REAL SX( * ) - CHARACTER IFMT*( * ) -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, NDIGIT, K1, K2, LLL - CHARACTER*80 LINE -* ... -* ... 
FIRST EXECUTABLE STATEMENT -* -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A / 1X, A ) -* - IF( N.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 30 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, 9998 )K1, K2, ( SX( I ), I = K1, K2 ) - 30 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 40 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - WRITE( LOUT, 9997 )K1, K2, ( SX( I ), I = K1, K2 ) - 40 CONTINUE - ELSE IF( NDIGIT.LE.10 ) THEN - DO 50 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - WRITE( LOUT, 9996 )K1, K2, ( SX( I ), I = K1, K2 ) - 50 CONTINUE - ELSE - DO 60 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9995 )K1, K2, ( SX( I ), I = K1, K2 ) - 60 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 70 K1 = 1, N, 10 - K2 = MIN0( N, K1+9 ) - WRITE( LOUT, 9998 )K1, K2, ( SX( I ), I = K1, K2 ) - 70 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 80 K1 = 1, N, 8 - K2 = MIN0( N, K1+7 ) - WRITE( LOUT, 9997 )K1, K2, ( SX( I ), I = K1, K2 ) - 80 CONTINUE - ELSE IF( NDIGIT.LE.10 ) THEN - DO 90 K1 = 1, N, 6 - K2 = MIN0( N, K1+5 ) - WRITE( LOUT, 9996 )K1, K2, ( SX( I ), I = K1, K2 ) - 90 CONTINUE - ELSE - DO 100 K1 = 1, N, 5 - K2 = MIN0( N, K1+4 ) - WRITE( LOUT, 9995 )K1, K2, ( SX( I ), I = K1, K2 ) - 100 CONTINUE - END IF - END IF - WRITE( LOUT, 9994 ) - RETURN - 9998 FORMAT( 1X, I4, ' - ', I4, ':', 1P10E12.3 ) - 9997 FORMAT( 1X, I4, ' - 
', I4, ':', 1X, 1P8E14.5 ) - 9996 FORMAT( 1X, I4, ' - ', I4, ':', 1X, 1P6E18.9 ) - 9995 FORMAT( 1X, I4, ' - ', I4, ':', 1X, 1P5E24.13 ) - 9994 FORMAT( 1X, ' ' ) - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/zmout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/zmout.f deleted file mode 100644 index 9877aa8336..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/zmout.f +++ /dev/null @@ -1,250 +0,0 @@ -* -* Routine: ZMOUT -* -* Purpose: Complex*16 matrix output routine. -* -* Usage: CALL ZMOUT (LOUT, M, N, A, LDA, IDIGIT, IFMT) -* -* Arguments -* M - Number of rows of A. (Input) -* N - Number of columns of A. (Input) -* A - Complex*16 M by N matrix to be printed. (Input) -* LDA - Leading dimension of A exactly as specified in the -* dimension statement of the calling program. (Input) -* IFMT - Format to be used in printing matrix A. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*\SCCS Information: @(#) -* FILE: zmout.f SID: 2.1 DATE OF SID: 11/16/95 RELEASE: 2 -* -*----------------------------------------------------------------------- -* - SUBROUTINE ZMOUT( LOUT, M, N, A, LDA, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS - INTEGER M, N, IDIGIT, LDA, LOUT - Complex*16 - & A( LDA, * ) - CHARACTER IFMT*( * ) -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, J, NDIGIT, K1, K2, LLL - CHARACTER*1 ICOL( 3 ) - CHARACTER*80 LINE -* ... -* ... SPECIFICATIONS INTRINSICS - INTRINSIC MIN -* - DATA ICOL( 1 ), ICOL( 2 ), ICOL( 3 ) / 'C', 'o', - $ 'l' / -* ... -* ... FIRST EXECUTABLE STATEMENT -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A / 1X, A ) -* - IF( M.LE.0 .OR. 
N.LE.0 .OR. LDA.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 40 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9998 )( ICOL, I, I = K1, K2 ) - DO 30 I = 1, M - IF (K1.NE.N) THEN - WRITE( LOUT, 9994 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9984 )I, ( A( I, J ), J = K1, K2 ) - END IF - 30 CONTINUE - 40 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 60 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9997 )( ICOL, I, I = K1, K2 ) - DO 50 I = 1, M - IF (K1.NE.N) THEN - WRITE( LOUT, 9993 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9983 )I, ( A( I, J ), J = K1, K2 ) - END IF - 50 CONTINUE - 60 CONTINUE -* - ELSE IF( NDIGIT.LE.8 ) THEN - DO 80 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9996 )( ICOL, I, I = K1, K2 ) - DO 70 I = 1, M - IF (K1.NE.N) THEN - WRITE( LOUT, 9992 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9982 )I, ( A( I, J ), J = K1, K2 ) - END IF - 70 CONTINUE - 80 CONTINUE -* - ELSE - DO 100 K1 = 1, N - WRITE( LOUT, 9995 ) ICOL, K1 - DO 90 I = 1, M - WRITE( LOUT, 9991 )I, A( I, K1 ) - 90 CONTINUE - 100 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 120 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - WRITE( LOUT, 9998 )( ICOL, I, I = K1, K2 ) - DO 110 I = 1, M - IF ((K1+3).LE.N) THEN - WRITE( LOUT, 9974 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+3-N).EQ.1) THEN - WRITE( LOUT, 9964 )I, ( A( I, J ), J = k1, K2 ) - ELSE IF ((K1+3-N).EQ.2) THEN - WRITE( LOUT, 9954 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+3-N).EQ.3) THEN - WRITE( LOUT, 
9944 )I, ( A( I, J ), J = K1, K2 ) - END IF - 110 CONTINUE - 120 CONTINUE -* - ELSE IF( NDIGIT.LE.6 ) THEN - DO 140 K1 = 1, N, 3 - K2 = MIN0( N, K1+ 2) - WRITE( LOUT, 9997 )( ICOL, I, I = K1, K2 ) - DO 130 I = 1, M - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9973 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.1) THEN - WRITE( LOUT, 9963 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.2) THEN - WRITE( LOUT, 9953 )I, ( A( I, J ), J = K1, K2 ) - END IF - 130 CONTINUE - 140 CONTINUE -* - ELSE IF( NDIGIT.LE.8 ) THEN - DO 160 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - WRITE( LOUT, 9996 )( ICOL, I, I = K1, K2 ) - DO 150 I = 1, M - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9972 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.1) THEN - WRITE( LOUT, 9962 )I, ( A( I, J ), J = K1, K2 ) - ELSE IF ((K1+2-N).EQ.2) THEN - WRITE( LOUT, 9952 )I, ( A( I, J ), J = K1, K2 ) - END IF - 150 CONTINUE - 160 CONTINUE -* - ELSE - DO 180 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - WRITE( LOUT, 9995 )( ICOL, I, I = K1, K2 ) - DO 170 I = 1, M - IF ((K1+1).LE.N) THEN - WRITE( LOUT, 9971 )I, ( A( I, J ), J = K1, K2 ) - ELSE - WRITE( LOUT, 9961 )I, ( A( I, J ), J = K1, K2 ) - END IF - 170 CONTINUE - 180 CONTINUE - END IF - END IF - WRITE( LOUT, 9990 ) -* - 9998 FORMAT( 11X, 4( 9X, 3A1, I4, 9X ) ) - 9997 FORMAT( 10X, 4( 11X, 3A1, I4, 11X ) ) - 9996 FORMAT( 10X, 3( 13X, 3A1, I4, 13X ) ) - 9995 FORMAT( 12X, 2( 18x, 3A1, I4, 18X ) ) -* -*======================================================== -* FORMAT FOR 72 COLUMN -*======================================================== -* -* DISPLAY 4 SIGNIFICANT DIGITS -* - 9994 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',D10.3,',',D10.3,') ') ) - 9984 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D10.3,',',D10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGITS -* - 9993 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',D12.5,',',D12.5,') ') ) - 9983 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D12.5,',',D12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGITS -* - 9992 FORMAT( 1X, ' Row', I4, 
':', 1X, 1P,2('(',D14.7,',',D14.7,') ') ) - 9982 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D14.7,',',D14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGITS -* - 9991 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D20.13,',',D20.13,')') ) - 9990 FORMAT( 1X, ' ' ) -* -* -*======================================================== -* FORMAT FOR 132 COLUMN -*======================================================== -* -* DISPLAY 4 SIGNIFICANT DIGIT -* - 9974 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,4('(',D10.3,',',D10.3,') ') ) - 9964 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,3('(',D10.3,',',D10.3,') ') ) - 9954 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',D10.3,',',D10.3,') ') ) - 9944 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D10.3,',',D10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGIT -* - 9973 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,3('(',D12.5,',',D12.5,') ') ) - 9963 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',D12.5,',',D12.5,') ') ) - 9953 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D12.5,',',D12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGIT -* - 9972 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,3('(',D14.7,',',D14.7,') ') ) - 9962 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',D14.7,',',D14.7,') ') ) - 9952 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D14.7,',',D14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGIT -* - 9971 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,2('(',D20.13,',',D20.13, - & ') ')) - 9961 FORMAT( 1X, ' Row', I4, ':', 1X, 1P,1('(',D20.13,',',D20.13, - & ') ')) - -* -* -* -* - RETURN - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/zvout.f b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/zvout.f deleted file mode 100644 index ac7e6f9fcd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/ARPACK/UTIL/zvout.f +++ /dev/null @@ -1,240 +0,0 @@ -c----------------------------------------------------------------------- -c -c\SCCS Information: @(#) -c FILE: zvout.f SID: 2.1 DATE OF SID: 11/16/95 RELEASE: 2 -c 
-*----------------------------------------------------------------------- -* Routine: ZVOUT -* -* Purpose: Complex*16 vector output routine. -* -* Usage: CALL ZVOUT (LOUT, N, CX, IDIGIT, IFMT) -* -* Arguments -* N - Length of array CX. (Input) -* CX - Complex*16 array to be printed. (Input) -* IFMT - Format to be used in printing array CX. (Input) -* IDIGIT - Print up to IABS(IDIGIT) decimal digits per number. (In) -* If IDIGIT .LT. 0, printing is done with 72 columns. -* If IDIGIT .GT. 0, printing is done with 132 columns. -* -*----------------------------------------------------------------------- -* - SUBROUTINE ZVOUT( LOUT, N, CX, IDIGIT, IFMT ) -* ... -* ... SPECIFICATIONS FOR ARGUMENTS - INTEGER N, IDIGIT, LOUT - Complex*16 - & CX( * ) - CHARACTER IFMT*( * ) -* ... -* ... SPECIFICATIONS FOR LOCAL VARIABLES - INTEGER I, NDIGIT, K1, K2, LLL - CHARACTER*80 LINE -* ... -* ... FIRST EXECUTABLE STATEMENT -* -* - LLL = MIN( LEN( IFMT ), 80 ) - DO 10 I = 1, LLL - LINE( I: I ) = '-' - 10 CONTINUE -* - DO 20 I = LLL + 1, 80 - LINE( I: I ) = ' ' - 20 CONTINUE -* - WRITE( LOUT, 9999 )IFMT, LINE( 1: LLL ) - 9999 FORMAT( / 1X, A / 1X, A ) -* - IF( N.LE.0 ) - $ RETURN - NDIGIT = IDIGIT - IF( IDIGIT.EQ.0 ) - $ NDIGIT = 4 -* -*======================================================================= -* CODE FOR OUTPUT USING 72 COLUMNS FORMAT -*======================================================================= -* - IF( IDIGIT.LT.0 ) THEN - NDIGIT = -IDIGIT - IF( NDIGIT.LE.4 ) THEN - DO 30 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF (K1.NE.N) THEN - WRITE( LOUT, 9998 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE - WRITE( LOUT, 9997 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 30 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 40 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF (K1.NE.N) THEN - WRITE( LOUT, 9988 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE - WRITE( LOUT, 9987 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 40 CONTINUE - ELSE IF( NDIGIT.LE.8 ) THEN - DO 50 K1 = 1, N, 2 - K2 
= MIN0( N, K1+1 ) - IF (K1.NE.N) THEN - WRITE( LOUT, 9978 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE - WRITE( LOUT, 9977 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 50 CONTINUE - ELSE - DO 60 K1 = 1, N - WRITE( LOUT, 9968 )K1, K1, CX( I ) - 60 CONTINUE - END IF -* -*======================================================================= -* CODE FOR OUTPUT USING 132 COLUMNS FORMAT -*======================================================================= -* - ELSE - IF( NDIGIT.LE.4 ) THEN - DO 70 K1 = 1, N, 4 - K2 = MIN0( N, K1+3 ) - IF ((K1+3).LE.N) THEN - WRITE( LOUT, 9958 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+3-N) .EQ. 1) THEN - WRITE( LOUT, 9957 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+3-N) .EQ. 2) THEN - WRITE( LOUT, 9956 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+3-N) .EQ. 1) THEN - WRITE( LOUT, 9955 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 70 CONTINUE - ELSE IF( NDIGIT.LE.6 ) THEN - DO 80 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9948 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 1) THEN - WRITE( LOUT, 9947 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 2) THEN - WRITE( LOUT, 9946 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 80 CONTINUE - ELSE IF( NDIGIT.LE.8 ) THEN - DO 90 K1 = 1, N, 3 - K2 = MIN0( N, K1+2 ) - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9938 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 1) THEN - WRITE( LOUT, 9937 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 2) THEN - WRITE( LOUT, 9936 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 90 CONTINUE - ELSE - DO 100 K1 = 1, N, 2 - K2 = MIN0( N, K1+1 ) - IF ((K1+2).LE.N) THEN - WRITE( LOUT, 9928 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - ELSE IF ((K1+2-N) .EQ. 
1) THEN - WRITE( LOUT, 9927 )K1, K2, ( CX( I ), - $ I = K1, K2 ) - END IF - 100 CONTINUE - END IF - END IF - WRITE( LOUT, 9994 ) - RETURN -* -*======================================================================= -* FORMAT FOR 72 COLUMNS -*======================================================================= -* -* DISPLAY 4 SIGNIFICANT DIGITS -* - 9998 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',D10.3,',',D10.3,') ') ) - 9997 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D10.3,',',D10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGITS -* - 9988 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',D12.5,',',D12.5,') ') ) - 9987 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D12.5,',',D12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGITS -* - 9978 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',D14.7,',',D14.7,') ') ) - 9977 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D14.7,',',D14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGITS -* - 9968 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D20.13,',',D20.13,') ') ) -* -*========================================================================= -* FORMAT FOR 132 COLUMNS -*========================================================================= -* -* DISPLAY 4 SIGNIFICANT DIGITS -* - 9958 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,4('(',D10.3,',',D10.3,') ') ) - 9957 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,3('(',D10.3,',',D10.3,') ') ) - 9956 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',D10.3,',',D10.3,') ') ) - 9955 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D10.3,',',D10.3,') ') ) -* -* DISPLAY 6 SIGNIFICANT DIGITS -* - 9948 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,3('(',D12.5,',',D12.5,') ') ) - 9947 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',D12.5,',',D12.5,') ') ) - 9946 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D12.5,',',D12.5,') ') ) -* -* DISPLAY 8 SIGNIFICANT DIGITS -* - 9938 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,3('(',D14.7,',',D14.7,') ') ) - 9937 FORMAT( 1X, I4, ' - ', I4, 
':', 1X, - $ 1P,2('(',D14.7,',',D14.7,') ') ) - 9936 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D14.7,',',D14.7,') ') ) -* -* DISPLAY 13 SIGNIFICANT DIGITS -* - 9928 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,2('(',D20.13,',',D20.13,') ') ) - 9927 FORMAT( 1X, I4, ' - ', I4, ':', 1X, - $ 1P,1('(',D20.13,',',D20.13,') ') ) -* -* -* - 9994 FORMAT( 1X, ' ' ) - END diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/SConscript b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/SConscript deleted file mode 100644 index e868cf3a0f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/SConscript +++ /dev/null @@ -1,65 +0,0 @@ -# vim:syntax=python -from os.path import join as pjoin - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK, CheckF77Clib -from numscons import write_info, IsAccelerate, IsVeclib - -env = GetNumpyEnvironment(ARGUMENTS) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -env.Tool('f2py') -#----------------- -# Checking Lapack -#----------------- -st = config.CheckF77Clib() -st = config.CheckLAPACK(autoadd = 1) -if not st: - raise RuntimeError("no lapack found, necessary for arpack module") - -use_c_calling = IsAccelerate(env, "lapack") or IsVeclib(env, "lapack") -config.Finish() -write_info(env) - -# Build arpack -arpack_src = [pjoin("ARPACK", "SRC", s) for s in [ "cgetv0.f", "cnaitr.f", -"cnapps.f", "cnaup2.f", "cnaupd.f", "cneigh.f", "cneupd.f", "cngets.f", -"csortc.f", "cstatn.f", "dgetv0.f", "dlaqrb.f", "dnaitr.f", "dnapps.f", -"dnaup2.f", "dnaupd.f", "dnconv.f", "dneigh.f", "dneupd.f", "dngets.f", -"dsaitr.f", "dsapps.f", "dsaup2.f", "dsaupd.f", "dsconv.f", "dseigt.f", -"dsesrt.f", "dseupd.f", "dsgets.f", "dsortc.f", "dsortr.f", "dstatn.f", -"dstats.f", "dstqrb.f", "sgetv0.f", "slaqrb.f", "snaitr.f", "snapps.f", -"snaup2.f", "snaupd.f", "snconv.f", "sneigh.f", 
"sneupd.f", "sngets.f", -"ssaitr.f", "ssapps.f", "ssaup2.f", "ssaupd.f", "ssconv.f", "sseigt.f", -"ssesrt.f", "sseupd.f", "ssgets.f", "ssortc.f", "ssortr.f", "sstatn.f", -"sstats.f", "sstqrb.f", "zgetv0.f", "znaitr.f", "znapps.f", "znaup2.f", -"znaupd.f", "zneigh.f", "zneupd.f", "zngets.f", "zsortc.f", "zstatn.f"]] - -arpack_src += [pjoin('ARPACK', 'UTIL', s) for s in [ "cmout.f", "cvout.f", -"dmout.f", "dvout.f", "icnteq.f", "icopy.f", "iset.f", "iswap.f", "ivout.f", -"second.f", "smout.f", "svout.f", "zmout.f", "zvout.f"]] - -if use_c_calling: - arpack_src += [pjoin('ARPACK', 'FWRAPPERS', 'veclib_cabi_f.f'), - pjoin('ARPACK', 'FWRAPPERS', 'veclib_cabi_c.c')] -else: - arpack_src += [pjoin('ARPACK', 'FWRAPPERS', 'dummy.f')] -arpack_src += [pjoin('ARPACK', 'LAPACK', s) for s in [ "clahqr.f", "dlahqr.f", -"slahqr.f", "zlahqr.f"]] - -src = [str(s) for s in arpack_src] - -env.AppendUnique(CPPPATH = [pjoin('ARPACK', 'SRC')]) -env.AppendUnique(F77PATH = [pjoin('ARPACK', 'SRC')]) -env.AppendUnique(LIBPATH = ['.']) -arpack_lib = env.DistutilsStaticExtLibrary('arpack', source = src) - -# Build _arpack extension -env.FromFTemplate('arpack.pyf', 'arpack.pyf.src') -env.Prepend(LIBS = 'arpack') -env.NumpyPythonExtension('_arpack', 'arpack.pyf') diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/SConstruct b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/__init__.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/__init__.py deleted file mode 100644 index fdfa2eaf1e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from info import __doc__ -from arpack import * diff --git 
a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/arpack.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/arpack.py deleted file mode 100644 index 8e588ee6d5..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/arpack.py +++ /dev/null @@ -1,1608 +0,0 @@ -""" -Find a few eigenvectors and eigenvalues of a matrix. - - -Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ - -""" -# Wrapper implementation notes -# -# ARPACK Entry Points -# ------------------- -# The entry points to ARPACK are -# - (s,d)seupd : single and double precision symmetric matrix -# - (s,d,c,z)neupd: single,double,complex,double complex general matrix -# This wrapper puts the *neupd (general matrix) interfaces in eigs() -# and the *seupd (symmetric matrix) in eigsh(). -# There is no Hermetian complex/double complex interface. -# To find eigenvalues of a Hermetian matrix you -# must use eigs() and not eigsh() -# It might be desirable to handle the Hermetian case differently -# and, for example, return real eigenvalues. - -# Number of eigenvalues returned and complex eigenvalues -# ------------------------------------------------------ -# The ARPACK nonsymmetric real and double interface (s,d)naupd return -# eigenvalues and eigenvectors in real (float,double) arrays. -# Since the eigenvalues and eigenvectors are, in general, complex -# ARPACK puts the real and imaginary parts in consecutive entries -# in real-valued arrays. This wrapper puts the real entries -# into complex data types and attempts to return the requested eigenvalues -# and eigenvectors. - - -# Solver modes -# ------------ -# ARPACK and handle shifted and shift-inverse computations -# for eigenvalues by providing a shift (sigma) and a solver. 
- -__docformat__ = "restructuredtext en" - -__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] - -import sys -import warnings - -import _arpack -import numpy as np -from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator -from scipy.sparse import identity, csc_matrix, csr_matrix, \ - isspmatrix, isspmatrix_csr -from scipy.linalg import lu_factor, lu_solve -from scipy.sparse.sputils import isdense -from scipy.sparse.linalg import gmres, splu -from scipy.linalg.lapack import get_lapack_funcs - - -def _single_precision_cast(typechar): - # This check is required, for now, because we have unresolved crashes - # occurring in single precision Veclib routines, on at least 64-bit OSX - # and some Linux systems. When these crashes are resolved, this - # restriction can be removed. - if typechar in ('f', 'F'): - warnings.warn("Single-precision types in `eigs` and `eighs` " - "are not supported currently. " - "Double precision routines are used instead.") - return {'f': 'd', 'F': 'D'}[typechar] - - return typechar - - -_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} -_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} - -DNAUPD_ERRORS = { - 0: "Normal exit.", - 1: "Maximum number of iterations taken. " - "All possible eigenvalues of OP has been found. IPARAM(5) " - "returns the number of wanted converged Ritz values.", - 2: "No longer an informational error. Deprecated starting " - "with release 2 of ARPACK.", - 3: "No shifts could be applied during a cycle of the " - "Implicitly restarted Arnoldi iteration. One possibility " - "is to increase the size of NCV relative to NEV. 
", - -1: "N must be positive.", - -2: "NEV must be positive.", - -3: "NCV-NEV >= 2 and less than or equal to N.", - -4: "The maximum number of Arnoldi update iterations allowed " - "must be greater than zero.", - -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", - -6: "BMAT must be one of 'I' or 'G'.", - -7: "Length of private work array WORKL is not sufficient.", - -8: "Error return from LAPACK eigenvalue calculation;", - -9: "Starting vector is zero.", - -10: "IPARAM(7) must be 1,2,3,4.", - -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.", - -12: "IPARAM(1) must be equal to 0 or 1.", - -13: "NEV and WHICH = 'BE' are incompatable.", - -9999: "Could not build an Arnoldi factorization. " - "IPARAM(5) returns the size of the current Arnoldi " - "factorization. The user is advised to check that " - "enough workspace and array storage has been allocated." -} - -SNAUPD_ERRORS = DNAUPD_ERRORS - -ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() -ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." - -CNAUPD_ERRORS = ZNAUPD_ERRORS - -DSAUPD_ERRORS = { - 0: "Normal exit.", - 1: "Maximum number of iterations taken. " - "All possible eigenvalues of OP has been found.", - 2: "No longer an informational error. Deprecated starting with " - "release 2 of ARPACK.", - 3: "No shifts could be applied during a cycle of the Implicitly " - "restarted Arnoldi iteration. One possibility is to increase " - "the size of NCV relative to NEV. ", - -1: "N must be positive.", - -2: "NEV must be positive.", - -3: "NCV must be greater than NEV and less than or equal to N.", - -4: "The maximum number of Arnoldi update iterations allowed " - "must be greater than zero.", - -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", - -6: "BMAT must be one of 'I' or 'G'.", - -7: "Length of private work array WORKL is not sufficient.", - -8: "Error return from trid. 
eigenvalue calculation; " - "Informational error from LAPACK routine dsteqr .", - -9: "Starting vector is zero.", - -10: "IPARAM(7) must be 1,2,3,4,5.", - -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.", - -12: "IPARAM(1) must be equal to 0 or 1.", - -13: "NEV and WHICH = 'BE' are incompatable. ", - -9999: "Could not build an Arnoldi factorization. " - "IPARAM(5) returns the size of the current Arnoldi " - "factorization. The user is advised to check that " - "enough workspace and array storage has been allocated.", -} - -SSAUPD_ERRORS = DSAUPD_ERRORS - -DNEUPD_ERRORS = { - 0: "Normal exit.", - 1: "The Schur form computed by LAPACK routine dlahqr " - "could not be reordered by LAPACK routine dtrsen. " - "Re-enter subroutine dneupd with IPARAM(5)NCV and " - "increase the size of the arrays DR and DI to have " - "dimension at least dimension NCV and allocate at least NCV " - "columns for Z. NOTE: Not necessary if Z and V share " - "the same space. Please notify the authors if this error" - "occurs.", - -1: "N must be positive.", - -2: "NEV must be positive.", - -3: "NCV-NEV >= 2 and less than or equal to N.", - -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", - -6: "BMAT must be one of 'I' or 'G'.", - -7: "Length of private work WORKL array is not sufficient.", - -8: "Error return from calculation of a real Schur form. " - "Informational error from LAPACK routine dlahqr .", - -9: "Error return from calculation of eigenvectors. " - "Informational error from LAPACK routine dtrevc.", - -10: "IPARAM(7) must be 1,2,3,4.", - -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", - -12: "HOWMNY = 'S' not yet implemented", - -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", - -14: "DNAUPD did not find any eigenvalues to sufficient " - "accuracy.", - -15: "DNEUPD got a different count of the number of converged " - "Ritz values than DNAUPD got. 
This indicates the user " - "probably made an error in passing data from DNAUPD to " - "DNEUPD or that the data was modified before entering " - "DNEUPD", -} - -SNEUPD_ERRORS = DNEUPD_ERRORS.copy() -SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " - "could not be reordered by LAPACK routine strsen . " - "Re-enter subroutine dneupd with IPARAM(5)=NCV and " - "increase the size of the arrays DR and DI to have " - "dimension at least dimension NCV and allocate at least " - "NCV columns for Z. NOTE: Not necessary if Z and V share " - "the same space. Please notify the authors if this error " - "occurs.") -SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " - "accuracy.") -SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " - "converged Ritz values than SNAUPD got. This indicates " - "the user probably made an error in passing data from " - "SNAUPD to SNEUPD or that the data was modified before " - "entering SNEUPD") - -ZNEUPD_ERRORS = {0: "Normal exit.", - 1: "The Schur form computed by LAPACK routine csheqr " - "could not be reordered by LAPACK routine ztrsen. " - "Re-enter subroutine zneupd with IPARAM(5)=NCV and " - "increase the size of the array D to have " - "dimension at least dimension NCV and allocate at least " - "NCV columns for Z. NOTE: Not necessary if Z and V share " - "the same space. Please notify the authors if this error " - "occurs.", - -1: "N must be positive.", - -2: "NEV must be positive.", - -3: "NCV-NEV >= 1 and less than or equal to N.", - -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", - -6: "BMAT must be one of 'I' or 'G'.", - -7: "Length of private work WORKL array is not sufficient.", - -8: "Error return from LAPACK eigenvalue calculation. " - "This should never happened.", - -9: "Error return from calculation of eigenvectors. 
" - "Informational error from LAPACK routine ztrevc.", - -10: "IPARAM(7) must be 1,2,3", - -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", - -12: "HOWMNY = 'S' not yet implemented", - -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", - -14: "ZNAUPD did not find any eigenvalues to sufficient " - "accuracy.", - -15: "ZNEUPD got a different count of the number of " - "converged Ritz values than ZNAUPD got. This " - "indicates the user probably made an error in passing " - "data from ZNAUPD to ZNEUPD or that the data was " - "modified before entering ZNEUPD" -} - -CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() -CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " - "accuracy.") -CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " - "converged Ritz values than CNAUPD got. This indicates " - "the user probably made an error in passing data from " - "CNAUPD to CNEUPD or that the data was modified before " - "entering CNEUPD") - -DSEUPD_ERRORS = { - 0: "Normal exit.", - -1: "N must be positive.", - -2: "NEV must be positive.", - -3: "NCV must be greater than NEV and less than or equal to N.", - -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", - -6: "BMAT must be one of 'I' or 'G'.", - -7: "Length of private work WORKL array is not sufficient.", - -8: ("Error return from trid. eigenvalue calculation; " - "Information error from LAPACK routine dsteqr."), - -9: "Starting vector is zero.", - -10: "IPARAM(7) must be 1,2,3,4,5.", - -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", - -12: "NEV and WHICH = 'BE' are incompatible.", - -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", - -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", - -16: "HOWMNY = 'S' not yet implemented", - -17: ("DSEUPD got a different count of the number of converged " - "Ritz values than DSAUPD got. 
This indicates the user " - "probably made an error in passing data from DSAUPD to " - "DSEUPD or that the data was modified before entering " - "DSEUPD.") -} - -SSEUPD_ERRORS = DSEUPD_ERRORS.copy() -SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " - "to sufficient accuracy.") -SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " - "converged " - "Ritz values than SSAUPD got. This indicates the user " - "probably made an error in passing data from SSAUPD to " - "SSEUPD or that the data was modified before entering " - "SSEUPD.") - -_SAUPD_ERRORS = {'d': DSAUPD_ERRORS, - 's': SSAUPD_ERRORS} -_NAUPD_ERRORS = {'d': DNAUPD_ERRORS, - 's': SNAUPD_ERRORS, - 'z': ZNAUPD_ERRORS, - 'c': CNAUPD_ERRORS} -_SEUPD_ERRORS = {'d': DSEUPD_ERRORS, - 's': SSEUPD_ERRORS} -_NEUPD_ERRORS = {'d': DNEUPD_ERRORS, - 's': SNEUPD_ERRORS, - 'z': ZNEUPD_ERRORS, - 'c': CNEUPD_ERRORS} - -# accepted values of parameter WHICH in _SEUPD -_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] - -# accepted values of parameter WHICH in _NAUPD -_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] - - -class ArpackError(RuntimeError): - """ - ARPACK error - """ - def __init__(self, info, infodict=_NAUPD_ERRORS): - msg = infodict.get(info, "Unknown error") - RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) - - -class ArpackNoConvergence(ArpackError): - """ - ARPACK iteration did not converge - - Attributes - ---------- - eigenvalues : ndarray - Partial result. Converged eigenvalues. - eigenvectors : ndarray - Partial result. Converged eigenvectors. 
- - """ - def __init__(self, msg, eigenvalues, eigenvectors): - ArpackError.__init__(self, -1, {-1: msg}) - self.eigenvalues = eigenvalues - self.eigenvectors = eigenvectors - - -class _ArpackParams(object): - def __init__(self, n, k, tp, mode=1, sigma=None, - ncv=None, v0=None, maxiter=None, which="LM", tol=0): - if k <= 0: - raise ValueError("k must be positive, k=%d" % k) - - if maxiter is None: - maxiter = n * 10 - if maxiter <= 0: - raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) - - if tp not in 'fdFD': - raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") - - tp = _single_precision_cast(tp) - - if v0 is not None: - # ARPACK overwrites its initial resid, make a copy - self.resid = np.array(v0, copy=True) - info = 1 - else: - self.resid = np.zeros(n, tp) - info = 0 - - if sigma is None: - #sigma not used - self.sigma = 0 - else: - self.sigma = sigma - - if ncv is None: - ncv = 2 * k + 1 - ncv = min(ncv, n) - - self.v = np.zeros((n, ncv), tp) # holds Ritz vectors - self.iparam = np.zeros(11, "int") - - # set solver mode and parameters - ishfts = 1 - self.mode = mode - self.iparam[0] = ishfts - self.iparam[2] = maxiter - self.iparam[3] = 1 - self.iparam[6] = mode - - self.n = n - self.tol = tol - self.k = k - self.maxiter = maxiter - self.ncv = ncv - self.which = which - self.tp = tp - self.info = info - - self.converged = False - self.ido = 0 - - def _raise_no_convergence(self): - msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" - k_ok = self.iparam[4] - num_iter = self.iparam[2] - try: - ev, vec = self.extract(True) - except ArpackError, err: - msg = "%s [%s]" % (msg, err) - ev = np.zeros((0,)) - vec = np.zeros((self.n, 0)) - k_ok = 0 - raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) - - -class _SymmetricArpackParams(_ArpackParams): - def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, - Minv_matvec=None, sigma=None, - ncv=None, v0=None, maxiter=None, which="LM", tol=0): - # The 
following modes are supported: - # mode = 1: - # Solve the standard eigenvalue problem: - # A*x = lambda*x : - # A - symmetric - # Arguments should be - # matvec = left multiplication by A - # M_matvec = None [not used] - # Minv_matvec = None [not used] - # - # mode = 2: - # Solve the general eigenvalue problem: - # A*x = lambda*M*x - # A - symmetric - # M - symmetric positive definite - # Arguments should be - # matvec = left multiplication by A - # M_matvec = left multiplication by M - # Minv_matvec = left multiplication by M^-1 - # - # mode = 3: - # Solve the general eigenvalue problem in shift-invert mode: - # A*x = lambda*M*x - # A - symmetric - # M - symmetric positive semi-definite - # Arguments should be - # matvec = None [not used] - # M_matvec = left multiplication by M - # or None, if M is the identity - # Minv_matvec = left multiplication by [A-sigma*M]^-1 - # - # mode = 4: - # Solve the general eigenvalue problem in Buckling mode: - # A*x = lambda*AG*x - # A - symmetric positive semi-definite - # AG - symmetric indefinite - # Arguments should be - # matvec = left multiplication by A - # M_matvec = None [not used] - # Minv_matvec = left multiplication by [A-sigma*AG]^-1 - # - # mode = 5: - # Solve the general eigenvalue problem in Cayley-transformed mode: - # A*x = lambda*M*x - # A - symmetric - # M - symmetric positive semi-definite - # Arguments should be - # matvec = left multiplication by A - # M_matvec = left multiplication by M - # or None, if M is the identity - # Minv_matvec = left multiplication by [A-sigma*M]^-1 - if mode == 1: - if matvec is None: - raise ValueError("matvec must be specified for mode=1") - if M_matvec is not None: - raise ValueError("M_matvec cannot be specified for mode=1") - if Minv_matvec is not None: - raise ValueError("Minv_matvec cannot be specified for mode=1") - - self.OP = matvec - self.B = lambda x: x - self.bmat = 'I' - elif mode == 2: - if matvec is None: - raise ValueError("matvec must be specified for mode=2") - 
if M_matvec is None: - raise ValueError("M_matvec must be specified for mode=2") - if Minv_matvec is None: - raise ValueError("Minv_matvec must be specified for mode=2") - - self.OP = lambda x: Minv_matvec(matvec(x)) - self.OPa = Minv_matvec - self.OPb = matvec - self.B = M_matvec - self.bmat = 'G' - elif mode == 3: - if matvec is not None: - raise ValueError("matvec must not be specified for mode=3") - if Minv_matvec is None: - raise ValueError("Minv_matvec must be specified for mode=3") - - if M_matvec is None: - self.OP = Minv_matvec - self.OPa = Minv_matvec - self.B = lambda x: x - self.bmat = 'I' - else: - self.OP = lambda x: Minv_matvec(M_matvec(x)) - self.OPa = Minv_matvec - self.B = M_matvec - self.bmat = 'G' - elif mode == 4: - if matvec is None: - raise ValueError("matvec must be specified for mode=4") - if M_matvec is not None: - raise ValueError("M_matvec must not be specified for mode=4") - if Minv_matvec is None: - raise ValueError("Minv_matvec must be specified for mode=4") - self.OPa = Minv_matvec - self.OP = lambda x: self.OPa(matvec(x)) - self.B = matvec - self.bmat = 'G' - elif mode == 5: - if matvec is None: - raise ValueError("matvec must be specified for mode=5") - if Minv_matvec is None: - raise ValueError("Minv_matvec must be specified for mode=5") - - self.OPa = Minv_matvec - self.A_matvec = matvec - - if M_matvec is None: - self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) - self.B = lambda x: x - self.bmat = 'I' - else: - self.OP = lambda x: Minv_matvec(matvec(x) - + sigma * M_matvec(x)) - self.B = M_matvec - self.bmat = 'G' - else: - raise ValueError("mode=%i not implemented" % mode) - - if which not in _SEUPD_WHICH: - raise ValueError("which must be one of %s" - % ' '.join(_SEUPD_WHICH)) - if k >= n: - raise ValueError("k must be less than rank(A), k=%d" % k) - - _ArpackParams.__init__(self, n, k, tp, mode, sigma, - ncv, v0, maxiter, which, tol) - - if self.ncv > n or self.ncv <= k: - raise ValueError("ncv must be k= n - 1: - raise 
ValueError("k must be less than rank(A)-1, k=%d" % k) - - _ArpackParams.__init__(self, n, k, tp, mode, sigma, - ncv, v0, maxiter, which, tol) - - if self.ncv > n or self.ncv <= k + 1: - raise ValueError("ncv must be k+1 k, so we'll - # throw out this case. - nreturned -= 1 - i += 1 - - else: - # real matrix, mode 3 or 4, imag(sigma) is nonzero: - # see remark 3 in neupd.f - # Build complex eigenvalues from real and imaginary parts - i = 0 - while i <= k: - if abs(d[i].imag) == 0: - d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) - else: - if i < k: - z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] - z[:, i + 1] = z[:, i].conjugate() - d[i] = ((np.dot(zr[:, i], - self.matvec(zr[:, i])) - + np.dot(zr[:, i + 1], - self.matvec(zr[:, i + 1]))) - + 1j * (np.dot(zr[:, i], - self.matvec(zr[:, i + 1])) - - np.dot(zr[:, i + 1], - self.matvec(zr[:, i])))) - d[i + 1] = d[i].conj() - i += 1 - else: - #last eigenvalue is complex: the imaginary part of - # the eigenvector has not been returned - #this can only happen if nreturned > k, so we'll - # throw out this case. - nreturned -= 1 - i += 1 - - # Now we have k+1 possible eigenvalues and eigenvectors - # Return the ones specified by the keyword "which" - - if nreturned <= k: - # we got less or equal as many eigenvalues we wanted - d = d[:nreturned] - z = z[:, :nreturned] - else: - # we got one extra eigenvalue (likely a cc pair, but which?) - # cut at approx precision for sorting - rd = np.round(d, decimals=_ndigits[self.tp]) - if self.which in ['LR', 'SR']: - ind = np.argsort(rd.real) - elif self.which in ['LI', 'SI']: - # for LI,SI ARPACK returns largest,smallest - # abs(imaginary) why? - ind = np.argsort(abs(rd.imag)) - else: - ind = np.argsort(abs(rd)) - if self.which in ['LR', 'LM', 'LI']: - d = d[ind[-k:]] - z = z[:, ind[-k:]] - if self.which in ['SR', 'SM', 'SI']: - d = d[ind[:k]] - z = z[:, ind[:k]] - else: - # complex is so much simpler... 
- d, z, ierr =\ - self._arpack_extract(return_eigenvectors, - howmny, sselect, self.sigma, workev, - self.bmat, self.which, k, self.tol, self.resid, - self.v, self.iparam, self.ipntr, - self.workd, self.workl, self.rwork, ierr) - - if ierr != 0: - raise ArpackError(ierr, infodict=self.extract_infodict) - - k_ok = self.iparam[4] - d = d[:k_ok] - z = z[:, :k_ok] - - if return_eigenvectors: - return d, z - else: - return d - - -def _aslinearoperator_with_dtype(m): - m = aslinearoperator(m) - if not hasattr(m, 'dtype'): - x = np.zeros(m.shape[1]) - m.dtype = (m * x).dtype - return m - -class SpLuInv(LinearOperator): - """ - SpLuInv: - helper class to repeatedly solve M*x=b - using a sparse LU-decopposition of M - """ - def __init__(self, M): - self.M_lu = splu(M) - LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) - self.isreal = not np.issubdtype(self.dtype, np.complexfloating) - - def _matvec(self, x): - # careful here: splu.solve will throw away imaginary - # part of x if M is real - if self.isreal and np.issubdtype(x.dtype, np.complexfloating): - return (self.M_lu.solve(np.real(x).astype(self.dtype)) - + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype))) - else: - return self.M_lu.solve(x.astype(self.dtype)) - -class LuInv(LinearOperator): - """ - LuInv: - helper class to repeatedly solve M*x=b - using an LU-decomposition of M - """ - def __init__(self, M): - self.M_lu = lu_factor(M) - LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) - - def _matvec(self, x): - return lu_solve(self.M_lu, x) - - -class IterInv(LinearOperator): - """ - IterInv: - helper class to repeatedly solve M*x=b - using an iterative method. - """ - def __init__(self, M, ifunc=gmres, tol=0): - if tol <= 0: - # when tol=0, ARPACK uses machine tolerance as calculated - # by LAPACK's _LAMCH function. 
We should match this - tol = 2 * np.finfo(M.dtype).eps - self.M = M - self.ifunc = ifunc - self.tol = tol - if hasattr(M, 'dtype'): - dtype = M.dtype - else: - x = np.zeros(M.shape[1]) - dtype = (M * x).dtype - LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype) - - def _matvec(self, x): - b, info = self.ifunc(self.M, x, tol=self.tol) - if info != 0: - raise ValueError("Error in inverting M: function " - "%s did not converge (info = %i)." - % (self.ifunc.__name__, info)) - return b - - -class IterOpInv(LinearOperator): - """ - IterOpInv: - helper class to repeatedly solve [A-sigma*M]*x = b - using an iterative method - """ - def __init__(self, A, M, sigma, ifunc=gmres, tol=0): - if tol <= 0: - # when tol=0, ARPACK uses machine tolerance as calculated - # by LAPACK's _LAMCH function. We should match this - tol = 2 * np.finfo(A.dtype).eps - self.A = A - self.M = M - self.sigma = sigma - self.ifunc = ifunc - self.tol = tol - - x = np.zeros(A.shape[1]) - if M is None: - dtype = self.mult_func_M_None(x).dtype - self.OP = LinearOperator(self.A.shape, - self.mult_func_M_None, - dtype=dtype) - else: - dtype = self.mult_func(x).dtype - self.OP = LinearOperator(self.A.shape, - self.mult_func, - dtype=dtype) - LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype) - - def mult_func(self, x): - return self.A.matvec(x) - self.sigma * self.M.matvec(x) - - def mult_func_M_None(self, x): - return self.A.matvec(x) - self.sigma * x - - def _matvec(self, x): - b, info = self.ifunc(self.OP, x, tol=self.tol) - if info != 0: - raise ValueError("Error in inverting [A-sigma*M]: function " - "%s did not converge (info = %i)." 
- % (self.ifunc.__name__, info)) - return b - -def get_inv_matvec(M, symmetric=False, tol=0): - if isdense(M): - return LuInv(M).matvec - elif isspmatrix(M): - if isspmatrix_csr(M) and symmetric: - M = M.T - return SpLuInv(M).matvec - else: - return IterInv(M, tol=tol).matvec - - -def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0): - if sigma == 0: - return get_inv_matvec(A, symmetric=symmetric, tol=tol) - - if M is None: - #M is the identity matrix - if isdense(A): - if (np.issubdtype(A.dtype, np.complexfloating) - or np.imag(sigma) == 0): - A = np.copy(A) - else: - A = A + 0j - A.flat[::A.shape[1] + 1] -= sigma - return LuInv(A).matvec - elif isspmatrix(A): - A = A - sigma * identity(A.shape[0]) - if symmetric and isspmatrix_csr(A): - A = A.T - return SpLuInv(A.tocsc()).matvec - else: - return IterOpInv(_aslinearoperator_with_dtype(A), - M, sigma, tol=tol).matvec - else: - if ((not isdense(A) and not isspmatrix(A)) or - (not isdense(M) and not isspmatrix(M))): - return IterOpInv(_aslinearoperator_with_dtype(A), - _aslinearoperator_with_dtype(M), - sigma, tol=tol).matvec - elif isdense(A) or isdense(M): - return LuInv(A - sigma * M).matvec - else: - OP = A - sigma * M - if symmetric and isspmatrix_csr(OP): - OP = OP.T - return SpLuInv(OP.tocsc()).matvec - - -def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, - ncv=None, maxiter=None, tol=0, return_eigenvectors=True, - Minv=None, OPinv=None, OPpart=None): - """ - Find k eigenvalues and eigenvectors of the square matrix A. - - Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem - for w[i] eigenvalues with corresponding eigenvectors x[i]. - - If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the - generalized eigenvalue problem for w[i] eigenvalues - with corresponding eigenvectors x[i] - - Parameters - ---------- - A : An N x N matrix, array, sparse matrix, or LinearOperator representing - the operation A * x, where A is a real or complex square matrix. 
- k : integer - The number of eigenvalues and eigenvectors desired. - `k` must be smaller than N. It is not possible to compute all - eigenvectors of a matrix. - - Returns - ------- - w : array - Array of k eigenvalues. - v : array - An array of `k` eigenvectors. - ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. - - Other Parameters - ---------------- - M : An N x N matrix, array, sparse matrix, or LinearOperator representing - the operation M*x for the generalized eigenvalue problem - ``A * x = w * M * x`` - M must represent a real symmetric matrix. For best results, M should - be of the same type as A. Additionally: - * If sigma==None, M is positive definite - * If sigma is specified, M is positive semi-definite - If sigma==None, eigs requires an operator to compute the solution - of the linear equation `M * x = b`. This is done internally via a - (sparse) LU decomposition for an explicit matrix M, or via an - iterative solver for a general linear operator. Alternatively, - the user can supply the matrix or operator Minv, which gives - x = Minv * b = M^-1 * b - sigma : real or complex - Find eigenvalues near sigma using shift-invert mode. This requires - an operator to compute the solution of the linear system - `[A - sigma * M] * x = b`, where M is the identity matrix if - unspecified. This is computed internally via a (sparse) LU - decomposition for explicit matrices A & M, or via an iterative - solver if either A or M is a general linear operator. - Alternatively, the user can supply the matrix or operator OPinv, - which gives x = OPinv * b = [A - sigma * M]^-1 * b. - For a real matrix A, shift-invert can either be done in imaginary - mode or real mode, specified by the parameter OPpart ('r' or 'i'). 
- Note that when sigma is specified, the keyword 'which' (below) - refers to the shifted eigenvalues w'[i] where: - * If A is real and OPpart == 'r' (default), - w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ] - * If A is real and OPpart == 'i', - w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ] - * If A is complex, - w'[i] = 1/(w[i]-sigma) - v0 : array - Starting vector for iteration. - ncv : integer - The number of Lanczos vectors generated - `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. - which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] - Which `k` eigenvectors and eigenvalues to find: - - 'LM' : largest magnitude - - 'SM' : smallest magnitude - - 'LR' : largest real part - - 'SR' : smallest real part - - 'LI' : largest imaginary part - - 'SI' : smallest imaginary part - When sigma != None, 'which' refers to the shifted eigenvalues w'[i] - (see discussion in 'sigma', above). ARPACK is generally better - at finding large values than small values. If small eigenvalues are - desired, consider using shift-invert mode for better performance. - maxiter : integer - Maximum number of Arnoldi update iterations allowed - tol : float - Relative accuracy for eigenvalues (stopping criterion) - The default value of 0 implies machine precision. - return_eigenvectors : boolean - Return eigenvectors (True) in addition to eigenvalues - Minv : N x N matrix, array, sparse matrix, or linear operator - See notes in M, above. - OPinv : N x N matrix, array, sparse matrix, or linear operator - See notes in sigma, above. - OPpart : 'r' or 'i'. - See notes in sigma, above - - Raises - ------ - ArpackNoConvergence - When the requested convergence is not obtained. - - The currently converged eigenvalues and eigenvectors can be found - as ``eigenvalues`` and ``eigenvectors`` attributes of the exception - object. 
- - See Also - -------- - eigsh : eigenvalues and eigenvectors for symmetric matrix A - svds : singular value decomposition for a matrix A - - Notes - ----- - This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, - ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to - find the eigenvalues and eigenvectors [2]_. - - Examples - -------- - Find 6 eigenvectors of the identity matrix: - - >>> id = np.identity(13) - >>> vals, vecs = sp.sparse.linalg.eigs(id, k=6) - >>> vals - array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) - >>> vecs.shape - (13, 6) - - References - ---------- - .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ - .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: - Solution of Large Scale Eigenvalue Problems by Implicitly Restarted - Arnoldi Methods. SIAM, Philadelphia, PA, 1998. - """ - if A.shape[0] != A.shape[1]: - raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) - if M is not None: - if M.shape != A.shape: - raise ValueError('wrong M dimensions %s, should be %s' - % (M.shape, A.shape)) - if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): - import warnings - warnings.warn('M does not have the same type precision as A. 
' - 'This may adversely affect ARPACK convergence') - n = A.shape[0] - - if k <= 0 or k >= n: - raise ValueError("k must be between 1 and rank(A)-1") - - if sigma is None: - matvec = _aslinearoperator_with_dtype(A).matvec - - if OPinv is not None: - raise ValueError("OPinv should not be specified " - "with sigma = None.") - if OPpart is not None: - raise ValueError("OPpart should not be specified with " - "sigma = None or complex A") - - if M is None: - #standard eigenvalue problem - mode = 1 - M_matvec = None - Minv_matvec = None - if Minv is not None: - raise ValueError("Minv should not be " - "specified with M = None.") - else: - #general eigenvalue problem - mode = 2 - if Minv is None: - Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) - else: - Minv = _aslinearoperator_with_dtype(Minv) - Minv_matvec = Minv.matvec - M_matvec = _aslinearoperator_with_dtype(M).matvec - else: - #sigma is not None: shift-invert mode - if np.issubdtype(A.dtype, np.complexfloating): - if OPpart is not None: - raise ValueError("OPpart should not be specified " - "with sigma=None or complex A") - mode = 3 - elif OPpart is None or OPpart.lower() == 'r': - mode = 3 - elif OPpart.lower() == 'i': - if np.imag(sigma) == 0: - raise ValueError("OPpart cannot be 'i' if sigma is real") - mode = 4 - else: - raise ValueError("OPpart must be one of ('r','i')") - - matvec = _aslinearoperator_with_dtype(A).matvec - if Minv is not None: - raise ValueError("Minv should not be specified when sigma is") - if OPinv is None: - Minv_matvec = get_OPinv_matvec(A, M, sigma, - symmetric=False, tol=tol) - else: - OPinv = _aslinearoperator_with_dtype(OPinv) - Minv_matvec = OPinv.matvec - if M is None: - M_matvec = None - else: - M_matvec = _aslinearoperator_with_dtype(M).matvec - - params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, - M_matvec, Minv_matvec, sigma, - ncv, v0, maxiter, which, tol) - - while not params.converged: - params.iterate() - - return 
params.extract(return_eigenvectors) - - -def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, - ncv=None, maxiter=None, tol=0, return_eigenvectors=True, - Minv=None, OPinv=None, mode='normal'): - """ - Find k eigenvalues and eigenvectors of the real symmetric square matrix - or complex hermitian matrix A. - - Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for - w[i] eigenvalues with corresponding eigenvectors x[i]. - - If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the - generalized eigenvalue problem for w[i] eigenvalues - with corresponding eigenvectors x[i] - - - Parameters - ---------- - A : An N x N matrix, array, sparse matrix, or LinearOperator representing - the operation A * x, where A is a real symmetric matrix - For buckling mode (see below) A must additionally be positive-definite - k : integer - The number of eigenvalues and eigenvectors desired. - `k` must be smaller than N. It is not possible to compute all - eigenvectors of a matrix. - - Returns - ------- - w : array - Array of k eigenvalues - v : array - An array of k eigenvectors - The v[i] is the eigenvector corresponding to the eigenvector w[i] - - Other Parameters - ---------------- - M : An N x N matrix, array, sparse matrix, or linear operator representing - the operation M * x for the generalized eigenvalue problem - ``A * x = w * M * x``. - M must represent a real, symmetric matrix. For best results, M should - be of the same type as A. Additionally: - * If sigma == None, M is symmetric positive definite - * If sigma is specified, M is symmetric positive semi-definite - * In buckling mode, M is symmetric indefinite. - If sigma == None, eigsh requires an operator to compute the solution - of the linear equation `M * x = b`. This is done internally via a - (sparse) LU decomposition for an explicit matrix M, or via an - iterative solver for a general linear operator. 
Alternatively, - the user can supply the matrix or operator Minv, which gives - x = Minv * b = M^-1 * b - sigma : real - Find eigenvalues near sigma using shift-invert mode. This requires - an operator to compute the solution of the linear system - `[A - sigma * M] x = b`, where M is the identity matrix if - unspecified. This is computed internally via a (sparse) LU - decomposition for explicit matrices A & M, or via an iterative - solver if either A or M is a general linear operator. - Alternatively, the user can supply the matrix or operator OPinv, - which gives x = OPinv * b = [A - sigma * M]^-1 * b. - Note that when sigma is specified, the keyword 'which' refers to - the shifted eigenvalues w'[i] where: - - if mode == 'normal', - w'[i] = 1 / (w[i] - sigma) - - if mode == 'cayley', - w'[i] = (w[i] + sigma) / (w[i] - sigma) - - if mode == 'buckling', - w'[i] = w[i] / (w[i] - sigma) - (see further discussion in 'mode' below) - v0 : array - Starting vector for iteration. - ncv : integer - The number of Lanczos vectors generated - ncv must be greater than k and smaller than n; - it is recommended that ncv > 2*k - which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] - If A is a complex hermitian matrix, 'BE' is invalid. - Which `k` eigenvectors and eigenvalues to find: - - 'LM' : Largest (in magnitude) eigenvalues - - 'SM' : Smallest (in magnitude) eigenvalues - - 'LA' : Largest (algebraic) eigenvalues - - 'SA' : Smallest (algebraic) eigenvalues - - 'BE' : Half (k/2) from each end of the spectrum - When k is odd, return one more (k/2+1) from the high end - When sigma != None, 'which' refers to the shifted eigenvalues w'[i] - (see discussion in 'sigma', above). ARPACK is generally better - at finding large values than small values. If small eigenvalues are - desired, consider using shift-invert mode for better performance. - maxiter : integer - Maximum number of Arnoldi update iterations allowed - tol : float - Relative accuracy for eigenvalues (stopping criterion). 
- The default value of 0 implies machine precision. - Minv : N x N matrix, array, sparse matrix, or LinearOperator - See notes in M, above - OPinv : N x N matrix, array, sparse matrix, or LinearOperator - See notes in sigma, above. - return_eigenvectors : boolean - Return eigenvectors (True) in addition to eigenvalues - mode : string ['normal' | 'buckling' | 'cayley'] - Specify strategy to use for shift-invert mode. This argument applies - only for real-valued A and sigma != None. For shift-invert mode, - ARPACK internally solves the eigenvalue problem - ``OP * x'[i] = w'[i] * B * x'[i]`` - and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] - into the desired eigenvectors and eigenvalues of the problem - ``A * x[i] = w[i] * M * x[i]``. - The modes are as follows: - - 'normal' : OP = [A - sigma * M]^-1 * M - B = M - w'[i] = 1 / (w[i] - sigma) - - 'buckling' : OP = [A - sigma * M]^-1 * A - B = A - w'[i] = w[i] / (w[i] - sigma) - - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M] - B = M - w'[i] = (w[i] + sigma) / (w[i] - sigma) - The choice of mode will affect which eigenvalues are selected by - the keyword 'which', and can also impact the stability of - convergence (see [2] for a discussion) - - Raises - ------ - ArpackNoConvergence - When the requested convergence is not obtained. - - The currently converged eigenvalues and eigenvectors can be found - as ``eigenvalues`` and ``eigenvectors`` attributes of the exception - object. - - See Also - -------- - eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A - svds : singular value decomposition for a matrix A - - Notes - ----- - This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD - functions which use the Implicitly Restarted Lanczos Method to - find the eigenvalues and eigenvectors [2]_. 
- - Examples - -------- - >>> id = np.identity(13) - >>> vals, vecs = sp.sparse.linalg.eigsh(id, k=6) - >>> vals - array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) - >>> vecs.shape - (13, 6) - - References - ---------- - .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ - .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: - Solution of Large Scale Eigenvalue Problems by Implicitly Restarted - Arnoldi Methods. SIAM, Philadelphia, PA, 1998. - """ - # complex hermitian matrices should be solved with eigs - if np.issubdtype(A.dtype, np.complexfloating): - if mode != 'normal': - raise ValueError("mode=%s cannot be used with " - "complex matrix A" % mode) - if which == 'BE': - raise ValueError("which='BE' cannot be used with complex matrix A") - elif which == 'LA': - which = 'LR' - elif which == 'SA': - which = 'SR' - ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, - ncv=ncv, maxiter=maxiter, tol=tol, - return_eigenvectors=return_eigenvectors, Minv=Minv, - OPinv=OPinv) - - if return_eigenvectors: - return ret[0].real, ret[1] - else: - return ret.real - - if A.shape[0] != A.shape[1]: - raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) - if M is not None: - if M.shape != A.shape: - raise ValueError('wrong M dimensions %s, should be %s' - % (M.shape, A.shape)) - if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): - import warnings - warnings.warn('M does not have the same type precision as A. 
' - 'This may adversely affect ARPACK convergence') - n = A.shape[0] - - if k <= 0 or k >= n: - raise ValueError("k must be between 1 and rank(A)-1") - - if sigma is None: - A = _aslinearoperator_with_dtype(A) - matvec = A.matvec - - if OPinv is not None: - raise ValueError("OPinv should not be specified " - "with sigma = None.") - if M is None: - #standard eigenvalue problem - mode = 1 - M_matvec = None - Minv_matvec = None - if Minv is not None: - raise ValueError("Minv should not be " - "specified with M = None.") - else: - #general eigenvalue problem - mode = 2 - if Minv is None: - Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) - else: - Minv = _aslinearoperator_with_dtype(Minv) - Minv_matvec = Minv.matvec - M_matvec = _aslinearoperator_with_dtype(M).matvec - else: - # sigma is not None: shift-invert mode - if Minv is not None: - raise ValueError("Minv should not be specified when sigma is") - - # normal mode - if mode == 'normal': - mode = 3 - matvec = None - if OPinv is None: - Minv_matvec = get_OPinv_matvec(A, M, sigma, - symmetric=True, tol=tol) - else: - OPinv = _aslinearoperator_with_dtype(OPinv) - Minv_matvec = OPinv.matvec - if M is None: - M_matvec = None - else: - M = _aslinearoperator_with_dtype(M) - M_matvec = M.matvec - - # buckling mode - elif mode == 'buckling': - mode = 4 - if OPinv is None: - Minv_matvec = get_OPinv_matvec(A, M, sigma, - symmetric=True, tol=tol) - else: - Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec - matvec = _aslinearoperator_with_dtype(A).matvec - M_matvec = None - - # cayley-transform mode - elif mode == 'cayley': - mode = 5 - matvec = _aslinearoperator_with_dtype(A).matvec - if OPinv is None: - Minv_matvec = get_OPinv_matvec(A, M, sigma, - symmetric=True, tol=tol) - else: - Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec - if M is None: - M_matvec = None - else: - M_matvec = _aslinearoperator_with_dtype(M).matvec - - # unrecognized mode - else: - raise ValueError("unrecognized mode '%s'" % 
mode) - - params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, - M_matvec, Minv_matvec, sigma, - ncv, v0, maxiter, which, tol) - - while not params.converged: - params.iterate() - - return params.extract(return_eigenvectors) - - -def svds(A, k=6, ncv=None, tol=0): - """Compute k singular values/vectors for a sparse matrix using ARPACK. - - Parameters - ---------- - A : sparse matrix - Array to compute the SVD on - k : int, optional - Number of singular values and vectors to compute. - ncv : integer - The number of Lanczos vectors generated - ncv must be greater than k+1 and smaller than n; - it is recommended that ncv > 2*k - tol : float, optional - Tolerance for singular values. Zero (default) means machine precision. - - Note - ---- - This is a naive implementation using an eigensolver on A.H * A or - A * A.H, depending on which one is more efficient. - - """ - if not (isinstance(A, np.ndarray) or isspmatrix(A)): - A = np.asarray(A) - - n, m = A.shape - - if np.issubdtype(A.dtype, np.complexfloating): - herm = lambda x: x.T.conjugate() - eigensolver = eigs - else: - herm = lambda x: x.T - eigensolver = eigsh - - if n > m: - X = A - XH = herm(A) - else: - XH = A - X = herm(A) - - def matvec_XH_X(x): - return XH.dot(X.dot(x)) - - XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype, - shape=(X.shape[1], X.shape[1])) - - eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2) - s = np.sqrt(eigvals) - - if n > m: - v = eigvec - u = X.dot(v) / s - vh = herm(v) - else: - u = eigvec - vh = herm(X.dot(u) / s) - - return u, s, vh diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/arpack.pyf.src b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/arpack.pyf.src deleted file mode 100644 index 5e9b5da8e4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/arpack.pyf.src +++ /dev/null @@ -1,207 +0,0 @@ -! -*- f90 -*- -! Note: the context of this file is case sensitive. - -python module _arpack ! 
in - <_rd=real,double precision> - <_cd=complex,double complex> - interface ! in :_arpack - subroutine saupd(ido,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! in :_arpack:src/ssaupd.f - integer intent(in,out):: ido - character*1 :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n),intent(in,out) :: resid - integer optional,check(shape(v,1)==ncv),depend(v) :: ncv=shape(v,1) - <_rd> dimension(ldv,ncv),intent(in,out) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11),intent(in,out) :: iparam - integer dimension(11),intent(in,out) :: ipntr - <_rd> dimension(3 * n),depend(n),intent(inout) :: workd - <_rd> dimension(lworkl),intent(inout) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine saupd - - subroutine seupd(rvec,howmny,select,d,z,ldz,sigma,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! 
in :_arpack:src/sseupd.f - logical :: rvec - character :: howmny - logical dimension(ncv) :: select - <_rd> dimension(nev),intent(out),depend(nev) :: d - <_rd> dimension(n,nev),intent(out),depend(nev) :: z - integer optional,check(shape(z,0)==ldz),depend(z) :: ldz=shape(z,0) - <_rd> :: sigma - character :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n) :: resid - integer optional,check(len(select)>=ncv),depend(select) :: ncv=len(select) - <_rd> dimension(ldv,ncv),depend(ncv) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(7) :: iparam - integer dimension(11) :: ipntr - <_rd> dimension(2 * n),depend(n) :: workd - <_rd> dimension(lworkl) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine seupd - - subroutine naupd(ido,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! in :_arpack:src/snaupd.f - integer intent(in,out):: ido - character*1 :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n),intent(in,out) :: resid - integer optional,check(shape(v,1)==ncv),depend(v) :: ncv=shape(v,1) - <_rd> dimension(ldv,ncv),intent(in,out) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11),intent(in,out) :: iparam - integer dimension(14),intent(in,out) :: ipntr - <_rd> dimension(3 * n),depend(n),intent(inout) :: workd - <_rd> dimension(lworkl),intent(inout) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine naupd - - subroutine neupd(rvec,howmny,select,dr,di,z,ldz,sigmar,sigmai,workev,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! 
in ARPACK/SRC/sneupd.f - logical :: rvec - character :: howmny - logical dimension(ncv) :: select - <_rd> dimension(nev + 1),depend(nev),intent(out) :: dr - <_rd> dimension(nev + 1),depend(nev),intent(out) :: di - <_rd> dimension(n,nev+1),depend(n,nev),intent(out) :: z - integer optional,check(shape(z,0)==ldz),depend(z) :: ldz=shape(z,0) - <_rd> :: sigmar - <_rd> :: sigmai - <_rd> dimension(3 * ncv),depend(ncv) :: workev - character :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n) :: resid - integer optional,check(len(select)>=ncv),depend(select) :: ncv=len(select) - <_rd> dimension(n,ncv),depend(n,ncv) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11) :: iparam - integer dimension(14) :: ipntr - <_rd> dimension(3 * n),depend(n):: workd - <_rd> dimension(lworkl) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine neupd - - subroutine naupd(ido,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,rwork,info) ! 
in :_arpack:src/snaupd.f - integer intent(in,out):: ido - character*1 :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_cd> dimension(n),intent(in,out) :: resid - integer optional,check(shape(v,1)==ncv),depend(v) :: ncv=shape(v,1) - <_cd> dimension(ldv,ncv),intent(in,out) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11),intent(in,out) :: iparam - integer dimension(14),intent(in,out) :: ipntr - <_cd> dimension(3 * n),depend(n),intent(inout) :: workd - <_cd> dimension(lworkl),intent(inout) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - <_rd> dimension(ncv),depend(ncv),intent(inout) :: rwork - integer intent(in,out):: info - end subroutine naupd - - subroutine neupd(rvec,howmny,select,d,z,ldz,sigma,workev,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,rwork,info) ! in :_arpack:src/sneupd.f - logical :: rvec - character :: howmny - logical dimension(ncv) :: select - <_cd> dimension(nev),depend(nev),intent(out) :: d - <_cd> dimension(n,nev), depend(nev),intent(out) :: z - integer optional,check(shape(z,0)==ldz),depend(z) :: ldz=shape(z,0) - <_cd> :: sigma - <_cd> dimension(3 * ncv),depend(ncv) :: workev - character :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_cd> dimension(n) :: resid - integer optional,check(len(select)>=ncv),depend(select) :: ncv=len(select) - <_cd> dimension(ldv,ncv),depend(ncv) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11) :: iparam - integer dimension(14) :: ipntr - <_cd> dimension(3 * n),depend(n) :: workd - <_cd> dimension(lworkl) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - <_rd> dimension(ncv),depend(ncv) :: rwork - integer intent(in,out):: info - end 
subroutine neupd - integer :: logfil - integer :: ndigit - integer :: mgetv0 - integer :: msaupd - integer :: msaup2 - integer :: msaitr - integer :: mseigt - integer :: msapps - integer :: msgets - integer :: mseupd - integer :: mnaupd - integer :: mnaup2 - integer :: mnaitr - integer :: mneigh - integer :: mnapps - integer :: mngets - integer :: mneupd - integer :: mcaupd - integer :: mcaup2 - integer :: mcaitr - integer :: mceigh - integer :: mcapps - integer :: mcgets - integer :: mceupd - integer :: nopx - integer :: nbx - integer :: nrorth - integer :: nitref - integer :: nrstrt - real :: tsaupd - real :: tsaup2 - real :: tsaitr - real :: tseigt - real :: tsgets - real :: tsapps - real :: tsconv - real :: tnaupd - real :: tnaup2 - real :: tnaitr - real :: tneigh - real :: tngets - real :: tnapps - real :: tnconv - real :: tcaupd - real :: tcaup2 - real :: tcaitr - real :: tceigh - real :: tcgets - real :: tcapps - real :: tcconv - real :: tmvopx - real :: tmvbx - real :: tgetv0 - real :: titref - real :: trvec - common /debug/ logfil,ndigit,mgetv0,msaupd,msaup2,msaitr,mseigt,msapps,msgets,mseupd,mnaupd,mnaup2,mnaitr,mneigh,mnapps,mngets,mneupd,mcaupd,mcaup2,mcaitr,mceigh,mcapps,mcgets,mceupd - common /timing/ nopx,nbx,nrorth,nitref,nrstrt,tsaupd,tsaup2,tsaitr,tseigt,tsgets,tsapps,tsconv,tnaupd,tnaup2,tnaitr,tneigh,tngets,tnapps,tnconv,tcaupd,tcaup2,tcaitr,tceigh,tcgets,tcapps,tcconv,tmvopx,tmvbx,tgetv0,titref,trvec - - end interface -end python module _arpack - -! This file was auto-generated with f2py (version:2_3198). -! 
See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/bento.info b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/bento.info deleted file mode 100644 index aa0b1b0a11..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/bento.info +++ /dev/null @@ -1,10 +0,0 @@ -HookFile: bscript - -Library: - CompiledLibrary: arpack - Sources: - ARPACK/SRC/*.f, - ARPACK/UTIL/*.f, - ARPACK/LAPACK/*.f - Extension: _arpack - Sources: arpack.pyf.src diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/bscript b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/bscript deleted file mode 100644 index e6bb1a7b41..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/bscript +++ /dev/null @@ -1,32 +0,0 @@ -import sys - -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - def builder(extension): - # FIXME: detect this properly - if sys.platform == "darwin": - use_c_calling = True - else: - use_c_calling = False - sources = extension.sources[:] - if use_c_calling: - sources.append("ARPACK/FWRAPPERS/veclib_cabi_f.f") - sources.append("ARPACK/FWRAPPERS/veclib_cabi_c.c") - else: - sources.append("ARPACK/FWRAPPERS/dummy.f") - return default_builder(extension, - features="c fc cstlib pyext bento", - source=sources, - # Hack to make use of claoadable flags for static archive - use="cshlib") - context.register_compiled_library_builder("arpack", builder) - - def builder(extension): - return default_builder(extension, - features="c fc pyext cshlib f2py bento", - use="arpack FLAPACK CLIB") - context.register_builder("_arpack", builder) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/info.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/info.py deleted file mode 100644 index f961b7528b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/info.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Eigenvalue solver using iterative methods. 
- -Find k eigenvectors and eigenvalues of a matrix A using the -Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_. - -These methods are most useful for large sparse matrices. - - - eigs(A,k) - - eigsh(A,k) - -References ----------- -.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ -.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: - Solution of Large Scale Eigenvalue Problems by Implicitly Restarted - Arnoldi Methods. SIAM, Philadelphia, PA, 1998. -""" -global_symbols = [] -postpone_import = 1 diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/setup.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/setup.py deleted file mode 100755 index 4e63641feb..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/setup.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python -import re -from os.path import join - -def needs_veclib_wrapper(info): - """Returns true if needs special veclib wrapper.""" - import re - r_accel = re.compile("Accelerate") - r_vec = re.compile("vecLib") - res = False - try: - tmpstr = info['extra_link_args'] - for i in tmpstr: - if r_accel.search(i) or r_vec.search(i): - res = True - except KeyError: - pass - - return res - -def configuration(parent_package='',top_path=None): - from numpy.distutils.system_info import get_info, NotFoundError - from numpy.distutils.misc_util import Configuration - - config = Configuration('arpack',parent_package,top_path) - - lapack_opt = get_info('lapack_opt') - - if not lapack_opt: - raise NotFoundError('no lapack/blas resources found') - - config = Configuration('arpack', parent_package, top_path) - - arpack_sources=[join('ARPACK','SRC', '*.f')] - arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) - arpack_sources.extend([join('ARPACK','LAPACK', '*.f')]) - - if needs_veclib_wrapper(lapack_opt): - arpack_sources += [join('ARPACK', 'FWRAPPERS', 'veclib_cabi_f.f'), - join('ARPACK', 'FWRAPPERS', 'veclib_cabi_c.c')] - else: - arpack_sources += [join('ARPACK', 
'FWRAPPERS', 'dummy.f')] - - config.add_library('arpack_scipy', sources=arpack_sources, - include_dirs=[join('ARPACK', 'SRC')], - depends = [join('ARPACK', 'FWRAPPERS', - 'veclib_cabi_f.f'), - join('ARPACK', 'FWRAPPERS', - 'veclib_cabi_c.c'), - join('ARPACK', 'FWRAPPERS', - 'dummy.f')]) - - - config.add_extension('_arpack', - sources='arpack.pyf.src', - libraries=['arpack_scipy'], - extra_info = lapack_opt - ) - - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/setupscons.py deleted file mode 100755 index 6257abbe6d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/setupscons.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('arpack',parent_package,top_path) -# -# lapack_opt = get_info('lapack_opt') -# -# if not lapack_opt: -# raise NotFoundError,'no lapack/blas resources found' -# -# config = Configuration('arpack', parent_package, top_path) -# -# arpack_sources=[join('ARPACK','SRC', '*.f')] -# arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) -# arpack_sources.extend([join('ARPACK','LAPACK', '*.f')]) -# -# config.add_library('arpack', sources=arpack_sources, -# include_dirs=[join('ARPACK', 'SRC')]) -# -# -# config.add_extension('_arpack', -# sources='arpack.pyf.src', -# libraries=['arpack'], -# extra_info = lapack_opt -# ) -# - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 
b/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py deleted file mode 100644 index d2dce7d5f4..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py +++ /dev/null @@ -1,566 +0,0 @@ -__usage__ = """ -To run tests locally: - python tests/test_arpack.py [-l] [-v] - -""" - -import numpy as np - -from numpy.testing import assert_allclose, \ - assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \ - assert_raises, verbose, assert_equal - -from numpy import array, finfo, argsort, dot, round, conj, random -from scipy.linalg import eig, eigh -from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, isspmatrix -from scipy.sparse.linalg import LinearOperator, aslinearoperator -from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \ - ArpackNoConvergence - -from scipy.linalg import svd - -# precision for tests -_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11} - -def _get_test_tolerance(type_char, mattype=None, sigma=None): - """ - Return tolerance values suitable for a given test: - - Parameters - ---------- - type_char : {'f', 'd', 'F', 'D'} - Data type in ARPACK eigenvalue problem - mattype : {csr_matrix, aslinearoperator, asarray}, optional - Linear operator type - - Returns - ------- - tol - Tolerance to pass to the ARPACK routine - rtol - Relative tolerance for outputs - atol - Absolute tolerance for outputs - - """ - - rtol = {'f': 3000 * np.finfo(np.float32).eps, - 'F': 3000 * np.finfo(np.float32).eps, - 'd': 2000 * np.finfo(np.float64).eps, - 'D': 2000 * np.finfo(np.float64).eps}[type_char] - atol = rtol - tol = 0 - - if mattype is aslinearoperator and type_char in ('f', 'F'): - # iterative methods in single precision: worse errors - # also: bump ARPACK tolerance so that the iterative method converges - tol = 30 * np.finfo(np.float32).eps - rtol *= 5 - - if sigma is not None: - # XXX: do not check the results in this case: the operation - # involves iterative single-precision inverses, which can - # 
fail on certain platforms. Still check the test runs, - # though. - atol = np.inf - rtol = np.inf - - if mattype is csr_matrix and type_char in ('f', 'F'): - # sparse in single precision: worse errors - rtol *= 5 - - return tol, rtol, atol - -def generate_matrix(N, complex=False, hermitian=False, - pos_definite=False, sparse=False): - M = np.random.random((N,N)) - if complex: - M = M + 1j * np.random.random((N,N)) - - if hermitian: - if pos_definite: - if sparse: - i = np.arange(N) - j = np.random.randint(N, size=N-2) - i, j = np.meshgrid(i, j) - M[i,j] = 0 - M = np.dot(M.conj(), M.T) - else: - M = np.dot(M.conj(), M.T) - if sparse: - i = np.random.randint(N, size=N * N / 4) - j = np.random.randint(N, size=N * N / 4) - ind = np.where(i == j) - j[ind] = (j[ind] + 1) % N - M[i,j] = 0 - M[j,i] = 0 - else: - if sparse: - i = np.random.randint(N, size=N * N / 2) - j = np.random.randint(N, size=N * N / 2) - M[i,j] = 0 - return M - - -def _aslinearoperator_with_dtype(m): - m = aslinearoperator(m) - if not hasattr(m, 'dtype'): - x = np.zeros(m.shape[1]) - m.dtype = (m * x).dtype - return m - - -def assert_allclose_cc(actual, desired, **kw): - """Almost equal or complex conjugates almost equal""" - try: - assert_allclose(actual, desired, **kw) - except: - assert_allclose(actual, conj(desired), **kw) - - -def argsort_which(eval, typ, k, which, - sigma=None, OPpart=None, mode=None): - """Return sorted indices of eigenvalues using the "which" keyword - from eigs and eigsh""" - if sigma is None: - reval = np.round(eval, decimals=_ndigits[typ]) - else: - if mode is None or mode=='normal': - if OPpart is None: - reval = 1. / (eval - sigma) - elif OPpart == 'r': - reval = 0.5 * (1. / (eval - sigma) - + 1. / (eval - np.conj(sigma))) - elif OPpart == 'i': - reval = -0.5j * (1. / (eval - sigma) - - 1. 
/ (eval - np.conj(sigma))) - elif mode=='cayley': - reval = (eval + sigma) / (eval - sigma) - elif mode=='buckling': - reval = eval / (eval - sigma) - else: - raise ValueError("mode='%s' not recognized" % mode) - - reval = np.round(reval, decimals=_ndigits[typ]) - - if which in ['LM', 'SM']: - ind = np.argsort(abs(reval)) - elif which in ['LR', 'SR', 'LA', 'SA', 'BE']: - ind = np.argsort(np.real(reval)) - elif which in ['LI', 'SI']: - # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? - if typ.islower(): - ind = np.argsort(abs(np.imag(reval))) - else: - ind = np.argsort(np.imag(reval)) - else: - raise ValueError("which='%s' is unrecognized" % which) - - if which in ['LM', 'LA', 'LR', 'LI']: - return ind[-k:] - elif which in ['SM', 'SA', 'SR', 'SI']: - return ind[:k] - elif which == 'BE': - return np.concatenate((ind[:k/2], ind[k/2-k:])) - - -def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None, - mattype=np.asarray, OPpart=None, mode='normal'): - general = ('bmat' in d) - - if symmetric: - eigs_func = eigsh - else: - eigs_func = eigs - - if general: - err = ("error for %s:general, typ=%s, which=%s, sigma=%s, " - "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__, - typ, which, sigma, - mattype.__name__, - OPpart, mode)) - else: - err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, " - "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__, - typ, which, sigma, - mattype.__name__, - OPpart, mode)) - - a = d['mat'].astype(typ) - ac = mattype(a) - - if general: - b = d['bmat'].astype(typ.lower()) - bc = mattype(b) - - # get exact eigenvalues - exact_eval = d['eval'].astype(typ.upper()) - ind = argsort_which(exact_eval, typ, k, which, - sigma, OPpart, mode) - exact_eval_a = exact_eval - exact_eval = exact_eval[ind] - - # compute arpack eigenvalues - kwargs = dict(which=which, v0=v0, sigma=sigma) - if eigs_func is eigsh: - kwargs['mode'] = mode - else: - kwargs['OPpart'] = OPpart - - # compute suitable tolerances - kwargs['tol'], rtol, 
atol = _get_test_tolerance(typ, mattype, sigma) - - # solve - if general: - try: - eval, evec = eigs_func(ac, k, bc, **kwargs) - except ArpackNoConvergence: - kwargs['maxiter'] = 20*a.shape[0] - eval, evec = eigs_func(ac, k, bc, **kwargs) - else: - try: - eval, evec = eigs_func(ac, k, **kwargs) - except ArpackNoConvergence: - kwargs['maxiter'] = 20*a.shape[0] - eval, evec = eigs_func(ac, k, **kwargs) - - ind = argsort_which(eval, typ, k, which, - sigma, OPpart, mode) - eval_a = eval - eval = eval[ind] - evec = evec[:,ind] - - # check eigenvalues - assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err) - - # check eigenvectors - LHS = np.dot(a, evec) - if general: - RHS = eval * np.dot(b, evec) - else: - RHS = eval * evec - - assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err) - -class DictWithRepr(dict): - def __init__(self, name): - self.name = name - def __repr__(self): - return "<%s>" % self.name - -class SymmetricParams: - def __init__(self): - self.eigs = eigsh - self.which = ['LM', 'SM', 'LA', 'SA', 'BE'] - self.mattypes = [csr_matrix, aslinearoperator, np.asarray] - self.sigmas_modes = {None : ['normal'], - 0.5 : ['normal', 'buckling', 'cayley']} - - #generate matrices - # these should all be float32 so that the eigenvalues - # are the same in float32 and float64 - N = 6 - np.random.seed(2300) - Ar = generate_matrix(N, hermitian=True, - pos_definite=True).astype('f').astype('d') - M = generate_matrix(N, hermitian=True, - pos_definite=True).astype('f').astype('d') - Ac = generate_matrix(N, hermitian=True, pos_definite=True, - complex=True).astype('F').astype('D') - v0 = np.random.random(N) - - # standard symmetric problem - SS = DictWithRepr("std-symmetric") - SS['mat'] = Ar - SS['v0'] = v0 - SS['eval'] = eigh(SS['mat'], eigvals_only=True) - - # general symmetric problem - GS = DictWithRepr("gen-symmetric") - GS['mat'] = Ar - GS['bmat'] = M - GS['v0'] = v0 - GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True) - - # standard 
hermitian problem - SH = DictWithRepr("std-hermitian") - SH['mat'] = Ac - SH['v0'] = v0 - SH['eval'] = eigh(SH['mat'], eigvals_only=True) - - # general hermitian problem - GH = DictWithRepr("gen-hermitian") - GH['mat'] = Ac - GH['bmat'] = M - GH['v0'] = v0 - GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True) - - self.real_test_cases = [SS, GS] - self.complex_test_cases = [SH, GH] - -class NonSymmetricParams: - def __init__(self): - self.eigs = eigs - self.which = ['LM', 'LR', 'LI']#, 'SM', 'LR', 'SR', 'LI', 'SI'] - self.mattypes = [csr_matrix, aslinearoperator, np.asarray] - self.sigmas_OPparts = {None : [None], - 0.1 : ['r'], - 0.1 + 0.1j : ['r', 'i']} - - #generate matrices - # these should all be float32 so that the eigenvalues - # are the same in float32 and float64 - N = 6 - np.random.seed(2300) - Ar = generate_matrix(N).astype('f').astype('d') - M = generate_matrix(N, hermitian=True, - pos_definite=True).astype('f').astype('d') - Ac = generate_matrix(N, complex=True).astype('F').astype('D') - v0 = np.random.random(N) - - # standard real nonsymmetric problem - SNR = DictWithRepr("std-real-nonsym") - SNR['mat'] = Ar - SNR['v0'] = v0 - SNR['eval'] = eig(SNR['mat'], left=False, right=False) - - # general real nonsymmetric problem - GNR = DictWithRepr("gen-real-nonsym") - GNR['mat'] = Ar - GNR['bmat'] = M - GNR['v0'] = v0 - GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False) - - # standard complex nonsymmetric problem - SNC = DictWithRepr("std-cmplx-nonsym") - SNC['mat'] = Ac - SNC['v0'] = v0 - SNC['eval'] = eig(SNC['mat'], left=False, right=False) - - # general complex nonsymmetric problem - GNC = DictWithRepr("gen-cmplx-nonsym") - GNC['mat'] = Ac - GNC['bmat'] = M - GNC['v0'] = v0 - GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False) - - self.real_test_cases = [SNR, GNR] - self.complex_test_cases = [SNC, GNC] - - -def test_symmetric_modes(): - params = SymmetricParams() - k = 2 - symmetric = True - for D in 
params.real_test_cases: - for typ in 'fd': - for which in params.which: - for mattype in params.mattypes: - for (sigma, modes) in params.sigmas_modes.iteritems(): - for mode in modes: - yield (eval_evec, symmetric, D, typ, k, which, - None, sigma, mattype, None, mode) - - -def test_hermitian_modes(): - params = SymmetricParams() - k = 2 - symmetric = True - for D in params.complex_test_cases: - for typ in 'FD': - for which in params.which: - if which == 'BE': continue # BE invalid for complex - for mattype in params.mattypes: - for sigma in params.sigmas_modes: - yield (eval_evec, symmetric, D, typ, k, which, - None, sigma, mattype) - - -def test_symmetric_starting_vector(): - params = SymmetricParams() - symmetric = True - for k in [1, 2, 3, 4, 5]: - for D in params.real_test_cases: - for typ in 'fd': - v0 = random.rand(len(D['v0'])).astype(typ) - yield (eval_evec, symmetric, D, typ, k, 'LM', v0) - - -def test_symmetric_no_convergence(): - np.random.seed(1234) - m = generate_matrix(30, hermitian=True, pos_definite=True) - tol, rtol, atol = _get_test_tolerance('d') - try: - w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol) - raise AssertionError("Spurious no-error exit") - except ArpackNoConvergence, err: - k = len(err.eigenvalues) - if k <= 0: - raise AssertionError("Spurious no-eigenvalues-found case") - w, v = err.eigenvalues, err.eigenvectors - assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol) - - -def test_real_nonsymmetric_modes(): - params = NonSymmetricParams() - k = 2 - symmetric = False - for D in params.real_test_cases: - for typ in 'fd': - for which in params.which: - for mattype in params.mattypes: - for sigma, OPparts in params.sigmas_OPparts.iteritems(): - for OPpart in OPparts: - yield (eval_evec, symmetric, D, typ, k, which, - None, sigma, mattype, OPpart) - - -def test_complex_nonsymmetric_modes(): - params = NonSymmetricParams() - k = 2 - symmetric = False - for D in params.complex_test_cases: - for typ in 'DF': - for which in 
params.which: - for mattype in params.mattypes: - for sigma in params.sigmas_OPparts: - yield (eval_evec, symmetric, D, typ, k, which, - None, sigma, mattype) - - -def test_standard_nonsymmetric_starting_vector(): - params = NonSymmetricParams() - sigma = None - symmetric = False - for k in [1, 2, 3, 4]: - for d in params.complex_test_cases: - for typ in 'FD': - A = d['mat'] - n = A.shape[0] - v0 = random.rand(n).astype(typ) - yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma) - - -def test_general_nonsymmetric_starting_vector(): - params = NonSymmetricParams() - sigma = None - symmetric = False - for k in [1, 2, 3, 4]: - for d in params.complex_test_cases: - for typ in 'FD': - A = d['mat'] - n = A.shape[0] - v0 = random.rand(n).astype(typ) - yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma) - - -def test_standard_nonsymmetric_no_convergence(): - np.random.seed(1234) - m = generate_matrix(30, complex=True) - tol, rtol, atol = _get_test_tolerance('d') - try: - w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol) - raise AssertionError("Spurious no-error exit") - except ArpackNoConvergence, err: - k = len(err.eigenvalues) - if k <= 0: - raise AssertionError("Spurious no-eigenvalues-found case") - w, v = err.eigenvalues, err.eigenvectors - for ww, vv in zip(w, v.T): - assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol) - - -def test_eigen_bad_shapes(): - # A is not square. 
- A = csc_matrix(np.zeros((2, 3))) - assert_raises(ValueError, eigs, A) - - -def test_eigen_bad_kwargs(): - # Test eigen on wrong keyword argument - A = csc_matrix(np.zeros((2, 2))) - assert_raises(ValueError, eigs, A, which='XX') - -def test_ticket_1459_arpack_crash(): - for dtype in [np.float32, np.float64]: - # XXX: this test does not seem to catch the issue for float32, - # but we made the same fix there, just to be sure - - N = 6 - k = 2 - - np.random.seed(2301) - A = np.random.random((N, N)).astype(dtype) - v0 = np.array([-0.71063568258907849895, -0.83185111795729227424, - -0.34365925382227402451, 0.46122533684552280420, - -0.58001341115969040629, -0.78844877570084292984e-01], - dtype=dtype) - - # Should not crash: - evals, evecs = eigs(A, k, v0=v0) - - -#---------------------------------------------------------------------- -# sparse SVD tests - -def sorted_svd(m, k): - #Compute svd of a dense matrix m, and return singular vectors/values - #sorted. - if isspmatrix(m): - m = m.todense() - u, s, vh = svd(m) - ii = np.argsort(s)[-k:] - - return u[:, ii], s[ii], vh[ii] - - -def svd_estimate(u, s, vh): - return np.dot(u, np.dot(np.diag(s), vh)) - - -def test_svd_simple_real(): - x = np.array([[1, 2, 3], - [3, 4, 3], - [1, 0, 2], - [0, 0, 1]], np.float) - y = np.array([[1, 2, 3, 8], - [3, 4, 3, 5], - [1, 0, 2, 3], - [0, 0, 1, 0]], np.float) - z = csc_matrix(x) - - for m in [x.T, x, y, z, z.T]: - for k in range(1, min(m.shape)): - u, s, vh = sorted_svd(m, k) - su, ss, svh = svds(m, k) - - m_hat = svd_estimate(u, s, vh) - sm_hat = svd_estimate(su, ss, svh) - - assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000) - - -def test_svd_simple_complex(): - x = np.array([[1, 2, 3], - [3, 4, 3], - [1 + 1j, 0, 2], - [0, 0, 1]], np.complex) - y = np.array([[1, 2, 3, 8 + 5j], - [3 - 2j, 4, 3, 5], - [1, 0, 2, 3], - [0, 0, 1, 0]], np.complex) - z = csc_matrix(x) - - for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]: - for k in range(1, min(m.shape) - 1): - u, s, vh 
= sorted_svd(m, k) - su, ss, svh = svds(m, k) - - m_hat = svd_estimate(u, s, vh) - sm_hat = svd_estimate(su, ss, svh) - - assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/bento.info b/scipy-0.10.1/scipy/sparse/linalg/eigen/bento.info deleted file mode 100644 index 89b05d1461..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/bento.info +++ /dev/null @@ -1,4 +0,0 @@ -Recurse: arpack - -Library: - Packages: arpack, lobpcg diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/info.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/info.py deleted file mode 100644 index 3e5a1559a6..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/info.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Sparse Eigenvalue Solvers -------------------------- - -The submodules of sparse.linalg.eigen: - 1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method - - -Examples --------- - - - -""" - -__docformat__ = "restructuredtext en" - -#TODO show examples - -postpone_import = 1 diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/__init__.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/__init__.py deleted file mode 100644 index 4310a47ebd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""LOBPCG eigensolver""" - -from info import __doc__ -import lobpcg -if __doc__ and lobpcg.__doc__: - __doc__ = '\n\n'.join( (lobpcg.__doc__, __doc__) ) -del lobpcg - -from lobpcg import * - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/info.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/info.py deleted file mode 100644 index 30afbfb7ec..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/info.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG) - -LOBPCG is a preconditioned 
eigensolver for large symmetric positive definite -(SPD) generalized eigenproblems. - -Call the function lobpcg - see help for lobpcg.lobpcg. See also lobpcg.as2d, -which can be used in the preconditioner (example below) - -Acknowledgements ----------------- - -lobpcg.py code was written by Robert Cimrman. Many thanks belong to Andrew -Knyazev, the author of the algorithm, for lots of advice and support. - - -Examples --------- - ->>> # Solve A x = lambda B x with constraints and preconditioning. ->>> n = 100 ->>> vals = [nm.arange( n, dtype = nm.float64 ) + 1] ->>> # Matrix A. ->>> operatorA = spdiags( vals, 0, n, n ) ->>> # Matrix B ->>> operatorB = nm.eye( n, n ) ->>> # Constraints. ->>> Y = nm.eye( n, 3 ) ->>> # Initial guess for eigenvectors, should have linearly independent ->>> # columns. Column dimension = number of requested eigenvalues. ->>> X = sc.rand( n, 3 ) ->>> # Preconditioner - inverse of A. ->>> ivals = [1./vals[0]] ->>> def precond( x ): - invA = spdiags( ivals, 0, n, n ) - y = invA * x - if sp.issparse( y ): - y = y.toarray() - - return as2d( y ) - - ->>> ->>> # Alternative way of providing the same preconditioner. ->>> #precond = spdiags( ivals, 0, n, n ) ->>> ->>> tt = time.clock() ->>> eigs, vecs = lobpcg( X, operatorA, operatorB, blockVectorY = Y, ->>> operatorT = precond, ->>> residualTolerance = 1e-4, maxIterations = 40, ->>> largest = False, verbosityLevel = 1 ) ->>> print 'solution time:', time.clock() - tt ->>> print eigs - - - -Notes ------ - -In the following ``n`` denotes the matrix size and ``m`` the number -of required eigenvalues (smallest or largest). - -The LOBPCG code internally solves eigenproblems of the size 3``m`` on every -iteration by calling the "standard" dense eigensolver, so if ``m`` is not -small enough compared to ``n``, it does not make sense to call the LOBPCG -code, but rather one should use the "standard" eigensolver, e.g. scipy or symeig -function in this case. 
If one calls the LOBPCG algorithm for 5``m``>``n``, -it will most likely break internally, so the code tries to call the standard -function instead. - -It is not that n should be large for the LOBPCG to work, but rather the -ratio ``n``/``m`` should be large. It you call the LOBPCG code with ``m``=1 -and ``n``=10, it should work, though ``n`` is small. The method is intended -for extremely large ``n``/``m``, see e.g., reference [28] in -http://arxiv.org/abs/0705.2626 - -The convergence speed depends basically on two factors: - -1. How well relatively separated the seeking eigenvalues are from the rest of - the eigenvalues. One can try to vary ``m`` to make this better. - -2. How well conditioned the problem is. This can be changed by using proper - preconditioning. For example, a rod vibration test problem (under tests - directory) is ill-conditioned for large ``n``, so convergence will be - slow, unless efficient preconditioning is used. For this specific problem, - a good simple preconditioner function would be a linear solve for A, which - is easy to code since A is tridiagonal. - - -References ----------- -A. V. Knyazev, Toward the Optimal Preconditioned Eigensolver: Locally Optimal -Block Preconditioned Conjugate Gradient Method. SIAM Journal on Scientific -Computing 23 (2001), no. 2, -pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124 - -A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov, Block Locally -Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) in hypre and PETSc -(2007). http://arxiv.org/abs/0705.2626 - -A. V. 
Knyazev's C and MATLAB implementations: -http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/ - -""" - -__docformat__ = "restructuredtext en" - -postpone_import = 1 diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py deleted file mode 100644 index 32fd79df06..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py +++ /dev/null @@ -1,575 +0,0 @@ -""" -Pure SciPy implementation of Locally Optimal Block Preconditioned Conjugate -Gradient Method (LOBPCG), see -http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/ - -License: BSD - -Authors: Robert Cimrman, Andrew Knyazev - -Examples in tests directory contributed by Nils Wagner. -""" - -import sys -import numpy as np -import scipy as sp - -from scipy.sparse.linalg import aslinearoperator, LinearOperator - -__all__ = ['lobpcg'] - -## try: -## from symeig import symeig -## except: -## raise ImportError('lobpcg requires symeig') - -def symeig( mtxA, mtxB = None, eigenvectors = True, select = None ): - import scipy.linalg as sla - import scipy.lib.lapack as ll - if select is None: - if np.iscomplexobj( mtxA ): - if mtxB is None: - fun = ll.get_lapack_funcs( ['heev'], arrays = (mtxA,) )[0] - else: - fun = ll.get_lapack_funcs( ['hegv'], arrays = (mtxA,) )[0] - else: - if mtxB is None: - fun = ll.get_lapack_funcs( ['syev'], arrays = (mtxA,) )[0] - else: - fun = ll.get_lapack_funcs( ['sygv'], arrays = (mtxA,) )[0] -## print fun - if mtxB is None: - out = fun( mtxA ) - else: - out = fun( mtxA, mtxB ) -## print w -## print v -## print info -## from symeig import symeig -## print symeig( mtxA, mtxB ) - else: - out = sla.eig( mtxA, mtxB, right = eigenvectors ) - w = out[0] - ii = np.argsort( w ) - w = w[slice( *select )] - if eigenvectors: - v = out[1][:,ii] - v = v[:,slice( *select )] - out = w, v, 0 - else: - out = w, 0 - - return out[:-1] - -def pause(): - raw_input() - -def save( ar, fileName ): - from numpy import savetxt - 
savetxt( fileName, ar, precision = 8 ) - -## -# 21.05.2007, c -def as2d( ar ): - """ - If the input array is 2D return it, if it is 1D, append a dimension, - making it a column vector. - """ - if ar.ndim == 2: - return ar - else: # Assume 1! - aux = np.array( ar, copy = False ) - aux.shape = (ar.shape[0], 1) - return aux - -class CallableLinearOperator(LinearOperator): - def __call__(self, x): - return self.matmat(x) - -def makeOperator( operatorInput, expectedShape ): - """Internal. Takes a dense numpy array or a sparse matrix or - a function and makes an operator performing matrix * blockvector - products. - - Examples - -------- - >>> A = makeOperator( arrayA, (n, n) ) - >>> vectorB = A( vectorX ) - - """ - if operatorInput is None: - def ident(x): - return x - operator = LinearOperator(expectedShape, ident, matmat=ident) - else: - operator = aslinearoperator(operatorInput) - - if operator.shape != expectedShape: - raise ValueError('operator has invalid shape') - - if sys.version_info[0] >= 3: - # special methods are looked up on the class -- so make a new one - operator.__class__ = CallableLinearOperator - else: - operator.__call__ = operator.matmat - - return operator - - - -def applyConstraints( blockVectorV, factYBY, blockVectorBY, blockVectorY ): - """Internal. Changes blockVectorV in place.""" - gramYBV = sp.dot( blockVectorBY.T, blockVectorV ) - import scipy.linalg as sla - tmp = sla.cho_solve( factYBY, gramYBV ) - blockVectorV -= sp.dot( blockVectorY, tmp ) - - -def b_orthonormalize( B, blockVectorV, - blockVectorBV = None, retInvR = False ): - """Internal.""" - import scipy.linalg as sla - if blockVectorBV is None: - if B is not None: - blockVectorBV = B( blockVectorV ) - else: - blockVectorBV = blockVectorV # Shared data!!! - gramVBV = sp.dot( blockVectorV.T, blockVectorBV ) - gramVBV = sla.cholesky( gramVBV ) - gramVBV = sla.inv( gramVBV, overwrite_a = True ) - # gramVBV is now R^{-1}. 
- blockVectorV = sp.dot( blockVectorV, gramVBV ) - if B is not None: - blockVectorBV = sp.dot( blockVectorBV, gramVBV ) - - if retInvR: - return blockVectorV, blockVectorBV, gramVBV - else: - return blockVectorV, blockVectorBV - -def lobpcg( A, X, - B=None, M=None, Y=None, - tol= None, maxiter=20, - largest = True, verbosityLevel = 0, - retLambdaHistory = False, retResidualNormsHistory = False ): - """Solve symmetric partial eigenproblems with optional preconditioning - - This function implements the Locally Optimal Block Preconditioned - Conjugate Gradient Method (LOBPCG). - - Parameters - ---------- - A : {sparse matrix, dense matrix, LinearOperator} - The symmetric linear operator of the problem, usually a - sparse matrix. Often called the "stiffness matrix". - X : array_like - Initial approximation to the k eigenvectors. If A has - shape=(n,n) then X should have shape shape=(n,k). - B : {dense matrix, sparse matrix, LinearOperator}, optional - the right hand side operator in a generalized eigenproblem. - by default, B = Identity - often called the "mass matrix" - M : {dense matrix, sparse matrix, LinearOperator}, optional - preconditioner to A; by default M = Identity - M should approximate the inverse of A - Y : array_like, optional - n-by-sizeY matrix of constraints, sizeY < n - The iterations will be performed in the B-orthogonal complement - of the column-space of Y. Y must be full rank. - - Returns - ------- - w : array - Array of k eigenvalues - v : array - An array of k eigenvectors. V has the same shape as X. - - Other Parameters - ---------------- - tol : scalar, optional - Solver tolerance (stopping criterion) - by default: tol=n*sqrt(eps) - maxiter: integer, optional - maximum number of iterations - by default: maxiter=min(n,20) - largest : boolean, optional - when True, solve for the largest eigenvalues, otherwise the smallest - verbosityLevel : integer, optional - controls solver output. default: verbosityLevel = 0. 
- retLambdaHistory : boolean, optional - whether to return eigenvalue history - retResidualNormsHistory : boolean, optional - whether to return history of residual norms - - - Notes - ----- - If both retLambdaHistory and retResidualNormsHistory are True, the - return tuple has the following format - (lambda, V, lambda history, residual norms history) - - """ - failureFlag = True - import scipy.linalg as sla - - blockVectorX = X - blockVectorY = Y - residualTolerance = tol - maxIterations = maxiter - - if blockVectorY is not None: - sizeY = blockVectorY.shape[1] - else: - sizeY = 0 - - # Block size. - if len(blockVectorX.shape) != 2: - raise ValueError('expected rank-2 array for argument X') - - n, sizeX = blockVectorX.shape - if sizeX > n: - raise ValueError('X column dimension exceeds the row dimension') - - A = makeOperator(A, (n,n)) - B = makeOperator(B, (n,n)) - M = makeOperator(M, (n,n)) - - if (n - sizeY) < (5 * sizeX): - #warn('The problem size is small compared to the block size.' 
\ - # ' Using dense eigensolver instead of LOBPCG.') - - if blockVectorY is not None: - raise NotImplementedError('symeig does not support constraints') - - if largest: - lohi = (n - sizeX, n) - else: - lohi = (1, sizeX) - - A_dense = A(np.eye(n)) - - if B is not None: - B_dense = B(np.eye(n)) - _lambda, eigBlockVector = symeig(A_dense, B_dense, select=lohi ) - else: - _lambda, eigBlockVector = symeig(A_dense, select=lohi ) - - return _lambda, eigBlockVector - - - if residualTolerance is None: - residualTolerance = np.sqrt( 1e-15 ) * n - - maxIterations = min( n, maxIterations ) - - if verbosityLevel: - aux = "Solving " - if B is None: - aux += "standard" - else: - aux += "generalized" - aux += " eigenvalue problem with" - if M is None: - aux += "out" - aux += " preconditioning\n\n" - aux += "matrix size %d\n" % n - aux += "block size %d\n\n" % sizeX - if blockVectorY is None: - aux += "No constraints\n\n" - else: - if sizeY > 1: - aux += "%d constraints\n\n" % sizeY - else: - aux += "%d constraint\n\n" % sizeY - print aux - - ## - # Apply constraints to X. - if blockVectorY is not None: - - if B is not None: - blockVectorBY = B( blockVectorY ) - else: - blockVectorBY = blockVectorY - - # gramYBY is a dense array. - gramYBY = sp.dot( blockVectorY.T, blockVectorBY ) - try: - # gramYBY is a Cholesky factor from now on... - gramYBY = sla.cho_factor( gramYBY ) - except: - raise ValueError('cannot handle linearly dependent constraints') - - applyConstraints( blockVectorX, gramYBY, blockVectorBY, blockVectorY ) - - ## - # B-orthonormalize X. - blockVectorX, blockVectorBX = b_orthonormalize( B, blockVectorX ) - - ## - # Compute the initial Ritz vectors: solve the eigenproblem. - blockVectorAX = A( blockVectorX ) - gramXAX = sp.dot( blockVectorX.T, blockVectorAX ) - # gramXBX is X^T * X. 
- gramXBX = sp.dot( blockVectorX.T, blockVectorX ) - - _lambda, eigBlockVector = symeig( gramXAX ) - ii = np.argsort( _lambda )[:sizeX] - if largest: - ii = ii[::-1] - _lambda = _lambda[ii] - - eigBlockVector = np.asarray( eigBlockVector[:,ii] ) - blockVectorX = sp.dot( blockVectorX, eigBlockVector ) - blockVectorAX = sp.dot( blockVectorAX, eigBlockVector ) - if B is not None: - blockVectorBX = sp.dot( blockVectorBX, eigBlockVector ) - - ## - # Active index set. - activeMask = np.ones( (sizeX,), dtype = np.bool ) - - lambdaHistory = [_lambda] - residualNormsHistory = [] - - previousBlockSize = sizeX - ident = np.eye( sizeX, dtype = A.dtype ) - ident0 = np.eye( sizeX, dtype = A.dtype ) - - ## - # Main iteration loop. - for iterationNumber in xrange( maxIterations ): - if verbosityLevel > 0: - print 'iteration %d' % iterationNumber - - aux = blockVectorBX * _lambda[np.newaxis,:] - blockVectorR = blockVectorAX - aux - - aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 ) - residualNorms = np.sqrt( aux ) - - residualNormsHistory.append( residualNorms ) - - ii = np.where( residualNorms > residualTolerance, True, False ) - activeMask = activeMask & ii - if verbosityLevel > 2: - print activeMask - - currentBlockSize = activeMask.sum() - if currentBlockSize != previousBlockSize: - previousBlockSize = currentBlockSize - ident = np.eye( currentBlockSize, dtype = A.dtype ) - - if currentBlockSize == 0: - failureFlag = False # All eigenpairs converged. 
- break - - if verbosityLevel > 0: - print 'current block size:', currentBlockSize - print 'eigenvalue:', _lambda - print 'residual norms:', residualNorms - if verbosityLevel > 10: - print eigBlockVector - - activeBlockVectorR = as2d( blockVectorR[:,activeMask] ) - - if iterationNumber > 0: - activeBlockVectorP = as2d( blockVectorP [:,activeMask] ) - activeBlockVectorAP = as2d( blockVectorAP[:,activeMask] ) - activeBlockVectorBP = as2d( blockVectorBP[:,activeMask] ) - - if M is not None: - # Apply preconditioner T to the active residuals. - activeBlockVectorR = M( activeBlockVectorR ) - - ## - # Apply constraints to the preconditioned residuals. - if blockVectorY is not None: - applyConstraints( activeBlockVectorR, - gramYBY, blockVectorBY, blockVectorY ) - - ## - # B-orthonormalize the preconditioned residuals. - - aux = b_orthonormalize( B, activeBlockVectorR ) - activeBlockVectorR, activeBlockVectorBR = aux - - activeBlockVectorAR = A( activeBlockVectorR ) - - if iterationNumber > 0: - aux = b_orthonormalize( B, activeBlockVectorP, - activeBlockVectorBP, retInvR = True ) - activeBlockVectorP, activeBlockVectorBP, invR = aux - activeBlockVectorAP = sp.dot( activeBlockVectorAP, invR ) - - ## - # Perform the Rayleigh Ritz Procedure: - # Compute symmetric Gram matrices: - - xaw = sp.dot( blockVectorX.T, activeBlockVectorAR ) - waw = sp.dot( activeBlockVectorR.T, activeBlockVectorAR ) - xbw = sp.dot( blockVectorX.T, activeBlockVectorBR ) - - if iterationNumber > 0: - xap = sp.dot( blockVectorX.T, activeBlockVectorAP ) - wap = sp.dot( activeBlockVectorR.T, activeBlockVectorAP ) - pap = sp.dot( activeBlockVectorP.T, activeBlockVectorAP ) - xbp = sp.dot( blockVectorX.T, activeBlockVectorBP ) - wbp = sp.dot( activeBlockVectorR.T, activeBlockVectorBP ) - - gramA = np.bmat( [[np.diag( _lambda ), xaw, xap], - [ xaw.T, waw, wap], - [ xap.T, wap.T, pap]] ) - - gramB = np.bmat( [[ident0, xbw, xbp], - [ xbw.T, ident, wbp], - [ xbp.T, wbp.T, ident]] ) - else: - gramA = np.bmat( 
[[np.diag( _lambda ), xaw], - [ xaw.T, waw]] ) - gramB = np.bmat( [[ident0, xbw], - [ xbw.T, ident]] ) - - try: - assert np.allclose( gramA.T, gramA ) - except: - print gramA.T - gramA - raise - - try: - assert np.allclose( gramB.T, gramB ) - except: - print gramB.T - gramB - raise - - if verbosityLevel > 10: - save( gramA, 'gramA' ) - save( gramB, 'gramB' ) - - ## - # Solve the generalized eigenvalue problem. -# _lambda, eigBlockVector = la.eig( gramA, gramB ) - _lambda, eigBlockVector = symeig( gramA, gramB ) - ii = np.argsort( _lambda )[:sizeX] - if largest: - ii = ii[::-1] - if verbosityLevel > 10: - print ii - - _lambda = _lambda[ii].astype( np.float64 ) - eigBlockVector = np.asarray( eigBlockVector[:,ii].astype( np.float64 ) ) - - lambdaHistory.append( _lambda ) - - if verbosityLevel > 10: - print 'lambda:', _lambda -## # Normalize eigenvectors! -## aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 ) -## eigVecNorms = np.sqrt( aux ) -## eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:] -# eigBlockVector, aux = b_orthonormalize( B, eigBlockVector ) - - if verbosityLevel > 10: - print eigBlockVector - pause() - - ## - # Compute Ritz vectors. 
- if iterationNumber > 0: - eigBlockVectorX = eigBlockVector[:sizeX] - eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize] - eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:] - - pp = sp.dot( activeBlockVectorR, eigBlockVectorR ) - pp += sp.dot( activeBlockVectorP, eigBlockVectorP ) - - app = sp.dot( activeBlockVectorAR, eigBlockVectorR ) - app += sp.dot( activeBlockVectorAP, eigBlockVectorP ) - - bpp = sp.dot( activeBlockVectorBR, eigBlockVectorR ) - bpp += sp.dot( activeBlockVectorBP, eigBlockVectorP ) - else: - eigBlockVectorX = eigBlockVector[:sizeX] - eigBlockVectorR = eigBlockVector[sizeX:] - - pp = sp.dot( activeBlockVectorR, eigBlockVectorR ) - app = sp.dot( activeBlockVectorAR, eigBlockVectorR ) - bpp = sp.dot( activeBlockVectorBR, eigBlockVectorR ) - - if verbosityLevel > 10: - print pp - print app - print bpp - pause() - - blockVectorX = sp.dot( blockVectorX, eigBlockVectorX ) + pp - blockVectorAX = sp.dot( blockVectorAX, eigBlockVectorX ) + app - blockVectorBX = sp.dot( blockVectorBX, eigBlockVectorX ) + bpp - - blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp - - aux = blockVectorBX * _lambda[np.newaxis,:] - blockVectorR = blockVectorAX - aux - - aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 ) - residualNorms = np.sqrt( aux ) - - - if verbosityLevel > 0: - print 'final eigenvalue:', _lambda - print 'final residual norms:', residualNorms - - if retLambdaHistory: - if retResidualNormsHistory: - return _lambda, blockVectorX, lambdaHistory, residualNormsHistory - else: - return _lambda, blockVectorX, lambdaHistory - else: - if retResidualNormsHistory: - return _lambda, blockVectorX, residualNormsHistory - else: - return _lambda, blockVectorX - -########################################################################### -if __name__ == '__main__': - from scipy.sparse import spdiags, speye, issparse - import time - -## def B( vec ): -## return vec - - n = 100 - vals = [np.arange( n, dtype = np.float64 ) + 1] - A = 
spdiags( vals, 0, n, n ) - B = speye( n, n ) -# B[0,0] = 0 - B = np.eye( n, n ) - Y = np.eye( n, 3 ) - - -# X = sp.rand( n, 3 ) - xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'} - X = np.fromfile( xfile[n], dtype = np.float64, sep = ' ' ) - X.shape = (n, 3) - - ivals = [1./vals[0]] - def precond( x ): - invA = spdiags( ivals, 0, n, n ) - y = invA * x - if issparse( y ): - y = y.toarray() - - return as2d( y ) - - precond = spdiags( ivals, 0, n, n ) -# precond = None - tt = time.clock() -# B = None - eigs, vecs = lobpcg( X, A, B, blockVectorY = Y, - M = precond, - residualTolerance = 1e-4, maxIterations = 40, - largest = False, verbosityLevel = 1 ) - print 'solution time:', time.clock() - tt - - print vecs - print eigs diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/setup.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/setup.py deleted file mode 100755 index 97778f5394..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('lobpcg',parent_package,top_path) - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/setupscons.py deleted file mode 100755 index 97778f5394..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/setupscons.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - - config = Configuration('lobpcg',parent_package,top_path) - config.add_data_dir('tests') - - return config 
- -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py deleted file mode 100644 index a47fa4373a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py +++ /dev/null @@ -1,64 +0,0 @@ -from scipy import * -from scipy.sparse.linalg import lobpcg -from symeig import symeig -from pylab import plot, show, legend, xlabel, ylabel -set_printoptions(precision=3,linewidth=90) -import time - -def test(n): - x = arange(1,n+1) - B = diag(1./x) - y = arange(n-1,0,-1) - z = arange(2*n-1,0,-2) - A = diag(z)-diag(y,-1)-diag(y,1) - return A,B - -def as2d( ar ): - if ar.ndim == 2: - return ar - else: # Assume 1! - aux = nm.array( ar, copy = False ) - aux.shape = (ar.shape[0], 1) - return aux - -def precond(x): - y= linalg.cho_solve((LorU, lower),x) - return as2d(y) - -m = 10 # Blocksize -N = array(([128,256,512,1024,2048])) # Increasing matrix size - -data1=[] -data2=[] - -for n in N: - print '******', n - A,B = test(n) # Mikota pair - X = rand(n,m) - X = linalg.orth(X) - - tt = time.clock() - (LorU, lower) = linalg.cho_factor(A, lower=0, overwrite_a=0) - eigs,vecs = lobpcg.lobpcg(X,A,B,operatorT = precond, - residualTolerance = 1e-4, maxIterations = 40) - data1.append(time.clock()-tt) - eigs = sort(eigs) - print - print 'Results by LOBPCG' - print - print n,eigs - - tt = time.clock() - w,v=symeig(A,B,range=(1,m)) - data2.append(time.clock()-tt) - print - print 'Results by symeig' - print - print n, w - -xlabel(r'Size $n$') -ylabel(r'Elapsed time $t$') -plot(N,data1,label='LOBPCG') -plot(N,data2,label='SYMEIG') -legend() -show() diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py deleted file mode 100644 index 6392b16b87..0000000000 --- 
a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py +++ /dev/null @@ -1,51 +0,0 @@ -from scipy import array, arange, ones, sort, cos, pi, rand, \ - set_printoptions, r_ -from scipy.sparse.linalg import lobpcg -from scipy import sparse -from pylab import loglog, show, xlabel, ylabel, title -set_printoptions(precision=8,linewidth=90) -import time - -def sakurai(n): - """ Example taken from - T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima - A moment-based method for large-scale generalized eigenvalue problems - Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """ - - A = sparse.eye( n, n ) - d0 = array(r_[5,6*ones(n-2),5]) - d1 = -4*ones(n) - d2 = ones(n) - B = sparse.spdiags([d2,d1,d0,d1,d2],[-2,-1,0,1,2],n,n) - - k = arange(1,n+1) - w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4))) # exact eigenvalues - - return A,B, w_ex - -m = 3 # Blocksize - -# -# Large scale -# -n = 2500 -A,B, w_ex = sakurai(n) # Mikota pair -X = rand(n,m) -data=[] -tt = time.clock() -eigs,vecs, resnh = lobpcg(X,A,B, residualTolerance = 1e-6, maxIterations =500, retResidualNormsHistory=1) -data.append(time.clock()-tt) -print 'Results by LOBPCG for n='+str(n) -print -print eigs -print -print 'Exact eigenvalues' -print -print w_ex[:m] -print -print 'Elapsed time',data[0] -loglog(arange(1,n+1),w_ex,'b.') -xlabel(r'Number $i$') -ylabel(r'$\lambda_i$') -title('Eigenvalue distribution') -show() diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py deleted file mode 100644 index 40c68f04a3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -""" Test functions for the sparse.linalg.eigen.lobpcg module -""" - -import numpy -from numpy.testing import assert_almost_equal, run_module_suite - -from scipy import arange, ones, rand, set_printoptions, r_, diag, linalg, eye -from scipy.linalg import eig -from 
scipy.sparse.linalg.eigen.lobpcg import lobpcg - - -set_printoptions(precision=3,linewidth=90) - - - -def ElasticRod(n): - # Fixed-free elastic rod - L = 1.0 - le=L/n - rho = 7.85e3 - S = 1.e-4 - E = 2.1e11 - mass = rho*S*le/6. - k = E*S/le - A = k*(diag(r_[2.*ones(n-1),1])-diag(ones(n-1),1)-diag(ones(n-1),-1)) - B = mass*(diag(r_[4.*ones(n-1),2])+diag(ones(n-1),1)+diag(ones(n-1),-1)) - return A,B - -def MikotaPair(n): - # Mikota pair acts as a nice test since the eigenvalues - # are the squares of the integers n, n=1,2,... - x = arange(1,n+1) - B = diag(1./x) - y = arange(n-1,0,-1) - z = arange(2*n-1,0,-2) - A = diag(z)-diag(y,-1)-diag(y,1) - return A,B - - -def compare_solutions(A,B,m): - n = A.shape[0] - - numpy.random.seed(0) - - V = rand(n,m) - X = linalg.orth(V) - - eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30) - eigs.sort() - - #w,v = symeig(A,B) - w,v = eig(A,b=B) - w.sort() - - assert_almost_equal(w[:m/2],eigs[:m/2],decimal=2) - - #from pylab import plot, show, legend, xlabel, ylabel - #plot(arange(0,len(w[:m])),w[:m],'bx',label='Results by symeig') - #plot(arange(0,len(eigs)),eigs,'r+',label='Results by lobpcg') - #legend() - #xlabel(r'Eigenvalue $i$') - #ylabel(r'$\lambda_i$') - #show() - -def test_Small(): - A,B = ElasticRod(10) - compare_solutions(A,B,10) - A,B = MikotaPair(10) - compare_solutions(A,B,10) - -def test_ElasticRod(): - A,B = ElasticRod(100) - compare_solutions(A,B,20) - -def test_MikotaPair(): - A,B = MikotaPair(100) - compare_solutions(A,B,20) - -def test_trivial(): - n = 5 - X = ones((n, 1)) - A = eye(n) - compare_solutions(A, None, n) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/setup.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/setup.py deleted file mode 100755 index f5938d14ec..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from 
numpy.distutils.misc_util import Configuration - - config = Configuration('eigen',parent_package,top_path) - - config.add_subpackage(('arpack')) - config.add_subpackage(('lobpcg')) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/eigen/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/eigen/setupscons.py deleted file mode 100755 index bd10842a5f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/eigen/setupscons.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('eigen',parent_package,top_path, setup_name = 'setupscons.py') - - config.add_subpackage(('arpack')) - config.add_subpackage(('lobpcg')) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/info.py b/scipy-0.10.1/scipy/sparse/linalg/info.py deleted file mode 100644 index e7bd2749e3..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/info.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -================================================== -Sparse linear algebra (:mod:`scipy.sparse.linalg`) -================================================== - -.. currentmodule:: scipy.sparse.linalg - -Abstract linear operators -------------------------- - -.. autosummary:: - :toctree: generated/ - - LinearOperator -- abstract representation of a linear operator - aslinearoperator -- convert an object to an abstract linear operator - -Solving linear problems ------------------------ - -Direct methods for linear equation systems: - -.. 
autosummary:: - :toctree: generated/ - - spsolve -- Solve the sparse linear system Ax=b - factorized -- Pre-factorize matrix to a function solving a linear system - -Iterative methods for linear equation systems: - -.. autosummary:: - :toctree: generated/ - - bicg -- Use BIConjugate Gradient iteration to solve A x = b - bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b - cg -- Use Conjugate Gradient iteration to solve A x = b - cgs -- Use Conjugate Gradient Squared iteration to solve A x = b - gmres -- Use Generalized Minimal RESidual iteration to solve A x = b - lgmres -- Solve a matrix equation using the LGMRES algorithm - minres -- Use MINimum RESidual iteration to solve Ax = b - qmr -- Use Quasi-Minimal Residual iteration to solve A x = b - -Iterative methods for least-squares problems: - -.. autosummary:: - :toctree: generated/ - - lsqr -- Find the least-squares solution to a sparse linear equation system - -Matrix factorizations ---------------------- - -Eigenvalue problems: - -.. autosummary:: - :toctree: generated/ - - eigs -- Find k eigenvalues and eigenvectors of the square matrix A - eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix - lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning - -Singular values problems: - -.. autosummary:: - :toctree: generated/ - - svds -- Compute k singular values/vectors for a sparse matrix - -Complete or incomplete LU factorizations - -.. autosummary:: - :toctree: generated/ - - splu -- Compute a LU decomposition for a sparse matrix - spilu -- Compute an incomplete LU decomposition for a sparse matrix - - -Exceptions ----------- - -.. 
autosummary:: - :toctree: generated/ - - ArpackNoConvergence - ArpackError - -""" - -__docformat__ = "restructuredtext en" - -postpone_import = 1 diff --git a/scipy-0.10.1/scipy/sparse/linalg/interface.py b/scipy-0.10.1/scipy/sparse/linalg/interface.py deleted file mode 100644 index 687136e5ae..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/interface.py +++ /dev/null @@ -1,280 +0,0 @@ -import numpy as np -from scipy.sparse.sputils import isshape -from scipy.sparse import isspmatrix - -__all__ = ['LinearOperator', 'aslinearoperator'] - -class LinearOperator: - """Common interface for performing matrix vector products - - Many iterative methods (e.g. cg, gmres) do not need to know the - individual entries of a matrix to solve a linear system A*x=b. - Such solvers only require the computation of matrix vector - products, A*v where v is a dense vector. This class serves as - an abstract interface between iterative solvers and matrix-like - objects. - - Parameters - ---------- - shape : tuple - Matrix dimensions (M,N) - matvec : callable f(v) - Returns returns A * v. - - Other Parameters - ---------------- - rmatvec : callable f(v) - Returns A^H * v, where A^H is the conjugate transpose of A. - matmat : callable f(V) - Returns A * V, where V is a dense matrix with dimensions (N,K). - dtype : dtype - Data type of the matrix. - - See Also - -------- - aslinearoperator : Construct LinearOperators - - Notes - ----- - The user-defined matvec() function must properly handle the case - where v has shape (N,) as well as the (N,1) case. The shape of - the return type is handled internally by LinearOperator. - - Examples - -------- - >>> from scipy.sparse.linalg import LinearOperator - >>> from scipy import * - >>> def mv(v): - ... return array([ 2*v[0], 3*v[1]]) - ... 
- >>> A = LinearOperator( (2,2), matvec=mv ) - >>> A - <2x2 LinearOperator with unspecified dtype> - >>> A.matvec( ones(2) ) - array([ 2., 3.]) - >>> A * ones(2) - array([ 2., 3.]) - - """ - def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None): - - shape = tuple(shape) - - if not isshape(shape): - raise ValueError('invalid shape') - - self.shape = shape - self._matvec = matvec - - if rmatvec is None: - def rmatvec(v): - raise NotImplementedError('rmatvec is not defined') - self.rmatvec = rmatvec - else: - self.rmatvec = rmatvec - - if matmat is not None: - # matvec each column of V - self._matmat = matmat - - if dtype is not None: - self.dtype = np.dtype(dtype) - - - def _matmat(self, X): - """Default matrix-matrix multiplication handler. Falls back on - the user-defined matvec() routine, which is always provided. - """ - - return np.hstack( [ self.matvec(col.reshape(-1,1)) for col in X.T ] ) - - - def matvec(self, x): - """Matrix-vector multiplication - - Performs the operation y=A*x where A is an MxN linear - operator and x is a column vector or rank-1 array. - - Parameters - ---------- - x : {matrix, ndarray} - An array with shape (N,) or (N,1). - - Returns - ------- - y : {matrix, ndarray} - A matrix or ndarray with shape (M,) or (M,1) depending - on the type and shape of the x argument. - - Notes - ----- - This matvec wraps the user-specified matvec routine to ensure that - y has the correct shape and type. 
- - """ - - x = np.asanyarray(x) - - M,N = self.shape - - if x.shape != (N,) and x.shape != (N,1): - raise ValueError('dimension mismatch') - - y = self._matvec(x) - - if isinstance(x, np.matrix): - y = np.asmatrix(y) - else: - y = np.asarray(y) - - if x.ndim == 1: - y = y.reshape(M) - elif x.ndim == 2: - y = y.reshape(M,1) - else: - raise ValueError('invalid shape returned by user-defined matvec()') - - - return y - - - def matmat(self, X): - """Matrix-matrix multiplication - - Performs the operation y=A*X where A is an MxN linear - operator and X dense N*K matrix or ndarray. - - Parameters - ---------- - X : {matrix, ndarray} - An array with shape (N,K). - - Returns - ------- - Y : {matrix, ndarray} - A matrix or ndarray with shape (M,K) depending on - the type of the X argument. - - Notes - ----- - This matmat wraps any user-specified matmat routine to ensure that - y has the correct type. - - """ - - X = np.asanyarray(X) - - if X.ndim != 2: - raise ValueError('expected rank-2 ndarray or matrix') - - M,N = self.shape - - if X.shape[0] != N: - raise ValueError('dimension mismatch') - - Y = self._matmat(X) - - if isinstance(Y, np.matrix): - Y = np.asmatrix(Y) - - return Y - - - def __mul__(self,x): - x = np.asarray(x) - - if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1: - return self.matvec(x) - elif x.ndim == 2: - return self.matmat(x) - else: - raise ValueError('expected rank-1 or rank-2 array or matrix') - - - def __repr__(self): - M,N = self.shape - if hasattr(self,'dtype'): - dt = 'dtype=' + str(self.dtype) - else: - dt = 'unspecified dtype' - - return '<%dx%d LinearOperator with %s>' % (M,N,dt) - -class MatrixLinearOperator(LinearOperator): - def __init__(self, A): - LinearOperator.__init__(self, shape=A.shape, dtype=A.dtype, - matvec=None, rmatvec=self.rmatvec) - self.matvec = A.dot - self.matmat = A.dot - self.__mul__ = A.dot - self.A = A - self.A_conj = None - - def rmatvec(self, x): - if self.A_conj is None: - self.A_conj = self.A.T.conj() - return 
self.A_conj.dot(x) - -class IdentityOperator(LinearOperator): - def __init__(self, shape, dtype): - LinearOperator.__init__(self, shape=shape, dtype=dtype, matvec=None, - rmatvec=self.rmatvec) - - def matvec(self, x): - return x - - def rmatvec(self, x): - return x - - def matmat(self, x): - return x - - def __mul__(self, x): - return x - -def aslinearoperator(A): - """Return A as a LinearOperator. - - 'A' may be any of the following types: - - ndarray - - matrix - - sparse matrix (e.g. csr_matrix, lil_matrix, etc.) - - LinearOperator - - An object with .shape and .matvec attributes - - See the LinearOperator documentation for additonal information. - - Examples - -------- - >>> from scipy import matrix - >>> M = matrix( [[1,2,3],[4,5,6]], dtype='int32' ) - >>> aslinearoperator( M ) - <2x3 LinearOperator with dtype=int32> - - """ - if isinstance(A, LinearOperator): - return A - - elif isinstance(A, np.ndarray) or isinstance(A, np.matrix): - if A.ndim > 2: - raise ValueError('array must have rank <= 2') - A = np.atleast_2d(np.asarray(A)) - return MatrixLinearOperator(A) - - elif isspmatrix(A): - return MatrixLinearOperator(A) - - else: - if hasattr(A, 'shape') and hasattr(A, 'matvec'): - rmatvec = None - dtype = None - - if hasattr(A, 'rmatvec'): - rmatvec = A.rmatvec - if hasattr(A, 'dtype'): - dtype = A.dtype - return LinearOperator(A.shape, A.matvec, - rmatvec=rmatvec, dtype=dtype) - - else: - raise TypeError('type not understood') diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/SConscript b/scipy-0.10.1/scipy/sparse/linalg/isolve/SConscript deleted file mode 100644 index 8ec27fdca8..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/SConscript +++ /dev/null @@ -1,73 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -from os.path import join as pjoin, splitext - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK - -from numscons import write_info, IsAccelerate, IsVeclib - -env = 
GetNumpyEnvironment(ARGUMENTS) -env.Tool('f2py') -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckLAPACK() -if not st: - raise RuntimeError("no lapack found, necessary for isolve module") - -config.Finish() -write_info(env) - -#-------------------- -# iterative methods -#-------------------- -methods = ['BiCGREVCOM.f.src', - 'BiCGSTABREVCOM.f.src', - 'CGREVCOM.f.src', - 'CGSREVCOM.f.src', -# 'ChebyREVCOM.f.src', - 'GMRESREVCOM.f.src', -# 'JacobiREVCOM.f.src', - 'QMRREVCOM.f.src', -# 'SORREVCOM.f.src' - ] -Util = ['STOPTEST2.f.src','getbreak.f.src'] -raw_sources = methods + Util + ['_iterative.pyf.src'] - -sources = [] -for method in raw_sources: - target = splitext(method)[0] - res = env.FromFTemplate(target, pjoin('iterative', method)) - sources.append(res[0]) - -#-------------------------------------------------- -# BLAS wrapper to fix ABI incompatibilities on OS X -# ------------------------------------------------- -use_c_calling = IsAccelerate(env, "lapack") or IsVeclib(env, "lapack") - -if use_c_calling: - blas_wrapper = ['veclib_cabi_c.c', 'veclib_cabi_f.f'] -else: - blas_wrapper = ['dummy.f'] - -blas_wrapper = [pjoin('iterative', 'FWRAPPERS', wrapper) \ - for wrapper in blas_wrapper] - -env.AppendUnique(LIBPATH = ['.']) -veclibwrap_lib = env.DistutilsStaticExtLibrary('_veclibwrap', source=blas_wrapper) -env.Prepend(LIBS='_veclibwrap') - -env.NumpyPythonExtension('_iterative', source=sources) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/SConstruct b/scipy-0.10.1/scipy/sparse/linalg/isolve/SConstruct deleted file mode 100644 index 
a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/__init__.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/__init__.py deleted file mode 100644 index 71c255c93c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"Iterative Solvers for Sparse Linear Systems" - -#from info import __doc__ -from iterative import * -from minres import minres -from lgmres import lgmres -from lsqr import lsqr - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/bento.info b/scipy-0.10.1/scipy/sparse/linalg/isolve/bento.info deleted file mode 100644 index 07dd00e8e7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/bento.info +++ /dev/null @@ -1,14 +0,0 @@ -HookFile: bscript - -Library: - Extension: _iterative - Sources: - iterative/BiCGREVCOM.f.src, - iterative/BiCGSTABREVCOM.f.src, - iterative/CGREVCOM.f.src, - iterative/CGSREVCOM.f.src, - iterative/GMRESREVCOM.f.src, - iterative/QMRREVCOM.f.src, - iterative/STOPTEST2.f.src, - iterative/getbreak.f.src, - iterative/_iterative.pyf.src diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/bscript b/scipy-0.10.1/scipy/sparse/linalg/isolve/bscript deleted file mode 100644 index 50d18074db..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/bscript +++ /dev/null @@ -1,24 +0,0 @@ -import sys - -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - def builder(extension): - if sys.platform == "darwin": - use_c_calling = True - else: - use_c_calling = False - sources = extension.sources[:] - if use_c_calling: - sources.append("iterative/FWRAPPERS/veclib_cabi_f.f") - 
sources.append("iterative/FWRAPPERS/veclib_cabi_c.c") - else: - sources.append("iterative/FWRAPPERS/dummy.f") - return default_builder(extension, - features="c pyext fc bento cshlib f2py", - source=sources, - use="FLAPACK") - context.register_builder("_iterative", builder) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative.py deleted file mode 100644 index 528c63fe92..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative.py +++ /dev/null @@ -1,618 +0,0 @@ -"""Iterative methods for solving linear systems""" - -__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr'] - -import _iterative -import numpy as np - -from scipy.sparse.linalg.interface import LinearOperator -from utils import make_system - -_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} - - -# Part of the docstring common to all iterative solvers -common_doc1 = \ -""" -Parameters ----------- -A : {sparse matrix, dense matrix, LinearOperator}""" - -common_doc2 = \ -"""b : {array, matrix} - Right hand side of the linear system. Has shape (N,) or (N,1). - -Returns -------- -x : {array, matrix} - The converged solution. -info : integer - Provides convergence information: - 0 : successful exit - >0 : convergence to tolerance not achieved, number of iterations - <0 : illegal input or breakdown - -Other Parameters ----------------- -x0 : {array, matrix} - Starting guess for the solution. -tol : float - Tolerance to achieve. The algorithm terminates when either the relative - or the absolute residual is below `tol`. -maxiter : integer - Maximum number of iterations. Iteration will stop after maxiter - steps even if the specified tolerance has not been achieved. -M : {sparse matrix, dense matrix, LinearOperator} - Preconditioner for A. The preconditioner should approximate the - inverse of A. 
Effective preconditioning dramatically improves the - rate of convergence, which implies that fewer iterations are needed - to reach a given error tolerance. -callback : function - User-supplied function to call after each iteration. It is called - as callback(xk), where xk is the current solution vector. -xtype : {'f','d','F','D'} - This parameter is deprecated -- avoid using it. - - The type of the result. If None, then it will be determined from - A.dtype.char and b. If A does not have a typecode method then it - will compute A.matvec(x0) to get a typecode. To save the extra - computation when A does not have a typecode attribute use xtype=0 - for the same type as b or use xtype='f','d','F',or 'D'. - This parameter has been superceeded by LinearOperator. - -""" - - -def set_docstring(header, Ainfo, footer=''): - def combine(fn): - fn.__doc__ = '\n'.join((header, common_doc1, - ' ' + Ainfo.replace('\n', '\n '), - common_doc2, footer)) - return fn - return combine - - - -@set_docstring('Use BIConjugate Gradient iteration to solve A x = b', - 'The real or complex N-by-N matrix of the linear system\n' - 'It is required that the linear operator can produce\n' - '``Ax`` and ``A^T x``.') -def bicg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): - A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - - n = len(b) - if maxiter is None: - maxiter = n*10 - - matvec, rmatvec = A.matvec, A.rmatvec - psolve, rpsolve = M.matvec, M.rmatvec - ltr = _type_conv[x.dtype.char] - revcom = getattr(_iterative, ltr + 'bicgrevcom') - stoptest = getattr(_iterative, ltr + 'stoptest2') - - resid = tol - ndx1 = 1 - ndx2 = -1 - work = np.zeros(6*n,dtype=x.dtype) - ijob = 1 - info = 0 - ftflag = True - bnrm2 = -1.0 - iter_ = maxiter - while True: - olditer = iter_ - x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ - revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) - if callback is not None and iter_ > olditer: - callback(x) - slice1 = slice(ndx1-1, 
ndx1-1+n) - slice2 = slice(ndx2-1, ndx2-1+n) - if (ijob == -1): - if callback is not None: - callback(x) - break - elif (ijob == 1): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(work[slice1]) - elif (ijob == 2): - work[slice2] *= sclr2 - work[slice2] += sclr1*rmatvec(work[slice1]) - elif (ijob == 3): - work[slice1] = psolve(work[slice2]) - elif (ijob == 4): - work[slice1] = rpsolve(work[slice2]) - elif (ijob == 5): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(x) - elif (ijob == 6): - if ftflag: - info = -1 - ftflag = False - bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) - ijob = 2 - - if info > 0 and iter_ == maxiter and resid > tol: - #info isn't set appropriately otherwise - info = iter_ - - return postprocess(x), info - -@set_docstring('Use BIConjugate Gradient STABilized iteration to solve A x = b', - 'The real or complex N-by-N matrix of the linear system\n' - '``A`` must represent a hermitian, positive definite matrix') -def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): - A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - - n = len(b) - if maxiter is None: - maxiter = n*10 - - matvec = A.matvec - psolve = M.matvec - ltr = _type_conv[x.dtype.char] - revcom = getattr(_iterative, ltr + 'bicgstabrevcom') - stoptest = getattr(_iterative, ltr + 'stoptest2') - - resid = tol - ndx1 = 1 - ndx2 = -1 - work = np.zeros(7*n,dtype=x.dtype) - ijob = 1 - info = 0 - ftflag = True - bnrm2 = -1.0 - iter_ = maxiter - while True: - olditer = iter_ - x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ - revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) - if callback is not None and iter_ > olditer: - callback(x) - slice1 = slice(ndx1-1, ndx1-1+n) - slice2 = slice(ndx2-1, ndx2-1+n) - if (ijob == -1): - if callback is not None: - callback(x) - break - elif (ijob == 1): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(work[slice1]) - elif (ijob == 2): - if psolve is None: - psolve = 
get_psolve(A) - work[slice1] = psolve(work[slice2]) - elif (ijob == 3): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(x) - elif (ijob == 4): - if ftflag: - info = -1 - ftflag = False - bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) - ijob = 2 - - if info > 0 and iter_ == maxiter and resid > tol: - #info isn't set appropriately otherwise - info = iter_ - - return postprocess(x), info - -@set_docstring('Use Conjugate Gradient iteration to solve A x = b', - 'The real or complex N-by-N matrix of the linear system\n' - '``A`` must represent a hermitian, positive definite matrix') -def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): - A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - - n = len(b) - if maxiter is None: - maxiter = n*10 - - matvec = A.matvec - psolve = M.matvec - ltr = _type_conv[x.dtype.char] - revcom = getattr(_iterative, ltr + 'cgrevcom') - stoptest = getattr(_iterative, ltr + 'stoptest2') - - resid = tol - ndx1 = 1 - ndx2 = -1 - work = np.zeros(4*n,dtype=x.dtype) - ijob = 1 - info = 0 - ftflag = True - bnrm2 = -1.0 - iter_ = maxiter - while True: - olditer = iter_ - x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ - revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) - if callback is not None and iter_ > olditer: - callback(x) - slice1 = slice(ndx1-1, ndx1-1+n) - slice2 = slice(ndx2-1, ndx2-1+n) - if (ijob == -1): - if callback is not None: - callback(x) - break - elif (ijob == 1): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(work[slice1]) - elif (ijob == 2): - work[slice1] = psolve(work[slice2]) - elif (ijob == 3): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(x) - elif (ijob == 4): - if ftflag: - info = -1 - ftflag = False - bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) - ijob = 2 - - - if info > 0 and iter_ == maxiter and resid > tol: - #info isn't set appropriately otherwise - info = iter_ - - return postprocess(x), info - - 
-@set_docstring('Use Conjugate Gradient Squared iteration to solve A x = b', - 'The real-valued N-by-N matrix of the linear system') -def cgs(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): - A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - - n = len(b) - if maxiter is None: - maxiter = n*10 - - matvec = A.matvec - psolve = M.matvec - ltr = _type_conv[x.dtype.char] - revcom = getattr(_iterative, ltr + 'cgsrevcom') - stoptest = getattr(_iterative, ltr + 'stoptest2') - - resid = tol - ndx1 = 1 - ndx2 = -1 - work = np.zeros(7*n,dtype=x.dtype) - ijob = 1 - info = 0 - ftflag = True - bnrm2 = -1.0 - iter_ = maxiter - while True: - olditer = iter_ - x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ - revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) - if callback is not None and iter_ > olditer: - callback(x) - slice1 = slice(ndx1-1, ndx1-1+n) - slice2 = slice(ndx2-1, ndx2-1+n) - if (ijob == -1): - if callback is not None: - callback(x) - break - elif (ijob == 1): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(work[slice1]) - elif (ijob == 2): - work[slice1] = psolve(work[slice2]) - elif (ijob == 3): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(x) - elif (ijob == 4): - if ftflag: - info = -1 - ftflag = False - bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) - ijob = 2 - - if info > 0 and iter_ == maxiter and resid > tol: - #info isn't set appropriately otherwise - info = iter_ - - return postprocess(x), info - - -def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, xtype=None, M=None, callback=None, restrt=None): - """ - Use Generalized Minimal RESidual iteration to solve A x = b. - - Parameters - ---------- - A : {sparse matrix, dense matrix, LinearOperator} - The real or complex N-by-N matrix of the linear system. - b : {array, matrix} - Right hand side of the linear system. Has shape (N,) or (N,1). - - Returns - ------- - x : {array, matrix} - The converged solution. 
- info : int - Provides convergence information: - * 0 : successful exit - * >0 : convergence to tolerance not achieved, number of iterations - * <0 : illegal input or breakdown - - Other parameters - ---------------- - x0 : {array, matrix} - Starting guess for the solution (a vector of zeros by default). - tol : float - Tolerance to achieve. The algorithm terminates when either the relative - or the absolute residual is below `tol`. - restart : int, optional - Number of iterations between restarts. Larger values increase - iteration cost, but may be necessary for convergence. - Default is 20. - maxiter : int, optional - Maximum number of iterations. Iteration will stop after maxiter - steps even if the specified tolerance has not been achieved. - M : {sparse matrix, dense matrix, LinearOperator} - Inverse of the preconditioner of A. M should approximate the - inverse of A and be easy to solve for (see Notes). Effective - preconditioning dramatically improves the rate of convergence, - which implies that fewer iterations are needed to reach a given - error tolerance. By default, no preconditioner is used. - callback : function - User-supplied function to call after each iteration. It is called - as callback(rk), where rk is the current residual vector. - - See Also - -------- - LinearOperator - - Notes - ----- - A preconditioner, P, is chosen such that P is close to A but easy to solve for. - The preconditioner parameter required by this routine is ``M = P^-1``. - The inverse should preferably not be calculated explicitly. Rather, use the - following template to produce M:: - - # Construct a linear operator that computes P^-1 * x. - import scipy.sparse.linalg as spla - M_x = lambda x: spla.spsolve(P, x) - M = spla.LinearOperator((n, n), M_x) - - Deprecated Parameters - --------------------- - xtype : {'f','d','F','D'} - This parameter is DEPRECATED --- avoid using it. - - The type of the result. If None, then it will be determined from - A.dtype.char and b. 
If A does not have a typecode method then it - will compute A.matvec(x0) to get a typecode. To save the extra - computation when A does not have a typecode attribute use xtype=0 - for the same type as b or use xtype='f','d','F',or 'D'. - This parameter has been superceeded by LinearOperator. - - See Also - -------- - LinearOperator - - """ - - # Change 'restrt' keyword to 'restart' - if restrt is None: - restrt = restart - elif restart is not None: - raise ValueError("Cannot specify both restart and restrt keywords. " - "Preferably use 'restart' only.") - - A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - - n = len(b) - if maxiter is None: - maxiter = n*10 - - if restrt is None: - restrt = 20 - restrt = min(restrt, n) - - matvec = A.matvec - psolve = M.matvec - ltr = _type_conv[x.dtype.char] - revcom = getattr(_iterative, ltr + 'gmresrevcom') - stoptest = getattr(_iterative, ltr + 'stoptest2') - - resid = tol - ndx1 = 1 - ndx2 = -1 - work = np.zeros((6+restrt)*n,dtype=x.dtype) - work2 = np.zeros((restrt+1)*(2*restrt+2),dtype=x.dtype) - ijob = 1 - info = 0 - ftflag = True - bnrm2 = -1.0 - iter_ = maxiter - old_ijob = ijob - first_pass = True - resid_ready = False - iter_num = 1 - while True: - olditer = iter_ - x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ - revcom(b, x, restrt, work, work2, iter_, resid, info, ndx1, ndx2, ijob) - #if callback is not None and iter_ > olditer: - # callback(x) - slice1 = slice(ndx1-1, ndx1-1+n) - slice2 = slice(ndx2-1, ndx2-1+n) - if (ijob == -1): # gmres success, update last residual - if resid_ready and callback is not None: - callback(resid) - resid_ready = False - - break - elif (ijob == 1): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(x) - elif (ijob == 2): - work[slice1] = psolve(work[slice2]) - if not first_pass and old_ijob==3: - resid_ready = True - - first_pass = False - elif (ijob == 3): - work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(work[slice1]) - if resid_ready and callback is not None: - 
callback(resid) - resid_ready = False - iter_num = iter_num+1 - - elif (ijob == 4): - if ftflag: - info = -1 - ftflag = False - bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) - - old_ijob = ijob - ijob = 2 - - if iter_num > maxiter: - break - - if info >= 0 and resid > tol: - #info isn't set appropriately otherwise - info = maxiter - - return postprocess(x), info - - -def qmr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M1=None, M2=None, callback=None): - """Use Quasi-Minimal Residual iteration to solve A x = b - - Parameters - ---------- - A : {sparse matrix, dense matrix, LinearOperator} - The real-valued N-by-N matrix of the linear system. - It is required that the linear operator can produce - ``Ax`` and ``A^T x``. - b : {array, matrix} - Right hand side of the linear system. Has shape (N,) or (N,1). - - Returns - ------- - x : {array, matrix} - The converged solution. - info : integer - Provides convergence information: - 0 : successful exit - >0 : convergence to tolerance not achieved, number of iterations - <0 : illegal input or breakdown - - Other Parameters - ---------------- - x0 : {array, matrix} - Starting guess for the solution. - tol : float - Tolerance to achieve. The algorithm terminates when either the relative - or the absolute residual is below `tol`. - maxiter : integer - Maximum number of iterations. Iteration will stop after maxiter - steps even if the specified tolerance has not been achieved. - M1 : {sparse matrix, dense matrix, LinearOperator} - Left preconditioner for A. - M2 : {sparse matrix, dense matrix, LinearOperator} - Right preconditioner for A. Used together with the left - preconditioner M1. The matrix M1*A*M2 should have better - conditioned than A alone. - callback : function - User-supplied function to call after each iteration. It is called - as callback(xk), where xk is the current solution vector. - xtype : {'f','d','F','D'} - This parameter is DEPRECATED -- avoid using it. - - The type of the result. 
If None, then it will be determined from - A.dtype.char and b. If A does not have a typecode method then it - will compute A.matvec(x0) to get a typecode. To save the extra - computation when A does not have a typecode attribute use xtype=0 - for the same type as b or use xtype='f','d','F',or 'D'. - This parameter has been superceeded by LinearOperator. - - See Also - -------- - LinearOperator - - """ - A_ = A - A,M,x,b,postprocess = make_system(A,None,x0,b,xtype) - - if M1 is None and M2 is None: - if hasattr(A_,'psolve'): - def left_psolve(b): - return A_.psolve(b,'left') - def right_psolve(b): - return A_.psolve(b,'right') - def left_rpsolve(b): - return A_.rpsolve(b,'left') - def right_rpsolve(b): - return A_.rpsolve(b,'right') - M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve) - M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve) - else: - def id(b): - return b - M1 = LinearOperator(A.shape, matvec=id, rmatvec=id) - M2 = LinearOperator(A.shape, matvec=id, rmatvec=id) - - n = len(b) - if maxiter is None: - maxiter = n*10 - - ltr = _type_conv[x.dtype.char] - revcom = getattr(_iterative, ltr + 'qmrrevcom') - stoptest = getattr(_iterative, ltr + 'stoptest2') - - resid = tol - ndx1 = 1 - ndx2 = -1 - work = np.zeros(11*n,x.dtype) - ijob = 1 - info = 0 - ftflag = True - bnrm2 = -1.0 - iter_ = maxiter - while True: - olditer = iter_ - x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ - revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) - if callback is not None and iter_ > olditer: - callback(x) - slice1 = slice(ndx1-1, ndx1-1+n) - slice2 = slice(ndx2-1, ndx2-1+n) - if (ijob == -1): - if callback is not None: - callback(x) - break - elif (ijob == 1): - work[slice2] *= sclr2 - work[slice2] += sclr1*A.matvec(work[slice1]) - elif (ijob == 2): - work[slice2] *= sclr2 - work[slice2] += sclr1*A.rmatvec(work[slice1]) - elif (ijob == 3): - work[slice1] = M1.matvec(work[slice2]) - elif (ijob == 4): - work[slice1] = 
M2.matvec(work[slice2]) - elif (ijob == 5): - work[slice1] = M1.rmatvec(work[slice2]) - elif (ijob == 6): - work[slice1] = M2.rmatvec(work[slice2]) - elif (ijob == 7): - work[slice2] *= sclr2 - work[slice2] += sclr1*A.matvec(x) - elif (ijob == 8): - if ftflag: - info = -1 - ftflag = False - bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) - ijob = 2 - - if info > 0 and iter_ == maxiter and resid > tol: - #info isn't set appropriately otherwise - info = iter_ - - return postprocess(x), info diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/BiCGREVCOM.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/BiCGREVCOM.f.src deleted file mode 100644 index b6f3b885e2..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/BiCGREVCOM.f.src +++ /dev/null @@ -1,396 +0,0 @@ -* -*- fortran -*- - SUBROUTINE <_c>BICGREVCOM( N, B, X, WORK, LDW, ITER, RESID, INFO, - $ NDX1, NDX2, SCLR1, SCLR2, IJOB) -* -* -* -- Iterative template routine -- -* Univ. of Tennessee and Oak Ridge National Laboratory -* October 1, 1993 -* Details of this algorithm are described in "Templates for the -* Solution of Linear Systems: Building Blocks for Iterative -* Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra, -* Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications, -* 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps). -* -* .. Scalar Arguments .. - INTEGER N, LDW, ITER, INFO - RESID - INTEGER NDX1, NDX2 - <_t> SCLR1, SCLR2 - INTEGER IJOB -* .. -* .. Array Arguments .. - <_t> X( * ), B( * ), WORK( LDW,* ) -* -* .. -* -* Purpose -* ======= -* -* BiCG solves the linear system Ax = b using the -* BiConjugate Gradient iterative method with preconditioning. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. 
-* -* X (input/output) DOUBLE PRECISION array, dimension N. -* On input, the initial guess; on exit, the iterated solution. -* -* WORK (workspace) DOUBLE PRECISION array, dimension (LDW,6). -* Workspace for residual, direction vector, etc. -* Note that Z and Q, and ZTLD and QTLD share workspace. -* -* LDW (input) INTEGER -* The leading dimension of the array WORK. LDW .gt. = max(1,N). -* -* ITER (input/output) INTEGER -* On input, the maximum iterations to be performed. -* On output, actual number of iterations performed. -* -* RESID (input/output) DOUBLE PRECISION -* On input, the allowable convergence measure for -* norm( b - A*x ) / norm( b ). -* On output, the final value of this measure. -* -* INFO (output) INTEGER -* -* = 0: Successful exit. Iterated approximate solution returned. -* -* .gt. 0: Convergence to tolerance not achieved. This will be -* set to the number of iterations performed. -* -* .ls. 0: Illegal input parameter, or breakdown occurred -* during iteration. -* -* Illegal parameter: -* -* -1: matrix dimension N .ls. 0 -* -2: LDW .ls. N -* -3: Maximum number of iterations ITER .ls. = 0. -* -5: Erroneous NDX1/NDX2 in INIT call. -* -6: Erroneous RLBL. -* -* BREAKDOWN: If parameters RHO or OMEGA become smaller -* than some tolerance, the program will terminate. -* Here we check against tolerance BREAKTOL. -* -* -10: RHO .ls. BREAKTOL: RHO and RTLD have become -* orthogonal. -* -* BREAKTOL is set in func GETBREAK. -* -* NDX1 (input/output) INTEGER. -* NDX2 On entry in INIT call contain indices required by interface -* level for stopping test. -* All other times, used as output, to indicate indices into -* WORK[] for the MATVEC, PSOLVE done by the interface level. -* -* SCLR1 (output) DOUBLE PRECISION. -* SCLR2 Used to pass the scalars used in MATVEC. Scalars are reqd because -* original routines use dgemv. -* -* IJOB (input/output) INTEGER. -* Used to communicate job code between the two levels. 
-* -* BLAS CALLS: DAXPY, DCOPY, DDOT, DNRM2, -* ============================================================== -* -* .. Parameters .. - ZERO, ONE - PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) -* .. -* .. Local Scalars .. - INTEGER R, RTLD, Z, ZTLD, P, PTLD, Q, QTLD, MAXIT, - $ NEED1, NEED2 - TOL, BNRM2, RHOTOL, - $ GETBREAK, - $ NRM2 - <_t> ALPHA, BETA, RHO, RHO1, -* -* indicates where to resume from. Only valid when IJOB = 2! - INTEGER RLBL -* -* saving all. - SAVE -* -* .. -* .. External Routines .. - EXTERNAL <_c>AXPY, <_c>COPY, , NRM2 -* .. -* .. Executable Statements .. -* -* Entry point, so test IJOB - IF (IJOB .eq. 1) THEN - GOTO 1 - ELSEIF (IJOB .eq. 2) THEN -* here we do resumption handling - IF (RLBL .eq. 2) GOTO 2 - IF (RLBL .eq. 3) GOTO 3 - IF (RLBL .eq. 4) GOTO 4 - IF (RLBL .eq. 5) GOTO 5 - IF (RLBL .eq. 6) GOTO 6 - IF (RLBL .eq. 7) GOTO 7 -* if neither of these, then error - INFO = -6 - GOTO 20 - ENDIF -* -* init. -***************** - 1 CONTINUE -***************** -* - INFO = 0 - MAXIT = ITER - TOL = RESID -* -* Alias workspace columns. -* - R = 1 - RTLD = 2 - Z = 3 - ZTLD = 4 - P = 5 - PTLD = 6 - Q = 3 - QTLD = 4 -* -* Check if caller will need indexing info. 
-* - IF( NDX1.NE.-1 ) THEN - IF( NDX1.EQ.1 ) THEN - NEED1 = ((R - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.2 ) THEN - NEED1 = ((RTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.3 ) THEN - NEED1 = ((Z - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.4 ) THEN - NEED1 = ((ZTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.5 ) THEN - NEED1 = ((P - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.6 ) THEN - NEED1 = ((PTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.7 ) THEN - NEED1 = ((Q - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.8 ) THEN - NEED1 = ((QTLD - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED1 = NDX1 - ENDIF -* - IF( NDX2.NE.-1 ) THEN - IF( NDX2.EQ.1 ) THEN - NEED2 = ((R - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.2 ) THEN - NEED2 = ((RTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.3 ) THEN - NEED2 = ((Z - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.4 ) THEN - NEED2 = ((ZTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.5 ) THEN - NEED2 = ((P - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.6 ) THEN - NEED2 = ((PTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.7 ) THEN - NEED2 = ((Q - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.8 ) THEN - NEED2 = ((QTLD - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED2 = NDX2 - ENDIF -* -* Set breakdown parameters. -* - RHOTOL = GETBREAK() -* -* Set initial residual. -* - CALL <_c>COPY( N, B, 1, WORK(1,R), 1 ) - IF ( NRM2( N, X, 1 ).NE.ZERO ) THEN -*********CALL MATVEC( -ONE, X, ZERO, WORK(1,R) ) -* using WORK[RTLD] as temp -*********CALL <_c>COPY( N, X, 1, WORK(1,RTLD), 1 ) - SCLR1 = -ONE - SCLR2 = ZERO - NDX1 = ((RTLD - 1) * LDW) + 1 - NDX2 = ((R - 1) * LDW) + 1 - RLBL = 2 - IJOB = 5 - RETURN - ENDIF -***************** - 2 CONTINUE -***************** -* - IF ( NRM2( N, WORK(1,R), 1 ).LE.TOL ) GO TO 30 -* - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,RTLD), 1 ) - BNRM2 = NRM2( N, B, 1 ) - IF ( BNRM2.EQ.ZERO ) BNRM2 = ONE -* - ITER = 0 -* - 10 CONTINUE -* -* Perform BiConjugate Gradient iteration. -* - ITER = ITER + 1 -* -* Compute direction vectors PK and PTLD. 
-* -*********CALL PSOLVE( WORK(1,Z), WORK(1,R) ) -* - NDX1 = ((Z - 1) * LDW) + 1 - NDX2 = ((R - 1) * LDW) + 1 - RLBL = 3 - IJOB = 3 - RETURN -***************** - 3 CONTINUE -***************** -* -*********CALL PSOLVETRANS( WORK(1,ZTLD), WORK(1,RTLD) ) -* - NDX1 = ((ZTLD - 1) * LDW) + 1 - NDX2 = ((RTLD - 1) * LDW) + 1 - RLBL = 4 - IJOB = 4 - RETURN -***************** - 4 CONTINUE -***************** -* -* -* RHO = ( N, WORK(1,Z), 1, WORK(1,RTLD), 1 ) - RHO = ( N, WORK(1,RTLD), 1, WORK(1,Z), 1 ) - IF ( ABS( RHO ).LT.RHOTOL ) GO TO 25 -* - IF ( ITER.GT.1 ) THEN - BETA = RHO / RHO1 - CALL <_c>AXPY( N, BETA, WORK(1,P), 1, WORK(1,Z), 1 ) -* CALL <_c>AXPY( N, BETA, WORK(1,PTLD), 1, WORK(1,ZTLD), 1 ) - CALL <_c>AXPY( N, (BETA), - $ WORK(1,PTLD), 1, WORK(1,ZTLD), 1 ) - CALL <_c>COPY( N, WORK(1,Z), 1, WORK(1,P), 1 ) - CALL <_c>COPY( N, WORK(1,ZTLD), 1, WORK(1,PTLD), 1 ) - ELSE - CALL <_c>COPY( N, WORK(1,Z), 1, WORK(1,P), 1 ) - CALL <_c>COPY( N, WORK(1,ZTLD), 1, WORK(1,PTLD), 1 ) - ENDIF -* -*********CALL MATVEC( ONE, WORK(1,P), ZERO, WORK(1,Q) ) -* - SCLR1 = ONE - SCLR2 = ZERO - NDX1 = ((P - 1) * LDW) + 1 - NDX2 = ((Q - 1) * LDW) + 1 - RLBL = 5 - IJOB = 1 - RETURN -***************** - 5 CONTINUE -***************** -* -*********CALL MATVECTRANS( ONE, WORK(1,PTLD), ZERO, WORK(1,QTLD) ) -* - SCLR1 = ONE - SCLR2 = ZERO - NDX1 = ((PTLD - 1) * LDW) + 1 - NDX2 = ((QTLD - 1) * LDW) + 1 - RLBL = 6 - IJOB = 2 - RETURN -***************** - 6 CONTINUE -***************** - ALPHA = RHO / ( N, WORK(1,PTLD), 1, WORK(1,Q), 1 ) -* -* Compute current solution vector x. -* - CALL <_c>AXPY( N, ALPHA, WORK(1,P), 1, X, 1 ) -* -* Compute residual vector rk, find norm, -* then check for tolerance. 
-* - CALL <_c>AXPY( N, -ALPHA, WORK(1,Q), 1, WORK(1,R), 1 ) -* -*********RESID = NRM2( N, WORK(1,R), 1 ) / BNRM2 -*********IF ( RESID.LE.TOL ) GO TO 30 -* - NDX1 = NEED1 - NDX2 = NEED2 -* Prepare for resumption & return - RLBL = 7 - IJOB = 6 - RETURN -* -***************** - 7 CONTINUE -***************** - IF( INFO.EQ.1 ) GO TO 30 -* - IF ( ITER.EQ.MAXIT ) THEN - INFO = 1 - GO TO 20 - ENDIF -* -* CALL <_c>AXPY( N, -ALPHA, WORK(1,QTLD), 1, WORK(1,RTLD), 1 ) - CALL <_c>AXPY( N, -(ALPHA) - $ , WORK(1,QTLD), 1, WORK(1,RTLD), 1 ) - RHO1 = RHO -* - GO TO 10 -* - 20 CONTINUE -* -* Iteration fails. -* - RLBL = -1 - IJOB = -1 - RETURN -* - 25 CONTINUE -* -* Set breakdown flag. -* - INFO = -10 - RLBL = -1 - IJOB = -1 - RETURN -* - 30 CONTINUE -* -* Iteration successful; return. -* - INFO = 0 - RLBL = -1 - IJOB = -1 - RETURN -* -* End of BICGREVCOM -* - END -* END SUBROUTINE <_c>BICGREVCOM - - - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/BiCGSTABREVCOM.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/BiCGSTABREVCOM.f.src deleted file mode 100644 index e958f5214d..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/BiCGSTABREVCOM.f.src +++ /dev/null @@ -1,428 +0,0 @@ -* -*- fortran -*- - SUBROUTINE <_c>BICGSTABREVCOM(N, B, X, WORK, LDW, ITER, RESID, - $ INFO,NDX1, NDX2, SCLR1, SCLR2, IJOB) -* -* -- Iterative template routine -- -* Univ. of Tennessee and Oak Ridge National Laboratory -* October 1, 1993 -* Details of this algorithm are described in "Templates for the -* Solution of Linear Systems: Building Blocks for Iterative -* Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra, -* Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications, -* 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps). -* -* .. Scalar Arguments .. - INTEGER N, LDW, ITER, INFO - RESID - INTEGER NDX1, NDX2 - <_t> SCLR1, SCLR2 - INTEGER IJOB -* .. -* .. Array Arguments .. - <_t> X( * ), B( * ), WORK( LDW,* ) -* .. 
-* -* Purpose -* ======= -* -* BICGSTAB solves the linear system A*x = b using the -* BiConjugate Gradient Stabilized iterative method with -* preconditioning. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. -* -* X (input/output) DOUBLE PRECISION array, dimension N. -* On input, the initial guess. This is commonly set to -* the zero vector. -* On exit, if INFO = 0, the iterated approximate solution. -* -* WORK (workspace) DOUBLE PRECISION array, dimension (LDW,7) -* Workspace for residual, direction vector, etc. -* Note that vectors R and S shared the same workspace. -* -* LDW (input) INTEGER -* The leading dimension of the array WORK. LDW .gt. = max(1,N). -* -* ITER (input/output) INTEGER -* On input, the maximum iterations to be performed. -* On output, actual number of iterations performed. -* -* RESID (input/output) DOUBLE PRECISION -* On input, the allowable convergence measure for -* norm( b - A*x ) / norm( b ). -* On output, the final value of this measure. -* -* INFO (output) INTEGER -* -* = 0: Successful exit. Iterated approximate solution returned. -* -* .gt. 0: Convergence to tolerance not achieved. This will be -* set to the number of iterations performed. -* -* .ls. 0: Illegal input parameter, or breakdown occurred -* during iteration. -* -* Illegal parameter: -* -* -1: matrix dimension N .ls. 0 -* -2: LDW .ls. N -* -3: Maximum number of iterations ITER .ls. = 0. -* -5: Erroneous NDX1/NDX2 in INIT call. -* -6: Erroneous RLBL. -* -* BREAKDOWN: If parameters RHO or OMEGA become smaller -* than some tolerance, the program will terminate. -* Here we check against tolerance BREAKTOL. -* -* -10: RHO .ls. BREAKTOL: RHO and RTLD have become -* orthogonal. -* -11: OMEGA .ls. BREAKTOL: S and T have become -* orthogonal relative to T'*T. -* -* BREAKTOL is set in func GETBREAK. 
-* -* NDX1 (input/output) INTEGER. -* NDX2 On entry in INIT call contain indices required by interface -* level for stopping test. -* All other times, used as output, to indicate indices into -* WORK[] for the MATVEC, PSOLVE done by the interface level. -* -* SCLR1 (output) DOUBLE PRECISION. -* SCLR2 Used to pass the scalars used in MATVEC. Scalars are reqd because -* original routines use dgemv. -* -* IJOB (input/output) INTEGER. -* Used to communicate job code between the two levels. -* -* BLAS CALLS: DAXPY, DCOPY, DDOT, DNRM2, DSCAL -* ============================================================== -* -* .. Parameters .. - ZERO, ONE - PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) -* .. -* .. Local Scalars .. - INTEGER R, RTLD, P, PHAT, V, S, SHAT, T, MAXIT, - $ NEED1, NEED2 - TOL, BNRM2, - $ RHOTOL, OMEGATOL, GETBREAK, - $ NRM2 - <_t> ALPHA, BETA, RHO, RHO1, OMEGA, TMPVAL, - $ -* indicates where to resume from. Only valid when IJOB = 2! - INTEGER RLBL -* -* saving all. - SAVE -* .. -* .. External Funcs .. - EXTERNAL GETBREAK, <_c>AXPY, <_c>COPY, - $ , NRM2, <_c>SCAL -* .. -* .. Intrinsic Funcs .. - INTRINSIC ABS, MAX -* .. -* .. Executable Statements .. -* -* Entry point, so test IJOB - IF (IJOB .eq. 1) THEN - GOTO 1 - ELSEIF (IJOB .eq. 2) THEN -* here we do resumption handling - IF (RLBL .eq. 2) GOTO 2 - IF (RLBL .eq. 3) GOTO 3 - IF (RLBL .eq. 4) GOTO 4 - IF (RLBL .eq. 5) GOTO 5 - IF (RLBL .eq. 6) GOTO 6 - IF (RLBL .eq. 7) GOTO 7 -* if neither of these, then error - INFO = -6 - GOTO 20 - ENDIF -* -* -***************** - 1 CONTINUE -***************** -* - INFO = 0 - MAXIT = ITER - TOL = RESID -* -* Alias workspace columns. -* - R = 1 - RTLD = 2 - P = 3 - V = 4 - T = 5 - PHAT = 6 - SHAT = 7 - S = 1 -* -* Check if caller will need indexing info. 
-* - IF( NDX1.NE.-1 ) THEN - IF( NDX1.EQ.1 ) THEN - NEED1 = ((R - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.2 ) THEN - NEED1 = ((RTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.3 ) THEN - NEED1 = ((P - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.4 ) THEN - NEED1 = ((V - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.5 ) THEN - NEED1 = ((T - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.6 ) THEN - NEED1 = ((PHAT - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.7 ) THEN - NEED1 = ((SHAT - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.8 ) THEN - NEED1 = ((S - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED1 = NDX1 - ENDIF -* - IF( NDX2.NE.-1 ) THEN - IF( NDX2.EQ.1 ) THEN - NEED2 = ((R - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.2 ) THEN - NEED2 = ((RTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.3 ) THEN - NEED2 = ((P - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.4 ) THEN - NEED2 = ((V - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.5 ) THEN - NEED2 = ((T - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.6 ) THEN - NEED2 = ((PHAT - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.7 ) THEN - NEED2 = ((SHAT - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.8 ) THEN - NEED2 = ((S - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED2 = NDX2 - ENDIF -* -* Set parameter tolerances. -* - RHOTOL = GETBREAK() - OMEGATOL = GETBREAK() -* -* Set initial residual. -* - CALL <_c>COPY( N, B, 1, WORK(1,R), 1 ) - IF ( NRM2( N, X, 1 ).NE.ZERO ) THEN -*********CALL <_c>MATVEC( -ONE, X, ONE, WORK(1,R) ) -* Note: using RTLD[] as temp. storage. -*********CALL <_c>COPY(N, X, 1, WORK(1,RTLD), 1) - SCLR1 = -ONE - SCLR2 = ONE - NDX1 = -1 - NDX2 = ((R - 1) * LDW) + 1 -* -* Prepare for resumption & return - RLBL = 2 - IJOB = 3 - RETURN - ENDIF -* -***************** - 2 CONTINUE -***************** -* - IF ( NRM2( N, WORK(1,R), 1 ).LE.TOL ) GO TO 30 - - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,RTLD), 1 ) -* - BNRM2 = NRM2( N, B, 1 ) - IF ( BNRM2 .EQ. ZERO ) BNRM2 = ONE -* - ITER = 0 -* - 10 CONTINUE -* -* Perform BiConjugate Gradient Stabilized iteration. 
-* - ITER = ITER + 1 -* - RHO = ( N, WORK(1,RTLD), 1, WORK(1,R), 1 ) - IF ( ABS( RHO ).LT.RHOTOL ) GO TO 25 -* -* Compute vector P. -* - IF ( ITER.GT.1 ) THEN - BETA = ( RHO / RHO1 ) * ( ALPHA / OMEGA ) - CALL <_c>AXPY( N, -OMEGA, WORK(1,V), 1, WORK(1,P), 1 ) - CALL <_c>SCAL( N, BETA, WORK(1,P), 1 ) - TMPVAL = ONE - CALL <_c>AXPY( N, TMPVAL, WORK(1,R), 1, WORK(1,P), 1 ) - ELSE - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,P), 1 ) - ENDIF -* -* Compute direction adjusting vector PHAT and scalar ALPHA. -* -*********CALL PSOLVE( WORK(1,PHAT), WORK(1,P) ) -* - NDX1 = ((PHAT - 1) * LDW) + 1 - NDX2 = ((P - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 3 - IJOB = 2 - RETURN -* -***************** - 3 CONTINUE -***************** -* -*********CALL MATVEC( ONE, WORK(1,PHAT), ZERO, WORK(1,V) ) -* - NDX1 = ((PHAT - 1) * LDW) + 1 - NDX2 = ((V - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = ONE - SCLR2 = ZERO - RLBL = 4 - IJOB = 1 - RETURN -* -***************** - 4 CONTINUE -***************** -* - ALPHA = RHO / ( N, WORK(1,RTLD), 1, WORK(1,V), 1 ) -* -* Early check for tolerance. -* - CALL <_c>AXPY( N, -ALPHA, WORK(1,V), 1, WORK(1,R), 1 ) - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,S), 1 ) - IF ( NRM2( N, WORK(1,S), 1 ).LE.TOL ) THEN - CALL <_c>AXPY( N, ALPHA, WORK(1,PHAT), 1, X, 1 ) - RESID = NRM2( N, WORK(1,S), 1 ) / BNRM2 - GO TO 30 - ELSE -* -* Compute stabilizer vector SHAT and scalar OMEGA. 
-* -************CALL PSOLVE( WORK(1,SHAT), WORK(1,S) ) -* - NDX1 = ((SHAT - 1) * LDW) + 1 - NDX2 = ((S - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 5 - IJOB = 2 - RETURN - ENDIF -* -***************** - 5 CONTINUE -***************** -* -************CALL MATVEC( ONE, WORK(1,SHAT), ZERO, WORK(1,T) ) -* - NDX1 = ((SHAT - 1) * LDW) + 1 - NDX2 = ((T - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = ONE - SCLR2 = ZERO - RLBL = 6 - IJOB = 1 - RETURN -* -***************** - 6 CONTINUE -***************** -* - OMEGA = ( N, WORK(1,T), 1, WORK(1,S), 1 ) / - $ ( N, WORK(1,T), 1, WORK(1,T), 1 ) -* -* Compute new solution approximation vector X. -* - CALL <_c>AXPY( N, ALPHA, WORK(1,PHAT), 1, X, 1 ) - CALL <_c>AXPY( N, OMEGA, WORK(1,SHAT), 1, X, 1 ) -* -* Compute residual R, check for tolerance. -* - CALL <_c>AXPY( N, -OMEGA, WORK(1,T), 1, WORK(1,R), 1 ) -* -************RESID = DNRM2( N, WORK(1,R), 1 ) / BNRM2 -************IF ( RESID.LE.TOL ) GO TO 30 -* - NDX1 = NEED1 - NDX2 = NEED2 -* Prepare for resumption & return - RLBL = 7 - IJOB = 4 - RETURN -* -***************** - 7 CONTINUE -***************** - IF( INFO.EQ.1 ) GO TO 30 -* - IF ( ITER.EQ.MAXIT ) THEN - INFO = 1 - GO TO 20 - ENDIF -* - IF ( ABS( OMEGA ).LT.OMEGATOL ) THEN - GO TO 25 - ELSE - RHO1 = RHO - GO TO 10 - ENDIF -* - 20 CONTINUE -* -* Iteration fails. -* - RLBL = -1 - IJOB = -1 - RETURN -* - 25 CONTINUE -* -* Set breakdown flag. -* - IF ( ABS( RHO ).LT.RHOTOL ) THEN - INFO = -10 - ELSE IF ( ABS( OMEGA ).LT.OMEGATOL ) THEN - INFO = -11 - ENDIF - RLBL = -1 - IJOB = -1 - RETURN -* - 30 CONTINUE -* -* Iteration successful; return. 
-* - INFO = 0 - RLBL = -1 - IJOB = -1 - RETURN -* -* End of BICGSTABREVCOM -* - END -* END SUBROUTINE <_c>BICGSTABREVCOM diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/CGREVCOM.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/CGREVCOM.f.src deleted file mode 100644 index e2b4e54b75..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/CGREVCOM.f.src +++ /dev/null @@ -1,318 +0,0 @@ -* -*- fortran -*- - SUBROUTINE <_c>CGREVCOM( N, B, X, WORK, LDW, ITER, RESID, INFO, - $ NDX1, NDX2, SCLR1, SCLR2, IJOB) -* -* -- Iterative template routine -- -* Univ. of Tennessee and Oak Ridge National Laboratory -* October 1, 1993 -* Details of this algorithm are described in "Templates for the -* Solution of Linear Systems: Building Blocks for Iterative -* Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra, -* Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications, -* 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps). -* -* .. Scalar Arguments .. - INTEGER N, LDW, ITER, INFO - RESID -* INTEGER NDX1, NDX2 - <_t> SCLR1, SCLR2 - INTEGER IJOB -* .. -* .. Array Arguments .. - <_t> X( * ), B( * ), WORK( LDW,* ) -* -* (output) for matvec and solve. These index into WORK[] - INTEGER NDX1, NDX2 -* .. -* -* Purpose -* ======= -* -* CG solves the linear system Ax = b using the -* Conjugate Gradient iterative method with preconditioning. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. -* -* X (input/output) DOUBLE PRECISION array, dimension N. -* On input, the initial guess. This is commonly set to -* the zero vector. -* On exit, if INFO = 0, the iterated approximate solution. -* -* WORK (workspace) DOUBLE PRECISION array, dimension (LDW,4). -* Workspace for residual, direction vector, etc. 
-* -* LDW (input) INTEGER -* The leading dimension of the array WORK. LDW .gt. = max(1,N). -* -* ITER (input/output) INTEGER -* On input, the maximum iterations to be performed. -* On output, actual number of iterations performed. -* -* RESID (input/output) DOUBLE PRECISION -* On input, the allowable convergence measure for -* norm( b - A*x ) / norm( b ). -* On output, the final value of this measure. -* -* INFO (output) INTEGER -* -* = 0: Successful exit. Iterated approximate solution returned. -* -* .gt. 0: Convergence to tolerance not achieved. This will be -* set to the number of iterations performed. -* -* .ls. 0: Illegal input parameter. -* -* -1: matrix dimension N .ls. 0 -* -2: LDW .ls. N -* -3: Maximum number of iterations ITER .ls. = 0. -* -5: Erroneous NDX1/NDX2 in INIT call. -* -6: Erroneous RLBL. -* -* NDX1 (input/output) INTEGER. -* NDX2 On entry in INIT call contain indices required by interface -* level for stopping test. -* All other times, used as output, to indicate indices into -* WORK[] for the MATVEC, PSOLVE done by the interface level. -* -* SCLR1 (output) DOUBLE PRECISION. -* SCLR2 Used to pass the scalars used in MATVEC. Scalars are reqd because -* original routines use dgemv. -* -* IJOB (input/output) INTEGER. -* Used to communicate job code between the two levels. -* -* BLAS CALLS: DAXPY, DCOPY, DDOT, DNRM2 -* ============================================================ -* -* .. Parameters .. - ZERO, ONE - PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) -* .. -* .. Local Scalars .. - INTEGER MAXIT, R, Z, P, Q, NEED1, NEED2 - <_t> ALPHA, BETA, RHO, RHO1, - $ - NRM2, TOL -* -* indicates where to resume from. Only valid when IJOB = 2! - INTEGER RLBL -* -* saving all. - SAVE -* .. -* .. External Routines .. - EXTERNAL <_c>AXPY, <_c>COPY, , NRM2 -* .. -* .. Executable Statements .. -* -* Entry point, so test IJOB - IF (IJOB .eq. 1) THEN - GOTO 1 - ELSEIF (IJOB .eq. 2) THEN -* here we do resumption handling - IF (RLBL .eq. 2) GOTO 2 - IF (RLBL .eq. 
3) GOTO 3 - IF (RLBL .eq. 4) GOTO 4 - IF (RLBL .eq. 5) GOTO 5 -* if neither of these, then error - INFO = -6 - GOTO 20 - ENDIF -* -* init. -***************** - 1 CONTINUE -***************** -* - INFO = 0 - MAXIT = ITER - TOL = RESID -* -* Alias workspace columns. -* - R = 1 - Z = 2 - P = 3 - Q = 4 -* -* Check if caller will need indexing info. -* - IF( NDX1.NE.-1 ) THEN - IF( NDX1.EQ.1 ) THEN - NEED1 = ((R - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.2 ) THEN - NEED1 = ((Z - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.3 ) THEN - NEED1 = ((P - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.4 ) THEN - NEED1 = ((Q - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED1 = NDX1 - ENDIF -* - IF( NDX2.NE.-1 ) THEN - IF( NDX2.EQ.1 ) THEN - NEED2 = ((R - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.2 ) THEN - NEED2 = ((Z - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.3 ) THEN - NEED2 = ((P - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.4 ) THEN - NEED2 = ((Q - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED2 = NDX2 - ENDIF -* -* Set initial residual. -* - CALL <_c>COPY( N, B, 1, WORK(1,R), 1 ) - IF ( NRM2( N, X, 1 ).NE.ZERO ) THEN - -*********CALL MATVEC( -ONE, X, ONE, WORK(1,R) ) -* -* Set args for revcom return - SCLR1 = -ONE - SCLR2 = ONE - NDX1 = -1 - NDX2 = ((R - 1) * LDW) + 1 -* -* Prepare for resumption & return - RLBL = 2 - IJOB = 3 - RETURN - ENDIF -* -***************** - 2 CONTINUE -***************** -* - IF ( NRM2( N, WORK(1,R), 1 ).LT.TOL ) GO TO 30 -* - ITER = 0 -* - 10 CONTINUE -* -* Perform Preconditioned Conjugate Gradient iteration. -* - ITER = ITER + 1 -* -* Preconditioner Solve. -* -*********CALL PSOLVE( WORK(1,Z), WORK(1,R) ) -* - NDX1 = ((Z - 1) * LDW) + 1 - NDX2 = ((R - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 3 - IJOB = 2 - RETURN -* -***************** - 3 CONTINUE -***************** -* - RHO = ( N, WORK(1,R), 1, WORK(1,Z), 1 ) -* -* Compute direction vector P. 
-* - IF ( ITER.GT.1 ) THEN - BETA = RHO / RHO1 - CALL <_c>AXPY( N, BETA, WORK(1,P), 1, WORK(1,Z), 1 ) -* - CALL <_c>COPY( N, WORK(1,Z), 1, WORK(1,P), 1 ) - ELSE - CALL <_c>COPY( N, WORK(1,Z), 1, WORK(1,P), 1 ) - ENDIF -* -* Compute scalar ALPHA (save A*P to Q). -* -*********CALL MATVEC( ONE, WORK(1,P), ZERO, WORK(1,Q) ) -* - NDX1 = ((P - 1) * LDW) + 1 - NDX2 = ((Q - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = ONE - SCLR2 = ZERO - RLBL = 4 - IJOB = 1 - RETURN -* -***************** - 4 CONTINUE -***************** -* - ALPHA = RHO / ( N, WORK(1,P), 1, WORK(1,Q), 1 ) -* -* Compute current solution vector X. -* - CALL <_c>AXPY( N, ALPHA, WORK(1,P), 1, X, 1 ) -* -* Compute residual vector R, find norm, -* then check for tolerance. -* - CALL <_c>AXPY( N, -ALPHA, WORK(1,Q), 1, WORK(1,R), 1 ) -* -*********RESID = NRM2( N, WORK(1,R), 1 ) / BNRM2 -*********IF ( RESID.LE.TOL ) GO TO 30 -* - NDX1 = NEED1 - NDX2 = NEED2 -* Prepare for resumption & return - RLBL = 5 - IJOB = 4 - RETURN -* -***************** - 5 CONTINUE -***************** - IF( INFO.EQ.1 ) GO TO 30 -* - IF ( ITER.EQ.MAXIT ) THEN - INFO = 1 - GO TO 20 - ENDIF -* - RHO1 = RHO -* - GO TO 10 -* - 20 CONTINUE -* -* Iteration fails. -* - RLBL = -1 - IJOB = -1 - RETURN -* - 30 CONTINUE -* -* Iteration successful; return. -* - INFO = 0 - RLBL = -1 - IJOB = -1 - RETURN -* -* End of CGREVCOM -* - END -* END SUBROUTINE <_c>CGREVCOM diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/CGSREVCOM.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/CGSREVCOM.f.src deleted file mode 100644 index 9a71d7b746..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/CGSREVCOM.f.src +++ /dev/null @@ -1,433 +0,0 @@ -* -*- fortran -*- - SUBROUTINE <_c>CGSREVCOM(N, B, X, WORK, LDW, ITER, RESID, INFO, - $ NDX1, NDX2, SCLR1, SCLR2, IJOB) -* -* -- Iterative template routine -- -* Univ. 
of Tennessee and Oak Ridge National Laboratory -* October 1, 1993 -* Details of this algorithm are described in "Templates for the -* Solution of Linear Systems: Building Blocks for Iterative -* Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra, -* Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications, -* 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps). -* -* .. Scalar Arguments .. - INTEGER N, LDW, ITER, INFO - RESID - INTEGER NDX1, NDX2 - <_t> SCLR1, SCLR2 - INTEGER IJOB -* .. -* .. Array Arguments .. - <_t> X( * ), B( * ), WORK( LDW,* ) -* .. -* -* Purpose -* ======= -* -* CGS solves the linear system Ax = b using the -* Conjugate Gradient Squared iterative method with preconditioning. -* -* Convergence test: ( norm( b - A*x ) / norm( b ) ) .ls. TOL. -* For other measures, see the above reference. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. -* -* X (input/output) DOUBLE PRECISION array, dimension N. -* On input, the initial guess. This is commonly set to -* the zero vector. The user should be warned that for -* this particular algorithm, an initial guess close to -* the actual solution can result in divergence. -* On exit, the iterated solution. -* -* WORK (workspace) DOUBLE PRECISION array, dimension (LDW,7) -* Workspace for residual, direction vector, etc. -* Note that vectors PHAT and QHAT, and UHAT and VHAT share -* the same workspace. -* -* LDW (input) INTEGER -* The leading dimension of the array WORK. LDW .gt. = max(1,N). -* -* ITER (input/output) INTEGER -* On input, the maximum iterations to be performed. -* On output, actual number of iterations performed. -* -* RESID (input/output) DOUBLE PRECISION -* On input, the allowable convergence measure for -* norm( b - A*x ) / norm( b ). -* On ouput, the final value of this measure. 
-* -* INFO (output) INTEGER -* -* = 0: Successful exit. -* .gt. 0: Convergence not achieved. This will be set -* to the number of iterations performed. -* -* .ls. 0: Illegal input parameter, or breakdown occured -* during iteration. -* -* Illegal parameter: -* -* -1: matrix dimension N .ls. 0 -* -2: LDW .ls. N -* -3: Maximum number of iterations ITER .ls. = 0. -* -5: Erroneous NDX1/NDX2 in INIT call. -* -6: Erroneous RLBL. -* -* BREAKDOWN: If RHO become smaller than some tolerance, -* the program will terminate. Here we check -* against tolerance BREAKTOL. -* -* -10: RHO .ls. BREAKTOL: RHO and RTLD have become -* orthogonal. -* -* NDX1 (input/output) INTEGER. -* NDX2 On entry in INIT call contain indices required by interface -* level for stopping test. -* All other times, used as output, to indicate indices into -* WORK[] for the MATVEC, PSOLVE done by the interface level. -* -* SCLR1 (output) DOUBLE PRECISION. -* SCLR2 Used to pass the scalars used in MATVEC. Scalars are reqd because -* original routines use dgemv. -* -* IJOB (input/output) INTEGER. -* Used to communicate job code between the two levels. -* -* BLAS CALLS: DAXPY, DCOPY, DDOT, DNRM2, DSCAL -* ============================================================= -* -* .. Parameters .. - ONE, ZERO - PARAMETER ( ONE = 1.0D+0 , ZERO = 0.0D+0 ) -* .. -* .. Local Scalars .. - INTEGER R, RTLD, P, PHAT, Q, QHAT, U, UHAT, VHAT, - $ MAXIT, NEED1, NEED2 - TOL, BNRM2, RHOTOL, - $ GETBREAK, - $ NRM2 - - <_t> ALPHA, BETA, RHO, RHO1, TMPVAL, - $ -* .. -* indicates where to resume from. Only valid when IJOB = 2! - INTEGER RLBL -* -* saving all. - SAVE -* -* .. External Funcs .. - EXTERNAL GETBREAK, <_c>AXPY, - $ <_c>COPY, , NRM2, <_c>SCAL -* .. -* .. Intrinsic Funcs .. - INTRINSIC ABS, MAX -* .. -* .. Executable Statements .. -* -* Entry point, test IJOB - IF (IJOB .eq. 1) THEN - GOTO 1 - ELSEIF (IJOB .eq. 2) THEN -* here we do resumption handling - IF (RLBL .eq. 2) GOTO 2 - IF (RLBL .eq. 3) GOTO 3 - IF (RLBL .eq. 
4) GOTO 4 - IF (RLBL .eq. 5) GOTO 5 - IF (RLBL .eq. 6) GOTO 6 - IF (RLBL .eq. 7) GOTO 7 -* if neither of these, then error - INFO = -6 - GOTO 20 - ENDIF -* -* -***************** - 1 CONTINUE -***************** -* - INFO = 0 - MAXIT = ITER - TOL = RESID -* -* Alias workspace columns. -* - R = 1 - RTLD = 2 - P = 3 - PHAT = 4 - Q = 5 - QHAT = 6 - U = 6 - UHAT = 7 - VHAT = 7 -* -* Check if caller will need indexing info. -* - IF( NDX1.NE.-1 ) THEN - IF( NDX1.EQ.1 ) THEN - NEED1 = ((R - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.2 ) THEN - NEED1 = ((RTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.3 ) THEN - NEED1 = ((P - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.4 ) THEN - NEED1 = ((PHAT - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.5 ) THEN - NEED1 = ((Q - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.6 ) THEN - NEED1 = ((QHAT - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.7 ) THEN - NEED1 = ((U - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.8 ) THEN - NEED1 = ((UHAT - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.9 ) THEN - NEED1 = ((VHAT - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED1 = NDX1 - ENDIF -* - IF( NDX2.NE.-1 ) THEN - IF( NDX2.EQ.1 ) THEN - NEED2 = ((R - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.2 ) THEN - NEED2 = ((RTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.3 ) THEN - NEED2 = ((P - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.4 ) THEN - NEED2 = ((PHAT - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.5 ) THEN - NEED2 = ((Q - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.6 ) THEN - NEED2 = ((QHAT - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.7 ) THEN - NEED2 = ((U - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.8 ) THEN - NEED2 = ((UHAT - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.9 ) THEN - NEED2 = ((VHAT - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED2 = NDX2 - ENDIF -* -* Set breakdown tolerance parameter. -* - RHOTOL = GETBREAK() -* -* Set initial residual. -* - CALL <_c>COPY( N, B, 1, WORK(1,R), 1 ) - IF ( NRM2( N, X, 1 ).NE.ZERO ) THEN -*********CALL MATVEC( -ONE, X, ONE, WORK(1,R) ) -* Note: using RTLD[] as temp. storage. 
-*********CALL <_c>COPY(N, X, 1, WORK(1,RTLD), 1) - SCLR1 = -ONE - SCLR2 = ONE - NDX1 = -1 - NDX2 = ((R - 1) * LDW) + 1 -* -* Prepare for resumption & return - RLBL = 2 - IJOB = 3 - RETURN - ENDIF -* -***************** - 2 CONTINUE -***************** -* - IF ( NRM2( N, WORK(1,R), 1 ).LE.TOL ) GO TO 30 -* - BNRM2 = NRM2( N, B, 1 ) - IF ( BNRM2.EQ.ZERO ) BNRM2 = ONE -* -* Choose RTLD such that initially, (R,RTLD) = RHO is not equal to 0. -* Here we choose RTLD = R. -* - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,RTLD), 1 ) -* - ITER = 0 -* - 10 CONTINUE -* -* Perform Conjugate Gradient Squared iteration. -* - ITER = ITER + 1 -* - RHO = ( N, WORK(1,RTLD), 1, WORK(1,R), 1 ) - IF ( ABS( RHO ).LT.RHOTOL ) GO TO 25 -* -* Compute direction vectors U and P. -* - IF ( ITER.GT.1 ) THEN -* -* Compute U. -* - BETA = RHO / RHO1 - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,U), 1 ) - CALL <_c>AXPY( N, BETA, WORK(1,Q), 1, WORK(1,U), 1 ) -* -* Compute P. -* - CALL <_c>SCAL( N, BETA**2, WORK(1,P), 1 ) - CALL <_c>AXPY( N, BETA, WORK(1,Q), 1, WORK(1,P), 1 ) - TMPVAL = ONE - CALL <_c>AXPY( N, TMPVAL, WORK(1,U), 1, WORK(1,P), 1 ) - ELSE - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,U), 1 ) - CALL <_c>COPY( N, WORK(1,U), 1, WORK(1,P), 1 ) - ENDIF -* -* Compute direction adjusting scalar ALPHA. 
-* -*********CALL PSOLVE( WORK(1,PHAT), WORK(1,P) ) -* - NDX1 = ((PHAT - 1) * LDW) + 1 - NDX2 = ((P - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 3 - IJOB = 2 - RETURN -* -***************** - 3 CONTINUE -***************** -* -*********CALL MATVEC( ONE, WORK(1,PHAT), ZERO, WORK(1,VHAT) ) -* - NDX1 = ((PHAT - 1) * LDW) + 1 - NDX2 = ((VHAT - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = ONE - SCLR2 = ZERO - RLBL = 4 - IJOB = 1 - RETURN -* -***************** - 4 CONTINUE -***************** -* - ALPHA = RHO / ( N, WORK(1,RTLD), 1, WORK(1,VHAT), 1 ) -* - CALL <_c>COPY( N, WORK(1,U), 1, WORK(1,Q), 1 ) - CALL <_c>AXPY( N, -ALPHA, WORK(1,VHAT), 1, WORK(1,Q), 1 ) -* -* Compute direction adjusting vectORT UHAT. -* PHAT is being used as temporary storage here. -* - CALL <_c>COPY( N, WORK(1,Q), 1, WORK(1,PHAT), 1 ) - TMPVAL = ONE - CALL <_c>AXPY( N, TMPVAL, WORK(1,U), 1, WORK(1,PHAT), 1 ) -*********CALL PSOLVE( WORK(1,UHAT), WORK(1,PHAT) ) -* - NDX1 = ((UHAT - 1) * LDW) + 1 - NDX2 = ((PHAT - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 5 - IJOB = 2 - RETURN -* -***************** - 5 CONTINUE -***************** -* -* Compute new solution approximation vector X. -* - CALL <_c>AXPY( N, ALPHA, WORK(1,UHAT), 1, X, 1 ) -* -* Compute residual R and check for tolerance. 
-* -*********CALL MATVEC( ONE, WORK(1,UHAT), ZERO, WORK(1,QHAT) ) -* - NDX1 = ((UHAT - 1) * LDW) + 1 - NDX2 = ((QHAT - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = ONE - SCLR2 = ZERO - RLBL = 6 - IJOB = 1 - RETURN -* -***************** - 6 CONTINUE -***************** -* - CALL <_c>AXPY( N, -ALPHA, WORK(1,QHAT), 1, WORK(1,R), 1 ) -* -*********RESID = NRM2( N, WORK(1,R), 1 ) / BNRM2 -*********IF ( RESID.LE.TOL ) GO TO 30 -* - NDX1 = NEED1 - NDX2 = NEED2 -* Prepare for resumption & return - RLBL = 7 - IJOB = 4 - RETURN -* -***************** - 7 CONTINUE -***************** - IF( INFO.EQ.1 ) GO TO 30 -* - IF ( ITER.EQ.MAXIT ) THEN - INFO = 1 - GO TO 20 - ENDIF -* - RHO1 = RHO -* - GO TO 10 -* - 20 CONTINUE -* -* Iteration fails. -* - RLBL = -1 - IJOB = -1 - RETURN -* - 25 CONTINUE -* -* Set breakdown flag. -* - IF ( ABS( RHO ).LT.RHOTOL ) INFO = -10 -* - 30 CONTINUE -* -* Iteration successful; return. -* - INFO = 0 - RLBL = -1 - IJOB = -1 - RETURN -* -* End of CGSREVCOM -* - END -* END SUBROUTINE <_c>CGSREVCOM - - - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/dummy.f b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/dummy.f deleted file mode 100644 index 46a7148871..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/dummy.f +++ /dev/null @@ -1,57 +0,0 @@ - double complex function wzdotc(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z - double complex zdotc - integer n, incx, incy - - z = zdotc(n, zx, incx, zy, incy) - wzdotc = z - - end - - double complex function wzdotu(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z, zdotu - integer n, incx, incy - - z = zdotu(n, zx, incx, zy, incy) - wzdotu = z - - return - end - - complex function wcdotc(n, cx, incx, cy, incy) - complex cx(*), cy(*), c, cdotc - integer n, incx, incy - - c = cdotc(n, cx, incx, cy, incy) - wcdotc = c - - return - end - - complex function wcdotu(n, cx, incx, cy, incy) - complex cx(*), cy(*), c, cdotu - 
integer n, incx, incy - - c = cdotu(n, cx, incx, cy, incy) - wcdotu = c - - return - end - - complex function wcladiv(x, y) - complex x, y, z - complex cladiv - - z = cladiv(x, y) - wcladiv = z - return - end - - double complex function wzladiv(x, y) - double complex x, y, z - double complex zladiv - - z = zladiv(x, y) - wzladiv = z - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/veclib_cabi_c.c b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/veclib_cabi_c.c deleted file mode 100644 index 64565a28cd..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/veclib_cabi_c.c +++ /dev/null @@ -1,26 +0,0 @@ -#include -#include - -#define WRAP_F77(a) a##_ -void WRAP_F77(veclib_cdotc)(const int *N, const complex float *X, const int *incX, -const complex float *Y, const int *incY, complex float *dotc) -{ - cblas_cdotc_sub(*N, X, *incX, Y, *incY, dotc); -} - -void WRAP_F77(veclib_cdotu)(const int *N, const complex float *X, const int *incX, -const complex float *Y, const int *incY, complex float* dotu) -{ - cblas_cdotu_sub(*N, X, *incX, Y, *incY, dotu); -} - -void WRAP_F77(veclib_zdotc)(const int *N, const double complex *X, const int -*incX, const double complex *Y, const int *incY, double complex *dotu) -{ - cblas_zdotc_sub(*N, X, *incX, Y, *incY, dotu); -} -void WRAP_F77(veclib_zdotu)(const int *N, const double complex *X, const int -*incX, const double complex *Y, const int *incY, double complex *dotu) -{ - cblas_zdotu_sub(*N, X, *incX, Y, *incY, dotu); -} diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/veclib_cabi_f.f b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/veclib_cabi_f.f deleted file mode 100644 index e050f8250e..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/FWRAPPERS/veclib_cabi_f.f +++ /dev/null @@ -1,55 +0,0 @@ - double complex function wzdotc(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z - integer n, incx, incy - 
- call veclib_zdotc(n, zx, incx, zy, incy, z) - - wzdotc = z - return - end - - double complex function wzdotu(n, zx, incx, zy, incy) - double complex zx(*), zy(*), z - integer n, incx, incy - - call veclib_zdotu(n, zx, incx, zy, incy, z) - - wzdotu = z - return - end - - complex function wcdotc(n, cx, incx, cy, incy) - complex cx(*), cy(*), c - integer n, incx, incy - - call veclib_cdotc(n, cx, incx, cy, incy, c) - - wcdotc = c - return - end - - complex function wcdotu(n, cx, incx, cy, incy) - complex cx(*), cy(*), c - integer n, incx, incy - - call veclib_cdotu(n, cx, incx, cy, incy, c) - - wcdotu = c - return - end - - complex function wcladiv(x, y) - complex x, y, z - - call cladiv(z, x, y) - wcladiv = z - return - end - - double complex function wzladiv(x, y) - double complex x, y, z - - call zladiv(z, x, y) - wzladiv = z - return - end diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/GMRESREVCOM.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/GMRESREVCOM.f.src deleted file mode 100644 index 743c24e775..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/GMRESREVCOM.f.src +++ /dev/null @@ -1,593 +0,0 @@ -* -*- fortran -*- - SUBROUTINE <_c>GMRESREVCOM(N, B, X, RESTRT, WORK, LDW, WORK2, - $ LDW2, ITER, RESID, INFO, NDX1, NDX2, SCLR1, - $ SCLR2, IJOB) -* -* -- Iterative template routine -- -* Univ. of Tennessee and Oak Ridge National Laboratory -* October 1, 1993 -* Details of this algorithm are described in "Templates for the -* Solution of Linear Systems: Building Blocks for Iterative -* Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra, -* EiITERkhout, Pozo, Romine, and van der Vorst, SIAM Publications, -* 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps). -* -* .. Scalar Arguments .. - INTEGER N, RESTRT, LDW, LDW2, ITER, INFO - RESID - INTEGER NDX1, NDX2 - <_t> SCLR1, SCLR2 - INTEGER IJOB -* .. -* .. Array Arguments .. - <_t> B( * ), X( * ), WORK( LDW,* ), WORK2( LDW2,* ) -* .. 
-* -* Purpose -* ======= -* -* GMRES solves the linear system Ax = b using the -* Generalized Minimal Residual iterative method with preconditioning. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. -* -* X (input/output) DOUBLE PRECISION array, dimension N. -* On input, the initial guess; on exit, the iterated solution. -* -* RESTRT (input) INTEGER -* Restart parameter, .ls. = N. This parameter controls the amount -* of memory required for matrix WORK2. -* -* WORK (workspace) DOUBLE PRECISION array, dimension (LDW,6+restrt). -* Note that if the initial guess is the zero vector, then -* storing the initial residual is not necessary. -* -* LDW (input) INTEGER -* The leading dimension of the array WORK. LDW .gt. = max(1,N). -* -* WORK2 (workspace) DOUBLE PRECISION array, dimension (LDW2,2*RESTRT+2). -* This workspace is used for constructing and storing the -* upper Hessenberg matrix. The two extra columns are used to -* store the Givens rotation matrices. -* -* LDW2 (input) INTEGER -* The leading dimension of the array WORK2. -* LDW2 .gt. = max(2,RESTRT+1). -* -* ITER (input/output) INTEGER -* On input, the maximum iterations to be performed. -* On output, actual number of iterations performed. -* -* RESID (input/output) DOUBLE PRECISION -* On input, the allowable error tolerance. -* On ouput, the norm of the residual vector if solution -* approximated to tolerance, otherwise reset to input -* tolerance. -* -* INFO (output) INTEGER -* = 0: successful exit -* = 1: maximum number of iterations performed; -* convergence not achieved. -* -5: Erroneous NDX1/NDX2 in INIT call. -* -6: Erroneous RLBL. -* -* NDX1 (input/output) INTEGER. -* NDX2 On entry in INIT call contain indices required by interface -* level for stopping test. 
-* All other times, used as output, to indicate indices into -* WORK[] for the MATVEC, PSOLVE done by the interface level. -* -* SCLR1 (output) DOUBLE PRECISION. -* SCLR2 Used to pass the scalars used in MATVEC. Scalars are reqd because -* original routines use dgemv. -* -* IJOB (input/output) INTEGER. -* Used to communicate job code between the two levels. -* -* ============================================================ -* -* .. Parameters .. - ZERO, ONE - PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) - INTEGER OFSET - PARAMETER ( OFSET = 1000 ) -* .. -* .. Local Scalars .. - INTEGER I, MAXIT, AV, GIV, H, R, S, V, W, Y, - $ NEED1, NEED2 - <_t> - <_t> toz - <_t> TMPVAL - BNRM2, RNORM, TOL, - $ NRM2, - $ APPROXRES - -* -* indicates where to resume from. Only valid when IJOB = 2! - INTEGER RLBL -* -* saving all. - SAVE -* -* .. -* .. External Routines .. - EXTERNAL <_c>AXPY, <_c>COPY, , NRM2, <_c>SCAL -* .. -* .. Executable Statements .. -* -* Entry point, so test IJOB - IF (IJOB .eq. 1) THEN - GOTO 1 - ELSEIF (IJOB .eq. 2) THEN -* here we do resumption handling - IF (RLBL .eq. 2) GOTO 2 - IF (RLBL .eq. 3) GOTO 3 - IF (RLBL .eq. 4) GOTO 4 - IF (RLBL .eq. 5) GOTO 5 - IF (RLBL .eq. 6) GOTO 6 - IF (RLBL .eq. 7) GOTO 7 -* if neither of these, then error - INFO = -6 - GOTO 200 - ENDIF -* -* init. -***************** - 1 CONTINUE -***************** -* - INFO = 0 - MAXIT = ITER - TOL = RESID -* -* Alias workspace columns. -* - R = 1 - S = 2 - W = 3 - Y = 4 - AV = 5 - V = 6 -* - H = 1 - GIV = H + RESTRT -* -* Check if caller will need indexing info. -* - IF( NDX1.NE.-1 ) THEN - IF( NDX1.EQ.1 ) THEN - NEED1 = ((R - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.2 ) THEN - NEED1 = ((S - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.3 ) THEN - NEED1 = ((W - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.4 ) THEN - NEED1 = ((Y - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.5 ) THEN - NEED1 = ((AV - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.6 ) THEN - NEED1 = ((V - 1) * LDW) + 1 - ELSEIF( ( NDX1.GT.V*OFSET ) .AND. 
- $ ( NDX1.LE.V*OFSET+RESTRT ) ) THEN - NEED1 = ((NDX1-V*OFSET - 1) * LDW) + 1 - ELSEIF( ( NDX1.GT.GIV*OFSET ) .AND. - $ ( NDX1.LE.GIV*OFSET+RESTRT ) ) THEN - NEED1 = ((NDX1-GIV*OFSET - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 100 - ENDIF - ELSE - NEED1 = NDX1 - ENDIF -* - IF( NDX2.NE.-1 ) THEN - IF( NDX2.EQ.1 ) THEN - NEED2 = ((R - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.2 ) THEN - NEED2 = ((S - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.3 ) THEN - NEED2 = ((W - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.4 ) THEN - NEED2 = ((Y - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.5 ) THEN - NEED2 = ((AV - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.6 ) THEN - NEED2 = ((V - 1) * LDW) + 1 - ELSEIF( ( NDX2.GT.V*OFSET ) .AND. - $ ( NDX2.LE.V*OFSET+RESTRT ) ) THEN - NEED2 = ((NDX2-V*OFSET - 1) * LDW) + 1 - ELSEIF( ( NDX2.GT.GIV*OFSET ) .AND. - $ ( NDX2.LE.GIV*OFSET+RESTRT ) ) THEN - NEED2 = ((NDX2-GIV*OFSET - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 100 - ENDIF - ELSE - NEED2 = NDX2 - ENDIF -* -* Set initial residual. -* - CALL <_c>COPY( N, B, 1, WORK(1,R), 1 ) - IF ( NRM2( N, X, 1 ).NE.ZERO ) THEN -*********CALL MATVEC( -ONE, X, ONE, WORK(1,R) ) -* Note: using X directly - SCLR1 = -ONE - SCLR2 = ONE - NDX1 = -1 - NDX2 = ((R - 1) * LDW) + 1 -* -* Prepare for resumption & return - RLBL = 2 - IJOB = 1 - RETURN - ENDIF -* -***************** - 2 CONTINUE -***************** -* - IF ( NRM2( N, WORK(1,R), 1 ).LT.TOL ) GO TO 200 - BNRM2 = NRM2( N, B, 1 ) - IF ( BNRM2.EQ.ZERO ) BNRM2 = ONE -* - ITER = 0 - 10 CONTINUE -* - ITER = ITER + 1 -* -* Construct the first column of V, and initialize S to the -* elementary vector E1 scaled by RNORM. 
-* -*********CALL PSOLVE( WORK( 1,V ), WORK( 1,R ) ) -* - NDX1 = ((V - 1) * LDW) + 1 - NDX2 = ((R - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 3 - IJOB = 2 - RETURN -* -***************** - 3 CONTINUE -***************** -* - RNORM = NRM2( N, WORK( 1,V ), 1 ) - toz = ONE/RNORM - CALL <_c>SCAL( N, toz, WORK( 1,V ), 1 ) - TMPVAL = RNORM - CALL <_c>ELEMVEC( 1, N, TMPVAL, WORK( 1,S ) ) -* -* DO 50 I = 1, RESTRT - i = 1 - 49 if (i.gt.restrt) go to 50 -************CALL MATVEC( ONE, WORK( 1,V+I-1 ), ZERO, WORK( 1,AV ) ) -* - NDX1 = ((V+I-1 - 1) * LDW) + 1 - NDX2 = ((AV - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = ONE - SCLR2 = ZERO - RLBL = 4 - IJOB = 3 - RETURN -* -***************** - 4 CONTINUE -***************** -* -*********CALL PSOLVE( WORK( 1,W ), WORK( 1,AV ) ) -* - NDX1 = ((W - 1) * LDW) + 1 - NDX2 = ((AV - 1) * LDW) + 1 -* Prepare for return & return - RLBL = 5 - IJOB = 2 - RETURN -* -***************** - 5 CONTINUE -***************** -* -* Construct I-th column of H so that it is orthnormal to -* the previous I-1 columns. -* - CALL <_c>ORTHOH( I, N, WORK2( 1,I+H-1 ), WORK( 1,V ), LDW, - $ WORK( 1,W ) ) -* - IF ( I.GT.0 ) -* -* Apply Givens rotations to the I-th column of H. This -* effectively reduces the Hessenberg matrix to upper -* triangular form during the RESTRT iterations. -* - $ CALL <_c>APPLYGIVENS(I, WORK2( 1,I+H-1 ), WORK2( 1,GIV ), - $ LDW2 ) -* -* Approxiyate residual norm. Check tolerance. If okay, compute -* final approximation vector X and quit. -* - RESID = APPROXRES( I, WORK2( 1,I+H-1 ), WORK( 1,S ), - $ WORK2( 1,GIV ), LDW2 ) / BNRM2 - IF ( RESID.LE.TOL ) THEN - CALL <_c>UPDATE(I, N, X, WORK2( 1,H ), LDW2, - $ WORK(1,Y), WORK(1,S), WORK( 1,V ), LDW) - GO TO 200 - ENDIF - i = i + 1 - go to 49 - 50 CONTINUE - i = restrt -* -* Compute current solution vector X. 
-* - CALL <_c>UPDATE(RESTRT, N, X, WORK2( 1,H ), LDW2, - $ WORK(1,Y), WORK( 1,S ), WORK( 1,V ), LDW ) -* -* Compute residual vector R, find norm, -* then check for tolerance. -* - CALL <_c>COPY( N, B, 1, WORK( 1,R ), 1 ) -*********CALL MATVEC( -ONE, X, ONE, WORK( 1,R ) ) -* - NDX1 = -1 - NDX2 = ((R - 1) * LDW) + 1 -* Prepare for return & return - SCLR1 = -ONE - SCLR2 = ONE - RLBL = 6 - IJOB = 1 - RETURN -* -***************** - 6 CONTINUE -***************** -* - WORK( I+1,S ) = NRM2( N, WORK( 1,R ), 1 ) -* -*********RESID = WORK( I+1,S ) / BNRM2 -*********IF ( RESID.LE.TOL ) GO TO 200 -* - NDX1 = NEED1 - NDX2 = NEED2 -* Prepare for resumption & return - RLBL = 7 - IJOB = 4 - RETURN -* -***************** - 7 CONTINUE -***************** - IF( INFO.EQ.1 ) GO TO 200 -* - IF ( ITER.EQ.MAXIT ) THEN - INFO = 1 - GO TO 100 - ENDIF -* - GO TO 10 -* - 100 CONTINUE -* -* Iteration fails. -* - RLBL = -1 - IJOB = -1 - RETURN -* - 200 CONTINUE -* -* Iteration successful; return. -* - INFO = 0 - RLBL = -1 - IJOB = -1 - - RETURN -* -* End of GMRESREVCOM -* - END -* END SUBROUTINE <_c>GMRESREVCOM -* -* ========================================================= - SUBROUTINE <_c>ORTHOH( I, N, H, V, LDV, W ) -* - INTEGER I, N, LDV - <_t> H( * ), W( * ), V( LDV,* ) -* -* Construct the I-th column of the upper Hessenberg matrix H -* using the Gram-Schmidt process on V and W. 
-* - INTEGER K - - $ NRM2, ONE - PARAMETER ( ONE = 1.0D+0 ) - <_t> - <_t> TMPVAL - EXTERNAL <_c>AXPY, <_c>COPY, , NRM2, <_c>SCAL -* - DO 10 K = 1, I - H( K ) = ( N, V( 1,K ), 1, W, 1 ) - CALL <_c>AXPY( N, -H( K ), V( 1,K ), 1, W, 1 ) - 10 CONTINUE - H( I+1 ) = NRM2( N, W, 1 ) - CALL <_c>COPY( N, W, 1, V( 1,I+1 ), 1 ) - TMPVAL = ONE / H( I+1 ) - CALL <_c>SCAL( N, TMPVAL, V( 1,I+1 ), 1 ) -* - RETURN -* - END -* END SUBROUTINE <_c>ORTHOH -* ========================================================= - SUBROUTINE <_c>APPLYGIVENS( I, H, GIVENS, LDG ) -* - INTEGER I, LDG - <_t> H( * ), GIVENS( LDG,* ) -* -* This routine applies a sequence of I-1 Givens rotations to -* the I-th column of H. The Givens parameters are stored, so that -* the first I-2 Givens rotation matrices are known. The I-1st -* Givens rotation is computed using BLAS 1 routine DROTG. Each -* rotation is applied to the 2x1 vector [H( J ), H( J+1 )]', -* which results in H( J+1 ) = 0. -* - INTEGER J -* DOUBLE PRECISION TEMP - EXTERNAL <_c>ROTG -* -* .. Executable Statements .. -* -* Construct I-1st rotation matrix. -* -* CALL <_c>ROTG( H( I ), H( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) -* CALL <_c>GETGIV( H( I ), H( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) -* -* Apply 1,...,I-1st rotation matrices to the I-th column of H. 
-* - DO 10 J = 1, I-1 - CALL <_c>ROTVEC(H( J ), H( J+1 ), GIVENS( J,1 ), GIVENS( J,2 )) -* TEMP = GIVENS( J,1 ) * H( J ) + GIVENS( J,2 ) * H( J+1 ) -* H( J+1 ) = -GIVENS( J,2 ) * H( J ) + GIVENS( J,1 ) * H( J+1 ) -* H( J ) = TEMP - 10 CONTINUE - call <_c>getgiv( H( I ), H( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) - call <_c>rotvec( H( I ), H( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) -* - RETURN -* - END -* END SUBROUTINE <_c>APPLYGIVENS -* -* =============================================================== - - $ FUNCTION APPROXRES( I, H, S, GIVENS, LDG ) -* - INTEGER I, LDG - <_t> H( * ), S( * ), GIVENS( LDG,* ) -* -* This func allows the user to approximate the residual -* using an updating scheme involving Givens rotations. The -* rotation matrix is formed using [H( I ),H( I+1 )]' with the -* intent of zeroing H( I+1 ), but here is applied to the 2x1 -* vector [S(I), S(I+1)]'. -* - INTRINSIC ABS - EXTERNAL <_c>ROTG -* -* .. Executable Statements .. -* -* CALL <_c>ROTG( H( I ), H( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) -* CALL <_c>GETGIV( H( I ), H( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) - CALL <_c>ROTVEC( S( I ), S( I+1 ), GIVENS( I,1 ), GIVENS( I,2 ) ) -* - APPROXRES = ABS( S( I+1 ) ) -* - RETURN -* - END -* END FUNCTION APPROXRES -* =============================================================== - SUBROUTINE <_c>UPDATE( I, N, X, H, LDH, Y, S, V, LDV ) -* - INTEGER N, I, J, LDH, LDV - <_t> X( * ), Y( * ), S( * ), H( LDH,* ), V( LDV,* ) - EXTERNAL <_c>AXPY, <_c>COPY, <_c>TRSV -* -* Solve H*y = s for upper triangualar H. -* - CALL <_c>COPY( I, S, 1, Y, 1 ) - CALL <_c>TRSV( 'UPPER', 'NOTRANS', 'NONUNIT', I, H, LDH, Y, 1 ) -* -* Compute current solution vector X. 
-* - DO 10 J = 1, I - CALL <_c>AXPY( N, Y( J ), V( 1,J ), 1, X, 1 ) - 10 CONTINUE -* - RETURN -* - END -* END SUBROUTINE <_c>UPDATE -* -* =============================================================== - SUBROUTINE <_c>GETGIV( A, B, C, S ) -* - <_t> A, B, C, S, TEMP, ZERO, ONE - PARAMETER ( - $ ZERO = 0.0, - $ ONE = 1.0 ) -* - IF ( ABS( B ).EQ.ZERO ) THEN - C = ONE - S = ZERO - ELSE IF ( ABS( B ).GT.ABS( A ) ) THEN - TEMP = -A / B - S = ONE / SQRT( ONE + abs(TEMP)**2 ) - C = TEMP * S -* S = b / SQRT( abs(a)**2 + abs(b)**2 ) -* C = -a / SQRT( abs(a)**2 + abs(b)**2 ) - ELSE - TEMP = -B / A - C = ONE / SQRT( ONE + abs(TEMP)**2 ) - S = TEMP * C -* S = -b / SQRT( abs(a)**2 + abs(b)**2 ) -* C = a / SQRT( abs(a)**2 + abs(b)**2 ) - ENDIF -* - RETURN -* - END -* END SUBROUTINE <_c>GETGIV -* -* ================================================================ - SUBROUTINE <_c>ROTVEC( X, Y, C, S ) -* - <_t> X, Y, C, S, TEMP - -* - TEMP = (C) * X - (S) * Y - Y = S * X + C * Y - X = TEMP -* - RETURN -* - END -* END SUBROUTINE <_c>ROTVEC -* -* =============================================================== - SUBROUTINE <_c>ELEMVEC( I, N, ALPHA, E ) -* -* Construct the I-th elementary vector E, scaled by ALPHA. -* - INTEGER I, J, N - <_t> ALPHA, E( * ) -* -* .. Parameters .. - ZERO - PARAMETER ( ZERO = 0.0D+0 ) -* - DO 10 J = 1, N - E( J ) = ZERO - 10 CONTINUE - E( I ) = ALPHA -* - RETURN -* - END -* END SUBROUTINE <_c>ELEMVEC - diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/QMRREVCOM.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/QMRREVCOM.f.src deleted file mode 100644 index ebc8ccb3d7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/QMRREVCOM.f.src +++ /dev/null @@ -1,565 +0,0 @@ -* -*- fortran -*- - SUBROUTINE <_c>QMRREVCOM(N, B, X, WORK, LDW, ITER, RESID, INFO, - $ NDX1, NDX2, SCLR1, SCLR2, IJOB) -* -* -* -- Iterative template routine -- -* Univ. 
of Tennessee and Oak Ridge National Laboratory -* October 1, 1993 -* Details of this algorithm are described in "Templates for the -* Solution of Linear Systems: Building Blocks for Iterative -* Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra, -* Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications, -* 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps). -* -* .. Scalar Arguments .. - INTEGER N, LDW, ITER, INFO - RESID - INTEGER NDX1, NDX2 - <_t> SCLR1, SCLR2 - INTEGER IJOB -* .. -* .. Array Arguments .. - <_t> X( * ), B( * ), WORK( LDW,* ) -* .. -* Purpose -* ======= -* -* QMR Method solves the linear system Ax = b using the -* Quasi-Minimal Residual iterative method with preconditioning. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. -* -* X (input/output) DOUBLE PRECISION array, dimension N. -* On input, the initial guess; on exit, the iterated solution. -* -* -* WORK (workspace) DOUBLE PRECISION array, dimension (LDW,11). -* Workspace for residual, direction vector, etc. -* Note that W and WTLD, Y and YTLD, and Z and ZTLD share -* workspace. -* -* LDW (input) INTEGER -* The leading dimension of the array WORK. LDW .gt. = max(1,N). -* -* ITER (input/output) INTEGER -* On input, the maximum iterations to be performed. -* On output, actual number of iterations performed. -* -* RESID (input/output) DOUBLE PRECISION -* On input, the allowable convergence measure for -* norm( b - A*x ) / norm( b ). -* On output, the final value of this measure. -* -* INFO (output) INTEGER -* -* = 0: Successful exit. Iterated approximate solution returned. -* -5: Erroneous NDX1/NDX2 in INIT call. -* -6: Erroneous RLBL. -* -* .gt. 0: Convergence to tolerance not achieved. This will be -* set to the number of iterations performed. -* -* .ls. 
0: Illegal input parameter, or breakdown occurred -* during iteration. -* -* Illegal parameter: -* -* -1: matrix dimension N .ls. 0 -* -2: LDW .ls. N -* -3: Maximum number of iterations ITER .ls. = 0. -* -* BREAKDOWN: If parameters RHO or OMEGA become smaller -* than some tolerance, the program will terminate. -* Here we check against tolerance BREAKTOL. -* -* -10: RHO .ls. BREAKTOL: RHO and RTLD have become -* orthogonal. -* -11: BETA .ls. BREAKTOL: EPS too small in relation to DELT -* Convergence has stalled. -* -12: GAMMA .ls. BREAKTOL: THETA too large. -* Convergence has stalled. -* -13: DELTA .ls. BREAKTOL: Y and Z have become -* orthogonal. -* -14: EPS .ls. BREAKTOL: Q and PTLD have become -* orthogonal. -* -15: XI .ls. BREAKTOL: Z too small. -* Convergence has stalled. -* -* BREAKTOL is set in func GETBREAK. -* -* NDX1 (input/output) INTEGER. -* NDX2 On entry in INIT call contain indices required by interface -* level for stopping test. -* All other times, used as output, to indicate indices into -* WORK[] for the MATVEC, PSOLVE done by the interface level. -* -* SCLR1 (output) DOUBLE PRECISION. -* SCLR2 Used to pass the scalars used in MATVEC. Scalars are reqd because -* original routines use dgemv. -* -* IJOB (input/output) INTEGER. -* Used to communicate job code between the two levels. -* -* BLAS CALLS: DAXPY, DCOPY, DDOT, DNRM2, DSCAL -* ============================================================== -* -* .. Parameters .. - ONE, ZERO - PARAMETER ( ONE = 1.0D+0 , ZERO = 0.0D+0) -* -* .. Local Scalars .. - INTEGER R, D, P, PTLD, Q, S, V, VTLD, W, WTLD, Y, YTLD, - $ Z, ZTLD, MAXIT, NEED1, NEED2 - TOL, BNRM2, RHOTOL, BETATOL, - $ GAMMATOL, DELTATOL, - $ EPSTOL, XITOL, - $ GETBREAK, - $ NRM2 - - - <_t> BETA, GAMMA, GAMMA1, DELTA, EPS, ETA, XI, - $ RHO, RHO1, THETA, THETA1, C1, TMPVAL, - $ , - $ toz -* -* indicates where to resume from. Only valid when IJOB = 2! - INTEGER RLBL -* -* saving all. - SAVE -* -* .. -* .. External Routines .. 
- EXTERNAL <_c>AXPY, <_c>COPY, , NRM2, <_c>SCAL -* .. -* .. Intrinsic Funcs .. - INTRINSIC ABS, SQRT -* .. -* .. Executable Statements .. -* -* Entry point, so test IJOB - IF (IJOB .eq. 1) THEN - GOTO 1 - ELSEIF (IJOB .eq. 2) THEN -* here we do resumption handling - IF (RLBL .eq. 2) GOTO 2 - IF (RLBL .eq. 3) GOTO 3 - IF (RLBL .eq. 4) GOTO 4 - IF (RLBL .eq. 5) GOTO 5 - IF (RLBL .eq. 6) GOTO 6 - IF (RLBL .eq. 7) GOTO 7 - IF (RLBL .eq. 8) GOTO 8 - IF (RLBL .eq. 9) GOTO 9 - IF (RLBL .eq. 10) GOTO 10 - IF (RLBL .eq. 11) GOTO 11 -* if neither of these, then error - INFO = -6 - GOTO 20 - ENDIF -* -* -***************** - 1 CONTINUE -***************** -* - INFO = 0 - MAXIT = ITER - TOL = RESID -* -* Alias workspace columns. -* - R = 1 - D = 2 - P = 3 - PTLD = 4 - Q = 5 - S = 6 - V = 7 - VTLD = 8 - W = 9 - WTLD = 9 - Y = 10 - YTLD = 10 - Z = 11 - ZTLD = 11 -* -* Check if caller will need indexing info. -* - IF( NDX1.NE.-1 ) THEN - IF( NDX1.EQ.1 ) THEN - NEED1 = ((R - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.2 ) THEN - NEED1 = ((D - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.3 ) THEN - NEED1 = ((P - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.4 ) THEN - NEED1 = ((PTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.5 ) THEN - NEED1 = ((Q - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.6 ) THEN - NEED1 = ((S - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.7 ) THEN - NEED1 = ((V - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.8 ) THEN - NEED1 = ((VTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.9 ) THEN - NEED1 = ((W - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.10 ) THEN - NEED1 = ((WTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.11 ) THEN - NEED1 = ((Y - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.12 ) THEN - NEED1 = ((YTLD - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.13 ) THEN - NEED1 = ((Z - 1) * LDW) + 1 - ELSEIF( NDX1.EQ.14 ) THEN - NEED1 = ((ZTLD - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED1 = NDX1 - ENDIF -* - IF( NDX2.NE.-1 ) THEN - IF( NDX2.EQ.1 ) THEN - NEED2 = ((R - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.2 ) THEN - NEED2 = ((D - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.3 ) THEN - 
NEED2 = ((P - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.4 ) THEN - NEED2 = ((PTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.5 ) THEN - NEED2 = ((Q - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.6 ) THEN - NEED2 = ((S - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.7 ) THEN - NEED2 = ((V - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.8 ) THEN - NEED2 = ((VTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.9 ) THEN - NEED2 = ((W - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.10 ) THEN - NEED2 = ((WTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.11 ) THEN - NEED2 = ((Y - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.12 ) THEN - NEED2 = ((YTLD - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.13 ) THEN - NEED2 = ((Z - 1) * LDW) + 1 - ELSEIF( NDX2.EQ.14 ) THEN - NEED2 = ((ZTLD - 1) * LDW) + 1 - ELSE -* report error - INFO = -5 - GO TO 20 - ENDIF - ELSE - NEED2 = NDX2 - ENDIF -* -* Set breakdown tolerances. -* - RHOTOL = GETBREAK() - BETATOL = GETBREAK() - GAMMATOL = GETBREAK() - DELTATOL = GETBREAK() - EPSTOL = GETBREAK() - XITOL = GETBREAK() -* -* Set initial residual. -* - CALL <_c>COPY( N, B, 1, WORK(1,R), 1 ) - IF ( NRM2( N, X, 1 ).NE.ZERO ) THEN -*********CALL MATVEC( -ONE, X, ZERO, WORK(1,R) ) -* Note: using D as temp -*********CALL <_c>COPY( N, X, 1, WORK(1,D), 1 ) - SCLR1 = -ONE - SCLR2 = ZERO - NDX1 = ((D - 1) * LDW) + 1 - NDX2 = ((R - 1) * LDW) + 1 - RLBL = 2 - IJOB = 7 - RETURN - ENDIF -***************** - 2 CONTINUE -***************** -* - IF ( NRM2( N, WORK(1,R), 1 ) .LT. 
TOL ) GO TO 30 -* - BNRM2 = NRM2( N, B, 1 ) - IF ( BNRM2.EQ.ZERO ) BNRM2 = ONE -* - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,VTLD), 1 ) -******CALL PSOLVEQ( WORK(1,Y), WORK(1,VTLD), 'LEFT' ) -* - NDX1 = ((Y - 1) * LDW) + 1 - NDX2 = ((VTLD - 1) * LDW) + 1 - RLBL = 3 - IJOB = 3 - RETURN -***************** - 3 CONTINUE -***************** -* - RHO = NRM2( N, WORK(1,Y), 1 ) -* - CALL <_c>COPY( N, WORK(1,R), 1, WORK(1,WTLD), 1 ) -******CALL PSOLVETRANSQ( WORK(1,Z), WORK(1,WTLD), 'RIGHT' ) -* - NDX1 = ((Z - 1) * LDW) + 1 - NDX2 = ((WTLD - 1) * LDW) + 1 - RLBL = 4 - IJOB = 6 - RETURN -***************** - 4 CONTINUE -***************** -* - XI = NRM2( N, WORK(1,Z), 1 ) -* - GAMMA = ONE - ETA = -ONE - THETA = ZERO -* - ITER = 0 -* - 40 CONTINUE -* -* Perform Preconditioned QMR iteration. -* - ITER = ITER + 1 -* - IF ( ( ABS( RHO ).LT.RHOTOL ).OR.( ABS( XI ).LT.XITOL ) ) - $ GO TO 25 -* - CALL <_c>COPY( N, WORK(1,VTLD), 1, WORK(1,V), 1 ) - TMPVAL = ONE / RHO - CALL <_c>SCAL( N, TMPVAL, WORK(1,V), 1 ) - CALL <_c>SCAL( N, TMPVAL, WORK(1,Y), 1 ) -* - TMPVAL = ONE / XI - CALL <_c>COPY( N, WORK(1,WTLD), 1, WORK(1,W), 1 ) - CALL <_c>SCAL( N, TMPVAL, WORK(1,W), 1 ) - CALL <_c>SCAL( N, TMPVAL, WORK(1,Z), 1 ) -* - DELTA = ( N, WORK(1,Z), 1, WORK(1,Y), 1 ) - IF ( ABS( DELTA ).LT.DELTATOL ) GO TO 25 -* -*********CALL PSOLVEQ( WORK(1,YTLD), WORK(1,Y), 'RIGHT' ) -* - NDX1 = ((YTLD - 1) * LDW) + 1 - NDX2 = ((Y - 1) * LDW) + 1 - RLBL = 5 - IJOB = 4 - RETURN -***************** - 5 CONTINUE -***************** -* -*********CALL PSOLVETRANSQ( WORK(1,ZTLD), WORK(1,Z), 'LEFT' ) -* - NDX1 = ((ZTLD - 1) * LDW) + 1 - NDX2 = ((Z - 1) * LDW) + 1 - RLBL = 6 - IJOB = 5 - RETURN -***************** - 6 CONTINUE -***************** -* -* - IF ( ITER.GT.1 ) THEN - C1 = -( XI * DELTA / EPS ) - CALL <_c>AXPY( N, C1, WORK(1,P), 1, WORK(1,YTLD), 1 ) - CALL <_c>COPY( N, WORK(1,YTLD), 1, WORK(1,P), 1 ) - CALL <_c>AXPY( N, -( RHO * - $ (DELTA / EPS) ), - $ WORK(1,Q), 1, WORK(1,ZTLD), 1 ) - CALL <_c>COPY( N, 
WORK(1,ZTLD), 1, WORK(1,Q), 1 ) - ELSE - CALL <_c>COPY( N, WORK(1,YTLD), 1, WORK(1,P), 1 ) - CALL <_c>COPY( N, WORK(1,ZTLD), 1, WORK(1,Q), 1 ) - ENDIF -* -*********CALL MATVEC( ONE, WORK(1,P), ZERO, WORK(1,PTLD) ) -* - SCLR1 = ONE - SCLR2 = ZERO - NDX1 = ((P - 1) * LDW) + 1 - NDX2 = ((PTLD - 1) * LDW) + 1 - RLBL = 7 - IJOB = 1 - RETURN -***************** - 7 CONTINUE -***************** -* -* - EPS = ( N, WORK(1,Q), 1, WORK(1,PTLD), 1 ) - IF ( ABS( EPS ).LT.EPSTOL ) GO TO 25 -* - BETA = EPS / DELTA - IF ( ABS( BETA ).LT.BETATOL ) GO TO 25 -* - CALL <_c>COPY( N, WORK(1,PTLD), 1, WORK(1,VTLD), 1 ) - CALL <_c>AXPY( N, -BETA, WORK(1,V), 1, WORK(1,VTLD), 1 ) - -******CALL PSOLVEQ( WORK(1,Y), WORK(1,VTLD), 'LEFT' ) -* - NDX1 = ((Y - 1) * LDW) + 1 - NDX2 = ((VTLD - 1) * LDW) + 1 - RLBL = 8 - IJOB = 3 - RETURN -* -***************** - 8 CONTINUE -***************** - - RHO1 = RHO - RHO = NRM2( N, WORK(1,Y), 1 ) -* - CALL <_c>COPY( N, WORK(1,W), 1, WORK(1,WTLD), 1 ) -*********CALL MATVECTRANS( ONE, WORK(1,Q), -BETA, WORK(1,WTLD) ) -* - SCLR1 = ONE - SCLR2 = -(BETA) - NDX1 = ((Q - 1) * LDW) + 1 - NDX2 = ((WTLD - 1) * LDW) + 1 - RLBL = 9 - IJOB = 2 - RETURN -***************** - 9 CONTINUE -***************** -* -*********CALL PSOLVETRANSQ( WORK(1,Z), WORK(1,WTLD), 'RIGHT' ) -* - NDX1 = ((Z - 1) * LDW) + 1 - NDX2 = ((WTLD - 1) * LDW) + 1 - RLBL = 10 - IJOB = 6 - RETURN -***************** - 10 CONTINUE -***************** -* -* - XI = NRM2( N, WORK(1,Z), 1 ) -* - GAMMA1 = GAMMA - THETA1 = THETA -* - THETA = RHO / ( GAMMA1 * ABS( BETA ) ) - GAMMA = ONE / SQRT( ONE + THETA**2 ) - IF ( ABS( GAMMA ).LT.GAMMATOL ) GO TO 25 -* - ETA = -ETA * RHO1 * GAMMA**2 / ( BETA * GAMMA1**2 ) -* - IF ( ITER.GT.1 ) THEN - CALL <_c>SCAL( N, ( THETA1*GAMMA )**2, WORK(1,D), 1 ) - CALL <_c>AXPY( N, ETA, WORK(1,P), 1, WORK(1,D), 1 ) - CALL <_c>SCAL( N, ( THETA1 * GAMMA )**2, WORK(1,S), 1 ) - CALL <_c>AXPY( N, ETA, WORK(1,PTLD), 1, WORK(1,S), 1 ) - ELSE - CALL <_c>COPY( N, WORK(1,P), 1, WORK(1,D), 1 ) - CALL 
<_c>SCAL( N, ETA, WORK(1,D), 1 ) - CALL <_c>COPY( N, WORK(1,PTLD), 1, WORK(1,S), 1 ) - CALL <_c>SCAL( N, ETA, WORK(1,S), 1 ) - ENDIF -* -* Compute current solution vector x. -* - TMPVAL = ONE - CALL <_c>AXPY( N, TMPVAL, WORK(1,D), 1, X, 1 ) -* -* Compute residual vector rk, find norm, -* then check for tolerance. -* - toz = one - CALL <_c>AXPY( N, -toz, WORK(1,S), 1, WORK(1,R), 1 ) -* -*********RESID = NRM2( N, WORK(1,R), 1 ) / BNRM2 -*********IF ( RESID .LE. TOL ) GO TO 30 -* - NDX1 = NEED1 - NDX2 = NEED2 -* Prepare for resumption & return - RLBL = 11 - IJOB = 8 - RETURN -* -***************** - 11 CONTINUE -***************** - IF( INFO.EQ.1 ) GO TO 30 -* - IF ( ITER.EQ.MAXIT ) THEN - INFO = 1 - GO TO 20 - ENDIF -* - GO TO 40 -* - 20 CONTINUE -* -* Iteration fails. -* - RLBL = -1 - IJOB = -1 -* - RETURN -* - 25 CONTINUE -* -* Method breakdown. -* - IF ( ABS( RHO ).LT.RHOTOL ) THEN - INFO = -10 - ELSE IF ( ABS( BETA ).LT.BETATOL ) THEN - INFO = -11 - ELSE IF ( ABS( GAMMA ).LT.GAMMATOL ) THEN - INFO = -12 - ELSE IF ( ABS( DELTA ).LT.DELTATOL ) THEN - INFO = -13 - ELSE IF ( ABS( EPS ).LT.EPSTOL ) THEN - INFO = -14 - ELSE IF ( ABS( XI ).LT.XITOL ) THEN - INFO = -15 - ENDIF -* -* - RLBL = -1 - IJOB = -1 -* - RETURN -* - 30 CONTINUE -* -* Iteration successful; return. -* - INFO = 0 - RLBL = -1 - IJOB = -1 -* - RETURN -* -* End of QMRREVCOM -* - END -* END SUBROUTINE <_c>QMRREVCOM diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/STOPTEST2.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/STOPTEST2.f.src deleted file mode 100644 index 520ceb396a..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/STOPTEST2.f.src +++ /dev/null @@ -1,62 +0,0 @@ -* -*- fortran -*- -C STOPTEST2 - -* Purpose -* ======= -* -* Computes the stopping criterion 2. -* -* Arguments -* ========= -* -* N (input) INTEGER. -* On entry, the dimension of the matrix. -* Unchanged on exit. 
-* -* INFO (output) INTEGER -* On exit, 1/0 depending on whether stopping criterion -* was met or not. -* -* BNRM2 (input/output) DOUBLE PRECISION. -* On first time entry, will be -1.0. -* On first time exit will contain norm2(B) -* On all subsequent entry/exit's unchanged. -* -* RESID (output) DOUBLE PRECISION. -* On exit, the computed stopping measure. -* -* TOL (input) DOUBLE PRECISION. -* On input, the allowable convergence measure. -* -* R (input) DOUBLE PRECISION array, dimension N. -* On entry, the residual. -* Unchanged on exit. -* -* B (input) DOUBLE PRECISION array, dimension N. -* On entry, right hand side vector B. -* Unchanged on exit. -* -* BLAS CALLS: DNRM2 -* ============================================================ -* - - SUBROUTINE <_c>STOPTEST2( N, R, B, BNRM2, RESID, TOL, INFO ) - INTEGER N, INFO - RESID, TOL, BNRM2 - <_t> R( * ), B( * ) - ZERO, ONE - PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) - NRM2 - EXTERNAL NRM2 - IF( INFO.EQ.-1 ) THEN - BNRM2 = NRM2( N, B, 1 ) - IF ( BNRM2.EQ.ZERO ) BNRM2 = ONE - ENDIF - RESID = NRM2( N, R, 1 ) / BNRM2 - INFO = 0 - IF ( RESID.LE.TOL ) - $ INFO = 1 - RETURN - END -* END SUBROUTINE <_c>STOPTEST2 - diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/_iterative.pyf.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/_iterative.pyf.src deleted file mode 100644 index d4fb80a7ce..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/_iterative.pyf.src +++ /dev/null @@ -1,118 +0,0 @@ -! -*- f90 -*- -! -! Iterative Package for SciPy -! Hongze Liu, Travis E. Oliphant, -! Brigham Young University -! 2004 -! - -python module _iterative ! in - interface ! in :_iterative - subroutine <_c>bicgrevcom(n,b,x,work,ldw,iter,resid,info,ndx1,ndx2,sclr1,sclr2,ijob) ! 
in :iterative:BiCG.f - integer, intent(hide), depend(b) :: n=len(b) - <_t> dimension(n) :: b - <_t> dimension(n), intent(in,out) :: x - <_t> intent(inout), dimension(ldw*6) :: work - integer, intent(hide), depend(n) :: ldw=MAX(1,n) - integer, intent(in,out) :: iter - , intent(in,out) :: resid - integer, intent(in, out) :: info - integer, intent(in, out) :: ndx1 - integer, intent(in, out) :: ndx2 - <_t>, intent(out) :: sclr1 - <_t>, intent(out) :: sclr2 - integer, intent(in, out) :: ijob - end subroutine <_c>bicgrevcom - subroutine <_c>bicgstabrevcom(n,b,x,work,ldw,iter,resid,info,ndx1,ndx2,sclr1,sclr2,ijob) ! in :iterative:BiCGSTAB.f - integer, intent(hide), depend(b) :: n=len(b) - <_t> dimension(n) :: b - <_t> dimension(n), intent(in,out) :: x - <_t> intent(inout), dimension(ldw*7) :: work - integer, intent(hide), depend(n) :: ldw=MAX(1,n) - integer, intent(in,out) :: iter - , intent(in,out) :: resid - integer, intent(in, out) :: info - integer, intent(in, out) :: ndx1 - integer, intent(in, out) :: ndx2 - <_t>, intent(out) :: sclr1 - <_t>, intent(out) :: sclr2 - integer, intent(in, out) :: ijob - end subroutine <_c>bicgstabrevcom - subroutine <_c>cgrevcom(n,b,x,work,ldw,iter,resid,info,ndx1,ndx2,sclr1,sclr2,ijob) ! in :iterative:CG.f - integer, intent(hide), depend(b) :: n=len(b) - <_t> dimension(n) :: b - <_t> dimension(n), intent(in,out) :: x - <_t> intent(inout), dimension(ldw*4) :: work - integer, intent(hide), depend(n) :: ldw=MAX(1,n) - integer, intent(in,out) :: iter - , intent(in,out) :: resid - integer, intent(in, out) :: info - integer, intent(in, out) :: ndx1 - integer, intent(in, out) :: ndx2 - <_t>, intent(out) :: sclr1 - <_t>, intent(out) :: sclr2 - integer, intent(in, out) :: ijob - end subroutine <_c>cgrevcom - subroutine <_c>cgsrevcom(n,b,x,work,ldw,iter,resid,info,ndx1,ndx2,sclr1,sclr2,ijob) ! 
in :iterative:CGS.f - integer, intent(hide), depend(b) :: n=len(b) - <_t> dimension(n) :: b - <_t> dimension(n), intent(in,out) :: x - <_t> intent(inout), dimension(ldw*7) :: work - integer, intent(hide), depend(n) :: ldw=MAX(1,n) - integer, intent(in,out) :: iter - , intent(in,out) :: resid - integer, intent(in, out) :: info - integer, intent(in, out) :: ndx1 - integer, intent(in, out) :: ndx2 - <_t>, intent(out) :: sclr1 - <_t>, intent(out) :: sclr2 - integer, intent(in, out) :: ijob - end subroutine <_c>cgsrevcom - subroutine <_c>qmrrevcom(n,b,x,work,ldw,iter,resid,info,ndx1,ndx2,sclr1,sclr2,ijob) ! in :iterative:QMR.f - integer, intent(hide), depend(b) :: n=len(b) - <_t> dimension(n) :: b - <_t> dimension(n), intent(in,out) :: x - <_t> intent(inout), dimension(ldw*11) :: work - integer, intent(hide), depend(n) :: ldw=MAX(1,n) - integer, intent(in,out) :: iter - , intent(in,out) :: resid - integer, intent(in, out) :: info - integer, intent(in, out) :: ndx1 - integer, intent(in, out) :: ndx2 - <_t>, intent(out) :: sclr1 - <_t>, intent(out) :: sclr2 - integer, intent(in, out) :: ijob - end subroutine <_c>qmrrevcom - subroutine <_c>gmresrevcom(n,b,x,restrt,work,ldw,work2,ldw2,iter,resid,info,ndx1,ndx2,sclr1,sclr2,ijob) ! 
in :iterative:GMRESREVCOM.f - integer, intent(hide), depend(b) :: n=len(b) - <_t> dimension(n) :: b - <_t> dimension(n), intent(in,out) :: x - integer, intent(in), depend(n), check((0 intent(inout), dimension(ldw*(6+restrt)) :: work - integer intent(hide) :: ldw=MAX(1,n) - <_t> intent(inout), depend(restrt,ldw2), dimension(ldw2*(2*restrt+2)) :: work2 - integer intent(hide), depend(restrt) :: ldw2=MAX(2,restrt+1) - integer intent(in, out) :: iter - , intent(in,out) :: resid - integer intent(in, out) :: info - integer intent(in, out) :: ndx1 - integer intent(in, out) :: ndx2 - <_t> intent(out) :: sclr1 - <_t> intent(out) :: sclr2 - integer intent(in, out) :: ijob - end subroutine <_c>gmresrevcom - - subroutine <_c>stoptest2(n,r,b,bnrm2,resid,tol,info) ! in STOPTEST2.f - integer, intent(hide), depend(b) :: n=len(b) - <_t>, dimension(n), intent(in) :: r - <_t>, dimension(n), intent(in) :: b - , intent(in, out) :: bnrm2 - , intent(out) :: resid - , intent(in) :: tol - integer, intent(in, out) :: info - end subroutine <_c>stoptest2 - end interface -end python module _iterative - -! This file was auto-generated with f2py (version:2.39.235_1703). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/getbreak.f.src b/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/getbreak.f.src deleted file mode 100644 index 5c222036a2..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/iterative/getbreak.f.src +++ /dev/null @@ -1,20 +0,0 @@ -* -*- fortran -*- -* GetBreak - - $ FUNCTION <_c>GETBREAK() -* -* Get breakdown parameter tolerance; for the test routine, -* set to machine precision. 
-* - EPS, LAMCH -* - EPS = LAMCH('EPS') - <_c>GETBREAK = EPS**2 -* - RETURN -* - END -* END FUNCTION <_c>GETBREAK - - - diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/lgmres.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/lgmres.py deleted file mode 100644 index 4df0e6b7e7..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/lgmres.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (C) 2009, Pauli Virtanen -# Distributed under the same license as Scipy. - -import numpy as np -from scipy.linalg import get_blas_funcs -from utils import make_system - -__all__ = ['lgmres'] - -def norm2(q): - q = np.asarray(q) - nrm2 = get_blas_funcs('nrm2', dtype=q.dtype) - return nrm2(q) - -def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None, - inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True): - """ - Solve a matrix equation using the LGMRES algorithm. - - The LGMRES algorithm [BJM]_ [BPh]_ is designed to avoid some problems - in the convergence in restarted GMRES, and often converges in fewer - iterations. - - Parameters - ---------- - A : {sparse matrix, dense matrix, LinearOperator} - The real or complex N-by-N matrix of the linear system. - b : {array, matrix} - Right hand side of the linear system. Has shape (N,) or (N,1). - x0 : {array, matrix} - Starting guess for the solution. - tol : float - Tolerance to achieve. The algorithm terminates when either the relative - or the absolute residual is below `tol`. - maxiter : integer - Maximum number of iterations. Iteration will stop after maxiter - steps even if the specified tolerance has not been achieved. - M : {sparse matrix, dense matrix, LinearOperator} - Preconditioner for A. The preconditioner should approximate the - inverse of A. Effective preconditioning dramatically improves the - rate of convergence, which implies that fewer iterations are needed - to reach a given error tolerance. - callback : function - User-supplied function to call after each iteration. 
It is called - as callback(xk), where xk is the current solution vector. - - Additional parameters - --------------------- - inner_m : int, optional - Number of inner GMRES iterations per each outer iteration. - outer_k : int, optional - Number of vectors to carry between inner GMRES iterations. - According to [BJM]_, good values are in the range of 1...3. - However, note that if you want to use the additional vectors to - accelerate solving multiple similar problems, larger values may - be beneficial. - outer_v : list of tuples, optional - List containing tuples ``(v, Av)`` of vectors and corresponding - matrix-vector products, used to augment the Krylov subspace, and - carried between inner GMRES iterations. The element ``Av`` can - be `None` if the matrix-vector product should be re-evaluated. - This parameter is modified in-place by `lgmres`, and can be used - to pass "guess" vectors in and out of the algorithm when solving - similar problems. - store_outer_Av : bool, optional - Whether LGMRES should store also A*v in addition to vectors `v` - in the `outer_v` list. Default is True. - - Returns - ------- - x : array or matrix - The converged solution. - info : integer - Provides convergence information: - 0 : successful exit - >0 : convergence to tolerance not achieved, number of iterations - <0 : illegal input or breakdown - - Notes - ----- - The LGMRES algorithm [BJM]_ [BPh]_ is designed to avoid the - slowing of convergence in restarted GMRES, due to alternating - residual vectors. Typically, it often outperforms GMRES(m) of - comparable memory requirements by some measure, or at least is not - much worse. - - Another advantage in this algorithm is that you can supply it with - 'guess' vectors in the `outer_v` argument that augment the Krylov - subspace. If the solution lies close to the span of these vectors, - the algorithm converges faster. 
This can be useful if several very - similar matrices need to be inverted one after another, such as in - Newton-Krylov iteration where the Jacobian matrix often changes - little in the nonlinear steps. - - References - ---------- - .. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel, - SIAM J. Matrix Anal. Appl. 26, 962 (2005). - .. [BPh] A.H. Baker, PhD thesis, University of Colorado (2003). - http://amath.colorado.edu/activities/thesis/allisonb/Thesis.ps - - """ - from scipy.linalg.basic import lstsq - A,M,x,b,postprocess = make_system(A,M,x0,b) - - if not np.isfinite(b).all(): - raise ValueError("RHS must contain only finite numbers") - - matvec = A.matvec - psolve = M.matvec - - if outer_v is None: - outer_v = [] - - axpy, dot, scal = None, None, None - - b_norm = norm2(b) - if b_norm == 0: - b_norm = 1 - - for k_outer in xrange(maxiter): - r_outer = matvec(x) - b - - # -- callback - if callback is not None: - callback(x) - - # -- determine input type routines - if axpy is None: - if np.iscomplexobj(r_outer) and not np.iscomplexobj(x): - x = x.astype(r_outer.dtype) - axpy, dot, scal = get_blas_funcs(['axpy', 'dot', 'scal'], - (x, r_outer)) - - # -- check stopping condition - r_norm = norm2(r_outer) - if r_norm < tol * b_norm or r_norm < tol: - break - - # -- inner LGMRES iteration - vs0 = -psolve(r_outer) - inner_res_0 = norm2(vs0) - - if inner_res_0 == 0: - rnorm = norm2(r_outer) - raise RuntimeError("Preconditioner returned a zero vector; " - "|v| ~ %.1g, |M v| = 0" % rnorm) - - vs0 = scal(1.0/inner_res_0, vs0) - hs = [] - vs = [vs0] - ws = [] - y = None - - for j in xrange(1, 1 + inner_m + len(outer_v)): - # -- Arnoldi process: - # - # Build an orthonormal basis V and matrices W and H such that - # A W = V H - # Columns of W, V, and H are stored in `ws`, `vs` and `hs`. - # - # The first column of V is always the residual vector, `vs0`; - # V has *one more column* than the other of the three matrices. 
- # - # The other columns in V are built by feeding in, one - # by one, some vectors `z` and orthonormalizing them - # against the basis so far. The trick here is to - # feed in first some augmentation vectors, before - # starting to construct the Krylov basis on `v0`. - # - # It was shown in [BJM]_ that a good choice (the LGMRES choice) - # for these augmentation vectors are the `dx` vectors obtained - # from a couple of the previous restart cycles. - # - # Note especially that while `vs0` is always the first - # column in V, there is no reason why it should also be - # the first column in W. (In fact, below `vs0` comes in - # W only after the augmentation vectors.) - # - # The rest of the algorithm then goes as in GMRES, one - # solves a minimization problem in the smaller subspace - # spanned by W (range) and V (image). - # - # XXX: Below, I'm lazy and use `lstsq` to solve the - # small least squares problem. Performance-wise, this - # is in practice acceptable, but it could be nice to do - # it on the fly with Givens etc. - # - - # ++ evaluate - v_new = None - if j < len(outer_v) + 1: - z, v_new = outer_v[j-1] - elif j == len(outer_v) + 1: - z = vs0 - else: - z = vs[-1] - - if v_new is None: - v_new = psolve(matvec(z)) - else: - # Note: v_new is modified in-place below. Must make a - # copy to ensure that the outer_v vectors are not - # clobbered. - v_new = v_new.copy() - - # ++ orthogonalize - hcur = [] - for v in vs: - alpha = dot(v, v_new) - hcur.append(alpha) - v_new = axpy(v, v_new, v.shape[0], -alpha) # v_new -= alpha*v - hcur.append(norm2(v_new)) - - if hcur[-1] == 0: - # Exact solution found; bail out. - # Zero basis vector (v_new) in the least-squares problem - # does no harm, so we can just use the same code as usually; - # it will give zero (inner) residual as a result. 
- bailout = True - else: - bailout = False - v_new = scal(1.0/hcur[-1], v_new) - - vs.append(v_new) - hs.append(hcur) - ws.append(z) - - # XXX: Ugly: should implement the GMRES iteration properly, - # with Givens rotations and not using lstsq. Instead, we - # spare some work by solving the LSQ problem only every 5 - # iterations. - if not bailout and j % 5 != 1 and j < inner_m + len(outer_v) - 1: - continue - - # -- GMRES optimization problem - hess = np.zeros((j+1, j), x.dtype) - e1 = np.zeros((j+1,), x.dtype) - e1[0] = inner_res_0 - for q in xrange(j): - hess[:(q+2),q] = hs[q] - - y, resids, rank, s = lstsq(hess, e1) - inner_res = norm2(np.dot(hess, y) - e1) - - # -- check for termination - if inner_res < tol * inner_res_0: - break - - # -- GMRES terminated: eval solution - dx = ws[0]*y[0] - for w, yc in zip(ws[1:], y[1:]): - dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc - - # -- Store LGMRES augmentation vectors - nx = norm2(dx) - if store_outer_Av: - q = np.dot(hess, y) - ax = vs[0]*q[0] - for v, qc in zip(vs[1:], q[1:]): - ax = axpy(v, ax, ax.shape[0], qc) - outer_v.append((dx/nx, ax/nx)) - else: - outer_v.append((dx/nx, None)) - - # -- Retain only a finite number of augmentation vectors - while len(outer_v) > outer_k: - del outer_v[0] - - # -- Apply step - x += dx - else: - # didn't converge ... - return postprocess(x), maxiter - - return postprocess(x), 0 diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/lsqr.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/lsqr.py deleted file mode 100644 index 51f9406868..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/lsqr.py +++ /dev/null @@ -1,495 +0,0 @@ -"""Sparse Equations and Least Squares. - -The original Fortran code was written by C. C. Paige and M. A. Saunders as -described in - -C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear -equations and sparse least squares, TOMS 8(1), 43--71 (1982). - -C. C. Paige and M. A. 
Saunders, Algorithm 583; LSQR: Sparse linear -equations and least-squares problems, TOMS 8(2), 195--209 (1982). - -It is licensed under the following BSD license: - -Copyright (c) 2006, Systems Optimization Laboratory -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of Stanford University nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The Fortran code was translated to Python for use in CVXOPT by Jeffery -Kline with contributions by Mridul Aanjaneya and Bob Myhill. - -Adapted for SciPy by Stefan van der Walt. 
- -""" - -__all__ = ['lsqr'] - -import numpy as np -from math import sqrt -from scipy.sparse.linalg.interface import aslinearoperator - -def _sym_ortho(a,b): - """ - Jeffery Kline noted: I added the routine 'SymOrtho' for numerical - stability. This is recommended by S.-C. Choi in [1]_. It removes - the unpleasant potential of ``1/eps`` in some important places - (see, for example text following "Compute the next - plane rotation Qk" in minres_py). - - References - ---------- - .. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations - and Least-Squares Problems", Dissertation, - http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf - - """ - aa = abs(a) - ab = abs(b) - if b == 0.: - s = 0. - r = aa - if aa == 0.: - c = 1. - else: - c = a/aa - elif a == 0.: - c = 0. - s = b / ab - r = ab - elif ab >= aa: - sb = 1 - if b < 0: sb=-1 - tau = a/b - s = sb * (1 + tau**2)**-0.5 - c = s * tau - r = b / s - elif aa > ab: - sa = 1 - if a < 0: sa = -1 - tau = b / a - c = sa * (1 + tau**2)**-0.5 - s = c * tau - r = a / c - - return c, s, r - - -def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, - iter_lim=None, show=False, calc_var=False): - """Find the least-squares solution to a large, sparse, linear system - of equations. - - The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or - ``min ||Ax - b||^2 + d^2 ||x||^2. - - The matrix A may be square or rectangular (over-determined or - under-determined), and may have any rank. - - :: - - 1. Unsymmetric equations -- solve A*x = b - - 2. Linear least squares -- solve A*x = b - in the least-squares sense - - 3. Damped least squares -- solve ( A )*x = ( b ) - ( damp*I ) ( 0 ) - in the least-squares sense - - Parameters - ---------- - A : {sparse matrix, ndarray, LinearOperatorLinear} - Representation of an m-by-n matrix. It is required that - the linear operator can produce ``Ax`` and ``A^T x``. - b : (m,) ndarray - Right-hand side vector ``b``. - damp : float - Damping coefficient. 
- atol, btol : float - Stopping tolerances. If both are 1.0e-9 (say), the final - residual norm should be accurate to about 9 digits. (The - final x will usually have fewer correct digits, depending on - cond(A) and the size of damp.) - conlim : float - Another stopping tolerance. lsqr terminates if an estimate of - ``cond(A)`` exceeds `conlim`. For compatible systems ``Ax = - b``, `conlim` could be as large as 1.0e+12 (say). For - least-squares problems, conlim should be less than 1.0e+8. - Maximum precision can be obtained by setting ``atol = btol = - conlim = zero``, but the number of iterations may then be - excessive. - iter_lim : int - Explicit limitation on number of iterations (for safety). - show : bool - Display an iteration log. - calc_var : bool - Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``. - - Returns - ------- - x : ndarray of float - The final solution. - istop : int - Gives the reason for termination. - 1 means x is an approximate solution to Ax = b. - 2 means x approximately solves the least-squares problem. - itn : int - Iteration number upon termination. - r1norm : float - ``norm(r)``, where ``r = b - Ax``. - r2norm : float - ``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if - ``damp == 0``. - anorm : float - Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``. - acond : float - Estimate of ``cond(Abar)``. - arnorm : float - Estimate of ``norm(A'*r - damp^2*x)``. - xnorm : float - ``norm(x)`` - var : ndarray of float - If ``calc_var`` is True, estimates all diagonals of - ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A + - damp^2*I)^{-1}``. This is well defined if A has full column - rank or ``damp > 0``. (Not sure what var means if ``rank(A) - < n`` and ``damp = 0.``) - - Notes - ----- - LSQR uses an iterative method to approximate the solution. The - number of iterations required to reach a certain accuracy depends - strongly on the scaling of the problem. 
Poor scaling of the rows - or columns of A should therefore be avoided where possible. - - For example, in problem 1 the solution is unaltered by - row-scaling. If a row of A is very small or large compared to - the other rows of A, the corresponding row of ( A b ) should be - scaled up or down. - - In problems 1 and 2, the solution x is easily recovered - following column-scaling. Unless better information is known, - the nonzero columns of A should be scaled so that they all have - the same Euclidean norm (e.g., 1.0). - - In problem 3, there is no freedom to re-scale if damp is - nonzero. However, the value of damp should be assigned only - after attention has been paid to the scaling of A. - - The parameter damp is intended to help regularize - ill-conditioned systems, by preventing the true solution from - being very large. Another aid to regularization is provided by - the parameter acond, which may be used to terminate iterations - before the computed solution becomes very large. - - If some initial estimate ``x0`` is known and if ``damp == 0``, - one could proceed as follows: - - 1. Compute a residual vector ``r0 = b - A*x0``. - 2. Use LSQR to solve the system ``A*dx = r0``. - 3. Add the correction dx to obtain a final solution ``x = x0 + dx``. - - This requires that ``x0`` be available before and after the call - to LSQR. To judge the benefits, suppose LSQR takes k1 iterations - to solve A*x = b and k2 iterations to solve A*dx = r0. - If x0 is "good", norm(r0) will be smaller than norm(b). - If the same stopping tolerances atol and btol are used for each - system, k1 and k2 will be similar, but the final solution x0 + dx - should be more accurate. The only way to reduce the total work - is to use a larger stopping tolerance for the second system. - If some value btol is suitable for A*x = b, the larger value - btol*norm(b)/norm(r0) should be suitable for A*dx = r0. - - Preconditioning is another way to reduce the number of iterations. 
- If it is possible to solve a related system ``M*x = b`` - efficiently, where M approximates A in some helpful way (e.g. M - - A has low rank or its elements are small relative to those of A), - LSQR may converge more rapidly on the system ``A*M(inverse)*z = - b``, after which x can be recovered by solving M*x = z. - - If A is symmetric, LSQR should not be used! - - Alternatives are the symmetric conjugate-gradient method (cg) - and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that - applies to any symmetric A and will converge more rapidly than - LSQR. If A is positive definite, there are other implementations - of symmetric cg that require slightly less work per iteration than - SYMMLQ (but will take the same number of iterations). - - References - ---------- - .. [1] C. C. Paige and M. A. Saunders (1982a). - "LSQR: An algorithm for sparse linear equations and - sparse least squares", ACM TOMS 8(1), 43-71. - .. [2] C. C. Paige and M. A. Saunders (1982b). - "Algorithm 583. LSQR: Sparse linear equations and least - squares problems", ACM TOMS 8(2), 195-209. - .. [3] M. A. Saunders (1995). "Solution of sparse rectangular - systems using LSQR and CRAIG", BIT 35, 588-604. 
- - """ - A = aslinearoperator(A) - b = b.squeeze() - - m, n = A.shape - if iter_lim is None: iter_lim = 2 * n - var = np.zeros(n) - - msg=('The exact solution is x = 0 ', - 'Ax - b is small enough, given atol, btol ', - 'The least-squares solution is good enough, given atol ', - 'The estimate of cond(Abar) has exceeded conlim ', - 'Ax - b is small enough for this machine ', - 'The least-squares solution is good enough for this machine', - 'Cond(Abar) seems to be too large for this machine ', - 'The iteration limit has been reached '); - - if show: - print ' ' - print 'LSQR Least-squares solution of Ax = b' - str1 = 'The matrix A has %8g rows and %8g cols' % (m, n) - str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var) - str3 = 'atol = %8.2e conlim = %8.2e'%( atol, conlim) - str4 = 'btol = %8.2e iter_lim = %8g' %( btol, iter_lim) - print str1 - print str2 - print str3 - print str4 - - itn = 0 - istop = 0 - nstop = 0 - ctol = 0 - if conlim > 0: ctol = 1/conlim - anorm = 0 - acond = 0 - dampsq = damp**2 - ddnorm = 0 - res2 = 0 - xnorm = 0 - xxnorm = 0 - z = 0 - cs2 = -1 - sn2 = 0 - - """ - Set up the first vectors u and v for the bidiagonalization. - These satisfy beta*u = b, alfa*v = A'u. 
- """ - __xm = np.zeros(m) # a matrix for temporary holding - __xn = np.zeros(n) # a matrix for temporary holding - v = np.zeros(n) - u = b - x = np.zeros(n) - alfa = 0 - beta = np.linalg.norm(u) - w = np.zeros(n) - - if beta > 0: - u = (1/beta) * u - v = A.rmatvec(u) - alfa = np.linalg.norm(v) - - if alfa > 0: - v = (1/alfa) * v - w = v.copy() - - rhobar = alfa - phibar = beta - bnorm = beta - rnorm = beta - r1norm = rnorm - r2norm = rnorm - - # Reverse the order here from the original matlab code because - # there was an error on return when arnorm==0 - arnorm = alfa * beta - if arnorm == 0: - print msg[0]; - return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var - - head1 = ' Itn x[0] r1norm r2norm '; - head2 = ' Compatible LS Norm A Cond A'; - - if show: - print ' ' - print head1, head2 - test1 = 1; test2 = alfa / beta; - str1 = '%6g %12.5e' %( itn, x[0] ); - str2 = ' %10.3e %10.3e'%( r1norm, r2norm ); - str3 = ' %8.1e %8.1e' %( test1, test2 ); - print str1, str2, str3 - - # Main iteration loop. - while itn < iter_lim: - itn = itn + 1 - """ - % Perform the next step of the bidiagonalization to obtain the - % next beta, u, alfa, v. These satisfy the relations - % beta*u = a*v - alfa*u, - % alfa*v = A'*u - beta*v. - """ - u = A.matvec(v) - alfa * u - beta = np.linalg.norm(u) - - if beta > 0: - u = (1/beta) * u - anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2) - v = A.rmatvec(u) - beta * v - alfa = np.linalg.norm(v) - if alfa > 0: - v = (1 / alfa) * v - - # Use a plane rotation to eliminate the damping parameter. - # This alters the diagonal (rhobar) of the lower-bidiagonal matrix. - rhobar1 = sqrt(rhobar**2 + damp**2) - cs1 = rhobar / rhobar1 - sn1 = damp / rhobar1 - psi = sn1 * phibar - phibar = cs1 * phibar - - # Use a plane rotation to eliminate the subdiagonal element (beta) - # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix. 
- cs, sn, rho = _sym_ortho(rhobar1, beta) - - theta = sn * alfa - rhobar = -cs * alfa - phi = cs * phibar - phibar = sn * phibar - tau = sn * phi - - # Update x and w. - t1 = phi / rho - t2 = -theta / rho - dk = (1 / rho) * w - - x = x + t1 * w - w = v + t2 * w - ddnorm = ddnorm + np.linalg.norm(dk)**2 - - if calc_var: - var = var + dk**2 - - # Use a plane rotation on the right to eliminate the - # super-diagonal element (theta) of the upper-bidiagonal matrix. - # Then use the result to estimate norm(x). - delta = sn2 * rho - gambar = -cs2 * rho - rhs = phi - delta * z - zbar = rhs / gambar - xnorm = sqrt(xxnorm + zbar**2) - gamma = sqrt(gambar**2 +theta**2) - cs2 = gambar / gamma - sn2 = theta / gamma - z = rhs / gamma - xxnorm = xxnorm + z**2 - - # Test for convergence. - # First, estimate the condition of the matrix Abar, - # and the norms of rbar and Abar'rbar. - acond = anorm * sqrt(ddnorm) - res1 = phibar**2 - res2 = res2 + psi**2 - rnorm = sqrt(res1 + res2) - arnorm = alfa * abs(tau) - - # Distinguish between - # r1norm = ||b - Ax|| and - # r2norm = rnorm in current code - # = sqrt(r1norm^2 + damp^2*||x||^2). - # Estimate r1norm from - # r1norm = sqrt(r2norm^2 - damp^2*||x||^2). - # Although there is cancellation, it might be accurate enough. - r1sq = rnorm**2 - dampsq * xxnorm - r1norm = sqrt(abs(r1sq)) - if r1sq < 0: - r1norm = -r1norm - r2norm = rnorm - - # Now use these norms to estimate certain other quantities, - # some of which will be small near a solution. - test1 = rnorm / bnorm - test2 = arnorm / (anorm * rnorm) - test3 = 1 / acond - t1 = test1 / (1 + anorm * xnorm / bnorm) - rtol = btol + atol * anorm * xnorm / bnorm - - # The following tests guard against extremely small values of - # atol, btol or ctol. (The user may have set any or all of - # the parameters atol, btol, conlim to 0.) - # The effect is equivalent to the normal tests using - # atol = eps, btol = eps, conlim = 1/eps. 
- if itn >= iter_lim: istop = 7 - if 1 + test3 <= 1: istop = 6 - if 1 + test2 <= 1: istop = 5 - if 1 + t1 <= 1: istop = 4 - - # Allow for tolerances set by the user. - if test3 <= ctol: istop = 3 - if test2 <= atol: istop = 2 - if test1 <= rtol: istop = 1 - - # See if it is time to print something. - prnt = False; - if n <= 40: prnt = True - if itn <= 10: prnt = True - if itn >= iter_lim-10: prnt = True - # if itn%10 == 0: prnt = True - if test3 <= 2*ctol: prnt = True - if test2 <= 10*atol: prnt = True - if test1 <= 10*rtol: prnt = True - if istop != 0: prnt = True - - if prnt: - if show: - str1 = '%6g %12.5e' % (itn, x[0]) - str2 = ' %10.3e %10.3e' % (r1norm, r2norm) - str3 = ' %8.1e %8.1e' % (test1, test2) - str4 = ' %8.1e %8.1e' % (anorm, acond) - print str1, str2, str3, str4 - - if istop != 0: break - - # End of iteration loop. - # Print the stopping condition. - if show: - print ' ' - print 'LSQR finished' - print msg[istop] - print ' ' - str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm) - str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm) - str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm) - str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm) - print str1+ ' ' + str2 - print str3+ ' ' + str4 - print ' ' - - return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/minres.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/minres.py deleted file mode 100644 index 2fc4409893..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/minres.py +++ /dev/null @@ -1,311 +0,0 @@ -from numpy import sqrt, inner, finfo, zeros -from numpy.linalg import norm - -from utils import make_system -from iterative import set_docstring - -__all__ = ['minres'] - - -header = \ -"""Use MINimum RESidual iteration to solve Ax=b - -MINRES minimizes norm(A*x - b) for a real symmetric matrix A. Unlike -the Conjugate Gradient method, A can be indefinite or singular. 
- -If shift != 0 then the method solves (A - shift*I)x = b -""" - -Ainfo = "The real symmetric N-by-N matrix of the linear system" - -footer = \ -""" -Notes ------ -THIS FUNCTION IS EXPERIMENTAL AND SUBJECT TO CHANGE! - -References ----------- -Solution of sparse indefinite systems of linear equations, - C. C. Paige and M. A. Saunders (1975), - SIAM J. Numer. Anal. 12(4), pp. 617-629. - http://www.stanford.edu/group/SOL/software/minres.html - -This file is a translation of the following MATLAB implementation: - http://www.stanford.edu/group/SOL/software/minres/matlab/ -""" - -@set_docstring(header, - Ainfo, - footer) -def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, - M=None, callback=None, show=False, check=False): - A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - - matvec = A.matvec - psolve = M.matvec - - first = 'Enter minres. ' - last = 'Exit minres. ' - - n = A.shape[0] - - if maxiter is None: - maxiter = 5 * n - - - msg =[' beta2 = 0. If M = I, b and x are eigenvectors ', # -1 - ' beta1 = 0. The exact solution is x = 0 ', # 0 - ' A solution to Ax = b was found, given rtol ', # 1 - ' A least-squares solution was found, given rtol ', # 2 - ' Reasonable accuracy achieved, given eps ', # 3 - ' x has converged to an eigenvector ', # 4 - ' acond has exceeded 0.1/eps ', # 5 - ' The iteration limit was reached ', # 6 - ' A does not define a symmetric matrix ', # 7 - ' M does not define a symmetric matrix ', # 8 - ' M does not define a pos-def preconditioner '] # 9 - - - if show: - print first + 'Solution of symmetric Ax = b' - print first + 'n = %3g shift = %23.14e' % (n,shift) - print first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol) - print - - istop = 0; itn = 0; Anorm = 0; Acond = 0; - rnorm = 0; ynorm = 0; - - xtype = x.dtype - - eps = finfo(xtype).eps - - x = zeros( n, dtype=xtype ) - - # Set up y and v for the first Lanczos vector v1. - # y = beta1 P' v1, where P = C**(-1). - # v is really P' v1. 
- - y = b - r1 = b - - y = psolve(b) - - beta1 = inner(b,y) - - if beta1 < 0: - raise ValueError('indefinite preconditioner') - elif beta1 == 0: - return (postprocess(x), 0) - - beta1 = sqrt( beta1 ) - - if check: - # are these too strict? - - # see if A is symmetric - w = matvec(y) - r2 = matvec(w) - s = inner(w,w) - t = inner(y,r2) - z = abs( s - t ) - epsa = (s + eps) * eps**(1.0/3.0) - if z > epsa: - raise ValueError('non-symmetric matrix') - - # see if M is symmetric - r2 = psolve(y) - s = inner(y,y) - t = inner(r1,r2) - z = abs( s - t ) - epsa = (s + eps) * eps**(1.0/3.0) - if z > epsa: - raise ValueError('non-symmetric preconditioner') - - - # Initialize other quantities - oldb = 0; beta = beta1; dbar = 0; epsln = 0; - qrnorm = beta1; phibar = beta1; rhs1 = beta1; - rhs2 = 0; tnorm2 = 0; ynorm2 = 0; - cs = -1; sn = 0; - w = zeros(n, dtype=xtype) - w2 = zeros(n, dtype=xtype) - r2 = r1 - - if show: - print - print - print ' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|' - - while itn < maxiter: - itn += 1 - - s = 1.0/beta - v = s*y - - y = matvec(v) - y = y - shift * v - - if itn >= 2: - y = y - (beta/oldb)*r1 - - alfa = inner(v,y) - y = y - (alfa/beta)*r2 - r1 = r2 - r2 = y - y = psolve(r2) - oldb = beta - beta = inner(r2,y) - if beta < 0: - raise ValueError('non-symmetric matrix') - beta = sqrt(beta) - tnorm2 += alfa**2 + oldb**2 + beta**2 - - if itn == 1: - if beta/beta1 <= 10*eps: - istop = -1 # Terminate later - #tnorm2 = alfa**2 ?? - gmax = abs(alfa) - gmin = gmax - - # Apply previous rotation Qk-1 to get - # [deltak epslnk+1] = [cs sn][dbark 0 ] - # [gbar k dbar k+1] [sn -cs][alfak betak+1]. 
- - oldeps = epsln - delta = cs * dbar + sn * alfa # delta1 = 0 deltak - gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k - epsln = sn * beta # epsln2 = 0 epslnk+1 - dbar = - cs * beta # dbar 2 = beta2 dbar k+1 - root = norm([gbar, dbar]) - Arnorm = phibar * root - - # Compute the next plane rotation Qk - - gamma = norm([gbar, beta]) # gammak - gamma = max(gamma, eps) - cs = gbar / gamma # ck - sn = beta / gamma # sk - phi = cs * phibar # phik - phibar = sn * phibar # phibark+1 - - # Update x. - - denom = 1.0/gamma - w1 = w2 - w2 = w - w = (v - oldeps*w1 - delta*w2) * denom - x = x + phi*w - - # Go round again. - - gmax = max(gmax, gamma) - gmin = min(gmin, gamma) - z = rhs1 / gamma - ynorm2 = z**2 + ynorm2 - rhs1 = rhs2 - delta*z - rhs2 = - epsln*z - - # Estimate various norms and test for convergence. - - Anorm = sqrt( tnorm2 ) - ynorm = sqrt( ynorm2 ) - epsa = Anorm * eps - epsx = Anorm * ynorm * eps - epsr = Anorm * ynorm * tol - diag = gbar - - if diag == 0: diag = epsa - - qrnorm = phibar - rnorm = qrnorm - test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||) - test2 = root / Anorm # ||Ar|| / (||A|| ||r||) - - # Estimate cond(A). - # In this version we look at the diagonals of R in the - # factorization of the lower Hessenberg matrix, Q * H = R, - # where H is the tridiagonal matrix from Lanczos with one - # extra row, beta(k+1) e_k^T. - - Acond = gmax/gmin - - # See if any of the stopping criteria are satisfied. - # In rare cases, istop is already -1 from above (Abar = const*I). - - if istop == 0: - t1 = 1 + test1 # These tests work if tol < eps - t2 = 1 + test2 - if t2 <= 1 : istop = 2 - if t1 <= 1 : istop = 1 - - if itn >= maxiter : istop = 6 - if Acond >= 0.1/eps : istop = 4 - if epsx >= beta1 : istop = 3 - #if rnorm <= epsx : istop = 2 - #if rnorm <= epsr : istop = 1 - if test2 <= tol : istop = 2 - if test1 <= tol : istop = 1 - - # See if it is time to print something. 
- - prnt = False - if n <= 40 : prnt = True - if itn <= 10 : prnt = True - if itn >= maxiter-10 : prnt = True - if itn % 10 == 0 : prnt = True - if qrnorm <= 10*epsx : prnt = True - if qrnorm <= 10*epsr : prnt = True - if Acond <= 1e-2/eps : prnt = True - if istop != 0 : prnt = True - - if show and prnt: - str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1) - str2 = ' %10.3e' % (test2,) - str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm) - - print str1 + str2 + str3 - - if itn % 10 == 0: print - - if callback is not None: - callback(x) - - if istop != 0: break #TODO check this - - - if show: - print - print last + ' istop = %3g itn =%5g' % (istop,itn) - print last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond) - print last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm) - print last + ' Arnorm = %12.4e' % (Arnorm,) - print last + msg[istop+1] - - if istop == 6: - info = maxiter - else: - info = 0 - - return (postprocess(x),info) - - -if __name__ == '__main__': - from scipy import ones, arange - from scipy.linalg import norm - from scipy.sparse import spdiags - - n = 10 - - residuals = [] - - def cb(x): - residuals.append(norm(b - A*x)) - - #A = poisson((10,),format='csr') - A = spdiags( [arange(1,n+1,dtype=float)], [0], n, n, format='csr') - M = spdiags( [1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr') - A.psolve = M.matvec - b = 0*ones( A.shape[0] ) - x = minres(A,b,tol=1e-12,maxiter=None,callback=cb) - #x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0] diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/setup.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/setup.py deleted file mode 100755 index 29a94b2b05..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/setup.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -import re -from distutils.dep_util import newer_group, newer -from glob import glob -from os.path import join - - -def needs_veclib_wrapper(info): - """Returns true if needs special veclib 
wrapper.""" - import re - r_accel = re.compile("Accelerate") - r_vec = re.compile("vecLib") - res = False - try: - tmpstr = info['extra_link_args'] - for i in tmpstr: - if r_accel.search(i) or r_vec.search(i): - res = True - except KeyError: - pass - - return res - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.system_info import get_info, NotFoundError - - from numpy.distutils.misc_util import Configuration - - config = Configuration('isolve',parent_package,top_path) - - lapack_opt = get_info('lapack_opt') - - if not lapack_opt: - raise NotFoundError('no lapack/blas resources found') - - # iterative methods - methods = ['BiCGREVCOM.f.src', - 'BiCGSTABREVCOM.f.src', - 'CGREVCOM.f.src', - 'CGSREVCOM.f.src', -# 'ChebyREVCOM.f.src', - 'GMRESREVCOM.f.src', -# 'JacobiREVCOM.f.src', - 'QMRREVCOM.f.src', -# 'SORREVCOM.f.src' - ] - - if needs_veclib_wrapper(lapack_opt): - methods += [join('FWRAPPERS', 'veclib_cabi_f.f'), - join('FWRAPPERS', 'veclib_cabi_c.c')] - else: - methods += [join('FWRAPPERS', 'dummy.f')] - - - Util = ['STOPTEST2.f.src','getbreak.f.src'] - sources = Util + methods + ['_iterative.pyf.src'] - config.add_extension('_iterative', - sources=[join('iterative', x) for x in sources], - extra_info=lapack_opt, - depends=[join('iterative', 'FWRAPPERS', x) for x in - ['veclib_cabi_f.f', 'veclib_cabi_c.c', 'dummy.f']] - ) - - config.add_data_dir('tests') - - return config - - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/setupscons.py deleted file mode 100755 index 360d302402..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/setupscons.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = 
Configuration('isolve',parent_package,top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/demo_lgmres.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/demo_lgmres.py deleted file mode 100644 index 8d9bd8ab8c..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/demo_lgmres.py +++ /dev/null @@ -1,57 +0,0 @@ -import scipy.sparse.linalg as la -import scipy.sparse as sp -import scipy.io as io -import numpy as np -import sys - -#problem = "SPARSKIT/drivcav/e05r0100" -problem = "SPARSKIT/drivcav/e05r0200" -#problem = "Harwell-Boeing/sherman/sherman1" -#problem = "misc/hamm/add32" - -mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/') -f = mm.open('%s.mtx.gz' % problem) -Am = io.mmread(f).tocsr() -f.close() - -f = mm.open('%s_rhs1.mtx.gz' % problem) -b = np.array(io.mmread(f)).ravel() -f.close() - -count = [0] -def matvec(v): - count[0] += 1 - sys.stderr.write('%d\r' % count[0]) - return Am*v -A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype) - -M = 100 - -print "MatrixMarket problem %s" % problem -print "Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz) - -count[0] = 0 -x0, info = la.gmres(A, b, restrt=M, tol=1e-14) -count_0 = count[0] -err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b) -print "GMRES(%d):" % M, count_0, "matvecs, residual", err0 -if info != 0: - print "Didn't converge" - -count[0] = 0 -x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14) -count_1 = count[0] -err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b) -print "LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1, \ - "matvecs, residual:", err1 -if info != 0: - print "Didn't converge" - -count[0] = 0 -x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14) -count_2 = count[0] -err2 
= np.linalg.norm(Am*x2 - b) / np.linalg.norm(b) -print "LGMRES(%d,6) [same subspace size]:" % (M-6), count_2, \ - "matvecs, residual:", err2 -if info != 0: - print "Didn't converge" diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_iterative.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_iterative.py deleted file mode 100644 index ca003eaa53..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_iterative.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/env python -""" Test functions for the sparse.linalg.isolve module -""" - -import numpy as np - -from numpy.testing import TestCase, assert_equal, assert_array_equal, \ - assert_, assert_allclose - -from numpy import zeros, ones, arange, array, abs, max -from numpy.linalg import cond -from scipy.linalg import norm -from scipy.sparse import spdiags, csr_matrix - -from scipy.sparse.linalg import LinearOperator, aslinearoperator -from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres - -#TODO check that method preserve shape and type -#TODO test both preconditioner methods - -class Case(object): - def __init__(self, name, A, skip=None): - self.name = name - self.A = A - if skip is None: - self.skip = [] - else: - self.skip = skip - def __repr__(self): - return "<%s>" % self.name - -class IterativeParams(object): - def __init__(self): - # list of tuples (solver, symmetric, positive_definite ) - solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres] - sym_solvers = [minres, cg] - posdef_solvers = [cg] - real_solvers = [minres] - - self.solvers = solvers - - # list of tuples (A, symmetric, positive_definite ) - self.cases = [] - - # Symmetric and Positive Definite - N = 40 - data = ones((3,N)) - data[0,:] = 2 - data[1,:] = -1 - data[2,:] = -1 - Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr') - self.Poisson1D = Case("poisson1d", Poisson1D) - self.cases.append(self.Poisson1D) - - # Symmetric and Negative Definite - 
self.cases.append(Case("neg-poisson1d", -Poisson1D, - skip=posdef_solvers)) - - # Symmetric and Indefinite - data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d') - RandDiag = spdiags( data, [0], 10, 10, format='csr' ) - self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers)) - - # Random real-valued - np.random.seed(1234) - data = np.random.rand(4, 4) - self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers)) - - # Random symmetric real-valued - np.random.seed(1234) - data = np.random.rand(4, 4) - data = data + data.T - self.cases.append(Case("rand-sym", data, skip=posdef_solvers)) - - # Random pos-def symmetric real - np.random.seed(1234) - data = np.random.rand(9, 9) - data = np.dot(data.conj(), data.T) - self.cases.append(Case("rand-sym-pd", data)) - - # Random complex-valued - np.random.seed(1234) - data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4) - self.cases.append(Case("rand-cmplx", data, - skip=posdef_solvers+sym_solvers+real_solvers)) - - # Random hermitian complex-valued - np.random.seed(1234) - data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4) - data = data + data.T.conj() - self.cases.append(Case("rand-cmplx-herm", data, - skip=posdef_solvers+real_solvers)) - - # Random pos-def hermitian complex-valued - np.random.seed(1234) - data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9) - data = np.dot(data.conj(), data.T) - self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers)) - - # Non-symmetric and Positive Definite - # - # cgs, qmr, and bicg fail to converge on this one - # -- algorithmic limitation apparently - data = ones((2,10)) - data[0,:] = 2 - data[1,:] = -1 - A = spdiags( data, [0,-1], 10, 10, format='csr') - self.cases.append(Case("nonsymposdef", A, - skip=sym_solvers+[cgs, qmr, bicg])) - -def setup_module(): - global params - params = IterativeParams() - -def check_maxiter(solver, case): - A = case.A - tol = 1e-12 - - b = arange(A.shape[0], dtype=float) - x0 = 0*b - - residuals = [] - def 
callback(x): - residuals.append(norm(b - case.A*x)) - - x, info = solver(A, b, x0=x0, tol=tol, maxiter=3, callback=callback) - - assert_equal(len(residuals), 3) - assert_equal(info, 3) - -def test_maxiter(): - case = params.Poisson1D - for solver in params.solvers: - if solver in case.skip: continue - yield check_maxiter, solver, case - -def assert_normclose(a, b, tol=1e-8): - residual = norm(a - b) - tolerance = tol*norm(b) - msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance) - assert_(residual < tolerance, msg=msg) - -def check_convergence(solver, case): - tol = 1e-8 - - A = case.A - - b = arange(A.shape[0], dtype=float) - x0 = 0*b - - x, info = solver(A, b, x0=x0, tol=tol) - - assert_array_equal(x0, 0*b) #ensure that x0 is not overwritten - assert_equal(info,0) - assert_normclose(A.dot(x), b, tol=tol) - -def test_convergence(): - for solver in params.solvers: - for case in params.cases: - if solver in case.skip: continue - yield check_convergence, solver, case - -def check_precond_dummy(solver, case): - tol = 1e-8 - - def identity(b,which=None): - """trivial preconditioner""" - return b - - A = case.A - - M,N = A.shape - D = spdiags( [1.0/A.diagonal()], [0], M, N) - - b = arange(A.shape[0], dtype=float) - x0 = 0*b - - precond = LinearOperator(A.shape, identity, rmatvec=identity) - - if solver is qmr: - x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol) - else: - x, info = solver(A, b, M=precond, x0=x0, tol=tol) - assert_equal(info,0) - assert_normclose(A.dot(x), b, tol) - - A = aslinearoperator(A) - A.psolve = identity - A.rpsolve = identity - - x, info = solver(A, b, x0=x0, tol=tol) - assert_equal(info,0) - assert_normclose(A*x, b, tol=tol) - -def test_precond_dummy(): - case = params.Poisson1D - for solver in params.solvers: - if solver in case.skip: continue - yield check_precond_dummy, solver, case - -def test_gmres_basic(): - A = np.vander(np.arange(10) + 1)[:, ::-1] - b = np.zeros(10) - b[0] = 1 - x = np.linalg.solve(A, 
b) - - x_gm, err = gmres(A, b, restart=5, maxiter=1) - - assert_allclose(x_gm[0], 0.359, rtol=1e-2) - - -#------------------------------------------------------------------------------ - -class TestQMR(TestCase): - def test_leftright_precond(self): - """Check that QMR works with left and right preconditioners""" - - from scipy.sparse.linalg.dsolve import splu - from scipy.sparse.linalg.interface import LinearOperator - - n = 100 - - dat = ones(n) - A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1] ,n,n) - b = arange(n,dtype='d') - - L = spdiags([-dat/2, dat], [-1,0], n, n) - U = spdiags([4*dat, -dat], [ 0,1], n, n) - - L_solver = splu(L) - U_solver = splu(U) - - def L_solve(b): - return L_solver.solve(b) - def U_solve(b): - return U_solver.solve(b) - def LT_solve(b): - return L_solver.solve(b,'T') - def UT_solve(b): - return U_solver.solve(b,'T') - - M1 = LinearOperator( (n,n), matvec=L_solve, rmatvec=LT_solve ) - M2 = LinearOperator( (n,n), matvec=U_solve, rmatvec=UT_solve ) - - x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2) - - assert_equal(info,0) - assert_normclose(A*x, b, tol=1e-8) - -class TestGMRES(TestCase): - def test_callback(self): - - def store_residual(r, rvec): - rvec[rvec.nonzero()[0].max()+1] = r - - #Define, A,b - A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]])) - b = ones((A.shape[0],)) - maxiter=1 - rvec = zeros(maxiter+1) - rvec[0] = 1.0 - callback = lambda r:store_residual(r, rvec) - x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback) - diff = max(abs((rvec - array([1.0, 0.81649658092772603])))) - assert_(diff < 1e-5) - - -if __name__ == "__main__": - import nose - nose.run(argv=['', __file__]) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_lgmres.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_lgmres.py deleted file mode 100644 index 89f6a29b47..0000000000 --- 
a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_lgmres.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python -"""Tests for the linalg.isolve.lgmres module -""" - -from numpy.testing import TestCase, assert_ - -from numpy import zeros, array, allclose -from scipy.linalg import norm -from scipy.sparse import csr_matrix - -from scipy.sparse.linalg.interface import LinearOperator -from scipy.sparse.linalg import splu -from scipy.sparse.linalg.isolve import lgmres - -Am = csr_matrix(array([[-2,1,0,0,0,9], - [1,-2,1,0,5,0], - [0,1,-2,1,0,0], - [0,0,1,-2,1,0], - [0,3,0,1,-2,1], - [1,0,0,0,1,-2]])) -b = array([1,2,3,4,5,6]) -count = [0] -def matvec(v): - count[0] += 1 - return Am*v -A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype) -def do_solve(**kw): - count[0] = 0 - x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw) - count_0 = count[0] - assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b)) - return x0, count_0 - - -class TestLGMRES(TestCase): - def test_preconditioner(self): - # Check that preconditioning works - pc = splu(Am.tocsc()) - M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype) - - x0, count_0 = do_solve() - x1, count_1 = do_solve(M=M) - - assert_(count_1 == 3) - assert_(count_1 < count_0/2) - assert_(allclose(x1, x0, rtol=1e-14)) - - def test_outer_v(self): - # Check that the augmentation vectors behave as expected - - outer_v = [] - x0, count_0 = do_solve(outer_k=6, outer_v=outer_v) - assert_(len(outer_v) > 0) - assert_(len(outer_v) <= 6) - - x1, count_1 = do_solve(outer_k=6, outer_v=outer_v) - assert_(count_1 == 2, count_1) - assert_(count_1 < count_0/2) - assert_(allclose(x1, x0, rtol=1e-14)) - - # --- - - outer_v = [] - x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, store_outer_Av=False) - assert_(array([v[1] is None for v in outer_v]).all()) - assert_(len(outer_v) > 0) - assert_(len(outer_v) <= 6) - - x1, count_1 = do_solve(outer_k=6, outer_v=outer_v) - assert_(count_1 == 3, 
count_1) - assert_(count_1 < count_0/2) - assert_(allclose(x1, x0, rtol=1e-14)) - -if __name__ == "__main__": - import nose - nose.run(argv=['', __file__]) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_lsqr.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_lsqr.py deleted file mode 100644 index c054e608e1..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_lsqr.py +++ /dev/null @@ -1,60 +0,0 @@ -import numpy as np -from numpy.testing import assert_ - -from scipy.sparse.linalg import lsqr -from time import time - -# Set up a test problem -n = 35 -G = np.eye(n) -normal = np.random.normal -norm = np.linalg.norm - -for jj in range(5): - gg = normal(size=n) - hh = gg * gg.T - G += (hh + hh.T) * 0.5 - G += normal(size=n) * normal(size=n) - -b = normal(size=n) - -tol = 1e-10 -show = False -maxit = None - -def test_basic(): - svx = np.linalg.solve(G, b) - X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit) - xo = X[0] - assert_(norm(svx - xo) < 1e-5) - -if __name__ == "__main__": - svx = np.linalg.solve(G, b) - - tic = time() - X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit) - xo = X[0] - phio = X[3] - psio = X[7] - k = X[2] - chio = X[8] - mg = np.amax(G - G.T) - if mg > 1e-14: - sym='No' - else: - sym='Yes' - - print 'LSQR' - print "Is linear operator symmetric? 
" + sym - print "n: %3g iterations: %3g" % (n, k) - print "Norms computed in %.2fs by LSQR" % (time() - tic) - print " ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " %( chio, phio, psio) - print "Residual norms computed directly:" - print " ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo), - norm(G*xo - b), - norm(G.T*(G*xo-b))) - print "Direct solution norms:" - print " ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx -b)) - print "" - print " || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo) - print "" diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_utils.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_utils.py deleted file mode 100644 index 0920964680..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/tests/test_utils.py +++ /dev/null @@ -1,9 +0,0 @@ - -import numpy as np -from numpy.testing import assert_raises - -from scipy.sparse.linalg import utils - - -def test_make_system_bad_shape(): - assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4)) diff --git a/scipy-0.10.1/scipy/sparse/linalg/isolve/utils.py b/scipy-0.10.1/scipy/sparse/linalg/isolve/utils.py deleted file mode 100644 index de0d47c0ea..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/isolve/utils.py +++ /dev/null @@ -1,130 +0,0 @@ -__docformat__ = "restructuredtext en" - -__all__ = [] - -from warnings import warn - -from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros - -from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \ - IdentityOperator - -_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', - ('f','D'):'D', ('d','f'):'d', ('d','d'):'d', - ('d','F'):'D', ('d','D'):'D', ('F','f'):'F', - ('F','d'):'D', ('F','F'):'F', ('F','D'):'D', - ('D','f'):'D', ('D','d'):'D', ('D','F'):'D', - ('D','D'):'D'} - -def coerce(x,y): - if x not in 'fdFD': - x = 'd' - if y not in 'fdFD': - y = 'd' - return _coerce_rules[x,y] - -def id(x): - return x - -def make_system(A, M, x0, b, xtype=None): - 
"""Make a linear system Ax=b - - Parameters - ---------- - A : LinearOperator - sparse or dense matrix (or any valid input to aslinearoperator) - M : {LinearOperator, Nones} - preconditioner - sparse or dense matrix (or any valid input to aslinearoperator) - x0 : {array_like, None} - initial guess to iterative method - b : array_like - right hand side - xtype : {'f', 'd', 'F', 'D', None} - dtype of the x vector - - Returns - ------- - (A, M, x, b, postprocess) - A : LinearOperator - matrix of the linear system - M : LinearOperator - preconditioner - x : rank 1 ndarray - initial guess - b : rank 1 ndarray - right hand side - postprocess : function - converts the solution vector to the appropriate - type and dimensions (e.g. (N,1) matrix) - - """ - A_ = A - A = aslinearoperator(A) - - if A.shape[0] != A.shape[1]: - raise ValueError('expected square matrix, but got shape=%s' % (A.shape,)) - - N = A.shape[0] - - b = asanyarray(b) - - if not (b.shape == (N,1) or b.shape == (N,)): - raise ValueError('A and b have incompatible dimensions') - - if b.dtype.char not in 'fdFD': - b = b.astype('d') # upcast non-FP types to double - - def postprocess(x): - if isinstance(b,matrix): - x = asmatrix(x) - return x.reshape(b.shape) - - if xtype is None: - if hasattr(A,'dtype'): - xtype = A.dtype.char - else: - xtype = A.matvec(b).dtype.char - xtype = coerce(xtype, b.dtype.char) - else: - warn('Use of xtype argument is deprecated. '\ - 'Use LinearOperator( ... 
, dtype=xtype) instead.',\ - DeprecationWarning) - if xtype == 0: - xtype = b.dtype.char - else: - if xtype not in 'fdFD': - raise ValueError("xtype must be 'f', 'd', 'F', or 'D'") - - b = asarray(b,dtype=xtype) #make b the same type as x - b = b.ravel() - - if x0 is None: - x = zeros(N, dtype=xtype) - else: - x = array(x0, dtype=xtype) - if not (x.shape == (N,1) or x.shape == (N,)): - raise ValueError('A and x have incompatible dimensions') - x = x.ravel() - - # process preconditioner - if M is None: - if hasattr(A_,'psolve'): - psolve = A_.psolve - else: - psolve = id - if hasattr(A_,'rpsolve'): - rpsolve = A_.rpsolve - else: - rpsolve = id - if psolve is id and rpsolve is id: - M = IdentityOperator(shape=A.shape, dtype=A.dtype) - else: - M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve, - dtype=A.dtype) - else: - M = aslinearoperator(M) - if A.shape != M.shape: - raise ValueError('matrix and preconditioner have different shapes') - - return A, M, x, b, postprocess diff --git a/scipy-0.10.1/scipy/sparse/linalg/setup.py b/scipy-0.10.1/scipy/sparse/linalg/setup.py deleted file mode 100755 index 29a04aa199..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('linalg',parent_package,top_path) - - config.add_subpackage(('isolve')) - config.add_subpackage(('dsolve')) - config.add_subpackage(('eigen')) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/setupscons.py b/scipy-0.10.1/scipy/sparse/linalg/setupscons.py deleted file mode 100755 index 402ea0119f..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/setupscons.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python - -def 
configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('linalg',parent_package,top_path, - setup_name = 'setupscons.py') - - config.add_subpackage(('isolve')) - config.add_subpackage(('dsolve')) - config.add_subpackage(('eigen')) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/linalg/tests/test_interface.py b/scipy-0.10.1/scipy/sparse/linalg/tests/test_interface.py deleted file mode 100644 index d70b9fc621..0000000000 --- a/scipy-0.10.1/scipy/sparse/linalg/tests/test_interface.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Test functions for the sparse.linalg.interface module -""" - -from numpy.testing import TestCase, assert_, assert_equal, \ - assert_raises - -import numpy as np -import scipy.sparse as sparse - -from scipy.sparse.linalg import interface - - -class TestLinearOperator(TestCase): - def setUp(self): - self.matvecs = [] - - # these matvecs do not preserve type or shape - def matvec1(x): - return np.array([ 1*x[0] + 2*x[1] + 3*x[2], - 4*x[0] + 5*x[1] + 6*x[2]]) - def matvec2(x): - return np.matrix(matvec1(x).reshape(2,1)) - - self.matvecs.append(matvec1) - self.matvecs.append(matvec2) - - def test_matvec(self): - - for matvec in self.matvecs: - A = interface.LinearOperator((2,3), matvec) - - assert_equal(A.matvec(np.array([1,2,3])), [14,32]) - assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) - assert_equal(A * np.array([1,2,3]), [14,32]) - assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) - - assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]]) - assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]]) - - assert_( isinstance(A.matvec(np.array([1,2,3])), np.ndarray) ) - assert_( isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray) ) - assert_( isinstance(A * np.array([1,2,3]), np.ndarray) ) - assert_( 
isinstance(A * np.array([[1],[2],[3]]), np.ndarray) ) - - assert_( isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray) ) - assert_( isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray) ) - - assert_raises(ValueError, A.matvec, np.array([1,2])) - assert_raises(ValueError, A.matvec, np.array([1,2,3,4])) - assert_raises(ValueError, A.matvec, np.array([[1],[2]])) - assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]])) - - - -class TestAsLinearOperator(TestCase): - def setUp(self): - self.cases = [] - - def make_cases(dtype): - self.cases.append( np.matrix([[1,2,3],[4,5,6]], dtype=dtype) ) - self.cases.append( np.array([[1,2,3],[4,5,6]], dtype=dtype) ) - self.cases.append( sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype) ) - - class matlike: - def __init__(self, dtype): - self.dtype = np.dtype(dtype) - self.shape = (2,3) - def matvec(self,x): - y = np.array([ 1*x[0] + 2*x[1] + 3*x[2], - 4*x[0] + 5*x[1] + 6*x[2]], dtype=self.dtype) - if len(x.shape) == 2: - y = y.reshape(-1,1) - return y - - def rmatvec(self,x): - return np.array([ 1*x[0] + 4*x[1], - 2*x[0] + 5*x[1], - 3*x[0] + 6*x[1]], dtype=self.dtype) - self.cases.append( matlike('int') ) - - make_cases('int32') - make_cases('float32') - make_cases('float64') - - def test_basic(self): - - for M in self.cases: - A = interface.aslinearoperator(M) - M,N = A.shape - - assert_equal(A.matvec(np.array([1,2,3])), [14,32]) - assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) - - assert_equal(A * np.array([1,2,3]), [14,32]) - assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) - - assert_equal(A.rmatvec(np.array([1,2])), [9,12,15]) - assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]]) - - assert_equal(A.matmat(np.array([[1,4],[2,5],[3,6]])), [[14,32],[32,77]] ) - - assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]] ) - - if hasattr(M,'dtype'): - assert_equal(A.dtype, M.dtype) diff --git a/scipy-0.10.1/scipy/sparse/setup.py b/scipy-0.10.1/scipy/sparse/setup.py deleted file 
mode 100755 index 2c2b9da6b0..0000000000 --- a/scipy-0.10.1/scipy/sparse/setup.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - import numpy - from numpy.distutils.misc_util import Configuration - - config = Configuration('sparse',parent_package,top_path) - - config.add_data_dir('tests') - config.add_data_dir('benchmarks') - - config.add_subpackage('linalg') - config.add_subpackage('sparsetools') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/setupscons.py b/scipy-0.10.1/scipy/sparse/setupscons.py deleted file mode 100755 index e93f8226e8..0000000000 --- a/scipy-0.10.1/scipy/sparse/setupscons.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python - -from os.path import join -import sys - -def configuration(parent_package='',top_path=None): - import numpy - from numpy.distutils.misc_util import Configuration - - config = Configuration('sparse',parent_package,top_path, - setup_name = 'setupscons.py') - - config.add_data_dir('tests') - config.add_subpackage('linalg') - config.add_subpackage('sparsetools') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/SConscript b/scipy-0.10.1/scipy/sparse/sparsetools/SConscript deleted file mode 100644 index f98dcdaf36..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/SConscript +++ /dev/null @@ -1,10 +0,0 @@ -# Last Change: Wed Mar 05 09:00 PM 2008 J -# vim:syntax=python -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) -env.PrependUnique(CPPDEFINES = '__STDC_FORMAT_MACROS') - -for fmt in ['csr','csc','coo','bsr','dia', 'csgraph']: - sources = [ fmt + '_wrap.cxx' ] - env.NumpyPythonExtension('_%s' % fmt, source = sources) diff --git 
a/scipy-0.10.1/scipy/sparse/sparsetools/SConstruct b/scipy-0.10.1/scipy/sparse/sparsetools/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/__init__.py b/scipy-0.10.1/scipy/sparse/sparsetools/__init__.py deleted file mode 100644 index d5e39e37eb..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""sparsetools - a collection of routines for sparse matrix operations -""" - -from csr import * -from csc import * -from coo import * -from dia import * -from bsr import * -from csgraph import * diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/bento.info b/scipy-0.10.1/scipy/sparse/sparsetools/bento.info deleted file mode 100644 index 93b4a846aa..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/bento.info +++ /dev/null @@ -1,15 +0,0 @@ -HookFile: bscript - -Library: - Extension: _csr - Sources: csr_wrap.cxx - Extension: _csc - Sources: csc_wrap.cxx - Extension: _coo - Sources: coo_wrap.cxx - Extension: _bsr - Sources: bsr_wrap.cxx - Extension: _dia - Sources: dia_wrap.cxx - Extension: _csgraph - Sources: csgraph_wrap.cxx diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/bscript b/scipy-0.10.1/scipy/sparse/sparsetools/bscript deleted file mode 100644 index d507bdaa08..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/bscript +++ /dev/null @@ -1,13 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - bld = context.waf_context - - def builder(extension): - bld(features="cxx cxxshlib pyext bento", - source=extension.sources, - defines=('__STDC_FORMAT_MACROS',), - target=extension.name) - for e in ["_csr", "_csc", "_coo", "_bsr", "_dia", "_csgraph"]: - context.register_builder(e, builder) diff --git 
a/scipy-0.10.1/scipy/sparse/sparsetools/bsr.h b/scipy-0.10.1/scipy/sparse/sparsetools/bsr.h deleted file mode 100644 index 36eb05fd2b..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/bsr.h +++ /dev/null @@ -1,747 +0,0 @@ -#ifndef __BSR_H__ -#define __BSR_H__ - -#include -#include -#include - -#include "csr.h" -#include "dense.h" - - -template -void bsr_diagonal(const I n_brow, - const I n_bcol, - const I R, - const I C, - const I Ap[], - const I Aj[], - const T Ax[], - T Yx[]) -{ - const I D = std::min(R*n_brow, C*n_bcol); - const I RC = R*C; - - for(I i = 0; i < D; i++){ - Yx[i] = 0; - } - - if ( R == C ){ - //main diagonal with square blocks - const I end = std::min(n_brow,n_bcol); - for(I i = 0; i < end; i++){ - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - if (i == Aj[jj]){ - I row = R*i; - const T * val = Ax + RC*jj; - for(I bi = 0; bi < R; bi++){ - Yx[row + bi] = *val; - val += C + 1; - } - } - } - } - } - else - { - //This could be made faster - const I end = (D/R) + (D % R == 0 ? 
0 : 1); - for(I i = 0; i < end; i++){ - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - const I base_row = R*i; - const I base_col = C*Aj[jj]; - const T * base_val = Ax + RC*jj; - - for(I bi = 0; bi < R; bi++){ - const I row = base_row + bi; - if (row >= D) break; - - for(I bj = 0; bj < C; bj++){ - const I col = base_col + bj; - if (row == col){ - Yx[row] = base_val[bi*C + bj]; - } - } - } - } - } - } -} - - - -/* - * Scale the rows of a BSR matrix *in place* - * - * A[i,:] *= X[i] - * - */ -template -void bsr_scale_rows(const I n_brow, - const I n_bcol, - const I R, - const I C, - const I Ap[], - const I Aj[], - T Ax[], - const T Xx[]) -{ - const I RC = R*C; - - for(I i = 0; i < n_brow; i++){ - const T * row_scales = Xx + R*i; - - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - T * block = Ax + RC*jj; - - for(I bi = 0; bi < R; bi++){ - scal(C, row_scales[bi], block + C*bi); - } - } - } -} - -/* - * Scale the columns of a BSR matrix *in place* - * - * A[:,i] *= X[i] - * - */ -template -void bsr_scale_columns(const I n_brow, - const I n_bcol, - const I R, - const I C, - const I Ap[], - const I Aj[], - T Ax[], - const T Xx[]) -{ - const I bnnz = Ap[n_brow]; - const I RC = R*C; - for(I i = 0; i < bnnz; i++){ - const T * scales = Xx + C*Aj[i] ; - T * block = Ax + RC*i; - - for(I bi = 0; bi < R; bi++){ - for(I bj = 0; bj < C; bj++){ - block[C*bi + bj] *= scales[bj]; - } - } - - } -} - - - -/* - * Sort the column block indices of a BSR matrix inplace - * - * Input Arguments: - * I n_brow - number of row blocks in A - * I n_bcol - number of column blocks in A - * I R - rows per block - * I C - columns per block - * I Ap[n_brow+1] - row pointer - * I Aj[nblk(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - */ -template -void bsr_sort_indices(const I n_brow, - const I n_bcol, - const I R, - const I C, - I Ap[], - I Aj[], - T Ax[]) -{ - if( R == 1 && C == 1 ){ - csr_sort_indices(n_brow, Ap, Aj, Ax); - return; - } - - - const I nblks = Ap[n_brow]; - const I RC = R*C; - const I nnz = 
RC*nblks; - - //compute permutation of blocks using CSR - std::vector perm(nblks); - - for(I i = 0; i < nblks; i++) - perm[i] = i; - - csr_sort_indices(n_brow, Ap, Aj, &perm[0]); - - std::vector Ax_copy(nnz); - std::copy(Ax, Ax + nnz, Ax_copy.begin()); - - for(I i = 0; i < nblks; i++){ - const T * input = &Ax_copy[RC * perm[i]]; - T * output = Ax + RC*i; - std::copy(input, input + RC, output); - } -} - - -/* - * Compute transpose(A) BSR matrix A - * - * Input Arguments: - * I n_brow - number of row blocks in A - * I n_bcol - number of column blocks in A - * I R - rows per block - * I C - columns per block - * I Ap[n_brow+1] - row pointer - * I Aj[nblk(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - * Output Arguments: - * I Bp[n_col+1] - row pointer - * I Bj[nblk(A)] - column indices - * T Bx[nnz(A)] - nonzeros - * - * Note: - * Output arrays Bp, Bj, Bx must be preallocated - * - * Note: - * Input: column indices *are not* assumed to be in sorted order - * Output: row indices *will be* in sorted order - * - * Complexity: Linear. 
Specifically O(nnz(A) + max(n_row,n_col)) - * - */ -template -void bsr_transpose(const I n_brow, - const I n_bcol, - const I R, - const I C, - const I Ap[], - const I Aj[], - const T Ax[], - I Bp[], - I Bj[], - T Bx[]) -{ - const I nblks = Ap[n_brow]; - const I RC = R*C; - - //compute permutation of blocks using tranpose(CSR) - std::vector perm_in (nblks); - std::vector perm_out(nblks); - - for(I i = 0; i < nblks; i++) - perm_in[i] = i; - - csr_tocsc(n_brow, n_bcol, Ap, Aj, &perm_in[0], Bp, Bj, &perm_out[0]); - - for(I i = 0; i < nblks; i++){ - const T * Ax_blk = Ax + RC * perm_out[i]; - T * Bx_blk = Bx + RC * i; - for(I r = 0; r < R; r++){ - for(I c = 0; c < C; c++){ - Bx_blk[c * R + r] = Ax_blk[r * C + c]; - } - } - } -} - - - -template -void bsr_matmat_pass2(const I n_brow, const I n_bcol, - const I R, const I C, const I N, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - assert(R > 0 && C > 0 && N > 0); - - if( R == 1 && N == 1 && C == 1 ){ - // Use CSR for 1x1 blocksize - csr_matmat_pass2(n_brow, n_bcol, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx); - return; - } - - const I RC = R*C; - const I RN = R*N; - const I NC = N*C; - - std::fill( Cx, Cx + RC * Cp[n_brow], 0 ); //clear output array - - std::vector next(n_bcol,-1); - std::vector mats(n_bcol); - - I nnz = 0; - Cp[0] = 0; - - for(I i = 0; i < n_brow; i++){ - I head = -2; - I length = 0; - - I jj_start = Ap[i]; - I jj_end = Ap[i+1]; - for(I jj = jj_start; jj < jj_end; jj++){ - I j = Aj[jj]; - - I kk_start = Bp[j]; - I kk_end = Bp[j+1]; - for(I kk = kk_start; kk < kk_end; kk++){ - I k = Bj[kk]; - - if(next[k] == -1){ - next[k] = head; - head = k; - Cj[nnz] = k; - mats[k] = Cx + RC*nnz; - nnz++; - length++; - } - - const T * A = Ax + jj*RN; - const T * B = Bx + kk*NC; - - gemm(R, C, N, A, B, mats[k]); - } - } - - for(I jj = 0; jj < length; jj++){ - I temp = head; - head = next[head]; - next[temp] = -1; //clear arrays - } - - } -} - - - - -template 
-bool is_nonzero_block(const T block[], const I blocksize){ - for(I i = 0; i < blocksize; i++){ - if(block[i] != 0){ - return true; - } - } - return false; -} - - - -/* - * Compute C = A (binary_op) B for BSR matrices that are not - * necessarily canonical BSR format. Specifically, this method - * works even when the input matrices have duplicate and/or - * unsorted column indices within a given row. - * - * Refer to bsr_binop_bsr() for additional information - * - * Note: - * Output arrays Cp, Cj, and Cx must be preallocated - * If nnz(C) is not known a priori, a conservative bound is: - * nnz(C) <= nnz(A) + nnz(B) - * - * Note: - * Input: A and B column indices are not assumed to be in sorted order - * Output: C column indices are not generally in sorted order - * C will not contain any duplicate entries or explicit zeros. - * - */ -template -void bsr_binop_bsr_general(const I n_brow, const I n_bcol, - const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], - const bin_op& op) -{ - //Method that works for duplicate and/or unsorted indices - const I RC = R*C; - - Cp[0] = 0; - I nnz = 0; - - std::vector next(n_bcol, -1); - std::vector A_row(n_bcol * RC, 0); // this approach can be problematic for large R - std::vector B_row(n_bcol * RC, 0); - - for(I i = 0; i < n_brow; i++){ - I head = -2; - I length = 0; - - //add a row of A to A_row - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - I j = Aj[jj]; - - for(I n = 0; n < RC; n++) - A_row[RC*j + n] += Ax[RC*jj + n]; - - if(next[j] == -1){ - next[j] = head; - head = j; - length++; - } - } - - //add a row of B to B_row - for(I jj = Bp[i]; jj < Bp[i+1]; jj++){ - I j = Bj[jj]; - - for(I n = 0; n < RC; n++) - B_row[RC*j + n] += Bx[RC*jj + n]; - - if(next[j] == -1){ - next[j] = head; - head = j; - length++; - } - } - - - for(I jj = 0; jj < length; jj++){ - // compute op(block_A, block_B) - for(I n = 0; n < RC; n++) - Cx[RC * nnz + n] = op(A_row[RC*head 
+ n], B_row[RC*head + n]); - - // advance counter if block is nonzero - if( is_nonzero_block(Cx + (RC * nnz), RC) ) - Cj[nnz++] = head; - - // clear block_A and block_B values - for(I n = 0; n < RC; n++){ - A_row[RC*head + n] = 0; - B_row[RC*head + n] = 0; - } - - I temp = head; - head = next[head]; - next[temp] = -1; - } - - Cp[i + 1] = nnz; - } -} - - -/* - * Compute C = A (binary_op) B for BSR matrices that are in the - * canonical BSR format. Specifically, this method requires that - * the rows of the input matrices are free of duplicate column indices - * and that the column indices are in sorted order. - * - * Refer to bsr_binop_bsr() for additional information - * - * Note: - * Input: A and B column indices are assumed to be in sorted order - * Output: C column indices will be in sorted order - * Cx will not contain any zero entries - * - */ -template -void bsr_binop_bsr_canonical(const I n_brow, const I n_bcol, - const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], - const bin_op& op) -{ - const I RC = R*C; - T * result = Cx; - - Cp[0] = 0; - I nnz = 0; - - for(I i = 0; i < n_brow; i++){ - I A_pos = Ap[i]; - I B_pos = Bp[i]; - I A_end = Ap[i+1]; - I B_end = Bp[i+1]; - - //while not finished with either row - while(A_pos < A_end && B_pos < B_end){ - I A_j = Aj[A_pos]; - I B_j = Bj[B_pos]; - - if(A_j == B_j){ - for(I n = 0; n < RC; n++){ - result[n] = op(Ax[RC*A_pos + n], Bx[RC*B_pos + n]); - } - - if( is_nonzero_block(result,RC) ){ - Cj[nnz] = A_j; - result += RC; - nnz++; - } - - A_pos++; - B_pos++; - } else if (A_j < B_j) { - for(I n = 0; n < RC; n++){ - result[n] = op(Ax[RC*A_pos + n], 0); - } - - if(is_nonzero_block(result,RC)){ - Cj[nnz] = A_j; - result += RC; - nnz++; - } - - A_pos++; - } else { - //B_j < A_j - for(I n = 0; n < RC; n++){ - result[n] = op(0, Bx[RC*B_pos + n]); - } - if(is_nonzero_block(result,RC)){ - Cj[nnz] = B_j; - result += RC; - nnz++; - } - - 
B_pos++; - } - } - - //tail - while(A_pos < A_end){ - for(I n = 0; n < RC; n++){ - result[n] = op(Ax[RC*A_pos + n], 0); - } - - if(is_nonzero_block(result, RC)){ - Cj[nnz] = Aj[A_pos]; - result += RC; - nnz++; - } - - A_pos++; - } - while(B_pos < B_end){ - for(I n = 0; n < RC; n++){ - result[n] = op(0,Bx[RC*B_pos + n]); - } - - if(is_nonzero_block(result, RC)){ - Cj[nnz] = Bj[B_pos]; - result += RC; - nnz++; - } - - B_pos++; - } - - Cp[i+1] = nnz; - } -} - - -/* - * Compute C = A (binary_op) B for CSR matrices A,B where the column - * indices with the rows of A and B are known to be sorted. - * - * binary_op(x,y) - binary operator to apply elementwise - * - * Input Arguments: - * I n_row - number of rows in A (and B) - * I n_col - number of columns in A (and B) - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * I Bp[n_row+1] - row pointer - * I Bj[nnz(B)] - column indices - * T Bx[nnz(B)] - nonzeros - * Output Arguments: - * I Cp[n_row+1] - row pointer - * I Cj[nnz(C)] - column indices - * T Cx[nnz(C)] - nonzeros - * - * Note: - * Output arrays Cp, Cj, and Cx must be preallocated - * If nnz(C) is not known a priori, a conservative bound is: - * nnz(C) <= nnz(A) + nnz(B) - * - * Note: - * Input: A and B column indices are not assumed to be in sorted order. - * Output: C column indices will be in sorted if both A and B have sorted indices. 
- * Cx will not contain any zero entries - * - */ -template -void bsr_binop_bsr(const I n_brow, const I n_bcol, - const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], - const bin_op& op) -{ - assert( R > 0 && C > 0); - - if( R == 1 && C == 1 ){ - //use CSR for 1x1 blocksize - csr_binop_csr(n_brow, n_bcol, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx, op); - } - else if ( csr_has_canonical_format(n_brow, Ap, Aj) && csr_has_canonical_format(n_brow, Bp, Bj) ){ - // prefer faster implementation - bsr_binop_bsr_canonical(n_brow, n_bcol, R, C, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx, op); - } - else { - // slower fallback method - bsr_binop_bsr_general(n_brow, n_bcol, R, C, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx, op); - } -} - -/* element-wise binary operations */ -template -void bsr_elmul_bsr(const I n_row, const I n_col, const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::multiplies()); -} - -template -void bsr_eldiv_bsr(const I n_row, const I n_col, const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::divides()); -} - - -template -void bsr_plus_bsr(const I n_row, const I n_col, const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::plus()); -} - -template -void bsr_minus_bsr(const I n_row, const I n_col, const I R, const I C, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::minus()); -} - - - - - -//template -//void bsr_tocsr(const I 
n_brow, -// const I n_bcol, -// const I R, -// const I C, -// const I Ap[], -// const I Aj[], -// const T Ax[], -// I Bp[], -// I Bj[] -// T Bx[]) -//{ -// const I RC = R*C; -// -// for(I brow = 0; brow < n_brow; brow++){ -// I row_size = C * (Ap[brow + 1] - Ap[brow]); -// for(I r = 0; r < R; r++){ -// Bp[R*brow + r] = RC * Ap[brow] + r * row_size -// } -// } -//} - - -template -void bsr_matvec(const I n_brow, - const I n_bcol, - const I R, - const I C, - const I Ap[], - const I Aj[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - assert(R > 0 && C > 0); - - if( R == 1 && C == 1 ){ - //use CSR for 1x1 blocksize - csr_matvec(n_brow, n_bcol, Ap, Aj, Ax, Xx, Yx); - return; - } - - const I RC = R*C; - for(I i = 0; i < n_brow; i++){ - T * y = Yx + R * i; - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - const I j = Aj[jj]; - const T * A = Ax + RC * jj; - const T * x = Xx + C * j; - gemv(R, C, A, x, y); // y += A*x - } - } -} - - -/* - * Compute Y += A*X for BSR matrix A and dense block vectors X,Y - * - * - * Input Arguments: - * I n_brow - number of row blocks in A - * I n_bcol - number of column blocks in A - * I n_vecs - number of column vectors in X and Y - * I R - rows per block - * I C - columns per block - * I Ap[n_brow+1] - row pointer - * I Aj[nblks(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * T Xx[C*n_bcol,n_vecs] - input vector - * - * Output Arguments: - * T Yx[R*n_brow,n_vecs] - output vector - * - */ -template -void bsr_matvecs(const I n_brow, - const I n_bcol, - const I n_vecs, - const I R, - const I C, - const I Ap[], - const I Aj[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - assert(R > 0 && C > 0); - - if( R == 1 && C == 1 ){ - //use CSR for 1x1 blocksize - csr_matvecs(n_brow, n_bcol, n_vecs, Ap, Aj, Ax, Xx, Yx); - return; - } - - const I A_bs = R*C; //Ax blocksize - const I Y_bs = n_vecs*R; //Yx blocksize - const I X_bs = C*n_vecs; //Xx blocksize - - for(I i = 0; i < n_brow; i++){ - T * y = Yx + Y_bs * i; - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - 
const I j = Aj[jj]; - const T * A = Ax + A_bs * jj; - const T * x = Xx + X_bs * j; - gemm(R, n_vecs, C, A, x, y); // y += A*x - } - } -} - - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/bsr.py b/scipy-0.10.1/scipy/sparse/sparsetools/bsr.py deleted file mode 100644 index 8b5789a871..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/bsr.py +++ /dev/null @@ -1,549 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 2.0.1+capsulehack -# -# Do not make changes to this file unless you know what you are doing--modify -# the SWIG interface file instead. -# This file is compatible with both classic and new-style classes. - -from sys import version_info -if version_info >= (2,6,0): - def swig_import_helper(): - from os.path import dirname - import imp - fp = None - try: - fp, pathname, description = imp.find_module('_bsr', [dirname(__file__)]) - except ImportError: - import _bsr - return _bsr - if fp is not None: - try: - _mod = imp.load_module('_bsr', fp, pathname, description) - finally: - fp.close() - return _mod - _bsr = swig_import_helper() - del swig_import_helper -else: - import _bsr -del version_info -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. 
-def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'SwigPyObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError(name) - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -try: - _object = object - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 - - -def bsr_diagonal(*args): - """ - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - signed char Ax, signed char Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned char Ax, unsigned char Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - short Ax, short Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned short Ax, unsigned short Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - int Ax, int Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned int Ax, unsigned int Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long long Ax, long long Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned long long Ax, unsigned long long Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, 
int Ap, int Aj, - float Ax, float Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - double Ax, double Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long double Ax, long double Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, npy_cfloat_wrapper Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, npy_cdouble_wrapper Yx) - bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Yx) - """ - return _bsr.bsr_diagonal(*args) - -def bsr_scale_rows(*args): - """ - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - signed char Ax, signed char Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned char Ax, unsigned char Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - short Ax, short Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned short Ax, unsigned short Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - int Ax, int Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned int Ax, unsigned int Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long long Ax, long long Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned long long Ax, unsigned long long Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - float Ax, float Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - double Ax, double Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long double Ax, long double Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - 
npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx) - bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx) - """ - return _bsr.bsr_scale_rows(*args) - -def bsr_scale_columns(*args): - """ - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - signed char Ax, signed char Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned char Ax, unsigned char Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - short Ax, short Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned short Ax, unsigned short Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - int Ax, int Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned int Ax, unsigned int Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long long Ax, long long Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned long long Ax, unsigned long long Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - float Ax, float Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - double Ax, double Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long double Ax, long double Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx) - bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx) - """ - return _bsr.bsr_scale_columns(*args) - -def bsr_transpose(*args): - """ - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - signed char Ax, int Bp, int Bj, signed char Bx) - 
bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned char Ax, int Bp, int Bj, unsigned char Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - short Ax, int Bp, int Bj, short Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned short Ax, int Bp, int Bj, unsigned short Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - int Ax, int Bp, int Bj, int Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned int Ax, int Bp, int Bj, unsigned int Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long long Ax, int Bp, int Bj, long long Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned long long Ax, int Bp, int Bj, unsigned long long Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - float Ax, int Bp, int Bj, float Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - double Ax, int Bp, int Bj, double Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long double Ax, int Bp, int Bj, long double Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx) - bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, int Bp, int Bj, - npy_clongdouble_wrapper Bx) - """ - return _bsr.bsr_transpose(*args) - -def bsr_matmat_pass2(*args): - """ - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, signed char Ax, int Bp, int Bj, signed char Bx, - int Cp, int Cj, signed char Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, unsigned char Ax, int Bp, int Bj, unsigned char Bx, - int Cp, int Cj, unsigned char Cx) 
- bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, short Ax, int Bp, int Bj, short Bx, - int Cp, int Cj, short Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, unsigned short Ax, int Bp, int Bj, - unsigned short Bx, int Cp, int Cj, unsigned short Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, int Ax, int Bp, int Bj, int Bx, int Cp, - int Cj, int Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, unsigned int Ax, int Bp, int Bj, unsigned int Bx, - int Cp, int Cj, unsigned int Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, long long Ax, int Bp, int Bj, long long Bx, - int Cp, int Cj, long long Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, unsigned long long Ax, int Bp, int Bj, - unsigned long long Bx, int Cp, int Cj, unsigned long long Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, float Ax, int Bp, int Bj, float Bx, - int Cp, int Cj, float Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, double Ax, int Bp, int Bj, double Bx, - int Cp, int Cj, double Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, long double Ax, int Bp, int Bj, long double Bx, - int Cp, int Cj, long double Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, npy_cfloat_wrapper Ax, int Bp, int Bj, - npy_cfloat_wrapper Bx, int Cp, int Cj, npy_cfloat_wrapper Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, npy_cdouble_wrapper Ax, int Bp, int Bj, - npy_cdouble_wrapper Bx, int Cp, int Cj, - npy_cdouble_wrapper Cx) - bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, - int Aj, npy_clongdouble_wrapper Ax, int Bp, - int Bj, npy_clongdouble_wrapper Bx, int Cp, - int Cj, 
npy_clongdouble_wrapper Cx) - """ - return _bsr.bsr_matmat_pass2(*args) - -def bsr_matvec(*args): - """ - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - signed char Ax, signed char Xx, signed char Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned char Ax, unsigned char Xx, unsigned char Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - short Ax, short Xx, short Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned short Ax, unsigned short Xx, unsigned short Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - int Ax, int Xx, int Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned int Ax, unsigned int Xx, unsigned int Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long long Ax, long long Xx, long long Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned long long Ax, unsigned long long Xx, - unsigned long long Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - float Ax, float Xx, float Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - double Ax, double Xx, double Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long double Ax, long double Xx, long double Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx, - npy_cfloat_wrapper Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx, - npy_cdouble_wrapper Yx) - bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx, - npy_clongdouble_wrapper Yx) - """ - return _bsr.bsr_matvec(*args) - -def bsr_matvecs(*args): - """ - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, signed char Ax, signed char Xx, - signed char Yx) - 
bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, unsigned char Ax, unsigned char Xx, - unsigned char Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, short Ax, short Xx, short Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, unsigned short Ax, unsigned short Xx, - unsigned short Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, int Ax, int Xx, int Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, unsigned int Ax, unsigned int Xx, - unsigned int Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, long long Ax, long long Xx, long long Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, unsigned long long Ax, unsigned long long Xx, - unsigned long long Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, float Ax, float Xx, float Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, double Ax, double Xx, double Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, long double Ax, long double Xx, - long double Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx, - npy_cfloat_wrapper Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx, - npy_cdouble_wrapper Yx) - bsr_matvecs(int n_brow, int n_bcol, int n_vecs, int R, int C, int Ap, - int Aj, npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx, - npy_clongdouble_wrapper Yx) - """ - return _bsr.bsr_matvecs(*args) - -def bsr_elmul_bsr(*args): - """ - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - signed char Ax, int Bp, int Bj, signed char Bx, - int Cp, int Cj, signed char Cx) - bsr_elmul_bsr(int n_row, int 
n_col, int R, int C, int Ap, int Aj, - unsigned char Ax, int Bp, int Bj, unsigned char Bx, - int Cp, int Cj, unsigned char Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - short Ax, int Bp, int Bj, short Bx, int Cp, - int Cj, short Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned short Ax, int Bp, int Bj, unsigned short Bx, - int Cp, int Cj, unsigned short Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, - int Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned int Ax, int Bp, int Bj, unsigned int Bx, - int Cp, int Cj, unsigned int Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long long Ax, int Bp, int Bj, long long Bx, - int Cp, int Cj, long long Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - float Ax, int Bp, int Bj, float Bx, int Cp, - int Cj, float Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - double Ax, int Bp, int Bj, double Bx, int Cp, - int Cj, double Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long double Ax, int Bp, int Bj, long double Bx, - int Cp, int Cj, long double Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, int Bp, int Bj, - npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return 
_bsr.bsr_elmul_bsr(*args) - -def bsr_eldiv_bsr(*args): - """ - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - signed char Ax, int Bp, int Bj, signed char Bx, - int Cp, int Cj, signed char Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned char Ax, int Bp, int Bj, unsigned char Bx, - int Cp, int Cj, unsigned char Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - short Ax, int Bp, int Bj, short Bx, int Cp, - int Cj, short Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned short Ax, int Bp, int Bj, unsigned short Bx, - int Cp, int Cj, unsigned short Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, - int Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned int Ax, int Bp, int Bj, unsigned int Bx, - int Cp, int Cj, unsigned int Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long long Ax, int Bp, int Bj, long long Bx, - int Cp, int Cj, long long Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - float Ax, int Bp, int Bj, float Bx, int Cp, - int Cj, float Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - double Ax, int Bp, int Bj, double Bx, int Cp, - int Cj, double Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long double Ax, int Bp, int Bj, long double Bx, - int Cp, int Cj, long double Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, - 
int Cp, int Cj, npy_cdouble_wrapper Cx) - bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, int Bp, int Bj, - npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _bsr.bsr_eldiv_bsr(*args) - -def bsr_plus_bsr(*args): - """ - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - signed char Ax, int Bp, int Bj, signed char Bx, - int Cp, int Cj, signed char Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned char Ax, int Bp, int Bj, unsigned char Bx, - int Cp, int Cj, unsigned char Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - short Ax, int Bp, int Bj, short Bx, int Cp, - int Cj, short Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned short Ax, int Bp, int Bj, unsigned short Bx, - int Cp, int Cj, unsigned short Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, - int Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned int Ax, int Bp, int Bj, unsigned int Bx, - int Cp, int Cj, unsigned int Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long long Ax, int Bp, int Bj, long long Bx, - int Cp, int Cj, long long Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - float Ax, int Bp, int Bj, float Bx, int Cp, - int Cj, float Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - double Ax, int Bp, int Bj, double Bx, int Cp, - int Cj, double Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long double Ax, int Bp, int Bj, long double Bx, - int Cp, int Cj, long double Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - 
npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, int Bp, int Bj, - npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _bsr.bsr_plus_bsr(*args) - -def bsr_minus_bsr(*args): - """ - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - signed char Ax, int Bp, int Bj, signed char Bx, - int Cp, int Cj, signed char Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned char Ax, int Bp, int Bj, unsigned char Bx, - int Cp, int Cj, unsigned char Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - short Ax, int Bp, int Bj, short Bx, int Cp, - int Cj, short Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned short Ax, int Bp, int Bj, unsigned short Bx, - int Cp, int Cj, unsigned short Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, - int Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned int Ax, int Bp, int Bj, unsigned int Bx, - int Cp, int Cj, unsigned int Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long long Ax, int Bp, int Bj, long long Bx, - int Cp, int Cj, long long Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - float Ax, int Bp, int Bj, float Bx, int Cp, - int Cj, float Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - double Ax, int Bp, int Bj, double Bx, int Cp, - 
int Cj, double Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long double Ax, int Bp, int Bj, long double Bx, - int Cp, int Cj, long double Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, int Bp, int Bj, - npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _bsr.bsr_minus_bsr(*args) - -def bsr_sort_indices(*args): - """ - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - signed char Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned char Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - short Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned short Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - int Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned int Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long long Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - unsigned long long Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - float Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - double Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - long double Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax) - bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax) - bsr_sort_indices(int 
n_brow, int n_bcol, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax) - """ - return _bsr.bsr_sort_indices(*args) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/bsr_wrap.cxx.REMOVED.git-id b/scipy-0.10.1/scipy/sparse/sparsetools/bsr_wrap.cxx.REMOVED.git-id deleted file mode 100644 index ee4343ec31..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/bsr_wrap.cxx.REMOVED.git-id +++ /dev/null @@ -1 +0,0 @@ -36184a5849a9ae683a89759830be6dea7615ba71 \ No newline at end of file diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/complex_ops.h b/scipy-0.10.1/scipy/sparse/sparsetools/complex_ops.h deleted file mode 100644 index 7762001622..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/complex_ops.h +++ /dev/null @@ -1,98 +0,0 @@ -#ifndef COMPLEX_OPS_H -#define COMPLEX_OPS_H - -/* - * Functions to handle arithmetic operations on NumPy complex values - */ - -#include -#include - -template -class complex_wrapper : public npy_type { - template - friend std::ostream& operator<<(std::ostream&, const complex_wrapper& ); - - public: - complex_wrapper( const c_type r = c_type(0), const c_type i = c_type(0) ){ - npy_type::real = r; - npy_type::imag = i; - } - complex_wrapper operator-() const { - return complex_wrapper(-npy_type::real,-npy_type::imag); - } - complex_wrapper operator+(const complex_wrapper& B) const { - return complex_wrapper(npy_type::real + B.real, npy_type::imag + B.imag); - } - complex_wrapper operator-(const complex_wrapper& B) const { - return complex_wrapper(npy_type::real - B.real, npy_type::imag - B.imag); - } - complex_wrapper operator*(const complex_wrapper& B) const { - return complex_wrapper(npy_type::real * B.real - npy_type::imag * B.imag, - npy_type::real * B.imag + npy_type::imag * B.real); - } - complex_wrapper operator/(const complex_wrapper& B) const { - complex_wrapper result; - c_type denom = 1.0 / (B.real * B.real + B.imag * B.imag); - result.real = (npy_type::real * B.real + npy_type::imag * B.imag) * denom; - 
result.imag = (npy_type::imag * B.real - npy_type::real * B.imag) * denom; - return result; - } - complex_wrapper& operator+=(const complex_wrapper & B){ - npy_type::real += B.real; - npy_type::imag += B.imag; - return (*this); - } - complex_wrapper& operator-=(const complex_wrapper & B){ - npy_type::real -= B.real; - npy_type::imag -= B.imag; - return (*this); - } - complex_wrapper& operator*=(const complex_wrapper & B){ - c_type temp = npy_type::real * B.real - npy_type::imag * B.imag; - npy_type::imag = npy_type::real * B.imag + npy_type::imag * B.real; - npy_type::real = temp; - return (*this); - } - complex_wrapper& operator/=(const complex_wrapper & B){ - c_type denom = 1.0 / (B.real * B.real + B.imag * B.imag); - c_type temp = (npy_type::real * B.real + npy_type::imag * B.imag) * denom; - npy_type::imag = (npy_type::imag * B.real - npy_type::real * B.imag) * denom; - npy_type::real = temp; - return (*this); - } - bool operator==(const complex_wrapper& B) const{ - return npy_type::real == B.real && npy_type::imag == B.imag; - } - bool operator!=(const complex_wrapper& B) const{ - return npy_type::real != B.real || npy_type::imag != B.imag; - } - bool operator==(const c_type& B) const{ - return npy_type::real == B && npy_type::imag == c_type(0); - } - bool operator!=(const c_type& B) const{ - return npy_type::real != B || npy_type::imag != c_type(0); - } - complex_wrapper& operator=(const complex_wrapper& B){ - npy_type::real = B.real; - npy_type::imag = B.imag; - return (*this); - } - complex_wrapper& operator=(const c_type& B){ - npy_type::real = B; - npy_type::imag = c_type(0); - return (*this); - } -}; - -template -std::ostream& operator<<(std::ostream& out, const complex_wrapper& cw){ - return out << cw.real << " " << cw.imag; -} - -typedef complex_wrapper npy_cfloat_wrapper; -typedef complex_wrapper npy_cdouble_wrapper; -typedef complex_wrapper npy_clongdouble_wrapper; - - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/coo.h 
b/scipy-0.10.1/scipy/sparse/sparsetools/coo.h deleted file mode 100644 index 87d2341757..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/coo.h +++ /dev/null @@ -1,175 +0,0 @@ -#ifndef __COO_H__ -#define __COO_H__ - -#include -#include - -/* - * Compute B = A for COO matrix A, CSR matrix B - * - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I nnz - number of nonzeros in A - * I Ai[nnz(A)] - row indices - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * Output Arguments: - * I Bp - row pointer - * I Bj - column indices - * T Bx - nonzeros - * - * Note: - * Output arrays Bp, Bj, and Bx must be preallocated - * - * Note: - * Input: row and column indices *are not* assumed to be ordered - * - * Note: duplicate entries are carried over to the CSR represention - * - * Complexity: Linear. Specifically O(nnz(A) + max(n_row,n_col)) - * - */ -template -void coo_tocsr(const I n_row, - const I n_col, - const I nnz, - const I Ai[], - const I Aj[], - const T Ax[], - I Bp[], - I Bj[], - T Bx[]) -{ - //compute number of non-zero entries per row of A - std::fill(Bp, Bp + n_row, 0); - - for (I n = 0; n < nnz; n++){ - Bp[Ai[n]]++; - } - - //cumsum the nnz per row to get Bp[] - for(I i = 0, cumsum = 0; i < n_row; i++){ - I temp = Bp[i]; - Bp[i] = cumsum; - cumsum += temp; - } - Bp[n_row] = nnz; - - //write Aj,Ax into Bj,Bx - for(I n = 0; n < nnz; n++){ - I row = Ai[n]; - I dest = Bp[row]; - - Bj[dest] = Aj[n]; - Bx[dest] = Ax[n]; - - Bp[row]++; - } - - for(I i = 0, last = 0; i <= n_row; i++){ - I temp = Bp[i]; - Bp[i] = last; - last = temp; - } - - //now Bp,Bj,Bx form a CSR representation (with possible duplicates) -} - -template -void coo_tocsc(const I n_row, - const I n_col, - const I nnz, - const I Ai[], - const I Aj[], - const T Ax[], - I Bp[], - I Bi[], - T Bx[]) -{ coo_tocsr(n_col, n_row, nnz, Aj, Ai, Ax, Bp, Bi, Bx); } - -/* - * Compute B += A for COO matrix A, dense matrix B - * - * Input Arguments: - * I 
n_row - number of rows in A - * I n_col - number of columns in A - * I nnz - number of nonzeros in A - * I Ai[nnz(A)] - row indices - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * T Bx[n_row*n_col] - dense matrix - * - */ -template -void coo_todense(const I n_row, - const I n_col, - const I nnz, - const I Ai[], - const I Aj[], - const T Ax[], - T Bx[]) -{ - for(I n = 0; n < nnz; n++){ - Bx[ n_col * Ai[n] + Aj[n] ] += Ax[n]; - } -} - - -/* - * Compute Y += A*X for COO matrix A and dense vectors X,Y - * - * - * Input Arguments: - * I nnz - number of nonzeros in A - * I Ai[nnz] - row indices - * I Aj[nnz] - column indices - * T Ax[nnz] - nonzero values - * T Xx[n_col] - input vector - * - * Output Arguments: - * T Yx[n_row] - output vector - * - * Notes: - * Output array Yx must be preallocated - * - * Complexity: Linear. Specifically O(nnz(A)) - * - */ -template -void coo_matvec(const I nnz, - const I Ai[], - const I Aj[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - for(I n = 0; n < nnz; n++){ - Yx[Ai[n]] += Ax[n] * Xx[Aj[n]]; - } -} - -/* - * Count the number of occupied diagonals in COO matrix A - * - * Input Arguments: - * I nnz - number of nonzeros in A - * I Ai[nnz(A)] - row indices - * I Aj[nnz(A)] - column indices - * - */ -template -I coo_count_diagonals(const I nnz, - const I Ai[], - const I Aj[]) -{ - std::set diagonals; - for(I n = 0; n < nnz; n++){ - diagonals.insert(Aj[n] - Ai[n]); - } - return diagonals.size(); -} - - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/coo.py b/scipy-0.10.1/scipy/sparse/sparsetools/coo.py deleted file mode 100644 index 62440c9728..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/coo.py +++ /dev/null @@ -1,201 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 2.0.1+capsulehack -# -# Do not make changes to this file unless you know what you are doing--modify -# the SWIG interface file instead. 
-# This file is compatible with both classic and new-style classes. - -from sys import version_info -if version_info >= (2,6,0): - def swig_import_helper(): - from os.path import dirname - import imp - fp = None - try: - fp, pathname, description = imp.find_module('_coo', [dirname(__file__)]) - except ImportError: - import _coo - return _coo - if fp is not None: - try: - _mod = imp.load_module('_coo', fp, pathname, description) - finally: - fp.close() - return _mod - _coo = swig_import_helper() - del swig_import_helper -else: - import _coo -del version_info -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. -def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'SwigPyObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError(name) - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -try: - _object = object - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 - - - -def coo_count_diagonals(*args): - """coo_count_diagonals(int nnz, int Ai, int Aj) -> int""" - return _coo.coo_count_diagonals(*args) - - -def coo_tocsr(*args): - """ - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, signed char 
Ax, - int Bp, int Bj, signed char Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, - int Bp, int Bj, unsigned char Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, - int Bp, int Bj, short Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, - int Bp, int Bj, unsigned short Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, - int Bp, int Bj, int Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, - int Bp, int Bj, unsigned int Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, - int Bp, int Bj, long long Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, - int Bp, int Bj, unsigned long long Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, - int Bp, int Bj, float Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, - int Bp, int Bj, double Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, - int Bp, int Bj, long double Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx) - coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx) - """ - return _coo.coo_tocsr(*args) - -def coo_tocsc(*args): - """ - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, - int Bp, int Bi, signed char Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, - int Bp, int Bi, unsigned char Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, - int Bp, int Bi, short Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, - int Bp, int Bi, unsigned short Bx) - coo_tocsc(int 
n_row, int n_col, int nnz, int Ai, int Aj, int Ax, - int Bp, int Bi, int Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, - int Bp, int Bi, unsigned int Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, - int Bp, int Bi, long long Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, - int Bp, int Bi, float Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, - int Bp, int Bi, double Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, - int Bp, int Bi, long double Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx) - coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bi, npy_clongdouble_wrapper Bx) - """ - return _coo.coo_tocsc(*args) - -def coo_todense(*args): - """ - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, - signed char Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, - unsigned char Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, - short Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, - unsigned short Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, - int Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, - unsigned int Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, - long long Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, - unsigned long long Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, - 
float Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, - double Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, - long double Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Bx) - coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Bx) - """ - return _coo.coo_todense(*args) - -def coo_matvec(*args): - """ - coo_matvec(int nnz, int Ai, int Aj, signed char Ax, signed char Xx, - signed char Yx) - coo_matvec(int nnz, int Ai, int Aj, unsigned char Ax, unsigned char Xx, - unsigned char Yx) - coo_matvec(int nnz, int Ai, int Aj, short Ax, short Xx, short Yx) - coo_matvec(int nnz, int Ai, int Aj, unsigned short Ax, unsigned short Xx, - unsigned short Yx) - coo_matvec(int nnz, int Ai, int Aj, int Ax, int Xx, int Yx) - coo_matvec(int nnz, int Ai, int Aj, unsigned int Ax, unsigned int Xx, - unsigned int Yx) - coo_matvec(int nnz, int Ai, int Aj, long long Ax, long long Xx, - long long Yx) - coo_matvec(int nnz, int Ai, int Aj, unsigned long long Ax, unsigned long long Xx, - unsigned long long Yx) - coo_matvec(int nnz, int Ai, int Aj, float Ax, float Xx, float Yx) - coo_matvec(int nnz, int Ai, int Aj, double Ax, double Xx, double Yx) - coo_matvec(int nnz, int Ai, int Aj, long double Ax, long double Xx, - long double Yx) - coo_matvec(int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx, - npy_cfloat_wrapper Yx) - coo_matvec(int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx, - npy_cdouble_wrapper Yx) - coo_matvec(int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx) - """ - return _coo.coo_matvec(*args) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/coo_wrap.cxx 
b/scipy-0.10.1/scipy/sparse/sparsetools/coo_wrap.cxx deleted file mode 100644 index 5423138e8e..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/coo_wrap.cxx +++ /dev/null @@ -1,14002 +0,0 @@ -/* ---------------------------------------------------------------------------- - * This file was automatically generated by SWIG (http://www.swig.org). - * Version 2.0.1+capsulehack - * - * This file is not intended to be easily readable and contains a number of - * coding conventions designed to improve portability and efficiency. Do not make - * changes to this file unless you know what you are doing--modify the SWIG - * interface file instead. - * ----------------------------------------------------------------------------- */ - -#define SWIGPYTHON -#define SWIG_PYTHON_DIRECTOR_NO_VTABLE - - -#ifdef __cplusplus -/* SwigValueWrapper is described in swig.swg */ -template class SwigValueWrapper { - struct SwigMovePointer { - T *ptr; - SwigMovePointer(T *p) : ptr(p) { } - ~SwigMovePointer() { delete ptr; } - SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } - } pointer; - SwigValueWrapper& operator=(const SwigValueWrapper& rhs); - SwigValueWrapper(const SwigValueWrapper& rhs); -public: - SwigValueWrapper() : pointer(0) { } - SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } - operator T&() const { return *pointer.ptr; } - T *operator&() { return pointer.ptr; } -}; - -template T SwigValueInit() { - return T(); -} -#endif - -/* ----------------------------------------------------------------------------- - * This section contains generic SWIG labels for method/variable - * declarations/attributes, and other compiler dependent labels. 
- * ----------------------------------------------------------------------------- */ - -/* template workaround for compilers that cannot correctly implement the C++ standard */ -#ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template -# else -# define SWIGTEMPLATEDISAMBIGUATOR -# endif -#endif - -/* inline attribute */ -#ifndef SWIGINLINE -# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) -# define SWIGINLINE inline -# else -# define SWIGINLINE -# endif -#endif - -/* attribute recognised by some compilers to avoid 'unused' warnings */ -#ifndef SWIGUNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -# elif defined(__ICC) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -#endif - -#ifndef SWIG_MSC_UNSUPPRESS_4505 -# if defined(_MSC_VER) -# pragma warning(disable : 4505) /* unreferenced local function has been removed */ -# endif -#endif - -#ifndef SWIGUNUSEDPARM -# ifdef __cplusplus -# define SWIGUNUSEDPARM(p) -# else -# define SWIGUNUSEDPARM(p) p SWIGUNUSED -# endif -#endif - -/* internal SWIG method */ -#ifndef SWIGINTERN -# define SWIGINTERN static SWIGUNUSED -#endif - -/* internal inline SWIG method */ -#ifndef SWIGINTERNINLINE -# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE -#endif - -/* exporting methods */ -#if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -# ifndef GCC_HASCLASSVISIBILITY -# define GCC_HASCLASSVISIBILITY -# endif -#endif - -#ifndef SWIGEXPORT -# if defined(_WIN32) || 
defined(__WIN32__) || defined(__CYGWIN__) -# if defined(STATIC_LINKED) -# define SWIGEXPORT -# else -# define SWIGEXPORT __declspec(dllexport) -# endif -# else -# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) -# define SWIGEXPORT __attribute__ ((visibility("default"))) -# else -# define SWIGEXPORT -# endif -# endif -#endif - -/* calling conventions for Windows */ -#ifndef SWIGSTDCALL -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# define SWIGSTDCALL __stdcall -# else -# define SWIGSTDCALL -# endif -#endif - -/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ -#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -#endif - -/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ -#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) -# define _SCL_SECURE_NO_DEPRECATE -#endif - - - -/* Python.h has to appear first */ -#include - -/* ----------------------------------------------------------------------------- - * swigrun.swg - * - * This file contains generic C API SWIG runtime support for pointer - * type checking. - * ----------------------------------------------------------------------------- */ - -/* This should only be incremented when either the layout of swig_type_info changes, - or for whatever reason, the runtime changes incompatibly */ -#define SWIG_RUNTIME_VERSION "4" - -/* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */ -#ifdef SWIG_TYPE_TABLE -# define SWIG_QUOTE_STRING(x) #x -# define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x) -# define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE) -#else -# define SWIG_TYPE_TABLE_NAME -#endif - -/* - You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for - creating a static or dynamic library from the SWIG runtime code. 
- In 99.9% of the cases, SWIG just needs to declare them as 'static'. - - But only do this if strictly necessary, ie, if you have problems - with your compiler or suchlike. -*/ - -#ifndef SWIGRUNTIME -# define SWIGRUNTIME SWIGINTERN -#endif - -#ifndef SWIGRUNTIMEINLINE -# define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE -#endif - -/* Generic buffer size */ -#ifndef SWIG_BUFFER_SIZE -# define SWIG_BUFFER_SIZE 1024 -#endif - -/* Flags for pointer conversions */ -#define SWIG_POINTER_DISOWN 0x1 -#define SWIG_CAST_NEW_MEMORY 0x2 - -/* Flags for new pointer objects */ -#define SWIG_POINTER_OWN 0x1 - - -/* - Flags/methods for returning states. - - The SWIG conversion methods, as ConvertPtr, return an integer - that tells if the conversion was successful or not. And if not, - an error code can be returned (see swigerrors.swg for the codes). - - Use the following macros/flags to set or process the returning - states. - - In old versions of SWIG, code such as the following was usually written: - - if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) { - // success code - } else { - //fail code - } - - Now you can be more explicit: - - int res = SWIG_ConvertPtr(obj,vptr,ty.flags); - if (SWIG_IsOK(res)) { - // success code - } else { - // fail code - } - - which is the same really, but now you can also do - - Type *ptr; - int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags); - if (SWIG_IsOK(res)) { - // success code - if (SWIG_IsNewObj(res) { - ... - delete *ptr; - } else { - ... - } - } else { - // fail code - } - - I.e., now SWIG_ConvertPtr can return new objects and you can - identify the case and take care of the deallocation. Of course that - also requires SWIG_ConvertPtr to return new result values, such as - - int SWIG_ConvertPtr(obj, ptr,...) 
{ - if () { - if () { - *ptr = ; - return SWIG_NEWOBJ; - } else { - *ptr = ; - return SWIG_OLDOBJ; - } - } else { - return SWIG_BADOBJ; - } - } - - Of course, returning the plain '0(success)/-1(fail)' still works, but you can be - more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the - SWIG errors code. - - Finally, if the SWIG_CASTRANK_MODE is enabled, the result code - allows to return the 'cast rank', for example, if you have this - - int food(double) - int fooi(int); - - and you call - - food(1) // cast rank '1' (1 -> 1.0) - fooi(1) // cast rank '0' - - just use the SWIG_AddCast()/SWIG_CheckState() -*/ - -#define SWIG_OK (0) -#define SWIG_ERROR (-1) -#define SWIG_IsOK(r) (r >= 0) -#define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError) - -/* The CastRankLimit says how many bits are used for the cast rank */ -#define SWIG_CASTRANKLIMIT (1 << 8) -/* The NewMask denotes the object was created (using new/malloc) */ -#define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1) -/* The TmpMask is for in/out typemaps that use temporal objects */ -#define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1) -/* Simple returning values */ -#define SWIG_BADOBJ (SWIG_ERROR) -#define SWIG_OLDOBJ (SWIG_OK) -#define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK) -#define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK) -/* Check, add and del mask methods */ -#define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r) -#define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r) -#define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK)) -#define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r) -#define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? 
(r & ~SWIG_TMPOBJMASK) : r) -#define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK)) - -/* Cast-Rank Mode */ -#if defined(SWIG_CASTRANK_MODE) -# ifndef SWIG_TypeRank -# define SWIG_TypeRank unsigned long -# endif -# ifndef SWIG_MAXCASTRANK /* Default cast allowed */ -# define SWIG_MAXCASTRANK (2) -# endif -# define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1) -# define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK) -SWIGINTERNINLINE int SWIG_AddCast(int r) { - return SWIG_IsOK(r) ? ((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r; -} -SWIGINTERNINLINE int SWIG_CheckState(int r) { - return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0; -} -#else /* no cast-rank mode */ -# define SWIG_AddCast -# define SWIG_CheckState(r) (SWIG_IsOK(r) ? 1 : 0) -#endif - - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void *(*swig_converter_func)(void *, int *); -typedef struct swig_type_info *(*swig_dycast_func)(void **); - -/* Structure to store information on one type */ -typedef struct swig_type_info { - const char *name; /* mangled name of this type */ - const char *str; /* human readable name of this type */ - swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ - struct swig_cast_info *cast; /* linked list of types that can cast into this type */ - void *clientdata; /* language specific type data */ - int owndata; /* flag if the structure owns the clientdata */ -} swig_type_info; - -/* Structure to store a type and conversion function used for casting */ -typedef struct swig_cast_info { - swig_type_info *type; /* pointer to type that is equivalent to this type */ - swig_converter_func converter; /* function to cast the void pointers */ - struct swig_cast_info *next; /* pointer to next cast in linked list */ - struct swig_cast_info *prev; /* pointer to the previous cast */ -} swig_cast_info; - -/* Structure used to store module information - * Each module generates one structure like this, and the runtime collects - * all of these 
structures and stores them in a circularly linked list.*/ -typedef struct swig_module_info { - swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */ - size_t size; /* Number of types in this module */ - struct swig_module_info *next; /* Pointer to next element in circularly linked list */ - swig_type_info **type_initial; /* Array of initially generated type structures */ - swig_cast_info **cast_initial; /* Array of initially generated casting structures */ - void *clientdata; /* Language specific module data */ -} swig_module_info; - -/* - Compare two type names skipping the space characters, therefore - "char*" == "char *" and "Class" == "Class", etc. - - Return 0 when the two name types are equivalent, as in - strncmp, but skipping ' '. -*/ -SWIGRUNTIME int -SWIG_TypeNameComp(const char *f1, const char *l1, - const char *f2, const char *l2) { - for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) { - while ((*f1 == ' ') && (f1 != l1)) ++f1; - while ((*f2 == ' ') && (f2 != l2)) ++f2; - if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1; - } - return (int)((l1 - f1) - (l2 - f2)); -} - -/* - Check type equivalence in a name list like ||... - Return 0 if not equal, 1 if equal -*/ -SWIGRUNTIME int -SWIG_TypeEquiv(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0; - if (*ne) ++ne; - } - return equiv; -} - -/* - Check type equivalence in a name list like ||... - Return 0 if equal, -1 if nb < tb, 1 if nb > tb -*/ -SWIGRUNTIME int -SWIG_TypeCompare(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 
1 : 0; - if (*ne) ++ne; - } - return equiv; -} - - -/* - Check the typename -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheck(const char *c, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (strcmp(iter->type->name, c) == 0) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Identical to SWIG_TypeCheck, except strcmp is replaced with a pointer comparison -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (iter->type == from) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Cast a pointer up an inheritance hierarchy -*/ -SWIGRUNTIMEINLINE void * -SWIG_TypeCast(swig_cast_info *ty, void *ptr, int *newmemory) { - return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr, newmemory); -} - -/* - Dynamic pointer casting. 
Down an inheritance hierarchy -*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) { - swig_type_info *lastty = ty; - if (!ty || !ty->dcast) return ty; - while (ty && (ty->dcast)) { - ty = (*ty->dcast)(ptr); - if (ty) lastty = ty; - } - return lastty; -} - -/* - Return the name associated with this type -*/ -SWIGRUNTIMEINLINE const char * -SWIG_TypeName(const swig_type_info *ty) { - return ty->name; -} - -/* - Return the pretty name associated with this type, - that is an unmangled type name in a form presentable to the user. -*/ -SWIGRUNTIME const char * -SWIG_TypePrettyName(const swig_type_info *type) { - /* The "str" field contains the equivalent pretty names of the - type, separated by vertical-bar characters. We choose - to print the last name, as it is often (?) the most - specific. */ - if (!type) return NULL; - if (type->str != NULL) { - const char *last_name = type->str; - const char *s; - for (s = type->str; *s; s++) - if (*s == '|') last_name = s+1; - return last_name; - } - else - return type->name; -} - -/* - Set the clientdata field for a type -*/ -SWIGRUNTIME void -SWIG_TypeClientData(swig_type_info *ti, void *clientdata) { - swig_cast_info *cast = ti->cast; - /* if (ti->clientdata == clientdata) return; */ - ti->clientdata = clientdata; - - while (cast) { - if (!cast->converter) { - swig_type_info *tc = cast->type; - if (!tc->clientdata) { - SWIG_TypeClientData(tc, clientdata); - } - } - cast = cast->next; - } -} -SWIGRUNTIME void -SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) { - SWIG_TypeClientData(ti, clientdata); - ti->owndata = 1; -} - -/* - Search for a swig_type_info structure only by mangled name - Search is a O(log #types) - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_MangledTypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - swig_module_info *iter = start; - do { - if (iter->size) { - register size_t l = 0; - register size_t r = iter->size - 1; - do { - /* since l+r >= 0, we can (>> 1) instead (/ 2) */ - register size_t i = (l + r) >> 1; - const char *iname = iter->types[i]->name; - if (iname) { - register int compare = strcmp(name, iname); - if (compare == 0) { - return iter->types[i]; - } else if (compare < 0) { - if (i) { - r = i - 1; - } else { - break; - } - } else if (compare > 0) { - l = i + 1; - } - } else { - break; /* should never happen */ - } - } while (l <= r); - } - iter = iter->next; - } while (iter != end); - return 0; -} - -/* - Search for a swig_type_info structure for either a mangled name or a human readable name. - It first searches the mangled names of the types, which is a O(log #types) - If a type is not found it then searches the human readable names, which is O(#types). - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - /* STEP 1: Search the name field using binary search */ - swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name); - if (ret) { - return ret; - } else { - /* STEP 2: If the type hasn't been found, do a complete search - of the str field (the human readable name) */ - swig_module_info *iter = start; - do { - register size_t i = 0; - for (; i < iter->size; ++i) { - if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name))) - return iter->types[i]; - } - iter = iter->next; - } while (iter != end); - } - - /* neither found a match */ - return 0; -} - -/* - Pack binary data into a string -*/ -SWIGRUNTIME char * -SWIG_PackData(char *c, void *ptr, size_t sz) { - static const char hex[17] = "0123456789abcdef"; - register const unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register unsigned char uu = *u; - *(c++) = hex[(uu & 0xf0) >> 4]; - *(c++) = hex[uu & 0xf]; - } - return c; -} - -/* - Unpack binary data from a string -*/ -SWIGRUNTIME const char * -SWIG_UnpackData(const char *c, void *ptr, size_t sz) { - register unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register char d = *(c++); - register unsigned char uu; - if ((d >= '0') && (d <= '9')) - uu = ((d - '0') << 4); - else if ((d >= 'a') && (d <= 'f')) - uu = ((d - ('a'-10)) << 4); - else - return (char *) 0; - d = *(c++); - if ((d >= '0') && (d <= '9')) - uu |= (d - '0'); - else if ((d >= 'a') && (d <= 'f')) - uu |= (d - ('a'-10)); - else - return (char *) 0; - *u = uu; - } - return c; -} - -/* - Pack 'void *' into a string buffer. 
-*/ -SWIGRUNTIME char * -SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) { - char *r = buff; - if ((2*sizeof(void *) + 2) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,&ptr,sizeof(void *)); - if (strlen(name) + 1 > (bsz - (r - buff))) return 0; - strcpy(r,name); - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - *ptr = (void *) 0; - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sizeof(void *)); -} - -SWIGRUNTIME char * -SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) { - char *r = buff; - size_t lname = (name ? strlen(name) : 0); - if ((2*sz + 2 + lname) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,ptr,sz); - if (lname) { - strncpy(r,name,lname+1); - } else { - *r = 0; - } - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - memset(ptr,0,sz); - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sz); -} - -#ifdef __cplusplus -} -#endif - -/* Errors in SWIG */ -#define SWIG_UnknownError -1 -#define SWIG_IOError -2 -#define SWIG_RuntimeError -3 -#define SWIG_IndexError -4 -#define SWIG_TypeError -5 -#define SWIG_DivisionByZero -6 -#define SWIG_OverflowError -7 -#define SWIG_SyntaxError -8 -#define SWIG_ValueError -9 -#define SWIG_SystemError -10 -#define SWIG_AttributeError -11 -#define SWIG_MemoryError -12 -#define SWIG_NullReferenceError -13 - - - -/* Compatibility macros for Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - -#define PyClass_Check(obj) PyObject_IsInstance(obj, (PyObject *)&PyType_Type) -#define PyInt_Check(x) PyLong_Check(x) -#define PyInt_AsLong(x) PyLong_AsLong(x) -#define PyInt_FromLong(x) PyLong_FromLong(x) -#define PyString_Format(fmt, args) PyUnicode_Format(fmt, args) - -#endif - 
-#ifndef Py_TYPE -# define Py_TYPE(op) ((op)->ob_type) -#endif - -/* SWIG APIs for compatibility of both Python 2 & 3 */ - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_FromFormat PyUnicode_FromFormat -#else -# define SWIG_Python_str_FromFormat PyString_FromFormat -#endif - - -/* Warning: This function will allocate a new string in Python 3, - * so please call SWIG_Python_str_DelForPy3(x) to free the space. - */ -SWIGINTERN char* -SWIG_Python_str_AsChar(PyObject *str) -{ -#if PY_VERSION_HEX >= 0x03000000 - char *cstr; - char *newstr; - Py_ssize_t len; - str = PyUnicode_AsUTF8String(str); - PyBytes_AsStringAndSize(str, &cstr, &len); - newstr = (char *) malloc(len+1); - memcpy(newstr, cstr, len+1); - Py_XDECREF(str); - return newstr; -#else - return PyString_AsString(str); -#endif -} - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_DelForPy3(x) free( (void*) (x) ) -#else -# define SWIG_Python_str_DelForPy3(x) -#endif - - -SWIGINTERN PyObject* -SWIG_Python_str_FromChar(const char *c) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_FromString(c); -#else - return PyString_FromString(c); -#endif -} - -/* Add PyOS_snprintf for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) -# define PyOS_snprintf _snprintf -# else -# define PyOS_snprintf snprintf -# endif -#endif - -/* A crude PyString_FromFormat implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 - -#ifndef SWIG_PYBUFFER_SIZE -# define SWIG_PYBUFFER_SIZE 1024 -#endif - -static PyObject * -PyString_FromFormat(const char *fmt, ...) { - va_list ap; - char buf[SWIG_PYBUFFER_SIZE * 2]; - int res; - va_start(ap, fmt); - res = vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - return (res < 0 || res >= (int)sizeof(buf)) ? 
0 : PyString_FromString(buf); -} -#endif - -/* Add PyObject_Del for old Pythons */ -#if PY_VERSION_HEX < 0x01060000 -# define PyObject_Del(op) PyMem_DEL((op)) -#endif -#ifndef PyObject_DEL -# define PyObject_DEL PyObject_Del -#endif - -/* A crude PyExc_StopIteration exception for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# ifndef PyExc_StopIteration -# define PyExc_StopIteration PyExc_RuntimeError -# endif -# ifndef PyObject_GenericGetAttr -# define PyObject_GenericGetAttr 0 -# endif -#endif - -/* Py_NotImplemented is defined in 2.1 and up. */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef Py_NotImplemented -# define Py_NotImplemented PyExc_RuntimeError -# endif -#endif - -/* A crude PyString_AsStringAndSize implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef PyString_AsStringAndSize -# define PyString_AsStringAndSize(obj, s, len) {*s = PyString_AsString(obj); *len = *s ? strlen(*s) : 0;} -# endif -#endif - -/* PySequence_Size for old Pythons */ -#if PY_VERSION_HEX < 0x02000000 -# ifndef PySequence_Size -# define PySequence_Size PySequence_Length -# endif -#endif - -/* PyBool_FromLong for old Pythons */ -#if PY_VERSION_HEX < 0x02030000 -static -PyObject *PyBool_FromLong(long ok) -{ - PyObject *result = ok ? 
Py_True : Py_False; - Py_INCREF(result); - return result; -} -#endif - -/* Py_ssize_t for old Pythons */ -/* This code is as recommended by: */ -/* http://www.python.org/dev/peps/pep-0353/#conversion-guidelines */ -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -# define PY_SSIZE_T_MAX INT_MAX -# define PY_SSIZE_T_MIN INT_MIN -#endif - -/* ----------------------------------------------------------------------------- - * error manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIME PyObject* -SWIG_Python_ErrorType(int code) { - PyObject* type = 0; - switch(code) { - case SWIG_MemoryError: - type = PyExc_MemoryError; - break; - case SWIG_IOError: - type = PyExc_IOError; - break; - case SWIG_RuntimeError: - type = PyExc_RuntimeError; - break; - case SWIG_IndexError: - type = PyExc_IndexError; - break; - case SWIG_TypeError: - type = PyExc_TypeError; - break; - case SWIG_DivisionByZero: - type = PyExc_ZeroDivisionError; - break; - case SWIG_OverflowError: - type = PyExc_OverflowError; - break; - case SWIG_SyntaxError: - type = PyExc_SyntaxError; - break; - case SWIG_ValueError: - type = PyExc_ValueError; - break; - case SWIG_SystemError: - type = PyExc_SystemError; - break; - case SWIG_AttributeError: - type = PyExc_AttributeError; - break; - default: - type = PyExc_RuntimeError; - } - return type; -} - - -SWIGRUNTIME void -SWIG_Python_AddErrorMsg(const char* mesg) -{ - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - - if (PyErr_Occurred()) PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - PyErr_Clear(); - Py_XINCREF(type); - - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - Py_DECREF(value); - } else { - PyErr_SetString(PyExc_RuntimeError, mesg); - } -} - -#if defined(SWIG_PYTHON_NO_THREADS) -# if 
defined(SWIG_PYTHON_THREADS) -# undef SWIG_PYTHON_THREADS -# endif -#endif -#if defined(SWIG_PYTHON_THREADS) /* Threading support is enabled */ -# if !defined(SWIG_PYTHON_USE_GIL) && !defined(SWIG_PYTHON_NO_USE_GIL) -# if (PY_VERSION_HEX >= 0x02030000) /* For 2.3 or later, use the PyGILState calls */ -# define SWIG_PYTHON_USE_GIL -# endif -# endif -# if defined(SWIG_PYTHON_USE_GIL) /* Use PyGILState threads calls */ -# ifndef SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_INITIALIZE_THREADS PyEval_InitThreads() -# endif -# ifdef __cplusplus /* C++ code */ - class SWIG_Python_Thread_Block { - bool status; - PyGILState_STATE state; - public: - void end() { if (status) { PyGILState_Release(state); status = false;} } - SWIG_Python_Thread_Block() : status(true), state(PyGILState_Ensure()) {} - ~SWIG_Python_Thread_Block() { end(); } - }; - class SWIG_Python_Thread_Allow { - bool status; - PyThreadState *save; - public: - void end() { if (status) { PyEval_RestoreThread(save); status = false; }} - SWIG_Python_Thread_Allow() : status(true), save(PyEval_SaveThread()) {} - ~SWIG_Python_Thread_Allow() { end(); } - }; -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK SWIG_Python_Thread_Block _swig_thread_block -# define SWIG_PYTHON_THREAD_END_BLOCK _swig_thread_block.end() -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW SWIG_Python_Thread_Allow _swig_thread_allow -# define SWIG_PYTHON_THREAD_END_ALLOW _swig_thread_allow.end() -# else /* C code */ -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK PyGILState_STATE _swig_thread_block = PyGILState_Ensure() -# define SWIG_PYTHON_THREAD_END_BLOCK PyGILState_Release(_swig_thread_block) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW PyThreadState *_swig_thread_allow = PyEval_SaveThread() -# define SWIG_PYTHON_THREAD_END_ALLOW PyEval_RestoreThread(_swig_thread_allow) -# endif -# else /* Old thread way, not implemented, user must provide it */ -# if !defined(SWIG_PYTHON_INITIALIZE_THREADS) -# define SWIG_PYTHON_INITIALIZE_THREADS -# endif -# if 
!defined(SWIG_PYTHON_THREAD_BEGIN_BLOCK) -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_END_BLOCK) -# define SWIG_PYTHON_THREAD_END_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_BEGIN_ALLOW) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# endif -# if !defined(SWIG_PYTHON_THREAD_END_ALLOW) -# define SWIG_PYTHON_THREAD_END_ALLOW -# endif -# endif -#else /* No thread support */ -# define SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# define SWIG_PYTHON_THREAD_END_BLOCK -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# define SWIG_PYTHON_THREAD_END_ALLOW -#endif - -/* ----------------------------------------------------------------------------- - * Python API portion that goes into the runtime - * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* ----------------------------------------------------------------------------- - * Constant declarations - * ----------------------------------------------------------------------------- */ - -/* Constant Types */ -#define SWIG_PY_POINTER 4 -#define SWIG_PY_BINARY 5 - -/* Constant information structure */ -typedef struct swig_const_info { - int type; - char *name; - long lvalue; - double dvalue; - void *pvalue; - swig_type_info **ptype; -} swig_const_info; - - -/* ----------------------------------------------------------------------------- - * Wrapper of PyInstanceMethod_New() used in Python 3 - * It is exported to the generated module, used for -fastproxy - * ----------------------------------------------------------------------------- */ -SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *self, PyObject *func) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyInstanceMethod_New(func); -#else - return NULL; -#endif -} - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - -/* 
----------------------------------------------------------------------------- - * pyrun.swg - * - * This file contains the runtime support for Python modules - * and includes code for managing global variables and pointer - * type checking. - * - * ----------------------------------------------------------------------------- */ - -/* Common SWIG API */ - -/* for raw pointers */ -#define SWIG_Python_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, 0) -#define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtr(obj, pptr, type, flags) -#define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, own) -#define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(ptr, type, flags) -#define SWIG_CheckImplicit(ty) SWIG_Python_CheckImplicit(ty) -#define SWIG_AcquirePtr(ptr, src) SWIG_Python_AcquirePtr(ptr, src) -#define swig_owntype int - -/* for raw packed data */ -#define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewPackedObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - -/* for class or struct pointers */ -#define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags) -#define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags) - -/* for C or C++ function pointers */ -#define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_Python_ConvertFunctionPtr(obj, pptr, type) -#define SWIG_NewFunctionPtrObj(ptr, type) SWIG_Python_NewPointerObj(ptr, type, 0) - -/* for C++ member pointers, ie, member methods */ -#define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewMemberObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - - -/* Runtime API */ - -#define SWIG_GetModule(clientdata) SWIG_Python_GetModule() -#define SWIG_SetModule(clientdata, pointer) SWIG_Python_SetModule(pointer) -#define 
SWIG_NewClientData(obj) SwigPyClientData_New(obj) - -#define SWIG_SetErrorObj SWIG_Python_SetErrorObj -#define SWIG_SetErrorMsg SWIG_Python_SetErrorMsg -#define SWIG_ErrorType(code) SWIG_Python_ErrorType(code) -#define SWIG_Error(code, msg) SWIG_Python_SetErrorMsg(SWIG_ErrorType(code), msg) -#define SWIG_fail goto fail - -/* - * Python 2.7 and newer and Python 3.1 and newer should use Capsules API instead of - * CObjects API. - */ -#if ((PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION > 6) || \ - (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 0)) -#define USE_CAPSULES -#define TYPE_POINTER_NAME \ - ((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION ".type_pointer_capsule" SWIG_TYPE_TABLE_NAME) -#endif - -/* Runtime API implementation */ - -/* Error manipulation */ - -SWIGINTERN void -SWIG_Python_SetErrorObj(PyObject *errtype, PyObject *obj) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetObject(errtype, obj); - Py_DECREF(obj); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -SWIGINTERN void -SWIG_Python_SetErrorMsg(PyObject *errtype, const char *msg) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetString(errtype, (char *) msg); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -#define SWIG_Python_Raise(obj, type, desc) SWIG_Python_SetErrorObj(SWIG_Python_ExceptionType(desc), obj) - -/* Set a constant value */ - -SWIGINTERN void -SWIG_Python_SetConstant(PyObject *d, const char *name, PyObject *obj) { - PyDict_SetItemString(d, (char*) name, obj); - Py_DECREF(obj); -} - -/* Append a value to the result obj */ - -SWIGINTERN PyObject* -SWIG_Python_AppendOutput(PyObject* result, PyObject* obj) { -#if !defined(SWIG_PYTHON_OUTPUT_TUPLE) - if (!result) { - result = obj; - } else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyList_Check(result)) { - PyObject *o2 = result; - result = PyList_New(1); - PyList_SetItem(result, 0, o2); - } - PyList_Append(result,obj); - Py_DECREF(obj); - } - return result; -#else - PyObject* o2; - PyObject* o3; - if (!result) { - result = obj; - } 
else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyTuple_Check(result)) { - o2 = result; - result = PyTuple_New(1); - PyTuple_SET_ITEM(result, 0, o2); - } - o3 = PyTuple_New(1); - PyTuple_SET_ITEM(o3, 0, obj); - o2 = result; - result = PySequence_Concat(o2, o3); - Py_DECREF(o2); - Py_DECREF(o3); - } - return result; -#endif -} - -/* Unpack the argument tuple */ - -SWIGINTERN int -SWIG_Python_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, PyObject **objs) -{ - if (!args) { - if (!min && !max) { - return 1; - } else { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got none", - name, (min == max ? "" : "at least "), (int)min); - return 0; - } - } - if (!PyTuple_Check(args)) { - PyErr_SetString(PyExc_SystemError, "UnpackTuple() argument list is not a tuple"); - return 0; - } else { - register Py_ssize_t l = PyTuple_GET_SIZE(args); - if (l < min) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? "" : "at least "), (int)min, (int)l); - return 0; - } else if (l > max) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? 
"" : "at most "), (int)max, (int)l); - return 0; - } else { - register int i; - for (i = 0; i < l; ++i) { - objs[i] = PyTuple_GET_ITEM(args, i); - } - for (; l < max; ++l) { - objs[l] = 0; - } - return i + 1; - } - } -} - -/* A functor is a function object with one single object argument */ -#if PY_VERSION_HEX >= 0x02020000 -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunctionObjArgs(functor, obj, NULL); -#else -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunction(functor, "O", obj); -#endif - -/* - Helper for static pointer initialization for both C and C++ code, for example - static PyObject *SWIG_STATIC_POINTER(MyVar) = NewSomething(...); -*/ -#ifdef __cplusplus -#define SWIG_STATIC_POINTER(var) var -#else -#define SWIG_STATIC_POINTER(var) var = 0; if (!var) var -#endif - -/* ----------------------------------------------------------------------------- - * Pointer declarations - * ----------------------------------------------------------------------------- */ - -/* Flags for new pointer objects */ -#define SWIG_POINTER_NOSHADOW (SWIG_POINTER_OWN << 1) -#define SWIG_POINTER_NEW (SWIG_POINTER_NOSHADOW | SWIG_POINTER_OWN) - -#define SWIG_POINTER_IMPLICIT_CONV (SWIG_POINTER_DISOWN << 1) - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* How to access Py_None */ -#if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# ifndef SWIG_PYTHON_NO_BUILD_NONE -# ifndef SWIG_PYTHON_BUILD_NONE -# define SWIG_PYTHON_BUILD_NONE -# endif -# endif -#endif - -#ifdef SWIG_PYTHON_BUILD_NONE -# ifdef Py_None -# undef Py_None -# define Py_None SWIG_Py_None() -# endif -SWIGRUNTIMEINLINE PyObject * -_SWIG_Py_None(void) -{ - PyObject *none = Py_BuildValue((char*)""); - Py_DECREF(none); - return none; -} -SWIGRUNTIME PyObject * -SWIG_Py_None(void) -{ - static PyObject *SWIG_STATIC_POINTER(none) = _SWIG_Py_None(); - return none; -} -#endif - -/* The python void return value */ - -SWIGRUNTIMEINLINE PyObject * 
-SWIG_Py_Void(void) -{ - PyObject *none = Py_None; - Py_INCREF(none); - return none; -} - -/* SwigPyClientData */ - -typedef struct { - PyObject *klass; - PyObject *newraw; - PyObject *newargs; - PyObject *destroy; - int delargs; - int implicitconv; -} SwigPyClientData; - -SWIGRUNTIMEINLINE int -SWIG_Python_CheckImplicit(swig_type_info *ty) -{ - SwigPyClientData *data = (SwigPyClientData *)ty->clientdata; - return data ? data->implicitconv : 0; -} - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_ExceptionType(swig_type_info *desc) { - SwigPyClientData *data = desc ? (SwigPyClientData *) desc->clientdata : 0; - PyObject *klass = data ? data->klass : 0; - return (klass ? klass : PyExc_RuntimeError); -} - - -SWIGRUNTIME SwigPyClientData * -SwigPyClientData_New(PyObject* obj) -{ - if (!obj) { - return 0; - } else { - SwigPyClientData *data = (SwigPyClientData *)malloc(sizeof(SwigPyClientData)); - /* the klass element */ - data->klass = obj; - Py_INCREF(data->klass); - /* the newraw method and newargs arguments used to create a new raw instance */ - if (PyClass_Check(obj)) { - data->newraw = 0; - data->newargs = obj; - Py_INCREF(obj); - } else { -#if (PY_VERSION_HEX < 0x02020000) - data->newraw = 0; -#else - data->newraw = PyObject_GetAttrString(data->klass, (char *)"__new__"); -#endif - if (data->newraw) { - Py_INCREF(data->newraw); - data->newargs = PyTuple_New(1); - PyTuple_SetItem(data->newargs, 0, obj); - } else { - data->newargs = obj; - } - Py_INCREF(data->newargs); - } - /* the destroy method, aka as the C++ delete method */ - data->destroy = PyObject_GetAttrString(data->klass, (char *)"__swig_destroy__"); - if (PyErr_Occurred()) { - PyErr_Clear(); - data->destroy = 0; - } - if (data->destroy) { - int flags; - Py_INCREF(data->destroy); - flags = PyCFunction_GET_FLAGS(data->destroy); -#ifdef METH_O - data->delargs = !(flags & (METH_O)); -#else - data->delargs = 0; -#endif - } else { - data->delargs = 0; - } - data->implicitconv = 0; - return data; - } -} - 
-SWIGRUNTIME void -SwigPyClientData_Del(SwigPyClientData* data) -{ - Py_XDECREF(data->newraw); - Py_XDECREF(data->newargs); - Py_XDECREF(data->destroy); -} - -/* =============== SwigPyObject =====================*/ - -typedef struct { - PyObject_HEAD - void *ptr; - swig_type_info *ty; - int own; - PyObject *next; -} SwigPyObject; - -SWIGRUNTIME PyObject * -SwigPyObject_long(SwigPyObject *v) -{ - return PyLong_FromVoidPtr(v->ptr); -} - -SWIGRUNTIME PyObject * -SwigPyObject_format(const char* fmt, SwigPyObject *v) -{ - PyObject *res = NULL; - PyObject *args = PyTuple_New(1); - if (args) { - if (PyTuple_SetItem(args, 0, SwigPyObject_long(v)) == 0) { - PyObject *ofmt = SWIG_Python_str_FromChar(fmt); - if (ofmt) { -#if PY_VERSION_HEX >= 0x03000000 - res = PyUnicode_Format(ofmt,args); -#else - res = PyString_Format(ofmt,args); -#endif - Py_DECREF(ofmt); - } - Py_DECREF(args); - } - } - return res; -} - -SWIGRUNTIME PyObject * -SwigPyObject_oct(SwigPyObject *v) -{ - return SwigPyObject_format("%o",v); -} - -SWIGRUNTIME PyObject * -SwigPyObject_hex(SwigPyObject *v) -{ - return SwigPyObject_format("%x",v); -} - -SWIGRUNTIME PyObject * -#ifdef METH_NOARGS -SwigPyObject_repr(SwigPyObject *v) -#else -SwigPyObject_repr(SwigPyObject *v, PyObject *args) -#endif -{ - const char *name = SWIG_TypePrettyName(v->ty); - PyObject *repr = SWIG_Python_str_FromFormat("", name, v); - if (v->next) { -#ifdef METH_NOARGS - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next); -#else - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next, args); -#endif -#if PY_VERSION_HEX >= 0x03000000 - PyObject *joined = PyUnicode_Concat(repr, nrep); - Py_DecRef(repr); - Py_DecRef(nrep); - repr = joined; -#else - PyString_ConcatAndDel(&repr,nrep); -#endif - } - return repr; -} - -SWIGRUNTIME int -SwigPyObject_print(SwigPyObject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char *str; -#ifdef METH_NOARGS - PyObject *repr = SwigPyObject_repr(v); -#else - PyObject *repr = SwigPyObject_repr(v, 
NULL); -#endif - if (repr) { - str = SWIG_Python_str_AsChar(repr); - fputs(str, fp); - SWIG_Python_str_DelForPy3(str); - Py_DECREF(repr); - return 0; - } else { - return 1; - } -} - -SWIGRUNTIME PyObject * -SwigPyObject_str(SwigPyObject *v) -{ - char result[SWIG_BUFFER_SIZE]; - return SWIG_PackVoidPtr(result, v->ptr, v->ty->name, sizeof(result)) ? - SWIG_Python_str_FromChar(result) : 0; -} - -SWIGRUNTIME int -SwigPyObject_compare(SwigPyObject *v, SwigPyObject *w) -{ - void *i = v->ptr; - void *j = w->ptr; - return (i < j) ? -1 : ((i > j) ? 1 : 0); -} - -/* Added for Python 3.x, would it also be useful for Python 2.x? */ -SWIGRUNTIME PyObject* -SwigPyObject_richcompare(SwigPyObject *v, SwigPyObject *w, int op) -{ - PyObject* res; - if( op != Py_EQ && op != Py_NE ) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - if( (SwigPyObject_compare(v, w)==0) == (op == Py_EQ) ) - res = Py_True; - else - res = Py_False; - Py_INCREF(res); - return res; -} - - -SWIGRUNTIME PyTypeObject* _PySwigObject_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyObject_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigObject_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyObject_Check(PyObject *op) { - return (Py_TYPE(op) == SwigPyObject_type()) - || (strcmp(Py_TYPE(op)->tp_name,"SwigPyObject") == 0); -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own); - -SWIGRUNTIME void -SwigPyObject_dealloc(PyObject *v) -{ - SwigPyObject *sobj = (SwigPyObject *) v; - PyObject *next = sobj->next; - if (sobj->own == SWIG_POINTER_OWN) { - swig_type_info *ty = sobj->ty; - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - PyObject *destroy = data ? 
data->destroy : 0; - if (destroy) { - /* destroy is always a VARARGS method */ - PyObject *res; - if (data->delargs) { - /* we need to create a temporary object to carry the destroy operation */ - PyObject *tmp = SwigPyObject_New(sobj->ptr, ty, 0); - res = SWIG_Python_CallFunctor(destroy, tmp); - Py_DECREF(tmp); - } else { - PyCFunction meth = PyCFunction_GET_FUNCTION(destroy); - PyObject *mself = PyCFunction_GET_SELF(destroy); - res = ((*meth)(mself, v)); - } - Py_XDECREF(res); - } -#if !defined(SWIG_PYTHON_SILENT_MEMLEAK) - else { - const char *name = SWIG_TypePrettyName(ty); - printf("swig/python detected a memory leak of type '%s', no destructor found.\n", (name ? name : "unknown")); - } -#endif - } - Py_XDECREF(next); - PyObject_DEL(v); -} - -SWIGRUNTIME PyObject* -SwigPyObject_append(PyObject* v, PyObject* next) -{ - SwigPyObject *sobj = (SwigPyObject *) v; -#ifndef METH_O - PyObject *tmp = 0; - if (!PyArg_ParseTuple(next,(char *)"O:append", &tmp)) return NULL; - next = tmp; -#endif - if (!SwigPyObject_Check(next)) { - return NULL; - } - sobj->next = next; - Py_INCREF(next); - return SWIG_Py_Void(); -} - -SWIGRUNTIME PyObject* -#ifdef METH_NOARGS -SwigPyObject_next(PyObject* v) -#else -SwigPyObject_next(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *) v; - if (sobj->next) { - Py_INCREF(sobj->next); - return sobj->next; - } else { - return SWIG_Py_Void(); - } -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_disown(PyObject *v) -#else -SwigPyObject_disown(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = 0; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_acquire(PyObject *v) -#else -SwigPyObject_acquire(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = SWIG_POINTER_OWN; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* 
-SwigPyObject_own(PyObject *v, PyObject *args) -{ - PyObject *val = 0; -#if (PY_VERSION_HEX < 0x02020000) - if (!PyArg_ParseTuple(args,(char *)"|O:own",&val)) -#else - if (!PyArg_UnpackTuple(args, (char *)"own", 0, 1, &val)) -#endif - { - return NULL; - } - else - { - SwigPyObject *sobj = (SwigPyObject *)v; - PyObject *obj = PyBool_FromLong(sobj->own); - if (val) { -#ifdef METH_NOARGS - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v); - } else { - SwigPyObject_disown(v); - } -#else - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v,args); - } else { - SwigPyObject_disown(v,args); - } -#endif - } - return obj; - } -} - -#ifdef METH_O -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_NOARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_NOARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_O, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_NOARGS, (char *)"returns the next 'this' object"}, - {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_NOARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#else -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_VARARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_VARARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_VARARGS, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_VARARGS, (char *)"returns the next 'this' object"}, - 
{(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_VARARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#endif - -#if PY_VERSION_HEX < 0x02020000 -SWIGINTERN PyObject * -SwigPyObject_getattr(SwigPyObject *sobj,char *name) -{ - return Py_FindMethod(swigobject_methods, (PyObject *)sobj, name); -} -#endif - -SWIGRUNTIME PyTypeObject* -_PySwigObject_type(void) { - static char swigobject_doc[] = "Swig object carries a C/C++ instance pointer"; - - static PyNumberMethods SwigPyObject_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - /* nb_divide removed in Python 3 */ -#if PY_VERSION_HEX < 0x03000000 - (binaryfunc)0, /*nb_divide*/ -#endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0,/*nb_power*/ - (unaryfunc)0, /*nb_negative*/ - (unaryfunc)0, /*nb_positive*/ - (unaryfunc)0, /*nb_absolute*/ - (inquiry)0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ -#if PY_VERSION_HEX < 0x03000000 - 0, /*nb_coerce*/ -#endif - (unaryfunc)SwigPyObject_long, /*nb_int*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_long, /*nb_long*/ -#else - 0, /*nb_reserved*/ -#endif - (unaryfunc)0, /*nb_float*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_oct, /*nb_oct*/ - (unaryfunc)SwigPyObject_hex, /*nb_hex*/ -#endif -#if PY_VERSION_HEX >= 0x03000000 /* 3.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index, nb_inplace_divide removed */ -#elif PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */ -#elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ -#elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */ - 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */ -#endif - }; - - static PyTypeObject swigpyobject_type; - static int type_init = 0; - if 
(!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyObject", /* tp_name */ - sizeof(SwigPyObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyObject_dealloc, /* tp_dealloc */ - (printfunc)SwigPyObject_print, /* tp_print */ -#if PY_VERSION_HEX < 0x02020000 - (getattrfunc)SwigPyObject_getattr, /* tp_getattr */ -#else - (getattrfunc)0, /* tp_getattr */ -#endif - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX >= 0x03000000 - 0, /* tp_reserved in 3.0.1, tp_compare in 3.0.0 but not used */ -#else - (cmpfunc)SwigPyObject_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyObject_repr, /* tp_repr */ - &SwigPyObject_as_number, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyObject_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigobject_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)SwigPyObject_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* tp_iternext */ - swigobject_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpyobject_type = tmp; - /* for Python 3 we already assigned ob_type in 
PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpyobject_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpyobject_type; -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own) -{ - SwigPyObject *sobj = PyObject_NEW(SwigPyObject, SwigPyObject_type()); - if (sobj) { - sobj->ptr = ptr; - sobj->ty = ty; - sobj->own = own; - sobj->next = 0; - } - return (PyObject *)sobj; -} - -/* ----------------------------------------------------------------------------- - * Implements a simple Swig Packed type, and use it instead of string - * ----------------------------------------------------------------------------- */ - -typedef struct { - PyObject_HEAD - void *pack; - swig_type_info *ty; - size_t size; -} SwigPyPacked; - -SWIGRUNTIME int -SwigPyPacked_print(SwigPyPacked *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char result[SWIG_BUFFER_SIZE]; - fputs("pack, v->size, 0, sizeof(result))) { - fputs("at ", fp); - fputs(result, fp); - } - fputs(v->ty->name,fp); - fputs(">", fp); - return 0; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_repr(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) { - return SWIG_Python_str_FromFormat("", result, v->ty->name); - } else { - return SWIG_Python_str_FromFormat("", v->ty->name); - } -} - -SWIGRUNTIME PyObject * -SwigPyPacked_str(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))){ - return SWIG_Python_str_FromFormat("%s%s", result, v->ty->name); - } else { - return SWIG_Python_str_FromChar(v->ty->name); - } -} - -SWIGRUNTIME int -SwigPyPacked_compare(SwigPyPacked *v, SwigPyPacked *w) -{ - size_t i = v->size; - size_t j = w->size; - int s = (i < j) ? -1 : ((i > j) ? 1 : 0); - return s ? 
s : strncmp((char *)v->pack, (char *)w->pack, 2*v->size); -} - -SWIGRUNTIME PyTypeObject* _PySwigPacked_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyPacked_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigPacked_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyPacked_Check(PyObject *op) { - return ((op)->ob_type == _PySwigPacked_type()) - || (strcmp((op)->ob_type->tp_name,"SwigPyPacked") == 0); -} - -SWIGRUNTIME void -SwigPyPacked_dealloc(PyObject *v) -{ - if (SwigPyPacked_Check(v)) { - SwigPyPacked *sobj = (SwigPyPacked *) v; - free(sobj->pack); - } - PyObject_DEL(v); -} - -SWIGRUNTIME PyTypeObject* -_PySwigPacked_type(void) { - static char swigpacked_doc[] = "Swig object carries a C/C++ instance pointer"; - static PyTypeObject swigpypacked_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX>=0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyPacked", /* tp_name */ - sizeof(SwigPyPacked), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyPacked_dealloc, /* tp_dealloc */ - (printfunc)SwigPyPacked_print, /* tp_print */ - (getattrfunc)0, /* tp_getattr */ - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX>=0x03000000 - 0, /* tp_reserved in 3.0.1 */ -#else - (cmpfunc)SwigPyPacked_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyPacked_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyPacked_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigpacked_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* 
tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpypacked_type = tmp; - /* for Python 3 the ob_type already assigned in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpypacked_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpypacked_type; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_New(void *ptr, size_t size, swig_type_info *ty) -{ - SwigPyPacked *sobj = PyObject_NEW(SwigPyPacked, SwigPyPacked_type()); - if (sobj) { - void *pack = malloc(size); - if (pack) { - memcpy(pack, ptr, size); - sobj->pack = pack; - sobj->ty = ty; - sobj->size = size; - } else { - PyObject_DEL((PyObject *) sobj); - sobj = 0; - } - } - return (PyObject *) sobj; -} - -SWIGRUNTIME swig_type_info * -SwigPyPacked_UnpackData(PyObject *obj, void *ptr, size_t size) -{ - if (SwigPyPacked_Check(obj)) { - SwigPyPacked *sobj = (SwigPyPacked *)obj; - if (sobj->size != size) return 0; - memcpy(ptr, sobj->pack, size); - return sobj->ty; - } else { - return 0; - } -} - -/* ----------------------------------------------------------------------------- - * pointers/data manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIMEINLINE PyObject * -_SWIG_This(void) -{ - return SWIG_Python_str_FromChar("this"); -} - -static PyObject *swig_this = NULL; - -SWIGRUNTIME PyObject * -SWIG_This(void) -{ - if (swig_this == NULL) - swig_this = _SWIG_This(); - return swig_this; -} - -/* #define SWIG_PYTHON_SLOW_GETSET_THIS */ - -/* TODO: I don't know 
how to implement the fast getset in Python 3 right now */ -#if PY_VERSION_HEX>=0x03000000 -#define SWIG_PYTHON_SLOW_GETSET_THIS -#endif - -SWIGRUNTIME SwigPyObject * -SWIG_Python_GetSwigThis(PyObject *pyobj) -{ - if (SwigPyObject_Check(pyobj)) { - return (SwigPyObject *) pyobj; - } else { - PyObject *obj = 0; -#if (!defined(SWIG_PYTHON_SLOW_GETSET_THIS) && (PY_VERSION_HEX >= 0x02030000)) - if (PyInstance_Check(pyobj)) { - obj = _PyInstance_Lookup(pyobj, SWIG_This()); - } else { - PyObject **dictptr = _PyObject_GetDictPtr(pyobj); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - obj = dict ? PyDict_GetItem(dict, SWIG_This()) : 0; - } else { -#ifdef PyWeakref_CheckProxy - if (PyWeakref_CheckProxy(pyobj)) { - PyObject *wobj = PyWeakref_GET_OBJECT(pyobj); - return wobj ? SWIG_Python_GetSwigThis(wobj) : 0; - } -#endif - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } - } - } -#else - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } -#endif - if (obj && !SwigPyObject_Check(obj)) { - /* a PyObject is called 'this', try to get the 'real this' - SwigPyObject from it */ - return SWIG_Python_GetSwigThis(obj); - } - return (SwigPyObject *)obj; - } -} - -/* Acquire a pointer value */ - -SWIGRUNTIME int -SWIG_Python_AcquirePtr(PyObject *obj, int own) { - if (own == SWIG_POINTER_OWN) { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (sobj) { - int oldown = sobj->own; - sobj->own = own; - return oldown; - } - } - return 0; -} - -/* Convert a pointer value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPtrAndOwn(PyObject *obj, void **ptr, swig_type_info *ty, int flags, int *own) { - if (!obj) return SWIG_ERROR; - if (obj == Py_None) { - if (ptr) *ptr = 0; - return SWIG_OK; - } else { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (own) - *own = 0; - while (sobj) { - void *vptr = 
sobj->ptr; - if (ty) { - swig_type_info *to = sobj->ty; - if (to == ty) { - /* no type cast needed */ - if (ptr) *ptr = vptr; - break; - } else { - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) { - sobj = (SwigPyObject *)sobj->next; - } else { - if (ptr) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - if (newmemory == SWIG_CAST_NEW_MEMORY) { - assert(own); /* badly formed typemap which will lead to a memory leak - it must set and use own to delete *ptr */ - if (own) - *own = *own | SWIG_CAST_NEW_MEMORY; - } - } - break; - } - } - } else { - if (ptr) *ptr = vptr; - break; - } - } - if (sobj) { - if (own) - *own = *own | sobj->own; - if (flags & SWIG_POINTER_DISOWN) { - sobj->own = 0; - } - return SWIG_OK; - } else { - int res = SWIG_ERROR; - if (flags & SWIG_POINTER_IMPLICIT_CONV) { - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - if (data && !data->implicitconv) { - PyObject *klass = data->klass; - if (klass) { - PyObject *impconv; - data->implicitconv = 1; /* avoid recursion and call 'explicit' constructors*/ - impconv = SWIG_Python_CallFunctor(klass, obj); - data->implicitconv = 0; - if (PyErr_Occurred()) { - PyErr_Clear(); - impconv = 0; - } - if (impconv) { - SwigPyObject *iobj = SWIG_Python_GetSwigThis(impconv); - if (iobj) { - void *vptr; - res = SWIG_Python_ConvertPtrAndOwn((PyObject*)iobj, &vptr, ty, 0, 0); - if (SWIG_IsOK(res)) { - if (ptr) { - *ptr = vptr; - /* transfer the ownership to 'ptr' */ - iobj->own = 0; - res = SWIG_AddCast(res); - res = SWIG_AddNewMask(res); - } else { - res = SWIG_AddCast(res); - } - } - } - Py_DECREF(impconv); - } - } - } - } - return res; - } - } -} - -/* Convert a function ptr value */ - -SWIGRUNTIME int -SWIG_Python_ConvertFunctionPtr(PyObject *obj, void **ptr, swig_type_info *ty) { - if (!PyCFunction_Check(obj)) { - return SWIG_ConvertPtr(obj, ptr, ty, 0); - } else { - void *vptr = 0; - - /* here we get the method pointer for callbacks */ - const char *doc = 
(((PyCFunctionObject *)obj) -> m_ml -> ml_doc); - const char *desc = doc ? strstr(doc, "swig_ptr: ") : 0; - if (desc) - desc = ty ? SWIG_UnpackVoidPtr(desc + 10, &vptr, ty->name) : 0; - if (!desc) - return SWIG_ERROR; - if (ty) { - swig_cast_info *tc = SWIG_TypeCheck(desc,ty); - if (tc) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - assert(!newmemory); /* newmemory handling not yet implemented */ - } else { - return SWIG_ERROR; - } - } else { - *ptr = vptr; - } - return SWIG_OK; - } -} - -/* Convert a packed value value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPacked(PyObject *obj, void *ptr, size_t sz, swig_type_info *ty) { - swig_type_info *to = SwigPyPacked_UnpackData(obj, ptr, sz); - if (!to) return SWIG_ERROR; - if (ty) { - if (to != ty) { - /* check type cast? */ - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) return SWIG_ERROR; - } - } - return SWIG_OK; -} - -/* ----------------------------------------------------------------------------- - * Create a new pointer object - * ----------------------------------------------------------------------------- */ - -/* - Create a new instance object, without calling __init__, and set the - 'this' attribute. 
-*/ - -SWIGRUNTIME PyObject* -SWIG_Python_NewShadowInstance(SwigPyClientData *data, PyObject *swig_this) -{ -#if (PY_VERSION_HEX >= 0x02020000) - PyObject *inst = 0; - PyObject *newraw = data->newraw; - if (newraw) { - inst = PyObject_Call(newraw, data->newargs, NULL); - if (inst) { -#if !defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - PyDict_SetItem(dict, SWIG_This(), swig_this); - } - } -#else - PyObject *key = SWIG_This(); - PyObject_SetAttr(inst, key, swig_this); -#endif - } - } else { -#if PY_VERSION_HEX >= 0x03000000 - inst = PyBaseObject_Type.tp_new((PyTypeObject*) data->newargs, Py_None, Py_None); - PyObject_SetAttr(inst, SWIG_This(), swig_this); - Py_TYPE(inst)->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; -#else - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); -#endif - } - return inst; -#else -#if (PY_VERSION_HEX >= 0x02010000) - PyObject *inst; - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); - return (PyObject *) inst; -#else - PyInstanceObject *inst = PyObject_NEW(PyInstanceObject, &PyInstance_Type); - if (inst == NULL) { - return NULL; - } - inst->in_class = (PyClassObject *)data->newargs; - Py_INCREF(inst->in_class); - inst->in_dict = PyDict_New(); - if (inst->in_dict == NULL) { - Py_DECREF(inst); - return NULL; - } -#ifdef Py_TPFLAGS_HAVE_WEAKREFS - inst->in_weakreflist = NULL; -#endif -#ifdef Py_TPFLAGS_GC - PyObject_GC_Init(inst); -#endif - PyDict_SetItem(inst->in_dict, SWIG_This(), swig_this); - return (PyObject *) inst; -#endif -#endif -} - -SWIGRUNTIME void -SWIG_Python_SetSwigThis(PyObject *inst, PyObject *swig_this) -{ - PyObject *dict; -#if (PY_VERSION_HEX >= 0x02020000) && 
!defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - } - PyDict_SetItem(dict, SWIG_This(), swig_this); - return; - } -#endif - dict = PyObject_GetAttrString(inst, (char*)"__dict__"); - PyDict_SetItem(dict, SWIG_This(), swig_this); - Py_DECREF(dict); -} - - -SWIGINTERN PyObject * -SWIG_Python_InitShadowInstance(PyObject *args) { - PyObject *obj[2]; - if (!SWIG_Python_UnpackTuple(args,(char*)"swiginit", 2, 2, obj)) { - return NULL; - } else { - SwigPyObject *sthis = SWIG_Python_GetSwigThis(obj[0]); - if (sthis) { - SwigPyObject_append((PyObject*) sthis, obj[1]); - } else { - SWIG_Python_SetSwigThis(obj[0], obj[1]); - } - return SWIG_Py_Void(); - } -} - -/* Create a new pointer object */ - -SWIGRUNTIME PyObject * -SWIG_Python_NewPointerObj(void *ptr, swig_type_info *type, int flags) { - if (!ptr) { - return SWIG_Py_Void(); - } else { - int own = (flags & SWIG_POINTER_OWN) ? SWIG_POINTER_OWN : 0; - PyObject *robj = SwigPyObject_New(ptr, type, own); - SwigPyClientData *clientdata = type ? (SwigPyClientData *)(type->clientdata) : 0; - if (clientdata && !(flags & SWIG_POINTER_NOSHADOW)) { - PyObject *inst = SWIG_Python_NewShadowInstance(clientdata, robj); - if (inst) { - Py_DECREF(robj); - robj = inst; - } - } - return robj; - } -} - -/* Create a new packed object */ - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_NewPackedObj(void *ptr, size_t sz, swig_type_info *type) { - return ptr ? 
SwigPyPacked_New((void *) ptr, sz, type) : SWIG_Py_Void(); -} - -/* -----------------------------------------------------------------------------* - * Get type list - * -----------------------------------------------------------------------------*/ - -#ifdef SWIG_LINK_RUNTIME -void *SWIG_ReturnGlobalTypeList(void *); -#endif - -SWIGRUNTIME swig_module_info * -SWIG_Python_GetModule(void) { - static void *type_pointer = (void *)0; - /* first check if module already created */ - if (!type_pointer) { -#ifdef SWIG_LINK_RUNTIME - type_pointer = SWIG_ReturnGlobalTypeList((void *)0); -#else -#ifdef USE_CAPSULES - type_pointer = PyCapsule_Import(TYPE_POINTER_NAME, 0); -#else - type_pointer = PyCObject_Import((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - (char*)"type_pointer" SWIG_TYPE_TABLE_NAME); -#endif - if (PyErr_Occurred()) { - PyErr_Clear(); - type_pointer = (void *)0; - } -#endif - } - return (swig_module_info *) type_pointer; -} - -#if PY_MAJOR_VERSION < 2 -/* PyModule_AddObject function was introduced in Python 2.0. The following function - is copied out of Python/modsupport.c in python version 2.3.4 */ -SWIGINTERN int -PyModule_AddObject(PyObject *m, char *name, PyObject *o) -{ - PyObject *dict; - if (!PyModule_Check(m)) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs module as first arg"); - return SWIG_ERROR; - } - if (!o) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs non-NULL value"); - return SWIG_ERROR; - } - - dict = PyModule_GetDict(m); - if (dict == NULL) { - /* Internal error -- modules must have a dict! 
*/ - PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__", - PyModule_GetName(m)); - return SWIG_ERROR; - } - if (PyDict_SetItemString(dict, name, o)) - return SWIG_ERROR; - Py_DECREF(o); - return SWIG_OK; -} -#endif - -SWIGRUNTIME void -SWIG_Python_DestroyModule(void *vptr) -{ - size_t i; -#ifdef USE_CAPSULES - swig_module_info *swig_module = - (swig_module_info *) PyCapsule_GetPointer((PyObject *)vptr, TYPE_POINTER_NAME); -#else - swig_module_info *swig_module = (swig_module_info *) vptr; -#endif - swig_type_info **types = swig_module->types; - for (i =0; i < swig_module->size; ++i) { - swig_type_info *ty = types[i]; - if (ty->owndata) { - SwigPyClientData *data = (SwigPyClientData *) ty->clientdata; - if (data) SwigPyClientData_Del(data); - } - } - Py_DECREF(SWIG_This()); - swig_this = NULL; -} - -SWIGRUNTIME void -SWIG_Python_SetModule(swig_module_info *swig_module) { - static PyMethodDef swig_empty_runtime_method_table[] = { {NULL, NULL, 0, NULL} };/* Sentinel */ - -#if PY_VERSION_HEX >= 0x03000000 - /* Add a dummy module object into sys.modules */ - PyObject *module = PyImport_AddModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION); -#else - PyObject *module = Py_InitModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - swig_empty_runtime_method_table); -#endif -#ifdef USE_CAPSULES - PyObject *pointer = PyCapsule_New((void *)swig_module, TYPE_POINTER_NAME, - (PyCapsule_Destructor)SWIG_Python_DestroyModule); -#else - PyObject *pointer = PyCObject_FromVoidPtr((void *) swig_module, SWIG_Python_DestroyModule); -#endif - if (pointer && module) { -#ifdef USE_CAPSULES - PyModule_AddObject(module, (char*)"type_pointer_capsule" SWIG_TYPE_TABLE_NAME, pointer); -#else - PyModule_AddObject(module, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME, pointer); -#endif - } else { - Py_XDECREF(pointer); - } -} - -/* The python cached type query */ -SWIGRUNTIME PyObject * -SWIG_Python_TypeCache(void) { - static PyObject *SWIG_STATIC_POINTER(cache) = PyDict_New(); - 
return cache; -} - -SWIGRUNTIME swig_type_info * -SWIG_Python_TypeQuery(const char *type) -{ - PyObject *cache = SWIG_Python_TypeCache(); - PyObject *key = SWIG_Python_str_FromChar(type); - PyObject *obj = PyDict_GetItem(cache, key); - swig_type_info *descriptor; - if (obj) { -#ifdef USE_CAPSULES - descriptor = (swig_type_info *) PyCapsule_GetPointer(obj, type); -#else - descriptor = (swig_type_info *) PyCObject_AsVoidPtr(obj); -#endif - } else { - swig_module_info *swig_module = SWIG_Python_GetModule(); - descriptor = SWIG_TypeQueryModule(swig_module, swig_module, type); - if (descriptor) { -#ifdef USE_CAPSULES - obj = PyCapsule_New(descriptor, type, NULL); -#else - obj = PyCObject_FromVoidPtr(descriptor, NULL); -#endif - PyDict_SetItem(cache, key, obj); - Py_DECREF(obj); - } - } - Py_DECREF(key); - return descriptor; -} - -/* - For backward compatibility only -*/ -#define SWIG_POINTER_EXCEPTION 0 -#define SWIG_arg_fail(arg) SWIG_Python_ArgFail(arg) -#define SWIG_MustGetPtr(p, type, argnum, flags) SWIG_Python_MustGetPtr(p, type, argnum, flags) - -SWIGRUNTIME int -SWIG_Python_AddErrMesg(const char* mesg, int infront) -{ - if (PyErr_Occurred()) { - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - Py_XINCREF(type); - PyErr_Clear(); - if (infront) { - PyErr_Format(type, "%s %s", mesg, tmp = SWIG_Python_str_AsChar(old_str)); - } else { - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - } - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - } - return 1; - } else { - return 0; - } -} - -SWIGRUNTIME int -SWIG_Python_ArgFail(int argnum) -{ - if (PyErr_Occurred()) { - /* add information about failing argument */ - char mesg[256]; - PyOS_snprintf(mesg, sizeof(mesg), "argument number %d:", argnum); - return SWIG_Python_AddErrMesg(mesg, 1); - } else { - return 0; - } -} - -SWIGRUNTIMEINLINE const char * 
-SwigPyObject_GetDesc(PyObject *self) -{ - SwigPyObject *v = (SwigPyObject *)self; - swig_type_info *ty = v ? v->ty : 0; - return ty ? ty->str : (char*)""; -} - -SWIGRUNTIME void -SWIG_Python_TypeError(const char *type, PyObject *obj) -{ - if (type) { -#if defined(SWIG_COBJECT_TYPES) - if (obj && SwigPyObject_Check(obj)) { - const char *otype = (const char *) SwigPyObject_GetDesc(obj); - if (otype) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, 'SwigPyObject(%s)' is received", - type, otype); - return; - } - } else -#endif - { - const char *otype = (obj ? obj->ob_type->tp_name : 0); - if (otype) { - PyObject *str = PyObject_Str(obj); - const char *cstr = str ? SWIG_Python_str_AsChar(str) : 0; - if (cstr) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s(%s)' is received", - type, otype, cstr); - SWIG_Python_str_DelForPy3(cstr); - } else { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s' is received", - type, otype); - } - Py_XDECREF(str); - return; - } - } - PyErr_Format(PyExc_TypeError, "a '%s' is expected", type); - } else { - PyErr_Format(PyExc_TypeError, "unexpected type is received"); - } -} - - -/* Convert a pointer value, signal an exception on a type mismatch */ -SWIGRUNTIME void * -SWIG_Python_MustGetPtr(PyObject *obj, swig_type_info *ty, int argnum, int flags) { - void *result; - if (SWIG_Python_ConvertPtr(obj, &result, ty, flags) == -1) { - PyErr_Clear(); -#if SWIG_POINTER_EXCEPTION - if (flags) { - SWIG_Python_TypeError(SWIG_TypePrettyName(ty), obj); - SWIG_Python_ArgFail(argnum); - } -#endif - } - return result; -} - - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - - -#define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0) - -#define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else - - - -/* -------- TYPES TABLE (BEGIN) -------- */ - -#define SWIGTYPE_p_char swig_types[0] -static swig_type_info *swig_types[2]; -static 
swig_module_info swig_module = {swig_types, 1, 0, 0, 0, 0}; -#define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) -#define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) - -/* -------- TYPES TABLE (END) -------- */ - -#if (PY_VERSION_HEX <= 0x02000000) -# if !defined(SWIG_PYTHON_CLASSIC) -# error "This python version requires swig to be run with the '-classic' option" -# endif -#endif - -/*----------------------------------------------- - @(target):= _coo.so - ------------------------------------------------*/ -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_init PyInit__coo - -#else -# define SWIG_init init_coo - -#endif -#define SWIG_name "_coo" - -#define SWIGVERSION 0x020001 -#define SWIG_VERSION SWIGVERSION - - -#define SWIG_as_voidptr(a) const_cast< void * >(static_cast< const void * >(a)) -#define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),reinterpret_cast< void** >(a)) - - -#include - - -namespace swig { - class SwigPtr_PyObject { - protected: - PyObject *_obj; - - public: - SwigPtr_PyObject() :_obj(0) - { - } - - SwigPtr_PyObject(const SwigPtr_PyObject& item) : _obj(item._obj) - { - Py_XINCREF(_obj); - } - - SwigPtr_PyObject(PyObject *obj, bool initial_ref = true) :_obj(obj) - { - if (initial_ref) { - Py_XINCREF(_obj); - } - } - - SwigPtr_PyObject & operator=(const SwigPtr_PyObject& item) - { - Py_XINCREF(item._obj); - Py_XDECREF(_obj); - _obj = item._obj; - return *this; - } - - ~SwigPtr_PyObject() - { - Py_XDECREF(_obj); - } - - operator PyObject *() const - { - return _obj; - } - - PyObject *operator->() const - { - return _obj; - } - }; -} - - -namespace swig { - struct SwigVar_PyObject : SwigPtr_PyObject { - SwigVar_PyObject(PyObject* obj = 0) : SwigPtr_PyObject(obj, false) { } - - SwigVar_PyObject & operator = (PyObject* obj) - { - Py_XDECREF(_obj); - _obj = obj; - return *this; - } - }; -} - - -#include "py3k.h" -#define SWIG_FILE_WITH_INIT -#include "Python.h" -#include 
"numpy/arrayobject.h" -#include "complex_ops.h" -/*#include "sparsetools.h"*/ - - -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -#include "complex_ops.h" - - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it slightly to fix some minor bugs, upgrade to numpy (all - * versions), add some comments and some functionality. - */ - -/* Macros to extract array attributes. - */ -#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -#define array_type(a) (int)(PyArray_TYPE(a)) -#define array_numdims(a) (((PyArrayObject *)a)->nd) -#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -#define array_data(a) (((PyArrayObject *)a)->data) -#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) - -/* Support older NumPy data type names -*/ -#if NDARRAY_VERSION < 0x01000000 -#define NPY_BOOL PyArray_BOOL -#define NPY_BYTE PyArray_BYTE -#define NPY_UBYTE PyArray_UBYTE -#define NPY_SHORT PyArray_SHORT -#define NPY_USHORT PyArray_USHORT -#define NPY_INT PyArray_INT -#define NPY_UINT PyArray_UINT -#define NPY_LONG PyArray_LONG -#define NPY_ULONG PyArray_ULONG -#define NPY_LONGLONG PyArray_LONGLONG -#define NPY_ULONGLONG PyArray_ULONGLONG -#define NPY_FLOAT PyArray_FLOAT -#define NPY_DOUBLE PyArray_DOUBLE -#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -#define NPY_CFLOAT PyArray_CFLOAT -#define NPY_CDOUBLE PyArray_CDOUBLE -#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -#define NPY_OBJECT PyArray_OBJECT -#define NPY_STRING PyArray_STRING -#define NPY_UNICODE PyArray_UNICODE -#define NPY_VOID PyArray_VOID -#define NPY_NTYPES PyArray_NTYPES -#define NPY_NOTYPE PyArray_NOTYPE -#define NPY_CHAR PyArray_CHAR -#define NPY_USERDEF PyArray_USERDEF -#define npy_intp intp -#endif - -/* 
Given a PyObject, return a string describing its type. - */ -const char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; -} - -/* Given a NumPy typecode, return a string describing the type. - */ -const char* typecode_string(int typecode) { - static const char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; -} - -/* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ -int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); -} - -/* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. 
- */ -PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) { - ary = (PyArrayObject*) input; - } - else if is_array(input) { - const char* desired_type = typecode_string(typecode); - const char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else { - const char * desired_type = typecode_string(typecode); - const char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; -} - -/* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ -PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else { - py_obj = PyArray_FromObject(input, typecode, 0, 0); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; -} - -/* Given a PyArrayObject, check to see if it is contiguous. If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. 
- */ -PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) { - PyArrayObject* result; - if (array_is_contiguous(ary)) { - result = ary; - *is_new_object = 0; - } - else { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; -} - -/* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ -PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, &is_new1); - if (ary1) { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; -} - -/* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ -int require_contiguous(PyArrayObject* ary) { - int contiguous = 1; - if (!array_is_contiguous(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; -} - -/* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_native(PyArrayObject* ary) { - int native = 1; - if (!array_is_native(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. A byte-swapped array was given"); - native = 0; - } - return native; -} - -/* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. 
Otherwise, set the python error string and return 0. - */ -int require_dimensions(PyArrayObject* ary, int exact_dimensions) { - int success = 1; - if (array_numdims(ary) != exact_dimensions) { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; -} - -/* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) { - if (array_numdims(ary) == exact_dimensions[i]) { - success = 1; - } - } - if (!success) { - for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must be have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; -} - -/* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. 
- */ -int require_size(PyArrayObject* ary, npy_intp* size, int n) { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) { - if (size[i] != -1 && size[i] != array_size(ary,i)) { - success = 0; - } - } - if (!success) { - for (i = 0; i < n; i++) { - if (size[i] == -1) { - sprintf(s, "*,"); - } - else - { - sprintf(s,"%" NPY_INTP_FMT ",", size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) { - sprintf(s,"%" NPY_INTP_FMT ",", array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must be have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); - } - return success; -} -/* End John Hunter translation (with modifications by Bill Spotz) */ - - - - - -/*! - Appends @a what to @a where. On input, @a where need not to be a tuple, but on - return it always is. 
- - @par Revision history: - - 17.02.2005, c -*/ -PyObject *helper_appendToTuple( PyObject *where, PyObject *what ) { - PyObject *o2, *o3; - - if ((!where) || (where == Py_None)) { - where = what; - } else { - if (!PyTuple_Check( where )) { - o2 = where; - where = PyTuple_New( 1 ); - PyTuple_SetItem( where, 0, o2 ); - } - o3 = PyTuple_New( 1 ); - PyTuple_SetItem( o3, 0, what ); - o2 = where; - where = PySequence_Concat( o2, o3 ); - Py_DECREF( o2 ); - Py_DECREF( o3 ); - } - return where; -} - - - - - - -#include "coo.h" - - -#include -#if !defined(SWIG_NO_LLONG_MAX) -# if !defined(LLONG_MAX) && defined(__GNUC__) && defined (__LONG_LONG_MAX__) -# define LLONG_MAX __LONG_LONG_MAX__ -# define LLONG_MIN (-LLONG_MAX - 1LL) -# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) -# endif -#endif - - -SWIGINTERN int -SWIG_AsVal_double (PyObject *obj, double *val) -{ - int res = SWIG_TypeError; - if (PyFloat_Check(obj)) { - if (val) *val = PyFloat_AsDouble(obj); - return SWIG_OK; - } else if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - double v = PyLong_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - double d = PyFloat_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = d; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_AddCast(SWIG_OK)); - } else { - PyErr_Clear(); - } - } - } -#endif - return res; -} - - -#include - - -#include - - -SWIGINTERNINLINE int -SWIG_CanCastAsInteger(double *d, double min, double max) { - double x = *d; - if ((min <= x && x <= max)) { - double fx = floor(x); - double cx = ceil(x); - double rd = ((x - fx) < 0.5) ? 
fx : cx; /* simple rint */ - if ((errno == EDOM) || (errno == ERANGE)) { - errno = 0; - } else { - double summ, reps, diff; - if (rd < x) { - diff = x - rd; - } else if (rd > x) { - diff = rd - x; - } else { - return 1; - } - summ = rd + x; - reps = diff/summ; - if (reps < 8*DBL_EPSILON) { - *d = rd; - return 1; - } - } - } - return 0; -} - - -SWIGINTERN int -SWIG_AsVal_long (PyObject *obj, long* val) -{ - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal_double (obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -#endif - return SWIG_TypeError; -} - - -SWIGINTERN int -SWIG_AsVal_int (PyObject * obj, int *val) -{ - long v; - int res = SWIG_AsVal_long (obj, &v); - if (SWIG_IsOK(res)) { - if ((v < INT_MIN || v > INT_MAX)) { - return SWIG_OverflowError; - } else { - if (val) *val = static_cast< int >(v); - } - } - return res; -} - - - #define SWIG_From_long PyInt_FromLong - - -SWIGINTERNINLINE PyObject * -SWIG_From_int (int value) -{ - return SWIG_From_long (value); -} - -#ifdef __cplusplus -extern "C" { -#endif -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - signed char *arg6 ; - int *arg7 ; - int *arg8 ; - signed char *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - 
PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_BYTE, &is_new_object6); - if (!array6 || 
!require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (signed char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_BYTE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (signed char*) array_data(temp9); - } - coo_tocsr< int,signed char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(signed char const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned char *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned char *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject 
* obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UBYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || 
!require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_UBYTE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned char*) array_data(temp9); - } - coo_tocsr< int,unsigned char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned char const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - short *arg6 ; - int *arg7 ; - int *arg8 ; - short *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_SHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || 
!require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_SHORT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (short*) array_data(temp9); - } - coo_tocsr< int,short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(short const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned short *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned short *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" 
"int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_USHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_USHORT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned short*) array_data(temp9); - } - 
coo_tocsr< int,unsigned short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned short const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int 
>(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - coo_tocsr< int,int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if 
(is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned int *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned int *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = 
obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UINT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_UINT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned int*) array_data(temp9); - } - coo_tocsr< int,unsigned int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned int const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if 
(is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long long *arg6 ; - int *arg7 ; - int *arg8 ; - long long *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) 
array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_LONGLONG); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (long long*) array_data(temp9); - } - coo_tocsr< int,long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long long const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int 
*arg5 ; - unsigned long long *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned long long *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || 
!require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_ULONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_ULONGLONG); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned long long*) array_data(temp9); - } - coo_tocsr< int,unsigned long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned long long const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - float *arg6 ; - int *arg7 ; - int *arg8 ; - float *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = 
NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_FLOAT, &is_new_object6); - 
if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (float*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_FLOAT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (float*) array_data(temp9); - } - coo_tocsr< int,float >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(float const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - double *arg6 ; - int *arg7 ; - int *arg8 ; - double *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - 
PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_DOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || 
!require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_DOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (double*) array_data(temp9); - } - coo_tocsr< int,double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(double const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long double *arg6 ; - int *arg7 ; - int *arg8 ; - long double *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || 
!require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_LONGDOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (long double*) array_data(temp9); - } - coo_tocsr< int,long double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long double const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cfloat_wrapper *arg6 ; - int *arg7 ; - int *arg8 ; - npy_cfloat_wrapper *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" 
"coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CFLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cfloat_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_CFLOAT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - 
arg9 = (npy_cfloat_wrapper*) array_data(temp9); - } - coo_tocsr< int,npy_cfloat_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cfloat_wrapper const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cdouble_wrapper *arg6 ; - int *arg7 ; - int *arg8 ; - npy_cdouble_wrapper *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in 
method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_CDOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (npy_cdouble_wrapper*) array_data(temp9); - } - coo_tocsr< int,npy_cdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cdouble_wrapper const (*))arg6,arg7,arg8,arg9); - 
resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_clongdouble_wrapper *arg6 ; - int *arg7 ; - int *arg8 ; - npy_clongdouble_wrapper *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsr" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CLONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_clongdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_CLONGDOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (npy_clongdouble_wrapper*) array_data(temp9); - } - coo_tocsr< int,npy_clongdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_clongdouble_wrapper const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } 
- } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsr(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[10]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 9); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_BYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_UINT)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_LONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsr__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'coo_tocsr'.\n" - " Possible C/C++ prototypes are:\n" - " coo_tocsr< int,signed char >(int const,int const,int const,int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " coo_tocsr< int,unsigned char >(int const,int const,int const,int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " coo_tocsr< int,short >(int const,int const,int const,int const [],int const [],short const [],int [],int [],short [])\n" - " coo_tocsr< int,unsigned short >(int const,int const,int const,int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " coo_tocsr< int,int >(int const,int const,int const,int const [],int const [],int const [],int [],int [],int [])\n" - " coo_tocsr< int,unsigned int >(int const,int const,int const,int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " coo_tocsr< int,long long >(int const,int const,int const,int const [],int const [],long long const [],int [],int [],long long [])\n" - " coo_tocsr< int,unsigned long long >(int const,int const,int const,int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " coo_tocsr< int,float >(int const,int const,int const,int const [],int const [],float const [],int [],int [],float [])\n" - " coo_tocsr< int,double >(int const,int const,int const,int const [],int const [],double const [],int [],int [],double [])\n" - " coo_tocsr< int,long double >(int const,int const,int const,int const [],int const [],long double const [],int [],int [],long double [])\n" - " coo_tocsr< int,npy_cfloat_wrapper >(int const,int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " coo_tocsr< 
int,npy_cdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " coo_tocsr< int,npy_clongdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - signed char *arg6 ; - int *arg7 ; - int *arg8 ; - signed char *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp 
size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_BYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (signed char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_BYTE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (signed char*) array_data(temp9); - } - coo_tocsc< int,signed char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(signed char const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - 
} - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned char *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned char *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) 
SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UBYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_UBYTE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned char*) array_data(temp9); - } - coo_tocsc< int,unsigned char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned char const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 
; - int arg3 ; - int *arg4 ; - int *arg5 ; - short *arg6 ; - int *arg7 ; - int *arg8 ; - short *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || 
!require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_SHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_SHORT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (short*) array_data(temp9); - } - coo_tocsc< int,short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(short const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned short *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned short *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject 
*array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_USHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) 
|| !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_USHORT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned short*) array_data(temp9); - } - coo_tocsc< int,unsigned short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned short const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 
; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || 
!require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - coo_tocsc< int,int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned int *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned int *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - 
ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UINT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = 
obj_to_array_no_conversion(obj8,PyArray_UINT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned int*) array_data(temp9); - } - coo_tocsc< int,unsigned int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned int const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long long *arg6 ; - int *arg7 ; - int *arg8 ; - long long *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = 
SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_LONGLONG); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (long long*) array_data(temp9); - } - coo_tocsc< int,long long >(arg1,arg2,arg3,(int const (*))arg4,(int 
const (*))arg5,(long long const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned long long *arg6 ; - int *arg7 ; - int *arg8 ; - unsigned long long *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); 
- if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_ULONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_ULONGLONG); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (unsigned long long*) array_data(temp9); - } - coo_tocsc< int,unsigned long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned long long const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - 
Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - float *arg6 ; - int *arg7 ; - int *arg8 ; - float *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - 
array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_FLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (float*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_FLOAT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (float*) array_data(temp9); - } - coo_tocsc< int,float >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(float const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - 
Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - double *arg6 ; - int *arg7 ; - int *arg8 ; - double *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp 
size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_DOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_DOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (double*) array_data(temp9); - } - coo_tocsc< int,double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(double const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long double *arg6 ; - int *arg7 ; - int 
*arg8 ; - long double *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = 
(int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_LONGDOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (long double*) array_data(temp9); - } - coo_tocsc< int,long double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long double const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cfloat_wrapper *arg6 ; - int *arg7 ; - int *arg8 ; - npy_cfloat_wrapper *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int 
is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CFLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || 
!require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cfloat_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_CFLOAT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (npy_cfloat_wrapper*) array_data(temp9); - } - coo_tocsc< int,npy_cfloat_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cfloat_wrapper const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cdouble_wrapper *arg6 ; - int *arg7 ; - int *arg8 ; - npy_cdouble_wrapper *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; 
- PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); 
- if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_CDOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (npy_cdouble_wrapper*) array_data(temp9); - } - coo_tocsc< int,npy_cdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cdouble_wrapper const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_clongdouble_wrapper *arg6 ; - int *arg7 ; - int *arg8 ; - npy_clongdouble_wrapper *arg9 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyArrayObject *temp9 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - - if 
(!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:coo_tocsc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_tocsc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_tocsc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_tocsc" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CLONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_clongdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if 
(!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_CLONGDOUBLE); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (npy_clongdouble_wrapper*) array_data(temp9); - } - coo_tocsc< int,npy_clongdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_clongdouble_wrapper const (*))arg6,arg7,arg8,arg9); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_tocsc(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[10]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 9); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_USHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_FLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - if (argc == 9) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_tocsc__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'coo_tocsc'.\n" - " Possible C/C++ prototypes are:\n" - " coo_tocsc< int,signed char >(int const,int const,int const,int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " coo_tocsc< int,unsigned char >(int const,int const,int const,int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " coo_tocsc< int,short >(int const,int const,int const,int const [],int const [],short const [],int [],int [],short [])\n" - " coo_tocsc< int,unsigned short >(int const,int const,int const,int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " coo_tocsc< int,int >(int const,int const,int const,int const [],int const [],int const [],int [],int [],int [])\n" - " coo_tocsc< int,unsigned int >(int const,int const,int const,int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " coo_tocsc< int,long long >(int const,int const,int const,int const [],int const [],long long const [],int [],int [],long long [])\n" - " coo_tocsc< int,unsigned long long >(int const,int const,int const,int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " coo_tocsc< int,float >(int const,int const,int const,int const [],int const [],float const [],int [],int [],float [])\n" - " coo_tocsc< int,double >(int const,int const,int const,int const [],int const [],double const [],int [],int [],double [])\n" - " coo_tocsc< int,long double >(int const,int const,int const,int const [],int const [],long double const [],int [],int [],long double [])\n" - " coo_tocsc< int,npy_cfloat_wrapper >(int const,int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " coo_tocsc< 
int,npy_cdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " coo_tocsc< int,npy_clongdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - signed char *arg6 ; - signed char *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || 
!require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_BYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (signed char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_BYTE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (signed char*) array_data(temp7); - } - coo_todense< int,signed char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(signed char const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned char *arg6 ; - unsigned char *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - 
PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UBYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - 
- arg6 = (unsigned char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_UBYTE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned char*) array_data(temp7); - } - coo_todense< int,unsigned char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned char const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - short *arg6 ; - short *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" 
"coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_SHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_SHORT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (short*) array_data(temp7); - } - coo_todense< int,short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(short const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - 
Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned short *arg6 ; - unsigned short *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || 
!require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_USHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_USHORT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned short*) array_data(temp7); - } - coo_todense< int,unsigned short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned short const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if 
(!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - coo_todense< int,int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(int const 
(*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned int *arg6 ; - unsigned int *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp 
size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UINT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_UINT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned int*) array_data(temp7); - } - coo_todense< int,unsigned int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned int const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long long *arg6 ; - long long *arg7 ; - int val1 ; - int ecode1 = 0 ; - int 
val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGLONG, &is_new_object6); - if (!array6 || 
!require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_LONGLONG); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (long long*) array_data(temp7); - } - coo_todense< int,long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long long const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned long long *arg6 ; - unsigned long long *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = 
static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_ULONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_ULONGLONG); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned long long*) array_data(temp7); - } - coo_todense< int,unsigned long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned long long const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } 
- return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - float *arg6 ; - float *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - 
- arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_FLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (float*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_FLOAT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (float*) array_data(temp7); - } - coo_todense< int,float >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(float const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - double *arg6 ; - double *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * 
obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_DOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_DOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) 
SWIG_fail; - arg7 = (double*) array_data(temp7); - } - coo_todense< int,double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(double const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long double *arg6 ; - long double *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_LONGDOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (long double*) array_data(temp7); - } - coo_todense< int,long double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long double const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject 
*args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cfloat_wrapper *arg6 ; - npy_cfloat_wrapper *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - 
- arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CFLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cfloat_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_CFLOAT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (npy_cfloat_wrapper*) array_data(temp7); - } - coo_todense< int,npy_cfloat_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cfloat_wrapper const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cdouble_wrapper *arg6 ; - npy_cdouble_wrapper *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_CDOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (npy_cdouble_wrapper*) array_data(temp7); - } - coo_todense< int,npy_cdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const 
(*))arg5,(npy_cdouble_wrapper const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_clongdouble_wrapper *arg6 ; - npy_clongdouble_wrapper *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:coo_todense",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_todense" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "coo_todense" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "coo_todense" "', argument " "3"" of type '" 
"int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CLONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_clongdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_CLONGDOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (npy_clongdouble_wrapper*) array_data(temp7); - } - coo_todense< int,npy_clongdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_clongdouble_wrapper const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_todense(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[8]; - int ii; - - if (!PyTuple_Check(args)) 
SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 7); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_1(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UBYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_2(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_3(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_USHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_4(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_5(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UINT)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_6(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_7(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_ULONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_8(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_9(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_DOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_10(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_11(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CFLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_12(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_13(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_todense__SWIG_14(self, args); - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'coo_todense'.\n" - " Possible C/C++ prototypes are:\n" - " coo_todense< int,signed char >(int const,int const,int const,int const [],int const [],signed char const [],signed char [])\n" - " coo_todense< int,unsigned char >(int const,int const,int const,int const [],int const [],unsigned char const [],unsigned char [])\n" - " coo_todense< int,short >(int const,int const,int const,int const [],int const [],short const [],short [])\n" - " coo_todense< int,unsigned short >(int const,int const,int const,int const [],int const [],unsigned short const [],unsigned short [])\n" - " coo_todense< int,int >(int const,int const,int const,int const [],int const [],int const [],int [])\n" - " coo_todense< int,unsigned int >(int const,int const,int const,int const [],int const [],unsigned int const [],unsigned int [])\n" - " coo_todense< int,long long >(int const,int const,int const,int const [],int const [],long long const [],long long [])\n" - " coo_todense< int,unsigned long long >(int const,int const,int const,int const [],int const [],unsigned long long const [],unsigned long long [])\n" - " coo_todense< int,float >(int const,int const,int const,int const [],int const [],float const [],float [])\n" - " coo_todense< int,double >(int const,int const,int const,int const [],int const [],double const [],double [])\n" - " coo_todense< int,long double >(int const,int const,int const,int const [],int const [],long double const [],long double [])\n" - " coo_todense< int,npy_cfloat_wrapper >(int const,int const,int const,int const [],int const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper [])\n" - " coo_todense< int,npy_cdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper [])\n" - " coo_todense< 
int,npy_clongdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - signed char *arg4 ; - signed char *arg5 ; - signed char *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_BYTE, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - 
|| !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (signed char*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_BYTE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (signed char*) array_data(temp6); - } - coo_matvec< int,signed char >(arg1,(int const (*))arg2,(int const (*))arg3,(signed char const (*))arg4,(signed char const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - unsigned char *arg4 ; - unsigned char *arg5 ; - unsigned char *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 
; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_UBYTE, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (unsigned char*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_UBYTE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned char*) array_data(temp6); - } - coo_matvec< int,unsigned char >(arg1,(int const (*))arg2,(int const (*))arg3,(unsigned char const (*))arg4,(unsigned char const (*))arg5,arg6); - 
resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - short *arg4 ; - short *arg5 ; - short *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, 
PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_SHORT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (short*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_SHORT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (short*) array_data(temp6); - } - coo_matvec< int,short >(arg1,(int const (*))arg2,(int const (*))arg3,(short const (*))arg4,(short const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - unsigned short *arg4 ; - unsigned short *arg5 ; - unsigned 
short *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_USHORT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (unsigned short*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) 
SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_USHORT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned short*) array_data(temp6); - } - coo_matvec< int,unsigned short >(arg1,(int const (*))arg2,(int const (*))arg3,(unsigned short const (*))arg4,(unsigned short const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp 
size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - coo_matvec< int,int >(arg1,(int const (*))arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - 
if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - unsigned int *arg4 ; - unsigned int *arg5 ; - unsigned int *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_UINT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || 
!require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (unsigned int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_UINT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned int*) array_data(temp6); - } - coo_matvec< int,unsigned int >(arg1,(int const (*))arg2,(int const (*))arg3,(unsigned int const (*))arg4,(unsigned int const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - long long *arg4 ; - long long *arg5 ; - long long *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - 
PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_LONGLONG, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (long long*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_LONGLONG); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (long long*) array_data(temp6); - } - coo_matvec< int,long long >(arg1,(int const (*))arg2,(int const (*))arg3,(long long const (*))arg4,(long long const (*))arg5,arg6); - resultobj = 
SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - unsigned long long *arg4 ; - unsigned long long *arg5 ; - unsigned long long *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = 
obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_ULONGLONG, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (unsigned long long*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_ULONGLONG); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned long long*) array_data(temp6); - } - coo_matvec< int,unsigned long long >(arg1,(int const (*))arg2,(int const (*))arg3,(unsigned long long const (*))arg4,(unsigned long long const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject 
*args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - float *arg4 ; - float *arg5 ; - float *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_FLOAT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (float*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || 
!require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_FLOAT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (float*) array_data(temp6); - } - coo_matvec< int,float >(arg1,(int const (*))arg2,(int const (*))arg3,(float const (*))arg4,(float const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - double *arg4 ; - double *arg5 ; - double *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', 
argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_DOUBLE, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (double*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_DOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (double*) array_data(temp6); - } - coo_matvec< int,double >(arg1,(int const (*))arg2,(int const (*))arg3,(double const (*))arg4,(double const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if 
(is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - long double *arg4 ; - long double *arg5 ; - long double *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, 
PyArray_LONGDOUBLE, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (long double*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_LONGDOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (long double*) array_data(temp6); - } - coo_matvec< int,long double >(arg1,(int const (*))arg2,(int const (*))arg3,(long double const (*))arg4,(long double const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - npy_cfloat_wrapper *arg4 ; - npy_cfloat_wrapper *arg5 ; - npy_cfloat_wrapper *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - 
int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_CFLOAT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (npy_cfloat_wrapper*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_CFLOAT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (npy_cfloat_wrapper*) 
array_data(temp6); - } - coo_matvec< int,npy_cfloat_wrapper >(arg1,(int const (*))arg2,(int const (*))arg3,(npy_cfloat_wrapper const (*))arg4,(npy_cfloat_wrapper const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - npy_cdouble_wrapper *arg4 ; - npy_cdouble_wrapper *arg5 ; - npy_cdouble_wrapper *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || 
!require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_CDOUBLE, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (npy_cdouble_wrapper*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_CDOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (npy_cdouble_wrapper*) array_data(temp6); - } - coo_matvec< int,npy_cdouble_wrapper >(arg1,(int const (*))arg2,(int const (*))arg3,(npy_cdouble_wrapper const (*))arg4,(npy_cdouble_wrapper const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - 
} - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - npy_clongdouble_wrapper *arg4 ; - npy_clongdouble_wrapper *arg5 ; - npy_clongdouble_wrapper *arg6 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:coo_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_CLONGDOUBLE, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || 
!require_native(array4)) SWIG_fail; - - arg4 = (npy_clongdouble_wrapper*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_CLONGDOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (npy_clongdouble_wrapper*) array_data(temp6); - } - coo_matvec< int,npy_clongdouble_wrapper >(arg1,(int const (*))arg2,(int const (*))arg3,(npy_clongdouble_wrapper const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_matvec(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[7]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 6); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_1(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_2(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_3(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_4(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_5(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_6(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_7(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_8(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_9(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_10(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_11(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_12(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_13(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[1]) && PyArray_CanCastSafely(PyArray_TYPE(argv[1]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_coo_matvec__SWIG_14(self, args); - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'coo_matvec'.\n" - " Possible C/C++ prototypes are:\n" - " coo_matvec< int,signed char >(int const,int const [],int const [],signed char const [],signed char const [],signed char [])\n" - " coo_matvec< int,unsigned char >(int const,int const [],int const [],unsigned char const [],unsigned char const [],unsigned char [])\n" - " coo_matvec< int,short >(int const,int const [],int const [],short const [],short const [],short [])\n" - " coo_matvec< int,unsigned short >(int const,int const [],int const [],unsigned short const [],unsigned short const [],unsigned short [])\n" - " coo_matvec< int,int >(int const,int const [],int const [],int const [],int const [],int [])\n" - " coo_matvec< int,unsigned int >(int const,int const [],int const [],unsigned int const [],unsigned int const [],unsigned int [])\n" - " coo_matvec< int,long long >(int const,int const [],int const [],long long const [],long long const [],long long [])\n" - " coo_matvec< int,unsigned long long >(int const,int const [],int const [],unsigned long long const [],unsigned long long const [],unsigned long long [])\n" - " coo_matvec< int,float >(int const,int const [],int const [],float const [],float const [],float [])\n" - " coo_matvec< int,double >(int const,int const [],int const [],double const [],double const [],double [])\n" - " coo_matvec< int,long double >(int const,int const [],int const [],long double const [],long double const [],long double [])\n" - " coo_matvec< int,npy_cfloat_wrapper >(int const,int const [],int const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper [])\n" - " coo_matvec< int,npy_cdouble_wrapper >(int const,int const [],int const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper [])\n" - " coo_matvec< 
int,npy_clongdouble_wrapper >(int const,int const [],int const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_coo_count_diagonals(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - int result; - - if (!PyArg_ParseTuple(args,(char *)"OOO:coo_count_diagonals",&obj0,&obj1,&obj2)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "coo_count_diagonals" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - result = (int)coo_count_diagonals< int >(arg1,(int const (*))arg2,(int const (*))arg3); - resultobj = SWIG_From_int(static_cast< int >(result)); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - return NULL; -} - - 
-static PyMethodDef SwigMethods[] = { - { (char *)"SWIG_PyInstanceMethod_New", (PyCFunction)SWIG_PyInstanceMethod_New, METH_O, NULL}, - { (char *)"coo_tocsr", _wrap_coo_tocsr, METH_VARARGS, (char *)"\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, \n" - " int Bp, int Bj, signed char Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, \n" - " int Bp, int Bj, unsigned char Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, \n" - " int Bp, int Bj, short Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, \n" - " int Bp, int Bj, unsigned short Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, \n" - " int Bp, int Bj, int Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, \n" - " int Bp, int Bj, unsigned int Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, \n" - " int Bp, int Bj, long long Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, \n" - " int Bp, int Bj, unsigned long long Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, \n" - " int Bp, int Bj, float Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, \n" - " int Bp, int Bj, double Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, \n" - " int Bp, int Bj, long double Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx)\n" - "coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_clongdouble_wrapper Bx)\n" - ""}, - { (char *)"coo_tocsc", _wrap_coo_tocsc, METH_VARARGS, (char *)"\n" - "coo_tocsc(int n_row, int 
n_col, int nnz, int Ai, int Aj, signed char Ax, \n" - " int Bp, int Bi, signed char Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, \n" - " int Bp, int Bi, unsigned char Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, \n" - " int Bp, int Bi, short Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, \n" - " int Bp, int Bi, unsigned short Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, \n" - " int Bp, int Bi, int Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, \n" - " int Bp, int Bi, unsigned int Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, \n" - " int Bp, int Bi, long long Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, \n" - " int Bp, int Bi, unsigned long long Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, \n" - " int Bp, int Bi, float Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, \n" - " int Bp, int Bi, double Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, \n" - " int Bp, int Bi, long double Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx)\n" - "coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_clongdouble_wrapper Bx)\n" - ""}, - { (char *)"coo_todense", _wrap_coo_todense, METH_VARARGS, (char *)"\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, \n" - " signed char Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, \n" - " unsigned char Bx)\n" - "coo_todense(int n_row, int n_col, int 
nnz, int Ai, int Aj, short Ax, \n" - " short Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, \n" - " unsigned short Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, \n" - " int Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, \n" - " unsigned int Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, \n" - " long long Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, \n" - " unsigned long long Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, \n" - " float Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, \n" - " double Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, \n" - " long double Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, \n" - " npy_cfloat_wrapper Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, \n" - " npy_cdouble_wrapper Bx)\n" - "coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, \n" - " npy_clongdouble_wrapper Bx)\n" - ""}, - { (char *)"coo_matvec", _wrap_coo_matvec, METH_VARARGS, (char *)"\n" - "coo_matvec(int nnz, int Ai, int Aj, signed char Ax, signed char Xx, \n" - " signed char Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, unsigned char Ax, unsigned char Xx, \n" - " unsigned char Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, short Ax, short Xx, short Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, unsigned short Ax, unsigned short Xx, \n" - " unsigned short Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, int Ax, int Xx, int Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, unsigned int Ax, unsigned int Xx, \n" - " unsigned int Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, long long Ax, long long Xx, \n" - " long long Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, 
unsigned long long Ax, unsigned long long Xx, \n" - " unsigned long long Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, float Ax, float Xx, float Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, double Ax, double Xx, double Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, long double Ax, long double Xx, \n" - " long double Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx, \n" - " npy_cfloat_wrapper Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx, \n" - " npy_cdouble_wrapper Yx)\n" - "coo_matvec(int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, \n" - " npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)\n" - ""}, - { (char *)"coo_count_diagonals", _wrap_coo_count_diagonals, METH_VARARGS, (char *)"coo_count_diagonals(int nnz, int Ai, int Aj) -> int"}, - { NULL, NULL, 0, NULL } -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (BEGIN) -------- */ - -static swig_type_info _swigt__p_char = {"_p_char", "char *", 0, 0, (void*)0, 0}; - -static swig_type_info *swig_type_initial[] = { - &_swigt__p_char, -}; - -static swig_cast_info _swigc__p_char[] = { {&_swigt__p_char, 0, 0, 0},{0, 0, 0, 0}}; - -static swig_cast_info *swig_cast_initial[] = { - _swigc__p_char, -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (END) -------- */ - -static swig_const_info swig_const_table[] = { -{0, 0, 0, 0.0, 0, 0}}; - -#ifdef __cplusplus -} -#endif -/* ----------------------------------------------------------------------------- - * Type initialization: - * This problem is tough by the requirement that no dynamic - * memory is used. Also, since swig_type_info structures store pointers to - * swig_cast_info structures and swig_cast_info structures store pointers back - * to swig_type_info structures, we need some lookup code at initialization. - * The idea is that swig generates all the structures that are needed. - * The runtime then collects these partially filled structures. 
- * The SWIG_InitializeModule function takes these initial arrays out of - * swig_module, and does all the lookup, filling in the swig_module.types - * array with the correct data and linking the correct swig_cast_info - * structures together. - * - * The generated swig_type_info structures are assigned staticly to an initial - * array. We just loop through that array, and handle each type individually. - * First we lookup if this type has been already loaded, and if so, use the - * loaded structure instead of the generated one. Then we have to fill in the - * cast linked list. The cast data is initially stored in something like a - * two-dimensional array. Each row corresponds to a type (there are the same - * number of rows as there are in the swig_type_initial array). Each entry in - * a column is one of the swig_cast_info structures for that type. - * The cast_initial array is actually an array of arrays, because each row has - * a variable number of columns. So to actually build the cast linked list, - * we find the array of casts associated with the type, and loop through it - * adding the casts to the list. The one last trick we need to do is making - * sure the type pointer in the swig_cast_info struct is correct. - * - * First off, we lookup the cast->type name to see if it is already loaded. - * There are three cases to handle: - * 1) If the cast->type has already been loaded AND the type we are adding - * casting info to has not been loaded (it is in this module), THEN we - * replace the cast->type pointer with the type pointer that has already - * been loaded. - * 2) If BOTH types (the one we are adding casting info to, and the - * cast->type) are loaded, THEN the cast info has already been loaded by - * the previous module so we just ignore it. - * 3) Finally, if cast->type has not already been loaded, then we add that - * swig_cast_info to the linked list (because the cast->type) pointer will - * be correct. 
- * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* c-mode */ -#endif -#endif - -#if 0 -#define SWIGRUNTIME_DEBUG -#endif - - -SWIGRUNTIME void -SWIG_InitializeModule(void *clientdata) { - size_t i; - swig_module_info *module_head, *iter; - int found, init; - - clientdata = clientdata; - - /* check to see if the circular list has been setup, if not, set it up */ - if (swig_module.next==0) { - /* Initialize the swig_module */ - swig_module.type_initial = swig_type_initial; - swig_module.cast_initial = swig_cast_initial; - swig_module.next = &swig_module; - init = 1; - } else { - init = 0; - } - - /* Try and load any already created modules */ - module_head = SWIG_GetModule(clientdata); - if (!module_head) { - /* This is the first module loaded for this interpreter */ - /* so set the swig module into the interpreter */ - SWIG_SetModule(clientdata, &swig_module); - module_head = &swig_module; - } else { - /* the interpreter has loaded a SWIG module, but has it loaded this one? */ - found=0; - iter=module_head; - do { - if (iter==&swig_module) { - found=1; - break; - } - iter=iter->next; - } while (iter!= module_head); - - /* if the is found in the list, then all is done and we may leave */ - if (found) return; - /* otherwise we must add out module into the list */ - swig_module.next = module_head->next; - module_head->next = &swig_module; - } - - /* When multiple interpeters are used, a module could have already been initialized in - a different interpreter, but not yet have a pointer in this interpreter. - In this case, we do not want to continue adding types... 
everything should be - set up already */ - if (init == 0) return; - - /* Now work on filling in swig_module.types */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: size %d\n", swig_module.size); -#endif - for (i = 0; i < swig_module.size; ++i) { - swig_type_info *type = 0; - swig_type_info *ret; - swig_cast_info *cast; - -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); -#endif - - /* if there is another module already loaded */ - if (swig_module.next != &swig_module) { - type = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, swig_module.type_initial[i]->name); - } - if (type) { - /* Overwrite clientdata field */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found type %s\n", type->name); -#endif - if (swig_module.type_initial[i]->clientdata) { - type->clientdata = swig_module.type_initial[i]->clientdata; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found and overwrite type %s \n", type->name); -#endif - } - } else { - type = swig_module.type_initial[i]; - } - - /* Insert casting types */ - cast = swig_module.cast_initial[i]; - while (cast->type) { - /* Don't need to add information already in the list */ - ret = 0; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: look cast %s\n", cast->type->name); -#endif - if (swig_module.next != &swig_module) { - ret = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, cast->type->name); -#ifdef SWIGRUNTIME_DEBUG - if (ret) printf("SWIG_InitializeModule: found cast %s\n", ret->name); -#endif - } - if (ret) { - if (type == swig_module.type_initial[i]) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: skip old type %s\n", ret->name); -#endif - cast->type = ret; - ret = 0; - } else { - /* Check for casting already in the list */ - swig_cast_info *ocast = SWIG_TypeCheck(ret->name, type); -#ifdef SWIGRUNTIME_DEBUG - if (ocast) printf("SWIG_InitializeModule: skip old cast %s\n", ret->name); 
-#endif - if (!ocast) ret = 0; - } - } - - if (!ret) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: adding cast %s\n", cast->type->name); -#endif - if (type->cast) { - type->cast->prev = cast; - cast->next = type->cast; - } - type->cast = cast; - } - cast++; - } - /* Set entry in modules->types array equal to the type */ - swig_module.types[i] = type; - } - swig_module.types[i] = 0; - -#ifdef SWIGRUNTIME_DEBUG - printf("**** SWIG_InitializeModule: Cast List ******\n"); - for (i = 0; i < swig_module.size; ++i) { - int j = 0; - swig_cast_info *cast = swig_module.cast_initial[i]; - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); - while (cast->type) { - printf("SWIG_InitializeModule: cast type %s\n", cast->type->name); - cast++; - ++j; - } - printf("---- Total casts: %d\n",j); - } - printf("**** SWIG_InitializeModule: Cast List ******\n"); -#endif -} - -/* This function will propagate the clientdata field of type to -* any new swig_type_info structures that have been added into the list -* of equivalent types. It is like calling -* SWIG_TypeClientData(type, clientdata) a second time. 
-*/ -SWIGRUNTIME void -SWIG_PropagateClientData(void) { - size_t i; - swig_cast_info *equiv; - static int init_run = 0; - - if (init_run) return; - init_run = 1; - - for (i = 0; i < swig_module.size; i++) { - if (swig_module.types[i]->clientdata) { - equiv = swig_module.types[i]->cast; - while (equiv) { - if (!equiv->converter) { - if (equiv->type && !equiv->type->clientdata) - SWIG_TypeClientData(equiv->type, swig_module.types[i]->clientdata); - } - equiv = equiv->next; - } - } - } -} - -#ifdef __cplusplus -#if 0 -{ - /* c-mode */ -#endif -} -#endif - - - -#ifdef __cplusplus -extern "C" { -#endif - - /* Python-specific SWIG API */ -#define SWIG_newvarlink() SWIG_Python_newvarlink() -#define SWIG_addvarlink(p, name, get_attr, set_attr) SWIG_Python_addvarlink(p, name, get_attr, set_attr) -#define SWIG_InstallConstants(d, constants) SWIG_Python_InstallConstants(d, constants) - - /* ----------------------------------------------------------------------------- - * global variable support code. 
- * ----------------------------------------------------------------------------- */ - - typedef struct swig_globalvar { - char *name; /* Name of global variable */ - PyObject *(*get_attr)(void); /* Return the current value */ - int (*set_attr)(PyObject *); /* Set the value */ - struct swig_globalvar *next; - } swig_globalvar; - - typedef struct swig_varlinkobject { - PyObject_HEAD - swig_globalvar *vars; - } swig_varlinkobject; - - SWIGINTERN PyObject * - swig_varlink_repr(swig_varlinkobject *SWIGUNUSEDPARM(v)) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_InternFromString(""); -#else - return PyString_FromString(""); -#endif - } - - SWIGINTERN PyObject * - swig_varlink_str(swig_varlinkobject *v) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *str = PyUnicode_InternFromString("("); - PyObject *tail; - PyObject *joined; - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - tail = PyUnicode_FromString(var->name); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - if (var->next) { - tail = PyUnicode_InternFromString(", "); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - } - } - tail = PyUnicode_InternFromString(")"); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; -#else - PyObject *str = PyString_FromString("("); - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - PyString_ConcatAndDel(&str,PyString_FromString(var->name)); - if (var->next) PyString_ConcatAndDel(&str,PyString_FromString(", ")); - } - PyString_ConcatAndDel(&str,PyString_FromString(")")); -#endif - return str; - } - - SWIGINTERN int - swig_varlink_print(swig_varlinkobject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) { - char *tmp; - PyObject *str = swig_varlink_str(v); - fprintf(fp,"Swig global variables "); - fprintf(fp,"%s\n", tmp = SWIG_Python_str_AsChar(str)); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(str); - return 0; - } - 
- SWIGINTERN void - swig_varlink_dealloc(swig_varlinkobject *v) { - swig_globalvar *var = v->vars; - while (var) { - swig_globalvar *n = var->next; - free(var->name); - free(var); - var = n; - } - } - - SWIGINTERN PyObject * - swig_varlink_getattr(swig_varlinkobject *v, char *n) { - PyObject *res = NULL; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->get_attr)(); - break; - } - var = var->next; - } - if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN int - swig_varlink_setattr(swig_varlinkobject *v, char *n, PyObject *p) { - int res = 1; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->set_attr)(p); - break; - } - var = var->next; - } - if (res == 1 && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN PyTypeObject* - swig_varlink_type(void) { - static char varlink__doc__[] = "Swig var link object"; - static PyTypeObject varlink_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* Number of items in variable part (ob_size) */ -#endif - (char *)"swigvarlink", /* Type name (tp_name) */ - sizeof(swig_varlinkobject), /* Basic size (tp_basicsize) */ - 0, /* Itemsize (tp_itemsize) */ - (destructor) swig_varlink_dealloc, /* Deallocator (tp_dealloc) */ - (printfunc) swig_varlink_print, /* Print (tp_print) */ - (getattrfunc) swig_varlink_getattr, /* get attr (tp_getattr) */ - (setattrfunc) swig_varlink_setattr, /* Set attr (tp_setattr) */ - 0, /* tp_compare */ - (reprfunc) swig_varlink_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc) 
swig_varlink_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - varlink__doc__, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* tp_iter -> tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - varlink_type = tmp; - /* for Python 3 we already assigned ob_type in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - varlink_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &varlink_type; - } - - /* Create a variable linking object for use later */ - SWIGINTERN PyObject * - SWIG_Python_newvarlink(void) { - swig_varlinkobject *result = PyObject_NEW(swig_varlinkobject, swig_varlink_type()); - if (result) { - result->vars = 0; - } - return ((PyObject*) result); - } - - SWIGINTERN void - SWIG_Python_addvarlink(PyObject *p, char *name, PyObject *(*get_attr)(void), int (*set_attr)(PyObject *p)) { - swig_varlinkobject *v = (swig_varlinkobject *) p; - swig_globalvar *gv = (swig_globalvar *) malloc(sizeof(swig_globalvar)); - if (gv) { - size_t size = strlen(name)+1; - gv->name = (char *)malloc(size); - if (gv->name) { - strncpy(gv->name,name,size); - gv->get_attr = get_attr; - gv->set_attr = set_attr; - gv->next = v->vars; - } - } - v->vars = gv; - } - - SWIGINTERN PyObject * - SWIG_globals(void) { - static PyObject *_SWIG_globals = 0; - if (!_SWIG_globals) _SWIG_globals = SWIG_newvarlink(); - return _SWIG_globals; - } - - /* ----------------------------------------------------------------------------- - * constants/methods manipulation - * ----------------------------------------------------------------------------- */ - - /* Install Constants */ - SWIGINTERN void - SWIG_Python_InstallConstants(PyObject *d, swig_const_info constants[]) { - PyObject *obj 
= 0; - size_t i; - for (i = 0; constants[i].type; ++i) { - switch(constants[i].type) { - case SWIG_PY_POINTER: - obj = SWIG_NewPointerObj(constants[i].pvalue, *(constants[i]).ptype,0); - break; - case SWIG_PY_BINARY: - obj = SWIG_NewPackedObj(constants[i].pvalue, constants[i].lvalue, *(constants[i].ptype)); - break; - default: - obj = 0; - break; - } - if (obj) { - PyDict_SetItemString(d, constants[i].name, obj); - Py_DECREF(obj); - } - } - } - - /* -----------------------------------------------------------------------------*/ - /* Fix SwigMethods to carry the callback ptrs when needed */ - /* -----------------------------------------------------------------------------*/ - - SWIGINTERN void - SWIG_Python_FixMethods(PyMethodDef *methods, - swig_const_info *const_table, - swig_type_info **types, - swig_type_info **types_initial) { - size_t i; - for (i = 0; methods[i].ml_name; ++i) { - const char *c = methods[i].ml_doc; - if (c && (c = strstr(c, "swig_ptr: "))) { - int j; - swig_const_info *ci = 0; - const char *name = c + 10; - for (j = 0; const_table[j].type; ++j) { - if (strncmp(const_table[j].name, name, - strlen(const_table[j].name)) == 0) { - ci = &(const_table[j]); - break; - } - } - if (ci) { - void *ptr = (ci->type == SWIG_PY_POINTER) ? 
ci->pvalue : 0; - if (ptr) { - size_t shift = (ci->ptype) - types; - swig_type_info *ty = types_initial[shift]; - size_t ldoc = (c - methods[i].ml_doc); - size_t lptr = strlen(ty->name)+2*sizeof(void*)+2; - char *ndoc = (char*)malloc(ldoc + lptr + 10); - if (ndoc) { - char *buff = ndoc; - strncpy(buff, methods[i].ml_doc, ldoc); - buff += ldoc; - strncpy(buff, "swig_ptr: ", 10); - buff += 10; - SWIG_PackVoidPtr(buff, ptr, ty->name, lptr); - methods[i].ml_doc = ndoc; - } - } - } - } - } - } - -#ifdef __cplusplus -} -#endif - -/* -----------------------------------------------------------------------------* - * Partial Init method - * -----------------------------------------------------------------------------*/ - -#ifdef __cplusplus -extern "C" -#endif - -SWIGEXPORT -#if PY_VERSION_HEX >= 0x03000000 -PyObject* -#else -void -#endif -SWIG_init(void) { - PyObject *m, *d; -#if PY_VERSION_HEX >= 0x03000000 - static struct PyModuleDef SWIG_module = { - PyModuleDef_HEAD_INIT, - (char *) SWIG_name, - NULL, - -1, - SwigMethods, - NULL, - NULL, - NULL, - NULL - }; -#endif - - /* Fix SwigMethods to carry the callback ptrs when needed */ - SWIG_Python_FixMethods(SwigMethods, swig_const_table, swig_types, swig_type_initial); - -#if PY_VERSION_HEX >= 0x03000000 - m = PyModule_Create(&SWIG_module); -#else - m = Py_InitModule((char *) SWIG_name, SwigMethods); -#endif - d = PyModule_GetDict(m); - - SWIG_InitializeModule(0); - SWIG_InstallConstants(d,swig_const_table); - - - - import_array(); - -#if PY_VERSION_HEX >= 0x03000000 - return m; -#else - return; -#endif -} - diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csc.h b/scipy-0.10.1/scipy/sparse/sparsetools/csc.h deleted file mode 100644 index aac9b0ac16..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csc.h +++ /dev/null @@ -1,183 +0,0 @@ -#ifndef __CSC_H__ -#define __CSC_H__ - - -#include "csr.h" - - -/* - * Compute Y += A*X for CSC matrix A and dense vectors X,Y - * - * - * Input Arguments: - * I n_row - number of 
rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - column pointer - * I Ai[nnz(A)] - row indices - * T Ax[n_col] - nonzeros - * T Xx[n_col] - input vector - * - * Output Arguments: - * T Yx[n_row] - output vector - * - * Note: - * Output array Yx must be preallocated - * - * Complexity: Linear. Specifically O(nnz(A) + n_col) - * - */ -template -void csc_matvec(const I n_row, - const I n_col, - const I Ap[], - const I Ai[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - for(I j = 0; j < n_col; j++){ - I col_start = Ap[j]; - I col_end = Ap[j+1]; - - for(I ii = col_start; ii < col_end; ii++){ - I i = Ai[ii]; - Yx[i] += Ax[ii] * Xx[j]; - } - } -} - - -/* - * Compute Y += A*X for CSC matrix A and dense block vectors X,Y - * - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I n_vecs - number of column vectors in X and Y - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * T Xx[n_col,n_vecs] - input vector - * - * Output Arguments: - * T Yx[n_row,n_vecs] - output vector - * - * Note: - * Output array Yx must be preallocated - * - */ -template -void csc_matvecs(const I n_row, - const I n_col, - const I n_vecs, - const I Ap[], - const I Ai[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - for(I j = 0; j < n_col; j++){ - for(I ii = Ap[j]; ii < Ap[j+1]; ii++){ - const I i = Ai[ii]; - axpy(n_vecs, Ax[ii], Xx + n_vecs * j, Yx + n_vecs * i); - } - } -} - - - - -/* - * Derived methods - */ -template -void csc_diagonal(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - T Yx[]) -{ csr_diagonal(n_col, n_row, Ap, Aj, Ax, Yx); } - - -template -void csc_tocsr(const I n_row, - const I n_col, - const I Ap[], - const I Ai[], - const T Ax[], - I Bp[], - I Bj[], - T Bx[]) -{ csr_tocsc(n_col, n_row, Ap, Ai, Ax, Bp, Bj, Bx); } - - -template -void csc_matmat_pass1(const I n_row, - const I n_col, - const I Ap[], - const I Ai[], - const I Bp[], - const I 
Bi[], - I Cp[]) -{ csr_matmat_pass1(n_col, n_row, Bp, Bi, Ap, Ai, Cp); } - -template -void csc_matmat_pass2(const I n_row, - const I n_col, - const I Ap[], - const I Ai[], - const T Ax[], - const I Bp[], - const I Bi[], - const T Bx[], - I Cp[], - I Ci[], - T Cx[]) -{ csr_matmat_pass2(n_col, n_row, Bp, Bi, Bx, Ap, Ai, Ax, Cp, Ci, Cx); } - - - - - -template -void csc_elmul_csc(const I n_row, const I n_col, - const I Ap[], const I Ai[], const T Ax[], - const I Bp[], const I Bi[], const T Bx[], - I Cp[], I Ci[], T Cx[]) -{ - csr_elmul_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); -} - -template -void csc_eldiv_csc(const I n_row, const I n_col, - const I Ap[], const I Ai[], const T Ax[], - const I Bp[], const I Bi[], const T Bx[], - I Cp[], I Ci[], T Cx[]) -{ - csr_eldiv_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); -} - - -template -void csc_plus_csc(const I n_row, const I n_col, - const I Ap[], const I Ai[], const T Ax[], - const I Bp[], const I Bi[], const T Bx[], - I Cp[], I Ci[], T Cx[]) -{ - csr_plus_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); -} - -template -void csc_minus_csc(const I n_row, const I n_col, - const I Ap[], const I Ai[], const T Ax[], - const I Bp[], const I Bi[], const T Bx[], - I Cp[], I Ci[], T Cx[]) -{ - csr_minus_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); -} - - - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csc.py b/scipy-0.10.1/scipy/sparse/sparsetools/csc.py deleted file mode 100644 index 9403e95116..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csc.py +++ /dev/null @@ -1,423 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 2.0.1+capsulehack -# -# Do not make changes to this file unless you know what you are doing--modify -# the SWIG interface file instead. -# This file is compatible with both classic and new-style classes. 
- -from sys import version_info -if version_info >= (2,6,0): - def swig_import_helper(): - from os.path import dirname - import imp - fp = None - try: - fp, pathname, description = imp.find_module('_csc', [dirname(__file__)]) - except ImportError: - import _csc - return _csc - if fp is not None: - try: - _mod = imp.load_module('_csc', fp, pathname, description) - finally: - fp.close() - return _mod - _csc = swig_import_helper() - del swig_import_helper -else: - import _csc -del version_info -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. -def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'SwigPyObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError(name) - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -try: - _object = object - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 - - - -def csc_matmat_pass1(*args): - """ - csc_matmat_pass1(int n_row, int n_col, int Ap, int Ai, int Bp, int Bi, - int Cp) - """ - return _csc.csc_matmat_pass1(*args) - - -def csc_diagonal(*args): - """ - csc_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax, - signed char Yx) - csc_diagonal(int 
n_row, int n_col, int Ap, int Aj, unsigned char Ax, - unsigned char Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - unsigned short Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - unsigned int Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax, - long long Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - unsigned long long Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax, - long double Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Yx) - csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Yx) - """ - return _csc.csc_diagonal(*args) - -def csc_tocsr(*args): - """ - csc_tocsr(int n_row, int n_col, int Ap, int Ai, signed char Ax, - int Bp, int Bj, signed char Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - int Bp, int Bj, unsigned char Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, - int Bj, short Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - int Bp, int Bj, unsigned short Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, - int Bj, int Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - int Bp, int Bj, unsigned int Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, long long Ax, - int Bp, int Bj, long long Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - int Bp, int Bj, unsigned long long Bx) - 
csc_tocsr(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, - int Bj, float Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, - int Bj, double Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, long double Ax, - int Bp, int Bj, long double Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx) - csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx) - """ - return _csc.csc_tocsr(*args) - -def csc_matmat_pass2(*args): - """ - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, signed char Ax, - int Bp, int Bi, signed char Bx, int Cp, int Ci, - signed char Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - int Bp, int Bi, unsigned char Bx, int Cp, - int Ci, unsigned char Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, - int Bi, short Bx, int Cp, int Ci, short Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - int Bp, int Bi, unsigned short Bx, int Cp, - int Ci, unsigned short Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, - int Bi, int Bx, int Cp, int Ci, int Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - int Bp, int Bi, unsigned int Bx, int Cp, - int Ci, unsigned int Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long long Ax, - int Bp, int Bi, long long Bx, int Cp, int Ci, - long long Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx, - int Cp, int Ci, unsigned long long Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, - int Bi, float Bx, int Cp, int Ci, float Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, 
double Ax, int Bp, - int Bi, double Bx, int Cp, int Ci, double Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long double Ax, - int Bp, int Bi, long double Bx, int Cp, int Ci, - long double Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx, - int Cp, int Ci, npy_cfloat_wrapper Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx, - int Cp, int Ci, npy_cdouble_wrapper Cx) - csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, - int Bp, int Bi, npy_clongdouble_wrapper Bx, - int Cp, int Ci, npy_clongdouble_wrapper Cx) - """ - return _csc.csc_matmat_pass2(*args) - -def csc_matvec(*args): - """ - csc_matvec(int n_row, int n_col, int Ap, int Ai, signed char Ax, - signed char Xx, signed char Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - unsigned char Xx, unsigned char Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, short Ax, short Xx, - short Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - unsigned short Xx, unsigned short Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx, - int Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - unsigned int Xx, unsigned int Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, long long Ax, - long long Xx, long long Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - unsigned long long Xx, unsigned long long Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx, - float Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx, - double Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, long double Ax, - long double Xx, long double Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx) - csc_matvec(int 
n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx) - csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx) - """ - return _csc.csc_matvec(*args) - -def csc_matvecs(*args): - """ - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, signed char Ax, - signed char Xx, signed char Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned char Ax, - unsigned char Xx, unsigned char Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, short Ax, - short Xx, short Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned short Ax, - unsigned short Xx, unsigned short Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, int Ax, - int Xx, int Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned int Ax, - unsigned int Xx, unsigned int Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long long Ax, - long long Xx, long long Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned long long Ax, - unsigned long long Xx, - unsigned long long Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, float Ax, - float Xx, float Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, double Ax, - double Xx, double Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long double Ax, - long double Xx, long double Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Xx, - npy_cfloat_wrapper Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Xx, - npy_cdouble_wrapper Yx) - csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx, - npy_clongdouble_wrapper Yx) - """ - return _csc.csc_matvecs(*args) - 
-def csc_elmul_csc(*args): - """ - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, - int Bp, int Bi, signed char Bx, int Cp, int Ci, - signed char Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - int Bp, int Bi, unsigned char Bx, int Cp, - int Ci, unsigned char Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, - int Bi, short Bx, int Cp, int Ci, short Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - int Bp, int Bi, unsigned short Bx, int Cp, - int Ci, unsigned short Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, - int Bi, int Bx, int Cp, int Ci, int Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - int Bp, int Bi, unsigned int Bx, int Cp, - int Ci, unsigned int Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, - int Bp, int Bi, long long Bx, int Cp, int Ci, - long long Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx, - int Cp, int Ci, unsigned long long Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, - int Bi, float Bx, int Cp, int Ci, float Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, - int Bi, double Bx, int Cp, int Ci, double Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, - int Bp, int Bi, long double Bx, int Cp, int Ci, - long double Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx, - int Cp, int Ci, npy_cfloat_wrapper Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx, - int Cp, int Ci, npy_cdouble_wrapper Cx) - csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, - int Bp, int Bi, npy_clongdouble_wrapper Bx, - int Cp, int Ci, npy_clongdouble_wrapper Cx) - 
""" - return _csc.csc_elmul_csc(*args) - -def csc_eldiv_csc(*args): - """ - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, - int Bp, int Bi, signed char Bx, int Cp, int Ci, - signed char Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - int Bp, int Bi, unsigned char Bx, int Cp, - int Ci, unsigned char Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, - int Bi, short Bx, int Cp, int Ci, short Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - int Bp, int Bi, unsigned short Bx, int Cp, - int Ci, unsigned short Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, - int Bi, int Bx, int Cp, int Ci, int Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - int Bp, int Bi, unsigned int Bx, int Cp, - int Ci, unsigned int Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, - int Bp, int Bi, long long Bx, int Cp, int Ci, - long long Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx, - int Cp, int Ci, unsigned long long Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, - int Bi, float Bx, int Cp, int Ci, float Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, - int Bi, double Bx, int Cp, int Ci, double Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, - int Bp, int Bi, long double Bx, int Cp, int Ci, - long double Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx, - int Cp, int Ci, npy_cfloat_wrapper Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx, - int Cp, int Ci, npy_cdouble_wrapper Cx) - csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, - int Bp, int Bi, npy_clongdouble_wrapper Bx, - int 
Cp, int Ci, npy_clongdouble_wrapper Cx) - """ - return _csc.csc_eldiv_csc(*args) - -def csc_plus_csc(*args): - """ - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, - int Bp, int Bi, signed char Bx, int Cp, int Ci, - signed char Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - int Bp, int Bi, unsigned char Bx, int Cp, - int Ci, unsigned char Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, - int Bi, short Bx, int Cp, int Ci, short Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - int Bp, int Bi, unsigned short Bx, int Cp, - int Ci, unsigned short Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, - int Bi, int Bx, int Cp, int Ci, int Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - int Bp, int Bi, unsigned int Bx, int Cp, - int Ci, unsigned int Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, - int Bp, int Bi, long long Bx, int Cp, int Ci, - long long Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx, - int Cp, int Ci, unsigned long long Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, - int Bi, float Bx, int Cp, int Ci, float Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, - int Bi, double Bx, int Cp, int Ci, double Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, - int Bp, int Bi, long double Bx, int Cp, int Ci, - long double Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx, - int Cp, int Ci, npy_cfloat_wrapper Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx, - int Cp, int Ci, npy_cdouble_wrapper Cx) - csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, - int Bp, int Bi, 
npy_clongdouble_wrapper Bx, - int Cp, int Ci, npy_clongdouble_wrapper Cx) - """ - return _csc.csc_plus_csc(*args) - -def csc_minus_csc(*args): - """ - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, - int Bp, int Bi, signed char Bx, int Cp, int Ci, - signed char Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, - int Bp, int Bi, unsigned char Bx, int Cp, - int Ci, unsigned char Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, - int Bi, short Bx, int Cp, int Ci, short Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, - int Bp, int Bi, unsigned short Bx, int Cp, - int Ci, unsigned short Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, - int Bi, int Bx, int Cp, int Ci, int Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, - int Bp, int Bi, unsigned int Bx, int Cp, - int Ci, unsigned int Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, - int Bp, int Bi, long long Bx, int Cp, int Ci, - long long Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx, - int Cp, int Ci, unsigned long long Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, - int Bi, float Bx, int Cp, int Ci, float Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, - int Bi, double Bx, int Cp, int Ci, double Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, - int Bp, int Bi, long double Bx, int Cp, int Ci, - long double Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx, - int Cp, int Ci, npy_cfloat_wrapper Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx, - int Cp, int Ci, npy_cdouble_wrapper Cx) - csc_minus_csc(int n_row, int n_col, int Ap, int Ai, 
npy_clongdouble_wrapper Ax, - int Bp, int Bi, npy_clongdouble_wrapper Bx, - int Cp, int Ci, npy_clongdouble_wrapper Cx) - """ - return _csc.csc_minus_csc(*args) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csc_wrap.cxx b/scipy-0.10.1/scipy/sparse/sparsetools/csc_wrap.cxx deleted file mode 100644 index ce036ce4f3..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csc_wrap.cxx +++ /dev/null @@ -1,32385 +0,0 @@ -/* ---------------------------------------------------------------------------- - * This file was automatically generated by SWIG (http://www.swig.org). - * Version 2.0.1+capsulehack - * - * This file is not intended to be easily readable and contains a number of - * coding conventions designed to improve portability and efficiency. Do not make - * changes to this file unless you know what you are doing--modify the SWIG - * interface file instead. - * ----------------------------------------------------------------------------- */ - -#define SWIGPYTHON -#define SWIG_PYTHON_DIRECTOR_NO_VTABLE - - -#ifdef __cplusplus -/* SwigValueWrapper is described in swig.swg */ -template class SwigValueWrapper { - struct SwigMovePointer { - T *ptr; - SwigMovePointer(T *p) : ptr(p) { } - ~SwigMovePointer() { delete ptr; } - SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } - } pointer; - SwigValueWrapper& operator=(const SwigValueWrapper& rhs); - SwigValueWrapper(const SwigValueWrapper& rhs); -public: - SwigValueWrapper() : pointer(0) { } - SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } - operator T&() const { return *pointer.ptr; } - T *operator&() { return pointer.ptr; } -}; - -template T SwigValueInit() { - return T(); -} -#endif - -/* ----------------------------------------------------------------------------- - * This section contains generic SWIG labels for method/variable - * declarations/attributes, and other 
compiler dependent labels. - * ----------------------------------------------------------------------------- */ - -/* template workaround for compilers that cannot correctly implement the C++ standard */ -#ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template -# else -# define SWIGTEMPLATEDISAMBIGUATOR -# endif -#endif - -/* inline attribute */ -#ifndef SWIGINLINE -# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) -# define SWIGINLINE inline -# else -# define SWIGINLINE -# endif -#endif - -/* attribute recognised by some compilers to avoid 'unused' warnings */ -#ifndef SWIGUNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -# elif defined(__ICC) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -#endif - -#ifndef SWIG_MSC_UNSUPPRESS_4505 -# if defined(_MSC_VER) -# pragma warning(disable : 4505) /* unreferenced local function has been removed */ -# endif -#endif - -#ifndef SWIGUNUSEDPARM -# ifdef __cplusplus -# define SWIGUNUSEDPARM(p) -# else -# define SWIGUNUSEDPARM(p) p SWIGUNUSED -# endif -#endif - -/* internal SWIG method */ -#ifndef SWIGINTERN -# define SWIGINTERN static SWIGUNUSED -#endif - -/* internal inline SWIG method */ -#ifndef SWIGINTERNINLINE -# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE -#endif - -/* exporting methods */ -#if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -# ifndef GCC_HASCLASSVISIBILITY -# define GCC_HASCLASSVISIBILITY -# endif -#endif - -#ifndef SWIGEXPORT -# if 
defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# if defined(STATIC_LINKED) -# define SWIGEXPORT -# else -# define SWIGEXPORT __declspec(dllexport) -# endif -# else -# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) -# define SWIGEXPORT __attribute__ ((visibility("default"))) -# else -# define SWIGEXPORT -# endif -# endif -#endif - -/* calling conventions for Windows */ -#ifndef SWIGSTDCALL -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# define SWIGSTDCALL __stdcall -# else -# define SWIGSTDCALL -# endif -#endif - -/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ -#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -#endif - -/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ -#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) -# define _SCL_SECURE_NO_DEPRECATE -#endif - - - -/* Python.h has to appear first */ -#include - -/* ----------------------------------------------------------------------------- - * swigrun.swg - * - * This file contains generic C API SWIG runtime support for pointer - * type checking. - * ----------------------------------------------------------------------------- */ - -/* This should only be incremented when either the layout of swig_type_info changes, - or for whatever reason, the runtime changes incompatibly */ -#define SWIG_RUNTIME_VERSION "4" - -/* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */ -#ifdef SWIG_TYPE_TABLE -# define SWIG_QUOTE_STRING(x) #x -# define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x) -# define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE) -#else -# define SWIG_TYPE_TABLE_NAME -#endif - -/* - You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for - creating a static or dynamic library from the SWIG runtime code. 
- In 99.9% of the cases, SWIG just needs to declare them as 'static'. - - But only do this if strictly necessary, ie, if you have problems - with your compiler or suchlike. -*/ - -#ifndef SWIGRUNTIME -# define SWIGRUNTIME SWIGINTERN -#endif - -#ifndef SWIGRUNTIMEINLINE -# define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE -#endif - -/* Generic buffer size */ -#ifndef SWIG_BUFFER_SIZE -# define SWIG_BUFFER_SIZE 1024 -#endif - -/* Flags for pointer conversions */ -#define SWIG_POINTER_DISOWN 0x1 -#define SWIG_CAST_NEW_MEMORY 0x2 - -/* Flags for new pointer objects */ -#define SWIG_POINTER_OWN 0x1 - - -/* - Flags/methods for returning states. - - The SWIG conversion methods, as ConvertPtr, return an integer - that tells if the conversion was successful or not. And if not, - an error code can be returned (see swigerrors.swg for the codes). - - Use the following macros/flags to set or process the returning - states. - - In old versions of SWIG, code such as the following was usually written: - - if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) { - // success code - } else { - //fail code - } - - Now you can be more explicit: - - int res = SWIG_ConvertPtr(obj,vptr,ty.flags); - if (SWIG_IsOK(res)) { - // success code - } else { - // fail code - } - - which is the same really, but now you can also do - - Type *ptr; - int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags); - if (SWIG_IsOK(res)) { - // success code - if (SWIG_IsNewObj(res) { - ... - delete *ptr; - } else { - ... - } - } else { - // fail code - } - - I.e., now SWIG_ConvertPtr can return new objects and you can - identify the case and take care of the deallocation. Of course that - also requires SWIG_ConvertPtr to return new result values, such as - - int SWIG_ConvertPtr(obj, ptr,...) 
{ - if () { - if () { - *ptr = ; - return SWIG_NEWOBJ; - } else { - *ptr = ; - return SWIG_OLDOBJ; - } - } else { - return SWIG_BADOBJ; - } - } - - Of course, returning the plain '0(success)/-1(fail)' still works, but you can be - more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the - SWIG errors code. - - Finally, if the SWIG_CASTRANK_MODE is enabled, the result code - allows to return the 'cast rank', for example, if you have this - - int food(double) - int fooi(int); - - and you call - - food(1) // cast rank '1' (1 -> 1.0) - fooi(1) // cast rank '0' - - just use the SWIG_AddCast()/SWIG_CheckState() -*/ - -#define SWIG_OK (0) -#define SWIG_ERROR (-1) -#define SWIG_IsOK(r) (r >= 0) -#define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError) - -/* The CastRankLimit says how many bits are used for the cast rank */ -#define SWIG_CASTRANKLIMIT (1 << 8) -/* The NewMask denotes the object was created (using new/malloc) */ -#define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1) -/* The TmpMask is for in/out typemaps that use temporal objects */ -#define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1) -/* Simple returning values */ -#define SWIG_BADOBJ (SWIG_ERROR) -#define SWIG_OLDOBJ (SWIG_OK) -#define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK) -#define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK) -/* Check, add and del mask methods */ -#define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r) -#define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r) -#define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK)) -#define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r) -#define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? 
(r & ~SWIG_TMPOBJMASK) : r) -#define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK)) - -/* Cast-Rank Mode */ -#if defined(SWIG_CASTRANK_MODE) -# ifndef SWIG_TypeRank -# define SWIG_TypeRank unsigned long -# endif -# ifndef SWIG_MAXCASTRANK /* Default cast allowed */ -# define SWIG_MAXCASTRANK (2) -# endif -# define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1) -# define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK) -SWIGINTERNINLINE int SWIG_AddCast(int r) { - return SWIG_IsOK(r) ? ((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r; -} -SWIGINTERNINLINE int SWIG_CheckState(int r) { - return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0; -} -#else /* no cast-rank mode */ -# define SWIG_AddCast -# define SWIG_CheckState(r) (SWIG_IsOK(r) ? 1 : 0) -#endif - - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void *(*swig_converter_func)(void *, int *); -typedef struct swig_type_info *(*swig_dycast_func)(void **); - -/* Structure to store information on one type */ -typedef struct swig_type_info { - const char *name; /* mangled name of this type */ - const char *str; /* human readable name of this type */ - swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ - struct swig_cast_info *cast; /* linked list of types that can cast into this type */ - void *clientdata; /* language specific type data */ - int owndata; /* flag if the structure owns the clientdata */ -} swig_type_info; - -/* Structure to store a type and conversion function used for casting */ -typedef struct swig_cast_info { - swig_type_info *type; /* pointer to type that is equivalent to this type */ - swig_converter_func converter; /* function to cast the void pointers */ - struct swig_cast_info *next; /* pointer to next cast in linked list */ - struct swig_cast_info *prev; /* pointer to the previous cast */ -} swig_cast_info; - -/* Structure used to store module information - * Each module generates one structure like this, and the runtime collects - * all of these 
structures and stores them in a circularly linked list.*/ -typedef struct swig_module_info { - swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */ - size_t size; /* Number of types in this module */ - struct swig_module_info *next; /* Pointer to next element in circularly linked list */ - swig_type_info **type_initial; /* Array of initially generated type structures */ - swig_cast_info **cast_initial; /* Array of initially generated casting structures */ - void *clientdata; /* Language specific module data */ -} swig_module_info; - -/* - Compare two type names skipping the space characters, therefore - "char*" == "char *" and "Class" == "Class", etc. - - Return 0 when the two name types are equivalent, as in - strncmp, but skipping ' '. -*/ -SWIGRUNTIME int -SWIG_TypeNameComp(const char *f1, const char *l1, - const char *f2, const char *l2) { - for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) { - while ((*f1 == ' ') && (f1 != l1)) ++f1; - while ((*f2 == ' ') && (f2 != l2)) ++f2; - if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1; - } - return (int)((l1 - f1) - (l2 - f2)); -} - -/* - Check type equivalence in a name list like ||... - Return 0 if not equal, 1 if equal -*/ -SWIGRUNTIME int -SWIG_TypeEquiv(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0; - if (*ne) ++ne; - } - return equiv; -} - -/* - Check type equivalence in a name list like ||... - Return 0 if equal, -1 if nb < tb, 1 if nb > tb -*/ -SWIGRUNTIME int -SWIG_TypeCompare(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 
1 : 0; - if (*ne) ++ne; - } - return equiv; -} - - -/* - Check the typename -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheck(const char *c, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (strcmp(iter->type->name, c) == 0) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Identical to SWIG_TypeCheck, except strcmp is replaced with a pointer comparison -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (iter->type == from) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Cast a pointer up an inheritance hierarchy -*/ -SWIGRUNTIMEINLINE void * -SWIG_TypeCast(swig_cast_info *ty, void *ptr, int *newmemory) { - return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr, newmemory); -} - -/* - Dynamic pointer casting. 
Down an inheritance hierarchy -*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) { - swig_type_info *lastty = ty; - if (!ty || !ty->dcast) return ty; - while (ty && (ty->dcast)) { - ty = (*ty->dcast)(ptr); - if (ty) lastty = ty; - } - return lastty; -} - -/* - Return the name associated with this type -*/ -SWIGRUNTIMEINLINE const char * -SWIG_TypeName(const swig_type_info *ty) { - return ty->name; -} - -/* - Return the pretty name associated with this type, - that is an unmangled type name in a form presentable to the user. -*/ -SWIGRUNTIME const char * -SWIG_TypePrettyName(const swig_type_info *type) { - /* The "str" field contains the equivalent pretty names of the - type, separated by vertical-bar characters. We choose - to print the last name, as it is often (?) the most - specific. */ - if (!type) return NULL; - if (type->str != NULL) { - const char *last_name = type->str; - const char *s; - for (s = type->str; *s; s++) - if (*s == '|') last_name = s+1; - return last_name; - } - else - return type->name; -} - -/* - Set the clientdata field for a type -*/ -SWIGRUNTIME void -SWIG_TypeClientData(swig_type_info *ti, void *clientdata) { - swig_cast_info *cast = ti->cast; - /* if (ti->clientdata == clientdata) return; */ - ti->clientdata = clientdata; - - while (cast) { - if (!cast->converter) { - swig_type_info *tc = cast->type; - if (!tc->clientdata) { - SWIG_TypeClientData(tc, clientdata); - } - } - cast = cast->next; - } -} -SWIGRUNTIME void -SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) { - SWIG_TypeClientData(ti, clientdata); - ti->owndata = 1; -} - -/* - Search for a swig_type_info structure only by mangled name - Search is a O(log #types) - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_MangledTypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - swig_module_info *iter = start; - do { - if (iter->size) { - register size_t l = 0; - register size_t r = iter->size - 1; - do { - /* since l+r >= 0, we can (>> 1) instead (/ 2) */ - register size_t i = (l + r) >> 1; - const char *iname = iter->types[i]->name; - if (iname) { - register int compare = strcmp(name, iname); - if (compare == 0) { - return iter->types[i]; - } else if (compare < 0) { - if (i) { - r = i - 1; - } else { - break; - } - } else if (compare > 0) { - l = i + 1; - } - } else { - break; /* should never happen */ - } - } while (l <= r); - } - iter = iter->next; - } while (iter != end); - return 0; -} - -/* - Search for a swig_type_info structure for either a mangled name or a human readable name. - It first searches the mangled names of the types, which is a O(log #types) - If a type is not found it then searches the human readable names, which is O(#types). - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - /* STEP 1: Search the name field using binary search */ - swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name); - if (ret) { - return ret; - } else { - /* STEP 2: If the type hasn't been found, do a complete search - of the str field (the human readable name) */ - swig_module_info *iter = start; - do { - register size_t i = 0; - for (; i < iter->size; ++i) { - if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name))) - return iter->types[i]; - } - iter = iter->next; - } while (iter != end); - } - - /* neither found a match */ - return 0; -} - -/* - Pack binary data into a string -*/ -SWIGRUNTIME char * -SWIG_PackData(char *c, void *ptr, size_t sz) { - static const char hex[17] = "0123456789abcdef"; - register const unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register unsigned char uu = *u; - *(c++) = hex[(uu & 0xf0) >> 4]; - *(c++) = hex[uu & 0xf]; - } - return c; -} - -/* - Unpack binary data from a string -*/ -SWIGRUNTIME const char * -SWIG_UnpackData(const char *c, void *ptr, size_t sz) { - register unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register char d = *(c++); - register unsigned char uu; - if ((d >= '0') && (d <= '9')) - uu = ((d - '0') << 4); - else if ((d >= 'a') && (d <= 'f')) - uu = ((d - ('a'-10)) << 4); - else - return (char *) 0; - d = *(c++); - if ((d >= '0') && (d <= '9')) - uu |= (d - '0'); - else if ((d >= 'a') && (d <= 'f')) - uu |= (d - ('a'-10)); - else - return (char *) 0; - *u = uu; - } - return c; -} - -/* - Pack 'void *' into a string buffer. 
-*/ -SWIGRUNTIME char * -SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) { - char *r = buff; - if ((2*sizeof(void *) + 2) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,&ptr,sizeof(void *)); - if (strlen(name) + 1 > (bsz - (r - buff))) return 0; - strcpy(r,name); - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - *ptr = (void *) 0; - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sizeof(void *)); -} - -SWIGRUNTIME char * -SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) { - char *r = buff; - size_t lname = (name ? strlen(name) : 0); - if ((2*sz + 2 + lname) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,ptr,sz); - if (lname) { - strncpy(r,name,lname+1); - } else { - *r = 0; - } - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - memset(ptr,0,sz); - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sz); -} - -#ifdef __cplusplus -} -#endif - -/* Errors in SWIG */ -#define SWIG_UnknownError -1 -#define SWIG_IOError -2 -#define SWIG_RuntimeError -3 -#define SWIG_IndexError -4 -#define SWIG_TypeError -5 -#define SWIG_DivisionByZero -6 -#define SWIG_OverflowError -7 -#define SWIG_SyntaxError -8 -#define SWIG_ValueError -9 -#define SWIG_SystemError -10 -#define SWIG_AttributeError -11 -#define SWIG_MemoryError -12 -#define SWIG_NullReferenceError -13 - - - -/* Compatibility macros for Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - -#define PyClass_Check(obj) PyObject_IsInstance(obj, (PyObject *)&PyType_Type) -#define PyInt_Check(x) PyLong_Check(x) -#define PyInt_AsLong(x) PyLong_AsLong(x) -#define PyInt_FromLong(x) PyLong_FromLong(x) -#define PyString_Format(fmt, args) PyUnicode_Format(fmt, args) - -#endif - 
-#ifndef Py_TYPE -# define Py_TYPE(op) ((op)->ob_type) -#endif - -/* SWIG APIs for compatibility of both Python 2 & 3 */ - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_FromFormat PyUnicode_FromFormat -#else -# define SWIG_Python_str_FromFormat PyString_FromFormat -#endif - - -/* Warning: This function will allocate a new string in Python 3, - * so please call SWIG_Python_str_DelForPy3(x) to free the space. - */ -SWIGINTERN char* -SWIG_Python_str_AsChar(PyObject *str) -{ -#if PY_VERSION_HEX >= 0x03000000 - char *cstr; - char *newstr; - Py_ssize_t len; - str = PyUnicode_AsUTF8String(str); - PyBytes_AsStringAndSize(str, &cstr, &len); - newstr = (char *) malloc(len+1); - memcpy(newstr, cstr, len+1); - Py_XDECREF(str); - return newstr; -#else - return PyString_AsString(str); -#endif -} - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_DelForPy3(x) free( (void*) (x) ) -#else -# define SWIG_Python_str_DelForPy3(x) -#endif - - -SWIGINTERN PyObject* -SWIG_Python_str_FromChar(const char *c) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_FromString(c); -#else - return PyString_FromString(c); -#endif -} - -/* Add PyOS_snprintf for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) -# define PyOS_snprintf _snprintf -# else -# define PyOS_snprintf snprintf -# endif -#endif - -/* A crude PyString_FromFormat implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 - -#ifndef SWIG_PYBUFFER_SIZE -# define SWIG_PYBUFFER_SIZE 1024 -#endif - -static PyObject * -PyString_FromFormat(const char *fmt, ...) { - va_list ap; - char buf[SWIG_PYBUFFER_SIZE * 2]; - int res; - va_start(ap, fmt); - res = vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - return (res < 0 || res >= (int)sizeof(buf)) ? 
0 : PyString_FromString(buf); -} -#endif - -/* Add PyObject_Del for old Pythons */ -#if PY_VERSION_HEX < 0x01060000 -# define PyObject_Del(op) PyMem_DEL((op)) -#endif -#ifndef PyObject_DEL -# define PyObject_DEL PyObject_Del -#endif - -/* A crude PyExc_StopIteration exception for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# ifndef PyExc_StopIteration -# define PyExc_StopIteration PyExc_RuntimeError -# endif -# ifndef PyObject_GenericGetAttr -# define PyObject_GenericGetAttr 0 -# endif -#endif - -/* Py_NotImplemented is defined in 2.1 and up. */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef Py_NotImplemented -# define Py_NotImplemented PyExc_RuntimeError -# endif -#endif - -/* A crude PyString_AsStringAndSize implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef PyString_AsStringAndSize -# define PyString_AsStringAndSize(obj, s, len) {*s = PyString_AsString(obj); *len = *s ? strlen(*s) : 0;} -# endif -#endif - -/* PySequence_Size for old Pythons */ -#if PY_VERSION_HEX < 0x02000000 -# ifndef PySequence_Size -# define PySequence_Size PySequence_Length -# endif -#endif - -/* PyBool_FromLong for old Pythons */ -#if PY_VERSION_HEX < 0x02030000 -static -PyObject *PyBool_FromLong(long ok) -{ - PyObject *result = ok ? 
Py_True : Py_False; - Py_INCREF(result); - return result; -} -#endif - -/* Py_ssize_t for old Pythons */ -/* This code is as recommended by: */ -/* http://www.python.org/dev/peps/pep-0353/#conversion-guidelines */ -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -# define PY_SSIZE_T_MAX INT_MAX -# define PY_SSIZE_T_MIN INT_MIN -#endif - -/* ----------------------------------------------------------------------------- - * error manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIME PyObject* -SWIG_Python_ErrorType(int code) { - PyObject* type = 0; - switch(code) { - case SWIG_MemoryError: - type = PyExc_MemoryError; - break; - case SWIG_IOError: - type = PyExc_IOError; - break; - case SWIG_RuntimeError: - type = PyExc_RuntimeError; - break; - case SWIG_IndexError: - type = PyExc_IndexError; - break; - case SWIG_TypeError: - type = PyExc_TypeError; - break; - case SWIG_DivisionByZero: - type = PyExc_ZeroDivisionError; - break; - case SWIG_OverflowError: - type = PyExc_OverflowError; - break; - case SWIG_SyntaxError: - type = PyExc_SyntaxError; - break; - case SWIG_ValueError: - type = PyExc_ValueError; - break; - case SWIG_SystemError: - type = PyExc_SystemError; - break; - case SWIG_AttributeError: - type = PyExc_AttributeError; - break; - default: - type = PyExc_RuntimeError; - } - return type; -} - - -SWIGRUNTIME void -SWIG_Python_AddErrorMsg(const char* mesg) -{ - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - - if (PyErr_Occurred()) PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - PyErr_Clear(); - Py_XINCREF(type); - - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - Py_DECREF(value); - } else { - PyErr_SetString(PyExc_RuntimeError, mesg); - } -} - -#if defined(SWIG_PYTHON_NO_THREADS) -# if 
defined(SWIG_PYTHON_THREADS) -# undef SWIG_PYTHON_THREADS -# endif -#endif -#if defined(SWIG_PYTHON_THREADS) /* Threading support is enabled */ -# if !defined(SWIG_PYTHON_USE_GIL) && !defined(SWIG_PYTHON_NO_USE_GIL) -# if (PY_VERSION_HEX >= 0x02030000) /* For 2.3 or later, use the PyGILState calls */ -# define SWIG_PYTHON_USE_GIL -# endif -# endif -# if defined(SWIG_PYTHON_USE_GIL) /* Use PyGILState threads calls */ -# ifndef SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_INITIALIZE_THREADS PyEval_InitThreads() -# endif -# ifdef __cplusplus /* C++ code */ - class SWIG_Python_Thread_Block { - bool status; - PyGILState_STATE state; - public: - void end() { if (status) { PyGILState_Release(state); status = false;} } - SWIG_Python_Thread_Block() : status(true), state(PyGILState_Ensure()) {} - ~SWIG_Python_Thread_Block() { end(); } - }; - class SWIG_Python_Thread_Allow { - bool status; - PyThreadState *save; - public: - void end() { if (status) { PyEval_RestoreThread(save); status = false; }} - SWIG_Python_Thread_Allow() : status(true), save(PyEval_SaveThread()) {} - ~SWIG_Python_Thread_Allow() { end(); } - }; -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK SWIG_Python_Thread_Block _swig_thread_block -# define SWIG_PYTHON_THREAD_END_BLOCK _swig_thread_block.end() -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW SWIG_Python_Thread_Allow _swig_thread_allow -# define SWIG_PYTHON_THREAD_END_ALLOW _swig_thread_allow.end() -# else /* C code */ -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK PyGILState_STATE _swig_thread_block = PyGILState_Ensure() -# define SWIG_PYTHON_THREAD_END_BLOCK PyGILState_Release(_swig_thread_block) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW PyThreadState *_swig_thread_allow = PyEval_SaveThread() -# define SWIG_PYTHON_THREAD_END_ALLOW PyEval_RestoreThread(_swig_thread_allow) -# endif -# else /* Old thread way, not implemented, user must provide it */ -# if !defined(SWIG_PYTHON_INITIALIZE_THREADS) -# define SWIG_PYTHON_INITIALIZE_THREADS -# endif -# if 
!defined(SWIG_PYTHON_THREAD_BEGIN_BLOCK) -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_END_BLOCK) -# define SWIG_PYTHON_THREAD_END_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_BEGIN_ALLOW) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# endif -# if !defined(SWIG_PYTHON_THREAD_END_ALLOW) -# define SWIG_PYTHON_THREAD_END_ALLOW -# endif -# endif -#else /* No thread support */ -# define SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# define SWIG_PYTHON_THREAD_END_BLOCK -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# define SWIG_PYTHON_THREAD_END_ALLOW -#endif - -/* ----------------------------------------------------------------------------- - * Python API portion that goes into the runtime - * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* ----------------------------------------------------------------------------- - * Constant declarations - * ----------------------------------------------------------------------------- */ - -/* Constant Types */ -#define SWIG_PY_POINTER 4 -#define SWIG_PY_BINARY 5 - -/* Constant information structure */ -typedef struct swig_const_info { - int type; - char *name; - long lvalue; - double dvalue; - void *pvalue; - swig_type_info **ptype; -} swig_const_info; - - -/* ----------------------------------------------------------------------------- - * Wrapper of PyInstanceMethod_New() used in Python 3 - * It is exported to the generated module, used for -fastproxy - * ----------------------------------------------------------------------------- */ -SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *self, PyObject *func) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyInstanceMethod_New(func); -#else - return NULL; -#endif -} - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - -/* 
----------------------------------------------------------------------------- - * pyrun.swg - * - * This file contains the runtime support for Python modules - * and includes code for managing global variables and pointer - * type checking. - * - * ----------------------------------------------------------------------------- */ - -/* Common SWIG API */ - -/* for raw pointers */ -#define SWIG_Python_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, 0) -#define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtr(obj, pptr, type, flags) -#define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, own) -#define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(ptr, type, flags) -#define SWIG_CheckImplicit(ty) SWIG_Python_CheckImplicit(ty) -#define SWIG_AcquirePtr(ptr, src) SWIG_Python_AcquirePtr(ptr, src) -#define swig_owntype int - -/* for raw packed data */ -#define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewPackedObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - -/* for class or struct pointers */ -#define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags) -#define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags) - -/* for C or C++ function pointers */ -#define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_Python_ConvertFunctionPtr(obj, pptr, type) -#define SWIG_NewFunctionPtrObj(ptr, type) SWIG_Python_NewPointerObj(ptr, type, 0) - -/* for C++ member pointers, ie, member methods */ -#define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewMemberObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - - -/* Runtime API */ - -#define SWIG_GetModule(clientdata) SWIG_Python_GetModule() -#define SWIG_SetModule(clientdata, pointer) SWIG_Python_SetModule(pointer) -#define 
SWIG_NewClientData(obj) SwigPyClientData_New(obj) - -#define SWIG_SetErrorObj SWIG_Python_SetErrorObj -#define SWIG_SetErrorMsg SWIG_Python_SetErrorMsg -#define SWIG_ErrorType(code) SWIG_Python_ErrorType(code) -#define SWIG_Error(code, msg) SWIG_Python_SetErrorMsg(SWIG_ErrorType(code), msg) -#define SWIG_fail goto fail - -/* - * Python 2.7 and newer and Python 3.1 and newer should use Capsules API instead of - * CObjects API. - */ -#if ((PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION > 6) || \ - (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 0)) -#define USE_CAPSULES -#define TYPE_POINTER_NAME \ - ((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION ".type_pointer_capsule" SWIG_TYPE_TABLE_NAME) -#endif - -/* Runtime API implementation */ - -/* Error manipulation */ - -SWIGINTERN void -SWIG_Python_SetErrorObj(PyObject *errtype, PyObject *obj) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetObject(errtype, obj); - Py_DECREF(obj); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -SWIGINTERN void -SWIG_Python_SetErrorMsg(PyObject *errtype, const char *msg) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetString(errtype, (char *) msg); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -#define SWIG_Python_Raise(obj, type, desc) SWIG_Python_SetErrorObj(SWIG_Python_ExceptionType(desc), obj) - -/* Set a constant value */ - -SWIGINTERN void -SWIG_Python_SetConstant(PyObject *d, const char *name, PyObject *obj) { - PyDict_SetItemString(d, (char*) name, obj); - Py_DECREF(obj); -} - -/* Append a value to the result obj */ - -SWIGINTERN PyObject* -SWIG_Python_AppendOutput(PyObject* result, PyObject* obj) { -#if !defined(SWIG_PYTHON_OUTPUT_TUPLE) - if (!result) { - result = obj; - } else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyList_Check(result)) { - PyObject *o2 = result; - result = PyList_New(1); - PyList_SetItem(result, 0, o2); - } - PyList_Append(result,obj); - Py_DECREF(obj); - } - return result; -#else - PyObject* o2; - PyObject* o3; - if (!result) { - result = obj; - } 
else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyTuple_Check(result)) { - o2 = result; - result = PyTuple_New(1); - PyTuple_SET_ITEM(result, 0, o2); - } - o3 = PyTuple_New(1); - PyTuple_SET_ITEM(o3, 0, obj); - o2 = result; - result = PySequence_Concat(o2, o3); - Py_DECREF(o2); - Py_DECREF(o3); - } - return result; -#endif -} - -/* Unpack the argument tuple */ - -SWIGINTERN int -SWIG_Python_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, PyObject **objs) -{ - if (!args) { - if (!min && !max) { - return 1; - } else { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got none", - name, (min == max ? "" : "at least "), (int)min); - return 0; - } - } - if (!PyTuple_Check(args)) { - PyErr_SetString(PyExc_SystemError, "UnpackTuple() argument list is not a tuple"); - return 0; - } else { - register Py_ssize_t l = PyTuple_GET_SIZE(args); - if (l < min) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? "" : "at least "), (int)min, (int)l); - return 0; - } else if (l > max) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? 
"" : "at most "), (int)max, (int)l); - return 0; - } else { - register int i; - for (i = 0; i < l; ++i) { - objs[i] = PyTuple_GET_ITEM(args, i); - } - for (; l < max; ++l) { - objs[l] = 0; - } - return i + 1; - } - } -} - -/* A functor is a function object with one single object argument */ -#if PY_VERSION_HEX >= 0x02020000 -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunctionObjArgs(functor, obj, NULL); -#else -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunction(functor, "O", obj); -#endif - -/* - Helper for static pointer initialization for both C and C++ code, for example - static PyObject *SWIG_STATIC_POINTER(MyVar) = NewSomething(...); -*/ -#ifdef __cplusplus -#define SWIG_STATIC_POINTER(var) var -#else -#define SWIG_STATIC_POINTER(var) var = 0; if (!var) var -#endif - -/* ----------------------------------------------------------------------------- - * Pointer declarations - * ----------------------------------------------------------------------------- */ - -/* Flags for new pointer objects */ -#define SWIG_POINTER_NOSHADOW (SWIG_POINTER_OWN << 1) -#define SWIG_POINTER_NEW (SWIG_POINTER_NOSHADOW | SWIG_POINTER_OWN) - -#define SWIG_POINTER_IMPLICIT_CONV (SWIG_POINTER_DISOWN << 1) - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* How to access Py_None */ -#if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# ifndef SWIG_PYTHON_NO_BUILD_NONE -# ifndef SWIG_PYTHON_BUILD_NONE -# define SWIG_PYTHON_BUILD_NONE -# endif -# endif -#endif - -#ifdef SWIG_PYTHON_BUILD_NONE -# ifdef Py_None -# undef Py_None -# define Py_None SWIG_Py_None() -# endif -SWIGRUNTIMEINLINE PyObject * -_SWIG_Py_None(void) -{ - PyObject *none = Py_BuildValue((char*)""); - Py_DECREF(none); - return none; -} -SWIGRUNTIME PyObject * -SWIG_Py_None(void) -{ - static PyObject *SWIG_STATIC_POINTER(none) = _SWIG_Py_None(); - return none; -} -#endif - -/* The python void return value */ - -SWIGRUNTIMEINLINE PyObject * 
-SWIG_Py_Void(void) -{ - PyObject *none = Py_None; - Py_INCREF(none); - return none; -} - -/* SwigPyClientData */ - -typedef struct { - PyObject *klass; - PyObject *newraw; - PyObject *newargs; - PyObject *destroy; - int delargs; - int implicitconv; -} SwigPyClientData; - -SWIGRUNTIMEINLINE int -SWIG_Python_CheckImplicit(swig_type_info *ty) -{ - SwigPyClientData *data = (SwigPyClientData *)ty->clientdata; - return data ? data->implicitconv : 0; -} - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_ExceptionType(swig_type_info *desc) { - SwigPyClientData *data = desc ? (SwigPyClientData *) desc->clientdata : 0; - PyObject *klass = data ? data->klass : 0; - return (klass ? klass : PyExc_RuntimeError); -} - - -SWIGRUNTIME SwigPyClientData * -SwigPyClientData_New(PyObject* obj) -{ - if (!obj) { - return 0; - } else { - SwigPyClientData *data = (SwigPyClientData *)malloc(sizeof(SwigPyClientData)); - /* the klass element */ - data->klass = obj; - Py_INCREF(data->klass); - /* the newraw method and newargs arguments used to create a new raw instance */ - if (PyClass_Check(obj)) { - data->newraw = 0; - data->newargs = obj; - Py_INCREF(obj); - } else { -#if (PY_VERSION_HEX < 0x02020000) - data->newraw = 0; -#else - data->newraw = PyObject_GetAttrString(data->klass, (char *)"__new__"); -#endif - if (data->newraw) { - Py_INCREF(data->newraw); - data->newargs = PyTuple_New(1); - PyTuple_SetItem(data->newargs, 0, obj); - } else { - data->newargs = obj; - } - Py_INCREF(data->newargs); - } - /* the destroy method, aka as the C++ delete method */ - data->destroy = PyObject_GetAttrString(data->klass, (char *)"__swig_destroy__"); - if (PyErr_Occurred()) { - PyErr_Clear(); - data->destroy = 0; - } - if (data->destroy) { - int flags; - Py_INCREF(data->destroy); - flags = PyCFunction_GET_FLAGS(data->destroy); -#ifdef METH_O - data->delargs = !(flags & (METH_O)); -#else - data->delargs = 0; -#endif - } else { - data->delargs = 0; - } - data->implicitconv = 0; - return data; - } -} - 
-SWIGRUNTIME void -SwigPyClientData_Del(SwigPyClientData* data) -{ - Py_XDECREF(data->newraw); - Py_XDECREF(data->newargs); - Py_XDECREF(data->destroy); -} - -/* =============== SwigPyObject =====================*/ - -typedef struct { - PyObject_HEAD - void *ptr; - swig_type_info *ty; - int own; - PyObject *next; -} SwigPyObject; - -SWIGRUNTIME PyObject * -SwigPyObject_long(SwigPyObject *v) -{ - return PyLong_FromVoidPtr(v->ptr); -} - -SWIGRUNTIME PyObject * -SwigPyObject_format(const char* fmt, SwigPyObject *v) -{ - PyObject *res = NULL; - PyObject *args = PyTuple_New(1); - if (args) { - if (PyTuple_SetItem(args, 0, SwigPyObject_long(v)) == 0) { - PyObject *ofmt = SWIG_Python_str_FromChar(fmt); - if (ofmt) { -#if PY_VERSION_HEX >= 0x03000000 - res = PyUnicode_Format(ofmt,args); -#else - res = PyString_Format(ofmt,args); -#endif - Py_DECREF(ofmt); - } - Py_DECREF(args); - } - } - return res; -} - -SWIGRUNTIME PyObject * -SwigPyObject_oct(SwigPyObject *v) -{ - return SwigPyObject_format("%o",v); -} - -SWIGRUNTIME PyObject * -SwigPyObject_hex(SwigPyObject *v) -{ - return SwigPyObject_format("%x",v); -} - -SWIGRUNTIME PyObject * -#ifdef METH_NOARGS -SwigPyObject_repr(SwigPyObject *v) -#else -SwigPyObject_repr(SwigPyObject *v, PyObject *args) -#endif -{ - const char *name = SWIG_TypePrettyName(v->ty); - PyObject *repr = SWIG_Python_str_FromFormat("", name, v); - if (v->next) { -#ifdef METH_NOARGS - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next); -#else - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next, args); -#endif -#if PY_VERSION_HEX >= 0x03000000 - PyObject *joined = PyUnicode_Concat(repr, nrep); - Py_DecRef(repr); - Py_DecRef(nrep); - repr = joined; -#else - PyString_ConcatAndDel(&repr,nrep); -#endif - } - return repr; -} - -SWIGRUNTIME int -SwigPyObject_print(SwigPyObject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char *str; -#ifdef METH_NOARGS - PyObject *repr = SwigPyObject_repr(v); -#else - PyObject *repr = SwigPyObject_repr(v, 
NULL); -#endif - if (repr) { - str = SWIG_Python_str_AsChar(repr); - fputs(str, fp); - SWIG_Python_str_DelForPy3(str); - Py_DECREF(repr); - return 0; - } else { - return 1; - } -} - -SWIGRUNTIME PyObject * -SwigPyObject_str(SwigPyObject *v) -{ - char result[SWIG_BUFFER_SIZE]; - return SWIG_PackVoidPtr(result, v->ptr, v->ty->name, sizeof(result)) ? - SWIG_Python_str_FromChar(result) : 0; -} - -SWIGRUNTIME int -SwigPyObject_compare(SwigPyObject *v, SwigPyObject *w) -{ - void *i = v->ptr; - void *j = w->ptr; - return (i < j) ? -1 : ((i > j) ? 1 : 0); -} - -/* Added for Python 3.x, would it also be useful for Python 2.x? */ -SWIGRUNTIME PyObject* -SwigPyObject_richcompare(SwigPyObject *v, SwigPyObject *w, int op) -{ - PyObject* res; - if( op != Py_EQ && op != Py_NE ) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - if( (SwigPyObject_compare(v, w)==0) == (op == Py_EQ) ) - res = Py_True; - else - res = Py_False; - Py_INCREF(res); - return res; -} - - -SWIGRUNTIME PyTypeObject* _PySwigObject_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyObject_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigObject_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyObject_Check(PyObject *op) { - return (Py_TYPE(op) == SwigPyObject_type()) - || (strcmp(Py_TYPE(op)->tp_name,"SwigPyObject") == 0); -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own); - -SWIGRUNTIME void -SwigPyObject_dealloc(PyObject *v) -{ - SwigPyObject *sobj = (SwigPyObject *) v; - PyObject *next = sobj->next; - if (sobj->own == SWIG_POINTER_OWN) { - swig_type_info *ty = sobj->ty; - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - PyObject *destroy = data ? 
data->destroy : 0; - if (destroy) { - /* destroy is always a VARARGS method */ - PyObject *res; - if (data->delargs) { - /* we need to create a temporary object to carry the destroy operation */ - PyObject *tmp = SwigPyObject_New(sobj->ptr, ty, 0); - res = SWIG_Python_CallFunctor(destroy, tmp); - Py_DECREF(tmp); - } else { - PyCFunction meth = PyCFunction_GET_FUNCTION(destroy); - PyObject *mself = PyCFunction_GET_SELF(destroy); - res = ((*meth)(mself, v)); - } - Py_XDECREF(res); - } -#if !defined(SWIG_PYTHON_SILENT_MEMLEAK) - else { - const char *name = SWIG_TypePrettyName(ty); - printf("swig/python detected a memory leak of type '%s', no destructor found.\n", (name ? name : "unknown")); - } -#endif - } - Py_XDECREF(next); - PyObject_DEL(v); -} - -SWIGRUNTIME PyObject* -SwigPyObject_append(PyObject* v, PyObject* next) -{ - SwigPyObject *sobj = (SwigPyObject *) v; -#ifndef METH_O - PyObject *tmp = 0; - if (!PyArg_ParseTuple(next,(char *)"O:append", &tmp)) return NULL; - next = tmp; -#endif - if (!SwigPyObject_Check(next)) { - return NULL; - } - sobj->next = next; - Py_INCREF(next); - return SWIG_Py_Void(); -} - -SWIGRUNTIME PyObject* -#ifdef METH_NOARGS -SwigPyObject_next(PyObject* v) -#else -SwigPyObject_next(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *) v; - if (sobj->next) { - Py_INCREF(sobj->next); - return sobj->next; - } else { - return SWIG_Py_Void(); - } -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_disown(PyObject *v) -#else -SwigPyObject_disown(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = 0; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_acquire(PyObject *v) -#else -SwigPyObject_acquire(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = SWIG_POINTER_OWN; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* 
-SwigPyObject_own(PyObject *v, PyObject *args) -{ - PyObject *val = 0; -#if (PY_VERSION_HEX < 0x02020000) - if (!PyArg_ParseTuple(args,(char *)"|O:own",&val)) -#else - if (!PyArg_UnpackTuple(args, (char *)"own", 0, 1, &val)) -#endif - { - return NULL; - } - else - { - SwigPyObject *sobj = (SwigPyObject *)v; - PyObject *obj = PyBool_FromLong(sobj->own); - if (val) { -#ifdef METH_NOARGS - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v); - } else { - SwigPyObject_disown(v); - } -#else - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v,args); - } else { - SwigPyObject_disown(v,args); - } -#endif - } - return obj; - } -} - -#ifdef METH_O -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_NOARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_NOARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_O, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_NOARGS, (char *)"returns the next 'this' object"}, - {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_NOARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#else -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_VARARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_VARARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_VARARGS, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_VARARGS, (char *)"returns the next 'this' object"}, - 
{(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_VARARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#endif - -#if PY_VERSION_HEX < 0x02020000 -SWIGINTERN PyObject * -SwigPyObject_getattr(SwigPyObject *sobj,char *name) -{ - return Py_FindMethod(swigobject_methods, (PyObject *)sobj, name); -} -#endif - -SWIGRUNTIME PyTypeObject* -_PySwigObject_type(void) { - static char swigobject_doc[] = "Swig object carries a C/C++ instance pointer"; - - static PyNumberMethods SwigPyObject_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - /* nb_divide removed in Python 3 */ -#if PY_VERSION_HEX < 0x03000000 - (binaryfunc)0, /*nb_divide*/ -#endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0,/*nb_power*/ - (unaryfunc)0, /*nb_negative*/ - (unaryfunc)0, /*nb_positive*/ - (unaryfunc)0, /*nb_absolute*/ - (inquiry)0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ -#if PY_VERSION_HEX < 0x03000000 - 0, /*nb_coerce*/ -#endif - (unaryfunc)SwigPyObject_long, /*nb_int*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_long, /*nb_long*/ -#else - 0, /*nb_reserved*/ -#endif - (unaryfunc)0, /*nb_float*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_oct, /*nb_oct*/ - (unaryfunc)SwigPyObject_hex, /*nb_hex*/ -#endif -#if PY_VERSION_HEX >= 0x03000000 /* 3.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index, nb_inplace_divide removed */ -#elif PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */ -#elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ -#elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */ - 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */ -#endif - }; - - static PyTypeObject swigpyobject_type; - static int type_init = 0; - if 
(!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyObject", /* tp_name */ - sizeof(SwigPyObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyObject_dealloc, /* tp_dealloc */ - (printfunc)SwigPyObject_print, /* tp_print */ -#if PY_VERSION_HEX < 0x02020000 - (getattrfunc)SwigPyObject_getattr, /* tp_getattr */ -#else - (getattrfunc)0, /* tp_getattr */ -#endif - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX >= 0x03000000 - 0, /* tp_reserved in 3.0.1, tp_compare in 3.0.0 but not used */ -#else - (cmpfunc)SwigPyObject_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyObject_repr, /* tp_repr */ - &SwigPyObject_as_number, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyObject_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigobject_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)SwigPyObject_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* tp_iternext */ - swigobject_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpyobject_type = tmp; - /* for Python 3 we already assigned ob_type in 
PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpyobject_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpyobject_type; -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own) -{ - SwigPyObject *sobj = PyObject_NEW(SwigPyObject, SwigPyObject_type()); - if (sobj) { - sobj->ptr = ptr; - sobj->ty = ty; - sobj->own = own; - sobj->next = 0; - } - return (PyObject *)sobj; -} - -/* ----------------------------------------------------------------------------- - * Implements a simple Swig Packed type, and use it instead of string - * ----------------------------------------------------------------------------- */ - -typedef struct { - PyObject_HEAD - void *pack; - swig_type_info *ty; - size_t size; -} SwigPyPacked; - -SWIGRUNTIME int -SwigPyPacked_print(SwigPyPacked *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char result[SWIG_BUFFER_SIZE]; - fputs("pack, v->size, 0, sizeof(result))) { - fputs("at ", fp); - fputs(result, fp); - } - fputs(v->ty->name,fp); - fputs(">", fp); - return 0; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_repr(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) { - return SWIG_Python_str_FromFormat("", result, v->ty->name); - } else { - return SWIG_Python_str_FromFormat("", v->ty->name); - } -} - -SWIGRUNTIME PyObject * -SwigPyPacked_str(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))){ - return SWIG_Python_str_FromFormat("%s%s", result, v->ty->name); - } else { - return SWIG_Python_str_FromChar(v->ty->name); - } -} - -SWIGRUNTIME int -SwigPyPacked_compare(SwigPyPacked *v, SwigPyPacked *w) -{ - size_t i = v->size; - size_t j = w->size; - int s = (i < j) ? -1 : ((i > j) ? 1 : 0); - return s ? 
s : strncmp((char *)v->pack, (char *)w->pack, 2*v->size); -} - -SWIGRUNTIME PyTypeObject* _PySwigPacked_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyPacked_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigPacked_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyPacked_Check(PyObject *op) { - return ((op)->ob_type == _PySwigPacked_type()) - || (strcmp((op)->ob_type->tp_name,"SwigPyPacked") == 0); -} - -SWIGRUNTIME void -SwigPyPacked_dealloc(PyObject *v) -{ - if (SwigPyPacked_Check(v)) { - SwigPyPacked *sobj = (SwigPyPacked *) v; - free(sobj->pack); - } - PyObject_DEL(v); -} - -SWIGRUNTIME PyTypeObject* -_PySwigPacked_type(void) { - static char swigpacked_doc[] = "Swig object carries a C/C++ instance pointer"; - static PyTypeObject swigpypacked_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX>=0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyPacked", /* tp_name */ - sizeof(SwigPyPacked), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyPacked_dealloc, /* tp_dealloc */ - (printfunc)SwigPyPacked_print, /* tp_print */ - (getattrfunc)0, /* tp_getattr */ - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX>=0x03000000 - 0, /* tp_reserved in 3.0.1 */ -#else - (cmpfunc)SwigPyPacked_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyPacked_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyPacked_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigpacked_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* 
tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpypacked_type = tmp; - /* for Python 3 the ob_type already assigned in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpypacked_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpypacked_type; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_New(void *ptr, size_t size, swig_type_info *ty) -{ - SwigPyPacked *sobj = PyObject_NEW(SwigPyPacked, SwigPyPacked_type()); - if (sobj) { - void *pack = malloc(size); - if (pack) { - memcpy(pack, ptr, size); - sobj->pack = pack; - sobj->ty = ty; - sobj->size = size; - } else { - PyObject_DEL((PyObject *) sobj); - sobj = 0; - } - } - return (PyObject *) sobj; -} - -SWIGRUNTIME swig_type_info * -SwigPyPacked_UnpackData(PyObject *obj, void *ptr, size_t size) -{ - if (SwigPyPacked_Check(obj)) { - SwigPyPacked *sobj = (SwigPyPacked *)obj; - if (sobj->size != size) return 0; - memcpy(ptr, sobj->pack, size); - return sobj->ty; - } else { - return 0; - } -} - -/* ----------------------------------------------------------------------------- - * pointers/data manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIMEINLINE PyObject * -_SWIG_This(void) -{ - return SWIG_Python_str_FromChar("this"); -} - -static PyObject *swig_this = NULL; - -SWIGRUNTIME PyObject * -SWIG_This(void) -{ - if (swig_this == NULL) - swig_this = _SWIG_This(); - return swig_this; -} - -/* #define SWIG_PYTHON_SLOW_GETSET_THIS */ - -/* TODO: I don't know 
how to implement the fast getset in Python 3 right now */ -#if PY_VERSION_HEX>=0x03000000 -#define SWIG_PYTHON_SLOW_GETSET_THIS -#endif - -SWIGRUNTIME SwigPyObject * -SWIG_Python_GetSwigThis(PyObject *pyobj) -{ - if (SwigPyObject_Check(pyobj)) { - return (SwigPyObject *) pyobj; - } else { - PyObject *obj = 0; -#if (!defined(SWIG_PYTHON_SLOW_GETSET_THIS) && (PY_VERSION_HEX >= 0x02030000)) - if (PyInstance_Check(pyobj)) { - obj = _PyInstance_Lookup(pyobj, SWIG_This()); - } else { - PyObject **dictptr = _PyObject_GetDictPtr(pyobj); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - obj = dict ? PyDict_GetItem(dict, SWIG_This()) : 0; - } else { -#ifdef PyWeakref_CheckProxy - if (PyWeakref_CheckProxy(pyobj)) { - PyObject *wobj = PyWeakref_GET_OBJECT(pyobj); - return wobj ? SWIG_Python_GetSwigThis(wobj) : 0; - } -#endif - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } - } - } -#else - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } -#endif - if (obj && !SwigPyObject_Check(obj)) { - /* a PyObject is called 'this', try to get the 'real this' - SwigPyObject from it */ - return SWIG_Python_GetSwigThis(obj); - } - return (SwigPyObject *)obj; - } -} - -/* Acquire a pointer value */ - -SWIGRUNTIME int -SWIG_Python_AcquirePtr(PyObject *obj, int own) { - if (own == SWIG_POINTER_OWN) { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (sobj) { - int oldown = sobj->own; - sobj->own = own; - return oldown; - } - } - return 0; -} - -/* Convert a pointer value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPtrAndOwn(PyObject *obj, void **ptr, swig_type_info *ty, int flags, int *own) { - if (!obj) return SWIG_ERROR; - if (obj == Py_None) { - if (ptr) *ptr = 0; - return SWIG_OK; - } else { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (own) - *own = 0; - while (sobj) { - void *vptr = 
sobj->ptr; - if (ty) { - swig_type_info *to = sobj->ty; - if (to == ty) { - /* no type cast needed */ - if (ptr) *ptr = vptr; - break; - } else { - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) { - sobj = (SwigPyObject *)sobj->next; - } else { - if (ptr) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - if (newmemory == SWIG_CAST_NEW_MEMORY) { - assert(own); /* badly formed typemap which will lead to a memory leak - it must set and use own to delete *ptr */ - if (own) - *own = *own | SWIG_CAST_NEW_MEMORY; - } - } - break; - } - } - } else { - if (ptr) *ptr = vptr; - break; - } - } - if (sobj) { - if (own) - *own = *own | sobj->own; - if (flags & SWIG_POINTER_DISOWN) { - sobj->own = 0; - } - return SWIG_OK; - } else { - int res = SWIG_ERROR; - if (flags & SWIG_POINTER_IMPLICIT_CONV) { - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - if (data && !data->implicitconv) { - PyObject *klass = data->klass; - if (klass) { - PyObject *impconv; - data->implicitconv = 1; /* avoid recursion and call 'explicit' constructors*/ - impconv = SWIG_Python_CallFunctor(klass, obj); - data->implicitconv = 0; - if (PyErr_Occurred()) { - PyErr_Clear(); - impconv = 0; - } - if (impconv) { - SwigPyObject *iobj = SWIG_Python_GetSwigThis(impconv); - if (iobj) { - void *vptr; - res = SWIG_Python_ConvertPtrAndOwn((PyObject*)iobj, &vptr, ty, 0, 0); - if (SWIG_IsOK(res)) { - if (ptr) { - *ptr = vptr; - /* transfer the ownership to 'ptr' */ - iobj->own = 0; - res = SWIG_AddCast(res); - res = SWIG_AddNewMask(res); - } else { - res = SWIG_AddCast(res); - } - } - } - Py_DECREF(impconv); - } - } - } - } - return res; - } - } -} - -/* Convert a function ptr value */ - -SWIGRUNTIME int -SWIG_Python_ConvertFunctionPtr(PyObject *obj, void **ptr, swig_type_info *ty) { - if (!PyCFunction_Check(obj)) { - return SWIG_ConvertPtr(obj, ptr, ty, 0); - } else { - void *vptr = 0; - - /* here we get the method pointer for callbacks */ - const char *doc = 
(((PyCFunctionObject *)obj) -> m_ml -> ml_doc); - const char *desc = doc ? strstr(doc, "swig_ptr: ") : 0; - if (desc) - desc = ty ? SWIG_UnpackVoidPtr(desc + 10, &vptr, ty->name) : 0; - if (!desc) - return SWIG_ERROR; - if (ty) { - swig_cast_info *tc = SWIG_TypeCheck(desc,ty); - if (tc) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - assert(!newmemory); /* newmemory handling not yet implemented */ - } else { - return SWIG_ERROR; - } - } else { - *ptr = vptr; - } - return SWIG_OK; - } -} - -/* Convert a packed value value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPacked(PyObject *obj, void *ptr, size_t sz, swig_type_info *ty) { - swig_type_info *to = SwigPyPacked_UnpackData(obj, ptr, sz); - if (!to) return SWIG_ERROR; - if (ty) { - if (to != ty) { - /* check type cast? */ - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) return SWIG_ERROR; - } - } - return SWIG_OK; -} - -/* ----------------------------------------------------------------------------- - * Create a new pointer object - * ----------------------------------------------------------------------------- */ - -/* - Create a new instance object, without calling __init__, and set the - 'this' attribute. 
-*/ - -SWIGRUNTIME PyObject* -SWIG_Python_NewShadowInstance(SwigPyClientData *data, PyObject *swig_this) -{ -#if (PY_VERSION_HEX >= 0x02020000) - PyObject *inst = 0; - PyObject *newraw = data->newraw; - if (newraw) { - inst = PyObject_Call(newraw, data->newargs, NULL); - if (inst) { -#if !defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - PyDict_SetItem(dict, SWIG_This(), swig_this); - } - } -#else - PyObject *key = SWIG_This(); - PyObject_SetAttr(inst, key, swig_this); -#endif - } - } else { -#if PY_VERSION_HEX >= 0x03000000 - inst = PyBaseObject_Type.tp_new((PyTypeObject*) data->newargs, Py_None, Py_None); - PyObject_SetAttr(inst, SWIG_This(), swig_this); - Py_TYPE(inst)->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; -#else - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); -#endif - } - return inst; -#else -#if (PY_VERSION_HEX >= 0x02010000) - PyObject *inst; - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); - return (PyObject *) inst; -#else - PyInstanceObject *inst = PyObject_NEW(PyInstanceObject, &PyInstance_Type); - if (inst == NULL) { - return NULL; - } - inst->in_class = (PyClassObject *)data->newargs; - Py_INCREF(inst->in_class); - inst->in_dict = PyDict_New(); - if (inst->in_dict == NULL) { - Py_DECREF(inst); - return NULL; - } -#ifdef Py_TPFLAGS_HAVE_WEAKREFS - inst->in_weakreflist = NULL; -#endif -#ifdef Py_TPFLAGS_GC - PyObject_GC_Init(inst); -#endif - PyDict_SetItem(inst->in_dict, SWIG_This(), swig_this); - return (PyObject *) inst; -#endif -#endif -} - -SWIGRUNTIME void -SWIG_Python_SetSwigThis(PyObject *inst, PyObject *swig_this) -{ - PyObject *dict; -#if (PY_VERSION_HEX >= 0x02020000) && 
!defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - } - PyDict_SetItem(dict, SWIG_This(), swig_this); - return; - } -#endif - dict = PyObject_GetAttrString(inst, (char*)"__dict__"); - PyDict_SetItem(dict, SWIG_This(), swig_this); - Py_DECREF(dict); -} - - -SWIGINTERN PyObject * -SWIG_Python_InitShadowInstance(PyObject *args) { - PyObject *obj[2]; - if (!SWIG_Python_UnpackTuple(args,(char*)"swiginit", 2, 2, obj)) { - return NULL; - } else { - SwigPyObject *sthis = SWIG_Python_GetSwigThis(obj[0]); - if (sthis) { - SwigPyObject_append((PyObject*) sthis, obj[1]); - } else { - SWIG_Python_SetSwigThis(obj[0], obj[1]); - } - return SWIG_Py_Void(); - } -} - -/* Create a new pointer object */ - -SWIGRUNTIME PyObject * -SWIG_Python_NewPointerObj(void *ptr, swig_type_info *type, int flags) { - if (!ptr) { - return SWIG_Py_Void(); - } else { - int own = (flags & SWIG_POINTER_OWN) ? SWIG_POINTER_OWN : 0; - PyObject *robj = SwigPyObject_New(ptr, type, own); - SwigPyClientData *clientdata = type ? (SwigPyClientData *)(type->clientdata) : 0; - if (clientdata && !(flags & SWIG_POINTER_NOSHADOW)) { - PyObject *inst = SWIG_Python_NewShadowInstance(clientdata, robj); - if (inst) { - Py_DECREF(robj); - robj = inst; - } - } - return robj; - } -} - -/* Create a new packed object */ - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_NewPackedObj(void *ptr, size_t sz, swig_type_info *type) { - return ptr ? 
SwigPyPacked_New((void *) ptr, sz, type) : SWIG_Py_Void(); -} - -/* -----------------------------------------------------------------------------* - * Get type list - * -----------------------------------------------------------------------------*/ - -#ifdef SWIG_LINK_RUNTIME -void *SWIG_ReturnGlobalTypeList(void *); -#endif - -SWIGRUNTIME swig_module_info * -SWIG_Python_GetModule(void) { - static void *type_pointer = (void *)0; - /* first check if module already created */ - if (!type_pointer) { -#ifdef SWIG_LINK_RUNTIME - type_pointer = SWIG_ReturnGlobalTypeList((void *)0); -#else -#ifdef USE_CAPSULES - type_pointer = PyCapsule_Import(TYPE_POINTER_NAME, 0); -#else - type_pointer = PyCObject_Import((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - (char*)"type_pointer" SWIG_TYPE_TABLE_NAME); -#endif - if (PyErr_Occurred()) { - PyErr_Clear(); - type_pointer = (void *)0; - } -#endif - } - return (swig_module_info *) type_pointer; -} - -#if PY_MAJOR_VERSION < 2 -/* PyModule_AddObject function was introduced in Python 2.0. The following function - is copied out of Python/modsupport.c in python version 2.3.4 */ -SWIGINTERN int -PyModule_AddObject(PyObject *m, char *name, PyObject *o) -{ - PyObject *dict; - if (!PyModule_Check(m)) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs module as first arg"); - return SWIG_ERROR; - } - if (!o) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs non-NULL value"); - return SWIG_ERROR; - } - - dict = PyModule_GetDict(m); - if (dict == NULL) { - /* Internal error -- modules must have a dict! 
*/ - PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__", - PyModule_GetName(m)); - return SWIG_ERROR; - } - if (PyDict_SetItemString(dict, name, o)) - return SWIG_ERROR; - Py_DECREF(o); - return SWIG_OK; -} -#endif - -SWIGRUNTIME void -SWIG_Python_DestroyModule(void *vptr) -{ - size_t i; -#ifdef USE_CAPSULES - swig_module_info *swig_module = - (swig_module_info *) PyCapsule_GetPointer((PyObject *)vptr, TYPE_POINTER_NAME); -#else - swig_module_info *swig_module = (swig_module_info *) vptr; -#endif - swig_type_info **types = swig_module->types; - for (i =0; i < swig_module->size; ++i) { - swig_type_info *ty = types[i]; - if (ty->owndata) { - SwigPyClientData *data = (SwigPyClientData *) ty->clientdata; - if (data) SwigPyClientData_Del(data); - } - } - Py_DECREF(SWIG_This()); - swig_this = NULL; -} - -SWIGRUNTIME void -SWIG_Python_SetModule(swig_module_info *swig_module) { - static PyMethodDef swig_empty_runtime_method_table[] = { {NULL, NULL, 0, NULL} };/* Sentinel */ - -#if PY_VERSION_HEX >= 0x03000000 - /* Add a dummy module object into sys.modules */ - PyObject *module = PyImport_AddModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION); -#else - PyObject *module = Py_InitModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - swig_empty_runtime_method_table); -#endif -#ifdef USE_CAPSULES - PyObject *pointer = PyCapsule_New((void *)swig_module, TYPE_POINTER_NAME, - (PyCapsule_Destructor)SWIG_Python_DestroyModule); -#else - PyObject *pointer = PyCObject_FromVoidPtr((void *) swig_module, SWIG_Python_DestroyModule); -#endif - if (pointer && module) { -#ifdef USE_CAPSULES - PyModule_AddObject(module, (char*)"type_pointer_capsule" SWIG_TYPE_TABLE_NAME, pointer); -#else - PyModule_AddObject(module, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME, pointer); -#endif - } else { - Py_XDECREF(pointer); - } -} - -/* The python cached type query */ -SWIGRUNTIME PyObject * -SWIG_Python_TypeCache(void) { - static PyObject *SWIG_STATIC_POINTER(cache) = PyDict_New(); - 
return cache; -} - -SWIGRUNTIME swig_type_info * -SWIG_Python_TypeQuery(const char *type) -{ - PyObject *cache = SWIG_Python_TypeCache(); - PyObject *key = SWIG_Python_str_FromChar(type); - PyObject *obj = PyDict_GetItem(cache, key); - swig_type_info *descriptor; - if (obj) { -#ifdef USE_CAPSULES - descriptor = (swig_type_info *) PyCapsule_GetPointer(obj, type); -#else - descriptor = (swig_type_info *) PyCObject_AsVoidPtr(obj); -#endif - } else { - swig_module_info *swig_module = SWIG_Python_GetModule(); - descriptor = SWIG_TypeQueryModule(swig_module, swig_module, type); - if (descriptor) { -#ifdef USE_CAPSULES - obj = PyCapsule_New(descriptor, type, NULL); -#else - obj = PyCObject_FromVoidPtr(descriptor, NULL); -#endif - PyDict_SetItem(cache, key, obj); - Py_DECREF(obj); - } - } - Py_DECREF(key); - return descriptor; -} - -/* - For backward compatibility only -*/ -#define SWIG_POINTER_EXCEPTION 0 -#define SWIG_arg_fail(arg) SWIG_Python_ArgFail(arg) -#define SWIG_MustGetPtr(p, type, argnum, flags) SWIG_Python_MustGetPtr(p, type, argnum, flags) - -SWIGRUNTIME int -SWIG_Python_AddErrMesg(const char* mesg, int infront) -{ - if (PyErr_Occurred()) { - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - Py_XINCREF(type); - PyErr_Clear(); - if (infront) { - PyErr_Format(type, "%s %s", mesg, tmp = SWIG_Python_str_AsChar(old_str)); - } else { - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - } - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - } - return 1; - } else { - return 0; - } -} - -SWIGRUNTIME int -SWIG_Python_ArgFail(int argnum) -{ - if (PyErr_Occurred()) { - /* add information about failing argument */ - char mesg[256]; - PyOS_snprintf(mesg, sizeof(mesg), "argument number %d:", argnum); - return SWIG_Python_AddErrMesg(mesg, 1); - } else { - return 0; - } -} - -SWIGRUNTIMEINLINE const char * 
-SwigPyObject_GetDesc(PyObject *self) -{ - SwigPyObject *v = (SwigPyObject *)self; - swig_type_info *ty = v ? v->ty : 0; - return ty ? ty->str : (char*)""; -} - -SWIGRUNTIME void -SWIG_Python_TypeError(const char *type, PyObject *obj) -{ - if (type) { -#if defined(SWIG_COBJECT_TYPES) - if (obj && SwigPyObject_Check(obj)) { - const char *otype = (const char *) SwigPyObject_GetDesc(obj); - if (otype) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, 'SwigPyObject(%s)' is received", - type, otype); - return; - } - } else -#endif - { - const char *otype = (obj ? obj->ob_type->tp_name : 0); - if (otype) { - PyObject *str = PyObject_Str(obj); - const char *cstr = str ? SWIG_Python_str_AsChar(str) : 0; - if (cstr) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s(%s)' is received", - type, otype, cstr); - SWIG_Python_str_DelForPy3(cstr); - } else { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s' is received", - type, otype); - } - Py_XDECREF(str); - return; - } - } - PyErr_Format(PyExc_TypeError, "a '%s' is expected", type); - } else { - PyErr_Format(PyExc_TypeError, "unexpected type is received"); - } -} - - -/* Convert a pointer value, signal an exception on a type mismatch */ -SWIGRUNTIME void * -SWIG_Python_MustGetPtr(PyObject *obj, swig_type_info *ty, int argnum, int flags) { - void *result; - if (SWIG_Python_ConvertPtr(obj, &result, ty, flags) == -1) { - PyErr_Clear(); -#if SWIG_POINTER_EXCEPTION - if (flags) { - SWIG_Python_TypeError(SWIG_TypePrettyName(ty), obj); - SWIG_Python_ArgFail(argnum); - } -#endif - } - return result; -} - - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - - -#define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0) - -#define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else - - - -/* -------- TYPES TABLE (BEGIN) -------- */ - -#define SWIGTYPE_p_char swig_types[0] -static swig_type_info *swig_types[2]; -static 
swig_module_info swig_module = {swig_types, 1, 0, 0, 0, 0}; -#define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) -#define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) - -/* -------- TYPES TABLE (END) -------- */ - -#if (PY_VERSION_HEX <= 0x02000000) -# if !defined(SWIG_PYTHON_CLASSIC) -# error "This python version requires swig to be run with the '-classic' option" -# endif -#endif - -/*----------------------------------------------- - @(target):= _csc.so - ------------------------------------------------*/ -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_init PyInit__csc - -#else -# define SWIG_init init_csc - -#endif -#define SWIG_name "_csc" - -#define SWIGVERSION 0x020001 -#define SWIG_VERSION SWIGVERSION - - -#define SWIG_as_voidptr(a) const_cast< void * >(static_cast< const void * >(a)) -#define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),reinterpret_cast< void** >(a)) - - -#include - - -namespace swig { - class SwigPtr_PyObject { - protected: - PyObject *_obj; - - public: - SwigPtr_PyObject() :_obj(0) - { - } - - SwigPtr_PyObject(const SwigPtr_PyObject& item) : _obj(item._obj) - { - Py_XINCREF(_obj); - } - - SwigPtr_PyObject(PyObject *obj, bool initial_ref = true) :_obj(obj) - { - if (initial_ref) { - Py_XINCREF(_obj); - } - } - - SwigPtr_PyObject & operator=(const SwigPtr_PyObject& item) - { - Py_XINCREF(item._obj); - Py_XDECREF(_obj); - _obj = item._obj; - return *this; - } - - ~SwigPtr_PyObject() - { - Py_XDECREF(_obj); - } - - operator PyObject *() const - { - return _obj; - } - - PyObject *operator->() const - { - return _obj; - } - }; -} - - -namespace swig { - struct SwigVar_PyObject : SwigPtr_PyObject { - SwigVar_PyObject(PyObject* obj = 0) : SwigPtr_PyObject(obj, false) { } - - SwigVar_PyObject & operator = (PyObject* obj) - { - Py_XDECREF(_obj); - _obj = obj; - return *this; - } - }; -} - - -#include "py3k.h" -#define SWIG_FILE_WITH_INIT -#include "Python.h" -#include 
"numpy/arrayobject.h" -#include "complex_ops.h" -/*#include "sparsetools.h"*/ - - -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -#include "complex_ops.h" - - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it slightly to fix some minor bugs, upgrade to numpy (all - * versions), add some comments and some functionality. - */ - -/* Macros to extract array attributes. - */ -#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -#define array_type(a) (int)(PyArray_TYPE(a)) -#define array_numdims(a) (((PyArrayObject *)a)->nd) -#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -#define array_data(a) (((PyArrayObject *)a)->data) -#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) - -/* Support older NumPy data type names -*/ -#if NDARRAY_VERSION < 0x01000000 -#define NPY_BOOL PyArray_BOOL -#define NPY_BYTE PyArray_BYTE -#define NPY_UBYTE PyArray_UBYTE -#define NPY_SHORT PyArray_SHORT -#define NPY_USHORT PyArray_USHORT -#define NPY_INT PyArray_INT -#define NPY_UINT PyArray_UINT -#define NPY_LONG PyArray_LONG -#define NPY_ULONG PyArray_ULONG -#define NPY_LONGLONG PyArray_LONGLONG -#define NPY_ULONGLONG PyArray_ULONGLONG -#define NPY_FLOAT PyArray_FLOAT -#define NPY_DOUBLE PyArray_DOUBLE -#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -#define NPY_CFLOAT PyArray_CFLOAT -#define NPY_CDOUBLE PyArray_CDOUBLE -#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -#define NPY_OBJECT PyArray_OBJECT -#define NPY_STRING PyArray_STRING -#define NPY_UNICODE PyArray_UNICODE -#define NPY_VOID PyArray_VOID -#define NPY_NTYPES PyArray_NTYPES -#define NPY_NOTYPE PyArray_NOTYPE -#define NPY_CHAR PyArray_CHAR -#define NPY_USERDEF PyArray_USERDEF -#define npy_intp intp -#endif - -/* 
Given a PyObject, return a string describing its type. - */ -const char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; -} - -/* Given a NumPy typecode, return a string describing the type. - */ -const char* typecode_string(int typecode) { - static const char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; -} - -/* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ -int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); -} - -/* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. 
- */ -PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) { - ary = (PyArrayObject*) input; - } - else if is_array(input) { - const char* desired_type = typecode_string(typecode); - const char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else { - const char * desired_type = typecode_string(typecode); - const char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; -} - -/* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ -PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else { - py_obj = PyArray_FromObject(input, typecode, 0, 0); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; -} - -/* Given a PyArrayObject, check to see if it is contiguous. If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. 
- */ -PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) { - PyArrayObject* result; - if (array_is_contiguous(ary)) { - result = ary; - *is_new_object = 0; - } - else { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; -} - -/* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ -PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, &is_new1); - if (ary1) { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; -} - -/* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ -int require_contiguous(PyArrayObject* ary) { - int contiguous = 1; - if (!array_is_contiguous(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; -} - -/* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_native(PyArrayObject* ary) { - int native = 1; - if (!array_is_native(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. A byte-swapped array was given"); - native = 0; - } - return native; -} - -/* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. 
Otherwise, set the python error string and return 0. - */ -int require_dimensions(PyArrayObject* ary, int exact_dimensions) { - int success = 1; - if (array_numdims(ary) != exact_dimensions) { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; -} - -/* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) { - if (array_numdims(ary) == exact_dimensions[i]) { - success = 1; - } - } - if (!success) { - for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must be have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; -} - -/* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. 
- */ -int require_size(PyArrayObject* ary, npy_intp* size, int n) { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) { - if (size[i] != -1 && size[i] != array_size(ary,i)) { - success = 0; - } - } - if (!success) { - for (i = 0; i < n; i++) { - if (size[i] == -1) { - sprintf(s, "*,"); - } - else - { - sprintf(s,"%" NPY_INTP_FMT ",", size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) { - sprintf(s,"%" NPY_INTP_FMT ",", array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must be have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); - } - return success; -} -/* End John Hunter translation (with modifications by Bill Spotz) */ - - - - - -/*! - Appends @a what to @a where. On input, @a where need not to be a tuple, but on - return it always is. 
- - @par Revision history: - - 17.02.2005, c -*/ -PyObject *helper_appendToTuple( PyObject *where, PyObject *what ) { - PyObject *o2, *o3; - - if ((!where) || (where == Py_None)) { - where = what; - } else { - if (!PyTuple_Check( where )) { - o2 = where; - where = PyTuple_New( 1 ); - PyTuple_SetItem( where, 0, o2 ); - } - o3 = PyTuple_New( 1 ); - PyTuple_SetItem( o3, 0, what ); - o2 = where; - where = PySequence_Concat( o2, o3 ); - Py_DECREF( o2 ); - Py_DECREF( o3 ); - } - return where; -} - - - - - - -#include "csc.h" - - -#include -#if !defined(SWIG_NO_LLONG_MAX) -# if !defined(LLONG_MAX) && defined(__GNUC__) && defined (__LONG_LONG_MAX__) -# define LLONG_MAX __LONG_LONG_MAX__ -# define LLONG_MIN (-LLONG_MAX - 1LL) -# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) -# endif -#endif - - -SWIGINTERN int -SWIG_AsVal_double (PyObject *obj, double *val) -{ - int res = SWIG_TypeError; - if (PyFloat_Check(obj)) { - if (val) *val = PyFloat_AsDouble(obj); - return SWIG_OK; - } else if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - double v = PyLong_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - double d = PyFloat_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = d; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_AddCast(SWIG_OK)); - } else { - PyErr_Clear(); - } - } - } -#endif - return res; -} - - -#include - - -#include - - -SWIGINTERNINLINE int -SWIG_CanCastAsInteger(double *d, double min, double max) { - double x = *d; - if ((min <= x && x <= max)) { - double fx = floor(x); - double cx = ceil(x); - double rd = ((x - fx) < 0.5) ? 
fx : cx; /* simple rint */ - if ((errno == EDOM) || (errno == ERANGE)) { - errno = 0; - } else { - double summ, reps, diff; - if (rd < x) { - diff = x - rd; - } else if (rd > x) { - diff = rd - x; - } else { - return 1; - } - summ = rd + x; - reps = diff/summ; - if (reps < 8*DBL_EPSILON) { - *d = rd; - return 1; - } - } - } - return 0; -} - - -SWIGINTERN int -SWIG_AsVal_long (PyObject *obj, long* val) -{ - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal_double (obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -#endif - return SWIG_TypeError; -} - - -SWIGINTERN int -SWIG_AsVal_int (PyObject * obj, int *val) -{ - long v; - int res = SWIG_AsVal_long (obj, &v); - if (SWIG_IsOK(res)) { - if ((v < INT_MIN || v > INT_MAX)) { - return SWIG_OverflowError; - } else { - if (val) *val = static_cast< int >(v); - } - } - return res; -} - -#ifdef __cplusplus -extern "C" { -#endif -SWIGINTERN PyObject *_wrap_csc_matmat_pass1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - 
PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matmat_pass1",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass1" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass1" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - temp7 = 
obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - csc_matmat_pass1< int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - signed char *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" 
"csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_BYTE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (signed char*) array_data(temp6); - } - csc_diagonal< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; 
- int *arg4 ; - unsigned char *arg5 ; - unsigned char *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - temp6 = 
obj_to_array_no_conversion(obj5,PyArray_UBYTE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned char*) array_data(temp6); - } - csc_diagonal< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - short *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - 
array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_SHORT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (short*) array_data(temp6); - } - csc_diagonal< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - unsigned short *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = 
NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_USHORT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned short*) 
array_data(temp6); - } - csc_diagonal< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || 
!require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - csc_diagonal< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - unsigned int *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - 
PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_UINT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned int*) array_data(temp6); - } - csc_diagonal< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,arg6); - 
resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - long long *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] 
= { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_LONGLONG); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (long long*) array_data(temp6); - } - csc_diagonal< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - unsigned long long *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; 
- PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_ULONGLONG); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (unsigned long long*) array_data(temp6); - } - csc_diagonal< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { 
- if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - float *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if 
(!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_FLOAT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (float*) array_data(temp6); - } - csc_diagonal< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - double *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) 
SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_DOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (double*) array_data(temp6); - } - csc_diagonal< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - 
Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - long double *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - 
{ - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_LONGDOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (long double*) array_data(temp6); - } - csc_diagonal< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - npy_cfloat_wrapper *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in 
method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_CFLOAT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (npy_cfloat_wrapper*) array_data(temp6); - } - csc_diagonal< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - 
Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - npy_cdouble_wrapper *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = 
obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_CDOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (npy_cdouble_wrapper*) array_data(temp6); - } - csc_diagonal< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - npy_clongdouble_wrapper *arg6 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" 
"csc_diagonal" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_diagonal" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_CLONGDOUBLE); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (npy_clongdouble_wrapper*) array_data(temp6); - } - csc_diagonal< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,arg6); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { 
- Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_diagonal(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[7]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 6); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_1(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_2(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_3(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_4(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_5(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_6(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_7(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_8(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_9(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_10(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_11(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_12(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_13(self, args); - } - } - } - } - } - } - } - if (argc == 6) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_diagonal__SWIG_14(self, args); - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_diagonal'.\n" - " Possible C/C++ prototypes are:\n" - " csc_diagonal< int,signed char >(int const,int const,int const [],int const [],signed char const [],signed char [])\n" - " csc_diagonal< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],unsigned char [])\n" - " csc_diagonal< int,short >(int const,int const,int const [],int const [],short const [],short [])\n" - " csc_diagonal< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],unsigned short [])\n" - " csc_diagonal< int,int >(int const,int const,int const [],int const [],int const [],int [])\n" - " csc_diagonal< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],unsigned int [])\n" - " csc_diagonal< int,long long >(int const,int const,int const [],int const [],long long const [],long long [])\n" - " csc_diagonal< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],unsigned long long [])\n" - " csc_diagonal< int,float >(int const,int const,int const [],int const [],float const [],float [])\n" - " csc_diagonal< int,double >(int const,int const,int const [],int const [],double const [],double [])\n" - " csc_diagonal< int,long double >(int const,int const,int const [],int const [],long double const [],long double [])\n" - " csc_diagonal< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper [])\n" - " csc_diagonal< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper [])\n" - " csc_diagonal< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const 
[],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - int *arg6 ; - int *arg7 ; - signed char *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) 
array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_BYTE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (signed char*) array_data(temp8); - } - csc_tocsr< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned char *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject 
*temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = 
obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_UBYTE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned char*) array_data(temp8); - } - csc_tocsr< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - int *arg6 ; - int *arg7 ; - short *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument 
" "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_SHORT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (short*) array_data(temp8); - } - csc_tocsr< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if 
(is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned short *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = 
(int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_USHORT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned short*) array_data(temp8); - } - csc_tocsr< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - 
int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || 
!require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - csc_tocsr< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned int *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 
= 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_UINT); - if (!temp8 || !require_contiguous(temp8) || 
!require_native(temp8)) SWIG_fail; - arg8 = (unsigned int*) array_data(temp8); - } - csc_tocsr< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - int *arg6 ; - int *arg7 ; - long long *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = 
static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_LONGLONG); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (long long*) array_data(temp8); - } - csc_tocsr< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 
&& array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned long long *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || 
!require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_ULONGLONG); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned long long*) array_data(temp8); - } - csc_tocsr< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - int *arg6 ; - int *arg7 ; - float *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 
= NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || 
!require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_FLOAT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (float*) array_data(temp8); - } - csc_tocsr< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - int *arg6 ; - int *arg7 ; - double *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_DOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (double*) array_data(temp8); - } - csc_tocsr< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,arg6,arg7,arg8); - resultobj = 
SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - int *arg6 ; - int *arg7 ; - long double *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || 
!require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_LONGDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (long double*) array_data(temp8); - } - csc_tocsr< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_12(PyObject 
*SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cfloat_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = 
obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CFLOAT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_cfloat_wrapper*) array_data(temp8); - } - csc_tocsr< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cdouble_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - 
PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = 
obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_cdouble_wrapper*) array_data(temp8); - } - csc_tocsr< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *temp6 = NULL ; - PyArrayObject *temp7 = NULL ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_tocsr",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_tocsr" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_tocsr" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - temp6 = obj_to_array_no_conversion(obj5,PyArray_INT); - if (!temp6 || !require_contiguous(temp6) || !require_native(temp6)) SWIG_fail; - arg6 = (int*) array_data(temp6); - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (int*) array_data(temp7); - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CLONGDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_clongdouble_wrapper*) array_data(temp8); - } - csc_tocsr< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const 
(*))arg4,(npy_clongdouble_wrapper const (*))arg5,arg6,arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_tocsr(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[9]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 8); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_tocsr__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_tocsr'.\n" - " Possible C/C++ prototypes are:\n" - " csc_tocsr< int,signed char >(int const,int const,int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " csc_tocsr< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " csc_tocsr< int,short >(int const,int const,int const [],int const [],short const [],int [],int [],short [])\n" - " csc_tocsr< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " csc_tocsr< int,int >(int const,int const,int const [],int const [],int const [],int [],int [],int [])\n" - " csc_tocsr< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " csc_tocsr< int,long long >(int const,int const,int const [],int const [],long long const [],int [],int [],long long [])\n" - " csc_tocsr< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " csc_tocsr< int,float >(int const,int const,int const [],int const [],float const [],int [],int [],float [])\n" - " csc_tocsr< int,double >(int const,int const,int const [],int const [],double const [],int [],int [],double [])\n" - " csc_tocsr< int,long double >(int const,int const,int const [],int const [],long double const [],int [],int [],long double [])\n" - " csc_tocsr< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " csc_tocsr< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper 
[])\n" - " csc_tocsr< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - int *arg6 ; - int *arg7 ; - signed char *arg8 ; - int *arg9 ; - int *arg10 ; - signed char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, 
&is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_BYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (signed char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = 
obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_BYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (signed char*) array_data(temp11); - } - csc_matmat_pass2< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(signed char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned char *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; 
- PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || 
!require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UBYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UBYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned char*) array_data(temp11); - } - csc_matmat_pass2< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - 
} - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - int *arg6 ; - int *arg7 ; - short *arg8 ; - int *arg9 ; - int *arg10 ; - short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 
= obj_to_array_contiguous_allow_conversion(obj7, PyArray_SHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_SHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (short*) array_data(temp11); - } - csc_matmat_pass2< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { 
- PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned short *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, 
PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_USHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_USHORT); - if (!temp11 || !require_contiguous(temp11) || 
!require_native(temp11)) SWIG_fail; - arg11 = (unsigned short*) array_data(temp11); - } - csc_matmat_pass2< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int *arg10 ; - int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject 
* obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || 
!require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_INT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_INT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (int*) array_data(temp11); - } - csc_matmat_pass2< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if 
(is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned int *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = 
static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UINT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || 
!require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UINT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned int*) array_data(temp11); - } - csc_matmat_pass2< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - int *arg6 ; - int *arg7 ; - long long *arg8 ; - int *arg9 ; - int *arg10 ; - long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - 
PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, 
&is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long long*) array_data(temp11); - } - csc_matmat_pass2< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - 
Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned long long *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || 
!require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_ULONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_ULONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned long long*) array_data(temp11); - } - csc_matmat_pass2< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { 
- Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - int *arg6 ; - int *arg7 ; - float *arg8 ; - int *arg9 ; - int *arg10 ; - float *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || 
!require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_FLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (float*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 
|| !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_FLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (float*) array_data(temp11); - } - csc_matmat_pass2< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,(int const (*))arg6,(int const (*))arg7,(float const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - int *arg6 ; - int *arg7 ; - double *arg8 ; - int *arg9 ; - int *arg10 ; - double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int 
is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - npy_intp 
size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_DOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_DOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (double*) array_data(temp11); - } - csc_matmat_pass2< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 
&& array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - int *arg6 ; - int *arg7 ; - long double *arg8 ; - int *arg9 ; - int *arg10 ; - long double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int 
>(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || 
!require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long double*) array_data(temp11); - } - csc_matmat_pass2< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - 
npy_cfloat_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cfloat_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cfloat_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || 
!require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CFLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cfloat_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CFLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cfloat_wrapper*) array_data(temp11); - } - 
csc_matmat_pass2< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cfloat_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * 
obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || 
!require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cdouble_wrapper*) array_data(temp11); - } - csc_matmat_pass2< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return 
resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_clongdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_matmat_pass2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matmat_pass2" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matmat_pass2" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CLONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || 
!require_native(array8)) SWIG_fail; - - arg8 = (npy_clongdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CLONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_clongdouble_wrapper*) array_data(temp11); - } - csc_matmat_pass2< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_clongdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matmat_pass2(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[12]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - 
for (ii = 0; (ii < argc) && (ii < 11); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matmat_pass2__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_matmat_pass2'.\n" - " Possible C/C++ prototypes are:\n" - " csc_matmat_pass2< int,signed char >(int const,int const,int const [],int const [],signed char const [],int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " csc_matmat_pass2< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " csc_matmat_pass2< int,short >(int const,int const,int const [],int const [],short const [],int const [],int const [],short const [],int [],int [],short [])\n" - " csc_matmat_pass2< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " csc_matmat_pass2< int,int >(int const,int const,int const [],int const [],int const [],int const [],int const [],int const [],int [],int [],int [])\n" - " csc_matmat_pass2< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " csc_matmat_pass2< int,long long >(int const,int const,int const [],int const [],long long const [],int const [],int const [],long long const [],int [],int [],long long [])\n" - " csc_matmat_pass2< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],int const [],int const [],unsigned long long const [],int [],int 
[],unsigned long long [])\n" - " csc_matmat_pass2< int,float >(int const,int const,int const [],int const [],float const [],int const [],int const [],float const [],int [],int [],float [])\n" - " csc_matmat_pass2< int,double >(int const,int const,int const [],int const [],double const [],int const [],int const [],double const [],int [],int [],double [])\n" - " csc_matmat_pass2< int,long double >(int const,int const,int const [],int const [],long double const [],int const [],int const [],long double const [],int [],int [],long double [])\n" - " csc_matmat_pass2< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " csc_matmat_pass2< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " csc_matmat_pass2< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - signed char *arg6 ; - signed char *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if 
(!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_BYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (signed char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_BYTE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (signed char*) 
array_data(temp7); - } - csc_matvec< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,(signed char const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - unsigned char *arg6 ; - unsigned char *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" 
"', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UBYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned char*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_UBYTE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned char*) array_data(temp7); - } - csc_matvec< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,(unsigned char const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - 
return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - short *arg6 ; - short *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, 
PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_SHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_SHORT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (short*) array_data(temp7); - } - csc_matvec< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,(short const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - unsigned short 
*arg6 ; - unsigned short *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = 
(unsigned short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_USHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned short*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_USHORT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned short*) array_data(temp7); - } - csc_matvec< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,(unsigned short const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * 
obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_INT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = 
(int*) array_data(temp7); - } - csc_matvec< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - unsigned int *arg6 ; - unsigned int *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of 
type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UINT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned int*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_UINT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned int*) array_data(temp7); - } - csc_matvec< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,(unsigned int const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - 
if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - long long *arg6 ; - long long *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, 
&is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_LONGLONG); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (long long*) array_data(temp7); - } - csc_matvec< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,(long long const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long 
*arg5 ; - unsigned long long *arg6 ; - unsigned long long *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || 
!require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_ULONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned long long*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_ULONGLONG); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (unsigned long long*) array_data(temp7); - } - csc_matvec< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,(unsigned long long const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - float *arg6 ; - float *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - 
PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_FLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (float*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_FLOAT); - 
if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (float*) array_data(temp7); - } - csc_matvec< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,(float const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - double *arg6 ; - double *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - 
SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_DOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_DOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (double*) array_data(temp7); - } - csc_matvec< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,(double const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { 
- Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - long double *arg6 ; - long double *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = 
obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long double*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_LONGDOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (long double*) array_data(temp7); - } - csc_matvec< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,(long double const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 
0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - npy_cfloat_wrapper *arg6 ; - npy_cfloat_wrapper *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || 
!require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CFLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cfloat_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_CFLOAT); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (npy_cfloat_wrapper*) array_data(temp7); - } - csc_matvec< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,(npy_cfloat_wrapper const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - npy_cdouble_wrapper *arg6 ; - npy_cdouble_wrapper *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - 
int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || 
!require_native(array6)) SWIG_fail; - - arg6 = (npy_cdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_CDOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (npy_cdouble_wrapper*) array_data(temp7); - } - csc_matvec< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,(npy_cdouble_wrapper const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - npy_clongdouble_wrapper *arg6 ; - npy_clongdouble_wrapper *arg7 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *temp7 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOO:csc_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - 
if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CLONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_clongdouble_wrapper*) array6->data; - } - { - temp7 = obj_to_array_no_conversion(obj6,PyArray_CLONGDOUBLE); - if (!temp7 || !require_contiguous(temp7) || !require_native(temp7)) SWIG_fail; - arg7 = (npy_clongdouble_wrapper*) array_data(temp7); - } - csc_matvec< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int 
const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,(npy_clongdouble_wrapper const (*))arg6,arg7); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvec(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[8]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 7); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_BYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_1(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_2(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_SHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_3(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_4(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_5(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_6(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_7(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_8(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_FLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_9(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_10(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_11(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_12(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_13(self, args); - } - } - } - } - } - } - } - } - if (argc == 7) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvec__SWIG_14(self, args); - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_matvec'.\n" - " Possible C/C++ prototypes are:\n" - " csc_matvec< int,signed char >(int const,int const,int const [],int const [],signed char const [],signed char const [],signed char [])\n" - " csc_matvec< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],unsigned char const [],unsigned char [])\n" - " csc_matvec< int,short >(int const,int const,int const [],int const [],short const [],short const [],short [])\n" - " csc_matvec< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],unsigned short const [],unsigned short [])\n" - " csc_matvec< int,int >(int const,int const,int const [],int const [],int const [],int const [],int [])\n" - " csc_matvec< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],unsigned int const [],unsigned int [])\n" - " csc_matvec< 
int,long long >(int const,int const,int const [],int const [],long long const [],long long const [],long long [])\n" - " csc_matvec< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],unsigned long long const [],unsigned long long [])\n" - " csc_matvec< int,float >(int const,int const,int const [],int const [],float const [],float const [],float [])\n" - " csc_matvec< int,double >(int const,int const,int const [],int const [],double const [],double const [],double [])\n" - " csc_matvec< int,long double >(int const,int const,int const [],int const [],long double const [],long double const [],long double [])\n" - " csc_matvec< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper [])\n" - " csc_matvec< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper [])\n" - " csc_matvec< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - signed char *arg6 ; - signed char *arg7 ; - signed char *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - 
PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_BYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (signed char*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_BYTE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) 
|| !require_native(array7)) SWIG_fail; - - arg7 = (signed char*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_BYTE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (signed char*) array_data(temp8); - } - csc_matvecs< int,signed char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(signed char const (*))arg6,(signed char const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned char *arg6 ; - unsigned char *arg7 ; - unsigned char *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = 
SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UBYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned char*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_UBYTE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned char*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_UBYTE); - if 
(!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned char*) array_data(temp8); - } - csc_matvecs< int,unsigned char >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned char const (*))arg6,(unsigned char const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - short *arg6 ; - short *arg7 ; - short *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = 
static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_SHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (short*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_SHORT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (short*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_SHORT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (short*) array_data(temp8); - } - csc_matvecs< int,short >(arg1,arg2,arg3,(int const (*))arg4,(int const 
(*))arg5,(short const (*))arg6,(short const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned short *arg6 ; - unsigned short *arg7 ; - unsigned short *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type 
'" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_USHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned short*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_USHORT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned short*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_USHORT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned short*) array_data(temp8); - } - csc_matvecs< int,unsigned short >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned short const (*))arg6,(unsigned short const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - 
} - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" 
"csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - csc_matvecs< int,int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,(int const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if 
(is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned int *arg6 ; - unsigned int *arg7 ; - unsigned int *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, 
&is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UINT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_UINT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned int*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_UINT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned int*) array_data(temp8); - } - csc_matvecs< int,unsigned int >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned int const (*))arg6,(unsigned int const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && 
array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long long *arg6 ; - long long *arg7 ; - long long *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 
= (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long long*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_LONGLONG, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (long long*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_LONGLONG); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (long long*) array_data(temp8); - } - csc_matvecs< int,long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long long const (*))arg6,(long long const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_8(PyObject 
*SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - unsigned long long *arg6 ; - unsigned long long *arg7 ; - unsigned long long *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if 
(!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_ULONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (unsigned long long*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_ULONGLONG, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned long long*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_ULONGLONG); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned long long*) array_data(temp8); - } - csc_matvecs< int,unsigned long long >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(unsigned long long const (*))arg6,(unsigned long long const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 
; - int *arg4 ; - int *arg5 ; - float *arg6 ; - float *arg7 ; - float *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) 
SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_FLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (float*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_FLOAT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (float*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_FLOAT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (float*) array_data(temp8); - } - csc_matvecs< int,float >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(float const (*))arg6,(float const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - double *arg6 ; - double *arg7 ; - double *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int 
is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_DOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) 
|| !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (double*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_DOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (double*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_DOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (double*) array_data(temp8); - } - csc_matvecs< int,double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(double const (*))arg6,(double const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - long double *arg6 ; - long double *arg7 ; - long double *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject 
*temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (long double*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = 
obj_to_array_contiguous_allow_conversion(obj6, PyArray_LONGDOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (long double*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_LONGDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (long double*) array_data(temp8); - } - csc_matvecs< int,long double >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(long double const (*))arg6,(long double const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cfloat_wrapper *arg6 ; - npy_cfloat_wrapper *arg7 ; - npy_cfloat_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - 
PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CFLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cfloat_wrapper*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_CFLOAT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || 
!require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (npy_cfloat_wrapper*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CFLOAT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_cfloat_wrapper*) array_data(temp8); - } - csc_matvecs< int,npy_cfloat_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cfloat_wrapper const (*))arg6,(npy_cfloat_wrapper const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_cdouble_wrapper *arg6 ; - npy_cdouble_wrapper *arg7 ; - npy_cdouble_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if 
(!PyArg_ParseTuple(args,(char *)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_cdouble_wrapper*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_CDOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || 
!require_native(array7)) SWIG_fail; - - arg7 = (npy_cdouble_wrapper*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_cdouble_wrapper*) array_data(temp8); - } - csc_matvecs< int,npy_cdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_cdouble_wrapper const (*))arg6,(npy_cdouble_wrapper const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int *arg4 ; - int *arg5 ; - npy_clongdouble_wrapper *arg6 ; - npy_clongdouble_wrapper *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOO:csc_matvecs",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_matvecs" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_matvecs" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "csc_matvecs" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CLONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (npy_clongdouble_wrapper*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_CLONGDOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) 
SWIG_fail; - - arg7 = (npy_clongdouble_wrapper*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CLONGDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_clongdouble_wrapper*) array_data(temp8); - } - csc_matvecs< int,npy_clongdouble_wrapper >(arg1,arg2,arg3,(int const (*))arg4,(int const (*))arg5,(npy_clongdouble_wrapper const (*))arg6,(npy_clongdouble_wrapper const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_matvecs(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[9]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 8); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_matvecs__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_matvecs'.\n" - " Possible C/C++ prototypes are:\n" - " csc_matvecs< int,signed char >(int const,int const,int const,int const [],int const [],signed char const [],signed char const [],signed char [])\n" - " csc_matvecs< int,unsigned char >(int const,int const,int const,int const [],int const [],unsigned char const [],unsigned char const [],unsigned char [])\n" - " csc_matvecs< int,short >(int const,int const,int const,int const [],int const [],short const [],short const [],short [])\n" - " csc_matvecs< int,unsigned short >(int const,int const,int const,int const [],int const [],unsigned short const [],unsigned short const [],unsigned short [])\n" - " csc_matvecs< int,int >(int const,int const,int const,int const [],int const [],int const [],int const [],int [])\n" - " csc_matvecs< int,unsigned int >(int const,int const,int const,int const [],int const [],unsigned int const [],unsigned int const [],unsigned int [])\n" - " csc_matvecs< int,long long >(int const,int const,int const,int const [],int const [],long long const [],long long const [],long long [])\n" - " csc_matvecs< int,unsigned long long >(int const,int const,int const,int const [],int const [],unsigned long long const [],unsigned long long const [],unsigned long long [])\n" - " csc_matvecs< int,float >(int const,int const,int const,int const [],int const [],float const [],float const [],float [])\n" - " csc_matvecs< int,double >(int const,int const,int const,int const [],int const [],double const [],double const [],double [])\n" - " csc_matvecs< int,long double >(int const,int const,int const,int const [],int const [],long double const [],long double const [],long double [])\n" - " csc_matvecs< int,npy_cfloat_wrapper >(int const,int const,int const,int const [],int const 
[],npy_cfloat_wrapper const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper [])\n" - " csc_matvecs< int,npy_cdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper [])\n" - " csc_matvecs< int,npy_clongdouble_wrapper >(int const,int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - int *arg6 ; - int *arg7 ; - signed char *arg8 ; - int *arg9 ; - int *arg10 ; - signed char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { 
- SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_BYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) 
SWIG_fail; - - arg8 = (signed char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_BYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (signed char*) array_data(temp11); - } - csc_elmul_csc< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(signed char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned char *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned 
char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - 
{ - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UBYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UBYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned char*) array_data(temp11); - } - csc_elmul_csc< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,(int const (*))arg6,(int const 
(*))arg7,(unsigned char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - int *arg6 ; - int *arg7 ; - short *arg8 ; - int *arg9 ; - int *arg10 ; - short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if 
(!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || 
!require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_SHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_SHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (short*) array_data(temp11); - } - csc_elmul_csc< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - 
Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned short *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || 
!require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_USHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || 
!require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_USHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned short*) array_data(temp11); - } - csc_elmul_csc< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int *arg10 ; - int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - 
PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = 
obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_INT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_INT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (int*) array_data(temp11); - } - csc_elmul_csc< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if 
(is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned int *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if 
(!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UINT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || 
!require_native(array8)) SWIG_fail; - - arg8 = (unsigned int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UINT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned int*) array_data(temp11); - } - csc_elmul_csc< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - int *arg6 ; - int *arg7 ; - long long *arg8 ; - int *arg9 ; - int 
*arg10 ; - long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) 
array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long long*) array_data(temp11); - } - csc_elmul_csc< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,(int const (*))arg6,(int const 
(*))arg7,(long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned long long *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 
; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, 
PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_ULONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_ULONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned long long*) array_data(temp11); - } - csc_elmul_csc< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); 
- } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - int *arg6 ; - int *arg7 ; - float *arg8 ; - int *arg9 ; - int *arg10 ; - float *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, 
&is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_FLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (float*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = 
obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_FLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (float*) array_data(temp11); - } - csc_elmul_csc< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,(int const (*))arg6,(int const (*))arg7,(float const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - int *arg6 ; - int *arg7 ; - double *arg8 ; - int *arg9 ; - int *arg10 ; - double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int 
is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - 
arg5 = (double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_DOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_DOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (double*) array_data(temp11); - } - csc_elmul_csc< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - 
Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - int *arg6 ; - int *arg7 ; - long double *arg8 ; - int *arg9 ; - int *arg10 ; - long double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type 
'" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGDOUBLE, &is_new_object8); - if 
(!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long double*) array_data(temp11); - } - csc_elmul_csc< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - 
int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cfloat_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cfloat_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || 
!require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CFLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cfloat_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CFLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = 
(npy_cfloat_wrapper*) array_data(temp11); - } - csc_elmul_csc< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cfloat_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 
; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || 
!require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cdouble_wrapper*) array_data(temp11); - } - csc_elmul_csc< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - 
Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_clongdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_elmul_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_elmul_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if 
(!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_elmul_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CLONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || 
!require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_clongdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CLONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_clongdouble_wrapper*) array_data(temp11); - } - csc_elmul_csc< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_clongdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_elmul_csc(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[12]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = 
(int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 11); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_USHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_ULONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CFLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_elmul_csc__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_elmul_csc'.\n" - " Possible C/C++ prototypes are:\n" - " csc_elmul_csc< int,signed char >(int const,int const,int const [],int const [],signed char const [],int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " csc_elmul_csc< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " csc_elmul_csc< int,short >(int const,int const,int const [],int const [],short const [],int const [],int const [],short const [],int [],int [],short [])\n" - " csc_elmul_csc< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " csc_elmul_csc< int,int >(int const,int const,int const [],int const [],int const [],int const [],int const [],int const [],int 
[],int [],int [])\n" - " csc_elmul_csc< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " csc_elmul_csc< int,long long >(int const,int const,int const [],int const [],long long const [],int const [],int const [],long long const [],int [],int [],long long [])\n" - " csc_elmul_csc< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " csc_elmul_csc< int,float >(int const,int const,int const [],int const [],float const [],int const [],int const [],float const [],int [],int [],float [])\n" - " csc_elmul_csc< int,double >(int const,int const,int const [],int const [],double const [],int const [],int const [],double const [],int [],int [],double [])\n" - " csc_elmul_csc< int,long double >(int const,int const,int const [],int const [],long double const [],int const [],int const [],long double const [],int [],int [],long double [])\n" - " csc_elmul_csc< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " csc_elmul_csc< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " csc_elmul_csc< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - int 
*arg6 ; - int *arg7 ; - signed char *arg8 ; - int *arg9 ; - int *arg10 ; - signed char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || 
!require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_BYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (signed char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_BYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (signed char*) array_data(temp11); - } - csc_eldiv_csc< int,signed char >(arg1,arg2,(int const 
(*))arg3,(int const (*))arg4,(signed char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(signed char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned char *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 
0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - 
array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UBYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UBYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned char*) array_data(temp11); - } - csc_eldiv_csc< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) 
{ - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - int *arg6 ; - int *arg7 ; - short *arg8 ; - int *arg9 ; - int *arg10 ; - short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, 
PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_SHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = 
obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_SHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (short*) array_data(temp11); - } - csc_eldiv_csc< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned short *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = 
NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) 
SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_USHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_USHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned short*) array_data(temp11); - } - csc_eldiv_csc< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - 
Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int *arg10 ; - int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" 
"csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_INT, 
&is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_INT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (int*) array_data(temp11); - } - csc_eldiv_csc< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - 
unsigned int *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned int *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || 
!require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UINT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UINT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned int*) array_data(temp11); - } - csc_eldiv_csc< 
int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - int *arg6 ; - int *arg7 ; - long long *arg8 ; - int *arg9 ; - int *arg10 ; - long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * 
obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - 
npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long long*) array_data(temp11); - } - csc_eldiv_csc< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if 
(is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned long long *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - 
}; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_ULONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || 
!require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_ULONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned long long*) array_data(temp11); - } - csc_eldiv_csc< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - int *arg6 ; - int *arg7 ; - float *arg8 ; - int *arg9 ; - int *arg10 ; - float *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject 
*array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || 
!require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_FLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (float*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_FLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (float*) array_data(temp11); - } - csc_eldiv_csc< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,(int const (*))arg6,(int const (*))arg7,(float const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - 
Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - int *arg6 ; - int *arg7 ; - double *arg8 ; - int *arg9 ; - int *arg10 ; - double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if 
(!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = 
{ - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_DOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_DOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (double*) array_data(temp11); - } - csc_eldiv_csc< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_11(PyObject 
*SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - int *arg6 ; - int *arg7 ; - long double *arg8 ; - int *arg9 ; - int *arg10 ; - long double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = 
obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGDOUBLE); - if (!temp11 || 
!require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long double*) array_data(temp11); - } - csc_eldiv_csc< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cfloat_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cfloat_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - 
PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || 
!require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CFLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cfloat_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CFLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cfloat_wrapper*) array_data(temp11); - } - csc_eldiv_csc< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cfloat_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && 
array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if 
(!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || 
!require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cdouble_wrapper*) array_data(temp11); - } - csc_eldiv_csc< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - 
npy_clongdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_clongdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_eldiv_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_eldiv_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_eldiv_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || 
!require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CLONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_clongdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CLONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - 
arg11 = (npy_clongdouble_wrapper*) array_data(temp11); - } - csc_eldiv_csc< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_clongdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_eldiv_csc(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[12]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 11); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_SHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_eldiv_csc__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_eldiv_csc'.\n" - " Possible C/C++ prototypes are:\n" - " csc_eldiv_csc< int,signed char >(int const,int const,int const [],int const [],signed char const [],int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " csc_eldiv_csc< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " csc_eldiv_csc< int,short >(int const,int const,int const [],int const [],short const [],int const [],int const [],short const [],int [],int [],short [])\n" - " csc_eldiv_csc< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " csc_eldiv_csc< int,int >(int const,int const,int const [],int const [],int const [],int const [],int const [],int const [],int [],int [],int [])\n" - " csc_eldiv_csc< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " csc_eldiv_csc< int,long long >(int const,int const,int const [],int const [],long long const [],int const [],int const [],long long const [],int [],int [],long long [])\n" - " csc_eldiv_csc< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " 
csc_eldiv_csc< int,float >(int const,int const,int const [],int const [],float const [],int const [],int const [],float const [],int [],int [],float [])\n" - " csc_eldiv_csc< int,double >(int const,int const,int const [],int const [],double const [],int const [],int const [],double const [],int [],int [],double [])\n" - " csc_eldiv_csc< int,long double >(int const,int const,int const [],int const [],long double const [],int const [],int const [],long double const [],int [],int [],long double [])\n" - " csc_eldiv_csc< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " csc_eldiv_csc< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " csc_eldiv_csc< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - int *arg6 ; - int *arg7 ; - signed char *arg8 ; - int *arg9 ; - int *arg10 ; - signed char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - 
PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || 
!require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_BYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (signed char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_BYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (signed char*) array_data(temp11); - } - csc_plus_csc< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(signed char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - 
if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned char *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" 
"csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UBYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned char*) array8->data; - } - 
{ - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UBYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned char*) array_data(temp11); - } - csc_plus_csc< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - int *arg6 ; - int *arg7 ; - short *arg8 ; - int *arg9 ; - int *arg10 ; - short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 
; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = 
obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_SHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_SHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (short*) array_data(temp11); - } - csc_plus_csc< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if 
(is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned short *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || 
!require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_USHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_USHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned short*) array_data(temp11); - } - csc_plus_csc< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && 
array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int *arg10 ; - int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || 
!require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_INT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - 
arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_INT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (int*) array_data(temp11); - } - csc_plus_csc< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned int *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - 
PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, 
PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UINT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UINT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned int*) array_data(temp11); - } - csc_plus_csc< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if 
(is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - int *arg6 ; - int *arg7 ; - long long *arg8 ; - int *arg9 ; - int *arg10 ; - long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if 
(!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || 
!require_native(array8)) SWIG_fail; - - arg8 = (long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long long*) array_data(temp11); - } - csc_plus_csc< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned long long *arg8 ; - int *arg9 ; - 
int *arg10 ; - unsigned long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = 
(int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_ULONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_ULONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned long long*) array_data(temp11); - } - csc_plus_csc< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long 
long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - int *arg6 ; - int *arg7 ; - float *arg8 ; - int *arg9 ; - int *arg10 ; - float *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - 
PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, 
PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_FLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (float*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_FLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (float*) array_data(temp11); - } - csc_plus_csc< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,(int const (*))arg6,(int const (*))arg7,(float const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } 
- { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - double *arg5 ; - int *arg6 ; - int *arg7 ; - double *arg8 ; - int *arg9 ; - int *arg10 ; - double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || 
!require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_DOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) 
|| !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_DOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (double*) array_data(temp11); - } - csc_plus_csc< int,double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - int *arg6 ; - int *arg7 ; - long double *arg8 ; - int *arg9 ; - int *arg10 ; - long double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - 
PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; 
- array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long double*) array_data(temp11); - } - csc_plus_csc< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if 
(is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cfloat_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cfloat_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } 
- arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CFLOAT, &is_new_object8); - if (!array8 || 
!require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cfloat_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CFLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cfloat_wrapper*) array_data(temp11); - } - csc_plus_csc< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cfloat_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; 
- int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || 
!require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = 
(npy_cdouble_wrapper*) array_data(temp11); - } - csc_plus_csc< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_clongdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - 
PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_plus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_plus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_plus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || 
!require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CLONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_clongdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CLONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_clongdouble_wrapper*) array_data(temp11); - } - csc_plus_csc< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_clongdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && 
array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_plus_csc(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[12]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 11); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_FLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_plus_csc__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_plus_csc'.\n" - " Possible C/C++ prototypes are:\n" - " csc_plus_csc< int,signed char >(int const,int const,int const [],int const [],signed char const [],int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " csc_plus_csc< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " csc_plus_csc< int,short >(int const,int const,int const [],int const [],short const [],int const [],int const [],short const [],int [],int [],short [])\n" - " csc_plus_csc< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " csc_plus_csc< int,int >(int const,int const,int const [],int const [],int const [],int const [],int const [],int const [],int [],int [],int [])\n" - " csc_plus_csc< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " csc_plus_csc< int,long long >(int const,int const,int const [],int const [],long long const [],int const [],int const [],long long const [],int [],int [],long long [])\n" - " csc_plus_csc< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " csc_plus_csc< int,float >(int const,int const,int const [],int const [],float const [],int const [],int const [],float const [],int [],int [],float [])\n" - " csc_plus_csc< int,double >(int const,int const,int const [],int const [],double const [],int 
const [],int const [],double const [],int [],int [],double [])\n" - " csc_plus_csc< int,long double >(int const,int const,int const [],int const [],long double const [],int const [],int const [],long double const [],int [],int [],long double [])\n" - " csc_plus_csc< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " csc_plus_csc< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " csc_plus_csc< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - signed char *arg5 ; - int *arg6 ; - int *arg7 ; - signed char *arg8 ; - int *arg9 ; - int *arg10 ; - signed char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - 
- if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_BYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (signed char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || 
!require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_BYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (signed char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_BYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (signed char*) array_data(temp11); - } - csc_minus_csc< int,signed char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(signed char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(signed char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if 
(is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned char *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned char *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned char *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || 
!require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UBYTE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned char*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UBYTE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned char*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || 
!require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UBYTE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned char*) array_data(temp11); - } - csc_minus_csc< int,unsigned char >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned char const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned char const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - short *arg5 ; - int *arg6 ; - int *arg7 ; - short *arg8 ; - int *arg9 ; - int *arg10 ; - short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = 
NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_SHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (short*) array5->data; - } - { - 
npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_SHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_SHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (short*) array_data(temp11); - } - csc_minus_csc< int,short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 
&& array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned short *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned short *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned short *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int 
>(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_USHORT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned short*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_USHORT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || 
!require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned short*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_USHORT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned short*) array_data(temp11); - } - csc_minus_csc< int,unsigned short >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned short const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned short const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - int *arg5 
; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int *arg9 ; - int *arg10 ; - int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || 
!require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_INT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_INT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (int*) array_data(temp11); - } - csc_minus_csc< int,int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,(int const (*))arg6,(int 
const (*))arg7,(int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned int *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned int *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned int *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * 
obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_UINT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned int*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if 
(!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_UINT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned int*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_UINT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned int*) array_data(temp11); - } - csc_minus_csc< int,unsigned int >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned int const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned int const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - 
{ - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long long *arg5 ; - int *arg6 ; - int *arg7 ; - long long *arg8 ; - int *arg9 ; - int *arg10 ; - long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || 
!require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || 
!require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long long*) array_data(temp11); - } - csc_minus_csc< int,long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - unsigned long long *arg5 ; - int *arg6 ; - int *arg7 ; - unsigned long long *arg8 ; - int *arg9 ; - int *arg10 ; - unsigned long long *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - 
PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_ULONGLONG, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (unsigned 
long long*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_ULONGLONG, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (unsigned long long*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_ULONGLONG); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (unsigned long long*) array_data(temp11); - } - csc_minus_csc< int,unsigned long long >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(unsigned long long const (*))arg5,(int const (*))arg6,(int const (*))arg7,(unsigned long long const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - 
Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_9(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - float *arg5 ; - int *arg6 ; - int *arg7 ; - float *arg8 ; - int *arg9 ; - int *arg10 ; - float *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method 
'" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (float*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, 
PyArray_FLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (float*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_FLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (float*) array_data(temp11); - } - csc_minus_csc< int,float >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,(int const (*))arg6,(int const (*))arg7,(float const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - 
int *arg3 ; - int *arg4 ; - double *arg5 ; - int *arg6 ; - int *arg7 ; - double *arg8 ; - int *arg9 ; - int *arg10 ; - double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || 
!require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_DOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_DOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (double*) array_data(temp11); - } - csc_minus_csc< int,double 
>(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - long double *arg5 ; - int *arg6 ; - int *arg7 ; - long double *arg8 ; - int *arg9 ; - int *arg10 ; - long double *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject 
* obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (long double*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { 
- -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_LONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (long double*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_LONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (long double*) array_data(temp11); - } - csc_minus_csc< int,long double >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long double const (*))arg5,(int const (*))arg6,(int const (*))arg7,(long double const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 
&& array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cfloat_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cfloat_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cfloat_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = 
obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cfloat_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CFLOAT, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cfloat_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) 
SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CFLOAT); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cfloat_wrapper*) array_data(temp11); - } - csc_minus_csc< int,npy_cfloat_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cfloat_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_cdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_cdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_cdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - 
PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); - if 
(!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_cdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_cdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_cdouble_wrapper*) array_data(temp11); - } - csc_minus_csc< int,npy_cdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_cdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if 
(is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int *arg3 ; - int *arg4 ; - npy_clongdouble_wrapper *arg5 ; - int *arg6 ; - int *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int *arg9 ; - int *arg10 ; - npy_clongdouble_wrapper *arg11 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *array4 = NULL ; - int is_new_object4 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *array8 = NULL ; - int is_new_object8 ; - PyArrayObject *temp9 = NULL ; - PyArrayObject *temp10 = NULL ; - PyArrayObject *temp11 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - PyObject * obj8 = 0 ; - PyObject * obj9 = 0 ; - PyObject * obj10 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOOOOO:csc_minus_csc",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8,&obj9,&obj10)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "csc_minus_csc" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "csc_minus_csc" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - npy_intp size[1] = { - -1 - }; - array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); - if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1) - || !require_contiguous(array4) || !require_native(array4)) SWIG_fail; - - arg4 = (int*) array4->data; - } - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CLONGDOUBLE, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (npy_clongdouble_wrapper*) array5->data; - } - { - npy_intp size[1] = { - -1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,1) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - - arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || 
!require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - npy_intp size[1] = { - -1 - }; - array8 = obj_to_array_contiguous_allow_conversion(obj7, PyArray_CLONGDOUBLE, &is_new_object8); - if (!array8 || !require_dimensions(array8,1) || !require_size(array8,size,1) - || !require_contiguous(array8) || !require_native(array8)) SWIG_fail; - - arg8 = (npy_clongdouble_wrapper*) array8->data; - } - { - temp9 = obj_to_array_no_conversion(obj8,PyArray_INT); - if (!temp9 || !require_contiguous(temp9) || !require_native(temp9)) SWIG_fail; - arg9 = (int*) array_data(temp9); - } - { - temp10 = obj_to_array_no_conversion(obj9,PyArray_INT); - if (!temp10 || !require_contiguous(temp10) || !require_native(temp10)) SWIG_fail; - arg10 = (int*) array_data(temp10); - } - { - temp11 = obj_to_array_no_conversion(obj10,PyArray_CLONGDOUBLE); - if (!temp11 || !require_contiguous(temp11) || !require_native(temp11)) SWIG_fail; - arg11 = (npy_clongdouble_wrapper*) array_data(temp11); - } - csc_minus_csc< int,npy_clongdouble_wrapper >(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_clongdouble_wrapper const (*))arg5,(int const (*))arg6,(int const (*))arg7,(npy_clongdouble_wrapper const (*))arg8,arg9,arg10,arg11); - resultobj = SWIG_Py_Void(); - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return resultobj; -fail: - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - { - if (is_new_object4 && array4) { - Py_DECREF(array4); - } - } - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if 
(is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - { - if (is_new_object8 && array8) { - Py_DECREF(array8); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_csc_minus_csc(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[12]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 11); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_BYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_FLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 
1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - if (argc == 11) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[8]) && PyArray_CanCastSafely(PyArray_TYPE(argv[8]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[9]) && PyArray_CanCastSafely(PyArray_TYPE(argv[9]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[10]) && PyArray_CanCastSafely(PyArray_TYPE(argv[10]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_csc_minus_csc__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'csc_minus_csc'.\n" - " Possible C/C++ prototypes are:\n" - " csc_minus_csc< int,signed char >(int const,int const,int const [],int const [],signed char const [],int const [],int const [],signed char const [],int [],int [],signed char [])\n" - " csc_minus_csc< int,unsigned char >(int const,int const,int const [],int const [],unsigned char const [],int const [],int const [],unsigned char const [],int [],int [],unsigned char [])\n" - " csc_minus_csc< int,short >(int const,int const,int const [],int const [],short const [],int const [],int const [],short const [],int [],int [],short [])\n" - " csc_minus_csc< int,unsigned short >(int const,int const,int const [],int const [],unsigned short const [],int const [],int const [],unsigned short const [],int [],int [],unsigned short [])\n" - " csc_minus_csc< int,int >(int const,int const,int const [],int const [],int const [],int const [],int const [],int const [],int [],int [],int [])\n" - " csc_minus_csc< int,unsigned int >(int const,int const,int const [],int const [],unsigned int const [],int const [],int const [],unsigned int const [],int [],int [],unsigned int [])\n" - " csc_minus_csc< int,long long >(int const,int const,int const [],int const [],long long const [],int const [],int const [],long long const [],int [],int [],long long [])\n" - " csc_minus_csc< int,unsigned long long >(int const,int const,int const [],int const [],unsigned long long const [],int const [],int const [],unsigned long long const [],int [],int [],unsigned long long [])\n" - " csc_minus_csc< int,float >(int const,int const,int const [],int const [],float const [],int const [],int const [],float const [],int [],int [],float [])\n" - " csc_minus_csc< int,double >(int const,int const,int const [],int const [],double 
const [],int const [],int const [],double const [],int [],int [],double [])\n" - " csc_minus_csc< int,long double >(int const,int const,int const [],int const [],long double const [],int const [],int const [],long double const [],int [],int [],long double [])\n" - " csc_minus_csc< int,npy_cfloat_wrapper >(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int const [],int const [],npy_cfloat_wrapper const [],int [],int [],npy_cfloat_wrapper [])\n" - " csc_minus_csc< int,npy_cdouble_wrapper >(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int const [],int const [],npy_cdouble_wrapper const [],int [],int [],npy_cdouble_wrapper [])\n" - " csc_minus_csc< int,npy_clongdouble_wrapper >(int const,int const,int const [],int const [],npy_clongdouble_wrapper const [],int const [],int const [],npy_clongdouble_wrapper const [],int [],int [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -static PyMethodDef SwigMethods[] = { - { (char *)"SWIG_PyInstanceMethod_New", (PyCFunction)SWIG_PyInstanceMethod_New, METH_O, NULL}, - { (char *)"csc_matmat_pass1", _wrap_csc_matmat_pass1, METH_VARARGS, (char *)"\n" - "csc_matmat_pass1(int n_row, int n_col, int Ap, int Ai, int Bp, int Bi, \n" - " int Cp)\n" - ""}, - { (char *)"csc_diagonal", _wrap_csc_diagonal, METH_VARARGS, (char *)"\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax, \n" - " signed char Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, \n" - " unsigned char Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, \n" - " unsigned short Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, \n" - " unsigned int Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax, \n" - " long long Yx)\n" - 
"csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, \n" - " unsigned long long Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax, \n" - " long double Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " npy_cfloat_wrapper Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " npy_cdouble_wrapper Yx)\n" - "csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, \n" - " npy_clongdouble_wrapper Yx)\n" - ""}, - { (char *)"csc_tocsr", _wrap_csc_tocsr, METH_VARARGS, (char *)"\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " int Bp, int Bj, signed char Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " int Bp, int Bj, unsigned char Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, \n" - " int Bj, short Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " int Bp, int Bj, unsigned short Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bj, int Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " int Bp, int Bj, unsigned int Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, long long Ax, \n" - " int Bp, int Bj, long long Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " int Bp, int Bj, unsigned long long Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bj, float Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bj, double Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, long double Ax, \n" - " int Bp, int Bj, long double Bx)\n" - "csc_tocsr(int n_row, int n_col, 
int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx)\n" - "csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_clongdouble_wrapper Bx)\n" - ""}, - { (char *)"csc_matmat_pass2", _wrap_csc_matmat_pass2, METH_VARARGS, (char *)"\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " int Bp, int Bi, signed char Bx, int Cp, int Ci, \n" - " signed char Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " int Bp, int Bi, unsigned char Bx, int Cp, \n" - " int Ci, unsigned char Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, \n" - " int Bi, short Bx, int Cp, int Ci, short Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " int Bp, int Bi, unsigned short Bx, int Cp, \n" - " int Ci, unsigned short Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, int Cp, int Ci, int Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " int Bp, int Bi, unsigned int Bx, int Cp, \n" - " int Ci, unsigned int Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long long Ax, \n" - " int Bp, int Bi, long long Bx, int Cp, int Ci, \n" - " long long Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " int Bp, int Bi, unsigned long long Bx, \n" - " int Cp, int Ci, unsigned long long Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, int Cp, int Ci, float Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, int Cp, int Ci, double Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long 
double Ax, \n" - " int Bp, int Bi, long double Bx, int Cp, int Ci, \n" - " long double Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " int Cp, int Ci, npy_cfloat_wrapper Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_cdouble_wrapper Cx)\n" - "csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_clongdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_clongdouble_wrapper Cx)\n" - ""}, - { (char *)"csc_matvec", _wrap_csc_matvec, METH_VARARGS, (char *)"\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " signed char Xx, signed char Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " unsigned char Xx, unsigned char Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, short Ax, short Xx, \n" - " short Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " unsigned short Xx, unsigned short Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx, \n" - " int Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " unsigned int Xx, unsigned int Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, long long Ax, \n" - " long long Xx, long long Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " unsigned long long Xx, unsigned long long Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx, \n" - " float Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx, \n" - " double Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, long double Ax, \n" - " long double Xx, long double Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " 
npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)\n" - "csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)\n" - ""}, - { (char *)"csc_matvecs", _wrap_csc_matvecs, METH_VARARGS, (char *)"\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, signed char Ax, \n" - " signed char Xx, signed char Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned char Ax, \n" - " unsigned char Xx, unsigned char Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, short Ax, \n" - " short Xx, short Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned short Ax, \n" - " unsigned short Xx, unsigned short Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, int Ax, \n" - " int Xx, int Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned int Ax, \n" - " unsigned int Xx, unsigned int Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long long Ax, \n" - " long long Xx, long long Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned long long Ax, \n" - " unsigned long long Xx, \n" - " unsigned long long Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, float Ax, \n" - " float Xx, float Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, double Ax, \n" - " double Xx, double Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long double Ax, \n" - " long double Xx, long double Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " npy_cfloat_wrapper Xx, \n" - " npy_cfloat_wrapper Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " 
npy_cdouble_wrapper Xx, \n" - " npy_cdouble_wrapper Yx)\n" - "csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " npy_clongdouble_wrapper Xx, \n" - " npy_clongdouble_wrapper Yx)\n" - ""}, - { (char *)"csc_elmul_csc", _wrap_csc_elmul_csc, METH_VARARGS, (char *)"\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " int Bp, int Bi, signed char Bx, int Cp, int Ci, \n" - " signed char Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " int Bp, int Bi, unsigned char Bx, int Cp, \n" - " int Ci, unsigned char Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, \n" - " int Bi, short Bx, int Cp, int Ci, short Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " int Bp, int Bi, unsigned short Bx, int Cp, \n" - " int Ci, unsigned short Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, int Cp, int Ci, int Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " int Bp, int Bi, unsigned int Bx, int Cp, \n" - " int Ci, unsigned int Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, \n" - " int Bp, int Bi, long long Bx, int Cp, int Ci, \n" - " long long Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " int Bp, int Bi, unsigned long long Bx, \n" - " int Cp, int Ci, unsigned long long Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, int Cp, int Ci, float Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, int Cp, int Ci, double Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, \n" - " int Bp, int Bi, long double Bx, int Cp, int Ci, \n" - " long double Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, 
npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " int Cp, int Ci, npy_cfloat_wrapper Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_cdouble_wrapper Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_clongdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_clongdouble_wrapper Cx)\n" - ""}, - { (char *)"csc_eldiv_csc", _wrap_csc_eldiv_csc, METH_VARARGS, (char *)"\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " int Bp, int Bi, signed char Bx, int Cp, int Ci, \n" - " signed char Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " int Bp, int Bi, unsigned char Bx, int Cp, \n" - " int Ci, unsigned char Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, \n" - " int Bi, short Bx, int Cp, int Ci, short Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " int Bp, int Bi, unsigned short Bx, int Cp, \n" - " int Ci, unsigned short Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, int Cp, int Ci, int Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " int Bp, int Bi, unsigned int Bx, int Cp, \n" - " int Ci, unsigned int Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, \n" - " int Bp, int Bi, long long Bx, int Cp, int Ci, \n" - " long long Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " int Bp, int Bi, unsigned long long Bx, \n" - " int Cp, int Ci, unsigned long long Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, int Cp, int Ci, float Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, 
double Bx, int Cp, int Ci, double Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, \n" - " int Bp, int Bi, long double Bx, int Cp, int Ci, \n" - " long double Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " int Cp, int Ci, npy_cfloat_wrapper Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_cdouble_wrapper Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_clongdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_clongdouble_wrapper Cx)\n" - ""}, - { (char *)"csc_plus_csc", _wrap_csc_plus_csc, METH_VARARGS, (char *)"\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " int Bp, int Bi, signed char Bx, int Cp, int Ci, \n" - " signed char Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " int Bp, int Bi, unsigned char Bx, int Cp, \n" - " int Ci, unsigned char Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, \n" - " int Bi, short Bx, int Cp, int Ci, short Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " int Bp, int Bi, unsigned short Bx, int Cp, \n" - " int Ci, unsigned short Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, int Cp, int Ci, int Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " int Bp, int Bi, unsigned int Bx, int Cp, \n" - " int Ci, unsigned int Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, \n" - " int Bp, int Bi, long long Bx, int Cp, int Ci, \n" - " long long Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " int Bp, int Bi, unsigned long long Bx, \n" - " int Cp, int Ci, 
unsigned long long Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, int Cp, int Ci, float Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, int Cp, int Ci, double Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, \n" - " int Bp, int Bi, long double Bx, int Cp, int Ci, \n" - " long double Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " int Cp, int Ci, npy_cfloat_wrapper Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_cdouble_wrapper Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_clongdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_clongdouble_wrapper Cx)\n" - ""}, - { (char *)"csc_minus_csc", _wrap_csc_minus_csc, METH_VARARGS, (char *)"\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, \n" - " int Bp, int Bi, signed char Bx, int Cp, int Ci, \n" - " signed char Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, \n" - " int Bp, int Bi, unsigned char Bx, int Cp, \n" - " int Ci, unsigned char Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, \n" - " int Bi, short Bx, int Cp, int Ci, short Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, \n" - " int Bp, int Bi, unsigned short Bx, int Cp, \n" - " int Ci, unsigned short Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, int Cp, int Ci, int Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, \n" - " int Bp, int Bi, unsigned int Bx, int Cp, \n" - " int Ci, unsigned int Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, 
long long Ax, \n" - " int Bp, int Bi, long long Bx, int Cp, int Ci, \n" - " long long Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, \n" - " int Bp, int Bi, unsigned long long Bx, \n" - " int Cp, int Ci, unsigned long long Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, int Cp, int Ci, float Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, int Cp, int Ci, double Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, \n" - " int Bp, int Bi, long double Bx, int Cp, int Ci, \n" - " long double Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " int Cp, int Ci, npy_cfloat_wrapper Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_cdouble_wrapper Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_clongdouble_wrapper Bx, \n" - " int Cp, int Ci, npy_clongdouble_wrapper Cx)\n" - ""}, - { NULL, NULL, 0, NULL } -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (BEGIN) -------- */ - -static swig_type_info _swigt__p_char = {"_p_char", "char *", 0, 0, (void*)0, 0}; - -static swig_type_info *swig_type_initial[] = { - &_swigt__p_char, -}; - -static swig_cast_info _swigc__p_char[] = { {&_swigt__p_char, 0, 0, 0},{0, 0, 0, 0}}; - -static swig_cast_info *swig_cast_initial[] = { - _swigc__p_char, -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (END) -------- */ - -static swig_const_info swig_const_table[] = { -{0, 0, 0, 0.0, 0, 0}}; - -#ifdef __cplusplus -} -#endif -/* ----------------------------------------------------------------------------- - * Type initialization: - * This problem is tough by the requirement that no dynamic 
- * memory is used. Also, since swig_type_info structures store pointers to - * swig_cast_info structures and swig_cast_info structures store pointers back - * to swig_type_info structures, we need some lookup code at initialization. - * The idea is that swig generates all the structures that are needed. - * The runtime then collects these partially filled structures. - * The SWIG_InitializeModule function takes these initial arrays out of - * swig_module, and does all the lookup, filling in the swig_module.types - * array with the correct data and linking the correct swig_cast_info - * structures together. - * - * The generated swig_type_info structures are assigned staticly to an initial - * array. We just loop through that array, and handle each type individually. - * First we lookup if this type has been already loaded, and if so, use the - * loaded structure instead of the generated one. Then we have to fill in the - * cast linked list. The cast data is initially stored in something like a - * two-dimensional array. Each row corresponds to a type (there are the same - * number of rows as there are in the swig_type_initial array). Each entry in - * a column is one of the swig_cast_info structures for that type. - * The cast_initial array is actually an array of arrays, because each row has - * a variable number of columns. So to actually build the cast linked list, - * we find the array of casts associated with the type, and loop through it - * adding the casts to the list. The one last trick we need to do is making - * sure the type pointer in the swig_cast_info struct is correct. - * - * First off, we lookup the cast->type name to see if it is already loaded. - * There are three cases to handle: - * 1) If the cast->type has already been loaded AND the type we are adding - * casting info to has not been loaded (it is in this module), THEN we - * replace the cast->type pointer with the type pointer that has already - * been loaded. 
- * 2) If BOTH types (the one we are adding casting info to, and the - * cast->type) are loaded, THEN the cast info has already been loaded by - * the previous module so we just ignore it. - * 3) Finally, if cast->type has not already been loaded, then we add that - * swig_cast_info to the linked list (because the cast->type) pointer will - * be correct. - * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* c-mode */ -#endif -#endif - -#if 0 -#define SWIGRUNTIME_DEBUG -#endif - - -SWIGRUNTIME void -SWIG_InitializeModule(void *clientdata) { - size_t i; - swig_module_info *module_head, *iter; - int found, init; - - clientdata = clientdata; - - /* check to see if the circular list has been setup, if not, set it up */ - if (swig_module.next==0) { - /* Initialize the swig_module */ - swig_module.type_initial = swig_type_initial; - swig_module.cast_initial = swig_cast_initial; - swig_module.next = &swig_module; - init = 1; - } else { - init = 0; - } - - /* Try and load any already created modules */ - module_head = SWIG_GetModule(clientdata); - if (!module_head) { - /* This is the first module loaded for this interpreter */ - /* so set the swig module into the interpreter */ - SWIG_SetModule(clientdata, &swig_module); - module_head = &swig_module; - } else { - /* the interpreter has loaded a SWIG module, but has it loaded this one? */ - found=0; - iter=module_head; - do { - if (iter==&swig_module) { - found=1; - break; - } - iter=iter->next; - } while (iter!= module_head); - - /* if the is found in the list, then all is done and we may leave */ - if (found) return; - /* otherwise we must add out module into the list */ - swig_module.next = module_head->next; - module_head->next = &swig_module; - } - - /* When multiple interpeters are used, a module could have already been initialized in - a different interpreter, but not yet have a pointer in this interpreter. 
- In this case, we do not want to continue adding types... everything should be - set up already */ - if (init == 0) return; - - /* Now work on filling in swig_module.types */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: size %d\n", swig_module.size); -#endif - for (i = 0; i < swig_module.size; ++i) { - swig_type_info *type = 0; - swig_type_info *ret; - swig_cast_info *cast; - -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); -#endif - - /* if there is another module already loaded */ - if (swig_module.next != &swig_module) { - type = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, swig_module.type_initial[i]->name); - } - if (type) { - /* Overwrite clientdata field */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found type %s\n", type->name); -#endif - if (swig_module.type_initial[i]->clientdata) { - type->clientdata = swig_module.type_initial[i]->clientdata; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found and overwrite type %s \n", type->name); -#endif - } - } else { - type = swig_module.type_initial[i]; - } - - /* Insert casting types */ - cast = swig_module.cast_initial[i]; - while (cast->type) { - /* Don't need to add information already in the list */ - ret = 0; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: look cast %s\n", cast->type->name); -#endif - if (swig_module.next != &swig_module) { - ret = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, cast->type->name); -#ifdef SWIGRUNTIME_DEBUG - if (ret) printf("SWIG_InitializeModule: found cast %s\n", ret->name); -#endif - } - if (ret) { - if (type == swig_module.type_initial[i]) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: skip old type %s\n", ret->name); -#endif - cast->type = ret; - ret = 0; - } else { - /* Check for casting already in the list */ - swig_cast_info *ocast = SWIG_TypeCheck(ret->name, type); -#ifdef SWIGRUNTIME_DEBUG - if (ocast) 
printf("SWIG_InitializeModule: skip old cast %s\n", ret->name); -#endif - if (!ocast) ret = 0; - } - } - - if (!ret) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: adding cast %s\n", cast->type->name); -#endif - if (type->cast) { - type->cast->prev = cast; - cast->next = type->cast; - } - type->cast = cast; - } - cast++; - } - /* Set entry in modules->types array equal to the type */ - swig_module.types[i] = type; - } - swig_module.types[i] = 0; - -#ifdef SWIGRUNTIME_DEBUG - printf("**** SWIG_InitializeModule: Cast List ******\n"); - for (i = 0; i < swig_module.size; ++i) { - int j = 0; - swig_cast_info *cast = swig_module.cast_initial[i]; - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); - while (cast->type) { - printf("SWIG_InitializeModule: cast type %s\n", cast->type->name); - cast++; - ++j; - } - printf("---- Total casts: %d\n",j); - } - printf("**** SWIG_InitializeModule: Cast List ******\n"); -#endif -} - -/* This function will propagate the clientdata field of type to -* any new swig_type_info structures that have been added into the list -* of equivalent types. It is like calling -* SWIG_TypeClientData(type, clientdata) a second time. 
-*/ -SWIGRUNTIME void -SWIG_PropagateClientData(void) { - size_t i; - swig_cast_info *equiv; - static int init_run = 0; - - if (init_run) return; - init_run = 1; - - for (i = 0; i < swig_module.size; i++) { - if (swig_module.types[i]->clientdata) { - equiv = swig_module.types[i]->cast; - while (equiv) { - if (!equiv->converter) { - if (equiv->type && !equiv->type->clientdata) - SWIG_TypeClientData(equiv->type, swig_module.types[i]->clientdata); - } - equiv = equiv->next; - } - } - } -} - -#ifdef __cplusplus -#if 0 -{ - /* c-mode */ -#endif -} -#endif - - - -#ifdef __cplusplus -extern "C" { -#endif - - /* Python-specific SWIG API */ -#define SWIG_newvarlink() SWIG_Python_newvarlink() -#define SWIG_addvarlink(p, name, get_attr, set_attr) SWIG_Python_addvarlink(p, name, get_attr, set_attr) -#define SWIG_InstallConstants(d, constants) SWIG_Python_InstallConstants(d, constants) - - /* ----------------------------------------------------------------------------- - * global variable support code. 
- * ----------------------------------------------------------------------------- */ - - typedef struct swig_globalvar { - char *name; /* Name of global variable */ - PyObject *(*get_attr)(void); /* Return the current value */ - int (*set_attr)(PyObject *); /* Set the value */ - struct swig_globalvar *next; - } swig_globalvar; - - typedef struct swig_varlinkobject { - PyObject_HEAD - swig_globalvar *vars; - } swig_varlinkobject; - - SWIGINTERN PyObject * - swig_varlink_repr(swig_varlinkobject *SWIGUNUSEDPARM(v)) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_InternFromString(""); -#else - return PyString_FromString(""); -#endif - } - - SWIGINTERN PyObject * - swig_varlink_str(swig_varlinkobject *v) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *str = PyUnicode_InternFromString("("); - PyObject *tail; - PyObject *joined; - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - tail = PyUnicode_FromString(var->name); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - if (var->next) { - tail = PyUnicode_InternFromString(", "); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - } - } - tail = PyUnicode_InternFromString(")"); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; -#else - PyObject *str = PyString_FromString("("); - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - PyString_ConcatAndDel(&str,PyString_FromString(var->name)); - if (var->next) PyString_ConcatAndDel(&str,PyString_FromString(", ")); - } - PyString_ConcatAndDel(&str,PyString_FromString(")")); -#endif - return str; - } - - SWIGINTERN int - swig_varlink_print(swig_varlinkobject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) { - char *tmp; - PyObject *str = swig_varlink_str(v); - fprintf(fp,"Swig global variables "); - fprintf(fp,"%s\n", tmp = SWIG_Python_str_AsChar(str)); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(str); - return 0; - } - 
- SWIGINTERN void - swig_varlink_dealloc(swig_varlinkobject *v) { - swig_globalvar *var = v->vars; - while (var) { - swig_globalvar *n = var->next; - free(var->name); - free(var); - var = n; - } - } - - SWIGINTERN PyObject * - swig_varlink_getattr(swig_varlinkobject *v, char *n) { - PyObject *res = NULL; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->get_attr)(); - break; - } - var = var->next; - } - if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN int - swig_varlink_setattr(swig_varlinkobject *v, char *n, PyObject *p) { - int res = 1; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->set_attr)(p); - break; - } - var = var->next; - } - if (res == 1 && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN PyTypeObject* - swig_varlink_type(void) { - static char varlink__doc__[] = "Swig var link object"; - static PyTypeObject varlink_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* Number of items in variable part (ob_size) */ -#endif - (char *)"swigvarlink", /* Type name (tp_name) */ - sizeof(swig_varlinkobject), /* Basic size (tp_basicsize) */ - 0, /* Itemsize (tp_itemsize) */ - (destructor) swig_varlink_dealloc, /* Deallocator (tp_dealloc) */ - (printfunc) swig_varlink_print, /* Print (tp_print) */ - (getattrfunc) swig_varlink_getattr, /* get attr (tp_getattr) */ - (setattrfunc) swig_varlink_setattr, /* Set attr (tp_setattr) */ - 0, /* tp_compare */ - (reprfunc) swig_varlink_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc) 
swig_varlink_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - varlink__doc__, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* tp_iter -> tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - varlink_type = tmp; - /* for Python 3 we already assigned ob_type in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - varlink_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &varlink_type; - } - - /* Create a variable linking object for use later */ - SWIGINTERN PyObject * - SWIG_Python_newvarlink(void) { - swig_varlinkobject *result = PyObject_NEW(swig_varlinkobject, swig_varlink_type()); - if (result) { - result->vars = 0; - } - return ((PyObject*) result); - } - - SWIGINTERN void - SWIG_Python_addvarlink(PyObject *p, char *name, PyObject *(*get_attr)(void), int (*set_attr)(PyObject *p)) { - swig_varlinkobject *v = (swig_varlinkobject *) p; - swig_globalvar *gv = (swig_globalvar *) malloc(sizeof(swig_globalvar)); - if (gv) { - size_t size = strlen(name)+1; - gv->name = (char *)malloc(size); - if (gv->name) { - strncpy(gv->name,name,size); - gv->get_attr = get_attr; - gv->set_attr = set_attr; - gv->next = v->vars; - } - } - v->vars = gv; - } - - SWIGINTERN PyObject * - SWIG_globals(void) { - static PyObject *_SWIG_globals = 0; - if (!_SWIG_globals) _SWIG_globals = SWIG_newvarlink(); - return _SWIG_globals; - } - - /* ----------------------------------------------------------------------------- - * constants/methods manipulation - * ----------------------------------------------------------------------------- */ - - /* Install Constants */ - SWIGINTERN void - SWIG_Python_InstallConstants(PyObject *d, swig_const_info constants[]) { - PyObject *obj 
= 0; - size_t i; - for (i = 0; constants[i].type; ++i) { - switch(constants[i].type) { - case SWIG_PY_POINTER: - obj = SWIG_NewPointerObj(constants[i].pvalue, *(constants[i]).ptype,0); - break; - case SWIG_PY_BINARY: - obj = SWIG_NewPackedObj(constants[i].pvalue, constants[i].lvalue, *(constants[i].ptype)); - break; - default: - obj = 0; - break; - } - if (obj) { - PyDict_SetItemString(d, constants[i].name, obj); - Py_DECREF(obj); - } - } - } - - /* -----------------------------------------------------------------------------*/ - /* Fix SwigMethods to carry the callback ptrs when needed */ - /* -----------------------------------------------------------------------------*/ - - SWIGINTERN void - SWIG_Python_FixMethods(PyMethodDef *methods, - swig_const_info *const_table, - swig_type_info **types, - swig_type_info **types_initial) { - size_t i; - for (i = 0; methods[i].ml_name; ++i) { - const char *c = methods[i].ml_doc; - if (c && (c = strstr(c, "swig_ptr: "))) { - int j; - swig_const_info *ci = 0; - const char *name = c + 10; - for (j = 0; const_table[j].type; ++j) { - if (strncmp(const_table[j].name, name, - strlen(const_table[j].name)) == 0) { - ci = &(const_table[j]); - break; - } - } - if (ci) { - void *ptr = (ci->type == SWIG_PY_POINTER) ? 
ci->pvalue : 0; - if (ptr) { - size_t shift = (ci->ptype) - types; - swig_type_info *ty = types_initial[shift]; - size_t ldoc = (c - methods[i].ml_doc); - size_t lptr = strlen(ty->name)+2*sizeof(void*)+2; - char *ndoc = (char*)malloc(ldoc + lptr + 10); - if (ndoc) { - char *buff = ndoc; - strncpy(buff, methods[i].ml_doc, ldoc); - buff += ldoc; - strncpy(buff, "swig_ptr: ", 10); - buff += 10; - SWIG_PackVoidPtr(buff, ptr, ty->name, lptr); - methods[i].ml_doc = ndoc; - } - } - } - } - } - } - -#ifdef __cplusplus -} -#endif - -/* -----------------------------------------------------------------------------* - * Partial Init method - * -----------------------------------------------------------------------------*/ - -#ifdef __cplusplus -extern "C" -#endif - -SWIGEXPORT -#if PY_VERSION_HEX >= 0x03000000 -PyObject* -#else -void -#endif -SWIG_init(void) { - PyObject *m, *d; -#if PY_VERSION_HEX >= 0x03000000 - static struct PyModuleDef SWIG_module = { - PyModuleDef_HEAD_INIT, - (char *) SWIG_name, - NULL, - -1, - SwigMethods, - NULL, - NULL, - NULL, - NULL - }; -#endif - - /* Fix SwigMethods to carry the callback ptrs when needed */ - SWIG_Python_FixMethods(SwigMethods, swig_const_table, swig_types, swig_type_initial); - -#if PY_VERSION_HEX >= 0x03000000 - m = PyModule_Create(&SWIG_module); -#else - m = Py_InitModule((char *) SWIG_name, SwigMethods); -#endif - d = PyModule_GetDict(m); - - SWIG_InitializeModule(0); - SWIG_InstallConstants(d,swig_const_table); - - - - import_array(); - -#if PY_VERSION_HEX >= 0x03000000 - return m; -#else - return; -#endif -} - diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csgraph.h b/scipy-0.10.1/scipy/sparse/sparsetools/csgraph.h deleted file mode 100644 index 7c6dab42df..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csgraph.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef __CSGRAPH_H__ -#define __CSGRAPH_H__ - -#include - -/* - * Determine connected compoments of a compressed sparse graph. 
- * Note: - * Output array flag must be preallocated - */ -template -I cs_graph_components(const I n_nod, - const I Ap[], - const I Aj[], - I flag[]) -{ - // pos is a work array: list of nodes (rows) to process. - std::vector pos(n_nod,01); - I n_comp = 0; - I n_tot, n_pos, n_pos_new, n_pos0, n_new, n_stop; - I icomp, ii, ir, ic; - - n_stop = n_nod; - for (ir = 0; ir < n_nod; ir++) { - flag[ir] = -1; - if (Ap[ir+1] == Ap[ir]) { - n_stop--; - flag[ir] = -2; - } - } - - n_tot = 0; - for (icomp = 0; icomp < n_nod; icomp++) { - // Find seed. - ii = 0; - while ((flag[ii] >= 0) || (flag[ii] == -2)) { - ii++; - if (ii >= n_nod) { - /* Sanity check, if this happens, the graph is corrupted. */ - return -1; - } - } - - flag[ii] = icomp; - pos[0] = ii; - n_pos0 = 0; - n_pos_new = n_pos = 1; - - for (ii = 0; ii < n_nod; ii++) { - n_new = 0; - for (ir = n_pos0; ir < n_pos; ir++) { - for (ic = Ap[pos[ir]]; ic < Ap[pos[ir]+1]; ic++) { - if (flag[Aj[ic]] == -1) { - flag[Aj[ic]] = icomp; - pos[n_pos_new] = Aj[ic]; - n_pos_new++; - n_new++; - } - } - } - n_pos0 = n_pos; - n_pos = n_pos_new; - if (n_new == 0) break; - } - n_tot += n_pos; - - if (n_tot == n_stop) { - n_comp = icomp + 1; - break; - } - } - - return n_comp; -} - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csgraph.py b/scipy-0.10.1/scipy/sparse/sparsetools/csgraph.py deleted file mode 100644 index d4ce5db714..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csgraph.py +++ /dev/null @@ -1,72 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 2.0.1+capsulehack -# -# Do not make changes to this file unless you know what you are doing--modify -# the SWIG interface file instead. -# This file is compatible with both classic and new-style classes. 
- -from sys import version_info -if version_info >= (2,6,0): - def swig_import_helper(): - from os.path import dirname - import imp - fp = None - try: - fp, pathname, description = imp.find_module('_csgraph', [dirname(__file__)]) - except ImportError: - import _csgraph - return _csgraph - if fp is not None: - try: - _mod = imp.load_module('_csgraph', fp, pathname, description) - finally: - fp.close() - return _mod - _csgraph = swig_import_helper() - del swig_import_helper -else: - import _csgraph -del version_info -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. -def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'SwigPyObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError(name) - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -try: - _object = object - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 - - - -def cs_graph_components(*args): - """cs_graph_components(int n_nod, int Ap, int Aj, int flag) -> int""" - return _csgraph.cs_graph_components(*args) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csgraph_wrap.cxx b/scipy-0.10.1/scipy/sparse/sparsetools/csgraph_wrap.cxx deleted 
file mode 100644 index 6329d07f64..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csgraph_wrap.cxx +++ /dev/null @@ -1,4039 +0,0 @@ -/* ---------------------------------------------------------------------------- - * This file was automatically generated by SWIG (http://www.swig.org). - * Version 2.0.1+capsulehack - * - * This file is not intended to be easily readable and contains a number of - * coding conventions designed to improve portability and efficiency. Do not make - * changes to this file unless you know what you are doing--modify the SWIG - * interface file instead. - * ----------------------------------------------------------------------------- */ - -#define SWIGPYTHON -#define SWIG_PYTHON_DIRECTOR_NO_VTABLE - - -#ifdef __cplusplus -/* SwigValueWrapper is described in swig.swg */ -template class SwigValueWrapper { - struct SwigMovePointer { - T *ptr; - SwigMovePointer(T *p) : ptr(p) { } - ~SwigMovePointer() { delete ptr; } - SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } - } pointer; - SwigValueWrapper& operator=(const SwigValueWrapper& rhs); - SwigValueWrapper(const SwigValueWrapper& rhs); -public: - SwigValueWrapper() : pointer(0) { } - SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } - operator T&() const { return *pointer.ptr; } - T *operator&() { return pointer.ptr; } -}; - -template T SwigValueInit() { - return T(); -} -#endif - -/* ----------------------------------------------------------------------------- - * This section contains generic SWIG labels for method/variable - * declarations/attributes, and other compiler dependent labels. 
- * ----------------------------------------------------------------------------- */ - -/* template workaround for compilers that cannot correctly implement the C++ standard */ -#ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template -# else -# define SWIGTEMPLATEDISAMBIGUATOR -# endif -#endif - -/* inline attribute */ -#ifndef SWIGINLINE -# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) -# define SWIGINLINE inline -# else -# define SWIGINLINE -# endif -#endif - -/* attribute recognised by some compilers to avoid 'unused' warnings */ -#ifndef SWIGUNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -# elif defined(__ICC) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -#endif - -#ifndef SWIG_MSC_UNSUPPRESS_4505 -# if defined(_MSC_VER) -# pragma warning(disable : 4505) /* unreferenced local function has been removed */ -# endif -#endif - -#ifndef SWIGUNUSEDPARM -# ifdef __cplusplus -# define SWIGUNUSEDPARM(p) -# else -# define SWIGUNUSEDPARM(p) p SWIGUNUSED -# endif -#endif - -/* internal SWIG method */ -#ifndef SWIGINTERN -# define SWIGINTERN static SWIGUNUSED -#endif - -/* internal inline SWIG method */ -#ifndef SWIGINTERNINLINE -# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE -#endif - -/* exporting methods */ -#if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -# ifndef GCC_HASCLASSVISIBILITY -# define GCC_HASCLASSVISIBILITY -# endif -#endif - -#ifndef SWIGEXPORT -# if defined(_WIN32) || 
defined(__WIN32__) || defined(__CYGWIN__) -# if defined(STATIC_LINKED) -# define SWIGEXPORT -# else -# define SWIGEXPORT __declspec(dllexport) -# endif -# else -# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) -# define SWIGEXPORT __attribute__ ((visibility("default"))) -# else -# define SWIGEXPORT -# endif -# endif -#endif - -/* calling conventions for Windows */ -#ifndef SWIGSTDCALL -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# define SWIGSTDCALL __stdcall -# else -# define SWIGSTDCALL -# endif -#endif - -/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ -#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -#endif - -/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ -#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) -# define _SCL_SECURE_NO_DEPRECATE -#endif - - - -/* Python.h has to appear first */ -#include - -/* ----------------------------------------------------------------------------- - * swigrun.swg - * - * This file contains generic C API SWIG runtime support for pointer - * type checking. - * ----------------------------------------------------------------------------- */ - -/* This should only be incremented when either the layout of swig_type_info changes, - or for whatever reason, the runtime changes incompatibly */ -#define SWIG_RUNTIME_VERSION "4" - -/* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */ -#ifdef SWIG_TYPE_TABLE -# define SWIG_QUOTE_STRING(x) #x -# define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x) -# define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE) -#else -# define SWIG_TYPE_TABLE_NAME -#endif - -/* - You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for - creating a static or dynamic library from the SWIG runtime code. 
- In 99.9% of the cases, SWIG just needs to declare them as 'static'. - - But only do this if strictly necessary, ie, if you have problems - with your compiler or suchlike. -*/ - -#ifndef SWIGRUNTIME -# define SWIGRUNTIME SWIGINTERN -#endif - -#ifndef SWIGRUNTIMEINLINE -# define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE -#endif - -/* Generic buffer size */ -#ifndef SWIG_BUFFER_SIZE -# define SWIG_BUFFER_SIZE 1024 -#endif - -/* Flags for pointer conversions */ -#define SWIG_POINTER_DISOWN 0x1 -#define SWIG_CAST_NEW_MEMORY 0x2 - -/* Flags for new pointer objects */ -#define SWIG_POINTER_OWN 0x1 - - -/* - Flags/methods for returning states. - - The SWIG conversion methods, as ConvertPtr, return an integer - that tells if the conversion was successful or not. And if not, - an error code can be returned (see swigerrors.swg for the codes). - - Use the following macros/flags to set or process the returning - states. - - In old versions of SWIG, code such as the following was usually written: - - if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) { - // success code - } else { - //fail code - } - - Now you can be more explicit: - - int res = SWIG_ConvertPtr(obj,vptr,ty.flags); - if (SWIG_IsOK(res)) { - // success code - } else { - // fail code - } - - which is the same really, but now you can also do - - Type *ptr; - int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags); - if (SWIG_IsOK(res)) { - // success code - if (SWIG_IsNewObj(res) { - ... - delete *ptr; - } else { - ... - } - } else { - // fail code - } - - I.e., now SWIG_ConvertPtr can return new objects and you can - identify the case and take care of the deallocation. Of course that - also requires SWIG_ConvertPtr to return new result values, such as - - int SWIG_ConvertPtr(obj, ptr,...) 
{ - if () { - if () { - *ptr = ; - return SWIG_NEWOBJ; - } else { - *ptr = ; - return SWIG_OLDOBJ; - } - } else { - return SWIG_BADOBJ; - } - } - - Of course, returning the plain '0(success)/-1(fail)' still works, but you can be - more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the - SWIG errors code. - - Finally, if the SWIG_CASTRANK_MODE is enabled, the result code - allows to return the 'cast rank', for example, if you have this - - int food(double) - int fooi(int); - - and you call - - food(1) // cast rank '1' (1 -> 1.0) - fooi(1) // cast rank '0' - - just use the SWIG_AddCast()/SWIG_CheckState() -*/ - -#define SWIG_OK (0) -#define SWIG_ERROR (-1) -#define SWIG_IsOK(r) (r >= 0) -#define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError) - -/* The CastRankLimit says how many bits are used for the cast rank */ -#define SWIG_CASTRANKLIMIT (1 << 8) -/* The NewMask denotes the object was created (using new/malloc) */ -#define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1) -/* The TmpMask is for in/out typemaps that use temporal objects */ -#define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1) -/* Simple returning values */ -#define SWIG_BADOBJ (SWIG_ERROR) -#define SWIG_OLDOBJ (SWIG_OK) -#define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK) -#define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK) -/* Check, add and del mask methods */ -#define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r) -#define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r) -#define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK)) -#define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r) -#define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? 
(r & ~SWIG_TMPOBJMASK) : r) -#define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK)) - -/* Cast-Rank Mode */ -#if defined(SWIG_CASTRANK_MODE) -# ifndef SWIG_TypeRank -# define SWIG_TypeRank unsigned long -# endif -# ifndef SWIG_MAXCASTRANK /* Default cast allowed */ -# define SWIG_MAXCASTRANK (2) -# endif -# define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1) -# define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK) -SWIGINTERNINLINE int SWIG_AddCast(int r) { - return SWIG_IsOK(r) ? ((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r; -} -SWIGINTERNINLINE int SWIG_CheckState(int r) { - return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0; -} -#else /* no cast-rank mode */ -# define SWIG_AddCast -# define SWIG_CheckState(r) (SWIG_IsOK(r) ? 1 : 0) -#endif - - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void *(*swig_converter_func)(void *, int *); -typedef struct swig_type_info *(*swig_dycast_func)(void **); - -/* Structure to store information on one type */ -typedef struct swig_type_info { - const char *name; /* mangled name of this type */ - const char *str; /* human readable name of this type */ - swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ - struct swig_cast_info *cast; /* linked list of types that can cast into this type */ - void *clientdata; /* language specific type data */ - int owndata; /* flag if the structure owns the clientdata */ -} swig_type_info; - -/* Structure to store a type and conversion function used for casting */ -typedef struct swig_cast_info { - swig_type_info *type; /* pointer to type that is equivalent to this type */ - swig_converter_func converter; /* function to cast the void pointers */ - struct swig_cast_info *next; /* pointer to next cast in linked list */ - struct swig_cast_info *prev; /* pointer to the previous cast */ -} swig_cast_info; - -/* Structure used to store module information - * Each module generates one structure like this, and the runtime collects - * all of these 
structures and stores them in a circularly linked list.*/ -typedef struct swig_module_info { - swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */ - size_t size; /* Number of types in this module */ - struct swig_module_info *next; /* Pointer to next element in circularly linked list */ - swig_type_info **type_initial; /* Array of initially generated type structures */ - swig_cast_info **cast_initial; /* Array of initially generated casting structures */ - void *clientdata; /* Language specific module data */ -} swig_module_info; - -/* - Compare two type names skipping the space characters, therefore - "char*" == "char *" and "Class" == "Class", etc. - - Return 0 when the two name types are equivalent, as in - strncmp, but skipping ' '. -*/ -SWIGRUNTIME int -SWIG_TypeNameComp(const char *f1, const char *l1, - const char *f2, const char *l2) { - for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) { - while ((*f1 == ' ') && (f1 != l1)) ++f1; - while ((*f2 == ' ') && (f2 != l2)) ++f2; - if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1; - } - return (int)((l1 - f1) - (l2 - f2)); -} - -/* - Check type equivalence in a name list like ||... - Return 0 if not equal, 1 if equal -*/ -SWIGRUNTIME int -SWIG_TypeEquiv(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0; - if (*ne) ++ne; - } - return equiv; -} - -/* - Check type equivalence in a name list like ||... - Return 0 if equal, -1 if nb < tb, 1 if nb > tb -*/ -SWIGRUNTIME int -SWIG_TypeCompare(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 
1 : 0; - if (*ne) ++ne; - } - return equiv; -} - - -/* - Check the typename -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheck(const char *c, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (strcmp(iter->type->name, c) == 0) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Identical to SWIG_TypeCheck, except strcmp is replaced with a pointer comparison -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (iter->type == from) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Cast a pointer up an inheritance hierarchy -*/ -SWIGRUNTIMEINLINE void * -SWIG_TypeCast(swig_cast_info *ty, void *ptr, int *newmemory) { - return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr, newmemory); -} - -/* - Dynamic pointer casting. 
Down an inheritance hierarchy -*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) { - swig_type_info *lastty = ty; - if (!ty || !ty->dcast) return ty; - while (ty && (ty->dcast)) { - ty = (*ty->dcast)(ptr); - if (ty) lastty = ty; - } - return lastty; -} - -/* - Return the name associated with this type -*/ -SWIGRUNTIMEINLINE const char * -SWIG_TypeName(const swig_type_info *ty) { - return ty->name; -} - -/* - Return the pretty name associated with this type, - that is an unmangled type name in a form presentable to the user. -*/ -SWIGRUNTIME const char * -SWIG_TypePrettyName(const swig_type_info *type) { - /* The "str" field contains the equivalent pretty names of the - type, separated by vertical-bar characters. We choose - to print the last name, as it is often (?) the most - specific. */ - if (!type) return NULL; - if (type->str != NULL) { - const char *last_name = type->str; - const char *s; - for (s = type->str; *s; s++) - if (*s == '|') last_name = s+1; - return last_name; - } - else - return type->name; -} - -/* - Set the clientdata field for a type -*/ -SWIGRUNTIME void -SWIG_TypeClientData(swig_type_info *ti, void *clientdata) { - swig_cast_info *cast = ti->cast; - /* if (ti->clientdata == clientdata) return; */ - ti->clientdata = clientdata; - - while (cast) { - if (!cast->converter) { - swig_type_info *tc = cast->type; - if (!tc->clientdata) { - SWIG_TypeClientData(tc, clientdata); - } - } - cast = cast->next; - } -} -SWIGRUNTIME void -SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) { - SWIG_TypeClientData(ti, clientdata); - ti->owndata = 1; -} - -/* - Search for a swig_type_info structure only by mangled name - Search is a O(log #types) - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_MangledTypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - swig_module_info *iter = start; - do { - if (iter->size) { - register size_t l = 0; - register size_t r = iter->size - 1; - do { - /* since l+r >= 0, we can (>> 1) instead (/ 2) */ - register size_t i = (l + r) >> 1; - const char *iname = iter->types[i]->name; - if (iname) { - register int compare = strcmp(name, iname); - if (compare == 0) { - return iter->types[i]; - } else if (compare < 0) { - if (i) { - r = i - 1; - } else { - break; - } - } else if (compare > 0) { - l = i + 1; - } - } else { - break; /* should never happen */ - } - } while (l <= r); - } - iter = iter->next; - } while (iter != end); - return 0; -} - -/* - Search for a swig_type_info structure for either a mangled name or a human readable name. - It first searches the mangled names of the types, which is a O(log #types) - If a type is not found it then searches the human readable names, which is O(#types). - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - /* STEP 1: Search the name field using binary search */ - swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name); - if (ret) { - return ret; - } else { - /* STEP 2: If the type hasn't been found, do a complete search - of the str field (the human readable name) */ - swig_module_info *iter = start; - do { - register size_t i = 0; - for (; i < iter->size; ++i) { - if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name))) - return iter->types[i]; - } - iter = iter->next; - } while (iter != end); - } - - /* neither found a match */ - return 0; -} - -/* - Pack binary data into a string -*/ -SWIGRUNTIME char * -SWIG_PackData(char *c, void *ptr, size_t sz) { - static const char hex[17] = "0123456789abcdef"; - register const unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register unsigned char uu = *u; - *(c++) = hex[(uu & 0xf0) >> 4]; - *(c++) = hex[uu & 0xf]; - } - return c; -} - -/* - Unpack binary data from a string -*/ -SWIGRUNTIME const char * -SWIG_UnpackData(const char *c, void *ptr, size_t sz) { - register unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register char d = *(c++); - register unsigned char uu; - if ((d >= '0') && (d <= '9')) - uu = ((d - '0') << 4); - else if ((d >= 'a') && (d <= 'f')) - uu = ((d - ('a'-10)) << 4); - else - return (char *) 0; - d = *(c++); - if ((d >= '0') && (d <= '9')) - uu |= (d - '0'); - else if ((d >= 'a') && (d <= 'f')) - uu |= (d - ('a'-10)); - else - return (char *) 0; - *u = uu; - } - return c; -} - -/* - Pack 'void *' into a string buffer. 
-*/ -SWIGRUNTIME char * -SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) { - char *r = buff; - if ((2*sizeof(void *) + 2) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,&ptr,sizeof(void *)); - if (strlen(name) + 1 > (bsz - (r - buff))) return 0; - strcpy(r,name); - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - *ptr = (void *) 0; - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sizeof(void *)); -} - -SWIGRUNTIME char * -SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) { - char *r = buff; - size_t lname = (name ? strlen(name) : 0); - if ((2*sz + 2 + lname) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,ptr,sz); - if (lname) { - strncpy(r,name,lname+1); - } else { - *r = 0; - } - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - memset(ptr,0,sz); - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sz); -} - -#ifdef __cplusplus -} -#endif - -/* Errors in SWIG */ -#define SWIG_UnknownError -1 -#define SWIG_IOError -2 -#define SWIG_RuntimeError -3 -#define SWIG_IndexError -4 -#define SWIG_TypeError -5 -#define SWIG_DivisionByZero -6 -#define SWIG_OverflowError -7 -#define SWIG_SyntaxError -8 -#define SWIG_ValueError -9 -#define SWIG_SystemError -10 -#define SWIG_AttributeError -11 -#define SWIG_MemoryError -12 -#define SWIG_NullReferenceError -13 - - - -/* Compatibility macros for Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - -#define PyClass_Check(obj) PyObject_IsInstance(obj, (PyObject *)&PyType_Type) -#define PyInt_Check(x) PyLong_Check(x) -#define PyInt_AsLong(x) PyLong_AsLong(x) -#define PyInt_FromLong(x) PyLong_FromLong(x) -#define PyString_Format(fmt, args) PyUnicode_Format(fmt, args) - -#endif - 
-#ifndef Py_TYPE -# define Py_TYPE(op) ((op)->ob_type) -#endif - -/* SWIG APIs for compatibility of both Python 2 & 3 */ - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_FromFormat PyUnicode_FromFormat -#else -# define SWIG_Python_str_FromFormat PyString_FromFormat -#endif - - -/* Warning: This function will allocate a new string in Python 3, - * so please call SWIG_Python_str_DelForPy3(x) to free the space. - */ -SWIGINTERN char* -SWIG_Python_str_AsChar(PyObject *str) -{ -#if PY_VERSION_HEX >= 0x03000000 - char *cstr; - char *newstr; - Py_ssize_t len; - str = PyUnicode_AsUTF8String(str); - PyBytes_AsStringAndSize(str, &cstr, &len); - newstr = (char *) malloc(len+1); - memcpy(newstr, cstr, len+1); - Py_XDECREF(str); - return newstr; -#else - return PyString_AsString(str); -#endif -} - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_DelForPy3(x) free( (void*) (x) ) -#else -# define SWIG_Python_str_DelForPy3(x) -#endif - - -SWIGINTERN PyObject* -SWIG_Python_str_FromChar(const char *c) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_FromString(c); -#else - return PyString_FromString(c); -#endif -} - -/* Add PyOS_snprintf for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) -# define PyOS_snprintf _snprintf -# else -# define PyOS_snprintf snprintf -# endif -#endif - -/* A crude PyString_FromFormat implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 - -#ifndef SWIG_PYBUFFER_SIZE -# define SWIG_PYBUFFER_SIZE 1024 -#endif - -static PyObject * -PyString_FromFormat(const char *fmt, ...) { - va_list ap; - char buf[SWIG_PYBUFFER_SIZE * 2]; - int res; - va_start(ap, fmt); - res = vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - return (res < 0 || res >= (int)sizeof(buf)) ? 
0 : PyString_FromString(buf); -} -#endif - -/* Add PyObject_Del for old Pythons */ -#if PY_VERSION_HEX < 0x01060000 -# define PyObject_Del(op) PyMem_DEL((op)) -#endif -#ifndef PyObject_DEL -# define PyObject_DEL PyObject_Del -#endif - -/* A crude PyExc_StopIteration exception for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# ifndef PyExc_StopIteration -# define PyExc_StopIteration PyExc_RuntimeError -# endif -# ifndef PyObject_GenericGetAttr -# define PyObject_GenericGetAttr 0 -# endif -#endif - -/* Py_NotImplemented is defined in 2.1 and up. */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef Py_NotImplemented -# define Py_NotImplemented PyExc_RuntimeError -# endif -#endif - -/* A crude PyString_AsStringAndSize implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef PyString_AsStringAndSize -# define PyString_AsStringAndSize(obj, s, len) {*s = PyString_AsString(obj); *len = *s ? strlen(*s) : 0;} -# endif -#endif - -/* PySequence_Size for old Pythons */ -#if PY_VERSION_HEX < 0x02000000 -# ifndef PySequence_Size -# define PySequence_Size PySequence_Length -# endif -#endif - -/* PyBool_FromLong for old Pythons */ -#if PY_VERSION_HEX < 0x02030000 -static -PyObject *PyBool_FromLong(long ok) -{ - PyObject *result = ok ? 
Py_True : Py_False; - Py_INCREF(result); - return result; -} -#endif - -/* Py_ssize_t for old Pythons */ -/* This code is as recommended by: */ -/* http://www.python.org/dev/peps/pep-0353/#conversion-guidelines */ -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -# define PY_SSIZE_T_MAX INT_MAX -# define PY_SSIZE_T_MIN INT_MIN -#endif - -/* ----------------------------------------------------------------------------- - * error manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIME PyObject* -SWIG_Python_ErrorType(int code) { - PyObject* type = 0; - switch(code) { - case SWIG_MemoryError: - type = PyExc_MemoryError; - break; - case SWIG_IOError: - type = PyExc_IOError; - break; - case SWIG_RuntimeError: - type = PyExc_RuntimeError; - break; - case SWIG_IndexError: - type = PyExc_IndexError; - break; - case SWIG_TypeError: - type = PyExc_TypeError; - break; - case SWIG_DivisionByZero: - type = PyExc_ZeroDivisionError; - break; - case SWIG_OverflowError: - type = PyExc_OverflowError; - break; - case SWIG_SyntaxError: - type = PyExc_SyntaxError; - break; - case SWIG_ValueError: - type = PyExc_ValueError; - break; - case SWIG_SystemError: - type = PyExc_SystemError; - break; - case SWIG_AttributeError: - type = PyExc_AttributeError; - break; - default: - type = PyExc_RuntimeError; - } - return type; -} - - -SWIGRUNTIME void -SWIG_Python_AddErrorMsg(const char* mesg) -{ - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - - if (PyErr_Occurred()) PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - PyErr_Clear(); - Py_XINCREF(type); - - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - Py_DECREF(value); - } else { - PyErr_SetString(PyExc_RuntimeError, mesg); - } -} - -#if defined(SWIG_PYTHON_NO_THREADS) -# if 
defined(SWIG_PYTHON_THREADS) -# undef SWIG_PYTHON_THREADS -# endif -#endif -#if defined(SWIG_PYTHON_THREADS) /* Threading support is enabled */ -# if !defined(SWIG_PYTHON_USE_GIL) && !defined(SWIG_PYTHON_NO_USE_GIL) -# if (PY_VERSION_HEX >= 0x02030000) /* For 2.3 or later, use the PyGILState calls */ -# define SWIG_PYTHON_USE_GIL -# endif -# endif -# if defined(SWIG_PYTHON_USE_GIL) /* Use PyGILState threads calls */ -# ifndef SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_INITIALIZE_THREADS PyEval_InitThreads() -# endif -# ifdef __cplusplus /* C++ code */ - class SWIG_Python_Thread_Block { - bool status; - PyGILState_STATE state; - public: - void end() { if (status) { PyGILState_Release(state); status = false;} } - SWIG_Python_Thread_Block() : status(true), state(PyGILState_Ensure()) {} - ~SWIG_Python_Thread_Block() { end(); } - }; - class SWIG_Python_Thread_Allow { - bool status; - PyThreadState *save; - public: - void end() { if (status) { PyEval_RestoreThread(save); status = false; }} - SWIG_Python_Thread_Allow() : status(true), save(PyEval_SaveThread()) {} - ~SWIG_Python_Thread_Allow() { end(); } - }; -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK SWIG_Python_Thread_Block _swig_thread_block -# define SWIG_PYTHON_THREAD_END_BLOCK _swig_thread_block.end() -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW SWIG_Python_Thread_Allow _swig_thread_allow -# define SWIG_PYTHON_THREAD_END_ALLOW _swig_thread_allow.end() -# else /* C code */ -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK PyGILState_STATE _swig_thread_block = PyGILState_Ensure() -# define SWIG_PYTHON_THREAD_END_BLOCK PyGILState_Release(_swig_thread_block) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW PyThreadState *_swig_thread_allow = PyEval_SaveThread() -# define SWIG_PYTHON_THREAD_END_ALLOW PyEval_RestoreThread(_swig_thread_allow) -# endif -# else /* Old thread way, not implemented, user must provide it */ -# if !defined(SWIG_PYTHON_INITIALIZE_THREADS) -# define SWIG_PYTHON_INITIALIZE_THREADS -# endif -# if 
!defined(SWIG_PYTHON_THREAD_BEGIN_BLOCK) -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_END_BLOCK) -# define SWIG_PYTHON_THREAD_END_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_BEGIN_ALLOW) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# endif -# if !defined(SWIG_PYTHON_THREAD_END_ALLOW) -# define SWIG_PYTHON_THREAD_END_ALLOW -# endif -# endif -#else /* No thread support */ -# define SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# define SWIG_PYTHON_THREAD_END_BLOCK -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# define SWIG_PYTHON_THREAD_END_ALLOW -#endif - -/* ----------------------------------------------------------------------------- - * Python API portion that goes into the runtime - * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* ----------------------------------------------------------------------------- - * Constant declarations - * ----------------------------------------------------------------------------- */ - -/* Constant Types */ -#define SWIG_PY_POINTER 4 -#define SWIG_PY_BINARY 5 - -/* Constant information structure */ -typedef struct swig_const_info { - int type; - char *name; - long lvalue; - double dvalue; - void *pvalue; - swig_type_info **ptype; -} swig_const_info; - - -/* ----------------------------------------------------------------------------- - * Wrapper of PyInstanceMethod_New() used in Python 3 - * It is exported to the generated module, used for -fastproxy - * ----------------------------------------------------------------------------- */ -SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *self, PyObject *func) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyInstanceMethod_New(func); -#else - return NULL; -#endif -} - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - -/* 
----------------------------------------------------------------------------- - * pyrun.swg - * - * This file contains the runtime support for Python modules - * and includes code for managing global variables and pointer - * type checking. - * - * ----------------------------------------------------------------------------- */ - -/* Common SWIG API */ - -/* for raw pointers */ -#define SWIG_Python_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, 0) -#define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtr(obj, pptr, type, flags) -#define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, own) -#define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(ptr, type, flags) -#define SWIG_CheckImplicit(ty) SWIG_Python_CheckImplicit(ty) -#define SWIG_AcquirePtr(ptr, src) SWIG_Python_AcquirePtr(ptr, src) -#define swig_owntype int - -/* for raw packed data */ -#define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewPackedObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - -/* for class or struct pointers */ -#define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags) -#define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags) - -/* for C or C++ function pointers */ -#define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_Python_ConvertFunctionPtr(obj, pptr, type) -#define SWIG_NewFunctionPtrObj(ptr, type) SWIG_Python_NewPointerObj(ptr, type, 0) - -/* for C++ member pointers, ie, member methods */ -#define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewMemberObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - - -/* Runtime API */ - -#define SWIG_GetModule(clientdata) SWIG_Python_GetModule() -#define SWIG_SetModule(clientdata, pointer) SWIG_Python_SetModule(pointer) -#define 
SWIG_NewClientData(obj) SwigPyClientData_New(obj) - -#define SWIG_SetErrorObj SWIG_Python_SetErrorObj -#define SWIG_SetErrorMsg SWIG_Python_SetErrorMsg -#define SWIG_ErrorType(code) SWIG_Python_ErrorType(code) -#define SWIG_Error(code, msg) SWIG_Python_SetErrorMsg(SWIG_ErrorType(code), msg) -#define SWIG_fail goto fail - -/* - * Python 2.7 and newer and Python 3.1 and newer should use Capsules API instead of - * CObjects API. - */ -#if ((PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION > 6) || \ - (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 0)) -#define USE_CAPSULES -#define TYPE_POINTER_NAME \ - ((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION ".type_pointer_capsule" SWIG_TYPE_TABLE_NAME) -#endif - -/* Runtime API implementation */ - -/* Error manipulation */ - -SWIGINTERN void -SWIG_Python_SetErrorObj(PyObject *errtype, PyObject *obj) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetObject(errtype, obj); - Py_DECREF(obj); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -SWIGINTERN void -SWIG_Python_SetErrorMsg(PyObject *errtype, const char *msg) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetString(errtype, (char *) msg); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -#define SWIG_Python_Raise(obj, type, desc) SWIG_Python_SetErrorObj(SWIG_Python_ExceptionType(desc), obj) - -/* Set a constant value */ - -SWIGINTERN void -SWIG_Python_SetConstant(PyObject *d, const char *name, PyObject *obj) { - PyDict_SetItemString(d, (char*) name, obj); - Py_DECREF(obj); -} - -/* Append a value to the result obj */ - -SWIGINTERN PyObject* -SWIG_Python_AppendOutput(PyObject* result, PyObject* obj) { -#if !defined(SWIG_PYTHON_OUTPUT_TUPLE) - if (!result) { - result = obj; - } else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyList_Check(result)) { - PyObject *o2 = result; - result = PyList_New(1); - PyList_SetItem(result, 0, o2); - } - PyList_Append(result,obj); - Py_DECREF(obj); - } - return result; -#else - PyObject* o2; - PyObject* o3; - if (!result) { - result = obj; - } 
else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyTuple_Check(result)) { - o2 = result; - result = PyTuple_New(1); - PyTuple_SET_ITEM(result, 0, o2); - } - o3 = PyTuple_New(1); - PyTuple_SET_ITEM(o3, 0, obj); - o2 = result; - result = PySequence_Concat(o2, o3); - Py_DECREF(o2); - Py_DECREF(o3); - } - return result; -#endif -} - -/* Unpack the argument tuple */ - -SWIGINTERN int -SWIG_Python_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, PyObject **objs) -{ - if (!args) { - if (!min && !max) { - return 1; - } else { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got none", - name, (min == max ? "" : "at least "), (int)min); - return 0; - } - } - if (!PyTuple_Check(args)) { - PyErr_SetString(PyExc_SystemError, "UnpackTuple() argument list is not a tuple"); - return 0; - } else { - register Py_ssize_t l = PyTuple_GET_SIZE(args); - if (l < min) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? "" : "at least "), (int)min, (int)l); - return 0; - } else if (l > max) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? 
"" : "at most "), (int)max, (int)l); - return 0; - } else { - register int i; - for (i = 0; i < l; ++i) { - objs[i] = PyTuple_GET_ITEM(args, i); - } - for (; l < max; ++l) { - objs[l] = 0; - } - return i + 1; - } - } -} - -/* A functor is a function object with one single object argument */ -#if PY_VERSION_HEX >= 0x02020000 -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunctionObjArgs(functor, obj, NULL); -#else -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunction(functor, "O", obj); -#endif - -/* - Helper for static pointer initialization for both C and C++ code, for example - static PyObject *SWIG_STATIC_POINTER(MyVar) = NewSomething(...); -*/ -#ifdef __cplusplus -#define SWIG_STATIC_POINTER(var) var -#else -#define SWIG_STATIC_POINTER(var) var = 0; if (!var) var -#endif - -/* ----------------------------------------------------------------------------- - * Pointer declarations - * ----------------------------------------------------------------------------- */ - -/* Flags for new pointer objects */ -#define SWIG_POINTER_NOSHADOW (SWIG_POINTER_OWN << 1) -#define SWIG_POINTER_NEW (SWIG_POINTER_NOSHADOW | SWIG_POINTER_OWN) - -#define SWIG_POINTER_IMPLICIT_CONV (SWIG_POINTER_DISOWN << 1) - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* How to access Py_None */ -#if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# ifndef SWIG_PYTHON_NO_BUILD_NONE -# ifndef SWIG_PYTHON_BUILD_NONE -# define SWIG_PYTHON_BUILD_NONE -# endif -# endif -#endif - -#ifdef SWIG_PYTHON_BUILD_NONE -# ifdef Py_None -# undef Py_None -# define Py_None SWIG_Py_None() -# endif -SWIGRUNTIMEINLINE PyObject * -_SWIG_Py_None(void) -{ - PyObject *none = Py_BuildValue((char*)""); - Py_DECREF(none); - return none; -} -SWIGRUNTIME PyObject * -SWIG_Py_None(void) -{ - static PyObject *SWIG_STATIC_POINTER(none) = _SWIG_Py_None(); - return none; -} -#endif - -/* The python void return value */ - -SWIGRUNTIMEINLINE PyObject * 
-SWIG_Py_Void(void) -{ - PyObject *none = Py_None; - Py_INCREF(none); - return none; -} - -/* SwigPyClientData */ - -typedef struct { - PyObject *klass; - PyObject *newraw; - PyObject *newargs; - PyObject *destroy; - int delargs; - int implicitconv; -} SwigPyClientData; - -SWIGRUNTIMEINLINE int -SWIG_Python_CheckImplicit(swig_type_info *ty) -{ - SwigPyClientData *data = (SwigPyClientData *)ty->clientdata; - return data ? data->implicitconv : 0; -} - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_ExceptionType(swig_type_info *desc) { - SwigPyClientData *data = desc ? (SwigPyClientData *) desc->clientdata : 0; - PyObject *klass = data ? data->klass : 0; - return (klass ? klass : PyExc_RuntimeError); -} - - -SWIGRUNTIME SwigPyClientData * -SwigPyClientData_New(PyObject* obj) -{ - if (!obj) { - return 0; - } else { - SwigPyClientData *data = (SwigPyClientData *)malloc(sizeof(SwigPyClientData)); - /* the klass element */ - data->klass = obj; - Py_INCREF(data->klass); - /* the newraw method and newargs arguments used to create a new raw instance */ - if (PyClass_Check(obj)) { - data->newraw = 0; - data->newargs = obj; - Py_INCREF(obj); - } else { -#if (PY_VERSION_HEX < 0x02020000) - data->newraw = 0; -#else - data->newraw = PyObject_GetAttrString(data->klass, (char *)"__new__"); -#endif - if (data->newraw) { - Py_INCREF(data->newraw); - data->newargs = PyTuple_New(1); - PyTuple_SetItem(data->newargs, 0, obj); - } else { - data->newargs = obj; - } - Py_INCREF(data->newargs); - } - /* the destroy method, aka as the C++ delete method */ - data->destroy = PyObject_GetAttrString(data->klass, (char *)"__swig_destroy__"); - if (PyErr_Occurred()) { - PyErr_Clear(); - data->destroy = 0; - } - if (data->destroy) { - int flags; - Py_INCREF(data->destroy); - flags = PyCFunction_GET_FLAGS(data->destroy); -#ifdef METH_O - data->delargs = !(flags & (METH_O)); -#else - data->delargs = 0; -#endif - } else { - data->delargs = 0; - } - data->implicitconv = 0; - return data; - } -} - 
-SWIGRUNTIME void -SwigPyClientData_Del(SwigPyClientData* data) -{ - Py_XDECREF(data->newraw); - Py_XDECREF(data->newargs); - Py_XDECREF(data->destroy); -} - -/* =============== SwigPyObject =====================*/ - -typedef struct { - PyObject_HEAD - void *ptr; - swig_type_info *ty; - int own; - PyObject *next; -} SwigPyObject; - -SWIGRUNTIME PyObject * -SwigPyObject_long(SwigPyObject *v) -{ - return PyLong_FromVoidPtr(v->ptr); -} - -SWIGRUNTIME PyObject * -SwigPyObject_format(const char* fmt, SwigPyObject *v) -{ - PyObject *res = NULL; - PyObject *args = PyTuple_New(1); - if (args) { - if (PyTuple_SetItem(args, 0, SwigPyObject_long(v)) == 0) { - PyObject *ofmt = SWIG_Python_str_FromChar(fmt); - if (ofmt) { -#if PY_VERSION_HEX >= 0x03000000 - res = PyUnicode_Format(ofmt,args); -#else - res = PyString_Format(ofmt,args); -#endif - Py_DECREF(ofmt); - } - Py_DECREF(args); - } - } - return res; -} - -SWIGRUNTIME PyObject * -SwigPyObject_oct(SwigPyObject *v) -{ - return SwigPyObject_format("%o",v); -} - -SWIGRUNTIME PyObject * -SwigPyObject_hex(SwigPyObject *v) -{ - return SwigPyObject_format("%x",v); -} - -SWIGRUNTIME PyObject * -#ifdef METH_NOARGS -SwigPyObject_repr(SwigPyObject *v) -#else -SwigPyObject_repr(SwigPyObject *v, PyObject *args) -#endif -{ - const char *name = SWIG_TypePrettyName(v->ty); - PyObject *repr = SWIG_Python_str_FromFormat("", name, v); - if (v->next) { -#ifdef METH_NOARGS - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next); -#else - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next, args); -#endif -#if PY_VERSION_HEX >= 0x03000000 - PyObject *joined = PyUnicode_Concat(repr, nrep); - Py_DecRef(repr); - Py_DecRef(nrep); - repr = joined; -#else - PyString_ConcatAndDel(&repr,nrep); -#endif - } - return repr; -} - -SWIGRUNTIME int -SwigPyObject_print(SwigPyObject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char *str; -#ifdef METH_NOARGS - PyObject *repr = SwigPyObject_repr(v); -#else - PyObject *repr = SwigPyObject_repr(v, 
NULL); -#endif - if (repr) { - str = SWIG_Python_str_AsChar(repr); - fputs(str, fp); - SWIG_Python_str_DelForPy3(str); - Py_DECREF(repr); - return 0; - } else { - return 1; - } -} - -SWIGRUNTIME PyObject * -SwigPyObject_str(SwigPyObject *v) -{ - char result[SWIG_BUFFER_SIZE]; - return SWIG_PackVoidPtr(result, v->ptr, v->ty->name, sizeof(result)) ? - SWIG_Python_str_FromChar(result) : 0; -} - -SWIGRUNTIME int -SwigPyObject_compare(SwigPyObject *v, SwigPyObject *w) -{ - void *i = v->ptr; - void *j = w->ptr; - return (i < j) ? -1 : ((i > j) ? 1 : 0); -} - -/* Added for Python 3.x, would it also be useful for Python 2.x? */ -SWIGRUNTIME PyObject* -SwigPyObject_richcompare(SwigPyObject *v, SwigPyObject *w, int op) -{ - PyObject* res; - if( op != Py_EQ && op != Py_NE ) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - if( (SwigPyObject_compare(v, w)==0) == (op == Py_EQ) ) - res = Py_True; - else - res = Py_False; - Py_INCREF(res); - return res; -} - - -SWIGRUNTIME PyTypeObject* _PySwigObject_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyObject_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigObject_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyObject_Check(PyObject *op) { - return (Py_TYPE(op) == SwigPyObject_type()) - || (strcmp(Py_TYPE(op)->tp_name,"SwigPyObject") == 0); -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own); - -SWIGRUNTIME void -SwigPyObject_dealloc(PyObject *v) -{ - SwigPyObject *sobj = (SwigPyObject *) v; - PyObject *next = sobj->next; - if (sobj->own == SWIG_POINTER_OWN) { - swig_type_info *ty = sobj->ty; - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - PyObject *destroy = data ? 
data->destroy : 0; - if (destroy) { - /* destroy is always a VARARGS method */ - PyObject *res; - if (data->delargs) { - /* we need to create a temporary object to carry the destroy operation */ - PyObject *tmp = SwigPyObject_New(sobj->ptr, ty, 0); - res = SWIG_Python_CallFunctor(destroy, tmp); - Py_DECREF(tmp); - } else { - PyCFunction meth = PyCFunction_GET_FUNCTION(destroy); - PyObject *mself = PyCFunction_GET_SELF(destroy); - res = ((*meth)(mself, v)); - } - Py_XDECREF(res); - } -#if !defined(SWIG_PYTHON_SILENT_MEMLEAK) - else { - const char *name = SWIG_TypePrettyName(ty); - printf("swig/python detected a memory leak of type '%s', no destructor found.\n", (name ? name : "unknown")); - } -#endif - } - Py_XDECREF(next); - PyObject_DEL(v); -} - -SWIGRUNTIME PyObject* -SwigPyObject_append(PyObject* v, PyObject* next) -{ - SwigPyObject *sobj = (SwigPyObject *) v; -#ifndef METH_O - PyObject *tmp = 0; - if (!PyArg_ParseTuple(next,(char *)"O:append", &tmp)) return NULL; - next = tmp; -#endif - if (!SwigPyObject_Check(next)) { - return NULL; - } - sobj->next = next; - Py_INCREF(next); - return SWIG_Py_Void(); -} - -SWIGRUNTIME PyObject* -#ifdef METH_NOARGS -SwigPyObject_next(PyObject* v) -#else -SwigPyObject_next(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *) v; - if (sobj->next) { - Py_INCREF(sobj->next); - return sobj->next; - } else { - return SWIG_Py_Void(); - } -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_disown(PyObject *v) -#else -SwigPyObject_disown(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = 0; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_acquire(PyObject *v) -#else -SwigPyObject_acquire(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = SWIG_POINTER_OWN; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* 
-SwigPyObject_own(PyObject *v, PyObject *args) -{ - PyObject *val = 0; -#if (PY_VERSION_HEX < 0x02020000) - if (!PyArg_ParseTuple(args,(char *)"|O:own",&val)) -#else - if (!PyArg_UnpackTuple(args, (char *)"own", 0, 1, &val)) -#endif - { - return NULL; - } - else - { - SwigPyObject *sobj = (SwigPyObject *)v; - PyObject *obj = PyBool_FromLong(sobj->own); - if (val) { -#ifdef METH_NOARGS - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v); - } else { - SwigPyObject_disown(v); - } -#else - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v,args); - } else { - SwigPyObject_disown(v,args); - } -#endif - } - return obj; - } -} - -#ifdef METH_O -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_NOARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_NOARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_O, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_NOARGS, (char *)"returns the next 'this' object"}, - {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_NOARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#else -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_VARARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_VARARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_VARARGS, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_VARARGS, (char *)"returns the next 'this' object"}, - 
{(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_VARARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#endif - -#if PY_VERSION_HEX < 0x02020000 -SWIGINTERN PyObject * -SwigPyObject_getattr(SwigPyObject *sobj,char *name) -{ - return Py_FindMethod(swigobject_methods, (PyObject *)sobj, name); -} -#endif - -SWIGRUNTIME PyTypeObject* -_PySwigObject_type(void) { - static char swigobject_doc[] = "Swig object carries a C/C++ instance pointer"; - - static PyNumberMethods SwigPyObject_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - /* nb_divide removed in Python 3 */ -#if PY_VERSION_HEX < 0x03000000 - (binaryfunc)0, /*nb_divide*/ -#endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0,/*nb_power*/ - (unaryfunc)0, /*nb_negative*/ - (unaryfunc)0, /*nb_positive*/ - (unaryfunc)0, /*nb_absolute*/ - (inquiry)0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ -#if PY_VERSION_HEX < 0x03000000 - 0, /*nb_coerce*/ -#endif - (unaryfunc)SwigPyObject_long, /*nb_int*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_long, /*nb_long*/ -#else - 0, /*nb_reserved*/ -#endif - (unaryfunc)0, /*nb_float*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_oct, /*nb_oct*/ - (unaryfunc)SwigPyObject_hex, /*nb_hex*/ -#endif -#if PY_VERSION_HEX >= 0x03000000 /* 3.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index, nb_inplace_divide removed */ -#elif PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */ -#elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ -#elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */ - 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */ -#endif - }; - - static PyTypeObject swigpyobject_type; - static int type_init = 0; - if 
(!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyObject", /* tp_name */ - sizeof(SwigPyObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyObject_dealloc, /* tp_dealloc */ - (printfunc)SwigPyObject_print, /* tp_print */ -#if PY_VERSION_HEX < 0x02020000 - (getattrfunc)SwigPyObject_getattr, /* tp_getattr */ -#else - (getattrfunc)0, /* tp_getattr */ -#endif - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX >= 0x03000000 - 0, /* tp_reserved in 3.0.1, tp_compare in 3.0.0 but not used */ -#else - (cmpfunc)SwigPyObject_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyObject_repr, /* tp_repr */ - &SwigPyObject_as_number, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyObject_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigobject_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)SwigPyObject_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* tp_iternext */ - swigobject_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpyobject_type = tmp; - /* for Python 3 we already assigned ob_type in 
PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpyobject_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpyobject_type; -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own) -{ - SwigPyObject *sobj = PyObject_NEW(SwigPyObject, SwigPyObject_type()); - if (sobj) { - sobj->ptr = ptr; - sobj->ty = ty; - sobj->own = own; - sobj->next = 0; - } - return (PyObject *)sobj; -} - -/* ----------------------------------------------------------------------------- - * Implements a simple Swig Packed type, and use it instead of string - * ----------------------------------------------------------------------------- */ - -typedef struct { - PyObject_HEAD - void *pack; - swig_type_info *ty; - size_t size; -} SwigPyPacked; - -SWIGRUNTIME int -SwigPyPacked_print(SwigPyPacked *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char result[SWIG_BUFFER_SIZE]; - fputs("pack, v->size, 0, sizeof(result))) { - fputs("at ", fp); - fputs(result, fp); - } - fputs(v->ty->name,fp); - fputs(">", fp); - return 0; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_repr(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) { - return SWIG_Python_str_FromFormat("", result, v->ty->name); - } else { - return SWIG_Python_str_FromFormat("", v->ty->name); - } -} - -SWIGRUNTIME PyObject * -SwigPyPacked_str(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))){ - return SWIG_Python_str_FromFormat("%s%s", result, v->ty->name); - } else { - return SWIG_Python_str_FromChar(v->ty->name); - } -} - -SWIGRUNTIME int -SwigPyPacked_compare(SwigPyPacked *v, SwigPyPacked *w) -{ - size_t i = v->size; - size_t j = w->size; - int s = (i < j) ? -1 : ((i > j) ? 1 : 0); - return s ? 
s : strncmp((char *)v->pack, (char *)w->pack, 2*v->size); -} - -SWIGRUNTIME PyTypeObject* _PySwigPacked_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyPacked_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigPacked_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyPacked_Check(PyObject *op) { - return ((op)->ob_type == _PySwigPacked_type()) - || (strcmp((op)->ob_type->tp_name,"SwigPyPacked") == 0); -} - -SWIGRUNTIME void -SwigPyPacked_dealloc(PyObject *v) -{ - if (SwigPyPacked_Check(v)) { - SwigPyPacked *sobj = (SwigPyPacked *) v; - free(sobj->pack); - } - PyObject_DEL(v); -} - -SWIGRUNTIME PyTypeObject* -_PySwigPacked_type(void) { - static char swigpacked_doc[] = "Swig object carries a C/C++ instance pointer"; - static PyTypeObject swigpypacked_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX>=0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyPacked", /* tp_name */ - sizeof(SwigPyPacked), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyPacked_dealloc, /* tp_dealloc */ - (printfunc)SwigPyPacked_print, /* tp_print */ - (getattrfunc)0, /* tp_getattr */ - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX>=0x03000000 - 0, /* tp_reserved in 3.0.1 */ -#else - (cmpfunc)SwigPyPacked_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyPacked_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyPacked_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigpacked_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* 
tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpypacked_type = tmp; - /* for Python 3 the ob_type already assigned in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpypacked_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpypacked_type; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_New(void *ptr, size_t size, swig_type_info *ty) -{ - SwigPyPacked *sobj = PyObject_NEW(SwigPyPacked, SwigPyPacked_type()); - if (sobj) { - void *pack = malloc(size); - if (pack) { - memcpy(pack, ptr, size); - sobj->pack = pack; - sobj->ty = ty; - sobj->size = size; - } else { - PyObject_DEL((PyObject *) sobj); - sobj = 0; - } - } - return (PyObject *) sobj; -} - -SWIGRUNTIME swig_type_info * -SwigPyPacked_UnpackData(PyObject *obj, void *ptr, size_t size) -{ - if (SwigPyPacked_Check(obj)) { - SwigPyPacked *sobj = (SwigPyPacked *)obj; - if (sobj->size != size) return 0; - memcpy(ptr, sobj->pack, size); - return sobj->ty; - } else { - return 0; - } -} - -/* ----------------------------------------------------------------------------- - * pointers/data manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIMEINLINE PyObject * -_SWIG_This(void) -{ - return SWIG_Python_str_FromChar("this"); -} - -static PyObject *swig_this = NULL; - -SWIGRUNTIME PyObject * -SWIG_This(void) -{ - if (swig_this == NULL) - swig_this = _SWIG_This(); - return swig_this; -} - -/* #define SWIG_PYTHON_SLOW_GETSET_THIS */ - -/* TODO: I don't know 
how to implement the fast getset in Python 3 right now */ -#if PY_VERSION_HEX>=0x03000000 -#define SWIG_PYTHON_SLOW_GETSET_THIS -#endif - -SWIGRUNTIME SwigPyObject * -SWIG_Python_GetSwigThis(PyObject *pyobj) -{ - if (SwigPyObject_Check(pyobj)) { - return (SwigPyObject *) pyobj; - } else { - PyObject *obj = 0; -#if (!defined(SWIG_PYTHON_SLOW_GETSET_THIS) && (PY_VERSION_HEX >= 0x02030000)) - if (PyInstance_Check(pyobj)) { - obj = _PyInstance_Lookup(pyobj, SWIG_This()); - } else { - PyObject **dictptr = _PyObject_GetDictPtr(pyobj); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - obj = dict ? PyDict_GetItem(dict, SWIG_This()) : 0; - } else { -#ifdef PyWeakref_CheckProxy - if (PyWeakref_CheckProxy(pyobj)) { - PyObject *wobj = PyWeakref_GET_OBJECT(pyobj); - return wobj ? SWIG_Python_GetSwigThis(wobj) : 0; - } -#endif - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } - } - } -#else - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } -#endif - if (obj && !SwigPyObject_Check(obj)) { - /* a PyObject is called 'this', try to get the 'real this' - SwigPyObject from it */ - return SWIG_Python_GetSwigThis(obj); - } - return (SwigPyObject *)obj; - } -} - -/* Acquire a pointer value */ - -SWIGRUNTIME int -SWIG_Python_AcquirePtr(PyObject *obj, int own) { - if (own == SWIG_POINTER_OWN) { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (sobj) { - int oldown = sobj->own; - sobj->own = own; - return oldown; - } - } - return 0; -} - -/* Convert a pointer value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPtrAndOwn(PyObject *obj, void **ptr, swig_type_info *ty, int flags, int *own) { - if (!obj) return SWIG_ERROR; - if (obj == Py_None) { - if (ptr) *ptr = 0; - return SWIG_OK; - } else { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (own) - *own = 0; - while (sobj) { - void *vptr = 
sobj->ptr; - if (ty) { - swig_type_info *to = sobj->ty; - if (to == ty) { - /* no type cast needed */ - if (ptr) *ptr = vptr; - break; - } else { - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) { - sobj = (SwigPyObject *)sobj->next; - } else { - if (ptr) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - if (newmemory == SWIG_CAST_NEW_MEMORY) { - assert(own); /* badly formed typemap which will lead to a memory leak - it must set and use own to delete *ptr */ - if (own) - *own = *own | SWIG_CAST_NEW_MEMORY; - } - } - break; - } - } - } else { - if (ptr) *ptr = vptr; - break; - } - } - if (sobj) { - if (own) - *own = *own | sobj->own; - if (flags & SWIG_POINTER_DISOWN) { - sobj->own = 0; - } - return SWIG_OK; - } else { - int res = SWIG_ERROR; - if (flags & SWIG_POINTER_IMPLICIT_CONV) { - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - if (data && !data->implicitconv) { - PyObject *klass = data->klass; - if (klass) { - PyObject *impconv; - data->implicitconv = 1; /* avoid recursion and call 'explicit' constructors*/ - impconv = SWIG_Python_CallFunctor(klass, obj); - data->implicitconv = 0; - if (PyErr_Occurred()) { - PyErr_Clear(); - impconv = 0; - } - if (impconv) { - SwigPyObject *iobj = SWIG_Python_GetSwigThis(impconv); - if (iobj) { - void *vptr; - res = SWIG_Python_ConvertPtrAndOwn((PyObject*)iobj, &vptr, ty, 0, 0); - if (SWIG_IsOK(res)) { - if (ptr) { - *ptr = vptr; - /* transfer the ownership to 'ptr' */ - iobj->own = 0; - res = SWIG_AddCast(res); - res = SWIG_AddNewMask(res); - } else { - res = SWIG_AddCast(res); - } - } - } - Py_DECREF(impconv); - } - } - } - } - return res; - } - } -} - -/* Convert a function ptr value */ - -SWIGRUNTIME int -SWIG_Python_ConvertFunctionPtr(PyObject *obj, void **ptr, swig_type_info *ty) { - if (!PyCFunction_Check(obj)) { - return SWIG_ConvertPtr(obj, ptr, ty, 0); - } else { - void *vptr = 0; - - /* here we get the method pointer for callbacks */ - const char *doc = 
(((PyCFunctionObject *)obj) -> m_ml -> ml_doc); - const char *desc = doc ? strstr(doc, "swig_ptr: ") : 0; - if (desc) - desc = ty ? SWIG_UnpackVoidPtr(desc + 10, &vptr, ty->name) : 0; - if (!desc) - return SWIG_ERROR; - if (ty) { - swig_cast_info *tc = SWIG_TypeCheck(desc,ty); - if (tc) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - assert(!newmemory); /* newmemory handling not yet implemented */ - } else { - return SWIG_ERROR; - } - } else { - *ptr = vptr; - } - return SWIG_OK; - } -} - -/* Convert a packed value value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPacked(PyObject *obj, void *ptr, size_t sz, swig_type_info *ty) { - swig_type_info *to = SwigPyPacked_UnpackData(obj, ptr, sz); - if (!to) return SWIG_ERROR; - if (ty) { - if (to != ty) { - /* check type cast? */ - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) return SWIG_ERROR; - } - } - return SWIG_OK; -} - -/* ----------------------------------------------------------------------------- - * Create a new pointer object - * ----------------------------------------------------------------------------- */ - -/* - Create a new instance object, without calling __init__, and set the - 'this' attribute. 
-*/ - -SWIGRUNTIME PyObject* -SWIG_Python_NewShadowInstance(SwigPyClientData *data, PyObject *swig_this) -{ -#if (PY_VERSION_HEX >= 0x02020000) - PyObject *inst = 0; - PyObject *newraw = data->newraw; - if (newraw) { - inst = PyObject_Call(newraw, data->newargs, NULL); - if (inst) { -#if !defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - PyDict_SetItem(dict, SWIG_This(), swig_this); - } - } -#else - PyObject *key = SWIG_This(); - PyObject_SetAttr(inst, key, swig_this); -#endif - } - } else { -#if PY_VERSION_HEX >= 0x03000000 - inst = PyBaseObject_Type.tp_new((PyTypeObject*) data->newargs, Py_None, Py_None); - PyObject_SetAttr(inst, SWIG_This(), swig_this); - Py_TYPE(inst)->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; -#else - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); -#endif - } - return inst; -#else -#if (PY_VERSION_HEX >= 0x02010000) - PyObject *inst; - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); - return (PyObject *) inst; -#else - PyInstanceObject *inst = PyObject_NEW(PyInstanceObject, &PyInstance_Type); - if (inst == NULL) { - return NULL; - } - inst->in_class = (PyClassObject *)data->newargs; - Py_INCREF(inst->in_class); - inst->in_dict = PyDict_New(); - if (inst->in_dict == NULL) { - Py_DECREF(inst); - return NULL; - } -#ifdef Py_TPFLAGS_HAVE_WEAKREFS - inst->in_weakreflist = NULL; -#endif -#ifdef Py_TPFLAGS_GC - PyObject_GC_Init(inst); -#endif - PyDict_SetItem(inst->in_dict, SWIG_This(), swig_this); - return (PyObject *) inst; -#endif -#endif -} - -SWIGRUNTIME void -SWIG_Python_SetSwigThis(PyObject *inst, PyObject *swig_this) -{ - PyObject *dict; -#if (PY_VERSION_HEX >= 0x02020000) && 
!defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - } - PyDict_SetItem(dict, SWIG_This(), swig_this); - return; - } -#endif - dict = PyObject_GetAttrString(inst, (char*)"__dict__"); - PyDict_SetItem(dict, SWIG_This(), swig_this); - Py_DECREF(dict); -} - - -SWIGINTERN PyObject * -SWIG_Python_InitShadowInstance(PyObject *args) { - PyObject *obj[2]; - if (!SWIG_Python_UnpackTuple(args,(char*)"swiginit", 2, 2, obj)) { - return NULL; - } else { - SwigPyObject *sthis = SWIG_Python_GetSwigThis(obj[0]); - if (sthis) { - SwigPyObject_append((PyObject*) sthis, obj[1]); - } else { - SWIG_Python_SetSwigThis(obj[0], obj[1]); - } - return SWIG_Py_Void(); - } -} - -/* Create a new pointer object */ - -SWIGRUNTIME PyObject * -SWIG_Python_NewPointerObj(void *ptr, swig_type_info *type, int flags) { - if (!ptr) { - return SWIG_Py_Void(); - } else { - int own = (flags & SWIG_POINTER_OWN) ? SWIG_POINTER_OWN : 0; - PyObject *robj = SwigPyObject_New(ptr, type, own); - SwigPyClientData *clientdata = type ? (SwigPyClientData *)(type->clientdata) : 0; - if (clientdata && !(flags & SWIG_POINTER_NOSHADOW)) { - PyObject *inst = SWIG_Python_NewShadowInstance(clientdata, robj); - if (inst) { - Py_DECREF(robj); - robj = inst; - } - } - return robj; - } -} - -/* Create a new packed object */ - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_NewPackedObj(void *ptr, size_t sz, swig_type_info *type) { - return ptr ? 
SwigPyPacked_New((void *) ptr, sz, type) : SWIG_Py_Void(); -} - -/* -----------------------------------------------------------------------------* - * Get type list - * -----------------------------------------------------------------------------*/ - -#ifdef SWIG_LINK_RUNTIME -void *SWIG_ReturnGlobalTypeList(void *); -#endif - -SWIGRUNTIME swig_module_info * -SWIG_Python_GetModule(void) { - static void *type_pointer = (void *)0; - /* first check if module already created */ - if (!type_pointer) { -#ifdef SWIG_LINK_RUNTIME - type_pointer = SWIG_ReturnGlobalTypeList((void *)0); -#else -#ifdef USE_CAPSULES - type_pointer = PyCapsule_Import(TYPE_POINTER_NAME, 0); -#else - type_pointer = PyCObject_Import((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - (char*)"type_pointer" SWIG_TYPE_TABLE_NAME); -#endif - if (PyErr_Occurred()) { - PyErr_Clear(); - type_pointer = (void *)0; - } -#endif - } - return (swig_module_info *) type_pointer; -} - -#if PY_MAJOR_VERSION < 2 -/* PyModule_AddObject function was introduced in Python 2.0. The following function - is copied out of Python/modsupport.c in python version 2.3.4 */ -SWIGINTERN int -PyModule_AddObject(PyObject *m, char *name, PyObject *o) -{ - PyObject *dict; - if (!PyModule_Check(m)) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs module as first arg"); - return SWIG_ERROR; - } - if (!o) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs non-NULL value"); - return SWIG_ERROR; - } - - dict = PyModule_GetDict(m); - if (dict == NULL) { - /* Internal error -- modules must have a dict! 
*/ - PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__", - PyModule_GetName(m)); - return SWIG_ERROR; - } - if (PyDict_SetItemString(dict, name, o)) - return SWIG_ERROR; - Py_DECREF(o); - return SWIG_OK; -} -#endif - -SWIGRUNTIME void -SWIG_Python_DestroyModule(void *vptr) -{ - size_t i; -#ifdef USE_CAPSULES - swig_module_info *swig_module = - (swig_module_info *) PyCapsule_GetPointer((PyObject *)vptr, TYPE_POINTER_NAME); -#else - swig_module_info *swig_module = (swig_module_info *) vptr; -#endif - swig_type_info **types = swig_module->types; - for (i =0; i < swig_module->size; ++i) { - swig_type_info *ty = types[i]; - if (ty->owndata) { - SwigPyClientData *data = (SwigPyClientData *) ty->clientdata; - if (data) SwigPyClientData_Del(data); - } - } - Py_DECREF(SWIG_This()); - swig_this = NULL; -} - -SWIGRUNTIME void -SWIG_Python_SetModule(swig_module_info *swig_module) { - static PyMethodDef swig_empty_runtime_method_table[] = { {NULL, NULL, 0, NULL} };/* Sentinel */ - -#if PY_VERSION_HEX >= 0x03000000 - /* Add a dummy module object into sys.modules */ - PyObject *module = PyImport_AddModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION); -#else - PyObject *module = Py_InitModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - swig_empty_runtime_method_table); -#endif -#ifdef USE_CAPSULES - PyObject *pointer = PyCapsule_New((void *)swig_module, TYPE_POINTER_NAME, - (PyCapsule_Destructor)SWIG_Python_DestroyModule); -#else - PyObject *pointer = PyCObject_FromVoidPtr((void *) swig_module, SWIG_Python_DestroyModule); -#endif - if (pointer && module) { -#ifdef USE_CAPSULES - PyModule_AddObject(module, (char*)"type_pointer_capsule" SWIG_TYPE_TABLE_NAME, pointer); -#else - PyModule_AddObject(module, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME, pointer); -#endif - } else { - Py_XDECREF(pointer); - } -} - -/* The python cached type query */ -SWIGRUNTIME PyObject * -SWIG_Python_TypeCache(void) { - static PyObject *SWIG_STATIC_POINTER(cache) = PyDict_New(); - 
return cache; -} - -SWIGRUNTIME swig_type_info * -SWIG_Python_TypeQuery(const char *type) -{ - PyObject *cache = SWIG_Python_TypeCache(); - PyObject *key = SWIG_Python_str_FromChar(type); - PyObject *obj = PyDict_GetItem(cache, key); - swig_type_info *descriptor; - if (obj) { -#ifdef USE_CAPSULES - descriptor = (swig_type_info *) PyCapsule_GetPointer(obj, type); -#else - descriptor = (swig_type_info *) PyCObject_AsVoidPtr(obj); -#endif - } else { - swig_module_info *swig_module = SWIG_Python_GetModule(); - descriptor = SWIG_TypeQueryModule(swig_module, swig_module, type); - if (descriptor) { -#ifdef USE_CAPSULES - obj = PyCapsule_New(descriptor, type, NULL); -#else - obj = PyCObject_FromVoidPtr(descriptor, NULL); -#endif - PyDict_SetItem(cache, key, obj); - Py_DECREF(obj); - } - } - Py_DECREF(key); - return descriptor; -} - -/* - For backward compatibility only -*/ -#define SWIG_POINTER_EXCEPTION 0 -#define SWIG_arg_fail(arg) SWIG_Python_ArgFail(arg) -#define SWIG_MustGetPtr(p, type, argnum, flags) SWIG_Python_MustGetPtr(p, type, argnum, flags) - -SWIGRUNTIME int -SWIG_Python_AddErrMesg(const char* mesg, int infront) -{ - if (PyErr_Occurred()) { - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - Py_XINCREF(type); - PyErr_Clear(); - if (infront) { - PyErr_Format(type, "%s %s", mesg, tmp = SWIG_Python_str_AsChar(old_str)); - } else { - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - } - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - } - return 1; - } else { - return 0; - } -} - -SWIGRUNTIME int -SWIG_Python_ArgFail(int argnum) -{ - if (PyErr_Occurred()) { - /* add information about failing argument */ - char mesg[256]; - PyOS_snprintf(mesg, sizeof(mesg), "argument number %d:", argnum); - return SWIG_Python_AddErrMesg(mesg, 1); - } else { - return 0; - } -} - -SWIGRUNTIMEINLINE const char * 
-SwigPyObject_GetDesc(PyObject *self) -{ - SwigPyObject *v = (SwigPyObject *)self; - swig_type_info *ty = v ? v->ty : 0; - return ty ? ty->str : (char*)""; -} - -SWIGRUNTIME void -SWIG_Python_TypeError(const char *type, PyObject *obj) -{ - if (type) { -#if defined(SWIG_COBJECT_TYPES) - if (obj && SwigPyObject_Check(obj)) { - const char *otype = (const char *) SwigPyObject_GetDesc(obj); - if (otype) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, 'SwigPyObject(%s)' is received", - type, otype); - return; - } - } else -#endif - { - const char *otype = (obj ? obj->ob_type->tp_name : 0); - if (otype) { - PyObject *str = PyObject_Str(obj); - const char *cstr = str ? SWIG_Python_str_AsChar(str) : 0; - if (cstr) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s(%s)' is received", - type, otype, cstr); - SWIG_Python_str_DelForPy3(cstr); - } else { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s' is received", - type, otype); - } - Py_XDECREF(str); - return; - } - } - PyErr_Format(PyExc_TypeError, "a '%s' is expected", type); - } else { - PyErr_Format(PyExc_TypeError, "unexpected type is received"); - } -} - - -/* Convert a pointer value, signal an exception on a type mismatch */ -SWIGRUNTIME void * -SWIG_Python_MustGetPtr(PyObject *obj, swig_type_info *ty, int argnum, int flags) { - void *result; - if (SWIG_Python_ConvertPtr(obj, &result, ty, flags) == -1) { - PyErr_Clear(); -#if SWIG_POINTER_EXCEPTION - if (flags) { - SWIG_Python_TypeError(SWIG_TypePrettyName(ty), obj); - SWIG_Python_ArgFail(argnum); - } -#endif - } - return result; -} - - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - - -#define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0) - -#define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else - - - -/* -------- TYPES TABLE (BEGIN) -------- */ - -#define SWIGTYPE_p_char swig_types[0] -static swig_type_info *swig_types[2]; -static 
swig_module_info swig_module = {swig_types, 1, 0, 0, 0, 0}; -#define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) -#define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) - -/* -------- TYPES TABLE (END) -------- */ - -#if (PY_VERSION_HEX <= 0x02000000) -# if !defined(SWIG_PYTHON_CLASSIC) -# error "This python version requires swig to be run with the '-classic' option" -# endif -#endif - -/*----------------------------------------------- - @(target):= _csgraph.so - ------------------------------------------------*/ -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_init PyInit__csgraph - -#else -# define SWIG_init init_csgraph - -#endif -#define SWIG_name "_csgraph" - -#define SWIGVERSION 0x020001 -#define SWIG_VERSION SWIGVERSION - - -#define SWIG_as_voidptr(a) const_cast< void * >(static_cast< const void * >(a)) -#define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),reinterpret_cast< void** >(a)) - - -#include - - -namespace swig { - class SwigPtr_PyObject { - protected: - PyObject *_obj; - - public: - SwigPtr_PyObject() :_obj(0) - { - } - - SwigPtr_PyObject(const SwigPtr_PyObject& item) : _obj(item._obj) - { - Py_XINCREF(_obj); - } - - SwigPtr_PyObject(PyObject *obj, bool initial_ref = true) :_obj(obj) - { - if (initial_ref) { - Py_XINCREF(_obj); - } - } - - SwigPtr_PyObject & operator=(const SwigPtr_PyObject& item) - { - Py_XINCREF(item._obj); - Py_XDECREF(_obj); - _obj = item._obj; - return *this; - } - - ~SwigPtr_PyObject() - { - Py_XDECREF(_obj); - } - - operator PyObject *() const - { - return _obj; - } - - PyObject *operator->() const - { - return _obj; - } - }; -} - - -namespace swig { - struct SwigVar_PyObject : SwigPtr_PyObject { - SwigVar_PyObject(PyObject* obj = 0) : SwigPtr_PyObject(obj, false) { } - - SwigVar_PyObject & operator = (PyObject* obj) - { - Py_XDECREF(_obj); - _obj = obj; - return *this; - } - }; -} - - -#include "py3k.h" -#define SWIG_FILE_WITH_INIT -#include 
"Python.h" -#include "numpy/arrayobject.h" -#include "complex_ops.h" -/*#include "sparsetools.h"*/ - - -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -#include "complex_ops.h" - - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it slightly to fix some minor bugs, upgrade to numpy (all - * versions), add some comments and some functionality. - */ - -/* Macros to extract array attributes. - */ -#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -#define array_type(a) (int)(PyArray_TYPE(a)) -#define array_numdims(a) (((PyArrayObject *)a)->nd) -#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -#define array_data(a) (((PyArrayObject *)a)->data) -#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) - -/* Support older NumPy data type names -*/ -#if NDARRAY_VERSION < 0x01000000 -#define NPY_BOOL PyArray_BOOL -#define NPY_BYTE PyArray_BYTE -#define NPY_UBYTE PyArray_UBYTE -#define NPY_SHORT PyArray_SHORT -#define NPY_USHORT PyArray_USHORT -#define NPY_INT PyArray_INT -#define NPY_UINT PyArray_UINT -#define NPY_LONG PyArray_LONG -#define NPY_ULONG PyArray_ULONG -#define NPY_LONGLONG PyArray_LONGLONG -#define NPY_ULONGLONG PyArray_ULONGLONG -#define NPY_FLOAT PyArray_FLOAT -#define NPY_DOUBLE PyArray_DOUBLE -#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -#define NPY_CFLOAT PyArray_CFLOAT -#define NPY_CDOUBLE PyArray_CDOUBLE -#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -#define NPY_OBJECT PyArray_OBJECT -#define NPY_STRING PyArray_STRING -#define NPY_UNICODE PyArray_UNICODE -#define NPY_VOID PyArray_VOID -#define NPY_NTYPES PyArray_NTYPES -#define NPY_NOTYPE PyArray_NOTYPE -#define NPY_CHAR PyArray_CHAR -#define NPY_USERDEF PyArray_USERDEF -#define 
npy_intp intp -#endif - -/* Given a PyObject, return a string describing its type. - */ -const char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; -} - -/* Given a NumPy typecode, return a string describing the type. - */ -const char* typecode_string(int typecode) { - static const char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; -} - -/* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ -int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); -} - -/* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. 
- */ -PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) { - ary = (PyArrayObject*) input; - } - else if is_array(input) { - const char* desired_type = typecode_string(typecode); - const char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else { - const char * desired_type = typecode_string(typecode); - const char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; -} - -/* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ -PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else { - py_obj = PyArray_FromObject(input, typecode, 0, 0); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; -} - -/* Given a PyArrayObject, check to see if it is contiguous. If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. 
- */ -PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) { - PyArrayObject* result; - if (array_is_contiguous(ary)) { - result = ary; - *is_new_object = 0; - } - else { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; -} - -/* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ -PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, &is_new1); - if (ary1) { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; -} - -/* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ -int require_contiguous(PyArrayObject* ary) { - int contiguous = 1; - if (!array_is_contiguous(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; -} - -/* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_native(PyArrayObject* ary) { - int native = 1; - if (!array_is_native(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. A byte-swapped array was given"); - native = 0; - } - return native; -} - -/* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. 
Otherwise, set the python error string and return 0. - */ -int require_dimensions(PyArrayObject* ary, int exact_dimensions) { - int success = 1; - if (array_numdims(ary) != exact_dimensions) { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; -} - -/* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) { - if (array_numdims(ary) == exact_dimensions[i]) { - success = 1; - } - } - if (!success) { - for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must be have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; -} - -/* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. 
- */ -int require_size(PyArrayObject* ary, npy_intp* size, int n) { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) { - if (size[i] != -1 && size[i] != array_size(ary,i)) { - success = 0; - } - } - if (!success) { - for (i = 0; i < n; i++) { - if (size[i] == -1) { - sprintf(s, "*,"); - } - else - { - sprintf(s,"%" NPY_INTP_FMT ",", size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) { - sprintf(s,"%" NPY_INTP_FMT ",", array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must be have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); - } - return success; -} -/* End John Hunter translation (with modifications by Bill Spotz) */ - - - - - -/*! - Appends @a what to @a where. On input, @a where need not to be a tuple, but on - return it always is. 
- - @par Revision history: - - 17.02.2005, c -*/ -PyObject *helper_appendToTuple( PyObject *where, PyObject *what ) { - PyObject *o2, *o3; - - if ((!where) || (where == Py_None)) { - where = what; - } else { - if (!PyTuple_Check( where )) { - o2 = where; - where = PyTuple_New( 1 ); - PyTuple_SetItem( where, 0, o2 ); - } - o3 = PyTuple_New( 1 ); - PyTuple_SetItem( o3, 0, what ); - o2 = where; - where = PySequence_Concat( o2, o3 ); - Py_DECREF( o2 ); - Py_DECREF( o3 ); - } - return where; -} - - - - - - -#include "csgraph.h" - - -#include -#if !defined(SWIG_NO_LLONG_MAX) -# if !defined(LLONG_MAX) && defined(__GNUC__) && defined (__LONG_LONG_MAX__) -# define LLONG_MAX __LONG_LONG_MAX__ -# define LLONG_MIN (-LLONG_MAX - 1LL) -# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) -# endif -#endif - - -SWIGINTERN int -SWIG_AsVal_double (PyObject *obj, double *val) -{ - int res = SWIG_TypeError; - if (PyFloat_Check(obj)) { - if (val) *val = PyFloat_AsDouble(obj); - return SWIG_OK; - } else if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - double v = PyLong_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - double d = PyFloat_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = d; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_AddCast(SWIG_OK)); - } else { - PyErr_Clear(); - } - } - } -#endif - return res; -} - - -#include - - -#include - - -SWIGINTERNINLINE int -SWIG_CanCastAsInteger(double *d, double min, double max) { - double x = *d; - if ((min <= x && x <= max)) { - double fx = floor(x); - double cx = ceil(x); - double rd = ((x - fx) < 0.5) ? 
fx : cx; /* simple rint */ - if ((errno == EDOM) || (errno == ERANGE)) { - errno = 0; - } else { - double summ, reps, diff; - if (rd < x) { - diff = x - rd; - } else if (rd > x) { - diff = rd - x; - } else { - return 1; - } - summ = rd + x; - reps = diff/summ; - if (reps < 8*DBL_EPSILON) { - *d = rd; - return 1; - } - } - } - return 0; -} - - -SWIGINTERN int -SWIG_AsVal_long (PyObject *obj, long* val) -{ - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal_double (obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -#endif - return SWIG_TypeError; -} - - -SWIGINTERN int -SWIG_AsVal_int (PyObject * obj, int *val) -{ - long v; - int res = SWIG_AsVal_long (obj, &v); - if (SWIG_IsOK(res)) { - if ((v < INT_MIN || v > INT_MAX)) { - return SWIG_OverflowError; - } else { - if (val) *val = static_cast< int >(v); - } - } - return res; -} - - - #define SWIG_From_long PyInt_FromLong - - -SWIGINTERNINLINE PyObject * -SWIG_From_int (int value) -{ - return SWIG_From_long (value); -} - - -#include "csgraph.h" - -#ifdef __cplusplus -extern "C" { -#endif -SWIGINTERN PyObject *_wrap_cs_graph_components(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int *arg2 ; - int *arg3 ; - int *arg4 ; - int val1 ; - int ecode1 = 0 ; - PyArrayObject *array2 = NULL ; - int is_new_object2 ; - PyArrayObject *array3 = NULL ; - int is_new_object3 ; - PyArrayObject *temp4 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 
; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - int result; - - if (!PyArg_ParseTuple(args,(char *)"OOOO:cs_graph_components",&obj0,&obj1,&obj2,&obj3)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "cs_graph_components" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - { - npy_intp size[1] = { - -1 - }; - array2 = obj_to_array_contiguous_allow_conversion(obj1, PyArray_INT, &is_new_object2); - if (!array2 || !require_dimensions(array2,1) || !require_size(array2,size,1) - || !require_contiguous(array2) || !require_native(array2)) SWIG_fail; - - arg2 = (int*) array2->data; - } - { - npy_intp size[1] = { - -1 - }; - array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); - if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1) - || !require_contiguous(array3) || !require_native(array3)) SWIG_fail; - - arg3 = (int*) array3->data; - } - { - temp4 = obj_to_array_no_conversion(obj3,PyArray_INT); - if (!temp4 || !require_contiguous(temp4) || !require_native(temp4)) SWIG_fail; - arg4 = (int*) array_data(temp4); - } - result = (int)cs_graph_components< int >(arg1,(int const (*))arg2,(int const (*))arg3,arg4); - resultobj = SWIG_From_int(static_cast< int >(result)); - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - return resultobj; -fail: - { - if (is_new_object2 && array2) { - Py_DECREF(array2); - } - } - { - if (is_new_object3 && array3) { - Py_DECREF(array3); - } - } - return NULL; -} - - -static PyMethodDef SwigMethods[] = { - { (char *)"SWIG_PyInstanceMethod_New", (PyCFunction)SWIG_PyInstanceMethod_New, METH_O, NULL}, - { (char *)"cs_graph_components", _wrap_cs_graph_components, METH_VARARGS, (char *)"cs_graph_components(int n_nod, int Ap, int Aj, int flag) -> int"}, - { NULL, NULL, 0, NULL } -}; - - -/* 
-------- TYPE CONVERSION AND EQUIVALENCE RULES (BEGIN) -------- */ - -static swig_type_info _swigt__p_char = {"_p_char", "char *", 0, 0, (void*)0, 0}; - -static swig_type_info *swig_type_initial[] = { - &_swigt__p_char, -}; - -static swig_cast_info _swigc__p_char[] = { {&_swigt__p_char, 0, 0, 0},{0, 0, 0, 0}}; - -static swig_cast_info *swig_cast_initial[] = { - _swigc__p_char, -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (END) -------- */ - -static swig_const_info swig_const_table[] = { -{0, 0, 0, 0.0, 0, 0}}; - -#ifdef __cplusplus -} -#endif -/* ----------------------------------------------------------------------------- - * Type initialization: - * This problem is tough by the requirement that no dynamic - * memory is used. Also, since swig_type_info structures store pointers to - * swig_cast_info structures and swig_cast_info structures store pointers back - * to swig_type_info structures, we need some lookup code at initialization. - * The idea is that swig generates all the structures that are needed. - * The runtime then collects these partially filled structures. - * The SWIG_InitializeModule function takes these initial arrays out of - * swig_module, and does all the lookup, filling in the swig_module.types - * array with the correct data and linking the correct swig_cast_info - * structures together. - * - * The generated swig_type_info structures are assigned staticly to an initial - * array. We just loop through that array, and handle each type individually. - * First we lookup if this type has been already loaded, and if so, use the - * loaded structure instead of the generated one. Then we have to fill in the - * cast linked list. The cast data is initially stored in something like a - * two-dimensional array. Each row corresponds to a type (there are the same - * number of rows as there are in the swig_type_initial array). Each entry in - * a column is one of the swig_cast_info structures for that type. 
- * The cast_initial array is actually an array of arrays, because each row has - * a variable number of columns. So to actually build the cast linked list, - * we find the array of casts associated with the type, and loop through it - * adding the casts to the list. The one last trick we need to do is making - * sure the type pointer in the swig_cast_info struct is correct. - * - * First off, we lookup the cast->type name to see if it is already loaded. - * There are three cases to handle: - * 1) If the cast->type has already been loaded AND the type we are adding - * casting info to has not been loaded (it is in this module), THEN we - * replace the cast->type pointer with the type pointer that has already - * been loaded. - * 2) If BOTH types (the one we are adding casting info to, and the - * cast->type) are loaded, THEN the cast info has already been loaded by - * the previous module so we just ignore it. - * 3) Finally, if cast->type has not already been loaded, then we add that - * swig_cast_info to the linked list (because the cast->type) pointer will - * be correct. 
- * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* c-mode */ -#endif -#endif - -#if 0 -#define SWIGRUNTIME_DEBUG -#endif - - -SWIGRUNTIME void -SWIG_InitializeModule(void *clientdata) { - size_t i; - swig_module_info *module_head, *iter; - int found, init; - - clientdata = clientdata; - - /* check to see if the circular list has been setup, if not, set it up */ - if (swig_module.next==0) { - /* Initialize the swig_module */ - swig_module.type_initial = swig_type_initial; - swig_module.cast_initial = swig_cast_initial; - swig_module.next = &swig_module; - init = 1; - } else { - init = 0; - } - - /* Try and load any already created modules */ - module_head = SWIG_GetModule(clientdata); - if (!module_head) { - /* This is the first module loaded for this interpreter */ - /* so set the swig module into the interpreter */ - SWIG_SetModule(clientdata, &swig_module); - module_head = &swig_module; - } else { - /* the interpreter has loaded a SWIG module, but has it loaded this one? */ - found=0; - iter=module_head; - do { - if (iter==&swig_module) { - found=1; - break; - } - iter=iter->next; - } while (iter!= module_head); - - /* if the is found in the list, then all is done and we may leave */ - if (found) return; - /* otherwise we must add out module into the list */ - swig_module.next = module_head->next; - module_head->next = &swig_module; - } - - /* When multiple interpeters are used, a module could have already been initialized in - a different interpreter, but not yet have a pointer in this interpreter. - In this case, we do not want to continue adding types... 
everything should be - set up already */ - if (init == 0) return; - - /* Now work on filling in swig_module.types */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: size %d\n", swig_module.size); -#endif - for (i = 0; i < swig_module.size; ++i) { - swig_type_info *type = 0; - swig_type_info *ret; - swig_cast_info *cast; - -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); -#endif - - /* if there is another module already loaded */ - if (swig_module.next != &swig_module) { - type = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, swig_module.type_initial[i]->name); - } - if (type) { - /* Overwrite clientdata field */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found type %s\n", type->name); -#endif - if (swig_module.type_initial[i]->clientdata) { - type->clientdata = swig_module.type_initial[i]->clientdata; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found and overwrite type %s \n", type->name); -#endif - } - } else { - type = swig_module.type_initial[i]; - } - - /* Insert casting types */ - cast = swig_module.cast_initial[i]; - while (cast->type) { - /* Don't need to add information already in the list */ - ret = 0; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: look cast %s\n", cast->type->name); -#endif - if (swig_module.next != &swig_module) { - ret = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, cast->type->name); -#ifdef SWIGRUNTIME_DEBUG - if (ret) printf("SWIG_InitializeModule: found cast %s\n", ret->name); -#endif - } - if (ret) { - if (type == swig_module.type_initial[i]) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: skip old type %s\n", ret->name); -#endif - cast->type = ret; - ret = 0; - } else { - /* Check for casting already in the list */ - swig_cast_info *ocast = SWIG_TypeCheck(ret->name, type); -#ifdef SWIGRUNTIME_DEBUG - if (ocast) printf("SWIG_InitializeModule: skip old cast %s\n", ret->name); 
-#endif - if (!ocast) ret = 0; - } - } - - if (!ret) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: adding cast %s\n", cast->type->name); -#endif - if (type->cast) { - type->cast->prev = cast; - cast->next = type->cast; - } - type->cast = cast; - } - cast++; - } - /* Set entry in modules->types array equal to the type */ - swig_module.types[i] = type; - } - swig_module.types[i] = 0; - -#ifdef SWIGRUNTIME_DEBUG - printf("**** SWIG_InitializeModule: Cast List ******\n"); - for (i = 0; i < swig_module.size; ++i) { - int j = 0; - swig_cast_info *cast = swig_module.cast_initial[i]; - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); - while (cast->type) { - printf("SWIG_InitializeModule: cast type %s\n", cast->type->name); - cast++; - ++j; - } - printf("---- Total casts: %d\n",j); - } - printf("**** SWIG_InitializeModule: Cast List ******\n"); -#endif -} - -/* This function will propagate the clientdata field of type to -* any new swig_type_info structures that have been added into the list -* of equivalent types. It is like calling -* SWIG_TypeClientData(type, clientdata) a second time. 
-*/ -SWIGRUNTIME void -SWIG_PropagateClientData(void) { - size_t i; - swig_cast_info *equiv; - static int init_run = 0; - - if (init_run) return; - init_run = 1; - - for (i = 0; i < swig_module.size; i++) { - if (swig_module.types[i]->clientdata) { - equiv = swig_module.types[i]->cast; - while (equiv) { - if (!equiv->converter) { - if (equiv->type && !equiv->type->clientdata) - SWIG_TypeClientData(equiv->type, swig_module.types[i]->clientdata); - } - equiv = equiv->next; - } - } - } -} - -#ifdef __cplusplus -#if 0 -{ - /* c-mode */ -#endif -} -#endif - - - -#ifdef __cplusplus -extern "C" { -#endif - - /* Python-specific SWIG API */ -#define SWIG_newvarlink() SWIG_Python_newvarlink() -#define SWIG_addvarlink(p, name, get_attr, set_attr) SWIG_Python_addvarlink(p, name, get_attr, set_attr) -#define SWIG_InstallConstants(d, constants) SWIG_Python_InstallConstants(d, constants) - - /* ----------------------------------------------------------------------------- - * global variable support code. 
- * ----------------------------------------------------------------------------- */ - - typedef struct swig_globalvar { - char *name; /* Name of global variable */ - PyObject *(*get_attr)(void); /* Return the current value */ - int (*set_attr)(PyObject *); /* Set the value */ - struct swig_globalvar *next; - } swig_globalvar; - - typedef struct swig_varlinkobject { - PyObject_HEAD - swig_globalvar *vars; - } swig_varlinkobject; - - SWIGINTERN PyObject * - swig_varlink_repr(swig_varlinkobject *SWIGUNUSEDPARM(v)) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_InternFromString(""); -#else - return PyString_FromString(""); -#endif - } - - SWIGINTERN PyObject * - swig_varlink_str(swig_varlinkobject *v) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *str = PyUnicode_InternFromString("("); - PyObject *tail; - PyObject *joined; - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - tail = PyUnicode_FromString(var->name); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - if (var->next) { - tail = PyUnicode_InternFromString(", "); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - } - } - tail = PyUnicode_InternFromString(")"); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; -#else - PyObject *str = PyString_FromString("("); - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - PyString_ConcatAndDel(&str,PyString_FromString(var->name)); - if (var->next) PyString_ConcatAndDel(&str,PyString_FromString(", ")); - } - PyString_ConcatAndDel(&str,PyString_FromString(")")); -#endif - return str; - } - - SWIGINTERN int - swig_varlink_print(swig_varlinkobject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) { - char *tmp; - PyObject *str = swig_varlink_str(v); - fprintf(fp,"Swig global variables "); - fprintf(fp,"%s\n", tmp = SWIG_Python_str_AsChar(str)); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(str); - return 0; - } - 
- SWIGINTERN void - swig_varlink_dealloc(swig_varlinkobject *v) { - swig_globalvar *var = v->vars; - while (var) { - swig_globalvar *n = var->next; - free(var->name); - free(var); - var = n; - } - } - - SWIGINTERN PyObject * - swig_varlink_getattr(swig_varlinkobject *v, char *n) { - PyObject *res = NULL; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->get_attr)(); - break; - } - var = var->next; - } - if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN int - swig_varlink_setattr(swig_varlinkobject *v, char *n, PyObject *p) { - int res = 1; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->set_attr)(p); - break; - } - var = var->next; - } - if (res == 1 && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN PyTypeObject* - swig_varlink_type(void) { - static char varlink__doc__[] = "Swig var link object"; - static PyTypeObject varlink_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* Number of items in variable part (ob_size) */ -#endif - (char *)"swigvarlink", /* Type name (tp_name) */ - sizeof(swig_varlinkobject), /* Basic size (tp_basicsize) */ - 0, /* Itemsize (tp_itemsize) */ - (destructor) swig_varlink_dealloc, /* Deallocator (tp_dealloc) */ - (printfunc) swig_varlink_print, /* Print (tp_print) */ - (getattrfunc) swig_varlink_getattr, /* get attr (tp_getattr) */ - (setattrfunc) swig_varlink_setattr, /* Set attr (tp_setattr) */ - 0, /* tp_compare */ - (reprfunc) swig_varlink_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc) 
swig_varlink_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - varlink__doc__, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* tp_iter -> tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - varlink_type = tmp; - /* for Python 3 we already assigned ob_type in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - varlink_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &varlink_type; - } - - /* Create a variable linking object for use later */ - SWIGINTERN PyObject * - SWIG_Python_newvarlink(void) { - swig_varlinkobject *result = PyObject_NEW(swig_varlinkobject, swig_varlink_type()); - if (result) { - result->vars = 0; - } - return ((PyObject*) result); - } - - SWIGINTERN void - SWIG_Python_addvarlink(PyObject *p, char *name, PyObject *(*get_attr)(void), int (*set_attr)(PyObject *p)) { - swig_varlinkobject *v = (swig_varlinkobject *) p; - swig_globalvar *gv = (swig_globalvar *) malloc(sizeof(swig_globalvar)); - if (gv) { - size_t size = strlen(name)+1; - gv->name = (char *)malloc(size); - if (gv->name) { - strncpy(gv->name,name,size); - gv->get_attr = get_attr; - gv->set_attr = set_attr; - gv->next = v->vars; - } - } - v->vars = gv; - } - - SWIGINTERN PyObject * - SWIG_globals(void) { - static PyObject *_SWIG_globals = 0; - if (!_SWIG_globals) _SWIG_globals = SWIG_newvarlink(); - return _SWIG_globals; - } - - /* ----------------------------------------------------------------------------- - * constants/methods manipulation - * ----------------------------------------------------------------------------- */ - - /* Install Constants */ - SWIGINTERN void - SWIG_Python_InstallConstants(PyObject *d, swig_const_info constants[]) { - PyObject *obj 
= 0; - size_t i; - for (i = 0; constants[i].type; ++i) { - switch(constants[i].type) { - case SWIG_PY_POINTER: - obj = SWIG_NewPointerObj(constants[i].pvalue, *(constants[i]).ptype,0); - break; - case SWIG_PY_BINARY: - obj = SWIG_NewPackedObj(constants[i].pvalue, constants[i].lvalue, *(constants[i].ptype)); - break; - default: - obj = 0; - break; - } - if (obj) { - PyDict_SetItemString(d, constants[i].name, obj); - Py_DECREF(obj); - } - } - } - - /* -----------------------------------------------------------------------------*/ - /* Fix SwigMethods to carry the callback ptrs when needed */ - /* -----------------------------------------------------------------------------*/ - - SWIGINTERN void - SWIG_Python_FixMethods(PyMethodDef *methods, - swig_const_info *const_table, - swig_type_info **types, - swig_type_info **types_initial) { - size_t i; - for (i = 0; methods[i].ml_name; ++i) { - const char *c = methods[i].ml_doc; - if (c && (c = strstr(c, "swig_ptr: "))) { - int j; - swig_const_info *ci = 0; - const char *name = c + 10; - for (j = 0; const_table[j].type; ++j) { - if (strncmp(const_table[j].name, name, - strlen(const_table[j].name)) == 0) { - ci = &(const_table[j]); - break; - } - } - if (ci) { - void *ptr = (ci->type == SWIG_PY_POINTER) ? 
ci->pvalue : 0; - if (ptr) { - size_t shift = (ci->ptype) - types; - swig_type_info *ty = types_initial[shift]; - size_t ldoc = (c - methods[i].ml_doc); - size_t lptr = strlen(ty->name)+2*sizeof(void*)+2; - char *ndoc = (char*)malloc(ldoc + lptr + 10); - if (ndoc) { - char *buff = ndoc; - strncpy(buff, methods[i].ml_doc, ldoc); - buff += ldoc; - strncpy(buff, "swig_ptr: ", 10); - buff += 10; - SWIG_PackVoidPtr(buff, ptr, ty->name, lptr); - methods[i].ml_doc = ndoc; - } - } - } - } - } - } - -#ifdef __cplusplus -} -#endif - -/* -----------------------------------------------------------------------------* - * Partial Init method - * -----------------------------------------------------------------------------*/ - -#ifdef __cplusplus -extern "C" -#endif - -SWIGEXPORT -#if PY_VERSION_HEX >= 0x03000000 -PyObject* -#else -void -#endif -SWIG_init(void) { - PyObject *m, *d; -#if PY_VERSION_HEX >= 0x03000000 - static struct PyModuleDef SWIG_module = { - PyModuleDef_HEAD_INIT, - (char *) SWIG_name, - NULL, - -1, - SwigMethods, - NULL, - NULL, - NULL, - NULL - }; -#endif - - /* Fix SwigMethods to carry the callback ptrs when needed */ - SWIG_Python_FixMethods(SwigMethods, swig_const_table, swig_types, swig_type_initial); - -#if PY_VERSION_HEX >= 0x03000000 - m = PyModule_Create(&SWIG_module); -#else - m = Py_InitModule((char *) SWIG_name, SwigMethods); -#endif - d = PyModule_GetDict(m); - - SWIG_InitializeModule(0); - SWIG_InstallConstants(d,swig_const_table); - - - - import_array(); - -#if PY_VERSION_HEX >= 0x03000000 - return m; -#else - return; -#endif -} - diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csr.h b/scipy-0.10.1/scipy/sparse/sparsetools/csr.h deleted file mode 100644 index 168993dd8e..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csr.h +++ /dev/null @@ -1,1252 +0,0 @@ -#ifndef __CSR_H__ -#define __CSR_H__ - -#include -#include -#include -#include - -#include "dense.h" - -/* - * Extract main diagonal of CSR matrix A - * - * Input Arguments: - * I 
n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[n_col] - nonzeros - * - * Output Arguments: - * T Yx[min(n_row,n_col)] - diagonal entries - * - * Note: - * Output array Yx must be preallocated - * - * Duplicate entries will be summed. - * - * Complexity: Linear. Specifically O(nnz(A) + min(n_row,n_col)) - * - */ -template -void csr_diagonal(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - T Yx[]) -{ - const I N = std::min(n_row, n_col); - - for(I i = 0; i < N; i++){ - const I row_start = Ap[i]; - const I row_end = Ap[i+1]; - - T diag = 0; - for(I jj = row_start; jj < row_end; jj++){ - if (Aj[jj] == i) - diag += Ax[jj]; - } - - Yx[i] = diag; - } -} - - -/* - * Expand a compressed row pointer into a row array - * - * Input Arguments: - * I n_row - number of rows in A - * I Ap[n_row+1] - row pointer - * - * Output Arguments: - * Bi - row indices - * - * Note: - * Output array Bi must be preallocated - * - * Note: - * Complexity: Linear - * - */ -template -void expandptr(const I n_row, - const I Ap[], - I Bi[]) -{ - for(I i = 0; i < n_row; i++){ - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - Bi[jj] = i; - } - } -} - - -/* - * Scale the rows of a CSR matrix *in place* - * - * A[i,:] *= X[i] - * - */ -template -void csr_scale_rows(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - T Ax[], - const T Xx[]) -{ - for(I i = 0; i < n_row; i++){ - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - Ax[jj] *= Xx[i]; - } - } -} - - -/* - * Scale the columns of a CSR matrix *in place* - * - * A[:,i] *= X[i] - * - */ -template -void csr_scale_columns(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - T Ax[], - const T Xx[]) -{ - const I nnz = Ap[n_row]; - for(I i = 0; i < nnz; i++){ - Ax[i] *= Xx[Aj[i]]; - } -} - - -/* - * Compute the number of occupied RxC blocks in a matrix - * - * Input Arguments: - * I n_row - number of rows in A - * I R - 
row blocksize - * I C - column blocksize - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * - * Output Arguments: - * I num_blocks - number of blocks - * - * Note: - * Complexity: Linear - * - */ -template -I csr_count_blocks(const I n_row, - const I n_col, - const I R, - const I C, - const I Ap[], - const I Aj[]) -{ - std::vector mask(n_col/C + 1,-1); - I n_blks = 0; - for(I i = 0; i < n_row; i++){ - I bi = i/R; - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - I bj = Aj[jj]/C; - if(mask[bj] != bi){ - mask[bj] = bi; - n_blks++; - } - } - } - return n_blks; -} - - -/* - * Convert a CSR matrix to BSR format - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I R - row blocksize - * I C - column blocksize - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzero values - * - * Output Arguments: - * I Bp[n_row/R + 1] - block row pointer - * I Bj[nnz(B)] - column indices - * T Bx[nnz(B)] - nonzero blocks - * - * Note: - * Complexity: Linear - * Output arrays must be preallocated (with Bx initialized to zero) - * - * - */ -template -void csr_tobsr(const I n_row, - const I n_col, - const I R, - const I C, - const I Ap[], - const I Aj[], - const T Ax[], - I Bp[], - I Bj[], - T Bx[]) -{ - std::vector blocks(n_col/C + 1, (T*)0 ); - - assert( n_row % R == 0 ); - assert( n_col % C == 0 ); - - I n_brow = n_row / R; - //I n_bcol = n_col / C; - - I RC = R*C; - I n_blks = 0; - - Bp[0] = 0; - - for(I bi = 0; bi < n_brow; bi++){ - for(I r = 0; r < R; r++){ - I i = R*bi + r; //row index - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - I j = Aj[jj]; //column index - - I bj = j / C; - I c = j % C; - - if( blocks[bj] == 0 ){ - blocks[bj] = Bx + RC*n_blks; - Bj[n_blks] = bj; - n_blks++; - } - - *(blocks[bj] + C*r + c) += Ax[jj]; - } - } - - for(I jj = Ap[R*bi]; jj < Ap[R*(bi+1)]; jj++){ - blocks[Aj[jj] / C] = 0; - } - - Bp[bi+1] = n_blks; - } -} - - -/* - * Determine whether the CSR column indices 
are in sorted order. - * - * Input Arguments: - * I n_row - number of rows in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * - */ -template -bool csr_has_sorted_indices(const I n_row, - const I Ap[], - const I Aj[]) -{ - for(I i = 0; i < n_row; i++){ - for(I jj = Ap[i]; jj < Ap[i+1] - 1; jj++){ - if(Aj[jj] > Aj[jj+1]){ - return false; - } - } - } - return true; -} - - - -/* - * Determine whether the matrix structure is canonical CSR. - * Canonical CSR implies that column indices within each row - * are (1) sorted and (2) unique. Matrices that meet these - * conditions facilitate faster matrix computations. - * - * Input Arguments: - * I n_row - number of rows in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * - */ -template -bool csr_has_canonical_format(const I n_row, - const I Ap[], - const I Aj[]) -{ - for(I i = 0; i < n_row; i++){ - if (Ap[i] > Ap[i+1]) - return false; - for(I jj = Ap[i] + 1; jj < Ap[i+1]; jj++){ - if( !(Aj[jj-1] < Aj[jj]) ){ - return false; - } - } - } - return true; -} - - -template< class T1, class T2 > -bool kv_pair_less(const std::pair& x, const std::pair& y){ - return x.first < y.first; -} - -/* - * Sort CSR column indices inplace - * - * Input Arguments: - * I n_row - number of rows in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - */ -template -void csr_sort_indices(const I n_row, - const I Ap[], - I Aj[], - T Ax[]) -{ - std::vector< std::pair > temp; - - for(I i = 0; i < n_row; i++){ - I row_start = Ap[i]; - I row_end = Ap[i+1]; - - temp.clear(); - - for(I jj = row_start; jj < row_end; jj++){ - temp.push_back(std::make_pair(Aj[jj],Ax[jj])); - } - - std::sort(temp.begin(),temp.end(),kv_pair_less); - - for(I jj = row_start, n = 0; jj < row_end; jj++, n++){ - Aj[jj] = temp[n].first; - Ax[jj] = temp[n].second; - } - } -} - - - - -/* - * Compute B = A for CSR matrix A, CSC matrix B - * - * Also, with the appropriate arguments can also be 
used to: - * - compute B = A^t for CSR matrix A, CSR matrix B - * - compute B = A^t for CSC matrix A, CSC matrix B - * - convert CSC->CSR - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - * Output Arguments: - * I Bp[n_col+1] - column pointer - * I Bj[nnz(A)] - row indices - * T Bx[nnz(A)] - nonzeros - * - * Note: - * Output arrays Bp, Bj, Bx must be preallocated - * - * Note: - * Input: column indices *are not* assumed to be in sorted order - * Output: row indices *will be* in sorted order - * - * Complexity: Linear. Specifically O(nnz(A) + max(n_row,n_col)) - * - */ -template -void csr_tocsc(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - I Bp[], - I Bi[], - T Bx[]) -{ - const I nnz = Ap[n_row]; - - //compute number of non-zero entries per column of A - std::fill(Bp, Bp + n_col, 0); - - for (I n = 0; n < nnz; n++){ - Bp[Aj[n]]++; - } - - //cumsum the nnz per column to get Bp[] - for(I col = 0, cumsum = 0; col < n_col; col++){ - I temp = Bp[col]; - Bp[col] = cumsum; - cumsum += temp; - } - Bp[n_col] = nnz; - - for(I row = 0; row < n_row; row++){ - for(I jj = Ap[row]; jj < Ap[row+1]; jj++){ - I col = Aj[jj]; - I dest = Bp[col]; - - Bi[dest] = row; - Bx[dest] = Ax[jj]; - - Bp[col]++; - } - } - - for(I col = 0, last = 0; col <= n_col; col++){ - I temp = Bp[col]; - Bp[col] = last; - last = temp; - } -} - - - -/* - * Compute B = A for CSR matrix A, ELL matrix B - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * I row_length - maximum nnz in a row of A - * - * Output Arguments: - * I Bj[n_row * row_length] - column indices - * T Bx[n_row * row_length] - nonzeros - * - * Note: - * Output arrays Bj, Bx must be preallocated - * Duplicate entries in A are 
not merged. - * Explicit zeros in A are carried over to B. - * Rows with fewer than row_length columns are padded with zeros. - * - */ -template -void csr_toell(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const I row_length, - I Bj[], - T Bx[]) -{ - const I ell_nnz = row_length * n_row; - std::fill(Bj, Bj + ell_nnz, 0); - std::fill(Bx, Bx + ell_nnz, 0); - - for(I i = 0; i < n_row; i++){ - I * Bj_row = Bj + row_length * i; - T * Bx_row = Bx + row_length * i; - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - *Bj_row = Aj[jj]; - *Bx_row = Ax[jj]; - Bj_row++; - Bx_row++; - } - } -} - - -/* - * Compute C = A*B for CSR matrices A,B - * - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in B (hence C is n_row by n_col) - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * I Bp[?] - row pointer - * I Bj[nnz(B)] - column indices - * T Bx[nnz(B)] - nonzeros - * Output Arguments: - * I Cp[n_row+1] - row pointer - * I Cj[nnz(C)] - column indices - * T Cx[nnz(C)] - nonzeros - * - * Note: - * Output arrays Cp, Cj, and Cx must be preallocated - * The value of nnz(C) will be stored in Ap[n_row] after the first pass. - * - * Note: - * Input: A and B column indices *are not* assumed to be in sorted order - * Output: C column indices *are not* assumed to be in sorted order - * Cx will not contain any zero entries - * - * Complexity: O(n_row*K^2 + max(n_row,n_col)) - * where K is the maximum nnz in a row of A - * and column of B. - * - * - * This is an implementation of the SMMP algorithm: - * - * "Sparse Matrix Multiplication Package (SMMP)" - * Randolph E. Bank and Craig C. 
Douglas - * - * http://citeseer.ist.psu.edu/445062.html - * http://www.mgnet.org/~douglas/ccd-codes.html - * - */ - - -/* - * Pass 1 computes CSR row pointer for the matrix product C = A * B - * - */ -template -void csr_matmat_pass1(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const I Bp[], - const I Bj[], - I Cp[]) -{ - // method that uses O(n) temp storage - std::vector mask(n_col, -1); - Cp[0] = 0; - - I nnz = 0; - for(I i = 0; i < n_row; i++){ - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - I j = Aj[jj]; - for(I kk = Bp[j]; kk < Bp[j+1]; kk++){ - I k = Bj[kk]; - if(mask[k] != i){ - mask[k] = i; - nnz++; - } - } - } - Cp[i+1] = nnz; - } -} - -/* - * Pass 2 computes CSR entries for matrix C = A*B using the - * row pointer Cp[] computed in Pass 1. - * - */ -template -void csr_matmat_pass2(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const I Bp[], - const I Bj[], - const T Bx[], - I Cp[], - I Cj[], - T Cx[]) -{ - std::vector next(n_col,-1); - std::vector sums(n_col, 0); - - I nnz = 0; - - Cp[0] = 0; - - for(I i = 0; i < n_row; i++){ - I head = -2; - I length = 0; - - I jj_start = Ap[i]; - I jj_end = Ap[i+1]; - for(I jj = jj_start; jj < jj_end; jj++){ - I j = Aj[jj]; - T v = Ax[jj]; - - I kk_start = Bp[j]; - I kk_end = Bp[j+1]; - for(I kk = kk_start; kk < kk_end; kk++){ - I k = Bj[kk]; - - sums[k] += v*Bx[kk]; - - if(next[k] == -1){ - next[k] = head; - head = k; - length++; - } - } - } - - for(I jj = 0; jj < length; jj++){ - - if(sums[head] != 0){ - Cj[nnz] = head; - Cx[nnz] = sums[head]; - nnz++; - } - - I temp = head; - head = next[head]; - - next[temp] = -1; //clear arrays - sums[temp] = 0; - } - - Cp[i+1] = nnz; - } -} - - -/* - * Compute C = A (binary_op) B for CSR matrices that are not - * necessarily canonical CSR format. Specifically, this method - * works even when the input matrices have duplicate and/or - * unsorted column indices within a given row. 
- * - * Refer to csr_binop_csr() for additional information - * - * Note: - * Output arrays Cp, Cj, and Cx must be preallocated - * If nnz(C) is not known a priori, a conservative bound is: - * nnz(C) <= nnz(A) + nnz(B) - * - * Note: - * Input: A and B column indices are not assumed to be in sorted order - * Output: C column indices are not generally in sorted order - * C will not contain any duplicate entries or explicit zeros. - * - */ -template -void csr_binop_csr_general(const I n_row, const I n_col, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], - const binary_op& op) -{ - //Method that works for duplicate and/or unsorted indices - - std::vector next(n_col,-1); - std::vector A_row(n_col, 0); - std::vector B_row(n_col, 0); - - I nnz = 0; - Cp[0] = 0; - - for(I i = 0; i < n_row; i++){ - I head = -2; - I length = 0; - - //add a row of A to A_row - I i_start = Ap[i]; - I i_end = Ap[i+1]; - for(I jj = i_start; jj < i_end; jj++){ - I j = Aj[jj]; - - A_row[j] += Ax[jj]; - - if(next[j] == -1){ - next[j] = head; - head = j; - length++; - } - } - - //add a row of B to B_row - i_start = Bp[i]; - i_end = Bp[i+1]; - for(I jj = i_start; jj < i_end; jj++){ - I j = Bj[jj]; - - B_row[j] += Bx[jj]; - - if(next[j] == -1){ - next[j] = head; - head = j; - length++; - } - } - - - // scan through columns where A or B has - // contributed a non-zero entry - for(I jj = 0; jj < length; jj++){ - T result = op(A_row[head], B_row[head]); - - if(result != 0){ - Cj[nnz] = head; - Cx[nnz] = result; - nnz++; - } - - I temp = head; - head = next[head]; - - next[temp] = -1; - A_row[temp] = 0; - B_row[temp] = 0; - } - - Cp[i + 1] = nnz; - } -} - - - -/* - * Compute C = A (binary_op) B for CSR matrices that are in the - * canonical CSR format. Specifically, this method requires that - * the rows of the input matrices are free of duplicate column indices - * and that the column indices are in sorted order. 
- * - * Refer to csr_binop_csr() for additional information - * - * Note: - * Input: A and B column indices are assumed to be in sorted order - * Output: C column indices will be in sorted order - * Cx will not contain any zero entries - * - */ -template -void csr_binop_csr_canonical(const I n_row, const I n_col, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], - const binary_op& op) -{ - //Method that works for canonical CSR matrices - - Cp[0] = 0; - I nnz = 0; - - for(I i = 0; i < n_row; i++){ - I A_pos = Ap[i]; - I B_pos = Bp[i]; - I A_end = Ap[i+1]; - I B_end = Bp[i+1]; - - //while not finished with either row - while(A_pos < A_end && B_pos < B_end){ - I A_j = Aj[A_pos]; - I B_j = Bj[B_pos]; - - if(A_j == B_j){ - T result = op(Ax[A_pos],Bx[B_pos]); - if(result != 0){ - Cj[nnz] = A_j; - Cx[nnz] = result; - nnz++; - } - A_pos++; - B_pos++; - } else if (A_j < B_j) { - T result = op(Ax[A_pos],0); - if (result != 0){ - Cj[nnz] = A_j; - Cx[nnz] = result; - nnz++; - } - A_pos++; - } else { - //B_j < A_j - T result = op(0,Bx[B_pos]); - if (result != 0){ - Cj[nnz] = B_j; - Cx[nnz] = result; - nnz++; - } - B_pos++; - } - } - - //tail - while(A_pos < A_end){ - T result = op(Ax[A_pos],0); - if (result != 0){ - Cj[nnz] = Aj[A_pos]; - Cx[nnz] = result; - nnz++; - } - A_pos++; - } - while(B_pos < B_end){ - T result = op(0,Bx[B_pos]); - if (result != 0){ - Cj[nnz] = Bj[B_pos]; - Cx[nnz] = result; - nnz++; - } - B_pos++; - } - - Cp[i+1] = nnz; - } -} - - -/* - * Compute C = A (binary_op) B for CSR matrices A,B where the column - * indices with the rows of A and B are known to be sorted. 
- * - * binary_op(x,y) - binary operator to apply elementwise - * - * Input Arguments: - * I n_row - number of rows in A (and B) - * I n_col - number of columns in A (and B) - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * I Bp[n_row+1] - row pointer - * I Bj[nnz(B)] - column indices - * T Bx[nnz(B)] - nonzeros - * Output Arguments: - * I Cp[n_row+1] - row pointer - * I Cj[nnz(C)] - column indices - * T Cx[nnz(C)] - nonzeros - * - * Note: - * Output arrays Cp, Cj, and Cx must be preallocated - * If nnz(C) is not known a priori, a conservative bound is: - * nnz(C) <= nnz(A) + nnz(B) - * - * Note: - * Input: A and B column indices are not assumed to be in sorted order. - * Output: C column indices will be in sorted if both A and B have sorted indices. - * Cx will not contain any zero entries - * - */ -template -void csr_binop_csr(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const I Bp[], - const I Bj[], - const T Bx[], - I Cp[], - I Cj[], - T Cx[], - const binary_op& op) -{ - if (csr_has_canonical_format(n_row,Ap,Aj) && csr_has_canonical_format(n_row,Bp,Bj)) - csr_binop_csr_canonical(n_row, n_col, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx, op); - else - csr_binop_csr_general(n_row, n_col, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx, op); -} - - - -/* element-wise binary operations*/ -template -void csr_elmul_csr(const I n_row, const I n_col, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::multiplies()); -} - -template -void csr_eldiv_csr(const I n_row, const I n_col, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::divides()); -} - - -template -void csr_plus_csr(const I n_row, const I n_col, - const I Ap[], const I Aj[], const T Ax[], - const I 
Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::plus()); -} - -template -void csr_minus_csr(const I n_row, const I n_col, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) -{ - csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::minus()); -} - - - -/* - * Sum together duplicate column entries in each row of CSR matrix A - * - * - * Input Arguments: - * I n_row - number of rows in A (and B) - * I n_col - number of columns in A (and B) - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - * Note: - * The column indicies within each row must be in sorted order. - * Explicit zeros are retained. - * Ap, Aj, and Ax will be modified *inplace* - * - */ -template -void csr_sum_duplicates(const I n_row, - const I n_col, - I Ap[], - I Aj[], - T Ax[]) -{ - I nnz = 0; - I row_end = 0; - for(I i = 0; i < n_row; i++){ - I jj = row_end; - row_end = Ap[i+1]; - while( jj < row_end ){ - I j = Aj[jj]; - T x = Ax[jj]; - jj++; - while( jj < row_end && Aj[jj] == j ){ - x += Ax[jj]; - jj++; - } - Aj[nnz] = j; - Ax[nnz] = x; - nnz++; - } - Ap[i+1] = nnz; - } -} - -/* - * Eliminate zero entries from CSR matrix A - * - * - * Input Arguments: - * I n_row - number of rows in A (and B) - * I n_col - number of columns in A (and B) - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - * Note: - * Ap, Aj, and Ax will be modified *inplace* - * - */ -template -void csr_eliminate_zeros(const I n_row, - const I n_col, - I Ap[], - I Aj[], - T Ax[]) -{ - I nnz = 0; - I row_end = 0; - for(I i = 0; i < n_row; i++){ - I jj = row_end; - row_end = Ap[i+1]; - while( jj < row_end ){ - I j = Aj[jj]; - T x = Ax[jj]; - if(x != 0){ - Aj[nnz] = j; - Ax[nnz] = x; - nnz++; - } - jj++; - } - Ap[i+1] = nnz; - } -} - - - -/* - * Compute Y += A*X for CSR matrix A and dense vectors X,Y - * 
- * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * T Xx[n_col] - input vector - * - * Output Arguments: - * T Yx[n_row] - output vector - * - * Note: - * Output array Yx must be preallocated - * - * Complexity: Linear. Specifically O(nnz(A) + n_row) - * - */ -template -void csr_matvec(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - for(I i = 0; i < n_row; i++){ - T sum = Yx[i]; - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - sum += Ax[jj] * Xx[Aj[jj]]; - } - Yx[i] = sum; - } -} - - -/* - * Compute Y += A*X for CSR matrix A and dense block vectors X,Y - * - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I n_vecs - number of column vectors in X and Y - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * T Xx[n_col,n_vecs] - input vector - * - * Output Arguments: - * T Yx[n_row,n_vecs] - output vector - * - */ -template -void csr_matvecs(const I n_row, - const I n_col, - const I n_vecs, - const I Ap[], - const I Aj[], - const T Ax[], - const T Xx[], - T Yx[]) -{ - for(I i = 0; i < n_row; i++){ - T * y = Yx + n_vecs * i; - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - const I j = Aj[jj]; - const T a = Ax[jj]; - const T * x = Xx + n_vecs * j; - axpy(n_vecs, a, x, y); - } - } -} - - - - -template -void get_csr_submatrix(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const I ir0, - const I ir1, - const I ic0, - const I ic1, - std::vector* Bp, - std::vector* Bj, - std::vector* Bx) -{ - I new_n_row = ir1 - ir0; - //I new_n_col = ic1 - ic0; //currently unused - I new_nnz = 0; - I kk = 0; - - // Count nonzeros total/per row. 
- for(I i = 0; i < new_n_row; i++){ - I row_start = Ap[ir0+i]; - I row_end = Ap[ir0+i+1]; - - for(I jj = row_start; jj < row_end; jj++){ - if ((Aj[jj] >= ic0) && (Aj[jj] < ic1)) { - new_nnz++; - } - } - } - - // Allocate. - Bp->resize(new_n_row+1); - Bj->resize(new_nnz); - Bx->resize(new_nnz); - - // Assign. - (*Bp)[0] = 0; - for(I i = 0; i < new_n_row; i++){ - I row_start = Ap[ir0+i]; - I row_end = Ap[ir0+i+1]; - - for(I jj = row_start; jj < row_end; jj++){ - if ((Aj[jj] >= ic0) && (Aj[jj] < ic1)) { - (*Bj)[kk] = Aj[jj] - ic0; - (*Bx)[kk] = Ax[jj]; - kk++; - } - } - (*Bp)[i+1] = kk; - } -} - - -/* - * Count the number of occupied diagonals in CSR matrix A - * - * Input Arguments: - * I nnz - number of nonzeros in A - * I Ai[nnz(A)] - row indices - * I Aj[nnz(A)] - column indices - * - */ -template -I csr_count_diagonals(const I n_row, - const I Ap[], - const I Aj[]) -{ - std::set diagonals; - - for(I i = 0; i < n_row; i++){ - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - diagonals.insert(Aj[jj] - i); - } - } - return diagonals.size(); -} - - -/* - * Sample the matrix at specific locations - * - * Determine the matrix value for each row,col pair - * Bx[n] = A(Bi[n],Bj[n]) - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * I n_samples - number of samples - * I Bi[N] - sample rows - * I Bj[N] - sample columns - * - * Output Arguments: - * T Bx[N] - sample values - * - * Note: - * Output array Yx must be preallocated - * - * Complexity: varies - * - * TODO handle other cases with asymptotically optimal method - * - */ -template -void csr_sample_values(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const I n_samples, - const I Bi[], - const I Bj[], - T Bx[]) -{ - // ideally we'd do the following - // Case 1: A is canonical and B is sorted by row and column - // -> special purpose csr_binop_csr() 
(optimized form) - // Case 2: A is canonical and B is unsorted and max(log(Ap[i+1] - Ap[i])) > log(num_samples) - // -> do binary searches for each sample - // Case 3: A is canonical and B is unsorted and max(log(Ap[i+1] - Ap[i])) < log(num_samples) - // -> sort B by row and column and use Case 1 - // Case 4: A is not canonical and num_samples ~ nnz - // -> special purpose csr_binop_csr() (general form) - // Case 5: A is not canonical and num_samples << nnz - // -> do linear searches for each sample - - const I nnz = Ap[n_row]; - - const I threshold = nnz / 10; // constant is arbitrary - - if (n_samples > threshold && csr_has_canonical_format(n_row, Ap, Aj)) - { - for(I n = 0; n < n_samples; n++) - { - const I i = Bi[n] < 0 ? Bi[n] + n_row : Bi[n]; // sample row - const I j = Bj[n] < 0 ? Bj[n] + n_col : Bj[n]; // sample column - - const I row_start = Ap[i]; - const I row_end = Ap[i+1]; - - if (row_start < row_end) - { - const I offset = std::lower_bound(Aj + row_start, Aj + row_end, j) - Aj; - - if (offset < row_end && Aj[offset] == j) - Bx[n] = Ax[offset]; - else - Bx[n] = 0; - } - else - { - Bx[n] = 0; - } - - } - } - else - { - for(I n = 0; n < n_samples; n++) - { - const I i = Bi[n] < 0 ? Bi[n] + n_row : Bi[n]; // sample row - const I j = Bj[n] < 0 ? Bj[n] + n_col : Bj[n]; // sample column - - const I row_start = Ap[i]; - const I row_end = Ap[i+1]; - - T x = 0; - - for(I jj = row_start; jj < row_end; jj++) - { - if (Aj[jj] == j) - x += Ax[jj]; - } - - Bx[n] = x; - } - - } -} - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csr.py b/scipy-0.10.1/scipy/sparse/sparsetools/csr.py deleted file mode 100644 index c7248d93ef..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csr.py +++ /dev/null @@ -1,651 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 2.0.1+capsulehack -# -# Do not make changes to this file unless you know what you are doing--modify -# the SWIG interface file instead. 
-# This file is compatible with both classic and new-style classes. - -from sys import version_info -if version_info >= (2,6,0): - def swig_import_helper(): - from os.path import dirname - import imp - fp = None - try: - fp, pathname, description = imp.find_module('_csr', [dirname(__file__)]) - except ImportError: - import _csr - return _csr - if fp is not None: - try: - _mod = imp.load_module('_csr', fp, pathname, description) - finally: - fp.close() - return _mod - _csr = swig_import_helper() - del swig_import_helper -else: - import _csr -del version_info -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. -def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'SwigPyObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError(name) - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -try: - _object = object - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 - - - -def expandptr(*args): - """expandptr(int n_row, int Ap, int Bi)""" - return _csr.expandptr(*args) - -def csr_matmat_pass1(*args): - """ - csr_matmat_pass1(int n_row, int n_col, int Ap, int Aj, int Bp, int Bj, - int Cp) - """ - return 
_csr.csr_matmat_pass1(*args) - -def csr_count_blocks(*args): - """csr_count_blocks(int n_row, int n_col, int R, int C, int Ap, int Aj) -> int""" - return _csr.csr_count_blocks(*args) - -def csr_has_sorted_indices(*args): - """csr_has_sorted_indices(int n_row, int Ap, int Aj) -> bool""" - return _csr.csr_has_sorted_indices(*args) - - -def csr_diagonal(*args): - """ - csr_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax, - signed char Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - unsigned char Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - unsigned short Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - unsigned int Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax, - long long Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - unsigned long long Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax, - long double Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Yx) - csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Yx) - """ - return _csr.csr_diagonal(*args) - -def csr_scale_rows(*args): - """ - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, signed char Ax, - signed char Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - unsigned char Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned 
short Ax, - unsigned short Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - unsigned int Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long long Ax, - long long Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - unsigned long long Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long double Ax, - long double Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Xx) - csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx) - """ - return _csr.csr_scale_rows(*args) - -def csr_scale_columns(*args): - """ - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, signed char Ax, - signed char Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - unsigned char Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - unsigned short Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - unsigned int Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long long Ax, - long long Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - unsigned long long Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long double Ax, - long double Xx) 
- csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Xx) - csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx) - """ - return _csr.csr_scale_columns(*args) - -def csr_tocsc(*args): - """ - csr_tocsc(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int Bp, int Bi, signed char Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int Bp, int Bi, unsigned char Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, - int Bi, short Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int Bp, int Bi, unsigned short Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, - int Bi, int Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int Bp, int Bi, unsigned int Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, long long Ax, - int Bp, int Bi, long long Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - int Bp, int Bi, unsigned long long Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, - int Bi, float Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, - int Bi, double Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, long double Ax, - int Bp, int Bi, long double Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bi, npy_cfloat_wrapper Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bi, npy_cdouble_wrapper Bx) - csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bi, npy_clongdouble_wrapper Bx) - """ - return _csr.csr_tocsc(*args) - -def csr_tobsr(*args): - """ - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - signed char Ax, int Bp, int Bj, 
signed char Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned char Ax, int Bp, int Bj, unsigned char Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - short Ax, int Bp, int Bj, short Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned short Ax, int Bp, int Bj, unsigned short Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - int Ax, int Bp, int Bj, int Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned int Ax, int Bp, int Bj, unsigned int Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long long Ax, int Bp, int Bj, long long Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - unsigned long long Ax, int Bp, int Bj, unsigned long long Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - float Ax, int Bp, int Bj, float Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - double Ax, int Bp, int Bj, double Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - long double Ax, int Bp, int Bj, long double Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx) - csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, - npy_clongdouble_wrapper Ax, int Bp, int Bj, - npy_clongdouble_wrapper Bx) - """ - return _csr.csr_tobsr(*args) - -def csr_matmat_pass2(*args): - """ - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int Bp, int Bj, signed char Bx, int Cp, int Cj, - signed char Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int Bp, int Bj, unsigned char Bx, int Cp, - int Cj, unsigned char Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, - int Bj, short Bx, int Cp, int Cj, 
short Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int Bp, int Bj, unsigned short Bx, int Cp, - int Cj, unsigned short Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, - int Bj, int Bx, int Cp, int Cj, int Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int Bp, int Bj, unsigned int Bx, int Cp, - int Cj, unsigned int Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long long Ax, - int Bp, int Bj, long long Bx, int Cp, int Cj, - long long Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, - int Bj, float Bx, int Cp, int Cj, float Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, - int Bj, double Bx, int Cp, int Cj, double Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long double Ax, - int Bp, int Bj, long double Bx, int Cp, int Cj, - long double Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx, - int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _csr.csr_matmat_pass2(*args) - -def csr_matvec(*args): - """ - csr_matvec(int n_row, int n_col, int Ap, int Aj, signed char Ax, - signed char Xx, signed char Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - unsigned char Xx, unsigned char Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx, - short Yx) - csr_matvec(int n_row, 
int n_col, int Ap, int Aj, unsigned short Ax, - unsigned short Xx, unsigned short Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx, - int Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - unsigned int Xx, unsigned int Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, long long Ax, - long long Xx, long long Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - unsigned long long Xx, unsigned long long Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx, - float Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx, - double Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, long double Ax, - long double Xx, long double Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx) - csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx) - """ - return _csr.csr_matvec(*args) - -def csr_matvecs(*args): - """ - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, signed char Ax, - signed char Xx, signed char Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned char Ax, - unsigned char Xx, unsigned char Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, short Ax, - short Xx, short Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned short Ax, - unsigned short Xx, unsigned short Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, int Ax, - int Xx, int Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned int Ax, - unsigned int Xx, unsigned int Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, long long Ax, - long long Xx, long long Yx) - csr_matvecs(int 
n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned long long Ax, - unsigned long long Xx, - unsigned long long Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, float Ax, - float Xx, float Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, double Ax, - double Xx, double Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, long double Ax, - long double Xx, long double Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, npy_cfloat_wrapper Ax, - npy_cfloat_wrapper Xx, - npy_cfloat_wrapper Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, npy_cdouble_wrapper Ax, - npy_cdouble_wrapper Xx, - npy_cdouble_wrapper Yx) - csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, npy_clongdouble_wrapper Ax, - npy_clongdouble_wrapper Xx, - npy_clongdouble_wrapper Yx) - """ - return _csr.csr_matvecs(*args) - -def csr_elmul_csr(*args): - """ - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int Bp, int Bj, signed char Bx, int Cp, int Cj, - signed char Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int Bp, int Bj, unsigned char Bx, int Cp, - int Cj, unsigned char Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, - int Bj, short Bx, int Cp, int Cj, short Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int Bp, int Bj, unsigned short Bx, int Cp, - int Cj, unsigned short Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, - int Bj, int Bx, int Cp, int Cj, int Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int Bp, int Bj, unsigned int Bx, int Cp, - int Cj, unsigned int Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, - int Bp, int Bj, long long Bx, int Cp, int Cj, - long long Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, 
unsigned long long Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, - int Bj, float Bx, int Cp, int Cj, float Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, - int Bj, double Bx, int Cp, int Cj, double Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, - int Bp, int Bj, long double Bx, int Cp, int Cj, - long double Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx, - int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _csr.csr_elmul_csr(*args) - -def csr_eldiv_csr(*args): - """ - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int Bp, int Bj, signed char Bx, int Cp, int Cj, - signed char Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int Bp, int Bj, unsigned char Bx, int Cp, - int Cj, unsigned char Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, - int Bj, short Bx, int Cp, int Cj, short Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int Bp, int Bj, unsigned short Bx, int Cp, - int Cj, unsigned short Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, - int Bj, int Bx, int Cp, int Cj, int Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int Bp, int Bj, unsigned int Bx, int Cp, - int Cj, unsigned int Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, - int Bp, int Bj, long long Bx, int Cp, int Cj, - long long Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - int Bp, int Bj, 
unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, - int Bj, float Bx, int Cp, int Cj, float Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, - int Bj, double Bx, int Cp, int Cj, double Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, - int Bp, int Bj, long double Bx, int Cp, int Cj, - long double Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx, - int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _csr.csr_eldiv_csr(*args) - -def csr_plus_csr(*args): - """ - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int Bp, int Bj, signed char Bx, int Cp, int Cj, - signed char Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int Bp, int Bj, unsigned char Bx, int Cp, - int Cj, unsigned char Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, - int Bj, short Bx, int Cp, int Cj, short Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int Bp, int Bj, unsigned short Bx, int Cp, - int Cj, unsigned short Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, - int Bj, int Bx, int Cp, int Cj, int Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int Bp, int Bj, unsigned int Bx, int Cp, - int Cj, unsigned int Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, - int Bp, int Bj, long long Bx, int Cp, int Cj, - long long Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned 
long long Ax, - int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, - int Bj, float Bx, int Cp, int Cj, float Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, - int Bj, double Bx, int Cp, int Cj, double Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, - int Bp, int Bj, long double Bx, int Cp, int Cj, - long double Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx, - int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _csr.csr_plus_csr(*args) - -def csr_minus_csr(*args): - """ - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int Bp, int Bj, signed char Bx, int Cp, int Cj, - signed char Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int Bp, int Bj, unsigned char Bx, int Cp, - int Cj, unsigned char Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, - int Bj, short Bx, int Cp, int Cj, short Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int Bp, int Bj, unsigned short Bx, int Cp, - int Cj, unsigned short Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, - int Bj, int Bx, int Cp, int Cj, int Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int Bp, int Bj, unsigned int Bx, int Cp, - int Cj, unsigned int Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, - int Bp, int Bj, long long Bx, int Cp, int Cj, - long long Cx) - csr_minus_csr(int n_row, int 
n_col, int Ap, int Aj, unsigned long long Ax, - int Bp, int Bj, unsigned long long Bx, - int Cp, int Cj, unsigned long long Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, - int Bj, float Bx, int Cp, int Cj, float Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, - int Bj, double Bx, int Cp, int Cj, double Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, - int Bp, int Bj, long double Bx, int Cp, int Cj, - long double Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int Bp, int Bj, npy_cfloat_wrapper Bx, - int Cp, int Cj, npy_cfloat_wrapper Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int Bp, int Bj, npy_cdouble_wrapper Bx, - int Cp, int Cj, npy_cdouble_wrapper Cx) - csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int Bp, int Bj, npy_clongdouble_wrapper Bx, - int Cp, int Cj, npy_clongdouble_wrapper Cx) - """ - return _csr.csr_minus_csr(*args) - -def csr_sort_indices(*args): - """ - csr_sort_indices(int n_row, int Ap, int Aj, signed char Ax) - csr_sort_indices(int n_row, int Ap, int Aj, unsigned char Ax) - csr_sort_indices(int n_row, int Ap, int Aj, short Ax) - csr_sort_indices(int n_row, int Ap, int Aj, unsigned short Ax) - csr_sort_indices(int n_row, int Ap, int Aj, int Ax) - csr_sort_indices(int n_row, int Ap, int Aj, unsigned int Ax) - csr_sort_indices(int n_row, int Ap, int Aj, long long Ax) - csr_sort_indices(int n_row, int Ap, int Aj, unsigned long long Ax) - csr_sort_indices(int n_row, int Ap, int Aj, float Ax) - csr_sort_indices(int n_row, int Ap, int Aj, double Ax) - csr_sort_indices(int n_row, int Ap, int Aj, long double Ax) - csr_sort_indices(int n_row, int Ap, int Aj, npy_cfloat_wrapper Ax) - csr_sort_indices(int n_row, int Ap, int Aj, npy_cdouble_wrapper Ax) - csr_sort_indices(int n_row, int Ap, int Aj, npy_clongdouble_wrapper Ax) - """ - return 
_csr.csr_sort_indices(*args) - -def csr_eliminate_zeros(*args): - """ - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, signed char Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned char Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, short Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned short Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, int Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned int Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long long Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, float Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, double Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long double Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax) - csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax) - """ - return _csr.csr_eliminate_zeros(*args) - -def csr_sum_duplicates(*args): - """ - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, signed char Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned char Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, short Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned short Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, int Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned int Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long long Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, float Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, double Ax) - csr_sum_duplicates(int 
n_row, int n_col, int Ap, int Aj, long double Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax) - csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax) - """ - return _csr.csr_sum_duplicates(*args) - -def get_csr_submatrix(*args): - """ - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, short Ax, int ir0, - int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, int Ax, int ir0, - int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long long Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, float Ax, int ir0, - int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, double Ax, int ir0, - int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long double Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int ir0, int ir1, int ic0, int ic1) - get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int ir0, int ir1, int ic0, int ic1) - """ - return 
_csr.get_csr_submatrix(*args) - -def csr_sample_values(*args): - """ - csr_sample_values(int n_row, int n_col, int Ap, int Aj, signed char Ax, - int n_samples, int Bi, int Bj, signed char Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, - int n_samples, int Bi, int Bj, unsigned char Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, short Ax, int n_samples, - int Bi, int Bj, short Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, - int n_samples, int Bi, int Bj, unsigned short Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, int Ax, int n_samples, - int Bi, int Bj, int Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, - int n_samples, int Bi, int Bj, unsigned int Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, long long Ax, - int n_samples, int Bi, int Bj, long long Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, - int n_samples, int Bi, int Bj, unsigned long long Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, float Ax, int n_samples, - int Bi, int Bj, float Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, double Ax, int n_samples, - int Bi, int Bj, double Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, long double Ax, - int n_samples, int Bi, int Bj, long double Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, - int n_samples, int Bi, int Bj, npy_cfloat_wrapper Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, - int n_samples, int Bi, int Bj, npy_cdouble_wrapper Bx) - csr_sample_values(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, - int n_samples, int Bi, int Bj, - npy_clongdouble_wrapper Bx) - """ - return _csr.csr_sample_values(*args) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/csr_wrap.cxx.REMOVED.git-id 
b/scipy-0.10.1/scipy/sparse/sparsetools/csr_wrap.cxx.REMOVED.git-id deleted file mode 100644 index 2edbdcde84..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/csr_wrap.cxx.REMOVED.git-id +++ /dev/null @@ -1 +0,0 @@ -476d70df6664b55043416fab38254781e9c1f69e \ No newline at end of file diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/dense.h b/scipy-0.10.1/scipy/sparse/sparsetools/dense.h deleted file mode 100644 index 5c1191ae56..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/dense.h +++ /dev/null @@ -1,83 +0,0 @@ -#ifndef __DENSE_H__ -#define __DENSE_H__ - -// Simplified BLAS routines and other dense linear algebra functions - -/* - * Level 1 - */ - -// y += a*x -template -void axpy(const I n, const T a, const T * x, T * y){ - for(I i = 0; i < n; i++){ - y[i] += a * x[i]; - } -} - -// scale a vector in-place -template -void scal(const I n, const T a, T * x){ - for(I i = 0; i < n; i++){ - x[i] *= a; - } -} - - -// dot product -template -void dot(const I n, const T * x, const T * y){ - T dp = 0; - for(I i = 0; i < n; i++){ - dp += x[i] * y[i]; - } - return dp; -} - - -// vectorize a binary operation -template -void vector_binop(const I n, const T * x, const T * y, T * z, - const binary_operator& op) -{ - for(I i = 0; i < n; i++){ - z[i] = op(x[i],y[i]); - } -} - -//template -//void vector_multiply(const I n, const T * x, const T * y, T * z){ -//{ -// vector_binop(n,x,y,z, std::multiplies() ); -//} - - - -// Level 2 -template -void gemv(const I m, const I n, const T * A, const T * x, T * y){ - for(I i = 0; i < m; i++){ - T dot = y[i]; - for(I j = 0; j < n; j++){ - dot += A[n * i + j] * x[j]; - } - y[i] = dot; - } -} - -// Level 3 -template -void gemm(const I m, const I n, const I k, const T * A, const T * B, T * C){ - for(I i = 0; i < m; i++){ - for(I j = 0; j < n; j++){ - T dot = C[n * i + j]; - for(I _d = 0; _d < k; _d++){ - dot += A[k * i + _d] * B[n * _d + j]; - } - C[n * i + j] = dot; - } - } -} - - -#endif diff --git 
a/scipy-0.10.1/scipy/sparse/sparsetools/dia.h b/scipy-0.10.1/scipy/sparse/sparsetools/dia.h deleted file mode 100644 index 6d9cd20702..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/dia.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef __DIA_H__ -#define __DIA_H__ - -#include - - -/* - * Compute Y += A*X for DIA matrix A and dense vectors X,Y - * - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I n_diags - number of diagonals - * I L - length of each diagonal - * I offsets[n_diags] - diagonal offsets - * T diags[n_diags,L] - nonzeros - * T Xx[n_col] - input vector - * - * Output Arguments: - * T Yx[n_row] - output vector - * - * Note: - * Output array Yx must be preallocated - * Negative offsets correspond to lower diagonals - * Positive offsets correspond to upper diagonals - * - */ -template -void dia_matvec(const I n_row, - const I n_col, - const I n_diags, - const I L, - const I offsets[], - const T diags[], - const T Xx[], - T Yx[]) -{ - for(I i = 0; i < n_diags; i++){ - const I k = offsets[i]; //diagonal offset - - const I i_start = std::max(0,-k); - const I j_start = std::max(0, k); - const I j_end = std::min(std::min(n_row + k, n_col),L); - - const I N = j_end - j_start; //number of elements to process - - const T * diag = diags + i*L + j_start; - const T * x = Xx + j_start; - T * y = Yx + i_start; - - for(I n = 0; n < N; n++){ - y[n] += diag[n] * x[n]; - } - } -} - - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/dia.py b/scipy-0.10.1/scipy/sparse/sparsetools/dia.py deleted file mode 100644 index 896dcecb04..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/dia.py +++ /dev/null @@ -1,105 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 2.0.1+capsulehack -# -# Do not make changes to this file unless you know what you are doing--modify -# the SWIG interface file instead. -# This file is compatible with both classic and new-style classes. 
- -from sys import version_info -if version_info >= (2,6,0): - def swig_import_helper(): - from os.path import dirname - import imp - fp = None - try: - fp, pathname, description = imp.find_module('_dia', [dirname(__file__)]) - except ImportError: - import _dia - return _dia - if fp is not None: - try: - _mod = imp.load_module('_dia', fp, pathname, description) - finally: - fp.close() - return _mod - _dia = swig_import_helper() - del swig_import_helper -else: - import _dia -del version_info -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. -def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'SwigPyObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError(name) - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -try: - _object = object - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 - - -def dia_matvec(*args): - """ - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - signed char diags, signed char Xx, signed char Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - unsigned char diags, unsigned char Xx, unsigned char Yx) - dia_matvec(int n_row, int 
n_col, int n_diags, int L, int offsets, - short diags, short Xx, short Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - unsigned short diags, unsigned short Xx, - unsigned short Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - int diags, int Xx, int Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - unsigned int diags, unsigned int Xx, unsigned int Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - long long diags, long long Xx, long long Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - unsigned long long diags, unsigned long long Xx, - unsigned long long Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - float diags, float Xx, float Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - double diags, double Xx, double Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - long double diags, long double Xx, long double Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - npy_cfloat_wrapper diags, npy_cfloat_wrapper Xx, - npy_cfloat_wrapper Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - npy_cdouble_wrapper diags, npy_cdouble_wrapper Xx, - npy_cdouble_wrapper Yx) - dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, - npy_clongdouble_wrapper diags, npy_clongdouble_wrapper Xx, - npy_clongdouble_wrapper Yx) - """ - return _dia.dia_matvec(*args) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/dia_wrap.cxx b/scipy-0.10.1/scipy/sparse/sparsetools/dia_wrap.cxx deleted file mode 100644 index 8e347bd007..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/dia_wrap.cxx +++ /dev/null @@ -1,6464 +0,0 @@ -/* ---------------------------------------------------------------------------- - * This file was automatically generated by SWIG (http://www.swig.org). 
- * Version 2.0.1+capsulehack - * - * This file is not intended to be easily readable and contains a number of - * coding conventions designed to improve portability and efficiency. Do not make - * changes to this file unless you know what you are doing--modify the SWIG - * interface file instead. - * ----------------------------------------------------------------------------- */ - -#define SWIGPYTHON -#define SWIG_PYTHON_DIRECTOR_NO_VTABLE - - -#ifdef __cplusplus -/* SwigValueWrapper is described in swig.swg */ -template class SwigValueWrapper { - struct SwigMovePointer { - T *ptr; - SwigMovePointer(T *p) : ptr(p) { } - ~SwigMovePointer() { delete ptr; } - SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } - } pointer; - SwigValueWrapper& operator=(const SwigValueWrapper& rhs); - SwigValueWrapper(const SwigValueWrapper& rhs); -public: - SwigValueWrapper() : pointer(0) { } - SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } - operator T&() const { return *pointer.ptr; } - T *operator&() { return pointer.ptr; } -}; - -template T SwigValueInit() { - return T(); -} -#endif - -/* ----------------------------------------------------------------------------- - * This section contains generic SWIG labels for method/variable - * declarations/attributes, and other compiler dependent labels. 
- * ----------------------------------------------------------------------------- */ - -/* template workaround for compilers that cannot correctly implement the C++ standard */ -#ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template -# else -# define SWIGTEMPLATEDISAMBIGUATOR -# endif -#endif - -/* inline attribute */ -#ifndef SWIGINLINE -# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) -# define SWIGINLINE inline -# else -# define SWIGINLINE -# endif -#endif - -/* attribute recognised by some compilers to avoid 'unused' warnings */ -#ifndef SWIGUNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -# elif defined(__ICC) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -#endif - -#ifndef SWIG_MSC_UNSUPPRESS_4505 -# if defined(_MSC_VER) -# pragma warning(disable : 4505) /* unreferenced local function has been removed */ -# endif -#endif - -#ifndef SWIGUNUSEDPARM -# ifdef __cplusplus -# define SWIGUNUSEDPARM(p) -# else -# define SWIGUNUSEDPARM(p) p SWIGUNUSED -# endif -#endif - -/* internal SWIG method */ -#ifndef SWIGINTERN -# define SWIGINTERN static SWIGUNUSED -#endif - -/* internal inline SWIG method */ -#ifndef SWIGINTERNINLINE -# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE -#endif - -/* exporting methods */ -#if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -# ifndef GCC_HASCLASSVISIBILITY -# define GCC_HASCLASSVISIBILITY -# endif -#endif - -#ifndef SWIGEXPORT -# if defined(_WIN32) || 
defined(__WIN32__) || defined(__CYGWIN__) -# if defined(STATIC_LINKED) -# define SWIGEXPORT -# else -# define SWIGEXPORT __declspec(dllexport) -# endif -# else -# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) -# define SWIGEXPORT __attribute__ ((visibility("default"))) -# else -# define SWIGEXPORT -# endif -# endif -#endif - -/* calling conventions for Windows */ -#ifndef SWIGSTDCALL -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# define SWIGSTDCALL __stdcall -# else -# define SWIGSTDCALL -# endif -#endif - -/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ -#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -#endif - -/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ -#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) -# define _SCL_SECURE_NO_DEPRECATE -#endif - - - -/* Python.h has to appear first */ -#include - -/* ----------------------------------------------------------------------------- - * swigrun.swg - * - * This file contains generic C API SWIG runtime support for pointer - * type checking. - * ----------------------------------------------------------------------------- */ - -/* This should only be incremented when either the layout of swig_type_info changes, - or for whatever reason, the runtime changes incompatibly */ -#define SWIG_RUNTIME_VERSION "4" - -/* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */ -#ifdef SWIG_TYPE_TABLE -# define SWIG_QUOTE_STRING(x) #x -# define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x) -# define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE) -#else -# define SWIG_TYPE_TABLE_NAME -#endif - -/* - You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for - creating a static or dynamic library from the SWIG runtime code. 
- In 99.9% of the cases, SWIG just needs to declare them as 'static'. - - But only do this if strictly necessary, ie, if you have problems - with your compiler or suchlike. -*/ - -#ifndef SWIGRUNTIME -# define SWIGRUNTIME SWIGINTERN -#endif - -#ifndef SWIGRUNTIMEINLINE -# define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE -#endif - -/* Generic buffer size */ -#ifndef SWIG_BUFFER_SIZE -# define SWIG_BUFFER_SIZE 1024 -#endif - -/* Flags for pointer conversions */ -#define SWIG_POINTER_DISOWN 0x1 -#define SWIG_CAST_NEW_MEMORY 0x2 - -/* Flags for new pointer objects */ -#define SWIG_POINTER_OWN 0x1 - - -/* - Flags/methods for returning states. - - The SWIG conversion methods, as ConvertPtr, return an integer - that tells if the conversion was successful or not. And if not, - an error code can be returned (see swigerrors.swg for the codes). - - Use the following macros/flags to set or process the returning - states. - - In old versions of SWIG, code such as the following was usually written: - - if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) { - // success code - } else { - //fail code - } - - Now you can be more explicit: - - int res = SWIG_ConvertPtr(obj,vptr,ty.flags); - if (SWIG_IsOK(res)) { - // success code - } else { - // fail code - } - - which is the same really, but now you can also do - - Type *ptr; - int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags); - if (SWIG_IsOK(res)) { - // success code - if (SWIG_IsNewObj(res) { - ... - delete *ptr; - } else { - ... - } - } else { - // fail code - } - - I.e., now SWIG_ConvertPtr can return new objects and you can - identify the case and take care of the deallocation. Of course that - also requires SWIG_ConvertPtr to return new result values, such as - - int SWIG_ConvertPtr(obj, ptr,...) 
{ - if () { - if () { - *ptr = ; - return SWIG_NEWOBJ; - } else { - *ptr = ; - return SWIG_OLDOBJ; - } - } else { - return SWIG_BADOBJ; - } - } - - Of course, returning the plain '0(success)/-1(fail)' still works, but you can be - more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the - SWIG errors code. - - Finally, if the SWIG_CASTRANK_MODE is enabled, the result code - allows to return the 'cast rank', for example, if you have this - - int food(double) - int fooi(int); - - and you call - - food(1) // cast rank '1' (1 -> 1.0) - fooi(1) // cast rank '0' - - just use the SWIG_AddCast()/SWIG_CheckState() -*/ - -#define SWIG_OK (0) -#define SWIG_ERROR (-1) -#define SWIG_IsOK(r) (r >= 0) -#define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError) - -/* The CastRankLimit says how many bits are used for the cast rank */ -#define SWIG_CASTRANKLIMIT (1 << 8) -/* The NewMask denotes the object was created (using new/malloc) */ -#define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1) -/* The TmpMask is for in/out typemaps that use temporal objects */ -#define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1) -/* Simple returning values */ -#define SWIG_BADOBJ (SWIG_ERROR) -#define SWIG_OLDOBJ (SWIG_OK) -#define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK) -#define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK) -/* Check, add and del mask methods */ -#define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r) -#define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r) -#define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK)) -#define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r) -#define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? 
(r & ~SWIG_TMPOBJMASK) : r) -#define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK)) - -/* Cast-Rank Mode */ -#if defined(SWIG_CASTRANK_MODE) -# ifndef SWIG_TypeRank -# define SWIG_TypeRank unsigned long -# endif -# ifndef SWIG_MAXCASTRANK /* Default cast allowed */ -# define SWIG_MAXCASTRANK (2) -# endif -# define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1) -# define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK) -SWIGINTERNINLINE int SWIG_AddCast(int r) { - return SWIG_IsOK(r) ? ((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r; -} -SWIGINTERNINLINE int SWIG_CheckState(int r) { - return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0; -} -#else /* no cast-rank mode */ -# define SWIG_AddCast -# define SWIG_CheckState(r) (SWIG_IsOK(r) ? 1 : 0) -#endif - - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void *(*swig_converter_func)(void *, int *); -typedef struct swig_type_info *(*swig_dycast_func)(void **); - -/* Structure to store information on one type */ -typedef struct swig_type_info { - const char *name; /* mangled name of this type */ - const char *str; /* human readable name of this type */ - swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ - struct swig_cast_info *cast; /* linked list of types that can cast into this type */ - void *clientdata; /* language specific type data */ - int owndata; /* flag if the structure owns the clientdata */ -} swig_type_info; - -/* Structure to store a type and conversion function used for casting */ -typedef struct swig_cast_info { - swig_type_info *type; /* pointer to type that is equivalent to this type */ - swig_converter_func converter; /* function to cast the void pointers */ - struct swig_cast_info *next; /* pointer to next cast in linked list */ - struct swig_cast_info *prev; /* pointer to the previous cast */ -} swig_cast_info; - -/* Structure used to store module information - * Each module generates one structure like this, and the runtime collects - * all of these 
structures and stores them in a circularly linked list.*/ -typedef struct swig_module_info { - swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */ - size_t size; /* Number of types in this module */ - struct swig_module_info *next; /* Pointer to next element in circularly linked list */ - swig_type_info **type_initial; /* Array of initially generated type structures */ - swig_cast_info **cast_initial; /* Array of initially generated casting structures */ - void *clientdata; /* Language specific module data */ -} swig_module_info; - -/* - Compare two type names skipping the space characters, therefore - "char*" == "char *" and "Class" == "Class", etc. - - Return 0 when the two name types are equivalent, as in - strncmp, but skipping ' '. -*/ -SWIGRUNTIME int -SWIG_TypeNameComp(const char *f1, const char *l1, - const char *f2, const char *l2) { - for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) { - while ((*f1 == ' ') && (f1 != l1)) ++f1; - while ((*f2 == ' ') && (f2 != l2)) ++f2; - if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1; - } - return (int)((l1 - f1) - (l2 - f2)); -} - -/* - Check type equivalence in a name list like ||... - Return 0 if not equal, 1 if equal -*/ -SWIGRUNTIME int -SWIG_TypeEquiv(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0; - if (*ne) ++ne; - } - return equiv; -} - -/* - Check type equivalence in a name list like ||... - Return 0 if equal, -1 if nb < tb, 1 if nb > tb -*/ -SWIGRUNTIME int -SWIG_TypeCompare(const char *nb, const char *tb) { - int equiv = 0; - const char* te = tb + strlen(tb); - const char* ne = nb; - while (!equiv && *ne) { - for (nb = ne; *ne; ++ne) { - if (*ne == '|') break; - } - equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 
1 : 0; - if (*ne) ++ne; - } - return equiv; -} - - -/* - Check the typename -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheck(const char *c, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (strcmp(iter->type->name, c) == 0) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Identical to SWIG_TypeCheck, except strcmp is replaced with a pointer comparison -*/ -SWIGRUNTIME swig_cast_info * -SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *ty) { - if (ty) { - swig_cast_info *iter = ty->cast; - while (iter) { - if (iter->type == from) { - if (iter == ty->cast) - return iter; - /* Move iter to the top of the linked list */ - iter->prev->next = iter->next; - if (iter->next) - iter->next->prev = iter->prev; - iter->next = ty->cast; - iter->prev = 0; - if (ty->cast) ty->cast->prev = iter; - ty->cast = iter; - return iter; - } - iter = iter->next; - } - } - return 0; -} - -/* - Cast a pointer up an inheritance hierarchy -*/ -SWIGRUNTIMEINLINE void * -SWIG_TypeCast(swig_cast_info *ty, void *ptr, int *newmemory) { - return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr, newmemory); -} - -/* - Dynamic pointer casting. 
Down an inheritance hierarchy -*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) { - swig_type_info *lastty = ty; - if (!ty || !ty->dcast) return ty; - while (ty && (ty->dcast)) { - ty = (*ty->dcast)(ptr); - if (ty) lastty = ty; - } - return lastty; -} - -/* - Return the name associated with this type -*/ -SWIGRUNTIMEINLINE const char * -SWIG_TypeName(const swig_type_info *ty) { - return ty->name; -} - -/* - Return the pretty name associated with this type, - that is an unmangled type name in a form presentable to the user. -*/ -SWIGRUNTIME const char * -SWIG_TypePrettyName(const swig_type_info *type) { - /* The "str" field contains the equivalent pretty names of the - type, separated by vertical-bar characters. We choose - to print the last name, as it is often (?) the most - specific. */ - if (!type) return NULL; - if (type->str != NULL) { - const char *last_name = type->str; - const char *s; - for (s = type->str; *s; s++) - if (*s == '|') last_name = s+1; - return last_name; - } - else - return type->name; -} - -/* - Set the clientdata field for a type -*/ -SWIGRUNTIME void -SWIG_TypeClientData(swig_type_info *ti, void *clientdata) { - swig_cast_info *cast = ti->cast; - /* if (ti->clientdata == clientdata) return; */ - ti->clientdata = clientdata; - - while (cast) { - if (!cast->converter) { - swig_type_info *tc = cast->type; - if (!tc->clientdata) { - SWIG_TypeClientData(tc, clientdata); - } - } - cast = cast->next; - } -} -SWIGRUNTIME void -SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) { - SWIG_TypeClientData(ti, clientdata); - ti->owndata = 1; -} - -/* - Search for a swig_type_info structure only by mangled name - Search is a O(log #types) - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_MangledTypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - swig_module_info *iter = start; - do { - if (iter->size) { - register size_t l = 0; - register size_t r = iter->size - 1; - do { - /* since l+r >= 0, we can (>> 1) instead (/ 2) */ - register size_t i = (l + r) >> 1; - const char *iname = iter->types[i]->name; - if (iname) { - register int compare = strcmp(name, iname); - if (compare == 0) { - return iter->types[i]; - } else if (compare < 0) { - if (i) { - r = i - 1; - } else { - break; - } - } else if (compare > 0) { - l = i + 1; - } - } else { - break; /* should never happen */ - } - } while (l <= r); - } - iter = iter->next; - } while (iter != end); - return 0; -} - -/* - Search for a swig_type_info structure for either a mangled name or a human readable name. - It first searches the mangled names of the types, which is a O(log #types) - If a type is not found it then searches the human readable names, which is O(#types). - - We start searching at module start, and finish searching when start == end. - Note: if start == end at the beginning of the function, we go all the way around - the circular list. 
-*/ -SWIGRUNTIME swig_type_info * -SWIG_TypeQueryModule(swig_module_info *start, - swig_module_info *end, - const char *name) { - /* STEP 1: Search the name field using binary search */ - swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name); - if (ret) { - return ret; - } else { - /* STEP 2: If the type hasn't been found, do a complete search - of the str field (the human readable name) */ - swig_module_info *iter = start; - do { - register size_t i = 0; - for (; i < iter->size; ++i) { - if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name))) - return iter->types[i]; - } - iter = iter->next; - } while (iter != end); - } - - /* neither found a match */ - return 0; -} - -/* - Pack binary data into a string -*/ -SWIGRUNTIME char * -SWIG_PackData(char *c, void *ptr, size_t sz) { - static const char hex[17] = "0123456789abcdef"; - register const unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register unsigned char uu = *u; - *(c++) = hex[(uu & 0xf0) >> 4]; - *(c++) = hex[uu & 0xf]; - } - return c; -} - -/* - Unpack binary data from a string -*/ -SWIGRUNTIME const char * -SWIG_UnpackData(const char *c, void *ptr, size_t sz) { - register unsigned char *u = (unsigned char *) ptr; - register const unsigned char *eu = u + sz; - for (; u != eu; ++u) { - register char d = *(c++); - register unsigned char uu; - if ((d >= '0') && (d <= '9')) - uu = ((d - '0') << 4); - else if ((d >= 'a') && (d <= 'f')) - uu = ((d - ('a'-10)) << 4); - else - return (char *) 0; - d = *(c++); - if ((d >= '0') && (d <= '9')) - uu |= (d - '0'); - else if ((d >= 'a') && (d <= 'f')) - uu |= (d - ('a'-10)); - else - return (char *) 0; - *u = uu; - } - return c; -} - -/* - Pack 'void *' into a string buffer. 
-*/ -SWIGRUNTIME char * -SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) { - char *r = buff; - if ((2*sizeof(void *) + 2) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,&ptr,sizeof(void *)); - if (strlen(name) + 1 > (bsz - (r - buff))) return 0; - strcpy(r,name); - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - *ptr = (void *) 0; - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sizeof(void *)); -} - -SWIGRUNTIME char * -SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) { - char *r = buff; - size_t lname = (name ? strlen(name) : 0); - if ((2*sz + 2 + lname) > bsz) return 0; - *(r++) = '_'; - r = SWIG_PackData(r,ptr,sz); - if (lname) { - strncpy(r,name,lname+1); - } else { - *r = 0; - } - return buff; -} - -SWIGRUNTIME const char * -SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) { - if (*c != '_') { - if (strcmp(c,"NULL") == 0) { - memset(ptr,0,sz); - return name; - } else { - return 0; - } - } - return SWIG_UnpackData(++c,ptr,sz); -} - -#ifdef __cplusplus -} -#endif - -/* Errors in SWIG */ -#define SWIG_UnknownError -1 -#define SWIG_IOError -2 -#define SWIG_RuntimeError -3 -#define SWIG_IndexError -4 -#define SWIG_TypeError -5 -#define SWIG_DivisionByZero -6 -#define SWIG_OverflowError -7 -#define SWIG_SyntaxError -8 -#define SWIG_ValueError -9 -#define SWIG_SystemError -10 -#define SWIG_AttributeError -11 -#define SWIG_MemoryError -12 -#define SWIG_NullReferenceError -13 - - - -/* Compatibility macros for Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - -#define PyClass_Check(obj) PyObject_IsInstance(obj, (PyObject *)&PyType_Type) -#define PyInt_Check(x) PyLong_Check(x) -#define PyInt_AsLong(x) PyLong_AsLong(x) -#define PyInt_FromLong(x) PyLong_FromLong(x) -#define PyString_Format(fmt, args) PyUnicode_Format(fmt, args) - -#endif - 
-#ifndef Py_TYPE -# define Py_TYPE(op) ((op)->ob_type) -#endif - -/* SWIG APIs for compatibility of both Python 2 & 3 */ - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_FromFormat PyUnicode_FromFormat -#else -# define SWIG_Python_str_FromFormat PyString_FromFormat -#endif - - -/* Warning: This function will allocate a new string in Python 3, - * so please call SWIG_Python_str_DelForPy3(x) to free the space. - */ -SWIGINTERN char* -SWIG_Python_str_AsChar(PyObject *str) -{ -#if PY_VERSION_HEX >= 0x03000000 - char *cstr; - char *newstr; - Py_ssize_t len; - str = PyUnicode_AsUTF8String(str); - PyBytes_AsStringAndSize(str, &cstr, &len); - newstr = (char *) malloc(len+1); - memcpy(newstr, cstr, len+1); - Py_XDECREF(str); - return newstr; -#else - return PyString_AsString(str); -#endif -} - -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_Python_str_DelForPy3(x) free( (void*) (x) ) -#else -# define SWIG_Python_str_DelForPy3(x) -#endif - - -SWIGINTERN PyObject* -SWIG_Python_str_FromChar(const char *c) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_FromString(c); -#else - return PyString_FromString(c); -#endif -} - -/* Add PyOS_snprintf for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) -# define PyOS_snprintf _snprintf -# else -# define PyOS_snprintf snprintf -# endif -#endif - -/* A crude PyString_FromFormat implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 - -#ifndef SWIG_PYBUFFER_SIZE -# define SWIG_PYBUFFER_SIZE 1024 -#endif - -static PyObject * -PyString_FromFormat(const char *fmt, ...) { - va_list ap; - char buf[SWIG_PYBUFFER_SIZE * 2]; - int res; - va_start(ap, fmt); - res = vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - return (res < 0 || res >= (int)sizeof(buf)) ? 
0 : PyString_FromString(buf); -} -#endif - -/* Add PyObject_Del for old Pythons */ -#if PY_VERSION_HEX < 0x01060000 -# define PyObject_Del(op) PyMem_DEL((op)) -#endif -#ifndef PyObject_DEL -# define PyObject_DEL PyObject_Del -#endif - -/* A crude PyExc_StopIteration exception for old Pythons */ -#if PY_VERSION_HEX < 0x02020000 -# ifndef PyExc_StopIteration -# define PyExc_StopIteration PyExc_RuntimeError -# endif -# ifndef PyObject_GenericGetAttr -# define PyObject_GenericGetAttr 0 -# endif -#endif - -/* Py_NotImplemented is defined in 2.1 and up. */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef Py_NotImplemented -# define Py_NotImplemented PyExc_RuntimeError -# endif -#endif - -/* A crude PyString_AsStringAndSize implementation for old Pythons */ -#if PY_VERSION_HEX < 0x02010000 -# ifndef PyString_AsStringAndSize -# define PyString_AsStringAndSize(obj, s, len) {*s = PyString_AsString(obj); *len = *s ? strlen(*s) : 0;} -# endif -#endif - -/* PySequence_Size for old Pythons */ -#if PY_VERSION_HEX < 0x02000000 -# ifndef PySequence_Size -# define PySequence_Size PySequence_Length -# endif -#endif - -/* PyBool_FromLong for old Pythons */ -#if PY_VERSION_HEX < 0x02030000 -static -PyObject *PyBool_FromLong(long ok) -{ - PyObject *result = ok ? 
Py_True : Py_False; - Py_INCREF(result); - return result; -} -#endif - -/* Py_ssize_t for old Pythons */ -/* This code is as recommended by: */ -/* http://www.python.org/dev/peps/pep-0353/#conversion-guidelines */ -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -# define PY_SSIZE_T_MAX INT_MAX -# define PY_SSIZE_T_MIN INT_MIN -#endif - -/* ----------------------------------------------------------------------------- - * error manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIME PyObject* -SWIG_Python_ErrorType(int code) { - PyObject* type = 0; - switch(code) { - case SWIG_MemoryError: - type = PyExc_MemoryError; - break; - case SWIG_IOError: - type = PyExc_IOError; - break; - case SWIG_RuntimeError: - type = PyExc_RuntimeError; - break; - case SWIG_IndexError: - type = PyExc_IndexError; - break; - case SWIG_TypeError: - type = PyExc_TypeError; - break; - case SWIG_DivisionByZero: - type = PyExc_ZeroDivisionError; - break; - case SWIG_OverflowError: - type = PyExc_OverflowError; - break; - case SWIG_SyntaxError: - type = PyExc_SyntaxError; - break; - case SWIG_ValueError: - type = PyExc_ValueError; - break; - case SWIG_SystemError: - type = PyExc_SystemError; - break; - case SWIG_AttributeError: - type = PyExc_AttributeError; - break; - default: - type = PyExc_RuntimeError; - } - return type; -} - - -SWIGRUNTIME void -SWIG_Python_AddErrorMsg(const char* mesg) -{ - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - - if (PyErr_Occurred()) PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - PyErr_Clear(); - Py_XINCREF(type); - - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - Py_DECREF(value); - } else { - PyErr_SetString(PyExc_RuntimeError, mesg); - } -} - -#if defined(SWIG_PYTHON_NO_THREADS) -# if 
defined(SWIG_PYTHON_THREADS) -# undef SWIG_PYTHON_THREADS -# endif -#endif -#if defined(SWIG_PYTHON_THREADS) /* Threading support is enabled */ -# if !defined(SWIG_PYTHON_USE_GIL) && !defined(SWIG_PYTHON_NO_USE_GIL) -# if (PY_VERSION_HEX >= 0x02030000) /* For 2.3 or later, use the PyGILState calls */ -# define SWIG_PYTHON_USE_GIL -# endif -# endif -# if defined(SWIG_PYTHON_USE_GIL) /* Use PyGILState threads calls */ -# ifndef SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_INITIALIZE_THREADS PyEval_InitThreads() -# endif -# ifdef __cplusplus /* C++ code */ - class SWIG_Python_Thread_Block { - bool status; - PyGILState_STATE state; - public: - void end() { if (status) { PyGILState_Release(state); status = false;} } - SWIG_Python_Thread_Block() : status(true), state(PyGILState_Ensure()) {} - ~SWIG_Python_Thread_Block() { end(); } - }; - class SWIG_Python_Thread_Allow { - bool status; - PyThreadState *save; - public: - void end() { if (status) { PyEval_RestoreThread(save); status = false; }} - SWIG_Python_Thread_Allow() : status(true), save(PyEval_SaveThread()) {} - ~SWIG_Python_Thread_Allow() { end(); } - }; -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK SWIG_Python_Thread_Block _swig_thread_block -# define SWIG_PYTHON_THREAD_END_BLOCK _swig_thread_block.end() -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW SWIG_Python_Thread_Allow _swig_thread_allow -# define SWIG_PYTHON_THREAD_END_ALLOW _swig_thread_allow.end() -# else /* C code */ -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK PyGILState_STATE _swig_thread_block = PyGILState_Ensure() -# define SWIG_PYTHON_THREAD_END_BLOCK PyGILState_Release(_swig_thread_block) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW PyThreadState *_swig_thread_allow = PyEval_SaveThread() -# define SWIG_PYTHON_THREAD_END_ALLOW PyEval_RestoreThread(_swig_thread_allow) -# endif -# else /* Old thread way, not implemented, user must provide it */ -# if !defined(SWIG_PYTHON_INITIALIZE_THREADS) -# define SWIG_PYTHON_INITIALIZE_THREADS -# endif -# if 
!defined(SWIG_PYTHON_THREAD_BEGIN_BLOCK) -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_END_BLOCK) -# define SWIG_PYTHON_THREAD_END_BLOCK -# endif -# if !defined(SWIG_PYTHON_THREAD_BEGIN_ALLOW) -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# endif -# if !defined(SWIG_PYTHON_THREAD_END_ALLOW) -# define SWIG_PYTHON_THREAD_END_ALLOW -# endif -# endif -#else /* No thread support */ -# define SWIG_PYTHON_INITIALIZE_THREADS -# define SWIG_PYTHON_THREAD_BEGIN_BLOCK -# define SWIG_PYTHON_THREAD_END_BLOCK -# define SWIG_PYTHON_THREAD_BEGIN_ALLOW -# define SWIG_PYTHON_THREAD_END_ALLOW -#endif - -/* ----------------------------------------------------------------------------- - * Python API portion that goes into the runtime - * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* ----------------------------------------------------------------------------- - * Constant declarations - * ----------------------------------------------------------------------------- */ - -/* Constant Types */ -#define SWIG_PY_POINTER 4 -#define SWIG_PY_BINARY 5 - -/* Constant information structure */ -typedef struct swig_const_info { - int type; - char *name; - long lvalue; - double dvalue; - void *pvalue; - swig_type_info **ptype; -} swig_const_info; - - -/* ----------------------------------------------------------------------------- - * Wrapper of PyInstanceMethod_New() used in Python 3 - * It is exported to the generated module, used for -fastproxy - * ----------------------------------------------------------------------------- */ -SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *self, PyObject *func) -{ -#if PY_VERSION_HEX >= 0x03000000 - return PyInstanceMethod_New(func); -#else - return NULL; -#endif -} - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - -/* 
----------------------------------------------------------------------------- - * pyrun.swg - * - * This file contains the runtime support for Python modules - * and includes code for managing global variables and pointer - * type checking. - * - * ----------------------------------------------------------------------------- */ - -/* Common SWIG API */ - -/* for raw pointers */ -#define SWIG_Python_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, 0) -#define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtr(obj, pptr, type, flags) -#define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, own) -#define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(ptr, type, flags) -#define SWIG_CheckImplicit(ty) SWIG_Python_CheckImplicit(ty) -#define SWIG_AcquirePtr(ptr, src) SWIG_Python_AcquirePtr(ptr, src) -#define swig_owntype int - -/* for raw packed data */ -#define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewPackedObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - -/* for class or struct pointers */ -#define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags) -#define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags) - -/* for C or C++ function pointers */ -#define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_Python_ConvertFunctionPtr(obj, pptr, type) -#define SWIG_NewFunctionPtrObj(ptr, type) SWIG_Python_NewPointerObj(ptr, type, 0) - -/* for C++ member pointers, ie, member methods */ -#define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) -#define SWIG_NewMemberObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) - - -/* Runtime API */ - -#define SWIG_GetModule(clientdata) SWIG_Python_GetModule() -#define SWIG_SetModule(clientdata, pointer) SWIG_Python_SetModule(pointer) -#define 
SWIG_NewClientData(obj) SwigPyClientData_New(obj) - -#define SWIG_SetErrorObj SWIG_Python_SetErrorObj -#define SWIG_SetErrorMsg SWIG_Python_SetErrorMsg -#define SWIG_ErrorType(code) SWIG_Python_ErrorType(code) -#define SWIG_Error(code, msg) SWIG_Python_SetErrorMsg(SWIG_ErrorType(code), msg) -#define SWIG_fail goto fail - -/* - * Python 2.7 and newer and Python 3.1 and newer should use Capsules API instead of - * CObjects API. - */ -#if ((PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION > 6) || \ - (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 0)) -#define USE_CAPSULES -#define TYPE_POINTER_NAME \ - ((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION ".type_pointer_capsule" SWIG_TYPE_TABLE_NAME) -#endif - -/* Runtime API implementation */ - -/* Error manipulation */ - -SWIGINTERN void -SWIG_Python_SetErrorObj(PyObject *errtype, PyObject *obj) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetObject(errtype, obj); - Py_DECREF(obj); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -SWIGINTERN void -SWIG_Python_SetErrorMsg(PyObject *errtype, const char *msg) { - SWIG_PYTHON_THREAD_BEGIN_BLOCK; - PyErr_SetString(errtype, (char *) msg); - SWIG_PYTHON_THREAD_END_BLOCK; -} - -#define SWIG_Python_Raise(obj, type, desc) SWIG_Python_SetErrorObj(SWIG_Python_ExceptionType(desc), obj) - -/* Set a constant value */ - -SWIGINTERN void -SWIG_Python_SetConstant(PyObject *d, const char *name, PyObject *obj) { - PyDict_SetItemString(d, (char*) name, obj); - Py_DECREF(obj); -} - -/* Append a value to the result obj */ - -SWIGINTERN PyObject* -SWIG_Python_AppendOutput(PyObject* result, PyObject* obj) { -#if !defined(SWIG_PYTHON_OUTPUT_TUPLE) - if (!result) { - result = obj; - } else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyList_Check(result)) { - PyObject *o2 = result; - result = PyList_New(1); - PyList_SetItem(result, 0, o2); - } - PyList_Append(result,obj); - Py_DECREF(obj); - } - return result; -#else - PyObject* o2; - PyObject* o3; - if (!result) { - result = obj; - } 
else if (result == Py_None) { - Py_DECREF(result); - result = obj; - } else { - if (!PyTuple_Check(result)) { - o2 = result; - result = PyTuple_New(1); - PyTuple_SET_ITEM(result, 0, o2); - } - o3 = PyTuple_New(1); - PyTuple_SET_ITEM(o3, 0, obj); - o2 = result; - result = PySequence_Concat(o2, o3); - Py_DECREF(o2); - Py_DECREF(o3); - } - return result; -#endif -} - -/* Unpack the argument tuple */ - -SWIGINTERN int -SWIG_Python_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, PyObject **objs) -{ - if (!args) { - if (!min && !max) { - return 1; - } else { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got none", - name, (min == max ? "" : "at least "), (int)min); - return 0; - } - } - if (!PyTuple_Check(args)) { - PyErr_SetString(PyExc_SystemError, "UnpackTuple() argument list is not a tuple"); - return 0; - } else { - register Py_ssize_t l = PyTuple_GET_SIZE(args); - if (l < min) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? "" : "at least "), (int)min, (int)l); - return 0; - } else if (l > max) { - PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", - name, (min == max ? 
"" : "at most "), (int)max, (int)l); - return 0; - } else { - register int i; - for (i = 0; i < l; ++i) { - objs[i] = PyTuple_GET_ITEM(args, i); - } - for (; l < max; ++l) { - objs[l] = 0; - } - return i + 1; - } - } -} - -/* A functor is a function object with one single object argument */ -#if PY_VERSION_HEX >= 0x02020000 -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunctionObjArgs(functor, obj, NULL); -#else -#define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunction(functor, "O", obj); -#endif - -/* - Helper for static pointer initialization for both C and C++ code, for example - static PyObject *SWIG_STATIC_POINTER(MyVar) = NewSomething(...); -*/ -#ifdef __cplusplus -#define SWIG_STATIC_POINTER(var) var -#else -#define SWIG_STATIC_POINTER(var) var = 0; if (!var) var -#endif - -/* ----------------------------------------------------------------------------- - * Pointer declarations - * ----------------------------------------------------------------------------- */ - -/* Flags for new pointer objects */ -#define SWIG_POINTER_NOSHADOW (SWIG_POINTER_OWN << 1) -#define SWIG_POINTER_NEW (SWIG_POINTER_NOSHADOW | SWIG_POINTER_OWN) - -#define SWIG_POINTER_IMPLICIT_CONV (SWIG_POINTER_DISOWN << 1) - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* cc-mode */ -#endif -#endif - -/* How to access Py_None */ -#if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# ifndef SWIG_PYTHON_NO_BUILD_NONE -# ifndef SWIG_PYTHON_BUILD_NONE -# define SWIG_PYTHON_BUILD_NONE -# endif -# endif -#endif - -#ifdef SWIG_PYTHON_BUILD_NONE -# ifdef Py_None -# undef Py_None -# define Py_None SWIG_Py_None() -# endif -SWIGRUNTIMEINLINE PyObject * -_SWIG_Py_None(void) -{ - PyObject *none = Py_BuildValue((char*)""); - Py_DECREF(none); - return none; -} -SWIGRUNTIME PyObject * -SWIG_Py_None(void) -{ - static PyObject *SWIG_STATIC_POINTER(none) = _SWIG_Py_None(); - return none; -} -#endif - -/* The python void return value */ - -SWIGRUNTIMEINLINE PyObject * 
-SWIG_Py_Void(void) -{ - PyObject *none = Py_None; - Py_INCREF(none); - return none; -} - -/* SwigPyClientData */ - -typedef struct { - PyObject *klass; - PyObject *newraw; - PyObject *newargs; - PyObject *destroy; - int delargs; - int implicitconv; -} SwigPyClientData; - -SWIGRUNTIMEINLINE int -SWIG_Python_CheckImplicit(swig_type_info *ty) -{ - SwigPyClientData *data = (SwigPyClientData *)ty->clientdata; - return data ? data->implicitconv : 0; -} - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_ExceptionType(swig_type_info *desc) { - SwigPyClientData *data = desc ? (SwigPyClientData *) desc->clientdata : 0; - PyObject *klass = data ? data->klass : 0; - return (klass ? klass : PyExc_RuntimeError); -} - - -SWIGRUNTIME SwigPyClientData * -SwigPyClientData_New(PyObject* obj) -{ - if (!obj) { - return 0; - } else { - SwigPyClientData *data = (SwigPyClientData *)malloc(sizeof(SwigPyClientData)); - /* the klass element */ - data->klass = obj; - Py_INCREF(data->klass); - /* the newraw method and newargs arguments used to create a new raw instance */ - if (PyClass_Check(obj)) { - data->newraw = 0; - data->newargs = obj; - Py_INCREF(obj); - } else { -#if (PY_VERSION_HEX < 0x02020000) - data->newraw = 0; -#else - data->newraw = PyObject_GetAttrString(data->klass, (char *)"__new__"); -#endif - if (data->newraw) { - Py_INCREF(data->newraw); - data->newargs = PyTuple_New(1); - PyTuple_SetItem(data->newargs, 0, obj); - } else { - data->newargs = obj; - } - Py_INCREF(data->newargs); - } - /* the destroy method, aka as the C++ delete method */ - data->destroy = PyObject_GetAttrString(data->klass, (char *)"__swig_destroy__"); - if (PyErr_Occurred()) { - PyErr_Clear(); - data->destroy = 0; - } - if (data->destroy) { - int flags; - Py_INCREF(data->destroy); - flags = PyCFunction_GET_FLAGS(data->destroy); -#ifdef METH_O - data->delargs = !(flags & (METH_O)); -#else - data->delargs = 0; -#endif - } else { - data->delargs = 0; - } - data->implicitconv = 0; - return data; - } -} - 
-SWIGRUNTIME void -SwigPyClientData_Del(SwigPyClientData* data) -{ - Py_XDECREF(data->newraw); - Py_XDECREF(data->newargs); - Py_XDECREF(data->destroy); -} - -/* =============== SwigPyObject =====================*/ - -typedef struct { - PyObject_HEAD - void *ptr; - swig_type_info *ty; - int own; - PyObject *next; -} SwigPyObject; - -SWIGRUNTIME PyObject * -SwigPyObject_long(SwigPyObject *v) -{ - return PyLong_FromVoidPtr(v->ptr); -} - -SWIGRUNTIME PyObject * -SwigPyObject_format(const char* fmt, SwigPyObject *v) -{ - PyObject *res = NULL; - PyObject *args = PyTuple_New(1); - if (args) { - if (PyTuple_SetItem(args, 0, SwigPyObject_long(v)) == 0) { - PyObject *ofmt = SWIG_Python_str_FromChar(fmt); - if (ofmt) { -#if PY_VERSION_HEX >= 0x03000000 - res = PyUnicode_Format(ofmt,args); -#else - res = PyString_Format(ofmt,args); -#endif - Py_DECREF(ofmt); - } - Py_DECREF(args); - } - } - return res; -} - -SWIGRUNTIME PyObject * -SwigPyObject_oct(SwigPyObject *v) -{ - return SwigPyObject_format("%o",v); -} - -SWIGRUNTIME PyObject * -SwigPyObject_hex(SwigPyObject *v) -{ - return SwigPyObject_format("%x",v); -} - -SWIGRUNTIME PyObject * -#ifdef METH_NOARGS -SwigPyObject_repr(SwigPyObject *v) -#else -SwigPyObject_repr(SwigPyObject *v, PyObject *args) -#endif -{ - const char *name = SWIG_TypePrettyName(v->ty); - PyObject *repr = SWIG_Python_str_FromFormat("", name, v); - if (v->next) { -#ifdef METH_NOARGS - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next); -#else - PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next, args); -#endif -#if PY_VERSION_HEX >= 0x03000000 - PyObject *joined = PyUnicode_Concat(repr, nrep); - Py_DecRef(repr); - Py_DecRef(nrep); - repr = joined; -#else - PyString_ConcatAndDel(&repr,nrep); -#endif - } - return repr; -} - -SWIGRUNTIME int -SwigPyObject_print(SwigPyObject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char *str; -#ifdef METH_NOARGS - PyObject *repr = SwigPyObject_repr(v); -#else - PyObject *repr = SwigPyObject_repr(v, 
NULL); -#endif - if (repr) { - str = SWIG_Python_str_AsChar(repr); - fputs(str, fp); - SWIG_Python_str_DelForPy3(str); - Py_DECREF(repr); - return 0; - } else { - return 1; - } -} - -SWIGRUNTIME PyObject * -SwigPyObject_str(SwigPyObject *v) -{ - char result[SWIG_BUFFER_SIZE]; - return SWIG_PackVoidPtr(result, v->ptr, v->ty->name, sizeof(result)) ? - SWIG_Python_str_FromChar(result) : 0; -} - -SWIGRUNTIME int -SwigPyObject_compare(SwigPyObject *v, SwigPyObject *w) -{ - void *i = v->ptr; - void *j = w->ptr; - return (i < j) ? -1 : ((i > j) ? 1 : 0); -} - -/* Added for Python 3.x, would it also be useful for Python 2.x? */ -SWIGRUNTIME PyObject* -SwigPyObject_richcompare(SwigPyObject *v, SwigPyObject *w, int op) -{ - PyObject* res; - if( op != Py_EQ && op != Py_NE ) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - if( (SwigPyObject_compare(v, w)==0) == (op == Py_EQ) ) - res = Py_True; - else - res = Py_False; - Py_INCREF(res); - return res; -} - - -SWIGRUNTIME PyTypeObject* _PySwigObject_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyObject_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigObject_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyObject_Check(PyObject *op) { - return (Py_TYPE(op) == SwigPyObject_type()) - || (strcmp(Py_TYPE(op)->tp_name,"SwigPyObject") == 0); -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own); - -SWIGRUNTIME void -SwigPyObject_dealloc(PyObject *v) -{ - SwigPyObject *sobj = (SwigPyObject *) v; - PyObject *next = sobj->next; - if (sobj->own == SWIG_POINTER_OWN) { - swig_type_info *ty = sobj->ty; - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - PyObject *destroy = data ? 
data->destroy : 0; - if (destroy) { - /* destroy is always a VARARGS method */ - PyObject *res; - if (data->delargs) { - /* we need to create a temporary object to carry the destroy operation */ - PyObject *tmp = SwigPyObject_New(sobj->ptr, ty, 0); - res = SWIG_Python_CallFunctor(destroy, tmp); - Py_DECREF(tmp); - } else { - PyCFunction meth = PyCFunction_GET_FUNCTION(destroy); - PyObject *mself = PyCFunction_GET_SELF(destroy); - res = ((*meth)(mself, v)); - } - Py_XDECREF(res); - } -#if !defined(SWIG_PYTHON_SILENT_MEMLEAK) - else { - const char *name = SWIG_TypePrettyName(ty); - printf("swig/python detected a memory leak of type '%s', no destructor found.\n", (name ? name : "unknown")); - } -#endif - } - Py_XDECREF(next); - PyObject_DEL(v); -} - -SWIGRUNTIME PyObject* -SwigPyObject_append(PyObject* v, PyObject* next) -{ - SwigPyObject *sobj = (SwigPyObject *) v; -#ifndef METH_O - PyObject *tmp = 0; - if (!PyArg_ParseTuple(next,(char *)"O:append", &tmp)) return NULL; - next = tmp; -#endif - if (!SwigPyObject_Check(next)) { - return NULL; - } - sobj->next = next; - Py_INCREF(next); - return SWIG_Py_Void(); -} - -SWIGRUNTIME PyObject* -#ifdef METH_NOARGS -SwigPyObject_next(PyObject* v) -#else -SwigPyObject_next(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *) v; - if (sobj->next) { - Py_INCREF(sobj->next); - return sobj->next; - } else { - return SWIG_Py_Void(); - } -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_disown(PyObject *v) -#else -SwigPyObject_disown(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = 0; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* -#ifdef METH_NOARGS -SwigPyObject_acquire(PyObject *v) -#else -SwigPyObject_acquire(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) -#endif -{ - SwigPyObject *sobj = (SwigPyObject *)v; - sobj->own = SWIG_POINTER_OWN; - return SWIG_Py_Void(); -} - -SWIGINTERN PyObject* 
-SwigPyObject_own(PyObject *v, PyObject *args) -{ - PyObject *val = 0; -#if (PY_VERSION_HEX < 0x02020000) - if (!PyArg_ParseTuple(args,(char *)"|O:own",&val)) -#else - if (!PyArg_UnpackTuple(args, (char *)"own", 0, 1, &val)) -#endif - { - return NULL; - } - else - { - SwigPyObject *sobj = (SwigPyObject *)v; - PyObject *obj = PyBool_FromLong(sobj->own); - if (val) { -#ifdef METH_NOARGS - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v); - } else { - SwigPyObject_disown(v); - } -#else - if (PyObject_IsTrue(val)) { - SwigPyObject_acquire(v,args); - } else { - SwigPyObject_disown(v,args); - } -#endif - } - return obj; - } -} - -#ifdef METH_O -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_NOARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_NOARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_O, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_NOARGS, (char *)"returns the next 'this' object"}, - {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_NOARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#else -static PyMethodDef -swigobject_methods[] = { - {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_VARARGS, (char *)"releases ownership of the pointer"}, - {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_VARARGS, (char *)"aquires ownership of the pointer"}, - {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, - {(char *)"append", (PyCFunction)SwigPyObject_append, METH_VARARGS, (char *)"appends another 'this' object"}, - {(char *)"next", (PyCFunction)SwigPyObject_next, METH_VARARGS, (char *)"returns the next 'this' object"}, - 
{(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_VARARGS, (char *)"returns object representation"}, - {0, 0, 0, 0} -}; -#endif - -#if PY_VERSION_HEX < 0x02020000 -SWIGINTERN PyObject * -SwigPyObject_getattr(SwigPyObject *sobj,char *name) -{ - return Py_FindMethod(swigobject_methods, (PyObject *)sobj, name); -} -#endif - -SWIGRUNTIME PyTypeObject* -_PySwigObject_type(void) { - static char swigobject_doc[] = "Swig object carries a C/C++ instance pointer"; - - static PyNumberMethods SwigPyObject_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - /* nb_divide removed in Python 3 */ -#if PY_VERSION_HEX < 0x03000000 - (binaryfunc)0, /*nb_divide*/ -#endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0,/*nb_power*/ - (unaryfunc)0, /*nb_negative*/ - (unaryfunc)0, /*nb_positive*/ - (unaryfunc)0, /*nb_absolute*/ - (inquiry)0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ -#if PY_VERSION_HEX < 0x03000000 - 0, /*nb_coerce*/ -#endif - (unaryfunc)SwigPyObject_long, /*nb_int*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_long, /*nb_long*/ -#else - 0, /*nb_reserved*/ -#endif - (unaryfunc)0, /*nb_float*/ -#if PY_VERSION_HEX < 0x03000000 - (unaryfunc)SwigPyObject_oct, /*nb_oct*/ - (unaryfunc)SwigPyObject_hex, /*nb_hex*/ -#endif -#if PY_VERSION_HEX >= 0x03000000 /* 3.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index, nb_inplace_divide removed */ -#elif PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */ -#elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ -#elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */ - 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */ -#endif - }; - - static PyTypeObject swigpyobject_type; - static int type_init = 0; - if 
(!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyObject", /* tp_name */ - sizeof(SwigPyObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyObject_dealloc, /* tp_dealloc */ - (printfunc)SwigPyObject_print, /* tp_print */ -#if PY_VERSION_HEX < 0x02020000 - (getattrfunc)SwigPyObject_getattr, /* tp_getattr */ -#else - (getattrfunc)0, /* tp_getattr */ -#endif - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX >= 0x03000000 - 0, /* tp_reserved in 3.0.1, tp_compare in 3.0.0 but not used */ -#else - (cmpfunc)SwigPyObject_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyObject_repr, /* tp_repr */ - &SwigPyObject_as_number, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyObject_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigobject_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)SwigPyObject_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* tp_iternext */ - swigobject_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpyobject_type = tmp; - /* for Python 3 we already assigned ob_type in 
PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpyobject_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpyobject_type; -} - -SWIGRUNTIME PyObject * -SwigPyObject_New(void *ptr, swig_type_info *ty, int own) -{ - SwigPyObject *sobj = PyObject_NEW(SwigPyObject, SwigPyObject_type()); - if (sobj) { - sobj->ptr = ptr; - sobj->ty = ty; - sobj->own = own; - sobj->next = 0; - } - return (PyObject *)sobj; -} - -/* ----------------------------------------------------------------------------- - * Implements a simple Swig Packed type, and use it instead of string - * ----------------------------------------------------------------------------- */ - -typedef struct { - PyObject_HEAD - void *pack; - swig_type_info *ty; - size_t size; -} SwigPyPacked; - -SWIGRUNTIME int -SwigPyPacked_print(SwigPyPacked *v, FILE *fp, int SWIGUNUSEDPARM(flags)) -{ - char result[SWIG_BUFFER_SIZE]; - fputs("pack, v->size, 0, sizeof(result))) { - fputs("at ", fp); - fputs(result, fp); - } - fputs(v->ty->name,fp); - fputs(">", fp); - return 0; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_repr(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) { - return SWIG_Python_str_FromFormat("", result, v->ty->name); - } else { - return SWIG_Python_str_FromFormat("", v->ty->name); - } -} - -SWIGRUNTIME PyObject * -SwigPyPacked_str(SwigPyPacked *v) -{ - char result[SWIG_BUFFER_SIZE]; - if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))){ - return SWIG_Python_str_FromFormat("%s%s", result, v->ty->name); - } else { - return SWIG_Python_str_FromChar(v->ty->name); - } -} - -SWIGRUNTIME int -SwigPyPacked_compare(SwigPyPacked *v, SwigPyPacked *w) -{ - size_t i = v->size; - size_t j = w->size; - int s = (i < j) ? -1 : ((i > j) ? 1 : 0); - return s ? 
s : strncmp((char *)v->pack, (char *)w->pack, 2*v->size); -} - -SWIGRUNTIME PyTypeObject* _PySwigPacked_type(void); - -SWIGRUNTIME PyTypeObject* -SwigPyPacked_type(void) { - static PyTypeObject *SWIG_STATIC_POINTER(type) = _PySwigPacked_type(); - return type; -} - -SWIGRUNTIMEINLINE int -SwigPyPacked_Check(PyObject *op) { - return ((op)->ob_type == _PySwigPacked_type()) - || (strcmp((op)->ob_type->tp_name,"SwigPyPacked") == 0); -} - -SWIGRUNTIME void -SwigPyPacked_dealloc(PyObject *v) -{ - if (SwigPyPacked_Check(v)) { - SwigPyPacked *sobj = (SwigPyPacked *) v; - free(sobj->pack); - } - PyObject_DEL(v); -} - -SWIGRUNTIME PyTypeObject* -_PySwigPacked_type(void) { - static char swigpacked_doc[] = "Swig object carries a C/C++ instance pointer"; - static PyTypeObject swigpypacked_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX>=0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ -#endif - (char *)"SwigPyPacked", /* tp_name */ - sizeof(SwigPyPacked), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)SwigPyPacked_dealloc, /* tp_dealloc */ - (printfunc)SwigPyPacked_print, /* tp_print */ - (getattrfunc)0, /* tp_getattr */ - (setattrfunc)0, /* tp_setattr */ -#if PY_VERSION_HEX>=0x03000000 - 0, /* tp_reserved in 3.0.1 */ -#else - (cmpfunc)SwigPyPacked_compare, /* tp_compare */ -#endif - (reprfunc)SwigPyPacked_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)0, /* tp_hash */ - (ternaryfunc)0, /* tp_call */ - (reprfunc)SwigPyPacked_str, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - swigpacked_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0, /* tp_iter */ - 0, /* 
tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - swigpypacked_type = tmp; - /* for Python 3 the ob_type already assigned in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - swigpypacked_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &swigpypacked_type; -} - -SWIGRUNTIME PyObject * -SwigPyPacked_New(void *ptr, size_t size, swig_type_info *ty) -{ - SwigPyPacked *sobj = PyObject_NEW(SwigPyPacked, SwigPyPacked_type()); - if (sobj) { - void *pack = malloc(size); - if (pack) { - memcpy(pack, ptr, size); - sobj->pack = pack; - sobj->ty = ty; - sobj->size = size; - } else { - PyObject_DEL((PyObject *) sobj); - sobj = 0; - } - } - return (PyObject *) sobj; -} - -SWIGRUNTIME swig_type_info * -SwigPyPacked_UnpackData(PyObject *obj, void *ptr, size_t size) -{ - if (SwigPyPacked_Check(obj)) { - SwigPyPacked *sobj = (SwigPyPacked *)obj; - if (sobj->size != size) return 0; - memcpy(ptr, sobj->pack, size); - return sobj->ty; - } else { - return 0; - } -} - -/* ----------------------------------------------------------------------------- - * pointers/data manipulation - * ----------------------------------------------------------------------------- */ - -SWIGRUNTIMEINLINE PyObject * -_SWIG_This(void) -{ - return SWIG_Python_str_FromChar("this"); -} - -static PyObject *swig_this = NULL; - -SWIGRUNTIME PyObject * -SWIG_This(void) -{ - if (swig_this == NULL) - swig_this = _SWIG_This(); - return swig_this; -} - -/* #define SWIG_PYTHON_SLOW_GETSET_THIS */ - -/* TODO: I don't know 
how to implement the fast getset in Python 3 right now */ -#if PY_VERSION_HEX>=0x03000000 -#define SWIG_PYTHON_SLOW_GETSET_THIS -#endif - -SWIGRUNTIME SwigPyObject * -SWIG_Python_GetSwigThis(PyObject *pyobj) -{ - if (SwigPyObject_Check(pyobj)) { - return (SwigPyObject *) pyobj; - } else { - PyObject *obj = 0; -#if (!defined(SWIG_PYTHON_SLOW_GETSET_THIS) && (PY_VERSION_HEX >= 0x02030000)) - if (PyInstance_Check(pyobj)) { - obj = _PyInstance_Lookup(pyobj, SWIG_This()); - } else { - PyObject **dictptr = _PyObject_GetDictPtr(pyobj); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - obj = dict ? PyDict_GetItem(dict, SWIG_This()) : 0; - } else { -#ifdef PyWeakref_CheckProxy - if (PyWeakref_CheckProxy(pyobj)) { - PyObject *wobj = PyWeakref_GET_OBJECT(pyobj); - return wobj ? SWIG_Python_GetSwigThis(wobj) : 0; - } -#endif - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } - } - } -#else - obj = PyObject_GetAttr(pyobj,SWIG_This()); - if (obj) { - Py_DECREF(obj); - } else { - if (PyErr_Occurred()) PyErr_Clear(); - return 0; - } -#endif - if (obj && !SwigPyObject_Check(obj)) { - /* a PyObject is called 'this', try to get the 'real this' - SwigPyObject from it */ - return SWIG_Python_GetSwigThis(obj); - } - return (SwigPyObject *)obj; - } -} - -/* Acquire a pointer value */ - -SWIGRUNTIME int -SWIG_Python_AcquirePtr(PyObject *obj, int own) { - if (own == SWIG_POINTER_OWN) { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (sobj) { - int oldown = sobj->own; - sobj->own = own; - return oldown; - } - } - return 0; -} - -/* Convert a pointer value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPtrAndOwn(PyObject *obj, void **ptr, swig_type_info *ty, int flags, int *own) { - if (!obj) return SWIG_ERROR; - if (obj == Py_None) { - if (ptr) *ptr = 0; - return SWIG_OK; - } else { - SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); - if (own) - *own = 0; - while (sobj) { - void *vptr = 
sobj->ptr; - if (ty) { - swig_type_info *to = sobj->ty; - if (to == ty) { - /* no type cast needed */ - if (ptr) *ptr = vptr; - break; - } else { - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) { - sobj = (SwigPyObject *)sobj->next; - } else { - if (ptr) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - if (newmemory == SWIG_CAST_NEW_MEMORY) { - assert(own); /* badly formed typemap which will lead to a memory leak - it must set and use own to delete *ptr */ - if (own) - *own = *own | SWIG_CAST_NEW_MEMORY; - } - } - break; - } - } - } else { - if (ptr) *ptr = vptr; - break; - } - } - if (sobj) { - if (own) - *own = *own | sobj->own; - if (flags & SWIG_POINTER_DISOWN) { - sobj->own = 0; - } - return SWIG_OK; - } else { - int res = SWIG_ERROR; - if (flags & SWIG_POINTER_IMPLICIT_CONV) { - SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; - if (data && !data->implicitconv) { - PyObject *klass = data->klass; - if (klass) { - PyObject *impconv; - data->implicitconv = 1; /* avoid recursion and call 'explicit' constructors*/ - impconv = SWIG_Python_CallFunctor(klass, obj); - data->implicitconv = 0; - if (PyErr_Occurred()) { - PyErr_Clear(); - impconv = 0; - } - if (impconv) { - SwigPyObject *iobj = SWIG_Python_GetSwigThis(impconv); - if (iobj) { - void *vptr; - res = SWIG_Python_ConvertPtrAndOwn((PyObject*)iobj, &vptr, ty, 0, 0); - if (SWIG_IsOK(res)) { - if (ptr) { - *ptr = vptr; - /* transfer the ownership to 'ptr' */ - iobj->own = 0; - res = SWIG_AddCast(res); - res = SWIG_AddNewMask(res); - } else { - res = SWIG_AddCast(res); - } - } - } - Py_DECREF(impconv); - } - } - } - } - return res; - } - } -} - -/* Convert a function ptr value */ - -SWIGRUNTIME int -SWIG_Python_ConvertFunctionPtr(PyObject *obj, void **ptr, swig_type_info *ty) { - if (!PyCFunction_Check(obj)) { - return SWIG_ConvertPtr(obj, ptr, ty, 0); - } else { - void *vptr = 0; - - /* here we get the method pointer for callbacks */ - const char *doc = 
(((PyCFunctionObject *)obj) -> m_ml -> ml_doc); - const char *desc = doc ? strstr(doc, "swig_ptr: ") : 0; - if (desc) - desc = ty ? SWIG_UnpackVoidPtr(desc + 10, &vptr, ty->name) : 0; - if (!desc) - return SWIG_ERROR; - if (ty) { - swig_cast_info *tc = SWIG_TypeCheck(desc,ty); - if (tc) { - int newmemory = 0; - *ptr = SWIG_TypeCast(tc,vptr,&newmemory); - assert(!newmemory); /* newmemory handling not yet implemented */ - } else { - return SWIG_ERROR; - } - } else { - *ptr = vptr; - } - return SWIG_OK; - } -} - -/* Convert a packed value value */ - -SWIGRUNTIME int -SWIG_Python_ConvertPacked(PyObject *obj, void *ptr, size_t sz, swig_type_info *ty) { - swig_type_info *to = SwigPyPacked_UnpackData(obj, ptr, sz); - if (!to) return SWIG_ERROR; - if (ty) { - if (to != ty) { - /* check type cast? */ - swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); - if (!tc) return SWIG_ERROR; - } - } - return SWIG_OK; -} - -/* ----------------------------------------------------------------------------- - * Create a new pointer object - * ----------------------------------------------------------------------------- */ - -/* - Create a new instance object, without calling __init__, and set the - 'this' attribute. 
-*/ - -SWIGRUNTIME PyObject* -SWIG_Python_NewShadowInstance(SwigPyClientData *data, PyObject *swig_this) -{ -#if (PY_VERSION_HEX >= 0x02020000) - PyObject *inst = 0; - PyObject *newraw = data->newraw; - if (newraw) { - inst = PyObject_Call(newraw, data->newargs, NULL); - if (inst) { -#if !defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - PyObject *dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - PyDict_SetItem(dict, SWIG_This(), swig_this); - } - } -#else - PyObject *key = SWIG_This(); - PyObject_SetAttr(inst, key, swig_this); -#endif - } - } else { -#if PY_VERSION_HEX >= 0x03000000 - inst = PyBaseObject_Type.tp_new((PyTypeObject*) data->newargs, Py_None, Py_None); - PyObject_SetAttr(inst, SWIG_This(), swig_this); - Py_TYPE(inst)->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; -#else - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); -#endif - } - return inst; -#else -#if (PY_VERSION_HEX >= 0x02010000) - PyObject *inst; - PyObject *dict = PyDict_New(); - PyDict_SetItem(dict, SWIG_This(), swig_this); - inst = PyInstance_NewRaw(data->newargs, dict); - Py_DECREF(dict); - return (PyObject *) inst; -#else - PyInstanceObject *inst = PyObject_NEW(PyInstanceObject, &PyInstance_Type); - if (inst == NULL) { - return NULL; - } - inst->in_class = (PyClassObject *)data->newargs; - Py_INCREF(inst->in_class); - inst->in_dict = PyDict_New(); - if (inst->in_dict == NULL) { - Py_DECREF(inst); - return NULL; - } -#ifdef Py_TPFLAGS_HAVE_WEAKREFS - inst->in_weakreflist = NULL; -#endif -#ifdef Py_TPFLAGS_GC - PyObject_GC_Init(inst); -#endif - PyDict_SetItem(inst->in_dict, SWIG_This(), swig_this); - return (PyObject *) inst; -#endif -#endif -} - -SWIGRUNTIME void -SWIG_Python_SetSwigThis(PyObject *inst, PyObject *swig_this) -{ - PyObject *dict; -#if (PY_VERSION_HEX >= 0x02020000) && 
!defined(SWIG_PYTHON_SLOW_GETSET_THIS) - PyObject **dictptr = _PyObject_GetDictPtr(inst); - if (dictptr != NULL) { - dict = *dictptr; - if (dict == NULL) { - dict = PyDict_New(); - *dictptr = dict; - } - PyDict_SetItem(dict, SWIG_This(), swig_this); - return; - } -#endif - dict = PyObject_GetAttrString(inst, (char*)"__dict__"); - PyDict_SetItem(dict, SWIG_This(), swig_this); - Py_DECREF(dict); -} - - -SWIGINTERN PyObject * -SWIG_Python_InitShadowInstance(PyObject *args) { - PyObject *obj[2]; - if (!SWIG_Python_UnpackTuple(args,(char*)"swiginit", 2, 2, obj)) { - return NULL; - } else { - SwigPyObject *sthis = SWIG_Python_GetSwigThis(obj[0]); - if (sthis) { - SwigPyObject_append((PyObject*) sthis, obj[1]); - } else { - SWIG_Python_SetSwigThis(obj[0], obj[1]); - } - return SWIG_Py_Void(); - } -} - -/* Create a new pointer object */ - -SWIGRUNTIME PyObject * -SWIG_Python_NewPointerObj(void *ptr, swig_type_info *type, int flags) { - if (!ptr) { - return SWIG_Py_Void(); - } else { - int own = (flags & SWIG_POINTER_OWN) ? SWIG_POINTER_OWN : 0; - PyObject *robj = SwigPyObject_New(ptr, type, own); - SwigPyClientData *clientdata = type ? (SwigPyClientData *)(type->clientdata) : 0; - if (clientdata && !(flags & SWIG_POINTER_NOSHADOW)) { - PyObject *inst = SWIG_Python_NewShadowInstance(clientdata, robj); - if (inst) { - Py_DECREF(robj); - robj = inst; - } - } - return robj; - } -} - -/* Create a new packed object */ - -SWIGRUNTIMEINLINE PyObject * -SWIG_Python_NewPackedObj(void *ptr, size_t sz, swig_type_info *type) { - return ptr ? 
SwigPyPacked_New((void *) ptr, sz, type) : SWIG_Py_Void(); -} - -/* -----------------------------------------------------------------------------* - * Get type list - * -----------------------------------------------------------------------------*/ - -#ifdef SWIG_LINK_RUNTIME -void *SWIG_ReturnGlobalTypeList(void *); -#endif - -SWIGRUNTIME swig_module_info * -SWIG_Python_GetModule(void) { - static void *type_pointer = (void *)0; - /* first check if module already created */ - if (!type_pointer) { -#ifdef SWIG_LINK_RUNTIME - type_pointer = SWIG_ReturnGlobalTypeList((void *)0); -#else -#ifdef USE_CAPSULES - type_pointer = PyCapsule_Import(TYPE_POINTER_NAME, 0); -#else - type_pointer = PyCObject_Import((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - (char*)"type_pointer" SWIG_TYPE_TABLE_NAME); -#endif - if (PyErr_Occurred()) { - PyErr_Clear(); - type_pointer = (void *)0; - } -#endif - } - return (swig_module_info *) type_pointer; -} - -#if PY_MAJOR_VERSION < 2 -/* PyModule_AddObject function was introduced in Python 2.0. The following function - is copied out of Python/modsupport.c in python version 2.3.4 */ -SWIGINTERN int -PyModule_AddObject(PyObject *m, char *name, PyObject *o) -{ - PyObject *dict; - if (!PyModule_Check(m)) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs module as first arg"); - return SWIG_ERROR; - } - if (!o) { - PyErr_SetString(PyExc_TypeError, - "PyModule_AddObject() needs non-NULL value"); - return SWIG_ERROR; - } - - dict = PyModule_GetDict(m); - if (dict == NULL) { - /* Internal error -- modules must have a dict! 
*/ - PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__", - PyModule_GetName(m)); - return SWIG_ERROR; - } - if (PyDict_SetItemString(dict, name, o)) - return SWIG_ERROR; - Py_DECREF(o); - return SWIG_OK; -} -#endif - -SWIGRUNTIME void -SWIG_Python_DestroyModule(void *vptr) -{ - size_t i; -#ifdef USE_CAPSULES - swig_module_info *swig_module = - (swig_module_info *) PyCapsule_GetPointer((PyObject *)vptr, TYPE_POINTER_NAME); -#else - swig_module_info *swig_module = (swig_module_info *) vptr; -#endif - swig_type_info **types = swig_module->types; - for (i =0; i < swig_module->size; ++i) { - swig_type_info *ty = types[i]; - if (ty->owndata) { - SwigPyClientData *data = (SwigPyClientData *) ty->clientdata; - if (data) SwigPyClientData_Del(data); - } - } - Py_DECREF(SWIG_This()); - swig_this = NULL; -} - -SWIGRUNTIME void -SWIG_Python_SetModule(swig_module_info *swig_module) { - static PyMethodDef swig_empty_runtime_method_table[] = { {NULL, NULL, 0, NULL} };/* Sentinel */ - -#if PY_VERSION_HEX >= 0x03000000 - /* Add a dummy module object into sys.modules */ - PyObject *module = PyImport_AddModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION); -#else - PyObject *module = Py_InitModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, - swig_empty_runtime_method_table); -#endif -#ifdef USE_CAPSULES - PyObject *pointer = PyCapsule_New((void *)swig_module, TYPE_POINTER_NAME, - (PyCapsule_Destructor)SWIG_Python_DestroyModule); -#else - PyObject *pointer = PyCObject_FromVoidPtr((void *) swig_module, SWIG_Python_DestroyModule); -#endif - if (pointer && module) { -#ifdef USE_CAPSULES - PyModule_AddObject(module, (char*)"type_pointer_capsule" SWIG_TYPE_TABLE_NAME, pointer); -#else - PyModule_AddObject(module, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME, pointer); -#endif - } else { - Py_XDECREF(pointer); - } -} - -/* The python cached type query */ -SWIGRUNTIME PyObject * -SWIG_Python_TypeCache(void) { - static PyObject *SWIG_STATIC_POINTER(cache) = PyDict_New(); - 
return cache; -} - -SWIGRUNTIME swig_type_info * -SWIG_Python_TypeQuery(const char *type) -{ - PyObject *cache = SWIG_Python_TypeCache(); - PyObject *key = SWIG_Python_str_FromChar(type); - PyObject *obj = PyDict_GetItem(cache, key); - swig_type_info *descriptor; - if (obj) { -#ifdef USE_CAPSULES - descriptor = (swig_type_info *) PyCapsule_GetPointer(obj, type); -#else - descriptor = (swig_type_info *) PyCObject_AsVoidPtr(obj); -#endif - } else { - swig_module_info *swig_module = SWIG_Python_GetModule(); - descriptor = SWIG_TypeQueryModule(swig_module, swig_module, type); - if (descriptor) { -#ifdef USE_CAPSULES - obj = PyCapsule_New(descriptor, type, NULL); -#else - obj = PyCObject_FromVoidPtr(descriptor, NULL); -#endif - PyDict_SetItem(cache, key, obj); - Py_DECREF(obj); - } - } - Py_DECREF(key); - return descriptor; -} - -/* - For backward compatibility only -*/ -#define SWIG_POINTER_EXCEPTION 0 -#define SWIG_arg_fail(arg) SWIG_Python_ArgFail(arg) -#define SWIG_MustGetPtr(p, type, argnum, flags) SWIG_Python_MustGetPtr(p, type, argnum, flags) - -SWIGRUNTIME int -SWIG_Python_AddErrMesg(const char* mesg, int infront) -{ - if (PyErr_Occurred()) { - PyObject *type = 0; - PyObject *value = 0; - PyObject *traceback = 0; - PyErr_Fetch(&type, &value, &traceback); - if (value) { - char *tmp; - PyObject *old_str = PyObject_Str(value); - Py_XINCREF(type); - PyErr_Clear(); - if (infront) { - PyErr_Format(type, "%s %s", mesg, tmp = SWIG_Python_str_AsChar(old_str)); - } else { - PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); - } - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(old_str); - } - return 1; - } else { - return 0; - } -} - -SWIGRUNTIME int -SWIG_Python_ArgFail(int argnum) -{ - if (PyErr_Occurred()) { - /* add information about failing argument */ - char mesg[256]; - PyOS_snprintf(mesg, sizeof(mesg), "argument number %d:", argnum); - return SWIG_Python_AddErrMesg(mesg, 1); - } else { - return 0; - } -} - -SWIGRUNTIMEINLINE const char * 
-SwigPyObject_GetDesc(PyObject *self) -{ - SwigPyObject *v = (SwigPyObject *)self; - swig_type_info *ty = v ? v->ty : 0; - return ty ? ty->str : (char*)""; -} - -SWIGRUNTIME void -SWIG_Python_TypeError(const char *type, PyObject *obj) -{ - if (type) { -#if defined(SWIG_COBJECT_TYPES) - if (obj && SwigPyObject_Check(obj)) { - const char *otype = (const char *) SwigPyObject_GetDesc(obj); - if (otype) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, 'SwigPyObject(%s)' is received", - type, otype); - return; - } - } else -#endif - { - const char *otype = (obj ? obj->ob_type->tp_name : 0); - if (otype) { - PyObject *str = PyObject_Str(obj); - const char *cstr = str ? SWIG_Python_str_AsChar(str) : 0; - if (cstr) { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s(%s)' is received", - type, otype, cstr); - SWIG_Python_str_DelForPy3(cstr); - } else { - PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s' is received", - type, otype); - } - Py_XDECREF(str); - return; - } - } - PyErr_Format(PyExc_TypeError, "a '%s' is expected", type); - } else { - PyErr_Format(PyExc_TypeError, "unexpected type is received"); - } -} - - -/* Convert a pointer value, signal an exception on a type mismatch */ -SWIGRUNTIME void * -SWIG_Python_MustGetPtr(PyObject *obj, swig_type_info *ty, int argnum, int flags) { - void *result; - if (SWIG_Python_ConvertPtr(obj, &result, ty, flags) == -1) { - PyErr_Clear(); -#if SWIG_POINTER_EXCEPTION - if (flags) { - SWIG_Python_TypeError(SWIG_TypePrettyName(ty), obj); - SWIG_Python_ArgFail(argnum); - } -#endif - } - return result; -} - - -#ifdef __cplusplus -#if 0 -{ /* cc-mode */ -#endif -} -#endif - - - -#define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0) - -#define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else - - - -/* -------- TYPES TABLE (BEGIN) -------- */ - -#define SWIGTYPE_p_char swig_types[0] -static swig_type_info *swig_types[2]; -static 
swig_module_info swig_module = {swig_types, 1, 0, 0, 0, 0}; -#define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) -#define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) - -/* -------- TYPES TABLE (END) -------- */ - -#if (PY_VERSION_HEX <= 0x02000000) -# if !defined(SWIG_PYTHON_CLASSIC) -# error "This python version requires swig to be run with the '-classic' option" -# endif -#endif - -/*----------------------------------------------- - @(target):= _dia.so - ------------------------------------------------*/ -#if PY_VERSION_HEX >= 0x03000000 -# define SWIG_init PyInit__dia - -#else -# define SWIG_init init_dia - -#endif -#define SWIG_name "_dia" - -#define SWIGVERSION 0x020001 -#define SWIG_VERSION SWIGVERSION - - -#define SWIG_as_voidptr(a) const_cast< void * >(static_cast< const void * >(a)) -#define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),reinterpret_cast< void** >(a)) - - -#include - - -namespace swig { - class SwigPtr_PyObject { - protected: - PyObject *_obj; - - public: - SwigPtr_PyObject() :_obj(0) - { - } - - SwigPtr_PyObject(const SwigPtr_PyObject& item) : _obj(item._obj) - { - Py_XINCREF(_obj); - } - - SwigPtr_PyObject(PyObject *obj, bool initial_ref = true) :_obj(obj) - { - if (initial_ref) { - Py_XINCREF(_obj); - } - } - - SwigPtr_PyObject & operator=(const SwigPtr_PyObject& item) - { - Py_XINCREF(item._obj); - Py_XDECREF(_obj); - _obj = item._obj; - return *this; - } - - ~SwigPtr_PyObject() - { - Py_XDECREF(_obj); - } - - operator PyObject *() const - { - return _obj; - } - - PyObject *operator->() const - { - return _obj; - } - }; -} - - -namespace swig { - struct SwigVar_PyObject : SwigPtr_PyObject { - SwigVar_PyObject(PyObject* obj = 0) : SwigPtr_PyObject(obj, false) { } - - SwigVar_PyObject & operator = (PyObject* obj) - { - Py_XDECREF(_obj); - _obj = obj; - return *this; - } - }; -} - - -#include "py3k.h" -#define SWIG_FILE_WITH_INIT -#include "Python.h" -#include 
"numpy/arrayobject.h" -#include "complex_ops.h" -/*#include "sparsetools.h"*/ - - -#ifndef SWIG_FILE_WITH_INIT -# define NO_IMPORT_ARRAY -#endif -#include "stdio.h" -#include -#include "complex_ops.h" - - -/* The following code originally appeared in - * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was - * translated from C++ to C by John Hunter. Bill Spotz has modified - * it slightly to fix some minor bugs, upgrade to numpy (all - * versions), add some comments and some functionality. - */ - -/* Macros to extract array attributes. - */ -#define is_array(a) ((a) && PyArray_Check((PyArrayObject *)a)) -#define array_type(a) (int)(PyArray_TYPE(a)) -#define array_numdims(a) (((PyArrayObject *)a)->nd) -#define array_dimensions(a) (((PyArrayObject *)a)->dimensions) -#define array_size(a,i) (((PyArrayObject *)a)->dimensions[i]) -#define array_data(a) (((PyArrayObject *)a)->data) -#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS(a)) -#define array_is_native(a) (PyArray_ISNOTSWAPPED(a)) - -/* Support older NumPy data type names -*/ -#if NDARRAY_VERSION < 0x01000000 -#define NPY_BOOL PyArray_BOOL -#define NPY_BYTE PyArray_BYTE -#define NPY_UBYTE PyArray_UBYTE -#define NPY_SHORT PyArray_SHORT -#define NPY_USHORT PyArray_USHORT -#define NPY_INT PyArray_INT -#define NPY_UINT PyArray_UINT -#define NPY_LONG PyArray_LONG -#define NPY_ULONG PyArray_ULONG -#define NPY_LONGLONG PyArray_LONGLONG -#define NPY_ULONGLONG PyArray_ULONGLONG -#define NPY_FLOAT PyArray_FLOAT -#define NPY_DOUBLE PyArray_DOUBLE -#define NPY_LONGDOUBLE PyArray_LONGDOUBLE -#define NPY_CFLOAT PyArray_CFLOAT -#define NPY_CDOUBLE PyArray_CDOUBLE -#define NPY_CLONGDOUBLE PyArray_CLONGDOUBLE -#define NPY_OBJECT PyArray_OBJECT -#define NPY_STRING PyArray_STRING -#define NPY_UNICODE PyArray_UNICODE -#define NPY_VOID PyArray_VOID -#define NPY_NTYPES PyArray_NTYPES -#define NPY_NOTYPE PyArray_NOTYPE -#define NPY_CHAR PyArray_CHAR -#define NPY_USERDEF PyArray_USERDEF -#define npy_intp intp -#endif - -/* 
Given a PyObject, return a string describing its type. - */ -const char* pytype_string(PyObject* py_obj) { - if (py_obj == NULL ) return "C NULL value"; - if (py_obj == Py_None ) return "Python None" ; - if (PyCallable_Check(py_obj)) return "callable" ; - if (PyString_Check( py_obj)) return "string" ; - if (PyInt_Check( py_obj)) return "int" ; - if (PyFloat_Check( py_obj)) return "float" ; - if (PyDict_Check( py_obj)) return "dict" ; - if (PyList_Check( py_obj)) return "list" ; - if (PyTuple_Check( py_obj)) return "tuple" ; - if (PyFile_Check( py_obj)) return "file" ; - if (PyModule_Check( py_obj)) return "module" ; - if (PyInstance_Check(py_obj)) return "instance" ; - - return "unkown type"; -} - -/* Given a NumPy typecode, return a string describing the type. - */ -const char* typecode_string(int typecode) { - static const char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; - return typecode < 24 ? type_names[typecode] : type_names[24]; -} - -/* Make sure input has correct numpy type. Allow character and byte - * to match. Also allow int and long to match. This is deprecated. - * You should use PyArray_EquivTypenums() instead. - */ -int type_match(int actual_type, int desired_type) { - return PyArray_EquivTypenums(actual_type, desired_type); -} - -/* Given a PyObject pointer, cast it to a PyArrayObject pointer if - * legal. If not, set the python error string appropriately and - * return NULL. 
- */ -PyArrayObject* obj_to_array_no_conversion(PyObject* input, int typecode) { - PyArrayObject* ary = NULL; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) { - ary = (PyArrayObject*) input; - } - else if is_array(input) { - const char* desired_type = typecode_string(typecode); - const char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); - ary = NULL; - } - else { - const char * desired_type = typecode_string(typecode); - const char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); - ary = NULL; - } - return ary; -} - -/* Convert the given PyObject to a NumPy array with the given - * typecode. On success, return a valid PyArrayObject* with the - * correct type. On failure, the python error string will be set and - * the routine returns NULL. - */ -PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) { - PyArrayObject* ary = NULL; - PyObject* py_obj; - if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) { - ary = (PyArrayObject*) input; - *is_new_object = 0; - } - else { - py_obj = PyArray_FromObject(input, typecode, 0, 0); - /* If NULL, PyArray_FromObject will have set python error value.*/ - ary = (PyArrayObject*) py_obj; - *is_new_object = 1; - } - return ary; -} - -/* Given a PyArrayObject, check to see if it is contiguous. If so, - * return the input pointer and flag it as not a new object. If it is - * not contiguous, create a new PyArrayObject using the original data, - * flag it as a new object and return the pointer. 
- */ -PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) { - PyArrayObject* result; - if (array_is_contiguous(ary)) { - result = ary; - *is_new_object = 0; - } - else { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); - *is_new_object = 1; - } - return result; -} - -/* Convert a given PyObject to a contiguous PyArrayObject of the - * specified type. If the input object is not a contiguous - * PyArrayObject, a new one will be created and the new object flag - * will be set. - */ -PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) { - int is_new1 = 0; - int is_new2 = 0; - PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, &is_new1); - if (ary1) { - ary2 = make_contiguous(ary1, &is_new2, 0, 0); - if ( is_new1 && is_new2) { - Py_DECREF(ary1); - } - ary1 = ary2; - } - *is_new_object = is_new1 || is_new2; - return ary1; -} - -/* Test whether a python object is contiguous. If array is - * contiguous, return 1. Otherwise, set the python error string and - * return 0. - */ -int require_contiguous(PyArrayObject* ary) { - int contiguous = 1; - if (!array_is_contiguous(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); - contiguous = 0; - } - return contiguous; -} - -/* Require that a numpy array is not byte-swapped. If the array is - * not byte-swapped, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_native(PyArrayObject* ary) { - int native = 1; - if (!array_is_native(ary)) { - PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. A byte-swapped array was given"); - native = 0; - } - return native; -} - -/* Require the given PyArrayObject to have a specified number of - * dimensions. If the array has the specified number of dimensions, - * return 1. 
Otherwise, set the python error string and return 0. - */ -int require_dimensions(PyArrayObject* ary, int exact_dimensions) { - int success = 1; - if (array_numdims(ary) != exact_dimensions) { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); - success = 0; - } - return success; -} - -/* Require the given PyArrayObject to have one of a list of specified - * number of dimensions. If the array has one of the specified number - * of dimensions, return 1. Otherwise, set the python error string - * and return 0. - */ -int require_dimensions_n(PyArrayObject* ary, int* exact_dimensions, int n) { - int success = 0; - int i; - char dims_str[255] = ""; - char s[255]; - for (i = 0; i < n && !success; i++) { - if (array_numdims(ary) == exact_dimensions[i]) { - success = 1; - } - } - if (!success) { - for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); - } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, - "Array must be have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); - } - return success; -} - -/* Require the given PyArrayObject to have a specified shape. If the - * array has the specified shape, return 1. Otherwise, set the python - * error string and return 0. 
- */ -int require_size(PyArrayObject* ary, npy_intp* size, int n) { - int i; - int success = 1; - int len; - char desired_dims[255] = "["; - char s[255]; - char actual_dims[255] = "["; - for(i=0; i < n;i++) { - if (size[i] != -1 && size[i] != array_size(ary,i)) { - success = 0; - } - } - if (!success) { - for (i = 0; i < n; i++) { - if (size[i] == -1) { - sprintf(s, "*,"); - } - else - { - sprintf(s,"%" NPY_INTP_FMT ",", size[i]); - } - strcat(desired_dims,s); - } - len = strlen(desired_dims); - desired_dims[len-1] = ']'; - for (i = 0; i < n; i++) { - sprintf(s,"%" NPY_INTP_FMT ",", array_size(ary,i)); - strcat(actual_dims,s); - } - len = strlen(actual_dims); - actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, - "Array must be have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); - } - return success; -} -/* End John Hunter translation (with modifications by Bill Spotz) */ - - - - - -/*! - Appends @a what to @a where. On input, @a where need not to be a tuple, but on - return it always is. 
- - @par Revision history: - - 17.02.2005, c -*/ -PyObject *helper_appendToTuple( PyObject *where, PyObject *what ) { - PyObject *o2, *o3; - - if ((!where) || (where == Py_None)) { - where = what; - } else { - if (!PyTuple_Check( where )) { - o2 = where; - where = PyTuple_New( 1 ); - PyTuple_SetItem( where, 0, o2 ); - } - o3 = PyTuple_New( 1 ); - PyTuple_SetItem( o3, 0, what ); - o2 = where; - where = PySequence_Concat( o2, o3 ); - Py_DECREF( o2 ); - Py_DECREF( o3 ); - } - return where; -} - - - - - - -#include "dia.h" - - -#include -#if !defined(SWIG_NO_LLONG_MAX) -# if !defined(LLONG_MAX) && defined(__GNUC__) && defined (__LONG_LONG_MAX__) -# define LLONG_MAX __LONG_LONG_MAX__ -# define LLONG_MIN (-LLONG_MAX - 1LL) -# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) -# endif -#endif - - -SWIGINTERN int -SWIG_AsVal_double (PyObject *obj, double *val) -{ - int res = SWIG_TypeError; - if (PyFloat_Check(obj)) { - if (val) *val = PyFloat_AsDouble(obj); - return SWIG_OK; - } else if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - double v = PyLong_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - double d = PyFloat_AsDouble(obj); - if (!PyErr_Occurred()) { - if (val) *val = d; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_AddCast(SWIG_OK)); - } else { - PyErr_Clear(); - } - } - } -#endif - return res; -} - - -#include - - -#include - - -SWIGINTERNINLINE int -SWIG_CanCastAsInteger(double *d, double min, double max) { - double x = *d; - if ((min <= x && x <= max)) { - double fx = floor(x); - double cx = ceil(x); - double rd = ((x - fx) < 0.5) ? 
fx : cx; /* simple rint */ - if ((errno == EDOM) || (errno == ERANGE)) { - errno = 0; - } else { - double summ, reps, diff; - if (rd < x) { - diff = x - rd; - } else if (rd > x) { - diff = rd - x; - } else { - return 1; - } - summ = rd + x; - reps = diff/summ; - if (reps < 8*DBL_EPSILON) { - *d = rd; - return 1; - } - } - } - return 0; -} - - -SWIGINTERN int -SWIG_AsVal_long (PyObject *obj, long* val) -{ - if (PyInt_Check(obj)) { - if (val) *val = PyInt_AsLong(obj); - return SWIG_OK; - } else if (PyLong_Check(obj)) { - long v = PyLong_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_OK; - } else { - PyErr_Clear(); - } - } -#ifdef SWIG_PYTHON_CAST_MODE - { - int dispatch = 0; - long v = PyInt_AsLong(obj); - if (!PyErr_Occurred()) { - if (val) *val = v; - return SWIG_AddCast(SWIG_OK); - } else { - PyErr_Clear(); - } - if (!dispatch) { - double d; - int res = SWIG_AddCast(SWIG_AsVal_double (obj,&d)); - if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { - if (val) *val = (long)(d); - return res; - } - } - } -#endif - return SWIG_TypeError; -} - - -SWIGINTERN int -SWIG_AsVal_int (PyObject * obj, int *val) -{ - long v; - int res = SWIG_AsVal_long (obj, &v); - if (SWIG_IsOK(res)) { - if ((v < INT_MIN || v > INT_MAX)) { - return SWIG_OverflowError; - } else { - if (val) *val = static_cast< int >(v); - } - } - return res; -} - -#ifdef __cplusplus -extern "C" { -#endif -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - signed char *arg6 ; - signed char *arg7 ; - signed char *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject 
*temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_BYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (signed char*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_BYTE, &is_new_object7); - if (!array7 || 
!require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (signed char*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_BYTE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (signed char*) array_data(temp8); - } - dia_matvec< int,signed char >(arg1,arg2,arg3,arg4,(int const (*))arg5,(signed char const (*))arg6,(signed char const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - unsigned char *arg6 ; - unsigned char *arg7 ; - unsigned char *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), 
"in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UBYTE, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (unsigned char*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_UBYTE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned char*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_UBYTE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned char*) array_data(temp8); - } - dia_matvec< int,unsigned char >(arg1,arg2,arg3,arg4,(int const 
(*))arg5,(unsigned char const (*))arg6,(unsigned char const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - short *arg6 ; - short *arg7 ; - short *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in 
method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_SHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (short*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_SHORT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (short*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_SHORT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (short*) array_data(temp8); - } - dia_matvec< int,short >(arg1,arg2,arg3,arg4,(int const (*))arg5,(short const (*))arg6,(short const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && 
array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - unsigned short *arg6 ; - unsigned short *arg7 ; - unsigned short *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || 
!require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_USHORT, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (unsigned short*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_USHORT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned short*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_USHORT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned short*) array_data(temp8); - } - dia_matvec< int,unsigned short >(arg1,arg2,arg3,arg4,(int const (*))arg5,(unsigned short const (*))arg6,(unsigned short const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - int *arg6 ; - int *arg7 ; - int *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 
0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_INT, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - 
arg6 = (int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_INT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (int*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_INT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (int*) array_data(temp8); - } - dia_matvec< int,int >(arg1,arg2,arg3,arg4,(int const (*))arg5,(int const (*))arg6,(int const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - unsigned int *arg6 ; - unsigned int *arg7 ; - unsigned int *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char 
*)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_UINT, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (unsigned int*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_UINT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned int*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_UINT); - if 
(!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned int*) array_data(temp8); - } - dia_matvec< int,unsigned int >(arg1,arg2,arg3,arg4,(int const (*))arg5,(unsigned int const (*))arg6,(unsigned int const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_7(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - long long *arg6 ; - long long *arg7 ; - long long *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', 
argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (long long*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_LONGLONG, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (long long*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_LONGLONG); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (long long*) array_data(temp8); - } - dia_matvec< int,long long >(arg1,arg2,arg3,arg4,(int const (*))arg5,(long long const (*))arg6,(long long const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && 
array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_8(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - unsigned long long *arg6 ; - unsigned long long *arg7 ; - unsigned long long *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" 
"dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_ULONGLONG, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (unsigned long long*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_ULONGLONG, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (unsigned long long*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_ULONGLONG); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (unsigned long long*) array_data(temp8); - } - dia_matvec< int,unsigned long long >(arg1,arg2,arg3,arg4,(int const (*))arg5,(unsigned long long const (*))arg6,(unsigned long long const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_9(PyObject *SWIGUNUSEDPARM(self), 
PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - float *arg6 ; - float *arg7 ; - float *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - 
{ - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_FLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (float*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_FLOAT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (float*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_FLOAT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (float*) array_data(temp8); - } - dia_matvec< int,float >(arg1,arg2,arg3,arg4,(int const (*))arg5,(float const (*))arg6,(float const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_10(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - double *arg6 ; - double *arg7 ; - double *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * 
obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_DOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (double*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_DOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || 
!require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (double*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_DOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (double*) array_data(temp8); - } - dia_matvec< int,double >(arg1,arg2,arg3,arg4,(int const (*))arg5,(double const (*))arg6,(double const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_11(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - long double *arg6 ; - long double *arg7 ; - long double *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" 
"int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_LONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (long double*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_LONGDOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (long double*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_LONGDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (long double*) array_data(temp8); - } - dia_matvec< int,long double >(arg1,arg2,arg3,arg4,(int const (*))arg5,(long double const (*))arg6,(long double const 
(*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_12(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - npy_cfloat_wrapper *arg6 ; - npy_cfloat_wrapper *arg7 ; - npy_cfloat_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" 
"', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CFLOAT, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (npy_cfloat_wrapper*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_CFLOAT, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (npy_cfloat_wrapper*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CFLOAT); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_cfloat_wrapper*) array_data(temp8); - } - dia_matvec< int,npy_cfloat_wrapper >(arg1,arg2,arg3,arg4,(int const (*))arg5,(npy_cfloat_wrapper const (*))arg6,(npy_cfloat_wrapper const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - 
Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_13(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - npy_cdouble_wrapper *arg6 ; - npy_cdouble_wrapper *arg7 ; - npy_cdouble_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = 
obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = obj_to_array_contiguous_allow_conversion(obj5, PyArray_CDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (npy_cdouble_wrapper*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_CDOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (npy_cdouble_wrapper*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_cdouble_wrapper*) array_data(temp8); - } - dia_matvec< int,npy_cdouble_wrapper >(arg1,arg2,arg3,arg4,(int const (*))arg5,(npy_cdouble_wrapper const (*))arg6,(npy_cdouble_wrapper const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec__SWIG_14(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { - PyObject *resultobj = 0; - int arg1 ; - int arg2 ; - int arg3 ; - int arg4 ; - int *arg5 ; - npy_clongdouble_wrapper *arg6 ; - 
npy_clongdouble_wrapper *arg7 ; - npy_clongdouble_wrapper *arg8 ; - int val1 ; - int ecode1 = 0 ; - int val2 ; - int ecode2 = 0 ; - int val3 ; - int ecode3 = 0 ; - int val4 ; - int ecode4 = 0 ; - PyArrayObject *array5 = NULL ; - int is_new_object5 ; - PyArrayObject *array6 = NULL ; - int is_new_object6 ; - PyArrayObject *array7 = NULL ; - int is_new_object7 ; - PyArrayObject *temp8 = NULL ; - PyObject * obj0 = 0 ; - PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; - PyObject * obj6 = 0 ; - PyObject * obj7 = 0 ; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOOOO:dia_matvec",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7)) SWIG_fail; - ecode1 = SWIG_AsVal_int(obj0, &val1); - if (!SWIG_IsOK(ecode1)) { - SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "dia_matvec" "', argument " "1"" of type '" "int""'"); - } - arg1 = static_cast< int >(val1); - ecode2 = SWIG_AsVal_int(obj1, &val2); - if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "dia_matvec" "', argument " "2"" of type '" "int""'"); - } - arg2 = static_cast< int >(val2); - ecode3 = SWIG_AsVal_int(obj2, &val3); - if (!SWIG_IsOK(ecode3)) { - SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "dia_matvec" "', argument " "3"" of type '" "int""'"); - } - arg3 = static_cast< int >(val3); - ecode4 = SWIG_AsVal_int(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "dia_matvec" "', argument " "4"" of type '" "int""'"); - } - arg4 = static_cast< int >(val4); - { - npy_intp size[1] = { - -1 - }; - array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); - if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1) - || !require_contiguous(array5) || !require_native(array5)) SWIG_fail; - - arg5 = (int*) array5->data; - } - { - npy_intp size[2] = { - -1,-1 - }; - array6 = 
obj_to_array_contiguous_allow_conversion(obj5, PyArray_CLONGDOUBLE, &is_new_object6); - if (!array6 || !require_dimensions(array6,2) || !require_size(array6,size,1) - || !require_contiguous(array6) || !require_native(array6)) SWIG_fail; - arg6 = (npy_clongdouble_wrapper*) array6->data; - } - { - npy_intp size[1] = { - -1 - }; - array7 = obj_to_array_contiguous_allow_conversion(obj6, PyArray_CLONGDOUBLE, &is_new_object7); - if (!array7 || !require_dimensions(array7,1) || !require_size(array7,size,1) - || !require_contiguous(array7) || !require_native(array7)) SWIG_fail; - - arg7 = (npy_clongdouble_wrapper*) array7->data; - } - { - temp8 = obj_to_array_no_conversion(obj7,PyArray_CLONGDOUBLE); - if (!temp8 || !require_contiguous(temp8) || !require_native(temp8)) SWIG_fail; - arg8 = (npy_clongdouble_wrapper*) array_data(temp8); - } - dia_matvec< int,npy_clongdouble_wrapper >(arg1,arg2,arg3,arg4,(int const (*))arg5,(npy_clongdouble_wrapper const (*))arg6,(npy_clongdouble_wrapper const (*))arg7,arg8); - resultobj = SWIG_Py_Void(); - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return resultobj; -fail: - { - if (is_new_object5 && array5) { - Py_DECREF(array5); - } - } - { - if (is_new_object6 && array6) { - Py_DECREF(array6); - } - } - { - if (is_new_object7 && array7) { - Py_DECREF(array7); - } - } - return NULL; -} - - -SWIGINTERN PyObject *_wrap_dia_matvec(PyObject *self, PyObject *args) { - int argc; - PyObject *argv[9]; - int ii; - - if (!PyTuple_Check(args)) SWIG_fail; - argc = (int)PyObject_Length(args); - for (ii = 0; (ii < argc) && (ii < 8); ii++) { - argv[ii] = PyTuple_GET_ITEM(args,ii); - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - 
int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_BYTE)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_1(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UBYTE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UBYTE)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_2(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_SHORT)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_3(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_USHORT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_USHORT)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_4(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_5(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_UINT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_UINT)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_6(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGLONG)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_7(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_ULONGLONG)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_ULONGLONG)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_8(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_FLOAT)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_9(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_DOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_DOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_10(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_LONGDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_11(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CFLOAT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CFLOAT)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_12(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CDOUBLE)) ? 1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_13(self, args); - } - } - } - } - } - } - } - } - } - if (argc == 8) { - int _v; - { - int res = SWIG_AsVal_int(argv[0], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[1], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[2], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - int res = SWIG_AsVal_int(argv[3], NULL); - _v = SWIG_CheckState(res); - } - if (_v) { - { - _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[5]) && PyArray_CanCastSafely(PyArray_TYPE(argv[5]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[6]) && PyArray_CanCastSafely(PyArray_TYPE(argv[6]),PyArray_CLONGDOUBLE)) ? 1 : 0; - } - if (_v) { - { - _v = (is_array(argv[7]) && PyArray_CanCastSafely(PyArray_TYPE(argv[7]),PyArray_CLONGDOUBLE)) ? 
1 : 0; - } - if (_v) { - return _wrap_dia_matvec__SWIG_14(self, args); - } - } - } - } - } - } - } - } - } - -fail: - SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number or type of arguments for overloaded function 'dia_matvec'.\n" - " Possible C/C++ prototypes are:\n" - " dia_matvec< int,signed char >(int const,int const,int const,int const,int const [],signed char const [],signed char const [],signed char [])\n" - " dia_matvec< int,unsigned char >(int const,int const,int const,int const,int const [],unsigned char const [],unsigned char const [],unsigned char [])\n" - " dia_matvec< int,short >(int const,int const,int const,int const,int const [],short const [],short const [],short [])\n" - " dia_matvec< int,unsigned short >(int const,int const,int const,int const,int const [],unsigned short const [],unsigned short const [],unsigned short [])\n" - " dia_matvec< int,int >(int const,int const,int const,int const,int const [],int const [],int const [],int [])\n" - " dia_matvec< int,unsigned int >(int const,int const,int const,int const,int const [],unsigned int const [],unsigned int const [],unsigned int [])\n" - " dia_matvec< int,long long >(int const,int const,int const,int const,int const [],long long const [],long long const [],long long [])\n" - " dia_matvec< int,unsigned long long >(int const,int const,int const,int const,int const [],unsigned long long const [],unsigned long long const [],unsigned long long [])\n" - " dia_matvec< int,float >(int const,int const,int const,int const,int const [],float const [],float const [],float [])\n" - " dia_matvec< int,double >(int const,int const,int const,int const,int const [],double const [],double const [],double [])\n" - " dia_matvec< int,long double >(int const,int const,int const,int const,int const [],long double const [],long double const [],long double [])\n" - " dia_matvec< int,npy_cfloat_wrapper >(int const,int const,int const,int const,int const [],npy_cfloat_wrapper const [],npy_cfloat_wrapper const 
[],npy_cfloat_wrapper [])\n" - " dia_matvec< int,npy_cdouble_wrapper >(int const,int const,int const,int const,int const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper const [],npy_cdouble_wrapper [])\n" - " dia_matvec< int,npy_clongdouble_wrapper >(int const,int const,int const,int const,int const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper const [],npy_clongdouble_wrapper [])\n"); - return NULL; -} - - -static PyMethodDef SwigMethods[] = { - { (char *)"SWIG_PyInstanceMethod_New", (PyCFunction)SWIG_PyInstanceMethod_New, METH_O, NULL}, - { (char *)"dia_matvec", _wrap_dia_matvec, METH_VARARGS, (char *)"\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " signed char diags, signed char Xx, signed char Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " unsigned char diags, unsigned char Xx, unsigned char Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " short diags, short Xx, short Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " unsigned short diags, unsigned short Xx, \n" - " unsigned short Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " int diags, int Xx, int Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " unsigned int diags, unsigned int Xx, unsigned int Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " long long diags, long long Xx, long long Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " unsigned long long diags, unsigned long long Xx, \n" - " unsigned long long Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " float diags, float Xx, float Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " double diags, double Xx, double Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " long 
double diags, long double Xx, long double Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " npy_cfloat_wrapper diags, npy_cfloat_wrapper Xx, \n" - " npy_cfloat_wrapper Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " npy_cdouble_wrapper diags, npy_cdouble_wrapper Xx, \n" - " npy_cdouble_wrapper Yx)\n" - "dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, \n" - " npy_clongdouble_wrapper diags, npy_clongdouble_wrapper Xx, \n" - " npy_clongdouble_wrapper Yx)\n" - ""}, - { NULL, NULL, 0, NULL } -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (BEGIN) -------- */ - -static swig_type_info _swigt__p_char = {"_p_char", "char *", 0, 0, (void*)0, 0}; - -static swig_type_info *swig_type_initial[] = { - &_swigt__p_char, -}; - -static swig_cast_info _swigc__p_char[] = { {&_swigt__p_char, 0, 0, 0},{0, 0, 0, 0}}; - -static swig_cast_info *swig_cast_initial[] = { - _swigc__p_char, -}; - - -/* -------- TYPE CONVERSION AND EQUIVALENCE RULES (END) -------- */ - -static swig_const_info swig_const_table[] = { -{0, 0, 0, 0.0, 0, 0}}; - -#ifdef __cplusplus -} -#endif -/* ----------------------------------------------------------------------------- - * Type initialization: - * This problem is tough by the requirement that no dynamic - * memory is used. Also, since swig_type_info structures store pointers to - * swig_cast_info structures and swig_cast_info structures store pointers back - * to swig_type_info structures, we need some lookup code at initialization. - * The idea is that swig generates all the structures that are needed. - * The runtime then collects these partially filled structures. - * The SWIG_InitializeModule function takes these initial arrays out of - * swig_module, and does all the lookup, filling in the swig_module.types - * array with the correct data and linking the correct swig_cast_info - * structures together. 
- * - * The generated swig_type_info structures are assigned staticly to an initial - * array. We just loop through that array, and handle each type individually. - * First we lookup if this type has been already loaded, and if so, use the - * loaded structure instead of the generated one. Then we have to fill in the - * cast linked list. The cast data is initially stored in something like a - * two-dimensional array. Each row corresponds to a type (there are the same - * number of rows as there are in the swig_type_initial array). Each entry in - * a column is one of the swig_cast_info structures for that type. - * The cast_initial array is actually an array of arrays, because each row has - * a variable number of columns. So to actually build the cast linked list, - * we find the array of casts associated with the type, and loop through it - * adding the casts to the list. The one last trick we need to do is making - * sure the type pointer in the swig_cast_info struct is correct. - * - * First off, we lookup the cast->type name to see if it is already loaded. - * There are three cases to handle: - * 1) If the cast->type has already been loaded AND the type we are adding - * casting info to has not been loaded (it is in this module), THEN we - * replace the cast->type pointer with the type pointer that has already - * been loaded. - * 2) If BOTH types (the one we are adding casting info to, and the - * cast->type) are loaded, THEN the cast info has already been loaded by - * the previous module so we just ignore it. - * 3) Finally, if cast->type has not already been loaded, then we add that - * swig_cast_info to the linked list (because the cast->type) pointer will - * be correct. 
- * ----------------------------------------------------------------------------- */ - -#ifdef __cplusplus -extern "C" { -#if 0 -} /* c-mode */ -#endif -#endif - -#if 0 -#define SWIGRUNTIME_DEBUG -#endif - - -SWIGRUNTIME void -SWIG_InitializeModule(void *clientdata) { - size_t i; - swig_module_info *module_head, *iter; - int found, init; - - clientdata = clientdata; - - /* check to see if the circular list has been setup, if not, set it up */ - if (swig_module.next==0) { - /* Initialize the swig_module */ - swig_module.type_initial = swig_type_initial; - swig_module.cast_initial = swig_cast_initial; - swig_module.next = &swig_module; - init = 1; - } else { - init = 0; - } - - /* Try and load any already created modules */ - module_head = SWIG_GetModule(clientdata); - if (!module_head) { - /* This is the first module loaded for this interpreter */ - /* so set the swig module into the interpreter */ - SWIG_SetModule(clientdata, &swig_module); - module_head = &swig_module; - } else { - /* the interpreter has loaded a SWIG module, but has it loaded this one? */ - found=0; - iter=module_head; - do { - if (iter==&swig_module) { - found=1; - break; - } - iter=iter->next; - } while (iter!= module_head); - - /* if the is found in the list, then all is done and we may leave */ - if (found) return; - /* otherwise we must add out module into the list */ - swig_module.next = module_head->next; - module_head->next = &swig_module; - } - - /* When multiple interpeters are used, a module could have already been initialized in - a different interpreter, but not yet have a pointer in this interpreter. - In this case, we do not want to continue adding types... 
everything should be - set up already */ - if (init == 0) return; - - /* Now work on filling in swig_module.types */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: size %d\n", swig_module.size); -#endif - for (i = 0; i < swig_module.size; ++i) { - swig_type_info *type = 0; - swig_type_info *ret; - swig_cast_info *cast; - -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); -#endif - - /* if there is another module already loaded */ - if (swig_module.next != &swig_module) { - type = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, swig_module.type_initial[i]->name); - } - if (type) { - /* Overwrite clientdata field */ -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found type %s\n", type->name); -#endif - if (swig_module.type_initial[i]->clientdata) { - type->clientdata = swig_module.type_initial[i]->clientdata; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: found and overwrite type %s \n", type->name); -#endif - } - } else { - type = swig_module.type_initial[i]; - } - - /* Insert casting types */ - cast = swig_module.cast_initial[i]; - while (cast->type) { - /* Don't need to add information already in the list */ - ret = 0; -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: look cast %s\n", cast->type->name); -#endif - if (swig_module.next != &swig_module) { - ret = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, cast->type->name); -#ifdef SWIGRUNTIME_DEBUG - if (ret) printf("SWIG_InitializeModule: found cast %s\n", ret->name); -#endif - } - if (ret) { - if (type == swig_module.type_initial[i]) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: skip old type %s\n", ret->name); -#endif - cast->type = ret; - ret = 0; - } else { - /* Check for casting already in the list */ - swig_cast_info *ocast = SWIG_TypeCheck(ret->name, type); -#ifdef SWIGRUNTIME_DEBUG - if (ocast) printf("SWIG_InitializeModule: skip old cast %s\n", ret->name); 
-#endif - if (!ocast) ret = 0; - } - } - - if (!ret) { -#ifdef SWIGRUNTIME_DEBUG - printf("SWIG_InitializeModule: adding cast %s\n", cast->type->name); -#endif - if (type->cast) { - type->cast->prev = cast; - cast->next = type->cast; - } - type->cast = cast; - } - cast++; - } - /* Set entry in modules->types array equal to the type */ - swig_module.types[i] = type; - } - swig_module.types[i] = 0; - -#ifdef SWIGRUNTIME_DEBUG - printf("**** SWIG_InitializeModule: Cast List ******\n"); - for (i = 0; i < swig_module.size; ++i) { - int j = 0; - swig_cast_info *cast = swig_module.cast_initial[i]; - printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); - while (cast->type) { - printf("SWIG_InitializeModule: cast type %s\n", cast->type->name); - cast++; - ++j; - } - printf("---- Total casts: %d\n",j); - } - printf("**** SWIG_InitializeModule: Cast List ******\n"); -#endif -} - -/* This function will propagate the clientdata field of type to -* any new swig_type_info structures that have been added into the list -* of equivalent types. It is like calling -* SWIG_TypeClientData(type, clientdata) a second time. 
-*/ -SWIGRUNTIME void -SWIG_PropagateClientData(void) { - size_t i; - swig_cast_info *equiv; - static int init_run = 0; - - if (init_run) return; - init_run = 1; - - for (i = 0; i < swig_module.size; i++) { - if (swig_module.types[i]->clientdata) { - equiv = swig_module.types[i]->cast; - while (equiv) { - if (!equiv->converter) { - if (equiv->type && !equiv->type->clientdata) - SWIG_TypeClientData(equiv->type, swig_module.types[i]->clientdata); - } - equiv = equiv->next; - } - } - } -} - -#ifdef __cplusplus -#if 0 -{ - /* c-mode */ -#endif -} -#endif - - - -#ifdef __cplusplus -extern "C" { -#endif - - /* Python-specific SWIG API */ -#define SWIG_newvarlink() SWIG_Python_newvarlink() -#define SWIG_addvarlink(p, name, get_attr, set_attr) SWIG_Python_addvarlink(p, name, get_attr, set_attr) -#define SWIG_InstallConstants(d, constants) SWIG_Python_InstallConstants(d, constants) - - /* ----------------------------------------------------------------------------- - * global variable support code. 
- * ----------------------------------------------------------------------------- */ - - typedef struct swig_globalvar { - char *name; /* Name of global variable */ - PyObject *(*get_attr)(void); /* Return the current value */ - int (*set_attr)(PyObject *); /* Set the value */ - struct swig_globalvar *next; - } swig_globalvar; - - typedef struct swig_varlinkobject { - PyObject_HEAD - swig_globalvar *vars; - } swig_varlinkobject; - - SWIGINTERN PyObject * - swig_varlink_repr(swig_varlinkobject *SWIGUNUSEDPARM(v)) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_InternFromString(""); -#else - return PyString_FromString(""); -#endif - } - - SWIGINTERN PyObject * - swig_varlink_str(swig_varlinkobject *v) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *str = PyUnicode_InternFromString("("); - PyObject *tail; - PyObject *joined; - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - tail = PyUnicode_FromString(var->name); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - if (var->next) { - tail = PyUnicode_InternFromString(", "); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; - } - } - tail = PyUnicode_InternFromString(")"); - joined = PyUnicode_Concat(str, tail); - Py_DecRef(str); - Py_DecRef(tail); - str = joined; -#else - PyObject *str = PyString_FromString("("); - swig_globalvar *var; - for (var = v->vars; var; var=var->next) { - PyString_ConcatAndDel(&str,PyString_FromString(var->name)); - if (var->next) PyString_ConcatAndDel(&str,PyString_FromString(", ")); - } - PyString_ConcatAndDel(&str,PyString_FromString(")")); -#endif - return str; - } - - SWIGINTERN int - swig_varlink_print(swig_varlinkobject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) { - char *tmp; - PyObject *str = swig_varlink_str(v); - fprintf(fp,"Swig global variables "); - fprintf(fp,"%s\n", tmp = SWIG_Python_str_AsChar(str)); - SWIG_Python_str_DelForPy3(tmp); - Py_DECREF(str); - return 0; - } - 
- SWIGINTERN void - swig_varlink_dealloc(swig_varlinkobject *v) { - swig_globalvar *var = v->vars; - while (var) { - swig_globalvar *n = var->next; - free(var->name); - free(var); - var = n; - } - } - - SWIGINTERN PyObject * - swig_varlink_getattr(swig_varlinkobject *v, char *n) { - PyObject *res = NULL; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->get_attr)(); - break; - } - var = var->next; - } - if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN int - swig_varlink_setattr(swig_varlinkobject *v, char *n, PyObject *p) { - int res = 1; - swig_globalvar *var = v->vars; - while (var) { - if (strcmp(var->name,n) == 0) { - res = (*var->set_attr)(p); - break; - } - var = var->next; - } - if (res == 1 && !PyErr_Occurred()) { - PyErr_SetString(PyExc_NameError,"Unknown C global variable"); - } - return res; - } - - SWIGINTERN PyTypeObject* - swig_varlink_type(void) { - static char varlink__doc__[] = "Swig var link object"; - static PyTypeObject varlink_type; - static int type_init = 0; - if (!type_init) { - const PyTypeObject tmp - = { - /* PyObject header changed in Python 3 */ -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(&PyType_Type, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /* Number of items in variable part (ob_size) */ -#endif - (char *)"swigvarlink", /* Type name (tp_name) */ - sizeof(swig_varlinkobject), /* Basic size (tp_basicsize) */ - 0, /* Itemsize (tp_itemsize) */ - (destructor) swig_varlink_dealloc, /* Deallocator (tp_dealloc) */ - (printfunc) swig_varlink_print, /* Print (tp_print) */ - (getattrfunc) swig_varlink_getattr, /* get attr (tp_getattr) */ - (setattrfunc) swig_varlink_setattr, /* Set attr (tp_setattr) */ - 0, /* tp_compare */ - (reprfunc) swig_varlink_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc) 
swig_varlink_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - varlink__doc__, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ -#if PY_VERSION_HEX >= 0x02020000 - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* tp_iter -> tp_weaklist */ -#endif -#if PY_VERSION_HEX >= 0x02030000 - 0, /* tp_del */ -#endif -#ifdef COUNT_ALLOCS - 0,0,0,0 /* tp_alloc -> tp_next */ -#endif - }; - varlink_type = tmp; - /* for Python 3 we already assigned ob_type in PyVarObject_HEAD_INIT() */ -#if PY_VERSION_HEX < 0x03000000 - varlink_type.ob_type = &PyType_Type; -#endif - type_init = 1; - } - return &varlink_type; - } - - /* Create a variable linking object for use later */ - SWIGINTERN PyObject * - SWIG_Python_newvarlink(void) { - swig_varlinkobject *result = PyObject_NEW(swig_varlinkobject, swig_varlink_type()); - if (result) { - result->vars = 0; - } - return ((PyObject*) result); - } - - SWIGINTERN void - SWIG_Python_addvarlink(PyObject *p, char *name, PyObject *(*get_attr)(void), int (*set_attr)(PyObject *p)) { - swig_varlinkobject *v = (swig_varlinkobject *) p; - swig_globalvar *gv = (swig_globalvar *) malloc(sizeof(swig_globalvar)); - if (gv) { - size_t size = strlen(name)+1; - gv->name = (char *)malloc(size); - if (gv->name) { - strncpy(gv->name,name,size); - gv->get_attr = get_attr; - gv->set_attr = set_attr; - gv->next = v->vars; - } - } - v->vars = gv; - } - - SWIGINTERN PyObject * - SWIG_globals(void) { - static PyObject *_SWIG_globals = 0; - if (!_SWIG_globals) _SWIG_globals = SWIG_newvarlink(); - return _SWIG_globals; - } - - /* ----------------------------------------------------------------------------- - * constants/methods manipulation - * ----------------------------------------------------------------------------- */ - - /* Install Constants */ - SWIGINTERN void - SWIG_Python_InstallConstants(PyObject *d, swig_const_info constants[]) { - PyObject *obj 
= 0; - size_t i; - for (i = 0; constants[i].type; ++i) { - switch(constants[i].type) { - case SWIG_PY_POINTER: - obj = SWIG_NewPointerObj(constants[i].pvalue, *(constants[i]).ptype,0); - break; - case SWIG_PY_BINARY: - obj = SWIG_NewPackedObj(constants[i].pvalue, constants[i].lvalue, *(constants[i].ptype)); - break; - default: - obj = 0; - break; - } - if (obj) { - PyDict_SetItemString(d, constants[i].name, obj); - Py_DECREF(obj); - } - } - } - - /* -----------------------------------------------------------------------------*/ - /* Fix SwigMethods to carry the callback ptrs when needed */ - /* -----------------------------------------------------------------------------*/ - - SWIGINTERN void - SWIG_Python_FixMethods(PyMethodDef *methods, - swig_const_info *const_table, - swig_type_info **types, - swig_type_info **types_initial) { - size_t i; - for (i = 0; methods[i].ml_name; ++i) { - const char *c = methods[i].ml_doc; - if (c && (c = strstr(c, "swig_ptr: "))) { - int j; - swig_const_info *ci = 0; - const char *name = c + 10; - for (j = 0; const_table[j].type; ++j) { - if (strncmp(const_table[j].name, name, - strlen(const_table[j].name)) == 0) { - ci = &(const_table[j]); - break; - } - } - if (ci) { - void *ptr = (ci->type == SWIG_PY_POINTER) ? 
ci->pvalue : 0; - if (ptr) { - size_t shift = (ci->ptype) - types; - swig_type_info *ty = types_initial[shift]; - size_t ldoc = (c - methods[i].ml_doc); - size_t lptr = strlen(ty->name)+2*sizeof(void*)+2; - char *ndoc = (char*)malloc(ldoc + lptr + 10); - if (ndoc) { - char *buff = ndoc; - strncpy(buff, methods[i].ml_doc, ldoc); - buff += ldoc; - strncpy(buff, "swig_ptr: ", 10); - buff += 10; - SWIG_PackVoidPtr(buff, ptr, ty->name, lptr); - methods[i].ml_doc = ndoc; - } - } - } - } - } - } - -#ifdef __cplusplus -} -#endif - -/* -----------------------------------------------------------------------------* - * Partial Init method - * -----------------------------------------------------------------------------*/ - -#ifdef __cplusplus -extern "C" -#endif - -SWIGEXPORT -#if PY_VERSION_HEX >= 0x03000000 -PyObject* -#else -void -#endif -SWIG_init(void) { - PyObject *m, *d; -#if PY_VERSION_HEX >= 0x03000000 - static struct PyModuleDef SWIG_module = { - PyModuleDef_HEAD_INIT, - (char *) SWIG_name, - NULL, - -1, - SwigMethods, - NULL, - NULL, - NULL, - NULL - }; -#endif - - /* Fix SwigMethods to carry the callback ptrs when needed */ - SWIG_Python_FixMethods(SwigMethods, swig_const_table, swig_types, swig_type_initial); - -#if PY_VERSION_HEX >= 0x03000000 - m = PyModule_Create(&SWIG_module); -#else - m = Py_InitModule((char *) SWIG_name, SwigMethods); -#endif - d = PyModule_GetDict(m); - - SWIG_InitializeModule(0); - SWIG_InstallConstants(d,swig_const_table); - - - - import_array(); - -#if PY_VERSION_HEX >= 0x03000000 - return m; -#else - return; -#endif -} - diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/fixed_size.h b/scipy-0.10.1/scipy/sparse/sparsetools/fixed_size.h deleted file mode 100644 index be90abfe36..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/fixed_size.h +++ /dev/null @@ -1,153 +0,0 @@ -#ifndef FIXED_SIZE_H -#define FIXED_SIZE_H - -/* - * templates for fixed size vector and matrix arithmetic - * - */ - - - -/* - * Dot Product - * - */ -template 
-class _dot -{ - public: - inline T operator()(const T * X, const T * Y) - { - _dot d; - return (*X) * (*Y) + d(X + SX, Y + SY); - } -}; -template -class _dot<1,SX,SY,T> -{ - public: - inline T operator()(const T * X, const T * Y) - { - return (*X) * (*Y); - } -}; - -template -inline T dot(const T * X, const T * Y) -{ - _dot d; - return d(X, Y); -} - - - -/* - * Matrix Vector Product Y = A*X - * - */ -template -class _matvec -{ - public: - inline void operator()(const T * A, const T * X, T * Y) - { - *Y += dot(A,X); - _matvec d; - d(A + N, X, Y + SY); - } -}; - -template -class _matvec<1,N,SX,SY,T> -{ - public: - inline void operator()(const T * A, const T * X, T * Y) - { - *Y += dot(A,X); - } -}; - -template -inline void matvec(const T * A, const T * X, T * Y) -{ - _matvec d; - d(A,X,Y); -} - - -/* - * Matrix Matrix Product C = A*B - * - * C is L*N - * A is L*M - * B is M*N - * - */ -template -class _matmat -{ - public: - inline void operator()(const T * A, const T * B, T * C) - { - matvec(A,B,C); - - _matmat d; - d(A, B + 1, C + 1); - } -}; -template -class _matmat -{ - public: - inline void operator()(const T * A, const T * B, T * C) - { - matvec(A,B,C); - } -}; - -template -inline void matmat(const T * A, const T * B, T * C) -{ - _matmat d; - d(A,B,C); -} - - - -/* - * Binary vector operation Z = op(X,Y) - * - */ - -template -class _vec_binop_vec -{ - public: - inline void operator()(const T * X, const T * Y, T * Z, const bin_op& op) - { - *Z = op( *X, *Y ); - _vec_binop_vec d; - d(X + 1, Y + 1, Z + 1, op); - } -}; -template -class _vec_binop_vec<1,T,bin_op> -{ - public: - inline void operator()(const T * X, const T * Y, T * Z, const bin_op& op) - { - *Z = op( *X, *Y ); - } -}; - -template -inline void vec_binop_vec(const T * X, const T * Y, T * Z, const bin_op& op) -{ - _vec_binop_vec d; - d(X,Y,Z,op); -} - - - - -#endif diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/py3k.h b/scipy-0.10.1/scipy/sparse/sparsetools/py3k.h deleted file mode 100644 index 
169ab15294..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/py3k.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Undefine macros defined by SWIG - */ -#if PY_VERSION_HEX >= 0x03000000 - -#ifdef PyInt_Check -#undef PyInt_Check -#endif - -static int __pyfile_check_guard(PyObject *x) -{ - return 0; -} -#define PyFile_Check(x) __pyfile_check_guard((x)) -static int __pyinstance_check_guard(PyObject *x) -{ - return 0; -} -#define PyInstance_Check(x) __pyinstance_check_guard((x)) - -#endif - -#include "numpy/npy_3kcompat.h" diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/scratch.h b/scipy-0.10.1/scipy/sparse/sparsetools/scratch.h deleted file mode 100644 index 6ee3e5461f..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/scratch.h +++ /dev/null @@ -1,391 +0,0 @@ -/* - * These are sparsetools functions that are not currently used - * - */ - -/* - * Compute C = A*B for CSR matrices A,B - * - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in B (hence C is n_row by n_col) - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * I Bp[?] - row pointer - * I Bj[nnz(B)] - column indices - * T Bx[nnz(B)] - nonzeros - * Output Arguments: - * vec Cp - row pointer - * vec Cj - column indices - * vec Cx - nonzeros - * - * Note: - * Output arrays Cp, Cj, and Cx will be allocated within in the method - * - * Note: - * Input: A and B column indices *are not* assumed to be in sorted order - * Output: C column indices *are not* assumed to be in sorted order - * Cx will not contain any zero entries - * - * Complexity: O(n_row*K^2 + max(n_row,n_col)) - * where K is the maximum nnz in a row of A - * and column of B. - * - * - * This implementation closely follows the SMMP algorithm: - * - * "Sparse Matrix Multiplication Package (SMMP)" - * Randolph E. Bank and Craig C. 
Douglas - * - * http://citeseer.ist.psu.edu/445062.html - * http://www.mgnet.org/~douglas/ccd-codes.html - * - */ -template -void csrmucsr(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - const I Bp[], - const I Bj[], - const T Bx[], - std::vector* Cp, - std::vector* Cj, - std::vector* Cx) -{ - Cp->resize(n_row+1,0); - - std::vector next(n_col,-1); - std::vector sums(n_col, 0); - - for(I i = 0; i < n_row; i++){ - I head = -2; - I length = 0; - - I jj_start = Ap[i]; - I jj_end = Ap[i+1]; - for(I jj = jj_start; jj < jj_end; jj++){ - I j = Aj[jj]; - - I kk_start = Bp[j]; - I kk_end = Bp[j+1]; - for(I kk = kk_start; kk < kk_end; kk++){ - I k = Bj[kk]; - - sums[k] += Ax[jj]*Bx[kk]; - - if(next[k] == -1){ - next[k] = head; - head = k; - length++; - } - } - } - - for(I jj = 0; jj < length; jj++){ - if(sums[head] != 0){ - Cj->push_back(head); - Cx->push_back(sums[head]); - } - - I temp = head; - head = next[head]; - - next[temp] = -1; //clear arrays - sums[temp] = 0; - } - - (*Cp)[i+1] = Cx->size(); - } -} - - - - - - - - - - - - - -/* - * Compute M = A for CSR matrix A, dense matrix M - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * T Mx[n_row*n_col] - dense matrix - * - * Note: - * Output array Mx is assumed to be allocated and - * initialized to 0 by the caller. 
- * - */ -template -void csr_todense(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - T Mx[]) -{ - I row_base = 0; - for(I i = 0; i < n_row; i++){ - I row_start = Ap[i]; - I row_end = Ap[i+1]; - for(I jj = row_start; jj < row_end; jj++){ - I j = Aj[jj]; - Mx[row_base + j] = Ax[jj]; - } - row_base += n_col; - } -} -/* - * Compute B = A for CSR matrix A, COO matrix B - * - * Also, with the appropriate arguments can also be used to: - * - convert CSC->COO - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I Ap[n_row+1] - row pointer - * I Aj[nnz(A)] - column indices - * T Ax[nnz(A)] - nonzeros - * - * Output Arguments: - * vec Bi - row indices - * vec Bj - column indices - * vec Bx - nonzeros - * - * Note: - * Output arrays Bi, Bj, Bx will be allocated within in the method - * - * Note: - * Complexity: Linear. - * - */ -template -void csr_tocoo(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const T Ax[], - std::vector* Bi, - std::vector* Bj, - std::vector* Bx) -{ - I nnz = Ap[n_row]; - Bi->reserve(nnz); - Bi->reserve(nnz); - Bx->reserve(nnz); - for(I i = 0; i < n_row; i++){ - I row_start = Ap[i]; - I row_end = Ap[i+1]; - for(I jj = row_start; jj < row_end; jj++){ - Bi->push_back(i); - Bj->push_back(Aj[jj]); - Bx->push_back(Ax[jj]); - } - } -} - - -/* - * Construct CSC matrix A from diagonals - * - * Input Arguments: - * I n_row - number of rows in A - * I n_col - number of columns in A - * I n_diags - number of diagonals - * I diags_indx[n_diags] - where to place each diagonal - * T diags[n_diags][min(n_row,n_col)] - diagonals - * - * Output Arguments: - * vec Ap - row pointer - * vec Aj - column indices - * vec Ax - nonzeros - * - * Note: - * Output arrays Ap, Aj, Ax will be allocated within in the method - * - * Note: - * Output: row indices are not in sorted order - * - * Complexity: Linear - * - */ -template -void spdiags(const I n_row, - const I n_col, - const I 
n_diag, - const I offsets[], - const T diags[], - std::vector * Ap, - std::vector * Ai, - std::vector * Ax) -{ - const I diags_length = std::min(n_row,n_col); - Ap->push_back(0); - - for(I i = 0; i < n_col; i++){ - for(I j = 0; j < n_diag; j++){ - if(offsets[j] <= 0){ //sub-diagonal - I row = i - offsets[j]; - if (row >= n_row){ continue; } - - Ai->push_back(row); - Ax->push_back(diags[j*diags_length + i]); - } else { //super-diagonal - I row = i - offsets[j]; - if (row < 0 || row >= n_row){ continue; } - Ai->push_back(row); - Ax->push_back(diags[j*diags_length + row]); - } - } - Ap->push_back(Ai->size()); - } -} - -template -void bsr_binop_bsr_fixed(const I n_brow, const I n_bcol, - const I Ap[], const I Aj[], const T Ax[], - const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], - const bin_op& op) -{ - //Method that works for unsorted indices - const I RC = R*C; - T zeros[RC] = {0}; - Cp[0] = 0; - I nnz = 0; - - std::cout << "using bsr_ fixed" << std::endl; - for(I i = 0; i < n_brow; i++){ - I A_pos = Ap[i]; - I B_pos = Bp[i]; - I A_end = Ap[i+1]; - I B_end = Bp[i+1]; - - I A_j = Aj[A_pos]; - I B_j = Bj[B_pos]; - - //while not finished with either row - while(A_pos < A_end && B_pos < B_end){ - if(A_j == B_j){ - Cj[nnz] = A_j; - vec_binop_vec (Ax + RC*A_pos, Bx + RC*B_pos, Cx + RC*nnz, op); - if( is_nonzero_block(Cx + RC*nnz,RC) ){ - nnz++; - } - A_j = Aj[++A_pos]; - B_j = Bj[++B_pos]; - } else if (A_j < B_j) { - Cj[nnz] = A_j; - vec_binop_vec (Ax + RC*A_pos, zeros, Cx + RC*nnz, op); - if( is_nonzero_block(Cx + RC*nnz,RC) ){ - nnz++; - } - A_j = Aj[++A_pos]; - } else { - //B_j < A_j - Cj[nnz] = B_j; - vec_binop_vec (zeros, Bx + RC*A_pos, Cx + RC*nnz, op); - if( is_nonzero_block(Cx + RC*nnz,RC) ){ - nnz++; - } - B_j = Bj[++B_pos]; - } - } - - //tail - while(A_pos < A_end){ - Cj[nnz] = A_j; - vec_binop_vec (Ax + RC*A_pos, zeros, Cx + RC*nnz, op); - if( is_nonzero_block(Cx + RC*nnz,RC) ){ - nnz++; - } - A_j = Aj[++A_pos]; - } - while(B_pos < B_end){ - 
Cj[nnz] = B_j; - vec_binop_vec (zeros, Bx + RC*A_pos, Cx + RC*nnz, op); - if( is_nonzero_block(Cx + RC*nnz,RC) ){ - nnz++; - } - B_j = Bj[++B_pos]; - } - - Cp[i+1] = nnz; - } -} - - -/* - * Pass 1 computes CSR row pointer for the matrix product C = A * B - * - */ -template -void csr_matmat_pass1(const I n_row, - const I n_col, - const I Ap[], - const I Aj[], - const I Bp[], - const I Bj[], - I Cp[]) -{ - // method that uses O(1) temp storage - const I hash_size = 1 << 5; - I vals[hash_size]; - I mask[hash_size]; - - std::set spill; - - for(I i = 0; i < hash_size; i++){ - vals[i] = -1; - mask[i] = -1; - } - - Cp[0] = 0; - - I slow_inserts = 0; - I total_inserts = 0; - I nnz = 0; - for(I i = 0; i < n_row; i++){ - spill.clear(); - for(I jj = Ap[i]; jj < Ap[i+1]; jj++){ - I j = Aj[jj]; - for(I kk = Bp[j]; kk < Bp[j+1]; kk++){ - I k = Bj[kk]; - // I hash = k & (hash_size - 1); - I hash = ((I)2654435761 * k) & (hash_size -1 ); - total_inserts++; - if(mask[hash] != i){ - mask[hash] = i; - vals[hash] = k; - nnz++; - } else { - if (vals[hash] != k){ - slow_inserts++; - spill.insert(k); - } - } - } - } - nnz += spill.size(); - Cp[i+1] = nnz; - } - - std::cout << "slow fraction " << ((float) slow_inserts)/ ((float) total_inserts) << std::endl; -} - - diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/setup.py b/scipy-0.10.1/scipy/sparse/sparsetools/setup.py deleted file mode 100755 index 5c4d86b948..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - import numpy - from numpy.distutils.misc_util import Configuration - - config = Configuration('sparsetools',parent_package,top_path) - - for fmt in ['csr','csc','coo','bsr','dia','csgraph']: - sources = [ fmt + '_wrap.cxx' ] - depends = [ fmt + '.h' ] - config.add_extension('_' + fmt, sources=sources, - define_macros=[('__STDC_FORMAT_MACROS', 1)], - depends=depends) - - return config - -if __name__ == 
'__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/setupscons.py b/scipy-0.10.1/scipy/sparse/sparsetools/setupscons.py deleted file mode 100755 index 32544b7c84..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/setupscons.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python - -from os.path import join -import sys - -def configuration(parent_package='',top_path=None): - import numpy - from numpy.distutils.misc_util import Configuration - - config = Configuration('sparsetools',parent_package,top_path, - setup_name = 'setupscons.py') - - config.add_sconscript('SConstruct') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/sparse/sparsetools/sparsetools.h b/scipy-0.10.1/scipy/sparse/sparsetools/sparsetools.h deleted file mode 100644 index 79f1fa5893..0000000000 --- a/scipy-0.10.1/scipy/sparse/sparsetools/sparsetools.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef SPARSETOOLS_H -#define SPARSETOOLS_H - -/* - * sparsetools.h - * A collection of routines for sparse matrix operations: - * - * Original code by Nathan Bell ( http://www.wnbell.com/ ) - * - * Files/formats are: - * csr.h - Compressed Sparse Row format - * csc.h - Compressed Sparse Column format - * coo.h - COOrdinate format - * bsr.h - Block Sparse Row format - * dia.h - DIAgonal format - * - */ - -#include "csr.h" -#include "csc.h" -#include "coo.h" -#include "bsr.h" -#include "dia.h" - -#endif diff --git a/scipy-0.10.1/scipy/sparse/spfuncs.py b/scipy-0.10.1/scipy/sparse/spfuncs.py deleted file mode 100644 index a3868d7028..0000000000 --- a/scipy-0.10.1/scipy/sparse/spfuncs.py +++ /dev/null @@ -1,96 +0,0 @@ -""" Functions that operate on sparse matrices -""" - -__all__ = ['count_blocks','estimate_blocksize'] - -from csr import isspmatrix_csr, csr_matrix -from csc import isspmatrix_csc -from 
sparsetools import csr_count_blocks - -def extract_diagonal(A): - raise NotImplementedError('use .diagonal() instead') - -#def extract_diagonal(A): -# """extract_diagonal(A) returns the main diagonal of A.""" -# #TODO extract k-th diagonal -# if isspmatrix_csr(A) or isspmatrix_csc(A): -# fn = getattr(sparsetools, A.format + "_diagonal") -# y = empty( min(A.shape), dtype=upcast(A.dtype) ) -# fn(A.shape[0],A.shape[1],A.indptr,A.indices,A.data,y) -# return y -# elif isspmatrix_bsr(A): -# M,N = A.shape -# R,C = A.blocksize -# y = empty( min(M,N), dtype=upcast(A.dtype) ) -# fn = sparsetools.bsr_diagonal(M//R, N//C, R, C, \ -# A.indptr, A.indices, ravel(A.data), y) -# return y -# else: -# return extract_diagonal(csr_matrix(A)) - -def estimate_blocksize(A,efficiency=0.7): - """Attempt to determine the blocksize of a sparse matrix - - Returns a blocksize=(r,c) such that - - A.nnz / A.tobsr( (r,c) ).nnz > efficiency - """ - if not (isspmatrix_csr(A) or isspmatrix_csc(A)): - A = csr_matrix(A) - - if A.nnz == 0: - return (1,1) - - if not 0 < efficiency < 1.0: - raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0') - - high_efficiency = (1.0 + efficiency) / 2.0 - nnz = float(A.nnz) - M,N = A.shape - - if M % 2 == 0 and N % 2 == 0: - e22 = nnz / ( 4 * count_blocks(A,(2,2)) ) - else: - e22 = 0.0 - - if M % 3 == 0 and N % 3 == 0: - e33 = nnz / ( 9 * count_blocks(A,(3,3)) ) - else: - e33 = 0.0 - - - if e22 > high_efficiency and e33 > high_efficiency: - e66 = nnz / ( 36 * count_blocks(A,(6,6)) ) - if e66 > efficiency: - return (6,6) - else: - return (3,3) - else: - if M % 4 == 0 and N % 4 == 0: - e44 = nnz / ( 16 * count_blocks(A,(4,4)) ) - else: - e44 = 0.0 - - if e44 > efficiency: - return (4,4) - elif e33 > efficiency: - return (3,3) - elif e22 > efficiency: - return (2,2) - else: - return (1,1) - -def count_blocks(A,blocksize): - """For a given blocksize=(r,c) count the number of occupied - blocks in a sparse matrix A - """ - r,c = blocksize - if r < 1 or c < 1: - 
raise ValueError('r and c must be positive') - - if isspmatrix_csr(A): - M,N = A.shape - return csr_count_blocks(M,N,r,c,A.indptr,A.indices) - elif isspmatrix_csc(A): - return count_blocks(A.T,(c,r)) - else: - return count_blocks(csr_matrix(A),blocksize) diff --git a/scipy-0.10.1/scipy/sparse/sputils.py b/scipy-0.10.1/scipy/sparse/sputils.py deleted file mode 100644 index 3ea564c7bc..0000000000 --- a/scipy-0.10.1/scipy/sparse/sputils.py +++ /dev/null @@ -1,125 +0,0 @@ -""" Utility functions for sparse matrix module -""" - -__all__ = ['upcast','getdtype','isscalarlike','isintlike', - 'isshape','issequence','isdense'] - -import numpy as np - -# keep this list syncronized with sparsetools -#supported_dtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', -# 'int64', 'uint64', 'float32', 'float64', -# 'complex64', 'complex128'] -supported_dtypes = ['int8','uint8','short','ushort','intc','uintc', - 'longlong','ulonglong','single','double','longdouble', - 'csingle','cdouble','clongdouble'] -supported_dtypes = [ np.typeDict[x] for x in supported_dtypes] - -def upcast(*args): - """Returns the nearest supported sparse dtype for the - combination of one or more types. - - upcast(t0, t1, ..., tn) -> T where T is a supported dtype - - Examples - -------- - - >>> upcast('int32') - - >>> upcast('bool') - - >>> upcast('int32','float32') - - >>> upcast('bool',complex,float) - - - """ - sample = np.array([0],dtype=args[0]) - for t in args[1:]: - sample = sample + np.array([0],dtype=t) - - upcast = sample.dtype - - for t in supported_dtypes: - if np.can_cast(sample.dtype,t): - return t - - raise TypeError('no supported conversion for types: %s' % args) - - -def to_native(A): - return np.asarray(A,dtype=A.dtype.newbyteorder('native')) - - -def getdtype(dtype, a=None, default=None): - """Function used to simplify argument processing. If 'dtype' is not - specified (is None), returns a.dtype; otherwise returns a np.dtype - object created from the specified dtype argument. 
If 'dtype' and 'a' - are both None, construct a data type out of the 'default' parameter. - Furthermore, 'dtype' must be in 'allowed' set. - """ - #TODO is this really what we want? - canCast = True - if dtype is None: - try: - newdtype = a.dtype - except AttributeError: - if default is not None: - newdtype = np.dtype(default) - canCast = False - else: - raise TypeError("could not interpret data type") - else: - newdtype = np.dtype(dtype) - - return newdtype - -def isscalarlike(x): - """Is x either a scalar, an array scalar, or a 0-dim array?""" - return np.isscalar(x) or (isdense(x) and x.ndim == 0) - -def isintlike(x): - """Is x appropriate as an index into a sparse matrix? Returns True - if it can be cast safely to a machine int. - """ - if issequence(x): - return False - else: - try: - if int(x) == x: - return True - else: - return False - except TypeError: - return False - -def isshape(x): - """Is x a valid 2-tuple of dimensions? - """ - try: - # Assume it's a tuple of matrix dimensions (M, N) - (M, N) = x - except: - return False - else: - if isintlike(M) and isintlike(N): - if np.rank(M) == 0 and np.rank(N) == 0: - return True - return False - - -def issequence(t): - return isinstance(t, (list, tuple))\ - or (isinstance(t, np.ndarray) and (t.ndim == 1)) - - -def _isinstance(x, _class): - ## - # This makes scipy.sparse.sparse.csc_matrix == __main__.csc_matrix. - c1 = ('%s' % x.__class__).split( '.' ) - c2 = ('%s' % _class).split( '.' 
) - aux = c1[-1] == c2[-1] - return isinstance(x, _class) or aux - -def isdense(x): - return _isinstance(x, np.ndarray) diff --git a/scipy-0.10.1/scipy/sparse/tests/test_base.py b/scipy-0.10.1/scipy/sparse/tests/test_base.py deleted file mode 100644 index 236fb586e1..0000000000 --- a/scipy-0.10.1/scipy/sparse/tests/test_base.py +++ /dev/null @@ -1,1735 +0,0 @@ -# -# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others - -""" Test functions for sparse matrices - -""" -__usage__ = """ -Build sparse: - python setup.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.sparse.test()' -Run tests if sparse is not installed: - python tests/test_sparse.py -""" - -import sys -import warnings - -import numpy as np -from numpy import arange, zeros, array, dot, matrix, asmatrix, asarray, \ - vstack, ndarray, transpose, diag, kron, inf, conjugate, \ - int8, ComplexWarning - -import random -from numpy.testing import assert_raises, assert_equal, assert_array_equal, \ - assert_array_almost_equal, assert_almost_equal, assert_, \ - dec, TestCase, run_module_suite - -import scipy.sparse as sparse -from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, \ - coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \ - eye, isspmatrix, SparseEfficiencyWarning -from scipy.sparse.sputils import supported_dtypes -from scipy.sparse.linalg import splu - - -warnings.simplefilter('ignore', SparseEfficiencyWarning) -warnings.simplefilter('ignore', ComplexWarning) - - -#TODO check that spmatrix( ... 
, copy=X ) is respected -#TODO test prune -#TODO test has_sorted_indices -class _TestCommon: - """test common functionality shared by all sparse formats""" - - def setUp(self): - self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') - self.datsp = self.spmatrix(self.dat) - - def test_empty(self): - """create empty matrices""" - - assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3))) - assert_equal(self.spmatrix((3,3)).nnz, 0) - - def test_invalid_shapes(self): - assert_raises(ValueError, self.spmatrix, (-1,3) ) - assert_raises(ValueError, self.spmatrix, (3,-1) ) - assert_raises(ValueError, self.spmatrix, (-1,-1) ) - - def test_repr(self): - repr(self.datsp) - - def test_str(self): - str(self.datsp) - - def test_empty_arithmetic(self): - """Test manipulating empty matrices. Fails in SciPy SVN <= r1768 - """ - shape = (5, 5) - for mytype in [np.dtype('int32'), np.dtype('float32'), - np.dtype('float64'), np.dtype('complex64'), - np.dtype('complex128')]: - a = self.spmatrix(shape, dtype=mytype) - b = a + a - c = 2 * a - d = a * a.tocsc() - e = a * a.tocsr() - f = a * a.tocoo() - for m in [a,b,c,d,e,f]: - assert_equal(m.A, a.A*a.A) - # These fail in all revisions <= r1768: - assert_equal(m.dtype,mytype) - assert_equal(m.A.dtype,mytype) - - def test_abs(self): - A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d') - assert_equal(abs(A),abs(self.spmatrix(A)).todense()) - - def test_neg(self): - A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d') - assert_equal(-A,(-self.spmatrix(A)).todense()) - - def test_real(self): - D = matrix([[1 + 3j, 2 - 4j]]) - A = self.spmatrix(D) - assert_equal(A.real.todense(),D.real) - - def test_imag(self): - D = matrix([[1 + 3j, 2 - 4j]]) - A = self.spmatrix(D) - assert_equal(A.imag.todense(),D.imag) - - def test_diagonal(self): - """Does the matrix's .diagonal() method work? 
- """ - mats = [] - mats.append( [[1,0,2]] ) - mats.append( [[1],[0],[2]] ) - mats.append( [[0,1],[0,2],[0,3]] ) - mats.append( [[0,0,1],[0,0,2],[0,3,0]] ) - - mats.append( kron(mats[0],[[1,2]]) ) - mats.append( kron(mats[0],[[1],[2]]) ) - mats.append( kron(mats[1],[[1,2],[3,4]]) ) - mats.append( kron(mats[2],[[1,2],[3,4]]) ) - mats.append( kron(mats[3],[[1,2],[3,4]]) ) - mats.append( kron(mats[3],[[1,2,3,4]]) ) - - for m in mats: - assert_equal(self.spmatrix(m).diagonal(),diag(m)) - - - def test_nonzero(self): - A = array([[1, 0, 1],[0, 1, 1],[ 0, 0, 1]]) - Asp = self.spmatrix(A) - - A_nz = set( [tuple(ij) for ij in transpose(A.nonzero())] ) - Asp_nz = set( [tuple(ij) for ij in transpose(Asp.nonzero())] ) - - assert_equal(A_nz, Asp_nz) - - - def test_getrow(self): - assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:]) - assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:]) - - def test_getcol(self): - assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1]) - assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1]) - - def test_sum(self): - """Does the matrix's .sum(axis=...) method work? - """ - assert_array_equal(self.dat.sum(), self.datsp.sum()) - assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None)) - assert_array_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0)) - assert_array_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1)) - - def test_mean(self): - """Does the matrix's .mean(axis=...) method work? 
- """ - assert_array_equal(self.dat.mean(), self.datsp.mean()) - assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None)) - assert_array_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0)) - assert_array_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1)) - - def test_from_array(self): - A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) - assert_array_equal(self.spmatrix(A).toarray(), A) - - A = array([[1.0 + 3j, 0, 0], - [ 0, 2.0 + 5, 0], - [ 0, 0, 0]]) - assert_array_equal(self.spmatrix(A).toarray(), A) - assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16')) - - def test_from_matrix(self): - A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) - assert_array_equal(self.spmatrix(A).todense(), A) - - A = matrix([[1.0 + 3j, 0, 0], - [ 0, 2.0 + 5, 0], - [ 0, 0, 0]]) - assert_array_equal(self.spmatrix(A).toarray(), A) - assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16')) - - def test_from_list(self): - A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]] - assert_array_equal(self.spmatrix(A).todense(), A) - - A = [[1.0 + 3j, 0, 0], - [ 0, 2.0 + 5, 0], - [ 0, 0, 0]] - assert_array_equal(self.spmatrix(A).toarray(), array(A)) - assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16')) - - def test_from_sparse(self): - D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) - S = csr_matrix(D) - assert_array_equal(self.spmatrix(S).toarray(), D) - S = self.spmatrix(D) - assert_array_equal(self.spmatrix(S).toarray(), D) - - - D = array([[1.0 + 3j, 0, 0], - [ 0, 2.0 + 5, 0], - [ 0, 0, 0]]) - S = csr_matrix(D) - assert_array_equal(self.spmatrix(S).toarray(), D) - assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16')) - S = self.spmatrix(D) - assert_array_equal(self.spmatrix(S).toarray(), D) - assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16')) - - #def test_array(self): - # """test array(A) where A is in sparse format""" - # assert_equal( 
array(self.datsp), self.dat ) - - def test_todense(self): - chk = self.datsp.todense() - assert_array_equal(chk,self.dat) - a = matrix([1.,2.,3.]) - dense_dot_dense = a * self.dat - check = a * self.datsp.todense() - assert_array_equal(dense_dot_dense, check) - b = matrix([1.,2.,3.,4.]).T - dense_dot_dense = self.dat * b - check2 = self.datsp.todense() * b - assert_array_equal(dense_dot_dense, check2) - - def test_toarray(self): - dat = asarray(self.dat) - chk = self.datsp.toarray() - assert_array_equal(chk, dat) - a = array([1.,2.,3.]) - dense_dot_dense = dot(a, dat) - check = dot(a, self.datsp.toarray()) - assert_array_equal(dense_dot_dense, check) - b = array([1.,2.,3.,4.]) - dense_dot_dense = dot(dat, b) - check2 = dot(self.datsp.toarray(), b) - assert_array_equal(dense_dot_dense, check2) - - def test_astype(self): - D = array([[1.0 + 3j, 0, 0], - [ 0, 2.0 + 5, 0], - [ 0, 0, 0]]) - S = self.spmatrix(D) - - for x in supported_dtypes: - assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type - assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values - assert_equal(S.astype(x).format, S.format) # format preserved - - def test_asfptype(self): - A = self.spmatrix( arange(6,dtype='int32').reshape(2,3) ) - - assert_equal( A.dtype , np.dtype('int32') ) - assert_equal( A.asfptype().dtype, np.dtype('float64') ) - assert_equal( A.asfptype().format, A.format ) - assert_equal( A.astype('int16').asfptype().dtype , np.dtype('float32') ) - assert_equal( A.astype('complex128').asfptype().dtype , np.dtype('complex128') ) - - B = A.asfptype() - C = B.asfptype() - assert_( B is C ) - - - def test_mul_scalar(self): - assert_array_equal(self.dat*2,(self.datsp*2).todense()) - assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense()) - - def test_rmul_scalar(self): - assert_array_equal(2*self.dat,(2*self.datsp).todense()) - assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense()) - - def test_add(self): - a = self.dat.copy() - a[0,2] = 2.0 - b = self.datsp 
- c = b + a - assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]]) - - def test_radd(self): - a = self.dat.copy() - a[0,2] = 2.0 - b = self.datsp - c = a + b - assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]]) - - def test_sub(self): - assert_array_equal((self.datsp - self.datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) - - A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) - assert_array_equal((self.datsp - A).todense(),self.dat - A.todense()) - assert_array_equal((A - self.datsp).todense(),A.todense() - self.dat) - - def test_rsub(self): - assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) - assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) - - A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) - assert_array_equal((self.dat - A),self.dat - A.todense()) - assert_array_equal((A - self.dat),A.todense() - self.dat) - assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat) - assert_array_equal(self.datsp - A.todense(),self.dat - A.todense()) - - def test_elementwise_multiply(self): - # real/real - A = array([[4,0,9],[2,-3,5]]) - B = array([[0,7,0],[0,-4,0]]) - Asp = self.spmatrix(A) - Bsp = self.spmatrix(B) - assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) #sparse/sparse - assert_almost_equal( Asp.multiply(B), A*B) #sparse/dense - - # complex/complex - C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) - D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) - Csp = self.spmatrix(C) - Dsp = self.spmatrix(D) - assert_almost_equal( Csp.multiply(Dsp).todense(), C*D) #sparse/sparse - assert_almost_equal( Csp.multiply(D), C*D) #sparse/dense - - # real/complex - assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse - assert_almost_equal( Asp.multiply(D), A*D) #sparse/dense - - - def test_elementwise_divide(self): - expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] - assert_array_equal((self.datsp / self.datsp).todense(),expected) - - denom = 
self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) - res = matrix([[1,0,0,0.5],[-3,0,inf,0],[0,0.25,0,0]],'d') - assert_array_equal((self.datsp / denom).todense(),res) - - # complex - A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) - B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) - Asp = self.spmatrix(A) - Bsp = self.spmatrix(B) - assert_almost_equal( (Asp / Bsp).todense(), A/B) - - def test_pow(self): - A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]]) - B = self.spmatrix( A ) - - for exponent in [0,1,2,3]: - assert_array_equal((B**exponent).todense(),A**exponent) - - #invalid exponents - for exponent in [-1, 2.2, 1 + 3j]: - self.assertRaises( Exception, B.__pow__, exponent ) - - #nonsquare matrix - B = self.spmatrix(A[:3,:]) - self.assertRaises( Exception, B.__pow__, 1 ) - - - def test_rmatvec(self): - M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) - assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray())) - row = matrix([[1,2,3,4]]) - assert_array_almost_equal(row*M, row*M.todense()) - - def test_small_multiplication(self): - """test that A*x works for x with shape () (1,) and (1,1) - """ - A = self.spmatrix([[1],[2],[3]]) - - assert_(isspmatrix(A * array(1))) - assert_equal((A * array(1)).todense(), [[1],[2],[3]]) - assert_equal(A * array([1]), array([1,2,3])) - assert_equal(A * array([[1]]), array([[1],[2],[3]])) - - def test_matvec(self): - M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) - col = matrix([1,2,3]).T - assert_array_almost_equal(M * col, M.todense() * col) - - #check result dimensions (ticket #514) - assert_equal((M * array([1,2,3])).shape,(4,)) - assert_equal((M * array([[1],[2],[3]])).shape,(4,1)) - assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1)) - - #check result type - assert_(isinstance( M * array([1,2,3]), ndarray)) - assert_(isinstance( M * matrix([1,2,3]).T, matrix)) - - #ensure exception is raised for improper dimensions - bad_vecs = [array([1,2]), array([1,2,3,4]), 
array([[1],[2]]), - matrix([1,2,3]), matrix([[1],[2]])] - for x in bad_vecs: - assert_raises(ValueError, M.__mul__, x) - - # Should this be supported or not?! - #flat = array([1,2,3]) - #assert_array_almost_equal(M*flat, M.todense()*flat) - # Currently numpy dense matrices promote the result to a 1x3 matrix, - # whereas sparse matrices leave the result as a rank-1 array. Which - # is preferable? - - # Note: the following command does not work. Both NumPy matrices - # and spmatrices should raise exceptions! - # assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3]) - - # The current relationship between sparse matrix products and array - # products is as follows: - assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3])) - assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T) - # Note that the result of M * x is dense if x has a singleton dimension. - - # Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col) - # is rank-2. Is this desirable? - - def test_matmat_sparse(self): - a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) - a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) - b = matrix([[0,1],[1,0],[0,2]],'d') - asp = self.spmatrix(a) - bsp = self.spmatrix(b) - assert_array_almost_equal((asp*bsp).todense(), a*b) - assert_array_almost_equal( asp*b, a*b) - assert_array_almost_equal( a*bsp, a*b) - assert_array_almost_equal( a2*bsp, a*b) - - # Now try performing cross-type multplication: - csp = bsp.tocsc() - c = b - assert_array_almost_equal((asp*csp).todense(), a*c) - assert_array_almost_equal( asp*c, a*c) - - assert_array_almost_equal( a*csp, a*c) - assert_array_almost_equal( a2*csp, a*c) - csp = bsp.tocsr() - assert_array_almost_equal((asp*csp).todense(), a*c) - assert_array_almost_equal( asp*c, a*c) - - assert_array_almost_equal( a*csp, a*c) - assert_array_almost_equal( a2*csp, a*c) - csp = bsp.tocoo() - assert_array_almost_equal((asp*csp).todense(), a*c) - assert_array_almost_equal( asp*c, a*c) - - assert_array_almost_equal( 
a*csp, a*c) - assert_array_almost_equal( a2*csp, a*c) - - # Test provided by Andy Fraser, 2006-03-26 - L = 30 - frac = .3 - random.seed(0) # make runs repeatable - A = zeros((L,2)) - for i in xrange(L): - for j in xrange(2): - r = random.random() - if r < frac: - A[i,j] = r/frac - - A = self.spmatrix(A) - B = A*A.T - assert_array_almost_equal(B.todense(), A.todense() * A.T.todense()) - assert_array_almost_equal(B.todense(), A.todense() * A.todense().T) - - - # check dimension mismatch 2x2 times 3x2 - A = self.spmatrix( [[1,2],[3,4]] ) - B = self.spmatrix( [[1,2],[3,4],[5,6]] ) - assert_raises(ValueError, A.__mul__, B) - - def test_matmat_dense(self): - a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) - asp = self.spmatrix(a) - - # check both array and matrix types - bs = [ array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]]) ] - - for b in bs: - result = asp*b - assert_( isinstance(result, type(b)) ) - assert_equal( result.shape, (4,2) ) - assert_equal( result, dot(a,b) ) - - def test_sparse_format_conversions(self): - A = sparse.kron( [[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]] ) - D = A.todense() - A = self.spmatrix(A) - - for format in ['bsr','coo','csc','csr','dia','dok','lil']: - a = A.asformat(format) - assert_equal(a.format,format) - assert_array_equal(a.todense(), D) - - b = self.spmatrix(D+3j).asformat(format) - assert_equal(b.format,format) - assert_array_equal(b.todense(), D+3j) - - c = eval(format + '_matrix')(A) - assert_equal(c.format,format) - assert_array_equal(c.todense(), D) - - - def test_tobsr(self): - x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]]) - y = array([[0,1,2],[3,0,5]]) - A = kron(x,y) - Asp = self.spmatrix(A) - for format in ['bsr']: - fn = getattr(Asp, 'to' + format ) - - for X in [ 1, 2, 3, 6 ]: - for Y in [ 1, 2, 3, 4, 6, 12]: - assert_equal( fn(blocksize=(X,Y)).todense(), A) - - - def test_transpose(self): - a = self.datsp.transpose() - b = self.dat.transpose() - assert_array_equal(a.todense(), b) - 
assert_array_equal(a.transpose().todense(), self.dat) - - assert_array_equal( self.spmatrix((3,4)).T.todense(), zeros((4,3)) ) - - - def test_add_dense(self): - """ adding a dense matrix to a sparse matrix - """ - sum1 = self.dat + self.datsp - assert_array_equal(sum1, 2*self.dat) - sum2 = self.datsp + self.dat - assert_array_equal(sum2, 2*self.dat) - - def test_sub_dense(self): - """ subtracting a dense matrix to/from a sparse matrix - """ - sum1 = 3*self.dat - self.datsp - assert_array_equal(sum1, 2*self.dat) - sum2 = 3*self.datsp - self.dat - assert_array_equal(sum2, 2*self.dat) - - - def test_copy(self): - """ Check whether the copy=True and copy=False keywords work - """ - A = self.datsp - - #check that copy preserves format - assert_equal(A.copy().format, A.format) - assert_equal(A.__class__(A,copy=True).format, A.format) - assert_equal(A.__class__(A,copy=False).format, A.format) - - assert_equal(A.copy().todense(), A.todense()) - assert_equal(A.__class__(A,copy=True).todense(), A.todense()) - assert_equal(A.__class__(A,copy=False).todense(), A.todense()) - - #check that XXX_matrix.toXXX() works - toself = getattr(A,'to' + A.format) - assert_equal(toself().format, A.format) - assert_equal(toself(copy=True).format, A.format) - assert_equal(toself(copy=False).format, A.format) - - assert_equal(toself().todense(), A.todense()) - assert_equal(toself(copy=True).todense(), A.todense()) - assert_equal(toself(copy=False).todense(), A.todense()) - - - # check whether the data is copied? 
- # TODO: deal with non-indexable types somehow - B = A.copy() - try: - B[0,0] += 1 - assert_(B[0,0] != A[0,0]) - except NotImplementedError: - # not all sparse matrices can be indexed - pass - except TypeError: - # not all sparse matrices can be indexed - pass - - # Eventually we'd like to allow matrix products between dense - # and sparse matrices using the normal dot() function: - #def test_dense_dot_sparse(self): - # a = array([1.,2.,3.]) - # dense_dot_dense = dot(a, self.dat) - # dense_dot_sparse = dot(a, self.datsp) - # assert_array_equal(dense_dot_dense, dense_dot_sparse) - - #def test_sparse_dot_dense(self): - # b = array([1.,2.,3.,4.]) - # dense_dot_dense = dot(self.dat, b) - # dense_dot_sparse = dot(self.datsp, b) - # assert_array_equal(dense_dot_dense, dense_dot_sparse) - - - -class _TestInplaceArithmetic: - def test_imul_scalar(self): - a = self.datsp.copy() - a *= 2 - assert_array_equal(self.dat*2,a.todense()) - - a = self.datsp.copy() - a *= 17.3 - assert_array_equal(self.dat*17.3,a.todense()) - - def test_idiv_scalar(self): - a = self.datsp.copy() - a /= 2 - assert_array_equal(self.dat/2,a.todense()) - - a = self.datsp.copy() - a /= 17.3 - assert_array_equal(self.dat/17.3,a.todense()) - - -class _TestGetSet: - def test_setelement(self): - A = self.spmatrix((3,4)) - A[ 0, 0] = 0 # bug 870 - A[ 1, 2] = 4.0 - A[ 0, 1] = 3 - A[ 2, 0] = 2.0 - A[ 0,-1] = 8 - A[-1,-2] = 7 - A[ 0, 1] = 5 - assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]]) - - for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]: - assert_raises(IndexError, A.__setitem__, ij, 123.0) - - for v in [[1,2,3], array([1,2,3])]: - assert_raises(ValueError, A.__setitem__, (0,0), v) - - for v in [3j]: - assert_raises(TypeError, A.__setitem__, (0,0), v) - - def test_getelement(self): - D = array([[1,0,0], - [4,3,0], - [0,2,0], - [0,0,0]]) - A = self.spmatrix(D) - - M,N = D.shape - - for i in range(-M, M): - for j in range(-N, N): - assert_equal(A[i,j], D[i,j]) - - for ij in 
[(0,3),(-1,3),(4,0),(4,3),(4,-1)]: - assert_raises(IndexError, A.__getitem__, ij) - -class _TestSolve: - def test_solve(self): - """ Test whether the lu_solve command segfaults, as reported by Nils - Wagner for a 64-bit machine, 02 March 2005 (EJS) - """ - n = 20 - np.random.seed(0) #make tests repeatable - A = zeros((n,n), dtype=complex) - x = np.random.rand(n) - y = np.random.rand(n-1)+1j*np.random.rand(n-1) - r = np.random.rand(n) - for i in range(len(x)): - A[i,i] = x[i] - for i in range(len(y)): - A[i,i+1] = y[i] - A[i+1,i] = conjugate(y[i]) - A = self.spmatrix(A) - x = splu(A).solve(r) - assert_almost_equal(A*x,r) - - -class _TestHorizSlicing: - """Tests horizontal slicing (e.g. [0, :]). Tests for individual sparse - matrix types that implement this should derive from this class. - """ - def test_get_horiz_slice(self): - """Test for new slice functionality (EJS)""" - B = asmatrix(arange(50.).reshape(5,10)) - A = self.spmatrix(B) - assert_array_equal(B[1,:], A[1,:].todense()) - assert_array_equal(B[1,2:5], A[1,2:5].todense()) - - C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) - D = self.spmatrix(C) - assert_array_equal(C[1, 1:3], D[1, 1:3].todense()) - - # Now test slicing when a row contains only zeros - E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) - F = self.spmatrix(E) - assert_array_equal(E[1, 1:3], F[1, 1:3].todense()) - assert_array_equal(E[2, -2:], F[2, -2:].A) - - # The following should raise exceptions: - caught = 0 - try: - a = A[:,11] - except IndexError: - caught += 1 - try: - a = A[6,3:7] - except IndexError: - caught += 1 - assert_(caught == 2) - - -class _TestVertSlicing: - """Tests vertical slicing (e.g. [:, 0]). Tests for individual sparse - matrix types that implement this should derive from this class. 
- """ - def test_get_vert_slice(self): - """Test for new slice functionality (EJS)""" - B = asmatrix(arange(50.).reshape(5,10)) - A = self.spmatrix(B) - assert_array_equal(B[2:5,0], A[2:5,0].todense()) - assert_array_equal(B[:,1], A[:,1].todense()) - - C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) - D = self.spmatrix(C) - assert_array_equal(C[1:3, 1], D[1:3, 1].todense()) - assert_array_equal(C[:, 2], D[:, 2].todense()) - - # Now test slicing when a column contains only zeros - E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) - F = self.spmatrix(E) - assert_array_equal(E[:, 1], F[:, 1].todense()) - assert_array_equal(E[-2:, 2], F[-2:, 2].todense()) - - # The following should raise exceptions: - caught = 0 - try: - a = A[:,11] - except IndexError: - caught += 1 - try: - a = A[6,3:7] - except IndexError: - caught += 1 - assert_(caught == 2) - - - -class _TestBothSlicing: - """Tests vertical and horizontal slicing (e.g. [:,0:2]). Tests for - individual sparse matrix types that implement this should derive from this - class. - """ - def test_get_slices(self): - B = asmatrix(arange(50.).reshape(5,10)) - A = self.spmatrix(B) - assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3]) - assert_array_equal(A[1:,:-1].todense(), B[1:,:-1]) - assert_array_equal(A[:-1,1:].todense(), B[:-1,1:]) - - # Now test slicing when a column contains only zeros - E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) - F = self.spmatrix(E) - assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense()) - assert_array_equal(E[:, 1:], F[:, 1:].todense()) - -class _TestFancyIndexing: - """Tests fancy indexing features. The tests for any matrix formats - that implement these features should derive from this class. 
- """ - def test_fancy_indexing_set(self): - n, m = (5, 10) - def _test_set(i, j, nitems): - A = self.spmatrix((n, m)) - A[i, j] = 1 - assert_almost_equal(A.sum(), nitems) - assert_almost_equal(A[i, j], 1) - - # [i,j] - for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)), - (array(-1), array(-2))]: - _test_set(i, j, 1) - - # [i,1:2] - for i, j in [(2, slice(m)), (2, slice(5, -2)), (array(2), slice(5, -2))]: - _test_set(i, j, 3) - - def test_fancy_indexing(self): - B = asmatrix(arange(50).reshape(5,10)) - A = self.spmatrix( B ) - - # [i,j] - assert_equal(A[2,3], B[2,3]) - assert_equal(A[-1,8], B[-1,8]) - assert_equal(A[-1,-2],B[-1,-2]) - assert_equal(A[array(-1),-2],B[-1,-2]) - assert_equal(A[-1,array(-2)],B[-1,-2]) - assert_equal(A[array(-1),array(-2)],B[-1,-2]) - - # [i,1:2] - assert_equal(A[2,:].todense(), B[2,:]) - assert_equal(A[2,5:-2].todense(),B[2,5:-2]) - assert_equal(A[array(2),5:-2].todense(),B[2,5:-2]) - - # [i,[1,2]] - assert_equal(A[3,[1,3]].todense(), B[3,[1,3]]) - assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]]) - assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]]) - assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]]) - assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]]) - - # [1:2,j] - assert_equal(A[:,2].todense(), B[:,2]) - assert_equal(A[3:4,9].todense(), B[3:4,9]) - assert_equal(A[1:4,-5].todense(),B[1:4,-5]) - assert_equal(A[2:-1,3].todense(),B[2:-1,3]) - assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3]) - - # [1:2,1:2] - assert_equal(A[1:2,1:2].todense(),B[1:2,1:2]) - assert_equal(A[4:,3:].todense(), B[4:,3:]) - assert_equal(A[:4,:5].todense(), B[:4,:5]) - assert_equal(A[2:-1,:5].todense(),B[2:-1,:5]) - - # [1:2,[1,2]] - assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]]) - assert_equal(A[3:4,[9]].todense(), B[3:4,[9]]) - assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]]) - assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]]) - - # [[1,2],j] - assert_equal(A[[1,3],3].todense(), 
B[[1,3],3]) - assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4]) - assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4]) - assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4]) - assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4]) - - # [[1,2],1:2] - assert_equal(A[[1,3],:].todense(), B[[1,3],:]) - assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1]) - assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1]) - - # [[1,2],[1,2]] - assert_equal(A[[1,3],[2,4]], B[[1,3],[2,4]]) - assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]]) - assert_equal(A[array([-1,-3]),[2,-4]],B[[-1,-3],[2,-4]]) - assert_equal(A[[-1,-3],array([2,-4])],B[[-1,-3],[2,-4]]) - assert_equal(A[array([-1,-3]),array([2,-4])],B[[-1,-3],[2,-4]]) - - # [[[1],[2]],[1,2]] - assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]]) - assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]]) - assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]]) - assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]]) - assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]]) - - # [i] - assert_equal(A[1,:].todense(), B[1,:]) - assert_equal(A[-2,:].todense(),B[-2,:]) - assert_equal(A[array(-2),:].todense(),B[-2,:]) - - # [1:2] - assert_equal(A[1:4].todense(), B[1:4]) - assert_equal(A[1:-2].todense(),B[1:-2]) - - # [[1,2]] - assert_equal(A[[1,3]].todense(), B[[1,3]]) - assert_equal(A[[-1,-3]].todense(),B[[-1,-3]]) - assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]]) - - # [[1,2],:][:,[1,2]] - assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]] ) - assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]] ) - assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]] ) - - # [:,[1,2]][[1,2],:] - assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:] ) - assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:] ) 
- assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:] ) - - - # Check bug reported by Robert Cimrman: - # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 - s = slice(int8(2),int8(4),None) - assert_equal(A[s,:].todense(), B[2:4,:]) - assert_equal(A[:,s].todense(), B[:,2:4]) - - def test_fancy_indexing_randomized(self): - random.seed(0) # make runs repeatable - - NUM_SAMPLES = 50 - M = 6 - N = 4 - - D = np.asmatrix(np.random.rand(M,N)) - D = np.multiply(D, D > 0.5) - - I = np.random.random_integers(-M + 1, M - 1, size=NUM_SAMPLES) - J = np.random.random_integers(-N + 1, N - 1, size=NUM_SAMPLES) - - S = self.spmatrix(D) - - assert_equal(S[I,J], D[I,J]) - - I_bad = I + M - J_bad = J - N - - assert_raises(IndexError, S.__getitem__, (I_bad,J)) - assert_raises(IndexError, S.__getitem__, (I,J_bad)) - - -class _TestArithmetic: - """ - Test real/complex arithmetic - """ - def arith_init(self): - #these can be represented exactly in FP (so arithmetic should be exact) - self.A = matrix([[ -1.5, 6.5, 0, 2.25, 0, 0], - [ 3.125, -7.875, 0.625, 0, 0, 0], - [ 0, 0, -0.125, 1.0, 0, 0], - [ 0, 0, 8.375, 0, 0, 0]],'float64') - self.B = matrix([[ 0.375, 0, 0, 0, -5, 2.5], - [ 14.25, -3.75, 0, 0, -0.125, 0], - [ 0, 7.25, 0, 0, 0, 0], - [ 18.5, -0.0625, 0, 0, 0, 0]],'complex128') - self.B.imag = matrix([[ 1.25, 0, 0, 0, 6, -3.875], - [ 2.25, 4.125, 0, 0, 0, 2.75], - [ 0, 4.125, 0, 0, 0, 0], - [ -0.0625, 0, 0, 0, 0, 0]],'float64') - - #fractions are all x/16ths - assert_array_equal((self.A*16).astype('int32'),16*self.A) - assert_array_equal((self.B.real*16).astype('int32'),16*self.B.real) - assert_array_equal((self.B.imag*16).astype('int32'),16*self.B.imag) - - self.Asp = self.spmatrix(self.A) - self.Bsp = self.spmatrix(self.B) - - def test_add_sub(self): - self.arith_init() - - #basic tests - assert_array_equal((self.Asp+self.Bsp).todense(),self.A+self.B) - - #check conversions - for x in supported_dtypes: - A = self.A.astype(x) - Asp = 
self.spmatrix(A) - for y in supported_dtypes: - B = self.B.astype(y) - Bsp = self.spmatrix(B) - - #addition - D1 = A + B - S1 = Asp + Bsp - - assert_equal(S1.dtype,D1.dtype) - assert_array_equal(S1.todense(),D1) - assert_array_equal(Asp + B,D1) #check sparse + dense - assert_array_equal(A + Bsp,D1) #check dense + sparse - - #subtraction - D1 = A - B - S1 = Asp - Bsp - - assert_equal(S1.dtype,D1.dtype) - assert_array_equal(S1.todense(),D1) - assert_array_equal(Asp - B,D1) #check sparse - dense - assert_array_equal(A - Bsp,D1) #check dense - sparse - - - def test_mu(self): - self.arith_init() - - #basic tests - assert_array_equal((self.Asp*self.Bsp.T).todense(),self.A*self.B.T) - - for x in supported_dtypes: - A = self.A.astype(x) - Asp = self.spmatrix(A) - for y in supported_dtypes: - B = self.B.astype(y) - Bsp = self.spmatrix(B) - - D1 = A * B.T - S1 = Asp * Bsp.T - - assert_array_equal(S1.todense(),D1) - assert_equal(S1.dtype,D1.dtype) - - - -class TestCSR(_TestCommon, _TestGetSet, _TestSolve, - _TestInplaceArithmetic, _TestArithmetic, - _TestHorizSlicing, _TestVertSlicing, _TestBothSlicing, - _TestFancyIndexing, TestCase): - spmatrix = csr_matrix - - @dec.knownfailureif(True, "Fancy indexing is known to be broken for CSR" \ - " matrices") - def test_fancy_indexing_set(self): - _TestFancyIndexing.test_fancy_indexing_set(self) - - def test_constructor1(self): - b = matrix([[0,4,0], - [3,0,0], - [0,2,0]],'d') - bsp = csr_matrix(b) - assert_array_almost_equal(bsp.data,[4,3,2]) - assert_array_equal(bsp.indices,[1,0,1]) - assert_array_equal(bsp.indptr,[0,1,2,3]) - assert_equal(bsp.getnnz(),3) - assert_equal(bsp.getformat(),'csr') - assert_array_equal(bsp.todense(),b) - - def test_constructor2(self): - b = zeros((6,6),'d') - b[3,4] = 5 - bsp = csr_matrix(b) - assert_array_almost_equal(bsp.data,[5]) - assert_array_equal(bsp.indices,[4]) - assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1]) - assert_array_almost_equal(bsp.todense(),b) - - def test_constructor3(self): - b = 
matrix([[1,0], - [0,2], - [3,0]],'d') - bsp = csr_matrix(b) - assert_array_almost_equal(bsp.data,[1,2,3]) - assert_array_equal(bsp.indices,[0,1,0]) - assert_array_equal(bsp.indptr,[0,1,2,3]) - assert_array_almost_equal(bsp.todense(),b) - -### currently disabled -## def test_constructor4(self): -## """try using int64 indices""" -## data = arange( 6 ) + 1 -## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' ) -## ptr = array( [0, 2, 4, 6], dtype='int64' ) -## -## a = csr_matrix( (data, col, ptr), shape = (3,3) ) -## -## b = matrix([[0,1,2], -## [4,3,0], -## [5,0,6]],'d') -## -## assert_equal(a.indptr.dtype,numpy.dtype('int64')) -## assert_equal(a.indices.dtype,numpy.dtype('int64')) -## assert_array_equal(a.todense(),b) - - def test_constructor4(self): - """using (data, ij) format""" - row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) - col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) - data = array([ 6., 10., 3., 9., 1., 4., - 11., 2., 8., 5., 7.]) - - ij = vstack((row,col)) - csr = csr_matrix((data,ij),(4,3)) - assert_array_equal(arange(12).reshape(4,3),csr.todense()) - - def test_constructor5(self): - """infer dimensions from arrays""" - indptr = array([0,1,3,3]) - indices = array([0,5,1,2]) - data = array([1,2,3,4]) - csr = csr_matrix((data, indices, indptr)) - assert_array_equal(csr.shape,(3,6)) - - def test_sort_indices(self): - data = arange( 5 ) - indices = array( [7, 2, 1, 5, 4] ) - indptr = array( [0, 3, 5] ) - asp = csr_matrix( (data, indices, indptr), shape=(2,10) ) - bsp = asp.copy() - asp.sort_indices( ) - assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) - assert_array_equal(asp.todense(),bsp.todense()) - - def test_eliminate_zeros(self): - data = array( [1, 0, 0, 0, 2, 0, 3, 0] ) - indices = array( [1, 2, 3, 4, 5, 6, 7, 8] ) - indptr = array( [0, 3, 8] ) - asp = csr_matrix( (data, indices, indptr), shape=(2,10) ) - bsp = asp.copy() - asp.eliminate_zeros( ) - assert_array_equal(asp.nnz, 3) - assert_array_equal(asp.data,[1, 2, 3]) - 
assert_array_equal(asp.todense(),bsp.todense()) - - def test_unsorted_arithmetic(self): - data = arange( 5 ) - indices = array( [7, 2, 1, 5, 4] ) - indptr = array( [0, 3, 5] ) - asp = csr_matrix( (data, indices, indptr), shape=(2,10) ) - data = arange( 6 ) - indices = array( [8, 1, 5, 7, 2, 4] ) - indptr = array( [0, 2, 6] ) - bsp = csr_matrix( (data, indices, indptr), shape=(2,10) ) - assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense()) - - - - -class TestCSC(_TestCommon, _TestGetSet, _TestSolve, - _TestInplaceArithmetic, _TestArithmetic, - _TestHorizSlicing, _TestVertSlicing, _TestBothSlicing, - _TestFancyIndexing, TestCase): - spmatrix = csc_matrix - - @dec.knownfailureif(True, "Fancy indexing is known to be broken for CSC" \ - " matrices") - def test_fancy_indexing_set(self): - _TestFancyIndexing.test_fancy_indexing_set(self) - - def test_constructor1(self): - b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') - bsp = csc_matrix(b) - assert_array_almost_equal(bsp.data,[1,2,1,3]) - assert_array_equal(bsp.indices,[0,2,1,2]) - assert_array_equal(bsp.indptr,[0,1,2,3,4]) - assert_equal(bsp.getnnz(),4) - assert_equal(bsp.shape,b.shape) - assert_equal(bsp.getformat(),'csc') - - def test_constructor2(self): - b = zeros((6,6),'d') - b[2,4] = 5 - bsp = csc_matrix(b) - assert_array_almost_equal(bsp.data,[5]) - assert_array_equal(bsp.indices,[2]) - assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1]) - - def test_constructor3(self): - b = matrix([[1,0],[0,0],[0,2]],'d') - bsp = csc_matrix(b) - assert_array_almost_equal(bsp.data,[1,2]) - assert_array_equal(bsp.indices,[0,2]) - assert_array_equal(bsp.indptr,[0,1,2]) - - def test_constructor4(self): - """using (data, ij) format""" - row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) - col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) - data = array([ 6., 10., 3., 9., 1., 4., - 11., 2., 8., 5., 7.]) - - ij = vstack((row,col)) - csc = csc_matrix((data,ij),(4,3)) - assert_array_equal(arange(12).reshape(4,3),csc.todense()) - - def 
test_constructor5(self): - """infer dimensions from arrays""" - indptr = array([0,1,3,3]) - indices = array([0,5,1,2]) - data = array([1,2,3,4]) - csc = csc_matrix((data, indices, indptr)) - assert_array_equal(csc.shape,(6,3)) - - def test_eliminate_zeros(self): - data = array( [1, 0, 0, 0, 2, 0, 3, 0] ) - indices = array( [1, 2, 3, 4, 5, 6, 7, 8] ) - indptr = array( [0, 3, 8] ) - asp = csc_matrix( (data, indices, indptr), shape=(10,2) ) - bsp = asp.copy() - asp.eliminate_zeros( ) - assert_array_equal(asp.nnz, 3) - assert_array_equal(asp.data,[1, 2, 3]) - assert_array_equal(asp.todense(),bsp.todense()) - - def test_sort_indices(self): - data = arange( 5 ) - row = array( [7, 2, 1, 5, 4] ) - ptr = [0, 3, 5] - asp = csc_matrix( (data, row, ptr), shape=(10,2) ) - bsp = asp.copy() - asp.sort_indices() - assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) - assert_array_equal(asp.todense(),bsp.todense()) - - def test_unsorted_arithmetic(self): - data = arange( 5 ) - indices = array( [7, 2, 1, 5, 4] ) - indptr = array( [0, 3, 5] ) - asp = csc_matrix( (data, indices, indptr), shape=(10,2) ) - data = arange( 6 ) - indices = array( [8, 1, 5, 7, 2, 4] ) - indptr = array( [0, 2, 6] ) - bsp = csc_matrix( (data, indices, indptr), shape=(10,2) ) - assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense()) - -class TestDOK(_TestCommon, _TestGetSet, _TestSolve, TestCase): - spmatrix = dok_matrix - - def test_mult(self): - A = dok_matrix((10,10)) - A[0,3] = 10 - A[5,6] = 20 - D = A*A.T - E = A*A.H - assert_array_equal(D.A, E.A) - - def test_add(self): - A = dok_matrix((3,2)) - A[0,1] = -10 - A[2,0] = 20 - A = A + 10 - B = matrix([[10, 0], [10, 10], [30, 10]]) - assert_array_equal(A.todense(), B) - - def test_convert(self): - """Test provided by Andrew Straw. Fails in SciPy <= r1477. 
- """ - (m, n) = (6, 7) - a=dok_matrix((m, n)) - - # set a few elements, but none in the last column - a[2,1]=1 - a[0,2]=2 - a[3,1]=3 - a[1,5]=4 - a[4,3]=5 - a[4,2]=6 - - # assert that the last column is all zeros - assert_array_equal( a.toarray()[:,n-1], zeros(m,) ) - - # make sure it still works for CSC format - csc=a.tocsc() - assert_array_equal( csc.toarray()[:,n-1], zeros(m,) ) - - # now test CSR - (m, n) = (n, m) - b = a.transpose() - assert_equal(b.shape, (m, n)) - # assert that the last row is all zeros - assert_array_equal( b.toarray()[m-1,:], zeros(n,) ) - - # make sure it still works for CSR format - csr=b.tocsr() - assert_array_equal( csr.toarray()[m-1,:], zeros(n,)) - - def test_set_slice(self): - """Test for slice functionality (EJS)""" - A = dok_matrix((5,10)) - B = zeros((5,10), float) - A[:,0] = 1 - B[:,0] = 1 - assert_array_equal(A.todense(), B) - A[1,:] = 2 - B[1,:] = 2 - assert_array_equal(A.todense(), B) - A[:,:] = 3 - B[:,:] = 3 - assert_array_equal(A.todense(), B) - A[1:5, 3] = 4 - B[1:5, 3] = 4 - assert_array_equal(A.todense(), B) - A[1, 3:6] = 5 - B[1, 3:6] = 5 - assert_array_equal(A.todense(), B) - A[1:4, 3:6] = 6 - B[1:4, 3:6] = 6 - assert_array_equal(A.todense(), B) - A[1, 3:10:3] = 7 - B[1, 3:10:3] = 7 - assert_array_equal(A.todense(), B) - A[1:5, 0] = range(1,5) - B[1:5, 0] = range(1,5) - assert_array_equal(A.todense(), B) - A[0, 1:10:2] = xrange(1,10,2) - B[0, 1:10:2] = xrange(1,10,2) - assert_array_equal(A.todense(), B) - caught = 0 - # The next 6 commands should raise exceptions - try: - A[0,0] = range(100) - except ValueError: - caught += 1 - try: - A[0,0] = arange(100) - except ValueError: - caught += 1 - try: - A[0,:] = range(100) - except ValueError: - caught += 1 - try: - A[:,1] = range(100) - except ValueError: - caught += 1 - try: - A[:,1] = A.copy() - except: - caught += 1 - assert_equal(caught,5) - - def test_ctor(self): - caught = 0 - # Empty ctor - try: - A = dok_matrix() - except TypeError, e: - caught+=1 - 
assert_equal(caught, 1) - - # Dense ctor - b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') - A = dok_matrix(b) - assert_equal(A.todense(), b) - - # Sparse ctor - c = csr_matrix(b) - assert_equal(A.todense(), c.todense()) - - def test_resize(self): - """A couple basic tests of the resize() method. - - resize(shape) resizes the array in-place. - """ - a = dok_matrix((5,5)) - a[:,0] = 1 - a.resize((2,2)) - expected1 = array([[1,0],[1,0]]) - assert_array_equal(a.todense(), expected1) - a.resize((3,2)) - expected2 = array([[1,0],[1,0],[0,0]]) - assert_array_equal(a.todense(), expected2) - - - def test_ticket1160(self): - """Regression test for ticket #1160.""" - a = dok_matrix((3,3)) - a[0,0] = 0 - # This assert would fail, because the above assignment would - # incorrectly call __set_item__ even though the value was 0. - assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys") - - # Slice assignments were also affected. - b = dok_matrix((3,3)) - b[:,0] = 0 - assert_(len(b.keys())==0, "Unexpected entries in keys") - - # The following five tests are duplicates from _TestCommon, so they can be - # marked as knownfail for Python 2.4. Once 2.4 is no longer supported, - # these duplicates can be removed again. 
- - @dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559") - def test_add_dense(self): - """ adding a dense matrix to a sparse matrix - """ - sum1 = self.dat + self.datsp - assert_array_equal(sum1, 2*self.dat) - sum2 = self.datsp + self.dat - assert_array_equal(sum2, 2*self.dat) - - @dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559") - def test_radd(self): - a = self.dat.copy() - a[0,2] = 2.0 - b = self.datsp - c = a + b - assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]]) - - @dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559") - def test_rsub(self): - assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) - assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) - - A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) - assert_array_equal((self.dat - A),self.dat - A.todense()) - assert_array_equal((A - self.dat),A.todense() - self.dat) - assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat) - assert_array_equal(self.datsp - A.todense(),self.dat - A.todense()) - - @dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559") - def test_matmat_sparse(self): - a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) - a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) - b = matrix([[0,1],[1,0],[0,2]],'d') - asp = self.spmatrix(a) - bsp = self.spmatrix(b) - assert_array_almost_equal((asp*bsp).todense(), a*b) - assert_array_almost_equal( asp*b, a*b) - assert_array_almost_equal( a*bsp, a*b) - assert_array_almost_equal( a2*bsp, a*b) - - # Now try performing cross-type multplication: - csp = bsp.tocsc() - c = b - assert_array_almost_equal((asp*csp).todense(), a*c) - assert_array_almost_equal( asp*c, a*c) - - assert_array_almost_equal( a*csp, a*c) - assert_array_almost_equal( a2*csp, a*c) - csp = bsp.tocsr() - assert_array_almost_equal((asp*csp).todense(), a*c) - assert_array_almost_equal( asp*c, a*c) - - assert_array_almost_equal( a*csp, a*c) - assert_array_almost_equal( 
a2*csp, a*c) - csp = bsp.tocoo() - assert_array_almost_equal((asp*csp).todense(), a*c) - assert_array_almost_equal( asp*c, a*c) - - assert_array_almost_equal( a*csp, a*c) - assert_array_almost_equal( a2*csp, a*c) - - # Test provided by Andy Fraser, 2006-03-26 - L = 30 - frac = .3 - random.seed(0) # make runs repeatable - A = zeros((L,2)) - for i in xrange(L): - for j in xrange(2): - r = random.random() - if r < frac: - A[i,j] = r/frac - - A = self.spmatrix(A) - B = A*A.T - assert_array_almost_equal(B.todense(), A.todense() * A.T.todense()) - assert_array_almost_equal(B.todense(), A.todense() * A.todense().T) - - # check dimension mismatch 2x2 times 3x2 - A = self.spmatrix( [[1,2],[3,4]] ) - B = self.spmatrix( [[1,2],[3,4],[5,6]] ) - assert_raises(ValueError, A.__mul__, B) - - @dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559") - def test_sub_dense(self): - """ subtracting a dense matrix to/from a sparse matrix - """ - sum1 = 3*self.dat - self.datsp - assert_array_equal(sum1, 2*self.dat) - sum2 = 3*self.datsp - self.dat - assert_array_equal(sum2, 2*self.dat) - - -class TestLIL( _TestCommon, _TestHorizSlicing, _TestVertSlicing, - _TestBothSlicing, _TestGetSet, _TestSolve, - _TestArithmetic, _TestInplaceArithmetic, _TestFancyIndexing, - TestCase): - spmatrix = lil_matrix - - B = lil_matrix((4,3)) - B[0,0] = 2 - B[1,2] = 7 - B[2,1] = 3 - B[3,0] = 10 - - - @dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \ - " matrices") - def test_fancy_indexing_set(self): - _TestFancyIndexing.test_fancy_indexing_set(self) - - @dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \ - " matrices") - def test_fancy_indexing_randomized(self): - _TestFancyIndexing.test_fancy_indexing_randomized(self) - - def test_dot(self): - A = matrix(zeros((10,10))) - A[0,3] = 10 - A[5,6] = 20 - - B = lil_matrix((10,10)) - B[0,3] = 10 - B[5,6] = 20 - assert_array_equal(A * A.T, (B * B.T).todense()) - assert_array_equal(A * A.H, (B * B.H).todense()) 
- - def test_scalar_mul(self): - x = lil_matrix((3,3)) - x[0,0] = 2 - - x = x*2 - assert_equal(x[0,0],4) - - x = x*0 - assert_equal(x[0,0],0) - - def test_reshape(self): - x = lil_matrix((4,3)) - x[0,0] = 1 - x[2,1] = 3 - x[3,2] = 5 - x[0,2] = 7 - - for s in [(12,1),(1,12)]: - assert_array_equal(x.reshape(s).todense(), - x.todense().reshape(s)) - - def test_lil_lil_assignment(self): - """ Tests whether a row of one lil_matrix can be assigned to - another. - """ - B = self.B.copy() - A = B / 10 - B[0,:] = A[0,:] - assert_array_equal(A[0,:].A, B[0,:].A) - - - def test_inplace_ops(self): - A = lil_matrix([[0,2,3],[4,0,6]]) - B = lil_matrix([[0,1,0],[0,2,3]]) - - data = {'add': (B,A + B), - 'sub': (B,A - B), - 'mul': (3,A * 3)} - - for op,(other,expected) in data.iteritems(): - result = A.copy() - getattr(result, '__i%s__' % op)(other) - - assert_array_equal(result.todense(), expected.todense()) - - - def test_lil_slice_assignment(self): - B = lil_matrix((4,3)) - B[0,0] = 5 - B[1,2] = 3 - B[2,1] = 7 - - expected = array([[10,0,0], - [0,0,6], - [0,14,0], - [0,0,0]]) - - B[:,:] = B+B - assert_array_equal(B.todense(),expected) - - block = [[1,0],[0,4]] - B[:2,:2] = csc_matrix(array(block)) - assert_array_equal(B.todense()[:2,:2],block) - - def test_lil_sequence_assignment(self): - A = lil_matrix((4,3)) - B = eye(3,4,format='lil') - - i0 = [0,1,2] - i1 = (0,1,2) - i2 = array( i0 ) - - A[0,i0] = B[i0,0] - A[1,i1] = B[i1,1] - A[2,i2] = B[i2,2] - assert_array_equal(A.todense(),B.T.todense()) - - # column slice - A = lil_matrix((2,3)) - A[1,1:3] = [10,20] - assert_array_equal(A.todense(), [[0,0,0],[0,10,20]]) - - # column slice - A = lil_matrix((3,2)) - A[1:3,1] = [[10],[20]] - assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]]) - - def test_lil_iteration(self): - row_data = [[1,2,3],[4,5,6]] - B = lil_matrix(array(row_data)) - for r,row in enumerate(B): - assert_array_equal(row.todense(),array(row_data[r],ndmin=2)) - - def test_lil_from_csr(self): - """ Tests whether a 
lil_matrix can be constructed from a - csr_matrix. - """ - B = lil_matrix((10,10)) - B[0,3] = 10 - B[5,6] = 20 - B[8,3] = 30 - B[3,8] = 40 - B[8,9] = 50 - C = B.tocsr() - D = lil_matrix(C) - assert_array_equal(C.A, D.A) - - def test_fancy_indexing(self): - M = arange(25).reshape(5,5) - A = lil_matrix( M ) - - assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3]) - - def test_point_wise_multiply(self): - l = lil_matrix((4,3)) - l[0,0] = 1 - l[1,1] = 2 - l[2,2] = 3 - l[3,1] = 4 - - m = lil_matrix((4,3)) - m[0,0] = 1 - m[0,1] = 2 - m[2,2] = 3 - m[3,1] = 4 - m[3,2] = 4 - - assert_array_equal(l.multiply(m).todense(), - m.multiply(l).todense()) - - assert_array_equal(l.multiply(m).todense(), - [[1,0,0], - [0,0,0], - [0,0,9], - [0,16,0]]) - - - -class TestCOO(_TestCommon, TestCase): - spmatrix = coo_matrix - def test_constructor1(self): - """unsorted triplet format""" - row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) - col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) - data = array([ 6., 10., 3., 9., 1., 4., - 11., 2., 8., 5., 7.]) - - coo = coo_matrix((data,(row,col)),(4,3)) - - assert_array_equal(arange(12).reshape(4,3),coo.todense()) - - def test_constructor2(self): - """unsorted triplet format with duplicates (which are summed)""" - row = array([0,1,2,2,2,2,0,0,2,2]) - col = array([0,2,0,2,1,1,1,0,0,2]) - data = array([2,9,-4,5,7,0,-1,2,1,-5]) - coo = coo_matrix((data,(row,col)),(3,3)) - - mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]]) - - assert_array_equal(mat,coo.todense()) - - def test_constructor3(self): - """empty matrix""" - coo = coo_matrix( (4,3) ) - - assert_array_equal(coo.shape,(4,3)) - assert_array_equal(coo.row,[]) - assert_array_equal(coo.col,[]) - assert_array_equal(coo.data,[]) - assert_array_equal(coo.todense(),zeros((4,3))) - - def test_constructor4(self): - """from dense matrix""" - mat = array([[0,1,0,0], - [7,0,3,0], - [0,4,0,0]]) - coo = coo_matrix(mat) - assert_array_equal(coo.todense(),mat) - - #upgrade rank 1 arrays to row matrix - mat 
= array([0,1,0,0]) - coo = coo_matrix(mat) - assert_array_equal(coo.todense(),mat.reshape(1,-1)) - - -class TestDIA(_TestCommon, _TestArithmetic, TestCase): - spmatrix = dia_matrix - - def test_constructor1(self): - D = matrix([[1, 0, 3, 0], - [1, 2, 0, 4], - [0, 2, 3, 0], - [0, 0, 3, 4]]) - data = np.array([[1,2,3,4]]).repeat(3,axis=0) - offsets = np.array([0,-1,2]) - assert_equal(dia_matrix( (data,offsets), shape=(4,4)).todense(), D) - - - -class TestBSR(_TestCommon, _TestArithmetic, _TestInplaceArithmetic, TestCase): - spmatrix = bsr_matrix - - def test_constructor1(self): - """check native BSR format constructor""" - indptr = array([0,2,2,4]) - indices = array([0,2,2,3]) - data = zeros((4,2,3)) - - data[0] = array([[ 0, 1, 2], - [ 3, 0, 5]]) - data[1] = array([[ 0, 2, 4], - [ 6, 0, 10]]) - data[2] = array([[ 0, 4, 8], - [12, 0, 20]]) - data[3] = array([[ 0, 5, 10], - [15, 0, 25]]) - - A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] ) - Asp = bsr_matrix((data,indices,indptr),shape=(6,12)) - assert_equal(Asp.todense(),A) - - #infer shape from arrays - Asp = bsr_matrix((data,indices,indptr)) - assert_equal(Asp.todense(),A) - - def test_constructor2(self): - """construct from dense""" - - #test zero mats - for shape in [ (1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]: - A = zeros(shape) - assert_equal(bsr_matrix(A).todense(),A) - A = zeros((4,6)) - assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A) - assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) - - A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] ) - assert_equal(bsr_matrix(A).todense(),A) - assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A) - assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A) - assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) - assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A) - assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A) - assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A) - 
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A) - - A = kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] ) - assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) - - def test_eliminate_zeros(self): - data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T - data = data.reshape(-1,2,2) - indices = array( [1, 2, 3, 4, 5, 6, 7, 8] ) - indptr = array( [0, 3, 8] ) - asp = bsr_matrix( (data, indices, indptr), shape=(4,20) ) - bsp = asp.copy() - asp.eliminate_zeros() - assert_array_equal(asp.nnz, 3*4) - assert_array_equal(asp.todense(),bsp.todense()) - - def test_bsr_matvec(self): - A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) ) - x = arange(A.shape[1]).reshape(-1,1) - assert_equal(A*x, A.todense()*x) - - def test_bsr_matvecs(self): - A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) ) - x = arange(A.shape[1]*6).reshape(-1,6) - assert_equal(A*x, A.todense()*x) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/tests/test_construct.py b/scipy-0.10.1/scipy/sparse/tests/test_construct.py deleted file mode 100644 index 353c910c15..0000000000 --- a/scipy-0.10.1/scipy/sparse/tests/test_construct.py +++ /dev/null @@ -1,198 +0,0 @@ -"""test sparse matrix construction functions""" - -import numpy as np -from numpy import array, matrix -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_array_equal, assert_raises - - -from scipy.sparse import csr_matrix, coo_matrix - -from scipy.sparse import construct -from scipy.sparse.construct import rand as sprand - -sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok'] - -#TODO check whether format=XXX is respected - -class TestConstructUtils(TestCase): - def test_spdiags(self): - diags1 = array( [[ 1, 2, 3, 4, 5]] ) - diags2 = array( [[ 1, 2, 3, 4, 5], - [ 6, 7, 8, 9,10]] ) - diags3 = array( [[ 1, 2, 3, 4, 5], - [ 6, 7, 8, 9,10], - [11,12,13,14,15]] ) - - cases = [] - cases.append( (diags1, 0, 1, 1, [[1]]) ) - 
cases.append( (diags1, [0], 1, 1, [[1]]) ) - cases.append( (diags1, [0], 2, 1, [[1],[0]]) ) - cases.append( (diags1, [0], 1, 2, [[1,0]]) ) - cases.append( (diags1, [1], 1, 2, [[0,2]]) ) - cases.append( (diags1,[-1], 1, 2, [[0,0]]) ) - cases.append( (diags1, [0], 2, 2, [[1,0],[0,2]]) ) - cases.append( (diags1,[-1], 2, 2, [[0,0],[1,0]]) ) - cases.append( (diags1, [3], 2, 2, [[0,0],[0,0]]) ) - cases.append( (diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]) ) - cases.append( (diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]) ) - cases.append( (diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]) ) - - cases.append( (diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]) ) - cases.append( (diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]) ) - cases.append( (diags2, [2,-3], 6, 6, [[0,0,3,0,0,0], - [0,0,0,4,0,0], - [0,0,0,0,5,0], - [6,0,0,0,0,0], - [0,7,0,0,0,0], - [0,0,8,0,0,0]]) ) - - cases.append( (diags3, [-1,0,1], 6, 6, [[ 6,12, 0, 0, 0, 0], - [ 1, 7,13, 0, 0, 0], - [ 0, 2, 8,14, 0, 0], - [ 0, 0, 3, 9,15, 0], - [ 0, 0, 0, 4,10, 0], - [ 0, 0, 0, 0, 5, 0]]) ) - cases.append( (diags3, [-4,2,-1], 6, 5, [[ 0, 0, 8, 0, 0], - [11, 0, 0, 9, 0], - [ 0,12, 0, 0,10], - [ 0, 0,13, 0, 0], - [ 1, 0, 0,14, 0], - [ 0, 2, 0, 0,15]]) ) - - for d,o,m,n,result in cases: - assert_equal( construct.spdiags(d,o,m,n).todense(), result ) - - - def test_identity(self): - assert_equal(construct.identity(1).toarray(), [[1]]) - assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]]) - - I = construct.identity(3, dtype='int8', format='dia') - assert_equal( I.dtype, np.dtype('int8') ) - assert_equal( I.format, 'dia' ) - - for fmt in sparse_formats: - I = construct.identity( 3, format=fmt ) - assert_equal( I.format, fmt ) - assert_equal( I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) - - def test_eye(self): - assert_equal(construct.eye(1,1).toarray(), [[1]]) - assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]]) - assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]]) - 
assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]]) - - assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16')) - - for m in [3, 5]: - for n in [3, 5]: - for k in range(-5,6): - assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k)) - - def test_kron(self): - cases = [] - - cases.append(array([[ 0]])) - cases.append(array([[-1]])) - cases.append(array([[ 4]])) - cases.append(array([[10]])) - cases.append(array([[0],[0]])) - cases.append(array([[0,0]])) - cases.append(array([[1,2],[3,4]])) - cases.append(array([[0,2],[5,0]])) - cases.append(array([[0,2,-6],[8,0,14]])) - cases.append(array([[5,4],[0,0],[6,0]])) - cases.append(array([[5,4,4],[1,0,0],[6,0,8]])) - cases.append(array([[0,1,0,2,0,5,8]])) - cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]])) - - for a in cases: - for b in cases: - result = construct.kron(csr_matrix(a),csr_matrix(b)).todense() - expected = np.kron(a,b) - assert_array_equal(result,expected) - - def test_kronsum(self): - cases = [] - - cases.append(array([[ 0]])) - cases.append(array([[-1]])) - cases.append(array([[ 4]])) - cases.append(array([[10]])) - cases.append(array([[1,2],[3,4]])) - cases.append(array([[0,2],[5,0]])) - cases.append(array([[0,2,-6],[8,0,14],[0,3,0]])) - cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]])) - - for a in cases: - for b in cases: - result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense() - expected = np.kron(np.eye(len(b)), a) + \ - np.kron(b, np.eye(len(a))) - assert_array_equal(result,expected) - - def test_vstack(self): - - A = coo_matrix([[1,2],[3,4]]) - B = coo_matrix([[5,6]]) - - expected = matrix([[1, 2], - [3, 4], - [5, 6]]) - assert_equal( construct.vstack( [A,B] ).todense(), expected ) - - def test_hstack(self): - - A = coo_matrix([[1,2],[3,4]]) - B = coo_matrix([[5],[6]]) - - expected = matrix([[1, 2, 5], - [3, 4, 6]]) - assert_equal( construct.hstack( [A,B] ).todense(), expected ) - - def test_bmat(self): - - A = coo_matrix([[1,2],[3,4]]) - B 
= coo_matrix([[5],[6]]) - C = coo_matrix([[7]]) - - expected = matrix([[1, 2, 5], - [3, 4, 6], - [0, 0, 7]]) - assert_equal( construct.bmat( [[A,B],[None,C]] ).todense(), expected ) - - - expected = matrix([[1, 2, 0], - [3, 4, 0], - [0, 0, 7]]) - assert_equal( construct.bmat( [[A,None],[None,C]] ).todense(), expected ) - - expected = matrix([[0, 5], - [0, 6], - [7, 0]]) - assert_equal( construct.bmat( [[None,B],[C,None]] ).todense(), expected ) - - #TODO test failure cases - - def test_rand(self): - # Simple sanity checks for sparse.rand - for t in [np.float32, np.float64, np.longdouble]: - x = sprand(5, 10, density=0.1, dtype=t) - assert_equal(x.dtype, t) - assert_equal(x.shape, (5, 10)) - assert_equal(x.nonzero()[0].size, 5) - - x = sprand(5, 10, density=0.1) - assert_equal(x.dtype, np.double) - - for fmt in ['coo', 'csc', 'csr', 'lil']: - x = sprand(5, 10, format=fmt) - assert_equal(x.format, fmt) - - assert_raises(ValueError, lambda: sprand(5, 10, 1.1)) - assert_raises(ValueError, lambda: sprand(5, 10, -0.1)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/tests/test_extract.py b/scipy-0.10.1/scipy/sparse/tests/test_extract.py deleted file mode 100644 index 92f94360a8..0000000000 --- a/scipy-0.10.1/scipy/sparse/tests/test_extract.py +++ /dev/null @@ -1,44 +0,0 @@ -"""test sparse matrix construction functions""" - -from numpy.testing import TestCase, assert_equal -from scipy.sparse import csr_matrix - -import numpy as np -from scipy.sparse import extract - - -class TestExtract(TestCase): - def setUp(self): - cases = [] - - cases.append( csr_matrix( [[1,2]] ) ) - cases.append( csr_matrix( [[1,0]] ) ) - cases.append( csr_matrix( [[0,0]] ) ) - cases.append( csr_matrix( [[1],[2]] ) ) - cases.append( csr_matrix( [[1],[0]] ) ) - cases.append( csr_matrix( [[0],[0]] ) ) - cases.append( csr_matrix( [[1,2],[3,4]] ) ) - cases.append( csr_matrix( [[0,1],[0,0]] ) ) - cases.append( csr_matrix( [[0,0],[1,0]] ) ) - cases.append( 
csr_matrix( [[0,0],[0,0]] ) ) - cases.append( csr_matrix( [[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]] ) ) - cases.append( csr_matrix( [[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]] ).T ) - - self.cases = cases - - def find(self): - for A in self.cases: - I,J,V = extract.find(A) - assert_equal( A.toarray(), csr_matrix(((I,J),V), shape=A.shape) ) - - def test_tril(self): - for A in self.cases: - B = A.toarray() - for k in [-3,-2,-1,0,1,2,3]: - assert_equal( extract.tril(A,k=k).toarray(), np.tril(B,k=k)) - - def test_triu(self): - for A in self.cases: - B = A.toarray() - for k in [-3,-2,-1,0,1,2,3]: - assert_equal( extract.triu(A,k=k).toarray(), np.triu(B,k=k)) diff --git a/scipy-0.10.1/scipy/sparse/tests/test_spfuncs.py b/scipy-0.10.1/scipy/sparse/tests/test_spfuncs.py deleted file mode 100644 index c4c2427fdf..0000000000 --- a/scipy-0.10.1/scipy/sparse/tests/test_spfuncs.py +++ /dev/null @@ -1,123 +0,0 @@ -from numpy import array, kron, matrix, diag -from numpy.testing import TestCase, run_module_suite, assert_, assert_equal - -from scipy.sparse import spfuncs -from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix -from scipy.sparse.sparsetools import csr_scale_rows, csr_scale_columns, \ - bsr_scale_rows, bsr_scale_columns - -class TestSparseFunctions(TestCase): - def test_scale_rows_and_cols(self): - D = matrix([[1,0,0,2,3], - [0,4,0,5,0], - [0,0,6,7,0]]) - - - #TODO expose through function - S = csr_matrix(D) - v = array([1,2,3]) - csr_scale_rows(3,5,S.indptr,S.indices,S.data,v) - assert_equal(S.todense(), diag(v)*D ) - - S = csr_matrix(D) - v = array([1,2,3,4,5]) - csr_scale_columns(3,5,S.indptr,S.indices,S.data,v) - assert_equal(S.todense(), D*diag(v) ) - - # blocks - E = kron(D,[[1,2],[3,4]]) - S = bsr_matrix(E,blocksize=(2,2)) - v = array([1,2,3,4,5,6]) - bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v) - assert_equal(S.todense(), diag(v)*E ) - - S = bsr_matrix(E,blocksize=(2,2)) - v = array([1,2,3,4,5,6,7,8,9,10]) - 
bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v) - assert_equal(S.todense(), E*diag(v) ) - - E = kron(D,[[1,2,3],[4,5,6]]) - S = bsr_matrix(E,blocksize=(2,3)) - v = array([1,2,3,4,5,6]) - bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v) - assert_equal(S.todense(), diag(v)*E ) - - S = bsr_matrix(E,blocksize=(2,3)) - v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) - bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v) - assert_equal(S.todense(), E*diag(v) ) - - - def test_estimate_blocksize(self): - mats = [] - mats.append( [[0,1],[1,0]] ) - mats.append( [[1,1,0],[0,0,1],[1,0,1]] ) - mats.append( [[0],[0],[1]] ) - mats = [array(x) for x in mats] - - blks = [] - blks.append( [[1]] ) - blks.append( [[1,1],[1,1]] ) - blks.append( [[1,1],[0,1]] ) - blks.append( [[1,1,0],[1,0,1],[1,1,1]] ) - blks = [array(x) for x in blks] - - for A in mats: - for B in blks: - X = kron(A,B) - r,c = spfuncs.estimate_blocksize(X) - assert_(r >= B.shape[0]) - assert_(c >= B.shape[1]) - - def test_count_blocks(self): - def gold(A,bs): - R,C = bs - I,J = A.nonzero() - return len( set( zip(I//R,J//C) ) ) - - mats = [] - mats.append( [[0]] ) - mats.append( [[1]] ) - mats.append( [[1,0]] ) - mats.append( [[1,1]] ) - mats.append( [[0,1],[1,0]] ) - mats.append( [[1,1,0],[0,0,1],[1,0,1]] ) - mats.append( [[0],[0],[1]] ) - - for A in mats: - for B in mats: - X = kron(A,B) - Y = csr_matrix(X) - for R in range(1,6): - for C in range(1,6): - assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C))) - - X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]]) - Y = csc_matrix(X) - assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2))) - assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2))) - - def test_cs_graph_components(self): - import numpy as np - from scipy.sparse import csr_matrix, cs_graph_components - - D = np.eye(4, dtype=np.bool) - - n_comp, flag = cs_graph_components(csr_matrix(D)) - assert_(n_comp == 4) - assert_equal(flag, [0, 1, 2, 3]) - - D[0,1] = D[1,0] = 1 - - 
n_comp, flag = cs_graph_components(csr_matrix(D)) - assert_(n_comp == 3) - assert_equal(flag, [0, 0, 1, 2]) - - # A pathological case... - D[2,2] = 0 - n_comp, flag = cs_graph_components(csr_matrix(D)) - assert_(n_comp == 2) - assert_equal(flag, [0, 0, -2, 1]) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/sparse/tests/test_sputils.py b/scipy-0.10.1/scipy/sparse/tests/test_sputils.py deleted file mode 100644 index 287a2372cf..0000000000 --- a/scipy-0.10.1/scipy/sparse/tests/test_sputils.py +++ /dev/null @@ -1,69 +0,0 @@ -"""unit tests for sparse utility functions""" - -import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_equal -from scipy.sparse import sputils - - -class TestSparseUtils(TestCase): - - def test_upcast(self): - assert_equal(sputils.upcast('intc'),np.intc) - assert_equal(sputils.upcast('int32','float32'),np.float64) - assert_equal(sputils.upcast('bool',complex,float),np.complex128) - assert_equal(sputils.upcast('i','d'),np.float64) - - def test_getdtype(self): - A = np.array([1],dtype='int8') - - assert_equal(sputils.getdtype(None,default=float),np.float) - assert_equal(sputils.getdtype(None,a=A),np.int8) - - def test_isscalarlike(self): - assert_equal(sputils.isscalarlike(3.0),True) - assert_equal(sputils.isscalarlike(-4),True) - assert_equal(sputils.isscalarlike(2.5),True) - assert_equal(sputils.isscalarlike(1 + 3j),True) - assert_equal(sputils.isscalarlike(np.array(3)),True) - assert_equal(sputils.isscalarlike( "16" ), True) - - assert_equal(sputils.isscalarlike( np.array([3])), False) - assert_equal(sputils.isscalarlike( [[3]] ), False) - assert_equal(sputils.isscalarlike( (1,) ), False) - assert_equal(sputils.isscalarlike( (1,2) ), False) - - def test_isintlike(self): - assert_equal(sputils.isintlike(3.0),True) - assert_equal(sputils.isintlike(-4),True) - assert_equal(sputils.isintlike(np.array(3)),True) - assert_equal(sputils.isintlike(np.array([3])), False) - - 
assert_equal(sputils.isintlike(2.5),False) - assert_equal(sputils.isintlike(1 + 3j),False) - assert_equal(sputils.isintlike( (1,) ), False) - assert_equal(sputils.isintlike( (1,2) ), False) - - def test_isshape(self): - assert_equal(sputils.isshape( (1,2) ),True) - assert_equal(sputils.isshape( (5,2) ),True) - - assert_equal(sputils.isshape( (1.5,2) ),False) - assert_equal(sputils.isshape( (2,2,2) ),False) - assert_equal(sputils.isshape( ([2],2) ),False) - - def test_issequence(self): - assert_equal(sputils.issequence( (1,) ),True) - assert_equal(sputils.issequence( (1,2,3) ),True) - assert_equal(sputils.issequence( [1] ),True) - assert_equal(sputils.issequence( [1,2,3] ),True) - assert_equal(sputils.issequence( np.array([1,2,3]) ),True) - - assert_equal(sputils.issequence( np.array([[1],[2],[3]]) ),False) - assert_equal(sputils.issequence( 3 ),False) - - def test_isdense(self): - assert_equal(sputils.isdense( np.array([1]) ),True) - assert_equal(sputils.isdense( np.matrix([1]) ),True) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/spatial/SConscript b/scipy-0.10.1/scipy/spatial/SConscript deleted file mode 100644 index 50f772fbb4..0000000000 --- a/scipy-0.10.1/scipy/spatial/SConscript +++ /dev/null @@ -1,48 +0,0 @@ -# Last Change: Mon Nov 03 06:00 PM 2008 J -# vim:syntax=python -from os.path import join -from numpy.distutils.misc_util import get_numpy_include_dirs, get_pkg_info -from distutils.sysconfig import get_python_inc -from numscons import GetNumpyEnvironment, CheckF77LAPACK, CheckF77Clib -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) -env.PrependUnique(CPPPATH=[get_numpy_include_dirs(), get_python_inc(), - env["PYEXTCPPPATH"]]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckLAPACK() -if not st: - has_lapack = 
0 -else: - has_lapack = 1 - -config.Finish() -write_info(env) - - -#========== -# Build -#========== - -env.NumpyPythonExtension('ckdtree', source = ['ckdtree.c']) - -env.NumpyPythonExtension('_distance_wrap', - source = [join('src', 'distance_wrap.c'), - join('src', 'distance.c')]) - -# Build qhull -src = [join('qhull', 'src', s) for s in [ - 'geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c', - 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c', - 'random.c', 'rboxlib.c', 'stat.c', 'user.c', 'usermem.c', - 'userprintf.c']] - -env.NumpyPythonExtension('qhull', source = ['qhull.c'] + src) diff --git a/scipy-0.10.1/scipy/spatial/SConstruct b/scipy-0.10.1/scipy/spatial/SConstruct deleted file mode 100644 index a377d8391b..0000000000 --- a/scipy-0.10.1/scipy/spatial/SConstruct +++ /dev/null @@ -1,2 +0,0 @@ -from numscons import GetInitEnvironment -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript') diff --git a/scipy-0.10.1/scipy/spatial/__init__.py b/scipy-0.10.1/scipy/spatial/__init__.py deleted file mode 100644 index 6010ecabfd..0000000000 --- a/scipy-0.10.1/scipy/spatial/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -============================================================= -Spatial algorithms and data structures (:mod:`scipy.spatial`) -============================================================= - -Nearest-neighbor queries: - -.. autosummary:: - :toctree: generated/ - - KDTree -- class for efficient nearest-neighbor queries - cKDTree -- class for efficient nearest-neighbor queries (faster impl.) - distance -- module containing many different distance measures - -Delaunay triangulation: - -.. 
autosummary:: - :toctree: generated/ - - Delaunay - tsearch - -""" - -from kdtree import * -from ckdtree import * -from qhull import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -__all__ += ['distance'] - -import distance -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/spatial/bento.info b/scipy-0.10.1/scipy/spatial/bento.info deleted file mode 100644 index 479e6b3ec2..0000000000 --- a/scipy-0.10.1/scipy/spatial/bento.info +++ /dev/null @@ -1,26 +0,0 @@ -HookFile: bscript - -Library: - Extension: ckdtree - Sources: ckdtree.c - Extension: _distance_wrap - Sources: src/distance_wrap.c, src/distance.c - Extension: qhull - Sources: - qhull.c, - qhull/src/geom.c, - qhull/src/geom2.c, - qhull/src/global.c, - qhull/src/io.c, - qhull/src/libqhull.c, - qhull/src/mem.c, - qhull/src/merge.c, - qhull/src/poly.c, - qhull/src/poly2.c, - qhull/src/qset.c, - qhull/src/random.c, - qhull/src/rboxlib.c, - qhull/src/stat.c, - qhull/src/user.c, - qhull/src/usermem.c, - qhull/src/userprintf.c diff --git a/scipy-0.10.1/scipy/spatial/bscript b/scipy-0.10.1/scipy/spatial/bscript deleted file mode 100644 index 1ba2f6e631..0000000000 --- a/scipy-0.10.1/scipy/spatial/bscript +++ /dev/null @@ -1,6 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - context.register_builder("qhull", lambda e: default_builder(e, use="FLAPACK CLIB")) diff --git a/scipy-0.10.1/scipy/spatial/ckdtree.c b/scipy-0.10.1/scipy/spatial/ckdtree.c deleted file mode 100644 index 8ef9cb45c9..0000000000 --- a/scipy-0.10.1/scipy/spatial/ckdtree.c +++ /dev/null @@ -1,10231 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:19:34 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. 
-#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - 
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) 
PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__spatial__ckdtree -#define __PYX_HAVE_API__scipy__spatial__ckdtree -#include "stdio.h" -#include "stdlib.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define 
CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... 
*/ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... */ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "ckdtree.pyx", - "numpy.pxd", -}; - -/* "numpy.pxd":719 - * # in Cython to enable them only on the right systems. 
- * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "numpy.pxd":720 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "numpy.pxd":721 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "numpy.pxd":722 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "numpy.pxd":726 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "numpy.pxd":727 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "numpy.pxd":728 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* "numpy.pxd":729 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "numpy.pxd":733 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t 
- */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "numpy.pxd":734 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "numpy.pxd":743 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "numpy.pxd":744 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "numpy.pxd":745 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong __pyx_t_5numpy_longlong_t; - -/* "numpy.pxd":747 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "numpy.pxd":748 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "numpy.pxd":749 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "numpy.pxd":751 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "numpy.pxd":752 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # 
<<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "numpy.pxd":754 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "numpy.pxd":755 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* "numpy.pxd":756 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -/*--- Type declarations ---*/ -struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree; - -/* "numpy.pxd":758 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "numpy.pxd":759 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "numpy.pxd":760 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ 
-typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* "numpy.pxd":762 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; -union __pyx_t_5scipy_7spatial_7ckdtree_heapcontents; -struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem; -struct __pyx_t_5scipy_7spatial_7ckdtree_heap; -struct __pyx_t_5scipy_7spatial_7ckdtree_innernode; -struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode; -struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo; - -/* "scipy/spatial/ckdtree.pyx":15 - * - * # priority queue - * cdef union heapcontents: # <<<<<<<<<<<<<< - * int intdata - * char* ptrdata - */ -union __pyx_t_5scipy_7spatial_7ckdtree_heapcontents { - int intdata; - char *ptrdata; -}; - -/* "scipy/spatial/ckdtree.pyx":19 - * char* ptrdata - * - * cdef struct heapitem: # <<<<<<<<<<<<<< - * double priority - * heapcontents contents - */ -struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem { - double priority; - union __pyx_t_5scipy_7spatial_7ckdtree_heapcontents contents; -}; - -/* "scipy/spatial/ckdtree.pyx":23 - * heapcontents contents - * - * cdef struct heap: # <<<<<<<<<<<<<< - * int n - * heapitem* heap - */ -struct __pyx_t_5scipy_7spatial_7ckdtree_heap { - int n; - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem *heap; - int space; -}; - -/* "scipy/spatial/ckdtree.pyx":139 - * - * # Tree structure - * cdef struct innernode: # <<<<<<<<<<<<<< - * int split_dim - * int n_points - */ -struct __pyx_t_5scipy_7spatial_7ckdtree_innernode { - int split_dim; - int n_points; - double split; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *less; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *greater; -}; - -/* "scipy/spatial/ckdtree.pyx":145 - * innernode* less - * innernode* greater - * cdef struct leafnode: # <<<<<<<<<<<<<< - * int split_dim - * int n_points - */ -struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode { - int split_dim; - 
int n_points; - int start_idx; - int end_idx; -}; - -/* "scipy/spatial/ckdtree.pyx":153 - * # this is the standard trick for variable-size arrays: - * # malloc sizeof(nodeinfo)+self.m*sizeof(double) bytes. - * cdef struct nodeinfo: # <<<<<<<<<<<<<< - * innernode* node - * double side_distances[0] - */ -struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo { - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *node; - double side_distances[0]; -}; - -/* "scipy/spatial/ckdtree.pyx":157 - * double side_distances[0] - * - * cdef class cKDTree: # <<<<<<<<<<<<<< - * """kd-tree for quick nearest-neighbor lookup - * - */ -struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree { - PyObject_HEAD - struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *__pyx_vtab; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *tree; - PyObject *data; - double *raw_data; - int n; - int m; - int leafsize; - PyObject *maxes; - double *raw_maxes; - PyObject *mins; - double *raw_mins; - PyObject *indices; - __pyx_t_5numpy_int32_t *raw_indices; -}; - - - -struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree { - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *(*__build)(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *, int, int, double *, double *); - PyObject *(*__free_tree)(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *, struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *); - void (*__query)(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *, double *, int *, double *, int, double, double, double); -}; -static struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *__pyx_vtabptr_5scipy_7spatial_7ckdtree_cKDTree; - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } 
__Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ - -static CYTHON_INLINE long __Pyx_div_long(long, long); /* 
proto */ - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ - -/* Run-time type information about structs used with buffers */ -struct __Pyx_StructField_; - -typedef struct { - const char* name; /* for error messages only */ - struct __Pyx_StructField_* fields; - size_t size; /* sizeof(type) */ - char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */ -} __Pyx_TypeInfo; - -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; - -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; - - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); - -static void __Pyx_RaiseBufferFallbackError(void); /*proto*/ - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} -static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/ -#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); -static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else -#define __Pyx_GetBuffer PyObject_GetBuffer -#define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - -Py_ssize_t __Pyx_zeros[] = {0, 0}; -Py_ssize_t __Pyx_minusones[] = {-1, -1}; - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE 
__pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define __Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - 
#define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int 
__Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename); /*proto*/ - -static int __Pyx_check_binary_version(void); - -static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - -static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'libc.stdlib' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ -static 
CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ - -/* Module declarations from 'stdlib' */ - -/* Module declarations from 'scipy.spatial.ckdtree' */ -static PyTypeObject *__pyx_ptype_5scipy_7spatial_7ckdtree_cKDTree = 0; -static double __pyx_v_5scipy_7spatial_7ckdtree_infinity; -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapcreate(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *, int); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapdestroy(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapresize(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *, int); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heappush(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *, struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem); /*proto*/ -static struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_f_5scipy_7spatial_7ckdtree_heappeek(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *); /*proto*/ -static PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapremove(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *); /*proto*/ -static struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_f_5scipy_7spatial_7ckdtree_heappop(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *); /*proto*/ -static CYTHON_INLINE double 
__pyx_f_5scipy_7spatial_7ckdtree_dmax(double, double); /*proto*/ -static CYTHON_INLINE double __pyx_f_5scipy_7spatial_7ckdtree_dabs(double); /*proto*/ -static CYTHON_INLINE double __pyx_f_5scipy_7spatial_7ckdtree__distance_p(double *, double *, double, int, double); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), 'R' }; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), 'I' }; -static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), 'I' }; -#define __Pyx_MODULE_NAME "scipy.spatial.ckdtree" -int __pyx_module_is_main_scipy__spatial__ckdtree = 0; - -/* Implementation of 'scipy.spatial.ckdtree' */ -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_RuntimeError; -static char __pyx_k_1[] = "Heap containing %d items cannot be resized to %d"; -static char __pyx_k_2[] = "leafsize must be at least 1"; -static char __pyx_k_4[] = "distance_upper_bound"; -static char __pyx_k_6[] = "x must consist of vectors of length %d but has shape %s"; -static char __pyx_k_7[] = "Only p-norms with 1<=p<=infinity permitted"; -static char __pyx_k_12[] = "ndarray is not C contiguous"; -static char __pyx_k_14[] = "ndarray is not Fortran contiguous"; -static char __pyx_k_16[] = "Non-native byte order not supported"; -static char __pyx_k_18[] = "unknown dtype code in numpy.pxd (%d)"; -static char __pyx_k_19[] = "Format string allocated too short, see comment in numpy.pxd"; -static char __pyx_k_22[] = "Format string allocated too short."; -static char __pyx_k__B[] = "B"; -static char __pyx_k__H[] = "H"; -static char __pyx_k__I[] = "I"; -static char __pyx_k__L[] = "L"; -static char __pyx_k__O[] = "O"; -static char __pyx_k__Q[] = "Q"; -static char __pyx_k__b[] = "b"; -static char __pyx_k__d[] = "d"; -static char __pyx_k__f[] = "f"; -static char __pyx_k__g[] = "g"; -static char __pyx_k__h[] = "h"; -static char 
__pyx_k__i[] = "i"; -static char __pyx_k__k[] = "k"; -static char __pyx_k__l[] = "l"; -static char __pyx_k__p[] = "p"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__x[] = "x"; -static char __pyx_k__Zd[] = "Zd"; -static char __pyx_k__Zf[] = "Zf"; -static char __pyx_k__Zg[] = "Zg"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__eps[] = "eps"; -static char __pyx_k__inf[] = "inf"; -static char __pyx_k__amax[] = "amax"; -static char __pyx_k__amin[] = "amin"; -static char __pyx_k__axis[] = "axis"; -static char __pyx_k__data[] = "data"; -static char __pyx_k__fill[] = "fill"; -static char __pyx_k__prod[] = "prod"; -static char __pyx_k__dtype[] = "dtype"; -static char __pyx_k__empty[] = "empty"; -static char __pyx_k__float[] = "float"; -static char __pyx_k__int32[] = "int32"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__range[] = "range"; -static char __pyx_k__shape[] = "shape"; -static char __pyx_k__arange[] = "arange"; -static char __pyx_k__astype[] = "astype"; -static char __pyx_k__kdtree[] = "kdtree"; -static char __pyx_k____all__[] = "__all__"; -static char __pyx_k__asarray[] = "asarray"; -static char __pyx_k__cKDTree[] = "cKDTree"; -static char __pyx_k__newaxis[] = "newaxis"; -static char __pyx_k__reshape[] = "reshape"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__leafsize[] = "leafsize"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k__RuntimeError[] = "RuntimeError"; -static char __pyx_k__ascontiguousarray[] = "ascontiguousarray"; -static PyObject *__pyx_kp_s_1; -static PyObject *__pyx_kp_u_12; -static PyObject *__pyx_kp_u_14; -static PyObject *__pyx_kp_u_16; -static PyObject *__pyx_kp_u_18; -static PyObject *__pyx_kp_u_19; -static PyObject *__pyx_kp_s_2; -static PyObject *__pyx_kp_u_22; -static PyObject *__pyx_n_s_4; -static PyObject *__pyx_kp_s_6; -static PyObject *__pyx_kp_s_7; -static PyObject *__pyx_n_s__RuntimeError; -static 
PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s____all__; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s__amax; -static PyObject *__pyx_n_s__amin; -static PyObject *__pyx_n_s__arange; -static PyObject *__pyx_n_s__asarray; -static PyObject *__pyx_n_s__ascontiguousarray; -static PyObject *__pyx_n_s__astype; -static PyObject *__pyx_n_s__axis; -static PyObject *__pyx_n_s__cKDTree; -static PyObject *__pyx_n_s__data; -static PyObject *__pyx_n_s__dtype; -static PyObject *__pyx_n_s__empty; -static PyObject *__pyx_n_s__eps; -static PyObject *__pyx_n_s__fill; -static PyObject *__pyx_n_s__float; -static PyObject *__pyx_n_s__i; -static PyObject *__pyx_n_s__inf; -static PyObject *__pyx_n_s__int32; -static PyObject *__pyx_n_s__k; -static PyObject *__pyx_n_s__kdtree; -static PyObject *__pyx_n_s__leafsize; -static PyObject *__pyx_n_s__newaxis; -static PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__p; -static PyObject *__pyx_n_s__prod; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__reshape; -static PyObject *__pyx_n_s__shape; -static PyObject *__pyx_n_s__x; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_15; -static double __pyx_k_5; -static PyObject *__pyx_k_slice_9; -static PyObject *__pyx_k_tuple_3; -static PyObject *__pyx_k_tuple_8; -static PyObject *__pyx_k_tuple_10; -static PyObject *__pyx_k_tuple_11; -static PyObject *__pyx_k_tuple_13; -static PyObject *__pyx_k_tuple_15; -static PyObject *__pyx_k_tuple_17; -static PyObject *__pyx_k_tuple_20; -static PyObject *__pyx_k_tuple_21; -static PyObject *__pyx_k_tuple_23; - -/* "scipy/spatial/ckdtree.pyx":28 - * int space - * - * cdef inline heapcreate(heap* self,int initial_size): # <<<<<<<<<<<<<< - * self.space = initial_size - * self.heap = stdlib.malloc(sizeof(heapitem)*self.space) - */ - -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapcreate(struct 
__pyx_t_5scipy_7spatial_7ckdtree_heap *__pyx_v_self, int __pyx_v_initial_size) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("heapcreate"); - - /* "scipy/spatial/ckdtree.pyx":29 - * - * cdef inline heapcreate(heap* self,int initial_size): - * self.space = initial_size # <<<<<<<<<<<<<< - * self.heap = stdlib.malloc(sizeof(heapitem)*self.space) - * self.n=0 - */ - __pyx_v_self->space = __pyx_v_initial_size; - - /* "scipy/spatial/ckdtree.pyx":30 - * cdef inline heapcreate(heap* self,int initial_size): - * self.space = initial_size - * self.heap = stdlib.malloc(sizeof(heapitem)*self.space) # <<<<<<<<<<<<<< - * self.n=0 - * - */ - __pyx_v_self->heap = ((struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem *)malloc(((sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem)) * __pyx_v_self->space))); - - /* "scipy/spatial/ckdtree.pyx":31 - * self.space = initial_size - * self.heap = stdlib.malloc(sizeof(heapitem)*self.space) - * self.n=0 # <<<<<<<<<<<<<< - * - * cdef inline heapdestroy(heap* self): - */ - __pyx_v_self->n = 0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":33 - * self.n=0 - * - * cdef inline heapdestroy(heap* self): # <<<<<<<<<<<<<< - * stdlib.free(self.heap) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapdestroy(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("heapdestroy"); - - /* "scipy/spatial/ckdtree.pyx":34 - * - * cdef inline heapdestroy(heap* self): - * stdlib.free(self.heap) # <<<<<<<<<<<<<< - * - * cdef inline heapresize(heap* self, int new_space): - */ - free(__pyx_v_self->heap); - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":36 - * 
stdlib.free(self.heap) - * - * cdef inline heapresize(heap* self, int new_space): # <<<<<<<<<<<<<< - * if new_spacen); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":38 - * cdef inline heapresize(heap* self, int new_space): - * if new_spacestdlib.realloc(self.heap,new_space*sizeof(heapitem)) - */ - __pyx_t_2 = PyInt_FromLong(__pyx_v_self->n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromLong(__pyx_v_new_space); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - 
__Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/spatial/ckdtree.pyx":39 - * if new_spacestdlib.realloc(self.heap,new_space*sizeof(heapitem)) - * - */ - __pyx_v_self->space = __pyx_v_new_space; - - /* "scipy/spatial/ckdtree.pyx":40 - * raise ValueError("Heap containing %d items cannot be resized to %d" % (self.n, new_space)) - * self.space = new_space - * self.heap = stdlib.realloc(self.heap,new_space*sizeof(heapitem)) # <<<<<<<<<<<<<< - * - * cdef inline heappush(heap* self, heapitem item): - */ - __pyx_v_self->heap = ((struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem *)realloc(((void *)__pyx_v_self->heap), (__pyx_v_new_space * (sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem))))); - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.spatial.ckdtree.heapresize", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":42 - * self.heap = stdlib.realloc(self.heap,new_space*sizeof(heapitem)) - * - * cdef inline heappush(heap* self, heapitem item): # <<<<<<<<<<<<<< - * cdef int i - * cdef heapitem t - */ - -static CYTHON_INLINE PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heappush(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *__pyx_v_self, struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_item) { - int __pyx_v_i; - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_t; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("heappush"); - - /* "scipy/spatial/ckdtree.pyx":46 - * cdef heapitem t - * - * self.n += 1 # <<<<<<<<<<<<<< - * if self.n>self.space: - * heapresize(self,2*self.space+1) - */ - __pyx_v_self->n = (__pyx_v_self->n + 1); - - /* "scipy/spatial/ckdtree.pyx":47 - * - * self.n += 1 - * if self.n>self.space: # <<<<<<<<<<<<<< - * heapresize(self,2*self.space+1) - * - */ - __pyx_t_1 = (__pyx_v_self->n > __pyx_v_self->space); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":48 - * self.n += 1 - * if self.n>self.space: - * heapresize(self,2*self.space+1) # <<<<<<<<<<<<<< - * - * i = self.n-1 - */ - __pyx_t_2 = __pyx_f_5scipy_7spatial_7ckdtree_heapresize(__pyx_v_self, ((2 * __pyx_v_self->space) + 1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/spatial/ckdtree.pyx":50 - * heapresize(self,2*self.space+1) - * - * i = self.n-1 # <<<<<<<<<<<<<< - * self.heap[i] = item - * while i>0 and self.heap[i].priorityn - 1); - - /* "scipy/spatial/ckdtree.pyx":51 - * - * i = self.n-1 - * self.heap[i] = item # <<<<<<<<<<<<<< - * while i>0 and self.heap[i].priorityheap[__pyx_v_i]) = __pyx_v_item; - - /* "scipy/spatial/ckdtree.pyx":52 - * i = self.n-1 - * self.heap[i] = item - * while i>0 and self.heap[i].priority 0); - if (__pyx_t_1) { - __pyx_t_3 = ((__pyx_v_self->heap[__pyx_v_i]).priority < (__pyx_v_self->heap[__Pyx_div_long((__pyx_v_i - 1), 2)]).priority); - __pyx_t_4 = __pyx_t_3; - } else { - __pyx_t_4 = __pyx_t_1; - } - if (!__pyx_t_4) break; - - /* "scipy/spatial/ckdtree.pyx":53 - * self.heap[i] = item - * while i>0 and self.heap[i].priorityheap[__Pyx_div_long((__pyx_v_i - 1), 2)]); - - /* "scipy/spatial/ckdtree.pyx":54 - * while i>0 and self.heap[i].priorityheap[__Pyx_div_long((__pyx_v_i - 1), 2)]) = (__pyx_v_self->heap[__pyx_v_i]); - - /* 
"scipy/spatial/ckdtree.pyx":55 - * t = self.heap[(i-1)//2] - * self.heap[(i-1)//2] = self.heap[i] - * self.heap[i] = t # <<<<<<<<<<<<<< - * i = (i-1)//2 - * - */ - (__pyx_v_self->heap[__pyx_v_i]) = __pyx_v_t; - - /* "scipy/spatial/ckdtree.pyx":56 - * self.heap[(i-1)//2] = self.heap[i] - * self.heap[i] = t - * i = (i-1)//2 # <<<<<<<<<<<<<< - * - * cdef heapitem heappeek(heap* self): - */ - __pyx_v_i = __Pyx_div_long((__pyx_v_i - 1), 2); - } - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.spatial.ckdtree.heappush", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":58 - * i = (i-1)//2 - * - * cdef heapitem heappeek(heap* self): # <<<<<<<<<<<<<< - * return self.heap[0] - * - */ - -static struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_f_5scipy_7spatial_7ckdtree_heappeek(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *__pyx_v_self) { - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("heappeek"); - - /* "scipy/spatial/ckdtree.pyx":59 - * - * cdef heapitem heappeek(heap* self): - * return self.heap[0] # <<<<<<<<<<<<<< - * - * cdef heapremove(heap* self): - */ - __pyx_r = (__pyx_v_self->heap[0]); - goto __pyx_L0; - - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":61 - * return self.heap[0] - * - * cdef heapremove(heap* self): # <<<<<<<<<<<<<< - * cdef heapitem t - * cdef int i, j, k, l - */ - -static PyObject *__pyx_f_5scipy_7spatial_7ckdtree_heapremove(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *__pyx_v_self) { - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_t; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_k; - int __pyx_v_l; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int 
__pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("heapremove"); - - /* "scipy/spatial/ckdtree.pyx":65 - * cdef int i, j, k, l - * - * self.heap[0] = self.heap[self.n-1] # <<<<<<<<<<<<<< - * self.n -= 1 - * if self.n < self.space//4 and self.space>40: #FIXME: magic number - */ - (__pyx_v_self->heap[0]) = (__pyx_v_self->heap[(__pyx_v_self->n - 1)]); - - /* "scipy/spatial/ckdtree.pyx":66 - * - * self.heap[0] = self.heap[self.n-1] - * self.n -= 1 # <<<<<<<<<<<<<< - * if self.n < self.space//4 and self.space>40: #FIXME: magic number - * heapresize(self,self.space//2+1) - */ - __pyx_v_self->n = (__pyx_v_self->n - 1); - - /* "scipy/spatial/ckdtree.pyx":67 - * self.heap[0] = self.heap[self.n-1] - * self.n -= 1 - * if self.n < self.space//4 and self.space>40: #FIXME: magic number # <<<<<<<<<<<<<< - * heapresize(self,self.space//2+1) - * - */ - __pyx_t_1 = (__pyx_v_self->n < __Pyx_div_long(__pyx_v_self->space, 4)); - if (__pyx_t_1) { - __pyx_t_2 = (__pyx_v_self->space > 40); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - /* "scipy/spatial/ckdtree.pyx":68 - * self.n -= 1 - * if self.n < self.space//4 and self.space>40: #FIXME: magic number - * heapresize(self,self.space//2+1) # <<<<<<<<<<<<<< - * - * i=0 - */ - __pyx_t_4 = __pyx_f_5scipy_7spatial_7ckdtree_heapresize(__pyx_v_self, (__Pyx_div_long(__pyx_v_self->space, 2) + 1)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/spatial/ckdtree.pyx":70 - * heapresize(self,self.space//2+1) - * - * i=0 # <<<<<<<<<<<<<< - * j=1 - * k=2 - */ - __pyx_v_i = 0; - - /* "scipy/spatial/ckdtree.pyx":71 - * - * i=0 - * j=1 # <<<<<<<<<<<<<< - * k=2 - * while ((j 
self.heap[j].priority or - */ - __pyx_v_k = 2; - - /* "scipy/spatial/ckdtree.pyx":73 - * j=1 - * k=2 - * while ((j self.heap[j].priority or - * k self.heap[j].priority or # <<<<<<<<<<<<<< - * k self.heap[k].priority)): - */ - __pyx_t_3 = (__pyx_v_j < __pyx_v_self->n); - if (__pyx_t_3) { - __pyx_t_1 = ((__pyx_v_self->heap[__pyx_v_i]).priority > (__pyx_v_self->heap[__pyx_v_j]).priority); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (!__pyx_t_2) { - - /* "scipy/spatial/ckdtree.pyx":75 - * while ((j self.heap[j].priority or - * k self.heap[k].priority)): - * if kself.heap[k].priority: - */ - __pyx_t_3 = (__pyx_v_k < __pyx_v_self->n); - if (__pyx_t_3) { - - /* "scipy/spatial/ckdtree.pyx":76 - * self.heap[i].priority > self.heap[j].priority or - * k self.heap[k].priority)): # <<<<<<<<<<<<<< - * if kself.heap[k].priority: - * l = k - */ - __pyx_t_1 = ((__pyx_v_self->heap[__pyx_v_i]).priority > (__pyx_v_self->heap[__pyx_v_k]).priority); - __pyx_t_5 = __pyx_t_1; - } else { - __pyx_t_5 = __pyx_t_3; - } - __pyx_t_3 = __pyx_t_5; - } else { - __pyx_t_3 = __pyx_t_2; - } - if (!__pyx_t_3) break; - - /* "scipy/spatial/ckdtree.pyx":77 - * k self.heap[k].priority)): - * if kself.heap[k].priority: # <<<<<<<<<<<<<< - * l = k - * else: - */ - __pyx_t_3 = (__pyx_v_k < __pyx_v_self->n); - if (__pyx_t_3) { - __pyx_t_2 = ((__pyx_v_self->heap[__pyx_v_j]).priority > (__pyx_v_self->heap[__pyx_v_k]).priority); - __pyx_t_5 = __pyx_t_2; - } else { - __pyx_t_5 = __pyx_t_3; - } - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":78 - * self.heap[i].priority > self.heap[k].priority)): - * if kself.heap[k].priority: - * l = k # <<<<<<<<<<<<<< - * else: - * l = j - */ - __pyx_v_l = __pyx_v_k; - goto __pyx_L6; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":80 - * l = k - * else: - * l = j # <<<<<<<<<<<<<< - * t = self.heap[l] - * self.heap[l] = self.heap[i] - */ - __pyx_v_l = __pyx_v_j; - } - __pyx_L6:; - - /* "scipy/spatial/ckdtree.pyx":81 - * else: - * l = j - * t = 
self.heap[l] # <<<<<<<<<<<<<< - * self.heap[l] = self.heap[i] - * self.heap[i] = t - */ - __pyx_v_t = (__pyx_v_self->heap[__pyx_v_l]); - - /* "scipy/spatial/ckdtree.pyx":82 - * l = j - * t = self.heap[l] - * self.heap[l] = self.heap[i] # <<<<<<<<<<<<<< - * self.heap[i] = t - * i = l - */ - (__pyx_v_self->heap[__pyx_v_l]) = (__pyx_v_self->heap[__pyx_v_i]); - - /* "scipy/spatial/ckdtree.pyx":83 - * t = self.heap[l] - * self.heap[l] = self.heap[i] - * self.heap[i] = t # <<<<<<<<<<<<<< - * i = l - * j = 2*i+1 - */ - (__pyx_v_self->heap[__pyx_v_i]) = __pyx_v_t; - - /* "scipy/spatial/ckdtree.pyx":84 - * self.heap[l] = self.heap[i] - * self.heap[i] = t - * i = l # <<<<<<<<<<<<<< - * j = 2*i+1 - * k = 2*i+2 - */ - __pyx_v_i = __pyx_v_l; - - /* "scipy/spatial/ckdtree.pyx":85 - * self.heap[i] = t - * i = l - * j = 2*i+1 # <<<<<<<<<<<<<< - * k = 2*i+2 - * - */ - __pyx_v_j = ((2 * __pyx_v_i) + 1); - - /* "scipy/spatial/ckdtree.pyx":86 - * i = l - * j = 2*i+1 - * k = 2*i+2 # <<<<<<<<<<<<<< - * - * cdef heapitem heappop(heap* self): - */ - __pyx_v_k = ((2 * __pyx_v_i) + 2); - } - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.spatial.ckdtree.heapremove", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":88 - * k = 2*i+2 - * - * cdef heapitem heappop(heap* self): # <<<<<<<<<<<<<< - * cdef heapitem it - * it = heappeek(self) - */ - -static struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_f_5scipy_7spatial_7ckdtree_heappop(struct __pyx_t_5scipy_7spatial_7ckdtree_heap *__pyx_v_self) { - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_it; - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("heappop"); - - /* "scipy/spatial/ckdtree.pyx":90 - * cdef heapitem heappop(heap* self): - * cdef heapitem it - * it = heappeek(self) # <<<<<<<<<<<<<< - * heapremove(self) - * return it - */ - __pyx_v_it = __pyx_f_5scipy_7spatial_7ckdtree_heappeek(__pyx_v_self); - - /* "scipy/spatial/ckdtree.pyx":91 - * cdef heapitem it - * it = heappeek(self) - * heapremove(self) # <<<<<<<<<<<<<< - * return it - * - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heapremove(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":92 - * it = heappeek(self) - * heapremove(self) - * return it # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_it; - goto __pyx_L0; - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_WriteUnraisable("scipy.spatial.ckdtree.heappop", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":99 - * - * # utility functions - * cdef inline double dmax(double x, double y): # <<<<<<<<<<<<<< - * if x>y: - * return x - */ - -static CYTHON_INLINE double __pyx_f_5scipy_7spatial_7ckdtree_dmax(double __pyx_v_x, double __pyx_v_y) { - double __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("dmax"); - - /* "scipy/spatial/ckdtree.pyx":100 - * # utility functions - * cdef inline double dmax(double x, double y): - * if x>y: # <<<<<<<<<<<<<< - * return x - * else: - */ - __pyx_t_1 = (__pyx_v_x > __pyx_v_y); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":101 - * cdef inline double dmax(double x, double y): - * if x>y: - * return x # <<<<<<<<<<<<<< - * else: - * return y - */ - __pyx_r = __pyx_v_x; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":103 - * return x - * else: - * 
return y # <<<<<<<<<<<<<< - * cdef inline double dabs(double x): - * if x>0: - */ - __pyx_r = __pyx_v_y; - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":104 - * else: - * return y - * cdef inline double dabs(double x): # <<<<<<<<<<<<<< - * if x>0: - * return x - */ - -static CYTHON_INLINE double __pyx_f_5scipy_7spatial_7ckdtree_dabs(double __pyx_v_x) { - double __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("dabs"); - - /* "scipy/spatial/ckdtree.pyx":105 - * return y - * cdef inline double dabs(double x): - * if x>0: # <<<<<<<<<<<<<< - * return x - * else: - */ - __pyx_t_1 = (__pyx_v_x > 0.0); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":106 - * cdef inline double dabs(double x): - * if x>0: - * return x # <<<<<<<<<<<<<< - * else: - * return -x - */ - __pyx_r = __pyx_v_x; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":108 - * return x - * else: - * return -x # <<<<<<<<<<<<<< - * cdef inline double _distance_p(double*x,double*y,double p,int k,double upperbound): - * """Compute the distance between x and y - */ - __pyx_r = (-__pyx_v_x); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":109 - * else: - * return -x - * cdef inline double _distance_p(double*x,double*y,double p,int k,double upperbound): # <<<<<<<<<<<<<< - * """Compute the distance between x and y - * - */ - -static CYTHON_INLINE double __pyx_f_5scipy_7spatial_7ckdtree__distance_p(double *__pyx_v_x, double *__pyx_v_y, double __pyx_v_p, int __pyx_v_k, double __pyx_v_upperbound) { - int __pyx_v_i; - double __pyx_v_r; - double __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - __Pyx_RefNannySetupContext("_distance_p"); - - /* "scipy/spatial/ckdtree.pyx":118 - * cdef int i - * cdef 
double r - * r = 0 # <<<<<<<<<<<<<< - * if p==infinity: - * for i in range(k): - */ - __pyx_v_r = 0.0; - - /* "scipy/spatial/ckdtree.pyx":119 - * cdef double r - * r = 0 - * if p==infinity: # <<<<<<<<<<<<<< - * for i in range(k): - * r = dmax(r,dabs(x[i]-y[i])) - */ - __pyx_t_1 = (__pyx_v_p == __pyx_v_5scipy_7spatial_7ckdtree_infinity); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":120 - * r = 0 - * if p==infinity: - * for i in range(k): # <<<<<<<<<<<<<< - * r = dmax(r,dabs(x[i]-y[i])) - * if r>upperbound: - */ - __pyx_t_2 = __pyx_v_k; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":121 - * if p==infinity: - * for i in range(k): - * r = dmax(r,dabs(x[i]-y[i])) # <<<<<<<<<<<<<< - * if r>upperbound: - * return r - */ - __pyx_v_r = __pyx_f_5scipy_7spatial_7ckdtree_dmax(__pyx_v_r, __pyx_f_5scipy_7spatial_7ckdtree_dabs(((__pyx_v_x[__pyx_v_i]) - (__pyx_v_y[__pyx_v_i])))); - - /* "scipy/spatial/ckdtree.pyx":122 - * for i in range(k): - * r = dmax(r,dabs(x[i]-y[i])) - * if r>upperbound: # <<<<<<<<<<<<<< - * return r - * elif p==1: - */ - __pyx_t_1 = (__pyx_v_r > __pyx_v_upperbound); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":123 - * r = dmax(r,dabs(x[i]-y[i])) - * if r>upperbound: - * return r # <<<<<<<<<<<<<< - * elif p==1: - * for i in range(k): - */ - __pyx_r = __pyx_v_r; - goto __pyx_L0; - goto __pyx_L6; - } - __pyx_L6:; - } - goto __pyx_L3; - } - - /* "scipy/spatial/ckdtree.pyx":124 - * if r>upperbound: - * return r - * elif p==1: # <<<<<<<<<<<<<< - * for i in range(k): - * r += dabs(x[i]-y[i]) - */ - __pyx_t_1 = (__pyx_v_p == 1.0); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":125 - * return r - * elif p==1: - * for i in range(k): # <<<<<<<<<<<<<< - * r += dabs(x[i]-y[i]) - * if r>upperbound: - */ - __pyx_t_2 = __pyx_v_k; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":126 - * elif p==1: - * for i 
in range(k): - * r += dabs(x[i]-y[i]) # <<<<<<<<<<<<<< - * if r>upperbound: - * return r - */ - __pyx_v_r = (__pyx_v_r + __pyx_f_5scipy_7spatial_7ckdtree_dabs(((__pyx_v_x[__pyx_v_i]) - (__pyx_v_y[__pyx_v_i])))); - - /* "scipy/spatial/ckdtree.pyx":127 - * for i in range(k): - * r += dabs(x[i]-y[i]) - * if r>upperbound: # <<<<<<<<<<<<<< - * return r - * else: - */ - __pyx_t_1 = (__pyx_v_r > __pyx_v_upperbound); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":128 - * r += dabs(x[i]-y[i]) - * if r>upperbound: - * return r # <<<<<<<<<<<<<< - * else: - * for i in range(k): - */ - __pyx_r = __pyx_v_r; - goto __pyx_L0; - goto __pyx_L9; - } - __pyx_L9:; - } - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":130 - * return r - * else: - * for i in range(k): # <<<<<<<<<<<<<< - * r += dabs(x[i]-y[i])**p - * if r>upperbound: - */ - __pyx_t_2 = __pyx_v_k; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":131 - * else: - * for i in range(k): - * r += dabs(x[i]-y[i])**p # <<<<<<<<<<<<<< - * if r>upperbound: - * return r - */ - __pyx_v_r = (__pyx_v_r + pow(__pyx_f_5scipy_7spatial_7ckdtree_dabs(((__pyx_v_x[__pyx_v_i]) - (__pyx_v_y[__pyx_v_i]))), __pyx_v_p)); - - /* "scipy/spatial/ckdtree.pyx":132 - * for i in range(k): - * r += dabs(x[i]-y[i])**p - * if r>upperbound: # <<<<<<<<<<<<<< - * return r - * return r - */ - __pyx_t_1 = (__pyx_v_r > __pyx_v_upperbound); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":133 - * r += dabs(x[i]-y[i])**p - * if r>upperbound: - * return r # <<<<<<<<<<<<<< - * return r - * - */ - __pyx_r = __pyx_v_r; - goto __pyx_L0; - goto __pyx_L12; - } - __pyx_L12:; - } - } - __pyx_L3:; - - /* "scipy/spatial/ckdtree.pyx":134 - * if r>upperbound: - * return r - * return r # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_r; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":207 - * 
cdef object indices - * cdef np.int32_t* raw_indices - * def __init__(cKDTree self, data, int leafsize=10): # <<<<<<<<<<<<<< - * cdef np.ndarray[double, ndim=2] inner_data - * cdef np.ndarray[double, ndim=1] inner_maxes - */ - -static int __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_data = 0; - int __pyx_v_leafsize; - PyArrayObject *__pyx_v_inner_data = 0; - PyArrayObject *__pyx_v_inner_maxes = 0; - PyArrayObject *__pyx_v_inner_mins = 0; - PyArrayObject *__pyx_v_inner_indices = 0; - Py_buffer __pyx_bstruct_inner_indices; - Py_ssize_t __pyx_bstride_0_inner_indices = 0; - Py_ssize_t __pyx_bshape_0_inner_indices = 0; - Py_buffer __pyx_bstruct_inner_maxes; - Py_ssize_t __pyx_bstride_0_inner_maxes = 0; - Py_ssize_t __pyx_bshape_0_inner_maxes = 0; - Py_buffer __pyx_bstruct_inner_data; - Py_ssize_t __pyx_bstride_0_inner_data = 0; - Py_ssize_t __pyx_bstride_1_inner_data = 0; - Py_ssize_t __pyx_bshape_0_inner_data = 0; - Py_ssize_t __pyx_bshape_1_inner_data = 0; - Py_buffer __pyx_bstruct_inner_mins; - Py_ssize_t __pyx_bstride_0_inner_mins = 0; - Py_ssize_t __pyx_bshape_0_inner_mins = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *(*__pyx_t_6)(PyObject *); - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - PyArrayObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - PyObject *__pyx_t_14 = NULL; - PyArrayObject *__pyx_t_15 = NULL; - PyArrayObject *__pyx_t_16 = NULL; - PyArrayObject *__pyx_t_17 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject 
**__pyx_pyargnames[] = {&__pyx_n_s__data,&__pyx_n_s__leafsize,0}; - __Pyx_RefNannySetupContext("__init__"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__data); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__leafsize); - if (value) { values[1] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_data = values[0]; - if (values[1]) { - __pyx_v_leafsize = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_leafsize == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_leafsize = ((int)10); - } - } else { - __pyx_v_leafsize = ((int)10); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: __pyx_v_leafsize = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_leafsize == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 1: __pyx_v_data = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_inner_data.buf = NULL; - __pyx_bstruct_inner_maxes.buf = NULL; - __pyx_bstruct_inner_mins.buf = NULL; - __pyx_bstruct_inner_indices.buf = NULL; - - /* "scipy/spatial/ckdtree.pyx":212 - * cdef np.ndarray[double, ndim=1] inner_mins - * cdef np.ndarray[np.int32_t, ndim=1] inner_indices - * self.data = np.ascontiguousarray(data,dtype=np.float) # <<<<<<<<<<<<<< - * self.n, self.m = np.shape(self.data) - * self.leafsize = leafsize - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_data); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_data); - __Pyx_GIVEREF(__pyx_v_data); - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__float); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__dtype), __pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_GIVEREF(__pyx_t_5); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __Pyx_DECREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data = __pyx_t_5; - __pyx_t_5 = 0; - - /* "scipy/spatial/ckdtree.pyx":213 - * cdef np.ndarray[np.int32_t, ndim=1] inner_indices - * self.data = np.ascontiguousarray(data,dtype=np.float) - * self.n, self.m = np.shape(self.data) # <<<<<<<<<<<<<< - * self.leafsize = leafsize - * if self.leafsize<1: - */ - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject 
*)__pyx_t_5)); - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - PyTuple_SET_ITEM(__pyx_t_5, 0, ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __Pyx_GIVEREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { - PyObject* sequence = __pyx_t_1; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_5 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = Py_TYPE(__pyx_t_2)->tp_iternext; - index = 0; 
__pyx_t_5 = __pyx_t_6(__pyx_t_2); if (unlikely(!__pyx_t_5)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_5); - index = 1; __pyx_t_3 = __pyx_t_6(__pyx_t_2); if (unlikely(!__pyx_t_3)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_2), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - goto __pyx_L7_unpacking_done; - __pyx_L6_unpacking_failed:; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L7_unpacking_done:; - } - __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_t_5); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->n = __pyx_t_7; - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->m = __pyx_t_8; - - /* "scipy/spatial/ckdtree.pyx":214 - * self.data = np.ascontiguousarray(data,dtype=np.float) - * self.n, self.m = np.shape(self.data) - * self.leafsize = leafsize # <<<<<<<<<<<<<< - * if self.leafsize<1: - * raise ValueError("leafsize must be at least 1") - */ - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->leafsize = __pyx_v_leafsize; - - /* "scipy/spatial/ckdtree.pyx":215 - * self.n, self.m = np.shape(self.data) - * self.leafsize = leafsize - * if self.leafsize<1: # 
<<<<<<<<<<<<<< - * raise ValueError("leafsize must be at least 1") - * self.maxes = np.ascontiguousarray(np.amax(self.data,axis=0)) - */ - __pyx_t_9 = (((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->leafsize < 1); - if (__pyx_t_9) { - - /* "scipy/spatial/ckdtree.pyx":216 - * self.leafsize = leafsize - * if self.leafsize<1: - * raise ValueError("leafsize must be at least 1") # <<<<<<<<<<<<<< - * self.maxes = np.ascontiguousarray(np.amax(self.data,axis=0)) - * self.mins = np.ascontiguousarray(np.amin(self.data,axis=0)) - */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - /* "scipy/spatial/ckdtree.pyx":217 - * if self.leafsize<1: - * raise ValueError("leafsize must be at least 1") - * self.maxes = np.ascontiguousarray(np.amax(self.data,axis=0)) # <<<<<<<<<<<<<< - * self.mins = np.ascontiguousarray(np.amin(self.data,axis=0)) - * self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int32)) - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__amax); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __Pyx_GIVEREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__axis), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_GIVEREF(__pyx_t_4); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes); - __Pyx_DECREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes); - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes = __pyx_t_4; - __pyx_t_4 = 0; - - /* "scipy/spatial/ckdtree.pyx":218 - * raise ValueError("leafsize must be at least 1") - * self.maxes = np.ascontiguousarray(np.amax(self.data,axis=0)) - * self.mins = np.ascontiguousarray(np.amin(self.data,axis=0)) # <<<<<<<<<<<<<< - * self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int32)) - * - */ - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__amin); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree 
*)__pyx_v_self)->data); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __Pyx_GIVEREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__axis), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_GIVEREF(__pyx_t_5); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins); - __Pyx_DECREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins); - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins = __pyx_t_5; - __pyx_t_5 = 0; - - /* "scipy/spatial/ckdtree.pyx":219 - * self.maxes = 
np.ascontiguousarray(np.amax(self.data,axis=0)) - * self.mins = np.ascontiguousarray(np.amin(self.data,axis=0)) - * self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int32)) # <<<<<<<<<<<<<< - * - * inner_data = self.data - */ - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__arange); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->n); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_10 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__int32); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_10); - __Pyx_GIVEREF(__pyx_t_10); - __pyx_t_10 = 0; - __pyx_t_10 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_GIVEREF(__pyx_t_10); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices); - __Pyx_DECREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices); - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices = __pyx_t_10; - __pyx_t_10 = 0; - - /* "scipy/spatial/ckdtree.pyx":221 - * self.indices = 
np.ascontiguousarray(np.arange(self.n,dtype=np.int32)) - * - * inner_data = self.data # <<<<<<<<<<<<<< - * self.raw_data = inner_data.data - * inner_maxes = self.maxes - */ - if (!(likely(((((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data) == Py_None) || likely(__Pyx_TypeTest(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_11 = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_data); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_data, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_data, (PyObject*)__pyx_v_inner_data, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14); - } - } - __pyx_bstride_0_inner_data = __pyx_bstruct_inner_data.strides[0]; __pyx_bstride_1_inner_data = __pyx_bstruct_inner_data.strides[1]; - __pyx_bshape_0_inner_data = __pyx_bstruct_inner_data.shape[0]; __pyx_bshape_1_inner_data = __pyx_bstruct_inner_data.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_11 = 0; - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __pyx_v_inner_data = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - - /* "scipy/spatial/ckdtree.pyx":222 - * - * 
inner_data = self.data - * self.raw_data = inner_data.data # <<<<<<<<<<<<<< - * inner_maxes = self.maxes - * self.raw_maxes = inner_maxes.data - */ - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->raw_data = ((double *)__pyx_v_inner_data->data); - - /* "scipy/spatial/ckdtree.pyx":223 - * inner_data = self.data - * self.raw_data = inner_data.data - * inner_maxes = self.maxes # <<<<<<<<<<<<<< - * self.raw_maxes = inner_maxes.data - * inner_mins = self.mins - */ - if (!(likely(((((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes) == Py_None) || likely(__Pyx_TypeTest(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_15 = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_maxes); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_maxes, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_maxes, (PyObject*)__pyx_v_inner_maxes, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12); - } - } - __pyx_bstride_0_inner_maxes = __pyx_bstruct_inner_maxes.strides[0]; - __pyx_bshape_0_inner_maxes = __pyx_bstruct_inner_maxes.shape[0]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_15 = 0; - __Pyx_INCREF(((struct 
__pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes); - __pyx_v_inner_maxes = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes); - - /* "scipy/spatial/ckdtree.pyx":224 - * self.raw_data = inner_data.data - * inner_maxes = self.maxes - * self.raw_maxes = inner_maxes.data # <<<<<<<<<<<<<< - * inner_mins = self.mins - * self.raw_mins = inner_mins.data - */ - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->raw_maxes = ((double *)__pyx_v_inner_maxes->data); - - /* "scipy/spatial/ckdtree.pyx":225 - * inner_maxes = self.maxes - * self.raw_maxes = inner_maxes.data - * inner_mins = self.mins # <<<<<<<<<<<<<< - * self.raw_mins = inner_mins.data - * inner_indices = self.indices - */ - if (!(likely(((((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins) == Py_None) || likely(__Pyx_TypeTest(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_16 = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_mins); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_mins, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_mins, (PyObject*)__pyx_v_inner_mins, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14); - } - } - __pyx_bstride_0_inner_mins = __pyx_bstruct_inner_mins.strides[0]; - 
__pyx_bshape_0_inner_mins = __pyx_bstruct_inner_mins.shape[0]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_16 = 0; - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins); - __pyx_v_inner_mins = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins); - - /* "scipy/spatial/ckdtree.pyx":226 - * self.raw_maxes = inner_maxes.data - * inner_mins = self.mins - * self.raw_mins = inner_mins.data # <<<<<<<<<<<<<< - * inner_indices = self.indices - * self.raw_indices = inner_indices.data - */ - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->raw_mins = ((double *)__pyx_v_inner_mins->data); - - /* "scipy/spatial/ckdtree.pyx":227 - * inner_mins = self.mins - * self.raw_mins = inner_mins.data - * inner_indices = self.indices # <<<<<<<<<<<<<< - * self.raw_indices = inner_indices.data - * - */ - if (!(likely(((((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices) == Py_None) || likely(__Pyx_TypeTest(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_17 = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_indices); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_indices, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_inner_indices, (PyObject*)__pyx_v_inner_indices, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, 
__pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12); - } - } - __pyx_bstride_0_inner_indices = __pyx_bstruct_inner_indices.strides[0]; - __pyx_bshape_0_inner_indices = __pyx_bstruct_inner_indices.shape[0]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_17 = 0; - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices); - __pyx_v_inner_indices = ((PyArrayObject *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->indices); - - /* "scipy/spatial/ckdtree.pyx":228 - * self.raw_mins = inner_mins.data - * inner_indices = self.indices - * self.raw_indices = inner_indices.data # <<<<<<<<<<<<<< - * - * self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins) - */ - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->raw_indices = ((__pyx_t_5numpy_int32_t *)__pyx_v_inner_indices->data); - - /* "scipy/spatial/ckdtree.pyx":230 - * self.raw_indices = inner_indices.data - * - * self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins) # <<<<<<<<<<<<<< - * - * cdef innernode* __build(cKDTree self, int start_idx, int end_idx, double* maxes, double* mins): - */ - ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->tree = ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->__pyx_vtab)->__build(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self), 0, ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->n, ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->raw_maxes, ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->raw_mins); - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_10); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_indices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_maxes); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_data); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_mins); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_indices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_maxes); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_data); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_inner_mins); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_inner_data); - __Pyx_XDECREF((PyObject *)__pyx_v_inner_maxes); - __Pyx_XDECREF((PyObject *)__pyx_v_inner_mins); - __Pyx_XDECREF((PyObject *)__pyx_v_inner_indices); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":232 - * self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins) - * - * cdef innernode* __build(cKDTree self, int start_idx, int end_idx, double* maxes, double* mins): # <<<<<<<<<<<<<< - * cdef leafnode* n - * cdef innernode* ni - */ - -static struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_f_5scipy_7spatial_7ckdtree_7cKDTree___build(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *__pyx_v_self, int __pyx_v_start_idx, int __pyx_v_end_idx, double *__pyx_v_maxes, double *__pyx_v_mins) { - struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode *__pyx_v_n; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_v_ni; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_t; - int __pyx_v_p; - int __pyx_v_q; - int __pyx_v_d; - double __pyx_v_size; - double 
__pyx_v_split; - double __pyx_v_minval; - double __pyx_v_maxval; - double *__pyx_v_mids; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - long __pyx_t_4; - __Pyx_RefNannySetupContext("__build"); - - /* "scipy/spatial/ckdtree.pyx":238 - * cdef double size, split, minval, maxval - * cdef double*mids - * if end_idx-start_idx<=self.leafsize: # <<<<<<<<<<<<<< - * n = stdlib.malloc(sizeof(leafnode)) - * n.split_dim = -1 - */ - __pyx_t_1 = ((__pyx_v_end_idx - __pyx_v_start_idx) <= __pyx_v_self->leafsize); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":239 - * cdef double*mids - * if end_idx-start_idx<=self.leafsize: - * n = stdlib.malloc(sizeof(leafnode)) # <<<<<<<<<<<<<< - * n.split_dim = -1 - * n.start_idx = start_idx - */ - __pyx_v_n = ((struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode *)malloc((sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode)))); - - /* "scipy/spatial/ckdtree.pyx":240 - * if end_idx-start_idx<=self.leafsize: - * n = stdlib.malloc(sizeof(leafnode)) - * n.split_dim = -1 # <<<<<<<<<<<<<< - * n.start_idx = start_idx - * n.end_idx = end_idx - */ - __pyx_v_n->split_dim = -1; - - /* "scipy/spatial/ckdtree.pyx":241 - * n = stdlib.malloc(sizeof(leafnode)) - * n.split_dim = -1 - * n.start_idx = start_idx # <<<<<<<<<<<<<< - * n.end_idx = end_idx - * return n - */ - __pyx_v_n->start_idx = __pyx_v_start_idx; - - /* "scipy/spatial/ckdtree.pyx":242 - * n.split_dim = -1 - * n.start_idx = start_idx - * n.end_idx = end_idx # <<<<<<<<<<<<<< - * return n - * else: - */ - __pyx_v_n->end_idx = __pyx_v_end_idx; - - /* "scipy/spatial/ckdtree.pyx":243 - * n.start_idx = start_idx - * n.end_idx = end_idx - * return n # <<<<<<<<<<<<<< - * else: - * d = 0 - */ - __pyx_r = ((struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *)__pyx_v_n); - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":245 - * return n - * else: - * d = 0 # 
<<<<<<<<<<<<<< - * size = 0 - * for i in range(self.m): - */ - __pyx_v_d = 0; - - /* "scipy/spatial/ckdtree.pyx":246 - * else: - * d = 0 - * size = 0 # <<<<<<<<<<<<<< - * for i in range(self.m): - * if maxes[i]-mins[i] > size: - */ - __pyx_v_size = 0.0; - - /* "scipy/spatial/ckdtree.pyx":247 - * d = 0 - * size = 0 - * for i in range(self.m): # <<<<<<<<<<<<<< - * if maxes[i]-mins[i] > size: - * d = i - */ - __pyx_t_2 = __pyx_v_self->m; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":248 - * size = 0 - * for i in range(self.m): - * if maxes[i]-mins[i] > size: # <<<<<<<<<<<<<< - * d = i - * size = maxes[i]-mins[i] - */ - __pyx_t_1 = (((__pyx_v_maxes[__pyx_v_i]) - (__pyx_v_mins[__pyx_v_i])) > __pyx_v_size); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":249 - * for i in range(self.m): - * if maxes[i]-mins[i] > size: - * d = i # <<<<<<<<<<<<<< - * size = maxes[i]-mins[i] - * maxval = maxes[d] - */ - __pyx_v_d = __pyx_v_i; - - /* "scipy/spatial/ckdtree.pyx":250 - * if maxes[i]-mins[i] > size: - * d = i - * size = maxes[i]-mins[i] # <<<<<<<<<<<<<< - * maxval = maxes[d] - * minval = mins[d] - */ - __pyx_v_size = ((__pyx_v_maxes[__pyx_v_i]) - (__pyx_v_mins[__pyx_v_i])); - goto __pyx_L6; - } - __pyx_L6:; - } - - /* "scipy/spatial/ckdtree.pyx":251 - * d = i - * size = maxes[i]-mins[i] - * maxval = maxes[d] # <<<<<<<<<<<<<< - * minval = mins[d] - * if maxval==minval: - */ - __pyx_v_maxval = (__pyx_v_maxes[__pyx_v_d]); - - /* "scipy/spatial/ckdtree.pyx":252 - * size = maxes[i]-mins[i] - * maxval = maxes[d] - * minval = mins[d] # <<<<<<<<<<<<<< - * if maxval==minval: - * # all points are identical; warn user? - */ - __pyx_v_minval = (__pyx_v_mins[__pyx_v_d]); - - /* "scipy/spatial/ckdtree.pyx":253 - * maxval = maxes[d] - * minval = mins[d] - * if maxval==minval: # <<<<<<<<<<<<<< - * # all points are identical; warn user? 
- * n = stdlib.malloc(sizeof(leafnode)) - */ - __pyx_t_1 = (__pyx_v_maxval == __pyx_v_minval); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":255 - * if maxval==minval: - * # all points are identical; warn user? - * n = stdlib.malloc(sizeof(leafnode)) # <<<<<<<<<<<<<< - * n.split_dim = -1 - * n.start_idx = start_idx - */ - __pyx_v_n = ((struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode *)malloc((sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode)))); - - /* "scipy/spatial/ckdtree.pyx":256 - * # all points are identical; warn user? - * n = stdlib.malloc(sizeof(leafnode)) - * n.split_dim = -1 # <<<<<<<<<<<<<< - * n.start_idx = start_idx - * n.end_idx = end_idx - */ - __pyx_v_n->split_dim = -1; - - /* "scipy/spatial/ckdtree.pyx":257 - * n = stdlib.malloc(sizeof(leafnode)) - * n.split_dim = -1 - * n.start_idx = start_idx # <<<<<<<<<<<<<< - * n.end_idx = end_idx - * return n - */ - __pyx_v_n->start_idx = __pyx_v_start_idx; - - /* "scipy/spatial/ckdtree.pyx":258 - * n.split_dim = -1 - * n.start_idx = start_idx - * n.end_idx = end_idx # <<<<<<<<<<<<<< - * return n - * - */ - __pyx_v_n->end_idx = __pyx_v_end_idx; - - /* "scipy/spatial/ckdtree.pyx":259 - * n.start_idx = start_idx - * n.end_idx = end_idx - * return n # <<<<<<<<<<<<<< - * - * split = (maxval+minval)/2 - */ - __pyx_r = ((struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *)__pyx_v_n); - goto __pyx_L0; - goto __pyx_L7; - } - __pyx_L7:; - - /* "scipy/spatial/ckdtree.pyx":261 - * return n - * - * split = (maxval+minval)/2 # <<<<<<<<<<<<<< - * - * p = start_idx - */ - __pyx_v_split = ((__pyx_v_maxval + __pyx_v_minval) / 2.0); - - /* "scipy/spatial/ckdtree.pyx":263 - * split = (maxval+minval)/2 - * - * p = start_idx # <<<<<<<<<<<<<< - * q = end_idx-1 - * while p<=q: - */ - __pyx_v_p = __pyx_v_start_idx; - - /* "scipy/spatial/ckdtree.pyx":264 - * - * p = start_idx - * q = end_idx-1 # <<<<<<<<<<<<<< - * while p<=q: - * if self.raw_data[self.raw_indices[p]*self.m+d]=split: - */ - __pyx_t_1 = 
((__pyx_v_self->raw_data[(((__pyx_v_self->raw_indices[__pyx_v_p]) * __pyx_v_self->m) + __pyx_v_d)]) < __pyx_v_split); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":267 - * while p<=q: - * if self.raw_data[self.raw_indices[p]*self.m+d]=split: - * q-=1 - */ - __pyx_v_p = (__pyx_v_p + 1); - goto __pyx_L10; - } - - /* "scipy/spatial/ckdtree.pyx":268 - * if self.raw_data[self.raw_indices[p]*self.m+d]=split: # <<<<<<<<<<<<<< - * q-=1 - * else: - */ - __pyx_t_1 = ((__pyx_v_self->raw_data[(((__pyx_v_self->raw_indices[__pyx_v_q]) * __pyx_v_self->m) + __pyx_v_d)]) >= __pyx_v_split); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":269 - * p+=1 - * elif self.raw_data[self.raw_indices[q]*self.m+d]>=split: - * q-=1 # <<<<<<<<<<<<<< - * else: - * t = self.raw_indices[p] - */ - __pyx_v_q = (__pyx_v_q - 1); - goto __pyx_L10; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":271 - * q-=1 - * else: - * t = self.raw_indices[p] # <<<<<<<<<<<<<< - * self.raw_indices[p] = self.raw_indices[q] - * self.raw_indices[q] = t - */ - __pyx_v_t = (__pyx_v_self->raw_indices[__pyx_v_p]); - - /* "scipy/spatial/ckdtree.pyx":272 - * else: - * t = self.raw_indices[p] - * self.raw_indices[p] = self.raw_indices[q] # <<<<<<<<<<<<<< - * self.raw_indices[q] = t - * p+=1 - */ - (__pyx_v_self->raw_indices[__pyx_v_p]) = (__pyx_v_self->raw_indices[__pyx_v_q]); - - /* "scipy/spatial/ckdtree.pyx":273 - * t = self.raw_indices[p] - * self.raw_indices[p] = self.raw_indices[q] - * self.raw_indices[q] = t # <<<<<<<<<<<<<< - * p+=1 - * q-=1 - */ - (__pyx_v_self->raw_indices[__pyx_v_q]) = __pyx_v_t; - - /* "scipy/spatial/ckdtree.pyx":274 - * self.raw_indices[p] = self.raw_indices[q] - * self.raw_indices[q] = t - * p+=1 # <<<<<<<<<<<<<< - * q-=1 - * - */ - __pyx_v_p = (__pyx_v_p + 1); - - /* "scipy/spatial/ckdtree.pyx":275 - * self.raw_indices[q] = t - * p+=1 - * q-=1 # <<<<<<<<<<<<<< - * - * # slide midpoint if necessary - */ - __pyx_v_q = (__pyx_v_q - 1); - } - __pyx_L10:; - } - - /* 
"scipy/spatial/ckdtree.pyx":278 - * - * # slide midpoint if necessary - * if p==start_idx: # <<<<<<<<<<<<<< - * # no points less than split - * j = start_idx - */ - __pyx_t_1 = (__pyx_v_p == __pyx_v_start_idx); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":280 - * if p==start_idx: - * # no points less than split - * j = start_idx # <<<<<<<<<<<<<< - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * for i in range(start_idx+1, end_idx): - */ - __pyx_v_j = __pyx_v_start_idx; - - /* "scipy/spatial/ckdtree.pyx":281 - * # no points less than split - * j = start_idx - * split = self.raw_data[self.raw_indices[j]*self.m+d] # <<<<<<<<<<<<<< - * for i in range(start_idx+1, end_idx): - * if self.raw_data[self.raw_indices[i]*self.m+d]raw_data[(((__pyx_v_self->raw_indices[__pyx_v_j]) * __pyx_v_self->m) + __pyx_v_d)]); - - /* "scipy/spatial/ckdtree.pyx":282 - * j = start_idx - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * for i in range(start_idx+1, end_idx): # <<<<<<<<<<<<<< - * if self.raw_data[self.raw_indices[i]*self.m+d]raw_data[(((__pyx_v_self->raw_indices[__pyx_v_i]) * __pyx_v_self->m) + __pyx_v_d)]) < __pyx_v_split); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":284 - * for i in range(start_idx+1, end_idx): - * if self.raw_data[self.raw_indices[i]*self.m+d]raw_data[(((__pyx_v_self->raw_indices[__pyx_v_j]) * __pyx_v_self->m) + __pyx_v_d)]); - goto __pyx_L14; - } - __pyx_L14:; - } - - /* "scipy/spatial/ckdtree.pyx":286 - * j = i - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * t = self.raw_indices[start_idx] # <<<<<<<<<<<<<< - * self.raw_indices[start_idx] = self.raw_indices[j] - * self.raw_indices[j] = t - */ - __pyx_v_t = (__pyx_v_self->raw_indices[__pyx_v_start_idx]); - - /* "scipy/spatial/ckdtree.pyx":287 - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * t = self.raw_indices[start_idx] - * self.raw_indices[start_idx] = self.raw_indices[j] # <<<<<<<<<<<<<< - * self.raw_indices[j] = t - * p = start_idx+1 - */ - 
(__pyx_v_self->raw_indices[__pyx_v_start_idx]) = (__pyx_v_self->raw_indices[__pyx_v_j]); - - /* "scipy/spatial/ckdtree.pyx":288 - * t = self.raw_indices[start_idx] - * self.raw_indices[start_idx] = self.raw_indices[j] - * self.raw_indices[j] = t # <<<<<<<<<<<<<< - * p = start_idx+1 - * q = start_idx - */ - (__pyx_v_self->raw_indices[__pyx_v_j]) = __pyx_v_t; - - /* "scipy/spatial/ckdtree.pyx":289 - * self.raw_indices[start_idx] = self.raw_indices[j] - * self.raw_indices[j] = t - * p = start_idx+1 # <<<<<<<<<<<<<< - * q = start_idx - * elif p==end_idx: - */ - __pyx_v_p = (__pyx_v_start_idx + 1); - - /* "scipy/spatial/ckdtree.pyx":290 - * self.raw_indices[j] = t - * p = start_idx+1 - * q = start_idx # <<<<<<<<<<<<<< - * elif p==end_idx: - * # no points greater than split - */ - __pyx_v_q = __pyx_v_start_idx; - goto __pyx_L11; - } - - /* "scipy/spatial/ckdtree.pyx":291 - * p = start_idx+1 - * q = start_idx - * elif p==end_idx: # <<<<<<<<<<<<<< - * # no points greater than split - * j = end_idx-1 - */ - __pyx_t_1 = (__pyx_v_p == __pyx_v_end_idx); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":293 - * elif p==end_idx: - * # no points greater than split - * j = end_idx-1 # <<<<<<<<<<<<<< - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * for i in range(start_idx, end_idx-1): - */ - __pyx_v_j = (__pyx_v_end_idx - 1); - - /* "scipy/spatial/ckdtree.pyx":294 - * # no points greater than split - * j = end_idx-1 - * split = self.raw_data[self.raw_indices[j]*self.m+d] # <<<<<<<<<<<<<< - * for i in range(start_idx, end_idx-1): - * if self.raw_data[self.raw_indices[i]*self.m+d]>split: - */ - __pyx_v_split = (__pyx_v_self->raw_data[(((__pyx_v_self->raw_indices[__pyx_v_j]) * __pyx_v_self->m) + __pyx_v_d)]); - - /* "scipy/spatial/ckdtree.pyx":295 - * j = end_idx-1 - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * for i in range(start_idx, end_idx-1): # <<<<<<<<<<<<<< - * if self.raw_data[self.raw_indices[i]*self.m+d]>split: - * j = i - */ - __pyx_t_4 = 
(__pyx_v_end_idx - 1); - for (__pyx_t_2 = __pyx_v_start_idx; __pyx_t_2 < __pyx_t_4; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - /* "scipy/spatial/ckdtree.pyx":296 - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * for i in range(start_idx, end_idx-1): - * if self.raw_data[self.raw_indices[i]*self.m+d]>split: # <<<<<<<<<<<<<< - * j = i - * split = self.raw_data[self.raw_indices[j]*self.m+d] - */ - __pyx_t_1 = ((__pyx_v_self->raw_data[(((__pyx_v_self->raw_indices[__pyx_v_i]) * __pyx_v_self->m) + __pyx_v_d)]) > __pyx_v_split); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":297 - * for i in range(start_idx, end_idx-1): - * if self.raw_data[self.raw_indices[i]*self.m+d]>split: - * j = i # <<<<<<<<<<<<<< - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * t = self.raw_indices[end_idx-1] - */ - __pyx_v_j = __pyx_v_i; - - /* "scipy/spatial/ckdtree.pyx":298 - * if self.raw_data[self.raw_indices[i]*self.m+d]>split: - * j = i - * split = self.raw_data[self.raw_indices[j]*self.m+d] # <<<<<<<<<<<<<< - * t = self.raw_indices[end_idx-1] - * self.raw_indices[end_idx-1] = self.raw_indices[j] - */ - __pyx_v_split = (__pyx_v_self->raw_data[(((__pyx_v_self->raw_indices[__pyx_v_j]) * __pyx_v_self->m) + __pyx_v_d)]); - goto __pyx_L17; - } - __pyx_L17:; - } - - /* "scipy/spatial/ckdtree.pyx":299 - * j = i - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * t = self.raw_indices[end_idx-1] # <<<<<<<<<<<<<< - * self.raw_indices[end_idx-1] = self.raw_indices[j] - * self.raw_indices[j] = t - */ - __pyx_v_t = (__pyx_v_self->raw_indices[(__pyx_v_end_idx - 1)]); - - /* "scipy/spatial/ckdtree.pyx":300 - * split = self.raw_data[self.raw_indices[j]*self.m+d] - * t = self.raw_indices[end_idx-1] - * self.raw_indices[end_idx-1] = self.raw_indices[j] # <<<<<<<<<<<<<< - * self.raw_indices[j] = t - * p = end_idx-1 - */ - (__pyx_v_self->raw_indices[(__pyx_v_end_idx - 1)]) = (__pyx_v_self->raw_indices[__pyx_v_j]); - - /* "scipy/spatial/ckdtree.pyx":301 - * t = 
self.raw_indices[end_idx-1] - * self.raw_indices[end_idx-1] = self.raw_indices[j] - * self.raw_indices[j] = t # <<<<<<<<<<<<<< - * p = end_idx-1 - * q = end_idx-2 - */ - (__pyx_v_self->raw_indices[__pyx_v_j]) = __pyx_v_t; - - /* "scipy/spatial/ckdtree.pyx":302 - * self.raw_indices[end_idx-1] = self.raw_indices[j] - * self.raw_indices[j] = t - * p = end_idx-1 # <<<<<<<<<<<<<< - * q = end_idx-2 - * - */ - __pyx_v_p = (__pyx_v_end_idx - 1); - - /* "scipy/spatial/ckdtree.pyx":303 - * self.raw_indices[j] = t - * p = end_idx-1 - * q = end_idx-2 # <<<<<<<<<<<<<< - * - * # construct new node representation - */ - __pyx_v_q = (__pyx_v_end_idx - 2); - goto __pyx_L11; - } - __pyx_L11:; - - /* "scipy/spatial/ckdtree.pyx":306 - * - * # construct new node representation - * ni = stdlib.malloc(sizeof(innernode)) # <<<<<<<<<<<<<< - * - * mids = stdlib.malloc(sizeof(double)*self.m) - */ - __pyx_v_ni = ((struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *)malloc((sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_innernode)))); - - /* "scipy/spatial/ckdtree.pyx":308 - * ni = stdlib.malloc(sizeof(innernode)) - * - * mids = stdlib.malloc(sizeof(double)*self.m) # <<<<<<<<<<<<<< - * for i in range(self.m): - * mids[i] = maxes[i] - */ - __pyx_v_mids = ((double *)malloc(((sizeof(double)) * __pyx_v_self->m))); - - /* "scipy/spatial/ckdtree.pyx":309 - * - * mids = stdlib.malloc(sizeof(double)*self.m) - * for i in range(self.m): # <<<<<<<<<<<<<< - * mids[i] = maxes[i] - * mids[d] = split - */ - __pyx_t_2 = __pyx_v_self->m; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":310 - * mids = stdlib.malloc(sizeof(double)*self.m) - * for i in range(self.m): - * mids[i] = maxes[i] # <<<<<<<<<<<<<< - * mids[d] = split - * ni.less = self.__build(start_idx,p,mids,mins) - */ - (__pyx_v_mids[__pyx_v_i]) = (__pyx_v_maxes[__pyx_v_i]); - } - - /* "scipy/spatial/ckdtree.pyx":311 - * for i in range(self.m): - * mids[i] = maxes[i] - * mids[d] = 
split # <<<<<<<<<<<<<< - * ni.less = self.__build(start_idx,p,mids,mins) - * - */ - (__pyx_v_mids[__pyx_v_d]) = __pyx_v_split; - - /* "scipy/spatial/ckdtree.pyx":312 - * mids[i] = maxes[i] - * mids[d] = split - * ni.less = self.__build(start_idx,p,mids,mins) # <<<<<<<<<<<<<< - * - * for i in range(self.m): - */ - __pyx_v_ni->less = ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self->__pyx_vtab)->__build(__pyx_v_self, __pyx_v_start_idx, __pyx_v_p, __pyx_v_mids, __pyx_v_mins); - - /* "scipy/spatial/ckdtree.pyx":314 - * ni.less = self.__build(start_idx,p,mids,mins) - * - * for i in range(self.m): # <<<<<<<<<<<<<< - * mids[i] = mins[i] - * mids[d] = split - */ - __pyx_t_2 = __pyx_v_self->m; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":315 - * - * for i in range(self.m): - * mids[i] = mins[i] # <<<<<<<<<<<<<< - * mids[d] = split - * ni.greater = self.__build(p,end_idx,maxes,mids) - */ - (__pyx_v_mids[__pyx_v_i]) = (__pyx_v_mins[__pyx_v_i]); - } - - /* "scipy/spatial/ckdtree.pyx":316 - * for i in range(self.m): - * mids[i] = mins[i] - * mids[d] = split # <<<<<<<<<<<<<< - * ni.greater = self.__build(p,end_idx,maxes,mids) - * - */ - (__pyx_v_mids[__pyx_v_d]) = __pyx_v_split; - - /* "scipy/spatial/ckdtree.pyx":317 - * mids[i] = mins[i] - * mids[d] = split - * ni.greater = self.__build(p,end_idx,maxes,mids) # <<<<<<<<<<<<<< - * - * stdlib.free(mids) - */ - __pyx_v_ni->greater = ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self->__pyx_vtab)->__build(__pyx_v_self, __pyx_v_p, __pyx_v_end_idx, __pyx_v_maxes, __pyx_v_mids); - - /* "scipy/spatial/ckdtree.pyx":319 - * ni.greater = self.__build(p,end_idx,maxes,mids) - * - * stdlib.free(mids) # <<<<<<<<<<<<<< - * - * ni.split_dim = d - */ - free(__pyx_v_mids); - - /* "scipy/spatial/ckdtree.pyx":321 - * stdlib.free(mids) - * - * ni.split_dim = d # <<<<<<<<<<<<<< - * ni.split = split - * - */ - 
__pyx_v_ni->split_dim = __pyx_v_d; - - /* "scipy/spatial/ckdtree.pyx":322 - * - * ni.split_dim = d - * ni.split = split # <<<<<<<<<<<<<< - * - * return ni - */ - __pyx_v_ni->split = __pyx_v_split; - - /* "scipy/spatial/ckdtree.pyx":324 - * ni.split = split - * - * return ni # <<<<<<<<<<<<<< - * - * cdef __free_tree(cKDTree self, innernode* node): - */ - __pyx_r = __pyx_v_ni; - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":326 - * return ni - * - * cdef __free_tree(cKDTree self, innernode* node): # <<<<<<<<<<<<<< - * if node.split_dim!=-1: - * self.__free_tree(node.less) - */ - -static PyObject *__pyx_f_5scipy_7spatial_7ckdtree_7cKDTree___free_tree(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *__pyx_v_self, struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_v_node) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__free_tree"); - - /* "scipy/spatial/ckdtree.pyx":327 - * - * cdef __free_tree(cKDTree self, innernode* node): - * if node.split_dim!=-1: # <<<<<<<<<<<<<< - * self.__free_tree(node.less) - * self.__free_tree(node.greater) - */ - __pyx_t_1 = (__pyx_v_node->split_dim != -1); - if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":328 - * cdef __free_tree(cKDTree self, innernode* node): - * if node.split_dim!=-1: - * self.__free_tree(node.less) # <<<<<<<<<<<<<< - * self.__free_tree(node.greater) - * stdlib.free(node) - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self->__pyx_vtab)->__free_tree(__pyx_v_self, __pyx_v_node->less); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* 
"scipy/spatial/ckdtree.pyx":329 - * if node.split_dim!=-1: - * self.__free_tree(node.less) - * self.__free_tree(node.greater) # <<<<<<<<<<<<<< - * stdlib.free(node) - * - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self->__pyx_vtab)->__free_tree(__pyx_v_self, __pyx_v_node->greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/spatial/ckdtree.pyx":330 - * self.__free_tree(node.less) - * self.__free_tree(node.greater) - * stdlib.free(node) # <<<<<<<<<<<<<< - * - * def __dealloc__(cKDTree self): - */ - free(__pyx_v_node); - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.__free_tree", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":332 - * stdlib.free(node) - * - * def __dealloc__(cKDTree self): # <<<<<<<<<<<<<< - * if (self.tree) == 0: - * # should happen only if __init__ was never called - */ - -static void __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1__dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1__dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__dealloc__"); - - /* "scipy/spatial/ckdtree.pyx":333 - * - * def __dealloc__(cKDTree self): - * if (self.tree) == 0: # <<<<<<<<<<<<<< - * # should happen only if __init__ was never called - * return - */ - __pyx_t_1 = (((int)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->tree) == 0); - 
if (__pyx_t_1) { - - /* "scipy/spatial/ckdtree.pyx":335 - * if (self.tree) == 0: - * # should happen only if __init__ was never called - * return # <<<<<<<<<<<<<< - * self.__free_tree(self.tree) - * - */ - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "scipy/spatial/ckdtree.pyx":336 - * # should happen only if __init__ was never called - * return - * self.__free_tree(self.tree) # <<<<<<<<<<<<<< - * - * cdef void __query(cKDTree self, - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->__pyx_vtab)->__free_tree(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self), ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->tree); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.__dealloc__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_RefNannyFinishContext(); -} - -/* "scipy/spatial/ckdtree.pyx":338 - * self.__free_tree(self.tree) - * - * cdef void __query(cKDTree self, # <<<<<<<<<<<<<< - * double*result_distances, - * int*result_indices, - */ - -static void __pyx_f_5scipy_7spatial_7ckdtree_7cKDTree___query(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *__pyx_v_self, double *__pyx_v_result_distances, int *__pyx_v_result_indices, double *__pyx_v_x, int __pyx_v_k, double __pyx_v_eps, double __pyx_v_p, double __pyx_v_distance_upper_bound) { - struct __pyx_t_5scipy_7spatial_7ckdtree_heap __pyx_v_q; - struct __pyx_t_5scipy_7spatial_7ckdtree_heap __pyx_v_neighbors; - int __pyx_v_i; - double __pyx_v_t; - struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo *__pyx_v_inf; - struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo *__pyx_v_inf2; - double __pyx_v_d; - double 
__pyx_v_epsfac; - double __pyx_v_min_distance; - double __pyx_v_far_min_distance; - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_it; - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_it2; - struct __pyx_t_5scipy_7spatial_7ckdtree_heapitem __pyx_v_neighbor; - struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode *__pyx_v_node; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_v_inode; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_v_near; - struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *__pyx_v_far; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - double __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__query"); - - /* "scipy/spatial/ckdtree.pyx":369 - * # distances between the nearest side of the cell and the target - * # the head node of the cell - * heapcreate(&q,12) # <<<<<<<<<<<<<< - * - * # priority queue for the nearest neighbors - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heapcreate((&__pyx_v_q), 12); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":374 - * # furthest known neighbor first - * # entries are (-distance**p, i) - * heapcreate(&neighbors,k) # <<<<<<<<<<<<<< - * - * # set up first nodeinfo - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heapcreate((&__pyx_v_neighbors), __pyx_v_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":377 - * - * # set up first nodeinfo - * inf = stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) # <<<<<<<<<<<<<< - * inf.node = self.tree - * 
for i in range(self.m): - */ - __pyx_v_inf = ((struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo *)malloc(((sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo)) + (__pyx_v_self->m * (sizeof(double)))))); - - /* "scipy/spatial/ckdtree.pyx":378 - * # set up first nodeinfo - * inf = stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) - * inf.node = self.tree # <<<<<<<<<<<<<< - * for i in range(self.m): - * inf.side_distances[i] = 0 - */ - __pyx_v_inf->node = __pyx_v_self->tree; - - /* "scipy/spatial/ckdtree.pyx":379 - * inf = stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) - * inf.node = self.tree - * for i in range(self.m): # <<<<<<<<<<<<<< - * inf.side_distances[i] = 0 - * t = x[i]-self.raw_maxes[i] - */ - __pyx_t_2 = __pyx_v_self->m; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":380 - * inf.node = self.tree - * for i in range(self.m): - * inf.side_distances[i] = 0 # <<<<<<<<<<<<<< - * t = x[i]-self.raw_maxes[i] - * if t>inf.side_distances[i]: - */ - (__pyx_v_inf->side_distances[__pyx_v_i]) = 0.0; - - /* "scipy/spatial/ckdtree.pyx":381 - * for i in range(self.m): - * inf.side_distances[i] = 0 - * t = x[i]-self.raw_maxes[i] # <<<<<<<<<<<<<< - * if t>inf.side_distances[i]: - * inf.side_distances[i] = t - */ - __pyx_v_t = ((__pyx_v_x[__pyx_v_i]) - (__pyx_v_self->raw_maxes[__pyx_v_i])); - - /* "scipy/spatial/ckdtree.pyx":382 - * inf.side_distances[i] = 0 - * t = x[i]-self.raw_maxes[i] - * if t>inf.side_distances[i]: # <<<<<<<<<<<<<< - * inf.side_distances[i] = t - * else: - */ - __pyx_t_4 = (__pyx_v_t > (__pyx_v_inf->side_distances[__pyx_v_i])); - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":383 - * t = x[i]-self.raw_maxes[i] - * if t>inf.side_distances[i]: - * inf.side_distances[i] = t # <<<<<<<<<<<<<< - * else: - * t = self.raw_mins[i]-x[i] - */ - (__pyx_v_inf->side_distances[__pyx_v_i]) = __pyx_v_t; - goto __pyx_L5; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":385 - * 
inf.side_distances[i] = t - * else: - * t = self.raw_mins[i]-x[i] # <<<<<<<<<<<<<< - * if t>inf.side_distances[i]: - * inf.side_distances[i] = t - */ - __pyx_v_t = ((__pyx_v_self->raw_mins[__pyx_v_i]) - (__pyx_v_x[__pyx_v_i])); - - /* "scipy/spatial/ckdtree.pyx":386 - * else: - * t = self.raw_mins[i]-x[i] - * if t>inf.side_distances[i]: # <<<<<<<<<<<<<< - * inf.side_distances[i] = t - * if p!=1 and p!=infinity: - */ - __pyx_t_4 = (__pyx_v_t > (__pyx_v_inf->side_distances[__pyx_v_i])); - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":387 - * t = self.raw_mins[i]-x[i] - * if t>inf.side_distances[i]: - * inf.side_distances[i] = t # <<<<<<<<<<<<<< - * if p!=1 and p!=infinity: - * inf.side_distances[i]=inf.side_distances[i]**p - */ - (__pyx_v_inf->side_distances[__pyx_v_i]) = __pyx_v_t; - goto __pyx_L6; - } - __pyx_L6:; - } - __pyx_L5:; - - /* "scipy/spatial/ckdtree.pyx":388 - * if t>inf.side_distances[i]: - * inf.side_distances[i] = t - * if p!=1 and p!=infinity: # <<<<<<<<<<<<<< - * inf.side_distances[i]=inf.side_distances[i]**p - * - */ - __pyx_t_4 = (__pyx_v_p != 1.0); - if (__pyx_t_4) { - __pyx_t_5 = (__pyx_v_p != __pyx_v_5scipy_7spatial_7ckdtree_infinity); - __pyx_t_6 = __pyx_t_5; - } else { - __pyx_t_6 = __pyx_t_4; - } - if (__pyx_t_6) { - - /* "scipy/spatial/ckdtree.pyx":389 - * inf.side_distances[i] = t - * if p!=1 and p!=infinity: - * inf.side_distances[i]=inf.side_distances[i]**p # <<<<<<<<<<<<<< - * - * # compute first distance - */ - (__pyx_v_inf->side_distances[__pyx_v_i]) = pow((__pyx_v_inf->side_distances[__pyx_v_i]), __pyx_v_p); - goto __pyx_L7; - } - __pyx_L7:; - } - - /* "scipy/spatial/ckdtree.pyx":392 - * - * # compute first distance - * min_distance = 0. # <<<<<<<<<<<<<< - * for i in range(self.m): - * if p==infinity: - */ - __pyx_v_min_distance = 0.; - - /* "scipy/spatial/ckdtree.pyx":393 - * # compute first distance - * min_distance = 0. 
- * for i in range(self.m): # <<<<<<<<<<<<<< - * if p==infinity: - * min_distance = dmax(min_distance,inf.side_distances[i]) - */ - __pyx_t_2 = __pyx_v_self->m; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":394 - * min_distance = 0. - * for i in range(self.m): - * if p==infinity: # <<<<<<<<<<<<<< - * min_distance = dmax(min_distance,inf.side_distances[i]) - * else: - */ - __pyx_t_6 = (__pyx_v_p == __pyx_v_5scipy_7spatial_7ckdtree_infinity); - if (__pyx_t_6) { - - /* "scipy/spatial/ckdtree.pyx":395 - * for i in range(self.m): - * if p==infinity: - * min_distance = dmax(min_distance,inf.side_distances[i]) # <<<<<<<<<<<<<< - * else: - * min_distance += inf.side_distances[i] - */ - __pyx_v_min_distance = __pyx_f_5scipy_7spatial_7ckdtree_dmax(__pyx_v_min_distance, (__pyx_v_inf->side_distances[__pyx_v_i])); - goto __pyx_L10; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":397 - * min_distance = dmax(min_distance,inf.side_distances[i]) - * else: - * min_distance += inf.side_distances[i] # <<<<<<<<<<<<<< - * - * # fiddle approximation factor - */ - __pyx_v_min_distance = (__pyx_v_min_distance + (__pyx_v_inf->side_distances[__pyx_v_i])); - } - __pyx_L10:; - } - - /* "scipy/spatial/ckdtree.pyx":400 - * - * # fiddle approximation factor - * if eps==0: # <<<<<<<<<<<<<< - * epsfac=1 - * elif p==infinity: - */ - __pyx_t_6 = (__pyx_v_eps == 0.0); - if (__pyx_t_6) { - - /* "scipy/spatial/ckdtree.pyx":401 - * # fiddle approximation factor - * if eps==0: - * epsfac=1 # <<<<<<<<<<<<<< - * elif p==infinity: - * epsfac = 1/(1+eps) - */ - __pyx_v_epsfac = 1.0; - goto __pyx_L11; - } - - /* "scipy/spatial/ckdtree.pyx":402 - * if eps==0: - * epsfac=1 - * elif p==infinity: # <<<<<<<<<<<<<< - * epsfac = 1/(1+eps) - * else: - */ - __pyx_t_6 = (__pyx_v_p == __pyx_v_5scipy_7spatial_7ckdtree_infinity); - if (__pyx_t_6) { - - /* "scipy/spatial/ckdtree.pyx":403 - * epsfac=1 - * elif p==infinity: - * epsfac = 1/(1+eps) # 
<<<<<<<<<<<<<< - * else: - * epsfac = 1/(1+eps)**p - */ - __pyx_t_7 = (1.0 + __pyx_v_eps); - if (unlikely(__pyx_t_7 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_epsfac = (1.0 / __pyx_t_7); - goto __pyx_L11; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":405 - * epsfac = 1/(1+eps) - * else: - * epsfac = 1/(1+eps)**p # <<<<<<<<<<<<<< - * - * # internally we represent all distances as distance**p - */ - __pyx_t_7 = pow((1.0 + __pyx_v_eps), __pyx_v_p); - if (unlikely(__pyx_t_7 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_epsfac = (1.0 / __pyx_t_7); - } - __pyx_L11:; - - /* "scipy/spatial/ckdtree.pyx":408 - * - * # internally we represent all distances as distance**p - * if p!=infinity and distance_upper_bound!=infinity: # <<<<<<<<<<<<<< - * distance_upper_bound = distance_upper_bound**p - * - */ - __pyx_t_6 = (__pyx_v_p != __pyx_v_5scipy_7spatial_7ckdtree_infinity); - if (__pyx_t_6) { - __pyx_t_4 = (__pyx_v_distance_upper_bound != __pyx_v_5scipy_7spatial_7ckdtree_infinity); - __pyx_t_5 = __pyx_t_4; - } else { - __pyx_t_5 = __pyx_t_6; - } - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":409 - * # internally we represent all distances as distance**p - * if p!=infinity and distance_upper_bound!=infinity: - * distance_upper_bound = distance_upper_bound**p # <<<<<<<<<<<<<< - * - * while True: - */ - __pyx_v_distance_upper_bound = pow(__pyx_v_distance_upper_bound, __pyx_v_p); - goto __pyx_L12; - } - __pyx_L12:; - - /* "scipy/spatial/ckdtree.pyx":411 - * distance_upper_bound = distance_upper_bound**p - * - * while True: # <<<<<<<<<<<<<< - * if inf.node.split_dim==-1: - * node = inf.node - */ - while (1) { - if (!1) break; - - /* "scipy/spatial/ckdtree.pyx":412 - * - * while True: - * if 
inf.node.split_dim==-1: # <<<<<<<<<<<<<< - * node = inf.node - * - */ - __pyx_t_5 = (__pyx_v_inf->node->split_dim == -1); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":413 - * while True: - * if inf.node.split_dim==-1: - * node = inf.node # <<<<<<<<<<<<<< - * - * # brute-force - */ - __pyx_v_node = ((struct __pyx_t_5scipy_7spatial_7ckdtree_leafnode *)__pyx_v_inf->node); - - /* "scipy/spatial/ckdtree.pyx":416 - * - * # brute-force - * for i in range(node.start_idx,node.end_idx): # <<<<<<<<<<<<<< - * d = _distance_p( - * self.raw_data+self.raw_indices[i]*self.m, - */ - __pyx_t_2 = __pyx_v_node->end_idx; - for (__pyx_t_3 = __pyx_v_node->start_idx; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":419 - * d = _distance_p( - * self.raw_data+self.raw_indices[i]*self.m, - * x,p,self.m,distance_upper_bound) # <<<<<<<<<<<<<< - * - * if draw_data + ((__pyx_v_self->raw_indices[__pyx_v_i]) * __pyx_v_self->m)), __pyx_v_x, __pyx_v_p, __pyx_v_self->m, __pyx_v_distance_upper_bound); - - /* "scipy/spatial/ckdtree.pyx":421 - * x,p,self.m,distance_upper_bound) - * - * if draw_indices[__pyx_v_i]); - - /* "scipy/spatial/ckdtree.pyx":427 - * neighbor.priority = -d - * neighbor.contents.intdata = self.raw_indices[i] - * heappush(&neighbors,neighbor) # <<<<<<<<<<<<<< - * - * # adjust upper bound for efficiency - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heappush((&__pyx_v_neighbors), __pyx_v_neighbor); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":430 - * - * # adjust upper bound for efficiency - * if neighbors.n==k: # <<<<<<<<<<<<<< - * distance_upper_bound = -heappeek(&neighbors).priority - * # done with this node, get another - */ - __pyx_t_5 = (__pyx_v_neighbors.n == __pyx_v_k); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":431 - * # 
adjust upper bound for efficiency - * if neighbors.n==k: - * distance_upper_bound = -heappeek(&neighbors).priority # <<<<<<<<<<<<<< - * # done with this node, get another - * stdlib.free(inf) - */ - __pyx_v_distance_upper_bound = (-__pyx_f_5scipy_7spatial_7ckdtree_heappeek((&__pyx_v_neighbors)).priority); - goto __pyx_L20; - } - __pyx_L20:; - goto __pyx_L18; - } - __pyx_L18:; - } - - /* "scipy/spatial/ckdtree.pyx":433 - * distance_upper_bound = -heappeek(&neighbors).priority - * # done with this node, get another - * stdlib.free(inf) # <<<<<<<<<<<<<< - * if q.n==0: - * # no more nodes to visit - */ - free(__pyx_v_inf); - - /* "scipy/spatial/ckdtree.pyx":434 - * # done with this node, get another - * stdlib.free(inf) - * if q.n==0: # <<<<<<<<<<<<<< - * # no more nodes to visit - * break - */ - __pyx_t_5 = (__pyx_v_q.n == 0); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":436 - * if q.n==0: - * # no more nodes to visit - * break # <<<<<<<<<<<<<< - * else: - * it = heappop(&q) - */ - goto __pyx_L14_break; - goto __pyx_L21; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":438 - * break - * else: - * it = heappop(&q) # <<<<<<<<<<<<<< - * inf = it.contents.ptrdata - * min_distance = it.priority - */ - __pyx_v_it = __pyx_f_5scipy_7spatial_7ckdtree_heappop((&__pyx_v_q)); - - /* "scipy/spatial/ckdtree.pyx":439 - * else: - * it = heappop(&q) - * inf = it.contents.ptrdata # <<<<<<<<<<<<<< - * min_distance = it.priority - * else: - */ - __pyx_v_inf = ((struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo *)__pyx_v_it.contents.ptrdata); - - /* "scipy/spatial/ckdtree.pyx":440 - * it = heappop(&q) - * inf = it.contents.ptrdata - * min_distance = it.priority # <<<<<<<<<<<<<< - * else: - * inode = inf.node - */ - __pyx_v_min_distance = __pyx_v_it.priority; - } - __pyx_L21:; - goto __pyx_L15; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":442 - * min_distance = it.priority - * else: - * inode = inf.node # <<<<<<<<<<<<<< - * - * # we don't push cells that are too far onto 
the queue at all, - */ - __pyx_v_inode = ((struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *)__pyx_v_inf->node); - - /* "scipy/spatial/ckdtree.pyx":447 - * # but since the distance_upper_bound decreases, we might get - * # here even if the cell's too far - * if min_distance>distance_upper_bound*epsfac: # <<<<<<<<<<<<<< - * # since this is the nearest cell, we're done, bail out - * stdlib.free(inf) - */ - __pyx_t_5 = (__pyx_v_min_distance > (__pyx_v_distance_upper_bound * __pyx_v_epsfac)); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":449 - * if min_distance>distance_upper_bound*epsfac: - * # since this is the nearest cell, we're done, bail out - * stdlib.free(inf) # <<<<<<<<<<<<<< - * # free all the nodes still on the heap - * for i in range(q.n): - */ - free(__pyx_v_inf); - - /* "scipy/spatial/ckdtree.pyx":451 - * stdlib.free(inf) - * # free all the nodes still on the heap - * for i in range(q.n): # <<<<<<<<<<<<<< - * stdlib.free(q.heap[i].contents.ptrdata) - * break - */ - __pyx_t_2 = __pyx_v_q.n; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":452 - * # free all the nodes still on the heap - * for i in range(q.n): - * stdlib.free(q.heap[i].contents.ptrdata) # <<<<<<<<<<<<<< - * break - * - */ - free((__pyx_v_q.heap[__pyx_v_i]).contents.ptrdata); - } - - /* "scipy/spatial/ckdtree.pyx":453 - * for i in range(q.n): - * stdlib.free(q.heap[i].contents.ptrdata) - * break # <<<<<<<<<<<<<< - * - * # set up children for searching - */ - goto __pyx_L14_break; - goto __pyx_L22; - } - __pyx_L22:; - - /* "scipy/spatial/ckdtree.pyx":456 - * - * # set up children for searching - * if x[inode.split_dim]split_dim]) < __pyx_v_inode->split); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":457 - * # set up children for searching - * if x[inode.split_dim]less; - - /* "scipy/spatial/ckdtree.pyx":458 - * if x[inode.split_dim]greater; - goto __pyx_L25; - } - /*else*/ { - - /* 
"scipy/spatial/ckdtree.pyx":460 - * far = inode.greater - * else: - * near = inode.greater # <<<<<<<<<<<<<< - * far = inode.less - * - */ - __pyx_v_near = __pyx_v_inode->greater; - - /* "scipy/spatial/ckdtree.pyx":461 - * else: - * near = inode.greater - * far = inode.less # <<<<<<<<<<<<<< - * - * # near child is at the same distance as the current node - */ - __pyx_v_far = __pyx_v_inode->less; - } - __pyx_L25:; - - /* "scipy/spatial/ckdtree.pyx":466 - * # we're going here next, so no point pushing it on the queue - * # no need to recompute the distance or the side_distances - * inf.node = near # <<<<<<<<<<<<<< - * - * # far child is further by an amount depending only - */ - __pyx_v_inf->node = __pyx_v_near; - - /* "scipy/spatial/ckdtree.pyx":471 - * # on the split value; compute its distance and side_distances - * # and push it on the queue if it's near enough - * inf2 = stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) # <<<<<<<<<<<<<< - * it2.contents.ptrdata = inf2 - * inf2.node = far - */ - __pyx_v_inf2 = ((struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo *)malloc(((sizeof(struct __pyx_t_5scipy_7spatial_7ckdtree_nodeinfo)) + (__pyx_v_self->m * (sizeof(double)))))); - - /* "scipy/spatial/ckdtree.pyx":472 - * # and push it on the queue if it's near enough - * inf2 = stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) - * it2.contents.ptrdata = inf2 # <<<<<<<<<<<<<< - * inf2.node = far - * # most side distances unchanged - */ - __pyx_v_it2.contents.ptrdata = ((char *)__pyx_v_inf2); - - /* "scipy/spatial/ckdtree.pyx":473 - * inf2 = stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) - * it2.contents.ptrdata = inf2 - * inf2.node = far # <<<<<<<<<<<<<< - * # most side distances unchanged - * for i in range(self.m): - */ - __pyx_v_inf2->node = __pyx_v_far; - - /* "scipy/spatial/ckdtree.pyx":475 - * inf2.node = far - * # most side distances unchanged - * for i in range(self.m): # <<<<<<<<<<<<<< - * inf2.side_distances[i] = inf.side_distances[i] - * - */ - 
__pyx_t_2 = __pyx_v_self->m; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":476 - * # most side distances unchanged - * for i in range(self.m): - * inf2.side_distances[i] = inf.side_distances[i] # <<<<<<<<<<<<<< - * - * # one side distance changes - */ - (__pyx_v_inf2->side_distances[__pyx_v_i]) = (__pyx_v_inf->side_distances[__pyx_v_i]); - } - - /* "scipy/spatial/ckdtree.pyx":480 - * # one side distance changes - * # we can adjust the minimum distance without recomputing - * if p == infinity: # <<<<<<<<<<<<<< - * # we never use side_distances in the l_infinity case - * # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) - */ - __pyx_t_5 = (__pyx_v_p == __pyx_v_5scipy_7spatial_7ckdtree_infinity); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":483 - * # we never use side_distances in the l_infinity case - * # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) - * far_min_distance = dmax(min_distance, dabs(inode.split-x[inode.split_dim])) # <<<<<<<<<<<<<< - * elif p == 1: - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) - */ - __pyx_v_far_min_distance = __pyx_f_5scipy_7spatial_7ckdtree_dmax(__pyx_v_min_distance, __pyx_f_5scipy_7spatial_7ckdtree_dabs((__pyx_v_inode->split - (__pyx_v_x[__pyx_v_inode->split_dim])))); - goto __pyx_L28; - } - - /* "scipy/spatial/ckdtree.pyx":484 - * # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) - * far_min_distance = dmax(min_distance, dabs(inode.split-x[inode.split_dim])) - * elif p == 1: # <<<<<<<<<<<<<< - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] - */ - __pyx_t_5 = (__pyx_v_p == 1.0); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":485 - * far_min_distance = dmax(min_distance, 
dabs(inode.split-x[inode.split_dim])) - * elif p == 1: - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) # <<<<<<<<<<<<<< - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] - * else: - */ - (__pyx_v_inf2->side_distances[__pyx_v_inode->split_dim]) = __pyx_f_5scipy_7spatial_7ckdtree_dabs((__pyx_v_inode->split - (__pyx_v_x[__pyx_v_inode->split_dim]))); - - /* "scipy/spatial/ckdtree.pyx":486 - * elif p == 1: - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim]) - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] # <<<<<<<<<<<<<< - * else: - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])**p - */ - __pyx_v_far_min_distance = ((__pyx_v_min_distance - (__pyx_v_inf->side_distances[__pyx_v_inode->split_dim])) + (__pyx_v_inf2->side_distances[__pyx_v_inode->split_dim])); - goto __pyx_L28; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":488 - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] - * else: - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])**p # <<<<<<<<<<<<<< - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] - * - */ - (__pyx_v_inf2->side_distances[__pyx_v_inode->split_dim]) = pow(__pyx_f_5scipy_7spatial_7ckdtree_dabs((__pyx_v_inode->split - (__pyx_v_x[__pyx_v_inode->split_dim]))), __pyx_v_p); - - /* "scipy/spatial/ckdtree.pyx":489 - * else: - * inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])**p - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] # <<<<<<<<<<<<<< - * - * it2.priority = far_min_distance - */ - __pyx_v_far_min_distance = ((__pyx_v_min_distance - (__pyx_v_inf->side_distances[__pyx_v_inode->split_dim])) + 
(__pyx_v_inf2->side_distances[__pyx_v_inode->split_dim])); - } - __pyx_L28:; - - /* "scipy/spatial/ckdtree.pyx":491 - * far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim] - * - * it2.priority = far_min_distance # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_it2.priority = __pyx_v_far_min_distance; - - /* "scipy/spatial/ckdtree.pyx":495 - * - * # far child might be too far, if so, don't bother pushing it - * if far_min_distance<=distance_upper_bound*epsfac: # <<<<<<<<<<<<<< - * heappush(&q,it2) - * else: - */ - __pyx_t_5 = (__pyx_v_far_min_distance <= (__pyx_v_distance_upper_bound * __pyx_v_epsfac)); - if (__pyx_t_5) { - - /* "scipy/spatial/ckdtree.pyx":496 - * # far child might be too far, if so, don't bother pushing it - * if far_min_distance<=distance_upper_bound*epsfac: - * heappush(&q,it2) # <<<<<<<<<<<<<< - * else: - * stdlib.free(inf2) - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heappush((&__pyx_v_q), __pyx_v_it2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L29; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":498 - * heappush(&q,it2) - * else: - * stdlib.free(inf2) # <<<<<<<<<<<<<< - * # just in case - * it2.contents.ptrdata = 0 - */ - free(__pyx_v_inf2); - - /* "scipy/spatial/ckdtree.pyx":500 - * stdlib.free(inf2) - * # just in case - * it2.contents.ptrdata = 0 # <<<<<<<<<<<<<< - * - * # fill output arrays with sorted neighbors - */ - __pyx_v_it2.contents.ptrdata = ((char *)0); - } - __pyx_L29:; - } - __pyx_L15:; - } - __pyx_L14_break:; - - /* "scipy/spatial/ckdtree.pyx":503 - * - * # fill output arrays with sorted neighbors - * for i in range(neighbors.n-1,-1,-1): # <<<<<<<<<<<<<< - * neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced - * result_indices[i] = neighbor.contents.intdata - */ - for (__pyx_t_2 = 
(__pyx_v_neighbors.n - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { - __pyx_v_i = __pyx_t_2; - - /* "scipy/spatial/ckdtree.pyx":504 - * # fill output arrays with sorted neighbors - * for i in range(neighbors.n-1,-1,-1): - * neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced # <<<<<<<<<<<<<< - * result_indices[i] = neighbor.contents.intdata - * if p==1 or p==infinity: - */ - __pyx_v_neighbor = __pyx_f_5scipy_7spatial_7ckdtree_heappop((&__pyx_v_neighbors)); - - /* "scipy/spatial/ckdtree.pyx":505 - * for i in range(neighbors.n-1,-1,-1): - * neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced - * result_indices[i] = neighbor.contents.intdata # <<<<<<<<<<<<<< - * if p==1 or p==infinity: - * result_distances[i] = -neighbor.priority - */ - (__pyx_v_result_indices[__pyx_v_i]) = __pyx_v_neighbor.contents.intdata; - - /* "scipy/spatial/ckdtree.pyx":506 - * neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced - * result_indices[i] = neighbor.contents.intdata - * if p==1 or p==infinity: # <<<<<<<<<<<<<< - * result_distances[i] = -neighbor.priority - * else: - */ - __pyx_t_5 = (__pyx_v_p == 1.0); - if (!__pyx_t_5) { - __pyx_t_6 = (__pyx_v_p == __pyx_v_5scipy_7spatial_7ckdtree_infinity); - __pyx_t_4 = __pyx_t_6; - } else { - __pyx_t_4 = __pyx_t_5; - } - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":507 - * result_indices[i] = neighbor.contents.intdata - * if p==1 or p==infinity: - * result_distances[i] = -neighbor.priority # <<<<<<<<<<<<<< - * else: - * result_distances[i] = (-neighbor.priority)**(1./p) - */ - (__pyx_v_result_distances[__pyx_v_i]) = (-__pyx_v_neighbor.priority); - goto __pyx_L32; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":509 - * result_distances[i] = -neighbor.priority - * else: - * result_distances[i] = (-neighbor.priority)**(1./p) # <<<<<<<<<<<<<< - * - * heapdestroy(&q) - */ - if (unlikely(__pyx_v_p == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - (__pyx_v_result_distances[__pyx_v_i]) = pow((-__pyx_v_neighbor.priority), (1. / __pyx_v_p)); - } - __pyx_L32:; - } - - /* "scipy/spatial/ckdtree.pyx":511 - * result_distances[i] = (-neighbor.priority)**(1./p) - * - * heapdestroy(&q) # <<<<<<<<<<<<<< - * heapdestroy(&neighbors) - * - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heapdestroy((&__pyx_v_q)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":512 - * - * heapdestroy(&q) - * heapdestroy(&neighbors) # <<<<<<<<<<<<<< - * - * def query(cKDTree self, object x, int k=1, double eps=0, double p=2, - */ - __pyx_t_1 = __pyx_f_5scipy_7spatial_7ckdtree_heapdestroy((&__pyx_v_neighbors)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_WriteUnraisable("scipy.spatial.ckdtree.cKDTree.__query", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_RefNannyFinishContext(); -} - -/* "scipy/spatial/ckdtree.pyx":514 - * heapdestroy(&neighbors) - * - * def query(cKDTree self, object x, int k=1, double eps=0, double p=2, # <<<<<<<<<<<<<< - * double distance_upper_bound=infinity): - * """query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf) - */ - -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_2query(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7spatial_7ckdtree_7cKDTree_2query[] = "query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf)\n \n Query the kd-tree for nearest neighbors.\n\n Parameters\n ----------\n x : array_like, last dimension self.m\n An array 
of points to query.\n k : int\n The number of nearest neighbors to return.\n eps : non-negative float\n Return approximate nearest neighbors; the k-th returned value \n is guaranteed to be no further than (1 + `eps`) times the \n distance to the real k-th nearest neighbor.\n p : float, 1 <= p <= infinity\n Which Minkowski p-norm to use. \n 1 is the sum-of-absolute-values \"Manhattan\" distance.\n 2 is the usual Euclidean distance.\n infinity is the maximum-coordinate-difference distance.\n distance_upper_bound : non-negative float\n Return only neighbors within this distance. This is used to prune\n tree searches, so if you are doing a series of nearest-neighbor\n queries, it may help to supply the distance to the nearest neighbor\n of the most recent point.\n\n Returns\n -------\n d : ndarray of floats\n The distances to the nearest neighbors. \n If `x` has shape tuple+(self.m,), then `d` has shape tuple+(k,).\n Missing neighbors are indicated with infinite distances.\n i : ndarray of ints\n The locations of the neighbors in self.data.\n If `x` has shape tuple+(self.m,), then `i` has shape tuple+(k,).\n Missing neighbors are indicated with self.n.\n\n "; -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_2query(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_x = 0; - int __pyx_v_k; - double __pyx_v_eps; - double __pyx_v_p; - double __pyx_v_distance_upper_bound; - PyArrayObject *__pyx_v_ii = 0; - PyArrayObject *__pyx_v_dd = 0; - PyArrayObject *__pyx_v_xx = 0; - int __pyx_v_c; - int __pyx_v_single; - PyObject *__pyx_v_retshape = NULL; - PyObject *__pyx_v_n = NULL; - Py_buffer __pyx_bstruct_ii; - Py_ssize_t __pyx_bstride_0_ii = 0; - Py_ssize_t __pyx_bstride_1_ii = 0; - Py_ssize_t __pyx_bshape_0_ii = 0; - Py_ssize_t __pyx_bshape_1_ii = 0; - Py_buffer __pyx_bstruct_xx; - Py_ssize_t __pyx_bstride_0_xx = 0; - Py_ssize_t __pyx_bstride_1_xx = 0; - Py_ssize_t __pyx_bshape_0_xx = 0; - Py_ssize_t __pyx_bshape_1_xx = 0; - 
Py_buffer __pyx_bstruct_dd; - Py_ssize_t __pyx_bstride_0_dd = 0; - Py_ssize_t __pyx_bstride_1_dd = 0; - Py_ssize_t __pyx_bshape_0_dd = 0; - Py_ssize_t __pyx_bshape_1_dd = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - PyArrayObject *__pyx_t_7 = NULL; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyArrayObject *__pyx_t_13 = NULL; - PyArrayObject *__pyx_t_14 = NULL; - long __pyx_t_15; - long __pyx_t_16; - long __pyx_t_17; - long __pyx_t_18; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__k,&__pyx_n_s__eps,&__pyx_n_s__p,&__pyx_n_s_4,0}; - __Pyx_RefNannySetupContext("query"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[5] = {0,0,0,0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); - if (value) { values[1] = value; kw_args--; } - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__eps); - if (value) { values[2] = value; kw_args--; } - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s__p); - if (value) { values[3] = value; kw_args--; } - } - case 4: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_4); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "query") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_x = values[0]; - if (values[1]) { - __pyx_v_k = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_k == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_k = ((int)1); - } - if (values[2]) { - __pyx_v_eps = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_eps == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_eps = ((double)0.0); - } - if (values[3]) { - __pyx_v_p = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_p == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_p = ((double)2.0); - } - if (values[4]) { - __pyx_v_distance_upper_bound = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_distance_upper_bound == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else { - __pyx_v_distance_upper_bound = __pyx_k_5; - } - } else { - __pyx_v_k = ((int)1); - __pyx_v_eps = ((double)0.0); - __pyx_v_p = ((double)2.0); - __pyx_v_distance_upper_bound = __pyx_k_5; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: __pyx_v_distance_upper_bound = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 4)); if (unlikely((__pyx_v_distance_upper_bound == (double)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 4: __pyx_v_p = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 3)); if (unlikely((__pyx_v_p == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 3: __pyx_v_eps = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 2)); if (unlikely((__pyx_v_eps == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 2: __pyx_v_k = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_k == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - case 1: __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("query", 0, 1, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.query", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_x); - __pyx_bstruct_ii.buf = NULL; - __pyx_bstruct_dd.buf = NULL; - __pyx_bstruct_xx.buf = NULL; - - /* "scipy/spatial/ckdtree.pyx":557 - * cdef np.ndarray[double, ndim=2] xx - * cdef int c - * x = np.asarray(x).astype(np.float) # <<<<<<<<<<<<<< - * if np.shape(x)[-1] != self.m: - * raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - 
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__asarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/spatial/ckdtree.pyx":558 - * cdef int c - * x = np.asarray(x).astype(np.float) - * if np.shape(x)[-1] != self.m: # <<<<<<<<<<<<<< - * raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) - * if p<1: - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->m); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":559 - * x = np.asarray(x).astype(np.float) - * if np.shape(x)[-1] != self.m: - * raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) # <<<<<<<<<<<<<< - * if p<1: - * raise ValueError("Only p-norms with 1<=p<=infinity permitted") - */ - __pyx_t_3 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->m); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_5 = 
PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_3 = 0; - __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_6), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "scipy/spatial/ckdtree.pyx":560 - * if np.shape(x)[-1] != self.m: - * raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) - * if p<1: # 
<<<<<<<<<<<<<< - * raise ValueError("Only p-norms with 1<=p<=infinity permitted") - * if len(x.shape)==1: - */ - __pyx_t_4 = (__pyx_v_p < 1.0); - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":561 - * raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) - * if p<1: - * raise ValueError("Only p-norms with 1<=p<=infinity permitted") # <<<<<<<<<<<<<< - * if len(x.shape)==1: - * single = True - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "scipy/spatial/ckdtree.pyx":562 - * if p<1: - * raise ValueError("Only p-norms with 1<=p<=infinity permitted") - * if len(x.shape)==1: # <<<<<<<<<<<<<< - * single = True - * x = x[np.newaxis,:] - */ - __pyx_t_5 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__shape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyObject_Length(__pyx_t_5); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_4 = (__pyx_t_6 == 1); - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":563 - * raise ValueError("Only p-norms with 1<=p<=infinity permitted") - * if len(x.shape)==1: - * single = True # <<<<<<<<<<<<<< - * x = x[np.newaxis,:] - * else: - */ - __pyx_v_single = 1; - - /* "scipy/spatial/ckdtree.pyx":564 - * if len(x.shape)==1: - * single = True - * x = x[np.newaxis,:] # <<<<<<<<<<<<<< - * else: - * single = False - */ - __pyx_t_5 = 
__Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__newaxis); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_INCREF(__pyx_k_slice_9); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_k_slice_9); - __Pyx_GIVEREF(__pyx_k_slice_9); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetItem(__pyx_v_x, ((PyObject *)__pyx_t_5)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L8; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":566 - * x = x[np.newaxis,:] - * else: - * single = False # <<<<<<<<<<<<<< - * retshape = np.shape(x)[:-1] - * n = np.prod(retshape) - */ - __pyx_v_single = 0; - } - __pyx_L8:; - - /* "scipy/spatial/ckdtree.pyx":567 - * else: - * single = False - * retshape = np.shape(x)[:-1] # <<<<<<<<<<<<<< - * n = np.prod(retshape) - * xx = np.reshape(x,(n,self.m)) - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 567; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__shape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 567; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
- __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 567; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 567; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PySequence_GetSlice(__pyx_t_3, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 567; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_retshape = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":568 - * single = False - * retshape = np.shape(x)[:-1] - * n = np.prod(retshape) # <<<<<<<<<<<<<< - * xx = np.reshape(x,(n,self.m)) - * xx = np.ascontiguousarray(xx) - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__prod); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_retshape); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_retshape); - __Pyx_GIVEREF(__pyx_v_retshape); - __pyx_t_5 = 
PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_n = __pyx_t_5; - __pyx_t_5 = 0; - - /* "scipy/spatial/ckdtree.pyx":569 - * retshape = np.shape(x)[:-1] - * n = np.prod(retshape) - * xx = np.reshape(x,(n,self.m)) # <<<<<<<<<<<<<< - * xx = np.ascontiguousarray(xx) - * dd = np.empty((n,k),dtype=np.float) - */ - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__reshape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->m); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_5, 
1, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = ((PyArrayObject *)__pyx_t_3); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xx); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_xx, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_xx, (PyObject*)__pyx_v_xx, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); - } - } - __pyx_bstride_0_xx = __pyx_bstruct_xx.strides[0]; __pyx_bstride_1_xx = __pyx_bstruct_xx.strides[1]; - __pyx_bshape_0_xx = __pyx_bstruct_xx.shape[0]; __pyx_bshape_1_xx = __pyx_bstruct_xx.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_7 = 0; - __pyx_v_xx = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/spatial/ckdtree.pyx":570 - * n = np.prod(retshape) - * xx = np.reshape(x,(n,self.m)) - * xx = np.ascontiguousarray(xx) # <<<<<<<<<<<<<< - * dd = np.empty((n,k),dtype=np.float) - * dd.fill(infinity) - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, 
__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(((PyObject *)__pyx_v_xx)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_xx)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_xx)); - __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xx); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_xx, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_xx, (PyObject*)__pyx_v_xx, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_11, 
__pyx_t_10, __pyx_t_9); - } - } - __pyx_bstride_0_xx = __pyx_bstruct_xx.strides[0]; __pyx_bstride_1_xx = __pyx_bstruct_xx.strides[1]; - __pyx_bshape_0_xx = __pyx_bstruct_xx.shape[0]; __pyx_bshape_1_xx = __pyx_bstruct_xx.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_7 = 0; - __Pyx_DECREF(((PyObject *)__pyx_v_xx)); - __pyx_v_xx = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":571 - * xx = np.reshape(x,(n,self.m)) - * xx = np.ascontiguousarray(xx) - * dd = np.empty((n,k),dtype=np.float) # <<<<<<<<<<<<<< - * dd.fill(infinity) - * ii = np.empty((n,k),dtype='i') - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyInt_FromLong(__pyx_v_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_5)); 
- __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_12 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_12) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_12 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_12) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_12, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_13 = ((PyArrayObject *)__pyx_t_12); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_dd); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_dd, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_dd, 
(PyObject*)__pyx_v_dd, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); - } - } - __pyx_bstride_0_dd = __pyx_bstruct_dd.strides[0]; __pyx_bstride_1_dd = __pyx_bstruct_dd.strides[1]; - __pyx_bshape_0_dd = __pyx_bstruct_dd.shape[0]; __pyx_bshape_1_dd = __pyx_bstruct_dd.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_13 = 0; - __pyx_v_dd = ((PyArrayObject *)__pyx_t_12); - __pyx_t_12 = 0; - - /* "scipy/spatial/ckdtree.pyx":572 - * xx = np.ascontiguousarray(xx) - * dd = np.empty((n,k),dtype=np.float) - * dd.fill(infinity) # <<<<<<<<<<<<<< - * ii = np.empty((n,k),dtype='i') - * ii.fill(self.n) - */ - __pyx_t_12 = PyObject_GetAttr(((PyObject *)__pyx_v_dd), __pyx_n_s__fill); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_5 = PyFloat_FromDouble(__pyx_v_5scipy_7spatial_7ckdtree_infinity); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_12, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - 
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "scipy/spatial/ckdtree.pyx":573 - * dd = np.empty((n,k),dtype=np.float) - * dd.fill(infinity) - * ii = np.empty((n,k),dtype='i') # <<<<<<<<<<<<<< - * ii.fill(self.n) - * for c in range(n): - */ - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__empty); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyInt_FromLong(__pyx_v_k); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_12)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_12)); - __pyx_t_12 = 0; - __pyx_t_12 = PyDict_New(); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_12)); - if (PyDict_SetItem(__pyx_t_12, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_n_s__i)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__pyx_t_3 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_12)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_12)); __pyx_t_12 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_14 = ((PyArrayObject *)__pyx_t_3); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_ii); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_ii, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_ii, (PyObject*)__pyx_v_ii, &__Pyx_TypeInfo_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); - } - } - __pyx_bstride_0_ii = __pyx_bstruct_ii.strides[0]; __pyx_bstride_1_ii = __pyx_bstruct_ii.strides[1]; - __pyx_bshape_0_ii = __pyx_bstruct_ii.shape[0]; __pyx_bshape_1_ii = __pyx_bstruct_ii.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_14 = 0; - __pyx_v_ii = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "scipy/spatial/ckdtree.pyx":574 - * dd.fill(infinity) - * ii = np.empty((n,k),dtype='i') - * ii.fill(self.n) # <<<<<<<<<<<<<< - * for c in range(n): - * self.__query( - */ - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_ii), 
__pyx_n_s__fill); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_12 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->n); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - - /* "scipy/spatial/ckdtree.pyx":575 - * ii = np.empty((n,k),dtype='i') - * ii.fill(self.n) - * for c in range(n): # <<<<<<<<<<<<<< - * self.__query( - * (dd.data)+c*k, - */ - __pyx_t_15 = __Pyx_PyInt_AsLong(__pyx_v_n); if (unlikely((__pyx_t_15 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_15; __pyx_t_8+=1) { - __pyx_v_c = __pyx_t_8; - - /* "scipy/spatial/ckdtree.pyx":583 - * eps, - * p, - * distance_upper_bound) # <<<<<<<<<<<<<< - * if single: - * if k==1: - */ - ((struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree *)((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->__pyx_vtab)->__query(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self), (((double *)__pyx_v_dd->data) + (__pyx_v_c * __pyx_v_k)), (((int *)__pyx_v_ii->data) + (__pyx_v_c * 
__pyx_v_k)), (((double *)__pyx_v_xx->data) + (__pyx_v_c * ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->m)), __pyx_v_k, __pyx_v_eps, __pyx_v_p, __pyx_v_distance_upper_bound); - } - - /* "scipy/spatial/ckdtree.pyx":584 - * p, - * distance_upper_bound) - * if single: # <<<<<<<<<<<<<< - * if k==1: - * return dd[0,0], ii[0,0] - */ - if (__pyx_v_single) { - - /* "scipy/spatial/ckdtree.pyx":585 - * distance_upper_bound) - * if single: - * if k==1: # <<<<<<<<<<<<<< - * return dd[0,0], ii[0,0] - * else: - */ - __pyx_t_4 = (__pyx_v_k == 1); - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":586 - * if single: - * if k==1: - * return dd[0,0], ii[0,0] # <<<<<<<<<<<<<< - * else: - * return dd[0], ii[0] - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_15 = 0; - __pyx_t_16 = 0; - __pyx_t_8 = -1; - if (__pyx_t_15 < 0) { - __pyx_t_15 += __pyx_bshape_0_dd; - if (unlikely(__pyx_t_15 < 0)) __pyx_t_8 = 0; - } else if (unlikely(__pyx_t_15 >= __pyx_bshape_0_dd)) __pyx_t_8 = 0; - if (__pyx_t_16 < 0) { - __pyx_t_16 += __pyx_bshape_1_dd; - if (unlikely(__pyx_t_16 < 0)) __pyx_t_8 = 1; - } else if (unlikely(__pyx_t_16 >= __pyx_bshape_1_dd)) __pyx_t_8 = 1; - if (unlikely(__pyx_t_8 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_8); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(double *, __pyx_bstruct_dd.buf, __pyx_t_15, __pyx_bstride_0_dd, __pyx_t_16, __pyx_bstride_1_dd))); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_17 = 0; - __pyx_t_18 = 0; - __pyx_t_8 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_bshape_0_ii; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_8 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_bshape_0_ii)) __pyx_t_8 = 0; - if (__pyx_t_18 < 0) { - __pyx_t_18 += __pyx_bshape_1_ii; - if (unlikely(__pyx_t_18 < 0)) __pyx_t_8 = 1; - } 
else if (unlikely(__pyx_t_18 >= __pyx_bshape_1_ii)) __pyx_t_8 = 1; - if (unlikely(__pyx_t_8 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_8); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_5 = PyInt_FromLong((*__Pyx_BufPtrStrided2d(int *, __pyx_bstruct_ii.buf, __pyx_t_17, __pyx_bstride_0_ii, __pyx_t_18, __pyx_bstride_1_ii))); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_12 = 0; - __pyx_t_5 = 0; - __pyx_r = ((PyObject *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - goto __pyx_L12; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":588 - * return dd[0,0], ii[0,0] - * else: - * return dd[0], ii[0] # <<<<<<<<<<<<<< - * else: - * if k==1: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetItemInt(((PyObject *)__pyx_v_dd), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_GetItemInt(((PyObject *)__pyx_v_ii), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_12)); - PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_5); - 
__Pyx_GIVEREF(__pyx_t_5); - __pyx_t_3 = 0; - __pyx_t_5 = 0; - __pyx_r = ((PyObject *)__pyx_t_12); - __pyx_t_12 = 0; - goto __pyx_L0; - } - __pyx_L12:; - goto __pyx_L11; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":590 - * return dd[0], ii[0] - * else: - * if k==1: # <<<<<<<<<<<<<< - * return np.reshape(dd[...,0],retshape), np.reshape(ii[...,0],retshape) - * else: - */ - __pyx_t_4 = (__pyx_v_k == 1); - if (__pyx_t_4) { - - /* "scipy/spatial/ckdtree.pyx":591 - * else: - * if k==1: - * return np.reshape(dd[...,0],retshape), np.reshape(ii[...,0],retshape) # <<<<<<<<<<<<<< - * else: - * return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,)) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_12 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_12, __pyx_n_s__reshape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_12 = PyObject_GetItem(((PyObject *)__pyx_v_dd), ((PyObject *)__pyx_k_tuple_10)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - __Pyx_INCREF(__pyx_v_retshape); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_retshape); - __Pyx_GIVEREF(__pyx_v_retshape); - __pyx_t_12 = 0; - __pyx_t_12 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
- __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__reshape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_ii), ((PyObject *)__pyx_k_tuple_11)); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_retshape); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_retshape); - __Pyx_GIVEREF(__pyx_v_retshape); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_12 = 0; - __pyx_t_3 = 0; - __pyx_r = ((PyObject *)__pyx_t_1); - __pyx_t_1 = 0; - goto __pyx_L0; - goto 
__pyx_L13; - } - /*else*/ { - - /* "scipy/spatial/ckdtree.pyx":593 - * return np.reshape(dd[...,0],retshape), np.reshape(ii[...,0],retshape) - * else: - * return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,)) # <<<<<<<<<<<<<< - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyInt_FromLong(__pyx_v_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_12)); - PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_v_retshape, ((PyObject *)__pyx_t_12)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_12)); __pyx_t_12 = 0; - __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_12)); - __Pyx_INCREF(((PyObject *)__pyx_v_dd)); - PyTuple_SET_ITEM(__pyx_t_12, 0, ((PyObject *)__pyx_v_dd)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_dd)); - PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_12), 
NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_12)); __pyx_t_12 = 0; - __pyx_t_12 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_12, __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_12 = PyInt_FromLong(__pyx_v_k); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = PyNumber_Add(__pyx_v_retshape, ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_INCREF(((PyObject *)__pyx_v_ii)); - PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_ii)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_ii)); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if 
(unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - __pyx_t_1 = 0; - __pyx_t_12 = 0; - __pyx_r = ((PyObject *)__pyx_t_5); - __pyx_t_5 = 0; - goto __pyx_L0; - } - __pyx_L13:; - } - __pyx_L11:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_12); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_ii); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xx); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_dd); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.query", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_ii); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_xx); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_dd); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_ii); - __Pyx_XDECREF((PyObject *)__pyx_v_dd); - __Pyx_XDECREF((PyObject *)__pyx_v_xx); - __Pyx_XDECREF(__pyx_v_retshape); - __Pyx_XDECREF(__pyx_v_n); - __Pyx_XDECREF(__pyx_v_x); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":197 - * - * cdef innernode* tree - * cdef readonly object data # <<<<<<<<<<<<<< - * cdef double* raw_data - 
* cdef readonly int n, m - */ - -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_4data___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_4data___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data); - __pyx_r = ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->data; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":199 - * cdef readonly object data - * cdef double* raw_data - * cdef readonly int n, m # <<<<<<<<<<<<<< - * cdef readonly int leafsize - * cdef readonly object maxes - */ - -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1n___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1n___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.n.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - 
-static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1m___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1m___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->m); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.m.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":200 - * cdef double* raw_data - * cdef readonly int n, m - * cdef readonly int leafsize # <<<<<<<<<<<<<< - * cdef readonly object maxes - * cdef double* raw_maxes - */ - -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_8leafsize___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_8leafsize___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->leafsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("scipy.spatial.ckdtree.cKDTree.leafsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":201 - * cdef readonly int n, m - * cdef readonly int leafsize - * cdef readonly object maxes # <<<<<<<<<<<<<< - * cdef double* raw_maxes - * cdef readonly object mins - */ - -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_5maxes___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_5maxes___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes); - __pyx_r = ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->maxes; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/spatial/ckdtree.pyx":203 - * cdef readonly object maxes - * cdef double* raw_maxes - * cdef readonly object mins # <<<<<<<<<<<<<< - * cdef double* raw_mins - * cdef object indices - */ - -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_4mins___get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_4mins___get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__"); - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)__pyx_v_self)->mins); - __pyx_r = ((struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree 
*)__pyx_v_self)->mins; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":190 - * # experimental exception made for __getbuffer__ and __releasebuffer__ - * # -- the details of this may change. - * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< - * # This implementation of getbuffer is geared towards Cython - * # requirements, and does not yet fullfill the PEP. - */ - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int __pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - /* "numpy.pxd":196 - * # of flags - * - * if info == NULL: return # <<<<<<<<<<<<<< - * - * cdef int copy_shape, i, ndim - */ - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":199 - * - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - */ - __pyx_v_endian_detector = 1; - - /* 
"numpy.pxd":200 - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * - * ndim = PyArray_NDIM(self) - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":202 - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":204 - * ndim = PyArray_NDIM(self) - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * copy_shape = 1 - * else: - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":205 - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * copy_shape = 1 # <<<<<<<<<<<<<< - * else: - * copy_shape = 0 - */ - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - /*else*/ { - - /* "numpy.pxd":207 - * copy_shape = 1 - * else: - * copy_shape = 0 # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - */ - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - /* "numpy.pxd":209 - * copy_shape = 0 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - /* "numpy.pxd":210 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not C contiguous") - * - */ - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == 
pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_13), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "numpy.pxd":213 - * raise ValueError(u"ndarray is not C contiguous") - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") - */ - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - /* "numpy.pxd":214 - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not Fortran contiguous") - * - */ - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (__pyx_t_2) { - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_15), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - 
__Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - /* "numpy.pxd":217 - * raise ValueError(u"ndarray is not Fortran contiguous") - * - * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< - * info.ndim = ndim - * if copy_shape: - */ - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":218 - * - * info.buf = PyArray_DATA(self) - * info.ndim = ndim # <<<<<<<<<<<<<< - * if copy_shape: - * # Allocate new buffer for strides and shape info. - */ - __pyx_v_info->ndim = __pyx_v_ndim; - - /* "numpy.pxd":219 - * info.buf = PyArray_DATA(self) - * info.ndim = ndim - * if copy_shape: # <<<<<<<<<<<<<< - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - */ - if (__pyx_v_copy_shape) { - - /* "numpy.pxd":222 - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< - * info.shape = info.strides + ndim - * for i in range(ndim): - */ - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - /* "numpy.pxd":223 - * # This is allocated as one block, strides first. 
- * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim # <<<<<<<<<<<<<< - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - */ - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - /* "numpy.pxd":224 - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim - * for i in range(ndim): # <<<<<<<<<<<<<< - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] - */ - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "numpy.pxd":225 - * info.shape = info.strides + ndim - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - */ - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - /* "numpy.pxd":226 - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< - * else: - * info.strides = PyArray_STRIDES(self) - */ - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":228 - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - */ - __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":229 - * else: - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - */ - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - /* "numpy.pxd":230 - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) - * 
info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) - */ - __pyx_v_info->suboffsets = NULL; - - /* "numpy.pxd":231 - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< - * info.readonly = not PyArray_ISWRITEABLE(self) - * - */ - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":232 - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< - * - * cdef int t - */ - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":235 - * - * cdef int t - * cdef char* f = NULL # <<<<<<<<<<<<<< - * cdef dtype descr = self.descr - * cdef list stack - */ - __pyx_v_f = NULL; - - /* "numpy.pxd":236 - * cdef int t - * cdef char* f = NULL - * cdef dtype descr = self.descr # <<<<<<<<<<<<<< - * cdef list stack - * cdef int offset - */ - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr; - - /* "numpy.pxd":240 - * cdef int offset - * - * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< - * - * if not hasfields and not copy_shape: - */ - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - /* "numpy.pxd":242 - * cdef bint hasfields = PyDataType_HASFIELDS(descr) - * - * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< - * # do not call releasebuffer - * info.obj = None - */ - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":244 - * if not hasfields and not copy_shape: - * # do not call releasebuffer - * info.obj = None # <<<<<<<<<<<<<< - * else: - * # need to call releasebuffer - */ - __Pyx_INCREF(Py_None); - 
__Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - /*else*/ { - - /* "numpy.pxd":247 - * else: - * # need to call releasebuffer - * info.obj = self # <<<<<<<<<<<<<< - * - * if not hasfields: - */ - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - /* "numpy.pxd":249 - * info.obj = self - * - * if not hasfields: # <<<<<<<<<<<<<< - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - */ - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - /* "numpy.pxd":250 - * - * if not hasfields: - * t = descr.type_num # <<<<<<<<<<<<<< - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - */ - __pyx_v_t = __pyx_v_descr->type_num; - - /* "numpy.pxd":251 - * if not hasfields: - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - /* "numpy.pxd":252 - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and 
not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_17), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - /* "numpy.pxd":254 - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - */ - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - /* "numpy.pxd":255 - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - /* "numpy.pxd":256 - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - */ - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - /* "numpy.pxd":257 - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - */ - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - /* "numpy.pxd":258 - * elif t == 
NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - */ - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - /* "numpy.pxd":259 - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - - /* "numpy.pxd":260 - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - /* "numpy.pxd":261 - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - /* "numpy.pxd":262 - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - /* "numpy.pxd":263 - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - /* "numpy.pxd":264 - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == 
NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - */ - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - /* "numpy.pxd":265 - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - */ - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - /* "numpy.pxd":266 - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - /* "numpy.pxd":267 - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - /* "numpy.pxd":268 - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - /* "numpy.pxd":269 - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f = "O" - * else: - */ - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - /* "numpy.pxd":270 - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f 
= "O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - /*else*/ { - - /* "numpy.pxd":272 - * elif t == NPY_OBJECT: f = "O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * info.format = f - * return - */ - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_18), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - /* "numpy.pxd":273 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f # <<<<<<<<<<<<<< - * return - * else: - */ - __pyx_v_info->format = __pyx_v_f; - - /* "numpy.pxd":274 - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f - * 
return # <<<<<<<<<<<<<< - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - */ - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - /*else*/ { - - /* "numpy.pxd":276 - * return - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 - */ - __pyx_v_info->format = ((char *)malloc(255)); - - /* "numpy.pxd":277 - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< - * offset = 0 - * f = _util_dtypestring(descr, info.format + 1, - */ - (__pyx_v_info->format[0]) = '^'; - - /* "numpy.pxd":278 - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 # <<<<<<<<<<<<<< - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - */ - __pyx_v_offset = 0; - - /* "numpy.pxd":281 - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - * &offset) # <<<<<<<<<<<<<< - * f[0] = 0 # Terminate format string - * - */ - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - /* "numpy.pxd":282 - * info.format + _buffer_format_string_len, - * &offset) - * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - */ - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj 
!= NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":284 - * f[0] = 0 # Terminate format string - * - * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - */ - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - /* "numpy.pxd":285 - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - /* "numpy.pxd":286 - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) # <<<<<<<<<<<<<< - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) - */ - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":287 - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * stdlib.free(info.strides) - * # info.shape was stored after info.strides in the same block - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":288 - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * 
stdlib.free(info.strides) # <<<<<<<<<<<<<< - * # info.shape was stored after info.strides in the same block - * - */ - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":764 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - /* "numpy.pxd":765 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":767 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - /* "numpy.pxd":768 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":770 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - /* "numpy.pxd":771 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r 
= Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":773 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - /* "numpy.pxd":774 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":776 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - 
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - /* "numpy.pxd":777 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":779 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< - * # Recursive utility function used in __getbuffer__ to get format - * # string. The new location in the format string is returned. 
- */ - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - /* "numpy.pxd":786 - * cdef int delta_offset - * cdef tuple i - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * cdef tuple fields - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":787 - * cdef tuple i - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * cdef tuple fields - * - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":790 - * cdef tuple fields - * - * for childname in descr.names: # <<<<<<<<<<<<<< - * fields = descr.fields[childname] - * child, new_offset = fields - */ - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - 
__Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":791 - * - * for childname in descr.names: - * fields = descr.fields[childname] # <<<<<<<<<<<<<< - * child, new_offset = fields - * - */ - __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "numpy.pxd":792 - * for childname in descr.names: - * fields = descr.fields[childname] - * child, new_offset = fields # <<<<<<<<<<<<<< - * - * if (end - f) - (new_offset - offset[0]) < 15: - */ - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject 
*)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - /* "numpy.pxd":794 - * child, new_offset = fields - * - * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - */ - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in 
numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_20), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":797 - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - /* "numpy.pxd":798 - * - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * # One could encode it in the format string and have Cython - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - __pyx_t_9 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_21), NULL); if 
(unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "numpy.pxd":809 - * - * # Output padding bytes - * while offset[0] < new_offset: # <<<<<<<<<<<<<< - * f[0] = 120 # "x"; pad byte - * f += 1 - */ - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - /* "numpy.pxd":810 - * # Output padding bytes - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< - * f += 1 - * offset[0] += 1 - */ - (__pyx_v_f[0]) = 120; - - /* "numpy.pxd":811 - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte - * f += 1 # <<<<<<<<<<<<<< - * offset[0] += 1 - * - */ - __pyx_v_f = (__pyx_v_f + 1); - - /* "numpy.pxd":812 - * f[0] = 120 # "x"; pad byte - * f += 1 - * offset[0] += 1 # <<<<<<<<<<<<<< - * - * offset[0] += child.itemsize - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - /* "numpy.pxd":814 - * offset[0] += 1 - * - * offset[0] += child.itemsize # <<<<<<<<<<<<<< - * - * if not PyDataType_HASFIELDS(child): - */ - __pyx_t_10 = 0; - 
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - /* "numpy.pxd":816 - * offset[0] += child.itemsize - * - * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< - * t = child.type_num - * if end - f < 5: - */ - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - /* "numpy.pxd":817 - * - * if not PyDataType_HASFIELDS(child): - * t = child.type_num # <<<<<<<<<<<<<< - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") - */ - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":818 - * if not PyDataType_HASFIELDS(child): - * t = child.type_num - * if end - f < 5: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short.") - * - */ - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_23), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - /* "numpy.pxd":822 - * - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - */ - __pyx_t_3 = 
PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - /* "numpy.pxd":823 - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - /* "numpy.pxd":824 - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - */ - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); 
if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - /* "numpy.pxd":825 - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - */ - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - /* "numpy.pxd":826 - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - */ - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - /* "numpy.pxd":827 - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - /* "numpy.pxd":828 - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - /* "numpy.pxd":829 - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - /* "numpy.pxd":830 - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - /* "numpy.pxd":831 - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - /* "numpy.pxd":832 - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - */ - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - /* "numpy.pxd":833 - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - */ - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - /* "numpy.pxd":834 - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - /* "numpy.pxd":835 - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - */ - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":836 - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] 
= 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" - */ - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":837 - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - */ - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":838 - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 
100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - /*else*/ { - - /* "numpy.pxd":840 - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * f += 1 - * else: - */ - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_18), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - 
__Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - /* "numpy.pxd":841 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * f += 1 # <<<<<<<<<<<<<< - * else: - * # Cython ignores struct boundary information ("T{...}"), - */ - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":845 - * # Cython ignores struct boundary information ("T{...}"), - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< - * return f - * - */ - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "numpy.pxd":846 - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) - * return f # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":961 - * - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * cdef PyObject* baseptr - * if base is None: - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - 
__Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - /* "numpy.pxd":963 - * cdef inline void set_array_base(ndarray arr, object base): - * cdef PyObject* baseptr - * if base is None: # <<<<<<<<<<<<<< - * baseptr = NULL - * else: - */ - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - /* "numpy.pxd":964 - * cdef PyObject* baseptr - * if base is None: - * baseptr = NULL # <<<<<<<<<<<<<< - * else: - * Py_INCREF(base) # important to do this before decref below! - */ - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":966 - * baseptr = NULL - * else: - * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< - * baseptr = base - * Py_XDECREF(arr.base) - */ - Py_INCREF(__pyx_v_base); - - /* "numpy.pxd":967 - * else: - * Py_INCREF(base) # important to do this before decref below! - * baseptr = base # <<<<<<<<<<<<<< - * Py_XDECREF(arr.base) - * arr.base = baseptr - */ - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - /* "numpy.pxd":968 - * Py_INCREF(base) # important to do this before decref below! 
- * baseptr = base - * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< - * arr.base = baseptr - * - */ - Py_XDECREF(__pyx_v_arr->base); - - /* "numpy.pxd":969 - * baseptr = base - * Py_XDECREF(arr.base) - * arr.base = baseptr # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - /* "numpy.pxd":972 - * - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: # <<<<<<<<<<<<<< - * return None - * else: - */ - __pyx_t_1 = (__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - /* "numpy.pxd":973 - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: - * return None # <<<<<<<<<<<<<< - * else: - * return arr.base - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":975 - * return None - * else: - * return arr.base # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct __pyx_vtabstruct_5scipy_7spatial_7ckdtree_cKDTree __pyx_vtable_5scipy_7spatial_7ckdtree_cKDTree; - -static PyObject *__pyx_tp_new_5scipy_7spatial_7ckdtree_cKDTree(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *p; - PyObject *o = (*t->tp_alloc)(t, 0); - if (!o) return 0; - p = ((struct 
__pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)o); - p->__pyx_vtab = __pyx_vtabptr_5scipy_7spatial_7ckdtree_cKDTree; - p->data = Py_None; Py_INCREF(Py_None); - p->maxes = Py_None; Py_INCREF(Py_None); - p->mins = Py_None; Py_INCREF(Py_None); - p->indices = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_5scipy_7spatial_7ckdtree_cKDTree(PyObject *o) { - struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *p = (struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)o; - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - ++Py_REFCNT(o); - __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1__dealloc__(o); - if (PyErr_Occurred()) PyErr_WriteUnraisable(o); - --Py_REFCNT(o); - PyErr_Restore(etype, eval, etb); - } - Py_XDECREF(p->data); - Py_XDECREF(p->maxes); - Py_XDECREF(p->mins); - Py_XDECREF(p->indices); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_5scipy_7spatial_7ckdtree_cKDTree(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *p = (struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)o; - if (p->data) { - e = (*v)(p->data, a); if (e) return e; - } - if (p->maxes) { - e = (*v)(p->maxes, a); if (e) return e; - } - if (p->mins) { - e = (*v)(p->mins, a); if (e) return e; - } - if (p->indices) { - e = (*v)(p->indices, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_5scipy_7spatial_7ckdtree_cKDTree(PyObject *o) { - struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *p = (struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *)o; - PyObject* tmp; - tmp = ((PyObject*)p->data); - p->data = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->maxes); - p->maxes = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->mins); - p->mins = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->indices); - p->indices = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyObject 
*__pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_data(PyObject *o, void *x) { - return __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_4data___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_n(PyObject *o, void *x) { - return __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1n___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_m(PyObject *o, void *x) { - return __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_1m___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_leafsize(PyObject *o, void *x) { - return __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_8leafsize___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_maxes(PyObject *o, void *x) { - return __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_5maxes___get__(o); -} - -static PyObject *__pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_mins(PyObject *o, void *x) { - return __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_4mins___get__(o); -} - -static PyMethodDef __pyx_methods_5scipy_7spatial_7ckdtree_cKDTree[] = { - {__Pyx_NAMESTR("query"), (PyCFunction)__pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree_2query, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_7ckdtree_7cKDTree_2query)}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_5scipy_7spatial_7ckdtree_cKDTree[] = { - {(char *)"data", __pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_data, 0, 0, 0}, - {(char *)"n", __pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_n, 0, 0, 0}, - {(char *)"m", __pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_m, 0, 0, 0}, - {(char *)"leafsize", __pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_leafsize, 0, 0, 0}, - {(char *)"maxes", __pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_maxes, 0, 0, 0}, - {(char *)"mins", __pyx_getprop_5scipy_7spatial_7ckdtree_7cKDTree_mins, 0, 0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyNumberMethods __pyx_tp_as_number_cKDTree = { - 0, /*nb_add*/ - 0, /*nb_subtract*/ - 0, /*nb_multiply*/ - #if 
PY_MAJOR_VERSION < 3 - 0, /*nb_divide*/ - #endif - 0, /*nb_remainder*/ - 0, /*nb_divmod*/ - 0, /*nb_power*/ - 0, /*nb_negative*/ - 0, /*nb_positive*/ - 0, /*nb_absolute*/ - 0, /*nb_nonzero*/ - 0, /*nb_invert*/ - 0, /*nb_lshift*/ - 0, /*nb_rshift*/ - 0, /*nb_and*/ - 0, /*nb_xor*/ - 0, /*nb_or*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_coerce*/ - #endif - 0, /*nb_int*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_long*/ - #else - 0, /*reserved*/ - #endif - 0, /*nb_float*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_oct*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*nb_hex*/ - #endif - 0, /*nb_inplace_add*/ - 0, /*nb_inplace_subtract*/ - 0, /*nb_inplace_multiply*/ - #if PY_MAJOR_VERSION < 3 - 0, /*nb_inplace_divide*/ - #endif - 0, /*nb_inplace_remainder*/ - 0, /*nb_inplace_power*/ - 0, /*nb_inplace_lshift*/ - 0, /*nb_inplace_rshift*/ - 0, /*nb_inplace_and*/ - 0, /*nb_inplace_xor*/ - 0, /*nb_inplace_or*/ - 0, /*nb_floor_divide*/ - 0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - #if PY_VERSION_HEX >= 0x02050000 - 0, /*nb_index*/ - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_cKDTree = { - 0, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_cKDTree = { - 0, /*mp_length*/ - 0, /*mp_subscript*/ - 0, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_cKDTree = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_getbuffer*/ - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, /*bf_releasebuffer*/ - #endif -}; - -static PyTypeObject __pyx_type_5scipy_7spatial_7ckdtree_cKDTree = { - 
PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.spatial.ckdtree.cKDTree"), /*tp_name*/ - sizeof(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5scipy_7spatial_7ckdtree_cKDTree, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #else - 0, /*reserved*/ - #endif - 0, /*tp_repr*/ - &__pyx_tp_as_number_cKDTree, /*tp_as_number*/ - &__pyx_tp_as_sequence_cKDTree, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_cKDTree, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_cKDTree, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - __Pyx_DOCSTR("kd-tree for quick nearest-neighbor lookup\n\n This class provides an index into a set of k-dimensional points\n which can be used to rapidly look up the nearest neighbors of any\n point. \n\n The algorithm used is described in Maneewongvatana and Mount 1999. \n The general idea is that the kd-tree is a binary trie, each of whose\n nodes represents an axis-aligned hyperrectangle. Each node specifies\n an axis and splits the set of points based on whether their coordinate\n along that axis is greater than or less than a particular value. \n\n During construction, the axis and splitting point are chosen by the \n \"sliding midpoint\" rule, which ensures that the cells do not all\n become long and thin. \n\n The tree can be queried for the r closest neighbors of any given point \n (optionally returning only those within some maximum distance of the \n point). It can also be queried, with a substantial gain in efficiency, \n for the r approximate closest neighbors.\n\n For large dimensions (20 is already large) do not expect this to run \n significantly faster than brute force. 
High-dimensional nearest-neighbor\n queries are a substantial open problem in computer science.\n\n Parameters\n ----------\n data : array-like, shape (n,m)\n The n data points of dimension mto be indexed. This array is \n not copied unless this is necessary to produce a contiguous \n array of doubles, and so modifying this data will result in \n bogus results.\n leafsize : positive integer\n The number of points at which the algorithm switches over to\n brute-force.\n\n "), /*tp_doc*/ - __pyx_tp_traverse_5scipy_7spatial_7ckdtree_cKDTree, /*tp_traverse*/ - __pyx_tp_clear_5scipy_7spatial_7ckdtree_cKDTree, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_5scipy_7spatial_7ckdtree_cKDTree, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_5scipy_7spatial_7ckdtree_cKDTree, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_pf_5scipy_7spatial_7ckdtree_7cKDTree___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_5scipy_7spatial_7ckdtree_cKDTree, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - #if PY_VERSION_HEX >= 0x02060000 - 0, /*tp_version_tag*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("ckdtree"), - 0, /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, - {&__pyx_kp_u_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 1, 0, 0}, - {&__pyx_kp_u_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 1, 0, 0}, - {&__pyx_kp_u_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 1, 0, 0}, - 
{&__pyx_kp_u_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 1, 0, 0}, - {&__pyx_kp_u_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 1, 0, 0}, - {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0}, - {&__pyx_kp_u_22, __pyx_k_22, sizeof(__pyx_k_22), 0, 1, 0, 0}, - {&__pyx_n_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 1}, - {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0}, - {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____all__, __pyx_k____all__, sizeof(__pyx_k____all__), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s__amax, __pyx_k__amax, sizeof(__pyx_k__amax), 0, 0, 1, 1}, - {&__pyx_n_s__amin, __pyx_k__amin, sizeof(__pyx_k__amin), 0, 0, 1, 1}, - {&__pyx_n_s__arange, __pyx_k__arange, sizeof(__pyx_k__arange), 0, 0, 1, 1}, - {&__pyx_n_s__asarray, __pyx_k__asarray, sizeof(__pyx_k__asarray), 0, 0, 1, 1}, - {&__pyx_n_s__ascontiguousarray, __pyx_k__ascontiguousarray, sizeof(__pyx_k__ascontiguousarray), 0, 0, 1, 1}, - {&__pyx_n_s__astype, __pyx_k__astype, sizeof(__pyx_k__astype), 0, 0, 1, 1}, - {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, - {&__pyx_n_s__cKDTree, __pyx_k__cKDTree, sizeof(__pyx_k__cKDTree), 0, 0, 1, 1}, - {&__pyx_n_s__data, __pyx_k__data, sizeof(__pyx_k__data), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1}, - {&__pyx_n_s__eps, __pyx_k__eps, sizeof(__pyx_k__eps), 0, 0, 1, 1}, - {&__pyx_n_s__fill, __pyx_k__fill, sizeof(__pyx_k__fill), 0, 0, 1, 1}, - {&__pyx_n_s__float, __pyx_k__float, sizeof(__pyx_k__float), 0, 0, 1, 1}, - {&__pyx_n_s__i, __pyx_k__i, 
sizeof(__pyx_k__i), 0, 0, 1, 1}, - {&__pyx_n_s__inf, __pyx_k__inf, sizeof(__pyx_k__inf), 0, 0, 1, 1}, - {&__pyx_n_s__int32, __pyx_k__int32, sizeof(__pyx_k__int32), 0, 0, 1, 1}, - {&__pyx_n_s__k, __pyx_k__k, sizeof(__pyx_k__k), 0, 0, 1, 1}, - {&__pyx_n_s__kdtree, __pyx_k__kdtree, sizeof(__pyx_k__kdtree), 0, 0, 1, 1}, - {&__pyx_n_s__leafsize, __pyx_k__leafsize, sizeof(__pyx_k__leafsize), 0, 0, 1, 1}, - {&__pyx_n_s__newaxis, __pyx_k__newaxis, sizeof(__pyx_k__newaxis), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1}, - {&__pyx_n_s__prod, __pyx_k__prod, sizeof(__pyx_k__prod), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, - {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, - {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "scipy/spatial/ckdtree.pyx":216 - * self.leafsize = leafsize - * if 
self.leafsize<1: - * raise ValueError("leafsize must be at least 1") # <<<<<<<<<<<<<< - * self.maxes = np.ascontiguousarray(np.amax(self.data,axis=0)) - * self.mins = np.ascontiguousarray(np.amin(self.data,axis=0)) - */ - __pyx_k_tuple_3 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_3)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_2)); - PyTuple_SET_ITEM(__pyx_k_tuple_3, 0, ((PyObject *)__pyx_kp_s_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_3)); - - /* "scipy/spatial/ckdtree.pyx":561 - * raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) - * if p<1: - * raise ValueError("Only p-norms with 1<=p<=infinity permitted") # <<<<<<<<<<<<<< - * if len(x.shape)==1: - * single = True - */ - __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_8)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_7)); - PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_s_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); - - /* "scipy/spatial/ckdtree.pyx":564 - * if len(x.shape)==1: - * single = True - * x = x[np.newaxis,:] # <<<<<<<<<<<<<< - * else: - * single = False - */ - __pyx_k_slice_9 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_k_slice_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_9); - __Pyx_GIVEREF(__pyx_k_slice_9); - - /* "scipy/spatial/ckdtree.pyx":591 - * else: - * if k==1: - * return np.reshape(dd[...,0],retshape), np.reshape(ii[...,0],retshape) # <<<<<<<<<<<<<< - * else: - * return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,)) - */ - 
__pyx_k_tuple_10 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_k_tuple_10, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); - __pyx_k_tuple_11 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_k_tuple_11, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_k_tuple_13 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_13)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_12)); - PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, ((PyObject *)__pyx_kp_u_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13)); - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_k_tuple_15 = 
PyTuple_New(1); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_15)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_14)); - PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_kp_u_14)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_14)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_k_tuple_17 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_17)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_16)); - PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_kp_u_16)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_16)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17)); - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_k_tuple_20 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_20)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_20)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_19)); - PyTuple_SET_ITEM(__pyx_k_tuple_20, 0, ((PyObject *)__pyx_kp_u_19)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_19)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_20)); - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One 
could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_k_tuple_21 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_21)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_16)); - PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_kp_u_16)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_16)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_k_tuple_23 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_23)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_22)); - PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_kp_u_22)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_22)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initckdtree(void); /*proto*/ -PyMODINIT_FUNC initckdtree(void) -#else -PyMODINIT_FUNC 
PyInit_ckdtree(void); /*proto*/ -PyMODINIT_FUNC PyInit_ckdtree(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - double __pyx_t_3; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_ckdtree(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? 
*/ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("ckdtree"), __pyx_methods, 0, 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. ---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__spatial__ckdtree) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - __pyx_vtabptr_5scipy_7spatial_7ckdtree_cKDTree = &__pyx_vtable_5scipy_7spatial_7ckdtree_cKDTree; - __pyx_vtable_5scipy_7spatial_7ckdtree_cKDTree.__build = (struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *(*)(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *, int, int, double *, double 
*))__pyx_f_5scipy_7spatial_7ckdtree_7cKDTree___build; - __pyx_vtable_5scipy_7spatial_7ckdtree_cKDTree.__free_tree = (PyObject *(*)(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *, struct __pyx_t_5scipy_7spatial_7ckdtree_innernode *))__pyx_f_5scipy_7spatial_7ckdtree_7cKDTree___free_tree; - __pyx_vtable_5scipy_7spatial_7ckdtree_cKDTree.__query = (void (*)(struct __pyx_obj_5scipy_7spatial_7ckdtree_cKDTree *, double *, int *, double *, int, double, double, double))__pyx_f_5scipy_7spatial_7ckdtree_7cKDTree___query; - if (PyType_Ready(&__pyx_type_5scipy_7spatial_7ckdtree_cKDTree) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5scipy_7spatial_7ckdtree_cKDTree.tp_dict, __pyx_vtabptr_5scipy_7spatial_7ckdtree_cKDTree) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "cKDTree", (PyObject *)&__pyx_type_5scipy_7spatial_7ckdtree_cKDTree) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_7spatial_7ckdtree_cKDTree = &__pyx_type_5scipy_7spatial_7ckdtree_cKDTree; - /*--- Type import code ---*/ - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray 
= __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/spatial/ckdtree.pyx":3 - * # Copyright Anne M. Archibald 2008 - * # Released under the scipy license - * import numpy as np # <<<<<<<<<<<<<< - * cimport numpy as np - * cimport stdlib - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":7 - * cimport stdlib - * - * import kdtree # <<<<<<<<<<<<<< - * - * cdef double infinity = np.inf - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__kdtree), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__kdtree, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/spatial/ckdtree.pyx":9 - * import kdtree - * - * cdef double infinity = np.inf # <<<<<<<<<<<<<< - * - * __all__ = ['cKDTree'] - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__inf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_5scipy_7spatial_7ckdtree_infinity = __pyx_t_3; - - /* "scipy/spatial/ckdtree.pyx":11 - * cdef double infinity = np.inf - * - * __all__ = ['cKDTree'] # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__cKDTree)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s__cKDTree)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__cKDTree)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____all__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - - /* "scipy/spatial/ckdtree.pyx":515 - * - * def query(cKDTree self, object x, int k=1, double eps=0, double p=2, - * double distance_upper_bound=infinity): # <<<<<<<<<<<<<< - * """query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf) - * - */ - __pyx_k_5 = __pyx_v_5scipy_7spatial_7ckdtree_infinity; - - /* "scipy/spatial/ckdtree.pyx":1 - * # Copyright Anne M. 
Archibald 2008 # <<<<<<<<<<<<<< - * # Released under the scipy license - * import numpy as np - */ - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - - /* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.spatial.ckdtree", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.spatial.ckdtree"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static CYTHON_INLINE void 
__Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. */ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. 
*/ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - 
"BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = 
value; - } else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } else if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { - unsigned int n = 1; - return *(unsigned char*)(&n) != 0; -} - -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; -} __Pyx_BufFmt_Context; - -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} - -static int __Pyx_BufFmt_ParseNumber(const char** ts) { 
- int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t < '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} - -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} - -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case 'b': return "'char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 0: return "end"; - default: return "unparseable format string"; - } -} - -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 
16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif - -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return 
sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; - case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; - case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); - case 'O': return 'O'; - case 'P': return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} - -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset; - if (ctx->enc_type == 0) return 0; - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode 
== '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - } - - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - /* special case -- treat as struct rather than complex number */ - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - - ctx->fmt_offset += size; - - --ctx->enc_count; /* Consume from buffer string */ - - /* Done checking, move to next field, pushing or popping struct stack if needed */ - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; /* breaks both loops as ctx->enc_count == 0 */ - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; /* empty struct */ - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} - -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) 
{ - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case 10: - case 13: - ++ts; - break; - case '<': - if (!__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': /* substruct */ - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - } - break; - case '}': /* end of substruct; either repeat or move on */ - ++ts; - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } /* fall through */ - case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': - if (ctx->enc_type == *ts && got_Z == 
ctx->is_complex && - ctx->enc_packmode == ctx->new_packmode) { - /* Continue pooling same type */ - ctx->enc_count += ctx->new_count; - } else { - /* New type */ - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - } - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - default: - { - int number = __Pyx_BufFmt_ParseNumber(&ts); - if (number == -1) { /* First char was not a digit */ - PyErr_Format(PyExc_ValueError, - "Does not understand character buffer dtype format string ('%c')", *ts); - return NULL; - } - ctx->new_count = (size_t)number; - } - } - } -} - -static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { - if (obj == Py_None || obj == NULL) { - __Pyx_ZeroBuffer(buf); - return 0; - } - buf->buf = NULL; - if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; - if (buf->ndim != nd) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if ((unsigned)buf->itemsize != dtype->size) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_ZeroBuffer(buf); - return -1; -} - -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (info->buf == NULL) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} - -static void __Pyx_RaiseBufferFallbackError(void) { - PyErr_Format(PyExc_ValueError, - "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); -} - - -static void __Pyx_RaiseBufferIndexError(int axis) { - PyErr_Format(PyExc_IndexError, - "Out of bounds on buffer access (axis %d)", axis); -} - - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); - else { - PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; - } -} - -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject* obj = view->obj; - if (obj) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); - Py_DECREF(obj); - view->obj = NULL; - } -} - -#endif - -static PyObject 
*__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - 
__pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 
1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static 
CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; 
- } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = 
neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; 
-#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > 
const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename) { - PyObject *old_exc, *old_val, *old_tb; - PyObject *ctx; - __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); - #if PY_MAJOR_VERSION < 3 - ctx = PyString_FromString(name); - #else - ctx = PyUnicode_FromString(name); - #endif - __Pyx_ErrRestore(old_exc, old_val, old_tb); - if (!ctx) { - PyErr_WriteUnraisable(Py_None); - } else { - PyErr_WriteUnraisable(ctx); - Py_DECREF(ctx); - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; 
-} - -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); - Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static 
PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes 
/*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - 
if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/spatial/common.h b/scipy-0.10.1/scipy/spatial/common.h deleted file mode 100644 index 91623534ce..0000000000 --- a/scipy-0.10.1/scipy/spatial/common.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * common.h - * - * Author: Damian Eads - * Date: September 22, 2007 (moved into new file on June 8, 2008) - * - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. - * Adapted for incorporation into Scipy, April 9, 2008. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - Neither the name of the author nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _CLUSTER_COMMON_H -#define _CLUSTER_COMMON_H - -#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y)) -#define CPY_MIN(_x, _y) ((_x < _y) ? 
(_x) : (_y)) - -#define NCHOOSE2(_n) ((_n)*(_n-1)/2) - -#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8) -#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \ - CPY_BITS_PER_CHAR)) -#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \ - ((CPY_BITS_PER_CHAR-1) - \ - ((i) % CPY_BITS_PER_CHAR))) & 0x1) -#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \ - ((0x1) << ((CPY_BITS_PER_CHAR-1) \ - -((i) % CPY_BITS_PER_CHAR)))) -#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \ - ~((0x1) << ((CPY_BITS_PER_CHAR-1) \ - -((i) % CPY_BITS_PER_CHAR)))) - -#ifndef CPY_CEIL_DIV -#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \ - ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1)) -#endif - - -#ifdef CPY_DEBUG -#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__) -#else -#define CPY_DEBUG_MSG(...) -#endif - -#endif diff --git a/scipy-0.10.1/scipy/spatial/distance.py b/scipy-0.10.1/scipy/spatial/distance.py deleted file mode 100644 index 1becb339b1..0000000000 --- a/scipy-0.10.1/scipy/spatial/distance.py +++ /dev/null @@ -1,2178 +0,0 @@ -""" -===================================================== -Distance computations (:mod:`scipy.spatial.distance`) -===================================================== - -.. sectionauthor:: Damian Eads - -Function Reference ------------------- - -Distance matrix computation from a collection of raw observation vectors -stored in a rectangular array. - -.. autosummary:: - :toctree: generated/ - - pdist -- pairwise distances between observation vectors. - cdist -- distances between between two collections of observation vectors - squareform -- convert distance matrix to a condensed one and vice versa - -Predicates for checking the validity of distance matrices, both -condensed and redundant. Also contained in this module are functions -for computing the number of observations in a distance matrix. - -.. 
autosummary:: - :toctree: generated/ - - is_valid_dm -- checks for a valid distance matrix - is_valid_y -- checks for a valid condensed distance matrix - num_obs_dm -- # of observations in a distance matrix - num_obs_y -- # of observations in a condensed distance matrix - -Distance functions between two vectors ``u`` and ``v``. Computing -distances over a large collection of vectors is inefficient for these -functions. Use ``pdist`` for this purpose. - -.. autosummary:: - :toctree: generated/ - - braycurtis -- the Bray-Curtis distance. - canberra -- the Canberra distance. - chebyshev -- the Chebyshev distance. - cityblock -- the Manhattan distance. - correlation -- the Correlation distance. - cosine -- the Cosine distance. - dice -- the Dice dissimilarity (boolean). - euclidean -- the Euclidean distance. - hamming -- the Hamming distance (boolean). - jaccard -- the Jaccard distance (boolean). - kulsinski -- the Kulsinski distance (boolean). - mahalanobis -- the Mahalanobis distance. - matching -- the matching dissimilarity (boolean). - minkowski -- the Minkowski distance. - rogerstanimoto -- the Rogers-Tanimoto dissimilarity (boolean). - russellrao -- the Russell-Rao dissimilarity (boolean). - seuclidean -- the normalized Euclidean distance. - sokalmichener -- the Sokal-Michener dissimilarity (boolean). - sokalsneath -- the Sokal-Sneath dissimilarity (boolean). - sqeuclidean -- the squared Euclidean distance. - yule -- the Yule dissimilarity (boolean). - - -References ----------- - -.. [Sta07] "Statistics toolbox." API Reference Documentation. The MathWorks. - http://www.mathworks.com/access/helpdesk/help/toolbox/stats/. - Accessed October 1, 2007. - -.. [Mti07] "Hierarchical clustering." API Reference Documentation. - The Wolfram Research, Inc. - http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/HierarchicalClustering.html. - Accessed October 1, 2007. - -.. [Gow69] Gower, JC and Ross, GJS. 
"Minimum Spanning Trees and Single Linkage - Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969. - -.. [War63] Ward Jr, JH. "Hierarchical grouping to optimize an objective - function." Journal of the American Statistical Association. 58(301): - pp. 236--44. 1963. - -.. [Joh66] Johnson, SC. "Hierarchical clustering schemes." Psychometrika. - 32(2): pp. 241--54. 1966. - -.. [Sne62] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp. - 855--60. 1962. - -.. [Bat95] Batagelj, V. "Comparing resemblance measures." Journal of - Classification. 12: pp. 73--90. 1995. - -.. [Sok58] Sokal, RR and Michener, CD. "A statistical method for evaluating - systematic relationships." Scientific Bulletins. 38(22): - pp. 1409--38. 1958. - -.. [Ede79] Edelbrock, C. "Mixture model tests of hierarchical clustering - algorithms: the problem of classifying everybody." Multivariate - Behavioral Research. 14: pp. 367--84. 1979. - -.. [Jai88] Jain, A., and Dubes, R., "Algorithms for Clustering Data." - Prentice-Hall. Englewood Cliffs, NJ. 1988. - -.. [Fis36] Fisher, RA "The use of multiple measurements in taxonomic - problems." Annals of Eugenics, 7(2): 179-188. 1936 - - -Copyright Notice ----------------- - -Copyright (C) Damian Eads, 2007-2008. New BSD License. - -""" - -import warnings -import numpy as np -from numpy.linalg import norm - -import _distance_wrap - - -def _copy_array_if_base_present(a): - """ - Copies the array if its base points to a parent array. - """ - if a.base is not None: - return a.copy() - elif np.issubsctype(a, np.float32): - return np.array(a, dtype=np.double) - else: - return a - - -def _copy_arrays_if_base_present(T): - """ - Accepts a tuple of arrays T. Copies the array T[i] if its base array - points to an actual array. Otherwise, the reference is just copied. - This is useful if the arrays are being passed to a C function that - does not do proper striding. 
- """ - l = [_copy_array_if_base_present(a) for a in T] - return l - - -def _convert_to_bool(X): - if X.dtype != np.bool: - X = np.bool_(X) - if not X.flags.contiguous: - X = X.copy() - return X - - -def _convert_to_double(X): - if X.dtype != np.double: - X = np.double(X) - if not X.flags.contiguous: - X = X.copy() - return X - - -def _validate_vector(u, dtype=None): - # XXX Is order='c' really necessary? - u = np.asarray(u, dtype=dtype, order='c').squeeze() - # Ensure values such as u=1 and u=[1] still return 1-D arrays. - u = np.atleast_1d(u) - if u.ndim > 1: - raise ValueError("Input vector should be 1-D.") - return u - - -def minkowski(u, v, p): - r""" - Computes the Minkowski distance between two vectors ``u`` and ``v``, - defined as - - .. math:: - - {||u-v||}_p = (\sum{|u_i - v_i|^p})^{1/p}. - - Parameters - ---------- - u : ndarray - An n-dimensional vector. - v : ndarray - An n-dimensional vector. - p : int - The order of the norm of the difference :math:`{||u-v||}_p`. - - Returns - ------- - d : double - The Minkowski distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - if p < 1: - raise ValueError("p must be at least 1") - dist = norm(u - v, ord=p) - return dist - - -def wminkowski(u, v, p, w): - r""" - Computes the weighted Minkowski distance between two vectors ``u`` - and ``v``, defined as - - .. math:: - - \left(\sum{(w_i |u_i - v_i|^p)}\right)^{1/p}. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - p : int - The order of the norm of the difference :math:`{||u-v||}_p`. - w : ndarray - The weight vector. - - Returns - ------- - d : double - The Minkowski distance between vectors ``u`` and ``v``. 
- """ - u = _validate_vector(u) - v = _validate_vector(v) - w = _validate_vector(w) - if p < 1: - raise ValueError("p must be at least 1") - dist = norm(w * (u - v), ord=p) - return dist - - -def euclidean(u, v): - """ - Computes the Euclidean distance between two n-vectors ``u`` and ``v``, - which is defined as - - .. math:: - - {||u-v||}_2 - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Euclidean distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - dist = norm(u - v) - return dist - - -def sqeuclidean(u, v): - """ - Computes the squared Euclidean distance between two n-vectors u and v, - which is defined as - - .. math:: - - {||u-v||}_2^2. - - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The squared Euclidean distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - dist = ((u - v) ** 2).sum() - return dist - - -def cosine(u, v): - r""" - Computes the Cosine distance between two n-vectors u and v, which - is defined as - - .. math:: - - 1 - \frac{uv^T} - {||u||_2 ||v||_2}. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Cosine distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - dist = 1.0 - np.dot(u, v) / (norm(u) * norm(v)) - return dist - - -def correlation(u, v): - r""" - Computes the correlation distance between two n-vectors ``u`` and - ``v``, which is defined as - - .. 
math:: - - 1 - frac{(u - \bar{u}){(v - \bar{v})}^T} - {{||(u - \bar{u})||}_2 {||(v - \bar{v})||}_2^T} - - where :math:`\bar{u}` is the mean of a vectors elements and ``n`` - is the common dimensionality of ``u`` and ``v``. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The correlation distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - umu = u.mean() - vmu = v.mean() - um = u - umu - vm = v - vmu - dist = 1.0 - np.dot(um, vm) / (norm(um) * norm(vm)) - return dist - - -def hamming(u, v): - r""" - Computes the Hamming distance between two n-vectors ``u`` and - ``v``, which is simply the proportion of disagreeing components in - ``u`` and ``v``. If ``u`` and ``v`` are boolean vectors, the Hamming - distance is - - .. math:: - - \frac{c_{01} + c_{10}}{n} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Hamming distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - return (u != v).mean() - - -def jaccard(u, v): - """ - Computes the Jaccard-Needham dissimilarity between two boolean - n-vectors u and v, which is - - .. math:: - - \frac{c_{TF} + c_{FT}} - {c_{TT} + c_{FT} + c_{TF}} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Jaccard distance between vectors ``u`` and ``v``. 
- """ - u = _validate_vector(u) - v = _validate_vector(v) - dist = (np.double(np.bitwise_and((u != v), - np.bitwise_or(u != 0, v != 0)).sum()) - / np.double(np.bitwise_or(u != 0, v != 0).sum())) - return dist - - -def kulsinski(u, v): - """ - Computes the Kulsinski dissimilarity between two boolean n-vectors - u and v, which is defined as - - .. math:: - - \frac{c_{TF} + c_{FT} - c_{TT} + n} - {c_{FT} + c_{TF} + n} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Kulsinski distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - n = float(len(u)) - (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) - - return (ntf + nft - ntt + n) / (ntf + nft + n) - - -def seuclidean(u, v, V): - """ - Returns the standardized Euclidean distance between two n-vectors - ``u`` and ``v``. ``V`` is an n-dimensional vector of component - variances. It is usually computed among a larger collection - vectors. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - V : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The standardized Euclidean distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - V = _validate_vector(V, dtype=np.float64) - if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]: - raise TypeError('V must be a 1-D array of the same dimension ' - 'as u and v.') - return np.sqrt(((u - v) ** 2 / V).sum()) - - -def cityblock(u, v): - """ - Computes the Manhattan distance between two n-vectors u and v, - which is defined as - - .. math:: - - \\sum_i {\\left| u_i - v_i \\right|}. 
- - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The City Block distance between vectors ``u`` and ``v``. - - """ - u = _validate_vector(u) - v = _validate_vector(v) - return abs(u - v).sum() - - -def mahalanobis(u, v, VI): - r""" - Computes the Mahalanobis distance between two n-vectors ``u`` and ``v``, - which is defiend as - - .. math:: - - (u-v)V^{-1}(u-v)^T - - where ``VI`` is the inverse covariance matrix :math:`V^{-1}`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Mahalanobis distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - VI = np.atleast_2d(VI) - delta = u - v - m = np.dot(np.dot(delta, VI), delta) - return np.sqrt(m) - - -def chebyshev(u, v): - r""" - Computes the Chebyshev distance between two n-vectors u and v, - which is defined as - - .. math:: - - \max_i {|u_i-v_i|}. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Chebyshev distance between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - return max(abs(u - v)) - - -def braycurtis(u, v): - r""" - Computes the Bray-Curtis distance between two n-vectors ``u`` and - ``v``, which is defined as - - .. math:: - - \sum{|u_i-v_i|} / \sum{|u_i+v_i|}. - - The Bray-Curtis distance is in the range [0, 1] if all coordinates are - positive, and is undefined if the inputs are of length zero. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Bray-Curtis distance between vectors ``u`` and ``v``. 
- """ - u = _validate_vector(u) - v = _validate_vector(v, dtype=np.float64) - return abs(u - v).sum() / abs(u + v).sum() - - -def canberra(u, v): - r""" - Computes the Canberra distance between two n-vectors u and v, - which is defined as - - .. math:: - - \sum_u \frac{|u_i-v_i|} - {(|u_i|+|v_i|)}. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Canberra distance between vectors ``u`` and ``v``. - - Notes - ----- - Whe u[i] and v[i] are 0 for given i, then the fraction 0/0 = 0 is used in - the calculation. - - """ - u = _validate_vector(u) - v = _validate_vector(v, dtype=np.float64) - olderr = np.seterr(invalid='ignore') - try: - d = np.nansum(abs(u - v) / (abs(u) + abs(v))) - finally: - np.seterr(**olderr) - return d - - -def _nbool_correspond_all(u, v): - if u.dtype != v.dtype: - raise TypeError("Arrays being compared must be of the same data type.") - - if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: - not_u = 1.0 - u - not_v = 1.0 - v - nff = (not_u * not_v).sum() - nft = (not_u * v).sum() - ntf = (u * not_v).sum() - ntt = (u * v).sum() - elif u.dtype == np.bool: - not_u = ~u - not_v = ~v - nff = (not_u & not_v).sum() - nft = (not_u & v).sum() - ntf = (u & not_v).sum() - ntt = (u & v).sum() - else: - raise TypeError("Arrays being compared have unknown type.") - - return (nff, nft, ntf, ntt) - - -def _nbool_correspond_ft_tf(u, v): - if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: - not_u = 1.0 - u - not_v = 1.0 - v - nft = (not_u * v).sum() - ntf = (u * not_v).sum() - else: - not_u = ~u - not_v = ~v - nft = (not_u & v).sum() - ntf = (u & not_v).sum() - return (nft, ntf) - - -def yule(u, v): - r""" - Computes the Yule dissimilarity between two boolean n-vectors u and v, - which is defined as - - - .. 
math:: - - \frac{R}{c_{TT} + c_{FF} + \frac{R}{2}} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n` and :math:`R = 2.0 * (c_{TF} + c_{FT})`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Yule dissimilarity between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) - return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft) - - -def matching(u, v): - r""" - Computes the Matching dissimilarity between two boolean n-vectors - u and v, which is defined as - - .. math:: - - \frac{c_{TF} + c_{FT}}{n} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Matching dissimilarity between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(nft + ntf) / float(len(u)) - - -def dice(u, v): - r""" - Computes the Dice dissimilarity between two boolean n-vectors - ``u`` and ``v``, which is - - .. math:: - - \frac{c_{TF} + c_{FT}} - {2c_{TT} + c_{FT} + c_{TF}} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Dice dissimilarity between vectors ``u`` and ``v``. 
- """ - u = _validate_vector(u) - v = _validate_vector(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - else: - ntt = (u * v).sum() - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(ntf + nft) / float(2.0 * ntt + ntf + nft) - - -def rogerstanimoto(u, v): - r""" - Computes the Rogers-Tanimoto dissimilarity between two boolean - n-vectors ``u`` and ``v``, which is defined as - - .. math:: - \frac{R} - {c_{TT} + c_{FF} + R} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Rogers-Tanimoto dissimilarity between vectors - `u` and `v`. - """ - u = _validate_vector(u) - v = _validate_vector(v) - (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) - return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft))) - - -def russellrao(u, v): - r""" - Computes the Russell-Rao dissimilarity between two boolean n-vectors - ``u`` and ``v``, which is defined as - - .. math:: - - \frac{n - c_{TT}} - {n} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Russell-Rao dissimilarity between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - else: - ntt = (u * v).sum() - return float(len(u) - ntt) / float(len(u)) - - -def sokalmichener(u, v): - r""" - Computes the Sokal-Michener dissimilarity between two boolean vectors - ``u`` and ``v``, which is defined as - - .. 
math:: - - \frac{R} - {S + R} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and - :math:`S = c_{FF} + c_{TT}`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Sokal-Michener dissimilarity between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - nff = (~u & ~v).sum() - else: - ntt = (u * v).sum() - nff = ((1.0 - u) * (1.0 - v)).sum() - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft)) - - -def sokalsneath(u, v): - r""" - Computes the Sokal-Sneath dissimilarity between two boolean vectors - ``u`` and ``v``, - - .. math:: - - \frac{R} - {c_{TT} + R} - - where :math:`c_{ij}` is the number of occurrences of - :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for - :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`. - - Parameters - ---------- - u : ndarray - An :math:`n`-dimensional vector. - v : ndarray - An :math:`n`-dimensional vector. - - Returns - ------- - d : double - The Sokal-Sneath dissimilarity between vectors ``u`` and ``v``. - """ - u = _validate_vector(u) - v = _validate_vector(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - else: - ntt = (u * v).sum() - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - denom = ntt + 2.0 * (ntf + nft) - if denom == 0: - raise ValueError('Sokal-Sneath dissimilarity is not defined for ' - 'vectors that are entirely false.') - return float(2.0 * (ntf + nft)) / denom - - -def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None): - r""" - Computes the pairwise distances between m original observations in - n-dimensional space. Returns a condensed distance matrix Y. 
For - each :math:`i` and :math:`j` (where :math:`i=2 encoding distances - as described, X=squareform(v) returns a d by d distance matrix X. The - X[i, j] and X[j, i] values are set to - v[{n \choose 2}-{n-i \choose 2} + (j-u-1)] and all - diagonal elements are zero. - - """ - - X = _convert_to_double(np.asarray(X, order='c')) - - if not np.issubsctype(X, np.double): - raise TypeError('A double array must be passed.') - - s = X.shape - - if force.lower() == 'tomatrix': - if len(s) != 1: - raise ValueError("Forcing 'tomatrix' but input X is not a " - "distance vector.") - elif force.lower() == 'tovector': - if len(s) != 2: - raise ValueError("Forcing 'tovector' but input X is not a " - "distance matrix.") - - # X = squareform(v) - if len(s) == 1: - if X.shape[0] == 0: - return np.zeros((1, 1), dtype=np.double) - - # Grab the closest value to the square root of the number - # of elements times 2 to see if the number of elements - # is indeed a binomial coefficient. - d = int(np.ceil(np.sqrt(X.shape[0] * 2))) - - # Check that v is of valid dimensions. - if d * (d - 1) / 2 != int(s[0]): - raise ValueError('Incompatible vector size. It must be a binomial ' - 'coefficient n choose 2 for some integer n >= 2.') - - # Allocate memory for the distance matrix. - M = np.zeros((d, d), dtype=np.double) - - # Since the C code does not support striding using strides. - # The dimensions are used instead. - [X] = _copy_arrays_if_base_present([X]) - - # Fill in the values of the distance matrix. - _distance_wrap.to_squareform_from_vector_wrap(M, X) - - # Return the distance matrix. - M = M + M.transpose() - return M - elif len(s) == 2: - if s[0] != s[1]: - raise ValueError('The matrix argument must be square.') - if checks: - is_valid_dm(X, throw=True, name='X') - - # One-side of the dimensions is set here. - d = s[0] - - if d <= 1: - return np.array([], dtype=np.double) - - # Create a vector. 
- v = np.zeros(((d * (d - 1) / 2),), dtype=np.double) - - # Since the C code does not support striding using strides. - # The dimensions are used instead. - [X] = _copy_arrays_if_base_present([X]) - - # Convert the vector to squareform. - _distance_wrap.to_vector_from_squareform_wrap(X, v) - return v - else: - raise ValueError(('The first argument must be one or two dimensional ' - 'array. A %d-dimensional array is not ' - 'permitted') % len(s)) - - -def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False): - """ - Returns True if the variable D passed is a valid distance matrix. - Distance matrices must be 2-dimensional numpy arrays containing - doubles. They must have a zero-diagonal, and they must be symmetric. - - Parameters - ---------- - D : ndarray - The candidate object to test for validity. - tol : double - The distance matrix should be symmetric. tol is the maximum - difference between the :math:`ij`th entry and the - :math:`ji`th entry for the distance metric to be - considered symmetric. - throw : bool - An exception is thrown if the distance matrix passed is not - valid. - name : string - the name of the variable to checked. This is useful if - throw is set to ``True`` so the offending variable can be - identified in the exception message when an exception is - thrown. - warning : bool - Instead of throwing an exception, a warning message is - raised. - - Returns - ------- - Returns ``True`` if the variable ``D`` passed is a valid - distance matrix. Small numerical differences in ``D`` and - ``D.T`` and non-zeroness of the diagonal are ignored if they are - within the tolerance specified by ``tol``. 
- """ - D = np.asarray(D, order='c') - valid = True - try: - s = D.shape - if D.dtype != np.double: - if name: - raise TypeError(('Distance matrix \'%s\' must contain doubles ' - '(double).') % name) - else: - raise TypeError('Distance matrix must contain doubles ' - '(double).') - if len(D.shape) != 2: - if name: - raise ValueError(('Distance matrix \'%s\' must have shape=2 ' - '(i.e. be two-dimensional).') % name) - else: - raise ValueError('Distance matrix must have shape=2 (i.e. ' - 'be two-dimensional).') - if tol == 0.0: - if not (D == D.T).all(): - if name: - raise ValueError(('Distance matrix \'%s\' must be ' - 'symmetric.') % name) - else: - raise ValueError('Distance matrix must be symmetric.') - if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all(): - if name: - raise ValueError(('Distance matrix \'%s\' diagonal must ' - 'be zero.') % name) - else: - raise ValueError('Distance matrix diagonal must be zero.') - else: - if not (D - D.T <= tol).all(): - if name: - raise ValueError(('Distance matrix \'%s\' must be ' - 'symmetric within tolerance %d.') - % (name, tol)) - else: - raise ValueError('Distance matrix must be symmetric within' - ' tolerance %5.5f.' % tol) - if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all(): - if name: - raise ValueError(('Distance matrix \'%s\' diagonal must be' - ' close to zero within tolerance %5.5f.') - % (name, tol)) - else: - raise ValueError(('Distance matrix \'%s\' diagonal must be' - ' close to zero within tolerance %5.5f.') - % tol) - except Exception, e: - if throw: - raise - if warning: - warnings.warn(str(e)) - valid = False - return valid - - -def is_valid_y(y, warning=False, throw=False, name=None): - r""" - Returns ``True`` if the variable ``y`` passed is a valid condensed - distance matrix. Condensed distance matrices must be 1-dimensional - numpy arrays containing doubles. Their length must be a binomial - coefficient :math:`{n \choose 2}` for some positive integer n. 
- - - Parameters - ---------- - y : ndarray - The condensed distance matrix. - warning : bool, optional - Invokes a warning if the variable passed is not a valid - condensed distance matrix. The warning message explains why - the distance matrix is not valid. 'name' is used when - referencing the offending variable. - throws : throw, optional - Throws an exception if the variable passed is not a valid - condensed distance matrix. - name : bool, optional - Used when referencing the offending variable in the - warning or exception message. - - """ - y = np.asarray(y, order='c') - valid = True - try: - if type(y) != np.ndarray: - if name: - raise TypeError(('\'%s\' passed as a condensed distance ' - 'matrix is not a numpy array.') % name) - else: - raise TypeError('Variable is not a numpy array.') - if y.dtype != np.double: - if name: - raise TypeError(('Condensed distance matrix \'%s\' must ' - 'contain doubles (double).') % name) - else: - raise TypeError('Condensed distance matrix must contain ' - 'doubles (double).') - if len(y.shape) != 1: - if name: - raise ValueError(('Condensed distance matrix \'%s\' must ' - 'have shape=1 (i.e. be one-dimensional).') - % name) - else: - raise ValueError('Condensed distance matrix must have shape=1 ' - '(i.e. be one-dimensional).') - n = y.shape[0] - d = int(np.ceil(np.sqrt(n * 2))) - if (d * (d - 1) / 2) != n: - if name: - raise ValueError(('Length n of condensed distance matrix ' - '\'%s\' must be a binomial coefficient, i.e.' - 'there must be a k such that ' - '(k \choose 2)=n)!') % name) - else: - raise ValueError('Length n of condensed distance matrix must ' - 'be a binomial coefficient, i.e. there must ' - 'be a k such that (k \choose 2)=n)!') - except Exception, e: - if throw: - raise - if warning: - warnings.warn(str(e)) - valid = False - return valid - - -def num_obs_dm(d): - """ - Returns the number of original observations that correspond to a - square, redundant distance matrix ``D``. 
- - Parameters - ---------- - d : ndarray - The target distance matrix. - - Returns - ------- - numobs : int - The number of observations in the redundant distance matrix. - """ - d = np.asarray(d, order='c') - is_valid_dm(d, tol=np.inf, throw=True, name='d') - return d.shape[0] - - -def num_obs_y(Y): - """ - Returns the number of original observations that correspond to a - condensed distance matrix ``Y``. - - Parameters - ---------- - Y : ndarray - The number of original observations in the condensed - observation ``Y``. - - Returns - ------- - n : int - The number of observations in the condensed distance matrix - passed. - """ - Y = np.asarray(Y, order='c') - is_valid_y(Y, throw=True, name='Y') - k = Y.shape[0] - if k == 0: - raise ValueError("The number of observations cannot be determined on " - "an empty distance matrix.") - d = int(np.ceil(np.sqrt(k * 2))) - if (d * (d - 1) / 2) != k: - raise ValueError("Invalid condensed distance matrix passed. Must be " - "some k where k=(n choose 2) for some n >= 2.") - return d - - -def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None): - r""" - Computes distance between each pair of observation vectors in the - Cartesian product of two collections of vectors. ``XA`` is a - :math:`m_A` by :math:`n` array while ``XB`` is a :math:`m_B` by - :math:`n` array. A :math:`m_A` by :math:`m_B` array is - returned. An exception is thrown if ``XA`` and ``XB`` do not have - the same number of columns. - - A rectangular distance matrix ``Y`` is returned. For each :math:`i` - and :math:`j`, the metric ``dist(u=XA[i], v=XB[j])`` is computed - and stored in the :math:`ij` th entry. - - The following are common calling conventions: - - 1. ``Y = cdist(XA, XB, 'euclidean')`` - - Computes the distance between :math:`m` points using - Euclidean distance (2-norm) as the distance metric between the - points. The points are arranged as :math:`m` - :math:`n`-dimensional row vectors in the matrix X. - - 2. 
``Y = cdist(XA, XB, 'minkowski', p)`` - - Computes the distances using the Minkowski distance - :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \geq 1`. - - 3. ``Y = cdist(XA, XB, 'cityblock')`` - - Computes the city block or Manhattan distance between the - points. - - 4. ``Y = cdist(XA, XB, 'seuclidean', V=None)`` - - Computes the standardized Euclidean distance. The standardized - Euclidean distance between two n-vectors ``u`` and ``v`` is - - .. math:: - - \sqrt{\sum {(u_i-v_i)^2 / V[x_i]}}. - - V is the variance vector; V[i] is the variance computed over all - the i'th components of the points. If not passed, it is - automatically computed. - - 5. ``Y = cdist(XA, XB, 'sqeuclidean')`` - - Computes the squared Euclidean distance :math:`||u-v||_2^2` between - the vectors. - - 6. ``Y = cdist(XA, XB, 'cosine')`` - - Computes the cosine distance between vectors u and v, - - .. math:: - - \frac{1 - uv^T} - {{|u|}_2 {|v|}_2} - - where :math:`|*|_2` is the 2-norm of its argument *. - - 7. ``Y = cdist(XA, XB, 'correlation')`` - - Computes the correlation distance between vectors u and v. This is - - .. math:: - - \frac{1 - (u - n{|u|}_1){(v - n{|v|}_1)}^T} - {{|(u - n{|u|}_1)|}_2 {|(v - n{|v|}_1)|}^T} - - where :math:`|*|_1` is the Manhattan (or 1-norm) of its - argument, and :math:`n` is the common dimensionality of the - vectors. - - 8. ``Y = cdist(XA, XB, 'hamming')`` - - Computes the normalized Hamming distance, or the proportion of - those vector elements between two n-vectors ``u`` and ``v`` - which disagree. To save memory, the matrix ``X`` can be of type - boolean. - - 9. ``Y = cdist(XA, XB, 'jaccard')`` - - Computes the Jaccard distance between the points. Given two - vectors, ``u`` and ``v``, the Jaccard distance is the - proportion of those elements ``u[i]`` and ``v[i]`` that - disagree where at least one of them is non-zero. - - 10. ``Y = cdist(XA, XB, 'chebyshev')`` - - Computes the Chebyshev distance between the points. 
The - Chebyshev distance between two n-vectors ``u`` and ``v`` is the - maximum norm-1 distance between their respective elements. More - precisely, the distance is given by - - .. math:: - - d(u,v) = \max_i {|u_i-v_i|}. - - 11. ``Y = cdist(XA, XB, 'canberra')`` - - Computes the Canberra distance between the points. The - Canberra distance between two points ``u`` and ``v`` is - - .. math:: - - d(u,v) = \sum_u \frac{|u_i-v_i|} - {(|u_i|+|v_i|)} - - - 12. ``Y = cdist(XA, XB, 'braycurtis')`` - - Computes the Bray-Curtis distance between the points. The - Bray-Curtis distance between two points ``u`` and ``v`` is - - - .. math:: - - d(u,v) = \frac{\sum_i (u_i-v_i)} - {\sum_i (u_i+v_i)} - - 13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)`` - - Computes the Mahalanobis distance between the points. The - Mahalanobis distance between two points ``u`` and ``v`` is - :math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI`` - variable) is the inverse covariance. If ``VI`` is not None, - ``VI`` will be used as the inverse covariance matrix. - - 14. ``Y = cdist(XA, XB, 'yule')`` - - Computes the Yule distance between the boolean - vectors. (see yule function documentation) - - 15. ``Y = cdist(XA, XB, 'matching')`` - - Computes the matching distance between the boolean - vectors. (see matching function documentation) - - 16. ``Y = cdist(XA, XB, 'dice')`` - - Computes the Dice distance between the boolean vectors. (see - dice function documentation) - - 17. ``Y = cdist(XA, XB, 'kulsinski')`` - - Computes the Kulsinski distance between the boolean - vectors. (see kulsinski function documentation) - - 18. ``Y = cdist(XA, XB, 'rogerstanimoto')`` - - Computes the Rogers-Tanimoto distance between the boolean - vectors. (see rogerstanimoto function documentation) - - 19. ``Y = cdist(XA, XB, 'russellrao')`` - - Computes the Russell-Rao distance between the boolean - vectors. (see russellrao function documentation) - - 20. 
``Y = cdist(XA, XB, 'sokalmichener')`` - - Computes the Sokal-Michener distance between the boolean - vectors. (see sokalmichener function documentation) - - 21. ``Y = cdist(XA, XB, 'sokalsneath')`` - - Computes the Sokal-Sneath distance between the vectors. (see - sokalsneath function documentation) - - - 22. ``Y = cdist(XA, XB, 'wminkowski')`` - - Computes the weighted Minkowski distance between the - vectors. (see sokalsneath function documentation) - - 23. ``Y = cdist(XA, XB, f)`` - - Computes the distance between all pairs of vectors in X - using the user supplied 2-arity function f. For example, - Euclidean distance between the vectors could be computed - as follows:: - - dm = cdist(XA, XB, (lambda u, v: np.sqrt(((u-v)*(u-v).T).sum()))) - - Note that you should avoid passing a reference to one of - the distance functions defined in this library. For example,:: - - dm = cdist(XA, XB, sokalsneath) - - would calculate the pair-wise distances between the vectors in - X using the Python function sokalsneath. This would result in - sokalsneath being called :math:`{n \choose 2}` times, which - is inefficient. Instead, the optimized C version is more - efficient, and we call it using the following syntax.:: - - dm = cdist(XA, XB, 'sokalsneath') - - Parameters - ---------- - XA : ndarray - An :math:`m_A` by :math:`n` array of :math:`m_A` - original observations in an :math:`n`-dimensional space. - XB : ndarray - An :math:`m_B` by :math:`n` array of :math:`m_B` - original observations in an :math:`n`-dimensional space. - metric : string or function - The distance metric to use. The distance function can - be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', - 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', - 'jaccard', 'kulsinski', 'mahalanobis', 'matching', - 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', - 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', - 'yule'. - w : ndarray - The weight vector (for weighted Minkowski). 
- p : double - The p-norm to apply (for Minkowski, weighted and unweighted) - V : ndarray - The variance vector (for standardized Euclidean). - VI : ndarray - The inverse of the covariance matrix (for Mahalanobis). - - - Returns - ------- - Y : ndarray - A :math:`m_A` by :math:`m_B` distance matrix. - """ - -# 21. Y = cdist(XA, XB, 'test_Y') -# -# Computes the distance between all pairs of vectors in X -# using the distance metric Y but with a more succint, -# verifiable, but less efficient implementation. - - XA = np.asarray(XA, order='c') - XB = np.asarray(XB, order='c') - - #if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double): - # raise TypeError('Floating point arrays must be 64-bit (got %r).' % - # (X.dtype.type,)) - - # The C code doesn't do striding. - [XA] = _copy_arrays_if_base_present([_convert_to_double(XA)]) - [XB] = _copy_arrays_if_base_present([_convert_to_double(XB)]) - - s = XA.shape - sB = XB.shape - - if len(s) != 2: - raise ValueError('XA must be a 2-dimensional array.') - if len(sB) != 2: - raise ValueError('XB must be a 2-dimensional array.') - if s[1] != sB[1]: - raise ValueError('XA and XB must have the same number of columns ' - '(i.e. 
feature dimension.)') - - mA = s[0] - mB = sB[0] - n = s[1] - dm = np.zeros((mA, mB), dtype=np.double) - - if callable(metric): - if metric == minkowski: - for i in xrange(0, mA): - for j in xrange(0, mB): - dm[i, j] = minkowski(XA[i, :], XB[j, :], p) - elif metric == wminkowski: - for i in xrange(0, mA): - for j in xrange(0, mB): - dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w) - elif metric == seuclidean: - for i in xrange(0, mA): - for j in xrange(0, mB): - dm[i, j] = seuclidean(XA[i, :], XB[j, :], V) - elif metric == mahalanobis: - for i in xrange(0, mA): - for j in xrange(0, mB): - dm[i, j] = mahalanobis(XA[i, :], XB[j, :], V) - else: - for i in xrange(0, mA): - for j in xrange(0, mB): - dm[i, j] = metric(XA[i, :], XB[j, :]) - elif isinstance(metric, basestring): - mstr = metric.lower() - - #if XA.dtype != np.double and \ - # (mstr != 'hamming' and mstr != 'jaccard'): - # TypeError('A double array must be passed.') - if mstr in set(['euclidean', 'euclid', 'eu', 'e']): - _distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']): - _distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - dm **= 2.0 - elif mstr in set(['cityblock', 'cblock', 'cb', 'c']): - _distance_wrap.cdist_city_block_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr in set(['hamming', 'hamm', 'ha', 'h']): - if XA.dtype == np.bool: - _distance_wrap.cdist_hamming_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), - dm) - else: - _distance_wrap.cdist_hamming_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr in set(['jaccard', 'jacc', 'ja', 'j']): - if XA.dtype == np.bool: - _distance_wrap.cdist_jaccard_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), - dm) - else: - _distance_wrap.cdist_jaccard_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']): 
- _distance_wrap.cdist_chebyshev_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr in set(['minkowski', 'mi', 'm', 'pnorm']): - _distance_wrap.cdist_minkowski_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm, p) - elif mstr in set(['wminkowski', 'wmi', 'wm', 'wpnorm']): - _distance_wrap.cdist_weighted_minkowski_wrap(_convert_to_double(XA), - _convert_to_double(XB), - dm, p, - _convert_to_double(w)) - elif mstr in set(['seuclidean', 'se', 's']): - if V is not None: - V = np.asarray(V, order='c') - if type(V) != np.ndarray: - raise TypeError('Variance vector V must be a numpy array') - if V.dtype != np.double: - raise TypeError('Variance vector V must contain doubles.') - if len(V.shape) != 1: - raise ValueError('Variance vector V must be ' - 'one-dimensional.') - if V.shape[0] != n: - raise ValueError('Variance vector V must be of the same ' - 'dimension as the vectors on which the ' - 'distances are computed.') - # The C code doesn't do striding. - [VV] = _copy_arrays_if_base_present([_convert_to_double(V)]) - else: - X = np.vstack([XA, XB]) - VV = np.var(X, axis=0, ddof=1) - X = None - del X - _distance_wrap.cdist_seuclidean_wrap(_convert_to_double(XA), - _convert_to_double(XB), VV, dm) - # Need to test whether vectorized cosine works better. - # Find out: Is there a dot subtraction operator so I can - # subtract matrices in a similar way to multiplying them? - # Need to get rid of as much unnecessary C code as possible. 
- elif mstr in set(['cosine', 'cos']): - normsA = np.sqrt(np.sum(XA * XA, axis=1)) - normsB = np.sqrt(np.sum(XB * XB, axis=1)) - _distance_wrap.cdist_cosine_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm, - normsA, - normsB) - elif mstr in set(['correlation', 'co']): - XA2 = XA - XA.mean(1)[:, np.newaxis] - XB2 = XB - XB.mean(1)[:, np.newaxis] - #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n) - normsA = np.sqrt(np.sum(XA2 * XA2, axis=1)) - normsB = np.sqrt(np.sum(XB2 * XB2, axis=1)) - _distance_wrap.cdist_cosine_wrap(_convert_to_double(XA2), - _convert_to_double(XB2), - _convert_to_double(dm), - _convert_to_double(normsA), - _convert_to_double(normsB)) - elif mstr in set(['mahalanobis', 'mahal', 'mah']): - if VI is not None: - VI = _convert_to_double(np.asarray(VI, order='c')) - if type(VI) != np.ndarray: - raise TypeError('VI must be a numpy array.') - if VI.dtype != np.double: - raise TypeError('The array must contain 64-bit floats.') - [VI] = _copy_arrays_if_base_present([VI]) - else: - X = np.vstack([XA, XB]) - V = np.cov(X.T) - X = None - del X - VI = _convert_to_double(np.linalg.inv(V).T.copy()) - # (u-v)V^(-1)(u-v)^T - _distance_wrap.cdist_mahalanobis_wrap(_convert_to_double(XA), - _convert_to_double(XB), - VI, dm) - elif mstr == 'canberra': - _distance_wrap.cdist_canberra_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr == 'braycurtis': - _distance_wrap.cdist_bray_curtis_wrap(_convert_to_double(XA), - _convert_to_double(XB), dm) - elif mstr == 'yule': - _distance_wrap.cdist_yule_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), dm) - elif mstr == 'matching': - _distance_wrap.cdist_matching_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), dm) - elif mstr == 'kulsinski': - _distance_wrap.cdist_kulsinski_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), dm) - elif mstr == 'dice': - _distance_wrap.cdist_dice_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), dm) - elif mstr == 
'rogerstanimoto': - _distance_wrap.cdist_rogerstanimoto_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), - dm) - elif mstr == 'russellrao': - _distance_wrap.cdist_russellrao_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), dm) - elif mstr == 'sokalmichener': - _distance_wrap.cdist_sokalmichener_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), - dm) - elif mstr == 'sokalsneath': - _distance_wrap.cdist_sokalsneath_bool_wrap(_convert_to_bool(XA), - _convert_to_bool(XB), - dm) - elif metric == 'test_euclidean': - dm = cdist(XA, XB, euclidean) - elif metric == 'test_seuclidean': - if V is None: - V = np.var(np.vstack([XA, XB]), axis=0, ddof=1) - else: - V = np.asarray(V, order='c') - dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V)) - elif metric == 'test_sqeuclidean': - dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v)) - elif metric == 'test_braycurtis': - dm = cdist(XA, XB, braycurtis) - elif metric == 'test_mahalanobis': - if VI is None: - X = np.vstack([XA, XB]) - V = np.cov(X.T) - VI = np.linalg.inv(V) - X = None - del X - else: - VI = np.asarray(VI, order='c') - [VI] = _copy_arrays_if_base_present([VI]) - # (u-v)V^(-1)(u-v)^T - dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI))) - elif metric == 'test_canberra': - dm = cdist(XA, XB, canberra) - elif metric == 'test_cityblock': - dm = cdist(XA, XB, cityblock) - elif metric == 'test_minkowski': - dm = cdist(XA, XB, minkowski, p=p) - elif metric == 'test_wminkowski': - dm = cdist(XA, XB, wminkowski, p=p, w=w) - elif metric == 'test_cosine': - dm = cdist(XA, XB, cosine) - elif metric == 'test_correlation': - dm = cdist(XA, XB, correlation) - elif metric == 'test_hamming': - dm = cdist(XA, XB, hamming) - elif metric == 'test_jaccard': - dm = cdist(XA, XB, jaccard) - elif metric == 'test_chebyshev' or metric == 'test_chebychev': - dm = cdist(XA, XB, chebyshev) - elif metric == 'test_yule': - dm = cdist(XA, XB, yule) - elif metric == 'test_matching': - dm = cdist(XA, XB, matching) - elif 
metric == 'test_dice': - dm = cdist(XA, XB, dice) - elif metric == 'test_kulsinski': - dm = cdist(XA, XB, kulsinski) - elif metric == 'test_rogerstanimoto': - dm = cdist(XA, XB, rogerstanimoto) - elif metric == 'test_russellrao': - dm = cdist(XA, XB, russellrao) - elif metric == 'test_sokalsneath': - dm = cdist(XA, XB, sokalsneath) - elif metric == 'test_sokalmichener': - dm = cdist(XA, XB, sokalmichener) - else: - raise ValueError('Unknown Distance Metric: %s' % mstr) - else: - raise TypeError('2nd argument metric must be a string identifier ' - 'or a function.') - return dm diff --git a/scipy-0.10.1/scipy/spatial/generate_qhull.py b/scipy-0.10.1/scipy/spatial/generate_qhull.py deleted file mode 100755 index cc7aed736a..0000000000 --- a/scipy-0.10.1/scipy/spatial/generate_qhull.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -import tempfile -import subprocess -import os -import sys -import re -import shutil - -tmp_dir = tempfile.mkdtemp() -try: - # Run Cython - dst_fn = os.path.join(tmp_dir, 'qhull.c') - ret = subprocess.call(['cython', '-o', dst_fn, 'qhull.pyx']) - if ret != 0: - sys.exit(ret) - - # Strip comments - f = open(dst_fn, 'r') - text = f.read() - f.close() - - r = re.compile(r'/\*(.*?)\*/', re.S) - - text = r.sub('', text) - f = open('qhull.c', 'w') - f.write(text) - f.close() -finally: - shutil.rmtree(tmp_dir) diff --git a/scipy-0.10.1/scipy/spatial/kdtree.py b/scipy-0.10.1/scipy/spatial/kdtree.py deleted file mode 100644 index 7a553f89d2..0000000000 --- a/scipy-0.10.1/scipy/spatial/kdtree.py +++ /dev/null @@ -1,881 +0,0 @@ -# Copyright Anne M. Archibald 2008 -# Released under the scipy license -import sys -import numpy as np -from heapq import heappush, heappop -import scipy.sparse - -__all__ = ['minkowski_distance_p', 'minkowski_distance', - 'distance_matrix', - 'Rectangle', 'KDTree'] - - -def minkowski_distance_p(x, y, p=2): - """ - Compute the p-th power of the L**p distance between x and y. 
- - For efficiency, this function computes the L**p distance but does - not extract the pth root. If p is 1 or infinity, this is equal to - the actual L**p distance. - - Parameters - ---------- - x : array_like, M by K - - y : array_like, N by K - - p : float, 1 <= p <= infinity - Which Minkowski p-norm to use. - - Examples - -------- - >>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]]) - array([2, 1]) - - """ - x = np.asarray(x) - y = np.asarray(y) - if p == np.inf: - return np.amax(np.abs(y-x), axis=-1) - elif p == 1: - return np.sum(np.abs(y-x), axis=-1) - else: - return np.sum(np.abs(y-x)**p, axis=-1) - -def minkowski_distance(x, y, p=2): - """ - Compute the L**p distance between x and y. - - Parameters - ---------- - x : array_like, M by K - - y : array_like, N by K - - p : float, 1 <= p <= infinity - Which Minkowski p-norm to use. - - Examples - -------- - >>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]]) - array([ 1.41421356, 1. ]) - - """ - x = np.asarray(x) - y = np.asarray(y) - if p == np.inf or p == 1: - return minkowski_distance_p(x, y, p) - else: - return minkowski_distance_p(x, y, p)**(1./p) - -class Rectangle(object): - """Hyperrectangle class. - - Represents a Cartesian product of intervals. - """ - def __init__(self, maxes, mins): - """Construct a hyperrectangle.""" - self.maxes = np.maximum(maxes,mins).astype(np.float) - self.mins = np.minimum(maxes,mins).astype(np.float) - self.m, = self.maxes.shape - - def __repr__(self): - return "" % zip(self.mins, self.maxes) - - def volume(self): - """Total volume.""" - return np.prod(self.maxes-self.mins) - - def split(self, d, split): - """Produce two hyperrectangles by splitting along axis d. - - In general, if you need to compute maximum and minimum - distances to the children, it can be done more efficiently - by updating the maximum and minimum distances to the parent. 
- """ # FIXME: do this - mid = np.copy(self.maxes) - mid[d] = split - less = Rectangle(self.mins, mid) - mid = np.copy(self.mins) - mid[d] = split - greater = Rectangle(mid, self.maxes) - return less, greater - - def min_distance_point(self, x, p=2.): - """Compute the minimum distance between x and a point in the hyperrectangle.""" - return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p) - - def max_distance_point(self, x, p=2.): - """Compute the maximum distance between x and a point in the hyperrectangle.""" - return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p) - - def min_distance_rectangle(self, other, p=2.): - """Compute the minimum distance between points in the two hyperrectangles.""" - return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p) - - def max_distance_rectangle(self, other, p=2.): - """Compute the maximum distance between points in the two hyperrectangles.""" - return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p) - - -class KDTree(object): - """ - kd-tree for quick nearest-neighbor lookup - - This class provides an index into a set of k-dimensional points - which can be used to rapidly look up the nearest neighbors of any - point. - - The algorithm used is described in Maneewongvatana and Mount 1999. - The general idea is that the kd-tree is a binary tree, each of whose - nodes represents an axis-aligned hyperrectangle. Each node specifies - an axis and splits the set of points based on whether their coordinate - along that axis is greater than or less than a particular value. - - During construction, the axis and splitting point are chosen by the - "sliding midpoint" rule, which ensures that the cells do not all - become long and thin. - - The tree can be queried for the r closest neighbors of any given point - (optionally returning only those within some maximum distance of the - point). 
It can also be queried, with a substantial gain in efficiency, - for the r approximate closest neighbors. - - For large dimensions (20 is already large) do not expect this to run - significantly faster than brute force. High-dimensional nearest-neighbor - queries are a substantial open problem in computer science. - - The tree also supports all-neighbors queries, both with arrays of points - and with other kd-trees. These do use a reasonably efficient algorithm, - but the kd-tree is not necessarily the best data structure for this - sort of calculation. - - """ - def __init__(self, data, leafsize=10): - """Construct a kd-tree. - - Parameters - ---------- - data : array_like, shape (n,k) - The data points to be indexed. This array is not copied, and - so modifying this data will result in bogus results. - leafsize : positive int - The number of points at which the algorithm switches over to - brute-force. - """ - self.data = np.asarray(data) - self.n, self.m = np.shape(self.data) - self.leafsize = int(leafsize) - if self.leafsize<1: - raise ValueError("leafsize must be at least 1") - self.maxes = np.amax(self.data,axis=0) - self.mins = np.amin(self.data,axis=0) - - self.tree = self.__build(np.arange(self.n), self.maxes, self.mins) - - class node(object): - if sys.version_info[0] >= 3: - def __lt__(self, other): id(self) < id(other) - def __gt__(self, other): id(self) > id(other) - def __le__(self, other): id(self) <= id(other) - def __ge__(self, other): id(self) >= id(other) - def __eq__(self, other): id(self) == id(other) - - class leafnode(node): - def __init__(self, idx): - self.idx = idx - self.children = len(idx) - - class innernode(node): - def __init__(self, split_dim, split, less, greater): - self.split_dim = split_dim - self.split = split - self.less = less - self.greater = greater - self.children = less.children+greater.children - - def __build(self, idx, maxes, mins): - if len(idx)<=self.leafsize: - return KDTree.leafnode(idx) - else: - data = 
self.data[idx] - #maxes = np.amax(data,axis=0) - #mins = np.amin(data,axis=0) - d = np.argmax(maxes-mins) - maxval = maxes[d] - minval = mins[d] - if maxval==minval: - # all points are identical; warn user? - return KDTree.leafnode(idx) - data = data[:,d] - - # sliding midpoint rule; see Maneewongvatana and Mount 1999 - # for arguments that this is a good idea. - split = (maxval+minval)/2 - less_idx = np.nonzero(data<=split)[0] - greater_idx = np.nonzero(data>split)[0] - if len(less_idx)==0: - split = np.amin(data) - less_idx = np.nonzero(data<=split)[0] - greater_idx = np.nonzero(data>split)[0] - if len(greater_idx)==0: - split = np.amax(data) - less_idx = np.nonzero(data=split)[0] - if len(less_idx)==0: - # _still_ zero? all must have the same value - if not np.all(data==data[0]): - raise ValueError("Troublesome data array: %s" % data) - split = data[0] - less_idx = np.arange(len(data)-1) - greater_idx = np.array([len(data)-1]) - - lessmaxes = np.copy(maxes) - lessmaxes[d] = split - greatermins = np.copy(mins) - greatermins[d] = split - return KDTree.innernode(d, split, - self.__build(idx[less_idx],lessmaxes,mins), - self.__build(idx[greater_idx],maxes,greatermins)) - - def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf): - - side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x)) - if p!=np.inf: - side_distances**=p - min_distance = np.sum(side_distances) - else: - min_distance = np.amax(side_distances) - - # priority queue for chasing nodes - # entries are: - # minimum distance between the cell and the target - # distances between the nearest side of the cell and the target - # the head node of the cell - q = [(min_distance, - tuple(side_distances), - self.tree)] - # priority queue for the nearest neighbors - # furthest known neighbor first - # entries are (-distance**p, i) - neighbors = [] - - if eps==0: - epsfac=1 - elif p==np.inf: - epsfac = 1/(1+eps) - else: - epsfac = 1/(1+eps)**p - - if p!=np.inf and 
distance_upper_bound!=np.inf: - distance_upper_bound = distance_upper_bound**p - - while q: - min_distance, side_distances, node = heappop(q) - if isinstance(node, KDTree.leafnode): - # brute-force - data = self.data[node.idx] - ds = minkowski_distance_p(data,x[np.newaxis,:],p) - for i in range(len(ds)): - if ds[i]distance_upper_bound*epsfac: - # since this is the nearest cell, we're done, bail out - break - # compute minimum distances to the children and push them on - if x[node.split_dim]>> from scipy.spatial import KDTree - >>> x, y = np.mgrid[0:5, 2:8] - >>> tree = KDTree(zip(x.ravel(), y.ravel())) - >>> tree.data - array([[0, 2], - [0, 3], - [0, 4], - [0, 5], - [0, 6], - [0, 7], - [1, 2], - [1, 3], - [1, 4], - [1, 5], - [1, 6], - [1, 7], - [2, 2], - [2, 3], - [2, 4], - [2, 5], - [2, 6], - [2, 7], - [3, 2], - [3, 3], - [3, 4], - [3, 5], - [3, 6], - [3, 7], - [4, 2], - [4, 3], - [4, 4], - [4, 5], - [4, 6], - [4, 7]]) - >>> pts = np.array([[0, 0], [2.1, 2.9]]) - >>> tree.query(pts) - (array([ 2. 
, 0.14142136]), array([ 0, 13])) - - """ - x = np.asarray(x) - if np.shape(x)[-1] != self.m: - raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x))) - if p<1: - raise ValueError("Only p-norms with 1<=p<=infinity permitted") - retshape = np.shape(x)[:-1] - if retshape!=(): - if k is None: - dd = np.empty(retshape,dtype=np.object) - ii = np.empty(retshape,dtype=np.object) - elif k>1: - dd = np.empty(retshape+(k,),dtype=np.float) - dd.fill(np.inf) - ii = np.empty(retshape+(k,),dtype=np.int) - ii.fill(self.n) - elif k==1: - dd = np.empty(retshape,dtype=np.float) - dd.fill(np.inf) - ii = np.empty(retshape,dtype=np.int) - ii.fill(self.n) - else: - raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None") - for c in np.ndindex(retshape): - hits = self.__query(x[c], k=k, p=p, distance_upper_bound=distance_upper_bound) - if k is None: - dd[c] = [d for (d,i) in hits] - ii[c] = [i for (d,i) in hits] - elif k>1: - for j in range(len(hits)): - dd[c+(j,)], ii[c+(j,)] = hits[j] - elif k==1: - if len(hits)>0: - dd[c], ii[c] = hits[0] - else: - dd[c] = np.inf - ii[c] = self.n - return dd, ii - else: - hits = self.__query(x, k=k, p=p, distance_upper_bound=distance_upper_bound) - if k is None: - return [d for (d,i) in hits], [i for (d,i) in hits] - elif k==1: - if len(hits)>0: - return hits[0] - else: - return np.inf, self.n - elif k>1: - dd = np.empty(k,dtype=np.float) - dd.fill(np.inf) - ii = np.empty(k,dtype=np.int) - ii.fill(self.n) - for j in range(len(hits)): - dd[j], ii[j] = hits[j] - return dd, ii - else: - raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None") - - - def __query_ball_point(self, x, r, p=2., eps=0): - R = Rectangle(self.maxes, self.mins) - - def traverse_checking(node, rect): - if rect.min_distance_point(x, p) > r / (1. + eps): - return [] - elif rect.max_distance_point(x, p) < r * (1. 
+ eps): - return traverse_no_checking(node) - elif isinstance(node, KDTree.leafnode): - d = self.data[node.idx] - return node.idx[minkowski_distance(d, x, p) <= r].tolist() - else: - less, greater = rect.split(node.split_dim, node.split) - return traverse_checking(node.less, less) + \ - traverse_checking(node.greater, greater) - - def traverse_no_checking(node): - if isinstance(node, KDTree.leafnode): - return node.idx.tolist() - else: - return traverse_no_checking(node.less) + \ - traverse_no_checking(node.greater) - - return traverse_checking(self.tree, R) - - def query_ball_point(self, x, r, p=2., eps=0): - """Find all points within distance r of point(s) x. - - Parameters - ---------- - x : array_like, shape tuple + (self.m,) - The point or points to search for neighbors of. - r : positive float - The radius of points to return. - p : float, optional - Which Minkowski p-norm to use. Should be in the range [1, inf]. - eps : nonnegative float, optional - Approximate search. Branches of the tree are not explored if their - nearest points are further than ``r / (1 + eps)``, and branches are - added in bulk if their furthest points are nearer than - ``r * (1 + eps)``. - - Returns - ------- - results : list or array of lists - If `x` is a single point, returns a list of the indices of the - neighbors of `x`. If `x` is an array of points, returns an object - array of shape tuple containing lists of neighbors. - - Notes - ----- - If you have many points whose neighbors you want to find, you may save - substantial amounts of time by putting them in a KDTree and using - query_ball_tree. 
- - Examples - -------- - >>> from scipy import spatial - >>> x, y = np.mgrid[0:4, 0:4] - >>> points = zip(x.ravel(), y.ravel()) - >>> tree = spatial.KDTree(points) - >>> tree.query_ball_point([2, 0], 1) - [4, 8, 9, 12] - - """ - x = np.asarray(x) - if x.shape[-1] != self.m: - raise ValueError("Searching for a %d-dimensional point in a " \ - "%d-dimensional KDTree" % (x.shape[-1], self.m)) - if len(x.shape) == 1: - return self.__query_ball_point(x, r, p, eps) - else: - retshape = x.shape[:-1] - result = np.empty(retshape, dtype=np.object) - for c in np.ndindex(retshape): - result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps) - return result - - def query_ball_tree(self, other, r, p=2., eps=0): - """Find all pairs of points whose distance is at most r - - Parameters - ========== - - other : KDTree - The tree containing points to search against - r : positive float - The maximum distance - p : float 1<=p<=infinity - Which Minkowski norm to use - eps : nonnegative float - Approximate search. Branches of the tree are not explored - if their nearest points are further than r/(1+eps), and branches - are added in bulk if their furthest points are nearer than r*(1+eps). - - Returns - ======= - - results : list of lists - For each element self.data[i] of this tree, results[i] is a list of the - indices of its neighbors in other.data. 
- """ - results = [[] for i in range(self.n)] - def traverse_checking(node1, rect1, node2, rect2): - if rect1.min_distance_rectangle(rect2, p)>r/(1.+eps): - return - elif rect1.max_distance_rectangle(rect2, p)r/(1.+eps): - return - elif rect1.max_distance_rectangle(rect2, p)max_r - result[idx[c_greater]] += node1.children*node2.children - idx = idx[(min_r<=r[idx]) & (r[idx]<=max_r)] - if len(idx)==0: - return - - if isinstance(node1,KDTree.leafnode): - if isinstance(node2,KDTree.leafnode): - ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:], - other.data[node2.idx][np.newaxis,:,:], - p).ravel() - ds.sort() - result[idx] += np.searchsorted(ds,r[idx],side='right') - else: - less, greater = rect2.split(node2.split_dim, node2.split) - traverse(node1, rect1, node2.less, less, idx) - traverse(node1, rect1, node2.greater, greater, idx) - else: - if isinstance(node2,KDTree.leafnode): - less, greater = rect1.split(node1.split_dim, node1.split) - traverse(node1.less, less, node2, rect2, idx) - traverse(node1.greater, greater, node2, rect2, idx) - else: - less1, greater1 = rect1.split(node1.split_dim, node1.split) - less2, greater2 = rect2.split(node2.split_dim, node2.split) - traverse(node1.less,less1,node2.less,less2,idx) - traverse(node1.less,less1,node2.greater,greater2,idx) - traverse(node1.greater,greater1,node2.less,less2,idx) - traverse(node1.greater,greater1,node2.greater,greater2,idx) - R1 = Rectangle(self.maxes, self.mins) - R2 = Rectangle(other.maxes, other.mins) - if np.shape(r) == (): - r = np.array([r]) - result = np.zeros(1,dtype=int) - traverse(self.tree, R1, other.tree, R2, np.arange(1)) - return result[0] - elif len(np.shape(r))==1: - r = np.asarray(r) - n, = r.shape - result = np.zeros(n,dtype=int) - traverse(self.tree, R1, other.tree, R2, np.arange(n)) - return result - else: - raise ValueError("r must be either a single value or a one-dimensional array of values") - - def sparse_distance_matrix(self, other, max_distance, p=2.): - """Compute a 
sparse distance matrix - - Computes a distance matrix between two KDTrees, leaving as zero - any distance greater than max_distance. - - Parameters - ========== - - other : KDTree - - max_distance : positive float - - Returns - ======= - - result : dok_matrix - Sparse matrix representing the results in "dictionary of keys" format. - """ - result = scipy.sparse.dok_matrix((self.n,other.n)) - - def traverse(node1, rect1, node2, rect2): - if rect1.min_distance_rectangle(rect2, p)>max_distance: - return - elif isinstance(node1, KDTree.leafnode): - if isinstance(node2, KDTree.leafnode): - for i in node1.idx: - for j in node2.idx: - d = minkowski_distance(self.data[i],other.data[j],p) - if d<=max_distance: - result[i,j] = d - else: - less, greater = rect2.split(node2.split_dim, node2.split) - traverse(node1,rect1,node2.less,less) - traverse(node1,rect1,node2.greater,greater) - elif isinstance(node2, KDTree.leafnode): - less, greater = rect1.split(node1.split_dim, node1.split) - traverse(node1.less,less,node2,rect2) - traverse(node1.greater,greater,node2,rect2) - else: - less1, greater1 = rect1.split(node1.split_dim, node1.split) - less2, greater2 = rect2.split(node2.split_dim, node2.split) - traverse(node1.less,less1,node2.less,less2) - traverse(node1.less,less1,node2.greater,greater2) - traverse(node1.greater,greater1,node2.less,less2) - traverse(node1.greater,greater1,node2.greater,greater2) - traverse(self.tree, Rectangle(self.maxes, self.mins), - other.tree, Rectangle(other.maxes, other.mins)) - - return result - - -def distance_matrix(x,y,p=2,threshold=1000000): - """ - Compute the distance matrix. - - Returns the matrix of all pair-wise distances. - - Parameters - ---------- - x : array_like, `M` by `K` - TODO: description needed - y : array_like, `N` by `K` - TODO: description needed - p : float, 1 <= p <= infinity - Which Minkowski p-norm to use. 
- threshold : positive integer - If `M * N * K` > threshold, use a Python loop instead of creating - a very large temporary [what? array?]. - - Returns - ------- - result : array_like, `M` by `N` - - Examples - -------- - >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]]) - array([[ 1. , 1.41421356], - [ 1.41421356, 1. ]]) - - """ - - x = np.asarray(x) - m, k = x.shape - y = np.asarray(y) - n, kk = y.shape - - if k != kk: - raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk)) - - if m*n*k <= threshold: - return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p) - else: - result = np.empty((m,n),dtype=np.float) #FIXME: figure out the best dtype - if m -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - 
#define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define 
PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define 
__Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__spatial__qhull -#define __PYX_HAVE_API__scipy__spatial__qhull -#include "numpy/ndarrayobject.h" -#include "stdio.h" -#include "stdlib.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#include "math.h" -#include "qhull/src/qset.h" -#include "qhull/src/qhull.h" -#include "qhull_blas.h" -#ifdef _OPENMP -#include -#endif - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - - -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - - -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || 
(defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - - - - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else - #define likely(x) (x) - #define unlikely(x) (x) - #endif -#else - #define likely(x) (x) - #define unlikely(x) (x) -#endif - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "qhull.pyx", - "numpy.pxd", -}; - - -typedef npy_int8 __pyx_t_5numpy_int8_t; - - -typedef npy_int16 __pyx_t_5numpy_int16_t; - - -typedef npy_int32 __pyx_t_5numpy_int32_t; - - -typedef npy_int64 __pyx_t_5numpy_int64_t; - - -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - - -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - - -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - - -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - - -typedef npy_float32 __pyx_t_5numpy_float32_t; - - -typedef npy_float64 __pyx_t_5numpy_float64_t; - - -typedef npy_long __pyx_t_5numpy_int_t; - - -typedef npy_longlong __pyx_t_5numpy_long_t; - - -typedef npy_longlong __pyx_t_5numpy_longlong_t; - - -typedef npy_ulong __pyx_t_5numpy_uint_t; - - -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - - -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - - -typedef npy_intp __pyx_t_5numpy_intp_t; - - -typedef npy_uintp 
__pyx_t_5numpy_uintp_t; - - -typedef npy_double __pyx_t_5numpy_float_t; - - -typedef npy_double __pyx_t_5numpy_double_t; - - -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - - -struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D; - - -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - - -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - - -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - - -typedef npy_cdouble __pyx_t_5numpy_complex_t; -struct __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t; -typedef struct __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t; -struct __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t; -typedef struct __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t; - - -struct __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t { - int ndim; - int npoints; - int nsimplex; - double *points; - int *vertices; - int *neighbors; - double *equations; - double *transform; - int *vertex_to_simplex; - double paraboloid_scale; - double paraboloid_shift; - double *max_bound; - double *min_bound; -}; - - -struct __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t { - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *info; - int index; - int vertex; - int vertex2; - int triangle; - int start_triangle; - int start_index; - int restart; -}; - - -struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D { - PyObject_HEAD - __pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t it; - PyObject *delaunay; - 
__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t info; -}; - - - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject 
*name); - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact); - - -struct __Pyx_StructField_; - -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - char typegroup; -} __Pyx_TypeInfo; - -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; - -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; - - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -static void __Pyx_RaiseBufferFallbackError(void); - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); -#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) -#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) -#define 
__Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) - -static CYTHON_INLINE long __Pyx_div_long(long, long); - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); -static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else -#define __Pyx_GetBuffer PyObject_GetBuffer -#define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - -Py_ssize_t __Pyx_zeros[] = {0, 0, 0}; -Py_ssize_t __Pyx_minusones[] = {-1, -1, -1}; - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); - -static PyObject *__Pyx_FindPy2Metaclass(PyObject *bases); - -static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, - PyObject *modname); - -#define __pyx_binding_PyCFunctionType_USED 1 - -typedef struct { - PyCFunctionObject func; -} __pyx_binding_PyCFunctionType_object; - -static PyTypeObject __pyx_binding_PyCFunctionType_type; -static PyTypeObject *__pyx_binding_PyCFunctionType = NULL; - -static PyObject *__pyx_binding_PyCFunctionType_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module); -#define __pyx_binding_PyCFunctionType_New(ml, self) __pyx_binding_PyCFunctionType_NewEx(ml, self, NULL) - -static int __pyx_binding_PyCFunctionType_init(void); - -#ifndef __PYX_FORCE_INIT_THREADS - #if PY_VERSION_HEX < 0x02040200 - #define 
__PYX_FORCE_INIT_THREADS 1 - #else - #define __PYX_FORCE_INIT_THREADS 0 - #endif -#endif - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_flagT(flagT); - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define __Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename); - -static int __Pyx_check_binary_version(void); - -static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); - -static PyObject *__Pyx_ImportModule(const char *name); - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); - -static int __Pyx_InitStrings(__Pyx_StringTabEntry 
*t); - - - - - - - - - - - - - - -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); - - - - - - -static PyTypeObject *__pyx_ptype_5scipy_7spatial_5qhull_RidgeIter2D = 0; -static void __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, PyObject *, int, int); -static int __pyx_f_5scipy_7spatial_5qhull__barycentric_inside(int, double *, double *, double *, double); -static void __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinate_single(int, double *, double *, double *, int); -static void __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates(int, double *, double *, double *); -static void __pyx_f_5scipy_7spatial_5qhull__lift_point(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *); -static double __pyx_f_5scipy_7spatial_5qhull__distplane(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int, double *); -static int 
__pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double); -static int __pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, double); -static int __pyx_f_5scipy_7spatial_5qhull__find_simplex_directed(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double); -static int __pyx_f_5scipy_7spatial_5qhull__find_simplex(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double); -static void __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init(__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *, __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int); -static void __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next(__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *); -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_double_t = { "double_t", NULL, sizeof(__pyx_t_5numpy_double_t), 'R' }; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn_npy_int = { "npy_int", NULL, sizeof(npy_int), 'I' }; -#define __Pyx_MODULE_NAME "scipy.spatial.qhull" -int __pyx_module_is_main_scipy__spatial__qhull = 0; - - -static PyObject *__pyx_builtin_object; -static PyObject *__pyx_builtin_property; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_RuntimeError; -static PyObject *__pyx_builtin_xrange; -static PyObject *__pyx_builtin_StopIteration; -static PyObject *__pyx_builtin_range; -static char __pyx_k_1[] = "qhull d Qz Qbb Qt"; -static char __pyx_k_2[] = "No points to triangulate"; -static char __pyx_k_4[] = "Need at least 2-D data to triangulate"; -static char __pyx_k_6[] = "Qhull error"; -static char __pyx_k_8[] = "_qhull_get_facet_array"; -static char __pyx_k_9[] = "qhull: did not free %d bytes (%d pieces)"; -static char __pyx_k_11[] = "non-simplical facet encountered"; -static char __pyx_k_13[] = "RidgeIter2D supports only 2-D"; -static char __pyx_k_15[] = "_get_barycentric_transforms"; -static char 
__pyx_k_18[] = "wrong dimensionality in xi"; -static char __pyx_k_20[] = "xi has different dimensionality than triangulation"; -static char __pyx_k_27[] = "ndarray is not C contiguous"; -static char __pyx_k_29[] = "ndarray is not Fortran contiguous"; -static char __pyx_k_31[] = "Non-native byte order not supported"; -static char __pyx_k_33[] = "unknown dtype code in numpy.pxd (%d)"; -static char __pyx_k_34[] = "Format string allocated too short, see comment in numpy.pxd"; -static char __pyx_k_37[] = "Format string allocated too short."; -static char __pyx_k_39[] = "\nWrappers for Qhull triangulation, plus some additional N-D geometry utilities\n\n.. versionadded:: 0.9\n\n"; -static char __pyx_k_40[] = "scipy.spatial.qhull"; -static char __pyx_k_41[] = "\n Delaunay(points)\n\n Delaunay tesselation in N dimensions\n\n .. versionadded:: 0.9\n\n Parameters\n ----------\n points : ndarray of floats, shape (npoints, ndim)\n Coordinates of points to triangulate\n\n Attributes\n ----------\n points : ndarray of double, shape (npoints, ndim)\n Points in the triangulation\n vertices : ndarray of ints, shape (nsimplex, ndim+1)\n Indices of vertices forming simplices in the triangulation\n neighbors : ndarray of ints, shape (nsimplex, ndim+1)\n Indices of neighbor simplices for each simplex.\n The kth neighbor is opposite to the kth vertex.\n For simplices at the boundary, -1 denotes no neighbor.\n equations : ndarray of double, shape (nsimplex, ndim+2)\n [normal, offset] forming the hyperplane equation of the facet\n on the paraboloid. 
(See [Qhull]_ documentation for more.)\n paraboloid_scale, paraboloid_shift : float\n Scale and shift for the extra paraboloid dimension.\n (See [Qhull]_ documentation for more.)\n transform : ndarray of double, shape (nsimplex, ndim+1, ndim)\n Affine transform from ``x`` to the barycentric coordinates ``c``.\n This is defined by::\n\n T c = x - r\n\n At vertex ``j``, ``c_j = 1`` and the other coordinates zero.\n\n For simplex ``i``, ``transform[i,:ndim,:ndim]`` contains\n inverse of the matrix ``T``, and ``transform[i,ndim,:]``\n contains the vector ``r``.\n vertex_to_simplex : ndarray of int, shape (npoints,)\n Lookup array, from a vertex, to some simplex which it is a part of.\n convex_hull : ndarray of int, shape (nfaces, ndim)\n Vertices of facets forming the convex hull of the point set.\n The array contains the indices of the points belonging to\n the (N-1)-dimensional facets that form the convex hull\n of the triangulation.\n\n Notes\n -----\n The tesselation is computed usi""ng the Qhull libary [Qhull]_.\n\n References\n ----------\n\n .. 
[Qhull] http://www.qhull.org/\n\n "; -static char __pyx_k__B[] = "B"; -static char __pyx_k__H[] = "H"; -static char __pyx_k__I[] = "I"; -static char __pyx_k__L[] = "L"; -static char __pyx_k__O[] = "O"; -static char __pyx_k__Q[] = "Q"; -static char __pyx_k__b[] = "b"; -static char __pyx_k__d[] = "d"; -static char __pyx_k__f[] = "f"; -static char __pyx_k__g[] = "g"; -static char __pyx_k__h[] = "h"; -static char __pyx_k__i[] = "i"; -static char __pyx_k__l[] = "l"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__x[] = "x"; -static char __pyx_k__Zd[] = "Zd"; -static char __pyx_k__Zf[] = "Zf"; -static char __pyx_k__Zg[] = "Zg"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__xi[] = "xi"; -static char __pyx_k__eps[] = "eps"; -static char __pyx_k__max[] = "max"; -static char __pyx_k__min[] = "min"; -static char __pyx_k__nan[] = "nan"; -static char __pyx_k__sum[] = "sum"; -static char __pyx_k__tri[] = "tri"; -static char __pyx_k__Lock[] = "Lock"; -static char __pyx_k__axis[] = "axis"; -static char __pyx_k__fill[] = "fill"; -static char __pyx_k__intc[] = "intc"; -static char __pyx_k__ndim[] = "ndim"; -static char __pyx_k__prod[] = "prod"; -static char __pyx_k__self[] = "self"; -static char __pyx_k__dtype[] = "dtype"; -static char __pyx_k__empty[] = "empty"; -static char __pyx_k__finfo[] = "finfo"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__range[] = "range"; -static char __pyx_k__shape[] = "shape"; -static char __pyx_k__zeros[] = "zeros"; -static char __pyx_k__astype[] = "astype"; -static char __pyx_k__double[] = "double"; -static char __pyx_k__object[] = "object"; -static char __pyx_k__points[] = "points"; -static char __pyx_k__resize[] = "resize"; -static char __pyx_k__xrange[] = "xrange"; -static char __pyx_k____all__[] = "__all__"; -static char __pyx_k__acquire[] = "acquire"; -static char __pyx_k__ivertex[] = "ivertex"; -static char __pyx_k__npoints[] = "npoints"; -static char __pyx_k__release[] = "release"; -static char 
__pyx_k__reshape[] = "reshape"; -static char __pyx_k__tsearch[] = "tsearch"; -static char __pyx_k__Delaunay[] = "Delaunay"; -static char __pyx_k____init__[] = "__init__"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__delaunay[] = "delaunay"; -static char __pyx_k__nsimplex[] = "nsimplex"; -static char __pyx_k__property[] = "property"; -static char __pyx_k__vertices[] = "vertices"; -static char __pyx_k__equations[] = "equations"; -static char __pyx_k__max_bound[] = "max_bound"; -static char __pyx_k__min_bound[] = "min_bound"; -static char __pyx_k__neighbors[] = "neighbors"; -static char __pyx_k__numpoints[] = "numpoints"; -static char __pyx_k__threading[] = "threading"; -static char __pyx_k__transform[] = "transform"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k___transform[] = "_transform"; -static char __pyx_k__asanyarray[] = "asanyarray"; -static char __pyx_k__bruteforce[] = "bruteforce"; -static char __pyx_k___qhull_lock[] = "_qhull_lock"; -static char __pyx_k__convex_hull[] = "convex_hull"; -static char __pyx_k__lift_points[] = "lift_points"; -static char __pyx_k__RuntimeError[] = "RuntimeError"; -static char __pyx_k__find_simplex[] = "find_simplex"; -static char __pyx_k__StopIteration[] = "StopIteration"; -static char __pyx_k__plane_distance[] = "plane_distance"; -static char __pyx_k__paraboloid_scale[] = "paraboloid_scale"; -static char __pyx_k__paraboloid_shift[] = "paraboloid_shift"; -static char __pyx_k__ascontiguousarray[] = "ascontiguousarray"; -static char __pyx_k__vertex_to_simplex[] = "vertex_to_simplex"; -static char __pyx_k___vertex_to_simplex[] = "_vertex_to_simplex"; -static char __pyx_k___construct_delaunay[] = "_construct_delaunay"; -static PyObject *__pyx_kp_s_11; -static PyObject *__pyx_kp_s_13; -static PyObject *__pyx_n_s_15; -static PyObject *__pyx_kp_s_18; -static PyObject *__pyx_kp_s_2; -static PyObject *__pyx_kp_s_20; -static PyObject 
*__pyx_kp_u_27; -static PyObject *__pyx_kp_u_29; -static PyObject *__pyx_kp_u_31; -static PyObject *__pyx_kp_u_33; -static PyObject *__pyx_kp_u_34; -static PyObject *__pyx_kp_u_37; -static PyObject *__pyx_kp_s_4; -static PyObject *__pyx_n_s_40; -static PyObject *__pyx_kp_s_41; -static PyObject *__pyx_kp_s_6; -static PyObject *__pyx_n_s_8; -static PyObject *__pyx_kp_s_9; -static PyObject *__pyx_n_s__Delaunay; -static PyObject *__pyx_n_s__Lock; -static PyObject *__pyx_n_s__RuntimeError; -static PyObject *__pyx_n_s__StopIteration; -static PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s____all__; -static PyObject *__pyx_n_s____init__; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s___construct_delaunay; -static PyObject *__pyx_n_s___qhull_lock; -static PyObject *__pyx_n_s___transform; -static PyObject *__pyx_n_s___vertex_to_simplex; -static PyObject *__pyx_n_s__acquire; -static PyObject *__pyx_n_s__asanyarray; -static PyObject *__pyx_n_s__ascontiguousarray; -static PyObject *__pyx_n_s__astype; -static PyObject *__pyx_n_s__axis; -static PyObject *__pyx_n_s__bruteforce; -static PyObject *__pyx_n_s__convex_hull; -static PyObject *__pyx_n_s__delaunay; -static PyObject *__pyx_n_s__double; -static PyObject *__pyx_n_s__dtype; -static PyObject *__pyx_n_s__empty; -static PyObject *__pyx_n_s__eps; -static PyObject *__pyx_n_s__equations; -static PyObject *__pyx_n_s__fill; -static PyObject *__pyx_n_s__find_simplex; -static PyObject *__pyx_n_s__finfo; -static PyObject *__pyx_n_s__intc; -static PyObject *__pyx_n_s__ivertex; -static PyObject *__pyx_n_s__lift_points; -static PyObject *__pyx_n_s__max; -static PyObject *__pyx_n_s__max_bound; -static PyObject *__pyx_n_s__min; -static PyObject *__pyx_n_s__min_bound; -static PyObject *__pyx_n_s__nan; -static PyObject *__pyx_n_s__ndim; -static PyObject *__pyx_n_s__neighbors; -static PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__npoints; -static PyObject 
*__pyx_n_s__nsimplex; -static PyObject *__pyx_n_s__numpoints; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__object; -static PyObject *__pyx_n_s__paraboloid_scale; -static PyObject *__pyx_n_s__paraboloid_shift; -static PyObject *__pyx_n_s__plane_distance; -static PyObject *__pyx_n_s__points; -static PyObject *__pyx_n_s__prod; -static PyObject *__pyx_n_s__property; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__release; -static PyObject *__pyx_n_s__reshape; -static PyObject *__pyx_n_s__resize; -static PyObject *__pyx_n_s__self; -static PyObject *__pyx_n_s__shape; -static PyObject *__pyx_n_s__sum; -static PyObject *__pyx_n_s__threading; -static PyObject *__pyx_n_s__transform; -static PyObject *__pyx_n_s__tri; -static PyObject *__pyx_n_s__tsearch; -static PyObject *__pyx_n_s__vertex_to_simplex; -static PyObject *__pyx_n_s__vertices; -static PyObject *__pyx_n_s__x; -static PyObject *__pyx_n_s__xi; -static PyObject *__pyx_n_s__xrange; -static PyObject *__pyx_n_s__zeros; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_neg_1; -static PyObject *__pyx_int_10; -static PyObject *__pyx_int_15; -static PyObject *__pyx_k_17; -static PyObject *__pyx_k_tuple_3; -static PyObject *__pyx_k_tuple_5; -static PyObject *__pyx_k_tuple_7; -static PyObject *__pyx_k_slice_22; -static PyObject *__pyx_k_tuple_10; -static PyObject *__pyx_k_tuple_12; -static PyObject *__pyx_k_tuple_14; -static PyObject *__pyx_k_tuple_16; -static PyObject *__pyx_k_tuple_19; -static PyObject *__pyx_k_tuple_21; -static PyObject *__pyx_k_tuple_23; -static PyObject *__pyx_k_tuple_24; -static PyObject *__pyx_k_tuple_25; -static PyObject *__pyx_k_tuple_26; -static PyObject *__pyx_k_tuple_28; -static PyObject *__pyx_k_tuple_30; -static PyObject *__pyx_k_tuple_32; -static PyObject *__pyx_k_tuple_35; -static PyObject *__pyx_k_tuple_36; -static PyObject *__pyx_k_tuple_38; - - - -static PyObject 
*__pyx_pf_5scipy_7spatial_5qhull__construct_delaunay(PyObject *__pyx_self, PyObject *__pyx_v_points); -static char __pyx_doc_5scipy_7spatial_5qhull__construct_delaunay[] = "\n Perform Delaunay triangulation of the given set of points.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull__construct_delaunay = {__Pyx_NAMESTR("_construct_delaunay"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull__construct_delaunay, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull__construct_delaunay)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull__construct_delaunay(PyObject *__pyx_self, PyObject *__pyx_v_points) { - char *__pyx_v_options; - int __pyx_v_curlong; - int __pyx_v_totlong; - int __pyx_v_dim; - int __pyx_v_numpoints; - int __pyx_v_exitcode; - double __pyx_v_paraboloid_scale; - double __pyx_v_paraboloid_shift; - PyObject *__pyx_v_vertices = NULL; - PyObject *__pyx_v_neighbors = NULL; - PyObject *__pyx_v_equations = NULL; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyArrayObject *__pyx_t_4 = NULL; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - realT __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyObject *(*__pyx_t_13)(PyObject *); - int __pyx_t_14; - int __pyx_t_15; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_construct_delaunay"); - __pyx_self = __pyx_self; - __Pyx_INCREF((PyObject *)__pyx_v_points); - __pyx_bstruct_points.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_points), __pyx_ptype_5numpy_ndarray, 1, "points", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno 
= 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_v_points, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - - - __pyx_v_options = __pyx_k_1; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = __pyx_t_3; - { - 
__Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __pyx_t_5 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_5 < 0)) { - PyErr_Fetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_v_points, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_6); Py_XDECREF(__pyx_t_7); Py_XDECREF(__pyx_t_8); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_6, __pyx_t_7, __pyx_t_8); - } - } - __pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_v_numpoints = (((PyArrayObject *)__pyx_v_points)->dimensions[0]); - - - __pyx_v_dim = (((PyArrayObject *)__pyx_v_points)->dimensions[1]); - - - __pyx_t_9 = (__pyx_v_numpoints <= 0); - if (__pyx_t_9) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_3), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_9 = (__pyx_v_dim < 2); - if (__pyx_t_9) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_5), 
NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s___qhull_lock); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__acquire); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - { - - - qh_qh.NOerrexit = 1; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_v_exitcode = qh_new_qhull(__pyx_v_dim, __pyx_v_numpoints, ((realT *)((PyArrayObject *)__pyx_v_points)->data), 0, __pyx_v_options, NULL, stderr); - } - - - { - Py_BLOCK_THREADS - } - } - - - { - - - __pyx_t_9 = (__pyx_v_exitcode != 0); - if (__pyx_t_9) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L14;} - goto __pyx_L16; - } - 
__pyx_L16:; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - qh_triangulate(); - } - - - { - Py_BLOCK_THREADS - } - } - - - if (qh_qh.SCALElast) { - - - __pyx_t_10 = (qh_qh.last_high - qh_qh.last_low); - if (unlikely(__pyx_t_10 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L14;} - } - __pyx_v_paraboloid_scale = (qh_qh.last_newhigh / __pyx_t_10); - - - __pyx_v_paraboloid_shift = ((-qh_qh.last_low) * __pyx_v_paraboloid_scale); - goto __pyx_L20; - } - { - - - __pyx_v_paraboloid_scale = 1.0; - - - __pyx_v_paraboloid_shift = 0.0; - } - __pyx_L20:; - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyInt_FromLong(__pyx_v_dim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromLong(__pyx_v_numpoints); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(((PyObject *)__pyx_t_11)); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_11), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_11)); __pyx_t_11 = 0; - if 
((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { - PyObject* sequence = __pyx_t_2; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 3)) { - if (PyTuple_GET_SIZE(sequence) > 3) __Pyx_RaiseTooManyValuesError(3); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L14;} - } - __pyx_t_11 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 2); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 3)) { - if (PyList_GET_SIZE(sequence) > 3) __Pyx_RaiseTooManyValuesError(3); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L14;} - } - __pyx_t_11 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - __pyx_t_1 = PyList_GET_ITEM(sequence, 2); - } - __Pyx_INCREF(__pyx_t_11); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_12 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_13 = Py_TYPE(__pyx_t_12)->tp_iternext; - index = 0; __pyx_t_11 = __pyx_t_13(__pyx_t_12); if (unlikely(!__pyx_t_11)) goto __pyx_L21_unpacking_failed; - __Pyx_GOTREF(__pyx_t_11); - index = 1; __pyx_t_3 = __pyx_t_13(__pyx_t_12); if (unlikely(!__pyx_t_3)) goto __pyx_L21_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - index = 2; __pyx_t_1 = __pyx_t_13(__pyx_t_12); if (unlikely(!__pyx_t_1)) goto __pyx_L21_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_13(__pyx_t_12), 3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno 
= __LINE__; goto __pyx_L14;} - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - goto __pyx_L22_unpacking_done; - __pyx_L21_unpacking_failed:; - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L14;} - __pyx_L22_unpacking_done:; - } - - - __pyx_v_vertices = __pyx_t_11; - __pyx_t_11 = 0; - __pyx_v_neighbors = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_v_equations = __pyx_t_1; - __pyx_t_1 = 0; - - - __Pyx_XDECREF(__pyx_r); - - - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_paraboloid_scale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyFloat_FromDouble(__pyx_v_paraboloid_shift); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L14;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_vertices); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_vertices); - __Pyx_GIVEREF(__pyx_v_vertices); - __Pyx_INCREF(__pyx_v_neighbors); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_neighbors); - __Pyx_GIVEREF(__pyx_v_neighbors); - __Pyx_INCREF(__pyx_v_equations); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_equations); - __Pyx_GIVEREF(__pyx_v_equations); - PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 4, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_2 = 0; - __pyx_t_1 = 0; - __pyx_r = ((PyObject *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L13; - } - - - { - int __pyx_why; - PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb; - int __pyx_exc_lineno; - 
__pyx_exc_type = 0; __pyx_exc_value = 0; __pyx_exc_tb = 0; __pyx_exc_lineno = 0; - __pyx_why = 0; goto __pyx_L15; - __pyx_L13: __pyx_exc_type = 0; __pyx_exc_value = 0; __pyx_exc_tb = 0; __pyx_exc_lineno = 0; - __pyx_why = 3; goto __pyx_L15; - __pyx_L14: { - __pyx_why = 4; - __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_ErrFetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb); - __pyx_exc_lineno = __pyx_lineno; - goto __pyx_L15; - } - __pyx_L15:; - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - qh_freeqhull(0); - - - qh_memfreeshort((&__pyx_v_curlong), (&__pyx_v_totlong)); - } - - - { - Py_BLOCK_THREADS - } - } - - - __pyx_t_9 = (__pyx_v_curlong != 0); - if (!__pyx_t_9) { - __pyx_t_14 = (__pyx_v_totlong != 0); - __pyx_t_15 = __pyx_t_14; - } else { - __pyx_t_15 = __pyx_t_9; - } - if (__pyx_t_15) { - - - __pyx_t_3 = PyInt_FromLong(__pyx_v_totlong); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L23_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyInt_FromLong(__pyx_v_curlong); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L23_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L23_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_9), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = 
__LINE__; goto __pyx_L23_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L23_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L23_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L23_error;} - goto __pyx_L27; - } - __pyx_L27:; - goto __pyx_L28; - __pyx_L23_error:; - if (__pyx_why == 4) { - Py_XDECREF(__pyx_exc_type); - Py_XDECREF(__pyx_exc_value); - Py_XDECREF(__pyx_exc_tb); - } - goto __pyx_L8; - __pyx_L28:; - switch (__pyx_why) { - case 3: goto __pyx_L7; - case 4: { - __Pyx_ErrRestore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb); - __pyx_lineno = __pyx_exc_lineno; - __pyx_exc_type = 0; - __pyx_exc_value = 0; - __pyx_exc_tb = 0; - goto __pyx_L8; - } - } - } - } - - - { - int __pyx_why; - PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb; - int __pyx_exc_lineno; - __pyx_exc_type = 0; __pyx_exc_value = 0; __pyx_exc_tb = 0; __pyx_exc_lineno = 0; - __pyx_why = 0; goto __pyx_L9; - __pyx_L7: __pyx_exc_type = 0; __pyx_exc_value = 0; __pyx_exc_tb = 0; __pyx_exc_lineno = 0; - __pyx_why = 3; goto __pyx_L9; - __pyx_L8: { - __pyx_why = 4; - __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; 
- __Pyx_ErrFetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb); - __pyx_exc_lineno = __pyx_lineno; - goto __pyx_L9; - } - __pyx_L9:; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___qhull_lock); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L29_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__release); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L29_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L29_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L30; - __pyx_L29_error:; - if (__pyx_why == 4) { - Py_XDECREF(__pyx_exc_type); - Py_XDECREF(__pyx_exc_value); - Py_XDECREF(__pyx_exc_tb); - } - goto __pyx_L1_error; - __pyx_L30:; - switch (__pyx_why) { - case 3: goto __pyx_L0; - case 4: { - __Pyx_ErrRestore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb); - __pyx_lineno = __pyx_exc_lineno; - __pyx_exc_type = 0; - __pyx_exc_value = 0; - __pyx_exc_tb = 0; - goto __pyx_L1_error; - } - } - } - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_XDECREF(__pyx_t_12); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull._construct_delaunay", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - 
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __pyx_L2:; - __Pyx_XDECREF(__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_neighbors); - __Pyx_XDECREF(__pyx_v_equations); - __Pyx_XDECREF((PyObject *)__pyx_v_points); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_1_qhull_get_facet_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_5scipy_7spatial_5qhull_1_qhull_get_facet_array[] = "\n Return array of simplical facets currently in Qhull.\n\n Returns\n -------\n vertices : array of int, shape (nfacets, ndim+1)\n Indices of coordinates of vertices forming the simplical facets\n neighbors : array of int, shape (nfacets, ndim)\n Indices of neighboring facets. The kth neighbor is opposite\n the kth vertex, and the first neighbor is the horizon facet\n for the first vertex.\n\n Facets extending to infinity are denoted with index -1.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_1_qhull_get_facet_array = {__Pyx_NAMESTR("_qhull_get_facet_array"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_1_qhull_get_facet_array, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_1_qhull_get_facet_array)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_1_qhull_get_facet_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - int __pyx_v_ndim; - int __pyx_v_numpoints; - facetT *__pyx_v_facet; - facetT *__pyx_v_neighbor; - vertexT *__pyx_v_vertex; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_point; - int __pyx_v_error_non_simplical; - PyArrayObject *__pyx_v_vertices = 0; - PyArrayObject *__pyx_v_neighbors = 0; - PyArrayObject *__pyx_v_equations = 0; - PyArrayObject *__pyx_v_id_map = 0; - Py_buffer __pyx_bstruct_neighbors; - Py_ssize_t __pyx_bstride_0_neighbors = 0; - Py_ssize_t __pyx_bstride_1_neighbors = 0; - Py_ssize_t __pyx_bshape_0_neighbors = 0; - Py_ssize_t __pyx_bshape_1_neighbors = 0; - 
Py_buffer __pyx_bstruct_id_map; - Py_ssize_t __pyx_bstride_0_id_map = 0; - Py_ssize_t __pyx_bshape_0_id_map = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_equations; - Py_ssize_t __pyx_bstride_0_equations = 0; - Py_ssize_t __pyx_bstride_1_equations = 0; - Py_ssize_t __pyx_bshape_0_equations = 0; - Py_ssize_t __pyx_bshape_1_equations = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - int __pyx_t_11; - int __pyx_t_12; - unsigned int __pyx_t_13; - PyArrayObject *__pyx_t_14 = NULL; - PyArrayObject *__pyx_t_15 = NULL; - PyArrayObject *__pyx_t_16 = NULL; - long __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - unsigned int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - int __pyx_t_23; - int __pyx_t_24; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__ndim,&__pyx_n_s__numpoints,0}; - __Pyx_RefNannySetupContext("_qhull_get_facet_array"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ndim); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s__numpoints); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_qhull_get_facet_array", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_qhull_get_facet_array") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_ndim = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_ndim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_v_numpoints = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_numpoints == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_ndim = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_ndim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_v_numpoints = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_numpoints == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_qhull_get_facet_array", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull._qhull_get_facet_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - 
__pyx_L4_argument_unpacking_done:; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_neighbors.buf = NULL; - __pyx_bstruct_equations.buf = NULL; - __pyx_bstruct_id_map.buf = NULL; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyLong_FromUnsignedLong(qh_qh.facet_id); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__intc); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__dtype), __pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_id_map); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_id_map, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_id_map, (PyObject*)__pyx_v_id_map, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); - } - } - __pyx_bstride_0_id_map = __pyx_bstruct_id_map.strides[0]; - __pyx_bshape_0_id_map = __pyx_bstruct_id_map.shape[0]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_6 = 0; - __pyx_v_id_map = 
((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - - __pyx_t_5 = PyObject_GetAttr(((PyObject *)__pyx_v_id_map), __pyx_n_s__fill); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - - __pyx_v_facet = qh_qh.facet_list; - - - __pyx_v_j = 0; - - - while (1) { - if ((__pyx_v_facet != 0)) { - __pyx_t_11 = (__pyx_v_facet->next != 0); - } else { - __pyx_t_11 = (__pyx_v_facet != 0); - } - if (!__pyx_t_11) break; - - - if (__pyx_v_facet->simplicial) { - __pyx_t_11 = (!__pyx_v_facet->upperdelaunay); - __pyx_t_12 = __pyx_t_11; - } else { - __pyx_t_12 = __pyx_v_facet->simplicial; - } - if (__pyx_t_12) { - - - __pyx_t_13 = __pyx_v_facet->id; - *__Pyx_BufPtrStrided1d(npy_int *, __pyx_bstruct_id_map.buf, __pyx_t_13, __pyx_bstride_0_id_map) = __pyx_v_j; - - - __pyx_v_j = (__pyx_v_j + 1); - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_v_facet = __pyx_v_facet->next; - } - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyInt_FromLong(__pyx_v_j); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyInt_FromLong((__pyx_v_ndim + 1)); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__intc); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 
= 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_14 = ((PyArrayObject *)__pyx_t_4); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_v_vertices, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); - } - } - __pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_14 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_4); - __pyx_t_4 = 0; - - - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyInt_FromLong(__pyx_v_j); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyInt_FromLong((__pyx_v_ndim + 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__intc); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_t_3 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_15 = ((PyArrayObject *)__pyx_t_3); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_neighbors, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_neighbors, (PyObject*)__pyx_v_neighbors, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); - } - } - __pyx_bstride_0_neighbors = __pyx_bstruct_neighbors.strides[0]; __pyx_bstride_1_neighbors = __pyx_bstruct_neighbors.strides[1]; - __pyx_bshape_0_neighbors = __pyx_bstruct_neighbors.shape[0]; __pyx_bshape_1_neighbors = __pyx_bstruct_neighbors.shape[1]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_15 = 0; - __pyx_v_neighbors = ((PyArrayObject *)__pyx_t_3); - __pyx_t_3 = 0; - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyInt_FromLong(__pyx_v_j); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyInt_FromLong((__pyx_v_ndim + 2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_16 = ((PyArrayObject *)__pyx_t_4); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_equations); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_equations, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_equations, (PyObject*)__pyx_v_equations, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); - } - } - __pyx_bstride_0_equations = __pyx_bstruct_equations.strides[0]; __pyx_bstride_1_equations = __pyx_bstruct_equations.strides[1]; - __pyx_bshape_0_equations = __pyx_bstruct_equations.shape[0]; __pyx_bshape_1_equations = __pyx_bstruct_equations.shape[1]; - 
if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_16 = 0; - __pyx_v_equations = ((PyArrayObject *)__pyx_t_4); - __pyx_t_4 = 0; - - - __pyx_v_error_non_simplical = 0; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_v_facet = qh_qh.facet_list; - - - __pyx_v_j = 0; - - - while (1) { - if ((__pyx_v_facet != 0)) { - __pyx_t_12 = (__pyx_v_facet->next != 0); - } else { - __pyx_t_12 = (__pyx_v_facet != 0); - } - if (!__pyx_t_12) break; - - - __pyx_t_12 = (!__pyx_v_facet->simplicial); - if (__pyx_t_12) { - - - __pyx_v_error_non_simplical = 1; - - - goto __pyx_L13_break; - goto __pyx_L14; - } - __pyx_L14:; - - - if (__pyx_v_facet->upperdelaunay) { - - - __pyx_v_facet = __pyx_v_facet->next; - - - goto __pyx_L12_continue; - goto __pyx_L15; - } - __pyx_L15:; - - - __pyx_t_17 = (__pyx_v_ndim + 1); - for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_17; __pyx_t_7+=1) { - __pyx_v_i = __pyx_t_7; - - - __pyx_v_vertex = ((vertexT *)(__pyx_v_facet->vertices->e[__pyx_v_i]).p); - - - __pyx_v_point = qh_pointid(__pyx_v_vertex->point); - - - __pyx_t_18 = __pyx_v_j; - __pyx_t_19 = __pyx_v_i; - if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_0_vertices; - if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_1_vertices; - *__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_18, __pyx_bstride_0_vertices, __pyx_t_19, __pyx_bstride_1_vertices) = __pyx_v_point; - } - - - __pyx_t_17 = (__pyx_v_ndim + 1); - for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_17; __pyx_t_7+=1) { - __pyx_v_i = __pyx_t_7; - - - __pyx_v_neighbor = ((facetT *)(__pyx_v_facet->neighbors->e[__pyx_v_i]).p); - - - __pyx_t_20 = __pyx_v_neighbor->id; - __pyx_t_21 = __pyx_v_j; - __pyx_t_22 = __pyx_v_i; - if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_bshape_0_neighbors; - if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_bshape_1_neighbors; - *__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_neighbors.buf, 
__pyx_t_21, __pyx_bstride_0_neighbors, __pyx_t_22, __pyx_bstride_1_neighbors) = (*__Pyx_BufPtrStrided1d(npy_int *, __pyx_bstruct_id_map.buf, __pyx_t_20, __pyx_bstride_0_id_map)); - } - - - __pyx_t_17 = (__pyx_v_ndim + 1); - for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_17; __pyx_t_7+=1) { - __pyx_v_i = __pyx_t_7; - - - __pyx_t_23 = __pyx_v_j; - __pyx_t_24 = __pyx_v_i; - if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_bshape_0_equations; - if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_bshape_1_equations; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_equations.buf, __pyx_t_23, __pyx_bstride_0_equations, __pyx_t_24, __pyx_bstride_1_equations) = (__pyx_v_facet->normal[__pyx_v_i]); - } - - - __pyx_t_7 = __pyx_v_j; - __pyx_t_17 = (__pyx_v_ndim + 1); - if (__pyx_t_7 < 0) __pyx_t_7 += __pyx_bshape_0_equations; - if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_1_equations; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_equations.buf, __pyx_t_7, __pyx_bstride_0_equations, __pyx_t_17, __pyx_bstride_1_equations) = __pyx_v_facet->offset; - - - __pyx_v_j = (__pyx_v_j + 1); - - - __pyx_v_facet = __pyx_v_facet->next; - __pyx_L12_continue:; - } - __pyx_L13_break:; - } - - - { - Py_BLOCK_THREADS - } - } - - - if (__pyx_v_error_non_simplical) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L22; - } - __pyx_L22:; - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __Pyx_INCREF(((PyObject *)__pyx_v_vertices)); - 
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_vertices)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_vertices)); - __Pyx_INCREF(((PyObject *)__pyx_v_neighbors)); - PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_neighbors)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_neighbors)); - __Pyx_INCREF(((PyObject *)__pyx_v_equations)); - PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_equations)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_equations)); - __pyx_r = ((PyObject *)__pyx_t_4); - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_id_map); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_equations); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull._qhull_get_facet_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_id_map); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_equations); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF((PyObject *)__pyx_v_neighbors); - __Pyx_XDECREF((PyObject *)__pyx_v_equations); - __Pyx_XDECREF((PyObject *)__pyx_v_id_map); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_2_get_barycentric_transforms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_5scipy_7spatial_5qhull_2_get_barycentric_transforms[] = "\n 
Compute barycentric affine coordinate transformations for given\n simplices.\n\n Returns\n -------\n Tinvs : array, shape (nsimplex, ndim+1, ndim)\n Barycentric transforms for each simplex.\n\n Tinvs[i,:ndim,:ndim] contains inverse of the matrix ``T``,\n and Tinvs[i,ndim,:] contains the vector ``r_n`` (see below).\n\n Notes\n -----\n Barycentric transform from ``x`` to ``c`` is defined by::\n\n T c = x - r_n\n\n where the ``r_1, ..., r_n`` are the vertices of the simplex.\n The matrix ``T`` is defined by the condition::\n\n T e_j = r_j - r_n\n\n where ``e_j`` is the unit axis vector, e.g, ``e_2 = [0,1,0,0,...]``\n This implies that ``T_ij = (r_j - r_n)_i``.\n\n For the barycentric transforms, we need to compute the inverse\n matrix ``T^-1`` and store the vectors ``r_n`` for each vertex.\n These are stacked into the `Tinvs` returned.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_2_get_barycentric_transforms = {__Pyx_NAMESTR("_get_barycentric_transforms"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_2_get_barycentric_transforms, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_2_get_barycentric_transforms)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_2_get_barycentric_transforms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_points = 0; - PyArrayObject *__pyx_v_vertices = 0; - PyArrayObject *__pyx_v_T = 0; - PyArrayObject *__pyx_v_Tinvs = 0; - int __pyx_v_ivertex; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_v_n; - int __pyx_v_nrhs; - int __pyx_v_lda; - int __pyx_v_ldb; - int __pyx_v_info; - int __pyx_v_ipiv[(NPY_MAXDIMS + 1)]; - int __pyx_v_ndim; - int __pyx_v_nvertex; - double __pyx_v_nan; - double __pyx_v_x1; - double __pyx_v_x2; - double __pyx_v_x3; - double __pyx_v_y1; - double __pyx_v_y2; - double __pyx_v_y3; - double __pyx_v_det; - Py_buffer __pyx_bstruct_Tinvs; - Py_ssize_t __pyx_bstride_0_Tinvs = 0; - Py_ssize_t __pyx_bstride_1_Tinvs = 0; - Py_ssize_t 
__pyx_bstride_2_Tinvs = 0; - Py_ssize_t __pyx_bshape_0_Tinvs = 0; - Py_ssize_t __pyx_bshape_1_Tinvs = 0; - Py_ssize_t __pyx_bshape_2_Tinvs = 0; - Py_buffer __pyx_bstruct_T; - Py_ssize_t __pyx_bstride_0_T = 0; - Py_ssize_t __pyx_bstride_1_T = 0; - Py_ssize_t __pyx_bshape_0_T = 0; - Py_ssize_t __pyx_bshape_1_T = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - double __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyArrayObject *__pyx_t_7 = NULL; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyArrayObject *__pyx_t_12 = NULL; - int __pyx_t_13; - int __pyx_t_14; - int __pyx_t_15; - long __pyx_t_16; - npy_int __pyx_t_17; - long __pyx_t_18; - int __pyx_t_19; - long __pyx_t_20; - npy_int __pyx_t_21; - long __pyx_t_22; - int __pyx_t_23; - long __pyx_t_24; - npy_int __pyx_t_25; - long __pyx_t_26; - int __pyx_t_27; - long __pyx_t_28; - npy_int __pyx_t_29; - long __pyx_t_30; - int __pyx_t_31; - long __pyx_t_32; - npy_int __pyx_t_33; - long __pyx_t_34; - int __pyx_t_35; - long __pyx_t_36; - npy_int __pyx_t_37; - long __pyx_t_38; - int __pyx_t_39; - long __pyx_t_40; - long __pyx_t_41; - int __pyx_t_42; - long __pyx_t_43; - long __pyx_t_44; - int __pyx_t_45; - long __pyx_t_46; - long __pyx_t_47; - int __pyx_t_48; - long __pyx_t_49; - long __pyx_t_50; - int __pyx_t_51; - long __pyx_t_52; - long __pyx_t_53; - int __pyx_t_54; - long __pyx_t_55; - long __pyx_t_56; - int __pyx_t_57; - int __pyx_t_58; 
- int __pyx_t_59; - int __pyx_t_60; - npy_int __pyx_t_61; - int __pyx_t_62; - int __pyx_t_63; - int __pyx_t_64; - int __pyx_t_65; - int __pyx_t_66; - int __pyx_t_67; - int __pyx_t_68; - int __pyx_t_69; - npy_int __pyx_t_70; - int __pyx_t_71; - int __pyx_t_72; - int __pyx_t_73; - int __pyx_t_74; - int __pyx_t_75; - int __pyx_t_76; - int __pyx_t_77; - long __pyx_t_78; - int __pyx_t_79; - int __pyx_t_80; - int __pyx_t_81; - int __pyx_t_82; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__points,&__pyx_n_s__vertices,0}; - __Pyx_RefNannySetupContext("_get_barycentric_transforms"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__points); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__vertices); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("_get_barycentric_transforms", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_get_barycentric_transforms") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_points = ((PyArrayObject *)values[0]); - __pyx_v_vertices = ((PyArrayObject *)values[1]); - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - 
__pyx_v_points = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 0)); - __pyx_v_vertices = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 1)); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("_get_barycentric_transforms", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull._get_barycentric_transforms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_bstruct_T.buf = NULL; - __pyx_bstruct_Tinvs.buf = NULL; - __pyx_bstruct_points.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_points), __pyx_ptype_5numpy_ndarray, 1, "points", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_vertices), __pyx_ptype_5numpy_ndarray, 1, "vertices", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_v_points, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; - __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_v_vertices, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__nan); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_nan = __pyx_t_3; - - - __pyx_v_ndim = (__pyx_v_points->dimensions[1]); - - - __pyx_v_nvertex = (__pyx_v_vertices->dimensions[0]); - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyInt_FromLong(__pyx_v_ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyInt_FromLong(__pyx_v_ndim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_2 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__double); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject 
*)__pyx_t_5)); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_T); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_T, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_T, (PyObject*)__pyx_v_T, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); - } - } - __pyx_bstride_0_T = __pyx_bstruct_T.strides[0]; __pyx_bstride_1_T = __pyx_bstruct_T.strides[1]; - __pyx_bshape_0_T = __pyx_bstruct_T.shape[0]; __pyx_bshape_1_T = __pyx_bstruct_T.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_7 = 0; - __pyx_v_T = ((PyArrayObject *)__pyx_t_6); - __pyx_t_6 = 0; - - - __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyInt_FromLong(__pyx_v_nvertex); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_4 = PyInt_FromLong((__pyx_v_ndim + 1)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyInt_FromLong(__pyx_v_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_6 = 0; - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__double); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_t_6) < 
0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = ((PyArrayObject *)__pyx_t_6); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_Tinvs); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_Tinvs, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_Tinvs, (PyObject*)__pyx_v_Tinvs, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); - } - } - __pyx_bstride_0_Tinvs = __pyx_bstruct_Tinvs.strides[0]; __pyx_bstride_1_Tinvs = __pyx_bstruct_Tinvs.strides[1]; __pyx_bstride_2_Tinvs = __pyx_bstruct_Tinvs.strides[2]; - __pyx_bshape_0_Tinvs = __pyx_bstruct_Tinvs.shape[0]; __pyx_bshape_1_Tinvs = __pyx_bstruct_Tinvs.shape[1]; __pyx_bshape_2_Tinvs = __pyx_bstruct_Tinvs.shape[2]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __pyx_v_Tinvs = ((PyArrayObject *)__pyx_t_6); - __pyx_t_6 = 0; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_8 = __pyx_v_nvertex; - for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_8; __pyx_t_13+=1) { - __pyx_v_ivertex = __pyx_t_13; - - - __pyx_t_14 = (__pyx_v_ndim == 2); - if (__pyx_t_14) { - - - __pyx_t_15 = __pyx_v_ivertex; - __pyx_t_16 = 0; - if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_vertices; - if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_vertices; - __pyx_t_17 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_15, __pyx_bstride_0_vertices, __pyx_t_16, __pyx_bstride_1_vertices)); - __pyx_t_18 = 0; - if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_points; - if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_points; - __pyx_v_x1 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_17, __pyx_bstride_0_points, __pyx_t_18, __pyx_bstride_1_points)); - - - __pyx_t_19 = __pyx_v_ivertex; - __pyx_t_20 = 1; - if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_vertices; - if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_1_vertices; - __pyx_t_21 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_19, __pyx_bstride_0_vertices, __pyx_t_20, __pyx_bstride_1_vertices)); - __pyx_t_22 = 0; - if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_bshape_0_points; - if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_bshape_1_points; - __pyx_v_x2 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_21, __pyx_bstride_0_points, __pyx_t_22, __pyx_bstride_1_points)); - - - __pyx_t_23 = __pyx_v_ivertex; - __pyx_t_24 = 2; - if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_bshape_0_vertices; - if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_bshape_1_vertices; - __pyx_t_25 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_23, __pyx_bstride_0_vertices, __pyx_t_24, 
__pyx_bstride_1_vertices)); - __pyx_t_26 = 0; - if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_bshape_0_points; - if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_bshape_1_points; - __pyx_v_x3 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_25, __pyx_bstride_0_points, __pyx_t_26, __pyx_bstride_1_points)); - - - __pyx_t_27 = __pyx_v_ivertex; - __pyx_t_28 = 0; - if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_bshape_0_vertices; - if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_bshape_1_vertices; - __pyx_t_29 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_27, __pyx_bstride_0_vertices, __pyx_t_28, __pyx_bstride_1_vertices)); - __pyx_t_30 = 1; - if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_bshape_0_points; - if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_bshape_1_points; - __pyx_v_y1 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_29, __pyx_bstride_0_points, __pyx_t_30, __pyx_bstride_1_points)); - - - __pyx_t_31 = __pyx_v_ivertex; - __pyx_t_32 = 1; - if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_bshape_0_vertices; - if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_bshape_1_vertices; - __pyx_t_33 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_31, __pyx_bstride_0_vertices, __pyx_t_32, __pyx_bstride_1_vertices)); - __pyx_t_34 = 1; - if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_bshape_0_points; - if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_bshape_1_points; - __pyx_v_y2 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_33, __pyx_bstride_0_points, __pyx_t_34, __pyx_bstride_1_points)); - - - __pyx_t_35 = __pyx_v_ivertex; - __pyx_t_36 = 2; - if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_bshape_0_vertices; - if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_bshape_1_vertices; - __pyx_t_37 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_35, __pyx_bstride_0_vertices, __pyx_t_36, __pyx_bstride_1_vertices)); - __pyx_t_38 = 1; - if (__pyx_t_37 < 0) __pyx_t_37 += 
__pyx_bshape_0_points; - if (__pyx_t_38 < 0) __pyx_t_38 += __pyx_bshape_1_points; - __pyx_v_y3 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_37, __pyx_bstride_0_points, __pyx_t_38, __pyx_bstride_1_points)); - - - __pyx_v_x1 = (__pyx_v_x1 - __pyx_v_x3); - - - __pyx_v_x2 = (__pyx_v_x2 - __pyx_v_x3); - - - __pyx_v_y1 = (__pyx_v_y1 - __pyx_v_y3); - - - __pyx_v_y2 = (__pyx_v_y2 - __pyx_v_y3); - - - __pyx_v_det = ((__pyx_v_x1 * __pyx_v_y2) - (__pyx_v_x2 * __pyx_v_y1)); - - - __pyx_t_14 = (__pyx_v_det == 0.0); - if (__pyx_t_14) { - - - __pyx_v_info = 1; - goto __pyx_L12; - } - { - - - __pyx_v_info = 0; - - - __pyx_t_39 = __pyx_v_ivertex; - __pyx_t_40 = 0; - __pyx_t_41 = 0; - if (__pyx_t_39 < 0) __pyx_t_39 += __pyx_bshape_0_Tinvs; - if (__pyx_t_40 < 0) __pyx_t_40 += __pyx_bshape_1_Tinvs; - if (__pyx_t_41 < 0) __pyx_t_41 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_39, __pyx_bstride_0_Tinvs, __pyx_t_40, __pyx_bstride_1_Tinvs, __pyx_t_41, __pyx_bstride_2_Tinvs) = (__pyx_v_y2 / __pyx_v_det); - - - __pyx_t_42 = __pyx_v_ivertex; - __pyx_t_43 = 0; - __pyx_t_44 = 1; - if (__pyx_t_42 < 0) __pyx_t_42 += __pyx_bshape_0_Tinvs; - if (__pyx_t_43 < 0) __pyx_t_43 += __pyx_bshape_1_Tinvs; - if (__pyx_t_44 < 0) __pyx_t_44 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_42, __pyx_bstride_0_Tinvs, __pyx_t_43, __pyx_bstride_1_Tinvs, __pyx_t_44, __pyx_bstride_2_Tinvs) = ((-__pyx_v_x2) / __pyx_v_det); - - - __pyx_t_45 = __pyx_v_ivertex; - __pyx_t_46 = 1; - __pyx_t_47 = 0; - if (__pyx_t_45 < 0) __pyx_t_45 += __pyx_bshape_0_Tinvs; - if (__pyx_t_46 < 0) __pyx_t_46 += __pyx_bshape_1_Tinvs; - if (__pyx_t_47 < 0) __pyx_t_47 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_45, __pyx_bstride_0_Tinvs, __pyx_t_46, __pyx_bstride_1_Tinvs, __pyx_t_47, __pyx_bstride_2_Tinvs) = 
((-__pyx_v_y1) / __pyx_v_det); - - - __pyx_t_48 = __pyx_v_ivertex; - __pyx_t_49 = 1; - __pyx_t_50 = 1; - if (__pyx_t_48 < 0) __pyx_t_48 += __pyx_bshape_0_Tinvs; - if (__pyx_t_49 < 0) __pyx_t_49 += __pyx_bshape_1_Tinvs; - if (__pyx_t_50 < 0) __pyx_t_50 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_48, __pyx_bstride_0_Tinvs, __pyx_t_49, __pyx_bstride_1_Tinvs, __pyx_t_50, __pyx_bstride_2_Tinvs) = (__pyx_v_x1 / __pyx_v_det); - - - __pyx_t_51 = __pyx_v_ivertex; - __pyx_t_52 = 2; - __pyx_t_53 = 0; - if (__pyx_t_51 < 0) __pyx_t_51 += __pyx_bshape_0_Tinvs; - if (__pyx_t_52 < 0) __pyx_t_52 += __pyx_bshape_1_Tinvs; - if (__pyx_t_53 < 0) __pyx_t_53 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_51, __pyx_bstride_0_Tinvs, __pyx_t_52, __pyx_bstride_1_Tinvs, __pyx_t_53, __pyx_bstride_2_Tinvs) = __pyx_v_x3; - - - __pyx_t_54 = __pyx_v_ivertex; - __pyx_t_55 = 2; - __pyx_t_56 = 1; - if (__pyx_t_54 < 0) __pyx_t_54 += __pyx_bshape_0_Tinvs; - if (__pyx_t_55 < 0) __pyx_t_55 += __pyx_bshape_1_Tinvs; - if (__pyx_t_56 < 0) __pyx_t_56 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_54, __pyx_bstride_0_Tinvs, __pyx_t_55, __pyx_bstride_1_Tinvs, __pyx_t_56, __pyx_bstride_2_Tinvs) = __pyx_v_y3; - } - __pyx_L12:; - goto __pyx_L11; - } - { - - - __pyx_t_57 = __pyx_v_ndim; - for (__pyx_t_58 = 0; __pyx_t_58 < __pyx_t_57; __pyx_t_58+=1) { - __pyx_v_i = __pyx_t_58; - - - __pyx_t_59 = __pyx_v_ivertex; - __pyx_t_60 = __pyx_v_ndim; - if (__pyx_t_59 < 0) __pyx_t_59 += __pyx_bshape_0_vertices; - if (__pyx_t_60 < 0) __pyx_t_60 += __pyx_bshape_1_vertices; - __pyx_t_61 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_59, __pyx_bstride_0_vertices, __pyx_t_60, __pyx_bstride_1_vertices)); - __pyx_t_62 = __pyx_v_i; - if (__pyx_t_61 < 0) __pyx_t_61 += __pyx_bshape_0_points; - if (__pyx_t_62 < 0) 
__pyx_t_62 += __pyx_bshape_1_points; - __pyx_t_63 = __pyx_v_ivertex; - __pyx_t_64 = __pyx_v_ndim; - __pyx_t_65 = __pyx_v_i; - if (__pyx_t_63 < 0) __pyx_t_63 += __pyx_bshape_0_Tinvs; - if (__pyx_t_64 < 0) __pyx_t_64 += __pyx_bshape_1_Tinvs; - if (__pyx_t_65 < 0) __pyx_t_65 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_63, __pyx_bstride_0_Tinvs, __pyx_t_64, __pyx_bstride_1_Tinvs, __pyx_t_65, __pyx_bstride_2_Tinvs) = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_61, __pyx_bstride_0_points, __pyx_t_62, __pyx_bstride_1_points)); - - - __pyx_t_66 = __pyx_v_ndim; - for (__pyx_t_67 = 0; __pyx_t_67 < __pyx_t_66; __pyx_t_67+=1) { - __pyx_v_j = __pyx_t_67; - - - __pyx_t_68 = __pyx_v_ivertex; - __pyx_t_69 = __pyx_v_j; - if (__pyx_t_68 < 0) __pyx_t_68 += __pyx_bshape_0_vertices; - if (__pyx_t_69 < 0) __pyx_t_69 += __pyx_bshape_1_vertices; - __pyx_t_70 = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_68, __pyx_bstride_0_vertices, __pyx_t_69, __pyx_bstride_1_vertices)); - __pyx_t_71 = __pyx_v_i; - if (__pyx_t_70 < 0) __pyx_t_70 += __pyx_bshape_0_points; - if (__pyx_t_71 < 0) __pyx_t_71 += __pyx_bshape_1_points; - - - __pyx_t_72 = __pyx_v_ivertex; - __pyx_t_73 = __pyx_v_ndim; - __pyx_t_74 = __pyx_v_i; - if (__pyx_t_72 < 0) __pyx_t_72 += __pyx_bshape_0_Tinvs; - if (__pyx_t_73 < 0) __pyx_t_73 += __pyx_bshape_1_Tinvs; - if (__pyx_t_74 < 0) __pyx_t_74 += __pyx_bshape_2_Tinvs; - - - __pyx_t_75 = __pyx_v_i; - __pyx_t_76 = __pyx_v_j; - if (__pyx_t_75 < 0) __pyx_t_75 += __pyx_bshape_0_T; - if (__pyx_t_76 < 0) __pyx_t_76 += __pyx_bshape_1_T; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_T.buf, __pyx_t_75, __pyx_bstride_0_T, __pyx_t_76, __pyx_bstride_1_T) = ((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_points.buf, __pyx_t_70, __pyx_bstride_0_points, __pyx_t_71, __pyx_bstride_1_points)) - 
(*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_72, __pyx_bstride_0_Tinvs, __pyx_t_73, __pyx_bstride_1_Tinvs, __pyx_t_74, __pyx_bstride_2_Tinvs))); - } - - - __pyx_t_66 = __pyx_v_ivertex; - __pyx_t_67 = __pyx_v_i; - __pyx_t_77 = __pyx_v_i; - if (__pyx_t_66 < 0) __pyx_t_66 += __pyx_bshape_0_Tinvs; - if (__pyx_t_67 < 0) __pyx_t_67 += __pyx_bshape_1_Tinvs; - if (__pyx_t_77 < 0) __pyx_t_77 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_66, __pyx_bstride_0_Tinvs, __pyx_t_67, __pyx_bstride_1_Tinvs, __pyx_t_77, __pyx_bstride_2_Tinvs) = 1.0; - } - - - __pyx_v_n = __pyx_v_ndim; - - - __pyx_v_nrhs = __pyx_v_ndim; - - - __pyx_v_lda = __pyx_v_ndim; - - - __pyx_v_ldb = __pyx_v_ndim; - - - qh_dgesv((&__pyx_v_n), (&__pyx_v_nrhs), ((double *)__pyx_v_T->data), (&__pyx_v_lda), __pyx_v_ipiv, (((double *)__pyx_v_Tinvs->data) + ((__pyx_v_ndim * (__pyx_v_ndim + 1)) * __pyx_v_ivertex)), (&__pyx_v_ldb), (&__pyx_v_info)); - } - __pyx_L11:; - - - __pyx_t_14 = (__pyx_v_info != 0); - if (__pyx_t_14) { - - - __pyx_t_78 = (__pyx_v_ndim + 1); - for (__pyx_t_57 = 0; __pyx_t_57 < __pyx_t_78; __pyx_t_57+=1) { - __pyx_v_i = __pyx_t_57; - - - __pyx_t_58 = __pyx_v_ndim; - for (__pyx_t_79 = 0; __pyx_t_79 < __pyx_t_58; __pyx_t_79+=1) { - __pyx_v_j = __pyx_t_79; - - - __pyx_t_80 = __pyx_v_ivertex; - __pyx_t_81 = __pyx_v_i; - __pyx_t_82 = __pyx_v_j; - if (__pyx_t_80 < 0) __pyx_t_80 += __pyx_bshape_0_Tinvs; - if (__pyx_t_81 < 0) __pyx_t_81 += __pyx_bshape_1_Tinvs; - if (__pyx_t_82 < 0) __pyx_t_82 += __pyx_bshape_2_Tinvs; - *__Pyx_BufPtrStrided3d(__pyx_t_5numpy_double_t *, __pyx_bstruct_Tinvs.buf, __pyx_t_80, __pyx_bstride_0_Tinvs, __pyx_t_81, __pyx_bstride_1_Tinvs, __pyx_t_82, __pyx_bstride_2_Tinvs) = __pyx_v_nan; - } - } - goto __pyx_L17; - } - __pyx_L17:; - } - } - - - { - Py_BLOCK_THREADS - } - } - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_Tinvs)); - __pyx_r = ((PyObject 
*)__pyx_v_Tinvs); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_Tinvs); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_T); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull._get_barycentric_transforms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_Tinvs); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_T); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_T); - __Pyx_XDECREF((PyObject *)__pyx_v_Tinvs); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static int __pyx_f_5scipy_7spatial_5qhull__barycentric_inside(int __pyx_v_ndim, double *__pyx_v_transform, double *__pyx_v_x, double *__pyx_v_c, double __pyx_v_eps) { - int __pyx_v_i; - int __pyx_v_j; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - - - (__pyx_v_c[__pyx_v_ndim]) = 1.0; - - - __pyx_t_1 = __pyx_v_ndim; - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - - (__pyx_v_c[__pyx_v_i]) = 0.0; - - - __pyx_t_3 = __pyx_v_ndim; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_j = __pyx_t_4; - - - __pyx_t_5 = __pyx_v_i; - (__pyx_v_c[__pyx_t_5]) = ((__pyx_v_c[__pyx_t_5]) + ((__pyx_v_transform[((__pyx_v_ndim * __pyx_v_i) + __pyx_v_j)]) * ((__pyx_v_x[__pyx_v_j]) - 
(__pyx_v_transform[((__pyx_v_ndim * __pyx_v_ndim) + __pyx_v_j)])))); - } - - - __pyx_t_3 = __pyx_v_ndim; - (__pyx_v_c[__pyx_t_3]) = ((__pyx_v_c[__pyx_t_3]) - (__pyx_v_c[__pyx_v_i])); - - - __pyx_t_6 = ((-__pyx_v_eps) <= (__pyx_v_c[__pyx_v_i])); - if (__pyx_t_6) { - __pyx_t_6 = ((__pyx_v_c[__pyx_v_i]) <= (1.0 + __pyx_v_eps)); - } - __pyx_t_7 = (!__pyx_t_6); - if (__pyx_t_7) { - - - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L7; - } - __pyx_L7:; - } - - - __pyx_t_7 = ((-__pyx_v_eps) <= (__pyx_v_c[__pyx_v_ndim])); - if (__pyx_t_7) { - __pyx_t_7 = ((__pyx_v_c[__pyx_v_ndim]) <= (1.0 + __pyx_v_eps)); - } - __pyx_t_6 = (!__pyx_t_7); - if (__pyx_t_6) { - - - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_r = 1; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static void __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinate_single(int __pyx_v_ndim, double *__pyx_v_transform, double *__pyx_v_x, double *__pyx_v_c, int __pyx_v_i) { - int __pyx_v_j; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - - __pyx_t_1 = (__pyx_v_i == __pyx_v_ndim); - if (__pyx_t_1) { - - - (__pyx_v_c[__pyx_v_ndim]) = 1.0; - - - __pyx_t_2 = __pyx_v_ndim; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_j = __pyx_t_3; - - - __pyx_t_4 = __pyx_v_ndim; - (__pyx_v_c[__pyx_t_4]) = ((__pyx_v_c[__pyx_t_4]) - (__pyx_v_c[__pyx_v_j])); - } - goto __pyx_L3; - } - { - - - (__pyx_v_c[__pyx_v_i]) = 0.0; - - - __pyx_t_2 = __pyx_v_ndim; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_j = __pyx_t_3; - - - __pyx_t_4 = __pyx_v_i; - (__pyx_v_c[__pyx_t_4]) = ((__pyx_v_c[__pyx_t_4]) + ((__pyx_v_transform[((__pyx_v_ndim * __pyx_v_i) + __pyx_v_j)]) * ((__pyx_v_x[__pyx_v_j]) - (__pyx_v_transform[((__pyx_v_ndim * __pyx_v_ndim) + __pyx_v_j)])))); - } - } - __pyx_L3:; - -} - - - -static void __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates(int __pyx_v_ndim, double *__pyx_v_transform, double *__pyx_v_x, 
double *__pyx_v_c) { - int __pyx_v_i; - int __pyx_v_j; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - - - (__pyx_v_c[__pyx_v_ndim]) = 1.0; - - - __pyx_t_1 = __pyx_v_ndim; - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - - (__pyx_v_c[__pyx_v_i]) = 0.0; - - - __pyx_t_3 = __pyx_v_ndim; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_j = __pyx_t_4; - - - __pyx_t_5 = __pyx_v_i; - (__pyx_v_c[__pyx_t_5]) = ((__pyx_v_c[__pyx_t_5]) + ((__pyx_v_transform[((__pyx_v_ndim * __pyx_v_i) + __pyx_v_j)]) * ((__pyx_v_x[__pyx_v_j]) - (__pyx_v_transform[((__pyx_v_ndim * __pyx_v_ndim) + __pyx_v_j)])))); - } - - - __pyx_t_3 = __pyx_v_ndim; - (__pyx_v_c[__pyx_t_3]) = ((__pyx_v_c[__pyx_t_3]) - (__pyx_v_c[__pyx_v_i])); - } - -} - - - -static void __pyx_f_5scipy_7spatial_5qhull__lift_point(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, double *__pyx_v_x, double *__pyx_v_z) { - int __pyx_v_i; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - - (__pyx_v_z[__pyx_v_d->ndim]) = 0.0; - - - __pyx_t_1 = __pyx_v_d->ndim; - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - - (__pyx_v_z[__pyx_v_i]) = (__pyx_v_x[__pyx_v_i]); - - - __pyx_t_3 = __pyx_v_d->ndim; - (__pyx_v_z[__pyx_t_3]) = ((__pyx_v_z[__pyx_t_3]) + pow((__pyx_v_x[__pyx_v_i]), 2.0)); - } - - - __pyx_t_1 = __pyx_v_d->ndim; - (__pyx_v_z[__pyx_t_1]) = ((__pyx_v_z[__pyx_t_1]) * __pyx_v_d->paraboloid_scale); - - - __pyx_t_1 = __pyx_v_d->ndim; - (__pyx_v_z[__pyx_t_1]) = ((__pyx_v_z[__pyx_t_1]) + __pyx_v_d->paraboloid_shift); - -} - - - -static double __pyx_f_5scipy_7spatial_5qhull__distplane(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, int __pyx_v_isimplex, double *__pyx_v_point) { - double __pyx_v_dist; - int __pyx_v_k; - double __pyx_r; - long __pyx_t_1; - int __pyx_t_2; - - - __pyx_v_dist = (__pyx_v_d->equations[(((__pyx_v_isimplex * (__pyx_v_d->ndim + 2)) + __pyx_v_d->ndim) + 
1)]); - - - __pyx_t_1 = (__pyx_v_d->ndim + 1); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_k = __pyx_t_2; - - - __pyx_v_dist = (__pyx_v_dist + ((__pyx_v_d->equations[((__pyx_v_isimplex * (__pyx_v_d->ndim + 2)) + __pyx_v_k)]) * (__pyx_v_point[__pyx_v_k]))); - } - - - __pyx_r = __pyx_v_dist; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static void __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init(__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *__pyx_v_it, __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, int __pyx_v_vertex) { - int __pyx_v_k; - int __pyx_v_ivertex; - int __pyx_v_start; - int __pyx_t_1; - int __pyx_t_2; - - - __pyx_v_start = 0; - - - __pyx_v_it->info = __pyx_v_d; - - - __pyx_v_it->vertex = __pyx_v_vertex; - - - __pyx_v_it->triangle = (__pyx_v_d->vertex_to_simplex[__pyx_v_vertex]); - - - __pyx_v_it->start_triangle = __pyx_v_it->triangle; - - - __pyx_v_it->restart = 0; - - - __pyx_t_1 = (__pyx_v_it->triangle != -1); - if (__pyx_t_1) { - - - for (__pyx_t_2 = 0; __pyx_t_2 < 3; __pyx_t_2+=1) { - __pyx_v_k = __pyx_t_2; - - - __pyx_v_ivertex = (__pyx_v_it->info->vertices[((__pyx_v_it->triangle * 3) + __pyx_v_k)]); - - - __pyx_t_1 = (__pyx_v_ivertex != __pyx_v_vertex); - if (__pyx_t_1) { - - - __pyx_v_it->vertex2 = __pyx_v_ivertex; - - - __pyx_v_it->index = __pyx_v_k; - - - __pyx_v_it->start_index = __pyx_v_k; - - - goto __pyx_L5_break; - goto __pyx_L6; - } - __pyx_L6:; - } - __pyx_L5_break:; - goto __pyx_L3; - } - { - - - __pyx_v_it->start_index = -1; - - - __pyx_v_it->index = -1; - } - __pyx_L3:; - -} - - - -static void __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next(__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *__pyx_v_it) { - int __pyx_v_itri; - int __pyx_v_k; - int __pyx_v_ivertex; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - - if (__pyx_v_it->restart) { - - - __pyx_t_1 = (__pyx_v_it->start_index == -1); - if (__pyx_t_1) { - - - __pyx_v_it->index = -1; - - - goto 
__pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - - __pyx_v_it->triangle = __pyx_v_it->start_triangle; - - - for (__pyx_t_2 = 0; __pyx_t_2 < 3; __pyx_t_2+=1) { - __pyx_v_k = __pyx_t_2; - - - __pyx_v_ivertex = (__pyx_v_it->info->vertices[((__pyx_v_it->triangle * 3) + __pyx_v_k)]); - - - __pyx_t_1 = (__pyx_v_ivertex != __pyx_v_it->vertex); - if (__pyx_t_1) { - __pyx_t_3 = (__pyx_v_k != __pyx_v_it->start_index); - __pyx_t_4 = __pyx_t_3; - } else { - __pyx_t_4 = __pyx_t_1; - } - if (__pyx_t_4) { - - - __pyx_v_it->index = __pyx_v_k; - - - __pyx_v_it->vertex2 = __pyx_v_ivertex; - - - goto __pyx_L6_break; - goto __pyx_L7; - } - __pyx_L7:; - } - __pyx_L6_break:; - - - __pyx_v_it->start_index = -1; - - - __pyx_v_it->restart = 0; - - - __pyx_t_4 = ((__pyx_v_it->info->neighbors[((__pyx_v_it->triangle * 3) + __pyx_v_it->index)]) == -1); - if (__pyx_t_4) { - - - __pyx_v_it->index = -1; - - - goto __pyx_L0; - goto __pyx_L8; - } - { - - - __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next(__pyx_v_it); - - - __pyx_t_4 = (__pyx_v_it->index == -1); - if (__pyx_t_4) { - - - goto __pyx_L0; - goto __pyx_L9; - } - __pyx_L9:; - } - __pyx_L8:; - goto __pyx_L3; - } - __pyx_L3:; - - - __pyx_v_itri = (__pyx_v_it->info->neighbors[((__pyx_v_it->triangle * 3) + __pyx_v_it->index)]); - - - __pyx_t_4 = (__pyx_v_itri == -1); - if (__pyx_t_4) { - - - for (__pyx_t_2 = 0; __pyx_t_2 < 3; __pyx_t_2+=1) { - __pyx_v_k = __pyx_t_2; - - - __pyx_v_ivertex = (__pyx_v_it->info->vertices[((__pyx_v_it->triangle * 3) + __pyx_v_k)]); - - - __pyx_t_4 = (__pyx_v_ivertex != __pyx_v_it->vertex); - if (__pyx_t_4) { - __pyx_t_1 = (__pyx_v_k != __pyx_v_it->index); - __pyx_t_3 = __pyx_t_1; - } else { - __pyx_t_3 = __pyx_t_4; - } - if (__pyx_t_3) { - - - __pyx_v_it->index = __pyx_v_k; - - - __pyx_v_it->vertex2 = __pyx_v_ivertex; - - - goto __pyx_L12_break; - goto __pyx_L13; - } - __pyx_L13:; - } - __pyx_L12_break:; - - - __pyx_v_it->restart = 1; - - - goto __pyx_L0; - goto __pyx_L10; - } - __pyx_L10:; - - - for (__pyx_t_2 = 
0; __pyx_t_2 < 3; __pyx_t_2+=1) { - __pyx_v_k = __pyx_t_2; - - - __pyx_v_ivertex = (__pyx_v_it->info->vertices[((__pyx_v_itri * 3) + __pyx_v_k)]); - - - __pyx_t_3 = ((__pyx_v_it->info->neighbors[((__pyx_v_itri * 3) + __pyx_v_k)]) != __pyx_v_it->triangle); - if (__pyx_t_3) { - - - __pyx_t_4 = (__pyx_v_ivertex != __pyx_v_it->vertex); - __pyx_t_1 = __pyx_t_4; - } else { - __pyx_t_1 = __pyx_t_3; - } - if (__pyx_t_1) { - - - __pyx_v_it->index = __pyx_v_k; - - - __pyx_v_it->vertex2 = __pyx_v_ivertex; - - - goto __pyx_L15_break; - goto __pyx_L16; - } - __pyx_L16:; - } - __pyx_L15_break:; - - - __pyx_v_it->triangle = __pyx_v_itri; - - - __pyx_t_1 = (__pyx_v_it->triangle == __pyx_v_it->start_triangle); - if (__pyx_t_1) { - - - __pyx_v_it->index = -1; - - - goto __pyx_L0; - goto __pyx_L17; - } - __pyx_L17:; - - __pyx_L0:; -} - - - -static int __pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static int __pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_delaunay = 0; - PyObject *__pyx_v_ivertex = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__delaunay,&__pyx_n_s__ivertex,0}; - __Pyx_RefNannySetupContext("__init__"); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__delaunay); - if (likely(values[0])) kw_args--; 
- else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ivertex); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_delaunay = values[0]; - __pyx_v_ivertex = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_delaunay = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_ivertex = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull.RidgeIter2D.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_delaunay, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 615; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_int_2, Py_NE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 615; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 615; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); 
__pyx_t_2 = 0; - if (__pyx_t_3) { - - - __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __Pyx_INCREF(__pyx_v_delaunay); - __Pyx_GIVEREF(__pyx_v_delaunay); - __Pyx_GOTREF(((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->delaunay); - __Pyx_DECREF(((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->delaunay); - ((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->delaunay = __pyx_v_delaunay; - - - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->info), __pyx_v_delaunay, 0, 1); - - - __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_v_ivertex); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 619; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init((&((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it), (&((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->info), __pyx_t_4); - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("scipy.spatial.qhull.RidgeIter2D.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_1__iter__(PyObject *__pyx_v_self); -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_1__iter__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - 
__Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__iter__"); - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self); - __pyx_r = __pyx_v_self; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_2__next__(PyObject *__pyx_v_self); -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_2__next__(PyObject *__pyx_v_self) { - PyObject *__pyx_v_ret = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__next__"); - - - __pyx_t_1 = (((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it.index == -1); - if (__pyx_t_1) { - - - __pyx_t_2 = PyObject_Call(__pyx_builtin_StopIteration, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_2 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it.vertex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it.vertex2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it.index); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyInt_FromLong(((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it.triangle); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyTuple_New(4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_6, 3, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_5 = 0; - __pyx_v_ret = __pyx_t_6; - __pyx_t_6 = 0; - - - __pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next((&((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)__pyx_v_self)->it)); - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_ret)); - __pyx_r = ((PyObject *)__pyx_v_ret); - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("scipy.spatial.qhull.RidgeIter2D.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_ret); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static int 
__pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, double *__pyx_v_x, double __pyx_v_eps) { - int __pyx_v_i; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - - - __pyx_t_1 = __pyx_v_d->ndim; - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - - __pyx_t_3 = ((__pyx_v_x[__pyx_v_i]) < ((__pyx_v_d->min_bound[__pyx_v_i]) - __pyx_v_eps)); - if (!__pyx_t_3) { - __pyx_t_4 = ((__pyx_v_x[__pyx_v_i]) > ((__pyx_v_d->max_bound[__pyx_v_i]) + __pyx_v_eps)); - __pyx_t_5 = __pyx_t_4; - } else { - __pyx_t_5 = __pyx_t_3; - } - if (__pyx_t_5) { - - - __pyx_r = 1; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - } - - - __pyx_r = 0; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static int __pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, double *__pyx_v_c, double *__pyx_v_x, double __pyx_v_eps) { - int __pyx_v_inside; - int __pyx_v_isimplex; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - - - __pyx_t_1 = __pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside(__pyx_v_d, __pyx_v_x, __pyx_v_eps); - if (__pyx_t_1) { - - - __pyx_r = -1; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - - __pyx_t_1 = __pyx_v_d->nsimplex; - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_isimplex = __pyx_t_2; - - - __pyx_v_inside = __pyx_f_5scipy_7spatial_5qhull__barycentric_inside(__pyx_v_d->ndim, (__pyx_v_d->transform + ((__pyx_v_isimplex * __pyx_v_d->ndim) * (__pyx_v_d->ndim + 1))), __pyx_v_x, __pyx_v_c, __pyx_v_eps); - - - if (__pyx_v_inside) { - - - __pyx_r = __pyx_v_isimplex; - goto __pyx_L0; - goto __pyx_L6; - } - __pyx_L6:; - } - - - __pyx_r = -1; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static int 
__pyx_f_5scipy_7spatial_5qhull__find_simplex_directed(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, double *__pyx_v_c, double *__pyx_v_x, int *__pyx_v_start, double __pyx_v_eps) { - int __pyx_v_k; - int __pyx_v_m; - int __pyx_v_ndim; - int __pyx_v_inside; - int __pyx_v_isimplex; - int __pyx_v_cycle_k; - double *__pyx_v_transform; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - long __pyx_t_4; - int __pyx_t_5; - long __pyx_t_6; - int __pyx_t_7; - - - __pyx_v_ndim = __pyx_v_d->ndim; - - - __pyx_v_isimplex = (__pyx_v_start[0]); - - - __pyx_t_1 = (__pyx_v_isimplex < 0); - if (!__pyx_t_1) { - __pyx_t_2 = (__pyx_v_isimplex >= __pyx_v_d->nsimplex); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - - __pyx_v_isimplex = 0; - goto __pyx_L3; - } - __pyx_L3:; - - - __pyx_t_4 = (1 + __Pyx_div_long(__pyx_v_d->nsimplex, 4)); - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_cycle_k = __pyx_t_5; - - - __pyx_t_3 = (__pyx_v_isimplex == -1); - if (__pyx_t_3) { - - - goto __pyx_L5_break; - goto __pyx_L6; - } - __pyx_L6:; - - - __pyx_v_transform = (__pyx_v_d->transform + ((__pyx_v_isimplex * __pyx_v_ndim) * (__pyx_v_ndim + 1))); - - - __pyx_v_inside = 1; - - - __pyx_t_6 = (__pyx_v_ndim + 1); - for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { - __pyx_v_k = __pyx_t_7; - - - __pyx_f_5scipy_7spatial_5qhull__barycentric_coordinate_single(__pyx_v_ndim, __pyx_v_transform, __pyx_v_x, __pyx_v_c, __pyx_v_k); - - - __pyx_t_3 = ((__pyx_v_c[__pyx_v_k]) < (-__pyx_v_eps)); - if (__pyx_t_3) { - - - __pyx_v_m = (__pyx_v_d->neighbors[(((__pyx_v_ndim + 1) * __pyx_v_isimplex) + __pyx_v_k)]); - - - __pyx_t_3 = (__pyx_v_m == -1); - if (__pyx_t_3) { - - - (__pyx_v_start[0]) = __pyx_v_isimplex; - - - __pyx_r = -1; - goto __pyx_L0; - goto __pyx_L10; - } - __pyx_L10:; - - - __pyx_v_isimplex = __pyx_v_m; - - - __pyx_v_inside = -1; - - - goto __pyx_L8_break; - goto __pyx_L9; - } - - - __pyx_t_3 = 
((__pyx_v_c[__pyx_v_k]) <= (1.0 + __pyx_v_eps)); - if (__pyx_t_3) { - goto __pyx_L9; - } - { - - - __pyx_v_inside = 0; - } - __pyx_L9:; - } - __pyx_L8_break:; - - - switch (__pyx_v_inside) { - - - case -1: - - - goto __pyx_L4_continue; - break; - - - case 1: - - - goto __pyx_L5_break; - break; - default: - - - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce(__pyx_v_d, __pyx_v_c, __pyx_v_x, __pyx_v_eps); - - - goto __pyx_L5_break; - break; - } - __pyx_L4_continue:; - } - { - - - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce(__pyx_v_d, __pyx_v_c, __pyx_v_x, __pyx_v_eps); - } - __pyx_L5_break:; - - - (__pyx_v_start[0]) = __pyx_v_isimplex; - - - __pyx_r = __pyx_v_isimplex; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static int __pyx_f_5scipy_7spatial_5qhull__find_simplex(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_d, double *__pyx_v_c, double *__pyx_v_x, int *__pyx_v_start, double __pyx_v_eps) { - int __pyx_v_isimplex; - int __pyx_v_k; - int __pyx_v_ineigh; - int __pyx_v_ndim; - double __pyx_v_z[(NPY_MAXDIMS + 1)]; - double __pyx_v_best_dist; - double __pyx_v_dist; - int __pyx_v_changed; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - long __pyx_t_5; - - - __pyx_t_1 = __pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside(__pyx_v_d, __pyx_v_x, __pyx_v_eps); - if (__pyx_t_1) { - - - __pyx_r = -1; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - - __pyx_t_2 = (__pyx_v_d->nsimplex <= 0); - if (__pyx_t_2) { - - - __pyx_r = -1; - goto __pyx_L0; - goto __pyx_L4; - } - __pyx_L4:; - - - __pyx_v_ndim = __pyx_v_d->ndim; - - - __pyx_v_isimplex = (__pyx_v_start[0]); - - - __pyx_t_2 = (__pyx_v_isimplex < 0); - if (!__pyx_t_2) { - __pyx_t_3 = (__pyx_v_isimplex >= __pyx_v_d->nsimplex); - __pyx_t_4 = __pyx_t_3; - } else { - __pyx_t_4 = __pyx_t_2; - } - if (__pyx_t_4) { - - - __pyx_v_isimplex = 0; - goto __pyx_L5; - } - __pyx_L5:; - - - 
__pyx_f_5scipy_7spatial_5qhull__lift_point(__pyx_v_d, __pyx_v_x, __pyx_v_z); - - - __pyx_v_best_dist = __pyx_f_5scipy_7spatial_5qhull__distplane(__pyx_v_d, __pyx_v_isimplex, __pyx_v_z); - - - __pyx_v_changed = 1; - - - while (1) { - if (!__pyx_v_changed) break; - - - __pyx_t_4 = (__pyx_v_best_dist > 0.0); - if (__pyx_t_4) { - - - goto __pyx_L7_break; - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_v_changed = 0; - - - __pyx_t_5 = (__pyx_v_ndim + 1); - for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_5; __pyx_t_1+=1) { - __pyx_v_k = __pyx_t_1; - - - __pyx_v_ineigh = (__pyx_v_d->neighbors[(((__pyx_v_ndim + 1) * __pyx_v_isimplex) + __pyx_v_k)]); - - - __pyx_t_4 = (__pyx_v_ineigh == -1); - if (__pyx_t_4) { - - - goto __pyx_L9_continue; - goto __pyx_L11; - } - __pyx_L11:; - - - __pyx_v_dist = __pyx_f_5scipy_7spatial_5qhull__distplane(__pyx_v_d, __pyx_v_ineigh, __pyx_v_z); - - - __pyx_t_4 = (__pyx_v_dist > (__pyx_v_best_dist + (__pyx_v_eps * (1.0 + fabs(__pyx_v_best_dist))))); - if (__pyx_t_4) { - - - __pyx_v_isimplex = __pyx_v_ineigh; - - - __pyx_v_best_dist = __pyx_v_dist; - - - __pyx_v_changed = 1; - goto __pyx_L12; - } - __pyx_L12:; - __pyx_L9_continue:; - } - } - __pyx_L7_break:; - - - (__pyx_v_start[0]) = __pyx_v_isimplex; - - - __pyx_r = __pyx_f_5scipy_7spatial_5qhull__find_simplex_directed(__pyx_v_d, __pyx_v_c, __pyx_v_x, __pyx_v_start, __pyx_v_eps); - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay___init__ = {__Pyx_NAMESTR("__init__"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay___init__, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_points = 0; - PyObject 
*__pyx_v_vertices = NULL; - PyObject *__pyx_v_neighbors = NULL; - PyObject *__pyx_v_equations = NULL; - PyObject *__pyx_v_paraboloid_scale = NULL; - PyObject *__pyx_v_paraboloid_shift = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *(*__pyx_t_8)(PyObject *); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__points,0}; - __Pyx_RefNannySetupContext("__init__"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__points); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_points = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_points 
= PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_points); - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_points); - __pyx_v_points = __pyx_t_2; - __pyx_t_2 = 0; - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s___construct_delaunay); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 948; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 948; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_points); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_points); - __Pyx_GIVEREF(__pyx_v_points); - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 948; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { - PyObject* sequence = 
__pyx_t_1; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 5)) { - if (PyTuple_GET_SIZE(sequence) > 5) __Pyx_RaiseTooManyValuesError(5); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 2); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 3); - __pyx_t_6 = PyTuple_GET_ITEM(sequence, 4); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 5)) { - if (PyList_GET_SIZE(sequence) > 5) __Pyx_RaiseTooManyValuesError(5); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyList_GET_ITEM(sequence, 0); - __pyx_t_2 = PyList_GET_ITEM(sequence, 1); - __pyx_t_4 = PyList_GET_ITEM(sequence, 2); - __pyx_t_5 = PyList_GET_ITEM(sequence, 3); - __pyx_t_6 = PyList_GET_ITEM(sequence, 4); - } - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_7 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext; - index = 0; __pyx_t_3 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_3)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - index = 1; __pyx_t_2 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_2)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_2); - index = 2; __pyx_t_4 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_4)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_4); - 
index = 3; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_5); - index = 4; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_6); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - goto __pyx_L7_unpacking_done; - __pyx_L6_unpacking_failed:; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L7_unpacking_done:; - } - - - __pyx_v_vertices = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_v_neighbors = __pyx_t_2; - __pyx_t_2 = 0; - __pyx_v_equations = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_paraboloid_scale = __pyx_t_5; - __pyx_t_5 = 0; - __pyx_v_paraboloid_shift = __pyx_t_6; - __pyx_t_6 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 950; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 950; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__ndim, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 950; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_points, __pyx_n_s__shape); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 951; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 951; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__npoints, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 951; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_vertices, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__nsimplex, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__points, __pyx_v_points) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 953; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__vertices, __pyx_v_vertices) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__neighbors, __pyx_v_neighbors) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 955; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__equations, __pyx_v_equations) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_self, 
__pyx_n_s__paraboloid_scale, __pyx_v_paraboloid_scale) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__paraboloid_shift, __pyx_v_paraboloid_shift) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__min); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__axis), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__min_bound, __pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - - __pyx_t_5 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); 
- __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__max); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__axis), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_1 = PyEval_CallObjectWithKeywords(__pyx_t_6, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__max_bound, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s___transform, Py_None) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s___vertex_to_simplex, Py_None) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - 
__Pyx_XDECREF(__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_neighbors); - __Pyx_XDECREF(__pyx_v_equations); - __Pyx_XDECREF(__pyx_v_paraboloid_scale); - __Pyx_XDECREF(__pyx_v_paraboloid_shift); - __Pyx_XDECREF(__pyx_v_points); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_1transform(PyObject *__pyx_self, PyObject *__pyx_v_self); -static char __pyx_doc_5scipy_7spatial_5qhull_8Delaunay_1transform[] = "\n Affine transform from ``x`` to the barycentric coordinates ``c``.\n\n :type: ndarray of double, shape (nsimplex, ndim+1, ndim)\n\n This is defined by::\n\n T c = x - r\n\n At vertex ``j``, ``c_j = 1`` and the other coordinates zero.\n\n For simplex ``i``, ``transform[i,:ndim,:ndim]`` contains\n inverse of the matrix ``T``, and ``transform[i,ndim,:]``\n contains the vector ``r``.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_1transform = {__Pyx_NAMESTR("transform"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_1transform, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_8Delaunay_1transform)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_1transform(PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("transform"); - __pyx_self = __pyx_self; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___transform); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 982; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = (__pyx_t_1 == Py_None); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_2) { - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s_15); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 983; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__points); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 983; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - - - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__vertices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 984; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 983; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 983; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - - - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s___transform, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 983; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L5; - } - __pyx_L5:; - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___transform); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 985; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - 
__Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.transform", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex(PyObject *__pyx_self, PyObject *__pyx_v_self); -static char __pyx_doc_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex[] = "\n Lookup array, from a vertex, to some simplex which it is a part of.\n\n :type: ndarray of int, shape (npoints,)\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex = {__Pyx_NAMESTR("vertex_to_simplex"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex(PyObject *__pyx_self, PyObject *__pyx_v_self) { - int __pyx_v_isimplex; - int __pyx_v_k; - int __pyx_v_ivertex; - int __pyx_v_nsimplex; - int __pyx_v_ndim; - PyArrayObject *__pyx_v_vertices = 0; - PyArrayObject *__pyx_v_arr = 0; - Py_buffer __pyx_bstruct_arr; - Py_ssize_t __pyx_bstride_0_arr = 0; - Py_ssize_t __pyx_bshape_0_arr = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyArrayObject *__pyx_t_7 = NULL; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyArrayObject *__pyx_t_12 = NULL; - int __pyx_t_13; - long __pyx_t_14; - int __pyx_t_15; - int __pyx_t_16; - int 
__pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("vertex_to_simplex"); - __pyx_self = __pyx_self; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_arr.buf = NULL; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___vertex_to_simplex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = (__pyx_t_1 == Py_None); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_2) { - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__npoints); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); - __pyx_t_4 = 0; - __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__intc); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__dtype), __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s___vertex_to_simplex, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___vertex_to_simplex); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1001; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_4 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__fill); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1001; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_6)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1001; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___vertex_to_simplex); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_v_arr, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); - } - } - __pyx_bstride_0_arr = __pyx_bstruct_arr.strides[0]; - __pyx_bshape_0_arr = __pyx_bstruct_arr.shape[0]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_7 = 0; - __pyx_v_arr = ((PyArrayObject *)__pyx_t_6); - __pyx_t_6 = 0; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__vertices); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - if (!(likely(((__pyx_t_6) == Py_None) 
|| likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = ((PyArrayObject *)__pyx_t_6); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_8 < 0)) { - PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_v_vertices, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); - } - } - __pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_6); - __pyx_t_6 = 0; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__nsimplex); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1006; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_6); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1006; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_v_nsimplex = __pyx_t_8; - - - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__ndim); if (unlikely(!__pyx_t_6)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_6); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_v_ndim = __pyx_t_8; - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_8 = __pyx_v_nsimplex; - for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_8; __pyx_t_13+=1) { - __pyx_v_isimplex = __pyx_t_13; - - - __pyx_t_14 = (__pyx_v_ndim + 1); - for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { - __pyx_v_k = __pyx_t_15; - - - __pyx_t_16 = __pyx_v_isimplex; - __pyx_t_17 = __pyx_v_k; - if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_0_vertices; - if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_1_vertices; - __pyx_v_ivertex = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_16, __pyx_bstride_0_vertices, __pyx_t_17, __pyx_bstride_1_vertices)); - - - __pyx_t_18 = __pyx_v_ivertex; - if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_0_arr; - __pyx_t_2 = ((*__Pyx_BufPtrStrided1d(npy_int *, __pyx_bstruct_arr.buf, __pyx_t_18, __pyx_bstride_0_arr)) == -1); - if (__pyx_t_2) { - - - __pyx_t_19 = __pyx_v_ivertex; - if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_arr; - *__Pyx_BufPtrStrided1d(npy_int *, __pyx_bstruct_arr.buf, __pyx_t_19, __pyx_bstride_0_arr) = __pyx_v_isimplex; - goto __pyx_L13; - } - __pyx_L13:; - } - } - } - - - { - Py_BLOCK_THREADS - } - } - goto __pyx_L5; - } - __pyx_L5:; - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___vertex_to_simplex); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_r = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto 
__pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.vertex_to_simplex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF((PyObject *)__pyx_v_arr); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_3convex_hull(PyObject *__pyx_self, PyObject *__pyx_v_self); -static char __pyx_doc_5scipy_7spatial_5qhull_8Delaunay_3convex_hull[] = "\n Vertices of facets forming the convex hull of the point set.\n\n :type: ndarray of int, shape (nfaces, ndim)\n\n The array contains the indices of the points\n belonging to the (N-1)-dimensional facets that form the convex\n hull of the triangulation.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_3convex_hull = {__Pyx_NAMESTR("convex_hull"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_3convex_hull, METH_O, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_8Delaunay_3convex_hull)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_3convex_hull(PyObject *__pyx_self, PyObject *__pyx_v_self) { - int __pyx_v_isimplex; - int __pyx_v_k; - int __pyx_v_j; - int __pyx_v_ndim; - int __pyx_v_nsimplex; - int __pyx_v_m; - int __pyx_v_msize; - PyArrayObject *__pyx_v_arr = 0; - PyArrayObject *__pyx_v_neighbors = 0; - PyArrayObject *__pyx_v_vertices = 0; - PyObject *__pyx_v_out = 
NULL; - Py_buffer __pyx_bstruct_neighbors; - Py_ssize_t __pyx_bstride_0_neighbors = 0; - Py_ssize_t __pyx_bstride_1_neighbors = 0; - Py_ssize_t __pyx_bshape_0_neighbors = 0; - Py_ssize_t __pyx_bshape_1_neighbors = 0; - Py_buffer __pyx_bstruct_arr; - Py_ssize_t __pyx_bstride_0_arr = 0; - Py_ssize_t __pyx_bstride_1_arr = 0; - Py_ssize_t __pyx_bshape_0_arr = 0; - Py_ssize_t __pyx_bshape_1_arr = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyArrayObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyArrayObject *__pyx_t_12 = NULL; - int __pyx_t_13; - long __pyx_t_14; - int __pyx_t_15; - int __pyx_t_16; - int __pyx_t_17; - int __pyx_t_18; - long __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - int __pyx_t_23; - int __pyx_t_24; - int __pyx_t_25; - int __pyx_t_26; - int __pyx_t_27; - long __pyx_t_28; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("convex_hull"); - __pyx_self = __pyx_self; - __pyx_bstruct_arr.buf = NULL; - __pyx_bstruct_neighbors.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__neighbors); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __pyx_t_3 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_neighbors, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_3 < 0)) { - PyErr_Fetch(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_neighbors, (PyObject*)__pyx_v_neighbors, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_6); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_4, __pyx_t_5, __pyx_t_6); - } - } - __pyx_bstride_0_neighbors = __pyx_bstruct_neighbors.strides[0]; __pyx_bstride_1_neighbors = __pyx_bstruct_neighbors.strides[1]; - __pyx_bshape_0_neighbors = __pyx_bstruct_neighbors.shape[0]; __pyx_bshape_1_neighbors = __pyx_bstruct_neighbors.shape[1]; - if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_2 = 0; - __pyx_v_neighbors = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__vertices); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __pyx_t_3 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if 
(unlikely(__pyx_t_3 < 0)) { - PyErr_Fetch(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_v_vertices, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_6); Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_6, __pyx_t_5, __pyx_t_4); - } - } - __pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_7 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_ndim = __pyx_t_3; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__nsimplex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_nsimplex = __pyx_t_3; - - - __pyx_v_msize = 10; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyInt_FromLong(__pyx_v_msize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = PyInt_FromLong(__pyx_v_ndim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_10)); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); - __Pyx_GIVEREF(__pyx_t_9); - __pyx_t_1 = 0; - __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)__pyx_t_10)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_10)); - __pyx_t_10 = 0; - __pyx_t_10 = PyDict_New(); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_10)); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_11 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__intc); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__dtype), __pyx_t_11) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = PyEval_CallObjectWithKeywords(__pyx_t_8, ((PyObject *)__pyx_t_9), ((PyObject *)__pyx_t_10)); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0; - __pyx_v_out = __pyx_t_11; - __pyx_t_11 = 0; - - - if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = ((PyArrayObject *)__pyx_v_out); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __pyx_t_3 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_3 < 0)) { - PyErr_Fetch(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_v_arr, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_6); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_4, __pyx_t_5, __pyx_t_6); - } - } - __pyx_bstride_0_arr = __pyx_bstruct_arr.strides[0]; __pyx_bstride_1_arr = __pyx_bstruct_arr.strides[1]; - __pyx_bshape_0_arr = __pyx_bstruct_arr.shape[0]; __pyx_bshape_1_arr = 
__pyx_bstruct_arr.shape[1]; - if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __Pyx_INCREF(__pyx_v_out); - __pyx_v_arr = ((PyArrayObject *)__pyx_v_out); - - - __pyx_v_m = 0; - - - __pyx_t_3 = __pyx_v_nsimplex; - for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_3; __pyx_t_13+=1) { - __pyx_v_isimplex = __pyx_t_13; - - - __pyx_t_14 = (__pyx_v_ndim + 1); - for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { - __pyx_v_k = __pyx_t_15; - - - __pyx_t_16 = __pyx_v_isimplex; - __pyx_t_17 = __pyx_v_k; - if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_0_neighbors; - if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_1_neighbors; - __pyx_t_18 = ((*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_neighbors.buf, __pyx_t_16, __pyx_bstride_0_neighbors, __pyx_t_17, __pyx_bstride_1_neighbors)) == -1); - if (__pyx_t_18) { - - - __pyx_t_19 = (__pyx_v_ndim + 1); - for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { - __pyx_v_j = __pyx_t_20; - - - __pyx_t_18 = (__pyx_v_j < __pyx_v_k); - if (__pyx_t_18) { - - - __pyx_t_21 = __pyx_v_isimplex; - __pyx_t_22 = __pyx_v_j; - if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_bshape_0_vertices; - if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_bshape_1_vertices; - __pyx_t_23 = __pyx_v_m; - __pyx_t_24 = __pyx_v_j; - if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_bshape_0_arr; - if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_bshape_1_arr; - *__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_arr.buf, __pyx_t_23, __pyx_bstride_0_arr, __pyx_t_24, __pyx_bstride_1_arr) = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_21, __pyx_bstride_0_vertices, __pyx_t_22, __pyx_bstride_1_vertices)); - goto __pyx_L12; - } - - - __pyx_t_18 = (__pyx_v_j > __pyx_v_k); - if (__pyx_t_18) { - - - __pyx_t_25 = __pyx_v_isimplex; - __pyx_t_26 = __pyx_v_j; - if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_bshape_0_vertices; - if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_bshape_1_vertices; - 
__pyx_t_27 = __pyx_v_m; - __pyx_t_28 = (__pyx_v_j - 1); - if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_bshape_0_arr; - if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_bshape_1_arr; - *__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_arr.buf, __pyx_t_27, __pyx_bstride_0_arr, __pyx_t_28, __pyx_bstride_1_arr) = (*__Pyx_BufPtrStrided2d(npy_int *, __pyx_bstruct_vertices.buf, __pyx_t_25, __pyx_bstride_0_vertices, __pyx_t_26, __pyx_bstride_1_vertices)); - goto __pyx_L12; - } - __pyx_L12:; - } - - - __pyx_v_m = (__pyx_v_m + 1); - - - __pyx_t_18 = (__pyx_v_m >= __pyx_v_msize); - if (__pyx_t_18) { - - - __pyx_t_12 = ((PyArrayObject *)Py_None); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __pyx_t_20 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_20 < 0)) { - PyErr_Fetch(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_v_arr, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_6); Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_6, __pyx_t_5, __pyx_t_4); - } - } - __pyx_bstride_0_arr = __pyx_bstruct_arr.strides[0]; __pyx_bstride_1_arr = __pyx_bstruct_arr.strides[1]; - __pyx_bshape_0_arr = __pyx_bstruct_arr.shape[0]; __pyx_bshape_1_arr = __pyx_bstruct_arr.shape[1]; - if (unlikely(__pyx_t_20 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __Pyx_INCREF(Py_None); - __Pyx_DECREF(((PyObject *)__pyx_v_arr)); - __pyx_v_arr = ((PyArrayObject *)Py_None); - - - __pyx_v_msize = ((2 * __pyx_v_msize) + 1); - - - __pyx_t_11 = PyObject_GetAttr(__pyx_v_out, __pyx_n_s__resize); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_10 = PyInt_FromLong(__pyx_v_msize); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_9 = PyInt_FromLong(__pyx_v_ndim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_10); - __Pyx_GIVEREF(__pyx_t_10); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_9); - __Pyx_GIVEREF(__pyx_t_9); - __pyx_t_10 = 0; - __pyx_t_9 = 0; - __pyx_t_9 = PyObject_Call(__pyx_t_11, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - - if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = ((PyArrayObject *)__pyx_v_out); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __pyx_t_20 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_20 < 0)) { - PyErr_Fetch(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_v_arr, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| 
PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_6); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_4, __pyx_t_5, __pyx_t_6); - } - } - __pyx_bstride_0_arr = __pyx_bstruct_arr.strides[0]; __pyx_bstride_1_arr = __pyx_bstruct_arr.strides[1]; - __pyx_bshape_0_arr = __pyx_bstruct_arr.shape[0]; __pyx_bshape_1_arr = __pyx_bstruct_arr.shape[1]; - if (unlikely(__pyx_t_20 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __Pyx_INCREF(__pyx_v_out); - __Pyx_DECREF(((PyObject *)__pyx_v_arr)); - __pyx_v_arr = ((PyArrayObject *)__pyx_v_out); - goto __pyx_L13; - } - __pyx_L13:; - goto __pyx_L9; - } - __pyx_L9:; - } - } - - - __pyx_t_12 = ((PyArrayObject *)Py_None); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __pyx_t_3 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_3 < 0)) { - PyErr_Fetch(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_arr, (PyObject*)__pyx_v_arr, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_6); Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_6, __pyx_t_5, __pyx_t_4); - } - } - __pyx_bstride_0_arr = __pyx_bstruct_arr.strides[0]; __pyx_bstride_1_arr = __pyx_bstruct_arr.strides[1]; - __pyx_bshape_0_arr = __pyx_bstruct_arr.shape[0]; __pyx_bshape_1_arr = __pyx_bstruct_arr.shape[1]; - if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __Pyx_INCREF(Py_None); - __Pyx_DECREF(((PyObject *)__pyx_v_arr)); - __pyx_v_arr = ((PyArrayObject 
*)Py_None); - - - __pyx_t_9 = PyObject_GetAttr(__pyx_v_out, __pyx_n_s__resize); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_8 = PyInt_FromLong(__pyx_v_m); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_11 = PyInt_FromLong(__pyx_v_ndim); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_10)); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_11); - __Pyx_GIVEREF(__pyx_t_11); - __pyx_t_8 = 0; - __pyx_t_11 = 0; - __pyx_t_11 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_10), NULL); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_out); - __pyx_r = __pyx_v_out; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_XDECREF(__pyx_t_11); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_ErrRestore(__pyx_type, 
__pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.convex_hull", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_arr); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_arr); - __Pyx_XDECREF((PyObject *)__pyx_v_neighbors); - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF(__pyx_v_out); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_4find_simplex(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_5scipy_7spatial_5qhull_8Delaunay_4find_simplex[] = "\n find_simplex(xi, bruteforce=False)\n\n Find the simplices containing the given points.\n\n Parameters\n ----------\n tri : DelaunayInfo\n Delaunay triangulation\n xi : ndarray of double, shape (..., ndim)\n Points to locate\n bruteforce : bool, optional\n Whether to only perform a brute-force search\n\n Returns\n -------\n i : ndarray of int, same shape as `xi`\n Indices of simplices containing each point.\n Points outside the triangulation get the value -1.\n\n Notes\n -----\n This uses an algorithm adapted from Qhull's qh_findbestfacet,\n which makes use of the connection between a convex hull and a\n Delaunay triangulation. 
After finding the simplex closest to\n the point in N+1 dimensions, the algorithm falls back to\n directed search in N dimensions.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_4find_simplex = {__Pyx_NAMESTR("find_simplex"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_4find_simplex, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_8Delaunay_4find_simplex)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_4find_simplex(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_xi = 0; - PyObject *__pyx_v_bruteforce = 0; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - int __pyx_v_isimplex; - double __pyx_v_c[NPY_MAXDIMS]; - double __pyx_v_eps; - int __pyx_v_start; - int __pyx_v_k; - PyArrayObject *__pyx_v_x = 0; - PyArrayObject *__pyx_v_out_ = 0; - PyObject *__pyx_v_xi_shape = NULL; - PyObject *__pyx_v_out = NULL; - Py_buffer __pyx_bstruct_out_; - Py_ssize_t __pyx_bstride_0_out_ = 0; - Py_ssize_t __pyx_bshape_0_out_ = 0; - Py_buffer __pyx_bstruct_x; - Py_ssize_t __pyx_bstride_0_x = 0; - Py_ssize_t __pyx_bstride_1_x = 0; - Py_ssize_t __pyx_bshape_0_x = 0; - Py_ssize_t __pyx_bshape_1_x = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - double __pyx_t_11; - PyObject *__pyx_t_12 = NULL; - PyArrayObject *__pyx_t_13 = NULL; - npy_intp __pyx_t_14; - int __pyx_t_15; - int __pyx_t_16; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,&__pyx_n_s__bruteforce,0}; - __Pyx_RefNannySetupContext("find_simplex"); - __pyx_self = __pyx_self; - 
if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = __pyx_k_17; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("find_simplex", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__bruteforce); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "find_simplex") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_xi = values[1]; - __pyx_v_bruteforce = values[2]; - } else { - __pyx_v_bruteforce = __pyx_k_17; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_bruteforce = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_xi = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("find_simplex", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - 
__Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.find_simplex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_xi); - __pyx_bstruct_x.buf = NULL; - __pyx_bstruct_out_.buf = NULL; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__asanyarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_xi); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_xi); - __Pyx_GIVEREF(__pyx_v_xi); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_3, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__ndim); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { - - - __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_xi_shape = __pyx_t_2; - __pyx_t_2 = 0; - - - __pyx_t_2 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__prod); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PySequence_GetSlice(__pyx_t_3, 0, -1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_3, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - 
__pyx_t_5 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_1; - __pyx_t_1 = 0; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__astype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__double); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); - } - } - __pyx_bstride_0_x = __pyx_bstruct_x.strides[0]; __pyx_bstride_1_x = __pyx_bstruct_x.strides[1]; - __pyx_bshape_0_x = __pyx_bstruct_x.shape[0]; __pyx_bshape_1_x = __pyx_bstruct_x.shape[1]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 1113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_6 = 0; - __pyx_v_x = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - - __pyx_v_start = 0; - - - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__finfo); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__eps); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = 
PyNumber_Multiply(__pyx_t_5, __pyx_int_10); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_11 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_eps = __pyx_t_11; - - - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_12 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__intc); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__dtype), __pyx_t_12) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_12 = PyEval_CallObjectWithKeywords(__pyx_t_5, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_out = __pyx_t_12; - __pyx_t_12 = 0; - - - if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_13 = ((PyArrayObject *)__pyx_v_out); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out_); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_out_, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_10, 
&__pyx_t_9, &__pyx_t_8); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_out_, (PyObject*)__pyx_v_out_, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); - } - } - __pyx_bstride_0_out_ = __pyx_bstruct_out_.strides[0]; - __pyx_bshape_0_out_ = __pyx_bstruct_out_.shape[0]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_13 = 0; - __Pyx_INCREF(__pyx_v_out); - __pyx_v_out_ = ((PyArrayObject *)__pyx_v_out); - - - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_v_self, 1, 0); - - - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_bruteforce); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_t_4) { - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_14 = (__pyx_v_x->dimensions[0]); - for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_14; __pyx_t_7+=1) { - __pyx_v_k = __pyx_t_7; - - - __pyx_v_isimplex = __pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce((&__pyx_v_info), __pyx_v_c, (((double *)__pyx_v_x->data) + (__pyx_v_info.ndim * __pyx_v_k)), __pyx_v_eps); - - - __pyx_t_15 = __pyx_v_k; - if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_out_; - *__Pyx_BufPtrStrided1d(npy_int *, __pyx_bstruct_out_.buf, __pyx_t_15, __pyx_bstride_0_out_) = __pyx_v_isimplex; - } - } - - - { - Py_BLOCK_THREADS - } - } - goto __pyx_L7; - } - { - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_14 = (__pyx_v_x->dimensions[0]); - for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_14; __pyx_t_7+=1) { - __pyx_v_k = __pyx_t_7; - - - __pyx_v_isimplex = 
__pyx_f_5scipy_7spatial_5qhull__find_simplex((&__pyx_v_info), __pyx_v_c, (((double *)__pyx_v_x->data) + (__pyx_v_info.ndim * __pyx_v_k)), (&__pyx_v_start), __pyx_v_eps); - - - __pyx_t_16 = __pyx_v_k; - if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_0_out_; - *__Pyx_BufPtrStrided1d(npy_int *, __pyx_bstruct_out_.buf, __pyx_t_16, __pyx_bstride_0_out_) = __pyx_v_isimplex; - } - } - - - { - Py_BLOCK_THREADS - } - } - } - __pyx_L7:; - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_12 = PyObject_GetAttr(__pyx_v_out, __pyx_n_s__reshape); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_v_xi_shape, 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_12, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_12); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out_); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - 
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.find_simplex", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out_); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_x); - __Pyx_XDECREF((PyObject *)__pyx_v_out_); - __Pyx_XDECREF(__pyx_v_xi_shape); - __Pyx_XDECREF(__pyx_v_out); - __Pyx_XDECREF(__pyx_v_xi); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_5plane_distance(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_5scipy_7spatial_5qhull_8Delaunay_5plane_distance[] = "\n plane_distance(xi)\n\n Compute hyperplane distances to the point `xi` from all simplices.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_5plane_distance = {__Pyx_NAMESTR("plane_distance"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_5plane_distance, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_8Delaunay_5plane_distance)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_5plane_distance(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_xi = 0; - PyArrayObject *__pyx_v_x = 0; - PyArrayObject *__pyx_v_out_ = 0; - __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t __pyx_v_info; - double __pyx_v_z[(NPY_MAXDIMS + 1)]; - int __pyx_v_i; - int __pyx_v_j; - PyObject *__pyx_v_xi_shape = NULL; - PyObject *__pyx_v_out = NULL; - Py_buffer __pyx_bstruct_out_; - Py_ssize_t __pyx_bstride_0_out_ = 0; - Py_ssize_t __pyx_bstride_1_out_ = 0; - Py_ssize_t __pyx_bshape_0_out_ = 0; - Py_ssize_t __pyx_bshape_1_out_ = 0; - Py_buffer __pyx_bstruct_x; - Py_ssize_t __pyx_bstride_0_x = 0; - Py_ssize_t __pyx_bstride_1_x = 0; - Py_ssize_t __pyx_bshape_0_x = 0; - 
Py_ssize_t __pyx_bshape_1_x = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyArrayObject *__pyx_t_12 = NULL; - npy_intp __pyx_t_13; - int __pyx_t_14; - int __pyx_t_15; - int __pyx_t_16; - int __pyx_t_17; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("plane_distance"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("plane_distance", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1141; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "plane_distance") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1141; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_self = values[0]; - __pyx_v_xi = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_self = 
PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("plane_distance", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1141; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.plane_distance", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_xi); - __pyx_bstruct_x.buf = NULL; - __pyx_bstruct_out_.buf = NULL; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_21), NULL); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_xi_shape = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__prod); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PySequence_GetSlice(__pyx_t_1, 0, -1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 
0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_5 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_xi); - __pyx_v_xi = __pyx_t_2; - __pyx_t_2 = 0; - - - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__ascontiguousarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_v_xi, __pyx_n_s__astype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if 
(!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); - } - } - __pyx_bstride_0_x = __pyx_bstruct_x.strides[0]; __pyx_bstride_1_x = __pyx_bstruct_x.strides[1]; - __pyx_bshape_0_x = __pyx_bstruct_x.shape[0]; __pyx_bshape_1_x = __pyx_bstruct_x.shape[1]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_6 = 0; - __pyx_v_x = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - - __pyx_f_5scipy_7spatial_5qhull__get_delaunay_info((&__pyx_v_info), __pyx_v_self, 0, 0); - - - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_x->dimensions[0])); if 
(unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyInt_FromLong(__pyx_v_info.nsimplex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_5 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_11 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_t_11) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = PyEval_CallObjectWithKeywords(__pyx_t_3, ((PyObject 
*)__pyx_t_1), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_v_out = __pyx_t_11; - __pyx_t_11 = 0; - - - if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = ((PyArrayObject *)__pyx_v_out); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out_); - __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_out_, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); - if (unlikely(__pyx_t_7 < 0)) { - PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_out_, (PyObject*)__pyx_v_out_, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); - } - } - __pyx_bstride_0_out_ = __pyx_bstruct_out_.strides[0]; __pyx_bstride_1_out_ = __pyx_bstruct_out_.strides[1]; - __pyx_bshape_0_out_ = __pyx_bstruct_out_.shape[0]; __pyx_bshape_1_out_ = __pyx_bstruct_out_.shape[1]; - if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 = 0; - __Pyx_INCREF(__pyx_v_out); - __pyx_v_out_ = ((PyArrayObject *)__pyx_v_out); - - - { - #ifdef WITH_THREAD - PyThreadState *_save = NULL; - #endif - Py_UNBLOCK_THREADS - { - - - __pyx_t_13 = (__pyx_v_x->dimensions[0]); - 
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_13; __pyx_t_7+=1) { - __pyx_v_i = __pyx_t_7; - - - __pyx_t_14 = __pyx_v_info.nsimplex; - for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { - __pyx_v_j = __pyx_t_15; - - - __pyx_f_5scipy_7spatial_5qhull__lift_point((&__pyx_v_info), (((double *)__pyx_v_x->data) + (__pyx_v_info.ndim * __pyx_v_i)), __pyx_v_z); - - - __pyx_t_16 = __pyx_v_i; - __pyx_t_17 = __pyx_v_j; - if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_0_out_; - if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_1_out_; - *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_bstruct_out_.buf, __pyx_t_16, __pyx_bstride_0_out_, __pyx_t_17, __pyx_bstride_1_out_) = __pyx_f_5scipy_7spatial_5qhull__distplane((&__pyx_v_info), __pyx_v_j, __pyx_v_z); - } - } - } - - - { - Py_BLOCK_THREADS - } - } - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_11 = PyObject_GetAttr(__pyx_v_out, __pyx_n_s__reshape); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = __Pyx_PySequence_GetSlice(__pyx_v_xi_shape, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__nsimplex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_2, ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_11, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_11); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out_); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.plane_distance", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_out_); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_x); - __Pyx_XDECREF((PyObject *)__pyx_v_out_); - __Pyx_XDECREF(__pyx_v_xi_shape); - __Pyx_XDECREF(__pyx_v_out); - __Pyx_XDECREF(__pyx_v_xi); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_6lift_points(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_5scipy_7spatial_5qhull_8Delaunay_6lift_points[] = "\n 
lift_points(tri, x)\n\n Lift points to the Qhull paraboloid.\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_6lift_points = {__Pyx_NAMESTR("lift_points"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_6lift_points, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_8Delaunay_6lift_points)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_8Delaunay_6lift_points(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_tri = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_z = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__tri,&__pyx_n_s__x,0}; - __Pyx_RefNannySetupContext("lift_points"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tri); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("lift_points", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1175; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "lift_points") < 0)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1175; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_tri = values[0]; - __pyx_v_x = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_tri = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("lift_points", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1175; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.lift_points", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_t_1, 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, 
-1, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_4, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_3, ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyEval_CallObjectWithKeywords(__pyx_t_2, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_v_z = __pyx_t_5; - __pyx_t_5 = 0; - - - if (PyObject_SetItem(__pyx_v_z, ((PyObject *)__pyx_k_tuple_23), __pyx_v_x) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - __pyx_t_5 = PyNumber_Power(__pyx_v_x, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__sum); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__axis), __pyx_int_neg_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - if (PyObject_SetItem(__pyx_v_z, ((PyObject *)__pyx_k_tuple_24), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - - __Pyx_INCREF(((PyObject *)__pyx_k_tuple_25)); - __pyx_t_6 = __pyx_k_tuple_25; - __pyx_t_4 = PyObject_GetItem(__pyx_v_z, ((PyObject *)__pyx_t_6)); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyObject_GetAttr(__pyx_v_tri, __pyx_n_s__paraboloid_scale); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyNumber_InPlaceMultiply(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyObject_SetItem(__pyx_v_z, ((PyObject *)__pyx_t_6), __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - - - __Pyx_INCREF(((PyObject *)__pyx_k_tuple_26)); - __pyx_t_6 = __pyx_k_tuple_26; - __pyx_t_1 = PyObject_GetItem(__pyx_v_z, ((PyObject *)__pyx_t_6)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyObject_GetAttr(__pyx_v_tri, __pyx_n_s__paraboloid_shift); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_t_1, __pyx_t_5); if 
(unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyObject_SetItem(__pyx_v_z, ((PyObject *)__pyx_t_6), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_z); - __pyx_r = __pyx_v_z; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(((PyObject *)__pyx_t_6)); - __Pyx_AddTraceback("scipy.spatial.qhull.Delaunay.lift_points", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_z); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_3tsearch(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); -static char __pyx_doc_5scipy_7spatial_5qhull_3tsearch[] = "\n tsearch(tri, xi)\n\n Find simplices containing the given points. This function does the\n same thing as Delaunay.find_simplex.\n\n .. 
versionadded:: 0.9\n\n See Also\n --------\n Delaunay.find_simplex\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7spatial_5qhull_3tsearch = {__Pyx_NAMESTR("tsearch"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_3tsearch, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7spatial_5qhull_3tsearch)}; -static PyObject *__pyx_pf_5scipy_7spatial_5qhull_3tsearch(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_tri = 0; - PyObject *__pyx_v_xi = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__tri,&__pyx_n_s__xi,0}; - __Pyx_RefNannySetupContext("tsearch"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tri); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xi); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("tsearch", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "tsearch") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_tri = values[0]; - __pyx_v_xi = values[1]; - } else if 
(PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_tri = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_xi = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("tsearch", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.spatial.qhull.tsearch", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(__pyx_v_tri, __pyx_n_s__find_simplex); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_xi); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_xi); - __Pyx_GIVEREF(__pyx_v_xi); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.spatial.qhull.tsearch", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static void 
__pyx_f_5scipy_7spatial_5qhull__get_delaunay_info(__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *__pyx_v_info, PyObject *__pyx_v_obj, int __pyx_v_compute_transform, int __pyx_v_compute_vertex_to_simplex) { - PyArrayObject *__pyx_v_transform = 0; - PyArrayObject *__pyx_v_vertex_to_simplex = 0; - PyArrayObject *__pyx_v_points = 0; - PyArrayObject *__pyx_v_vertices = 0; - PyArrayObject *__pyx_v_neighbors = 0; - PyArrayObject *__pyx_v_equations = 0; - PyArrayObject *__pyx_v_min_bound = 0; - PyArrayObject *__pyx_v_max_bound = 0; - Py_buffer __pyx_bstruct_neighbors; - Py_ssize_t __pyx_bstride_0_neighbors = 0; - Py_ssize_t __pyx_bstride_1_neighbors = 0; - Py_ssize_t __pyx_bshape_0_neighbors = 0; - Py_ssize_t __pyx_bshape_1_neighbors = 0; - Py_buffer __pyx_bstruct_transform; - Py_ssize_t __pyx_bstride_0_transform = 0; - Py_ssize_t __pyx_bstride_1_transform = 0; - Py_ssize_t __pyx_bstride_2_transform = 0; - Py_ssize_t __pyx_bshape_0_transform = 0; - Py_ssize_t __pyx_bshape_1_transform = 0; - Py_ssize_t __pyx_bshape_2_transform = 0; - Py_buffer __pyx_bstruct_vertices; - Py_ssize_t __pyx_bstride_0_vertices = 0; - Py_ssize_t __pyx_bstride_1_vertices = 0; - Py_ssize_t __pyx_bshape_0_vertices = 0; - Py_ssize_t __pyx_bshape_1_vertices = 0; - Py_buffer __pyx_bstruct_points; - Py_ssize_t __pyx_bstride_0_points = 0; - Py_ssize_t __pyx_bstride_1_points = 0; - Py_ssize_t __pyx_bshape_0_points = 0; - Py_ssize_t __pyx_bshape_1_points = 0; - Py_buffer __pyx_bstruct_vertex_to_simplex; - Py_ssize_t __pyx_bstride_0_vertex_to_simplex = 0; - Py_ssize_t __pyx_bshape_0_vertex_to_simplex = 0; - Py_buffer __pyx_bstruct_min_bound; - Py_ssize_t __pyx_bstride_0_min_bound = 0; - Py_ssize_t __pyx_bshape_0_min_bound = 0; - Py_buffer __pyx_bstruct_max_bound; - Py_ssize_t __pyx_bstride_0_max_bound = 0; - Py_ssize_t __pyx_bshape_0_max_bound = 0; - Py_buffer __pyx_bstruct_equations; - Py_ssize_t __pyx_bstride_0_equations = 0; - Py_ssize_t __pyx_bstride_1_equations = 0; - Py_ssize_t __pyx_bshape_0_equations 
= 0; - Py_ssize_t __pyx_bshape_1_equations = 0; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - PyArrayObject *__pyx_t_3 = NULL; - PyArrayObject *__pyx_t_4 = NULL; - PyArrayObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - PyArrayObject *__pyx_t_7 = NULL; - double __pyx_t_8; - PyArrayObject *__pyx_t_9 = NULL; - int __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - PyArrayObject *__pyx_t_14 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_get_delaunay_info"); - __pyx_bstruct_transform.buf = NULL; - __pyx_bstruct_vertex_to_simplex.buf = NULL; - __pyx_bstruct_points.buf = NULL; - __pyx_bstruct_vertices.buf = NULL; - __pyx_bstruct_neighbors.buf = NULL; - __pyx_bstruct_equations.buf = NULL; - __pyx_bstruct_min_bound.buf = NULL; - __pyx_bstruct_max_bound.buf = NULL; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__points); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_points, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_points = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_points.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_points = __pyx_bstruct_points.strides[0]; __pyx_bstride_1_points = __pyx_bstruct_points.strides[1]; 
- __pyx_bshape_0_points = __pyx_bstruct_points.shape[0]; __pyx_bshape_1_points = __pyx_bstruct_points.shape[1]; - } - } - __pyx_t_2 = 0; - __pyx_v_points = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__vertices); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertices, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_vertices = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_vertices.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_vertices = __pyx_bstruct_vertices.strides[0]; __pyx_bstride_1_vertices = __pyx_bstruct_vertices.strides[1]; - __pyx_bshape_0_vertices = __pyx_bstruct_vertices.shape[0]; __pyx_bshape_1_vertices = __pyx_bstruct_vertices.shape[1]; - } - } - __pyx_t_3 = 0; - __pyx_v_vertices = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__neighbors); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_neighbors, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_neighbors = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_neighbors.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_neighbors = __pyx_bstruct_neighbors.strides[0]; __pyx_bstride_1_neighbors = __pyx_bstruct_neighbors.strides[1]; - __pyx_bshape_0_neighbors = __pyx_bstruct_neighbors.shape[0]; __pyx_bshape_1_neighbors = __pyx_bstruct_neighbors.shape[1]; - } - } - __pyx_t_4 = 0; - __pyx_v_neighbors = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__equations); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_equations, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_equations = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_equations.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_equations = __pyx_bstruct_equations.strides[0]; __pyx_bstride_1_equations = __pyx_bstruct_equations.strides[1]; - __pyx_bshape_0_equations = __pyx_bstruct_equations.shape[0]; __pyx_bshape_1_equations = __pyx_bstruct_equations.shape[1]; - } - } - __pyx_t_5 = 0; - __pyx_v_equations = ((PyArrayObject *)__pyx_t_1); - 
__pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__min_bound); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_min_bound, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_min_bound = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_min_bound.buf = NULL; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_min_bound = __pyx_bstruct_min_bound.strides[0]; - __pyx_bshape_0_min_bound = __pyx_bstruct_min_bound.shape[0]; - } - } - __pyx_t_6 = 0; - __pyx_v_min_bound = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__max_bound); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_max_bound, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_max_bound = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_max_bound.buf = NULL; - 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } else {__pyx_bstride_0_max_bound = __pyx_bstruct_max_bound.strides[0]; - __pyx_bshape_0_max_bound = __pyx_bstruct_max_bound.shape[0]; - } - } - __pyx_t_7 = 0; - __pyx_v_max_bound = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_v_info->ndim = (__pyx_v_points->dimensions[1]); - - - __pyx_v_info->npoints = (__pyx_v_points->dimensions[0]); - - - __pyx_v_info->nsimplex = (__pyx_v_vertices->dimensions[0]); - - - __pyx_v_info->points = ((double *)__pyx_v_points->data); - - - __pyx_v_info->vertices = ((int *)__pyx_v_vertices->data); - - - __pyx_v_info->neighbors = ((int *)__pyx_v_neighbors->data); - - - __pyx_v_info->equations = ((double *)__pyx_v_equations->data); - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__paraboloid_scale); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1231; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_8 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1231; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_info->paraboloid_scale = __pyx_t_8; - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__paraboloid_shift); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_8 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_info->paraboloid_shift = __pyx_t_8; - - - if (__pyx_v_compute_transform) { - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__transform); if (unlikely(!__pyx_t_1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_9 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_transform); - __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_transform, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); - if (unlikely(__pyx_t_10 < 0)) { - PyErr_Fetch(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_transform, (PyObject*)__pyx_v_transform, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_11, __pyx_t_12, __pyx_t_13); - } - } - __pyx_bstride_0_transform = __pyx_bstruct_transform.strides[0]; __pyx_bstride_1_transform = __pyx_bstruct_transform.strides[1]; __pyx_bstride_2_transform = __pyx_bstruct_transform.strides[2]; - __pyx_bshape_0_transform = __pyx_bstruct_transform.shape[0]; __pyx_bshape_1_transform = __pyx_bstruct_transform.shape[1]; __pyx_bshape_2_transform = __pyx_bstruct_transform.shape[2]; - if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_9 = 0; - __pyx_v_transform = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_v_info->transform = ((double *)__pyx_v_transform->data); - goto __pyx_L3; - } - { - - - __pyx_v_info->transform = NULL; - } - __pyx_L3:; - - - if (__pyx_v_compute_vertex_to_simplex) { - - - __pyx_t_1 = PyObject_GetAttr(__pyx_v_obj, __pyx_n_s__vertex_to_simplex); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_14 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertex_to_simplex); - __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_vertex_to_simplex, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_10 < 0)) { - PyErr_Fetch(&__pyx_t_13, &__pyx_t_12, &__pyx_t_11); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_vertex_to_simplex, (PyObject*)__pyx_v_vertex_to_simplex, &__Pyx_TypeInfo_nn_npy_int, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_13, __pyx_t_12, __pyx_t_11); - } - } - __pyx_bstride_0_vertex_to_simplex = __pyx_bstruct_vertex_to_simplex.strides[0]; - __pyx_bshape_0_vertex_to_simplex = __pyx_bstruct_vertex_to_simplex.shape[0]; - if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_14 = 0; - __pyx_v_vertex_to_simplex = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - - __pyx_v_info->vertex_to_simplex = ((int *)__pyx_v_vertex_to_simplex->data); - goto __pyx_L4; - } - { - - - __pyx_v_info->vertex_to_simplex = NULL; - } - __pyx_L4:; - - - __pyx_v_info->min_bound = ((double *)__pyx_v_min_bound->data); - - - __pyx_v_info->max_bound = ((double *)__pyx_v_max_bound->data); - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, 
&__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_transform); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertex_to_simplex); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_min_bound); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_max_bound); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_equations); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_WriteUnraisable("scipy.spatial.qhull._get_delaunay_info", __pyx_clineno, __pyx_lineno, __pyx_filename); - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_neighbors); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_transform); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertices); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_points); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_vertex_to_simplex); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_min_bound); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_max_bound); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_equations); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_transform); - __Pyx_XDECREF((PyObject *)__pyx_v_vertex_to_simplex); - __Pyx_XDECREF((PyObject *)__pyx_v_points); - __Pyx_XDECREF((PyObject *)__pyx_v_vertices); - __Pyx_XDECREF((PyObject *)__pyx_v_neighbors); - __Pyx_XDECREF((PyObject *)__pyx_v_equations); - __Pyx_XDECREF((PyObject *)__pyx_v_min_bound); - __Pyx_XDECREF((PyObject *)__pyx_v_max_bound); - __Pyx_RefNannyFinishContext(); -} - - - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int 
__pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_v_endian_detector = 1; - - - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self)); - - - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - { - - - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_28), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; 
- } - if (__pyx_t_2) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_30), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - - __pyx_v_info->ndim = __pyx_v_ndim; - - - if (__pyx_v_copy_shape) { - - - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - { - - - __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - - __pyx_v_info->suboffsets = NULL; - - - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - - __pyx_v_f = NULL; - - - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr; - - - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - - __Pyx_INCREF(Py_None); 
- __Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - { - - - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - - __pyx_v_t = __pyx_v_descr->type_num; - - - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_32), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - 
- - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - { - - - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_33), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - - __pyx_v_info->format = __pyx_v_f; - - - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - { - - - __pyx_v_info->format = ((char *)malloc(255)); - - - (__pyx_v_info->format[0]) = '^'; - - - __pyx_v_offset = 0; - - - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, 
Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; 
- char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - - __pyx_v_endian_detector = 1; - - - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - __Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else 
__Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_35), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - __pyx_t_9 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_36), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - - (__pyx_v_f[0]) = 120; - - - __pyx_v_f = (__pyx_v_f + 1); - - - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_38), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - - __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - 
(__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, 
Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto 
__pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - { - - - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_33), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - { - - - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - 
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - - - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - { - - - Py_INCREF(__pyx_v_base); - - - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - - Py_XDECREF(__pyx_v_arr->base); - - - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - - - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - - __pyx_t_1 = (__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - { - - - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_tp_new_5scipy_7spatial_5qhull_RidgeIter2D(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *p; - PyObject *o = (*t->tp_alloc)(t, 0); - if (!o) return 0; - p = ((struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)o); - p->delaunay = Py_None; Py_INCREF(Py_None); - return o; -} - -static 
void __pyx_tp_dealloc_5scipy_7spatial_5qhull_RidgeIter2D(PyObject *o) { - struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *p = (struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)o; - Py_XDECREF(p->delaunay); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_5scipy_7spatial_5qhull_RidgeIter2D(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *p = (struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)o; - if (p->delaunay) { - e = (*v)(p->delaunay, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_5scipy_7spatial_5qhull_RidgeIter2D(PyObject *o) { - struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *p = (struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D *)o; - PyObject* tmp; - tmp = ((PyObject*)p->delaunay); - p->delaunay = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyMethodDef __pyx_methods_5scipy_7spatial_5qhull_RidgeIter2D[] = { - {__Pyx_NAMESTR("__next__"), (PyCFunction)__pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_2__next__, METH_NOARGS|METH_COEXIST, __Pyx_DOCSTR(0)}, - {0, 0, 0, 0} -}; - -static PyNumberMethods __pyx_tp_as_number_RidgeIter2D = { - 0, - 0, - 0, - #if PY_MAJOR_VERSION < 3 - 0, - #endif - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - #if PY_MAJOR_VERSION < 3 - 0, - #endif - 0, - #if PY_MAJOR_VERSION < 3 - 0, - #else - 0, - #endif - 0, - #if PY_MAJOR_VERSION < 3 - 0, - #endif - #if PY_MAJOR_VERSION < 3 - 0, - #endif - 0, - 0, - 0, - #if PY_MAJOR_VERSION < 3 - 0, - #endif - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - #if PY_VERSION_HEX >= 0x02050000 - 0, - #endif -}; - -static PySequenceMethods __pyx_tp_as_sequence_RidgeIter2D = { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -}; - -static PyMappingMethods __pyx_tp_as_mapping_RidgeIter2D = { - 0, - 0, - 0, -}; - -static PyBufferProcs __pyx_tp_as_buffer_RidgeIter2D = { - #if PY_MAJOR_VERSION < 3 - 0, - #endif - #if PY_MAJOR_VERSION < 3 - 0, - 
#endif - #if PY_MAJOR_VERSION < 3 - 0, - #endif - #if PY_MAJOR_VERSION < 3 - 0, - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, - #endif - #if PY_VERSION_HEX >= 0x02060000 - 0, - #endif -}; - -static PyTypeObject __pyx_type_5scipy_7spatial_5qhull_RidgeIter2D = { - PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("scipy.spatial.qhull.RidgeIter2D"), - sizeof(struct __pyx_obj_5scipy_7spatial_5qhull_RidgeIter2D), - 0, - __pyx_tp_dealloc_5scipy_7spatial_5qhull_RidgeIter2D, - 0, - 0, - 0, - #if PY_MAJOR_VERSION < 3 - 0, - #else - 0, - #endif - 0, - &__pyx_tp_as_number_RidgeIter2D, - &__pyx_tp_as_sequence_RidgeIter2D, - &__pyx_tp_as_mapping_RidgeIter2D, - 0, - 0, - 0, - 0, - 0, - &__pyx_tp_as_buffer_RidgeIter2D, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, - 0, - __pyx_tp_traverse_5scipy_7spatial_5qhull_RidgeIter2D, - __pyx_tp_clear_5scipy_7spatial_5qhull_RidgeIter2D, - 0, - 0, - __pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_1__iter__, - __pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D_2__next__, - __pyx_methods_5scipy_7spatial_5qhull_RidgeIter2D, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - __pyx_pf_5scipy_7spatial_5qhull_11RidgeIter2D___init__, - 0, - __pyx_tp_new_5scipy_7spatial_5qhull_RidgeIter2D, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - #if PY_VERSION_HEX >= 0x02060000 - 0, - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("qhull"), - __Pyx_DOCSTR(__pyx_k_39), - -1, - __pyx_methods , - NULL, - NULL, - NULL, - NULL -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0}, - {&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 0}, - {&__pyx_n_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 1}, - {&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0}, - {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 
0, 1, 0}, - {&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0}, - {&__pyx_kp_u_27, __pyx_k_27, sizeof(__pyx_k_27), 0, 1, 0, 0}, - {&__pyx_kp_u_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 1, 0, 0}, - {&__pyx_kp_u_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 1, 0, 0}, - {&__pyx_kp_u_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 1, 0, 0}, - {&__pyx_kp_u_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 1, 0, 0}, - {&__pyx_kp_u_37, __pyx_k_37, sizeof(__pyx_k_37), 0, 1, 0, 0}, - {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0}, - {&__pyx_n_s_40, __pyx_k_40, sizeof(__pyx_k_40), 0, 0, 1, 1}, - {&__pyx_kp_s_41, __pyx_k_41, sizeof(__pyx_k_41), 0, 0, 1, 0}, - {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0}, - {&__pyx_n_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 1}, - {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0}, - {&__pyx_n_s__Delaunay, __pyx_k__Delaunay, sizeof(__pyx_k__Delaunay), 0, 0, 1, 1}, - {&__pyx_n_s__Lock, __pyx_k__Lock, sizeof(__pyx_k__Lock), 0, 0, 1, 1}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__StopIteration, __pyx_k__StopIteration, sizeof(__pyx_k__StopIteration), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____all__, __pyx_k____all__, sizeof(__pyx_k____all__), 0, 0, 1, 1}, - {&__pyx_n_s____init__, __pyx_k____init__, sizeof(__pyx_k____init__), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s___construct_delaunay, __pyx_k___construct_delaunay, sizeof(__pyx_k___construct_delaunay), 0, 0, 1, 1}, - {&__pyx_n_s___qhull_lock, __pyx_k___qhull_lock, sizeof(__pyx_k___qhull_lock), 0, 0, 1, 1}, - {&__pyx_n_s___transform, __pyx_k___transform, sizeof(__pyx_k___transform), 0, 0, 1, 1}, - {&__pyx_n_s___vertex_to_simplex, __pyx_k___vertex_to_simplex, 
sizeof(__pyx_k___vertex_to_simplex), 0, 0, 1, 1}, - {&__pyx_n_s__acquire, __pyx_k__acquire, sizeof(__pyx_k__acquire), 0, 0, 1, 1}, - {&__pyx_n_s__asanyarray, __pyx_k__asanyarray, sizeof(__pyx_k__asanyarray), 0, 0, 1, 1}, - {&__pyx_n_s__ascontiguousarray, __pyx_k__ascontiguousarray, sizeof(__pyx_k__ascontiguousarray), 0, 0, 1, 1}, - {&__pyx_n_s__astype, __pyx_k__astype, sizeof(__pyx_k__astype), 0, 0, 1, 1}, - {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, - {&__pyx_n_s__bruteforce, __pyx_k__bruteforce, sizeof(__pyx_k__bruteforce), 0, 0, 1, 1}, - {&__pyx_n_s__convex_hull, __pyx_k__convex_hull, sizeof(__pyx_k__convex_hull), 0, 0, 1, 1}, - {&__pyx_n_s__delaunay, __pyx_k__delaunay, sizeof(__pyx_k__delaunay), 0, 0, 1, 1}, - {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1}, - {&__pyx_n_s__eps, __pyx_k__eps, sizeof(__pyx_k__eps), 0, 0, 1, 1}, - {&__pyx_n_s__equations, __pyx_k__equations, sizeof(__pyx_k__equations), 0, 0, 1, 1}, - {&__pyx_n_s__fill, __pyx_k__fill, sizeof(__pyx_k__fill), 0, 0, 1, 1}, - {&__pyx_n_s__find_simplex, __pyx_k__find_simplex, sizeof(__pyx_k__find_simplex), 0, 0, 1, 1}, - {&__pyx_n_s__finfo, __pyx_k__finfo, sizeof(__pyx_k__finfo), 0, 0, 1, 1}, - {&__pyx_n_s__intc, __pyx_k__intc, sizeof(__pyx_k__intc), 0, 0, 1, 1}, - {&__pyx_n_s__ivertex, __pyx_k__ivertex, sizeof(__pyx_k__ivertex), 0, 0, 1, 1}, - {&__pyx_n_s__lift_points, __pyx_k__lift_points, sizeof(__pyx_k__lift_points), 0, 0, 1, 1}, - {&__pyx_n_s__max, __pyx_k__max, sizeof(__pyx_k__max), 0, 0, 1, 1}, - {&__pyx_n_s__max_bound, __pyx_k__max_bound, sizeof(__pyx_k__max_bound), 0, 0, 1, 1}, - {&__pyx_n_s__min, __pyx_k__min, sizeof(__pyx_k__min), 0, 0, 1, 1}, - {&__pyx_n_s__min_bound, __pyx_k__min_bound, sizeof(__pyx_k__min_bound), 0, 0, 1, 1}, - {&__pyx_n_s__nan, __pyx_k__nan, sizeof(__pyx_k__nan), 0, 0, 1, 
1}, - {&__pyx_n_s__ndim, __pyx_k__ndim, sizeof(__pyx_k__ndim), 0, 0, 1, 1}, - {&__pyx_n_s__neighbors, __pyx_k__neighbors, sizeof(__pyx_k__neighbors), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__npoints, __pyx_k__npoints, sizeof(__pyx_k__npoints), 0, 0, 1, 1}, - {&__pyx_n_s__nsimplex, __pyx_k__nsimplex, sizeof(__pyx_k__nsimplex), 0, 0, 1, 1}, - {&__pyx_n_s__numpoints, __pyx_k__numpoints, sizeof(__pyx_k__numpoints), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__object, __pyx_k__object, sizeof(__pyx_k__object), 0, 0, 1, 1}, - {&__pyx_n_s__paraboloid_scale, __pyx_k__paraboloid_scale, sizeof(__pyx_k__paraboloid_scale), 0, 0, 1, 1}, - {&__pyx_n_s__paraboloid_shift, __pyx_k__paraboloid_shift, sizeof(__pyx_k__paraboloid_shift), 0, 0, 1, 1}, - {&__pyx_n_s__plane_distance, __pyx_k__plane_distance, sizeof(__pyx_k__plane_distance), 0, 0, 1, 1}, - {&__pyx_n_s__points, __pyx_k__points, sizeof(__pyx_k__points), 0, 0, 1, 1}, - {&__pyx_n_s__prod, __pyx_k__prod, sizeof(__pyx_k__prod), 0, 0, 1, 1}, - {&__pyx_n_s__property, __pyx_k__property, sizeof(__pyx_k__property), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__release, __pyx_k__release, sizeof(__pyx_k__release), 0, 0, 1, 1}, - {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, - {&__pyx_n_s__resize, __pyx_k__resize, sizeof(__pyx_k__resize), 0, 0, 1, 1}, - {&__pyx_n_s__self, __pyx_k__self, sizeof(__pyx_k__self), 0, 0, 1, 1}, - {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, - {&__pyx_n_s__sum, __pyx_k__sum, sizeof(__pyx_k__sum), 0, 0, 1, 1}, - {&__pyx_n_s__threading, __pyx_k__threading, sizeof(__pyx_k__threading), 0, 0, 1, 1}, - {&__pyx_n_s__transform, __pyx_k__transform, sizeof(__pyx_k__transform), 0, 0, 1, 1}, - {&__pyx_n_s__tri, __pyx_k__tri, sizeof(__pyx_k__tri), 0, 0, 1, 1}, - {&__pyx_n_s__tsearch, 
__pyx_k__tsearch, sizeof(__pyx_k__tsearch), 0, 0, 1, 1}, - {&__pyx_n_s__vertex_to_simplex, __pyx_k__vertex_to_simplex, sizeof(__pyx_k__vertex_to_simplex), 0, 0, 1, 1}, - {&__pyx_n_s__vertices, __pyx_k__vertices, sizeof(__pyx_k__vertices), 0, 0, 1, 1}, - {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, - {&__pyx_n_s__xi, __pyx_k__xi, sizeof(__pyx_k__xi), 0, 0, 1, 1}, - {&__pyx_n_s__xrange, __pyx_k__xrange, sizeof(__pyx_k__xrange), 0, 0, 1, 1}, - {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_object = __Pyx_GetName(__pyx_b, __pyx_n_s__object); if (!__pyx_builtin_object) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_property = __Pyx_GetName(__pyx_b, __pyx_n_s__property); if (!__pyx_builtin_property) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #if PY_MAJOR_VERSION >= 3 - __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #else - __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - __pyx_builtin_StopIteration = __Pyx_GetName(__pyx_b, __pyx_n_s__StopIteration); if (!__pyx_builtin_StopIteration) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - - __pyx_k_tuple_3 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_3)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_2)); - PyTuple_SET_ITEM(__pyx_k_tuple_3, 0, ((PyObject *)__pyx_kp_s_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_3)); - - - __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_5)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_4)); - PyTuple_SET_ITEM(__pyx_k_tuple_5, 0, ((PyObject *)__pyx_kp_s_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_5)); - - - __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_7)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_6)); - PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, ((PyObject *)__pyx_kp_s_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7)); - - - __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10)); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, 
__pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); - - - __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_11)); - PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_s_11)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_11)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); - - - __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_14)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_13)); - PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_s_13)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); - - - __pyx_k_tuple_16 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1001; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_16)); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); - - - __pyx_k_tuple_19 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_19)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_18)); - PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_kp_s_18)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_18)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); - - - __pyx_k_tuple_21 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject 
*)__pyx_k_tuple_21)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_20)); - PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_kp_s_20)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_20)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); - - - __pyx_k_slice_22 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_slice_22); - __Pyx_GIVEREF(__pyx_k_slice_22); - __pyx_k_tuple_23 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_23)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_k_slice_22); - PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, __pyx_k_slice_22); - __Pyx_GIVEREF(__pyx_k_slice_22); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); - - - __pyx_k_tuple_24 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_24)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_k_tuple_24, 1, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24)); - - - __pyx_k_tuple_25 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_25)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_k_tuple_25, 1, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - 
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); - - - __pyx_k_tuple_26 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_26)); - __Pyx_INCREF(Py_Ellipsis); - PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, Py_Ellipsis); - __Pyx_GIVEREF(Py_Ellipsis); - __Pyx_INCREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_k_tuple_26, 1, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26)); - - - __pyx_k_tuple_28 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_28)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_27)); - PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, ((PyObject *)__pyx_kp_u_27)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_27)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28)); - - - __pyx_k_tuple_30 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_30)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_30)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_29)); - PyTuple_SET_ITEM(__pyx_k_tuple_30, 0, ((PyObject *)__pyx_kp_u_29)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_29)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30)); - - - __pyx_k_tuple_32 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_32)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_32)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_31)); - PyTuple_SET_ITEM(__pyx_k_tuple_32, 0, ((PyObject *)__pyx_kp_u_31)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_31)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32)); - - - __pyx_k_tuple_35 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_35)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_34)); - PyTuple_SET_ITEM(__pyx_k_tuple_35, 0, ((PyObject *)__pyx_kp_u_34)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_34)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); - - - __pyx_k_tuple_36 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_36)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_36)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_31)); - PyTuple_SET_ITEM(__pyx_k_tuple_36, 0, ((PyObject *)__pyx_kp_u_31)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_31)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_36)); - - - __pyx_k_tuple_38 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_38)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_38)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_37)); - PyTuple_SET_ITEM(__pyx_k_tuple_38, 0, ((PyObject *)__pyx_kp_u_37)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_37)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_38)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initqhull(void); -PyMODINIT_FUNC initqhull(void) -#else -PyMODINIT_FUNC PyInit_qhull(void); -PyMODINIT_FUNC PyInit_qhull(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_qhull(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - - - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD - PyEval_InitThreads(); - #endif - #endif - - #if PY_MAJOR_VERSION < 3 - __pyx_m = 
Py_InitModule4(__Pyx_NAMESTR("qhull"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_39), 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__spatial__qhull) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - - if (__Pyx_ExportFunction("_get_delaunay_info", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__get_delaunay_info, "void (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, PyObject *, int, int)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_barycentric_inside", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__barycentric_inside, "int (int, double *, double *, double *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_barycentric_coordinate_single", (void 
(*)(void))__pyx_f_5scipy_7spatial_5qhull__barycentric_coordinate_single, "void (int, double *, double *, double *, int)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_barycentric_coordinates", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__barycentric_coordinates, "void (int, double *, double *, double *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_lift_point", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__lift_point, "void (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_distplane", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__distplane, "double (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int, double *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_is_point_fully_outside", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__is_point_fully_outside, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_find_simplex_bruteforce", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__find_simplex_bruteforce, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_find_simplex_directed", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__find_simplex_directed, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if 
(__Pyx_ExportFunction("_find_simplex", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__find_simplex, "int (__pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, double *, double *, int *, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_RidgeIter2D_init", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_init, "void (__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *, __pyx_t_5scipy_7spatial_5qhull_DelaunayInfo_t *, int)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_ExportFunction("_RidgeIter2D_next", (void (*)(void))__pyx_f_5scipy_7spatial_5qhull__RidgeIter2D_next, "void (__pyx_t_5scipy_7spatial_5qhull_RidgeIter2D_t *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - if (PyType_Ready(&__pyx_type_5scipy_7spatial_5qhull_RidgeIter2D) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "RidgeIter2D", (PyObject *)&__pyx_type_5scipy_7spatial_5qhull_RidgeIter2D) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5scipy_7spatial_5qhull_RidgeIter2D = &__pyx_type_5scipy_7spatial_5qhull_RidgeIter2D; - - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if 
(unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - - - - - - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__threading), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__threading, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__Delaunay)); - PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__Delaunay)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Delaunay)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__tsearch)); - PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__tsearch)); - __Pyx_GIVEREF(((PyObject 
*)__pyx_n_s__tsearch)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____all__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__threading); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__Lock); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___qhull_lock, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull__construct_delaunay, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___construct_delaunay, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_1_qhull_get_facet_array, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, 
__pyx_n_s_8, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_2_get_barycentric_transforms, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_15, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - - - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay___init__, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s____init__, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_1transform, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__transform, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_n_s__transform); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_property, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__transform, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_2vertex_to_simplex, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 989; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__vertex_to_simplex, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 989; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_n_s__vertex_to_simplex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 989; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 987; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_property, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 987; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__vertex_to_simplex, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 989; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_3convex_hull, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__convex_hull, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_n_s__convex_hull); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_property, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__convex_hull, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_k_17 = __pyx_t_2; - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_4find_simplex, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__find_simplex, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_5plane_distance, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__plane_distance, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_8Delaunay_6lift_points, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_t_1, __pyx_n_s__lift_points, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_builtin_object); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_builtin_object); - 
__Pyx_GIVEREF(__pyx_builtin_object); - if (PyDict_SetItemString(((PyObject *)__pyx_t_1), "__doc__", ((PyObject *)__pyx_kp_s_41)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = __Pyx_CreateClass(((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_1), __pyx_n_s__Delaunay, __pyx_n_s_40); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__Delaunay, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7spatial_5qhull_3tsearch, NULL, __pyx_n_s_40); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__tsearch, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.spatial.qhull", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if 
(!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.spatial.qhull"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - - - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, - const char *name, int exact) -{ - if (!type) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (none_allowed && obj == Py_None) return 1; - else if (exact) { - if (Py_TYPE(obj) == type) return 1; - } - else { - if (PyObject_TypeCheck(obj, type)) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%s' has incorrect type (expected %s, got %s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { - unsigned int n = 1; - return *(unsigned char*)(&n) != 0; -} - -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; -} __Pyx_BufFmt_Context; - -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - 
stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} - -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t < '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} - -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} - -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case 'b': return "'char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 0: return "end"; - default: return "unparseable format string"; - } -} - -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif - -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; - case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; - case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); - case 'O': return 'O'; - case 'P': return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} - -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset; - if (ctx->enc_type == 0) return 0; - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - } - - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - - size_t parent_offset = ctx->head->parent_offset + field->offset; - 
++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - - ctx->fmt_offset += size; - - --ctx->enc_count; - - - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} - -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case 10: - case 13: - ++ts; - break; - case '<': - if (!__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian 
compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - } - break; - case '}': - ++ts; - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': - if (ctx->enc_type == *ts && got_Z == ctx->is_complex && - ctx->enc_packmode == ctx->new_packmode) { - - ctx->enc_count += ctx->new_count; - } else { - - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - } - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - default: - { - int number = __Pyx_BufFmt_ParseNumber(&ts); - if (number == -1) { - PyErr_Format(PyExc_ValueError, - "Does not understand character buffer dtype format string ('%c')", *ts); - return NULL; - } - ctx->new_count = (size_t)number; - } - } - } -} - -static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - 
buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { - if (obj == Py_None || obj == NULL) { - __Pyx_ZeroBuffer(buf); - return 0; - } - buf->buf = NULL; - if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; - if (buf->ndim != nd) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if ((unsigned)buf->itemsize != dtype->size) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_ZeroBuffer(buf); - return -1; -} - -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (info->buf == NULL) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -static void __Pyx_RaiseBufferFallbackError(void) { - PyErr_Format(PyExc_ValueError, - "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); -} - - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - 
PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - 
if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? "" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } else if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } else { - - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 
0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); - else { - PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; - } -} - -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject* obj = view->obj; - if (obj) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); - Py_DECREF(obj); - view->obj = NULL; - } -} - -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, 
long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static PyObject *__Pyx_FindPy2Metaclass(PyObject *bases) { - PyObject *metaclass; - -#if PY_MAJOR_VERSION < 3 - if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { - PyObject *base = PyTuple_GET_ITEM(bases, 0); - metaclass = PyObject_GetAttrString(base, (char *)"__class__"); - if (!metaclass) { - PyErr_Clear(); - metaclass = (PyObject*) Py_TYPE(base); - } - } else { - metaclass = (PyObject *) &PyClass_Type; - } -#else - if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { - PyObject *base = PyTuple_GET_ITEM(bases, 0); - metaclass = (PyObject*) Py_TYPE(base); - } else { - metaclass = (PyObject *) &PyType_Type; - } -#endif - Py_INCREF(metaclass); - return metaclass; -} - -static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, - PyObject *modname) { - PyObject *result; - PyObject *metaclass; - - if (PyDict_SetItemString(dict, 
"__module__", modname) < 0) - return NULL; - - - metaclass = PyDict_GetItemString(dict, "__metaclass__"); - if (metaclass) { - Py_INCREF(metaclass); - } else { - metaclass = __Pyx_FindPy2Metaclass(bases); - } - result = PyObject_CallFunctionObjArgs(metaclass, name, bases, dict, NULL); - Py_DECREF(metaclass); - return result; -} - - -static PyObject *__pyx_binding_PyCFunctionType_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module) { - __pyx_binding_PyCFunctionType_object *op = PyObject_GC_New(__pyx_binding_PyCFunctionType_object, __pyx_binding_PyCFunctionType); - if (op == NULL) - return NULL; - op->func.m_ml = ml; - Py_XINCREF(self); - op->func.m_self = self; - Py_XINCREF(module); - op->func.m_module = module; - PyObject_GC_Track(op); - return (PyObject *)op; -} - -static void __pyx_binding_PyCFunctionType_dealloc(__pyx_binding_PyCFunctionType_object *m) { - PyObject_GC_UnTrack(m); - Py_XDECREF(m->func.m_self); - Py_XDECREF(m->func.m_module); - PyObject_GC_Del(m); -} - -static PyObject *__pyx_binding_PyCFunctionType_descr_get(PyObject *func, PyObject *obj, PyObject *type) { - if (obj == Py_None) - obj = NULL; - return PyMethod_New(func, obj, type); -} - -static int __pyx_binding_PyCFunctionType_init(void) { - __pyx_binding_PyCFunctionType_type = PyCFunction_Type; - __pyx_binding_PyCFunctionType_type.tp_name = __Pyx_NAMESTR("cython_binding_builtin_function_or_method"); - __pyx_binding_PyCFunctionType_type.tp_dealloc = (destructor)__pyx_binding_PyCFunctionType_dealloc; - __pyx_binding_PyCFunctionType_type.tp_descr_get = __pyx_binding_PyCFunctionType_descr_get; - if (PyType_Ready(&__pyx_binding_PyCFunctionType_type) < 0) { - return -1; - } - __pyx_binding_PyCFunctionType = &__pyx_binding_PyCFunctionType_type; - return 0; - -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_flagT(flagT val) { - const flagT neg_one = (flagT)-1, const_zero = (flagT)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(flagT) == sizeof(char)) || - (sizeof(flagT) == 
sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(flagT) == sizeof(int)) || - (sizeof(flagT) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(flagT) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(flagT), - little, !is_unsigned); - } -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { - const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(Py_intptr_t) == sizeof(char)) || - (sizeof(Py_intptr_t) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(Py_intptr_t) == sizeof(int)) || - (sizeof(Py_intptr_t) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), - little, !is_unsigned); - } -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE 
__pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static 
CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = 
__Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return 
(PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename) { - PyObject *old_exc, *old_val, *old_tb; - PyObject *ctx; - __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); - #if PY_MAJOR_VERSION < 3 - ctx = PyString_FromString(name); - #else - ctx = PyUnicode_FromString(name); - #endif - __Pyx_ErrRestore(old_exc, old_val, old_tb); - if (!ctx) { - PyErr_WriteUnraisable(Py_None); - } else { - PyErr_WriteUnraisable(ctx); - Py_DECREF(ctx); - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { - PyObject *d = 0; - PyObject *cobj = 0; - union { - void (*fp)(void); - void *p; - } tmp; - - d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__"); - if (!d) { - PyErr_Clear(); - d = PyDict_New(); - if (!d) - goto bad; - Py_INCREF(d); - if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0) - goto bad; - } - tmp.fp = f; -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) - cobj = PyCapsule_New(tmp.p, sig, 0); -#else - cobj = 
PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0); -#endif - if (!cobj) - goto bad; - if (PyDict_SetItemString(d, name, cobj) < 0) - goto bad; - Py_DECREF(cobj); - Py_DECREF(d); - return 0; -bad: - Py_XDECREF(cobj); - Py_XDECREF(d); - return -1; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); - Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - 
#else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, - #if PY_MAJOR_VERSION >= 3 - 0, - #endif - 0, - 0, - 0, - __pyx_empty_bytes, - __pyx_empty_tuple, - __pyx_empty_tuple, - __pyx_empty_tuple, - __pyx_empty_tuple, - __pyx_empty_tuple, - py_srcfile, - py_funcname, - __pyx_lineno, - __pyx_empty_bytes - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), - py_code, - py_globals, - 0 - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = 
PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - - - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE 
PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/geom.c b/scipy-0.10.1/scipy/spatial/qhull/src/geom.c deleted file mode 100644 index 9b683759ac..0000000000 --- a/scipy-0.10.1/scipy/spatial/qhull/src/geom.c +++ /dev/null @@ -1,1234 +0,0 @@ -/*
      ---------------------------------
    -
    -   geom.c
    -   geometric routines of qhull
    -
    -   see qh-geom.htm and geom.h
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/geom.c#29 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -
    -   infrequent code goes into geom2.c
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*---------------------------------
    -
    -  qh_distplane( point, facet, dist )
    -    return distance from point to facet
    -
    -  returns:
    -    dist
    -    if qh.RANDOMdist, joggles result
    -
    -  notes:
    -    dist > 0 if point is above facet (i.e., outside)
    -    does not error (for qh_sortfacets, qh_outerinner)
    -
    -  see:
    -    qh_distnorm in geom2.c
    -    qh_distplane [geom.c], QhullFacet::distance, and QhullHyperplane::distance are copies
    -*/
    -void qh_distplane(pointT *point, facetT *facet, realT *dist) {
    -  coordT *normal= facet->normal, *coordp, randr;
    -  int k;
    -
    -  switch (qh hull_dim){
    -  case 2:
    -    *dist= facet->offset + point[0] * normal[0] + point[1] * normal[1];
    -    break;
    -  case 3:
    -    *dist= facet->offset + point[0] * normal[0] + point[1] * normal[1] + point[2] * normal[2];
    -    break;
    -  case 4:
    -    *dist= facet->offset+point[0]*normal[0]+point[1]*normal[1]+point[2]*normal[2]+point[3]*normal[3];
    -    break;
    -  case 5:
    -    *dist= facet->offset+point[0]*normal[0]+point[1]*normal[1]+point[2]*normal[2]+point[3]*normal[3]+point[4]*normal[4];
    -    break;
    -  case 6:
    -    *dist= facet->offset+point[0]*normal[0]+point[1]*normal[1]+point[2]*normal[2]+point[3]*normal[3]+point[4]*normal[4]+point[5]*normal[5];
    -    break;
    -  case 7:
    -    *dist= facet->offset+point[0]*normal[0]+point[1]*normal[1]+point[2]*normal[2]+point[3]*normal[3]+point[4]*normal[4]+point[5]*normal[5]+point[6]*normal[6];
    -    break;
    -  case 8:
    -    *dist= facet->offset+point[0]*normal[0]+point[1]*normal[1]+point[2]*normal[2]+point[3]*normal[3]+point[4]*normal[4]+point[5]*normal[5]+point[6]*normal[6]+point[7]*normal[7];
    -    break;
    -  default:
    -    *dist= facet->offset;
    -    coordp= point;
    -    for (k=qh hull_dim; k--; )
    -      *dist += *coordp++ * *normal++;
    -    break;
    -  }
    -  zinc_(Zdistplane);
    -  if (!qh RANDOMdist && qh IStracing < 4)
    -    return;
    -  if (qh RANDOMdist) {
    -    randr= qh_RANDOMint;
    -    *dist += (2.0 * randr / qh_RANDOMmax - 1.0) *
    -      qh RANDOMfactor * qh MAXabs_coord;
    -  }
    -  if (qh IStracing >= 4) {
    -    qh_fprintf(qh ferr, 8001, "qh_distplane: ");
    -    qh_fprintf(qh ferr, 8002, qh_REAL_1, *dist);
    -    qh_fprintf(qh ferr, 8003, "from p%d to f%d\n", qh_pointid(point), facet->id);
    -  }
    -  return;
    -} /* distplane */
    -
    -
    -/*---------------------------------
    -
    -  qh_findbest( point, startfacet, bestoutside, qh_ISnewfacets, qh_NOupper, dist, isoutside, numpart )
    -    find facet that is furthest below a point
    -    for upperDelaunay facets
    -      returns facet only if !qh_NOupper and clearly above
    -
    -  input:
    -    starts search at 'startfacet' (can not be flipped)
    -    if !bestoutside(qh_ALL), stops at qh.MINoutside
    -
    -  returns:
    -    best facet (reports error if NULL)
    -    early out if isoutside defined and bestdist > qh.MINoutside
    -    dist is distance to facet
    -    isoutside is true if point is outside of facet
    -    numpart counts the number of distance tests
    -
    -  see also:
    -    qh_findbestnew()
    -
    -  notes:
    -    If merging (testhorizon), searches horizon facets of coplanar best facets because
    -    after qh_distplane, this and qh_partitionpoint are the most expensive in 3-d
    -      avoid calls to distplane, function calls, and real number operations.
    -    caller traces result
    -    Optimized for outside points.   Tried recording a search set for qh_findhorizon.
    -    Made code more complicated.
    -
    -  when called by qh_partitionvisible():
    -    indicated by qh_ISnewfacets
    -    qh.newfacet_list is list of simplicial, new facets
    -    qh_findbestnew set if qh_sharpnewfacets returns True (to use qh_findbestnew)
    -    qh.bestfacet_notsharp set if qh_sharpnewfacets returns False
    -
    -  when called by qh_findfacet(), qh_partitionpoint(), qh_partitioncoplanar(),
    -                 qh_check_bestdist(), qh_addpoint()
    -    indicated by !qh_ISnewfacets
    -    returns best facet in neighborhood of given facet
    -      this is best facet overall if dist > -   qh.MAXcoplanar
    -        or hull has at least a "spherical" curvature
    -
    -  design:
    -    initialize and test for early exit
    -    repeat while there are better facets
    -      for each neighbor of facet
    -        exit if outside facet found
    -        test for better facet
    -    if point is inside and partitioning
    -      test for new facets with a "sharp" intersection
    -      if so, future calls go to qh_findbestnew()
    -    test horizon facets
    -*/
    -facetT *qh_findbest(pointT *point, facetT *startfacet,
    -                     boolT bestoutside, boolT isnewfacets, boolT noupper,
    -                     realT *dist, boolT *isoutside, int *numpart) {
    -  realT bestdist= -REALmax/2 /* avoid underflow */;
    -  facetT *facet, *neighbor, **neighborp;
    -  facetT *bestfacet= NULL, *lastfacet= NULL;
    -  int oldtrace= qh IStracing;
    -  unsigned int visitid= ++qh visit_id;
    -  int numpartnew=0;
    -  boolT testhorizon = True; /* needed if precise, e.g., rbox c D6 | qhull Q0 Tv */
    -
    -  zinc_(Zfindbest);
    -  if (qh IStracing >= 3 || (qh TRACElevel && qh TRACEpoint >= 0 && qh TRACEpoint == qh_pointid(point))) {
    -    if (qh TRACElevel > qh IStracing)
    -      qh IStracing= qh TRACElevel;
    -    qh_fprintf(qh ferr, 8004, "qh_findbest: point p%d starting at f%d isnewfacets? %d, unless %d exit if > %2.2g\n",
    -             qh_pointid(point), startfacet->id, isnewfacets, bestoutside, qh MINoutside);
    -    qh_fprintf(qh ferr, 8005, "  testhorizon? %d noupper? %d", testhorizon, noupper);
    -    qh_fprintf(qh ferr, 8006, "  Last point added was p%d.", qh furthest_id);
    -    qh_fprintf(qh ferr, 8007, "  Last merge was #%d.  max_outside %2.2g\n", zzval_(Ztotmerge), qh max_outside);
    -  }
    -  if (isoutside)
    -    *isoutside= True;
    -  if (!startfacet->flipped) {  /* test startfacet */
    -    *numpart= 1;
    -    qh_distplane(point, startfacet, dist);  /* this code is duplicated below */
    -    if (!bestoutside && *dist >= qh MINoutside
    -    && (!startfacet->upperdelaunay || !noupper)) {
    -      bestfacet= startfacet;
    -      goto LABELreturn_best;
    -    }
    -    bestdist= *dist;
    -    if (!startfacet->upperdelaunay) {
    -      bestfacet= startfacet;
    -    }
    -  }else
    -    *numpart= 0;
    -  startfacet->visitid= visitid;
    -  facet= startfacet;
    -  while (facet) {
    -    trace4((qh ferr, 4001, "qh_findbest: neighbors of f%d, bestdist %2.2g f%d\n",
    -                facet->id, bestdist, getid_(bestfacet)));
    -    lastfacet= facet;
    -    FOREACHneighbor_(facet) {
    -      if (!neighbor->newfacet && isnewfacets)
    -        continue;
    -      if (neighbor->visitid == visitid)
    -        continue;
    -      neighbor->visitid= visitid;
    -      if (!neighbor->flipped) {  /* code duplicated above */
    -        (*numpart)++;
    -        qh_distplane(point, neighbor, dist);
    -        if (*dist > bestdist) {
    -          if (!bestoutside && *dist >= qh MINoutside
    -          && (!neighbor->upperdelaunay || !noupper)) {
    -            bestfacet= neighbor;
    -            goto LABELreturn_best;
    -          }
    -          if (!neighbor->upperdelaunay) {
    -            bestfacet= neighbor;
    -            bestdist= *dist;
    -            break; /* switch to neighbor */
    -          }else if (!bestfacet) {
    -            bestdist= *dist;
    -            break; /* switch to neighbor */
    -          }
    -        } /* end of *dist>bestdist */
    -      } /* end of !flipped */
    -    } /* end of FOREACHneighbor */
    -    facet= neighbor;  /* non-NULL only if *dist>bestdist */
    -  } /* end of while facet (directed search) */
    -  if (isnewfacets) {
    -    if (!bestfacet) {
    -      bestdist= -REALmax/2;
    -      bestfacet= qh_findbestnew(point, startfacet->next, &bestdist, bestoutside, isoutside, &numpartnew);
    -      testhorizon= False; /* qh_findbestnew calls qh_findbesthorizon */
    -    }else if (!qh findbest_notsharp && bestdist < - qh DISTround) {
    -      if (qh_sharpnewfacets()) {
    -        /* seldom used, qh_findbestnew will retest all facets */
    -        zinc_(Zfindnewsharp);
    -        bestfacet= qh_findbestnew(point, bestfacet, &bestdist, bestoutside, isoutside, &numpartnew);
    -        testhorizon= False; /* qh_findbestnew calls qh_findbesthorizon */
    -        qh findbestnew= True;
    -      }else
    -        qh findbest_notsharp= True;
    -    }
    -  }
    -  if (!bestfacet)
    -    bestfacet= qh_findbestlower(lastfacet, point, &bestdist, numpart);
    -  if (testhorizon)
    -    bestfacet= qh_findbesthorizon(!qh_IScheckmax, point, bestfacet, noupper, &bestdist, &numpartnew);
    -  *dist= bestdist;
    -  if (isoutside && bestdist < qh MINoutside)
    -    *isoutside= False;
    -LABELreturn_best:
    -  zadd_(Zfindbesttot, *numpart);
    -  zmax_(Zfindbestmax, *numpart);
    -  (*numpart) += numpartnew;
    -  qh IStracing= oldtrace;
    -  return bestfacet;
    -}  /* findbest */
    -
    -
    -/*---------------------------------
    -
    -  qh_findbesthorizon( qh_IScheckmax, point, startfacet, qh_NOupper, &bestdist, &numpart )
    -    search coplanar and better horizon facets from startfacet/bestdist
    -    ischeckmax turns off statistics and minsearch update
    -    all arguments must be initialized
    -  returns(ischeckmax):
    -    best facet
    -  returns(!ischeckmax):
    -    best facet that is not upperdelaunay
    -    allows upperdelaunay that is clearly outside
    -  returns:
    -    bestdist is distance to bestfacet
    -    numpart -- updates number of distance tests
    -
    -  notes:
    -    no early out -- use qh_findbest() or qh_findbestnew()
    -    Searches coplanar or better horizon facets
    -
    -  when called by qh_check_maxout() (qh_IScheckmax)
    -    startfacet must be closest to the point
    -      Otherwise, if point is beyond and below startfacet, startfacet may be a local minimum
    -      even though other facets are below the point.
    -    updates facet->maxoutside for good, visited facets
    -    may return NULL
    -
    -    searchdist is qh.max_outside + 2 * DISTround
    -      + max( MINvisible('Vn'), MAXcoplanar('Un'));
    -    This setting is a guess.  It must be at least max_outside + 2*DISTround
    -    because a facet may have a geometric neighbor across a vertex
    -
    -  design:
    -    for each horizon facet of coplanar best facets
    -      continue if clearly inside
    -      unless upperdelaunay or clearly outside
    -         update best facet
    -*/
    -facetT *qh_findbesthorizon(boolT ischeckmax, pointT* point, facetT *startfacet, boolT noupper, realT *bestdist, int *numpart) {
    -  facetT *bestfacet= startfacet;
    -  realT dist;
    -  facetT *neighbor, **neighborp, *facet;
    -  facetT *nextfacet= NULL; /* optimize last facet of coplanarfacetset */
    -  int numpartinit= *numpart, coplanarfacetset_size;
    -  unsigned int visitid= ++qh visit_id;
    -  boolT newbest= False; /* for tracing */
    -  realT minsearch, searchdist;  /* skip facets that are too far from point */
    -
    -  if (!ischeckmax) {
    -    zinc_(Zfindhorizon);
    -  }else {
    -#if qh_MAXoutside
    -    if ((!qh ONLYgood || startfacet->good) && *bestdist > startfacet->maxoutside)
    -      startfacet->maxoutside= *bestdist;
    -#endif
    -  }
    -  searchdist= qh_SEARCHdist; /* multiple of qh.max_outside and precision constants */
    -  minsearch= *bestdist - searchdist;
    -  if (ischeckmax) {
    -    /* Always check coplanar facets.  Needed for RBOX 1000 s Z1 G1e-13 t996564279 | QHULL Tv */
    -    minimize_(minsearch, -searchdist);
    -  }
    -  coplanarfacetset_size= 0;
    -  facet= startfacet;
    -  while (True) {
    -    trace4((qh ferr, 4002, "qh_findbesthorizon: neighbors of f%d bestdist %2.2g f%d ischeckmax? %d noupper? %d minsearch %2.2g searchdist %2.2g\n",
    -                facet->id, *bestdist, getid_(bestfacet), ischeckmax, noupper,
    -                minsearch, searchdist));
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid == visitid)
    -        continue;
    -      neighbor->visitid= visitid;
    -      if (!neighbor->flipped) {
    -        qh_distplane(point, neighbor, &dist);
    -        (*numpart)++;
    -        if (dist > *bestdist) {
    -          if (!neighbor->upperdelaunay || ischeckmax || (!noupper && dist >= qh MINoutside)) {
    -            bestfacet= neighbor;
    -            *bestdist= dist;
    -            newbest= True;
    -            if (!ischeckmax) {
    -              minsearch= dist - searchdist;
    -              if (dist > *bestdist + searchdist) {
    -                zinc_(Zfindjump);  /* everything in qh.coplanarfacetset at least searchdist below */
    -                coplanarfacetset_size= 0;
    -              }
    -            }
    -          }
    -        }else if (dist < minsearch)
    -          continue;  /* if ischeckmax, dist can't be positive */
    -#if qh_MAXoutside
    -        if (ischeckmax && dist > neighbor->maxoutside)
    -          neighbor->maxoutside= dist;
    -#endif
    -      } /* end of !flipped */
    -      if (nextfacet) {
    -        if (!coplanarfacetset_size++) {
    -          SETfirst_(qh coplanarfacetset)= nextfacet;
    -          SETtruncate_(qh coplanarfacetset, 1);
    -        }else
    -          qh_setappend(&qh coplanarfacetset, nextfacet); /* Was needed for RBOX 1000 s W1e-13 P0 t996547055 | QHULL d Qbb Qc Tv
    -                                                 and RBOX 1000 s Z1 G1e-13 t996564279 | qhull Tv  */
    -      }
    -      nextfacet= neighbor;
    -    } /* end of EACHneighbor */
    -    facet= nextfacet;
    -    if (facet)
    -      nextfacet= NULL;
    -    else if (!coplanarfacetset_size)
    -      break;
    -    else if (!--coplanarfacetset_size) {
    -      facet= SETfirstt_(qh coplanarfacetset, facetT);
    -      SETtruncate_(qh coplanarfacetset, 0);
    -    }else
    -      facet= (facetT*)qh_setdellast(qh coplanarfacetset);
    -  } /* while True, for each facet in qh.coplanarfacetset */
    -  if (!ischeckmax) {
    -    zadd_(Zfindhorizontot, *numpart - numpartinit);
    -    zmax_(Zfindhorizonmax, *numpart - numpartinit);
    -    if (newbest)
    -      zinc_(Zparthorizon);
    -  }
    -  trace4((qh ferr, 4003, "qh_findbesthorizon: newbest? %d bestfacet f%d bestdist %2.2g\n", newbest, getid_(bestfacet), *bestdist));
    -  return bestfacet;
    -}  /* findbesthorizon */
    -
    -/*---------------------------------
    -
    -  qh_findbestnew( point, startfacet, dist, isoutside, numpart )
    -    find best newfacet for point
    -    searches all of qh.newfacet_list starting at startfacet
    -    searches horizon facets of coplanar best newfacets
    -    searches all facets if startfacet == qh.facet_list
    -  returns:
    -    best new or horizon facet that is not upperdelaunay
    -    early out if isoutside and not 'Qf'
    -    dist is distance to facet
    -    isoutside is true if point is outside of facet
    -    numpart is number of distance tests
    -
    -  notes:
    -    Always used for merged new facets (see qh_USEfindbestnew)
    -    Avoids upperdelaunay facet unless (isoutside and outside)
    -
    -    Uses qh.visit_id, qh.coplanarfacetset.
    -    If share visit_id with qh_findbest, coplanarfacetset is incorrect.
    -
    -    If merging (testhorizon), searches horizon facets of coplanar best facets because
    -    a point maybe coplanar to the bestfacet, below its horizon facet,
    -    and above a horizon facet of a coplanar newfacet.  For example,
    -      rbox 1000 s Z1 G1e-13 | qhull
    -      rbox 1000 s W1e-13 P0 t992110337 | QHULL d Qbb Qc
    -
    -    qh_findbestnew() used if
    -       qh_sharpnewfacets -- newfacets contains a sharp angle
    -       if many merges, qh_premerge found a merge, or 'Qf' (qh.findbestnew)
    -
    -  see also:
    -    qh_partitionall() and qh_findbest()
    -
    -  design:
    -    for each new facet starting from startfacet
    -      test distance from point to facet
    -      return facet if clearly outside
    -      unless upperdelaunay and a lowerdelaunay exists
    -         update best facet
    -    test horizon facets
    -*/
    -facetT *qh_findbestnew(pointT *point, facetT *startfacet,
    -           realT *dist, boolT bestoutside, boolT *isoutside, int *numpart) {
    -  realT bestdist= -REALmax/2;
    -  facetT *bestfacet= NULL, *facet;
    -  int oldtrace= qh IStracing, i;
    -  unsigned int visitid= ++qh visit_id;
    -  realT distoutside= 0.0;
    -  boolT isdistoutside; /* True if distoutside is defined */
    -  boolT testhorizon = True; /* needed if precise, e.g., rbox c D6 | qhull Q0 Tv */
    -
    -  if (!startfacet) {
    -    if (qh MERGING)
    -      qh_fprintf(qh ferr, 6001, "qhull precision error (qh_findbestnew): merging has formed and deleted a cone of new facets.  Can not continue.\n");
    -    else
    -      qh_fprintf(qh ferr, 6002, "qhull internal error (qh_findbestnew): no new facets for point p%d\n",
    -              qh furthest_id);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  zinc_(Zfindnew);
    -  if (qh BESToutside || bestoutside)
    -    isdistoutside= False;
    -  else {
    -    isdistoutside= True;
    -    distoutside= qh_DISToutside; /* multiple of qh.MINoutside & qh.max_outside, see user.h */
    -  }
    -  if (isoutside)
    -    *isoutside= True;
    -  *numpart= 0;
    -  if (qh IStracing >= 3 || (qh TRACElevel && qh TRACEpoint >= 0 && qh TRACEpoint == qh_pointid(point))) {
    -    if (qh TRACElevel > qh IStracing)
    -      qh IStracing= qh TRACElevel;
    -    qh_fprintf(qh ferr, 8008, "qh_findbestnew: point p%d facet f%d. Stop? %d if dist > %2.2g\n",
    -             qh_pointid(point), startfacet->id, isdistoutside, distoutside);
    -    qh_fprintf(qh ferr, 8009, "  Last point added p%d visitid %d.",  qh furthest_id, visitid);
    -    qh_fprintf(qh ferr, 8010, "  Last merge was #%d.\n", zzval_(Ztotmerge));
    -  }
    -  /* visit all new facets starting with startfacet, maybe qh facet_list */
    -  for (i=0, facet=startfacet; i < 2; i++, facet= qh newfacet_list) {
    -    FORALLfacet_(facet) {
    -      if (facet == startfacet && i)
    -        break;
    -      facet->visitid= visitid;
    -      if (!facet->flipped) {
    -        qh_distplane(point, facet, dist);
    -        (*numpart)++;
    -        if (*dist > bestdist) {
    -          if (!facet->upperdelaunay || *dist >= qh MINoutside) {
    -            bestfacet= facet;
    -            if (isdistoutside && *dist >= distoutside)
    -              goto LABELreturn_bestnew;
    -            bestdist= *dist;
    -          }
    -        }
    -      } /* end of !flipped */
    -    } /* FORALLfacet from startfacet or qh newfacet_list */
    -  }
    -  if (testhorizon || !bestfacet)
    -    bestfacet= qh_findbesthorizon(!qh_IScheckmax, point, bestfacet ? bestfacet : startfacet,
    -                                        !qh_NOupper, &bestdist, numpart);
    -  *dist= bestdist;
    -  if (isoutside && *dist < qh MINoutside)
    -    *isoutside= False;
    -LABELreturn_bestnew:
    -  zadd_(Zfindnewtot, *numpart);
    -  zmax_(Zfindnewmax, *numpart);
    -  trace4((qh ferr, 4004, "qh_findbestnew: bestfacet f%d bestdist %2.2g\n", getid_(bestfacet), *dist));
    -  qh IStracing= oldtrace;
    -  return bestfacet;
    -}  /* findbestnew */
    -
    -/* ============ hyperplane functions -- keep code together [?] ============ */
    -
    -/*---------------------------------
    -
    -  qh_backnormal( rows, numrow, numcol, sign, normal, nearzero )
    -    given an upper-triangular rows array and a sign,
    -    solve for normal equation x using back substitution over rows U
    -
    -  returns:
    -     normal= x
    -
    -     if will not be able to divzero() when normalized(qh.MINdenom_2 and qh.MINdenom_1_2),
    -       if fails on last row
    -         this means that the hyperplane intersects [0,..,1]
    -         sets last coordinate of normal to sign
    -       otherwise
    -         sets tail of normal to [...,sign,0,...], i.e., solves for b= [0...0]
    -         sets nearzero
    -
    -  notes:
    -     assumes numrow == numcol-1
    -
    -     see Golub & van Loan 4.4-9 for back substitution
    -
    -     solves Ux=b where Ax=b and PA=LU
    -     b= [0,...,0,sign or 0]  (sign is either -1 or +1)
    -     last row of A= [0,...,0,1]
    -
    -     1) Ly=Pb == y=b since P only permutes the 0's of   b
    -
    -  design:
    -    for each row from end
    -      perform back substitution
    -      if near zero
    -        use qh_divzero for division
    -        if zero divide and not last row
    -          set tail of normal to 0
    -*/
    -void qh_backnormal(realT **rows, int numrow, int numcol, boolT sign,
    -        coordT *normal, boolT *nearzero) {
    -  int i, j;
    -  coordT *normalp, *normal_tail, *ai, *ak;
    -  realT diagonal;
    -  boolT waszero;
    -  int zerocol= -1;
    -
    -  normalp= normal + numcol - 1;
    -  *normalp--= (sign ? -1.0 : 1.0);
    -  for (i=numrow; i--; ) {
    -    *normalp= 0.0;
    -    ai= rows[i] + i + 1;
    -    ak= normalp+1;
    -    for (j=i+1; j < numcol; j++)
    -      *normalp -= *ai++ * *ak++;
    -    diagonal= (rows[i])[i];
    -    if (fabs_(diagonal) > qh MINdenom_2)
    -      *(normalp--) /= diagonal;
    -    else {
    -      waszero= False;
    -      *normalp= qh_divzero(*normalp, diagonal, qh MINdenom_1_2, &waszero);
    -      if (waszero) {
    -        zerocol= i;
    -        *(normalp--)= (sign ? -1.0 : 1.0);
    -        for (normal_tail= normalp+2; normal_tail < normal + numcol; normal_tail++)
    -          *normal_tail= 0.0;
    -      }else
    -        normalp--;
    -    }
    -  }
    -  if (zerocol != -1) {
    -    zzinc_(Zback0);
    -    *nearzero= True;
    -    trace4((qh ferr, 4005, "qh_backnormal: zero diagonal at column %d.\n", i));
    -    qh_precision("zero diagonal on back substitution");
    -  }
    -} /* backnormal */
    -
    -/*---------------------------------
    -
    -  qh_gausselim( rows, numrow, numcol, sign )
    -    Gaussian elimination with partial pivoting
    -
    -  returns:
    -    rows is upper triangular (includes row exchanges)
    -    flips sign for each row exchange
    -    sets nearzero if pivot[k] < qh.NEARzero[k], else clears it
    -
    -  notes:
    -    if nearzero, the determinant's sign may be incorrect.
    -    assumes numrow <= numcol
    -
    -  design:
    -    for each row
    -      determine pivot and exchange rows if necessary
    -      test for near zero
    -      perform gaussian elimination step
    -*/
    -void qh_gausselim(realT **rows, int numrow, int numcol, boolT *sign, boolT *nearzero) {
    -  realT *ai, *ak, *rowp, *pivotrow;
    -  realT n, pivot, pivot_abs= 0.0, temp;
    -  int i, j, k, pivoti, flip=0;
    -
    -  *nearzero= False;
    -  for (k=0; k < numrow; k++) {
    -    pivot_abs= fabs_((rows[k])[k]);
    -    pivoti= k;
    -    for (i=k+1; i < numrow; i++) {
    -      if ((temp= fabs_((rows[i])[k])) > pivot_abs) {
    -        pivot_abs= temp;
    -        pivoti= i;
    -      }
    -    }
    -    if (pivoti != k) {
    -      rowp= rows[pivoti];
    -      rows[pivoti]= rows[k];
    -      rows[k]= rowp;
    -      *sign ^= 1;
    -      flip ^= 1;
    -    }
    -    if (pivot_abs <= qh NEARzero[k]) {
    -      *nearzero= True;
    -      if (pivot_abs == 0.0) {   /* remainder of column == 0 */
    -        if (qh IStracing >= 4) {
    -          qh_fprintf(qh ferr, 8011, "qh_gausselim: 0 pivot at column %d. (%2.2g < %2.2g)\n", k, pivot_abs, qh DISTround);
    -          qh_printmatrix(qh ferr, "Matrix:", rows, numrow, numcol);
    -        }
    -        zzinc_(Zgauss0);
    -        qh_precision("zero pivot for Gaussian elimination");
    -        goto LABELnextcol;
    -      }
    -    }
    -    pivotrow= rows[k] + k;
    -    pivot= *pivotrow++;  /* signed value of pivot, and remainder of row */
    -    for (i=k+1; i < numrow; i++) {
    -      ai= rows[i] + k;
    -      ak= pivotrow;
    -      n= (*ai++)/pivot;   /* divzero() not needed since |pivot| >= |*ai| */
    -      for (j= numcol - (k+1); j--; )
    -        *ai++ -= n * *ak++;
    -    }
    -  LABELnextcol:
    -    ;
    -  }
    -  wmin_(Wmindenom, pivot_abs);  /* last pivot element */
    -  if (qh IStracing >= 5)
    -    qh_printmatrix(qh ferr, "qh_gausselem: result", rows, numrow, numcol);
    -} /* gausselim */
    -
    -
    -/*---------------------------------
    -
    -  qh_getangle( vect1, vect2 )
    -    returns the dot product of two vectors
    -    if qh.RANDOMdist, joggles result
    -
    -  notes:
    -    the angle may be > 1.0 or < -1.0 because of roundoff errors
    -
    -*/
    -realT qh_getangle(pointT *vect1, pointT *vect2) {
    -  realT angle= 0, randr;
    -  int k;
    -
    -  for (k=qh hull_dim; k--; )
    -    angle += *vect1++ * *vect2++;
    -  if (qh RANDOMdist) {
    -    randr= qh_RANDOMint;
    -    angle += (2.0 * randr / qh_RANDOMmax - 1.0) *
    -      qh RANDOMfactor;
    -  }
    -  trace4((qh ferr, 4006, "qh_getangle: %2.2g\n", angle));
    -  return(angle);
    -} /* getangle */
    -
    -
    -/*---------------------------------
    -
    -  qh_getcenter( vertices )
    -    returns arithmetic center of a set of vertices as a new point
    -
    -  notes:
    -    allocates point array for center
    -*/
    -pointT *qh_getcenter(setT *vertices) {
    -  int k;
    -  pointT *center, *coord;
    -  vertexT *vertex, **vertexp;
    -  int count= qh_setsize(vertices);
    -
    -  if (count < 2) {
    -    qh_fprintf(qh ferr, 6003, "qhull internal error (qh_getcenter): not defined for %d points\n", count);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  center= (pointT *)qh_memalloc(qh normal_size);
    -  for (k=0; k < qh hull_dim; k++) {
    -    coord= center+k;
    -    *coord= 0.0;
    -    FOREACHvertex_(vertices)
    -      *coord += vertex->point[k];
    -    *coord /= count;
    -  }
    -  return(center);
    -} /* getcenter */
    -
    -
    -/*---------------------------------
    -
    -  qh_getcentrum( facet )
    -    returns the centrum for a facet as a new point
    -
    -  notes:
    -    allocates the centrum
    -*/
    -pointT *qh_getcentrum(facetT *facet) {
    -  realT dist;
    -  pointT *centrum, *point;
    -
    -  point= qh_getcenter(facet->vertices);
    -  zzinc_(Zcentrumtests);
    -  qh_distplane(point, facet, &dist);
    -  centrum= qh_projectpoint(point, facet, dist);
    -  qh_memfree(point, qh normal_size);
    -  trace4((qh ferr, 4007, "qh_getcentrum: for f%d, %d vertices dist= %2.2g\n",
    -          facet->id, qh_setsize(facet->vertices), dist));
    -  return centrum;
    -} /* getcentrum */
    -
    -
    -/*---------------------------------
    -
    -  qh_getdistance( facet, neighbor, mindist, maxdist )
    -    returns the maxdist and mindist distance of any vertex from neighbor
    -
    -  returns:
    -    the max absolute value
    -
    -  design:
    -    for each vertex of facet that is not in neighbor
    -      test the distance from vertex to neighbor
    -*/
    -realT qh_getdistance(facetT *facet, facetT *neighbor, realT *mindist, realT *maxdist) {
    -  vertexT *vertex, **vertexp;
    -  realT dist, maxd, mind;
    -
    -  FOREACHvertex_(facet->vertices)
    -    vertex->seen= False;
    -  FOREACHvertex_(neighbor->vertices)
    -    vertex->seen= True;
    -  mind= 0.0;
    -  maxd= 0.0;
    -  FOREACHvertex_(facet->vertices) {
    -    if (!vertex->seen) {
    -      zzinc_(Zbestdist);
    -      qh_distplane(vertex->point, neighbor, &dist);
    -      if (dist < mind)
    -        mind= dist;
    -      else if (dist > maxd)
    -        maxd= dist;
    -    }
    -  }
    -  *mindist= mind;
    -  *maxdist= maxd;
    -  mind= -mind;
    -  if (maxd > mind)
    -    return maxd;
    -  else
    -    return mind;
    -} /* getdistance */
    -
    -
    -/*---------------------------------
    -
    -  qh_normalize( normal, dim, toporient )
    -    normalize a vector and report if too small
    -    does not use min norm
    -
    -  see:
    -    qh_normalize2
    -*/
    -void qh_normalize(coordT *normal, int dim, boolT toporient) {
    -  qh_normalize2( normal, dim, toporient, NULL, NULL);
    -} /* normalize */
    -
    -/*---------------------------------
    -
    -  qh_normalize2( normal, dim, toporient, minnorm, ismin )
    -    normalize a vector and report if too small
    -    qh.MINdenom/MINdenom1 are the upper limits for divide overflow
    -
    -  returns:
    -    normalized vector
    -    flips sign if !toporient
    -    if minnorm non-NULL,
    -      sets ismin if normal < minnorm
    -
    -  notes:
    -    if zero norm
    -       sets all elements to sqrt(1.0/dim)
    -    if divide by zero (divzero())
    -       sets largest element to   +/-1
    -       bumps Znearlysingular
    -
    -  design:
    -    computes norm
    -    test for minnorm
    -    if not near zero
    -      normalizes normal
    -    else if zero norm
    -      sets normal to standard value
    -    else
    -      uses qh_divzero to normalize
    -      if nearzero
    -        sets norm to direction of maximum value
    -*/
    -void qh_normalize2 (coordT *normal, int dim, boolT toporient,
    -            realT *minnorm, boolT *ismin) {
    -  int k;
    -  realT *colp, *maxp, norm= 0, temp, *norm1, *norm2, *norm3;
    -  boolT zerodiv;
    -
    -  norm1= normal+1;
    -  norm2= normal+2;
    -  norm3= normal+3;
    -  if (dim == 2)
    -    norm= sqrt((*normal)*(*normal) + (*norm1)*(*norm1));
    -  else if (dim == 3)
    -    norm= sqrt((*normal)*(*normal) + (*norm1)*(*norm1) + (*norm2)*(*norm2));
    -  else if (dim == 4) {
    -    norm= sqrt((*normal)*(*normal) + (*norm1)*(*norm1) + (*norm2)*(*norm2)
    -               + (*norm3)*(*norm3));
    -  }else if (dim > 4) {
    -    norm= (*normal)*(*normal) + (*norm1)*(*norm1) + (*norm2)*(*norm2)
    -               + (*norm3)*(*norm3);
    -    for (k=dim-4, colp=normal+4; k--; colp++)
    -      norm += (*colp) * (*colp);
    -    norm= sqrt(norm);
    -  }
    -  if (minnorm) {
    -    if (norm < *minnorm)
    -      *ismin= True;
    -    else
    -      *ismin= False;
    -  }
    -  wmin_(Wmindenom, norm);
    -  if (norm > qh MINdenom) {
    -    if (!toporient)
    -      norm= -norm;
    -    *normal /= norm;
    -    *norm1 /= norm;
    -    if (dim == 2)
    -      ; /* all done */
    -    else if (dim == 3)
    -      *norm2 /= norm;
    -    else if (dim == 4) {
    -      *norm2 /= norm;
    -      *norm3 /= norm;
    -    }else if (dim >4) {
    -      *norm2 /= norm;
    -      *norm3 /= norm;
    -      for (k=dim-4, colp=normal+4; k--; )
    -        *colp++ /= norm;
    -    }
    -  }else if (norm == 0.0) {
    -    temp= sqrt(1.0/dim);
    -    for (k=dim, colp=normal; k--; )
    -      *colp++ = temp;
    -  }else {
    -    if (!toporient)
    -      norm= -norm;
    -    for (k=dim, colp=normal; k--; colp++) { /* k used below */
    -      temp= qh_divzero(*colp, norm, qh MINdenom_1, &zerodiv);
    -      if (!zerodiv)
    -        *colp= temp;
    -      else {
    -        maxp= qh_maxabsval(normal, dim);
    -        temp= ((*maxp * norm >= 0.0) ? 1.0 : -1.0);
    -        for (k=dim, colp=normal; k--; colp++)
    -          *colp= 0.0;
    -        *maxp= temp;
    -        zzinc_(Znearlysingular);
    -        trace0((qh ferr, 1, "qh_normalize: norm=%2.2g too small during p%d\n",
    -               norm, qh furthest_id));
    -        return;
    -      }
    -    }
    -  }
    -} /* normalize */
    -
    -
    -/*---------------------------------
    -
    -  qh_projectpoint( point, facet, dist )
    -    project point onto a facet by dist
    -
    -  returns:
    -    returns a new point
    -
    -  notes:
    -    if dist= distplane(point,facet)
    -      this projects point to hyperplane
    -    assumes qh_memfree_() is valid for normal_size
    -*/
    -pointT *qh_projectpoint(pointT *point, facetT *facet, realT dist) {
    -  pointT *newpoint, *np, *normal;
    -  int normsize= qh normal_size;
    -  int k;
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  qh_memalloc_(normsize, freelistp, newpoint, pointT);
    -  np= newpoint;
    -  normal= facet->normal;
    -  for (k=qh hull_dim; k--; )
    -    *(np++)= *point++ - dist * *normal++;
    -  return(newpoint);
    -} /* projectpoint */
    -
    -
    -/*---------------------------------
    -
    -  qh_setfacetplane( facet )
    -    sets the hyperplane for a facet
    -    if qh.RANDOMdist, joggles hyperplane
    -
    -  notes:
    -    uses global buffers qh.gm_matrix and qh.gm_row
    -    overwrites facet->normal if already defined
    -    updates Wnewvertex if PRINTstatistics
    -    sets facet->upperdelaunay if upper envelope of Delaunay triangulation
    -
    -  design:
    -    copy vertex coordinates to qh.gm_matrix/gm_row
    -    compute determinate
    -    if nearzero
    -      recompute determinate with gaussian elimination
    -      if nearzero
    -        force outside orientation by testing interior point
    -*/
    -void qh_setfacetplane(facetT *facet) {
    -  pointT *point;
    -  vertexT *vertex, **vertexp;
    -  int normsize= qh normal_size;
    -  int k,i, oldtrace= 0;
    -  realT dist;
    -  void **freelistp; /* used !qh_NOmem */
    -  coordT *coord, *gmcoord;
    -  pointT *point0= SETfirstt_(facet->vertices, vertexT)->point;
    -  boolT nearzero= False;
    -
    -  zzinc_(Zsetplane);
    -  if (!facet->normal)
    -    qh_memalloc_(normsize, freelistp, facet->normal, coordT);
    -  if (facet == qh tracefacet) {
    -    oldtrace= qh IStracing;
    -    qh IStracing= 5;
    -    qh_fprintf(qh ferr, 8012, "qh_setfacetplane: facet f%d created.\n", facet->id);
    -    qh_fprintf(qh ferr, 8013, "  Last point added to hull was p%d.", qh furthest_id);
    -    if (zzval_(Ztotmerge))
    -      qh_fprintf(qh ferr, 8014, "  Last merge was #%d.", zzval_(Ztotmerge));
    -    qh_fprintf(qh ferr, 8015, "\n\nCurrent summary is:\n");
    -      qh_printsummary(qh ferr);
    -  }
    -  if (qh hull_dim <= 4) {
    -    i= 0;
    -    if (qh RANDOMdist) {
    -      gmcoord= qh gm_matrix;
    -      FOREACHvertex_(facet->vertices) {
    -        qh gm_row[i++]= gmcoord;
    -        coord= vertex->point;
    -        for (k=qh hull_dim; k--; )
    -          *(gmcoord++)= *coord++ * qh_randomfactor(qh RANDOMa, qh RANDOMb);
    -      }
    -    }else {
    -      FOREACHvertex_(facet->vertices)
    -       qh gm_row[i++]= vertex->point;
    -    }
    -    qh_sethyperplane_det(qh hull_dim, qh gm_row, point0, facet->toporient,
    -                facet->normal, &facet->offset, &nearzero);
    -  }
    -  if (qh hull_dim > 4 || nearzero) {
    -    i= 0;
    -    gmcoord= qh gm_matrix;
    -    FOREACHvertex_(facet->vertices) {
    -      if (vertex->point != point0) {
    -        qh gm_row[i++]= gmcoord;
    -        coord= vertex->point;
    -        point= point0;
    -        for (k=qh hull_dim; k--; )
    -          *(gmcoord++)= *coord++ - *point++;
    -      }
    -    }
    -    qh gm_row[i]= gmcoord;  /* for areasimplex */
    -    if (qh RANDOMdist) {
    -      gmcoord= qh gm_matrix;
    -      for (i=qh hull_dim-1; i--; ) {
    -        for (k=qh hull_dim; k--; )
    -          *(gmcoord++) *= qh_randomfactor(qh RANDOMa, qh RANDOMb);
    -      }
    -    }
    -    qh_sethyperplane_gauss(qh hull_dim, qh gm_row, point0, facet->toporient,
    -                facet->normal, &facet->offset, &nearzero);
    -    if (nearzero) {
    -      if (qh_orientoutside(facet)) {
    -        trace0((qh ferr, 2, "qh_setfacetplane: flipped orientation after testing interior_point during p%d\n", qh furthest_id));
    -      /* this is part of using Gaussian Elimination.  For example in 5-d
    -           1 1 1 1 0
    -           1 1 1 1 1
    -           0 0 0 1 0
    -           0 1 0 0 0
    -           1 0 0 0 0
    -           norm= 0.38 0.38 -0.76 0.38 0
    -         has a determinate of 1, but g.e. after subtracting pt. 0 has
    -         0's in the diagonal, even with full pivoting.  It does work
    -         if you subtract pt. 4 instead. */
    -      }
    -    }
    -  }
    -  facet->upperdelaunay= False;
    -  if (qh DELAUNAY) {
    -    if (qh UPPERdelaunay) {     /* matches qh_triangulate_facet and qh.lower_threshold in qh_initbuild */
    -      if (facet->normal[qh hull_dim -1] >= qh ANGLEround * qh_ZEROdelaunay)
    -        facet->upperdelaunay= True;
    -    }else {
    -      if (facet->normal[qh hull_dim -1] > -qh ANGLEround * qh_ZEROdelaunay)
    -        facet->upperdelaunay= True;
    -    }
    -  }
    -  if (qh PRINTstatistics || qh IStracing || qh TRACElevel || qh JOGGLEmax < REALmax) {
    -    qh old_randomdist= qh RANDOMdist;
    -    qh RANDOMdist= False;
    -    FOREACHvertex_(facet->vertices) {
    -      if (vertex->point != point0) {
    -        boolT istrace= False;
    -        zinc_(Zdiststat);
    -        qh_distplane(vertex->point, facet, &dist);
    -        dist= fabs_(dist);
    -        zinc_(Znewvertex);
    -        wadd_(Wnewvertex, dist);
    -        if (dist > wwval_(Wnewvertexmax)) {
    -          wwval_(Wnewvertexmax)= dist;
    -          if (dist > qh max_outside) {
    -            qh max_outside= dist;  /* used by qh_maxouter() */
    -            if (dist > qh TRACEdist)
    -              istrace= True;
    -          }
    -        }else if (-dist > qh TRACEdist)
    -          istrace= True;
    -        if (istrace) {
    -          qh_fprintf(qh ferr, 8016, "qh_setfacetplane: ====== vertex p%d(v%d) increases max_outside to %2.2g for new facet f%d last p%d\n",
    -                qh_pointid(vertex->point), vertex->id, dist, facet->id, qh furthest_id);
    -          qh_errprint("DISTANT", facet, NULL, NULL, NULL);
    -        }
    -      }
    -    }
    -    qh RANDOMdist= qh old_randomdist;
    -  }
    -  if (qh IStracing >= 3) {
    -    qh_fprintf(qh ferr, 8017, "qh_setfacetplane: f%d offset %2.2g normal: ",
    -             facet->id, facet->offset);
    -    for (k=0; k < qh hull_dim; k++)
    -      qh_fprintf(qh ferr, 8018, "%2.2g ", facet->normal[k]);
    -    qh_fprintf(qh ferr, 8019, "\n");
    -  }
    -  if (facet == qh tracefacet)
    -    qh IStracing= oldtrace;
    -} /* setfacetplane */
    -
    -
    -/*---------------------------------
    -
    -  qh_sethyperplane_det( dim, rows, point0, toporient, normal, offset, nearzero )
    -    given dim X dim array indexed by rows[], one row per point,
    -        toporient(flips all signs),
    -        and point0 (any row)
    -    set normalized hyperplane equation from oriented simplex
    -
    -  returns:
    -    normal (normalized)
    -    offset (places point0 on the hyperplane)
    -    sets nearzero if hyperplane not through points
    -
    -  notes:
    -    only defined for dim == 2..4
    -    rows[] is not modified
    -    solves det(P-V_0, V_n-V_0, ..., V_1-V_0)=0, i.e. every point is on hyperplane
    -    see Bower & Woodworth, A programmer's geometry, Butterworths 1983.
    -
    -  derivation of 3-d minnorm
    -    Goal: all vertices V_i within qh.one_merge of hyperplane
    -    Plan: exactly translate the facet so that V_0 is the origin
    -          exactly rotate the facet so that V_1 is on the x-axis and y_2=0.
    -          exactly rotate the effective perturbation to only effect n_0
    -             this introduces a factor of sqrt(3)
    -    n_0 = ((y_2-y_0)*(z_1-z_0) - (z_2-z_0)*(y_1-y_0)) / norm
    -    Let M_d be the max coordinate difference
    -    Let M_a be the greater of M_d and the max abs. coordinate
    -    Let u be machine roundoff and distround be max error for distance computation
    -    The max error for n_0 is sqrt(3) u M_a M_d / norm.  n_1 is approx. 1 and n_2 is approx. 0
    -    The max error for distance of V_1 is sqrt(3) u M_a M_d M_d / norm.  Offset=0 at origin
    -    Then minnorm = 1.8 u M_a M_d M_d / qh.ONEmerge
    -    Note that qh.one_merge is approx. 45.5 u M_a and norm is usually about M_d M_d
    -
    -  derivation of 4-d minnorm
    -    same as above except rotate the facet so that V_1 on x-axis and w_2, y_3, w_3=0
    -     [if two vertices fixed on x-axis, can rotate the other two in yzw.]
    -    n_0 = det3_(...) = y_2 det2_(z_1, w_1, z_3, w_3) = - y_2 w_1 z_3
    -     [all other terms contain at least two factors nearly zero.]
    -    The max error for n_0 is sqrt(4) u M_a M_d M_d / norm
    -    Then minnorm = 2 u M_a M_d M_d M_d / qh.ONEmerge
    -    Note that qh.one_merge is approx. 82 u M_a and norm is usually about M_d M_d M_d
    -*/
    -void qh_sethyperplane_det(int dim, coordT **rows, coordT *point0,
    -          boolT toporient, coordT *normal, realT *offset, boolT *nearzero) {
    -  realT maxround, dist;
    -  int i;
    -  pointT *point;
    -
    -
    -  if (dim == 2) {
    -    normal[0]= dY(1,0);
    -    normal[1]= dX(0,1);
    -    qh_normalize2 (normal, dim, toporient, NULL, NULL);
    -    *offset= -(point0[0]*normal[0]+point0[1]*normal[1]);
    -    *nearzero= False;  /* since nearzero norm => incident points */
    -  }else if (dim == 3) {
    -    normal[0]= det2_(dY(2,0), dZ(2,0),
    -                     dY(1,0), dZ(1,0));
    -    normal[1]= det2_(dX(1,0), dZ(1,0),
    -                     dX(2,0), dZ(2,0));
    -    normal[2]= det2_(dX(2,0), dY(2,0),
    -                     dX(1,0), dY(1,0));
    -    qh_normalize2 (normal, dim, toporient, NULL, NULL);
    -    *offset= -(point0[0]*normal[0] + point0[1]*normal[1]
    -               + point0[2]*normal[2]);
    -    maxround= qh DISTround;
    -    for (i=dim; i--; ) {
    -      point= rows[i];
    -      if (point != point0) {
    -        dist= *offset + (point[0]*normal[0] + point[1]*normal[1]
    -               + point[2]*normal[2]);
    -        if (dist > maxround || dist < -maxround) {
    -          *nearzero= True;
    -          break;
    -        }
    -      }
    -    }
    -  }else if (dim == 4) {
    -    normal[0]= - det3_(dY(2,0), dZ(2,0), dW(2,0),
    -                        dY(1,0), dZ(1,0), dW(1,0),
    -                        dY(3,0), dZ(3,0), dW(3,0));
    -    normal[1]=   det3_(dX(2,0), dZ(2,0), dW(2,0),
    -                        dX(1,0), dZ(1,0), dW(1,0),
    -                        dX(3,0), dZ(3,0), dW(3,0));
    -    normal[2]= - det3_(dX(2,0), dY(2,0), dW(2,0),
    -                        dX(1,0), dY(1,0), dW(1,0),
    -                        dX(3,0), dY(3,0), dW(3,0));
    -    normal[3]=   det3_(dX(2,0), dY(2,0), dZ(2,0),
    -                        dX(1,0), dY(1,0), dZ(1,0),
    -                        dX(3,0), dY(3,0), dZ(3,0));
    -    qh_normalize2 (normal, dim, toporient, NULL, NULL);
    -    *offset= -(point0[0]*normal[0] + point0[1]*normal[1]
    -               + point0[2]*normal[2] + point0[3]*normal[3]);
    -    maxround= qh DISTround;
    -    for (i=dim; i--; ) {
    -      point= rows[i];
    -      if (point != point0) {
    -        dist= *offset + (point[0]*normal[0] + point[1]*normal[1]
    -               + point[2]*normal[2] + point[3]*normal[3]);
    -        if (dist > maxround || dist < -maxround) {
    -          *nearzero= True;
    -          break;
    -        }
    -      }
    -    }
    -  }
    -  if (*nearzero) {
    -    zzinc_(Zminnorm);
    -    trace0((qh ferr, 3, "qh_sethyperplane_det: degenerate norm during p%d.\n", qh furthest_id));
    -    zzinc_(Znearlysingular);
    -  }
    -} /* sethyperplane_det */
    -
    -
    -/*---------------------------------
    -
    -  qh_sethyperplane_gauss( dim, rows, point0, toporient, normal, offset, nearzero )
    -    given(dim-1) X dim array of rows[i]= V_{i+1} - V_0 (point0)
    -    set normalized hyperplane equation from oriented simplex
    -
    -  returns:
    -    normal (normalized)
    -    offset (places point0 on the hyperplane)
    -
    -  notes:
    -    if nearzero
    -      orientation may be incorrect because of incorrect sign flips in gausselim
    -    solves [V_n-V_0,...,V_1-V_0, 0 .. 0 1] * N == [0 .. 0 1]
    -        or [V_n-V_0,...,V_1-V_0, 0 .. 0 1] * N == [0]
    -    i.e., N is normal to the hyperplane, and the unnormalized
    -        distance to [0 .. 1] is either 1 or   0
    -
    -  design:
    -    perform gaussian elimination
    -    flip sign for negative values
    -    perform back substitution
    -    normalize result
    -    compute offset
    -*/
    -void qh_sethyperplane_gauss(int dim, coordT **rows, pointT *point0,
    -                boolT toporient, coordT *normal, coordT *offset, boolT *nearzero) {
    -  coordT *pointcoord, *normalcoef;
    -  int k;
    -  boolT sign= toporient, nearzero2= False;
    -
    -  qh_gausselim(rows, dim-1, dim, &sign, nearzero);
    -  for (k=dim-1; k--; ) {
    -    if ((rows[k])[k] < 0)
    -      sign ^= 1;
    -  }
    -  if (*nearzero) {
    -    zzinc_(Znearlysingular);
    -    trace0((qh ferr, 4, "qh_sethyperplane_gauss: nearly singular or axis parallel hyperplane during p%d.\n", qh furthest_id));
    -    qh_backnormal(rows, dim-1, dim, sign, normal, &nearzero2);
    -  }else {
    -    qh_backnormal(rows, dim-1, dim, sign, normal, &nearzero2);
    -    if (nearzero2) {
    -      zzinc_(Znearlysingular);
    -      trace0((qh ferr, 5, "qh_sethyperplane_gauss: singular or axis parallel hyperplane at normalization during p%d.\n", qh furthest_id));
    -    }
    -  }
    -  if (nearzero2)
    -    *nearzero= True;
    -  qh_normalize2(normal, dim, True, NULL, NULL);
    -  pointcoord= point0;
    -  normalcoef= normal;
    -  *offset= -(*pointcoord++ * *normalcoef++);
    -  for (k=dim-1; k--; )
    -    *offset -= *pointcoord++ * *normalcoef++;
    -} /* sethyperplane_gauss */
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/geom.h b/scipy-0.10.1/scipy/spatial/qhull/src/geom.h
    deleted file mode 100644
    index b9d7627c24..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/geom.h
    +++ /dev/null
    @@ -1,176 +0,0 @@
    -/*
      ---------------------------------
    -
    -  geom.h
    -    header file for geometric routines
    -
    -   see qh-geom.htm and geom.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/geom.h#18 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFgeom
    -#define qhDEFgeom 1
    -
    -#include "libqhull.h"
    -
    -/* ============ -macros- ======================== */
    -
    -/*----------------------------------
    -
    -  fabs_(a)
    -    returns the absolute value of a
    -*/
    -#define fabs_( a ) ((( a ) < 0 ) ? -( a ):( a ))
    -
    -/*----------------------------------
    -
    -  fmax_(a,b)
    -    returns the maximum value of a and b
    -*/
    -#define fmax_( a,b )  ( ( a ) < ( b ) ? ( b ) : ( a ) )
    -
    -/*----------------------------------
    -
    -  fmin_(a,b)
    -    returns the minimum value of a and b
    -*/
    -#define fmin_( a,b )  ( ( a ) > ( b ) ? ( b ) : ( a ) )
    -
    -/*----------------------------------
    -
    -  maximize_(maxval, val)
    -    set maxval to val if val is greater than maxval
    -*/
    -#define maximize_( maxval, val ) { if (( maxval ) < ( val )) ( maxval )= ( val ); }
    -
    -/*----------------------------------
    -
    -  minimize_(minval, val)
    -    set minval to val if val is less than minval
    -*/
    -#define minimize_( minval, val ) { if (( minval ) > ( val )) ( minval )= ( val ); }
    -
    -/*----------------------------------
    -
    -  det2_(a1, a2,
    -        b1, b2)
    -
    -    compute a 2-d determinate
    -*/
    -#define det2_( a1,a2,b1,b2 ) (( a1 )*( b2 ) - ( a2 )*( b1 ))
    -
    -/*----------------------------------
    -
    -  det3_(a1, a2, a3,
    -       b1, b2, b3,
    -       c1, c2, c3)
    -
    -    compute a 3-d determinate
    -*/
    -#define det3_( a1,a2,a3,b1,b2,b3,c1,c2,c3 ) ( ( a1 )*det2_( b2,b3,c2,c3 ) \
    -                - ( b1 )*det2_( a2,a3,c2,c3 ) + ( c1 )*det2_( a2,a3,b2,b3 ) )
    -
    -/*----------------------------------
    -
    -  dX( p1, p2 )
    -  dY( p1, p2 )
    -  dZ( p1, p2 )
    -
    -    given two indices into rows[],
    -
    -    compute the difference between X, Y, or Z coordinates
    -*/
    -#define dX( p1,p2 )  ( *( rows[p1] ) - *( rows[p2] ))
    -#define dY( p1,p2 )  ( *( rows[p1]+1 ) - *( rows[p2]+1 ))
    -#define dZ( p1,p2 )  ( *( rows[p1]+2 ) - *( rows[p2]+2 ))
    -#define dW( p1,p2 )  ( *( rows[p1]+3 ) - *( rows[p2]+3 ))
    -
    -/*============= prototypes in alphabetical order, infrequent at end ======= */
    -
    -void    qh_backnormal(realT **rows, int numrow, int numcol, boolT sign, coordT *normal, boolT *nearzero);
    -void    qh_distplane(pointT *point, facetT *facet, realT *dist);
    -facetT *qh_findbest(pointT *point, facetT *startfacet,
    -                     boolT bestoutside, boolT isnewfacets, boolT noupper,
    -                     realT *dist, boolT *isoutside, int *numpart);
    -facetT *qh_findbesthorizon(boolT ischeckmax, pointT *point,
    -                     facetT *startfacet, boolT noupper, realT *bestdist, int *numpart);
    -facetT *qh_findbestnew(pointT *point, facetT *startfacet, realT *dist,
    -                     boolT bestoutside, boolT *isoutside, int *numpart);
    -void    qh_gausselim(realT **rows, int numrow, int numcol, boolT *sign, boolT *nearzero);
    -realT   qh_getangle(pointT *vect1, pointT *vect2);
    -pointT *qh_getcenter(setT *vertices);
    -pointT *qh_getcentrum(facetT *facet);
    -realT   qh_getdistance(facetT *facet, facetT *neighbor, realT *mindist, realT *maxdist);
    -void    qh_normalize(coordT *normal, int dim, boolT toporient);
    -void    qh_normalize2 (coordT *normal, int dim, boolT toporient,
    -            realT *minnorm, boolT *ismin);
    -pointT *qh_projectpoint(pointT *point, facetT *facet, realT dist);
    -
    -void    qh_setfacetplane(facetT *newfacets);
    -void    qh_sethyperplane_det(int dim, coordT **rows, coordT *point0,
    -              boolT toporient, coordT *normal, realT *offset, boolT *nearzero);
    -void    qh_sethyperplane_gauss(int dim, coordT **rows, pointT *point0,
    -             boolT toporient, coordT *normal, coordT *offset, boolT *nearzero);
    -boolT   qh_sharpnewfacets(void);
    -
    -/*========= infrequently used code in geom2.c =============*/
    -
    -coordT *qh_copypoints(coordT *points, int numpoints, int dimension);
    -void    qh_crossproduct(int dim, realT vecA[3], realT vecB[3], realT vecC[3]);
    -realT   qh_determinant(realT **rows, int dim, boolT *nearzero);
    -realT   qh_detjoggle(pointT *points, int numpoints, int dimension);
    -void    qh_detroundoff(void);
    -realT   qh_detsimplex(pointT *apex, setT *points, int dim, boolT *nearzero);
    -realT   qh_distnorm(int dim, pointT *point, pointT *normal, realT *offsetp);
    -realT   qh_distround(int dimension, realT maxabs, realT maxsumabs);
    -realT   qh_divzero(realT numer, realT denom, realT mindenom1, boolT *zerodiv);
    -realT   qh_facetarea(facetT *facet);
    -realT   qh_facetarea_simplex(int dim, coordT *apex, setT *vertices,
    -          vertexT *notvertex,  boolT toporient, coordT *normal, realT *offset);
    -pointT *qh_facetcenter(setT *vertices);
    -facetT *qh_findgooddist(pointT *point, facetT *facetA, realT *distp, facetT **facetlist);
    -void    qh_getarea(facetT *facetlist);
    -boolT   qh_gram_schmidt(int dim, realT **rows);
    -boolT   qh_inthresholds(coordT *normal, realT *angle);
    -void    qh_joggleinput(void);
    -realT  *qh_maxabsval(realT *normal, int dim);
    -setT   *qh_maxmin(pointT *points, int numpoints, int dimension);
    -realT   qh_maxouter(void);
    -void    qh_maxsimplex(int dim, setT *maxpoints, pointT *points, int numpoints, setT **simplex);
    -realT   qh_minabsval(realT *normal, int dim);
    -int     qh_mindiff(realT *vecA, realT *vecB, int dim);
    -boolT   qh_orientoutside(facetT *facet);
    -void    qh_outerinner(facetT *facet, realT *outerplane, realT *innerplane);
    -coordT  qh_pointdist(pointT *point1, pointT *point2, int dim);
    -void    qh_printmatrix(FILE *fp, const char *string, realT **rows, int numrow, int numcol);
    -void    qh_printpoints(FILE *fp, const char *string, setT *points);
    -void    qh_projectinput(void);
    -void    qh_projectpoints(signed char *project, int n, realT *points,
    -             int numpoints, int dim, realT *newpoints, int newdim);
    -void    qh_rotateinput(realT **rows);
    -void    qh_rotatepoints(realT *points, int numpoints, int dim, realT **rows);
    -void    qh_scaleinput(void);
    -void    qh_scalelast(coordT *points, int numpoints, int dim, coordT low,
    -                   coordT high, coordT newhigh);
    -void    qh_scalepoints(pointT *points, int numpoints, int dim,
    -                realT *newlows, realT *newhighs);
    -boolT   qh_sethalfspace(int dim, coordT *coords, coordT **nextp,
    -              coordT *normal, coordT *offset, coordT *feasible);
    -coordT *qh_sethalfspace_all(int dim, int count, coordT *halfspaces, pointT *feasible);
    -pointT *qh_voronoi_center(int dim, setT *points);
    -
    -#endif /* qhDEFgeom */
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/geom2.c b/scipy-0.10.1/scipy/spatial/qhull/src/geom2.c
    deleted file mode 100644
    index 33e2e4db1f..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/geom2.c
    +++ /dev/null
    @@ -1,2081 +0,0 @@
    -/*
      ---------------------------------
    -
    -
    -   geom2.c
    -   infrequently used geometric routines of qhull
    -
    -   see qh-geom.htm and geom.h
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/geom2.c#29 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -
    -   frequently used code goes into geom.c
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*================== functions in alphabetic order ============*/
    -
    -/*---------------------------------
    -
    -  qh_copypoints( points, numpoints, dimension)
    -    return qh_malloc'd copy of points
    -*/
    -coordT *qh_copypoints(coordT *points, int numpoints, int dimension) {
    -  int size;
    -  coordT *newpoints;
    -
    -  size= numpoints * dimension * (int)sizeof(coordT);
    -  if (!(newpoints=(coordT*)qh_malloc((size_t)size))) {
    -    qh_fprintf(qh ferr, 6004, "qhull error: insufficient memory to copy %d points\n",
    -        numpoints);
    -    qh_errexit(qh_ERRmem, NULL, NULL);
    -  }
    -  memcpy((char *)newpoints, (char *)points, (size_t)size);
    -  return newpoints;
    -} /* copypoints */
    -
    -/*---------------------------------
    -
    -  qh_crossproduct( dim, vecA, vecB, vecC )
    -    crossproduct of 2 dim vectors
    -    C= A x B
    -
    -  notes:
    -    from Glasner, Graphics Gems I, p. 639
    -    only defined for dim==3
    -*/
    -void qh_crossproduct(int dim, realT vecA[3], realT vecB[3], realT vecC[3]){
    -
    -  if (dim == 3) {
    -    vecC[0]=   det2_(vecA[1], vecA[2],
    -                     vecB[1], vecB[2]);
    -    vecC[1]= - det2_(vecA[0], vecA[2],
    -                     vecB[0], vecB[2]);
    -    vecC[2]=   det2_(vecA[0], vecA[1],
    -                     vecB[0], vecB[1]);
    -  }
    -} /* vcross */
    -
    -/*---------------------------------
    -
    -  qh_determinant( rows, dim, nearzero )
    -    compute signed determinant of a square matrix
    -    uses qh.NEARzero to test for degenerate matrices
    -
    -  returns:
    -    determinant
    -    overwrites rows and the matrix
    -    if dim == 2 or 3
    -      nearzero iff determinant < qh NEARzero[dim-1]
    -      (!quite correct, not critical)
    -    if dim >= 4
    -      nearzero iff diagonal[k] < qh NEARzero[k]
    -*/
    -realT qh_determinant(realT **rows, int dim, boolT *nearzero) {
    -  realT det=0;
    -  int i;
    -  boolT sign= False;
    -
    -  *nearzero= False;
    -  if (dim < 2) {
    -    qh_fprintf(qh ferr, 6005, "qhull internal error (qh_determinate): only implemented for dimension >= 2\n");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }else if (dim == 2) {
    -    det= det2_(rows[0][0], rows[0][1],
    -                 rows[1][0], rows[1][1]);
    -    if (fabs_(det) < qh NEARzero[1])  /* not really correct, what should this be? */
    -      *nearzero= True;
    -  }else if (dim == 3) {
    -    det= det3_(rows[0][0], rows[0][1], rows[0][2],
    -                 rows[1][0], rows[1][1], rows[1][2],
    -                 rows[2][0], rows[2][1], rows[2][2]);
    -    if (fabs_(det) < qh NEARzero[2])  /* not really correct, what should this be? */
    -      *nearzero= True;
    -  }else {
    -    qh_gausselim(rows, dim, dim, &sign, nearzero);  /* if nearzero, diagonal still ok*/
    -    det= 1.0;
    -    for (i=dim; i--; )
    -      det *= (rows[i])[i];
    -    if (sign)
    -      det= -det;
    -  }
    -  return det;
    -} /* determinant */
    -
    -/*---------------------------------
    -
    -  qh_detjoggle( points, numpoints, dimension )
    -    determine default max joggle for point array
    -      as qh_distround * qh_JOGGLEdefault
    -
    -  returns:
    -    initial value for JOGGLEmax from points and REALepsilon
    -
    -  notes:
    -    computes DISTround since qh_maxmin not called yet
    -    if qh SCALElast, last dimension will be scaled later to MAXwidth
    -
    -    loop duplicated from qh_maxmin
    -*/
    -realT qh_detjoggle(pointT *points, int numpoints, int dimension) {
    -  realT abscoord, distround, joggle, maxcoord, mincoord;
    -  pointT *point, *pointtemp;
    -  realT maxabs= -REALmax;
    -  realT sumabs= 0;
    -  realT maxwidth= 0;
    -  int k;
    -
    -  for (k=0; k < dimension; k++) {
    -    if (qh SCALElast && k == dimension-1)
    -      abscoord= maxwidth;
    -    else if (qh DELAUNAY && k == dimension-1) /* will qh_setdelaunay() */
    -      abscoord= 2 * maxabs * maxabs;  /* may be low by qh hull_dim/2 */
    -    else {
    -      maxcoord= -REALmax;
    -      mincoord= REALmax;
    -      FORALLpoint_(points, numpoints) {
    -        maximize_(maxcoord, point[k]);
    -        minimize_(mincoord, point[k]);
    -      }
    -      maximize_(maxwidth, maxcoord-mincoord);
    -      abscoord= fmax_(maxcoord, -mincoord);
    -    }
    -    sumabs += abscoord;
    -    maximize_(maxabs, abscoord);
    -  } /* for k */
    -  distround= qh_distround(qh hull_dim, maxabs, sumabs);
    -  joggle= distround * qh_JOGGLEdefault;
    -  maximize_(joggle, REALepsilon * qh_JOGGLEdefault);
    -  trace2((qh ferr, 2001, "qh_detjoggle: joggle=%2.2g maxwidth=%2.2g\n", joggle, maxwidth));
    -  return joggle;
    -} /* detjoggle */
    -
    -/*---------------------------------
    -
    -  qh_detroundoff()
    -    determine maximum roundoff errors from
    -      REALepsilon, REALmax, REALmin, qh.hull_dim, qh.MAXabs_coord,
    -      qh.MAXsumcoord, qh.MAXwidth, qh.MINdenom_1
    -
    -    accounts for qh.SETroundoff, qh.RANDOMdist, qh MERGEexact
    -      qh.premerge_cos, qh.postmerge_cos, qh.premerge_centrum,
    -      qh.postmerge_centrum, qh.MINoutside,
    -      qh_RATIOnearinside, qh_COPLANARratio, qh_WIDEcoplanar
    -
    -  returns:
    -    sets qh.DISTround, etc. (see below)
    -    appends precision constants to qh.qhull_options
    -
    -  see:
    -    qh_maxmin() for qh.NEARzero
    -
    -  design:
    -    determine qh.DISTround for distance computations
    -    determine minimum denominators for qh_divzero
    -    determine qh.ANGLEround for angle computations
    -    adjust qh.premerge_cos,... for roundoff error
    -    determine qh.ONEmerge for maximum error due to a single merge
    -    determine qh.NEARinside, qh.MAXcoplanar, qh.MINvisible,
    -      qh.MINoutside, qh.WIDEfacet
    -    initialize qh.max_vertex and qh.minvertex
    -*/
    -void qh_detroundoff(void) {
    -
    -  qh_option("_max-width", NULL, &qh MAXwidth);
    -  if (!qh SETroundoff) {
    -    qh DISTround= qh_distround(qh hull_dim, qh MAXabs_coord, qh MAXsumcoord);
    -    if (qh RANDOMdist)
    -      qh DISTround += qh RANDOMfactor * qh MAXabs_coord;
    -    qh_option("Error-roundoff", NULL, &qh DISTround);
    -  }
    -  qh MINdenom= qh MINdenom_1 * qh MAXabs_coord;
    -  qh MINdenom_1_2= sqrt(qh MINdenom_1 * qh hull_dim) ;  /* if will be normalized */
    -  qh MINdenom_2= qh MINdenom_1_2 * qh MAXabs_coord;
    -                                              /* for inner product */
    -  qh ANGLEround= 1.01 * qh hull_dim * REALepsilon;
    -  if (qh RANDOMdist)
    -    qh ANGLEround += qh RANDOMfactor;
    -  if (qh premerge_cos < REALmax/2) {
    -    qh premerge_cos -= qh ANGLEround;
    -    if (qh RANDOMdist)
    -      qh_option("Angle-premerge-with-random", NULL, &qh premerge_cos);
    -  }
    -  if (qh postmerge_cos < REALmax/2) {
    -    qh postmerge_cos -= qh ANGLEround;
    -    if (qh RANDOMdist)
    -      qh_option("Angle-postmerge-with-random", NULL, &qh postmerge_cos);
    -  }
    -  qh premerge_centrum += 2 * qh DISTround;    /*2 for centrum and distplane()*/
    -  qh postmerge_centrum += 2 * qh DISTround;
    -  if (qh RANDOMdist && (qh MERGEexact || qh PREmerge))
    -    qh_option("Centrum-premerge-with-random", NULL, &qh premerge_centrum);
    -  if (qh RANDOMdist && qh POSTmerge)
    -    qh_option("Centrum-postmerge-with-random", NULL, &qh postmerge_centrum);
    -  { /* compute ONEmerge, max vertex offset for merging simplicial facets */
    -    realT maxangle= 1.0, maxrho;
    -
    -    minimize_(maxangle, qh premerge_cos);
    -    minimize_(maxangle, qh postmerge_cos);
    -    /* max diameter * sin theta + DISTround for vertex to its hyperplane */
    -    qh ONEmerge= sqrt((realT)qh hull_dim) * qh MAXwidth *
    -      sqrt(1.0 - maxangle * maxangle) + qh DISTround;
    -    maxrho= qh hull_dim * qh premerge_centrum + qh DISTround;
    -    maximize_(qh ONEmerge, maxrho);
    -    maxrho= qh hull_dim * qh postmerge_centrum + qh DISTround;
    -    maximize_(qh ONEmerge, maxrho);
    -    if (qh MERGING)
    -      qh_option("_one-merge", NULL, &qh ONEmerge);
    -  }
    -  qh NEARinside= qh ONEmerge * qh_RATIOnearinside; /* only used if qh KEEPnearinside */
    -  if (qh JOGGLEmax < REALmax/2 && (qh KEEPcoplanar || qh KEEPinside)) {
    -    realT maxdist;             /* adjust qh.NEARinside for joggle */
    -    qh KEEPnearinside= True;
    -    maxdist= sqrt((realT)qh hull_dim) * qh JOGGLEmax + qh DISTround;
    -    maxdist= 2*maxdist;        /* vertex and coplanar point can joggle in opposite directions */
    -    maximize_(qh NEARinside, maxdist);  /* must agree with qh_nearcoplanar() */
    -  }
    -  if (qh KEEPnearinside)
    -    qh_option("_near-inside", NULL, &qh NEARinside);
    -  if (qh JOGGLEmax < qh DISTround) {
    -    qh_fprintf(qh ferr, 6006, "qhull error: the joggle for 'QJn', %.2g, is below roundoff for distance computations, %.2g\n",
    -         qh JOGGLEmax, qh DISTround);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (qh MINvisible > REALmax/2) {
    -    if (!qh MERGING)
    -      qh MINvisible= qh DISTround;
    -    else if (qh hull_dim <= 3)
    -      qh MINvisible= qh premerge_centrum;
    -    else
    -      qh MINvisible= qh_COPLANARratio * qh premerge_centrum;
    -    if (qh APPROXhull && qh MINvisible > qh MINoutside)
    -      qh MINvisible= qh MINoutside;
    -    qh_option("Visible-distance", NULL, &qh MINvisible);
    -  }
    -  if (qh MAXcoplanar > REALmax/2) {
    -    qh MAXcoplanar= qh MINvisible;
    -    qh_option("U-coplanar-distance", NULL, &qh MAXcoplanar);
    -  }
    -  if (!qh APPROXhull) {             /* user may specify qh MINoutside */
    -    qh MINoutside= 2 * qh MINvisible;
    -    if (qh premerge_cos < REALmax/2)
    -      maximize_(qh MINoutside, (1- qh premerge_cos) * qh MAXabs_coord);
    -    qh_option("Width-outside", NULL, &qh MINoutside);
    -  }
    -  qh WIDEfacet= qh MINoutside;
    -  maximize_(qh WIDEfacet, qh_WIDEcoplanar * qh MAXcoplanar);
    -  maximize_(qh WIDEfacet, qh_WIDEcoplanar * qh MINvisible);
    -  qh_option("_wide-facet", NULL, &qh WIDEfacet);
    -  if (qh MINvisible > qh MINoutside + 3 * REALepsilon
    -  && !qh BESToutside && !qh FORCEoutput)
    -    qh_fprintf(qh ferr, 7001, "qhull input warning: minimum visibility V%.2g is greater than \nminimum outside W%.2g.  Flipped facets are likely.\n",
    -             qh MINvisible, qh MINoutside);
    -  qh max_vertex= qh DISTround;
    -  qh min_vertex= -qh DISTround;
    -  /* numeric constants reported in printsummary */
    -} /* detroundoff */
    -
    -/*---------------------------------
    -
    -  qh_detsimplex( apex, points, dim, nearzero )
    -    compute determinant of a simplex with point apex and base points
    -
    -  returns:
    -     signed determinant and nearzero from qh_determinant
    -
    -  notes:
    -     uses qh.gm_matrix/qh.gm_row (assumes they're big enough)
    -
    -  design:
    -    construct qm_matrix by subtracting apex from points
    -    compute determinate
    -*/
    -realT qh_detsimplex(pointT *apex, setT *points, int dim, boolT *nearzero) {
    -  pointT *coorda, *coordp, *gmcoord, *point, **pointp;
    -  coordT **rows;
    -  int k,  i=0;
    -  realT det;
    -
    -  zinc_(Zdetsimplex);
    -  gmcoord= qh gm_matrix;
    -  rows= qh gm_row;
    -  FOREACHpoint_(points) {
    -    if (i == dim)
    -      break;
    -    rows[i++]= gmcoord;
    -    coordp= point;
    -    coorda= apex;
    -    for (k=dim; k--; )
    -      *(gmcoord++)= *coordp++ - *coorda++;
    -  }
    -  if (i < dim) {
    -    qh_fprintf(qh ferr, 6007, "qhull internal error (qh_detsimplex): #points %d < dimension %d\n",
    -               i, dim);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  det= qh_determinant(rows, dim, nearzero);
    -  trace2((qh ferr, 2002, "qh_detsimplex: det=%2.2g for point p%d, dim %d, nearzero? %d\n",
    -          det, qh_pointid(apex), dim, *nearzero));
    -  return det;
    -} /* detsimplex */
    -
    -/*---------------------------------
    -
    -  qh_distnorm( dim, point, normal, offset )
    -    return distance from point to hyperplane at normal/offset
    -
    -  returns:
    -    dist
    -
    -  notes:
    -    dist > 0 if point is outside of hyperplane
    -
    -  see:
    -    qh_distplane in geom.c
    -*/
    -realT qh_distnorm(int dim, pointT *point, pointT *normal, realT *offsetp) {
    -  coordT *normalp= normal, *coordp= point;
    -  realT dist;
    -  int k;
    -
    -  dist= *offsetp;
    -  for (k=dim; k--; )
    -    dist += *(coordp++) * *(normalp++);
    -  return dist;
    -} /* distnorm */
    -
    -/*---------------------------------
    -
    -  qh_distround(dimension, maxabs, maxsumabs )
    -    compute maximum round-off error for a distance computation
    -      to a normalized hyperplane
    -    maxabs is the maximum absolute value of a coordinate
    -    maxsumabs is the maximum possible sum of absolute coordinate values
    -
    -  returns:
    -    max dist round for REALepsilon
    -
    -  notes:
    -    calculate roundoff error according to
    -    Lemma 3.2-1 of Golub and van Loan "Matrix Computation"
    -    use sqrt(dim) since one vector is normalized
    -      or use maxsumabs since one vector is < 1
    -*/
    -realT qh_distround(int dimension, realT maxabs, realT maxsumabs) {
    -  realT maxdistsum, maxround;
    -
    -  maxdistsum= sqrt((realT)dimension) * maxabs;
    -  minimize_( maxdistsum, maxsumabs);
    -  maxround= REALepsilon * (dimension * maxdistsum * 1.01 + maxabs);
    -              /* adds maxabs for offset */
    -  trace4((qh ferr, 4008, "qh_distround: %2.2g maxabs %2.2g maxsumabs %2.2g maxdistsum %2.2g\n",
    -                 maxround, maxabs, maxsumabs, maxdistsum));
    -  return maxround;
    -} /* distround */
    -
    -/*---------------------------------
    -
    -  qh_divzero( numer, denom, mindenom1, zerodiv )
    -    divide by a number that's nearly zero
    -    mindenom1= minimum denominator for dividing into 1.0
    -
    -  returns:
    -    quotient
    -    sets zerodiv and returns 0.0 if it would overflow
    -
    -  design:
    -    if numer is nearly zero and abs(numer) < abs(denom)
    -      return numer/denom
    -    else if numer is nearly zero
    -      return 0 and zerodiv
    -    else if denom/numer non-zero
    -      return numer/denom
    -    else
    -      return 0 and zerodiv
    -*/
    -realT qh_divzero(realT numer, realT denom, realT mindenom1, boolT *zerodiv) {
    -  realT temp, numerx, denomx;
    -
    -
    -  if (numer < mindenom1 && numer > -mindenom1) {
    -    numerx= fabs_(numer);
    -    denomx= fabs_(denom);
    -    if (numerx < denomx) {
    -      *zerodiv= False;
    -      return numer/denom;
    -    }else {
    -      *zerodiv= True;
    -      return 0.0;
    -    }
    -  }
    -  temp= denom/numer;
    -  if (temp > mindenom1 || temp < -mindenom1) {
    -    *zerodiv= False;
    -    return numer/denom;
    -  }else {
    -    *zerodiv= True;
    -    return 0.0;
    -  }
    -} /* divzero */
    -
    -
    -/*---------------------------------
    -
    -  qh_facetarea( facet )
    -    return area for a facet
    -
    -  notes:
    -    if non-simplicial,
    -      uses centrum to triangulate facet and sums the projected areas.
    -    if (qh DELAUNAY),
    -      computes projected area instead for last coordinate
    -    assumes facet->normal exists
    -    projecting tricoplanar facets to the hyperplane does not appear to make a difference
    -
    -  design:
    -    if simplicial
    -      compute area
    -    else
    -      for each ridge
    -        compute area from centrum to ridge
    -    negate area if upper Delaunay facet
    -*/
    -realT qh_facetarea(facetT *facet) {
    -  vertexT *apex;
    -  pointT *centrum;
    -  realT area= 0.0;
    -  ridgeT *ridge, **ridgep;
    -
    -  if (facet->simplicial) {
    -    apex= SETfirstt_(facet->vertices, vertexT);
    -    area= qh_facetarea_simplex(qh hull_dim, apex->point, facet->vertices,
    -                    apex, facet->toporient, facet->normal, &facet->offset);
    -  }else {
    -    if (qh CENTERtype == qh_AScentrum)
    -      centrum= facet->center;
    -    else
    -      centrum= qh_getcentrum(facet);
    -    FOREACHridge_(facet->ridges)
    -      area += qh_facetarea_simplex(qh hull_dim, centrum, ridge->vertices,
    -                 NULL, (boolT)(ridge->top == facet),  facet->normal, &facet->offset);
    -    if (qh CENTERtype != qh_AScentrum)
    -      qh_memfree(centrum, qh normal_size);
    -  }
    -  if (facet->upperdelaunay && qh DELAUNAY)
    -    area= -area;  /* the normal should be [0,...,1] */
    -  trace4((qh ferr, 4009, "qh_facetarea: f%d area %2.2g\n", facet->id, area));
    -  return area;
    -} /* facetarea */
    -
    -/*---------------------------------
    -
    -  qh_facetarea_simplex( dim, apex, vertices, notvertex, toporient, normal, offset )
    -    return area for a simplex defined by
    -      an apex, a base of vertices, an orientation, and a unit normal
    -    if simplicial or tricoplanar facet,
    -      notvertex is defined and it is skipped in vertices
    -
    -  returns:
    -    computes area of simplex projected to plane [normal,offset]
    -    returns 0 if vertex too far below plane (qh WIDEfacet)
    -      vertex can't be apex of tricoplanar facet
    -
    -  notes:
    -    if (qh DELAUNAY),
    -      computes projected area instead for last coordinate
    -    uses qh gm_matrix/gm_row and qh hull_dim
    -    helper function for qh_facetarea
    -
    -  design:
    -    if Notvertex
    -      translate simplex to apex
    -    else
    -      project simplex to normal/offset
    -      translate simplex to apex
    -    if Delaunay
    -      set last row/column to 0 with -1 on diagonal
    -    else
    -      set last row to Normal
    -    compute determinate
    -    scale and flip sign for area
    -*/
    -realT qh_facetarea_simplex(int dim, coordT *apex, setT *vertices,
    -        vertexT *notvertex,  boolT toporient, coordT *normal, realT *offset) {
    -  pointT *coorda, *coordp, *gmcoord;
    -  coordT **rows, *normalp;
    -  int k,  i=0;
    -  realT area, dist;
    -  vertexT *vertex, **vertexp;
    -  boolT nearzero;
    -
    -  gmcoord= qh gm_matrix;
    -  rows= qh gm_row;
    -  FOREACHvertex_(vertices) {
    -    if (vertex == notvertex)
    -      continue;
    -    rows[i++]= gmcoord;
    -    coorda= apex;
    -    coordp= vertex->point;
    -    normalp= normal;
    -    if (notvertex) {
    -      for (k=dim; k--; )
    -        *(gmcoord++)= *coordp++ - *coorda++;
    -    }else {
    -      dist= *offset;
    -      for (k=dim; k--; )
    -        dist += *coordp++ * *normalp++;
    -      if (dist < -qh WIDEfacet) {
    -        zinc_(Znoarea);
    -        return 0.0;
    -      }
    -      coordp= vertex->point;
    -      normalp= normal;
    -      for (k=dim; k--; )
    -        *(gmcoord++)= (*coordp++ - dist * *normalp++) - *coorda++;
    -    }
    -  }
    -  if (i != dim-1) {
    -    qh_fprintf(qh ferr, 6008, "qhull internal error (qh_facetarea_simplex): #points %d != dim %d -1\n",
    -               i, dim);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  rows[i]= gmcoord;
    -  if (qh DELAUNAY) {
    -    for (i=0; i < dim-1; i++)
    -      rows[i][dim-1]= 0.0;
    -    for (k=dim; k--; )
    -      *(gmcoord++)= 0.0;
    -    rows[dim-1][dim-1]= -1.0;
    -  }else {
    -    normalp= normal;
    -    for (k=dim; k--; )
    -      *(gmcoord++)= *normalp++;
    -  }
    -  zinc_(Zdetsimplex);
    -  area= qh_determinant(rows, dim, &nearzero);
    -  if (toporient)
    -    area= -area;
    -  area *= qh AREAfactor;
    -  trace4((qh ferr, 4010, "qh_facetarea_simplex: area=%2.2g for point p%d, toporient %d, nearzero? %d\n",
    -          area, qh_pointid(apex), toporient, nearzero));
    -  return area;
    -} /* facetarea_simplex */
    -
    -/*---------------------------------
    -
    -  qh_facetcenter( vertices )
    -    return Voronoi center (Voronoi vertex) for a facet's vertices
    -
    -  returns:
    -    return temporary point equal to the center
    -
    -  see:
    -    qh_voronoi_center()
    -*/
    -pointT *qh_facetcenter(setT *vertices) {
    -  setT *points= qh_settemp(qh_setsize(vertices));
    -  vertexT *vertex, **vertexp;
    -  pointT *center;
    -
    -  FOREACHvertex_(vertices)
    -    qh_setappend(&points, vertex->point);
    -  center= qh_voronoi_center(qh hull_dim-1, points);
    -  qh_settempfree(&points);
    -  return center;
    -} /* facetcenter */
    -
    -/*---------------------------------
    -
    -  qh_findgooddist( point, facetA, dist, facetlist )
    -    find best good facet visible for point from facetA
    -    assumes facetA is visible from point
    -
    -  returns:
    -    best facet, i.e., good facet that is furthest from point
    -      distance to best facet
    -      NULL if none
    -
    -    moves good, visible facets (and some other visible facets)
    -      to end of qh facet_list
    -
    -  notes:
    -    uses qh visit_id
    -
    -  design:
    -    initialize bestfacet if facetA is good
    -    move facetA to end of facetlist
    -    for each facet on facetlist
    -      for each unvisited neighbor of facet
    -        move visible neighbors to end of facetlist
    -        update best good neighbor
    -        if no good neighbors, update best facet
    -*/
    -facetT *qh_findgooddist(pointT *point, facetT *facetA, realT *distp,
    -               facetT **facetlist) {
    -  realT bestdist= -REALmax, dist;
    -  facetT *neighbor, **neighborp, *bestfacet=NULL, *facet;
    -  boolT goodseen= False;
    -
    -  if (facetA->good) {
    -    zzinc_(Zcheckpart);  /* calls from check_bestdist occur after print stats */
    -    qh_distplane(point, facetA, &bestdist);
    -    bestfacet= facetA;
    -    goodseen= True;
    -  }
    -  qh_removefacet(facetA);
    -  qh_appendfacet(facetA);
    -  *facetlist= facetA;
    -  facetA->visitid= ++qh visit_id;
    -  FORALLfacet_(*facetlist) {
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid == qh visit_id)
    -        continue;
    -      neighbor->visitid= qh visit_id;
    -      if (goodseen && !neighbor->good)
    -        continue;
    -      zzinc_(Zcheckpart);
    -      qh_distplane(point, neighbor, &dist);
    -      if (dist > 0) {
    -        qh_removefacet(neighbor);
    -        qh_appendfacet(neighbor);
    -        if (neighbor->good) {
    -          goodseen= True;
    -          if (dist > bestdist) {
    -            bestdist= dist;
    -            bestfacet= neighbor;
    -          }
    -        }
    -      }
    -    }
    -  }
    -  if (bestfacet) {
    -    *distp= bestdist;
    -    trace2((qh ferr, 2003, "qh_findgooddist: p%d is %2.2g above good facet f%d\n",
    -      qh_pointid(point), bestdist, bestfacet->id));
    -    return bestfacet;
    -  }
    -  trace4((qh ferr, 4011, "qh_findgooddist: no good facet for p%d above f%d\n",
    -      qh_pointid(point), facetA->id));
    -  return NULL;
    -}  /* findgooddist */
    -
    -/*---------------------------------
    -
    -  qh_getarea( facetlist )
    -    set area of all facets in facetlist
    -    collect statistics
    -    nop if hasAreaVolume
    -
    -  returns:
    -    sets qh totarea/totvol to total area and volume of convex hull
    -    for Delaunay triangulation, computes projected area of the lower or upper hull
    -      ignores upper hull if qh ATinfinity
    -
    -  notes:
    -    could compute outer volume by expanding facet area by rays from interior
    -    the following attempt at perpendicular projection underestimated badly:
    -      qh.totoutvol += (-dist + facet->maxoutside + qh DISTround)
    -                            * area/ qh hull_dim;
    -  design:
    -    for each facet on facetlist
    -      compute facet->area
    -      update qh.totarea and qh.totvol
    -*/
    -void qh_getarea(facetT *facetlist) {
    -  realT area;
    -  realT dist;
    -  facetT *facet;
    -
    -  if (qh hasAreaVolume)
    -    return;
    -  if (qh REPORTfreq)
    -    qh_fprintf(qh ferr, 8020, "computing area of each facet and volume of the convex hull\n");
    -  else
    -    trace1((qh ferr, 1001, "qh_getarea: computing volume and area for each facet\n"));
    -  qh totarea= qh totvol= 0.0;
    -  FORALLfacet_(facetlist) {
    -    if (!facet->normal)
    -      continue;
    -    if (facet->upperdelaunay && qh ATinfinity)
    -      continue;
    -    if (!facet->isarea) {
    -      facet->f.area= qh_facetarea(facet);
    -      facet->isarea= True;
    -    }
    -    area= facet->f.area;
    -    if (qh DELAUNAY) {
    -      if (facet->upperdelaunay == qh UPPERdelaunay)
    -        qh totarea += area;
    -    }else {
    -      qh totarea += area;
    -      qh_distplane(qh interior_point, facet, &dist);
    -      qh totvol += -dist * area/ qh hull_dim;
    -    }
    -    if (qh PRINTstatistics) {
    -      wadd_(Wareatot, area);
    -      wmax_(Wareamax, area);
    -      wmin_(Wareamin, area);
    -    }
    -  }
    -  qh hasAreaVolume= True;
    -} /* getarea */
    -
    -/*---------------------------------
    -
    -  qh_gram_schmidt( dim, row )
    -    implements Gram-Schmidt orthogonalization by rows
    -
    -  returns:
    -    false if zero norm
    -    overwrites rows[dim][dim]
    -
    -  notes:
    -    see Golub & van Loan Algorithm 6.2-2
    -    overflow due to small divisors not handled
    -
    -  design:
    -    for each row
    -      compute norm for row
    -      if non-zero, normalize row
    -      for each remaining rowA
    -        compute inner product of row and rowA
    -        reduce rowA by row * inner product
    -*/
    -boolT qh_gram_schmidt(int dim, realT **row) {
    -  realT *rowi, *rowj, norm;
    -  int i, j, k;
    -
    -  for (i=0; i < dim; i++) {
    -    rowi= row[i];
    -    for (norm= 0.0, k= dim; k--; rowi++)
    -      norm += *rowi * *rowi;
    -    norm= sqrt(norm);
    -    wmin_(Wmindenom, norm);
    -    if (norm == 0.0)  /* either 0 or overflow due to sqrt */
    -      return False;
    -    for (k=dim; k--; )
    -      *(--rowi) /= norm;
    -    for (j=i+1; j < dim; j++) {
    -      rowj= row[j];
    -      for (norm= 0.0, k=dim; k--; )
    -        norm += *rowi++ * *rowj++;
    -      for (k=dim; k--; )
    -        *(--rowj) -= *(--rowi) * norm;
    -    }
    -  }
    -  return True;
    -} /* gram_schmidt */
    -
    -
    -/*---------------------------------
    -
    -  qh_inthresholds( normal, angle )
    -    return True if normal within qh.lower_/upper_threshold
    -
    -  returns:
    -    estimate of angle by summing of threshold diffs
    -      angle may be NULL
    -      smaller "angle" is better
    -
    -  notes:
    -    invalid if qh.SPLITthresholds
    -
    -  see:
    -    qh.lower_threshold in qh_initbuild()
    -    qh_initthresholds()
    -
    -  design:
    -    for each dimension
    -      test threshold
    -*/
    -boolT qh_inthresholds(coordT *normal, realT *angle) {
    -  boolT within= True;
    -  int k;
    -  realT threshold;
    -
    -  if (angle)
    -    *angle= 0.0;
    -  for (k=0; k < qh hull_dim; k++) {
    -    threshold= qh lower_threshold[k];
    -    if (threshold > -REALmax/2) {
    -      if (normal[k] < threshold)
    -        within= False;
    -      if (angle) {
    -        threshold -= normal[k];
    -        *angle += fabs_(threshold);
    -      }
    -    }
    -    if (qh upper_threshold[k] < REALmax/2) {
    -      threshold= qh upper_threshold[k];
    -      if (normal[k] > threshold)
    -        within= False;
    -      if (angle) {
    -        threshold -= normal[k];
    -        *angle += fabs_(threshold);
    -      }
    -    }
    -  }
    -  return within;
    -} /* inthresholds */
    -
    -
    -/*---------------------------------
    -
    -  qh_joggleinput()
    -    randomly joggle input to Qhull by qh.JOGGLEmax
    -    initial input is qh.first_point/qh.num_points of qh.hull_dim
    -      repeated calls use qh.input_points/qh.num_points
    -
    -  returns:
    -    joggles points at qh.first_point/qh.num_points
    -    copies data to qh.input_points/qh.input_malloc if first time
    -    determines qh.JOGGLEmax if it was zero
    -    if qh.DELAUNAY
    -      computes the Delaunay projection of the joggled points
    -
    -  notes:
    -    if qh.DELAUNAY, unnecessarily joggles the last coordinate
    -    the initial 'QJn' may be set larger than qh_JOGGLEmaxincrease
    -
    -  design:
    -    if qh.DELAUNAY
    -      set qh.SCALElast for reduced precision errors
    -    if first call
    -      initialize qh.input_points to the original input points
    -      if qh.JOGGLEmax == 0
    -        determine default qh.JOGGLEmax
    -    else
    -      increase qh.JOGGLEmax according to qh.build_cnt
    -    joggle the input by adding a random number in [-qh.JOGGLEmax,qh.JOGGLEmax]
    -    if qh.DELAUNAY
    -      sets the Delaunay projection
    -*/
    -void qh_joggleinput(void) {
    -  int i, seed, size;
    -  coordT *coordp, *inputp;
    -  realT randr, randa, randb;
    -
    -  if (!qh input_points) { /* first call */
    -    qh input_points= qh first_point;
    -    qh input_malloc= qh POINTSmalloc;
    -    size= qh num_points * qh hull_dim * sizeof(coordT);
    -    if (!(qh first_point=(coordT*)qh_malloc((size_t)size))) {
    -      qh_fprintf(qh ferr, 6009, "qhull error: insufficient memory to joggle %d points\n",
    -          qh num_points);
    -      qh_errexit(qh_ERRmem, NULL, NULL);
    -    }
    -    qh POINTSmalloc= True;
    -    if (qh JOGGLEmax == 0.0) {
    -      qh JOGGLEmax= qh_detjoggle(qh input_points, qh num_points, qh hull_dim);
    -      qh_option("QJoggle", NULL, &qh JOGGLEmax);
    -    }
    -  }else {                 /* repeated call */
    -    if (!qh RERUN && qh build_cnt > qh_JOGGLEretry) {
    -      if (((qh build_cnt-qh_JOGGLEretry-1) % qh_JOGGLEagain) == 0) {
    -        realT maxjoggle= qh MAXwidth * qh_JOGGLEmaxincrease;
    -        if (qh JOGGLEmax < maxjoggle) {
    -          qh JOGGLEmax *= qh_JOGGLEincrease;
    -          minimize_(qh JOGGLEmax, maxjoggle);
    -        }
    -      }
    -    }
    -    qh_option("QJoggle", NULL, &qh JOGGLEmax);
    -  }
    -  if (qh build_cnt > 1 && qh JOGGLEmax > fmax_(qh MAXwidth/4, 0.1)) {
    -      qh_fprintf(qh ferr, 6010, "qhull error: the current joggle for 'QJn', %.2g, is too large for the width\nof the input.  If possible, recompile Qhull with higher-precision reals.\n",
    -                qh JOGGLEmax);
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  /* for some reason, using qh ROTATErandom and qh_RANDOMseed does not repeat the run. Use 'TRn' instead */
    -  seed= qh_RANDOMint;
    -  qh_option("_joggle-seed", &seed, NULL);
    -  trace0((qh ferr, 6, "qh_joggleinput: joggle input by %2.2g with seed %d\n",
    -    qh JOGGLEmax, seed));
    -  inputp= qh input_points;
    -  coordp= qh first_point;
    -  randa= 2.0 * qh JOGGLEmax/qh_RANDOMmax;
    -  randb= -qh JOGGLEmax;
    -  size= qh num_points * qh hull_dim;
    -  for (i=size; i--; ) {
    -    randr= qh_RANDOMint;
    -    *(coordp++)= *(inputp++) + (randr * randa + randb);
    -  }
    -  if (qh DELAUNAY) {
    -    qh last_low= qh last_high= qh last_newhigh= REALmax;
    -    qh_setdelaunay(qh hull_dim, qh num_points, qh first_point);
    -  }
    -} /* joggleinput */
    -
    -/*---------------------------------
    -
    -  qh_maxabsval( normal, dim )
    -    return pointer to maximum absolute value of a dim vector
    -    returns NULL if dim=0
    -*/
    -realT *qh_maxabsval(realT *normal, int dim) {
    -  realT maxval= -REALmax;
    -  realT *maxp= NULL, *colp, absval;
    -  int k;
    -
    -  for (k=dim, colp= normal; k--; colp++) {
    -    absval= fabs_(*colp);
    -    if (absval > maxval) {
    -      maxval= absval;
    -      maxp= colp;
    -    }
    -  }
    -  return maxp;
    -} /* maxabsval */
    -
    -
    -/*---------------------------------
    -
    -  qh_maxmin( points, numpoints, dimension )
    -    return max/min points for each dimension
    -    determine max and min coordinates
    -
    -  returns:
    -    returns a temporary set of max and min points
    -      may include duplicate points. Does not include qh.GOODpoint
    -    sets qh.NEARzero, qh.MAXabs_coord, qh.MAXsumcoord, qh.MAXwidth
    -         qh.MAXlastcoord, qh.MINlastcoord
    -    initializes qh.max_outside, qh.min_vertex, qh.WAScoplanar, qh.ZEROall_ok
    -
    -  notes:
    -    loop duplicated in qh_detjoggle()
    -
    -  design:
    -    initialize global precision variables
    -    checks definition of REAL...
    -    for each dimension
    -      for each point
    -        collect maximum and minimum point
    -      collect maximum of maximums and minimum of minimums
    -      determine qh.NEARzero for Gaussian Elimination
    -*/
    -setT *qh_maxmin(pointT *points, int numpoints, int dimension) {
    -  int k;
    -  realT maxcoord, temp;
    -  pointT *minimum, *maximum, *point, *pointtemp;
    -  setT *set;
    -
    -  qh max_outside= 0.0;
    -  qh MAXabs_coord= 0.0;
    -  qh MAXwidth= -REALmax;
    -  qh MAXsumcoord= 0.0;
    -  qh min_vertex= 0.0;
    -  qh WAScoplanar= False;
    -  if (qh ZEROcentrum)
    -    qh ZEROall_ok= True;
    -  if (REALmin < REALepsilon && REALmin < REALmax && REALmin > -REALmax
    -  && REALmax > 0.0 && -REALmax < 0.0)
    -    ; /* all ok */
    -  else {
    -    qh_fprintf(qh ferr, 6011, "qhull error: floating point constants in user.h are wrong\n\
    -REALepsilon %g REALmin %g REALmax %g -REALmax %g\n",
    -             REALepsilon, REALmin, REALmax, -REALmax);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  set= qh_settemp(2*dimension);
    -  for (k=0; k < dimension; k++) {
    -    if (points == qh GOODpointp)
    -      minimum= maximum= points + dimension;
    -    else
    -      minimum= maximum= points;
    -    FORALLpoint_(points, numpoints) {
    -      if (point == qh GOODpointp)
    -        continue;
    -      if (maximum[k] < point[k])
    -        maximum= point;
    -      else if (minimum[k] > point[k])
    -        minimum= point;
    -    }
    -    if (k == dimension-1) {
    -      qh MINlastcoord= minimum[k];
    -      qh MAXlastcoord= maximum[k];
    -    }
    -    if (qh SCALElast && k == dimension-1)
    -      maxcoord= qh MAXwidth;
    -    else {
    -      maxcoord= fmax_(maximum[k], -minimum[k]);
    -      if (qh GOODpointp) {
    -        temp= fmax_(qh GOODpointp[k], -qh GOODpointp[k]);
    -        maximize_(maxcoord, temp);
    -      }
    -      temp= maximum[k] - minimum[k];
    -      maximize_(qh MAXwidth, temp);
    -    }
    -    maximize_(qh MAXabs_coord, maxcoord);
    -    qh MAXsumcoord += maxcoord;
    -    qh_setappend(&set, maximum);
    -    qh_setappend(&set, minimum);
    -    /* calculation of qh NEARzero is based on error formula 4.4-13 of
    -       Golub & van Loan, authors say n^3 can be ignored and 10 be used in
    -       place of rho */
    -    qh NEARzero[k]= 80 * qh MAXsumcoord * REALepsilon;
    -  }
    -  if (qh IStracing >=1)
    -    qh_printpoints(qh ferr, "qh_maxmin: found the max and min points(by dim):", set);
    -  return(set);
    -} /* maxmin */
    -
    -/*---------------------------------
    -
    -  qh_maxouter()
    -    return maximum distance from facet to outer plane
    -    normally this is qh.max_outside+qh.DISTround
    -    does not include qh.JOGGLEmax
    -
    -  see:
    -    qh_outerinner()
    -
    -  notes:
    -    need to add another qh.DISTround if testing actual point with computation
    -
    -  for joggle:
    -    qh_setfacetplane() updated qh.max_outer for Wnewvertexmax (max distance to vertex)
    -    need to use Wnewvertexmax since could have a coplanar point for a high
    -      facet that is replaced by a low facet
    -    need to add qh.JOGGLEmax if testing input points
    -*/
    -realT qh_maxouter(void) {
    -  realT dist;
    -
    -  dist= fmax_(qh max_outside, qh DISTround);
    -  dist += qh DISTround;
    -  trace4((qh ferr, 4012, "qh_maxouter: max distance from facet to outer plane is %2.2g max_outside is %2.2g\n", dist, qh max_outside));
    -  return dist;
    -} /* maxouter */
    -
    -/*---------------------------------
    -
    -  qh_maxsimplex( dim, maxpoints, points, numpoints, simplex )
    -    determines maximum simplex for a set of points
    -    starts from points already in simplex
    -    skips qh.GOODpointp (assumes that it isn't in maxpoints)
    -
    -  returns:
    -    simplex with dim+1 points
    -
    -  notes:
    -    assumes at least pointsneeded points in points
    -    maximizes determinate for x,y,z,w, etc.
    -    uses maxpoints as long as determinate is clearly non-zero
    -
    -  design:
    -    initialize simplex with at least two points
    -      (find points with max or min x coordinate)
    -    for each remaining dimension
    -      add point that maximizes the determinate
    -        (use points from maxpoints first)
    -*/
    -void qh_maxsimplex(int dim, setT *maxpoints, pointT *points, int numpoints, setT **simplex) {
    -  pointT *point, **pointp, *pointtemp, *maxpoint, *minx=NULL, *maxx=NULL;
    -  boolT nearzero, maxnearzero= False;
    -  int k, sizinit;
    -  realT maxdet= -REALmax, det, mincoord= REALmax, maxcoord= -REALmax;
    -
    -  sizinit= qh_setsize(*simplex);
    -  if (sizinit < 2) {
    -    if (qh_setsize(maxpoints) >= 2) {
    -      FOREACHpoint_(maxpoints) {
    -        if (maxcoord < point[0]) {
    -          maxcoord= point[0];
    -          maxx= point;
    -        }
    -        if (mincoord > point[0]) {
    -          mincoord= point[0];
    -          minx= point;
    -        }
    -      }
    -    }else {
    -      FORALLpoint_(points, numpoints) {
    -        if (point == qh GOODpointp)
    -          continue;
    -        if (maxcoord < point[0]) {
    -          maxcoord= point[0];
    -          maxx= point;
    -        }
    -        if (mincoord > point[0]) {
    -          mincoord= point[0];
    -          minx= point;
    -        }
    -      }
    -    }
    -    qh_setunique(simplex, minx);
    -    if (qh_setsize(*simplex) < 2)
    -      qh_setunique(simplex, maxx);
    -    sizinit= qh_setsize(*simplex);
    -    if (sizinit < 2) {
    -      qh_precision("input has same x coordinate");
    -      if (zzval_(Zsetplane) > qh hull_dim+1) {
    -        qh_fprintf(qh ferr, 6012, "qhull precision error (qh_maxsimplex for voronoi_center):\n%d points with the same x coordinate.\n",
    -                 qh_setsize(maxpoints)+numpoints);
    -        qh_errexit(qh_ERRprec, NULL, NULL);
    -      }else {
    -        qh_fprintf(qh ferr, 6013, "qhull input error: input is less than %d-dimensional since it has the same x coordinate\n", qh hull_dim);
    -        qh_errexit(qh_ERRinput, NULL, NULL);
    -      }
    -    }
    -  }
    -  for (k=sizinit; k < dim+1; k++) {
    -    maxpoint= NULL;
    -    maxdet= -REALmax;
    -    FOREACHpoint_(maxpoints) {
    -      if (!qh_setin(*simplex, point)) {
    -        det= qh_detsimplex(point, *simplex, k, &nearzero);
    -        if ((det= fabs_(det)) > maxdet) {
    -          maxdet= det;
    -          maxpoint= point;
    -          maxnearzero= nearzero;
    -        }
    -      }
    -    }
    -    if (!maxpoint || maxnearzero) {
    -      zinc_(Zsearchpoints);
    -      if (!maxpoint) {
    -        trace0((qh ferr, 7, "qh_maxsimplex: searching all points for %d-th initial vertex.\n", k+1));
    -      }else {
    -        trace0((qh ferr, 8, "qh_maxsimplex: searching all points for %d-th initial vertex, better than p%d det %2.2g\n",
    -                k+1, qh_pointid(maxpoint), maxdet));
    -      }
    -      FORALLpoint_(points, numpoints) {
    -        if (point == qh GOODpointp)
    -          continue;
    -        if (!qh_setin(*simplex, point)) {
    -          det= qh_detsimplex(point, *simplex, k, &nearzero);
    -          if ((det= fabs_(det)) > maxdet) {
    -            maxdet= det;
    -            maxpoint= point;
    -            maxnearzero= nearzero;
    -          }
    -        }
    -      }
    -    } /* !maxpoint */
    -    if (!maxpoint) {
    -      qh_fprintf(qh ferr, 6014, "qhull internal error (qh_maxsimplex): not enough points available\n");
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }
    -    qh_setappend(simplex, maxpoint);
    -    trace1((qh ferr, 1002, "qh_maxsimplex: selected point p%d for %d`th initial vertex, det=%2.2g\n",
    -            qh_pointid(maxpoint), k+1, maxdet));
    -  } /* k */
    -} /* maxsimplex */
    -
    -/*---------------------------------
    -
    -  qh_minabsval( normal, dim )
    -    return minimum absolute value of a dim vector
    -*/
    -realT qh_minabsval(realT *normal, int dim) {
    -  realT minval= 0;
    -  realT maxval= 0;
    -  realT *colp;
    -  int k;
    -
    -  for (k=dim, colp=normal; k--; colp++) {
    -    maximize_(maxval, *colp);
    -    minimize_(minval, *colp);
    -  }
    -  return fmax_(maxval, -minval);
    -} /* minabsval */
    -
    -
    -/*---------------------------------
    -
    -  qh_mindif ( vecA, vecB, dim )
    -    return index of min abs. difference of two vectors
    -*/
    -int qh_mindiff(realT *vecA, realT *vecB, int dim) {
    -  realT mindiff= REALmax, diff;
    -  realT *vecAp= vecA, *vecBp= vecB;
    -  int k, mink= 0;
    -
    -  for (k=0; k < dim; k++) {
    -    diff= *vecAp++ - *vecBp++;
    -    diff= fabs_(diff);
    -    if (diff < mindiff) {
    -      mindiff= diff;
    -      mink= k;
    -    }
    -  }
    -  return mink;
    -} /* mindiff */
    -
    -
    -
    -/*---------------------------------
    -
    -  qh_orientoutside( facet  )
    -    make facet outside oriented via qh.interior_point
    -
    -  returns:
    -    True if facet reversed orientation.
    -*/
    -boolT qh_orientoutside(facetT *facet) {
    -  int k;
    -  realT dist;
    -
    -  qh_distplane(qh interior_point, facet, &dist);
    -  if (dist > 0) {
    -    for (k=qh hull_dim; k--; )
    -      facet->normal[k]= -facet->normal[k];
    -    facet->offset= -facet->offset;
    -    return True;
    -  }
    -  return False;
    -} /* orientoutside */
    -
    -/*---------------------------------
    -
    -  qh_outerinner( facet, outerplane, innerplane  )
    -    if facet and qh.maxoutdone (i.e., qh_check_maxout)
    -      returns outer and inner plane for facet
    -    else
    -      returns maximum outer and inner plane
    -    accounts for qh.JOGGLEmax
    -
    -  see:
    -    qh_maxouter(), qh_check_bestdist(), qh_check_points()
    -
    -  notes:
    -    outerplaner or innerplane may be NULL
    -    facet is const
    -    Does not error (QhullFacet)
    -
    -    includes qh.DISTround for actual points
    -    adds another qh.DISTround if testing with floating point arithmetic
    -*/
    -void qh_outerinner(facetT *facet, realT *outerplane, realT *innerplane) {
    -  realT dist, mindist;
    -  vertexT *vertex, **vertexp;
    -
    -  if (outerplane) {
    -    if (!qh_MAXoutside || !facet || !qh maxoutdone) {
    -      *outerplane= qh_maxouter();       /* includes qh.DISTround */
    -    }else { /* qh_MAXoutside ... */
    -#if qh_MAXoutside
    -      *outerplane= facet->maxoutside + qh DISTround;
    -#endif
    -
    -    }
    -    if (qh JOGGLEmax < REALmax/2)
    -      *outerplane += qh JOGGLEmax * sqrt((realT)qh hull_dim);
    -  }
    -  if (innerplane) {
    -    if (facet) {
    -      mindist= REALmax;
    -      FOREACHvertex_(facet->vertices) {
    -        zinc_(Zdistio);
    -        qh_distplane(vertex->point, facet, &dist);
    -        minimize_(mindist, dist);
    -      }
    -      *innerplane= mindist - qh DISTround;
    -    }else
    -      *innerplane= qh min_vertex - qh DISTround;
    -    if (qh JOGGLEmax < REALmax/2)
    -      *innerplane -= qh JOGGLEmax * sqrt((realT)qh hull_dim);
    -  }
    -} /* outerinner */
    -
    -/*---------------------------------
    -
    -  qh_pointdist( point1, point2, dim )
    -    return distance between two points
    -
    -  notes:
    -    returns distance squared if 'dim' is negative
    -*/
    -coordT qh_pointdist(pointT *point1, pointT *point2, int dim) {
    -  coordT dist, diff;
    -  int k;
    -
    -  dist= 0.0;
    -  for (k= (dim > 0 ? dim : -dim); k--; ) {
    -    diff= *point1++ - *point2++;
    -    dist += diff * diff;
    -  }
    -  if (dim > 0)
    -    return(sqrt(dist));
    -  return dist;
    -} /* pointdist */
    -
    -
    -/*---------------------------------
    -
    -  qh_printmatrix( fp, string, rows, numrow, numcol )
    -    print matrix to fp given by row vectors
    -    print string as header
    -
    -  notes:
    -    print a vector by qh_printmatrix(fp, "", &vect, 1, len)
    -*/
    -void qh_printmatrix(FILE *fp, const char *string, realT **rows, int numrow, int numcol) {
    -  realT *rowp;
    -  realT r; /*bug fix*/
    -  int i,k;
    -
    -  qh_fprintf(fp, 9001, "%s\n", string);
    -  for (i=0; i < numrow; i++) {
    -    rowp= rows[i];
    -    for (k=0; k < numcol; k++) {
    -      r= *rowp++;
    -      qh_fprintf(fp, 9002, "%6.3g ", r);
    -    }
    -    qh_fprintf(fp, 9003, "\n");
    -  }
    -} /* printmatrix */
    -
    -
    -/*---------------------------------
    -
    -  qh_printpoints( fp, string, points )
    -    print pointids to fp for a set of points
    -    if string, prints string and 'p' point ids
    -*/
    -void qh_printpoints(FILE *fp, const char *string, setT *points) {
    -  pointT *point, **pointp;
    -
    -  if (string) {
    -    qh_fprintf(fp, 9004, "%s", string);
    -    FOREACHpoint_(points)
    -      qh_fprintf(fp, 9005, " p%d", qh_pointid(point));
    -    qh_fprintf(fp, 9006, "\n");
    -  }else {
    -    FOREACHpoint_(points)
    -      qh_fprintf(fp, 9007, " %d", qh_pointid(point));
    -    qh_fprintf(fp, 9008, "\n");
    -  }
    -} /* printpoints */
    -
    -
    -/*---------------------------------
    -
    -  qh_projectinput()
    -    project input points using qh.lower_bound/upper_bound and qh DELAUNAY
    -    if qh.lower_bound[k]=qh.upper_bound[k]= 0,
    -      removes dimension k
    -    if halfspace intersection
    -      removes dimension k from qh.feasible_point
    -    input points in qh first_point, num_points, input_dim
    -
    -  returns:
    -    new point array in qh first_point of qh hull_dim coordinates
    -    sets qh POINTSmalloc
    -    if qh DELAUNAY
    -      projects points to paraboloid
    -      lowbound/highbound is also projected
    -    if qh ATinfinity
    -      adds point "at-infinity"
    -    if qh POINTSmalloc
    -      frees old point array
    -
    -  notes:
    -    checks that qh.hull_dim agrees with qh.input_dim, PROJECTinput, and DELAUNAY
    -
    -
    -  design:
    -    sets project[k] to -1 (delete), 0 (keep), 1 (add for Delaunay)
    -    determines newdim and newnum for qh hull_dim and qh num_points
    -    projects points to newpoints
    -    projects qh.lower_bound to itself
    -    projects qh.upper_bound to itself
    -    if qh DELAUNAY
    -      if qh ATINFINITY
    -        projects points to paraboloid
    -        computes "infinity" point as vertex average and 10% above all points
    -      else
    -        uses qh_setdelaunay to project points to paraboloid
    -*/
    -void qh_projectinput(void) {
    -  int k,i;
    -  int newdim= qh input_dim, newnum= qh num_points;
    -  signed char *project;
    -  int size= (qh input_dim+1)*sizeof(*project);
    -  pointT *newpoints, *coord, *infinity;
    -  realT paraboloid, maxboloid= 0;
    -
    -  project= (signed char*)qh_memalloc(size);
    -  memset((char*)project, 0, (size_t)size);
    -  for (k=0; k < qh input_dim; k++) {   /* skip Delaunay bound */
    -    if (qh lower_bound[k] == 0 && qh upper_bound[k] == 0) {
    -      project[k]= -1;
    -      newdim--;
    -    }
    -  }
    -  if (qh DELAUNAY) {
    -    project[k]= 1;
    -    newdim++;
    -    if (qh ATinfinity)
    -      newnum++;
    -  }
    -  if (newdim != qh hull_dim) {
    -    qh_fprintf(qh ferr, 6015, "qhull internal error (qh_projectinput): dimension after projection %d != hull_dim %d\n", newdim, qh hull_dim);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  if (!(newpoints=(coordT*)qh_malloc(newnum*newdim*sizeof(coordT)))){
    -    qh_fprintf(qh ferr, 6016, "qhull error: insufficient memory to project %d points\n",
    -           qh num_points);
    -    qh_errexit(qh_ERRmem, NULL, NULL);
    -  }
    -  qh_projectpoints(project, qh input_dim+1, qh first_point,
    -                    qh num_points, qh input_dim, newpoints, newdim);
    -  trace1((qh ferr, 1003, "qh_projectinput: updating lower and upper_bound\n"));
    -  qh_projectpoints(project, qh input_dim+1, qh lower_bound,
    -                    1, qh input_dim+1, qh lower_bound, newdim+1);
    -  qh_projectpoints(project, qh input_dim+1, qh upper_bound,
    -                    1, qh input_dim+1, qh upper_bound, newdim+1);
    -  if (qh HALFspace) {
    -    if (!qh feasible_point) {
    -      qh_fprintf(qh ferr, 6017, "qhull internal error (qh_projectinput): HALFspace defined without qh.feasible_point\n");
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }
    -    qh_projectpoints(project, qh input_dim, qh feasible_point,
    -                      1, qh input_dim, qh feasible_point, newdim);
    -  }
    -  qh_memfree(project, (qh input_dim+1)*sizeof(*project));
    -  if (qh POINTSmalloc)
    -    qh_free(qh first_point);
    -  qh first_point= newpoints;
    -  qh POINTSmalloc= True;
    -  if (qh DELAUNAY && qh ATinfinity) {
    -    coord= qh first_point;
    -    infinity= qh first_point + qh hull_dim * qh num_points;
    -    for (k=qh hull_dim-1; k--; )
    -      infinity[k]= 0.0;
    -    for (i=qh num_points; i--; ) {
    -      paraboloid= 0.0;
    -      for (k=0; k < qh hull_dim-1; k++) {
    -        paraboloid += *coord * *coord;
    -        infinity[k] += *coord;
    -        coord++;
    -      }
    -      *(coord++)= paraboloid;
    -      maximize_(maxboloid, paraboloid);
    -    }
    -    /* coord == infinity */
    -    for (k=qh hull_dim-1; k--; )
    -      *(coord++) /= qh num_points;
    -    *(coord++)= maxboloid * 1.1;
    -    qh num_points++;
    -    trace0((qh ferr, 9, "qh_projectinput: projected points to paraboloid for Delaunay\n"));
    -  }else if (qh DELAUNAY)  /* !qh ATinfinity */
    -    qh_setdelaunay( qh hull_dim, qh num_points, qh first_point);
    -} /* projectinput */
    -
    -
    -/*---------------------------------
    -
    -  qh_projectpoints( project, n, points, numpoints, dim, newpoints, newdim )
    -    project points/numpoints/dim to newpoints/newdim
    -    if project[k] == -1
    -      delete dimension k
    -    if project[k] == 1
    -      add dimension k by duplicating previous column
    -    n is size of project
    -
    -  notes:
    -    newpoints may be points if only adding dimension at end
    -
    -  design:
    -    check that 'project' and 'newdim' agree
    -    for each dimension
    -      if project == -1
    -        skip dimension
    -      else
    -        determine start of column in newpoints
    -        determine start of column in points
    -          if project == +1, duplicate previous column
    -        copy dimension (column) from points to newpoints
    -*/
    -void qh_projectpoints(signed char *project, int n, realT *points,
    -        int numpoints, int dim, realT *newpoints, int newdim) {
    -  int testdim= dim, oldk=0, newk=0, i,j=0,k;
    -  realT *newp, *oldp;
    -
    -  for (k=0; k < n; k++)
    -    testdim += project[k];
    -  if (testdim != newdim) {
    -    qh_fprintf(qh ferr, 6018, "qhull internal error (qh_projectpoints): newdim %d should be %d after projection\n",
    -      newdim, testdim);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  for (j=0; j= dim)
    -          continue;
    -        oldp= points+oldk;
    -      }else
    -        oldp= points+oldk++;
    -      for (i=numpoints; i--; ) {
    -        *newp= *oldp;
    -        newp += newdim;
    -        oldp += dim;
    -      }
    -    }
    -    if (oldk >= dim)
    -      break;
    -  }
    -  trace1((qh ferr, 1004, "qh_projectpoints: projected %d points from dim %d to dim %d\n",
    -    numpoints, dim, newdim));
    -} /* projectpoints */
    -
    -
    -/*---------------------------------
    -
    -  qh_rotateinput( rows )
    -    rotate input using row matrix
    -    input points given by qh first_point, num_points, hull_dim
    -    assumes rows[dim] is a scratch buffer
    -    if qh POINTSmalloc, overwrites input points, else mallocs a new array
    -
    -  returns:
    -    rotated input
    -    sets qh POINTSmalloc
    -
    -  design:
    -    see qh_rotatepoints
    -*/
    -void qh_rotateinput(realT **rows) {
    -
    -  if (!qh POINTSmalloc) {
    -    qh first_point= qh_copypoints(qh first_point, qh num_points, qh hull_dim);
    -    qh POINTSmalloc= True;
    -  }
    -  qh_rotatepoints(qh first_point, qh num_points, qh hull_dim, rows);
    -}  /* rotateinput */
    -
    -/*---------------------------------
    -
    -  qh_rotatepoints( points, numpoints, dim, row )
    -    rotate numpoints points by a d-dim row matrix
    -    assumes rows[dim] is a scratch buffer
    -
    -  returns:
    -    rotated points in place
    -
    -  design:
    -    for each point
    -      for each coordinate
    -        use row[dim] to compute partial inner product
    -      for each coordinate
    -        rotate by partial inner product
    -*/
    -void qh_rotatepoints(realT *points, int numpoints, int dim, realT **row) {
    -  realT *point, *rowi, *coord= NULL, sum, *newval;
    -  int i,j,k;
    -
    -  if (qh IStracing >= 1)
    -    qh_printmatrix(qh ferr, "qh_rotatepoints: rotate points by", row, dim, dim);
    -  for (point= points, j= numpoints; j--; point += dim) {
    -    newval= row[dim];
    -    for (i=0; i < dim; i++) {
    -      rowi= row[i];
    -      coord= point;
    -      for (sum= 0.0, k= dim; k--; )
    -        sum += *rowi++ * *coord++;
    -      *(newval++)= sum;
    -    }
    -    for (k=dim; k--; )
    -      *(--coord)= *(--newval);
    -  }
    -} /* rotatepoints */
    -
    -
    -/*---------------------------------
    -
    -  qh_scaleinput()
    -    scale input points using qh low_bound/high_bound
    -    input points given by qh first_point, num_points, hull_dim
    -    if qh POINTSmalloc, overwrites input points, else mallocs a new array
    -
    -  returns:
    -    scales coordinates of points to low_bound[k], high_bound[k]
    -    sets qh POINTSmalloc
    -
    -  design:
    -    see qh_scalepoints
    -*/
    -void qh_scaleinput(void) {
    -
    -  if (!qh POINTSmalloc) {
    -    qh first_point= qh_copypoints(qh first_point, qh num_points, qh hull_dim);
    -    qh POINTSmalloc= True;
    -  }
    -  qh_scalepoints(qh first_point, qh num_points, qh hull_dim,
    -       qh lower_bound, qh upper_bound);
    -}  /* scaleinput */
    -
    -/*---------------------------------
    -
    -  qh_scalelast( points, numpoints, dim, low, high, newhigh )
    -    scale last coordinate to [0,m] for Delaunay triangulations
    -    input points given by points, numpoints, dim
    -
    -  returns:
    -    changes scale of last coordinate from [low, high] to [0, newhigh]
    -    overwrites last coordinate of each point
    -    saves low/high/newhigh in qh.last_low, etc. for qh_setdelaunay()
    -
    -  notes:
    -    when called by qh_setdelaunay, low/high may not match actual data
    -
    -  design:
    -    compute scale and shift factors
    -    apply to last coordinate of each point
    -*/
    -void qh_scalelast(coordT *points, int numpoints, int dim, coordT low,
    -                   coordT high, coordT newhigh) {
    -  realT scale, shift;
    -  coordT *coord;
    -  int i;
    -  boolT nearzero= False;
    -
    -  trace4((qh ferr, 4013, "qh_scalelast: scale last coordinate from [%2.2g, %2.2g] to [0,%2.2g]\n",
    -    low, high, newhigh));
    -  qh last_low= low;
    -  qh last_high= high;
    -  qh last_newhigh= newhigh;
    -  scale= qh_divzero(newhigh, high - low,
    -                  qh MINdenom_1, &nearzero);
    -  if (nearzero) {
    -    if (qh DELAUNAY)
    -      qh_fprintf(qh ferr, 6019, "qhull input error: can not scale last coordinate.  Input is cocircular\n   or cospherical.   Use option 'Qz' to add a point at infinity.\n");
    -    else
    -      qh_fprintf(qh ferr, 6020, "qhull input error: can not scale last coordinate.  New bounds [0, %2.2g] are too wide for\nexisting bounds [%2.2g, %2.2g] (width %2.2g)\n",
    -                newhigh, low, high, high-low);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  shift= - low * newhigh / (high-low);
    -  coord= points + dim - 1;
    -  for (i=numpoints; i--; coord += dim)
    -    *coord= *coord * scale + shift;
    -} /* scalelast */
    -
    -/*---------------------------------
    -
    -  qh_scalepoints( points, numpoints, dim, newlows, newhighs )
    -    scale points to new lowbound and highbound
    -    retains old bound when newlow= -REALmax or newhigh= +REALmax
    -
    -  returns:
    -    scaled points
    -    overwrites old points
    -
    -  design:
    -    for each coordinate
    -      compute current low and high bound
    -      compute scale and shift factors
    -      scale all points
    -      enforce new low and high bound for all points
    -*/
    -void qh_scalepoints(pointT *points, int numpoints, int dim,
    -        realT *newlows, realT *newhighs) {
    -  int i,k;
    -  realT shift, scale, *coord, low, high, newlow, newhigh, mincoord, maxcoord;
    -  boolT nearzero= False;
    -
    -  for (k=0; k < dim; k++) {
    -    newhigh= newhighs[k];
    -    newlow= newlows[k];
    -    if (newhigh > REALmax/2 && newlow < -REALmax/2)
    -      continue;
    -    low= REALmax;
    -    high= -REALmax;
    -    for (i=numpoints, coord=points+k; i--; coord += dim) {
    -      minimize_(low, *coord);
    -      maximize_(high, *coord);
    -    }
    -    if (newhigh > REALmax/2)
    -      newhigh= high;
    -    if (newlow < -REALmax/2)
    -      newlow= low;
    -    if (qh DELAUNAY && k == dim-1 && newhigh < newlow) {
    -      qh_fprintf(qh ferr, 6021, "qhull input error: 'Qb%d' or 'QB%d' inverts paraboloid since high bound %.2g < low bound %.2g\n",
    -               k, k, newhigh, newlow);
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    scale= qh_divzero(newhigh - newlow, high - low,
    -                  qh MINdenom_1, &nearzero);
    -    if (nearzero) {
    -      qh_fprintf(qh ferr, 6022, "qhull input error: %d'th dimension's new bounds [%2.2g, %2.2g] too wide for\nexisting bounds [%2.2g, %2.2g]\n",
    -              k, newlow, newhigh, low, high);
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    shift= (newlow * high - low * newhigh)/(high-low);
    -    coord= points+k;
    -    for (i=numpoints; i--; coord += dim)
    -      *coord= *coord * scale + shift;
    -    coord= points+k;
    -    if (newlow < newhigh) {
    -      mincoord= newlow;
    -      maxcoord= newhigh;
    -    }else {
    -      mincoord= newhigh;
    -      maxcoord= newlow;
    -    }
    -    for (i=numpoints; i--; coord += dim) {
    -      minimize_(*coord, maxcoord);  /* because of roundoff error */
    -      maximize_(*coord, mincoord);
    -    }
    -    trace0((qh ferr, 10, "qh_scalepoints: scaled %d'th coordinate [%2.2g, %2.2g] to [%.2g, %.2g] for %d points by %2.2g and shifted %2.2g\n",
    -      k, low, high, newlow, newhigh, numpoints, scale, shift));
    -  }
    -} /* scalepoints */
    -
    -
    -/*---------------------------------
    -
    -  qh_setdelaunay( dim, count, points )
    -    project count points to dim-d paraboloid for Delaunay triangulation
    -
    -    dim is one more than the dimension of the input set
    -    assumes dim is at least 3 (i.e., at least a 2-d Delaunay triangulation)
    -
    -    points is a dim*count realT array.  The first dim-1 coordinates
    -    are the coordinates of the first input point.  array[dim] is
    -    the first coordinate of the second input point.  array[2*dim] is
    -    the first coordinate of the third input point.
    -
    -    if qh.last_low defined (i.e., 'Qbb' called qh_scalelast)
    -      calls qh_scalelast to scale the last coordinate the same as the other points
    -
    -  returns:
    -    for each point
    -      sets point[dim-1] to sum of squares of coordinates
    -    scale points to 'Qbb' if needed
    -
    -  notes:
    -    to project one point, use
    -      qh_setdelaunay(qh hull_dim, 1, point)
    -
    -    Do not use options 'Qbk', 'QBk', or 'QbB' since they scale
    -    the coordinates after the original projection.
    -
    -*/
    -void qh_setdelaunay(int dim, int count, pointT *points) {
    -  int i, k;
    -  coordT *coordp, coord;
    -  realT paraboloid;
    -
    -  trace0((qh ferr, 11, "qh_setdelaunay: project %d points to paraboloid for Delaunay triangulation\n", count));
    -  coordp= points;
    -  for (i=0; i < count; i++) {
    -    coord= *coordp++;
    -    paraboloid= coord*coord;
    -    for (k=dim-2; k--; ) {
    -      coord= *coordp++;
    -      paraboloid += coord*coord;
    -    }
    -    *coordp++ = paraboloid;
    -  }
    -  if (qh last_low < REALmax/2)
    -    qh_scalelast(points, count, dim, qh last_low, qh last_high, qh last_newhigh);
    -} /* setdelaunay */
    -
    -
    -/*---------------------------------
    -
    -  qh_sethalfspace( dim, coords, nextp, normal, offset, feasible )
    -    set point to dual of halfspace relative to feasible point
    -    halfspace is normal coefficients and offset.
    -
    -  returns:
    -    false if feasible point is outside of hull (error message already reported)
    -    overwrites coordinates for point at dim coords
    -    nextp= next point (coords)
    -
    -  design:
    -    compute distance from feasible point to halfspace
    -    divide each normal coefficient by -dist
    -*/
    -boolT qh_sethalfspace(int dim, coordT *coords, coordT **nextp,
    -         coordT *normal, coordT *offset, coordT *feasible) {
    -  coordT *normp= normal, *feasiblep= feasible, *coordp= coords;
    -  realT dist;
    -  realT r; /*bug fix*/
    -  int k;
    -  boolT zerodiv;
    -
    -  dist= *offset;
    -  for (k=dim; k--; )
    -    dist += *(normp++) * *(feasiblep++);
    -  if (dist > 0)
    -    goto LABELerroroutside;
    -  normp= normal;
    -  if (dist < -qh MINdenom) {
    -    for (k=dim; k--; )
    -      *(coordp++)= *(normp++) / -dist;
    -  }else {
    -    for (k=dim; k--; ) {
    -      *(coordp++)= qh_divzero(*(normp++), -dist, qh MINdenom_1, &zerodiv);
    -      if (zerodiv)
    -        goto LABELerroroutside;
    -    }
    -  }
    -  *nextp= coordp;
    -  if (qh IStracing >= 4) {
    -    qh_fprintf(qh ferr, 8021, "qh_sethalfspace: halfspace at offset %6.2g to point: ", *offset);
    -    for (k=dim, coordp=coords; k--; ) {
    -      r= *coordp++;
    -      qh_fprintf(qh ferr, 8022, " %6.2g", r);
    -    }
    -    qh_fprintf(qh ferr, 8023, "\n");
    -  }
    -  return True;
    -LABELerroroutside:
    -  feasiblep= feasible;
    -  normp= normal;
    -  qh_fprintf(qh ferr, 6023, "qhull input error: feasible point is not clearly inside halfspace\nfeasible point: ");
    -  for (k=dim; k--; )
    -    qh_fprintf(qh ferr, 8024, qh_REAL_1, r=*(feasiblep++));
    -  qh_fprintf(qh ferr, 8025, "\n     halfspace: ");
    -  for (k=dim; k--; )
    -    qh_fprintf(qh ferr, 8026, qh_REAL_1, r=*(normp++));
    -  qh_fprintf(qh ferr, 8027, "\n     at offset: ");
    -  qh_fprintf(qh ferr, 8028, qh_REAL_1, *offset);
    -  qh_fprintf(qh ferr, 8029, " and distance: ");
    -  qh_fprintf(qh ferr, 8030, qh_REAL_1, dist);
    -  qh_fprintf(qh ferr, 8031, "\n");
    -  return False;
    -} /* sethalfspace */
    -
    -/*---------------------------------
    -
    -  qh_sethalfspace_all( dim, count, halfspaces, feasible )
    -    generate dual for halfspace intersection with feasible point
    -    array of count halfspaces
    -      each halfspace is normal coefficients followed by offset
    -      the origin is inside the halfspace if the offset is negative
    -
    -  returns:
    -    malloc'd array of count X dim-1 points
    -
    -  notes:
    -    call before qh_init_B or qh_initqhull_globals
    -    unused/untested code: please email bradb@shore.net if this works ok for you
    -    If using option 'Fp', also set qh feasible_point. It is a malloc'd array
    -      that is freed by qh_freebuffers.
    -
    -  design:
    -    see qh_sethalfspace
    -*/
    -coordT *qh_sethalfspace_all(int dim, int count, coordT *halfspaces, pointT *feasible) {
    -  int i, newdim;
    -  pointT *newpoints;
    -  coordT *coordp, *normalp, *offsetp;
    -
    -  trace0((qh ferr, 12, "qh_sethalfspace_all: compute dual for halfspace intersection\n"));
    -  newdim= dim - 1;
    -  if (!(newpoints=(coordT*)qh_malloc(count*newdim*sizeof(coordT)))){
    -    qh_fprintf(qh ferr, 6024, "qhull error: insufficient memory to compute dual of %d halfspaces\n",
    -          count);
    -    qh_errexit(qh_ERRmem, NULL, NULL);
    -  }
    -  coordp= newpoints;
    -  normalp= halfspaces;
    -  for (i=0; i < count; i++) {
    -    offsetp= normalp + newdim;
    -    if (!qh_sethalfspace(newdim, coordp, &coordp, normalp, offsetp, feasible)) {
    -      qh_fprintf(qh ferr, 8032, "The halfspace was at index %d\n", i);
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    normalp= offsetp + 1;
    -  }
    -  return newpoints;
    -} /* sethalfspace_all */
    -
    -
    -/*---------------------------------
    -
    -  qh_sharpnewfacets()
    -
    -  returns:
    -    true if could be an acute angle (facets in different quadrants)
    -
    -  notes:
    -    for qh_findbest
    -
    -  design:
    -    for all facets on qh.newfacet_list
    -      if two facets are in different quadrants
    -        set issharp
    -*/
    -boolT qh_sharpnewfacets() {
    -  facetT *facet;
    -  boolT issharp = False;
    -  int *quadrant, k;
    -
    -  quadrant= (int*)qh_memalloc(qh hull_dim * sizeof(int));
    -  FORALLfacet_(qh newfacet_list) {
    -    if (facet == qh newfacet_list) {
    -      for (k=qh hull_dim; k--; )
    -        quadrant[ k]= (facet->normal[ k] > 0);
    -    }else {
    -      for (k=qh hull_dim; k--; ) {
    -        if (quadrant[ k] != (facet->normal[ k] > 0)) {
    -          issharp= True;
    -          break;
    -        }
    -      }
    -    }
    -    if (issharp)
    -      break;
    -  }
    -  qh_memfree( quadrant, qh hull_dim * sizeof(int));
    -  trace3((qh ferr, 3001, "qh_sharpnewfacets: %d\n", issharp));
    -  return issharp;
    -} /* sharpnewfacets */
    -
    -/*---------------------------------
    -
    -  qh_voronoi_center( dim, points )
    -    return Voronoi center for a set of points
    -    dim is the orginal dimension of the points
    -    gh.gm_matrix/qh.gm_row are scratch buffers
    -
    -  returns:
    -    center as a temporary point
    -    if non-simplicial,
    -      returns center for max simplex of points
    -
    -  notes:
    -    from Bowyer & Woodwark, A Programmer's Geometry, 1983, p. 65
    -
    -  design:
    -    if non-simplicial
    -      determine max simplex for points
    -    translate point0 of simplex to origin
    -    compute sum of squares of diagonal
    -    compute determinate
    -    compute Voronoi center (see Bowyer & Woodwark)
    -*/
    -pointT *qh_voronoi_center(int dim, setT *points) {
    -  pointT *point, **pointp, *point0;
    -  pointT *center= (pointT*)qh_memalloc(qh center_size);
    -  setT *simplex;
    -  int i, j, k, size= qh_setsize(points);
    -  coordT *gmcoord;
    -  realT *diffp, sum2, *sum2row, *sum2p, det, factor;
    -  boolT nearzero, infinite;
    -
    -  if (size == dim+1)
    -    simplex= points;
    -  else if (size < dim+1) {
    -    qh_fprintf(qh ferr, 6025, "qhull internal error (qh_voronoi_center):\n  need at least %d points to construct a Voronoi center\n",
    -             dim+1);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -    simplex= points;  /* never executed -- avoids warning */
    -  }else {
    -    simplex= qh_settemp(dim+1);
    -    qh_maxsimplex(dim, points, NULL, 0, &simplex);
    -  }
    -  point0= SETfirstt_(simplex, pointT);
    -  gmcoord= qh gm_matrix;
    -  for (k=0; k < dim; k++) {
    -    qh gm_row[k]= gmcoord;
    -    FOREACHpoint_(simplex) {
    -      if (point != point0)
    -        *(gmcoord++)= point[k] - point0[k];
    -    }
    -  }
    -  sum2row= gmcoord;
    -  for (i=0; i < dim; i++) {
    -    sum2= 0.0;
    -    for (k=0; k < dim; k++) {
    -      diffp= qh gm_row[k] + i;
    -      sum2 += *diffp * *diffp;
    -    }
    -    *(gmcoord++)= sum2;
    -  }
    -  det= qh_determinant(qh gm_row, dim, &nearzero);
    -  factor= qh_divzero(0.5, det, qh MINdenom, &infinite);
    -  if (infinite) {
    -    for (k=dim; k--; )
    -      center[k]= qh_INFINITE;
    -    if (qh IStracing)
    -      qh_printpoints(qh ferr, "qh_voronoi_center: at infinity for ", simplex);
    -  }else {
    -    for (i=0; i < dim; i++) {
    -      gmcoord= qh gm_matrix;
    -      sum2p= sum2row;
    -      for (k=0; k < dim; k++) {
    -        qh gm_row[k]= gmcoord;
    -        if (k == i) {
    -          for (j=dim; j--; )
    -            *(gmcoord++)= *sum2p++;
    -        }else {
    -          FOREACHpoint_(simplex) {
    -            if (point != point0)
    -              *(gmcoord++)= point[k] - point0[k];
    -          }
    -        }
    -      }
    -      center[i]= qh_determinant(qh gm_row, dim, &nearzero)*factor + point0[i];
    -    }
    -#ifndef qh_NOtrace
    -    if (qh IStracing >= 3) {
    -      qh_fprintf(qh ferr, 8033, "qh_voronoi_center: det %2.2g factor %2.2g ", det, factor);
    -      qh_printmatrix(qh ferr, "center:", ¢er, 1, dim);
    -      if (qh IStracing >= 5) {
    -        qh_printpoints(qh ferr, "points", simplex);
    -        FOREACHpoint_(simplex)
    -          qh_fprintf(qh ferr, 8034, "p%d dist %.2g, ", qh_pointid(point),
    -                   qh_pointdist(point, center, dim));
    -        qh_fprintf(qh ferr, 8035, "\n");
    -      }
    -    }
    -#endif
    -  }
    -  if (simplex != points)
    -    qh_settempfree(&simplex);
    -  return center;
    -} /* voronoi_center */
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/global.c b/scipy-0.10.1/scipy/spatial/qhull/src/global.c
    deleted file mode 100644
    index 3c6141a6a0..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/global.c
    +++ /dev/null
    @@ -1,2124 +0,0 @@
    -
    -/*
      ---------------------------------
    -
    -   global.c
    -   initializes all the globals of the qhull application
    -
    -   see README
    -
    -   see libqhull.h for qh.globals and function prototypes
    -
    -   see qhull_a.h for internal functions
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/global.c#60 $$Change: 1183 $
    -   $DateTime: 2010/01/13 20:59:32 $$Author: bbarber $
    - */
    -
    -#include "qhull_a.h"
    -
    -/*========= qh definition -- globals defined in libqhull.h =======================*/
    -
    -int qhull_inuse= 0; /* not used */
    -
    -#if qh_QHpointer
    -qhT *qh_qh= NULL;       /* pointer to all global variables */
    -#else
    -qhT qh_qh;              /* all global variables.
    -                           Add "= {0}" if this causes a compiler error.
    -                           Also qh_qhstat in stat.c and qhmem in mem.c.  */
    -#endif
    -
    -/*----------------------------------
    -
    -  qh_version
    -    version string by year and date
    -
    -    the revision increases on code changes only
    -
    -  notes:
    -    change date:    Changes.txt, Announce.txt, index.htm, README.txt,
    -                    qhull-news.html, Eudora signatures,
    -    change version: README.txt, qh-get.htm, File_id.diz, Makefile.txt
    -    change year:    Copying.txt
    -    check download size
    -    recompile user_eg.c, rbox.c, libqhull.c, qconvex.c, qdelaun.c qvoronoi.c, qhalf.c
    -*/
    -
    -const char *qh_version = "2010.1 2010/01/14";
    -
    -/*---------------------------------
    -
    -  qh_appendprint( printFormat )
    -    append printFormat to qh.PRINTout unless already defined
    -*/
    -void qh_appendprint(qh_PRINT format) {
    -  int i;
    -
    -  for (i=0; i < qh_PRINTEND; i++) {
    -    if (qh PRINTout[i] == format && format != qh_PRINTqhull)
    -      break;
    -    if (!qh PRINTout[i]) {
    -      qh PRINTout[i]= format;
    -      break;
    -    }
    -  }
    -} /* appendprint */
    -
    -/*---------------------------------
    -
    -  qh_checkflags( commandStr, hiddenFlags )
    -    errors if commandStr contains hiddenFlags
    -    hiddenFlags starts and ends with a space and is space deliminated (checked)
    -
    -  notes:
    -    ignores first word (e.g., "qconvex i")
    -    use qh_strtol/strtod since strtol/strtod may or may not skip trailing spaces
    -
    -  see:
    -    qh_initflags() initializes Qhull according to commandStr
    -*/
    -void qh_checkflags(char *command, char *hiddenflags) {
    -  char *s= command, *t, *chkerr; /* qh_skipfilename is non-const */
    -  char key, opt, prevopt;
    -  char chkkey[]= "   ";
    -  char chkopt[]=  "    ";
    -  char chkopt2[]= "     ";
    -  boolT waserr= False;
    -
    -  if (*hiddenflags != ' ' || hiddenflags[strlen(hiddenflags)-1] != ' ') {
    -    qh_fprintf(qh ferr, 6026, "qhull error (qh_checkflags): hiddenflags must start and end with a space: \"%s\"", hiddenflags);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (strpbrk(hiddenflags, ",\n\r\t")) {
    -    qh_fprintf(qh ferr, 6027, "qhull error (qh_checkflags): hiddenflags contains commas, newlines, or tabs: \"%s\"", hiddenflags);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  while (*s && !isspace(*s))  /* skip program name */
    -    s++;
    -  while (*s) {
    -    while (*s && isspace(*s))
    -      s++;
    -    if (*s == '-')
    -      s++;
    -    if (!*s)
    -      break;
    -    key = *s++;
    -    chkerr = NULL;
    -    if (key == 'T' && (*s == 'I' || *s == 'O')) {  /* TI or TO 'file name' */
    -      s= qh_skipfilename(++s);
    -      continue;
    -    }
    -    chkkey[1]= key;
    -    if (strstr(hiddenflags, chkkey)) {
    -      chkerr= chkkey;
    -    }else if (isupper(key)) {
    -      opt= ' ';
    -      prevopt= ' ';
    -      chkopt[1]= key;
    -      chkopt2[1]= key;
    -      while (!chkerr && *s && !isspace(*s)) {
    -        opt= *s++;
    -        if (isalpha(opt)) {
    -          chkopt[2]= opt;
    -          if (strstr(hiddenflags, chkopt))
    -            chkerr= chkopt;
    -          if (prevopt != ' ') {
    -            chkopt2[2]= prevopt;
    -            chkopt2[3]= opt;
    -            if (strstr(hiddenflags, chkopt2))
    -              chkerr= chkopt2;
    -          }
    -        }else if (key == 'Q' && isdigit(opt) && prevopt != 'b'
    -              && (prevopt == ' ' || islower(prevopt))) {
    -            chkopt[2]= opt;
    -            if (strstr(hiddenflags, chkopt))
    -              chkerr= chkopt;
    -        }else {
    -          qh_strtod(s-1, &t);
    -          if (s < t)
    -            s= t;
    -        }
    -        prevopt= opt;
    -      }
    -    }
    -    if (chkerr) {
    -      *chkerr= '\'';
    -      chkerr[strlen(chkerr)-1]=  '\'';
    -      qh_fprintf(qh ferr, 6029, "qhull error: option %s is not used with this program.\n             It may be used with qhull.\n", chkerr);
    -      waserr= True;
    -    }
    -  }
    -  if (waserr)
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -} /* checkflags */
    -
    -/*---------------------------------
    -
    -  qh_clear_outputflags()
    -    Clear output flags for QhullPoints
    -*/
    -void qh_clear_outputflags(void) {
    -  int i,k;
    -
    -  qh ANNOTATEoutput= False;
    -  qh DOintersections= False;
    -  qh DROPdim= -1;
    -  qh FORCEoutput= False;
    -  qh GETarea= False;
    -  qh GOODpoint= 0;
    -  qh GOODpointp= NULL;
    -  qh GOODthreshold= False;
    -  qh GOODvertex= 0;
    -  qh GOODvertexp= NULL;
    -  qh IStracing= 0;
    -  qh KEEParea= False;
    -  qh KEEPmerge= False;
    -  qh KEEPminArea= REALmax;
    -  qh PRINTcentrums= False;
    -  qh PRINTcoplanar= False;
    -  qh PRINTdots= False;
    -  qh PRINTgood= False;
    -  qh PRINTinner= False;
    -  qh PRINTneighbors= False;
    -  qh PRINTnoplanes= False;
    -  qh PRINToptions1st= False;
    -  qh PRINTouter= False;
    -  qh PRINTprecision= True;
    -  qh PRINTridges= False;
    -  qh PRINTspheres= False;
    -  qh PRINTstatistics= False;
    -  qh PRINTsummary= False;
    -  qh PRINTtransparent= False;
    -  qh SPLITthresholds= False;
    -  qh TRACElevel= 0;
    -  qh TRInormals= False;
    -  qh USEstdout= False;
    -  qh VERIFYoutput= False;
    -  for (k=qh input_dim+1; k--; ) {  /* duplicated in qh_initqhull_buffers and qh_clear_ouputflags */
    -    qh lower_threshold[k]= -REALmax;
    -    qh upper_threshold[k]= REALmax;
    -    qh lower_bound[k]= -REALmax;
    -    qh upper_bound[k]= REALmax;
    -  }
    -
    -  for (i=0; i < qh_PRINTEND; i++) {
    -    qh PRINTout[i]= qh_PRINTnone;
    -  }
    -
    -  if (!qh qhull_commandsiz2)
    -      qh qhull_commandsiz2= (int)strlen(qh qhull_command); /* WARN64 */
    -  else {
    -      qh qhull_command[qh qhull_commandsiz2]= '\0';
    -  }
    -  if (!qh qhull_optionsiz2)
    -    qh qhull_optionsiz2= (int)strlen(qh qhull_options);  /* WARN64 */
    -  else {
    -    qh qhull_options[qh qhull_optionsiz2]= '\0';
    -    qh qhull_optionlen= qh_OPTIONline;  /* start a new line */
    -  }
    -} /* clear_outputflags */
    -
    -/*---------------------------------
    -
    -  qh_clock()
    -    return user CPU time in 100ths (qh_SECtick)
    -    only defined for qh_CLOCKtype == 2
    -
    -  notes:
    -    use first value to determine time 0
    -    from Stevens '92 8.15
    -*/
    -unsigned long qh_clock(void) {
    -
    -#if (qh_CLOCKtype == 2)
    -  struct tms time;
    -  static long clktck;  /* initialized first call */
    -  double ratio, cpu;
    -  unsigned long ticks;
    -
    -  if (!clktck) {
    -    if ((clktck= sysconf(_SC_CLK_TCK)) < 0) {
    -      qh_fprintf(qh ferr, 6030, "qhull internal error (qh_clock): sysconf() failed.  Use qh_CLOCKtype 1 in user.h\n");
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }
    -  }
    -  if (times(&time) == -1) {
    -    qh_fprintf(qh ferr, 6031, "qhull internal error (qh_clock): times() failed.  Use qh_CLOCKtype 1 in user.h\n");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  ratio= qh_SECticks / (double)clktck;
    -  ticks= time.tms_utime * ratio;
    -  return ticks;
    -#else
    -  qh_fprintf(qh ferr, 6032, "qhull internal error (qh_clock): use qh_CLOCKtype 2 in user.h\n");
    -  qh_errexit(qh_ERRqhull, NULL, NULL); /* never returns */
    -  return 0;
    -#endif
    -} /* clock */
    -
    -/*---------------------------------
    -
    -  qh_freebuffers()
    -    free up global memory buffers
    -
    -  notes:
    -    must match qh_initbuffers()
    -*/
    -void qh_freebuffers(void) {
    -
    -  trace5((qh ferr, 5001, "qh_freebuffers: freeing up global memory buffers\n"));
    -  /* allocated by qh_initqhull_buffers */
    -  qh_memfree(qh NEARzero, qh hull_dim * sizeof(realT));
    -  qh_memfree(qh lower_threshold, (qh input_dim+1) * sizeof(realT));
    -  qh_memfree(qh upper_threshold, (qh input_dim+1) * sizeof(realT));
    -  qh_memfree(qh lower_bound, (qh input_dim+1) * sizeof(realT));
    -  qh_memfree(qh upper_bound, (qh input_dim+1) * sizeof(realT));
    -  qh_memfree(qh gm_matrix, (qh hull_dim+1) * qh hull_dim * sizeof(coordT));
    -  qh_memfree(qh gm_row, (qh hull_dim+1) * sizeof(coordT *));
    -  qh NEARzero= qh lower_threshold= qh upper_threshold= NULL;
    -  qh lower_bound= qh upper_bound= NULL;
    -  qh gm_matrix= NULL;
    -  qh gm_row= NULL;
    -  qh_setfree(&qh other_points);
    -  qh_setfree(&qh del_vertices);
    -  qh_setfree(&qh coplanarfacetset);
    -  if (qh line)                /* allocated by qh_readinput, freed if no error */
    -    qh_free(qh line);
    -  if (qh half_space)
    -    qh_free(qh half_space);
    -  if (qh temp_malloc)
    -    qh_free(qh temp_malloc);
    -  if (qh feasible_point)      /* allocated by qh_readfeasible */
    -    qh_free(qh feasible_point);
    -  if (qh feasible_string)     /* allocated by qh_initflags */
    -    qh_free(qh feasible_string);
    -  qh line= qh feasible_string= NULL;
    -  qh half_space= qh feasible_point= qh temp_malloc= NULL;
    -  /* usually allocated by qh_readinput */
    -  if (qh first_point && qh POINTSmalloc) {
    -    qh_free(qh first_point);
    -    qh first_point= NULL;
    -  }
    -  if (qh input_points && qh input_malloc) { /* set by qh_joggleinput */
    -    qh_free(qh input_points);
    -    qh input_points= NULL;
    -  }
    -  trace5((qh ferr, 5002, "qh_freebuffers: finished\n"));
    -} /* freebuffers */
    -
    -
    -/*---------------------------------
    -
    -  qh_freebuild( allmem )
    -    free global memory used by qh_initbuild and qh_buildhull
    -    if !allmem,
    -      does not free short memory (e.g., facetT, freed by qh_memfreeshort)
    -
    -  design:
    -    free centrums
    -    free each vertex
    -    mark unattached ridges
    -    for each facet
    -      free ridges
    -      free outside set, coplanar set, neighbor set, ridge set, vertex set
    -      free facet
    -    free hash table
    -    free interior point
    -    free merge set
    -    free temporary sets
    -*/
    -void qh_freebuild(boolT allmem) {
    -  facetT *facet;
    -  vertexT *vertex;
    -  ridgeT *ridge, **ridgep;
    -  mergeT *merge, **mergep;
    -
    -  trace1((qh ferr, 1005, "qh_freebuild: free memory from qh_inithull and qh_buildhull\n"));
    -  if (qh del_vertices)
    -    qh_settruncate(qh del_vertices, 0);
    -  if (allmem) {
    -    while ((vertex= qh vertex_list)) {
    -      if (vertex->next)
    -        qh_delvertex(vertex);
    -      else {
    -        qh_memfree(vertex, (int)sizeof(vertexT));
    -        qh newvertex_list= qh vertex_list= NULL;
    -      }
    -    }
    -  }else if (qh VERTEXneighbors) {
    -    FORALLvertices
    -      qh_setfreelong(&(vertex->neighbors));
    -  }
    -  qh VERTEXneighbors= False;
    -  qh GOODclosest= NULL;
    -  if (allmem) {
    -    FORALLfacets {
    -      FOREACHridge_(facet->ridges)
    -        ridge->seen= False;
    -    }
    -    FORALLfacets {
    -      if (facet->visible) {
    -        FOREACHridge_(facet->ridges) {
    -          if (!otherfacet_(ridge, facet)->visible)
    -            ridge->seen= True;  /* an unattached ridge */
    -        }
    -      }
    -    }
    -    while ((facet= qh facet_list)) {
    -      FOREACHridge_(facet->ridges) {
    -        if (ridge->seen) {
    -          qh_setfree(&(ridge->vertices));
    -          qh_memfree(ridge, (int)sizeof(ridgeT));
    -        }else
    -          ridge->seen= True;
    -      }
    -      qh_setfree(&(facet->outsideset));
    -      qh_setfree(&(facet->coplanarset));
    -      qh_setfree(&(facet->neighbors));
    -      qh_setfree(&(facet->ridges));
    -      qh_setfree(&(facet->vertices));
    -      if (facet->next)
    -        qh_delfacet(facet);
    -      else {
    -        qh_memfree(facet, (int)sizeof(facetT));
    -        qh visible_list= qh newfacet_list= qh facet_list= NULL;
    -      }
    -    }
    -  }else {
    -    FORALLfacets {
    -      qh_setfreelong(&(facet->outsideset));
    -      qh_setfreelong(&(facet->coplanarset));
    -      if (!facet->simplicial) {
    -        qh_setfreelong(&(facet->neighbors));
    -        qh_setfreelong(&(facet->ridges));
    -        qh_setfreelong(&(facet->vertices));
    -      }
    -    }
    -  }
    -  qh_setfree(&(qh hash_table));
    -  qh_memfree(qh interior_point, qh normal_size);
    -  qh interior_point= NULL;
    -  FOREACHmerge_(qh facet_mergeset)  /* usually empty */
    -    qh_memfree(merge, (int)sizeof(mergeT));
    -  qh facet_mergeset= NULL;  /* temp set */
    -  qh degen_mergeset= NULL;  /* temp set */
    -  qh_settempfree_all();
    -} /* freebuild */
    -
    -/*---------------------------------
    -
    -  qh_freeqhull( allmem )
    -    see qh_freeqhull2
    -    if qh_QHpointer, frees qh_qh
    -*/
    -void qh_freeqhull(boolT allmem) {
    -    qh_freeqhull2(allmem);
    -#if qh_QHpointer
    -    qh_free(qh_qh);
    -    qh_qh= NULL;
    -#endif
    -}
    -
    -/*---------------------------------
    -
    -qh_freeqhull2( allmem )
    -  free global memory
    -  if !allmem,
    -    does not free short memory (freed by qh_memfreeshort)
    -
    -notes:
    -  sets qh.NOerrexit in case caller forgets to
    -
    -see:
    -  see qh_initqhull_start2()
    -
    -design:
    -  free global and temporary memory from qh_initbuild and qh_buildhull
    -  free buffers
    -  free statistics
    -*/
    -void qh_freeqhull2(boolT allmem) {
    -
    -  trace1((qh ferr, 1006, "qh_freeqhull2: free global memory\n"));
    -  qh NOerrexit= True;  /* no more setjmp since called at exit and ~QhullQh */
    -  qh_freebuild(allmem);
    -  qh_freebuffers();
    -  qh_freestatistics();
    -#if qh_QHpointer
    -  memset((char *)qh_qh, 0, sizeof(qhT));
    -  /* qh_qh freed by caller, qh_freeqhull() */
    -#else
    -  memset((char *)&qh_qh, 0, sizeof(qhT));
    -#endif
    -  qh NOerrexit= True;
    -} /* freeqhull2 */
    -
    -/*---------------------------------
    -
    -  qh_init_A( infile, outfile, errfile, argc, argv )
    -    initialize memory and stdio files
    -    convert input options to option string (qh.qhull_command)
    -
    -  notes:
    -    infile may be NULL if qh_readpoints() is not called
    -
    -    errfile should always be defined.  It is used for reporting
    -    errors.  outfile is used for output and format options.
    -
    -    argc/argv may be 0/NULL
    -
    -    called before error handling initialized
    -    qh_errexit() may not be used
    -*/
    -void qh_init_A(FILE *infile, FILE *outfile, FILE *errfile, int argc, char *argv[]) {
    -  qh_meminit(errfile);
    -  qh_initqhull_start(infile, outfile, errfile);
    -  qh_init_qhull_command(argc, argv);
    -} /* init_A */
    -
    -/*---------------------------------
    -
    -  qh_init_B( points, numpoints, dim, ismalloc )
    -    initialize globals for points array
    -
    -    points has numpoints dim-dimensional points
    -      points[0] is the first coordinate of the first point
    -      points[1] is the second coordinate of the first point
    -      points[dim] is the first coordinate of the second point
    -
    -    ismalloc=True
    -      Qhull will call qh_free(points) on exit or input transformation
    -    ismalloc=False
    -      Qhull will allocate a new point array if needed for input transformation
    -
    -    qh.qhull_command
    -      is the option string.
    -      It is defined by qh_init_B(), qh_qhull_command(), or qh_initflags
    -
    -  returns:
    -    if qh.PROJECTinput or (qh.DELAUNAY and qh.PROJECTdelaunay)
    -      projects the input to a new point array
    -
    -        if qh.DELAUNAY,
    -          qh.hull_dim is increased by one
    -        if qh.ATinfinity,
    -          qh_projectinput adds point-at-infinity for Delaunay tri.
    -
    -    if qh.SCALEinput
    -      changes the upper and lower bounds of the input, see qh_scaleinput()
    -
    -    if qh.ROTATEinput
    -      rotates the input by a random rotation, see qh_rotateinput()
    -      if qh.DELAUNAY
    -        rotates about the last coordinate
    -
    -  notes:
    -    called after points are defined
    -    qh_errexit() may be used
    -*/
    -void qh_init_B(coordT *points, int numpoints, int dim, boolT ismalloc) {
    -  qh_initqhull_globals(points, numpoints, dim, ismalloc);
    -  if (qhmem.LASTsize == 0)
    -    qh_initqhull_mem();
    -  /* mem.c and qset.c are initialized */
    -  qh_initqhull_buffers();
    -  qh_initthresholds(qh qhull_command);
    -  if (qh PROJECTinput || (qh DELAUNAY && qh PROJECTdelaunay))
    -    qh_projectinput();
    -  if (qh SCALEinput)
    -    qh_scaleinput();
    -  if (qh ROTATErandom >= 0) {
    -    qh_randommatrix(qh gm_matrix, qh hull_dim, qh gm_row);
    -    if (qh DELAUNAY) {
    -      int k, lastk= qh hull_dim-1;
    -      for (k=0; k < lastk; k++) {
    -        qh gm_row[k][lastk]= 0.0;
    -        qh gm_row[lastk][k]= 0.0;
    -      }
    -      qh gm_row[lastk][lastk]= 1.0;
    -    }
    -    qh_gram_schmidt(qh hull_dim, qh gm_row);
    -    qh_rotateinput(qh gm_row);
    -  }
    -} /* init_B */
    -
    -/*---------------------------------
    -
    -  qh_init_qhull_command( argc, argv )
    -    build qh.qhull_command from argc/argv
    -
    -  returns:
    -    a space-delimited string of options (just as typed)
    -
    -  notes:
    -    makes option string easy to input and output
    -
    -    argc/argv may be 0/NULL
    -*/
    -void qh_init_qhull_command(int argc, char *argv[]) {
    -
    -  if (!qh_argv_to_command(argc, argv, qh qhull_command, (int)sizeof(qh qhull_command))){
    -    qh_exit(qh_ERRinput);  /* error reported, can not use qh_errexit */
    -  }
    -} /* init_qhull_command */
    -
    -/*---------------------------------
    -
    -  qh_initflags( commandStr )
    -    set flags and initialized constants from commandStr
    -
    -  returns:
    -    sets qh.qhull_command to command if needed
    -
    -  notes:
    -    ignores first word (e.g., "qhull d")
    -    use qh_strtol/strtod since strtol/strtod may or may not skip trailing spaces
    -
    -  see:
    -    qh_initthresholds() continues processing of 'Pdn' and 'PDn'
    -    'prompt' in unix.c for documentation
    -
    -  design:
    -    for each space-deliminated option group
    -      if top-level option
    -        check syntax
    -        append approriate option to option string
    -        set appropriate global variable or append printFormat to print options
    -      else
    -        for each sub-option
    -          check syntax
    -          append approriate option to option string
    -          set appropriate global variable or append printFormat to print options
    -*/
    -void qh_initflags(char *command) {
    -  int k, i, lastproject;
    -  char *s= command, *t, *prev_s, *start, key;
    -  boolT isgeom= False, wasproject;
    -  realT r;
    -
    -  if (command <= &qh qhull_command[0] || command > &qh qhull_command[0] + sizeof(qh qhull_command)) {
    -    if (command != &qh qhull_command[0]) {
    -      *qh qhull_command= '\0';
    -      strncat( qh qhull_command, command, sizeof( qh qhull_command));
    -    }
    -    while (*s && !isspace(*s))  /* skip program name */
    -      s++;
    -  }
    -  while (*s) {
    -    while (*s && isspace(*s))
    -      s++;
    -    if (*s == '-')
    -      s++;
    -    if (!*s)
    -      break;
    -    prev_s= s;
    -    switch (*s++) {
    -    case 'd':
    -      qh_option("delaunay", NULL, NULL);
    -      qh DELAUNAY= True;
    -      break;
    -    case 'f':
    -      qh_option("facets", NULL, NULL);
    -      qh_appendprint(qh_PRINTfacets);
    -      break;
    -    case 'i':
    -      qh_option("incidence", NULL, NULL);
    -      qh_appendprint(qh_PRINTincidences);
    -      break;
    -    case 'm':
    -      qh_option("mathematica", NULL, NULL);
    -      qh_appendprint(qh_PRINTmathematica);
    -      break;
    -    case 'n':
    -      qh_option("normals", NULL, NULL);
    -      qh_appendprint(qh_PRINTnormals);
    -      break;
    -    case 'o':
    -      qh_option("offFile", NULL, NULL);
    -      qh_appendprint(qh_PRINToff);
    -      break;
    -    case 'p':
    -      qh_option("points", NULL, NULL);
    -      qh_appendprint(qh_PRINTpoints);
    -      break;
    -    case 's':
    -      qh_option("summary", NULL, NULL);
    -      qh PRINTsummary= True;
    -      break;
    -    case 'v':
    -      qh_option("voronoi", NULL, NULL);
    -      qh VORONOI= True;
    -      qh DELAUNAY= True;
    -      break;
    -    case 'A':
    -      if (!isdigit(*s) && *s != '.' && *s != '-')
    -        qh_fprintf(qh ferr, 7002, "qhull warning: no maximum cosine angle given for option 'An'.  Ignored.\n");
    -      else {
    -        if (*s == '-') {
    -          qh premerge_cos= -qh_strtod(s, &s);
    -          qh_option("Angle-premerge-", NULL, &qh premerge_cos);
    -          qh PREmerge= True;
    -        }else {
    -          qh postmerge_cos= qh_strtod(s, &s);
    -          qh_option("Angle-postmerge", NULL, &qh postmerge_cos);
    -          qh POSTmerge= True;
    -        }
    -        qh MERGING= True;
    -      }
    -      break;
    -    case 'C':
    -      if (!isdigit(*s) && *s != '.' && *s != '-')
    -        qh_fprintf(qh ferr, 7003, "qhull warning: no centrum radius given for option 'Cn'.  Ignored.\n");
    -      else {
    -        if (*s == '-') {
    -          qh premerge_centrum= -qh_strtod(s, &s);
    -          qh_option("Centrum-premerge-", NULL, &qh premerge_centrum);
    -          qh PREmerge= True;
    -        }else {
    -          qh postmerge_centrum= qh_strtod(s, &s);
    -          qh_option("Centrum-postmerge", NULL, &qh postmerge_centrum);
    -          qh POSTmerge= True;
    -        }
    -        qh MERGING= True;
    -      }
    -      break;
    -    case 'E':
    -      if (*s == '-')
    -        qh_fprintf(qh ferr, 7004, "qhull warning: negative maximum roundoff given for option 'An'.  Ignored.\n");
    -      else if (!isdigit(*s))
    -        qh_fprintf(qh ferr, 7005, "qhull warning: no maximum roundoff given for option 'En'.  Ignored.\n");
    -      else {
    -        qh DISTround= qh_strtod(s, &s);
    -        qh_option("Distance-roundoff", NULL, &qh DISTround);
    -        qh SETroundoff= True;
    -      }
    -      break;
    -    case 'H':
    -      start= s;
    -      qh HALFspace= True;
    -      qh_strtod(s, &t);
    -      while (t > s)  {
    -        if (*t && !isspace(*t)) {
    -          if (*t == ',')
    -            t++;
    -          else
    -            qh_fprintf(qh ferr, 7006, "qhull warning: origin for Halfspace intersection should be 'Hn,n,n,...'\n");
    -        }
    -        s= t;
    -        qh_strtod(s, &t);
    -      }
    -      if (start < t) {
    -        if (!(qh feasible_string= (char*)calloc((size_t)(t-start+1), (size_t)1))) {
    -          qh_fprintf(qh ferr, 6034, "qhull error: insufficient memory for 'Hn,n,n'\n");
    -          qh_errexit(qh_ERRmem, NULL, NULL);
    -        }
    -        strncpy(qh feasible_string, start, (size_t)(t-start));
    -        qh_option("Halfspace-about", NULL, NULL);
    -        qh_option(qh feasible_string, NULL, NULL);
    -      }else
    -        qh_option("Halfspace", NULL, NULL);
    -      break;
    -    case 'R':
    -      if (!isdigit(*s))
    -        qh_fprintf(qh ferr, 7007, "qhull warning: missing random perturbation for option 'Rn'.  Ignored\n");
    -      else {
    -        qh RANDOMfactor= qh_strtod(s, &s);
    -        qh_option("Random_perturb", NULL, &qh RANDOMfactor);
    -        qh RANDOMdist= True;
    -      }
    -      break;
    -    case 'V':
    -      if (!isdigit(*s) && *s != '-')
    -        qh_fprintf(qh ferr, 7008, "qhull warning: missing visible distance for option 'Vn'.  Ignored\n");
    -      else {
    -        qh MINvisible= qh_strtod(s, &s);
    -        qh_option("Visible", NULL, &qh MINvisible);
    -      }
    -      break;
    -    case 'U':
    -      if (!isdigit(*s) && *s != '-')
    -        qh_fprintf(qh ferr, 7009, "qhull warning: missing coplanar distance for option 'Un'.  Ignored\n");
    -      else {
    -        qh MAXcoplanar= qh_strtod(s, &s);
    -        qh_option("U-coplanar", NULL, &qh MAXcoplanar);
    -      }
    -      break;
    -    case 'W':
    -      if (*s == '-')
    -        qh_fprintf(qh ferr, 7010, "qhull warning: negative outside width for option 'Wn'.  Ignored.\n");
    -      else if (!isdigit(*s))
    -        qh_fprintf(qh ferr, 7011, "qhull warning: missing outside width for option 'Wn'.  Ignored\n");
    -      else {
    -        qh MINoutside= qh_strtod(s, &s);
    -        qh_option("W-outside", NULL, &qh MINoutside);
    -        qh APPROXhull= True;
    -      }
    -      break;
    -    /************  sub menus ***************/
    -    case 'F':
    -      while (*s && !isspace(*s)) {
    -        switch (*s++) {
    -        case 'a':
    -          qh_option("Farea", NULL, NULL);
    -          qh_appendprint(qh_PRINTarea);
    -          qh GETarea= True;
    -          break;
    -        case 'A':
    -          qh_option("FArea-total", NULL, NULL);
    -          qh GETarea= True;
    -          break;
    -        case 'c':
    -          qh_option("Fcoplanars", NULL, NULL);
    -          qh_appendprint(qh_PRINTcoplanars);
    -          break;
    -        case 'C':
    -          qh_option("FCentrums", NULL, NULL);
    -          qh_appendprint(qh_PRINTcentrums);
    -          break;
    -        case 'd':
    -          qh_option("Fd-cdd-in", NULL, NULL);
    -          qh CDDinput= True;
    -          break;
    -        case 'D':
    -          qh_option("FD-cdd-out", NULL, NULL);
    -          qh CDDoutput= True;
    -          break;
    -        case 'F':
    -          qh_option("FFacets-xridge", NULL, NULL);
    -          qh_appendprint(qh_PRINTfacets_xridge);
    -          break;
    -        case 'i':
    -          qh_option("Finner", NULL, NULL);
    -          qh_appendprint(qh_PRINTinner);
    -          break;
    -        case 'I':
    -          qh_option("FIDs", NULL, NULL);
    -          qh_appendprint(qh_PRINTids);
    -          break;
    -        case 'm':
    -          qh_option("Fmerges", NULL, NULL);
    -          qh_appendprint(qh_PRINTmerges);
    -          break;
    -        case 'M':
    -          qh_option("FMaple", NULL, NULL);
    -          qh_appendprint(qh_PRINTmaple);
    -          break;
    -        case 'n':
    -          qh_option("Fneighbors", NULL, NULL);
    -          qh_appendprint(qh_PRINTneighbors);
    -          break;
    -        case 'N':
    -          qh_option("FNeighbors-vertex", NULL, NULL);
    -          qh_appendprint(qh_PRINTvneighbors);
    -          break;
    -        case 'o':
    -          qh_option("Fouter", NULL, NULL);
    -          qh_appendprint(qh_PRINTouter);
    -          break;
    -        case 'O':
    -          if (qh PRINToptions1st) {
    -            qh_option("FOptions", NULL, NULL);
    -            qh_appendprint(qh_PRINToptions);
    -          }else
    -            qh PRINToptions1st= True;
    -          break;
    -        case 'p':
    -          qh_option("Fpoint-intersect", NULL, NULL);
    -          qh_appendprint(qh_PRINTpointintersect);
    -          break;
    -        case 'P':
    -          qh_option("FPoint-nearest", NULL, NULL);
    -          qh_appendprint(qh_PRINTpointnearest);
    -          break;
    -        case 'Q':
    -          qh_option("FQhull", NULL, NULL);
    -          qh_appendprint(qh_PRINTqhull);
    -          break;
    -        case 's':
    -          qh_option("Fsummary", NULL, NULL);
    -          qh_appendprint(qh_PRINTsummary);
    -          break;
    -        case 'S':
    -          qh_option("FSize", NULL, NULL);
    -          qh_appendprint(qh_PRINTsize);
    -          qh GETarea= True;
    -          break;
    -        case 't':
    -          qh_option("Ftriangles", NULL, NULL);
    -          qh_appendprint(qh_PRINTtriangles);
    -          break;
    -        case 'v':
    -          /* option set in qh_initqhull_globals */
    -          qh_appendprint(qh_PRINTvertices);
    -          break;
    -        case 'V':
    -          qh_option("FVertex-average", NULL, NULL);
    -          qh_appendprint(qh_PRINTaverage);
    -          break;
    -        case 'x':
    -          qh_option("Fxtremes", NULL, NULL);
    -          qh_appendprint(qh_PRINTextremes);
    -          break;
    -        default:
    -          s--;
    -          qh_fprintf(qh ferr, 7012, "qhull warning: unknown 'F' output option %c, rest ignored\n", (int)s[0]);
    -          while (*++s && !isspace(*s));
    -          break;
    -        }
    -      }
    -      break;
    -    case 'G':
    -      isgeom= True;
    -      qh_appendprint(qh_PRINTgeom);
    -      while (*s && !isspace(*s)) {
    -        switch (*s++) {
    -        case 'a':
    -          qh_option("Gall-points", NULL, NULL);
    -          qh PRINTdots= True;
    -          break;
    -        case 'c':
    -          qh_option("Gcentrums", NULL, NULL);
    -          qh PRINTcentrums= True;
    -          break;
    -        case 'h':
    -          qh_option("Gintersections", NULL, NULL);
    -          qh DOintersections= True;
    -          break;
    -        case 'i':
    -          qh_option("Ginner", NULL, NULL);
    -          qh PRINTinner= True;
    -          break;
    -        case 'n':
    -          qh_option("Gno-planes", NULL, NULL);
    -          qh PRINTnoplanes= True;
    -          break;
    -        case 'o':
    -          qh_option("Gouter", NULL, NULL);
    -          qh PRINTouter= True;
    -          break;
    -        case 'p':
    -          qh_option("Gpoints", NULL, NULL);
    -          qh PRINTcoplanar= True;
    -          break;
    -        case 'r':
    -          qh_option("Gridges", NULL, NULL);
    -          qh PRINTridges= True;
    -          break;
    -        case 't':
    -          qh_option("Gtransparent", NULL, NULL);
    -          qh PRINTtransparent= True;
    -          break;
    -        case 'v':
    -          qh_option("Gvertices", NULL, NULL);
    -          qh PRINTspheres= True;
    -          break;
    -        case 'D':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 6035, "qhull input error: missing dimension for option 'GDn'\n");
    -          else {
    -            if (qh DROPdim >= 0)
    -              qh_fprintf(qh ferr, 7013, "qhull warning: can only drop one dimension.  Previous 'GD%d' ignored\n",
    -                   qh DROPdim);
    -            qh DROPdim= qh_strtol(s, &s);
    -            qh_option("GDrop-dim", &qh DROPdim, NULL);
    -          }
    -          break;
    -        default:
    -          s--;
    -          qh_fprintf(qh ferr, 7014, "qhull warning: unknown 'G' print option %c, rest ignored\n", (int)s[0]);
    -          while (*++s && !isspace(*s));
    -          break;
    -        }
    -      }
    -      break;
    -    case 'P':
    -      while (*s && !isspace(*s)) {
    -        switch (*s++) {
    -        case 'd': case 'D':  /* see qh_initthresholds() */
    -          key= s[-1];
    -          i= qh_strtol(s, &s);
    -          r= 0;
    -          if (*s == ':') {
    -            s++;
    -            r= qh_strtod(s, &s);
    -          }
    -          if (key == 'd')
    -            qh_option("Pdrop-facets-dim-less", &i, &r);
    -          else
    -            qh_option("PDrop-facets-dim-more", &i, &r);
    -          break;
    -        case 'g':
    -          qh_option("Pgood-facets", NULL, NULL);
    -          qh PRINTgood= True;
    -          break;
    -        case 'G':
    -          qh_option("PGood-facet-neighbors", NULL, NULL);
    -          qh PRINTneighbors= True;
    -          break;
    -        case 'o':
    -          qh_option("Poutput-forced", NULL, NULL);
    -          qh FORCEoutput= True;
    -          break;
    -        case 'p':
    -          qh_option("Pprecision-ignore", NULL, NULL);
    -          qh PRINTprecision= False;
    -          break;
    -        case 'A':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 6036, "qhull input error: missing facet count for keep area option 'PAn'\n");
    -          else {
    -            qh KEEParea= qh_strtol(s, &s);
    -            qh_option("PArea-keep", &qh KEEParea, NULL);
    -            qh GETarea= True;
    -          }
    -          break;
    -        case 'F':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 6037, "qhull input error: missing facet area for option 'PFn'\n");
    -          else {
    -            qh KEEPminArea= qh_strtod(s, &s);
    -            qh_option("PFacet-area-keep", NULL, &qh KEEPminArea);
    -            qh GETarea= True;
    -          }
    -          break;
    -        case 'M':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 6038, "qhull input error: missing merge count for option 'PMn'\n");
    -          else {
    -            qh KEEPmerge= qh_strtol(s, &s);
    -            qh_option("PMerge-keep", &qh KEEPmerge, NULL);
    -          }
    -          break;
    -        default:
    -          s--;
    -          qh_fprintf(qh ferr, 7015, "qhull warning: unknown 'P' print option %c, rest ignored\n", (int)s[0]);
    -          while (*++s && !isspace(*s));
    -          break;
    -        }
    -      }
    -      break;
    -    case 'Q':
    -      lastproject= -1;
    -      while (*s && !isspace(*s)) {
    -        switch (*s++) {
    -        case 'b': case 'B':  /* handled by qh_initthresholds */
    -          key= s[-1];
    -          if (key == 'b' && *s == 'B') {
    -            s++;
    -            r= qh_DEFAULTbox;
    -            qh SCALEinput= True;
    -            qh_option("QbBound-unit-box", NULL, &r);
    -            break;
    -          }
    -          if (key == 'b' && *s == 'b') {
    -            s++;
    -            qh SCALElast= True;
    -            qh_option("Qbbound-last", NULL, NULL);
    -            break;
    -          }
    -          k= qh_strtol(s, &s);
    -          r= 0.0;
    -          wasproject= False;
    -          if (*s == ':') {
    -            s++;
    -            if ((r= qh_strtod(s, &s)) == 0.0) {
    -              t= s;            /* need true dimension for memory allocation */
    -              while (*t && !isspace(*t)) {
    -                if (toupper(*t++) == 'B'
    -                 && k == qh_strtol(t, &t)
    -                 && *t++ == ':'
    -                 && qh_strtod(t, &t) == 0.0) {
    -                  qh PROJECTinput++;
    -                  trace2((qh ferr, 2004, "qh_initflags: project dimension %d\n", k));
    -                  qh_option("Qb-project-dim", &k, NULL);
    -                  wasproject= True;
    -                  lastproject= k;
    -                  break;
    -                }
    -              }
    -            }
    -          }
    -          if (!wasproject) {
    -            if (lastproject == k && r == 0.0)
    -              lastproject= -1;  /* doesn't catch all possible sequences */
    -            else if (key == 'b') {
    -              qh SCALEinput= True;
    -              if (r == 0.0)
    -                r= -qh_DEFAULTbox;
    -              qh_option("Qbound-dim-low", &k, &r);
    -            }else {
    -              qh SCALEinput= True;
    -              if (r == 0.0)
    -                r= qh_DEFAULTbox;
    -              qh_option("QBound-dim-high", &k, &r);
    -            }
    -          }
    -          break;
    -        case 'c':
    -          qh_option("Qcoplanar-keep", NULL, NULL);
    -          qh KEEPcoplanar= True;
    -          break;
    -        case 'f':
    -          qh_option("Qfurthest-outside", NULL, NULL);
    -          qh BESToutside= True;
    -          break;
    -        case 'g':
    -          qh_option("Qgood-facets-only", NULL, NULL);
    -          qh ONLYgood= True;
    -          break;
    -        case 'i':
    -          qh_option("Qinterior-keep", NULL, NULL);
    -          qh KEEPinside= True;
    -          break;
    -        case 'm':
    -          qh_option("Qmax-outside-only", NULL, NULL);
    -          qh ONLYmax= True;
    -          break;
    -        case 'r':
    -          qh_option("Qrandom-outside", NULL, NULL);
    -          qh RANDOMoutside= True;
    -          break;
    -        case 's':
    -          qh_option("Qsearch-initial-simplex", NULL, NULL);
    -          qh ALLpoints= True;
    -          break;
    -        case 't':
    -          qh_option("Qtriangulate", NULL, NULL);
    -          qh TRIangulate= True;
    -          break;
    -        case 'T':
    -          qh_option("QTestPoints", NULL, NULL);
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 6039, "qhull input error: missing number of test points for option 'QTn'\n");
    -          else {
    -            qh TESTpoints= qh_strtol(s, &s);
    -            qh_option("QTestPoints", &qh TESTpoints, NULL);
    -          }
    -          break;
    -        case 'u':
    -          qh_option("QupperDelaunay", NULL, NULL);
    -          qh UPPERdelaunay= True;
    -          break;
    -        case 'v':
    -          qh_option("Qvertex-neighbors-convex", NULL, NULL);
    -          qh TESTvneighbors= True;
    -          break;
    -        case 'x':
    -          qh_option("Qxact-merge", NULL, NULL);
    -          qh MERGEexact= True;
    -          break;
    -        case 'z':
    -          qh_option("Qz-infinity-point", NULL, NULL);
    -          qh ATinfinity= True;
    -          break;
    -        case '0':
    -          qh_option("Q0-no-premerge", NULL, NULL);
    -          qh NOpremerge= True;
    -          break;
    -        case '1':
    -          if (!isdigit(*s)) {
    -            qh_option("Q1-no-angle-sort", NULL, NULL);
    -            qh ANGLEmerge= False;
    -            break;
    -          }
    -          switch (*s++) {
    -          case '0':
    -            qh_option("Q10-no-narrow", NULL, NULL);
    -            qh NOnarrow= True;
    -            break;
    -          case '1':
    -            qh_option("Q11-trinormals Qtriangulate", NULL, NULL);
    -            qh TRInormals= True;
    -            qh TRIangulate= True;
    -            break;
    -          default:
    -            s--;
    -            qh_fprintf(qh ferr, 7016, "qhull warning: unknown 'Q' qhull option 1%c, rest ignored\n", (int)s[0]);
    -            while (*++s && !isspace(*s));
    -            break;
    -          }
    -          break;
    -        case '2':
    -          qh_option("Q2-no-merge-independent", NULL, NULL);
    -          qh MERGEindependent= False;
    -          goto LABELcheckdigit;
    -          break; /* no warnings */
    -        case '3':
    -          qh_option("Q3-no-merge-vertices", NULL, NULL);
    -          qh MERGEvertices= False;
    -        LABELcheckdigit:
    -          if (isdigit(*s))
    -            qh_fprintf(qh ferr, 7017, "qhull warning: can not follow '1', '2', or '3' with a digit.  '%c' skipped.\n",
    -                     *s++);
    -          break;
    -        case '4':
    -          qh_option("Q4-avoid-old-into-new", NULL, NULL);
    -          qh AVOIDold= True;
    -          break;
    -        case '5':
    -          qh_option("Q5-no-check-outer", NULL, NULL);
    -          qh SKIPcheckmax= True;
    -          break;
    -        case '6':
    -          qh_option("Q6-no-concave-merge", NULL, NULL);
    -          qh SKIPconvex= True;
    -          break;
    -        case '7':
    -          qh_option("Q7-no-breadth-first", NULL, NULL);
    -          qh VIRTUALmemory= True;
    -          break;
    -        case '8':
    -          qh_option("Q8-no-near-inside", NULL, NULL);
    -          qh NOnearinside= True;
    -          break;
    -        case '9':
    -          qh_option("Q9-pick-furthest", NULL, NULL);
    -          qh PICKfurthest= True;
    -          break;
    -        case 'G':
    -          i= qh_strtol(s, &t);
    -          if (qh GOODpoint)
    -            qh_fprintf(qh ferr, 7018, "qhull warning: good point already defined for option 'QGn'.  Ignored\n");
    -          else if (s == t)
    -            qh_fprintf(qh ferr, 7019, "qhull warning: missing good point id for option 'QGn'.  Ignored\n");
    -          else if (i < 0 || *s == '-') {
    -            qh GOODpoint= i-1;
    -            qh_option("QGood-if-dont-see-point", &i, NULL);
    -          }else {
    -            qh GOODpoint= i+1;
    -            qh_option("QGood-if-see-point", &i, NULL);
    -          }
    -          s= t;
    -          break;
    -        case 'J':
    -          if (!isdigit(*s) && *s != '-')
    -            qh JOGGLEmax= 0.0;
    -          else {
    -            qh JOGGLEmax= (realT) qh_strtod(s, &s);
    -            qh_option("QJoggle", NULL, &qh JOGGLEmax);
    -          }
    -          break;
    -        case 'R':
    -          if (!isdigit(*s) && *s != '-')
    -            qh_fprintf(qh ferr, 7020, "qhull warning: missing random seed for option 'QRn'.  Ignored\n");
    -          else {
    -            qh ROTATErandom= i= qh_strtol(s, &s);
    -            if (i > 0)
    -              qh_option("QRotate-id", &i, NULL );
    -            else if (i < -1)
    -              qh_option("QRandom-seed", &i, NULL );
    -          }
    -          break;
    -        case 'V':
    -          i= qh_strtol(s, &t);
    -          if (qh GOODvertex)
    -            qh_fprintf(qh ferr, 7021, "qhull warning: good vertex already defined for option 'QVn'.  Ignored\n");
    -          else if (s == t)
    -            qh_fprintf(qh ferr, 7022, "qhull warning: no good point id given for option 'QVn'.  Ignored\n");
    -          else if (i < 0) {
    -            qh GOODvertex= i - 1;
    -            qh_option("QV-good-facets-not-point", &i, NULL);
    -          }else {
    -            qh_option("QV-good-facets-point", &i, NULL);
    -            qh GOODvertex= i + 1;
    -          }
    -          s= t;
    -          break;
    -        default:
    -          s--;
    -          qh_fprintf(qh ferr, 7023, "qhull warning: unknown 'Q' qhull option %c, rest ignored\n", (int)s[0]);
    -          while (*++s && !isspace(*s));
    -          break;
    -        }
    -      }
    -      break;
    -    case 'T':
    -      while (*s && !isspace(*s)) {
    -        if (isdigit(*s) || *s == '-')
    -          qh IStracing= qh_strtol(s, &s);
    -        else switch (*s++) {
    -        case 'a':
    -          qh_option("Tannotate-output", NULL, NULL);
    -          qh ANNOTATEoutput= True;
    -          break;
    -        case 'c':
    -          qh_option("Tcheck-frequently", NULL, NULL);
    -          qh CHECKfrequently= True;
    -          break;
    -        case 's':
    -          qh_option("Tstatistics", NULL, NULL);
    -          qh PRINTstatistics= True;
    -          break;
    -        case 'v':
    -          qh_option("Tverify", NULL, NULL);
    -          qh VERIFYoutput= True;
    -          break;
    -        case 'z':
    -          if (qh ferr == qh_FILEstderr) {
    -            /* The C++ interface captures the output in qh_fprint_qhull() */
    -            qh_option("Tz-stdout", NULL, NULL);
    -            qh USEstdout= True;
    -          }else if (!qh fout)
    -            qh_fprintf(qh ferr, 7024, "qhull warning: output file undefined(stdout).  Option 'Tz' ignored.\n");
    -          else {
    -            qh_option("Tz-stdout", NULL, NULL);
    -            qh USEstdout= True;
    -            qh ferr= qh fout;
    -            qhmem.ferr= qh fout;
    -          }
    -          break;
    -        case 'C':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 7025, "qhull warning: missing point id for cone for trace option 'TCn'.  Ignored\n");
    -          else {
    -            i= qh_strtol(s, &s);
    -            qh_option("TCone-stop", &i, NULL);
    -            qh STOPcone= i + 1;
    -          }
    -          break;
    -        case 'F':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 7026, "qhull warning: missing frequency count for trace option 'TFn'.  Ignored\n");
    -          else {
    -            qh REPORTfreq= qh_strtol(s, &s);
    -            qh_option("TFacet-log", &qh REPORTfreq, NULL);
    -            qh REPORTfreq2= qh REPORTfreq/2;  /* for tracemerging() */
    -          }
    -          break;
    -        case 'I':
    -          if (!isspace(*s))
    -            qh_fprintf(qh ferr, 7027, "qhull warning: missing space between 'TI' and filename, %s\n", s);
    -          while (isspace(*s))
    -            s++;
    -          t= qh_skipfilename(s);
    -          {
    -            char filename[qh_FILENAMElen];
    -
    -            qh_copyfilename(filename, (int)sizeof(filename), s, (int)(t-s));   /* WARN64 */
    -            s= t;
    -            if (!freopen(filename, "r", stdin)) {
    -              qh_fprintf(qh ferr, 6041, "qhull error: could not open file \"%s\".", filename);
    -              qh_errexit(qh_ERRinput, NULL, NULL);
    -            }else {
    -              qh_option("TInput-file", NULL, NULL);
    -              qh_option(filename, NULL, NULL);
    -            }
    -          }
    -          break;
    -        case 'O':
    -            if (!isspace(*s))
    -                qh_fprintf(qh ferr, 7028, "qhull warning: missing space between 'TO' and filename, %s\n", s);
    -            while (isspace(*s))
    -                s++;
    -            t= qh_skipfilename(s);
    -            {
    -              char filename[qh_FILENAMElen];
    -
    -              qh_copyfilename(filename, (int)sizeof(filename), s, (int)(t-s));  /* WARN64 */
    -              s= t;
    -              if (!freopen(filename, "w", stdout)) {
    -                qh_fprintf(qh ferr, 6044, "qhull error: could not open file \"%s\".", filename);
    -                qh_errexit(qh_ERRinput, NULL, NULL);
    -              }else {
    -                qh_option("TOutput-file", NULL, NULL);
    -              qh_option(filename, NULL, NULL);
    -            }
    -          }
    -          break;
    -        case 'P':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 7029, "qhull warning: missing point id for trace option 'TPn'.  Ignored\n");
    -          else {
    -            qh TRACEpoint= qh_strtol(s, &s);
    -            qh_option("Trace-point", &qh TRACEpoint, NULL);
    -          }
    -          break;
    -        case 'M':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 7030, "qhull warning: missing merge id for trace option 'TMn'.  Ignored\n");
    -          else {
    -            qh TRACEmerge= qh_strtol(s, &s);
    -            qh_option("Trace-merge", &qh TRACEmerge, NULL);
    -          }
    -          break;
    -        case 'R':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 7031, "qhull warning: missing rerun count for trace option 'TRn'.  Ignored\n");
    -          else {
    -            qh RERUN= qh_strtol(s, &s);
    -            qh_option("TRerun", &qh RERUN, NULL);
    -          }
    -          break;
    -        case 'V':
    -          i= qh_strtol(s, &t);
    -          if (s == t)
    -            qh_fprintf(qh ferr, 7032, "qhull warning: missing furthest point id for trace option 'TVn'.  Ignored\n");
    -          else if (i < 0) {
    -            qh STOPpoint= i - 1;
    -            qh_option("TV-stop-before-point", &i, NULL);
    -          }else {
    -            qh STOPpoint= i + 1;
    -            qh_option("TV-stop-after-point", &i, NULL);
    -          }
    -          s= t;
    -          break;
    -        case 'W':
    -          if (!isdigit(*s))
    -            qh_fprintf(qh ferr, 7033, "qhull warning: missing max width for trace option 'TWn'.  Ignored\n");
    -          else {
    -            qh TRACEdist= (realT) qh_strtod(s, &s);
    -            qh_option("TWide-trace", NULL, &qh TRACEdist);
    -          }
    -          break;
    -        default:
    -          s--;
    -          qh_fprintf(qh ferr, 7034, "qhull warning: unknown 'T' trace option %c, rest ignored\n", (int)s[0]);
    -          while (*++s && !isspace(*s));
    -          break;
    -        }
    -      }
    -      break;
    -    default:
    -      qh_fprintf(qh ferr, 7035, "qhull warning: unknown flag %c(%x)\n", (int)s[-1],
    -               (int)s[-1]);
    -      break;
    -    }
    -    if (s-1 == prev_s && *s && !isspace(*s)) {
    -      qh_fprintf(qh ferr, 7036, "qhull warning: missing space after flag %c(%x); reserved for menu. Skipped.\n",
    -               (int)*prev_s, (int)*prev_s);
    -      while (*s && !isspace(*s))
    -        s++;
    -    }
    -  }
    -  if (qh STOPcone && qh JOGGLEmax < REALmax/2)
    -    qh_fprintf(qh ferr, 7078, "qhull warning: 'TCn' (stopCone) ignored when used with 'QJn' (joggle)\n");
    -  if (isgeom && !qh FORCEoutput && qh PRINTout[1])
    -    qh_fprintf(qh ferr, 7037, "qhull warning: additional output formats are not compatible with Geomview\n");
    -  /* set derived values in qh_initqhull_globals */
    -} /* initflags */
    -
    -
    -/*---------------------------------
    -
    -  qh_initqhull_buffers()
    -    initialize global memory buffers
    -
    -  notes:
    -    must match qh_freebuffers()
    -*/
    -void qh_initqhull_buffers(void) {
    -  int k;
    -
    -  qh TEMPsize= (qhmem.LASTsize - sizeof(setT))/SETelemsize;
    -  if (qh TEMPsize <= 0 || qh TEMPsize > qhmem.LASTsize)
    -    qh TEMPsize= 8;  /* e.g., if qh_NOmem */
    -  qh other_points= qh_setnew(qh TEMPsize);
    -  qh del_vertices= qh_setnew(qh TEMPsize);
    -  qh coplanarfacetset= qh_setnew(qh TEMPsize);
    -  qh NEARzero= (realT *)qh_memalloc(qh hull_dim * sizeof(realT));
    -  qh lower_threshold= (realT *)qh_memalloc((qh input_dim+1) * sizeof(realT));
    -  qh upper_threshold= (realT *)qh_memalloc((qh input_dim+1) * sizeof(realT));
    -  qh lower_bound= (realT *)qh_memalloc((qh input_dim+1) * sizeof(realT));
    -  qh upper_bound= (realT *)qh_memalloc((qh input_dim+1) * sizeof(realT));
    -  for (k=qh input_dim+1; k--; ) {  /* duplicated in qh_initqhull_buffers and qh_clear_ouputflags */
    -    qh lower_threshold[k]= -REALmax;
    -    qh upper_threshold[k]= REALmax;
    -    qh lower_bound[k]= -REALmax;
    -    qh upper_bound[k]= REALmax;
    -  }
    -  qh gm_matrix= (coordT *)qh_memalloc((qh hull_dim+1) * qh hull_dim * sizeof(coordT));
    -  qh gm_row= (coordT **)qh_memalloc((qh hull_dim+1) * sizeof(coordT *));
    -} /* initqhull_buffers */
    -
    -/*---------------------------------
    -
    -  qh_initqhull_globals( points, numpoints, dim, ismalloc )
    -    initialize globals
    -    if ismalloc
    -      points were malloc'd and qhull should free at end
    -
    -  returns:
    -    sets qh.first_point, num_points, input_dim, hull_dim and others
    -    seeds random number generator (seed=1 if tracing)
    -    modifies qh.hull_dim if ((qh.DELAUNAY and qh.PROJECTdelaunay) or qh.PROJECTinput)
    -    adjust user flags as needed
    -    also checks DIM3 dependencies and constants
    -
    -  notes:
    -    do not use qh_point() since an input transformation may move them elsewhere
    -
    -  see:
    -    qh_initqhull_start() sets default values for non-zero globals
    -
    -  design:
    -    initialize points array from input arguments
    -    test for qh.ZEROcentrum
    -      (i.e., use opposite vertex instead of cetrum for convexity testing)
    -    initialize qh.CENTERtype, qh.normal_size,
    -      qh.center_size, qh.TRACEpoint/level,
    -    initialize and test random numbers
    -    qh_initqhull_outputflags() -- adjust and test output flags
    -*/
    -void qh_initqhull_globals(coordT *points, int numpoints, int dim, boolT ismalloc) {
    -  int seed, pointsneeded, extra= 0, i, randi, k;
    -  realT randr;
    -  realT factorial;
    -
    -  time_t timedata;
    -
    -  trace0((qh ferr, 13, "qh_initqhull_globals: for %s | %s\n", qh rbox_command,
    -      qh qhull_command));
    -  qh POINTSmalloc= ismalloc;
    -  qh first_point= points;
    -  qh num_points= numpoints;
    -  qh hull_dim= qh input_dim= dim;
    -  if (!qh NOpremerge && !qh MERGEexact && !qh PREmerge && qh JOGGLEmax > REALmax/2) {
    -    qh MERGING= True;
    -    if (qh hull_dim <= 4) {
    -      qh PREmerge= True;
    -      qh_option("_pre-merge", NULL, NULL);
    -    }else {
    -      qh MERGEexact= True;
    -      qh_option("Qxact_merge", NULL, NULL);
    -    }
    -  }else if (qh MERGEexact)
    -    qh MERGING= True;
    -  if (!qh NOpremerge && qh JOGGLEmax > REALmax/2) {
    -#ifdef qh_NOmerge
    -    qh JOGGLEmax= 0.0;
    -#endif
    -  }
    -  if (qh TRIangulate && qh JOGGLEmax < REALmax/2 && qh PRINTprecision)
    -    qh_fprintf(qh ferr, 7038, "qhull warning: joggle('QJ') always produces simplicial output.  Triangulated output('Qt') does nothing.\n");
    -  if (qh JOGGLEmax < REALmax/2 && qh DELAUNAY && !qh SCALEinput && !qh SCALElast) {
    -    qh SCALElast= True;
    -    qh_option("Qbbound-last-qj", NULL, NULL);
    -  }
    -  if (qh MERGING && !qh POSTmerge && qh premerge_cos > REALmax/2
    -  && qh premerge_centrum == 0) {
    -    qh ZEROcentrum= True;
    -    qh ZEROall_ok= True;
    -    qh_option("_zero-centrum", NULL, NULL);
    -  }
    -  if (qh JOGGLEmax < REALmax/2 && REALepsilon > 2e-8 && qh PRINTprecision)
    -    qh_fprintf(qh ferr, 7039, "qhull warning: real epsilon, %2.2g, is probably too large for joggle('QJn')\nRecompile with double precision reals(see user.h).\n",
    -          REALepsilon);
    -#ifdef qh_NOmerge
    -  if (qh MERGING) {
    -    qh_fprintf(qh ferr, 6045, "qhull input error: merging not installed(qh_NOmerge + 'Qx', 'Cn' or 'An')\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -#endif
    -  if (qh DELAUNAY && qh KEEPcoplanar && !qh KEEPinside) {
    -    qh KEEPinside= True;
    -    qh_option("Qinterior-keep", NULL, NULL);
    -  }
    -  if (qh DELAUNAY && qh HALFspace) {
    -    qh_fprintf(qh ferr, 6046, "qhull input error: can not use Delaunay('d') or Voronoi('v') with halfspace intersection('H')\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (!qh DELAUNAY && (qh UPPERdelaunay || qh ATinfinity)) {
    -    qh_fprintf(qh ferr, 6047, "qhull input error: use upper-Delaunay('Qu') or infinity-point('Qz') with Delaunay('d') or Voronoi('v')\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (qh UPPERdelaunay && qh ATinfinity) {
    -    qh_fprintf(qh ferr, 6048, "qhull input error: can not use infinity-point('Qz') with upper-Delaunay('Qu')\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (qh SCALElast && !qh DELAUNAY && qh PRINTprecision)
    -    qh_fprintf(qh ferr, 7040, "qhull input warning: option 'Qbb' (scale-last-coordinate) is normally used with 'd' or 'v'\n");
    -  qh DOcheckmax= (!qh SKIPcheckmax && qh MERGING );
    -  qh KEEPnearinside= (qh DOcheckmax && !(qh KEEPinside && qh KEEPcoplanar)
    -                          && !qh NOnearinside);
    -  if (qh MERGING)
    -    qh CENTERtype= qh_AScentrum;
    -  else if (qh VORONOI)
    -    qh CENTERtype= qh_ASvoronoi;
    -  if (qh TESTvneighbors && !qh MERGING) {
    -    qh_fprintf(qh ferr, 6049, "qhull input error: test vertex neighbors('Qv') needs a merge option\n");
    -    qh_errexit(qh_ERRinput, NULL ,NULL);
    -  }
    -  if (qh PROJECTinput || (qh DELAUNAY && qh PROJECTdelaunay)) {
    -    qh hull_dim -= qh PROJECTinput;
    -    if (qh DELAUNAY) {
    -      qh hull_dim++;
    -      if (qh ATinfinity)
    -        extra= 1;
    -    }
    -  }
    -  if (qh hull_dim <= 1) {
    -    qh_fprintf(qh ferr, 6050, "qhull error: dimension %d must be > 1\n", qh hull_dim);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  for (k=2, factorial=1.0; k < qh hull_dim; k++)
    -    factorial *= k;
    -  qh AREAfactor= 1.0 / factorial;
    -  trace2((qh ferr, 2005, "qh_initqhull_globals: initialize globals.  dim %d numpoints %d malloc? %d projected %d to hull_dim %d\n",
    -        dim, numpoints, ismalloc, qh PROJECTinput, qh hull_dim));
    -  qh normal_size= qh hull_dim * sizeof(coordT);
    -  qh center_size= qh normal_size - sizeof(coordT);
    -  pointsneeded= qh hull_dim+1;
    -  if (qh hull_dim > qh_DIMmergeVertex) {
    -    qh MERGEvertices= False;
    -    qh_option("Q3-no-merge-vertices-dim-high", NULL, NULL);
    -  }
    -  if (qh GOODpoint)
    -    pointsneeded++;
    -#ifdef qh_NOtrace
    -  if (qh IStracing) {
    -    qh_fprintf(qh ferr, 6051, "qhull input error: tracing is not installed(qh_NOtrace in user.h)");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -#endif
    -  if (qh RERUN > 1) {
    -    qh TRACElastrun= qh IStracing; /* qh_build_withrestart duplicates next conditional */
    -    if (qh IStracing != -1)
    -      qh IStracing= 0;
    -  }else if (qh TRACEpoint != -1 || qh TRACEdist < REALmax/2 || qh TRACEmerge) {
    -    qh TRACElevel= (qh IStracing? qh IStracing : 3);
    -    qh IStracing= 0;
    -  }
    -  if (qh ROTATErandom == 0 || qh ROTATErandom == -1) {
    -    seed= (int)time(&timedata);
    -    if (qh ROTATErandom  == -1) {
    -      seed= -seed;
    -      qh_option("QRandom-seed", &seed, NULL );
    -    }else
    -      qh_option("QRotate-random", &seed, NULL);
    -    qh ROTATErandom= seed;
    -  }
    -  seed= qh ROTATErandom;
    -  if (seed == INT_MIN)    /* default value */
    -    seed= 1;
    -  else if (seed < 0)
    -    seed= -seed;
    -  qh_RANDOMseed_(seed);
    -  randr= 0.0;
    -  for (i=1000; i--; ) {
    -    randi= qh_RANDOMint;
    -    randr += randi;
    -    if (randi > qh_RANDOMmax) {
    -      qh_fprintf(qh ferr, 8036, "\
    -qhull configuration error (qh_RANDOMmax in user.h):\n\
    -   random integer %d > qh_RANDOMmax(%.8g)\n",
    -               randi, qh_RANDOMmax);
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -  }
    -  qh_RANDOMseed_(seed);
    -  randr = randr/1000;
    -  if (randr < qh_RANDOMmax * 0.1
    -  || randr > qh_RANDOMmax * 0.9)
    -    qh_fprintf(qh ferr, 8037, "\
    -qhull configuration warning (qh_RANDOMmax in user.h):\n\
    -   average of 1000 random integers (%.2g) is much different than expected (%.2g).\n\
    -   Is qh_RANDOMmax (%.2g) wrong?\n",
    -             randr, qh_RANDOMmax * 0.5, qh_RANDOMmax);
    -  qh RANDOMa= 2.0 * qh RANDOMfactor/qh_RANDOMmax;
    -  qh RANDOMb= 1.0 - qh RANDOMfactor;
    -  if (qh_HASHfactor < 1.1) {
    -    qh_fprintf(qh ferr, 6052, "qhull internal error (qh_initqhull_globals): qh_HASHfactor %d must be at least 1.1.  Qhull uses linear hash probing\n",
    -      qh_HASHfactor);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  if (numpoints+extra < pointsneeded) {
    -    qh_fprintf(qh ferr, 6214, "qhull input error: not enough points(%d) to construct initial simplex(need %d)\n",
    -            numpoints, pointsneeded);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  qh_initqhull_outputflags();
    -} /* initqhull_globals */
    -
    -/*---------------------------------
    -
    -  qh_initqhull_mem(  )
    -    initialize mem.c for qhull
    -    qh.hull_dim and qh.normal_size determine some of the allocation sizes
    -    if qh.MERGING,
    -      includes ridgeT
    -    calls qh_user_memsizes() to add up to 10 additional sizes for quick allocation
    -      (see numsizes below)
    -
    -  returns:
    -    mem.c already for qh_memalloc/qh_memfree (errors if called beforehand)
    -
    -  notes:
    -    qh_produceoutput() prints memsizes
    -
    -*/
    -void qh_initqhull_mem(void) {
    -  int numsizes;
    -  int i;
    -
    -  numsizes= 8+10;
    -  qh_meminitbuffers(qh IStracing, qh_MEMalign, numsizes,
    -                     qh_MEMbufsize,qh_MEMinitbuf);
    -  qh_memsize((int)sizeof(vertexT));
    -  if (qh MERGING) {
    -    qh_memsize((int)sizeof(ridgeT));
    -    qh_memsize((int)sizeof(mergeT));
    -  }
    -  qh_memsize((int)sizeof(facetT));
    -  i= sizeof(setT) + (qh hull_dim - 1) * SETelemsize;  /* ridge.vertices */
    -  qh_memsize(i);
    -  qh_memsize(qh normal_size);        /* normal */
    -  i += SETelemsize;                 /* facet.vertices, .ridges, .neighbors */
    -  qh_memsize(i);
    -  qh_user_memsizes();
    -  qh_memsetup();
    -} /* initqhull_mem */
    -
    -/*---------------------------------
    -
    -  qh_initqhull_outputflags
    -    initialize flags concerned with output
    -
    -  returns:
    -    adjust user flags as needed
    -
    -  see:
    -    qh_clear_outputflags() resets the flags
    -
    -  design:
    -    test for qh.PRINTgood (i.e., only print 'good' facets)
    -    check for conflicting print output options
    -*/
    -void qh_initqhull_outputflags(void) {
    -  boolT printgeom= False, printmath= False, printcoplanar= False;
    -  int i;
    -
    -  trace3((qh ferr, 3024, "qh_initqhull_outputflags: %s\n", qh qhull_command));
    -  if (!(qh PRINTgood || qh PRINTneighbors)) {
    -    if (qh KEEParea || qh KEEPminArea < REALmax/2 || qh KEEPmerge || qh DELAUNAY
    -        || (!qh ONLYgood && (qh GOODvertex || qh GOODpoint))) {
    -      qh PRINTgood= True;
    -      qh_option("Pgood", NULL, NULL);
    -    }
    -  }
    -  if (qh PRINTtransparent) {
    -    if (qh hull_dim != 4 || !qh DELAUNAY || qh VORONOI || qh DROPdim >= 0) {
    -      qh_fprintf(qh ferr, 6215, "qhull input error: transparent Delaunay('Gt') needs 3-d Delaunay('d') w/o 'GDn'\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    qh DROPdim = 3;
    -    qh PRINTridges = True;
    -  }
    -  for (i=qh_PRINTEND; i--; ) {
    -    if (qh PRINTout[i] == qh_PRINTgeom)
    -      printgeom= True;
    -    else if (qh PRINTout[i] == qh_PRINTmathematica || qh PRINTout[i] == qh_PRINTmaple)
    -      printmath= True;
    -    else if (qh PRINTout[i] == qh_PRINTcoplanars)
    -      printcoplanar= True;
    -    else if (qh PRINTout[i] == qh_PRINTpointnearest)
    -      printcoplanar= True;
    -    else if (qh PRINTout[i] == qh_PRINTpointintersect && !qh HALFspace) {
    -      qh_fprintf(qh ferr, 6053, "qhull input error: option 'Fp' is only used for \nhalfspace intersection('Hn,n,n').\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }else if (qh PRINTout[i] == qh_PRINTtriangles && (qh HALFspace || qh VORONOI)) {
    -      qh_fprintf(qh ferr, 6054, "qhull input error: option 'Ft' is not available for Voronoi vertices or halfspace intersection\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }else if (qh PRINTout[i] == qh_PRINTcentrums && qh VORONOI) {
    -      qh_fprintf(qh ferr, 6055, "qhull input error: option 'FC' is not available for Voronoi vertices('v')\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }else if (qh PRINTout[i] == qh_PRINTvertices) {
    -      if (qh VORONOI)
    -        qh_option("Fvoronoi", NULL, NULL);
    -      else
    -        qh_option("Fvertices", NULL, NULL);
    -    }
    -  }
    -  if (printcoplanar && qh DELAUNAY && qh JOGGLEmax < REALmax/2) {
    -    if (qh PRINTprecision)
    -      qh_fprintf(qh ferr, 7041, "qhull input warning: 'QJ' (joggle) will usually prevent coincident input sites for options 'Fc' and 'FP'\n");
    -  }
    -  if (printmath && (qh hull_dim > 3 || qh VORONOI)) {
    -    qh_fprintf(qh ferr, 6056, "qhull input error: Mathematica and Maple output is only available for 2-d and 3-d convex hulls and 2-d Delaunay triangulations\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (printgeom) {
    -    if (qh hull_dim > 4) {
    -      qh_fprintf(qh ferr, 6057, "qhull input error: Geomview output is only available for 2-d, 3-d and 4-d\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    if (qh PRINTnoplanes && !(qh PRINTcoplanar + qh PRINTcentrums
    -     + qh PRINTdots + qh PRINTspheres + qh DOintersections + qh PRINTridges)) {
    -      qh_fprintf(qh ferr, 6058, "qhull input error: no output specified for Geomview\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    if (qh VORONOI && (qh hull_dim > 3 || qh DROPdim >= 0)) {
    -      qh_fprintf(qh ferr, 6059, "qhull input error: Geomview output for Voronoi diagrams only for 2-d\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    /* can not warn about furthest-site Geomview output: no lower_threshold */
    -    if (qh hull_dim == 4 && qh DROPdim == -1 &&
    -        (qh PRINTcoplanar || qh PRINTspheres || qh PRINTcentrums)) {
    -      qh_fprintf(qh ferr, 7042, "qhull input warning: coplanars, vertices, and centrums output not\n\
    -available for 4-d output(ignored).  Could use 'GDn' instead.\n");
    -      qh PRINTcoplanar= qh PRINTspheres= qh PRINTcentrums= False;
    -    }
    -  }
    -  if (!qh KEEPcoplanar && !qh KEEPinside && !qh ONLYgood) {
    -    if ((qh PRINTcoplanar && qh PRINTspheres) || printcoplanar) {
    -      if (qh QHULLfinished) {
    -        qh_fprintf(qh ferr, 7072, "qhull output warning: ignoring coplanar points, option 'Qc' was not set for the first run of qhull.\n");
    -      }else {
    -        qh KEEPcoplanar = True;
    -        qh_option("Qcoplanar", NULL, NULL);
    -      }
    -    }
    -  }
    -  qh PRINTdim= qh hull_dim;
    -  if (qh DROPdim >=0) {    /* after Geomview checks */
    -    if (qh DROPdim < qh hull_dim) {
    -      qh PRINTdim--;
    -      if (!printgeom || qh hull_dim < 3)
    -        qh_fprintf(qh ferr, 7043, "qhull input warning: drop dimension 'GD%d' is only available for 3-d/4-d Geomview\n", qh DROPdim);
    -    }else
    -      qh DROPdim= -1;
    -  }else if (qh VORONOI) {
    -    qh DROPdim= qh hull_dim-1;
    -    qh PRINTdim= qh hull_dim-1;
    -  }
    -} /* qh_initqhull_outputflags */
    -
    -/*---------------------------------
    -
    -  qh_initqhull_start( infile, outfile, errfile )
    -    allocate memory if needed and call qh_initqhull_start2()
    -*/
    -void qh_initqhull_start(FILE *infile, FILE *outfile, FILE *errfile) {
    -
    -#if qh_QHpointer
    -  if (qh_qh) {
    -    qh_fprintf(errfile, 6205, "qhull error (qh_initqhull_start): qh_qh already defined.  Call qh_save_qhull() first\n");
    -    qh_exit(qh_ERRqhull);  /* no error handler */
    -  }
    -  if (!(qh_qh= (qhT *)qh_malloc(sizeof(qhT)))) {
    -    qh_fprintf(errfile, 6060, "qhull error (qh_initqhull_start): insufficient memory\n");
    -    qh_exit(qh_ERRmem);  /* no error handler */
    -  }
    -#endif
    -  qh_initstatistics();
    -  qh_initqhull_start2(infile, outfile, errfile);
    -} /* initqhull_start */
    -
    -/*---------------------------------
    -
    -  qh_initqhull_start2( infile, outfile, errfile )
    -    start initialization of qhull
    -    initialize statistics, stdio, default values for global variables
    -    assumes qh_qh is defined
    -  notes:
    -    report errors elsewhere, error handling and g_qhull_output [Qhull.cpp, QhullQh()] not in initialized
    -  see:
    -    qh_maxmin() determines the precision constants
    -    qh_freeqhull2()
    -*/
    -void qh_initqhull_start2(FILE *infile, FILE *outfile, FILE *errfile) {
    -  time_t timedata;
    -  int seed;
    -
    -  qh_CPUclock; /* start the clock(for qh_clock).  One-shot. */
    -#if qh_QHpointer
    -  memset((char *)qh_qh, 0, sizeof(qhT));   /* every field is 0, FALSE, NULL */
    -#else
    -  memset((char *)&qh_qh, 0, sizeof(qhT));
    -#endif
    -  qh ANGLEmerge= True;
    -  qh DROPdim= -1;
    -  qh ferr= errfile;
    -  qh fin= infile;
    -  qh fout= outfile;
    -  qh furthest_id= -1;
    -  qh JOGGLEmax= REALmax;
    -  qh KEEPminArea = REALmax;
    -  qh last_low= REALmax;
    -  qh last_high= REALmax;
    -  qh last_newhigh= REALmax;
    -  qh max_outside= 0.0;
    -  qh max_vertex= 0.0;
    -  qh MAXabs_coord= 0.0;
    -  qh MAXsumcoord= 0.0;
    -  qh MAXwidth= -REALmax;
    -  qh MERGEindependent= True;
    -  qh MINdenom_1= fmax_(1.0/REALmax, REALmin); /* used by qh_scalepoints */
    -  qh MINoutside= 0.0;
    -  qh MINvisible= REALmax;
    -  qh MAXcoplanar= REALmax;
    -  qh outside_err= REALmax;
    -  qh premerge_centrum= 0.0;
    -  qh premerge_cos= REALmax;
    -  qh PRINTprecision= True;
    -  qh PRINTradius= 0.0;
    -  qh postmerge_cos= REALmax;
    -  qh postmerge_centrum= 0.0;
    -  qh ROTATErandom= INT_MIN;
    -  qh MERGEvertices= True;
    -  qh totarea= 0.0;
    -  qh totvol= 0.0;
    -  qh TRACEdist= REALmax;
    -  qh TRACEpoint= -1; /* recompile or use 'TPn' */
    -  qh tracefacet_id= UINT_MAX;  /* recompile to trace a facet */
    -  qh tracevertex_id= UINT_MAX; /* recompile to trace a vertex */
    -  seed= (int)time(&timedata);
    -  qh_RANDOMseed_(seed);
    -  qh run_id= qh_RANDOMint+1; /* disallow 0 [UsingLibQhull::NOqhRunId] */
    -  qh_option("run-id", &qh run_id, NULL);
    -  strcat(qh qhull, "qhull");
    -} /* initqhull_start2 */
    -
    -/*---------------------------------
    -
    -  qh_initthresholds( commandString )
    -    set thresholds for printing and scaling from commandString
    -
    -  returns:
    -    sets qh.GOODthreshold or qh.SPLITthreshold if 'Pd0D1' used
    -
    -  see:
    -    qh_initflags(), 'Qbk' 'QBk' 'Pdk' and 'PDk'
    -    qh_inthresholds()
    -
    -  design:
    -    for each 'Pdn' or 'PDn' option
    -      check syntax
    -      set qh.lower_threshold or qh.upper_threshold
    -    set qh.GOODthreshold if an unbounded threshold is used
    -    set qh.SPLITthreshold if a bounded threshold is used
    -*/
    -void qh_initthresholds(char *command) {
    -  realT value;
    -  int idx, maxdim, k;
    -  char *s= command; /* non-const due to strtol */
    -  char key;
    -
    -  maxdim= qh input_dim;
    -  if (qh DELAUNAY && (qh PROJECTdelaunay || qh PROJECTinput))
    -    maxdim++;
    -  while (*s) {
    -    if (*s == '-')
    -      s++;
    -    if (*s == 'P') {
    -      s++;
    -      while (*s && !isspace(key= *s++)) {
    -        if (key == 'd' || key == 'D') {
    -          if (!isdigit(*s)) {
    -            qh_fprintf(qh ferr, 7044, "qhull warning: no dimension given for Print option '%c' at: %s.  Ignored\n",
    -                    key, s-1);
    -            continue;
    -          }
    -          idx= qh_strtol(s, &s);
    -          if (idx >= qh hull_dim) {
    -            qh_fprintf(qh ferr, 7045, "qhull warning: dimension %d for Print option '%c' is >= %d.  Ignored\n",
    -                idx, key, qh hull_dim);
    -            continue;
    -          }
    -          if (*s == ':') {
    -            s++;
    -            value= qh_strtod(s, &s);
    -            if (fabs((double)value) > 1.0) {
    -              qh_fprintf(qh ferr, 7046, "qhull warning: value %2.4g for Print option %c is > +1 or < -1.  Ignored\n",
    -                      value, key);
    -              continue;
    -            }
    -          }else
    -            value= 0.0;
    -          if (key == 'd')
    -            qh lower_threshold[idx]= value;
    -          else
    -            qh upper_threshold[idx]= value;
    -        }
    -      }
    -    }else if (*s == 'Q') {
    -      s++;
    -      while (*s && !isspace(key= *s++)) {
    -        if (key == 'b' && *s == 'B') {
    -          s++;
    -          for (k=maxdim; k--; ) {
    -            qh lower_bound[k]= -qh_DEFAULTbox;
    -            qh upper_bound[k]= qh_DEFAULTbox;
    -          }
    -        }else if (key == 'b' && *s == 'b')
    -          s++;
    -        else if (key == 'b' || key == 'B') {
    -          if (!isdigit(*s)) {
    -            qh_fprintf(qh ferr, 7047, "qhull warning: no dimension given for Qhull option %c.  Ignored\n",
    -                    key);
    -            continue;
    -          }
    -          idx= qh_strtol(s, &s);
    -          if (idx >= maxdim) {
    -            qh_fprintf(qh ferr, 7048, "qhull warning: dimension %d for Qhull option %c is >= %d.  Ignored\n",
    -                idx, key, maxdim);
    -            continue;
    -          }
    -          if (*s == ':') {
    -            s++;
    -            value= qh_strtod(s, &s);
    -          }else if (key == 'b')
    -            value= -qh_DEFAULTbox;
    -          else
    -            value= qh_DEFAULTbox;
    -          if (key == 'b')
    -            qh lower_bound[idx]= value;
    -          else
    -            qh upper_bound[idx]= value;
    -        }
    -      }
    -    }else {
    -      while (*s && !isspace(*s))
    -        s++;
    -    }
    -    while (isspace(*s))
    -      s++;
    -  }
    -  for (k=qh hull_dim; k--; ) {
    -    if (qh lower_threshold[k] > -REALmax/2) {
    -      qh GOODthreshold= True;
    -      if (qh upper_threshold[k] < REALmax/2) {
    -        qh SPLITthresholds= True;
    -        qh GOODthreshold= False;
    -        break;
    -      }
    -    }else if (qh upper_threshold[k] < REALmax/2)
    -      qh GOODthreshold= True;
    -  }
    -} /* initthresholds */
    -
    -/*---------------------------------
    -
    -  qh_option( option, intVal, realVal )
    -    add an option description to qh.qhull_options
    -
    -  notes:
    -    NOerrors -- qh_option can not call qh_errexit() [qh_initqhull_start2]
    -    will be printed with statistics ('Ts') and errors
    -    strlen(option) < 40
    -*/
    -void qh_option(const char *option, int *i, realT *r) {
    -  char buf[200];
    -  int len, maxlen;
    -
    -  sprintf(buf, "  %s", option);
    -  if (i)
    -    sprintf(buf+strlen(buf), " %d", *i);
    -  if (r)
    -    sprintf(buf+strlen(buf), " %2.2g", *r);
    -  len= (int)strlen(buf);  /* WARN64 */
    -  qh qhull_optionlen += len;
    -  maxlen= sizeof(qh qhull_options) - len -1;
    -  maximize_(maxlen, 0);
    -  if (qh qhull_optionlen >= qh_OPTIONline && maxlen > 0) {
    -    qh qhull_optionlen= len;
    -    strncat(qh qhull_options, "\n", (size_t)(maxlen--));
    -  }
    -  strncat(qh qhull_options, buf, (size_t)maxlen);
    -} /* option */
    -
    -#if qh_QHpointer
    -/*---------------------------------
    -
    -  qh_restore_qhull( oldqh )
    -    restores a previously saved qhull
    -    also restores qh_qhstat and qhmem.tempstack
    -    Sets *oldqh to NULL
    -  notes:
    -    errors if current qhull hasn't been saved or freed
    -    uses qhmem for error reporting
    -
    -  NOTE 1998/5/11:
    -    Freeing memory after qh_save_qhull and qh_restore_qhull
    -    is complicated.  The procedures will be redesigned.
    -
    -  see:
    -    qh_save_qhull(), UsingLibQhull
    -*/
    -void qh_restore_qhull(qhT **oldqh) {
    -
    -  if (*oldqh && strcmp((*oldqh)->qhull, "qhull")) {
    -    qh_fprintf(qhmem.ferr, 6061, "qhull internal error (qh_restore_qhull): %p is not a qhull data structure\n",
    -                  *oldqh);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  if (qh_qh) {
    -    qh_fprintf(qhmem.ferr, 6062, "qhull internal error (qh_restore_qhull): did not save or free existing qhull\n");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  if (!*oldqh || !(*oldqh)->old_qhstat) {
    -    qh_fprintf(qhmem.ferr, 6063, "qhull internal error (qh_restore_qhull): did not previously save qhull %p\n",
    -                  *oldqh);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  qh_qh= *oldqh;
    -  *oldqh= NULL;
    -  qh_qhstat= qh old_qhstat;
    -  qhmem.tempstack= qh old_tempstack;
    -  qh old_qhstat= 0;
    -  qh old_tempstack= 0;
    -  trace1((qh ferr, 1007, "qh_restore_qhull: restored qhull from %p\n", *oldqh));
    -} /* restore_qhull */
    -
    -/*---------------------------------
    -
    -  qh_save_qhull(  )
    -    saves qhull for a later qh_restore_qhull
    -    also saves qh_qhstat and qhmem.tempstack
    -
    -  returns:
    -    qh_qh=NULL
    -
    -  notes:
    -    need to initialize qhull or call qh_restore_qhull before continuing
    -
    -  NOTE 1998/5/11:
    -    Freeing memory after qh_save_qhull and qh_restore_qhull
    -    is complicated.  The procedures will be redesigned.
    -
    -  see:
    -    qh_restore_qhull()
    -*/
    -qhT *qh_save_qhull(void) {
    -  qhT *oldqh;
    -
    -  trace1((qhmem.ferr, 1045, "qh_save_qhull: save qhull %p\n", qh_qh));
    -  if (!qh_qh) {
    -    qh_fprintf(qhmem.ferr, 6064, "qhull internal error (qh_save_qhull): qhull not initialized\n");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  qh old_qhstat= qh_qhstat;
    -  qh_qhstat= NULL;
    -  qh old_tempstack= qhmem.tempstack;
    -  qhmem.tempstack= NULL;
    -  oldqh= qh_qh;
    -  qh_qh= NULL;
    -  return oldqh;
    -} /* save_qhull */
    -
    -#endif
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/io.c b/scipy-0.10.1/scipy/spatial/qhull/src/io.c
    deleted file mode 100644
    index afa89c06ef..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/io.c
    +++ /dev/null
    @@ -1,4062 +0,0 @@
    -/*
      ---------------------------------
    -
    -   io.c
    -   Input/Output routines of qhull application
    -
    -   see qh-io.htm and io.h
    -
    -   see user.c for qh_errprint and qh_printfacetlist
    -
    -   unix.c calls qh_readpoints and qh_produce_output
    -
    -   unix.c and user.c are the only callers of io.c functions
    -   This allows the user to avoid loading io.o from qhull.a
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/io.c#38 $$Change: 1179 $
    -   $DateTime: 2010/01/12 19:53:15 $$Author: bbarber $
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*========= -functions in alphabetical order after qh_produce_output()  =====*/
    -
    -/*---------------------------------
    -
    -  qh_produce_output()
    -  qh_produce_output2()
    -    prints out the result of qhull in desired format
    -    qh_produce_output2() does not call qh_prepare_output()
    -    if qh.GETarea
    -      computes and prints area and volume
    -    qh.PRINTout[] is an array of output formats
    -
    -  notes:
    -    prints output in qh.PRINTout order
    -*/
    -void qh_produce_output(void) {
    -    int tempsize= qh_setsize(qhmem.tempstack);
    -
    -    qh_prepare_output();
    -    qh_produce_output2();
    -    if (qh_setsize(qhmem.tempstack) != tempsize) {
    -        qh_fprintf(qh ferr, 6206, "qhull internal error (qh_produce_output): temporary sets not empty(%d)\n",
    -            qh_setsize(qhmem.tempstack));
    -        qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }
    -} /* produce_output */
    -
    -
    -void qh_produce_output2(void) {
    -  int i, tempsize= qh_setsize(qhmem.tempstack), d_1;
    -
    -  if (qh PRINTsummary)
    -    qh_printsummary(qh ferr);
    -  else if (qh PRINTout[0] == qh_PRINTnone)
    -    qh_printsummary(qh fout);
    -  for (i=0; i < qh_PRINTEND; i++)
    -    qh_printfacets(qh fout, qh PRINTout[i], qh facet_list, NULL, !qh_ALL);
    -  qh_allstatistics();
    -  if (qh PRINTprecision && !qh MERGING && (qh JOGGLEmax > REALmax/2 || qh RERUN))
    -    qh_printstats(qh ferr, qhstat precision, NULL);
    -  if (qh VERIFYoutput && (zzval_(Zridge) > 0 || zzval_(Zridgemid) > 0))
    -    qh_printstats(qh ferr, qhstat vridges, NULL);
    -  if (qh PRINTstatistics) {
    -    qh_printstatistics(qh ferr, "");
    -    qh_memstatistics(qh ferr);
    -    d_1= sizeof(setT) + (qh hull_dim - 1) * SETelemsize;
    -    qh_fprintf(qh ferr, 8040, "\
    -    size in bytes: merge %d ridge %d vertex %d facet %d\n\
    -         normal %d ridge vertices %d facet vertices or neighbors %d\n",
    -            (int)sizeof(mergeT), (int)sizeof(ridgeT),
    -            (int)sizeof(vertexT), (int)sizeof(facetT),
    -            qh normal_size, d_1, d_1 + SETelemsize);
    -  }
    -  if (qh_setsize(qhmem.tempstack) != tempsize) {
    -    qh_fprintf(qh ferr, 6065, "qhull internal error (qh_produce_output2): temporary sets not empty(%d)\n",
    -             qh_setsize(qhmem.tempstack));
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -} /* produce_output2 */
    -
    -/*---------------------------------
    -
    -  dfacet( id )
    -    print facet by id, for debugging
    -
    -*/
    -void dfacet(unsigned id) {
    -  facetT *facet;
    -
    -  FORALLfacets {
    -    if (facet->id == id) {
    -      qh_printfacet(qh fout, facet);
    -      break;
    -    }
    -  }
    -} /* dfacet */
    -
    -
    -/*---------------------------------
    -
    -  dvertex( id )
    -    print vertex by id, for debugging
    -*/
    -void dvertex(unsigned id) {
    -  vertexT *vertex;
    -
    -  FORALLvertices {
    -    if (vertex->id == id) {
    -      qh_printvertex(qh fout, vertex);
    -      break;
    -    }
    -  }
    -} /* dvertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_compare_vertexpoint( p1, p2 )
    -    used by qsort() to order vertices by point id
    -*/
    -int qh_compare_vertexpoint(const void *p1, const void *p2) {
    -  const vertexT *a= *((vertexT *const*)p1), *b= *((vertexT *const*)p2);
    -
    -  return((qh_pointid(a->point) > qh_pointid(b->point)?1:-1));
    -} /* compare_vertexpoint */
    -
    -/*---------------------------------
    -
    -  qh_compare_facetarea( p1, p2 )
    -    used by qsort() to order facets by area
    -*/
    -int qh_compare_facetarea(const void *p1, const void *p2) {
    -  const facetT *a= *((facetT *const*)p1), *b= *((facetT *const*)p2);
    -
    -  if (!a->isarea)
    -    return -1;
    -  if (!b->isarea)
    -    return 1;
    -  if (a->f.area > b->f.area)
    -    return 1;
    -  else if (a->f.area == b->f.area)
    -    return 0;
    -  return -1;
    -} /* compare_facetarea */
    -
    -/*---------------------------------
    -
    -  qh_compare_facetmerge( p1, p2 )
    -    used by qsort() to order facets by number of merges
    -*/
    -int qh_compare_facetmerge(const void *p1, const void *p2) {
    -  const facetT *a= *((facetT *const*)p1), *b= *((facetT *const*)p2);
    -
    -  return(a->nummerge - b->nummerge);
    -} /* compare_facetvisit */
    -
    -/*---------------------------------
    -
    -  qh_compare_facetvisit( p1, p2 )
    -    used by qsort() to order facets by visit id or id
    -*/
    -int qh_compare_facetvisit(const void *p1, const void *p2) {
    -  const facetT *a= *((facetT *const*)p1), *b= *((facetT *const*)p2);
    -  int i,j;
    -
    -  if (!(i= a->visitid))
    -    i= 0 - a->id; /* do not convert to int, sign distinguishes id from visitid */
    -  if (!(j= b->visitid))
    -    j= 0 - b->id;
    -  return(i - j);
    -} /* compare_facetvisit */
    -
    -/*---------------------------------
    -
    -  qh_copyfilename( dest, size, source, length )
    -    copy filename identified by qh_skipfilename()
    -
    -  notes:
    -    see qh_skipfilename() for syntax
    -*/
    -void qh_copyfilename(char *filename, int size, const char* source, int length) {
    -  char c= *source;
    -
    -  if (length > size + 1) {
    -      qh_fprintf(qh ferr, 6040, "qhull error: filename is more than %d characters, %s\n",  size-1, source);
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  strncpy(filename, source, length);
    -  filename[length]= '\0';
    -  if (c == '\'' || c == '"') {
    -    char *s= filename + 1;
    -    char *t= filename;
    -    while (*s) {
    -      if (*s == c) {
    -          if (s[-1] == '\\')
    -              t[-1]= c;
    -      }else
    -          *t++= *s;
    -      s++;
    -    }
    -    *t= '\0';
    -  }
    -} /* copyfilename */
    -
    -/*---------------------------------
    -
    -  qh_countfacets( facetlist, facets, printall,
    -          numfacets, numsimplicial, totneighbors, numridges, numcoplanar, numtricoplanars  )
    -    count good facets for printing and set visitid
    -    if allfacets, ignores qh_skipfacet()
    -
    -  notes:
    -    qh_printsummary and qh_countfacets must match counts
    -
    -  returns:
    -    numfacets, numsimplicial, total neighbors, numridges, coplanars
    -    each facet with ->visitid indicating 1-relative position
    -      ->visitid==0 indicates not good
    -
    -  notes
    -    numfacets >= numsimplicial
    -    if qh.NEWfacets,
    -      does not count visible facets (matches qh_printafacet)
    -
    -  design:
    -    for all facets on facetlist and in facets set
    -      unless facet is skipped or visible (i.e., will be deleted)
    -        mark facet->visitid
    -        update counts
    -*/
    -void qh_countfacets(facetT *facetlist, setT *facets, boolT printall,
    -    int *numfacetsp, int *numsimplicialp, int *totneighborsp, int *numridgesp, int *numcoplanarsp, int *numtricoplanarsp) {
    -  facetT *facet, **facetp;
    -  int numfacets= 0, numsimplicial= 0, numridges= 0, totneighbors= 0, numcoplanars= 0, numtricoplanars= 0;
    -
    -  FORALLfacet_(facetlist) {
    -    if ((facet->visible && qh NEWfacets)
    -    || (!printall && qh_skipfacet(facet)))
    -      facet->visitid= 0;
    -    else {
    -      facet->visitid= ++numfacets;
    -      totneighbors += qh_setsize(facet->neighbors);
    -      if (facet->simplicial) {
    -        numsimplicial++;
    -        if (facet->keepcentrum && facet->tricoplanar)
    -          numtricoplanars++;
    -      }else
    -        numridges += qh_setsize(facet->ridges);
    -      if (facet->coplanarset)
    -        numcoplanars += qh_setsize(facet->coplanarset);
    -    }
    -  }
    -
    -  FOREACHfacet_(facets) {
    -    if ((facet->visible && qh NEWfacets)
    -    || (!printall && qh_skipfacet(facet)))
    -      facet->visitid= 0;
    -    else {
    -      facet->visitid= ++numfacets;
    -      totneighbors += qh_setsize(facet->neighbors);
    -      if (facet->simplicial){
    -        numsimplicial++;
    -        if (facet->keepcentrum && facet->tricoplanar)
    -          numtricoplanars++;
    -      }else
    -        numridges += qh_setsize(facet->ridges);
    -      if (facet->coplanarset)
    -        numcoplanars += qh_setsize(facet->coplanarset);
    -    }
    -  }
    -  qh visit_id += numfacets+1;
    -  *numfacetsp= numfacets;
    -  *numsimplicialp= numsimplicial;
    -  *totneighborsp= totneighbors;
    -  *numridgesp= numridges;
    -  *numcoplanarsp= numcoplanars;
    -  *numtricoplanarsp= numtricoplanars;
    -} /* countfacets */
    -
    -/*---------------------------------
    -
    -  qh_detvnorm( vertex, vertexA, centers, offset )
    -    compute separating plane of the Voronoi diagram for a pair of input sites
    -    centers= set of facets (i.e., Voronoi vertices)
    -      facet->visitid= 0 iff vertex-at-infinity (i.e., unbounded)
    -
    -  assumes:
    -    qh_ASvoronoi and qh_vertexneighbors() already set
    -
    -  returns:
    -    norm
    -      a pointer into qh.gm_matrix to qh.hull_dim-1 reals
    -      copy the data before reusing qh.gm_matrix
    -    offset
    -      if 'QVn'
    -        sign adjusted so that qh.GOODvertexp is inside
    -      else
    -        sign adjusted so that vertex is inside
    -
    -    qh.gm_matrix= simplex of points from centers relative to first center
    -
    -  notes:
    -    in io.c so that code for 'v Tv' can be removed by removing io.c
    -    returns pointer into qh.gm_matrix to avoid tracking of temporary memory
    -
    -  design:
    -    determine midpoint of input sites
    -    build points as the set of Voronoi vertices
    -    select a simplex from points (if necessary)
    -      include midpoint if the Voronoi region is unbounded
    -    relocate the first vertex of the simplex to the origin
    -    compute the normalized hyperplane through the simplex
    -    orient the hyperplane toward 'QVn' or 'vertex'
    -    if 'Tv' or 'Ts'
    -      if bounded
    -        test that hyperplane is the perpendicular bisector of the input sites
    -      test that Voronoi vertices not in the simplex are still on the hyperplane
    -    free up temporary memory
    -*/
    -pointT *qh_detvnorm(vertexT *vertex, vertexT *vertexA, setT *centers, realT *offsetp) {
    -  facetT *facet, **facetp;
    -  int  i, k, pointid, pointidA, point_i, point_n;
    -  setT *simplex= NULL;
    -  pointT *point, **pointp, *point0, *midpoint, *normal, *inpoint;
    -  coordT *coord, *gmcoord, *normalp;
    -  setT *points= qh_settemp(qh TEMPsize);
    -  boolT nearzero= False;
    -  boolT unbounded= False;
    -  int numcenters= 0;
    -  int dim= qh hull_dim - 1;
    -  realT dist, offset, angle, zero= 0.0;
    -
    -  midpoint= qh gm_matrix + qh hull_dim * qh hull_dim;  /* last row */
    -  for (k=0; k < dim; k++)
    -    midpoint[k]= (vertex->point[k] + vertexA->point[k])/2;
    -  FOREACHfacet_(centers) {
    -    numcenters++;
    -    if (!facet->visitid)
    -      unbounded= True;
    -    else {
    -      if (!facet->center)
    -        facet->center= qh_facetcenter(facet->vertices);
    -      qh_setappend(&points, facet->center);
    -    }
    -  }
    -  if (numcenters > dim) {
    -    simplex= qh_settemp(qh TEMPsize);
    -    qh_setappend(&simplex, vertex->point);
    -    if (unbounded)
    -      qh_setappend(&simplex, midpoint);
    -    qh_maxsimplex(dim, points, NULL, 0, &simplex);
    -    qh_setdelnth(simplex, 0);
    -  }else if (numcenters == dim) {
    -    if (unbounded)
    -      qh_setappend(&points, midpoint);
    -    simplex= points;
    -  }else {
    -    qh_fprintf(qh ferr, 6216, "qhull internal error (qh_detvnorm): too few points(%d) to compute separating plane\n", numcenters);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  i= 0;
    -  gmcoord= qh gm_matrix;
    -  point0= SETfirstt_(simplex, pointT);
    -  FOREACHpoint_(simplex) {
    -    if (qh IStracing >= 4)
    -      qh_printmatrix(qh ferr, "qh_detvnorm: Voronoi vertex or midpoint",
    -                              &point, 1, dim);
    -    if (point != point0) {
    -      qh gm_row[i++]= gmcoord;
    -      coord= point0;
    -      for (k=dim; k--; )
    -        *(gmcoord++)= *point++ - *coord++;
    -    }
    -  }
    -  qh gm_row[i]= gmcoord;  /* does not overlap midpoint, may be used later for qh_areasimplex */
    -  normal= gmcoord;
    -  qh_sethyperplane_gauss(dim, qh gm_row, point0, True,
    -                normal, &offset, &nearzero);
    -  if (qh GOODvertexp == vertexA->point)
    -    inpoint= vertexA->point;
    -  else
    -    inpoint= vertex->point;
    -  zinc_(Zdistio);
    -  dist= qh_distnorm(dim, inpoint, normal, &offset);
    -  if (dist > 0) {
    -    offset= -offset;
    -    normalp= normal;
    -    for (k=dim; k--; ) {
    -      *normalp= -(*normalp);
    -      normalp++;
    -    }
    -  }
    -  if (qh VERIFYoutput || qh PRINTstatistics) {
    -    pointid= qh_pointid(vertex->point);
    -    pointidA= qh_pointid(vertexA->point);
    -    if (!unbounded) {
    -      zinc_(Zdiststat);
    -      dist= qh_distnorm(dim, midpoint, normal, &offset);
    -      if (dist < 0)
    -        dist= -dist;
    -      zzinc_(Zridgemid);
    -      wwmax_(Wridgemidmax, dist);
    -      wwadd_(Wridgemid, dist);
    -      trace4((qh ferr, 4014, "qh_detvnorm: points %d %d midpoint dist %2.2g\n",
    -                 pointid, pointidA, dist));
    -      for (k=0; k < dim; k++)
    -        midpoint[k]= vertexA->point[k] - vertex->point[k];  /* overwrites midpoint! */
    -      qh_normalize(midpoint, dim, False);
    -      angle= qh_distnorm(dim, midpoint, normal, &zero); /* qh_detangle uses dim+1 */
    -      if (angle < 0.0)
    -        angle= angle + 1.0;
    -      else
    -        angle= angle - 1.0;
    -      if (angle < 0.0)
    -        angle -= angle;
    -      trace4((qh ferr, 4015, "qh_detvnorm: points %d %d angle %2.2g nearzero %d\n",
    -                 pointid, pointidA, angle, nearzero));
    -      if (nearzero) {
    -        zzinc_(Zridge0);
    -        wwmax_(Wridge0max, angle);
    -        wwadd_(Wridge0, angle);
    -      }else {
    -        zzinc_(Zridgeok)
    -        wwmax_(Wridgeokmax, angle);
    -        wwadd_(Wridgeok, angle);
    -      }
    -    }
    -    if (simplex != points) {
    -      FOREACHpoint_i_(points) {
    -        if (!qh_setin(simplex, point)) {
    -          facet= SETelemt_(centers, point_i, facetT);
    -          zinc_(Zdiststat);
    -          dist= qh_distnorm(dim, point, normal, &offset);
    -          if (dist < 0)
    -            dist= -dist;
    -          zzinc_(Zridge);
    -          wwmax_(Wridgemax, dist);
    -          wwadd_(Wridge, dist);
    -          trace4((qh ferr, 4016, "qh_detvnorm: points %d %d Voronoi vertex %d dist %2.2g\n",
    -                             pointid, pointidA, facet->visitid, dist));
    -        }
    -      }
    -    }
    -  }
    -  *offsetp= offset;
    -  if (simplex != points)
    -    qh_settempfree(&simplex);
    -  qh_settempfree(&points);
    -  return normal;
    -} /* detvnorm */
    -
    -/*---------------------------------
    -
    -  qh_detvridge( vertexA )
    -    determine Voronoi ridge from 'seen' neighbors of vertexA
    -    include one vertex-at-infinite if an !neighbor->visitid
    -
    -  returns:
    -    temporary set of centers (facets, i.e., Voronoi vertices)
    -    sorted by center id
    -*/
    -setT *qh_detvridge(vertexT *vertex) {
    -  setT *centers= qh_settemp(qh TEMPsize);
    -  setT *tricenters= qh_settemp(qh TEMPsize);
    -  facetT *neighbor, **neighborp;
    -  boolT firstinf= True;
    -
    -  FOREACHneighbor_(vertex) {
    -    if (neighbor->seen) {
    -      if (neighbor->visitid) {
    -        if (!neighbor->tricoplanar || qh_setunique(&tricenters, neighbor->center))
    -          qh_setappend(¢ers, neighbor);
    -      }else if (firstinf) {
    -        firstinf= False;
    -        qh_setappend(¢ers, neighbor);
    -      }
    -    }
    -  }
    -  qsort(SETaddr_(centers, facetT), (size_t)qh_setsize(centers),
    -             sizeof(facetT *), qh_compare_facetvisit);
    -  qh_settempfree(&tricenters);
    -  return centers;
    -} /* detvridge */
    -
    -/*---------------------------------
    -
    -  qh_detvridge3( atvertex, vertex )
    -    determine 3-d Voronoi ridge from 'seen' neighbors of atvertex and vertex
    -    include one vertex-at-infinite for !neighbor->visitid
    -    assumes all facet->seen2= True
    -
    -  returns:
    -    temporary set of centers (facets, i.e., Voronoi vertices)
    -    listed in adjacency order (!oriented)
    -    all facet->seen2= True
    -
    -  design:
    -    mark all neighbors of atvertex
    -    for each adjacent neighbor of both atvertex and vertex
    -      if neighbor selected
    -        add neighbor to set of Voronoi vertices
    -*/
    -setT *qh_detvridge3 (vertexT *atvertex, vertexT *vertex) {
    -  setT *centers= qh_settemp(qh TEMPsize);
    -  setT *tricenters= qh_settemp(qh TEMPsize);
    -  facetT *neighbor, **neighborp, *facet= NULL;
    -  boolT firstinf= True;
    -
    -  FOREACHneighbor_(atvertex)
    -    neighbor->seen2= False;
    -  FOREACHneighbor_(vertex) {
    -    if (!neighbor->seen2) {
    -      facet= neighbor;
    -      break;
    -    }
    -  }
    -  while (facet) {
    -    facet->seen2= True;
    -    if (neighbor->seen) {
    -      if (facet->visitid) {
    -        if (!facet->tricoplanar || qh_setunique(&tricenters, facet->center))
    -          qh_setappend(¢ers, facet);
    -      }else if (firstinf) {
    -        firstinf= False;
    -        qh_setappend(¢ers, facet);
    -      }
    -    }
    -    FOREACHneighbor_(facet) {
    -      if (!neighbor->seen2) {
    -        if (qh_setin(vertex->neighbors, neighbor))
    -          break;
    -        else
    -          neighbor->seen2= True;
    -      }
    -    }
    -    facet= neighbor;
    -  }
    -  if (qh CHECKfrequently) {
    -    FOREACHneighbor_(vertex) {
    -      if (!neighbor->seen2) {
    -          qh_fprintf(qh ferr, 6217, "qhull internal error (qh_detvridge3): neighbors of vertex p%d are not connected at facet %d\n",
    -                 qh_pointid(vertex->point), neighbor->id);
    -        qh_errexit(qh_ERRqhull, neighbor, NULL);
    -      }
    -    }
    -  }
    -  FOREACHneighbor_(atvertex)
    -    neighbor->seen2= True;
    -  qh_settempfree(&tricenters);
    -  return centers;
    -} /* detvridge3 */
    -
    -/*---------------------------------
    -
    -  qh_eachvoronoi( fp, printvridge, vertex, visitall, innerouter, inorder )
    -    if visitall,
    -      visit all Voronoi ridges for vertex (i.e., an input site)
    -    else
    -      visit all unvisited Voronoi ridges for vertex
    -      all vertex->seen= False if unvisited
    -    assumes
    -      all facet->seen= False
    -      all facet->seen2= True (for qh_detvridge3)
    -      all facet->visitid == 0 if vertex_at_infinity
    -                         == index of Voronoi vertex
    -                         >= qh.num_facets if ignored
    -    innerouter:
    -      qh_RIDGEall--  both inner (bounded) and outer(unbounded) ridges
    -      qh_RIDGEinner- only inner
    -      qh_RIDGEouter- only outer
    -
    -    if inorder
    -      orders vertices for 3-d Voronoi diagrams
    -
    -  returns:
    -    number of visited ridges (does not include previously visited ridges)
    -
    -    if printvridge,
    -      calls printvridge( fp, vertex, vertexA, centers)
    -        fp== any pointer (assumes FILE*)
    -        vertex,vertexA= pair of input sites that define a Voronoi ridge
    -        centers= set of facets (i.e., Voronoi vertices)
    -                 ->visitid == index or 0 if vertex_at_infinity
    -                 ordered for 3-d Voronoi diagram
    -  notes:
    -    uses qh.vertex_visit
    -
    -  see:
    -    qh_eachvoronoi_all()
    -
    -  design:
    -    mark selected neighbors of atvertex
    -    for each selected neighbor (either Voronoi vertex or vertex-at-infinity)
    -      for each unvisited vertex
    -        if atvertex and vertex share more than d-1 neighbors
    -          bump totalcount
    -          if printvridge defined
    -            build the set of shared neighbors (i.e., Voronoi vertices)
    -            call printvridge
    -*/
    -int qh_eachvoronoi(FILE *fp, printvridgeT printvridge, vertexT *atvertex, boolT visitall, qh_RIDGE innerouter, boolT inorder) {
    -  boolT unbounded;
    -  int count;
    -  facetT *neighbor, **neighborp, *neighborA, **neighborAp;
    -  setT *centers;
    -  setT *tricenters= qh_settemp(qh TEMPsize);
    -
    -  vertexT *vertex, **vertexp;
    -  boolT firstinf;
    -  unsigned int numfacets= (unsigned int)qh num_facets;
    -  int totridges= 0;
    -
    -  qh vertex_visit++;
    -  atvertex->seen= True;
    -  if (visitall) {
    -    FORALLvertices
    -      vertex->seen= False;
    -  }
    -  FOREACHneighbor_(atvertex) {
    -    if (neighbor->visitid < numfacets)
    -      neighbor->seen= True;
    -  }
    -  FOREACHneighbor_(atvertex) {
    -    if (neighbor->seen) {
    -      FOREACHvertex_(neighbor->vertices) {
    -        if (vertex->visitid != qh vertex_visit && !vertex->seen) {
    -          vertex->visitid= qh vertex_visit;
    -          count= 0;
    -          firstinf= True;
    -          qh_settruncate(tricenters, 0);
    -          FOREACHneighborA_(vertex) {
    -            if (neighborA->seen) {
    -              if (neighborA->visitid) {
    -                if (!neighborA->tricoplanar || qh_setunique(&tricenters, neighborA->center))
    -                  count++;
    -              }else if (firstinf) {
    -                count++;
    -                firstinf= False;
    -              }
    -            }
    -          }
    -          if (count >= qh hull_dim - 1) {  /* e.g., 3 for 3-d Voronoi */
    -            if (firstinf) {
    -              if (innerouter == qh_RIDGEouter)
    -                continue;
    -              unbounded= False;
    -            }else {
    -              if (innerouter == qh_RIDGEinner)
    -                continue;
    -              unbounded= True;
    -            }
    -            totridges++;
    -            trace4((qh ferr, 4017, "qh_eachvoronoi: Voronoi ridge of %d vertices between sites %d and %d\n",
    -                  count, qh_pointid(atvertex->point), qh_pointid(vertex->point)));
    -            if (printvridge && fp) {
    -              if (inorder && qh hull_dim == 3+1) /* 3-d Voronoi diagram */
    -                centers= qh_detvridge3 (atvertex, vertex);
    -              else
    -                centers= qh_detvridge(vertex);
    -              (*printvridge) (fp, atvertex, vertex, centers, unbounded);
    -              qh_settempfree(¢ers);
    -            }
    -          }
    -        }
    -      }
    -    }
    -  }
    -  FOREACHneighbor_(atvertex)
    -    neighbor->seen= False;
    -  qh_settempfree(&tricenters);
    -  return totridges;
    -} /* eachvoronoi */
    -
    -
    -/*---------------------------------
    -
    -  qh_eachvoronoi_all( fp, printvridge, isUpper, innerouter, inorder )
    -    visit all Voronoi ridges
    -
    -    innerouter:
    -      see qh_eachvoronoi()
    -
    -    if inorder
    -      orders vertices for 3-d Voronoi diagrams
    -
    -  returns
    -    total number of ridges
    -
    -    if isUpper == facet->upperdelaunay  (i.e., a Vornoi vertex)
    -      facet->visitid= Voronoi vertex index(same as 'o' format)
    -    else
    -      facet->visitid= 0
    -
    -    if printvridge,
    -      calls printvridge( fp, vertex, vertexA, centers)
    -      [see qh_eachvoronoi]
    -
    -  notes:
    -    Not used for qhull.exe
    -    same effect as qh_printvdiagram but ridges not sorted by point id
    -*/
    -int qh_eachvoronoi_all(FILE *fp, printvridgeT printvridge, boolT isUpper, qh_RIDGE innerouter, boolT inorder) {
    -  facetT *facet;
    -  vertexT *vertex;
    -  int numcenters= 1;  /* vertex 0 is vertex-at-infinity */
    -  int totridges= 0;
    -
    -  qh_clearcenters(qh_ASvoronoi);
    -  qh_vertexneighbors();
    -  maximize_(qh visit_id, (unsigned) qh num_facets);
    -  FORALLfacets {
    -    facet->visitid= 0;
    -    facet->seen= False;
    -    facet->seen2= True;
    -  }
    -  FORALLfacets {
    -    if (facet->upperdelaunay == isUpper)
    -      facet->visitid= numcenters++;
    -  }
    -  FORALLvertices
    -    vertex->seen= False;
    -  FORALLvertices {
    -    if (qh GOODvertex > 0 && qh_pointid(vertex->point)+1 != qh GOODvertex)
    -      continue;
    -    totridges += qh_eachvoronoi(fp, printvridge, vertex,
    -                   !qh_ALL, innerouter, inorder);
    -  }
    -  return totridges;
    -} /* eachvoronoi_all */
    -
    -/*---------------------------------
    -
    -  qh_facet2point( facet, point0, point1, mindist )
    -    return two projected temporary vertices for a 2-d facet
    -    may be non-simplicial
    -
    -  returns:
    -    point0 and point1 oriented and projected to the facet
    -    returns mindist (maximum distance below plane)
    -*/
    -void qh_facet2point(facetT *facet, pointT **point0, pointT **point1, realT *mindist) {
    -  vertexT *vertex0, *vertex1;
    -  realT dist;
    -
    -  if (facet->toporient ^ qh_ORIENTclock) {
    -    vertex0= SETfirstt_(facet->vertices, vertexT);
    -    vertex1= SETsecondt_(facet->vertices, vertexT);
    -  }else {
    -    vertex1= SETfirstt_(facet->vertices, vertexT);
    -    vertex0= SETsecondt_(facet->vertices, vertexT);
    -  }
    -  zadd_(Zdistio, 2);
    -  qh_distplane(vertex0->point, facet, &dist);
    -  *mindist= dist;
    -  *point0= qh_projectpoint(vertex0->point, facet, dist);
    -  qh_distplane(vertex1->point, facet, &dist);
    -  minimize_(*mindist, dist);
    -  *point1= qh_projectpoint(vertex1->point, facet, dist);
    -} /* facet2point */
    -
    -
    -/*---------------------------------
    -
    -  qh_facetvertices( facetlist, facets, allfacets )
    -    returns temporary set of vertices in a set and/or list of facets
    -    if allfacets, ignores qh_skipfacet()
    -
    -  returns:
    -    vertices with qh.vertex_visit
    -
    -  notes:
    -    optimized for allfacets of facet_list
    -
    -  design:
    -    if allfacets of facet_list
    -      create vertex set from vertex_list
    -    else
    -      for each selected facet in facets or facetlist
    -        append unvisited vertices to vertex set
    -*/
    -setT *qh_facetvertices(facetT *facetlist, setT *facets, boolT allfacets) {
    -  setT *vertices;
    -  facetT *facet, **facetp;
    -  vertexT *vertex, **vertexp;
    -
    -  qh vertex_visit++;
    -  if (facetlist == qh facet_list && allfacets && !facets) {
    -    vertices= qh_settemp(qh num_vertices);
    -    FORALLvertices {
    -      vertex->visitid= qh vertex_visit;
    -      qh_setappend(&vertices, vertex);
    -    }
    -  }else {
    -    vertices= qh_settemp(qh TEMPsize);
    -    FORALLfacet_(facetlist) {
    -      if (!allfacets && qh_skipfacet(facet))
    -        continue;
    -      FOREACHvertex_(facet->vertices) {
    -        if (vertex->visitid != qh vertex_visit) {
    -          vertex->visitid= qh vertex_visit;
    -          qh_setappend(&vertices, vertex);
    -        }
    -      }
    -    }
    -  }
    -  FOREACHfacet_(facets) {
    -    if (!allfacets && qh_skipfacet(facet))
    -      continue;
    -    FOREACHvertex_(facet->vertices) {
    -      if (vertex->visitid != qh vertex_visit) {
    -        vertex->visitid= qh vertex_visit;
    -        qh_setappend(&vertices, vertex);
    -      }
    -    }
    -  }
    -  return vertices;
    -} /* facetvertices */
    -
    -/*---------------------------------
    -
    -  qh_geomplanes( facet, outerplane, innerplane )
    -    return outer and inner planes for Geomview
    -    qh.PRINTradius is size of vertices and points (includes qh.JOGGLEmax)
    -
    -  notes:
    -    assume precise calculations in io.c with roundoff covered by qh_GEOMepsilon
    -*/
    -void qh_geomplanes(facetT *facet, realT *outerplane, realT *innerplane) {
    -  realT radius;
    -
    -  if (qh MERGING || qh JOGGLEmax < REALmax/2) {
    -    qh_outerinner(facet, outerplane, innerplane);
    -    radius= qh PRINTradius;
    -    if (qh JOGGLEmax < REALmax/2)
    -      radius -= qh JOGGLEmax * sqrt((realT)qh hull_dim);  /* already accounted for in qh_outerinner() */
    -    *outerplane += radius;
    -    *innerplane -= radius;
    -    if (qh PRINTcoplanar || qh PRINTspheres) {
    -      *outerplane += qh MAXabs_coord * qh_GEOMepsilon;
    -      *innerplane -= qh MAXabs_coord * qh_GEOMepsilon;
    -    }
    -  }else
    -    *innerplane= *outerplane= 0;
    -} /* geomplanes */
    -
    -
    -/*---------------------------------
    -
    -  qh_markkeep( facetlist )
    -    mark good facets that meet qh.KEEParea, qh.KEEPmerge, and qh.KEEPminArea
    -    ignores visible facets (!part of convex hull)
    -
    -  returns:
    -    may clear facet->good
    -    recomputes qh.num_good
    -
    -  design:
    -    get set of good facets
    -    if qh.KEEParea
    -      sort facets by area
    -      clear facet->good for all but n largest facets
    -    if qh.KEEPmerge
    -      sort facets by merge count
    -      clear facet->good for all but n most merged facets
    -    if qh.KEEPminarea
    -      clear facet->good if area too small
    -    update qh.num_good
    -*/
    -void qh_markkeep(facetT *facetlist) {
    -  facetT *facet, **facetp;
    -  setT *facets= qh_settemp(qh num_facets);
    -  int size, count;
    -
    -  trace2((qh ferr, 2006, "qh_markkeep: only keep %d largest and/or %d most merged facets and/or min area %.2g\n",
    -          qh KEEParea, qh KEEPmerge, qh KEEPminArea));
    -  FORALLfacet_(facetlist) {
    -    if (!facet->visible && facet->good)
    -      qh_setappend(&facets, facet);
    -  }
    -  size= qh_setsize(facets);
    -  if (qh KEEParea) {
    -    qsort(SETaddr_(facets, facetT), (size_t)size,
    -             sizeof(facetT *), qh_compare_facetarea);
    -    if ((count= size - qh KEEParea) > 0) {
    -      FOREACHfacet_(facets) {
    -        facet->good= False;
    -        if (--count == 0)
    -          break;
    -      }
    -    }
    -  }
    -  if (qh KEEPmerge) {
    -    qsort(SETaddr_(facets, facetT), (size_t)size,
    -             sizeof(facetT *), qh_compare_facetmerge);
    -    if ((count= size - qh KEEPmerge) > 0) {
    -      FOREACHfacet_(facets) {
    -        facet->good= False;
    -        if (--count == 0)
    -          break;
    -      }
    -    }
    -  }
    -  if (qh KEEPminArea < REALmax/2) {
    -    FOREACHfacet_(facets) {
    -      if (!facet->isarea || facet->f.area < qh KEEPminArea)
    -        facet->good= False;
    -    }
    -  }
    -  qh_settempfree(&facets);
    -  count= 0;
    -  FORALLfacet_(facetlist) {
    -    if (facet->good)
    -      count++;
    -  }
    -  qh num_good= count;
    -} /* markkeep */
    -
    -
    -/*---------------------------------
    -
    -  qh_markvoronoi( facetlist, facets, printall, isLower, numcenters )
    -    mark voronoi vertices for printing by site pairs
    -
    -  returns:
    -    temporary set of vertices indexed by pointid
    -    isLower set if printing lower hull (i.e., at least one facet is lower hull)
    -    numcenters= total number of Voronoi vertices
    -    bumps qh.printoutnum for vertex-at-infinity
    -    clears all facet->seen and sets facet->seen2
    -
    -    if selected
    -      facet->visitid= Voronoi vertex id
    -    else if upper hull (or 'Qu' and lower hull)
    -      facet->visitid= 0
    -    else
    -      facet->visitid >= qh num_facets
    -
    -  notes:
    -    ignores qh.ATinfinity, if defined
    -*/
    -setT *qh_markvoronoi(facetT *facetlist, setT *facets, boolT printall, boolT *isLowerp, int *numcentersp) {
    -  int numcenters=0;
    -  facetT *facet, **facetp;
    -  setT *vertices;
    -  boolT isLower= False;
    -
    -  qh printoutnum++;
    -  qh_clearcenters(qh_ASvoronoi);  /* in case, qh_printvdiagram2 called by user */
    -  qh_vertexneighbors();
    -  vertices= qh_pointvertex();
    -  if (qh ATinfinity)
    -    SETelem_(vertices, qh num_points-1)= NULL;
    -  qh visit_id++;
    -  maximize_(qh visit_id, (unsigned) qh num_facets);
    -  FORALLfacet_(facetlist) {
    -    if (printall || !qh_skipfacet(facet)) {
    -      if (!facet->upperdelaunay) {
    -        isLower= True;
    -        break;
    -      }
    -    }
    -  }
    -  FOREACHfacet_(facets) {
    -    if (printall || !qh_skipfacet(facet)) {
    -      if (!facet->upperdelaunay) {
    -        isLower= True;
    -        break;
    -      }
    -    }
    -  }
    -  FORALLfacets {
    -    if (facet->normal && (facet->upperdelaunay == isLower))
    -      facet->visitid= 0;  /* facetlist or facets may overwrite */
    -    else
    -      facet->visitid= qh visit_id;
    -    facet->seen= False;
    -    facet->seen2= True;
    -  }
    -  numcenters++;  /* qh_INFINITE */
    -  FORALLfacet_(facetlist) {
    -    if (printall || !qh_skipfacet(facet))
    -      facet->visitid= numcenters++;
    -  }
    -  FOREACHfacet_(facets) {
    -    if (printall || !qh_skipfacet(facet))
    -      facet->visitid= numcenters++;
    -  }
    -  *isLowerp= isLower;
    -  *numcentersp= numcenters;
    -  trace2((qh ferr, 2007, "qh_markvoronoi: isLower %d numcenters %d\n", isLower, numcenters));
    -  return vertices;
    -} /* markvoronoi */
    -
    -/*---------------------------------
    -
    -  qh_order_vertexneighbors( vertex )
    -    order facet neighbors of a 2-d or 3-d vertex by adjacency
    -
    -  notes:
    -    does not orient the neighbors
    -
    -  design:
    -    initialize a new neighbor set with the first facet in vertex->neighbors
    -    while vertex->neighbors non-empty
    -      select next neighbor in the previous facet's neighbor set
    -    set vertex->neighbors to the new neighbor set
    -*/
    -void qh_order_vertexneighbors(vertexT *vertex) {
    -  setT *newset;
    -  facetT *facet, *neighbor, **neighborp;
    -
    -  trace4((qh ferr, 4018, "qh_order_vertexneighbors: order neighbors of v%d for 3-d\n", vertex->id));
    -  newset= qh_settemp(qh_setsize(vertex->neighbors));
    -  facet= (facetT*)qh_setdellast(vertex->neighbors);
    -  qh_setappend(&newset, facet);
    -  while (qh_setsize(vertex->neighbors)) {
    -    FOREACHneighbor_(vertex) {
    -      if (qh_setin(facet->neighbors, neighbor)) {
    -        qh_setdel(vertex->neighbors, neighbor);
    -        qh_setappend(&newset, neighbor);
    -        facet= neighbor;
    -        break;
    -      }
    -    }
    -    if (!neighbor) {
    -      qh_fprintf(qh ferr, 6066, "qhull internal error (qh_order_vertexneighbors): no neighbor of v%d for f%d\n",
    -        vertex->id, facet->id);
    -      qh_errexit(qh_ERRqhull, facet, NULL);
    -    }
    -  }
    -  qh_setfree(&vertex->neighbors);
    -  qh_settemppop();
    -  vertex->neighbors= newset;
    -} /* order_vertexneighbors */
    -
    -/*---------------------------------
    -
    -  qh_prepare_output( )
    -    prepare for qh_produce_output2() according to
    -      qh.KEEPminArea, KEEParea, KEEPmerge, GOODvertex, GOODthreshold, GOODpoint, ONLYgood, SPLITthresholds
    -    does not reset facet->good
    -
    -  notes
    -    except for PRINTstatistics, no-op if previously called with same options
    -*/
    -void qh_prepare_output(void) {
    -  if (qh VORONOI) {
    -    qh_clearcenters (qh_ASvoronoi);
    -    qh_vertexneighbors();
    -  }
    -  if (qh TRIangulate && !qh hasTriangulation) {
    -    qh_triangulate();
    -    if (qh VERIFYoutput && !qh CHECKfrequently)
    -      qh_checkpolygon (qh facet_list);
    -  }
    -  qh_findgood_all (qh facet_list);
    -  if (qh GETarea)
    -    qh_getarea(qh facet_list);
    -  if (qh KEEParea || qh KEEPmerge || qh KEEPminArea < REALmax/2)
    -    qh_markkeep (qh facet_list);
    -  if (qh PRINTstatistics)
    -    qh_collectstatistics();
    -}
    -
    -/*---------------------------------
    -
    -  qh_printafacet( fp, format, facet, printall )
    -    print facet to fp in given output format (see qh.PRINTout)
    -
    -  returns:
    -    nop if !printall and qh_skipfacet()
    -    nop if visible facet and NEWfacets and format != PRINTfacets
    -    must match qh_countfacets
    -
    -  notes
    -    preserves qh.visit_id
    -    facet->normal may be null if PREmerge/MERGEexact and STOPcone before merge
    -
    -  see
    -    qh_printbegin() and qh_printend()
    -
    -  design:
    -    test for printing facet
    -    call appropriate routine for format
    -    or output results directly
    -*/
    -void qh_printafacet(FILE *fp, qh_PRINT format, facetT *facet, boolT printall) {
    -  realT color[4], offset, dist, outerplane, innerplane;
    -  boolT zerodiv;
    -  coordT *point, *normp, *coordp, **pointp, *feasiblep;
    -  int k;
    -  vertexT *vertex, **vertexp;
    -  facetT *neighbor, **neighborp;
    -
    -  if (!printall && qh_skipfacet(facet))
    -    return;
    -  if (facet->visible && qh NEWfacets && format != qh_PRINTfacets)
    -    return;
    -  qh printoutnum++;
    -  switch (format) {
    -  case qh_PRINTarea:
    -    if (facet->isarea) {
    -      qh_fprintf(fp, 9009, qh_REAL_1, facet->f.area);
    -      qh_fprintf(fp, 9010, "\n");
    -    }else
    -      qh_fprintf(fp, 9011, "0\n");
    -    break;
    -  case qh_PRINTcoplanars:
    -    qh_fprintf(fp, 9012, "%d", qh_setsize(facet->coplanarset));
    -    FOREACHpoint_(facet->coplanarset)
    -      qh_fprintf(fp, 9013, " %d", qh_pointid(point));
    -    qh_fprintf(fp, 9014, "\n");
    -    break;
    -  case qh_PRINTcentrums:
    -    qh_printcenter(fp, format, NULL, facet);
    -    break;
    -  case qh_PRINTfacets:
    -    qh_printfacet(fp, facet);
    -    break;
    -  case qh_PRINTfacets_xridge:
    -    qh_printfacetheader(fp, facet);
    -    break;
    -  case qh_PRINTgeom:  /* either 2 , 3, or 4-d by qh_printbegin */
    -    if (!facet->normal)
    -      break;
    -    for (k=qh hull_dim; k--; ) {
    -      color[k]= (facet->normal[k]+1.0)/2.0;
    -      maximize_(color[k], -1.0);
    -      minimize_(color[k], +1.0);
    -    }
    -    qh_projectdim3 (color, color);
    -    if (qh PRINTdim != qh hull_dim)
    -      qh_normalize2 (color, 3, True, NULL, NULL);
    -    if (qh hull_dim <= 2)
    -      qh_printfacet2geom(fp, facet, color);
    -    else if (qh hull_dim == 3) {
    -      if (facet->simplicial)
    -        qh_printfacet3geom_simplicial(fp, facet, color);
    -      else
    -        qh_printfacet3geom_nonsimplicial(fp, facet, color);
    -    }else {
    -      if (facet->simplicial)
    -        qh_printfacet4geom_simplicial(fp, facet, color);
    -      else
    -        qh_printfacet4geom_nonsimplicial(fp, facet, color);
    -    }
    -    break;
    -  case qh_PRINTids:
    -    qh_fprintf(fp, 9015, "%d\n", facet->id);
    -    break;
    -  case qh_PRINTincidences:
    -  case qh_PRINToff:
    -  case qh_PRINTtriangles:
    -    if (qh hull_dim == 3 && format != qh_PRINTtriangles)
    -      qh_printfacet3vertex(fp, facet, format);
    -    else if (facet->simplicial || qh hull_dim == 2 || format == qh_PRINToff)
    -      qh_printfacetNvertex_simplicial(fp, facet, format);
    -    else
    -      qh_printfacetNvertex_nonsimplicial(fp, facet, qh printoutvar++, format);
    -    break;
    -  case qh_PRINTinner:
    -    qh_outerinner(facet, NULL, &innerplane);
    -    offset= facet->offset - innerplane;
    -    goto LABELprintnorm;
    -    break; /* prevent warning */
    -  case qh_PRINTmerges:
    -    qh_fprintf(fp, 9016, "%d\n", facet->nummerge);
    -    break;
    -  case qh_PRINTnormals:
    -    offset= facet->offset;
    -    goto LABELprintnorm;
    -    break; /* prevent warning */
    -  case qh_PRINTouter:
    -    qh_outerinner(facet, &outerplane, NULL);
    -    offset= facet->offset - outerplane;
    -  LABELprintnorm:
    -    if (!facet->normal) {
    -      qh_fprintf(fp, 9017, "no normal for facet f%d\n", facet->id);
    -      break;
    -    }
    -    if (qh CDDoutput) {
    -      qh_fprintf(fp, 9018, qh_REAL_1, -offset);
    -      for (k=0; k < qh hull_dim; k++)
    -        qh_fprintf(fp, 9019, qh_REAL_1, -facet->normal[k]);
    -    }else {
    -      for (k=0; k < qh hull_dim; k++)
    -        qh_fprintf(fp, 9020, qh_REAL_1, facet->normal[k]);
    -      qh_fprintf(fp, 9021, qh_REAL_1, offset);
    -    }
    -    qh_fprintf(fp, 9022, "\n");
    -    break;
    -  case qh_PRINTmathematica:  /* either 2 or 3-d by qh_printbegin */
    -  case qh_PRINTmaple:
    -    if (qh hull_dim == 2)
    -      qh_printfacet2math(fp, facet, format, qh printoutvar++);
    -    else
    -      qh_printfacet3math(fp, facet, format, qh printoutvar++);
    -    break;
    -  case qh_PRINTneighbors:
    -    qh_fprintf(fp, 9023, "%d", qh_setsize(facet->neighbors));
    -    FOREACHneighbor_(facet)
    -      qh_fprintf(fp, 9024, " %d",
    -               neighbor->visitid ? neighbor->visitid - 1: 0 - neighbor->id);
    -    qh_fprintf(fp, 9025, "\n");
    -    break;
    -  case qh_PRINTpointintersect:
    -    if (!qh feasible_point) {
    -      qh_fprintf(qh ferr, 6067, "qhull input error (qh_printafacet): option 'Fp' needs qh feasible_point\n");
    -      qh_errexit( qh_ERRinput, NULL, NULL);
    -    }
    -    if (facet->offset > 0)
    -      goto LABELprintinfinite;
    -    point= coordp= (coordT*)qh_memalloc(qh normal_size);
    -    normp= facet->normal;
    -    feasiblep= qh feasible_point;
    -    if (facet->offset < -qh MINdenom) {
    -      for (k=qh hull_dim; k--; )
    -        *(coordp++)= (*(normp++) / - facet->offset) + *(feasiblep++);
    -    }else {
    -      for (k=qh hull_dim; k--; ) {
    -        *(coordp++)= qh_divzero(*(normp++), facet->offset, qh MINdenom_1,
    -                                 &zerodiv) + *(feasiblep++);
    -        if (zerodiv) {
    -          qh_memfree(point, qh normal_size);
    -          goto LABELprintinfinite;
    -        }
    -      }
    -    }
    -    qh_printpoint(fp, NULL, point);
    -    qh_memfree(point, qh normal_size);
    -    break;
    -  LABELprintinfinite:
    -    for (k=qh hull_dim; k--; )
    -      qh_fprintf(fp, 9026, qh_REAL_1, qh_INFINITE);
    -    qh_fprintf(fp, 9027, "\n");
    -    break;
    -  case qh_PRINTpointnearest:
    -    FOREACHpoint_(facet->coplanarset) {
    -      int id, id2;
    -      vertex= qh_nearvertex(facet, point, &dist);
    -      id= qh_pointid(vertex->point);
    -      id2= qh_pointid(point);
    -      qh_fprintf(fp, 9028, "%d %d %d " qh_REAL_1 "\n", id, id2, facet->id, dist);
    -    }
    -    break;
    -  case qh_PRINTpoints:  /* VORONOI only by qh_printbegin */
    -    if (qh CDDoutput)
    -      qh_fprintf(fp, 9029, "1 ");
    -    qh_printcenter(fp, format, NULL, facet);
    -    break;
    -  case qh_PRINTvertices:
    -    qh_fprintf(fp, 9030, "%d", qh_setsize(facet->vertices));
    -    FOREACHvertex_(facet->vertices)
    -      qh_fprintf(fp, 9031, " %d", qh_pointid(vertex->point));
    -    qh_fprintf(fp, 9032, "\n");
    -    break;
    -  default:
    -    break;
    -  }
    -} /* printafacet */
    -
    -/*---------------------------------
    -
    -  qh_printbegin(  )
    -    prints header for all output formats
    -
    -  returns:
    -    checks for valid format
    -
    -  notes:
    -    uses qh.visit_id for 3/4off
    -    changes qh.interior_point if printing centrums
    -    qh_countfacets clears facet->visitid for non-good facets
    -
    -  see
    -    qh_printend() and qh_printafacet()
    -
    -  design:
    -    count facets and related statistics
    -    print header for format
    -*/
    -void qh_printbegin(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall) {
    -  int numfacets, numsimplicial, numridges, totneighbors, numcoplanars, numtricoplanars;
    -  int i, num;
    -  facetT *facet, **facetp;
    -  vertexT *vertex, **vertexp;
    -  setT *vertices;
    -  pointT *point, **pointp, *pointtemp;
    -
    -  qh printoutnum= 0;
    -  qh_countfacets(facetlist, facets, printall, &numfacets, &numsimplicial,
    -      &totneighbors, &numridges, &numcoplanars, &numtricoplanars);
    -  switch (format) {
    -  case qh_PRINTnone:
    -    break;
    -  case qh_PRINTarea:
    -    qh_fprintf(fp, 9033, "%d\n", numfacets);
    -    break;
    -  case qh_PRINTcoplanars:
    -    qh_fprintf(fp, 9034, "%d\n", numfacets);
    -    break;
    -  case qh_PRINTcentrums:
    -    if (qh CENTERtype == qh_ASnone)
    -      qh_clearcenters(qh_AScentrum);
    -    qh_fprintf(fp, 9035, "%d\n%d\n", qh hull_dim, numfacets);
    -    break;
    -  case qh_PRINTfacets:
    -  case qh_PRINTfacets_xridge:
    -    if (facetlist)
    -      qh_printvertexlist(fp, "Vertices and facets:\n", facetlist, facets, printall);
    -    break;
    -  case qh_PRINTgeom:
    -    if (qh hull_dim > 4)  /* qh_initqhull_globals also checks */
    -      goto LABELnoformat;
    -    if (qh VORONOI && qh hull_dim > 3)  /* PRINTdim == DROPdim == hull_dim-1 */
    -      goto LABELnoformat;
    -    if (qh hull_dim == 2 && (qh PRINTridges || qh DOintersections))
    -      qh_fprintf(qh ferr, 7049, "qhull warning: output for ridges and intersections not implemented in 2-d\n");
    -    if (qh hull_dim == 4 && (qh PRINTinner || qh PRINTouter ||
    -                             (qh PRINTdim == 4 && qh PRINTcentrums)))
    -      qh_fprintf(qh ferr, 7050, "qhull warning: output for outer/inner planes and centrums not implemented in 4-d\n");
    -    if (qh PRINTdim == 4 && (qh PRINTspheres))
    -      qh_fprintf(qh ferr, 7051, "qhull warning: output for vertices not implemented in 4-d\n");
    -    if (qh PRINTdim == 4 && qh DOintersections && qh PRINTnoplanes)
    -      qh_fprintf(qh ferr, 7052, "qhull warning: 'Gnh' generates no output in 4-d\n");
    -    if (qh PRINTdim == 2) {
    -      qh_fprintf(fp, 9036, "{appearance {linewidth 3} LIST # %s | %s\n",
    -              qh rbox_command, qh qhull_command);
    -    }else if (qh PRINTdim == 3) {
    -      qh_fprintf(fp, 9037, "{appearance {+edge -evert linewidth 2} LIST # %s | %s\n",
    -              qh rbox_command, qh qhull_command);
    -    }else if (qh PRINTdim == 4) {
    -      qh visit_id++;
    -      num= 0;
    -      FORALLfacet_(facetlist)    /* get number of ridges to be printed */
    -        qh_printend4geom(NULL, facet, &num, printall);
    -      FOREACHfacet_(facets)
    -        qh_printend4geom(NULL, facet, &num, printall);
    -      qh ridgeoutnum= num;
    -      qh printoutvar= 0;  /* counts number of ridges in output */
    -      qh_fprintf(fp, 9038, "LIST # %s | %s\n", qh rbox_command, qh qhull_command);
    -    }
    -
    -    if (qh PRINTdots) {
    -      qh printoutnum++;
    -      num= qh num_points + qh_setsize(qh other_points);
    -      if (qh DELAUNAY && qh ATinfinity)
    -        num--;
    -      if (qh PRINTdim == 4)
    -        qh_fprintf(fp, 9039, "4VECT %d %d 1\n", num, num);
    -      else
    -        qh_fprintf(fp, 9040, "VECT %d %d 1\n", num, num);
    -
    -      for (i=num; i--; ) {
    -        if (i % 20 == 0)
    -          qh_fprintf(fp, 9041, "\n");
    -        qh_fprintf(fp, 9042, "1 ");
    -      }
    -      qh_fprintf(fp, 9043, "# 1 point per line\n1 ");
    -      for (i=num-1; i--; ) { /* num at least 3 for D2 */
    -        if (i % 20 == 0)
    -          qh_fprintf(fp, 9044, "\n");
    -        qh_fprintf(fp, 9045, "0 ");
    -      }
    -      qh_fprintf(fp, 9046, "# 1 color for all\n");
    -      FORALLpoints {
    -        if (!qh DELAUNAY || !qh ATinfinity || qh_pointid(point) != qh num_points-1) {
    -          if (qh PRINTdim == 4)
    -            qh_printpoint(fp, NULL, point);
    -            else
    -              qh_printpoint3 (fp, point);
    -        }
    -      }
    -      FOREACHpoint_(qh other_points) {
    -        if (qh PRINTdim == 4)
    -          qh_printpoint(fp, NULL, point);
    -        else
    -          qh_printpoint3 (fp, point);
    -      }
    -      qh_fprintf(fp, 9047, "0 1 1 1  # color of points\n");
    -    }
    -
    -    if (qh PRINTdim == 4  && !qh PRINTnoplanes)
    -      /* 4dview loads up multiple 4OFF objects slowly */
    -      qh_fprintf(fp, 9048, "4OFF %d %d 1\n", 3*qh ridgeoutnum, qh ridgeoutnum);
    -    qh PRINTcradius= 2 * qh DISTround;  /* include test DISTround */
    -    if (qh PREmerge) {
    -      maximize_(qh PRINTcradius, qh premerge_centrum + qh DISTround);
    -    }else if (qh POSTmerge)
    -      maximize_(qh PRINTcradius, qh postmerge_centrum + qh DISTround);
    -    qh PRINTradius= qh PRINTcradius;
    -    if (qh PRINTspheres + qh PRINTcoplanar)
    -      maximize_(qh PRINTradius, qh MAXabs_coord * qh_MINradius);
    -    if (qh premerge_cos < REALmax/2) {
    -      maximize_(qh PRINTradius, (1- qh premerge_cos) * qh MAXabs_coord);
    -    }else if (!qh PREmerge && qh POSTmerge && qh postmerge_cos < REALmax/2) {
    -      maximize_(qh PRINTradius, (1- qh postmerge_cos) * qh MAXabs_coord);
    -    }
    -    maximize_(qh PRINTradius, qh MINvisible);
    -    if (qh JOGGLEmax < REALmax/2)
    -      qh PRINTradius += qh JOGGLEmax * sqrt((realT)qh hull_dim);
    -    if (qh PRINTdim != 4 &&
    -        (qh PRINTcoplanar || qh PRINTspheres || qh PRINTcentrums)) {
    -      vertices= qh_facetvertices(facetlist, facets, printall);
    -      if (qh PRINTspheres && qh PRINTdim <= 3)
    -        qh_printspheres(fp, vertices, qh PRINTradius);
    -      if (qh PRINTcoplanar || qh PRINTcentrums) {
    -        qh firstcentrum= True;
    -        if (qh PRINTcoplanar&& !qh PRINTspheres) {
    -          FOREACHvertex_(vertices)
    -            qh_printpointvect2 (fp, vertex->point, NULL, qh interior_point, qh PRINTradius);
    -        }
    -        FORALLfacet_(facetlist) {
    -          if (!printall && qh_skipfacet(facet))
    -            continue;
    -          if (!facet->normal)
    -            continue;
    -          if (qh PRINTcentrums && qh PRINTdim <= 3)
    -            qh_printcentrum(fp, facet, qh PRINTcradius);
    -          if (!qh PRINTcoplanar)
    -            continue;
    -          FOREACHpoint_(facet->coplanarset)
    -            qh_printpointvect2 (fp, point, facet->normal, NULL, qh PRINTradius);
    -          FOREACHpoint_(facet->outsideset)
    -            qh_printpointvect2 (fp, point, facet->normal, NULL, qh PRINTradius);
    -        }
    -        FOREACHfacet_(facets) {
    -          if (!printall && qh_skipfacet(facet))
    -            continue;
    -          if (!facet->normal)
    -            continue;
    -          if (qh PRINTcentrums && qh PRINTdim <= 3)
    -            qh_printcentrum(fp, facet, qh PRINTcradius);
    -          if (!qh PRINTcoplanar)
    -            continue;
    -          FOREACHpoint_(facet->coplanarset)
    -            qh_printpointvect2 (fp, point, facet->normal, NULL, qh PRINTradius);
    -          FOREACHpoint_(facet->outsideset)
    -            qh_printpointvect2 (fp, point, facet->normal, NULL, qh PRINTradius);
    -        }
    -      }
    -      qh_settempfree(&vertices);
    -    }
    -    qh visit_id++; /* for printing hyperplane intersections */
    -    break;
    -  case qh_PRINTids:
    -    qh_fprintf(fp, 9049, "%d\n", numfacets);
    -    break;
    -  case qh_PRINTincidences:
    -    if (qh VORONOI && qh PRINTprecision)
    -      qh_fprintf(qh ferr, 7053, "qhull warning: writing Delaunay.  Use 'p' or 'o' for Voronoi centers\n");
    -    qh printoutvar= qh vertex_id;  /* centrum id for non-simplicial facets */
    -    if (qh hull_dim <= 3)
    -      qh_fprintf(fp, 9050, "%d\n", numfacets);
    -    else
    -      qh_fprintf(fp, 9051, "%d\n", numsimplicial+numridges);
    -    break;
    -  case qh_PRINTinner:
    -  case qh_PRINTnormals:
    -  case qh_PRINTouter:
    -    if (qh CDDoutput)
    -      qh_fprintf(fp, 9052, "%s | %s\nbegin\n    %d %d real\n", qh rbox_command,
    -            qh qhull_command, numfacets, qh hull_dim+1);
    -    else
    -      qh_fprintf(fp, 9053, "%d\n%d\n", qh hull_dim+1, numfacets);
    -    break;
    -  case qh_PRINTmathematica:
    -  case qh_PRINTmaple:
    -    if (qh hull_dim > 3)  /* qh_initbuffers also checks */
    -      goto LABELnoformat;
    -    if (qh VORONOI)
    -      qh_fprintf(qh ferr, 7054, "qhull warning: output is the Delaunay triangulation\n");
    -    if (format == qh_PRINTmaple) {
    -      if (qh hull_dim == 2)
    -        qh_fprintf(fp, 9054, "PLOT(CURVES(\n");
    -      else
    -        qh_fprintf(fp, 9055, "PLOT3D(POLYGONS(\n");
    -    }else
    -      qh_fprintf(fp, 9056, "{\n");
    -    qh printoutvar= 0;   /* counts number of facets for notfirst */
    -    break;
    -  case qh_PRINTmerges:
    -    qh_fprintf(fp, 9057, "%d\n", numfacets);
    -    break;
    -  case qh_PRINTpointintersect:
    -    qh_fprintf(fp, 9058, "%d\n%d\n", qh hull_dim, numfacets);
    -    break;
    -  case qh_PRINTneighbors:
    -    qh_fprintf(fp, 9059, "%d\n", numfacets);
    -    break;
    -  case qh_PRINToff:
    -  case qh_PRINTtriangles:
    -    if (qh VORONOI)
    -      goto LABELnoformat;
    -    num = qh hull_dim;
    -    if (format == qh_PRINToff || qh hull_dim == 2)
    -      qh_fprintf(fp, 9060, "%d\n%d %d %d\n", num,
    -        qh num_points+qh_setsize(qh other_points), numfacets, totneighbors/2);
    -    else { /* qh_PRINTtriangles */
    -      qh printoutvar= qh num_points+qh_setsize(qh other_points); /* first centrum */
    -      if (qh DELAUNAY)
    -        num--;  /* drop last dimension */
    -      qh_fprintf(fp, 9061, "%d\n%d %d %d\n", num, qh printoutvar
    -        + numfacets - numsimplicial, numsimplicial + numridges, totneighbors/2);
    -    }
    -    FORALLpoints
    -      qh_printpointid(qh fout, NULL, num, point, -1);
    -    FOREACHpoint_(qh other_points)
    -      qh_printpointid(qh fout, NULL, num, point, -1);
    -    if (format == qh_PRINTtriangles && qh hull_dim > 2) {
    -      FORALLfacets {
    -        if (!facet->simplicial && facet->visitid)
    -          qh_printcenter(qh fout, format, NULL, facet);
    -      }
    -    }
    -    break;
    -  case qh_PRINTpointnearest:
    -    qh_fprintf(fp, 9062, "%d\n", numcoplanars);
    -    break;
    -  case qh_PRINTpoints:
    -    if (!qh VORONOI)
    -      goto LABELnoformat;
    -    if (qh CDDoutput)
    -      qh_fprintf(fp, 9063, "%s | %s\nbegin\n%d %d real\n", qh rbox_command,
    -           qh qhull_command, numfacets, qh hull_dim);
    -    else
    -      qh_fprintf(fp, 9064, "%d\n%d\n", qh hull_dim-1, numfacets);
    -    break;
    -  case qh_PRINTvertices:
    -    qh_fprintf(fp, 9065, "%d\n", numfacets);
    -    break;
    -  case qh_PRINTsummary:
    -  default:
    -  LABELnoformat:
    -    qh_fprintf(qh ferr, 6068, "qhull internal error (qh_printbegin): can not use this format for dimension %d\n",
    -         qh hull_dim);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -} /* printbegin */
    -
    -/*---------------------------------
    -
    -  qh_printcenter( fp, string, facet )
    -    print facet->center as centrum or Voronoi center
    -    string may be NULL.  Don't include '%' codes.
    -    nop if qh CENTERtype neither CENTERvoronoi nor CENTERcentrum
    -    if upper envelope of Delaunay triangulation and point at-infinity
    -      prints qh_INFINITE instead;
    -
    -  notes:
    -    defines facet->center if needed
    -    if format=PRINTgeom, adds a 0 if would otherwise be 2-d
    -    Same as QhullFacet::printCenter
    -*/
    -void qh_printcenter(FILE *fp, qh_PRINT format, const char *string, facetT *facet) {
    -  int k, num;
    -
    -  if (qh CENTERtype != qh_ASvoronoi && qh CENTERtype != qh_AScentrum)
    -    return;
    -  if (string)
    -    qh_fprintf(fp, 9066, string);
    -  if (qh CENTERtype == qh_ASvoronoi) {
    -    num= qh hull_dim-1;
    -    if (!facet->normal || !facet->upperdelaunay || !qh ATinfinity) {
    -      if (!facet->center)
    -        facet->center= qh_facetcenter(facet->vertices);
    -      for (k=0; k < num; k++)
    -        qh_fprintf(fp, 9067, qh_REAL_1, facet->center[k]);
    -    }else {
    -      for (k=0; k < num; k++)
    -        qh_fprintf(fp, 9068, qh_REAL_1, qh_INFINITE);
    -    }
    -  }else /* qh CENTERtype == qh_AScentrum */ {
    -    num= qh hull_dim;
    -    if (format == qh_PRINTtriangles && qh DELAUNAY)
    -      num--;
    -    if (!facet->center)
    -      facet->center= qh_getcentrum(facet);
    -    for (k=0; k < num; k++)
    -      qh_fprintf(fp, 9069, qh_REAL_1, facet->center[k]);
    -  }
    -  if (format == qh_PRINTgeom && num == 2)
    -    qh_fprintf(fp, 9070, " 0\n");
    -  else
    -    qh_fprintf(fp, 9071, "\n");
    -} /* printcenter */
    -
    -/*---------------------------------
    -
    -  qh_printcentrum( fp, facet, radius )
    -    print centrum for a facet in OOGL format
    -    radius defines size of centrum
    -    2-d or 3-d only
    -
    -  returns:
    -    defines facet->center if needed
    -*/
    -void qh_printcentrum(FILE *fp, facetT *facet, realT radius) {
    -  pointT *centrum, *projpt;
    -  boolT tempcentrum= False;
    -  realT xaxis[4], yaxis[4], normal[4], dist;
    -  realT green[3]={0, 1, 0};
    -  vertexT *apex;
    -  int k;
    -
    -  if (qh CENTERtype == qh_AScentrum) {
    -    if (!facet->center)
    -      facet->center= qh_getcentrum(facet);
    -    centrum= facet->center;
    -  }else {
    -    centrum= qh_getcentrum(facet);
    -    tempcentrum= True;
    -  }
    -  qh_fprintf(fp, 9072, "{appearance {-normal -edge normscale 0} ");
    -  if (qh firstcentrum) {
    -    qh firstcentrum= False;
    -    qh_fprintf(fp, 9073, "{INST geom { define centrum CQUAD  # f%d\n\
    --0.3 -0.3 0.0001     0 0 1 1\n\
    - 0.3 -0.3 0.0001     0 0 1 1\n\
    - 0.3  0.3 0.0001     0 0 1 1\n\
    --0.3  0.3 0.0001     0 0 1 1 } transform { \n", facet->id);
    -  }else
    -    qh_fprintf(fp, 9074, "{INST geom { : centrum } transform { # f%d\n", facet->id);
    -  apex= SETfirstt_(facet->vertices, vertexT);
    -  qh_distplane(apex->point, facet, &dist);
    -  projpt= qh_projectpoint(apex->point, facet, dist);
    -  for (k=qh hull_dim; k--; ) {
    -    xaxis[k]= projpt[k] - centrum[k];
    -    normal[k]= facet->normal[k];
    -  }
    -  if (qh hull_dim == 2) {
    -    xaxis[2]= 0;
    -    normal[2]= 0;
    -  }else if (qh hull_dim == 4) {
    -    qh_projectdim3 (xaxis, xaxis);
    -    qh_projectdim3 (normal, normal);
    -    qh_normalize2 (normal, qh PRINTdim, True, NULL, NULL);
    -  }
    -  qh_crossproduct(3, xaxis, normal, yaxis);
    -  qh_fprintf(fp, 9075, "%8.4g %8.4g %8.4g 0\n", xaxis[0], xaxis[1], xaxis[2]);
    -  qh_fprintf(fp, 9076, "%8.4g %8.4g %8.4g 0\n", yaxis[0], yaxis[1], yaxis[2]);
    -  qh_fprintf(fp, 9077, "%8.4g %8.4g %8.4g 0\n", normal[0], normal[1], normal[2]);
    -  qh_printpoint3 (fp, centrum);
    -  qh_fprintf(fp, 9078, "1 }}}\n");
    -  qh_memfree(projpt, qh normal_size);
    -  qh_printpointvect(fp, centrum, facet->normal, NULL, radius, green);
    -  if (tempcentrum)
    -    qh_memfree(centrum, qh normal_size);
    -} /* printcentrum */
    -
    -/*---------------------------------
    -
    -  qh_printend( fp, format )
    -    prints trailer for all output formats
    -
    -  see:
    -    qh_printbegin() and qh_printafacet()
    -
    -*/
    -void qh_printend(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall) {
    -  int num;
    -  facetT *facet, **facetp;
    -
    -  if (!qh printoutnum)
    -    qh_fprintf(qh ferr, 7055, "qhull warning: no facets printed\n");
    -  switch (format) {
    -  case qh_PRINTgeom:
    -    if (qh hull_dim == 4 && qh DROPdim < 0  && !qh PRINTnoplanes) {
    -      qh visit_id++;
    -      num= 0;
    -      FORALLfacet_(facetlist)
    -        qh_printend4geom(fp, facet,&num, printall);
    -      FOREACHfacet_(facets)
    -        qh_printend4geom(fp, facet, &num, printall);
    -      if (num != qh ridgeoutnum || qh printoutvar != qh ridgeoutnum) {
    -        qh_fprintf(qh ferr, 6069, "qhull internal error (qh_printend): number of ridges %d != number printed %d and at end %d\n", qh ridgeoutnum, qh printoutvar, num);
    -        qh_errexit(qh_ERRqhull, NULL, NULL);
    -      }
    -    }else
    -      qh_fprintf(fp, 9079, "}\n");
    -    break;
    -  case qh_PRINTinner:
    -  case qh_PRINTnormals:
    -  case qh_PRINTouter:
    -    if (qh CDDoutput)
    -      qh_fprintf(fp, 9080, "end\n");
    -    break;
    -  case qh_PRINTmaple:
    -    qh_fprintf(fp, 9081, "));\n");
    -    break;
    -  case qh_PRINTmathematica:
    -    qh_fprintf(fp, 9082, "}\n");
    -    break;
    -  case qh_PRINTpoints:
    -    if (qh CDDoutput)
    -      qh_fprintf(fp, 9083, "end\n");
    -    break;
    -  default:
    -    break;
    -  }
    -} /* printend */
    -
    -/*---------------------------------
    -
    -  qh_printend4geom( fp, facet, numridges, printall )
    -    helper function for qh_printbegin/printend
    -
    -  returns:
    -    number of printed ridges
    -
    -  notes:
    -    just counts printed ridges if fp=NULL
    -    uses facet->visitid
    -    must agree with qh_printfacet4geom...
    -
    -  design:
    -    computes color for facet from its normal
    -    prints each ridge of facet
    -*/
    -void qh_printend4geom(FILE *fp, facetT *facet, int *nump, boolT printall) {
    -  realT color[3];
    -  int i, num= *nump;
    -  facetT *neighbor, **neighborp;
    -  ridgeT *ridge, **ridgep;
    -
    -  if (!printall && qh_skipfacet(facet))
    -    return;
    -  if (qh PRINTnoplanes || (facet->visible && qh NEWfacets))
    -    return;
    -  if (!facet->normal)
    -    return;
    -  if (fp) {
    -    for (i=0; i < 3; i++) {
    -      color[i]= (facet->normal[i]+1.0)/2.0;
    -      maximize_(color[i], -1.0);
    -      minimize_(color[i], +1.0);
    -    }
    -  }
    -  facet->visitid= qh visit_id;
    -  if (facet->simplicial) {
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid != qh visit_id) {
    -        if (fp)
    -          qh_fprintf(fp, 9084, "3 %d %d %d %8.4g %8.4g %8.4g 1 # f%d f%d\n",
    -                 3*num, 3*num+1, 3*num+2, color[0], color[1], color[2],
    -                 facet->id, neighbor->id);
    -        num++;
    -      }
    -    }
    -  }else {
    -    FOREACHridge_(facet->ridges) {
    -      neighbor= otherfacet_(ridge, facet);
    -      if (neighbor->visitid != qh visit_id) {
    -        if (fp)
    -          qh_fprintf(fp, 9085, "3 %d %d %d %8.4g %8.4g %8.4g 1 #r%d f%d f%d\n",
    -                 3*num, 3*num+1, 3*num+2, color[0], color[1], color[2],
    -                 ridge->id, facet->id, neighbor->id);
    -        num++;
    -      }
    -    }
    -  }
    -  *nump= num;
    -} /* printend4geom */
    -
    -/*---------------------------------
    -
    -  qh_printextremes( fp, facetlist, facets, printall )
    -    print extreme points for convex hulls or halfspace intersections
    -
    -  notes:
    -    #points, followed by ids, one per line
    -
    -    sorted by id
    -    same order as qh_printpoints_out if no coplanar/interior points
    -*/
    -void qh_printextremes(FILE *fp, facetT *facetlist, setT *facets, boolT printall) {
    -  setT *vertices, *points;
    -  pointT *point;
    -  vertexT *vertex, **vertexp;
    -  int id;
    -  int numpoints=0, point_i, point_n;
    -  int allpoints= qh num_points + qh_setsize(qh other_points);
    -
    -  points= qh_settemp(allpoints);
    -  qh_setzero(points, 0, allpoints);
    -  vertices= qh_facetvertices(facetlist, facets, printall);
    -  FOREACHvertex_(vertices) {
    -    id= qh_pointid(vertex->point);
    -    if (id >= 0) {
    -      SETelem_(points, id)= vertex->point;
    -      numpoints++;
    -    }
    -  }
    -  qh_settempfree(&vertices);
    -  qh_fprintf(fp, 9086, "%d\n", numpoints);
    -  FOREACHpoint_i_(points) {
    -    if (point)
    -      qh_fprintf(fp, 9087, "%d\n", point_i);
    -  }
    -  qh_settempfree(&points);
    -} /* printextremes */
    -
    -/*---------------------------------
    -
    -  qh_printextremes_2d( fp, facetlist, facets, printall )
    -    prints point ids for facets in qh_ORIENTclock order
    -
    -  notes:
    -    #points, followed by ids, one per line
    -    if facetlist/facets are disjoint than the output includes skips
    -    errors if facets form a loop
    -    does not print coplanar points
    -*/
    -void qh_printextremes_2d(FILE *fp, facetT *facetlist, setT *facets, boolT printall) {
    -  int numfacets, numridges, totneighbors, numcoplanars, numsimplicial, numtricoplanars;
    -  setT *vertices;
    -  facetT *facet, *startfacet, *nextfacet;
    -  vertexT *vertexA, *vertexB;
    -
    -  qh_countfacets(facetlist, facets, printall, &numfacets, &numsimplicial,
    -      &totneighbors, &numridges, &numcoplanars, &numtricoplanars); /* marks qh visit_id */
    -  vertices= qh_facetvertices(facetlist, facets, printall);
    -  qh_fprintf(fp, 9088, "%d\n", qh_setsize(vertices));
    -  qh_settempfree(&vertices);
    -  if (!numfacets)
    -    return;
    -  facet= startfacet= facetlist ? facetlist : SETfirstt_(facets, facetT);
    -  qh vertex_visit++;
    -  qh visit_id++;
    -  do {
    -    if (facet->toporient ^ qh_ORIENTclock) {
    -      vertexA= SETfirstt_(facet->vertices, vertexT);
    -      vertexB= SETsecondt_(facet->vertices, vertexT);
    -      nextfacet= SETfirstt_(facet->neighbors, facetT);
    -    }else {
    -      vertexA= SETsecondt_(facet->vertices, vertexT);
    -      vertexB= SETfirstt_(facet->vertices, vertexT);
    -      nextfacet= SETsecondt_(facet->neighbors, facetT);
    -    }
    -    if (facet->visitid == qh visit_id) {
    -      qh_fprintf(qh ferr, 6218, "Qhull internal error (qh_printextremes_2d): loop in facet list.  facet %d nextfacet %d\n",
    -                 facet->id, nextfacet->id);
    -      qh_errexit2 (qh_ERRqhull, facet, nextfacet);
    -    }
    -    if (facet->visitid) {
    -      if (vertexA->visitid != qh vertex_visit) {
    -        vertexA->visitid= qh vertex_visit;
    -        qh_fprintf(fp, 9089, "%d\n", qh_pointid(vertexA->point));
    -      }
    -      if (vertexB->visitid != qh vertex_visit) {
    -        vertexB->visitid= qh vertex_visit;
    -        qh_fprintf(fp, 9090, "%d\n", qh_pointid(vertexB->point));
    -      }
    -    }
    -    facet->visitid= qh visit_id;
    -    facet= nextfacet;
    -  }while (facet && facet != startfacet);
    -} /* printextremes_2d */
    -
    -/*---------------------------------
    -
    -  qh_printextremes_d( fp, facetlist, facets, printall )
    -    print extreme points of input sites for Delaunay triangulations
    -
    -  notes:
    -    #points, followed by ids, one per line
    -
    -    unordered
    -*/
    -void qh_printextremes_d(FILE *fp, facetT *facetlist, setT *facets, boolT printall) {
    -  setT *vertices;
    -  vertexT *vertex, **vertexp;
    -  boolT upperseen, lowerseen;
    -  facetT *neighbor, **neighborp;
    -  int numpoints=0;
    -
    -  vertices= qh_facetvertices(facetlist, facets, printall);
    -  qh_vertexneighbors();
    -  FOREACHvertex_(vertices) {
    -    upperseen= lowerseen= False;
    -    FOREACHneighbor_(vertex) {
    -      if (neighbor->upperdelaunay)
    -        upperseen= True;
    -      else
    -        lowerseen= True;
    -    }
    -    if (upperseen && lowerseen) {
    -      vertex->seen= True;
    -      numpoints++;
    -    }else
    -      vertex->seen= False;
    -  }
    -  qh_fprintf(fp, 9091, "%d\n", numpoints);
    -  FOREACHvertex_(vertices) {
    -    if (vertex->seen)
    -      qh_fprintf(fp, 9092, "%d\n", qh_pointid(vertex->point));
    -  }
    -  qh_settempfree(&vertices);
    -} /* printextremes_d */
    -
    -/*---------------------------------
    -
    -  qh_printfacet( fp, facet )
    -    prints all fields of a facet to fp
    -
    -  notes:
    -    ridges printed in neighbor order
    -*/
    -void qh_printfacet(FILE *fp, facetT *facet) {
    -
    -  qh_printfacetheader(fp, facet);
    -  if (facet->ridges)
    -    qh_printfacetridges(fp, facet);
    -} /* printfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet2geom( fp, facet, color )
    -    print facet as part of a 2-d VECT for Geomview
    -
    -    notes:
    -      assume precise calculations in io.c with roundoff covered by qh_GEOMepsilon
    -      mindist is calculated within io.c.  maxoutside is calculated elsewhere
    -      so a DISTround error may have occured.
    -*/
    -void qh_printfacet2geom(FILE *fp, facetT *facet, realT color[3]) {
    -  pointT *point0, *point1;
    -  realT mindist, innerplane, outerplane;
    -  int k;
    -
    -  qh_facet2point(facet, &point0, &point1, &mindist);
    -  qh_geomplanes(facet, &outerplane, &innerplane);
    -  if (qh PRINTouter || (!qh PRINTnoplanes && !qh PRINTinner))
    -    qh_printfacet2geom_points(fp, point0, point1, facet, outerplane, color);
    -  if (qh PRINTinner || (!qh PRINTnoplanes && !qh PRINTouter &&
    -                outerplane - innerplane > 2 * qh MAXabs_coord * qh_GEOMepsilon)) {
    -    for (k=3; k--; )
    -      color[k]= 1.0 - color[k];
    -    qh_printfacet2geom_points(fp, point0, point1, facet, innerplane, color);
    -  }
    -  qh_memfree(point1, qh normal_size);
    -  qh_memfree(point0, qh normal_size);
    -} /* printfacet2geom */
    -
    -/*---------------------------------
    -
    -  qh_printfacet2geom_points( fp, point1, point2, facet, offset, color )
    -    prints a 2-d facet as a VECT with 2 points at some offset.
    -    The points are on the facet's plane.
    -*/
    -void qh_printfacet2geom_points(FILE *fp, pointT *point1, pointT *point2,
    -                               facetT *facet, realT offset, realT color[3]) {
    -  pointT *p1= point1, *p2= point2;
    -
    -  qh_fprintf(fp, 9093, "VECT 1 2 1 2 1 # f%d\n", facet->id);
    -  if (offset != 0.0) {
    -    p1= qh_projectpoint(p1, facet, -offset);
    -    p2= qh_projectpoint(p2, facet, -offset);
    -  }
    -  qh_fprintf(fp, 9094, "%8.4g %8.4g %8.4g\n%8.4g %8.4g %8.4g\n",
    -           p1[0], p1[1], 0.0, p2[0], p2[1], 0.0);
    -  if (offset != 0.0) {
    -    qh_memfree(p1, qh normal_size);
    -    qh_memfree(p2, qh normal_size);
    -  }
    -  qh_fprintf(fp, 9095, "%8.4g %8.4g %8.4g 1.0\n", color[0], color[1], color[2]);
    -} /* printfacet2geom_points */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet2math( fp, facet, format, notfirst )
    -    print 2-d Maple or Mathematica output for a facet
    -    may be non-simplicial
    -
    -  notes:
    -    use %16.8f since Mathematica 2.2 does not handle exponential format
    -    see qh_printfacet3math
    -*/
    -void qh_printfacet2math(FILE *fp, facetT *facet, qh_PRINT format, int notfirst) {
    -  pointT *point0, *point1;
    -  realT mindist;
    -  const char *pointfmt;
    -
    -  qh_facet2point(facet, &point0, &point1, &mindist);
    -  if (notfirst)
    -    qh_fprintf(fp, 9096, ",");
    -  if (format == qh_PRINTmaple)
    -    pointfmt= "[[%16.8f, %16.8f], [%16.8f, %16.8f]]\n";
    -  else
    -    pointfmt= "Line[{{%16.8f, %16.8f}, {%16.8f, %16.8f}}]\n";
    -  qh_fprintf(fp, 9097, pointfmt, point0[0], point0[1], point1[0], point1[1]);
    -  qh_memfree(point1, qh normal_size);
    -  qh_memfree(point0, qh normal_size);
    -} /* printfacet2math */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet3geom_nonsimplicial( fp, facet, color )
    -    print Geomview OFF for a 3-d nonsimplicial facet.
    -    if DOintersections, prints ridges to unvisited neighbors(qh visit_id)
    -
    -  notes
    -    uses facet->visitid for intersections and ridges
    -*/
    -void qh_printfacet3geom_nonsimplicial(FILE *fp, facetT *facet, realT color[3]) {
    -  ridgeT *ridge, **ridgep;
    -  setT *projectedpoints, *vertices;
    -  vertexT *vertex, **vertexp, *vertexA, *vertexB;
    -  pointT *projpt, *point, **pointp;
    -  facetT *neighbor;
    -  realT dist, outerplane, innerplane;
    -  int cntvertices, k;
    -  realT black[3]={0, 0, 0}, green[3]={0, 1, 0};
    -
    -  qh_geomplanes(facet, &outerplane, &innerplane);
    -  vertices= qh_facet3vertex(facet); /* oriented */
    -  cntvertices= qh_setsize(vertices);
    -  projectedpoints= qh_settemp(cntvertices);
    -  FOREACHvertex_(vertices) {
    -    zinc_(Zdistio);
    -    qh_distplane(vertex->point, facet, &dist);
    -    projpt= qh_projectpoint(vertex->point, facet, dist);
    -    qh_setappend(&projectedpoints, projpt);
    -  }
    -  if (qh PRINTouter || (!qh PRINTnoplanes && !qh PRINTinner))
    -    qh_printfacet3geom_points(fp, projectedpoints, facet, outerplane, color);
    -  if (qh PRINTinner || (!qh PRINTnoplanes && !qh PRINTouter &&
    -                outerplane - innerplane > 2 * qh MAXabs_coord * qh_GEOMepsilon)) {
    -    for (k=3; k--; )
    -      color[k]= 1.0 - color[k];
    -    qh_printfacet3geom_points(fp, projectedpoints, facet, innerplane, color);
    -  }
    -  FOREACHpoint_(projectedpoints)
    -    qh_memfree(point, qh normal_size);
    -  qh_settempfree(&projectedpoints);
    -  qh_settempfree(&vertices);
    -  if ((qh DOintersections || qh PRINTridges)
    -  && (!facet->visible || !qh NEWfacets)) {
    -    facet->visitid= qh visit_id;
    -    FOREACHridge_(facet->ridges) {
    -      neighbor= otherfacet_(ridge, facet);
    -      if (neighbor->visitid != qh visit_id) {
    -        if (qh DOintersections)
    -          qh_printhyperplaneintersection(fp, facet, neighbor, ridge->vertices, black);
    -        if (qh PRINTridges) {
    -          vertexA= SETfirstt_(ridge->vertices, vertexT);
    -          vertexB= SETsecondt_(ridge->vertices, vertexT);
    -          qh_printline3geom(fp, vertexA->point, vertexB->point, green);
    -        }
    -      }
    -    }
    -  }
    -} /* printfacet3geom_nonsimplicial */
    -
    -/*---------------------------------
    -
    -  qh_printfacet3geom_points( fp, points, facet, offset )
    -    prints a 3-d facet as OFF Geomview object.
    -    offset is relative to the facet's hyperplane
    -    Facet is determined as a list of points
    -*/
    -void qh_printfacet3geom_points(FILE *fp, setT *points, facetT *facet, realT offset, realT color[3]) {
    -  int k, n= qh_setsize(points), i;
    -  pointT *point, **pointp;
    -  setT *printpoints;
    -
    -  qh_fprintf(fp, 9098, "{ OFF %d 1 1 # f%d\n", n, facet->id);
    -  if (offset != 0.0) {
    -    printpoints= qh_settemp(n);
    -    FOREACHpoint_(points)
    -      qh_setappend(&printpoints, qh_projectpoint(point, facet, -offset));
    -  }else
    -    printpoints= points;
    -  FOREACHpoint_(printpoints) {
    -    for (k=0; k < qh hull_dim; k++) {
    -      if (k == qh DROPdim)
    -        qh_fprintf(fp, 9099, "0 ");
    -      else
    -        qh_fprintf(fp, 9100, "%8.4g ", point[k]);
    -    }
    -    if (printpoints != points)
    -      qh_memfree(point, qh normal_size);
    -    qh_fprintf(fp, 9101, "\n");
    -  }
    -  if (printpoints != points)
    -    qh_settempfree(&printpoints);
    -  qh_fprintf(fp, 9102, "%d ", n);
    -  for (i=0; i < n; i++)
    -    qh_fprintf(fp, 9103, "%d ", i);
    -  qh_fprintf(fp, 9104, "%8.4g %8.4g %8.4g 1.0 }\n", color[0], color[1], color[2]);
    -} /* printfacet3geom_points */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet3geom_simplicial(  )
    -    print Geomview OFF for a 3-d simplicial facet.
    -
    -  notes:
    -    may flip color
    -    uses facet->visitid for intersections and ridges
    -
    -    assume precise calculations in io.c with roundoff covered by qh_GEOMepsilon
    -    innerplane may be off by qh DISTround.  Maxoutside is calculated elsewhere
    -    so a DISTround error may have occured.
    -*/
    -void qh_printfacet3geom_simplicial(FILE *fp, facetT *facet, realT color[3]) {
    -  setT *points, *vertices;
    -  vertexT *vertex, **vertexp, *vertexA, *vertexB;
    -  facetT *neighbor, **neighborp;
    -  realT outerplane, innerplane;
    -  realT black[3]={0, 0, 0}, green[3]={0, 1, 0};
    -  int k;
    -
    -  qh_geomplanes(facet, &outerplane, &innerplane);
    -  vertices= qh_facet3vertex(facet);
    -  points= qh_settemp(qh TEMPsize);
    -  FOREACHvertex_(vertices)
    -    qh_setappend(&points, vertex->point);
    -  if (qh PRINTouter || (!qh PRINTnoplanes && !qh PRINTinner))
    -    qh_printfacet3geom_points(fp, points, facet, outerplane, color);
    -  if (qh PRINTinner || (!qh PRINTnoplanes && !qh PRINTouter &&
    -              outerplane - innerplane > 2 * qh MAXabs_coord * qh_GEOMepsilon)) {
    -    for (k=3; k--; )
    -      color[k]= 1.0 - color[k];
    -    qh_printfacet3geom_points(fp, points, facet, innerplane, color);
    -  }
    -  qh_settempfree(&points);
    -  qh_settempfree(&vertices);
    -  if ((qh DOintersections || qh PRINTridges)
    -  && (!facet->visible || !qh NEWfacets)) {
    -    facet->visitid= qh visit_id;
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid != qh visit_id) {
    -        vertices= qh_setnew_delnthsorted(facet->vertices, qh hull_dim,
    -                          SETindex_(facet->neighbors, neighbor), 0);
    -        if (qh DOintersections)
    -           qh_printhyperplaneintersection(fp, facet, neighbor, vertices, black);
    -        if (qh PRINTridges) {
    -          vertexA= SETfirstt_(vertices, vertexT);
    -          vertexB= SETsecondt_(vertices, vertexT);
    -          qh_printline3geom(fp, vertexA->point, vertexB->point, green);
    -        }
    -        qh_setfree(&vertices);
    -      }
    -    }
    -  }
    -} /* printfacet3geom_simplicial */
    -
    -/*---------------------------------
    -
    -  qh_printfacet3math( fp, facet, notfirst )
    -    print 3-d Maple or Mathematica output for a facet
    -
    -  notes:
    -    may be non-simplicial
    -    use %16.8f since Mathematica 2.2 does not handle exponential format
    -    see qh_printfacet2math
    -*/
    -void qh_printfacet3math(FILE *fp, facetT *facet, qh_PRINT format, int notfirst) {
    -  vertexT *vertex, **vertexp;
    -  setT *points, *vertices;
    -  pointT *point, **pointp;
    -  boolT firstpoint= True;
    -  realT dist;
    -  const char *pointfmt, *endfmt;
    -
    -  if (notfirst)
    -    qh_fprintf(fp, 9105, ",\n");
    -  vertices= qh_facet3vertex(facet);
    -  points= qh_settemp(qh_setsize(vertices));
    -  FOREACHvertex_(vertices) {
    -    zinc_(Zdistio);
    -    qh_distplane(vertex->point, facet, &dist);
    -    point= qh_projectpoint(vertex->point, facet, dist);
    -    qh_setappend(&points, point);
    -  }
    -  if (format == qh_PRINTmaple) {
    -    qh_fprintf(fp, 9106, "[");
    -    pointfmt= "[%16.8f, %16.8f, %16.8f]";
    -    endfmt= "]";
    -  }else {
    -    qh_fprintf(fp, 9107, "Polygon[{");
    -    pointfmt= "{%16.8f, %16.8f, %16.8f}";
    -    endfmt= "}]";
    -  }
    -  FOREACHpoint_(points) {
    -    if (firstpoint)
    -      firstpoint= False;
    -    else
    -      qh_fprintf(fp, 9108, ",\n");
    -    qh_fprintf(fp, 9109, pointfmt, point[0], point[1], point[2]);
    -  }
    -  FOREACHpoint_(points)
    -    qh_memfree(point, qh normal_size);
    -  qh_settempfree(&points);
    -  qh_settempfree(&vertices);
    -  qh_fprintf(fp, 9110, endfmt);
    -} /* printfacet3math */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet3vertex( fp, facet, format )
    -    print vertices in a 3-d facet as point ids
    -
    -  notes:
    -    prints number of vertices first if format == qh_PRINToff
    -    the facet may be non-simplicial
    -*/
    -void qh_printfacet3vertex(FILE *fp, facetT *facet, qh_PRINT format) {
    -  vertexT *vertex, **vertexp;
    -  setT *vertices;
    -
    -  vertices= qh_facet3vertex(facet);
    -  if (format == qh_PRINToff)
    -    qh_fprintf(fp, 9111, "%d ", qh_setsize(vertices));
    -  FOREACHvertex_(vertices)
    -    qh_fprintf(fp, 9112, "%d ", qh_pointid(vertex->point));
    -  qh_fprintf(fp, 9113, "\n");
    -  qh_settempfree(&vertices);
    -} /* printfacet3vertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet4geom_nonsimplicial(  )
    -    print Geomview 4OFF file for a 4d nonsimplicial facet
    -    prints all ridges to unvisited neighbors (qh.visit_id)
    -    if qh.DROPdim
    -      prints in OFF format
    -
    -  notes:
    -    must agree with printend4geom()
    -*/
    -void qh_printfacet4geom_nonsimplicial(FILE *fp, facetT *facet, realT color[3]) {
    -  facetT *neighbor;
    -  ridgeT *ridge, **ridgep;
    -  vertexT *vertex, **vertexp;
    -  pointT *point;
    -  int k;
    -  realT dist;
    -
    -  facet->visitid= qh visit_id;
    -  if (qh PRINTnoplanes || (facet->visible && qh NEWfacets))
    -    return;
    -  FOREACHridge_(facet->ridges) {
    -    neighbor= otherfacet_(ridge, facet);
    -    if (neighbor->visitid == qh visit_id)
    -      continue;
    -    if (qh PRINTtransparent && !neighbor->good)
    -      continue;
    -    if (qh DOintersections)
    -      qh_printhyperplaneintersection(fp, facet, neighbor, ridge->vertices, color);
    -    else {
    -      if (qh DROPdim >= 0)
    -        qh_fprintf(fp, 9114, "OFF 3 1 1 # f%d\n", facet->id);
    -      else {
    -        qh printoutvar++;
    -        qh_fprintf(fp, 9115, "# r%d between f%d f%d\n", ridge->id, facet->id, neighbor->id);
    -      }
    -      FOREACHvertex_(ridge->vertices) {
    -        zinc_(Zdistio);
    -        qh_distplane(vertex->point,facet, &dist);
    -        point=qh_projectpoint(vertex->point,facet, dist);
    -        for (k=0; k < qh hull_dim; k++) {
    -          if (k != qh DROPdim)
    -            qh_fprintf(fp, 9116, "%8.4g ", point[k]);
    -        }
    -        qh_fprintf(fp, 9117, "\n");
    -        qh_memfree(point, qh normal_size);
    -      }
    -      if (qh DROPdim >= 0)
    -        qh_fprintf(fp, 9118, "3 0 1 2 %8.4g %8.4g %8.4g\n", color[0], color[1], color[2]);
    -    }
    -  }
    -} /* printfacet4geom_nonsimplicial */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacet4geom_simplicial( fp, facet, color )
    -    print Geomview 4OFF file for a 4d simplicial facet
    -    prints triangles for unvisited neighbors (qh.visit_id)
    -
    -  notes:
    -    must agree with printend4geom()
    -*/
    -void qh_printfacet4geom_simplicial(FILE *fp, facetT *facet, realT color[3]) {
    -  setT *vertices;
    -  facetT *neighbor, **neighborp;
    -  vertexT *vertex, **vertexp;
    -  int k;
    -
    -  facet->visitid= qh visit_id;
    -  if (qh PRINTnoplanes || (facet->visible && qh NEWfacets))
    -    return;
    -  FOREACHneighbor_(facet) {
    -    if (neighbor->visitid == qh visit_id)
    -      continue;
    -    if (qh PRINTtransparent && !neighbor->good)
    -      continue;
    -    vertices= qh_setnew_delnthsorted(facet->vertices, qh hull_dim,
    -                          SETindex_(facet->neighbors, neighbor), 0);
    -    if (qh DOintersections)
    -      qh_printhyperplaneintersection(fp, facet, neighbor, vertices, color);
    -    else {
    -      if (qh DROPdim >= 0)
    -        qh_fprintf(fp, 9119, "OFF 3 1 1 # ridge between f%d f%d\n",
    -                facet->id, neighbor->id);
    -      else {
    -        qh printoutvar++;
    -        qh_fprintf(fp, 9120, "# ridge between f%d f%d\n", facet->id, neighbor->id);
    -      }
    -      FOREACHvertex_(vertices) {
    -        for (k=0; k < qh hull_dim; k++) {
    -          if (k != qh DROPdim)
    -            qh_fprintf(fp, 9121, "%8.4g ", vertex->point[k]);
    -        }
    -        qh_fprintf(fp, 9122, "\n");
    -      }
    -      if (qh DROPdim >= 0)
    -        qh_fprintf(fp, 9123, "3 0 1 2 %8.4g %8.4g %8.4g\n", color[0], color[1], color[2]);
    -    }
    -    qh_setfree(&vertices);
    -  }
    -} /* printfacet4geom_simplicial */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacetNvertex_nonsimplicial( fp, facet, id, format )
    -    print vertices for an N-d non-simplicial facet
    -    triangulates each ridge to the id
    -*/
    -void qh_printfacetNvertex_nonsimplicial(FILE *fp, facetT *facet, int id, qh_PRINT format) {
    -  vertexT *vertex, **vertexp;
    -  ridgeT *ridge, **ridgep;
    -
    -  if (facet->visible && qh NEWfacets)
    -    return;
    -  FOREACHridge_(facet->ridges) {
    -    if (format == qh_PRINTtriangles)
    -      qh_fprintf(fp, 9124, "%d ", qh hull_dim);
    -    qh_fprintf(fp, 9125, "%d ", id);
    -    if ((ridge->top == facet) ^ qh_ORIENTclock) {
    -      FOREACHvertex_(ridge->vertices)
    -        qh_fprintf(fp, 9126, "%d ", qh_pointid(vertex->point));
    -    }else {
    -      FOREACHvertexreverse12_(ridge->vertices)
    -        qh_fprintf(fp, 9127, "%d ", qh_pointid(vertex->point));
    -    }
    -    qh_fprintf(fp, 9128, "\n");
    -  }
    -} /* printfacetNvertex_nonsimplicial */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacetNvertex_simplicial( fp, facet, format )
    -    print vertices for an N-d simplicial facet
    -    prints vertices for non-simplicial facets
    -      2-d facets (orientation preserved by qh_mergefacet2d)
    -      PRINToff ('o') for 4-d and higher
    -*/
    -void qh_printfacetNvertex_simplicial(FILE *fp, facetT *facet, qh_PRINT format) {
    -  vertexT *vertex, **vertexp;
    -
    -  if (format == qh_PRINToff || format == qh_PRINTtriangles)
    -    qh_fprintf(fp, 9129, "%d ", qh_setsize(facet->vertices));
    -  if ((facet->toporient ^ qh_ORIENTclock)
    -  || (qh hull_dim > 2 && !facet->simplicial)) {
    -    FOREACHvertex_(facet->vertices)
    -      qh_fprintf(fp, 9130, "%d ", qh_pointid(vertex->point));
    -  }else {
    -    FOREACHvertexreverse12_(facet->vertices)
    -      qh_fprintf(fp, 9131, "%d ", qh_pointid(vertex->point));
    -  }
    -  qh_fprintf(fp, 9132, "\n");
    -} /* printfacetNvertex_simplicial */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacetheader( fp, facet )
    -    prints header fields of a facet to fp
    -
    -  notes:
    -    for 'f' output and debugging
    -    Same as QhullFacet::printHeader()
    -*/
    -void qh_printfacetheader(FILE *fp, facetT *facet) {
    -  pointT *point, **pointp, *furthest;
    -  facetT *neighbor, **neighborp;
    -  realT dist;
    -
    -  if (facet == qh_MERGEridge) {
    -    qh_fprintf(fp, 9133, " MERGEridge\n");
    -    return;
    -  }else if (facet == qh_DUPLICATEridge) {
    -    qh_fprintf(fp, 9134, " DUPLICATEridge\n");
    -    return;
    -  }else if (!facet) {
    -    qh_fprintf(fp, 9135, " NULLfacet\n");
    -    return;
    -  }
    -  qh old_randomdist= qh RANDOMdist;
    -  qh RANDOMdist= False;
    -  qh_fprintf(fp, 9136, "- f%d\n", facet->id);
    -  qh_fprintf(fp, 9137, "    - flags:");
    -  if (facet->toporient)
    -    qh_fprintf(fp, 9138, " top");
    -  else
    -    qh_fprintf(fp, 9139, " bottom");
    -  if (facet->simplicial)
    -    qh_fprintf(fp, 9140, " simplicial");
    -  if (facet->tricoplanar)
    -    qh_fprintf(fp, 9141, " tricoplanar");
    -  if (facet->upperdelaunay)
    -    qh_fprintf(fp, 9142, " upperDelaunay");
    -  if (facet->visible)
    -    qh_fprintf(fp, 9143, " visible");
    -  if (facet->newfacet)
    -    qh_fprintf(fp, 9144, " new");
    -  if (facet->tested)
    -    qh_fprintf(fp, 9145, " tested");
    -  if (!facet->good)
    -    qh_fprintf(fp, 9146, " notG");
    -  if (facet->seen)
    -    qh_fprintf(fp, 9147, " seen");
    -  if (facet->coplanar)
    -    qh_fprintf(fp, 9148, " coplanar");
    -  if (facet->mergehorizon)
    -    qh_fprintf(fp, 9149, " mergehorizon");
    -  if (facet->keepcentrum)
    -    qh_fprintf(fp, 9150, " keepcentrum");
    -  if (facet->dupridge)
    -    qh_fprintf(fp, 9151, " dupridge");
    -  if (facet->mergeridge && !facet->mergeridge2)
    -    qh_fprintf(fp, 9152, " mergeridge1");
    -  if (facet->mergeridge2)
    -    qh_fprintf(fp, 9153, " mergeridge2");
    -  if (facet->newmerge)
    -    qh_fprintf(fp, 9154, " newmerge");
    -  if (facet->flipped)
    -    qh_fprintf(fp, 9155, " flipped");
    -  if (facet->notfurthest)
    -    qh_fprintf(fp, 9156, " notfurthest");
    -  if (facet->degenerate)
    -    qh_fprintf(fp, 9157, " degenerate");
    -  if (facet->redundant)
    -    qh_fprintf(fp, 9158, " redundant");
    -  qh_fprintf(fp, 9159, "\n");
    -  if (facet->isarea)
    -    qh_fprintf(fp, 9160, "    - area: %2.2g\n", facet->f.area);
    -  else if (qh NEWfacets && facet->visible && facet->f.replace)
    -    qh_fprintf(fp, 9161, "    - replacement: f%d\n", facet->f.replace->id);
    -  else if (facet->newfacet) {
    -    if (facet->f.samecycle && facet->f.samecycle != facet)
    -      qh_fprintf(fp, 9162, "    - shares same visible/horizon as f%d\n", facet->f.samecycle->id);
    -  }else if (facet->tricoplanar /* !isarea */) {
    -    if (facet->f.triowner)
    -      qh_fprintf(fp, 9163, "    - owner of normal & centrum is facet f%d\n", facet->f.triowner->id);
    -  }else if (facet->f.newcycle)
    -    qh_fprintf(fp, 9164, "    - was horizon to f%d\n", facet->f.newcycle->id);
    -  if (facet->nummerge)
    -    qh_fprintf(fp, 9165, "    - merges: %d\n", facet->nummerge);
    -  qh_printpointid(fp, "    - normal: ", qh hull_dim, facet->normal, -1);
    -  qh_fprintf(fp, 9166, "    - offset: %10.7g\n", facet->offset);
    -  if (qh CENTERtype == qh_ASvoronoi || facet->center)
    -    qh_printcenter(fp, qh_PRINTfacets, "    - center: ", facet);
    -#if qh_MAXoutside
    -  if (facet->maxoutside > qh DISTround)
    -    qh_fprintf(fp, 9167, "    - maxoutside: %10.7g\n", facet->maxoutside);
    -#endif
    -  if (!SETempty_(facet->outsideset)) {
    -    furthest= (pointT*)qh_setlast(facet->outsideset);
    -    if (qh_setsize(facet->outsideset) < 6) {
    -      qh_fprintf(fp, 9168, "    - outside set(furthest p%d):\n", qh_pointid(furthest));
    -      FOREACHpoint_(facet->outsideset)
    -        qh_printpoint(fp, "     ", point);
    -    }else if (qh_setsize(facet->outsideset) < 21) {
    -      qh_printpoints(fp, "    - outside set:", facet->outsideset);
    -    }else {
    -      qh_fprintf(fp, 9169, "    - outside set:  %d points.", qh_setsize(facet->outsideset));
    -      qh_printpoint(fp, "  Furthest", furthest);
    -    }
    -#if !qh_COMPUTEfurthest
    -    qh_fprintf(fp, 9170, "    - furthest distance= %2.2g\n", facet->furthestdist);
    -#endif
    -  }
    -  if (!SETempty_(facet->coplanarset)) {
    -    furthest= (pointT*)qh_setlast(facet->coplanarset);
    -    if (qh_setsize(facet->coplanarset) < 6) {
    -      qh_fprintf(fp, 9171, "    - coplanar set(furthest p%d):\n", qh_pointid(furthest));
    -      FOREACHpoint_(facet->coplanarset)
    -        qh_printpoint(fp, "     ", point);
    -    }else if (qh_setsize(facet->coplanarset) < 21) {
    -      qh_printpoints(fp, "    - coplanar set:", facet->coplanarset);
    -    }else {
    -      qh_fprintf(fp, 9172, "    - coplanar set:  %d points.", qh_setsize(facet->coplanarset));
    -      qh_printpoint(fp, "  Furthest", furthest);
    -    }
    -    zinc_(Zdistio);
    -    qh_distplane(furthest, facet, &dist);
    -    qh_fprintf(fp, 9173, "      furthest distance= %2.2g\n", dist);
    -  }
    -  qh_printvertices(fp, "    - vertices:", facet->vertices);
    -  qh_fprintf(fp, 9174, "    - neighboring facets:");
    -  FOREACHneighbor_(facet) {
    -    if (neighbor == qh_MERGEridge)
    -      qh_fprintf(fp, 9175, " MERGE");
    -    else if (neighbor == qh_DUPLICATEridge)
    -      qh_fprintf(fp, 9176, " DUP");
    -    else
    -      qh_fprintf(fp, 9177, " f%d", neighbor->id);
    -  }
    -  qh_fprintf(fp, 9178, "\n");
    -  qh RANDOMdist= qh old_randomdist;
    -} /* printfacetheader */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacetridges( fp, facet )
    -    prints ridges of a facet to fp
    -
    -  notes:
    -    ridges printed in neighbor order
    -    assumes the ridges exist
    -    for 'f' output
    -    same as QhullFacet::printRidges
    -*/
    -void qh_printfacetridges(FILE *fp, facetT *facet) {
    -  facetT *neighbor, **neighborp;
    -  ridgeT *ridge, **ridgep;
    -  int numridges= 0;
    -
    -
    -  if (facet->visible && qh NEWfacets) {
    -    qh_fprintf(fp, 9179, "    - ridges(ids may be garbage):");
    -    FOREACHridge_(facet->ridges)
    -      qh_fprintf(fp, 9180, " r%d", ridge->id);
    -    qh_fprintf(fp, 9181, "\n");
    -  }else {
    -    qh_fprintf(fp, 9182, "    - ridges:\n");
    -    FOREACHridge_(facet->ridges)
    -      ridge->seen= False;
    -    if (qh hull_dim == 3) {
    -      ridge= SETfirstt_(facet->ridges, ridgeT);
    -      while (ridge && !ridge->seen) {
    -        ridge->seen= True;
    -        qh_printridge(fp, ridge);
    -        numridges++;
    -        ridge= qh_nextridge3d(ridge, facet, NULL);
    -        }
    -    }else {
    -      FOREACHneighbor_(facet) {
    -        FOREACHridge_(facet->ridges) {
    -          if (otherfacet_(ridge,facet) == neighbor) {
    -            ridge->seen= True;
    -            qh_printridge(fp, ridge);
    -            numridges++;
    -          }
    -        }
    -      }
    -    }
    -    if (numridges != qh_setsize(facet->ridges)) {
    -      qh_fprintf(fp, 9183, "     - all ridges:");
    -      FOREACHridge_(facet->ridges)
    -        qh_fprintf(fp, 9184, " r%d", ridge->id);
    -        qh_fprintf(fp, 9185, "\n");
    -    }
    -    FOREACHridge_(facet->ridges) {
    -      if (!ridge->seen)
    -        qh_printridge(fp, ridge);
    -    }
    -  }
    -} /* printfacetridges */
    -
    -/*---------------------------------
    -
    -  qh_printfacets( fp, format, facetlist, facets, printall )
    -    prints facetlist and/or facet set in output format
    -
    -  notes:
    -    also used for specialized formats ('FO' and summary)
    -    turns off 'Rn' option since want actual numbers
    -*/
    -void qh_printfacets(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall) {
    -  int numfacets, numsimplicial, numridges, totneighbors, numcoplanars, numtricoplanars;
    -  facetT *facet, **facetp;
    -  setT *vertices;
    -  coordT *center;
    -  realT outerplane, innerplane;
    -
    -  qh old_randomdist= qh RANDOMdist;
    -  qh RANDOMdist= False;
    -  if (qh CDDoutput && (format == qh_PRINTcentrums || format == qh_PRINTpointintersect || format == qh_PRINToff))
    -    qh_fprintf(qh ferr, 7056, "qhull warning: CDD format is not available for centrums, halfspace\nintersections, and OFF file format.\n");
    -  if (format == qh_PRINTnone)
    -    ; /* print nothing */
    -  else if (format == qh_PRINTaverage) {
    -    vertices= qh_facetvertices(facetlist, facets, printall);
    -    center= qh_getcenter(vertices);
    -    qh_fprintf(fp, 9186, "%d 1\n", qh hull_dim);
    -    qh_printpointid(fp, NULL, qh hull_dim, center, -1);
    -    qh_memfree(center, qh normal_size);
    -    qh_settempfree(&vertices);
    -  }else if (format == qh_PRINTextremes) {
    -    if (qh DELAUNAY)
    -      qh_printextremes_d(fp, facetlist, facets, printall);
    -    else if (qh hull_dim == 2)
    -      qh_printextremes_2d(fp, facetlist, facets, printall);
    -    else
    -      qh_printextremes(fp, facetlist, facets, printall);
    -  }else if (format == qh_PRINToptions)
    -    qh_fprintf(fp, 9187, "Options selected for Qhull %s:\n%s\n", qh_version, qh qhull_options);
    -  else if (format == qh_PRINTpoints && !qh VORONOI)
    -    qh_printpoints_out(fp, facetlist, facets, printall);
    -  else if (format == qh_PRINTqhull)
    -    qh_fprintf(fp, 9188, "%s | %s\n", qh rbox_command, qh qhull_command);
    -  else if (format == qh_PRINTsize) {
    -    qh_fprintf(fp, 9189, "0\n2 ");
    -    qh_fprintf(fp, 9190, qh_REAL_1, qh totarea);
    -    qh_fprintf(fp, 9191, qh_REAL_1, qh totvol);
    -    qh_fprintf(fp, 9192, "\n");
    -  }else if (format == qh_PRINTsummary) {
    -    qh_countfacets(facetlist, facets, printall, &numfacets, &numsimplicial,
    -      &totneighbors, &numridges, &numcoplanars, &numtricoplanars);
    -    vertices= qh_facetvertices(facetlist, facets, printall);
    -    qh_fprintf(fp, 9193, "10 %d %d %d %d %d %d %d %d %d %d\n2 ", qh hull_dim,
    -                qh num_points + qh_setsize(qh other_points),
    -                qh num_vertices, qh num_facets - qh num_visible,
    -                qh_setsize(vertices), numfacets, numcoplanars,
    -                numfacets - numsimplicial, zzval_(Zdelvertextot),
    -                numtricoplanars);
    -    qh_settempfree(&vertices);
    -    qh_outerinner(NULL, &outerplane, &innerplane);
    -    qh_fprintf(fp, 9194, qh_REAL_2n, outerplane, innerplane);
    -  }else if (format == qh_PRINTvneighbors)
    -    qh_printvneighbors(fp, facetlist, facets, printall);
    -  else if (qh VORONOI && format == qh_PRINToff)
    -    qh_printvoronoi(fp, format, facetlist, facets, printall);
    -  else if (qh VORONOI && format == qh_PRINTgeom) {
    -    qh_printbegin(fp, format, facetlist, facets, printall);
    -    qh_printvoronoi(fp, format, facetlist, facets, printall);
    -    qh_printend(fp, format, facetlist, facets, printall);
    -  }else if (qh VORONOI
    -  && (format == qh_PRINTvertices || format == qh_PRINTinner || format == qh_PRINTouter))
    -    qh_printvdiagram(fp, format, facetlist, facets, printall);
    -  else {
    -    qh_printbegin(fp, format, facetlist, facets, printall);
    -    FORALLfacet_(facetlist)
    -      qh_printafacet(fp, format, facet, printall);
    -    FOREACHfacet_(facets)
    -      qh_printafacet(fp, format, facet, printall);
    -    qh_printend(fp, format, facetlist, facets, printall);
    -  }
    -  qh RANDOMdist= qh old_randomdist;
    -} /* printfacets */
    -
    -
    -/*---------------------------------
    -
    -  qh_printhyperplaneintersection( fp, facet1, facet2, vertices, color )
    -    print Geomview OFF or 4OFF for the intersection of two hyperplanes in 3-d or 4-d
    -*/
    -void qh_printhyperplaneintersection(FILE *fp, facetT *facet1, facetT *facet2,
    -                   setT *vertices, realT color[3]) {
    -  realT costheta, denominator, dist1, dist2, s, t, mindenom, p[4];
    -  vertexT *vertex, **vertexp;
    -  int i, k;
    -  boolT nearzero1, nearzero2;
    -
    -  costheta= qh_getangle(facet1->normal, facet2->normal);
    -  denominator= 1 - costheta * costheta;
    -  i= qh_setsize(vertices);
    -  if (qh hull_dim == 3)
    -    qh_fprintf(fp, 9195, "VECT 1 %d 1 %d 1 ", i, i);
    -  else if (qh hull_dim == 4 && qh DROPdim >= 0)
    -    qh_fprintf(fp, 9196, "OFF 3 1 1 ");
    -  else
    -    qh printoutvar++;
    -  qh_fprintf(fp, 9197, "# intersect f%d f%d\n", facet1->id, facet2->id);
    -  mindenom= 1 / (10.0 * qh MAXabs_coord);
    -  FOREACHvertex_(vertices) {
    -    zadd_(Zdistio, 2);
    -    qh_distplane(vertex->point, facet1, &dist1);
    -    qh_distplane(vertex->point, facet2, &dist2);
    -    s= qh_divzero(-dist1 + costheta * dist2, denominator,mindenom,&nearzero1);
    -    t= qh_divzero(-dist2 + costheta * dist1, denominator,mindenom,&nearzero2);
    -    if (nearzero1 || nearzero2)
    -      s= t= 0.0;
    -    for (k=qh hull_dim; k--; )
    -      p[k]= vertex->point[k] + facet1->normal[k] * s + facet2->normal[k] * t;
    -    if (qh PRINTdim <= 3) {
    -      qh_projectdim3 (p, p);
    -      qh_fprintf(fp, 9198, "%8.4g %8.4g %8.4g # ", p[0], p[1], p[2]);
    -    }else
    -      qh_fprintf(fp, 9199, "%8.4g %8.4g %8.4g %8.4g # ", p[0], p[1], p[2], p[3]);
    -    if (nearzero1+nearzero2)
    -      qh_fprintf(fp, 9200, "p%d(coplanar facets)\n", qh_pointid(vertex->point));
    -    else
    -      qh_fprintf(fp, 9201, "projected p%d\n", qh_pointid(vertex->point));
    -  }
    -  if (qh hull_dim == 3)
    -    qh_fprintf(fp, 9202, "%8.4g %8.4g %8.4g 1.0\n", color[0], color[1], color[2]);
    -  else if (qh hull_dim == 4 && qh DROPdim >= 0)
    -    qh_fprintf(fp, 9203, "3 0 1 2 %8.4g %8.4g %8.4g 1.0\n", color[0], color[1], color[2]);
    -} /* printhyperplaneintersection */
    -
    -/*---------------------------------
    -
    -  qh_printline3geom( fp, pointA, pointB, color )
    -    prints a line as a VECT
    -    prints 0's for qh.DROPdim
    -
    -  notes:
    -    if pointA == pointB,
    -      it's a 1 point VECT
    -*/
    -void qh_printline3geom(FILE *fp, pointT *pointA, pointT *pointB, realT color[3]) {
    -  int k;
    -  realT pA[4], pB[4];
    -
    -  qh_projectdim3(pointA, pA);
    -  qh_projectdim3(pointB, pB);
    -  if ((fabs(pA[0] - pB[0]) > 1e-3) ||
    -      (fabs(pA[1] - pB[1]) > 1e-3) ||
    -      (fabs(pA[2] - pB[2]) > 1e-3)) {
    -    qh_fprintf(fp, 9204, "VECT 1 2 1 2 1\n");
    -    for (k=0; k < 3; k++)
    -       qh_fprintf(fp, 9205, "%8.4g ", pB[k]);
    -    qh_fprintf(fp, 9206, " # p%d\n", qh_pointid(pointB));
    -  }else
    -    qh_fprintf(fp, 9207, "VECT 1 1 1 1 1\n");
    -  for (k=0; k < 3; k++)
    -    qh_fprintf(fp, 9208, "%8.4g ", pA[k]);
    -  qh_fprintf(fp, 9209, " # p%d\n", qh_pointid(pointA));
    -  qh_fprintf(fp, 9210, "%8.4g %8.4g %8.4g 1\n", color[0], color[1], color[2]);
    -}
    -
    -/*---------------------------------
    -
    -  qh_printneighborhood( fp, format, facetA, facetB, printall )
    -    print neighborhood of one or two facets
    -
    -  notes:
    -    calls qh_findgood_all()
    -    bumps qh.visit_id
    -*/
    -void qh_printneighborhood(FILE *fp, qh_PRINT format, facetT *facetA, facetT *facetB, boolT printall) {
    -  facetT *neighbor, **neighborp, *facet;
    -  setT *facets;
    -
    -  if (format == qh_PRINTnone)
    -    return;
    -  qh_findgood_all(qh facet_list);
    -  if (facetA == facetB)
    -    facetB= NULL;
    -  facets= qh_settemp(2*(qh_setsize(facetA->neighbors)+1));
    -  qh visit_id++;
    -  for (facet= facetA; facet; facet= ((facet == facetA) ? facetB : NULL)) {
    -    if (facet->visitid != qh visit_id) {
    -      facet->visitid= qh visit_id;
    -      qh_setappend(&facets, facet);
    -    }
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid == qh visit_id)
    -        continue;
    -      neighbor->visitid= qh visit_id;
    -      if (printall || !qh_skipfacet(neighbor))
    -        qh_setappend(&facets, neighbor);
    -    }
    -  }
    -  qh_printfacets(fp, format, NULL, facets, printall);
    -  qh_settempfree(&facets);
    -} /* printneighborhood */
    -
    -/*---------------------------------
    -
    -  qh_printpoint( fp, string, point )
    -  qh_printpointid( fp, string, dim, point, id )
    -    prints the coordinates of a point
    -
    -  returns:
    -    if string is defined
    -      prints 'string p%d' (skips p%d if id=-1)
    -
    -  notes:
    -    nop if point is NULL
    -    prints id unless it is undefined (-1)
    -    Same as QhullPoint's printPoint
    -*/
    -void qh_printpoint(FILE *fp, const char *string, pointT *point) {
    -  int id= qh_pointid( point);
    -
    -  qh_printpointid( fp, string, qh hull_dim, point, id);
    -} /* printpoint */
    -
    -void qh_printpointid(FILE *fp, const char *string, int dim, pointT *point, int id) {
    -  int k;
    -  realT r; /*bug fix*/
    -
    -  if (!point)
    -    return;
    -  if (string) {
    -    qh_fprintf(fp, 9211, "%s", string);
    -   if (id != -1)
    -      qh_fprintf(fp, 9212, " p%d: ", id);
    -  }
    -  for (k=dim; k--; ) {
    -    r= *point++;
    -    if (string)
    -      qh_fprintf(fp, 9213, " %8.4g", r);
    -    else
    -      qh_fprintf(fp, 9214, qh_REAL_1, r);
    -  }
    -  qh_fprintf(fp, 9215, "\n");
    -} /* printpointid */
    -
    -/*---------------------------------
    -
    -  qh_printpoint3( fp, point )
    -    prints 2-d, 3-d, or 4-d point as Geomview 3-d coordinates
    -*/
    -void qh_printpoint3 (FILE *fp, pointT *point) {
    -  int k;
    -  realT p[4];
    -
    -  qh_projectdim3 (point, p);
    -  for (k=0; k < 3; k++)
    -    qh_fprintf(fp, 9216, "%8.4g ", p[k]);
    -  qh_fprintf(fp, 9217, " # p%d\n", qh_pointid(point));
    -} /* printpoint3 */
    -
    -/*----------------------------------------
    --printpoints- print pointids for a set of points starting at index
    -   see geom.c
    -*/
    -
    -/*---------------------------------
    -
    -  qh_printpoints_out( fp, facetlist, facets, printall )
    -    prints vertices, coplanar/inside points, for facets by their point coordinates
    -    allows qh.CDDoutput
    -
    -  notes:
    -    same format as qhull input
    -    if no coplanar/interior points,
    -      same order as qh_printextremes
    -*/
    -void qh_printpoints_out(FILE *fp, facetT *facetlist, setT *facets, boolT printall) {
    -  int allpoints= qh num_points + qh_setsize(qh other_points);
    -  int numpoints=0, point_i, point_n;
    -  setT *vertices, *points;
    -  facetT *facet, **facetp;
    -  pointT *point, **pointp;
    -  vertexT *vertex, **vertexp;
    -  int id;
    -
    -  points= qh_settemp(allpoints);
    -  qh_setzero(points, 0, allpoints);
    -  vertices= qh_facetvertices(facetlist, facets, printall);
    -  FOREACHvertex_(vertices) {
    -    id= qh_pointid(vertex->point);
    -    if (id >= 0)
    -      SETelem_(points, id)= vertex->point;
    -  }
    -  if (qh KEEPinside || qh KEEPcoplanar || qh KEEPnearinside) {
    -    FORALLfacet_(facetlist) {
    -      if (!printall && qh_skipfacet(facet))
    -        continue;
    -      FOREACHpoint_(facet->coplanarset) {
    -        id= qh_pointid(point);
    -        if (id >= 0)
    -          SETelem_(points, id)= point;
    -      }
    -    }
    -    FOREACHfacet_(facets) {
    -      if (!printall && qh_skipfacet(facet))
    -        continue;
    -      FOREACHpoint_(facet->coplanarset) {
    -        id= qh_pointid(point);
    -        if (id >= 0)
    -          SETelem_(points, id)= point;
    -      }
    -    }
    -  }
    -  qh_settempfree(&vertices);
    -  FOREACHpoint_i_(points) {
    -    if (point)
    -      numpoints++;
    -  }
    -  if (qh CDDoutput)
    -    qh_fprintf(fp, 9218, "%s | %s\nbegin\n%d %d real\n", qh rbox_command,
    -             qh qhull_command, numpoints, qh hull_dim + 1);
    -  else
    -    qh_fprintf(fp, 9219, "%d\n%d\n", qh hull_dim, numpoints);
    -  FOREACHpoint_i_(points) {
    -    if (point) {
    -      if (qh CDDoutput)
    -        qh_fprintf(fp, 9220, "1 ");
    -      qh_printpoint(fp, NULL, point);
    -    }
    -  }
    -  if (qh CDDoutput)
    -    qh_fprintf(fp, 9221, "end\n");
    -  qh_settempfree(&points);
    -} /* printpoints_out */
    -
    -
    -/*---------------------------------
    -
    -  qh_printpointvect( fp, point, normal, center, radius, color )
    -    prints a 2-d, 3-d, or 4-d point as 3-d VECT's relative to normal or to center point
    -*/
    -void qh_printpointvect(FILE *fp, pointT *point, coordT *normal, pointT *center, realT radius, realT color[3]) {
    -  realT diff[4], pointA[4];
    -  int k;
    -
    -  for (k=qh hull_dim; k--; ) {
    -    if (center)
    -      diff[k]= point[k]-center[k];
    -    else if (normal)
    -      diff[k]= normal[k];
    -    else
    -      diff[k]= 0;
    -  }
    -  if (center)
    -    qh_normalize2 (diff, qh hull_dim, True, NULL, NULL);
    -  for (k=qh hull_dim; k--; )
    -    pointA[k]= point[k]+diff[k] * radius;
    -  qh_printline3geom(fp, point, pointA, color);
    -} /* printpointvect */
    -
    -/*---------------------------------
    -
    -  qh_printpointvect2( fp, point, normal, center, radius )
    -    prints a 2-d, 3-d, or 4-d point as 2 3-d VECT's for an imprecise point
    -*/
    -void qh_printpointvect2 (FILE *fp, pointT *point, coordT *normal, pointT *center, realT radius) {
    -  realT red[3]={1, 0, 0}, yellow[3]={1, 1, 0};
    -
    -  qh_printpointvect(fp, point, normal, center, radius, red);
    -  qh_printpointvect(fp, point, normal, center, -radius, yellow);
    -} /* printpointvect2 */
    -
    -/*---------------------------------
    -
    -  qh_printridge( fp, ridge )
    -    prints the information in a ridge
    -
    -  notes:
    -    for qh_printfacetridges()
    -    same as operator<< [QhullRidge.cpp]
    -*/
    -void qh_printridge(FILE *fp, ridgeT *ridge) {
    -
    -  qh_fprintf(fp, 9222, "     - r%d", ridge->id);
    -  if (ridge->tested)
    -    qh_fprintf(fp, 9223, " tested");
    -  if (ridge->nonconvex)
    -    qh_fprintf(fp, 9224, " nonconvex");
    -  qh_fprintf(fp, 9225, "\n");
    -  qh_printvertices(fp, "           vertices:", ridge->vertices);
    -  if (ridge->top && ridge->bottom)
    -    qh_fprintf(fp, 9226, "           between f%d and f%d\n",
    -            ridge->top->id, ridge->bottom->id);
    -} /* printridge */
    -
    -/*---------------------------------
    -
    -  qh_printspheres( fp, vertices, radius )
    -    prints 3-d vertices as OFF spheres
    -
    -  notes:
    -    inflated octahedron from Stuart Levy earth/mksphere2
    -*/
    -void qh_printspheres(FILE *fp, setT *vertices, realT radius) {
    -  vertexT *vertex, **vertexp;
    -
    -  qh printoutnum++;
    -  qh_fprintf(fp, 9227, "{appearance {-edge -normal normscale 0} {\n\
    -INST geom {define vsphere OFF\n\
    -18 32 48\n\
    -\n\
    -0 0 1\n\
    -1 0 0\n\
    -0 1 0\n\
    --1 0 0\n\
    -0 -1 0\n\
    -0 0 -1\n\
    -0.707107 0 0.707107\n\
    -0 -0.707107 0.707107\n\
    -0.707107 -0.707107 0\n\
    --0.707107 0 0.707107\n\
    --0.707107 -0.707107 0\n\
    -0 0.707107 0.707107\n\
    --0.707107 0.707107 0\n\
    -0.707107 0.707107 0\n\
    -0.707107 0 -0.707107\n\
    -0 0.707107 -0.707107\n\
    --0.707107 0 -0.707107\n\
    -0 -0.707107 -0.707107\n\
    -\n\
    -3 0 6 11\n\
    -3 0 7 6 \n\
    -3 0 9 7 \n\
    -3 0 11 9\n\
    -3 1 6 8 \n\
    -3 1 8 14\n\
    -3 1 13 6\n\
    -3 1 14 13\n\
    -3 2 11 13\n\
    -3 2 12 11\n\
    -3 2 13 15\n\
    -3 2 15 12\n\
    -3 3 9 12\n\
    -3 3 10 9\n\
    -3 3 12 16\n\
    -3 3 16 10\n\
    -3 4 7 10\n\
    -3 4 8 7\n\
    -3 4 10 17\n\
    -3 4 17 8\n\
    -3 5 14 17\n\
    -3 5 15 14\n\
    -3 5 16 15\n\
    -3 5 17 16\n\
    -3 6 13 11\n\
    -3 7 8 6\n\
    -3 9 10 7\n\
    -3 11 12 9\n\
    -3 14 8 17\n\
    -3 15 13 14\n\
    -3 16 12 15\n\
    -3 17 10 16\n} transforms { TLIST\n");
    -  FOREACHvertex_(vertices) {
    -    qh_fprintf(fp, 9228, "%8.4g 0 0 0 # v%d\n 0 %8.4g 0 0\n0 0 %8.4g 0\n",
    -      radius, vertex->id, radius, radius);
    -    qh_printpoint3 (fp, vertex->point);
    -    qh_fprintf(fp, 9229, "1\n");
    -  }
    -  qh_fprintf(fp, 9230, "}}}\n");
    -} /* printspheres */
    -
    -
    -/*----------------------------------------------
    --printsummary-
    -                see libqhull.c
    -*/
    -
    -/*---------------------------------
    -
    -  qh_printvdiagram( fp, format, facetlist, facets, printall )
    -    print voronoi diagram
    -      # of pairs of input sites
    -      #indices site1 site2 vertex1 ...
    -
    -    sites indexed by input point id
    -      point 0 is the first input point
    -    vertices indexed by 'o' and 'p' order
    -      vertex 0 is the 'vertex-at-infinity'
    -      vertex 1 is the first Voronoi vertex
    -
    -  see:
    -    qh_printvoronoi()
    -    qh_eachvoronoi_all()
    -
    -  notes:
    -    if all facets are upperdelaunay,
    -      prints upper hull (furthest-site Voronoi diagram)
    -*/
    -void qh_printvdiagram(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall) {
    -  setT *vertices;
    -  int totcount, numcenters;
    -  boolT isLower;
    -  qh_RIDGE innerouter= qh_RIDGEall;
    -  printvridgeT printvridge= NULL;
    -
    -  if (format == qh_PRINTvertices) {
    -    innerouter= qh_RIDGEall;
    -    printvridge= qh_printvridge;
    -  }else if (format == qh_PRINTinner) {
    -    innerouter= qh_RIDGEinner;
    -    printvridge= qh_printvnorm;
    -  }else if (format == qh_PRINTouter) {
    -    innerouter= qh_RIDGEouter;
    -    printvridge= qh_printvnorm;
    -  }else {
    -    qh_fprintf(qh ferr, 6219, "Qhull internal error (qh_printvdiagram): unknown print format %d.\n", format);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  vertices= qh_markvoronoi(facetlist, facets, printall, &isLower, &numcenters);
    -  totcount= qh_printvdiagram2 (NULL, NULL, vertices, innerouter, False);
    -  qh_fprintf(fp, 9231, "%d\n", totcount);
    -  totcount= qh_printvdiagram2 (fp, printvridge, vertices, innerouter, True /* inorder*/);
    -  qh_settempfree(&vertices);
    -#if 0  /* for testing qh_eachvoronoi_all */
    -  qh_fprintf(fp, 9232, "\n");
    -  totcount= qh_eachvoronoi_all(fp, printvridge, qh UPPERdelaunay, innerouter, True /* inorder*/);
    -  qh_fprintf(fp, 9233, "%d\n", totcount);
    -#endif
    -} /* printvdiagram */
    -
    -/*---------------------------------
    -
    -  qh_printvdiagram2( fp, printvridge, vertices, innerouter, inorder )
    -    visit all pairs of input sites (vertices) for selected Voronoi vertices
    -    vertices may include NULLs
    -
    -  innerouter:
    -    qh_RIDGEall   print inner ridges(bounded) and outer ridges(unbounded)
    -    qh_RIDGEinner print only inner ridges
    -    qh_RIDGEouter print only outer ridges
    -
    -  inorder:
    -    print 3-d Voronoi vertices in order
    -
    -  assumes:
    -    qh_markvoronoi marked facet->visitid for Voronoi vertices
    -    all facet->seen= False
    -    all facet->seen2= True
    -
    -  returns:
    -    total number of Voronoi ridges
    -    if printvridge,
    -      calls printvridge( fp, vertex, vertexA, centers) for each ridge
    -      [see qh_eachvoronoi()]
    -
    -  see:
    -    qh_eachvoronoi_all()
    -*/
    -int qh_printvdiagram2 (FILE *fp, printvridgeT printvridge, setT *vertices, qh_RIDGE innerouter, boolT inorder) {
    -  int totcount= 0;
    -  int vertex_i, vertex_n;
    -  vertexT *vertex;
    -
    -  FORALLvertices
    -    vertex->seen= False;
    -  FOREACHvertex_i_(vertices) {
    -    if (vertex) {
    -      if (qh GOODvertex > 0 && qh_pointid(vertex->point)+1 != qh GOODvertex)
    -        continue;
    -      totcount += qh_eachvoronoi(fp, printvridge, vertex, !qh_ALL, innerouter, inorder);
    -    }
    -  }
    -  return totcount;
    -} /* printvdiagram2 */
    -
    -/*---------------------------------
    -
    -  qh_printvertex( fp, vertex )
    -    prints the information in a vertex
    -    Duplicated as operator<< [QhullVertex.cpp]
    -*/
    -void qh_printvertex(FILE *fp, vertexT *vertex) {
    -  pointT *point;
    -  int k, count= 0;
    -  facetT *neighbor, **neighborp;
    -  realT r; /*bug fix*/
    -
    -  if (!vertex) {
    -    qh_fprintf(fp, 9234, "  NULLvertex\n");
    -    return;
    -  }
    -  qh_fprintf(fp, 9235, "- p%d(v%d):", qh_pointid(vertex->point), vertex->id);
    -  point= vertex->point;
    -  if (point) {
    -    for (k=qh hull_dim; k--; ) {
    -      r= *point++;
    -      qh_fprintf(fp, 9236, " %5.2g", r);
    -    }
    -  }
    -  if (vertex->deleted)
    -    qh_fprintf(fp, 9237, " deleted");
    -  if (vertex->delridge)
    -    qh_fprintf(fp, 9238, " ridgedeleted");
    -  qh_fprintf(fp, 9239, "\n");
    -  if (vertex->neighbors) {
    -    qh_fprintf(fp, 9240, "  neighbors:");
    -    FOREACHneighbor_(vertex) {
    -      if (++count % 100 == 0)
    -        qh_fprintf(fp, 9241, "\n     ");
    -      qh_fprintf(fp, 9242, " f%d", neighbor->id);
    -    }
    -    qh_fprintf(fp, 9243, "\n");
    -  }
    -} /* printvertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_printvertexlist( fp, string, facetlist, facets, printall )
    -    prints vertices used by a facetlist or facet set
    -    tests qh_skipfacet() if !printall
    -*/
    -void qh_printvertexlist(FILE *fp, const char* string, facetT *facetlist,
    -                         setT *facets, boolT printall) {
    -  vertexT *vertex, **vertexp;
    -  setT *vertices;
    -
    -  vertices= qh_facetvertices(facetlist, facets, printall);
    -  qh_fprintf(fp, 9244, "%s", string);
    -  FOREACHvertex_(vertices)
    -    qh_printvertex(fp, vertex);
    -  qh_settempfree(&vertices);
    -} /* printvertexlist */
    -
    -
    -/*---------------------------------
    -
    -  qh_printvertices( fp, string, vertices )
    -    prints vertices in a set
    -    duplicated as printVertexSet [QhullVertex.cpp]
    -*/
    -void qh_printvertices(FILE *fp, const char* string, setT *vertices) {
    -  vertexT *vertex, **vertexp;
    -
    -  qh_fprintf(fp, 9245, "%s", string);
    -  FOREACHvertex_(vertices)
    -    qh_fprintf(fp, 9246, " p%d(v%d)", qh_pointid(vertex->point), vertex->id);
    -  qh_fprintf(fp, 9247, "\n");
    -} /* printvertices */
    -
    -/*---------------------------------
    -
    -  qh_printvneighbors( fp, facetlist, facets, printall )
    -    print vertex neighbors of vertices in facetlist and facets ('FN')
    -
    -  notes:
    -    qh_countfacets clears facet->visitid for non-printed facets
    -
    -  design:
    -    collect facet count and related statistics
    -    if necessary, build neighbor sets for each vertex
    -    collect vertices in facetlist and facets
    -    build a point array for point->vertex and point->coplanar facet
    -    for each point
    -      list vertex neighbors or coplanar facet
    -*/
    -void qh_printvneighbors(FILE *fp, facetT* facetlist, setT *facets, boolT printall) {
    -  int numfacets, numsimplicial, numridges, totneighbors, numneighbors, numcoplanars, numtricoplanars;
    -  setT *vertices, *vertex_points, *coplanar_points;
    -  int numpoints= qh num_points + qh_setsize(qh other_points);
    -  vertexT *vertex, **vertexp;
    -  int vertex_i, vertex_n;
    -  facetT *facet, **facetp, *neighbor, **neighborp;
    -  pointT *point, **pointp;
    -
    -  qh_countfacets(facetlist, facets, printall, &numfacets, &numsimplicial,
    -      &totneighbors, &numridges, &numcoplanars, &numtricoplanars);  /* sets facet->visitid */
    -  qh_fprintf(fp, 9248, "%d\n", numpoints);
    -  qh_vertexneighbors();
    -  vertices= qh_facetvertices(facetlist, facets, printall);
    -  vertex_points= qh_settemp(numpoints);
    -  coplanar_points= qh_settemp(numpoints);
    -  qh_setzero(vertex_points, 0, numpoints);
    -  qh_setzero(coplanar_points, 0, numpoints);
    -  FOREACHvertex_(vertices)
    -    qh_point_add(vertex_points, vertex->point, vertex);
    -  FORALLfacet_(facetlist) {
    -    FOREACHpoint_(facet->coplanarset)
    -      qh_point_add(coplanar_points, point, facet);
    -  }
    -  FOREACHfacet_(facets) {
    -    FOREACHpoint_(facet->coplanarset)
    -      qh_point_add(coplanar_points, point, facet);
    -  }
    -  FOREACHvertex_i_(vertex_points) {
    -    if (vertex) {
    -      numneighbors= qh_setsize(vertex->neighbors);
    -      qh_fprintf(fp, 9249, "%d", numneighbors);
    -      if (qh hull_dim == 3)
    -        qh_order_vertexneighbors(vertex);
    -      else if (qh hull_dim >= 4)
    -        qsort(SETaddr_(vertex->neighbors, facetT), (size_t)numneighbors,
    -             sizeof(facetT *), qh_compare_facetvisit);
    -      FOREACHneighbor_(vertex)
    -        qh_fprintf(fp, 9250, " %d",
    -                 neighbor->visitid ? neighbor->visitid - 1 : 0 - neighbor->id);
    -      qh_fprintf(fp, 9251, "\n");
    -    }else if ((facet= SETelemt_(coplanar_points, vertex_i, facetT)))
    -      qh_fprintf(fp, 9252, "1 %d\n",
    -                  facet->visitid ? facet->visitid - 1 : 0 - facet->id);
    -    else
    -      qh_fprintf(fp, 9253, "0\n");
    -  }
    -  qh_settempfree(&coplanar_points);
    -  qh_settempfree(&vertex_points);
    -  qh_settempfree(&vertices);
    -} /* printvneighbors */
    -
    -/*---------------------------------
    -
    -  qh_printvoronoi( fp, format, facetlist, facets, printall )
    -    print voronoi diagram in 'o' or 'G' format
    -    for 'o' format
    -      prints voronoi centers for each facet and for infinity
    -      for each vertex, lists ids of printed facets or infinity
    -      assumes facetlist and facets are disjoint
    -    for 'G' format
    -      prints an OFF object
    -      adds a 0 coordinate to center
    -      prints infinity but does not list in vertices
    -
    -  see:
    -    qh_printvdiagram()
    -
    -  notes:
    -    if 'o',
    -      prints a line for each point except "at-infinity"
    -    if all facets are upperdelaunay,
    -      reverses lower and upper hull
    -*/
    -void qh_printvoronoi(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall) {
    -  int k, numcenters, numvertices= 0, numneighbors, numinf, vid=1, vertex_i, vertex_n;
    -  facetT *facet, **facetp, *neighbor, **neighborp;
    -  setT *vertices;
    -  vertexT *vertex;
    -  boolT isLower;
    -  unsigned int numfacets= (unsigned int) qh num_facets;
    -
    -  vertices= qh_markvoronoi(facetlist, facets, printall, &isLower, &numcenters);
    -  FOREACHvertex_i_(vertices) {
    -    if (vertex) {
    -      numvertices++;
    -      numneighbors = numinf = 0;
    -      FOREACHneighbor_(vertex) {
    -        if (neighbor->visitid == 0)
    -          numinf= 1;
    -        else if (neighbor->visitid < numfacets)
    -          numneighbors++;
    -      }
    -      if (numinf && !numneighbors) {
    -        SETelem_(vertices, vertex_i)= NULL;
    -        numvertices--;
    -      }
    -    }
    -  }
    -  if (format == qh_PRINTgeom)
    -    qh_fprintf(fp, 9254, "{appearance {+edge -face} OFF %d %d 1 # Voronoi centers and cells\n",
    -                numcenters, numvertices);
    -  else
    -    qh_fprintf(fp, 9255, "%d\n%d %d 1\n", qh hull_dim-1, numcenters, qh_setsize(vertices));
    -  if (format == qh_PRINTgeom) {
    -    for (k=qh hull_dim-1; k--; )
    -      qh_fprintf(fp, 9256, qh_REAL_1, 0.0);
    -    qh_fprintf(fp, 9257, " 0 # infinity not used\n");
    -  }else {
    -    for (k=qh hull_dim-1; k--; )
    -      qh_fprintf(fp, 9258, qh_REAL_1, qh_INFINITE);
    -    qh_fprintf(fp, 9259, "\n");
    -  }
    -  FORALLfacet_(facetlist) {
    -    if (facet->visitid && facet->visitid < numfacets) {
    -      if (format == qh_PRINTgeom)
    -        qh_fprintf(fp, 9260, "# %d f%d\n", vid++, facet->id);
    -      qh_printcenter(fp, format, NULL, facet);
    -    }
    -  }
    -  FOREACHfacet_(facets) {
    -    if (facet->visitid && facet->visitid < numfacets) {
    -      if (format == qh_PRINTgeom)
    -        qh_fprintf(fp, 9261, "# %d f%d\n", vid++, facet->id);
    -      qh_printcenter(fp, format, NULL, facet);
    -    }
    -  }
    -  FOREACHvertex_i_(vertices) {
    -    numneighbors= 0;
    -    numinf=0;
    -    if (vertex) {
    -      if (qh hull_dim == 3)
    -        qh_order_vertexneighbors(vertex);
    -      else if (qh hull_dim >= 4)
    -        qsort(SETaddr_(vertex->neighbors, vertexT),
    -             (size_t)qh_setsize(vertex->neighbors),
    -             sizeof(facetT *), qh_compare_facetvisit);
    -      FOREACHneighbor_(vertex) {
    -        if (neighbor->visitid == 0)
    -          numinf= 1;
    -        else if (neighbor->visitid < numfacets)
    -          numneighbors++;
    -      }
    -    }
    -    if (format == qh_PRINTgeom) {
    -      if (vertex) {
    -        qh_fprintf(fp, 9262, "%d", numneighbors);
    -        if (vertex) {
    -          FOREACHneighbor_(vertex) {
    -            if (neighbor->visitid && neighbor->visitid < numfacets)
    -              qh_fprintf(fp, 9263, " %d", neighbor->visitid);
    -          }
    -        }
    -        qh_fprintf(fp, 9264, " # p%d(v%d)\n", vertex_i, vertex->id);
    -      }else
    -        qh_fprintf(fp, 9265, " # p%d is coplanar or isolated\n", vertex_i);
    -    }else {
    -      if (numinf)
    -        numneighbors++;
    -      qh_fprintf(fp, 9266, "%d", numneighbors);
    -      if (vertex) {
    -        FOREACHneighbor_(vertex) {
    -          if (neighbor->visitid == 0) {
    -            if (numinf) {
    -              numinf= 0;
    -              qh_fprintf(fp, 9267, " %d", neighbor->visitid);
    -            }
    -          }else if (neighbor->visitid < numfacets)
    -            qh_fprintf(fp, 9268, " %d", neighbor->visitid);
    -        }
    -      }
    -      qh_fprintf(fp, 9269, "\n");
    -    }
    -  }
    -  if (format == qh_PRINTgeom)
    -    qh_fprintf(fp, 9270, "}\n");
    -  qh_settempfree(&vertices);
    -} /* printvoronoi */
    -
    -/*---------------------------------
    -
    -  qh_printvnorm( fp, vertex, vertexA, centers, unbounded )
    -    print one separating plane of the Voronoi diagram for a pair of input sites
    -    unbounded==True if centers includes vertex-at-infinity
    -
    -  assumes:
    -    qh_ASvoronoi and qh_vertexneighbors() already set
    -
    -  note:
    -    parameter unbounded is UNUSED by this callback
    -
    -  see:
    -    qh_printvdiagram()
    -    qh_eachvoronoi()
    -*/
    -void qh_printvnorm(FILE *fp, vertexT *vertex, vertexT *vertexA, setT *centers, boolT unbounded) {
    -  pointT *normal;
    -  realT offset;
    -  int k;
    -  QHULL_UNUSED(unbounded);
    -
    -  normal= qh_detvnorm(vertex, vertexA, centers, &offset);
    -  qh_fprintf(fp, 9271, "%d %d %d ",
    -      2+qh hull_dim, qh_pointid(vertex->point), qh_pointid(vertexA->point));
    -  for (k=0; k< qh hull_dim-1; k++)
    -    qh_fprintf(fp, 9272, qh_REAL_1, normal[k]);
    -  qh_fprintf(fp, 9273, qh_REAL_1, offset);
    -  qh_fprintf(fp, 9274, "\n");
    -} /* printvnorm */
    -
    -/*---------------------------------
    -
    -  qh_printvridge( fp, vertex, vertexA, centers, unbounded )
    -    print one ridge of the Voronoi diagram for a pair of input sites
    -    unbounded==True if centers includes vertex-at-infinity
    -
    -  see:
    -    qh_printvdiagram()
    -
    -  notes:
    -    the user may use a different function
    -    parameter unbounded is UNUSED
    -*/
    -void qh_printvridge(FILE *fp, vertexT *vertex, vertexT *vertexA, setT *centers, boolT unbounded) {
    -  facetT *facet, **facetp;
    -  QHULL_UNUSED(unbounded);
    -
    -  qh_fprintf(fp, 9275, "%d %d %d", qh_setsize(centers)+2,
    -       qh_pointid(vertex->point), qh_pointid(vertexA->point));
    -  FOREACHfacet_(centers)
    -    qh_fprintf(fp, 9276, " %d", facet->visitid);
    -  qh_fprintf(fp, 9277, "\n");
    -} /* printvridge */
    -
    -/*---------------------------------
    -
    -  qh_projectdim3( source, destination )
    -    project 2-d 3-d or 4-d point to a 3-d point
    -    uses qh.DROPdim and qh.hull_dim
    -    source and destination may be the same
    -
    -  notes:
    -    allocate 4 elements to destination just in case
    -*/
    -void qh_projectdim3 (pointT *source, pointT *destination) {
    -  int i,k;
    -
    -  for (k=0, i=0; k < qh hull_dim; k++) {
    -    if (qh hull_dim == 4) {
    -      if (k != qh DROPdim)
    -        destination[i++]= source[k];
    -    }else if (k == qh DROPdim)
    -      destination[i++]= 0;
    -    else
    -      destination[i++]= source[k];
    -  }
    -  while (i < 3)
    -    destination[i++]= 0.0;
    -} /* projectdim3 */
    -
    -/*---------------------------------
    -
    -  qh_readfeasible( dim, curline )
    -    read feasible point from current line and qh.fin
    -
    -  returns:
    -    number of lines read from qh.fin
    -    sets qh.FEASIBLEpoint with malloc'd coordinates
    -
    -  notes:
    -    checks for qh.HALFspace
    -    assumes dim > 1
    -
    -  see:
    -    qh_setfeasible
    -*/
    -int qh_readfeasible(int dim, const char *curline) {
    -  boolT isfirst= True;
    -  int linecount= 0, tokcount= 0;
    -  const char *s;
    -  char *t, firstline[qh_MAXfirst+1];
    -  coordT *coords, value;
    -
    -  if (!qh HALFspace) {
    -    qh_fprintf(qh ferr, 6070, "qhull input error: feasible point(dim 1 coords) is only valid for halfspace intersection\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (qh feasible_string)
    -    qh_fprintf(qh ferr, 7057, "qhull input warning: feasible point(dim 1 coords) overrides 'Hn,n,n' feasible point for halfspace intersection\n");
    -  if (!(qh feasible_point= (coordT*)qh_malloc(dim* sizeof(coordT)))) {
    -    qh_fprintf(qh ferr, 6071, "qhull error: insufficient memory for feasible point\n");
    -    qh_errexit(qh_ERRmem, NULL, NULL);
    -  }
    -  coords= qh feasible_point;
    -  while ((s= (isfirst ?  curline : fgets(firstline, qh_MAXfirst, qh fin)))) {
    -    if (isfirst)
    -      isfirst= False;
    -    else
    -      linecount++;
    -    while (*s) {
    -      while (isspace(*s))
    -        s++;
    -      value= qh_strtod(s, &t);
    -      if (s == t)
    -        break;
    -      s= t;
    -      *(coords++)= value;
    -      if (++tokcount == dim) {
    -        while (isspace(*s))
    -          s++;
    -        qh_strtod(s, &t);
    -        if (s != t) {
    -          qh_fprintf(qh ferr, 6072, "qhull input error: coordinates for feasible point do not finish out the line: %s\n",
    -               s);
    -          qh_errexit(qh_ERRinput, NULL, NULL);
    -        }
    -        return linecount;
    -      }
    -    }
    -  }
    -  qh_fprintf(qh ferr, 6073, "qhull input error: only %d coordinates.  Could not read %d-d feasible point.\n",
    -           tokcount, dim);
    -  qh_errexit(qh_ERRinput, NULL, NULL);
    -  return 0;
    -} /* readfeasible */
    -
    -/*---------------------------------
    -
    -  qh_readpoints( numpoints, dimension, ismalloc )
    -    read points from qh.fin into qh.first_point, qh.num_points
    -    qh.fin is lines of coordinates, one per vertex, first line number of points
    -    if 'rbox D4',
    -      gives message
    -    if qh.ATinfinity,
    -      adds point-at-infinity for Delaunay triangulations
    -
    -  returns:
    -    number of points, array of point coordinates, dimension, ismalloc True
    -    if qh.DELAUNAY & !qh.PROJECTinput, projects points to paraboloid
    -        and clears qh.PROJECTdelaunay
    -    if qh.HALFspace, reads optional feasible point, reads halfspaces,
    -        converts to dual.
    -
    -  for feasible point in "cdd format" in 3-d:
    -    3 1
    -    coordinates
    -    comments
    -    begin
    -    n 4 real/integer
    -    ...
    -    end
    -
    -  notes:
    -    dimension will change in qh_initqhull_globals if qh.PROJECTinput
    -    uses malloc() since qh_mem not initialized
    -    FIXUP QH10012: qh_readpoints needs rewriting, too long
    -*/
    -coordT *qh_readpoints(int *numpoints, int *dimension, boolT *ismalloc) {
    -  coordT *points, *coords, *infinity= NULL;
    -  realT paraboloid, maxboloid= -REALmax, value;
    -  realT *coordp= NULL, *offsetp= NULL, *normalp= NULL;
    -  char *s= 0, *t, firstline[qh_MAXfirst+1];
    -  int diminput=0, numinput=0, dimfeasible= 0, newnum, k, tempi;
    -  int firsttext=0, firstshort=0, firstlong=0, firstpoint=0;
    -  int tokcount= 0, linecount=0, maxcount, coordcount=0;
    -  boolT islong, isfirst= True, wasbegin= False;
    -  boolT isdelaunay= qh DELAUNAY && !qh PROJECTinput;
    -
    -  if (qh CDDinput) {
    -    while ((s= fgets(firstline, qh_MAXfirst, qh fin))) {
    -      linecount++;
    -      if (qh HALFspace && linecount == 1 && isdigit(*s)) {
    -        dimfeasible= qh_strtol(s, &s);
    -        while (isspace(*s))
    -          s++;
    -        if (qh_strtol(s, &s) == 1)
    -          linecount += qh_readfeasible(dimfeasible, s);
    -        else
    -          dimfeasible= 0;
    -      }else if (!memcmp(firstline, "begin", (size_t)5) || !memcmp(firstline, "BEGIN", (size_t)5))
    -        break;
    -      else if (!*qh rbox_command)
    -        strncat(qh rbox_command, s, sizeof(qh rbox_command)-1);
    -    }
    -    if (!s) {
    -      qh_fprintf(qh ferr, 6074, "qhull input error: missing \"begin\" for cdd-formated input\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -  }
    -  while (!numinput && (s= fgets(firstline, qh_MAXfirst, qh fin))) {
    -    linecount++;
    -    if (!memcmp(s, "begin", (size_t)5) || !memcmp(s, "BEGIN", (size_t)5))
    -      wasbegin= True;
    -    while (*s) {
    -      while (isspace(*s))
    -        s++;
    -      if (!*s)
    -        break;
    -      if (!isdigit(*s)) {
    -        if (!*qh rbox_command) {
    -          strncat(qh rbox_command, s, sizeof(qh rbox_command)-1);
    -          firsttext= linecount;
    -        }
    -        break;
    -      }
    -      if (!diminput)
    -        diminput= qh_strtol(s, &s);
    -      else {
    -        numinput= qh_strtol(s, &s);
    -        if (numinput == 1 && diminput >= 2 && qh HALFspace && !qh CDDinput) {
    -          linecount += qh_readfeasible(diminput, s); /* checks if ok */
    -          dimfeasible= diminput;
    -          diminput= numinput= 0;
    -        }else
    -          break;
    -      }
    -    }
    -  }
    -  if (!s) {
    -    qh_fprintf(qh ferr, 6075, "qhull input error: short input file.  Did not find dimension and number of points\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (diminput > numinput) {
    -    tempi= diminput;    /* exchange dim and n, e.g., for cdd input format */
    -    diminput= numinput;
    -    numinput= tempi;
    -  }
    -  if (diminput < 2) {
    -    qh_fprintf(qh ferr, 6220,"qhull input error: dimension %d(first number) should be at least 2\n",
    -            diminput);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (isdelaunay) {
    -    qh PROJECTdelaunay= False;
    -    if (qh CDDinput)
    -      *dimension= diminput;
    -    else
    -      *dimension= diminput+1;
    -    *numpoints= numinput;
    -    if (qh ATinfinity)
    -      (*numpoints)++;
    -  }else if (qh HALFspace) {
    -    *dimension= diminput - 1;
    -    *numpoints= numinput;
    -    if (diminput < 3) {
    -      qh_fprintf(qh ferr, 6221,"qhull input error: dimension %d(first number, includes offset) should be at least 3 for halfspaces\n",
    -            diminput);
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    if (dimfeasible) {
    -      if (dimfeasible != *dimension) {
    -        qh_fprintf(qh ferr, 6222,"qhull input error: dimension %d of feasible point is not one less than dimension %d for halfspaces\n",
    -          dimfeasible, diminput);
    -        qh_errexit(qh_ERRinput, NULL, NULL);
    -      }
    -    }else
    -      qh_setfeasible(*dimension);
    -  }else {
    -    if (qh CDDinput)
    -      *dimension= diminput-1;
    -    else
    -      *dimension= diminput;
    -    *numpoints= numinput;
    -  }
    -  qh normal_size= *dimension * sizeof(coordT); /* for tracing with qh_printpoint */
    -  if (qh HALFspace) {
    -    qh half_space= coordp= (coordT*)qh_malloc(qh normal_size + sizeof(coordT));
    -    if (qh CDDinput) {
    -      offsetp= qh half_space;
    -      normalp= offsetp + 1;
    -    }else {
    -      normalp= qh half_space;
    -      offsetp= normalp + *dimension;
    -    }
    -  }
    -  qh maxline= diminput * (qh_REALdigits + 5);
    -  maximize_(qh maxline, 500);
    -  qh line= (char*)qh_malloc((qh maxline+1) * sizeof(char));
    -  *ismalloc= True;  /* use malloc since memory not setup */
    -  coords= points= qh temp_malloc=
    -        (coordT*)qh_malloc((*numpoints)*(*dimension)*sizeof(coordT));
    -  if (!coords || !qh line || (qh HALFspace && !qh half_space)) {
    -    qh_fprintf(qh ferr, 6076, "qhull error: insufficient memory to read %d points\n",
    -            numinput);
    -    qh_errexit(qh_ERRmem, NULL, NULL);
    -  }
    -  if (isdelaunay && qh ATinfinity) {
    -    infinity= points + numinput * (*dimension);
    -    for (k= (*dimension) - 1; k--; )
    -      infinity[k]= 0.0;
    -  }
    -  maxcount= numinput * diminput;
    -  paraboloid= 0.0;
    -  while ((s= (isfirst ?  s : fgets(qh line, qh maxline, qh fin)))) {
    -    if (!isfirst) {
    -      linecount++;
    -      if (*s == 'e' || *s == 'E') {
    -        if (!memcmp(s, "end", (size_t)3) || !memcmp(s, "END", (size_t)3)) {
    -          if (qh CDDinput )
    -            break;
    -          else if (wasbegin)
    -            qh_fprintf(qh ferr, 7058, "qhull input warning: the input appears to be in cdd format.  If so, use 'Fd'\n");
    -        }
    -      }
    -    }
    -    islong= False;
    -    while (*s) {
    -      while (isspace(*s))
    -        s++;
    -      value= qh_strtod(s, &t);
    -      if (s == t) {
    -        if (!*qh rbox_command)
    -         strncat(qh rbox_command, s, sizeof(qh rbox_command)-1);
    -        if (*s && !firsttext)
    -          firsttext= linecount;
    -        if (!islong && !firstshort && coordcount)
    -          firstshort= linecount;
    -        break;
    -      }
    -      if (!firstpoint)
    -        firstpoint= linecount;
    -      s= t;
    -      if (++tokcount > maxcount)
    -        continue;
    -      if (qh HALFspace) {
    -        if (qh CDDinput)
    -          *(coordp++)= -value; /* both coefficients and offset */
    -        else
    -          *(coordp++)= value;
    -      }else {
    -        *(coords++)= value;
    -        if (qh CDDinput && !coordcount) {
    -          if (value != 1.0) {
    -            qh_fprintf(qh ferr, 6077, "qhull input error: for cdd format, point at line %d does not start with '1'\n",
    -                   linecount);
    -            qh_errexit(qh_ERRinput, NULL, NULL);
    -          }
    -          coords--;
    -        }else if (isdelaunay) {
    -          paraboloid += value * value;
    -          if (qh ATinfinity) {
    -            if (qh CDDinput)
    -              infinity[coordcount-1] += value;
    -            else
    -              infinity[coordcount] += value;
    -          }
    -        }
    -      }
    -      if (++coordcount == diminput) {
    -        coordcount= 0;
    -        if (isdelaunay) {
    -          *(coords++)= paraboloid;
    -          maximize_(maxboloid, paraboloid);
    -          paraboloid= 0.0;
    -        }else if (qh HALFspace) {
    -          if (!qh_sethalfspace(*dimension, coords, &coords, normalp, offsetp, qh feasible_point)) {
    -            qh_fprintf(qh ferr, 8048, "The halfspace was on line %d\n", linecount);
    -            if (wasbegin)
    -              qh_fprintf(qh ferr, 8049, "The input appears to be in cdd format.  If so, you should use option 'Fd'\n");
    -            qh_errexit(qh_ERRinput, NULL, NULL);
    -          }
    -          coordp= qh half_space;
    -        }
    -        while (isspace(*s))
    -          s++;
    -        if (*s) {
    -          islong= True;
    -          if (!firstlong)
    -            firstlong= linecount;
    -        }
    -      }
    -    }
    -    if (!islong && !firstshort && coordcount)
    -      firstshort= linecount;
    -    if (!isfirst && s - qh line >= qh maxline) {
    -      qh_fprintf(qh ferr, 6078, "qhull input error: line %d contained more than %d characters\n",
    -              linecount, (int) (s - qh line));   /* WARN64 */
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    isfirst= False;
    -  }
    -  if (tokcount != maxcount) {
    -    newnum= fmin_(numinput, tokcount/diminput);
    -    qh_fprintf(qh ferr, 7073,"\
    -qhull warning: instead of %d %d-dimensional points, input contains\n\
    -%d points and %d extra coordinates.  Line %d is the first\npoint",
    -       numinput, diminput, tokcount/diminput, tokcount % diminput, firstpoint);
    -    if (firsttext)
    -      qh_fprintf(qh ferr, 8051, ", line %d is the first comment", firsttext);
    -    if (firstshort)
    -      qh_fprintf(qh ferr, 8052, ", line %d is the first short\nline", firstshort);
    -    if (firstlong)
    -      qh_fprintf(qh ferr, 8053, ", line %d is the first long line", firstlong);
    -    qh_fprintf(qh ferr, 8054, ".  Continue with %d points.\n", newnum);
    -    numinput= newnum;
    -    if (isdelaunay && qh ATinfinity) {
    -      for (k= tokcount % diminput; k--; )
    -        infinity[k] -= *(--coords);
    -      *numpoints= newnum+1;
    -    }else {
    -      coords -= tokcount % diminput;
    -      *numpoints= newnum;
    -    }
    -  }
    -  if (isdelaunay && qh ATinfinity) {
    -    for (k= (*dimension) -1; k--; )
    -      infinity[k] /= numinput;
    -    if (coords == infinity)
    -      coords += (*dimension) -1;
    -    else {
    -      for (k=0; k < (*dimension) -1; k++)
    -        *(coords++)= infinity[k];
    -    }
    -    *(coords++)= maxboloid * 1.1;
    -  }
    -  if (qh rbox_command[0]) {
    -    qh rbox_command[strlen(qh rbox_command)-1]= '\0';
    -    if (!strcmp(qh rbox_command, "./rbox D4"))
    -      qh_fprintf(qh ferr, 8055, "\n\
    -This is the qhull test case.  If any errors or core dumps occur,\n\
    -recompile qhull with 'make new'.  If errors still occur, there is\n\
    -an incompatibility.  You should try a different compiler.  You can also\n\
    -change the choices in user.h.  If you discover the source of the problem,\n\
    -please send mail to qhull_bug@qhull.org.\n\
    -\n\
    -Type 'qhull' for a short list of options.\n");
    -  }
    -  qh_free(qh line);
    -  qh line= NULL;
    -  if (qh half_space) {
    -    qh_free(qh half_space);
    -    qh half_space= NULL;
    -  }
    -  qh temp_malloc= NULL;
    -  trace1((qh ferr, 1008,"qh_readpoints: read in %d %d-dimensional points\n",
    -          numinput, diminput));
    -  return(points);
    -} /* readpoints */
    -
    -
    -/*---------------------------------
    -
    -  qh_setfeasible( dim )
    -    set qh.FEASIBLEpoint from qh.feasible_string in "n,n,n" or "n n n" format
    -
    -  notes:
    -    "n,n,n" already checked by qh_initflags()
    -    see qh_readfeasible()
    -*/
    -void qh_setfeasible(int dim) {
    -  int tokcount= 0;
    -  char *s;
    -  coordT *coords, value;
    -
    -  if (!(s= qh feasible_string)) {
    -    qh_fprintf(qh ferr, 6223, "\
    -qhull input error: halfspace intersection needs a feasible point.\n\
    -Either prepend the input with 1 point or use 'Hn,n,n'.  See manual.\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (!(qh feasible_point= (pointT*)qh_malloc(dim * sizeof(coordT)))) {
    -    qh_fprintf(qh ferr, 6079, "qhull error: insufficient memory for 'Hn,n,n'\n");
    -    qh_errexit(qh_ERRmem, NULL, NULL);
    -  }
    -  coords= qh feasible_point;
    -  while (*s) {
    -    value= qh_strtod(s, &s);
    -    if (++tokcount > dim) {
    -      qh_fprintf(qh ferr, 7059, "qhull input warning: more coordinates for 'H%s' than dimension %d\n",
    -          qh feasible_string, dim);
    -      break;
    -    }
    -    *(coords++)= value;
    -    if (*s)
    -      s++;
    -  }
    -  while (++tokcount <= dim)
    -    *(coords++)= 0.0;
    -} /* setfeasible */
    -
    -/*---------------------------------
    -
    -  qh_skipfacet( facet )
    -    returns 'True' if this facet is not to be printed
    -
    -  notes:
    -    based on the user provided slice thresholds and 'good' specifications
    -*/
    -boolT qh_skipfacet(facetT *facet) {
    -  facetT *neighbor, **neighborp;
    -
    -  if (qh PRINTneighbors) {
    -    if (facet->good)
    -      return !qh PRINTgood;
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->good)
    -        return False;
    -    }
    -    return True;
    -  }else if (qh PRINTgood)
    -    return !facet->good;
    -  else if (!facet->normal)
    -    return True;
    -  return(!qh_inthresholds(facet->normal, NULL));
    -} /* skipfacet */
    -
    -/*---------------------------------
    -
    -  qh_skipfilename( string )
    -    returns pointer to character after filename
    -
    -  notes:
    -    skips leading spaces
    -    ends with spacing or eol
    -    if starts with ' or " ends with the same, skipping \' or \"
    -    For qhull, qh_argv_to_command() only uses double quotes
    -*/
    -char *qh_skipfilename(char *filename) {
    -  char *s= filename;  /* non-const due to return */
    -  char c;
    -
    -  while (*s && isspace(*s))
    -    s++;
    -  c= *s++;
    -  if (c == '\0') {
    -    qh_fprintf(qh ferr, 6204, "qhull input error: filename expected, none found.\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (c == '\'' || c == '"') {
    -    while (*s !=c || s[-1] == '\\') {
    -      if (!*s) {
    -        qh_fprintf(qh ferr, 6203, "qhull input error: missing quote after filename -- %s\n", filename);
    -        qh_errexit(qh_ERRinput, NULL, NULL);
    -      }
    -      s++;
    -    }
    -    s++;
    -  }
    -  else while (*s && !isspace(*s))
    -      s++;
    -  return s;
    -} /* skipfilename */
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/io.h b/scipy-0.10.1/scipy/spatial/qhull/src/io.h
    deleted file mode 100644
    index eb78b75550..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/io.h
    +++ /dev/null
    @@ -1,159 +0,0 @@
    -/*
      ---------------------------------
    -
    -   io.h
    -   declarations of Input/Output functions
    -
    -   see README, libqhull.h and io.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/io.h#25 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFio
    -#define qhDEFio 1
    -
    -#include "libqhull.h"
    -
    -/*============ constants and flags ==================*/
    -
    -/*----------------------------------
    -
    -  qh_MAXfirst
    -    maximum length of first two lines of stdin
    -*/
    -#define qh_MAXfirst  200
    -
    -/*----------------------------------
    -
    -  qh_MINradius
    -    min radius for Gp and Gv, fraction of maxcoord
    -*/
    -#define qh_MINradius 0.02
    -
    -/*----------------------------------
    -
    -  qh_GEOMepsilon
    -    adjust outer planes for 'lines closer' and geomview roundoff.
    -    This prevents bleed through.
    -*/
    -#define qh_GEOMepsilon 2e-3
    -
    -/*----------------------------------
    -
    -  qh_WHITESPACE
    -    possible values of white space
    -*/
    -#define qh_WHITESPACE " \n\t\v\r\f"
    -
    -
    -/*----------------------------------
    -
    -  qh_RIDGE
    -    to select which ridges to print in qh_eachvoronoi
    -*/
    -typedef enum
    -{
    -    qh_RIDGEall = 0, qh_RIDGEinner, qh_RIDGEouter
    -}
    -qh_RIDGE;
    -
    -/*----------------------------------
    -
    -  printvridgeT
    -    prints results of qh_printvdiagram
    -
    -  see:
    -    qh_printvridge for an example
    -*/
    -typedef void (*printvridgeT)(FILE *fp, vertexT *vertex, vertexT *vertexA, setT *centers, boolT unbounded);
    -
    -/*============== -prototypes in alphabetical order =========*/
    -
    -void    dfacet(unsigned id);
    -void    dvertex(unsigned id);
    -int     qh_compare_facetarea(const void *p1, const void *p2);
    -int     qh_compare_facetmerge(const void *p1, const void *p2);
    -int     qh_compare_facetvisit(const void *p1, const void *p2);
    -int     qh_compare_vertexpoint(const void *p1, const void *p2); /* not used */
    -void    qh_copyfilename(char *filename, int size, const char* source, int length);
    -void    qh_countfacets(facetT *facetlist, setT *facets, boolT printall,
    -              int *numfacetsp, int *numsimplicialp, int *totneighborsp,
    -              int *numridgesp, int *numcoplanarsp, int *numnumtricoplanarsp);
    -pointT *qh_detvnorm(vertexT *vertex, vertexT *vertexA, setT *centers, realT *offsetp);
    -setT   *qh_detvridge(vertexT *vertex);
    -setT   *qh_detvridge3 (vertexT *atvertex, vertexT *vertex);
    -int     qh_eachvoronoi(FILE *fp, printvridgeT printvridge, vertexT *atvertex, boolT visitall, qh_RIDGE innerouter, boolT inorder);
    -int     qh_eachvoronoi_all(FILE *fp, printvridgeT printvridge, boolT isUpper, qh_RIDGE innerouter, boolT inorder);
    -void    qh_facet2point(facetT *facet, pointT **point0, pointT **point1, realT *mindist);
    -setT   *qh_facetvertices(facetT *facetlist, setT *facets, boolT allfacets);
    -void    qh_geomplanes(facetT *facet, realT *outerplane, realT *innerplane);
    -void    qh_markkeep(facetT *facetlist);
    -setT   *qh_markvoronoi(facetT *facetlist, setT *facets, boolT printall, boolT *isLowerp, int *numcentersp);
    -void    qh_order_vertexneighbors(vertexT *vertex);
    -void    qh_prepare_output(void);
    -void    qh_printafacet(FILE *fp, qh_PRINT format, facetT *facet, boolT printall);
    -void    qh_printbegin(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printcenter(FILE *fp, qh_PRINT format, const char *string, facetT *facet);
    -void    qh_printcentrum(FILE *fp, facetT *facet, realT radius);
    -void    qh_printend(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printend4geom(FILE *fp, facetT *facet, int *num, boolT printall);
    -void    qh_printextremes(FILE *fp, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printextremes_2d(FILE *fp, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printextremes_d(FILE *fp, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printfacet(FILE *fp, facetT *facet);
    -void    qh_printfacet2math(FILE *fp, facetT *facet, qh_PRINT format, int notfirst);
    -void    qh_printfacet2geom(FILE *fp, facetT *facet, realT color[3]);
    -void    qh_printfacet2geom_points(FILE *fp, pointT *point1, pointT *point2,
    -                               facetT *facet, realT offset, realT color[3]);
    -void    qh_printfacet3math(FILE *fp, facetT *facet, qh_PRINT format, int notfirst);
    -void    qh_printfacet3geom_nonsimplicial(FILE *fp, facetT *facet, realT color[3]);
    -void    qh_printfacet3geom_points(FILE *fp, setT *points, facetT *facet, realT offset, realT color[3]);
    -void    qh_printfacet3geom_simplicial(FILE *fp, facetT *facet, realT color[3]);
    -void    qh_printfacet3vertex(FILE *fp, facetT *facet, qh_PRINT format);
    -void    qh_printfacet4geom_nonsimplicial(FILE *fp, facetT *facet, realT color[3]);
    -void    qh_printfacet4geom_simplicial(FILE *fp, facetT *facet, realT color[3]);
    -void    qh_printfacetNvertex_nonsimplicial(FILE *fp, facetT *facet, int id, qh_PRINT format);
    -void    qh_printfacetNvertex_simplicial(FILE *fp, facetT *facet, qh_PRINT format);
    -void    qh_printfacetheader(FILE *fp, facetT *facet);
    -void    qh_printfacetridges(FILE *fp, facetT *facet);
    -void    qh_printfacets(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printhyperplaneintersection(FILE *fp, facetT *facet1, facetT *facet2,
    -                   setT *vertices, realT color[3]);
    -void    qh_printneighborhood(FILE *fp, qh_PRINT format, facetT *facetA, facetT *facetB, boolT printall);
    -void    qh_printline3geom(FILE *fp, pointT *pointA, pointT *pointB, realT color[3]);
    -void    qh_printpoint(FILE *fp, const char *string, pointT *point);
    -void    qh_printpointid(FILE *fp, const char *string, int dim, pointT *point, int id);
    -void    qh_printpoint3 (FILE *fp, pointT *point);
    -void    qh_printpoints_out(FILE *fp, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printpointvect(FILE *fp, pointT *point, coordT *normal, pointT *center, realT radius, realT color[3]);
    -void    qh_printpointvect2 (FILE *fp, pointT *point, coordT *normal, pointT *center, realT radius);
    -void    qh_printridge(FILE *fp, ridgeT *ridge);
    -void    qh_printspheres(FILE *fp, setT *vertices, realT radius);
    -void    qh_printvdiagram(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall);
    -int     qh_printvdiagram2 (FILE *fp, printvridgeT printvridge, setT *vertices, qh_RIDGE innerouter, boolT inorder);
    -void    qh_printvertex(FILE *fp, vertexT *vertex);
    -void    qh_printvertexlist(FILE *fp, const char* string, facetT *facetlist,
    -                         setT *facets, boolT printall);
    -void    qh_printvertices(FILE *fp, const char* string, setT *vertices);
    -void    qh_printvneighbors(FILE *fp, facetT* facetlist, setT *facets, boolT printall);
    -void    qh_printvoronoi(FILE *fp, qh_PRINT format, facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printvnorm(FILE *fp, vertexT *vertex, vertexT *vertexA, setT *centers, boolT unbounded);
    -void    qh_printvridge(FILE *fp, vertexT *vertex, vertexT *vertexA, setT *centers, boolT unbounded);
    -void    qh_produce_output(void);
    -void    qh_produce_output2(void);
    -void    qh_projectdim3 (pointT *source, pointT *destination);
    -int     qh_readfeasible(int dim, const char *curline);
    -coordT *qh_readpoints(int *numpoints, int *dimension, boolT *ismalloc);
    -void    qh_setfeasible(int dim);
    -boolT   qh_skipfacet(facetT *facet);
    -char   *qh_skipfilename(char *filename);
    -
    -#endif /* qhDEFio */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/libqhull.c b/scipy-0.10.1/scipy/spatial/qhull/src/libqhull.c
    deleted file mode 100644
    index 0854b0a2d7..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/libqhull.c
    +++ /dev/null
    @@ -1,1401 +0,0 @@
    -/*
      ---------------------------------
    -
    -   libqhull.c
    -   Quickhull algorithm for convex hulls
    -
    -   qhull() and top-level routines
    -
    -   see qh-qhull.htm, libqhull.h, unix.c
    -
    -   see qhull_a.h for internal functions
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/libqhull.c#7 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*============= functions in alphabetic order after qhull() =======*/
    -
    -/*---------------------------------
    -
    -  qh_qhull()
    -    compute DIM3 convex hull of qh.num_points starting at qh.first_point
    -    qh contains all global options and variables
    -
    -  returns:
    -    returns polyhedron
    -      qh.facet_list, qh.num_facets, qh.vertex_list, qh.num_vertices,
    -
    -    returns global variables
    -      qh.hulltime, qh.max_outside, qh.interior_point, qh.max_vertex, qh.min_vertex
    -
    -    returns precision constants
    -      qh.ANGLEround, centrum_radius, cos_max, DISTround, MAXabs_coord, ONEmerge
    -
    -  notes:
    -    unless needed for output
    -      qh.max_vertex and qh.min_vertex are max/min due to merges
    -
    -  see:
    -    to add individual points to either qh.num_points
    -      use qh_addpoint()
    -
    -    if qh.GETarea
    -      qh_produceoutput() returns qh.totarea and qh.totvol via qh_getarea()
    -
    -  design:
    -    record starting time
    -    initialize hull and partition points
    -    build convex hull
    -    unless early termination
    -      update facet->maxoutside for vertices, coplanar, and near-inside points
    -    error if temporary sets exist
    -    record end time
    -*/
    -
    -void qh_qhull(void) {
    -  int numoutside;
    -
    -  qh hulltime= qh_CPUclock;
    -  if (qh RERUN || qh JOGGLEmax < REALmax/2)
    -    qh_build_withrestart();
    -  else {
    -    qh_initbuild();
    -    qh_buildhull();
    -  }
    -  if (!qh STOPpoint && !qh STOPcone) {
    -    if (qh ZEROall_ok && !qh TESTvneighbors && qh MERGEexact)
    -      qh_checkzero( qh_ALL);
    -    if (qh ZEROall_ok && !qh TESTvneighbors && !qh WAScoplanar) {
    -      trace2((qh ferr, 2055, "qh_qhull: all facets are clearly convex and no coplanar points.  Post-merging and check of maxout not needed.\n"));
    -      qh DOcheckmax= False;
    -    }else {
    -      if (qh MERGEexact || (qh hull_dim > qh_DIMreduceBuild && qh PREmerge))
    -        qh_postmerge("First post-merge", qh premerge_centrum, qh premerge_cos,
    -             (qh POSTmerge ? False : qh TESTvneighbors));
    -      else if (!qh POSTmerge && qh TESTvneighbors)
    -        qh_postmerge("For testing vertex neighbors", qh premerge_centrum,
    -             qh premerge_cos, True);
    -      if (qh POSTmerge)
    -        qh_postmerge("For post-merging", qh postmerge_centrum,
    -             qh postmerge_cos, qh TESTvneighbors);
    -      if (qh visible_list == qh facet_list) { /* i.e., merging done */
    -        qh findbestnew= True;
    -        qh_partitionvisible(/*visible_list, newfacet_list*/ !qh_ALL, &numoutside);
    -        qh findbestnew= False;
    -        qh_deletevisible(/*qh visible_list*/);
    -        qh_resetlists(False, qh_RESETvisible /*qh visible_list newvertex_list newfacet_list */);
    -      }
    -    }
    -    if (qh DOcheckmax){
    -      if (qh REPORTfreq) {
    -        qh_buildtracing(NULL, NULL);
    -        qh_fprintf(qh ferr, 8115, "\nTesting all coplanar points.\n");
    -      }
    -      qh_check_maxout();
    -    }
    -    if (qh KEEPnearinside && !qh maxoutdone)
    -      qh_nearcoplanar();
    -  }
    -  if (qh_setsize(qhmem.tempstack) != 0) {
    -    qh_fprintf(qh ferr, 6164, "qhull internal error (qh_qhull): temporary sets not empty(%d)\n",
    -             qh_setsize(qhmem.tempstack));
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  qh hulltime= qh_CPUclock - qh hulltime;
    -  qh QHULLfinished= True;
    -  trace1((qh ferr, 1036, "Qhull: algorithm completed\n"));
    -} /* qhull */
    -
    -/*---------------------------------
    -
    -  qh_addpoint( furthest, facet, checkdist )
    -    add point (usually furthest point) above facet to hull
    -    if checkdist,
    -      check that point is above facet.
    -      if point is not outside of the hull, uses qh_partitioncoplanar()
    -      assumes that facet is defined by qh_findbestfacet()
    -    else if facet specified,
    -      assumes that point is above facet (major damage if below)
    -    for Delaunay triangulations,
    -      Use qh_setdelaunay() to lift point to paraboloid and scale by 'Qbb' if needed
    -      Do not use options 'Qbk', 'QBk', or 'QbB' since they scale the coordinates.
    -
    -  returns:
    -    returns False if user requested an early termination
    -     qh.visible_list, newfacet_list, delvertex_list, NEWfacets may be defined
    -    updates qh.facet_list, qh.num_facets, qh.vertex_list, qh.num_vertices
    -    clear qh.maxoutdone (will need to call qh_check_maxout() for facet->maxoutside)
    -    if unknown point, adds a pointer to qh.other_points
    -      do not deallocate the point's coordinates
    -
    -  notes:
    -    assumes point is near its best facet and not at a local minimum of a lens
    -      distributions.  Use qh_findbestfacet to avoid this case.
    -    uses qh.visible_list, qh.newfacet_list, qh.delvertex_list, qh.NEWfacets
    -
    -  see also:
    -    qh_triangulate() -- triangulate non-simplicial facets
    -
    -  design:
    -    add point to other_points if needed
    -    if checkdist
    -      if point not above facet
    -        partition coplanar point
    -        exit
    -    exit if pre STOPpoint requested
    -    find horizon and visible facets for point
    -    make new facets for point to horizon
    -    make hyperplanes for point
    -    compute balance statistics
    -    match neighboring new facets
    -    update vertex neighbors and delete interior vertices
    -    exit if STOPcone requested
    -    merge non-convex new facets
    -    if merge found, many merges, or 'Qf'
    -       use qh_findbestnew() instead of qh_findbest()
    -    partition outside points from visible facets
    -    delete visible facets
    -    check polyhedron if requested
    -    exit if post STOPpoint requested
    -    reset working lists of facets and vertices
    -*/
    -boolT qh_addpoint(pointT *furthest, facetT *facet, boolT checkdist) {
    -  int goodvisible, goodhorizon;
    -  vertexT *vertex;
    -  facetT *newfacet;
    -  realT dist, newbalance, pbalance;
    -  boolT isoutside= False;
    -  int numpart, numpoints, numnew, firstnew;
    -
    -  qh maxoutdone= False;
    -  if (qh_pointid(furthest) == -1)
    -    qh_setappend(&qh other_points, furthest);
    -  if (!facet) {
    -    qh_fprintf(qh ferr, 6213, "qhull internal error (qh_addpoint): NULL facet.  Need to call qh_findbestfacet first\n");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  if (checkdist) {
    -    facet= qh_findbest(furthest, facet, !qh_ALL, !qh_ISnewfacets, !qh_NOupper,
    -                        &dist, &isoutside, &numpart);
    -    zzadd_(Zpartition, numpart);
    -    if (!isoutside) {
    -      zinc_(Znotmax);  /* last point of outsideset is no longer furthest. */
    -      facet->notfurthest= True;
    -      qh_partitioncoplanar(furthest, facet, &dist);
    -      return True;
    -    }
    -  }
    -  qh_buildtracing(furthest, facet);
    -  if (qh STOPpoint < 0 && qh furthest_id == -qh STOPpoint-1) {
    -    facet->notfurthest= True;
    -    return False;
    -  }
    -  qh_findhorizon(furthest, facet, &goodvisible, &goodhorizon);
    -  if (qh ONLYgood && !(goodvisible+goodhorizon) && !qh GOODclosest) {
    -    zinc_(Znotgood);
    -    facet->notfurthest= True;
    -    /* last point of outsideset is no longer furthest.  This is ok
    -       since all points of the outside are likely to be bad */
    -    qh_resetlists(False, qh_RESETvisible /*qh visible_list newvertex_list newfacet_list */);
    -    return True;
    -  }
    -  zzinc_(Zprocessed);
    -  firstnew= qh facet_id;
    -  vertex= qh_makenewfacets(furthest /*visible_list, attaches if !ONLYgood */);
    -  qh_makenewplanes(/* newfacet_list */);
    -  numnew= qh facet_id - firstnew;
    -  newbalance= numnew - (realT) (qh num_facets-qh num_visible)
    -                         * qh hull_dim/qh num_vertices;
    -  wadd_(Wnewbalance, newbalance);
    -  wadd_(Wnewbalance2, newbalance * newbalance);
    -  if (qh ONLYgood
    -  && !qh_findgood(qh newfacet_list, goodhorizon) && !qh GOODclosest) {
    -    FORALLnew_facets
    -      qh_delfacet(newfacet);
    -    qh_delvertex(vertex);
    -    qh_resetlists(True, qh_RESETvisible /*qh visible_list newvertex_list newfacet_list */);
    -    zinc_(Znotgoodnew);
    -    facet->notfurthest= True;
    -    return True;
    -  }
    -  if (qh ONLYgood)
    -    qh_attachnewfacets(/*visible_list*/);
    -  qh_matchnewfacets();
    -  qh_updatevertices();
    -  if (qh STOPcone && qh furthest_id == qh STOPcone-1) {
    -    facet->notfurthest= True;
    -    return False;  /* visible_list etc. still defined */
    -  }
    -  qh findbestnew= False;
    -  if (qh PREmerge || qh MERGEexact) {
    -    qh_premerge(vertex, qh premerge_centrum, qh premerge_cos);
    -    if (qh_USEfindbestnew)
    -      qh findbestnew= True;
    -    else {
    -      FORALLnew_facets {
    -        if (!newfacet->simplicial) {
    -          qh findbestnew= True;  /* use qh_findbestnew instead of qh_findbest*/
    -          break;
    -        }
    -      }
    -    }
    -  }else if (qh BESToutside)
    -    qh findbestnew= True;
    -  qh_partitionvisible(/*visible_list, newfacet_list*/ !qh_ALL, &numpoints);
    -  qh findbestnew= False;
    -  qh findbest_notsharp= False;
    -  zinc_(Zpbalance);
    -  pbalance= numpoints - (realT) qh hull_dim /* assumes all points extreme */
    -                * (qh num_points - qh num_vertices)/qh num_vertices;
    -  wadd_(Wpbalance, pbalance);
    -  wadd_(Wpbalance2, pbalance * pbalance);
    -  qh_deletevisible(/*qh visible_list*/);
    -  zmax_(Zmaxvertex, qh num_vertices);
    -  qh NEWfacets= False;
    -  if (qh IStracing >= 4) {
    -    if (qh num_facets < 2000)
    -      qh_printlists();
    -    qh_printfacetlist(qh newfacet_list, NULL, True);
    -    qh_checkpolygon(qh facet_list);
    -  }else if (qh CHECKfrequently) {
    -    if (qh num_facets < 50)
    -      qh_checkpolygon(qh facet_list);
    -    else
    -      qh_checkpolygon(qh newfacet_list);
    -  }
    -  if (qh STOPpoint > 0 && qh furthest_id == qh STOPpoint-1)
    -    return False;
    -  qh_resetlists(True, qh_RESETvisible /*qh visible_list newvertex_list newfacet_list */);
    -  /* qh_triangulate(); to test qh.TRInormals */
    -  trace2((qh ferr, 2056, "qh_addpoint: added p%d new facets %d new balance %2.2g point balance %2.2g\n",
    -    qh_pointid(furthest), numnew, newbalance, pbalance));
    -  return True;
    -} /* addpoint */
    -
    -/*---------------------------------
    -
    -  qh_build_withrestart()
    -    allow restarts due to qh.JOGGLEmax while calling qh_buildhull()
    -    qh.FIRSTpoint/qh.NUMpoints is point array
    -        it may be moved by qh_joggleinput()
    -*/
    -void qh_build_withrestart(void) {
    -  int restart;
    -
    -  qh ALLOWrestart= True;
    -  while (True) {
    -    restart= setjmp(qh restartexit); /* simple statement for CRAY J916 */
    -    if (restart) {       /* only from qh_precision() */
    -      zzinc_(Zretry);
    -      wmax_(Wretrymax, qh JOGGLEmax);
    -      /* QH7078 warns about using 'TCn' with 'QJn' */
    -      qh STOPcone= -1; /* if break from joggle, prevents normal output */
    -    }
    -    if (!qh RERUN && qh JOGGLEmax < REALmax/2) {
    -      if (qh build_cnt > qh_JOGGLEmaxretry) {
    -        qh_fprintf(qh ferr, 6229, "qhull precision error: %d attempts to construct a convex hull\n\
    -        with joggled input.  Increase joggle above 'QJ%2.2g'\n\
    -        or modify qh_JOGGLE... parameters in user.h\n",
    -           qh build_cnt, qh JOGGLEmax);
    -        qh_errexit(qh_ERRqhull, NULL, NULL);
    -      }
    -      if (qh build_cnt && !restart)
    -        break;
    -    }else if (qh build_cnt && qh build_cnt >= qh RERUN)
    -      break;
    -    qh STOPcone= 0;
    -    qh_freebuild(True);  /* first call is a nop */
    -    qh build_cnt++;
    -    if (!qh qhull_optionsiz)
    -      qh qhull_optionsiz= (int)strlen(qh qhull_options);   /* WARN64 */
    -    else {
    -      qh qhull_options [qh qhull_optionsiz]= '\0';
    -      qh qhull_optionlen= qh_OPTIONline;  /* starts a new line */
    -    }
    -    qh_option("_run", &qh build_cnt, NULL);
    -    if (qh build_cnt == qh RERUN) {
    -      qh IStracing= qh TRACElastrun;  /* duplicated from qh_initqhull_globals */
    -      if (qh TRACEpoint != -1 || qh TRACEdist < REALmax/2 || qh TRACEmerge) {
    -        qh TRACElevel= (qh IStracing? qh IStracing : 3);
    -        qh IStracing= 0;
    -      }
    -      qhmem.IStracing= qh IStracing;
    -    }
    -    if (qh JOGGLEmax < REALmax/2)
    -      qh_joggleinput();
    -    qh_initbuild();
    -    qh_buildhull();
    -    if (qh JOGGLEmax < REALmax/2 && !qh MERGING)
    -      qh_checkconvex(qh facet_list, qh_ALGORITHMfault);
    -  }
    -  qh ALLOWrestart= False;
    -} /* qh_build_withrestart */
    -
    -/*---------------------------------
    -
    -  qh_buildhull()
    -    construct a convex hull by adding outside points one at a time
    -
    -  returns:
    -
    -  notes:
    -    may be called multiple times
    -    checks facet and vertex lists for incorrect flags
    -    to recover from STOPcone, call qh_deletevisible and qh_resetlists
    -
    -  design:
    -    check visible facet and newfacet flags
    -    check newlist vertex flags and qh.STOPcone/STOPpoint
    -    for each facet with a furthest outside point
    -      add point to facet
    -      exit if qh.STOPcone or qh.STOPpoint requested
    -    if qh.NARROWhull for initial simplex
    -      partition remaining outside points to coplanar sets
    -*/
    -void qh_buildhull(void) {
    -  facetT *facet;
    -  pointT *furthest;
    -  vertexT *vertex;
    -  int id;
    -
    -  trace1((qh ferr, 1037, "qh_buildhull: start build hull\n"));
    -  FORALLfacets {
    -    if (facet->visible || facet->newfacet) {
    -      qh_fprintf(qh ferr, 6165, "qhull internal error (qh_buildhull): visible or new facet f%d in facet list\n",
    -                   facet->id);
    -      qh_errexit(qh_ERRqhull, facet, NULL);
    -    }
    -  }
    -  FORALLvertices {
    -    if (vertex->newlist) {
    -      qh_fprintf(qh ferr, 6166, "qhull internal error (qh_buildhull): new vertex f%d in vertex list\n",
    -                   vertex->id);
    -      qh_errprint("ERRONEOUS", NULL, NULL, NULL, vertex);
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }
    -    id= qh_pointid(vertex->point);
    -    if ((qh STOPpoint>0 && id == qh STOPpoint-1) ||
    -        (qh STOPpoint<0 && id == -qh STOPpoint-1) ||
    -        (qh STOPcone>0 && id == qh STOPcone-1)) {
    -      trace1((qh ferr, 1038,"qh_buildhull: stop point or cone P%d in initial hull\n", id));
    -      return;
    -    }
    -  }
    -  qh facet_next= qh facet_list;      /* advance facet when processed */
    -  while ((furthest= qh_nextfurthest(&facet))) {
    -    qh num_outside--;  /* if ONLYmax, furthest may not be outside */
    -    if (!qh_addpoint(furthest, facet, qh ONLYmax))
    -      break;
    -  }
    -  if (qh NARROWhull) /* move points from outsideset to coplanarset */
    -    qh_outcoplanar( /* facet_list */ );
    -  if (qh num_outside && !furthest) {
    -    qh_fprintf(qh ferr, 6167, "qhull internal error (qh_buildhull): %d outside points were never processed.\n", qh num_outside);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  trace1((qh ferr, 1039, "qh_buildhull: completed the hull construction\n"));
    -} /* buildhull */
    -
    -
    -/*---------------------------------
    -
    -  qh_buildtracing( furthest, facet )
    -    trace an iteration of qh_buildhull() for furthest point and facet
    -    if !furthest, prints progress message
    -
    -  returns:
    -    tracks progress with qh.lastreport
    -    updates qh.furthest_id (-3 if furthest is NULL)
    -    also resets visit_id, vertext_visit on wrap around
    -
    -  see:
    -    qh_tracemerging()
    -
    -  design:
    -    if !furthest
    -      print progress message
    -      exit
    -    if 'TFn' iteration
    -      print progress message
    -    else if tracing
    -      trace furthest point and facet
    -    reset qh.visit_id and qh.vertex_visit if overflow may occur
    -    set qh.furthest_id for tracing
    -*/
    -void qh_buildtracing(pointT *furthest, facetT *facet) {
    -  realT dist= 0;
    -  float cpu;
    -  int total, furthestid;
    -  time_t timedata;
    -  struct tm *tp;
    -  vertexT *vertex;
    -
    -  qh old_randomdist= qh RANDOMdist;
    -  qh RANDOMdist= False;
    -  if (!furthest) {
    -    time(&timedata);
    -    tp= localtime(&timedata);
    -    cpu= (float)qh_CPUclock - (float)qh hulltime;
    -    cpu /= (float)qh_SECticks;
    -    total= zzval_(Ztotmerge) - zzval_(Zcyclehorizon) + zzval_(Zcyclefacettot);
    -    qh_fprintf(qh ferr, 8118, "\n\
    -At %02d:%02d:%02d & %2.5g CPU secs, qhull has created %d facets and merged %d.\n\
    - The current hull contains %d facets and %d vertices.  Last point was p%d\n",
    -      tp->tm_hour, tp->tm_min, tp->tm_sec, cpu, qh facet_id -1,
    -      total, qh num_facets, qh num_vertices, qh furthest_id);
    -    return;
    -  }
    -  furthestid= qh_pointid(furthest);
    -  if (qh TRACEpoint == furthestid) {
    -    qh IStracing= qh TRACElevel;
    -    qhmem.IStracing= qh TRACElevel;
    -  }else if (qh TRACEpoint != -1 && qh TRACEdist < REALmax/2) {
    -    qh IStracing= 0;
    -    qhmem.IStracing= 0;
    -  }
    -  if (qh REPORTfreq && (qh facet_id-1 > qh lastreport+qh REPORTfreq)) {
    -    qh lastreport= qh facet_id-1;
    -    time(&timedata);
    -    tp= localtime(&timedata);
    -    cpu= (float)qh_CPUclock - (float)qh hulltime;
    -    cpu /= (float)qh_SECticks;
    -    total= zzval_(Ztotmerge) - zzval_(Zcyclehorizon) + zzval_(Zcyclefacettot);
    -    zinc_(Zdistio);
    -    qh_distplane(furthest, facet, &dist);
    -    qh_fprintf(qh ferr, 8119, "\n\
    -At %02d:%02d:%02d & %2.5g CPU secs, qhull has created %d facets and merged %d.\n\
    - The current hull contains %d facets and %d vertices.  There are %d\n\
    - outside points.  Next is point p%d(v%d), %2.2g above f%d.\n",
    -      tp->tm_hour, tp->tm_min, tp->tm_sec, cpu, qh facet_id -1,
    -      total, qh num_facets, qh num_vertices, qh num_outside+1,
    -      furthestid, qh vertex_id, dist, getid_(facet));
    -  }else if (qh IStracing >=1) {
    -    cpu= (float)qh_CPUclock - (float)qh hulltime;
    -    cpu /= (float)qh_SECticks;
    -    qh_distplane(furthest, facet, &dist);
    -    qh_fprintf(qh ferr, 8120, "qh_addpoint: add p%d(v%d) to hull of %d facets(%2.2g above f%d) and %d outside at %4.4g CPU secs.  Previous was p%d.\n",
    -      furthestid, qh vertex_id, qh num_facets, dist,
    -      getid_(facet), qh num_outside+1, cpu, qh furthest_id);
    -  }
    -  zmax_(Zvisit2max, (int)qh visit_id/2);
    -  if (qh visit_id > (unsigned) INT_MAX) {
    -    zinc_(Zvisit);
    -    qh visit_id= 0;
    -    FORALLfacets
    -      facet->visitid= 0;
    -  }
    -  zmax_(Zvvisit2max, (int)qh vertex_visit/2);
    -  if (qh vertex_visit > (unsigned) INT_MAX/2) { /* 31 bits */
    -    zinc_(Zvvisit);
    -    qh vertex_visit= 0;
    -    FORALLvertices
    -      vertex->visitid= 0;
    -  }
    -  qh furthest_id= furthestid;
    -  qh RANDOMdist= qh old_randomdist;
    -} /* buildtracing */
    -
    -/*---------------------------------
    -
    -  qh_errexit2( exitcode, facet, otherfacet )
    -    return exitcode to system after an error
    -    report two facets
    -
    -  returns:
    -    assumes exitcode non-zero
    -
    -  see:
    -    normally use qh_errexit() in user.c(reports a facet and a ridge)
    -*/
    -void qh_errexit2(int exitcode, facetT *facet, facetT *otherfacet) {
    -
    -  qh_errprint("ERRONEOUS", facet, otherfacet, NULL, NULL);
    -  qh_errexit(exitcode, NULL, NULL);
    -} /* errexit2 */
    -
    -
    -/*---------------------------------
    -
    -  qh_findhorizon( point, facet, goodvisible, goodhorizon )
    -    given a visible facet, find the point's horizon and visible facets
    -    for all facets, !facet-visible
    -
    -  returns:
    -    returns qh.visible_list/num_visible with all visible facets
    -      marks visible facets with ->visible
    -    updates count of good visible and good horizon facets
    -    updates qh.max_outside, qh.max_vertex, facet->maxoutside
    -
    -  see:
    -    similar to qh_delpoint()
    -
    -  design:
    -    move facet to qh.visible_list at end of qh.facet_list
    -    for all visible facets
    -     for each unvisited neighbor of a visible facet
    -       compute distance of point to neighbor
    -       if point above neighbor
    -         move neighbor to end of qh.visible_list
    -       else if point is coplanar with neighbor
    -         update qh.max_outside, qh.max_vertex, neighbor->maxoutside
    -         mark neighbor coplanar (will create a samecycle later)
    -         update horizon statistics
    -*/
    -void qh_findhorizon(pointT *point, facetT *facet, int *goodvisible, int *goodhorizon) {
    -  facetT *neighbor, **neighborp, *visible;
    -  int numhorizon= 0, coplanar= 0;
    -  realT dist;
    -
    -  trace1((qh ferr, 1040,"qh_findhorizon: find horizon for point p%d facet f%d\n",qh_pointid(point),facet->id));
    -  *goodvisible= *goodhorizon= 0;
    -  zinc_(Ztotvisible);
    -  qh_removefacet(facet);  /* visible_list at end of qh facet_list */
    -  qh_appendfacet(facet);
    -  qh num_visible= 1;
    -  if (facet->good)
    -    (*goodvisible)++;
    -  qh visible_list= facet;
    -  facet->visible= True;
    -  facet->f.replace= NULL;
    -  if (qh IStracing >=4)
    -    qh_errprint("visible", facet, NULL, NULL, NULL);
    -  qh visit_id++;
    -  FORALLvisible_facets {
    -    if (visible->tricoplanar && !qh TRInormals) {
    -      qh_fprintf(qh ferr, 6230, "Qhull internal error (qh_findhorizon): does not work for tricoplanar facets.  Use option 'Q11'\n");
    -      qh_errexit(qh_ERRqhull, visible, NULL);
    -    }
    -    visible->visitid= qh visit_id;
    -    FOREACHneighbor_(visible) {
    -      if (neighbor->visitid == qh visit_id)
    -        continue;
    -      neighbor->visitid= qh visit_id;
    -      zzinc_(Znumvisibility);
    -      qh_distplane(point, neighbor, &dist);
    -      if (dist > qh MINvisible) {
    -        zinc_(Ztotvisible);
    -        qh_removefacet(neighbor);  /* append to end of qh visible_list */
    -        qh_appendfacet(neighbor);
    -        neighbor->visible= True;
    -        neighbor->f.replace= NULL;
    -        qh num_visible++;
    -        if (neighbor->good)
    -          (*goodvisible)++;
    -        if (qh IStracing >=4)
    -          qh_errprint("visible", neighbor, NULL, NULL, NULL);
    -      }else {
    -        if (dist > - qh MAXcoplanar) {
    -          neighbor->coplanar= True;
    -          zzinc_(Zcoplanarhorizon);
    -          qh_precision("coplanar horizon");
    -          coplanar++;
    -          if (qh MERGING) {
    -            if (dist > 0) {
    -              maximize_(qh max_outside, dist);
    -              maximize_(qh max_vertex, dist);
    -#if qh_MAXoutside
    -              maximize_(neighbor->maxoutside, dist);
    -#endif
    -            }else
    -              minimize_(qh min_vertex, dist);  /* due to merge later */
    -          }
    -          trace2((qh ferr, 2057, "qh_findhorizon: point p%d is coplanar to horizon f%d, dist=%2.7g < qh MINvisible(%2.7g)\n",
    -              qh_pointid(point), neighbor->id, dist, qh MINvisible));
    -        }else
    -          neighbor->coplanar= False;
    -        zinc_(Ztothorizon);
    -        numhorizon++;
    -        if (neighbor->good)
    -          (*goodhorizon)++;
    -        if (qh IStracing >=4)
    -          qh_errprint("horizon", neighbor, NULL, NULL, NULL);
    -      }
    -    }
    -  }
    -  if (!numhorizon) {
    -    qh_precision("empty horizon");
    -    qh_fprintf(qh ferr, 6168, "qhull precision error (qh_findhorizon): empty horizon\n\
    -QhullPoint p%d was above all facets.\n", qh_pointid(point));
    -    qh_printfacetlist(qh facet_list, NULL, True);
    -    qh_errexit(qh_ERRprec, NULL, NULL);
    -  }
    -  trace1((qh ferr, 1041, "qh_findhorizon: %d horizon facets(good %d), %d visible(good %d), %d coplanar\n",
    -       numhorizon, *goodhorizon, qh num_visible, *goodvisible, coplanar));
    -  if (qh IStracing >= 4 && qh num_facets < 50)
    -    qh_printlists();
    -} /* findhorizon */
    -
    -/*---------------------------------
    -
    -  qh_nextfurthest( visible )
    -    returns next furthest point and visible facet for qh_addpoint()
    -    starts search at qh.facet_next
    -
    -  returns:
    -    removes furthest point from outside set
    -    NULL if none available
    -    advances qh.facet_next over facets with empty outside sets
    -
    -  design:
    -    for each facet from qh.facet_next
    -      if empty outside set
    -        advance qh.facet_next
    -      else if qh.NARROWhull
    -        determine furthest outside point
    -        if furthest point is not outside
    -          advance qh.facet_next(point will be coplanar)
    -    remove furthest point from outside set
    -*/
    -pointT *qh_nextfurthest(facetT **visible) {
    -  facetT *facet;
    -  int size, idx;
    -  realT randr, dist;
    -  pointT *furthest;
    -
    -  while ((facet= qh facet_next) != qh facet_tail) {
    -    if (!facet->outsideset) {
    -      qh facet_next= facet->next;
    -      continue;
    -    }
    -    SETreturnsize_(facet->outsideset, size);
    -    if (!size) {
    -      qh_setfree(&facet->outsideset);
    -      qh facet_next= facet->next;
    -      continue;
    -    }
    -    if (qh NARROWhull) {
    -      if (facet->notfurthest)
    -        qh_furthestout(facet);
    -      furthest= (pointT*)qh_setlast(facet->outsideset);
    -#if qh_COMPUTEfurthest
    -      qh_distplane(furthest, facet, &dist);
    -      zinc_(Zcomputefurthest);
    -#else
    -      dist= facet->furthestdist;
    -#endif
    -      if (dist < qh MINoutside) { /* remainder of outside set is coplanar for qh_outcoplanar */
    -        qh facet_next= facet->next;
    -        continue;
    -      }
    -    }
    -    if (!qh RANDOMoutside && !qh VIRTUALmemory) {
    -      if (qh PICKfurthest) {
    -        qh_furthestnext(/* qh facet_list */);
    -        facet= qh facet_next;
    -      }
    -      *visible= facet;
    -      return((pointT*)qh_setdellast(facet->outsideset));
    -    }
    -    if (qh RANDOMoutside) {
    -      int outcoplanar = 0;
    -      if (qh NARROWhull) {
    -        FORALLfacets {
    -          if (facet == qh facet_next)
    -            break;
    -          if (facet->outsideset)
    -            outcoplanar += qh_setsize( facet->outsideset);
    -        }
    -      }
    -      randr= qh_RANDOMint;
    -      randr= randr/(qh_RANDOMmax+1);
    -      idx= (int)floor((qh num_outside - outcoplanar) * randr);
    -      FORALLfacet_(qh facet_next) {
    -        if (facet->outsideset) {
    -          SETreturnsize_(facet->outsideset, size);
    -          if (!size)
    -            qh_setfree(&facet->outsideset);
    -          else if (size > idx) {
    -            *visible= facet;
    -            return((pointT*)qh_setdelnth(facet->outsideset, idx));
    -          }else
    -            idx -= size;
    -        }
    -      }
    -      qh_fprintf(qh ferr, 6169, "qhull internal error (qh_nextfurthest): num_outside %d is too low\nby at least %d, or a random real %g >= 1.0\n",
    -              qh num_outside, idx+1, randr);
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }else { /* VIRTUALmemory */
    -      facet= qh facet_tail->previous;
    -      if (!(furthest= (pointT*)qh_setdellast(facet->outsideset))) {
    -        if (facet->outsideset)
    -          qh_setfree(&facet->outsideset);
    -        qh_removefacet(facet);
    -        qh_prependfacet(facet, &qh facet_list);
    -        continue;
    -      }
    -      *visible= facet;
    -      return furthest;
    -    }
    -  }
    -  return NULL;
    -} /* nextfurthest */
    -
    -/*---------------------------------
    -
    -  qh_partitionall( vertices, points, numpoints )
    -    partitions all points in points/numpoints to the outsidesets of facets
    -    vertices= vertices in qh.facet_list(!partitioned)
    -
    -  returns:
    -    builds facet->outsideset
    -    does not partition qh.GOODpoint
    -    if qh.ONLYgood && !qh.MERGING,
    -      does not partition qh.GOODvertex
    -
    -  notes:
    -    faster if qh.facet_list sorted by anticipated size of outside set
    -
    -  design:
    -    initialize pointset with all points
    -    remove vertices from pointset
    -    remove qh.GOODpointp from pointset (unless it's qh.STOPcone or qh.STOPpoint)
    -    for all facets
    -      for all remaining points in pointset
    -        compute distance from point to facet
    -        if point is outside facet
    -          remove point from pointset (by not reappending)
    -          update bestpoint
    -          append point or old bestpoint to facet's outside set
    -      append bestpoint to facet's outside set (furthest)
    -    for all points remaining in pointset
    -      partition point into facets' outside sets and coplanar sets
    -*/
    -void qh_partitionall(setT *vertices, pointT *points, int numpoints){
    -  setT *pointset;
    -  vertexT *vertex, **vertexp;
    -  pointT *point, **pointp, *bestpoint;
    -  int size, point_i, point_n, point_end, remaining, i, id;
    -  facetT *facet;
    -  realT bestdist= -REALmax, dist, distoutside;
    -
    -  trace1((qh ferr, 1042, "qh_partitionall: partition all points into outside sets\n"));
    -  pointset= qh_settemp(numpoints);
    -  qh num_outside= 0;
    -  pointp= SETaddr_(pointset, pointT);
    -  for (i=numpoints, point= points; i--; point += qh hull_dim)
    -    *(pointp++)= point;
    -  qh_settruncate(pointset, numpoints);
    -  FOREACHvertex_(vertices) {
    -    if ((id= qh_pointid(vertex->point)) >= 0)
    -      SETelem_(pointset, id)= NULL;
    -  }
    -  id= qh_pointid(qh GOODpointp);
    -  if (id >=0 && qh STOPcone-1 != id && -qh STOPpoint-1 != id)
    -    SETelem_(pointset, id)= NULL;
    -  if (qh GOODvertexp && qh ONLYgood && !qh MERGING) { /* matches qhull()*/
    -    if ((id= qh_pointid(qh GOODvertexp)) >= 0)
    -      SETelem_(pointset, id)= NULL;
    -  }
    -  if (!qh BESToutside) {  /* matches conditional for qh_partitionpoint below */
    -    distoutside= qh_DISToutside; /* multiple of qh.MINoutside & qh.max_outside, see user.h */
    -    zval_(Ztotpartition)= qh num_points - qh hull_dim - 1; /*misses GOOD... */
    -    remaining= qh num_facets;
    -    point_end= numpoints;
    -    FORALLfacets {
    -      size= point_end/(remaining--) + 100;
    -      facet->outsideset= qh_setnew(size);
    -      bestpoint= NULL;
    -      point_end= 0;
    -      FOREACHpoint_i_(pointset) {
    -        if (point) {
    -          zzinc_(Zpartitionall);
    -          qh_distplane(point, facet, &dist);
    -          if (dist < distoutside)
    -            SETelem_(pointset, point_end++)= point;
    -          else {
    -            qh num_outside++;
    -            if (!bestpoint) {
    -              bestpoint= point;
    -              bestdist= dist;
    -            }else if (dist > bestdist) {
    -              qh_setappend(&facet->outsideset, bestpoint);
    -              bestpoint= point;
    -              bestdist= dist;
    -            }else
    -              qh_setappend(&facet->outsideset, point);
    -          }
    -        }
    -      }
    -      if (bestpoint) {
    -        qh_setappend(&facet->outsideset, bestpoint);
    -#if !qh_COMPUTEfurthest
    -        facet->furthestdist= bestdist;
    -#endif
    -      }else
    -        qh_setfree(&facet->outsideset);
    -      qh_settruncate(pointset, point_end);
    -    }
    -  }
    -  /* if !qh BESToutside, pointset contains points not assigned to outsideset */
    -  if (qh BESToutside || qh MERGING || qh KEEPcoplanar || qh KEEPinside) {
    -    qh findbestnew= True;
    -    FOREACHpoint_i_(pointset) {
    -      if (point)
    -        qh_partitionpoint(point, qh facet_list);
    -    }
    -    qh findbestnew= False;
    -  }
    -  zzadd_(Zpartitionall, zzval_(Zpartition));
    -  zzval_(Zpartition)= 0;
    -  qh_settempfree(&pointset);
    -  if (qh IStracing >= 4)
    -    qh_printfacetlist(qh facet_list, NULL, True);
    -} /* partitionall */
    -
    -
    -/*---------------------------------
    -
    -  qh_partitioncoplanar( point, facet, dist )
    -    partition coplanar point to a facet
    -    dist is distance from point to facet
    -    if dist NULL,
    -      searches for bestfacet and does nothing if inside
    -    if qh.findbestnew set,
    -      searches new facets instead of using qh_findbest()
    -
    -  returns:
    -    qh.max_ouside updated
    -    if qh.KEEPcoplanar or qh.KEEPinside
    -      point assigned to best coplanarset
    -
    -  notes:
    -    facet->maxoutside is updated at end by qh_check_maxout
    -
    -  design:
    -    if dist undefined
    -      find best facet for point
    -      if point sufficiently below facet (depends on qh.NEARinside and qh.KEEPinside)
    -        exit
    -    if keeping coplanar/nearinside/inside points
    -      if point is above furthest coplanar point
    -        append point to coplanar set (it is the new furthest)
    -        update qh.max_outside
    -      else
    -        append point one before end of coplanar set
    -    else if point is clearly outside of qh.max_outside and bestfacet->coplanarset
    -    and bestfacet is more than perpendicular to facet
    -      repartition the point using qh_findbest() -- it may be put on an outsideset
    -    else
    -      update qh.max_outside
    -*/
    -void qh_partitioncoplanar(pointT *point, facetT *facet, realT *dist) {
    -  facetT *bestfacet;
    -  pointT *oldfurthest;
    -  realT bestdist, dist2= 0, angle;
    -  int numpart= 0, oldfindbest;
    -  boolT isoutside;
    -
    -  qh WAScoplanar= True;
    -  if (!dist) {
    -    if (qh findbestnew)
    -      bestfacet= qh_findbestnew(point, facet, &bestdist, qh_ALL, &isoutside, &numpart);
    -    else
    -      bestfacet= qh_findbest(point, facet, qh_ALL, !qh_ISnewfacets, qh DELAUNAY,
    -                          &bestdist, &isoutside, &numpart);
    -    zinc_(Ztotpartcoplanar);
    -    zzadd_(Zpartcoplanar, numpart);
    -    if (!qh DELAUNAY && !qh KEEPinside) { /*  for 'd', bestdist skips upperDelaunay facets */
    -      if (qh KEEPnearinside) {
    -        if (bestdist < -qh NEARinside) {
    -          zinc_(Zcoplanarinside);
    -          trace4((qh ferr, 4062, "qh_partitioncoplanar: point p%d is more than near-inside facet f%d dist %2.2g findbestnew %d\n",
    -                  qh_pointid(point), bestfacet->id, bestdist, qh findbestnew));
    -          return;
    -        }
    -      }else if (bestdist < -qh MAXcoplanar) {
    -          trace4((qh ferr, 4063, "qh_partitioncoplanar: point p%d is inside facet f%d dist %2.2g findbestnew %d\n",
    -                  qh_pointid(point), bestfacet->id, bestdist, qh findbestnew));
    -        zinc_(Zcoplanarinside);
    -        return;
    -      }
    -    }
    -  }else {
    -    bestfacet= facet;
    -    bestdist= *dist;
    -  }
    -  if (bestdist > qh max_outside) {
    -    if (!dist && facet != bestfacet) {
    -      zinc_(Zpartangle);
    -      angle= qh_getangle(facet->normal, bestfacet->normal);
    -      if (angle < 0) {
    -        /* typically due to deleted vertex and coplanar facets, e.g.,
    -             RBOX 1000 s Z1 G1e-13 t1001185205 | QHULL Tv */
    -        zinc_(Zpartflip);
    -        trace2((qh ferr, 2058, "qh_partitioncoplanar: repartition point p%d from f%d.  It is above flipped facet f%d dist %2.2g\n",
    -                qh_pointid(point), facet->id, bestfacet->id, bestdist));
    -        oldfindbest= qh findbestnew;
    -        qh findbestnew= False;
    -        qh_partitionpoint(point, bestfacet);
    -        qh findbestnew= oldfindbest;
    -        return;
    -      }
    -    }
    -    qh max_outside= bestdist;
    -    if (bestdist > qh TRACEdist) {
    -      qh_fprintf(qh ferr, 8122, "qh_partitioncoplanar: ====== p%d from f%d increases max_outside to %2.2g of f%d last p%d\n",
    -                     qh_pointid(point), facet->id, bestdist, bestfacet->id, qh furthest_id);
    -      qh_errprint("DISTANT", facet, bestfacet, NULL, NULL);
    -    }
    -  }
    -  if (qh KEEPcoplanar + qh KEEPinside + qh KEEPnearinside) {
    -    oldfurthest= (pointT*)qh_setlast(bestfacet->coplanarset);
    -    if (oldfurthest) {
    -      zinc_(Zcomputefurthest);
    -      qh_distplane(oldfurthest, bestfacet, &dist2);
    -    }
    -    if (!oldfurthest || dist2 < bestdist)
    -      qh_setappend(&bestfacet->coplanarset, point);
    -    else
    -      qh_setappend2ndlast(&bestfacet->coplanarset, point);
    -  }
    -  trace4((qh ferr, 4064, "qh_partitioncoplanar: point p%d is coplanar with facet f%d(or inside) dist %2.2g\n",
    -          qh_pointid(point), bestfacet->id, bestdist));
    -} /* partitioncoplanar */
    -
    -/*---------------------------------
    -
    -  qh_partitionpoint( point, facet )
    -    assigns point to an outside set, coplanar set, or inside set (i.e., dropt)
    -    if qh.findbestnew
    -      uses qh_findbestnew() to search all new facets
    -    else
    -      uses qh_findbest()
    -
    -  notes:
    -    after qh_distplane(), this and qh_findbest() are most expensive in 3-d
    -
    -  design:
    -    find best facet for point
    -      (either exhaustive search of new facets or directed search from facet)
    -    if qh.NARROWhull
    -      retain coplanar and nearinside points as outside points
    -    if point is outside bestfacet
    -      if point above furthest point for bestfacet
    -        append point to outside set (it becomes the new furthest)
    -        if outside set was empty
    -          move bestfacet to end of qh.facet_list (i.e., after qh.facet_next)
    -        update bestfacet->furthestdist
    -      else
    -        append point one before end of outside set
    -    else if point is coplanar to bestfacet
    -      if keeping coplanar points or need to update qh.max_outside
    -        partition coplanar point into bestfacet
    -    else if near-inside point
    -      partition as coplanar point into bestfacet
    -    else is an inside point
    -      if keeping inside points
    -        partition as coplanar point into bestfacet
    -*/
    -void qh_partitionpoint(pointT *point, facetT *facet) {
    -  realT bestdist;
    -  boolT isoutside;
    -  facetT *bestfacet;
    -  int numpart;
    -#if qh_COMPUTEfurthest
    -  realT dist;
    -#endif
    -
    -  if (qh findbestnew)
    -    bestfacet= qh_findbestnew(point, facet, &bestdist, qh BESToutside, &isoutside, &numpart);
    -  else
    -    bestfacet= qh_findbest(point, facet, qh BESToutside, qh_ISnewfacets, !qh_NOupper,
    -                          &bestdist, &isoutside, &numpart);
    -  zinc_(Ztotpartition);
    -  zzadd_(Zpartition, numpart);
    -  if (qh NARROWhull) {
    -    if (qh DELAUNAY && !isoutside && bestdist >= -qh MAXcoplanar)
    -      qh_precision("nearly incident point(narrow hull)");
    -    if (qh KEEPnearinside) {
    -      if (bestdist >= -qh NEARinside)
    -        isoutside= True;
    -    }else if (bestdist >= -qh MAXcoplanar)
    -      isoutside= True;
    -  }
    -
    -  if (isoutside) {
    -    if (!bestfacet->outsideset
    -    || !qh_setlast(bestfacet->outsideset)) {
    -      qh_setappend(&(bestfacet->outsideset), point);
    -      if (!bestfacet->newfacet) {
    -        qh_removefacet(bestfacet);  /* make sure it's after qh facet_next */
    -        qh_appendfacet(bestfacet);
    -      }
    -#if !qh_COMPUTEfurthest
    -      bestfacet->furthestdist= bestdist;
    -#endif
    -    }else {
    -#if qh_COMPUTEfurthest
    -      zinc_(Zcomputefurthest);
    -      qh_distplane(oldfurthest, bestfacet, &dist);
    -      if (dist < bestdist)
    -        qh_setappend(&(bestfacet->outsideset), point);
    -      else
    -        qh_setappend2ndlast(&(bestfacet->outsideset), point);
    -#else
    -      if (bestfacet->furthestdist < bestdist) {
    -        qh_setappend(&(bestfacet->outsideset), point);
    -        bestfacet->furthestdist= bestdist;
    -      }else
    -        qh_setappend2ndlast(&(bestfacet->outsideset), point);
    -#endif
    -    }
    -    qh num_outside++;
    -    trace4((qh ferr, 4065, "qh_partitionpoint: point p%d is outside facet f%d new? %d(or narrowhull)\n",
    -          qh_pointid(point), bestfacet->id, bestfacet->newfacet));
    -  }else if (qh DELAUNAY || bestdist >= -qh MAXcoplanar) { /* for 'd', bestdist skips upperDelaunay facets */
    -    zzinc_(Zcoplanarpart);
    -    if (qh DELAUNAY)
    -      qh_precision("nearly incident point");
    -    if ((qh KEEPcoplanar + qh KEEPnearinside) || bestdist > qh max_outside)
    -      qh_partitioncoplanar(point, bestfacet, &bestdist);
    -    else {
    -      trace4((qh ferr, 4066, "qh_partitionpoint: point p%d is coplanar to facet f%d(dropped)\n",
    -          qh_pointid(point), bestfacet->id));
    -    }
    -  }else if (qh KEEPnearinside && bestdist > -qh NEARinside) {
    -    zinc_(Zpartnear);
    -    qh_partitioncoplanar(point, bestfacet, &bestdist);
    -  }else {
    -    zinc_(Zpartinside);
    -    trace4((qh ferr, 4067, "qh_partitionpoint: point p%d is inside all facets, closest to f%d dist %2.2g\n",
    -          qh_pointid(point), bestfacet->id, bestdist));
    -    if (qh KEEPinside)
    -      qh_partitioncoplanar(point, bestfacet, &bestdist);
    -  }
    -} /* partitionpoint */
    -
    -/*---------------------------------
    -
    -  qh_partitionvisible( allpoints, numoutside )
    -    partitions points in visible facets to qh.newfacet_list
    -    qh.visible_list= visible facets
    -    for visible facets
    -      1st neighbor (if any) points to a horizon facet or a new facet
    -    if allpoints(!used),
    -      repartitions coplanar points
    -
    -  returns:
    -    updates outside sets and coplanar sets of qh.newfacet_list
    -    updates qh.num_outside (count of outside points)
    -
    -  notes:
    -    qh.findbest_notsharp should be clear (extra work if set)
    -
    -  design:
    -    for all visible facets with outside set or coplanar set
    -      select a newfacet for visible facet
    -      if outside set
    -        partition outside set into new facets
    -      if coplanar set and keeping coplanar/near-inside/inside points
    -        if allpoints
    -          partition coplanar set into new facets, may be assigned outside
    -        else
    -          partition coplanar set into coplanar sets of new facets
    -    for each deleted vertex
    -      if allpoints
    -        partition vertex into new facets, may be assigned outside
    -      else
    -        partition vertex into coplanar sets of new facets
    -*/
    -void qh_partitionvisible(/*visible_list*/ boolT allpoints, int *numoutside) {
    -  facetT *visible, *newfacet;
    -  pointT *point, **pointp;
    -  int coplanar=0, size;
    -  unsigned count;
    -  vertexT *vertex, **vertexp;
    -
    -  if (qh ONLYmax)
    -    maximize_(qh MINoutside, qh max_vertex);
    -  *numoutside= 0;
    -  FORALLvisible_facets {
    -    if (!visible->outsideset && !visible->coplanarset)
    -      continue;
    -    newfacet= visible->f.replace;
    -    count= 0;
    -    while (newfacet && newfacet->visible) {
    -      newfacet= newfacet->f.replace;
    -      if (count++ > qh facet_id)
    -        qh_infiniteloop(visible);
    -    }
    -    if (!newfacet)
    -      newfacet= qh newfacet_list;
    -    if (newfacet == qh facet_tail) {
    -      qh_fprintf(qh ferr, 6170, "qhull precision error (qh_partitionvisible): all new facets deleted as\n        degenerate facets. Can not continue.\n");
    -      qh_errexit(qh_ERRprec, NULL, NULL);
    -    }
    -    if (visible->outsideset) {
    -      size= qh_setsize(visible->outsideset);
    -      *numoutside += size;
    -      qh num_outside -= size;
    -      FOREACHpoint_(visible->outsideset)
    -        qh_partitionpoint(point, newfacet);
    -    }
    -    if (visible->coplanarset && (qh KEEPcoplanar + qh KEEPinside + qh KEEPnearinside)) {
    -      size= qh_setsize(visible->coplanarset);
    -      coplanar += size;
    -      FOREACHpoint_(visible->coplanarset) {
    -        if (allpoints) /* not used */
    -          qh_partitionpoint(point, newfacet);
    -        else
    -          qh_partitioncoplanar(point, newfacet, NULL);
    -      }
    -    }
    -  }
    -  FOREACHvertex_(qh del_vertices) {
    -    if (vertex->point) {
    -      if (allpoints) /* not used */
    -        qh_partitionpoint(vertex->point, qh newfacet_list);
    -      else
    -        qh_partitioncoplanar(vertex->point, qh newfacet_list, NULL);
    -    }
    -  }
    -  trace1((qh ferr, 1043,"qh_partitionvisible: partitioned %d points from outsidesets and %d points from coplanarsets\n", *numoutside, coplanar));
    -} /* partitionvisible */
    -
    -
    -
    -/*---------------------------------
    -
    -  qh_precision( reason )
    -    restart on precision errors if not merging and if 'QJn'
    -*/
    -void qh_precision(const char *reason) {
    -
    -  if (qh ALLOWrestart && !qh PREmerge && !qh MERGEexact) {
    -    if (qh JOGGLEmax < REALmax/2) {
    -      trace0((qh ferr, 26, "qh_precision: qhull restart because of %s\n", reason));
    -      longjmp(qh restartexit, qh_ERRprec);
    -    }
    -  }
    -} /* qh_precision */
    -
    -/*---------------------------------
    -
    -  qh_printsummary( fp )
    -    prints summary to fp
    -
    -  notes:
    -    not in io.c so that user_eg.c can prevent io.c from loading
    -    qh_printsummary and qh_countfacets must match counts
    -
    -  design:
    -    determine number of points, vertices, and coplanar points
    -    print summary
    -*/
    -void qh_printsummary(FILE *fp) {
    -  realT ratio, outerplane, innerplane;
    -  float cpu;
    -  int size, id, nummerged, numvertices, numcoplanars= 0, nonsimplicial=0;
    -  int goodused;
    -  facetT *facet;
    -  const char *s;
    -  int numdel= zzval_(Zdelvertextot);
    -  int numtricoplanars= 0;
    -
    -  size= qh num_points + qh_setsize(qh other_points);
    -  numvertices= qh num_vertices - qh_setsize(qh del_vertices);
    -  id= qh_pointid(qh GOODpointp);
    -  FORALLfacets {
    -    if (facet->coplanarset)
    -      numcoplanars += qh_setsize( facet->coplanarset);
    -    if (facet->good) {
    -      if (facet->simplicial) {
    -        if (facet->keepcentrum && facet->tricoplanar)
    -          numtricoplanars++;
    -      }else if (qh_setsize(facet->vertices) != qh hull_dim)
    -        nonsimplicial++;
    -    }
    -  }
    -  if (id >=0 && qh STOPcone-1 != id && -qh STOPpoint-1 != id)
    -    size--;
    -  if (qh STOPcone || qh STOPpoint)
    -      qh_fprintf(fp, 9288, "\nAt a premature exit due to 'TVn', 'TCn', 'TRn', or precision error with 'QJn'.");
    -  if (qh UPPERdelaunay)
    -    goodused= qh GOODvertex + qh GOODpoint + qh SPLITthresholds;
    -  else if (qh DELAUNAY)
    -    goodused= qh GOODvertex + qh GOODpoint + qh GOODthreshold;
    -  else
    -    goodused= qh num_good;
    -  nummerged= zzval_(Ztotmerge) - zzval_(Zcyclehorizon) + zzval_(Zcyclefacettot);
    -  if (qh VORONOI) {
    -    if (qh UPPERdelaunay)
    -      qh_fprintf(fp, 9289, "\n\
    -Furthest-site Voronoi vertices by the convex hull of %d points in %d-d:\n\n", size, qh hull_dim);
    -    else
    -      qh_fprintf(fp, 9290, "\n\
    -Voronoi diagram by the convex hull of %d points in %d-d:\n\n", size, qh hull_dim);
    -    qh_fprintf(fp, 9291, "  Number of Voronoi regions%s: %d\n",
    -              qh ATinfinity ? " and at-infinity" : "", numvertices);
    -    if (numdel)
    -      qh_fprintf(fp, 9292, "  Total number of deleted points due to merging: %d\n", numdel);
    -    if (numcoplanars - numdel > 0)
    -      qh_fprintf(fp, 9293, "  Number of nearly incident points: %d\n", numcoplanars - numdel);
    -    else if (size - numvertices - numdel > 0)
    -      qh_fprintf(fp, 9294, "  Total number of nearly incident points: %d\n", size - numvertices - numdel);
    -    qh_fprintf(fp, 9295, "  Number of%s Voronoi vertices: %d\n",
    -              goodused ? " 'good'" : "", qh num_good);
    -    if (nonsimplicial)
    -      qh_fprintf(fp, 9296, "  Number of%s non-simplicial Voronoi vertices: %d\n",
    -              goodused ? " 'good'" : "", nonsimplicial);
    -  }else if (qh DELAUNAY) {
    -    if (qh UPPERdelaunay)
    -      qh_fprintf(fp, 9297, "\n\
    -Furthest-site Delaunay triangulation by the convex hull of %d points in %d-d:\n\n", size, qh hull_dim);
    -    else
    -      qh_fprintf(fp, 9298, "\n\
    -Delaunay triangulation by the convex hull of %d points in %d-d:\n\n", size, qh hull_dim);
    -    qh_fprintf(fp, 9299, "  Number of input sites%s: %d\n",
    -              qh ATinfinity ? " and at-infinity" : "", numvertices);
    -    if (numdel)
    -      qh_fprintf(fp, 9300, "  Total number of deleted points due to merging: %d\n", numdel);
    -    if (numcoplanars - numdel > 0)
    -      qh_fprintf(fp, 9301, "  Number of nearly incident points: %d\n", numcoplanars - numdel);
    -    else if (size - numvertices - numdel > 0)
    -      qh_fprintf(fp, 9302, "  Total number of nearly incident points: %d\n", size - numvertices - numdel);
    -    qh_fprintf(fp, 9303, "  Number of%s Delaunay regions: %d\n",
    -              goodused ? " 'good'" : "", qh num_good);
    -    if (nonsimplicial)
    -      qh_fprintf(fp, 9304, "  Number of%s non-simplicial Delaunay regions: %d\n",
    -              goodused ? " 'good'" : "", nonsimplicial);
    -  }else if (qh HALFspace) {
    -    qh_fprintf(fp, 9305, "\n\
    -Halfspace intersection by the convex hull of %d points in %d-d:\n\n", size, qh hull_dim);
    -    qh_fprintf(fp, 9306, "  Number of halfspaces: %d\n", size);
    -    qh_fprintf(fp, 9307, "  Number of non-redundant halfspaces: %d\n", numvertices);
    -    if (numcoplanars) {
    -      if (qh KEEPinside && qh KEEPcoplanar)
    -        s= "similar and redundant";
    -      else if (qh KEEPinside)
    -        s= "redundant";
    -      else
    -        s= "similar";
    -      qh_fprintf(fp, 9308, "  Number of %s halfspaces: %d\n", s, numcoplanars);
    -    }
    -    qh_fprintf(fp, 9309, "  Number of intersection points: %d\n", qh num_facets - qh num_visible);
    -    if (goodused)
    -      qh_fprintf(fp, 9310, "  Number of 'good' intersection points: %d\n", qh num_good);
    -    if (nonsimplicial)
    -      qh_fprintf(fp, 9311, "  Number of%s non-simplicial intersection points: %d\n",
    -              goodused ? " 'good'" : "", nonsimplicial);
    -  }else {
    -    qh_fprintf(fp, 9312, "\n\
    -Convex hull of %d points in %d-d:\n\n", size, qh hull_dim);
    -    qh_fprintf(fp, 9313, "  Number of vertices: %d\n", numvertices);
    -    if (numcoplanars) {
    -      if (qh KEEPinside && qh KEEPcoplanar)
    -        s= "coplanar and interior";
    -      else if (qh KEEPinside)
    -        s= "interior";
    -      else
    -        s= "coplanar";
    -      qh_fprintf(fp, 9314, "  Number of %s points: %d\n", s, numcoplanars);
    -    }
    -    qh_fprintf(fp, 9315, "  Number of facets: %d\n", qh num_facets - qh num_visible);
    -    if (goodused)
    -      qh_fprintf(fp, 9316, "  Number of 'good' facets: %d\n", qh num_good);
    -    if (nonsimplicial)
    -      qh_fprintf(fp, 9317, "  Number of%s non-simplicial facets: %d\n",
    -              goodused ? " 'good'" : "", nonsimplicial);
    -  }
    -  if (numtricoplanars)
    -      qh_fprintf(fp, 9318, "  Number of triangulated facets: %d\n", numtricoplanars);
    -  qh_fprintf(fp, 9319, "\nStatistics for: %s | %s",
    -                      qh rbox_command, qh qhull_command);
    -  if (qh ROTATErandom != INT_MIN)
    -    qh_fprintf(fp, 9320, " QR%d\n\n", qh ROTATErandom);
    -  else
    -    qh_fprintf(fp, 9321, "\n\n");
    -  qh_fprintf(fp, 9322, "  Number of points processed: %d\n", zzval_(Zprocessed));
    -  qh_fprintf(fp, 9323, "  Number of hyperplanes created: %d\n", zzval_(Zsetplane));
    -  if (qh DELAUNAY)
    -    qh_fprintf(fp, 9324, "  Number of facets in hull: %d\n", qh num_facets - qh num_visible);
    -  qh_fprintf(fp, 9325, "  Number of distance tests for qhull: %d\n", zzval_(Zpartition)+
    -      zzval_(Zpartitionall)+zzval_(Znumvisibility)+zzval_(Zpartcoplanar));
    -#if 0  /* NOTE: must print before printstatistics() */
    -  {realT stddev, ave;
    -  qh_fprintf(fp, 9326, "  average new facet balance: %2.2g\n",
    -          wval_(Wnewbalance)/zval_(Zprocessed));
    -  stddev= qh_stddev(zval_(Zprocessed), wval_(Wnewbalance),
    -                                 wval_(Wnewbalance2), &ave);
    -  qh_fprintf(fp, 9327, "  new facet standard deviation: %2.2g\n", stddev);
    -  qh_fprintf(fp, 9328, "  average partition balance: %2.2g\n",
    -          wval_(Wpbalance)/zval_(Zpbalance));
    -  stddev= qh_stddev(zval_(Zpbalance), wval_(Wpbalance),
    -                                 wval_(Wpbalance2), &ave);
    -  qh_fprintf(fp, 9329, "  partition standard deviation: %2.2g\n", stddev);
    -  }
    -#endif
    -  if (nummerged) {
    -    qh_fprintf(fp, 9330,"  Number of distance tests for merging: %d\n",zzval_(Zbestdist)+
    -          zzval_(Zcentrumtests)+zzval_(Zdistconvex)+zzval_(Zdistcheck)+
    -          zzval_(Zdistzero));
    -    qh_fprintf(fp, 9331,"  Number of distance tests for checking: %d\n",zzval_(Zcheckpart));
    -    qh_fprintf(fp, 9332,"  Number of merged facets: %d\n", nummerged);
    -  }
    -  if (!qh RANDOMoutside && qh QHULLfinished) {
    -    cpu= (float)qh hulltime;
    -    cpu /= (float)qh_SECticks;
    -    wval_(Wcpu)= cpu;
    -    qh_fprintf(fp, 9333, "  CPU seconds to compute hull (after input): %2.4g\n", cpu);
    -  }
    -  if (qh RERUN) {
    -    if (!qh PREmerge && !qh MERGEexact)
    -      qh_fprintf(fp, 9334, "  Percentage of runs with precision errors: %4.1f\n",
    -           zzval_(Zretry)*100.0/qh build_cnt);  /* careful of order */
    -  }else if (qh JOGGLEmax < REALmax/2) {
    -    if (zzval_(Zretry))
    -      qh_fprintf(fp, 9335, "  After %d retries, input joggled by: %2.2g\n",
    -         zzval_(Zretry), qh JOGGLEmax);
    -    else
    -      qh_fprintf(fp, 9336, "  Input joggled by: %2.2g\n", qh JOGGLEmax);
    -  }
    -  if (qh totarea != 0.0)
    -    qh_fprintf(fp, 9337, "  %s facet area:   %2.8g\n",
    -            zzval_(Ztotmerge) ? "Approximate" : "Total", qh totarea);
    -  if (qh totvol != 0.0)
    -    qh_fprintf(fp, 9338, "  %s volume:       %2.8g\n",
    -            zzval_(Ztotmerge) ? "Approximate" : "Total", qh totvol);
    -  if (qh MERGING) {
    -    qh_outerinner(NULL, &outerplane, &innerplane);
    -    if (outerplane > 2 * qh DISTround) {
    -      qh_fprintf(fp, 9339, "  Maximum distance of %spoint above facet: %2.2g",
    -            (qh QHULLfinished ? "" : "merged "), outerplane);
    -      ratio= outerplane/(qh ONEmerge + qh DISTround);
    -      /* don't report ratio if MINoutside is large */
    -      if (ratio > 0.05 && 2* qh ONEmerge > qh MINoutside && qh JOGGLEmax > REALmax/2)
    -        qh_fprintf(fp, 9340, " (%.1fx)\n", ratio);
    -      else
    -        qh_fprintf(fp, 9341, "\n");
    -    }
    -    if (innerplane < -2 * qh DISTround) {
    -      qh_fprintf(fp, 9342, "  Maximum distance of %svertex below facet: %2.2g",
    -            (qh QHULLfinished ? "" : "merged "), innerplane);
    -      ratio= -innerplane/(qh ONEmerge+qh DISTround);
    -      if (ratio > 0.05 && qh JOGGLEmax > REALmax/2)
    -        qh_fprintf(fp, 9343, " (%.1fx)\n", ratio);
    -      else
    -        qh_fprintf(fp, 9344, "\n");
    -    }
    -  }
    -  qh_fprintf(fp, 9345, "\n");
    -} /* printsummary */
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/libqhull.h b/scipy-0.10.1/scipy/spatial/qhull/src/libqhull.h
    deleted file mode 100644
    index 2bf4435f8d..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/libqhull.h
    +++ /dev/null
    @@ -1,1092 +0,0 @@
    -/*
      ---------------------------------
    -
    -   libqhull.h
    -   user-level header file for using qhull.a library
    -
    -   see qh-qhull.htm, qhull_a.h
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/libqhull.h#9 $$Change: 1172 $
    -   $DateTime: 2010/01/09 21:42:16 $$Author: bbarber $
    -
    -   NOTE: access to qh_qh is via the 'qh' macro.  This allows
    -   qh_qh to be either a pointer or a structure.  An example
    -   of using qh is "qh DROPdim" which accesses the DROPdim
    -   field of qh_qh.  Similarly, access to qh_qhstat is via
    -   the 'qhstat' macro.
    -
    -   includes function prototypes for libqhull.c, geom.c, global.c, io.c, user.c
    -
    -   use mem.h for mem.c
    -   use qset.h for qset.c
    -
    -   see unix.c for an example of using libqhull.h
    -
    -   recompile qhull if you change this file
    -*/
    -
    -#ifndef qhDEFlibqhull
    -#define qhDEFlibqhull 1
    -
    -/*=========================== -included files ==============*/
    -
    -#include "user.h"      /* user definable constants (e.g., qh_QHpointer) */
    -
    -#include 
    -#include 
    -#include 
    -#include 
    -
    -#if __MWERKS__ && __POWERPC__
    -#include  
    -#include  
    -#include        
    -#endif
    -
    -#ifndef __STDC__
    -#ifndef __cplusplus
    -#if     !_MSC_VER
    -#error  Neither __STDC__ nor __cplusplus is defined.  Please use strict ANSI C or C++ to compile
    -#error  Qhull.  You may need to turn off compiler extensions in your project configuration.  If
    -#error  your compiler is a standard C compiler, you can delete this warning from libqhull.h
    -#endif
    -#endif
    -#endif
    -
    -/*============ constants and basic types ====================*/
    -
    -extern const char *qh_version; /* defined in global.c */
    -
    -/*----------------------------------
    -
    -  coordT
    -    coordinates and coefficients are stored as realT (i.e., double)
    -
    -  notes:
    -    Qhull works well if realT is 'float'.  If so joggle (QJ) is not effective.
    -
    -    Could use 'float' for data and 'double' for calculations (realT vs. coordT)
    -      This requires many type casts, and adjusted error bounds.
    -      Also C compilers may do expressions in double anyway.
    -*/
    -#define coordT realT
    -
    -/*----------------------------------
    -
    -  pointT
    -    a point is an array of coordinates, usually qh.hull_dim
    -*/
    -#define pointT coordT
    -
    -/*----------------------------------
    -
    -  flagT
    -    Boolean flag as a bit
    -*/
    -#define flagT unsigned int
    -
    -/*----------------------------------
    -
    -  boolT
    -    boolean value, either True or False
    -
    -  notes:
    -    needed for portability
    -*/
    -#define boolT unsigned int
    -#ifdef False
    -#undef False
    -#endif
    -#ifdef True
    -#undef True
    -#endif
    -#define False 0
    -#define True 1
    -
    -/*----------------------------------
    -
    -  qh_CENTER
    -    to distinguish facet->center
    -*/
    -typedef enum
    -{
    -    qh_ASnone = 0, qh_ASvoronoi, qh_AScentrum
    -}
    -qh_CENTER;
    -
    -/*----------------------------------
    -
    -  qh_PRINT
    -    output formats for printing (qh.PRINTout).
    -    'Fa' 'FV' 'Fc' 'FC'
    -
    -
    -   notes:
    -   some of these names are similar to qh names.  The similar names are only
    -   used in switch statements in qh_printbegin() etc.
    -*/
    -typedef enum {qh_PRINTnone= 0,
    -  qh_PRINTarea, qh_PRINTaverage,           /* 'Fa' 'FV' 'Fc' 'FC' */
    -  qh_PRINTcoplanars, qh_PRINTcentrums,
    -  qh_PRINTfacets, qh_PRINTfacets_xridge,   /* 'f' 'FF' 'G' 'FI' 'Fi' 'Fn' */
    -  qh_PRINTgeom, qh_PRINTids, qh_PRINTinner, qh_PRINTneighbors,
    -  qh_PRINTnormals, qh_PRINTouter, qh_PRINTmaple, /* 'n' 'Fo' 'i' 'm' 'Fm' 'FM', 'o' */
    -  qh_PRINTincidences, qh_PRINTmathematica, qh_PRINTmerges, qh_PRINToff,
    -  qh_PRINToptions, qh_PRINTpointintersect, /* 'FO' 'Fp' 'FP' 'p' 'FQ' 'FS' */
    -  qh_PRINTpointnearest, qh_PRINTpoints, qh_PRINTqhull, qh_PRINTsize,
    -  qh_PRINTsummary, qh_PRINTtriangles,      /* 'Fs' 'Ft' 'Fv' 'FN' 'Fx' */
    -  qh_PRINTvertices, qh_PRINTvneighbors, qh_PRINTextremes,
    -  qh_PRINTEND} qh_PRINT;
    -
    -/*----------------------------------
    -
    -  qh_ALL
    -    argument flag for selecting everything
    -*/
    -#define qh_ALL      True
    -#define qh_NOupper  True     /* argument for qh_findbest */
    -#define qh_IScheckmax  True     /* argument for qh_findbesthorizon */
    -#define qh_ISnewfacets  True     /* argument for qh_findbest */
    -#define qh_RESETvisible  True     /* argument for qh_resetlists */
    -
    -/*----------------------------------
    -
    -  qh_ERR
    -    Qhull exit codes, for indicating errors
    -    See: MSG_ERROR and MSG_WARNING [user.h]
    -*/
    -#define qh_ERRnone  0    /* no error occurred during qhull */
    -#define qh_ERRinput 1    /* input inconsistency */
    -#define qh_ERRsingular 2 /* singular input data */
    -#define qh_ERRprec  3    /* precision error */
    -#define qh_ERRmem   4    /* insufficient memory, matches mem.h */
    -#define qh_ERRqhull 5    /* internal error detected, matches mem.h */
    -
    -/*----------------------------------
    -
    -qh_FILEstderr
    -Fake stderr to distinguish error output from normal output
    -For C++ interface.  Must redefine qh_fprintf_qhull
    -*/
    -#define qh_FILEstderr (FILE*)1
    -
    -/* ============ -structures- ====================
    -   each of the following structures is defined by a typedef
    -   all realT and coordT fields occur at the beginning of a structure
    -        (otherwise space may be wasted due to alignment)
    -   define all flags together and pack into 32-bit number
    -*/
    -
    -typedef struct vertexT vertexT;
    -typedef struct ridgeT ridgeT;
    -typedef struct facetT facetT;
    -#ifndef DEFsetT
    -#define DEFsetT 1
    -typedef struct setT setT;          /* defined in qset.h */
    -#endif
    -
    -#ifndef DEFqhstatT
    -#define DEFqhstatT 1
    -typedef struct qhstatT qhstatT;    /* defined in stat.h */
    -#endif
    -
    -/*----------------------------------
    -
    -  facetT
    -    defines a facet
    -
    -  notes:
    -   qhull() generates the hull as a list of facets.
    -
    -  topological information:
    -    f.previous,next     doubly-linked list of facets
    -    f.vertices          set of vertices
    -    f.ridges            set of ridges
    -    f.neighbors         set of neighbors
    -    f.toporient         True if facet has top-orientation (else bottom)
    -
    -  geometric information:
    -    f.offset,normal     hyperplane equation
    -    f.maxoutside        offset to outer plane -- all points inside
    -    f.center            centrum for testing convexity
    -    f.simplicial        True if facet is simplicial
    -    f.flipped           True if facet does not include qh.interior_point
    -
    -  for constructing hull:
    -    f.visible           True if facet on list of visible facets (will be deleted)
    -    f.newfacet          True if facet on list of newly created facets
    -    f.coplanarset       set of points coplanar with this facet
    -                        (includes near-inside points for later testing)
    -    f.outsideset        set of points outside of this facet
    -    f.furthestdist      distance to furthest point of outside set
    -    f.visitid           marks visited facets during a loop
    -    f.replace           replacement facet for to-be-deleted, visible facets
    -    f.samecycle,newcycle cycle of facets for merging into horizon facet
    -
    -  see below for other flags and fields
    -*/
    -struct facetT {
    -#if !qh_COMPUTEfurthest
    -  coordT   furthestdist;/* distance to furthest point of outsideset */
    -#endif
    -#if qh_MAXoutside
    -  coordT   maxoutside;  /* max computed distance of point to facet
    -                        Before QHULLfinished this is an approximation
    -                        since maxdist not always set for mergefacet
    -                        Actual outer plane is +DISTround and
    -                        computed outer plane is +2*DISTround */
    -#endif
    -  coordT   offset;      /* exact offset of hyperplane from origin */
    -  coordT  *normal;      /* normal of hyperplane, hull_dim coefficients */
    -                        /*   if tricoplanar, shared with a neighbor */
    -  union {               /* in order of testing */
    -   realT   area;        /* area of facet, only in io.c if  ->isarea */
    -   facetT *replace;     /*  replacement facet if ->visible and NEWfacets
    -                             is NULL only if qh_mergedegen_redundant or interior */
    -   facetT *samecycle;   /*  cycle of facets from the same visible/horizon intersection,
    -                             if ->newfacet */
    -   facetT *newcycle;    /*  in horizon facet, current samecycle of new facets */
    -   facetT *trivisible;  /* visible facet for ->tricoplanar facets during qh_triangulate() */
    -   facetT *triowner;    /* owner facet for ->tricoplanar, !isarea facets w/ ->keepcentrum */
    -  }f;
    -  coordT  *center;      /*  centrum for convexity, qh CENTERtype == qh_AScentrum */
    -                        /*  Voronoi center, qh CENTERtype == qh_ASvoronoi */
    -                        /*   if tricoplanar, shared with a neighbor */
    -  facetT  *previous;    /* previous facet in the facet_list */
    -  facetT  *next;        /* next facet in the facet_list */
    -  setT    *vertices;    /* vertices for this facet, inverse sorted by ID
    -                           if simplicial, 1st vertex was apex/furthest */
    -  setT    *ridges;      /* explicit ridges for nonsimplicial facets.
    -                           for simplicial facets, neighbors defines ridge */
    -  setT    *neighbors;   /* neighbors of the facet.  If simplicial, the kth
    -                           neighbor is opposite the kth vertex, and the first
    -                           neighbor is the horizon facet for the first vertex*/
    -  setT    *outsideset;  /* set of points outside this facet
    -                           if non-empty, last point is furthest
    -                           if NARROWhull, includes coplanars for partitioning*/
    -  setT    *coplanarset; /* set of points coplanar with this facet
    -                           > qh.min_vertex and <= facet->max_outside
    -                           a point is assigned to the furthest facet
    -                           if non-empty, last point is furthest away */
    -  unsigned visitid;     /* visit_id, for visiting all neighbors,
    -                           all uses are independent */
    -  unsigned id;          /* unique identifier from qh facet_id */
    -  unsigned nummerge:9;  /* number of merges */
    -#define qh_MAXnummerge 511 /*     2^9-1, 32 flags total, see "flags:" in io.c */
    -  flagT    tricoplanar:1; /* True if TRIangulate and simplicial and coplanar with a neighbor */
    -                          /*   all tricoplanars share the same ->center, ->normal, ->offset, ->maxoutside */
    -                          /*   all tricoplanars share the same apex */
    -                          /*   if ->degenerate, does not span facet (one logical ridge) */
    -                          /*   one tricoplanar has ->keepcentrum and ->coplanarset */
    -                          /*   during qh_triangulate, f.trivisible points to original facet */
    -  flagT    newfacet:1;  /* True if facet on qh newfacet_list (new or merged) */
    -  flagT    visible:1;   /* True if visible facet (will be deleted) */
    -  flagT    toporient:1; /* True if created with top orientation
    -                           after merging, use ridge orientation */
    -  flagT    simplicial:1;/* True if simplicial facet, ->ridges may be implicit */
    -  flagT    seen:1;      /* used to perform operations only once, like visitid */
    -  flagT    seen2:1;     /* used to perform operations only once, like visitid */
    -  flagT    flipped:1;   /* True if facet is flipped */
    -  flagT    upperdelaunay:1; /* True if facet is upper envelope of Delaunay triangulation */
    -  flagT    notfurthest:1; /* True if last point of outsideset is not furthest*/
    -
    -/*-------- flags primarily for output ---------*/
    -  flagT    good:1;      /* True if a facet marked good for output */
    -  flagT    isarea:1;    /* True if facet->f.area is defined */
    -
    -/*-------- flags for merging ------------------*/
    -  flagT    dupridge:1;  /* True if duplicate ridge in facet */
    -  flagT    mergeridge:1; /* True if facet or neighbor contains a qh_MERGEridge
    -                            ->normal defined (also defined for mergeridge2) */
    -  flagT    mergeridge2:1; /* True if neighbor contains a qh_MERGEridge (mark_dupridges */
    -  flagT    coplanar:1;  /* True if horizon facet is coplanar at last use */
    -  flagT     mergehorizon:1; /* True if will merge into horizon (->coplanar) */
    -  flagT     cycledone:1;/* True if mergecycle_all already done */
    -  flagT    tested:1;    /* True if facet convexity has been tested (false after merge */
    -  flagT    keepcentrum:1; /* True if keep old centrum after a merge, or marks owner for ->tricoplanar */
    -  flagT    newmerge:1;  /* True if facet is newly merged for reducevertices */
    -  flagT    degenerate:1; /* True if facet is degenerate (degen_mergeset or ->tricoplanar) */
    -  flagT    redundant:1;  /* True if facet is redundant (degen_mergeset) */
    -};
    -
    -
    -/*----------------------------------
    -
    -  ridgeT
    -    defines a ridge
    -
    -  notes:
    -  a ridge is hull_dim-1 simplex between two neighboring facets.  If the
    -  facets are non-simplicial, there may be more than one ridge between
    -  two facets.  E.G. a 4-d hypercube has two triangles between each pair
    -  of neighboring facets.
    -
    -  topological information:
    -    vertices            a set of vertices
    -    top,bottom          neighboring facets with orientation
    -
    -  geometric information:
    -    tested              True if ridge is clearly convex
    -    nonconvex           True if ridge is non-convex
    -*/
    -struct ridgeT {
    -  setT    *vertices;    /* vertices belonging to this ridge, inverse sorted by ID
    -                           NULL if a degen ridge (matchsame) */
    -  facetT  *top;         /* top facet this ridge is part of */
    -  facetT  *bottom;      /* bottom facet this ridge is part of */
    -  unsigned id:24;       /* unique identifier, =>room for 8 flags, bit field matches qh.ridge_id */
    -  flagT    seen:1;      /* used to perform operations only once */
    -  flagT    tested:1;    /* True when ridge is tested for convexity */
    -  flagT    nonconvex:1; /* True if getmergeset detected a non-convex neighbor
    -                           only one ridge between neighbors may have nonconvex */
    -};
    -
    -/*----------------------------------
    -
    -  vertexT
    -     defines a vertex
    -
    -  topological information:
    -    next,previous       doubly-linked list of all vertices
    -    neighbors           set of adjacent facets (only if qh.VERTEXneighbors)
    -
    -  geometric information:
    -    point               array of DIM3 coordinates
    -*/
    -struct vertexT {
    -  vertexT *next;        /* next vertex in vertex_list */
    -  vertexT *previous;    /* previous vertex in vertex_list */
    -  pointT  *point;       /* hull_dim coordinates (coordT) */
    -  setT    *neighbors;   /* neighboring facets of vertex, qh_vertexneighbors()
    -                           inits in io.c or after first merge */
    -  unsigned visitid:31;  /* for use with qh vertex_visit, size must match */
    -  flagT    seen2:1;     /* another seen flag */
    -  unsigned id:24;       /* unique identifier, bit field matches qh.vertex_id */
    -  unsigned dim:4;       /* dimension of point if non-zero, used by cpp */
    -                        /* =>room for 4 flags */
    -  flagT    seen:1;      /* used to perform operations only once */
    -  flagT    delridge:1;  /* vertex was part of a deleted ridge */
    -  flagT    deleted:1;   /* true if vertex on qh del_vertices */
    -  flagT    newlist:1;   /* true if vertex on qh newvertex_list */
    -};
    -
    -#define MAX_vdim 15  /* Maximum size of vertex->dim */
    -
    -/*======= -global variables -qh ============================*/
    -
    -/*----------------------------------
    -
    -  qh
    -   all global variables for qhull are in qh, qhmem, and qhstat
    -
    -  notes:
    -   qhmem is defined in mem.h, qhstat is defined in stat.h, qhrbox is defined in rboxpoints.h
    -   Access to qh_qh is via the "qh" macro.  See qh_QHpointer in user.h
    -
    -   All global variables for qhull are in qh, qhmem, and qhstat
    -   qh must be unique for each instance of qhull
    -   qhstat may be shared between qhull instances.
    -   qhmem may be shared across multiple instances of Qhull.
    -   Rbox uses global variables rbox_inuse and rbox, but does not persist data across calls.
    -
    -   notes:
    -   Qhull is not multithreaded.  Global state could be stored in thread-local storage.
    -*/
    -
    -extern int qhull_inuse;
    -
    -typedef struct qhT qhT;
    -#if qh_QHpointer
    -#define qh qh_qh->
    -extern qhT *qh_qh;     /* allocated in global.c */
    -#else
    -#define qh qh_qh.
    -extern qhT qh_qh;
    -#endif
    -
    -struct qhT {
    -
    -/*----------------------------------
    -
    -  qh constants
    -    configuration flags and constants for Qhull
    -
    -  notes:
    -    The user configures Qhull by defining flags.  They are
    -    copied into qh by qh_setflags().  qh-quick.htm#options defines the flags.
    -*/
    -  boolT ALLpoints;        /* true 'Qs' if search all points for initial simplex */
    -  boolT ANGLEmerge;       /* true 'Qa' if sort potential merges by angle */
    -  boolT APPROXhull;       /* true 'Wn' if MINoutside set */
    -  realT   MINoutside;     /*   'Wn' min. distance for an outside point */
    -  boolT ANNOTATEoutput;   /* true 'Ta' if annotate output with message codes */
    -  boolT ATinfinity;       /* true 'Qz' if point num_points-1 is "at-infinity"
    -                             for improving precision in Delaunay triangulations */
    -  boolT AVOIDold;         /* true 'Q4' if avoid old->new merges */
    -  boolT BESToutside;      /* true 'Qf' if partition points into best outsideset */
    -  boolT CDDinput;         /* true 'Pc' if input uses CDD format (1.0/offset first) */
    -  boolT CDDoutput;        /* true 'PC' if print normals in CDD format (offset first) */
    -  boolT CHECKfrequently;  /* true 'Tc' if checking frequently */
    -  realT premerge_cos;     /*   'A-n'   cos_max when pre merging */
    -  realT postmerge_cos;    /*   'An'    cos_max when post merging */
    -  boolT DELAUNAY;         /* true 'd' if computing DELAUNAY triangulation */
    -  boolT DOintersections;  /* true 'Gh' if print hyperplane intersections */
    -  int   DROPdim;          /* drops dim 'GDn' for 4-d -> 3-d output */
    -  boolT FORCEoutput;      /* true 'Po' if forcing output despite degeneracies */
    -  int   GOODpoint;        /* 1+n for 'QGn', good facet if visible/not(-) from point n*/
    -  pointT *GOODpointp;     /*   the actual point */
    -  boolT GOODthreshold;    /* true if qh lower_threshold/upper_threshold defined
    -                             false if qh SPLITthreshold */
    -  int   GOODvertex;       /* 1+n, good facet if vertex for point n */
    -  pointT *GOODvertexp;     /*   the actual point */
    -  boolT HALFspace;        /* true 'Hn,n,n' if halfspace intersection */
    -  int   IStracing;        /* trace execution, 0=none, 1=least, 4=most, -1=events */
    -  int   KEEParea;         /* 'PAn' number of largest facets to keep */
    -  boolT KEEPcoplanar;     /* true 'Qc' if keeping nearest facet for coplanar points */
    -  boolT KEEPinside;       /* true 'Qi' if keeping nearest facet for inside points
    -                              set automatically if 'd Qc' */
    -  int   KEEPmerge;        /* 'PMn' number of facets to keep with most merges */
    -  realT KEEPminArea;      /* 'PFn' minimum facet area to keep */
    -  realT MAXcoplanar;      /* 'Un' max distance below a facet to be coplanar*/
    -  boolT MERGEexact;       /* true 'Qx' if exact merges (coplanar, degen, dupridge, flipped) */
    -  boolT MERGEindependent; /* true 'Q2' if merging independent sets */
    -  boolT MERGING;          /* true if exact-, pre- or post-merging, with angle and centrum tests */
    -  realT   premerge_centrum;  /*   'C-n' centrum_radius when pre merging.  Default is round-off */
    -  realT   postmerge_centrum; /*   'Cn' centrum_radius when post merging.  Default is round-off */
    -  boolT MERGEvertices;    /* true 'Q3' if merging redundant vertices */
    -  realT MINvisible;       /* 'Vn' min. distance for a facet to be visible */
    -  boolT NOnarrow;         /* true 'Q10' if no special processing for narrow distributions */
    -  boolT NOnearinside;     /* true 'Q8' if ignore near-inside points when partitioning */
    -  boolT NOpremerge;       /* true 'Q0' if no defaults for C-0 or Qx */
    -  boolT ONLYgood;         /* true 'Qg' if process points with good visible or horizon facets */
    -  boolT ONLYmax;          /* true 'Qm' if only process points that increase max_outside */
    -  boolT PICKfurthest;     /* true 'Q9' if process furthest of furthest points*/
    -  boolT POSTmerge;        /* true if merging after buildhull (Cn or An) */
    -  boolT PREmerge;         /* true if merging during buildhull (C-n or A-n) */
    -                        /* NOTE: some of these names are similar to qh_PRINT names */
    -  boolT PRINTcentrums;    /* true 'Gc' if printing centrums */
    -  boolT PRINTcoplanar;    /* true 'Gp' if printing coplanar points */
    -  int   PRINTdim;         /* print dimension for Geomview output */
    -  boolT PRINTdots;        /* true 'Ga' if printing all points as dots */
    -  boolT PRINTgood;        /* true 'Pg' if printing good facets */
    -  boolT PRINTinner;       /* true 'Gi' if printing inner planes */
    -  boolT PRINTneighbors;   /* true 'PG' if printing neighbors of good facets */
    -  boolT PRINTnoplanes;    /* true 'Gn' if printing no planes */
    -  boolT PRINToptions1st;  /* true 'FO' if printing options to stderr */
    -  boolT PRINTouter;       /* true 'Go' if printing outer planes */
    -  boolT PRINTprecision;   /* false 'Pp' if not reporting precision problems */
    -  qh_PRINT PRINTout[qh_PRINTEND]; /* list of output formats to print */
    -  boolT PRINTridges;      /* true 'Gr' if print ridges */
    -  boolT PRINTspheres;     /* true 'Gv' if print vertices as spheres */
    -  boolT PRINTstatistics;  /* true 'Ts' if printing statistics to stderr */
    -  boolT PRINTsummary;     /* true 's' if printing summary to stderr */
    -  boolT PRINTtransparent; /* true 'Gt' if print transparent outer ridges */
    -  boolT PROJECTdelaunay;  /* true if DELAUNAY, no readpoints() and
    -                             need projectinput() for Delaunay in qh_init_B */
    -  int   PROJECTinput;     /* number of projected dimensions 'bn:0Bn:0' */
    -  boolT QUICKhelp;        /* true if quick help message for degen input */
    -  boolT RANDOMdist;       /* true if randomly change distplane and setfacetplane */
    -  realT RANDOMfactor;     /*    maximum random perturbation */
    -  realT RANDOMa;          /*    qh_randomfactor is randr * RANDOMa + RANDOMb */
    -  realT RANDOMb;
    -  boolT RANDOMoutside;    /* true if select a random outside point */
    -  int   REPORTfreq;       /* buildtracing reports every n facets */
    -  int   REPORTfreq2;      /* tracemerging reports every REPORTfreq/2 facets */
    -  int   RERUN;            /* 'TRn' rerun qhull n times (qh.build_cnt) */
    -  int   ROTATErandom;     /* 'QRn' seed, 0 time, >= rotate input */
    -  boolT SCALEinput;       /* true 'Qbk' if scaling input */
    -  boolT SCALElast;        /* true 'Qbb' if scale last coord to max prev coord */
    -  boolT SETroundoff;      /* true 'E' if qh DISTround is predefined */
    -  boolT SKIPcheckmax;     /* true 'Q5' if skip qh_check_maxout */
    -  boolT SKIPconvex;       /* true 'Q6' if skip convexity testing during pre-merge */
    -  boolT SPLITthresholds;  /* true if upper_/lower_threshold defines a region
    -                               used only for printing (!for qh ONLYgood) */
    -  int   STOPcone;         /* 'TCn' 1+n for stopping after cone for point n */
    -                          /*       also used by qh_build_withresart for err exit*/
    -  int   STOPpoint;        /* 'TVn' 'TV-n' 1+n for stopping after/before(-)
    -                                        adding point n */
    -  int   TESTpoints;       /* 'QTn' num of test points after qh.num_points.  Test points always coplanar. */
    -  boolT TESTvneighbors;   /*  true 'Qv' if test vertex neighbors at end */
    -  int   TRACElevel;       /* 'Tn' conditional IStracing level */
    -  int   TRACElastrun;     /*  qh.TRACElevel applies to last qh.RERUN */
    -  int   TRACEpoint;       /* 'TPn' start tracing when point n is a vertex */
    -  realT TRACEdist;        /* 'TWn' start tracing when merge distance too big */
    -  int   TRACEmerge;       /* 'TMn' start tracing before this merge */
    -  boolT TRIangulate;      /* true 'Qt' if triangulate non-simplicial facets */
    -  boolT TRInormals;       /* true 'Q11' if triangulate duplicates normals (sets Qt) */
    -  boolT UPPERdelaunay;    /* true 'Qu' if computing furthest-site Delaunay */
    -  boolT USEstdout;        /* true 'Tz' if using stdout instead of stderr */
    -  boolT VERIFYoutput;     /* true 'Tv' if verify output at end of qhull */
    -  boolT VIRTUALmemory;    /* true 'Q7' if depth-first processing in buildhull */
    -  boolT VORONOI;          /* true 'v' if computing Voronoi diagram */
    -
    -  /*--------input constants ---------*/
    -  realT AREAfactor;       /* 1/(hull_dim-1)! for converting det's to area */
    -  boolT DOcheckmax;       /* true if calling qh_check_maxout (qh_initqhull_globals) */
    -  char  *feasible_string;  /* feasible point 'Hn,n,n' for halfspace intersection */
    -  coordT *feasible_point;  /*    as coordinates, both malloc'd */
    -  boolT GETarea;          /* true 'Fa', 'FA', 'FS', 'PAn', 'PFn' if compute facet area/Voronoi volume in io.c */
    -  boolT KEEPnearinside;   /* true if near-inside points in coplanarset */
    -  int   hull_dim;         /* dimension of hull, set by initbuffers */
    -  int   input_dim;        /* dimension of input, set by initbuffers */
    -  int   num_points;       /* number of input points */
    -  pointT *first_point;    /* array of input points, see POINTSmalloc */
    -  boolT POINTSmalloc;     /*   true if qh first_point/num_points allocated */
    -  pointT *input_points;   /* copy of original qh.first_point for input points for qh_joggleinput */
    -  boolT input_malloc;     /* true if qh input_points malloc'd */
    -  char  qhull_command[256];/* command line that invoked this program */
    -  int   qhull_commandsiz2; /*    size of qhull_command at qh_clear_outputflags */
    -  char  rbox_command[256]; /* command line that produced the input points */
    -  char  qhull_options[512];/* descriptive list of options */
    -  int   qhull_optionlen;  /*    length of last line */
    -  int   qhull_optionsiz;  /*    size of qhull_options at qh_build_withrestart */
    -  int   qhull_optionsiz2; /*    size of qhull_options at qh_clear_outputflags */
    -  int   run_id;           /* non-zero, random identifier for this instance of qhull */
    -  boolT VERTEXneighbors;  /* true if maintaining vertex neighbors */
    -  boolT ZEROcentrum;      /* true if 'C-0' or 'C-0 Qx'.  sets ZEROall_ok */
    -  realT *upper_threshold; /* don't print if facet->normal[k]>=upper_threshold[k]
    -                             must set either GOODthreshold or SPLITthreshold
    -                             if Delaunay, default is 0.0 for upper envelope */
    -  realT *lower_threshold; /* don't print if facet->normal[k] <=lower_threshold[k] */
    -  realT *upper_bound;     /* scale point[k] to new upper bound */
    -  realT *lower_bound;     /* scale point[k] to new lower bound
    -                             project if both upper_ and lower_bound == 0 */
    -
    -/*----------------------------------
    -
    -  qh precision constants
    -    precision constants for Qhull
    -
    -  notes:
    -    qh_detroundoff() computes the maximum roundoff error for distance
    -    and other computations.  It also sets default values for the
    -    qh constants above.
    -*/
    -  realT ANGLEround;       /* max round off error for angles */
    -  realT centrum_radius;   /* max centrum radius for convexity (roundoff added) */
    -  realT cos_max;          /* max cosine for convexity (roundoff added) */
    -  realT DISTround;        /* max round off error for distances, 'E' overrides */
    -  realT MAXabs_coord;     /* max absolute coordinate */
    -  realT MAXlastcoord;     /* max last coordinate for qh_scalelast */
    -  realT MAXsumcoord;      /* max sum of coordinates */
    -  realT MAXwidth;         /* max rectilinear width of point coordinates */
    -  realT MINdenom_1;       /* min. abs. value for 1/x */
    -  realT MINdenom;         /*    use divzero if denominator < MINdenom */
    -  realT MINdenom_1_2;     /* min. abs. val for 1/x that allows normalization */
    -  realT MINdenom_2;       /*    use divzero if denominator < MINdenom_2 */
    -  realT MINlastcoord;     /* min. last coordinate for qh_scalelast */
    -  boolT NARROWhull;       /* set in qh_initialhull if angle < qh_MAXnarrow */
    -  realT *NEARzero;        /* hull_dim array for near zero in gausselim */
    -  realT NEARinside;       /* keep points for qh_check_maxout if close to facet */
    -  realT ONEmerge;         /* max distance for merging simplicial facets */
    -  realT outside_err;      /* application's epsilon for coplanar points
    -                             qh_check_bestdist() qh_check_points() reports error if point outside */
    -  realT WIDEfacet;        /* size of wide facet for skipping ridge in
    -                             area computation and locking centrum */
    -
    -/*----------------------------------
    -
    -  qh internal constants
    -    internal constants for Qhull
    -*/
    -  char qhull[sizeof("qhull")]; /* "qhull" for checking ownership while debugging */
    -  jmp_buf errexit;        /* exit label for qh_errexit, defined by setjmp() */
    -  char jmpXtra[40];       /* extra bytes in case jmp_buf is defined wrong by compiler */
    -  jmp_buf restartexit;    /* restart label for qh_errexit, defined by setjmp() */
    -  char jmpXtra2[40];      /* extra bytes in case jmp_buf is defined wrong by compiler*/
    -  FILE *fin;              /* pointer to input file, init by qh_meminit */
    -  FILE *fout;             /* pointer to output file */
    -  FILE *ferr;             /* pointer to error file */
    -  pointT *interior_point; /* center point of the initial simplex*/
    -  int normal_size;     /* size in bytes for facet normals and point coords*/
    -  int center_size;     /* size in bytes for Voronoi centers */
    -  int   TEMPsize;         /* size for small, temporary sets (in quick mem) */
    -
    -/*----------------------------------
    -
    -  qh facet and vertex lists
    -    defines lists of facets, new facets, visible facets, vertices, and
    -    new vertices.  Includes counts, next ids, and trace ids.
    -  see:
    -    qh_resetlists()
    -*/
    -  facetT *facet_list;     /* first facet */
    -  facetT  *facet_tail;     /* end of facet_list (dummy facet) */
    -  facetT *facet_next;     /* next facet for buildhull()
    -                             previous facets do not have outside sets
    -                             NARROWhull: previous facets may have coplanar outside sets for qh_outcoplanar */
    -  facetT *newfacet_list;  /* list of new facets to end of facet_list */
    -  facetT *visible_list;   /* list of visible facets preceeding newfacet_list,
    -                             facet->visible set */
    -  int       num_visible;  /* current number of visible facets */
    -  unsigned tracefacet_id;  /* set at init, then can print whenever */
    -  facetT *tracefacet;     /*   set in newfacet/mergefacet, undone in delfacet*/
    -  unsigned tracevertex_id;  /* set at buildtracing, can print whenever */
    -  vertexT *tracevertex;     /*   set in newvertex, undone in delvertex*/
    -  vertexT *vertex_list;     /* list of all vertices, to vertex_tail */
    -  vertexT  *vertex_tail;    /*      end of vertex_list (dummy vertex) */
    -  vertexT *newvertex_list; /* list of vertices in newfacet_list, to vertex_tail
    -                             all vertices have 'newlist' set */
    -  int   num_facets;       /* number of facets in facet_list
    -                             includes visble faces (num_visible) */
    -  int   num_vertices;     /* number of vertices in facet_list */
    -  int   num_outside;      /* number of points in outsidesets (for tracing and RANDOMoutside)
    -                               includes coplanar outsideset points for NARROWhull/qh_outcoplanar() */
    -  int   num_good;         /* number of good facets (after findgood_all) */
    -  unsigned facet_id;      /* ID of next, new facet from newfacet() */
    -  unsigned ridge_id:24;   /* ID of next, new ridge from newridge() */
    -  unsigned vertex_id:24;  /* ID of next, new vertex from newvertex() */
    -
    -/*----------------------------------
    -
    -  qh global variables
    -    defines minimum and maximum distances, next visit ids, several flags,
    -    and other global variables.
    -    initialize in qh_initbuild or qh_maxmin if used in qh_buildhull
    -*/
    -  unsigned long hulltime; /* ignore time to set up input and randomize */
    -                          /*   use unsigned to avoid wrap-around errors */
    -  boolT ALLOWrestart;     /* true if qh_precision can use qh.restartexit */
    -  int   build_cnt;        /* number of calls to qh_initbuild */
    -  qh_CENTER CENTERtype;   /* current type of facet->center, qh_CENTER */
    -  int   furthest_id;      /* pointid of furthest point, for tracing */
    -  facetT *GOODclosest;    /* closest facet to GOODthreshold in qh_findgood */
    -  boolT hasAreaVolume;    /* true if totarea, totvol was defined by qh_getarea */
    -  boolT hasTriangulation; /* true if triangulation created by qh_triangulate */
    -  realT JOGGLEmax;        /* set 'QJn' if randomly joggle input */
    -  boolT maxoutdone;       /* set qh_check_maxout(), cleared by qh_addpoint() */
    -  realT max_outside;      /* maximum distance from a point to a facet,
    -                               before roundoff, not simplicial vertices
    -                               actual outer plane is +DISTround and
    -                               computed outer plane is +2*DISTround */
    -  realT max_vertex;       /* maximum distance (>0) from vertex to a facet,
    -                               before roundoff, due to a merge */
    -  realT min_vertex;       /* minimum distance (<0) from vertex to a facet,
    -                               before roundoff, due to a merge
    -                               if qh.JOGGLEmax, qh_makenewplanes sets it
    -                               recomputed if qh.DOcheckmax, default -qh.DISTround */
    -  boolT NEWfacets;        /* true while visible facets invalid due to new or merge
    -                              from makecone/attachnewfacets to deletevisible */
    -  boolT findbestnew;      /* true if partitioning calls qh_findbestnew */
    -  boolT findbest_notsharp; /* true if new facets are at least 90 degrees */
    -  boolT NOerrexit;        /* true if qh.errexit is not available */
    -  realT PRINTcradius;     /* radius for printing centrums */
    -  realT PRINTradius;      /* radius for printing vertex spheres and points */
    -  boolT POSTmerging;      /* true when post merging */
    -  int   printoutvar;      /* temporary variable for qh_printbegin, etc. */
    -  int   printoutnum;      /* number of facets printed */
    -  boolT QHULLfinished;    /* True after qhull() is finished */
    -  realT totarea;          /* 'FA': total facet area computed by qh_getarea, hasAreaVolume */
    -  realT totvol;           /* 'FA': total volume computed by qh_getarea, hasAreaVolume */
    -  unsigned int visit_id;  /* unique ID for searching neighborhoods, */
    -  unsigned int vertex_visit:31; /* unique ID for searching vertices, reset with qh_buildtracing */
    -  boolT ZEROall_ok;       /* True if qh_checkzero always succeeds */
    -  boolT WAScoplanar;      /* True if qh_partitioncoplanar (qh_check_maxout) */
    -
    -/*----------------------------------
    -
    -  qh global sets
    -    defines sets for merging, initial simplex, hashing, extra input points,
    -    and deleted vertices
    -*/
    -  setT *facet_mergeset;   /* temporary set of merges to be done */
    -  setT *degen_mergeset;   /* temporary set of degenerate and redundant merges */
    -  setT *hash_table;       /* hash table for matching ridges in qh_matchfacets
    -                             size is setsize() */
    -  setT *other_points;     /* additional points */
    -  setT *del_vertices;     /* vertices to partition and delete with visible
    -                             facets.  Have deleted set for checkfacet */
    -
    -/*----------------------------------
    -
    -  qh global buffers
    -    defines buffers for maxtrix operations, input, and error messages
    -*/
    -  coordT *gm_matrix;      /* (dim+1)Xdim matrix for geom.c */
    -  coordT **gm_row;        /* array of gm_matrix rows */
    -  char* line;             /* malloc'd input line of maxline+1 chars */
    -  int maxline;
    -  coordT *half_space;     /* malloc'd input array for halfspace (qh normal_size+coordT) */
    -  coordT *temp_malloc;    /* malloc'd input array for points */
    -
    -/*----------------------------------
    -
    -  qh static variables
    -    defines static variables for individual functions
    -
    -  notes:
    -    do not use 'static' within a function.  Multiple instances of qhull
    -    may exist.
    -
    -    do not assume zero initialization, 'QPn' may cause a restart
    -*/
    -  boolT ERREXITcalled;    /* true during qh_errexit (prevents duplicate calls */
    -  boolT firstcentrum;     /* for qh_printcentrum */
    -  boolT old_randomdist;   /* save RANDOMdist flag during io, tracing, or statistics */
    -  setT *coplanarfacetset;  /* set of coplanar facets for searching qh_findbesthorizon() */
    -  realT last_low;         /* qh_scalelast parameters for qh_setdelaunay */
    -  realT last_high;
    -  realT last_newhigh;
    -  unsigned lastreport;    /* for qh_buildtracing */
    -  int mergereport;        /* for qh_tracemerging */
    -  qhstatT *old_qhstat;    /* for saving qh_qhstat in save_qhull() and UsingLibQhull.  Free with qh_free() */
    -  setT *old_tempstack;    /* for saving qhmem.tempstack in save_qhull */
    -  int   ridgeoutnum;      /* number of ridges for 4OFF output (qh_printbegin,etc) */
    -};
    -
    -/*=========== -macros- =========================*/
    -
    -/*----------------------------------
    -
    -  otherfacet_(ridge, facet)
    -    return neighboring facet for a ridge in facet
    -*/
    -#define otherfacet_(ridge, facet) \
    -                        (((ridge)->top == (facet)) ? (ridge)->bottom : (ridge)->top)
    -
    -/*----------------------------------
    -
    -  getid_(p)
    -    return int ID for facet, ridge, or vertex
    -    return -1 if NULL
    -*/
    -#define getid_(p)       ((p) ? (int)((p)->id) : -1)
    -
    -/*============== FORALL macros ===================*/
    -
    -/*----------------------------------
    -
    -  FORALLfacets { ... }
    -    assign 'facet' to each facet in qh.facet_list
    -
    -  notes:
    -    uses 'facetT *facet;'
    -    assumes last facet is a sentinel
    -
    -  see:
    -    FORALLfacet_( facetlist )
    -*/
    -#define FORALLfacets for (facet=qh facet_list;facet && facet->next;facet=facet->next)
    -
    -/*----------------------------------
    -
    -  FORALLpoints { ... }
    -    assign 'point' to each point in qh.first_point, qh.num_points
    -
    -  declare:
    -    coordT *point, *pointtemp;
    -*/
    -#define FORALLpoints FORALLpoint_(qh first_point, qh num_points)
    -
    -/*----------------------------------
    -
    -  FORALLpoint_( points, num) { ... }
    -    assign 'point' to each point in points array of num points
    -
    -  declare:
    -    coordT *point, *pointtemp;
    -*/
    -#define FORALLpoint_(points, num) for (point= (points), \
    -      pointtemp= (points)+qh hull_dim*(num); point < pointtemp; point += qh hull_dim)
    -
    -/*----------------------------------
    -
    -  FORALLvertices { ... }
    -    assign 'vertex' to each vertex in qh.vertex_list
    -
    -  declare:
    -    vertexT *vertex;
    -
    -  notes:
    -    assumes qh.vertex_list terminated with a sentinel
    -*/
    -#define FORALLvertices for (vertex=qh vertex_list;vertex && vertex->next;vertex= vertex->next)
    -
    -/*----------------------------------
    -
    -  FOREACHfacet_( facets ) { ... }
    -    assign 'facet' to each facet in facets
    -
    -  declare:
    -    facetT *facet, **facetp;
    -
    -  see:
    -    FOREACHsetelement_
    -*/
    -#define FOREACHfacet_(facets)    FOREACHsetelement_(facetT, facets, facet)
    -
    -/*----------------------------------
    -
    -  FOREACHneighbor_( facet ) { ... }
    -    assign 'neighbor' to each neighbor in facet->neighbors
    -
    -  FOREACHneighbor_( vertex ) { ... }
    -    assign 'neighbor' to each neighbor in vertex->neighbors
    -
    -  declare:
    -    facetT *neighbor, **neighborp;
    -
    -  see:
    -    FOREACHsetelement_
    -*/
    -#define FOREACHneighbor_(facet)  FOREACHsetelement_(facetT, facet->neighbors, neighbor)
    -
    -/*----------------------------------
    -
    -  FOREACHpoint_( points ) { ... }
    -    assign 'point' to each point in points set
    -
    -  declare:
    -    pointT *point, **pointp;
    -
    -  see:
    -    FOREACHsetelement_
    -*/
    -#define FOREACHpoint_(points)    FOREACHsetelement_(pointT, points, point)
    -
    -/*----------------------------------
    -
    -  FOREACHridge_( ridges ) { ... }
    -    assign 'ridge' to each ridge in ridges set
    -
    -  declare:
    -    ridgeT *ridge, **ridgep;
    -
    -  see:
    -    FOREACHsetelement_
    -*/
    -#define FOREACHridge_(ridges)    FOREACHsetelement_(ridgeT, ridges, ridge)
    -
    -/*----------------------------------
    -
    -  FOREACHvertex_( vertices ) { ... }
    -    assign 'vertex' to each vertex in vertices set
    -
    -  declare:
    -    vertexT *vertex, **vertexp;
    -
    -  see:
    -    FOREACHsetelement_
    -*/
    -#define FOREACHvertex_(vertices) FOREACHsetelement_(vertexT, vertices,vertex)
    -
    -/*----------------------------------
    -
    -  FOREACHfacet_i_( facets ) { ... }
    -    assign 'facet' and 'facet_i' for each facet in facets set
    -
    -  declare:
    -    facetT *facet;
    -    int     facet_n, facet_i;
    -
    -  see:
    -    FOREACHsetelement_i_
    -*/
    -#define FOREACHfacet_i_(facets)    FOREACHsetelement_i_(facetT, facets, facet)
    -
    -/*----------------------------------
    -
    -  FOREACHneighbor_i_( facet ) { ... }
    -    assign 'neighbor' and 'neighbor_i' for each neighbor in facet->neighbors
    -
    -  FOREACHneighbor_i_( vertex ) { ... }
    -    assign 'neighbor' and 'neighbor_i' for each neighbor in vertex->neighbors
    -
    -  declare:
    -    facetT *neighbor;
    -    int     neighbor_n, neighbor_i;
    -
    -  see:
    -    FOREACHsetelement_i_
    -*/
    -#define FOREACHneighbor_i_(facet)  FOREACHsetelement_i_(facetT, facet->neighbors, neighbor)
    -
    -/*----------------------------------
    -
    -  FOREACHpoint_i_( points ) { ... }
    -    assign 'point' and 'point_i' for each point in points set
    -
    -  declare:
    -    pointT *point;
    -    int     point_n, point_i;
    -
    -  see:
    -    FOREACHsetelement_i_
    -*/
    -#define FOREACHpoint_i_(points)    FOREACHsetelement_i_(pointT, points, point)
    -
    -/*----------------------------------
    -
    -  FOREACHridge_i_( ridges ) { ... }
    -    assign 'ridge' and 'ridge_i' for each ridge in ridges set
    -
    -  declare:
    -    ridgeT *ridge;
    -    int     ridge_n, ridge_i;
    -
    -  see:
    -    FOREACHsetelement_i_
    -*/
    -#define FOREACHridge_i_(ridges)    FOREACHsetelement_i_(ridgeT, ridges, ridge)
    -
    -/*----------------------------------
    -
    -  FOREACHvertex_i_( vertices ) { ... }
    -    assign 'vertex' and 'vertex_i' for each vertex in vertices set
    -
    -  declare:
    -    vertexT *vertex;
    -    int     vertex_n, vertex_i;
    -
    -  see:
    -    FOREACHsetelement_i_
    -*/
    -#define FOREACHvertex_i_(vertices) FOREACHsetelement_i_(vertexT, vertices,vertex)
    -
    -/********* -libqhull.c prototypes (duplicated from qhull_a.h) **********************/
    -
    -void    qh_qhull(void);
    -boolT   qh_addpoint(pointT *furthest, facetT *facet, boolT checkdist);
    -void    qh_printsummary(FILE *fp);
    -
    -/********* -user.c prototypes (alphabetical) **********************/
    -
    -void    qh_errexit(int exitcode, facetT *facet, ridgeT *ridge);
    -void    qh_errprint(const char* string, facetT *atfacet, facetT *otherfacet, ridgeT *atridge, vertexT *atvertex);
    -int     qh_new_qhull(int dim, int numpoints, coordT *points, boolT ismalloc,
    -                char *qhull_cmd, FILE *outfile, FILE *errfile);
    -void    qh_printfacetlist(facetT *facetlist, setT *facets, boolT printall);
    -void    qh_printhelp_degenerate(FILE *fp);
    -void    qh_printhelp_narrowhull(FILE *fp, realT minangle);
    -void    qh_printhelp_singular(FILE *fp);
    -void    qh_user_memsizes(void);
    -
    -/********* -usermem.c prototypes (alphabetical) **********************/
    -void    qh_exit(int exitcode);
    -void    qh_free(void *mem);
    -void   *qh_malloc(size_t size);
    -
    -/********* -userprintf.c prototypes (alphabetical) **********************/
    -void    qh_fprintf(FILE *fp, int msgcode, const char *fmt, ... );
    -void    qh_fprintf_rbox(FILE *fp, int msgcode, const char *fmt, ... );
    -
    -/***** -geom.c/geom2.c/random.c prototypes (duplicated from geom.h, random.h) ****************/
    -
    -facetT *qh_findbest(pointT *point, facetT *startfacet,
    -                     boolT bestoutside, boolT newfacets, boolT noupper,
    -                     realT *dist, boolT *isoutside, int *numpart);
    -facetT *qh_findbestnew(pointT *point, facetT *startfacet,
    -                     realT *dist, boolT bestoutside, boolT *isoutside, int *numpart);
    -boolT   qh_gram_schmidt(int dim, realT **rows);
    -void    qh_outerinner(facetT *facet, realT *outerplane, realT *innerplane);
    -void    qh_printsummary(FILE *fp);
    -void    qh_projectinput(void);
    -void    qh_randommatrix(realT *buffer, int dim, realT **row);
    -void    qh_rotateinput(realT **rows);
    -void    qh_scaleinput(void);
    -void    qh_setdelaunay(int dim, int count, pointT *points);
    -coordT  *qh_sethalfspace_all(int dim, int count, coordT *halfspaces, pointT *feasible);
    -
    -/***** -global.c prototypes (alphabetical) ***********************/
    -
    -unsigned long qh_clock(void);
    -void    qh_checkflags(char *command, char *hiddenflags);
    -void    qh_clear_outputflags(void);
    -void    qh_freebuffers(void);
    -void    qh_freeqhull(boolT allmem);
    -void    qh_freeqhull2(boolT allmem);
    -void    qh_init_A(FILE *infile, FILE *outfile, FILE *errfile, int argc, char *argv[]);
    -void    qh_init_B(coordT *points, int numpoints, int dim, boolT ismalloc);
    -void    qh_init_qhull_command(int argc, char *argv[]);
    -void    qh_initbuffers(coordT *points, int numpoints, int dim, boolT ismalloc);
    -void    qh_initflags(char *command);
    -void    qh_initqhull_buffers(void);
    -void    qh_initqhull_globals(coordT *points, int numpoints, int dim, boolT ismalloc);
    -void    qh_initqhull_mem(void);
    -void    qh_initqhull_outputflags(void);
    -void    qh_initqhull_start(FILE *infile, FILE *outfile, FILE *errfile);
    -void    qh_initqhull_start2(FILE *infile, FILE *outfile, FILE *errfile);
    -void    qh_initthresholds(char *command);
    -void    qh_option(const char *option, int *i, realT *r);
    -#if qh_QHpointer
    -void    qh_restore_qhull(qhT **oldqh);
    -qhT    *qh_save_qhull(void);
    -#endif
    -
    -/***** -io.c prototypes (duplicated from io.h) ***********************/
    -
    -void    dfacet( unsigned id);
    -void    dvertex( unsigned id);
    -void    qh_printneighborhood(FILE *fp, qh_PRINT format, facetT *facetA, facetT *facetB, boolT printall);
    -void    qh_produce_output(void);
    -coordT *qh_readpoints(int *numpoints, int *dimension, boolT *ismalloc);
    -
    -
    -/********* -mem.c prototypes (duplicated from mem.h) **********************/
    -
    -void qh_meminit(FILE *ferr);
    -void qh_memfreeshort(int *curlong, int *totlong);
    -
    -/********* -poly.c/poly2.c prototypes (duplicated from poly.h) **********************/
    -
    -void    qh_check_output(void);
    -void    qh_check_points(void);
    -setT   *qh_facetvertices(facetT *facetlist, setT *facets, boolT allfacets);
    -facetT *qh_findbestfacet(pointT *point, boolT bestoutside,
    -           realT *bestdist, boolT *isoutside);
    -vertexT *qh_nearvertex(facetT *facet, pointT *point, realT *bestdistp);
    -pointT *qh_point(int id);
    -setT   *qh_pointfacet(void /*qh.facet_list*/);
    -int     qh_pointid(pointT *point);
    -setT   *qh_pointvertex(void /*qh.facet_list*/);
    -void    qh_setvoronoi_all(void);
    -void    qh_triangulate(void /*qh facet_list*/);
    -
    -/********* -rboxpoints.c prototypes **********************/
    -int     qh_rboxpoints(FILE* fout, FILE* ferr, char* rbox_command);
    -void    qh_errexit_rbox(int exitcode);
    -
    -/********* -stat.c prototypes (duplicated from stat.h) **********************/
    -
    -void    qh_collectstatistics(void);
    -void    qh_printallstatistics(FILE *fp, const char *string);
    -
    -#endif /* qhDEFlibqhull */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/mem.c b/scipy-0.10.1/scipy/spatial/qhull/src/mem.c
    deleted file mode 100644
    index c1a533b3a7..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/mem.c
    +++ /dev/null
    @@ -1,539 +0,0 @@
    -/*
      ---------------------------------
    -
    -  mem.c
    -    memory management routines for qhull
    -
    -  This is a standalone program.
    -
    -  To initialize memory:
    -
    -    qh_meminit(stderr);
    -    qh_meminitbuffers(qh IStracing, qh_MEMalign, 7, qh_MEMbufsize,qh_MEMinitbuf);
    -    qh_memsize((int)sizeof(facetT));
    -    qh_memsize((int)sizeof(facetT));
    -    ...
    -    qh_memsetup();
    -
    -  To free up all memory buffers:
    -    qh_memfreeshort(&curlong, &totlong);
    -
    -  if qh_NOmem,
    -    malloc/free is used instead of mem.c
    -
    -  notes:
    -    uses Quickfit algorithm (freelists for commonly allocated sizes)
    -    assumes small sizes for freelists (it discards the tail of memory buffers)
    -
    -  see:
    -    qh-mem.htm and mem.h
    -    global.c (qh_initbuffers) for an example of using mem.c
    -
    -  copyright (c) 1993-2010 The Geometry Center.
    -  $Id: //product/qhull/main/rel/src/mem.c#32 $$Change: 1164 $
    -  $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#include "mem.h"
    -#include 
    -#include 
    -#include 
    -
    -#ifndef qhDEFlibqhull
    -typedef struct ridgeT ridgeT;
    -typedef struct facetT facetT;
    -#ifdef _MSC_VER  /* Microsoft Visual C++ -- warning level 4 */
    -#pragma warning( disable : 4127)  /* conditional expression is constant */
    -#pragma warning( disable : 4706)  /* assignment within conditional function */
    -#endif
    -void    qh_errexit(int exitcode, facetT *, ridgeT *);
    -void    qh_exit(int exitcode);
    -void    qh_fprintf(FILE *fp, int msgcode, const char *fmt, ... );
    -void    qh_free(void *mem);
    -void   *qh_malloc(size_t size);
    -#endif
    -
    -/*============ -global data structure ==============
    -    see mem.h for definition
    -*/
    -
    -qhmemT qhmem= {0,0,0,0,0,0,0,0,0,0,0,
    -               0,0,0,0,0,0,0,0,0,0,0,
    -               0,0,0,0,0,0,0};     /* remove "= {0}" if this causes a compiler error */
    -
    -#ifndef qh_NOmem
    -
    -/*============= internal functions ==============*/
    -
    -static int qh_intcompare(const void *i, const void *j);
    -
    -/*========== functions in alphabetical order ======== */
    -
    -/*---------------------------------
    -
    -  qh_intcompare( i, j )
    -    used by qsort and bsearch to compare two integers
    -*/
    -static int qh_intcompare(const void *i, const void *j) {
    -  return(*((const int *)i) - *((const int *)j));
    -} /* intcompare */
    -
    -
    -/*----------------------------------
    -
    -  qh_memalloc( insize )
    -    returns object of insize bytes
    -    qhmem is the global memory structure
    -
    -  returns:
    -    pointer to allocated memory
    -    errors if insufficient memory
    -
    -  notes:
    -    use explicit type conversion to avoid type warnings on some compilers
    -    actual object may be larger than insize
    -    use qh_memalloc_() for inline code for quick allocations
    -    logs allocations if 'T5'
    -
    -  design:
    -    if size < qhmem.LASTsize
    -      if qhmem.freelists[size] non-empty
    -        return first object on freelist
    -      else
    -        round up request to size of qhmem.freelists[size]
    -        allocate new allocation buffer if necessary
    -        allocate object from allocation buffer
    -    else
    -      allocate object with qh_malloc() in user.c
    -*/
    -void *qh_memalloc(int insize) {
    -  void **freelistp, *newbuffer;
    -  int idx, size, n;
    -  int outsize, bufsize;
    -  void *object;
    -
    -  if (insize <= qhmem.LASTsize) {
    -    idx= qhmem.indextable[insize];
    -    outsize= qhmem.sizetable[idx];
    -    qhmem.totshort += outsize;
    -    freelistp= qhmem.freelists+idx;
    -    if ((object= *freelistp)) {
    -      qhmem.cntquick++;
    -      qhmem.totfree -= outsize;
    -      *freelistp= *((void **)*freelistp);  /* replace freelist with next object */
    -#ifdef qh_TRACEshort
    -      n= qhmem.cntshort+qhmem.cntquick+qhmem.freeshort;
    -      if (qhmem.IStracing >= 5)
    -          qh_fprintf(qhmem.ferr, 8141, "qh_mem %p n %8d alloc quick: %d bytes (tot %d cnt %d)\n", object, n, outsize, qhmem.totshort, qhmem.cntshort+qhmem.cntquick-qhmem.freeshort);
    -#endif
    -      return(object);
    -    }else {
    -      qhmem.cntshort++;
    -      if (outsize > qhmem .freesize) {
    -        qhmem .totdropped += qhmem .freesize;
    -        if (!qhmem.curbuffer)
    -          bufsize= qhmem.BUFinit;
    -        else
    -          bufsize= qhmem.BUFsize;
    -        if (!(newbuffer= qh_malloc((size_t)bufsize))) {
    -          qh_fprintf(qhmem.ferr, 6080, "qhull error (qh_memalloc): insufficient memory to allocate short memory buffer (%d bytes)\n", bufsize);
    -          qh_errexit(qhmem_ERRmem, NULL, NULL);
    -        }
    -        *((void **)newbuffer)= qhmem.curbuffer;  /* prepend newbuffer to curbuffer
    -                                                    list */
    -        qhmem.curbuffer= newbuffer;
    -        size= (sizeof(void **) + qhmem.ALIGNmask) & ~qhmem.ALIGNmask;
    -        qhmem.freemem= (void *)((char *)newbuffer+size);
    -        qhmem.freesize= bufsize - size;
    -        qhmem.totbuffer += bufsize - size; /* easier to check */
    -        /* Periodically test totbuffer.  It matches at beginning and exit of every call */
    -        n = qhmem.totshort + qhmem.totfree + qhmem.totdropped + qhmem.freesize - outsize;
    -        if (qhmem.totbuffer != n) {
    -            qh_fprintf(qhmem.ferr, 6212, "qh_memalloc internal error: short totbuffer %d != totshort+totfree... %d\n", qhmem.totbuffer, n);
    -            qh_errexit(qhmem_ERRmem, NULL, NULL);
    -        }
    -      }
    -      object= qhmem.freemem;
    -      qhmem.freemem= (void *)((char *)qhmem.freemem + outsize);
    -      qhmem.freesize -= outsize;
    -      qhmem.totunused += outsize - insize;
    -#ifdef qh_TRACEshort
    -      n= qhmem.cntshort+qhmem.cntquick+qhmem.freeshort;
    -      if (qhmem.IStracing >= 5)
    -          qh_fprintf(qhmem.ferr, 8140, "qh_mem %p n %8d alloc short: %d bytes (tot %d cnt %d)\n", object, n, outsize, qhmem.totshort, qhmem.cntshort+qhmem.cntquick-qhmem.freeshort);
    -#endif
    -      return object;
    -    }
    -  }else {                     /* long allocation */
    -    if (!qhmem.indextable) {
    -      qh_fprintf(qhmem.ferr, 6081, "qhull internal error (qh_memalloc): qhmem has not been initialized.\n");
    -      qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -    }
    -    outsize= insize;
    -    qhmem .cntlong++;
    -    qhmem .totlong += outsize;
    -    if (qhmem.maxlong < qhmem.totlong)
    -      qhmem.maxlong= qhmem.totlong;
    -    if (!(object= qh_malloc((size_t)outsize))) {
    -      qh_fprintf(qhmem.ferr, 6082, "qhull error (qh_memalloc): insufficient memory to allocate %d bytes\n", outsize);
    -      qh_errexit(qhmem_ERRmem, NULL, NULL);
    -    }
    -    if (qhmem.IStracing >= 5)
    -      qh_fprintf(qhmem.ferr, 8057, "qh_mem %p n %8d alloc long: %d bytes (tot %d cnt %d)\n", object, qhmem.cntlong+qhmem.freelong, outsize, qhmem.totlong, qhmem.cntlong-qhmem.freelong);
    -  }
    -  return(object);
    -} /* memalloc */
    -
    -
    -/*----------------------------------
    -
    -  qh_memfree( object, insize )
    -    free up an object of size bytes
    -    size is insize from qh_memalloc
    -
    -  notes:
    -    object may be NULL
    -    type checking warns if using (void **)object
    -    use qh_memfree_() for quick free's of small objects
    -
    -  design:
    -    if size <= qhmem.LASTsize
    -      append object to corresponding freelist
    -    else
    -      call qh_free(object)
    -*/
    -void qh_memfree(void *object, int insize) {
    -  void **freelistp;
    -  int idx, outsize;
    -
    -  if (!object)
    -    return;
    -  if (insize <= qhmem.LASTsize) {
    -    qhmem .freeshort++;
    -    idx= qhmem.indextable[insize];
    -    outsize= qhmem.sizetable[idx];
    -    qhmem .totfree += outsize;
    -    qhmem .totshort -= outsize;
    -    freelistp= qhmem.freelists + idx;
    -    *((void **)object)= *freelistp;
    -    *freelistp= object;
    -#ifdef qh_TRACEshort
    -    idx= qhmem.cntshort+qhmem.cntquick+qhmem.freeshort;
    -    if (qhmem.IStracing >= 5)
    -        qh_fprintf(qhmem.ferr, 8142, "qh_mem %p n %8d free short: %d bytes (tot %d cnt %d)\n", object, idx, outsize, qhmem.totshort, qhmem.cntshort+qhmem.cntquick-qhmem.freeshort);
    -#endif
    -  }else {
    -    qhmem .freelong++;
    -    qhmem .totlong -= insize;
    -    qh_free(object);
    -    if (qhmem.IStracing >= 5)
    -      qh_fprintf(qhmem.ferr, 8058, "qh_mem %p n %8d free long: %d bytes (tot %d cnt %d)\n", object, qhmem.cntlong+qhmem.freelong, insize, qhmem.totlong, qhmem.cntlong-qhmem.freelong);
    -  }
    -} /* memfree */
    -
    -
    -/*---------------------------------
    -
    -  qh_memfreeshort( curlong, totlong )
    -    frees up all short and qhmem memory allocations
    -
    -  returns:
    -    number and size of current long allocations
    -
    -  see:
    -    qh_freeqhull(allMem)
    -    qh_memtotal(curlong, totlong, curshort, totshort, maxlong, totbuffer);
    -*/
    -void qh_memfreeshort(int *curlong, int *totlong) {
    -  void *buffer, *nextbuffer;
    -  FILE *ferr;
    -
    -  *curlong= qhmem .cntlong - qhmem .freelong;
    -  *totlong= qhmem .totlong;
    -  for (buffer= qhmem.curbuffer; buffer; buffer= nextbuffer) {
    -    nextbuffer= *((void **) buffer);
    -    qh_free(buffer);
    -  }
    -  qhmem.curbuffer= NULL;
    -  if (qhmem .LASTsize) {
    -    qh_free(qhmem .indextable);
    -    qh_free(qhmem .freelists);
    -    qh_free(qhmem .sizetable);
    -  }
    -  ferr= qhmem.ferr;
    -  memset((char *)&qhmem, 0, sizeof(qhmem));  /* every field is 0, FALSE, NULL */
    -  qhmem.ferr= ferr;
    -} /* memfreeshort */
    -
    -
    -/*----------------------------------
    -
    -  qh_meminit( ferr )
    -    initialize qhmem and test sizeof( void*)
    -*/
    -void qh_meminit(FILE *ferr) {
    -
    -  memset((char *)&qhmem, 0, sizeof(qhmem));  /* every field is 0, FALSE, NULL */
    -  qhmem.ferr= ferr;
    -  if (sizeof(void*) < sizeof(int)) {
    -    qh_fprintf(ferr, 6083, "qhull internal error (qh_meminit): sizeof(void*) %d < sizeof(int) %d.  qset.c will not work\n", (int)sizeof(void*), (int)sizeof(int));
    -    qh_exit(qhmem_ERRqhull);  /* can not use qh_errexit() */
    -  }
    -  if (sizeof(void*) > sizeof(ptr_intT)) {
    -      qh_fprintf(ferr, 6084, "qhull internal error (qh_meminit): sizeof(void*) %d > sizeof(ptr_intT) %d. Change ptr_intT in mem.h to 'long long'\n", (int)sizeof(void*), (int)sizeof(ptr_intT));
    -      qh_exit(qhmem_ERRqhull);  /* can not use qh_errexit() */
    -  }
    -} /* meminit */
    -
    -/*---------------------------------
    -
    -  qh_meminitbuffers( tracelevel, alignment, numsizes, bufsize, bufinit )
    -    initialize qhmem
    -    if tracelevel >= 5, trace memory allocations
    -    alignment= desired address alignment for memory allocations
    -    numsizes= number of freelists
    -    bufsize=  size of additional memory buffers for short allocations
    -    bufinit=  size of initial memory buffer for short allocations
    -*/
    -void qh_meminitbuffers(int tracelevel, int alignment, int numsizes, int bufsize, int bufinit) {
    -
    -  qhmem.IStracing= tracelevel;
    -  qhmem.NUMsizes= numsizes;
    -  qhmem.BUFsize= bufsize;
    -  qhmem.BUFinit= bufinit;
    -  qhmem.ALIGNmask= alignment-1;
    -  if (qhmem.ALIGNmask & ~qhmem.ALIGNmask) {
    -    qh_fprintf(qhmem.ferr, 6085, "qhull internal error (qh_meminit): memory alignment %d is not a power of 2\n", alignment);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  qhmem.sizetable= (int *) calloc((size_t)numsizes, sizeof(int));
    -  qhmem.freelists= (void **) calloc((size_t)numsizes, sizeof(void *));
    -  if (!qhmem.sizetable || !qhmem.freelists) {
    -    qh_fprintf(qhmem.ferr, 6086, "qhull error (qh_meminit): insufficient memory\n");
    -    qh_errexit(qhmem_ERRmem, NULL, NULL);
    -  }
    -  if (qhmem.IStracing >= 1)
    -    qh_fprintf(qhmem.ferr, 8059, "qh_meminitbuffers: memory initialized with alignment %d\n", alignment);
    -} /* meminitbuffers */
    -
    -/*---------------------------------
    -
    -  qh_memsetup()
    -    set up memory after running memsize()
    -*/
    -void qh_memsetup(void) {
    -  int k,i;
    -
    -  qsort(qhmem.sizetable, (size_t)qhmem.TABLEsize, sizeof(int), qh_intcompare);
    -  qhmem.LASTsize= qhmem.sizetable[qhmem.TABLEsize-1];
    -  if (qhmem .LASTsize >= qhmem .BUFsize || qhmem.LASTsize >= qhmem .BUFinit) {
    -    qh_fprintf(qhmem.ferr, 6087, "qhull error (qh_memsetup): largest mem size %d is >= buffer size %d or initial buffer size %d\n",
    -            qhmem .LASTsize, qhmem .BUFsize, qhmem .BUFinit);
    -    qh_errexit(qhmem_ERRmem, NULL, NULL);
    -  }
    -  if (!(qhmem.indextable= (int *)qh_malloc((qhmem.LASTsize+1) * sizeof(int)))) {
    -    qh_fprintf(qhmem.ferr, 6088, "qhull error (qh_memsetup): insufficient memory\n");
    -    qh_errexit(qhmem_ERRmem, NULL, NULL);
    -  }
    -  for (k=qhmem.LASTsize+1; k--; )
    -    qhmem.indextable[k]= k;
    -  i= 0;
    -  for (k=0; k <= qhmem.LASTsize; k++) {
    -    if (qhmem.indextable[k] <= qhmem.sizetable[i])
    -      qhmem.indextable[k]= i;
    -    else
    -      qhmem.indextable[k]= ++i;
    -  }
    -} /* memsetup */
    -
    -/*---------------------------------
    -
    -  qh_memsize( size )
    -    define a free list for this size
    -*/
    -void qh_memsize(int size) {
    -  int k;
    -
    -  if (qhmem .LASTsize) {
    -    qh_fprintf(qhmem.ferr, 6089, "qhull error (qh_memsize): called after qhmem_setup\n");
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  size= (size + qhmem.ALIGNmask) & ~qhmem.ALIGNmask;
    -  for (k=qhmem.TABLEsize; k--; ) {
    -    if (qhmem.sizetable[k] == size)
    -      return;
    -  }
    -  if (qhmem.TABLEsize < qhmem.NUMsizes)
    -    qhmem.sizetable[qhmem.TABLEsize++]= size;
    -  else
    -    qh_fprintf(qhmem.ferr, 7060, "qhull warning (memsize): free list table has room for only %d sizes\n", qhmem.NUMsizes);
    -} /* memsize */
    -
    -
    -/*---------------------------------
    -
    -  qh_memstatistics( fp )
    -    print out memory statistics
    -
    -    Verifies that qhmem.totfree == sum of freelists
    -*/
    -void qh_memstatistics(FILE *fp) {
    -  int i, count, totfree= 0;
    -  void *object;
    -
    -  for (i=0; i < qhmem.TABLEsize; i++) {
    -    count=0;
    -    for (object= qhmem .freelists[i]; object; object= *((void **)object))
    -      count++;
    -    totfree += qhmem.sizetable[i] * count;
    -  }
    -  if (totfree != qhmem .totfree) {
    -      qh_fprintf(qhmem.ferr, 6211, "qh_memstatistics internal error: totfree %d not equal to freelist total %d\n", qhmem.totfree, totfree);
    -      qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  qh_fprintf(fp, 9278, "\nmemory statistics:\n\
    -%7d quick allocations\n\
    -%7d short allocations\n\
    -%7d long allocations\n\
    -%7d short frees\n\
    -%7d long frees\n\
    -%7d bytes of short memory in use\n\
    -%7d bytes of short memory in freelists\n\
    -%7d bytes of dropped short memory\n\
    -%7d bytes of unused short memory (estimated)\n\
    -%7d bytes of long memory allocated (max, except for input)\n\
    -%7d bytes of long memory in use (in %d pieces)\n\
    -%7d bytes of short memory buffers (minus links)\n\
    -%7d bytes per short memory buffer (initially %d bytes)\n",
    -           qhmem .cntquick, qhmem .cntshort, qhmem .cntlong,
    -           qhmem .freeshort, qhmem .freelong,
    -           qhmem .totshort, qhmem .totfree,
    -           qhmem .totdropped + qhmem .freesize, qhmem .totunused,
    -           qhmem .maxlong, qhmem .totlong, qhmem .cntlong - qhmem .freelong,
    -           qhmem .totbuffer, qhmem .BUFsize, qhmem .BUFinit);
    -  if (qhmem.cntlarger) {
    -    qh_fprintf(fp, 9279, "%7d calls to qh_setlarger\n%7.2g     average copy size\n",
    -           qhmem.cntlarger, ((float)qhmem.totlarger)/(float)qhmem.cntlarger);
    -    qh_fprintf(fp, 9280, "  freelists(bytes->count):");
    -  }
    -  for (i=0; i < qhmem.TABLEsize; i++) {
    -    count=0;
    -    for (object= qhmem .freelists[i]; object; object= *((void **)object))
    -      count++;
    -    qh_fprintf(fp, 9281, " %d->%d", qhmem.sizetable[i], count);
    -  }
    -  qh_fprintf(fp, 9282, "\n\n");
    -} /* memstatistics */
    -
    -
    -/*---------------------------------
    -
    -  qh_NOmem
    -    turn off quick-fit memory allocation
    -
    -  notes:
    -    uses qh_malloc() and qh_free() instead
    -*/
    -#else /* qh_NOmem */
    -
    -void *qh_memalloc(int insize) {
    -  void *object;
    -
    -  if (!(object= qh_malloc((size_t)insize))) {
    -    qh_fprintf(qhmem.ferr, 6090, "qhull error (qh_memalloc): insufficient memory\n");
    -    qh_errexit(qhmem_ERRmem, NULL, NULL);
    -  }
    -  qhmem .cntlong++;
    -  qhmem .totlong += insize;
    -  if (qhmem.maxlong < qhmem.totlong)
    -      qhmem.maxlong= qhmem.totlong;
    -  if (qhmem.IStracing >= 5)
    -    qh_fprintf(qhmem.ferr, 8060, "qh_mem %p n %8d alloc long: %d bytes (tot %d cnt %d)\n", object, qhmem.cntlong+qhmem.freelong, insize, qhmem.totlong, qhmem.cntlong-qhmem.freelong);
    -  return object;
    -}
    -
    -void qh_memfree(void *object, int insize) {
    -
    -  if (!object)
    -    return;
    -  qh_free(object);
    -  qhmem .freelong++;
    -  qhmem .totlong -= insize;
    -  if (qhmem.IStracing >= 5)
    -    qh_fprintf(qhmem.ferr, 8061, "qh_mem %p n %8d free long: %d bytes (tot %d cnt %d)\n", object, qhmem.cntlong+qhmem.freelong, insize, qhmem.totlong, qhmem.cntlong-qhmem.freelong);
    -}
    -
    -void qh_memfreeshort(int *curlong, int *totlong) {
    -  *totlong= qhmem .totlong;
    -  *curlong= qhmem .cntlong - qhmem .freelong;
    -  memset((char *)&qhmem, 0, sizeof(qhmem));  /* every field is 0, FALSE, NULL */
    -}
    -
    -void qh_meminit(FILE *ferr) {
    -
    -  memset((char *)&qhmem, 0, sizeof(qhmem));  /* every field is 0, FALSE, NULL */
    -  qhmem.ferr= ferr;
    -  if (sizeof(void*) < sizeof(int)) {
    -    qh_fprintf(ferr, 6091, "qhull internal error (qh_meminit): sizeof(void*) %d < sizeof(int) %d.  qset.c will not work\n", (int)sizeof(void*), (int)sizeof(int));
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -}
    -
    -void qh_meminitbuffers(int tracelevel, int alignment, int numsizes, int bufsize, int bufinit) {
    -
    -  qhmem.IStracing= tracelevel;
    -}
    -
    -void qh_memsetup(void) {
    -
    -}
    -
    -void qh_memsize(int size) {
    -
    -}
    -
    -void qh_memstatistics(FILE *fp) {
    -
    -  qh_fprintf(fp, 9409, "\nmemory statistics:\n\
    -%7d long allocations\n\
    -%7d long frees\n\
    -%7d bytes of long memory allocated (max, except for input)\n\
    -%7d bytes of long memory in use (in %d pieces)\n",
    -           qhmem .cntlong,
    -           qhmem .freelong,
    -           qhmem .maxlong, qhmem .totlong, qhmem .cntlong - qhmem .freelong);
    -}
    -
    -#endif /* qh_NOmem */
    -
    -/*---------------------------------
    -
    -  qh_memtotal( totlong, curlong, totshort, curshort, maxlong, totbuffer )
    -    Return the total, allocated long and short memory
    -
    -  returns:
    -    Returns the total current bytes of long and short allocations
    -    Returns the current count of long and short allocations
    -    Returns the maximum long memory and total short buffer (minus one link per buffer)
    -    Does not error (UsingLibQhull.cpp)
    -*/
    -void qh_memtotal(int *totlong, int *curlong, int *totshort, int *curshort, int *maxlong, int *totbuffer) {
    -    *totlong= qhmem .totlong;
    -    *curlong= qhmem .cntlong - qhmem .freelong;
    -    *totshort= qhmem .totshort;
    -    *curshort= qhmem .cntshort + qhmem .cntquick - qhmem .freeshort;
    -    *maxlong= qhmem .maxlong;
    -    *totbuffer= qhmem .totbuffer;
    -} /* memtotlong */
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/mem.h b/scipy-0.10.1/scipy/spatial/qhull/src/mem.h
    deleted file mode 100644
    index 2a5022e5ee..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/mem.h
    +++ /dev/null
    @@ -1,219 +0,0 @@
    -/*
      ---------------------------------
    -
    -   mem.h
    -     prototypes for memory management functions
    -
    -   see qh-mem.htm, mem.c and qset.h
    -
    -   for error handling, writes message and calls
    -     qh_errexit(qhmem_ERRmem, NULL, NULL) if insufficient memory
    -       and
    -     qh_errexit(qhmem_ERRqhull, NULL, NULL) otherwise
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/mem.h#28 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFmem
    -#define qhDEFmem 1
    -
    -#include 
    -#include 
    -
    -#include 
    -
    -/*---------------------------------
    -
    -  qh_NOmem
    -    turn off quick-fit memory allocation
    -
    -  notes:
    -    mem.c implements Quickfit memory allocation for about 20% time
    -    savings.  If it fails on your machine, try to locate the
    -    problem, and send the answer to qhull@qhull.org.  If this can
    -    not be done, define qh_NOmem to use malloc/free instead.
    -
    -   #define qh_NOmem
    -*/
    -
    -/*---------------------------------
    -
    -qh_TRACEshort
    -Trace short and quick memory allocations at T5
    -
    -*/
    -#define qh_TRACEshort
    -
    -/*-------------------------------------------
    -    to avoid bus errors, memory allocation must consider alignment requirements.
    -    malloc() automatically takes care of alignment.   Since mem.c manages
    -    its own memory, we need to explicitly specify alignment in
    -    qh_meminitbuffers().
    -
    -    A safe choice is sizeof(double).  sizeof(float) may be used if doubles
    -    do not occur in data structures and pointers are the same size.  Be careful
    -    of machines (e.g., DEC Alpha) with large pointers.  If gcc is available,
    -    use __alignof__(double) or fmax_(__alignof__(float), __alignof__(void *)).
    -
    -   see qh_MEMalign in user.h for qhull's alignment
    -*/
    -
    -#define qhmem_ERRmem 4    /* matches qh_ERRmem in libqhull.h */
    -#define qhmem_ERRqhull 5  /* matches qh_ERRqhull in libqhull.h */
    -
    -/*----------------------------------
    -
    -  ptr_intT
    -    for casting a void * to an integer-type that holds a pointer
    -    Used for integer expressions (e.g., computing qh_gethash() in poly.c)
    -
    -  notes:
    -    WARN64 -- these notes indicate 64-bit issues
    -    On 64-bit machines, a pointer may be larger than an 'int'.
    -    qh_meminit()/mem.c checks that 'ptr_intT' holds a 'void*'
    -    ptr_intT is not defined as 'long long' for portability to older compilers
    -    ptr_intT is typically a signed value, but not necessarily so
    -    size_t is typically unsigned, but should match the parameter type
    -    Qhull uses int instead of size_t except for system calls such as malloc, qsort, qh_malloc, etc.
    -    This matches Qt convention and is easier to work with.
    -*/
    -typedef npy_intp ptr_intT;
    -
    -/*----------------------------------
    -
    -  qhmemT
    -    global memory structure for mem.c
    -
    - notes:
    -   users should ignore qhmem except for writing extensions
    -   qhmem is allocated in mem.c
    -
    -   qhmem could be swapable like qh and qhstat, but then
    -   multiple qh's and qhmem's would need to keep in synch.
    -   A swapable qhmem would also waste memory buffers.  As long
    -   as memory operations are atomic, there is no problem with
    -   multiple qh structures being active at the same time.
    -   If you need separate address spaces, you can swap the
    -   contents of qhmem.
    -*/
    -typedef struct qhmemT qhmemT;
    -extern qhmemT qhmem;
    -
    -#ifndef DEFsetT
    -#define DEFsetT 1
    -typedef struct setT setT;          /* defined in qset.h */
    -#endif
    -
    -/* Update qhmem in mem.c if add or remove fields */
    -struct qhmemT {               /* global memory management variables */
    -  int      BUFsize;           /* size of memory allocation buffer */
    -  int      BUFinit;           /* initial size of memory allocation buffer */
    -  int      TABLEsize;         /* actual number of sizes in free list table */
    -  int      NUMsizes;          /* maximum number of sizes in free list table */
    -  int      LASTsize;          /* last size in free list table */
    -  int      ALIGNmask;         /* worst-case alignment, must be 2^n-1 */
    -  void   **freelists;          /* free list table, linked by offset 0 */
    -  int     *sizetable;         /* size of each freelist */
    -  int     *indextable;        /* size->index table */
    -  void    *curbuffer;         /* current buffer, linked by offset 0 */
    -  void    *freemem;           /*   free memory in curbuffer */
    -  int      freesize;          /*   size of freemem in bytes */
    -  setT    *tempstack;         /* stack of temporary memory, managed by users */
    -  FILE    *ferr;              /* file for reporting errors, only user is qh_fprintf() */
    -  int      IStracing;         /* =5 if tracing memory allocations */
    -  int      cntquick;          /* count of quick allocations */
    -                              /* Note: removing statistics doesn't effect speed */
    -  int      cntshort;          /* count of short allocations */
    -  int      cntlong;           /* count of long allocations */
    -  int      freeshort;         /* count of short memfrees */
    -  int      freelong;          /* count of long memfrees */
    -  int      totbuffer;         /* total short memory buffers minus buffer links */
    -  int      totdropped;        /* total dropped memory at end of short memory buffers (e.g., freesize) */
    -  int      totfree;           /* total size of free, short memory on freelists */
    -  int      totlong;           /* total size of long memory in use */
    -  int      maxlong;           /*   maximum totlong */
    -  int      totshort;          /* total size of short memory in use */
    -  int      totunused;         /* total unused short memory (estimated, short size - request size of first allocations) */
    -  int      cntlarger;         /* count of setlarger's */
    -  int      totlarger;         /* total copied by setlarger */
    -};
    -
    -
    -/*==================== -macros ====================*/
    -
    -/*----------------------------------
    -
    -  qh_memalloc_(insize, object, type)
    -    returns object of size bytes
    -        assumes size<=qhmem.LASTsize and void **freelistp is a temp
    -*/
    -
    -#if defined qh_NOmem
    -#define qh_memalloc_(insize, freelistp, object, type) {\
    -  object= (type*)qh_memalloc(insize); }
    -#elif defined qh_TRACEshort
    -#define qh_memalloc_(insize, freelistp, object, type) {\
    -    freelistp= NULL; /* Avoid warnings */ \
    -    object= (type*)qh_memalloc(insize); }
    -#else /* !qh_NOmem */
    -
    -#define qh_memalloc_(insize, freelistp, object, type) {\
    -  freelistp= qhmem.freelists + qhmem.indextable[insize];\
    -  if ((object= (type*)*freelistp)) {\
    -    qhmem.totshort += qhmem.sizetable[qhmem.indextable[insize]]; \
    -    qhmem.totfree -= qhmem.sizetable[qhmem.indextable[insize]]; \
    -    qhmem.cntquick++;  \
    -    *freelistp= *((void **)*freelistp);\
    -  }else object= (type*)qh_memalloc(insize);}
    -#endif
    -
    -/*----------------------------------
    -
    -  qh_memfree_(object, insize)
    -    free up an object
    -
    -  notes:
    -    object may be NULL
    -    assumes size<=qhmem.LASTsize and void **freelistp is a temp
    -*/
    -#if defined qh_NOmem
    -#define qh_memfree_(object, insize, freelistp) {\
    -  qh_memfree(object, insize); }
    -#elif defined qh_TRACEshort
    -#define qh_memfree_(object, insize, freelistp) {\
    -    freelistp= NULL; /* Avoid warnings */ \
    -    qh_memfree(object, insize); }
    -#else /* !qh_NOmem */
    -
    -#define qh_memfree_(object, insize, freelistp) {\
    -  if (object) { \
    -    qhmem .freeshort++;\
    -    freelistp= qhmem.freelists + qhmem.indextable[insize];\
    -    qhmem.totshort -= qhmem.sizetable[qhmem.indextable[insize]]; \
    -    qhmem.totfree += qhmem.sizetable[qhmem.indextable[insize]]; \
    -    *((void **)object)= *freelistp;\
    -    *freelistp= object;}}
    -#endif
    -
    -/*=============== prototypes in alphabetical order ============*/
    -
    -void *qh_memalloc(int insize);
    -void qh_memfree(void *object, int insize);
    -void qh_memfreeshort(int *curlong, int *totlong);
    -void qh_meminit(FILE *ferr);
    -void qh_meminitbuffers(int tracelevel, int alignment, int numsizes,
    -                        int bufsize, int bufinit);
    -void qh_memsetup(void);
    -void qh_memsize(int size);
    -void qh_memstatistics(FILE *fp);
    -void qh_memtotal(int *totlong, int *curlong, int *totshort, int *curshort, int *maxlong, int *totbuffer);
    -
    -#endif /* qhDEFmem */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/merge.c b/scipy-0.10.1/scipy/spatial/qhull/src/merge.c
    deleted file mode 100644
    index 8fcc1bb84c..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/merge.c
    +++ /dev/null
    @@ -1,3623 +0,0 @@
    -/*
      ---------------------------------
    -
    -   merge.c
    -   merges non-convex facets
    -
    -   see qh-merge.htm and merge.h
    -
    -   other modules call qh_premerge() and qh_postmerge()
    -
    -   the user may call qh_postmerge() to perform additional merges.
    -
    -   To remove deleted facets and vertices (qhull() in libqhull.c):
    -     qh_partitionvisible(!qh_ALL, &numoutside);  // visible_list, newfacet_list
    -     qh_deletevisible();         // qh.visible_list
    -     qh_resetlists(False, qh_RESETvisible);       // qh.visible_list newvertex_list newfacet_list
    -
    -   assumes qh.CENTERtype= centrum
    -
    -   merges occur in qh_mergefacet and in qh_mergecycle
    -   vertex->neighbors not set until the first merge occurs
    -
    -   copyright (c) 1993-2010 C.B. Barber.
    -   $Id: //product/qhull/main/rel/src/merge.c#27 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#include "qhull_a.h"
    -
    -#ifndef qh_NOmerge
    -
    -/*===== functions(alphabetical after premerge and postmerge) ======*/
    -
    -/*---------------------------------
    -
    -  qh_premerge( apex, maxcentrum )
    -    pre-merge nonconvex facets in qh.newfacet_list for apex
    -    maxcentrum defines coplanar and concave (qh_test_appendmerge)
    -
    -  returns:
    -    deleted facets added to qh.visible_list with facet->visible set
    -
    -  notes:
    -    uses globals, qh.MERGEexact, qh.PREmerge
    -
    -  design:
    -    mark duplicate ridges in qh.newfacet_list
    -    merge facet cycles in qh.newfacet_list
    -    merge duplicate ridges and concave facets in qh.newfacet_list
    -    check merged facet cycles for degenerate and redundant facets
    -    merge degenerate and redundant facets
    -    collect coplanar and concave facets
    -    merge concave, coplanar, degenerate, and redundant facets
    -*/
    -void qh_premerge(vertexT *apex, realT maxcentrum, realT maxangle) {
    -  boolT othermerge= False;
    -  facetT *newfacet;
    -
    -  if (qh ZEROcentrum && qh_checkzero(!qh_ALL))
    -    return;
    -  trace2((qh ferr, 2008, "qh_premerge: premerge centrum %2.2g angle %2.2g for apex v%d facetlist f%d\n",
    -            maxcentrum, maxangle, apex->id, getid_(qh newfacet_list)));
    -  if (qh IStracing >= 4 && qh num_facets < 50)
    -    qh_printlists();
    -  qh centrum_radius= maxcentrum;
    -  qh cos_max= maxangle;
    -  qh degen_mergeset= qh_settemp(qh TEMPsize);
    -  qh facet_mergeset= qh_settemp(qh TEMPsize);
    -  if (qh hull_dim >=3) {
    -    qh_mark_dupridges(qh newfacet_list); /* facet_mergeset */
    -    qh_mergecycle_all(qh newfacet_list, &othermerge);
    -    qh_forcedmerges(&othermerge /* qh facet_mergeset */);
    -    FORALLnew_facets {  /* test samecycle merges */
    -      if (!newfacet->simplicial && !newfacet->mergeridge)
    -        qh_degen_redundant_neighbors(newfacet, NULL);
    -    }
    -    if (qh_merge_degenredundant())
    -      othermerge= True;
    -  }else /* qh hull_dim == 2 */
    -    qh_mergecycle_all(qh newfacet_list, &othermerge);
    -  qh_flippedmerges(qh newfacet_list, &othermerge);
    -  if (!qh MERGEexact || zzval_(Ztotmerge)) {
    -    zinc_(Zpremergetot);
    -    qh POSTmerging= False;
    -    qh_getmergeset_initial(qh newfacet_list);
    -    qh_all_merges(othermerge, False);
    -  }
    -  qh_settempfree(&qh facet_mergeset);
    -  qh_settempfree(&qh degen_mergeset);
    -} /* premerge */
    -
    -/*---------------------------------
    -
    -  qh_postmerge( reason, maxcentrum, maxangle, vneighbors )
    -    post-merge nonconvex facets as defined by maxcentrum and maxangle
    -    'reason' is for reporting progress
    -    if vneighbors,
    -      calls qh_test_vneighbors at end of qh_all_merge
    -    if firstmerge,
    -      calls qh_reducevertices before qh_getmergeset
    -
    -  returns:
    -    if first call (qh.visible_list != qh.facet_list),
    -      builds qh.facet_newlist, qh.newvertex_list
    -    deleted facets added to qh.visible_list with facet->visible
    -    qh.visible_list == qh.facet_list
    -
    -  notes:
    -
    -
    -  design:
    -    if first call
    -      set qh.visible_list and qh.newfacet_list to qh.facet_list
    -      add all facets to qh.newfacet_list
    -      mark non-simplicial facets, facet->newmerge
    -      set qh.newvertext_list to qh.vertex_list
    -      add all vertices to qh.newvertex_list
    -      if a pre-merge occured
    -        set vertex->delridge {will retest the ridge}
    -        if qh.MERGEexact
    -          call qh_reducevertices()
    -      if no pre-merging
    -        merge flipped facets
    -    determine non-convex facets
    -    merge all non-convex facets
    -*/
    -void qh_postmerge(const char *reason, realT maxcentrum, realT maxangle,
    -                      boolT vneighbors) {
    -  facetT *newfacet;
    -  boolT othermerges= False;
    -  vertexT *vertex;
    -
    -  if (qh REPORTfreq || qh IStracing) {
    -    qh_buildtracing(NULL, NULL);
    -    qh_printsummary(qh ferr);
    -    if (qh PRINTstatistics)
    -      qh_printallstatistics(qh ferr, "reason");
    -    qh_fprintf(qh ferr, 8062, "\n%s with 'C%.2g' and 'A%.2g'\n",
    -        reason, maxcentrum, maxangle);
    -  }
    -  trace2((qh ferr, 2009, "qh_postmerge: postmerge.  test vneighbors? %d\n",
    -            vneighbors));
    -  qh centrum_radius= maxcentrum;
    -  qh cos_max= maxangle;
    -  qh POSTmerging= True;
    -  qh degen_mergeset= qh_settemp(qh TEMPsize);
    -  qh facet_mergeset= qh_settemp(qh TEMPsize);
    -  if (qh visible_list != qh facet_list) {  /* first call */
    -    qh NEWfacets= True;
    -    qh visible_list= qh newfacet_list= qh facet_list;
    -    FORALLnew_facets {
    -      newfacet->newfacet= True;
    -       if (!newfacet->simplicial)
    -        newfacet->newmerge= True;
    -     zinc_(Zpostfacets);
    -    }
    -    qh newvertex_list= qh vertex_list;
    -    FORALLvertices
    -      vertex->newlist= True;
    -    if (qh VERTEXneighbors) { /* a merge has occurred */
    -      FORALLvertices
    -        vertex->delridge= True; /* test for redundant, needed? */
    -      if (qh MERGEexact) {
    -        if (qh hull_dim <= qh_DIMreduceBuild)
    -          qh_reducevertices(); /* was skipped during pre-merging */
    -      }
    -    }
    -    if (!qh PREmerge && !qh MERGEexact)
    -      qh_flippedmerges(qh newfacet_list, &othermerges);
    -  }
    -  qh_getmergeset_initial(qh newfacet_list);
    -  qh_all_merges(False, vneighbors);
    -  qh_settempfree(&qh facet_mergeset);
    -  qh_settempfree(&qh degen_mergeset);
    -} /* post_merge */
    -
    -/*---------------------------------
    -
    -  qh_all_merges( othermerge, vneighbors )
    -    merge all non-convex facets
    -
    -    set othermerge if already merged facets (for qh_reducevertices)
    -    if vneighbors
    -      tests vertex neighbors for convexity at end
    -    qh.facet_mergeset lists the non-convex ridges in qh_newfacet_list
    -    qh.degen_mergeset is defined
    -    if qh.MERGEexact && !qh.POSTmerging,
    -      does not merge coplanar facets
    -
    -  returns:
    -    deleted facets added to qh.visible_list with facet->visible
    -    deleted vertices added qh.delvertex_list with vertex->delvertex
    -
    -  notes:
    -    unless !qh.MERGEindependent,
    -      merges facets in independent sets
    -    uses qh.newfacet_list as argument since merges call qh_removefacet()
    -
    -  design:
    -    while merges occur
    -      for each merge in qh.facet_mergeset
    -        unless one of the facets was already merged in this pass
    -          merge the facets
    -        test merged facets for additional merges
    -        add merges to qh.facet_mergeset
    -      if vertices record neighboring facets
    -        rename redundant vertices
    -          update qh.facet_mergeset
    -    if vneighbors ??
    -      tests vertex neighbors for convexity at end
    -*/
    -void qh_all_merges(boolT othermerge, boolT vneighbors) {
    -  facetT *facet1, *facet2;
    -  mergeT *merge;
    -  boolT wasmerge= True, isreduce;
    -  void **freelistp;  /* used !qh_NOmem */
    -  vertexT *vertex;
    -  mergeType mergetype;
    -  int numcoplanar=0, numconcave=0, numdegenredun= 0, numnewmerges= 0;
    -
    -  trace2((qh ferr, 2010, "qh_all_merges: starting to merge facets beginning from f%d\n",
    -            getid_(qh newfacet_list)));
    -  while (True) {
    -    wasmerge= False;
    -    while (qh_setsize(qh facet_mergeset)) {
    -      while ((merge= (mergeT*)qh_setdellast(qh facet_mergeset))) {
    -        facet1= merge->facet1;
    -        facet2= merge->facet2;
    -        mergetype= merge->type;
    -        qh_memfree_(merge, (int)sizeof(mergeT), freelistp);
    -        if (facet1->visible || facet2->visible) /*deleted facet*/
    -          continue;
    -        if ((facet1->newfacet && !facet1->tested)
    -                || (facet2->newfacet && !facet2->tested)) {
    -          if (qh MERGEindependent && mergetype <= MRGanglecoplanar)
    -            continue;      /* perform independent sets of merges */
    -        }
    -        qh_merge_nonconvex(facet1, facet2, mergetype);
    -        numdegenredun += qh_merge_degenredundant();
    -        numnewmerges++;
    -        wasmerge= True;
    -        if (mergetype == MRGconcave)
    -          numconcave++;
    -        else /* MRGcoplanar or MRGanglecoplanar */
    -          numcoplanar++;
    -      } /* while setdellast */
    -      if (qh POSTmerging && qh hull_dim <= qh_DIMreduceBuild
    -      && numnewmerges > qh_MAXnewmerges) {
    -        numnewmerges= 0;
    -        qh_reducevertices();  /* otherwise large post merges too slow */
    -      }
    -      qh_getmergeset(qh newfacet_list); /* facet_mergeset */
    -    } /* while mergeset */
    -    if (qh VERTEXneighbors) {
    -      isreduce= False;
    -      if (qh hull_dim >=4 && qh POSTmerging) {
    -        FORALLvertices
    -          vertex->delridge= True;
    -        isreduce= True;
    -      }
    -      if ((wasmerge || othermerge) && (!qh MERGEexact || qh POSTmerging)
    -          && qh hull_dim <= qh_DIMreduceBuild) {
    -        othermerge= False;
    -        isreduce= True;
    -      }
    -      if (isreduce) {
    -        if (qh_reducevertices()) {
    -          qh_getmergeset(qh newfacet_list); /* facet_mergeset */
    -          continue;
    -        }
    -      }
    -    }
    -    if (vneighbors && qh_test_vneighbors(/* qh newfacet_list */))
    -      continue;
    -    break;
    -  } /* while (True) */
    -  if (qh CHECKfrequently && !qh MERGEexact) {
    -    qh old_randomdist= qh RANDOMdist;
    -    qh RANDOMdist= False;
    -    qh_checkconvex(qh newfacet_list, qh_ALGORITHMfault);
    -    /* qh_checkconnect(); [this is slow and it changes the facet order] */
    -    qh RANDOMdist= qh old_randomdist;
    -  }
    -  trace1((qh ferr, 1009, "qh_all_merges: merged %d coplanar facets %d concave facets and %d degen or redundant facets.\n",
    -    numcoplanar, numconcave, numdegenredun));
    -  if (qh IStracing >= 4 && qh num_facets < 50)
    -    qh_printlists();
    -} /* all_merges */
    -
    -
    -/*---------------------------------
    -
    -  qh_appendmergeset( facet, neighbor, mergetype, angle )
    -    appends an entry to qh.facet_mergeset or qh.degen_mergeset
    -
    -    angle ignored if NULL or !qh.ANGLEmerge
    -
    -  returns:
    -    merge appended to facet_mergeset or degen_mergeset
    -      sets ->degenerate or ->redundant if degen_mergeset
    -
    -  see:
    -    qh_test_appendmerge()
    -
    -  design:
    -    allocate merge entry
    -    if regular merge
    -      append to qh.facet_mergeset
    -    else if degenerate merge and qh.facet_mergeset is all degenerate
    -      append to qh.degen_mergeset
    -    else if degenerate merge
    -      prepend to qh.degen_mergeset
    -    else if redundant merge
    -      append to qh.degen_mergeset
    -*/
    -void qh_appendmergeset(facetT *facet, facetT *neighbor, mergeType mergetype, realT *angle) {
    -  mergeT *merge, *lastmerge;
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  if (facet->redundant)
    -    return;
    -  if (facet->degenerate && mergetype == MRGdegen)
    -    return;
    -  qh_memalloc_((int)sizeof(mergeT), freelistp, merge, mergeT);
    -  merge->facet1= facet;
    -  merge->facet2= neighbor;
    -  merge->type= mergetype;
    -  if (angle && qh ANGLEmerge)
    -    merge->angle= *angle;
    -  if (mergetype < MRGdegen)
    -    qh_setappend(&(qh facet_mergeset), merge);
    -  else if (mergetype == MRGdegen) {
    -    facet->degenerate= True;
    -    if (!(lastmerge= (mergeT*)qh_setlast(qh degen_mergeset))
    -    || lastmerge->type == MRGdegen)
    -      qh_setappend(&(qh degen_mergeset), merge);
    -    else
    -      qh_setaddnth(&(qh degen_mergeset), 0, merge);
    -  }else if (mergetype == MRGredundant) {
    -    facet->redundant= True;
    -    qh_setappend(&(qh degen_mergeset), merge);
    -  }else /* mergetype == MRGmirror */ {
    -    if (facet->redundant || neighbor->redundant) {
    -      qh_fprintf(qh ferr, 6092, "qhull error (qh_appendmergeset): facet f%d or f%d is already a mirrored facet\n",
    -           facet->id, neighbor->id);
    -      qh_errexit2 (qh_ERRqhull, facet, neighbor);
    -    }
    -    if (!qh_setequal(facet->vertices, neighbor->vertices)) {
    -      qh_fprintf(qh ferr, 6093, "qhull error (qh_appendmergeset): mirrored facets f%d and f%d do not have the same vertices\n",
    -           facet->id, neighbor->id);
    -      qh_errexit2 (qh_ERRqhull, facet, neighbor);
    -    }
    -    facet->redundant= True;
    -    neighbor->redundant= True;
    -    qh_setappend(&(qh degen_mergeset), merge);
    -  }
    -} /* appendmergeset */
    -
    -
    -/*---------------------------------
    -
    -  qh_basevertices( samecycle )
    -    return temporary set of base vertices for samecycle
    -    samecycle is first facet in the cycle
    -    assumes apex is SETfirst_( samecycle->vertices )
    -
    -  returns:
    -    vertices(settemp)
    -    all ->seen are cleared
    -
    -  notes:
    -    uses qh_vertex_visit;
    -
    -  design:
    -    for each facet in samecycle
    -      for each unseen vertex in facet->vertices
    -        append to result
    -*/
    -setT *qh_basevertices(facetT *samecycle) {
    -  facetT *same;
    -  vertexT *apex, *vertex, **vertexp;
    -  setT *vertices= qh_settemp(qh TEMPsize);
    -
    -  apex= SETfirstt_(samecycle->vertices, vertexT);
    -  apex->visitid= ++qh vertex_visit;
    -  FORALLsame_cycle_(samecycle) {
    -    if (same->mergeridge)
    -      continue;
    -    FOREACHvertex_(same->vertices) {
    -      if (vertex->visitid != qh vertex_visit) {
    -        qh_setappend(&vertices, vertex);
    -        vertex->visitid= qh vertex_visit;
    -        vertex->seen= False;
    -      }
    -    }
    -  }
    -  trace4((qh ferr, 4019, "qh_basevertices: found %d vertices\n",
    -         qh_setsize(vertices)));
    -  return vertices;
    -} /* basevertices */
    -
    -/*---------------------------------
    -
    -  qh_checkconnect()
    -    check that new facets are connected
    -    new facets are on qh.newfacet_list
    -
    -  notes:
    -    this is slow and it changes the order of the facets
    -    uses qh.visit_id
    -
    -  design:
    -    move first new facet to end of qh.facet_list
    -    for all newly appended facets
    -      append unvisited neighbors to end of qh.facet_list
    -    for all new facets
    -      report error if unvisited
    -*/
    -void qh_checkconnect(void /* qh newfacet_list */) {
    -  facetT *facet, *newfacet, *errfacet= NULL, *neighbor, **neighborp;
    -
    -  facet= qh newfacet_list;
    -  qh_removefacet(facet);
    -  qh_appendfacet(facet);
    -  facet->visitid= ++qh visit_id;
    -  FORALLfacet_(facet) {
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid != qh visit_id) {
    -        qh_removefacet(neighbor);
    -        qh_appendfacet(neighbor);
    -        neighbor->visitid= qh visit_id;
    -      }
    -    }
    -  }
    -  FORALLnew_facets {
    -    if (newfacet->visitid == qh visit_id)
    -      break;
    -    qh_fprintf(qh ferr, 6094, "qhull error: f%d is not attached to the new facets\n",
    -         newfacet->id);
    -    errfacet= newfacet;
    -  }
    -  if (errfacet)
    -    qh_errexit(qh_ERRqhull, errfacet, NULL);
    -} /* checkconnect */
    -
    -/*---------------------------------
    -
    -  qh_checkzero( testall )
    -    check that facets are clearly convex for qh.DISTround with qh.MERGEexact
    -
    -    if testall,
    -      test all facets for qh.MERGEexact post-merging
    -    else
    -      test qh.newfacet_list
    -
    -    if qh.MERGEexact,
    -      allows coplanar ridges
    -      skips convexity test while qh.ZEROall_ok
    -
    -  returns:
    -    True if all facets !flipped, !dupridge, normal
    -         if all horizon facets are simplicial
    -         if all vertices are clearly below neighbor
    -         if all opposite vertices of horizon are below
    -    clears qh.ZEROall_ok if any problems or coplanar facets
    -
    -  notes:
    -    uses qh.vertex_visit
    -    horizon facets may define multiple new facets
    -
    -  design:
    -    for all facets in qh.newfacet_list or qh.facet_list
    -      check for flagged faults (flipped, etc.)
    -    for all facets in qh.newfacet_list or qh.facet_list
    -      for each neighbor of facet
    -        skip horizon facets for qh.newfacet_list
    -        test the opposite vertex
    -      if qh.newfacet_list
    -        test the other vertices in the facet's horizon facet
    -*/
    -boolT qh_checkzero(boolT testall) {
    -  facetT *facet, *neighbor, **neighborp;
    -  facetT *horizon, *facetlist;
    -  int neighbor_i;
    -  vertexT *vertex, **vertexp;
    -  realT dist;
    -
    -  if (testall)
    -    facetlist= qh facet_list;
    -  else {
    -    facetlist= qh newfacet_list;
    -    FORALLfacet_(facetlist) {
    -      horizon= SETfirstt_(facet->neighbors, facetT);
    -      if (!horizon->simplicial)
    -        goto LABELproblem;
    -      if (facet->flipped || facet->dupridge || !facet->normal)
    -        goto LABELproblem;
    -    }
    -    if (qh MERGEexact && qh ZEROall_ok) {
    -      trace2((qh ferr, 2011, "qh_checkzero: skip convexity check until first pre-merge\n"));
    -      return True;
    -    }
    -  }
    -  FORALLfacet_(facetlist) {
    -    qh vertex_visit++;
    -    neighbor_i= 0;
    -    horizon= NULL;
    -    FOREACHneighbor_(facet) {
    -      if (!neighbor_i && !testall) {
    -        horizon= neighbor;
    -        neighbor_i++;
    -        continue; /* horizon facet tested in qh_findhorizon */
    -      }
    -      vertex= SETelemt_(facet->vertices, neighbor_i++, vertexT);
    -      vertex->visitid= qh vertex_visit;
    -      zzinc_(Zdistzero);
    -      qh_distplane(vertex->point, neighbor, &dist);
    -      if (dist >= -qh DISTround) {
    -        qh ZEROall_ok= False;
    -        if (!qh MERGEexact || testall || dist > qh DISTround)
    -          goto LABELnonconvex;
    -      }
    -    }
    -    if (!testall) {
    -      FOREACHvertex_(horizon->vertices) {
    -        if (vertex->visitid != qh vertex_visit) {
    -          zzinc_(Zdistzero);
    -          qh_distplane(vertex->point, facet, &dist);
    -          if (dist >= -qh DISTround) {
    -            qh ZEROall_ok= False;
    -            if (!qh MERGEexact || dist > qh DISTround)
    -              goto LABELnonconvex;
    -          }
    -          break;
    -        }
    -      }
    -    }
    -  }
    -  trace2((qh ferr, 2012, "qh_checkzero: testall %d, facets are %s\n", testall,
    -        (qh MERGEexact && !testall) ?
    -           "not concave, flipped, or duplicate ridged" : "clearly convex"));
    -  return True;
    -
    - LABELproblem:
    -  qh ZEROall_ok= False;
    -  trace2((qh ferr, 2013, "qh_checkzero: facet f%d needs pre-merging\n",
    -       facet->id));
    -  return False;
    -
    - LABELnonconvex:
    -  trace2((qh ferr, 2014, "qh_checkzero: facet f%d and f%d are not clearly convex.  v%d dist %.2g\n",
    -         facet->id, neighbor->id, vertex->id, dist));
    -  return False;
    -} /* checkzero */
    -
    -/*---------------------------------
    -
    -  qh_compareangle( angle1, angle2 )
    -    used by qsort() to order merges by angle
    -*/
    -int qh_compareangle(const void *p1, const void *p2) {
    -  const mergeT *a= *((mergeT *const*)p1), *b= *((mergeT *const*)p2);
    -
    -  return((a->angle > b->angle) ? 1 : -1);
    -} /* compareangle */
    -
    -/*---------------------------------
    -
    -  qh_comparemerge( merge1, merge2 )
    -    used by qsort() to order merges
    -*/
    -int qh_comparemerge(const void *p1, const void *p2) {
    -  const mergeT *a= *((mergeT *const*)p1), *b= *((mergeT *const*)p2);
    -
    -  return(a->type - b->type);
    -} /* comparemerge */
    -
    -/*---------------------------------
    -
    -  qh_comparevisit( vertex1, vertex2 )
    -    used by qsort() to order vertices by their visitid
    -*/
    -int qh_comparevisit(const void *p1, const void *p2) {
    -  const vertexT *a= *((vertexT *const*)p1), *b= *((vertexT *const*)p2);
    -
    -  return(a->visitid - b->visitid);
    -} /* comparevisit */
    -
    -/*---------------------------------
    -
    -  qh_copynonconvex( atridge )
    -    set non-convex flag on other ridges (if any) between same neighbors
    -
    -  notes:
    -    may be faster if use smaller ridge set
    -
    -  design:
    -    for each ridge of atridge's top facet
    -      if ridge shares the same neighbor
    -        set nonconvex flag
    -*/
    -void qh_copynonconvex(ridgeT *atridge) {
    -  facetT *facet, *otherfacet;
    -  ridgeT *ridge, **ridgep;
    -
    -  facet= atridge->top;
    -  otherfacet= atridge->bottom;
    -  FOREACHridge_(facet->ridges) {
    -    if (otherfacet == otherfacet_(ridge, facet) && ridge != atridge) {
    -      ridge->nonconvex= True;
    -      trace4((qh ferr, 4020, "qh_copynonconvex: moved nonconvex flag from r%d to r%d\n",
    -              atridge->id, ridge->id));
    -      break;
    -    }
    -  }
    -} /* copynonconvex */
    -
    -/*---------------------------------
    -
    -  qh_degen_redundant_facet( facet )
    -    check facet for degen. or redundancy
    -
    -  notes:
    -    bumps vertex_visit
    -    called if a facet was redundant but no longer is (qh_merge_degenredundant)
    -    qh_appendmergeset() only appends first reference to facet (i.e., redundant)
    -
    -  see:
    -    qh_degen_redundant_neighbors()
    -
    -  design:
    -    test for redundant neighbor
    -    test for degenerate facet
    -*/
    -void qh_degen_redundant_facet(facetT *facet) {
    -  vertexT *vertex, **vertexp;
    -  facetT *neighbor, **neighborp;
    -
    -  trace4((qh ferr, 4021, "qh_degen_redundant_facet: test facet f%d for degen/redundant\n",
    -          facet->id));
    -  FOREACHneighbor_(facet) {
    -    qh vertex_visit++;
    -    FOREACHvertex_(neighbor->vertices)
    -      vertex->visitid= qh vertex_visit;
    -    FOREACHvertex_(facet->vertices) {
    -      if (vertex->visitid != qh vertex_visit)
    -        break;
    -    }
    -    if (!vertex) {
    -      qh_appendmergeset(facet, neighbor, MRGredundant, NULL);
    -      trace2((qh ferr, 2015, "qh_degen_redundant_facet: f%d is contained in f%d.  merge\n", facet->id, neighbor->id));
    -      return;
    -    }
    -  }
    -  if (qh_setsize(facet->neighbors) < qh hull_dim) {
    -    qh_appendmergeset(facet, facet, MRGdegen, NULL);
    -    trace2((qh ferr, 2016, "qh_degen_redundant_neighbors: f%d is degenerate.\n", facet->id));
    -  }
    -} /* degen_redundant_facet */
    -
    -
    -/*---------------------------------
    -
    -  qh_degen_redundant_neighbors( facet, delfacet,  )
    -    append degenerate and redundant neighbors to facet_mergeset
    -    if delfacet,
    -      only checks neighbors of both delfacet and facet
    -    also checks current facet for degeneracy
    -
    -  notes:
    -    bumps vertex_visit
    -    called for each qh_mergefacet() and qh_mergecycle()
    -    merge and statistics occur in merge_nonconvex
    -    qh_appendmergeset() only appends first reference to facet (i.e., redundant)
    -      it appends redundant facets after degenerate ones
    -
    -    a degenerate facet has fewer than hull_dim neighbors
    -    a redundant facet's vertices is a subset of its neighbor's vertices
    -    tests for redundant merges first (appendmergeset is nop for others)
    -    in a merge, only needs to test neighbors of merged facet
    -
    -  see:
    -    qh_merge_degenredundant() and qh_degen_redundant_facet()
    -
    -  design:
    -    test for degenerate facet
    -    test for redundant neighbor
    -    test for degenerate neighbor
    -*/
    -void qh_degen_redundant_neighbors(facetT *facet, facetT *delfacet) {
    -  vertexT *vertex, **vertexp;
    -  facetT *neighbor, **neighborp;
    -  int size;
    -
    -  trace4((qh ferr, 4022, "qh_degen_redundant_neighbors: test neighbors of f%d with delfacet f%d\n",
    -          facet->id, getid_(delfacet)));
    -  if ((size= qh_setsize(facet->neighbors)) < qh hull_dim) {
    -    qh_appendmergeset(facet, facet, MRGdegen, NULL);
    -    trace2((qh ferr, 2017, "qh_degen_redundant_neighbors: f%d is degenerate with %d neighbors.\n", facet->id, size));
    -  }
    -  if (!delfacet)
    -    delfacet= facet;
    -  qh vertex_visit++;
    -  FOREACHvertex_(facet->vertices)
    -    vertex->visitid= qh vertex_visit;
    -  FOREACHneighbor_(delfacet) {
    -    /* uses early out instead of checking vertex count */
    -    if (neighbor == facet)
    -      continue;
    -    FOREACHvertex_(neighbor->vertices) {
    -      if (vertex->visitid != qh vertex_visit)
    -        break;
    -    }
    -    if (!vertex) {
    -      qh_appendmergeset(neighbor, facet, MRGredundant, NULL);
    -      trace2((qh ferr, 2018, "qh_degen_redundant_neighbors: f%d is contained in f%d.  merge\n", neighbor->id, facet->id));
    -    }
    -  }
    -  FOREACHneighbor_(delfacet) {   /* redundant merges occur first */
    -    if (neighbor == facet)
    -      continue;
    -    if ((size= qh_setsize(neighbor->neighbors)) < qh hull_dim) {
    -      qh_appendmergeset(neighbor, neighbor, MRGdegen, NULL);
    -      trace2((qh ferr, 2019, "qh_degen_redundant_neighbors: f%d is degenerate with %d neighbors.  Neighbor of f%d.\n", neighbor->id, size, facet->id));
    -    }
    -  }
    -} /* degen_redundant_neighbors */
    -
    -
    -/*---------------------------------
    -
    -  qh_find_newvertex( oldvertex, vertices, ridges )
    -    locate new vertex for renaming old vertex
    -    vertices is a set of possible new vertices
    -      vertices sorted by number of deleted ridges
    -
    -  returns:
    -    newvertex or NULL
    -      each ridge includes both vertex and oldvertex
    -    vertices sorted by number of deleted ridges
    -
    -  notes:
    -    modifies vertex->visitid
    -    new vertex is in one of the ridges
    -    renaming will not cause a duplicate ridge
    -    renaming will minimize the number of deleted ridges
    -    newvertex may not be adjacent in the dual (though unlikely)
    -
    -  design:
    -    for each vertex in vertices
    -      set vertex->visitid to number of references in ridges
    -    remove unvisited vertices
    -    set qh.vertex_visit above all possible values
    -    sort vertices by number of references in ridges
    -    add each ridge to qh.hash_table
    -    for each vertex in vertices
    -      look for a vertex that would not cause a duplicate ridge after a rename
    -*/
    -vertexT *qh_find_newvertex(vertexT *oldvertex, setT *vertices, setT *ridges) {
    -  vertexT *vertex, **vertexp;
    -  setT *newridges;
    -  ridgeT *ridge, **ridgep;
    -  int size, hashsize;
    -  int hash;
    -
    -#ifndef qh_NOtrace
    -  if (qh IStracing >= 4) {
    -    qh_fprintf(qh ferr, 8063, "qh_find_newvertex: find new vertex for v%d from ",
    -             oldvertex->id);
    -    FOREACHvertex_(vertices)
    -      qh_fprintf(qh ferr, 8064, "v%d ", vertex->id);
    -    FOREACHridge_(ridges)
    -      qh_fprintf(qh ferr, 8065, "r%d ", ridge->id);
    -    qh_fprintf(qh ferr, 8066, "\n");
    -  }
    -#endif
    -  FOREACHvertex_(vertices)
    -    vertex->visitid= 0;
    -  FOREACHridge_(ridges) {
    -    FOREACHvertex_(ridge->vertices)
    -      vertex->visitid++;
    -  }
    -  FOREACHvertex_(vertices) {
    -    if (!vertex->visitid) {
    -      qh_setdelnth(vertices, SETindex_(vertices,vertex));
    -      vertexp--; /* repeat since deleted this vertex */
    -    }
    -  }
    -  qh vertex_visit += (unsigned int)qh_setsize(ridges);
    -  if (!qh_setsize(vertices)) {
    -    trace4((qh ferr, 4023, "qh_find_newvertex: vertices not in ridges for v%d\n",
    -            oldvertex->id));
    -    return NULL;
    -  }
    -  qsort(SETaddr_(vertices, vertexT), (size_t)qh_setsize(vertices),
    -                sizeof(vertexT *), qh_comparevisit);
    -  /* can now use qh vertex_visit */
    -  if (qh PRINTstatistics) {
    -    size= qh_setsize(vertices);
    -    zinc_(Zintersect);
    -    zadd_(Zintersecttot, size);
    -    zmax_(Zintersectmax, size);
    -  }
    -  hashsize= qh_newhashtable(qh_setsize(ridges));
    -  FOREACHridge_(ridges)
    -    qh_hashridge(qh hash_table, hashsize, ridge, oldvertex);
    -  FOREACHvertex_(vertices) {
    -    newridges= qh_vertexridges(vertex);
    -    FOREACHridge_(newridges) {
    -      if (qh_hashridge_find(qh hash_table, hashsize, ridge, vertex, oldvertex, &hash)) {
    -        zinc_(Zdupridge);
    -        break;
    -      }
    -    }
    -    qh_settempfree(&newridges);
    -    if (!ridge)
    -      break;  /* found a rename */
    -  }
    -  if (vertex) {
    -    /* counted in qh_renamevertex */
    -    trace2((qh ferr, 2020, "qh_find_newvertex: found v%d for old v%d from %d vertices and %d ridges.\n",
    -      vertex->id, oldvertex->id, qh_setsize(vertices), qh_setsize(ridges)));
    -  }else {
    -    zinc_(Zfindfail);
    -    trace0((qh ferr, 14, "qh_find_newvertex: no vertex for renaming v%d(all duplicated ridges) during p%d\n",
    -      oldvertex->id, qh furthest_id));
    -  }
    -  qh_setfree(&qh hash_table);
    -  return vertex;
    -} /* find_newvertex */
    -
    -/*---------------------------------
    -
    -  qh_findbest_test( testcentrum, facet, neighbor, bestfacet, dist, mindist, maxdist )
    -    test neighbor of facet for qh_findbestneighbor()
    -    if testcentrum,
    -      tests centrum (assumes it is defined)
    -    else
    -      tests vertices
    -
    -  returns:
    -    if a better facet (i.e., vertices/centrum of facet closer to neighbor)
    -      updates bestfacet, dist, mindist, and maxdist
    -*/
    -void qh_findbest_test(boolT testcentrum, facetT *facet, facetT *neighbor,
    -      facetT **bestfacet, realT *distp, realT *mindistp, realT *maxdistp) {
    -  realT dist, mindist, maxdist;
    -
    -  if (testcentrum) {
    -    zzinc_(Zbestdist);
    -    qh_distplane(facet->center, neighbor, &dist);
    -    dist *= qh hull_dim; /* estimate furthest vertex */
    -    if (dist < 0) {
    -      maxdist= 0;
    -      mindist= dist;
    -      dist= -dist;
    -    }else {
    -      mindist= 0;
    -      maxdist= dist;
    -    }
    -  }else
    -    dist= qh_getdistance(facet, neighbor, &mindist, &maxdist);
    -  if (dist < *distp) {
    -    *bestfacet= neighbor;
    -    *mindistp= mindist;
    -    *maxdistp= maxdist;
    -    *distp= dist;
    -  }
    -} /* findbest_test */
    -
    -/*---------------------------------
    -
    -  qh_findbestneighbor( facet, dist, mindist, maxdist )
    -    finds best neighbor (least dist) of a facet for merging
    -
    -  returns:
    -    returns min and max distances and their max absolute value
    -
    -  notes:
    -    avoids merging old into new
    -    assumes ridge->nonconvex only set on one ridge between a pair of facets
    -    could use an early out predicate but not worth it
    -
    -  design:
    -    if a large facet
    -      will test centrum
    -    else
    -      will test vertices
    -    if a large facet
    -      test nonconvex neighbors for best merge
    -    else
    -      test all neighbors for the best merge
    -    if testing centrum
    -      get distance information
    -*/
    -facetT *qh_findbestneighbor(facetT *facet, realT *distp, realT *mindistp, realT *maxdistp) {
    -  facetT *neighbor, **neighborp, *bestfacet= NULL;
    -  ridgeT *ridge, **ridgep;
    -  boolT nonconvex= True, testcentrum= False;
    -  int size= qh_setsize(facet->vertices);
    -
    -  *distp= REALmax;
    -  if (size > qh_BESTcentrum2 * qh hull_dim + qh_BESTcentrum) {
    -    testcentrum= True;
    -    zinc_(Zbestcentrum);
    -    if (!facet->center)
    -       facet->center= qh_getcentrum(facet);
    -  }
    -  if (size > qh hull_dim + qh_BESTnonconvex) {
    -    FOREACHridge_(facet->ridges) {
    -      if (ridge->nonconvex) {
    -        neighbor= otherfacet_(ridge, facet);
    -        qh_findbest_test(testcentrum, facet, neighbor,
    -                          &bestfacet, distp, mindistp, maxdistp);
    -      }
    -    }
    -  }
    -  if (!bestfacet) {
    -    nonconvex= False;
    -    FOREACHneighbor_(facet)
    -      qh_findbest_test(testcentrum, facet, neighbor,
    -                        &bestfacet, distp, mindistp, maxdistp);
    -  }
    -  if (!bestfacet) {
    -    qh_fprintf(qh ferr, 6095, "qhull internal error (qh_findbestneighbor): no neighbors for f%d\n", facet->id);
    -
    -    qh_errexit(qh_ERRqhull, facet, NULL);
    -  }
    -  if (testcentrum)
    -    qh_getdistance(facet, bestfacet, mindistp, maxdistp);
    -  trace3((qh ferr, 3002, "qh_findbestneighbor: f%d is best neighbor for f%d testcentrum? %d nonconvex? %d dist %2.2g min %2.2g max %2.2g\n",
    -     bestfacet->id, facet->id, testcentrum, nonconvex, *distp, *mindistp, *maxdistp));
    -  return(bestfacet);
    -} /* findbestneighbor */
    -
    -
    -/*---------------------------------
    -
    -  qh_flippedmerges( facetlist, wasmerge )
    -    merge flipped facets into best neighbor
    -    assumes qh.facet_mergeset at top of temporary stack
    -
    -  returns:
    -    no flipped facets on facetlist
    -    sets wasmerge if merge occurred
    -    degen/redundant merges passed through
    -
    -  notes:
    -    othermerges not needed since qh.facet_mergeset is empty before & after
    -      keep it in case of change
    -
    -  design:
    -    append flipped facets to qh.facetmergeset
    -    for each flipped merge
    -      find best neighbor
    -      merge facet into neighbor
    -      merge degenerate and redundant facets
    -    remove flipped merges from qh.facet_mergeset
    -*/
    -void qh_flippedmerges(facetT *facetlist, boolT *wasmerge) {
    -  facetT *facet, *neighbor, *facet1;
    -  realT dist, mindist, maxdist;
    -  mergeT *merge, **mergep;
    -  setT *othermerges;
    -  int nummerge=0;
    -
    -  trace4((qh ferr, 4024, "qh_flippedmerges: begin\n"));
    -  FORALLfacet_(facetlist) {
    -    if (facet->flipped && !facet->visible)
    -      qh_appendmergeset(facet, facet, MRGflip, NULL);
    -  }
    -  othermerges= qh_settemppop(); /* was facet_mergeset */
    -  qh facet_mergeset= qh_settemp(qh TEMPsize);
    -  qh_settemppush(othermerges);
    -  FOREACHmerge_(othermerges) {
    -    facet1= merge->facet1;
    -    if (merge->type != MRGflip || facet1->visible)
    -      continue;
    -    if (qh TRACEmerge-1 == zzval_(Ztotmerge))
    -      qhmem.IStracing= qh IStracing= qh TRACElevel;
    -    neighbor= qh_findbestneighbor(facet1, &dist, &mindist, &maxdist);
    -    trace0((qh ferr, 15, "qh_flippedmerges: merge flipped f%d into f%d dist %2.2g during p%d\n",
    -      facet1->id, neighbor->id, dist, qh furthest_id));
    -    qh_mergefacet(facet1, neighbor, &mindist, &maxdist, !qh_MERGEapex);
    -    nummerge++;
    -    if (qh PRINTstatistics) {
    -      zinc_(Zflipped);
    -      wadd_(Wflippedtot, dist);
    -      wmax_(Wflippedmax, dist);
    -    }
    -    qh_merge_degenredundant();
    -  }
    -  FOREACHmerge_(othermerges) {
    -    if (merge->facet1->visible || merge->facet2->visible)
    -      qh_memfree(merge, (int)sizeof(mergeT));
    -    else
    -      qh_setappend(&qh facet_mergeset, merge);
    -  }
    -  qh_settempfree(&othermerges);
    -  if (nummerge)
    -    *wasmerge= True;
    -  trace1((qh ferr, 1010, "qh_flippedmerges: merged %d flipped facets into a good neighbor\n", nummerge));
    -} /* flippedmerges */
    -
    -
    -/*---------------------------------
    -
    -  qh_forcedmerges( wasmerge )
    -    merge duplicated ridges
    -
    -  returns:
    -    removes all duplicate ridges on facet_mergeset
    -    wasmerge set if merge
    -    qh.facet_mergeset may include non-forced merges(none for now)
    -    qh.degen_mergeset includes degen/redun merges
    -
    -  notes:
    -    duplicate ridges occur when the horizon is pinched,
    -        i.e. a subridge occurs in more than two horizon ridges.
    -     could rename vertices that pinch the horizon
    -    assumes qh_merge_degenredundant() has not be called
    -    othermerges isn't needed since facet_mergeset is empty afterwards
    -      keep it in case of change
    -
    -  design:
    -    for each duplicate ridge
    -      find current facets by chasing f.replace links
    -      determine best direction for facet
    -      merge one facet into the other
    -      remove duplicate ridges from qh.facet_mergeset
    -*/
    -void qh_forcedmerges(boolT *wasmerge) {
    -  facetT *facet1, *facet2;
    -  mergeT *merge, **mergep;
    -  realT dist1, dist2, mindist1, mindist2, maxdist1, maxdist2;
    -  setT *othermerges;
    -  int nummerge=0, numflip=0;
    -
    -  if (qh TRACEmerge-1 == zzval_(Ztotmerge))
    -    qhmem.IStracing= qh IStracing= qh TRACElevel;
    -  trace4((qh ferr, 4025, "qh_forcedmerges: begin\n"));
    -  othermerges= qh_settemppop(); /* was facet_mergeset */
    -  qh facet_mergeset= qh_settemp(qh TEMPsize);
    -  qh_settemppush(othermerges);
    -  FOREACHmerge_(othermerges) {
    -    if (merge->type != MRGridge)
    -        continue;
    -    facet1= merge->facet1;
    -    facet2= merge->facet2;
    -    while (facet1->visible)      /* must exist, no qh_merge_degenredunant */
    -      facet1= facet1->f.replace; /* previously merged facet */
    -    while (facet2->visible)
    -      facet2= facet2->f.replace; /* previously merged facet */
    -    if (facet1 == facet2)
    -      continue;
    -    if (!qh_setin(facet2->neighbors, facet1)) {
    -      qh_fprintf(qh ferr, 6096, "qhull internal error (qh_forcedmerges): f%d and f%d had a duplicate ridge but as f%d and f%d they are no longer neighbors\n",
    -               merge->facet1->id, merge->facet2->id, facet1->id, facet2->id);
    -      qh_errexit2 (qh_ERRqhull, facet1, facet2);
    -    }
    -    if (qh TRACEmerge-1 == zzval_(Ztotmerge))
    -      qhmem.IStracing= qh IStracing= qh TRACElevel;
    -    dist1= qh_getdistance(facet1, facet2, &mindist1, &maxdist1);
    -    dist2= qh_getdistance(facet2, facet1, &mindist2, &maxdist2);
    -    trace0((qh ferr, 16, "qh_forcedmerges: duplicate ridge between f%d and f%d, dist %2.2g and reverse dist %2.2g during p%d\n",
    -            facet1->id, facet2->id, dist1, dist2, qh furthest_id));
    -    if (dist1 < dist2)
    -      qh_mergefacet(facet1, facet2, &mindist1, &maxdist1, !qh_MERGEapex);
    -    else {
    -      qh_mergefacet(facet2, facet1, &mindist2, &maxdist2, !qh_MERGEapex);
    -      dist1= dist2;
    -      facet1= facet2;
    -    }
    -    if (facet1->flipped) {
    -      zinc_(Zmergeflipdup);
    -      numflip++;
    -    }else
    -      nummerge++;
    -    if (qh PRINTstatistics) {
    -      zinc_(Zduplicate);
    -      wadd_(Wduplicatetot, dist1);
    -      wmax_(Wduplicatemax, dist1);
    -    }
    -  }
    -  FOREACHmerge_(othermerges) {
    -    if (merge->type == MRGridge)
    -      qh_memfree(merge, (int)sizeof(mergeT));
    -    else
    -      qh_setappend(&qh facet_mergeset, merge);
    -  }
    -  qh_settempfree(&othermerges);
    -  if (nummerge)
    -    *wasmerge= True;
    -  trace1((qh ferr, 1011, "qh_forcedmerges: merged %d facets and %d flipped facets across duplicated ridges\n",
    -                nummerge, numflip));
    -} /* forcedmerges */
    -
    -
    -/*---------------------------------
    -
    -  qh_getmergeset( facetlist )
    -    determines nonconvex facets on facetlist
    -    tests !tested ridges and nonconvex ridges of !tested facets
    -
    -  returns:
    -    returns sorted qh.facet_mergeset of facet-neighbor pairs to be merged
    -    all ridges tested
    -
    -  notes:
    -    assumes no nonconvex ridges with both facets tested
    -    uses facet->tested/ridge->tested to prevent duplicate tests
    -    can not limit tests to modified ridges since the centrum changed
    -    uses qh.visit_id
    -
    -  see:
    -    qh_getmergeset_initial()
    -
    -  design:
    -    for each facet on facetlist
    -      for each ridge of facet
    -        if untested ridge
    -          test ridge for convexity
    -          if non-convex
    -            append ridge to qh.facet_mergeset
    -    sort qh.facet_mergeset by angle
    -*/
    -void qh_getmergeset(facetT *facetlist) {
    -  facetT *facet, *neighbor, **neighborp;
    -  ridgeT *ridge, **ridgep;
    -  int nummerges;
    -
    -  nummerges= qh_setsize(qh facet_mergeset);
    -  trace4((qh ferr, 4026, "qh_getmergeset: started.\n"));
    -  qh visit_id++;
    -  FORALLfacet_(facetlist) {
    -    if (facet->tested)
    -      continue;
    -    facet->visitid= qh visit_id;
    -    facet->tested= True;  /* must be non-simplicial due to merge */
    -    FOREACHneighbor_(facet)
    -      neighbor->seen= False;
    -    FOREACHridge_(facet->ridges) {
    -      if (ridge->tested && !ridge->nonconvex)
    -        continue;
    -      /* if tested & nonconvex, need to append merge */
    -      neighbor= otherfacet_(ridge, facet);
    -      if (neighbor->seen) {
    -        ridge->tested= True;
    -        ridge->nonconvex= False;
    -      }else if (neighbor->visitid != qh visit_id) {
    -        ridge->tested= True;
    -        ridge->nonconvex= False;
    -        neighbor->seen= True;      /* only one ridge is marked nonconvex */
    -        if (qh_test_appendmerge(facet, neighbor))
    -          ridge->nonconvex= True;
    -      }
    -    }
    -  }
    -  nummerges= qh_setsize(qh facet_mergeset);
    -  if (qh ANGLEmerge)
    -    qsort(SETaddr_(qh facet_mergeset, mergeT), (size_t)nummerges, sizeof(mergeT *), qh_compareangle);
    -  else
    -    qsort(SETaddr_(qh facet_mergeset, mergeT), (size_t)nummerges, sizeof(mergeT *), qh_comparemerge);
    -  if (qh POSTmerging) {
    -    zadd_(Zmergesettot2, nummerges);
    -  }else {
    -    zadd_(Zmergesettot, nummerges);
    -    zmax_(Zmergesetmax, nummerges);
    -  }
    -  trace2((qh ferr, 2021, "qh_getmergeset: %d merges found\n", nummerges));
    -} /* getmergeset */
    -
    -
    -/*---------------------------------
    -
    -  qh_getmergeset_initial( facetlist )
    -    determine initial qh.facet_mergeset for facets
    -    tests all facet/neighbor pairs on facetlist
    -
    -  returns:
    -    sorted qh.facet_mergeset with nonconvex ridges
    -    sets facet->tested, ridge->tested, and ridge->nonconvex
    -
    -  notes:
    -    uses visit_id, assumes ridge->nonconvex is False
    -
    -  see:
    -    qh_getmergeset()
    -
    -  design:
    -    for each facet on facetlist
    -      for each untested neighbor of facet
    -        test facet and neighbor for convexity
    -        if non-convex
    -          append merge to qh.facet_mergeset
    -          mark one of the ridges as nonconvex
    -    sort qh.facet_mergeset by angle
    -*/
    -void qh_getmergeset_initial(facetT *facetlist) {
    -  facetT *facet, *neighbor, **neighborp;
    -  ridgeT *ridge, **ridgep;
    -  int nummerges;
    -
    -  qh visit_id++;
    -  FORALLfacet_(facetlist) {
    -    facet->visitid= qh visit_id;
    -    facet->tested= True;
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->visitid != qh visit_id) {
    -        if (qh_test_appendmerge(facet, neighbor)) {
    -          FOREACHridge_(neighbor->ridges) {
    -            if (facet == otherfacet_(ridge, neighbor)) {
    -              ridge->nonconvex= True;
    -              break;    /* only one ridge is marked nonconvex */
    -            }
    -          }
    -        }
    -      }
    -    }
    -    FOREACHridge_(facet->ridges)
    -      ridge->tested= True;
    -  }
    -  nummerges= qh_setsize(qh facet_mergeset);
    -  if (qh ANGLEmerge)
    -    qsort(SETaddr_(qh facet_mergeset, mergeT), (size_t)nummerges, sizeof(mergeT *), qh_compareangle);
    -  else
    -    qsort(SETaddr_(qh facet_mergeset, mergeT), (size_t)nummerges, sizeof(mergeT *), qh_comparemerge);
    -  if (qh POSTmerging) {
    -    zadd_(Zmergeinittot2, nummerges);
    -  }else {
    -    zadd_(Zmergeinittot, nummerges);
    -    zmax_(Zmergeinitmax, nummerges);
    -  }
    -  trace2((qh ferr, 2022, "qh_getmergeset_initial: %d merges found\n", nummerges));
    -} /* getmergeset_initial */
    -
    -
    -/*---------------------------------
    -
    -  qh_hashridge( hashtable, hashsize, ridge, oldvertex )
    -    add ridge to hashtable without oldvertex
    -
    -  notes:
    -    assumes hashtable is large enough
    -
    -  design:
    -    determine hash value for ridge without oldvertex
    -    find next empty slot for ridge
    -*/
    -void qh_hashridge(setT *hashtable, int hashsize, ridgeT *ridge, vertexT *oldvertex) {
    -  int hash;
    -  ridgeT *ridgeA;
    -
    -  hash= qh_gethash(hashsize, ridge->vertices, qh hull_dim-1, 0, oldvertex);
    -  while (True) {
    -    if (!(ridgeA= SETelemt_(hashtable, hash, ridgeT))) {
    -      SETelem_(hashtable, hash)= ridge;
    -      break;
    -    }else if (ridgeA == ridge)
    -      break;
    -    if (++hash == hashsize)
    -      hash= 0;
    -  }
    -} /* hashridge */
    -
    -
    -/*---------------------------------
    -
    -  qh_hashridge_find( hashtable, hashsize, ridge, vertex, oldvertex, hashslot )
    -    returns matching ridge without oldvertex in hashtable
    -      for ridge without vertex
    -    if oldvertex is NULL
    -      matches with any one skip
    -
    -  returns:
    -    matching ridge or NULL
    -    if no match,
    -      if ridge already in   table
    -        hashslot= -1
    -      else
    -        hashslot= next NULL index
    -
    -  notes:
    -    assumes hashtable is large enough
    -    can't match ridge to itself
    -
    -  design:
    -    get hash value for ridge without vertex
    -    for each hashslot
    -      return match if ridge matches ridgeA without oldvertex
    -*/
    -ridgeT *qh_hashridge_find(setT *hashtable, int hashsize, ridgeT *ridge,
    -              vertexT *vertex, vertexT *oldvertex, int *hashslot) {
    -  int hash;
    -  ridgeT *ridgeA;
    -
    -  *hashslot= 0;
    -  zinc_(Zhashridge);
    -  hash= qh_gethash(hashsize, ridge->vertices, qh hull_dim-1, 0, vertex);
    -  while ((ridgeA= SETelemt_(hashtable, hash, ridgeT))) {
    -    if (ridgeA == ridge)
    -      *hashslot= -1;
    -    else {
    -      zinc_(Zhashridgetest);
    -      if (qh_setequal_except(ridge->vertices, vertex, ridgeA->vertices, oldvertex))
    -        return ridgeA;
    -    }
    -    if (++hash == hashsize)
    -      hash= 0;
    -  }
    -  if (!*hashslot)
    -    *hashslot= hash;
    -  return NULL;
    -} /* hashridge_find */
    -
    -
    -/*---------------------------------
    -
    -  qh_makeridges( facet )
    -    creates explicit ridges between simplicial facets
    -
    -  returns:
    -    facet with ridges and without qh_MERGEridge
    -    ->simplicial is False
    -
    -  notes:
    -    allows qh_MERGEridge flag
    -    uses existing ridges
    -    duplicate neighbors ok if ridges already exist (qh_mergecycle_ridges)
    -
    -  see:
    -    qh_mergecycle_ridges()
    -
    -  design:
    -    look for qh_MERGEridge neighbors
    -    mark neighbors that already have ridges
    -    for each unprocessed neighbor of facet
    -      create a ridge for neighbor and facet
    -    if any qh_MERGEridge neighbors
    -      delete qh_MERGEridge flags (already handled by qh_mark_dupridges)
    -*/
    -void qh_makeridges(facetT *facet) {
    -  facetT *neighbor, **neighborp;
    -  ridgeT *ridge, **ridgep;
    -  int neighbor_i, neighbor_n;
    -  boolT toporient, mergeridge= False;
    -
    -  if (!facet->simplicial)
    -    return;
    -  trace4((qh ferr, 4027, "qh_makeridges: make ridges for f%d\n", facet->id));
    -  facet->simplicial= False;
    -  FOREACHneighbor_(facet) {
    -    if (neighbor == qh_MERGEridge)
    -      mergeridge= True;
    -    else
    -      neighbor->seen= False;
    -  }
    -  FOREACHridge_(facet->ridges)
    -    otherfacet_(ridge, facet)->seen= True;
    -  FOREACHneighbor_i_(facet) {
    -    if (neighbor == qh_MERGEridge)
    -      continue;  /* fixed by qh_mark_dupridges */
    -    else if (!neighbor->seen) {  /* no current ridges */
    -      ridge= qh_newridge();
    -      ridge->vertices= qh_setnew_delnthsorted(facet->vertices, qh hull_dim,
    -                                                          neighbor_i, 0);
    -      toporient= facet->toporient ^ (neighbor_i & 0x1);
    -      if (toporient) {
    -        ridge->top= facet;
    -        ridge->bottom= neighbor;
    -      }else {
    -        ridge->top= neighbor;
    -        ridge->bottom= facet;
    -      }
    -#if 0 /* this also works */
    -      flip= (facet->toporient ^ neighbor->toporient)^(skip1 & 0x1) ^ (skip2 & 0x1);
    -      if (facet->toporient ^ (skip1 & 0x1) ^ flip) {
    -        ridge->top= neighbor;
    -        ridge->bottom= facet;
    -      }else {
    -        ridge->top= facet;
    -        ridge->bottom= neighbor;
    -      }
    -#endif
    -      qh_setappend(&(facet->ridges), ridge);
    -      qh_setappend(&(neighbor->ridges), ridge);
    -    }
    -  }
    -  if (mergeridge) {
    -    while (qh_setdel(facet->neighbors, qh_MERGEridge))
    -      ; /* delete each one */
    -  }
    -} /* makeridges */
    -
    -
    -/*---------------------------------
    -
    -  qh_mark_dupridges( facetlist )
    -    add duplicated ridges to qh.facet_mergeset
    -    facet->dupridge is true
    -
    -  returns:
    -    duplicate ridges on qh.facet_mergeset
    -    ->mergeridge/->mergeridge2 set
    -    duplicate ridges marked by qh_MERGEridge and both sides facet->dupridge
    -    no MERGEridges in neighbor sets
    -
    -  notes:
    -    duplicate ridges occur when the horizon is pinched,
    -        i.e. a subridge occurs in more than two horizon ridges.
    -    could rename vertices that pinch the horizon
    -    uses qh.visit_id
    -
    -  design:
    -    for all facets on facetlist
    -      if facet contains a duplicate ridge
    -        for each neighbor of facet
    -          if neighbor marked qh_MERGEridge (one side of the merge)
    -            set facet->mergeridge
    -          else
    -            if neighbor contains a duplicate ridge
    -            and the back link is qh_MERGEridge
    -              append duplicate ridge to qh.facet_mergeset
    -   for each duplicate ridge
    -     make ridge sets in preparation for merging
    -     remove qh_MERGEridge from neighbor set
    -   for each duplicate ridge
    -     restore the missing neighbor from the neighbor set that was qh_MERGEridge
    -     add the missing ridge for this neighbor
    -*/
    -void qh_mark_dupridges(facetT *facetlist) {
    -  facetT *facet, *neighbor, **neighborp;
    -  int nummerge=0;
    -  mergeT *merge, **mergep;
    -
    -
    -  trace4((qh ferr, 4028, "qh_mark_dupridges: identify duplicate ridges\n"));
    -  FORALLfacet_(facetlist) {
    -    if (facet->dupridge) {
    -      FOREACHneighbor_(facet) {
    -        if (neighbor == qh_MERGEridge) {
    -          facet->mergeridge= True;
    -          continue;
    -        }
    -        if (neighbor->dupridge
    -        && !qh_setin(neighbor->neighbors, facet)) { /* qh_MERGEridge */
    -          qh_appendmergeset(facet, neighbor, MRGridge, NULL);
    -          facet->mergeridge2= True;
    -          facet->mergeridge= True;
    -          nummerge++;
    -        }
    -      }
    -    }
    -  }
    -  if (!nummerge)
    -    return;
    -  FORALLfacet_(facetlist) {            /* gets rid of qh_MERGEridge */
    -    if (facet->mergeridge && !facet->mergeridge2)
    -      qh_makeridges(facet);
    -  }
    -  FOREACHmerge_(qh facet_mergeset) {   /* restore the missing neighbors */
    -    if (merge->type == MRGridge) {
    -      qh_setappend(&merge->facet2->neighbors, merge->facet1);
    -      qh_makeridges(merge->facet1);   /* and the missing ridges */
    -    }
    -  }
    -  trace1((qh ferr, 1012, "qh_mark_dupridges: found %d duplicated ridges\n",
    -                nummerge));
    -} /* mark_dupridges */
    -
    -/*---------------------------------
    -
    -  qh_maydropneighbor( facet )
    -    drop neighbor relationship if no ridge between facet and neighbor
    -
    -  returns:
    -    neighbor sets updated
    -    appends degenerate facets to qh.facet_mergeset
    -
    -  notes:
    -    won't cause redundant facets since vertex inclusion is the same
    -    may drop vertex and neighbor if no ridge
    -    uses qh.visit_id
    -
    -  design:
    -    visit all neighbors with ridges
    -    for each unvisited neighbor of facet
    -      delete neighbor and facet from the neighbor sets
    -      if neighbor becomes degenerate
    -        append neighbor to qh.degen_mergeset
    -    if facet is degenerate
    -      append facet to qh.degen_mergeset
    -*/
    -void qh_maydropneighbor(facetT *facet) {
    -  ridgeT *ridge, **ridgep;
    -  realT angledegen= qh_ANGLEdegen;
    -  facetT *neighbor, **neighborp;
    -
    -  qh visit_id++;
    -  trace4((qh ferr, 4029, "qh_maydropneighbor: test f%d for no ridges to a neighbor\n",
    -          facet->id));
    -  FOREACHridge_(facet->ridges) {
    -    ridge->top->visitid= qh visit_id;
    -    ridge->bottom->visitid= qh visit_id;
    -  }
    -  FOREACHneighbor_(facet) {
    -    if (neighbor->visitid != qh visit_id) {
    -      trace0((qh ferr, 17, "qh_maydropneighbor: facets f%d and f%d are no longer neighbors during p%d\n",
    -            facet->id, neighbor->id, qh furthest_id));
    -      zinc_(Zdropneighbor);
    -      qh_setdel(facet->neighbors, neighbor);
    -      neighborp--;  /* repeat, deleted a neighbor */
    -      qh_setdel(neighbor->neighbors, facet);
    -      if (qh_setsize(neighbor->neighbors) < qh hull_dim) {
    -        zinc_(Zdropdegen);
    -        qh_appendmergeset(neighbor, neighbor, MRGdegen, &angledegen);
    -        trace2((qh ferr, 2023, "qh_maydropneighbors: f%d is degenerate.\n", neighbor->id));
    -      }
    -    }
    -  }
    -  if (qh_setsize(facet->neighbors) < qh hull_dim) {
    -    zinc_(Zdropdegen);
    -    qh_appendmergeset(facet, facet, MRGdegen, &angledegen);
    -    trace2((qh ferr, 2024, "qh_maydropneighbors: f%d is degenerate.\n", facet->id));
    -  }
    -} /* maydropneighbor */
    -
    -
    -/*---------------------------------
    -
    -  qh_merge_degenredundant()
    -    merge all degenerate and redundant facets
    -    qh.degen_mergeset contains merges from qh_degen_redundant_neighbors()
    -
    -  returns:
    -    number of merges performed
    -    resets facet->degenerate/redundant
    -    if deleted (visible) facet has no neighbors
    -      sets ->f.replace to NULL
    -
    -  notes:
    -    redundant merges happen before degenerate ones
    -    merging and renaming vertices can result in degen/redundant facets
    -
    -  design:
    -    for each merge on qh.degen_mergeset
    -      if redundant merge
    -        if non-redundant facet merged into redundant facet
    -          recheck facet for redundancy
    -        else
    -          merge redundant facet into other facet
    -*/
    -int qh_merge_degenredundant(void) {
    -  int size;
    -  mergeT *merge;
    -  facetT *bestneighbor, *facet1, *facet2;
    -  realT dist, mindist, maxdist;
    -  vertexT *vertex, **vertexp;
    -  int nummerges= 0;
    -  mergeType mergetype;
    -
    -  while ((merge= (mergeT*)qh_setdellast(qh degen_mergeset))) {
    -    facet1= merge->facet1;
    -    facet2= merge->facet2;
    -    mergetype= merge->type;
    -    qh_memfree(merge, (int)sizeof(mergeT));
    -    if (facet1->visible)
    -      continue;
    -    facet1->degenerate= False;
    -    facet1->redundant= False;
    -    if (qh TRACEmerge-1 == zzval_(Ztotmerge))
    -      qhmem.IStracing= qh IStracing= qh TRACElevel;
    -    if (mergetype == MRGredundant) {
    -      zinc_(Zneighbor);
    -      while (facet2->visible) {
    -        if (!facet2->f.replace) {
    -          qh_fprintf(qh ferr, 6097, "qhull internal error (qh_merge_degenredunant): f%d redundant but f%d has no replacement\n",
    -               facet1->id, facet2->id);
    -          qh_errexit2 (qh_ERRqhull, facet1, facet2);
    -        }
    -        facet2= facet2->f.replace;
    -      }
    -      if (facet1 == facet2) {
    -        qh_degen_redundant_facet(facet1); /* in case of others */
    -        continue;
    -      }
    -      trace2((qh ferr, 2025, "qh_merge_degenredundant: facet f%d is contained in f%d, will merge\n",
    -            facet1->id, facet2->id));
    -      qh_mergefacet(facet1, facet2, NULL, NULL, !qh_MERGEapex);
    -      /* merge distance is already accounted for */
    -      nummerges++;
    -    }else {  /* mergetype == MRGdegen, other merges may have fixed */
    -      if (!(size= qh_setsize(facet1->neighbors))) {
    -        zinc_(Zdelfacetdup);
    -        trace2((qh ferr, 2026, "qh_merge_degenredundant: facet f%d has no neighbors.  Deleted\n", facet1->id));
    -        qh_willdelete(facet1, NULL);
    -        FOREACHvertex_(facet1->vertices) {
    -          qh_setdel(vertex->neighbors, facet1);
    -          if (!SETfirst_(vertex->neighbors)) {
    -            zinc_(Zdegenvertex);
    -            trace2((qh ferr, 2027, "qh_merge_degenredundant: deleted v%d because f%d has no neighbors\n",
    -                 vertex->id, facet1->id));
    -            vertex->deleted= True;
    -            qh_setappend(&qh del_vertices, vertex);
    -          }
    -        }
    -        nummerges++;
    -      }else if (size < qh hull_dim) {
    -        bestneighbor= qh_findbestneighbor(facet1, &dist, &mindist, &maxdist);
    -        trace2((qh ferr, 2028, "qh_merge_degenredundant: facet f%d has %d neighbors, merge into f%d dist %2.2g\n",
    -              facet1->id, size, bestneighbor->id, dist));
    -        qh_mergefacet(facet1, bestneighbor, &mindist, &maxdist, !qh_MERGEapex);
    -        nummerges++;
    -        if (qh PRINTstatistics) {
    -          zinc_(Zdegen);
    -          wadd_(Wdegentot, dist);
    -          wmax_(Wdegenmax, dist);
    -        }
    -      } /* else, another merge fixed the degeneracy and redundancy tested */
    -    }
    -  }
    -  return nummerges;
    -} /* merge_degenredundant */
    -
    -/*---------------------------------
    -
    -  qh_merge_nonconvex( facet1, facet2, mergetype )
    -    remove non-convex ridge between facet1 into facet2
    -    mergetype gives why the facet's are non-convex
    -
    -  returns:
    -    merges one of the facets into the best neighbor
    -
    -  design:
    -    if one of the facets is a new facet
    -      prefer merging new facet into old facet
    -    find best neighbors for both facets
    -    merge the nearest facet into its best neighbor
    -    update the statistics
    -*/
    -void qh_merge_nonconvex(facetT *facet1, facetT *facet2, mergeType mergetype) {
    -  facetT *bestfacet, *bestneighbor, *neighbor;
    -  realT dist, dist2, mindist, mindist2, maxdist, maxdist2;
    -
    -  if (qh TRACEmerge-1 == zzval_(Ztotmerge))
    -    qhmem.IStracing= qh IStracing= qh TRACElevel;
    -  trace3((qh ferr, 3003, "qh_merge_nonconvex: merge #%d for f%d and f%d type %d\n",
    -      zzval_(Ztotmerge) + 1, facet1->id, facet2->id, mergetype));
    -  /* concave or coplanar */
    -  if (!facet1->newfacet) {
    -    bestfacet= facet2;   /* avoid merging old facet if new is ok */
    -    facet2= facet1;
    -    facet1= bestfacet;
    -  }else
    -    bestfacet= facet1;
    -  bestneighbor= qh_findbestneighbor(bestfacet, &dist, &mindist, &maxdist);
    -  neighbor= qh_findbestneighbor(facet2, &dist2, &mindist2, &maxdist2);
    -  if (dist < dist2) {
    -    qh_mergefacet(bestfacet, bestneighbor, &mindist, &maxdist, !qh_MERGEapex);
    -  }else if (qh AVOIDold && !facet2->newfacet
    -  && ((mindist >= -qh MAXcoplanar && maxdist <= qh max_outside)
    -       || dist * 1.5 < dist2)) {
    -    zinc_(Zavoidold);
    -    wadd_(Wavoidoldtot, dist);
    -    wmax_(Wavoidoldmax, dist);
    -    trace2((qh ferr, 2029, "qh_merge_nonconvex: avoid merging old facet f%d dist %2.2g.  Use f%d dist %2.2g instead\n",
    -           facet2->id, dist2, facet1->id, dist2));
    -    qh_mergefacet(bestfacet, bestneighbor, &mindist, &maxdist, !qh_MERGEapex);
    -  }else {
    -    qh_mergefacet(facet2, neighbor, &mindist2, &maxdist2, !qh_MERGEapex);
    -    dist= dist2;
    -  }
    -  if (qh PRINTstatistics) {
    -    if (mergetype == MRGanglecoplanar) {
    -      zinc_(Zacoplanar);
    -      wadd_(Wacoplanartot, dist);
    -      wmax_(Wacoplanarmax, dist);
    -    }else if (mergetype == MRGconcave) {
    -      zinc_(Zconcave);
    -      wadd_(Wconcavetot, dist);
    -      wmax_(Wconcavemax, dist);
    -    }else { /* MRGcoplanar */
    -      zinc_(Zcoplanar);
    -      wadd_(Wcoplanartot, dist);
    -      wmax_(Wcoplanarmax, dist);
    -    }
    -  }
    -} /* merge_nonconvex */
    -
    -/*---------------------------------
    -
    -  qh_mergecycle( samecycle, newfacet )
    -    merge a cycle of facets starting at samecycle into a newfacet
    -    newfacet is a horizon facet with ->normal
    -    samecycle facets are simplicial from an apex
    -
    -  returns:
    -    initializes vertex neighbors on first merge
    -    samecycle deleted (placed on qh.visible_list)
    -    newfacet at end of qh.facet_list
    -    deleted vertices on qh.del_vertices
    -
    -  see:
    -    qh_mergefacet()
    -    called by qh_mergecycle_all() for multiple, same cycle facets
    -
    -  design:
    -    make vertex neighbors if necessary
    -    make ridges for newfacet
    -    merge neighbor sets of samecycle into newfacet
    -    merge ridges of samecycle into newfacet
    -    merge vertex neighbors of samecycle into newfacet
    -    make apex of samecycle the apex of newfacet
    -    if newfacet wasn't a new facet
    -      add its vertices to qh.newvertex_list
    -    delete samecycle facets a make newfacet a newfacet
    -*/
    -void qh_mergecycle(facetT *samecycle, facetT *newfacet) {
    -  int traceonce= False, tracerestore= 0;
    -  vertexT *apex;
    -#ifndef qh_NOtrace
    -  facetT *same;
    -#endif
    -
    -  if (newfacet->tricoplanar) {
    -    if (!qh TRInormals) {
    -      qh_fprintf(qh ferr, 6224, "Qhull internal error (qh_mergecycle): does not work for tricoplanar facets.  Use option 'Q11'\n");
    -      qh_errexit(qh_ERRqhull, newfacet, NULL);
    -    }
    -    newfacet->tricoplanar= False;
    -    newfacet->keepcentrum= False;
    -  }
    -  if (!qh VERTEXneighbors)
    -    qh_vertexneighbors();
    -  zzinc_(Ztotmerge);
    -  if (qh REPORTfreq2 && qh POSTmerging) {
    -    if (zzval_(Ztotmerge) > qh mergereport + qh REPORTfreq2)
    -      qh_tracemerging();
    -  }
    -#ifndef qh_NOtrace
    -  if (qh TRACEmerge == zzval_(Ztotmerge))
    -    qhmem.IStracing= qh IStracing= qh TRACElevel;
    -  trace2((qh ferr, 2030, "qh_mergecycle: merge #%d for facets from cycle f%d into coplanar horizon f%d\n",
    -        zzval_(Ztotmerge), samecycle->id, newfacet->id));
    -  if (newfacet == qh tracefacet) {
    -    tracerestore= qh IStracing;
    -    qh IStracing= 4;
    -    qh_fprintf(qh ferr, 8068, "qh_mergecycle: ========= trace merge %d of samecycle %d into trace f%d, furthest is p%d\n",
    -               zzval_(Ztotmerge), samecycle->id, newfacet->id,  qh furthest_id);
    -    traceonce= True;
    -  }
    -  if (qh IStracing >=4) {
    -    qh_fprintf(qh ferr, 8069, "  same cycle:");
    -    FORALLsame_cycle_(samecycle)
    -      qh_fprintf(qh ferr, 8070, " f%d", same->id);
    -    qh_fprintf(qh ferr, 8071, "\n");
    -  }
    -  if (qh IStracing >=4)
    -    qh_errprint("MERGING CYCLE", samecycle, newfacet, NULL, NULL);
    -#endif /* !qh_NOtrace */
    -  apex= SETfirstt_(samecycle->vertices, vertexT);
    -  qh_makeridges(newfacet);
    -  qh_mergecycle_neighbors(samecycle, newfacet);
    -  qh_mergecycle_ridges(samecycle, newfacet);
    -  qh_mergecycle_vneighbors(samecycle, newfacet);
    -  if (SETfirstt_(newfacet->vertices, vertexT) != apex)
    -    qh_setaddnth(&newfacet->vertices, 0, apex);  /* apex has last id */
    -  if (!newfacet->newfacet)
    -    qh_newvertices(newfacet->vertices);
    -  qh_mergecycle_facets(samecycle, newfacet);
    -  qh_tracemerge(samecycle, newfacet);
    -  /* check for degen_redundant_neighbors after qh_forcedmerges() */
    -  if (traceonce) {
    -    qh_fprintf(qh ferr, 8072, "qh_mergecycle: end of trace facet\n");
    -    qh IStracing= tracerestore;
    -  }
    -} /* mergecycle */
    -
    -/*---------------------------------
    -
    -  qh_mergecycle_all( facetlist, wasmerge )
    -    merge all samecycles of coplanar facets into horizon
    -    don't merge facets with ->mergeridge (these already have ->normal)
    -    all facets are simplicial from apex
    -    all facet->cycledone == False
    -
    -  returns:
    -    all newfacets merged into coplanar horizon facets
    -    deleted vertices on  qh.del_vertices
    -    sets wasmerge if any merge
    -
    -  see:
    -    calls qh_mergecycle for multiple, same cycle facets
    -
    -  design:
    -    for each facet on facetlist
    -      skip facets with duplicate ridges and normals
    -      check that facet is in a samecycle (->mergehorizon)
    -      if facet only member of samecycle
    -        sets vertex->delridge for all vertices except apex
    -        merge facet into horizon
    -      else
    -        mark all facets in samecycle
    -        remove facets with duplicate ridges from samecycle
    -        merge samecycle into horizon (deletes facets from facetlist)
    -*/
    -void qh_mergecycle_all(facetT *facetlist, boolT *wasmerge) {
    -  facetT *facet, *same, *prev, *horizon;
    -  facetT *samecycle= NULL, *nextfacet, *nextsame;
    -  vertexT *apex, *vertex, **vertexp;
    -  int cycles=0, total=0, facets, nummerge;
    -
    -  trace2((qh ferr, 2031, "qh_mergecycle_all: begin\n"));
    -  for (facet= facetlist; facet && (nextfacet= facet->next); facet= nextfacet) {
    -    if (facet->normal)
    -      continue;
    -    if (!facet->mergehorizon) {
    -      qh_fprintf(qh ferr, 6225, "Qhull internal error (qh_mergecycle_all): f%d without normal\n", facet->id);
    -      qh_errexit(qh_ERRqhull, facet, NULL);
    -    }
    -    horizon= SETfirstt_(facet->neighbors, facetT);
    -    if (facet->f.samecycle == facet) {
    -      zinc_(Zonehorizon);
    -      /* merge distance done in qh_findhorizon */
    -      apex= SETfirstt_(facet->vertices, vertexT);
    -      FOREACHvertex_(facet->vertices) {
    -        if (vertex != apex)
    -          vertex->delridge= True;
    -      }
    -      horizon->f.newcycle= NULL;
    -      qh_mergefacet(facet, horizon, NULL, NULL, qh_MERGEapex);
    -    }else {
    -      samecycle= facet;
    -      facets= 0;
    -      prev= facet;
    -      for (same= facet->f.samecycle; same;  /* FORALLsame_cycle_(facet) */
    -           same= (same == facet ? NULL :nextsame)) { /* ends at facet */
    -        nextsame= same->f.samecycle;
    -        if (same->cycledone || same->visible)
    -          qh_infiniteloop(same);
    -        same->cycledone= True;
    -        if (same->normal) {
    -          prev->f.samecycle= same->f.samecycle; /* unlink ->mergeridge */
    -          same->f.samecycle= NULL;
    -        }else {
    -          prev= same;
    -          facets++;
    -        }
    -      }
    -      while (nextfacet && nextfacet->cycledone)  /* will delete samecycle */
    -        nextfacet= nextfacet->next;
    -      horizon->f.newcycle= NULL;
    -      qh_mergecycle(samecycle, horizon);
    -      nummerge= horizon->nummerge + facets;
    -      if (nummerge > qh_MAXnummerge)
    -        horizon->nummerge= qh_MAXnummerge;
    -      else
    -        horizon->nummerge= (short unsigned int)nummerge;
    -      zzinc_(Zcyclehorizon);
    -      total += facets;
    -      zzadd_(Zcyclefacettot, facets);
    -      zmax_(Zcyclefacetmax, facets);
    -    }
    -    cycles++;
    -  }
    -  if (cycles)
    -    *wasmerge= True;
    -  trace1((qh ferr, 1013, "qh_mergecycle_all: merged %d same cycles or facets into coplanar horizons\n", cycles));
    -} /* mergecycle_all */
    -
    -/*---------------------------------
    -
    -  qh_mergecycle_facets( samecycle, newfacet )
    -    finish merge of samecycle into newfacet
    -
    -  returns:
    -    samecycle prepended to visible_list for later deletion and partitioning
    -      each facet->f.replace == newfacet
    -
    -    newfacet moved to end of qh.facet_list
    -      makes newfacet a newfacet (get's facet1->id if it was old)
    -      sets newfacet->newmerge
    -      clears newfacet->center (unless merging into a large facet)
    -      clears newfacet->tested and ridge->tested for facet1
    -
    -    adds neighboring facets to facet_mergeset if redundant or degenerate
    -
    -  design:
    -    make newfacet a new facet and set its flags
    -    move samecycle facets to qh.visible_list for later deletion
    -    unless newfacet is large
    -      remove its centrum
    -*/
    -void qh_mergecycle_facets(facetT *samecycle, facetT *newfacet) {
    -  facetT *same, *next;
    -
    -  trace4((qh ferr, 4030, "qh_mergecycle_facets: make newfacet new and samecycle deleted\n"));
    -  qh_removefacet(newfacet);  /* append as a newfacet to end of qh facet_list */
    -  qh_appendfacet(newfacet);
    -  newfacet->newfacet= True;
    -  newfacet->simplicial= False;
    -  newfacet->newmerge= True;
    -
    -  for (same= samecycle->f.samecycle; same; same= (same == samecycle ?  NULL : next)) {
    -    next= same->f.samecycle;  /* reused by willdelete */
    -    qh_willdelete(same, newfacet);
    -  }
    -  if (newfacet->center
    -      && qh_setsize(newfacet->vertices) <= qh hull_dim + qh_MAXnewcentrum) {
    -    qh_memfree(newfacet->center, qh normal_size);
    -    newfacet->center= NULL;
    -  }
    -  trace3((qh ferr, 3004, "qh_mergecycle_facets: merged facets from cycle f%d into f%d\n",
    -             samecycle->id, newfacet->id));
    -} /* mergecycle_facets */
    -
    -/*---------------------------------
    -
    -  qh_mergecycle_neighbors( samecycle, newfacet )
    -    add neighbors for samecycle facets to newfacet
    -
    -  returns:
    -    newfacet with updated neighbors and vice-versa
    -    newfacet has ridges
    -    all neighbors of newfacet marked with qh.visit_id
    -    samecycle facets marked with qh.visit_id-1
    -    ridges updated for simplicial neighbors of samecycle with a ridge
    -
    -  notes:
    -    assumes newfacet not in samecycle
    -    usually, samecycle facets are new, simplicial facets without internal ridges
    -      not so if horizon facet is coplanar to two different samecycles
    -
    -  see:
    -    qh_mergeneighbors()
    -
    -  design:
    -    check samecycle
    -    delete neighbors from newfacet that are also in samecycle
    -    for each neighbor of a facet in samecycle
    -      if neighbor is simplicial
    -        if first visit
    -          move the neighbor relation to newfacet
    -          update facet links for its ridges
    -        else
    -          make ridges for neighbor
    -          remove samecycle reference
    -      else
    -        update neighbor sets
    -*/
    -void qh_mergecycle_neighbors(facetT *samecycle, facetT *newfacet) {
    -  facetT *same, *neighbor, **neighborp;
    -  int delneighbors= 0, newneighbors= 0;
    -  unsigned int samevisitid;
    -  ridgeT *ridge, **ridgep;
    -
    -  samevisitid= ++qh visit_id;
    -  FORALLsame_cycle_(samecycle) {
    -    if (same->visitid == samevisitid || same->visible)
    -      qh_infiniteloop(samecycle);
    -    same->visitid= samevisitid;
    -  }
    -  newfacet->visitid= ++qh visit_id;
    -  trace4((qh ferr, 4031, "qh_mergecycle_neighbors: delete shared neighbors from newfacet\n"));
    -  FOREACHneighbor_(newfacet) {
    -    if (neighbor->visitid == samevisitid) {
    -      SETref_(neighbor)= NULL;  /* samecycle neighbors deleted */
    -      delneighbors++;
    -    }else
    -      neighbor->visitid= qh visit_id;
    -  }
    -  qh_setcompact(newfacet->neighbors);
    -
    -  trace4((qh ferr, 4032, "qh_mergecycle_neighbors: update neighbors\n"));
    -  FORALLsame_cycle_(samecycle) {
    -    FOREACHneighbor_(same) {
    -      if (neighbor->visitid == samevisitid)
    -        continue;
    -      if (neighbor->simplicial) {
    -        if (neighbor->visitid != qh visit_id) {
    -          qh_setappend(&newfacet->neighbors, neighbor);
    -          qh_setreplace(neighbor->neighbors, same, newfacet);
    -          newneighbors++;
    -          neighbor->visitid= qh visit_id;
    -          FOREACHridge_(neighbor->ridges) { /* update ridge in case of qh_makeridges */
    -            if (ridge->top == same) {
    -              ridge->top= newfacet;
    -              break;
    -            }else if (ridge->bottom == same) {
    -              ridge->bottom= newfacet;
    -              break;
    -            }
    -          }
    -        }else {
    -          qh_makeridges(neighbor);
    -          qh_setdel(neighbor->neighbors, same);
    -          /* same can't be horizon facet for neighbor */
    -        }
    -      }else { /* non-simplicial neighbor */
    -        qh_setdel(neighbor->neighbors, same);
    -        if (neighbor->visitid != qh visit_id) {
    -          qh_setappend(&neighbor->neighbors, newfacet);
    -          qh_setappend(&newfacet->neighbors, neighbor);
    -          neighbor->visitid= qh visit_id;
    -          newneighbors++;
    -        }
    -      }
    -    }
    -  }
    -  trace2((qh ferr, 2032, "qh_mergecycle_neighbors: deleted %d neighbors and added %d\n",
    -             delneighbors, newneighbors));
    -} /* mergecycle_neighbors */
    -
    -/*---------------------------------
    -
    -  qh_mergecycle_ridges( samecycle, newfacet )
    -    add ridges/neighbors for facets in samecycle to newfacet
    -    all new/old neighbors of newfacet marked with qh.visit_id
    -    facets in samecycle marked with qh.visit_id-1
    -    newfacet marked with qh.visit_id
    -
    -  returns:
    -    newfacet has merged ridges
    -
    -  notes:
    -    ridge already updated for simplicial neighbors of samecycle with a ridge
    -
    -  see:
    -    qh_mergeridges()
    -    qh_makeridges()
    -
    -  design:
    -    remove ridges between newfacet and samecycle
    -    for each facet in samecycle
    -      for each ridge in facet
    -        update facet pointers in ridge
    -        skip ridges processed in qh_mergecycle_neighors
    -        free ridges between newfacet and samecycle
    -        free ridges between facets of samecycle (on 2nd visit)
    -        append remaining ridges to newfacet
    -      if simpilicial facet
    -        for each neighbor of facet
    -          if simplicial facet
    -          and not samecycle facet or newfacet
    -            make ridge between neighbor and newfacet
    -*/
    -void qh_mergecycle_ridges(facetT *samecycle, facetT *newfacet) {
    -  facetT *same, *neighbor= NULL;
    -  int numold=0, numnew=0;
    -  int neighbor_i, neighbor_n;
    -  unsigned int samevisitid;
    -  ridgeT *ridge, **ridgep;
    -  boolT toporient;
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  trace4((qh ferr, 4033, "qh_mergecycle_ridges: delete shared ridges from newfacet\n"));
    -  samevisitid= qh visit_id -1;
    -  FOREACHridge_(newfacet->ridges) {
    -    neighbor= otherfacet_(ridge, newfacet);
    -    if (neighbor->visitid == samevisitid)
    -      SETref_(ridge)= NULL; /* ridge free'd below */
    -  }
    -  qh_setcompact(newfacet->ridges);
    -
    -  trace4((qh ferr, 4034, "qh_mergecycle_ridges: add ridges to newfacet\n"));
    -  FORALLsame_cycle_(samecycle) {
    -    FOREACHridge_(same->ridges) {
    -      if (ridge->top == same) {
    -        ridge->top= newfacet;
    -        neighbor= ridge->bottom;
    -      }else if (ridge->bottom == same) {
    -        ridge->bottom= newfacet;
    -        neighbor= ridge->top;
    -      }else if (ridge->top == newfacet || ridge->bottom == newfacet) {
    -        qh_setappend(&newfacet->ridges, ridge);
    -        numold++;  /* already set by qh_mergecycle_neighbors */
    -        continue;
    -      }else {
    -        qh_fprintf(qh ferr, 6098, "qhull internal error (qh_mergecycle_ridges): bad ridge r%d\n", ridge->id);
    -        qh_errexit(qh_ERRqhull, NULL, ridge);
    -      }
    -      if (neighbor == newfacet) {
    -        qh_setfree(&(ridge->vertices));
    -        qh_memfree_(ridge, (int)sizeof(ridgeT), freelistp);
    -        numold++;
    -      }else if (neighbor->visitid == samevisitid) {
    -        qh_setdel(neighbor->ridges, ridge);
    -        qh_setfree(&(ridge->vertices));
    -        qh_memfree_(ridge, (int)sizeof(ridgeT), freelistp);
    -        numold++;
    -      }else {
    -        qh_setappend(&newfacet->ridges, ridge);
    -        numold++;
    -      }
    -    }
    -    if (same->ridges)
    -      qh_settruncate(same->ridges, 0);
    -    if (!same->simplicial)
    -      continue;
    -    FOREACHneighbor_i_(same) {       /* note: !newfact->simplicial */
    -      if (neighbor->visitid != samevisitid && neighbor->simplicial) {
    -        ridge= qh_newridge();
    -        ridge->vertices= qh_setnew_delnthsorted(same->vertices, qh hull_dim,
    -                                                          neighbor_i, 0);
    -        toporient= same->toporient ^ (neighbor_i & 0x1);
    -        if (toporient) {
    -          ridge->top= newfacet;
    -          ridge->bottom= neighbor;
    -        }else {
    -          ridge->top= neighbor;
    -          ridge->bottom= newfacet;
    -        }
    -        qh_setappend(&(newfacet->ridges), ridge);
    -        qh_setappend(&(neighbor->ridges), ridge);
    -        numnew++;
    -      }
    -    }
    -  }
    -
    -  trace2((qh ferr, 2033, "qh_mergecycle_ridges: found %d old ridges and %d new ones\n",
    -             numold, numnew));
    -} /* mergecycle_ridges */
    -
    -/*---------------------------------
    -
    -  qh_mergecycle_vneighbors( samecycle, newfacet )
    -    create vertex neighbors for newfacet from vertices of facets in samecycle
    -    samecycle marked with visitid == qh.visit_id - 1
    -
    -  returns:
    -    newfacet vertices with updated neighbors
    -    marks newfacet with qh.visit_id-1
    -    deletes vertices that are merged away
    -    sets delridge on all vertices (faster here than in mergecycle_ridges)
    -
    -  see:
    -    qh_mergevertex_neighbors()
    -
    -  design:
    -    for each vertex of samecycle facet
    -      set vertex->delridge
    -      delete samecycle facets from vertex neighbors
    -      append newfacet to vertex neighbors
    -      if vertex only in newfacet
    -        delete it from newfacet
    -        add it to qh.del_vertices for later deletion
    -*/
    -void qh_mergecycle_vneighbors(facetT *samecycle, facetT *newfacet) {
    -  facetT *neighbor, **neighborp;
    -  unsigned int mergeid;
    -  vertexT *vertex, **vertexp, *apex;
    -  setT *vertices;
    -
    -  trace4((qh ferr, 4035, "qh_mergecycle_vneighbors: update vertex neighbors for newfacet\n"));
    -  mergeid= qh visit_id - 1;
    -  newfacet->visitid= mergeid;
    -  vertices= qh_basevertices(samecycle); /* temp */
    -  apex= SETfirstt_(samecycle->vertices, vertexT);
    -  qh_setappend(&vertices, apex);
    -  FOREACHvertex_(vertices) {
    -    vertex->delridge= True;
    -    FOREACHneighbor_(vertex) {
    -      if (neighbor->visitid == mergeid)
    -        SETref_(neighbor)= NULL;
    -    }
    -    qh_setcompact(vertex->neighbors);
    -    qh_setappend(&vertex->neighbors, newfacet);
    -    if (!SETsecond_(vertex->neighbors)) {
    -      zinc_(Zcyclevertex);
    -      trace2((qh ferr, 2034, "qh_mergecycle_vneighbors: deleted v%d when merging cycle f%d into f%d\n",
    -        vertex->id, samecycle->id, newfacet->id));
    -      qh_setdelsorted(newfacet->vertices, vertex);
    -      vertex->deleted= True;
    -      qh_setappend(&qh del_vertices, vertex);
    -    }
    -  }
    -  qh_settempfree(&vertices);
    -  trace3((qh ferr, 3005, "qh_mergecycle_vneighbors: merged vertices from cycle f%d into f%d\n",
    -             samecycle->id, newfacet->id));
    -} /* mergecycle_vneighbors */
    -
    -/*---------------------------------
    -
    -  qh_mergefacet( facet1, facet2, mindist, maxdist, mergeapex )
    -    merges facet1 into facet2
    -    mergeapex==qh_MERGEapex if merging new facet into coplanar horizon
    -
    -  returns:
    -    qh.max_outside and qh.min_vertex updated
    -    initializes vertex neighbors on first merge
    -
    -  returns:
    -    facet2 contains facet1's vertices, neighbors, and ridges
    -      facet2 moved to end of qh.facet_list
    -      makes facet2 a newfacet
    -      sets facet2->newmerge set
    -      clears facet2->center (unless merging into a large facet)
    -      clears facet2->tested and ridge->tested for facet1
    -
    -    facet1 prepended to visible_list for later deletion and partitioning
    -      facet1->f.replace == facet2
    -
    -    adds neighboring facets to facet_mergeset if redundant or degenerate
    -
    -  notes:
    -    mindist/maxdist may be NULL (only if both NULL)
    -    traces merge if fmax_(maxdist,-mindist) > TRACEdist
    -
    -  see:
    -    qh_mergecycle()
    -
    -  design:
    -    trace merge and check for degenerate simplex
    -    make ridges for both facets
    -    update qh.max_outside, qh.max_vertex, qh.min_vertex
    -    update facet2->maxoutside and keepcentrum
    -    update facet2->nummerge
    -    update tested flags for facet2
    -    if facet1 is simplicial
    -      merge facet1 into facet2
    -    else
    -      merge facet1's neighbors into facet2
    -      merge facet1's ridges into facet2
    -      merge facet1's vertices into facet2
    -      merge facet1's vertex neighbors into facet2
    -      add facet2's vertices to qh.new_vertexlist
    -      unless qh_MERGEapex
    -        test facet2 for degenerate or redundant neighbors
    -      move facet1 to qh.visible_list for later deletion
    -      move facet2 to end of qh.newfacet_list
    -*/
    -void qh_mergefacet(facetT *facet1, facetT *facet2, realT *mindist, realT *maxdist, boolT mergeapex) {
    -  boolT traceonce= False;
    -  vertexT *vertex, **vertexp;
    -  int tracerestore=0, nummerge;
    -
    -  if (facet1->tricoplanar || facet2->tricoplanar) {
    -    if (!qh TRInormals) {
    -      qh_fprintf(qh ferr, 6226, "Qhull internal error (qh_mergefacet): does not work for tricoplanar facets.  Use option 'Q11'\n");
    -      qh_errexit2 (qh_ERRqhull, facet1, facet2);
    -    }
    -    if (facet2->tricoplanar) {
    -      facet2->tricoplanar= False;
    -      facet2->keepcentrum= False;
    -    }
    -  }
    -  zzinc_(Ztotmerge);
    -  if (qh REPORTfreq2 && qh POSTmerging) {
    -    if (zzval_(Ztotmerge) > qh mergereport + qh REPORTfreq2)
    -      qh_tracemerging();
    -  }
    -#ifndef qh_NOtrace
    -  if (qh build_cnt >= qh RERUN) {
    -    if (mindist && (-*mindist > qh TRACEdist || *maxdist > qh TRACEdist)) {
    -      tracerestore= 0;
    -      qh IStracing= qh TRACElevel;
    -      traceonce= True;
    -      qh_fprintf(qh ferr, 8075, "qh_mergefacet: ========= trace wide merge #%d(%2.2g) for f%d into f%d, last point was p%d\n", zzval_(Ztotmerge),
    -             fmax_(-*mindist, *maxdist), facet1->id, facet2->id, qh furthest_id);
    -    }else if (facet1 == qh tracefacet || facet2 == qh tracefacet) {
    -      tracerestore= qh IStracing;
    -      qh IStracing= 4;
    -      traceonce= True;
    -      qh_fprintf(qh ferr, 8076, "qh_mergefacet: ========= trace merge #%d involving f%d, furthest is p%d\n",
    -                 zzval_(Ztotmerge), qh tracefacet_id,  qh furthest_id);
    -    }
    -  }
    -  if (qh IStracing >= 2) {
    -    realT mergemin= -2;
    -    realT mergemax= -2;
    -
    -    if (mindist) {
    -      mergemin= *mindist;
    -      mergemax= *maxdist;
    -    }
    -    qh_fprintf(qh ferr, 8077, "qh_mergefacet: #%d merge f%d into f%d, mindist= %2.2g, maxdist= %2.2g\n",
    -    zzval_(Ztotmerge), facet1->id, facet2->id, mergemin, mergemax);
    -  }
    -#endif /* !qh_NOtrace */
    -  if (facet1 == facet2 || facet1->visible || facet2->visible) {
    -    qh_fprintf(qh ferr, 6099, "qhull internal error (qh_mergefacet): either f%d and f%d are the same or one is a visible facet\n",
    -             facet1->id, facet2->id);
    -    qh_errexit2 (qh_ERRqhull, facet1, facet2);
    -  }
    -  if (qh num_facets - qh num_visible <= qh hull_dim + 1) {
    -    qh_fprintf(qh ferr, 6227, "\n\
    -qhull precision error: Only %d facets remain.  Can not merge another\n\
    -pair.  The input is too degenerate or the convexity constraints are\n\
    -too strong.\n", qh hull_dim+1);
    -    if (qh hull_dim >= 5 && !qh MERGEexact)
    -      qh_fprintf(qh ferr, 8079, "Option 'Qx' may avoid this problem.\n");
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (!qh VERTEXneighbors)
    -    qh_vertexneighbors();
    -  qh_makeridges(facet1);
    -  qh_makeridges(facet2);
    -  if (qh IStracing >=4)
    -    qh_errprint("MERGING", facet1, facet2, NULL, NULL);
    -  if (mindist) {
    -    maximize_(qh max_outside, *maxdist);
    -    maximize_(qh max_vertex, *maxdist);
    -#if qh_MAXoutside
    -    maximize_(facet2->maxoutside, *maxdist);
    -#endif
    -    minimize_(qh min_vertex, *mindist);
    -    if (!facet2->keepcentrum
    -    && (*maxdist > qh WIDEfacet || *mindist < -qh WIDEfacet)) {
    -      facet2->keepcentrum= True;
    -      zinc_(Zwidefacet);
    -    }
    -  }
    -  nummerge= facet1->nummerge + facet2->nummerge + 1;
    -  if (nummerge >= qh_MAXnummerge)
    -    facet2->nummerge= qh_MAXnummerge;
    -  else
    -    facet2->nummerge= (short unsigned int)nummerge;
    -  facet2->newmerge= True;
    -  facet2->dupridge= False;
    -  qh_updatetested  (facet1, facet2);
    -  if (qh hull_dim > 2 && qh_setsize(facet1->vertices) == qh hull_dim)
    -    qh_mergesimplex(facet1, facet2, mergeapex);
    -  else {
    -    qh vertex_visit++;
    -    FOREACHvertex_(facet2->vertices)
    -      vertex->visitid= qh vertex_visit;
    -    if (qh hull_dim == 2)
    -      qh_mergefacet2d(facet1, facet2);
    -    else {
    -      qh_mergeneighbors(facet1, facet2);
    -      qh_mergevertices(facet1->vertices, &facet2->vertices);
    -    }
    -    qh_mergeridges(facet1, facet2);
    -    qh_mergevertex_neighbors(facet1, facet2);
    -    if (!facet2->newfacet)
    -      qh_newvertices(facet2->vertices);
    -  }
    -  if (!mergeapex)
    -    qh_degen_redundant_neighbors(facet2, facet1);
    -  if (facet2->coplanar || !facet2->newfacet) {
    -    zinc_(Zmergeintohorizon);
    -  }else if (!facet1->newfacet && facet2->newfacet) {
    -    zinc_(Zmergehorizon);
    -  }else {
    -    zinc_(Zmergenew);
    -  }
    -  qh_willdelete(facet1, facet2);
    -  qh_removefacet(facet2);  /* append as a newfacet to end of qh facet_list */
    -  qh_appendfacet(facet2);
    -  facet2->newfacet= True;
    -  facet2->tested= False;
    -  qh_tracemerge(facet1, facet2);
    -  if (traceonce) {
    -    qh_fprintf(qh ferr, 8080, "qh_mergefacet: end of wide tracing\n");
    -    qh IStracing= tracerestore;
    -  }
    -} /* mergefacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_mergefacet2d( facet1, facet2 )
    -    in 2d, merges neighbors and vertices of facet1 into facet2
    -
    -  returns:
    -    build ridges for neighbors if necessary
    -    facet2 looks like a simplicial facet except for centrum, ridges
    -      neighbors are opposite the corresponding vertex
    -      maintains orientation of facet2
    -
    -  notes:
    -    qh_mergefacet() retains non-simplicial structures
    -      they are not needed in 2d, but later routines may use them
    -    preserves qh.vertex_visit for qh_mergevertex_neighbors()
    -
    -  design:
    -    get vertices and neighbors
    -    determine new vertices and neighbors
    -    set new vertices and neighbors and adjust orientation
    -    make ridges for new neighbor if needed
    -*/
    -void qh_mergefacet2d(facetT *facet1, facetT *facet2) {
    -  vertexT *vertex1A, *vertex1B, *vertex2A, *vertex2B, *vertexA, *vertexB;
    -  facetT *neighbor1A, *neighbor1B, *neighbor2A, *neighbor2B, *neighborA, *neighborB;
    -
    -  vertex1A= SETfirstt_(facet1->vertices, vertexT);
    -  vertex1B= SETsecondt_(facet1->vertices, vertexT);
    -  vertex2A= SETfirstt_(facet2->vertices, vertexT);
    -  vertex2B= SETsecondt_(facet2->vertices, vertexT);
    -  neighbor1A= SETfirstt_(facet1->neighbors, facetT);
    -  neighbor1B= SETsecondt_(facet1->neighbors, facetT);
    -  neighbor2A= SETfirstt_(facet2->neighbors, facetT);
    -  neighbor2B= SETsecondt_(facet2->neighbors, facetT);
    -  if (vertex1A == vertex2A) {
    -    vertexA= vertex1B;
    -    vertexB= vertex2B;
    -    neighborA= neighbor2A;
    -    neighborB= neighbor1A;
    -  }else if (vertex1A == vertex2B) {
    -    vertexA= vertex1B;
    -    vertexB= vertex2A;
    -    neighborA= neighbor2B;
    -    neighborB= neighbor1A;
    -  }else if (vertex1B == vertex2A) {
    -    vertexA= vertex1A;
    -    vertexB= vertex2B;
    -    neighborA= neighbor2A;
    -    neighborB= neighbor1B;
    -  }else { /* 1B == 2B */
    -    vertexA= vertex1A;
    -    vertexB= vertex2A;
    -    neighborA= neighbor2B;
    -    neighborB= neighbor1B;
    -  }
    -  /* vertexB always from facet2, neighborB always from facet1 */
    -  if (vertexA->id > vertexB->id) {
    -    SETfirst_(facet2->vertices)= vertexA;
    -    SETsecond_(facet2->vertices)= vertexB;
    -    if (vertexB == vertex2A)
    -      facet2->toporient= !facet2->toporient;
    -    SETfirst_(facet2->neighbors)= neighborA;
    -    SETsecond_(facet2->neighbors)= neighborB;
    -  }else {
    -    SETfirst_(facet2->vertices)= vertexB;
    -    SETsecond_(facet2->vertices)= vertexA;
    -    if (vertexB == vertex2B)
    -      facet2->toporient= !facet2->toporient;
    -    SETfirst_(facet2->neighbors)= neighborB;
    -    SETsecond_(facet2->neighbors)= neighborA;
    -  }
    -  qh_makeridges(neighborB);
    -  qh_setreplace(neighborB->neighbors, facet1, facet2);
    -  trace4((qh ferr, 4036, "qh_mergefacet2d: merged v%d and neighbor f%d of f%d into f%d\n",
    -       vertexA->id, neighborB->id, facet1->id, facet2->id));
    -} /* mergefacet2d */
    -
    -
    -/*---------------------------------
    -
    -  qh_mergeneighbors( facet1, facet2 )
    -    merges the neighbors of facet1 into facet2
    -
    -  see:
    -    qh_mergecycle_neighbors()
    -
    -  design:
    -    for each neighbor of facet1
    -      if neighbor is also a neighbor of facet2
    -        if neighbor is simpilicial
    -          make ridges for later deletion as a degenerate facet
    -        update its neighbor set
    -      else
    -        move the neighbor relation to facet2
    -    remove the neighbor relation for facet1 and facet2
    -*/
    -void qh_mergeneighbors(facetT *facet1, facetT *facet2) {
    -  facetT *neighbor, **neighborp;
    -
    -  trace4((qh ferr, 4037, "qh_mergeneighbors: merge neighbors of f%d and f%d\n",
    -          facet1->id, facet2->id));
    -  qh visit_id++;
    -  FOREACHneighbor_(facet2) {
    -    neighbor->visitid= qh visit_id;
    -  }
    -  FOREACHneighbor_(facet1) {
    -    if (neighbor->visitid == qh visit_id) {
    -      if (neighbor->simplicial)    /* is degen, needs ridges */
    -        qh_makeridges(neighbor);
    -      if (SETfirstt_(neighbor->neighbors, facetT) != facet1) /*keep newfacet->horizon*/
    -        qh_setdel(neighbor->neighbors, facet1);
    -      else {
    -        qh_setdel(neighbor->neighbors, facet2);
    -        qh_setreplace(neighbor->neighbors, facet1, facet2);
    -      }
    -    }else if (neighbor != facet2) {
    -      qh_setappend(&(facet2->neighbors), neighbor);
    -      qh_setreplace(neighbor->neighbors, facet1, facet2);
    -    }
    -  }
    -  qh_setdel(facet1->neighbors, facet2);  /* here for makeridges */
    -  qh_setdel(facet2->neighbors, facet1);
    -} /* mergeneighbors */
    -
    -
    -/*---------------------------------
    -
    -  qh_mergeridges( facet1, facet2 )
    -    merges the ridge set of facet1 into facet2
    -
    -  returns:
    -    may delete all ridges for a vertex
    -    sets vertex->delridge on deleted ridges
    -
    -  see:
    -    qh_mergecycle_ridges()
    -
    -  design:
    -    delete ridges between facet1 and facet2
    -      mark (delridge) vertices on these ridges for later testing
    -    for each remaining ridge
    -      rename facet1 to facet2
    -*/
    -void qh_mergeridges(facetT *facet1, facetT *facet2) {
    -  ridgeT *ridge, **ridgep;
    -  vertexT *vertex, **vertexp;
    -
    -  trace4((qh ferr, 4038, "qh_mergeridges: merge ridges of f%d and f%d\n",
    -          facet1->id, facet2->id));
    -  FOREACHridge_(facet2->ridges) {
    -    if ((ridge->top == facet1) || (ridge->bottom == facet1)) {
    -      FOREACHvertex_(ridge->vertices)
    -        vertex->delridge= True;
    -      qh_delridge(ridge);  /* expensive in high-d, could rebuild */
    -      ridgep--; /*repeat*/
    -    }
    -  }
    -  FOREACHridge_(facet1->ridges) {
    -    if (ridge->top == facet1)
    -      ridge->top= facet2;
    -    else
    -      ridge->bottom= facet2;
    -    qh_setappend(&(facet2->ridges), ridge);
    -  }
    -} /* mergeridges */
    -
    -
    -/*---------------------------------
    -
    -  qh_mergesimplex( facet1, facet2, mergeapex )
    -    merge simplicial facet1 into facet2
    -    mergeapex==qh_MERGEapex if merging samecycle into horizon facet
    -      vertex id is latest (most recently created)
    -    facet1 may be contained in facet2
    -    ridges exist for both facets
    -
    -  returns:
    -    facet2 with updated vertices, ridges, neighbors
    -    updated neighbors for facet1's vertices
    -    facet1 not deleted
    -    sets vertex->delridge on deleted ridges
    -
    -  notes:
    -    special case code since this is the most common merge
    -    called from qh_mergefacet()
    -
    -  design:
    -    if qh_MERGEapex
    -      add vertices of facet2 to qh.new_vertexlist if necessary
    -      add apex to facet2
    -    else
    -      for each ridge between facet1 and facet2
    -        set vertex->delridge
    -      determine the apex for facet1 (i.e., vertex to be merged)
    -      unless apex already in facet2
    -        insert apex into vertices for facet2
    -      add vertices of facet2 to qh.new_vertexlist if necessary
    -      add apex to qh.new_vertexlist if necessary
    -      for each vertex of facet1
    -        if apex
    -          rename facet1 to facet2 in its vertex neighbors
    -        else
    -          delete facet1 from vertex neighors
    -          if only in facet2
    -            add vertex to qh.del_vertices for later deletion
    -      for each ridge of facet1
    -        delete ridges between facet1 and facet2
    -        append other ridges to facet2 after renaming facet to facet2
    -*/
    -void qh_mergesimplex(facetT *facet1, facetT *facet2, boolT mergeapex) {
    -  vertexT *vertex, **vertexp, *apex;
    -  ridgeT *ridge, **ridgep;
    -  boolT issubset= False;
    -  int vertex_i= -1, vertex_n;
    -  facetT *neighbor, **neighborp, *otherfacet;
    -
    -  if (mergeapex) {
    -    if (!facet2->newfacet)
    -      qh_newvertices(facet2->vertices);  /* apex is new */
    -    apex= SETfirstt_(facet1->vertices, vertexT);
    -    if (SETfirstt_(facet2->vertices, vertexT) != apex)
    -      qh_setaddnth(&facet2->vertices, 0, apex);  /* apex has last id */
    -    else
    -      issubset= True;
    -  }else {
    -    zinc_(Zmergesimplex);
    -    FOREACHvertex_(facet1->vertices)
    -      vertex->seen= False;
    -    FOREACHridge_(facet1->ridges) {
    -      if (otherfacet_(ridge, facet1) == facet2) {
    -        FOREACHvertex_(ridge->vertices) {
    -          vertex->seen= True;
    -          vertex->delridge= True;
    -        }
    -        break;
    -      }
    -    }
    -    FOREACHvertex_(facet1->vertices) {
    -      if (!vertex->seen)
    -        break;  /* must occur */
    -    }
    -    apex= vertex;
    -    trace4((qh ferr, 4039, "qh_mergesimplex: merge apex v%d of f%d into facet f%d\n",
    -          apex->id, facet1->id, facet2->id));
    -    FOREACHvertex_i_(facet2->vertices) {
    -      if (vertex->id < apex->id) {
    -        break;
    -      }else if (vertex->id == apex->id) {
    -        issubset= True;
    -        break;
    -      }
    -    }
    -    if (!issubset)
    -      qh_setaddnth(&facet2->vertices, vertex_i, apex);
    -    if (!facet2->newfacet)
    -      qh_newvertices(facet2->vertices);
    -    else if (!apex->newlist) {
    -      qh_removevertex(apex);
    -      qh_appendvertex(apex);
    -    }
    -  }
    -  trace4((qh ferr, 4040, "qh_mergesimplex: update vertex neighbors of f%d\n",
    -          facet1->id));
    -  FOREACHvertex_(facet1->vertices) {
    -    if (vertex == apex && !issubset)
    -      qh_setreplace(vertex->neighbors, facet1, facet2);
    -    else {
    -      qh_setdel(vertex->neighbors, facet1);
    -      if (!SETsecond_(vertex->neighbors))
    -        qh_mergevertex_del(vertex, facet1, facet2);
    -    }
    -  }
    -  trace4((qh ferr, 4041, "qh_mergesimplex: merge ridges and neighbors of f%d into f%d\n",
    -          facet1->id, facet2->id));
    -  qh visit_id++;
    -  FOREACHneighbor_(facet2)
    -    neighbor->visitid= qh visit_id;
    -  FOREACHridge_(facet1->ridges) {
    -    otherfacet= otherfacet_(ridge, facet1);
    -    if (otherfacet == facet2) {
    -      qh_setdel(facet2->ridges, ridge);
    -      qh_setfree(&(ridge->vertices));
    -      qh_memfree(ridge, (int)sizeof(ridgeT));
    -      qh_setdel(facet2->neighbors, facet1);
    -    }else {
    -      qh_setappend(&facet2->ridges, ridge);
    -      if (otherfacet->visitid != qh visit_id) {
    -        qh_setappend(&facet2->neighbors, otherfacet);
    -        qh_setreplace(otherfacet->neighbors, facet1, facet2);
    -        otherfacet->visitid= qh visit_id;
    -      }else {
    -        if (otherfacet->simplicial)    /* is degen, needs ridges */
    -          qh_makeridges(otherfacet);
    -        if (SETfirstt_(otherfacet->neighbors, facetT) != facet1)
    -          qh_setdel(otherfacet->neighbors, facet1);
    -        else {   /*keep newfacet->neighbors->horizon*/
    -          qh_setdel(otherfacet->neighbors, facet2);
    -          qh_setreplace(otherfacet->neighbors, facet1, facet2);
    -        }
    -      }
    -      if (ridge->top == facet1) /* wait until after qh_makeridges */
    -        ridge->top= facet2;
    -      else
    -        ridge->bottom= facet2;
    -    }
    -  }
    -  SETfirst_(facet1->ridges)= NULL; /* it will be deleted */
    -  trace3((qh ferr, 3006, "qh_mergesimplex: merged simplex f%d apex v%d into facet f%d\n",
    -          facet1->id, getid_(apex), facet2->id));
    -} /* mergesimplex */
    -
    -/*---------------------------------
    -
    -  qh_mergevertex_del( vertex, facet1, facet2 )
    -    delete a vertex because of merging facet1 into facet2
    -
    -  returns:
    -    deletes vertex from facet2
    -    adds vertex to qh.del_vertices for later deletion
    -*/
    -void qh_mergevertex_del(vertexT *vertex, facetT *facet1, facetT *facet2) {
    -
    -  zinc_(Zmergevertex);
    -  trace2((qh ferr, 2035, "qh_mergevertex_del: deleted v%d when merging f%d into f%d\n",
    -          vertex->id, facet1->id, facet2->id));
    -  qh_setdelsorted(facet2->vertices, vertex);
    -  vertex->deleted= True;
    -  qh_setappend(&qh del_vertices, vertex);
    -} /* mergevertex_del */
    -
    -/*---------------------------------
    -
    -  qh_mergevertex_neighbors( facet1, facet2 )
    -    merge the vertex neighbors of facet1 to facet2
    -
    -  returns:
    -    if vertex is current qh.vertex_visit
    -      deletes facet1 from vertex->neighbors
    -    else
    -      renames facet1 to facet2 in vertex->neighbors
    -    deletes vertices if only one neighbor
    -
    -  notes:
    -    assumes vertex neighbor sets are good
    -*/
    -void qh_mergevertex_neighbors(facetT *facet1, facetT *facet2) {
    -  vertexT *vertex, **vertexp;
    -
    -  trace4((qh ferr, 4042, "qh_mergevertex_neighbors: merge vertex neighbors of f%d and f%d\n",
    -          facet1->id, facet2->id));
    -  if (qh tracevertex) {
    -    qh_fprintf(qh ferr, 8081, "qh_mergevertex_neighbors: of f%d and f%d at furthest p%d f0= %p\n",
    -             facet1->id, facet2->id, qh furthest_id, qh tracevertex->neighbors->e[0].p);
    -    qh_errprint("TRACE", NULL, NULL, NULL, qh tracevertex);
    -  }
    -  FOREACHvertex_(facet1->vertices) {
    -    if (vertex->visitid != qh vertex_visit)
    -      qh_setreplace(vertex->neighbors, facet1, facet2);
    -    else {
    -      qh_setdel(vertex->neighbors, facet1);
    -      if (!SETsecond_(vertex->neighbors))
    -        qh_mergevertex_del(vertex, facet1, facet2);
    -    }
    -  }
    -  if (qh tracevertex)
    -    qh_errprint("TRACE", NULL, NULL, NULL, qh tracevertex);
    -} /* mergevertex_neighbors */
    -
    -
    -/*---------------------------------
    -
    -  qh_mergevertices( vertices1, vertices2 )
    -    merges the vertex set of facet1 into facet2
    -
    -  returns:
    -    replaces vertices2 with merged set
    -    preserves vertex_visit for qh_mergevertex_neighbors
    -    updates qh.newvertex_list
    -
    -  design:
    -    create a merged set of both vertices (in inverse id order)
    -*/
    -void qh_mergevertices(setT *vertices1, setT **vertices2) {
    -  int newsize= qh_setsize(vertices1)+qh_setsize(*vertices2) - qh hull_dim + 1;
    -  setT *mergedvertices;
    -  vertexT *vertex, **vertexp, **vertex2= SETaddr_(*vertices2, vertexT);
    -
    -  mergedvertices= qh_settemp(newsize);
    -  FOREACHvertex_(vertices1) {
    -    if (!*vertex2 || vertex->id > (*vertex2)->id)
    -      qh_setappend(&mergedvertices, vertex);
    -    else {
    -      while (*vertex2 && (*vertex2)->id > vertex->id)
    -        qh_setappend(&mergedvertices, *vertex2++);
    -      if (!*vertex2 || (*vertex2)->id < vertex->id)
    -        qh_setappend(&mergedvertices, vertex);
    -      else
    -        qh_setappend(&mergedvertices, *vertex2++);
    -    }
    -  }
    -  while (*vertex2)
    -    qh_setappend(&mergedvertices, *vertex2++);
    -  if (newsize < qh_setsize(mergedvertices)) {
    -    qh_fprintf(qh ferr, 6100, "qhull internal error (qh_mergevertices): facets did not share a ridge\n");
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  qh_setfree(vertices2);
    -  *vertices2= mergedvertices;
    -  qh_settemppop();
    -} /* mergevertices */
    -
    -
    -/*---------------------------------
    -
    -  qh_neighbor_intersections( vertex )
    -    return intersection of all vertices in vertex->neighbors except for vertex
    -
    -  returns:
    -    returns temporary set of vertices
    -    does not include vertex
    -    NULL if a neighbor is simplicial
    -    NULL if empty set
    -
    -  notes:
    -    used for renaming vertices
    -
    -  design:
    -    initialize the intersection set with vertices of the first two neighbors
    -    delete vertex from the intersection
    -    for each remaining neighbor
    -      intersect its vertex set with the intersection set
    -      return NULL if empty
    -    return the intersection set
    -*/
    -setT *qh_neighbor_intersections(vertexT *vertex) {
    -  facetT *neighbor, **neighborp, *neighborA, *neighborB;
    -  setT *intersect;
    -  int neighbor_i, neighbor_n;
    -
    -  FOREACHneighbor_(vertex) {
    -    if (neighbor->simplicial)
    -      return NULL;
    -  }
    -  neighborA= SETfirstt_(vertex->neighbors, facetT);
    -  neighborB= SETsecondt_(vertex->neighbors, facetT);
    -  zinc_(Zintersectnum);
    -  if (!neighborA)
    -    return NULL;
    -  if (!neighborB)
    -    intersect= qh_setcopy(neighborA->vertices, 0);
    -  else
    -    intersect= qh_vertexintersect_new(neighborA->vertices, neighborB->vertices);
    -  qh_settemppush(intersect);
    -  qh_setdelsorted(intersect, vertex);
    -  FOREACHneighbor_i_(vertex) {
    -    if (neighbor_i >= 2) {
    -      zinc_(Zintersectnum);
    -      qh_vertexintersect(&intersect, neighbor->vertices);
    -      if (!SETfirst_(intersect)) {
    -        zinc_(Zintersectfail);
    -        qh_settempfree(&intersect);
    -        return NULL;
    -      }
    -    }
    -  }
    -  trace3((qh ferr, 3007, "qh_neighbor_intersections: %d vertices in neighbor intersection of v%d\n",
    -          qh_setsize(intersect), vertex->id));
    -  return intersect;
    -} /* neighbor_intersections */
    -
    -/*---------------------------------
    -
    -  qh_newvertices( vertices )
    -    add vertices to end of qh.vertex_list (marks as new vertices)
    -
    -  returns:
    -    vertices on qh.newvertex_list
    -    vertex->newlist set
    -*/
    -void qh_newvertices(setT *vertices) {
    -  vertexT *vertex, **vertexp;
    -
    -  FOREACHvertex_(vertices) {
    -    if (!vertex->newlist) {
    -      qh_removevertex(vertex);
    -      qh_appendvertex(vertex);
    -    }
    -  }
    -} /* newvertices */
    -
    -/*---------------------------------
    -
    -  qh_reducevertices()
    -    reduce extra vertices, shared vertices, and redundant vertices
    -    facet->newmerge is set if merged since last call
    -    if !qh.MERGEvertices, only removes extra vertices
    -
    -  returns:
    -    True if also merged degen_redundant facets
    -    vertices are renamed if possible
    -    clears facet->newmerge and vertex->delridge
    -
    -  notes:
    -    ignored if 2-d
    -
    -  design:
    -    merge any degenerate or redundant facets
    -    for each newly merged facet
    -      remove extra vertices
    -    if qh.MERGEvertices
    -      for each newly merged facet
    -        for each vertex
    -          if vertex was on a deleted ridge
    -            rename vertex if it is shared
    -      remove delridge flag from new vertices
    -*/
    -boolT qh_reducevertices(void) {
    -  int numshare=0, numrename= 0;
    -  boolT degenredun= False;
    -  facetT *newfacet;
    -  vertexT *vertex, **vertexp;
    -
    -  if (qh hull_dim == 2)
    -    return False;
    -  if (qh_merge_degenredundant())
    -    degenredun= True;
    - LABELrestart:
    -  FORALLnew_facets {
    -    if (newfacet->newmerge) {
    -      if (!qh MERGEvertices)
    -        newfacet->newmerge= False;
    -      qh_remove_extravertices(newfacet);
    -    }
    -  }
    -  if (!qh MERGEvertices)
    -    return False;
    -  FORALLnew_facets {
    -    if (newfacet->newmerge) {
    -      newfacet->newmerge= False;
    -      FOREACHvertex_(newfacet->vertices) {
    -        if (vertex->delridge) {
    -          if (qh_rename_sharedvertex(vertex, newfacet)) {
    -            numshare++;
    -            vertexp--; /* repeat since deleted vertex */
    -          }
    -        }
    -      }
    -    }
    -  }
    -  FORALLvertex_(qh newvertex_list) {
    -    if (vertex->delridge && !vertex->deleted) {
    -      vertex->delridge= False;
    -      if (qh hull_dim >= 4 && qh_redundant_vertex(vertex)) {
    -        numrename++;
    -        if (qh_merge_degenredundant()) {
    -          degenredun= True;
    -          goto LABELrestart;
    -        }
    -      }
    -    }
    -  }
    -  trace1((qh ferr, 1014, "qh_reducevertices: renamed %d shared vertices and %d redundant vertices. Degen? %d\n",
    -          numshare, numrename, degenredun));
    -  return degenredun;
    -} /* reducevertices */
    -
    -/*---------------------------------
    -
    -  qh_redundant_vertex( vertex )
    -    detect and rename a redundant vertex
    -    vertices have full vertex->neighbors
    -
    -  returns:
    -    returns true if find a redundant vertex
    -      deletes vertex(vertex->deleted)
    -
    -  notes:
    -    only needed if vertex->delridge and hull_dim >= 4
    -    may add degenerate facets to qh.facet_mergeset
    -    doesn't change vertex->neighbors or create redundant facets
    -
    -  design:
    -    intersect vertices of all facet neighbors of vertex
    -    determine ridges for these vertices
    -    if find a new vertex for vertex amoung these ridges and vertices
    -      rename vertex to the new vertex
    -*/
    -vertexT *qh_redundant_vertex(vertexT *vertex) {
    -  vertexT *newvertex= NULL;
    -  setT *vertices, *ridges;
    -
    -  trace3((qh ferr, 3008, "qh_redundant_vertex: check if v%d can be renamed\n", vertex->id));
    -  if ((vertices= qh_neighbor_intersections(vertex))) {
    -    ridges= qh_vertexridges(vertex);
    -    if ((newvertex= qh_find_newvertex(vertex, vertices, ridges)))
    -      qh_renamevertex(vertex, newvertex, ridges, NULL, NULL);
    -    qh_settempfree(&ridges);
    -    qh_settempfree(&vertices);
    -  }
    -  return newvertex;
    -} /* redundant_vertex */
    -
    -/*---------------------------------
    -
    -  qh_remove_extravertices( facet )
    -    remove extra vertices from non-simplicial facets
    -
    -  returns:
    -    returns True if it finds them
    -
    -  design:
    -    for each vertex in facet
    -      if vertex not in a ridge (i.e., no longer used)
    -        delete vertex from facet
    -        delete facet from vertice's neighbors
    -        unless vertex in another facet
    -          add vertex to qh.del_vertices for later deletion
    -*/
    -boolT qh_remove_extravertices(facetT *facet) {
    -  ridgeT *ridge, **ridgep;
    -  vertexT *vertex, **vertexp;
    -  boolT foundrem= False;
    -
    -  trace4((qh ferr, 4043, "qh_remove_extravertices: test f%d for extra vertices\n",
    -          facet->id));
    -  FOREACHvertex_(facet->vertices)
    -    vertex->seen= False;
    -  FOREACHridge_(facet->ridges) {
    -    FOREACHvertex_(ridge->vertices)
    -      vertex->seen= True;
    -  }
    -  FOREACHvertex_(facet->vertices) {
    -    if (!vertex->seen) {
    -      foundrem= True;
    -      zinc_(Zremvertex);
    -      qh_setdelsorted(facet->vertices, vertex);
    -      qh_setdel(vertex->neighbors, facet);
    -      if (!qh_setsize(vertex->neighbors)) {
    -        vertex->deleted= True;
    -        qh_setappend(&qh del_vertices, vertex);
    -        zinc_(Zremvertexdel);
    -        trace2((qh ferr, 2036, "qh_remove_extravertices: v%d deleted because it's lost all ridges\n", vertex->id));
    -      }else
    -        trace3((qh ferr, 3009, "qh_remove_extravertices: v%d removed from f%d because it's lost all ridges\n", vertex->id, facet->id));
    -      vertexp--; /*repeat*/
    -    }
    -  }
    -  return foundrem;
    -} /* remove_extravertices */
    -
    -/*---------------------------------
    -
    -  qh_rename_sharedvertex( vertex, facet )
    -    detect and rename if shared vertex in facet
    -    vertices have full ->neighbors
    -
    -  returns:
    -    newvertex or NULL
    -    the vertex may still exist in other facets (i.e., a neighbor was pinched)
    -    does not change facet->neighbors
    -    updates vertex->neighbors
    -
    -  notes:
    -    a shared vertex for a facet is only in ridges to one neighbor
    -    this may undo a pinched facet
    -
    -    it does not catch pinches involving multiple facets.  These appear
    -      to be difficult to detect, since an exhaustive search is too expensive.
    -
    -  design:
    -    if vertex only has two neighbors
    -      determine the ridges that contain the vertex
    -      determine the vertices shared by both neighbors
    -      if can find a new vertex in this set
    -        rename the vertex to the new vertex
    -*/
    -vertexT *qh_rename_sharedvertex(vertexT *vertex, facetT *facet) {
    -  facetT *neighbor, **neighborp, *neighborA= NULL;
    -  setT *vertices, *ridges;
    -  vertexT *newvertex;
    -
    -  if (qh_setsize(vertex->neighbors) == 2) {
    -    neighborA= SETfirstt_(vertex->neighbors, facetT);
    -    if (neighborA == facet)
    -      neighborA= SETsecondt_(vertex->neighbors, facetT);
    -  }else if (qh hull_dim == 3)
    -    return NULL;
    -  else {
    -    qh visit_id++;
    -    FOREACHneighbor_(facet)
    -      neighbor->visitid= qh visit_id;
    -    FOREACHneighbor_(vertex) {
    -      if (neighbor->visitid == qh visit_id) {
    -        if (neighborA)
    -          return NULL;
    -        neighborA= neighbor;
    -      }
    -    }
    -    if (!neighborA) {
    -      qh_fprintf(qh ferr, 6101, "qhull internal error (qh_rename_sharedvertex): v%d's neighbors not in f%d\n",
    -        vertex->id, facet->id);
    -      qh_errprint("ERRONEOUS", facet, NULL, NULL, vertex);
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -    }
    -  }
    -  /* the vertex is shared by facet and neighborA */
    -  ridges= qh_settemp(qh TEMPsize);
    -  neighborA->visitid= ++qh visit_id;
    -  qh_vertexridges_facet(vertex, facet, &ridges);
    -  trace2((qh ferr, 2037, "qh_rename_sharedvertex: p%d(v%d) is shared by f%d(%d ridges) and f%d\n",
    -    qh_pointid(vertex->point), vertex->id, facet->id, qh_setsize(ridges), neighborA->id));
    -  zinc_(Zintersectnum);
    -  vertices= qh_vertexintersect_new(facet->vertices, neighborA->vertices);
    -  qh_setdel(vertices, vertex);
    -  qh_settemppush(vertices);
    -  if ((newvertex= qh_find_newvertex(vertex, vertices, ridges)))
    -    qh_renamevertex(vertex, newvertex, ridges, facet, neighborA);
    -  qh_settempfree(&vertices);
    -  qh_settempfree(&ridges);
    -  return newvertex;
    -} /* rename_sharedvertex */
    -
    -/*---------------------------------
    -
    -  qh_renameridgevertex( ridge, oldvertex, newvertex )
    -    renames oldvertex as newvertex in ridge
    -
    -  returns:
    -
    -  design:
    -    delete oldvertex from ridge
    -    if newvertex already in ridge
    -      copy ridge->noconvex to another ridge if possible
    -      delete the ridge
    -    else
    -      insert newvertex into the ridge
    -      adjust the ridge's orientation
    -*/
    -void qh_renameridgevertex(ridgeT *ridge, vertexT *oldvertex, vertexT *newvertex) {
    -  int nth= 0, oldnth;
    -  facetT *temp;
    -  vertexT *vertex, **vertexp;
    -
    -  oldnth= qh_setindex(ridge->vertices, oldvertex);
    -  qh_setdelnthsorted(ridge->vertices, oldnth);
    -  FOREACHvertex_(ridge->vertices) {
    -    if (vertex == newvertex) {
    -      zinc_(Zdelridge);
    -      if (ridge->nonconvex) /* only one ridge has nonconvex set */
    -        qh_copynonconvex(ridge);
    -      qh_delridge(ridge);
    -      trace2((qh ferr, 2038, "qh_renameridgevertex: ridge r%d deleted.  It contained both v%d and v%d\n",
    -        ridge->id, oldvertex->id, newvertex->id));
    -      return;
    -    }
    -    if (vertex->id < newvertex->id)
    -      break;
    -    nth++;
    -  }
    -  qh_setaddnth(&ridge->vertices, nth, newvertex);
    -  if (abs(oldnth - nth)%2) {
    -    trace3((qh ferr, 3010, "qh_renameridgevertex: swapped the top and bottom of ridge r%d\n",
    -            ridge->id));
    -    temp= ridge->top;
    -    ridge->top= ridge->bottom;
    -    ridge->bottom= temp;
    -  }
    -} /* renameridgevertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_renamevertex( oldvertex, newvertex, ridges, oldfacet, neighborA )
    -    renames oldvertex as newvertex in ridges
    -    gives oldfacet/neighborA if oldvertex is shared between two facets
    -
    -  returns:
    -    oldvertex may still exist afterwards
    -
    -
    -  notes:
    -    can not change neighbors of newvertex (since it's a subset)
    -
    -  design:
    -    for each ridge in ridges
    -      rename oldvertex to newvertex and delete degenerate ridges
    -    if oldfacet not defined
    -      for each neighbor of oldvertex
    -        delete oldvertex from neighbor's vertices
    -        remove extra vertices from neighbor
    -      add oldvertex to qh.del_vertices
    -    else if oldvertex only between oldfacet and neighborA
    -      delete oldvertex from oldfacet and neighborA
    -      add oldvertex to qh.del_vertices
    -    else oldvertex is in oldfacet and neighborA and other facets (i.e., pinched)
    -      delete oldvertex from oldfacet
    -      delete oldfacet from oldvertice's neighbors
    -      remove extra vertices (e.g., oldvertex) from neighborA
    -*/
    -void qh_renamevertex(vertexT *oldvertex, vertexT *newvertex, setT *ridges, facetT *oldfacet, facetT *neighborA) {
    -  facetT *neighbor, **neighborp;
    -  ridgeT *ridge, **ridgep;
    -  boolT istrace= False;
    -
    -  if (qh IStracing >= 2 || oldvertex->id == qh tracevertex_id ||
    -        newvertex->id == qh tracevertex_id)
    -    istrace= True;
    -  FOREACHridge_(ridges)
    -    qh_renameridgevertex(ridge, oldvertex, newvertex);
    -  if (!oldfacet) {
    -    zinc_(Zrenameall);
    -    if (istrace)
    -      qh_fprintf(qh ferr, 8082, "qh_renamevertex: renamed v%d to v%d in several facets\n",
    -               oldvertex->id, newvertex->id);
    -    FOREACHneighbor_(oldvertex) {
    -      qh_maydropneighbor(neighbor);
    -      qh_setdelsorted(neighbor->vertices, oldvertex);
    -      if (qh_remove_extravertices(neighbor))
    -        neighborp--; /* neighbor may be deleted */
    -    }
    -    if (!oldvertex->deleted) {
    -      oldvertex->deleted= True;
    -      qh_setappend(&qh del_vertices, oldvertex);
    -    }
    -  }else if (qh_setsize(oldvertex->neighbors) == 2) {
    -    zinc_(Zrenameshare);
    -    if (istrace)
    -      qh_fprintf(qh ferr, 8083, "qh_renamevertex: renamed v%d to v%d in oldfacet f%d\n",
    -               oldvertex->id, newvertex->id, oldfacet->id);
    -    FOREACHneighbor_(oldvertex)
    -      qh_setdelsorted(neighbor->vertices, oldvertex);
    -    oldvertex->deleted= True;
    -    qh_setappend(&qh del_vertices, oldvertex);
    -  }else {
    -    zinc_(Zrenamepinch);
    -    if (istrace || qh IStracing)
    -      qh_fprintf(qh ferr, 8084, "qh_renamevertex: renamed pinched v%d to v%d between f%d and f%d\n",
    -               oldvertex->id, newvertex->id, oldfacet->id, neighborA->id);
    -    qh_setdelsorted(oldfacet->vertices, oldvertex);
    -    qh_setdel(oldvertex->neighbors, oldfacet);
    -    qh_remove_extravertices(neighborA);
    -  }
    -} /* renamevertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_test_appendmerge( facet, neighbor )
    -    tests facet/neighbor for convexity
    -    appends to mergeset if non-convex
    -    if pre-merging,
    -      nop if qh.SKIPconvex, or qh.MERGEexact and coplanar
    -
    -  returns:
    -    true if appends facet/neighbor to mergeset
    -    sets facet->center as needed
    -    does not change facet->seen
    -
    -  design:
    -    if qh.cos_max is defined
    -      if the angle between facet normals is too shallow
    -        append an angle-coplanar merge to qh.mergeset
    -        return True
    -    make facet's centrum if needed
    -    if facet's centrum is above the neighbor
    -      set isconcave
    -    else
    -      if facet's centrum is not below the neighbor
    -        set iscoplanar
    -      make neighbor's centrum if needed
    -      if neighbor's centrum is above the facet
    -        set isconcave
    -      else if neighbor's centrum is not below the facet
    -        set iscoplanar
    -   if isconcave or iscoplanar
    -     get angle if needed
    -     append concave or coplanar merge to qh.mergeset
    -*/
    -boolT qh_test_appendmerge(facetT *facet, facetT *neighbor) {
    -  realT dist, dist2= -REALmax, angle= -REALmax;
    -  boolT isconcave= False, iscoplanar= False, okangle= False;
    -
    -  if (qh SKIPconvex && !qh POSTmerging)
    -    return False;
    -  if ((!qh MERGEexact || qh POSTmerging) && qh cos_max < REALmax/2) {
    -    angle= qh_getangle(facet->normal, neighbor->normal);
    -    zinc_(Zangletests);
    -    if (angle > qh cos_max) {
    -      zinc_(Zcoplanarangle);
    -      qh_appendmergeset(facet, neighbor, MRGanglecoplanar, &angle);
    -      trace2((qh ferr, 2039, "qh_test_appendmerge: coplanar angle %4.4g between f%d and f%d\n",
    -         angle, facet->id, neighbor->id));
    -      return True;
    -    }else
    -      okangle= True;
    -  }
    -  if (!facet->center)
    -    facet->center= qh_getcentrum(facet);
    -  zzinc_(Zcentrumtests);
    -  qh_distplane(facet->center, neighbor, &dist);
    -  if (dist > qh centrum_radius)
    -    isconcave= True;
    -  else {
    -    if (dist > -qh centrum_radius)
    -      iscoplanar= True;
    -    if (!neighbor->center)
    -      neighbor->center= qh_getcentrum(neighbor);
    -    zzinc_(Zcentrumtests);
    -    qh_distplane(neighbor->center, facet, &dist2);
    -    if (dist2 > qh centrum_radius)
    -      isconcave= True;
    -    else if (!iscoplanar && dist2 > -qh centrum_radius)
    -      iscoplanar= True;
    -  }
    -  if (!isconcave && (!iscoplanar || (qh MERGEexact && !qh POSTmerging)))
    -    return False;
    -  if (!okangle && qh ANGLEmerge) {
    -    angle= qh_getangle(facet->normal, neighbor->normal);
    -    zinc_(Zangletests);
    -  }
    -  if (isconcave) {
    -    zinc_(Zconcaveridge);
    -    if (qh ANGLEmerge)
    -      angle += qh_ANGLEconcave + 0.5;
    -    qh_appendmergeset(facet, neighbor, MRGconcave, &angle);
    -    trace0((qh ferr, 18, "qh_test_appendmerge: concave f%d to f%d dist %4.4g and reverse dist %4.4g angle %4.4g during p%d\n",
    -           facet->id, neighbor->id, dist, dist2, angle, qh furthest_id));
    -  }else /* iscoplanar */ {
    -    zinc_(Zcoplanarcentrum);
    -    qh_appendmergeset(facet, neighbor, MRGcoplanar, &angle);
    -    trace2((qh ferr, 2040, "qh_test_appendmerge: coplanar f%d to f%d dist %4.4g, reverse dist %4.4g angle %4.4g\n",
    -              facet->id, neighbor->id, dist, dist2, angle));
    -  }
    -  return True;
    -} /* test_appendmerge */
    -
    -/*---------------------------------
    -
    -  qh_test_vneighbors()
    -    test vertex neighbors for convexity
    -    tests all facets on qh.newfacet_list
    -
    -  returns:
    -    true if non-convex vneighbors appended to qh.facet_mergeset
    -    initializes vertex neighbors if needed
    -
    -  notes:
    -    assumes all facet neighbors have been tested
    -    this can be expensive
    -    this does not guarantee that a centrum is below all facets
    -      but it is unlikely
    -    uses qh.visit_id
    -
    -  design:
    -    build vertex neighbors if necessary
    -    for all new facets
    -      for all vertices
    -        for each unvisited facet neighbor of the vertex
    -          test new facet and neighbor for convexity
    -*/
    -boolT qh_test_vneighbors(void /* qh newfacet_list */) {
    -  facetT *newfacet, *neighbor, **neighborp;
    -  vertexT *vertex, **vertexp;
    -  int nummerges= 0;
    -
    -  trace1((qh ferr, 1015, "qh_test_vneighbors: testing vertex neighbors for convexity\n"));
    -  if (!qh VERTEXneighbors)
    -    qh_vertexneighbors();
    -  FORALLnew_facets
    -    newfacet->seen= False;
    -  FORALLnew_facets {
    -    newfacet->seen= True;
    -    newfacet->visitid= qh visit_id++;
    -    FOREACHneighbor_(newfacet)
    -      newfacet->visitid= qh visit_id;
    -    FOREACHvertex_(newfacet->vertices) {
    -      FOREACHneighbor_(vertex) {
    -        if (neighbor->seen || neighbor->visitid == qh visit_id)
    -          continue;
    -        if (qh_test_appendmerge(newfacet, neighbor))
    -          nummerges++;
    -      }
    -    }
    -  }
    -  zadd_(Ztestvneighbor, nummerges);
    -  trace1((qh ferr, 1016, "qh_test_vneighbors: found %d non-convex, vertex neighbors\n",
    -           nummerges));
    -  return (nummerges > 0);
    -} /* test_vneighbors */
    -
    -/*---------------------------------
    -
    -  qh_tracemerge( facet1, facet2 )
    -    print trace message after merge
    -*/
    -void qh_tracemerge(facetT *facet1, facetT *facet2) {
    -  boolT waserror= False;
    -
    -#ifndef qh_NOtrace
    -  if (qh IStracing >= 4)
    -    qh_errprint("MERGED", facet2, NULL, NULL, NULL);
    -  if (facet2 == qh tracefacet || (qh tracevertex && qh tracevertex->newlist)) {
    -    qh_fprintf(qh ferr, 8085, "qh_tracemerge: trace facet and vertex after merge of f%d and f%d, furthest p%d\n", facet1->id, facet2->id, qh furthest_id);
    -    if (facet2 != qh tracefacet)
    -      qh_errprint("TRACE", qh tracefacet,
    -        (qh tracevertex && qh tracevertex->neighbors) ?
    -           SETfirstt_(qh tracevertex->neighbors, facetT) : NULL,
    -        NULL, qh tracevertex);
    -  }
    -  if (qh tracevertex) {
    -    if (qh tracevertex->deleted)
    -      qh_fprintf(qh ferr, 8086, "qh_tracemerge: trace vertex deleted at furthest p%d\n",
    -            qh furthest_id);
    -    else
    -      qh_checkvertex(qh tracevertex);
    -  }
    -  if (qh tracefacet) {
    -    qh_checkfacet(qh tracefacet, True, &waserror);
    -    if (waserror)
    -      qh_errexit(qh_ERRqhull, qh tracefacet, NULL);
    -  }
    -#endif /* !qh_NOtrace */
    -  if (qh CHECKfrequently || qh IStracing >= 4) { /* can't check polygon here */
    -    qh_checkfacet(facet2, True, &waserror);
    -    if (waserror)
    -      qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -} /* tracemerge */
    -
    -/*---------------------------------
    -
    -  qh_tracemerging()
    -    print trace message during POSTmerging
    -
    -  returns:
    -    updates qh.mergereport
    -
    -  notes:
    -    called from qh_mergecycle() and qh_mergefacet()
    -
    -  see:
    -    qh_buildtracing()
    -*/
    -void qh_tracemerging(void) {
    -  realT cpu;
    -  int total;
    -  time_t timedata;
    -  struct tm *tp;
    -
    -  qh mergereport= zzval_(Ztotmerge);
    -  time(&timedata);
    -  tp= localtime(&timedata);
    -  cpu= qh_CPUclock;
    -  cpu /= qh_SECticks;
    -  total= zzval_(Ztotmerge) - zzval_(Zcyclehorizon) + zzval_(Zcyclefacettot);
    -  qh_fprintf(qh ferr, 8087, "\n\
    -At %d:%d:%d & %2.5g CPU secs, qhull has merged %d facets.  The hull\n\
    -  contains %d facets and %d vertices.\n",
    -      tp->tm_hour, tp->tm_min, tp->tm_sec, cpu,
    -      total, qh num_facets - qh num_visible,
    -      qh num_vertices-qh_setsize(qh del_vertices));
    -} /* tracemerging */
    -
    -/*---------------------------------
    -
    -  qh_updatetested( facet1, facet2 )
    -    clear facet2->tested and facet1->ridge->tested for merge
    -
    -  returns:
    -    deletes facet2->center unless it's already large
    -      if so, clears facet2->ridge->tested
    -
    -  design:
    -    clear facet2->tested
    -    clear ridge->tested for facet1's ridges
    -    if facet2 has a centrum
    -      if facet2 is large
    -        set facet2->keepcentrum
    -      else if facet2 has 3 vertices due to many merges, or not large and post merging
    -        clear facet2->keepcentrum
    -      unless facet2->keepcentrum
    -        clear facet2->center to recompute centrum later
    -        clear ridge->tested for facet2's ridges
    -*/
    -void qh_updatetested(facetT *facet1, facetT *facet2) {
    -  ridgeT *ridge, **ridgep;
    -  int size;
    -
    -  facet2->tested= False;
    -  FOREACHridge_(facet1->ridges)
    -    ridge->tested= False;
    -  if (!facet2->center)
    -    return;
    -  size= qh_setsize(facet2->vertices);
    -  if (!facet2->keepcentrum) {
    -    if (size > qh hull_dim + qh_MAXnewcentrum) {
    -      facet2->keepcentrum= True;
    -      zinc_(Zwidevertices);
    -    }
    -  }else if (size <= qh hull_dim + qh_MAXnewcentrum) {
    -    /* center and keepcentrum was set */
    -    if (size == qh hull_dim || qh POSTmerging)
    -      facet2->keepcentrum= False; /* if many merges need to recompute centrum */
    -  }
    -  if (!facet2->keepcentrum) {
    -    qh_memfree(facet2->center, qh normal_size);
    -    facet2->center= NULL;
    -    FOREACHridge_(facet2->ridges)
    -      ridge->tested= False;
    -  }
    -} /* updatetested */
    -
    -/*---------------------------------
    -
    -  qh_vertexridges( vertex )
    -    return temporary set of ridges adjacent to a vertex
    -    vertex->neighbors defined
    -
    -  ntoes:
    -    uses qh.visit_id
    -    does not include implicit ridges for simplicial facets
    -
    -  design:
    -    for each neighbor of vertex
    -      add ridges that include the vertex to ridges
    -*/
    -setT *qh_vertexridges(vertexT *vertex) {
    -  facetT *neighbor, **neighborp;
    -  setT *ridges= qh_settemp(qh TEMPsize);
    -  int size;
    -
    -  qh visit_id++;
    -  FOREACHneighbor_(vertex)
    -    neighbor->visitid= qh visit_id;
    -  FOREACHneighbor_(vertex) {
    -    if (*neighborp)   /* no new ridges in last neighbor */
    -      qh_vertexridges_facet(vertex, neighbor, &ridges);
    -  }
    -  if (qh PRINTstatistics || qh IStracing) {
    -    size= qh_setsize(ridges);
    -    zinc_(Zvertexridge);
    -    zadd_(Zvertexridgetot, size);
    -    zmax_(Zvertexridgemax, size);
    -    trace3((qh ferr, 3011, "qh_vertexridges: found %d ridges for v%d\n",
    -             size, vertex->id));
    -  }
    -  return ridges;
    -} /* vertexridges */
    -
    -/*---------------------------------
    -
    -  qh_vertexridges_facet( vertex, facet, ridges )
    -    add adjacent ridges for vertex in facet
    -    neighbor->visitid==qh.visit_id if it hasn't been visited
    -
    -  returns:
    -    ridges updated
    -    sets facet->visitid to qh.visit_id-1
    -
    -  design:
    -    for each ridge of facet
    -      if ridge of visited neighbor (i.e., unprocessed)
    -        if vertex in ridge
    -          append ridge to vertex
    -    mark facet processed
    -*/
    -void qh_vertexridges_facet(vertexT *vertex, facetT *facet, setT **ridges) {
    -  ridgeT *ridge, **ridgep;
    -  facetT *neighbor;
    -
    -  FOREACHridge_(facet->ridges) {
    -    neighbor= otherfacet_(ridge, facet);
    -    if (neighbor->visitid == qh visit_id
    -    && qh_setin(ridge->vertices, vertex))
    -      qh_setappend(ridges, ridge);
    -  }
    -  facet->visitid= qh visit_id-1;
    -} /* vertexridges_facet */
    -
    -/*---------------------------------
    -
    -  qh_willdelete( facet, replace )
    -    moves facet to visible list
    -    sets facet->f.replace to replace (may be NULL)
    -
    -  returns:
    -    bumps qh.num_visible
    -*/
    -void qh_willdelete(facetT *facet, facetT *replace) {
    -
    -  qh_removefacet(facet);
    -  qh_prependfacet(facet, &qh visible_list);
    -  qh num_visible++;
    -  facet->visible= True;
    -  facet->f.replace= replace;
    -} /* willdelete */
    -
    -#else /* qh_NOmerge */
    -void qh_premerge(vertexT *apex, realT maxcentrum, realT maxangle) {
    -}
    -void qh_postmerge(const char *reason, realT maxcentrum, realT maxangle,
    -                      boolT vneighbors) {
    -}
    -boolT qh_checkzero(boolT testall) {
    -   }
    -#endif /* qh_NOmerge */
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/merge.h b/scipy-0.10.1/scipy/spatial/qhull/src/merge.h
    deleted file mode 100644
    index 7ca9a4f5dd..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/merge.h
    +++ /dev/null
    @@ -1,178 +0,0 @@
    -/*
      ---------------------------------
    -
    -   merge.h
    -   header file for merge.c
    -
    -   see qh-merge.htm and merge.c
    -
    -   copyright (c) 1993-2010 C.B. Barber.
    -   $Id: //product/qhull/main/rel/src/merge.h#21 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFmerge
    -#define qhDEFmerge 1
    -
    -#include "libqhull.h"
    -
    -
    -/*============ -constants- ==============*/
    -
    -/*----------------------------------
    -
    -  qh_ANGLEredundant
    -    indicates redundant merge in mergeT->angle
    -*/
    -#define qh_ANGLEredundant 6.0
    -
    -/*----------------------------------
    -
    -  qh_ANGLEdegen
    -    indicates degenerate facet in mergeT->angle
    -*/
    -#define qh_ANGLEdegen     5.0
    -
    -/*----------------------------------
    -
    -  qh_ANGLEconcave
    -    offset to indicate concave facets in mergeT->angle
    -
    -  notes:
    -    concave facets are assigned the range of [2,4] in mergeT->angle
    -    roundoff error may make the angle less than 2
    -*/
    -#define qh_ANGLEconcave  1.5
    -
    -/*----------------------------------
    -
    -  MRG... (mergeType)
    -    indicates the type of a merge (mergeT->type)
    -*/
    -typedef enum {  /* in sort order for facet_mergeset */
    -  MRGnone= 0,
    -  MRGcoplanar,          /* centrum coplanar */
    -  MRGanglecoplanar,     /* angle coplanar */
    -                        /* could detect half concave ridges */
    -  MRGconcave,           /* concave ridge */
    -  MRGflip,              /* flipped facet. facet1 == facet2 */
    -  MRGridge,             /* duplicate ridge (qh_MERGEridge) */
    -                        /* degen and redundant go onto degen_mergeset */
    -  MRGdegen,             /* degenerate facet (!enough neighbors) facet1 == facet2 */
    -  MRGredundant,         /* redundant facet (vertex subset) */
    -                        /* merge_degenredundant assumes degen < redundant */
    -  MRGmirror,            /* mirror facet from qh_triangulate */
    -  ENDmrg
    -} mergeType;
    -
    -/*----------------------------------
    -
    -  qh_MERGEapex
    -    flag for qh_mergefacet() to indicate an apex merge
    -*/
    -#define qh_MERGEapex     True
    -
    -/*============ -structures- ====================*/
    -
    -/*----------------------------------
    -
    -  mergeT
    -    structure used to merge facets
    -*/
    -
    -typedef struct mergeT mergeT;
    -struct mergeT {         /* initialize in qh_appendmergeset */
    -  realT   angle;        /* angle between normals of facet1 and facet2 */
    -  facetT *facet1;       /* will merge facet1 into facet2 */
    -  facetT *facet2;
    -  mergeType type;
    -};
    -
    -
    -/*=========== -macros- =========================*/
    -
    -/*----------------------------------
    -
    -  FOREACHmerge_( merges ) {...}
    -    assign 'merge' to each merge in merges
    -
    -  notes:
    -    uses 'mergeT *merge, **mergep;'
    -    if qh_mergefacet(),
    -      restart since qh.facet_mergeset may change
    -    see FOREACHsetelement_
    -*/
    -#define FOREACHmerge_( merges ) FOREACHsetelement_(mergeT, merges, merge)
    -
    -/*============ prototypes in alphabetical order after pre/postmerge =======*/
    -
    -void    qh_premerge(vertexT *apex, realT maxcentrum, realT maxangle);
    -void    qh_postmerge(const char *reason, realT maxcentrum, realT maxangle,
    -             boolT vneighbors);
    -void    qh_all_merges(boolT othermerge, boolT vneighbors);
    -void    qh_appendmergeset(facetT *facet, facetT *neighbor, mergeType mergetype, realT *angle);
    -setT   *qh_basevertices( facetT *samecycle);
    -void    qh_checkconnect(void /* qh new_facets */);
    -boolT   qh_checkzero(boolT testall);
    -int     qh_compareangle(const void *p1, const void *p2);
    -int     qh_comparemerge(const void *p1, const void *p2);
    -int     qh_comparevisit(const void *p1, const void *p2);
    -void    qh_copynonconvex(ridgeT *atridge);
    -void    qh_degen_redundant_facet(facetT *facet);
    -void    qh_degen_redundant_neighbors(facetT *facet, facetT *delfacet);
    -vertexT *qh_find_newvertex(vertexT *oldvertex, setT *vertices, setT *ridges);
    -void    qh_findbest_test(boolT testcentrum, facetT *facet, facetT *neighbor,
    -           facetT **bestfacet, realT *distp, realT *mindistp, realT *maxdistp);
    -facetT *qh_findbestneighbor(facetT *facet, realT *distp, realT *mindistp, realT *maxdistp);
    -void    qh_flippedmerges(facetT *facetlist, boolT *wasmerge);
    -void    qh_forcedmerges( boolT *wasmerge);
    -void    qh_getmergeset(facetT *facetlist);
    -void    qh_getmergeset_initial(facetT *facetlist);
    -void    qh_hashridge(setT *hashtable, int hashsize, ridgeT *ridge, vertexT *oldvertex);
    -ridgeT *qh_hashridge_find(setT *hashtable, int hashsize, ridgeT *ridge,
    -              vertexT *vertex, vertexT *oldvertex, int *hashslot);
    -void    qh_makeridges(facetT *facet);
    -void    qh_mark_dupridges(facetT *facetlist);
    -void    qh_maydropneighbor(facetT *facet);
    -int     qh_merge_degenredundant(void);
    -void    qh_merge_nonconvex( facetT *facet1, facetT *facet2, mergeType mergetype);
    -void    qh_mergecycle(facetT *samecycle, facetT *newfacet);
    -void    qh_mergecycle_all(facetT *facetlist, boolT *wasmerge);
    -void    qh_mergecycle_facets( facetT *samecycle, facetT *newfacet);
    -void    qh_mergecycle_neighbors(facetT *samecycle, facetT *newfacet);
    -void    qh_mergecycle_ridges(facetT *samecycle, facetT *newfacet);
    -void    qh_mergecycle_vneighbors( facetT *samecycle, facetT *newfacet);
    -void    qh_mergefacet(facetT *facet1, facetT *facet2, realT *mindist, realT *maxdist, boolT mergeapex);
    -void    qh_mergefacet2d(facetT *facet1, facetT *facet2);
    -void    qh_mergeneighbors(facetT *facet1, facetT *facet2);
    -void    qh_mergeridges(facetT *facet1, facetT *facet2);
    -void    qh_mergesimplex(facetT *facet1, facetT *facet2, boolT mergeapex);
    -void    qh_mergevertex_del(vertexT *vertex, facetT *facet1, facetT *facet2);
    -void    qh_mergevertex_neighbors(facetT *facet1, facetT *facet2);
    -void    qh_mergevertices(setT *vertices1, setT **vertices);
    -setT   *qh_neighbor_intersections(vertexT *vertex);
    -void    qh_newvertices(setT *vertices);
    -boolT   qh_reducevertices(void);
    -vertexT *qh_redundant_vertex(vertexT *vertex);
    -boolT   qh_remove_extravertices(facetT *facet);
    -vertexT *qh_rename_sharedvertex(vertexT *vertex, facetT *facet);
    -void    qh_renameridgevertex(ridgeT *ridge, vertexT *oldvertex, vertexT *newvertex);
    -void    qh_renamevertex(vertexT *oldvertex, vertexT *newvertex, setT *ridges,
    -                        facetT *oldfacet, facetT *neighborA);
    -boolT   qh_test_appendmerge(facetT *facet, facetT *neighbor);
    -boolT   qh_test_vneighbors(void /* qh newfacet_list */);
    -void    qh_tracemerge(facetT *facet1, facetT *facet2);
    -void    qh_tracemerging(void);
    -void    qh_updatetested( facetT *facet1, facetT *facet2);
    -setT   *qh_vertexridges(vertexT *vertex);
    -void    qh_vertexridges_facet(vertexT *vertex, facetT *facet, setT **ridges);
    -void    qh_willdelete(facetT *facet, facetT *replace);
    -
    -#endif /* qhDEFmerge */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/poly.c b/scipy-0.10.1/scipy/spatial/qhull/src/poly.c
    deleted file mode 100644
    index bc87bc6458..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/poly.c
    +++ /dev/null
    @@ -1,1198 +0,0 @@
    -/*
      ---------------------------------
    -
    -   poly.c
    -   implements polygons and simplices
    -
    -   see qh-poly.htm, poly.h and libqhull.h
    -
    -   infrequent code is in poly2.c
    -   (all but top 50 and their callers 12/3/95)
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/poly.c#26 $$Change: 1171 $
    -   $DateTime: 2010/01/09 10:11:25 $$Author: bbarber $
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*======== functions in alphabetical order ==========*/
    -
    -/*---------------------------------
    -
    -  qh_appendfacet( facet )
    -    appends facet to end of qh.facet_list,
    -
    -  returns:
    -    updates qh.newfacet_list, facet_next, facet_list
    -    increments qh.numfacets
    -
    -  notes:
    -    assumes qh.facet_list/facet_tail is defined (createsimplex)
    -
    -  see:
    -    qh_removefacet()
    -
    -*/
    -void qh_appendfacet(facetT *facet) {
    -  facetT *tail= qh facet_tail;
    -
    -  if (tail == qh newfacet_list)
    -    qh newfacet_list= facet;
    -  if (tail == qh facet_next)
    -    qh facet_next= facet;
    -  facet->previous= tail->previous;
    -  facet->next= tail;
    -  if (tail->previous)
    -    tail->previous->next= facet;
    -  else
    -    qh facet_list= facet;
    -  tail->previous= facet;
    -  qh num_facets++;
    -  trace4((qh ferr, 4044, "qh_appendfacet: append f%d to facet_list\n", facet->id));
    -} /* appendfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_appendvertex( vertex )
    -    appends vertex to end of qh.vertex_list,
    -
    -  returns:
    -    sets vertex->newlist
    -    updates qh.vertex_list, newvertex_list
    -    increments qh.num_vertices
    -
    -  notes:
    -    assumes qh.vertex_list/vertex_tail is defined (createsimplex)
    -
    -*/
    -void qh_appendvertex(vertexT *vertex) {
    -  vertexT *tail= qh vertex_tail;
    -
    -  if (tail == qh newvertex_list)
    -    qh newvertex_list= vertex;
    -  vertex->newlist= True;
    -  vertex->previous= tail->previous;
    -  vertex->next= tail;
    -  if (tail->previous)
    -    tail->previous->next= vertex;
    -  else
    -    qh vertex_list= vertex;
    -  tail->previous= vertex;
    -  qh num_vertices++;
    -  trace4((qh ferr, 4045, "qh_appendvertex: append v%d to vertex_list\n", vertex->id));
    -} /* appendvertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_attachnewfacets( )
    -    attach horizon facets to new facets in qh.newfacet_list
    -    newfacets have neighbor and ridge links to horizon but not vice versa
    -    only needed for qh.ONLYgood
    -
    -  returns:
    -    set qh.NEWfacets
    -    horizon facets linked to new facets
    -      ridges changed from visible facets to new facets
    -      simplicial ridges deleted
    -    qh.visible_list, no ridges valid
    -    facet->f.replace is a newfacet (if any)
    -
    -  design:
    -    delete interior ridges and neighbor sets by
    -      for each visible, non-simplicial facet
    -        for each ridge
    -          if last visit or if neighbor is simplicial
    -            if horizon neighbor
    -              delete ridge for horizon's ridge set
    -            delete ridge
    -        erase neighbor set
    -    attach horizon facets and new facets by
    -      for all new facets
    -        if corresponding horizon facet is simplicial
    -          locate corresponding visible facet {may be more than one}
    -          link visible facet to new facet
    -          replace visible facet with new facet in horizon
    -        else it's non-simplicial
    -          for all visible neighbors of the horizon facet
    -            link visible neighbor to new facet
    -            delete visible neighbor from horizon facet
    -          append new facet to horizon's neighbors
    -          the first ridge of the new facet is the horizon ridge
    -          link the new facet into the horizon ridge
    -*/
    -void qh_attachnewfacets(void ) {
    -  facetT *newfacet= NULL, *neighbor, **neighborp, *horizon, *visible;
    -  ridgeT *ridge, **ridgep;
    -
    -  qh NEWfacets= True;
    -  trace3((qh ferr, 3012, "qh_attachnewfacets: delete interior ridges\n"));
    -  qh visit_id++;
    -  FORALLvisible_facets {
    -    visible->visitid= qh visit_id;
    -    if (visible->ridges) {
    -      FOREACHridge_(visible->ridges) {
    -        neighbor= otherfacet_(ridge, visible);
    -        if (neighbor->visitid == qh visit_id
    -            || (!neighbor->visible && neighbor->simplicial)) {
    -          if (!neighbor->visible)  /* delete ridge for simplicial horizon */
    -            qh_setdel(neighbor->ridges, ridge);
    -          qh_setfree(&(ridge->vertices)); /* delete on 2nd visit */
    -          qh_memfree(ridge, (int)sizeof(ridgeT));
    -        }
    -      }
    -      SETfirst_(visible->ridges)= NULL;
    -    }
    -    SETfirst_(visible->neighbors)= NULL;
    -  }
    -  trace1((qh ferr, 1017, "qh_attachnewfacets: attach horizon facets to new facets\n"));
    -  FORALLnew_facets {
    -    horizon= SETfirstt_(newfacet->neighbors, facetT);
    -    if (horizon->simplicial) {
    -      visible= NULL;
    -      FOREACHneighbor_(horizon) {   /* may have more than one horizon ridge */
    -        if (neighbor->visible) {
    -          if (visible) {
    -            if (qh_setequal_skip(newfacet->vertices, 0, horizon->vertices,
    -                                  SETindex_(horizon->neighbors, neighbor))) {
    -              visible= neighbor;
    -              break;
    -            }
    -          }else
    -            visible= neighbor;
    -        }
    -      }
    -      if (visible) {
    -        visible->f.replace= newfacet;
    -        qh_setreplace(horizon->neighbors, visible, newfacet);
    -      }else {
    -        qh_fprintf(qh ferr, 6102, "qhull internal error (qh_attachnewfacets): couldn't find visible facet for horizon f%d of newfacet f%d\n",
    -                 horizon->id, newfacet->id);
    -        qh_errexit2 (qh_ERRqhull, horizon, newfacet);
    -      }
    -    }else { /* non-simplicial, with a ridge for newfacet */
    -      FOREACHneighbor_(horizon) {    /* may hold for many new facets */
    -        if (neighbor->visible) {
    -          neighbor->f.replace= newfacet;
    -          qh_setdelnth(horizon->neighbors,
    -                        SETindex_(horizon->neighbors, neighbor));
    -          neighborp--; /* repeat */
    -        }
    -      }
    -      qh_setappend(&horizon->neighbors, newfacet);
    -      ridge= SETfirstt_(newfacet->ridges, ridgeT);
    -      if (ridge->top == horizon)
    -        ridge->bottom= newfacet;
    -      else
    -        ridge->top= newfacet;
    -      }
    -  } /* newfacets */
    -  if (qh PRINTstatistics) {
    -    FORALLvisible_facets {
    -      if (!visible->f.replace)
    -        zinc_(Zinsidevisible);
    -    }
    -  }
    -} /* attachnewfacets */
    -
    -/*---------------------------------
    -
    -  qh_checkflipped( facet, dist, allerror )
    -    checks facet orientation to interior point
    -
    -    if allerror set,
    -      tests against qh.DISTround
    -    else
    -      tests against 0 since tested against DISTround before
    -
    -  returns:
    -    False if it flipped orientation (sets facet->flipped)
    -    distance if non-NULL
    -*/
    -boolT qh_checkflipped(facetT *facet, realT *distp, boolT allerror) {
    -  realT dist;
    -
    -  if (facet->flipped && !distp)
    -    return False;
    -  zzinc_(Zdistcheck);
    -  qh_distplane(qh interior_point, facet, &dist);
    -  if (distp)
    -    *distp= dist;
    -  if ((allerror && dist > -qh DISTround)|| (!allerror && dist >= 0.0)) {
    -    facet->flipped= True;
    -    zzinc_(Zflippedfacets);
    -    trace0((qh ferr, 19, "qh_checkflipped: facet f%d is flipped, distance= %6.12g during p%d\n",
    -              facet->id, dist, qh furthest_id));
    -    qh_precision("flipped facet");
    -    return False;
    -  }
    -  return True;
    -} /* checkflipped */
    -
    -/*---------------------------------
    -
    -  qh_delfacet( facet )
    -    removes facet from facet_list and frees up its memory
    -
    -  notes:
    -    assumes vertices and ridges already freed
    -*/
    -void qh_delfacet(facetT *facet) {
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  trace4((qh ferr, 4046, "qh_delfacet: delete f%d\n", facet->id));
    -  if (facet == qh tracefacet)
    -    qh tracefacet= NULL;
    -  if (facet == qh GOODclosest)
    -    qh GOODclosest= NULL;
    -  qh_removefacet(facet);
    -  if (!facet->tricoplanar || facet->keepcentrum) {
    -    qh_memfree_(facet->normal, qh normal_size, freelistp);
    -    if (qh CENTERtype == qh_ASvoronoi) {   /* uses macro calls */
    -      qh_memfree_(facet->center, qh center_size, freelistp);
    -    }else /* AScentrum */ {
    -      qh_memfree_(facet->center, qh normal_size, freelistp);
    -    }
    -  }
    -  qh_setfree(&(facet->neighbors));
    -  if (facet->ridges)
    -    qh_setfree(&(facet->ridges));
    -  qh_setfree(&(facet->vertices));
    -  if (facet->outsideset)
    -    qh_setfree(&(facet->outsideset));
    -  if (facet->coplanarset)
    -    qh_setfree(&(facet->coplanarset));
    -  qh_memfree_(facet, (int)sizeof(facetT), freelistp);
    -} /* delfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_deletevisible()
    -    delete visible facets and vertices
    -
    -  returns:
    -    deletes each facet and removes from facetlist
    -    at exit, qh.visible_list empty (== qh.newfacet_list)
    -
    -  notes:
    -    ridges already deleted
    -    horizon facets do not reference facets on qh.visible_list
    -    new facets in qh.newfacet_list
    -    uses   qh.visit_id;
    -*/
    -void qh_deletevisible(void /*qh visible_list*/) {
    -  facetT *visible, *nextfacet;
    -  vertexT *vertex, **vertexp;
    -  int numvisible= 0, numdel= qh_setsize(qh del_vertices);
    -
    -  trace1((qh ferr, 1018, "qh_deletevisible: delete %d visible facets and %d vertices\n",
    -         qh num_visible, numdel));
    -  for (visible= qh visible_list; visible && visible->visible;
    -                visible= nextfacet) { /* deleting current */
    -    nextfacet= visible->next;
    -    numvisible++;
    -    qh_delfacet(visible);
    -  }
    -  if (numvisible != qh num_visible) {
    -    qh_fprintf(qh ferr, 6103, "qhull internal error (qh_deletevisible): qh num_visible %d is not number of visible facets %d\n",
    -             qh num_visible, numvisible);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -  qh num_visible= 0;
    -  zadd_(Zvisfacettot, numvisible);
    -  zmax_(Zvisfacetmax, numvisible);
    -  zzadd_(Zdelvertextot, numdel);
    -  zmax_(Zdelvertexmax, numdel);
    -  FOREACHvertex_(qh del_vertices)
    -    qh_delvertex(vertex);
    -  qh_settruncate(qh del_vertices, 0);
    -} /* deletevisible */
    -
    -/*---------------------------------
    -
    -  qh_facetintersect( facetA, facetB, skipa, skipB, prepend )
    -    return vertices for intersection of two simplicial facets
    -    may include 1 prepended entry (if more, need to settemppush)
    -
    -  returns:
    -    returns set of qh.hull_dim-1 + prepend vertices
    -    returns skipped index for each test and checks for exactly one
    -
    -  notes:
    -    does not need settemp since set in quick memory
    -
    -  see also:
    -    qh_vertexintersect and qh_vertexintersect_new
    -    use qh_setnew_delnthsorted to get nth ridge (no skip information)
    -
    -  design:
    -    locate skipped vertex by scanning facet A's neighbors
    -    locate skipped vertex by scanning facet B's neighbors
    -    intersect the vertex sets
    -*/
    -setT *qh_facetintersect(facetT *facetA, facetT *facetB,
    -                         int *skipA,int *skipB, int prepend) {
    -  setT *intersect;
    -  int dim= qh hull_dim, i, j;
    -  facetT **neighborsA, **neighborsB;
    -
    -  neighborsA= SETaddr_(facetA->neighbors, facetT);
    -  neighborsB= SETaddr_(facetB->neighbors, facetT);
    -  i= j= 0;
    -  if (facetB == *neighborsA++)
    -    *skipA= 0;
    -  else if (facetB == *neighborsA++)
    -    *skipA= 1;
    -  else if (facetB == *neighborsA++)
    -    *skipA= 2;
    -  else {
    -    for (i=3; i < dim; i++) {
    -      if (facetB == *neighborsA++) {
    -        *skipA= i;
    -        break;
    -      }
    -    }
    -  }
    -  if (facetA == *neighborsB++)
    -    *skipB= 0;
    -  else if (facetA == *neighborsB++)
    -    *skipB= 1;
    -  else if (facetA == *neighborsB++)
    -    *skipB= 2;
    -  else {
    -    for (j=3; j < dim; j++) {
    -      if (facetA == *neighborsB++) {
    -        *skipB= j;
    -        break;
    -      }
    -    }
    -  }
    -  if (i >= dim || j >= dim) {
    -    qh_fprintf(qh ferr, 6104, "qhull internal error (qh_facetintersect): f%d or f%d not in others neighbors\n",
    -            facetA->id, facetB->id);
    -    qh_errexit2 (qh_ERRqhull, facetA, facetB);
    -  }
    -  intersect= qh_setnew_delnthsorted(facetA->vertices, qh hull_dim, *skipA, prepend);
    -  trace4((qh ferr, 4047, "qh_facetintersect: f%d skip %d matches f%d skip %d\n",
    -          facetA->id, *skipA, facetB->id, *skipB));
    -  return(intersect);
    -} /* facetintersect */
    -
    -/*---------------------------------
    -
    -  qh_gethash( hashsize, set, size, firstindex, skipelem )
    -    return hashvalue for a set with firstindex and skipelem
    -
    -  notes:
    -    returned hash is in [0,hashsize)
    -    assumes at least firstindex+1 elements
    -    assumes skipelem is NULL, in set, or part of hash
    -
    -    hashes memory addresses which may change over different runs of the same data
    -    using sum for hash does badly in high d
    -*/
    -int qh_gethash(int hashsize, setT *set, int size, int firstindex, void *skipelem) {
    -  void **elemp= SETelemaddr_(set, firstindex, void);
    -  ptr_intT hash = 0, elem;
    -  unsigned result;
    -  int i;
    -#ifdef _MSC_VER                   /* Microsoft Visual C++ -- warn about 64-bit issues */
    -#pragma warning( push)            /* WARN64 -- ptr_intT holds a 64-bit pointer */
    -#pragma warning( disable : 4311)  /* 'type cast': pointer truncation from 'void*' to 'ptr_intT' */
    -#endif
    -
    -  switch (size-firstindex) {
    -  case 1:
    -    hash= (ptr_intT)(*elemp) - (ptr_intT) skipelem;
    -    break;
    -  case 2:
    -    hash= (ptr_intT)(*elemp) + (ptr_intT)elemp[1] - (ptr_intT) skipelem;
    -    break;
    -  case 3:
    -    hash= (ptr_intT)(*elemp) + (ptr_intT)elemp[1] + (ptr_intT)elemp[2]
    -      - (ptr_intT) skipelem;
    -    break;
    -  case 4:
    -    hash= (ptr_intT)(*elemp) + (ptr_intT)elemp[1] + (ptr_intT)elemp[2]
    -      + (ptr_intT)elemp[3] - (ptr_intT) skipelem;
    -    break;
    -  case 5:
    -    hash= (ptr_intT)(*elemp) + (ptr_intT)elemp[1] + (ptr_intT)elemp[2]
    -      + (ptr_intT)elemp[3] + (ptr_intT)elemp[4] - (ptr_intT) skipelem;
    -    break;
    -  case 6:
    -    hash= (ptr_intT)(*elemp) + (ptr_intT)elemp[1] + (ptr_intT)elemp[2]
    -      + (ptr_intT)elemp[3] + (ptr_intT)elemp[4]+ (ptr_intT)elemp[5]
    -      - (ptr_intT) skipelem;
    -    break;
    -  default:
    -    hash= 0;
    -    i= 3;
    -    do {     /* this is about 10% in 10-d */
    -      if ((elem= (ptr_intT)*elemp++) != (ptr_intT)skipelem) {
    -        hash ^= (elem << i) + (elem >> (32-i));
    -        i += 3;
    -        if (i >= 32)
    -          i -= 32;
    -      }
    -    }while (*elemp);
    -    break;
    -  }
    -  if (hashsize<0) {
    -    qh_fprintf(qh ferr, 6232, "qhull internal error: negative hashsize %d passed to qh_gethash [poly.c]\n", hashsize);
    -    qh_errexit2 (qh_ERRqhull, NULL, NULL);
    -  }
    -  result= (unsigned)hash;
    -  result %= (unsigned)hashsize;
    -  /* result= 0; for debugging */
    -  return result;
    -#ifdef _MSC_VER
    -#pragma warning( pop)
    -#endif
    -} /* gethash */
    -
    -/*---------------------------------
    -
    -  qh_makenewfacet( vertices, toporient, horizon )
    -    creates a toporient? facet from vertices
    -
    -  returns:
    -    returns newfacet
    -      adds newfacet to qh.facet_list
    -      newfacet->vertices= vertices
    -      if horizon
    -        newfacet->neighbor= horizon, but not vice versa
    -    newvertex_list updated with vertices
    -*/
    -facetT *qh_makenewfacet(setT *vertices, boolT toporient,facetT *horizon) {
    -  facetT *newfacet;
    -  vertexT *vertex, **vertexp;
    -
    -  FOREACHvertex_(vertices) {
    -    if (!vertex->newlist) {
    -      qh_removevertex(vertex);
    -      qh_appendvertex(vertex);
    -    }
    -  }
    -  newfacet= qh_newfacet();
    -  newfacet->vertices= vertices;
    -  newfacet->toporient= (unsigned char)toporient;
    -  if (horizon)
    -    qh_setappend(&(newfacet->neighbors), horizon);
    -  qh_appendfacet(newfacet);
    -  return(newfacet);
    -} /* makenewfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_makenewplanes()
    -    make new hyperplanes for facets on qh.newfacet_list
    -
    -  returns:
    -    all facets have hyperplanes or are marked for   merging
    -    doesn't create hyperplane if horizon is coplanar (will merge)
    -    updates qh.min_vertex if qh.JOGGLEmax
    -
    -  notes:
    -    facet->f.samecycle is defined for facet->mergehorizon facets
    -*/
    -void qh_makenewplanes(void /* newfacet_list */) {
    -  facetT *newfacet;
    -
    -  FORALLnew_facets {
    -    if (!newfacet->mergehorizon)
    -      qh_setfacetplane(newfacet);
    -  }
    -  if (qh JOGGLEmax < REALmax/2)
    -    minimize_(qh min_vertex, -wwval_(Wnewvertexmax));
    -} /* makenewplanes */
    -
    -/*---------------------------------
    -
    -  qh_makenew_nonsimplicial( visible, apex, numnew )
    -    make new facets for ridges of a visible facet
    -
    -  returns:
    -    first newfacet, bumps numnew as needed
    -    attaches new facets if !qh.ONLYgood
    -    marks ridge neighbors for simplicial visible
    -    if (qh.ONLYgood)
    -      ridges on newfacet, horizon, and visible
    -    else
    -      ridge and neighbors between newfacet and   horizon
    -      visible facet's ridges are deleted
    -
    -  notes:
    -    qh.visit_id if visible has already been processed
    -    sets neighbor->seen for building f.samecycle
    -      assumes all 'seen' flags initially false
    -
    -  design:
    -    for each ridge of visible facet
    -      get neighbor of visible facet
    -      if neighbor was already processed
    -        delete the ridge (will delete all visible facets later)
    -      if neighbor is a horizon facet
    -        create a new facet
    -        if neighbor coplanar
    -          adds newfacet to f.samecycle for later merging
    -        else
    -          updates neighbor's neighbor set
    -          (checks for non-simplicial facet with multiple ridges to visible facet)
    -        updates neighbor's ridge set
    -        (checks for simplicial neighbor to non-simplicial visible facet)
    -        (deletes ridge if neighbor is simplicial)
    -
    -*/
    -#ifndef qh_NOmerge
    -facetT *qh_makenew_nonsimplicial(facetT *visible, vertexT *apex, int *numnew) {
    -  void **freelistp; /* used !qh_NOmem */
    -  ridgeT *ridge, **ridgep;
    -  facetT *neighbor, *newfacet= NULL, *samecycle;
    -  setT *vertices;
    -  boolT toporient;
    -  int ridgeid;
    -
    -  FOREACHridge_(visible->ridges) {
    -    ridgeid= ridge->id;
    -    neighbor= otherfacet_(ridge, visible);
    -    if (neighbor->visible) {
    -      if (!qh ONLYgood) {
    -        if (neighbor->visitid == qh visit_id) {
    -          qh_setfree(&(ridge->vertices));  /* delete on 2nd visit */
    -          qh_memfree_(ridge, (int)sizeof(ridgeT), freelistp);
    -        }
    -      }
    -    }else {  /* neighbor is an horizon facet */
    -      toporient= (ridge->top == visible);
    -      vertices= qh_setnew(qh hull_dim); /* makes sure this is quick */
    -      qh_setappend(&vertices, apex);
    -      qh_setappend_set(&vertices, ridge->vertices);
    -      newfacet= qh_makenewfacet(vertices, toporient, neighbor);
    -      (*numnew)++;
    -      if (neighbor->coplanar) {
    -        newfacet->mergehorizon= True;
    -        if (!neighbor->seen) {
    -          newfacet->f.samecycle= newfacet;
    -          neighbor->f.newcycle= newfacet;
    -        }else {
    -          samecycle= neighbor->f.newcycle;
    -          newfacet->f.samecycle= samecycle->f.samecycle;
    -          samecycle->f.samecycle= newfacet;
    -        }
    -      }
    -      if (qh ONLYgood) {
    -        if (!neighbor->simplicial)
    -          qh_setappend(&(newfacet->ridges), ridge);
    -      }else {  /* qh_attachnewfacets */
    -        if (neighbor->seen) {
    -          if (neighbor->simplicial) {
    -            qh_fprintf(qh ferr, 6105, "qhull internal error (qh_makenew_nonsimplicial): simplicial f%d sharing two ridges with f%d\n",
    -                   neighbor->id, visible->id);
    -            qh_errexit2 (qh_ERRqhull, neighbor, visible);
    -          }
    -          qh_setappend(&(neighbor->neighbors), newfacet);
    -        }else
    -          qh_setreplace(neighbor->neighbors, visible, newfacet);
    -        if (neighbor->simplicial) {
    -          qh_setdel(neighbor->ridges, ridge);
    -          qh_setfree(&(ridge->vertices));
    -          qh_memfree(ridge, (int)sizeof(ridgeT));
    -        }else {
    -          qh_setappend(&(newfacet->ridges), ridge);
    -          if (toporient)
    -            ridge->top= newfacet;
    -          else
    -            ridge->bottom= newfacet;
    -        }
    -      trace4((qh ferr, 4048, "qh_makenew_nonsimplicial: created facet f%d from v%d and r%d of horizon f%d\n",
    -            newfacet->id, apex->id, ridgeid, neighbor->id));
    -      }
    -    }
    -    neighbor->seen= True;
    -  } /* for each ridge */
    -  if (!qh ONLYgood)
    -    SETfirst_(visible->ridges)= NULL;
    -  return newfacet;
    -} /* makenew_nonsimplicial */
    -#else /* qh_NOmerge */
    -facetT *qh_makenew_nonsimplicial(facetT *visible, vertexT *apex, int *numnew) {
    -  return NULL;
    -}
    -#endif /* qh_NOmerge */
    -
    -/*---------------------------------
    -
    -  qh_makenew_simplicial( visible, apex, numnew )
    -    make new facets for simplicial visible facet and apex
    -
    -  returns:
    -    attaches new facets if (!qh.ONLYgood)
    -      neighbors between newfacet and horizon
    -
    -  notes:
    -    nop if neighbor->seen or neighbor->visible(see qh_makenew_nonsimplicial)
    -
    -  design:
    -    locate neighboring horizon facet for visible facet
    -    determine vertices and orientation
    -    create new facet
    -    if coplanar,
    -      add new facet to f.samecycle
    -    update horizon facet's neighbor list
    -*/
    -facetT *qh_makenew_simplicial(facetT *visible, vertexT *apex, int *numnew) {
    -  facetT *neighbor, **neighborp, *newfacet= NULL;
    -  setT *vertices;
    -  boolT flip, toporient;
    -  int horizonskip, visibleskip;
    -
    -  FOREACHneighbor_(visible) {
    -    if (!neighbor->seen && !neighbor->visible) {
    -      vertices= qh_facetintersect(neighbor,visible, &horizonskip, &visibleskip, 1);
    -      SETfirst_(vertices)= apex;
    -      flip= ((horizonskip & 0x1) ^ (visibleskip & 0x1));
    -      if (neighbor->toporient)
    -        toporient= horizonskip & 0x1;
    -      else
    -        toporient= (horizonskip & 0x1) ^ 0x1;
    -      newfacet= qh_makenewfacet(vertices, toporient, neighbor);
    -      (*numnew)++;
    -      if (neighbor->coplanar && (qh PREmerge || qh MERGEexact)) {
    -#ifndef qh_NOmerge
    -        newfacet->f.samecycle= newfacet;
    -        newfacet->mergehorizon= True;
    -#endif
    -      }
    -      if (!qh ONLYgood)
    -        SETelem_(neighbor->neighbors, horizonskip)= newfacet;
    -      trace4((qh ferr, 4049, "qh_makenew_simplicial: create facet f%d top %d from v%d and horizon f%d skip %d top %d and visible f%d skip %d, flip? %d\n",
    -            newfacet->id, toporient, apex->id, neighbor->id, horizonskip,
    -              neighbor->toporient, visible->id, visibleskip, flip));
    -    }
    -  }
    -  return newfacet;
    -} /* makenew_simplicial */
    -
    -/*---------------------------------
    -
    -  qh_matchneighbor( newfacet, newskip, hashsize, hashcount )
    -    either match subridge of newfacet with neighbor or add to hash_table
    -
    -  returns:
    -    duplicate ridges are unmatched and marked by qh_DUPLICATEridge
    -
    -  notes:
    -    ridge is newfacet->vertices w/o newskip vertex
    -    do not allocate memory (need to free hash_table cleanly)
    -    uses linear hash chains
    -
    -  see also:
    -    qh_matchduplicates
    -
    -  design:
    -    for each possible matching facet in qh.hash_table
    -      if vertices match
    -        set ismatch, if facets have opposite orientation
    -        if ismatch and matching facet doesn't have a match
    -          match the facets by updating their neighbor sets
    -        else
    -          indicate a duplicate ridge
    -          set facet hyperplane for later testing
    -          add facet to hashtable
    -          unless the other facet was already a duplicate ridge
    -            mark both facets with a duplicate ridge
    -            add other facet (if defined) to hash table
    -*/
    -void qh_matchneighbor(facetT *newfacet, int newskip, int hashsize, int *hashcount) {
    -  boolT newfound= False;   /* True, if new facet is already in hash chain */
    -  boolT same, ismatch;
    -  int hash, scan;
    -  facetT *facet, *matchfacet;
    -  int skip, matchskip;
    -
    -  hash= qh_gethash(hashsize, newfacet->vertices, qh hull_dim, 1,
    -                     SETelem_(newfacet->vertices, newskip));
    -  trace4((qh ferr, 4050, "qh_matchneighbor: newfacet f%d skip %d hash %d hashcount %d\n",
    -          newfacet->id, newskip, hash, *hashcount));
    -  zinc_(Zhashlookup);
    -  for (scan= hash; (facet= SETelemt_(qh hash_table, scan, facetT));
    -       scan= (++scan >= hashsize ? 0 : scan)) {
    -    if (facet == newfacet) {
    -      newfound= True;
    -      continue;
    -    }
    -    zinc_(Zhashtests);
    -    if (qh_matchvertices(1, newfacet->vertices, newskip, facet->vertices, &skip, &same)) {
    -      if (SETelem_(newfacet->vertices, newskip) ==
    -          SETelem_(facet->vertices, skip)) {
    -        qh_precision("two facets with the same vertices");
    -        qh_fprintf(qh ferr, 6106, "qhull precision error: Vertex sets are the same for f%d and f%d.  Can not force output.\n",
    -          facet->id, newfacet->id);
    -        qh_errexit2 (qh_ERRprec, facet, newfacet);
    -      }
    -      ismatch= (same == (boolT)((newfacet->toporient ^ facet->toporient)));
    -      matchfacet= SETelemt_(facet->neighbors, skip, facetT);
    -      if (ismatch && !matchfacet) {
    -        SETelem_(facet->neighbors, skip)= newfacet;
    -        SETelem_(newfacet->neighbors, newskip)= facet;
    -        (*hashcount)--;
    -        trace4((qh ferr, 4051, "qh_matchneighbor: f%d skip %d matched with new f%d skip %d\n",
    -           facet->id, skip, newfacet->id, newskip));
    -        return;
    -      }
    -      if (!qh PREmerge && !qh MERGEexact) {
    -        qh_precision("a ridge with more than two neighbors");
    -        qh_fprintf(qh ferr, 6107, "qhull precision error: facets f%d, f%d and f%d meet at a ridge with more than 2 neighbors.  Can not continue.\n",
    -                 facet->id, newfacet->id, getid_(matchfacet));
    -        qh_errexit2 (qh_ERRprec, facet, newfacet);
    -      }
    -      SETelem_(newfacet->neighbors, newskip)= qh_DUPLICATEridge;
    -      newfacet->dupridge= True;
    -      if (!newfacet->normal)
    -        qh_setfacetplane(newfacet);
    -      qh_addhash(newfacet, qh hash_table, hashsize, hash);
    -      (*hashcount)++;
    -      if (!facet->normal)
    -        qh_setfacetplane(facet);
    -      if (matchfacet != qh_DUPLICATEridge) {
    -        SETelem_(facet->neighbors, skip)= qh_DUPLICATEridge;
    -        facet->dupridge= True;
    -        if (!facet->normal)
    -          qh_setfacetplane(facet);
    -        if (matchfacet) {
    -          matchskip= qh_setindex(matchfacet->neighbors, facet);
    -          SETelem_(matchfacet->neighbors, matchskip)= qh_DUPLICATEridge;
    -          matchfacet->dupridge= True;
    -          if (!matchfacet->normal)
    -            qh_setfacetplane(matchfacet);
    -          qh_addhash(matchfacet, qh hash_table, hashsize, hash);
    -          *hashcount += 2;
    -        }
    -      }
    -      trace4((qh ferr, 4052, "qh_matchneighbor: new f%d skip %d duplicates ridge for f%d skip %d matching f%d ismatch %d at hash %d\n",
    -           newfacet->id, newskip, facet->id, skip,
    -           (matchfacet == qh_DUPLICATEridge ? -2 : getid_(matchfacet)),
    -           ismatch, hash));
    -      return; /* end of duplicate ridge */
    -    }
    -  }
    -  if (!newfound)
    -    SETelem_(qh hash_table, scan)= newfacet;  /* same as qh_addhash */
    -  (*hashcount)++;
    -  trace4((qh ferr, 4053, "qh_matchneighbor: no match for f%d skip %d at hash %d\n",
    -           newfacet->id, newskip, hash));
    -} /* matchneighbor */
    -
    -
    -/*---------------------------------
    -
    -  qh_matchnewfacets()
    -    match newfacets in qh.newfacet_list to their newfacet neighbors
    -
    -  returns:
    -    qh.newfacet_list with full neighbor sets
    -      get vertices with nth neighbor by deleting nth vertex
    -    if qh.PREmerge/MERGEexact or qh.FORCEoutput
    -      sets facet->flippped if flipped normal (also prevents point partitioning)
    -    if duplicate ridges and qh.PREmerge/MERGEexact
    -      sets facet->dupridge
    -      missing neighbor links identifies extra ridges to be merging (qh_MERGEridge)
    -
    -  notes:
    -    newfacets already have neighbor[0] (horizon facet)
    -    assumes qh.hash_table is NULL
    -    vertex->neighbors has not been updated yet
    -    do not allocate memory after qh.hash_table (need to free it cleanly)
    -
    -  design:
    -    delete neighbor sets for all new facets
    -    initialize a hash table
    -    for all new facets
    -      match facet with neighbors
    -    if unmatched facets (due to duplicate ridges)
    -      for each new facet with a duplicate ridge
    -        match it with a facet
    -    check for flipped facets
    -*/
    -void qh_matchnewfacets(void /* qh newfacet_list */) {
    -  int numnew=0, hashcount=0, newskip;
    -  facetT *newfacet, *neighbor;
    -  int dim= qh hull_dim, hashsize, neighbor_i, neighbor_n;
    -  setT *neighbors;
    -#ifndef qh_NOtrace
    -  int facet_i, facet_n, numfree= 0;
    -  facetT *facet;
    -#endif
    -
    -  trace1((qh ferr, 1019, "qh_matchnewfacets: match neighbors for new facets.\n"));
    -  FORALLnew_facets {
    -    numnew++;
    -    {  /* inline qh_setzero(newfacet->neighbors, 1, qh hull_dim); */
    -      neighbors= newfacet->neighbors;
    -      neighbors->e[neighbors->maxsize].i= dim+1; /*may be overwritten*/
    -      memset((char *)SETelemaddr_(neighbors, 1, void), 0, dim * SETelemsize);
    -    }
    -  }
    -  qh_newhashtable(numnew*(qh hull_dim-1)); /* twice what is normally needed,
    -                                     but every ridge could be DUPLICATEridge */
    -  hashsize= qh_setsize(qh hash_table);
    -  FORALLnew_facets {
    -    for (newskip=1; newskipneighbors, k, facetT);
    -          if (!neighbor || neighbor == qh_DUPLICATEridge)
    -            count++;
    -        }
    -        if (facet == newfacet)
    -          break;
    -      }
    -      if (count != hashcount) {
    -        qh_fprintf(qh ferr, 8088, "qh_matchnewfacets: after adding facet %d, hashcount %d != count %d\n",
    -                 newfacet->id, hashcount, count);
    -        qh_errexit(qh_ERRqhull, newfacet, NULL);
    -      }
    -    }
    -#endif  /* end of trap code */
    -  }
    -  if (hashcount) {
    -    FORALLnew_facets {
    -      if (newfacet->dupridge) {
    -        FOREACHneighbor_i_(newfacet) {
    -          if (neighbor == qh_DUPLICATEridge) {
    -            qh_matchduplicates(newfacet, neighbor_i, hashsize, &hashcount);
    -                    /* this may report MERGEfacet */
    -          }
    -        }
    -      }
    -    }
    -  }
    -  if (hashcount) {
    -    qh_fprintf(qh ferr, 6108, "qhull internal error (qh_matchnewfacets): %d neighbors did not match up\n",
    -        hashcount);
    -    qh_printhashtable(qh ferr);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }
    -#ifndef qh_NOtrace
    -  if (qh IStracing >= 2) {
    -    FOREACHfacet_i_(qh hash_table) {
    -      if (!facet)
    -        numfree++;
    -    }
    -    qh_fprintf(qh ferr, 8089, "qh_matchnewfacets: %d new facets, %d unused hash entries .  hashsize %d\n",
    -             numnew, numfree, qh_setsize(qh hash_table));
    -  }
    -#endif /* !qh_NOtrace */
    -  qh_setfree(&qh hash_table);
    -  if (qh PREmerge || qh MERGEexact) {
    -    if (qh IStracing >= 4)
    -      qh_printfacetlist(qh newfacet_list, NULL, qh_ALL);
    -    FORALLnew_facets {
    -      if (newfacet->normal)
    -        qh_checkflipped(newfacet, NULL, qh_ALL);
    -    }
    -  }else if (qh FORCEoutput)
    -    qh_checkflipped_all(qh newfacet_list);  /* prints warnings for flipped */
    -} /* matchnewfacets */
    -
    -
    -/*---------------------------------
    -
    -  qh_matchvertices( firstindex, verticesA, skipA, verticesB, skipB, same )
    -    tests whether vertices match with a single skip
    -    starts match at firstindex since all new facets have a common vertex
    -
    -  returns:
    -    true if matched vertices
    -    skip index for each set
    -    sets same iff vertices have the same orientation
    -
    -  notes:
    -    assumes skipA is in A and both sets are the same size
    -
    -  design:
    -    set up pointers
    -    scan both sets checking for a match
    -    test orientation
    -*/
    -boolT qh_matchvertices(int firstindex, setT *verticesA, int skipA,
    -       setT *verticesB, int *skipB, boolT *same) {
    -  vertexT **elemAp, **elemBp, **skipBp=NULL, **skipAp;
    -
    -  elemAp= SETelemaddr_(verticesA, firstindex, vertexT);
    -  elemBp= SETelemaddr_(verticesB, firstindex, vertexT);
    -  skipAp= SETelemaddr_(verticesA, skipA, vertexT);
    -  do if (elemAp != skipAp) {
    -    while (*elemAp != *elemBp++) {
    -      if (skipBp)
    -        return False;
    -      skipBp= elemBp;  /* one extra like FOREACH */
    -    }
    -  }while (*(++elemAp));
    -  if (!skipBp)
    -    skipBp= ++elemBp;
    -  *skipB= SETindex_(verticesB, skipB); /* i.e., skipBp - verticesB */
    -  *same= !((skipA & 0x1) ^ (*skipB & 0x1)); /* result is 0 or 1 */
    -  trace4((qh ferr, 4054, "qh_matchvertices: matched by skip %d(v%d) and skip %d(v%d) same? %d\n",
    -          skipA, (*skipAp)->id, *skipB, (*(skipBp-1))->id, *same));
    -  return(True);
    -} /* matchvertices */
    -
    -/*---------------------------------
    -
    -  qh_newfacet()
    -    return a new facet
    -
    -  returns:
    -    all fields initialized or cleared   (NULL)
    -    preallocates neighbors set
    -*/
    -facetT *qh_newfacet(void) {
    -  facetT *facet;
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  qh_memalloc_((int)sizeof(facetT), freelistp, facet, facetT);
    -  memset((char *)facet, (size_t)0, sizeof(facetT));
    -  if (qh facet_id == qh tracefacet_id)
    -    qh tracefacet= facet;
    -  facet->id= qh facet_id++;
    -  facet->neighbors= qh_setnew(qh hull_dim);
    -#if !qh_COMPUTEfurthest
    -  facet->furthestdist= 0.0;
    -#endif
    -#if qh_MAXoutside
    -  if (qh FORCEoutput && qh APPROXhull)
    -    facet->maxoutside= qh MINoutside;
    -  else
    -    facet->maxoutside= qh DISTround;
    -#endif
    -  facet->simplicial= True;
    -  facet->good= True;
    -  facet->newfacet= True;
    -  trace4((qh ferr, 4055, "qh_newfacet: created facet f%d\n", facet->id));
    -  return(facet);
    -} /* newfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_newridge()
    -    return a new ridge
    -*/
    -ridgeT *qh_newridge(void) {
    -  ridgeT *ridge;
    -  void **freelistp;   /* used !qh_NOmem */
    -
    -  qh_memalloc_((int)sizeof(ridgeT), freelistp, ridge, ridgeT);
    -  memset((char *)ridge, (size_t)0, sizeof(ridgeT));
    -  zinc_(Ztotridges);
    -  if (qh ridge_id == 0xFFFFFF) {
    -    qh_fprintf(qh ferr, 7074, "\
    -qhull warning: more than %d ridges.  ID field overflows and two ridges\n\
    -may have the same identifier.  Otherwise output ok.\n", 0xFFFFFF);
    -  }
    -  ridge->id= qh ridge_id++;
    -  trace4((qh ferr, 4056, "qh_newridge: created ridge r%d\n", ridge->id));
    -  return(ridge);
    -} /* newridge */
    -
    -
    -/*---------------------------------
    -
    -  qh_pointid(  )
    -    return id for a point,
    -    returns -3 if null, -2 if interior, or -1 if not known
    -
    -  alternative code:
    -    unsigned long id;
    -    id= ((unsigned long)point - (unsigned long)qh.first_point)/qh.normal_size;
    -
    -  notes:
    -    WARN64 -- id truncated to 32-bits, at most 2G points
    -    NOerrors returned (QhullPoint::id)
    -    if point not in point array
    -      the code does a comparison of unrelated pointers.
    -*/
    -int qh_pointid(pointT *point) {
    -  ptr_intT offset, id;
    -
    -  if (!point)
    -    return -3;
    -  else if (point == qh interior_point)
    -    return -2;
    -  else if (point >= qh first_point
    -  && point < qh first_point + qh num_points * qh hull_dim) {
    -    offset= (ptr_intT)(point - qh first_point);
    -    id= offset / qh hull_dim;
    -  }else if ((id= qh_setindex(qh other_points, point)) != -1)
    -    id += qh num_points;
    -  else
    -    return -1;
    -  return (int)id;
    -} /* pointid */
    -
    -/*---------------------------------
    -
    -  qh_removefacet( facet )
    -    unlinks facet from qh.facet_list,
    -
    -  returns:
    -    updates qh.facet_list .newfacet_list .facet_next visible_list
    -    decrements qh.num_facets
    -
    -  see:
    -    qh_appendfacet
    -*/
    -void qh_removefacet(facetT *facet) {
    -  facetT *next= facet->next, *previous= facet->previous;
    -
    -  if (facet == qh newfacet_list)
    -    qh newfacet_list= next;
    -  if (facet == qh facet_next)
    -    qh facet_next= next;
    -  if (facet == qh visible_list)
    -    qh visible_list= next;
    -  if (previous) {
    -    previous->next= next;
    -    next->previous= previous;
    -  }else {  /* 1st facet in qh facet_list */
    -    qh facet_list= next;
    -    qh facet_list->previous= NULL;
    -  }
    -  qh num_facets--;
    -  trace4((qh ferr, 4057, "qh_removefacet: remove f%d from facet_list\n", facet->id));
    -} /* removefacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_removevertex( vertex )
    -    unlinks vertex from qh.vertex_list,
    -
    -  returns:
    -    updates qh.vertex_list .newvertex_list
    -    decrements qh.num_vertices
    -*/
    -void qh_removevertex(vertexT *vertex) {
    -  vertexT *next= vertex->next, *previous= vertex->previous;
    -
    -  if (vertex == qh newvertex_list)
    -    qh newvertex_list= next;
    -  if (previous) {
    -    previous->next= next;
    -    next->previous= previous;
    -  }else {  /* 1st vertex in qh vertex_list */
    -    qh vertex_list= vertex->next;
    -    qh vertex_list->previous= NULL;
    -  }
    -  qh num_vertices--;
    -  trace4((qh ferr, 4058, "qh_removevertex: remove v%d from vertex_list\n", vertex->id));
    -} /* removevertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_updatevertices()
    -    update vertex neighbors and delete interior vertices
    -
    -  returns:
    -    if qh.VERTEXneighbors, updates neighbors for each vertex
    -      if qh.newvertex_list,
    -         removes visible neighbors  from vertex neighbors
    -      if qh.newfacet_list
    -         adds new facets to vertex neighbors
    -    if qh.visible_list
    -       interior vertices added to qh.del_vertices for later partitioning
    -
    -  design:
    -    if qh.VERTEXneighbors
    -      deletes references to visible facets from vertex neighbors
    -      appends new facets to the neighbor list for each vertex
    -      checks all vertices of visible facets
    -        removes visible facets from neighbor lists
    -        marks unused vertices for deletion
    -*/
    -void qh_updatevertices(void /*qh newvertex_list, newfacet_list, visible_list*/) {
    -  facetT *newfacet= NULL, *neighbor, **neighborp, *visible;
    -  vertexT *vertex, **vertexp;
    -
    -  trace3((qh ferr, 3013, "qh_updatevertices: delete interior vertices and update vertex->neighbors\n"));
    -  if (qh VERTEXneighbors) {
    -    FORALLvertex_(qh newvertex_list) {
    -      FOREACHneighbor_(vertex) {
    -        if (neighbor->visible)
    -          SETref_(neighbor)= NULL;
    -      }
    -      qh_setcompact(vertex->neighbors);
    -    }
    -    FORALLnew_facets {
    -      FOREACHvertex_(newfacet->vertices)
    -        qh_setappend(&vertex->neighbors, newfacet);
    -    }
    -    FORALLvisible_facets {
    -      FOREACHvertex_(visible->vertices) {
    -        if (!vertex->newlist && !vertex->deleted) {
    -          FOREACHneighbor_(vertex) { /* this can happen under merging */
    -            if (!neighbor->visible)
    -              break;
    -          }
    -          if (neighbor)
    -            qh_setdel(vertex->neighbors, visible);
    -          else {
    -            vertex->deleted= True;
    -            qh_setappend(&qh del_vertices, vertex);
    -            trace2((qh ferr, 2041, "qh_updatevertices: delete vertex p%d(v%d) in f%d\n",
    -                  qh_pointid(vertex->point), vertex->id, visible->id));
    -          }
    -        }
    -      }
    -    }
    -  }else {  /* !VERTEXneighbors */
    -    FORALLvisible_facets {
    -      FOREACHvertex_(visible->vertices) {
    -        if (!vertex->newlist && !vertex->deleted) {
    -          vertex->deleted= True;
    -          qh_setappend(&qh del_vertices, vertex);
    -          trace2((qh ferr, 2042, "qh_updatevertices: delete vertex p%d(v%d) in f%d\n",
    -                  qh_pointid(vertex->point), vertex->id, visible->id));
    -        }
    -      }
    -    }
    -  }
    -} /* updatevertices */
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/poly.h b/scipy-0.10.1/scipy/spatial/qhull/src/poly.h
    deleted file mode 100644
    index 929de2c584..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/poly.h
    +++ /dev/null
    @@ -1,295 +0,0 @@
    -/*
      ---------------------------------
    -
    -   poly.h
    -   header file for poly.c and poly2.c
    -
    -   see qh-poly.htm, libqhull.h and poly.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/poly.h#22 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFpoly
    -#define qhDEFpoly 1
    -
    -#include "libqhull.h"
    -
    -/*===============   constants ========================== */
    -
    -/*----------------------------------
    -
    -  ALGORITHMfault
    -    use as argument to checkconvex() to report errors during buildhull
    -*/
    -#define qh_ALGORITHMfault 0
    -
    -/*----------------------------------
    -
    -  DATAfault
    -    use as argument to checkconvex() to report errors during initialhull
    -*/
    -#define qh_DATAfault 1
    -
    -/*----------------------------------
    -
    -  DUPLICATEridge
    -    special value for facet->neighbor to indicate a duplicate ridge
    -
    -  notes:
    -    set by matchneighbor, used by matchmatch and mark_dupridge
    -*/
    -#define qh_DUPLICATEridge (facetT *)1L
    -
    -/*----------------------------------
    -
    -  MERGEridge       flag in facet
    -    special value for facet->neighbor to indicate a merged ridge
    -
    -  notes:
    -    set by matchneighbor, used by matchmatch and mark_dupridge
    -*/
    -#define qh_MERGEridge (facetT *)2L
    -
    -
    -/*============ -structures- ====================*/
    -
    -/*=========== -macros- =========================*/
    -
    -/*----------------------------------
    -
    -  FORALLfacet_( facetlist ) { ... }
    -    assign 'facet' to each facet in facetlist
    -
    -  notes:
    -    uses 'facetT *facet;'
    -    assumes last facet is a sentinel
    -
    -  see:
    -    FORALLfacets
    -*/
    -#define FORALLfacet_( facetlist ) if (facetlist ) for ( facet=( facetlist ); facet && facet->next; facet= facet->next )
    -
    -/*----------------------------------
    -
    -  FORALLnew_facets { ... }
    -    assign 'newfacet' to each facet in qh.newfacet_list
    -
    -  notes:
    -    uses 'facetT *newfacet;'
    -    at exit, newfacet==NULL
    -*/
    -#define FORALLnew_facets for ( newfacet=qh newfacet_list;newfacet && newfacet->next;newfacet=newfacet->next )
    -
    -/*----------------------------------
    -
    -  FORALLvertex_( vertexlist ) { ... }
    -    assign 'vertex' to each vertex in vertexlist
    -
    -  notes:
    -    uses 'vertexT *vertex;'
    -    at exit, vertex==NULL
    -*/
    -#define FORALLvertex_( vertexlist ) for (vertex=( vertexlist );vertex && vertex->next;vertex= vertex->next )
    -
    -/*----------------------------------
    -
    -  FORALLvisible_facets { ... }
    -    assign 'visible' to each visible facet in qh.visible_list
    -
    -  notes:
    -    uses 'vacetT *visible;'
    -    at exit, visible==NULL
    -*/
    -#define FORALLvisible_facets for (visible=qh visible_list; visible && visible->visible; visible= visible->next)
    -
    -/*----------------------------------
    -
    -  FORALLsame_( newfacet ) { ... }
    -    assign 'same' to each facet in newfacet->f.samecycle
    -
    -  notes:
    -    uses 'facetT *same;'
    -    stops when it returns to newfacet
    -*/
    -#define FORALLsame_(newfacet) for (same= newfacet->f.samecycle; same != newfacet; same= same->f.samecycle)
    -
    -/*----------------------------------
    -
    -  FORALLsame_cycle_( newfacet ) { ... }
    -    assign 'same' to each facet in newfacet->f.samecycle
    -
    -  notes:
    -    uses 'facetT *same;'
    -    at exit, same == NULL
    -*/
    -#define FORALLsame_cycle_(newfacet) \
    -     for (same= newfacet->f.samecycle; \
    -         same; same= (same == newfacet ?  NULL : same->f.samecycle))
    -
    -/*----------------------------------
    -
    -  FOREACHneighborA_( facet ) { ... }
    -    assign 'neighborA' to each neighbor in facet->neighbors
    -
    -  FOREACHneighborA_( vertex ) { ... }
    -    assign 'neighborA' to each neighbor in vertex->neighbors
    -
    -  declare:
    -    facetT *neighborA, **neighborAp;
    -
    -  see:
    -    FOREACHsetelement_
    -*/
    -#define FOREACHneighborA_(facet)  FOREACHsetelement_(facetT, facet->neighbors, neighborA)
    -
    -/*----------------------------------
    -
    -  FOREACHvisible_( facets ) { ... }
    -    assign 'visible' to each facet in facets
    -
    -  notes:
    -    uses 'facetT *facet, *facetp;'
    -    see FOREACHsetelement_
    -*/
    -#define FOREACHvisible_(facets) FOREACHsetelement_(facetT, facets, visible)
    -
    -/*----------------------------------
    -
    -  FOREACHnewfacet_( facets ) { ... }
    -    assign 'newfacet' to each facet in facets
    -
    -  notes:
    -    uses 'facetT *newfacet, *newfacetp;'
    -    see FOREACHsetelement_
    -*/
    -#define FOREACHnewfacet_(facets) FOREACHsetelement_(facetT, facets, newfacet)
    -
    -/*----------------------------------
    -
    -  FOREACHvertexA_( vertices ) { ... }
    -    assign 'vertexA' to each vertex in vertices
    -
    -  notes:
    -    uses 'vertexT *vertexA, *vertexAp;'
    -    see FOREACHsetelement_
    -*/
    -#define FOREACHvertexA_(vertices) FOREACHsetelement_(vertexT, vertices, vertexA)
    -
    -/*----------------------------------
    -
    -  FOREACHvertexreverse12_( vertices ) { ... }
    -    assign 'vertex' to each vertex in vertices
    -    reverse order of first two vertices
    -
    -  notes:
    -    uses 'vertexT *vertex, *vertexp;'
    -    see FOREACHsetelement_
    -*/
    -#define FOREACHvertexreverse12_(vertices) FOREACHsetelementreverse12_(vertexT, vertices, vertex)
    -
    -
    -/*=============== prototypes poly.c in alphabetical order ================*/
    -
    -void    qh_appendfacet(facetT *facet);
    -void    qh_appendvertex(vertexT *vertex);
    -void    qh_attachnewfacets(void);
    -boolT   qh_checkflipped(facetT *facet, realT *dist, boolT allerror);
    -void    qh_delfacet(facetT *facet);
    -void    qh_deletevisible(void /*qh visible_list, qh horizon_list*/);
    -setT   *qh_facetintersect(facetT *facetA, facetT *facetB, int *skipAp,int *skipBp, int extra);
    -int     qh_gethash(int hashsize, setT *set, int size, int firstindex, void *skipelem);
    -facetT *qh_makenewfacet(setT *vertices, boolT toporient, facetT *facet);
    -void    qh_makenewplanes(void /* newfacet_list */);
    -facetT *qh_makenew_nonsimplicial(facetT *visible, vertexT *apex, int *numnew);
    -facetT *qh_makenew_simplicial(facetT *visible, vertexT *apex, int *numnew);
    -void    qh_matchneighbor(facetT *newfacet, int newskip, int hashsize,
    -                          int *hashcount);
    -void    qh_matchnewfacets(void);
    -boolT   qh_matchvertices(int firstindex, setT *verticesA, int skipA,
    -                          setT *verticesB, int *skipB, boolT *same);
    -facetT *qh_newfacet(void);
    -ridgeT *qh_newridge(void);
    -int     qh_pointid(pointT *point);
    -void    qh_removefacet(facetT *facet);
    -void    qh_removevertex(vertexT *vertex);
    -void    qh_updatevertices(void);
    -
    -
    -/*========== -prototypes poly2.c in alphabetical order ===========*/
    -
    -void    qh_addhash(void* newelem, setT *hashtable, int hashsize, int hash);
    -void    qh_check_bestdist(void);
    -void    qh_check_maxout(void);
    -void    qh_check_output(void);
    -void    qh_check_point(pointT *point, facetT *facet, realT *maxoutside, realT *maxdist, facetT **errfacet1, facetT **errfacet2);
    -void    qh_check_points(void);
    -void    qh_checkconvex(facetT *facetlist, int fault);
    -void    qh_checkfacet(facetT *facet, boolT newmerge, boolT *waserrorp);
    -void    qh_checkflipped_all(facetT *facetlist);
    -void    qh_checkpolygon(facetT *facetlist);
    -void    qh_checkvertex(vertexT *vertex);
    -void    qh_clearcenters(qh_CENTER type);
    -void    qh_createsimplex(setT *vertices);
    -void    qh_delridge(ridgeT *ridge);
    -void    qh_delvertex(vertexT *vertex);
    -setT   *qh_facet3vertex(facetT *facet);
    -facetT *qh_findbestfacet(pointT *point, boolT bestoutside,
    -           realT *bestdist, boolT *isoutside);
    -facetT *qh_findbestlower(facetT *upperfacet, pointT *point, realT *bestdistp, int *numpart);
    -facetT *qh_findfacet_all(pointT *point, realT *bestdist, boolT *isoutside,
    -                          int *numpart);
    -int     qh_findgood(facetT *facetlist, int goodhorizon);
    -void    qh_findgood_all(facetT *facetlist);
    -void    qh_furthestnext(void /* qh facet_list */);
    -void    qh_furthestout(facetT *facet);
    -void    qh_infiniteloop(facetT *facet);
    -void    qh_initbuild(void);
    -void    qh_initialhull(setT *vertices);
    -setT   *qh_initialvertices(int dim, setT *maxpoints, pointT *points, int numpoints);
    -vertexT *qh_isvertex(pointT *point, setT *vertices);
    -vertexT *qh_makenewfacets(pointT *point /*horizon_list, visible_list*/);
    -void    qh_matchduplicates(facetT *atfacet, int atskip, int hashsize, int *hashcount);
    -void    qh_nearcoplanar(void /* qh.facet_list */);
    -vertexT *qh_nearvertex(facetT *facet, pointT *point, realT *bestdistp);
    -int     qh_newhashtable(int newsize);
    -vertexT *qh_newvertex(pointT *point);
    -ridgeT *qh_nextridge3d(ridgeT *atridge, facetT *facet, vertexT **vertexp);
    -void    qh_outcoplanar(void /* facet_list */);
    -pointT *qh_point(int id);
    -void    qh_point_add(setT *set, pointT *point, void *elem);
    -setT   *qh_pointfacet(void /*qh facet_list*/);
    -setT   *qh_pointvertex(void /*qh facet_list*/);
    -void    qh_prependfacet(facetT *facet, facetT **facetlist);
    -void    qh_printhashtable(FILE *fp);
    -void    qh_printlists(void);
    -void    qh_resetlists(boolT stats, boolT resetVisible /*qh newvertex_list newfacet_list visible_list*/);
    -void    qh_setvoronoi_all(void);
    -void    qh_triangulate(void /*qh facet_list*/);
    -void    qh_triangulate_facet(facetT *facetA, vertexT **first_vertex);
    -void    qh_triangulate_link(facetT *oldfacetA, facetT *facetA, facetT *oldfacetB, facetT *facetB);
    -void    qh_triangulate_mirror(facetT *facetA, facetT *facetB);
    -void    qh_triangulate_null(facetT *facetA);
    -void    qh_vertexintersect(setT **vertexsetA,setT *vertexsetB);
    -setT   *qh_vertexintersect_new(setT *vertexsetA,setT *vertexsetB);
    -void    qh_vertexneighbors(void /*qh facet_list*/);
    -boolT   qh_vertexsubset(setT *vertexsetA, setT *vertexsetB);
    -
    -
    -#endif /* qhDEFpoly */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/poly2.c b/scipy-0.10.1/scipy/spatial/qhull/src/poly2.c
    deleted file mode 100644
    index 451d66fe20..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/poly2.c
    +++ /dev/null
    @@ -1,3143 +0,0 @@
    -/*
      ---------------------------------
    -
    -   poly2.c
    -   implements polygons and simplices
    -
    -   see qh-poly.htm, poly.h and libqhull.h
    -
    -   frequently used code is in poly.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/poly2.c#40 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*======== functions in alphabetical order ==========*/
    -
    -/*---------------------------------
    -
    -  qh_addhash( newelem, hashtable, hashsize, hash )
    -    add newelem to linear hash table at hash if not already there
    -*/
    -void qh_addhash(void* newelem, setT *hashtable, int hashsize, int hash) {
    -  int scan;
    -  void *elem;
    -
    -  for (scan= (int)hash; (elem= SETelem_(hashtable, scan));
    -       scan= (++scan >= hashsize ? 0 : scan)) {
    -    if (elem == newelem)
    -      break;
    -  }
    -  /* loop terminates because qh_HASHfactor >= 1.1 by qh_initbuffers */
    -  if (!elem)
    -    SETelem_(hashtable, scan)= newelem;
    -} /* addhash */
    -
    -/*---------------------------------
    -
    -  qh_check_bestdist()
    -    check that all points are within max_outside of the nearest facet
    -    if qh.ONLYgood,
    -      ignores !good facets
    -
    -  see:
    -    qh_check_maxout(), qh_outerinner()
    -
    -  notes:
    -    only called from qh_check_points()
    -      seldom used since qh.MERGING is almost always set
    -    if notverified>0 at end of routine
    -      some points were well inside the hull.  If the hull contains
    -      a lens-shaped component, these points were not verified.  Use
    -      options 'Qi Tv' to verify all points.  (Exhaustive check also verifies)
    -
    -  design:
    -    determine facet for each point (if any)
    -    for each point
    -      start with the assigned facet or with the first facet
    -      find the best facet for the point and check all coplanar facets
    -      error if point is outside of facet
    -*/
    -void qh_check_bestdist(void) {
    -  boolT waserror= False, unassigned;
    -  facetT *facet, *bestfacet, *errfacet1= NULL, *errfacet2= NULL;
    -  facetT *facetlist;
    -  realT dist, maxoutside, maxdist= -REALmax;
    -  pointT *point;
    -  int numpart= 0, facet_i, facet_n, notgood= 0, notverified= 0;
    -  setT *facets;
    -
    -  trace1((qh ferr, 1020, "qh_check_bestdist: check points below nearest facet.  Facet_list f%d\n",
    -      qh facet_list->id));
    -  maxoutside= qh_maxouter();
    -  maxoutside += qh DISTround;
    -  /* one more qh.DISTround for check computation */
    -  trace1((qh ferr, 1021, "qh_check_bestdist: check that all points are within %2.2g of best facet\n", maxoutside));
    -  facets= qh_pointfacet(/*qh facet_list*/);
    -  if (!qh_QUICKhelp && qh PRINTprecision)
    -    qh_fprintf(qh ferr, 8091, "\n\
    -qhull output completed.  Verifying that %d points are\n\
    -below %2.2g of the nearest %sfacet.\n",
    -             qh_setsize(facets), maxoutside, (qh ONLYgood ?  "good " : ""));
    -  FOREACHfacet_i_(facets) {  /* for each point with facet assignment */
    -    if (facet)
    -      unassigned= False;
    -    else {
    -      unassigned= True;
    -      facet= qh facet_list;
    -    }
    -    point= qh_point(facet_i);
    -    if (point == qh GOODpointp)
    -      continue;
    -    qh_distplane(point, facet, &dist);
    -    numpart++;
    -    bestfacet= qh_findbesthorizon(!qh_IScheckmax, point, facet, qh_NOupper, &dist, &numpart);
    -    /* occurs after statistics reported */
    -    maximize_(maxdist, dist);
    -    if (dist > maxoutside) {
    -      if (qh ONLYgood && !bestfacet->good
    -          && !((bestfacet= qh_findgooddist(point, bestfacet, &dist, &facetlist))
    -               && dist > maxoutside))
    -        notgood++;
    -      else {
    -        waserror= True;
    -        qh_fprintf(qh ferr, 6109, "qhull precision error: point p%d is outside facet f%d, distance= %6.8g maxoutside= %6.8g\n",
    -                facet_i, bestfacet->id, dist, maxoutside);
    -        if (errfacet1 != bestfacet) {
    -          errfacet2= errfacet1;
    -          errfacet1= bestfacet;
    -        }
    -      }
    -    }else if (unassigned && dist < -qh MAXcoplanar)
    -      notverified++;
    -  }
    -  qh_settempfree(&facets);
    -  if (notverified && !qh DELAUNAY && !qh_QUICKhelp && qh PRINTprecision)
    -    qh_fprintf(qh ferr, 8092, "\n%d points were well inside the hull.  If the hull contains\n\
    -a lens-shaped component, these points were not verified.  Use\n\
    -options 'Qci Tv' to verify all points.\n", notverified);
    -  if (maxdist > qh outside_err) {
    -    qh_fprintf(qh ferr, 6110, "qhull precision error (qh_check_bestdist): a coplanar point is %6.2g from convex hull.  The maximum value(qh.outside_err) is %6.2g\n",
    -              maxdist, qh outside_err);
    -    qh_errexit2 (qh_ERRprec, errfacet1, errfacet2);
    -  }else if (waserror && qh outside_err > REALmax/2)
    -    qh_errexit2 (qh_ERRprec, errfacet1, errfacet2);
    -  /* else if waserror, the error was logged to qh.ferr but does not effect the output */
    -  trace0((qh ferr, 20, "qh_check_bestdist: max distance outside %2.2g\n", maxdist));
    -} /* check_bestdist */
    -
    -/*---------------------------------
    -
    -  qh_check_maxout()
    -    updates qh.max_outside by checking all points against bestfacet
    -    if qh.ONLYgood, ignores !good facets
    -
    -  returns:
    -    updates facet->maxoutside via qh_findbesthorizon()
    -    sets qh.maxoutdone
    -    if printing qh.min_vertex (qh_outerinner),
    -      it is updated to the current vertices
    -    removes inside/coplanar points from coplanarset as needed
    -
    -  notes:
    -    defines coplanar as min_vertex instead of MAXcoplanar
    -    may not need to check near-inside points because of qh.MAXcoplanar
    -      and qh.KEEPnearinside (before it was -DISTround)
    -
    -  see also:
    -    qh_check_bestdist()
    -
    -  design:
    -    if qh.min_vertex is needed
    -      for all neighbors of all vertices
    -        test distance from vertex to neighbor
    -    determine facet for each point (if any)
    -    for each point with an assigned facet
    -      find the best facet for the point and check all coplanar facets
    -        (updates outer planes)
    -    remove near-inside points from coplanar sets
    -*/
    -#ifndef qh_NOmerge
    -void qh_check_maxout(void) {
    -  facetT *facet, *bestfacet, *neighbor, **neighborp, *facetlist;
    -  realT dist, maxoutside, minvertex, old_maxoutside;
    -  pointT *point;
    -  int numpart= 0, facet_i, facet_n, notgood= 0;
    -  setT *facets, *vertices;
    -  vertexT *vertex;
    -
    -  trace1((qh ferr, 1022, "qh_check_maxout: check and update maxoutside for each facet.\n"));
    -  maxoutside= minvertex= 0;
    -  if (qh VERTEXneighbors
    -  && (qh PRINTsummary || qh KEEPinside || qh KEEPcoplanar
    -        || qh TRACElevel || qh PRINTstatistics
    -        || qh PRINTout[0] == qh_PRINTsummary || qh PRINTout[0] == qh_PRINTnone)) {
    -    trace1((qh ferr, 1023, "qh_check_maxout: determine actual maxoutside and minvertex\n"));
    -    vertices= qh_pointvertex(/*qh facet_list*/);
    -    FORALLvertices {
    -      FOREACHneighbor_(vertex) {
    -        zinc_(Zdistvertex);  /* distance also computed by main loop below */
    -        qh_distplane(vertex->point, neighbor, &dist);
    -        minimize_(minvertex, dist);
    -        if (-dist > qh TRACEdist || dist > qh TRACEdist
    -        || neighbor == qh tracefacet || vertex == qh tracevertex)
    -          qh_fprintf(qh ferr, 8093, "qh_check_maxout: p%d(v%d) is %.2g from f%d\n",
    -                    qh_pointid(vertex->point), vertex->id, dist, neighbor->id);
    -      }
    -    }
    -    if (qh MERGING) {
    -      wmin_(Wminvertex, qh min_vertex);
    -    }
    -    qh min_vertex= minvertex;
    -    qh_settempfree(&vertices);
    -  }
    -  facets= qh_pointfacet(/*qh facet_list*/);
    -  do {
    -    old_maxoutside= fmax_(qh max_outside, maxoutside);
    -    FOREACHfacet_i_(facets) {     /* for each point with facet assignment */
    -      if (facet) {
    -        point= qh_point(facet_i);
    -        if (point == qh GOODpointp)
    -          continue;
    -        zzinc_(Ztotcheck);
    -        qh_distplane(point, facet, &dist);
    -        numpart++;
    -        bestfacet= qh_findbesthorizon(qh_IScheckmax, point, facet, !qh_NOupper, &dist, &numpart);
    -        if (bestfacet && dist > maxoutside) {
    -          if (qh ONLYgood && !bestfacet->good
    -          && !((bestfacet= qh_findgooddist(point, bestfacet, &dist, &facetlist))
    -               && dist > maxoutside))
    -            notgood++;
    -          else
    -            maxoutside= dist;
    -        }
    -        if (dist > qh TRACEdist || (bestfacet && bestfacet == qh tracefacet))
    -          qh_fprintf(qh ferr, 8094, "qh_check_maxout: p%d is %.2g above f%d\n",
    -                     qh_pointid(point), dist, bestfacet->id);
    -      }
    -    }
    -  }while
    -    (maxoutside > 2*old_maxoutside);
    -    /* if qh.maxoutside increases substantially, qh_SEARCHdist is not valid
    -          e.g., RBOX 5000 s Z1 G1e-13 t1001200614 | qhull */
    -  zzadd_(Zcheckpart, numpart);
    -  qh_settempfree(&facets);
    -  wval_(Wmaxout)= maxoutside - qh max_outside;
    -  wmax_(Wmaxoutside, qh max_outside);
    -  qh max_outside= maxoutside;
    -  qh_nearcoplanar(/*qh.facet_list*/);
    -  qh maxoutdone= True;
    -  trace1((qh ferr, 1024, "qh_check_maxout: maxoutside %2.2g, min_vertex %2.2g, outside of not good %d\n",
    -       maxoutside, qh min_vertex, notgood));
    -} /* check_maxout */
    -#else /* qh_NOmerge */
    -void qh_check_maxout(void) {
    -}
    -#endif
    -
    -/*---------------------------------
    -
    -  qh_check_output()
    -    performs the checks at the end of qhull algorithm
    -    Maybe called after voronoi output.  Will recompute otherwise centrums are Voronoi centers instead
    -*/
    -void qh_check_output(void) {
    -  int i;
    -
    -  if (qh STOPcone)
    -    return;
    -  if (qh VERIFYoutput | qh IStracing | qh CHECKfrequently) {
    -    qh_checkpolygon(qh facet_list);
    -    qh_checkflipped_all(qh facet_list);
    -    qh_checkconvex(qh facet_list, qh_ALGORITHMfault);
    -  }else if (!qh MERGING && qh_newstats(qhstat precision, &i)) {
    -    qh_checkflipped_all(qh facet_list);
    -    qh_checkconvex(qh facet_list, qh_ALGORITHMfault);
    -  }
    -} /* check_output */
    -
    -
    -
    -/*---------------------------------
    -
    -  qh_check_point( point, facet, maxoutside, maxdist, errfacet1, errfacet2 )
    -    check that point is less than maxoutside from facet
    -*/
    -void qh_check_point(pointT *point, facetT *facet, realT *maxoutside, realT *maxdist, facetT **errfacet1, facetT **errfacet2) {
    -  realT dist;
    -
    -  /* occurs after statistics reported */
    -  qh_distplane(point, facet, &dist);
    -  if (dist > *maxoutside) {
    -    if (*errfacet1 != facet) {
    -      *errfacet2= *errfacet1;
    -      *errfacet1= facet;
    -    }
    -    qh_fprintf(qh ferr, 6111, "qhull precision error: point p%d is outside facet f%d, distance= %6.8g maxoutside= %6.8g\n",
    -              qh_pointid(point), facet->id, dist, *maxoutside);
    -  }
    -  maximize_(*maxdist, dist);
    -} /* qh_check_point */
    -
    -
    -/*---------------------------------
    -
    -  qh_check_points()
    -    checks that all points are inside all facets
    -
    -  notes:
    -    if many points and qh_check_maxout not called (i.e., !qh.MERGING),
    -       calls qh_findbesthorizon (seldom done).
    -    ignores flipped facets
    -    maxoutside includes 2 qh.DISTrounds
    -      one qh.DISTround for the computed distances in qh_check_points
    -    qh_printafacet and qh_printsummary needs only one qh.DISTround
    -    the computation for qh.VERIFYdirect does not account for qh.other_points
    -
    -  design:
    -    if many points
    -      use qh_check_bestdist()
    -    else
    -      for all facets
    -        for all points
    -          check that point is inside facet
    -*/
    -void qh_check_points(void) {
    -  facetT *facet, *errfacet1= NULL, *errfacet2= NULL;
    -  realT total, maxoutside, maxdist= -REALmax;
    -  pointT *point, **pointp, *pointtemp;
    -  boolT testouter;
    -
    -  maxoutside= qh_maxouter();
    -  maxoutside += qh DISTround;
    -  /* one more qh.DISTround for check computation */
    -  trace1((qh ferr, 1025, "qh_check_points: check all points below %2.2g of all facet planes\n",
    -          maxoutside));
    -  if (qh num_good)   /* miss counts other_points and !good facets */
    -     total= (float)qh num_good * (float)qh num_points;
    -  else
    -     total= (float)qh num_facets * (float)qh num_points;
    -  if (total >= qh_VERIFYdirect && !qh maxoutdone) {
    -    if (!qh_QUICKhelp && qh SKIPcheckmax && qh MERGING)
    -      qh_fprintf(qh ferr, 7075, "qhull input warning: merging without checking outer planes('Q5' or 'Po').\n\
    -Verify may report that a point is outside of a facet.\n");
    -    qh_check_bestdist();
    -  }else {
    -    if (qh_MAXoutside && qh maxoutdone)
    -      testouter= True;
    -    else
    -      testouter= False;
    -    if (!qh_QUICKhelp) {
    -      if (qh MERGEexact)
    -        qh_fprintf(qh ferr, 7076, "qhull input warning: exact merge ('Qx').  Verify may report that a point\n\
    -is outside of a facet.  See qh-optq.htm#Qx\n");
    -      else if (qh SKIPcheckmax || qh NOnearinside)
    -        qh_fprintf(qh ferr, 7077, "qhull input warning: no outer plane check ('Q5') or no processing of\n\
    -near-inside points ('Q8').  Verify may report that a point is outside\n\
    -of a facet.\n");
    -    }
    -    if (qh PRINTprecision) {
    -      if (testouter)
    -        qh_fprintf(qh ferr, 8098, "\n\
    -Output completed.  Verifying that all points are below outer planes of\n\
    -all %sfacets.  Will make %2.0f distance computations.\n",
    -              (qh ONLYgood ?  "good " : ""), total);
    -      else
    -        qh_fprintf(qh ferr, 8099, "\n\
    -Output completed.  Verifying that all points are below %2.2g of\n\
    -all %sfacets.  Will make %2.0f distance computations.\n",
    -              maxoutside, (qh ONLYgood ?  "good " : ""), total);
    -    }
    -    FORALLfacets {
    -      if (!facet->good && qh ONLYgood)
    -        continue;
    -      if (facet->flipped)
    -        continue;
    -      if (!facet->normal) {
    -        qh_fprintf(qh ferr, 7061, "qhull warning (qh_check_points): missing normal for facet f%d\n", facet->id);
    -        continue;
    -      }
    -      if (testouter) {
    -#if qh_MAXoutside
    -        maxoutside= facet->maxoutside + 2* qh DISTround;
    -        /* one DISTround to actual point and another to computed point */
    -#endif
    -      }
    -      FORALLpoints {
    -        if (point != qh GOODpointp)
    -          qh_check_point(point, facet, &maxoutside, &maxdist, &errfacet1, &errfacet2);
    -      }
    -      FOREACHpoint_(qh other_points) {
    -        if (point != qh GOODpointp)
    -          qh_check_point(point, facet, &maxoutside, &maxdist, &errfacet1, &errfacet2);
    -      }
    -    }
    -    if (maxdist > qh outside_err) {
    -      qh_fprintf(qh ferr, 6112, "qhull precision error (qh_check_points): a coplanar point is %6.2g from convex hull.  The maximum value(qh.outside_err) is %6.2g\n",
    -                maxdist, qh outside_err );
    -      qh_errexit2( qh_ERRprec, errfacet1, errfacet2 );
    -    }else if (errfacet1 && qh outside_err > REALmax/2)
    -        qh_errexit2( qh_ERRprec, errfacet1, errfacet2 );
    -    /* else if errfacet1, the error was logged to qh.ferr but does not effect the output */
    -    trace0((qh ferr, 21, "qh_check_points: max distance outside %2.2g\n", maxdist));
    -  }
    -} /* check_points */
    -
    -
    -/*---------------------------------
    -
    -  qh_checkconvex( facetlist, fault )
    -    check that each ridge in facetlist is convex
    -    fault = qh_DATAfault if reporting errors
    -          = qh_ALGORITHMfault otherwise
    -
    -  returns:
    -    counts Zconcaveridges and Zcoplanarridges
    -    errors if concaveridge or if merging an coplanar ridge
    -
    -  note:
    -    if not merging,
    -      tests vertices for neighboring simplicial facets
    -    else if ZEROcentrum,
    -      tests vertices for neighboring simplicial   facets
    -    else
    -      tests centrums of neighboring facets
    -
    -  design:
    -    for all facets
    -      report flipped facets
    -      if ZEROcentrum and simplicial neighbors
    -        test vertices for neighboring simplicial facets
    -      else
    -        test centrum against all neighbors
    -*/
    -void qh_checkconvex(facetT *facetlist, int fault) {
    -  facetT *facet, *neighbor, **neighborp, *errfacet1=NULL, *errfacet2=NULL;
    -  vertexT *vertex;
    -  realT dist;
    -  pointT *centrum;
    -  boolT waserror= False, centrum_warning= False, tempcentrum= False, allsimplicial;
    -  int neighbor_i;
    -
    -  trace1((qh ferr, 1026, "qh_checkconvex: check all ridges are convex\n"));
    -  if (!qh RERUN) {
    -    zzval_(Zconcaveridges)= 0;
    -    zzval_(Zcoplanarridges)= 0;
    -  }
    -  FORALLfacet_(facetlist) {
    -    if (facet->flipped) {
    -      qh_precision("flipped facet");
    -      qh_fprintf(qh ferr, 6113, "qhull precision error: f%d is flipped(interior point is outside)\n",
    -               facet->id);
    -      errfacet1= facet;
    -      waserror= True;
    -      continue;
    -    }
    -    if (qh MERGING && (!qh ZEROcentrum || !facet->simplicial || facet->tricoplanar))
    -      allsimplicial= False;
    -    else {
    -      allsimplicial= True;
    -      neighbor_i= 0;
    -      FOREACHneighbor_(facet) {
    -        vertex= SETelemt_(facet->vertices, neighbor_i++, vertexT);
    -        if (!neighbor->simplicial || neighbor->tricoplanar) {
    -          allsimplicial= False;
    -          continue;
    -        }
    -        qh_distplane(vertex->point, neighbor, &dist);
    -        if (dist > -qh DISTround) {
    -          if (fault == qh_DATAfault) {
    -            qh_precision("coplanar or concave ridge");
    -            qh_fprintf(qh ferr, 6114, "qhull precision error: initial simplex is not convex. Distance=%.2g\n", dist);
    -            qh_errexit(qh_ERRsingular, NULL, NULL);
    -          }
    -          if (dist > qh DISTround) {
    -            zzinc_(Zconcaveridges);
    -            qh_precision("concave ridge");
    -            qh_fprintf(qh ferr, 6115, "qhull precision error: f%d is concave to f%d, since p%d(v%d) is %6.4g above\n",
    -              facet->id, neighbor->id, qh_pointid(vertex->point), vertex->id, dist);
    -            errfacet1= facet;
    -            errfacet2= neighbor;
    -            waserror= True;
    -          }else if (qh ZEROcentrum) {
    -            if (dist > 0) {     /* qh_checkzero checks that dist < - qh DISTround */
    -              zzinc_(Zcoplanarridges);
    -              qh_precision("coplanar ridge");
    -              qh_fprintf(qh ferr, 6116, "qhull precision error: f%d is clearly not convex to f%d, since p%d(v%d) is %6.4g above\n",
    -                facet->id, neighbor->id, qh_pointid(vertex->point), vertex->id, dist);
    -              errfacet1= facet;
    -              errfacet2= neighbor;
    -              waserror= True;
    -            }
    -          }else {
    -            zzinc_(Zcoplanarridges);
    -            qh_precision("coplanar ridge");
    -            trace0((qh ferr, 22, "qhull precision error: f%d may be coplanar to f%d, since p%d(v%d) is within %6.4g during p%d\n",
    -              facet->id, neighbor->id, qh_pointid(vertex->point), vertex->id, dist, qh furthest_id));
    -          }
    -        }
    -      }
    -    }
    -    if (!allsimplicial) {
    -      if (qh CENTERtype == qh_AScentrum) {
    -        if (!facet->center)
    -          facet->center= qh_getcentrum(facet);
    -        centrum= facet->center;
    -      }else {
    -        if (!centrum_warning && (!facet->simplicial || facet->tricoplanar)) {
    -           centrum_warning= True;
    -           qh_fprintf(qh ferr, 7062, "qhull warning: recomputing centrums for convexity test.  This may lead to false, precision errors.\n");
    -        }
    -        centrum= qh_getcentrum(facet);
    -        tempcentrum= True;
    -      }
    -      FOREACHneighbor_(facet) {
    -        if (qh ZEROcentrum && facet->simplicial && neighbor->simplicial)
    -          continue;
    -        if (facet->tricoplanar || neighbor->tricoplanar)
    -          continue;
    -        zzinc_(Zdistconvex);
    -        qh_distplane(centrum, neighbor, &dist);
    -        if (dist > qh DISTround) {
    -          zzinc_(Zconcaveridges);
    -          qh_precision("concave ridge");
    -          qh_fprintf(qh ferr, 6117, "qhull precision error: f%d is concave to f%d.  Centrum of f%d is %6.4g above f%d\n",
    -            facet->id, neighbor->id, facet->id, dist, neighbor->id);
    -          errfacet1= facet;
    -          errfacet2= neighbor;
    -          waserror= True;
    -        }else if (dist >= 0.0) {   /* if arithmetic always rounds the same,
    -                                     can test against centrum radius instead */
    -          zzinc_(Zcoplanarridges);
    -          qh_precision("coplanar ridge");
    -          qh_fprintf(qh ferr, 6118, "qhull precision error: f%d is coplanar or concave to f%d.  Centrum of f%d is %6.4g above f%d\n",
    -            facet->id, neighbor->id, facet->id, dist, neighbor->id);
    -          errfacet1= facet;
    -          errfacet2= neighbor;
    -          waserror= True;
    -        }
    -      }
    -      if (tempcentrum)
    -        qh_memfree(centrum, qh normal_size);
    -    }
    -  }
    -  if (waserror && !qh FORCEoutput)
    -    qh_errexit2 (qh_ERRprec, errfacet1, errfacet2);
    -} /* checkconvex */
    -
    -
    -/*---------------------------------
    -
    -  qh_checkfacet( facet, newmerge, waserror )
    -    checks for consistency errors in facet
    -    newmerge set if from merge.c
    -
    -  returns:
    -    sets waserror if any error occurs
    -
    -  checks:
    -    vertex ids are inverse sorted
    -    unless newmerge, at least hull_dim neighbors and vertices (exactly if simplicial)
    -    if non-simplicial, at least as many ridges as neighbors
    -    neighbors are not duplicated
    -    ridges are not duplicated
    -    in 3-d, ridges=verticies
    -    (qh.hull_dim-1) ridge vertices
    -    neighbors are reciprocated
    -    ridge neighbors are facet neighbors and a ridge for every neighbor
    -    simplicial neighbors match facetintersect
    -    vertex intersection matches vertices of common ridges
    -    vertex neighbors and facet vertices agree
    -    all ridges have distinct vertex sets
    -
    -  notes:
    -    uses neighbor->seen
    -
    -  design:
    -    check sets
    -    check vertices
    -    check sizes of neighbors and vertices
    -    check for qh_MERGEridge and qh_DUPLICATEridge flags
    -    check neighbor set
    -    check ridge set
    -    check ridges, neighbors, and vertices
    -*/
    -void qh_checkfacet(facetT *facet, boolT newmerge, boolT *waserrorp) {
    -  facetT *neighbor, **neighborp, *errother=NULL;
    -  ridgeT *ridge, **ridgep, *errridge= NULL, *ridge2;
    -  vertexT *vertex, **vertexp;
    -  unsigned previousid= INT_MAX;
    -  int numneighbors, numvertices, numridges=0, numRvertices=0;
    -  boolT waserror= False;
    -  int skipA, skipB, ridge_i, ridge_n, i;
    -  setT *intersection;
    -
    -  if (facet->visible) {
    -    qh_fprintf(qh ferr, 6119, "qhull internal error (qh_checkfacet): facet f%d is on the visible_list\n",
    -      facet->id);
    -    qh_errexit(qh_ERRqhull, facet, NULL);
    -  }
    -  if (!facet->normal) {
    -    qh_fprintf(qh ferr, 6120, "qhull internal error (qh_checkfacet): facet f%d does not have  a normal\n",
    -      facet->id);
    -    waserror= True;
    -  }
    -  qh_setcheck(facet->vertices, "vertices for f", facet->id);
    -  qh_setcheck(facet->ridges, "ridges for f", facet->id);
    -  qh_setcheck(facet->outsideset, "outsideset for f", facet->id);
    -  qh_setcheck(facet->coplanarset, "coplanarset for f", facet->id);
    -  qh_setcheck(facet->neighbors, "neighbors for f", facet->id);
    -  FOREACHvertex_(facet->vertices) {
    -    if (vertex->deleted) {
    -      qh_fprintf(qh ferr, 6121, "qhull internal error (qh_checkfacet): deleted vertex v%d in f%d\n", vertex->id, facet->id);
    -      qh_errprint("ERRONEOUS", NULL, NULL, NULL, vertex);
    -      waserror= True;
    -    }
    -    if (vertex->id >= previousid) {
    -      qh_fprintf(qh ferr, 6122, "qhull internal error (qh_checkfacet): vertices of f%d are not in descending id order at v%d\n", facet->id, vertex->id);
    -      waserror= True;
    -      break;
    -    }
    -    previousid= vertex->id;
    -  }
    -  numneighbors= qh_setsize(facet->neighbors);
    -  numvertices= qh_setsize(facet->vertices);
    -  numridges= qh_setsize(facet->ridges);
    -  if (facet->simplicial) {
    -    if (numvertices+numneighbors != 2*qh hull_dim
    -    && !facet->degenerate && !facet->redundant) {
    -      qh_fprintf(qh ferr, 6123, "qhull internal error (qh_checkfacet): for simplicial facet f%d, #vertices %d + #neighbors %d != 2*qh hull_dim\n",
    -                facet->id, numvertices, numneighbors);
    -      qh_setprint(qh ferr, "", facet->neighbors);
    -      waserror= True;
    -    }
    -  }else { /* non-simplicial */
    -    if (!newmerge
    -    &&(numvertices < qh hull_dim || numneighbors < qh hull_dim)
    -    && !facet->degenerate && !facet->redundant) {
    -      qh_fprintf(qh ferr, 6124, "qhull internal error (qh_checkfacet): for facet f%d, #vertices %d or #neighbors %d < qh hull_dim\n",
    -         facet->id, numvertices, numneighbors);
    -       waserror= True;
    -    }
    -    /* in 3-d, can get a vertex twice in an edge list, e.g., RBOX 1000 s W1e-13 t995849315 D2 | QHULL d Tc Tv TP624 TW1e-13 T4 */
    -    if (numridges < numneighbors
    -    ||(qh hull_dim == 3 && numvertices > numridges && !qh NEWfacets)
    -    ||(qh hull_dim == 2 && numridges + numvertices + numneighbors != 6)) {
    -      if (!facet->degenerate && !facet->redundant) {
    -        qh_fprintf(qh ferr, 6125, "qhull internal error (qh_checkfacet): for facet f%d, #ridges %d < #neighbors %d or(3-d) > #vertices %d or(2-d) not all 2\n",
    -            facet->id, numridges, numneighbors, numvertices);
    -        waserror= True;
    -      }
    -    }
    -  }
    -  FOREACHneighbor_(facet) {
    -    if (neighbor == qh_MERGEridge || neighbor == qh_DUPLICATEridge) {
    -      qh_fprintf(qh ferr, 6126, "qhull internal error (qh_checkfacet): facet f%d still has a MERGE or DUP neighbor\n", facet->id);
    -      qh_errexit(qh_ERRqhull, facet, NULL);
    -    }
    -    neighbor->seen= True;
    -  }
    -  FOREACHneighbor_(facet) {
    -    if (!qh_setin(neighbor->neighbors, facet)) {
    -      qh_fprintf(qh ferr, 6127, "qhull internal error (qh_checkfacet): facet f%d has neighbor f%d, but f%d does not have neighbor f%d\n",
    -              facet->id, neighbor->id, neighbor->id, facet->id);
    -      errother= neighbor;
    -      waserror= True;
    -    }
    -    if (!neighbor->seen) {
    -      qh_fprintf(qh ferr, 6128, "qhull internal error (qh_checkfacet): facet f%d has a duplicate neighbor f%d\n",
    -              facet->id, neighbor->id);
    -      errother= neighbor;
    -      waserror= True;
    -    }
    -    neighbor->seen= False;
    -  }
    -  FOREACHridge_(facet->ridges) {
    -    qh_setcheck(ridge->vertices, "vertices for r", ridge->id);
    -    ridge->seen= False;
    -  }
    -  FOREACHridge_(facet->ridges) {
    -    if (ridge->seen) {
    -      qh_fprintf(qh ferr, 6129, "qhull internal error (qh_checkfacet): facet f%d has a duplicate ridge r%d\n",
    -              facet->id, ridge->id);
    -      errridge= ridge;
    -      waserror= True;
    -    }
    -    ridge->seen= True;
    -    numRvertices= qh_setsize(ridge->vertices);
    -    if (numRvertices != qh hull_dim - 1) {
    -      qh_fprintf(qh ferr, 6130, "qhull internal error (qh_checkfacet): ridge between f%d and f%d has %d vertices\n",
    -                ridge->top->id, ridge->bottom->id, numRvertices);
    -      errridge= ridge;
    -      waserror= True;
    -    }
    -    neighbor= otherfacet_(ridge, facet);
    -    neighbor->seen= True;
    -    if (!qh_setin(facet->neighbors, neighbor)) {
    -      qh_fprintf(qh ferr, 6131, "qhull internal error (qh_checkfacet): for facet f%d, neighbor f%d of ridge r%d not in facet\n",
    -           facet->id, neighbor->id, ridge->id);
    -      errridge= ridge;
    -      waserror= True;
    -    }
    -  }
    -  if (!facet->simplicial) {
    -    FOREACHneighbor_(facet) {
    -      if (!neighbor->seen) {
    -        qh_fprintf(qh ferr, 6132, "qhull internal error (qh_checkfacet): facet f%d does not have a ridge for neighbor f%d\n",
    -              facet->id, neighbor->id);
    -        errother= neighbor;
    -        waserror= True;
    -      }
    -      intersection= qh_vertexintersect_new(facet->vertices, neighbor->vertices);
    -      qh_settemppush(intersection);
    -      FOREACHvertex_(facet->vertices) {
    -        vertex->seen= False;
    -        vertex->seen2= False;
    -      }
    -      FOREACHvertex_(intersection)
    -        vertex->seen= True;
    -      FOREACHridge_(facet->ridges) {
    -        if (neighbor != otherfacet_(ridge, facet))
    -            continue;
    -        FOREACHvertex_(ridge->vertices) {
    -          if (!vertex->seen) {
    -            qh_fprintf(qh ferr, 6133, "qhull internal error (qh_checkfacet): vertex v%d in r%d not in f%d intersect f%d\n",
    -                  vertex->id, ridge->id, facet->id, neighbor->id);
    -            qh_errexit(qh_ERRqhull, facet, ridge);
    -          }
    -          vertex->seen2= True;
    -        }
    -      }
    -      if (!newmerge) {
    -        FOREACHvertex_(intersection) {
    -          if (!vertex->seen2) {
    -            if (qh IStracing >=3 || !qh MERGING) {
    -              qh_fprintf(qh ferr, 6134, "qhull precision error (qh_checkfacet): vertex v%d in f%d intersect f%d but\n\
    - not in a ridge.  This is ok under merging.  Last point was p%d\n",
    -                     vertex->id, facet->id, neighbor->id, qh furthest_id);
    -              if (!qh FORCEoutput && !qh MERGING) {
    -                qh_errprint("ERRONEOUS", facet, neighbor, NULL, vertex);
    -                if (!qh MERGING)
    -                  qh_errexit(qh_ERRqhull, NULL, NULL);
    -              }
    -            }
    -          }
    -        }
    -      }
    -      qh_settempfree(&intersection);
    -    }
    -  }else { /* simplicial */
    -    FOREACHneighbor_(facet) {
    -      if (neighbor->simplicial) {
    -        skipA= SETindex_(facet->neighbors, neighbor);
    -        skipB= qh_setindex(neighbor->neighbors, facet);
    -        if (!qh_setequal_skip(facet->vertices, skipA, neighbor->vertices, skipB)) {
    -          qh_fprintf(qh ferr, 6135, "qhull internal error (qh_checkfacet): facet f%d skip %d and neighbor f%d skip %d do not match \n",
    -                   facet->id, skipA, neighbor->id, skipB);
    -          errother= neighbor;
    -          waserror= True;
    -        }
    -      }
    -    }
    -  }
    -  if (qh hull_dim < 5 && (qh IStracing > 2 || qh CHECKfrequently)) {
    -    FOREACHridge_i_(facet->ridges) {           /* expensive */
    -      for (i=ridge_i+1; i < ridge_n; i++) {
    -        ridge2= SETelemt_(facet->ridges, i, ridgeT);
    -        if (qh_setequal(ridge->vertices, ridge2->vertices)) {
    -          qh_fprintf(qh ferr, 6227, "Qhull internal error (qh_checkfacet): ridges r%d and r%d have the same vertices\n",
    -                  ridge->id, ridge2->id);
    -          errridge= ridge;
    -          waserror= True;
    -        }
    -      }
    -    }
    -  }
    -  if (waserror) {
    -    qh_errprint("ERRONEOUS", facet, errother, errridge, NULL);
    -    *waserrorp= True;
    -  }
    -} /* checkfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_checkflipped_all( facetlist )
    -    checks orientation of facets in list against interior point
    -*/
    -void qh_checkflipped_all(facetT *facetlist) {
    -  facetT *facet;
    -  boolT waserror= False;
    -  realT dist;
    -
    -  if (facetlist == qh facet_list)
    -    zzval_(Zflippedfacets)= 0;
    -  FORALLfacet_(facetlist) {
    -    if (facet->normal && !qh_checkflipped(facet, &dist, !qh_ALL)) {
    -      qh_fprintf(qh ferr, 6136, "qhull precision error: facet f%d is flipped, distance= %6.12g\n",
    -              facet->id, dist);
    -      if (!qh FORCEoutput) {
    -        qh_errprint("ERRONEOUS", facet, NULL, NULL, NULL);
    -        waserror= True;
    -      }
    -    }
    -  }
    -  if (waserror) {
    -    qh_fprintf(qh ferr, 8101, "\n\
    -A flipped facet occurs when its distance to the interior point is\n\
    -greater than %2.2g, the maximum roundoff error.\n", -qh DISTround);
    -    qh_errexit(qh_ERRprec, NULL, NULL);
    -  }
    -} /* checkflipped_all */
    -
    -/*---------------------------------
    -
    -  qh_checkpolygon( facetlist )
    -    checks the correctness of the structure
    -
    -  notes:
    -    call with either qh.facet_list or qh.newfacet_list
    -    checks num_facets and num_vertices if qh.facet_list
    -
    -  design:
    -    for each facet
    -      checks facet and outside set
    -    initializes vertexlist
    -    for each facet
    -      checks vertex set
    -    if checking all facets(qh.facetlist)
    -      check facet count
    -      if qh.VERTEXneighbors
    -        check vertex neighbors and count
    -      check vertex count
    -*/
    -void qh_checkpolygon(facetT *facetlist) {
    -  facetT *facet;
    -  vertexT *vertex, **vertexp, *vertexlist;
    -  int numfacets= 0, numvertices= 0, numridges= 0;
    -  int totvneighbors= 0, totvertices= 0;
    -  boolT waserror= False, nextseen= False, visibleseen= False;
    -
    -  trace1((qh ferr, 1027, "qh_checkpolygon: check all facets from f%d\n", facetlist->id));
    -  if (facetlist != qh facet_list || qh ONLYgood)
    -    nextseen= True;
    -  FORALLfacet_(facetlist) {
    -    if (facet == qh visible_list)
    -      visibleseen= True;
    -    if (!facet->visible) {
    -      if (!nextseen) {
    -        if (facet == qh facet_next)
    -          nextseen= True;
    -        else if (qh_setsize(facet->outsideset)) {
    -          if (!qh NARROWhull
    -#if !qh_COMPUTEfurthest
    -               || facet->furthestdist >= qh MINoutside
    -#endif
    -                        ) {
    -            qh_fprintf(qh ferr, 6137, "qhull internal error (qh_checkpolygon): f%d has outside points before qh facet_next\n",
    -                     facet->id);
    -            qh_errexit(qh_ERRqhull, facet, NULL);
    -          }
    -        }
    -      }
    -      numfacets++;
    -      qh_checkfacet(facet, False, &waserror);
    -    }
    -  }
    -  if (qh visible_list && !visibleseen && facetlist == qh facet_list) {
    -    qh_fprintf(qh ferr, 6138, "qhull internal error (qh_checkpolygon): visible list f%d no longer on facet list\n", qh visible_list->id);
    -    qh_printlists();
    -    qh_errexit(qh_ERRqhull, qh visible_list, NULL);
    -  }
    -  if (facetlist == qh facet_list)
    -    vertexlist= qh vertex_list;
    -  else if (facetlist == qh newfacet_list)
    -    vertexlist= qh newvertex_list;
    -  else
    -    vertexlist= NULL;
    -  FORALLvertex_(vertexlist) {
    -    vertex->seen= False;
    -    vertex->visitid= 0;
    -  }
    -  FORALLfacet_(facetlist) {
    -    if (facet->visible)
    -      continue;
    -    if (facet->simplicial)
    -      numridges += qh hull_dim;
    -    else
    -      numridges += qh_setsize(facet->ridges);
    -    FOREACHvertex_(facet->vertices) {
    -      vertex->visitid++;
    -      if (!vertex->seen) {
    -        vertex->seen= True;
    -        numvertices++;
    -        if (qh_pointid(vertex->point) == -1) {
    -          qh_fprintf(qh ferr, 6139, "qhull internal error (qh_checkpolygon): unknown point %p for vertex v%d first_point %p\n",
    -                   vertex->point, vertex->id, qh first_point);
    -          waserror= True;
    -        }
    -      }
    -    }
    -  }
    -  qh vertex_visit += (unsigned int)numfacets;
    -  if (facetlist == qh facet_list) {
    -    if (numfacets != qh num_facets - qh num_visible) {
    -      qh_fprintf(qh ferr, 6140, "qhull internal error (qh_checkpolygon): actual number of facets is %d, cumulative facet count is %d - %d visible facets\n",
    -              numfacets, qh num_facets, qh num_visible);
    -      waserror= True;
    -    }
    -    qh vertex_visit++;
    -    if (qh VERTEXneighbors) {
    -      FORALLvertices {
    -        qh_setcheck(vertex->neighbors, "neighbors for v", vertex->id);
    -        if (vertex->deleted)
    -          continue;
    -        totvneighbors += qh_setsize(vertex->neighbors);
    -      }
    -      FORALLfacet_(facetlist)
    -        totvertices += qh_setsize(facet->vertices);
    -      if (totvneighbors != totvertices) {
    -        qh_fprintf(qh ferr, 6141, "qhull internal error (qh_checkpolygon): vertex neighbors inconsistent.  Totvneighbors %d, totvertices %d\n",
    -                totvneighbors, totvertices);
    -        waserror= True;
    -      }
    -    }
    -    if (numvertices != qh num_vertices - qh_setsize(qh del_vertices)) {
    -      qh_fprintf(qh ferr, 6142, "qhull internal error (qh_checkpolygon): actual number of vertices is %d, cumulative vertex count is %d\n",
    -              numvertices, qh num_vertices - qh_setsize(qh del_vertices));
    -      waserror= True;
    -    }
    -    if (qh hull_dim == 2 && numvertices != numfacets) {
    -      qh_fprintf(qh ferr, 6143, "qhull internal error (qh_checkpolygon): #vertices %d != #facets %d\n",
    -        numvertices, numfacets);
    -      waserror= True;
    -    }
    -    if (qh hull_dim == 3 && numvertices + numfacets - numridges/2 != 2) {
    -      qh_fprintf(qh ferr, 7063, "qhull warning: #vertices %d + #facets %d - #edges %d != 2\n\
    -        A vertex appears twice in a edge list.  May occur during merging.",
    -        numvertices, numfacets, numridges/2);
    -      /* occurs if lots of merging and a vertex ends up twice in an edge list.  e.g., RBOX 1000 s W1e-13 t995849315 D2 | QHULL d Tc Tv */
    -    }
    -  }
    -  if (waserror)
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -} /* checkpolygon */
    -
    -
    -/*---------------------------------
    -
    -  qh_checkvertex( vertex )
    -    check vertex for consistency
    -    checks vertex->neighbors
    -
    -  notes:
    -    neighbors checked efficiently in checkpolygon
    -*/
    -void qh_checkvertex(vertexT *vertex) {
    -  boolT waserror= False;
    -  facetT *neighbor, **neighborp, *errfacet=NULL;
    -
    -  if (qh_pointid(vertex->point) == -1) {
    -    qh_fprintf(qh ferr, 6144, "qhull internal error (qh_checkvertex): unknown point id %p\n", vertex->point);
    -    waserror= True;
    -  }
    -  if (vertex->id >= qh vertex_id) {
    -    qh_fprintf(qh ferr, 6145, "qhull internal error (qh_checkvertex): unknown vertex id %d\n", vertex->id);
    -    waserror= True;
    -  }
    -  if (!waserror && !vertex->deleted) {
    -    if (qh_setsize(vertex->neighbors)) {
    -      FOREACHneighbor_(vertex) {
    -        if (!qh_setin(neighbor->vertices, vertex)) {
    -          qh_fprintf(qh ferr, 6146, "qhull internal error (qh_checkvertex): neighbor f%d does not contain v%d\n", neighbor->id, vertex->id);
    -          errfacet= neighbor;
    -          waserror= True;
    -        }
    -      }
    -    }
    -  }
    -  if (waserror) {
    -    qh_errprint("ERRONEOUS", NULL, NULL, NULL, vertex);
    -    qh_errexit(qh_ERRqhull, errfacet, NULL);
    -  }
    -} /* checkvertex */
    -
    -/*---------------------------------
    -
    -  qh_clearcenters( type )
    -    clear old data from facet->center
    -
    -  notes:
    -    sets new centertype
    -    nop if CENTERtype is the same
    -*/
    -void qh_clearcenters(qh_CENTER type) {
    -  facetT *facet;
    -
    -  if (qh CENTERtype != type) {
    -    FORALLfacets {
    -      if (facet->tricoplanar && !facet->keepcentrum)
    -          facet->center= NULL;
    -      else if (qh CENTERtype == qh_ASvoronoi){
    -        if (facet->center) {
    -          qh_memfree(facet->center, qh center_size);
    -          facet->center= NULL;
    -        }
    -      }else /* qh CENTERtype == qh_AScentrum */ {
    -        if (facet->center) {
    -          qh_memfree(facet->center, qh normal_size);
    -          facet->center= NULL;
    -        }
    -      }
    -    }
    -    qh CENTERtype= type;
    -  }
    -  trace2((qh ferr, 2043, "qh_clearcenters: switched to center type %d\n", type));
    -} /* clearcenters */
    -
    -/*---------------------------------
    -
    -  qh_createsimplex( vertices )
    -    creates a simplex from a set of vertices
    -
    -  returns:
    -    initializes qh.facet_list to the simplex
    -    initializes qh.newfacet_list, .facet_tail
    -    initializes qh.vertex_list, .newvertex_list, .vertex_tail
    -
    -  design:
    -    initializes lists
    -    for each vertex
    -      create a new facet
    -    for each new facet
    -      create its neighbor set
    -*/
    -void qh_createsimplex(setT *vertices) {
    -  facetT *facet= NULL, *newfacet;
    -  boolT toporient= True;
    -  int vertex_i, vertex_n, nth;
    -  setT *newfacets= qh_settemp(qh hull_dim+1);
    -  vertexT *vertex;
    -
    -  qh facet_list= qh newfacet_list= qh facet_tail= qh_newfacet();
    -  qh num_facets= qh num_vertices= qh num_visible= 0;
    -  qh vertex_list= qh newvertex_list= qh vertex_tail= qh_newvertex(NULL);
    -  FOREACHvertex_i_(vertices) {
    -    newfacet= qh_newfacet();
    -    newfacet->vertices= qh_setnew_delnthsorted(vertices, vertex_n,
    -                                                vertex_i, 0);
    -    newfacet->toporient= (unsigned char)toporient;
    -    qh_appendfacet(newfacet);
    -    newfacet->newfacet= True;
    -    qh_appendvertex(vertex);
    -    qh_setappend(&newfacets, newfacet);
    -    toporient ^= True;
    -  }
    -  FORALLnew_facets {
    -    nth= 0;
    -    FORALLfacet_(qh newfacet_list) {
    -      if (facet != newfacet)
    -        SETelem_(newfacet->neighbors, nth++)= facet;
    -    }
    -    qh_settruncate(newfacet->neighbors, qh hull_dim);
    -  }
    -  qh_settempfree(&newfacets);
    -  trace1((qh ferr, 1028, "qh_createsimplex: created simplex\n"));
    -} /* createsimplex */
    -
    -/*---------------------------------
    -
    -  qh_delridge( ridge )
    -    deletes ridge from data structures it belongs to
    -    frees up its memory
    -
    -  notes:
    -    in merge.c, caller sets vertex->delridge for each vertex
    -    ridges also freed in qh_freeqhull
    -*/
    -void qh_delridge(ridgeT *ridge) {
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  qh_setdel(ridge->top->ridges, ridge);
    -  qh_setdel(ridge->bottom->ridges, ridge);
    -  qh_setfree(&(ridge->vertices));
    -  qh_memfree_(ridge, (int)sizeof(ridgeT), freelistp);
    -} /* delridge */
    -
    -
    -/*---------------------------------
    -
    -  qh_delvertex( vertex )
    -    deletes a vertex and frees its memory
    -
    -  notes:
    -    assumes vertex->adjacencies have been updated if needed
    -    unlinks from vertex_list
    -*/
    -void qh_delvertex(vertexT *vertex) {
    -
    -  if (vertex == qh tracevertex)
    -    qh tracevertex= NULL;
    -  qh_removevertex(vertex);
    -  qh_setfree(&vertex->neighbors);
    -  qh_memfree(vertex, (int)sizeof(vertexT));
    -} /* delvertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_facet3vertex(  )
    -    return temporary set of 3-d vertices in qh_ORIENTclock order
    -
    -  design:
    -    if simplicial facet
    -      build set from facet->vertices with facet->toporient
    -    else
    -      for each ridge in order
    -        build set from ridge's vertices
    -*/
    -setT *qh_facet3vertex(facetT *facet) {
    -  ridgeT *ridge, *firstridge;
    -  vertexT *vertex;
    -  int cntvertices, cntprojected=0;
    -  setT *vertices;
    -
    -  cntvertices= qh_setsize(facet->vertices);
    -  vertices= qh_settemp(cntvertices);
    -  if (facet->simplicial) {
    -    if (cntvertices != 3) {
    -      qh_fprintf(qh ferr, 6147, "qhull internal error (qh_facet3vertex): only %d vertices for simplicial facet f%d\n",
    -                  cntvertices, facet->id);
    -      qh_errexit(qh_ERRqhull, facet, NULL);
    -    }
    -    qh_setappend(&vertices, SETfirst_(facet->vertices));
    -    if (facet->toporient ^ qh_ORIENTclock)
    -      qh_setappend(&vertices, SETsecond_(facet->vertices));
    -    else
    -      qh_setaddnth(&vertices, 0, SETsecond_(facet->vertices));
    -    qh_setappend(&vertices, SETelem_(facet->vertices, 2));
    -  }else {
    -    ridge= firstridge= SETfirstt_(facet->ridges, ridgeT);   /* no infinite */
    -    while ((ridge= qh_nextridge3d(ridge, facet, &vertex))) {
    -      qh_setappend(&vertices, vertex);
    -      if (++cntprojected > cntvertices || ridge == firstridge)
    -        break;
    -    }
    -    if (!ridge || cntprojected != cntvertices) {
    -      qh_fprintf(qh ferr, 6148, "qhull internal error (qh_facet3vertex): ridges for facet %d don't match up.  got at least %d\n",
    -                  facet->id, cntprojected);
    -      qh_errexit(qh_ERRqhull, facet, ridge);
    -    }
    -  }
    -  return vertices;
    -} /* facet3vertex */
    -
    -/*---------------------------------
    -
    -  qh_findbestfacet( point, bestoutside, bestdist, isoutside )
    -    find facet that is furthest below a point
    -
    -    for Delaunay triangulations,
    -      Use qh_setdelaunay() to lift point to paraboloid and scale by 'Qbb' if needed
    -      Do not use options 'Qbk', 'QBk', or 'QbB' since they scale the coordinates.
    -
    -  returns:
    -    if bestoutside is set (e.g., qh_ALL)
    -      returns best facet that is not upperdelaunay
    -      if Delaunay and inside, point is outside circumsphere of bestfacet
    -    else
    -      returns first facet below point
    -      if point is inside, returns nearest, !upperdelaunay facet
    -    distance to facet
    -    isoutside set if outside of facet
    -
    -  notes:
    -    For tricoplanar facets, this finds one of the tricoplanar facets closest
    -    to the point.  For Delaunay triangulations, the point may be inside a
    -    different tricoplanar facet. See locate a facet with qh_findbestfacet()
    -
    -    If inside, qh_findbestfacet performs an exhaustive search
    -       this may be too conservative.  Sometimes it is clearly required.
    -
    -    qh_findbestfacet is not used by qhull.
    -    uses qh.visit_id and qh.coplanarset
    -
    -  see:
    -    qh_findbest
    -*/
    -facetT *qh_findbestfacet(pointT *point, boolT bestoutside,
    -           realT *bestdist, boolT *isoutside) {
    -  facetT *bestfacet= NULL;
    -  int numpart, totpart= 0;
    -
    -  bestfacet= qh_findbest(point, qh facet_list,
    -                            bestoutside, !qh_ISnewfacets, bestoutside /* qh_NOupper */,
    -                            bestdist, isoutside, &totpart);
    -  if (*bestdist < -qh DISTround) {
    -    bestfacet= qh_findfacet_all(point, bestdist, isoutside, &numpart);
    -    totpart += numpart;
    -    if ((isoutside && bestoutside)
    -    || (!isoutside && bestfacet->upperdelaunay)) {
    -      bestfacet= qh_findbest(point, bestfacet,
    -                            bestoutside, False, bestoutside,
    -                            bestdist, isoutside, &totpart);
    -      totpart += numpart;
    -    }
    -  }
    -  trace3((qh ferr, 3014, "qh_findbestfacet: f%d dist %2.2g isoutside %d totpart %d\n",
    -          bestfacet->id, *bestdist, *isoutside, totpart));
    -  return bestfacet;
    -} /* findbestfacet */
    -
    -/*---------------------------------
    -
    -  qh_findbestlower( facet, point, bestdist, numpart )
    -    returns best non-upper, non-flipped neighbor of facet for point
    -    if needed, searches vertex neighbors
    -
    -  returns:
    -    returns bestdist and updates numpart
    -
    -  notes:
    -    if Delaunay and inside, point is outside of circumsphere of bestfacet
    -    called by qh_findbest() for points above an upperdelaunay facet
    -
    -*/
    -facetT *qh_findbestlower(facetT *upperfacet, pointT *point, realT *bestdistp, int *numpart) {
    -  facetT *neighbor, **neighborp, *bestfacet= NULL;
    -  realT bestdist= -REALmax/2 /* avoid underflow */;
    -  realT dist;
    -  vertexT *vertex;
    -
    -  zinc_(Zbestlower);
    -  FOREACHneighbor_(upperfacet) {
    -    if (neighbor->upperdelaunay || neighbor->flipped)
    -      continue;
    -    (*numpart)++;
    -    qh_distplane(point, neighbor, &dist);
    -    if (dist > bestdist) {
    -      bestfacet= neighbor;
    -      bestdist= dist;
    -    }
    -  }
    -  if (!bestfacet) {
    -    zinc_(Zbestlowerv);
    -    /* rarely called, numpart does not count nearvertex computations */
    -    vertex= qh_nearvertex(upperfacet, point, &dist);
    -    qh_vertexneighbors();
    -    FOREACHneighbor_(vertex) {
    -      if (neighbor->upperdelaunay || neighbor->flipped)
    -        continue;
    -      (*numpart)++;
    -      qh_distplane(point, neighbor, &dist);
    -      if (dist > bestdist) {
    -        bestfacet= neighbor;
    -        bestdist= dist;
    -      }
    -    }
    -  }
    -  if (!bestfacet) {
    -    qh_fprintf(qh ferr, 6228, "\n\
    -Qhull internal error (qh_findbestlower): all neighbors of facet %d are flipped or upper Delaunay.\n\
    -Please report this error to qhull_bug@qhull.org with the input and all of the output.\n",
    -       upperfacet->id);
    -    qh_errexit(qh_ERRqhull, upperfacet, NULL);
    -  }
    -  *bestdistp= bestdist;
    -  trace3((qh ferr, 3015, "qh_findbestlower: f%d dist %2.2g for f%d p%d\n",
    -          bestfacet->id, bestdist, upperfacet->id, qh_pointid(point)));
    -  return bestfacet;
    -} /* findbestlower */
    -
    -/*---------------------------------
    -
    -  qh_findfacet_all( point, bestdist, isoutside, numpart )
    -    exhaustive search for facet below a point
    -
    -    for Delaunay triangulations,
    -      Use qh_setdelaunay() to lift point to paraboloid and scale by 'Qbb' if needed
    -      Do not use options 'Qbk', 'QBk', or 'QbB' since they scale the coordinates.
    -
    -  returns:
    -    returns first facet below point
    -    if point is inside,
    -      returns nearest facet
    -    distance to facet
    -    isoutside if point is outside of the hull
    -    number of distance tests
    -
    -  notes:
    -    for library users, not used by Qhull
    -*/
    -facetT *qh_findfacet_all(pointT *point, realT *bestdist, boolT *isoutside,
    -                          int *numpart) {
    -  facetT *bestfacet= NULL, *facet;
    -  realT dist;
    -  int totpart= 0;
    -
    -  *bestdist= -REALmax;
    -  *isoutside= False;
    -  FORALLfacets {
    -    if (facet->flipped || !facet->normal)
    -      continue;
    -    totpart++;
    -    qh_distplane(point, facet, &dist);
    -    if (dist > *bestdist) {
    -      *bestdist= dist;
    -      bestfacet= facet;
    -      if (dist > qh MINoutside) {
    -        *isoutside= True;
    -        break;
    -      }
    -    }
    -  }
    -  *numpart= totpart;
    -  trace3((qh ferr, 3016, "qh_findfacet_all: f%d dist %2.2g isoutside %d totpart %d\n",
    -          getid_(bestfacet), *bestdist, *isoutside, totpart));
    -  return bestfacet;
    -} /* findfacet_all */
    -
    -/*---------------------------------
    -
    -  qh_findgood( facetlist, goodhorizon )
    -    identify good facets for qh.PRINTgood
    -    if qh.GOODvertex>0
    -      facet includes point as vertex
    -      if !match, returns goodhorizon
    -      inactive if qh.MERGING
    -    if qh.GOODpoint
    -      facet is visible or coplanar (>0) or not visible (<0)
    -    if qh.GOODthreshold
    -      facet->normal matches threshold
    -    if !goodhorizon and !match,
    -      selects facet with closest angle
    -      sets GOODclosest
    -
    -  returns:
    -    number of new, good facets found
    -    determines facet->good
    -    may update qh.GOODclosest
    -
    -  notes:
    -    qh_findgood_all further reduces the good region
    -
    -  design:
    -    count good facets
    -    mark good facets for qh.GOODpoint
    -    mark good facets for qh.GOODthreshold
    -    if necessary
    -      update qh.GOODclosest
    -*/
    -int qh_findgood(facetT *facetlist, int goodhorizon) {
    -  facetT *facet, *bestfacet= NULL;
    -  realT angle, bestangle= REALmax, dist;
    -  int  numgood=0;
    -
    -  FORALLfacet_(facetlist) {
    -    if (facet->good)
    -      numgood++;
    -  }
    -  if (qh GOODvertex>0 && !qh MERGING) {
    -    FORALLfacet_(facetlist) {
    -      if (!qh_isvertex(qh GOODvertexp, facet->vertices)) {
    -        facet->good= False;
    -        numgood--;
    -      }
    -    }
    -  }
    -  if (qh GOODpoint && numgood) {
    -    FORALLfacet_(facetlist) {
    -      if (facet->good && facet->normal) {
    -        zinc_(Zdistgood);
    -        qh_distplane(qh GOODpointp, facet, &dist);
    -        if ((qh GOODpoint > 0) ^ (dist > 0.0)) {
    -          facet->good= False;
    -          numgood--;
    -        }
    -      }
    -    }
    -  }
    -  if (qh GOODthreshold && (numgood || goodhorizon || qh GOODclosest)) {
    -    FORALLfacet_(facetlist) {
    -      if (facet->good && facet->normal) {
    -        if (!qh_inthresholds(facet->normal, &angle)) {
    -          facet->good= False;
    -          numgood--;
    -          if (angle < bestangle) {
    -            bestangle= angle;
    -            bestfacet= facet;
    -          }
    -        }
    -      }
    -    }
    -    if (!numgood && (!goodhorizon || qh GOODclosest)) {
    -      if (qh GOODclosest) {
    -        if (qh GOODclosest->visible)
    -          qh GOODclosest= NULL;
    -        else {
    -          qh_inthresholds(qh GOODclosest->normal, &angle);
    -          if (angle < bestangle)
    -            bestfacet= qh GOODclosest;
    -        }
    -      }
    -      if (bestfacet && bestfacet != qh GOODclosest) {
    -        if (qh GOODclosest)
    -          qh GOODclosest->good= False;
    -        qh GOODclosest= bestfacet;
    -        bestfacet->good= True;
    -        numgood++;
    -        trace2((qh ferr, 2044, "qh_findgood: f%d is closest(%2.2g) to thresholds\n",
    -           bestfacet->id, bestangle));
    -        return numgood;
    -      }
    -    }else if (qh GOODclosest) { /* numgood > 0 */
    -      qh GOODclosest->good= False;
    -      qh GOODclosest= NULL;
    -    }
    -  }
    -  zadd_(Zgoodfacet, numgood);
    -  trace2((qh ferr, 2045, "qh_findgood: found %d good facets with %d good horizon\n",
    -               numgood, goodhorizon));
    -  if (!numgood && qh GOODvertex>0 && !qh MERGING)
    -    return goodhorizon;
    -  return numgood;
    -} /* findgood */
    -
    -/*---------------------------------
    -
    -  qh_findgood_all( facetlist )
    -    apply other constraints for good facets (used by qh.PRINTgood)
    -    if qh.GOODvertex
    -      facet includes (>0) or doesn't include (<0) point as vertex
    -      if last good facet and ONLYgood, prints warning and continues
    -    if qh.SPLITthresholds
    -      facet->normal matches threshold, or if none, the closest one
    -    calls qh_findgood
    -    nop if good not used
    -
    -  returns:
    -    clears facet->good if not good
    -    sets qh.num_good
    -
    -  notes:
    -    this is like qh_findgood but more restrictive
    -
    -  design:
    -    uses qh_findgood to mark good facets
    -    marks facets for qh.GOODvertex
    -    marks facets for qh.SPLITthreholds
    -*/
    -void qh_findgood_all(facetT *facetlist) {
    -  facetT *facet, *bestfacet=NULL;
    -  realT angle, bestangle= REALmax;
    -  int  numgood=0, startgood;
    -
    -  if (!qh GOODvertex && !qh GOODthreshold && !qh GOODpoint
    -  && !qh SPLITthresholds)
    -    return;
    -  if (!qh ONLYgood)
    -    qh_findgood(qh facet_list, 0);
    -  FORALLfacet_(facetlist) {
    -    if (facet->good)
    -      numgood++;
    -  }
    -  if (qh GOODvertex <0 || (qh GOODvertex > 0 && qh MERGING)) {
    -    FORALLfacet_(facetlist) {
    -      if (facet->good && ((qh GOODvertex > 0) ^ !!qh_isvertex(qh GOODvertexp, facet->vertices))) {
    -        if (!--numgood) {
    -          if (qh ONLYgood) {
    -            qh_fprintf(qh ferr, 7064, "qhull warning: good vertex p%d does not match last good facet f%d.  Ignored.\n",
    -               qh_pointid(qh GOODvertexp), facet->id);
    -            return;
    -          }else if (qh GOODvertex > 0)
    -            qh_fprintf(qh ferr, 7065, "qhull warning: point p%d is not a vertex('QV%d').\n",
    -                qh GOODvertex-1, qh GOODvertex-1);
    -          else
    -            qh_fprintf(qh ferr, 7066, "qhull warning: point p%d is a vertex for every facet('QV-%d').\n",
    -                -qh GOODvertex - 1, -qh GOODvertex - 1);
    -        }
    -        facet->good= False;
    -      }
    -    }
    -  }
    -  startgood= numgood;
    -  if (qh SPLITthresholds) {
    -    FORALLfacet_(facetlist) {
    -      if (facet->good) {
    -        if (!qh_inthresholds(facet->normal, &angle)) {
    -          facet->good= False;
    -          numgood--;
    -          if (angle < bestangle) {
    -            bestangle= angle;
    -            bestfacet= facet;
    -          }
    -        }
    -      }
    -    }
    -    if (!numgood && bestfacet) {
    -      bestfacet->good= True;
    -      numgood++;
    -      trace0((qh ferr, 23, "qh_findgood_all: f%d is closest(%2.2g) to thresholds\n",
    -           bestfacet->id, bestangle));
    -      return;
    -    }
    -  }
    -  qh num_good= numgood;
    -  trace0((qh ferr, 24, "qh_findgood_all: %d good facets remain out of %d facets\n",
    -        numgood, startgood));
    -} /* findgood_all */
    -
    -/*---------------------------------
    -
    -  qh_furthestnext()
    -    set qh.facet_next to facet with furthest of all furthest points
    -    searches all facets on qh.facet_list
    -
    -  notes:
    -    this may help avoid precision problems
    -*/
    -void qh_furthestnext(void /* qh facet_list */) {
    -  facetT *facet, *bestfacet= NULL;
    -  realT dist, bestdist= -REALmax;
    -
    -  FORALLfacets {
    -    if (facet->outsideset) {
    -#if qh_COMPUTEfurthest
    -      pointT *furthest;
    -      furthest= (pointT*)qh_setlast(facet->outsideset);
    -      zinc_(Zcomputefurthest);
    -      qh_distplane(furthest, facet, &dist);
    -#else
    -      dist= facet->furthestdist;
    -#endif
    -      if (dist > bestdist) {
    -        bestfacet= facet;
    -        bestdist= dist;
    -      }
    -    }
    -  }
    -  if (bestfacet) {
    -    qh_removefacet(bestfacet);
    -    qh_prependfacet(bestfacet, &qh facet_next);
    -    trace1((qh ferr, 1029, "qh_furthestnext: made f%d next facet(dist %.2g)\n",
    -            bestfacet->id, bestdist));
    -  }
    -} /* furthestnext */
    -
    -/*---------------------------------
    -
    -  qh_furthestout( facet )
    -    make furthest outside point the last point of outsideset
    -
    -  returns:
    -    updates facet->outsideset
    -    clears facet->notfurthest
    -    sets facet->furthestdist
    -
    -  design:
    -    determine best point of outsideset
    -    make it the last point of outsideset
    -*/
    -void qh_furthestout(facetT *facet) {
    -  pointT *point, **pointp, *bestpoint= NULL;
    -  realT dist, bestdist= -REALmax;
    -
    -  FOREACHpoint_(facet->outsideset) {
    -    qh_distplane(point, facet, &dist);
    -    zinc_(Zcomputefurthest);
    -    if (dist > bestdist) {
    -      bestpoint= point;
    -      bestdist= dist;
    -    }
    -  }
    -  if (bestpoint) {
    -    qh_setdel(facet->outsideset, point);
    -    qh_setappend(&facet->outsideset, point);
    -#if !qh_COMPUTEfurthest
    -    facet->furthestdist= bestdist;
    -#endif
    -  }
    -  facet->notfurthest= False;
    -  trace3((qh ferr, 3017, "qh_furthestout: p%d is furthest outside point of f%d\n",
    -          qh_pointid(point), facet->id));
    -} /* furthestout */
    -
    -
    -/*---------------------------------
    -
    -  qh_infiniteloop( facet )
    -    report infinite loop error due to facet
    -*/
    -void qh_infiniteloop(facetT *facet) {
    -
    -  qh_fprintf(qh ferr, 6149, "qhull internal error (qh_infiniteloop): potential infinite loop detected\n");
    -  qh_errexit(qh_ERRqhull, facet, NULL);
    -} /* qh_infiniteloop */
    -
    -/*---------------------------------
    -
    -  qh_initbuild()
    -    initialize hull and outside sets with point array
    -    qh.FIRSTpoint/qh.NUMpoints is point array
    -    if qh.GOODpoint
    -      adds qh.GOODpoint to initial hull
    -
    -  returns:
    -    qh_facetlist with initial hull
    -    points partioned into outside sets, coplanar sets, or inside
    -    initializes qh.GOODpointp, qh.GOODvertexp,
    -
    -  design:
    -    initialize global variables used during qh_buildhull
    -    determine precision constants and points with max/min coordinate values
    -      if qh.SCALElast, scale last coordinate(for 'd')
    -    build initial simplex
    -    partition input points into facets of initial simplex
    -    set up lists
    -    if qh.ONLYgood
    -      check consistency
    -      add qh.GOODvertex if defined
    -*/
    -void qh_initbuild( void) {
    -  setT *maxpoints, *vertices;
    -  facetT *facet;
    -  int i, numpart;
    -  realT dist;
    -  boolT isoutside;
    -
    -  qh furthest_id= -1;
    -  qh lastreport= 0;
    -  qh facet_id= qh vertex_id= qh ridge_id= 0;
    -  qh visit_id= qh vertex_visit= 0;
    -  qh maxoutdone= False;
    -
    -  if (qh GOODpoint > 0)
    -    qh GOODpointp= qh_point(qh GOODpoint-1);
    -  else if (qh GOODpoint < 0)
    -    qh GOODpointp= qh_point(-qh GOODpoint-1);
    -  if (qh GOODvertex > 0)
    -    qh GOODvertexp= qh_point(qh GOODvertex-1);
    -  else if (qh GOODvertex < 0)
    -    qh GOODvertexp= qh_point(-qh GOODvertex-1);
    -  if ((qh GOODpoint
    -       && (qh GOODpointp < qh first_point  /* also catches !GOODpointp */
    -           || qh GOODpointp > qh_point(qh num_points-1)))
    -    || (qh GOODvertex
    -        && (qh GOODvertexp < qh first_point  /* also catches !GOODvertexp */
    -            || qh GOODvertexp > qh_point(qh num_points-1)))) {
    -    qh_fprintf(qh ferr, 6150, "qhull input error: either QGn or QVn point is > p%d\n",
    -             qh num_points-1);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  maxpoints= qh_maxmin(qh first_point, qh num_points, qh hull_dim);
    -  if (qh SCALElast)
    -    qh_scalelast(qh first_point, qh num_points, qh hull_dim,
    -               qh MINlastcoord, qh MAXlastcoord, qh MAXwidth);
    -  qh_detroundoff();
    -  if (qh DELAUNAY && qh upper_threshold[qh hull_dim-1] > REALmax/2
    -                  && qh lower_threshold[qh hull_dim-1] < -REALmax/2) {
    -    for (i=qh_PRINTEND; i--; ) {
    -      if (qh PRINTout[i] == qh_PRINTgeom && qh DROPdim < 0
    -          && !qh GOODthreshold && !qh SPLITthresholds)
    -        break;  /* in this case, don't set upper_threshold */
    -    }
    -    if (i < 0) {
    -      if (qh UPPERdelaunay) { /* matches qh.upperdelaunay in qh_setfacetplane */
    -        qh lower_threshold[qh hull_dim-1]= qh ANGLEround * qh_ZEROdelaunay;
    -        qh GOODthreshold= True;
    -      }else {
    -        qh upper_threshold[qh hull_dim-1]= -qh ANGLEround * qh_ZEROdelaunay;
    -        if (!qh GOODthreshold)
    -          qh SPLITthresholds= True; /* build upper-convex hull even if Qg */
    -          /* qh_initqhull_globals errors if Qg without Pdk/etc. */
    -      }
    -    }
    -  }
    -  vertices= qh_initialvertices(qh hull_dim, maxpoints, qh first_point, qh num_points);
    -  qh_initialhull(vertices);  /* initial qh facet_list */
    -  qh_partitionall(vertices, qh first_point, qh num_points);
    -  if (qh PRINToptions1st || qh TRACElevel || qh IStracing) {
    -    if (qh TRACElevel || qh IStracing)
    -      qh_fprintf(qh ferr, 8103, "\nTrace level %d for %s | %s\n",
    -         qh IStracing ? qh IStracing : qh TRACElevel, qh rbox_command, qh qhull_command);
    -    qh_fprintf(qh ferr, 8104, "Options selected for Qhull %s:\n%s\n", qh_version, qh qhull_options);
    -  }
    -  qh_resetlists(False, qh_RESETvisible /*qh visible_list newvertex_list newfacet_list */);
    -  qh facet_next= qh facet_list;
    -  qh_furthestnext(/* qh facet_list */);
    -  if (qh PREmerge) {
    -    qh cos_max= qh premerge_cos;
    -    qh centrum_radius= qh premerge_centrum;
    -  }
    -  if (qh ONLYgood) {
    -    if (qh GOODvertex > 0 && qh MERGING) {
    -      qh_fprintf(qh ferr, 6151, "qhull input error: 'Qg QVn' (only good vertex) does not work with merging.\nUse 'QJ' to joggle the input or 'Q0' to turn off merging.\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    if (!(qh GOODthreshold || qh GOODpoint
    -         || (!qh MERGEexact && !qh PREmerge && qh GOODvertexp))) {
    -      qh_fprintf(qh ferr, 6152, "qhull input error: 'Qg' (ONLYgood) needs a good threshold('Pd0D0'), a\n\
    -good point(QGn or QG-n), or a good vertex with 'QJ' or 'Q0' (QVn).\n");
    -      qh_errexit(qh_ERRinput, NULL, NULL);
    -    }
    -    if (qh GOODvertex > 0  && !qh MERGING  /* matches qh_partitionall */
    -        && !qh_isvertex(qh GOODvertexp, vertices)) {
    -      facet= qh_findbestnew(qh GOODvertexp, qh facet_list,
    -                          &dist, !qh_ALL, &isoutside, &numpart);
    -      zadd_(Zdistgood, numpart);
    -      if (!isoutside) {
    -        qh_fprintf(qh ferr, 6153, "qhull input error: point for QV%d is inside initial simplex.  It can not be made a vertex.\n",
    -               qh_pointid(qh GOODvertexp));
    -        qh_errexit(qh_ERRinput, NULL, NULL);
    -      }
    -      if (!qh_addpoint(qh GOODvertexp, facet, False)) {
    -        qh_settempfree(&vertices);
    -        qh_settempfree(&maxpoints);
    -        return;
    -      }
    -    }
    -    qh_findgood(qh facet_list, 0);
    -  }
    -  qh_settempfree(&vertices);
    -  qh_settempfree(&maxpoints);
    -  trace1((qh ferr, 1030, "qh_initbuild: initial hull created and points partitioned\n"));
    -} /* initbuild */
    -
    -/*---------------------------------
    -
    -  qh_initialhull( vertices )
    -    constructs the initial hull as a DIM3 simplex of vertices
    -
    -  design:
    -    creates a simplex (initializes lists)
    -    determines orientation of simplex
    -    sets hyperplanes for facets
    -    doubles checks orientation (in case of axis-parallel facets with Gaussian elimination)
    -    checks for flipped facets and qh.NARROWhull
    -    checks the result
    -*/
    -void qh_initialhull(setT *vertices) {
    -  facetT *facet, *firstfacet, *neighbor, **neighborp;
    -  realT dist, angle, minangle= REALmax;
    -#ifndef qh_NOtrace
    -  int k;
    -#endif
    -
    -  qh_createsimplex(vertices);  /* qh facet_list */
    -  qh_resetlists(False, qh_RESETvisible);
    -  qh facet_next= qh facet_list;      /* advance facet when processed */
    -  qh interior_point= qh_getcenter(vertices);
    -  firstfacet= qh facet_list;
    -  qh_setfacetplane(firstfacet);
    -  zinc_(Znumvisibility); /* needs to be in printsummary */
    -  qh_distplane(qh interior_point, firstfacet, &dist);
    -  if (dist > 0) {
    -    FORALLfacets
    -      facet->toporient ^= (unsigned char)True;
    -  }
    -  FORALLfacets
    -    qh_setfacetplane(facet);
    -  FORALLfacets {
    -    if (!qh_checkflipped(facet, NULL, qh_ALL)) {/* due to axis-parallel facet */
    -      trace1((qh ferr, 1031, "qh_initialhull: initial orientation incorrect.  Correct all facets\n"));
    -      facet->flipped= False;
    -      FORALLfacets {
    -        facet->toporient ^= (unsigned char)True;
    -        qh_orientoutside(facet);
    -      }
    -      break;
    -    }
    -  }
    -  FORALLfacets {
    -    if (!qh_checkflipped(facet, NULL, !qh_ALL)) {  /* can happen with 'R0.1' */
    -      qh_precision("initial facet is coplanar with interior point");
    -      qh_fprintf(qh ferr, 6154, "qhull precision error: initial facet %d is coplanar with the interior point\n",
    -                   facet->id);
    -      qh_errexit(qh_ERRsingular, facet, NULL);
    -    }
    -    FOREACHneighbor_(facet) {
    -      angle= qh_getangle(facet->normal, neighbor->normal);
    -      minimize_( minangle, angle);
    -    }
    -  }
    -  if (minangle < qh_MAXnarrow && !qh NOnarrow) {
    -    realT diff= 1.0 + minangle;
    -
    -    qh NARROWhull= True;
    -    qh_option("_narrow-hull", NULL, &diff);
    -    if (minangle < qh_WARNnarrow && !qh RERUN && qh PRINTprecision)
    -      qh_printhelp_narrowhull(qh ferr, minangle);
    -  }
    -  zzval_(Zprocessed)= qh hull_dim+1;
    -  qh_checkpolygon(qh facet_list);
    -  qh_checkconvex(qh facet_list,   qh_DATAfault);
    -#ifndef qh_NOtrace
    -  if (qh IStracing >= 1) {
    -    qh_fprintf(qh ferr, 8105, "qh_initialhull: simplex constructed, interior point:");
    -    for (k=0; k < qh hull_dim; k++)
    -      qh_fprintf(qh ferr, 8106, " %6.4g", qh interior_point[k]);
    -    qh_fprintf(qh ferr, 8107, "\n");
    -  }
    -#endif
    -} /* initialhull */
    -
    -/*---------------------------------
    -
    -  qh_initialvertices( dim, maxpoints, points, numpoints )
    -    determines a non-singular set of initial vertices
    -    maxpoints may include duplicate points
    -
    -  returns:
    -    temporary set of dim+1 vertices in descending order by vertex id
    -    if qh.RANDOMoutside && !qh.ALLpoints
    -      picks random points
    -    if dim >= qh_INITIALmax,
    -      uses min/max x and max points with non-zero determinants
    -
    -  notes:
    -    unless qh.ALLpoints,
    -      uses maxpoints as long as determinate is non-zero
    -*/
    -setT *qh_initialvertices(int dim, setT *maxpoints, pointT *points, int numpoints) {
    -  pointT *point, **pointp;
    -  setT *vertices, *simplex, *tested;
    -  realT randr;
    -  int idx, point_i, point_n, k;
    -  boolT nearzero= False;
    -
    -  vertices= qh_settemp(dim + 1);
    -  simplex= qh_settemp(dim+1);
    -  if (qh ALLpoints)
    -    qh_maxsimplex(dim, NULL, points, numpoints, &simplex);
    -  else if (qh RANDOMoutside) {
    -    while (qh_setsize(simplex) != dim+1) {
    -      randr= qh_RANDOMint;
    -      randr= randr/(qh_RANDOMmax+1);
    -      idx= (int)floor(qh num_points * randr);
    -      while (qh_setin(simplex, qh_point(idx))) {
    -            idx++; /* in case qh_RANDOMint always returns the same value */
    -        idx= idx < qh num_points ? idx : 0;
    -      }
    -      qh_setappend(&simplex, qh_point(idx));
    -    }
    -  }else if (qh hull_dim >= qh_INITIALmax) {
    -    tested= qh_settemp(dim+1);
    -    qh_setappend(&simplex, SETfirst_(maxpoints));   /* max and min X coord */
    -    qh_setappend(&simplex, SETsecond_(maxpoints));
    -    qh_maxsimplex(fmin_(qh_INITIALsearch, dim), maxpoints, points, numpoints, &simplex);
    -    k= qh_setsize(simplex);
    -    FOREACHpoint_i_(maxpoints) {
    -      if (point_i & 0x1) {     /* first pick up max. coord. points */
    -        if (!qh_setin(simplex, point) && !qh_setin(tested, point)){
    -          qh_detsimplex(point, simplex, k, &nearzero);
    -          if (nearzero)
    -            qh_setappend(&tested, point);
    -          else {
    -            qh_setappend(&simplex, point);
    -            if (++k == dim)  /* use search for last point */
    -              break;
    -          }
    -        }
    -      }
    -    }
    -    while (k != dim && (point= (pointT*)qh_setdellast(maxpoints))) {
    -      if (!qh_setin(simplex, point) && !qh_setin(tested, point)){
    -        qh_detsimplex(point, simplex, k, &nearzero);
    -        if (nearzero)
    -          qh_setappend(&tested, point);
    -        else {
    -          qh_setappend(&simplex, point);
    -          k++;
    -        }
    -      }
    -    }
    -    idx= 0;
    -    while (k != dim && (point= qh_point(idx++))) {
    -      if (!qh_setin(simplex, point) && !qh_setin(tested, point)){
    -        qh_detsimplex(point, simplex, k, &nearzero);
    -        if (!nearzero){
    -          qh_setappend(&simplex, point);
    -          k++;
    -        }
    -      }
    -    }
    -    qh_settempfree(&tested);
    -    qh_maxsimplex(dim, maxpoints, points, numpoints, &simplex);
    -  }else
    -    qh_maxsimplex(dim, maxpoints, points, numpoints, &simplex);
    -  FOREACHpoint_(simplex)
    -    qh_setaddnth(&vertices, 0, qh_newvertex(point)); /* descending order */
    -  qh_settempfree(&simplex);
    -  return vertices;
    -} /* initialvertices */
    -
    -
    -/*---------------------------------
    -
    -  qh_isvertex(  )
    -    returns vertex if point is in vertex set, else returns NULL
    -
    -  notes:
    -    for qh.GOODvertex
    -*/
    -vertexT *qh_isvertex(pointT *point, setT *vertices) {
    -  vertexT *vertex, **vertexp;
    -
    -  FOREACHvertex_(vertices) {
    -    if (vertex->point == point)
    -      return vertex;
    -  }
    -  return NULL;
    -} /* isvertex */
    -
    -/*---------------------------------
    -
    -  qh_makenewfacets( point )
    -    make new facets from point and qh.visible_list
    -
    -  returns:
    -    qh.newfacet_list= list of new facets with hyperplanes and ->newfacet
    -    qh.newvertex_list= list of vertices in new facets with ->newlist set
    -
    -    if (qh.ONLYgood)
    -      newfacets reference horizon facets, but not vice versa
    -      ridges reference non-simplicial horizon ridges, but not vice versa
    -      does not change existing facets
    -    else
    -      sets qh.NEWfacets
    -      new facets attached to horizon facets and ridges
    -      for visible facets,
    -        visible->r.replace is corresponding new facet
    -
    -  see also:
    -    qh_makenewplanes() -- make hyperplanes for facets
    -    qh_attachnewfacets() -- attachnewfacets if not done here(qh ONLYgood)
    -    qh_matchnewfacets() -- match up neighbors
    -    qh_updatevertices() -- update vertex neighbors and delvertices
    -    qh_deletevisible() -- delete visible facets
    -    qh_checkpolygon() --check the result
    -    qh_triangulate() -- triangulate a non-simplicial facet
    -
    -  design:
    -    for each visible facet
    -      make new facets to its horizon facets
    -      update its f.replace
    -      clear its neighbor set
    -*/
    -vertexT *qh_makenewfacets(pointT *point /*visible_list*/) {
    -  facetT *visible, *newfacet= NULL, *newfacet2= NULL, *neighbor, **neighborp;
    -  vertexT *apex;
    -  int numnew=0;
    -
    -  qh newfacet_list= qh facet_tail;
    -  qh newvertex_list= qh vertex_tail;
    -  apex= qh_newvertex(point);
    -  qh_appendvertex(apex);
    -  qh visit_id++;
    -  if (!qh ONLYgood)
    -    qh NEWfacets= True;
    -  FORALLvisible_facets {
    -    FOREACHneighbor_(visible)
    -      neighbor->seen= False;
    -    if (visible->ridges) {
    -      visible->visitid= qh visit_id;
    -      newfacet2= qh_makenew_nonsimplicial(visible, apex, &numnew);
    -    }
    -    if (visible->simplicial)
    -      newfacet= qh_makenew_simplicial(visible, apex, &numnew);
    -    if (!qh ONLYgood) {
    -      if (newfacet2)  /* newfacet is null if all ridges defined */
    -        newfacet= newfacet2;
    -      if (newfacet)
    -        visible->f.replace= newfacet;
    -      else
    -        zinc_(Zinsidevisible);
    -      SETfirst_(visible->neighbors)= NULL;
    -    }
    -  }
    -  trace1((qh ferr, 1032, "qh_makenewfacets: created %d new facets from point p%d to horizon\n",
    -          numnew, qh_pointid(point)));
    -  if (qh IStracing >= 4)
    -    qh_printfacetlist(qh newfacet_list, NULL, qh_ALL);
    -  return apex;
    -} /* makenewfacets */
    -
    -/*---------------------------------
    -
    -  qh_matchduplicates( atfacet, atskip, hashsize, hashcount )
    -    match duplicate ridges in qh.hash_table for atfacet/atskip
    -    duplicates marked with ->dupridge and qh_DUPLICATEridge
    -
    -  returns:
    -    picks match with worst merge (min distance apart)
    -    updates hashcount
    -
    -  see also:
    -    qh_matchneighbor
    -
    -  notes:
    -
    -  design:
    -    compute hash value for atfacet and atskip
    -    repeat twice -- once to make best matches, once to match the rest
    -      for each possible facet in qh.hash_table
    -        if it is a matching facet and pass 2
    -          make match
    -          unless tricoplanar, mark match for merging (qh_MERGEridge)
    -          [e.g., tricoplanar RBOX s 1000 t993602376 | QHULL C-1e-3 d Qbb FA Qt]
    -        if it is a matching facet and pass 1
    -          test if this is a better match
    -      if pass 1,
    -        make best match (it will not be merged)
    -*/
    -#ifndef qh_NOmerge
    -void qh_matchduplicates(facetT *atfacet, int atskip, int hashsize, int *hashcount) {
    -  boolT same, ismatch;
    -  int hash, scan;
    -  facetT *facet, *newfacet, *maxmatch= NULL, *maxmatch2= NULL, *nextfacet;
    -  int skip, newskip, nextskip= 0, maxskip= 0, maxskip2= 0, makematch;
    -  realT maxdist= -REALmax, mindist, dist2, low, high;
    -
    -  hash= qh_gethash(hashsize, atfacet->vertices, qh hull_dim, 1,
    -                     SETelem_(atfacet->vertices, atskip));
    -  trace2((qh ferr, 2046, "qh_matchduplicates: find duplicate matches for f%d skip %d hash %d hashcount %d\n",
    -          atfacet->id, atskip, hash, *hashcount));
    -  for (makematch= 0; makematch < 2; makematch++) {
    -    qh visit_id++;
    -    for (newfacet= atfacet, newskip= atskip; newfacet; newfacet= nextfacet, newskip= nextskip) {
    -      zinc_(Zhashlookup);
    -      nextfacet= NULL;
    -      newfacet->visitid= qh visit_id;
    -      for (scan= hash; (facet= SETelemt_(qh hash_table, scan, facetT));
    -           scan= (++scan >= hashsize ? 0 : scan)) {
    -        if (!facet->dupridge || facet->visitid == qh visit_id)
    -          continue;
    -        zinc_(Zhashtests);
    -        if (qh_matchvertices(1, newfacet->vertices, newskip, facet->vertices, &skip, &same)) {
    -          ismatch= (same == (boolT)(newfacet->toporient ^ facet->toporient));
    -          if (SETelemt_(facet->neighbors, skip, facetT) != qh_DUPLICATEridge) {
    -            if (!makematch) {
    -              qh_fprintf(qh ferr, 6155, "qhull internal error (qh_matchduplicates): missing dupridge at f%d skip %d for new f%d skip %d hash %d\n",
    -                     facet->id, skip, newfacet->id, newskip, hash);
    -              qh_errexit2 (qh_ERRqhull, facet, newfacet);
    -            }
    -          }else if (ismatch && makematch) {
    -            if (SETelemt_(newfacet->neighbors, newskip, facetT) == qh_DUPLICATEridge) {
    -              SETelem_(facet->neighbors, skip)= newfacet;
    -              if (newfacet->tricoplanar)
    -                SETelem_(newfacet->neighbors, newskip)= facet;
    -              else
    -                SETelem_(newfacet->neighbors, newskip)= qh_MERGEridge;
    -              *hashcount -= 2; /* removed two unmatched facets */
    -              trace4((qh ferr, 4059, "qh_matchduplicates: duplicate f%d skip %d matched with new f%d skip %d merge\n",
    -                    facet->id, skip, newfacet->id, newskip));
    -            }
    -          }else if (ismatch) {
    -            mindist= qh_getdistance(facet, newfacet, &low, &high);
    -            dist2= qh_getdistance(newfacet, facet, &low, &high);
    -            minimize_(mindist, dist2);
    -            if (mindist > maxdist) {
    -              maxdist= mindist;
    -              maxmatch= facet;
    -              maxskip= skip;
    -              maxmatch2= newfacet;
    -              maxskip2= newskip;
    -            }
    -            trace3((qh ferr, 3018, "qh_matchduplicates: duplicate f%d skip %d new f%d skip %d at dist %2.2g, max is now f%d f%d\n",
    -                    facet->id, skip, newfacet->id, newskip, mindist,
    -                    maxmatch->id, maxmatch2->id));
    -          }else { /* !ismatch */
    -            nextfacet= facet;
    -            nextskip= skip;
    -          }
    -        }
    -        if (makematch && !facet
    -        && SETelemt_(facet->neighbors, skip, facetT) == qh_DUPLICATEridge) {
    -          qh_fprintf(qh ferr, 6156, "qhull internal error (qh_matchduplicates): no MERGEridge match for duplicate f%d skip %d at hash %d\n",
    -                     newfacet->id, newskip, hash);
    -          qh_errexit(qh_ERRqhull, newfacet, NULL);
    -        }
    -      }
    -    } /* end of for each new facet at hash */
    -    if (!makematch) {
    -      if (!maxmatch) {
    -        qh_fprintf(qh ferr, 6157, "qhull internal error (qh_matchduplicates): no maximum match at duplicate f%d skip %d at hash %d\n",
    -                     atfacet->id, atskip, hash);
    -        qh_errexit(qh_ERRqhull, atfacet, NULL);
    -      }
    -      SETelem_(maxmatch->neighbors, maxskip)= maxmatch2;
    -      SETelem_(maxmatch2->neighbors, maxskip2)= maxmatch;
    -      *hashcount -= 2; /* removed two unmatched facets */
    -      zzinc_(Zmultiridge);
    -      trace0((qh ferr, 25, "qh_matchduplicates: duplicate f%d skip %d matched with new f%d skip %d keep\n",
    -              maxmatch->id, maxskip, maxmatch2->id, maxskip2));
    -      qh_precision("ridge with multiple neighbors");
    -      if (qh IStracing >= 4)
    -        qh_errprint("DUPLICATED/MATCH", maxmatch, maxmatch2, NULL, NULL);
    -    }
    -  }
    -} /* matchduplicates */
    -
    -/*---------------------------------
    -
    -  qh_nearcoplanar()
    -    for all facets, remove near-inside points from facet->coplanarset
    -    coplanar points defined by innerplane from qh_outerinner()
    -
    -  returns:
    -    if qh KEEPcoplanar && !qh KEEPinside
    -      facet->coplanarset only contains coplanar points
    -    if qh.JOGGLEmax
    -      drops inner plane by another qh.JOGGLEmax diagonal since a
    -        vertex could shift out while a coplanar point shifts in
    -
    -  notes:
    -    used for qh.PREmerge and qh.JOGGLEmax
    -    must agree with computation of qh.NEARcoplanar in qh_detroundoff()
    -  design:
    -    if not keeping coplanar or inside points
    -      free all coplanar sets
    -    else if not keeping both coplanar and inside points
    -      remove !coplanar or !inside points from coplanar sets
    -*/
    -void qh_nearcoplanar(void /* qh.facet_list */) {
    -  facetT *facet;
    -  pointT *point, **pointp;
    -  int numpart;
    -  realT dist, innerplane;
    -
    -  if (!qh KEEPcoplanar && !qh KEEPinside) {
    -    FORALLfacets {
    -      if (facet->coplanarset)
    -        qh_setfree( &facet->coplanarset);
    -    }
    -  }else if (!qh KEEPcoplanar || !qh KEEPinside) {
    -    qh_outerinner(NULL, NULL, &innerplane);
    -    if (qh JOGGLEmax < REALmax/2)
    -      innerplane -= qh JOGGLEmax * sqrt((realT)qh hull_dim);
    -    numpart= 0;
    -    FORALLfacets {
    -      if (facet->coplanarset) {
    -        FOREACHpoint_(facet->coplanarset) {
    -          numpart++;
    -          qh_distplane(point, facet, &dist);
    -          if (dist < innerplane) {
    -            if (!qh KEEPinside)
    -              SETref_(point)= NULL;
    -          }else if (!qh KEEPcoplanar)
    -            SETref_(point)= NULL;
    -        }
    -        qh_setcompact(facet->coplanarset);
    -      }
    -    }
    -    zzadd_(Zcheckpart, numpart);
    -  }
    -} /* nearcoplanar */
    -
    -/*---------------------------------
    -
    -  qh_nearvertex( facet, point, bestdist )
    -    return nearest vertex in facet to point
    -
    -  returns:
    -    vertex and its distance
    -
    -  notes:
    -    if qh.DELAUNAY
    -      distance is measured in the input set
    -    searches neighboring tricoplanar facets (requires vertexneighbors)
    -      Slow implementation.  Recomputes vertex set for each point.
    -    The vertex set could be stored in the qh.keepcentrum facet.
    -*/
    -vertexT *qh_nearvertex(facetT *facet, pointT *point, realT *bestdistp) {
    -  realT bestdist= REALmax, dist;
    -  vertexT *bestvertex= NULL, *vertex, **vertexp, *apex;
    -  coordT *center;
    -  facetT *neighbor, **neighborp;
    -  setT *vertices;
    -  int dim= qh hull_dim;
    -
    -  if (qh DELAUNAY)
    -    dim--;
    -  if (facet->tricoplanar) {
    -    if (!qh VERTEXneighbors || !facet->center) {
    -      qh_fprintf(qh ferr, 6158, "qhull internal error (qh_nearvertex): qh.VERTEXneighbors and facet->center required for tricoplanar facets\n");
    -      qh_errexit(qh_ERRqhull, facet, NULL);
    -    }
    -    vertices= qh_settemp(qh TEMPsize);
    -    apex= SETfirstt_(facet->vertices, vertexT);
    -    center= facet->center;
    -    FOREACHneighbor_(apex) {
    -      if (neighbor->center == center) {
    -        FOREACHvertex_(neighbor->vertices)
    -          qh_setappend(&vertices, vertex);
    -      }
    -    }
    -  }else
    -    vertices= facet->vertices;
    -  FOREACHvertex_(vertices) {
    -    dist= qh_pointdist(vertex->point, point, -dim);
    -    if (dist < bestdist) {
    -      bestdist= dist;
    -      bestvertex= vertex;
    -    }
    -  }
    -  if (facet->tricoplanar)
    -    qh_settempfree(&vertices);
    -  *bestdistp= sqrt(bestdist);
    -  trace3((qh ferr, 3019, "qh_nearvertex: v%d dist %2.2g for f%d p%d\n",
    -        bestvertex->id, *bestdistp, facet->id, qh_pointid(point)));
    -  return bestvertex;
    -} /* nearvertex */
    -
    -/*---------------------------------
    -
    -  qh_newhashtable( newsize )
    -    returns size of qh.hash_table of at least newsize slots
    -
    -  notes:
    -    assumes qh.hash_table is NULL
    -    qh_HASHfactor determines the number of extra slots
    -    size is not divisible by 2, 3, or 5
    -*/
    -int qh_newhashtable(int newsize) {
    -  int size;
    -
    -  size= ((newsize+1)*qh_HASHfactor) | 0x1;  /* odd number */
    -  while (True) {
    -    if ((size%3) && (size%5))
    -      break;
    -    size += 2;
    -    /* loop terminates because there is an infinite number of primes */
    -  }
    -  qh hash_table= qh_setnew(size);
    -  qh_setzero(qh hash_table, 0, size);
    -  return size;
    -} /* newhashtable */
    -
    -/*---------------------------------
    -
    -  qh_newvertex( point )
    -    returns a new vertex for point
    -*/
    -vertexT *qh_newvertex(pointT *point) {
    -  vertexT *vertex;
    -
    -  zinc_(Ztotvertices);
    -  vertex= (vertexT *)qh_memalloc((int)sizeof(vertexT));
    -  memset((char *) vertex, (size_t)0, sizeof(vertexT));
    -  if (qh vertex_id == 0xFFFFFF) {
    -    qh_fprintf(qh ferr, 6159, "qhull input error: more than %d vertices.  ID field overflows and two vertices\n\
    -may have the same identifier.  Vertices not sorted correctly.\n", 0xFFFFFF);
    -    qh_errexit(qh_ERRinput, NULL, NULL);
    -  }
    -  if (qh vertex_id == qh tracevertex_id)
    -    qh tracevertex= vertex;
    -  vertex->id= qh vertex_id++;
    -  vertex->point= point;
    -  vertex->dim= (unsigned char)(qh hull_dim <= MAX_vdim ? qh hull_dim : 0);
    -  trace4((qh ferr, 4060, "qh_newvertex: vertex p%d(v%d) created\n", qh_pointid(vertex->point),
    -          vertex->id));
    -  return(vertex);
    -} /* newvertex */
    -
    -/*---------------------------------
    -
    -  qh_nextridge3d( atridge, facet, vertex )
    -    return next ridge and vertex for a 3d facet
    -    returns NULL on error
    -    [for QhullFacet::nextRidge3d] Does not call qh_errexit nor access qh_qh.
    -
    -  notes:
    -    in qh_ORIENTclock order
    -    this is a O(n^2) implementation to trace all ridges
    -    be sure to stop on any 2nd visit
    -    same as QhullRidge::nextRidge3d
    -    does not use qh_qh or qh_errexit [QhullFacet.cpp]
    -
    -  design:
    -    for each ridge
    -      exit if it is the ridge after atridge
    -*/
    -ridgeT *qh_nextridge3d(ridgeT *atridge, facetT *facet, vertexT **vertexp) {
    -  vertexT *atvertex, *vertex, *othervertex;
    -  ridgeT *ridge, **ridgep;
    -
    -  if ((atridge->top == facet) ^ qh_ORIENTclock)
    -    atvertex= SETsecondt_(atridge->vertices, vertexT);
    -  else
    -    atvertex= SETfirstt_(atridge->vertices, vertexT);
    -  FOREACHridge_(facet->ridges) {
    -    if (ridge == atridge)
    -      continue;
    -    if ((ridge->top == facet) ^ qh_ORIENTclock) {
    -      othervertex= SETsecondt_(ridge->vertices, vertexT);
    -      vertex= SETfirstt_(ridge->vertices, vertexT);
    -    }else {
    -      vertex= SETsecondt_(ridge->vertices, vertexT);
    -      othervertex= SETfirstt_(ridge->vertices, vertexT);
    -    }
    -    if (vertex == atvertex) {
    -      if (vertexp)
    -        *vertexp= othervertex;
    -      return ridge;
    -    }
    -  }
    -  return NULL;
    -} /* nextridge3d */
    -#else /* qh_NOmerge */
    -void qh_matchduplicates(facetT *atfacet, int atskip, int hashsize, int *hashcount) {
    -}
    -ridgeT *qh_nextridge3d(ridgeT *atridge, facetT *facet, vertexT **vertexp) {
    -
    -  return NULL;
    -}
    -#endif /* qh_NOmerge */
    -
    -/*---------------------------------
    -
    -  qh_outcoplanar()
    -    move points from all facets' outsidesets to their coplanarsets
    -
    -  notes:
    -    for post-processing under qh.NARROWhull
    -
    -  design:
    -    for each facet
    -      for each outside point for facet
    -        partition point into coplanar set
    -*/
    -void qh_outcoplanar(void /* facet_list */) {
    -  pointT *point, **pointp;
    -  facetT *facet;
    -  realT dist;
    -
    -  trace1((qh ferr, 1033, "qh_outcoplanar: move outsideset to coplanarset for qh NARROWhull\n"));
    -  FORALLfacets {
    -    FOREACHpoint_(facet->outsideset) {
    -      qh num_outside--;
    -      if (qh KEEPcoplanar || qh KEEPnearinside) {
    -        qh_distplane(point, facet, &dist);
    -        zinc_(Zpartition);
    -        qh_partitioncoplanar(point, facet, &dist);
    -      }
    -    }
    -    qh_setfree(&facet->outsideset);
    -  }
    -} /* outcoplanar */
    -
    -/*---------------------------------
    -
    -  qh_point( id )
    -    return point for a point id, or NULL if unknown
    -
    -  alternative code:
    -    return((pointT *)((unsigned   long)qh.first_point
    -           + (unsigned long)((id)*qh.normal_size)));
    -*/
    -pointT *qh_point(int id) {
    -
    -  if (id < 0)
    -    return NULL;
    -  if (id < qh num_points)
    -    return qh first_point + id * qh hull_dim;
    -  id -= qh num_points;
    -  if (id < qh_setsize(qh other_points))
    -    return SETelemt_(qh other_points, id, pointT);
    -  return NULL;
    -} /* point */
    -
    -/*---------------------------------
    -
    -  qh_point_add( set, point, elem )
    -    stores elem at set[point.id]
    -
    -  returns:
    -    access function for qh_pointfacet and qh_pointvertex
    -
    -  notes:
    -    checks point.id
    -*/
    -void qh_point_add(setT *set, pointT *point, void *elem) {
    -  int id, size;
    -
    -  SETreturnsize_(set, size);
    -  if ((id= qh_pointid(point)) < 0)
    -    qh_fprintf(qh ferr, 7067, "qhull internal warning (point_add): unknown point %p id %d\n",
    -      point, id);
    -  else if (id >= size) {
    -    qh_fprintf(qh ferr, 6160, "qhull internal errror(point_add): point p%d is out of bounds(%d)\n",
    -             id, size);
    -    qh_errexit(qh_ERRqhull, NULL, NULL);
    -  }else
    -    SETelem_(set, id)= elem;
    -} /* point_add */
    -
    -
    -/*---------------------------------
    -
    -  qh_pointfacet()
    -    return temporary set of facet for each point
    -    the set is indexed by point id
    -
    -  notes:
    -    vertices assigned to one of the facets
    -    coplanarset assigned to the facet
    -    outside set assigned to the facet
    -    NULL if no facet for point (inside)
    -      includes qh.GOODpointp
    -
    -  access:
    -    FOREACHfacet_i_(facets) { ... }
    -    SETelem_(facets, i)
    -
    -  design:
    -    for each facet
    -      add each vertex
    -      add each coplanar point
    -      add each outside point
    -*/
    -setT *qh_pointfacet(void /*qh facet_list*/) {
    -  int numpoints= qh num_points + qh_setsize(qh other_points);
    -  setT *facets;
    -  facetT *facet;
    -  vertexT *vertex, **vertexp;
    -  pointT *point, **pointp;
    -
    -  facets= qh_settemp(numpoints);
    -  qh_setzero(facets, 0, numpoints);
    -  qh vertex_visit++;
    -  FORALLfacets {
    -    FOREACHvertex_(facet->vertices) {
    -      if (vertex->visitid != qh vertex_visit) {
    -        vertex->visitid= qh vertex_visit;
    -        qh_point_add(facets, vertex->point, facet);
    -      }
    -    }
    -    FOREACHpoint_(facet->coplanarset)
    -      qh_point_add(facets, point, facet);
    -    FOREACHpoint_(facet->outsideset)
    -      qh_point_add(facets, point, facet);
    -  }
    -  return facets;
    -} /* pointfacet */
    -
    -/*---------------------------------
    -
    -  qh_pointvertex(  )
    -    return temporary set of vertices indexed by point id
    -    entry is NULL if no vertex for a point
    -      this will include qh.GOODpointp
    -
    -  access:
    -    FOREACHvertex_i_(vertices) { ... }
    -    SETelem_(vertices, i)
    -*/
    -setT *qh_pointvertex(void /*qh facet_list*/) {
    -  int numpoints= qh num_points + qh_setsize(qh other_points);
    -  setT *vertices;
    -  vertexT *vertex;
    -
    -  vertices= qh_settemp(numpoints);
    -  qh_setzero(vertices, 0, numpoints);
    -  FORALLvertices
    -    qh_point_add(vertices, vertex->point, vertex);
    -  return vertices;
    -} /* pointvertex */
    -
    -
    -/*---------------------------------
    -
    -  qh_prependfacet( facet, facetlist )
    -    prepend facet to the start of a facetlist
    -
    -  returns:
    -    increments qh.numfacets
    -    updates facetlist, qh.facet_list, facet_next
    -
    -  notes:
    -    be careful of prepending since it can lose a pointer.
    -      e.g., can lose _next by deleting and then prepending before _next
    -*/
    -void qh_prependfacet(facetT *facet, facetT **facetlist) {
    -  facetT *prevfacet, *list;
    -
    -
    -  trace4((qh ferr, 4061, "qh_prependfacet: prepend f%d before f%d\n",
    -          facet->id, getid_(*facetlist)));
    -  if (!*facetlist)
    -    (*facetlist)= qh facet_tail;
    -  list= *facetlist;
    -  prevfacet= list->previous;
    -  facet->previous= prevfacet;
    -  if (prevfacet)
    -    prevfacet->next= facet;
    -  list->previous= facet;
    -  facet->next= *facetlist;
    -  if (qh facet_list == list)  /* this may change *facetlist */
    -    qh facet_list= facet;
    -  if (qh facet_next == list)
    -    qh facet_next= facet;
    -  *facetlist= facet;
    -  qh num_facets++;
    -} /* prependfacet */
    -
    -
    -/*---------------------------------
    -
    -  qh_printhashtable( fp )
    -    print hash table to fp
    -
    -  notes:
    -    not in I/O to avoid bringing io.c in
    -
    -  design:
    -    for each hash entry
    -      if defined
    -        if unmatched or will merge (NULL, qh_MERGEridge, qh_DUPLICATEridge)
    -          print entry and neighbors
    -*/
    -void qh_printhashtable(FILE *fp) {
    -  facetT *facet, *neighbor;
    -  int id, facet_i, facet_n, neighbor_i= 0, neighbor_n= 0;
    -  vertexT *vertex, **vertexp;
    -
    -  FOREACHfacet_i_(qh hash_table) {
    -    if (facet) {
    -      FOREACHneighbor_i_(facet) {
    -        if (!neighbor || neighbor == qh_MERGEridge || neighbor == qh_DUPLICATEridge)
    -          break;
    -      }
    -      if (neighbor_i == neighbor_n)
    -        continue;
    -      qh_fprintf(fp, 9283, "hash %d f%d ", facet_i, facet->id);
    -      FOREACHvertex_(facet->vertices)
    -        qh_fprintf(fp, 9284, "v%d ", vertex->id);
    -      qh_fprintf(fp, 9285, "\n neighbors:");
    -      FOREACHneighbor_i_(facet) {
    -        if (neighbor == qh_MERGEridge)
    -          id= -3;
    -        else if (neighbor == qh_DUPLICATEridge)
    -          id= -2;
    -        else
    -          id= getid_(neighbor);
    -        qh_fprintf(fp, 9286, " %d", id);
    -      }
    -      qh_fprintf(fp, 9287, "\n");
    -    }
    -  }
    -} /* printhashtable */
    -
    -
    -/*---------------------------------
    -
    -  qh_printlists( fp )
    -    print out facet and vertex list for debugging (without 'f/v' tags)
    -*/
    -void qh_printlists(void) {
    -  facetT *facet;
    -  vertexT *vertex;
    -  int count= 0;
    -
    -  qh_fprintf(qh ferr, 8108, "qh_printlists: facets:");
    -  FORALLfacets {
    -    if (++count % 100 == 0)
    -      qh_fprintf(qh ferr, 8109, "\n     ");
    -    qh_fprintf(qh ferr, 8110, " %d", facet->id);
    -  }
    -  qh_fprintf(qh ferr, 8111, "\n  new facets %d visible facets %d next facet for qh_addpoint %d\n  vertices(new %d):",
    -     getid_(qh newfacet_list), getid_(qh visible_list), getid_(qh facet_next),
    -     getid_(qh newvertex_list));
    -  count = 0;
    -  FORALLvertices {
    -    if (++count % 100 == 0)
    -      qh_fprintf(qh ferr, 8112, "\n     ");
    -    qh_fprintf(qh ferr, 8113, " %d", vertex->id);
    -  }
    -  qh_fprintf(qh ferr, 8114, "\n");
    -} /* printlists */
    -
    -/*---------------------------------
    -
    -  qh_resetlists( stats, qh_RESETvisible )
    -    reset newvertex_list, newfacet_list, visible_list
    -    if stats,
    -      maintains statistics
    -
    -  returns:
    -    visible_list is empty if qh_deletevisible was called
    -*/
    -void qh_resetlists(boolT stats, boolT resetVisible /*qh newvertex_list newfacet_list visible_list*/) {
    -  vertexT *vertex;
    -  facetT *newfacet, *visible;
    -  int totnew=0, totver=0;
    -
    -  if (stats) {
    -    FORALLvertex_(qh newvertex_list)
    -      totver++;
    -    FORALLnew_facets
    -      totnew++;
    -    zadd_(Zvisvertextot, totver);
    -    zmax_(Zvisvertexmax, totver);
    -    zadd_(Znewfacettot, totnew);
    -    zmax_(Znewfacetmax, totnew);
    -  }
    -  FORALLvertex_(qh newvertex_list)
    -    vertex->newlist= False;
    -  qh newvertex_list= NULL;
    -  FORALLnew_facets
    -    newfacet->newfacet= False;
    -  qh newfacet_list= NULL;
    -  if (resetVisible) {
    -    FORALLvisible_facets {
    -      visible->f.replace= NULL;
    -      visible->visible= False;
    -    }
    -    qh num_visible= 0;
    -  }
    -  qh visible_list= NULL; /* may still have visible facets via qh_triangulate */
    -  qh NEWfacets= False;
    -} /* resetlists */
    -
    -/*---------------------------------
    -
    -  qh_setvoronoi_all()
    -    compute Voronoi centers for all facets
    -    includes upperDelaunay facets if qh.UPPERdelaunay ('Qu')
    -
    -  returns:
    -    facet->center is the Voronoi center
    -
    -  notes:
    -    this is unused/untested code
    -      please email bradb@shore.net if this works ok for you
    -
    -  use:
    -    FORALLvertices {...} to locate the vertex for a point.
    -    FOREACHneighbor_(vertex) {...} to visit the Voronoi centers for a Voronoi cell.
    -*/
    -void qh_setvoronoi_all(void) {
    -  facetT *facet;
    -
    -  qh_clearcenters(qh_ASvoronoi);
    -  qh_vertexneighbors();
    -
    -  FORALLfacets {
    -    if (!facet->normal || !facet->upperdelaunay || qh UPPERdelaunay) {
    -      if (!facet->center)
    -        facet->center= qh_facetcenter(facet->vertices);
    -    }
    -  }
    -} /* setvoronoi_all */
    -
    -#ifndef qh_NOmerge
    -
    -/*---------------------------------
    -
    -  qh_triangulate()
    -    triangulate non-simplicial facets on qh.facet_list,
    -    if qh VORONOI, sets Voronoi centers of non-simplicial facets
    -    nop if hasTriangulation
    -
    -  returns:
    -    all facets simplicial
    -    each tricoplanar facet has ->f.triowner == owner of ->center,normal,etc.
    -
    -  notes:
    -    call after qh_check_output since may switch to Voronoi centers
    -    Output may overwrite ->f.triowner with ->f.area
    -*/
    -void qh_triangulate(void /*qh facet_list*/) {
    -  facetT *facet, *nextfacet, *owner;
    -  int onlygood= qh ONLYgood;
    -  facetT *neighbor, *visible= NULL, *facet1, *facet2, *new_facet_list= NULL;
    -  facetT *orig_neighbor= NULL, *otherfacet;
    -  vertexT *new_vertex_list= NULL;
    -  mergeT *merge;
    -  mergeType mergetype;
    -  int neighbor_i, neighbor_n;
    -
    -  if (qh hasTriangulation)
    -      return;
    -  trace1((qh ferr, 1034, "qh_triangulate: triangulate non-simplicial facets\n"));
    -  if (qh hull_dim == 2)
    -    return;
    -  if (qh VORONOI) {  /* otherwise lose Voronoi centers [could rebuild vertex set from tricoplanar] */
    -    qh_clearcenters(qh_ASvoronoi);
    -    qh_vertexneighbors();
    -  }
    -  qh ONLYgood= False; /* for makenew_nonsimplicial */
    -  qh visit_id++;
    -  qh NEWfacets= True;
    -  qh degen_mergeset= qh_settemp(qh TEMPsize);
    -  qh newvertex_list= qh vertex_tail;
    -  for (facet= qh facet_list; facet && facet->next; facet= nextfacet) { /* non-simplicial facets moved to end */
    -    nextfacet= facet->next;
    -    if (facet->visible || facet->simplicial)
    -      continue;
    -    /* triangulate all non-simplicial facets, otherwise merging does not work, e.g., RBOX c P-0.1 P+0.1 P+0.1 D3 | QHULL d Qt Tv */
    -    if (!new_facet_list)
    -      new_facet_list= facet;  /* will be moved to end */
    -    qh_triangulate_facet(facet, &new_vertex_list);
    -  }
    -  trace2((qh ferr, 2047, "qh_triangulate: delete null facets from f%d -- apex same as second vertex\n", getid_(new_facet_list)));
    -  for (facet= new_facet_list; facet && facet->next; facet= nextfacet) { /* null facets moved to end */
    -    nextfacet= facet->next;
    -    if (facet->visible)
    -      continue;
    -    if (facet->ridges) {
    -      if (qh_setsize(facet->ridges) > 0) {
    -        qh_fprintf(qh ferr, 6161, "qhull error (qh_triangulate): ridges still defined for f%d\n", facet->id);
    -        qh_errexit(qh_ERRqhull, facet, NULL);
    -      }
    -      qh_setfree(&facet->ridges);
    -    }
    -    if (SETfirst_(facet->vertices) == SETsecond_(facet->vertices)) {
    -      zinc_(Ztrinull);
    -      qh_triangulate_null(facet);
    -    }
    -  }
    -  trace2((qh ferr, 2048, "qh_triangulate: delete %d or more mirror facets -- same vertices and neighbors\n", qh_setsize(qh degen_mergeset)));
    -  qh visible_list= qh facet_tail;
    -  while ((merge= (mergeT*)qh_setdellast(qh degen_mergeset))) {
    -    facet1= merge->facet1;
    -    facet2= merge->facet2;
    -    mergetype= merge->type;
    -    qh_memfree(merge, (int)sizeof(mergeT));
    -    if (mergetype == MRGmirror) {
    -      zinc_(Ztrimirror);
    -      qh_triangulate_mirror(facet1, facet2);
    -    }
    -  }
    -  qh_settempfree(&qh degen_mergeset);
    -  trace2((qh ferr, 2049, "qh_triangulate: update neighbor lists for vertices from v%d\n", getid_(new_vertex_list)));
    -  qh newvertex_list= new_vertex_list;  /* all vertices of new facets */
    -  qh visible_list= NULL;
    -  qh_updatevertices(/*qh newvertex_list, empty newfacet_list and visible_list*/);
    -  qh_resetlists(False, !qh_RESETvisible /*qh newvertex_list, empty newfacet_list and visible_list*/);
    -
    -  trace2((qh ferr, 2050, "qh_triangulate: identify degenerate tricoplanar facets from f%d\n", getid_(new_facet_list)));
    -  trace2((qh ferr, 2051, "qh_triangulate: and replace facet->f.triowner with tricoplanar facets that own center, normal, etc.\n"));
    -  FORALLfacet_(new_facet_list) {
    -    if (facet->tricoplanar && !facet->visible) {
    -      FOREACHneighbor_i_(facet) {
    -        if (neighbor_i == 0) {  /* first iteration */
    -          if (neighbor->tricoplanar)
    -            orig_neighbor= neighbor->f.triowner;
    -          else
    -            orig_neighbor= neighbor;
    -        }else {
    -          if (neighbor->tricoplanar)
    -            otherfacet= neighbor->f.triowner;
    -          else
    -            otherfacet= neighbor;
    -          if (orig_neighbor == otherfacet) {
    -            zinc_(Ztridegen);
    -            facet->degenerate= True;
    -            break;
    -          }
    -        }
    -      }
    -    }
    -  }
    -
    -  trace2((qh ferr, 2052, "qh_triangulate: delete visible facets -- non-simplicial, null, and mirrored facets\n"));
    -  owner= NULL;
    -  visible= NULL;
    -  for (facet= new_facet_list; facet && facet->next; facet= nextfacet) { /* may delete facet */
    -    nextfacet= facet->next;
    -    if (facet->visible) {
    -      if (facet->tricoplanar) { /* a null or mirrored facet */
    -        qh_delfacet(facet);
    -        qh num_visible--;
    -      }else {  /* a non-simplicial facet followed by its tricoplanars */
    -        if (visible && !owner) {
    -          /*  RBOX 200 s D5 t1001471447 | QHULL Qt C-0.01 Qx Qc Tv Qt -- f4483 had 6 vertices/neighbors and 8 ridges */
    -          trace2((qh ferr, 2053, "qh_triangulate: all tricoplanar facets degenerate for non-simplicial facet f%d\n",
    -                       visible->id));
    -          qh_delfacet(visible);
    -          qh num_visible--;
    -        }
    -        visible= facet;
    -        owner= NULL;
    -      }
    -    }else if (facet->tricoplanar) {
    -      if (facet->f.triowner != visible) {
    -        qh_fprintf(qh ferr, 6162, "qhull error (qh_triangulate): tricoplanar facet f%d not owned by its visible, non-simplicial facet f%d\n", facet->id, getid_(visible));
    -        qh_errexit2 (qh_ERRqhull, facet, visible);
    -      }
    -      if (owner)
    -        facet->f.triowner= owner;
    -      else if (!facet->degenerate) {
    -        owner= facet;
    -        nextfacet= visible->next; /* rescan tricoplanar facets with owner */
    -        facet->keepcentrum= True;  /* one facet owns ->normal, etc. */
    -        facet->coplanarset= visible->coplanarset;
    -        facet->outsideset= visible->outsideset;
    -        visible->coplanarset= NULL;
    -        visible->outsideset= NULL;
    -        if (!qh TRInormals) { /* center and normal copied to tricoplanar facets */
    -          visible->center= NULL;
    -          visible->normal= NULL;
    -        }
    -        qh_delfacet(visible);
    -        qh num_visible--;
    -      }
    -    }
    -  }
    -  if (visible && !owner) {
    -    trace2((qh ferr, 2054, "qh_triangulate: all tricoplanar facets degenerate for last non-simplicial facet f%d\n",
    -                 visible->id));
    -    qh_delfacet(visible);
    -    qh num_visible--;
    -  }
    -  qh NEWfacets= False;
    -  qh ONLYgood= onlygood; /* restore value */
    -  if (qh CHECKfrequently)
    -    qh_checkpolygon(qh facet_list);
    -  qh hasTriangulation= True;
    -} /* triangulate */
    -
    -
    -/*---------------------------------
    -
    -  qh_triangulate_facet(facetA)
    -    triangulate a non-simplicial facet
    -      if qh.CENTERtype=qh_ASvoronoi, sets its Voronoi center
    -  returns:
    -    qh.newfacet_list == simplicial facets
    -      facet->tricoplanar set and ->keepcentrum false
    -      facet->degenerate set if duplicated apex
    -      facet->f.trivisible set to facetA
    -      facet->center copied from facetA (created if qh_ASvoronoi)
    -        qh_eachvoronoi, qh_detvridge, qh_detvridge3 assume centers copied
    -      facet->normal,offset,maxoutside copied from facetA
    -
    -  notes:
    -      qh_makenew_nonsimplicial uses neighbor->seen for the same
    -
    -  see also:
    -      qh_addpoint() -- add a point
    -      qh_makenewfacets() -- construct a cone of facets for a new vertex
    -
    -  design:
    -      if qh_ASvoronoi,
    -         compute Voronoi center (facet->center)
    -      select first vertex (highest ID to preserve ID ordering of ->vertices)
    -      triangulate from vertex to ridges
    -      copy facet->center, normal, offset
    -      update vertex neighbors
    -*/
    -void qh_triangulate_facet(facetT *facetA, vertexT **first_vertex) {
    -  facetT *newfacet;
    -  facetT *neighbor, **neighborp;
    -  vertexT *apex;
    -  int numnew=0;
    -
    -  trace3((qh ferr, 3020, "qh_triangulate_facet: triangulate facet f%d\n", facetA->id));
    -
    -  if (qh IStracing >= 4)
    -    qh_printfacet(qh ferr, facetA);
    -  FOREACHneighbor_(facetA) {
    -    neighbor->seen= False;
    -    neighbor->coplanar= False;
    -  }
    -  if (qh CENTERtype == qh_ASvoronoi && !facetA->center  /* matches upperdelaunay in qh_setfacetplane() */
    -        && fabs_(facetA->normal[qh hull_dim -1]) >= qh ANGLEround * qh_ZEROdelaunay) {
    -    facetA->center= qh_facetcenter(facetA->vertices);
    -  }
    -  qh_willdelete(facetA, NULL);
    -  qh newfacet_list= qh facet_tail;
    -  facetA->visitid= qh visit_id;
    -  apex= SETfirstt_(facetA->vertices, vertexT);
    -  qh_makenew_nonsimplicial(facetA, apex, &numnew);
    -  SETfirst_(facetA->neighbors)= NULL;
    -  FORALLnew_facets {
    -    newfacet->tricoplanar= True;
    -    newfacet->f.trivisible= facetA;
    -    newfacet->degenerate= False;
    -    newfacet->upperdelaunay= facetA->upperdelaunay;
    -    newfacet->good= facetA->good;
    -    if (qh TRInormals) {
    -      newfacet->keepcentrum= True;
    -      newfacet->normal= qh_copypoints(facetA->normal, 1, qh hull_dim);
    -      if (qh CENTERtype == qh_AScentrum)
    -        newfacet->center= qh_getcentrum(newfacet);
    -      else
    -        newfacet->center= qh_copypoints(facetA->center, 1, qh hull_dim);
    -    }else {
    -      newfacet->keepcentrum= False;
    -      newfacet->normal= facetA->normal;
    -      newfacet->center= facetA->center;
    -    }
    -    newfacet->offset= facetA->offset;
    -#if qh_MAXoutside
    -    newfacet->maxoutside= facetA->maxoutside;
    -#endif
    -  }
    -  qh_matchnewfacets(/*qh newfacet_list*/);
    -  zinc_(Ztricoplanar);
    -  zadd_(Ztricoplanartot, numnew);
    -  zmax_(Ztricoplanarmax, numnew);
    -  qh visible_list= NULL;
    -  if (!(*first_vertex))
    -    (*first_vertex)= qh newvertex_list;
    -  qh newvertex_list= NULL;
    -  qh_updatevertices(/*qh newfacet_list, empty visible_list and newvertex_list*/);
    -  qh_resetlists(False, !qh_RESETvisible /*qh newfacet_list, empty visible_list and newvertex_list*/);
    -} /* triangulate_facet */
    -
    -/*---------------------------------
    -
    -  qh_triangulate_link(oldfacetA, facetA, oldfacetB, facetB)
    -    relink facetA to facetB via oldfacets
    -  returns:
    -    adds mirror facets to qh degen_mergeset (4-d and up only)
    -  design:
    -    if they are already neighbors, the opposing neighbors become MRGmirror facets
    -*/
    -void qh_triangulate_link(facetT *oldfacetA, facetT *facetA, facetT *oldfacetB, facetT *facetB) {
    -  int errmirror= False;
    -
    -  trace3((qh ferr, 3021, "qh_triangulate_link: relink old facets f%d and f%d between neighbors f%d and f%d\n",
    -         oldfacetA->id, oldfacetB->id, facetA->id, facetB->id));
    -  if (qh_setin(facetA->neighbors, facetB)) {
    -    if (!qh_setin(facetB->neighbors, facetA))
    -      errmirror= True;
    -    else
    -      qh_appendmergeset(facetA, facetB, MRGmirror, NULL);
    -  }else if (qh_setin(facetB->neighbors, facetA))
    -    errmirror= True;
    -  if (errmirror) {
    -    qh_fprintf(qh ferr, 6163, "qhull error (qh_triangulate_link): mirror facets f%d and f%d do not match for old facets f%d and f%d\n",
    -       facetA->id, facetB->id, oldfacetA->id, oldfacetB->id);
    -    qh_errexit2 (qh_ERRqhull, facetA, facetB);
    -  }
    -  qh_setreplace(facetB->neighbors, oldfacetB, facetA);
    -  qh_setreplace(facetA->neighbors, oldfacetA, facetB);
    -} /* triangulate_link */
    -
    -/*---------------------------------
    -
    -  qh_triangulate_mirror(facetA, facetB)
    -    delete mirrored facets from qh_triangulate_null() and qh_triangulate_mirror
    -      a mirrored facet shares the same vertices of a logical ridge
    -  design:
    -    since a null facet duplicates the first two vertices, the opposing neighbors absorb the null facet
    -    if they are already neighbors, the opposing neighbors become MRGmirror facets
    -*/
    -void qh_triangulate_mirror(facetT *facetA, facetT *facetB) {
    -  facetT *neighbor, *neighborB;
    -  int neighbor_i, neighbor_n;
    -
    -  trace3((qh ferr, 3022, "qh_triangulate_mirror: delete mirrored facets f%d and f%d\n",
    -         facetA->id, facetB->id));
    -  FOREACHneighbor_i_(facetA) {
    -    neighborB= SETelemt_(facetB->neighbors, neighbor_i, facetT);
    -    if (neighbor == neighborB)
    -      continue; /* occurs twice */
    -    qh_triangulate_link(facetA, neighbor, facetB, neighborB);
    -  }
    -  qh_willdelete(facetA, NULL);
    -  qh_willdelete(facetB, NULL);
    -} /* triangulate_mirror */
    -
    -/*---------------------------------
    -
    -  qh_triangulate_null(facetA)
    -    remove null facetA from qh_triangulate_facet()
    -      a null facet has vertex #1 (apex) == vertex #2
    -  returns:
    -    adds facetA to ->visible for deletion after qh_updatevertices
    -    qh degen_mergeset contains mirror facets (4-d and up only)
    -  design:
    -    since a null facet duplicates the first two vertices, the opposing neighbors absorb the null facet
    -    if they are already neighbors, the opposing neighbors become MRGmirror facets
    -*/
    -void qh_triangulate_null(facetT *facetA) {
    -  facetT *neighbor, *otherfacet;
    -
    -  trace3((qh ferr, 3023, "qh_triangulate_null: delete null facet f%d\n", facetA->id));
    -  neighbor= SETfirstt_(facetA->neighbors, facetT);
    -  otherfacet= SETsecondt_(facetA->neighbors, facetT);
    -  qh_triangulate_link(facetA, neighbor, facetA, otherfacet);
    -  qh_willdelete(facetA, NULL);
    -} /* triangulate_null */
    -
    -#else /* qh_NOmerge */
    -void qh_triangulate(void) {
    -}
    -#endif /* qh_NOmerge */
    -
    -   /*---------------------------------
    -
    -  qh_vertexintersect( vertexsetA, vertexsetB )
    -    intersects two vertex sets (inverse id ordered)
    -    vertexsetA is a temporary set at the top of qhmem.tempstack
    -
    -  returns:
    -    replaces vertexsetA with the intersection
    -
    -  notes:
    -    could overwrite vertexsetA if currently too slow
    -*/
    -void qh_vertexintersect(setT **vertexsetA,setT *vertexsetB) {
    -  setT *intersection;
    -
    -  intersection= qh_vertexintersect_new(*vertexsetA, vertexsetB);
    -  qh_settempfree(vertexsetA);
    -  *vertexsetA= intersection;
    -  qh_settemppush(intersection);
    -} /* vertexintersect */
    -
    -/*---------------------------------
    -
    -  qh_vertexintersect_new(  )
    -    intersects two vertex sets (inverse id ordered)
    -
    -  returns:
    -    a new set
    -*/
    -setT *qh_vertexintersect_new(setT *vertexsetA,setT *vertexsetB) {
    -  setT *intersection= qh_setnew(qh hull_dim - 1);
    -  vertexT **vertexA= SETaddr_(vertexsetA, vertexT);
    -  vertexT **vertexB= SETaddr_(vertexsetB, vertexT);
    -
    -  while (*vertexA && *vertexB) {
    -    if (*vertexA  == *vertexB) {
    -      qh_setappend(&intersection, *vertexA);
    -      vertexA++; vertexB++;
    -    }else {
    -      if ((*vertexA)->id > (*vertexB)->id)
    -        vertexA++;
    -      else
    -        vertexB++;
    -    }
    -  }
    -  return intersection;
    -} /* vertexintersect_new */
    -
    -/*---------------------------------
    -
    -  qh_vertexneighbors()
    -    for each vertex in qh.facet_list,
    -      determine its neighboring facets
    -
    -  returns:
    -    sets qh.VERTEXneighbors
    -      nop if qh.VERTEXneighbors already set
    -      qh_addpoint() will maintain them
    -
    -  notes:
    -    assumes all vertex->neighbors are NULL
    -
    -  design:
    -    for each facet
    -      for each vertex
    -        append facet to vertex->neighbors
    -*/
    -void qh_vertexneighbors(void /*qh facet_list*/) {
    -  facetT *facet;
    -  vertexT *vertex, **vertexp;
    -
    -  if (qh VERTEXneighbors)
    -    return;
    -  trace1((qh ferr, 1035, "qh_vertexneighbors: determing neighboring facets for each vertex\n"));
    -  qh vertex_visit++;
    -  FORALLfacets {
    -    if (facet->visible)
    -      continue;
    -    FOREACHvertex_(facet->vertices) {
    -      if (vertex->visitid != qh vertex_visit) {
    -        vertex->visitid= qh vertex_visit;
    -        vertex->neighbors= qh_setnew(qh hull_dim);
    -      }
    -      qh_setappend(&vertex->neighbors, facet);
    -    }
    -  }
    -  qh VERTEXneighbors= True;
    -} /* vertexneighbors */
    -
    -/*---------------------------------
    -
    -  qh_vertexsubset( vertexsetA, vertexsetB )
    -    returns True if vertexsetA is a subset of vertexsetB
    -    assumes vertexsets are sorted
    -
    -  note:
    -    empty set is a subset of any other set
    -*/
    -boolT qh_vertexsubset(setT *vertexsetA, setT *vertexsetB) {
    -  vertexT **vertexA= (vertexT **) SETaddr_(vertexsetA, vertexT);
    -  vertexT **vertexB= (vertexT **) SETaddr_(vertexsetB, vertexT);
    -
    -  while (True) {
    -    if (!*vertexA)
    -      return True;
    -    if (!*vertexB)
    -      return False;
    -    if ((*vertexA)->id > (*vertexB)->id)
    -      return False;
    -    if (*vertexA  == *vertexB)
    -      vertexA++;
    -    vertexB++;
    -  }
    -  return False; /* avoid warnings */
    -} /* vertexsubset */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/qhull.h b/scipy-0.10.1/scipy/spatial/qhull/src/qhull.h
    deleted file mode 100644
    index 714d8e8b42..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/qhull.h
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -/*
      ---------------------------------
    -
    -   qhull.h
    -
    -   Proxy for libqhull.h for backwards compatability
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/qhull.h#51 $$Change: 1188 $
    -   $DateTime: 2010/01/14 22:35:43 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFqhull
    -#define qhDEFqhull 1
    -
    -#include "libqhull.h"
    -
    -#endif /* qhDEFqhull */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/qhull_a.h b/scipy-0.10.1/scipy/spatial/qhull/src/qhull_a.h
    deleted file mode 100644
    index 521c3599fb..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/qhull_a.h
    +++ /dev/null
    @@ -1,149 +0,0 @@
    -/*
      ---------------------------------
    -
    -   qhull_a.h
    -   all header files for compiling qhull
    -
    -   see qh-qhull.htm
    -
    -   see libqhull.h for user-level definitions
    -
    -   see user.h for user-defineable constants
    -
    -   defines internal functions for libqhull.c global.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/qhull_a.h#27 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -
    -   Notes:  grep for ((" and (" to catch fprintf("lkasdjf");
    -           full parens around (x?y:z)
    -           use '#include qhull/qhull_a.h' to avoid name clashes
    -*/
    -
    -#ifndef qhDEFqhulla
    -#define qhDEFqhulla 1
    -
    -#include "libqhull.h"  /* Defines data types */
    -
    -#include "stat.h"
    -#include "random.h"
    -#include "mem.h"
    -#include "qset.h"
    -#include "geom.h"
    -#include "merge.h"
    -#include "poly.h"
    -#include "io.h"
    -
    -#include 
    -#include 
    -#include 
    -#include     /* some compilers will not need float.h */
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -/*** uncomment here and qset.c
    -     if string.h does not define memcpy()
    -#include 
    -*/
    -
    -#if qh_CLOCKtype == 2  /* defined in user.h from libqhull.h */
    -#include 
    -#include 
    -#include 
    -#endif
    -
    -#ifdef _MSC_VER  /* Microsoft Visual C++ -- warning level 4 */
    -#pragma warning( disable : 4100)  /* unreferenced formal parameter */
    -#pragma warning( disable : 4127)  /* conditional expression is constant */
    -#pragma warning( disable : 4706)  /* assignment within conditional function */
    -#pragma warning( disable : 4996)  /* function was declared deprecated(strcpy, localtime, etc.) */
    -#endif
    -
    -/* ======= -macros- =========== */
    -
    -/*----------------------------------
    -
    -  traceN((qh ferr, 0Nnnn, "format\n", vars));
    -    calls qh_fprintf if qh.IStracing >= N
    -
    -    Add debugging traps to the end of qh_fprintf
    -
    -  notes:
    -    removing tracing reduces code size but doesn't change execution speed
    -*/
    -#ifndef qh_NOtrace
    -#define trace0(args) {if (qh IStracing) qh_fprintf args;}
    -#define trace1(args) {if (qh IStracing >= 1) qh_fprintf args;}
    -#define trace2(args) {if (qh IStracing >= 2) qh_fprintf args;}
    -#define trace3(args) {if (qh IStracing >= 3) qh_fprintf args;}
    -#define trace4(args) {if (qh IStracing >= 4) qh_fprintf args;}
    -#define trace5(args) {if (qh IStracing >= 5) qh_fprintf args;}
    -#else /* qh_NOtrace */
    -#define trace0(args) {}
    -#define trace1(args) {}
    -#define trace2(args) {}
    -#define trace3(args) {}
    -#define trace4(args) {}
    -#define trace5(args) {}
    -#endif /* qh_NOtrace */
    -
    -/*----------------------------------
    -
    -*/
    -
    -/* See Qt's qglobal.h */
    -#if !defined(SAG_COM) && (defined(WIN64) || defined(_WIN64) || defined(__WIN64__) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__))
    -#   define QHULL_OS_WIN
    -#elif defined(__MWERKS__) && defined(__INTEL__)
    -#   define QHULL_OS_WIN
    -#endif
    -#if defined(__INTEL_COMPILER) && !defined(QHULL_OS_WIN)
    -#  define QHULL_UNUSED(x) (x)
    -#else
    -#  define QHULL_UNUSED(x) (void)x;
    -#endif
    -
    -/***** -libqhull.c prototypes (alphabetical after qhull) ********************/
    -
    -void    qh_qhull(void);
    -boolT   qh_addpoint(pointT *furthest, facetT *facet, boolT checkdist);
    -void    qh_buildhull(void);
    -void    qh_buildtracing(pointT *furthest, facetT *facet);
    -void    qh_build_withrestart(void);
    -void    qh_errexit2(int exitcode, facetT *facet, facetT *otherfacet);
    -void    qh_findhorizon(pointT *point, facetT *facet, int *goodvisible,int *goodhorizon);
    -pointT *qh_nextfurthest(facetT **visible);
    -void    qh_partitionall(setT *vertices, pointT *points,int npoints);
    -void    qh_partitioncoplanar(pointT *point, facetT *facet, realT *dist);
    -void    qh_partitionpoint(pointT *point, facetT *facet);
    -void    qh_partitionvisible(boolT allpoints, int *numpoints);
    -void    qh_precision(const char *reason);
    -void    qh_printsummary(FILE *fp);
    -
    -/***** -global.c internal prototypes (alphabetical) ***********************/
    -
    -void    qh_appendprint(qh_PRINT format);
    -void    qh_freebuild(boolT allmem);
    -void    qh_freebuffers(void);
    -void    qh_initbuffers(coordT *points, int numpoints, int dim, boolT ismalloc);
    -
    -/***** -stat.c internal prototypes (alphabetical) ***********************/
    -
    -void    qh_allstatA(void);
    -void    qh_allstatB(void);
    -void    qh_allstatC(void);
    -void    qh_allstatD(void);
    -void    qh_allstatE(void);
    -void    qh_allstatE2 (void);
    -void    qh_allstatF(void);
    -void    qh_allstatG(void);
    -void    qh_allstatH(void);
    -void    qh_freebuffers(void);
    -void    qh_initbuffers(coordT *points, int numpoints, int dim, boolT ismalloc);
    -
    -#endif /* qhDEFqhulla */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/qset.c b/scipy-0.10.1/scipy/spatial/qhull/src/qset.c
    deleted file mode 100644
    index 08082b403c..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/qset.c
    +++ /dev/null
    @@ -1,1299 +0,0 @@
    -/*
      ---------------------------------
    -
    -   qset.c
    -   implements set manipulations needed for quickhull
    -
    -   see qh-set.htm and qset.h
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/qset.c#29 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#include "qset.h"
    -#include "mem.h"
    -#include 
    -#include 
    -/*** uncomment here and qhull_a.h
    -     if string.h does not define memcpy()
    -#include 
    -*/
    -
    -#ifndef qhDEFlibqhull
    -typedef struct ridgeT ridgeT;
    -typedef struct facetT facetT;
    -void    qh_errexit(int exitcode, facetT *, ridgeT *);
    -void    qh_fprintf(FILE *fp, int msgcode, const char *fmt, ... );
    -#  ifdef _MSC_VER  /* Microsoft Visual C++ -- warning level 4 */
    -#  pragma warning( disable : 4127)  /* conditional expression is constant */
    -#  pragma warning( disable : 4706)  /* assignment within conditional function */
    -#  endif
    -#endif
    -
    -/*=============== internal macros ===========================*/
    -
    -/*============ functions in alphabetical order ===================*/
    -
    -/*----------------------------------
    -
    -  qh_setaddnth( setp, nth, newelem)
    -    adds newelem as n'th element of sorted or unsorted *setp
    -
    -  notes:
    -    *setp and newelem must be defined
    -    *setp may be a temp set
    -    nth=0 is first element
    -    errors if nth is out of bounds
    -
    -  design:
    -    expand *setp if empty or full
    -    move tail of *setp up one
    -    insert newelem
    -*/
    -void qh_setaddnth(setT **setp, int nth, void *newelem) {
    -  int *sizep, oldsize, i;
    -  void **oldp, **newp;
    -
    -  if (!*setp || !*(sizep= SETsizeaddr_(*setp))) {
    -    qh_setlarger(setp);
    -    sizep= SETsizeaddr_(*setp);
    -  }
    -  oldsize= *sizep - 1;
    -  if (nth < 0 || nth > oldsize) {
    -    qh_fprintf(qhmem.ferr, 6171, "qhull internal error (qh_setaddnth): nth %d is out-of-bounds for set:\n", nth);
    -    qh_setprint(qhmem.ferr, "", *setp);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  (*sizep)++;
    -  oldp= SETelemaddr_(*setp, oldsize, void);   /* NULL */
    -  newp= oldp+1;
    -  for (i=oldsize-nth+1; i--; )  /* move at least NULL  */
    -    *(newp--)= *(oldp--);       /* may overwrite *sizep */
    -  *newp= newelem;
    -} /* setaddnth */
    -
    -
    -/*----------------------------------
    -
    -  setaddsorted( setp, newelem )
    -    adds an newelem into sorted *setp
    -
    -  notes:
    -    *setp and newelem must be defined
    -    *setp may be a temp set
    -    nop if newelem already in set
    -
    -  design:
    -    find newelem's position in *setp
    -    insert newelem
    -*/
    -void qh_setaddsorted(setT **setp, void *newelem) {
    -  int newindex=0;
    -  void *elem, **elemp;
    -
    -  FOREACHelem_(*setp) {          /* could use binary search instead */
    -    if (elem < newelem)
    -      newindex++;
    -    else if (elem == newelem)
    -      return;
    -    else
    -      break;
    -  }
    -  qh_setaddnth(setp, newindex, newelem);
    -} /* setaddsorted */
    -
    -
    -/*---------------------------------
    -
    -  qh_setappend( setp, newelem)
    -    append newelem to *setp
    -
    -  notes:
    -    *setp may be a temp set
    -    *setp and newelem may be NULL
    -
    -  design:
    -    expand *setp if empty or full
    -    append newelem to *setp
    -
    -*/
    -void qh_setappend(setT **setp, void *newelem) {
    -  int *sizep, end_idx;
    -
    -  if (!newelem)
    -    return;
    -  if (!*setp || !*(sizep= SETsizeaddr_(*setp))) {
    -    qh_setlarger(setp);
    -    sizep= SETsizeaddr_(*setp);
    -  }
    -  end_idx = (*sizep)++ - 1;
    -  (*setp)->e[end_idx].p = newelem;
    -  (*setp)->e[end_idx + 1].p = NULL;
    -} /* setappend */
    -
    -/*---------------------------------
    -
    -  qh_setappend_set( setp, setA)
    -    appends setA to *setp
    -
    -  notes:
    -    *setp can not be a temp set
    -    *setp and setA may be NULL
    -
    -  design:
    -    setup for copy
    -    expand *setp if it is too small
    -    append all elements of setA to *setp
    -*/
    -void qh_setappend_set(setT **setp, setT *setA) {
    -  int *sizep, sizeA, size;
    -  setT *oldset;
    -
    -  if (!setA)
    -    return;
    -  SETreturnsize_(setA, sizeA);
    -  if (!*setp)
    -    *setp= qh_setnew(sizeA);
    -  sizep= SETsizeaddr_(*setp);
    -  if (!(size= *sizep))
    -    size= (*setp)->maxsize;
    -  else
    -    size--;
    -  if (size + sizeA > (*setp)->maxsize) {
    -    oldset= *setp;
    -    *setp= qh_setcopy(oldset, sizeA);
    -    qh_setfree(&oldset);
    -    sizep= SETsizeaddr_(*setp);
    -  }
    -  *sizep= size+sizeA+1;   /* memcpy may overwrite */
    -  if (sizeA > 0)
    -    memcpy((char *)&((*setp)->e[size].p), (char *)&(setA->e[0].p), (size_t)(sizeA+1) * SETelemsize);
    -} /* setappend_set */
    -
    -
    -/*---------------------------------
    -
    -  qh_setappend2ndlast( setp, newelem )
    -    makes newelem the next to the last element in *setp
    -
    -  notes:
    -    *setp must have at least one element
    -    newelem must be defined
    -    *setp may be a temp set
    -
    -  design:
    -    expand *setp if empty or full
    -    move last element of *setp up one
    -    insert newelem
    -*/
    -void qh_setappend2ndlast(setT **setp, void *newelem) {
    -  int *sizep;
    -  void **endp, **lastp;
    -
    -  if (!*setp || !*(sizep= SETsizeaddr_(*setp))) {
    -    qh_setlarger(setp);
    -    sizep= SETsizeaddr_(*setp);
    -  }
    -  endp= SETelemaddr_(*setp, (*sizep)++ -1, void); /* NULL */
    -  lastp= endp-1;
    -  *(endp++)= *lastp;
    -  *endp= NULL;    /* may overwrite *sizep */
    -  *lastp= newelem;
    -} /* setappend2ndlast */
    -
    -
    -/*---------------------------------
    -
    -  qh_setcheck( set, typename, id )
    -    check set for validity
    -    report errors with typename and id
    -
    -  design:
    -    checks that maxsize, actual size, and NULL terminator agree
    -*/
    -void qh_setcheck(setT *set, const char *tname, unsigned id) {
    -  int maxsize, size;
    -  int waserr= 0;
    -
    -  if (!set)
    -    return;
    -  SETreturnsize_(set, size);
    -  maxsize= set->maxsize;
    -  if (size > maxsize || !maxsize) {
    -    qh_fprintf(qhmem.ferr, 6172, "qhull internal error (qh_setcheck): actual size %d of %s%d is greater than max size %d\n",
    -             size, tname, id, maxsize);
    -    waserr= 1;
    -  }else if (set->e[size].p) {
    -    qh_fprintf(qhmem.ferr, 6173, "qhull internal error (qh_setcheck): %s%d(size %d max %d) is not null terminated.\n",
    -             tname, id, maxsize, size-1);
    -    waserr= 1;
    -  }
    -  if (waserr) {
    -    qh_setprint(qhmem.ferr, "ERRONEOUS", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -} /* setcheck */
    -
    -
    -/*---------------------------------
    -
    -  qh_setcompact( set )
    -    remove internal NULLs from an unsorted set
    -
    -  returns:
    -    updated set
    -
    -  notes:
    -    set may be NULL
    -    it would be faster to swap tail of set into holes, like qh_setdel
    -
    -  design:
    -    setup pointers into set
    -    skip NULLs while copying elements to start of set
    -    update the actual size
    -*/
    -void qh_setcompact(setT *set) {
    -  int size;
    -  void **destp, **elemp, **endp, **firstp;
    -
    -  if (!set)
    -    return;
    -  SETreturnsize_(set, size);
    -  destp= elemp= firstp= SETaddr_(set, void);
    -  endp= destp + size;
    -  while (1) {
    -    if (!(*destp++ = *elemp++)) {
    -      destp--;
    -      if (elemp > endp)
    -        break;
    -    }
    -  }
    -  qh_settruncate(set, (int)(destp-firstp));   /* WARN64 */
    -} /* setcompact */
    -
    -
    -/*---------------------------------
    -
    -  qh_setcopy( set, extra )
    -    make a copy of a sorted or unsorted set with extra slots
    -
    -  returns:
    -    new set
    -
    -  design:
    -    create a newset with extra slots
    -    copy the elements to the newset
    -
    -*/
    -setT *qh_setcopy(setT *set, int extra) {
    -  setT *newset;
    -  int size;
    -
    -  if (extra < 0)
    -    extra= 0;
    -  SETreturnsize_(set, size);
    -  newset= qh_setnew(size+extra);
    -  *SETsizeaddr_(newset)= size+1;    /* memcpy may overwrite */
    -  memcpy((char *)&(newset->e[0].p), (char *)&(set->e[0].p), (size_t)(size+1) * SETelemsize);
    -  return(newset);
    -} /* setcopy */
    -
    -
    -/*---------------------------------
    -
    -  qh_setdel( set, oldelem )
    -    delete oldelem from an unsorted set
    -
    -  returns:
    -    returns oldelem if found
    -    returns NULL otherwise
    -
    -  notes:
    -    set may be NULL
    -    oldelem must not be NULL;
    -    only deletes one copy of oldelem in set
    -
    -  design:
    -    locate oldelem
    -    update actual size if it was full
    -    move the last element to the oldelem's location
    -*/
    -void *qh_setdel(setT *set, void *oldelem) {
    -  void **elemp, **lastp;
    -  int *sizep;
    -
    -  if (!set)
    -    return NULL;
    -  elemp= SETaddr_(set, void);
    -  while (*elemp != oldelem && *elemp)
    -    elemp++;
    -  if (*elemp) {
    -    sizep= SETsizeaddr_(set);
    -    if (!(*sizep)--)         /*  if was a full set */
    -      *sizep= set->maxsize;  /*     *sizep= (maxsize-1)+ 1 */
    -    lastp= SETelemaddr_(set, *sizep-1, void);
    -    *elemp= *lastp;      /* may overwrite itself */
    -    *lastp= NULL;
    -    return oldelem;
    -  }
    -  return NULL;
    -} /* setdel */
    -
    -
    -/*---------------------------------
    -
    -  qh_setdellast( set)
    -    return last element of set or NULL
    -
    -  notes:
    -    deletes element from set
    -    set may be NULL
    -
    -  design:
    -    return NULL if empty
    -    if full set
    -      delete last element and set actual size
    -    else
    -      delete last element and update actual size
    -*/
    -void *qh_setdellast(setT *set) {
    -  int setsize;  /* actually, actual_size + 1 */
    -  int maxsize;
    -  int *sizep;
    -  void *returnvalue;
    -
    -  if (!set || !(set->e[0].p))
    -    return NULL;
    -  sizep= SETsizeaddr_(set);
    -  if ((setsize= *sizep)) {
    -    returnvalue= set->e[setsize - 2].p;
    -    set->e[setsize - 2].p= NULL;
    -    (*sizep)--;
    -  }else {
    -    maxsize= set->maxsize;
    -    returnvalue= set->e[maxsize - 1].p;
    -    set->e[maxsize - 1].p= NULL;
    -    *sizep= maxsize;
    -  }
    -  return returnvalue;
    -} /* setdellast */
    -
    -
    -/*---------------------------------
    -
    -  qh_setdelnth( set, nth )
    -    deletes nth element from unsorted set
    -    0 is first element
    -
    -  returns:
    -    returns the element (needs type conversion)
    -
    -  notes:
    -    errors if nth invalid
    -
    -  design:
    -    setup points and check nth
    -    delete nth element and overwrite with last element
    -*/
    -void *qh_setdelnth(setT *set, int nth) {
    -  void **elemp, **lastp, *elem;
    -  int *sizep;
    -
    -
    -  elemp= SETelemaddr_(set, nth, void);
    -  sizep= SETsizeaddr_(set);
    -  if (!(*sizep)--)         /*  if was a full set */
    -    *sizep= set->maxsize;  /*     *sizep= (maxsize-1)+ 1 */
    -  if (nth < 0 || nth >= *sizep) {
    -    qh_fprintf(qhmem.ferr, 6174, "qhull internal error (qh_setaddnth): nth %d is out-of-bounds for set:\n", nth);
    -    qh_setprint(qhmem.ferr, "", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  lastp= SETelemaddr_(set, *sizep-1, void);
    -  elem= *elemp;
    -  *elemp= *lastp;      /* may overwrite itself */
    -  *lastp= NULL;
    -  return elem;
    -} /* setdelnth */
    -
    -/*---------------------------------
    -
    -  qh_setdelnthsorted( set, nth )
    -    deletes nth element from sorted set
    -
    -  returns:
    -    returns the element (use type conversion)
    -
    -  notes:
    -    errors if nth invalid
    -
    -  see also:
    -    setnew_delnthsorted
    -
    -  design:
    -    setup points and check nth
    -    copy remaining elements down one
    -    update actual size
    -*/
    -void *qh_setdelnthsorted(setT *set, int nth) {
    -  void **newp, **oldp, *elem;
    -  int *sizep;
    -
    -  sizep= SETsizeaddr_(set);
    -  if (nth < 0 || (*sizep && nth >= *sizep-1) || nth >= set->maxsize) {
    -    qh_fprintf(qhmem.ferr, 6175, "qhull internal error (qh_setaddnth): nth %d is out-of-bounds for set:\n", nth);
    -    qh_setprint(qhmem.ferr, "", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  newp= SETelemaddr_(set, nth, void);
    -  elem= *newp;
    -  oldp= newp+1;
    -  while ((*(newp++)= *(oldp++)))
    -    ; /* copy remaining elements and NULL */
    -  if (!(*sizep)--)         /*  if was a full set */
    -    *sizep= set->maxsize;  /*     *sizep= (max size-1)+ 1 */
    -  return elem;
    -} /* setdelnthsorted */
    -
    -
    -/*---------------------------------
    -
    -  qh_setdelsorted( set, oldelem )
    -    deletes oldelem from sorted set
    -
    -  returns:
    -    returns oldelem if it was deleted
    -
    -  notes:
    -    set may be NULL
    -
    -  design:
    -    locate oldelem in set
    -    copy remaining elements down one
    -    update actual size
    -*/
    -void *qh_setdelsorted(setT *set, void *oldelem) {
    -  void **newp, **oldp;
    -  int *sizep;
    -
    -  if (!set)
    -    return NULL;
    -  newp= SETaddr_(set, void);
    -  while (*newp != oldelem && *newp)
    -    newp++;
    -  if (*newp) {
    -    oldp= newp+1;
    -    while ((*(newp++)= *(oldp++)))
    -      ; /* copy remaining elements */
    -    sizep= SETsizeaddr_(set);
    -    if (!(*sizep)--)    /*  if was a full set */
    -      *sizep= set->maxsize;  /*     *sizep= (max size-1)+ 1 */
    -    return oldelem;
    -  }
    -  return NULL;
    -} /* setdelsorted */
    -
    -
    -/*---------------------------------
    -
    -  qh_setduplicate( set, elemsize )
    -    duplicate a set of elemsize elements
    -
    -  notes:
    -    use setcopy if retaining old elements
    -
    -  design:
    -    create a new set
    -    for each elem of the old set
    -      create a newelem
    -      append newelem to newset
    -*/
    -setT *qh_setduplicate(setT *set, int elemsize) {
    -  void          *elem, **elemp, *newElem;
    -  setT          *newSet;
    -  int           size;
    -
    -  if (!(size= qh_setsize(set)))
    -    return NULL;
    -  newSet= qh_setnew(size);
    -  FOREACHelem_(set) {
    -    newElem= qh_memalloc(elemsize);
    -    memcpy(newElem, elem, (size_t)elemsize);
    -    qh_setappend(&newSet, newElem);
    -  }
    -  return newSet;
    -} /* setduplicate */
    -
    -
    -/*---------------------------------
    -
    -  qh_setequal(  )
    -    returns 1 if two sorted sets are equal, otherwise returns 0
    -
    -  notes:
    -    either set may be NULL
    -
    -  design:
    -    check size of each set
    -    setup pointers
    -    compare elements of each set
    -*/
    -int qh_setequal(setT *setA, setT *setB) {
    -  void **elemAp, **elemBp;
    -  int sizeA, sizeB;
    -
    -  SETreturnsize_(setA, sizeA);
    -  SETreturnsize_(setB, sizeB);
    -  if (sizeA != sizeB)
    -    return 0;
    -  if (!sizeA)
    -    return 1;
    -  elemAp= SETaddr_(setA, void);
    -  elemBp= SETaddr_(setB, void);
    -  if (!memcmp((char *)elemAp, (char *)elemBp, sizeA*SETelemsize))
    -    return 1;
    -  return 0;
    -} /* setequal */
    -
    -
    -/*---------------------------------
    -
    -  qh_setequal_except( setA, skipelemA, setB, skipelemB )
    -    returns 1 if sorted setA and setB are equal except for skipelemA & B
    -
    -  returns:
    -    false if either skipelemA or skipelemB are missing
    -
    -  notes:
    -    neither set may be NULL
    -
    -    if skipelemB is NULL,
    -      can skip any one element of setB
    -
    -  design:
    -    setup pointers
    -    search for skipelemA, skipelemB, and mismatches
    -    check results
    -*/
    -int qh_setequal_except(setT *setA, void *skipelemA, setT *setB, void *skipelemB) {
    -  void **elemA, **elemB;
    -  int skip=0;
    -
    -  elemA= SETaddr_(setA, void);
    -  elemB= SETaddr_(setB, void);
    -  while (1) {
    -    if (*elemA == skipelemA) {
    -      skip++;
    -      elemA++;
    -    }
    -    if (skipelemB) {
    -      if (*elemB == skipelemB) {
    -        skip++;
    -        elemB++;
    -      }
    -    }else if (*elemA != *elemB) {
    -      skip++;
    -      if (!(skipelemB= *elemB++))
    -        return 0;
    -    }
    -    if (!*elemA)
    -      break;
    -    if (*elemA++ != *elemB++)
    -      return 0;
    -  }
    -  if (skip != 2 || *elemB)
    -    return 0;
    -  return 1;
    -} /* setequal_except */
    -
    -
    -/*---------------------------------
    -
    -  qh_setequal_skip( setA, skipA, setB, skipB )
    -    returns 1 if sorted setA and setB are equal except for elements skipA & B
    -
    -  returns:
    -    false if different size
    -
    -  notes:
    -    neither set may be NULL
    -
    -  design:
    -    setup pointers
    -    search for mismatches while skipping skipA and skipB
    -*/
    -int qh_setequal_skip(setT *setA, int skipA, setT *setB, int skipB) {
    -  void **elemA, **elemB, **skipAp, **skipBp;
    -
    -  elemA= SETaddr_(setA, void);
    -  elemB= SETaddr_(setB, void);
    -  skipAp= SETelemaddr_(setA, skipA, void);
    -  skipBp= SETelemaddr_(setB, skipB, void);
    -  while (1) {
    -    if (elemA == skipAp)
    -      elemA++;
    -    if (elemB == skipBp)
    -      elemB++;
    -    if (!*elemA)
    -      break;
    -    if (*elemA++ != *elemB++)
    -      return 0;
    -  }
    -  if (*elemB)
    -    return 0;
    -  return 1;
    -} /* setequal_skip */
    -
    -
    -/*---------------------------------
    -
    -  qh_setfree( setp )
    -    frees the space occupied by a sorted or unsorted set
    -
    -  returns:
    -    sets setp to NULL
    -
    -  notes:
    -    set may be NULL
    -
    -  design:
    -    free array
    -    free set
    -*/
    -void qh_setfree(setT **setp) {
    -  int size;
    -  void **freelistp;  /* used !qh_NOmem */
    -
    -  if (*setp) {
    -    size= sizeof(setT) + ((*setp)->maxsize)*SETelemsize;
    -    if (size <= qhmem.LASTsize) {
    -      qh_memfree_(*setp, size, freelistp);
    -    }else
    -      qh_memfree(*setp, size);
    -    *setp= NULL;
    -  }
    -} /* setfree */
    -
    -
    -/*---------------------------------
    -
    -  qh_setfree2( setp, elemsize )
    -    frees the space occupied by a set and its elements
    -
    -  notes:
    -    set may be NULL
    -
    -  design:
    -    free each element
    -    free set
    -*/
    -void qh_setfree2 (setT **setp, int elemsize) {
    -  void          *elem, **elemp;
    -
    -  FOREACHelem_(*setp)
    -    qh_memfree(elem, elemsize);
    -  qh_setfree(setp);
    -} /* setfree2 */
    -
    -
    -
    -/*---------------------------------
    -
    -  qh_setfreelong( setp )
    -    frees a set only if it's in long memory
    -
    -  returns:
    -    sets setp to NULL if it is freed
    -
    -  notes:
    -    set may be NULL
    -
    -  design:
    -    if set is large
    -      free it
    -*/
    -void qh_setfreelong(setT **setp) {
    -  int size;
    -
    -  if (*setp) {
    -    size= sizeof(setT) + ((*setp)->maxsize)*SETelemsize;
    -    if (size > qhmem.LASTsize) {
    -      qh_memfree(*setp, size);
    -      *setp= NULL;
    -    }
    -  }
    -} /* setfreelong */
    -
    -
    -/*---------------------------------
    -
    -  qh_setin( set, setelem )
    -    returns 1 if setelem is in a set, 0 otherwise
    -
    -  notes:
    -    set may be NULL or unsorted
    -
    -  design:
    -    scans set for setelem
    -*/
    -int qh_setin(setT *set, void *setelem) {
    -  void *elem, **elemp;
    -
    -  FOREACHelem_(set) {
    -    if (elem == setelem)
    -      return 1;
    -  }
    -  return 0;
    -} /* setin */
    -
    -
    -/*---------------------------------
    -
    -  qh_setindex( set, atelem )
    -    returns the index of atelem in set.
    -    returns -1, if not in set or maxsize wrong
    -
    -  notes:
    -    set may be NULL and may contain nulls.
    -    NOerrors returned (qh_pointid, QhullPoint::id)
    -
    -  design:
    -    checks maxsize
    -    scans set for atelem
    -*/
    -int qh_setindex(setT *set, void *atelem) {
    -  void **elem;
    -  int size, i;
    -
    -  SETreturnsize_(set, size);
    -  if (size > set->maxsize)
    -    return -1;
    -  elem= SETaddr_(set, void);
    -  for (i=0; i < size; i++) {
    -    if (*elem++ == atelem)
    -      return i;
    -  }
    -  return -1;
    -} /* setindex */
    -
    -
    -/*---------------------------------
    -
    -  qh_setlarger( oldsetp )
    -    returns a larger set that contains all elements of *oldsetp
    -
    -  notes:
    -    the set is at least twice as large
    -    if temp set, updates qhmem.tempstack
    -
    -  design:
    -    creates a new set
    -    copies the old set to the new set
    -    updates pointers in tempstack
    -    deletes the old set
    -*/
    -void qh_setlarger(setT **oldsetp) {
    -  int size= 1, *sizep;
    -  setT *newset, *set, **setp, *oldset;
    -  void **oldp, **newp;
    -
    -  if (*oldsetp) {
    -    oldset= *oldsetp;
    -    SETreturnsize_(oldset, size);
    -    qhmem.cntlarger++;
    -    qhmem.totlarger += size+1;
    -    newset= qh_setnew(2 * size);
    -    oldp= SETaddr_(oldset, void);
    -    newp= SETaddr_(newset, void);
    -    memcpy((char *)newp, (char *)oldp, (size_t)(size+1) * SETelemsize);
    -    sizep= SETsizeaddr_(newset);
    -    *sizep= size+1;
    -    FOREACHset_(qhmem.tempstack) {
    -      if (set == oldset)
    -        *(setp-1)= newset;
    -    }
    -    qh_setfree(oldsetp);
    -  }else
    -    newset= qh_setnew(3);
    -  *oldsetp= newset;
    -} /* setlarger */
    -
    -
    -/*---------------------------------
    -
    -  qh_setlast(  )
    -    return last element of set or NULL (use type conversion)
    -
    -  notes:
    -    set may be NULL
    -
    -  design:
    -    return last element
    -*/
    -void *qh_setlast(setT *set) {
    -  int size;
    -
    -  if (set) {
    -    size= *SETsizeaddr_(set);
    -    if (!size)
    -      return SETelem_(set, set->maxsize - 1);
    -    else if (size > 1)
    -      return SETelem_(set, size - 2);
    -  }
    -  return NULL;
    -} /* setlast */
    -
    -
    -/*---------------------------------
    -
    -  qh_setnew( setsize )
    -    creates and allocates space for a set
    -
    -  notes:
    -    setsize means the number of elements (!including the NULL terminator)
    -    use qh_settemp/qh_setfreetemp if set is temporary
    -
    -  design:
    -    allocate memory for set
    -    roundup memory if small set
    -    initialize as empty set
    -*/
    -setT *qh_setnew(int setsize) {
    -  setT *set;
    -  int sizereceived; /* used !qh_NOmem */
    -  int size;
    -  void **freelistp; /* used !qh_NOmem */
    -
    -  if (!setsize)
    -    setsize++;
    -  size= sizeof(setT) + setsize * SETelemsize;
    -  if (size <= qhmem.LASTsize) {
    -    qh_memalloc_(size, freelistp, set, setT);
    -#ifndef qh_NOmem
    -    sizereceived= qhmem.sizetable[ qhmem.indextable[size]];
    -    if (sizereceived > size)
    -      setsize += (sizereceived - size)/SETelemsize;
    -#endif
    -  }else
    -    set= (setT*)qh_memalloc(size);
    -  set->maxsize= setsize;
    -  set->e[setsize].i= 1;
    -  set->e[0].p= NULL;
    -  return(set);
    -} /* setnew */
    -
    -
    -/*---------------------------------
    -
    -  qh_setnew_delnthsorted( set, size, nth, prepend )
    -    creates a sorted set not containing nth element
    -    if prepend, the first prepend elements are undefined
    -
    -  notes:
    -    set must be defined
    -    checks nth
    -    see also: setdelnthsorted
    -
    -  design:
    -    create new set
    -    setup pointers and allocate room for prepend'ed entries
    -    append head of old set to new set
    -    append tail of old set to new set
    -*/
    -setT *qh_setnew_delnthsorted(setT *set, int size, int nth, int prepend) {
    -  setT *newset;
    -  void **oldp, **newp;
    -  int tailsize= size - nth -1, newsize;
    -
    -  if (tailsize < 0) {
    -    qh_fprintf(qhmem.ferr, 6176, "qhull internal error (qh_setaddnth): nth %d is out-of-bounds for set:\n", nth);
    -    qh_setprint(qhmem.ferr, "", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  newsize= size-1 + prepend;
    -  newset= qh_setnew(newsize);
    -  newset->e[newset->maxsize].i= newsize+1;  /* may be overwritten */
    -  oldp= SETaddr_(set, void);
    -  newp= SETaddr_(newset, void) + prepend;
    -  switch (nth) {
    -  case 0:
    -    break;
    -  case 1:
    -    *(newp++)= *oldp++;
    -    break;
    -  case 2:
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    break;
    -  case 3:
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    break;
    -  case 4:
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    break;
    -  default:
    -    memcpy((char *)newp, (char *)oldp, (size_t)nth * SETelemsize);
    -    newp += nth;
    -    oldp += nth;
    -    break;
    -  }
    -  oldp++;
    -  switch (tailsize) {
    -  case 0:
    -    break;
    -  case 1:
    -    *(newp++)= *oldp++;
    -    break;
    -  case 2:
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    break;
    -  case 3:
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    break;
    -  case 4:
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    *(newp++)= *oldp++;
    -    break;
    -  default:
    -    memcpy((char *)newp, (char *)oldp, (size_t)tailsize * SETelemsize);
    -    newp += tailsize;
    -  }
    -  *newp= NULL;
    -  return(newset);
    -} /* setnew_delnthsorted */
    -
    -
    -/*---------------------------------
    -
    -  qh_setprint( fp, string, set )
    -    print set elements to fp with identifying string
    -
    -  notes:
    -    never errors
    -*/
    -void qh_setprint(FILE *fp, const char* string, setT *set) {
    -  int size, k;
    -
    -  if (!set)
    -    qh_fprintf(fp, 9346, "%s set is null\n", string);
    -  else {
    -    SETreturnsize_(set, size);
    -    qh_fprintf(fp, 9347, "%s set=%p maxsize=%d size=%d elems=",
    -             string, set, set->maxsize, size);
    -    if (size > set->maxsize)
    -      size= set->maxsize+1;
    -    for (k=0; k < size; k++)
    -      qh_fprintf(fp, 9348, " %p", set->e[k].p);
    -    qh_fprintf(fp, 9349, "\n");
    -  }
    -} /* setprint */
    -
    -/*---------------------------------
    -
    -  qh_setreplace( set, oldelem, newelem )
    -    replaces oldelem in set with newelem
    -
    -  notes:
    -    errors if oldelem not in the set
    -    newelem may be NULL, but it turns the set into an indexed set (no FOREACH)
    -
    -  design:
    -    find oldelem
    -    replace with newelem
    -*/
    -void qh_setreplace(setT *set, void *oldelem, void *newelem) {
    -  void **elemp;
    -
    -  elemp= SETaddr_(set, void);
    -  while (*elemp != oldelem && *elemp)
    -    elemp++;
    -  if (*elemp)
    -    *elemp= newelem;
    -  else {
    -    qh_fprintf(qhmem.ferr, 6177, "qhull internal error (qh_setreplace): elem %p not found in set\n",
    -       oldelem);
    -    qh_setprint(qhmem.ferr, "", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -} /* setreplace */
    -
    -
    -/*---------------------------------
    -
    -  qh_setsize( set )
    -    returns the size of a set
    -
    -  notes:
    -    errors if set's maxsize is incorrect
    -    same as SETreturnsize_(set)
    -    same code for qh_setsize [qset.c] and QhullSetBase::count
    -
    -  design:
    -    determine actual size of set from maxsize
    -*/
    -int qh_setsize(setT *set) {
    -  int size, *sizep;
    -
    -  if (!set)
    -    return(0);
    -  sizep= SETsizeaddr_(set);
    -  if ((size= *sizep)) {
    -    size--;
    -    if (size > set->maxsize) {
    -      qh_fprintf(qhmem.ferr, 6178, "qhull internal error (qh_setsize): current set size %d is greater than maximum size %d\n",
    -               size, set->maxsize);
    -      qh_setprint(qhmem.ferr, "set: ", set);
    -      qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -    }
    -  }else
    -    size= set->maxsize;
    -  return size;
    -} /* setsize */
    -
    -/*---------------------------------
    -
    -  qh_settemp( setsize )
    -    return a stacked, temporary set of upto setsize elements
    -
    -  notes:
    -    use settempfree or settempfree_all to release from qhmem.tempstack
    -    see also qh_setnew
    -
    -  design:
    -    allocate set
    -    append to qhmem.tempstack
    -
    -*/
    -setT *qh_settemp(int setsize) {
    -  setT *newset;
    -
    -  newset= qh_setnew(setsize);
    -  qh_setappend(&qhmem.tempstack, newset);
    -  if (qhmem.IStracing >= 5)
    -    qh_fprintf(qhmem.ferr, 8123, "qh_settemp: temp set %p of %d elements, depth %d\n",
    -       newset, newset->maxsize, qh_setsize(qhmem.tempstack));
    -  return newset;
    -} /* settemp */
    -
    -/*---------------------------------
    -
    -  qh_settempfree( set )
    -    free temporary set at top of qhmem.tempstack
    -
    -  notes:
    -    nop if set is NULL
    -    errors if set not from previous   qh_settemp
    -
    -  to locate errors:
    -    use 'T2' to find source and then find mis-matching qh_settemp
    -
    -  design:
    -    check top of qhmem.tempstack
    -    free it
    -*/
    -void qh_settempfree(setT **set) {
    -  setT *stackedset;
    -
    -  if (!*set)
    -    return;
    -  stackedset= qh_settemppop();
    -  if (stackedset != *set) {
    -    qh_settemppush(stackedset);
    -    qh_fprintf(qhmem.ferr, 6179, "qhull internal error (qh_settempfree): set %p(size %d) was not last temporary allocated(depth %d, set %p, size %d)\n",
    -             *set, qh_setsize(*set), qh_setsize(qhmem.tempstack)+1,
    -             stackedset, qh_setsize(stackedset));
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  qh_setfree(set);
    -} /* settempfree */
    -
    -/*---------------------------------
    -
    -  qh_settempfree_all(  )
    -    free all temporary sets in qhmem.tempstack
    -
    -  design:
    -    for each set in tempstack
    -      free set
    -    free qhmem.tempstack
    -*/
    -void qh_settempfree_all(void) {
    -  setT *set, **setp;
    -
    -  FOREACHset_(qhmem.tempstack)
    -    qh_setfree(&set);
    -  qh_setfree(&qhmem.tempstack);
    -} /* settempfree_all */
    -
    -/*---------------------------------
    -
    -  qh_settemppop(  )
    -    pop and return temporary set from qhmem.tempstack
    -
    -  notes:
    -    the returned set is permanent
    -
    -  design:
    -    pop and check top of qhmem.tempstack
    -*/
    -setT *qh_settemppop(void) {
    -  setT *stackedset;
    -
    -  stackedset= (setT*)qh_setdellast(qhmem.tempstack);
    -  if (!stackedset) {
    -    qh_fprintf(qhmem.ferr, 6180, "qhull internal error (qh_settemppop): pop from empty temporary stack\n");
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  if (qhmem.IStracing >= 5)
    -    qh_fprintf(qhmem.ferr, 8124, "qh_settemppop: depth %d temp set %p of %d elements\n",
    -       qh_setsize(qhmem.tempstack)+1, stackedset, qh_setsize(stackedset));
    -  return stackedset;
    -} /* settemppop */
    -
    -/*---------------------------------
    -
    -  qh_settemppush( set )
    -    push temporary set unto qhmem.tempstack (makes it temporary)
    -
    -  notes:
    -    duplicates settemp() for tracing
    -
    -  design:
    -    append set to tempstack
    -*/
    -void qh_settemppush(setT *set) {
    -
    -  qh_setappend(&qhmem.tempstack, set);
    -  if (qhmem.IStracing >= 5)
    -    qh_fprintf(qhmem.ferr, 8125, "qh_settemppush: depth %d temp set %p of %d elements\n",
    -      qh_setsize(qhmem.tempstack), set, qh_setsize(set));
    -} /* settemppush */
    -
    -
    -/*---------------------------------
    -
    -  qh_settruncate( set, size )
    -    truncate set to size elements
    -
    -  notes:
    -    set must be defined
    -
    -  see:
    -    SETtruncate_
    -
    -  design:
    -    check size
    -    update actual size of set
    -*/
    -void qh_settruncate(setT *set, int size) {
    -
    -  if (size < 0 || size > set->maxsize) {
    -    qh_fprintf(qhmem.ferr, 6181, "qhull internal error (qh_settruncate): size %d out of bounds for set:\n", size);
    -    qh_setprint(qhmem.ferr, "", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  set->e[set->maxsize].i= size+1;   /* maybe overwritten */
    -  set->e[size].p= NULL;
    -} /* settruncate */
    -
    -/*---------------------------------
    -
    -  qh_setunique( set, elem )
    -    add elem to unsorted set unless it is already in set
    -
    -  notes:
    -    returns 1 if it is appended
    -
    -  design:
    -    if elem not in set
    -      append elem to set
    -*/
    -int qh_setunique(setT **set, void *elem) {
    -
    -  if (!qh_setin(*set, elem)) {
    -    qh_setappend(set, elem);
    -    return 1;
    -  }
    -  return 0;
    -} /* setunique */
    -
    -/*---------------------------------
    -
    -  qh_setzero( set, index, size )
    -    zero elements from index on
    -    set actual size of set to size
    -
    -  notes:
    -    set must be defined
    -    the set becomes an indexed set (can not use FOREACH...)
    -
    -  see also:
    -    qh_settruncate
    -
    -  design:
    -    check index and size
    -    update actual size
    -    zero elements starting at e[index]
    -*/
    -void qh_setzero(setT *set, int idx, int size) {
    -  int count;
    -
    -  if (idx < 0 || idx >= size || size > set->maxsize) {
    -    qh_fprintf(qhmem.ferr, 6182, "qhull internal error (qh_setzero): index %d or size %d out of bounds for set:\n", idx, size);
    -    qh_setprint(qhmem.ferr, "", set);
    -    qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -  }
    -  set->e[set->maxsize].i=  size+1;  /* may be overwritten */
    -  count= size - idx + 1;   /* +1 for NULL terminator */
    -  memset((char *)SETelemaddr_(set, idx, void), 0, (size_t)count * SETelemsize);
    -} /* setzero */
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/qset.h b/scipy-0.10.1/scipy/spatial/qhull/src/qset.h
    deleted file mode 100644
    index 611040bc2c..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/qset.h
    +++ /dev/null
    @@ -1,484 +0,0 @@
    -/*
      ---------------------------------
    -
    -   qset.h
    -     header file for qset.c that implements set
    -
    -   see qh-set.htm and qset.c
    -
    -   only uses mem.c, malloc/free
    -
    -   for error handling, writes message and calls
    -      qh_errexit(qhmem_ERRqhull, NULL, NULL);
    -
    -   set operations satisfy the following properties:
    -    - sets have a max size, the actual size (if different) is stored at the end
    -    - every set is NULL terminated
    -    - sets may be sorted or unsorted, the caller must distinguish this
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/qset.h#20 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFset
    -#define qhDEFset 1
    -
    -#include 
    -
    -/*================= -structures- ===============*/
    -
    -#ifndef DEFsetT
    -#define DEFsetT 1
    -typedef struct setT setT;   /* a set is a sorted or unsorted array of pointers */
    -#endif
    -
    -/*------------------------------------------
    -
    -setT
    -  a set or list of pointers with maximum size and actual size.
    -
    -variations:
    -  unsorted, unique   -- a list of unique pointers with NULL terminator
    -                           user guarantees uniqueness
    -  sorted             -- a sorted list of unique pointers with NULL terminator
    -                           qset.c guarantees uniqueness
    -  unsorted           -- a list of pointers terminated with NULL
    -  indexed            -- an array of pointers with NULL elements
    -
    -structure for set of n elements:
    -
    -        --------------
    -        |  maxsize
    -        --------------
    -        |  e[0] - a pointer, may be NULL for indexed sets
    -        --------------
    -        |  e[1]
    -
    -        --------------
    -        |  ...
    -        --------------
    -        |  e[n-1]
    -        --------------
    -        |  e[n] = NULL
    -        --------------
    -        |  ...
    -        --------------
    -        |  e[maxsize] - n+1 or NULL (determines actual size of set)
    -        --------------
    -
    -*/
    -
    -/*-- setelemT -- internal type to allow both pointers and indices
    -*/
    -typedef union setelemT setelemT;
    -union setelemT {
    -  void    *p;
    -  int      i;         /* integer used for e[maxSize] */
    -};
    -
    -struct setT {
    -  int maxsize;          /* maximum number of elements (except NULL) */
    -  setelemT e[1];        /* array of pointers, tail is NULL */
    -                        /* last slot (unless NULL) is actual size+1
    -                           e[maxsize]==NULL or e[e[maxsize]-1]==NULL */
    -                        /* this may generate a warning since e[] contains
    -                           maxsize elements */
    -};
    -
    -/*=========== -constants- =========================*/
    -
    -/*-------------------------------------
    -
    -  SETelemsize
    -    size of a set element in bytes
    -*/
    -#define SETelemsize ((int)sizeof(setelemT))
    -
    -
    -/*=========== -macros- =========================*/
    -
    -/*-------------------------------------
    -
    -   FOREACHsetelement_(type, set, variable)
    -     define FOREACH iterator
    -
    -   declare:
    -     assumes *variable and **variablep are declared
    -     no space in "variable)" [DEC Alpha cc compiler]
    -
    -   each iteration:
    -     variable is set element
    -     variablep is one beyond variable.
    -
    -   to repeat an element:
    -     variablep--; / *repeat* /
    -
    -   at exit:
    -     variable is NULL at end of loop
    -
    -   example:
    -     #define FOREACHfacet_( facets ) FOREACHsetelement_( facetT, facets, facet )
    -
    -   notes:
    -     use FOREACHsetelement_i_() if need index or include NULLs
    -
    -   WARNING:
    -     nested loops can't use the same variable (define another FOREACH)
    -
    -     needs braces if nested inside another FOREACH
    -     this includes intervening blocks, e.g. FOREACH...{ if () FOREACH...} )
    -*/
    -#define FOREACHsetelement_(type, set, variable) \
    -        if (((variable= NULL), set)) for (\
    -          variable##p= (type **)&((set)->e[0].p); \
    -          (variable= *variable##p++);)
    -
    -/*------------------------------------------
    -
    -   FOREACHsetelement_i_(type, set, variable)
    -     define indexed FOREACH iterator
    -
    -   declare:
    -     type *variable, variable_n, variable_i;
    -
    -   each iteration:
    -     variable is set element, may be NULL
    -     variable_i is index, variable_n is qh_setsize()
    -
    -   to repeat an element:
    -     variable_i--; variable_n-- repeats for deleted element
    -
    -   at exit:
    -     variable==NULL and variable_i==variable_n
    -
    -   example:
    -     #define FOREACHfacet_i_( facets ) FOREACHsetelement_i_( facetT, facets, facet )
    -
    -   WARNING:
    -     nested loops can't use the same variable (define another FOREACH)
    -
    -     needs braces if nested inside another FOREACH
    -     this includes intervening blocks, e.g. FOREACH...{ if () FOREACH...} )
    -*/
    -#define FOREACHsetelement_i_(type, set, variable) \
    -        if (((variable= NULL), set)) for (\
    -          variable##_i= 0, variable= (type *)((set)->e[0].p), \
    -                   variable##_n= qh_setsize(set);\
    -          variable##_i < variable##_n;\
    -          variable= (type *)((set)->e[++variable##_i].p) )
    -
    -/*----------------------------------------
    -
    -   FOREACHsetelementreverse_(type, set, variable)-
    -     define FOREACH iterator in reverse order
    -
    -   declare:
    -     assumes *variable and **variablep are declared
    -     also declare 'int variabletemp'
    -
    -   each iteration:
    -     variable is set element
    -
    -   to repeat an element:
    -     variabletemp++; / *repeat* /
    -
    -   at exit:
    -     variable is NULL
    -
    -   example:
    -     #define FOREACHvertexreverse_( vertices ) FOREACHsetelementreverse_( vertexT, vertices, vertex )
    -
    -   notes:
    -     use FOREACHsetelementreverse12_() to reverse first two elements
    -     WARNING: needs braces if nested inside another FOREACH
    -*/
    -#define FOREACHsetelementreverse_(type, set, variable) \
    -        if (((variable= NULL), set)) for (\
    -           variable##temp= qh_setsize(set)-1, variable= qh_setlast(set);\
    -           variable; variable= \
    -           ((--variable##temp >= 0) ? SETelemt_(set, variable##temp, type) : NULL))
    -
    -/*-------------------------------------
    -
    -   FOREACHsetelementreverse12_(type, set, variable)-
    -     define FOREACH iterator with e[1] and e[0] reversed
    -
    -   declare:
    -     assumes *variable and **variablep are declared
    -
    -   each iteration:
    -     variable is set element
    -     variablep is one after variable.
    -
    -   to repeat an element:
    -     variablep--; / *repeat* /
    -
    -   at exit:
    -     variable is NULL at end of loop
    -
    -   example
    -     #define FOREACHvertexreverse12_( vertices ) FOREACHsetelementreverse12_( vertexT, vertices, vertex )
    -
    -   notes:
    -     WARNING: needs braces if nested inside another FOREACH
    -*/
    -#define FOREACHsetelementreverse12_(type, set, variable) \
    -        if (((variable= NULL), set)) for (\
    -          variable##p= (type **)&((set)->e[1].p); \
    -          (variable= *variable##p); \
    -          variable##p == ((type **)&((set)->e[0].p))?variable##p += 2: \
    -              (variable##p == ((type **)&((set)->e[1].p))?variable##p--:variable##p++))
    -
    -/*-------------------------------------
    -
    -   FOREACHelem_( set )-
    -     iterate elements in a set
    -
    -   declare:
    -     void *elem, *elemp;
    -
    -   each iteration:
    -     elem is set element
    -     elemp is one beyond
    -
    -   to repeat an element:
    -     elemp--; / *repeat* /
    -
    -   at exit:
    -     elem == NULL at end of loop
    -
    -   example:
    -     FOREACHelem_(set) {
    -
    -   notes:
    -     WARNING: needs braces if nested inside another FOREACH
    -*/
    -#define FOREACHelem_(set) FOREACHsetelement_(void, set, elem)
    -
    -/*-------------------------------------
    -
    -   FOREACHset_( set )-
    -     iterate a set of sets
    -
    -   declare:
    -     setT *set, **setp;
    -
    -   each iteration:
    -     set is set element
    -     setp is one beyond
    -
    -   to repeat an element:
    -     setp--; / *repeat* /
    -
    -   at exit:
    -     set == NULL at end of loop
    -
    -   example
    -     FOREACHset_(sets) {
    -
    -   notes:
    -     WARNING: needs braces if nested inside another FOREACH
    -*/
    -#define FOREACHset_(sets) FOREACHsetelement_(setT, sets, set)
    -
    -/*-------------------------------------------
    -
    -   SETindex_( set, elem )
    -     return index of elem in set
    -
    -   notes:
    -     for use with FOREACH iteration
    -     WARN64 -- Maximum set size is 2G
    -
    -   example:
    -     i= SETindex_(ridges, ridge)
    -*/
    -#define SETindex_(set, elem) ((int)((void **)elem##p - (void **)&(set)->e[1].p))
    -
    -/*-----------------------------------------
    -
    -   SETref_( elem )
    -     l.h.s. for modifying the current element in a FOREACH iteration
    -
    -   example:
    -     SETref_(ridge)= anotherridge;
    -*/
    -#define SETref_(elem) (elem##p[-1])
    -
    -/*-----------------------------------------
    -
    -   SETelem_(set, n)
    -     return the n'th element of set
    -
    -   notes:
    -      assumes that n is valid [0..size] and that set is defined
    -      use SETelemt_() for type cast
    -*/
    -#define SETelem_(set, n)           ((set)->e[n].p)
    -
    -/*-----------------------------------------
    -
    -   SETelemt_(set, n, type)
    -     return the n'th element of set as a type
    -
    -   notes:
    -      assumes that n is valid [0..size] and that set is defined
    -*/
    -#define SETelemt_(set, n, type)    ((type*)((set)->e[n].p))
    -
    -/*-----------------------------------------
    -
    -   SETelemaddr_(set, n, type)
    -     return address of the n'th element of a set
    -
    -   notes:
    -      assumes that n is valid [0..size] and set is defined
    -*/
    -#define SETelemaddr_(set, n, type) ((type **)(&((set)->e[n].p)))
    -
    -/*-----------------------------------------
    -
    -   SETfirst_(set)
    -     return first element of set
    -
    -*/
    -#define SETfirst_(set)             ((set)->e[0].p)
    -
    -/*-----------------------------------------
    -
    -   SETfirstt_(set, type)
    -     return first element of set as a type
    -
    -*/
    -#define SETfirstt_(set, type)      ((type*)((set)->e[0].p))
    -
    -/*-----------------------------------------
    -
    -   SETsecond_(set)
    -     return second element of set
    -
    -*/
    -#define SETsecond_(set)            ((set)->e[1].p)
    -
    -/*-----------------------------------------
    -
    -   SETsecondt_(set, type)
    -     return second element of set as a type
    -*/
    -#define SETsecondt_(set, type)     ((type*)((set)->e[1].p))
    -
    -/*-----------------------------------------
    -
    -   SETaddr_(set, type)
    -       return address of set's elements
    -*/
    -#define SETaddr_(set,type)         ((type **)(&((set)->e[0].p)))
    -
    -/*-----------------------------------------
    -
    -   SETreturnsize_(set, size)
    -     return size of a set
    -
    -   notes:
    -      set must be defined
    -      use qh_setsize(set) unless speed is critical
    -*/
    -#define SETreturnsize_(set, size) (((size)= ((set)->e[(set)->maxsize].i))?(--(size)):((size)= (set)->maxsize))
    -
    -/*-----------------------------------------
    -
    -   SETempty_(set)
    -     return true(1) if set is empty
    -
    -   notes:
    -      set may be NULL
    -*/
    -#define SETempty_(set)            (!set || (SETfirst_(set) ? 0 : 1))
    -
    -/*---------------------------------
    -
    -  SETsizeaddr_(set)
    -    return pointer to 'actual size+1' of set (set CANNOT be NULL!!)
    -
    -  notes:
    -    *SETsizeaddr==NULL or e[*SETsizeaddr-1].p==NULL
    -*/
    -#define SETsizeaddr_(set) (&((set)->e[(set)->maxsize].i))
    -
    -/*-----------------------------------------
    -
    -   SETtruncate_(set)
    -     return first element of set
    -
    -   see:
    -     qh_settruncate()
    -
    -*/
    -#define SETtruncate_(set, size) {set->e[set->maxsize].i= size+1; /* maybe overwritten */ \
    -      set->e[size].p= NULL;}
    -
    -/*======= prototypes in alphabetical order ============*/
    -
    -void  qh_setaddsorted(setT **setp, void *elem);
    -void  qh_setaddnth(setT **setp, int nth, void *newelem);
    -void  qh_setappend(setT **setp, void *elem);
    -void  qh_setappend_set(setT **setp, setT *setA);
    -void  qh_setappend2ndlast(setT **setp, void *elem);
    -void  qh_setcheck(setT *set, const char *tname, unsigned id);
    -void  qh_setcompact(setT *set);
    -setT *qh_setcopy(setT *set, int extra);
    -void *qh_setdel(setT *set, void *elem);
    -void *qh_setdellast(setT *set);
    -void *qh_setdelnth(setT *set, int nth);
    -void *qh_setdelnthsorted(setT *set, int nth);
    -void *qh_setdelsorted(setT *set, void *newelem);
    -setT *qh_setduplicate( setT *set, int elemsize);
    -int   qh_setequal(setT *setA, setT *setB);
    -int   qh_setequal_except(setT *setA, void *skipelemA, setT *setB, void *skipelemB);
    -int   qh_setequal_skip(setT *setA, int skipA, setT *setB, int skipB);
    -void  qh_setfree(setT **set);
    -void  qh_setfree2( setT **setp, int elemsize);
    -void  qh_setfreelong(setT **set);
    -int   qh_setin(setT *set, void *setelem);
    -int   qh_setindex(setT *set, void *setelem);
    -void  qh_setlarger(setT **setp);
    -void *qh_setlast(setT *set);
    -setT *qh_setnew(int size);
    -setT *qh_setnew_delnthsorted(setT *set, int size, int nth, int prepend);
    -void  qh_setprint(FILE *fp, const char* string, setT *set);
    -void  qh_setreplace(setT *set, void *oldelem, void *newelem);
    -int   qh_setsize(setT *set);
    -setT *qh_settemp(int setsize);
    -void  qh_settempfree(setT **set);
    -void  qh_settempfree_all(void);
    -setT *qh_settemppop(void);
    -void  qh_settemppush(setT *set);
    -void  qh_settruncate(setT *set, int size);
    -int   qh_setunique(setT **set, void *elem);
    -void  qh_setzero(setT *set, int idx, int size);
    -
    -
    -#endif /* qhDEFset */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/random.c b/scipy-0.10.1/scipy/spatial/qhull/src/random.c
    deleted file mode 100644
    index 30a67e9462..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/random.c
    +++ /dev/null
    @@ -1,243 +0,0 @@
    -/*
      ---------------------------------
    -
    -   random.c -- utilities
    -     Park & Miller's minimimal standard random number generator
    -     argc/argv conversion
    -*/
    -
    -#include "libqhull.h"
    -#include 
    -#include 
    -#include 
    -
    -#ifdef _MSC_VER  /* Microsoft Visual C++ -- warning level 4 */
    -#pragma warning( disable : 4706)  /* assignment within conditional function */
    -#pragma warning( disable : 4996)  /* function was declared deprecated(strcpy, localtime, etc.) */
    -#endif
    -
    -/*---------------------------------
    -
    - qh_argv_to_command( argc, argv, command, max_size )
    -
    -    build command from argc/argv
    -    max_size is at least
    -
    - returns:
    -    a space-delimited string of options (just as typed)
    -    returns false if max_size is too short
    -
    - notes:
    -    silently removes
    -    makes option string easy to input and output
    -    matches qh_argv_to_command_size()
    -
    -    argc may be 0
    -*/
    -int qh_argv_to_command(int argc, char *argv[], char* command, int max_size) {
    -  int i, remaining;
    -  char *s;
    -  *command= '\0';  /* max_size > 0 */
    -
    -  if (argc) {
    -    if ((s= strrchr( argv[0], '\\')) /* get filename w/o .exe extension */
    -    || (s= strrchr( argv[0], '/')))
    -        s++;
    -    else
    -        s= argv[0];
    -    if ((int)strlen(s) < max_size)   /* WARN64 */
    -        strcpy(command, s);
    -    else
    -        goto error_argv;
    -    if ((s= strstr(command, ".EXE"))
    -    ||  (s= strstr(command, ".exe")))
    -        *s= '\0';
    -  }
    -  for (i=1; i < argc; i++) {
    -    s= argv[i];
    -    remaining= max_size - (int)strlen(command) - (int)strlen(s) - 2;   /* WARN64 */
    -    if (!*s || strchr(s, ' ')) {
    -      char *t= command + strlen(command);
    -      remaining -= 2;
    -      if (remaining < 0) {
    -        goto error_argv;
    -      }
    -      *t++= ' ';
    -      *t++= '"';
    -      while (*s) {
    -        if (*s == '"') {
    -          if (--remaining < 0)
    -            goto error_argv;
    -          *t++= '\\';
    -        }
    -        *t++= *s++;
    -      }
    -      *t++= '"';
    -      *t= '\0';
    -    }else if (remaining < 0) {
    -      goto error_argv;
    -    }else
    -      strcat(command, " ");
    -      strcat(command, s);
    -  }
    -  return 1;
    -
    -error_argv:
    -  qh_fprintf(qh ferr, 6033, "qhull input error: more than %d characters in command line\n",
    -      max_size);
    -  return 0;
    -} /* argv_to_command */
    -
    -/*---------------------------------
    -
    -qh_argv_to_command_size( argc, argv )
    -
    -    return size to allocate for qh_argv_to_command()
    -
    -notes:
    -    argc may be 0
    -    actual size is usually shorter
    -*/
    -int qh_argv_to_command_size(int argc, char *argv[]) {
    -    unsigned int count= 1; /* null-terminator if argc==0 */
    -    int i;
    -    char *s;
    -
    -    for (i=0; i0 && strchr(argv[i], ' ')) {
    -        count += 2;  /* quote delimiters */
    -        for (s=argv[i]; *s; s++) {
    -          if (*s == '"') {
    -            count++;
    -          }
    -        }
    -      }
    -    }
    -    return count;
    -} /* argv_to_command_size */
    -
    -/*---------------------------------
    -
    -  qh_rand()
    -  qh_srand( seed )
    -    generate pseudo-random number between 1 and 2^31 -2
    -
    -  notes:
    -    For qhull and rbox, called from qh_RANDOMint(),etc. [user.h]
    -
    -    From Park & Miller's minimal standard random number generator
    -      Communications of the ACM, 31:1192-1201, 1988.
    -    Does not use 0 or 2^31 -1
    -      this is silently enforced by qh_srand()
    -    Can make 'Rn' much faster by moving qh_rand to qh_distplane
    -*/
    -
    -/* Global variables and constants */
    -
    -int qh_rand_seed= 1;  /* define as global variable instead of using qh */
    -
    -#define qh_rand_a 16807
    -#define qh_rand_m 2147483647
    -#define qh_rand_q 127773  /* m div a */
    -#define qh_rand_r 2836    /* m mod a */
    -
    -int qh_rand( void) {
    -    int lo, hi, test;
    -    int seed = qh_rand_seed;
    -
    -    hi = seed / qh_rand_q;  /* seed div q */
    -    lo = seed % qh_rand_q;  /* seed mod q */
    -    test = qh_rand_a * lo - qh_rand_r * hi;
    -    if (test > 0)
    -        seed= test;
    -    else
    -        seed= test + qh_rand_m;
    -    qh_rand_seed= seed;
    -    /* seed = seed < qh_RANDOMmax/2 ? 0 : qh_RANDOMmax;  for testing */
    -    /* seed = qh_RANDOMmax;  for testing */
    -    return seed;
    -} /* rand */
    -
    -void qh_srand( int seed) {
    -    if (seed < 1)
    -        qh_rand_seed= 1;
    -    else if (seed >= qh_rand_m)
    -        qh_rand_seed= qh_rand_m - 1;
    -    else
    -        qh_rand_seed= seed;
    -} /* qh_srand */
    -
    -/*---------------------------------
    -
    -qh_randomfactor( scale, offset )
    -return a random factor r * scale + offset
    -
    -notes:
    -qh.RANDOMa/b are defined in global.c
    -*/
    -realT qh_randomfactor(realT scale, realT offset) {
    -    realT randr;
    -
    -    randr= qh_RANDOMint;
    -    return randr * scale + offset;
    -} /* randomfactor */
    -
    -/*---------------------------------
    -
    -qh_randommatrix( buffer, dim, rows )
    -generate a random dim X dim matrix in range [-1,1]
    -assumes buffer is [dim+1, dim]
    -
    -returns:
    -sets buffer to random numbers
    -sets rows to rows of buffer
    -sets row[dim] as scratch row
    -*/
    -void qh_randommatrix(realT *buffer, int dim, realT **rows) {
    -    int i, k;
    -    realT **rowi, *coord, realr;
    -
    -    coord= buffer;
    -    rowi= rows;
    -    for (i=0; i < dim; i++) {
    -        *(rowi++)= coord;
    -        for (k=0; k < dim; k++) {
    -            realr= qh_RANDOMint;
    -            *(coord++)= 2.0 * realr/(qh_RANDOMmax+1) - 1.0;
    -        }
    -    }
    -    *rowi= coord;
    -} /* randommatrix */
    -
    -/*---------------------------------
    -
    -  qh_strtol( s, endp) qh_strtod( s, endp)
    -    internal versions of strtol() and strtod()
    -    does not skip trailing spaces
    -  notes:
    -    some implementations of strtol()/strtod() skip trailing spaces
    -*/
    -double qh_strtod(const char *s, char **endp) {
    -  double result;
    -
    -  result= strtod(s, endp);
    -  if (s < (*endp) && (*endp)[-1] == ' ')
    -    (*endp)--;
    -  return result;
    -} /* strtod */
    -
    -int qh_strtol(const char *s, char **endp) {
    -  int result;
    -
    -  result= (int) strtol(s, endp, 10);     /* WARN64 */
    -  if (s< (*endp) && (*endp)[-1] == ' ')
    -    (*endp)--;
    -  return result;
    -} /* strtol */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/random.h b/scipy-0.10.1/scipy/spatial/qhull/src/random.h
    deleted file mode 100644
    index 5a0ecd5f86..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/random.h
    +++ /dev/null
    @@ -1,34 +0,0 @@
    -/*
      ---------------------------------
    -
    -  random.h
    -    header file for random routines
    -
    -   see qh-geom.htm and random.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/random.h#11 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#ifndef qhDEFrandom
    -#define qhDEFrandom 1
    -
    -#include "libqhull.h"
    -
    -/*============= prototypes in alphabetical order ======= */
    -
    -
    -int     qh_argv_to_command(int argc, char *argv[], char* command, int max_size);
    -int     qh_argv_to_command_size(int argc, char *argv[]);
    -int     qh_rand( void);
    -void    qh_srand( int seed);
    -realT   qh_randomfactor(realT scale, realT offset);
    -void    qh_randommatrix(realT *buffer, int dim, realT **row);
    -int     qh_strtol(const char *s, char **endp);
    -double  qh_strtod(const char *s, char **endp);
    -
    -#endif /* qhDEFrandom */
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/rboxlib.c b/scipy-0.10.1/scipy/spatial/qhull/src/rboxlib.c
    deleted file mode 100644
    index 4f11d22727..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/rboxlib.c
    +++ /dev/null
    @@ -1,794 +0,0 @@
    -/*
      ---------------------------------
    -
    -   rboxlib.c
    -     Generate input points
    -
    -   notes:
    -     For documentation, see prompt[] of rbox.c
    -     50 points generated for 'rbox D4'
    -
    -   WARNING:
    -     incorrect range if qh_RANDOMmax is defined wrong (user.h)
    -*/
    -
    -#include "random.h"
    -#include "libqhull.h"
    -
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -#include 
    -
    -#ifdef _MSC_VER  /* Microsoft Visual C++ */
    -#pragma warning( disable : 4706)  /* assignment within conditional expression. */
    -#pragma warning( disable : 4996)  /* this function (strncat) or variable may be unsafe. */
    -#endif
    -
    -#define MAXdim 200
    -#define PI 3.1415926535897932384
    -
    -/* ------------------------------ prototypes ----------------*/
    -int roundi( double a);
    -void out1( double a);
    -void out2n( double a, double b);
    -void out3n( double a, double b, double c);
    -
    -void    qh_fprintf_rbox(FILE *fp, int msgcode, const char *fmt, ... );
    -void    qh_free(void *mem);
    -void   *qh_malloc(size_t size);
    -int     qh_rand( void);
    -void    qh_srand( int seed);
    -
    -
    -/* ------------------------------ globals -------------------*/
    -
    -/* No state is carried between rbox requests */
    -typedef struct rboxT rboxT;
    -struct rboxT {
    -  FILE *fout;
    -  FILE *ferr;
    -  int isinteger;
    -  double out_offset;
    -  jmp_buf errexit;        /* exit label for rboxpoints, defined by setjmp(), called by qh_errexit_rbox() */
    -};
    -
    -
    -int rbox_inuse= 0;
    -rboxT rbox;
    -
    -/*---------------------------------
    -
    -  qh_rboxpoints( fout, ferr, rbox_command )
    -    Generate points to fout according to rbox options
    -    Report errors on ferr
    -
    -  returns:
    -    0 (qh_ERRnone) on success
    -    1 (qh_ERRinput) on input error
    -    4 (qh_ERRmem) on memory error
    -    5 (qh_ERRqhull) on internal error
    -
    -  notes:
    -    To avoid stdio, redefine qh_malloc, qh_free, and qh_fprintf_rbox (user.c)
    -
    -  design:
    -    Straight line code (consider defining a struct and functions):
    -
    -    Parse arguments into variables
    -    Determine the number of points
    -    Generate the points
    -*/
    -int qh_rboxpoints(FILE* fout, FILE* ferr, char* rbox_command) {
    -  int i,j,k;
    -  int gendim;
    -  int cubesize, diamondsize, seed=0, count, apex;
    -  int dim=3 , numpoints= 0, totpoints, addpoints=0;
    -  int issphere=0, isaxis=0,  iscdd= 0, islens= 0, isregular=0, iswidth=0, addcube=0;
    -  int isgap=0, isspiral=0, NOcommand= 0, adddiamond=0;
    -  int israndom=0, istime=0;
    -  int isbox=0, issimplex=0, issimplex2=0, ismesh=0;
    -  double width=0.0, gap=0.0, radius= 0.0;
    -  double coord[MAXdim], offset, meshm=3.0, meshn=4.0, meshr=5.0;
    -  double *simplex= NULL, *simplexp;
    -  int nthroot, mult[MAXdim];
    -  double norm, factor, randr, rangap, lensangle= 0, lensbase= 1;
    -  double anglediff, angle, x, y, cube= 0.0, diamond= 0.0;
    -  double box= qh_DEFAULTbox; /* scale all numbers before output */
    -  double randmax= qh_RANDOMmax;
    -  char command[200], seedbuf[200];
    -  char *s= command, *t, *first_point= NULL;
    -  time_t timedata;
    -  int exitcode;
    -
    -  if (rbox_inuse) {
    -    qh_fprintf_rbox(rbox.ferr, 6188, "rbox error: rbox in use by another process.  Please lock calls to rbox.\n");
    -    return qh_ERRqhull;
    -  }
    -  rbox_inuse= True;
    -  rbox.ferr= ferr;
    -  rbox.fout= fout;
    -
    -  exitcode= setjmp(rbox.errexit);
    -  if (exitcode) {
    -    /* same code for error exit and normal return */
    -    if (simplex)
    -        qh_free(simplex);
    -    rbox_inuse= False;
    -    return exitcode;
    -  }
    -
    -  *command= '\0';
    -  strncat(command, rbox_command, sizeof(command));
    -
    -  while (*s && !isspace(*s))  /* skip program name */
    -    s++;
    -  while (*s) {
    -    while (*s && isspace(*s))
    -      s++;
    -    if (*s == '-')
    -      s++;
    -    if (!*s)
    -      break;
    -    if (isdigit(*s)) {
    -      numpoints= qh_strtol(s, &s);
    -      continue;
    -    }
    -    /* ============= read flags =============== */
    -    switch (*s++) {
    -    case 'c':
    -      addcube= 1;
    -      t= s;
    -      while (isspace(*t))
    -        t++;
    -      if (*t == 'G')
    -        cube= qh_strtod(++t, &s);
    -      break;
    -    case 'd':
    -      adddiamond= 1;
    -      t= s;
    -      while (isspace(*t))
    -        t++;
    -      if (*t == 'G')
    -        diamond= qh_strtod(++t, &s);
    -      break;
    -    case 'h':
    -      iscdd= 1;
    -      break;
    -    case 'l':
    -      isspiral= 1;
    -      break;
    -    case 'n':
    -      NOcommand= 1;
    -      break;
    -    case 'r':
    -      isregular= 1;
    -      break;
    -    case 's':
    -      issphere= 1;
    -      break;
    -    case 't':
    -      istime= 1;
    -      if (isdigit(*s)) {
    -        seed= qh_strtol(s, &s);
    -        israndom= 0;
    -      }else
    -        israndom= 1;
    -      break;
    -    case 'x':
    -      issimplex= 1;
    -      break;
    -    case 'y':
    -      issimplex2= 1;
    -      break;
    -    case 'z':
    -      rbox.isinteger= 1;
    -      break;
    -    case 'B':
    -      box= qh_strtod(s, &s);
    -      isbox= 1;
    -      break;
    -    case 'D':
    -      dim= qh_strtol(s, &s);
    -      if (dim < 1
    -      || dim > MAXdim) {
    -        qh_fprintf_rbox(rbox.ferr, 6189, "rbox error: dimension, D%d, out of bounds (>=%d or <=0)", dim, MAXdim);
    -        qh_errexit_rbox(qh_ERRinput);
    -      }
    -      break;
    -    case 'G':
    -      if (isdigit(*s))
    -        gap= qh_strtod(s, &s);
    -      else
    -        gap= 0.5;
    -      isgap= 1;
    -      break;
    -    case 'L':
    -      if (isdigit(*s))
    -        radius= qh_strtod(s, &s);
    -      else
    -        radius= 10;
    -      islens= 1;
    -      break;
    -    case 'M':
    -      ismesh= 1;
    -      if (*s)
    -        meshn= qh_strtod(s, &s);
    -      if (*s == ',') {
    -        ++s;
    -        meshm= qh_strtod(s, &s);
    -      }else
    -        meshm= 0.0;
    -      if (*s == ',') {
    -        ++s;
    -        meshr= qh_strtod(s, &s);
    -      }else
    -        meshr= sqrt(meshn*meshn + meshm*meshm);
    -      if (*s && !isspace(*s)) {
    -        qh_fprintf_rbox(rbox.ferr, 7069, "rbox warning: assuming 'M3,4,5' since mesh args are not integers or reals\n");
    -        meshn= 3.0, meshm=4.0, meshr=5.0;
    -      }
    -      break;
    -    case 'O':
    -      rbox.out_offset= qh_strtod(s, &s);
    -      break;
    -    case 'P':
    -      if (!first_point)
    -        first_point= s-1;
    -      addpoints++;
    -      while (*s && !isspace(*s))   /* read points later */
    -        s++;
    -      break;
    -    case 'W':
    -      width= qh_strtod(s, &s);
    -      iswidth= 1;
    -      break;
    -    case 'Z':
    -      if (isdigit(*s))
    -        radius= qh_strtod(s, &s);
    -      else
    -        radius= 1.0;
    -      isaxis= 1;
    -      break;
    -    default:
    -      qh_fprintf_rbox(rbox.ferr, 7070, "rbox error: unknown flag at %s.\nExecute 'rbox' without arguments for documentation.\n", s);
    -      qh_errexit_rbox(qh_ERRinput);
    -    }
    -    if (*s && !isspace(*s)) {
    -      qh_fprintf_rbox(rbox.ferr, 7071, "rbox error: missing space between flags at %s.\n", s);
    -      qh_errexit_rbox(qh_ERRinput);
    -    }
    -  }
    -
    -  /* ============= defaults, constants, and sizes =============== */
    -  if (rbox.isinteger && !isbox)
    -    box= qh_DEFAULTzbox;
    -  if (addcube) {
    -    cubesize= (int)floor(ldexp(1.0,dim)+0.5);
    -    if (cube == 0.0)
    -      cube= box;
    -  }else
    -    cubesize= 0;
    -  if (adddiamond) {
    -    diamondsize= 2*dim;
    -    if (diamond == 0.0)
    -      diamond= box;
    -  }else
    -    diamondsize= 0;
    -  if (islens) {
    -    if (isaxis) {
    -        qh_fprintf_rbox(rbox.ferr, 6190, "rbox error: can not combine 'Ln' with 'Zn'\n");
    -        qh_errexit_rbox(qh_ERRinput);
    -    }
    -    if (radius <= 1.0) {
    -        qh_fprintf_rbox(rbox.ferr, 6191, "rbox error: lens radius %.2g should be greater than 1.0\n",
    -               radius);
    -        qh_errexit_rbox(qh_ERRinput);
    -    }
    -    lensangle= asin(1.0/radius);
    -    lensbase= radius * cos(lensangle);
    -  }
    -
    -  if (!numpoints) {
    -    if (issimplex2)
    -        ; /* ok */
    -    else if (isregular + issimplex + islens + issphere + isaxis + isspiral + iswidth + ismesh) {
    -        qh_fprintf_rbox(rbox.ferr, 6192, "rbox error: missing count\n");
    -        qh_errexit_rbox(qh_ERRinput);
    -    }else if (adddiamond + addcube + addpoints)
    -        ; /* ok */
    -    else {
    -        numpoints= 50;  /* ./rbox D4 is the test case */
    -        issphere= 1;
    -    }
    -  }
    -  if ((issimplex + islens + isspiral + ismesh > 1)
    -  || (issimplex + issphere + isspiral + ismesh > 1)) {
    -    qh_fprintf_rbox(rbox.ferr, 6193, "rbox error: can only specify one of 'l', 's', 'x', 'Ln', or 'Mn,m,r' ('Ln s' is ok).\n");
    -    qh_errexit_rbox(qh_ERRinput);
    -  }
    -
    -  /* ============= print header with total points =============== */
    -  if (issimplex || ismesh)
    -    totpoints= numpoints;
    -  else if (issimplex2)
    -    totpoints= numpoints+dim+1;
    -  else if (isregular) {
    -    totpoints= numpoints;
    -    if (dim == 2) {
    -        if (islens)
    -          totpoints += numpoints - 2;
    -    }else if (dim == 3) {
    -        if (islens)
    -          totpoints += 2 * numpoints;
    -      else if (isgap)
    -        totpoints += 1 + numpoints;
    -      else
    -        totpoints += 2;
    -    }
    -  }else
    -    totpoints= numpoints + isaxis;
    -  totpoints += cubesize + diamondsize + addpoints;
    -
    -  /* ============= seed randoms =============== */
    -  if (istime == 0) {
    -    for (s=command; *s; s++) {
    -      if (issimplex2 && *s == 'y') /* make 'y' same seed as 'x' */
    -        i= 'x';
    -      else
    -        i= *s;
    -      seed= 11*seed + i;
    -    }
    -  }else if (israndom) {
    -    seed= (int)time(&timedata);
    -    sprintf(seedbuf, " t%d", seed);  /* appends an extra t, not worth removing */
    -    strncat(command, seedbuf, sizeof(command));
    -    t= strstr(command, " t ");
    -    if (t)
    -      strcpy(t+1, t+3); /* remove " t " */
    -  } /* else, seed explicitly set to n */
    -  qh_RANDOMseed_(seed);
    -
    -  /* ============= print header =============== */
    -
    -  if (iscdd)
    -      qh_fprintf_rbox(rbox.fout, 9391, "%s\nbegin\n        %d %d %s\n",
    -      NOcommand ? "" : command,
    -      totpoints, dim+1,
    -      rbox.isinteger ? "integer" : "real");
    -  else if (NOcommand)
    -      qh_fprintf_rbox(rbox.fout, 9392, "%d\n%d\n", dim, totpoints);
    -  else
    -      qh_fprintf_rbox(rbox.fout, 9393, "%d %s\n%d\n", dim, command, totpoints);
    -
    -  /* ============= explicit points =============== */
    -  if ((s= first_point)) {
    -    while (s && *s) { /* 'P' */
    -      count= 0;
    -      if (iscdd)
    -        out1( 1.0);
    -      while (*++s) {
    -        out1( qh_strtod(s, &s));
    -        count++;
    -        if (isspace(*s) || !*s)
    -          break;
    -        if (*s != ',') {
    -          qh_fprintf_rbox(rbox.ferr, 6194, "rbox error: missing comma after coordinate in %s\n\n", s);
    -          qh_errexit_rbox(qh_ERRinput);
    -        }
    -      }
    -      if (count < dim) {
    -        for (k=dim-count; k--; )
    -          out1( 0.0);
    -      }else if (count > dim) {
    -        qh_fprintf_rbox(rbox.ferr, 6195, "rbox error: %d coordinates instead of %d coordinates in %s\n\n",
    -                  count, dim, s);
    -        qh_errexit_rbox(qh_ERRinput);
    -      }
    -      qh_fprintf_rbox(rbox.fout, 9394, "\n");
    -      while ((s= strchr(s, 'P'))) {
    -        if (isspace(s[-1]))
    -          break;
    -      }
    -    }
    -  }
    -
    -  /* ============= simplex distribution =============== */
    -  if (issimplex+issimplex2) {
    -    if (!(simplex= (double*)qh_malloc( dim * (dim+1) * sizeof(double)))) {
    -      qh_fprintf_rbox(rbox.ferr, 6196, "rbox error: insufficient memory for simplex\n");
    -      qh_errexit_rbox(qh_ERRmem); /* qh_ERRmem */
    -    }
    -    simplexp= simplex;
    -    if (isregular) {
    -      for (i=0; i randmax/2)
    -          coord[dim-1]= -coord[dim-1];
    -      /* ============= project 'Wn' point toward boundary =============== */
    -      }else if (iswidth && !issphere) {
    -        j= qh_RANDOMint % gendim;
    -        if (coord[j] < 0)
    -          coord[j]= -1.0 - coord[j] * width;
    -        else
    -          coord[j]= 1.0 - coord[j] * width;
    -      }
    -      /* ============= write point =============== */
    -      if (iscdd)
    -        out1( 1.0);
    -      for (k=0; k < dim; k++)
    -        out1( coord[k] * box);
    -      qh_fprintf_rbox(rbox.fout, 9399, "\n");
    -    }
    -  }
    -
    -  /* ============= write cube vertices =============== */
    -  if (addcube) {
    -    for (j=0; j=0; k--) {
    -        if (j & ( 1 << k))
    -          out1( cube);
    -        else
    -          out1( -cube);
    -      }
    -      qh_fprintf_rbox(rbox.fout, 9400, "\n");
    -    }
    -  }
    -
    -  /* ============= write diamond vertices =============== */
    -  if (adddiamond) {
    -    for (j=0; j=0; k--) {
    -        if (j/2 != k)
    -          out1( 0.0);
    -        else if (j & 0x1)
    -          out1( diamond);
    -        else
    -          out1( -diamond);
    -      }
    -      qh_fprintf_rbox(rbox.fout, 9401, "\n");
    -    }
    -  }
    -
    -  if (iscdd)
    -    qh_fprintf_rbox(rbox.fout, 9402, "end\nhull\n");
    -
    -  /* same code for error exit and normal return */
    -  if (simplex)
    -    qh_free(simplex);
    -  rbox_inuse= False;
    -  return qh_ERRnone;
    -} /* rboxpoints */
    -
    -/*------------------------------------------------
    -outxxx - output functions
    -*/
    -int roundi( double a) {
    -  if (a < 0.0) {
    -    if (a - 0.5 < INT_MIN) {
    -      qh_fprintf_rbox(rbox.ferr, 6200, "rbox input error: negative coordinate %2.2g is too large.  Reduce 'Bn'\n", a);
    -      qh_errexit_rbox(qh_ERRinput);
    -    }
    -    return (int)(a - 0.5);
    -  }else {
    -    if (a + 0.5 > INT_MAX) {
    -      qh_fprintf_rbox(rbox.ferr, 6201, "rbox input error: coordinate %2.2g is too large.  Reduce 'Bn'\n", a);
    -      qh_errexit_rbox(qh_ERRinput);
    -    }
    -    return (int)(a + 0.5);
    -  }
    -} /* roundi */
    -
    -void out1(double a) {
    -
    -  if (rbox.isinteger)
    -    qh_fprintf_rbox(rbox.fout, 9403, "%d ", roundi( a+rbox.out_offset));
    -  else
    -    qh_fprintf_rbox(rbox.fout, 9404, qh_REAL_1, a+rbox.out_offset);
    -} /* out1 */
    -
    -void out2n( double a, double b) {
    -
    -  if (rbox.isinteger)
    -    qh_fprintf_rbox(rbox.fout, 9405, "%d %d\n", roundi(a+rbox.out_offset), roundi(b+rbox.out_offset));
    -  else
    -    qh_fprintf_rbox(rbox.fout, 9406, qh_REAL_2n, a+rbox.out_offset, b+rbox.out_offset);
    -} /* out2n */
    -
    -void out3n( double a, double b, double c) {
    -
    -  if (rbox.isinteger)
    -    qh_fprintf_rbox(rbox.fout, 9407, "%d %d %d\n", roundi(a+rbox.out_offset), roundi(b+rbox.out_offset), roundi(c+rbox.out_offset));
    -  else
    -    qh_fprintf_rbox(rbox.fout, 9408, qh_REAL_3n, a+rbox.out_offset, b+rbox.out_offset, c+rbox.out_offset);
    -} /* out3n */
    -
    -void qh_errexit_rbox(int exitcode)
    -{
    -    longjmp(rbox.errexit, exitcode);
    -} /* rbox_errexit */
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/stat.c b/scipy-0.10.1/scipy/spatial/qhull/src/stat.c
    deleted file mode 100644
    index ac158c8072..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/stat.c
    +++ /dev/null
    @@ -1,714 +0,0 @@
    -/*
      ---------------------------------
    -
    -   stat.c
    -   contains all statistics that are collected for qhull
    -
    -   see qh-stat.htm and stat.h
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/stat.c#28 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -*/
    -
    -#include "qhull_a.h"
    -
    -/*============ global data structure ==========*/
    -
    -#if qh_QHpointer
    -qhstatT *qh_qhstat=NULL;  /* global data structure */
    -#else
    -qhstatT qh_qhstat;   /* add "={0}" if this causes a compiler error */
    -#endif
    -
    -/*========== functions in alphabetic order ================*/
    -
    -/*---------------------------------
    -
    -  qh_allstatA()
    -    define statistics in groups of 20
    -
    -  notes:
    -    (otherwise, 'gcc -O2' uses too much memory)
    -    uses qhstat.next
    -*/
    -void qh_allstatA(void) {
    -
    -   /* zdef_(type,name,doc,average) */
    -  zzdef_(zdoc, Zdoc2, "precision statistics", -1);
    -  zdef_(zinc, Znewvertex, NULL, -1);
    -  zdef_(wadd, Wnewvertex, "ave. distance of a new vertex to a facet(!0s)", Znewvertex);
    -  zzdef_(wmax, Wnewvertexmax, "max. distance of a new vertex to a facet", -1);
    -  zdef_(wmax, Wvertexmax, "max. distance of an output vertex to a facet", -1);
    -  zdef_(wmin, Wvertexmin, "min. distance of an output vertex to a facet", -1);
    -  zdef_(wmin, Wmindenom, "min. denominator in hyperplane computation", -1);
    -
    -  qhstat precision= qhstat next;  /* call qh_precision for each of these */
    -  zzdef_(zdoc, Zdoc3, "precision problems (corrected unless 'Q0' or an error)", -1);
    -  zzdef_(zinc, Zcoplanarridges, "coplanar half ridges in output", -1);
    -  zzdef_(zinc, Zconcaveridges, "concave half ridges in output", -1);
    -  zzdef_(zinc, Zflippedfacets, "flipped facets", -1);
    -  zzdef_(zinc, Zcoplanarhorizon, "coplanar horizon facets for new vertices", -1);
    -  zzdef_(zinc, Zcoplanarpart, "coplanar points during partitioning", -1);
    -  zzdef_(zinc, Zminnorm, "degenerate hyperplanes recomputed with gaussian elimination", -1);
    -  zzdef_(zinc, Znearlysingular, "nearly singular or axis-parallel hyperplanes", -1);
    -  zzdef_(zinc, Zback0, "zero divisors during back substitute", -1);
    -  zzdef_(zinc, Zgauss0, "zero divisors during gaussian elimination", -1);
    -  zzdef_(zinc, Zmultiridge, "ridges with multiple neighbors", -1);
    -}
    -void qh_allstatB(void) {
    -  zzdef_(zdoc, Zdoc1, "summary information", -1);
    -  zdef_(zinc, Zvertices, "number of vertices in output", -1);
    -  zdef_(zinc, Znumfacets, "number of facets in output", -1);
    -  zdef_(zinc, Znonsimplicial, "number of non-simplicial facets in output", -1);
    -  zdef_(zinc, Znowsimplicial, "number of simplicial facets that were merged", -1);
    -  zdef_(zinc, Znumridges, "number of ridges in output", -1);
    -  zdef_(zadd, Znumridges, "average number of ridges per facet", Znumfacets);
    -  zdef_(zmax, Zmaxridges, "maximum number of ridges", -1);
    -  zdef_(zadd, Znumneighbors, "average number of neighbors per facet", Znumfacets);
    -  zdef_(zmax, Zmaxneighbors, "maximum number of neighbors", -1);
    -  zdef_(zadd, Znumvertices, "average number of vertices per facet", Znumfacets);
    -  zdef_(zmax, Zmaxvertices, "maximum number of vertices", -1);
    -  zdef_(zadd, Znumvneighbors, "average number of neighbors per vertex", Zvertices);
    -  zdef_(zmax, Zmaxvneighbors, "maximum number of neighbors", -1);
    -  zdef_(wadd, Wcpu, "cpu seconds for qhull after input", -1);
    -  zdef_(zinc, Ztotvertices, "vertices created altogether", -1);
    -  zzdef_(zinc, Zsetplane, "facets created altogether", -1);
    -  zdef_(zinc, Ztotridges, "ridges created altogether", -1);
    -  zdef_(zinc, Zpostfacets, "facets before post merge", -1);
    -  zdef_(zadd, Znummergetot, "average merges per facet(at most 511)", Znumfacets);
    -  zdef_(zmax, Znummergemax, "  maximum merges for a facet(at most 511)", -1);
    -  zdef_(zinc, Zangle, NULL, -1);
    -  zdef_(wadd, Wangle, "average angle(cosine) of facet normals for all ridges", Zangle);
    -  zdef_(wmax, Wanglemax, "  maximum angle(cosine) of facet normals across a ridge", -1);
    -  zdef_(wmin, Wanglemin, "  minimum angle(cosine) of facet normals across a ridge", -1);
    -  zdef_(wadd, Wareatot, "total area of facets", -1);
    -  zdef_(wmax, Wareamax, "  maximum facet area", -1);
    -  zdef_(wmin, Wareamin, "  minimum facet area", -1);
    -}
    -void qh_allstatC(void) {
    -  zdef_(zdoc, Zdoc9, "build hull statistics", -1);
    -  zzdef_(zinc, Zprocessed, "points processed", -1);
    -  zzdef_(zinc, Zretry, "retries due to precision problems", -1);
    -  zdef_(wmax, Wretrymax, "  max. random joggle", -1);
    -  zdef_(zmax, Zmaxvertex, "max. vertices at any one time", -1);
    -  zdef_(zinc, Ztotvisible, "ave. visible facets per iteration", Zprocessed);
    -  zdef_(zinc, Zinsidevisible, "  ave. visible facets without an horizon neighbor", Zprocessed);
    -  zdef_(zadd, Zvisfacettot,  "  ave. facets deleted per iteration", Zprocessed);
    -  zdef_(zmax, Zvisfacetmax,  "    maximum", -1);
    -  zdef_(zadd, Zvisvertextot, "ave. visible vertices per iteration", Zprocessed);
    -  zdef_(zmax, Zvisvertexmax, "    maximum", -1);
    -  zdef_(zinc, Ztothorizon, "ave. horizon facets per iteration", Zprocessed);
    -  zdef_(zadd, Znewfacettot,  "ave. new or merged facets per iteration", Zprocessed);
    -  zdef_(zmax, Znewfacetmax,  "    maximum(includes initial simplex)", -1);
    -  zdef_(wadd, Wnewbalance, "average new facet balance", Zprocessed);
    -  zdef_(wadd, Wnewbalance2, "  standard deviation", -1);
    -  zdef_(wadd, Wpbalance, "average partition balance", Zpbalance);
    -  zdef_(wadd, Wpbalance2, "  standard deviation", -1);
    -  zdef_(zinc, Zpbalance, "  number of trials", -1);
    -  zdef_(zinc, Zsearchpoints, "searches of all points for initial simplex", -1);
    -  zdef_(zinc, Zdetsimplex, "determinants computed(area & initial hull)", -1);
    -  zdef_(zinc, Znoarea, "determinants not computed because vertex too low", -1);
    -  zdef_(zinc, Znotmax, "points ignored(!above max_outside)", -1);
    -  zdef_(zinc, Znotgood, "points ignored(!above a good facet)", -1);
    -  zdef_(zinc, Znotgoodnew, "points ignored(didn't create a good new facet)", -1);
    -  zdef_(zinc, Zgoodfacet, "good facets found", -1);
    -  zzdef_(zinc, Znumvisibility, "distance tests for facet visibility", -1);
    -  zdef_(zinc, Zdistvertex, "distance tests to report minimum vertex", -1);
    -  zzdef_(zinc, Ztotcheck, "points checked for facets' outer planes", -1);
    -  zzdef_(zinc, Zcheckpart, "  ave. distance tests per check", Ztotcheck);
    -}
    -void qh_allstatD(void) {
    -  zdef_(zinc, Zvisit, "resets of visit_id", -1);
    -  zdef_(zinc, Zvvisit, "  resets of vertex_visit", -1);
    -  zdef_(zmax, Zvisit2max, "  max visit_id/2", -1);
    -  zdef_(zmax, Zvvisit2max, "  max vertex_visit/2", -1);
    -
    -  zdef_(zdoc, Zdoc4, "partitioning statistics(see previous for outer planes)", -1);
    -  zzdef_(zadd, Zdelvertextot, "total vertices deleted", -1);
    -  zdef_(zmax, Zdelvertexmax, "    maximum vertices deleted per iteration", -1);
    -  zdef_(zinc, Zfindbest, "calls to findbest", -1);
    -  zdef_(zadd, Zfindbesttot, " ave. facets tested", Zfindbest);
    -  zdef_(zmax, Zfindbestmax, " max. facets tested", -1);
    -  zdef_(zadd, Zfindcoplanar, " ave. coplanar search", Zfindbest);
    -  zdef_(zinc, Zfindnew, "calls to findbestnew", -1);
    -  zdef_(zadd, Zfindnewtot, " ave. facets tested", Zfindnew);
    -  zdef_(zmax, Zfindnewmax, " max. facets tested", -1);
    -  zdef_(zinc, Zfindnewjump, " ave. clearly better", Zfindnew);
    -  zdef_(zinc, Zfindnewsharp, " calls due to qh_sharpnewfacets", -1);
    -  zdef_(zinc, Zfindhorizon, "calls to findhorizon", -1);
    -  zdef_(zadd, Zfindhorizontot, " ave. facets tested", Zfindhorizon);
    -  zdef_(zmax, Zfindhorizonmax, " max. facets tested", -1);
    -  zdef_(zinc, Zfindjump,       " ave. clearly better", Zfindhorizon);
    -  zdef_(zinc, Zparthorizon, " horizon facets better than bestfacet", -1);
    -  zdef_(zinc, Zpartangle, "angle tests for repartitioned coplanar points", -1);
    -  zdef_(zinc, Zpartflip, "  repartitioned coplanar points for flipped orientation", -1);
    -}
    -void qh_allstatE(void) {
    -  zdef_(zinc, Zpartinside, "inside points", -1);
    -  zdef_(zinc, Zpartnear, "  inside points kept with a facet", -1);
    -  zdef_(zinc, Zcoplanarinside, "  inside points that were coplanar with a facet", -1);
    -  zdef_(zinc, Zbestlower, "calls to findbestlower", -1);
    -  zdef_(zinc, Zbestlowerv, "  with search of vertex neighbors", -1);
    -  zdef_(wadd, Wmaxout, "difference in max_outside at final check", -1);
    -  zzdef_(zinc, Zpartitionall, "distance tests for initial partition", -1);
    -  zdef_(zinc, Ztotpartition, "partitions of a point", -1);
    -  zzdef_(zinc, Zpartition, "distance tests for partitioning", -1);
    -  zzdef_(zinc, Zdistcheck, "distance tests for checking flipped facets", -1);
    -  zzdef_(zinc, Zdistconvex, "distance tests for checking convexity", -1);
    -  zdef_(zinc, Zdistgood, "distance tests for checking good point", -1);
    -  zdef_(zinc, Zdistio, "distance tests for output", -1);
    -  zdef_(zinc, Zdiststat, "distance tests for statistics", -1);
    -  zdef_(zinc, Zdistplane, "total number of distance tests", -1);
    -  zdef_(zinc, Ztotpartcoplanar, "partitions of coplanar points or deleted vertices", -1);
    -  zzdef_(zinc, Zpartcoplanar, "   distance tests for these partitions", -1);
    -  zdef_(zinc, Zcomputefurthest, "distance tests for computing furthest", -1);
    -}
    -void qh_allstatE2(void) {
    -  zdef_(zdoc, Zdoc5, "statistics for matching ridges", -1);
    -  zdef_(zinc, Zhashlookup, "total lookups for matching ridges of new facets", -1);
    -  zdef_(zinc, Zhashtests, "average number of tests to match a ridge", Zhashlookup);
    -  zdef_(zinc, Zhashridge, "total lookups of subridges(duplicates and boundary)", -1);
    -  zdef_(zinc, Zhashridgetest, "average number of tests per subridge", Zhashridge);
    -  zdef_(zinc, Zdupsame, "duplicated ridges in same merge cycle", -1);
    -  zdef_(zinc, Zdupflip, "duplicated ridges with flipped facets", -1);
    -
    -  zdef_(zdoc, Zdoc6, "statistics for determining merges", -1);
    -  zdef_(zinc, Zangletests, "angles computed for ridge convexity", -1);
    -  zdef_(zinc, Zbestcentrum, "best merges used centrum instead of vertices",-1);
    -  zzdef_(zinc, Zbestdist, "distance tests for best merge", -1);
    -  zzdef_(zinc, Zcentrumtests, "distance tests for centrum convexity", -1);
    -  zzdef_(zinc, Zdistzero, "distance tests for checking simplicial convexity", -1);
    -  zdef_(zinc, Zcoplanarangle, "coplanar angles in getmergeset", -1);
    -  zdef_(zinc, Zcoplanarcentrum, "coplanar centrums in getmergeset", -1);
    -  zdef_(zinc, Zconcaveridge, "concave ridges in getmergeset", -1);
    -}
    -void qh_allstatF(void) {
    -  zdef_(zdoc, Zdoc7, "statistics for merging", -1);
    -  zdef_(zinc, Zpremergetot, "merge iterations", -1);
    -  zdef_(zadd, Zmergeinittot, "ave. initial non-convex ridges per iteration", Zpremergetot);
    -  zdef_(zadd, Zmergeinitmax, "  maximum", -1);
    -  zdef_(zadd, Zmergesettot, "  ave. additional non-convex ridges per iteration", Zpremergetot);
    -  zdef_(zadd, Zmergesetmax, "  maximum additional in one pass", -1);
    -  zdef_(zadd, Zmergeinittot2, "initial non-convex ridges for post merging", -1);
    -  zdef_(zadd, Zmergesettot2, "  additional non-convex ridges", -1);
    -  zdef_(wmax, Wmaxoutside, "max distance of vertex or coplanar point above facet(w/roundoff)", -1);
    -  zdef_(wmin, Wminvertex, "max distance of merged vertex below facet(or roundoff)", -1);
    -  zdef_(zinc, Zwidefacet, "centrums frozen due to a wide merge", -1);
    -  zdef_(zinc, Zwidevertices, "centrums frozen due to extra vertices", -1);
    -  zzdef_(zinc, Ztotmerge, "total number of facets or cycles of facets merged", -1);
    -  zdef_(zinc, Zmergesimplex, "merged a simplex", -1);
    -  zdef_(zinc, Zonehorizon, "simplices merged into coplanar horizon", -1);
    -  zzdef_(zinc, Zcyclehorizon, "cycles of facets merged into coplanar horizon", -1);
    -  zzdef_(zadd, Zcyclefacettot, "  ave. facets per cycle", Zcyclehorizon);
    -  zdef_(zmax, Zcyclefacetmax, "  max. facets", -1);
    -  zdef_(zinc, Zmergeintohorizon, "new facets merged into horizon", -1);
    -  zdef_(zinc, Zmergenew, "new facets merged", -1);
    -  zdef_(zinc, Zmergehorizon, "horizon facets merged into new facets", -1);
    -  zdef_(zinc, Zmergevertex, "vertices deleted by merging", -1);
    -  zdef_(zinc, Zcyclevertex, "vertices deleted by merging into coplanar horizon", -1);
    -  zdef_(zinc, Zdegenvertex, "vertices deleted by degenerate facet", -1);
    -  zdef_(zinc, Zmergeflipdup, "merges due to flipped facets in duplicated ridge", -1);
    -  zdef_(zinc, Zneighbor, "merges due to redundant neighbors", -1);
    -  zdef_(zadd, Ztestvneighbor, "non-convex vertex neighbors", -1);
    -}
    -void qh_allstatG(void) {
    -  zdef_(zinc, Zacoplanar, "merges due to angle coplanar facets", -1);
    -  zdef_(wadd, Wacoplanartot, "  average merge distance", Zacoplanar);
    -  zdef_(wmax, Wacoplanarmax, "  maximum merge distance", -1);
    -  zdef_(zinc, Zcoplanar, "merges due to coplanar facets", -1);
    -  zdef_(wadd, Wcoplanartot, "  average merge distance", Zcoplanar);
    -  zdef_(wmax, Wcoplanarmax, "  maximum merge distance", -1);
    -  zdef_(zinc, Zconcave, "merges due to concave facets", -1);
    -  zdef_(wadd, Wconcavetot, "  average merge distance", Zconcave);
    -  zdef_(wmax, Wconcavemax, "  maximum merge distance", -1);
    -  zdef_(zinc, Zavoidold, "coplanar/concave merges due to avoiding old merge", -1);
    -  zdef_(wadd, Wavoidoldtot, "  average merge distance", Zavoidold);
    -  zdef_(wmax, Wavoidoldmax, "  maximum merge distance", -1);
    -  zdef_(zinc, Zdegen, "merges due to degenerate facets", -1);
    -  zdef_(wadd, Wdegentot, "  average merge distance", Zdegen);
    -  zdef_(wmax, Wdegenmax, "  maximum merge distance", -1);
    -  zdef_(zinc, Zflipped, "merges due to removing flipped facets", -1);
    -  zdef_(wadd, Wflippedtot, "  average merge distance", Zflipped);
    -  zdef_(wmax, Wflippedmax, "  maximum merge distance", -1);
    -  zdef_(zinc, Zduplicate, "merges due to duplicated ridges", -1);
    -  zdef_(wadd, Wduplicatetot, "  average merge distance", Zduplicate);
    -  zdef_(wmax, Wduplicatemax, "  maximum merge distance", -1);
    -}
    -void qh_allstatH(void) {
    -  zdef_(zdoc, Zdoc8, "renamed vertex statistics", -1);
    -  zdef_(zinc, Zrenameshare, "renamed vertices shared by two facets", -1);
    -  zdef_(zinc, Zrenamepinch, "renamed vertices in a pinched facet", -1);
    -  zdef_(zinc, Zrenameall, "renamed vertices shared by multiple facets", -1);
    -  zdef_(zinc, Zfindfail, "rename failures due to duplicated ridges", -1);
    -  zdef_(zinc, Zdupridge, "  duplicate ridges detected", -1);
    -  zdef_(zinc, Zdelridge, "deleted ridges due to renamed vertices", -1);
    -  zdef_(zinc, Zdropneighbor, "dropped neighbors due to renamed vertices", -1);
    -  zdef_(zinc, Zdropdegen, "degenerate facets due to dropped neighbors", -1);
    -  zdef_(zinc, Zdelfacetdup, "  facets deleted because of no neighbors", -1);
    -  zdef_(zinc, Zremvertex, "vertices removed from facets due to no ridges", -1);
    -  zdef_(zinc, Zremvertexdel, "  deleted", -1);
    -  zdef_(zinc, Zintersectnum, "vertex intersections for locating redundant vertices", -1);
    -  zdef_(zinc, Zintersectfail, "intersections failed to find a redundant vertex", -1);
    -  zdef_(zinc, Zintersect, "intersections found redundant vertices", -1);
    -  zdef_(zadd, Zintersecttot, "   ave. number found per vertex", Zintersect);
    -  zdef_(zmax, Zintersectmax, "   max. found for a vertex", -1);
    -  zdef_(zinc, Zvertexridge, NULL, -1);
    -  zdef_(zadd, Zvertexridgetot, "  ave. number of ridges per tested vertex", Zvertexridge);
    -  zdef_(zmax, Zvertexridgemax, "  max. number of ridges per tested vertex", -1);
    -
    -  zdef_(zdoc, Zdoc10, "memory usage statistics(in bytes)", -1);
    -  zdef_(zadd, Zmemfacets, "for facets and their normals, neighbor and vertex sets", -1);
    -  zdef_(zadd, Zmemvertices, "for vertices and their neighbor sets", -1);
    -  zdef_(zadd, Zmempoints, "for input points and outside and coplanar sets",-1);
    -  zdef_(zadd, Zmemridges, "for ridges and their vertex sets", -1);
    -} /* allstat */
    -
    -void qh_allstatI(void) {
    -  qhstat vridges= qhstat next;
    -  zzdef_(zdoc, Zdoc11, "Voronoi ridge statistics", -1);
    -  zzdef_(zinc, Zridge, "non-simplicial Voronoi vertices for all ridges", -1);
    -  zzdef_(wadd, Wridge, "  ave. distance to ridge", Zridge);
    -  zzdef_(wmax, Wridgemax, "  max. distance to ridge", -1);
    -  zzdef_(zinc, Zridgemid, "bounded ridges", -1);
    -  zzdef_(wadd, Wridgemid, "  ave. distance of midpoint to ridge", Zridgemid);
    -  zzdef_(wmax, Wridgemidmax, "  max. distance of midpoint to ridge", -1);
    -  zzdef_(zinc, Zridgeok, "bounded ridges with ok normal", -1);
    -  zzdef_(wadd, Wridgeok, "  ave. angle to ridge", Zridgeok);
    -  zzdef_(wmax, Wridgeokmax, "  max. angle to ridge", -1);
    -  zzdef_(zinc, Zridge0, "bounded ridges with near-zero normal", -1);
    -  zzdef_(wadd, Wridge0, "  ave. angle to ridge", Zridge0);
    -  zzdef_(wmax, Wridge0max, "  max. angle to ridge", -1);
    -
    -  zdef_(zdoc, Zdoc12, "Triangulation statistics(Qt)", -1);
    -  zdef_(zinc, Ztricoplanar, "non-simplicial facets triangulated", -1);
    -  zdef_(zadd, Ztricoplanartot, "  ave. new facets created(may be deleted)", Ztricoplanar);
    -  zdef_(zmax, Ztricoplanarmax, "  max. new facets created", -1);
    -  zdef_(zinc, Ztrinull, "null new facets deleted(duplicated vertex)", -1);
    -  zdef_(zinc, Ztrimirror, "mirrored pairs of new facets deleted(same vertices)", -1);
    -  zdef_(zinc, Ztridegen, "degenerate new facets in output(same ridge)", -1);
    -} /* allstat */
    -
    -/*---------------------------------
    -
    -  qh_allstatistics()
    -    reset printed flag for all statistics
    -*/
    -void qh_allstatistics(void) {
    -  int i;
    -
    -  for(i=ZEND; i--; )
    -    qhstat printed[i]= False;
    -} /* allstatistics */
    -
    -#if qh_KEEPstatistics
    -/*---------------------------------
    -
    -  qh_collectstatistics()
    -    collect statistics for qh.facet_list
    -
    -*/
    -void qh_collectstatistics(void) {
    -  facetT *facet, *neighbor, **neighborp;
    -  vertexT *vertex, **vertexp;
    -  realT dotproduct, dist;
    -  int sizneighbors, sizridges, sizvertices, i;
    -
    -  qh old_randomdist= qh RANDOMdist;
    -  qh RANDOMdist= False;
    -  zval_(Zmempoints)= qh num_points * qh normal_size +
    -                             sizeof(qhT) + sizeof(qhstatT);
    -  zval_(Zmemfacets)= 0;
    -  zval_(Zmemridges)= 0;
    -  zval_(Zmemvertices)= 0;
    -  zval_(Zangle)= 0;
    -  wval_(Wangle)= 0.0;
    -  zval_(Znumridges)= 0;
    -  zval_(Znumfacets)= 0;
    -  zval_(Znumneighbors)= 0;
    -  zval_(Znumvertices)= 0;
    -  zval_(Znumvneighbors)= 0;
    -  zval_(Znummergetot)= 0;
    -  zval_(Znummergemax)= 0;
    -  zval_(Zvertices)= qh num_vertices - qh_setsize(qh del_vertices);
    -  if (qh MERGING || qh APPROXhull || qh JOGGLEmax < REALmax/2)
    -    wmax_(Wmaxoutside, qh max_outside);
    -  if (qh MERGING)
    -    wmin_(Wminvertex, qh min_vertex);
    -  FORALLfacets
    -    facet->seen= False;
    -  if (qh DELAUNAY) {
    -    FORALLfacets {
    -      if (facet->upperdelaunay != qh UPPERdelaunay)
    -        facet->seen= True; /* remove from angle statistics */
    -    }
    -  }
    -  FORALLfacets {
    -    if (facet->visible && qh NEWfacets)
    -      continue;
    -    sizvertices= qh_setsize(facet->vertices);
    -    sizneighbors= qh_setsize(facet->neighbors);
    -    sizridges= qh_setsize(facet->ridges);
    -    zinc_(Znumfacets);
    -    zadd_(Znumvertices, sizvertices);
    -    zmax_(Zmaxvertices, sizvertices);
    -    zadd_(Znumneighbors, sizneighbors);
    -    zmax_(Zmaxneighbors, sizneighbors);
    -    zadd_(Znummergetot, facet->nummerge);
    -    i= facet->nummerge; /* avoid warnings */
    -    zmax_(Znummergemax, i);
    -    if (!facet->simplicial) {
    -      if (sizvertices == qh hull_dim) {
    -        zinc_(Znowsimplicial);
    -      }else {
    -        zinc_(Znonsimplicial);
    -      }
    -    }
    -    if (sizridges) {
    -      zadd_(Znumridges, sizridges);
    -      zmax_(Zmaxridges, sizridges);
    -    }
    -    zadd_(Zmemfacets, sizeof(facetT) + qh normal_size + 2*sizeof(setT)
    -       + SETelemsize * (sizneighbors + sizvertices));
    -    if (facet->ridges) {
    -      zadd_(Zmemridges,
    -         sizeof(setT) + SETelemsize * sizridges + sizridges *
    -         (sizeof(ridgeT) + sizeof(setT) + SETelemsize * (qh hull_dim-1))/2);
    -    }
    -    if (facet->outsideset)
    -      zadd_(Zmempoints, sizeof(setT) + SETelemsize * qh_setsize(facet->outsideset));
    -    if (facet->coplanarset)
    -      zadd_(Zmempoints, sizeof(setT) + SETelemsize * qh_setsize(facet->coplanarset));
    -    if (facet->seen) /* Delaunay upper envelope */
    -      continue;
    -    facet->seen= True;
    -    FOREACHneighbor_(facet) {
    -      if (neighbor == qh_DUPLICATEridge || neighbor == qh_MERGEridge
    -          || neighbor->seen || !facet->normal || !neighbor->normal)
    -        continue;
    -      dotproduct= qh_getangle(facet->normal, neighbor->normal);
    -      zinc_(Zangle);
    -      wadd_(Wangle, dotproduct);
    -      wmax_(Wanglemax, dotproduct)
    -      wmin_(Wanglemin, dotproduct)
    -    }
    -    if (facet->normal) {
    -      FOREACHvertex_(facet->vertices) {
    -        zinc_(Zdiststat);
    -        qh_distplane(vertex->point, facet, &dist);
    -        wmax_(Wvertexmax, dist);
    -        wmin_(Wvertexmin, dist);
    -      }
    -    }
    -  }
    -  FORALLvertices {
    -    if (vertex->deleted)
    -      continue;
    -    zadd_(Zmemvertices, sizeof(vertexT));
    -    if (vertex->neighbors) {
    -      sizneighbors= qh_setsize(vertex->neighbors);
    -      zadd_(Znumvneighbors, sizneighbors);
    -      zmax_(Zmaxvneighbors, sizneighbors);
    -      zadd_(Zmemvertices, sizeof(vertexT) + SETelemsize * sizneighbors);
    -    }
    -  }
    -  qh RANDOMdist= qh old_randomdist;
    -} /* collectstatistics */
    -#endif /* qh_KEEPstatistics */
    -
    -/*---------------------------------
    -
    -  qh_freestatistics(  )
    -    free memory used for statistics
    -*/
    -void qh_freestatistics(void) {
    -
    -#if qh_QHpointer
    -  qh_free(qh_qhstat);
    -  qh_qhstat= NULL;
    -#endif
    -} /* freestatistics */
    -
    -/*---------------------------------
    -
    -  qh_initstatistics(  )
    -    allocate and initialize statistics
    -
    -  notes:
    -    uses qh_malloc() instead of qh_memalloc() since mem.c not set up yet
    -    NOerrors -- qh_initstatistics can not use qh_errexit().  One first call, qh_memalloc is not initialized.  Also invoked by QhullQh().
    -*/
    -void qh_initstatistics(void) {
    -  int i;
    -  realT realx;
    -  int intx;
    -
    -#if qh_QHpointer
    -  if(qh_qhstat){  /* qh_initstatistics may be called from Qhull::resetStatistics() */
    -      qh_free(qh_qhstat);
    -      qh_qhstat= 0;
    -  }
    -  if (!(qh_qhstat= (qhstatT *)qh_malloc(sizeof(qhstatT)))) {
    -    qh_fprintf(qhmem.ferr, 6183, "qhull error (qh_initstatistics): insufficient memory\n");
    -    qh_exit(qh_ERRmem);  /* can not use qh_errexit() */
    -  }
    -#endif
    -
    -  qhstat next= 0;
    -  qh_allstatA();
    -  qh_allstatB();
    -  qh_allstatC();
    -  qh_allstatD();
    -  qh_allstatE();
    -  qh_allstatE2();
    -  qh_allstatF();
    -  qh_allstatG();
    -  qh_allstatH();
    -  qh_allstatI();
    -  if (qhstat next > (int)sizeof(qhstat id)) {
    -    qh_fprintf(qhmem.ferr, 6184, "qhull error (qh_initstatistics): increase size of qhstat.id[].\n\
    -      qhstat.next %d should be <= sizeof(qhstat id) %d\n", qhstat next, (int)sizeof(qhstat id));
    -#if 0 /* for locating error, Znumridges should be duplicated */
    -    for(i=0; i < ZEND; i++) {
    -      int j;
    -      for(j=i+1; j < ZEND; j++) {
    -        if (qhstat id[i] == qhstat id[j]) {
    -          qh_fprintf(qhmem.ferr, 6185, "qhull error (qh_initstatistics): duplicated statistic %d at indices %d and %d\n",
    -              qhstat id[i], i, j);
    -        }
    -      }
    -    }
    -#endif
    -    qh_exit(qh_ERRqhull);  /* can not use qh_errexit() */
    -  }
    -  qhstat init[zinc].i= 0;
    -  qhstat init[zadd].i= 0;
    -  qhstat init[zmin].i= INT_MAX;
    -  qhstat init[zmax].i= INT_MIN;
    -  qhstat init[wadd].r= 0;
    -  qhstat init[wmin].r= REALmax;
    -  qhstat init[wmax].r= -REALmax;
    -  for(i=0; i < ZEND; i++) {
    -    if (qhstat type[i] > ZTYPEreal) {
    -      realx= qhstat init[(unsigned char)(qhstat type[i])].r;
    -      qhstat stats[i].r= realx;
    -    }else if (qhstat type[i] != zdoc) {
    -      intx= qhstat init[(unsigned char)(qhstat type[i])].i;
    -      qhstat stats[i].i= intx;
    -    }
    -  }
    -} /* initstatistics */
    -
    -/*---------------------------------
    -
    -  qh_newstats(  )
    -    returns True if statistics for zdoc
    -
    -  returns:
    -    next zdoc
    -*/
    -boolT qh_newstats(int idx, int *nextindex) {
    -  boolT isnew= False;
    -  int start, i;
    -
    -  if (qhstat type[qhstat id[idx]] == zdoc)
    -    start= idx+1;
    -  else
    -    start= idx;
    -  for(i= start; i < qhstat next && qhstat type[qhstat id[i]] != zdoc; i++) {
    -    if (!qh_nostatistic(qhstat id[i]) && !qhstat printed[qhstat id[i]])
    -        isnew= True;
    -  }
    -  *nextindex= i;
    -  return isnew;
    -} /* newstats */
    -
    -/*---------------------------------
    -
    -  qh_nostatistic( index )
    -    true if no statistic to print
    -*/
    -boolT qh_nostatistic(int i) {
    -
    -  if ((qhstat type[i] > ZTYPEreal
    -       &&qhstat stats[i].r == qhstat init[(unsigned char)(qhstat type[i])].r)
    -      || (qhstat type[i] < ZTYPEreal
    -          &&qhstat stats[i].i == qhstat init[(unsigned char)(qhstat type[i])].i))
    -    return True;
    -  return False;
    -} /* nostatistic */
    -
    -#if qh_KEEPstatistics
    -/*---------------------------------
    -
    -  qh_printallstatistics( fp, string )
    -    print all statistics with header 'string'
    -*/
    -void qh_printallstatistics(FILE *fp, const char *string) {
    -
    -  qh_allstatistics();
    -  qh_collectstatistics();
    -  qh_printstatistics(fp, string);
    -  qh_memstatistics(fp);
    -}
    -
    -
    -/*---------------------------------
    -
    -  qh_printstatistics( fp, string )
    -    print statistics to a file with header 'string'
    -    skips statistics with qhstat.printed[] (reset with qh_allstatistics)
    -
    -  see:
    -    qh_printallstatistics()
    -*/
    -void qh_printstatistics(FILE *fp, const char *string) {
    -  int i, k;
    -  realT ave;
    -
    -  if (qh num_points != qh num_vertices) {
    -    wval_(Wpbalance)= 0;
    -    wval_(Wpbalance2)= 0;
    -  }else
    -    wval_(Wpbalance2)= qh_stddev(zval_(Zpbalance), wval_(Wpbalance),
    -                                 wval_(Wpbalance2), &ave);
    -  wval_(Wnewbalance2)= qh_stddev(zval_(Zprocessed), wval_(Wnewbalance),
    -                                 wval_(Wnewbalance2), &ave);
    -  qh_fprintf(fp, 9350, "\n\
    -%s\n\
    - qhull invoked by: %s | %s\n%s with options:\n%s\n", string, qh rbox_command,
    -     qh qhull_command, qh_version, qh qhull_options);
    -  qh_fprintf(fp, 9351, "\nprecision constants:\n\
    - %6.2g max. abs. coordinate in the (transformed) input('Qbd:n')\n\
    - %6.2g max. roundoff error for distance computation('En')\n\
    - %6.2g max. roundoff error for angle computations\n\
    - %6.2g min. distance for outside points ('Wn')\n\
    - %6.2g min. distance for visible facets ('Vn')\n\
    - %6.2g max. distance for coplanar facets ('Un')\n\
    - %6.2g max. facet width for recomputing centrum and area\n\
    -",
    -  qh MAXabs_coord, qh DISTround, qh ANGLEround, qh MINoutside,
    -        qh MINvisible, qh MAXcoplanar, qh WIDEfacet);
    -  if (qh KEEPnearinside)
    -    qh_fprintf(fp, 9352, "\
    - %6.2g max. distance for near-inside points\n", qh NEARinside);
    -  if (qh premerge_cos < REALmax/2) qh_fprintf(fp, 9353, "\
    - %6.2g max. cosine for pre-merge angle\n", qh premerge_cos);
    -  if (qh PREmerge) qh_fprintf(fp, 9354, "\
    - %6.2g radius of pre-merge centrum\n", qh premerge_centrum);
    -  if (qh postmerge_cos < REALmax/2) qh_fprintf(fp, 9355, "\
    - %6.2g max. cosine for post-merge angle\n", qh postmerge_cos);
    -  if (qh POSTmerge) qh_fprintf(fp, 9356, "\
    - %6.2g radius of post-merge centrum\n", qh postmerge_centrum);
    -  qh_fprintf(fp, 9357, "\
    - %6.2g max. distance for merging two simplicial facets\n\
    - %6.2g max. roundoff error for arithmetic operations\n\
    - %6.2g min. denominator for divisions\n\
    -  zero diagonal for Gauss: ", qh ONEmerge, REALepsilon, qh MINdenom);
    -  for(k=0; k < qh hull_dim; k++)
    -    qh_fprintf(fp, 9358, "%6.2e ", qh NEARzero[k]);
    -  qh_fprintf(fp, 9359, "\n\n");
    -  for(i=0 ; i < qhstat next; )
    -    qh_printstats(fp, i, &i);
    -} /* printstatistics */
    -#endif /* qh_KEEPstatistics */
    -
    -/*---------------------------------
    -
    -  qh_printstatlevel( fp, id )
    -    print level information for a statistic
    -
    -  notes:
    -    nop if id >= ZEND, printed, or same as initial value
    -*/
    -void qh_printstatlevel(FILE *fp, int id, int start) {
    -#define NULLfield "       "
    -
    -  if (id >= ZEND || qhstat printed[id])
    -    return;
    -  if (qhstat type[id] == zdoc) {
    -    qh_fprintf(fp, 9360, "%s\n", qhstat doc[id]);
    -    return;
    -  }
    -  start= 0; /* not used */
    -  if (qh_nostatistic(id) || !qhstat doc[id])
    -    return;
    -  qhstat printed[id]= True;
    -  if (qhstat count[id] != -1
    -      && qhstat stats[(unsigned char)(qhstat count[id])].i == 0)
    -    qh_fprintf(fp, 9361, " *0 cnt*");
    -  else if (qhstat type[id] >= ZTYPEreal && qhstat count[id] == -1)
    -    qh_fprintf(fp, 9362, "%7.2g", qhstat stats[id].r);
    -  else if (qhstat type[id] >= ZTYPEreal && qhstat count[id] != -1)
    -    qh_fprintf(fp, 9363, "%7.2g", qhstat stats[id].r/ qhstat stats[(unsigned char)(qhstat count[id])].i);
    -  else if (qhstat type[id] < ZTYPEreal && qhstat count[id] == -1)
    -    qh_fprintf(fp, 9364, "%7d", qhstat stats[id].i);
    -  else if (qhstat type[id] < ZTYPEreal && qhstat count[id] != -1)
    -    qh_fprintf(fp, 9365, "%7.3g", (realT) qhstat stats[id].i / qhstat stats[(unsigned char)(qhstat count[id])].i);
    -  qh_fprintf(fp, 9366, " %s\n", qhstat doc[id]);
    -} /* printstatlevel */
    -
    -
    -/*---------------------------------
    -
    -  qh_printstats( fp, index, nextindex )
    -    print statistics for a zdoc group
    -
    -  returns:
    -    next zdoc if non-null
    -*/
    -void qh_printstats(FILE *fp, int idx, int *nextindex) {
    -  int j, nexti;
    -
    -  if (qh_newstats(idx, &nexti)) {
    -    qh_fprintf(fp, 9367, "\n");
    -    for (j=idx; j--------------------------------
    -
    -  qh_stddev( num, tot, tot2, ave )
    -    compute the standard deviation and average from statistics
    -
    -    tot2 is the sum of the squares
    -  notes:
    -    computes r.m.s.:
    -      (x-ave)^2
    -      == x^2 - 2x tot/num +   (tot/num)^2
    -      == tot2 - 2 tot tot/num + tot tot/num
    -      == tot2 - tot ave
    -*/
    -realT qh_stddev(int num, realT tot, realT tot2, realT *ave) {
    -  realT stddev;
    -
    -  *ave= tot/num;
    -  stddev= sqrt(tot2/num - *ave * *ave);
    -  return stddev;
    -} /* stddev */
    -
    -#endif /* qh_KEEPstatistics */
    -
    -#if !qh_KEEPstatistics
    -void    qh_collectstatistics(void) {}
    -void    qh_printallstatistics(FILE *fp, char *string) {};
    -void    qh_printstatistics(FILE *fp, char *string) {}
    -#endif
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/stat.h b/scipy-0.10.1/scipy/spatial/qhull/src/stat.h
    deleted file mode 100644
    index da06165ca1..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/stat.h
    +++ /dev/null
    @@ -1,534 +0,0 @@
    -/*
      ---------------------------------
    -
    -   stat.h
    -     contains all statistics that are collected for qhull
    -
    -   see qh-stat.htm and stat.c
    -
    -   copyright (c) 1993-2010 The Geometry Center.
    -   $Id: //product/qhull/main/rel/src/stat.h#28 $$Change: 1164 $
    -   $DateTime: 2010/01/07 21:52:00 $$Author: bbarber $
    -
    -   recompile qhull if you change this file
    -
    -   Integer statistics are Z* while real statistics are W*.
    -
    -   define maydebugx to call a routine at every statistic event
    -
    -*/
    -
    -#ifndef qhDEFstat
    -#define qhDEFstat 1
    -
    -#include "libqhull.h"
    -
    -/*---------------------------------
    -
    -  qh_KEEPstatistics
    -    0 turns off statistic gathering (except zzdef/zzinc/zzadd/zzval/wwval)
    -*/
    -#ifndef qh_KEEPstatistics
    -#define qh_KEEPstatistics 1
    -#endif
    -
    -/*---------------------------------
    -
    -  Zxxx for integers, Wxxx for reals
    -
    -  notes:
    -    be sure that all statistics are defined in stat.c
    -      otherwise initialization may core dump
    -    can pick up all statistics by:
    -      grep '[zw].*_[(][ZW]' *.c >z.x
    -    remove trailers with query">-
    -    remove leaders with  query-replace-regexp [ ^I]+  (
    -*/
    -#if qh_KEEPstatistics
    -enum statistics {     /* alphabetical after Z/W */
    -    Zacoplanar,
    -    Wacoplanarmax,
    -    Wacoplanartot,
    -    Zangle,
    -    Wangle,
    -    Wanglemax,
    -    Wanglemin,
    -    Zangletests,
    -    Wareatot,
    -    Wareamax,
    -    Wareamin,
    -    Zavoidold,
    -    Wavoidoldmax,
    -    Wavoidoldtot,
    -    Zback0,
    -    Zbestcentrum,
    -    Zbestdist,
    -    Zbestlower,
    -    Zbestlowerv,
    -    Zcentrumtests,
    -    Zcheckpart,
    -    Zcomputefurthest,
    -    Zconcave,
    -    Wconcavemax,
    -    Wconcavetot,
    -    Zconcaveridges,
    -    Zconcaveridge,
    -    Zcoplanar,
    -    Wcoplanarmax,
    -    Wcoplanartot,
    -    Zcoplanarangle,
    -    Zcoplanarcentrum,
    -    Zcoplanarhorizon,
    -    Zcoplanarinside,
    -    Zcoplanarpart,
    -    Zcoplanarridges,
    -    Wcpu,
    -    Zcyclefacetmax,
    -    Zcyclefacettot,
    -    Zcyclehorizon,
    -    Zcyclevertex,
    -    Zdegen,
    -    Wdegenmax,
    -    Wdegentot,
    -    Zdegenvertex,
    -    Zdelfacetdup,
    -    Zdelridge,
    -    Zdelvertextot,
    -    Zdelvertexmax,
    -    Zdetsimplex,
    -    Zdistcheck,
    -    Zdistconvex,
    -    Zdistgood,
    -    Zdistio,
    -    Zdistplane,
    -    Zdiststat,
    -    Zdistvertex,
    -    Zdistzero,
    -    Zdoc1,
    -    Zdoc2,
    -    Zdoc3,
    -    Zdoc4,
    -    Zdoc5,
    -    Zdoc6,
    -    Zdoc7,
    -    Zdoc8,
    -    Zdoc9,
    -    Zdoc10,
    -    Zdoc11,
    -    Zdoc12,
    -    Zdropdegen,
    -    Zdropneighbor,
    -    Zdupflip,
    -    Zduplicate,
    -    Wduplicatemax,
    -    Wduplicatetot,
    -    Zdupridge,
    -    Zdupsame,
    -    Zflipped,
    -    Wflippedmax,
    -    Wflippedtot,
    -    Zflippedfacets,
    -    Zfindbest,
    -    Zfindbestmax,
    -    Zfindbesttot,
    -    Zfindcoplanar,
    -    Zfindfail,
    -    Zfindhorizon,
    -    Zfindhorizonmax,
    -    Zfindhorizontot,
    -    Zfindjump,
    -    Zfindnew,
    -    Zfindnewmax,
    -    Zfindnewtot,
    -    Zfindnewjump,
    -    Zfindnewsharp,
    -    Zgauss0,
    -    Zgoodfacet,
    -    Zhashlookup,
    -    Zhashridge,
    -    Zhashridgetest,
    -    Zhashtests,
    -    Zinsidevisible,
    -    Zintersect,
    -    Zintersectfail,
    -    Zintersectmax,
    -    Zintersectnum,
    -    Zintersecttot,
    -    Zmaxneighbors,
    -    Wmaxout,
    -    Wmaxoutside,
    -    Zmaxridges,
    -    Zmaxvertex,
    -    Zmaxvertices,
    -    Zmaxvneighbors,
    -    Zmemfacets,
    -    Zmempoints,
    -    Zmemridges,
    -    Zmemvertices,
    -    Zmergeflipdup,
    -    Zmergehorizon,
    -    Zmergeinittot,
    -    Zmergeinitmax,
    -    Zmergeinittot2,
    -    Zmergeintohorizon,
    -    Zmergenew,
    -    Zmergesettot,
    -    Zmergesetmax,
    -    Zmergesettot2,
    -    Zmergesimplex,
    -    Zmergevertex,
    -    Wmindenom,
    -    Wminvertex,
    -    Zminnorm,
    -    Zmultiridge,
    -    Znearlysingular,
    -    Zneighbor,
    -    Wnewbalance,
    -    Wnewbalance2,
    -    Znewfacettot,
    -    Znewfacetmax,
    -    Znewvertex,
    -    Wnewvertex,
    -    Wnewvertexmax,
    -    Znoarea,
    -    Znonsimplicial,
    -    Znowsimplicial,
    -    Znotgood,
    -    Znotgoodnew,
    -    Znotmax,
    -    Znumfacets,
    -    Znummergemax,
    -    Znummergetot,
    -    Znumneighbors,
    -    Znumridges,
    -    Znumvertices,
    -    Znumvisibility,
    -    Znumvneighbors,
    -    Zonehorizon,
    -    Zpartangle,
    -    Zpartcoplanar,
    -    Zpartflip,
    -    Zparthorizon,
    -    Zpartinside,
    -    Zpartition,
    -    Zpartitionall,
    -    Zpartnear,
    -    Zpbalance,
    -    Wpbalance,
    -    Wpbalance2,
    -    Zpostfacets,
    -    Zpremergetot,
    -    Zprocessed,
    -    Zremvertex,
    -    Zremvertexdel,
    -    Zrenameall,
    -    Zrenamepinch,
    -    Zrenameshare,
    -    Zretry,
    -    Wretrymax,
    -    Zridge,
    -    Wridge,
    -    Wridgemax,
    -    Zridge0,
    -    Wridge0,
    -    Wridge0max,
    -    Zridgemid,
    -    Wridgemid,
    -    Wridgemidmax,
    -    Zridgeok,
    -    Wridgeok,
    -    Wridgeokmax,
    -    Zsearchpoints,
    -    Zsetplane,
    -    Ztestvneighbor,
    -    Ztotcheck,
    -    Ztothorizon,
    -    Ztotmerge,
    -    Ztotpartcoplanar,
    -    Ztotpartition,
    -    Ztotridges,
    -    Ztotvertices,
    -    Ztotvisible,
    -    Ztricoplanar,
    -    Ztricoplanarmax,
    -    Ztricoplanartot,
    -    Ztridegen,
    -    Ztrimirror,
    -    Ztrinull,
    -    Wvertexmax,
    -    Wvertexmin,
    -    Zvertexridge,
    -    Zvertexridgetot,
    -    Zvertexridgemax,
    -    Zvertices,
    -    Zvisfacettot,
    -    Zvisfacetmax,
    -    Zvisit,
    -    Zvisit2max,
    -    Zvisvertextot,
    -    Zvisvertexmax,
    -    Zvvisit,
    -    Zvvisit2max,
    -    Zwidefacet,
    -    Zwidevertices,
    -    ZEND};
    -
    -/*---------------------------------
    -
    -  Zxxx/Wxxx statistics that remain defined if qh_KEEPstatistics=0
    -
    -  notes:
    -    be sure to use zzdef, zzinc, etc. with these statistics (no double checking!)
    -*/
    -#else
    -enum statistics {     /* for zzdef etc. macros */
    -  Zback0,
    -  Zbestdist,
    -  Zcentrumtests,
    -  Zcheckpart,
    -  Zconcaveridges,
    -  Zcoplanarhorizon,
    -  Zcoplanarpart,
    -  Zcoplanarridges,
    -  Zcyclefacettot,
    -  Zcyclehorizon,
    -  Zdelvertextot,
    -  Zdistcheck,
    -  Zdistconvex,
    -  Zdistzero,
    -  Zdoc1,
    -  Zdoc2,
    -  Zdoc3,
    -  Zdoc11,
    -  Zflippedfacets,
    -  Zgauss0,
    -  Zminnorm,
    -  Zmultiridge,
    -  Znearlysingular,
    -  Wnewvertexmax,
    -  Znumvisibility,
    -  Zpartcoplanar,
    -  Zpartition,
    -  Zpartitionall,
    -  Zprocessed,
    -  Zretry,
    -  Zridge,
    -  Wridge,
    -  Wridgemax,
    -  Zridge0,
    -  Wridge0,
    -  Wridge0max,
    -  Zridgemid,
    -  Wridgemid,
    -  Wridgemidmax,
    -  Zridgeok,
    -  Wridgeok,
    -  Wridgeokmax,
    -  Zsetplane,
    -  Ztotcheck,
    -  Ztotmerge,
    -    ZEND};
    -#endif
    -
    -/*---------------------------------
    -
    -  ztype
    -    the type of a statistic sets its initial value.
    -
    -  notes:
    -    The type should be the same as the macro for collecting the statistic
    -*/
    -enum ztypes {zdoc,zinc,zadd,zmax,zmin,ZTYPEreal,wadd,wmax,wmin,ZTYPEend};
    -
    -/*========== macros and constants =============*/
    -
    -/*----------------------------------
    -
    -  MAYdebugx
    -    define as maydebug() to be called frequently for error trapping
    -*/
    -#define MAYdebugx
    -
    -/*----------------------------------
    -
    -  zzdef_, zdef_( type, name, doc, -1)
    -    define a statistic (assumes 'qhstat.next= 0;')
    -
    -  zdef_( type, name, doc, count)
    -    define an averaged statistic
    -    printed as name/count
    -*/
    -#define zzdef_(stype,name,string,cnt) qhstat id[qhstat next++]=name; \
    -   qhstat doc[name]= string; qhstat count[name]= cnt; qhstat type[name]= stype
    -#if qh_KEEPstatistics
    -#define zdef_(stype,name,string,cnt) qhstat id[qhstat next++]=name; \
    -   qhstat doc[name]= string; qhstat count[name]= cnt; qhstat type[name]= stype
    -#else
    -#define zdef_(type,name,doc,count)
    -#endif
    -
    -/*----------------------------------
    -
    -  zzinc_( name ), zinc_( name)
    -    increment an integer statistic
    -*/
    -#define zzinc_(id) {MAYdebugx; qhstat stats[id].i++;}
    -#if qh_KEEPstatistics
    -#define zinc_(id) {MAYdebugx; qhstat stats[id].i++;}
    -#else
    -#define zinc_(id) {}
    -#endif
    -
    -/*----------------------------------
    -
    -  zzadd_( name, value ), zadd_( name, value ), wadd_( name, value )
    -    add value to an integer or real statistic
    -*/
    -#define zzadd_(id, val) {MAYdebugx; qhstat stats[id].i += (val);}
    -#define wwadd_(id, val) {MAYdebugx; qhstat stats[id].r += (val);}
    -#if qh_KEEPstatistics
    -#define zadd_(id, val) {MAYdebugx; qhstat stats[id].i += (val);}
    -#define wadd_(id, val) {MAYdebugx; qhstat stats[id].r += (val);}
    -#else
    -#define zadd_(id, val) {}
    -#define wadd_(id, val) {}
    -#endif
    -
    -/*----------------------------------
    -
    -  zzval_( name ), zval_( name ), wwval_( name )
    -    set or return value of a statistic
    -*/
    -#define zzval_(id) ((qhstat stats[id]).i)
    -#define wwval_(id) ((qhstat stats[id]).r)
    -#if qh_KEEPstatistics
    -#define zval_(id) ((qhstat stats[id]).i)
    -#define wval_(id) ((qhstat stats[id]).r)
    -#else
    -#define zval_(id) qhstat tempi
    -#define wval_(id) qhstat tempr
    -#endif
    -
    -/*----------------------------------
    -
    -  zmax_( id, val ), wmax_( id, value )
    -    maximize id with val
    -*/
    -#define wwmax_(id, val) {MAYdebugx; maximize_(qhstat stats[id].r,(val));}
    -#if qh_KEEPstatistics
    -#define zmax_(id, val) {MAYdebugx; maximize_(qhstat stats[id].i,(val));}
    -#define wmax_(id, val) {MAYdebugx; maximize_(qhstat stats[id].r,(val));}
    -#else
    -#define zmax_(id, val) {}
    -#define wmax_(id, val) {}
    -#endif
    -
    -/*----------------------------------
    -
    -  zmin_( id, val ), wmin_( id, value )
    -    minimize id with val
    -*/
    -#if qh_KEEPstatistics
    -#define zmin_(id, val) {MAYdebugx; minimize_(qhstat stats[id].i,(val));}
    -#define wmin_(id, val) {MAYdebugx; minimize_(qhstat stats[id].r,(val));}
    -#else
    -#define zmin_(id, val) {}
    -#define wmin_(id, val) {}
    -#endif
    -
    -/*================== stat.h types ==============*/
    -
    -
    -/*----------------------------------
    -
    -  intrealT
    -    union of integer and real, used for statistics
    -*/
    -typedef union intrealT intrealT;    /* union of int and realT */
    -union intrealT {
    -    int i;
    -    realT r;
    -};
    -
    -/*----------------------------------
    -
    -  qhstat
    -    global data structure for statistics, similar to qh and qhrbox
    -
    -  notes:
    -   access to qh_qhstat is via the "qhstat" macro.  There are two choices
    -   qh_QHpointer = 1     access globals via a pointer
    -                        enables qh_saveqhull() and qh_restoreqhull()
    -                = 0     qh_qhstat is a static data structure
    -                        only one instance of qhull() can be active at a time
    -                        default value
    -   qh_QHpointer is defined in libqhull.h
    -
    -   allocated in stat.c using qh_malloc()
    -*/
    -#ifndef DEFqhstatT
    -#define DEFqhstatT 1
    -typedef struct qhstatT qhstatT;
    -#endif
    -
    -#if qh_QHpointer
    -#define qhstat qh_qhstat->
    -extern qhstatT *qh_qhstat;
    -#else
    -#define qhstat qh_qhstat.
    -extern qhstatT qh_qhstat;
    -#endif
    -struct qhstatT {
    -  intrealT   stats[ZEND];     /* integer and real statistics */
    -  unsigned   char id[ZEND+10]; /* id's in print order */
    -  const char *doc[ZEND];       /* array of documentation strings */
    -  short int  count[ZEND];     /* -1 if none, else index of count to use */
    -  char       type[ZEND];      /* type, see ztypes above */
    -  char       printed[ZEND];   /* true, if statistic has been printed */
    -  intrealT   init[ZTYPEend];  /* initial values by types, set initstatistics */
    -
    -  int        next;            /* next index for zdef_ */
    -  int        precision;       /* index for precision problems */
    -  int        vridges;         /* index for Voronoi ridges */
    -  int        tempi;
    -  realT      tempr;
    -};
    -
    -/*========== function prototypes ===========*/
    -
    -void    qh_allstatA(void);
    -void    qh_allstatB(void);
    -void    qh_allstatC(void);
    -void    qh_allstatD(void);
    -void    qh_allstatE(void);
    -void    qh_allstatE2(void);
    -void    qh_allstatF(void);
    -void    qh_allstatG(void);
    -void    qh_allstatH(void);
    -void    qh_allstatI(void);
    -void    qh_allstatistics(void);
    -void    qh_collectstatistics(void);
    -void    qh_freestatistics(void);
    -void    qh_initstatistics(void);
    -boolT   qh_newstats(int idx, int *nextindex);
    -boolT   qh_nostatistic(int i);
    -void    qh_printallstatistics(FILE *fp, const char *string);
    -void    qh_printstatistics(FILE *fp, const char *string);
    -void    qh_printstatlevel(FILE *fp, int id, int start);
    -void    qh_printstats(FILE *fp, int idx, int *nextindex);
    -realT   qh_stddev(int num, realT tot, realT tot2, realT *ave);
    -
    -#endif   /* qhDEFstat */
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/user.c b/scipy-0.10.1/scipy/spatial/qhull/src/user.c
    deleted file mode 100644
    index 4c2b17ed1f..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/user.c
    +++ /dev/null
    @@ -1,519 +0,0 @@
    -/*
      ---------------------------------
    -
    -   user.c
    -   user redefinable functions
    -
    -   see user2.c for qh_fprintf, qh_malloc, qh_free
    -
    -   see README.txt  see COPYING.txt for copyright information.
    -
    -   see libqhull.h for data structures, macros, and user-callable functions.
    -
    -   see user_eg.c, unix.c, and qhull_interface.cpp for examples.
    -
    -   see user.h for user-definable constants
    -
    -      use qh_NOmem in mem.h to turn off memory management
    -      use qh_NOmerge in user.h to turn off facet merging
    -      set qh_KEEPstatistics in user.h to 0 to turn off statistics
    -
    -   This is unsupported software.  You're welcome to make changes,
    -   but you're on your own if something goes wrong.  Use 'Tc' to
    -   check frequently.  Usually qhull will report an error if
    -   a data structure becomes inconsistent.  If so, it also reports
    -   the last point added to the hull, e.g., 102.  You can then trace
    -   the execution of qhull with "T4P102".
    -
    -   Please report any errors that you fix to qhull@qhull.org
    -
    -   call_qhull is a template for calling qhull from within your application
    -
    -   if you recompile and load this module, then user.o will not be loaded
    -   from qhull.a
    -
    -   you can add additional quick allocation sizes in qh_user_memsizes
    -
    -   if the other functions here are redefined to not use qh_print...,
    -   then io.o will not be loaded from qhull.a.  See user_eg.c for an
    -   example.  We recommend keeping io.o for the extra debugging
    -   information it supplies.
    -*/
    -
    -#include "qhull_a.h"
    -
    -#include 
    -
    -/*---------------------------------
    -
    -  qh_call_qhull( void )
    -    template for calling qhull from inside your program
    -    remove #if 0, #endif to compile
    -
    -  returns:
    -    exit code(see qh_ERR... in libqhull.h)
    -    all memory freed
    -
    -  notes:
    -    This can be called any number of times.
    -
    -  see:
    -    qh_call_qhull_once()
    -
    -*/
    -#if 0
    -{
    -  int dim;                  /* dimension of points */
    -  int numpoints;            /* number of points */
    -  coordT *points;           /* array of coordinates for each point */
    -  boolT ismalloc;           /* True if qhull should free points in qh_freeqhull() or reallocation */
    -  char flags[]= "qhull Tv"; /* option flags for qhull, see qh_opt.htm */
    -  FILE *outfile= stdout;    /* output from qh_produce_output()
    -                               use NULL to skip qh_produce_output() */
    -  FILE *errfile= stderr;    /* error messages from qhull code */
    -  int exitcode;             /* 0 if no error from qhull */
    -  facetT *facet;            /* set by FORALLfacets */
    -  int curlong, totlong;     /* memory remaining after qh_memfreeshort */
    -
    -  /* initialize dim, numpoints, points[], ismalloc here */
    -  exitcode= qh_new_qhull(dim, numpoints, points, ismalloc,
    -                      flags, outfile, errfile);
    -  if (!exitcode) {                  /* if no error */
    -    /* 'qh facet_list' contains the convex hull */
    -    FORALLfacets {
    -       /* ... your code ... */
    -    }
    -  }
    -  qh_freeqhull(!qh_ALL);
    -  qh_memfreeshort(&curlong, &totlong);
    -  if (curlong || totlong)
    -    qh_fprintf(errfile, 7068, "qhull internal warning (main): did not free %d bytes of long memory(%d pieces)\n", totlong, curlong);
    -}
    -#endif
    -
    -/*---------------------------------
    -
    -  qh_new_qhull( dim, numpoints, points, ismalloc, qhull_cmd, outfile, errfile )
    -    build new qhull data structure and return exitcode (0 if no errors)
    -
    -  notes:
    -    do not modify points until finished with results.
    -      The qhull data structure contains pointers into the points array.
    -    do not call qhull functions before qh_new_qhull().
    -      The qhull data structure is not initialized until qh_new_qhull().
    -
    -    outfile may be null
    -    qhull_cmd must start with "qhull "
    -    projects points to a new point array for Delaunay triangulations ('d' and 'v')
    -    transforms points into a new point array for halfspace intersection ('H')
    -
    -
    -  To allow multiple, concurrent calls to qhull()
    -    - set qh_QHpointer in user.h
    -    - use qh_save_qhull and qh_restore_qhull to swap the global data structure between calls.
    -    - use qh_freeqhull(qh_ALL) to free intermediate convex hulls
    -
    -  see:
    -    user_eg.c for an example
    -*/
    -int qh_new_qhull(int dim, int numpoints, coordT *points, boolT ismalloc,
    -                char *qhull_cmd, FILE *outfile, FILE *errfile) {
    -  int exitcode, hulldim;
    -  boolT new_ismalloc;
    -  static boolT firstcall = True;
    -  coordT *new_points;
    -
    -  if (firstcall) {
    -    qh_meminit(errfile);
    -    firstcall= False;
    -  }
    -  if (strncmp(qhull_cmd,"qhull ", (size_t)6)) {
    -    qh_fprintf(errfile, 6186, "qhull error (qh_new_qhull): start qhull_cmd argument with \"qhull \"\n");
    -    qh_exit(qh_ERRinput);
    -  }
    -  qh_initqhull_start(NULL, outfile, errfile);
    -  trace1((qh ferr, 1044, "qh_new_qhull: build new Qhull for %d %d-d points with %s\n", numpoints, dim, qhull_cmd));
    -  exitcode = setjmp(qh errexit);
    -  if (!exitcode)
    -  {
    -    qh NOerrexit = False;
    -    qh_initflags(qhull_cmd);
    -    if (qh DELAUNAY)
    -      qh PROJECTdelaunay= True;
    -    if (qh HALFspace) {
    -      /* points is an array of halfspaces,
    -         the last coordinate of each halfspace is its offset */
    -      hulldim= dim-1;
    -      qh_setfeasible(hulldim);
    -      new_points= qh_sethalfspace_all(dim, numpoints, points, qh feasible_point);
    -      new_ismalloc= True;
    -      if (ismalloc)
    -        qh_free(points);
    -    }else {
    -      hulldim= dim;
    -      new_points= points;
    -      new_ismalloc= ismalloc;
    -    }
    -    qh_init_B(new_points, numpoints, hulldim, new_ismalloc);
    -    qh_qhull();
    -    qh_check_output();
    -    if (outfile)
    -      qh_produce_output();
    -    if (qh VERIFYoutput && !qh STOPpoint && !qh STOPcone)
    -      qh_check_points();
    -  }
    -  qh NOerrexit = True;
    -  return exitcode;
    -} /* new_qhull */
    -
    -/*---------------------------------
    -
    -  qh_errexit( exitcode, facet, ridge )
    -    report and exit from an error
    -    report facet and ridge if non-NULL
    -    reports useful information such as last point processed
    -    set qh.FORCEoutput to print neighborhood of facet
    -
    -  see:
    -    qh_errexit2() in libqhull.c for printing 2 facets
    -
    -  design:
    -    check for error within error processing
    -    compute qh.hulltime
    -    print facet and ridge (if any)
    -    report commandString, options, qh.furthest_id
    -    print summary and statistics (including precision statistics)
    -    if qh_ERRsingular
    -      print help text for singular data set
    -    exit program via long jump (if defined) or exit()
    -*/
    -void qh_errexit(int exitcode, facetT *facet, ridgeT *ridge) {
    -
    -  if (qh ERREXITcalled) {
    -    qh_fprintf(qh ferr, 8126, "\nqhull error while processing previous error.  Exit program\n");
    -    qh_exit(qh_ERRqhull);
    -  }
    -  qh ERREXITcalled= True;
    -  if (!qh QHULLfinished)
    -    qh hulltime= qh_CPUclock - qh hulltime;
    -  qh_errprint("ERRONEOUS", facet, NULL, ridge, NULL);
    -  qh_fprintf(qh ferr, 8127, "\nWhile executing: %s | %s\n", qh rbox_command, qh qhull_command);
    -  qh_fprintf(qh ferr, 8128, "Options selected for Qhull %s:\n%s\n", qh_version, qh qhull_options);
    -  if (qh furthest_id >= 0) {
    -    qh_fprintf(qh ferr, 8129, "Last point added to hull was p%d.", qh furthest_id);
    -    if (zzval_(Ztotmerge))
    -      qh_fprintf(qh ferr, 8130, "  Last merge was #%d.", zzval_(Ztotmerge));
    -    if (qh QHULLfinished)
    -      qh_fprintf(qh ferr, 8131, "\nQhull has finished constructing the hull.");
    -    else if (qh POSTmerging)
    -      qh_fprintf(qh ferr, 8132, "\nQhull has started post-merging.");
    -    qh_fprintf(qh ferr, 8133, "\n");
    -  }
    -  if (qh FORCEoutput && (qh QHULLfinished || (!facet && !ridge)))
    -    qh_produce_output();
    -  else {
    -    if (exitcode != qh_ERRsingular && zzval_(Zsetplane) > qh hull_dim+1) {
    -      qh_fprintf(qh ferr, 8134, "\nAt error exit:\n");
    -      qh_printsummary(qh ferr);
    -      if (qh PRINTstatistics) {
    -        qh_collectstatistics();
    -        qh_printstatistics(qh ferr, "at error exit");
    -        qh_memstatistics(qh ferr);
    -      }
    -    }
    -    if (qh PRINTprecision)
    -      qh_printstats(qh ferr, qhstat precision, NULL);
    -  }
    -  if (!exitcode)
    -    exitcode= qh_ERRqhull;
    -  else if (exitcode == qh_ERRsingular)
    -    qh_printhelp_singular(qh ferr);
    -  else if (exitcode == qh_ERRprec && !qh PREmerge)
    -    qh_printhelp_degenerate(qh ferr);
    -  if (qh NOerrexit) {
    -    qh_fprintf(qh ferr, 6187, "qhull error while ending program.  Exit program\n");
    -    qh_exit(qh_ERRqhull);
    -  }
    -  qh ERREXITcalled= False;
    -  qh NOerrexit= True;
    -  longjmp(qh errexit, exitcode);
    -} /* errexit */
    -
    -
    -/*---------------------------------
    -
    -  qh_errprint( fp, string, atfacet, otherfacet, atridge, atvertex )
    -    prints out the information of facets and ridges to fp
    -    also prints neighbors and geomview output
    -
    -  notes:
    -    except for string, any parameter may be NULL
    -*/
    -void qh_errprint(const char *string, facetT *atfacet, facetT *otherfacet, ridgeT *atridge, vertexT *atvertex) {
    -  int i;
    -
    -  if (atfacet) {
    -    qh_fprintf(qh ferr, 8135, "%s FACET:\n", string);
    -    qh_printfacet(qh ferr, atfacet);
    -  }
    -  if (otherfacet) {
    -    qh_fprintf(qh ferr, 8136, "%s OTHER FACET:\n", string);
    -    qh_printfacet(qh ferr, otherfacet);
    -  }
    -  if (atridge) {
    -    qh_fprintf(qh ferr, 8137, "%s RIDGE:\n", string);
    -    qh_printridge(qh ferr, atridge);
    -    if (atridge->top && atridge->top != atfacet && atridge->top != otherfacet)
    -      qh_printfacet(qh ferr, atridge->top);
    -    if (atridge->bottom
    -        && atridge->bottom != atfacet && atridge->bottom != otherfacet)
    -      qh_printfacet(qh ferr, atridge->bottom);
    -    if (!atfacet)
    -      atfacet= atridge->top;
    -    if (!otherfacet)
    -      otherfacet= otherfacet_(atridge, atfacet);
    -  }
    -  if (atvertex) {
    -    qh_fprintf(qh ferr, 8138, "%s VERTEX:\n", string);
    -    qh_printvertex(qh ferr, atvertex);
    -  }
    -  if (qh fout && qh FORCEoutput && atfacet && !qh QHULLfinished && !qh IStracing) {
    -    qh_fprintf(qh ferr, 8139, "ERRONEOUS and NEIGHBORING FACETS to output\n");
    -    for (i=0; i < qh_PRINTEND; i++)  /* use fout for geomview output */
    -      qh_printneighborhood(qh fout, qh PRINTout[i], atfacet, otherfacet,
    -                            !qh_ALL);
    -  }
    -} /* errprint */
    -
    -
    -/*---------------------------------
    -
    -  qh_printfacetlist( fp, facetlist, facets, printall )
    -    print all fields for a facet list and/or set of facets to fp
    -    if !printall,
    -      only prints good facets
    -
    -  notes:
    -    also prints all vertices
    -*/
    -void qh_printfacetlist(facetT *facetlist, setT *facets, boolT printall) {
    -  facetT *facet, **facetp;
    -
    -  qh_printbegin(qh ferr, qh_PRINTfacets, facetlist, facets, printall);
    -  FORALLfacet_(facetlist)
    -    qh_printafacet(qh ferr, qh_PRINTfacets, facet, printall);
    -  FOREACHfacet_(facets)
    -    qh_printafacet(qh ferr, qh_PRINTfacets, facet, printall);
    -  qh_printend(qh ferr, qh_PRINTfacets, facetlist, facets, printall);
    -} /* printfacetlist */
    -
    -
    -/*---------------------------------
    -
    -  qh_printhelp_degenerate( fp )
    -    prints descriptive message for precision error
    -
    -  notes:
    -    no message if qh_QUICKhelp
    -*/
    -void qh_printhelp_degenerate(FILE *fp) {
    -
    -  if (qh MERGEexact || qh PREmerge || qh JOGGLEmax < REALmax/2)
    -    qh_fprintf(fp, 9368, "\n\
    -A Qhull error has occurred.  Qhull should have corrected the above\n\
    -precision error.  Please send the input and all of the output to\n\
    -qhull_bug@qhull.org\n");
    -  else if (!qh_QUICKhelp) {
    -    qh_fprintf(fp, 9369, "\n\
    -Precision problems were detected during construction of the convex hull.\n\
    -This occurs because convex hull algorithms assume that calculations are\n\
    -exact, but floating-point arithmetic has roundoff errors.\n\
    -\n\
    -To correct for precision problems, do not use 'Q0'.  By default, Qhull\n\
    -selects 'C-0' or 'Qx' and merges non-convex facets.  With option 'QJ',\n\
    -Qhull joggles the input to prevent precision problems.  See \"Imprecision\n\
    -in Qhull\" (qh-impre.htm).\n\
    -\n\
    -If you use 'Q0', the output may include\n\
    -coplanar ridges, concave ridges, and flipped facets.  In 4-d and higher,\n\
    -Qhull may produce a ridge with four neighbors or two facets with the same \n\
    -vertices.  Qhull reports these events when they occur.  It stops when a\n\
    -concave ridge, flipped facet, or duplicate facet occurs.\n");
    -#if REALfloat
    -    qh_fprintf(fp, 9370, "\
    -\n\
    -Qhull is currently using single precision arithmetic.  The following\n\
    -will probably remove the precision problems:\n\
    -  - recompile qhull for realT precision(#define REALfloat 0 in user.h).\n");
    -#endif
    -    if (qh DELAUNAY && !qh SCALElast && qh MAXabs_coord > 1e4)
    -      qh_fprintf(fp, 9371, "\
    -\n\
    -When computing the Delaunay triangulation of coordinates > 1.0,\n\
    -  - use 'Qbb' to scale the last coordinate to [0,m] (max previous coordinate)\n");
    -    if (qh DELAUNAY && !qh ATinfinity)
    -      qh_fprintf(fp, 9372, "\
    -When computing the Delaunay triangulation:\n\
    -  - use 'Qz' to add a point at-infinity.  This reduces precision problems.\n");
    -
    -    qh_fprintf(fp, 9373, "\
    -\n\
    -If you need triangular output:\n\
    -  - use option 'Qt' to triangulate the output\n\
    -  - use option 'QJ' to joggle the input points and remove precision errors\n\
    -  - use option 'Ft'.  It triangulates non-simplicial facets with added points.\n\
    -\n\
    -If you must use 'Q0',\n\
    -try one or more of the following options.  They can not guarantee an output.\n\
    -  - use 'QbB' to scale the input to a cube.\n\
    -  - use 'Po' to produce output and prevent partitioning for flipped facets\n\
    -  - use 'V0' to set min. distance to visible facet as 0 instead of roundoff\n\
    -  - use 'En' to specify a maximum roundoff error less than %2.2g.\n\
    -  - options 'Qf', 'Qbb', and 'QR0' may also help\n",
    -               qh DISTround);
    -    qh_fprintf(fp, 9374, "\
    -\n\
    -To guarantee simplicial output:\n\
    -  - use option 'Qt' to triangulate the output\n\
    -  - use option 'QJ' to joggle the input points and remove precision errors\n\
    -  - use option 'Ft' to triangulate the output by adding points\n\
    -  - use exact arithmetic (see \"Imprecision in Qhull\", qh-impre.htm)\n\
    -");
    -  }
    -} /* printhelp_degenerate */
    -
    -
    -/*---------------------------------
    -
    -  qh_printhelp_narrowhull( minangle )
    -    Warn about a narrow hull
    -
    -  notes:
    -    Alternatively, reduce qh_WARNnarrow in user.h
    -
    -*/
    -void qh_printhelp_narrowhull(FILE *fp, realT minangle) {
    -
    -    qh_fprintf(fp, 9375, "qhull precision warning: \n\
    -The initial hull is narrow (cosine of min. angle is %.16f).\n\
    -A coplanar point may lead to a wide facet.  Options 'QbB' (scale to unit box)\n\
    -or 'Qbb' (scale last coordinate) may remove this warning.  Use 'Pp' to skip\n\
    -this warning.  See 'Limitations' in qh-impre.htm.\n",
    -          -minangle);   /* convert from angle between normals to angle between facets */
    -} /* printhelp_narrowhull */
    -
    -/*---------------------------------
    -
    -  qh_printhelp_singular( fp )
    -    prints descriptive message for singular input
    -*/
    -void qh_printhelp_singular(FILE *fp) {
    -  facetT *facet;
    -  vertexT *vertex, **vertexp;
    -  realT min, max, *coord, dist;
    -  int i,k;
    -
    -  qh_fprintf(fp, 9376, "\n\
    -The input to qhull appears to be less than %d dimensional, or a\n\
    -computation has overflowed.\n\n\
    -Qhull could not construct a clearly convex simplex from points:\n",
    -           qh hull_dim);
    -  qh_printvertexlist(fp, "", qh facet_list, NULL, qh_ALL);
    -  if (!qh_QUICKhelp)
    -    qh_fprintf(fp, 9377, "\n\
    -The center point is coplanar with a facet, or a vertex is coplanar\n\
    -with a neighboring facet.  The maximum round off error for\n\
    -computing distances is %2.2g.  The center point, facets and distances\n\
    -to the center point are as follows:\n\n", qh DISTround);
    -  qh_printpointid(fp, "center point", qh hull_dim, qh interior_point, -1);
    -  qh_fprintf(fp, 9378, "\n");
    -  FORALLfacets {
    -    qh_fprintf(fp, 9379, "facet");
    -    FOREACHvertex_(facet->vertices)
    -      qh_fprintf(fp, 9380, " p%d", qh_pointid(vertex->point));
    -    zinc_(Zdistio);
    -    qh_distplane(qh interior_point, facet, &dist);
    -    qh_fprintf(fp, 9381, " distance= %4.2g\n", dist);
    -  }
    -  if (!qh_QUICKhelp) {
    -    if (qh HALFspace)
    -      qh_fprintf(fp, 9382, "\n\
    -These points are the dual of the given halfspaces.  They indicate that\n\
    -the intersection is degenerate.\n");
    -    qh_fprintf(fp, 9383,"\n\
    -These points either have a maximum or minimum x-coordinate, or\n\
    -they maximize the determinant for k coordinates.  Trial points\n\
    -are first selected from points that maximize a coordinate.\n");
    -    if (qh hull_dim >= qh_INITIALmax)
    -      qh_fprintf(fp, 9384, "\n\
    -Because of the high dimension, the min x-coordinate and max-coordinate\n\
    -points are used if the determinant is non-zero.  Option 'Qs' will\n\
    -do a better, though much slower, job.  Instead of 'Qs', you can change\n\
    -the points by randomly rotating the input with 'QR0'.\n");
    -  }
    -  qh_fprintf(fp, 9385, "\nThe min and max coordinates for each dimension are:\n");
    -  for (k=0; k < qh hull_dim; k++) {
    -    min= REALmax;
    -    max= -REALmin;
    -    for (i=qh num_points, coord= qh first_point+k; i--; coord += qh hull_dim) {
    -      maximize_(max, *coord);
    -      minimize_(min, *coord);
    -    }
    -    qh_fprintf(fp, 9386, "  %d:  %8.4g  %8.4g  difference= %4.4g\n", k, min, max, max-min);
    -  }
    -  if (!qh_QUICKhelp) {
    -    qh_fprintf(fp, 9387, "\n\
    -If the input should be full dimensional, you have several options that\n\
    -may determine an initial simplex:\n\
    -  - use 'QJ'  to joggle the input and make it full dimensional\n\
    -  - use 'QbB' to scale the points to the unit cube\n\
    -  - use 'QR0' to randomly rotate the input for different maximum points\n\
    -  - use 'Qs'  to search all points for the initial simplex\n\
    -  - use 'En'  to specify a maximum roundoff error less than %2.2g.\n\
    -  - trace execution with 'T3' to see the determinant for each point.\n",
    -                     qh DISTround);
    -#if REALfloat
    -    qh_fprintf(fp, 9388, "\
    -  - recompile qhull for realT precision(#define REALfloat 0 in libqhull.h).\n");
    -#endif
    -    qh_fprintf(fp, 9389, "\n\
    -If the input is lower dimensional:\n\
    -  - use 'QJ' to joggle the input and make it full dimensional\n\
    -  - use 'Qbk:0Bk:0' to delete coordinate k from the input.  You should\n\
    -    pick the coordinate with the least range.  The hull will have the\n\
    -    correct topology.\n\
    -  - determine the flat containing the points, rotate the points\n\
    -    into a coordinate plane, and delete the other coordinates.\n\
    -  - add one or more points to make the input full dimensional.\n\
    -");
    -    if (qh DELAUNAY && !qh ATinfinity)
    -      qh_fprintf(fp, 9390, "\n\n\
    -This is a Delaunay triangulation and the input is co-circular or co-spherical:\n\
    -  - use 'Qz' to add a point \"at infinity\" (i.e., above the paraboloid)\n\
    -  - or use 'QJ' to joggle the input and avoid co-circular data\n");
    -  }
    -} /* printhelp_singular */
    -
    -/*---------------------------------
    -
    -  qh_user_memsizes()
    -    allocate up to 10 additional, quick allocation sizes
    -
    -  notes:
    -    increase maximum number of allocations in qh_initqhull_mem()
    -*/
    -void qh_user_memsizes(void) {
    -
    -  /* qh_memsize(size); */
    -} /* user_memsizes */
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/user.h b/scipy-0.10.1/scipy/spatial/qhull/src/user.h
    deleted file mode 100644
    index 52608c3cd8..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/user.h
    +++ /dev/null
    @@ -1,848 +0,0 @@
    -/*
      ---------------------------------
    -
    -   user.h
    -   user redefinable constants
    -
    -   see qh-user.htm.  see COPYING for copyright information.
    -
    -   before reading any code, review libqhull.h for data structure definitions and
    -   the "qh" macro.
    -
    -Sections:
    -   ============= qhull library constants ======================
    -   ============= data types and configuration macros ==========
    -   ============= performance related constants ================
    -   ============= memory constants =============================
    -   ============= joggle constants =============================
    -   ============= conditional compilation ======================
    -   ============= -merge constants- ============================
    -
    -Code flags --
    -  NOerrors -- the code does not call qh_errexit()
    -  WARN64 -- the code may be incompatible with 64-bit pointers
    -
    -*/
    -
    -#include 
    -
    -#ifndef qhDEFuser
    -#define qhDEFuser 1
    -
    -/*============================================================*/
    -/*============= qhull library constants ======================*/
    -/*============================================================*/
    -
    -/*----------------------------------
    -
    -  FILENAMElen -- max length for TI and TO filenames
    -
    -*/
    -
    -#define qh_FILENAMElen 500
    -
    -/*----------------------------------
    -
    -  msgcode -- Unique message codes for qh_fprintf
    -
    -  If add new messages, assign these values and increment.
    -
    -  def counters =  [27, 1047, 2059, 3025, 4068, 5003, 
    -     6233, 7079, 8143, 9410, 10026]
    -
    -  See: qh_ERR* [libqhull.h]
    -*/
    -
    -#define MSG_TRACE0 0
    -#define MSG_TRACE1 1000
    -#define MSG_TRACE2 2000
    -#define MSG_TRACE3 3000
    -#define MSG_TRACE4 4000
    -#define MSG_TRACE5 5000
    -#define MSG_ERROR  6000   /* errors written to qh.ferr */
    -#define MSG_WARNING 7000
    -#define MSG_STDERR  8000  /* log messages Written to qh.ferr */
    -#define MSG_OUTPUT  9000
    -#define MSG_FIXUP  10000
    -#define MSG_MAXLEN  3000 /* qh_printhelp_degenerate() in user.c */
    -
    -
    -/*----------------------------------
    -
    -  qh_OPTIONline -- max length of an option line 'FO'
    -*/
    -#define qh_OPTIONline 80
    -
    -/*============================================================*/
    -/*============= data types and configuration macros ==========*/
    -/*============================================================*/
    -
    -/*----------------------------------
    -
    -  realT
    -    set the size of floating point numbers
    -
    -  qh_REALdigits
    -    maximimum number of significant digits
    -
    -  qh_REAL_1, qh_REAL_2n, qh_REAL_3n
    -    format strings for printf
    -
    -  qh_REALmax, qh_REALmin
    -    maximum and minimum (near zero) values
    -
    -  qh_REALepsilon
    -    machine roundoff.  Maximum roundoff error for addition and multiplication.
    -
    -  notes:
    -   Select whether to store floating point numbers in single precision (float)
    -   or double precision (double).
    -
    -   Use 'float' to save about 8% in time and 25% in space.  This is particularly
    -   helpful if high-d where convex hulls are space limited.  Using 'float' also
    -   reduces the printed size of Qhull's output since numbers have 8 digits of
    -   precision.
    -
    -   Use 'double' when greater arithmetic precision is needed.  This is needed
    -   for Delaunay triangulations and Voronoi diagrams when you are not merging
    -   facets.
    -
    -   If 'double' gives insufficient precision, your data probably includes
    -   degeneracies.  If so you should use facet merging (done by default)
    -   or exact arithmetic (see imprecision section of manual, qh-impre.htm).
    -   You may also use option 'Po' to force output despite precision errors.
    -
    -   You may use 'long double', but many format statements need to be changed
    -   and you may need a 'long double' square root routine.  S. Grundmann
    -   (sg@eeiwzb.et.tu-dresden.de) has done this.  He reports that the code runs
    -   much slower with little gain in precision.
    -
    -   WARNING: on some machines,    int f(){realT a= REALmax;return (a == REALmax);}
    -      returns False.  Use (a > REALmax/2) instead of (a == REALmax).
    -
    -   REALfloat =   1      all numbers are 'float' type
    -             =   0      all numbers are 'double' type
    -*/
    -#define REALfloat 0
    -
    -#if (REALfloat == 1)
    -#define realT float
    -#define REALmax FLT_MAX
    -#define REALmin FLT_MIN
    -#define REALepsilon FLT_EPSILON
    -#define qh_REALdigits 8   /* maximum number of significant digits */
    -#define qh_REAL_1 "%6.8g "
    -#define qh_REAL_2n "%6.8g %6.8g\n"
    -#define qh_REAL_3n "%6.8g %6.8g %6.8g\n"
    -
    -#elif (REALfloat == 0)
    -#define realT double
    -#define REALmax DBL_MAX
    -#define REALmin DBL_MIN
    -#define REALepsilon DBL_EPSILON
    -#define qh_REALdigits 16    /* maximum number of significant digits */
    -#define qh_REAL_1 "%6.16g "
    -#define qh_REAL_2n "%6.16g %6.16g\n"
    -#define qh_REAL_3n "%6.16g %6.16g %6.16g\n"
    -
    -#else
    -#error unknown float option
    -#endif
    -
    -/*----------------------------------
    -
    -  qh_CPUclock
    -    define the clock() function for reporting the total time spent by Qhull
    -    returns CPU ticks as a 'long int'
    -    qh_CPUclock is only used for reporting the total time spent by Qhull
    -
    -  qh_SECticks
    -    the number of clock ticks per second
    -
    -  notes:
    -    looks for CLOCKS_PER_SEC, CLOCKS_PER_SECOND, or assumes microseconds
    -    to define a custom clock, set qh_CLOCKtype to 0
    -
    -    if your system does not use clock() to return CPU ticks, replace
    -    qh_CPUclock with the corresponding function.  It is converted
    -    to 'unsigned long' to prevent wrap-around during long runs.  By default,
    -     defines clock_t as 'long'
    -
    -   Set qh_CLOCKtype to
    -
    -     1          for CLOCKS_PER_SEC, CLOCKS_PER_SECOND, or microsecond
    -                Note:  may fail if more than 1 hour elapsed time
    -
    -     2          use qh_clock() with POSIX times() (see global.c)
    -*/
    -#define qh_CLOCKtype 1  /* change to the desired number */
    -
    -#if (qh_CLOCKtype == 1)
    -
    -#if defined(CLOCKS_PER_SECOND)
    -#define qh_CPUclock    ((unsigned long)clock())  /* return CPU clock */
    -#define qh_SECticks CLOCKS_PER_SECOND
    -
    -#elif defined(CLOCKS_PER_SEC)
    -#define qh_CPUclock    ((unsigned long)clock())  /* return CPU clock */
    -#define qh_SECticks CLOCKS_PER_SEC
    -
    -#elif defined(CLK_TCK)
    -#define qh_CPUclock    ((unsigned long)clock())  /* return CPU clock */
    -#define qh_SECticks CLK_TCK
    -
    -#else
    -#define qh_CPUclock    ((unsigned long)clock())  /* return CPU clock */
    -#define qh_SECticks 1E6
    -#endif
    -
    -#elif (qh_CLOCKtype == 2)
    -#define qh_CPUclock    qh_clock()  /* return CPU clock */
    -#define qh_SECticks 100
    -
    -#else /* qh_CLOCKtype == ? */
    -#error unknown clock option
    -#endif
    -
    -/*----------------------------------
    -
    -  qh_RANDOMtype, qh_RANDOMmax, qh_RANDOMseed
    -    define random number generator
    -
    -    qh_RANDOMint generates a random integer between 0 and qh_RANDOMmax.
    -    qh_RANDOMseed sets the random number seed for qh_RANDOMint
    -
    -  Set qh_RANDOMtype (default 5) to:
    -    1       for random() with 31 bits (UCB)
    -    2       for rand() with RAND_MAX or 15 bits (system 5)
    -    3       for rand() with 31 bits (Sun)
    -    4       for lrand48() with 31 bits (Solaris)
    -    5       for qh_rand() with 31 bits (included with Qhull)
    -
    -  notes:
    -    Random numbers are used by rbox to generate point sets.  Random
    -    numbers are used by Qhull to rotate the input ('QRn' option),
    -    simulate a randomized algorithm ('Qr' option), and to simulate
    -    roundoff errors ('Rn' option).
    -
    -    Random number generators differ between systems.  Most systems provide
    -    rand() but the period varies.  The period of rand() is not critical
    -    since qhull does not normally use random numbers.
    -
    -    The default generator is Park & Miller's minimal standard random
    -    number generator [CACM 31:1195 '88].  It is included with Qhull.
    -
    -    If qh_RANDOMmax is wrong, qhull will report a warning and Geomview
    -    output will likely be invisible.
    -*/
    -#define qh_RANDOMtype 5   /* *** change to the desired number *** */
    -
    -#if (qh_RANDOMtype == 1)
    -#define qh_RANDOMmax ((realT)0x7fffffffUL)  /* 31 bits, random()/MAX */
    -#define qh_RANDOMint random()
    -#define qh_RANDOMseed_(seed) srandom(seed);
    -
    -#elif (qh_RANDOMtype == 2)
    -#ifdef RAND_MAX
    -#define qh_RANDOMmax ((realT)RAND_MAX)
    -#else
    -#define qh_RANDOMmax ((realT)32767)   /* 15 bits (System 5) */
    -#endif
    -#define qh_RANDOMint  rand()
    -#define qh_RANDOMseed_(seed) srand((unsigned)seed);
    -
    -#elif (qh_RANDOMtype == 3)
    -#define qh_RANDOMmax ((realT)0x7fffffffUL)  /* 31 bits, Sun */
    -#define qh_RANDOMint  rand()
    -#define qh_RANDOMseed_(seed) srand((unsigned)seed);
    -
    -#elif (qh_RANDOMtype == 4)
    -#define qh_RANDOMmax ((realT)0x7fffffffUL)  /* 31 bits, lrand38()/MAX */
    -#define qh_RANDOMint lrand48()
    -#define qh_RANDOMseed_(seed) srand48(seed);
    -
    -#elif (qh_RANDOMtype == 5)
    -#define qh_RANDOMmax ((realT)2147483646UL)  /* 31 bits, qh_rand/MAX */
    -#define qh_RANDOMint qh_rand()
    -#define qh_RANDOMseed_(seed) qh_srand(seed);
    -/* unlike rand(), never returns 0 */
    -
    -#else
    -#error: unknown random option
    -#endif
    -
    -/*----------------------------------
    -
    -  qh_ORIENTclock
    -    0 for inward pointing normals by Geomview convention
    -*/
    -#define qh_ORIENTclock 0
    -
    -
    -/*============================================================*/
    -/*============= joggle constants =============================*/
    -/*============================================================*/
    -
    -/*----------------------------------
    -
    -qh_JOGGLEdefault
    -default qh.JOGGLEmax is qh.DISTround * qh_JOGGLEdefault
    -
    -notes:
    -rbox s r 100 | qhull QJ1e-15 QR0 generates 90% faults at distround 7e-16
    -rbox s r 100 | qhull QJ1e-14 QR0 generates 70% faults
    -rbox s r 100 | qhull QJ1e-13 QR0 generates 35% faults
    -rbox s r 100 | qhull QJ1e-12 QR0 generates 8% faults
    -rbox s r 100 | qhull QJ1e-11 QR0 generates 1% faults
    -rbox s r 100 | qhull QJ1e-10 QR0 generates 0% faults
    -rbox 1000 W0 | qhull QJ1e-12 QR0 generates 86% faults
    -rbox 1000 W0 | qhull QJ1e-11 QR0 generates 20% faults
    -rbox 1000 W0 | qhull QJ1e-10 QR0 generates 2% faults
    -the later have about 20 points per facet, each of which may interfere
    -
    -pick a value large enough to avoid retries on most inputs
    -*/
    -#define qh_JOGGLEdefault 30000.0
    -
    -/*----------------------------------
    -
    -qh_JOGGLEincrease
    -factor to increase qh.JOGGLEmax on qh_JOGGLEretry or qh_JOGGLEagain
    -*/
    -#define qh_JOGGLEincrease 10.0
    -
    -/*----------------------------------
    -
    -qh_JOGGLEretry
    -if ZZretry = qh_JOGGLEretry, increase qh.JOGGLEmax
    -
    -notes:
    -try twice at the original value in case of bad luck the first time
    -*/
    -#define qh_JOGGLEretry 2
    -
    -/*----------------------------------
    -
    -qh_JOGGLEagain
    -every following qh_JOGGLEagain, increase qh.JOGGLEmax
    -
    -notes:
    -1 is OK since it's already failed qh_JOGGLEretry times
    -*/
    -#define qh_JOGGLEagain 1
    -
    -/*----------------------------------
    -
    -qh_JOGGLEmaxincrease
    -maximum qh.JOGGLEmax due to qh_JOGGLEincrease
    -relative to qh.MAXwidth
    -
    -notes:
    -qh.joggleinput will retry at this value until qh_JOGGLEmaxretry
    -*/
    -#define qh_JOGGLEmaxincrease 1e-2
    -
    -/*----------------------------------
    -
    -qh_JOGGLEmaxretry
    -stop after qh_JOGGLEmaxretry attempts
    -*/
    -#define qh_JOGGLEmaxretry 100
    -
    -/*============================================================*/
    -/*============= performance related constants ================*/
    -/*============================================================*/
    -
    -/*----------------------------------
    -
    -  qh_HASHfactor
    -    total hash slots / used hash slots.  Must be at least 1.1.
    -
    -  notes:
    -    =2 for at worst 50% occupancy for qh hash_table and normally 25% occupancy
    -*/
    -#define qh_HASHfactor 2
    -
    -/*----------------------------------
    -
    -  qh_VERIFYdirect
    -    with 'Tv' verify all points against all facets if op count is smaller
    -
    -  notes:
    -    if greater, calls qh_check_bestdist() instead
    -*/
    -#define qh_VERIFYdirect 1000000
    -
    -/*----------------------------------
    -
    -  qh_INITIALsearch
    -     if qh_INITIALmax, search points up to this dimension
    -*/
    -#define qh_INITIALsearch 6
    -
    -/*----------------------------------
    -
    -  qh_INITIALmax
    -    if dim >= qh_INITIALmax, use min/max coordinate points for initial simplex
    -
    -  notes:
    -    from points with non-zero determinants
    -    use option 'Qs' to override (much slower)
    -*/
    -#define qh_INITIALmax 8
    -
    -/*============================================================*/
    -/*============= memory constants =============================*/
    -/*============================================================*/
    -
    -/*----------------------------------
    -
    -  qh_MEMalign
    -    memory alignment for qh_meminitbuffers() in global.c
    -
    -  notes:
    -    to avoid bus errors, memory allocation must consider alignment requirements.
    -    malloc() automatically takes care of alignment.   Since mem.c manages
    -    its own memory, we need to explicitly specify alignment in
    -    qh_meminitbuffers().
    -
    -    A safe choice is sizeof(double).  sizeof(float) may be used if doubles
    -    do not occur in data structures and pointers are the same size.  Be careful
    -    of machines (e.g., DEC Alpha) with large pointers.
    -
    -    If using gcc, best alignment is
    -              #define qh_MEMalign fmax_(__alignof__(realT),__alignof__(void *))
    -*/
    -#define qh_MEMalign ((int)(fmax_(sizeof(realT), sizeof(void *))))
    -
    -/*----------------------------------
    -
    -  qh_MEMbufsize
    -    size of additional memory buffers
    -
    -  notes:
    -    used for qh_meminitbuffers() in global.c
    -*/
    -#define qh_MEMbufsize 0x10000       /* allocate 64K memory buffers */
    -
    -/*----------------------------------
    -
    -  qh_MEMinitbuf
    -    size of initial memory buffer
    -
    -  notes:
    -    use for qh_meminitbuffers() in global.c
    -*/
    -#define qh_MEMinitbuf 0x20000      /* initially allocate 128K buffer */
    -
    -/*----------------------------------
    -
    -  qh_INFINITE
    -    on output, indicates Voronoi center at infinity
    -*/
    -#define qh_INFINITE  -10.101
    -
    -/*----------------------------------
    -
    -  qh_DEFAULTbox
    -    default box size (Geomview expects 0.5)
    -
    -  qh_DEFAULTbox
    -    default box size for integer coorindate (rbox only)
    -*/
    -#define qh_DEFAULTbox 0.5
    -#define qh_DEFAULTzbox 1e6
    -
    -/*============================================================*/
    -/*============= conditional compilation ======================*/
    -/*============================================================*/
    -
    -/*----------------------------------
    -
    -  __cplusplus
    -    defined by C++ compilers
    -
    -  __MSC_VER
    -    defined by Microsoft Visual C++
    -
    -  __MWERKS__ && __POWERPC__
    -    defined by Metrowerks when compiling for the Power Macintosh
    -
    -  __STDC__
    -    defined for strict ANSI C
    -*/
    -
    -/*----------------------------------
    -
    -  qh_COMPUTEfurthest
    -    compute furthest distance to an outside point instead of storing it with the facet
    -    =1 to compute furthest
    -
    -  notes:
    -    computing furthest saves memory but costs time
    -      about 40% more distance tests for partitioning
    -      removes facet->furthestdist
    -*/
    -#define qh_COMPUTEfurthest 0
    -
    -/*----------------------------------
    -
    -  qh_KEEPstatistics
    -    =0 removes most of statistic gathering and reporting
    -
    -  notes:
    -    if 0, code size is reduced by about 4%.
    -*/
    -#define qh_KEEPstatistics 1
    -
    -/*----------------------------------
    -
    -  qh_MAXoutside
    -    record outer plane for each facet
    -    =1 to record facet->maxoutside
    -
    -  notes:
    -    this takes a realT per facet and slightly slows down qhull
    -    it produces better outer planes for geomview output
    -*/
    -#define qh_MAXoutside 1
    -
    -/*----------------------------------
    -
    -  qh_NOmerge
    -    disables facet merging if defined
    -
    -  notes:
    -    This saves about 10% space.
    -
    -    Unless 'Q0'
    -      qh_NOmerge sets 'QJ' to avoid precision errors
    -
    -    #define qh_NOmerge
    -
    -  see:
    -    qh_NOmem in mem.c
    -
    -    see user.c/user_eg.c for removing io.o
    -*/
    -
    -/*----------------------------------
    -
    -  qh_NOtrace
    -    no tracing if defined
    -
    -  notes:
    -    This saves about 5% space.
    -
    -    #define qh_NOtrace
    -*/
    -
    -/*----------------------------------
    -
    -  qh_QHpointer
    -    access global data with pointer or static structure
    -
    -  qh_QHpointer  = 1     access globals via a pointer to allocated memory
    -                        enables qh_saveqhull() and qh_restoreqhull()
    -                        [2010, gcc] costs about 4% in time and 4% in space
    -                        [2003, msvc] costs about 8% in time and 2% in space
    -
    -                = 0     qh_qh and qh_qhstat are static data structures
    -                        only one instance of qhull() can be active at a time
    -                        default value
    -
    -  notes:
    -    all global variables for qhull are in qh, qhmem, and qhstat
    -    qh is defined in libqhull.h
    -    qhmem is defined in mem.h
    -    qhstat is defined in stat.h
    -    C++ build defines qh_QHpointer [libqhullp.pro, libqhullcpp.pro]
    -
    -  see:
    -    user_eg.c for an example
    -  FIXUP need to override for C++ (-Dqh_QHpointer=1)
    -*/
    -#ifndef qh_QHpointer
    -#define qh_QHpointer 0
    -#endif
    -#if 0  /* sample code */
    -    qhT *oldqhA, *oldqhB;
    -
    -    exitcode= qh_new_qhull(dim, numpoints, points, ismalloc,
    -                      flags, outfile, errfile);
    -    /* use results from first call to qh_new_qhull */
    -    oldqhA= qh_save_qhull();
    -    exitcode= qh_new_qhull(dimB, numpointsB, pointsB, ismalloc,
    -                      flags, outfile, errfile);
    -    /* use results from second call to qh_new_qhull */
    -    oldqhB= qh_save_qhull();
    -    qh_restore_qhull(&oldqhA);
    -    /* use results from first call to qh_new_qhull */
    -    qh_freeqhull(qh_ALL);  /* frees all memory used by first call */
    -    qh_restore_qhull(&oldqhB);
    -    /* use results from second call to qh_new_qhull */
    -    qh_freeqhull(!qh_ALL); /* frees long memory used by second call */
    -    qh_memfreeshort(&curlong, &totlong);  /* frees short memory and memory allocator */
    -#endif
    -
    -/*----------------------------------
    -
    -  qh_QUICKhelp
    -    =1 to use abbreviated help messages, e.g., for degenerate inputs
    -*/
    -#define qh_QUICKhelp    0
    -
    -/*============================================================*/
    -/*============= -merge constants- ============================*/
    -/*============================================================*/
    -/*
    -   These constants effect facet merging.  You probably will not need
    -   to modify them.  They effect the performance of facet merging.
    -*/
    -
    -/*----------------------------------
    -
    -  qh_DIMmergeVertex
    -    max dimension for vertex merging (it is not effective in high-d)
    -*/
    -#define qh_DIMmergeVertex 6
    -
    -/*----------------------------------
    -
    -  qh_DIMreduceBuild
    -     max dimension for vertex reduction during build (slow in high-d)
    -*/
    -#define qh_DIMreduceBuild 5
    -
    -/*----------------------------------
    -
    -  qh_BESTcentrum
    -     if > 2*dim+n vertices, qh_findbestneighbor() tests centrums (faster)
    -     else, qh_findbestneighbor() tests all vertices (much better merges)
    -
    -  qh_BESTcentrum2
    -     if qh_BESTcentrum2 * DIM3 + BESTcentrum < #vertices tests centrums
    -*/
    -#define qh_BESTcentrum 20
    -#define qh_BESTcentrum2 2
    -
    -/*----------------------------------
    -
    -  qh_BESTnonconvex
    -    if > dim+n neighbors, qh_findbestneighbor() tests nonconvex ridges.
    -
    -  notes:
    -    It is needed because qh_findbestneighbor is slow for large facets
    -*/
    -#define qh_BESTnonconvex 15
    -
    -/*----------------------------------
    -
    -  qh_MAXnewmerges
    -    if >n newmerges, qh_merge_nonconvex() calls qh_reducevertices_centrums.
    -
    -  notes:
    -    It is needed because postmerge can merge many facets at once
    -*/
    -#define qh_MAXnewmerges 2
    -
    -/*----------------------------------
    -
    -  qh_MAXnewcentrum
    -    if <= dim+n vertices (n approximates the number of merges),
    -      reset the centrum in qh_updatetested() and qh_mergecycle_facets()
    -
    -  notes:
    -    needed to reduce cost and because centrums may move too much if
    -    many vertices in high-d
    -*/
    -#define qh_MAXnewcentrum 5
    -
    -/*----------------------------------
    -
    -  qh_COPLANARratio
    -    for 3-d+ merging, qh.MINvisible is n*premerge_centrum
    -
    -  notes:
    -    for non-merging, it's DISTround
    -*/
    -#define qh_COPLANARratio 3
    -
    -/*----------------------------------
    -
    -  qh_DISToutside
    -    When is a point clearly outside of a facet?
    -    Stops search in qh_findbestnew or qh_partitionall
    -    qh_findbest uses qh.MINoutside since since it is only called if no merges.
    -
    -  notes:
    -    'Qf' always searches for best facet
    -    if !qh.MERGING, same as qh.MINoutside.
    -    if qh_USEfindbestnew, increase value since neighboring facets may be ill-behaved
    -      [Note: Zdelvertextot occurs normally with interior points]
    -            RBOX 1000 s Z1 G1e-13 t1001188774 | QHULL Tv
    -    When there is a sharp edge, need to move points to a
    -    clearly good facet; otherwise may be lost in another partitioning.
    -    if too big then O(n^2) behavior for partitioning in cone
    -    if very small then important points not processed
    -    Needed in qh_partitionall for
    -      RBOX 1000 s Z1 G1e-13 t1001032651 | QHULL Tv
    -    Needed in qh_findbestnew for many instances of
    -      RBOX 1000 s Z1 G1e-13 t | QHULL Tv
    -
    -  See:
    -    qh_DISToutside -- when is a point clearly outside of a facet
    -    qh_SEARCHdist -- when is facet coplanar with the best facet?
    -    qh_USEfindbestnew -- when to use qh_findbestnew for qh_partitionpoint()
    -*/
    -#define qh_DISToutside ((qh_USEfindbestnew ? 2 : 1) * \
    -     fmax_((qh MERGING ? 2 : 1)*qh MINoutside, qh max_outside))
    -
    -/*----------------------------------
    -
    -  qh_RATIOnearinside
    -    ratio of qh.NEARinside to qh.ONEmerge for retaining inside points for
    -    qh_check_maxout().
    -
    -  notes:
    -    This is overkill since do not know the correct value.
    -    It effects whether 'Qc' reports all coplanar points
    -    Not used for 'd' since non-extreme points are coplanar
    -*/
    -#define qh_RATIOnearinside 5
    -
    -/*----------------------------------
    -
    -  qh_SEARCHdist
    -    When is a facet coplanar with the best facet?
    -    qh_findbesthorizon: all coplanar facets of the best facet need to be searched.
    -
    -  See:
    -    qh_DISToutside -- when is a point clearly outside of a facet
    -    qh_SEARCHdist -- when is facet coplanar with the best facet?
    -    qh_USEfindbestnew -- when to use qh_findbestnew for qh_partitionpoint()
    -*/
    -#define qh_SEARCHdist ((qh_USEfindbestnew ? 2 : 1) * \
    -      (qh max_outside + 2 * qh DISTround + fmax_( qh MINvisible, qh MAXcoplanar)));
    -
    -/*----------------------------------
    -
    -  qh_USEfindbestnew
    -     Always use qh_findbestnew for qh_partitionpoint, otherwise use
    -     qh_findbestnew if merged new facet or sharpnewfacets.
    -
    -  See:
    -    qh_DISToutside -- when is a point clearly outside of a facet
    -    qh_SEARCHdist -- when is facet coplanar with the best facet?
    -    qh_USEfindbestnew -- when to use qh_findbestnew for qh_partitionpoint()
    -*/
    -#define qh_USEfindbestnew (zzval_(Ztotmerge) > 50)
    -
    -/*----------------------------------
    -
    -  qh_WIDEcoplanar
    -    n*MAXcoplanar or n*MINvisible for a WIDEfacet
    -
    -    if vertex is further than qh.WIDEfacet from the hyperplane
    -    then its ridges are not counted in computing the area, and
    -    the facet's centrum is frozen.
    -
    -  notes:
    -   qh.WIDEfacet= max(qh.MAXoutside,qh_WIDEcoplanar*qh.MAXcoplanar,
    -      qh_WIDEcoplanar * qh.MINvisible);
    -*/
    -#define qh_WIDEcoplanar 6
    -
    -/*----------------------------------
    -
    -  qh_MAXnarrow
    -    max. cosine in initial hull that sets qh.NARROWhull
    -
    -  notes:
    -    If qh.NARROWhull, the initial partition does not make
    -    coplanar points.  If narrow, a coplanar point can be
    -    coplanar to two facets of opposite orientations and
    -    distant from the exact convex hull.
    -
    -    Conservative estimate.  Don't actually see problems until it is -1.0
    -*/
    -#define qh_MAXnarrow -0.99999999
    -
    -/*----------------------------------
    -
    -  qh_WARNnarrow
    -    max. cosine in initial hull to warn about qh.NARROWhull
    -
    -  notes:
    -    this is a conservative estimate.
    -    Don't actually see problems until it is -1.0.  See qh-impre.htm
    -*/
    -#define qh_WARNnarrow -0.999999999999999
    -
    -/*----------------------------------
    -
    -  qh_ZEROdelaunay
    -    a zero Delaunay facet occurs for input sites coplanar with their convex hull
    -    the last normal coefficient of a zero Delaunay facet is within
    -        qh_ZEROdelaunay * qh.ANGLEround of 0
    -
    -  notes:
    -    qh_ZEROdelaunay does not allow for joggled input ('QJ').
    -
    -    You can avoid zero Delaunay facets by surrounding the input with a box.
    -
    -    Use option 'PDk:-n' to explicitly define zero Delaunay facets
    -      k= dimension of input sites (e.g., 3 for 3-d Delaunay triangulation)
    -      n= the cutoff for zero Delaunay facets (e.g., 'PD3:-1e-12')
    -*/
    -#define qh_ZEROdelaunay 2
    -
    -#endif /* qh_DEFuser */
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/usermem.c b/scipy-0.10.1/scipy/spatial/qhull/src/usermem.c
    deleted file mode 100644
    index 5a8ac480be..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/usermem.c
    +++ /dev/null
    @@ -1,64 +0,0 @@
    -/*
      ---------------------------------
    -
    -   usermem.c
    -   qh_exit(), qh_free(), and qh_malloc()
    -
    -   see README.txt  see COPYING.txt for copyright information.
    -
    -   If you redefine one of these functions you must redefine all of them.
    -   If you recompile and load this file, then usermem.o will not be loaded
    -   from qhull.a or qhull.lib
    -
    -   See libqhull.h for data structures, macros, and user-callable functions.
    -   See user.c for qhull-related, redefinable functions
    -   see user.h for user-definable constants
    -   See userprintf.c for qh_fprintf and qh_fprintf_rbox
    -
    -   Please report any errors that you fix to qhull@qhull.org
    -*/
    -
    -#include "libqhull.h"
    -
    -#include 
    -
    -/*---------------------------------
    -
    -  qh_exit( exitcode )
    -    exit program
    -
    -  notes:
    -    same as exit()
    -*/
    -void qh_exit(int exitcode) {
    -    exit(exitcode);
    -} /* exit */
    -
    -/*---------------------------------
    -
    -qh_free( mem )
    -free memory
    -
    -notes:
    -same as free()
    -*/
    -void qh_free(void *mem) {
    -    free(mem);
    -} /* free */
    -
    -/*---------------------------------
    -
    -    qh_malloc( mem )
    -      allocate memory
    -
    -    notes:
    -      same as malloc()
    -*/
    -void *qh_malloc(size_t size) {
    -    return malloc(size);
    -} /* malloc */
    -
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull/src/userprintf.c b/scipy-0.10.1/scipy/spatial/qhull/src/userprintf.c
    deleted file mode 100644
    index 9602453809..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull/src/userprintf.c
    +++ /dev/null
    @@ -1,79 +0,0 @@
    -/*
      ---------------------------------
    -
    -   userprintf.c
    -   qh_fprintf() and qh_fprintf_rbox()
    -
    -   see README.txt  see COPYING.txt for copyright information.
    -
    -   If you redefine one of these functions you must redefine all of them.
    -   If you recompile and load this file, then userprintf.o will not be loaded
    -   from qhull.a or qhull.lib
    -
    -   See libqhull.h for data structures, macros, and user-callable functions.
    -   See user.c for qhull-related, redefinable functions
    -   see user.h for user-definable constants
    -   See usermem.c for qh_exit(), qh_free(), and qh_malloc()
    -   see Qhull.cpp and RboxPoints.cpp for examples.
    -
    -   Please report any errors that you fix to qhull@qhull.org
    -*/
    -
    -#include "libqhull.h"
    -
    -#include 
    -#include 
    -#include 
    -
    -/*---------------------------------
    -
    -   qh_fprintf(fp, msgcode, format, list of args )
    -     print arguments to *fp according to format
    -     Use qh_fprintf_rbox() for rboxlib.c
    -
    -   notes:
    -     same as fprintf()
    -     fgets() is not trapped like fprintf()
    -     exit qh_fprintf via qh_errexit()
    -     exit qh_fprintf_rbox via qh_errexit_rbox()
    -*/
    -
    -void qh_fprintf(FILE *fp, int msgcode, const char *fmt, ... ) {
    -    va_list args;
    -
    -    if (!fp) {
    -        fprintf(stderr, "QH6232 Qhull internal error (userprintf.c): fp is 0.  Wrong qh_fprintf called.\n");
    -        qh_errexit(6232, NULL, NULL);
    -    }
    -    va_start(args, fmt);
    -#if qh_QHpointer
    -    if (qh_qh && qh ANNOTATEoutput) {
    -#else
    -    if (qh ANNOTATEoutput) {
    -#endif
    -      fprintf(fp, "[QH%.4d]", msgcode);
    -    }else if (msgcode >= MSG_ERROR && msgcode < MSG_STDERR ) {
    -      fprintf(fp, "QH%.4d ", msgcode);
    -    }
    -    vfprintf(fp, fmt, args);
    -    va_end(args);
    -
    -    /* Place debugging traps here. Use with option 'Tn' */
    -
    -} /* qh_fprintf */
    -
    -void qh_fprintf_rbox(FILE *fp, int msgcode, const char *fmt, ... ) {
    -    va_list args;
    -
    -    if (!fp) {
    -        fprintf(stderr, "QH6231 Qhull internal error (userprintf.c): fp is 0.  Wrong qh_fprintf_rbox called.\n");
    -        qh_errexit_rbox(6231);
    -    }
    -    if (msgcode >= MSG_ERROR && msgcode < MSG_STDERR)
    -      fprintf(fp, "QH%.4d ", msgcode);
    -    va_start(args, fmt);
    -    vfprintf(fp, fmt, args);
    -    va_end(args);
    -} /* qh_fprintf_rbox */
    -
    diff --git a/scipy-0.10.1/scipy/spatial/qhull_blas.h b/scipy-0.10.1/scipy/spatial/qhull_blas.h
    deleted file mode 100644
    index 3a32ca44d7..0000000000
    --- a/scipy-0.10.1/scipy/spatial/qhull_blas.h
    +++ /dev/null
    @@ -1,19 +0,0 @@
    -/*
    - * Handle different Fortran conventions.
    - */
    -
    -#if defined(NO_APPEND_FORTRAN)
    -#if defined(UPPERCASE_FORTRAN)
    -#define F_FUNC(f,F) F
    -#else
    -#define F_FUNC(f,F) f
    -#endif
    -#else
    -#if defined(UPPERCASE_FORTRAN)
    -#define F_FUNC(f,F) F##_
    -#else
    -#define F_FUNC(f,F) f##_
    -#endif
    -#endif
    -
    -#define qh_dgesv F_FUNC(dgesv,DGESV)
    diff --git a/scipy-0.10.1/scipy/spatial/setup.py b/scipy-0.10.1/scipy/spatial/setup.py
    deleted file mode 100755
    index 7401d55a12..0000000000
    --- a/scipy-0.10.1/scipy/spatial/setup.py
    +++ /dev/null
    @@ -1,54 +0,0 @@
    -#!/usr/bin/env python
    -
    -from os.path import join
    -
    -def configuration(parent_package = '', top_path = None):
    -    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    -    from numpy.distutils.system_info import get_info
    -    from distutils.sysconfig import get_python_inc
    -
    -    config = Configuration('spatial', parent_package, top_path)
    -
    -    config.add_data_dir('tests')
    -
    -    qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c',
    -                 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c',
    -                 'random.c', 'rboxlib.c', 'stat.c', 'user.c', 'usermem.c',
    -                 'userprintf.c']
    -
    -    config.add_library('qhull',
    -                       sources=[join('qhull', 'src', x) for x in qhull_src],
    -                       include_dirs=[get_python_inc(),
    -                                     get_numpy_include_dirs()],
    -                       # XXX: GCC dependency!
    -                       #extra_compiler_args=['-fno-strict-aliasing'],
    -                       )
    -
    -    lapack = dict(get_info('lapack_opt'))
    -    try:
    -        libs = ['qhull'] + lapack.pop('libraries')
    -    except KeyError:
    -        libs = ['qhull']
    -    config.add_extension('qhull',
    -                         sources=['qhull.c'],
    -                         libraries=libs,
    -                         **lapack)
    -
    -    config.add_extension('ckdtree', sources=['ckdtree.c']) # FIXME: cython
    -
    -    config.add_extension('_distance_wrap',
    -        sources=[join('src', 'distance_wrap.c'), join('src', 'distance.c')],
    -        include_dirs = [get_numpy_include_dirs()])
    -
    -    return config
    -
    -if __name__ == '__main__':
    -    from numpy.distutils.core import setup
    -    setup(maintainer = "SciPy Developers",
    -          author = "Anne Archibald",
    -          maintainer_email = "scipy-dev@scipy.org",
    -          description = "Spatial algorithms and data structures",
    -          url = "http://www.scipy.org",
    -          license = "SciPy License (BSD Style)",
    -          **configuration(top_path='').todict()
    -          )
    diff --git a/scipy-0.10.1/scipy/spatial/setupscons.py b/scipy-0.10.1/scipy/spatial/setupscons.py
    deleted file mode 100755
    index a236c2c019..0000000000
    --- a/scipy-0.10.1/scipy/spatial/setupscons.py
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -#!/usr/bin/env python
    -
    -from os.path import join
    -
    -def configuration(parent_package = '', top_path = None):
    -    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    -    config = Configuration('spatial', parent_package, top_path)
    -
    -    config.add_data_dir('tests')
    -    config.add_sconscript('SConstruct')
    -
    -    return config
    -
    -if __name__ == '__main__':
    -    from numpy.distutils.core import setup
    -    setup(maintainer = "SciPy Developers",
    -          author = "Anne Archibald",
    -          maintainer_email = "scipy-dev@scipy.org",
    -          description = "Spatial algorithms and data structures",
    -          url = "http://www.scipy.org",
    -          license = "SciPy License (BSD Style)",
    -          **configuration(top_path='').todict()
    -          )
    diff --git a/scipy-0.10.1/scipy/spatial/src/common.h b/scipy-0.10.1/scipy/spatial/src/common.h
    deleted file mode 100644
    index 91623534ce..0000000000
    --- a/scipy-0.10.1/scipy/spatial/src/common.h
    +++ /dev/null
    @@ -1,70 +0,0 @@
    -/**
    - * common.h
    - *
    - * Author: Damian Eads
    - * Date:   September 22, 2007 (moved into new file on June 8, 2008)
    - *
    - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
    - * Adapted for incorporation into Scipy, April 9, 2008.
    - *
    - * Redistribution and use in source and binary forms, with or without
    - * modification, are permitted provided that the following conditions
    - * are met:
    - *   - Redistributions of source code must retain the above
    - *     copyright notice, this list of conditions and the
    - *     following disclaimer.
    - *   - Redistributions in binary form must reproduce the above copyright
    - *     notice, this list of conditions and the following disclaimer
    - *     in the documentation and/or other materials provided with the
    - *     distribution.
    - *   - Neither the name of the author nor the names of its
    - *     contributors may be used to endorse or promote products derived
    - *     from this software without specific prior written permission.
    - *
    - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    - */
    -
    -#ifndef _CLUSTER_COMMON_H
    -#define _CLUSTER_COMMON_H
    -
    -#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y))
    -#define CPY_MIN(_x, _y) ((_x < _y) ? (_x) : (_y))
    -
    -#define NCHOOSE2(_n) ((_n)*(_n-1)/2)
    -
    -#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8)
    -#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \
    -                                                          CPY_BITS_PER_CHAR))
    -#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \
    -                             ((CPY_BITS_PER_CHAR-1) - \
    -                              ((i) % CPY_BITS_PER_CHAR))) & 0x1)
    -#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \
    -                              ((0x1) << ((CPY_BITS_PER_CHAR-1) \
    -                                         -((i) % CPY_BITS_PER_CHAR))))
    -#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \
    -                              ~((0x1) << ((CPY_BITS_PER_CHAR-1) \
    -                                         -((i) % CPY_BITS_PER_CHAR))))
    -
    -#ifndef CPY_CEIL_DIV
    -#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \
    -                            ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1))
    -#endif
    -
    -
    -#ifdef CPY_DEBUG
    -#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__)
    -#else
    -#define CPY_DEBUG_MSG(...)
    -#endif
    -
    -#endif
    diff --git a/scipy-0.10.1/scipy/spatial/src/distance.c b/scipy-0.10.1/scipy/spatial/src/distance.c
    deleted file mode 100644
    index 8560c2280f..0000000000
    --- a/scipy-0.10.1/scipy/spatial/src/distance.c
    +++ /dev/null
    @@ -1,958 +0,0 @@
    -/**
    - * distance.c
    - *
    - * Author: Damian Eads
    - * Date:   September 22, 2007 (moved to new file on June 8, 2008)
    - *
    - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
    - * Adapted for incorporation into Scipy, April 9, 2008.
    - *
    - * Redistribution and use in source and binary forms, with or without
    - * modification, are permitted provided that the following conditions
    - * are met:
    - *   - Redistributions of source code must retain the above
    - *     copyright notice, this list of conditions and the
    - *     following disclaimer.
    - *   - Redistributions in binary form must reproduce the above copyright
    - *     notice, this list of conditions and the following disclaimer
    - *     in the documentation and/or other materials provided with the
    - *     distribution.
    - *   - Neither the name of the author nor the names of its
    - *     contributors may be used to endorse or promote products derived
    - *     from this software without specific prior written permission.
    - *
    - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    - */
    -#include 
    -#include 
    -
    -#include 
    -#include 
    -#include "common.h"
    -#include "distance.h"
    -
    -static NPY_INLINE double euclidean_distance(const double *u, const double *v, int n) {
    -  int i = 0;
    -  double s = 0.0, d;
    -  for (i = 0; i < n; i++) {
    -    d = u[i] - v[i];
    -    s = s + d * d;
    -  }
    -  return sqrt(s);
    -}
    -
    -static NPY_INLINE double ess_distance(const double *u, const double *v, int n) {
    -  int i = 0;
    -  double s = 0.0, d;
    -  for (i = 0; i < n; i++) {
    -    d = fabs(u[i] - v[i]);
    -    s = s + d * d;
    -  }
    -  return s;
    -}
    -
    -static NPY_INLINE double chebyshev_distance(const double *u, const double *v, int n) {
    -  int i = 0;
    -  double d, maxv = 0.0;
    -  for (i = 0; i < n; i++) {
    -    d = fabs(u[i] - v[i]);
    -    if (d > maxv) {
    -      maxv = d;
    -    }
    -  }
    -  return maxv;
    -}
    -
    -static NPY_INLINE double canberra_distance(const double *u, const double *v, int n) {
    -  int i;
    -  double snum = 0.0, sdenom = 0.0, tot = 0.0;
    -  for (i = 0; i < n; i++) {
    -    snum = fabs(u[i] - v[i]);
    -    sdenom = fabs(u[i]) + fabs(v[i]);
    -    if (sdenom > 0.0) {
    -        tot += snum / sdenom;
    -    }
    -  }
    -  return tot;
    -}
    -
    -static NPY_INLINE double bray_curtis_distance(const double *u, const double *v, int n) {
    -  int i;
    -  double s1 = 0.0, s2 = 0.0;
    -  for (i = 0; i < n; i++) {
    -    s1 += fabs(u[i] - v[i]);
    -    s2 += fabs(u[i] + v[i]);
    -  }
    -  return s1 / s2;
    -}
    -
    -static NPY_INLINE double mahalanobis_distance(const double *u, const double *v,
    -			    const double *covinv, double *dimbuf1,
    -			    double *dimbuf2, int n) {
    -  int i, j;
    -  double s;
    -  const double *covrow = covinv;
    -  for (i = 0; i < n; i++) {
    -    dimbuf1[i] = u[i] - v[i];
    -  }
    -  for (i = 0; i < n; i++) {
    -    covrow = covinv + (i * n);
    -    s = 0.0;
    -    for (j = 0; j < n; j++) {
    -      s += dimbuf1[j] * covrow[j];
    -    }
    -    dimbuf2[i] = s;
    -  }
    -  s = 0.0;
    -  for (i = 0; i < n; i++) {
    -    s += dimbuf1[i] * dimbuf2[i];
    -  }
    -  return sqrt(s);
    -}
    -
    -double hamming_distance(const double *u, const double *v, int n) {
    -  int i = 0;
    -  double s = 0.0;
    -  for (i = 0; i < n; i++) {
    -    s = s + (u[i] != v[i]);
    -  }
    -  return s / (double)n;
    -}
    -
    -static NPY_INLINE double hamming_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  double s = 0.0;
    -  for (i = 0; i < n; i++) {
    -    s = s + (u[i] != v[i]);
    -  }
    -  return s / (double)n;
    -}
    -
    -static NPY_INLINE double yule_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  int ntt = 0, nff = 0, nft = 0, ntf = 0;
    -  for (i = 0; i < n; i++) {
    -    ntt += (u[i] && v[i]);
    -    ntf += (u[i] && !v[i]);
    -    nft += (!u[i] && v[i]);
    -    nff += (!u[i] && !v[i]);
    -  }
    -  return (2.0 * ntf * nft) / (double)(ntt * nff + ntf * nft);  
    -}
    -
    -static NPY_INLINE double matching_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  int nft = 0, ntf = 0;
    -  for (i = 0; i < n; i++) {
    -    ntf += (u[i] && !v[i]);
    -    nft += (!u[i] && v[i]);
    -  }
    -  return (double)(ntf + nft) / (double)(n);
    -}
    -
    -static NPY_INLINE double dice_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  int ntt = 0, nft = 0, ntf = 0;
    -  for (i = 0; i < n; i++) {
    -    ntt += (u[i] && v[i]);
    -    ntf += (u[i] && !v[i]);
    -    nft += (!u[i] && v[i]);
    -  }
    -  return (double)(nft + ntf) / (double)(2.0 * ntt + ntf + nft);
    -}
    -
    -
    -static NPY_INLINE double rogerstanimoto_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  int ntt = 0, nff = 0, nft = 0, ntf = 0;
    -  for (i = 0; i < n; i++) {
    -    ntt += (u[i] && v[i]);
    -    ntf += (u[i] && !v[i]);
    -    nft += (!u[i] && v[i]);
    -    nff += (!u[i] && !v[i]);
    -  }
    -  return (2.0 * (ntf + nft)) / ((double)ntt + nff + (2.0 * (ntf + nft)));
    -}
    -
    -static NPY_INLINE double russellrao_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  /**  int nff = 0, nft = 0, ntf = 0;**/
    -  int ntt = 0;
    -  for (i = 0; i < n; i++) {
    -    /**    nff += (!u[i] && !v[i]);
    -    ntf += (u[i] && !v[i]);
    -    nft += (!u[i] && v[i]);**/
    -    ntt += (u[i] && v[i]);
    -  }
    -  /**  return (double)(ntf + nft + nff) / (double)n;**/
    -  return (double) (n - ntt) / (double) n;
    -}
    -
    -static NPY_INLINE double kulsinski_distance_bool(const char *u, const char *v, int n) {
    -  int _i = 0;
    -  int ntt = 0, nft = 0, ntf = 0, nff = 0;
    -  for (_i = 0; _i < n; _i++) {
    -    ntt += (u[_i] && v[_i]);
    -    ntf += (u[_i] && !v[_i]);
    -    nft += (!u[_i] && v[_i]);
    -    nff += (!u[_i] && !v[_i]);
    -  }
    -  return ((double)(ntf + nft - ntt + n)) / ((double)(ntf + nft + n));
    -}
    -
    -static NPY_INLINE double sokalsneath_distance_bool(const char *u, const char *v, int n) {
    -  int _i = 0;
    -  int ntt = 0, nft = 0, ntf = 0;
    -  for (_i = 0; _i < n; _i++) {
    -    ntt += (u[_i] && v[_i]);
    -    ntf += (u[_i] && !v[_i]);
    -    nft += (!u[_i] && v[_i]);
    -  }
    -  return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt);
    -}
    -
    -static NPY_INLINE double sokalmichener_distance_bool(const char *u, const char *v, int n) {
    -  int _i = 0;
    -  int ntt = 0, nft = 0, ntf = 0, nff = 0;
    -  for (_i = 0; _i < n; _i++) {
    -    ntt += (u[_i] && v[_i]);
    -    nff += (!u[_i] && !v[_i]);
    -    ntf += (u[_i] && !v[_i]);
    -    nft += (!u[_i] && v[_i]);
    -  }
    -  return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt + nff);
    -}
    -
    -static NPY_INLINE double jaccard_distance(const double *u, const double *v, int n) {
    -  int i = 0;
    -  double denom = 0.0, num = 0.0;
    -  for (i = 0; i < n; i++) {
    -    num += (u[i] != v[i]) && ((u[i] != 0.0) || (v[i] != 0.0));
    -    denom += (u[i] != 0.0) || (v[i] != 0.0);
    -  }
    -  return num / denom;
    -}
    -
    -static NPY_INLINE double jaccard_distance_bool(const char *u, const char *v, int n) {
    -  int i = 0;
    -  double num = 0.0, denom = 0.0;
    -  for (i = 0; i < n; i++) {
    -    num += (u[i] != v[i]) && ((u[i] != 0) || (v[i] != 0));
    -    denom += (u[i] != 0) || (v[i] != 0);
    -  }
    -  return num / denom;
    -}
    -
    -static NPY_INLINE double dot_product(const double *u, const double *v, int n) {
    -  int i;
    -  double s = 0.0;
    -  for (i = 0; i < n; i++) {
    -    s += u[i] * v[i];
    -  }
    -  return s;
    -}
    -
    -static NPY_INLINE double cosine_distance(const double *u, const double *v, int n,
    -		       const double nu, const double nv) {
    -  return 1.0 - (dot_product(u, v, n) / (nu * nv));
    -}
    -
    -static NPY_INLINE double seuclidean_distance(const double *var,
    -			   const double *u, const double *v, int n) {
    -  int i = 0;
    -  double s = 0.0, d;
    -  for (i = 0; i < n; i++) {
    -    d = u[i] - v[i];
    -    s = s + (d * d) / var[i];
    -  }
    -  return sqrt(s);
    -}
    -
    -static NPY_INLINE double city_block_distance(const double *u, const double *v, int n) {
    -  int i = 0;
    -  double s = 0.0, d;
    -  for (i = 0; i < n; i++) {
    -    d = fabs(u[i] - v[i]);
    -    s = s + d;
    -  }
    -  return s;
    -}
    -
    -double minkowski_distance(const double *u, const double *v, int n, double p) {
    -  int i = 0;
    -  double s = 0.0, d;
    -  for (i = 0; i < n; i++) {
    -    d = fabs(u[i] - v[i]);
    -    s = s + pow(d, p);
    -  }
    -  return pow(s, 1.0 / p);
    -}
    -
    -double weighted_minkowski_distance(const double *u, const double *v, int n, double p, const double *w) {
    -  int i = 0;
    -  double s = 0.0, d;
    -  for (i = 0; i < n; i++) {
    -    d = fabs(u[i] - v[i]) * w[i];
    -    s = s + pow(d, p);
    -  }
    -  return pow(s, 1.0 / p);
    -}
    -
    -void compute_mean_vector(double *res, const double *X, int m, int n) {
    -  int i, j;
    -  const double *v;
    -  for (i = 0; i < n; i++) {
    -    res[i] = 0.0;
    -  }
    -  for (j = 0; j < m; j++) {
    -
    -    v = X + (j * n);
    -    for (i = 0; i < n; i++) {
    -      res[i] += v[i];
    -    }
    -  }
    -  for (i = 0; i < n; i++) {
    -    res[i] /= (double)m;
    -  }
    -}
    -
    -void pdist_euclidean(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = euclidean_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_mahalanobis(const double *X, const double *covinv,
    -		       double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  double *dimbuf1, *dimbuf2;
    -  dimbuf1 = (double*)malloc(sizeof(double) * 2 * n);
    -  dimbuf2 = dimbuf1 + n;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n);
    -    }
    -  }
    -  dimbuf2 = 0;
    -  free(dimbuf1);
    -}
    -
    -void pdist_bray_curtis(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = bray_curtis_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_canberra(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = canberra_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_hamming(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = hamming_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_hamming_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = hamming_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_jaccard(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = jaccard_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_jaccard_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = jaccard_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -
    -void pdist_chebyshev(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = chebyshev_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = cosine_distance(u, v, n, norms[i], norms[j]);
    -    }
    -  }
    -}
    -
    -void pdist_seuclidean(const double *X, const double *var,
    -		     double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = seuclidean_distance(var, u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_city_block(const double *X, double *dm, int m, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = city_block_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_minkowski(const double *X, double *dm, int m, int n, double p) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = minkowski_distance(u, v, n, p);
    -    }
    -  }
    -}
    -
    -void pdist_weighted_minkowski(const double *X, double *dm, int m, int n, double p, const double *w) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = weighted_minkowski_distance(u, v, n, p, w);
    -    }
    -  }
    -}
    -
    -void pdist_yule_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = yule_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_matching_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = matching_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_dice_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = dice_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = rogerstanimoto_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_russellrao_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = russellrao_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_kulsinski_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = kulsinski_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = sokalsneath_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < m; i++) {
    -    for (j = i + 1; j < m; j++, it++) {
    -      u = X + (n * i);
    -      v = X + (n * j);
    -      *it = sokalmichener_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void dist_to_squareform_from_vector(double *M, const double *v, int n) {
    -  double *it;
    -  const double *cit;
    -  int i, j;
    -  cit = v;
    -  for (i = 0; i < n - 1; i++) {
    -    it = M + (i * n) + i + 1;
    -    for (j = i + 1; j < n; j++, it++, cit++) {
    -      *it = *cit;
    -    }
    -  }
    -}
    -
    -void dist_to_vector_from_squareform(const double *M, double *v, int n) {
    -  double *it;
    -  const double *cit;
    -  int i, j;
    -  it = v;
    -  for (i = 0; i < n - 1; i++) {
    -    cit = M + (i * n) + i + 1;
    -    for (j = i + 1; j < n; j++, it++, cit++) {
    -      *it = *cit;
    -    }
    -  }
    -}
    -
    -
    -/** cdist */
    -
    -void cdist_euclidean(const double *XA,
    -		     const double *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = euclidean_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_mahalanobis(const double *XA,
    -		       const double *XB,
    -		       const double *covinv,
    -		       double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  double *dimbuf1, *dimbuf2;
    -  dimbuf1 = (double*)malloc(sizeof(double) * 2 * n);
    -  dimbuf2 = dimbuf1 + n;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n);
    -    }
    -  }
    -  dimbuf2 = 0;
    -  free(dimbuf1);
    -}
    -
    -void cdist_bray_curtis(const double *XA, const double *XB,
    -		       double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = bray_curtis_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_canberra(const double *XA,
    -		    const double *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = canberra_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_hamming(const double *XA,
    -		   const double *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = hamming_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_hamming_bool(const char *XA,
    -			const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = hamming_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_jaccard(const double *XA,
    -		   const double *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = jaccard_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_jaccard_bool(const char *XA,
    -			const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = jaccard_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -
    -void cdist_chebyshev(const double *XA,
    -		     const double *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = chebyshev_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_cosine(const double *XA,
    -		  const double *XB, double *dm, int mA, int mB, int n,
    -		  const double *normsA, const double *normsB) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = cosine_distance(u, v, n, normsA[i], normsB[j]);
    -    }
    -  }
    -}
    -
    -void cdist_seuclidean(const double *XA,
    -		      const double *XB,
    -		      const double *var,
    -		      double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = seuclidean_distance(var, u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_city_block(const double *XA, const double *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = city_block_distance(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_minkowski(const double *XA, const double *XB, double *dm, int mA, int mB, int n, double p) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = minkowski_distance(u, v, n, p);
    -    }
    -  }
    -}
    -
    -void cdist_weighted_minkowski(const double *XA, const double *XB, double *dm, int mA, int mB, int n, double p, const double *w) {
    -  int i, j;
    -  const double *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = weighted_minkowski_distance(u, v, n, p, w);
    -    }
    -  }
    -}
    -
    -void cdist_yule_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = yule_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_matching_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = matching_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_dice_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = dice_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_rogerstanimoto_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = rogerstanimoto_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_russellrao_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = russellrao_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_kulsinski_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = kulsinski_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_sokalsneath_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = sokalsneath_distance_bool(u, v, n);
    -    }
    -  }
    -}
    -
    -void cdist_sokalmichener_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
    -  int i, j;
    -  const char *u, *v;
    -  double *it = dm;
    -  for (i = 0; i < mA; i++) {
    -    for (j = 0; j < mB; j++, it++) {
    -      u = XA + (n * i);
    -      v = XB + (n * j);
    -      *it = sokalmichener_distance_bool(u, v, n);
    -    }
    -  }
    -}
    diff --git a/scipy-0.10.1/scipy/spatial/src/distance.h b/scipy-0.10.1/scipy/spatial/src/distance.h
    deleted file mode 100644
    index 69c83d13a3..0000000000
    --- a/scipy-0.10.1/scipy/spatial/src/distance.h
    +++ /dev/null
    @@ -1,116 +0,0 @@
    -/**
    - * distance.h
    - *
    - * Author: Damian Eads
    - * Date:   September 22, 2007 (moved to new file on June 8, 2008)
    - * Adapted for incorporation into Scipy, April 9, 2008.
    - *
    - * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
    - *
    - * Redistribution and use in source and binary forms, with or without
    - * modification, are permitted provided that the following conditions
    - * are met:
    - *   - Redistributions of source code must retain the above
    - *     copyright notice, this list of conditions and the
    - *     following disclaimer.
    - *   - Redistributions in binary form must reproduce the above copyright
    - *     notice, this list of conditions and the following disclaimer
    - *     in the documentation and/or other materials provided with the
    - *     distribution.
    - *   - Neither the name of the author nor the names of its
    - *     contributors may be used to endorse or promote products derived
    - *     from this software without specific prior written permission.
    - *
    - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    - */
    -
    -#ifndef _CPY_DISTANCE_H
    -#define _CPY_DISTANCE_H
    -
    -void dist_to_squareform_from_vector(double *M, const double *v, int n);
    -void dist_to_vector_from_squareform(const double *M, double *v, int n);
    -void pdist_euclidean(const double *X, double *dm, int m, int n);
    -void pdist_seuclidean(const double *X,
    -		      const double *var, double *dm, int m, int n);
    -void pdist_mahalanobis(const double *X, const double *covinv,
    -		       double *dm, int m, int n);
    -void pdist_bray_curtis(const double *X, double *dm, int m, int n);
    -void pdist_canberra(const double *X, double *dm, int m, int n);
    -void pdist_hamming(const double *X, double *dm, int m, int n);
    -void pdist_hamming_bool(const char *X, double *dm, int m, int n);
    -void pdist_city_block(const double *X, double *dm, int m, int n);
    -void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms);
    -void pdist_chebyshev(const double *X, double *dm, int m, int n);
    -void pdist_jaccard(const double *X, double *dm, int m, int n);
    -void pdist_jaccard_bool(const char *X, double *dm, int m, int n);
    -void pdist_kulsinski_bool(const char *X, double *dm, int m, int n);
    -void pdist_minkowski(const double *X, double *dm, int m, int n, double p);
    -void pdist_weighted_minkowski(const double *X, double *dm, int m, int n, double p, const double *w);
    -void pdist_yule_bool(const char *X, double *dm, int m, int n);
    -void pdist_matching_bool(const char *X, double *dm, int m, int n);
    -void pdist_dice_bool(const char *X, double *dm, int m, int n);
    -void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n);
    -void pdist_russellrao_bool(const char *X, double *dm, int m, int n);
    -void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n);
    -void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n);
    -
    -void cdist_euclidean(const double *XA, const double *XB, double *dm, int mA, int mB, int n);
    -void cdist_mahalanobis(const double *XA, const double *XB,
    -		       const double *covinv,
    -		       double *dm, int mA, int mB, int n);
    -void cdist_bray_curtis(const double *XA, const double *XB,
    -		       double *dm, int mA, int mB, int n);
    -void cdist_canberra(const double *XA,
    -		    const double *XB, double *dm, int mA, int mB, int n);
    -void cdist_hamming(const double *XA,
    -		   const double *XB, double *dm, int mA, int mB, int n);
    -void cdist_hamming_bool(const char *XA,
    -			const char *XB, double *dm,
    -			int mA, int mB, int n);
    -void cdist_jaccard(const double *XA,
    -		   const double *XB, double *dm, int mA, int mB, int n);
    -void cdist_jaccard_bool(const char *XA,
    -			const char *XB, double *dm, int mA, int mB, int n);
    -void cdist_chebyshev(const double *XA,
    -		     const double *XB, double *dm, int mA, int mB, int n);
    -void cdist_cosine(const double *XA,
    -		  const double *XB, double *dm, int mA, int mB, int n,
    -		  const double *normsA, const double *normsB);
    -void cdist_seuclidean(const double *XA,
    -		      const double *XB,
    -		      const double *var,
    -		      double *dm, int mA, int mB, int n);
    -void cdist_city_block(const double *XA, const double *XB, double *dm,
    -		      int mA, int mB, int n);
    -void cdist_minkowski(const double *XA, const double *XB, double *dm,
    -		     int mA, int mB, int n, double p);
    -void cdist_weighted_minkowski(const double *XA, const double *XB, double *dm,
    -			      int mA, int mB, int n, double p, const double *w);
    -void cdist_yule_bool(const char *XA, const char *XB, double *dm,
    -		     int mA, int mB, int n);
    -void cdist_matching_bool(const char *XA, const char *XB, double *dm,
    -			 int mA, int mB, int n);
    -void cdist_dice_bool(const char *XA, const char *XB, double *dm,
    -		     int mA, int mB, int n);
    -void cdist_rogerstanimoto_bool(const char *XA, const char *XB, double *dm,
    -			       int mA, int mB, int n);
    -void cdist_russellrao_bool(const char *XA, const char *XB, double *dm,
    -			   int mA, int mB, int n);
    -void cdist_kulsinski_bool(const char *XA, const char *XB, double *dm,
    -			  int mA, int mB, int n);
    -void cdist_sokalsneath_bool(const char *XA, const char *XB, double *dm,
    -			    int mA, int mB, int n);
    -void cdist_sokalmichener_bool(const char *XA, const char *XB, double *dm,
    -			      int mA, int mB, int n);
    -
    -#endif
    diff --git a/scipy-0.10.1/scipy/spatial/src/distance_wrap.c b/scipy-0.10.1/scipy/spatial/src/distance_wrap.c
    deleted file mode 100644
    index 0e810ad549..0000000000
    --- a/scipy-0.10.1/scipy/spatial/src/distance_wrap.c
    +++ /dev/null
    @@ -1,1163 +0,0 @@
    -/**
    - * distance_wrap.c
    - *
    - * Author: Damian Eads
    - * Date:   September 22, 2007 (moved to new file on June 8, 2008)
    - * Adapted for incorporation into Scipy, April 9, 2008.
    - *
    - * Copyright (c) 2007, Damian Eads. All rights reserved.
    - *
    - * Redistribution and use in source and binary forms, with or without
    - * modification, are permitted provided that the following conditions
    - * are met:
    - *   - Redistributions of source code must retain the above
    - *     copyright notice, this list of conditions and the
    - *     following disclaimer.
    - *   - Redistributions in binary form must reproduce the above copyright
    - *     notice, this list of conditions and the following disclaimer
    - *     in the documentation and/or other materials provided with the
    - *     distribution.
    - *   - Neither the name of the author nor the names of its
    - *     contributors may be used to endorse or promote products derived
    - *     from this software without specific prior written permission.
    - *
    - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    - */
    -
    -#include <math.h>
    -#include "distance.h"
    -#include "Python.h"
    -#include <numpy/arrayobject.h>
    -#include <stdio.h>
    -
    -extern PyObject *cdist_euclidean_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_euclidean(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_canberra_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_canberra(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_bray_curtis_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_bray_curtis(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *cdist_mahalanobis_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *covinv_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  const double *covinv;
    -  if (!PyArg_ParseTuple(args, "O!O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &covinv_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    covinv = (const double*)covinv_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_mahalanobis(XA, XB, covinv, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *cdist_chebyshev_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_chebyshev(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *cdist_cosine_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_, *normsA_, *normsB_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB, *normsA, *normsB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_,
    -			&PyArray_Type, &normsA_,
    -			&PyArray_Type, &normsB_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    normsA = (const double*)normsA_->data;
    -    normsB = (const double*)normsB_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_cosine(XA, XB, dm, mA, mB, n, normsA, normsB);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_seuclidean_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_, *var_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB, *var;
    -  if (!PyArg_ParseTuple(args, "O!O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &var_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    var = (double*)var_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_seuclidean(XA, XB, var, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_city_block_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_city_block(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_hamming_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_hamming(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_hamming_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_hamming_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_jaccard_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_jaccard(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_jaccard_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_jaccard_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_minkowski_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB;
    -  double p;
    -  if (!PyArg_ParseTuple(args, "O!O!O!d",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_,
    -			&p)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -    cdist_minkowski(XA, XB, dm, mA, mB, n, p);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_weighted_minkowski_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_, *w_;
    -  int mA, mB, n;
    -  double *dm;
    -  const double *XA, *XB, *w;
    -  double p;
    -  if (!PyArg_ParseTuple(args, "O!O!O!dO!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_,
    -			&p,
    -			&PyArray_Type, &w_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const double*)XA_->data;
    -    XB = (const double*)XB_->data;
    -    w = (const double*)w_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -    cdist_weighted_minkowski(XA, XB, dm, mA, mB, n, p, w);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *cdist_yule_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_yule_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_matching_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_matching_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_dice_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_dice_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_rogerstanimoto_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_russellrao_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_russellrao_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_kulsinski_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_sokalmichener_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *cdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *XA_, *XB_, *dm_;
    -  int mA, mB, n;
    -  double *dm;
    -  const char *XA, *XB;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &XA_, &PyArray_Type, &XB_, 
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    XA = (const char*)XA_->data;
    -    XB = (const char*)XB_->data;
    -    dm = (double*)dm_->data;
    -    mA = XA_->dimensions[0];
    -    mB = XB_->dimensions[0];
    -    n = XA_->dimensions[1];
    -
    -    cdist_sokalsneath_bool(XA, XB, dm, mA, mB, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -/***************************** pdist ***/
    -
    -extern PyObject *pdist_euclidean_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_euclidean(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_canberra_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_canberra(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_bray_curtis_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_bray_curtis(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *pdist_mahalanobis_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *covinv_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  const double *covinv;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &covinv_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    covinv = (const double*)covinv_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_mahalanobis(X, covinv, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *pdist_chebyshev_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_chebyshev(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *pdist_cosine_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_, *norms_;
    -  int m, n;
    -  double *dm;
    -  const double *X, *norms;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_,
    -			&PyArray_Type, &norms_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    norms = (const double*)norms_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_cosine(X, dm, m, n, norms);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_seuclidean_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_, *var_;
    -  int m, n;
    -  double *dm;
    -  const double *X, *var;
    -  if (!PyArg_ParseTuple(args, "O!O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &var_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (double*)X_->data;
    -    dm = (double*)dm_->data;
    -    var = (double*)var_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_seuclidean(X, var, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_city_block_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_city_block(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_hamming_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_hamming(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_hamming_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_hamming_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_jaccard_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const double *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_jaccard(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_jaccard_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_jaccard_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_minkowski_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm, *X;
    -  double p;
    -  if (!PyArg_ParseTuple(args, "O!O!d",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_,
    -			&p)) {
    -    return 0;
    -  }
    -  else {
    -    X = (double*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_minkowski(X, dm, m, n, p);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *pdist_weighted_minkowski_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_, *w_;
    -  int m, n;
    -  double *dm, *X, *w;
    -  double p;
    -  if (!PyArg_ParseTuple(args, "O!O!dO!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_,
    -			&p,
    -			&PyArray_Type, &w_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (double*)X_->data;
    -    dm = (double*)dm_->data;
    -    w = (const double*)w_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_weighted_minkowski(X, dm, m, n, p, w);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -extern PyObject *pdist_yule_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_yule_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_matching_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_matching_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_dice_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_dice_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_rogerstanimoto_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_russellrao_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_russellrao_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_kulsinski_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_sokalmichener_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *pdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *X_, *dm_;
    -  int m, n;
    -  double *dm;
    -  const char *X;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &X_,
    -			&PyArray_Type, &dm_)) {
    -    return 0;
    -  }
    -  else {
    -    X = (const char*)X_->data;
    -    dm = (double*)dm_->data;
    -    m = X_->dimensions[0];
    -    n = X_->dimensions[1];
    -
    -    pdist_sokalsneath_bool(X, dm, m, n);
    -  }
    -  return Py_BuildValue("");
    -}
    -
    -extern PyObject *to_squareform_from_vector_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *M_, *v_;
    -  int n;
    -  const double *v;
    -  double *M;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &M_,
    -			&PyArray_Type, &v_)) {
    -    return 0;
    -  }
    -  else {
    -    M = (double*)M_->data;
    -    v = (const double*)v_->data;
    -    n = M_->dimensions[0];
    -    dist_to_squareform_from_vector(M, v, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -extern PyObject *to_vector_from_squareform_wrap(PyObject *self, PyObject *args) {
    -  PyArrayObject *M_, *v_;
    -  int n;
    -  double *v;
    -  const double *M;
    -  if (!PyArg_ParseTuple(args, "O!O!",
    -			&PyArray_Type, &M_,
    -			&PyArray_Type, &v_)) {
    -    return 0;
    -  }
    -  else {
    -    M = (const double*)M_->data;
    -    v = (double*)v_->data;
    -    n = M_->dimensions[0];
    -    dist_to_vector_from_squareform(M, v, n);
    -  }
    -  return Py_BuildValue("d", 0.0);
    -}
    -
    -
    -static PyMethodDef _distanceWrapMethods[] = {
    -  {"cdist_bray_curtis_wrap", cdist_bray_curtis_wrap, METH_VARARGS},
    -  {"cdist_canberra_wrap", cdist_canberra_wrap, METH_VARARGS},
    -  {"cdist_chebyshev_wrap", cdist_chebyshev_wrap, METH_VARARGS},
    -  {"cdist_city_block_wrap", cdist_city_block_wrap, METH_VARARGS},
    -  {"cdist_cosine_wrap", cdist_cosine_wrap, METH_VARARGS},
    -  {"cdist_dice_bool_wrap", cdist_dice_bool_wrap, METH_VARARGS},
    -  {"cdist_euclidean_wrap", cdist_euclidean_wrap, METH_VARARGS},
    -  {"cdist_hamming_wrap", cdist_hamming_wrap, METH_VARARGS},
    -  {"cdist_hamming_bool_wrap", cdist_hamming_bool_wrap, METH_VARARGS},
    -  {"cdist_jaccard_wrap", cdist_jaccard_wrap, METH_VARARGS},
    -  {"cdist_jaccard_bool_wrap", cdist_jaccard_bool_wrap, METH_VARARGS},
    -  {"cdist_kulsinski_bool_wrap", cdist_kulsinski_bool_wrap, METH_VARARGS},
    -  {"cdist_mahalanobis_wrap", cdist_mahalanobis_wrap, METH_VARARGS},
    -  {"cdist_matching_bool_wrap", cdist_matching_bool_wrap, METH_VARARGS},
    -  {"cdist_minkowski_wrap", cdist_minkowski_wrap, METH_VARARGS},
    -  {"cdist_weighted_minkowski_wrap", cdist_weighted_minkowski_wrap, METH_VARARGS},
    -  {"cdist_rogerstanimoto_bool_wrap", cdist_rogerstanimoto_bool_wrap, METH_VARARGS},
    -  {"cdist_russellrao_bool_wrap", cdist_russellrao_bool_wrap, METH_VARARGS},
    -  {"cdist_seuclidean_wrap", cdist_seuclidean_wrap, METH_VARARGS},
    -  {"cdist_sokalmichener_bool_wrap", cdist_sokalmichener_bool_wrap, METH_VARARGS},
    -  {"cdist_sokalsneath_bool_wrap", cdist_sokalsneath_bool_wrap, METH_VARARGS},
    -  {"cdist_yule_bool_wrap", cdist_yule_bool_wrap, METH_VARARGS},
    -  {"pdist_bray_curtis_wrap", pdist_bray_curtis_wrap, METH_VARARGS},
    -  {"pdist_canberra_wrap", pdist_canberra_wrap, METH_VARARGS},
    -  {"pdist_chebyshev_wrap", pdist_chebyshev_wrap, METH_VARARGS},
    -  {"pdist_city_block_wrap", pdist_city_block_wrap, METH_VARARGS},
    -  {"pdist_cosine_wrap", pdist_cosine_wrap, METH_VARARGS},
    -  {"pdist_dice_bool_wrap", pdist_dice_bool_wrap, METH_VARARGS},
    -  {"pdist_euclidean_wrap", pdist_euclidean_wrap, METH_VARARGS},
    -  {"pdist_hamming_wrap", pdist_hamming_wrap, METH_VARARGS},
    -  {"pdist_hamming_bool_wrap", pdist_hamming_bool_wrap, METH_VARARGS},
    -  {"pdist_jaccard_wrap", pdist_jaccard_wrap, METH_VARARGS},
    -  {"pdist_jaccard_bool_wrap", pdist_jaccard_bool_wrap, METH_VARARGS},
    -  {"pdist_kulsinski_bool_wrap", pdist_kulsinski_bool_wrap, METH_VARARGS},
    -  {"pdist_mahalanobis_wrap", pdist_mahalanobis_wrap, METH_VARARGS},
    -  {"pdist_matching_bool_wrap", pdist_matching_bool_wrap, METH_VARARGS},
    -  {"pdist_minkowski_wrap", pdist_minkowski_wrap, METH_VARARGS},
    -  {"pdist_weighted_minkowski_wrap", pdist_weighted_minkowski_wrap, METH_VARARGS},
    -  {"pdist_rogerstanimoto_bool_wrap", pdist_rogerstanimoto_bool_wrap, METH_VARARGS},
    -  {"pdist_russellrao_bool_wrap", pdist_russellrao_bool_wrap, METH_VARARGS},
    -  {"pdist_seuclidean_wrap", pdist_seuclidean_wrap, METH_VARARGS},
    -  {"pdist_sokalmichener_bool_wrap", pdist_sokalmichener_bool_wrap, METH_VARARGS},
    -  {"pdist_sokalsneath_bool_wrap", pdist_sokalsneath_bool_wrap, METH_VARARGS},
    -  {"pdist_yule_bool_wrap", pdist_yule_bool_wrap, METH_VARARGS},
    -  {"to_squareform_from_vector_wrap",
    -   to_squareform_from_vector_wrap, METH_VARARGS},
    -  {"to_vector_from_squareform_wrap",
    -   to_vector_from_squareform_wrap, METH_VARARGS},
    -  {NULL, NULL}     /* Sentinel - marks the end of this structure */
    -};
    -
    -#if PY_VERSION_HEX >= 0x03000000
    -static struct PyModuleDef moduledef = {
    -    PyModuleDef_HEAD_INIT,
    -    "_distance_wrap",
    -    NULL,
    -    -1,
    -    _distanceWrapMethods,
    -    NULL,
    -    NULL,
    -    NULL,
    -    NULL
    -};
    -
    -PyObject *PyInit__distance_wrap(void)
    -{
    -    PyObject *m;
    -
    -    m = PyModule_Create(&moduledef);
    -    import_array();
    -
    -    return m;
    -}
    -#else
    -PyMODINIT_FUNC init_distance_wrap(void)
    -{
    -  (void) Py_InitModule("_distance_wrap", _distanceWrapMethods);
    -  import_array();  // Must be present for NumPy.  Called first after above line.
    -}
    -#endif
    diff --git a/scipy-0.10.1/scipy/spatial/tests/cdist-X1.txt b/scipy-0.10.1/scipy/spatial/tests/cdist-X1.txt
    deleted file mode 100644
    index 833d5bdf2a..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/cdist-X1.txt
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -1.147593763490969421e-01 8.926156143344999849e-01 1.437758624645746330e-02 1.803435962879929022e-02 5.533046214065578949e-01 5.554315640747428118e-01 4.497546637814608950e-02 4.438089247948049376e-01 7.984582810220538507e-01 2.752880789161644692e-01 1.344667112315823809e-01 9.230479561452992199e-01 6.040471462941819913e-01 3.797251652770228247e-01 4.316042735592399149e-01 5.312356915348823705e-01 4.348143005129563310e-01 3.111531488508799681e-01 9.531194313908697424e-04 8.212995023500069269e-02 6.689953269869852726e-01 9.914864535288493430e-01 8.037556036341153565e-01
    -9.608925123801395074e-01 2.974451233678974127e-01 9.001110330654185088e-01 5.824163330415995654e-01 7.308574928293812834e-01 2.276154562412870952e-01 7.306791076039623745e-01 8.677244866905511333e-01 9.160806456176984192e-01 6.157216959991280714e-01 5.149053524695440531e-01 3.056427344890983999e-01 9.790557366933895223e-01 4.484995861076724877e-01 4.776550391081165747e-01 7.210436977670631187e-01 9.136399501661039979e-01 4.260275733550000776e-02 5.943900041968954717e-01 3.864571606342745991e-01 9.442027665110838131e-01 4.779949058608601309e-02 6.107551944250865228e-01
    -3.297286578103622023e-01 5.980207401936733502e-01 3.673301293561567205e-01 2.585830520887681949e-01 4.660558746104259686e-01 6.083795956610364986e-01 4.535206368070313632e-01 6.873989778785424276e-01 5.130152688495458468e-01 7.665877846542720198e-01 3.444402973525138023e-01 3.583658123644906102e-02 7.924818220986856732e-01 8.746685720522412444e-01 3.010105569182431884e-01 6.012239357385538163e-01 6.233737362204671006e-01 4.830438698668915176e-01 2.317286885842551047e-02 7.585989958123050547e-01 7.108257632278830451e-01 1.551024884178199281e-01 2.665485998155288083e-01
    -2.456278068903017253e-02 4.148739837711815648e-01 1.986372227934196655e-01 6.920408530298168825e-01 1.003067576685774398e-01 7.421560456480125190e-01 1.808453980608998313e-01 4.251297882537475870e-01 6.773002683522370004e-01 4.084108792570182445e-01 7.462888013191590897e-01 8.069930220529277776e-01 9.211110587681808903e-01 4.141491046181076108e-01 7.486318689260342829e-01 9.515405507589296263e-01 4.634288892577109742e-03 8.027593488166355762e-01 3.010346805217798405e-01 8.663248877242523127e-01 2.479968181181605447e-01 5.619851096054278017e-01 3.903886764590250857e-01
    -7.122019976035700584e-01 6.188878051047785878e-01 7.290897087051201320e-01 6.334802157757637442e-01 5.523084734954342156e-01 5.614937129563645213e-01 2.496741051791574462e-01 5.972227939599233926e-01 1.786590597761109622e-01 2.609525984850900038e-01 7.210438943286010538e-01 2.211429064605652250e-01 9.140497572472672250e-02 1.430242193668443962e-01 7.856446942916397447e-01 4.635256358156553125e-01 5.278744289813760426e-01 3.702808015407184072e-01 5.527073830480792038e-01 6.370732917599846168e-01 9.953487928925482953e-01 3.021789770611936765e-01 3.354901923998221402e-02
    -6.509638560895427695e-01 8.387598220902757751e-01 7.761375971745763103e-01 1.481627639227802717e-01 3.529474982902305324e-01 4.883093646287851586e-01 9.652923033658690199e-01 9.500680513565308294e-01 3.061885005078281985e-01 7.271902818906019750e-01 2.358962978196710303e-03 7.359889703223099211e-01 8.988893768074724955e-01 4.135279653937307121e-02 8.516441856688283796e-01 4.889597623270667270e-01 5.575909822114655245e-01 9.010853652261575641e-01 2.912844516556202246e-01 9.088759383368658629e-01 8.104351227460024898e-01 8.080695436776826890e-01 1.430530913253185155e-01
    -8.048001196608134400e-01 3.066089444418462762e-02 9.021887554292090661e-01 6.154331491807940591e-02 1.378912575206647784e-02 5.775720193142440673e-01 1.219298963069791464e-01 1.883270243412101808e-01 5.569262398688379356e-02 8.964817777510125651e-02 7.977092785346929782e-01 4.878149375226197293e-01 4.511973131518809410e-02 1.858690046801604323e-01 6.947686471083162063e-01 5.884058794291086025e-01 8.638884676612634816e-01 3.855470871341656336e-01 3.495049047300468059e-01 2.767740932353948136e-01 4.731087031714035218e-01 6.679001673437914288e-01 7.502944200696660682e-01
    -6.527328264244687261e-01 8.289483383553154505e-01 9.179741348282299818e-01 1.065639864466713105e-01 6.253616929058514184e-01 5.927750325266062381e-01 3.039157425463192563e-01 2.452766763359194302e-01 6.514027700704632107e-01 5.529218485487964463e-01 4.941158239308394151e-01 6.605306467722642516e-01 2.273688037050677346e-01 4.282616592244774534e-01 2.956128257930247250e-01 1.154803628237965896e-01 9.228220410235263849e-01 6.663525307676617659e-01 1.908852615936970087e-01 9.921383408926374159e-01 4.988716450388516188e-01 1.014900352736023414e-01 3.363930180244284474e-01
    -2.914369076275757919e-01 5.196673601143533272e-01 7.420144907858341465e-01 1.768984185504740569e-01 5.296766993228564369e-01 5.922023566159900776e-01 5.965161262020234334e-01 3.810272333046110793e-01 8.368797246118340194e-01 7.896422363801189892e-01 9.655797561098209414e-01 4.430034032346981121e-01 2.780869795706976122e-01 3.047310845416009162e-01 8.051138863500326703e-01 6.731468634690835895e-01 4.743383036815584930e-01 9.530709614322225853e-01 7.753587619850917934e-01 2.801137109357491051e-01 6.182543660889736614e-01 5.005218857766725593e-01 9.071447804755052857e-01
    -2.075071644012620453e-01 4.834950086973934802e-01 3.037011473860764532e-01 6.476084284887700937e-01 8.107195771564194020e-01 7.869075869075803364e-01 6.851234019375299633e-01 3.544187468104398331e-02 4.847673235908021017e-01 5.690262846164507726e-01 1.663354142616256803e-01 9.692796809752548537e-01 4.133441725866372485e-01 6.729167604487583665e-01 3.998813427407297283e-01 8.272617414104491695e-01 2.129248316324727774e-01 6.517004761357130249e-01 7.363013506605019520e-01 4.072375306356985636e-01 4.463336683526665238e-01 5.485059309728204102e-01 1.981745754527846071e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/cdist-X2.txt b/scipy-0.10.1/scipy/spatial/tests/cdist-X2.txt
    deleted file mode 100644
    index fc3ea19674..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/cdist-X2.txt
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -7.680465556300619667e-02 4.675022344069014180e-01 8.955498989131543963e-01 3.816236071436276411e-01 1.109030077070989329e-01 2.318928815459808668e-02 7.477394240984251983e-01 1.202289789304434864e-01 8.007290497575981769e-01 6.795195698871731027e-01 6.568225762396605605e-01 2.231475263228478445e-01 7.064624077661341151e-02 1.081656666815267176e-02 1.592069359090128033e-01 1.363392203645097389e-01 9.277020735447568667e-01 8.103136564528209407e-01 5.229467676276455812e-02 7.708020259874025504e-01 6.527954747473352359e-02 5.516397414886525796e-01 3.653371861367954443e-01
    -8.144399106025798085e-01 7.731852525462976633e-01 6.909477620673205589e-01 9.696063817000286633e-01 4.297887511677249694e-01 6.989600553425188156e-01 7.310201335033380543e-01 3.135256147868910048e-01 5.715578037275241829e-01 3.935000744675094531e-01 2.057715781268398825e-01 5.892508589665171881e-01 8.512951599236765476e-01 9.569808799061578775e-01 6.164885878024699561e-01 4.714185430004367294e-01 6.128831737628155363e-01 6.641799309623502845e-01 6.001985185338730711e-01 4.231922889723856995e-01 7.605249308075449077e-01 1.064530958018087281e-01 6.306470691957204444e-01
    -4.265470127256254518e-01 5.933766716280767239e-01 3.698589270536845053e-02 2.173799740537294412e-01 3.032679325475639009e-01 4.271831790058847611e-01 1.828944535901013690e-01 4.772333422710156592e-01 2.564773455194128138e-01 7.120329875362141347e-01 8.952243430110462530e-01 1.808777012183288013e-01 3.612151871458374464e-01 3.960999167923041631e-01 1.821669970670747318e-02 8.835474857189200559e-01 1.353104648821573663e-01 3.457291739160937016e-01 1.126467375304566199e-01 4.107293162402323450e-01 4.051719311053743056e-01 4.007382985250427243e-01 1.286905671428811848e-01
    -2.910657003883979632e-01 9.616259180685315933e-03 2.033032441536681834e-01 1.096599110293863255e-01 4.191101704605176836e-01 5.462131536027151624e-01 8.393047907010142694e-01 9.046805198676335369e-01 7.009863472176891541e-01 2.508215985039629059e-01 6.754410796667598138e-01 6.740895474032024826e-01 1.358993708621679675e-01 8.219861775211464439e-01 6.322220445623235596e-01 2.766813559002430090e-01 6.575983861590951607e-01 9.515869708336625044e-01 8.654526462353933081e-01 3.450245117834797037e-01 5.649032890631299209e-01 4.717687914789682191e-01 3.296483580510030098e-01
    -9.172477457635394016e-01 3.057396583041891436e-01 7.335332344225760082e-01 8.370236206345178509e-01 3.765464253115927695e-01 5.089680319287778199e-01 1.202325719268168003e-01 9.717771065272349240e-01 5.907820104019682050e-01 9.809211614977710880e-01 9.064285003671219698e-01 8.848841466121748489e-01 2.043407730734815297e-01 9.157600394927275511e-01 4.532260315147775831e-01 4.241077335005828397e-01 1.751730149568804240e-01 4.090412146081819911e-01 3.632197861847064058e-02 5.832539334970230360e-01 4.041848151536805434e-01 3.603643989086504629e-01 1.838411383882069261e-01
    -2.508806403290032572e-01 4.381403985282813496e-01 4.694787405018008286e-02 6.353900562024634713e-01 1.200813444244532846e-01 6.072397042913001419e-01 9.937255904754030977e-01 4.916670237677555066e-01 3.473845913923001572e-01 3.526875922864345370e-01 5.448595548197197047e-01 2.245096010156972799e-01 9.003258279804994269e-01 3.534560469735994470e-01 2.989266066346342177e-01 4.621024982808636938e-01 9.626538866576676012e-01 9.791401720716153001e-01 7.138514287330390840e-01 9.832862333928654719e-01 3.233999591031431198e-01 5.406467224926423398e-01 9.581890295057201579e-01
    -5.210583601680578436e-01 4.598159993059653949e-01 2.111497132057748027e-01 5.949977700916546652e-01 6.342618461422359077e-01 9.888228769705599275e-01 6.096770711536318998e-01 7.548431368960863974e-01 7.490858664860100546e-01 3.186213496546415058e-01 7.895687083231245351e-01 4.178326793268141159e-01 8.095818334534051752e-01 7.886271673523481684e-01 4.038905626506847923e-01 3.652649247094948981e-01 8.267205959224892542e-01 6.433617243328785262e-01 3.117681563249452559e-01 9.675995575054980868e-01 3.675673836358472890e-01 5.863757289184046151e-01 9.099029857959717305e-02
    -4.024573981231733821e-01 3.578997554002771864e-01 3.519299868071553705e-01 7.417747693762357653e-01 2.963713903285800644e-01 9.602967989298948348e-01 3.811392331739601458e-01 5.493237898295448840e-01 6.835113342793640578e-01 2.304506220807415184e-01 3.727299857731285471e-01 5.450263991912108752e-01 6.951521210987908761e-01 6.474582745861203747e-01 6.316089475403589004e-01 5.672043967425510758e-02 9.034937506977609445e-01 2.332567550780038079e-01 1.096955741449157085e-02 8.870663813493575578e-01 4.384385452180562526e-01 7.100898998169548060e-01 3.245358176196319056e-01
    -9.162009194452818139e-01 5.572224742426723498e-02 3.445910686865658601e-01 9.683564008127462097e-01 9.375063149031520604e-01 9.128188852869822956e-02 9.613605414326487075e-01 5.298598697556915482e-01 6.724799695520149445e-01 1.269103938571825019e-02 1.008406153387807480e-01 8.951105272379104028e-01 1.585460318853607609e-01 6.739986455059543413e-01 5.345419321702655768e-01 6.248843899572337213e-01 3.050288488994817859e-01 1.423645553465189284e-01 1.802121190541096096e-01 9.474646822694763326e-01 2.345716438587298613e-01 9.688281784764296578e-01 1.845165243240991515e-01
    -2.548297646910531178e-01 2.580877375379494465e-01 1.355482532666937301e-01 6.478812986505504412e-01 9.971695982152032345e-01 2.606721082477282403e-01 5.483439686378906996e-01 4.409612606704470528e-01 4.396442074915688503e-01 7.414262832597111608e-01 7.308840725375539416e-01 8.072095530497225280e-02 6.829509968656330976e-01 5.700030854230387911e-01 3.801845336730320657e-01 2.481059916867158766e-01 3.977295094395927322e-03 5.749480512407895150e-01 4.112033136603401307e-01 8.676159710377848722e-01 9.062646588480167686e-01 3.326691167317923359e-01 8.498307982774666591e-01
    -4.464338109330643345e-01 8.546516760817471914e-01 7.384800352329814466e-01 3.692485164984804502e-02 2.915662689505471583e-02 9.010049994217171898e-01 8.622900253010918892e-01 9.786230638032608065e-01 6.546824077297251909e-01 6.342297560006789903e-01 2.230339826582647955e-01 7.658846744185553446e-01 4.603043831539479491e-01 2.017100469861691225e-01 4.891590639893540482e-01 1.937140918314912419e-01 8.161582138652878626e-01 5.597293607114051106e-02 8.423261093326828153e-02 5.105392204475533990e-02 8.234193902673621057e-01 1.784268309975372002e-01 9.118997881986501408e-02
    -8.588746913421980711e-01 1.479641118621310980e-02 1.375875301146138874e-01 7.533888774725254756e-01 5.782592791549248101e-01 9.128573037619659436e-01 1.831275762880391067e-01 3.471382864827737835e-01 4.859524740929310749e-02 8.955146541561730400e-01 4.787220791101074457e-01 4.222803577759057791e-01 8.469923964908064873e-01 6.300290047587608910e-02 1.020873237837905956e-01 3.585612487182909813e-02 6.320107119904569970e-01 5.891245970008752719e-01 1.104698053665007507e-01 4.233226558073774903e-01 4.432217054386708988e-01 2.864765416628194394e-01 2.489777211814803159e-02
    -5.343810659756068615e-01 4.829076396403546578e-01 8.364480888953172988e-01 8.931374995414760321e-01 6.034161442354715188e-01 3.578336000768178593e-03 4.100579775972763574e-01 3.968667908067096128e-01 5.897163653686778861e-01 3.003241263928478899e-01 2.520935203143799264e-01 3.112129371563532310e-02 9.052865295974613646e-01 1.172285124002711010e-01 4.840001666149388315e-01 3.424620676348436588e-01 5.526057133826853818e-01 6.346139530261846184e-01 5.747945930485597321e-01 1.389915612177697879e-01 2.413801217666421417e-01 7.829900796662081497e-01 7.213528084845653998e-01
    -9.384509283406079483e-01 6.303019601671526750e-01 1.787921522728125323e-01 1.556003868047917127e-02 5.662397078816850948e-01 3.437473614806091371e-01 8.615844972800188462e-01 7.624380237306396246e-01 1.096468347898514883e-01 1.276566836610887323e-01 8.479188493443535757e-01 3.634713454428405432e-01 7.478112314318967613e-01 9.856395696968375253e-01 6.250293654177319080e-02 1.919327272501809567e-01 1.415594476031050153e-01 7.224057351041784925e-01 8.452145259310355208e-01 5.434318833772002755e-01 5.177620959731277228e-02 3.358977598185840518e-01 2.542654881527960375e-01
    -4.800909104006243489e-01 3.651345393613150137e-01 3.657093052788148446e-01 8.579662326651369408e-01 5.787694361240260932e-01 6.491966196891312268e-01 3.252508517294879775e-01 8.639694334693422961e-01 3.028097078756678551e-01 6.295814666338699350e-01 7.305627351548695803e-01 6.975931849120264872e-03 8.321205159004851915e-01 2.681809305821257761e-01 3.628869474597150591e-01 9.598981434716586936e-01 5.947913523332928332e-01 7.794864238003402779e-01 2.819511239444029149e-01 5.134200958476284882e-01 7.284684743064278045e-01 3.099571109539331903e-01 1.502222882866774967e-01
    -2.463382654375219083e-01 4.465700737264240994e-01 7.180855317941433613e-01 5.056099420785193921e-01 6.182117344332578313e-01 2.370453793561340117e-01 9.831748018047525850e-01 6.397098184531551102e-01 8.260469782208745837e-02 7.474671691560941245e-01 9.963429983418570224e-02 5.450078811081275898e-01 5.370188678062637333e-02 2.774024442708808991e-01 2.082643088545442778e-01 2.704155352788065736e-01 7.225035580445194894e-01 4.866791976239246420e-01 1.357043111201584606e-01 7.911335827987711067e-01 7.278977102006007893e-01 6.880892094410231419e-01 1.029231496520791600e-01
    -6.901796117735281566e-01 1.558248977395644275e-01 4.241818789360329855e-01 5.055658246392458199e-01 1.756288758075611467e-01 4.215083703818177652e-01 7.809231602323289945e-01 1.170053878686481141e-01 6.497026323614403243e-01 5.733120641440232479e-01 4.407703406152092551e-01 5.608677124532297498e-01 7.471045703286000039e-01 3.334604336022076732e-01 8.927208811415126011e-01 9.794565286182396191e-01 9.621542824973521313e-01 3.945825239405253981e-01 8.338963875792834157e-01 9.310552325082104286e-01 7.688283033784242271e-01 3.798823731047119567e-01 1.459993613028365278e-02
    -7.848623555505630511e-01 2.681039365355797344e-03 7.833208051794043891e-01 8.184381915171493604e-01 4.682581645582317709e-01 2.391069309436419932e-01 1.765377537168698607e-01 9.863494676539893424e-01 4.378412300863872009e-01 7.494505491149090481e-01 1.942180356195394308e-01 9.981402467222395547e-01 7.992190944052800505e-01 1.350875702852057936e-01 4.950149186748543650e-01 7.243422481248201761e-01 3.544596746353472216e-01 8.320192561472177228e-01 9.776840296475269865e-01 7.733852731914863110e-01 2.305732998099923048e-01 9.746878189802981041e-01 7.747723331200035979e-01
    -6.521099013127149568e-01 5.452399443648201505e-01 8.146707517183656710e-01 3.827256063695345656e-01 7.954832091744263867e-01 7.834427643148527132e-01 9.661317930643520402e-02 9.215673965718058636e-01 4.914305728788055383e-01 4.105628408027649501e-01 9.844647830893304974e-02 3.974831165301851987e-01 3.857608898053827007e-01 5.520210781401946321e-01 3.445787541654143915e-03 4.552922057017416702e-01 7.456544561760444223e-01 4.753985092154335845e-01 2.821385239833401615e-01 7.560136035104459973e-01 8.453142510471420845e-01 6.679627143276523071e-01 6.910882868284401459e-01
    -8.526493480446283302e-01 1.183917973068240315e-01 6.163988861865119517e-01 5.751899460059114455e-01 1.638797964925038375e-01 8.214597298784013235e-01 5.424670654187370156e-01 1.806631819658732763e-01 9.268107278221827672e-01 4.127397378597359445e-01 7.529877485901653733e-01 1.714251090083847018e-01 2.601487784245806179e-01 2.028326156742237263e-01 5.299879450122358948e-01 7.587877062981395193e-01 4.070738595375062996e-01 3.546903049793261875e-01 8.695365138547607176e-01 1.447085661525142619e-01 3.193366245820845606e-01 8.797841086211429795e-01 2.666562188639977071e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/iris.txt b/scipy-0.10.1/scipy/spatial/tests/iris.txt
    deleted file mode 100644
    index 4d78390c25..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/iris.txt
    +++ /dev/null
    @@ -1,150 +0,0 @@
    -5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -4.900000000000000355e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -4.700000000000000178e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01
    -4.599999999999999645e+00 3.100000000000000089e+00 1.500000000000000000e+00 2.000000000000000111e-01
    -5.000000000000000000e+00 3.600000000000000089e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -5.400000000000000355e+00 3.899999999999999911e+00 1.699999999999999956e+00 4.000000000000000222e-01
    -4.599999999999999645e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.999999999999999889e-01
    -5.000000000000000000e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01
    -4.400000000000000355e+00 2.899999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01
    -5.400000000000000355e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01
    -4.799999999999999822e+00 3.399999999999999911e+00 1.600000000000000089e+00 2.000000000000000111e-01
    -4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 1.000000000000000056e-01
    -4.299999999999999822e+00 3.000000000000000000e+00 1.100000000000000089e+00 1.000000000000000056e-01
    -5.799999999999999822e+00 4.000000000000000000e+00 1.199999999999999956e+00 2.000000000000000111e-01
    -5.700000000000000178e+00 4.400000000000000355e+00 1.500000000000000000e+00 4.000000000000000222e-01
    -5.400000000000000355e+00 3.899999999999999911e+00 1.300000000000000044e+00 4.000000000000000222e-01
    -5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01
    -5.700000000000000178e+00 3.799999999999999822e+00 1.699999999999999956e+00 2.999999999999999889e-01
    -5.099999999999999645e+00 3.799999999999999822e+00 1.500000000000000000e+00 2.999999999999999889e-01
    -5.400000000000000355e+00 3.399999999999999911e+00 1.699999999999999956e+00 2.000000000000000111e-01
    -5.099999999999999645e+00 3.700000000000000178e+00 1.500000000000000000e+00 4.000000000000000222e-01
    -4.599999999999999645e+00 3.600000000000000089e+00 1.000000000000000000e+00 2.000000000000000111e-01
    -5.099999999999999645e+00 3.299999999999999822e+00 1.699999999999999956e+00 5.000000000000000000e-01
    -4.799999999999999822e+00 3.399999999999999911e+00 1.899999999999999911e+00 2.000000000000000111e-01
    -5.000000000000000000e+00 3.000000000000000000e+00 1.600000000000000089e+00 2.000000000000000111e-01
    -5.000000000000000000e+00 3.399999999999999911e+00 1.600000000000000089e+00 4.000000000000000222e-01
    -5.200000000000000178e+00 3.500000000000000000e+00 1.500000000000000000e+00 2.000000000000000111e-01
    -5.200000000000000178e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -4.700000000000000178e+00 3.200000000000000178e+00 1.600000000000000089e+00 2.000000000000000111e-01
    -4.799999999999999822e+00 3.100000000000000089e+00 1.600000000000000089e+00 2.000000000000000111e-01
    -5.400000000000000355e+00 3.399999999999999911e+00 1.500000000000000000e+00 4.000000000000000222e-01
    -5.200000000000000178e+00 4.099999999999999645e+00 1.500000000000000000e+00 1.000000000000000056e-01
    -5.500000000000000000e+00 4.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01
    -5.000000000000000000e+00 3.200000000000000178e+00 1.199999999999999956e+00 2.000000000000000111e-01
    -5.500000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01
    -4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01
    -4.400000000000000355e+00 3.000000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01
    -5.099999999999999645e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01
    -5.000000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.999999999999999889e-01
    -4.500000000000000000e+00 2.299999999999999822e+00 1.300000000000000044e+00 2.999999999999999889e-01
    -4.400000000000000355e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01
    -5.000000000000000000e+00 3.500000000000000000e+00 1.600000000000000089e+00 5.999999999999999778e-01
    -5.099999999999999645e+00 3.799999999999999822e+00 1.899999999999999911e+00 4.000000000000000222e-01
    -4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01
    -5.099999999999999645e+00 3.799999999999999822e+00 1.600000000000000089e+00 2.000000000000000111e-01
    -4.599999999999999645e+00 3.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -5.299999999999999822e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01
    -5.000000000000000000e+00 3.299999999999999822e+00 1.399999999999999911e+00 2.000000000000000111e-01
    -7.000000000000000000e+00 3.200000000000000178e+00 4.700000000000000178e+00 1.399999999999999911e+00
    -6.400000000000000355e+00 3.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00
    -6.900000000000000355e+00 3.100000000000000089e+00 4.900000000000000355e+00 1.500000000000000000e+00
    -5.500000000000000000e+00 2.299999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00
    -6.500000000000000000e+00 2.799999999999999822e+00 4.599999999999999645e+00 1.500000000000000000e+00
    -5.700000000000000178e+00 2.799999999999999822e+00 4.500000000000000000e+00 1.300000000000000044e+00
    -6.299999999999999822e+00 3.299999999999999822e+00 4.700000000000000178e+00 1.600000000000000089e+00
    -4.900000000000000355e+00 2.399999999999999911e+00 3.299999999999999822e+00 1.000000000000000000e+00
    -6.599999999999999645e+00 2.899999999999999911e+00 4.599999999999999645e+00 1.300000000000000044e+00
    -5.200000000000000178e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.399999999999999911e+00
    -5.000000000000000000e+00 2.000000000000000000e+00 3.500000000000000000e+00 1.000000000000000000e+00
    -5.900000000000000355e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.500000000000000000e+00
    -6.000000000000000000e+00 2.200000000000000178e+00 4.000000000000000000e+00 1.000000000000000000e+00
    -6.099999999999999645e+00 2.899999999999999911e+00 4.700000000000000178e+00 1.399999999999999911e+00
    -5.599999999999999645e+00 2.899999999999999911e+00 3.600000000000000089e+00 1.300000000000000044e+00
    -6.700000000000000178e+00 3.100000000000000089e+00 4.400000000000000355e+00 1.399999999999999911e+00
    -5.599999999999999645e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00
    -5.799999999999999822e+00 2.700000000000000178e+00 4.099999999999999645e+00 1.000000000000000000e+00
    -6.200000000000000178e+00 2.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00
    -5.599999999999999645e+00 2.500000000000000000e+00 3.899999999999999911e+00 1.100000000000000089e+00
    -5.900000000000000355e+00 3.200000000000000178e+00 4.799999999999999822e+00 1.800000000000000044e+00
    -6.099999999999999645e+00 2.799999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00
    -6.299999999999999822e+00 2.500000000000000000e+00 4.900000000000000355e+00 1.500000000000000000e+00
    -6.099999999999999645e+00 2.799999999999999822e+00 4.700000000000000178e+00 1.199999999999999956e+00
    -6.400000000000000355e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00
    -6.599999999999999645e+00 3.000000000000000000e+00 4.400000000000000355e+00 1.399999999999999911e+00
    -6.799999999999999822e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.399999999999999911e+00
    -6.700000000000000178e+00 3.000000000000000000e+00 5.000000000000000000e+00 1.699999999999999956e+00
    -6.000000000000000000e+00 2.899999999999999911e+00 4.500000000000000000e+00 1.500000000000000000e+00
    -5.700000000000000178e+00 2.600000000000000089e+00 3.500000000000000000e+00 1.000000000000000000e+00
    -5.500000000000000000e+00 2.399999999999999911e+00 3.799999999999999822e+00 1.100000000000000089e+00
    -5.500000000000000000e+00 2.399999999999999911e+00 3.700000000000000178e+00 1.000000000000000000e+00
    -5.799999999999999822e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.199999999999999956e+00
    -6.000000000000000000e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.600000000000000089e+00
    -5.400000000000000355e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00
    -6.000000000000000000e+00 3.399999999999999911e+00 4.500000000000000000e+00 1.600000000000000089e+00
    -6.700000000000000178e+00 3.100000000000000089e+00 4.700000000000000178e+00 1.500000000000000000e+00
    -6.299999999999999822e+00 2.299999999999999822e+00 4.400000000000000355e+00 1.300000000000000044e+00
    -5.599999999999999645e+00 3.000000000000000000e+00 4.099999999999999645e+00 1.300000000000000044e+00
    -5.500000000000000000e+00 2.500000000000000000e+00 4.000000000000000000e+00 1.300000000000000044e+00
    -5.500000000000000000e+00 2.600000000000000089e+00 4.400000000000000355e+00 1.199999999999999956e+00
    -6.099999999999999645e+00 3.000000000000000000e+00 4.599999999999999645e+00 1.399999999999999911e+00
    -5.799999999999999822e+00 2.600000000000000089e+00 4.000000000000000000e+00 1.199999999999999956e+00
    -5.000000000000000000e+00 2.299999999999999822e+00 3.299999999999999822e+00 1.000000000000000000e+00
    -5.599999999999999645e+00 2.700000000000000178e+00 4.200000000000000178e+00 1.300000000000000044e+00
    -5.700000000000000178e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.199999999999999956e+00
    -5.700000000000000178e+00 2.899999999999999911e+00 4.200000000000000178e+00 1.300000000000000044e+00
    -6.200000000000000178e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00
    -5.099999999999999645e+00 2.500000000000000000e+00 3.000000000000000000e+00 1.100000000000000089e+00
    -5.700000000000000178e+00 2.799999999999999822e+00 4.099999999999999645e+00 1.300000000000000044e+00
    -6.299999999999999822e+00 3.299999999999999822e+00 6.000000000000000000e+00 2.500000000000000000e+00
    -5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00
    -7.099999999999999645e+00 3.000000000000000000e+00 5.900000000000000355e+00 2.100000000000000089e+00
    -6.299999999999999822e+00 2.899999999999999911e+00 5.599999999999999645e+00 1.800000000000000044e+00
    -6.500000000000000000e+00 3.000000000000000000e+00 5.799999999999999822e+00 2.200000000000000178e+00
    -7.599999999999999645e+00 3.000000000000000000e+00 6.599999999999999645e+00 2.100000000000000089e+00
    -4.900000000000000355e+00 2.500000000000000000e+00 4.500000000000000000e+00 1.699999999999999956e+00
    -7.299999999999999822e+00 2.899999999999999911e+00 6.299999999999999822e+00 1.800000000000000044e+00
    -6.700000000000000178e+00 2.500000000000000000e+00 5.799999999999999822e+00 1.800000000000000044e+00
    -7.200000000000000178e+00 3.600000000000000089e+00 6.099999999999999645e+00 2.500000000000000000e+00
    -6.500000000000000000e+00 3.200000000000000178e+00 5.099999999999999645e+00 2.000000000000000000e+00
    -6.400000000000000355e+00 2.700000000000000178e+00 5.299999999999999822e+00 1.899999999999999911e+00
    -6.799999999999999822e+00 3.000000000000000000e+00 5.500000000000000000e+00 2.100000000000000089e+00
    -5.700000000000000178e+00 2.500000000000000000e+00 5.000000000000000000e+00 2.000000000000000000e+00
    -5.799999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 2.399999999999999911e+00
    -6.400000000000000355e+00 3.200000000000000178e+00 5.299999999999999822e+00 2.299999999999999822e+00
    -6.500000000000000000e+00 3.000000000000000000e+00 5.500000000000000000e+00 1.800000000000000044e+00
    -7.700000000000000178e+00 3.799999999999999822e+00 6.700000000000000178e+00 2.200000000000000178e+00
    -7.700000000000000178e+00 2.600000000000000089e+00 6.900000000000000355e+00 2.299999999999999822e+00
    -6.000000000000000000e+00 2.200000000000000178e+00 5.000000000000000000e+00 1.500000000000000000e+00
    -6.900000000000000355e+00 3.200000000000000178e+00 5.700000000000000178e+00 2.299999999999999822e+00
    -5.599999999999999645e+00 2.799999999999999822e+00 4.900000000000000355e+00 2.000000000000000000e+00
    -7.700000000000000178e+00 2.799999999999999822e+00 6.700000000000000178e+00 2.000000000000000000e+00
    -6.299999999999999822e+00 2.700000000000000178e+00 4.900000000000000355e+00 1.800000000000000044e+00
    -6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.100000000000000089e+00
    -7.200000000000000178e+00 3.200000000000000178e+00 6.000000000000000000e+00 1.800000000000000044e+00
    -6.200000000000000178e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.800000000000000044e+00
    -6.099999999999999645e+00 3.000000000000000000e+00 4.900000000000000355e+00 1.800000000000000044e+00
    -6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.100000000000000089e+00
    -7.200000000000000178e+00 3.000000000000000000e+00 5.799999999999999822e+00 1.600000000000000089e+00
    -7.400000000000000355e+00 2.799999999999999822e+00 6.099999999999999645e+00 1.899999999999999911e+00
    -7.900000000000000355e+00 3.799999999999999822e+00 6.400000000000000355e+00 2.000000000000000000e+00
    -6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.200000000000000178e+00
    -6.299999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 1.500000000000000000e+00
    -6.099999999999999645e+00 2.600000000000000089e+00 5.599999999999999645e+00 1.399999999999999911e+00
    -7.700000000000000178e+00 3.000000000000000000e+00 6.099999999999999645e+00 2.299999999999999822e+00
    -6.299999999999999822e+00 3.399999999999999911e+00 5.599999999999999645e+00 2.399999999999999911e+00
    -6.400000000000000355e+00 3.100000000000000089e+00 5.500000000000000000e+00 1.800000000000000044e+00
    -6.000000000000000000e+00 3.000000000000000000e+00 4.799999999999999822e+00 1.800000000000000044e+00
    -6.900000000000000355e+00 3.100000000000000089e+00 5.400000000000000355e+00 2.100000000000000089e+00
    -6.700000000000000178e+00 3.100000000000000089e+00 5.599999999999999645e+00 2.399999999999999911e+00
    -6.900000000000000355e+00 3.100000000000000089e+00 5.099999999999999645e+00 2.299999999999999822e+00
    -5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00
    -6.799999999999999822e+00 3.200000000000000178e+00 5.900000000000000355e+00 2.299999999999999822e+00
    -6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.500000000000000000e+00
    -6.700000000000000178e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.299999999999999822e+00
    -6.299999999999999822e+00 2.500000000000000000e+00 5.000000000000000000e+00 1.899999999999999911e+00
    -6.500000000000000000e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.000000000000000000e+00
    -6.200000000000000178e+00 3.399999999999999911e+00 5.400000000000000355e+00 2.299999999999999822e+00
    -5.900000000000000355e+00 3.000000000000000000e+00 5.099999999999999645e+00 1.800000000000000044e+00
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-boolean-inp.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-boolean-inp.txt
    deleted file mode 100644
    index 0636cc9f45..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-boolean-inp.txt
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
    -1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
    -0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
    -0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    -1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-chebychev-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-chebychev-ml-iris.txt
    deleted file mode 100644
    index 0aff1267ca..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-chebychev-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   5.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   7.0000000e-01   9.0000000e-01   4.0000000e-01   1.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   2.0000000e-01   1.0000000e-01   1.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e-01   1.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   2.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   
4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   2.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   1.0000000e-01   6.0000000e-01   1.0000000e+00   1.4000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   8.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   2.0000000e-01   5.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   
4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   2.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.1000000e+00   1.2000000e+00   7.0000000e-01   4.0000000e-01   1.0000000e+00   6.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   1.0000000e+00   2.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e-01   6.0000000e-01   3.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   
2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   5.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   8.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.2000000e+00   1.3000000e+00   8.0000000e-01   5.0000000e-01   1.1000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e-01   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   3.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   
2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   4.0000000e-01   4.0000000e-01   2.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   8.0000000e-01   4.0000000e-01   1.0000000e-01   7.0000000e-01   2.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   2.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   
2.0000000e-01   1.0000000e-01   1.3000000e+00   6.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   8.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   2.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   
5.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   6.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.9000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.4000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   
3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   4.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   4.0000000e-01   4.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   5.0000000e-01   1.1000000e+00   5.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   2.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   1.1000000e+00   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   7.0000000e-01   4.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   
3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   2.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   1.0000000e-01   2.0000000e-01   1.1000000e+00   6.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   
3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   5.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   3.0000000e-01   1.4000000e+00   1.5000000e+00   1.0000000e+00   7.0000000e-01   1.3000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   1.2000000e+00   1.3000000e+00   5.0000000e-01   6.0000000e-01   1.1000000e+00   5.0000000e-01   1.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   9.0000000e-01   6.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   
2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   3.0000000e-01   1.0000000e-01   6.0000000e-01   9.0000000e-01   1.3000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   1.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   1.0000000e-01   5.0000000e-01   1.0000000e+00   1.1000000e+00   0.0000000e+00   3.0000000e-01   6.0000000e-01   0.0000000e+00   5.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   
2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   6.0000000e-01   7.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e+00   3.0000000e-01   4.0000000e-01   1.4000000e+00   1.0000000e+00   4.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   
2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   4.0000000e-01   5.0000000e-01   1.0000000e+00   1.0000000e+00   6.0000000e-01   3.0000000e-01   9.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   1.1000000e+00   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   5.0000000e-01   2.0000000e-01   3.1000000e+00   2.9000000e+00   
3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   5.0000000e-01   1.0000000e+00   1.4000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   2.0000000e-01   6.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e-01   2.0000000e-01   7.0000000e-01   1.0000000e-01   4.0000000e-01   4.0000000e-01   
5.0000000e-01   7.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   2.0000000e-01   8.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   1.5000000e+00   1.4000000e+00   1.1000000e+00   8.0000000e-01   1.4000000e+00   8.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   9.0000000e-01   
5.0000000e-01   5.0000000e-01   1.1000000e+00   1.1000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   1.2000000e+00   6.0000000e-01   2.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   8.0000000e-01   3.0000000e-01   1.0000000e+00   7.0000000e-01   3.6000000e+00   3.4000000e+00   3.8000000e+00   2.9000000e+00   3.5000000e+00   3.4000000e+00   3.6000000e+00   2.2000000e+00   3.5000000e+00   2.8000000e+00   2.4000000e+00   3.1000000e+00   2.9000000e+00   3.6000000e+00   2.5000000e+00   3.3000000e+00   3.4000000e+00   3.0000000e+00   3.4000000e+00   2.8000000e+00   3.7000000e+00   2.9000000e+00   3.8000000e+00   3.6000000e+00   3.2000000e+00   3.3000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   2.4000000e+00   2.7000000e+00   2.6000000e+00   2.8000000e+00   4.0000000e+00   3.4000000e+00   3.4000000e+00   3.6000000e+00   3.3000000e+00   3.0000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   2.9000000e+00   2.2000000e+00   3.1000000e+00   3.1000000e+00   3.1000000e+00   3.2000000e+00   1.9000000e+00   3.0000000e+00   4.9000000e+00   4.0000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.5000000e+00   3.4000000e+00   5.2000000e+00   4.7000000e+00   5.0000000e+00   4.0000000e+00   4.2000000e+00   4.4000000e+00   3.9000000e+00   4.0000000e+00   4.2000000e+00   4.4000000e+00   5.6000000e+00   5.8000000e+00   3.9000000e+00   4.6000000e+00   3.8000000e+00   5.6000000e+00   3.8000000e+00   4.6000000e+00   4.9000000e+00   3.7000000e+00   3.8000000e+00   4.5000000e+00   4.7000000e+00   5.0000000e+00   5.3000000e+00   4.5000000e+00   4.0000000e+00   4.5000000e+00   5.0000000e+00   4.5000000e+00   4.4000000e+00   3.7000000e+00   4.3000000e+00   4.5000000e+00   4.0000000e+00   4.0000000e+00   4.8000000e+00   4.6000000e+00   4.1000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   4.0000000e+00   4.0000000e-01   4.0000000e-01   7.0000000e-01   5.0000000e-01   
7.0000000e-01   6.0000000e-01   7.0000000e-01   1.2000000e+00   7.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   1.1000000e+00   1.0000000e+00   6.0000000e-01   6.0000000e-01   3.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   9.0000000e-01   1.4000000e+00   7.0000000e-01   8.0000000e-01   1.7000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   8.0000000e-01   3.5000000e+00   3.3000000e+00   3.7000000e+00   2.8000000e+00   3.4000000e+00   3.3000000e+00   3.5000000e+00   2.1000000e+00   3.4000000e+00   2.7000000e+00   2.3000000e+00   3.0000000e+00   2.8000000e+00   3.5000000e+00   2.4000000e+00   3.2000000e+00   3.3000000e+00   2.9000000e+00   3.3000000e+00   2.7000000e+00   3.6000000e+00   2.8000000e+00   3.7000000e+00   3.5000000e+00   3.1000000e+00   3.2000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   2.3000000e+00   2.6000000e+00   2.5000000e+00   2.7000000e+00   3.9000000e+00   3.3000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   2.9000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.8000000e+00   2.1000000e+00   3.0000000e+00   3.0000000e+00   3.0000000e+00   3.1000000e+00   1.8000000e+00   2.9000000e+00   4.8000000e+00   3.9000000e+00   4.7000000e+00   4.4000000e+00   4.6000000e+00   5.4000000e+00   3.3000000e+00   5.1000000e+00   4.6000000e+00   4.9000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   5.5000000e+00   5.7000000e+00   3.8000000e+00   4.5000000e+00   3.7000000e+00   5.5000000e+00   3.7000000e+00   4.5000000e+00   4.8000000e+00   3.6000000e+00   3.7000000e+00   4.4000000e+00   4.6000000e+00   4.9000000e+00   5.2000000e+00   4.4000000e+00   3.9000000e+00   4.4000000e+00   4.9000000e+00   4.4000000e+00   4.3000000e+00   3.6000000e+00   4.2000000e+00   4.4000000e+00   3.9000000e+00   3.9000000e+00   4.7000000e+00   
4.5000000e+00   4.0000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.9000000e+00   5.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   7.0000000e-01   1.1000000e+00   1.1000000e+00   1.0000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   1.0000000e+00   1.2000000e+00   1.3000000e+00   1.0000000e+00   5.0000000e-01   2.0000000e-01   1.3000000e+00   1.2000000e+00   9.0000000e-01   1.3000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   2.1000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   1.4000000e+00   6.0000000e-01   1.2000000e+00   7.0000000e-01   1.1000000e+00   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   2.0000000e+00   3.1000000e+00   2.4000000e+00   2.4000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   2.1000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.9000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   
4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   4.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   4.0000000e-01   6.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   6.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   
3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   6.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   2.0000000e-01   1.0000000e-01   1.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e-01   1.0000000e-01   1.2000000e+00   7.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   2.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   
4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   6.0000000e-01   9.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   1.3000000e+00   6.0000000e-01   7.0000000e-01   1.5000000e+00   1.3000000e+00   7.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.3000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   
3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   4.0000000e-01   1.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   3.0000000e-01   1.5000000e+00   7.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e-01   6.0000000e-01   2.0000000e-01   5.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   
4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   3.0000000e-01   8.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e+00   3.0000000e-01   4.0000000e-01   1.1000000e+00   1.0000000e+00   4.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.3000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   
4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   5.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   1.4000000e+00   7.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   
4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   7.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   9.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   1.3000000e+00   4.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   4.0000000e-01   3.7000000e+00   3.5000000e+00   3.9000000e+00   3.0000000e+00   3.6000000e+00   3.5000000e+00   3.7000000e+00   2.3000000e+00   3.6000000e+00   2.9000000e+00   2.5000000e+00   3.2000000e+00   3.0000000e+00   3.7000000e+00   2.6000000e+00   3.4000000e+00   3.5000000e+00   3.1000000e+00   3.5000000e+00   2.9000000e+00   3.8000000e+00   3.0000000e+00   3.9000000e+00   3.7000000e+00   3.3000000e+00   3.4000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   2.5000000e+00   2.8000000e+00   2.7000000e+00   2.9000000e+00   4.1000000e+00   3.5000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   3.1000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.0000000e+00   2.3000000e+00   3.2000000e+00   3.2000000e+00   3.2000000e+00   3.3000000e+00   2.0000000e+00   3.1000000e+00   
5.0000000e+00   4.1000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.6000000e+00   3.5000000e+00   5.3000000e+00   4.8000000e+00   5.1000000e+00   4.1000000e+00   4.3000000e+00   4.5000000e+00   4.0000000e+00   4.1000000e+00   4.3000000e+00   4.5000000e+00   5.7000000e+00   5.9000000e+00   4.0000000e+00   4.7000000e+00   3.9000000e+00   5.7000000e+00   3.9000000e+00   4.7000000e+00   5.0000000e+00   3.8000000e+00   3.9000000e+00   4.6000000e+00   4.8000000e+00   5.1000000e+00   5.4000000e+00   4.6000000e+00   4.1000000e+00   4.6000000e+00   5.1000000e+00   4.6000000e+00   4.5000000e+00   3.8000000e+00   4.4000000e+00   4.6000000e+00   4.1000000e+00   4.1000000e+00   4.9000000e+00   4.7000000e+00   4.2000000e+00   4.0000000e+00   4.2000000e+00   4.4000000e+00   4.1000000e+00   3.0000000e-01   3.0000000e-01   1.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   7.0000000e-01   2.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.3000000e+00   
2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   4.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   7.0000000e-01   7.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   6.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   2.8000000e+00   2.6000000e+00   3.0000000e+00   2.1000000e+00   2.7000000e+00   2.6000000e+00   2.8000000e+00   1.4000000e+00   2.7000000e+00   2.0000000e+00   1.6000000e+00   2.3000000e+00   2.1000000e+00   2.8000000e+00   1.7000000e+00   2.5000000e+00   2.6000000e+00   2.2000000e+00   2.6000000e+00   2.0000000e+00   2.9000000e+00   2.1000000e+00   3.0000000e+00   2.8000000e+00   2.4000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.6000000e+00   1.6000000e+00   1.9000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   2.6000000e+00   2.6000000e+00   2.8000000e+00   2.5000000e+00   2.2000000e+00   2.1000000e+00   2.5000000e+00   2.7000000e+00   2.1000000e+00   1.4000000e+00   2.3000000e+00   2.3000000e+00   2.3000000e+00   2.4000000e+00   1.1000000e+00   
2.2000000e+00   4.1000000e+00   3.2000000e+00   4.0000000e+00   3.7000000e+00   3.9000000e+00   4.7000000e+00   2.6000000e+00   4.4000000e+00   3.9000000e+00   4.2000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   4.8000000e+00   5.0000000e+00   3.1000000e+00   3.8000000e+00   3.0000000e+00   4.8000000e+00   3.0000000e+00   3.8000000e+00   4.1000000e+00   2.9000000e+00   3.0000000e+00   3.7000000e+00   3.9000000e+00   4.2000000e+00   4.5000000e+00   3.7000000e+00   3.2000000e+00   3.7000000e+00   4.2000000e+00   3.7000000e+00   3.6000000e+00   2.9000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   3.2000000e+00   4.0000000e+00   3.8000000e+00   3.3000000e+00   3.1000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   4.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   2.0000000e-01   8.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   
4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   2.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.0000000e-01   1.1000000e+00   6.0000000e-01   2.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   
4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   1.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e-01   2.0000000e-01   1.2000000e+00   8.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   
5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   5.0000000e-01   4.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   1.0000000e-01   2.0000000e-01   1.1000000e+00   8.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   2.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   
4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   1.0000000e-01   7.0000000e-01   9.0000000e-01   1.0000000e+00   2.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   3.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   
3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   6.0000000e-01   1.0000000e+00   1.1000000e+00   1.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   2.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   
4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   7.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   5.0000000e-01   1.0000000e+00   3.0000000e-01   4.0000000e-01   1.1000000e+00   1.0000000e+00   4.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   
3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   3.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   1.1000000e+00   7.0000000e-01   6.0000000e-01   1.8000000e+00   9.0000000e-01   6.0000000e-01   4.0000000e-01   1.1000000e+00   3.0000000e-01   9.0000000e-01   4.0000000e-01   8.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.1000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.6000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   
4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   1.1000000e+00   1.0000000e+00   7.0000000e-01   1.1000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   1.9000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   1.2000000e+00   4.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.2000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.7000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   
4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   3.0000000e-01   6.0000000e-01   0.0000000e+00   5.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   5.0000000e-01   3.0000000e-01   6.0000000e-01   3.0000000e-01   
3.0000000e-01   9.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   3.5000000e+00   3.3000000e+00   3.7000000e+00   2.8000000e+00   3.4000000e+00   3.3000000e+00   3.5000000e+00   2.1000000e+00   3.4000000e+00   2.7000000e+00   2.3000000e+00   3.0000000e+00   2.8000000e+00   3.5000000e+00   2.4000000e+00   3.2000000e+00   3.3000000e+00   2.9000000e+00   3.3000000e+00   2.7000000e+00   3.6000000e+00   2.8000000e+00   3.7000000e+00   3.5000000e+00   3.1000000e+00   3.2000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   2.3000000e+00   2.6000000e+00   2.5000000e+00   2.7000000e+00   3.9000000e+00   3.3000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   2.9000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.8000000e+00   2.1000000e+00   3.0000000e+00   3.0000000e+00   3.0000000e+00   3.1000000e+00   1.8000000e+00   2.9000000e+00   4.8000000e+00   3.9000000e+00   4.7000000e+00   4.4000000e+00   4.6000000e+00   5.4000000e+00   3.3000000e+00   5.1000000e+00   4.6000000e+00   4.9000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   5.5000000e+00   5.7000000e+00   3.8000000e+00   4.5000000e+00   3.7000000e+00   5.5000000e+00   3.7000000e+00   4.5000000e+00   4.8000000e+00   3.6000000e+00   3.7000000e+00   4.4000000e+00   4.6000000e+00   4.9000000e+00   5.2000000e+00   4.4000000e+00   3.9000000e+00   4.4000000e+00   4.9000000e+00   4.4000000e+00   4.3000000e+00   3.6000000e+00   4.2000000e+00   4.4000000e+00   3.9000000e+00   3.9000000e+00   4.7000000e+00   4.5000000e+00   4.0000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.9000000e+00   6.0000000e-01   1.1000000e+00   4.0000000e-01   5.0000000e-01   1.2000000e+00   1.1000000e+00   5.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   9.0000000e-01   2.0000000e-01   5.0000000e-01   3.4000000e+00   3.2000000e+00   
3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   5.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   
2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   7.0000000e-01   6.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e-01   9.0000000e-01   6.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   
2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   2.0000000e-01   1.1000000e+00   7.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   1.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   
2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   1.2000000e+00   6.0000000e-01   3.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   
3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   9.0000000e-01   1.2000000e+00   1.5000000e+00   7.0000000e-01   1.5000000e+00   9.0000000e-01   1.4000000e+00   1.0000000e+00   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   
3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   6.0000000e-01   7.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   9.0000000e-01   6.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   
3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   3.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   8.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.8000000e+00   2.6000000e+00   3.0000000e+00   2.1000000e+00   2.7000000e+00   2.6000000e+00   2.8000000e+00   1.4000000e+00   2.7000000e+00   
2.0000000e+00   1.8000000e+00   2.3000000e+00   2.1000000e+00   2.8000000e+00   1.7000000e+00   2.5000000e+00   2.6000000e+00   2.2000000e+00   2.6000000e+00   2.0000000e+00   2.9000000e+00   2.1000000e+00   3.0000000e+00   2.8000000e+00   2.4000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.6000000e+00   1.6000000e+00   1.9000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   2.6000000e+00   2.6000000e+00   2.8000000e+00   2.5000000e+00   2.2000000e+00   2.1000000e+00   2.5000000e+00   2.7000000e+00   2.1000000e+00   1.5000000e+00   2.3000000e+00   2.3000000e+00   2.3000000e+00   2.4000000e+00   1.3000000e+00   2.2000000e+00   4.1000000e+00   3.2000000e+00   4.0000000e+00   3.7000000e+00   3.9000000e+00   4.7000000e+00   2.6000000e+00   4.4000000e+00   3.9000000e+00   4.2000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   4.8000000e+00   5.0000000e+00   3.1000000e+00   3.8000000e+00   3.0000000e+00   4.8000000e+00   3.0000000e+00   3.8000000e+00   4.1000000e+00   2.9000000e+00   3.0000000e+00   3.7000000e+00   3.9000000e+00   4.2000000e+00   4.5000000e+00   3.7000000e+00   3.2000000e+00   3.7000000e+00   4.2000000e+00   3.7000000e+00   3.6000000e+00   2.9000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   3.2000000e+00   4.0000000e+00   3.8000000e+00   3.3000000e+00   3.1000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   8.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   
2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   2.0000000e-01   5.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   
4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   7.0000000e-01   4.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   
4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   
3.6000000e+00   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   2.0000000e-01   1.5000000e+00   5.0000000e-01   1.3000000e+00   7.0000000e-01   2.1000000e+00   4.0000000e-01   1.8000000e+00   2.0000000e+00   1.1000000e+00   1.0000000e+00   9.0000000e-01   1.4000000e+00   3.0000000e-01   1.4000000e+00   1.2000000e+00   1.0000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   
4.0000000e-01   4.0000000e-01   3.0000000e-01   1.0000000e+00   1.3000000e+00   1.5000000e+00   1.5000000e+00   1.2000000e+00   1.0000000e+00   1.6000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   1.4000000e+00   1.5000000e+00   1.5000000e+00   9.0000000e-01   1.2000000e+00   2.0000000e+00   1.4000000e+00   1.3000000e+00   1.3000000e+00   8.0000000e-01   1.9000000e+00   1.3000000e+00   1.3000000e+00   1.2000000e+00   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   2.1000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.3000000e+00   1.2000000e+00   9.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   1.0000000e+00   1.0000000e+00   1.4000000e+00   2.0000000e+00   7.0000000e-01   1.0000000e+00   1.3000000e+00   8.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.4000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   1.2000000e+00   1.2000000e+00   1.1000000e+00   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   9.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   1.5000000e+00   3.0000000e-01   1.2000000e+00   1.4000000e+00   5.0000000e-01   1.0000000e+00   3.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   1.0000000e+00   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   1.0000000e+00   9.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   4.0000000e-01   3.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   3.0000000e-01   6.0000000e-01   1.4000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   1.5000000e+00   7.0000000e-01   1.5000000e+00   6.0000000e-01   
1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.5000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   1.0000000e+00   1.2000000e+00   8.0000000e-01   2.2000000e+00   5.0000000e-01   1.2000000e+00   1.5000000e+00   4.0000000e-01   4.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   1.1000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   1.4000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   1.4000000e+00   4.0000000e-01   1.2000000e+00   6.0000000e-01   2.0000000e+00   3.0000000e-01   1.7000000e+00   1.9000000e+00   1.0000000e+00   9.0000000e-01   8.0000000e-01   1.3000000e+00   5.0000000e-01   1.3000000e+00   1.1000000e+00   9.0000000e-01   1.3000000e+00   1.0000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   2.0000000e-01   9.0000000e-01   1.4000000e+00   1.4000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   1.5000000e+00   9.0000000e-01   2.0000000e-01   8.0000000e-01   1.3000000e+00   1.4000000e+00   1.4000000e+00   8.0000000e-01   1.1000000e+00   1.9000000e+00   1.3000000e+00   1.2000000e+00   1.2000000e+00   7.0000000e-01   1.9000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.7000000e+00   2.0000000e+00   1.4000000e+00   9.0000000e-01   1.2000000e+00   5.0000000e-01   5.0000000e-01   6.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   6.0000000e-01   1.8000000e+00   2.0000000e+00   9.0000000e-01   8.0000000e-01   1.3000000e+00   1.8000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   7.0000000e-01   8.0000000e-01   7.0000000e-01   9.0000000e-01   
1.2000000e+00   1.5000000e+00   7.0000000e-01   6.0000000e-01   8.0000000e-01   1.2000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   7.0000000e-01   1.1000000e+00   4.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   9.0000000e-01   6.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.2000000e+00   6.0000000e-01   5.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   1.1000000e+00   7.0000000e-01   1.1000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   7.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   7.0000000e-01   1.0000000e+00   5.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   6.0000000e-01   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.2000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.4000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   8.0000000e-01   5.0000000e-01   1.6000000e+00   2.0000000e-01   1.3000000e+00   1.5000000e+00   6.0000000e-01   6.0000000e-01   4.0000000e-01   
1.0000000e+00   3.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   7.0000000e-01   5.0000000e-01   1.1000000e+00   6.0000000e-01   3.0000000e-01   5.0000000e-01   9.0000000e-01   1.0000000e+00   1.0000000e+00   4.0000000e-01   7.0000000e-01   1.5000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   3.0000000e-01   1.6000000e+00   8.0000000e-01   1.4000000e+00   7.0000000e-01   1.3000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e+00   1.6000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   5.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   2.1000000e+00   2.3000000e+00   6.0000000e-01   1.1000000e+00   9.0000000e-01   2.1000000e+00   3.0000000e-01   1.1000000e+00   1.4000000e+00   3.0000000e-01   4.0000000e-01   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   1.5000000e+00   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   7.0000000e-01   1.3000000e+00   1.1000000e+00   8.0000000e-01   4.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   1.2000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   3.0000000e-01   6.0000000e-01   4.0000000e-01   9.0000000e-01   1.0000000e+00   2.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   1.0000000e+00   3.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   1.0000000e+00   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   
3.0000000e-01   3.0000000e-01   5.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   8.0000000e-01   1.8000000e+00   1.3000000e+00   1.6000000e+00   8.0000000e-01   8.0000000e-01   1.1000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   2.2000000e+00   2.4000000e+00   6.0000000e-01   1.2000000e+00   7.0000000e-01   2.2000000e+00   6.0000000e-01   1.2000000e+00   1.5000000e+00   5.0000000e-01   5.0000000e-01   1.1000000e+00   1.5000000e+00   1.7000000e+00   2.2000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   2.0000000e+00   1.1000000e+00   1.0000000e+00   5.0000000e-01   1.2000000e+00   1.1000000e+00   1.2000000e+00   6.0000000e-01   1.4000000e+00   1.2000000e+00   1.0000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   6.0000000e-01   1.4000000e+00   4.0000000e-01   1.1000000e+00   1.3000000e+00   5.0000000e-01   1.1000000e+00   4.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   6.0000000e-01   1.1000000e+00   8.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   6.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   1.3000000e+00   6.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   4.0000000e-01   6.0000000e-01   8.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   1.1000000e+00   1.0000000e+00   7.0000000e-01   2.0000000e+00   6.0000000e-01   1.0000000e+00   1.3000000e+00   5.0000000e-01   
3.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   1.4000000e+00   9.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   1.2000000e+00   1.0000000e+00   7.0000000e-01   8.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   4.0000000e-01   1.0000000e+00   1.1000000e+00   1.4000000e+00   7.0000000e-01   1.8000000e+00   1.2000000e+00   9.0000000e-01   1.3000000e+00   7.0000000e-01   1.5000000e+00   1.2000000e+00   1.6000000e+00   1.4000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.8000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.8000000e+00   1.2000000e+00   1.2000000e+00   1.8000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.3000000e+00   9.0000000e-01   1.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   1.3000000e+00   3.0000000e-01   8.0000000e-01   2.7000000e+00   1.8000000e+00   2.6000000e+00   2.3000000e+00   2.5000000e+00   3.3000000e+00   1.2000000e+00   3.0000000e+00   2.5000000e+00   2.8000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   1.7000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   3.6000000e+00   1.7000000e+00   2.4000000e+00   1.6000000e+00   3.4000000e+00   1.6000000e+00   2.4000000e+00   2.7000000e+00   1.5000000e+00   1.6000000e+00   2.3000000e+00   2.5000000e+00   2.8000000e+00   3.1000000e+00   2.3000000e+00   1.8000000e+00   2.3000000e+00   2.8000000e+00   2.3000000e+00   2.2000000e+00   1.5000000e+00   2.1000000e+00   2.3000000e+00   2.0000000e+00   1.8000000e+00   2.6000000e+00   2.4000000e+00   1.9000000e+00   1.7000000e+00   1.9000000e+00   2.1000000e+00   1.8000000e+00   1.4000000e+00   1.6000000e+00   7.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   2.0000000e-01   1.0000000e+00   8.0000000e-01   7.0000000e-01   
1.0000000e+00   7.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   2.0000000e-01   2.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   1.1000000e+00   1.1000000e+00   8.0000000e-01   6.0000000e-01   1.2000000e+00   6.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   4.0000000e-01   1.6000000e+00   9.0000000e-01   1.4000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   7.0000000e-01   7.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   2.1000000e+00   2.3000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   2.1000000e+00   5.0000000e-01   1.1000000e+00   1.4000000e+00   5.0000000e-01   5.0000000e-01   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   1.5000000e+00   1.1000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.0000000e+00   8.0000000e-01   1.3000000e+00   1.2000000e+00   1.0000000e+00   6.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   9.0000000e-01   4.0000000e-01   1.5000000e+00   6.0000000e-01   6.0000000e-01   1.0000000e+00   4.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   1.2000000e+00   6.0000000e-01   8.0000000e-01   1.5000000e+00   1.1000000e+00   4.0000000e-01   3.0000000e-01   5.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   2.1000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   
1.9000000e+00   2.7000000e+00   6.0000000e-01   2.4000000e+00   1.9000000e+00   2.2000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   2.8000000e+00   3.0000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.8000000e+00   1.1000000e+00   1.8000000e+00   2.1000000e+00   1.0000000e+00   1.0000000e+00   1.7000000e+00   2.0000000e+00   2.2000000e+00   2.7000000e+00   1.7000000e+00   1.2000000e+00   1.7000000e+00   2.5000000e+00   1.7000000e+00   1.6000000e+00   9.0000000e-01   1.7000000e+00   1.7000000e+00   1.7000000e+00   1.2000000e+00   2.0000000e+00   1.8000000e+00   1.5000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.2000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   9.0000000e-01   1.7000000e+00   1.0000000e+00   8.0000000e-01   1.2000000e+00   6.0000000e-01   1.3000000e+00   1.1000000e+00   1.4000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   1.7000000e+00   1.0000000e+00   7.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.0000000e+00   1.4000000e+00   1.7000000e+00   1.3000000e+00   1.0000000e+00   5.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   3.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   1.2000000e+00   5.0000000e-01   8.0000000e-01   2.5000000e+00   1.6000000e+00   2.4000000e+00   2.1000000e+00   2.3000000e+00   3.1000000e+00   1.0000000e+00   2.8000000e+00   2.3000000e+00   2.6000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   3.4000000e+00   1.5000000e+00   2.2000000e+00   1.4000000e+00   3.2000000e+00   1.4000000e+00   2.2000000e+00   2.5000000e+00   1.3000000e+00   1.4000000e+00   2.1000000e+00   2.3000000e+00   2.6000000e+00   2.9000000e+00   2.1000000e+00   1.6000000e+00   2.1000000e+00   2.7000000e+00   2.1000000e+00   2.0000000e+00   1.3000000e+00   1.9000000e+00   
2.1000000e+00   1.9000000e+00   1.6000000e+00   2.4000000e+00   2.2000000e+00   1.7000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   6.0000000e-01   8.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   1.2000000e+00   2.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   1.0000000e+00   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   9.0000000e-01   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   7.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   1.9000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.0000000e+00   6.0000000e-01   9.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   1.1000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   
5.0000000e-01   8.0000000e-01   4.0000000e-01   1.0000000e+00   5.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   1.0000000e+00   6.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   1.1000000e+00   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.4000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   1.0000000e+00   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.3000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.3000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   7.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   4.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   1.0000000e-01   7.0000000e-01   1.4000000e+00   5.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   1.3000000e+00   5.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.2000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   6.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   7.0000000e-01   1.0000000e+00   6.0000000e-01   2.0000000e+00   4.0000000e-01   1.0000000e+00   1.3000000e+00   4.0000000e-01   4.0000000e-01   9.0000000e-01   
1.1000000e+00   1.4000000e+00   1.8000000e+00   9.0000000e-01   4.0000000e-01   9.0000000e-01   1.6000000e+00   1.0000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   1.2000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   1.1000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   4.0000000e-01   1.2000000e+00   5.0000000e-01   1.3000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   1.5000000e+00   9.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   4.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   2.4000000e+00   1.5000000e+00   2.3000000e+00   2.0000000e+00   2.2000000e+00   3.0000000e+00   9.0000000e-01   2.7000000e+00   2.2000000e+00   2.5000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   3.1000000e+00   3.3000000e+00   1.4000000e+00   2.1000000e+00   1.3000000e+00   3.1000000e+00   1.3000000e+00   2.1000000e+00   2.4000000e+00   1.2000000e+00   1.3000000e+00   2.0000000e+00   2.2000000e+00   2.5000000e+00   2.8000000e+00   2.0000000e+00   1.5000000e+00   2.0000000e+00   2.5000000e+00   2.0000000e+00   1.9000000e+00   1.2000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   1.5000000e+00   2.3000000e+00   2.1000000e+00   1.6000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   1.5000000e+00   1.1000000e+00   9.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   1.0000000e+00   1.2000000e+00   1.2000000e+00   9.0000000e-01   7.0000000e-01   1.3000000e+00   
7.0000000e-01   3.0000000e-01   8.0000000e-01   1.1000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.6000000e+00   1.0000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   1.8000000e+00   1.9000000e+00   1.4000000e+00   1.7000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   9.0000000e-01   1.1000000e+00   2.3000000e+00   2.5000000e+00   9.0000000e-01   1.3000000e+00   1.1000000e+00   2.3000000e+00   5.0000000e-01   1.3000000e+00   1.6000000e+00   5.0000000e-01   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   1.2000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   9.0000000e-01   1.5000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   1.2000000e+00   1.1000000e+00   4.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   2.0000000e-01   4.0000000e-01   1.1000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.5000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   7.0000000e-01   1.8000000e+00   1.3000000e+00   1.6000000e+00   9.0000000e-01   8.0000000e-01   1.2000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   8.0000000e-01   1.3000000e+00   5.0000000e-01   2.2000000e+00   7.0000000e-01   1.2000000e+00   1.6000000e+00   6.0000000e-01   
5.0000000e-01   1.1000000e+00   1.6000000e+00   1.8000000e+00   2.3000000e+00   1.1000000e+00   7.0000000e-01   1.1000000e+00   2.1000000e+00   1.1000000e+00   1.0000000e+00   4.0000000e-01   1.3000000e+00   1.1000000e+00   1.3000000e+00   6.0000000e-01   1.4000000e+00   1.2000000e+00   1.1000000e+00   7.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   5.0000000e-01   2.0000000e-01   8.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   1.0000000e+00   5.0000000e-01   7.0000000e-01   9.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   8.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   1.1000000e+00   3.0000000e-01   1.9000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   9.0000000e-01   2.2000000e+00   1.7000000e+00   2.0000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.0000000e+00   1.4000000e+00   1.3000000e+00   1.4000000e+00   2.6000000e+00   2.8000000e+00   9.0000000e-01   1.6000000e+00   1.0000000e+00   2.6000000e+00   8.0000000e-01   1.6000000e+00   1.9000000e+00   8.0000000e-01   8.0000000e-01   1.5000000e+00   1.7000000e+00   2.0000000e+00   2.3000000e+00   1.5000000e+00   1.0000000e+00   1.5000000e+00   2.0000000e+00   1.5000000e+00   1.4000000e+00   8.0000000e-01   1.3000000e+00   1.5000000e+00   1.3000000e+00   1.0000000e+00   1.8000000e+00   1.6000000e+00   1.3000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   6.0000000e-01   1.0000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.2000000e+00   9.0000000e-01   2.0000000e-01   8.0000000e-01   
7.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   1.2000000e+00   6.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   1.5000000e+00   6.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.3000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   1.0000000e+00   1.0000000e+00   2.2000000e+00   2.4000000e+00   5.0000000e-01   1.2000000e+00   6.0000000e-01   2.2000000e+00   5.0000000e-01   1.2000000e+00   1.5000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   1.2000000e+00   1.0000000e+00   8.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   1.4000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.2000000e+00   8.0000000e-01   9.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   8.0000000e-01   1.0000000e+00   1.2000000e+00   1.1000000e+00   6.0000000e-01   4.0000000e-01   1.0000000e-01   2.0000000e-01   2.0000000e-01   1.2000000e+00   6.0000000e-01   9.0000000e-01   1.1000000e+00   7.0000000e-01   5.0000000e-01   2.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   9.0000000e-01   3.0000000e-01   2.1000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.9000000e+00   2.7000000e+00   7.0000000e-01   2.4000000e+00   1.9000000e+00   2.2000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   2.8000000e+00   3.0000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.8000000e+00   1.0000000e+00   1.8000000e+00   2.1000000e+00   9.0000000e-01   1.0000000e+00   1.7000000e+00   1.9000000e+00   2.2000000e+00   2.5000000e+00   1.7000000e+00   1.2000000e+00   
1.7000000e+00   2.2000000e+00   1.7000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   1.7000000e+00   1.3000000e+00   1.2000000e+00   2.0000000e+00   1.8000000e+00   1.3000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   3.0000000e-01   1.3000000e+00   1.0000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   1.5000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   1.8000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   1.2000000e+00   8.0000000e-01   1.0000000e+00   1.8000000e+00   1.0000000e+00   1.5000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   5.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   1.9000000e+00   2.1000000e+00   1.0000000e+00   1.0000000e+00   4.0000000e-01   1.9000000e+00   5.0000000e-01   9.0000000e-01   1.3000000e+00   4.0000000e-01   2.0000000e-01   8.0000000e-01   1.3000000e+00   1.5000000e+00   2.0000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.8000000e+00   8.0000000e-01   7.0000000e-01   2.0000000e-01   1.0000000e+00   8.0000000e-01   1.0000000e+00   5.0000000e-01   1.1000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   9.0000000e-01   7.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.1000000e+00   7.0000000e-01   6.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.1000000e+00   5.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.0000000e+00   4.0000000e-01   2.0000000e+00   1.1000000e+00   
1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   1.2000000e+00   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   3.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.4000000e+00   1.1000000e+00   1.2000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   9.0000000e-01   1.6000000e+00   7.0000000e-01   7.0000000e-01   7.0000000e-01   6.0000000e-01   1.9000000e+00   8.0000000e-01   1.1000000e+00   5.0000000e-01   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.4000000e+00   9.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   1.8000000e+00   2.0000000e+00   3.0000000e-01   8.0000000e-01   7.0000000e-01   1.8000000e+00   3.0000000e-01   8.0000000e-01   1.1000000e+00   3.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   1.6000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   9.0000000e-01   6.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   4.0000000e-01   5.0000000e-01   9.0000000e-01   5.0000000e-01   
4.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   7.0000000e-01   1.4000000e+00   5.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   1.3000000e+00   7.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.2000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   2.0000000e+00   2.2000000e+00   6.0000000e-01   1.1000000e+00   8.0000000e-01   2.0000000e+00   6.0000000e-01   1.0000000e+00   1.3000000e+00   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.8000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   1.6000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   9.0000000e-01   1.2000000e+00   1.1000000e+00   7.0000000e-01   1.2000000e+00   1.3000000e+00   1.1000000e+00   7.0000000e-01   8.0000000e-01   1.1000000e+00   6.0000000e-01   2.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   6.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   3.0000000e-01   6.0000000e-01   1.4000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   2.0000000e-01   1.3000000e+00   7.0000000e-01   1.7000000e+00   8.0000000e-01   1.6000000e+00   1.3000000e+00   1.5000000e+00   2.3000000e+00   1.5000000e+00   2.0000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.2000000e+00   2.4000000e+00   2.6000000e+00   7.0000000e-01   1.4000000e+00   8.0000000e-01   2.4000000e+00   6.0000000e-01   
1.4000000e+00   1.7000000e+00   5.0000000e-01   6.0000000e-01   1.3000000e+00   1.5000000e+00   1.8000000e+00   2.1000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   1.8000000e+00   1.3000000e+00   1.2000000e+00   5.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   8.0000000e-01   1.6000000e+00   1.4000000e+00   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   1.1000000e+00   8.0000000e-01   7.0000000e-01   1.2000000e+00   6.0000000e-01   3.0000000e-01   7.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   4.0000000e-01   1.5000000e+00   9.0000000e-01   1.6000000e+00   8.0000000e-01   1.5000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.7000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   1.0000000e+00   9.0000000e-01   1.1000000e+00   2.3000000e+00   2.5000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   2.3000000e+00   5.0000000e-01   1.3000000e+00   1.6000000e+00   4.0000000e-01   5.0000000e-01   1.2000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   1.2000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.1000000e+00   6.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   8.0000000e-01   1.5000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.3000000e+00   1.3000000e+00   1.3000000e+00   1.0000000e+00   8.0000000e-01   1.4000000e+00   8.0000000e-01   3.0000000e-01   5.0000000e-01   1.2000000e+00   1.3000000e+00   1.3000000e+00   7.0000000e-01   1.0000000e+00   1.8000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   1.8000000e+00   1.1000000e+00   1.2000000e+00   1.0000000e+00   
1.1000000e+00   8.0000000e-01   1.0000000e+00   1.8000000e+00   1.9000000e+00   1.5000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   5.0000000e-01   7.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   7.0000000e-01   1.9000000e+00   2.1000000e+00   8.0000000e-01   9.0000000e-01   1.2000000e+00   1.9000000e+00   5.0000000e-01   9.0000000e-01   1.2000000e+00   6.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   1.3000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.3000000e+00   1.0000000e+00   7.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   1.5000000e+00   1.2000000e+00   1.3000000e+00   1.1000000e+00   7.0000000e-01   1.3000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.1000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   1.0000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.0000000e+00   7.0000000e-01   2.0000000e+00   1.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.6000000e+00   1.8000000e+00   1.3000000e+00   8.0000000e-01   1.1000000e+00   3.0000000e-01   3.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   5.0000000e-01   1.7000000e+00   1.9000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.7000000e+00   4.0000000e-01   7.0000000e-01   1.0000000e+00   5.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.4000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   7.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   
6.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e-01   5.0000000e-01   1.2000000e+00   4.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.1000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   2.2000000e+00   4.0000000e-01   1.2000000e+00   1.5000000e+00   3.0000000e-01   4.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   1.4000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   9.0000000e-01   6.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   2.5000000e+00   1.6000000e+00   2.4000000e+00   2.1000000e+00   2.3000000e+00   3.1000000e+00   1.0000000e+00   2.8000000e+00   2.3000000e+00   2.6000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   3.4000000e+00   1.5000000e+00   2.2000000e+00   1.4000000e+00   3.2000000e+00   1.4000000e+00   2.2000000e+00   2.5000000e+00   1.3000000e+00   1.4000000e+00   2.1000000e+00   2.3000000e+00   2.6000000e+00   2.9000000e+00   2.1000000e+00   1.6000000e+00   2.1000000e+00   2.6000000e+00   2.1000000e+00   2.0000000e+00   1.3000000e+00   
1.9000000e+00   2.1000000e+00   1.6000000e+00   1.6000000e+00   2.4000000e+00   2.2000000e+00   1.7000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.6000000e+00   1.0000000e-01   3.0000000e-01   1.3000000e+00   7.0000000e-01   1.0000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   8.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   2.2000000e+00   1.3000000e+00   2.1000000e+00   1.8000000e+00   2.0000000e+00   2.8000000e+00   7.0000000e-01   2.5000000e+00   2.0000000e+00   2.3000000e+00   1.3000000e+00   1.5000000e+00   1.7000000e+00   1.2000000e+00   1.3000000e+00   1.5000000e+00   1.7000000e+00   2.9000000e+00   3.1000000e+00   1.2000000e+00   1.9000000e+00   1.1000000e+00   2.9000000e+00   1.1000000e+00   1.9000000e+00   2.2000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   2.0000000e+00   2.3000000e+00   2.6000000e+00   1.8000000e+00   1.3000000e+00   1.8000000e+00   2.3000000e+00   1.8000000e+00   1.7000000e+00   1.0000000e+00   1.6000000e+00   1.8000000e+00   1.4000000e+00   1.3000000e+00   2.1000000e+00   1.9000000e+00   1.4000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.3000000e+00   3.0000000e-01   1.4000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   4.0000000e-01   2.3000000e+00   1.4000000e+00   2.2000000e+00   1.9000000e+00   2.1000000e+00   2.9000000e+00   8.0000000e-01   2.6000000e+00   2.1000000e+00   2.4000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   3.0000000e+00   3.2000000e+00   1.3000000e+00   2.0000000e+00   1.2000000e+00   3.0000000e+00   1.2000000e+00   2.0000000e+00   2.3000000e+00   1.1000000e+00   
1.2000000e+00   1.9000000e+00   2.1000000e+00   2.4000000e+00   2.7000000e+00   1.9000000e+00   1.4000000e+00   1.9000000e+00   2.4000000e+00   1.9000000e+00   1.8000000e+00   1.1000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.4000000e+00   2.2000000e+00   2.0000000e+00   1.5000000e+00   1.3000000e+00   1.5000000e+00   1.7000000e+00   1.4000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   9.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   1.0000000e-01   8.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   9.0000000e-01   2.0000000e-01   2.1000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.9000000e+00   2.7000000e+00   9.0000000e-01   2.4000000e+00   1.9000000e+00   2.2000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   2.8000000e+00   3.0000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.8000000e+00   1.0000000e+00   1.8000000e+00   2.1000000e+00   9.0000000e-01   1.0000000e+00   1.7000000e+00   1.9000000e+00   2.2000000e+00   2.5000000e+00   1.7000000e+00   1.2000000e+00   1.7000000e+00   2.2000000e+00   1.7000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   1.7000000e+00   1.2000000e+00   1.2000000e+00   2.0000000e+00   1.8000000e+00   1.3000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   1.0000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   1.1000000e+00   1.8000000e+00   9.0000000e-01   9.0000000e-01   9.0000000e-01   8.0000000e-01   2.1000000e+00   1.0000000e+00   9.0000000e-01   3.0000000e-01   1.1000000e+00   5.0000000e-01   7.0000000e-01   1.6000000e+00   1.1000000e+00   1.3000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   1.7000000e+00   1.8000000e+00   
5.0000000e-01   9.0000000e-01   4.0000000e-01   1.7000000e+00   3.0000000e-01   7.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   5.0000000e-01   1.2000000e+00   1.4000000e+00   1.9000000e+00   6.0000000e-01   3.0000000e-01   5.0000000e-01   1.7000000e+00   8.0000000e-01   4.0000000e-01   3.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   1.3000000e+00   9.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   7.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.7000000e+00   1.1000000e+00   1.3000000e+00   2.2000000e+00   5.0000000e-01   1.9000000e+00   1.3000000e+00   1.8000000e+00   1.1000000e+00   1.0000000e+00   1.4000000e+00   5.0000000e-01   9.0000000e-01   1.0000000e+00   1.1000000e+00   2.3000000e+00   2.4000000e+00   8.0000000e-01   1.5000000e+00   5.0000000e-01   2.3000000e+00   9.0000000e-01   1.3000000e+00   1.8000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.8000000e+00   2.0000000e+00   2.5000000e+00   1.1000000e+00   9.0000000e-01   1.1000000e+00   2.3000000e+00   1.1000000e+00   1.0000000e+00   6.0000000e-01   1.5000000e+00   1.3000000e+00   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.3000000e+00   1.3000000e+00   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   7.0000000e-01   1.1000000e+00   4.0000000e-01   9.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.5000000e+00   6.0000000e-01   1.5000000e+00   7.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.1000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   
8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   2.2000000e+00   7.0000000e-01   1.2000000e+00   1.5000000e+00   6.0000000e-01   4.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   7.0000000e-01   1.4000000e+00   1.2000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.7000000e+00   1.0000000e+00   1.3000000e+00   9.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.8000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   5.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   9.0000000e-01   1.0000000e+00   1.1000000e+00   2.0000000e+00   4.0000000e-01   1.0000000e+00   1.3000000e+00   5.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   4.0000000e-01   9.0000000e-01   1.4000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   1.2000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   1.3000000e+00   7.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   1.4000000e+00   6.0000000e-01   1.6000000e+00   7.0000000e-01   1.5000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   1.4000000e+00   1.9000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   9.0000000e-01   1.1000000e+00   7.0000000e-01   1.1000000e+00   
1.0000000e+00   1.1000000e+00   2.3000000e+00   2.5000000e+00   6.0000000e-01   1.3000000e+00   7.0000000e-01   2.3000000e+00   5.0000000e-01   1.3000000e+00   1.6000000e+00   5.0000000e-01   7.0000000e-01   1.2000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   1.2000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   1.2000000e+00   1.0000000e+00   7.0000000e-01   1.5000000e+00   1.3000000e+00   1.0000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   7.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   3.0000000e-01   1.0000000e-01   1.0000000e-01   6.0000000e-01   1.1000000e+00   2.0000000e-01   1.9000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   7.0000000e-01   2.2000000e+00   1.7000000e+00   2.0000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   1.1000000e+00   1.2000000e+00   1.4000000e+00   2.6000000e+00   2.8000000e+00   9.0000000e-01   1.6000000e+00   8.0000000e-01   2.6000000e+00   8.0000000e-01   1.6000000e+00   1.9000000e+00   7.0000000e-01   8.0000000e-01   1.5000000e+00   1.7000000e+00   2.0000000e+00   2.3000000e+00   1.5000000e+00   1.0000000e+00   1.5000000e+00   2.1000000e+00   1.5000000e+00   1.4000000e+00   7.0000000e-01   1.3000000e+00   1.5000000e+00   1.3000000e+00   1.0000000e+00   1.8000000e+00   1.6000000e+00   1.1000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   4.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   2.0000000e-01   5.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e+00   3.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   6.0000000e-01   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   
1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.2000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.4000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   6.0000000e-01   4.0000000e-01   1.1000000e+00   2.0000000e-01   4.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e-01   1.6000000e+00   7.0000000e-01   1.6000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   6.0000000e-01   1.9000000e+00   1.4000000e+00   1.7000000e+00   1.0000000e+00   9.0000000e-01   1.3000000e+00   8.0000000e-01   1.2000000e+00   1.1000000e+00   1.1000000e+00   2.3000000e+00   2.5000000e+00   6.0000000e-01   1.4000000e+00   8.0000000e-01   2.3000000e+00   8.0000000e-01   1.3000000e+00   1.7000000e+00   7.0000000e-01   6.0000000e-01   1.2000000e+00   1.7000000e+00   1.9000000e+00   2.4000000e+00   1.2000000e+00   8.0000000e-01   1.2000000e+00   2.2000000e+00   1.2000000e+00   1.1000000e+00   6.0000000e-01   1.4000000e+00   1.2000000e+00   1.4000000e+00   7.0000000e-01   1.5000000e+00   1.3000000e+00   1.2000000e+00   8.0000000e-01   1.0000000e+00   1.1000000e+00   7.0000000e-01   6.0000000e-01   1.3000000e+00   5.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.6000000e+00   5.0000000e-01   1.4000000e+00   5.0000000e-01   1.3000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e+00   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   6.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   9.0000000e-01   2.1000000e+00   2.3000000e+00   8.0000000e-01   1.1000000e+00   6.0000000e-01   2.1000000e+00   4.0000000e-01   1.1000000e+00   1.4000000e+00   4.0000000e-01   
4.0000000e-01   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   1.6000000e+00   1.0000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   1.3000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   8.0000000e-01   2.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   2.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   9.0000000e-01   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.2000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   9.0000000e-01   9.0000000e-01   1.2000000e+00   3.0000000e-01   8.0000000e-01   2.7000000e+00   1.8000000e+00   2.6000000e+00   2.3000000e+00   2.5000000e+00   3.3000000e+00   1.2000000e+00   3.0000000e+00   2.5000000e+00   2.8000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   1.7000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   3.6000000e+00   1.7000000e+00   2.4000000e+00   1.6000000e+00   3.4000000e+00   1.6000000e+00   2.4000000e+00   2.7000000e+00   1.5000000e+00   1.6000000e+00   2.3000000e+00   2.5000000e+00   2.8000000e+00   3.1000000e+00   2.3000000e+00   1.8000000e+00   2.3000000e+00   2.8000000e+00   2.3000000e+00   2.2000000e+00   1.5000000e+00   
2.1000000e+00   2.3000000e+00   1.9000000e+00   1.8000000e+00   2.6000000e+00   2.4000000e+00   1.9000000e+00   1.7000000e+00   1.9000000e+00   2.1000000e+00   1.8000000e+00   3.0000000e-01   2.0000000e-01   6.0000000e-01   1.2000000e+00   1.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   7.0000000e-01   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.1000000e+00   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   7.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.3000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   2.1000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.3000000e+00   1.4000000e+00   1.3000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   1.0000000e-01   5.0000000e-01   1.2000000e+00   2.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   8.0000000e-01   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.2000000e+00   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   8.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   2.0000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.2000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   5.0000000e-01   1.2000000e+00   1.0000000e-01   1.8000000e+00   9.0000000e-01   
1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   8.0000000e-01   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.1000000e+00   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   7.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   2.0000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.2000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   1.3000000e+00   5.0000000e-01   1.7000000e+00   8.0000000e-01   1.6000000e+00   1.3000000e+00   1.5000000e+00   2.3000000e+00   1.3000000e+00   2.0000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.2000000e+00   2.4000000e+00   2.6000000e+00   7.0000000e-01   1.4000000e+00   7.0000000e-01   2.4000000e+00   6.0000000e-01   1.4000000e+00   1.7000000e+00   5.0000000e-01   6.0000000e-01   1.3000000e+00   1.5000000e+00   1.8000000e+00   2.1000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   1.8000000e+00   1.3000000e+00   1.2000000e+00   5.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   8.0000000e-01   1.6000000e+00   1.4000000e+00   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   1.1000000e+00   3.0000000e+00   2.1000000e+00   2.9000000e+00   2.6000000e+00   2.8000000e+00   3.6000000e+00   1.5000000e+00   3.3000000e+00   2.8000000e+00   3.1000000e+00   2.1000000e+00   2.3000000e+00   2.5000000e+00   2.0000000e+00   2.1000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.9000000e+00   2.0000000e+00   2.7000000e+00   1.9000000e+00   3.7000000e+00   1.9000000e+00   
2.7000000e+00   3.0000000e+00   1.8000000e+00   1.9000000e+00   2.6000000e+00   2.8000000e+00   3.1000000e+00   3.4000000e+00   2.6000000e+00   2.1000000e+00   2.6000000e+00   3.1000000e+00   2.6000000e+00   2.5000000e+00   1.8000000e+00   2.4000000e+00   2.6000000e+00   2.1000000e+00   2.1000000e+00   2.9000000e+00   2.7000000e+00   2.2000000e+00   2.0000000e+00   2.2000000e+00   2.4000000e+00   2.1000000e+00   1.9000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   8.0000000e-01   2.2000000e+00   1.7000000e+00   2.0000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   1.1000000e+00   1.2000000e+00   1.4000000e+00   2.6000000e+00   2.8000000e+00   9.0000000e-01   1.6000000e+00   8.0000000e-01   2.6000000e+00   8.0000000e-01   1.6000000e+00   1.9000000e+00   7.0000000e-01   8.0000000e-01   1.5000000e+00   1.7000000e+00   2.0000000e+00   2.3000000e+00   1.5000000e+00   1.0000000e+00   1.5000000e+00   2.0000000e+00   1.5000000e+00   1.4000000e+00   7.0000000e-01   1.3000000e+00   1.5000000e+00   1.2000000e+00   1.0000000e+00   1.8000000e+00   1.6000000e+00   1.1000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   1.3000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   7.0000000e-01   7.0000000e-01   1.4000000e+00   1.4000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.4000000e+00   1.1000000e+00   4.0000000e-01   9.0000000e-01   1.2000000e+00   1.1000000e+00   5.0000000e-01   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.0000000e+00   1.1000000e+00   1.4000000e+00   4.0000000e-01   7.0000000e-01   1.2000000e+00   6.0000000e-01   4.0000000e-01   9.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   6.0000000e-01   9.0000000e-01   
1.3000000e+00   5.0000000e-01   7.0000000e-01   1.8000000e+00   9.0000000e-01   1.5000000e+00   9.0000000e-01   1.4000000e+00   7.0000000e-01   6.0000000e-01   1.0000000e+00   2.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   1.9000000e+00   1.9000000e+00   5.0000000e-01   1.1000000e+00   2.0000000e-01   1.9000000e+00   5.0000000e-01   9.0000000e-01   1.4000000e+00   4.0000000e-01   3.0000000e-01   6.0000000e-01   1.4000000e+00   1.6000000e+00   2.1000000e+00   6.0000000e-01   5.0000000e-01   5.0000000e-01   1.9000000e+00   7.0000000e-01   6.0000000e-01   3.0000000e-01   1.1000000e+00   9.0000000e-01   1.1000000e+00   0.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   7.0000000e-01   2.2000000e+00   4.0000000e-01   5.0000000e-01   6.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   1.4000000e+00   1.3000000e+00   7.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   2.0000000e-01   1.5000000e+00   8.0000000e-01   1.0000000e+00   4.0000000e-01   3.0000000e-01   1.1000000e+00   1.0000000e+00   7.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   6.0000000e-01   8.0000000e-01   7.0000000e-01   1.1000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.3000000e+00   3.0000000e-01   4.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   4.0000000e-01   1.3000000e+00   1.4000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   2.0000000e-01   1.4000000e+00   1.4000000e+00   7.0000000e-01   6.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   4.0000000e-01   9.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   9.0000000e-01   1.1000000e+00   1.6000000e+00   4.0000000e-01   5.0000000e-01   
4.0000000e-01   1.4000000e+00   6.0000000e-01   2.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.1000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   1.2000000e+00   1.2000000e+00   8.0000000e-01   4.0000000e-01   9.0000000e-01   1.2000000e+00   9.0000000e-01   3.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   2.0000000e-01   7.0000000e-01   9.0000000e-01   1.4000000e+00   2.0000000e-01   7.0000000e-01   8.0000000e-01   1.2000000e+00   4.0000000e-01   4.0000000e-01   1.0000000e+00   4.0000000e-01   2.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   2.7000000e+00   3.0000000e-01   9.0000000e-01   6.0000000e-01   1.5000000e+00   1.3000000e+00   1.1000000e+00   1.9000000e+00   1.8000000e+00   1.3000000e+00   1.1000000e+00   8.0000000e-01   4.0000000e-01   1.6000000e+00   9.0000000e-01   2.0000000e+00   2.0000000e-01   1.7000000e+00   9.0000000e-01   6.0000000e-01   1.8000000e+00   1.7000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.2000000e+00   1.5000000e+00   1.5000000e+00   5.0000000e-01   1.3000000e+00   1.2000000e+00   1.8000000e+00   1.2000000e+00   1.0000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   9.0000000e-01   1.4000000e+00   1.6000000e+00   1.4000000e+00   1.4000000e+00   1.7000000e+00   2.4000000e+00   1.8000000e+00   2.3000000e+00   1.6000000e+00   1.5000000e+00   1.9000000e+00   8.0000000e-01   9.0000000e-01   1.5000000e+00   1.6000000e+00   2.8000000e+00   2.8000000e+00   1.1000000e+00   2.0000000e+00   7.0000000e-01   2.8000000e+00   1.4000000e+00   1.8000000e+00   2.3000000e+00   1.3000000e+00   
1.2000000e+00   1.5000000e+00   2.3000000e+00   2.5000000e+00   3.0000000e+00   1.5000000e+00   1.4000000e+00   1.2000000e+00   2.8000000e+00   1.4000000e+00   1.5000000e+00   1.1000000e+00   2.0000000e+00   1.8000000e+00   2.0000000e+00   9.0000000e-01   1.9000000e+00   1.8000000e+00   1.8000000e+00   1.4000000e+00   1.6000000e+00   1.3000000e+00   1.0000000e+00   6.0000000e-01   7.0000000e-01   1.2000000e+00   1.0000000e+00   8.0000000e-01   1.6000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   9.0000000e-01   6.0000000e-01   1.3000000e+00   6.0000000e-01   1.7000000e+00   4.0000000e-01   1.4000000e+00   6.0000000e-01   3.0000000e-01   1.5000000e+00   1.4000000e+00   9.0000000e-01   5.0000000e-01   2.0000000e-01   9.0000000e-01   9.0000000e-01   1.2000000e+00   1.2000000e+00   5.0000000e-01   1.0000000e+00   9.0000000e-01   1.5000000e+00   9.0000000e-01   7.0000000e-01   1.2000000e+00   1.5000000e+00   5.0000000e-01   7.0000000e-01   1.1000000e+00   1.3000000e+00   1.1000000e+00   1.1000000e+00   1.4000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   7.0000000e-01   5.0000000e-01   1.3000000e+00   1.1000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   1.3000000e+00   4.0000000e-01   7.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   6.0000000e-01   6.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   1.5000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   6.0000000e-01   1.0000000e+00   1.4000000e+00   4.0000000e-01   1.6000000e+00   8.0000000e-01   1.2000000e+00   5.0000000e-01   7.0000000e-01   1.3000000e+00   1.2000000e+00   8.0000000e-01   
9.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   6.0000000e-01   9.0000000e-01   8.0000000e-01   1.3000000e+00   7.0000000e-01   5.0000000e-01   1.0000000e+00   1.4000000e+00   4.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   1.0000000e+00   1.3000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   4.0000000e-01   1.6000000e+00   1.8000000e+00   1.0000000e+00   6.0000000e-01   9.0000000e-01   1.6000000e+00   5.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   1.0000000e+00   1.4000000e+00   5.0000000e-01   5.0000000e-01   6.0000000e-01   1.2000000e+00   5.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   2.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   1.4000000e+00   1.6000000e+00   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.4000000e+00   4.0000000e-01   6.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   8.0000000e-01   1.0000000e+00   1.5000000e+00   3.0000000e-01   4.0000000e-01   5.0000000e-01   1.3000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   7.0000000e-01   5.0000000e-01   1.1000000e+00   1.0000000e+00   4.0000000e-01   3.0000000e-01   1.2000000e+00   1.4000000e+00   8.0000000e-01   2.0000000e-01   1.2000000e+00   1.2000000e+00   6.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   1.1000000e+00   4.0000000e-01   6.0000000e-01   7.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e-01   
3.0000000e-01   4.0000000e-01   1.0000000e+00   4.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   2.0000000e+00   2.0000000e+00   5.0000000e-01   1.2000000e+00   3.0000000e-01   2.0000000e+00   6.0000000e-01   1.0000000e+00   1.5000000e+00   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.5000000e+00   1.7000000e+00   2.2000000e+00   7.0000000e-01   6.0000000e-01   6.0000000e-01   2.0000000e+00   9.0000000e-01   7.0000000e-01   5.0000000e-01   1.2000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   6.0000000e-01   8.0000000e-01   9.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   1.9000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   4.0000000e-01   1.9000000e+00   6.0000000e-01   9.0000000e-01   1.4000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.4000000e+00   1.6000000e+00   2.1000000e+00   6.0000000e-01   9.0000000e-01   1.0000000e+00   1.9000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.1000000e+00   9.0000000e-01   1.1000000e+00   5.0000000e-01   1.0000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   1.4000000e+00   1.6000000e+00   1.0000000e+00   5.0000000e-01   8.0000000e-01   1.4000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   1.5000000e+00   4.0000000e-01   8.0000000e-01   9.0000000e-01   1.3000000e+00   3.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   3.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   1.2000000e+00   1.4000000e+00   8.0000000e-01   5.0000000e-01   9.0000000e-01   1.2000000e+00   6.0000000e-01   3.0000000e-01   7.0000000e-01   7.0000000e-01   
6.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   1.4000000e+00   4.0000000e-01   4.0000000e-01   4.0000000e-01   1.2000000e+00   6.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   1.2000000e+00   1.7000000e+00   1.0000000e+00   2.1000000e+00   1.0000000e+00   1.8000000e+00   1.0000000e+00   7.0000000e-01   1.9000000e+00   1.8000000e+00   1.3000000e+00   9.0000000e-01   1.0000000e+00   3.0000000e-01   1.3000000e+00   1.6000000e+00   1.6000000e+00   8.0000000e-01   1.4000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   1.5000000e+00   1.7000000e+00   1.5000000e+00   1.5000000e+00   1.8000000e+00   1.9000000e+00   1.2000000e+00   2.1000000e+00   3.0000000e-01   2.0000000e+00   1.2000000e+00   9.0000000e-01   2.1000000e+00   2.0000000e+00   1.3000000e+00   1.1000000e+00   8.0000000e-01   1.2000000e+00   1.3000000e+00   1.8000000e+00   1.6000000e+00   8.0000000e-01   1.4000000e+00   1.4000000e+00   2.1000000e+00   1.5000000e+00   1.3000000e+00   1.8000000e+00   1.9000000e+00   1.0000000e+00   1.2000000e+00   1.7000000e+00   1.9000000e+00   1.7000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   6.0000000e-01   1.7000000e+00   5.0000000e-01   1.1000000e+00   1.2000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.9000000e+00   7.0000000e-01   6.0000000e-01   6.0000000e-01   1.7000000e+00   1.2000000e+00   9.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   5.0000000e-01   1.0000000e+00   1.1000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.2000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   8.0000000e-01   2.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   7.0000000e-01   
5.0000000e-01   1.0000000e+00   5.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   9.0000000e-01   3.0000000e-01   2.0000000e-01   6.0000000e-01   1.1000000e+00   2.0000000e-01   2.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   7.0000000e-01   1.0000000e+00   2.1000000e+00   7.0000000e-01   1.1000000e+00   1.6000000e+00   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.8000000e+00   2.3000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   2.1000000e+00   7.0000000e-01   8.0000000e-01   4.0000000e-01   1.3000000e+00   1.1000000e+00   1.3000000e+00   2.0000000e-01   1.2000000e+00   1.1000000e+00   1.1000000e+00   7.0000000e-01   9.0000000e-01   6.0000000e-01   3.0000000e-01   1.8000000e+00   1.0000000e+00   7.0000000e-01   1.9000000e+00   1.8000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   1.3000000e+00   1.6000000e+00   1.6000000e+00   6.0000000e-01   1.4000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   1.5000000e+00   1.7000000e+00   1.5000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   1.1000000e+00   1.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   1.6000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   5.0000000e-01   2.0000000e-01   3.0000000e-01   7.0000000e-01   4.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.2000000e+00   5.0000000e-01   6.0000000e-01   7.0000000e-01   1.0000000e+00   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   
8.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.2000000e+00   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.4000000e+00   5.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   1.0000000e+00   1.3000000e+00   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.3000000e+00   1.7000000e+00   8.0000000e-01   3.0000000e-01   8.0000000e-01   1.5000000e+00   8.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   1.1000000e+00   9.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   1.1000000e+00   1.3000000e+00   1.8000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.6000000e+00   7.0000000e-01   6.0000000e-01   1.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   1.0000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.5000000e+00   1.0000000e-01   6.0000000e-01   7.0000000e-01   1.3000000e+00   6.0000000e-01   3.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   8.0000000e-01   9.0000000e-01   1.1000000e+00   7.0000000e-01   9.0000000e-01   8.0000000e-01   1.2000000e+00   5.0000000e-01   8.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   1.0000000e+00   1.3000000e+00   1.0000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   4.0000000e-01   1.1000000e+00   1.0000000e+00   1.4000000e+00   7.0000000e-01   7.0000000e-01   1.0000000e+00   1.6000000e+00   6.0000000e-01   7.0000000e-01   
9.0000000e-01   1.1000000e+00   9.0000000e-01   1.2000000e+00   1.5000000e+00   1.5000000e+00   1.6000000e+00   1.8000000e+00   8.0000000e-01   1.6000000e+00   1.5000000e+00   1.9000000e+00   1.0000000e+00   1.2000000e+00   1.3000000e+00   2.1000000e+00   1.1000000e+00   1.2000000e+00   1.2000000e+00   1.6000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   7.0000000e-01   8.0000000e-01   1.3000000e+00   6.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   1.4000000e+00   9.0000000e-01   4.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   5.0000000e-01   8.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   1.4000000e+00   1.3000000e+00   1.7000000e+00   8.0000000e-01   1.0000000e+00   1.0000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   1.0000000e+00   1.4000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   9.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   3.0000000e-01   1.1000000e+00   9.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   1.1000000e+00   5.0000000e-01   
4.0000000e-01   2.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-chebychev-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-chebychev-ml.txt
    deleted file mode 100644
    index 7864862959..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-chebychev-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   8.9084734e-01   9.3573853e-01   9.3507398e-01   9.6040691e-01   9.2918157e-01   9.6617342e-01   9.0430930e-01   9.5753424e-01   8.7106898e-01   9.2169905e-01   9.7401159e-01   8.9013416e-01   9.3956689e-01   9.0041896e-01   9.2588355e-01   9.3849417e-01   8.9713468e-01   9.1481804e-01   9.7500539e-01   9.0012586e-01   9.0962559e-01   8.5860091e-01   8.6981095e-01   8.9995771e-01   8.8070172e-01   9.1456657e-01   8.6711474e-01   9.2593917e-01   8.7560376e-01   8.5193121e-01   9.0898542e-01   8.7765302e-01   8.6555584e-01   8.6093485e-01   9.0447028e-01   8.7614405e-01   9.4803522e-01   8.4998062e-01   7.8398996e-01   8.9538612e-01   8.3902291e-01   9.9039470e-01   9.5480519e-01   8.9152195e-01   9.1623329e-01   7.9094921e-01   9.1777100e-01   9.8972335e-01   9.0429093e-01   8.7646362e-01   9.2136649e-01   9.7178177e-01   8.9610979e-01   9.4710327e-01   9.3612450e-01   9.0241499e-01   7.7992538e-01   8.7262126e-01   9.3325183e-01   8.5796531e-01   9.4267977e-01   6.7224167e-01   7.9568368e-01   8.6411267e-01   9.3311642e-01   9.0160114e-01   9.0698887e-01   8.5833256e-01   9.6902830e-01   9.5072298e-01   8.6808495e-01   9.7879599e-01   8.8060729e-01   8.2818573e-01   8.4366706e-01   8.4506700e-01   9.4532981e-01   9.1792306e-01   7.8917825e-01   9.8337805e-01   8.1751613e-01   9.3037855e-01   9.1618832e-01   8.6568874e-01   8.9751397e-01   8.7923710e-01   8.6814329e-01   9.0330164e-01   8.2426213e-01   9.4644643e-01   8.8431293e-01   8.8497426e-01   9.0633818e-01   9.5537161e-01   8.2167575e-01   8.7771053e-01   9.0681167e-01   8.7626143e-01   8.7463464e-01   9.8033940e-01   9.2920881e-01   9.5108549e-01   9.1287466e-01   8.0052218e-01   9.2409517e-01   8.8252650e-01   8.7873923e-01   9.2989402e-01   9.1985043e-01   9.6172646e-01   8.8223856e-01   9.4477822e-01   8.8310948e-01   9.4461306e-01   9.1875210e-01   9.1233363e-01   9.2124013e-01   9.5460897e-01   8.4640982e-01   9.0882657e-01   9.8169468e-01   9.7828355e-01   8.4150533e-01   8.6888923e-01   
9.7138825e-01   8.7988144e-01   9.6720910e-01   8.9450147e-01   9.5331584e-01   8.8871809e-01   8.9736685e-01   8.6258146e-01   9.1331565e-01   9.0968870e-01   9.4833654e-01   9.0536967e-01   9.5099871e-01   8.0251958e-01   9.2526150e-01   9.8971957e-01   9.0340947e-01   9.4955892e-01   9.6838162e-01   8.7534901e-01   9.1178797e-01   9.2649154e-01   9.5260993e-01   9.3178143e-01   9.4943000e-01   8.7816171e-01   9.6506542e-01   8.3422958e-01   9.3443585e-01   9.3220084e-01   8.5706573e-01   8.4666325e-01   9.0474744e-01   9.1080644e-01   9.2406899e-01   8.7901768e-01   9.3265263e-01   9.5992829e-01   9.5696271e-01   9.1932272e-01   8.0937044e-01   9.0904917e-01   8.9516756e-01   9.4797906e-01   8.4159421e-01   9.6773901e-01   9.7099825e-01   9.6941820e-01   9.8174088e-01   9.7569951e-01   9.3655362e-01   8.4130333e-01   9.5994549e-01   8.4235414e-01   9.1429418e-01   9.3418117e-01   8.4600977e-01   8.8166496e-01   8.7594776e-01   8.8571112e-01   9.6308174e-01   9.5315927e-01   8.6997519e-01   8.9383032e-01   9.4686804e-01   9.4399596e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-cityblock-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-cityblock-ml-iris.txt
    deleted file mode 100644
    index 6722928a4a..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-cityblock-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   7.0000000e-01   8.0000000e-01   1.0000000e+00   2.0000000e-01   1.2000000e+00   7.0000000e-01   3.0000000e-01   1.3000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.8000000e+00   1.0000000e+00   1.0000000e-01   1.3000000e+00   5.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   2.0000000e-01   2.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.3000000e+00   2.0000000e-01   3.0000000e-01   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   4.0000000e+00   6.4000000e+00   4.6000000e+00   4.5000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   4.0000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.8000000e+00   9.3000000e+00   8.6000000e+00   9.2000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   1.0200000e+01   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   
8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.9000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.9000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.3000000e+00   7.0000000e-01   2.0000000e-01   1.0000000e+00   2.1000000e+00   2.5000000e+00   1.7000000e+00   8.0000000e-01   2.0000000e+00   1.2000000e+00   1.2000000e+00   1.2000000e+00   1.3000000e+00   1.1000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   1.2000000e+00   1.6000000e+00   1.8000000e+00   3.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   1.3000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   2.0000000e-01   1.2000000e+00   5.0000000e-01   1.2000000e+00   4.0000000e-01   6.8000000e+00   6.1000000e+00   6.9000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   6.4000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   6.1000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   6.2000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   6.0000000e+00   6.5000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.6000000e+00   6.6000000e+00   
8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.9000000e+00   7.3000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.7000000e+00   7.3000000e+00   1.0900000e+01   1.0800000e+01   6.8000000e+00   8.6000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   8.3000000e+00   8.7000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   1.0600000e+01   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   8.2000000e+00   7.3000000e+00   6.1000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.6000000e+00   8.7000000e+00   8.7000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.8000000e+00   6.3000000e+00   4.0000000e-01   8.0000000e-01   2.0000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   6.0000000e-01   1.4000000e+00   6.0000000e-01   5.0000000e-01   9.0000000e-01   2.0000000e+00   2.6000000e+00   1.6000000e+00   9.0000000e-01   2.1000000e+00   1.3000000e+00   1.3000000e+00   1.3000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   8.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   3.0000000e-01   5.0000000e-01   1.3000000e+00   1.7000000e+00   1.9000000e+00   6.0000000e-01   4.0000000e-01   1.1000000e+00   6.0000000e-01   5.0000000e-01   8.0000000e-01   7.0000000e-01   1.2000000e+00   3.0000000e-01   1.3000000e+00   1.8000000e+00   5.0000000e-01   1.3000000e+00   2.0000000e-01   1.3000000e+00   5.0000000e-01   6.9000000e+00   6.2000000e+00   7.2000000e+00   5.5000000e+00   6.8000000e+00   5.7000000e+00   6.5000000e+00   3.8000000e+00   6.6000000e+00   4.8000000e+00   4.5000000e+00   5.6000000e+00   5.8000000e+00   6.3000000e+00   4.6000000e+00   6.4000000e+00   5.6000000e+00   5.2000000e+00   7.0000000e+00   5.1000000e+00   6.3000000e+00   5.6000000e+00   7.2000000e+00   6.2000000e+00   6.1000000e+00   6.4000000e+00   7.2000000e+00   7.4000000e+00   6.1000000e+00   4.6000000e+00   
5.0000000e+00   4.8000000e+00   5.2000000e+00   7.0000000e+00   5.4000000e+00   6.1000000e+00   6.8000000e+00   6.7000000e+00   5.0000000e+00   5.3000000e+00   5.5000000e+00   6.1000000e+00   5.4000000e+00   4.0000000e+00   5.4000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   3.7000000e+00   5.3000000e+00   8.7000000e+00   7.1000000e+00   9.1000000e+00   7.8000000e+00   8.5000000e+00   1.0300000e+01   5.6000000e+00   9.5000000e+00   8.8000000e+00   1.0000000e+01   7.4000000e+00   7.9000000e+00   8.4000000e+00   7.2000000e+00   7.5000000e+00   7.8000000e+00   7.8000000e+00   1.1000000e+01   1.1300000e+01   7.3000000e+00   8.7000000e+00   6.7000000e+00   1.0600000e+01   7.3000000e+00   8.4000000e+00   8.8000000e+00   7.0000000e+00   6.8000000e+00   8.3000000e+00   8.6000000e+00   9.6000000e+00   1.0700000e+01   8.4000000e+00   7.1000000e+00   7.5000000e+00   1.0100000e+01   8.3000000e+00   7.6000000e+00   6.6000000e+00   8.3000000e+00   8.6000000e+00   8.2000000e+00   7.1000000e+00   8.8000000e+00   8.8000000e+00   8.2000000e+00   7.7000000e+00   7.7000000e+00   7.9000000e+00   6.8000000e+00   1.0000000e+00   2.0000000e+00   5.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   1.4000000e+00   6.0000000e-01   5.0000000e-01   9.0000000e-01   2.4000000e+00   2.6000000e+00   2.0000000e+00   1.1000000e+00   2.1000000e+00   1.3000000e+00   1.3000000e+00   1.3000000e+00   1.0000000e+00   1.2000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   1.0000000e+00   1.0000000e+00   3.0000000e-01   3.0000000e-01   1.3000000e+00   1.7000000e+00   2.1000000e+00   4.0000000e-01   8.0000000e-01   1.5000000e+00   4.0000000e-01   5.0000000e-01   8.0000000e-01   1.1000000e+00   1.2000000e+00   5.0000000e-01   1.3000000e+00   1.8000000e+00   5.0000000e-01   1.3000000e+00   2.0000000e-01   1.3000000e+00   7.0000000e-01   6.9000000e+00   6.2000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.5000000e+00   3.6000000e+00   6.4000000e+00   
4.6000000e+00   4.3000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.3000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   6.1000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   3.8000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.7000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.4000000e+00   9.3000000e+00   8.6000000e+00   1.0000000e+01   7.4000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.8000000e+00   7.6000000e+00   1.1000000e+01   1.1100000e+01   7.1000000e+00   8.7000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.4000000e+00   8.8000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   1.0700000e+01   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   8.3000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.8000000e+00   8.8000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.9000000e+00   6.6000000e+00   1.2000000e+00   7.0000000e-01   3.0000000e-01   1.3000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.8000000e+00   1.0000000e+00   3.0000000e-01   1.3000000e+00   5.0000000e-01   9.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   4.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   1.3000000e+00   
4.0000000e-01   3.0000000e-01   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.9000000e+00   6.2000000e+00   7.2000000e+00   5.5000000e+00   6.8000000e+00   5.7000000e+00   6.3000000e+00   4.0000000e+00   6.6000000e+00   4.8000000e+00   4.5000000e+00   5.6000000e+00   5.8000000e+00   6.3000000e+00   4.6000000e+00   6.4000000e+00   5.6000000e+00   5.2000000e+00   7.0000000e+00   5.1000000e+00   6.3000000e+00   5.6000000e+00   7.2000000e+00   6.2000000e+00   6.1000000e+00   6.4000000e+00   7.2000000e+00   7.4000000e+00   6.1000000e+00   4.6000000e+00   5.0000000e+00   4.8000000e+00   5.2000000e+00   7.0000000e+00   5.4000000e+00   5.7000000e+00   6.8000000e+00   6.7000000e+00   5.0000000e+00   5.3000000e+00   5.5000000e+00   6.1000000e+00   5.4000000e+00   4.0000000e+00   5.4000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   3.7000000e+00   5.3000000e+00   8.5000000e+00   7.1000000e+00   9.1000000e+00   7.8000000e+00   8.5000000e+00   1.0300000e+01   5.8000000e+00   9.5000000e+00   8.8000000e+00   9.2000000e+00   7.4000000e+00   7.9000000e+00   8.4000000e+00   7.2000000e+00   7.5000000e+00   7.8000000e+00   7.8000000e+00   1.0200000e+01   1.1300000e+01   7.3000000e+00   8.7000000e+00   6.7000000e+00   1.0600000e+01   7.3000000e+00   8.2000000e+00   8.8000000e+00   7.0000000e+00   6.8000000e+00   8.3000000e+00   8.6000000e+00   9.6000000e+00   9.9000000e+00   8.4000000e+00   7.1000000e+00   7.5000000e+00   1.0100000e+01   7.9000000e+00   7.6000000e+00   6.6000000e+00   8.3000000e+00   8.6000000e+00   8.2000000e+00   7.1000000e+00   8.8000000e+00   8.6000000e+00   8.2000000e+00   7.7000000e+00   7.7000000e+00   7.5000000e+00   6.8000000e+00   1.7000000e+00   1.3000000e+00   2.5000000e+00   1.8000000e+00   6.0000000e-01   1.4000000e+00   2.1000000e+00   2.9000000e+00   1.2000000e+00   1.0000000e+00   4.0000000e-01   1.1000000e+00   5.0000000e-01   7.0000000e-01   
7.0000000e-01   7.0000000e-01   2.0000000e+00   1.0000000e+00   1.5000000e+00   1.6000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   1.7000000e+00   1.7000000e+00   7.0000000e-01   9.0000000e-01   9.0000000e-01   1.8000000e+00   1.8000000e+00   1.1000000e+00   1.8000000e+00   2.5000000e+00   1.2000000e+00   1.3000000e+00   3.0000000e+00   2.3000000e+00   1.1000000e+00   6.0000000e-01   1.9000000e+00   7.0000000e-01   2.0000000e+00   7.0000000e-01   1.5000000e+00   6.3000000e+00   5.6000000e+00   6.6000000e+00   4.9000000e+00   6.2000000e+00   5.1000000e+00   5.7000000e+00   4.2000000e+00   6.0000000e+00   4.6000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.7000000e+00   4.0000000e+00   5.8000000e+00   5.0000000e+00   4.6000000e+00   6.4000000e+00   4.5000000e+00   5.7000000e+00   5.0000000e+00   6.6000000e+00   5.6000000e+00   5.5000000e+00   5.8000000e+00   6.6000000e+00   6.8000000e+00   5.5000000e+00   4.0000000e+00   4.4000000e+00   4.2000000e+00   4.6000000e+00   6.4000000e+00   4.8000000e+00   5.1000000e+00   6.2000000e+00   6.1000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   4.8000000e+00   4.2000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   3.7000000e+00   4.7000000e+00   7.9000000e+00   6.5000000e+00   8.5000000e+00   7.2000000e+00   7.9000000e+00   9.7000000e+00   6.0000000e+00   8.9000000e+00   8.2000000e+00   8.6000000e+00   6.8000000e+00   7.3000000e+00   7.8000000e+00   6.6000000e+00   6.9000000e+00   7.2000000e+00   7.2000000e+00   9.2000000e+00   1.0700000e+01   6.7000000e+00   8.1000000e+00   6.1000000e+00   1.0000000e+01   6.7000000e+00   7.6000000e+00   8.2000000e+00   6.4000000e+00   6.2000000e+00   7.7000000e+00   8.0000000e+00   9.0000000e+00   8.9000000e+00   7.8000000e+00   6.5000000e+00   6.9000000e+00   9.5000000e+00   7.3000000e+00   7.0000000e+00   6.0000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.5000000e+00   8.2000000e+00   8.0000000e+00   
7.6000000e+00   7.1000000e+00   7.1000000e+00   6.9000000e+00   6.2000000e+00   6.0000000e-01   8.0000000e-01   9.0000000e-01   1.3000000e+00   5.0000000e-01   8.0000000e-01   1.2000000e+00   2.1000000e+00   2.3000000e+00   1.5000000e+00   6.0000000e-01   1.8000000e+00   1.0000000e+00   1.2000000e+00   1.0000000e+00   7.0000000e-01   1.1000000e+00   8.0000000e-01   1.1000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   1.6000000e+00   1.8000000e+00   9.0000000e-01   9.0000000e-01   1.2000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   6.0000000e-01   1.3000000e+00   6.0000000e-01   1.0000000e+00   1.5000000e+00   6.0000000e-01   1.2000000e+00   3.0000000e-01   1.2000000e+00   6.0000000e-01   7.0000000e+00   6.3000000e+00   7.3000000e+00   5.6000000e+00   6.9000000e+00   5.8000000e+00   6.4000000e+00   3.9000000e+00   6.7000000e+00   4.9000000e+00   4.6000000e+00   5.7000000e+00   5.9000000e+00   6.4000000e+00   4.7000000e+00   6.5000000e+00   5.7000000e+00   5.3000000e+00   7.1000000e+00   5.2000000e+00   6.4000000e+00   5.7000000e+00   7.3000000e+00   6.3000000e+00   6.2000000e+00   6.5000000e+00   7.3000000e+00   7.5000000e+00   6.2000000e+00   4.7000000e+00   5.1000000e+00   4.9000000e+00   5.3000000e+00   7.1000000e+00   5.5000000e+00   5.8000000e+00   6.9000000e+00   6.8000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   5.5000000e+00   4.1000000e+00   5.5000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   3.8000000e+00   5.4000000e+00   8.6000000e+00   7.2000000e+00   9.2000000e+00   7.9000000e+00   8.6000000e+00   1.0400000e+01   5.7000000e+00   9.6000000e+00   8.9000000e+00   9.7000000e+00   7.5000000e+00   8.0000000e+00   8.5000000e+00   7.3000000e+00   7.6000000e+00   7.9000000e+00   7.9000000e+00   1.0700000e+01   1.1400000e+01   7.4000000e+00   8.8000000e+00   6.8000000e+00   1.0700000e+01   7.4000000e+00   8.3000000e+00   8.9000000e+00   7.1000000e+00   
6.9000000e+00   8.4000000e+00   8.7000000e+00   9.7000000e+00   1.0400000e+01   8.5000000e+00   7.2000000e+00   7.6000000e+00   1.0200000e+01   8.0000000e+00   7.7000000e+00   6.7000000e+00   8.4000000e+00   8.7000000e+00   8.3000000e+00   7.2000000e+00   8.9000000e+00   8.7000000e+00   8.3000000e+00   7.8000000e+00   7.8000000e+00   7.6000000e+00   6.9000000e+00   1.2000000e+00   5.0000000e-01   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.6000000e+00   1.7000000e+00   1.9000000e+00   1.3000000e+00   4.0000000e-01   1.4000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.1000000e+00   7.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   1.4000000e+00   5.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   1.2000000e+00   1.0000000e-01   4.0000000e-01   1.9000000e+00   1.0000000e+00   6.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.7000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.5000000e+00   9.2000000e+00   8.5000000e+00   9.3000000e+00   
7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0300000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0000000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   9.0000000e-01   1.9000000e+00   1.1000000e+00   6.0000000e-01   6.0000000e-01   2.7000000e+00   3.1000000e+00   2.3000000e+00   1.4000000e+00   2.6000000e+00   1.8000000e+00   1.8000000e+00   1.8000000e+00   1.3000000e+00   1.7000000e+00   1.4000000e+00   9.0000000e-01   1.5000000e+00   1.5000000e+00   1.3000000e+00   8.0000000e-01   8.0000000e-01   1.8000000e+00   2.2000000e+00   2.4000000e+00   9.0000000e-01   1.1000000e+00   1.8000000e+00   9.0000000e-01   2.0000000e-01   1.3000000e+00   1.4000000e+00   9.0000000e-01   4.0000000e-01   1.8000000e+00   2.3000000e+00   6.0000000e-01   1.8000000e+00   5.0000000e-01   1.8000000e+00   1.0000000e+00   7.4000000e+00   6.7000000e+00   7.5000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   7.0000000e+00   3.7000000e+00   6.5000000e+00   4.7000000e+00   4.4000000e+00   5.7000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.7000000e+00   5.7000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.8000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.5000000e+00   7.1000000e+00   7.5000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.5000000e+00   6.6000000e+00   7.1000000e+00   6.6000000e+00   5.1000000e+00   5.2000000e+00   5.4000000e+00   6.2000000e+00   5.3000000e+00   3.9000000e+00   
5.3000000e+00   5.2000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   9.2000000e+00   7.0000000e+00   9.2000000e+00   7.7000000e+00   8.6000000e+00   1.0400000e+01   5.5000000e+00   9.4000000e+00   8.7000000e+00   1.0500000e+01   7.9000000e+00   7.8000000e+00   8.5000000e+00   7.1000000e+00   7.4000000e+00   8.3000000e+00   7.9000000e+00   1.1500000e+01   1.1200000e+01   7.2000000e+00   9.2000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.9000000e+00   9.3000000e+00   6.9000000e+00   6.9000000e+00   8.2000000e+00   8.7000000e+00   9.5000000e+00   1.1200000e+01   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0200000e+01   8.8000000e+00   7.9000000e+00   6.7000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.0000000e+00   9.3000000e+00   9.3000000e+00   8.3000000e+00   7.6000000e+00   7.8000000e+00   8.4000000e+00   6.9000000e+00   1.2000000e+00   6.0000000e-01   3.0000000e-01   1.1000000e+00   2.2000000e+00   2.4000000e+00   1.8000000e+00   9.0000000e-01   1.9000000e+00   1.1000000e+00   1.1000000e+00   1.1000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   8.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   1.1000000e+00   1.3000000e+00   1.9000000e+00   0.0000000e+00   6.0000000e-01   1.3000000e+00   0.0000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   5.0000000e-01   6.7000000e+00   6.0000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   6.3000000e+00   3.4000000e+00   6.2000000e+00   4.4000000e+00   4.1000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   6.1000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   
4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.9000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.6000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.5000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.2000000e+00   9.1000000e+00   8.4000000e+00   9.8000000e+00   7.2000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.6000000e+00   7.4000000e+00   1.0800000e+01   1.0900000e+01   6.9000000e+00   8.5000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   8.2000000e+00   8.6000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   1.0500000e+01   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   8.1000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.6000000e+00   8.6000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.7000000e+00   6.4000000e+00   1.0000000e+00   1.5000000e+00   2.3000000e+00   1.0000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   1.4000000e+00   1.2000000e+00   1.3000000e+00   1.2000000e+00   1.0000000e+00   4.0000000e-01   6.0000000e-01   1.3000000e+00   1.3000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   1.2000000e+00   1.2000000e+00   5.0000000e-01   1.2000000e+00   1.9000000e+00   6.0000000e-01   9.0000000e-01   2.6000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.5000000e+00   5.0000000e-01   1.4000000e+00   1.0000000e-01   9.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   4.4000000e+00   6.2000000e+00   4.8000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   
6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   4.4000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.9000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   6.2000000e+00   9.1000000e+00   8.4000000e+00   8.8000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   9.6000000e+00   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.3000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   7.0000000e-01   1.5000000e+00   2.0000000e+00   2.2000000e+00   1.6000000e+00   7.0000000e-01   1.5000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.0000000e+00   8.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   9.0000000e-01   1.3000000e+00   1.7000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   1.8000000e+00   9.0000000e-01   7.0000000e-01   1.2000000e+00   7.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   6.7000000e+00   6.0000000e+00   
7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   3.6000000e+00   6.4000000e+00   4.6000000e+00   4.3000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   3.8000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.4000000e+00   9.3000000e+00   8.6000000e+00   9.4000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   1.0400000e+01   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   1.0100000e+01   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   8.0000000e-01   2.3000000e+00   2.7000000e+00   1.9000000e+00   1.0000000e+00   2.2000000e+00   1.4000000e+00   1.4000000e+00   1.4000000e+00   1.3000000e+00   1.3000000e+00   1.0000000e+00   5.0000000e-01   1.1000000e+00   1.1000000e+00   9.0000000e-01   6.0000000e-01   4.0000000e-01   1.4000000e+00   1.6000000e+00   2.0000000e+00   3.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e-01   6.0000000e-01   9.0000000e-01   
1.0000000e+00   1.3000000e+00   8.0000000e-01   1.4000000e+00   1.9000000e+00   2.0000000e-01   1.4000000e+00   5.0000000e-01   1.4000000e+00   6.0000000e-01   7.0000000e+00   6.3000000e+00   7.1000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.6000000e+00   3.5000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.3000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.4000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   6.2000000e+00   6.7000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.8000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.3000000e+00   9.2000000e+00   8.5000000e+00   1.0100000e+01   7.5000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.9000000e+00   7.5000000e+00   1.1100000e+01   1.1000000e+01   7.0000000e+00   8.8000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   8.5000000e+00   8.9000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0800000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   8.4000000e+00   7.5000000e+00   6.3000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   6.8000000e+00   8.9000000e+00   8.9000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   8.0000000e+00   6.5000000e+00   2.7000000e+00   3.5000000e+00   2.5000000e+00   1.8000000e+00   3.0000000e+00   2.2000000e+00   2.2000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   1.8000000e+00   1.3000000e+00   1.9000000e+00   1.9000000e+00   1.7000000e+00   
1.2000000e+00   1.2000000e+00   2.2000000e+00   2.4000000e+00   2.8000000e+00   1.1000000e+00   1.1000000e+00   2.0000000e+00   1.1000000e+00   4.0000000e-01   1.7000000e+00   1.6000000e+00   1.3000000e+00   6.0000000e-01   2.2000000e+00   2.7000000e+00   1.0000000e+00   2.2000000e+00   9.0000000e-01   2.2000000e+00   1.4000000e+00   7.8000000e+00   7.1000000e+00   7.9000000e+00   6.0000000e+00   7.3000000e+00   6.2000000e+00   7.4000000e+00   4.3000000e+00   7.1000000e+00   5.3000000e+00   5.0000000e+00   6.1000000e+00   6.3000000e+00   6.8000000e+00   5.1000000e+00   7.1000000e+00   6.1000000e+00   5.7000000e+00   7.5000000e+00   5.6000000e+00   7.2000000e+00   6.1000000e+00   7.7000000e+00   6.7000000e+00   6.6000000e+00   6.9000000e+00   7.7000000e+00   7.9000000e+00   6.6000000e+00   5.1000000e+00   5.5000000e+00   5.3000000e+00   5.7000000e+00   7.5000000e+00   5.9000000e+00   7.0000000e+00   7.5000000e+00   7.2000000e+00   5.5000000e+00   5.8000000e+00   6.0000000e+00   6.6000000e+00   5.9000000e+00   4.5000000e+00   5.9000000e+00   5.6000000e+00   5.8000000e+00   6.4000000e+00   4.2000000e+00   5.8000000e+00   9.6000000e+00   7.6000000e+00   9.6000000e+00   8.3000000e+00   9.0000000e+00   1.0800000e+01   6.1000000e+00   1.0000000e+01   9.3000000e+00   1.0900000e+01   8.3000000e+00   8.4000000e+00   8.9000000e+00   7.7000000e+00   8.0000000e+00   8.7000000e+00   8.3000000e+00   1.1900000e+01   1.1800000e+01   7.8000000e+00   9.6000000e+00   7.2000000e+00   1.1100000e+01   7.8000000e+00   9.3000000e+00   9.7000000e+00   7.5000000e+00   7.3000000e+00   8.8000000e+00   9.1000000e+00   1.0100000e+01   1.1600000e+01   8.9000000e+00   7.6000000e+00   8.0000000e+00   1.0600000e+01   9.2000000e+00   8.3000000e+00   7.1000000e+00   9.0000000e+00   9.3000000e+00   8.9000000e+00   7.6000000e+00   9.7000000e+00   9.7000000e+00   8.7000000e+00   8.2000000e+00   8.2000000e+00   8.8000000e+00   7.3000000e+00   1.0000000e+00   8.0000000e-01   1.5000000e+00   9.0000000e-01   
1.3000000e+00   1.5000000e+00   1.5000000e+00   1.8000000e+00   2.2000000e+00   2.3000000e+00   2.2000000e+00   2.0000000e+00   1.4000000e+00   1.4000000e+00   2.3000000e+00   2.3000000e+00   1.5000000e+00   1.1000000e+00   7.0000000e-01   2.2000000e+00   1.6000000e+00   9.0000000e-01   2.2000000e+00   2.5000000e+00   1.6000000e+00   1.5000000e+00   3.2000000e+00   2.3000000e+00   2.1000000e+00   1.8000000e+00   2.3000000e+00   1.3000000e+00   2.2000000e+00   1.1000000e+00   1.7000000e+00   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.9000000e+00   6.6000000e+00   5.7000000e+00   6.1000000e+00   5.4000000e+00   6.4000000e+00   5.8000000e+00   5.9000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.8000000e+00   6.2000000e+00   5.8000000e+00   5.0000000e+00   6.8000000e+00   5.3000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.6000000e+00   5.4000000e+00   5.2000000e+00   5.0000000e+00   6.8000000e+00   6.0000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   5.2000000e+00   5.7000000e+00   5.9000000e+00   5.9000000e+00   5.2000000e+00   5.4000000e+00   5.6000000e+00   5.1000000e+00   5.3000000e+00   5.7000000e+00   4.9000000e+00   5.3000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   7.2000000e+00   9.3000000e+00   8.6000000e+00   9.0000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.2000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   9.6000000e+00   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.9000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.3000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   
8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   1.3000000e+00   1.7000000e+00   1.3000000e+00   2.6000000e+00   2.0000000e+00   2.5000000e+00   2.4000000e+00   1.8000000e+00   1.6000000e+00   1.8000000e+00   2.5000000e+00   2.5000000e+00   1.3000000e+00   1.1000000e+00   7.0000000e-01   2.4000000e+00   2.4000000e+00   1.5000000e+00   2.4000000e+00   3.1000000e+00   1.8000000e+00   1.9000000e+00   3.6000000e+00   2.9000000e+00   1.9000000e+00   1.6000000e+00   2.5000000e+00   1.5000000e+00   2.6000000e+00   1.3000000e+00   2.1000000e+00   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.7000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   5.2000000e+00   6.4000000e+00   5.6000000e+00   5.7000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.6000000e+00   6.2000000e+00   5.6000000e+00   5.0000000e+00   6.8000000e+00   5.1000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   5.2000000e+00   5.0000000e+00   5.0000000e+00   6.8000000e+00   5.8000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   5.0000000e+00   5.5000000e+00   5.7000000e+00   5.9000000e+00   5.2000000e+00   5.2000000e+00   5.4000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   4.7000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   7.0000000e+00   9.3000000e+00   8.6000000e+00   9.0000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   9.6000000e+00   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.7000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.3000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   
9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   9.0000000e-01   9.0000000e-01   7.0000000e-01   1.1000000e+00   7.0000000e-01   1.6000000e+00   1.4000000e+00   1.9000000e+00   1.8000000e+00   1.2000000e+00   1.0000000e+00   1.0000000e+00   1.9000000e+00   1.9000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   1.8000000e+00   1.4000000e+00   7.0000000e-01   1.8000000e+00   2.1000000e+00   1.2000000e+00   9.0000000e-01   2.6000000e+00   1.9000000e+00   1.3000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   1.8000000e+00   7.0000000e-01   1.3000000e+00   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   4.6000000e+00   6.4000000e+00   5.0000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   4.6000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   4.1000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   6.4000000e+00   9.3000000e+00   8.6000000e+00   9.0000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   9.6000000e+00   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   
6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.3000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   1.2000000e+00   4.0000000e-01   8.0000000e-01   4.0000000e-01   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   1.0000000e+00   1.0000000e+00   6.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   1.4000000e+00   3.0000000e-01   2.0000000e-01   1.9000000e+00   1.2000000e+00   6.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   4.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.9000000e+00   6.3000000e+00   4.5000000e+00   4.4000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.9000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.7000000e+00   9.2000000e+00   8.5000000e+00   9.1000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0100000e+01   1.1000000e+01   7.0000000e+00   
8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   9.8000000e+00   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   8.0000000e-01   8.0000000e-01   1.0000000e+00   2.1000000e+00   1.3000000e+00   1.6000000e+00   1.7000000e+00   1.3000000e+00   1.1000000e+00   1.3000000e+00   1.8000000e+00   1.8000000e+00   1.0000000e+00   1.2000000e+00   1.0000000e+00   1.9000000e+00   1.9000000e+00   1.0000000e+00   1.9000000e+00   2.6000000e+00   1.3000000e+00   1.4000000e+00   3.1000000e+00   2.4000000e+00   1.4000000e+00   9.0000000e-01   2.0000000e+00   8.0000000e-01   2.1000000e+00   8.0000000e-01   1.6000000e+00   6.0000000e+00   5.3000000e+00   6.3000000e+00   5.0000000e+00   5.9000000e+00   4.8000000e+00   5.4000000e+00   4.5000000e+00   5.7000000e+00   4.9000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.4000000e+00   3.9000000e+00   5.5000000e+00   4.9000000e+00   4.3000000e+00   6.1000000e+00   4.4000000e+00   5.4000000e+00   4.7000000e+00   6.3000000e+00   5.3000000e+00   5.2000000e+00   5.5000000e+00   6.3000000e+00   6.5000000e+00   5.2000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   4.3000000e+00   6.1000000e+00   5.1000000e+00   4.8000000e+00   5.9000000e+00   5.8000000e+00   4.3000000e+00   4.8000000e+00   5.0000000e+00   5.2000000e+00   4.5000000e+00   4.5000000e+00   4.7000000e+00   4.2000000e+00   4.4000000e+00   5.0000000e+00   4.0000000e+00   4.4000000e+00   7.6000000e+00   6.2000000e+00   8.2000000e+00   6.9000000e+00   7.6000000e+00   9.4000000e+00   6.3000000e+00   8.6000000e+00   7.9000000e+00   8.3000000e+00   6.5000000e+00   7.0000000e+00   7.5000000e+00   6.3000000e+00   
6.6000000e+00   6.9000000e+00   6.9000000e+00   8.9000000e+00   1.0400000e+01   6.4000000e+00   7.8000000e+00   6.0000000e+00   9.7000000e+00   6.4000000e+00   7.3000000e+00   7.9000000e+00   6.1000000e+00   5.9000000e+00   7.4000000e+00   7.7000000e+00   8.7000000e+00   8.6000000e+00   7.5000000e+00   6.2000000e+00   6.6000000e+00   9.2000000e+00   7.0000000e+00   6.7000000e+00   5.7000000e+00   7.4000000e+00   7.7000000e+00   7.3000000e+00   6.2000000e+00   7.9000000e+00   7.7000000e+00   7.3000000e+00   6.8000000e+00   6.8000000e+00   6.6000000e+00   5.9000000e+00   1.0000000e+00   2.0000000e-01   1.3000000e+00   9.0000000e-01   1.2000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   7.0000000e-01   1.2000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   5.0000000e-01   6.0000000e-01   2.3000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   1.2000000e+00   2.0000000e-01   1.3000000e+00   4.0000000e-01   8.0000000e-01   6.8000000e+00   6.1000000e+00   7.1000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   6.2000000e+00   4.1000000e+00   6.5000000e+00   4.7000000e+00   4.6000000e+00   5.5000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.3000000e+00   5.5000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.2000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.3000000e+00   7.1000000e+00   7.3000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.3000000e+00   5.6000000e+00   6.7000000e+00   6.6000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   5.3000000e+00   4.1000000e+00   5.3000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   8.4000000e+00   7.0000000e+00   9.0000000e+00   7.7000000e+00   8.4000000e+00   1.0200000e+01   5.9000000e+00   9.4000000e+00   8.7000000e+00   
9.1000000e+00   7.3000000e+00   7.8000000e+00   8.3000000e+00   7.1000000e+00   7.4000000e+00   7.7000000e+00   7.7000000e+00   9.7000000e+00   1.1200000e+01   7.2000000e+00   8.6000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.1000000e+00   8.7000000e+00   6.9000000e+00   6.7000000e+00   8.2000000e+00   8.5000000e+00   9.5000000e+00   9.4000000e+00   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0000000e+01   7.8000000e+00   7.5000000e+00   6.5000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   7.0000000e+00   8.7000000e+00   8.5000000e+00   8.1000000e+00   7.6000000e+00   7.6000000e+00   7.4000000e+00   6.7000000e+00   1.0000000e+00   1.7000000e+00   7.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   1.0000000e+00   4.0000000e-01   1.2000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.8000000e+00   5.0000000e-01   1.0000000e+00   2.5000000e+00   1.6000000e+00   1.0000000e+00   1.1000000e+00   1.4000000e+00   8.0000000e-01   1.3000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e+00   5.3000000e+00   6.3000000e+00   4.6000000e+00   5.9000000e+00   4.8000000e+00   5.4000000e+00   3.9000000e+00   5.7000000e+00   4.3000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.4000000e+00   3.7000000e+00   5.5000000e+00   4.7000000e+00   4.3000000e+00   6.1000000e+00   4.2000000e+00   5.4000000e+00   4.7000000e+00   6.3000000e+00   5.3000000e+00   5.2000000e+00   5.5000000e+00   6.3000000e+00   6.5000000e+00   5.2000000e+00   3.7000000e+00   4.1000000e+00   3.9000000e+00   4.3000000e+00   6.1000000e+00   4.5000000e+00   4.8000000e+00   5.9000000e+00   5.8000000e+00   4.1000000e+00   4.4000000e+00   4.6000000e+00   5.2000000e+00   4.5000000e+00   3.9000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.0000000e+00   3.4000000e+00   4.4000000e+00   7.6000000e+00   6.2000000e+00   8.2000000e+00   6.9000000e+00   7.6000000e+00   
9.4000000e+00   5.7000000e+00   8.6000000e+00   7.9000000e+00   8.7000000e+00   6.5000000e+00   7.0000000e+00   7.5000000e+00   6.3000000e+00   6.6000000e+00   6.9000000e+00   6.9000000e+00   9.7000000e+00   1.0400000e+01   6.4000000e+00   7.8000000e+00   5.8000000e+00   9.7000000e+00   6.4000000e+00   7.3000000e+00   7.9000000e+00   6.1000000e+00   5.9000000e+00   7.4000000e+00   7.7000000e+00   8.7000000e+00   9.4000000e+00   7.5000000e+00   6.2000000e+00   6.6000000e+00   9.2000000e+00   7.0000000e+00   6.7000000e+00   5.7000000e+00   7.4000000e+00   7.7000000e+00   7.3000000e+00   6.2000000e+00   7.9000000e+00   7.7000000e+00   7.3000000e+00   6.8000000e+00   6.8000000e+00   6.6000000e+00   5.9000000e+00   1.3000000e+00   7.0000000e-01   1.2000000e+00   1.1000000e+00   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.2000000e+00   1.2000000e+00   6.0000000e-01   8.0000000e-01   1.2000000e+00   1.1000000e+00   1.1000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   5.0000000e-01   6.0000000e-01   2.3000000e+00   1.6000000e+00   6.0000000e-01   5.0000000e-01   1.2000000e+00   4.0000000e-01   1.3000000e+00   4.0000000e-01   8.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.9000000e+00   6.3000000e+00   4.5000000e+00   4.4000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.9000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   
8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.7000000e+00   9.2000000e+00   8.5000000e+00   8.9000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   9.7000000e+00   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   9.4000000e+00   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   1.4000000e+00   1.2000000e+00   1.2000000e+00   1.1000000e+00   1.3000000e+00   1.7000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   1.4000000e+00   1.1000000e+00   1.2000000e+00   9.0000000e-01   1.8000000e+00   9.0000000e-01   1.5000000e+00   1.8000000e+00   1.3000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   1.1000000e+00   7.7000000e+00   7.0000000e+00   8.0000000e+00   6.3000000e+00   7.6000000e+00   6.5000000e+00   7.1000000e+00   4.6000000e+00   7.4000000e+00   5.6000000e+00   5.3000000e+00   6.4000000e+00   6.6000000e+00   7.1000000e+00   5.4000000e+00   7.2000000e+00   6.4000000e+00   6.0000000e+00   7.8000000e+00   5.9000000e+00   7.1000000e+00   6.4000000e+00   8.0000000e+00   7.0000000e+00   6.9000000e+00   7.2000000e+00   8.0000000e+00   8.2000000e+00   6.9000000e+00   5.4000000e+00   5.8000000e+00   5.6000000e+00   6.0000000e+00   7.8000000e+00   6.2000000e+00   6.5000000e+00   7.6000000e+00   7.5000000e+00   5.8000000e+00   6.1000000e+00   6.3000000e+00   6.9000000e+00   6.2000000e+00   4.8000000e+00   6.2000000e+00   5.9000000e+00   6.1000000e+00   6.7000000e+00   4.5000000e+00   6.1000000e+00   
9.3000000e+00   7.9000000e+00   9.9000000e+00   8.6000000e+00   9.3000000e+00   1.1100000e+01   6.4000000e+00   1.0300000e+01   9.6000000e+00   1.0000000e+01   8.2000000e+00   8.7000000e+00   9.2000000e+00   8.0000000e+00   8.3000000e+00   8.6000000e+00   8.6000000e+00   1.1000000e+01   1.2100000e+01   8.1000000e+00   9.5000000e+00   7.5000000e+00   1.1400000e+01   8.1000000e+00   9.0000000e+00   9.6000000e+00   7.8000000e+00   7.6000000e+00   9.1000000e+00   9.4000000e+00   1.0400000e+01   1.0700000e+01   9.2000000e+00   7.9000000e+00   8.3000000e+00   1.0900000e+01   8.7000000e+00   8.4000000e+00   7.4000000e+00   9.1000000e+00   9.4000000e+00   9.0000000e+00   7.9000000e+00   9.6000000e+00   9.4000000e+00   9.0000000e+00   8.5000000e+00   8.5000000e+00   8.3000000e+00   7.6000000e+00   9.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   1.5000000e+00   1.9000000e+00   1.0000000e+00   1.0000000e+00   1.3000000e+00   1.0000000e+00   1.7000000e+00   6.0000000e-01   9.0000000e-01   2.2000000e+00   1.5000000e+00   5.0000000e-01   8.0000000e-01   1.1000000e+00   9.0000000e-01   1.2000000e+00   1.1000000e+00   7.0000000e-01   5.9000000e+00   5.2000000e+00   6.2000000e+00   4.5000000e+00   5.8000000e+00   4.7000000e+00   5.3000000e+00   3.2000000e+00   5.6000000e+00   3.8000000e+00   3.7000000e+00   4.6000000e+00   4.8000000e+00   5.3000000e+00   3.6000000e+00   5.4000000e+00   4.6000000e+00   4.2000000e+00   6.0000000e+00   4.1000000e+00   5.3000000e+00   4.6000000e+00   6.2000000e+00   5.2000000e+00   5.1000000e+00   5.4000000e+00   6.2000000e+00   6.4000000e+00   5.1000000e+00   3.6000000e+00   4.0000000e+00   3.8000000e+00   4.2000000e+00   6.0000000e+00   4.4000000e+00   4.9000000e+00   5.8000000e+00   5.7000000e+00   4.0000000e+00   4.3000000e+00   4.5000000e+00   5.1000000e+00   4.4000000e+00   3.2000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   4.9000000e+00   2.7000000e+00   
4.3000000e+00   7.5000000e+00   6.1000000e+00   8.1000000e+00   6.8000000e+00   7.5000000e+00   9.3000000e+00   5.0000000e+00   8.5000000e+00   7.8000000e+00   8.8000000e+00   6.4000000e+00   6.9000000e+00   7.4000000e+00   6.2000000e+00   6.5000000e+00   6.8000000e+00   6.8000000e+00   9.8000000e+00   1.0300000e+01   6.3000000e+00   7.7000000e+00   5.7000000e+00   9.6000000e+00   6.3000000e+00   7.2000000e+00   7.8000000e+00   6.0000000e+00   5.8000000e+00   7.3000000e+00   7.6000000e+00   8.6000000e+00   9.5000000e+00   7.4000000e+00   6.1000000e+00   6.5000000e+00   9.1000000e+00   7.1000000e+00   6.6000000e+00   5.6000000e+00   7.3000000e+00   7.6000000e+00   7.2000000e+00   6.1000000e+00   7.8000000e+00   7.6000000e+00   7.2000000e+00   6.7000000e+00   6.7000000e+00   6.7000000e+00   5.8000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   1.2000000e+00   1.6000000e+00   2.0000000e+00   9.0000000e-01   1.1000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   7.0000000e-01   1.0000000e+00   2.1000000e+00   1.2000000e+00   1.0000000e+00   9.0000000e-01   1.0000000e+00   1.0000000e+00   9.0000000e-01   1.2000000e+00   8.0000000e-01   6.4000000e+00   5.7000000e+00   6.7000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   5.8000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   5.9000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   5.8000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   5.2000000e+00   6.3000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   
4.8000000e+00   8.0000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.1000000e+00   6.9000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.3000000e+00   7.3000000e+00   1.0100000e+01   1.0800000e+01   6.8000000e+00   8.2000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   7.7000000e+00   8.3000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   9.8000000e+00   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   7.4000000e+00   7.1000000e+00   6.1000000e+00   7.8000000e+00   8.1000000e+00   7.7000000e+00   6.6000000e+00   8.3000000e+00   8.1000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.0000000e+00   6.3000000e+00   6.0000000e-01   8.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   1.1000000e+00   1.5000000e+00   1.9000000e+00   4.0000000e-01   6.0000000e-01   1.3000000e+00   4.0000000e-01   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   1.1000000e+00   9.0000000e-01   1.4000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.1000000e+00   5.0000000e-01   6.5000000e+00   5.8000000e+00   6.6000000e+00   4.7000000e+00   6.0000000e+00   4.9000000e+00   6.1000000e+00   3.2000000e+00   5.8000000e+00   4.0000000e+00   3.7000000e+00   4.8000000e+00   5.0000000e+00   5.5000000e+00   3.8000000e+00   5.8000000e+00   4.8000000e+00   4.4000000e+00   6.2000000e+00   4.3000000e+00   5.9000000e+00   4.8000000e+00   6.4000000e+00   5.4000000e+00   5.3000000e+00   5.6000000e+00   6.4000000e+00   6.6000000e+00   5.3000000e+00   3.8000000e+00   4.2000000e+00   4.0000000e+00   4.4000000e+00   6.2000000e+00   4.6000000e+00   5.7000000e+00   6.2000000e+00   5.9000000e+00   4.2000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   4.6000000e+00   3.2000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.1000000e+00   2.9000000e+00   4.5000000e+00   
8.3000000e+00   6.3000000e+00   8.3000000e+00   7.0000000e+00   7.7000000e+00   9.5000000e+00   5.0000000e+00   8.7000000e+00   8.0000000e+00   9.6000000e+00   7.0000000e+00   7.1000000e+00   7.6000000e+00   6.4000000e+00   6.7000000e+00   7.4000000e+00   7.0000000e+00   1.0600000e+01   1.0500000e+01   6.5000000e+00   8.3000000e+00   5.9000000e+00   9.8000000e+00   6.5000000e+00   8.0000000e+00   8.4000000e+00   6.2000000e+00   6.0000000e+00   7.5000000e+00   7.8000000e+00   8.8000000e+00   1.0300000e+01   7.6000000e+00   6.3000000e+00   6.7000000e+00   9.3000000e+00   7.9000000e+00   7.0000000e+00   5.8000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.3000000e+00   8.4000000e+00   8.4000000e+00   7.4000000e+00   6.9000000e+00   6.9000000e+00   7.5000000e+00   6.0000000e+00   6.0000000e-01   6.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   1.3000000e+00   1.7000000e+00   8.0000000e-01   8.0000000e-01   1.1000000e+00   8.0000000e-01   1.5000000e+00   4.0000000e-01   5.0000000e-01   2.0000000e+00   1.3000000e+00   3.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   6.3000000e+00   5.6000000e+00   6.6000000e+00   4.9000000e+00   6.2000000e+00   5.1000000e+00   5.7000000e+00   3.4000000e+00   6.0000000e+00   4.2000000e+00   3.9000000e+00   5.0000000e+00   5.2000000e+00   5.7000000e+00   4.0000000e+00   5.8000000e+00   5.0000000e+00   4.6000000e+00   6.4000000e+00   4.5000000e+00   5.7000000e+00   5.0000000e+00   6.6000000e+00   5.6000000e+00   5.5000000e+00   5.8000000e+00   6.6000000e+00   6.8000000e+00   5.5000000e+00   4.0000000e+00   4.4000000e+00   4.2000000e+00   4.6000000e+00   6.4000000e+00   4.8000000e+00   5.1000000e+00   6.2000000e+00   6.1000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   4.8000000e+00   3.4000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   3.1000000e+00   4.7000000e+00   7.9000000e+00   6.5000000e+00   
8.5000000e+00   7.2000000e+00   7.9000000e+00   9.7000000e+00   5.2000000e+00   8.9000000e+00   8.2000000e+00   9.0000000e+00   6.8000000e+00   7.3000000e+00   7.8000000e+00   6.6000000e+00   6.9000000e+00   7.2000000e+00   7.2000000e+00   1.0000000e+01   1.0700000e+01   6.7000000e+00   8.1000000e+00   6.1000000e+00   1.0000000e+01   6.7000000e+00   7.6000000e+00   8.2000000e+00   6.4000000e+00   6.2000000e+00   7.7000000e+00   8.0000000e+00   9.0000000e+00   9.7000000e+00   7.8000000e+00   6.5000000e+00   6.9000000e+00   9.5000000e+00   7.3000000e+00   7.0000000e+00   6.0000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.5000000e+00   8.2000000e+00   8.0000000e+00   7.6000000e+00   7.1000000e+00   7.1000000e+00   6.9000000e+00   6.2000000e+00   2.0000000e-01   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   1.1000000e+00   8.0000000e-01   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.5000000e+00   2.0000000e-01   5.0000000e-01   2.2000000e+00   1.3000000e+00   7.0000000e-01   1.0000000e+00   1.1000000e+00   5.0000000e-01   1.0000000e+00   3.0000000e-01   5.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   4.0000000e+00   6.2000000e+00   4.4000000e+00   4.5000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   4.0000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.5000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   
9.9000000e+00   5.8000000e+00   9.1000000e+00   8.4000000e+00   9.0000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   1.0000000e+01   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.7000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.3000000e+00   2.0000000e-01   5.0000000e-01   2.0000000e+00   1.1000000e+00   9.0000000e-01   1.2000000e+00   9.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   4.0000000e+00   6.2000000e+00   4.4000000e+00   4.5000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   4.0000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.5000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.8000000e+00   9.1000000e+00   8.4000000e+00   
9.2000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   1.0200000e+01   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.9000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   2.0000000e-01   1.2000000e+00   1.6000000e+00   2.0000000e+00   5.0000000e-01   7.0000000e-01   1.4000000e+00   5.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   1.5000000e+00   6.0000000e-01   1.0000000e+00   1.5000000e+00   6.0000000e-01   1.0000000e+00   3.0000000e-01   1.2000000e+00   6.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.2000000e+00   3.5000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.8000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.4000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.3000000e+00   9.2000000e+00   8.5000000e+00   9.7000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   
7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0700000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   8.1000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0400000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   8.0000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.5000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.6000000e+00   6.5000000e+00   1.2000000e+00   1.6000000e+00   2.0000000e+00   3.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   1.5000000e+00   8.0000000e-01   1.0000000e+00   1.5000000e+00   4.0000000e-01   1.0000000e+00   5.0000000e-01   1.2000000e+00   6.0000000e-01   6.6000000e+00   5.9000000e+00   6.7000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   6.2000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   5.9000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   6.0000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   5.8000000e+00   6.3000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.4000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.7000000e+00   7.1000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.5000000e+00   7.3000000e+00   1.0700000e+01   1.0800000e+01   6.8000000e+00   
8.4000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   8.1000000e+00   8.5000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   1.0400000e+01   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   8.0000000e+00   7.1000000e+00   6.1000000e+00   7.8000000e+00   8.1000000e+00   7.7000000e+00   6.6000000e+00   8.5000000e+00   8.5000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.6000000e+00   6.3000000e+00   1.2000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.8000000e+00   5.0000000e-01   8.0000000e-01   2.3000000e+00   1.6000000e+00   8.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e+00   5.3000000e+00   6.3000000e+00   4.6000000e+00   5.9000000e+00   4.8000000e+00   5.4000000e+00   3.9000000e+00   5.7000000e+00   4.3000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.4000000e+00   3.7000000e+00   5.5000000e+00   4.7000000e+00   4.3000000e+00   6.1000000e+00   4.2000000e+00   5.4000000e+00   4.7000000e+00   6.3000000e+00   5.3000000e+00   5.2000000e+00   5.5000000e+00   6.3000000e+00   6.5000000e+00   5.2000000e+00   3.7000000e+00   4.1000000e+00   3.9000000e+00   4.3000000e+00   6.1000000e+00   4.5000000e+00   4.8000000e+00   5.9000000e+00   5.8000000e+00   4.1000000e+00   4.4000000e+00   4.6000000e+00   5.2000000e+00   4.5000000e+00   3.9000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.0000000e+00   3.4000000e+00   4.4000000e+00   7.6000000e+00   6.2000000e+00   8.2000000e+00   6.9000000e+00   7.6000000e+00   9.4000000e+00   5.7000000e+00   8.6000000e+00   7.9000000e+00   8.7000000e+00   6.5000000e+00   7.0000000e+00   7.5000000e+00   6.3000000e+00   6.6000000e+00   6.9000000e+00   6.9000000e+00   9.7000000e+00   1.0400000e+01   6.4000000e+00   7.8000000e+00   5.8000000e+00   9.7000000e+00   6.4000000e+00   7.3000000e+00   7.9000000e+00   6.1000000e+00   
5.9000000e+00   7.4000000e+00   7.7000000e+00   8.7000000e+00   9.4000000e+00   7.5000000e+00   6.2000000e+00   6.6000000e+00   9.2000000e+00   7.0000000e+00   6.7000000e+00   5.7000000e+00   7.4000000e+00   7.7000000e+00   7.3000000e+00   6.2000000e+00   7.9000000e+00   7.7000000e+00   7.3000000e+00   6.8000000e+00   6.8000000e+00   6.6000000e+00   5.9000000e+00   6.0000000e-01   1.3000000e+00   1.5000000e+00   1.2000000e+00   1.3000000e+00   2.2000000e+00   9.0000000e-01   1.2000000e+00   2.9000000e+00   2.0000000e+00   1.4000000e+00   1.1000000e+00   1.8000000e+00   6.0000000e-01   1.7000000e+00   6.0000000e-01   1.2000000e+00   7.2000000e+00   6.5000000e+00   7.5000000e+00   5.8000000e+00   7.1000000e+00   6.0000000e+00   6.6000000e+00   4.7000000e+00   6.9000000e+00   5.1000000e+00   5.2000000e+00   5.9000000e+00   6.1000000e+00   6.6000000e+00   4.9000000e+00   6.7000000e+00   5.9000000e+00   5.5000000e+00   7.3000000e+00   5.4000000e+00   6.6000000e+00   5.9000000e+00   7.5000000e+00   6.5000000e+00   6.4000000e+00   6.7000000e+00   7.5000000e+00   7.7000000e+00   6.4000000e+00   4.9000000e+00   5.3000000e+00   5.1000000e+00   5.5000000e+00   7.3000000e+00   5.7000000e+00   6.0000000e+00   7.1000000e+00   7.0000000e+00   5.3000000e+00   5.6000000e+00   5.8000000e+00   6.4000000e+00   5.7000000e+00   4.7000000e+00   5.7000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   4.2000000e+00   5.6000000e+00   8.8000000e+00   7.4000000e+00   9.4000000e+00   8.1000000e+00   8.8000000e+00   1.0600000e+01   6.5000000e+00   9.8000000e+00   9.1000000e+00   9.5000000e+00   7.7000000e+00   8.2000000e+00   8.7000000e+00   7.5000000e+00   7.8000000e+00   8.1000000e+00   8.1000000e+00   1.0100000e+01   1.1600000e+01   7.6000000e+00   9.0000000e+00   7.0000000e+00   1.0900000e+01   7.6000000e+00   8.5000000e+00   9.1000000e+00   7.3000000e+00   7.1000000e+00   8.6000000e+00   8.9000000e+00   9.9000000e+00   9.8000000e+00   8.7000000e+00   7.4000000e+00   7.8000000e+00   
1.0400000e+01   8.2000000e+00   7.9000000e+00   6.9000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.4000000e+00   9.1000000e+00   8.9000000e+00   8.5000000e+00   8.0000000e+00   8.0000000e+00   7.8000000e+00   7.1000000e+00   1.9000000e+00   1.7000000e+00   8.0000000e-01   1.9000000e+00   2.4000000e+00   1.3000000e+00   1.4000000e+00   3.1000000e+00   2.2000000e+00   1.8000000e+00   1.5000000e+00   2.0000000e+00   1.0000000e+00   1.9000000e+00   8.0000000e-01   1.4000000e+00   7.0000000e+00   6.3000000e+00   7.3000000e+00   5.6000000e+00   6.9000000e+00   5.8000000e+00   6.4000000e+00   5.1000000e+00   6.7000000e+00   5.5000000e+00   5.6000000e+00   5.7000000e+00   5.9000000e+00   6.4000000e+00   4.7000000e+00   6.5000000e+00   5.7000000e+00   5.3000000e+00   7.1000000e+00   5.2000000e+00   6.4000000e+00   5.7000000e+00   7.3000000e+00   6.3000000e+00   6.2000000e+00   6.5000000e+00   7.3000000e+00   7.5000000e+00   6.2000000e+00   4.7000000e+00   5.1000000e+00   4.9000000e+00   5.3000000e+00   7.1000000e+00   5.7000000e+00   5.8000000e+00   6.9000000e+00   6.8000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   5.5000000e+00   5.1000000e+00   5.5000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   4.6000000e+00   5.4000000e+00   8.6000000e+00   7.2000000e+00   9.2000000e+00   7.9000000e+00   8.6000000e+00   1.0400000e+01   6.9000000e+00   9.6000000e+00   8.9000000e+00   9.3000000e+00   7.5000000e+00   8.0000000e+00   8.5000000e+00   7.3000000e+00   7.6000000e+00   7.9000000e+00   7.9000000e+00   9.9000000e+00   1.1400000e+01   7.4000000e+00   8.8000000e+00   6.8000000e+00   1.0700000e+01   7.4000000e+00   8.3000000e+00   8.9000000e+00   7.1000000e+00   6.9000000e+00   8.4000000e+00   8.7000000e+00   9.7000000e+00   9.6000000e+00   8.5000000e+00   7.2000000e+00   7.6000000e+00   1.0200000e+01   8.0000000e+00   7.7000000e+00   6.7000000e+00   8.4000000e+00   8.7000000e+00   8.3000000e+00   7.2000000e+00   8.9000000e+00   
8.7000000e+00   8.3000000e+00   7.8000000e+00   7.8000000e+00   7.6000000e+00   6.9000000e+00   6.0000000e-01   1.3000000e+00   0.0000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   5.0000000e-01   6.7000000e+00   6.0000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   6.3000000e+00   3.4000000e+00   6.2000000e+00   4.4000000e+00   4.1000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   6.1000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.9000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.6000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.5000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.2000000e+00   9.1000000e+00   8.4000000e+00   9.8000000e+00   7.2000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.6000000e+00   7.4000000e+00   1.0800000e+01   1.0900000e+01   6.9000000e+00   8.5000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   8.2000000e+00   8.6000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   1.0500000e+01   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   8.1000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.6000000e+00   8.6000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.7000000e+00   6.4000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   
5.0000000e-01   1.6000000e+00   7.0000000e-01   1.1000000e+00   1.6000000e+00   7.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   3.0000000e-01   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.3000000e+00   3.8000000e+00   6.4000000e+00   4.6000000e+00   4.3000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.9000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   3.8000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.5000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.6000000e+00   9.3000000e+00   8.6000000e+00   9.8000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   1.0800000e+01   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.2000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   1.0500000e+01   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   8.1000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.6000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.7000000e+00   6.6000000e+00   1.3000000e+00   1.6000000e+00   7.0000000e-01   6.0000000e-01   2.3000000e+00   1.4000000e+00   1.2000000e+00   1.5000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   8.0000000e-01   6.4000000e+00   5.7000000e+00   
6.7000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   5.8000000e+00   4.5000000e+00   6.1000000e+00   4.9000000e+00   5.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   5.9000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   5.8000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   5.1000000e+00   5.2000000e+00   6.3000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   4.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   4.0000000e+00   4.8000000e+00   8.0000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   6.3000000e+00   9.0000000e+00   8.3000000e+00   8.9000000e+00   6.9000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.3000000e+00   7.3000000e+00   9.9000000e+00   1.0800000e+01   6.8000000e+00   8.2000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   7.7000000e+00   8.3000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   9.6000000e+00   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   7.4000000e+00   7.1000000e+00   6.1000000e+00   7.8000000e+00   8.1000000e+00   7.7000000e+00   6.6000000e+00   8.3000000e+00   8.1000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.0000000e+00   6.3000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   5.0000000e-01   6.7000000e+00   6.0000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   6.3000000e+00   3.4000000e+00   6.2000000e+00   4.4000000e+00   4.1000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   
6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   6.1000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.9000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.6000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.5000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.2000000e+00   9.1000000e+00   8.4000000e+00   9.8000000e+00   7.2000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.6000000e+00   7.4000000e+00   1.0800000e+01   1.0900000e+01   6.9000000e+00   8.5000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   8.2000000e+00   8.6000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   1.0500000e+01   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   8.1000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.6000000e+00   8.6000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.7000000e+00   6.4000000e+00   1.3000000e+00   1.2000000e+00   9.0000000e-01   2.0000000e-01   1.8000000e+00   2.3000000e+00   6.0000000e-01   1.8000000e+00   5.0000000e-01   1.8000000e+00   1.0000000e+00   7.4000000e+00   6.7000000e+00   7.5000000e+00   5.6000000e+00   6.9000000e+00   5.8000000e+00   7.0000000e+00   3.9000000e+00   6.7000000e+00   4.9000000e+00   4.6000000e+00   5.7000000e+00   5.9000000e+00   6.4000000e+00   4.7000000e+00   6.7000000e+00   5.7000000e+00   5.3000000e+00   7.1000000e+00   5.2000000e+00   6.8000000e+00   5.7000000e+00   7.3000000e+00   6.3000000e+00   6.2000000e+00   6.5000000e+00   7.3000000e+00   7.5000000e+00   6.2000000e+00   
4.7000000e+00   5.1000000e+00   4.9000000e+00   5.3000000e+00   7.1000000e+00   5.5000000e+00   6.6000000e+00   7.1000000e+00   6.8000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   5.5000000e+00   4.1000000e+00   5.5000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   3.8000000e+00   5.4000000e+00   9.2000000e+00   7.2000000e+00   9.2000000e+00   7.9000000e+00   8.6000000e+00   1.0400000e+01   5.7000000e+00   9.6000000e+00   8.9000000e+00   1.0500000e+01   7.9000000e+00   8.0000000e+00   8.5000000e+00   7.3000000e+00   7.6000000e+00   8.3000000e+00   7.9000000e+00   1.1500000e+01   1.1400000e+01   7.4000000e+00   9.2000000e+00   6.8000000e+00   1.0700000e+01   7.4000000e+00   8.9000000e+00   9.3000000e+00   7.1000000e+00   6.9000000e+00   8.4000000e+00   8.7000000e+00   9.7000000e+00   1.1200000e+01   8.5000000e+00   7.2000000e+00   7.6000000e+00   1.0200000e+01   8.8000000e+00   7.9000000e+00   6.7000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.2000000e+00   9.3000000e+00   9.3000000e+00   8.3000000e+00   7.8000000e+00   7.8000000e+00   8.4000000e+00   6.9000000e+00   5.0000000e-01   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   3.8000000e+00   6.2000000e+00   4.4000000e+00   4.3000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.8000000e+00   
5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.6000000e+00   9.1000000e+00   8.4000000e+00   9.2000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   1.0200000e+01   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.9000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   1.7000000e+00   1.0000000e+00   6.0000000e-01   1.1000000e+00   8.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   4.0000000e-01   6.8000000e+00   6.1000000e+00   7.1000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   6.2000000e+00   3.9000000e+00   6.5000000e+00   4.7000000e+00   4.4000000e+00   5.5000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.3000000e+00   5.5000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.2000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.3000000e+00   7.1000000e+00   7.3000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.3000000e+00   5.6000000e+00   6.7000000e+00   6.6000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   5.3000000e+00   3.9000000e+00   5.3000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   8.4000000e+00   7.0000000e+00   9.0000000e+00   7.7000000e+00   8.4000000e+00   1.0200000e+01   5.7000000e+00   9.4000000e+00   8.7000000e+00   9.3000000e+00   
7.3000000e+00   7.8000000e+00   8.3000000e+00   7.1000000e+00   7.4000000e+00   7.7000000e+00   7.7000000e+00   1.0300000e+01   1.1200000e+01   7.2000000e+00   8.6000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.1000000e+00   8.7000000e+00   6.9000000e+00   6.7000000e+00   8.2000000e+00   8.5000000e+00   9.5000000e+00   1.0000000e+01   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0000000e+01   7.8000000e+00   7.5000000e+00   6.5000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   7.0000000e+00   8.7000000e+00   8.5000000e+00   8.1000000e+00   7.6000000e+00   7.6000000e+00   7.4000000e+00   6.7000000e+00   1.1000000e+00   2.3000000e+00   2.8000000e+00   1.1000000e+00   2.5000000e+00   1.2000000e+00   2.5000000e+00   1.7000000e+00   7.9000000e+00   7.2000000e+00   8.0000000e+00   4.7000000e+00   7.0000000e+00   5.9000000e+00   7.5000000e+00   3.2000000e+00   7.0000000e+00   4.8000000e+00   3.7000000e+00   6.2000000e+00   5.0000000e+00   6.7000000e+00   5.0000000e+00   7.2000000e+00   6.2000000e+00   5.2000000e+00   6.2000000e+00   4.7000000e+00   7.3000000e+00   5.8000000e+00   6.8000000e+00   6.4000000e+00   6.5000000e+00   7.0000000e+00   7.4000000e+00   8.0000000e+00   6.5000000e+00   4.4000000e+00   4.4000000e+00   4.2000000e+00   5.2000000e+00   7.0000000e+00   6.0000000e+00   7.1000000e+00   7.6000000e+00   5.9000000e+00   5.6000000e+00   4.9000000e+00   5.3000000e+00   6.7000000e+00   5.2000000e+00   3.2000000e+00   5.4000000e+00   5.7000000e+00   5.7000000e+00   6.3000000e+00   3.3000000e+00   5.5000000e+00   9.7000000e+00   7.1000000e+00   9.7000000e+00   8.2000000e+00   9.1000000e+00   1.0900000e+01   5.2000000e+00   9.9000000e+00   8.4000000e+00   1.1000000e+01   8.4000000e+00   7.9000000e+00   9.0000000e+00   6.8000000e+00   7.7000000e+00   8.8000000e+00   8.4000000e+00   1.2000000e+01   1.1100000e+01   6.5000000e+00   9.7000000e+00   6.9000000e+00   1.0800000e+01   7.3000000e+00   9.4000000e+00   9.8000000e+00   7.2000000e+00   
7.4000000e+00   8.5000000e+00   9.2000000e+00   9.8000000e+00   1.1700000e+01   8.6000000e+00   7.3000000e+00   7.3000000e+00   1.0700000e+01   9.3000000e+00   8.4000000e+00   7.2000000e+00   9.1000000e+00   9.4000000e+00   9.0000000e+00   7.1000000e+00   9.8000000e+00   9.8000000e+00   8.8000000e+00   7.3000000e+00   8.3000000e+00   8.9000000e+00   7.4000000e+00   1.6000000e+00   2.1000000e+00   8.0000000e-01   1.6000000e+00   3.0000000e-01   1.6000000e+00   8.0000000e-01   7.2000000e+00   6.5000000e+00   7.5000000e+00   5.8000000e+00   7.1000000e+00   6.0000000e+00   6.8000000e+00   4.1000000e+00   6.9000000e+00   5.1000000e+00   4.8000000e+00   5.9000000e+00   6.1000000e+00   6.6000000e+00   4.9000000e+00   6.7000000e+00   5.9000000e+00   5.5000000e+00   7.3000000e+00   5.4000000e+00   6.6000000e+00   5.9000000e+00   7.5000000e+00   6.5000000e+00   6.4000000e+00   6.7000000e+00   7.5000000e+00   7.7000000e+00   6.4000000e+00   4.9000000e+00   5.3000000e+00   5.1000000e+00   5.5000000e+00   7.3000000e+00   5.7000000e+00   6.4000000e+00   7.1000000e+00   7.0000000e+00   5.3000000e+00   5.6000000e+00   5.8000000e+00   6.4000000e+00   5.7000000e+00   4.3000000e+00   5.7000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   4.0000000e+00   5.6000000e+00   9.0000000e+00   7.4000000e+00   9.4000000e+00   8.1000000e+00   8.8000000e+00   1.0600000e+01   5.9000000e+00   9.8000000e+00   9.1000000e+00   1.0300000e+01   7.7000000e+00   8.2000000e+00   8.7000000e+00   7.5000000e+00   7.8000000e+00   8.1000000e+00   8.1000000e+00   1.1300000e+01   1.1600000e+01   7.6000000e+00   9.0000000e+00   7.0000000e+00   1.0900000e+01   7.6000000e+00   8.7000000e+00   9.1000000e+00   7.3000000e+00   7.1000000e+00   8.6000000e+00   8.9000000e+00   9.9000000e+00   1.1000000e+01   8.7000000e+00   7.4000000e+00   7.8000000e+00   1.0400000e+01   8.6000000e+00   7.9000000e+00   6.9000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.4000000e+00   9.1000000e+00   9.1000000e+00   
8.5000000e+00   8.0000000e+00   8.0000000e+00   8.2000000e+00   7.1000000e+00   9.0000000e-01   1.2000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   8.0000000e-01   6.2000000e+00   5.5000000e+00   6.5000000e+00   4.8000000e+00   6.1000000e+00   5.0000000e+00   5.6000000e+00   3.3000000e+00   5.9000000e+00   4.1000000e+00   3.8000000e+00   4.9000000e+00   5.1000000e+00   5.6000000e+00   3.9000000e+00   5.7000000e+00   4.9000000e+00   4.5000000e+00   6.3000000e+00   4.4000000e+00   5.6000000e+00   4.9000000e+00   6.5000000e+00   5.5000000e+00   5.4000000e+00   5.7000000e+00   6.5000000e+00   6.7000000e+00   5.4000000e+00   3.9000000e+00   4.3000000e+00   4.1000000e+00   4.5000000e+00   6.3000000e+00   4.7000000e+00   5.0000000e+00   6.1000000e+00   6.0000000e+00   4.3000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   4.7000000e+00   3.3000000e+00   4.7000000e+00   4.4000000e+00   4.6000000e+00   5.2000000e+00   3.0000000e+00   4.6000000e+00   7.8000000e+00   6.4000000e+00   8.4000000e+00   7.1000000e+00   7.8000000e+00   9.6000000e+00   5.1000000e+00   8.8000000e+00   8.1000000e+00   8.7000000e+00   6.7000000e+00   7.2000000e+00   7.7000000e+00   6.5000000e+00   6.8000000e+00   7.1000000e+00   7.1000000e+00   9.7000000e+00   1.0600000e+01   6.6000000e+00   8.0000000e+00   6.0000000e+00   9.9000000e+00   6.6000000e+00   7.5000000e+00   8.1000000e+00   6.3000000e+00   6.1000000e+00   7.6000000e+00   7.9000000e+00   8.9000000e+00   9.4000000e+00   7.7000000e+00   6.4000000e+00   6.8000000e+00   9.4000000e+00   7.2000000e+00   6.9000000e+00   5.9000000e+00   7.6000000e+00   7.9000000e+00   7.5000000e+00   6.4000000e+00   8.1000000e+00   7.9000000e+00   7.5000000e+00   7.0000000e+00   7.0000000e+00   6.8000000e+00   6.1000000e+00   1.7000000e+00   5.0000000e-01   1.8000000e+00   9.0000000e-01   1.3000000e+00   6.3000000e+00   5.6000000e+00   6.6000000e+00   4.9000000e+00   6.2000000e+00   5.1000000e+00   5.7000000e+00   3.6000000e+00   6.0000000e+00   
4.2000000e+00   4.1000000e+00   5.0000000e+00   5.2000000e+00   5.7000000e+00   4.0000000e+00   5.8000000e+00   5.0000000e+00   4.6000000e+00   6.4000000e+00   4.5000000e+00   5.7000000e+00   5.0000000e+00   6.6000000e+00   5.6000000e+00   5.5000000e+00   5.8000000e+00   6.6000000e+00   6.8000000e+00   5.5000000e+00   4.0000000e+00   4.4000000e+00   4.2000000e+00   4.6000000e+00   6.4000000e+00   4.8000000e+00   5.1000000e+00   6.2000000e+00   6.1000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   4.8000000e+00   3.6000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   3.1000000e+00   4.7000000e+00   7.9000000e+00   6.5000000e+00   8.5000000e+00   7.2000000e+00   7.9000000e+00   9.7000000e+00   5.4000000e+00   8.9000000e+00   8.2000000e+00   8.6000000e+00   6.8000000e+00   7.3000000e+00   7.8000000e+00   6.6000000e+00   6.9000000e+00   7.2000000e+00   7.2000000e+00   9.2000000e+00   1.0700000e+01   6.7000000e+00   8.1000000e+00   6.1000000e+00   1.0000000e+01   6.7000000e+00   7.6000000e+00   8.2000000e+00   6.4000000e+00   6.2000000e+00   7.7000000e+00   8.0000000e+00   9.0000000e+00   8.9000000e+00   7.8000000e+00   6.5000000e+00   6.9000000e+00   9.5000000e+00   7.3000000e+00   7.0000000e+00   6.0000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.5000000e+00   8.2000000e+00   8.0000000e+00   7.6000000e+00   7.1000000e+00   7.1000000e+00   6.9000000e+00   6.2000000e+00   1.4000000e+00   5.0000000e-01   1.4000000e+00   6.0000000e-01   6.8000000e+00   6.1000000e+00   6.9000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   6.4000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   6.1000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   6.2000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   
4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   6.0000000e+00   6.5000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.6000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.9000000e+00   7.3000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.7000000e+00   7.3000000e+00   1.0900000e+01   1.0800000e+01   6.8000000e+00   8.6000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   8.3000000e+00   8.7000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   1.0600000e+01   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   8.2000000e+00   7.3000000e+00   6.1000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.6000000e+00   8.7000000e+00   8.7000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.8000000e+00   6.3000000e+00   1.3000000e+00   4.0000000e-01   8.0000000e-01   6.8000000e+00   6.1000000e+00   7.1000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   6.2000000e+00   4.1000000e+00   6.5000000e+00   4.7000000e+00   4.6000000e+00   5.5000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.3000000e+00   5.5000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.2000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.3000000e+00   7.1000000e+00   7.3000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.3000000e+00   5.6000000e+00   6.7000000e+00   6.6000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   5.3000000e+00   4.1000000e+00   5.3000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   8.4000000e+00   7.0000000e+00   
9.0000000e+00   7.7000000e+00   8.4000000e+00   1.0200000e+01   5.9000000e+00   9.4000000e+00   8.7000000e+00   9.1000000e+00   7.3000000e+00   7.8000000e+00   8.3000000e+00   7.1000000e+00   7.4000000e+00   7.7000000e+00   7.7000000e+00   9.7000000e+00   1.1200000e+01   7.2000000e+00   8.6000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.1000000e+00   8.7000000e+00   6.9000000e+00   6.7000000e+00   8.2000000e+00   8.5000000e+00   9.5000000e+00   9.4000000e+00   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0000000e+01   7.8000000e+00   7.5000000e+00   6.5000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   7.0000000e+00   8.7000000e+00   8.5000000e+00   8.1000000e+00   7.6000000e+00   7.6000000e+00   7.4000000e+00   6.7000000e+00   1.3000000e+00   5.0000000e-01   6.9000000e+00   6.2000000e+00   7.2000000e+00   5.5000000e+00   6.8000000e+00   5.7000000e+00   6.5000000e+00   3.8000000e+00   6.6000000e+00   4.8000000e+00   4.5000000e+00   5.6000000e+00   5.8000000e+00   6.3000000e+00   4.6000000e+00   6.4000000e+00   5.6000000e+00   5.2000000e+00   7.0000000e+00   5.1000000e+00   6.3000000e+00   5.6000000e+00   7.2000000e+00   6.2000000e+00   6.1000000e+00   6.4000000e+00   7.2000000e+00   7.4000000e+00   6.1000000e+00   4.6000000e+00   5.0000000e+00   4.8000000e+00   5.2000000e+00   7.0000000e+00   5.4000000e+00   6.1000000e+00   6.8000000e+00   6.7000000e+00   5.0000000e+00   5.3000000e+00   5.5000000e+00   6.1000000e+00   5.4000000e+00   4.0000000e+00   5.4000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   3.7000000e+00   5.3000000e+00   8.7000000e+00   7.1000000e+00   9.1000000e+00   7.8000000e+00   8.5000000e+00   1.0300000e+01   5.6000000e+00   9.5000000e+00   8.8000000e+00   1.0000000e+01   7.4000000e+00   7.9000000e+00   8.4000000e+00   7.2000000e+00   7.5000000e+00   7.8000000e+00   7.8000000e+00   1.1000000e+01   1.1300000e+01   7.3000000e+00   8.7000000e+00   6.7000000e+00   1.0600000e+01   7.3000000e+00   8.4000000e+00   
8.8000000e+00   7.0000000e+00   6.8000000e+00   8.3000000e+00   8.6000000e+00   9.6000000e+00   1.0700000e+01   8.4000000e+00   7.1000000e+00   7.5000000e+00   1.0100000e+01   8.3000000e+00   7.6000000e+00   6.6000000e+00   8.3000000e+00   8.6000000e+00   8.2000000e+00   7.1000000e+00   8.8000000e+00   8.8000000e+00   8.2000000e+00   7.7000000e+00   7.7000000e+00   7.9000000e+00   6.8000000e+00   8.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   4.3000000e+00   6.3000000e+00   4.7000000e+00   4.8000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   4.3000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.8000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   6.1000000e+00   9.2000000e+00   8.5000000e+00   8.9000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   9.7000000e+00   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   9.4000000e+00   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   
6.5000000e+00   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.7000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.6000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.5000000e+00   9.2000000e+00   8.5000000e+00   9.5000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0500000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0200000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.8000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.4000000e+00   6.5000000e+00   9.0000000e-01   5.0000000e-01   3.2000000e+00   1.1000000e+00   2.0000000e+00   1.0000000e+00   4.7000000e+00   9.0000000e-01   3.1000000e+00   4.8000000e+00   1.9000000e+00   3.1000000e+00   1.2000000e+00   2.9000000e+00   7.0000000e-01   1.9000000e+00   2.7000000e+00   2.1000000e+00   3.2000000e+00   1.6000000e+00   2.1000000e+00   1.7000000e+00   1.5000000e+00   1.4000000e+00   
9.0000000e-01   7.0000000e-01   1.1000000e+00   1.6000000e+00   3.5000000e+00   3.5000000e+00   3.7000000e+00   2.7000000e+00   2.1000000e+00   2.1000000e+00   1.6000000e+00   5.0000000e-01   2.0000000e+00   2.3000000e+00   3.0000000e+00   2.6000000e+00   1.2000000e+00   2.7000000e+00   4.7000000e+00   2.5000000e+00   2.2000000e+00   2.2000000e+00   1.6000000e+00   4.6000000e+00   2.4000000e+00   3.2000000e+00   2.6000000e+00   2.2000000e+00   2.3000000e+00   2.6000000e+00   3.4000000e+00   3.3000000e+00   2.6000000e+00   2.5000000e+00   3.1000000e+00   1.5000000e+00   2.2000000e+00   1.9000000e+00   2.9000000e+00   3.0000000e+00   2.1000000e+00   1.9000000e+00   4.1000000e+00   4.4000000e+00   2.4000000e+00   2.0000000e+00   2.6000000e+00   3.7000000e+00   1.8000000e+00   2.1000000e+00   1.9000000e+00   1.7000000e+00   1.7000000e+00   2.6000000e+00   1.7000000e+00   2.7000000e+00   3.8000000e+00   2.7000000e+00   1.6000000e+00   2.4000000e+00   3.2000000e+00   2.8000000e+00   1.9000000e+00   1.7000000e+00   1.6000000e+00   2.3000000e+00   1.5000000e+00   2.6000000e+00   2.3000000e+00   2.5000000e+00   1.9000000e+00   2.2000000e+00   1.8000000e+00   2.6000000e+00   2.1000000e+00   1.0000000e+00   2.5000000e+00   6.0000000e-01   1.3000000e+00   5.0000000e-01   4.0000000e+00   8.0000000e-01   2.4000000e+00   4.1000000e+00   1.0000000e+00   2.4000000e+00   9.0000000e-01   2.2000000e+00   6.0000000e-01   1.0000000e+00   2.0000000e+00   1.2000000e+00   2.5000000e+00   1.1000000e+00   1.4000000e+00   1.2000000e+00   1.2000000e+00   7.0000000e-01   6.0000000e-01   1.2000000e+00   1.2000000e+00   7.0000000e-01   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.0000000e+00   1.6000000e+00   1.2000000e+00   7.0000000e-01   6.0000000e-01   1.3000000e+00   1.6000000e+00   2.3000000e+00   1.9000000e+00   7.0000000e-01   2.0000000e+00   4.0000000e+00   1.8000000e+00   1.5000000e+00   1.5000000e+00   9.0000000e-01   3.9000000e+00   1.7000000e+00   2.7000000e+00   2.1000000e+00   
2.9000000e+00   1.8000000e+00   2.3000000e+00   4.1000000e+00   2.4000000e+00   3.3000000e+00   2.6000000e+00   3.8000000e+00   1.2000000e+00   1.7000000e+00   2.2000000e+00   2.4000000e+00   2.5000000e+00   1.6000000e+00   1.6000000e+00   4.8000000e+00   5.1000000e+00   1.9000000e+00   2.5000000e+00   2.1000000e+00   4.4000000e+00   1.3000000e+00   2.2000000e+00   2.6000000e+00   1.2000000e+00   1.2000000e+00   2.1000000e+00   2.4000000e+00   3.4000000e+00   4.5000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   3.9000000e+00   2.3000000e+00   1.4000000e+00   1.2000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   2.1000000e+00   2.6000000e+00   2.6000000e+00   2.0000000e+00   1.7000000e+00   1.5000000e+00   2.1000000e+00   1.6000000e+00   3.3000000e+00   1.0000000e+00   2.1000000e+00   1.1000000e+00   4.8000000e+00   1.0000000e+00   3.2000000e+00   4.9000000e+00   1.8000000e+00   3.2000000e+00   1.3000000e+00   3.0000000e+00   8.0000000e-01   1.8000000e+00   2.8000000e+00   2.0000000e+00   3.3000000e+00   1.5000000e+00   2.2000000e+00   1.2000000e+00   1.6000000e+00   1.5000000e+00   1.0000000e+00   6.0000000e-01   6.0000000e-01   1.5000000e+00   3.6000000e+00   3.6000000e+00   3.8000000e+00   2.8000000e+00   1.6000000e+00   2.0000000e+00   1.7000000e+00   4.0000000e-01   2.1000000e+00   2.4000000e+00   3.1000000e+00   2.7000000e+00   1.3000000e+00   2.8000000e+00   4.8000000e+00   2.6000000e+00   2.3000000e+00   2.3000000e+00   1.7000000e+00   4.7000000e+00   2.5000000e+00   2.9000000e+00   2.1000000e+00   1.9000000e+00   1.8000000e+00   2.1000000e+00   3.1000000e+00   3.2000000e+00   2.3000000e+00   2.0000000e+00   3.0000000e+00   1.2000000e+00   1.7000000e+00   1.4000000e+00   2.4000000e+00   2.5000000e+00   1.8000000e+00   1.4000000e+00   4.0000000e+00   4.1000000e+00   1.9000000e+00   1.7000000e+00   2.1000000e+00   3.4000000e+00   1.3000000e+00   1.8000000e+00   1.8000000e+00   1.4000000e+00   1.2000000e+00   2.1000000e+00   1.4000000e+00   
2.4000000e+00   3.7000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   2.9000000e+00   2.5000000e+00   1.4000000e+00   1.4000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   1.4000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   1.6000000e+00   2.3000000e+00   1.2000000e+00   2.8000000e+00   1.7000000e+00   2.3000000e+00   9.0000000e-01   1.6000000e+00   1.5000000e+00   9.0000000e-01   2.0000000e+00   1.1000000e+00   2.5000000e+00   1.5000000e+00   1.1000000e+00   1.5000000e+00   6.0000000e-01   2.6000000e+00   1.1000000e+00   2.1000000e+00   1.9000000e+00   1.8000000e+00   2.3000000e+00   2.7000000e+00   3.3000000e+00   1.8000000e+00   1.3000000e+00   5.0000000e-01   7.0000000e-01   9.0000000e-01   2.3000000e+00   1.5000000e+00   2.4000000e+00   2.9000000e+00   1.2000000e+00   9.0000000e-01   2.0000000e-01   8.0000000e-01   2.0000000e+00   7.0000000e-01   1.5000000e+00   7.0000000e-01   1.2000000e+00   1.0000000e+00   1.6000000e+00   1.8000000e+00   8.0000000e-01   5.0000000e+00   2.4000000e+00   5.0000000e+00   3.5000000e+00   4.4000000e+00   6.2000000e+00   1.7000000e+00   5.2000000e+00   3.7000000e+00   6.3000000e+00   3.7000000e+00   3.2000000e+00   4.3000000e+00   2.1000000e+00   3.0000000e+00   4.1000000e+00   3.7000000e+00   7.3000000e+00   6.4000000e+00   1.8000000e+00   5.0000000e+00   2.2000000e+00   6.1000000e+00   2.6000000e+00   4.7000000e+00   5.1000000e+00   2.5000000e+00   2.7000000e+00   3.8000000e+00   4.5000000e+00   5.1000000e+00   7.0000000e+00   3.9000000e+00   2.6000000e+00   2.6000000e+00   6.0000000e+00   4.6000000e+00   3.7000000e+00   2.5000000e+00   4.4000000e+00   4.7000000e+00   4.3000000e+00   2.4000000e+00   5.1000000e+00   5.1000000e+00   4.1000000e+00   2.6000000e+00   3.6000000e+00   4.2000000e+00   2.7000000e+00   1.1000000e+00   9.0000000e-01   3.8000000e+00   4.0000000e-01   2.2000000e+00   3.9000000e+00   1.2000000e+00   2.2000000e+00   7.0000000e-01   
2.2000000e+00   8.0000000e-01   1.2000000e+00   1.8000000e+00   1.0000000e+00   2.3000000e+00   1.5000000e+00   1.2000000e+00   8.0000000e-01   8.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   7.0000000e-01   2.6000000e+00   2.6000000e+00   2.8000000e+00   1.8000000e+00   1.2000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.1000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   7.0000000e-01   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.7000000e+00   1.5000000e+00   9.0000000e-01   3.7000000e+00   1.5000000e+00   3.1000000e+00   1.7000000e+00   2.7000000e+00   1.6000000e+00   2.1000000e+00   3.9000000e+00   2.2000000e+00   2.9000000e+00   2.0000000e+00   4.0000000e+00   1.4000000e+00   1.3000000e+00   2.0000000e+00   2.0000000e+00   2.1000000e+00   2.0000000e+00   1.4000000e+00   5.0000000e+00   4.5000000e+00   1.5000000e+00   2.7000000e+00   1.7000000e+00   3.8000000e+00   9.0000000e-01   2.4000000e+00   2.8000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   2.2000000e+00   2.8000000e+00   4.7000000e+00   1.8000000e+00   7.0000000e-01   1.7000000e+00   3.7000000e+00   2.7000000e+00   1.6000000e+00   1.2000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   1.7000000e+00   2.8000000e+00   2.8000000e+00   1.8000000e+00   1.3000000e+00   1.3000000e+00   2.5000000e+00   1.6000000e+00   1.6000000e+00   2.7000000e+00   1.1000000e+00   1.3000000e+00   2.8000000e+00   9.0000000e-01   1.7000000e+00   8.0000000e-01   1.1000000e+00   1.5000000e+00   5.0000000e-01   9.0000000e-01   1.3000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   1.5000000e+00   7.0000000e-01   1.0000000e+00   1.3000000e+00   1.5000000e+00   2.1000000e+00   6.0000000e-01   1.5000000e+00   1.5000000e+00   1.7000000e+00   9.0000000e-01   1.3000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   7.0000000e-01   1.0000000e+00   6.0000000e-01   8.0000000e-01   9.0000000e-01   2.7000000e+00   5.0000000e-01   
6.0000000e-01   4.0000000e-01   8.0000000e-01   2.6000000e+00   4.0000000e-01   3.8000000e+00   1.4000000e+00   3.8000000e+00   2.3000000e+00   3.2000000e+00   5.0000000e+00   1.5000000e+00   4.0000000e+00   3.1000000e+00   5.1000000e+00   2.5000000e+00   2.2000000e+00   3.1000000e+00   1.5000000e+00   1.8000000e+00   2.9000000e+00   2.5000000e+00   6.1000000e+00   5.6000000e+00   1.6000000e+00   3.8000000e+00   1.2000000e+00   4.9000000e+00   1.6000000e+00   3.5000000e+00   3.9000000e+00   1.3000000e+00   1.5000000e+00   2.6000000e+00   3.3000000e+00   3.9000000e+00   5.8000000e+00   2.7000000e+00   1.4000000e+00   1.8000000e+00   4.8000000e+00   3.4000000e+00   2.5000000e+00   1.3000000e+00   3.2000000e+00   3.5000000e+00   3.1000000e+00   1.4000000e+00   3.9000000e+00   3.9000000e+00   2.9000000e+00   2.0000000e+00   2.4000000e+00   3.0000000e+00   1.5000000e+00   4.3000000e+00   1.1000000e+00   2.7000000e+00   4.4000000e+00   1.3000000e+00   2.7000000e+00   8.0000000e-01   2.5000000e+00   1.1000000e+00   1.3000000e+00   2.3000000e+00   1.5000000e+00   2.8000000e+00   8.0000000e-01   1.7000000e+00   1.1000000e+00   1.1000000e+00   1.2000000e+00   1.1000000e+00   1.3000000e+00   1.1000000e+00   1.0000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   2.3000000e+00   1.3000000e+00   1.5000000e+00   6.0000000e-01   7.0000000e-01   1.6000000e+00   1.9000000e+00   2.6000000e+00   2.2000000e+00   8.0000000e-01   2.3000000e+00   4.3000000e+00   2.1000000e+00   1.8000000e+00   1.8000000e+00   1.2000000e+00   4.2000000e+00   2.0000000e+00   2.2000000e+00   1.8000000e+00   2.8000000e+00   1.5000000e+00   2.2000000e+00   4.0000000e+00   2.5000000e+00   3.2000000e+00   2.5000000e+00   3.5000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   2.1000000e+00   2.2000000e+00   1.5000000e+00   1.5000000e+00   4.5000000e+00   5.0000000e+00   1.8000000e+00   2.4000000e+00   1.8000000e+00   4.3000000e+00   1.0000000e+00   1.9000000e+00   2.5000000e+00   9.0000000e-01   
9.0000000e-01   2.0000000e+00   2.3000000e+00   3.3000000e+00   4.2000000e+00   2.1000000e+00   1.0000000e+00   2.0000000e+00   3.8000000e+00   1.8000000e+00   1.3000000e+00   9.0000000e-01   2.0000000e+00   2.3000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   1.9000000e+00   1.4000000e+00   1.4000000e+00   1.6000000e+00   1.3000000e+00   3.8000000e+00   1.6000000e+00   7.0000000e-01   3.0000000e+00   2.0000000e+00   3.5000000e+00   1.8000000e+00   4.0000000e+00   3.0000000e+00   2.0000000e+00   3.2000000e+00   1.5000000e+00   4.1000000e+00   2.6000000e+00   3.6000000e+00   3.2000000e+00   3.3000000e+00   3.8000000e+00   4.2000000e+00   4.8000000e+00   3.3000000e+00   1.2000000e+00   1.2000000e+00   1.0000000e+00   2.0000000e+00   3.8000000e+00   2.8000000e+00   3.9000000e+00   4.4000000e+00   2.9000000e+00   2.4000000e+00   1.7000000e+00   2.1000000e+00   3.5000000e+00   2.0000000e+00   2.0000000e-01   2.2000000e+00   2.5000000e+00   2.5000000e+00   3.1000000e+00   7.0000000e-01   2.3000000e+00   6.5000000e+00   3.9000000e+00   6.5000000e+00   5.0000000e+00   5.9000000e+00   7.7000000e+00   2.0000000e+00   6.7000000e+00   5.2000000e+00   7.8000000e+00   5.2000000e+00   4.7000000e+00   5.8000000e+00   3.6000000e+00   4.5000000e+00   5.6000000e+00   5.2000000e+00   8.8000000e+00   7.9000000e+00   3.5000000e+00   6.5000000e+00   3.7000000e+00   7.6000000e+00   4.1000000e+00   6.2000000e+00   6.6000000e+00   4.0000000e+00   4.2000000e+00   5.3000000e+00   6.0000000e+00   6.6000000e+00   8.5000000e+00   5.4000000e+00   4.1000000e+00   4.1000000e+00   7.5000000e+00   6.1000000e+00   5.2000000e+00   4.0000000e+00   5.9000000e+00   6.2000000e+00   5.8000000e+00   3.9000000e+00   6.6000000e+00   6.6000000e+00   5.6000000e+00   4.1000000e+00   5.1000000e+00   5.7000000e+00   4.2000000e+00   2.4000000e+00   3.9000000e+00   1.4000000e+00   2.2000000e+00   7.0000000e-01   2.0000000e+00   6.0000000e-01   1.4000000e+00   1.8000000e+00   1.4000000e+00   
2.3000000e+00   1.7000000e+00   1.2000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   2.8000000e+00   1.8000000e+00   1.6000000e+00   1.6000000e+00   1.5000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.7000000e+00   7.0000000e-01   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.5000000e+00   1.3000000e+00   7.0000000e-01   3.7000000e+00   1.5000000e+00   3.3000000e+00   2.1000000e+00   2.7000000e+00   1.8000000e+00   2.3000000e+00   3.9000000e+00   2.6000000e+00   2.9000000e+00   2.2000000e+00   4.0000000e+00   1.6000000e+00   1.7000000e+00   2.0000000e+00   2.4000000e+00   2.5000000e+00   2.2000000e+00   1.6000000e+00   5.0000000e+00   4.7000000e+00   1.9000000e+00   2.7000000e+00   2.1000000e+00   4.0000000e+00   1.3000000e+00   2.4000000e+00   2.8000000e+00   1.2000000e+00   1.4000000e+00   2.1000000e+00   2.2000000e+00   3.0000000e+00   4.7000000e+00   2.2000000e+00   1.1000000e+00   1.9000000e+00   3.7000000e+00   2.9000000e+00   1.8000000e+00   1.4000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   2.1000000e+00   2.8000000e+00   2.8000000e+00   1.8000000e+00   1.7000000e+00   1.5000000e+00   2.7000000e+00   1.8000000e+00   1.7000000e+00   1.4000000e+00   1.8000000e+00   1.9000000e+00   1.0000000e+00   2.4000000e+00   1.4000000e+00   1.2000000e+00   2.2000000e+00   9.0000000e-01   2.5000000e+00   1.2000000e+00   2.4000000e+00   2.0000000e+00   1.9000000e+00   2.2000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   1.4000000e+00   1.0000000e+00   1.2000000e+00   8.0000000e-01   2.2000000e+00   1.2000000e+00   2.3000000e+00   2.8000000e+00   2.1000000e+00   1.0000000e+00   7.0000000e-01   1.1000000e+00   1.9000000e+00   1.0000000e+00   1.6000000e+00   8.0000000e-01   1.3000000e+00   1.1000000e+00   1.7000000e+00   1.5000000e+00   9.0000000e-01   4.9000000e+00   2.3000000e+00   4.9000000e+00   3.4000000e+00   
4.3000000e+00   6.1000000e+00   1.4000000e+00   5.1000000e+00   4.0000000e+00   6.2000000e+00   3.6000000e+00   3.1000000e+00   4.2000000e+00   2.4000000e+00   2.9000000e+00   4.0000000e+00   3.6000000e+00   7.2000000e+00   6.5000000e+00   2.5000000e+00   4.9000000e+00   2.1000000e+00   6.0000000e+00   2.5000000e+00   4.6000000e+00   5.0000000e+00   2.4000000e+00   2.6000000e+00   3.7000000e+00   4.4000000e+00   5.0000000e+00   6.9000000e+00   3.8000000e+00   2.5000000e+00   2.7000000e+00   5.9000000e+00   4.5000000e+00   3.6000000e+00   2.4000000e+00   4.3000000e+00   4.6000000e+00   4.2000000e+00   2.3000000e+00   5.0000000e+00   5.0000000e+00   4.0000000e+00   2.9000000e+00   3.5000000e+00   4.1000000e+00   2.6000000e+00   3.1000000e+00   1.7000000e+00   3.6000000e+00   1.9000000e+00   4.1000000e+00   3.1000000e+00   2.1000000e+00   2.9000000e+00   1.6000000e+00   4.2000000e+00   2.7000000e+00   3.7000000e+00   3.3000000e+00   3.4000000e+00   3.9000000e+00   4.3000000e+00   4.9000000e+00   3.4000000e+00   1.3000000e+00   1.3000000e+00   1.1000000e+00   2.1000000e+00   3.9000000e+00   2.9000000e+00   4.0000000e+00   4.5000000e+00   2.8000000e+00   2.5000000e+00   1.8000000e+00   2.2000000e+00   3.6000000e+00   2.1000000e+00   5.0000000e-01   2.3000000e+00   2.6000000e+00   2.6000000e+00   3.2000000e+00   1.2000000e+00   2.4000000e+00   6.6000000e+00   4.0000000e+00   6.6000000e+00   5.1000000e+00   6.0000000e+00   7.8000000e+00   2.3000000e+00   6.8000000e+00   5.3000000e+00   7.9000000e+00   5.3000000e+00   4.8000000e+00   5.9000000e+00   3.7000000e+00   4.6000000e+00   5.7000000e+00   5.3000000e+00   8.9000000e+00   8.0000000e+00   3.2000000e+00   6.6000000e+00   3.8000000e+00   7.7000000e+00   4.2000000e+00   6.3000000e+00   6.7000000e+00   4.1000000e+00   4.3000000e+00   5.4000000e+00   6.1000000e+00   6.7000000e+00   8.6000000e+00   5.5000000e+00   4.2000000e+00   4.2000000e+00   7.6000000e+00   6.2000000e+00   5.3000000e+00   4.1000000e+00   6.0000000e+00   
6.3000000e+00   5.9000000e+00   4.0000000e+00   6.7000000e+00   6.7000000e+00   5.7000000e+00   4.2000000e+00   5.2000000e+00   5.8000000e+00   4.3000000e+00   1.6000000e+00   9.0000000e-01   1.2000000e+00   1.2000000e+00   6.0000000e-01   1.0000000e+00   1.4000000e+00   1.5000000e+00   1.1000000e+00   8.0000000e-01   1.6000000e+00   1.2000000e+00   9.0000000e-01   1.0000000e+00   1.8000000e+00   1.8000000e+00   5.0000000e-01   1.8000000e+00   1.8000000e+00   2.0000000e+00   1.0000000e+00   1.4000000e+00   8.0000000e-01   9.0000000e-01   1.4000000e+00   1.5000000e+00   6.0000000e-01   1.3000000e+00   1.3000000e+00   7.0000000e-01   1.0000000e+00   3.0000000e+00   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.9000000e+00   7.0000000e-01   3.5000000e+00   1.7000000e+00   3.5000000e+00   2.2000000e+00   2.9000000e+00   4.7000000e+00   2.0000000e+00   3.9000000e+00   3.2000000e+00   4.8000000e+00   2.2000000e+00   2.3000000e+00   2.8000000e+00   2.0000000e+00   2.1000000e+00   2.6000000e+00   2.2000000e+00   5.8000000e+00   5.7000000e+00   1.7000000e+00   3.5000000e+00   1.7000000e+00   5.0000000e+00   1.7000000e+00   3.2000000e+00   3.6000000e+00   1.4000000e+00   1.2000000e+00   2.7000000e+00   3.0000000e+00   4.0000000e+00   5.5000000e+00   2.8000000e+00   1.5000000e+00   2.1000000e+00   4.5000000e+00   3.1000000e+00   2.2000000e+00   1.0000000e+00   2.9000000e+00   3.2000000e+00   2.8000000e+00   1.7000000e+00   3.6000000e+00   3.6000000e+00   2.6000000e+00   2.1000000e+00   2.1000000e+00   2.7000000e+00   1.2000000e+00   1.9000000e+00   1.8000000e+00   2.4000000e+00   2.2000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   2.7000000e+00   1.0000000e+00   2.0000000e+00   1.6000000e+00   1.7000000e+00   2.2000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.0000000e+00   1.0000000e+00   2.2000000e+00   2.4000000e+00   2.3000000e+00   2.8000000e+00   1.1000000e+00   1.6000000e+00   1.1000000e+00   
1.5000000e+00   1.9000000e+00   8.0000000e-01   1.8000000e+00   1.4000000e+00   1.5000000e+00   1.5000000e+00   1.5000000e+00   2.3000000e+00   1.3000000e+00   4.9000000e+00   2.7000000e+00   4.9000000e+00   3.4000000e+00   4.3000000e+00   6.1000000e+00   2.6000000e+00   5.1000000e+00   3.6000000e+00   6.2000000e+00   3.6000000e+00   3.1000000e+00   4.2000000e+00   2.6000000e+00   3.3000000e+00   4.0000000e+00   3.6000000e+00   7.2000000e+00   6.3000000e+00   1.5000000e+00   4.9000000e+00   2.9000000e+00   6.0000000e+00   2.5000000e+00   4.6000000e+00   5.0000000e+00   2.4000000e+00   2.6000000e+00   3.7000000e+00   4.4000000e+00   5.0000000e+00   6.9000000e+00   3.8000000e+00   2.5000000e+00   2.5000000e+00   5.9000000e+00   4.5000000e+00   3.6000000e+00   2.4000000e+00   4.3000000e+00   4.6000000e+00   4.2000000e+00   2.7000000e+00   5.0000000e+00   5.0000000e+00   4.0000000e+00   2.5000000e+00   3.5000000e+00   4.1000000e+00   2.8000000e+00   1.7000000e+00   1.1000000e+00   9.0000000e-01   1.5000000e+00   1.1000000e+00   2.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   1.3000000e+00   4.0000000e-01   2.3000000e+00   2.3000000e+00   2.5000000e+00   1.5000000e+00   9.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   1.2000000e+00   1.3000000e+00   1.8000000e+00   1.4000000e+00   2.0000000e-01   1.5000000e+00   3.5000000e+00   1.3000000e+00   1.2000000e+00   1.0000000e+00   6.0000000e-01   3.4000000e+00   1.2000000e+00   3.0000000e+00   1.4000000e+00   3.0000000e+00   1.5000000e+00   2.4000000e+00   4.2000000e+00   2.1000000e+00   3.2000000e+00   2.5000000e+00   4.3000000e+00   1.7000000e+00   1.6000000e+00   2.3000000e+00   1.7000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   5.3000000e+00   5.0000000e+00   1.2000000e+00   3.0000000e+00   1.4000000e+00   4.3000000e+00   1.0000000e+00   2.7000000e+00   3.1000000e+00   7.0000000e-01   7.0000000e-01   2.0000000e+00   
2.5000000e+00   3.3000000e+00   5.0000000e+00   2.1000000e+00   8.0000000e-01   1.2000000e+00   4.0000000e+00   2.6000000e+00   1.7000000e+00   7.0000000e-01   2.4000000e+00   2.7000000e+00   2.3000000e+00   1.4000000e+00   3.1000000e+00   3.1000000e+00   2.1000000e+00   1.4000000e+00   1.6000000e+00   2.2000000e+00   1.1000000e+00   2.2000000e+00   1.2000000e+00   1.2000000e+00   2.4000000e+00   9.0000000e-01   2.3000000e+00   1.0000000e+00   2.6000000e+00   1.8000000e+00   1.5000000e+00   2.0000000e+00   2.6000000e+00   3.0000000e+00   1.5000000e+00   8.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   2.4000000e+00   1.4000000e+00   2.1000000e+00   2.6000000e+00   2.1000000e+00   6.0000000e-01   9.0000000e-01   1.3000000e+00   1.7000000e+00   1.0000000e+00   1.8000000e+00   8.0000000e-01   9.0000000e-01   7.0000000e-01   1.3000000e+00   1.7000000e+00   7.0000000e-01   4.7000000e+00   2.5000000e+00   4.7000000e+00   3.2000000e+00   4.1000000e+00   5.9000000e+00   2.4000000e+00   4.9000000e+00   4.2000000e+00   6.0000000e+00   3.4000000e+00   3.3000000e+00   4.0000000e+00   2.6000000e+00   2.9000000e+00   3.8000000e+00   3.4000000e+00   7.0000000e+00   6.7000000e+00   2.7000000e+00   4.7000000e+00   2.1000000e+00   6.0000000e+00   2.7000000e+00   4.4000000e+00   4.8000000e+00   2.4000000e+00   2.4000000e+00   3.7000000e+00   4.2000000e+00   5.0000000e+00   6.7000000e+00   3.8000000e+00   2.5000000e+00   2.9000000e+00   5.7000000e+00   4.3000000e+00   3.4000000e+00   2.2000000e+00   4.1000000e+00   4.4000000e+00   4.0000000e+00   2.5000000e+00   4.8000000e+00   4.8000000e+00   3.8000000e+00   3.1000000e+00   3.3000000e+00   3.9000000e+00   2.4000000e+00   1.4000000e+00   2.0000000e+00   1.6000000e+00   2.5000000e+00   1.7000000e+00   1.4000000e+00   1.6000000e+00   1.4000000e+00   7.0000000e-01   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.0000000e+00   2.0000000e+00   1.6000000e+00   
1.3000000e+00   4.0000000e-01   1.3000000e+00   1.6000000e+00   2.3000000e+00   1.9000000e+00   9.0000000e-01   2.0000000e+00   4.0000000e+00   1.8000000e+00   1.5000000e+00   1.5000000e+00   9.0000000e-01   3.9000000e+00   1.7000000e+00   3.3000000e+00   2.5000000e+00   2.7000000e+00   2.2000000e+00   2.5000000e+00   3.9000000e+00   2.8000000e+00   3.1000000e+00   2.4000000e+00   3.8000000e+00   1.6000000e+00   2.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   4.8000000e+00   4.9000000e+00   2.3000000e+00   2.5000000e+00   2.5000000e+00   4.2000000e+00   1.7000000e+00   2.2000000e+00   2.6000000e+00   1.6000000e+00   1.6000000e+00   2.5000000e+00   2.2000000e+00   3.2000000e+00   4.5000000e+00   2.6000000e+00   1.5000000e+00   2.3000000e+00   3.7000000e+00   2.9000000e+00   1.8000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.6000000e+00   2.6000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   2.7000000e+00   2.0000000e+00   1.4000000e+00   1.4000000e+00   1.5000000e+00   1.1000000e+00   1.4000000e+00   1.6000000e+00   1.2000000e+00   1.3000000e+00   1.2000000e+00   1.8000000e+00   1.8000000e+00   5.0000000e-01   2.0000000e+00   1.8000000e+00   2.0000000e+00   1.4000000e+00   1.4000000e+00   2.0000000e-01   9.0000000e-01   1.4000000e+00   1.7000000e+00   6.0000000e-01   1.3000000e+00   9.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   2.9000000e+00   9.0000000e-01   3.5000000e+00   1.5000000e+00   3.5000000e+00   2.2000000e+00   2.9000000e+00   4.7000000e+00   1.4000000e+00   3.9000000e+00   3.2000000e+00   4.8000000e+00   2.2000000e+00   2.3000000e+00   2.8000000e+00   1.6000000e+00   1.9000000e+00   2.6000000e+00   2.2000000e+00   5.8000000e+00   5.7000000e+00   1.7000000e+00   3.5000000e+00   1.1000000e+00   5.0000000e+00   1.7000000e+00   3.2000000e+00   3.6000000e+00   1.4000000e+00   
1.2000000e+00   2.7000000e+00   3.0000000e+00   4.0000000e+00   5.5000000e+00   2.8000000e+00   1.5000000e+00   2.1000000e+00   4.5000000e+00   3.1000000e+00   2.2000000e+00   1.0000000e+00   2.9000000e+00   3.2000000e+00   2.8000000e+00   1.5000000e+00   3.6000000e+00   3.6000000e+00   2.6000000e+00   2.1000000e+00   2.1000000e+00   2.7000000e+00   1.2000000e+00   1.8000000e+00   7.0000000e-01   2.1000000e+00   8.0000000e-01   2.0000000e+00   1.2000000e+00   1.3000000e+00   1.8000000e+00   2.2000000e+00   2.8000000e+00   1.3000000e+00   8.0000000e-01   1.0000000e+00   1.0000000e+00   4.0000000e-01   1.8000000e+00   1.6000000e+00   1.9000000e+00   2.4000000e+00   1.5000000e+00   8.0000000e-01   9.0000000e-01   9.0000000e-01   1.5000000e+00   4.0000000e-01   2.0000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   2.1000000e+00   5.0000000e-01   4.5000000e+00   1.9000000e+00   4.5000000e+00   3.0000000e+00   3.9000000e+00   5.7000000e+00   2.2000000e+00   4.7000000e+00   3.6000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   2.2000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   6.1000000e+00   2.1000000e+00   4.5000000e+00   2.1000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.0000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.1000000e+00   2.3000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.5000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   1.9000000e+00   1.9000000e+00   1.4000000e+00   8.0000000e-01   1.2000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   2.0000000e+00   9.0000000e-01   2.4000000e+00   2.0000000e+00   2.2000000e+00   1.8000000e+00   1.4000000e+00   1.6000000e+00   1.5000000e+00   1.6000000e+00   5.0000000e-01   2.0000000e+00   
1.7000000e+00   1.5000000e+00   1.1000000e+00   1.6000000e+00   3.0000000e+00   1.6000000e+00   1.9000000e+00   1.7000000e+00   1.1000000e+00   3.3000000e+00   1.7000000e+00   3.7000000e+00   1.9000000e+00   3.7000000e+00   2.2000000e+00   3.1000000e+00   4.9000000e+00   1.8000000e+00   3.9000000e+00   2.4000000e+00   5.0000000e+00   2.4000000e+00   1.9000000e+00   3.0000000e+00   1.8000000e+00   2.5000000e+00   2.8000000e+00   2.4000000e+00   6.0000000e+00   5.1000000e+00   7.0000000e-01   3.7000000e+00   2.1000000e+00   4.8000000e+00   1.3000000e+00   3.4000000e+00   3.8000000e+00   1.2000000e+00   1.6000000e+00   2.5000000e+00   3.2000000e+00   3.8000000e+00   5.7000000e+00   2.6000000e+00   1.3000000e+00   1.7000000e+00   4.7000000e+00   3.3000000e+00   2.4000000e+00   1.6000000e+00   3.1000000e+00   3.4000000e+00   3.0000000e+00   1.9000000e+00   3.8000000e+00   3.8000000e+00   2.8000000e+00   1.3000000e+00   2.3000000e+00   2.9000000e+00   2.0000000e+00   2.6000000e+00   1.1000000e+00   2.1000000e+00   1.7000000e+00   1.8000000e+00   2.3000000e+00   2.7000000e+00   3.3000000e+00   1.8000000e+00   7.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   2.3000000e+00   1.7000000e+00   2.4000000e+00   2.9000000e+00   1.6000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e+00   5.0000000e-01   1.5000000e+00   7.0000000e-01   1.0000000e+00   1.0000000e+00   1.6000000e+00   1.4000000e+00   8.0000000e-01   5.0000000e+00   2.4000000e+00   5.0000000e+00   3.5000000e+00   4.4000000e+00   6.2000000e+00   1.9000000e+00   5.2000000e+00   3.7000000e+00   6.3000000e+00   3.7000000e+00   3.2000000e+00   4.3000000e+00   2.1000000e+00   3.0000000e+00   4.1000000e+00   3.7000000e+00   7.3000000e+00   6.4000000e+00   2.2000000e+00   5.0000000e+00   2.2000000e+00   6.1000000e+00   2.6000000e+00   4.7000000e+00   5.1000000e+00   2.5000000e+00   2.7000000e+00   3.8000000e+00   4.5000000e+00   5.1000000e+00   7.0000000e+00   3.9000000e+00   2.6000000e+00   
2.6000000e+00   6.0000000e+00   4.6000000e+00   3.7000000e+00   2.5000000e+00   4.4000000e+00   4.7000000e+00   4.3000000e+00   2.4000000e+00   5.1000000e+00   5.1000000e+00   4.1000000e+00   2.6000000e+00   3.6000000e+00   4.2000000e+00   2.7000000e+00   1.9000000e+00   1.5000000e+00   1.3000000e+00   1.8000000e+00   1.7000000e+00   1.7000000e+00   1.3000000e+00   1.0000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   2.2000000e+00   1.7000000e+00   2.4000000e+00   2.0000000e+00   1.0000000e+00   2.1000000e+00   4.1000000e+00   1.9000000e+00   1.6000000e+00   1.6000000e+00   1.6000000e+00   4.0000000e+00   1.8000000e+00   2.4000000e+00   1.0000000e+00   2.8000000e+00   1.5000000e+00   2.2000000e+00   4.0000000e+00   2.1000000e+00   3.2000000e+00   2.5000000e+00   3.7000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.3000000e+00   1.4000000e+00   1.5000000e+00   1.5000000e+00   4.7000000e+00   5.0000000e+00   1.6000000e+00   2.4000000e+00   1.0000000e+00   4.3000000e+00   1.0000000e+00   2.1000000e+00   2.5000000e+00   7.0000000e-01   5.0000000e-01   2.0000000e+00   2.7000000e+00   3.3000000e+00   4.4000000e+00   2.1000000e+00   1.4000000e+00   2.0000000e+00   3.8000000e+00   2.0000000e+00   1.3000000e+00   3.0000000e-01   2.0000000e+00   2.3000000e+00   1.9000000e+00   1.0000000e+00   2.5000000e+00   2.5000000e+00   1.9000000e+00   1.4000000e+00   1.4000000e+00   1.6000000e+00   5.0000000e-01   1.6000000e+00   8.0000000e-01   7.0000000e-01   1.2000000e+00   1.6000000e+00   2.2000000e+00   9.0000000e-01   1.4000000e+00   1.4000000e+00   1.6000000e+00   6.0000000e-01   1.6000000e+00   1.6000000e+00   1.5000000e+00   1.8000000e+00   1.1000000e+00   8.0000000e-01   9.0000000e-01   1.3000000e+00   9.0000000e-01   6.0000000e-01   2.6000000e+00   8.0000000e-01   9.0000000e-01   7.0000000e-01   5.0000000e-01   2.5000000e+00   5.0000000e-01   3.9000000e+00   2.1000000e+00   
3.9000000e+00   2.4000000e+00   3.3000000e+00   5.1000000e+00   2.4000000e+00   4.1000000e+00   3.2000000e+00   5.2000000e+00   2.6000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   2.5000000e+00   3.0000000e+00   2.6000000e+00   6.2000000e+00   5.7000000e+00   1.9000000e+00   3.9000000e+00   2.1000000e+00   5.0000000e+00   1.7000000e+00   3.6000000e+00   4.0000000e+00   1.4000000e+00   1.6000000e+00   2.7000000e+00   3.4000000e+00   4.0000000e+00   5.9000000e+00   2.8000000e+00   1.5000000e+00   1.9000000e+00   4.9000000e+00   3.5000000e+00   2.6000000e+00   1.6000000e+00   3.3000000e+00   3.6000000e+00   3.2000000e+00   2.1000000e+00   4.0000000e+00   4.0000000e+00   3.0000000e+00   2.1000000e+00   2.5000000e+00   3.1000000e+00   2.0000000e+00   1.0000000e+00   1.3000000e+00   1.4000000e+00   1.0000000e+00   1.2000000e+00   1.1000000e+00   2.6000000e+00   2.4000000e+00   2.6000000e+00   2.0000000e+00   8.0000000e-01   1.8000000e+00   1.7000000e+00   1.2000000e+00   9.0000000e-01   2.2000000e+00   1.9000000e+00   1.7000000e+00   1.1000000e+00   1.8000000e+00   3.6000000e+00   1.8000000e+00   2.1000000e+00   1.9000000e+00   1.3000000e+00   3.5000000e+00   1.9000000e+00   2.9000000e+00   1.3000000e+00   2.9000000e+00   1.4000000e+00   2.3000000e+00   4.1000000e+00   2.0000000e+00   3.1000000e+00   1.6000000e+00   4.2000000e+00   1.6000000e+00   1.1000000e+00   2.2000000e+00   1.2000000e+00   1.9000000e+00   2.0000000e+00   1.6000000e+00   5.2000000e+00   4.3000000e+00   7.0000000e-01   2.9000000e+00   1.5000000e+00   4.0000000e+00   5.0000000e-01   2.6000000e+00   3.0000000e+00   8.0000000e-01   1.0000000e+00   1.7000000e+00   2.4000000e+00   3.0000000e+00   4.9000000e+00   1.8000000e+00   5.0000000e-01   1.1000000e+00   3.9000000e+00   2.5000000e+00   1.6000000e+00   1.2000000e+00   2.3000000e+00   2.6000000e+00   2.2000000e+00   1.3000000e+00   3.0000000e+00   3.0000000e+00   2.0000000e+00   5.0000000e-01   1.5000000e+00   2.3000000e+00   1.4000000e+00   
9.0000000e-01   1.2000000e+00   1.0000000e+00   1.6000000e+00   7.0000000e-01   2.0000000e+00   2.0000000e+00   2.2000000e+00   1.2000000e+00   1.0000000e+00   1.4000000e+00   1.3000000e+00   1.2000000e+00   1.1000000e+00   1.4000000e+00   1.7000000e+00   1.1000000e+00   5.0000000e-01   1.2000000e+00   3.2000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   7.0000000e-01   3.1000000e+00   1.1000000e+00   3.3000000e+00   1.5000000e+00   3.3000000e+00   1.8000000e+00   2.7000000e+00   4.5000000e+00   2.2000000e+00   3.5000000e+00   2.6000000e+00   4.6000000e+00   2.0000000e+00   1.7000000e+00   2.6000000e+00   1.8000000e+00   1.9000000e+00   2.4000000e+00   2.0000000e+00   5.6000000e+00   5.1000000e+00   1.3000000e+00   3.3000000e+00   1.5000000e+00   4.4000000e+00   1.1000000e+00   3.0000000e+00   3.4000000e+00   8.0000000e-01   1.0000000e+00   2.1000000e+00   2.8000000e+00   3.4000000e+00   5.3000000e+00   2.2000000e+00   9.0000000e-01   1.3000000e+00   4.3000000e+00   2.9000000e+00   2.0000000e+00   1.0000000e+00   2.7000000e+00   3.0000000e+00   2.6000000e+00   1.5000000e+00   3.4000000e+00   3.4000000e+00   2.4000000e+00   1.5000000e+00   1.9000000e+00   2.5000000e+00   1.4000000e+00   5.0000000e-01   1.1000000e+00   1.5000000e+00   8.0000000e-01   2.1000000e+00   2.1000000e+00   2.3000000e+00   1.3000000e+00   1.7000000e+00   1.5000000e+00   1.4000000e+00   1.1000000e+00   8.0000000e-01   1.1000000e+00   1.6000000e+00   1.4000000e+00   8.0000000e-01   1.3000000e+00   3.3000000e+00   1.1000000e+00   1.0000000e+00   8.0000000e-01   2.0000000e-01   3.2000000e+00   1.0000000e+00   3.4000000e+00   2.2000000e+00   3.2000000e+00   1.9000000e+00   2.6000000e+00   4.4000000e+00   2.5000000e+00   3.4000000e+00   2.7000000e+00   4.5000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   2.3000000e+00   1.9000000e+00   5.5000000e+00   5.2000000e+00   2.0000000e+00   3.2000000e+00   2.2000000e+00   4.5000000e+00   1.4000000e+00   
2.9000000e+00   3.3000000e+00   1.3000000e+00   1.5000000e+00   2.2000000e+00   2.7000000e+00   3.5000000e+00   5.2000000e+00   2.3000000e+00   1.2000000e+00   2.0000000e+00   4.2000000e+00   3.0000000e+00   1.9000000e+00   1.5000000e+00   2.6000000e+00   2.9000000e+00   2.5000000e+00   2.2000000e+00   3.3000000e+00   3.3000000e+00   2.3000000e+00   1.8000000e+00   1.8000000e+00   2.8000000e+00   1.9000000e+00   8.0000000e-01   1.0000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   2.8000000e+00   1.8000000e+00   1.8000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.1000000e+00   1.4000000e+00   2.1000000e+00   1.7000000e+00   7.0000000e-01   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.3000000e+00   1.3000000e+00   7.0000000e-01   3.7000000e+00   1.5000000e+00   3.3000000e+00   2.3000000e+00   2.7000000e+00   2.0000000e+00   2.3000000e+00   3.9000000e+00   2.6000000e+00   3.1000000e+00   2.4000000e+00   4.0000000e+00   1.6000000e+00   1.9000000e+00   2.0000000e+00   2.6000000e+00   2.7000000e+00   2.2000000e+00   1.6000000e+00   5.0000000e+00   4.9000000e+00   2.1000000e+00   2.7000000e+00   2.3000000e+00   4.2000000e+00   1.5000000e+00   2.4000000e+00   2.8000000e+00   1.4000000e+00   1.4000000e+00   2.3000000e+00   2.2000000e+00   3.2000000e+00   4.7000000e+00   2.4000000e+00   1.3000000e+00   2.1000000e+00   3.7000000e+00   2.9000000e+00   1.8000000e+00   1.4000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   2.3000000e+00   2.8000000e+00   2.8000000e+00   1.8000000e+00   1.9000000e+00   1.5000000e+00   2.7000000e+00   1.8000000e+00   8.0000000e-01   1.3000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.2000000e+00   1.4000000e+00   2.0000000e+00   1.9000000e+00   6.0000000e-01   1.5000000e+00   2.2000000e+00   2.5000000e+00   2.1000000e+00   1.1000000e+00   2.2000000e+00   4.2000000e+00   2.0000000e+00   2.1000000e+00   1.9000000e+00   1.3000000e+00   4.1000000e+00   1.9000000e+00   3.3000000e+00   1.9000000e+00   
2.3000000e+00   1.8000000e+00   2.3000000e+00   3.5000000e+00   2.8000000e+00   2.5000000e+00   1.8000000e+00   3.6000000e+00   1.6000000e+00   1.5000000e+00   1.6000000e+00   2.2000000e+00   2.3000000e+00   2.2000000e+00   1.6000000e+00   4.6000000e+00   4.1000000e+00   1.7000000e+00   2.3000000e+00   1.9000000e+00   3.4000000e+00   1.1000000e+00   2.2000000e+00   2.4000000e+00   1.0000000e+00   1.4000000e+00   1.9000000e+00   1.8000000e+00   2.4000000e+00   4.3000000e+00   2.0000000e+00   9.0000000e-01   1.7000000e+00   3.3000000e+00   2.9000000e+00   1.8000000e+00   1.4000000e+00   1.7000000e+00   2.2000000e+00   1.6000000e+00   1.9000000e+00   2.4000000e+00   2.6000000e+00   1.6000000e+00   1.5000000e+00   1.5000000e+00   2.7000000e+00   1.8000000e+00   1.5000000e+00   3.6000000e+00   3.6000000e+00   3.8000000e+00   2.8000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   6.0000000e-01   2.1000000e+00   2.4000000e+00   3.1000000e+00   2.7000000e+00   1.3000000e+00   2.8000000e+00   4.8000000e+00   2.6000000e+00   2.3000000e+00   2.3000000e+00   1.7000000e+00   4.7000000e+00   2.5000000e+00   2.5000000e+00   1.5000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   2.9000000e+00   2.8000000e+00   2.1000000e+00   1.4000000e+00   3.0000000e+00   8.0000000e-01   1.1000000e+00   1.0000000e+00   1.8000000e+00   1.9000000e+00   1.4000000e+00   8.0000000e-01   4.0000000e+00   3.9000000e+00   1.7000000e+00   1.7000000e+00   1.7000000e+00   3.2000000e+00   9.0000000e-01   1.4000000e+00   1.8000000e+00   1.0000000e+00   8.0000000e-01   1.5000000e+00   1.4000000e+00   2.2000000e+00   3.7000000e+00   1.6000000e+00   9.0000000e-01   1.9000000e+00   2.7000000e+00   2.1000000e+00   1.0000000e+00   1.0000000e+00   1.1000000e+00   1.4000000e+00   1.0000000e+00   1.5000000e+00   1.8000000e+00   1.8000000e+00   8.0000000e-01   1.1000000e+00   7.0000000e-01   1.9000000e+00   1.0000000e+00   2.1000000e+00   2.1000000e+00   2.3000000e+00   1.3000000e+00   9.0000000e-01   
7.0000000e-01   6.0000000e-01   1.1000000e+00   1.2000000e+00   1.1000000e+00   1.6000000e+00   1.2000000e+00   4.0000000e-01   1.3000000e+00   3.3000000e+00   1.1000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   3.2000000e+00   1.0000000e+00   3.2000000e+00   1.4000000e+00   3.2000000e+00   1.7000000e+00   2.6000000e+00   4.4000000e+00   1.7000000e+00   3.4000000e+00   2.7000000e+00   4.5000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   1.7000000e+00   1.8000000e+00   2.3000000e+00   1.9000000e+00   5.5000000e+00   5.2000000e+00   1.2000000e+00   3.2000000e+00   1.4000000e+00   4.5000000e+00   1.2000000e+00   2.9000000e+00   3.3000000e+00   9.0000000e-01   9.0000000e-01   2.2000000e+00   2.7000000e+00   3.5000000e+00   5.2000000e+00   2.3000000e+00   1.0000000e+00   1.6000000e+00   4.2000000e+00   2.8000000e+00   1.9000000e+00   7.0000000e-01   2.6000000e+00   2.9000000e+00   2.5000000e+00   1.4000000e+00   3.3000000e+00   3.3000000e+00   2.3000000e+00   1.6000000e+00   1.8000000e+00   2.4000000e+00   1.1000000e+00   8.0000000e-01   6.0000000e-01   8.0000000e-01   2.6000000e+00   2.2000000e+00   2.7000000e+00   3.2000000e+00   2.1000000e+00   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.3000000e+00   8.0000000e-01   1.2000000e+00   1.2000000e+00   1.3000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   5.3000000e+00   2.7000000e+00   5.3000000e+00   3.8000000e+00   4.7000000e+00   6.5000000e+00   2.6000000e+00   5.5000000e+00   4.2000000e+00   6.6000000e+00   4.0000000e+00   3.5000000e+00   4.6000000e+00   2.6000000e+00   3.3000000e+00   4.4000000e+00   4.0000000e+00   7.6000000e+00   6.7000000e+00   2.7000000e+00   5.3000000e+00   2.7000000e+00   6.4000000e+00   2.9000000e+00   5.0000000e+00   5.4000000e+00   2.8000000e+00   3.0000000e+00   4.1000000e+00   4.8000000e+00   5.4000000e+00   7.3000000e+00   4.2000000e+00   2.9000000e+00   2.9000000e+00   6.3000000e+00   4.9000000e+00   4.0000000e+00   2.8000000e+00   
4.7000000e+00   5.0000000e+00   4.6000000e+00   2.7000000e+00   5.4000000e+00   5.4000000e+00   4.4000000e+00   3.1000000e+00   3.9000000e+00   4.5000000e+00   3.0000000e+00   2.0000000e-01   8.0000000e-01   2.6000000e+00   1.8000000e+00   2.7000000e+00   3.2000000e+00   1.7000000e+00   1.2000000e+00   5.0000000e-01   9.0000000e-01   2.3000000e+00   8.0000000e-01   1.2000000e+00   1.0000000e+00   1.3000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   5.3000000e+00   2.7000000e+00   5.3000000e+00   3.8000000e+00   4.7000000e+00   6.5000000e+00   2.0000000e+00   5.5000000e+00   4.0000000e+00   6.6000000e+00   4.0000000e+00   3.5000000e+00   4.6000000e+00   2.4000000e+00   3.3000000e+00   4.4000000e+00   4.0000000e+00   7.6000000e+00   6.7000000e+00   2.3000000e+00   5.3000000e+00   2.5000000e+00   6.4000000e+00   2.9000000e+00   5.0000000e+00   5.4000000e+00   2.8000000e+00   3.0000000e+00   4.1000000e+00   4.8000000e+00   5.4000000e+00   7.3000000e+00   4.2000000e+00   2.9000000e+00   2.9000000e+00   6.3000000e+00   4.9000000e+00   4.0000000e+00   2.8000000e+00   4.7000000e+00   5.0000000e+00   4.6000000e+00   2.7000000e+00   5.4000000e+00   5.4000000e+00   4.4000000e+00   2.9000000e+00   3.9000000e+00   4.5000000e+00   3.0000000e+00   1.0000000e+00   2.8000000e+00   2.0000000e+00   2.9000000e+00   3.4000000e+00   1.9000000e+00   1.4000000e+00   7.0000000e-01   1.1000000e+00   2.5000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.5000000e+00   2.1000000e+00   1.3000000e+00   1.3000000e+00   5.5000000e+00   2.9000000e+00   5.5000000e+00   4.0000000e+00   4.9000000e+00   6.7000000e+00   2.2000000e+00   5.7000000e+00   4.2000000e+00   6.8000000e+00   4.2000000e+00   3.7000000e+00   4.8000000e+00   2.6000000e+00   3.5000000e+00   4.6000000e+00   4.2000000e+00   7.8000000e+00   6.9000000e+00   2.5000000e+00   5.5000000e+00   2.7000000e+00   6.6000000e+00   3.1000000e+00   5.2000000e+00   5.6000000e+00   3.0000000e+00   
3.2000000e+00   4.3000000e+00   5.0000000e+00   5.6000000e+00   7.5000000e+00   4.4000000e+00   3.1000000e+00   3.1000000e+00   6.5000000e+00   5.1000000e+00   4.2000000e+00   3.0000000e+00   4.9000000e+00   5.2000000e+00   4.8000000e+00   2.9000000e+00   5.6000000e+00   5.6000000e+00   4.6000000e+00   3.1000000e+00   4.1000000e+00   4.7000000e+00   3.2000000e+00   1.8000000e+00   1.6000000e+00   1.9000000e+00   2.4000000e+00   1.5000000e+00   8.0000000e-01   7.0000000e-01   9.0000000e-01   1.5000000e+00   2.0000000e-01   2.0000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   1.9000000e+00   5.0000000e-01   4.5000000e+00   1.9000000e+00   4.5000000e+00   3.0000000e+00   3.9000000e+00   5.7000000e+00   2.2000000e+00   4.7000000e+00   3.6000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   2.2000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   6.1000000e+00   2.1000000e+00   4.5000000e+00   2.1000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.0000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.1000000e+00   2.3000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.5000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   1.6000000e+00   1.3000000e+00   1.6000000e+00   1.7000000e+00   2.0000000e+00   2.1000000e+00   1.7000000e+00   1.1000000e+00   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.9000000e+00   1.7000000e+00   1.5000000e+00   3.7000000e+00   1.7000000e+00   2.7000000e+00   5.0000000e-01   2.7000000e+00   1.2000000e+00   2.1000000e+00   3.9000000e+00   2.0000000e+00   2.9000000e+00   1.8000000e+00   4.0000000e+00   1.4000000e+00   9.0000000e-01   2.0000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   1.4000000e+00   5.0000000e+00   4.3000000e+00   
7.0000000e-01   2.7000000e+00   1.1000000e+00   3.8000000e+00   7.0000000e-01   2.4000000e+00   2.8000000e+00   8.0000000e-01   8.0000000e-01   1.5000000e+00   2.2000000e+00   2.8000000e+00   4.7000000e+00   1.6000000e+00   5.0000000e-01   9.0000000e-01   3.7000000e+00   2.3000000e+00   1.4000000e+00   8.0000000e-01   2.1000000e+00   2.4000000e+00   2.0000000e+00   5.0000000e-01   2.8000000e+00   2.8000000e+00   1.8000000e+00   9.0000000e-01   1.3000000e+00   1.9000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   1.9000000e+00   8.0000000e-01   1.3000000e+00   9.0000000e-01   9.0000000e-01   1.6000000e+00   2.8000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   1.3000000e+00   2.7000000e+00   1.1000000e+00   3.7000000e+00   1.7000000e+00   3.7000000e+00   2.4000000e+00   3.1000000e+00   4.9000000e+00   1.2000000e+00   4.1000000e+00   3.4000000e+00   5.0000000e+00   2.4000000e+00   2.5000000e+00   3.0000000e+00   1.8000000e+00   2.1000000e+00   2.8000000e+00   2.4000000e+00   6.0000000e+00   5.9000000e+00   1.9000000e+00   3.7000000e+00   1.3000000e+00   5.2000000e+00   1.9000000e+00   3.4000000e+00   3.8000000e+00   1.6000000e+00   1.4000000e+00   2.9000000e+00   3.2000000e+00   4.2000000e+00   5.7000000e+00   3.0000000e+00   1.7000000e+00   2.3000000e+00   4.7000000e+00   3.3000000e+00   2.4000000e+00   1.2000000e+00   3.1000000e+00   3.4000000e+00   3.0000000e+00   1.7000000e+00   3.8000000e+00   3.8000000e+00   2.8000000e+00   2.3000000e+00   2.3000000e+00   2.9000000e+00   1.4000000e+00   1.3000000e+00   1.8000000e+00   1.5000000e+00   2.2000000e+00   1.8000000e+00   8.0000000e-01   1.9000000e+00   3.9000000e+00   1.7000000e+00   1.4000000e+00   1.4000000e+00   1.2000000e+00   3.8000000e+00   1.6000000e+00   2.8000000e+00   1.8000000e+00   3.4000000e+00   2.1000000e+00   2.8000000e+00   4.6000000e+00   2.1000000e+00   3.8000000e+00   3.1000000e+00   3.9000000e+00   1.7000000e+00   2.2000000e+00   2.7000000e+00   2.1000000e+00   2.2000000e+00   
2.1000000e+00   2.1000000e+00   4.9000000e+00   5.6000000e+00   1.8000000e+00   3.0000000e+00   1.8000000e+00   4.9000000e+00   1.6000000e+00   2.5000000e+00   3.1000000e+00   1.3000000e+00   1.1000000e+00   2.6000000e+00   2.9000000e+00   3.9000000e+00   4.6000000e+00   2.7000000e+00   1.6000000e+00   2.2000000e+00   4.4000000e+00   2.2000000e+00   1.9000000e+00   9.0000000e-01   2.6000000e+00   2.9000000e+00   2.5000000e+00   1.8000000e+00   3.1000000e+00   2.9000000e+00   2.5000000e+00   2.0000000e+00   2.0000000e+00   1.8000000e+00   1.3000000e+00   1.7000000e+00   2.0000000e+00   2.7000000e+00   2.3000000e+00   9.0000000e-01   2.4000000e+00   4.4000000e+00   2.2000000e+00   1.9000000e+00   1.9000000e+00   1.3000000e+00   4.3000000e+00   2.1000000e+00   2.9000000e+00   2.1000000e+00   2.3000000e+00   1.8000000e+00   2.1000000e+00   3.5000000e+00   2.8000000e+00   2.7000000e+00   2.0000000e+00   3.4000000e+00   1.2000000e+00   1.7000000e+00   1.6000000e+00   2.4000000e+00   2.5000000e+00   1.8000000e+00   1.4000000e+00   4.4000000e+00   4.5000000e+00   1.9000000e+00   2.1000000e+00   2.1000000e+00   3.8000000e+00   1.3000000e+00   1.8000000e+00   2.2000000e+00   1.2000000e+00   1.2000000e+00   2.1000000e+00   1.8000000e+00   2.8000000e+00   4.1000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   3.3000000e+00   2.5000000e+00   1.4000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.4000000e+00   2.1000000e+00   2.2000000e+00   2.2000000e+00   1.4000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   1.6000000e+00   1.7000000e+00   1.4000000e+00   1.2000000e+00   1.2000000e+00   1.3000000e+00   2.7000000e+00   1.3000000e+00   1.6000000e+00   1.4000000e+00   8.0000000e-01   3.0000000e+00   1.4000000e+00   3.8000000e+00   2.2000000e+00   3.8000000e+00   2.3000000e+00   3.2000000e+00   5.0000000e+00   2.1000000e+00   4.0000000e+00   2.5000000e+00   5.1000000e+00   2.5000000e+00   2.0000000e+00   3.1000000e+00   2.1000000e+00   2.8000000e+00   
2.9000000e+00   2.5000000e+00   6.1000000e+00   5.2000000e+00   1.2000000e+00   3.8000000e+00   2.4000000e+00   4.9000000e+00   1.4000000e+00   3.5000000e+00   3.9000000e+00   1.5000000e+00   1.9000000e+00   2.6000000e+00   3.3000000e+00   3.9000000e+00   5.8000000e+00   2.7000000e+00   1.4000000e+00   1.8000000e+00   4.8000000e+00   3.4000000e+00   2.5000000e+00   1.9000000e+00   3.2000000e+00   3.5000000e+00   3.1000000e+00   2.2000000e+00   3.9000000e+00   3.9000000e+00   2.9000000e+00   1.4000000e+00   2.4000000e+00   3.2000000e+00   2.3000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   2.4000000e+00   4.0000000e-01   3.0000000e-01   3.0000000e-01   9.0000000e-01   2.3000000e+00   3.0000000e-01   4.1000000e+00   2.1000000e+00   4.1000000e+00   2.8000000e+00   3.5000000e+00   5.3000000e+00   2.0000000e+00   4.5000000e+00   3.8000000e+00   5.4000000e+00   2.8000000e+00   2.9000000e+00   3.4000000e+00   2.2000000e+00   2.5000000e+00   3.2000000e+00   2.8000000e+00   6.4000000e+00   6.3000000e+00   2.3000000e+00   4.1000000e+00   1.7000000e+00   5.6000000e+00   2.3000000e+00   3.8000000e+00   4.2000000e+00   2.0000000e+00   1.8000000e+00   3.3000000e+00   3.6000000e+00   4.6000000e+00   6.1000000e+00   3.4000000e+00   2.1000000e+00   2.5000000e+00   5.1000000e+00   3.7000000e+00   2.8000000e+00   1.6000000e+00   3.5000000e+00   3.8000000e+00   3.4000000e+00   2.1000000e+00   4.2000000e+00   4.2000000e+00   3.2000000e+00   2.7000000e+00   2.7000000e+00   3.3000000e+00   1.8000000e+00   6.0000000e-01   1.8000000e+00   5.0000000e-01   1.7000000e+00   5.0000000e-01   1.0000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   6.0000000e-01   4.8000000e+00   2.2000000e+00   4.8000000e+00   3.3000000e+00   4.2000000e+00   6.0000000e+00   1.5000000e+00   5.0000000e+00   3.5000000e+00   6.1000000e+00   3.5000000e+00   3.0000000e+00   4.1000000e+00   1.9000000e+00   2.8000000e+00   3.9000000e+00   3.5000000e+00   7.1000000e+00   6.2000000e+00   
2.0000000e+00   4.8000000e+00   2.0000000e+00   5.9000000e+00   2.4000000e+00   4.5000000e+00   4.9000000e+00   2.3000000e+00   2.5000000e+00   3.6000000e+00   4.3000000e+00   4.9000000e+00   6.8000000e+00   3.7000000e+00   2.4000000e+00   2.4000000e+00   5.8000000e+00   4.4000000e+00   3.5000000e+00   2.3000000e+00   4.2000000e+00   4.5000000e+00   4.1000000e+00   2.2000000e+00   4.9000000e+00   4.9000000e+00   3.9000000e+00   2.4000000e+00   3.4000000e+00   4.0000000e+00   2.5000000e+00   1.4000000e+00   7.0000000e-01   2.1000000e+00   5.0000000e-01   8.0000000e-01   8.0000000e-01   1.2000000e+00   2.0000000e+00   8.0000000e-01   4.4000000e+00   1.8000000e+00   4.4000000e+00   2.9000000e+00   3.8000000e+00   5.6000000e+00   1.3000000e+00   4.6000000e+00   3.3000000e+00   5.7000000e+00   3.1000000e+00   2.6000000e+00   3.7000000e+00   1.7000000e+00   2.4000000e+00   3.5000000e+00   3.1000000e+00   6.7000000e+00   5.8000000e+00   1.8000000e+00   4.4000000e+00   1.6000000e+00   5.5000000e+00   2.0000000e+00   4.1000000e+00   4.5000000e+00   1.9000000e+00   2.1000000e+00   3.2000000e+00   3.9000000e+00   4.5000000e+00   6.4000000e+00   3.3000000e+00   2.0000000e+00   2.0000000e+00   5.4000000e+00   4.0000000e+00   3.1000000e+00   1.9000000e+00   3.8000000e+00   4.1000000e+00   3.7000000e+00   1.8000000e+00   4.5000000e+00   4.5000000e+00   3.5000000e+00   2.2000000e+00   3.0000000e+00   3.6000000e+00   2.1000000e+00   1.5000000e+00   3.5000000e+00   1.3000000e+00   1.0000000e+00   1.0000000e+00   6.0000000e-01   3.4000000e+00   1.2000000e+00   3.0000000e+00   1.6000000e+00   3.0000000e+00   1.7000000e+00   2.4000000e+00   4.2000000e+00   2.1000000e+00   3.4000000e+00   2.7000000e+00   4.3000000e+00   1.7000000e+00   1.8000000e+00   2.3000000e+00   1.9000000e+00   2.0000000e+00   2.1000000e+00   1.7000000e+00   5.3000000e+00   5.2000000e+00   1.4000000e+00   3.0000000e+00   1.6000000e+00   4.5000000e+00   1.2000000e+00   2.7000000e+00   3.1000000e+00   9.0000000e-01   
7.0000000e-01   2.2000000e+00   2.5000000e+00   3.5000000e+00   5.0000000e+00   2.3000000e+00   1.0000000e+00   1.4000000e+00   4.0000000e+00   2.6000000e+00   1.7000000e+00   7.0000000e-01   2.4000000e+00   2.7000000e+00   2.3000000e+00   1.6000000e+00   3.1000000e+00   3.1000000e+00   2.1000000e+00   1.6000000e+00   1.6000000e+00   2.2000000e+00   1.1000000e+00   2.0000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   1.9000000e+00   5.0000000e-01   4.5000000e+00   1.9000000e+00   4.5000000e+00   3.0000000e+00   3.9000000e+00   5.7000000e+00   2.0000000e+00   4.7000000e+00   3.4000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   2.0000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   5.9000000e+00   1.9000000e+00   4.5000000e+00   2.1000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.0000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.1000000e+00   2.1000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.3000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   2.2000000e+00   2.5000000e+00   2.5000000e+00   3.1000000e+00   7.0000000e-01   2.3000000e+00   6.5000000e+00   3.9000000e+00   6.5000000e+00   5.0000000e+00   5.9000000e+00   7.7000000e+00   2.2000000e+00   6.7000000e+00   5.2000000e+00   7.8000000e+00   5.2000000e+00   4.7000000e+00   5.8000000e+00   3.6000000e+00   4.5000000e+00   5.6000000e+00   5.2000000e+00   8.8000000e+00   7.9000000e+00   3.3000000e+00   6.5000000e+00   3.7000000e+00   7.6000000e+00   4.1000000e+00   6.2000000e+00   6.6000000e+00   4.0000000e+00   4.2000000e+00   5.3000000e+00   6.0000000e+00   6.6000000e+00   8.5000000e+00   5.4000000e+00   4.1000000e+00   4.1000000e+00   7.5000000e+00   6.1000000e+00   5.2000000e+00   4.0000000e+00   
5.9000000e+00   6.2000000e+00   5.8000000e+00   3.9000000e+00   6.6000000e+00   6.6000000e+00   5.6000000e+00   4.1000000e+00   5.1000000e+00   5.7000000e+00   4.2000000e+00   5.0000000e-01   3.0000000e-01   9.0000000e-01   2.1000000e+00   3.0000000e-01   4.3000000e+00   1.7000000e+00   4.3000000e+00   2.8000000e+00   3.7000000e+00   5.5000000e+00   1.6000000e+00   4.5000000e+00   3.4000000e+00   5.6000000e+00   3.0000000e+00   2.5000000e+00   3.6000000e+00   1.8000000e+00   2.3000000e+00   3.4000000e+00   3.0000000e+00   6.6000000e+00   5.9000000e+00   1.9000000e+00   4.3000000e+00   1.5000000e+00   5.4000000e+00   1.9000000e+00   4.0000000e+00   4.4000000e+00   1.8000000e+00   2.0000000e+00   3.1000000e+00   3.8000000e+00   4.4000000e+00   6.3000000e+00   3.2000000e+00   1.9000000e+00   2.1000000e+00   5.3000000e+00   3.9000000e+00   3.0000000e+00   1.8000000e+00   3.7000000e+00   4.0000000e+00   3.6000000e+00   1.7000000e+00   4.4000000e+00   4.4000000e+00   3.4000000e+00   2.3000000e+00   2.9000000e+00   3.5000000e+00   2.0000000e+00   2.0000000e-01   8.0000000e-01   2.4000000e+00   4.0000000e-01   4.0000000e+00   2.0000000e+00   4.0000000e+00   2.7000000e+00   3.4000000e+00   5.2000000e+00   2.1000000e+00   4.4000000e+00   3.7000000e+00   5.3000000e+00   2.7000000e+00   2.8000000e+00   3.3000000e+00   2.1000000e+00   2.4000000e+00   3.1000000e+00   2.7000000e+00   6.3000000e+00   6.2000000e+00   2.2000000e+00   4.0000000e+00   1.8000000e+00   5.5000000e+00   2.2000000e+00   3.7000000e+00   4.1000000e+00   1.9000000e+00   1.7000000e+00   3.2000000e+00   3.5000000e+00   4.5000000e+00   6.0000000e+00   3.3000000e+00   2.0000000e+00   2.4000000e+00   5.0000000e+00   3.6000000e+00   2.7000000e+00   1.5000000e+00   3.4000000e+00   3.7000000e+00   3.3000000e+00   2.0000000e+00   4.1000000e+00   4.1000000e+00   3.1000000e+00   2.6000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   6.0000000e-01   2.4000000e+00   2.0000000e-01   4.0000000e+00   1.8000000e+00   
4.0000000e+00   2.5000000e+00   3.4000000e+00   5.2000000e+00   1.9000000e+00   4.2000000e+00   3.5000000e+00   5.3000000e+00   2.7000000e+00   2.6000000e+00   3.3000000e+00   1.9000000e+00   2.2000000e+00   3.1000000e+00   2.7000000e+00   6.3000000e+00   6.0000000e+00   2.0000000e+00   4.0000000e+00   1.6000000e+00   5.3000000e+00   2.0000000e+00   3.7000000e+00   4.1000000e+00   1.7000000e+00   1.7000000e+00   3.0000000e+00   3.5000000e+00   4.3000000e+00   6.0000000e+00   3.1000000e+00   1.8000000e+00   2.2000000e+00   5.0000000e+00   3.6000000e+00   2.7000000e+00   1.5000000e+00   3.4000000e+00   3.7000000e+00   3.3000000e+00   1.8000000e+00   4.1000000e+00   4.1000000e+00   3.1000000e+00   2.4000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   3.0000000e+00   8.0000000e-01   3.4000000e+00   2.0000000e+00   3.4000000e+00   1.9000000e+00   2.8000000e+00   4.6000000e+00   2.3000000e+00   3.6000000e+00   2.9000000e+00   4.7000000e+00   2.1000000e+00   2.0000000e+00   2.7000000e+00   2.3000000e+00   2.4000000e+00   2.5000000e+00   2.1000000e+00   5.7000000e+00   5.4000000e+00   1.8000000e+00   3.4000000e+00   2.0000000e+00   4.7000000e+00   1.4000000e+00   3.1000000e+00   3.5000000e+00   1.1000000e+00   1.3000000e+00   2.4000000e+00   2.9000000e+00   3.7000000e+00   5.4000000e+00   2.5000000e+00   1.2000000e+00   1.8000000e+00   4.4000000e+00   3.0000000e+00   2.1000000e+00   1.3000000e+00   2.8000000e+00   3.1000000e+00   2.7000000e+00   2.0000000e+00   3.5000000e+00   3.5000000e+00   2.5000000e+00   1.8000000e+00   2.0000000e+00   2.6000000e+00   1.7000000e+00   2.2000000e+00   6.4000000e+00   3.8000000e+00   6.4000000e+00   4.9000000e+00   5.8000000e+00   7.6000000e+00   2.3000000e+00   6.6000000e+00   5.1000000e+00   7.7000000e+00   5.1000000e+00   4.6000000e+00   5.7000000e+00   3.5000000e+00   4.4000000e+00   5.5000000e+00   5.1000000e+00   8.7000000e+00   7.8000000e+00   3.6000000e+00   6.4000000e+00   3.6000000e+00   7.5000000e+00   4.0000000e+00   
6.1000000e+00   6.5000000e+00   3.9000000e+00   4.1000000e+00   5.2000000e+00   5.9000000e+00   6.5000000e+00   8.4000000e+00   5.3000000e+00   4.0000000e+00   4.0000000e+00   7.4000000e+00   6.0000000e+00   5.1000000e+00   3.9000000e+00   5.8000000e+00   6.1000000e+00   5.7000000e+00   3.8000000e+00   6.5000000e+00   6.5000000e+00   5.5000000e+00   4.0000000e+00   5.0000000e+00   5.6000000e+00   4.1000000e+00   4.2000000e+00   1.8000000e+00   4.2000000e+00   2.7000000e+00   3.6000000e+00   5.4000000e+00   1.9000000e+00   4.4000000e+00   3.5000000e+00   5.5000000e+00   2.9000000e+00   2.6000000e+00   3.5000000e+00   1.9000000e+00   2.2000000e+00   3.3000000e+00   2.9000000e+00   6.5000000e+00   6.0000000e+00   2.0000000e+00   4.2000000e+00   1.6000000e+00   5.3000000e+00   2.0000000e+00   3.9000000e+00   4.3000000e+00   1.7000000e+00   1.9000000e+00   3.0000000e+00   3.7000000e+00   4.3000000e+00   6.2000000e+00   3.1000000e+00   1.8000000e+00   2.2000000e+00   5.2000000e+00   3.8000000e+00   2.9000000e+00   1.7000000e+00   3.6000000e+00   3.9000000e+00   3.5000000e+00   1.8000000e+00   4.3000000e+00   4.3000000e+00   3.3000000e+00   2.4000000e+00   2.8000000e+00   3.4000000e+00   1.9000000e+00   2.6000000e+00   1.6000000e+00   1.5000000e+00   1.0000000e+00   2.6000000e+00   4.5000000e+00   2.4000000e+00   2.1000000e+00   1.3000000e+00   1.7000000e+00   2.0000000e+00   1.7000000e+00   2.9000000e+00   2.0000000e+00   1.1000000e+00   1.7000000e+00   2.9000000e+00   3.2000000e+00   3.4000000e+00   1.2000000e+00   2.8000000e+00   3.1000000e+00   2.4000000e+00   1.1000000e+00   1.7000000e+00   2.5000000e+00   2.3000000e+00   1.4000000e+00   2.3000000e+00   2.3000000e+00   3.0000000e+00   1.3000000e+00   2.4000000e+00   2.4000000e+00   2.0000000e+00   6.0000000e-01   1.5000000e+00   2.5000000e+00   1.8000000e+00   1.1000000e+00   1.9000000e+00   2.6000000e+00   9.0000000e-01   7.0000000e-01   1.7000000e+00   2.4000000e+00   1.8000000e+00   1.0000000e+00   2.3000000e+00   
2.6000000e+00   1.3000000e+00   2.0000000e+00   3.8000000e+00   1.9000000e+00   3.0000000e+00   1.9000000e+00   3.9000000e+00   1.3000000e+00   8.0000000e-01   1.9000000e+00   5.0000000e-01   6.0000000e-01   1.7000000e+00   1.5000000e+00   4.9000000e+00   4.2000000e+00   1.2000000e+00   2.6000000e+00   6.0000000e-01   3.7000000e+00   8.0000000e-01   2.3000000e+00   2.9000000e+00   9.0000000e-01   9.0000000e-01   1.4000000e+00   2.7000000e+00   2.7000000e+00   4.6000000e+00   1.5000000e+00   1.0000000e+00   1.4000000e+00   3.6000000e+00   2.2000000e+00   1.5000000e+00   9.0000000e-01   2.0000000e+00   2.3000000e+00   1.9000000e+00   0.0000000e+00   2.7000000e+00   2.7000000e+00   1.7000000e+00   8.0000000e-01   1.2000000e+00   1.8000000e+00   5.0000000e-01   1.5000000e+00   8.0000000e-01   1.2000000e+00   4.5000000e+00   1.0000000e+00   1.3000000e+00   1.3000000e+00   1.7000000e+00   1.8000000e+00   7.0000000e-01   2.9000000e+00   2.6000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   2.2000000e+00   3.4000000e+00   8.0000000e-01   2.8000000e+00   1.7000000e+00   2.4000000e+00   9.0000000e-01   7.0000000e-01   2.5000000e+00   2.3000000e+00   1.2000000e+00   7.0000000e-01   9.0000000e-01   2.2000000e+00   1.3000000e+00   2.4000000e+00   2.4000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   2.5000000e+00   8.0000000e-01   1.1000000e+00   1.3000000e+00   2.6000000e+00   7.0000000e-01   1.3000000e+00   1.3000000e+00   2.4000000e+00   1.4000000e+00   2.0000000e+00   2.3000000e+00   9.0000000e-01   2.7000000e+00   3.0000000e+00   1.7000000e+00   1.0000000e+00   2.8000000e+00   1.2000000e+00   7.0000000e-01   1.0000000e+00   1.8000000e+00   1.7000000e+00   1.2000000e+00   4.0000000e-01   3.8000000e+00   3.5000000e+00   1.9000000e+00   1.5000000e+00   1.7000000e+00   2.8000000e+00   9.0000000e-01   1.2000000e+00   1.6000000e+00   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.4000000e+00   1.8000000e+00   3.5000000e+00   6.0000000e-01   9.0000000e-01   
9.0000000e-01   2.5000000e+00   1.1000000e+00   4.0000000e-01   1.2000000e+00   1.3000000e+00   1.2000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   1.6000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   1.3000000e+00   1.0000000e+00   2.0000000e+00   3.9000000e+00   1.8000000e+00   1.1000000e+00   1.9000000e+00   1.1000000e+00   1.2000000e+00   7.0000000e-01   2.3000000e+00   1.8000000e+00   9.0000000e-01   7.0000000e-01   2.9000000e+00   2.8000000e+00   2.8000000e+00   8.0000000e-01   2.2000000e+00   2.5000000e+00   1.8000000e+00   7.0000000e-01   1.5000000e+00   1.9000000e+00   1.7000000e+00   6.0000000e-01   1.3000000e+00   1.7000000e+00   3.0000000e+00   5.0000000e-01   1.8000000e+00   1.8000000e+00   1.6000000e+00   1.0000000e+00   9.0000000e-01   1.9000000e+00   1.0000000e+00   7.0000000e-01   1.3000000e+00   2.0000000e+00   7.0000000e-01   9.0000000e-01   9.0000000e-01   1.8000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   5.7000000e+00   1.0000000e+00   2.5000000e+00   1.9000000e+00   2.9000000e+00   3.0000000e+00   1.9000000e+00   4.1000000e+00   3.8000000e+00   2.9000000e+00   2.5000000e+00   1.1000000e+00   1.0000000e+00   4.6000000e+00   2.0000000e+00   4.0000000e+00   5.0000000e-01   3.6000000e+00   2.1000000e+00   1.5000000e+00   3.7000000e+00   3.5000000e+00   2.4000000e+00   1.7000000e+00   1.1000000e+00   1.4000000e+00   2.5000000e+00   3.6000000e+00   3.6000000e+00   8.0000000e-01   3.0000000e+00   2.7000000e+00   3.7000000e+00   2.0000000e+00   2.3000000e+00   2.5000000e+00   3.8000000e+00   1.9000000e+00   2.5000000e+00   2.5000000e+00   3.6000000e+00   2.6000000e+00   3.2000000e+00   3.5000000e+00   4.7000000e+00   3.2000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   1.6000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   5.9000000e+00   2.1000000e+00   4.5000000e+00   1.7000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   
2.2000000e+00   3.3000000e+00   4.2000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.5000000e+00   2.7000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.1000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   2.2000000e+00   1.7000000e+00   3.5000000e+00   3.4000000e+00   2.7000000e+00   1.7000000e+00   2.1000000e+00   1.8000000e+00   3.6000000e+00   1.8000000e+00   3.4000000e+00   1.1000000e+00   2.6000000e+00   1.9000000e+00   7.0000000e-01   2.7000000e+00   2.7000000e+00   2.0000000e+00   9.0000000e-01   5.0000000e-01   1.8000000e+00   2.1000000e+00   2.6000000e+00   2.6000000e+00   1.2000000e+00   2.8000000e+00   1.9000000e+00   2.9000000e+00   1.8000000e+00   2.1000000e+00   2.3000000e+00   3.0000000e+00   1.7000000e+00   2.3000000e+00   2.3000000e+00   2.8000000e+00   2.2000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   1.8000000e+00   1.1000000e+00   1.2000000e+00   2.0000000e+00   2.5000000e+00   2.0000000e+00   1.0000000e+00   3.6000000e+00   2.7000000e+00   2.1000000e+00   1.5000000e+00   2.5000000e+00   2.4000000e+00   1.5000000e+00   1.2000000e+00   1.4000000e+00   1.8000000e+00   2.0000000e+00   1.1000000e+00   1.2000000e+00   1.4000000e+00   3.3000000e+00   1.2000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   2.1000000e+00   1.2000000e+00   2.2000000e+00   1.5000000e+00   1.4000000e+00   2.0000000e+00   1.9000000e+00   1.4000000e+00   1.6000000e+00   1.6000000e+00   1.3000000e+00   1.5000000e+00   2.3000000e+00   2.0000000e+00   2.6000000e+00   3.1000000e+00   2.0000000e+00   4.2000000e+00   3.3000000e+00   2.2000000e+00   2.6000000e+00   1.6000000e+00   2.5000000e+00   4.7000000e+00   1.3000000e+00   4.1000000e+00   2.4000000e+00   3.7000000e+00   1.6000000e+00   1.2000000e+00   3.8000000e+00   3.6000000e+00   2.5000000e+00   
1.8000000e+00   1.6000000e+00   1.7000000e+00   2.4000000e+00   3.7000000e+00   3.7000000e+00   1.3000000e+00   1.7000000e+00   2.6000000e+00   3.8000000e+00   1.9000000e+00   1.6000000e+00   2.0000000e+00   3.9000000e+00   1.2000000e+00   1.2000000e+00   2.2000000e+00   3.7000000e+00   2.7000000e+00   2.1000000e+00   3.6000000e+00   9.0000000e-01   1.0000000e+00   1.6000000e+00   1.5000000e+00   6.0000000e-01   8.0000000e-01   3.6000000e+00   3.9000000e+00   2.1000000e+00   1.3000000e+00   1.5000000e+00   3.2000000e+00   1.1000000e+00   1.0000000e+00   1.8000000e+00   1.2000000e+00   1.0000000e+00   1.1000000e+00   2.0000000e+00   2.4000000e+00   3.3000000e+00   1.2000000e+00   1.1000000e+00   2.1000000e+00   2.7000000e+00   1.3000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   1.2000000e+00   8.0000000e-01   1.3000000e+00   1.4000000e+00   1.4000000e+00   8.0000000e-01   1.1000000e+00   3.0000000e-01   1.1000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.4000000e+00   9.0000000e-01   7.0000000e-01   4.1000000e+00   3.4000000e+00   1.6000000e+00   1.8000000e+00   1.4000000e+00   2.9000000e+00   6.0000000e-01   1.5000000e+00   2.1000000e+00   9.0000000e-01   1.1000000e+00   6.0000000e-01   1.9000000e+00   1.9000000e+00   3.8000000e+00   7.0000000e-01   8.0000000e-01   1.2000000e+00   2.8000000e+00   1.6000000e+00   7.0000000e-01   1.3000000e+00   1.2000000e+00   1.5000000e+00   1.5000000e+00   8.0000000e-01   1.9000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   6.0000000e-01   1.4000000e+00   1.1000000e+00   2.2000000e+00   1.9000000e+00   1.0000000e+00   6.0000000e-01   3.0000000e+00   2.9000000e+00   2.7000000e+00   7.0000000e-01   2.1000000e+00   2.4000000e+00   1.7000000e+00   6.0000000e-01   1.4000000e+00   1.8000000e+00   1.6000000e+00   7.0000000e-01   1.2000000e+00   1.6000000e+00   2.9000000e+00   8.0000000e-01   1.7000000e+00   1.9000000e+00   1.7000000e+00   1.3000000e+00   8.0000000e-01   1.8000000e+00   3.0000000e-01   
6.0000000e-01   8.0000000e-01   1.9000000e+00   8.0000000e-01   1.0000000e+00   6.0000000e-01   1.7000000e+00   7.0000000e-01   1.3000000e+00   1.6000000e+00   9.0000000e-01   2.0000000e+00   2.0000000e+00   5.2000000e+00   4.3000000e+00   1.1000000e+00   2.9000000e+00   5.0000000e-01   4.0000000e+00   1.1000000e+00   2.6000000e+00   3.4000000e+00   1.2000000e+00   1.2000000e+00   1.7000000e+00   3.2000000e+00   3.2000000e+00   4.9000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   3.9000000e+00   2.5000000e+00   2.0000000e+00   1.2000000e+00   2.3000000e+00   2.6000000e+00   2.2000000e+00   5.0000000e-01   3.0000000e+00   3.0000000e+00   2.0000000e+00   7.0000000e-01   1.5000000e+00   2.1000000e+00   1.0000000e+00   1.3000000e+00   1.9000000e+00   4.7000000e+00   4.0000000e+00   1.8000000e+00   2.2000000e+00   8.0000000e-01   3.9000000e+00   1.4000000e+00   2.3000000e+00   3.3000000e+00   1.3000000e+00   1.3000000e+00   1.4000000e+00   3.1000000e+00   3.1000000e+00   4.8000000e+00   1.3000000e+00   1.4000000e+00   2.0000000e+00   3.2000000e+00   1.6000000e+00   1.9000000e+00   1.3000000e+00   2.0000000e+00   1.7000000e+00   1.5000000e+00   6.0000000e-01   2.3000000e+00   2.1000000e+00   1.3000000e+00   1.4000000e+00   1.4000000e+00   1.4000000e+00   9.0000000e-01   1.0000000e+00   3.4000000e+00   3.5000000e+00   2.5000000e+00   9.0000000e-01   1.9000000e+00   3.4000000e+00   1.5000000e+00   1.0000000e+00   2.0000000e+00   1.6000000e+00   1.4000000e+00   9.0000000e-01   2.2000000e+00   2.6000000e+00   3.5000000e+00   8.0000000e-01   1.5000000e+00   2.1000000e+00   2.3000000e+00   7.0000000e-01   8.0000000e-01   1.6000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   1.7000000e+00   1.0000000e+00   1.0000000e+00   6.0000000e-01   1.5000000e+00   7.0000000e-01   5.0000000e-01   1.4000000e+00   3.6000000e+00   3.5000000e+00   2.1000000e+00   1.3000000e+00   1.9000000e+00   2.8000000e+00   1.1000000e+00   1.0000000e+00   1.4000000e+00   1.2000000e+00   
1.0000000e+00   7.0000000e-01   1.2000000e+00   1.8000000e+00   3.3000000e+00   8.0000000e-01   1.1000000e+00   1.3000000e+00   2.3000000e+00   1.3000000e+00   2.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   1.4000000e+00   1.5000000e+00   1.4000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   5.0000000e-01   1.3000000e+00   1.0000000e+00   1.5000000e+00   5.7000000e+00   2.5000000e+00   5.1000000e+00   1.2000000e+00   4.7000000e+00   2.6000000e+00   2.2000000e+00   4.8000000e+00   4.6000000e+00   3.5000000e+00   2.8000000e+00   2.2000000e+00   7.0000000e-01   3.4000000e+00   4.7000000e+00   4.7000000e+00   1.5000000e+00   3.1000000e+00   3.6000000e+00   4.8000000e+00   2.9000000e+00   3.0000000e+00   3.2000000e+00   4.9000000e+00   2.4000000e+00   2.8000000e+00   3.4000000e+00   4.7000000e+00   3.7000000e+00   3.3000000e+00   4.6000000e+00   4.8000000e+00   2.6000000e+00   4.6000000e+00   7.0000000e-01   4.0000000e+00   3.1000000e+00   2.5000000e+00   4.3000000e+00   4.5000000e+00   3.0000000e+00   2.7000000e+00   1.7000000e+00   2.2000000e+00   2.9000000e+00   4.2000000e+00   3.8000000e+00   1.2000000e+00   3.6000000e+00   3.7000000e+00   4.7000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   4.2000000e+00   2.5000000e+00   3.1000000e+00   3.1000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.5000000e+00   3.4000000e+00   1.6000000e+00   4.5000000e+00   1.2000000e+00   3.1000000e+00   3.5000000e+00   1.3000000e+00   1.3000000e+00   2.2000000e+00   2.9000000e+00   3.5000000e+00   5.4000000e+00   2.3000000e+00   1.0000000e+00   1.2000000e+00   4.4000000e+00   3.0000000e+00   2.1000000e+00   1.3000000e+00   2.8000000e+00   3.1000000e+00   2.7000000e+00   1.2000000e+00   3.5000000e+00   3.5000000e+00   2.5000000e+00   1.0000000e+00   2.0000000e+00   2.6000000e+00   1.3000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   5.0000000e-01   1.1000000e+00   2.5000000e+00   2.3000000e+00   1.2000000e+00   1.3000000e+00   
1.7000000e+00   2.6000000e+00   1.1000000e+00   2.4000000e+00   2.4000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   2.5000000e+00   6.0000000e-01   5.0000000e-01   7.0000000e-01   2.6000000e+00   3.0000000e-01   5.0000000e-01   9.0000000e-01   2.4000000e+00   1.4000000e+00   1.2000000e+00   2.3000000e+00   3.9000000e+00   1.0000000e+00   2.5000000e+00   3.3000000e+00   9.0000000e-01   9.0000000e-01   1.6000000e+00   3.1000000e+00   3.1000000e+00   4.8000000e+00   1.7000000e+00   1.4000000e+00   2.0000000e+00   3.8000000e+00   2.4000000e+00   1.9000000e+00   9.0000000e-01   2.2000000e+00   2.5000000e+00   2.1000000e+00   6.0000000e-01   2.9000000e+00   2.9000000e+00   1.9000000e+00   1.2000000e+00   1.4000000e+00   2.0000000e+00   9.0000000e-01   3.5000000e+00   2.6000000e+00   1.8000000e+00   3.6000000e+00   3.8000000e+00   2.5000000e+00   2.0000000e+00   1.0000000e+00   1.5000000e+00   2.6000000e+00   3.5000000e+00   3.5000000e+00   1.1000000e+00   3.5000000e+00   3.0000000e+00   4.0000000e+00   2.5000000e+00   2.8000000e+00   3.0000000e+00   3.7000000e+00   2.4000000e+00   3.0000000e+00   3.0000000e+00   3.5000000e+00   2.9000000e+00   3.7000000e+00   3.8000000e+00   2.1000000e+00   2.5000000e+00   3.0000000e-01   5.0000000e-01   1.2000000e+00   2.3000000e+00   2.5000000e+00   4.4000000e+00   1.3000000e+00   6.0000000e-01   1.4000000e+00   3.4000000e+00   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.8000000e+00   2.1000000e+00   1.7000000e+00   8.0000000e-01   2.5000000e+00   2.5000000e+00   1.5000000e+00   4.0000000e-01   1.0000000e+00   1.8000000e+00   9.0000000e-01   1.2000000e+00   2.2000000e+00   2.0000000e+00   9.0000000e-01   1.4000000e+00   1.8000000e+00   2.5000000e+00   1.0000000e+00   2.1000000e+00   2.1000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   2.2000000e+00   7.0000000e-01   6.0000000e-01   1.2000000e+00   2.3000000e+00   6.0000000e-01   4.0000000e-01   1.0000000e+00   2.1000000e+00   1.1000000e+00   1.1000000e+00   
2.0000000e+00   2.6000000e+00   2.4000000e+00   1.9000000e+00   6.0000000e-01   8.0000000e-01   1.9000000e+00   2.0000000e+00   2.5000000e+00   2.5000000e+00   1.3000000e+00   2.1000000e+00   1.4000000e+00   2.6000000e+00   1.3000000e+00   1.6000000e+00   1.8000000e+00   2.9000000e+00   1.0000000e+00   1.6000000e+00   2.0000000e+00   2.7000000e+00   1.9000000e+00   2.3000000e+00   2.4000000e+00   4.0000000e-01   1.3000000e+00   2.4000000e+00   2.6000000e+00   4.5000000e+00   1.4000000e+00   7.0000000e-01   1.5000000e+00   3.5000000e+00   2.1000000e+00   1.2000000e+00   4.0000000e-01   1.9000000e+00   2.2000000e+00   1.8000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   1.6000000e+00   7.0000000e-01   1.1000000e+00   1.7000000e+00   8.0000000e-01   1.5000000e+00   2.2000000e+00   2.8000000e+00   4.3000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   3.3000000e+00   1.9000000e+00   1.0000000e+00   2.0000000e-01   1.7000000e+00   2.0000000e+00   1.6000000e+00   9.0000000e-01   2.4000000e+00   2.4000000e+00   1.4000000e+00   9.0000000e-01   9.0000000e-01   1.5000000e+00   4.0000000e-01   1.7000000e+00   1.7000000e+00   3.4000000e+00   1.0000000e-01   1.2000000e+00   1.2000000e+00   2.2000000e+00   1.0000000e+00   7.0000000e-01   1.7000000e+00   1.0000000e+00   9.0000000e-01   1.5000000e+00   1.4000000e+00   1.3000000e+00   1.3000000e+00   1.1000000e+00   1.2000000e+00   8.0000000e-01   1.2000000e+00   1.5000000e+00   1.0000000e+00   2.5000000e+00   1.8000000e+00   1.9000000e+00   1.9000000e+00   1.5000000e+00   2.3000000e+00   1.4000000e+00   2.4000000e+00   1.3000000e+00   1.6000000e+00   1.8000000e+00   2.7000000e+00   1.4000000e+00   1.8000000e+00   1.8000000e+00   2.5000000e+00   1.7000000e+00   2.5000000e+00   2.2000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   2.5000000e+00   9.0000000e-01   2.7000000e+00   2.0000000e+00   3.0000000e+00   1.7000000e+00   2.0000000e+00   2.2000000e+00   2.7000000e+00   1.6000000e+00   2.2000000e+00   
2.2000000e+00   2.5000000e+00   2.1000000e+00   2.9000000e+00   2.8000000e+00   3.5000000e+00   4.4000000e+00   4.4000000e+00   1.6000000e+00   3.2000000e+00   3.3000000e+00   4.5000000e+00   2.8000000e+00   3.1000000e+00   3.3000000e+00   4.6000000e+00   2.5000000e+00   2.9000000e+00   3.5000000e+00   4.4000000e+00   3.4000000e+00   3.4000000e+00   4.3000000e+00   1.3000000e+00   1.3000000e+00   2.1000000e+00   9.0000000e-01   8.0000000e-01   1.8000000e+00   1.1000000e+00   8.0000000e-01   1.4000000e+00   1.5000000e+00   1.2000000e+00   1.2000000e+00   1.0000000e+00   1.3000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   1.0000000e+00   3.4000000e+00   2.0000000e+00   1.1000000e+00   1.1000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   1.0000000e+00   2.5000000e+00   2.5000000e+00   1.5000000e+00   8.0000000e-01   1.0000000e+00   1.8000000e+00   9.0000000e-01   3.4000000e+00   2.0000000e+00   1.3000000e+00   1.7000000e+00   2.2000000e+00   2.1000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   2.5000000e+00   2.3000000e+00   1.4000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   2.4000000e+00   2.5000000e+00   3.5000000e+00   1.8000000e+00   1.7000000e+00   1.9000000e+00   3.6000000e+00   1.3000000e+00   1.9000000e+00   1.9000000e+00   3.4000000e+00   2.4000000e+00   2.6000000e+00   3.3000000e+00   1.1000000e+00   2.1000000e+00   1.4000000e+00   7.0000000e-01   1.5000000e+00   2.2000000e+00   1.1000000e+00   7.0000000e-01   1.3000000e+00   2.0000000e+00   1.4000000e+00   4.0000000e-01   1.9000000e+00   1.2000000e+00   9.0000000e-01   1.0000000e+00   1.4000000e+00   1.5000000e+00   1.4000000e+00   1.4000000e+00   1.2000000e+00   1.3000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.9000000e+00   2.2000000e+00   1.8000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   1.6000000e+00   1.1000000e+00   1.1000000e+00   1.7000000e+00   4.0000000e-01   7.0000000e-01   5.0000000e-01   2.0000000e+00   9.0000000e-01   
1.1000000e+00   7.0000000e-01   1.8000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   8.0000000e-01   2.3000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   2.1000000e+00   1.1000000e+00   1.1000000e+00   2.0000000e+00   1.9000000e+00   1.0000000e+00   1.2000000e+00   4.0000000e-01   1.7000000e+00   9.0000000e-01   1.3000000e+00   1.6000000e+00   2.7000000e+00   2.7000000e+00   1.7000000e+00   8.0000000e-01   1.2000000e+00   1.8000000e+00   5.0000000e-01   6.0000000e-01   1.0000000e+00   2.5000000e+00   1.5000000e+00   1.3000000e+00   2.4000000e+00   1.0000000e+00   2.5000000e+00   1.5000000e+00   1.1000000e+00   2.4000000e+00   1.5000000e+00   5.0000000e-01   1.1000000e+00   1.4000000e+00   1.0000000e+00   1.8000000e+00   1.1000000e+00   1.2000000e+00   9.0000000e-01   1.5000000e+00
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-cityblock-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-cityblock-ml.txt
    deleted file mode 100644
    index 8fb22e6220..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-cityblock-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   3.2420590e+01   3.3246607e+01   3.0526910e+01   3.5166573e+01   3.1868301e+01   3.6025002e+01   3.2513623e+01   3.6557796e+01   3.3752212e+01   3.4422130e+01   3.2526018e+01   3.2581161e+01   3.3743555e+01   3.6960777e+01   3.4225270e+01   3.2965308e+01   3.4591031e+01   3.4204203e+01   3.4678123e+01   3.5728720e+01   3.0830047e+01   3.1550681e+01   3.3304790e+01   3.2676753e+01   3.2742330e+01   3.1684556e+01   3.2830915e+01   3.2956614e+01   2.7365639e+01   3.3207307e+01   3.3420925e+01   3.4357941e+01   2.8280126e+01   3.4523458e+01   3.2705274e+01   3.2455891e+01   3.1636060e+01   3.1594957e+01   3.1805202e+01   3.3886574e+01   3.3438829e+01   3.3330030e+01   3.4168514e+01   3.0637353e+01   4.2149167e+01   3.6340559e+01   2.9315308e+01   3.5778314e+01   3.7693050e+01   3.2598714e+01   3.2990836e+01   3.4967659e+01   3.9748920e+01   3.6745043e+01   2.7117550e+01   3.6014760e+01   2.9367558e+01   3.3845350e+01   3.5477339e+01   3.1513372e+01   3.2517953e+01   2.4755097e+01   3.0229897e+01   3.4799343e+01   3.3371710e+01   2.9600910e+01   3.3275088e+01   3.3567110e+01   3.4527016e+01   3.4942320e+01   3.2359383e+01   3.2607100e+01   3.1467914e+01   2.9032039e+01   3.3122878e+01   2.8496709e+01   2.9908448e+01   2.9962886e+01   3.0345299e+01   3.1737613e+01   2.8551485e+01   3.2610551e+01   3.3082660e+01   3.3719298e+01   3.6434018e+01   3.6589278e+01   3.3889586e+01   3.8036774e+01   3.1483497e+01   3.4196794e+01   3.5154035e+01   3.5488608e+01   3.6143183e+01   3.3473491e+01   3.4686446e+01   2.8687495e+01   3.5725742e+01   3.0188298e+01   3.3084534e+01   3.3538519e+01   3.6226849e+01   2.9052099e+01   3.6032733e+01   3.0811503e+01   3.2616190e+01   3.3888566e+01   3.3074570e+01   2.9683515e+01   3.0600771e+01   3.4345247e+01   3.6983843e+01   3.3692824e+01   3.3762461e+01   3.4024582e+01   3.3698854e+01   3.1238613e+01   3.4978833e+01   3.4991078e+01   3.4577741e+01   3.3749227e+01   3.4982272e+01   3.0487868e+01   3.2317632e+01   3.1125588e+01   
3.4413791e+01   3.1881871e+01   3.1373821e+01   3.0416864e+01   3.2066187e+01   3.1128313e+01   3.0240249e+01   3.0125198e+01   3.1343454e+01   3.5479092e+01   3.4450767e+01   3.2953507e+01   3.4456795e+01   3.0136375e+01   3.3462150e+01   2.9894274e+01   3.1367432e+01   3.2839320e+01   3.1440398e+01   2.9400374e+01   3.1106338e+01   3.1242624e+01   3.5537892e+01   3.3056459e+01   2.8610281e+01   3.4296217e+01   3.5819772e+01   3.2503922e+01   3.0963029e+01   3.4762112e+01   3.4796284e+01   2.9645345e+01   3.4468088e+01   2.6975590e+01   3.3738555e+01   2.8825009e+01   3.2663999e+01   3.2547878e+01   3.2308091e+01   3.2489966e+01   3.0868597e+01   3.2974220e+01   3.0866111e+01   3.8197342e+01   3.0609568e+01   3.5478978e+01   2.9249184e+01   3.6185622e+01   3.1948258e+01   3.2649719e+01   3.3305650e+01   3.4643955e+01   3.6566241e+01   3.4968484e+01   3.2632218e+01   3.6741383e+01   3.5700008e+01   3.1962468e+01   3.1410623e+01   3.0412061e+01   3.3749077e+01   3.5649661e+01   3.7649263e+01   3.2832574e+01   3.1783914e+01   2.8264292e+01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-correlation-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-correlation-ml-iris.txt
    deleted file mode 100644
    index f297500381..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-correlation-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   4.0013388e-03   2.6088954e-05   1.8315482e-03   6.5266850e-04   4.1394685e-04   1.1888069e-03   4.6185289e-04   1.9233577e-03   3.4480388e-03   1.5150632e-05   1.9126718e-03   3.0974734e-03   2.2295833e-04   2.4043394e-03   5.0134320e-03   3.0165570e-03   1.3145239e-04   6.0759419e-04   1.6672981e-03   4.0036132e-03   6.1375191e-04   8.5916540e-03   3.0212269e-03   8.6923503e-03   7.7875235e-03   5.1612907e-04   2.9662451e-04   6.2402983e-04   2.7278440e-03   4.0510347e-03   3.0027154e-03   6.2616145e-03   4.1342211e-03   3.4480388e-03   1.5822510e-03   1.7143312e-03   3.4480388e-03   2.2462074e-04   6.1048465e-04   6.5190641e-04   2.4247873e-02   9.0785596e-04   2.1652052e-04   3.4845573e-03   3.2507646e-03   2.3346511e-03   4.0773355e-04   1.1278223e-04   5.0819669e-04   2.1340893e-01   2.1253858e-01   2.5193073e-01   2.9479565e-01   2.6774348e-01   2.8869785e-01   2.3348217e-01   1.9273490e-01   2.4443270e-01   2.4320510e-01   2.7679421e-01   2.1672263e-01   2.6813840e-01   2.8435705e-01   1.5561363e-01   2.0057173e-01   2.7812139e-01   2.2900256e-01   3.4724680e-01   2.3882260e-01   2.9132931e-01   2.0333645e-01   3.5307051e-01   2.8812452e-01   2.1722530e-01   2.1423111e-01   2.7396952e-01   2.9207940e-01   2.6626182e-01   1.7106032e-01   2.4279706e-01   2.2559055e-01   2.0940857e-01   3.8432412e-01   2.9354670e-01   2.0829958e-01   2.3669414e-01   3.0463326e-01   2.1035851e-01   2.6623117e-01   3.0835417e-01   2.5871089e-01   2.3465249e-01   2.0319416e-01   2.6292582e-01   2.1771735e-01   2.3212816e-01   2.2399387e-01   1.3799316e-01   2.3049526e-01   4.8512087e-01   4.2535066e-01   4.0184471e-01   4.1903049e-01   4.4627199e-01   4.4692268e-01   4.3569888e-01   4.2673251e-01   4.5731950e-01   3.7438176e-01   3.0619251e-01   4.0039114e-01   3.7245195e-01   4.5829878e-01   4.5814844e-01   3.6107062e-01   3.7600936e-01   3.7662883e-01   5.2492832e-01   4.2684428e-01   3.7975064e-01   4.0636707e-01   4.6364339e-01   3.4607190e-01   3.6988036e-01   
3.6764668e-01   3.2524634e-01   3.1943549e-01   4.4481193e-01   3.5496498e-01   4.1356534e-01   3.2082320e-01   4.5322964e-01   3.4300770e-01   4.4485158e-01   3.9755578e-01   3.9702418e-01   3.7202285e-01   3.1131344e-01   3.4018064e-01   4.0217537e-01   3.1441868e-01   4.2535066e-01   4.1533176e-01   3.9695242e-01   3.5313531e-01   3.9400199e-01   3.4652657e-01   3.6608320e-01   3.6684161e-01   3.3929143e-03   2.6033698e-03   7.7673212e-03   6.4081099e-03   9.2794464e-03   2.8819447e-03   1.4536586e-03   9.6714455e-04   3.7992387e-03   5.9342609e-03   3.9974031e-04   6.0694735e-03   9.1304628e-03   1.7655983e-02   1.1643899e-02   4.0363794e-03   1.6463709e-03   1.0706739e-02   6.7984475e-04   7.6845878e-03   2.3516587e-02   9.9502337e-05   1.0315881e-02   1.0821735e-03   1.8887942e-03   2.4624674e-03   1.5760536e-03   3.6638868e-03   1.6253664e-03   7.8762517e-04   1.9487010e-02   1.6211862e-02   9.6714455e-04   2.2382105e-03   2.1712385e-03   9.6714455e-04   2.9674185e-03   1.9068589e-03   6.4555509e-03   8.8254342e-03   8.1777355e-03   3.4663084e-03   9.6481454e-03   9.7747764e-05   1.0706793e-02   4.2246850e-03   4.9836128e-03   1.6613867e-03   1.6856078e-01   1.6930583e-01   2.0381801e-01   2.4171317e-01   2.1689289e-01   2.4212069e-01   1.9027913e-01   1.5127382e-01   1.9696970e-01   1.9830901e-01   2.2503195e-01   1.7290786e-01   2.1618942e-01   2.3648275e-01   1.1720113e-01   1.5636322e-01   2.3357633e-01   1.8548772e-01   2.8791738e-01   1.9215793e-01   2.4470933e-01   1.5850128e-01   2.9662484e-01   2.4061109e-01   1.7172858e-01   1.6853658e-01   2.2283143e-01   2.4032537e-01   2.1849821e-01   1.2975740e-01   1.9502432e-01   1.7953012e-01   1.6504306e-01   3.2966120e-01   2.4985160e-01   1.6946778e-01   1.8991741e-01   2.4919698e-01   1.7046257e-01   2.1691882e-01   2.6018338e-01   2.1310883e-01   1.8776864e-01   1.5909082e-01   2.1620321e-01   1.7782256e-01   1.8911127e-01   1.7894094e-01   9.9649433e-02   1.8605378e-01   4.2702260e-01   3.6742642e-01   
3.4284735e-01   3.6351462e-01   3.8703088e-01   3.8643219e-01   3.8033312e-01   3.6849688e-01   3.9561383e-01   3.1894438e-01   2.5437467e-01   3.4129756e-01   3.1464030e-01   3.9617829e-01   3.9513061e-01   3.0506617e-01   3.2181902e-01   3.2465822e-01   4.5849285e-01   3.6615509e-01   3.2201598e-01   3.4969244e-01   4.0175045e-01   2.8934891e-01   3.1614551e-01   3.1385643e-01   2.7059114e-01   2.6798818e-01   3.8429703e-01   3.0087709e-01   3.5349775e-01   2.7097983e-01   3.9156246e-01   2.9045741e-01   3.8972428e-01   3.3603762e-01   3.4254663e-01   3.1960575e-01   2.6050327e-01   2.8406500e-01   3.4225008e-01   2.5735736e-01   3.6742642e-01   3.5724408e-01   3.3861338e-01   2.9412113e-01   3.3288556e-01   2.9101723e-01   3.1374321e-01   3.1516519e-01   1.6665208e-03   9.3886805e-04   6.2270349e-04   1.5623211e-03   3.9549141e-04   1.6439076e-03   3.0144044e-03   3.0583810e-05   2.0234943e-03   2.6246966e-03   3.8983492e-04   2.5645160e-03   5.6944285e-03   3.3339055e-03   1.2831328e-04   4.1302346e-04   2.1101667e-03   3.4972086e-03   8.7482704e-04   9.4271115e-03   2.5080125e-03   8.7042936e-03   7.0125369e-03   3.5088415e-04   1.9451019e-04   3.9574419e-04   2.5986219e-03   3.6402032e-03   2.4900748e-03   7.0784673e-03   4.7935149e-03   3.0144044e-03   1.2869381e-03   1.4017897e-03   3.0144044e-03   1.7197161e-04   4.4525534e-04   7.8138074e-04   2.2693053e-02   1.2229281e-03   1.5754704e-04   3.7899670e-03   2.6957902e-03   2.7721274e-03   4.5733371e-04   2.2324575e-04   3.1003560e-04   2.1002397e-01   2.0931874e-01   2.4834042e-01   2.9081912e-01   2.6391644e-01   2.8536997e-01   2.3033428e-01   1.8962461e-01   2.4088505e-01   2.3991724e-01   2.7290008e-01   2.1345771e-01   2.6419304e-01   2.8088883e-01   1.5266518e-01   1.9720348e-01   2.7496549e-01   2.2580939e-01   3.4275207e-01   2.3533918e-01   2.8800391e-01   1.9991219e-01   3.4890690e-01   2.8470246e-01   2.1378625e-01   2.1075909e-01   2.7013290e-01   2.8823552e-01   2.6275308e-01   1.6787442e-01   
2.3921107e-01   2.2212337e-01   2.0605918e-01   3.8041788e-01   2.9051019e-01   2.0550537e-01   2.3319159e-01   3.0043218e-01   2.0746652e-01   2.6256228e-01   3.0491843e-01   2.5539836e-01   2.3113139e-01   1.9984797e-01   2.5951290e-01   2.1484809e-01   2.2899468e-01   2.2062653e-01   1.3495710e-01   2.2721304e-01   4.8106467e-01   4.2120214e-01   3.9753504e-01   4.1511036e-01   4.4203186e-01   4.4255637e-01   4.3182495e-01   4.2255527e-01   4.5284888e-01   3.7037516e-01   3.0238339e-01   3.9606802e-01   3.6819494e-01   4.5378706e-01   4.5354234e-01   3.5697379e-01   3.7213234e-01   3.7297305e-01   5.2009409e-01   4.2241474e-01   3.7552000e-01   4.0230555e-01   4.5916611e-01   3.4185974e-01   3.6603540e-01   3.6379117e-01   3.2119479e-01   3.1570003e-01   4.4043880e-01   3.5104995e-01   4.0917086e-01   3.1725229e-01   4.4875464e-01   3.3921954e-01   4.4101743e-01   3.9296628e-01   3.9316311e-01   3.6831351e-01   3.0762140e-01   3.3601664e-01   3.9776902e-01   3.1006892e-01   4.2120214e-01   4.1114584e-01   3.9269988e-01   3.4869458e-01   3.8944692e-01   3.4244384e-01   3.6236872e-01   3.6319420e-01   3.2811792e-03   2.1674206e-03   3.8606330e-03   4.5444049e-04   1.6669051e-04   6.9315236e-04   1.5191179e-03   7.4896915e-04   1.0486334e-03   3.0115188e-03   8.3553530e-03   1.1528814e-02   9.5172421e-03   2.7099731e-03   7.1677618e-04   4.9455548e-03   1.1260396e-03   4.0113810e-03   1.7041109e-02   1.7048436e-03   3.2998306e-03   3.5839458e-03   6.5708756e-04   7.5073414e-04   1.6739794e-03   1.4404874e-04   6.5489426e-04   3.9918560e-03   9.7678136e-03   9.5698494e-03   6.9315236e-04   4.1051921e-03   4.2098821e-03   6.9315236e-04   7.7852178e-04   5.0066998e-04   4.6641147e-03   1.9877450e-02   2.9999880e-03   2.6696154e-03   2.3124511e-03   2.6940762e-03   3.6188953e-03   7.8131154e-04   1.7395433e-03   1.1236329e-03   1.8068407e-01   1.7911784e-01   2.1632614e-01   2.5732132e-01   2.3189913e-01   2.4898678e-01   1.9775994e-01   1.6095697e-01   2.0933838e-01   
2.0710048e-01   2.4048237e-01   1.8306865e-01   2.3298507e-01   2.4543409e-01   1.2760114e-01   1.6921167e-01   2.3873595e-01   1.9385405e-01   3.0859675e-01   2.0396213e-01   2.5141439e-01   1.7197884e-01   3.1193260e-01   2.4875015e-01   1.8437187e-01   1.8189435e-01   2.3759181e-01   2.5412614e-01   2.2898904e-01   1.4232827e-01   2.0806035e-01   1.9199138e-01   1.7693745e-01   3.4002004e-01   2.5277786e-01   1.7384453e-01   2.0213849e-01   2.6767804e-01   1.7597754e-01   2.2968366e-01   2.6752309e-01   2.2134041e-01   2.0039734e-01   1.7140536e-01   2.2556075e-01   1.8258974e-01   1.9648053e-01   1.9006393e-01   1.1321036e-01   1.9554999e-01   4.3600761e-01   3.7954719e-01   3.5812457e-01   3.7278397e-01   3.9968201e-01   4.0081518e-01   3.8843945e-01   3.8096257e-01   4.1111272e-01   3.3107441e-01   2.6691137e-01   3.5682832e-01   3.3041468e-01   4.1223120e-01   4.1255773e-01   3.1903974e-01   3.3211211e-01   3.3199965e-01   4.7707633e-01   3.8217339e-01   3.3708398e-01   3.6131644e-01   4.1712805e-01   3.0571403e-01   3.2625308e-01   3.2419755e-01   2.8563873e-01   2.7883266e-01   3.9884997e-01   3.1256889e-01   3.6952768e-01   2.7953122e-01   4.0726631e-01   3.0093515e-01   3.9702583e-01   3.5566883e-01   3.5181973e-01   3.2782384e-01   2.7114516e-01   3.0000941e-01   3.5891976e-01   2.7762255e-01   3.7954719e-01   3.7024637e-01   3.5327118e-01   3.1362016e-01   3.5214869e-01   3.0546169e-01   3.2226324e-01   3.2277503e-01   1.1668032e-04   8.6044327e-05   1.4968429e-03   3.9691382e-03   6.2388252e-03   7.0588028e-04   1.9691519e-03   6.1279520e-03   1.9660913e-04   2.5761274e-03   2.5168387e-03   2.4029967e-03   9.7429318e-04   2.3122381e-03   2.3682626e-04   7.1643085e-03   1.3128642e-04   5.3939078e-03   6.2992904e-03   9.0353935e-03   1.2266741e-02   2.0706893e-03   1.5408774e-03   2.5522607e-03   3.9522692e-03   6.6899152e-03   6.3980861e-03   2.9039306e-03   1.6942568e-03   6.2388252e-03   3.9336207e-03   4.1533642e-03   6.2388252e-03   1.2180733e-03   
2.1518176e-03   8.8270219e-04   3.2743785e-02   7.7572782e-05   1.3417853e-03   2.5322339e-03   6.7844678e-03   8.2761566e-04   8.6236157e-04   3.1792685e-04   2.2579028e-03   2.2976852e-01   2.2800773e-01   2.6917639e-01   3.1391346e-01   2.8619117e-01   3.0434544e-01   2.4844431e-01   2.0773635e-01   2.6148944e-01   2.5886513e-01   2.9555853e-01   2.3241008e-01   2.8723433e-01   3.0077518e-01   1.6999944e-01   2.1692490e-01   2.9290302e-01   2.4423212e-01   3.6894940e-01   2.5556289e-01   3.0695160e-01   2.1997775e-01   3.7292526e-01   3.0427758e-01   2.3385552e-01   2.3106102e-01   2.9243468e-01   3.1048744e-01   2.8298832e-01   1.8662712e-01   2.6007204e-01   2.4232174e-01   2.2560003e-01   4.0265980e-01   3.0762748e-01   2.2151447e-01   2.5355085e-01   3.2493368e-01   2.2408199e-01   2.8382455e-01   3.2448802e-01   2.7442187e-01   2.5162228e-01   2.1940897e-01   2.7915380e-01   2.3127910e-01   2.4702051e-01   2.4019206e-01   1.5301315e-01   2.4619533e-01   5.0391216e-01   4.4483392e-01   4.2228707e-01   4.3731273e-01   4.6617394e-01   4.6750253e-01   4.5367757e-01   4.4636521e-01   4.7842690e-01   3.9329580e-01   3.2434154e-01   4.2091234e-01   3.9272980e-01   4.7962551e-01   4.7998875e-01   3.8052757e-01   3.9422077e-01   3.9365636e-01   5.4778535e-01   4.4784024e-01   3.9985415e-01   4.2545240e-01   4.8476479e-01   3.6622351e-01   3.8794414e-01   3.8577592e-01   3.4462011e-01   3.3712507e-01   4.6543611e-01   3.7346604e-01   4.3442200e-01   3.3762412e-01   4.7437523e-01   3.6087712e-01   4.6258851e-01   4.1954525e-01   4.1507031e-01   3.8935328e-01   3.2880662e-01   3.6009673e-01   4.2314218e-01   3.3549029e-01   4.4483392e-01   4.3505302e-01   4.1710447e-01   3.7450914e-01   4.1581746e-01   3.6597096e-01   3.8346411e-01   3.8386195e-01   2.7739415e-04   8.2117467e-04   2.7843462e-03   4.7394226e-03   3.9365385e-04   1.1964598e-03   4.7400628e-03   2.5527396e-04   3.2634446e-03   3.7103657e-03   3.3188195e-03   8.6302611e-04   1.5635411e-03   6.0189508e-04   
5.5859876e-03   3.8282951e-04   7.0925635e-03   5.0273924e-03   7.3470160e-03   1.0223636e-02   1.3503463e-03   9.3049535e-04   1.9663208e-03   2.7155903e-03   5.0798223e-03   5.5875952e-03   3.5987384e-03   2.6550151e-03   4.7394226e-03   3.5923878e-03   3.7937786e-03   4.7394226e-03   6.6480476e-04   1.3814035e-03   1.1581699e-03   3.0091048e-02   1.0888067e-04   1.1634967e-03   1.9052023e-03   5.6332034e-03   7.9034466e-04   3.5005887e-04   1.0179107e-04   1.6076683e-03   2.2018795e-01   2.1841167e-01   2.5888963e-01   3.0298911e-01   2.7568827e-01   2.9345793e-01   2.3845526e-01   1.9853879e-01   2.5133209e-01   2.4870042e-01   2.8491736e-01   2.2273608e-01   2.7678086e-01   2.8993699e-01   1.6165333e-01   2.0762176e-01   2.8220337e-01   2.3432093e-01   3.5743296e-01   2.4549712e-01   2.9602684e-01   2.1063470e-01   3.6116406e-01   2.9338620e-01   2.2421094e-01   2.2149308e-01   2.8182182e-01   2.9956540e-01   2.7243830e-01   1.7796817e-01   2.4995634e-01   2.3251210e-01   2.1609445e-01   3.9047864e-01   2.9675065e-01   2.1202908e-01   2.4353030e-01   3.1395037e-01   2.1453916e-01   2.7329907e-01   3.1330772e-01   2.6399518e-01   2.4164659e-01   2.1003834e-01   2.6865531e-01   2.2160867e-01   2.3705638e-01   2.3039039e-01   1.4523591e-01   2.3625874e-01   4.9071957e-01   4.3219633e-01   4.0993016e-01   4.2475440e-01   4.5332327e-01   4.5465382e-01   4.4096188e-01   4.3371359e-01   4.6548638e-01   3.8123224e-01   3.1318896e-01   4.0857587e-01   3.8073064e-01   4.6668253e-01   4.6707002e-01   3.6864305e-01   3.8213773e-01   3.8159377e-01   5.3428461e-01   4.3521869e-01   3.8775353e-01   4.1301893e-01   4.7176152e-01   3.5457739e-01   3.7593537e-01   3.7379344e-01   3.3323181e-01   3.2577190e-01   4.5261005e-01   3.6164142e-01   4.2194514e-01   3.2625778e-01   4.6147754e-01   3.4920516e-01   4.4979521e-01   4.0734174e-01   4.0275102e-01   3.7733353e-01   3.1756791e-01   3.4852048e-01   4.1080565e-01   3.2443308e-01   4.3219633e-01   4.2252463e-01   4.0479526e-01   
3.6286403e-01   4.0364435e-01   3.5428112e-01   3.7151258e-01   3.7191203e-01   2.0478784e-03   4.7860280e-03   7.2727783e-03   1.2329906e-03   2.0550268e-03   7.3066158e-03   5.3810576e-04   3.2245377e-03   2.1674888e-03   2.7856851e-03   1.6387773e-03   3.1323423e-03   6.7307381e-05   8.3332066e-03   3.3954200e-04   4.9119026e-03   7.6276175e-03   8.9240504e-03   1.3851706e-02   2.8347122e-03   2.2069601e-03   3.5278340e-03   4.3892066e-03   7.6179982e-03   7.9398397e-03   2.0003115e-03   1.2885024e-03   7.2727783e-03   5.1809110e-03   5.4327573e-03   7.2727783e-03   1.7938257e-03   2.8924302e-03   1.4132511e-03   3.5877316e-02   5.5425651e-05   2.1070391e-03   2.2246722e-03   8.2675293e-03   4.8309816e-04   1.2152730e-03   6.6498554e-04   3.1323983e-03   2.3387083e-01   2.3171300e-01   2.7340823e-01   3.1873143e-01   2.9087099e-01   3.0765106e-01   2.5178166e-01   2.1138210e-01   2.6568600e-01   2.6244481e-01   3.0032621e-01   2.3618270e-01   2.9221666e-01   3.0443867e-01   1.7368514e-01   2.2112593e-01   2.9589691e-01   2.4771596e-01   3.7467992e-01   2.5965416e-01   3.1023215e-01   2.2429067e-01   3.7775475e-01   3.0780450e-01   2.3805322e-01   2.3537447e-01   2.9708156e-01   3.1499475e-01   2.8689460e-01   1.9071775e-01   2.6437980e-01   2.4650321e-01   2.2965596e-01   4.0666307e-01   3.1024566e-01   2.2426781e-01   2.5770992e-01   3.3024819e-01   2.2703835e-01   2.8812097e-01   3.2789845e-01   2.7792695e-01   2.5584753e-01   2.2352457e-01   2.8285984e-01   2.3411709e-01   2.5033634e-01   2.4414228e-01   1.5718330e-01   2.4987685e-01   5.0772777e-01   4.4916856e-01   4.2715006e-01   4.4114868e-01   4.7061118e-01   4.7223859e-01   4.5731921e-01   4.5076024e-01   4.8335950e-01   3.9759816e-01   3.2864932e-01   4.2581769e-01   3.9765561e-01   4.8465345e-01   4.8525127e-01   3.8513607e-01   3.9820652e-01   3.9712803e-01   5.5326906e-01   4.5284577e-01   4.0466523e-01   4.2968979e-01   4.8967945e-01   3.7122633e-01   3.9189300e-01   3.8976354e-01   3.4937792e-01   
3.4115590e-01   4.7020033e-01   3.7767688e-01   4.3942138e-01   3.4125876e-01   4.7934102e-01   3.6486747e-01   4.6609349e-01   4.2514437e-01   4.1889348e-01   3.9297481e-01   3.3279384e-01   3.6502235e-01   4.2824304e-01   3.4111505e-01   4.4916856e-01   4.3953404e-01   4.2185806e-01   3.8004789e-01   4.2135185e-01   3.7064704e-01   3.8713375e-01   3.8737322e-01   5.9378937e-04   1.6263483e-03   3.1194349e-04   8.5089275e-04   1.6365846e-03   1.1579874e-03   4.9430863e-03   7.7957878e-03   5.8209267e-03   9.7423596e-04   2.1559031e-04   2.8280232e-03   2.1261057e-03   1.8496545e-03   1.2342594e-02   1.9347552e-03   5.4995961e-03   5.2624400e-03   1.3773080e-04   7.1496401e-05   7.1145768e-04   9.6706058e-04   1.9028496e-03   3.0842001e-03   7.5087003e-03   6.3709632e-03   1.6263483e-03   2.4219636e-03   2.5416684e-03   1.6263483e-03   4.5881830e-05   1.0508341e-04   2.2101780e-03   2.1711060e-02   1.4779987e-03   1.0004664e-03   2.4029906e-03   2.5527616e-03   2.4859397e-03   1.2918144e-04   4.6388898e-04   3.7292268e-04   1.9674800e-01   1.9551090e-01   2.3384591e-01   2.7581575e-01   2.4956432e-01   2.6852776e-01   2.1529223e-01   1.7652116e-01   2.2659936e-01   2.2483764e-01   2.5838801e-01   1.9958250e-01   2.5031938e-01   2.6459593e-01   1.4127677e-01   1.8459488e-01   2.5809699e-01   2.1110498e-01   3.2773890e-01   2.2109976e-01   2.7105695e-01   1.8736680e-01   3.3227725e-01   2.6813261e-01   2.0050552e-01   1.9777445e-01   2.5552383e-01   2.7284453e-01   2.4733170e-01   1.5638593e-01   2.2514733e-01   2.0849921e-01   1.9287136e-01   3.6191982e-01   2.7281863e-01   1.9071038e-01   2.1912633e-01   2.8593968e-01   1.9281596e-01   2.4768186e-01   2.8763479e-01   2.3971138e-01   2.1723753e-01   1.8699948e-01   2.4393940e-01   1.9979806e-01   2.1397418e-01   2.0672587e-01   1.2529166e-01   2.1270878e-01   4.6033946e-01   4.0222602e-01   3.7977808e-01   3.9565939e-01   4.2276626e-01   4.2367223e-01   4.1181849e-01   4.0362790e-01   4.3403229e-01   3.5248619e-01   
2.8628855e-01   3.7840466e-01   3.5121985e-01   4.3508597e-01   4.3518504e-01   3.3982310e-01   3.5380487e-01   3.5403534e-01   5.0086936e-01   4.0431760e-01   3.5820087e-01   3.8360776e-01   4.4020334e-01   3.2567463e-01   3.4780712e-01   3.4566354e-01   3.0520609e-01   2.9886193e-01   4.2163480e-01   3.3351442e-01   3.9135037e-01   2.9988740e-01   4.3006423e-01   3.2170490e-01   4.2068520e-01   3.7643926e-01   3.7416586e-01   3.4964963e-01   2.9095264e-01   3.1987117e-01   3.8035231e-01   2.9582266e-01   4.0222602e-01   3.9256940e-01   3.7489846e-01   3.3318735e-01   3.7289799e-01   3.2576051e-01   3.4389968e-01   3.4452790e-01   2.6022528e-04   1.6357171e-03   1.5776794e-03   3.8821876e-04   3.3383101e-03   8.1348232e-03   1.2673676e-02   9.6454978e-03   2.5951087e-03   4.7710550e-04   5.9640558e-03   5.0935416e-04   4.5005321e-03   1.8295131e-02   8.0881241e-04   4.5548809e-03   2.4342161e-03   4.9437559e-04   7.1285200e-04   1.1827453e-03   5.3640347e-04   3.9274104e-04   2.7126384e-03   1.1795471e-02   1.0834355e-02   2.6022528e-04   3.1798475e-03   3.2407554e-03   2.6022528e-04   8.6271302e-04   3.7982619e-04   4.6816553e-03   1.6563494e-02   3.8583498e-03   2.4190920e-03   3.6876972e-03   1.5540748e-03   4.9548680e-03   1.1822578e-03   2.1020911e-03   7.8943086e-04   1.7690932e-01   1.7592254e-01   2.1244374e-01   2.5265403e-01   2.2738778e-01   2.4651370e-01   1.9514374e-01   1.5780110e-01   2.0549282e-01   2.0415245e-01   2.3585798e-01   1.7978451e-01   2.2802648e-01   2.4243736e-01   1.2428265e-01   1.6526016e-01   2.3669421e-01   1.9101658e-01   3.0265561e-01   2.0025949e-01   2.4898144e-01   1.6786898e-01   3.0733331e-01   2.4595698e-01   1.8046568e-01   1.7781045e-01   2.3314059e-01   2.4991164e-01   2.2560913e-01   1.3845901e-01   2.0404813e-01   1.8812805e-01   1.7322140e-01   3.3666075e-01   2.5129679e-01   1.7201674e-01   1.9833201e-01   2.6229128e-01   1.7386253e-01   2.2573363e-01   2.6492814e-01   2.1852944e-01   1.9648937e-01   1.6758641e-01   
2.2246603e-01   1.8066071e-01   1.9389274e-01   1.8653629e-01   1.0911288e-01   1.9242842e-01   4.3299781e-01   3.7574545e-01   3.5353088e-01   3.6969868e-01   3.9574789e-01   3.9644649e-01   3.8564737e-01   3.7707427e-01   4.0646475e-01   3.2727058e-01   2.6301141e-01   3.5217112e-01   3.2569733e-01   4.0744443e-01   4.0742674e-01   3.1477415e-01   3.2876954e-01   3.2939944e-01   4.7166141e-01   3.7739415e-01   3.3254323e-01   3.5763917e-01   4.1251084e-01   3.0085067e-01   3.2295735e-01   3.2084322e-01   2.8110727e-01   2.7535557e-01   3.9443856e-01   3.0887601e-01   3.6474538e-01   2.7663010e-01   4.0256676e-01   2.9754793e-01   3.9443683e-01   3.4998590e-01   3.4873324e-01   3.2500314e-01   2.6771990e-01   2.9525187e-01   3.5397733e-01   2.7178914e-01   3.7574545e-01   3.6622316e-01   3.4883278e-01   3.0797294e-01   3.4655783e-01   3.0107914e-01   3.1936679e-01   3.2010756e-01   3.0868881e-03   2.8691382e-03   1.3643967e-04   5.3429196e-03   1.0581034e-02   1.6459895e-02   1.2551534e-02   4.1576220e-03   1.2160579e-03   8.7064401e-03   5.4418849e-05   6.8214146e-03   2.2732626e-02   5.6586090e-04   4.9831593e-03   1.1314391e-03   1.3090644e-03   1.7262411e-03   1.9770194e-03   1.0389613e-03   9.3641634e-05   2.8067647e-03   1.5471006e-02   1.4411876e-02   0.0000000e+00   4.0251529e-03   4.0402030e-03   0.0000000e+00   2.0104773e-03   1.1579294e-03   6.7733512e-03   1.3328703e-02   6.1195429e-03   3.8280621e-03   5.4471697e-03   1.3203495e-03   7.3930866e-03   2.5511055e-03   3.8011014e-03   1.5935161e-03   1.6488937e-01   1.6420208e-01   1.9946134e-01   2.3841307e-01   2.1377841e-01   2.3352484e-01   1.8323739e-01   1.4660873e-01   1.9269692e-01   1.9183992e-01   2.2200730e-01   1.6791595e-01   2.1423078e-01   2.2922867e-01   1.1407468e-01   1.5349471e-01   2.2418179e-01   1.7908948e-01   2.8692621e-01   1.8765985e-01   2.3596576e-01   1.5596498e-01   2.9203641e-01   2.3278981e-01   1.6829150e-01   1.6563525e-01   2.1942293e-01   2.3592536e-01   2.1256421e-01   
1.2755291e-01   1.9121342e-01   1.7576718e-01   1.6132929e-01   3.2148865e-01   2.3885357e-01   1.6118125e-01   1.8573297e-01   2.4757051e-01   1.6279862e-01   2.1240660e-01   2.5149161e-01   2.0595435e-01   1.8389163e-01   1.5580881e-01   2.0964365e-01   1.6953436e-01   1.8203376e-01   1.7437090e-01   9.9184065e-02   1.8031354e-01   4.1664204e-01   3.5971944e-01   3.3744612e-01   3.5416868e-01   3.7936058e-01   3.7982315e-01   3.7006184e-01   3.6098196e-01   3.8956200e-01   3.1201251e-01   2.4889912e-01   3.3607844e-01   3.1002022e-01   3.9046127e-01   3.9028467e-01   2.9949936e-01   3.1373736e-01   3.1479561e-01   4.5355800e-01   3.6085089e-01   3.1682959e-01   3.4195608e-01   3.9553949e-01   2.8555816e-01   3.0804941e-01   3.0593834e-01   2.6633772e-01   2.6121338e-01   3.7782244e-01   2.9399549e-01   3.4839508e-01   2.6278429e-01   3.8569374e-01   2.8303600e-01   3.7885421e-01   3.3349568e-01   3.3352424e-01   3.1033775e-01   2.5375578e-01   2.8011018e-01   3.3772574e-01   2.5671985e-01   3.5971944e-01   3.5022311e-01   3.3289784e-01   2.9224130e-01   3.3016010e-01   2.8599652e-01   3.0475125e-01   3.0561759e-01   1.6232219e-03   2.8110674e-03   3.0992704e-04   2.8009412e-03   5.3859157e-03   3.4428307e-03   2.2389965e-04   4.8865483e-04   1.7600776e-03   3.6417064e-03   7.4576509e-04   9.1296419e-03   2.8123816e-03   8.0068765e-03   7.3379880e-03   3.9579356e-04   1.9713653e-04   6.0194434e-04   2.3367622e-03   3.6239908e-03   3.0185118e-03   6.3202257e-03   4.4046298e-03   3.0868881e-03   1.7141363e-03   1.8461990e-03   3.0868881e-03   1.2729358e-04   4.6571753e-04   8.6393185e-04   2.3903803e-02   9.0441478e-04   3.0528309e-04   3.1648190e-03   3.1223857e-03   2.2339929e-03   2.7724170e-04   9.5102356e-05   4.3913729e-04   2.1061066e-01   2.0964712e-01   2.4888926e-01   2.9163776e-01   2.6471732e-01   2.8522189e-01   2.3035873e-01   1.8998369e-01   2.4143327e-01   2.4006731e-01   2.7373417e-01   2.1381600e-01   2.6519773e-01   2.8097864e-01   1.5319882e-01   
1.9790190e-01   2.7464921e-01   2.2594124e-01   3.4406181e-01   2.3583526e-01   2.8783351e-01   2.0067586e-01   3.4959337e-01   2.8469567e-01   2.1442337e-01   2.1148410e-01   2.7089383e-01   2.8885413e-01   2.6304757e-01   1.6861411e-01   2.3983808e-01   2.2272157e-01   2.0662897e-01   3.8050402e-01   2.8992390e-01   2.0524163e-01   2.3373909e-01   3.0156183e-01   2.0732304e-01   2.6311208e-01   3.0478863e-01   2.5545595e-01   2.3172924e-01   2.0047959e-01   2.5968741e-01   2.1460680e-01   2.2900957e-01   2.2107714e-01   1.3590375e-01   2.2746759e-01   4.8087096e-01   4.2142796e-01   3.9814594e-01   4.1502871e-01   4.4228995e-01   4.4300705e-01   4.3159396e-01   4.2281768e-01   4.5341178e-01   3.7067256e-01   3.0283474e-01   3.9670954e-01   3.6890449e-01   4.5441105e-01   4.5432159e-01   3.5749751e-01   3.7222314e-01   3.7273757e-01   5.2092654e-01   4.2307513e-01   3.7613897e-01   4.0250144e-01   4.5970761e-01   3.4267709e-01   3.6611458e-01   3.6389952e-01   3.2189682e-01   3.1593975e-01   4.4091119e-01   3.5132753e-01   4.0985044e-01   3.1723537e-01   4.4934566e-01   3.3938050e-01   4.4068685e-01   3.9407776e-01   3.9311145e-01   3.6818097e-01   3.0785214e-01   3.3679573e-01   3.9853670e-01   3.1138703e-01   4.2142796e-01   4.1148317e-01   3.9324794e-01   3.4985868e-01   3.9052142e-01   3.4304315e-01   3.6227810e-01   3.6300235e-01   3.5297445e-03   2.3993465e-03   8.1469449e-03   8.4116551e-03   8.4748907e-03   3.0443320e-03   1.8587915e-03   2.8140158e-03   3.7033592e-03   2.9317197e-03   1.3265454e-02   4.5015236e-03   2.6265400e-03   7.5415562e-03   1.6353241e-03   1.4046892e-03   3.1172532e-03   6.0159720e-04   2.6265400e-03   7.0517547e-03   5.5270478e-03   6.4518235e-03   2.8691382e-03   6.1014438e-03   6.3013427e-03   2.8691382e-03   1.1615614e-03   1.4498289e-03   4.4069463e-03   2.8268404e-02   1.4610903e-03   3.3082950e-03   4.5136771e-04   5.8170191e-03   1.2876475e-03   5.5964987e-04   1.3152056e-03   2.3446691e-03   1.9624436e-01   1.9365486e-01   
2.3272539e-01   2.7570910e-01   2.4962218e-01   2.6341222e-01   2.1161968e-01   1.7505868e-01   2.2555325e-01   2.2171510e-01   2.5853430e-01   1.9783603e-01   2.5146360e-01   2.6075203e-01   1.4121466e-01   1.8483453e-01   2.5221719e-01   2.0803875e-01   3.2980663e-01   2.1983733e-01   2.6580206e-01   1.8792174e-01   3.3096405e-01   2.6375229e-01   2.0022607e-01   1.9799659e-01   2.5530781e-01   2.7170116e-01   2.4472945e-01   1.5721426e-01   2.2453551e-01   2.0791989e-01   1.9232403e-01   3.5721533e-01   2.6541719e-01   1.8583170e-01   2.1815822e-01   2.8740780e-01   1.8853156e-01   2.4642239e-01   2.8243457e-01   2.3594931e-01   2.1655773e-01   1.8685267e-01   2.4074340e-01   1.9492750e-01   2.1026662e-01   2.0538411e-01   1.2769778e-01   2.1026662e-01   4.5350142e-01   3.9795187e-01   3.7769065e-01   3.8983341e-01   4.1851849e-01   4.2042793e-01   4.0510768e-01   3.9953508e-01   4.3130939e-01   3.4895812e-01   2.8413412e-01   3.7648942e-01   3.4987508e-01   4.3267862e-01   4.3359924e-01   3.3758650e-01   3.4918773e-01   3.4772733e-01   4.9915788e-01   4.0231358e-01   3.5632097e-01   3.7931174e-01   4.3732697e-01   3.2511301e-01   3.4317696e-01   3.4120231e-01   3.0420772e-01   2.9548175e-01   4.1851849e-01   3.3003548e-01   3.8954108e-01   2.9516106e-01   4.2751620e-01   3.1771485e-01   4.1340131e-01   3.7704473e-01   3.6865281e-01   3.4390717e-01   2.8759676e-01   3.1915503e-01   3.7909158e-01   2.9821954e-01   3.9795187e-01   3.8894782e-01   3.7251565e-01   3.3442155e-01   3.7333196e-01   3.2403985e-01   3.3841983e-01   3.3852014e-01   4.9713816e-03   9.2903476e-03   1.5944722e-02   1.1386125e-02   3.5402300e-03   9.6201776e-04   8.6911918e-03   9.4953207e-05   6.4447745e-03   2.1940505e-02   1.4660203e-04   6.6774582e-03   1.0681613e-03   1.0987220e-03   1.5397922e-03   1.3925202e-03   1.6744918e-03   4.5493239e-04   1.7274513e-03   1.6063124e-02   1.4167352e-02   1.3643967e-04   2.8974888e-03   2.8893579e-03   1.3643967e-04   1.8844794e-03   1.0195101e-03   
5.9835494e-03   1.1907169e-02   6.2144210e-03   3.1461009e-03   6.4603159e-03   6.0804116e-04   7.9398115e-03   2.6608779e-03   3.6611489e-03   1.1878605e-03   1.6827914e-01   1.6808878e-01   2.0330224e-01   2.4208927e-01   2.1725982e-01   2.3901101e-01   1.8791090e-01   1.5022413e-01   1.9646923e-01   1.9636982e-01   2.2550082e-01   1.7178784e-01   2.1730098e-01   2.3423675e-01   1.1690649e-01   1.5652486e-01   2.2988556e-01   1.8351752e-01   2.8999988e-01   1.9148148e-01   2.4151451e-01   1.5889324e-01   2.9641885e-01   2.3800995e-01   1.7162027e-01   1.6875690e-01   2.2303923e-01   2.3997805e-01   2.1702847e-01   1.3016155e-01   1.9481438e-01   1.7925806e-01   1.6471086e-01   3.2723835e-01   2.4516700e-01   1.6613136e-01   1.8943302e-01   2.5069233e-01   1.6755119e-01   2.1637456e-01   2.5710224e-01   2.1080083e-01   1.8747239e-01   1.5900127e-01   2.1430731e-01   1.7454101e-01   1.8671199e-01   1.7813660e-01   1.0093430e-01   1.8452279e-01   4.2348788e-01   3.6545764e-01   3.4229982e-01   3.6044592e-01   3.8515670e-01   3.8525367e-01   3.7671058e-01   3.6665866e-01   3.9483186e-01   3.1729700e-01   2.5339335e-01   3.4086300e-01   3.1448994e-01   3.9561710e-01   3.9513385e-01   3.0425707e-01   3.1942407e-01   3.2109044e-01   4.5863634e-01   3.6575820e-01   3.2152631e-01   3.4763745e-01   4.0088503e-01   2.8963041e-01   3.1371721e-01   3.1153634e-01   2.7048666e-01   2.6621754e-01   3.8319922e-01   2.9918600e-01   3.5318564e-01   2.6828212e-01   3.9088644e-01   2.8836369e-01   3.8573578e-01   3.3732047e-01   3.3961180e-01   3.1641339e-01   2.5871452e-01   2.8421639e-01   3.4227214e-01   2.5952711e-01   3.6545764e-01   3.5568934e-01   3.3784386e-01   2.9565951e-01   3.3403769e-01   2.9050448e-01   3.1070970e-01   3.1176741e-01   1.7225353e-03   3.1252650e-03   1.9141697e-03   3.0572974e-04   1.5621195e-03   7.7657730e-04   6.0730841e-03   9.6969946e-05   6.0804497e-03   4.8728559e-03   1.0019276e-02   1.0630759e-02   1.4028596e-03   1.0008907e-03   1.5106647e-03   
3.9425625e-03   5.9922357e-03   4.5089760e-03   4.5296071e-03   2.4544132e-03   5.3429196e-03   2.3927105e-03   2.5658780e-03   5.3429196e-03   8.0759380e-04   1.5345623e-03   3.2653860e-04   2.8784931e-02   4.6959359e-04   5.2961514e-04   3.5529133e-03   5.0787251e-03   1.7528094e-03   7.9750642e-04   1.8469304e-04   1.3949343e-03   2.2601194e-01   2.2489065e-01   2.6538416e-01   3.0933559e-01   2.8173337e-01   3.0217927e-01   2.4601138e-01   2.0461695e-01   2.5772236e-01   2.5608721e-01   2.9099324e-01   2.2920421e-01   2.8227951e-01   2.9802519e-01   1.6660589e-01   2.1294215e-01   2.9118999e-01   2.4154604e-01   3.6304829e-01   2.5194222e-01   3.0483437e-01   2.1582549e-01   3.6852765e-01   3.0175865e-01   2.2996178e-01   2.2696306e-01   2.8805620e-01   3.0640689e-01   2.7978083e-01   1.8266242e-01   2.5611673e-01   2.3849427e-01   2.2189938e-01   3.9968280e-01   3.0655638e-01   2.1989254e-01   2.4981061e-01   3.1957344e-01   2.2214967e-01   2.7998655e-01   3.2222399e-01   2.7182647e-01   2.4776514e-01   2.1557967e-01   2.7625427e-01   2.2956697e-01   2.4461597e-01   2.3673300e-01   1.4870158e-01   2.4319918e-01   5.0146439e-01   4.4143183e-01   4.1797463e-01   4.3468983e-01   4.6265612e-01   4.6350594e-01   4.5140079e-01   4.4286970e-01   4.7413628e-01   3.8981309e-01   3.2063561e-01   4.1652733e-01   3.8823357e-01   4.7518267e-01   4.7516433e-01   3.7651294e-01   3.9124904e-01   3.9150135e-01   5.4273573e-01   4.4336023e-01   3.9556546e-01   4.2215915e-01   4.8051706e-01   3.6152094e-01   3.8501414e-01   3.8277781e-01   3.4024930e-01   3.3391004e-01   4.6138930e-01   3.7007400e-01   4.2991865e-01   3.3504580e-01   4.7002175e-01   3.5780202e-01   4.6054739e-01   4.1401655e-01   4.1241306e-01   3.8694896e-01   3.2563422e-01   3.5550143e-01   4.1844379e-01   3.2964964e-01   4.4143183e-01   4.3139176e-01   4.1295631e-01   3.6894646e-01   4.1038562e-01   3.6180237e-01   3.8096707e-01   3.8161756e-01   2.9059655e-03   2.3706161e-04   1.5622923e-03   4.6985391e-03   
3.0899699e-03   1.1144685e-02   1.5452086e-03   4.3912180e-03   8.2378168e-03   1.9894938e-02   1.6022015e-02   4.6248887e-03   4.1434850e-03   3.4955495e-03   1.0239539e-02   1.2012056e-02   5.3911313e-03   8.1025404e-03   3.3141536e-03   1.0581034e-02   2.5128050e-03   2.6447621e-03   1.0581034e-02   4.0444636e-03   5.0265914e-03   5.7591609e-04   3.1111838e-02   3.5439088e-03   1.6935296e-03   1.0096079e-02   7.4536930e-03   6.1909555e-03   4.6446420e-03   2.9301624e-03   3.9714148e-03   2.5068010e-01   2.5098525e-01   2.9216380e-01   3.3638336e-01   3.0774280e-01   3.3430586e-01   2.7479523e-01   2.2946729e-01   2.8415608e-01   2.8467354e-01   3.1719659e-01   2.5534962e-01   3.0706036e-01   3.2874800e-01   1.8820706e-01   2.3625921e-01   3.2370915e-01   2.6954509e-01   3.8903955e-01   2.7840834e-01   3.3718014e-01   2.3890051e-01   3.9852630e-01   3.3314087e-01   2.5453269e-01   2.5085481e-01   3.1457631e-01   3.3452242e-01   3.0863300e-01   2.0400912e-01   2.8201538e-01   2.6372052e-01   2.4645516e-01   4.3401653e-01   3.4099078e-01   2.4885351e-01   2.7587880e-01   3.4510687e-01   2.5062404e-01   3.0740449e-01   3.5503881e-01   3.0161400e-01   2.7344211e-01   2.3943815e-01   3.0560987e-01   2.5890901e-01   2.7338361e-01   2.6272911e-01   1.6653571e-01   2.7061027e-01   5.3996294e-01   4.7622847e-01   4.4996413e-01   4.7092027e-01   4.9784258e-01   4.9765553e-01   4.8884384e-01   4.7750758e-01   5.0792579e-01   4.2270376e-01   3.5026703e-01   4.4829833e-01   4.1874266e-01   5.0865069e-01   5.0773433e-01   4.0771387e-01   4.2529697e-01   4.2723724e-01   5.7654330e-01   4.7578493e-01   4.2683086e-01   4.5657562e-01   5.1458642e-01   3.9050926e-01   4.1892650e-01   4.1646419e-01   3.6916765e-01   3.6521270e-01   4.9536279e-01   4.0243409e-01   4.6185742e-01   3.6775616e-01   5.0354755e-01   3.9037876e-01   4.9872590e-01   4.4290716e-01   4.4784995e-01   4.2202156e-01   3.5667822e-01   3.8450868e-01   4.4953729e-01   3.5436611e-01   4.7622847e-01   4.6530251e-01   
4.4515647e-01   3.9606646e-01   4.3939461e-01   3.9207838e-01   4.1563418e-01   4.1682072e-01   1.5609953e-03   4.8490070e-03   9.0958370e-03   1.4989091e-03   1.7813868e-02   2.1261488e-03   5.5450301e-04   1.5694148e-02   1.9384410e-02   2.5209737e-02   8.6954304e-03   7.6213095e-03   8.7105486e-03   1.2654300e-02   1.7334342e-02   1.3883194e-02   2.2522789e-03   1.7443565e-04   1.6459895e-02   9.1718824e-03   9.4916987e-03   1.6459895e-02   6.9918464e-03   8.9833757e-03   2.9662983e-03   4.9266036e-02   2.9055422e-03   5.5731976e-03   8.0889759e-03   1.5739049e-02   3.7597322e-03   6.3200695e-03   4.4644236e-03   8.6395939e-03   2.7692079e-01   2.7499135e-01   3.1941212e-01   3.6719225e-01   3.3760508e-01   3.5656667e-01   2.9687968e-01   2.5298236e-01   3.1115442e-01   3.0819394e-01   3.4760947e-01   2.7976620e-01   3.3861131e-01   3.5300749e-01   2.1161836e-01   2.6294922e-01   3.4413759e-01   2.9242527e-01   4.2523864e-01   3.0477646e-01   3.5931443e-01   2.6623770e-01   4.2973462e-01   3.5665121e-01   2.8133678e-01   2.7828246e-01   3.4429599e-01   3.6357656e-01   3.3414792e-01   2.2982660e-01   3.0962503e-01   2.9049749e-01   2.7240490e-01   4.6073277e-01   3.5937207e-01   2.6744708e-01   3.0261187e-01   3.7874332e-01   2.7038484e-01   3.3511133e-01   3.7800754e-01   3.2481692e-01   3.0053112e-01   2.6567213e-01   3.2997477e-01   2.7805229e-01   2.9533686e-01   2.8819780e-01   1.9246360e-01   2.9461561e-01   5.6604002e-01   5.0500114e-01   4.8160322e-01   4.9684759e-01   5.2728223e-01   5.2878056e-01   5.1374073e-01   5.0662675e-01   5.4019822e-01   4.5105935e-01   3.7828674e-01   4.8016813e-01   4.5059090e-01   5.4146236e-01   5.4186046e-01   4.3772456e-01   4.5187440e-01   4.5090747e-01   6.1216296e-01   5.0833718e-01   4.5807252e-01   4.8471571e-01   5.4678369e-01   4.2264992e-01   4.4526564e-01   4.4301133e-01   3.9982165e-01   3.9174986e-01   5.2663733e-01   4.3017652e-01   4.9431552e-01   3.9206379e-01   5.3598953e-01   4.1681568e-01   5.2288475e-01   
4.7864082e-01   4.7360696e-01   4.4651927e-01   3.8292357e-01   4.1618542e-01   4.8251093e-01   3.8978460e-01   5.0500114e-01   4.9485605e-01   4.7615846e-01   4.3123359e-01   4.7475023e-01   4.2239197e-01   4.4037545e-01   4.4066833e-01   2.2696323e-03   5.9674345e-03   2.4448133e-03   1.3335241e-02   1.4550127e-03   2.5899751e-03   1.0462849e-02   2.0483763e-02   1.9016265e-02   5.8048658e-03   5.1341121e-03   4.8749246e-03   1.1285550e-02   1.3907377e-02   7.6337585e-03   6.3180508e-03   2.0267734e-03   1.2551534e-02   4.1078635e-03   4.2919330e-03   1.2551534e-02   4.8854397e-03   6.2043549e-03   8.9134425e-04   3.6466805e-02   3.2782414e-03   2.5696799e-03   9.8307089e-03   9.8120264e-03   5.5450409e-03   5.1728098e-03   3.2838118e-03   5.2555481e-03   2.6123905e-01   2.6103645e-01   3.0329747e-01   3.4864780e-01   3.1958423e-01   3.4458162e-01   2.8459400e-01   2.3921225e-01   2.9516933e-01   2.9488851e-01   3.2923215e-01   2.6553322e-01   3.1926739e-01   3.3946038e-01   1.9751618e-01   2.4678743e-01   3.3347141e-01   2.7948950e-01   4.0283567e-01   2.8923185e-01   3.4744486e-01   2.4959732e-01   4.1128993e-01   3.4370645e-01   2.6525258e-01   2.6168140e-01   3.2643822e-01   3.4638099e-01   3.1949478e-01   2.1402441e-01   2.9314941e-01   2.7451414e-01   2.5691069e-01   4.4594923e-01   3.5036767e-01   2.5760360e-01   2.8676453e-01   3.5805235e-01   2.5967094e-01   3.1875975e-01   3.6562012e-01   3.1188296e-01   2.8438818e-01   2.4989387e-01   3.1618152e-01   2.6787548e-01   2.8314006e-01   2.7321717e-01   1.7615044e-01   2.8082637e-01   5.5224352e-01   4.8885819e-01   4.6311762e-01   4.8285549e-01   5.1073035e-01   5.1093091e-01   5.0061869e-01   4.9022404e-01   5.2151073e-01   4.3495694e-01   3.6199911e-01   4.6149506e-01   4.3176986e-01   5.2236122e-01   5.2173621e-01   4.2026138e-01   4.3714978e-01   4.3841019e-01   5.9117531e-01   4.8927834e-01   4.3976762e-01   4.6895974e-01   5.2818477e-01   4.0343191e-01   4.3068816e-01   4.2826096e-01   3.8162190e-01   
3.7670056e-01   5.0866155e-01   4.1442940e-01   4.7525840e-01   3.7873655e-01   5.1715063e-01   4.0199874e-01   5.1036877e-01   4.5693051e-01   4.5962906e-01   4.3336423e-01   3.6804311e-01   3.9729031e-01   4.6298867e-01   3.6775880e-01   4.8885819e-01   4.7805886e-01   4.5813974e-01   4.0968699e-01   4.5331588e-01   4.0460089e-01   4.2699969e-01   4.2797954e-01   8.7894099e-04   2.0541035e-03   4.6305984e-03   6.7282118e-04   8.1166178e-03   3.1872717e-03   1.0880321e-02   8.3605717e-03   8.2577841e-04   6.1811314e-04   5.6936180e-04   3.8806955e-03   4.9821714e-03   2.5043588e-03   7.1844876e-03   4.2468498e-03   4.1576220e-03   9.9353221e-04   1.1055786e-03   4.1576220e-03   5.9733473e-04   9.9247040e-04   3.2081104e-04   2.3598005e-02   1.4307976e-03   3.0934240e-05   4.9322953e-03   3.1299098e-03   3.2877683e-03   9.9769768e-04   4.3968696e-04   6.2770216e-04   2.1786575e-01   2.1746374e-01   2.5686278e-01   2.9961632e-01   2.7235218e-01   2.9521657e-01   2.3917491e-01   1.9737534e-01   2.4929109e-01   2.4877587e-01   2.8142376e-01   2.2163964e-01   2.7235613e-01   2.9042411e-01   1.5945203e-01   2.0466964e-01   2.8483074e-01   2.3445929e-01   3.5153649e-01   2.4372083e-01   2.9790924e-01   2.0734139e-01   3.5860995e-01   2.9439415e-01   2.2162942e-01   2.1843466e-01   2.7871280e-01   2.9725443e-01   2.7179832e-01   1.7471289e-01   2.4749127e-01   2.3015922e-01   2.1385216e-01   3.9117409e-01   3.0083879e-01   2.1421404e-01   2.4147478e-01   3.0893498e-01   2.1609222e-01   2.7129944e-01   3.1500957e-01   2.6459749e-01   2.3931769e-01   2.0744985e-01   2.6864595e-01   2.2369874e-01   2.3782351e-01   2.2882042e-01   1.4076875e-01   2.3574843e-01   4.9305396e-01   4.3221625e-01   4.0786305e-01   4.2639713e-01   4.5320351e-01   4.5351054e-01   4.4342223e-01   4.3354113e-01   4.6376094e-01   3.8078797e-01   3.1179824e-01   4.0634323e-01   3.7808795e-01   4.6463298e-01   4.6419251e-01   3.6697079e-01   3.8279432e-01   3.8398235e-01   5.3121485e-01   4.3292773e-01   
3.8560639e-01   4.1316779e-01   4.7015919e-01   3.5131205e-01   3.7664303e-01   3.7434225e-01   3.3054253e-01   3.2553357e-01   4.5134808e-01   3.6126826e-01   4.1953095e-01   3.2738535e-01   4.5959627e-01   3.4943014e-01   4.5279702e-01   4.0259529e-01   4.0419917e-01   3.7916847e-01   3.1736132e-01   3.4544845e-01   4.0790313e-01   3.1842804e-01   4.3221625e-01   4.2193584e-01   4.0305492e-01   3.5775833e-01   3.9908964e-01   3.5217975e-01   3.7311479e-01   3.7405234e-01   4.0128787e-03   1.5082918e-03   2.4327589e-03   1.3749582e-02   9.8644164e-04   7.0275865e-03   4.0509182e-03   9.2137987e-06   8.1954627e-05   2.0221322e-04   1.4919720e-03   1.6918938e-03   1.6810350e-03   9.8366461e-03   7.8161988e-03   1.2160579e-03   1.4072237e-03   1.4774681e-03   1.2160579e-03   1.9402564e-04   2.6249834e-05   2.2527118e-03   1.8043780e-02   2.4914194e-03   7.5228825e-04   4.0311549e-03   1.3209671e-03   4.0483306e-03   6.3837247e-04   9.1866746e-04   4.5668021e-05   1.9279238e-01   1.9221899e-01   2.2979083e-01   2.7088361e-01   2.4479658e-01   2.6610561e-01   2.1267629e-01   1.7326091e-01   2.2258210e-01   2.2184784e-01   2.5349552e-01   1.9619155e-01   2.4503806e-01   2.6157291e-01   1.3780150e-01   1.8042756e-01   2.5616217e-01   2.0823320e-01   3.2136818e-01   2.1724300e-01   2.6868196e-01   1.8301959e-01   3.2745065e-01   2.6534429e-01   1.9640162e-01   1.9346456e-01   2.5082861e-01   2.6843057e-01   2.4386055e-01   1.5229091e-01   2.2093793e-01   2.0444992e-01   1.8898039e-01   3.5852616e-01   2.7153123e-01   1.8897970e-01   2.1514921e-01   2.8018592e-01   1.9075855e-01   2.4355737e-01   2.8508289e-01   2.3688721e-01   2.1314274e-01   1.8298617e-01   2.4079289e-01   1.9795777e-01   2.1138990e-01   2.0306053e-01   1.2090533e-01   2.0951271e-01   4.5737829e-01   3.9834085e-01   3.7497739e-01   3.9259152e-01   4.1873643e-01   4.1914498e-01   4.0909121e-01   3.9964410e-01   4.2918834e-01   3.4857995e-01   2.8224226e-01   3.7353062e-01   3.4626594e-01   4.3008323e-01   
4.2978752e-01   3.3538250e-01   3.5042715e-01   3.5150644e-01   4.9516533e-01   3.9931312e-01   3.5345241e-01   3.7985840e-01   4.3539662e-01   3.2054198e-01   3.4448056e-01   3.4226906e-01   3.0044482e-01   2.9530625e-01   4.1705793e-01   3.2972732e-01   3.8633770e-01   2.9699130e-01   4.2515986e-01   3.1826259e-01   4.1819279e-01   3.7038673e-01   3.7108888e-01   3.4686702e-01   2.8745249e-01   3.1485734e-01   3.7515178e-01   2.8956004e-01   3.9834085e-01   3.8842721e-01   3.7027395e-01   3.2715736e-01   3.6694987e-01   3.2117964e-01   3.4102816e-01   3.4191835e-01   9.8460504e-03   4.2097646e-04   3.8720695e-03   8.9609139e-03   1.0271114e-02   1.5759765e-02   3.6853519e-03   2.9640455e-03   4.3261623e-03   5.5313131e-03   9.1074119e-03   8.9974672e-03   1.5687499e-03   7.6879483e-04   8.7064401e-03   5.8625582e-03   6.1329529e-03   8.7064401e-03   2.4949105e-03   3.7729184e-03   1.5387455e-03   3.8489345e-02   2.4427633e-04   2.5870999e-03   2.8098846e-03   9.5472550e-03   6.5811256e-04   1.8465019e-03   1.0925425e-03   3.9557876e-03   2.4136396e-01   2.3914246e-01   2.8140056e-01   3.2726072e-01   2.9909180e-01   3.1587284e-01   2.5940651e-01   2.1853199e-01   2.7358477e-01   2.7023320e-01   3.0865618e-01   2.4367721e-01   3.0046030e-01   3.1269475e-01   1.8026794e-01   2.2845050e-01   3.0393397e-01   2.5531598e-01   3.8377418e-01   2.6747174e-01   3.1847415e-01   2.3166294e-01   3.8684625e-01   3.1607055e-01   2.4560721e-01   2.4289838e-01   3.0537010e-01   3.2346477e-01   2.9500292e-01   1.9758720e-01   2.7227129e-01   2.5416529e-01   2.3708995e-01   4.1588028e-01   3.1833018e-01   2.3144395e-01   2.6550973e-01   3.3890880e-01   2.3429142e-01   2.9628566e-01   3.3633600e-01   2.8587728e-01   2.6362949e-01   2.3087874e-01   2.9089607e-01   2.4143368e-01   2.5794041e-01   2.5175590e-01   1.6347149e-01   2.5753099e-01   5.1758017e-01   4.5875138e-01   4.3664497e-01   4.5058511e-01   4.8035455e-01   4.8203651e-01   4.6682330e-01   4.6036369e-01   4.9325486e-01   
4.0679312e-01   3.3723664e-01   4.3530687e-01   4.0692383e-01   4.9456817e-01   4.9519341e-01   3.9426880e-01   4.0735509e-01   4.0616607e-01   5.6363656e-01   4.6254311e-01   4.1397979e-01   4.3911880e-01   4.9961302e-01   3.8027310e-01   4.0098514e-01   3.9884545e-01   3.5820683e-01   3.4982569e-01   4.7998848e-01   3.8669536e-01   4.4902142e-01   3.4986296e-01   4.8921417e-01   3.7374272e-01   4.7562308e-01   4.3467328e-01   4.2816870e-01   4.0201263e-01   3.4137888e-01   3.7400811e-01   4.3776854e-01   3.4988231e-01   4.5875138e-01   4.4907175e-01   4.3130086e-01   3.8919296e-01   4.3084858e-01   3.7966236e-01   3.9613519e-01   3.9634370e-01   7.6697767e-03   2.4232895e-02   4.2497383e-04   5.8645021e-03   7.1697323e-04   1.6459430e-03   2.1472725e-03   2.1701877e-03   1.5644328e-03   2.1493500e-04   2.5540450e-03   1.7185148e-02   1.5785034e-02   5.4418849e-05   4.0402433e-03   4.0294443e-03   5.4418849e-05   2.5045043e-03   1.5070390e-03   7.3845654e-03   1.1685406e-02   7.1165327e-03   4.2192668e-03   6.5870297e-03   1.0764642e-03   8.6213220e-03   3.2147266e-03   4.4993834e-03   1.8554035e-03   1.6163874e-01   1.6124582e-01   1.9600517e-01   2.3441471e-01   2.0995191e-01   2.3066019e-01   1.8049424e-01   1.4375647e-01   1.8928742e-01   1.8888935e-01   2.1809263e-01   1.6489764e-01   2.1018407e-01   2.2610729e-01   1.1131536e-01   1.5021138e-01   2.2157821e-01   1.7624974e-01   2.8213828e-01   1.8434338e-01   2.3311339e-01   1.5259174e-01   2.8788764e-01   2.2976085e-01   1.6495975e-01   1.6222817e-01   2.1560367e-01   2.3216078e-01   2.0930930e-01   1.2444580e-01   1.8772709e-01   1.7242257e-01   1.5812464e-01   3.1788081e-01   2.3649618e-01   1.5894784e-01   1.8237277e-01   2.4318751e-01   1.6040650e-01   2.0886424e-01   2.4850015e-01   2.0301750e-01   1.8048879e-01   1.5257957e-01   2.0654604e-01   1.6721007e-01   1.7931046e-01   1.7120054e-01   9.6139593e-02   1.7732284e-01   4.1297254e-01   3.5577891e-01   3.3316436e-01   3.5061301e-01   3.7530173e-01   
3.7554177e-01   3.6661833e-01   3.5699377e-01   3.8511507e-01   3.0820403e-01   2.4524565e-01   3.3176869e-01   3.0575678e-01   3.8594227e-01   3.8559393e-01   2.9549451e-01   3.1016061e-01   3.1160241e-01   4.4857854e-01   3.5641190e-01   3.1263400e-01   3.3812708e-01   3.9109335e-01   2.8129908e-01   3.0451362e-01   3.0237858e-01   2.6230725e-01   2.5773246e-01   3.7352449e-01   2.9029824e-01   3.4398832e-01   2.5959514e-01   3.8123049e-01   2.7952979e-01   3.7549545e-01   3.2868655e-01   3.3002382e-01   3.0704202e-01   2.5032682e-01   2.7592132e-01   3.3326893e-01   2.5208920e-01   3.5577891e-01   3.4619466e-01   3.2870702e-01   2.8757710e-01   3.2540565e-01   2.8197541e-01   3.0143248e-01   3.0241595e-01   4.6779054e-03   6.3405358e-03   1.1072067e-02   1.2740784e-02   2.2273681e-03   1.7042107e-03   2.3469652e-03   4.9562680e-03   7.4940361e-03   5.7695726e-03   3.5981535e-03   1.5844867e-03   6.8214146e-03   3.1957366e-03   3.3967393e-03   6.8214146e-03   1.4289215e-03   2.3814252e-03   3.6906895e-04   3.2052685e-02   4.0942979e-04   9.9039775e-04   3.7975123e-03   6.5400285e-03   1.5831617e-03   1.2837357e-03   4.6903663e-04   2.2256509e-03   2.3440889e-01   2.3310632e-01   2.7431935e-01   3.1898347e-01   2.9103053e-01   3.1107217e-01   2.5431417e-01   2.1253549e-01   2.6655333e-01   2.6462531e-01   3.0042601e-01   2.3750489e-01   2.9168949e-01   3.0705757e-01   1.7397041e-01   2.2119992e-01   2.9980584e-01   2.4986709e-01   3.7353610e-01   2.6066067e-01   3.1374027e-01   2.2416418e-01   3.7874518e-01   3.1076095e-01   2.3844790e-01   2.3545346e-01   2.9741154e-01   3.1590317e-01   2.8873621e-01   1.9043311e-01   2.6497335e-01   2.4708373e-01   2.3022342e-01   4.0978464e-01   3.1511039e-01   2.2757419e-01   2.5853132e-01   3.2950090e-01   2.2996799e-01   2.8911377e-01   3.3136481e-01   2.8050509e-01   2.5648758e-01   2.2384164e-01   2.8507826e-01   2.3741411e-01   2.5289158e-01   2.4520543e-01   1.5591712e-01   2.5163277e-01   5.1215829e-01   4.5200156e-01   
4.2861009e-01   4.4496538e-01   4.7341919e-01   4.7441196e-01   4.6168878e-01   4.5347805e-01   4.8519916e-01   3.9997999e-01   3.3019092e-01   4.2716946e-01   3.9865809e-01   4.8629326e-01   4.8637186e-01   3.8670691e-01   4.0127624e-01   4.0126667e-01   5.5444310e-01   4.5424309e-01   4.0600299e-01   4.3254422e-01   4.9161364e-01   3.7174460e-01   3.9497344e-01   3.9273809e-01   3.5018083e-01   3.4346685e-01   4.7229362e-01   3.8003558e-01   4.4070091e-01   3.4442025e-01   4.8107176e-01   3.6755422e-01   4.7082182e-01   4.2490414e-01   4.2252116e-01   3.9675769e-01   3.3509282e-01   3.6564070e-01   4.2918040e-01   3.3977682e-01   4.5200156e-01   4.4195847e-01   4.2350677e-01   3.7942836e-01   4.2122143e-01   3.7189771e-01   3.9075300e-01   3.9132598e-01   2.1401129e-02   2.6296569e-02   3.2566498e-02   1.3297832e-02   1.1999623e-02   1.2969856e-02   1.8466773e-02   2.3873417e-02   1.8518842e-02   4.0273028e-03   1.2616800e-03   2.2732626e-02   1.2827409e-02   1.3181090e-02   2.2732626e-02   1.1261737e-02   1.3701796e-02   5.3984869e-03   5.7610599e-02   5.9941455e-03   8.9691119e-03   1.2694193e-02   2.1144882e-02   7.0627421e-03   1.0543898e-02   8.0308076e-03   1.3063205e-02   2.9989796e-01   2.9808728e-01   3.4381381e-01   3.9275825e-01   3.6234621e-01   3.8238034e-01   3.2086021e-01   2.7526551e-01   3.3529881e-01   3.3248525e-01   3.7261196e-01   3.0300865e-01   3.6317903e-01   3.7865155e-01   2.3211430e-01   2.8532877e-01   3.6963026e-01   3.1621648e-01   4.5177033e-01   3.2875625e-01   3.8520789e-01   2.8867443e-01   4.5697798e-01   3.8242988e-01   3.0442487e-01   3.0118555e-01   3.6926913e-01   3.8920975e-01   3.5915749e-01   2.5088735e-01   3.3365900e-01   3.1393007e-01   2.9523721e-01   4.8898239e-01   3.8528778e-01   2.9050533e-01   3.2648373e-01   4.0430761e-01   2.9352285e-01   3.5998625e-01   4.0438969e-01   3.4965804e-01   3.2429593e-01   2.8821126e-01   3.5492121e-01   3.0147553e-01   3.1927061e-01   3.1166517e-01   2.1164881e-01   3.1841810e-01   
5.9626248e-01   5.3406968e-01   5.1001241e-01   5.2586196e-01   5.5673824e-01   5.5816878e-01   5.4309284e-01   5.3570978e-01   5.6971774e-01   4.7901704e-01   4.0442977e-01   5.0852772e-01   4.7828326e-01   5.7096471e-01   5.7126109e-01   4.6526268e-01   4.7993533e-01   4.7902644e-01   6.4260357e-01   5.3722757e-01   4.8599135e-01   5.1340958e-01   5.7642669e-01   4.4962350e-01   4.7318809e-01   4.7087433e-01   4.2633712e-01   4.1835467e-01   5.5597597e-01   4.5768215e-01   5.2292685e-01   4.1877489e-01   5.6541976e-01   4.4406435e-01   5.5240885e-01   5.0657081e-01   5.0216108e-01   4.7452659e-01   4.0930806e-01   4.4303784e-01   5.1082488e-01   4.1541892e-01   5.3406968e-01   5.2368299e-01   5.0449934e-01   4.5807263e-01   5.0263701e-01   4.4952858e-01   4.6823972e-01   4.6855914e-01   8.7198254e-03   1.2902931e-03   1.1684021e-03   1.6341127e-03   1.0742763e-03   2.6208124e-03   1.1173357e-03   8.9620141e-04   1.6960301e-02   1.4197837e-02   5.6586090e-04   2.0157829e-03   1.9836256e-03   5.6586090e-04   2.0413623e-03   1.1637665e-03   5.4511725e-03   1.0718252e-02   6.5990100e-03   2.7263257e-03   7.7966832e-03   1.5754002e-04   8.7922744e-03   3.0619296e-03   3.8030551e-03   1.0550593e-03   1.7203244e-01   1.7235723e-01   2.0751307e-01   2.4611820e-01   2.2109400e-01   2.4491699e-01   1.9298922e-01   1.5421652e-01   2.0061094e-01   2.0129686e-01   2.2934517e-01   1.7603988e-01   2.2070889e-01   2.3964854e-01   1.2009621e-01   1.5990877e-01   2.3601974e-01   1.8834267e-01   2.9339046e-01   1.9567575e-01   2.4748464e-01   1.6217086e-01   3.0116422e-01   2.4364046e-01   1.7530938e-01   1.7223213e-01   2.2701156e-01   2.4439785e-01   2.2188150e-01   1.3311613e-01   1.9877894e-01   1.8311301e-01   1.6845668e-01   3.3339209e-01   2.5192873e-01   1.7150268e-01   1.9350184e-01   2.5414415e-01   1.7271733e-01   2.2071334e-01   2.6313203e-01   2.1605186e-01   1.9141813e-01   1.6255272e-01   2.1936783e-01   1.7996851e-01   1.9179556e-01   1.8227672e-01   1.0300751e-01   
1.8912086e-01   4.3075013e-01   3.7158844e-01   3.4752045e-01   3.6713693e-01   3.9134187e-01   3.9105984e-01   3.8378198e-01   3.7272538e-01   4.0046876e-01   3.2297138e-01   2.5826699e-01   3.4601238e-01   3.1932057e-01   4.0113556e-01   4.0033459e-01   3.0938917e-01   3.2551476e-01   3.2781102e-01   4.6405632e-01   3.7102765e-01   3.2659021e-01   3.5371449e-01   4.0659862e-01   2.9405620e-01   3.1978995e-01   3.1753736e-01   2.7499746e-01   2.7161605e-01   3.8895029e-01   3.0476818e-01   3.5833770e-01   2.7419176e-01   3.9644448e-01   2.9409120e-01   3.9304592e-01   3.4147602e-01   3.4611220e-01   3.2290811e-01   2.6406812e-01   2.8867894e-01   3.4717424e-01   2.6265032e-01   3.7158844e-01   3.6154105e-01   3.4316147e-01   2.9940560e-01   3.3824889e-01   2.9538166e-01   3.1708459e-01   3.1834036e-01   8.7316989e-03   6.7615313e-03   6.7580543e-03   9.6081666e-03   2.0657605e-03   3.8371349e-03   1.4271025e-02   1.2184393e-02   1.6084765e-02   4.9831593e-03   1.4699244e-02   1.4912454e-02   4.9831593e-03   6.5028816e-03   6.2489819e-03   1.3726162e-02   3.1411524e-02   7.7445772e-03   1.1141212e-02   2.5000477e-03   1.1143820e-02   6.1604304e-03   5.3711085e-03   7.5849080e-03   8.1835455e-03   1.6688130e-01   1.6291433e-01   2.0003066e-01   2.4149909e-01   2.1722570e-01   2.2445012e-01   1.7758122e-01   1.4623090e-01   1.9343386e-01   1.8753794e-01   2.2568158e-01   1.6690274e-01   2.2030573e-01   2.2323949e-01   1.1699096e-01   1.5725818e-01   2.1323159e-01   1.7489777e-01   2.9532205e-01   1.8786273e-01   2.2655558e-01   1.6049736e-01   2.9226070e-01   2.2550223e-01   1.7083413e-01   1.6938578e-01   2.2221647e-01   2.3649790e-01   2.0956346e-01   1.3266766e-01   1.9299947e-01   1.7760033e-01   1.6320695e-01   3.1350837e-01   2.2421748e-01   1.5253844e-01   1.8664517e-01   2.5447595e-01   1.5560691e-01   2.1266187e-01   2.4223633e-01   2.0012528e-01   1.8549850e-01   1.5864276e-01   2.0519641e-01   1.6093217e-01   1.7628990e-01   1.7435363e-01   1.0801101e-01   
1.7763987e-01   4.0351234e-01   3.5280598e-01   3.3582666e-01   3.4348161e-01   3.7251839e-01   3.7541634e-01   3.5722931e-01   3.5450932e-01   3.8642215e-01   3.0688243e-01   2.4703841e-01   3.3487550e-01   3.1018257e-01   3.8808655e-01   3.8990045e-01   2.9749231e-01   3.0596368e-01   3.0291072e-01   4.5287211e-01   3.5944458e-01   3.1570031e-01   3.3498082e-01   3.9202879e-01   2.8759651e-01   3.0023966e-01   2.9853735e-01   2.6729925e-01   2.5639363e-01   3.7372548e-01   2.8896987e-01   3.4745520e-01   2.5469579e-01   3.8297689e-01   2.7675896e-01   3.6463006e-01   3.3839246e-01   3.2358656e-01   2.9982548e-01   2.4898596e-01   2.8176933e-01   3.3810925e-01   2.6587897e-01   3.5280598e-01   3.4488951e-01   3.3055981e-01   2.9862862e-01   3.3464036e-01   2.8522813e-01   2.9487313e-01   2.9445589e-01   4.3312037e-03   5.1674385e-03   4.6600432e-03   4.1032398e-03   1.3249619e-03   3.6837745e-03   2.4877830e-02   2.2958753e-02   1.1314391e-03   6.3769043e-03   6.2810489e-03   1.1314391e-03   5.7702159e-03   4.1635898e-03   1.1902706e-02   7.2040189e-03   1.2293784e-02   7.6706139e-03   1.1339253e-02   1.8296747e-03   1.4280945e-02   6.9429868e-03   8.6587607e-03   4.4388406e-03   1.4462991e-01   1.4494206e-01   1.7756526e-01   2.1378771e-01   1.9031042e-01   2.1286860e-01   1.6419734e-01   1.2822571e-01   1.7113301e-01   1.7185341e-01   1.9804950e-01   1.4833973e-01   1.9005998e-01   2.0775634e-01   9.7105565e-02   1.3348080e-01   2.0467358e-01   1.5981923e-01   2.5874568e-01   1.6653873e-01   2.1530133e-01   1.3558698e-01   2.6575883e-01   2.1156399e-01   1.4766732e-01   1.4484877e-01   1.9583483e-01   2.1211626e-01   1.9101641e-01   1.0897783e-01   1.6943757e-01   1.5488355e-01   1.4132668e-01   2.9650216e-01   2.1994601e-01   1.4455458e-01   1.6451956e-01   2.2152336e-01   1.4555202e-01   1.8989624e-01   2.3000345e-01   1.8566766e-01   1.6258902e-01   1.3589561e-01   1.8870744e-01   1.5235300e-01   1.6309487e-01   1.5410176e-01   8.1942750e-02   1.6048904e-01   
3.8999083e-01   3.3294245e-01   3.0988325e-01   3.2882841e-01   3.5187047e-01   3.5156617e-01   3.4487977e-01   3.3402051e-01   3.6059894e-01   2.8649814e-01   2.2516617e-01   3.0844683e-01   2.8301726e-01   3.6124416e-01   3.6050400e-01   2.7355262e-01   2.8899505e-01   2.9139815e-01   4.2192237e-01   3.3236960e-01   2.8992253e-01   3.1585310e-01   3.6648886e-01   2.5905330e-01   2.8355149e-01   2.8139447e-01   2.4097820e-01   2.3780572e-01   3.4954046e-01   2.6919588e-01   3.2022959e-01   2.4036369e-01   3.5673540e-01   2.5910558e-01   3.5384915e-01   3.0430469e-01   3.0871278e-01   2.8664912e-01   2.3068275e-01   2.5394489e-01   3.0958210e-01   2.2970603e-01   3.3294245e-01   3.2329736e-01   3.0571743e-01   2.6432145e-01   3.0120271e-01   2.6026006e-01   2.8107880e-01   2.8234890e-01   3.9333334e-05   2.5761851e-04   1.3896575e-03   1.7536063e-03   1.9216331e-03   9.2702772e-03   7.4022776e-03   1.3090644e-03   1.5444025e-03   1.6252591e-03   1.3090644e-03   1.2151407e-04   1.0618022e-05   2.1493524e-03   1.8833153e-02   2.2172388e-03   7.3253783e-04   3.6800655e-03   1.5497854e-03   3.6733784e-03   4.9421897e-04   7.6664670e-04   7.0183041e-05   1.9407897e-01   1.9337938e-01   2.3114302e-01   2.7244640e-01   2.4629979e-01   2.6718506e-01   2.1373312e-01   1.7438911e-01   2.2391848e-01   2.2298785e-01   2.5503185e-01   1.9737603e-01   2.4663659e-01   2.6276403e-01   1.3891547e-01   1.8173656e-01   2.5713590e-01   2.0933386e-01   3.2324703e-01   2.1854256e-01   2.6975489e-01   1.8436571e-01   3.2904939e-01   2.6649404e-01   1.9772084e-01   1.9481826e-01   2.5232520e-01   2.6989379e-01   2.4511828e-01   1.5354404e-01   2.2230840e-01   2.0576951e-01   1.9024966e-01   3.5987888e-01   2.7239312e-01   1.8983400e-01   2.1646886e-01   2.8191043e-01   1.9167912e-01   2.4493869e-01   2.8620678e-01   2.3801287e-01   2.1448199e-01   1.8427005e-01   2.4198503e-01   1.9884520e-01   2.1243903e-01   2.0430599e-01   1.2215728e-01   2.1067715e-01   4.5871846e-01   3.9981904e-01   
3.7661173e-01   3.9390784e-01   4.2025710e-01   4.2076082e-01   4.1035332e-01   4.0114215e-01   4.3087129e-01   3.5002279e-01   2.8365040e-01   3.7517763e-01   3.4790525e-01   4.3179681e-01   4.3157518e-01   3.3691557e-01   3.5177012e-01   3.5268567e-01   4.9705280e-01   4.0100475e-01   3.5505921e-01   3.8129652e-01   4.3707681e-01   3.2219168e-01   3.4580868e-01   3.4360838e-01   3.0200588e-01   2.9663386e-01   4.1868171e-01   3.3113109e-01   3.8802094e-01   2.9819354e-01   4.2685142e-01   3.1959000e-01   4.1941543e-01   3.7225097e-01   3.7239048e-01   3.4809183e-01   2.8876177e-01   3.1647952e-01   3.7686145e-01   2.9138730e-01   3.9981904e-01   3.8994709e-01   3.7187123e-01   3.2897937e-01   3.6879196e-01   3.2272643e-01   3.4226532e-01   3.4310531e-01   3.4181612e-04   1.4644608e-03   2.1632573e-03   2.3231555e-03   8.1286280e-03   6.3818616e-03   1.7262411e-03   1.6624580e-03   1.7631926e-03   1.7262411e-03   2.4569053e-05   5.6859581e-05   1.7551896e-03   2.0541034e-02   1.6702311e-03   5.9025530e-04   3.2341087e-03   2.0408948e-03   3.0131078e-03   2.8888789e-04   4.5924146e-04   1.3293684e-04   1.9862431e-01   1.9776069e-01   2.3600084e-01   2.7779005e-01   2.5142895e-01   2.7187023e-01   2.1810101e-01   1.7860088e-01   2.2871390e-01   2.2752286e-01   2.6024955e-01   2.0181434e-01   2.5187795e-01   2.6758248e-01   1.4283878e-01   1.8622396e-01   2.6161990e-01   2.1373552e-01   3.2920505e-01   2.2325952e-01   2.7444329e-01   1.8891553e-01   3.3470963e-01   2.7127428e-01   2.0233024e-01   1.9944956e-01   2.5748056e-01   2.7510316e-01   2.4993365e-01   1.5775267e-01   2.2713624e-01   2.1043184e-01   1.9474801e-01   3.6534538e-01   2.7678503e-01   1.9377126e-01   2.2119834e-01   2.8749014e-01   1.9572304e-01   2.4991164e-01   2.9104517e-01   2.4261230e-01   2.1922346e-01   1.8874518e-01   2.4669621e-01   2.0288553e-01   2.1678869e-01   2.0886650e-01   1.2608625e-01   2.1517216e-01   4.6449792e-01   4.0560127e-01   3.8251194e-01   3.9944814e-01   4.2616553e-01   
4.2679596e-01   4.1587264e-01   4.0695550e-01   4.3702628e-01   3.5557170e-01   2.8885981e-01   3.8108822e-01   3.5369655e-01   4.3799369e-01   4.3786355e-01   3.4252750e-01   3.5719184e-01   3.5788232e-01   5.0366583e-01   4.0706759e-01   3.6083966e-01   3.8695887e-01   4.4324858e-01   3.2788328e-01   3.5118655e-01   3.4899273e-01   3.0749578e-01   3.0179987e-01   4.2472010e-01   3.3655063e-01   3.9402558e-01   3.0319627e-01   4.3300399e-01   3.2485825e-01   4.2490754e-01   3.7841034e-01   3.7783349e-01   3.5333537e-01   2.9386653e-01   3.2211174e-01   3.8285611e-01   2.9712888e-01   4.0560127e-01   3.9574961e-01   3.7770634e-01   3.3490218e-01   3.7491174e-01   3.2829405e-01   3.4750328e-01   3.4827580e-01   2.7893549e-03   2.7277911e-03   9.4108270e-04   1.0799149e-02   7.7770747e-03   1.9770194e-03   5.4325768e-04   5.8945201e-04   1.9770194e-03   5.1514113e-04   3.6478892e-04   1.6876295e-03   1.7172006e-02   2.9808015e-03   3.7816236e-04   5.6735840e-03   1.0535544e-03   5.0675384e-03   1.2248969e-03   1.1742680e-03   5.8878380e-05   1.9840502e-01   1.9840646e-01   2.3601178e-01   2.7699198e-01   2.5062921e-01   2.7429513e-01   2.1981939e-01   1.7908511e-01   2.2870797e-01   2.2884763e-01   2.5935890e-01   2.0237179e-01   2.5038976e-01   2.6920634e-01   1.4261267e-01   1.8559031e-01   2.6457877e-01   2.1508394e-01   3.2684946e-01   2.2341437e-01   2.7694800e-01   1.8806980e-01   3.3447269e-01   2.7322826e-01   2.0195195e-01   1.9876628e-01   2.5683031e-01   2.7497319e-01   2.5083242e-01   1.5688509e-01   2.2686098e-01   2.1020792e-01   1.9457412e-01   3.6717222e-01   2.8066992e-01   1.9637709e-01   2.2117475e-01   2.8566080e-01   1.9793610e-01   2.4995417e-01   2.9345115e-01   2.4428403e-01   2.1902512e-01   1.8834472e-01   2.4798811e-01   2.0544380e-01   2.1853532e-01   2.0913443e-01   1.2439434e-01   2.1611468e-01   4.6739623e-01   4.0702486e-01   3.8261075e-01   4.0188705e-01   4.2751037e-01   4.2749738e-01   4.1883120e-01   4.0825884e-01   4.3736523e-01   
3.5667688e-01   2.8932174e-01   3.8108192e-01   3.5341594e-01   4.3812879e-01   4.3747647e-01   3.4285168e-01   3.5898948e-01   3.6076578e-01   5.0317788e-01   4.0703744e-01   3.6087589e-01   3.8845434e-01   4.4366743e-01   3.2719263e-01   3.5301257e-01   3.5071770e-01   3.0715395e-01   3.0299726e-01   4.2534595e-01   3.3768843e-01   3.9391146e-01   3.0525217e-01   4.3324318e-01   3.2636388e-01   4.2820876e-01   3.7681443e-01   3.8013938e-01   3.5586945e-01   2.9507753e-01   3.2153910e-01   3.8242816e-01   2.9467977e-01   4.0702486e-01   3.9678588e-01   3.7800850e-01   3.3305200e-01   3.7343525e-01   3.2833826e-01   3.4988792e-01   3.5099795e-01   7.6889988e-04   5.5936308e-03   9.7548717e-03   1.0412603e-02   1.0389613e-03   5.7781385e-03   5.9052415e-03   1.0389613e-03   1.4340775e-03   1.1584691e-03   6.0259991e-03   2.1726707e-02   3.4598658e-03   3.9021886e-03   1.8160607e-03   3.9104990e-03   3.6199548e-03   1.2013216e-03   2.4248068e-03   2.0560641e-03   1.7726458e-01   1.7519789e-01   2.1239850e-01   2.5350621e-01   2.2831460e-01   2.4335474e-01   1.9301815e-01   1.5733953e-01   2.0548992e-01   2.0248973e-01   2.3687424e-01   1.7915768e-01   2.2981656e-01   2.4028800e-01   1.2483249e-01   1.6617415e-01   2.3289941e-01   1.8936545e-01   3.0533170e-01   2.0007138e-01   2.4571617e-01   1.6904051e-01   3.0733551e-01   2.4338605e-01   1.8100871e-01   1.7874491e-01   2.3386295e-01   2.4993365e-01   2.2441376e-01   1.3976123e-01   2.0438723e-01   1.8845276e-01   1.7353191e-01   3.3400984e-01   2.4631085e-01   1.6886156e-01   1.9837340e-01   2.6441281e-01   1.7118963e-01   2.2560913e-01   2.6174131e-01   2.1639026e-01   1.9675554e-01   1.6819987e-01   2.2078897e-01   1.7753606e-01   1.9173589e-01   1.8624992e-01   1.1156657e-01   1.9127963e-01   4.2879129e-01   3.7350470e-01   3.5299849e-01   3.6620603e-01   3.9355975e-01   3.9506070e-01   3.8147101e-01   3.7498057e-01   4.0550993e-01   3.2554347e-01   2.6225589e-01   3.5177359e-01   3.2570798e-01   4.0674255e-01   
4.0737898e-01   3.1405475e-01   3.2617446e-01   3.2545044e-01   4.7160294e-01   3.7696723e-01   3.3214002e-01   3.5535112e-01   4.1144267e-01   3.0143840e-01   3.2034131e-01   3.1835849e-01   2.8130938e-01   2.7364725e-01   3.9315179e-01   3.0715437e-01   3.6445309e-01   2.7384746e-01   4.0174546e-01   2.9539622e-01   3.8981435e-01   3.5158797e-01   3.4545598e-01   3.2149982e-01   2.6601526e-01   2.9570609e-01   3.5410378e-01   2.7466513e-01   3.7350470e-01   3.6448827e-01   3.4805708e-01   3.0999906e-01   3.4801807e-01   3.0074518e-01   3.1606363e-01   3.1638243e-01   3.9211584e-03   1.5428451e-02   1.5051422e-02   9.3641634e-05   5.2251987e-03   5.2564865e-03   9.3641634e-05   2.4059863e-03   1.5411207e-03   7.7716003e-03   1.4517538e-02   6.4009679e-03   4.6963365e-03   4.9429047e-03   2.1131701e-03   7.3135995e-03   2.7662314e-03   4.2415669e-03   2.1979302e-03   1.6154877e-01   1.6047147e-01   1.9568300e-01   2.3468417e-01   2.1025394e-01   2.2838581e-01   1.7884422e-01   1.4313754e-01   1.8898793e-01   1.8753794e-01   2.1845622e-01   1.6418759e-01   2.1103286e-01   2.2447017e-01   1.1131548e-01   1.5046796e-01   2.1889519e-01   1.7489777e-01   2.8359045e-01   1.8392350e-01   2.3077309e-01   1.5301294e-01   2.8765002e-01   2.2785871e-01   1.6499082e-01   1.6250540e-01   2.1577964e-01   2.3190311e-01   2.0826660e-01   1.2493129e-01   1.8764327e-01   1.7232008e-01   1.5800409e-01   3.1597925e-01   2.3306671e-01   1.5663136e-01   1.8209705e-01   2.4426589e-01   1.5839676e-01   2.0849973e-01   2.4621874e-01   2.0137281e-01   1.8035428e-01   1.5264734e-01   2.0519641e-01   1.6491824e-01   1.7763987e-01   1.7071018e-01   9.7334595e-02   1.7628990e-01   4.1013753e-01   3.5415853e-01   3.3261378e-01   3.4819629e-01   3.7372548e-01   3.7447405e-01   3.6377706e-01   3.5546726e-01   3.8432564e-01   3.0688243e-01   2.4450319e-01   3.3130276e-01   3.0553473e-01   3.8531353e-01   3.8537934e-01   2.9480414e-01   3.0829006e-01   3.0887441e-01   4.4839112e-01   3.5594109e-01   
3.1215369e-01   3.3646690e-01   3.9023537e-01   2.8142851e-01   3.0262656e-01   3.0057353e-01   2.6218143e-01   2.5639363e-01   3.7251839e-01   2.8896987e-01   3.4359589e-01   2.5757650e-01   3.8052352e-01   2.7792266e-01   3.7237623e-01   3.2948525e-01   3.2773210e-01   3.0459398e-01   2.4898596e-01   2.7596310e-01   3.3313575e-01   2.5365041e-01   3.5415853e-01   3.4488951e-01   3.2799981e-01   2.8862094e-01   3.2611271e-01   2.8152144e-01   2.9910812e-01   2.9982464e-01   1.7874931e-02   1.3166667e-02   2.8067647e-03   5.4483543e-04   4.8491153e-04   2.8067647e-03   2.7932574e-03   2.1120566e-03   4.0283644e-03   1.1006031e-02   7.1968081e-03   1.9824114e-03   1.0903705e-02   3.4373671e-04   1.0364284e-02   4.2441401e-03   4.1832512e-03   1.3456492e-03   1.8893734e-01   1.9027350e-01   2.2602463e-01   2.6493124e-01   2.3909907e-01   2.6731994e-01   2.1287273e-01   1.7119179e-01   2.1885648e-01   2.2106766e-01   2.4751153e-01   1.9400411e-01   2.3783986e-01   2.6104566e-01   1.3465024e-01   1.7581619e-01   2.5867158e-01   2.0765719e-01   3.1157756e-01   2.1392913e-01   2.7004976e-01   1.7791371e-01   3.2221942e-01   2.6551484e-01   1.9215572e-01   1.8858682e-01   2.4539617e-01   2.6396374e-01   2.4190361e-01   1.4753547e-01   2.1663955e-01   2.0044864e-01   1.8527044e-01   3.5759997e-01   2.7597540e-01   1.9143761e-01   2.1146655e-01   2.7190276e-01   1.9231433e-01   2.3971651e-01   2.8607417e-01   2.3671699e-01   2.0909909e-01   1.7885289e-01   2.3973819e-01   2.0020124e-01   2.1166375e-01   2.0015528e-01   1.1482314e-01   2.0802247e-01   4.5814031e-01   3.9622068e-01   3.7010544e-01   3.9277065e-01   4.1629822e-01   4.1528170e-01   4.1031284e-01   3.9724817e-01   4.2445794e-01   3.4613203e-01   2.7894605e-01   3.6843870e-01   3.4078468e-01   4.2489846e-01   4.2345869e-01   3.3131814e-01   3.4950009e-01   3.5299308e-01   4.8820349e-01   3.9397890e-01   3.4860920e-01   3.7803314e-01   4.3080058e-01   3.1437592e-01   3.4366436e-01   3.4124618e-01   2.9521701e-01   
2.9351505e-01   4.1304236e-01   3.2750551e-01   3.8091343e-01   2.9709224e-01   4.2023724e-01   3.1695286e-01   4.2011817e-01   3.6184591e-01   3.7113199e-01   3.4760954e-01   2.8576301e-01   3.0899799e-01   3.6912287e-01   2.7985004e-01   3.9622068e-01   3.8552168e-01   3.6588486e-01   3.1841134e-01   3.5869504e-01   3.1661834e-01   3.4148439e-01   3.4312143e-01   1.2983249e-03   1.5471006e-02   1.3470267e-02   1.3877317e-02   1.5471006e-02   7.2739782e-03   9.2612860e-03   6.0792360e-03   5.4160667e-02   2.4574168e-03   8.1477262e-03   3.8451807e-03   1.8194605e-02   1.5067164e-03   5.6792549e-03   4.8846913e-03   1.0004616e-02   2.6108642e-01   2.5718656e-01   3.0162187e-01   3.4990044e-01   3.2118558e-01   3.3212064e-01   2.7586622e-01   2.3634761e-01   2.9366681e-01   2.8770633e-01   3.3110877e-01   2.6200119e-01   3.2383134e-01   3.3046521e-01   1.9829106e-01   2.4860521e-01   3.1887574e-01   2.7240198e-01   4.1018982e-01   2.8711577e-01   3.3461178e-01   2.5228731e-01   4.0944673e-01   3.3325697e-01   2.6572836e-01   2.6351234e-01   3.2731613e-01   3.4479515e-01   3.1382719e-01   2.1730674e-01   2.9282566e-01   2.7420761e-01   2.5662111e-01   4.3493335e-01   3.3165524e-01   2.4546083e-01   2.8544328e-01   3.6363165e-01   2.4916639e-01   3.1676067e-01   3.5299851e-01   3.0301330e-01   2.8384547e-01   2.5066938e-01   3.0887732e-01   2.5579752e-01   2.7431070e-01   2.7082223e-01   1.8355500e-01   2.7545202e-01   5.3566205e-01   4.7913827e-01   4.5930782e-01   4.6887050e-01   5.0113940e-01   5.0408511e-01   4.8425611e-01   4.8100390e-01   5.1611676e-01   4.2712908e-01   3.5770237e-01   4.5815125e-01   4.2990067e-01   5.1783166e-01   5.1944925e-01   4.1592191e-01   4.2634771e-01   4.2297928e-01   5.8870710e-01   4.8576650e-01   4.3645958e-01   4.5912638e-01   5.2238783e-01   4.0361731e-01   4.1983156e-01   4.1785974e-01   3.8054730e-01   3.6909926e-01   5.0215992e-01   4.0667536e-01   4.7224138e-01   3.6745640e-01   5.1222367e-01   3.9280819e-01   4.9246039e-01   
4.6045456e-01   4.4643751e-01   4.1946868e-01   3.6048019e-01   3.9703515e-01   4.6143668e-01   3.7590038e-01   4.7913827e-01   4.7009550e-01   4.5350904e-01   4.1479012e-01   4.5636227e-01   4.0162438e-01   4.1380491e-01   4.1334093e-01   1.4411876e-02   8.7935053e-03   9.1184252e-03   1.4411876e-02   5.7453578e-03   7.6054776e-03   2.7409931e-03   4.7624677e-02   1.8780439e-03   4.9746662e-03   5.9684857e-03   1.4534556e-02   2.3601867e-03   4.9286760e-03   3.4466344e-03   7.5341006e-03   2.6746877e-01   2.6516664e-01   3.0921139e-01   3.5673992e-01   3.2754140e-01   3.4481450e-01   2.8624779e-01   2.4359422e-01   3.0108022e-01   2.9756219e-01   3.3745616e-01   2.6990583e-01   3.2887946e-01   3.4163113e-01   2.0331657e-01   2.5392638e-01   3.3233847e-01   2.8201820e-01   4.1489235e-01   2.9472025e-01   3.4749512e-01   2.5726587e-01   4.1828245e-01   3.4508966e-01   2.7189140e-01   2.6903465e-01   3.3406888e-01   3.5284896e-01   3.2333666e-01   2.2149953e-01   2.9970099e-01   2.8083524e-01   2.6300357e-01   4.4804341e-01   3.4702929e-01   2.5691578e-01   2.9267142e-01   3.6866600e-01   2.5996213e-01   3.2467728e-01   3.6599412e-01   3.1379996e-01   2.9070461e-01   2.5648991e-01   3.1904849e-01   2.6736905e-01   2.8471636e-01   2.7833907e-01   1.8530093e-01   2.8435281e-01   5.5199539e-01   4.9206433e-01   4.6946939e-01   4.8356355e-01   5.1418554e-01   5.1595196e-01   5.0010048e-01   4.9372697e-01   5.2743722e-01   4.3877522e-01   3.6710197e-01   4.6809646e-01   4.3893984e-01   5.2878375e-01   5.2942069e-01   4.2592703e-01   4.3928640e-01   4.3789059e-01   5.9924430e-01   4.9601715e-01   4.4619754e-01   4.7193405e-01   5.3393309e-01   4.1149136e-01   4.3273356e-01   4.3054493e-01   3.8875406e-01   3.8007672e-01   5.1386048e-01   4.1809676e-01   4.8216133e-01   3.8002405e-01   5.2330849e-01   4.0472333e-01   5.0903363e-01   4.6735136e-01   4.6059448e-01   4.3368525e-01   3.7134977e-01   4.0504167e-01   4.7061436e-01   3.7989795e-01   4.9206433e-01   4.8217825e-01   
4.6398354e-01   4.2055878e-01   4.6343857e-01   4.1088828e-01   4.2766517e-01   4.2782051e-01   4.0251529e-03   4.0402030e-03   0.0000000e+00   2.0104773e-03   1.1579294e-03   6.7733512e-03   1.3328703e-02   6.1195429e-03   3.8280621e-03   5.4471697e-03   1.3203495e-03   7.3930866e-03   2.5511055e-03   3.8011014e-03   1.5935161e-03   1.6488937e-01   1.6420208e-01   1.9946134e-01   2.3841307e-01   2.1377841e-01   2.3352484e-01   1.8323739e-01   1.4660873e-01   1.9269692e-01   1.9183992e-01   2.2200730e-01   1.6791595e-01   2.1423078e-01   2.2922867e-01   1.1407468e-01   1.5349471e-01   2.2418179e-01   1.7908948e-01   2.8692621e-01   1.8765985e-01   2.3596576e-01   1.5596498e-01   2.9203641e-01   2.3278981e-01   1.6829150e-01   1.6563525e-01   2.1942293e-01   2.3592536e-01   2.1256421e-01   1.2755291e-01   1.9121342e-01   1.7576718e-01   1.6132929e-01   3.2148865e-01   2.3885357e-01   1.6118125e-01   1.8573297e-01   2.4757051e-01   1.6279862e-01   2.1240660e-01   2.5149161e-01   2.0595435e-01   1.8389163e-01   1.5580881e-01   2.0964365e-01   1.6953436e-01   1.8203376e-01   1.7437090e-01   9.9184065e-02   1.8031354e-01   4.1664204e-01   3.5971944e-01   3.3744612e-01   3.5416868e-01   3.7936058e-01   3.7982315e-01   3.7006184e-01   3.6098196e-01   3.8956200e-01   3.1201251e-01   2.4889912e-01   3.3607844e-01   3.1002022e-01   3.9046127e-01   3.9028467e-01   2.9949936e-01   3.1373736e-01   3.1479561e-01   4.5355800e-01   3.6085089e-01   3.1682959e-01   3.4195608e-01   3.9553949e-01   2.8555816e-01   3.0804941e-01   3.0593834e-01   2.6633772e-01   2.6121338e-01   3.7782244e-01   2.9399549e-01   3.4839508e-01   2.6278429e-01   3.8569374e-01   2.8303600e-01   3.7885421e-01   3.3349568e-01   3.3352424e-01   3.1033775e-01   2.5375578e-01   2.8011018e-01   3.3772574e-01   2.5671985e-01   3.5971944e-01   3.5022311e-01   3.3289784e-01   2.9224130e-01   3.3016010e-01   2.8599652e-01   3.0475125e-01   3.0561759e-01   3.0700267e-06   4.0251529e-03   1.9467123e-03   1.7971688e-03   
1.7301608e-03   1.6293141e-02   4.7718752e-03   6.8454231e-04   9.3235659e-03   1.4106675e-03   7.7429721e-03   3.1097831e-03   2.5391665e-03   9.5764453e-04   2.0739268e-01   2.0836358e-01   2.4589773e-01   2.8656370e-01   2.5981614e-01   2.8739169e-01   2.3131893e-01   1.8849968e-01   2.3845740e-01   2.4007299e-01   2.6857108e-01   2.1230255e-01   2.5877066e-01   2.8137449e-01   1.5046312e-01   1.9386671e-01   2.7809337e-01   2.2610426e-01   3.3523485e-01   2.3325794e-01   2.9016640e-01   1.9614834e-01   3.4542234e-01   2.8580893e-01   2.1082239e-01   2.0722978e-01   2.6628244e-01   2.8528108e-01   2.6193836e-01   1.6431358e-01   2.3627495e-01   2.1939954e-01   2.0354344e-01   3.8077948e-01   2.9536596e-01   2.0839572e-01   2.3078002e-01   2.9414568e-01   2.0957408e-01   2.6009226e-01   3.0679254e-01   2.5613081e-01   2.2839644e-01   1.9694047e-01   2.5948108e-01   2.1757899e-01   2.3004331e-01   2.1886202e-01   1.3010315e-01   2.2671016e-01   4.8309156e-01   4.2059741e-01   3.9446827e-01   4.1650857e-01   4.4118694e-01   4.4046177e-01   4.3416675e-01   4.2171124e-01   4.5001131e-01   3.6938174e-01   3.0049261e-01   3.9280494e-01   3.6453208e-01   4.5055186e-01   4.4930066e-01   3.5453263e-01   3.7247610e-01   3.7543144e-01   5.1540859e-01   4.1899010e-01   3.7243361e-01   4.0192295e-01   4.5645786e-01   3.3753686e-01   3.6646355e-01   3.6403348e-01   3.1765048e-01   3.1516192e-01   4.3820660e-01   3.5021371e-01   4.0564147e-01   3.1837605e-01   4.4574076e-01   3.3915719e-01   4.4399141e-01   3.8666018e-01   3.9439941e-01   3.7011128e-01   3.0715348e-01   3.3195062e-01   3.9368522e-01   3.0254596e-01   4.2059741e-01   4.0983289e-01   3.9004780e-01   3.4211299e-01   3.8338579e-01   3.3953411e-01   3.6390344e-01   3.6538415e-01   4.0402030e-03   2.0647746e-03   1.8810615e-03   1.8744038e-03   1.5904825e-02   5.0089073e-03   7.7718458e-04   9.5962272e-03   1.3536136e-03   8.0354537e-03   3.2683077e-03   2.7053207e-03   1.0204136e-03   2.0663566e-01   2.0767698e-01   
2.4509318e-01   2.8563097e-01   2.5892354e-01   2.8672996e-01   2.3068420e-01   1.8783705e-01   2.3766371e-01   2.3938906e-01   2.6765771e-01   2.1160129e-01   2.5782529e-01   2.8065126e-01   1.4982077e-01   1.9310124e-01   2.7749364e-01   2.2544624e-01   3.3411471e-01   2.3248629e-01   2.8950769e-01   1.9536146e-01   3.4445558e-01   2.8510792e-01   2.1004612e-01   2.0643532e-01   2.6539182e-01   2.8440424e-01   2.6118254e-01   1.6358877e-01   2.3546280e-01   2.1862058e-01   2.0279724e-01   3.7994304e-01   2.9482551e-01   2.0788172e-01   2.2999781e-01   2.9312113e-01   2.0902225e-01   2.5926765e-01   3.0610117e-01   2.5545076e-01   2.2760393e-01   1.9618808e-01   2.5876275e-01   2.1704368e-01   2.2941326e-01   2.1812470e-01   1.2939132e-01   2.2601611e-01   4.8224276e-01   4.1968249e-01   3.9347110e-01   4.1568549e-01   4.4024422e-01   4.3946585e-01   4.3337086e-01   4.2078495e-01   4.4897602e-01   3.6849705e-01   2.9964288e-01   3.9180103e-01   3.6353845e-01   4.4949934e-01   4.4820701e-01   3.5360058e-01   3.7164688e-01   3.7469388e-01   5.1424729e-01   4.1795596e-01   3.7145645e-01   4.0103422e-01   4.5542290e-01   3.3654347e-01   3.6564394e-01   3.6320808e-01   3.1671115e-01   3.1435391e-01   4.3720666e-01   3.4935506e-01   4.0461470e-01   3.1763784e-01   4.4470146e-01   3.3834387e-01   4.4321597e-01   3.8553656e-01   3.9358898e-01   3.6934910e-01   3.0635763e-01   3.3097385e-01   3.9264615e-01   3.0146236e-01   4.1968249e-01   4.0889669e-01   3.8907226e-01   3.4102273e-01   3.8227517e-01   3.3859771e-01   3.6313559e-01   3.6464431e-01   2.0104773e-03   1.1579294e-03   6.7733512e-03   1.3328703e-02   6.1195429e-03   3.8280621e-03   5.4471697e-03   1.3203495e-03   7.3930866e-03   2.5511055e-03   3.8011014e-03   1.5935161e-03   1.6488937e-01   1.6420208e-01   1.9946134e-01   2.3841307e-01   2.1377841e-01   2.3352484e-01   1.8323739e-01   1.4660873e-01   1.9269692e-01   1.9183992e-01   2.2200730e-01   1.6791595e-01   2.1423078e-01   2.2922867e-01   1.1407468e-01   
1.5349471e-01   2.2418179e-01   1.7908948e-01   2.8692621e-01   1.8765985e-01   2.3596576e-01   1.5596498e-01   2.9203641e-01   2.3278981e-01   1.6829150e-01   1.6563525e-01   2.1942293e-01   2.3592536e-01   2.1256421e-01   1.2755291e-01   1.9121342e-01   1.7576718e-01   1.6132929e-01   3.2148865e-01   2.3885357e-01   1.6118125e-01   1.8573297e-01   2.4757051e-01   1.6279862e-01   2.1240660e-01   2.5149161e-01   2.0595435e-01   1.8389163e-01   1.5580881e-01   2.0964365e-01   1.6953436e-01   1.8203376e-01   1.7437090e-01   9.9184065e-02   1.8031354e-01   4.1664204e-01   3.5971944e-01   3.3744612e-01   3.5416868e-01   3.7936058e-01   3.7982315e-01   3.7006184e-01   3.6098196e-01   3.8956200e-01   3.1201251e-01   2.4889912e-01   3.3607844e-01   3.1002022e-01   3.9046127e-01   3.9028467e-01   2.9949936e-01   3.1373736e-01   3.1479561e-01   4.5355800e-01   3.6085089e-01   3.1682959e-01   3.4195608e-01   3.9553949e-01   2.8555816e-01   3.0804941e-01   3.0593834e-01   2.6633772e-01   2.6121338e-01   3.7782244e-01   2.9399549e-01   3.4839508e-01   2.6278429e-01   3.8569374e-01   2.8303600e-01   3.7885421e-01   3.3349568e-01   3.3352424e-01   3.1033775e-01   2.5375578e-01   2.8011018e-01   3.3772574e-01   2.5671985e-01   3.5971944e-01   3.5022311e-01   3.3289784e-01   2.9224130e-01   3.3016010e-01   2.8599652e-01   3.0475125e-01   3.0561759e-01   1.3213080e-04   1.6348389e-03   2.1958653e-02   1.3024074e-03   6.2459052e-04   2.7868209e-03   2.5128724e-03   2.4948096e-03   1.5213665e-04   2.9312203e-04   2.6269769e-04   2.0119891e-01   2.0014287e-01   2.3871519e-01   2.8086210e-01   2.5438649e-01   2.7419983e-01   2.2034638e-01   1.8091150e-01   2.3139686e-01   2.2990542e-01   2.6326545e-01   2.0423778e-01   2.5497864e-01   2.7008054e-01   1.4507978e-01   1.8881738e-01   2.6377709e-01   2.1604374e-01   3.3279319e-01   2.2588101e-01   2.7676493e-01   1.9156801e-01   3.3788258e-01   2.7371187e-01   2.0495838e-01   2.0212622e-01   2.6043406e-01   2.7802264e-01   2.5251791e-01   
1.6022437e-01   2.2986910e-01   2.1306976e-01   1.9729190e-01   3.6816423e-01   2.7878040e-01   1.9567818e-01   2.2384809e-01   2.9081290e-01   1.9773475e-01   2.5268382e-01   2.9345893e-01   2.4498738e-01   2.2190073e-01   1.9130390e-01   2.4917712e-01   2.0485354e-01   2.1902087e-01   2.1139045e-01   1.2850635e-01   2.1757950e-01   4.6735126e-01   4.0863686e-01   3.8577163e-01   4.0223139e-01   4.2927752e-01   4.3005363e-01   4.1858115e-01   4.1002225e-01   4.4039161e-01   3.5852229e-01   2.9170762e-01   3.8436621e-01   3.5694453e-01   4.4140604e-01   4.4138801e-01   3.4560659e-01   3.5999045e-01   3.6042881e-01   5.0737364e-01   4.1042921e-01   3.6404336e-01   3.8992213e-01   4.4661300e-01   3.3112795e-01   3.5395838e-01   3.5178032e-01   3.1059068e-01   3.0453530e-01   4.2798873e-01   3.3942843e-01   3.9736659e-01   3.0574030e-01   4.3638021e-01   3.2761075e-01   4.2755962e-01   3.8201356e-01   3.8058140e-01   3.5594943e-01   2.9656762e-01   3.2531047e-01   3.8623001e-01   3.0061288e-01   4.0863686e-01   3.9884323e-01   3.8090672e-01   3.3841042e-01   3.7847943e-01   3.3138355e-01   3.5013260e-01   3.5082807e-01   2.3963109e-03   1.8894374e-02   2.2439950e-03   9.0594652e-04   3.4335324e-03   1.6224592e-03   3.5937956e-03   4.5783206e-04   8.1747242e-04   1.3119947e-04   1.9231717e-01   1.9150735e-01   2.2920314e-01   2.7047035e-01   2.4441394e-01   2.6481790e-01   2.1164496e-01   1.7262115e-01   2.2200818e-01   2.2091128e-01   2.5312771e-01   1.9549847e-01   2.4484600e-01   2.6050695e-01   1.3741417e-01   1.8008449e-01   2.5474237e-01   2.0730811e-01   3.2133505e-01   2.1663183e-01   2.6736983e-01   1.8273137e-01   3.2682425e-01   2.6418461e-01   1.9596515e-01   1.9311641e-01   2.5039748e-01   2.6783585e-01   2.4301257e-01   1.5205635e-01   2.2044020e-01   2.0395985e-01   1.8849810e-01   3.5730736e-01   2.6984619e-01   1.8774203e-01   2.1458977e-01   2.8004857e-01   1.8962388e-01   2.4294411e-01   2.8377929e-01   2.3583758e-01   2.1263399e-01   1.8257496e-01   
2.3984158e-01   1.9672007e-01   2.1035264e-01   2.0243724e-01   1.2095575e-01   2.0869633e-01   4.5579154e-01   3.9719792e-01   3.7421582e-01   3.9117796e-01   4.1759891e-01   4.1818270e-01   4.0752420e-01   3.9853286e-01   4.2831769e-01   3.4756630e-01   2.8146997e-01   3.7279915e-01   3.4563396e-01   4.2926782e-01   4.2911586e-01   3.3459449e-01   3.4922354e-01   3.5000846e-01   4.9447633e-01   3.9856686e-01   3.5272682e-01   3.7871114e-01   4.3449948e-01   3.2004729e-01   3.4327468e-01   3.4109263e-01   2.9987407e-01   2.9431934e-01   4.1611792e-01   3.2872232e-01   3.8562509e-01   2.9576964e-01   4.2431958e-01   3.1716861e-01   4.1652649e-01   3.7009695e-01   3.6972936e-01   3.4546743e-01   2.8647014e-01   3.1433567e-01   3.7453491e-01   2.8958559e-01   3.9719792e-01   3.8739786e-01   3.6946063e-01   3.2697767e-01   3.6662992e-01   3.2048201e-01   3.3967430e-01   3.4047263e-01   2.8241524e-02   1.4713791e-03   4.6823304e-04   6.0296352e-03   5.2071190e-03   3.4182869e-03   1.9529381e-03   9.1025104e-04   1.8441969e-03   2.3388660e-01   2.3348281e-01   2.7404368e-01   3.1787596e-01   2.8991570e-01   3.1331298e-01   2.5580945e-01   2.1274052e-01   2.6625865e-01   2.6571335e-01   2.9921857e-01   2.3779036e-01   2.8985951e-01   3.0846478e-01   1.7343111e-01   2.2024476e-01   3.0259960e-01   2.5097696e-01   3.7082192e-01   2.6053197e-01   3.1606460e-01   2.2298918e-01   3.7823337e-01   3.1251210e-01   2.3776110e-01   2.3444923e-01   2.9645330e-01   3.1548927e-01   2.8938607e-01   1.8922982e-01   2.6439849e-01   2.4655891e-01   2.2974757e-01   4.1142478e-01   3.1887022e-01   2.2998734e-01   2.5821680e-01   3.2732410e-01   2.3197317e-01   2.8887378e-01   3.3359010e-01   2.8195681e-01   2.5599107e-01   2.2312925e-01   2.8613603e-01   2.3977955e-01   2.5441596e-01   2.4519140e-01   1.5385241e-01   2.5232104e-01   5.1493631e-01   4.5323214e-01   4.2845834e-01   4.4724986e-01   4.7457294e-01   4.7489714e-01   4.6451744e-01   4.5458397e-01   4.8531016e-01   4.0086775e-01   
3.3039510e-01   4.2690815e-01   3.9810528e-01   4.8619268e-01   4.8572861e-01   3.8678000e-01   4.0288412e-01   4.0400345e-01   5.5371159e-01   4.5396321e-01   4.0578110e-01   4.3384503e-01   4.9180921e-01   3.7075981e-01   3.9660943e-01   3.9426853e-01   3.4955388e-01   3.4443823e-01   4.7269976e-01   3.8095285e-01   4.4033063e-01   3.4628429e-01   4.8107810e-01   3.6885113e-01   4.7400892e-01   4.2299055e-01   4.2466221e-01   3.9913020e-01   3.3607659e-01   3.6477722e-01   4.2848136e-01   3.3695835e-01   4.5323214e-01   4.4278413e-01   4.2356519e-01   3.7724050e-01   4.1943108e-01   3.7167660e-01   3.9296882e-01   3.9389283e-01   3.3787081e-02   2.1999955e-02   3.5606869e-02   9.7919779e-03   3.8730614e-02   2.5164827e-02   2.6849118e-02   1.7903003e-02   1.2662420e-01   1.3061214e-01   1.5776511e-01   1.8787898e-01   1.6611889e-01   2.0099100e-01   1.5292701e-01   1.1474225e-01   1.5177722e-01   1.5820095e-01   1.7289957e-01   1.3332512e-01   1.6306194e-01   1.9266388e-01   8.3782583e-02   1.1475472e-01   1.9583103e-01   1.4727273e-01   2.2350713e-01   1.4824286e-01   2.0360977e-01   1.1578494e-01   2.3876064e-01   1.9766401e-01   1.2872248e-01   1.2487918e-01   1.7184761e-01   1.8912261e-01   1.7398714e-01   9.1697206e-02   1.4908080e-01   1.3596066e-01   1.2380963e-01   2.7656824e-01   2.1428571e-01   1.3903777e-01   1.4563130e-01   1.9085533e-01   1.3814153e-01   1.6927103e-01   2.1683317e-01   1.7245190e-01   1.4312164e-01   1.1792045e-01   1.7357532e-01   1.4591400e-01   1.5204372e-01   1.3733781e-01   6.4362895e-02   1.4610010e-01   3.6994724e-01   3.0922665e-01   2.8172128e-01   3.0981046e-01   3.2683078e-01   3.2376302e-01   3.2734273e-01   3.0972403e-01   3.3079457e-01   2.6398840e-01   2.0377846e-01   2.7991998e-01   2.5479336e-01   3.3054260e-01   3.2764011e-01   2.4845492e-01   2.6936921e-01   2.7648193e-01   3.8578015e-01   3.0244531e-01   2.6262990e-01   2.9337350e-01   3.3673416e-01   2.3057418e-01   2.6437275e-01   2.6189855e-01   2.1511723e-01   
2.1874548e-01   3.2151629e-01   2.4786804e-01   2.9060022e-01   2.2492263e-01   3.2669967e-01   2.4001378e-01   3.3739817e-01   2.6941492e-01   2.9020759e-01   2.7043921e-01   2.1216140e-01   2.2627991e-01   2.7921868e-01   1.9599823e-01   3.0922665e-01   2.9841488e-01   2.7865157e-01   2.3076134e-01   2.6697481e-01   2.3479056e-01   2.6453179e-01   2.6724220e-01   1.8463349e-03   1.7603525e-03   7.3051568e-03   4.1015204e-04   7.6986614e-04   4.1505661e-04   2.5516535e-03   2.2715123e-01   2.2501703e-01   2.6621562e-01   3.1107232e-01   2.8349906e-01   3.0014485e-01   2.4486000e-01   2.0495206e-01   2.5858102e-01   2.5538755e-01   2.9285644e-01   2.2943000e-01   2.8485147e-01   2.9693404e-01   1.6780939e-01   2.1457958e-01   2.8854018e-01   2.4083271e-01   3.6655716e-01   2.5261802e-01   3.0270375e-01   2.1770871e-01   3.6954821e-01   3.0027680e-01   2.3128364e-01   2.2864524e-01   2.8964053e-01   3.0736347e-01   2.7955757e-01   1.8460266e-01   2.5729263e-01   2.3962699e-01   2.2298939e-01   3.9825299e-01   3.0280555e-01   2.1772566e-01   2.5069761e-01   3.2250864e-01   2.2044120e-01   2.8076678e-01   3.2019205e-01   2.7070478e-01   2.4885895e-01   2.1694127e-01   2.7557358e-01   2.2744661e-01   2.4343281e-01   2.3729093e-01   1.5163937e-01   2.4296011e-01   4.9867564e-01   4.4043288e-01   4.1855224e-01   4.3250726e-01   4.6172807e-01   4.6333132e-01   4.4859207e-01   4.4201028e-01   4.7437581e-01   3.8923920e-01   3.2088439e-01   4.1722959e-01   3.8928568e-01   4.7566007e-01   4.7625383e-01   3.7686783e-01   3.8986190e-01   3.8884081e-01   5.4387473e-01   4.4406349e-01   3.9623886e-01   4.2109532e-01   4.8065646e-01   3.6308022e-01   3.8359994e-01   3.8148432e-01   3.4142085e-01   3.3328357e-01   4.6130540e-01   3.6948155e-01   4.3073385e-01   3.3341169e-01   4.7038237e-01   3.5679054e-01   4.5732763e-01   4.1658437e-01   4.1040883e-01   3.8470265e-01   3.2499997e-01   3.5692838e-01   4.1963835e-01   3.3330111e-01   4.4043288e-01   4.3085698e-01   4.1330060e-01   
3.7185723e-01   4.1281711e-01   3.6250007e-01   3.7890193e-01   3.7915604e-01   5.4235993e-03   2.5998420e-03   3.8677378e-03   1.1491859e-03   6.2877149e-04   4.8332372e-04   2.1486898e-01   2.1467854e-01   2.5369922e-01   2.9604720e-01   2.6891688e-01   2.9246555e-01   2.3653315e-01   1.9467620e-01   2.4616446e-01   2.4597983e-01   2.7792166e-01   2.1880759e-01   2.6876410e-01   2.8748855e-01   1.5684504e-01   2.0165721e-01   2.8227299e-01   2.3174975e-01   3.4738814e-01   2.4066356e-01   2.9516630e-01   2.0426320e-01   3.5491429e-01   2.9152419e-01   2.1857252e-01   2.1532575e-01   2.7527993e-01   2.9385335e-01   2.6877316e-01   1.7184619e-01   2.4431046e-01   2.2708854e-01   2.1089056e-01   3.8786051e-01   2.9845589e-01   2.1195408e-01   2.3838722e-01   3.0508869e-01   2.1371586e-01   2.6806783e-01   3.1216051e-01   2.6180490e-01   2.3620065e-01   2.0447438e-01   2.6573552e-01   2.2136814e-01   2.3519672e-01   2.2587537e-01   1.3797486e-01   2.3292930e-01   4.8968343e-01   4.2865553e-01   4.0406082e-01   4.2311568e-01   4.4955467e-01   4.4970134e-01   4.4022012e-01   4.2994584e-01   4.5983149e-01   3.7733154e-01   3.0847182e-01   4.0252124e-01   3.7430526e-01   4.6065171e-01   4.6008825e-01   3.6337620e-01   3.7950484e-01   3.8097080e-01   5.2689771e-01   4.2900834e-01   3.8187058e-01   4.0969052e-01   4.6622956e-01   3.4753954e-01   3.7338441e-01   3.7106677e-01   3.2693971e-01   3.2232718e-01   4.4752724e-01   3.5789640e-01   4.1563695e-01   3.2438944e-01   4.5565562e-01   3.4619907e-01   4.4965557e-01   3.9841763e-01   4.0096111e-01   3.7608333e-01   3.1419485e-01   3.4172782e-01   4.0397551e-01   3.1440281e-01   4.2865553e-01   4.1831326e-01   3.9931889e-01   3.5369559e-01   3.9495166e-01   3.4857729e-01   3.7001412e-01   3.7103653e-01   9.4891023e-03   8.2375707e-04   1.6686708e-03   2.4475618e-03   4.6565778e-03   2.0491959e-01   2.0139969e-01   2.4160221e-01   2.8586265e-01   2.5951451e-01   2.7003471e-01   2.1844761e-01   1.8272084e-01   2.3436629e-01   
2.2908460e-01   2.6861177e-01   2.0572725e-01   2.6206958e-01   2.6821933e-01   1.4908933e-01   1.9377976e-01   2.5813756e-01   2.1522061e-01   3.4200281e-01   2.2841742e-01   2.7236034e-01   1.9712279e-01   3.4101997e-01   2.7089218e-01   2.0911514e-01   2.0716704e-01   2.6510721e-01   2.8112922e-01   2.5280689e-01   1.6599180e-01   2.3361443e-01   2.1674754e-01   2.0090153e-01   3.6527528e-01   2.7043122e-01   1.9136502e-01   2.2690330e-01   2.9871211e-01   1.9453190e-01   2.5541554e-01   2.8925890e-01   2.4310626e-01   2.2546391e-01   1.9558871e-01   2.4836004e-01   2.0063248e-01   2.1704777e-01   2.1367234e-01   1.3678274e-01   2.1790721e-01   4.6096128e-01   4.0671584e-01   3.8773373e-01   3.9744288e-01   4.2748862e-01   4.3009711e-01   4.1223718e-01   4.0843810e-01   4.4142325e-01   3.5773584e-01   2.9306179e-01   3.8663461e-01   3.6011993e-01   4.4301487e-01   4.4448732e-01   3.4710733e-01   3.5722182e-01   3.5456119e-01   5.1046759e-01   4.1264841e-01   3.6628224e-01   3.8788026e-01   4.4738884e-01   3.3559118e-01   3.5113614e-01   3.4925623e-01   3.1415212e-01   3.0373195e-01   4.2825709e-01   3.3863565e-01   3.9988417e-01   3.0248234e-01   4.3771445e-01   3.2582243e-01   4.2019828e-01   3.8883040e-01   3.7626984e-01   3.5109855e-01   2.9576205e-01   3.2946337e-01   3.8969349e-01   3.1023698e-01   4.0671584e-01   3.9807129e-01   3.8231042e-01   3.4615104e-01   3.8497123e-01   3.3374807e-01   3.4573433e-01   3.4546409e-01   9.9827550e-03   3.7862368e-03   4.2584946e-03   1.2265791e-03   1.7617949e-01   1.7703821e-01   2.1212343e-01   2.5052805e-01   2.2530971e-01   2.5127302e-01   1.9850334e-01   1.5861692e-01   2.0515179e-01   2.0665158e-01   2.3356888e-01   1.8070269e-01   2.2448284e-01   2.4549391e-01   1.2367511e-01   1.6367665e-01   2.4261509e-01   1.9359579e-01   2.9712394e-01   2.0027260e-01   2.5390638e-01   1.6582785e-01   3.0629971e-01   2.4971132e-01   1.7938888e-01   1.7609087e-01   2.3136866e-01   2.4921343e-01   2.2715314e-01   1.3644726e-01   
2.0313672e-01   1.8736201e-01   1.7259709e-01   3.3997757e-01   2.5916969e-01   1.7732718e-01   1.9796934e-01   2.5795325e-01   1.7832877e-01   2.2545225e-01   2.6961076e-01   2.2173788e-01   1.9575867e-01   1.6649347e-01   2.2485533e-01   1.8584864e-01   1.9731550e-01   1.8682160e-01   1.0543428e-01   1.9413835e-01   4.3845459e-01   3.7813833e-01   3.5313427e-01   3.7426892e-01   3.9794201e-01   3.9726718e-01   3.9130315e-01   3.7920848e-01   4.0649770e-01   3.2906316e-01   2.6354873e-01   3.5155280e-01   3.2453882e-01   4.0704153e-01   4.0591144e-01   3.1492301e-01   3.3203733e-01   3.3498583e-01   4.6984073e-01   3.7668481e-01   3.3204804e-01   3.6021418e-01   4.1270518e-01   2.9886261e-01   3.2629567e-01   3.2396944e-01   2.7989788e-01   2.7743776e-01   3.9510115e-01   3.1077003e-01   3.6387709e-01   2.8054253e-01   4.0239292e-01   3.0024699e-01   4.0081171e-01   3.4598760e-01   3.5305310e-01   3.2985030e-01   2.6984561e-01   2.9352511e-01   3.5245796e-01   2.6611615e-01   3.7813833e-01   3.6780473e-01   3.4887718e-01   3.0350575e-01   3.4281917e-01   3.0065558e-01   3.2390437e-01   3.2536506e-01   1.4872402e-03   1.4261828e-03   4.3370414e-03   2.2707675e-01   2.2400493e-01   2.6569285e-01   3.1123078e-01   2.8378554e-01   2.9683242e-01   2.4257696e-01   2.0422363e-01   2.5811380e-01   2.5348079e-01   2.9319960e-01   2.2849166e-01   2.8589147e-01   2.9450563e-01   1.6807288e-01   2.1502765e-01   2.8470332e-01   2.3897250e-01   3.6819788e-01   2.5200370e-01   2.9929335e-01   2.1837402e-01   3.6884033e-01   2.9747447e-01   2.3136263e-01   2.2907381e-01   2.8973378e-01   3.0680413e-01   2.7799960e-01   1.8547528e-01   2.5712752e-01   2.3949352e-01   2.2288675e-01   3.9517754e-01   2.9790586e-01   2.1461980e-01   2.5028856e-01   3.2375470e-01   2.1774627e-01   2.8014020e-01   3.1681085e-01   2.6834640e-01   2.4865097e-01   2.1711982e-01   2.7360556e-01   2.2434130e-01   2.4112622e-01   2.3662943e-01   1.5362542e-01   2.4153192e-01   4.9407199e-01   4.3763583e-01   
4.1728329e-01   4.2863716e-01   4.5893922e-01   4.6123811e-01   4.4410036e-01   4.3933832e-01   4.7263926e-01   3.8698450e-01   3.1961653e-01   4.1608078e-01   3.8854631e-01   4.7414193e-01   4.7530563e-01   3.7551546e-01   3.8685112e-01   3.8465728e-01   5.4280771e-01   4.4284025e-01   3.9512469e-01   4.1829234e-01   4.7881043e-01   3.6289720e-01   3.8058317e-01   3.7858579e-01   3.4094528e-01   3.3115887e-01   4.5930198e-01   3.6727055e-01   4.2965213e-01   3.3035276e-01   4.6876907e-01   3.5423104e-01   4.5243416e-01   4.1718566e-01   4.0676646e-01   3.8091352e-01   3.2289623e-01   3.5664111e-01   4.1894270e-01   3.3518670e-01   4.3763583e-01   4.2851737e-01   4.1182914e-01   3.7291664e-01   4.1330116e-01   3.6151280e-01   3.7529785e-01   3.7518553e-01   2.1466294e-04   8.1135795e-04   2.0395325e-01   2.0232995e-01   2.4147692e-01   2.8429935e-01   2.5773107e-01   2.7550146e-01   2.2191392e-01   1.8311464e-01   2.3414066e-01   2.3177254e-01   2.6670079e-01   2.0650082e-01   2.5876572e-01   2.7190067e-01   1.4755667e-01   1.9178198e-01   2.6468375e-01   2.1783813e-01   3.3738596e-01   2.2849816e-01   2.7802206e-01   1.9468150e-01   3.4113386e-01   2.7532568e-01   2.0783209e-01   2.0518365e-01   2.6370466e-01   2.8101900e-01   2.5475268e-01   1.6319612e-01   2.3278117e-01   2.1587899e-01   1.9999930e-01   3.7014440e-01   2.7911349e-01   1.9654509e-01   2.2657275e-01   2.9494234e-01   1.9887441e-01   2.5547815e-01   2.9483790e-01   2.4668408e-01   2.2473055e-01   1.9413008e-01   2.5114079e-01   2.0579387e-01   2.2056413e-01   2.1387343e-01   1.3181835e-01   2.1964768e-01   4.6883654e-01   4.1098091e-01   3.8893009e-01   4.0388763e-01   4.3171852e-01   4.3291890e-01   4.1992695e-01   4.1244869e-01   4.4351517e-01   3.6095375e-01   2.9436456e-01   3.8758959e-01   3.6027619e-01   4.4466450e-01   4.4498549e-01   3.4851465e-01   3.6196876e-01   3.6168475e-01   5.1115719e-01   4.1372900e-01   3.6718805e-01   3.9217531e-01   4.4969696e-01   3.3465314e-01   3.5590188e-01   
3.5378490e-01   3.1382959e-01   3.0675099e-01   4.3089908e-01   3.4178657e-01   4.0069101e-01   3.0739171e-01   4.3956059e-01   3.2969272e-01   4.2869495e-01   3.8625531e-01   3.8227459e-01   3.5742723e-01   2.9874912e-01   3.2874265e-01   3.8973129e-01   3.0516637e-01   4.1098091e-01   4.0141982e-01   3.8392089e-01   3.4269880e-01   3.8263609e-01   3.3443392e-01   3.5169446e-01   3.5216636e-01   9.1517643e-04   2.1524074e-01   2.1387759e-01   2.5373107e-01   2.9714785e-01   2.7003778e-01   2.8920001e-01   2.3426287e-01   1.9410928e-01   2.4622443e-01   2.4423617e-01   2.7915984e-01   2.1812334e-01   2.7081916e-01   2.8531140e-01   1.5727134e-01   2.0260081e-01   2.7829111e-01   2.2998414e-01   3.5056990e-01   2.4050939e-01   2.9179089e-01   2.0549251e-01   3.5521699e-01   2.8889714e-01   2.1915718e-01   2.1632646e-01   2.7619378e-01   2.9404932e-01   2.6759110e-01   1.7313032e-01   2.4473695e-01   2.2745766e-01   2.1120369e-01   3.8534612e-01   2.9320974e-01   2.0849982e-01   2.3847660e-01   3.0758274e-01   2.1079215e-01   2.6804702e-01   3.0890781e-01   2.5958017e-01   2.3652855e-01   2.0509955e-01   2.6402277e-01   2.1797131e-01   2.3288914e-01   2.2557846e-01   1.4040598e-01   2.3171289e-01   4.8566100e-01   4.2666426e-01   4.0387929e-01   4.1975185e-01   4.4765854e-01   4.4867725e-01   4.3614287e-01   4.2811681e-01   4.5929331e-01   3.7580128e-01   3.0785579e-01   4.0248311e-01   3.7465557e-01   4.6038945e-01   4.6053443e-01   3.6291292e-01   3.7703492e-01   3.7702914e-01   5.2744561e-01   4.2898757e-01   3.8178676e-01   4.0761255e-01   4.6557959e-01   3.4846205e-01   3.7087965e-01   3.6870032e-01   3.2740185e-01   3.2070546e-01   4.4660679e-01   3.5633353e-01   4.1573756e-01   3.2160319e-01   4.5525495e-01   3.4414484e-01   4.4510906e-01   4.0053809e-01   3.9779009e-01   3.7261777e-01   3.1255985e-01   3.4249381e-01   4.0450836e-01   3.1773210e-01   4.2666426e-01   4.1685273e-01   3.9886436e-01   3.5618753e-01   3.9691207e-01   3.4850257e-01   3.6675459e-01   
3.6731925e-01   1.9628524e-01   1.9595152e-01   2.3363495e-01   2.7477230e-01   2.4851118e-01   2.7084921e-01   2.1685944e-01   1.7678722e-01   2.2636857e-01   2.2599655e-01   2.5724256e-01   1.9993208e-01   2.4854721e-01   2.6607935e-01   1.4078995e-01   1.8369551e-01   2.6097312e-01   2.1228442e-01   3.2508389e-01   2.2103528e-01   2.7346255e-01   1.8624671e-01   3.3183707e-01   2.6996197e-01   1.9987779e-01   1.9682726e-01   2.5462868e-01   2.7249816e-01   2.4805238e-01   1.5523356e-01   2.2463473e-01   2.0803519e-01   1.9245485e-01   3.6364465e-01   2.7666661e-01   1.9319723e-01   2.1887484e-01   2.8382186e-01   1.9489313e-01   2.4750809e-01   2.8994226e-01   2.4123726e-01   2.1680288e-01   1.8634448e-01   2.4506974e-01   2.0223799e-01   2.1557050e-01   2.0677515e-01   1.2326853e-01   2.1346465e-01   4.6321322e-01   4.0354170e-01   3.7970298e-01   3.9803381e-01   4.2400584e-01   4.2423988e-01   4.1474064e-01   4.0481837e-01   4.3422436e-01   3.5345426e-01   2.8656464e-01   3.7821934e-01   3.5073976e-01   4.3506541e-01   4.3461895e-01   3.3997143e-01   3.5549675e-01   3.5686166e-01   5.0021759e-01   4.0411514e-01   3.5805390e-01   3.8499293e-01   4.4048074e-01   3.2475847e-01   3.4952686e-01   3.4727706e-01   3.0464816e-01   2.9991017e-01   4.2212289e-01   3.3451084e-01   3.9105629e-01   3.0183284e-01   4.3015090e-01   3.2308204e-01   4.2396798e-01   3.7459262e-01   3.7639485e-01   3.5210962e-01   2.9201270e-01   3.1907654e-01   3.7972706e-01   2.9306033e-01   4.0354170e-01   3.9347753e-01   3.7503434e-01   3.3106203e-01   3.7117506e-01   3.2561211e-01   3.4620183e-01   3.4718282e-01   5.4794269e-04   1.8595150e-03   7.6486933e-03   3.6709934e-03   1.1297771e-02   3.2211024e-03   9.3536855e-04   1.2319946e-03   2.8277178e-03   4.9162548e-03   4.5756064e-04   4.5051431e-03   8.2041162e-03   5.0174936e-03   3.6228961e-04   1.1821527e-02   2.0977093e-03   2.1132418e-02   9.2067605e-04   1.2006919e-02   4.4257629e-04   2.0287770e-02   9.6871594e-03   3.6190304e-05   
1.5767395e-04   4.3807979e-03   7.0604016e-03   4.2468409e-03   2.9914879e-03   1.0695136e-03   1.8996353e-04   2.2349983e-05   3.2007300e-02   1.8626112e-02   5.9280137e-03   7.1455899e-04   1.0704644e-02   4.3625889e-03   3.3835317e-03   1.4689427e-02   5.0222514e-03   5.6899707e-04   1.6818673e-04   4.5805421e-03   5.6585758e-03   3.2371570e-03   3.2354233e-04   1.1207565e-02   1.3610741e-03   7.1737216e-02   4.4570224e-02   3.4818230e-02   4.5037397e-02   5.2043753e-02   5.1252208e-02   5.2740741e-02   4.4819381e-02   5.4844272e-02   2.7492078e-02   9.9689819e-03   3.4289978e-02   2.5632216e-02   5.5069559e-02   5.4817042e-02   2.2717491e-02   2.9451557e-02   3.3085492e-02   8.2968826e-02   4.3268668e-02   2.7878625e-02   3.8225012e-02   5.7392372e-02   1.8533113e-02   2.7752880e-02   2.6863838e-02   1.3522246e-02   1.3720834e-02   5.0366251e-02   2.2136217e-02   3.8620607e-02   1.6104597e-02   5.3184340e-02   1.9805362e-02   5.7449954e-02   3.4065200e-02   3.7316903e-02   3.0490470e-02   1.2067932e-02   1.7032969e-02   3.4772493e-02   1.3995163e-02   4.4570224e-02   4.0370599e-02   3.3379869e-02   2.1584097e-02   3.2788169e-02   1.8689520e-02   2.8313400e-02   2.9690702e-02   2.1810130e-03   9.0584977e-03   5.0419708e-03   8.6169391e-03   1.3497185e-03   5.5621011e-04   1.5626898e-03   1.4715788e-03   6.4159866e-03   2.8759099e-05   6.7456588e-03   6.5333041e-03   5.4642837e-03   1.4108585e-03   8.4463171e-03   6.8795048e-04   2.4580787e-02   1.0388021e-03   9.2286561e-03   1.7639378e-03   2.1069750e-02   7.6032385e-03   7.9175727e-04   1.2932915e-03   5.5725803e-03   7.6219263e-03   3.4673161e-03   4.2795834e-03   1.7380404e-03   7.6483144e-04   5.0584546e-04   3.0216137e-02   1.4096242e-02   2.8861882e-03   1.0548294e-03   1.3430225e-02   1.8234348e-03   3.6876972e-03   1.2061426e-02   3.2511669e-03   1.1268214e-03   9.2690858e-04   3.2976984e-03   2.6938518e-03   1.3276664e-03   2.7368625e-04   1.3465991e-02   4.6822375e-04   6.8703048e-02   4.3340061e-02   
3.5237451e-02   4.2511976e-02   5.0937508e-02   5.0961712e-02   4.9575019e-02   4.3743233e-02   5.5024281e-02   2.6603107e-02   9.8022959e-03   3.4840370e-02   2.6494597e-02   5.5508568e-02   5.5916268e-02   2.2800218e-02   2.7686924e-02   2.9943438e-02   8.4266289e-02   4.3880838e-02   2.8349604e-02   3.6881146e-02   5.7478275e-02   1.9879185e-02   2.5944410e-02   2.5180647e-02   1.4399125e-02   1.2641568e-02   5.0169037e-02   2.1180767e-02   3.9323276e-02   1.3938661e-02   5.3485998e-02   1.8367828e-02   5.3859405e-02   3.6620288e-02   3.4933732e-02   2.7786245e-02   1.0959937e-02   1.8222006e-02   3.5858197e-02   1.7515874e-02   4.3340061e-02   3.9619354e-02   3.3535832e-02   2.4403195e-02   3.5188163e-02   1.9107255e-02   2.5791231e-02   2.6740983e-02   2.3644893e-03   7.3259797e-04   5.5696730e-03   2.5707992e-03   4.5988577e-03   6.5028014e-05   1.3580837e-03   1.2238983e-03   1.7109321e-03   1.8714646e-03   2.9944669e-03   1.2905109e-02   3.6725511e-03   6.9487479e-03   1.9516582e-03   1.2369689e-02   2.1307052e-04   6.0423668e-03   3.5751225e-03   1.0028727e-02   4.0861108e-03   1.5902610e-03   2.1378325e-03   8.2628456e-04   1.7251178e-03   8.0406810e-04   9.4788056e-03   1.5999623e-04   8.7509151e-04   2.2560004e-03   1.8732619e-02   1.2379328e-02   7.5803848e-03   2.7223442e-04   5.1089250e-03   5.8562851e-03   2.2798360e-04   7.3027135e-03   2.0605789e-03   3.8651941e-04   3.1125030e-03   1.3286042e-03   6.3896783e-03   2.7088758e-03   9.7424018e-04   2.1879745e-02   1.1792881e-03   5.1158538e-02   2.8393852e-02   2.0746802e-02   2.9099880e-02   3.4416089e-02   3.3763806e-02   3.5540899e-02   2.8575694e-02   3.6757319e-02   1.5144171e-02   3.2315336e-03   2.0363783e-02   1.3898176e-02   3.6986917e-02   3.6943012e-02   1.1630303e-02   1.6809451e-02   2.0213112e-02   6.0723696e-02   2.7441217e-02   1.5470812e-02   2.3398093e-02   3.8846172e-02   9.0112370e-03   1.5570624e-02   1.4874310e-02   5.5171614e-03   5.6710746e-03   3.3048406e-02   1.1270408e-02   
2.3776077e-02   7.7482583e-03   3.5413096e-02   9.7831561e-03   3.9569593e-02   2.1047269e-02   2.3004495e-02   1.8015778e-02   4.6822715e-03   7.9260822e-03   2.0880056e-02   7.9428824e-03   2.8393852e-02   2.5015882e-02   1.9595645e-02   1.2073748e-02   1.9972960e-02   8.8353740e-03   1.6316616e-02   1.7585650e-02   7.5921246e-04   7.6581519e-03   8.7602132e-03   1.3285408e-02   3.1081233e-03   6.1183864e-03   3.4258252e-04   8.0693053e-03   1.1527108e-03   4.3989827e-03   2.4770294e-02   1.0297780e-02   1.0716436e-02   8.0921947e-03   4.0468338e-03   3.9682629e-03   8.0100500e-03   9.6963228e-03   3.4263963e-03   5.5915243e-03   6.8271736e-03   7.3405400e-03   4.5375223e-04   3.3086446e-04   2.9090971e-03   1.8954919e-02   3.0536401e-03   5.4665114e-03   8.4934379e-03   1.2174442e-02   1.6139108e-02   1.7445625e-02   4.0248005e-03   8.4214697e-04   1.4968032e-02   1.3210674e-03   7.6674238e-03   5.8292743e-03   4.1363018e-03   9.7236406e-03   4.2254644e-03   1.5250051e-02   9.0649565e-03   6.2807525e-03   3.4303230e-02   6.6673397e-03   3.8666536e-02   1.8227060e-02   1.0732501e-02   2.0722480e-02   2.2627004e-02   2.1137328e-02   2.6644819e-02   1.8156140e-02   2.2956019e-02   8.4429075e-03   1.3075172e-03   1.0338488e-02   5.7120788e-03   2.2877982e-02   2.2249653e-02   5.0348740e-03   1.0858478e-02   1.5670368e-02   4.1328272e-02   1.5415087e-02   7.0760309e-03   1.4735153e-02   2.4690692e-02   2.4709117e-03   1.0136092e-02   9.4846153e-03   1.0671740e-03   3.5238125e-03   2.0483172e-02   6.1432951e-03   1.2637418e-02   6.5903261e-03   2.1786492e-02   6.0819601e-03   3.0486925e-02   9.5174641e-03   1.6103409e-02   1.3353819e-02   3.2356456e-03   1.9926682e-03   1.0243487e-02   2.2538296e-03   1.8227060e-02   1.5108364e-02   1.0188754e-02   3.7627514e-03   8.8362152e-03   3.0791096e-03   1.1920070e-02   1.3588585e-02   8.0203401e-03   5.9881494e-03   7.9998519e-03   1.0330180e-03   3.9563353e-03   9.0962406e-05   4.3155161e-03   3.4798297e-04   4.5913809e-03   
1.6934319e-02   5.4778348e-03   1.0415690e-02   5.0724002e-03   7.4143773e-03   1.5625174e-03   8.5090504e-03   5.0363885e-03   7.2978538e-03   5.9687085e-03   3.0800386e-03   3.3854665e-03   6.0369763e-05   9.7766526e-04   2.1106407e-03   1.2150960e-02   8.7050457e-04   2.2450596e-03   4.2656078e-03   1.7573546e-02   1.6541615e-02   1.3010703e-02   1.4863348e-03   2.0318224e-03   1.0715058e-02   5.1959240e-04   9.1091800e-03   4.5577281e-03   1.4679901e-03   5.0882044e-03   3.2208315e-03   1.1416637e-02   6.2084512e-03   2.9761078e-03   2.5010280e-02   3.7556543e-03   4.8641699e-02   2.5613136e-02   1.7022941e-02   2.7738185e-02   3.0983818e-02   2.9540677e-02   3.4381880e-02   2.5612023e-02   3.1832087e-02   1.3388731e-02   2.7364434e-03   1.6556686e-02   1.0555947e-02   3.1795721e-02   3.1144834e-02   9.3248236e-03   1.5850102e-02   2.0586917e-02   5.3133864e-02   2.2885657e-02   1.2291162e-02   2.1220606e-02   3.3851933e-02   5.9611654e-03   1.4821288e-02   1.4065156e-02   3.4935277e-03   5.6486577e-03   2.8790647e-02   1.0114487e-02   1.9481208e-02   8.7344459e-03   3.0476411e-02   9.4253713e-03   3.8601009e-02   1.5492941e-02   2.2081481e-02   1.8082534e-02   4.9261084e-03   5.1866248e-03   1.6531969e-02   3.8794099e-03   2.5613136e-02   2.2037756e-02   1.6246851e-02   7.5034139e-03   1.4652134e-02   6.6254167e-03   1.6367612e-02   1.8037074e-02   3.5963937e-03   1.3253217e-02   6.2726815e-03   2.9800542e-03   8.1679414e-03   8.1170586e-03   1.1643531e-02   4.8148791e-04   2.7366574e-02   1.5666050e-02   3.5602925e-04   4.6080605e-03   1.9717886e-02   6.2846724e-03   1.0858138e-05   1.6108248e-02   9.2526017e-03   1.6342536e-04   1.1289374e-02   1.3182521e-02   7.1086671e-03   4.8107615e-03   2.1505038e-03   2.4867835e-02   7.4585494e-03   9.3457001e-03   1.1859292e-02   8.6506765e-03   1.5825531e-03   7.6287797e-03   7.1464196e-03   1.3469318e-02   7.0587993e-03   4.6011946e-03   3.8423475e-04   1.2933991e-03   8.0463849e-03   1.4044313e-02   1.5554975e-03   
5.8246512e-03   3.7445923e-03   7.8179143e-03   4.3383609e-02   5.1292419e-03   3.1208966e-02   1.7106674e-02   1.5590525e-02   1.4711786e-02   2.2118312e-02   2.3677515e-02   1.8355265e-02   1.7620984e-02   2.7322237e-02   8.2125300e-03   3.2025084e-03   1.5673958e-02   1.1993026e-02   2.8215754e-02   3.0043744e-02   8.3225318e-03   7.3692394e-03   7.0283195e-03   5.0297559e-02   2.1175797e-02   1.1957606e-02   1.3050611e-02   2.8749586e-02   1.0337000e-02   6.4346633e-03   6.2339245e-03   7.0135662e-03   2.1302546e-03   2.3356399e-02   5.5096838e-03   1.8637999e-02   1.1462623e-03   2.6577654e-02   3.5495923e-03   2.0668821e-02   2.2280093e-02   1.0461671e-02   6.3164945e-03   1.6640428e-03   9.2261377e-03   1.7584245e-02   1.7910761e-02   1.7106674e-02   1.5703153e-02   1.4029303e-02   1.7152691e-02   2.0996496e-02   7.4959989e-03   5.5175599e-03   5.5902559e-03   3.2004692e-03   2.3419030e-03   2.3799055e-04   7.0894399e-03   1.3013007e-03   8.8089668e-03   2.9046172e-03   1.1317768e-02   5.4246845e-03   3.1331888e-03   1.2023171e-04   2.4596565e-02   1.8369557e-03   3.9714617e-03   5.9789699e-03   1.7350836e-02   3.3069650e-03   3.5250221e-03   4.6752456e-03   5.9850395e-03   6.3682068e-03   1.7841601e-03   1.0289989e-02   3.0278347e-03   2.8250687e-03   3.3130538e-03   2.2020473e-02   6.7612416e-03   1.5121522e-03   2.2413165e-03   1.4397994e-02   9.1427197e-04   3.3482085e-03   6.1881321e-03   7.4537620e-04   2.6935839e-03   4.4237033e-03   1.1678481e-03   9.0068892e-04   2.9616334e-06   1.6314282e-03   2.2817755e-02   4.4730602e-04   5.5111404e-02   3.4158218e-02   2.9021577e-02   3.2043073e-02   4.1089749e-02   4.2083473e-02   3.7707137e-02   3.4692829e-02   4.6329738e-02   1.9906109e-02   6.9820129e-03   2.8847990e-02   2.2051442e-02   4.7099521e-02   4.8343924e-02   1.7860817e-02   1.9868896e-02   2.0488578e-02   7.4490873e-02   3.6906910e-02   2.3107397e-02   2.8319511e-02   4.8426947e-02   1.7182411e-02   1.8331671e-02   1.7811306e-02   1.2023707e-02   
8.1012404e-03   4.1487060e-02   1.5227136e-02   3.2990763e-02   8.0392167e-03   4.5098943e-02   1.2350504e-02   4.1149609e-02   3.3318195e-02   2.5517275e-02   1.9030633e-02   6.7530681e-03   1.5581926e-02   3.0469465e-02   1.9100479e-02   3.4158218e-02   3.1415243e-02   2.7190491e-02   2.3321177e-02   3.1828581e-02   1.5212907e-02   1.7498443e-02   1.7919743e-02   3.6042298e-03   3.7468828e-03   9.7768723e-03   7.8664808e-04   9.4997776e-03   1.0849175e-02   2.5690262e-03   9.8725606e-04   1.2645853e-02   2.2464135e-03   3.0594913e-02   2.8398433e-03   1.3996699e-02   1.4253747e-03   2.8011999e-02   1.2147431e-02   1.3365379e-03   1.5899927e-03   8.8722552e-03   1.1913349e-02   6.7897739e-03   2.1135064e-03   3.6254453e-03   1.7898196e-03   6.9240260e-04   3.8891340e-02   1.9082524e-02   3.5297395e-03   2.7024302e-03   1.7766622e-02   2.5342822e-03   6.8092652e-03   1.7593080e-02   6.3527044e-03   2.6240465e-03   7.1495011e-04   6.5371413e-03   3.8357773e-03   3.1136154e-03   1.3436442e-03   8.9702416e-03   2.0253115e-03   8.1348350e-02   5.3552444e-02   4.4248656e-02   5.2648527e-02   6.1923652e-02   6.1838703e-02   6.0414517e-02   5.3985372e-02   6.6201925e-02   3.4742321e-02   1.4883289e-02   4.3767758e-02   3.4228373e-02   6.6672589e-02   6.6939218e-02   3.0219175e-02   3.6020463e-02   3.8455365e-02   9.7619080e-02   5.3827294e-02   3.6463335e-02   4.6373140e-02   6.8906148e-02   2.6398982e-02   3.4032020e-02   3.3159361e-02   2.0120628e-02   1.8469842e-02   6.0950543e-02   2.8524017e-02   4.8737718e-02   1.9976319e-02   6.4489083e-02   2.5275791e-02   6.5080438e-02   4.4969016e-02   4.4191042e-02   3.6066964e-02   1.6430448e-02   2.4524789e-02   4.4743887e-02   2.2062365e-02   5.3552444e-02   4.9370398e-02   4.2403953e-02   3.0850071e-02   4.3434971e-02   2.5846885e-02   3.3807321e-02   3.4832107e-02   1.2988107e-03   1.6703175e-03   1.1675843e-03   2.1414615e-03   3.6300242e-03   1.1143098e-02   2.7878082e-03   7.4699291e-03   1.6417856e-03   1.3833381e-02   
5.5119833e-05   6.7870415e-03   2.7321459e-03   1.1691818e-02   4.7830259e-03   1.0265050e-03   1.5056614e-03   1.2372212e-03   2.4522309e-03   1.1336677e-03   8.0050923e-03   5.6870746e-05   4.6915622e-04   1.5562032e-03   2.0887194e-02   1.3125417e-02   6.8837382e-03   7.1162950e-05   5.9522504e-03   5.2043728e-03   5.3649750e-04   8.3200456e-03   2.2696632e-03   1.4322584e-04   2.2876100e-03   1.6070427e-03   5.8684470e-03   2.4558869e-03   5.5570590e-04   1.9651663e-02   8.9172519e-04   5.4659464e-02   3.1114613e-02   2.3119551e-02   3.1736271e-02   3.7415588e-02   3.6761852e-02   3.8397207e-02   3.1313890e-02   3.9883303e-02   1.7150840e-02   4.2007421e-03   2.2713095e-02   1.5842502e-02   4.0120080e-02   4.0057089e-02   1.3428356e-02   1.8845203e-02   2.2251079e-02   6.4673609e-02   3.0152373e-02   1.7530451e-02   2.5859899e-02   4.2056336e-02   1.0552527e-02   1.7518114e-02   1.6789150e-02   6.7575680e-03   6.8601014e-03   3.6016991e-02   1.2994930e-02   2.6303908e-02   8.9623653e-03   3.8482980e-02   1.1333705e-02   4.2542187e-02   2.3287141e-02   2.5337550e-02   1.9991634e-02   5.7451264e-03   9.3841364e-03   2.3239540e-02   8.8973019e-03   3.1114613e-02   2.7595638e-02   2.1905943e-02   1.3658827e-02   2.2169613e-02   1.0410758e-02   1.8207584e-02   1.9484294e-02   4.8008453e-03   1.2650854e-03   6.4006179e-03   1.8900850e-03   1.2501460e-02   5.1363105e-03   3.0955001e-03   2.4385845e-04   2.0020418e-02   1.0205842e-03   3.3467158e-03   5.5033857e-03   1.3788482e-02   2.4101593e-03   2.9461849e-03   4.0056193e-03   3.8916824e-03   4.1559722e-03   7.4799809e-04   1.0679921e-02   1.8732675e-03   2.1213449e-03   3.0477499e-03   1.9047032e-02   6.9834653e-03   2.9438547e-03   1.3758132e-03   1.0992441e-02   2.0426183e-03   1.8183256e-03   5.1275661e-03   3.4870074e-04   1.7800086e-03   4.1941671e-03   4.4278656e-04   2.0594764e-03   2.9165015e-04   1.2452133e-03   2.3762069e-02   2.9165015e-04   5.0966362e-02   3.0166832e-02   2.4694798e-02   2.8788397e-02   
3.6671707e-02   3.7274280e-02   3.4472569e-02   3.0604199e-02   4.1125938e-02   1.6692997e-02   4.7977197e-03   2.4489085e-02   1.8077937e-02   4.1761735e-02   4.2713992e-02   1.4455884e-02   1.7039782e-02   1.8322490e-02   6.7670508e-02   3.2041093e-02   1.9169462e-02   2.4724400e-02   4.3157837e-02   1.3510623e-02   1.5640518e-02   1.5102883e-02   8.9767687e-03   6.0642978e-03   3.6671707e-02   1.2419694e-02   2.8323093e-02   6.4935421e-03   3.9910361e-02   1.0006286e-02   3.7967777e-02   2.8141888e-02   2.2594035e-02   1.6734859e-02   4.8977754e-03   1.2095984e-02   2.5839912e-02   1.5166518e-02   3.0166832e-02   2.7378800e-02   2.3072794e-02   1.8885101e-02   2.6783663e-02   1.1941964e-02   1.5226991e-02   1.5844953e-02   5.5898027e-03   3.6211515e-04   4.6884489e-03   1.9469863e-02   6.9494812e-03   1.0856564e-02   6.2070186e-03   5.9174289e-03   2.3319364e-03   8.6215938e-03   6.4261295e-03   5.9157019e-03   6.0481987e-03   4.2268867e-03   4.5610911e-03   4.8970997e-05   6.8799458e-04   2.4420024e-03   1.4255275e-02   1.5079078e-03   3.2366850e-03   5.6006336e-03   1.5957034e-02   1.6871464e-02   1.4787237e-02   2.2762765e-03   1.3333689e-03   1.2378081e-02   7.3736127e-04   8.8932355e-03   5.1508306e-03   2.2779875e-03   6.5293055e-03   3.6737889e-03   1.2985539e-02   7.3417742e-03   4.0613774e-03   2.7837547e-02   4.7997557e-03   4.5639701e-02   2.3278576e-02   1.4877176e-02   2.5663407e-02   2.8314352e-02   2.6769516e-02   3.2135703e-02   2.3238033e-02   2.8854470e-02   1.1827077e-02   2.2644401e-03   1.4421001e-02   8.8429015e-03   2.8776903e-02   2.8063687e-02   7.8778602e-03   1.4365876e-02   1.9273282e-02   4.9067035e-02   2.0328263e-02   1.0496416e-02   1.9196637e-02   3.0789735e-02   4.6457607e-03   1.3443289e-02   1.2708604e-02   2.5804695e-03   5.0424172e-03   2.6041403e-02   8.8829912e-03   1.7122028e-02   8.2367466e-03   2.7547637e-02   8.4529986e-03   3.6274794e-02   1.3225786e-02   2.0335647e-02   1.6784857e-02   4.4669319e-03   3.9860599e-03   
1.4316384e-02   2.9523808e-03   2.3278576e-02   1.9803941e-02   1.4205949e-02   5.9492702e-03   1.2454271e-02   5.3895801e-03   1.5143895e-02   1.6858271e-02   5.9387347e-03   5.9571758e-03   6.0526515e-03   1.4146691e-03   8.1320415e-03   6.3420687e-04   2.2943426e-02   7.2217865e-04   8.7156753e-03   1.7123420e-03   1.9617600e-02   7.0385795e-03   6.4044399e-04   1.1390332e-03   4.8019256e-03   6.7384180e-03   2.9679315e-03   4.6026472e-03   1.3268878e-03   5.4152853e-04   4.6516773e-04   2.8772311e-02   1.3783757e-02   3.2459159e-03   7.3821107e-04   1.2234521e-02   2.0992860e-03   3.0774015e-03   1.1372571e-02   2.9352447e-03   8.1022588e-04   9.2842537e-04   2.8855444e-03   2.9418595e-03   1.2977062e-03   1.2869337e-04   1.4124612e-02   3.4246729e-04   6.6626445e-02   4.1505551e-02   3.3416051e-02   4.0874061e-02   4.8928444e-02   4.8860648e-02   4.7883828e-02   4.1881126e-02   5.2796241e-02   2.5141882e-02   8.8605790e-03   3.3017625e-02   2.4870522e-02   5.3247026e-02   5.3590923e-02   2.1349209e-02   2.6308756e-02   2.8721666e-02   8.1439460e-02   4.1842449e-02   2.6703197e-02   3.5208876e-02   5.5212555e-02   1.8433774e-02   2.4622301e-02   2.3863121e-02   1.3181398e-02   1.1675955e-02   4.8074316e-02   1.9890418e-02   3.7380285e-02   1.3082557e-02   5.1276398e-02   1.7233265e-02   5.2150081e-02   3.4634218e-02   3.3454504e-02   2.6550201e-02   1.0070918e-02   1.6842626e-02   3.3968952e-02   1.6166125e-02   4.1505551e-02   3.7811539e-02   3.1780349e-02   2.2766214e-02   3.3244526e-02   1.7747224e-02   2.4583452e-02   2.5577750e-02   7.4058632e-03   1.7632325e-02   5.8672157e-03   1.4554963e-02   7.5139440e-03   6.3063306e-03   2.8227254e-03   1.2216172e-02   5.2171506e-03   8.3411571e-03   9.1198919e-03   3.7613430e-03   3.7527482e-03   5.6979631e-04   2.0412760e-03   4.1677011e-03   1.2238157e-02   1.7043766e-03   3.0984992e-03   5.1374687e-03   2.0730806e-02   2.1628161e-02   1.6669142e-02   2.5627983e-03   1.3966031e-03   1.3985502e-02   1.7150229e-03   
1.2743810e-02   7.3773848e-03   2.3644296e-03   5.7186323e-03   5.6669191e-03   1.5046711e-02   9.0550035e-03   4.3221940e-03   2.4177800e-02   5.7926331e-03   5.2977640e-02   2.8412846e-02   1.8464622e-02   3.1548301e-02   3.3726407e-02   3.1637531e-02   3.8729422e-02   2.8292534e-02   3.3574657e-02   1.5809867e-02   4.4178873e-03   1.7890807e-02   1.1580028e-02   3.3334020e-02   3.2156672e-02   1.0943899e-02   1.8967943e-02   2.4791881e-02   5.4103864e-02   2.4225163e-02   1.3643641e-02   2.4071431e-02   3.5681100e-02   6.5373199e-03   1.7959936e-02   1.7099713e-02   4.3929724e-03   8.0887873e-03   3.0809355e-02   1.2543658e-02   2.0721585e-02   1.2037199e-02   3.2114506e-02   1.2207760e-02   4.3300211e-02   1.5218252e-02   2.5733556e-02   2.1932604e-02   7.3680112e-03   5.8739609e-03   1.7443010e-02   2.6299180e-03   2.8412846e-02   2.4437667e-02   1.7887656e-02   6.9159049e-03   1.4494165e-02   7.9434468e-03   2.0060731e-02   2.2068579e-02   2.3875094e-02   1.2000415e-02   1.3948342e-03   3.4546351e-03   1.4939730e-02   3.7950422e-03   6.0175173e-04   1.2211196e-02   7.0052335e-03   9.1256063e-05   8.0336825e-03   9.5484059e-03   3.8945506e-03   2.3206010e-03   7.0732153e-04   2.0727990e-02   4.4889370e-03   6.3258821e-03   8.7927798e-03   9.0069682e-03   3.7952179e-03   7.8092546e-03   4.4335690e-03   9.0550736e-03   6.7946167e-03   2.1531805e-03   9.6426638e-04   7.0770423e-04   5.1150602e-03   1.0670446e-02   5.5187206e-04   6.0114634e-03   3.0771240e-03   5.3655604e-03   3.8039285e-02   3.5100982e-03   3.3392676e-02   1.7228333e-02   1.4023464e-02   1.5984642e-02   2.2267037e-02   2.3075907e-02   2.0330300e-02   1.7609739e-02   2.6354220e-02   7.6657981e-03   1.4938436e-03   1.3975046e-02   9.8087979e-03   2.7013992e-02   2.8227679e-02   6.7815824e-03   7.6426994e-03   8.5739542e-03   4.8734857e-02   1.9601188e-02   1.0188446e-02   1.3147209e-02   2.7911001e-02   7.5145131e-03   6.7053137e-03   6.3732135e-03   4.4764490e-03   1.3642012e-03   2.2654181e-02   
4.8866188e-03   1.6888522e-02   1.3930577e-03   2.5469441e-02   3.2860894e-03   2.3095694e-02   1.8694362e-02   1.1451986e-02   7.4272413e-03   8.6465452e-04   6.5073035e-03   1.5400191e-02   1.2824348e-02   1.7228333e-02   1.5313622e-02   1.2672619e-02   1.3006211e-02   1.7530867e-02   5.5400420e-03   6.4187909e-03   6.8900377e-03   3.1968437e-03   2.6175875e-02   9.5689725e-03   4.4766639e-02   9.9845360e-03   2.8418681e-02   3.6870693e-03   4.5253718e-02   2.5815239e-02   5.6468536e-03   5.2271680e-03   1.8578768e-02   2.3906603e-02   1.7414675e-02   6.6309054e-04   1.0701959e-02   7.1563943e-03   4.3943049e-03   6.0754711e-02   3.4699507e-02   9.6990814e-03   9.4374842e-03   2.8881695e-02   8.6238198e-03   1.6549411e-02   3.3492594e-02   1.6963829e-02   8.9575119e-03   3.4839312e-03   1.7214511e-02   1.0985852e-02   1.1115876e-02   7.0774378e-03   2.3223923e-03   9.1191389e-03   1.1169736e-01   7.8250518e-02   6.5808362e-02   7.7763348e-02   8.8114611e-02   8.7449743e-02   8.7215232e-02   7.8675005e-02   9.2203874e-02   5.5132383e-02   2.8794481e-02   6.5100824e-02   5.3025891e-02   9.2531519e-02   9.2220151e-02   4.8731252e-02   5.7186075e-02   6.0580024e-02   1.2745253e-01   7.7174441e-02   5.6198920e-02   6.9678402e-02   9.5449745e-02   4.2538093e-02   5.4722713e-02   5.3576276e-02   3.4860888e-02   3.4291475e-02   8.6329226e-02   4.7343142e-02   7.0978985e-02   3.6695928e-02   9.0088005e-02   4.3430772e-02   9.2828505e-02   6.4239353e-02   6.7481588e-02   5.7536867e-02   3.1542812e-02   4.0297150e-02   6.5753742e-02   3.3241661e-02   7.8250518e-02   7.2933055e-02   6.3794057e-02   4.6178781e-02   6.2571051e-02   4.2843100e-02   5.4666081e-02   5.6027673e-02   1.6139749e-02   3.9412782e-03   2.4265145e-02   2.3891707e-03   1.6498373e-02   4.1418612e-05   2.4988702e-02   1.3783039e-02   4.3146851e-04   2.5122323e-04   6.4815288e-03   1.0052630e-02   7.0587637e-03   1.3669961e-03   2.4020339e-03   9.7331181e-04   2.7171014e-04   3.8783427e-02   2.3881244e-02   
7.7285447e-03   2.0010737e-03   1.2962382e-02   6.0316172e-03   5.6775409e-03   1.9642810e-02   8.0347010e-03   1.6772041e-03   5.1201506e-05   7.5139419e-03   7.7685816e-03   5.4104161e-03   1.3577183e-03   7.6646931e-03   3.0214900e-03   8.1621829e-02   5.2182884e-02   4.1040324e-02   5.3055858e-02   6.0133087e-02   5.8980137e-02   6.1457708e-02   5.2397706e-02   6.2608146e-02   3.3589093e-02   1.3750660e-02   4.0409320e-02   3.0837807e-02   6.2732754e-02   6.2152008e-02   2.7986481e-02   3.5988306e-02   4.0227814e-02   9.1799153e-02   5.0051858e-02   3.3478326e-02   4.5393111e-02   6.5357720e-02   2.2750138e-02   3.4139497e-02   3.3132950e-02   1.7367166e-02   1.8328224e-02   5.7997026e-02   2.7720666e-02   4.5013892e-02   2.1236242e-02   6.0787869e-02   2.5266646e-02   6.6573172e-02   3.9086224e-02   4.4693577e-02   3.7322864e-02   1.6444309e-02   2.1159286e-02   4.0674593e-02   1.6113197e-02   5.2182884e-02   4.7503868e-02   3.9598661e-02   2.5231075e-02   3.7795470e-02   2.3382629e-02   3.4903444e-02   3.6481922e-02   4.3266011e-03   2.5120283e-02   7.2104610e-03   3.6362605e-04   1.6818393e-02   1.3224351e-02   8.9751703e-04   1.2039448e-02   1.4078561e-02   9.5477546e-03   7.2983976e-03   3.1999143e-03   2.4639885e-02   8.8187561e-03   1.0207680e-02   1.2236717e-02   1.1661588e-02   8.0001293e-04   5.7297070e-03   8.1532898e-03   1.7517743e-02   5.5505771e-03   6.3202290e-03   1.2261175e-03   1.5275601e-03   9.1326114e-03   1.4417917e-02   2.2056571e-03   4.2297788e-03   3.2218738e-03   8.2583968e-03   4.2679879e-02   5.2144338e-03   3.5325690e-02   2.1286357e-02   2.0340751e-02   1.7932964e-02   2.6773752e-02   2.8858059e-02   2.1473619e-02   2.1925037e-02   3.2987864e-02   1.1596147e-02   5.5679058e-03   2.0475433e-02   1.6418368e-02   3.4058657e-02   3.6285268e-02   1.2036020e-02   1.0227865e-02   8.9974606e-03   5.7847652e-02   2.6531721e-02   1.6295053e-02   1.6788576e-02   3.4473710e-02   1.4498141e-02   9.1389529e-03   8.9727022e-03   1.0441422e-02   
4.2212624e-03   2.8555402e-02   8.4473824e-03   2.3791713e-02   2.5698702e-03   3.2242281e-02   5.9266749e-03   2.3676462e-02   2.8216759e-02   1.3343464e-02   8.4809495e-03   3.5554866e-03   1.3166972e-02   2.2745344e-02   2.2629651e-02   2.1286357e-02   1.9992085e-02   1.8520730e-02   2.2324446e-02   2.6771848e-02   1.1107369e-02   7.6788410e-03   7.4852977e-03   2.3512869e-02   1.1702196e-03   5.0486445e-03   4.4067902e-03   1.7493736e-02   4.0658633e-03   2.3529372e-03   3.3047066e-03   5.2094902e-03   6.0430183e-03   1.7805903e-03   8.3604250e-03   2.1645857e-03   1.8214590e-03   2.1737913e-03   2.3530298e-02   8.5714286e-03   1.8444368e-03   1.4565864e-03   1.3243081e-02   1.0630397e-03   2.8808992e-03   7.3652636e-03   1.0835317e-03   1.7933322e-03   3.0961279e-03   1.3436220e-03   1.3043704e-03   1.2482375e-04   8.8034137e-04   2.0043242e-02   1.2482375e-04   5.7954445e-02   3.5775683e-02   2.9742194e-02   3.4149795e-02   4.2824229e-02   4.3477315e-02   4.0214177e-02   3.6256651e-02   4.7605297e-02   2.0950874e-02   7.1042975e-03   2.9499916e-02   2.2329817e-02   4.8271691e-02   4.9230028e-02   1.8375611e-02   2.1296157e-02   2.2492082e-02   7.5802989e-02   3.7751582e-02   2.3617894e-02   2.9827991e-02   4.9792717e-02   1.7017775e-02   1.9724142e-02   1.9131907e-02   1.1873908e-02   8.7349825e-03   4.2824229e-02   1.6130090e-02   3.3687060e-02   9.1410950e-03   4.6289887e-02   1.3350026e-02   4.3904542e-02   3.3071376e-02   2.7381594e-02   2.0813943e-02   7.3233676e-03   1.5432666e-02   3.0904955e-02   1.7682128e-02   3.5775683e-02   3.2747980e-02   2.7982984e-02   2.2571206e-02   3.1620085e-02   1.5454693e-02   1.9154732e-02   1.9759156e-02   1.5625666e-02   2.0014241e-02   2.2906319e-02   3.3689213e-03   1.6561417e-02   1.9555072e-02   1.9724522e-02   6.8229205e-03   6.0996359e-03   1.3507575e-02   3.5744409e-02   1.3348521e-02   1.7651623e-02   2.2506296e-02   1.4148535e-02   3.1413076e-02   3.8119216e-02   1.5501824e-02   1.7783197e-03   3.4445966e-02   
9.9745771e-03   1.7822684e-02   1.8993068e-02   1.5436010e-02   2.3911299e-02   1.6070505e-02   3.4793519e-02   2.5109394e-02   1.9730191e-02   5.3384757e-02   2.0981999e-02   3.5016323e-02   1.5808551e-02   7.2216559e-03   2.1250521e-02   1.8323057e-02   1.5289849e-02   2.7077232e-02   1.5342317e-02   1.5450153e-02   9.5719689e-03   7.0769801e-03   6.7142336e-03   3.8618160e-03   1.4833874e-02   1.3052654e-02   5.5299114e-03   1.3477582e-02   2.0871841e-02   2.6745939e-02   9.5407500e-03   5.3018257e-03   1.4058965e-02   1.6863810e-02   2.0380823e-03   1.3353665e-02   1.2655783e-02   3.2775616e-03   9.7625272e-03   1.4632542e-02   9.0437643e-03   7.6711708e-03   1.4740530e-02   1.4381411e-02   1.0907741e-02   3.1026769e-02   2.6758954e-03   1.8148668e-02   1.8095332e-02   1.0256426e-02   2.3444102e-03   5.5327937e-03   1.0086149e-03   1.5808551e-02   1.2612237e-02   7.5482105e-03   2.3261323e-04   2.5482034e-03   4.2287654e-03   1.6814254e-02   1.9242327e-02   6.8098804e-03   2.4185222e-03   1.3095995e-02   4.9100577e-03   8.0319599e-04   1.3085229e-03   1.8113887e-03   3.1306373e-03   1.2541646e-03   7.2250359e-03   1.5182032e-04   3.2471009e-04   1.1793141e-03   2.2134336e-02   1.2798604e-02   5.8036245e-03   2.9068060e-05   7.1481800e-03   4.2574873e-03   8.5847073e-04   8.5715637e-03   2.1039195e-03   1.1389023e-04   1.8746094e-03   1.5895426e-03   4.9316123e-03   1.9257560e-03   2.7708506e-04   1.8515651e-02   5.3986259e-04   5.6637702e-02   3.2886175e-02   2.4967826e-02   3.3187187e-02   3.9414817e-02   3.8923256e-02   3.9880016e-02   3.3128964e-02   4.2231952e-02   1.8470852e-02   4.9097912e-03   2.4570551e-02   1.7467535e-02   4.2525770e-02   4.2580968e-02   1.4790908e-02   2.0011146e-02   2.3142926e-02   6.7845285e-02   3.2285576e-02   1.9161137e-02   2.7420216e-02   4.4447416e-02   1.1980855e-02   1.8611023e-02   1.7883360e-02   7.8729229e-03   7.5466400e-03   3.8175619e-02   1.4096206e-02   2.8321778e-02   9.4496873e-03   4.0815141e-02   1.2211823e-02   
4.4024579e-02   2.5465922e-02   2.6596134e-02   2.0925904e-02   6.3340227e-03   1.0720439e-02   2.5211490e-02   1.0333158e-02   3.2886175e-02   2.9360405e-02   2.3656085e-02   1.5413665e-02   2.4285913e-02   1.1678199e-02   1.9116491e-02   2.0313205e-02   1.6950975e-02   9.2576768e-03   2.2830227e-04   1.1996475e-02   1.3943618e-02   7.5471408e-03   5.0950912e-03   2.4447813e-03   2.5903324e-02   8.0139448e-03   9.9879335e-03   1.2585436e-02   8.2754975e-03   1.4174352e-03   8.0478199e-03   7.7042954e-03   1.3901912e-02   7.5078674e-03   4.9957099e-03   2.9997761e-04   1.5395926e-03   8.6362248e-03   1.4833108e-02   1.8222710e-03   6.1957159e-03   4.1238914e-03   8.4102568e-03   4.4734160e-02   5.6076899e-03   3.0288189e-02   1.6597244e-02   1.5336169e-02   1.4117002e-02   2.1529085e-02   2.3159309e-02   1.7633168e-02   1.7119251e-02   2.6802924e-02   7.9632454e-03   3.3472740e-03   1.5438450e-02   1.1914773e-02   2.7716384e-02   2.9607261e-02   8.2241873e-03   7.0346123e-03   6.5727745e-03   4.9604200e-02   2.0828511e-02   1.1808642e-02   1.2615158e-02   2.8194719e-02   1.0429441e-02   6.1245969e-03   5.9452412e-03   7.1618546e-03   2.1261487e-03   2.2855826e-02   5.3452154e-03   1.8357871e-02   1.0397070e-03   2.6087088e-02   3.3990115e-03   1.9871685e-02   2.2251351e-02   9.9773797e-03   5.9192173e-03   1.6976055e-03   9.3317377e-03   1.7391404e-02   1.8375579e-02   1.6597244e-02   1.5280892e-02   1.3772451e-02   1.7356806e-02   2.0968489e-02   7.4978418e-03   5.1634523e-03   5.1963190e-03   2.4233623e-02   1.4077025e-02   4.3787020e-04   1.8512542e-04   6.0477386e-03   9.6703484e-03   7.1488601e-03   1.5477068e-03   2.2742281e-03   9.7171923e-04   3.9271863e-04   3.8509554e-02   2.4774335e-02   8.7360281e-03   1.9891111e-03   1.1995846e-02   6.8973897e-03   5.5019459e-03   1.9965157e-02   8.4152844e-03   1.6247783e-03   1.4531219e-04   7.7480972e-03   8.6972032e-03   5.9824611e-03   1.5176827e-03   7.8567259e-03   3.3493021e-03   8.1204385e-02   5.1575806e-02   
4.0135980e-02   5.2803481e-02   5.9400299e-02   5.8042463e-02   6.1291964e-02   5.1745941e-02   6.1508632e-02   3.3138226e-02   1.3469654e-02   3.9479896e-02   2.9967140e-02   6.1565597e-02   6.0825097e-02   2.7361698e-02   3.5748038e-02   4.0333571e-02   9.0138895e-02   4.8984196e-02   3.2667108e-02   4.4902460e-02   6.4253355e-02   2.1894327e-02   3.3940189e-02   3.2913172e-02   1.6731486e-02   1.8202458e-02   5.7045856e-02   2.7382191e-02   4.3985602e-02   2.1375556e-02   5.9676750e-02   2.5108522e-02   6.6474949e-02   3.7689168e-02   4.4508235e-02   3.7339453e-02   1.6368548e-02   2.0371230e-02   3.9606943e-02   1.4908799e-02   5.1575806e-02   4.6824025e-02   3.8781771e-02   2.3989539e-02   3.6452117e-02   2.2748990e-02   3.4902979e-02   3.6586492e-02   7.5459794e-03   1.9125850e-02   2.0310725e-02   6.1222293e-03   3.4447226e-03   8.1418452e-03   3.7867035e-02   1.2077489e-02   1.6567789e-02   2.1605044e-02   3.9038067e-03   1.6430417e-02   2.8689503e-02   1.3569150e-02   4.2181405e-03   2.6167927e-02   7.2610935e-03   7.0630535e-03   1.1392972e-02   1.4088158e-02   2.3849569e-02   9.5566395e-03   2.5308356e-02   1.7804542e-02   1.7150362e-02   5.8921861e-02   1.6182419e-02   2.0027139e-02   6.1819655e-03   2.0410396e-03   8.6030014e-03   8.6357480e-03   7.5787415e-03   1.2598904e-02   6.0656768e-03   8.7139108e-03   1.7031846e-03   2.3331644e-03   1.8821103e-03   3.1682605e-04   8.7160052e-03   8.5600328e-03   2.8826583e-04   3.4529372e-03   7.5176091e-03   2.1668627e-02   4.3875084e-03   6.5654007e-04   4.4940457e-03   9.7914071e-03   1.9831433e-04   3.3391345e-03   2.9870521e-03   6.9985633e-04   2.5162956e-03   7.1853248e-03   1.3860021e-03   2.9754449e-03   4.8339512e-03   8.0121089e-03   2.3142577e-03   1.5373296e-02   2.8512223e-03   6.2399212e-03   5.8796015e-03   3.1092372e-03   2.3415322e-04   1.9852251e-03   5.2321529e-03   6.1819655e-03   4.3274341e-03   1.8113019e-03   1.9110595e-03   2.4033410e-03   9.3130265e-05   5.1498910e-03   6.5356041e-03   
9.5659910e-03   1.1252405e-02   5.1556774e-03   3.2048021e-03   1.2695843e-03   2.2856756e-02   5.7848184e-03   7.7211451e-03   1.0283621e-02   8.3602499e-03   2.7615733e-03   7.9786726e-03   5.6505970e-03   1.0691105e-02   7.1332727e-03   3.1295602e-03   5.2401241e-04   9.5266815e-04   6.4319329e-03   1.2318850e-02   9.4872145e-04   6.1326072e-03   3.4758657e-03   6.5333653e-03   4.0853883e-02   4.3049208e-03   3.1649609e-02   1.6528693e-02   1.4101758e-02   1.4838879e-02   2.1483291e-02   2.2597676e-02   1.8841242e-02   1.6959579e-02   2.5991410e-02   7.4250762e-03   1.9970150e-03   1.4113092e-02   1.0250250e-02   2.6744340e-02   2.8212793e-02   6.9997737e-03   7.0607843e-03   7.4739664e-03   4.8397396e-02   1.9593840e-02   1.0430940e-02   1.2525634e-02   2.7473469e-02   8.3209543e-03   6.1476473e-03   5.8775226e-03   5.2455782e-03   1.4265861e-03   2.2224591e-02   4.7507930e-03   1.7000338e-02   1.0369313e-03   2.5178358e-02   3.0495461e-03   2.1392322e-02   1.9628131e-02   1.0500631e-02   6.5204272e-03   9.7357638e-04   7.2933833e-03   1.5732817e-02   1.4704532e-02   1.6528693e-02   1.4858839e-02   1.2681737e-02   1.4339393e-02   1.8427144e-02   5.9852538e-03   5.6194230e-03   5.9265324e-03   8.2461319e-05   3.7688699e-03   6.4113525e-03   4.0576095e-03   3.3120738e-03   8.1782115e-04   1.0911416e-04   1.0750826e-04   3.1097496e-02   1.8955233e-02   6.6951403e-03   5.7580225e-04   9.5785875e-03   5.0095389e-03   2.9949836e-03   1.4512717e-02   5.0859668e-03   4.0940931e-04   2.6323499e-04   4.5146422e-03   6.3221754e-03   3.5606197e-03   3.7002765e-04   1.1722539e-02   1.5048339e-03   7.0395338e-02   4.3268177e-02   3.3370831e-02   4.4028795e-02   5.0578307e-02   4.9629423e-02   5.1746432e-02   4.3478758e-02   5.3066435e-02   2.6481479e-02   9.3392135e-03   3.2828923e-02   2.4313691e-02   5.3237993e-02   5.2867243e-02   2.1623243e-02   2.8596153e-02   3.2496284e-02   8.0563616e-02   4.1614554e-02   2.6578058e-02   3.7074597e-02   5.5590704e-02   1.7323162e-02   
2.6950223e-02   2.6053254e-02   1.2554151e-02   1.3157434e-02   4.8740123e-02   2.1279987e-02   3.7040705e-02   1.5739340e-02   5.1410982e-02   1.9129885e-02   5.6480014e-02   3.2271228e-02   3.6432542e-02   2.9847396e-02   1.1573499e-02   1.5895178e-02   3.3202746e-02   1.2659655e-02   4.3268177e-02   3.9046279e-02   3.2014714e-02   2.0067384e-02   3.1045819e-02   1.7642117e-02   2.7677552e-02   2.9133996e-02   4.1910853e-03   7.2076003e-03   5.1147587e-03   2.7507325e-03   1.1620134e-03   3.3310681e-04   2.1458858e-04   3.3405406e-02   2.1499526e-02   8.0088547e-03   9.8332257e-04   9.6892205e-03   6.1744631e-03   3.6704596e-03   1.6544301e-02   6.4217725e-03   7.2111274e-04   2.1270559e-04   5.7183304e-03   7.6981750e-03   4.7117366e-03   8.0177817e-04   1.0442534e-02   2.2910923e-03   7.3784227e-02   4.5661689e-02   3.5029236e-02   4.6823533e-02   5.3065363e-02   5.1836541e-02   5.4866908e-02   4.5828107e-02   5.5174565e-02   2.8408213e-02   1.0505539e-02   3.4430852e-02   2.5609623e-02   5.5260802e-02   5.4654205e-02   2.3114484e-02   3.0836583e-02   3.5207839e-02   8.2686924e-02   4.3372344e-02   2.8064998e-02   3.9380344e-02   5.7772002e-02   1.8254235e-02   2.9162191e-02   2.8206341e-02   1.3497345e-02   1.4745279e-02   5.0900941e-02   2.3090436e-02   3.8677830e-02   1.7710943e-02   5.3450122e-02   2.1019262e-02   5.9801906e-02   3.3130692e-02   3.9030212e-02   3.2379101e-02   1.3108535e-02   1.6839016e-02   3.4625964e-02   1.2551681e-02   4.5661689e-02   4.1207031e-02   3.3732757e-02   2.0513590e-02   3.1937443e-02   1.8908607e-02   3.0105345e-02   3.1711397e-02   5.5341249e-04   1.8100865e-03   1.3685556e-02   1.1595059e-03   2.7733190e-03   5.0259130e-03   1.5582273e-02   1.5303863e-02   1.3216313e-02   1.8061219e-03   1.8483286e-03   1.0961959e-02   4.0632184e-04   7.9419645e-03   4.2030049e-03   1.8572156e-03   5.9964081e-03   2.8843938e-03   1.1487297e-02   6.2195738e-03   3.4171878e-03   2.7328194e-02   3.9549773e-03   4.5323537e-02   2.3233798e-02   
1.5194289e-02   2.5226902e-02   2.8389486e-02   2.7075250e-02   3.1585337e-02   2.3242139e-02   2.9332670e-02   1.1663012e-02   1.9950589e-03   1.4768251e-02   9.1566367e-03   2.9330780e-02   2.8799195e-02   7.9264454e-03   1.3962831e-02   1.8481959e-02   5.0115855e-02   2.0802698e-02   1.0733745e-02   1.9041166e-02   3.1266982e-02   4.9760517e-03   1.3001310e-02   1.2292231e-02   2.6877167e-03   4.5798906e-03   2.6364594e-02   8.6153969e-03   1.7566530e-02   7.4744318e-03   2.8043210e-02   7.9969256e-03   3.5640074e-02   1.4109863e-02   1.9847472e-02   1.6098385e-02   3.9587699e-03   4.2468630e-03   1.4818700e-02   3.7268555e-03   2.3233798e-02   1.9851942e-02   1.4429507e-02   6.7117241e-03   1.3281147e-02   5.4664867e-03   1.4483311e-02   1.6079322e-02   1.4644608e-03   1.8793440e-02   2.6529941e-03   4.9386280e-03   7.8423223e-03   1.0293845e-02   1.1871124e-02   1.4064748e-02   3.3517168e-03   2.2145112e-03   1.1983758e-02   7.1285200e-04   4.9198989e-03   3.5944894e-03   3.6244119e-03   9.2640212e-03   2.3814700e-03   1.1959906e-02   6.6391160e-03   5.2659669e-03   3.4768199e-02   5.0225808e-03   3.6096256e-02   1.6893661e-02   1.0608435e-02   1.8369401e-02   2.1445998e-02   2.0588370e-02   2.3840862e-02   1.6945577e-02   2.2798129e-02   7.2320779e-03   4.5716975e-04   1.0311574e-02   5.8475513e-03   2.2922553e-02   2.2802937e-02   4.5367353e-03   8.9750660e-03   1.2721749e-02   4.2209187e-02   1.5508457e-02   6.9302761e-03   1.3261817e-02   2.4477020e-02   2.9037640e-03   8.2014592e-03   7.6405998e-03   1.0779902e-03   2.0077167e-03   2.0000571e-02   4.8259503e-03   1.2764273e-02   4.2375783e-03   2.1711543e-02   4.3575270e-03   2.7378197e-02   1.1026559e-02   1.3799894e-02   1.0731586e-02   1.6733633e-03   2.2875292e-03   1.0634216e-02   4.2659077e-03   1.6893661e-02   1.4117780e-02   9.8480141e-03   5.3491991e-03   1.0216132e-02   2.7977678e-03   9.4220767e-03   1.0761419e-02   1.4255275e-02   1.6372127e-03   2.8542706e-03   4.7214122e-03   1.3332443e-02   
7.0444510e-03   6.5532524e-03   1.6174955e-03   6.7559932e-03   5.2389698e-03   5.3640347e-04   3.3171183e-03   5.1283913e-04   2.0296126e-03   6.0950963e-03   1.2744293e-04   5.0948120e-03   1.9318373e-03   2.3608728e-03   2.9089432e-02   1.4755776e-03   4.1865124e-02   2.2429622e-02   1.7100637e-02   2.2021240e-02   2.8022948e-02   2.8179896e-02   2.7393595e-02   2.2729266e-02   3.1385076e-02   1.0928177e-02   1.7748140e-03   1.6893502e-02   1.1535806e-02   3.1856326e-02   3.2502251e-02   8.7863970e-03   1.1670124e-02   1.3706425e-02   5.4822004e-02   2.3316700e-02   1.2494202e-02   1.7833364e-02   3.3214276e-02   7.9216177e-03   1.0562159e-02   1.0054198e-02   4.5572702e-03   2.8793276e-03   2.7614983e-02   7.5454514e-03   2.0103411e-02   3.8323098e-03   3.0272091e-02   5.9246169e-03   3.0771609e-02   1.9772265e-02   1.6666117e-02   1.2065056e-02   2.1106407e-03   6.8455106e-03   1.7918371e-02   1.0155544e-02   2.2429622e-02   1.9803941e-02   1.5812785e-02   1.2351590e-02   1.8630292e-02   6.7616623e-03   1.0713593e-02   1.1544341e-02   7.3852619e-03   4.6030790e-03   2.5842708e-03   5.4300453e-02   3.3576641e-02   1.0668428e-02   6.6015056e-03   2.1814225e-02   9.0913366e-03   1.2584515e-02   3.0243821e-02   1.4861127e-02   6.0467884e-03   1.7442631e-03   1.4568415e-02   1.1492675e-02   1.0173171e-02   5.0439803e-03   2.6472861e-03   7.4513739e-03   1.0326079e-01   7.0098941e-02   5.7127711e-02   7.0823391e-02   7.9250920e-02   7.7946311e-02   8.0298570e-02   7.0363352e-02   8.2049228e-02   4.8294128e-02   2.3708406e-02   5.6369153e-02   4.4949110e-02   8.2157728e-02   8.1370723e-02   4.1592461e-02   5.0974970e-02   5.5456655e-02   1.1462128e-01   6.7593968e-02   4.8172633e-02   6.2189094e-02   8.5184220e-02   3.4981615e-02   4.8742735e-02   4.7569879e-02   2.8345798e-02   2.9430788e-02   7.6817222e-02   4.1186102e-02   6.1732592e-02   3.2638549e-02   7.9961084e-02   3.8043139e-02   8.5993831e-02   5.4182844e-02   6.1100206e-02   5.2203816e-02   2.6988218e-02   
3.3049806e-02   5.6579852e-02   2.5305632e-02   7.0098941e-02   6.4715083e-02   5.5458622e-02   3.7374533e-02   5.2731313e-02   3.5938214e-02   4.9377578e-02   5.1061578e-02   3.5870177e-04   1.3937749e-03   2.2192133e-02   1.4887667e-02   7.8001469e-03   8.8171759e-05   5.5216302e-03   5.9793748e-03   7.0276376e-04   9.5746848e-03   3.0437686e-03   8.2181914e-05   2.0068727e-03   2.2506604e-03   6.8090409e-03   3.1469229e-03   6.3301284e-04   1.8528100e-02   1.2625125e-03   5.6704255e-02   3.2399710e-02   2.3806821e-02   3.3394775e-02   3.8737297e-02   3.7833623e-02   4.0310184e-02   3.2556548e-02   4.0845247e-02   1.8140183e-02   4.6897646e-03   2.3353931e-02   1.6279158e-02   4.1004664e-02   4.0734447e-02   1.4064354e-02   2.0110135e-02   2.3937572e-02   6.5512797e-02   3.0871191e-02   1.8117892e-02   2.7110445e-02   4.3069544e-02   1.0732705e-02   1.8772743e-02   1.7996705e-02   7.0003565e-03   7.6876035e-03   3.7052349e-02   1.3928781e-02   2.6945955e-02   1.0128877e-02   3.9392715e-02   1.2368176e-02   4.4616010e-02   2.3284644e-02   2.6871325e-02   2.1527670e-02   6.5473767e-03   9.5899926e-03   2.3719212e-02   8.2529644e-03   3.2399710e-02   2.8696517e-02   2.2657048e-02   1.3399527e-02   2.2203050e-02   1.0884330e-02   1.9665180e-02   2.1074599e-02   3.3845415e-04   2.7555720e-02   1.6677843e-02   6.3795274e-03   1.8362843e-04   8.3039101e-03   4.7168779e-03   1.9845708e-03   1.2200549e-02   3.8746792e-03   1.0179352e-04   6.8781671e-04   3.2951534e-03   5.8068152e-03   2.8838183e-03   1.7929502e-04   1.4079350e-02   1.0261263e-03   6.5064614e-02   3.9139337e-02   2.9910766e-02   3.9801101e-02   4.6137717e-02   4.5316574e-02   4.7159535e-02   3.9352955e-02   4.8674767e-02   2.3254889e-02   7.4636086e-03   2.9417103e-02   2.1432655e-02   4.8877998e-02   4.8632496e-02   1.8795647e-02   2.5200016e-02   2.8886320e-02   7.5371106e-02   3.7782169e-02   2.3498806e-02   3.3236011e-02   5.1084684e-02   1.4998262e-02   2.3652548e-02   2.2813769e-02   1.0515841e-02   
1.0877794e-02   4.4476659e-02   1.8379623e-02   3.3440220e-02   1.3262540e-02   4.7104689e-02   1.6362923e-02   5.1690239e-02   2.9302173e-02   3.2587374e-02   2.6378456e-02   9.4404077e-03   1.3645256e-02   2.9862026e-02   1.1376615e-02   3.9139337e-02   3.5158865e-02   2.8588131e-02   1.7920264e-02   2.8103943e-02   1.5132216e-02   2.4337714e-02   2.5719532e-02   3.3433714e-02   1.9092201e-02   5.6820829e-03   9.6311300e-04   1.1687721e-02   4.1741711e-03   3.9152052e-03   1.5429746e-02   5.3660274e-03   8.0766059e-04   9.2937667e-05   4.9943675e-03   5.5161799e-03   3.3131521e-03   4.2533013e-04   1.0435178e-02   1.4826896e-03   7.3827823e-02   4.6346021e-02   3.6506307e-02   4.6683572e-02   5.3981470e-02   5.3241058e-02   5.4467293e-02   4.6614647e-02   5.6933591e-02   2.8894083e-02   1.0839802e-02   3.5973376e-02   2.7110780e-02   5.7179243e-02   5.6957917e-02   2.4065468e-02   3.0813759e-02   3.4360438e-02   8.5582303e-02   4.5156230e-02   2.9396418e-02   3.9850269e-02   5.9522160e-02   1.9818286e-02   2.9063291e-02   2.8164144e-02   1.4610346e-02   1.4656069e-02   5.2344759e-02   2.3377306e-02   4.0412481e-02   1.6984268e-02   5.5250411e-02   2.0918953e-02   5.9213325e-02   3.5814189e-02   3.8804285e-02   3.1759139e-02   1.2931018e-02   1.8261938e-02   3.6495356e-02   1.5089913e-02   4.6346021e-02   4.2098942e-02   3.5016667e-02   2.2986158e-02   3.4503485e-02   1.9934983e-02   2.9546649e-02   3.0900707e-02   1.1335824e-02   3.2301226e-02   2.3277392e-02   1.5629192e-02   3.0795664e-02   1.5041716e-02   5.4315810e-03   1.4713782e-02   2.4451968e-02   3.6761202e-02   1.3762814e-02   2.8473446e-02   2.2472023e-02   2.6736705e-02   8.0230549e-02   2.3341323e-02   8.1067950e-03   1.4485807e-03   2.4486060e-03   1.1703837e-03   3.1310445e-03   3.9732842e-03   2.8350922e-03   1.6242765e-03   5.7101906e-03   4.5289508e-04   6.6077306e-03   2.6813364e-03   3.3571693e-03   6.2840633e-03   7.7260784e-03   2.0715363e-03   5.6385417e-05   8.9610646e-04   1.8073537e-02   
3.9621048e-03   2.3604444e-03   4.7560860e-04   6.2480089e-03   5.8573026e-03   1.7069942e-04   2.3254450e-04   6.3902073e-03   3.8624324e-03   3.9083302e-03   1.0347438e-03   3.4531305e-03   3.5564063e-03   5.5040957e-03   1.4887365e-03   4.1720167e-03   8.6822743e-03   2.8448886e-04   5.1458480e-04   4.8536300e-03   5.8397245e-03   3.9827712e-03   1.8058791e-02   1.4485807e-03   1.2408841e-03   1.8448387e-03   1.0796845e-02   8.0405443e-03   3.4808504e-03   5.2754145e-04   8.8520211e-04   8.8923137e-03   1.4043268e-02   2.4126814e-02   9.1698320e-03   1.1327807e-02   1.9771649e-03   4.5360079e-03   1.5317882e-02   2.1762150e-02   5.6097757e-03   7.1877025e-03   6.8496039e-03   1.4100359e-02   5.3810596e-02   9.9629280e-03   3.1307769e-02   2.0553715e-02   2.1937453e-02   1.5991855e-02   2.5676129e-02   2.8613140e-02   1.8469501e-02   2.1322428e-02   3.3003652e-02   1.2471005e-02   8.9752587e-03   2.2249180e-02   1.9221338e-02   3.4330234e-02   3.7274155e-02   1.4283960e-02   1.0136722e-02   7.4549090e-03   5.7532344e-02   2.7732649e-02   1.8492242e-02   1.6393364e-02   3.4241314e-02   1.8497141e-02   9.1551303e-03   9.1767161e-03   1.4470512e-02   6.3039218e-03   2.8459911e-02   9.7413243e-03   2.5409718e-02   3.4719577e-03   3.2484401e-02   7.0164005e-03   2.0017467e-02   3.2249701e-02   1.2084843e-02   7.5073944e-03   5.8227226e-03   1.7146634e-02   2.5055287e-02   2.9865216e-02   2.0553715e-02   2.0008085e-02   1.9953689e-02   2.7764157e-02   3.0721044e-02   1.4086953e-02   7.0439235e-03   6.3871700e-03   6.2932024e-03   2.4881477e-02   1.2570656e-04   9.2318283e-03   1.1425023e-02   3.9781027e-03   6.8287051e-03   6.6316807e-03   5.1943433e-03   1.2253382e-04   1.3842125e-03   4.4535183e-03   2.1436401e-02   2.8241776e-03   6.8828353e-02   4.7091014e-02   4.2520568e-02   4.3241334e-02   5.5155458e-02   5.7042053e-02   4.8981701e-02   4.7857882e-02   6.2261278e-02   3.0649818e-02   1.4761608e-02   4.2406580e-02   3.4493935e-02   6.3340671e-02   6.5239814e-02   
2.8920518e-02   2.9750953e-02   2.8851937e-02   9.4568410e-02   5.1830706e-02   3.5536784e-02   4.0198717e-02   6.4555077e-02   2.8697450e-02   2.7846993e-02   2.7347316e-02   2.1934395e-02   1.5675611e-02   5.6443004e-02   2.4878970e-02   4.7356921e-02   1.4503825e-02   6.0963940e-02   2.0840976e-02   5.2367097e-02   4.8672170e-02   3.5784762e-02   2.7656172e-02   1.3836981e-02   2.6623769e-02   4.4650658e-02   3.0852069e-02   4.7091014e-02   4.4370840e-02   4.0176310e-02   3.6600204e-02   4.6858384e-02   2.5835018e-02   2.6019603e-02   2.5994222e-02   6.9728196e-03   4.6608676e-03   9.9835054e-04   9.5223148e-03   2.6272609e-03   2.7884021e-05   1.5635547e-03   2.0372846e-03   5.4615556e-03   2.3300808e-03   2.5698617e-04   1.7457304e-02   7.2996870e-04   5.8453388e-02   3.4095678e-02   2.5737672e-02   3.4628989e-02   4.0687367e-02   4.0032201e-02   4.1519056e-02   3.4313166e-02   4.3287054e-02   1.9386832e-02   5.3532424e-03   2.5306754e-02   1.8013458e-02   4.3531307e-02   4.3448361e-02   1.5446633e-02   2.1111383e-02   2.4519102e-02   6.8935827e-02   3.3123525e-02   1.9821803e-02   2.8570801e-02   4.5547846e-02   1.2302601e-02   1.9692146e-02   1.8929129e-02   8.1934144e-03   8.2421848e-03   3.9256624e-02   1.4936345e-02   2.9083818e-02   1.0370393e-02   4.1828239e-02   1.3093356e-02   4.5785106e-02   2.5766235e-02   2.7913838e-02   2.2194996e-02   6.9953757e-03   1.1047300e-02   2.5843766e-02   1.0033677e-02   3.4095678e-02   3.0429687e-02   2.4458901e-02   1.5454278e-02   2.4603652e-02   1.2196519e-02   2.0322394e-02   2.1606954e-02   2.1763592e-02   3.8778874e-03   1.3140898e-02   1.0973585e-02   6.8570288e-03   1.2689294e-02   8.7227464e-03   2.2446768e-02   1.4768924e-02   9.8837253e-03   3.6594036e-02   1.1176832e-02   4.2371132e-02   2.0489598e-02   1.1322791e-02   2.4620431e-02   2.4482623e-02   2.2010230e-02   3.1108475e-02   2.0217050e-02   2.3141445e-02   1.0944826e-02   3.8599335e-03   1.0791452e-02   6.1542383e-03   2.2743482e-02   2.1333545e-02   
6.5515291e-03   1.4388718e-02   2.0852825e-02   3.9383251e-02   1.5461941e-02   7.8878514e-03   1.7372104e-02   2.4911352e-02   2.6880230e-03   1.3798611e-02   1.3025794e-02   2.1193973e-03   6.9838956e-03   2.1270749e-02   8.9787629e-03   1.2743052e-02   1.1335890e-02   2.1877049e-02   9.6533919e-03   3.5351983e-02   7.6384883e-03   2.0107777e-02   1.8067074e-02   6.8413076e-03   2.4741061e-03   1.0039063e-02   3.4670280e-04   2.0489598e-02   1.6923143e-02   1.1154236e-02   2.1521967e-03   7.1850256e-03   4.3898059e-03   1.6491142e-02   1.8665860e-02   7.4245740e-03   1.0715228e-02   3.1685550e-03   5.1120383e-03   5.0451240e-03   4.1323270e-03   1.4489604e-04   8.1318290e-04   3.0839979e-03   1.9714350e-02   1.7947024e-03   6.7440625e-02   4.5128820e-02   3.9916292e-02   4.1909560e-02   5.3040678e-02   5.4550431e-02   4.7872699e-02   4.5818357e-02   5.9509133e-02   2.8778013e-02   1.2927960e-02   3.9749997e-02   3.1835574e-02   6.0466685e-02   6.2068316e-02   2.6673332e-02   2.8274822e-02   2.8015780e-02   9.1028220e-02   4.9020613e-02   3.3018114e-02   3.8378554e-02   6.1817956e-02   2.5959804e-02   2.6422336e-02   2.5876047e-02   1.9520884e-02   1.4184191e-02   5.3918305e-02   2.3137863e-02   4.4568283e-02   1.3507580e-02   5.8174666e-02   1.9385371e-02   5.1416653e-02   4.5132833e-02   3.4491897e-02   2.6637939e-02   1.2404699e-02   2.3988175e-02   4.1744067e-02   2.7329966e-02   4.5128820e-02   4.2235431e-02   3.7715182e-02   3.3171215e-02   4.3405180e-02   2.3512609e-02   2.4946638e-02   2.5117607e-02   5.7451096e-03   2.0181202e-03   1.1916495e-03   5.0056293e-03   1.1565469e-03   7.7131020e-03   3.5313465e-03   2.1072743e-03   2.6393551e-02   2.0670282e-03   4.4935473e-02   2.3639481e-02   1.6646699e-02   2.4503230e-02   2.9137719e-02   2.8488385e-02   3.0530237e-02   2.3788614e-02   3.1241308e-02   1.1731452e-02   1.7654569e-03   1.6307697e-02   1.0602623e-02   3.1457386e-02   3.1449229e-02   8.6094127e-03   1.3341698e-02   1.6740022e-02   5.3656632e-02   
2.2705161e-02   1.1958802e-02   1.9130319e-02   3.3171817e-02   6.4720106e-03   1.2268562e-02   1.1633623e-02   3.5425115e-03   3.7925712e-03   2.7828397e-02   8.3873180e-03   1.9384549e-02   5.8220699e-03   3.0002594e-02   7.2263237e-03   3.4339797e-02   1.7195852e-02   1.8977924e-02   1.4659749e-02   3.0403500e-03   5.5430802e-03   1.6804744e-02   6.5028482e-03   2.3639481e-02   2.0526700e-02   1.5612876e-02   9.4514606e-03   1.6202837e-02   6.2323724e-03   1.3120120e-02   1.4374340e-02   2.8180986e-03   1.0509318e-02   1.7897176e-02   2.8798358e-03   9.1928702e-03   6.3970688e-03   1.0746267e-02   5.0496414e-02   7.7981540e-03   2.4751970e-02   1.2458649e-02   1.1686269e-02   1.0356316e-02   1.6774912e-02   1.8282513e-02   1.3498932e-02   1.2919728e-02   2.1579044e-02   5.2962835e-03   2.7725666e-03   1.1818152e-02   9.1091800e-03   2.2436834e-02   2.4265521e-02   5.8512682e-03   4.4398598e-03   4.1822455e-03   4.2435194e-02   1.6425553e-02   8.8184300e-03   9.0473617e-03   2.2804959e-02   8.4055012e-03   3.7245140e-03   3.5955501e-03   5.8501155e-03   1.2261597e-03   1.8027694e-02   3.2889908e-03   1.4332649e-02   2.5252408e-04   2.0964539e-02   1.7865385e-03   1.5552416e-02   1.8549588e-02   6.8429145e-03   3.5948443e-03   1.0694978e-03   7.5092190e-03   1.3665818e-02   1.7252830e-02   1.2458649e-02   1.1370103e-02   1.0296741e-02   1.5007456e-02   1.7383467e-02   5.5463863e-03   2.9922345e-03   3.0734531e-03   3.1960387e-03   6.8662451e-03   1.2966654e-04   2.7649067e-03   8.3427366e-04   2.7984284e-03   2.9822521e-02   1.2731780e-03   4.3416537e-02   2.4875338e-02   2.0714079e-02   2.3200351e-02   3.0852531e-02   3.1753284e-02   2.8200300e-02   2.5332686e-02   3.5512031e-02   1.2989773e-02   3.4785945e-03   2.0603808e-02   1.5135483e-02   3.6226615e-02   3.7456354e-02   1.1531484e-02   1.2959687e-02   1.3737739e-02   6.0763799e-02   2.7435536e-02   1.5849420e-02   1.9923216e-02   3.7336849e-02   1.1567919e-02   1.1725372e-02   1.1300051e-02   7.4803077e-03   
3.9656480e-03   3.1244978e-02   9.2703065e-03   2.4126524e-02   3.9496417e-03   3.4455719e-02   7.0383738e-03   3.1302098e-02   2.5203523e-02   1.7680030e-02   1.2436482e-02   3.0466203e-03   1.0266971e-02   2.2126952e-02   1.5206492e-02   2.4875338e-02   2.2550174e-02   1.9125072e-02   1.7383181e-02   2.3878119e-02   9.5944653e-03   1.1168635e-02   1.1615557e-02   1.3151667e-03   2.5320521e-03   6.0366399e-03   2.7823080e-03   2.9364031e-04   1.6476106e-02   9.7247557e-04   6.0284770e-02   3.5334723e-02   2.6546579e-02   3.6095524e-02   4.1987790e-02   4.1172424e-02   4.3178382e-02   3.5527420e-02   4.4374359e-02   2.0339225e-02   5.8436205e-03   2.6082735e-02   1.8603485e-02   4.4570054e-02   4.4351810e-02   1.6144347e-02   2.2244142e-02   2.5921932e-02   7.0056396e-02   3.3998717e-02   2.0524111e-02   2.9752453e-02   4.6679420e-02   1.2673386e-02   2.0806267e-02   2.0008665e-02   8.5632030e-03   8.9793060e-03   4.0369526e-02   1.5814708e-02   2.9884891e-02   1.1327921e-02   4.2874486e-02   1.4012365e-02   4.7563178e-02   2.6115270e-02   2.9259206e-02   2.3493076e-02   7.6990791e-03   1.1423166e-02   2.6517929e-02   9.7961479e-03   3.5334723e-02   3.1531604e-02   2.5300172e-02   1.5549538e-02   2.4969819e-02   1.2759840e-02   2.1558690e-02   2.2929217e-02   6.4418336e-03   6.6136731e-03   4.4102106e-03   9.1481615e-04   8.6358065e-03   2.3009102e-03   7.8700864e-02   5.0063075e-02   3.9499858e-02   5.0635713e-02   5.7926279e-02   5.6986449e-02   5.8777354e-02   5.0310505e-02   6.0682610e-02   3.1865387e-02   1.2666075e-02   3.8913195e-02   2.9597893e-02   6.0871779e-02   6.0471723e-02   2.6606279e-02   3.4024273e-02   3.7905442e-02   8.9816272e-02   4.8416482e-02   3.2086335e-02   4.3354864e-02   6.3371751e-02   2.1811352e-02   3.2202223e-02   3.1242353e-02   1.6436605e-02   1.6909958e-02   5.6040246e-02   2.6101775e-02   4.3480802e-02   1.9524603e-02   5.8918015e-02   2.3597831e-02   6.3733101e-02   3.8161980e-02   4.2440703e-02   3.5140814e-02   1.5073633e-02   
2.0215594e-02   3.9313992e-02   1.6000311e-02   5.0063075e-02   4.5569203e-02   3.8016354e-02   2.4659247e-02   3.6849168e-02   2.2186423e-02   3.2805330e-02   3.4269626e-02   3.8467105e-03   1.2875815e-03   2.5072619e-03   2.9525973e-02   1.2875815e-03   4.2282453e-02   2.3357390e-02   1.8646096e-02   2.2313442e-02   2.9124686e-02   2.9658732e-02   2.7480923e-02   2.3735668e-02   3.3135090e-02   1.1714292e-02   2.4574323e-03   1.8489812e-02   1.3106245e-02   3.3729170e-02   3.4672500e-02   9.9345371e-03   1.2062833e-02   1.3461486e-02   5.7431716e-02   2.5093254e-02   1.3932087e-02   1.8599958e-02   3.4955869e-02   9.5459229e-03   1.0897419e-02   1.0433767e-02   5.8375168e-03   3.2324242e-03   2.9124686e-02   8.1819777e-03   2.1845036e-02   3.6925062e-03   3.2054971e-02   6.2644754e-03   3.0710474e-02   2.2249402e-02   1.6897068e-02   1.1997033e-02   2.3970471e-03   8.3620725e-03   1.9766550e-02   1.2536577e-02   2.3357390e-02   2.0894448e-02   1.7210222e-02   1.4675128e-02   2.1017625e-02   7.9709601e-03   1.0694253e-02   1.1329979e-02   8.0862109e-04   3.9488260e-03   2.3216413e-02   2.2109950e-03   6.3313725e-02   4.2461361e-02   3.8219621e-02   3.8832114e-02   5.0144792e-02   5.1966249e-02   4.4333772e-02   4.3192281e-02   5.6977170e-02   2.6944997e-02   1.2389873e-02   3.8126276e-02   3.0729621e-02   5.8026007e-02   5.9897200e-02   2.5415278e-02   2.6079828e-02   2.5293603e-02   8.8103550e-02   4.7063061e-02   3.1649890e-02   3.5923563e-02   5.9166899e-02   2.5434205e-02   2.4297609e-02   2.3831192e-02   1.9112973e-02   1.3083158e-02   5.1398726e-02   2.1557413e-02   4.2826183e-02   1.1968109e-02   5.5744441e-02   1.7792619e-02   4.7600638e-02   4.4377594e-02   3.1768113e-02   2.4143930e-02   1.1416637e-02   2.3484839e-02   4.0313562e-02   2.8205265e-02   4.2461361e-02   3.9889484e-02   3.5980112e-02   3.3161057e-02   4.2633793e-02   2.2600989e-02   2.2605649e-02   2.2606045e-02   1.6651142e-03   2.2593118e-02   4.7494657e-04   5.5766903e-02   3.4735344e-02   
2.9594132e-02   3.2557240e-02   4.1723024e-02   4.2746173e-02   3.8238608e-02   3.5278801e-02   4.7033204e-02   2.0363061e-02   7.2717611e-03   2.9420951e-02   2.2560431e-02   4.7813714e-02   4.9078515e-02   1.8314288e-02   2.0299319e-02   2.0869007e-02   7.5386883e-02   3.7549246e-02   2.3623644e-02   2.8844458e-02   4.9142563e-02   1.7633849e-02   1.8744071e-02   1.8222205e-02   1.2402012e-02   8.3988430e-03   4.2147698e-02   1.5629140e-02   3.3602383e-02   8.3018288e-03   4.5796447e-02   1.2702505e-02   4.1687026e-02   3.3943329e-02   2.5980350e-02   1.9415550e-02   7.0260502e-03   1.6012157e-02   3.1063717e-02   1.9520820e-02   3.4735344e-02   3.1983672e-02   2.7741891e-02   2.3833980e-02   3.2440091e-02   1.5637054e-02   1.7874227e-02   1.8281297e-02   1.4972117e-02   3.8328039e-04   6.3723448e-02   3.8719440e-02   3.0384566e-02   3.8629777e-02   4.5827446e-02   4.5472032e-02   4.5653244e-02   3.9023449e-02   4.9116846e-02   2.2930601e-02   7.4083466e-03   2.9962579e-02   2.2100137e-02   4.9469036e-02   4.9598054e-02   1.8997460e-02   2.4379661e-02   2.7279745e-02   7.6566902e-02   3.8413283e-02   2.3958684e-02   3.2715136e-02   5.1484761e-02   1.5886249e-02   2.2794916e-02   2.2023153e-02   1.1094162e-02   1.0331639e-02   4.4681292e-02   1.7978253e-02   3.4097516e-02   1.2099271e-02   4.7608429e-02   1.5662138e-02   4.9957613e-02   3.0977854e-02   3.1455087e-02   2.5013254e-02   8.8604244e-03   1.4432007e-02   3.0714741e-02   1.3426163e-02   3.8719440e-02   3.4992868e-02   2.8902863e-02   1.9656006e-02   2.9684992e-02   1.5494562e-02   2.3064694e-02   2.4203450e-02   1.8836956e-02   1.3755831e-01   9.8685385e-02   8.2425470e-02   1.0000651e-01   1.0925759e-01   1.0726669e-01   1.1120208e-01   9.8918033e-02   1.1167083e-01   7.2661493e-02   4.1665349e-02   8.1426911e-02   6.7501542e-02   1.1159988e-01   1.1014878e-01   6.3989575e-02   7.6224987e-02   8.1835804e-02   1.4773346e-01   9.4587815e-02   7.1696153e-02   8.9447560e-02   1.1534202e-01   5.4876111e-02   
7.3538690e-02   7.2081812e-02   4.6952332e-02   4.9385723e-02   1.0589901e-01   6.4038977e-02   8.7644093e-02   5.3625558e-02   1.0917520e-01   6.0322002e-02   1.1787190e-01   7.6918821e-02   8.8499122e-02   7.7885622e-02   4.6255911e-02   5.2612368e-02   8.1239445e-02   4.0019052e-02   9.8685385e-02   9.2131722e-02   8.0633770e-02   5.6243805e-02   7.5363077e-02   5.6811044e-02   7.4440381e-02   7.6512320e-02   5.8097767e-02   3.5211195e-02   2.8463781e-02   3.4215950e-02   4.2147698e-02   4.2414656e-02   4.0542228e-02   3.5615822e-02   4.6295613e-02   2.0363061e-02   6.3798220e-03   2.8163995e-02   2.0925233e-02   4.6838114e-02   4.7487902e-02   1.7368541e-02   2.1117782e-02   2.2967145e-02   7.3810390e-02   3.6316652e-02   2.2375879e-02   2.9367287e-02   4.8511603e-02   1.5463833e-02   1.9583828e-02   1.8938570e-02   1.0601468e-02   8.3988430e-03   4.1723024e-02   1.5629140e-02   3.2244606e-02   9.3153170e-03   4.4933305e-02   1.3111918e-02   4.4412271e-02   3.0809608e-02   2.7438827e-02   2.1093197e-02   7.0260502e-03   1.3969417e-02   2.9313944e-02   1.5218585e-02   3.5211195e-02   3.1983672e-02   2.6841235e-02   2.0313092e-02   2.9439905e-02   1.4332974e-02   1.9364178e-02   2.0170126e-02   3.9924060e-03   1.0538198e-02   3.2148883e-03   2.7366019e-03   4.4621109e-03   1.7397347e-03   4.1425525e-03   5.2723146e-03   1.1136640e-02   2.9112354e-02   1.1160954e-02   1.6541883e-02   6.0336652e-03   8.3590571e-03   1.6026960e-02   9.4770245e-03   8.7523451e-03   9.5282841e-03   8.5487892e-03   1.3900105e-02   5.7309995e-03   4.7773544e-03   2.3853571e-02   1.0495078e-02   1.1045936e-02   2.7111420e-02   2.3076623e-02   4.8207470e-03   1.4681091e-02   1.0141190e-02   2.1316125e-02   5.8141126e-03   1.6474359e-02   1.3064397e-03   2.0347529e-02   5.7992324e-03   9.5368199e-03   2.5369333e-02   2.4565418e-02   1.2896533e-02   4.4389699e-02   3.9924060e-03   5.8442541e-03   1.0341765e-02   2.9771308e-02   2.0024906e-02   2.0168043e-02   1.0668382e-02   1.0461554e-02   
1.6977050e-03   8.0038314e-04   3.2736649e-04   7.3247771e-04   1.7861084e-03   1.2199790e-05   1.6168506e-03   2.0903986e-03   1.2529506e-02   1.9650001e-03   4.3150624e-03   2.0139182e-03   3.1759995e-03   4.1258242e-03   2.0323215e-03   3.5878910e-03   9.4977361e-03   1.5488203e-03   3.0112961e-03   2.7909675e-04   1.8152664e-03   8.3701176e-03   2.5714996e-03   2.7148441e-03   1.0520423e-02   9.2964166e-03   7.5284963e-04   3.9888853e-03   1.8331853e-03   9.4586856e-03   1.6039412e-03   5.4889906e-03   2.8052973e-03   7.3341015e-03   1.2316222e-03   3.2601592e-03   1.0868316e-02   8.8162037e-03   2.9210959e-03   2.1963225e-02   0.0000000e+00   1.8230646e-04   1.5254865e-03   1.2273284e-02   6.9788794e-03   6.3577637e-03   3.5631537e-03   4.1101326e-03   4.2047124e-03   2.6033919e-03   1.7916004e-03   6.7524244e-03   1.5279872e-03   2.3303877e-03   1.3468412e-03   7.9573793e-03   9.8329441e-06   8.0754860e-04   2.3639655e-03   2.4986002e-03   1.3799583e-03   2.6729899e-03   6.2996248e-03   1.0853281e-02   4.8574547e-04   3.9201723e-04   1.4418237e-03   2.9002781e-03   3.0725233e-03   3.0407572e-03   2.9093497e-03   5.1175009e-03   6.6599859e-03   1.5926877e-03   2.5889139e-03   1.3713626e-04   8.5117654e-03   1.9829707e-03   4.3442759e-03   8.7298845e-03   2.1325966e-03   3.5994846e-03   5.1472858e-03   7.9423211e-03   3.5289513e-03   2.0294631e-04   1.1925423e-02   1.6977050e-03   7.8274456e-04   4.8500499e-05   4.9289073e-03   1.8879551e-03   2.5218165e-03   4.9495642e-03   6.1589498e-03   1.4775011e-03   2.8508338e-03   3.8437964e-04   1.0082748e-03   4.3870644e-03   2.7766403e-03   1.3317076e-02   4.6107743e-03   7.0365754e-03   5.0970174e-03   7.0294656e-03   5.8145812e-03   1.6991258e-03   1.6190058e-03   1.4192459e-02   4.5311136e-03   5.3518685e-03   7.5607221e-04   4.5347742e-03   1.1372343e-02   2.1219078e-03   2.3903563e-03   1.2745790e-02   9.1602079e-03   2.9606388e-03   4.3905218e-03   4.7897027e-03   8.0493172e-03   4.4779450e-03   5.1908739e-03   
9.8295198e-04   1.2281083e-02   3.7969538e-04   1.7665186e-03   1.0605818e-02   1.1571194e-02   6.2101277e-03   2.7119271e-02   8.0038314e-04   1.4492761e-03   3.6743113e-03   1.7056692e-02   1.1710246e-02   8.2821554e-03   2.2240766e-03   2.2684005e-03   3.0380682e-04   2.0343287e-03   2.8037036e-04   8.3123377e-04   3.9939935e-03   1.6647447e-02   2.9061444e-03   6.1278767e-03   1.1769582e-03   2.2926522e-03   6.3717175e-03   3.9793414e-03   5.6921961e-03   6.6386310e-03   1.6577942e-03   4.6178179e-03   1.2108195e-03   8.4773320e-04   1.1017765e-02   4.7223063e-03   4.9236953e-03   1.3900771e-02   1.3046714e-02   3.7972280e-04   6.5335900e-03   2.3480655e-03   1.3286542e-02   9.3142474e-04   8.4820747e-03   2.8343361e-03   8.2403259e-03   2.5815546e-03   5.4537681e-03   1.4900562e-02   1.1670902e-02   3.7659184e-03   2.5593433e-02   3.2736649e-04   7.7091107e-04   2.6079554e-03   1.4611634e-02   8.0055625e-03   9.0564615e-03   5.9260895e-03   6.5055071e-03   3.8779943e-03   5.7015696e-04   1.7303569e-04   4.1690936e-03   1.6322670e-02   1.9974930e-03   5.0009825e-03   3.2570724e-04   9.5057293e-04   5.8304604e-03   4.8059418e-03   7.5457291e-03   5.1241997e-03   6.9111123e-04   3.7782470e-03   1.7092344e-03   2.5698914e-04   9.5453465e-03   5.5846827e-03   5.6938512e-03   1.2788699e-02   1.3393372e-02   7.3809633e-06   6.7682476e-03   1.3200598e-03   1.4442150e-02   1.8441195e-04   9.0822620e-03   4.9911804e-03   5.8437761e-03   3.8589879e-03   6.9829557e-03   1.5273413e-02   1.0318402e-02   2.4584659e-03   2.2525564e-02   7.3247771e-04   8.2385517e-04   1.9942911e-03   1.2041849e-02   5.7258386e-03   8.2738022e-03   7.3225251e-03   8.2204997e-03   2.0515682e-03   5.4510656e-03   5.2217407e-03   1.7964531e-02   7.2742632e-03   1.0588415e-02   6.2949332e-03   8.6345788e-03   9.1795241e-03   3.5876856e-03   2.6991302e-03   1.4376485e-02   6.5862079e-03   8.4804528e-03   2.1372463e-03   5.3927480e-03   1.5896874e-02   4.1394106e-03   4.5375167e-03   1.7523452e-02   
1.2909742e-02   4.0941134e-03   7.2891356e-03   7.2227860e-03   1.1126344e-02   5.7124492e-03   8.1126160e-03   1.3907276e-04   1.6260954e-02   1.3854449e-03   3.2122771e-03   1.4553993e-02   1.6151019e-02   9.1578470e-03   3.3857588e-02   1.7861084e-03   2.9619943e-03   6.1829720e-03   2.2326762e-02   1.5680260e-02   1.2223630e-02   3.9245535e-03   3.7050303e-03   1.3686449e-03   2.1603250e-03   1.2649428e-02   1.7801502e-03   4.1458992e-03   1.7278919e-03   2.7991042e-03   4.0888611e-03   2.2218452e-03   3.9716339e-03   8.9665631e-03   1.3013529e-03   2.8806398e-03   3.4594790e-04   1.5677728e-03   8.1916354e-03   2.7802616e-03   2.9107124e-03   1.0452698e-02   9.5134829e-03   5.8171764e-04   4.1098444e-03   1.6001879e-03   9.8257537e-03   1.3468918e-03   5.7037585e-03   3.1132824e-03   6.8805675e-03   1.4645264e-03   3.5912866e-03   1.1107885e-02   8.6722521e-03   2.6587395e-03   2.1561220e-02   1.2199790e-05   1.4772036e-04   1.4023821e-03   1.1878859e-02   6.5538274e-03   6.3060622e-03   3.8811059e-03   4.4873121e-03   5.6719205e-03   1.8601099e-02   2.5037679e-03   5.8026549e-03   3.0990295e-05   3.8568498e-04   7.1301713e-03   6.6581271e-03   9.9891912e-03   3.5467136e-03   7.7986655e-04   4.6294369e-03   2.9226786e-03   3.3006174e-05   1.0560037e-02   7.5497742e-03   7.6393818e-03   1.4326258e-02   1.5841028e-02   1.7171643e-04   8.5994033e-03   1.5671556e-03   1.7334506e-02   1.9316046e-05   1.1306536e-02   6.6195881e-03   5.5794173e-03   5.6606012e-03   9.3044548e-03   1.7865075e-02   1.1490070e-02   2.7178516e-03   2.3185889e-02   1.6168506e-03   1.6521970e-03   2.7157547e-03   1.2355858e-02   5.5768992e-03   9.6617637e-03   9.6558503e-03   1.0729574e-02   4.4050618e-03   1.4462579e-03   1.4084514e-03   6.0437228e-03   6.9280724e-03   5.9092958e-04   3.5267150e-04   2.3264461e-03   1.8099026e-02   3.0921484e-03   8.6005594e-04   9.1287231e-04   6.3843974e-03   3.0627943e-03   4.0297343e-04   3.2526954e-04   3.6399798e-03   2.6478661e-03   3.9939935e-03   
3.1762278e-04   2.2988741e-03   3.2596799e-03   5.3016678e-03   9.7044163e-04   7.0435695e-03   5.7009823e-03   1.4263218e-03   1.5142545e-03   3.5226184e-03   3.0792135e-03   2.3251410e-03   1.2857609e-02   2.0903986e-03   1.3292907e-03   8.8570158e-04   6.8622586e-03   5.1349839e-03   1.4775225e-03   1.2815928e-03   1.9995407e-03   7.7887386e-03   4.2718762e-03   1.8880208e-02   1.9223691e-02   2.7240761e-03   5.5247078e-03   8.3688264e-03   3.7206770e-02   1.2358468e-02   4.8889738e-03   9.3012979e-03   2.0063076e-02   2.3808756e-03   4.8847228e-03   4.4654367e-03   8.0211221e-04   5.5572260e-04   1.5849918e-02   2.4737465e-03   1.0011166e-02   2.0320189e-03   1.7697241e-02   2.0106232e-03   2.1009310e-02   1.0002251e-02   9.3994441e-03   6.7786672e-03   4.3329436e-04   1.8154672e-03   8.4395042e-03   6.2214471e-03   1.2529506e-02   1.0325253e-02   7.1412079e-03   5.6981926e-03   9.1712120e-03   1.6108637e-03   5.7438691e-03   6.7900644e-03   6.8888062e-04   2.5080626e-03   2.5613963e-03   1.3274318e-03   2.8814590e-03   6.6724246e-03   1.1044919e-02   5.3338455e-04   3.3999007e-04   1.6762239e-03   3.1000026e-03   2.8256325e-03   3.2387801e-03   3.0867064e-03   4.8730365e-03   6.6378483e-03   1.7822362e-03   2.6462661e-03   1.4123935e-04   8.6230682e-03   2.1318487e-03   4.4281251e-03   9.3233552e-03   1.8673327e-03   3.9343847e-03   5.4568897e-03   7.9015195e-03   3.2859077e-03   1.3180217e-04   1.1320265e-02   1.9650001e-03   9.6799653e-04   7.8282421e-05   4.5166083e-03   1.6331972e-03   2.3904078e-03   5.2224500e-03   6.4841213e-03   5.7642096e-03   5.5885978e-03   3.7048759e-04   3.1695738e-03   7.3434699e-03   1.6768125e-02   2.3743894e-03   1.1932462e-04   3.2200650e-03   6.6976868e-03   7.3134819e-04   3.2564607e-03   2.9664521e-03   1.9437509e-03   4.0525881e-03   4.6657625e-03   1.7256268e-03   1.3667365e-03   6.4082141e-03   5.2167922e-03   3.0823107e-03   1.3133486e-02   1.7068449e-03   5.3434665e-03   5.8085657e-03   4.9261084e-03   9.6684380e-04   
7.1816432e-04   6.8418503e-03   4.3150624e-03   2.7358698e-03   7.4246714e-04   2.1999970e-03   1.3715192e-03   6.3897855e-04   5.2531215e-03   6.6567087e-03   1.9959041e-04   7.2877827e-03   7.2348769e-03   1.0891220e-02   3.2459725e-03   7.4122702e-04   4.6749634e-03   3.3669230e-03   7.9583955e-05   1.0437521e-02   8.1436642e-03   8.2061472e-03   1.4337740e-02   1.6319841e-02   3.0462366e-04   9.0085125e-03   1.5214225e-03   1.8068443e-02   2.5934558e-05   1.1842923e-02   7.5545176e-03   5.1162105e-03   6.3606028e-03   1.0107865e-02   1.8360102e-02   1.1411116e-02   2.5943653e-03   2.2590562e-02   2.0139182e-03   1.9469619e-03   2.8165944e-03   1.1880076e-02   5.1539049e-03   9.7568917e-03   1.0422756e-02   1.1596876e-02   7.5663186e-03   8.6488322e-03   1.3133604e-02   3.0107568e-03   7.9406152e-04   4.7466095e-03   4.5711602e-03   4.9936224e-04   9.9296431e-03   9.5749791e-03   9.5587917e-03   1.4081306e-02   1.7234890e-02   8.7040364e-04   9.9040316e-03   1.5012448e-03   1.9639569e-02   3.0179394e-04   1.3010570e-02   1.0161281e-02   3.9519870e-03   8.1767554e-03   1.2080688e-02   1.9279439e-02   1.0989202e-02   2.3266303e-03   2.0713423e-02   3.1759995e-03   2.8067172e-03   3.1032715e-03   1.0490119e-02   4.0786420e-03   9.8110939e-03   1.2277454e-02   1.3709414e-02   1.7582839e-03   4.9489607e-03   1.9934064e-02   3.5016865e-03   3.4697267e-04   2.6310541e-03   8.0617166e-03   9.6411709e-04   1.7064375e-03   1.4666650e-03   1.4745772e-03   2.1108592e-03   5.5260764e-03   5.1802495e-04   2.3341578e-03   3.7167048e-03   6.5626516e-03   1.3180061e-03   1.1565092e-02   3.6658759e-03   3.8514458e-03   3.6514318e-03   2.8125792e-03   9.8545506e-04   1.7565517e-03   7.9513446e-03   4.1258242e-03   2.7177731e-03   1.0483123e-03   3.5558284e-03   3.1649242e-03   2.4110016e-04   3.1231514e-03   4.2303212e-03   8.7046320e-04   1.9803680e-02   4.5057787e-03   2.2875453e-03   8.1473256e-04   7.2784764e-03   5.2841523e-03   3.1936914e-05   6.0579771e-05   5.5427829e-03   
2.9996038e-03   4.7111715e-03   6.7018226e-04   3.8206460e-03   2.7379425e-03   6.3961850e-03   9.7000606e-04   5.0455840e-03   8.7517746e-03   5.2181192e-04   4.2086901e-04   3.8740476e-03   5.1909001e-03   4.1846614e-03   1.6912712e-02   2.0323215e-03   1.6814934e-03   2.0140995e-03   1.0246238e-02   8.0709165e-03   2.9533168e-03   3.5001524e-04   7.2402168e-04   2.4525121e-02   8.3786268e-03   5.9730003e-03   2.2241688e-03   1.0490949e-02   1.0052150e-02   8.4481649e-04   1.0306755e-03   9.7104895e-03   4.7051149e-03   7.5682486e-03   2.5521754e-03   7.8053009e-03   3.0734717e-03   9.8801938e-03   2.2384251e-03   3.6192976e-03   1.5052125e-02   6.1736712e-04   1.0280118e-04   5.5375454e-03   9.7589612e-03   8.6474768e-03   2.4375316e-02   3.5878910e-03   3.8390770e-03   5.3089764e-03   1.6859170e-02   1.4176552e-02   6.5576789e-03   2.5984028e-04   9.3320862e-05   6.7547274e-03   1.5238284e-02   1.2713534e-02   3.0890016e-03   2.3712664e-02   2.1340343e-02   2.1521507e-02   3.0005294e-02   3.3960619e-02   5.2290080e-03   2.3006673e-02   8.7125215e-03   3.6416136e-02   3.8281789e-03   2.7389250e-02   1.5157530e-02   1.2639929e-02   1.7411617e-02   2.3820503e-02   3.6850697e-02   2.5373970e-02   1.0602298e-02   3.7784525e-02   9.4977361e-03   1.0004799e-02   1.1952848e-02   2.3559319e-02   1.3109247e-02   2.3603108e-02   2.4562463e-02   2.6017612e-02   1.7241949e-03   2.0286689e-03   1.1335916e-03   5.6322483e-03   5.1221157e-03   5.0559971e-03   8.5835151e-03   1.0650923e-02   5.5566853e-04   5.1134826e-03   1.3892193e-04   1.2640034e-02   5.6438567e-04   7.4313522e-03   8.3161300e-03   2.5369950e-03   4.7342848e-03   7.3166190e-03   1.2276381e-02   6.3456614e-03   5.8916640e-04   1.5594819e-02   1.5488203e-03   9.3082136e-04   7.5849611e-04   7.0579696e-03   2.4440449e-03   5.1791902e-03   7.3166180e-03   8.5826500e-03   2.1075364e-03   5.4157920e-03   1.3748154e-03   2.4371345e-03   2.2201300e-03   2.6881175e-03   4.1574302e-03   3.5021271e-03   1.4032793e-03   
9.1188034e-04   6.1085103e-03   4.1365224e-03   2.7438304e-03   1.0770631e-02   2.1519140e-03   3.9794011e-03   4.6480198e-03   5.1312578e-03   1.6288841e-03   5.4679156e-04   8.7532006e-03   3.0112961e-03   1.7262374e-03   2.9055719e-04   3.3158230e-03   1.8013008e-03   9.2887256e-04   4.2320076e-03   5.4776329e-03   3.2889511e-03   6.5019207e-03   1.1654311e-03   1.2550970e-03   7.9744383e-03   6.4095512e-03   1.6783080e-03   2.2182877e-03   1.9380758e-03   6.5001122e-03   2.8053456e-03   3.3035106e-03   3.3582234e-03   7.0744470e-03   5.6581852e-04   1.8141689e-03   7.7202001e-03   6.7565678e-03   2.7207024e-03   1.9179880e-02   2.7909675e-04   2.2008347e-04   1.1064738e-03   1.0688407e-02   6.6082062e-03   4.4435997e-03   1.9606397e-03   2.4773484e-03   1.1763001e-02   8.2259536e-03   8.3480149e-03   1.5692475e-02   1.7083720e-02   2.8355914e-04   9.4960515e-03   2.0536401e-03   1.8469924e-02   1.0092056e-04   1.2279622e-02   6.4441301e-03   6.4046448e-03   6.0274458e-03   9.8857207e-03   1.9188640e-02   1.2736270e-02   3.3492132e-03   2.4941935e-02   1.8152664e-03   1.9865894e-03   3.3071796e-03   1.3641872e-02   6.4197769e-03   1.0769048e-02   1.0309787e-02   1.1341762e-02   5.1119032e-03   4.6662587e-03   4.6505160e-04   3.2269116e-03   9.0774431e-03   2.5215213e-03   4.0057628e-03   6.1199030e-03   9.7447895e-03   3.5360157e-03   1.8995080e-02   2.6805580e-03   8.6582433e-03   8.1342278e-03   3.7198152e-03   3.9443681e-05   2.6670326e-03   3.4016785e-03   8.3701176e-03   6.1426721e-03   2.9189037e-03   1.0083175e-03   2.2713681e-03   3.9595770e-04   7.2341855e-03   8.8447009e-03   1.1230173e-05   5.1389929e-03   2.4707449e-03   5.4731760e-03   5.1980845e-04   4.3112681e-03   2.1787917e-03   7.2534046e-03   6.7672528e-04   5.6623211e-03   9.1277214e-03   7.3619628e-04   3.7016392e-04   3.2564607e-03   4.9531882e-03   4.5742792e-03   1.6438776e-02   2.5714996e-03   2.1421258e-03   2.3256067e-03   1.0175775e-02   8.4088183e-03   2.7613050e-03   2.4755840e-04   
6.0873636e-04   4.6703189e-03   2.2089026e-03   5.5642336e-03   3.7824549e-04   4.1878969e-03   2.0462491e-03   7.3148116e-03   5.4583143e-04   6.1390745e-03   8.7247882e-03   9.1073145e-04   4.9034572e-04   2.9664521e-03   4.5041455e-03   4.3650355e-03   1.5605807e-02   2.7148441e-03   2.1951828e-03   2.2077213e-03   9.5777719e-03   8.0120192e-03   2.4278861e-03   3.2552080e-04   7.3734328e-04   1.7636357e-03   1.2290813e-02   2.3919392e-03   6.5534766e-03   4.2355076e-03   1.3428854e-02   2.7760250e-03   2.0719142e-02   5.3736931e-03   9.3604766e-03   7.8350168e-03   1.9437509e-03   2.3451546e-04   4.9805974e-03   3.4615227e-03   1.0520423e-02   8.1642247e-03   4.6947450e-03   2.2490758e-03   4.7893620e-03   5.2359029e-04   6.8110246e-03   8.2524171e-03   1.3046714e-02   1.1318623e-03   8.6855001e-03   5.4130623e-04   1.5131426e-02   5.6179632e-04   1.5407719e-02   1.0669118e-02   5.8683852e-03   3.5797271e-03   6.2414181e-05   2.6714308e-03   7.7088950e-03   9.8081718e-03   9.2964166e-03   7.7221233e-03   5.7330158e-03   7.6711675e-03   9.7842038e-03   1.6419007e-03   2.8364539e-03   3.5173281e-03   6.5335900e-03   1.1340078e-03   1.4192754e-02   1.5964523e-04   8.8481511e-03   5.2711173e-03   5.4383051e-03   3.8856537e-03   6.9538378e-03   1.4900562e-02   9.8443835e-03   2.2013577e-03   2.1746585e-02   7.5284963e-04   7.7091107e-04   1.8026328e-03   1.1463429e-02   5.3229428e-03   7.8904169e-03   7.2583306e-03   8.1944055e-03   3.9135288e-03   1.7196090e-03   8.1113282e-03   2.2942459e-04   9.3463740e-03   6.8261058e-03   2.3665724e-03   1.6309596e-03   1.7256268e-03   2.3397606e-03   3.5922701e-03   1.1208568e-02   3.9888853e-03   2.9455145e-03   1.9660659e-03   6.5467152e-03   6.1492489e-03   9.3538874e-04   1.2212079e-03   1.9267388e-03   1.0793365e-02   1.2592720e-03   6.0147390e-03   9.1609131e-03   1.8537817e-03   4.5335508e-03   6.6172179e-03   1.0131803e-02   4.6089934e-03   1.7595473e-04   1.2987364e-02   1.8331853e-03   9.7087402e-04   3.3609260e-04   
5.4018686e-03   1.7068395e-03   3.6646912e-03   6.4787894e-03   7.7901480e-03   1.6754655e-02   6.9641544e-04   1.3182224e-02   1.4651385e-02   4.9466239e-03   2.3931163e-03   5.9263439e-04   5.4211666e-03   1.0221921e-02   1.4928620e-02   9.4586856e-03   8.3819492e-03   7.3339215e-03   1.2011676e-02   1.3620169e-02   3.6119007e-03   1.8435886e-03   2.0979826e-03   1.0794235e-02   6.9811950e-03   4.9473584e-03   5.5993500e-03   9.1192858e-03   1.7103671e-02   1.0654852e-02   2.2936354e-03   2.1887320e-02   1.6039412e-03   1.5236521e-03   2.3684718e-03   1.1405606e-02   4.9416620e-03   8.9601309e-03   9.4128890e-03   1.0537337e-02   1.0144176e-02   9.3650956e-03   2.8049928e-03   1.4261619e-03   9.6795720e-04   3.1673467e-03   5.6308712e-03   1.2399657e-02   5.4889906e-03   4.4629844e-03   3.5148873e-03   8.2921709e-03   8.5570702e-03   1.5767832e-03   9.7941910e-04   1.5044401e-03   1.9162648e-02   2.3258011e-03   4.3611697e-03   1.7154674e-02   1.9266334e-02   1.1399040e-02   3.8294804e-02   2.8052973e-03   4.2987486e-03   8.1194560e-03   2.5933333e-02   1.8564970e-02   1.4940871e-02   5.2230072e-03   4.8296564e-03   1.1136786e-02   1.2996602e-02   1.1896568e-02   3.3692870e-03   1.0295085e-03   6.7928621e-03   7.3341015e-03   5.3674586e-03   2.6380919e-03   1.7126363e-03   1.9252416e-05   3.9628066e-03   1.2371798e-02   1.4417093e-02   5.5916770e-04   7.0172680e-03   8.6736288e-03   5.5025710e-03   2.2796596e-02   1.2316222e-03   1.4793666e-03   2.9451077e-03   1.4303806e-02   1.0475492e-02   5.7470026e-03   7.8882742e-04   9.0526661e-04   4.3645694e-03   7.8632956e-03   7.2260963e-03   2.1331409e-02   3.2601592e-03   3.2514734e-03   4.2300937e-03   1.4385386e-02   1.2160577e-02   5.0215597e-03   4.3279739e-05   5.1818418e-05   3.0767343e-03   9.0015857e-03   9.8042907e-03   1.0868316e-02   9.1698320e-03   6.9442206e-03   8.2610449e-03   1.0963075e-02   2.1355881e-03   3.5431999e-03   4.2180590e-03   3.2143555e-03   3.3819870e-03   8.8162037e-03   6.5643841e-03   
3.2940615e-03   1.2995092e-03   2.9075684e-03   3.1897532e-04   6.9331287e-03   8.4863836e-03   1.0164058e-02   2.9210959e-03   1.7078622e-03   4.0814516e-04   3.6297378e-03   8.7693352e-04   2.6826046e-03   6.9132822e-03   8.3825591e-03   2.1963225e-02   1.8183756e-02   1.2010413e-02   1.7351817e-03   6.4958425e-03   5.7545512e-03   1.9704198e-02   2.2181841e-02   1.8230646e-04   1.5254865e-03   1.2273284e-02   6.9788794e-03   6.3577637e-03   3.5631537e-03   4.1101326e-03   6.5324999e-04   9.4729463e-03   5.0293123e-03   4.5622977e-03   3.3814637e-03   4.1233865e-03   5.1615257e-03   2.3326030e-03   2.1577255e-03   4.0325048e-03   5.1426622e-03   1.5351757e-03   2.6422594e-03   1.3282600e-02   1.5461999e-02   3.4307479e-03   1.1531055e-02   1.3518551e-02   4.2918744e-03   5.5398788e-03   8.4122900e-05
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-correlation-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-correlation-ml.txt
    deleted file mode 100644
    index 2a17a2a8fb..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-correlation-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   9.2507465e-01   9.6528566e-01   8.7255441e-01   1.1287379e+00   8.7318727e-01   1.0767102e+00   9.1419676e-01   1.1503304e+00   9.8074509e-01   1.0135025e+00   1.0495025e+00   9.4794536e-01   9.6829273e-01   1.1345767e+00   1.1048008e+00   9.2407796e-01   1.0228634e+00   9.3853195e-01   9.9377619e-01   1.0407662e+00   9.5048989e-01   9.0465688e-01   9.8056930e-01   8.9777156e-01   9.6357127e-01   9.3864452e-01   9.9754613e-01   9.7271356e-01   8.4383151e-01   9.6981983e-01   9.7510267e-01   1.0112663e+00   7.8730400e-01   1.0299498e+00   9.9307979e-01   9.0239520e-01   8.5428231e-01   8.8972742e-01   8.5933162e-01   9.6625934e-01   9.4175449e-01   9.9120729e-01   1.0503963e+00   8.8223053e-01   1.3261434e+00   1.1063209e+00   8.4058398e-01   1.0844267e+00   1.1153093e+00   1.0092643e+00   8.9585237e-01   1.0599818e+00   1.2321707e+00   1.1359624e+00   8.3503556e-01   1.1792243e+00   7.9159781e-01   1.0830419e+00   1.2181870e+00   9.9888500e-01   1.0227144e+00   6.8557277e-01   9.6836193e-01   1.1061227e+00   1.0883453e+00   9.5681974e-01   9.9436299e-01   1.0304323e+00   1.1273949e+00   1.0735563e+00   1.0582583e+00   9.6040272e-01   1.0032137e+00   8.4900547e-01   1.1035351e+00   8.7867480e-01   9.6433176e-01   9.1850122e-01   8.9337435e-01   1.0449390e+00   8.9639384e-01   9.6704971e-01   1.0084258e+00   1.0528587e+00   1.1764481e+00   1.0913280e+00   1.0136672e+00   1.2737156e+00   9.5130359e-01   1.0367909e+00   1.1983402e+00   1.1319901e+00   1.1117462e+00   1.0343695e+00   1.0838628e+00   7.5266057e-01   1.0763316e+00   8.8067924e-01   9.6734383e-01   9.8800551e-01   1.2265742e+00   7.8833055e-01   1.0338670e+00   8.6666625e-01   9.9039950e-01   9.7142684e-01   9.3138616e-01   8.5849977e-01   8.5486301e-01   1.0516028e+00   1.1105313e+00   9.5943505e-01   9.8845171e-01   1.0566288e+00   9.9712198e-01   9.5545756e-01   1.1817974e+00   9.9128482e-01   1.0117892e+00   1.0979115e+00   1.0493943e+00   9.1318848e-01   9.3157311e-01   8.7073304e-01   
1.2459441e+00   9.3412689e-01   1.0482297e+00   9.4224032e-01   9.5134153e-01   9.0857493e-01   9.7264161e-01   8.2900820e-01   9.3140549e-01   1.1330242e+00   1.0333002e+00   1.0117861e+00   1.2053255e+00   8.5291396e-01   1.0148928e+00   8.6641379e-01   9.7080819e-01   9.5457159e-01   9.5207457e-01   9.3539674e-01   9.0769069e-01   9.5322590e-01   1.1181803e+00   9.9765614e-01   7.5370610e-01   1.0807114e+00   1.0804601e+00   9.0214124e-01   8.7101998e-01   1.0167435e+00   1.2045936e+00   8.7300539e-01   1.1054300e+00   7.9145574e-01   1.0279340e+00   8.7623462e-01   1.0034756e+00   1.0386933e+00   9.3910970e-01   1.0028455e+00   9.9868824e-01   9.8752945e-01   9.8319327e-01   1.3110209e+00   8.6180633e-01   1.0993856e+00   8.5912563e-01   1.1303979e+00   9.8690459e-01   9.6910090e-01   9.1456819e-01   1.1525339e+00   1.1064552e+00   1.1062255e+00   9.7226683e-01   1.1091447e+00   1.1072238e+00   9.6544444e-01   9.6681036e-01   9.3247685e-01   9.6854634e-01   1.1035119e+00   1.1317148e+00   9.5557793e-01   9.8908485e-01   7.4873648e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-cosine-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-cosine-ml-iris.txt
    deleted file mode 100644
    index 8b705b348f..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-cosine-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   1.4208365e-03   1.2652718e-05   8.9939315e-04   2.4232332e-04   9.9747033e-04   9.2045721e-04   2.2040648e-04   8.6480051e-04   1.2354911e-03   5.3650090e-06   1.0275886e-03   1.1695784e-03   2.3556571e-04   1.4590172e-03   1.8981327e-03   1.0939621e-03   1.2392314e-04   3.5850877e-04   8.6078038e-04   1.4490833e-03   8.4059347e-04   3.2873982e-03   2.7359832e-03   4.1316044e-03   2.7719149e-03   1.1814143e-03   1.1431285e-04   2.3850299e-04   1.3446247e-03   1.6406549e-03   1.2070654e-03   2.2241257e-03   1.4969348e-03   1.2354911e-03   7.6154552e-04   9.0853884e-04   1.2354911e-03   1.5825612e-04   2.3716586e-04   2.5806020e-04   8.5870759e-03   4.3447170e-04   2.6103416e-03   3.4026094e-03   1.2625429e-03   1.0000714e-03   2.7088099e-04   4.6161202e-05   1.7993015e-04   7.1619641e-02   7.4013940e-02   8.2336355e-02   9.3599031e-02   8.6542298e-02   9.2667602e-02   8.0934616e-02   6.7002415e-02   7.9695318e-02   8.3991107e-02   8.8330128e-02   7.6449243e-02   8.6123390e-02   9.1414445e-02   5.9767596e-02   6.8589764e-02   9.2363748e-02   7.5261304e-02   1.0768528e-01   7.8250149e-02   9.7383870e-02   6.9410330e-02   1.0895936e-01   9.1644587e-02   7.2677910e-02   7.2208930e-02   8.7635618e-02   9.3586395e-02   8.7700193e-02   5.8825053e-02   7.9271072e-02   7.4136423e-02   7.0977606e-02   1.1670751e-01   9.6691498e-02   7.7157266e-02   7.8793137e-02   9.6187418e-02   7.4355610e-02   8.6677009e-02   9.7286808e-02   8.5214421e-02   7.7419803e-02   6.8888638e-02   8.6192502e-02   7.4757686e-02   7.8851331e-02   7.5042247e-02   5.2484298e-02   7.8023694e-02   1.3991867e-01   1.2655756e-01   1.2099780e-01   1.2515784e-01   1.3134370e-01   1.3306336e-01   1.2911903e-01   1.2854613e-01   1.3655327e-01   1.1601604e-01   9.9632498e-02   1.2063863e-01   1.1404742e-01   1.3409335e-01   1.3451976e-01   1.1368563e-01   1.1469397e-01   1.1505768e-01   1.5479411e-01   1.2906390e-01   1.1634186e-01   1.2299625e-01   1.3892070e-01   1.0732534e-01   1.1401190e-01   
1.1254699e-01   1.0266168e-01   1.0210743e-01   1.3111378e-01   1.0950615e-01   1.2501276e-01   1.0108759e-01   1.3297245e-01   1.0624129e-01   1.3360037e-01   1.2002867e-01   1.2233784e-01   1.1387071e-01   1.0061412e-01   1.0649150e-01   1.2174429e-01   1.0147290e-01   1.2655756e-01   1.2438709e-01   1.2138109e-01   1.1044406e-01   1.1910000e-01   1.0821359e-01   1.1609070e-01   1.1329724e-01   1.2085473e-03   1.2060695e-03   2.7592041e-03   3.0736184e-03   3.7201033e-03   1.0861043e-03   7.3910902e-04   3.4790667e-04   1.3491546e-03   2.4493052e-03   1.8482587e-04   2.3308566e-03   3.8997403e-03   6.3069928e-03   4.1362617e-03   1.5079538e-03   7.4890015e-04   4.0049414e-03   3.0763412e-04   3.2877725e-03   8.6909088e-03   1.8863199e-03   4.7592122e-03   4.5180751e-04   1.7148301e-03   8.8703626e-04   5.7128783e-04   1.7151033e-03   8.4814176e-04   4.7551630e-04   6.9313334e-03   5.8126778e-03   3.4790667e-04   9.7078221e-04   1.0390338e-03   3.4790667e-04   1.1371495e-03   7.0598263e-04   2.3100870e-03   3.1332241e-03   2.9870115e-03   3.7693564e-03   5.5008337e-03   2.0081767e-04   3.9261497e-03   1.6237803e-03   1.7731168e-03   5.9153033e-04   5.9997244e-02   6.3706418e-02   7.0131342e-02   8.0131815e-02   7.3670020e-02   8.1412444e-02   7.1132932e-02   5.6572408e-02   6.7223691e-02   7.3993918e-02   7.4363256e-02   6.6371013e-02   7.1106157e-02   7.9730716e-02   5.0610503e-02   5.7285563e-02   8.2536028e-02   6.3695818e-02   9.1877918e-02   6.6044079e-02   8.7700525e-02   5.7975072e-02   9.4407127e-02   7.9385033e-02   6.0900938e-02   6.0521931e-02   7.4070557e-02   8.1073873e-02   7.6438218e-02   4.7634460e-02   6.6728846e-02   6.1732271e-02   5.9656897e-02   1.0363139e-01   8.7312695e-02   6.8806126e-02   6.7142432e-02   8.0911573e-02   6.5091322e-02   7.4541034e-02   8.5313436e-02   7.4229332e-02   6.5328348e-02   5.7461491e-02   7.4891760e-02   6.5136264e-02   6.8598864e-02   6.3641018e-02   4.2790811e-02   6.7276779e-02   1.2872765e-01   1.1385917e-01   
1.0708423e-01   1.1221780e-01   1.1844388e-01   1.1798239e-01   1.1767648e-01   1.1356773e-01   1.2073038e-01   1.0467824e-01   8.8441784e-02   1.0671832e-01   1.0091826e-01   1.2051300e-01   1.2244533e-01   1.0247664e-01   1.0203920e-01   1.0334656e-01   1.3764340e-01   1.1314999e-01   1.0390175e-01   1.1148602e-01   1.2274267e-01   9.3929112e-02   1.0239198e-01   9.9372667e-02   9.0109024e-02   9.0770318e-02   1.1749345e-01   9.5509620e-02   1.0956056e-01   8.9331297e-02   1.1936188e-01   9.3207628e-02   1.1935153e-01   1.0516553e-01   1.1204585e-01   1.0191688e-01   8.9582588e-02   9.3806716e-02   1.0922100e-01   8.9087100e-02   1.1385917e-01   1.1193127e-01   1.0978099e-01   9.7766696e-02   1.0448839e-01   9.5849546e-02   1.0619992e-01   1.0212555e-01   7.8301662e-04   3.3186074e-04   9.6097551e-04   9.6384587e-04   1.7160230e-04   7.1714495e-04   1.0915291e-03   1.4406904e-05   9.9431295e-04   1.0280837e-03   3.4520010e-04   1.6070142e-03   2.0814960e-03   1.1810349e-03   9.3270090e-05   2.4892291e-04   9.5000112e-04   1.2447556e-03   8.3736374e-04   3.6303226e-03   2.4141846e-03   3.9965261e-03   2.4688022e-03   1.0115165e-03   6.9871786e-05   1.7487334e-04   1.2251185e-03   1.4398826e-03   9.8199498e-04   2.5137187e-03   1.7466742e-03   1.0915291e-03   7.0690363e-04   8.5846505e-04   1.0915291e-03   1.0992291e-04   1.6427013e-04   2.8562896e-04   8.0123750e-03   5.0490687e-04   2.4076078e-03   3.3222239e-03   1.0270492e-03   1.0987887e-03   2.4862356e-04   7.8815959e-05   1.1120052e-04   7.0071463e-02   7.2494258e-02   8.0694698e-02   9.1816479e-02   8.4823937e-02   9.1055284e-02   7.9406161e-02   6.5540015e-02   7.8075821e-02   8.2418924e-02   8.6586217e-02   7.4908999e-02   8.4375857e-02   8.9771433e-02   5.8365951e-02   6.7055640e-02   9.0792516e-02   7.3755504e-02   1.0570869e-01   7.6652799e-02   9.5758989e-02   6.7858347e-02   1.0707149e-01   9.0015148e-02   7.1111432e-02   7.0634591e-02   8.5909852e-02   9.1841705e-02   8.6060650e-02   5.7382885e-02   
7.7642663e-02   7.2560884e-02   6.9439824e-02   1.1486601e-01   9.5132094e-02   7.5722276e-02   7.7186494e-02   9.4329550e-02   7.2913445e-02   8.4999890e-02   9.5631654e-02   8.3632299e-02   7.5814411e-02   6.7360493e-02   8.4581854e-02   7.3324210e-02   7.7335911e-02   7.3484711e-02   5.1093482e-02   7.6474851e-02   1.3800148e-01   1.2463801e-01   1.1904450e-01   1.2328593e-01   1.2938789e-01   1.3104169e-01   1.2726294e-01   1.2658511e-01   1.3448678e-01   1.1418055e-01   9.7888383e-02   1.1868360e-01   1.1213978e-01   1.3206545e-01   1.3251384e-01   1.1184454e-01   1.1286955e-01   1.1328841e-01   1.5256500e-01   1.2703121e-01   1.1444439e-01   1.2112577e-01   1.3684054e-01   1.0544428e-01   1.1220824e-01   1.1073079e-01   1.0084086e-01   1.0036834e-01   1.2912019e-01   1.0768201e-01   1.2300696e-01   9.9385216e-02   1.3095409e-01   1.0446385e-01   1.3171213e-01   1.1800444e-01   1.2052688e-01   1.1209190e-01   9.8892088e-02   1.0463359e-01   1.1979721e-01   9.9600101e-02   1.2463801e-01   1.2247195e-01   1.1948197e-01   1.0852184e-01   1.1709036e-01   1.0637133e-01   1.1433097e-01   1.1154058e-01   1.2829581e-03   8.6520525e-04   1.3042912e-03   2.3052671e-04   6.0609671e-05   6.1408538e-04   7.9384016e-04   2.5551469e-04   9.4346154e-04   1.8930050e-03   4.6203036e-03   3.8649853e-03   3.3273220e-03   9.7135787e-04   2.5836286e-04   1.6395377e-03   4.6720392e-04   1.3833444e-03   6.8585778e-03   1.1817616e-03   1.4184724e-03   1.2935682e-03   4.4534899e-04   4.3337262e-04   9.9734142e-04   6.2957380e-05   2.1802414e-04   1.3452346e-03   3.6759458e-03   3.7514511e-03   6.1408538e-04   2.3527566e-03   2.5967147e-03   6.1408538e-04   3.1896708e-04   3.0643540e-04   1.7034162e-03   7.0964884e-03   1.0371098e-03   1.9760564e-03   1.6993217e-03   9.2490489e-04   1.2129757e-03   2.8785057e-04   7.8777499e-04   6.4144968e-04   5.7636535e-02   5.9786679e-02   6.7275391e-02   7.7706661e-02   7.1288776e-02   7.6308806e-02   6.5987844e-02   5.3398709e-02   6.4839697e-02   
6.8887148e-02   7.2874646e-02   6.2111692e-02   7.1088473e-02   7.5274214e-02   4.7295630e-02   5.5048251e-02   7.6266639e-02   6.0532100e-02   9.0997542e-02   6.3501941e-02   8.1155480e-02   5.5841790e-02   9.1620605e-02   7.5304976e-02   5.8627379e-02   5.8302297e-02   7.2188128e-02   7.7632065e-02   7.2128571e-02   4.6353347e-02   6.4522763e-02   5.9860052e-02   5.7075256e-02   9.8501473e-02   8.0208982e-02   6.2676929e-02   6.4117314e-02   8.0306154e-02   5.9903400e-02   7.1264506e-02   8.0454669e-02   6.9667510e-02   6.2855874e-02   5.5234852e-02   7.0611788e-02   6.0083969e-02   6.3933681e-02   6.0638614e-02   4.1119113e-02   6.3291748e-02   1.2072945e-01   1.0797760e-01   1.0284307e-01   1.0630032e-01   1.1246316e-01   1.1377579e-01   1.1035397e-01   1.0939330e-01   1.1704519e-01   9.8543065e-02   8.3389076e-02   1.0253622e-01   9.6610654e-02   1.1523295e-01   1.1624035e-01   9.6621030e-02   9.6718555e-02   9.7003685e-02   1.3426257e-01   1.1013293e-01   9.8838972e-02   1.0496266e-01   1.1920082e-01   9.0400878e-02   9.6352086e-02   9.4617133e-02   8.6118226e-02   8.5443225e-02   1.1226469e-01   9.1815383e-02   1.0642172e-01   8.4132371e-02   1.1413570e-01   8.8823115e-02   1.1373227e-01   1.0228600e-01   1.0454965e-01   9.5917796e-02   8.4129252e-02   8.9732713e-02   1.0404039e-01   8.5714179e-02   1.0797760e-01   1.0611357e-01   1.0375975e-01   9.3828435e-02   1.0141953e-01   9.1231247e-02   9.8764813e-02   9.5558448e-02   7.0033377e-04   3.9650610e-04   5.3529876e-04   1.4703029e-03   2.2471049e-03   2.6137215e-04   9.1585095e-04   2.3098853e-03   3.2779352e-04   1.7003275e-03   9.5035099e-04   8.4163249e-04   3.6423601e-04   8.6760304e-04   2.6110376e-04   2.4965606e-03   5.0990123e-04   2.2208392e-03   3.4995017e-03   3.9813106e-03   4.2652650e-03   1.4776191e-03   5.3856223e-04   9.6152184e-04   1.6178695e-03   2.4296336e-03   2.2824176e-03   1.0483334e-03   6.6735604e-04   2.2471049e-03   1.7166964e-03   1.9224889e-03   2.2471049e-03   4.4953685e-04   
7.5090712e-04   3.1050470e-04   1.1530910e-02   8.0837373e-05   2.6173161e-03   2.7612054e-03   2.3974656e-03   3.9140870e-04   3.5730731e-04   1.1232648e-04   8.0278741e-04   7.4728046e-02   7.6441141e-02   8.5477412e-02   9.7141382e-02   8.9947057e-02   9.5081677e-02   8.2962705e-02   6.9633999e-02   8.3013931e-02   8.6069979e-02   9.2215558e-02   7.8736928e-02   9.0603515e-02   9.4074986e-02   6.2034704e-02   7.1640320e-02   9.4150759e-02   7.8195110e-02   1.1214391e-01   8.1468219e-02   9.9059263e-02   7.2514318e-02   1.1269547e-01   9.4545020e-02   7.5842542e-02   7.5358360e-02   9.1332869e-02   9.6662705e-02   9.0277244e-02   6.2066860e-02   8.2644288e-02   7.7554694e-02   7.3959493e-02   1.1955630e-01   9.8181734e-02   7.8602674e-02   8.1755435e-02   1.0058819e-01   7.6248524e-02   8.9701900e-02   9.9938282e-02   8.7676596e-02   8.0619290e-02   7.1976555e-02   8.8793557e-02   7.6779152e-02   8.1107438e-02   7.7952944e-02   5.5245517e-02   8.0550459e-02   1.4162183e-01   1.2912349e-01   1.2423521e-01   1.2779447e-01   1.3393410e-01   1.3660889e-01   1.3105158e-01   1.3208577e-01   1.4040000e-01   1.1817736e-01   1.0200650e-01   1.2388995e-01   1.1706801e-01   1.3699958e-01   1.3682207e-01   1.1586916e-01   1.1739162e-01   1.1729454e-01   1.5902469e-01   1.3308573e-01   1.1901641e-01   1.2511327e-01   1.4289089e-01   1.1059070e-01   1.1627926e-01   1.1550831e-01   1.0561378e-01   1.0446495e-01   1.3405102e-01   1.1291439e-01   1.2888996e-01   1.0359625e-01   1.3590097e-01   1.0925250e-01   1.3665207e-01   1.2379539e-01   1.2392962e-01   1.1624448e-01   1.0286550e-01   1.0945264e-01   1.2440339e-01   1.0449561e-01   1.2912349e-01   1.2690130e-01   1.2362142e-01   1.1341467e-01   1.2276171e-01   1.1097585e-01   1.1759891e-01   1.1534218e-01   1.3143808e-04   7.3710840e-04   1.1313742e-03   2.6277162e-03   9.9332749e-04   4.8298989e-04   2.9659782e-03   1.8303797e-03   3.9657692e-03   1.4753738e-03   1.6266891e-03   7.0233916e-04   8.0313831e-04   3.4526160e-04   
2.3291483e-03   1.3867759e-04   4.2228272e-03   1.6991343e-03   2.3223655e-03   3.8453210e-03   4.2904903e-04   9.9302567e-04   1.7706867e-03   9.4981017e-04   1.8259864e-03   2.0820613e-03   2.1473879e-03   2.0420431e-03   2.6277162e-03   3.0779094e-03   3.4332541e-03   2.6277162e-03   6.3280964e-04   1.0576914e-03   9.5198627e-04   1.0925795e-02   3.7286463e-04   7.9546610e-04   9.1841431e-04   2.1468126e-03   4.9129575e-04   4.3562197e-04   7.5083238e-04   1.3686608e-03   6.3901299e-02   6.4740623e-02   7.3708779e-02   8.4613714e-02   7.7866771e-02   8.2261058e-02   7.0449151e-02   5.8874682e-02   7.1767088e-02   7.3210535e-02   8.0660949e-02   6.6601983e-02   8.0033785e-02   8.1391959e-02   5.1369939e-02   6.0897790e-02   8.0716992e-02   6.7403323e-02   9.9203670e-02   7.0276809e-02   8.4922276e-02   6.1688045e-02   9.9339240e-02   8.2362360e-02   6.4928234e-02   6.4360101e-02   7.9641814e-02   8.3721620e-02   7.7549963e-02   5.2617898e-02   7.1414187e-02   6.6946935e-02   6.3031902e-02   1.0509118e-01   8.4332170e-02   6.6064468e-02   7.0064616e-02   8.8758294e-02   6.4379548e-02   7.7371173e-02   8.7052850e-02   7.5305342e-02   6.9340944e-02   6.1339869e-02   7.6377320e-02   6.5179636e-02   6.9093895e-02   6.6669498e-02   4.5609365e-02   6.8684945e-02   1.2445912e-01   1.1341836e-01   1.0935772e-01   1.1262566e-01   1.1789507e-01   1.2147174e-01   1.1488682e-01   1.1752559e-01   1.2531063e-01   1.0271865e-01   8.7888567e-02   1.0902443e-01   1.0234160e-01   1.2080033e-01   1.1990073e-01   1.0043696e-01   1.0286413e-01   1.0252340e-01   1.4292168e-01   1.1866325e-01   1.0381139e-01   1.0919240e-01   1.2785249e-01   9.6570465e-02   1.0127523e-01   1.0149554e-01   9.1688518e-02   9.0323099e-02   1.1822766e-01   9.9584713e-02   1.1452014e-01   9.0018133e-02   1.1983081e-01   9.5741335e-02   1.2190290e-01   1.0915996e-01   1.0773474e-01   1.0161859e-01   8.8729453e-02   9.5169428e-02   1.0868349e-01   9.0278091e-02   1.1341836e-01   1.1118524e-01   1.0767597e-01   
9.8555096e-02   1.0809822e-01   9.6490550e-02   1.0179914e-01   1.0040847e-01   9.0953179e-04   1.6478123e-03   3.1324421e-03   9.3747882e-04   6.8074049e-04   3.4285457e-03   1.4256139e-03   3.3141786e-03   8.1135619e-04   1.2040955e-03   7.3894006e-04   1.1469835e-03   5.4914496e-05   3.0238895e-03   1.1512346e-04   2.9874978e-03   2.7356591e-03   2.9755481e-03   4.8570629e-03   9.8132331e-04   1.1267736e-03   1.9187302e-03   1.4320892e-03   2.5472569e-03   2.7129147e-03   1.2621760e-03   1.1868918e-03   3.1324421e-03   3.1260816e-03   3.4622842e-03   3.1324421e-03   7.8737454e-04   1.2923124e-03   7.7291736e-04   1.2676988e-02   1.5795155e-04   1.4073300e-03   1.3093851e-03   2.8558230e-03   2.3589004e-04   5.3160641e-04   6.3306680e-04   1.5563919e-03   6.9394652e-02   7.0160248e-02   7.9549278e-02   9.0909253e-02   8.3929778e-02   8.8133516e-02   7.5949213e-02   6.4094635e-02   7.7538115e-02   7.8838295e-02   8.6828513e-02   7.2078729e-02   8.6190925e-02   8.7328483e-02   5.6305232e-02   6.6307663e-02   8.6433769e-02   7.2861306e-02   1.0610432e-01   7.5977192e-02   9.0782134e-02   6.7147548e-02   1.0605640e-01   8.8295560e-02   7.0476640e-02   6.9912539e-02   8.5755505e-02   8.9909894e-02   8.3415192e-02   5.7694397e-02   7.7198547e-02   7.2551886e-02   6.8485682e-02   1.1174631e-01   9.0047290e-02   7.1258462e-02   7.5770197e-02   9.5276007e-02   6.9606963e-02   8.3332111e-02   9.3091350e-02   8.1019819e-02   7.5041473e-02   6.6748030e-02   8.2172293e-02   7.0413691e-02   7.4567733e-02   7.2221920e-02   5.0422561e-02   7.4234075e-02   1.3135838e-01   1.2029572e-01   1.1630277e-01   1.1941581e-01   1.2489530e-01   1.2871814e-01   1.2159882e-01   1.2460620e-01   1.3270425e-01   1.0925415e-01   9.4076611e-02   1.1596894e-01   1.0908894e-01   1.2799150e-01   1.2695158e-01   1.0694484e-01   1.0944639e-01   1.0895711e-01   1.5084375e-01   1.2591962e-01   1.1052596e-01   1.1587184e-01   1.3530738e-01   1.0320818e-01   1.0775506e-01   1.0806337e-01   9.8123191e-02   
9.6541726e-02   1.2533326e-01   1.0616585e-01   1.2166800e-01   9.6181548e-02   1.2699662e-01   1.0216112e-01   1.2885603e-01   1.1626103e-01   1.1421827e-01   1.0807124e-01   9.4882428e-02   1.0171954e-01   1.1554226e-01   9.6763759e-02   1.2029572e-01   1.1801757e-01   1.1438908e-01   1.0525128e-01   1.1515210e-01   1.0301668e-01   1.0810316e-01   1.0676998e-01   2.4407151e-04   6.8243680e-04   1.6882982e-04   4.2217018e-04   8.1245396e-04   8.1915702e-04   2.7980568e-03   2.6783721e-03   2.0076713e-03   3.3526400e-04   9.3506008e-05   1.0407900e-03   7.3148476e-04   9.1895790e-04   4.8425923e-03   1.7878106e-03   2.5638304e-03   1.8092053e-03   6.2482332e-04   4.5470127e-05   3.8680919e-04   4.8577398e-04   7.0932539e-04   1.0773286e-03   2.7081281e-03   2.3916675e-03   6.8243680e-04   1.3234869e-03   1.5152295e-03   6.8243680e-04   1.7279927e-05   4.4719936e-05   7.6774714e-04   7.6386402e-03   5.1509749e-04   2.1386706e-03   2.3673979e-03   8.8641907e-04   8.8317423e-04   5.7646989e-05   1.8767975e-04   1.8238427e-04   6.4591491e-02   6.6891146e-02   7.4787553e-02   8.5653640e-02   7.8909235e-02   8.4481757e-02   7.3468926e-02   6.0165176e-02   7.2232139e-02   7.6459237e-02   8.0572670e-02   6.9287036e-02   7.8547451e-02   8.3338681e-02   5.3514192e-02   6.1787978e-02   8.4336540e-02   6.7840538e-02   9.9351761e-02   7.0839680e-02   8.9318727e-02   6.2598635e-02   1.0029777e-01   8.3444651e-02   6.5618944e-02   6.5228710e-02   7.9886645e-02   8.5622882e-02   7.9922508e-02   5.2526388e-02   7.1863670e-02   6.6948234e-02   6.3994975e-02   1.0763490e-01   8.8479248e-02   6.9931400e-02   7.1440370e-02   8.8224815e-02   6.7118281e-02   7.8968665e-02   8.8858891e-02   7.7432758e-02   7.0109240e-02   6.2023845e-02   7.8396402e-02   6.7393801e-02   7.1380489e-02   6.7813026e-02   4.6767795e-02   7.0645561e-02   1.3044475e-01   1.1734304e-01   1.1197394e-01   1.1577499e-01   1.2198760e-01   1.2346289e-01   1.1982320e-01   1.1899008e-01   1.2683842e-01   1.0736476e-01   
9.1564623e-02   1.1164167e-01   1.0538958e-01   1.2475783e-01   1.2551509e-01   1.0524662e-01   1.0574315e-01   1.0607279e-01   1.4459461e-01   1.1962188e-01   1.0766800e-01   1.1407280e-01   1.2909426e-01   9.8905414e-02   1.0524346e-01   1.0359925e-01   9.4433579e-02   9.3820759e-02   1.2176744e-01   1.0065671e-01   1.1574436e-01   9.2625059e-02   1.2364363e-01   9.7538593e-02   1.2367543e-01   1.1121391e-01   1.1355049e-01   1.0493323e-01   9.2419908e-02   9.8167154e-02   1.1298864e-01   9.3668541e-02   1.1734304e-01   1.1533257e-01   1.1267510e-01   1.0222063e-01   1.1031800e-01   9.9779829e-02   1.0752614e-01   1.0448251e-01   3.8330702e-04   7.6710204e-04   5.4934344e-04   6.1141025e-04   1.8880070e-03   4.3782366e-03   4.2558302e-03   3.3445116e-03   9.0730658e-04   1.6460272e-04   1.9935351e-03   2.2277110e-04   1.5935452e-03   7.2001884e-03   1.0201171e-03   1.9163397e-03   8.7300929e-04   4.6754224e-04   3.6671499e-04   7.4258415e-04   2.1567602e-04   1.3361003e-04   9.1168360e-04   4.3156597e-03   4.1158943e-03   3.8330702e-04   1.9019978e-03   2.1146706e-03   3.8330702e-04   3.1982857e-04   2.1854146e-04   1.6719903e-03   5.9155088e-03   1.3110961e-03   2.0595508e-03   2.2774590e-03   5.2912957e-04   1.6598142e-03   4.0619000e-04   8.5702191e-04   4.6128261e-04   5.7335316e-02   5.9791552e-02   6.7034247e-02   7.7315388e-02   7.0912461e-02   7.6541197e-02   6.6231857e-02   5.3290914e-02   6.4524455e-02   6.9096848e-02   7.2330829e-02   6.2164647e-02   7.0266106e-02   7.5359485e-02   4.7211115e-02   5.4717217e-02   7.6724195e-02   6.0437574e-02   9.0217835e-02   6.3227153e-02   8.1624838e-02   5.5479636e-02   9.1272977e-02   7.5343817e-02   5.8299412e-02   5.7952393e-02   7.1727180e-02   7.7451339e-02   7.2165967e-02   4.5880845e-02   6.4164650e-02   5.9464600e-02   5.6817377e-02   9.8638775e-02   8.0838937e-02   6.3146715e-02   6.3916551e-02   7.9536591e-02   6.0201366e-02   7.1084400e-02   8.0631146e-02   6.9793274e-02   6.2554472e-02   5.4911579e-02   
7.0667171e-02   6.0374722e-02   6.4102637e-02   6.0460719e-02   4.0707229e-02   6.3307106e-02   1.2135312e-01   1.0820155e-01   1.0273439e-01   1.0658023e-01   1.1268914e-01   1.1365695e-01   1.1088857e-01   1.0931070e-01   1.1680946e-01   9.8829208e-02   8.3503653e-02   1.0241354e-01   9.6518124e-02   1.1527856e-01   1.1644011e-01   9.6835948e-02   9.6892604e-02   9.7403729e-02   1.3389056e-01   1.0978147e-01   9.8889679e-02   1.0531384e-01   1.1893855e-01   9.0170315e-02   9.6654705e-02   9.4696682e-02   8.5999457e-02   8.5628213e-02   1.1232778e-01   9.1692367e-02   1.0609783e-01   8.4336563e-02   1.1417833e-01   8.8845800e-02   1.1399156e-01   1.0186496e-01   1.0510756e-01   9.6245658e-02   8.4341961e-02   8.9608741e-02   1.0408154e-01   8.5412047e-02   1.0820155e-01   1.0631639e-01   1.0398240e-01   9.3623884e-02   1.0104040e-01   9.1224725e-02   9.9332091e-02   9.5993921e-02   1.1067957e-03   1.4791390e-03   7.1256747e-05   2.0377231e-03   4.3755431e-03   5.9630791e-03   4.4970379e-03   1.5921641e-03   6.4984761e-04   3.3935862e-03   1.2039709e-04   3.0970780e-03   8.3950153e-03   2.1890332e-03   3.1326528e-03   5.0256002e-04   1.6389584e-03   6.4717383e-04   7.1019942e-04   8.9864077e-04   3.8255378e-04   1.2286350e-03   5.5229901e-03   5.1766813e-03   0.0000000e+00   1.5860612e-03   1.6773969e-03   0.0000000e+00   8.4337656e-04   4.6746407e-04   2.4549978e-03   4.7529836e-03   2.3235808e-03   4.0683267e-03   4.3260986e-03   6.7336618e-04   2.8454658e-03   1.0918918e-03   1.3756658e-03   5.7784546e-04   5.9573290e-02   6.3070670e-02   6.9597309e-02   7.9911457e-02   7.3480528e-02   7.9923883e-02   7.0144874e-02   5.5923876e-02   6.6635620e-02   7.3192589e-02   7.4096565e-02   6.5836734e-02   7.1022277e-02   7.8555696e-02   5.0423423e-02   5.7089619e-02   8.1093473e-02   6.2483167e-02   9.2251714e-02   6.5399221e-02   8.6573432e-02   5.7873871e-02   9.3861710e-02   7.7914479e-02   6.0545459e-02   6.0328179e-02   7.3725736e-02   8.0652769e-02   7.5662466e-02   
4.7500835e-02   6.6267157e-02   6.1219907e-02   5.9246920e-02   1.0235525e-01   8.5584812e-02   6.7627185e-02   6.6676399e-02   8.1028522e-02   6.3874534e-02   7.4037098e-02   8.3735875e-02   7.3091049e-02   6.4875922e-02   5.7134332e-02   7.3898502e-02   6.3669341e-02   6.7483654e-02   6.3032151e-02   4.3195391e-02   6.6465430e-02   1.2757303e-01   1.1294171e-01   1.0654531e-01   1.1074736e-01   1.1756538e-01   1.1705185e-01   1.1633100e-01   1.1230305e-01   1.1988948e-01   1.0404710e-01   8.7981667e-02   1.0622547e-01   1.0061669e-01   1.2008497e-01   1.2242153e-01   1.0217012e-01   1.0084187e-01   1.0181343e-01   1.3714147e-01   1.1243585e-01   1.0356517e-01   1.1071692e-01   1.2181923e-01   9.3742899e-02   1.0137566e-01   9.8085445e-02   8.9840814e-02   8.9979544e-02   1.1682155e-01   9.4340727e-02   1.0893096e-01   8.8036209e-02   1.1887723e-01   9.1995894e-02   1.1718099e-01   1.0529554e-01   1.1115622e-01   1.0048991e-01   8.8823715e-02   9.3647258e-02   1.0909883e-01   8.9729548e-02   1.1294171e-01   1.1121254e-01   1.0947844e-01   9.8164553e-02   1.0458423e-01   9.5468337e-02   1.0529433e-01   1.0077315e-01   9.3075268e-04   1.0661545e-03   2.6597529e-04   1.6036733e-03   2.0280056e-03   1.2436972e-03   1.5688801e-04   3.1850165e-04   8.9411637e-04   1.3235015e-03   8.8731482e-04   3.4816593e-03   2.6719247e-03   3.9091992e-03   2.6159373e-03   1.1443019e-03   7.9601608e-05   2.3028989e-04   1.2135551e-03   1.4956336e-03   1.2137749e-03   2.2449952e-03   1.5932074e-03   1.1067957e-03   8.0827056e-04   9.5525701e-04   1.1067957e-03   1.2520454e-04   1.8684214e-04   3.3283736e-04   8.4659292e-03   4.3428627e-04   2.6431662e-03   3.3043825e-03   1.2192723e-03   9.6672170e-04   2.2673224e-04   4.0165289e-05   1.5556464e-04   7.0885985e-02   7.3312749e-02   8.1555170e-02   9.2789394e-02   8.5768022e-02   9.1811327e-02   8.0210428e-02   6.6299983e-02   7.8898236e-02   8.3277528e-02   8.7497229e-02   7.5768419e-02   8.5268176e-02   9.0575828e-02   5.9182538e-02   
6.7899308e-02   9.1577973e-02   7.4436351e-02   1.0683101e-01   7.7459472e-02   9.6638219e-02   6.8724317e-02   1.0805306e-01   9.0746123e-02   7.1944393e-02   7.1498544e-02   8.6811878e-02   9.2796163e-02   8.6929388e-02   5.8156140e-02   7.8485798e-02   7.3355880e-02   7.0259533e-02   1.1577796e-01   9.5890635e-02   7.6480264e-02   7.8047377e-02   9.5335392e-02   7.3634293e-02   8.5899063e-02   9.6384778e-02   8.4415996e-02   7.6657094e-02   6.8177250e-02   8.5395861e-02   7.3990972e-02   7.8093423e-02   7.4294151e-02   5.1950910e-02   7.7279181e-02   1.3907744e-01   1.2568112e-01   1.2011323e-01   1.2420710e-01   1.3046004e-01   1.3207399e-01   1.2824918e-01   1.2752552e-01   1.3553911e-01   1.1524115e-01   9.8893820e-02   1.1975932e-01   1.1323008e-01   1.3322963e-01   1.3377253e-01   1.1295596e-01   1.1379378e-01   1.1416138e-01   1.5374990e-01   1.2806482e-01   1.1555054e-01   1.2219358e-01   1.3787989e-01   1.0651347e-01   1.1318026e-01   1.1161431e-01   1.0188137e-01   1.0132199e-01   1.3022140e-01   1.0855236e-01   1.2404638e-01   1.0022528e-01   1.3210134e-01   1.0532767e-01   1.3250558e-01   1.1917979e-01   1.2157791e-01   1.1297631e-01   9.9847302e-02   1.0571550e-01   1.2097128e-01   1.0080768e-01   1.2568112e-01   1.2354605e-01   1.2062969e-01   1.0973133e-01   1.1825900e-01   1.0742526e-01   1.1535080e-01   1.1244756e-01   1.9470856e-03   1.8498175e-03   4.7714250e-03   2.8358661e-03   3.0255426e-03   1.1308587e-03   6.7035566e-04   9.3284570e-04   1.3935241e-03   9.8369983e-04   5.6854836e-03   1.9144361e-03   1.0961099e-03   2.6770659e-03   6.7637792e-04   7.3922961e-04   1.6168588e-03   1.9795771e-04   8.8027763e-04   2.3819907e-03   2.3199642e-03   2.7913184e-03   1.4791390e-03   3.2257382e-03   3.5250868e-03   1.4791390e-03   4.9791374e-04   7.0216560e-04   1.6800207e-03   1.0022835e-02   5.5855445e-04   1.9786373e-03   9.4684044e-04   1.9956071e-03   4.5593799e-04   2.5049818e-04   7.2992180e-04   1.1563910e-03   6.0779252e-02   6.2273308e-02   
7.0462169e-02   8.1326510e-02   7.4767830e-02   7.8734546e-02   6.8077240e-02   5.6059538e-02   6.8181020e-02   7.1050105e-02   7.6799016e-02   6.4482663e-02   7.5580358e-02   7.7962233e-02   4.9642606e-02   5.8153068e-02   7.8105114e-02   6.3436311e-02   9.5564553e-02   6.6739850e-02   8.2930379e-02   5.9008492e-02   9.5418848e-02   7.8187143e-02   6.1832470e-02   6.1508833e-02   7.5927040e-02   8.0793818e-02   7.4771898e-02   4.9618073e-02   6.7927507e-02   6.3289271e-02   6.0099335e-02   1.0140473e-01   8.1745663e-02   6.4187383e-02   6.7135505e-02   8.4768567e-02   6.1827019e-02   7.4354928e-02   8.3103529e-02   7.2159743e-02   6.6094709e-02   5.8361788e-02   7.3251937e-02   6.2103535e-02   6.6220430e-02   6.3584868e-02   4.3971154e-02   6.5863068e-02   1.2260074e-01   1.1066837e-01   1.0619606e-01   1.0899833e-01   1.1518894e-01   1.1739848e-01   1.1240635e-01   1.1296742e-01   1.2096568e-01   1.0086214e-01   8.5896751e-02   1.0590639e-01   9.9771313e-02   1.1830710e-01   1.1878365e-01   9.8989344e-02   9.9484141e-02   9.9300506e-02   1.3860781e-01   1.1421784e-01   1.0167360e-01   1.0723785e-01   1.2323222e-01   9.3791376e-02   9.8729091e-02   9.7617417e-02   8.9196630e-02   8.7906597e-02   1.1533851e-01   9.5241683e-02   1.1037299e-01   8.6684313e-02   1.1722334e-01   9.1866320e-02   1.1676032e-01   1.0620321e-01   1.0631345e-01   9.8352717e-02   8.6492752e-02   9.2837846e-02   1.0689111e-01   8.8947496e-02   1.1066837e-01   1.0877202e-01   1.0619549e-01   9.7005210e-02   1.0523294e-01   9.4129616e-02   1.0043708e-01   9.7689504e-02   1.8508011e-03   3.7513322e-03   6.0039368e-03   4.2304138e-03   1.5191600e-03   7.2789043e-04   3.6236504e-03   2.5132214e-04   3.2740913e-03   8.1034702e-03   2.4941139e-03   4.0964229e-03   6.0206143e-04   1.9190323e-03   6.6472571e-04   5.1664338e-04   1.3616103e-03   7.0613265e-04   1.0312088e-03   5.8211090e-03   5.1401914e-03   7.1256747e-05   1.1045080e-03   1.1556192e-03   7.1256747e-05   9.3818356e-04   5.1597856e-04   
2.2957469e-03   4.3308939e-03   2.5276111e-03   4.3800580e-03   5.1684770e-03   5.8668191e-04   3.2395561e-03   1.2942225e-03   1.4104695e-03   4.9075437e-04   6.2060492e-02   6.5820549e-02   7.2324527e-02   8.2688153e-02   7.6151035e-02   8.3216525e-02   7.3194172e-02   5.8477367e-02   6.9270609e-02   7.6249005e-02   7.6681852e-02   6.8643243e-02   7.3331578e-02   8.1706941e-02   5.2792718e-02   5.9477668e-02   8.4501232e-02   6.5244243e-02   9.4903309e-02   6.8042766e-02   9.0018791e-02   6.0245936e-02   9.6930604e-02   8.1064051e-02   6.3026851e-02   6.2769846e-02   7.6368221e-02   8.3589775e-02   7.8680956e-02   4.9590571e-02   6.8853459e-02   6.3691512e-02   6.1750809e-02   1.0592213e-01   8.9183264e-02   7.0750492e-02   6.9363027e-02   8.3535040e-02   6.6868103e-02   7.6873580e-02   8.7073732e-02   7.6162997e-02   6.7469442e-02   5.9546534e-02   7.6928171e-02   6.6694450e-02   7.0471384e-02   6.5682900e-02   4.5133798e-02   6.9312951e-02   1.3167296e-01   1.1664541e-01   1.0993682e-01   1.1453374e-01   1.2132631e-01   1.2063781e-01   1.2028602e-01   1.1588513e-01   1.2343288e-01   1.0759677e-01   9.1184353e-02   1.0959799e-01   1.0389171e-01   1.2371908e-01   1.2606621e-01   1.0559841e-01   1.0439417e-01   1.0553794e-01   1.4077951e-01   1.1579171e-01   1.0695513e-01   1.1441431e-01   1.2538100e-01   9.6825962e-02   1.0496558e-01   1.0155971e-01   9.2933277e-02   9.3305204e-02   1.2046243e-01   9.7626604e-02   1.1224568e-01   9.1421248e-02   1.2250398e-01   9.5336276e-02   1.2112681e-01   1.0839632e-01   1.1495562e-01   1.0414545e-01   9.2136906e-02   9.6777242e-02   1.1252211e-01   9.2559606e-02   1.1664541e-01   1.1484781e-01   1.1301462e-01   1.0121871e-01   1.0770332e-01   9.8720694e-02   1.0901533e-01   1.0446815e-01   7.8277898e-04   1.7490530e-03   1.0345024e-03   5.6185312e-04   1.1591486e-03   1.1405764e-03   2.5549089e-03   1.4484284e-03   2.2580494e-03   4.5713265e-03   5.6870335e-03   4.1902203e-03   2.4320876e-03   6.0369458e-04   6.2286369e-04   
2.4521502e-03   2.9038905e-03   2.2436415e-03   1.7675525e-03   9.5896000e-04   2.0377231e-03   8.9090360e-04   9.7827632e-04   2.0377231e-03   7.4516940e-04   8.4824201e-04   4.3724648e-04   1.0582513e-02   7.1366344e-04   4.1221085e-03   4.7945036e-03   2.3833891e-03   1.3170043e-03   8.5049004e-04   2.9093352e-04   6.7142903e-04   7.9558936e-02   8.2158081e-02   9.0820201e-02   1.0264403e-01   9.5277746e-02   1.0150997e-01   8.9387659e-02   7.4718776e-02   8.7974881e-02   9.2637642e-02   9.6993873e-02   8.4761019e-02   9.4485044e-02   1.0025701e-01   6.7224195e-02   7.6433848e-02   1.0126400e-01   8.3185627e-02   1.1729605e-01   8.6461029e-02   1.0658954e-01   7.7314215e-02   1.1857784e-01   1.0034666e-01   8.0683042e-02   8.0237049e-02   9.6300382e-02   1.0267579e-01   9.6489769e-02   6.6032956e-02   8.7552344e-02   8.2099586e-02   7.8915667e-02   1.2662250e-01   1.0571935e-01   8.5387558e-02   8.7148938e-02   1.0518820e-01   8.2417845e-02   9.5416475e-02   1.0627769e-01   9.3794810e-02   8.5650752e-02   7.6704239e-02   9.4843643e-02   8.2753514e-02   8.7142105e-02   8.3165683e-02   5.9528971e-02   8.6320416e-02   1.5080041e-01   1.3699340e-01   1.3123407e-01   1.3538631e-01   1.4196949e-01   1.4361623e-01   1.3957348e-01   1.3881335e-01   1.4720194e-01   1.2611020e-01   1.0906565e-01   1.3086970e-01   1.2408176e-01   1.4489915e-01   1.4541581e-01   1.2374159e-01   1.2456922e-01   1.2489938e-01   1.6613145e-01   1.3940980e-01   1.2649129e-01   1.3333884e-01   1.4960338e-01   1.1706680e-01   1.2394226e-01   1.2226369e-01   1.1222236e-01   1.1158045e-01   1.4175176e-01   1.1903032e-01   1.3525827e-01   1.1036833e-01   1.4372294e-01   1.1569638e-01   1.4384978e-01   1.3029466e-01   1.3261278e-01   1.2368173e-01   1.1003418e-01   1.1624174e-01   1.3214763e-01   1.1113124e-01   1.3699340e-01   1.3478604e-01   1.3174332e-01   1.2045775e-01   1.2933910e-01   1.1801062e-01   1.2611372e-01   1.2312584e-01   2.4038804e-03   9.9356679e-04   1.6379146e-03   2.9968879e-03   
2.7777132e-03   5.0292552e-03   2.9485139e-03   1.7810659e-03   7.1187929e-03   1.0385224e-02   6.8192179e-03   4.7007047e-03   2.2536066e-03   1.6978265e-03   5.5995030e-03   5.8830752e-03   3.3086218e-03   3.5101479e-03   1.6089784e-03   4.3755431e-03   1.0574938e-03   1.0592785e-03   4.3755431e-03   2.5472554e-03   2.6641717e-03   1.0704333e-03   1.2070572e-02   2.4953874e-03   6.0785955e-03   8.5181088e-03   3.9627930e-03   3.6619699e-03   2.9233174e-03   1.7757672e-03   2.0595965e-03   9.0904959e-02   9.3846404e-02   1.0296173e-01   1.1510418e-01   1.0729700e-01   1.1521099e-01   1.0181822e-01   8.5988795e-02   1.0000742e-01   1.0504161e-01   1.0917671e-01   9.6459196e-02   1.0622808e-01   1.1358449e-01   7.7434068e-02   8.7329703e-02   1.1478646e-01   9.5582611e-02   1.2982843e-01   9.8465917e-02   1.1998512e-01   8.8160180e-02   1.3221652e-01   1.1399918e-01   9.2024225e-02   9.1371105e-02   1.0854159e-01   1.1533865e-01   1.0916600e-01   7.6158175e-02   9.9423258e-02   9.3687045e-02   9.0204262e-02   1.4138542e-01   1.1970815e-01   9.7663366e-02   9.8994798e-02   1.1736337e-01   9.4681982e-02   1.0777767e-01   1.2034441e-01   1.0671033e-01   9.7400706e-02   8.7763928e-02   1.0767516e-01   9.5332473e-02   9.9630913e-02   9.4930517e-02   6.8563663e-02   9.8462875e-02   1.6617515e-01   1.5176730e-01   1.4543395e-01   1.5072522e-01   1.5691405e-01   1.5882414e-01   1.5479314e-01   1.5416358e-01   1.6247914e-01   1.3996762e-01   1.2197918e-01   1.4500225e-01   1.3765005e-01   1.5950314e-01   1.5937612e-01   1.3710080e-01   1.3913130e-01   1.3976637e-01   1.8183098e-01   1.5420823e-01   1.4014252e-01   1.4766552e-01   1.6507182e-01   1.3020370e-01   1.3818766e-01   1.3684571e-01   1.2517707e-01   1.2500429e-01   1.5651711e-01   1.3335007e-01   1.4978001e-01   1.2432862e-01   1.5834992e-01   1.2988375e-01   1.6032290e-01   1.4373178e-01   1.4688056e-01   1.3840222e-01   1.2332107e-01   1.2926358e-01   1.4578293e-01   1.2292671e-01   1.5176730e-01   1.4922075e-01   
1.4546636e-01   1.3299550e-01   1.4276363e-01   1.3134387e-01   1.4008961e-01   1.3766630e-01   5.7728358e-04   1.6620505e-03   3.0662753e-03   5.1693417e-04   6.0968463e-03   8.4744633e-04   8.7364721e-04   5.8106642e-03   6.7399476e-03   8.6083103e-03   3.1702748e-03   2.7104978e-03   3.3164143e-03   4.2509190e-03   5.8084215e-03   4.6709776e-03   9.8526568e-04   3.6786909e-04   5.9630791e-03   3.9566796e-03   4.2657824e-03   5.9630791e-03   2.3876668e-03   3.1383576e-03   1.0693622e-03   1.7187016e-02   9.8571152e-04   3.1367899e-03   3.7448988e-03   5.3108404e-03   1.2637202e-03   2.1359423e-03   1.6418787e-03   3.1352541e-03   8.3834616e-02   8.4243676e-02   9.4832859e-02   1.0703873e-01   9.9466934e-02   1.0403376e-01   9.0316788e-02   7.7914871e-02   9.2850027e-02   9.3299365e-02   1.0296391e-01   8.6082003e-02   1.0248600e-01   1.0318273e-01   6.8824902e-02   8.0299100e-02   1.0157055e-01   8.7955506e-02   1.2341155e-01   9.1142100e-02   1.0580332e-01   8.1173197e-02   1.2350931e-01   1.0460831e-01   8.4991511e-02   8.4254332e-02   1.0174542e-01   1.0573929e-01   9.8648070e-02   7.1061471e-02   9.2438015e-02   8.7508516e-02   8.2750270e-02   1.2928539e-01   1.0529438e-01   8.4835976e-02   9.0594121e-02   1.1204138e-01   8.3577196e-02   9.8754999e-02   1.0957368e-01   9.6258171e-02   8.9990776e-02   8.0898906e-02   9.7508458e-02   8.4744426e-02   8.9161769e-02   8.6853262e-02   6.2377571e-02   8.8830405e-02   1.4853162e-01   1.3772455e-01   1.3389458e-01   1.3729951e-01   1.4254710e-01   1.4753124e-01   1.3874272e-01   1.4343376e-01   1.5191156e-01   1.2542851e-01   1.0950060e-01   1.3351960e-01   1.2589032e-01   1.4575308e-01   1.4361388e-01   1.2273259e-01   1.2665814e-01   1.2593062e-01   1.7100462e-01   1.4482556e-01   1.2707776e-01   1.3245591e-01   1.5480441e-01   1.1982038e-01   1.2429326e-01   1.2550864e-01   1.1421155e-01   1.1236837e-01   1.4320023e-01   1.2379599e-01   1.4017288e-01   1.1252737e-01   1.4478118e-01   1.1925773e-01   1.4807486e-01   
1.3379535e-01   1.3019772e-01   1.2505772e-01   1.1047444e-01   1.1793234e-01   1.3214851e-01   1.1203286e-01   1.3772455e-01   1.3511133e-01   1.3062456e-01   1.2117415e-01   1.3256036e-01   1.1928977e-01   1.2368263e-01   1.2328316e-01   7.8697050e-04   2.0732289e-03   9.4315564e-04   4.6001401e-03   8.4240364e-04   1.3015708e-03   4.6297460e-03   7.5292997e-03   6.5572401e-03   2.5566943e-03   1.7941741e-03   1.8226799e-03   3.9920133e-03   4.8070278e-03   2.6490734e-03   2.2737423e-03   8.3198965e-04   4.4970379e-03   1.8707122e-03   2.0801746e-03   4.4970379e-03   1.6864362e-03   2.1518496e-03   3.0908897e-04   1.2802000e-02   1.1444166e-03   2.7605116e-03   4.8428825e-03   3.3840997e-03   1.9469936e-03   1.7958172e-03   1.1565833e-03   1.8697862e-03   8.2127567e-02   8.3518119e-02   9.3314949e-02   1.0506408e-01   9.7541782e-02   1.0397584e-01   9.0341739e-02   7.6817337e-02   9.1083710e-02   9.3231274e-02   1.0048418e-01   8.5524503e-02   9.9112893e-02   1.0267345e-01   6.7846626e-02   7.8515797e-02   1.0225403e-01   8.6849217e-02   1.2022955e-01   8.9502113e-02   1.0655817e-01   7.9297559e-02   1.2165144e-01   1.0391861e-01   8.3203942e-02   8.2410584e-02   9.9528824e-02   1.0443382e-01   9.8019721e-02   6.8818975e-02   9.0543610e-02   8.5485551e-02   8.1186134e-02   1.2894588e-01   1.0651601e-01   8.5580315e-02   8.9213768e-02   1.0886353e-01   8.3754339e-02   9.7441118e-02   1.0932583e-01   9.5882843e-02   8.8282134e-02   7.9128322e-02   9.6917266e-02   8.4873719e-02   8.8927957e-02   8.5532654e-02   6.0384203e-02   8.8123284e-02   1.4980593e-01   1.3770759e-01   1.3282368e-01   1.3741111e-01   1.4254251e-01   1.4639384e-01   1.3970199e-01   1.4238069e-01   1.5040197e-01   1.2563911e-01   1.0916002e-01   1.3240724e-01   1.2489275e-01   1.4520334e-01   1.4360899e-01   1.2274102e-01   1.2644577e-01   1.2642535e-01   1.6908994e-01   1.4294672e-01   1.2654652e-01   1.3286867e-01   1.5320101e-01   1.1837980e-01   1.2451917e-01   1.2497748e-01   1.1312705e-01   
1.1222677e-01   1.4268257e-01   1.2261254e-01   1.3839073e-01   1.1239899e-01   1.4421566e-01   1.1854547e-01   1.4805458e-01   1.3176737e-01   1.3127552e-01   1.2532607e-01   1.1042618e-01   1.1684289e-01   1.3160924e-01   1.1043724e-01   1.3770759e-01   1.3504379e-01   1.3066174e-01   1.1987723e-01   1.3066578e-01   1.1856367e-01   1.2478710e-01   1.2391087e-01   3.0983409e-04   7.5828890e-04   1.5917755e-03   4.8958816e-04   3.3744944e-03   2.1101097e-03   4.2400870e-03   2.8698866e-03   7.9583168e-04   2.4610899e-04   3.6436132e-04   1.4311801e-03   1.7294514e-03   8.6738167e-04   2.6111809e-03   1.6704106e-03   1.5921641e-03   8.6161593e-04   1.0547029e-03   1.5921641e-03   2.0427868e-04   3.5845705e-04   1.2194863e-04   8.2981219e-03   4.9180195e-04   1.7380522e-03   3.0734607e-03   1.0728608e-03   1.1397310e-03   3.4603128e-04   1.9200118e-04   2.8845040e-04   6.9779438e-02   7.1836392e-02   8.0319868e-02   9.1354890e-02   8.4353236e-02   9.0635736e-02   7.8599311e-02   6.5140986e-02   7.7899100e-02   8.1486701e-02   8.6472565e-02   7.4062042e-02   8.4619349e-02   8.9336326e-02   5.7575298e-02   6.6635212e-02   8.9946961e-02   7.3779929e-02   1.0532587e-01   7.6464940e-02   9.4587006e-02   6.7402630e-02   1.0673169e-01   8.9925800e-02   7.0797999e-02   7.0219100e-02   8.5721997e-02   9.1187854e-02   8.5377890e-02   5.7235173e-02   7.7431768e-02   7.2498793e-02   6.9063158e-02   1.1428266e-01   9.4218471e-02   7.4725647e-02   7.6703845e-02   9.4228329e-02   7.2261001e-02   8.4462132e-02   9.5360997e-02   8.3133678e-02   7.5506963e-02   6.7041127e-02   8.4070372e-02   7.2903149e-02   7.6784460e-02   7.3115143e-02   5.0413571e-02   7.5926129e-02   1.3636539e-01   1.2353657e-01   1.1821263e-01   1.2258303e-01   1.2822457e-01   1.3051322e-01   1.2599517e-01   1.2631512e-01   1.3406707e-01   1.1278003e-01   9.6722933e-02   1.1783731e-01   1.1110860e-01   1.3080317e-01   1.3063996e-01   1.1029647e-01   1.1216341e-01   1.1248813e-01   1.5199693e-01   1.2674108e-01   
1.1318561e-01   1.1969790e-01   1.3653036e-01   1.0458853e-01   1.1112582e-01   1.1028100e-01   9.9890110e-02   9.9356661e-02   1.2805612e-01   1.0750038e-01   1.2261289e-01   9.8791075e-02   1.2975191e-01   1.0408196e-01   1.3162969e-01   1.1713290e-01   1.1886116e-01   1.1132823e-01   9.7814075e-02   1.0357451e-01   1.1833994e-01   9.8179977e-02   1.2353657e-01   1.2124412e-01   1.1787602e-01   1.0709455e-01   1.1618054e-01   1.0529428e-01   1.1269702e-01   1.1053049e-01   1.3650135e-03   5.3926014e-04   9.6875216e-04   5.5085642e-03   1.1951469e-03   2.8096772e-03   1.4033998e-03   3.8702395e-04   1.0970323e-04   3.3218009e-04   5.6326785e-04   5.8024795e-04   5.6723773e-04   3.5935032e-03   3.0059920e-03   6.4984761e-04   1.1677062e-03   1.3673782e-03   6.4984761e-04   7.6378345e-05   6.3488092e-05   8.1586688e-04   6.3954323e-03   8.4458294e-04   1.6959745e-03   2.5316364e-03   4.4648839e-04   1.3649198e-03   2.1646092e-04   4.0910219e-04   1.5323026e-04   6.2114649e-02   6.4461203e-02   7.2168083e-02   8.2712554e-02   7.6067484e-02   8.2127836e-02   7.1085856e-02   5.7869953e-02   6.9689694e-02   7.3941980e-02   7.7755077e-02   6.6772898e-02   7.5730146e-02   8.0858032e-02   5.1159438e-02   5.9263906e-02   8.1982206e-02   6.5662191e-02   9.5957099e-02   6.8346124e-02   8.6755825e-02   6.0017643e-02   9.7272303e-02   8.1103209e-02   6.3092478e-02   6.2637310e-02   7.7107544e-02   8.2765719e-02   7.7320565e-02   5.0179546e-02   6.9272103e-02   6.4477670e-02   6.1520691e-02   1.0482403e-01   8.6201836e-02   6.7723532e-02   6.8849001e-02   8.5128110e-02   6.4954612e-02   7.6260237e-02   8.6474261e-02   7.5037042e-02   6.7540885e-02   5.9554295e-02   7.5917230e-02   6.5334858e-02   6.9084098e-02   6.5352935e-02   4.4308417e-02   6.8222624e-02   1.2733314e-01   1.1424612e-01   1.0876932e-01   1.1294180e-01   1.1881213e-01   1.2027633e-01   1.1690957e-01   1.1601884e-01   1.2357066e-01   1.0430113e-01   8.8647048e-02   1.0842142e-01   1.0217893e-01   1.2134270e-01   
1.2195833e-01   1.0207761e-01   1.0292571e-01   1.0341320e-01   1.4095409e-01   1.1639921e-01   1.0445121e-01   1.1097871e-01   1.2583780e-01   9.5736690e-02   1.0236688e-01   1.0085185e-01   9.1372067e-02   9.1009848e-02   1.1849408e-01   9.7905297e-02   1.1253291e-01   9.0050809e-02   1.2026601e-01   9.4848068e-02   1.2106318e-01   1.0772883e-01   1.1055160e-01   1.0223795e-01   8.9621067e-02   9.5003079e-02   1.0961118e-01   9.0242843e-02   1.1424612e-01   1.1217935e-01   1.0939970e-01   9.8767943e-02   1.0686010e-01   9.6691216e-02   1.0462140e-01   1.0177309e-01   3.4212913e-03   2.0350185e-04   2.2645835e-03   3.4346676e-03   3.6178579e-03   5.4115677e-03   1.4013939e-03   1.2010586e-03   1.9342083e-03   1.8303919e-03   3.0241355e-03   3.0182648e-03   8.7783530e-04   7.3200159e-04   3.3935862e-03   3.0016113e-03   3.3110105e-03   3.3935862e-03   9.0468402e-04   1.4291912e-03   6.5529771e-04   1.3482371e-02   1.1883350e-04   1.9129351e-03   1.8189821e-03   3.2224845e-03   2.2840556e-04   6.5009173e-04   5.8224397e-04   1.6275063e-03   7.3184911e-02   7.4026716e-02   8.3609924e-02   9.5240271e-02   8.8102740e-02   9.2390529e-02   7.9966102e-02   6.7767341e-02   8.1513618e-02   8.2938634e-02   9.0997666e-02   7.6011769e-02   9.0234918e-02   9.1578032e-02   5.9804069e-02   7.0034281e-02   9.0682677e-02   7.6686753e-02   1.1071727e-01   7.9918092e-02   9.5151478e-02   7.0899518e-02   1.1069472e-01   9.2509702e-02   7.4297007e-02   7.3732579e-02   8.9920224e-02   9.4250688e-02   8.7608739e-02   6.1121885e-02   8.1169501e-02   7.6375843e-02   7.2267735e-02   1.1652820e-01   9.4360734e-02   7.5150369e-02   7.9755441e-02   9.9609504e-02   7.3443983e-02   8.7507436e-02   9.7438140e-02   8.5130511e-02   7.8978434e-02   7.0471516e-02   8.6314807e-02   7.4241923e-02   7.8526940e-02   7.6102189e-02   5.3711374e-02   7.8190521e-02   1.3653802e-01   1.2528995e-01   1.2121146e-01   1.2435019e-01   1.2997976e-01   1.3382130e-01   1.2659694e-01   1.2959353e-01   1.3786369e-01   
1.1404399e-01   9.8548323e-02   1.2087284e-01   1.1387348e-01   1.3314978e-01   1.3209550e-01   1.1169631e-01   1.1419600e-01   1.1368838e-01   1.5633317e-01   1.3093427e-01   1.1535002e-01   1.2078800e-01   1.4049523e-01   1.0785716e-01   1.1249675e-01   1.1275652e-01   1.0267406e-01   1.0105332e-01   1.3042827e-01   1.1078233e-01   1.2662105e-01   1.0064162e-01   1.3213313e-01   1.0672611e-01   1.3386868e-01   1.2116827e-01   1.1908244e-01   1.1278797e-01   9.9360911e-02   1.0635497e-01   1.2047378e-01   1.0130606e-01   1.2528995e-01   1.2297832e-01   1.1929072e-01   1.0997770e-01   1.2004306e-01   1.0767864e-01   1.1284273e-01   1.1147304e-01   2.8709378e-03   9.0791333e-03   1.3407674e-03   2.7138923e-03   2.4677711e-04   1.1444972e-03   7.6121265e-04   8.8810957e-04   7.0009018e-04   1.4757411e-04   9.0393445e-04   6.0883361e-03   5.6961877e-03   1.2039709e-04   1.8816976e-03   2.0265121e-03   1.2039709e-04   8.6239061e-04   5.2686780e-04   2.5538294e-03   4.1367540e-03   2.4461852e-03   3.1968429e-03   3.7345867e-03   3.8813913e-04   2.9749715e-03   1.1153685e-03   1.5845430e-03   6.9221070e-04   5.5493017e-02   5.8687008e-02   6.5182775e-02   7.5153394e-02   6.8882837e-02   7.5289957e-02   6.5516107e-02   5.1904720e-02   6.2434251e-02   6.8395322e-02   6.9725101e-02   6.1264247e-02   6.7000659e-02   7.3915045e-02   4.6337454e-02   5.2993228e-02   7.6203506e-02   5.8574285e-02   8.7235787e-02   6.1228310e-02   8.1349394e-02   5.3727422e-02   8.8860415e-02   7.3529860e-02   5.6419133e-02   5.6136434e-02   6.9314277e-02   7.5768587e-02   7.0921131e-02   4.3887190e-02   6.2047408e-02   5.7247874e-02   5.5122668e-02   9.7063471e-02   8.0589157e-02   6.3015465e-02   6.2273448e-02   7.6485434e-02   5.9534434e-02   6.9400424e-02   7.9100961e-02   6.8556756e-02   6.0632063e-02   5.3105900e-02   6.9320431e-02   5.9485747e-02   6.3075829e-02   5.8812039e-02   3.9389619e-02   6.2057390e-02   1.2120786e-01   1.0709958e-01   1.0095433e-01   1.0522389e-01   1.1158919e-01   
1.1144992e-01   1.1038844e-01   1.0698853e-01   1.1429410e-01   9.8230586e-02   8.2643326e-02   1.0062987e-01   9.5029365e-02   1.1396256e-01   1.1592889e-01   9.6297509e-02   9.5506831e-02   9.6444702e-02   1.3110088e-01   1.0707131e-01   9.7795782e-02   1.0475251e-01   1.1626300e-01   8.8405248e-02   9.5813058e-02   9.2970070e-02   8.4549021e-02   8.4699662e-02   1.1089247e-01   8.9470273e-02   1.0356569e-01   8.3077190e-02   1.1281589e-01   8.7056416e-02   1.1197016e-01   9.9670877e-02   1.0510107e-01   9.5157512e-02   8.3537128e-02   8.8197538e-02   1.0308982e-01   8.4141549e-02   1.0709958e-01   1.0532428e-01   1.0341164e-01   9.2382015e-02   9.8953568e-02   8.9983057e-02   9.9390435e-02   9.5301345e-02   3.0771633e-03   2.2394953e-03   3.5785600e-03   4.5455517e-03   7.4728021e-04   1.0553655e-03   1.6471751e-03   1.6255623e-03   2.5337657e-03   2.0402330e-03   1.9207121e-03   1.4230439e-03   3.0970780e-03   2.6131629e-03   2.9423829e-03   3.0970780e-03   7.3620931e-04   1.2096052e-03   5.1210113e-04   1.1445573e-02   3.3743120e-04   9.3762253e-04   1.6709589e-03   2.3328362e-03   6.4367426e-04   6.1292579e-04   6.6980522e-04   1.3596425e-03   6.8771058e-02   6.9631829e-02   7.8939123e-02   9.0076585e-02   8.3106206e-02   8.8021023e-02   7.5587788e-02   6.3613086e-02   7.6977857e-02   7.8355410e-02   8.6058485e-02   7.1477483e-02   8.5350309e-02   8.7042060e-02   5.5585563e-02   6.5564323e-02   8.6285657e-02   7.2679829e-02   1.0489589e-01   7.5454083e-02   9.0429367e-02   6.6346194e-02   1.0535674e-01   8.8189227e-02   6.9809235e-02   6.9152322e-02   8.5027647e-02   8.9182145e-02   8.2908995e-02   5.6974544e-02   7.6567724e-02   7.1978204e-02   6.7853368e-02   1.1142320e-01   9.0045691e-02   7.1019587e-02   7.5131918e-02   9.4265212e-02   6.9407495e-02   8.2681448e-02   9.3017444e-02   8.0734547e-02   7.4408554e-02   6.6081064e-02   8.1800498e-02   7.0361312e-02   7.4293705e-02   7.1683996e-02   4.9412056e-02   7.3791248e-02   1.3087381e-01   1.1972349e-01   
1.1554097e-01   1.1917243e-01   1.2428472e-01   1.2815053e-01   1.2126029e-01   1.2425112e-01   1.3208038e-01   1.0854852e-01   9.3334993e-02   1.1518188e-01   1.0821021e-01   1.2711496e-01   1.2583624e-01   1.0606084e-01   1.0908141e-01   1.0877992e-01   1.4997249e-01   1.2525272e-01   1.0965413e-01   1.1522136e-01   1.3472784e-01   1.0229252e-01   1.0728052e-01   1.0776484e-01   9.7267064e-02   9.5981774e-02   1.2460839e-01   1.0582154e-01   1.2096134e-01   9.5922459e-02   1.2615683e-01   1.0184377e-01   1.2900688e-01   1.1512576e-01   1.1363761e-01   1.0783825e-01   9.4308496e-02   1.0078579e-01   1.1452781e-01   9.5389301e-02   1.1972349e-01   1.1733679e-01   1.1347632e-01   1.0398208e-01   1.1403752e-01   1.0220157e-01   1.0755302e-01   1.0649118e-01   1.0147620e-02   1.1247381e-02   1.2064168e-02   6.5374061e-03   4.5897505e-03   4.8228518e-03   7.5563629e-03   9.2308338e-03   7.2321889e-03   1.6011806e-03   5.3396772e-04   8.3950153e-03   4.7739373e-03   4.9472694e-03   8.3950153e-03   4.4998786e-03   5.2439437e-03   2.2809748e-03   2.1071898e-02   2.7056396e-03   6.9610435e-03   7.8780829e-03   8.1126375e-03   3.2257880e-03   4.3393216e-03   3.1425928e-03   4.9174647e-03   1.0007131e-01   1.0106593e-01   1.1217080e-01   1.2532148e-01   1.1715442e-01   1.2248393e-01   1.0792073e-01   9.3860583e-02   1.0976919e-01   1.1122163e-01   1.2044721e-01   1.0322968e-01   1.1921347e-01   1.2149913e-01   8.4186074e-02   9.6291408e-02   1.2023049e-01   1.0442285e-01   1.4247402e-01   1.0795998e-01   1.2499239e-01   9.7246530e-02   1.4298267e-01   1.2269113e-01   1.0132833e-01   1.0059203e-01   1.1928692e-01   1.2425009e-01   1.1675840e-01   8.5744895e-02   1.0931412e-01   1.0376332e-01   9.9002718e-02   1.4971116e-01   1.2434000e-01   1.0215677e-01   1.0769100e-01   1.2998610e-01   1.0050022e-01   1.1660891e-01   1.2830083e-01   1.1408155e-01   1.0679553e-01   9.6866698e-02   1.1540492e-01   1.0158690e-01   1.0644304e-01   1.0354221e-01   7.6665035e-02   1.0598765e-01   
1.7102185e-01   1.5912057e-01   1.5467066e-01   1.5843570e-01   1.6430112e-01   1.6898961e-01   1.6040927e-01   1.6442765e-01   1.7347908e-01   1.4613872e-01   1.2882261e-01   1.5426961e-01   1.4623773e-01   1.6767510e-01   1.6569930e-01   1.4326884e-01   1.4700928e-01   1.4639344e-01   1.9375995e-01   1.6572948e-01   1.4772209e-01   1.5370280e-01   1.7644069e-01   1.3951457e-01   1.4477511e-01   1.4552738e-01   1.3362833e-01   1.3186831e-01   1.6485780e-01   1.4332228e-01   1.6088049e-01   1.3176893e-01   1.6660706e-01   1.3873049e-01   1.6938110e-01   1.5434218e-01   1.5143663e-01   1.4540553e-01   1.2987912e-01   1.3768917e-01   1.5323248e-01   1.3136310e-01   1.5912057e-01   1.5638588e-01   1.5175254e-01   1.4128505e-01   1.5308268e-01   1.3923812e-01   1.4444485e-01   1.4370095e-01   2.6770864e-03   1.6033718e-03   4.5840441e-04   2.0276877e-03   2.4561050e-03   1.2787767e-03   1.0310968e-03   1.1138996e-03   7.3996134e-03   6.8122401e-03   2.1890332e-03   3.7576667e-03   4.1081994e-03   2.1890332e-03   1.7366871e-03   1.7496571e-03   3.0804223e-03   5.2624414e-03   3.0566387e-03   8.7139687e-04   2.3320271e-03   9.5854356e-04   3.5582665e-03   1.9033529e-03   2.7781156e-03   2.0582238e-03   4.8466175e-02   5.0119607e-02   5.7333122e-02   6.6635612e-02   6.0625611e-02   6.6546613e-02   5.6002103e-02   4.4610205e-02   5.5428894e-02   5.8355341e-02   6.2731565e-02   5.1936578e-02   6.1589720e-02   6.5254921e-02   3.8121898e-02   4.5707178e-02   6.5954438e-02   5.2310619e-02   7.8737277e-02   5.4221486e-02   6.9833666e-02   4.6306978e-02   8.0104533e-02   6.6061691e-02   4.9289042e-02   4.8703396e-02   6.2029842e-02   6.6459215e-02   6.1626890e-02   3.8139914e-02   5.4977441e-02   5.0955269e-02   4.7810568e-02   8.6879401e-02   6.9864176e-02   5.2960539e-02   5.4195866e-02   6.9397405e-02   5.0805294e-02   6.0771100e-02   7.0699543e-02   5.9928918e-02   5.3277849e-02   4.6137567e-02   6.0647902e-02   5.1507184e-02   5.4535520e-02   5.1271651e-02   3.2182295e-02   
5.3653922e-02   1.0655841e-01   9.4809862e-02   8.9943103e-02   9.4257768e-02   9.8930460e-02   1.0102907e-01   9.7318001e-02   9.7573045e-02   1.0420091e-01   8.5339961e-02   7.1237738e-02   8.9593643e-02   8.3615246e-02   1.0099568e-01   1.0093948e-01   8.3072610e-02   8.4968711e-02   8.5464008e-02   1.2001752e-01   9.7755169e-02   8.5475001e-02   9.1482164e-02   1.0648093e-01   7.7912530e-02   8.4002788e-02   8.3421922e-02   7.3855173e-02   7.3645673e-02   9.8656580e-02   8.1073912e-02   9.4018658e-02   7.3420060e-02   1.0008227e-01   7.8012093e-02   1.0282145e-01   8.8787179e-02   9.1014600e-02   8.4377747e-02   7.2315249e-02   7.7005472e-02   8.9946277e-02   7.2132565e-02   9.4809862e-02   9.2717682e-02   8.9711332e-02   7.9921256e-02   8.7947099e-02   7.8589830e-02   8.5634568e-02   8.3685774e-02   3.6322977e-03   2.1046334e-03   3.2662345e-03   4.7576391e-03   8.8436254e-04   1.6169936e-03   5.0909036e-03   5.3937261e-03   6.9592372e-03   3.1326528e-03   7.3963134e-03   7.8155750e-03   3.1326528e-03   2.8162695e-03   2.9888005e-03   5.3841195e-03   1.1640832e-02   3.1058623e-03   3.5268449e-03   8.9518404e-04   4.1579453e-03   2.4418843e-03   2.3174283e-03   3.5804624e-03   3.9289766e-03   4.8779386e-02   4.9807262e-02   5.7295117e-02   6.7429274e-02   6.1528729e-02   6.3805672e-02   5.4672352e-02   4.4305273e-02   5.5263563e-02   5.7527840e-02   6.3439356e-02   5.1886311e-02   6.2844358e-02   6.3392021e-02   3.9105833e-02   4.6659036e-02   6.3315061e-02   5.0431450e-02   8.1151197e-02   5.3901750e-02   6.8036856e-02   4.7518043e-02   7.9948453e-02   6.3397617e-02   4.9788208e-02   4.9648688e-02   6.2516012e-02   6.6678586e-02   6.0874148e-02   3.9322135e-02   5.5164136e-02   5.1029588e-02   4.8159012e-02   8.4641068e-02   6.6410487e-02   5.1100232e-02   5.4352465e-02   7.1152366e-02   4.8870866e-02   6.0790663e-02   6.7705030e-02   5.8185400e-02   5.3489810e-02   4.6729049e-02   5.9304859e-02   4.8888831e-02   5.2875044e-02   5.1050585e-02   3.4854587e-02   
5.2844524e-02   1.0451993e-01   9.3506986e-02   8.9732041e-02   9.1442484e-02   9.7717353e-02   9.9707944e-02   9.4802947e-02   9.5351404e-02   1.0312713e-01   8.4850971e-02   7.1294232e-02   8.9511382e-02   8.4075688e-02   1.0102482e-01   1.0205801e-01   8.3487981e-02   8.2948462e-02   8.2499658e-02   1.1982484e-01   9.7067919e-02   8.5815629e-02   9.0584285e-02   1.0517734e-01   7.8728615e-02   8.2465362e-02   8.1175179e-02   7.4451161e-02   7.2780961e-02   9.8027119e-02   7.9186352e-02   9.3572539e-02   7.1178913e-02   9.9960717e-02   7.6001190e-02   9.8069469e-02   9.0444802e-02   8.9768862e-02   8.1715688e-02   7.1540326e-02   7.7885383e-02   9.0855680e-02   7.5245252e-02   9.3506986e-02   9.1968167e-02   9.0109483e-02   8.2320804e-02   8.9509517e-02   7.8843854e-02   8.4366358e-02   8.1217849e-02   2.0130888e-03   1.8101212e-03   1.7679630e-03   1.5452657e-03   5.1945438e-04   1.2854163e-03   8.7808919e-03   8.2238558e-03   5.0256002e-04   2.7264330e-03   2.8467434e-03   5.0256002e-04   1.9812707e-03   1.4461308e-03   4.1136119e-03   2.5737383e-03   4.2121435e-03   4.2634118e-03   5.2197085e-03   6.4299253e-04   4.8925560e-03   2.3856210e-03   3.0302048e-03   1.5954763e-03   5.0938593e-02   5.4618269e-02   6.0341206e-02   6.9748949e-02   6.3744319e-02   7.0794949e-02   6.1613026e-02   4.7841585e-02   5.7516679e-02   6.4388046e-02   6.4176559e-02   5.7259869e-02   6.1078980e-02   6.9247074e-02   4.2780731e-02   4.8566082e-02   7.2247187e-02   5.4065395e-02   8.0844343e-02   5.6422768e-02   7.7401119e-02   4.9239970e-02   8.2976560e-02   6.8666014e-02   5.1795393e-02   5.1539865e-02   6.3922410e-02   7.0731007e-02   6.6410587e-02   3.9578438e-02   5.7098964e-02   5.2392525e-02   5.0683886e-02   9.1729236e-02   7.6796353e-02   5.9700691e-02   5.7658559e-02   7.0380963e-02   5.5891948e-02   6.4553084e-02   7.4314140e-02   6.4188376e-02   5.5862752e-02   4.8638731e-02   6.4818200e-02   5.5721441e-02   5.9023587e-02   5.4325379e-02   3.5659427e-02   5.7806321e-02   
1.1646754e-01   1.0183406e-01   9.5237923e-02   9.9907095e-02   1.0622089e-01   1.0525522e-01   1.0561297e-01   1.0087833e-01   1.0779948e-01   9.3522115e-02   7.8070592e-02   9.4910280e-02   8.9633058e-02   1.0829781e-01   1.1081740e-01   9.1634033e-02   9.0337246e-02   9.1650208e-02   1.2399805e-01   1.0057671e-01   9.2649910e-02   9.9949877e-02   1.0962952e-01   8.2940269e-02   9.1028259e-02   8.7623472e-02   7.9432792e-02   8.0075100e-02   1.0524078e-01   8.3828651e-02   9.7259211e-02   7.8329945e-02   1.0714828e-01   8.1794698e-02   1.0616577e-01   9.3567438e-02   1.0077628e-01   9.0271060e-02   7.9035426e-02   8.3003648e-02   9.7873414e-02   7.9050596e-02   1.0183406e-01   1.0015100e-01   9.8558964e-02   8.7141498e-02   9.2951123e-02   8.4912476e-02   9.5252034e-02   9.0710349e-02   8.3242342e-04   1.3658963e-03   5.7230018e-04   8.1918836e-04   9.7205318e-04   4.1873206e-03   3.8010080e-03   1.6389584e-03   2.5918915e-03   2.9169588e-03   1.6389584e-03   5.6093257e-04   7.3131385e-04   1.3995850e-03   7.2853070e-03   1.1548608e-03   5.6661765e-04   1.3645766e-03   9.0462881e-04   1.5033145e-03   5.7640672e-04   1.1086000e-03   1.0041867e-03   5.6613254e-02   5.8073267e-02   6.6069758e-02   7.6238332e-02   6.9820331e-02   7.5150667e-02   6.4021588e-02   5.2221754e-02   6.4043055e-02   6.6631628e-02   7.2118099e-02   6.0001482e-02   7.1017077e-02   7.4033042e-02   4.5299125e-02   5.3744092e-02   7.4268794e-02   6.0257552e-02   8.9540407e-02   6.2698279e-02   7.8447791e-02   5.4448716e-02   9.0401373e-02   7.4795629e-02   5.7546133e-02   5.6993841e-02   7.1300530e-02   7.5820775e-02   7.0342915e-02   4.5615196e-02   6.3638314e-02   5.9296597e-02   5.5885275e-02   9.6910282e-02   7.8117499e-02   6.0390425e-02   6.2700176e-02   7.9484269e-02   5.8304382e-02   6.9718471e-02   7.9589842e-02   6.8316048e-02   6.1785991e-02   5.4151233e-02   6.9205713e-02   5.8981505e-02   6.2496988e-02   5.9482310e-02   3.9268283e-02   6.1808750e-02   1.1700097e-01   1.0528100e-01   
1.0062122e-01   1.0448643e-01   1.0962576e-01   1.1218593e-01   1.0740486e-01   1.0838294e-01   1.1564442e-01   9.5242357e-02   8.0565058e-02   1.0027898e-01   9.3976493e-02   1.1211413e-01   1.1185843e-01   9.2981806e-02   9.4878465e-02   9.5039709e-02   1.3246555e-01   1.0898490e-01   9.5760022e-02   1.0161372e-01   1.1802422e-01   8.8109745e-02   9.3746381e-02   9.3301915e-02   8.3669649e-02   8.2970713e-02   1.0958444e-01   9.1015393e-02   1.0506496e-01   8.2570171e-02   1.1114727e-01   8.7646381e-02   1.1324005e-01   9.9872205e-02   1.0076379e-01   9.4009317e-02   8.1526386e-02   8.7041367e-02   1.0052094e-01   8.2193042e-02   1.0528100e-01   1.0314067e-01   9.9984788e-02   9.0308392e-02   9.8939123e-02   8.8538901e-02   9.5061969e-02   9.3157351e-02   1.7227462e-04   7.9314599e-04   8.9844173e-04   8.9518090e-04   2.8880348e-03   2.3244520e-03   6.4717383e-04   8.8327223e-04   1.0385020e-03   6.4717383e-04   4.2082433e-05   2.2535838e-05   6.1632160e-04   7.2421116e-03   6.3599645e-04   2.4204146e-03   3.0244507e-03   7.7555952e-04   1.1490838e-03   1.6721205e-04   1.6116507e-04   5.3985478e-05   6.6593275e-02   6.9140456e-02   7.6991415e-02   8.7909276e-02   8.1079796e-02   8.7140283e-02   7.5972851e-02   6.2229264e-02   7.4340995e-02   7.8983123e-02   8.2638003e-02   7.1602020e-02   8.0355120e-02   8.5886853e-02   5.5471847e-02   6.3724100e-02   8.7131187e-02   7.0026001e-02   1.0150375e-01   7.2956045e-02   9.2179245e-02   6.4526163e-02   1.0277569e-01   8.5954479e-02   6.7618774e-02   6.7207904e-02   8.2005017e-02   8.8025831e-02   8.2391127e-02   5.4193907e-02   7.3937560e-02   6.8913785e-02   6.6018815e-02   1.1053450e-01   9.1432865e-02   7.2512462e-02   7.3622571e-02   9.0228640e-02   6.9559505e-02   8.1277678e-02   9.1538587e-02   7.9923144e-02   7.2198625e-02   6.3968343e-02   8.0855092e-02   6.9835377e-02   7.3807917e-02   6.9953549e-02   4.8370205e-02   7.2961708e-02   1.3388298e-01   1.2040855e-01   1.1476271e-01   1.1886295e-01   1.2510613e-01   
1.2637598e-01   1.2310531e-01   1.2187125e-01   1.2970708e-01   1.1033742e-01   9.4233672e-02   1.1441687e-01   1.0810566e-01   1.2778743e-01   1.2861773e-01   1.0813836e-01   1.0864380e-01   1.0911947e-01   1.4755823e-01   1.2232653e-01   1.1049966e-01   1.1716689e-01   1.3196625e-01   1.0144926e-01   1.0821364e-01   1.0641029e-01   9.6993665e-02   9.6571345e-02   1.2478057e-01   1.0328932e-01   1.1842664e-01   9.5377765e-02   1.2666075e-01   1.0023484e-01   1.2682498e-01   1.1377681e-01   1.1674990e-01   1.0792126e-01   9.5167233e-02   1.0076949e-01   1.1586910e-01   9.6070623e-02   1.2040855e-01   1.1835687e-01   1.1566030e-01   1.0480317e-01   1.1289922e-01   1.0248125e-01   1.1065853e-01   1.0752770e-01   1.5518070e-03   1.3360426e-03   6.1855325e-04   3.8684575e-03   2.7981409e-03   7.1019942e-04   2.9249692e-04   3.8108588e-04   7.1019942e-04   3.4811331e-04   2.0628507e-04   6.8481588e-04   6.1413327e-03   1.2640084e-03   3.0810810e-03   4.5162171e-03   6.1490495e-04   2.0823475e-03   6.6391763e-04   4.5940617e-04   4.0824218e-05   6.8980444e-02   7.2049409e-02   7.9659630e-02   9.0518795e-02   8.3601945e-02   9.0707158e-02   7.9362725e-02   6.4838475e-02   7.6843708e-02   8.2367370e-02   8.4912050e-02   7.4621958e-02   8.2116380e-02   8.9210086e-02   5.7968714e-02   6.6009408e-02   9.1025554e-02   7.2793579e-02   1.0368926e-01   7.5498978e-02   9.6154006e-02   6.6776572e-02   1.0567953e-01   8.9202124e-02   6.9982422e-02   6.9528749e-02   8.4404562e-02   9.0968518e-02   8.5580442e-02   5.6059636e-02   7.6365493e-02   7.1188605e-02   6.8464233e-02   1.1430428e-01   9.5645657e-02   7.6165252e-02   7.6295454e-02   9.2254316e-02   7.2920975e-02   8.4113452e-02   9.5088632e-02   8.3209827e-02   7.4687466e-02   6.6271074e-02   8.4049660e-02   7.3195324e-02   7.7054222e-02   7.2597553e-02   5.0198701e-02   7.5958466e-02   1.3864088e-01   1.2443023e-01   1.1820696e-01   1.2296103e-01   1.2918980e-01   1.2996108e-01   1.2761885e-01   1.2545185e-01   1.3315049e-01   
1.1429516e-01   9.7708479e-02   1.1783417e-01   1.1147502e-01   1.3162480e-01   1.3265015e-01   1.1194384e-01   1.1244164e-01   1.1326233e-01   1.5100180e-01   1.2549100e-01   1.1411167e-01   1.2131599e-01   1.3539430e-01   1.0451333e-01   1.1218678e-01   1.1003412e-01   1.0016268e-01   1.0019858e-01   1.2861591e-01   1.0655136e-01   1.2158527e-01   9.9029274e-02   1.3048234e-01   1.0368193e-01   1.3098775e-01   1.1671318e-01   1.2117898e-01   1.1194010e-01   9.8811276e-02   1.0398171e-01   1.1952683e-01   9.8904418e-02   1.2443023e-01   1.2231211e-01   1.1957927e-01   1.0792296e-01   1.1588914e-01   1.0590028e-01   1.1501774e-01   1.1169315e-01   2.7458883e-04   1.9079543e-03   3.8013798e-03   4.1957403e-03   8.9864077e-04   3.1780627e-03   3.4575366e-03   8.9864077e-04   6.1086528e-04   6.3429136e-04   2.2515700e-03   7.8191545e-03   1.2413621e-03   2.0952535e-03   1.3093150e-03   1.3748953e-03   1.2388833e-03   4.8114625e-04   1.1404059e-03   1.0969768e-03   5.5584260e-02   5.7510469e-02   6.4993010e-02   7.5368447e-02   6.9057379e-02   7.3499750e-02   6.3437246e-02   5.1298347e-02   6.2635149e-02   6.6332421e-02   7.0700793e-02   5.9790543e-02   6.9153134e-02   7.2596138e-02   4.5395419e-02   5.3097576e-02   7.3376441e-02   5.8208889e-02   8.8752390e-02   6.1292160e-02   7.8242185e-02   5.3906344e-02   8.8989869e-02   7.2610553e-02   5.6579969e-02   5.6297092e-02   6.9966539e-02   7.5157531e-02   6.9589413e-02   4.4676337e-02   6.2364995e-02   5.7806901e-02   5.5013010e-02   9.5419957e-02   7.7143847e-02   6.0075763e-02   6.1882505e-02   7.8193894e-02   5.7405077e-02   6.8884889e-02   7.7590521e-02   6.7073564e-02   6.0698980e-02   5.3256477e-02   6.8055547e-02   5.7544133e-02   6.1428351e-02   5.8436070e-02   3.9618229e-02   6.0914936e-02   1.1719371e-01   1.0478727e-01   9.9928933e-02   1.0301520e-01   1.0921839e-01   1.1065431e-01   1.0694189e-01   1.0626748e-01   1.1395294e-01   9.5522682e-02   8.0692317e-02   9.9640584e-02   9.3821909e-02   1.1210560e-01   
1.1313776e-01   9.3722341e-02   9.3659306e-02   9.3792773e-02   1.3107147e-01   1.0721381e-01   9.5955259e-02   1.0180084e-01   1.1608360e-01   8.7786080e-02   9.3279943e-02   9.1618132e-02   8.3505928e-02   8.2622852e-02   1.0912386e-01   8.8978076e-02   1.0355009e-01   8.1236966e-02   1.1101306e-01   8.5950464e-02   1.1026035e-01   9.9640402e-02   1.0131013e-01   9.2768985e-02   8.1325904e-02   8.7087186e-02   1.0113076e-01   8.3368795e-02   1.0478727e-01   1.0299507e-01   1.0075649e-01   9.1267488e-02   9.8760345e-02   8.8473047e-02   9.5602739e-02   9.2388034e-02   1.3192990e-03   5.6049942e-03   5.6260410e-03   3.8255378e-04   2.7098321e-03   2.9260165e-03   3.8255378e-04   8.5873923e-04   6.4551657e-04   2.7466704e-03   5.2436473e-03   2.1741200e-03   2.6488612e-03   2.5599967e-03   7.2546074e-04   2.4454411e-03   9.4817109e-04   1.6251073e-03   9.8969141e-04   5.2867682e-02   5.5519966e-02   6.2241614e-02   7.2187515e-02   6.6023504e-02   7.1524895e-02   6.1872329e-02   4.9090525e-02   5.9694897e-02   6.4712456e-02   6.7154795e-02   5.7947928e-02   6.4972194e-02   7.0357979e-02   4.3573654e-02   5.0441816e-02   7.2068579e-02   5.5680796e-02   8.4591184e-02   5.8459303e-02   7.7051055e-02   5.1193288e-02   8.5601942e-02   7.0117829e-02   5.3803216e-02   5.3535941e-02   6.6621467e-02   7.2473708e-02   6.7439009e-02   4.1816308e-02   5.9366143e-02   5.4760331e-02   5.2431175e-02   9.2978431e-02   7.6152091e-02   5.9131458e-02   5.9321453e-02   7.4096463e-02   5.5985281e-02   6.6256786e-02   7.5362720e-02   6.5046998e-02   5.7885295e-02   5.0561408e-02   6.5880549e-02   5.5988437e-02   5.9621488e-02   5.5930673e-02   3.7267604e-02   5.8818934e-02   1.1596549e-01   1.0264359e-01   9.7070235e-02   1.0079872e-01   1.0704759e-01   1.0746690e-01   1.0547300e-01   1.0308827e-01   1.1044375e-01   9.3809882e-02   7.8755194e-02   9.6766957e-02   9.1194118e-02   1.0959636e-01   1.1126853e-01   9.1984794e-02   9.1378676e-02   9.2003603e-02   1.2714849e-01   1.0351897e-01   
9.3695356e-02   1.0013450e-01   1.1244271e-01   8.4898325e-02   9.1456450e-02   8.9057498e-02   8.0952577e-02   8.0704372e-02   1.0658329e-01   8.5941483e-02   1.0000975e-01   7.9159793e-02   1.0848060e-01   8.3337337e-02   1.0760265e-01   9.6215589e-02   1.0020263e-01   9.0833970e-02   7.9520366e-02   8.4523201e-02   9.8885306e-02   8.0736144e-02   1.0264359e-01   1.0090595e-01   9.8957167e-02   8.8690158e-02   9.5447639e-02   8.6121379e-02   9.4585258e-02   9.0802578e-02   6.3934178e-03   4.8940589e-03   1.2286350e-03   9.0839358e-04   1.0654961e-03   1.2286350e-03   9.6426336e-04   7.9049403e-04   1.4335758e-03   3.9777866e-03   2.4355051e-03   2.0107867e-03   4.6162602e-03   1.1770386e-04   3.4806567e-03   1.4352123e-03   1.5474906e-03   6.2303871e-04   6.0828103e-02   6.3671616e-02   7.0896871e-02   8.0864907e-02   7.4298413e-02   8.2081782e-02   7.0784097e-02   5.7058421e-02   6.8417644e-02   7.3405222e-02   7.5846906e-02   6.5924537e-02   7.3406010e-02   8.0389200e-02   5.0100593e-02   5.7793778e-02   8.2174514e-02   6.5233957e-02   9.3014438e-02   6.7186690e-02   8.6645560e-02   5.8420934e-02   9.5569964e-02   8.0832485e-02   6.1703014e-02   6.1092740e-02   7.5364208e-02   8.1326732e-02   7.6506202e-02   4.8648267e-02   6.7850123e-02   6.3129543e-02   6.0279722e-02   1.0421037e-01   8.6793624e-02   6.7911943e-02   6.7612814e-02   8.2552902e-02   6.4996479e-02   7.4977087e-02   8.6383221e-02   7.4662447e-02   6.6198278e-02   5.8199721e-02   7.5325266e-02   6.5572089e-02   6.8824430e-02   6.4314329e-02   4.2494903e-02   6.7537432e-02   1.2698890e-01   1.1333529e-01   1.0720337e-01   1.1257147e-01   1.1782339e-01   1.1890376e-01   1.1671052e-01   1.1501886e-01   1.2195484e-01   1.0334755e-01   8.7523621e-02   1.0680449e-01   1.0051071e-01   1.1973768e-01   1.2022270e-01   1.0080013e-01   1.0231108e-01   1.0334772e-01   1.3872306e-01   1.1462175e-01   1.0296153e-01   1.1014449e-01   1.2423991e-01   9.3883877e-02   1.0176675e-01   1.0022558e-01   8.9772340e-02   
9.0213298e-02   1.1713918e-01   9.6981953e-02   1.1075521e-01   8.9707194e-02   1.1871449e-01   9.4162640e-02   1.2118172e-01   1.0525415e-01   1.1008916e-01   1.0200908e-01   8.8850235e-02   9.3264552e-02   1.0788029e-01   8.7700151e-02   1.1333529e-01   1.1110284e-01   1.0804665e-01   9.6438203e-02   1.0447205e-01   9.5256431e-02   1.0425068e-01   1.0162139e-01   4.7487225e-04   5.5229901e-03   5.0553045e-03   5.3192416e-03   5.5229901e-03   2.6440955e-03   3.2948907e-03   2.1817756e-03   1.9232390e-02   1.0060949e-03   5.2893888e-03   3.6565664e-03   6.4838776e-03   7.5722530e-04   2.1347736e-03   1.7405562e-03   3.5508478e-03   8.4906111e-02   8.5684875e-02   9.5941240e-02   1.0863973e-01   1.0110388e-01   1.0421406e-01   9.1634377e-02   7.8911086e-02   9.3575504e-02   9.5023267e-02   1.0394314e-01   8.7940160e-02   1.0307352e-01   1.0372095e-01   7.0927620e-02   8.1814584e-02   1.0248405e-01   8.7682138e-02   1.2553590e-01   9.1822634e-02   1.0758552e-01   8.2848043e-02   1.2459059e-01   1.0427303e-01   8.6172494e-02   8.5771372e-02   1.0276107e-01   1.0743739e-01   1.0000507e-01   7.2180344e-02   9.3352407e-02   8.8118179e-02   8.3972299e-02   1.2999219e-01   1.0601815e-01   8.6245777e-02   9.1942699e-02   1.1341937e-01   8.4389659e-02   1.0016617e-01   1.0942619e-01   9.6927434e-02   9.1068024e-02   8.2112078e-02   9.8354950e-02   8.4918144e-02   8.9929197e-02   8.7860129e-02   6.4913483e-02   8.9916257e-02   1.5108513e-01   1.3966025e-01   1.3579363e-01   1.3800221e-01   1.4462965e-01   1.4853849e-01   1.4049017e-01   1.4366186e-01   1.5284247e-01   1.2813521e-01   1.1198929e-01   1.3549048e-01   1.2835377e-01   1.4847864e-01   1.4775011e-01   1.2601872e-01   1.2764409e-01   1.2670935e-01   1.7255531e-01   1.4567287e-01   1.2987249e-01   1.3506836e-01   1.5547286e-01   1.2211481e-01   1.2607603e-01   1.2598262e-01   1.1656479e-01   1.1426125e-01   1.4534374e-01   1.2395854e-01   1.4127350e-01   1.1320894e-01   1.4734853e-01   1.1969377e-01   1.4703899e-01   
1.3646872e-01   1.3305975e-01   1.2588961e-01   1.1250545e-01   1.2058130e-01   1.3549735e-01   1.1610735e-01   1.3966025e-01   1.3745925e-01   1.3401818e-01   1.2501338e-01   1.3525796e-01   1.2173379e-01   1.2646805e-01   1.2458940e-01   5.1766813e-03   3.3038909e-03   3.5089109e-03   5.1766813e-03   2.1988415e-03   2.7782772e-03   1.0688451e-03   1.7030893e-02   8.9726872e-04   4.6231995e-03   4.6736672e-03   5.3382818e-03   1.1647725e-03   1.9758755e-03   1.2750935e-03   2.7128598e-03   8.7833448e-02   8.9010914e-02   9.9253982e-02   1.1183035e-01   1.0412107e-01   1.0885582e-01   9.5547284e-02   8.2028755e-02   9.6837094e-02   9.8811318e-02   1.0697473e-01   9.1242752e-02   1.0570232e-01   1.0798221e-01   7.3423587e-02   8.4439842e-02   1.0715501e-01   9.1521191e-02   1.2824419e-01   9.5124610e-02   1.1205287e-01   8.5389026e-02   1.2841576e-01   1.0878552e-01   8.9048303e-02   8.8474213e-02   1.0589810e-01   1.1091852e-01   1.0379001e-01   7.4413565e-02   9.6465402e-02   9.1132676e-02   8.6893608e-02   1.3487393e-01   1.1110410e-01   9.0320856e-02   9.5130800e-02   1.1613190e-01   8.8387927e-02   1.0357744e-01   1.1422195e-01   1.0103671e-01   9.4160592e-02   8.4870839e-02   1.0232030e-01   8.9160088e-02   9.3889889e-02   9.1104413e-02   6.6493231e-02   9.3508454e-02   1.5639991e-01   1.4440871e-01   1.3995325e-01   1.4327783e-01   1.4942424e-01   1.5327636e-01   1.4579380e-01   1.4864466e-01   1.5749945e-01   1.3242397e-01   1.1574188e-01   1.3959357e-01   1.3216131e-01   1.5281767e-01   1.5172347e-01   1.2991524e-01   1.3242882e-01   1.3190251e-01   1.7712896e-01   1.5002220e-01   1.3380600e-01   1.3964037e-01   1.6023126e-01   1.2562692e-01   1.3071698e-01   1.3077115e-01   1.2010966e-01   1.1841338e-01   1.4987755e-01   1.2848062e-01   1.4548859e-01   1.1783301e-01   1.5172346e-01   1.2426441e-01   1.5309487e-01   1.3983154e-01   1.3778354e-01   1.3093423e-01   1.1660453e-01   1.2409289e-01   1.3931059e-01   1.1865130e-01   1.4440871e-01   1.4196632e-01   
1.3805465e-01   1.2801436e-01   1.3865562e-01   1.2553921e-01   1.3109454e-01   1.2958547e-01   1.5860612e-03   1.6773969e-03   0.0000000e+00   8.4337656e-04   4.6746407e-04   2.4549978e-03   4.7529836e-03   2.3235808e-03   4.0683267e-03   4.3260986e-03   6.7336618e-04   2.8454658e-03   1.0918918e-03   1.3756658e-03   5.7784546e-04   5.9573290e-02   6.3070670e-02   6.9597309e-02   7.9911457e-02   7.3480528e-02   7.9923883e-02   7.0144874e-02   5.5923876e-02   6.6635620e-02   7.3192589e-02   7.4096565e-02   6.5836734e-02   7.1022277e-02   7.8555696e-02   5.0423423e-02   5.7089619e-02   8.1093473e-02   6.2483167e-02   9.2251714e-02   6.5399221e-02   8.6573432e-02   5.7873871e-02   9.3861710e-02   7.7914479e-02   6.0545459e-02   6.0328179e-02   7.3725736e-02   8.0652769e-02   7.5662466e-02   4.7500835e-02   6.6267157e-02   6.1219907e-02   5.9246920e-02   1.0235525e-01   8.5584812e-02   6.7627185e-02   6.6676399e-02   8.1028522e-02   6.3874534e-02   7.4037098e-02   8.3735875e-02   7.3091049e-02   6.4875922e-02   5.7134332e-02   7.3898502e-02   6.3669341e-02   6.7483654e-02   6.3032151e-02   4.3195391e-02   6.6465430e-02   1.2757303e-01   1.1294171e-01   1.0654531e-01   1.1074736e-01   1.1756538e-01   1.1705185e-01   1.1633100e-01   1.1230305e-01   1.1988948e-01   1.0404710e-01   8.7981667e-02   1.0622547e-01   1.0061669e-01   1.2008497e-01   1.2242153e-01   1.0217012e-01   1.0084187e-01   1.0181343e-01   1.3714147e-01   1.1243585e-01   1.0356517e-01   1.1071692e-01   1.2181923e-01   9.3742899e-02   1.0137566e-01   9.8085445e-02   8.9840814e-02   8.9979544e-02   1.1682155e-01   9.4340727e-02   1.0893096e-01   8.8036209e-02   1.1887723e-01   9.1995894e-02   1.1718099e-01   1.0529554e-01   1.1115622e-01   1.0048991e-01   8.8823715e-02   9.3647258e-02   1.0909883e-01   8.9729548e-02   1.1294171e-01   1.1121254e-01   1.0947844e-01   9.8164553e-02   1.0458423e-01   9.5468337e-02   1.0529433e-01   1.0077315e-01   1.0959804e-05   1.5860612e-03   1.2066237e-03   9.8801305e-04   
9.8752232e-04   6.0992006e-03   2.3026455e-03   4.3295194e-03   6.8464966e-03   1.1467011e-03   3.5017117e-03   1.7326793e-03   1.1803657e-03   5.4725577e-04   7.4912594e-02   7.8462634e-02   8.6080465e-02   9.7055695e-02   8.9913940e-02   9.8246196e-02   8.6359614e-02   7.0875411e-02   8.3096610e-02   8.9373603e-02   9.1095332e-02   8.1132274e-02   8.7784959e-02   9.6471287e-02   6.3584445e-02   7.1727351e-02   9.8741504e-02   7.9297358e-02   1.1001947e-01   8.1763635e-02   1.0392038e-01   7.2462318e-02   1.1283140e-01   9.6493172e-02   7.5904241e-02   7.5362083e-02   9.0691159e-02   9.7798442e-02   9.2550220e-02   6.1191529e-02   8.2519289e-02   7.7118501e-02   7.4418954e-02   1.2241546e-01   1.0373213e-01   8.3271349e-02   8.2617382e-02   9.8302252e-02   7.9806174e-02   9.0742202e-02   1.0274136e-01   9.0294649e-02   8.0841362e-02   7.2046648e-02   9.1054206e-02   8.0166072e-02   8.3952641e-02   7.8850217e-02   5.4962225e-02   8.2583735e-02   1.4771497e-01   1.3277361e-01   1.2596285e-01   1.3150244e-01   1.3764710e-01   1.3814813e-01   1.3643324e-01   1.3364709e-01   1.4127174e-01   1.2228442e-01   1.0501430e-01   1.2555100e-01   1.1896947e-01   1.3983185e-01   1.4080838e-01   1.1967635e-01   1.2050783e-01   1.2165153e-01   1.5932394e-01   1.3324590e-01   1.2180922e-01   1.2960297e-01   1.4356572e-01   1.1163860e-01   1.2028439e-01   1.1797002e-01   1.0728569e-01   1.0776505e-01   1.3685139e-01   1.1414077e-01   1.2924282e-01   1.0675126e-01   1.3867943e-01   1.1135088e-01   1.3991357e-01   1.2389778e-01   1.2963231e-01   1.2019755e-01   1.0634263e-01   1.1117491e-01   1.2727853e-01   1.0546335e-01   1.3277361e-01   1.3050496e-01   1.2753116e-01   1.1493738e-01   1.2310368e-01   1.1333277e-01   1.2330887e-01   1.1999903e-01   1.6773969e-03   1.4044930e-03   1.1476188e-03   1.1728210e-03   6.0850239e-03   2.5611449e-03   4.7720148e-03   7.3487783e-03   1.2981958e-03   3.8044610e-03   1.9624179e-03   1.3573639e-03   6.7050163e-04   7.5875383e-02   7.9591742e-02   
8.7130965e-02   9.8136915e-02   9.0968214e-02   9.9474916e-02   8.7612410e-02   7.1886206e-02   8.4072967e-02   9.0657173e-02   9.2037661e-02   8.2322796e-02   8.8559519e-02   9.7661149e-02   6.4636714e-02   7.2692172e-02   1.0010765e-01   8.0266168e-02   1.1103798e-01   8.2745924e-02   1.0537406e-01   7.3430328e-02   1.1396572e-01   9.7599809e-02   7.6869874e-02   7.6340270e-02   9.1668858e-02   9.8974404e-02   9.3760611e-02   6.2005453e-02   8.3489209e-02   7.8020463e-02   7.5407355e-02   1.2375637e-01   1.0517097e-01   8.4594299e-02   8.3682974e-02   9.9214366e-02   8.1008204e-02   9.1862911e-02   1.0394615e-01   9.1478420e-02   8.1837398e-02   7.2994499e-02   9.2227696e-02   8.1322109e-02   8.5126430e-02   7.9879628e-02   5.5860810e-02   8.3714500e-02   1.4946173e-01   1.3427629e-01   1.2730802e-01   1.3293539e-01   1.3918009e-01   1.3947497e-01   1.3805161e-01   1.3491280e-01   1.4255824e-01   1.2381647e-01   1.0639120e-01   1.2689399e-01   1.2032986e-01   1.4134878e-01   1.4247553e-01   1.2120787e-01   1.2187456e-01   1.2309328e-01   1.6066767e-01   1.3444738e-01   1.2325830e-01   1.3118369e-01   1.4483072e-01   1.1290137e-01   1.2175315e-01   1.1925251e-01   1.0857610e-01   1.0914142e-01   1.3832403e-01   1.1530308e-01   1.3045824e-01   1.0804612e-01   1.4018015e-01   1.1257905e-01   1.4124338e-01   1.2516443e-01   1.3130183e-01   1.2160995e-01   1.0773181e-01   1.1250108e-01   1.2878318e-01   1.0678720e-01   1.3427629e-01   1.3201801e-01   1.2910620e-01   1.1632723e-01   1.2438544e-01   1.1469977e-01   1.2494953e-01   1.2148287e-01   8.4337656e-04   4.6746407e-04   2.4549978e-03   4.7529836e-03   2.3235808e-03   4.0683267e-03   4.3260986e-03   6.7336618e-04   2.8454658e-03   1.0918918e-03   1.3756658e-03   5.7784546e-04   5.9573290e-02   6.3070670e-02   6.9597309e-02   7.9911457e-02   7.3480528e-02   7.9923883e-02   7.0144874e-02   5.5923876e-02   6.6635620e-02   7.3192589e-02   7.4096565e-02   6.5836734e-02   7.1022277e-02   7.8555696e-02   5.0423423e-02   
5.7089619e-02   8.1093473e-02   6.2483167e-02   9.2251714e-02   6.5399221e-02   8.6573432e-02   5.7873871e-02   9.3861710e-02   7.7914479e-02   6.0545459e-02   6.0328179e-02   7.3725736e-02   8.0652769e-02   7.5662466e-02   4.7500835e-02   6.6267157e-02   6.1219907e-02   5.9246920e-02   1.0235525e-01   8.5584812e-02   6.7627185e-02   6.6676399e-02   8.1028522e-02   6.3874534e-02   7.4037098e-02   8.3735875e-02   7.3091049e-02   6.4875922e-02   5.7134332e-02   7.3898502e-02   6.3669341e-02   6.7483654e-02   6.3032151e-02   4.3195391e-02   6.6465430e-02   1.2757303e-01   1.1294171e-01   1.0654531e-01   1.1074736e-01   1.1756538e-01   1.1705185e-01   1.1633100e-01   1.1230305e-01   1.1988948e-01   1.0404710e-01   8.7981667e-02   1.0622547e-01   1.0061669e-01   1.2008497e-01   1.2242153e-01   1.0217012e-01   1.0084187e-01   1.0181343e-01   1.3714147e-01   1.1243585e-01   1.0356517e-01   1.1071692e-01   1.2181923e-01   9.3742899e-02   1.0137566e-01   9.8085445e-02   8.9840814e-02   8.9979544e-02   1.1682155e-01   9.4340727e-02   1.0893096e-01   8.8036209e-02   1.1887723e-01   9.1995894e-02   1.1718099e-01   1.0529554e-01   1.1115622e-01   1.0048991e-01   8.8823715e-02   9.3647258e-02   1.0909883e-01   8.9729548e-02   1.1294171e-01   1.1121254e-01   1.0947844e-01   9.8164553e-02   1.0458423e-01   9.5468337e-02   1.0529433e-01   1.0077315e-01   6.2742331e-05   5.7500583e-04   7.7277880e-03   4.4752900e-04   1.9151463e-03   2.3881652e-03   8.6221368e-04   8.6997251e-04   5.7712764e-05   1.4261239e-04   1.6337476e-04   6.5357511e-02   6.7564930e-02   7.5599895e-02   8.6482524e-02   7.9693502e-02   8.5392044e-02   7.4152863e-02   6.0880360e-02   7.3094884e-02   7.7108408e-02   8.1483729e-02   6.9905750e-02   7.9537736e-02   8.4220614e-02   5.4020845e-02   6.2478782e-02   8.5095725e-02   6.8776984e-02   1.0023818e-01   7.1693803e-02   8.9974847e-02   6.3277271e-02   1.0126704e-01   8.4456284e-02   6.6380508e-02   6.5944304e-02   8.0776371e-02   8.6401760e-02   8.0678654e-02   
5.3232214e-02   7.2705055e-02   6.7807498e-02   6.4728976e-02   1.0860574e-01   8.9250516e-02   7.0532132e-02   7.2192370e-02   8.9155393e-02   6.7825786e-02   7.9751674e-02   8.9848634e-02   7.8255825e-02   7.0908403e-02   6.2757363e-02   7.9213209e-02   6.8195297e-02   7.2146158e-02   6.8587288e-02   4.7220467e-02   7.1390990e-02   1.3114639e-01   1.1816523e-01   1.1284157e-01   1.1675611e-01   1.2280854e-01   1.2450964e-01   1.2061835e-01   1.2011904e-01   1.2793055e-01   1.0801409e-01   9.2203467e-02   1.1250005e-01   1.0614185e-01   1.2553294e-01   1.2604996e-01   1.0581394e-01   1.0665851e-01   1.0697400e-01   1.4569576e-01   1.2071369e-01   1.0835453e-01   1.1475367e-01   1.3023667e-01   9.9676180e-02   1.0601570e-01   1.0459446e-01   9.5152798e-02   9.4544535e-02   1.2261107e-01   1.0171679e-01   1.1677938e-01   9.3513090e-02   1.2443770e-01   9.8519974e-02   1.2493623e-01   1.1202254e-01   1.1414514e-01   1.0583616e-01   9.3110547e-02   9.8863035e-02   1.1361961e-01   9.4163495e-02   1.1816523e-01   1.1608967e-01   1.1325987e-01   1.0277556e-01   1.1111172e-01   1.0049112e-01   1.0810161e-01   1.0529258e-01   8.3201047e-04   6.6553558e-03   8.0817319e-04   2.3666253e-03   2.9401972e-03   6.0430641e-04   1.3109323e-03   2.0042209e-04   2.9042771e-04   6.4823857e-05   6.4369752e-02   6.6980839e-02   7.4622998e-02   8.5371818e-02   7.8644884e-02   8.4714120e-02   7.3778054e-02   6.0124143e-02   7.1976215e-02   7.6757894e-02   8.0112003e-02   6.9445381e-02   7.7800626e-02   8.3451231e-02   5.3558729e-02   6.1563285e-02   8.4824350e-02   6.7739660e-02   9.8727492e-02   7.0619990e-02   8.9865595e-02   6.2353086e-02   1.0002589e-01   8.3464224e-02   6.5377675e-02   6.4985443e-02   7.9506921e-02   8.5546781e-02   8.0035925e-02   5.2147031e-02   7.1577598e-02   6.6610625e-02   6.3822994e-02   1.0780225e-01   8.9119904e-02   7.0461434e-02   7.1328305e-02   8.7570443e-02   6.7451743e-02   7.8878318e-02   8.9018945e-02   7.7592264e-02   6.9886540e-02   6.1789581e-02   
7.8498655e-02   6.7684600e-02   7.1587759e-02   6.7704325e-02   4.6526712e-02   7.0724596e-02   1.3117528e-01   1.1766016e-01   1.1197322e-01   1.1607657e-01   1.2231416e-01   1.2340084e-01   1.2042522e-01   1.1891967e-01   1.2666041e-01   1.0778594e-01   9.1812847e-02   1.1163169e-01   1.0543690e-01   1.2494830e-01   1.2593147e-01   1.0563286e-01   1.0596141e-01   1.0649455e-01   1.4431904e-01   1.1933022e-01   1.0786987e-01   1.1454955e-01   1.2887601e-01   9.8812977e-02   1.0562884e-01   1.0369928e-01   9.4451580e-02   9.4102215e-02   1.2194237e-01   1.0054711e-01   1.1549365e-01   9.2857172e-02   1.2382256e-01   9.7583400e-02   1.2385907e-01   1.1095947e-01   1.1423829e-01   1.0528942e-01   9.2735747e-02   9.8196145e-02   1.1321168e-01   9.3608756e-02   1.1766016e-01   1.1565299e-01   1.1307354e-01   1.0223904e-01   1.1010492e-01   9.9908851e-02   1.0821954e-01   1.0496782e-01   9.9295890e-03   5.3473138e-04   2.1693539e-03   3.7076821e-03   1.8222092e-03   1.2380588e-03   7.0429726e-04   3.2576994e-04   6.7027380e-04   7.5169669e-02   7.7062740e-02   8.6033367e-02   9.7417771e-02   9.0184218e-02   9.6534643e-02   8.3912481e-02   7.0270670e-02   8.3626528e-02   8.6844061e-02   9.2545497e-02   7.9257265e-02   9.0777176e-02   9.5235076e-02   6.2208659e-02   7.1858472e-02   9.5535449e-02   7.9390740e-02   1.1189093e-01   8.2130752e-02   1.0013922e-01   7.2643917e-02   1.1330079e-01   9.5998078e-02   7.6221141e-02   7.5580594e-02   9.1730689e-02   9.7114882e-02   9.1048046e-02   6.2224749e-02   8.3137120e-02   7.8094271e-02   7.4382594e-02   1.2083561e-01   9.9831872e-02   7.9711043e-02   8.2237066e-02   1.0057287e-01   7.7408745e-02   9.0227459e-02   1.0148848e-01   8.8790024e-02   8.1095637e-02   7.2322440e-02   8.9773416e-02   7.8184960e-02   8.2186791e-02   7.8567440e-02   5.4863235e-02   8.1347337e-02   1.4275245e-01   1.3005259e-01   1.2482271e-01   1.2925722e-01   1.3482920e-01   1.3759096e-01   1.3236924e-01   1.3338627e-01   1.4130888e-01   1.1880967e-01   
1.0247320e-01   1.2443347e-01   1.1741517e-01   1.3747488e-01   1.3688510e-01   1.1618864e-01   1.1858835e-01   1.1879679e-01   1.5963135e-01   1.3387511e-01   1.1938532e-01   1.2588037e-01   1.4388488e-01   1.1083228e-01   1.1728514e-01   1.1680010e-01   1.0591970e-01   1.0525056e-01   1.3476053e-01   1.1410719e-01   1.2959057e-01   1.0487194e-01   1.3643100e-01   1.1046993e-01   1.3880755e-01   1.2375874e-01   1.2479172e-01   1.1764923e-01   1.0361548e-01   1.0965651e-01   1.2456830e-01   1.0392711e-01   1.3005259e-01   1.2763609e-01   1.2394348e-01   1.1308481e-01   1.2275352e-01   1.1138648e-01   1.1846972e-01   1.1666230e-01   1.1839370e-02   9.5901133e-03   1.3740734e-02   3.5338001e-03   1.3550011e-02   8.8526707e-03   9.4699409e-03   6.3350154e-03   4.8467987e-02   5.3749987e-02   5.7717889e-02   6.5676566e-02   6.0050880e-02   7.0693363e-02   6.1877604e-02   4.6729706e-02   5.4665912e-02   6.4242849e-02   5.9590174e-02   5.6493218e-02   5.5132102e-02   6.8254515e-02   4.1974046e-02   4.5979775e-02   7.3375693e-02   5.2912711e-02   7.3889063e-02   5.3879214e-02   7.8254066e-02   4.6403740e-02   7.8645675e-02   6.7578949e-02   4.9098308e-02   4.8671019e-02   5.9827034e-02   6.7861186e-02   6.5136324e-02   3.6759287e-02   5.3980965e-02   4.9365653e-02   4.8462917e-02   9.0010105e-02   7.8830145e-02   6.1496889e-02   5.5380536e-02   6.4107134e-02   5.6871111e-02   6.2034296e-02   7.3759034e-02   6.3674900e-02   5.3119604e-02   4.6143136e-02   6.3811294e-02   5.6759011e-02   5.9030859e-02   5.2419966e-02   3.3258744e-02   5.6880106e-02   1.1655832e-01   1.0005043e-01   9.1684275e-02   9.8727002e-02   1.0425490e-01   1.0138126e-01   1.0565129e-01   9.7538138e-02   1.0314095e-01   9.2386824e-02   7.6571607e-02   9.1274575e-02   8.6336607e-02   1.0505203e-01   1.0829773e-01   9.0077109e-02   8.8858206e-02   9.1549427e-02   1.1781623e-01   9.5530481e-02   9.0068051e-02   9.8963246e-02   1.0479240e-01   7.9118056e-02   9.0207341e-02   8.5766485e-02   7.6442113e-02   
7.8991975e-02   1.0229780e-01   8.0971468e-02   9.2462279e-02   7.7616682e-02   1.0394880e-01   7.9854175e-02   1.0493060e-01   8.8086863e-02   1.0105189e-01   8.9771099e-02   7.8157448e-02   7.9782471e-02   9.4953430e-02   7.4768636e-02   1.0005043e-01   9.8253215e-02   9.6744777e-02   8.3113340e-02   8.7744069e-02   8.2353939e-02   9.5830913e-02   9.0799350e-02   2.1341759e-03   1.9110407e-03   2.4747400e-03   1.4759127e-04   2.6111423e-04   2.1389787e-04   9.9338048e-04   7.1511570e-02   7.2884660e-02   8.1954557e-02   9.3478761e-02   8.6419800e-02   9.0995738e-02   7.9065079e-02   6.6366318e-02   7.9643396e-02   8.2112775e-02   8.8849792e-02   7.5066735e-02   8.7600830e-02   9.0111864e-02   5.8883625e-02   6.8498056e-02   8.9863635e-02   7.4803608e-02   1.0853136e-01   7.8097142e-02   9.4627165e-02   6.9370599e-02   1.0871906e-01   9.0676026e-02   7.2617660e-02   7.2144076e-02   8.7900274e-02   9.2810262e-02   8.6387249e-02   5.9335633e-02   7.9309675e-02   7.4397587e-02   7.0710353e-02   1.1504320e-01   9.3692385e-02   7.4629165e-02   7.8263498e-02   9.7248749e-02   7.2487054e-02   8.6011897e-02   9.5823155e-02   8.3806772e-02   7.7264691e-02   6.8838121e-02   8.4948991e-02   7.3047820e-02   7.7341294e-02   7.4548923e-02   5.2558496e-02   7.6909140e-02   1.3627575e-01   1.2430643e-01   1.1978148e-01   1.2301688e-01   1.2902659e-01   1.3200226e-01   1.2597225e-01   1.2757685e-01   1.3584109e-01   1.1348127e-01   9.7761057e-02   1.1945082e-01   1.1270375e-01   1.3215934e-01   1.3182973e-01   1.1125522e-01   1.1287096e-01   1.1260483e-01   1.5425448e-01   1.2876341e-01   1.1448536e-01   1.2024528e-01   1.3833407e-01   1.0647529e-01   1.1163750e-01   1.1113351e-01   1.0149473e-01   1.0013701e-01   1.2927004e-01   1.0879101e-01   1.2459621e-01   9.9330508e-02   1.3108770e-01   1.0504760e-01   1.3186075e-01   1.1958837e-01   1.1892921e-01   1.1162833e-01   9.8541878e-02   1.0525098e-01   1.1976536e-01   1.0049727e-01   1.2430643e-01   1.2212353e-01   1.1885667e-01   
1.0916600e-01   1.1853397e-01   1.0665478e-01   1.1270995e-01   1.1063603e-01   1.5750019e-03   2.3061943e-03   2.5267586e-03   1.8816551e-03   2.4916769e-03   2.6490348e-03   5.6749564e-02   5.7171493e-02   6.5939847e-02   7.5932398e-02   6.9508351e-02   7.4656451e-02   6.2624290e-02   5.2024188e-02   6.4437230e-02   6.4937043e-02   7.2745199e-02   5.8611781e-02   7.2652586e-02   7.3573688e-02   4.4171072e-02   5.3594578e-02   7.2641168e-02   6.1050608e-02   8.9566976e-02   6.3051224e-02   7.6010398e-02   5.4225638e-02   9.0321512e-02   7.5195114e-02   5.7639533e-02   5.6852233e-02   7.1710391e-02   7.4949900e-02   6.9318916e-02   4.6230253e-02   6.3971485e-02   6.0026157e-02   5.5798348e-02   9.5989279e-02   7.6232404e-02   5.8444003e-02   6.2302257e-02   8.0224536e-02   5.7278169e-02   6.9150783e-02   7.9467477e-02   6.7704503e-02   6.1864652e-02   5.4241616e-02   6.8592620e-02   5.8516944e-02   6.1759106e-02   5.9360348e-02   3.8591759e-02   6.1151978e-02   1.1324556e-01   1.0303819e-01   9.9201558e-02   1.0318500e-01   1.0721879e-01   1.1147036e-01   1.0460948e-01   1.0828938e-01   1.1523751e-01   9.2280249e-02   7.8300492e-02   9.8832112e-02   9.2088327e-02   1.0954453e-01   1.0774163e-01   8.9704304e-02   9.3649245e-02   9.3477002e-02   1.3171389e-01   1.0896793e-01   9.3248418e-02   9.8534682e-02   1.1788866e-01   8.6730988e-02   9.1550165e-02   9.2743651e-02   8.2023117e-02   8.1036634e-02   1.0749375e-01   9.1211084e-02   1.0479473e-01   8.1639608e-02   1.0872731e-01   8.7298316e-02   1.1342725e-01   9.8507274e-02   9.7018335e-02   9.2572028e-02   7.9424005e-02   8.5123740e-02   9.7519007e-02   7.9504622e-02   1.0303819e-01   1.0060889e-01   9.6540999e-02   8.7528391e-02   9.7464514e-02   8.6517419e-02   9.1407615e-02   9.1076579e-02   4.2759888e-03   1.3998823e-03   1.8495751e-03   2.8301386e-03   3.7334484e-03   5.5009109e-02   5.4964037e-02   6.3820777e-02   7.4331569e-02   6.8062522e-02   7.0406681e-02   5.9595322e-02   4.9880526e-02   6.2237804e-02   
6.2265151e-02   7.1113130e-02   5.6594278e-02   7.1451534e-02   6.9994053e-02   4.3126676e-02   5.2381128e-02   6.8520339e-02   5.7597897e-02   8.9058557e-02   6.0751652e-02   7.2510768e-02   5.3201106e-02   8.7849614e-02   7.0985642e-02   5.6028788e-02   5.5591108e-02   6.9920676e-02   7.2931234e-02   6.6633349e-02   4.5295849e-02   6.2044642e-02   5.8063752e-02   5.4097801e-02   9.1903182e-02   7.1473709e-02   5.5091248e-02   6.0376704e-02   7.9310761e-02   5.3854777e-02   6.7042056e-02   7.4968513e-02   6.4284932e-02   5.9983090e-02   5.2745182e-02   6.5463515e-02   5.4553279e-02   5.8476643e-02   5.7183259e-02   3.8909009e-02   5.8515093e-02   1.0925308e-01   9.9624317e-02   9.6622305e-02   9.8607255e-02   1.0383961e-01   1.0792916e-01   1.0029496e-01   1.0408880e-01   1.1184636e-01   8.9608429e-02   7.6157674e-02   9.6354194e-02   9.0095873e-02   1.0708448e-01   1.0618312e-01   8.7751513e-02   8.9756663e-02   8.8888346e-02   1.2885985e-01   1.0591974e-01   9.1173391e-02   9.5499762e-02   1.1429851e-01   8.5048829e-02   8.8069568e-02   8.8675858e-02   8.0228990e-02   7.8160661e-02   1.0454059e-01   8.7419578e-02   1.0196835e-01   7.7696889e-02   1.0615601e-01   8.3458946e-02   1.0727792e-01   9.7348785e-02   9.3770662e-02   8.8214646e-02   7.6638467e-02   8.3519036e-02   9.5849316e-02   7.9674217e-02   9.9624317e-02   9.7643485e-02   9.4514516e-02   8.7075933e-02   9.6245490e-02   8.4428758e-02   8.8195121e-02   8.6900395e-02   3.3656636e-03   1.2813341e-03   1.5508786e-03   5.5126016e-04   5.7906981e-02   6.0932190e-02   6.7790041e-02   7.7661589e-02   7.1242410e-02   7.8649743e-02   6.7963400e-02   5.4275670e-02   6.5215187e-02   7.0651008e-02   7.2495968e-02   6.3298898e-02   6.9915496e-02   7.7039648e-02   4.7868354e-02   5.5074415e-02   7.9095397e-02   6.1872293e-02   8.9630734e-02   6.4010779e-02   8.3786653e-02   5.5727300e-02   9.1943867e-02   7.7179306e-02   5.8787276e-02   5.8290887e-02   7.2059853e-02   7.8226343e-02   7.3481150e-02   4.5978845e-02   
6.4704253e-02   5.9980041e-02   5.7435326e-02   1.0050646e-01   8.3655459e-02   6.5308029e-02   6.4668898e-02   7.9130910e-02   6.2159352e-02   7.1908147e-02   8.2739305e-02   7.1488143e-02   6.3159434e-02   5.5376396e-02   7.2163727e-02   6.2508559e-02   6.5824794e-02   6.1340761e-02   4.0472995e-02   6.4599849e-02   1.2380425e-01   1.0993395e-01   1.0373859e-01   1.0879322e-01   1.1440237e-01   1.1495444e-01   1.1335001e-01   1.1089530e-01   1.1789001e-01   1.0041407e-01   8.4708778e-02   1.0336455e-01   9.7357032e-02   1.1642404e-01   1.1748667e-01   9.8081847e-02   9.8747663e-02   9.9805801e-02   1.3456454e-01   1.1060300e-01   9.9947092e-02   1.0709744e-01   1.2004751e-01   9.0729860e-02   9.8544296e-02   9.6489030e-02   8.6759859e-02   8.7175826e-02   1.1367288e-01   9.3122372e-02   1.0688813e-01   8.6280635e-02   1.1536030e-01   9.0496365e-02   1.1670380e-01   1.0195807e-01   1.0724940e-01   9.8472199e-02   8.5899722e-02   9.0288199e-02   1.0497525e-01   8.5251170e-02   1.0993395e-01   1.0787024e-01   1.0524763e-01   9.3789157e-02   1.0121309e-01   9.2226653e-02   1.0148816e-01   9.8306001e-02   5.0782435e-04   6.2020689e-04   1.6756986e-03   7.0476887e-02   7.1501157e-02   8.0709837e-02   9.2336294e-02   8.5349594e-02   8.8954923e-02   7.7301298e-02   6.5155686e-02   7.8476830e-02   8.0402665e-02   8.7884855e-02   7.3650267e-02   8.6974315e-02   8.8299731e-02   5.7895678e-02   6.7595783e-02   8.7665774e-02   7.3314500e-02   1.0776923e-01   7.6895020e-02   9.2474273e-02   6.8514181e-02   1.0728576e-01   8.8812805e-02   7.1614033e-02   7.1214401e-02   8.6845407e-02   9.1428310e-02   8.4778392e-02   5.8702706e-02   7.8223013e-02   7.3389903e-02   6.9650470e-02   1.1290739e-01   9.1229062e-02   7.2678536e-02   7.7045799e-02   9.6518176e-02   7.0683794e-02   8.4677029e-02   9.3753287e-02   8.2039996e-02   7.6152676e-02   6.7885263e-02   8.3273481e-02   7.1166737e-02   7.5618620e-02   7.3311647e-02   5.2129744e-02   7.5414559e-02   1.3361288e-01   1.2212885e-01   
1.1803753e-01   1.2062983e-01   1.2681873e-01   1.3003785e-01   1.2339855e-01   1.2552093e-01   1.3397302e-01   1.1145011e-01   9.6072866e-02   1.1773621e-01   1.1108813e-01   1.3021606e-01   1.2991222e-01   1.0941117e-01   1.1074904e-01   1.1019635e-01   1.5245649e-01   1.2709803e-01   1.1272646e-01   1.1805571e-01   1.3644429e-01   1.0507143e-01   1.0948152e-01   1.0908044e-01   1.0002018e-01   9.8259510e-02   1.2725753e-01   1.0697349e-01   1.2296793e-01   9.7285465e-02   1.2913723e-01   1.0312519e-01   1.2920697e-01   1.1832381e-01   1.1655849e-01   1.0932054e-01   9.6669076e-02   1.0377916e-01   1.1803847e-01   9.9488230e-02   1.2212885e-01   1.2004765e-01   1.1693776e-01   1.0790940e-01   1.1723226e-01   1.0500092e-01   1.1038445e-01   1.0827603e-01   1.5232780e-04   4.0007181e-04   6.5289213e-02   6.7204649e-02   7.5443787e-02   8.6444762e-02   7.9660759e-02   8.4749914e-02   7.3546883e-02   6.0641799e-02   7.3012359e-02   7.6531840e-02   8.1598668e-02   6.9499172e-02   7.9923854e-02   8.3724381e-02   5.3799024e-02   6.2453508e-02   8.4253942e-02   6.8486788e-02   1.0054294e-01   7.1576362e-02   8.9122210e-02   6.3281093e-02   1.0114322e-01   8.3993466e-02   6.6335786e-02   6.5925957e-02   8.0814161e-02   8.6165539e-02   8.0247902e-02   5.3368917e-02   7.2666724e-02   6.7816556e-02   6.4623203e-02   1.0800053e-01   8.8235002e-02   6.9728892e-02   7.2010954e-02   8.9470933e-02   6.7181463e-02   7.9529500e-02   8.9243318e-02   7.7739806e-02   7.0823200e-02   6.2720078e-02   7.8762930e-02   6.7548169e-02   7.1608713e-02   6.8390856e-02   4.7339693e-02   7.1004341e-02   1.3006516e-01   1.1747357e-01   1.1247378e-01   1.1599806e-01   1.2210677e-01   1.2411360e-01   1.1962941e-01   1.1968938e-01   1.2763702e-01   1.0730056e-01   9.1685935e-02   1.1214725e-01   1.0578284e-01   1.2500364e-01   1.2540001e-01   1.0518188e-01   1.0602704e-01   1.0612761e-01   1.4550231e-01   1.2054520e-01   1.0786099e-01   1.1396219e-01   1.2996046e-01   9.9460763e-02   1.0527691e-01   
1.0405039e-01   9.4844256e-02   9.3945852e-02   1.2206303e-01   1.0136794e-01   1.1659636e-01   9.2882126e-02   1.2391115e-01   9.8044098e-02   1.2415930e-01   1.1196299e-01   1.1316845e-01   1.0506061e-01   9.2491851e-02   9.8554383e-02   1.1313163e-01   9.4057839e-02   1.1747357e-01   1.1542880e-01   1.1260458e-01   1.0255212e-01   1.1101490e-01   1.0006494e-01   1.0712947e-01   1.0442482e-01   3.2637736e-04   7.1228332e-02   7.3360309e-02   8.1848237e-02   9.3197783e-02   8.6156854e-02   9.1733116e-02   8.0052769e-02   6.6468809e-02   7.9277125e-02   8.3130576e-02   8.8073461e-02   7.5751270e-02   8.6114214e-02   9.0610516e-02   5.9264749e-02   6.8242422e-02   9.1255768e-02   7.4667198e-02   1.0756842e-01   7.7803425e-02   9.6260495e-02   6.9087389e-02   1.0845861e-01   9.0870006e-02   7.2305626e-02   7.1861030e-02   8.7311178e-02   9.3009359e-02   8.6960677e-02   5.8641082e-02   7.8892851e-02   7.3809649e-02   7.0554233e-02   1.1577001e-01   9.5424873e-02   7.6110400e-02   7.8288785e-02   9.6097634e-02   7.3451146e-02   8.6123272e-02   9.6377446e-02   8.4403314e-02   7.7003085e-02   6.8529959e-02   8.5437768e-02   7.3849362e-02   7.8032414e-02   7.4531055e-02   5.2299301e-02   7.7339214e-02   1.3853845e-01   1.2553252e-01   1.2026484e-01   1.2407451e-01   1.3030382e-01   1.3228892e-01   1.2783055e-01   1.2774776e-01   1.3586522e-01   1.1498861e-01   9.8800101e-02   1.1991948e-01   1.1333278e-01   1.3320266e-01   1.3352807e-01   1.1273476e-01   1.1373642e-01   1.1391093e-01   1.5417322e-01   1.2849633e-01   1.1550662e-01   1.2189074e-01   1.3824515e-01   1.0674666e-01   1.1296262e-01   1.1166908e-01   1.0200786e-01   1.0119861e-01   1.3020916e-01   1.0880524e-01   1.2443747e-01   1.0015009e-01   1.3208523e-01   1.0543261e-01   1.3248948e-01   1.1957063e-01   1.2107326e-01   1.1278773e-01   9.9690412e-02   1.0583253e-01   1.2090813e-01   1.0100447e-01   1.2553252e-01   1.2339293e-01   1.2039941e-01   1.0985773e-01   1.1861026e-01   1.0744824e-01   1.1483872e-01   
1.1213527e-01   6.6878313e-02   6.9715409e-02   7.7364528e-02   8.8171063e-02   8.1336125e-02   8.7984980e-02   7.6788970e-02   6.2680317e-02   7.4638540e-02   7.9773718e-02   8.2744330e-02   7.2225777e-02   8.0191157e-02   8.6590874e-02   5.5913229e-02   6.3969698e-02   8.8177048e-02   7.0519412e-02   1.0143390e-01   7.3287383e-02   9.3242485e-02   6.4744474e-02   1.0311960e-01   8.6619094e-02   6.7881619e-02   6.7447267e-02   8.2187438e-02   8.8483348e-02   8.3036483e-02   5.4275541e-02   7.4193034e-02   6.9119612e-02   6.6341718e-02   1.1134910e-01   9.2648103e-02   7.3522039e-02   7.4021876e-02   9.0141442e-02   7.0408618e-02   8.1718494e-02   9.2345175e-02   8.0646828e-02   7.2501162e-02   6.4223536e-02   8.1515678e-02   7.0682605e-02   7.4552513e-02   7.0364536e-02   4.8478235e-02   7.3560525e-02   1.3517901e-01   1.2131760e-01   1.1535330e-01   1.1982809e-01   1.2602646e-01   1.2698846e-01   1.2430464e-01   1.2251062e-01   1.3021845e-01   1.1127258e-01   9.4973617e-02   1.1499323e-01   1.0869119e-01   1.2854331e-01   1.2950169e-01   1.0899747e-01   1.0948608e-01   1.1017110e-01   1.4797664e-01   1.2271397e-01   1.1121974e-01   1.1817834e-01   1.3245873e-01   1.0189203e-01   1.0916690e-01   1.0716280e-01   9.7527876e-02   9.7386912e-02   1.2555051e-01   1.0384747e-01   1.1883003e-01   9.6216108e-02   1.2741273e-01   1.0091958e-01   1.2779606e-01   1.1407020e-01   1.1794445e-01   1.0890370e-01   9.6003260e-02   1.0130628e-01   1.1658801e-01   9.6417670e-02   1.2131760e-01   1.1923852e-01   1.1654354e-01   1.0526503e-01   1.1322934e-01   1.0313209e-01   1.1184758e-01   1.0860395e-01   7.4548764e-04   4.1909104e-04   1.5729317e-03   7.9277531e-04   2.5985333e-03   2.1157871e-03   2.6363318e-04   2.7214324e-04   2.5006701e-03   1.1999234e-03   1.3641574e-03   2.4228657e-03   1.9193080e-03   1.5580763e-03   9.1547441e-05   4.2498838e-03   5.1587623e-04   4.4451535e-03   1.9710136e-04   5.9468529e-03   1.1637586e-04   4.0879090e-03   1.9577278e-03   7.5438506e-06   
5.1321994e-05   9.5746588e-04   1.8362275e-03   1.6353958e-03   8.5567943e-04   2.3534169e-04   2.0155706e-04   2.8744085e-05   6.6464816e-03   6.0500135e-03   3.7609984e-03   3.0183871e-04   2.6908058e-03   1.8208811e-03   9.2994693e-04   3.0287331e-03   1.4489077e-03   1.1766146e-04   3.4058119e-05   1.3255619e-03   1.5104843e-03   1.2375803e-03   1.2042792e-04   2.2770079e-03   7.0925866e-04   1.7863102e-02   1.0250835e-02   7.3485693e-03   9.3316539e-03   1.1683662e-02   1.0169272e-02   1.2909901e-02   9.0217168e-03   1.1005819e-02   9.1383359e-03   4.6331293e-03   7.2658380e-03   6.2625155e-03   1.2163934e-02   1.5604510e-02   9.0849374e-03   6.4499430e-03   7.6430735e-03   1.6734199e-02   8.8633570e-03   7.8095179e-03   1.1083442e-02   1.1720144e-02   4.3192019e-03   7.6217573e-03   5.4833250e-03   3.8602370e-03   4.7396014e-03   1.0813609e-02   4.4772013e-03   7.7667524e-03   3.8237256e-03   1.1654961e-02   4.0471444e-03   1.1640132e-02   6.9842505e-03   1.3199398e-02   6.9808241e-03   4.8195688e-03   4.8204301e-03   9.7963275e-03   5.3012133e-03   1.0250835e-02   1.0025463e-02   1.1016974e-02   6.8718627e-03   6.8391566e-03   5.4173007e-03   1.1780352e-02   7.9156890e-03   7.3587704e-04   1.9839586e-03   1.2219042e-03   1.5405721e-03   4.1726993e-04   3.7297590e-04   1.1835574e-03   6.1737386e-04   2.7177989e-03   1.1395438e-04   5.3634867e-03   1.2095802e-03   9.5552631e-04   7.0211590e-04   1.8297765e-03   1.3098829e-03   5.8522900e-03   1.0011765e-03   2.8317516e-03   7.2859483e-04   4.4710316e-03   2.0244176e-03   7.6733040e-04   6.7685529e-04   2.1301657e-03   1.3518842e-03   6.1205846e-04   2.4644310e-03   1.1938153e-03   1.5975309e-03   5.1076446e-04   5.2791382e-03   3.0339660e-03   1.1907842e-03   3.5011674e-04   4.7257393e-03   3.5465173e-04   7.3647426e-04   2.3478030e-03   5.8214999e-04   7.6861523e-04   8.1449121e-04   5.9893306e-04   4.9750178e-04   2.3193980e-04   3.6882491e-04   2.8017975e-03   9.9802686e-05   1.3102890e-02   7.5969761e-03   
6.0828162e-03   7.3064415e-03   8.8657957e-03   9.3996859e-03   9.1529212e-03   8.6958624e-03   1.0707084e-02   5.8653516e-03   2.3848099e-03   6.0110419e-03   4.6435179e-03   9.5841132e-03   1.1495876e-02   5.7351815e-03   4.7762002e-03   5.2279685e-03   1.6343054e-02   9.1684398e-03   5.3835136e-03   7.5338144e-03   1.1677909e-02   3.4411805e-03   4.9237859e-03   4.5530879e-03   2.5678146e-03   2.5808970e-03   8.5897045e-03   4.5950787e-03   7.8389611e-03   2.4182455e-03   9.2053995e-03   3.4479875e-03   1.0780684e-02   6.4366206e-03   8.7029459e-03   4.8244677e-03   2.4820766e-03   3.2994674e-03   6.9725103e-03   3.6004536e-03   7.5969761e-03   7.2055876e-03   7.4680636e-03   4.8244418e-03   6.1219436e-03   3.5524443e-03   7.3785563e-03   5.0327851e-03   4.5958248e-04   1.4323573e-04   1.2376608e-03   1.4854110e-03   8.9326822e-04   1.4516004e-04   1.6538068e-03   6.5338340e-04   1.1299287e-03   2.4074626e-03   6.9551596e-04   2.6949476e-03   7.2730987e-04   2.7608296e-03   6.5430290e-04   2.7448595e-03   1.4433165e-04   4.0981488e-03   7.0217976e-04   2.0017194e-03   8.4737221e-04   3.5875154e-04   4.2590326e-04   3.7194881e-04   5.5222558e-04   6.3134375e-04   2.4465093e-03   1.5551077e-04   5.7141821e-04   4.4617793e-04   3.7751002e-03   4.2307180e-03   3.3297057e-03   8.4238856e-05   1.8060244e-03   1.6735237e-03   1.3470834e-04   1.4289074e-03   6.1420964e-04   1.0488316e-04   6.6710002e-04   4.5037482e-04   1.3817339e-03   8.2351922e-04   1.8947713e-04   4.2527867e-03   4.2014447e-04   1.3415184e-02   6.6868372e-03   4.2910287e-03   5.8862692e-03   7.8406972e-03   6.4938295e-03   9.0912854e-03   5.6561218e-03   7.2322567e-03   6.1641630e-03   2.7540356e-03   4.2344365e-03   3.5847503e-03   8.2354910e-03   1.1610160e-02   6.2859853e-03   3.6577005e-03   4.7344286e-03   1.2033367e-02   5.6148647e-03   4.9101139e-03   7.6703569e-03   7.8664866e-03   2.1500523e-03   4.7726337e-03   2.9188064e-03   1.9107921e-03   2.6804057e-03   7.0603390e-03   2.2776004e-03   
4.6992435e-03   1.8838392e-03   7.7943774e-03   1.9124732e-03   7.9102721e-03   4.1636243e-03   9.7823337e-03   4.1721395e-03   2.8414673e-03   2.6224860e-03   6.5787438e-03   3.5255969e-03   6.6868372e-03   6.5865547e-03   7.7526002e-03   4.4665713e-03   4.0404299e-03   3.0508638e-03   8.7371106e-03   5.0758684e-03   1.4741381e-04   1.6125952e-03   2.5451963e-03   2.5693849e-03   7.6673984e-04   2.4419486e-03   4.9090126e-04   2.2351893e-03   2.2914219e-03   9.4806103e-04   4.8625587e-03   2.0262116e-03   3.3701508e-03   1.9166326e-03   1.0683081e-03   9.0452408e-04   4.3648468e-03   1.8983778e-03   7.0324108e-04   1.1499379e-03   1.4013722e-03   1.4463883e-03   3.0992383e-04   2.7889447e-04   9.9370768e-04   4.4215550e-03   7.4609763e-04   1.5322654e-03   1.6670592e-03   2.5458750e-03   4.8164855e-03   4.9491294e-03   7.9622502e-04   9.3705610e-04   3.2918730e-03   3.3244867e-04   1.4917733e-03   1.2997427e-03   8.4964648e-04   1.9833350e-03   9.7663737e-04   3.0207145e-03   1.9767871e-03   1.2186915e-03   6.6584952e-03   1.4294810e-03   1.1447125e-02   4.9138585e-03   2.4483433e-03   4.3479747e-03   5.8020082e-03   4.0665925e-03   7.5951000e-03   3.6015682e-03   4.5184530e-03   5.0362911e-03   2.3873447e-03   2.3916463e-03   2.1166084e-03   5.8005881e-03   9.3326086e-03   5.1921662e-03   2.5583136e-03   3.9040792e-03   8.1942534e-03   3.2098756e-03   3.4610228e-03   6.2351763e-03   5.0378736e-03   9.3974884e-04   3.8135545e-03   1.8905878e-03   1.0935023e-03   2.2739982e-03   4.8352649e-03   1.2660181e-03   2.5068904e-03   1.6511455e-03   5.4229770e-03   1.1989559e-03   6.1054655e-03   2.0081796e-03   8.6788304e-03   3.3159020e-03   2.5546078e-03   1.5534900e-03   4.7975700e-03   2.5264412e-03   4.9138585e-03   4.8875866e-03   6.2264786e-03   3.0375207e-03   1.9750388e-03   2.0217929e-03   8.0063599e-03   4.3571190e-03   1.6611913e-03   2.0167984e-03   1.5438876e-03   3.6494229e-04   2.0217253e-03   4.6462511e-04   1.5312613e-03   2.1707116e-03   9.6955753e-04   
3.3920004e-03   1.0848877e-03   3.2679078e-03   1.3217642e-03   1.7681092e-03   4.3634818e-04   4.3762590e-03   9.8982281e-04   1.4702047e-03   1.2309511e-03   6.6753502e-04   6.7519636e-04   2.4877587e-04   3.8105160e-04   8.2183923e-04   3.0399429e-03   3.2267433e-04   8.9534104e-04   8.4272052e-04   3.5139014e-03   4.8234985e-03   4.1432495e-03   3.0463297e-04   1.2193814e-03   2.4923401e-03   1.7011388e-04   1.7584273e-03   1.0444437e-03   3.3026408e-04   1.0723664e-03   7.7198092e-04   2.2859213e-03   1.4276818e-03   5.7687029e-04   4.8466907e-03   8.6936394e-04   1.2850654e-02   6.1088154e-03   3.5569698e-03   5.5719646e-03   7.1488198e-03   5.6571014e-03   8.7634622e-03   5.0674178e-03   6.2538450e-03   5.7468537e-03   2.5694606e-03   3.4855366e-03   2.9211508e-03   7.2454781e-03   1.0560745e-02   5.7784909e-03   3.4230741e-03   4.7152707e-03   1.0513823e-02   4.7114505e-03   4.2697686e-03   7.1803873e-03   6.8688743e-03   1.5447042e-03   4.5401495e-03   2.7361382e-03   1.4855517e-03   2.5857007e-03   6.2270622e-03   2.0576267e-03   3.8519560e-03   2.0130817e-03   6.8461641e-03   1.8250891e-03   7.7109246e-03   3.0910476e-03   9.4645289e-03   4.1075164e-03   2.7837137e-03   2.0667492e-03   5.7442304e-03   2.7416306e-03   6.1088154e-03   5.9744542e-03   7.0784056e-03   3.5969994e-03   3.0144395e-03   2.5834027e-03   8.5675229e-03   5.0446428e-03   9.3410747e-04   2.6108336e-03   1.8631164e-03   1.0514258e-03   2.7757095e-03   1.6027319e-03   5.9009151e-03   9.3507643e-05   4.8585250e-03   3.2279691e-03   7.0814372e-04   1.8064210e-03   4.7021523e-03   1.7906187e-03   1.7438865e-03   3.2726931e-03   2.1092470e-03   4.4199215e-04   2.5742583e-03   2.7576288e-03   2.1865740e-03   8.5954487e-04   4.3875130e-04   6.2385517e-03   2.0706463e-03   2.9895922e-03   2.4900150e-03   1.5452115e-03   1.2972442e-03   2.2089282e-03   1.3825902e-03   4.4047937e-03   1.3378707e-03   8.5528525e-04   1.4287812e-04   2.3200710e-04   1.9018574e-03   3.1193134e-03   2.7835858e-04   
1.0440284e-03   6.7774868e-04   1.6434489e-03   8.2787646e-03   9.1805293e-04   8.2830461e-03   3.6334874e-03   2.7936555e-03   2.6701226e-03   4.5417785e-03   4.4199379e-03   4.7638022e-03   3.6853341e-03   5.4544340e-03   3.4799734e-03   1.6014887e-03   2.8117239e-03   2.4093914e-03   5.5035080e-03   8.3987820e-03   4.0331795e-03   1.3946498e-03   1.6013776e-03   9.9231424e-03   4.6841468e-03   3.0824955e-03   4.3724199e-03   6.0877536e-03   1.8755546e-03   2.0838499e-03   1.1387331e-03   1.4636530e-03   1.0937142e-03   4.3877001e-03   1.4753015e-03   3.8552640e-03   2.5868499e-04   5.1223304e-03   7.0168484e-04   4.3920187e-03   3.9679568e-03   5.8082286e-03   1.3720301e-03   1.2437314e-03   1.9911684e-03   4.5361159e-03   3.9571028e-03   3.6334874e-03   3.7423473e-03   5.0666856e-03   3.9975266e-03   3.7265040e-03   1.8650355e-03   5.0595778e-03   1.9211780e-03   1.4802848e-03   2.3288314e-03   6.8065434e-05   4.0423471e-03   2.2193959e-04   7.6466779e-03   9.2054349e-04   2.1021154e-03   2.1770952e-03   5.4412625e-04   2.3412508e-03   6.7735470e-03   2.1146424e-03   1.1473823e-03   2.2087962e-03   4.4699141e-03   2.0616206e-03   2.1327263e-03   2.0380999e-03   3.2539726e-03   1.3598045e-03   3.9670027e-04   4.8634117e-03   2.4197901e-03   3.2204932e-03   1.7622064e-03   3.8856461e-03   1.2334789e-03   4.0452577e-04   1.0887743e-03   6.2435560e-03   2.0292084e-04   1.0829901e-03   1.7786841e-03   4.1228310e-04   1.8859956e-03   2.3286589e-03   5.1772577e-04   4.9172870e-04   2.0007065e-04   1.2918676e-03   5.0330599e-03   3.9898095e-04   9.4287570e-03   5.3866372e-03   4.8647393e-03   5.2819054e-03   6.4598280e-03   8.0073865e-03   6.1852254e-03   7.5566712e-03   9.5128884e-03   3.7267827e-03   1.2843714e-03   4.8237356e-03   3.5085400e-03   7.3800011e-03   8.6672418e-03   3.7373665e-03   3.3006522e-03   3.2553459e-03   1.4768796e-02   8.5372346e-03   3.7597131e-03   5.0208833e-03   1.0540608e-02   2.9216135e-03   2.9987594e-03   3.5026889e-03   1.9346800e-03   
1.3415748e-03   6.5619943e-03   4.2356898e-03   7.2096387e-03   1.4370251e-03   7.0771420e-03   2.7742326e-03   8.9575949e-03   5.8574207e-03   5.7119619e-03   3.0499081e-03   1.2067939e-03   2.4757916e-03   5.0772246e-03   3.1235792e-03   5.3866372e-03   5.0347068e-03   5.1609892e-03   3.8212989e-03   5.4644467e-03   2.4256658e-03   4.6018293e-03   2.9278326e-03   8.9754714e-04   1.9308173e-03   2.4780225e-03   8.6351250e-04   4.2337045e-03   2.1236680e-03   7.2381764e-04   2.1294516e-04   3.5897400e-03   7.7335429e-04   6.4715989e-03   7.0701488e-04   5.1814848e-03   2.9010443e-04   5.5409554e-03   2.4566735e-03   3.3429771e-04   3.2970103e-04   2.0470962e-03   2.3835444e-03   1.6383998e-03   1.0044594e-03   8.9092263e-04   8.3257442e-04   1.5048347e-04   7.3947599e-03   5.1765389e-03   2.4819362e-03   5.2929628e-04   4.5409213e-03   9.8467896e-04   1.3414871e-03   3.3640577e-03   1.3544701e-03   5.6680732e-04   2.1931945e-04   1.3672243e-03   8.5440953e-04   8.3314842e-04   2.6161270e-04   1.7366856e-03   5.2799539e-04   1.7649847e-02   1.0764587e-02   8.4383096e-03   9.9924928e-03   1.2273825e-02   1.1788899e-02   1.2850999e-02   1.0630699e-02   1.2964118e-02   9.0438994e-03   4.5288546e-03   8.3580766e-03   6.9988358e-03   1.3044370e-02   1.5771615e-02   8.9276151e-03   7.0025139e-03   7.7681345e-03   1.9248623e-02   1.0890288e-02   8.2148016e-03   1.1050411e-02   1.3845989e-02   5.2067991e-03   7.6819868e-03   6.2976924e-03   4.3733262e-03   4.7066273e-03   1.1733076e-02   5.6953630e-03   9.5825887e-03   4.0184455e-03   1.2555508e-02   4.8201445e-03   1.2938333e-02   8.5012649e-03   1.2613331e-02   7.2187981e-03   4.6491251e-03   5.3739269e-03   1.0213138e-02   5.7106829e-03   1.0764587e-02   1.0424589e-02   1.1013685e-02   7.3861586e-03   8.2413252e-03   5.8213434e-03   1.1031181e-02   7.7860554e-03   2.6482878e-03   4.2794287e-04   1.8319104e-03   1.5871773e-03   1.2340260e-03   2.9761573e-03   6.4181441e-04   3.9337040e-03   3.6570436e-04   2.9155752e-03   
1.3382816e-05   5.6692471e-03   6.4631376e-04   2.4262663e-03   1.0063227e-03   2.3414789e-04   3.8352406e-04   2.6670153e-04   1.1841642e-03   1.3362953e-03   1.8214422e-03   1.1939941e-05   1.6841308e-04   4.0205285e-04   4.8423454e-03   5.6256276e-03   4.4224155e-03   3.0861840e-04   1.5326607e-03   2.2893524e-03   5.4845532e-04   1.9363566e-03   1.1483728e-03   6.7060376e-05   4.8890125e-04   9.7136579e-04   1.7676633e-03   1.3375465e-03   2.5372175e-04   4.0846507e-03   8.2917869e-04   1.6077651e-02   8.4460632e-03   5.5177458e-03   7.2638972e-03   9.7215743e-03   7.5597148e-03   1.1218034e-02   6.4234899e-03   8.1582316e-03   8.1119193e-03   4.1520915e-03   5.4624397e-03   4.9276011e-03   1.0142169e-02   1.4194600e-02   8.3023965e-03   4.8434975e-03   6.1466052e-03   1.3239048e-02   6.2447391e-03   6.5902144e-03   9.7724034e-03   8.6919298e-03   3.1454944e-03   6.3952659e-03   3.7622934e-03   3.0039560e-03   4.0045050e-03   8.7466217e-03   2.6847018e-03   5.4026497e-03   2.7900002e-03   9.6269482e-03   2.5687827e-03   8.7444600e-03   5.2241473e-03   1.2215015e-02   5.4919656e-03   4.2251052e-03   3.8716574e-03   8.5147448e-03   4.9424074e-03   8.4460632e-03   8.4294916e-03   9.9304629e-03   6.0757927e-03   5.1533590e-03   4.4115457e-03   1.1061881e-02   6.6918878e-03   4.2077380e-03   2.8084601e-04   8.0255307e-03   1.0009734e-03   2.4531898e-03   2.5259126e-03   4.7784400e-04   2.9272193e-03   6.4204523e-03   2.4613181e-03   8.1446100e-04   2.5131313e-03   4.2288269e-03   2.3479589e-03   2.4787654e-03   2.3219118e-03   3.4016339e-03   1.1923522e-03   3.6609993e-04   5.5309499e-03   2.7130308e-03   3.6876750e-03   2.1157012e-03   3.5197143e-03   1.1211667e-03   5.0040735e-04   1.2581494e-03   6.2453424e-03   4.9316252e-04   1.0956505e-03   1.9034882e-03   5.6262931e-04   2.1623642e-03   2.7381233e-03   6.2275542e-04   9.1647472e-04   4.3469066e-04   1.6016964e-03   5.5073116e-03   6.0170029e-04   8.2861929e-03   4.5965840e-03   4.2124912e-03   4.7715416e-03   
5.5699315e-03   7.3813594e-03   5.3659110e-03   7.1660104e-03   8.8786575e-03   2.9267242e-03   8.0445420e-04   4.1635703e-03   2.8364579e-03   6.3397911e-03   7.3211567e-03   2.8639086e-03   2.8974680e-03   2.8626073e-03   1.3776826e-02   8.0525608e-03   2.9632152e-03   4.1293440e-03   9.9396499e-03   2.4124505e-03   2.4179141e-03   3.2613675e-03   1.4673422e-03   9.3714862e-04   5.6839580e-03   4.1506352e-03   6.7187933e-03   1.3083992e-03   6.0861850e-03   2.6486423e-03   8.7285257e-03   5.1099460e-03   4.7657924e-03   2.6722144e-03   7.9417630e-04   1.8862622e-03   4.0883053e-03   2.3510235e-03   4.5965840e-03   4.1923613e-03   4.1490574e-03   2.9383782e-03   4.7171745e-03   1.8344857e-03   3.7646957e-03   2.4446440e-03   3.4307781e-03   6.9180223e-04   1.9272484e-03   5.2229584e-03   1.7410764e-03   5.5262484e-03   1.3512624e-03   1.2930220e-03   5.9265821e-04   7.2336834e-03   1.6622228e-03   1.4643809e-03   1.4579196e-03   1.0665934e-03   1.2486050e-03   4.3399433e-05   1.3778574e-03   2.2390179e-03   3.1206766e-03   3.9982611e-04   7.0342073e-04   1.4793565e-03   4.5495005e-03   7.4063181e-03   6.9037801e-03   1.1215419e-03   3.4551859e-04   4.3750592e-03   1.0242073e-03   2.4559606e-03   2.2544417e-03   6.9180223e-04   1.5393939e-03   1.8990091e-03   3.7039856e-03   2.8782332e-03   1.2534261e-03   6.1345149e-03   2.1154370e-03   1.6389445e-02   8.1714731e-03   4.6971245e-03   6.9480013e-03   9.2874957e-03   6.0735661e-03   1.1557043e-02   5.0795005e-03   6.2695782e-03   8.5942939e-03   4.8981354e-03   4.6386510e-03   4.5456138e-03   9.3051239e-03   1.4039626e-02   8.8459552e-03   4.7683489e-03   6.5513763e-03   1.0462868e-02   4.3969158e-03   6.5113261e-03   1.0098096e-02   6.6155737e-03   2.7131547e-03   6.8044529e-03   3.4785563e-03   3.0414918e-03   4.6799978e-03   7.9508909e-03   2.0423594e-03   3.8146379e-03   3.3303959e-03   8.7946774e-03   2.4271150e-03   7.7798359e-03   3.9016462e-03   1.3149131e-02   5.7850137e-03   5.0633237e-03   3.7856360e-03   
8.3083336e-03   4.9921128e-03   8.1714731e-03   8.2786520e-03   1.0189399e-02   5.8912773e-03   3.9649361e-03   4.4771942e-03   1.2268673e-02   7.3587328e-03   6.5787312e-03   1.3233357e-03   1.1455828e-03   1.2416155e-03   1.4002549e-03   2.1211045e-03   6.2047866e-03   1.6380421e-03   2.0343353e-03   1.2366343e-03   4.6737502e-03   2.4960613e-03   1.3635919e-03   1.1869805e-03   2.7420392e-03   1.3311387e-03   5.4030547e-04   3.4496369e-03   1.8301986e-03   2.4543913e-03   1.0360218e-03   4.9823495e-03   2.4358984e-03   7.7653049e-04   6.6085729e-04   5.4666528e-03   3.6251211e-04   9.0034369e-04   2.5308489e-03   6.8325669e-04   1.3068439e-03   1.4466712e-03   7.0328570e-04   7.2909544e-04   3.3427965e-04   8.2319726e-04   3.3236658e-03   2.7101948e-04   1.1415097e-02   6.6763180e-03   5.5975222e-03   6.7637660e-03   7.8425097e-03   9.1076654e-03   7.9396677e-03   8.6849227e-03   1.0537825e-02   4.7340864e-03   1.6981356e-03   5.5204544e-03   4.0238168e-03   8.5090240e-03   9.7837063e-03   4.5130967e-03   4.3644743e-03   4.6397935e-03   1.5929686e-02   9.2384755e-03   4.4702915e-03   6.2885333e-03   1.1607431e-02   3.1082253e-03   4.1080254e-03   4.4476701e-03   2.1448891e-03   1.9903866e-03   7.7241343e-03   4.8610346e-03   7.8308568e-03   2.2349743e-03   8.1986845e-03   3.4906235e-03   1.0785042e-02   6.0455598e-03   7.1678534e-03   4.3085958e-03   1.8327850e-03   2.7372439e-03   5.8248161e-03   2.8157351e-03   6.6763180e-03   6.1912806e-03   6.1165743e-03   3.9060199e-03   5.6854050e-03   2.9112999e-03   5.9286549e-03   4.2459890e-03   4.7261943e-03   7.5564450e-03   3.0589295e-03   9.9138151e-03   2.5436162e-03   2.3081715e-03   1.8344588e-03   1.2327595e-02   2.9913447e-03   3.4407458e-03   3.5166367e-03   2.2912956e-03   2.5985157e-03   1.0536407e-03   4.0148201e-03   5.3033668e-03   3.3636231e-03   1.5135209e-03   1.3379167e-03   2.9432336e-03   8.2210569e-03   1.2378534e-02   1.1256214e-02   3.0883819e-03   6.3877121e-04   7.6741777e-03   3.3091271e-03   
5.2745018e-03   5.1312091e-03   2.0746888e-03   2.6973394e-03   4.6616800e-03   6.5765736e-03   5.8096774e-03   2.9820979e-03   7.3187243e-03   4.6783172e-03   2.3410581e-02   1.3144529e-02   8.3391705e-03   1.1305315e-02   1.4457465e-02   9.3485129e-03   1.7555848e-02   7.8838497e-03   9.1347689e-03   1.4043414e-02   9.2390419e-03   8.2683227e-03   8.4558564e-03   1.4291672e-02   2.0441558e-02   1.4348706e-02   8.7091477e-03   1.1171095e-02   1.3577182e-02   6.6301175e-03   1.1191012e-02   1.5865943e-02   9.2559664e-03   5.8683930e-03   1.1694065e-02   6.7001372e-03   6.5334334e-03   8.9560518e-03   1.2574664e-02   4.2523471e-03   6.2343724e-03   6.8791672e-03   1.3649249e-02   5.2782119e-03   1.1089361e-02   6.8777652e-03   1.9771053e-02   1.0157447e-02   9.4893989e-03   7.5319640e-03   1.3411350e-02   8.8625592e-03   1.3144529e-02   1.3390278e-02   1.5951074e-02   1.0173619e-02   7.0905829e-03   8.5554531e-03   1.8741319e-02   1.2400067e-02   4.2985441e-03   2.4763265e-03   1.0193789e-03   1.4380091e-03   3.6495663e-03   1.2003820e-03   2.0584975e-03   2.4845352e-03   1.6034070e-03   3.3059261e-04   1.8671770e-03   2.0088877e-03   1.4284810e-03   4.3098388e-04   2.3514979e-04   5.2698749e-03   1.3824059e-03   2.2360501e-03   1.8501267e-03   1.6468815e-03   1.8319470e-03   2.4154150e-03   8.5423988e-04   3.3259702e-03   1.3633177e-03   3.9750527e-04   2.1357310e-04   1.4089375e-04   1.2605369e-03   2.3906980e-03   1.0696291e-04   1.0906412e-03   5.9076498e-04   1.1304618e-03   7.2732511e-03   6.4012947e-04   8.9598694e-03   3.8314732e-03   2.5915433e-03   2.9763427e-03   4.7600376e-03   4.3053173e-03   5.3266981e-03   3.6190516e-03   5.2272723e-03   3.6549051e-03   1.5047575e-03   2.5888356e-03   2.1634980e-03   5.5055672e-03   8.5050147e-03   4.0885074e-03   1.5151975e-03   1.9844585e-03   9.6042351e-03   4.2899092e-03   2.9941597e-03   4.6609860e-03   5.8520656e-03   1.4443089e-03   2.3092039e-03   1.1621771e-03   1.1296414e-03   1.1169479e-03   4.4204603e-03   
1.2641450e-03   3.4620250e-03   3.4971048e-04   5.1226200e-03   6.2752047e-04   4.7853928e-03   3.3731886e-03   6.3070664e-03   1.6662334e-03   1.2828407e-03   1.6597311e-03   4.4397739e-03   3.3292201e-03   3.8314732e-03   3.8867782e-03   5.1669823e-03   3.5417199e-03   3.1734550e-03   1.6842820e-03   5.5385508e-03   2.3108610e-03   1.0105990e-03   4.7523926e-03   2.9847642e-03   9.7105522e-03   2.6675945e-03   5.8410645e-03   1.0628436e-03   8.9773521e-03   5.4199214e-03   1.6519449e-03   1.3847497e-03   4.5932482e-03   4.2245140e-03   3.0278103e-03   1.7095869e-03   2.8675709e-03   2.8000554e-03   1.2095222e-03   1.0633409e-02   6.3588443e-03   2.3146826e-03   1.8579783e-03   7.7501487e-03   1.5279559e-03   3.0361588e-03   6.2292475e-03   2.9996444e-03   2.2092276e-03   1.2670329e-03   3.0585508e-03   1.9631739e-03   1.9448597e-03   1.6026176e-03   7.3057463e-04   1.6268750e-03   1.9360609e-02   1.3271536e-02   1.1367917e-02   1.3356276e-02   1.4871782e-02   1.6028245e-02   1.4980963e-02   1.5218503e-02   1.7630002e-02   1.0242398e-02   5.4451367e-03   1.1236806e-02   9.0841292e-03   1.5577818e-02   1.6739427e-02   9.6435556e-03   9.8426714e-03   1.0341095e-02   2.4442578e-02   1.5482723e-02   9.8424496e-03   1.2530745e-02   1.8870958e-02   7.3605563e-03   9.5556413e-03   9.6316193e-03   6.0281749e-03   6.1257342e-03   1.4596779e-02   9.4938425e-03   1.3739836e-02   6.3629397e-03   1.5181016e-02   7.9755348e-03   1.8111593e-02   1.1305790e-02   1.3376686e-02   9.8378125e-03   5.8135644e-03   6.9819428e-03   1.1631750e-02   6.1445613e-03   1.3271536e-02   1.2517306e-02   1.1992997e-02   8.3045136e-03   1.0906252e-02   7.4644491e-03   1.1536762e-02   9.7524154e-03   4.5852486e-03   9.8595493e-04   5.1231602e-03   5.3641159e-04   6.1291344e-03   9.0289414e-06   4.9719488e-03   2.7623691e-03   1.0135570e-04   4.9630072e-05   1.4573369e-03   2.2050307e-03   1.9067713e-03   6.5935996e-04   5.5655703e-04   4.9349156e-04   5.3910141e-05   7.6334144e-03   6.4541501e-03   
3.6168243e-03   4.5600228e-04   3.3337204e-03   1.8551457e-03   1.2329546e-03   3.8576017e-03   1.8263928e-03   3.4082293e-04   2.8939589e-05   1.6992773e-03   1.7320592e-03   1.4340517e-03   2.7567571e-04   1.5103910e-03   8.5202938e-04   1.8535044e-02   1.1041503e-02   8.1659733e-03   1.0422568e-02   1.2515324e-02   1.1445689e-02   1.3674148e-02   1.0415137e-02   1.2394915e-02   9.4870366e-03   4.7984401e-03   8.0611256e-03   6.7876377e-03   1.2924053e-02   1.5886474e-02   9.2445598e-03   7.3109344e-03   8.4847047e-03   1.8293716e-02   1.0175519e-02   8.2306422e-03   1.1561315e-02   1.3225081e-02   4.8091709e-03   8.1976852e-03   6.4746796e-03   4.2076714e-03   5.1007444e-03   1.1683626e-02   5.5527590e-03   8.9342726e-03   4.4973686e-03   1.2442734e-02   4.9468838e-03   1.3343592e-02   7.6599210e-03   1.3497447e-02   7.8132331e-03   5.1004820e-03   5.1592495e-03   1.0155636e-02   5.1788357e-03   1.1041503e-02   1.0666367e-02   1.1283660e-02   6.9627068e-03   7.4793969e-03   5.8082013e-03   1.1981463e-02   8.5614846e-03   3.8688652e-03   7.5179133e-03   3.7407669e-03   3.1676786e-04   4.6173801e-03   4.2799166e-03   2.2501511e-03   4.2413983e-03   4.2204030e-03   4.6109813e-03   1.7937707e-03   8.2328963e-04   8.3021324e-03   4.1326900e-03   5.3767829e-03   3.8579826e-03   2.3734756e-03   1.6523409e-04   8.8835369e-04   2.5288831e-03   7.6911436e-03   1.1035999e-03   1.9572002e-03   1.3626197e-03   8.4501724e-04   3.6117590e-03   4.7013989e-03   1.0056440e-03   1.3674472e-03   9.9055737e-04   2.9505217e-03   8.8795046e-03   1.5067108e-03   6.0099517e-03   3.2148697e-03   3.5475051e-03   3.0620571e-03   4.0315627e-03   6.0415844e-03   3.4044063e-03   5.7826017e-03   7.5784861e-03   2.1065729e-03   8.8233953e-04   3.5528527e-03   2.5792431e-03   5.1245213e-03   6.2842876e-03   2.4095844e-03   1.8446522e-03   1.4167723e-03   1.2151930e-02   7.2262167e-03   2.4839286e-03   2.9019075e-03   8.5383029e-03   2.6181282e-03   1.4064800e-03   2.3288946e-03   1.7187301e-03   
6.4203056e-04   4.4078979e-03   3.6116427e-03   6.0606537e-03   7.3312980e-04   4.8772717e-03   2.0701887e-03   6.3495233e-03   5.1782781e-03   3.3738477e-03   1.3922890e-03   5.6378115e-04   2.0528392e-03   3.5526857e-03   3.4142665e-03   3.2148697e-03   3.0384066e-03   3.3899600e-03   3.3852248e-03   4.7610217e-03   1.6939002e-03   2.6027995e-03   1.1563987e-03   4.9937739e-03   2.8043563e-04   6.0192505e-03   1.1047123e-03   3.7340869e-03   9.3891763e-04   5.7693223e-04   8.5742923e-04   1.1063653e-03   2.1518410e-03   1.7873418e-03   1.8443813e-03   4.7874237e-04   4.0562362e-04   6.3806313e-04   5.6896950e-03   5.3990383e-03   4.0838955e-03   7.6270784e-04   3.0255235e-03   1.8702287e-03   1.2256654e-03   1.9416278e-03   1.1863062e-03   4.8488269e-04   7.2384993e-04   1.1782779e-03   1.1397261e-03   1.1824844e-03   4.5012569e-04   4.3416102e-03   9.3532010e-04   1.7514227e-02   9.8600607e-03   7.1630436e-03   8.1480968e-03   1.1292909e-02   9.1710206e-03   1.2255610e-02   7.6051240e-03   9.9403988e-03   9.4620221e-03   5.2227781e-03   7.1398046e-03   6.5861173e-03   1.2201906e-02   1.6541006e-02   9.8696183e-03   5.7022093e-03   6.6806000e-03   1.5779735e-02   7.9447125e-03   8.2548534e-03   1.1179561e-02   1.0455124e-02   4.7301378e-03   7.3678164e-03   4.4876356e-03   4.3705173e-03   4.8557182e-03   1.0520487e-02   3.4331839e-03   7.0769130e-03   3.1675776e-03   1.1611243e-02   3.1862426e-03   9.1961094e-03   7.4495828e-03   1.3429738e-02   6.1002834e-03   5.0525215e-03   5.4421147e-03   1.0502545e-02   7.0546448e-03   9.8600607e-03   9.9581080e-03   1.1708746e-02   8.2306703e-03   7.3218404e-03   5.8241089e-03   1.2089443e-02   7.3253099e-03   3.2913459e-03   8.4499731e-03   4.8569537e-03   8.0216351e-04   3.5049401e-03   4.1209949e-03   4.1951632e-03   1.4430192e-03   2.1132422e-03   3.9581533e-03   7.7606295e-03   2.8139659e-03   3.8070595e-03   4.7702994e-03   3.8227367e-03   9.3108167e-03   1.0381275e-02   3.5346820e-03   5.2926972e-04   8.0977990e-03   
2.5864687e-03   3.9952884e-03   4.6152713e-03   3.2808967e-03   5.0209478e-03   3.9992007e-03   7.6245140e-03   5.9395645e-03   4.2477403e-03   1.1072543e-02   4.9514063e-03   1.3733574e-02   6.2131974e-03   2.8230097e-03   5.6455623e-03   6.8638978e-03   3.4892544e-03   9.9148605e-03   3.2690483e-03   3.3086076e-03   7.4423917e-03   5.1197423e-03   2.7541244e-03   3.1219477e-03   6.1841097e-03   1.0726676e-02   7.6421585e-03   4.1594455e-03   6.2994749e-03   5.6115691e-03   2.0176931e-03   4.9542156e-03   8.4395555e-03   3.5502322e-03   1.8230265e-03   6.1478670e-03   3.1652703e-03   2.7049802e-03   4.9489115e-03   5.3042297e-03   1.9463142e-03   1.6986432e-03   4.1722516e-03   5.8173968e-03   2.6602408e-03   6.4952123e-03   1.5042394e-03   1.1842312e-02   5.5188608e-03   5.4735640e-03   2.9735027e-03   6.0829501e-03   4.0306925e-03   6.2131974e-03   6.3472325e-03   8.2596987e-03   4.2451973e-03   1.6828229e-03   3.7151898e-03   1.1574575e-02   7.1331719e-03   5.4811265e-03   5.5565221e-04   2.6852359e-03   1.0154324e-03   1.7597013e-04   3.1956114e-04   3.9523878e-04   1.2388706e-03   1.2655903e-03   1.6805675e-03   3.2406061e-05   1.6284558e-04   3.0055489e-04   4.9827743e-03   5.3992645e-03   4.0722394e-03   2.5388663e-04   1.8315484e-03   2.0093416e-03   5.5333384e-04   1.9333559e-03   1.0417210e-03   4.5631651e-05   3.9213732e-04   8.9628584e-04   1.5141190e-03   1.1540120e-03   1.6596111e-04   3.8108192e-03   6.9068728e-04   1.6110144e-02   8.5829426e-03   5.7514563e-03   7.4081873e-03   9.8865471e-03   7.9109476e-03   1.1246146e-02   6.7496818e-03   8.5805974e-03   8.1149529e-03   4.1088457e-03   5.6967153e-03   5.0837880e-03   1.0378372e-02   1.4309813e-02   8.2945922e-03   4.9454249e-03   6.1635386e-03   1.3817928e-02   6.6600542e-03   6.6926476e-03   9.8029996e-03   9.1474450e-03   3.3107453e-03   6.4113962e-03   3.9012077e-03   3.0861181e-03   3.9737498e-03   8.9756426e-03   2.8871345e-03   5.7720900e-03   2.7832463e-03   9.8623531e-03   2.6826772e-03   
9.0033513e-03   5.5466456e-03   1.2137789e-02   5.5287165e-03   4.1655510e-03   3.9755943e-03   8.6395601e-03   5.0222445e-03   8.5829426e-03   8.5444862e-03   9.9728516e-03   6.1969528e-03   5.4521508e-03   4.4886104e-03   1.0936532e-02   6.6624227e-03   6.0879321e-03   5.2071133e-03   3.8816096e-03   5.8831678e-03   5.7002681e-03   6.1926865e-03   2.4416494e-03   1.5149583e-03   1.0539677e-02   5.8224280e-03   7.4292172e-03   5.4190952e-03   2.7168224e-03   2.8895677e-04   1.2473682e-03   3.7477469e-03   9.3088565e-03   2.0944009e-03   2.9293124e-03   2.5720420e-03   1.8897161e-03   5.1660356e-03   6.4102501e-03   2.0138936e-03   2.7413840e-03   2.0664916e-03   4.4338085e-03   1.0353410e-02   2.6118482e-03   4.2150650e-03   2.5222817e-03   3.4164904e-03   3.0582580e-03   3.1593359e-03   6.2318287e-03   2.3862805e-03   6.4839490e-03   7.9101598e-03   1.0920878e-03   5.4699759e-04   3.4046237e-03   2.2291279e-03   4.0543858e-03   4.2004727e-03   1.1970351e-03   2.0117915e-03   1.4112028e-03   1.1968526e-02   7.9088029e-03   1.7082776e-03   1.7239090e-03   9.0238817e-03   2.6874577e-03   9.4975956e-04   2.9722514e-03   1.7009640e-03   5.1343341e-04   3.7130353e-03   4.7806054e-03   6.6267337e-03   1.3220831e-03   3.9245747e-03   2.9403206e-03   7.2757164e-03   5.0266602e-03   1.8157078e-03   1.4701768e-03   3.5054511e-04   1.7883207e-03   2.3616946e-03   2.6617941e-03   2.5222817e-03   2.1723972e-03   1.9530095e-03   2.4533627e-03   4.5559375e-03   1.3609168e-03   1.2016297e-03   8.2861882e-04   4.8139327e-03   2.8209366e-03   1.1052349e-04   3.7194090e-05   1.3880093e-03   2.0960687e-03   1.8746430e-03   7.3307377e-04   5.4600497e-04   5.2531656e-04   7.7399437e-05   7.5260309e-03   6.5039124e-03   3.6958100e-03   4.3794778e-04   3.1691974e-03   1.9589162e-03   1.1747018e-03   3.8950000e-03   1.8610625e-03   3.3531102e-04   5.6607375e-05   1.7087501e-03   1.8728452e-03   1.4997100e-03   3.0123700e-04   1.5391826e-03   8.8147902e-04   1.8294246e-02   1.0829571e-02   
7.9341800e-03   1.0299926e-02   1.2275737e-02   1.1220530e-02   1.3508784e-02   1.0262914e-02   1.2147087e-02   9.2823036e-03   4.6531594e-03   7.8245831e-03   6.5524640e-03   1.2610156e-02   1.5512672e-02   9.0061844e-03   7.1992659e-03   8.4158356e-03   1.7921554e-02   9.9492079e-03   7.9854978e-03   1.1342596e-02   1.2982212e-02   4.5954373e-03   8.0580657e-03   6.3922041e-03   4.0251341e-03   4.9895677e-03   1.1418706e-02   5.4787432e-03   8.7107499e-03   4.4679304e-03   1.2142235e-02   4.8867636e-03   1.3297934e-02   7.3458933e-03   1.3299484e-02   7.7358341e-03   4.9917221e-03   4.9413674e-03   9.8545070e-03   4.8770063e-03   1.0829571e-02   1.0435652e-02   1.1007902e-02   6.6474718e-03   7.1727397e-03   5.6106947e-03   1.1815640e-02   8.4729744e-03   1.5166983e-03   3.8470717e-03   4.0414722e-03   1.3464004e-03   1.0521554e-03   2.2256122e-03   8.2355212e-03   2.4972654e-03   3.6758320e-03   4.2952040e-03   1.2466978e-03   5.4237685e-03   7.4233145e-03   2.7434894e-03   1.3920410e-03   5.6705580e-03   1.6006987e-03   1.4795623e-03   2.5504964e-03   2.8217649e-03   4.8039535e-03   2.1716992e-03   5.1387834e-03   3.8511647e-03   3.4066344e-03   1.1638730e-02   3.4223746e-03   9.4998939e-03   3.3659956e-03   1.1438717e-03   2.4828929e-03   3.9653407e-03   1.5796312e-03   5.9993003e-03   1.2126234e-03   1.7377370e-03   4.6402796e-03   3.0884931e-03   1.1351744e-03   1.5562166e-03   3.9244608e-03   8.1556610e-03   5.1855221e-03   1.5226783e-03   2.8914073e-03   4.3431210e-03   9.2550137e-04   2.9272754e-03   5.3162687e-03   2.0104865e-03   7.9177243e-04   3.1975522e-03   8.1317449e-04   1.3785091e-03   2.5714969e-03   2.9511423e-03   2.8161529e-04   5.9442405e-04   1.5991249e-03   3.5615321e-03   5.8433150e-04   3.1011353e-03   9.7656296e-04   8.0983113e-03   2.3700003e-03   3.0461348e-03   1.6450583e-03   4.0872867e-03   3.5603497e-03   3.3659956e-03   3.6518311e-03   5.6844228e-03   3.2852236e-03   1.0303385e-03   1.9470044e-03   7.8778855e-03   3.7000111e-03   
1.9300708e-03   2.2574717e-03   1.1284358e-03   1.0646979e-03   1.0312076e-03   5.0621817e-03   1.2060164e-03   1.8000639e-03   2.0650564e-03   2.1487796e-03   3.2333878e-03   4.0195479e-03   1.2411974e-03   2.7183002e-03   2.2726383e-03   8.5094120e-04   2.4056963e-04   6.3782113e-04   1.2937611e-03   2.4889893e-03   5.9722053e-04   1.5621785e-03   1.2375553e-03   1.3358412e-03   8.1240216e-03   1.2311022e-03   1.1517636e-02   5.2909227e-03   3.4509195e-03   3.7153220e-03   6.3154320e-03   4.5407961e-03   7.2027738e-03   3.4054800e-03   5.2043334e-03   5.7490488e-03   3.1731637e-03   3.4719309e-03   3.4347027e-03   7.1701181e-03   1.1379612e-02   6.4467389e-03   2.2692649e-03   2.9960956e-03   9.7363741e-03   4.0178908e-03   4.7338729e-03   6.7982603e-03   5.6061060e-03   2.3699712e-03   3.8192855e-03   1.4133848e-03   2.2956056e-03   2.5088406e-03   5.7314562e-03   9.5862581e-04   3.4107092e-03   9.7091810e-04   6.6778472e-03   7.6184177e-04   4.3247639e-03   4.2032521e-03   8.9746527e-03   2.5881501e-03   2.8171688e-03   3.0086239e-03   6.5256052e-03   5.3225794e-03   5.2909227e-03   5.5860698e-03   7.6013097e-03   5.5376614e-03   4.0938335e-03   3.1000848e-03   8.1789436e-03   3.7376243e-03   3.2267663e-05   8.3707549e-04   1.6905622e-03   1.5718936e-03   9.3681209e-04   1.8762095e-04   1.9684682e-04   4.1277465e-05   6.4440482e-03   6.0518000e-03   3.8422414e-03   2.6274935e-04   2.4665214e-03   1.9088192e-03   8.3864343e-04   2.9786928e-03   1.4388393e-03   8.3975474e-05   5.3416679e-05   1.2919166e-03   1.6159093e-03   1.2740163e-03   1.2266042e-04   2.3692761e-03   7.1636545e-04   1.7563748e-02   9.9676420e-03   7.0439691e-03   9.1049407e-03   1.1370346e-02   9.8276989e-03   1.2673450e-02   8.7383729e-03   1.0632094e-02   8.9036592e-03   4.4721835e-03   6.9588127e-03   5.9822651e-03   1.1786793e-02   1.5215597e-02   8.8361414e-03   6.2553172e-03   7.4909328e-03   1.6226352e-02   8.5173085e-03   7.5296182e-03   1.0823312e-02   1.1340207e-02   4.0626163e-03   
7.4274805e-03   5.3041231e-03   3.6467217e-03   4.5925766e-03   1.0472609e-02   4.2980846e-03   7.4369871e-03   3.7248522e-03   1.1287932e-02   3.8978058e-03   1.1428778e-02   6.6149115e-03   1.2975959e-02   6.8214468e-03   4.6824073e-03   4.5741646e-03   9.4691004e-03   5.0187206e-03   9.9676420e-03   9.7385546e-03   1.0722589e-02   6.5565379e-03   6.4801586e-03   5.1854796e-03   1.1596572e-02   7.7633670e-03   9.9158601e-04   1.6686143e-03   1.5457871e-03   9.5071878e-04   3.0701864e-04   3.6362811e-04   4.2548991e-05   6.6288818e-03   6.0417331e-03   3.6564704e-03   2.5834714e-04   2.6458983e-03   1.8764428e-03   8.4820574e-04   3.2578450e-03   1.5209238e-03   1.5090465e-04   6.1935165e-05   1.3587649e-03   1.7158299e-03   1.3006818e-03   1.6628415e-04   2.0535582e-03   7.1188329e-04   1.7324639e-02   9.9147481e-03   7.0600417e-03   9.2840701e-03   1.1302120e-02   1.0064078e-02   1.2589616e-02   9.1080261e-03   1.0920912e-02   8.6425556e-03   4.2412792e-03   6.9624194e-03   5.8567924e-03   1.1639501e-02   1.4739829e-02   8.4651679e-03   6.3684651e-03   7.5923014e-03   1.6475338e-02   8.8252851e-03   7.3097093e-03   1.0588424e-02   1.1694482e-02   3.9807447e-03   7.3452100e-03   5.5353092e-03   3.5104587e-03   4.4753907e-03   1.0430724e-02   4.6273816e-03   7.6755144e-03   3.8450588e-03   1.1170257e-02   4.1281664e-03   1.1986478e-02   6.5423939e-03   1.2634016e-02   6.9257811e-03   4.5247923e-03   4.3922796e-03   9.1553155e-03   4.5676871e-03   9.9147481e-03   9.5995789e-03   1.0353234e-02   6.1627903e-03   6.3902007e-03   5.0235520e-03   1.1247115e-02   7.7428181e-03   9.7824889e-04   1.6624661e-03   3.0059873e-03   2.5249972e-04   6.2343657e-04   1.1719366e-03   4.0275459e-03   6.3575546e-03   5.8712921e-03   7.5872446e-04   5.6741436e-04   3.5908637e-03   6.4996051e-04   1.9776689e-03   1.6840088e-03   4.6219818e-04   1.2917587e-03   1.3743184e-03   3.0187893e-03   2.2373182e-03   9.0828703e-04   5.7426890e-03   1.5775049e-03   1.5168309e-02   7.4080646e-03   
4.2538993e-03   6.3033105e-03   8.5100385e-03   5.7899432e-03   1.0520014e-02   4.8624768e-03   6.1169570e-03   7.6384090e-03   4.1091153e-03   4.1981402e-03   3.9923041e-03   8.6172736e-03   1.2997087e-02   7.8717119e-03   4.1667776e-03   5.7584992e-03   1.0387455e-02   4.3546484e-03   5.7746712e-03   9.1054055e-03   6.5301937e-03   2.3054088e-03   5.9614728e-03   3.0379183e-03   2.5067124e-03   3.9126904e-03   7.3239330e-03   1.8370981e-03   3.7021481e-03   2.7152943e-03   8.1304066e-03   2.0347479e-03   7.4172182e-03   3.6502880e-03   1.1915038e-02   5.0530642e-03   4.2458688e-03   3.2213237e-03   7.5116191e-03   4.3860606e-03   7.4080646e-03   7.4726409e-03   9.2071058e-03   5.2421868e-03   3.6675370e-03   3.8308993e-03   1.1024145e-02   6.4520833e-03   3.0133930e-04   5.1037905e-03   1.2017859e-03   2.1951761e-03   1.7686888e-03   1.8224411e-03   2.8788865e-03   3.1740291e-03   7.0420689e-04   2.2350252e-03   2.1605967e-03   1.6074282e-04   9.9312132e-04   6.3910600e-04   1.1068342e-03   2.2522084e-03   4.2563220e-04   2.1383582e-03   1.1781524e-03   1.1965920e-03   6.6826197e-03   8.9701315e-04   8.8374021e-03   3.5249181e-03   1.9006375e-03   3.2674819e-03   4.3522253e-03   3.8602822e-03   5.5032387e-03   3.5903574e-03   4.6280025e-03   3.2207897e-03   1.0860964e-03   1.8528751e-03   1.3287556e-03   4.5637650e-03   7.2040689e-03   3.3452381e-03   1.6438459e-03   2.5042800e-03   8.4078534e-03   3.6412189e-03   2.1968551e-03   4.2884610e-03   5.3072049e-03   5.5962180e-04   2.2924485e-03   1.3972917e-03   4.1775124e-04   1.0120628e-03   3.7672964e-03   1.3901921e-03   2.7802443e-03   7.8074497e-04   4.2513803e-03   8.6439557e-04   5.7017672e-03   1.9558095e-03   6.1305769e-03   2.0764524e-03   1.1750038e-03   7.8121442e-04   3.3536027e-03   1.6840705e-03   3.5249181e-03   3.4031938e-03   4.3169071e-03   2.0323666e-03   1.8104373e-03   1.0313169e-03   5.4663596e-03   2.6797009e-03   4.7017751e-03   1.4050521e-03   2.2967640e-03   1.4490557e-03   2.3196093e-03   
1.7015479e-03   1.5767700e-03   5.5713099e-04   3.7392678e-03   8.9324098e-04   2.5002761e-04   8.6482380e-04   1.3585049e-04   1.1239692e-03   1.9761479e-03   8.2927740e-05   9.6014531e-04   3.4965263e-04   8.9552257e-04   5.7645956e-03   3.1975366e-04   8.7743211e-03   4.0156462e-03   2.9085611e-03   3.7265804e-03   4.9611690e-03   5.3200137e-03   5.4291747e-03   4.9090444e-03   6.4181467e-03   3.1699225e-03   9.0457257e-04   2.8707306e-03   2.0343345e-03   5.5511033e-03   7.6886258e-03   3.2962764e-03   1.9800100e-03   2.4033904e-03   1.0905558e-02   5.4434884e-03   2.6411651e-03   4.3219947e-03   7.2333465e-03   1.3483652e-03   2.2580657e-03   1.9023537e-03   8.2889320e-04   8.3511491e-04   4.6996589e-03   2.2192064e-03   4.3941392e-03   6.5409726e-04   5.2313685e-03   1.2879740e-03   6.5768476e-03   3.4674432e-03   5.6567012e-03   2.0849532e-03   8.7522015e-04   1.2925162e-03   3.9107304e-03   2.2116014e-03   4.0156462e-03   3.8265815e-03   4.4724719e-03   2.6832503e-03   3.2108081e-03   1.3825243e-03   4.7909587e-03   2.3944351e-03   1.6956470e-03   1.0108236e-03   9.3414345e-04   1.2241588e-02   1.0680036e-02   6.4457112e-03   2.1113831e-03   4.8851719e-03   3.9417362e-03   3.5304281e-03   6.9169294e-03   4.3255103e-03   1.5489174e-03   5.9380000e-04   4.2050996e-03   3.5612837e-03   3.5981180e-03   1.5473563e-03   1.0469113e-03   2.7881158e-03   2.6084642e-02   1.6915515e-02   1.3046936e-02   1.5813114e-02   1.8728660e-02   1.6622622e-02   2.0172358e-02   1.5047933e-02   1.7480656e-02   1.5121187e-02   8.9988271e-03   1.2921567e-02   1.1505555e-02   1.9202447e-02   2.2948431e-02   1.4807179e-02   1.1989561e-02   1.3522703e-02   2.4353472e-02   1.4534512e-02   1.3492376e-02   1.7699736e-02   1.8260951e-02   8.7551977e-03   1.3397972e-02   1.0605977e-02   8.1065177e-03   9.3705995e-03   1.7600384e-02   8.9170386e-03   1.3238432e-02   8.2394556e-03   1.8591466e-02   8.5466794e-03   1.8371580e-02   1.2139358e-02   2.0015536e-02   1.2667833e-02   9.3753580e-03   
9.4099897e-03   1.5925767e-02   9.2577085e-03   1.6915515e-02   1.6530549e-02   1.7369847e-02   1.1736690e-02   1.2003814e-02   1.0321540e-02   1.8104120e-02   1.3766972e-02   1.5141485e-04   3.6186411e-04   5.0640473e-03   5.8846843e-03   4.5372473e-03   2.9889995e-04   1.4503455e-03   2.4048820e-03   5.6473356e-04   2.1694139e-03   1.2760726e-03   5.0001009e-05   4.2762331e-04   1.0752642e-03   1.9285330e-03   1.4459840e-03   2.6007132e-04   3.8461838e-03   8.8079628e-04   1.6321310e-02   8.6239000e-03   5.6156570e-03   7.5378743e-03   9.9017513e-03   7.7506746e-03   1.1476827e-02   6.6644304e-03   8.3385285e-03   8.2160708e-03   4.1911289e-03   5.5513041e-03   4.9688023e-03   1.0246372e-02   1.4224144e-02   8.3426053e-03   5.0473242e-03   6.4114140e-03   1.3391748e-02   6.3862951e-03   6.6425216e-03   9.9154668e-03   8.8906649e-03   3.1510729e-03   6.5653256e-03   3.9767727e-03   3.0148559e-03   4.1087786e-03   8.8891438e-03   2.8692574e-03   5.5219515e-03   2.9822205e-03   9.7392578e-03   2.7559926e-03   9.1569917e-03   5.1978118e-03   1.2376315e-02   5.7353587e-03   4.3202502e-03   3.8711781e-03   8.5388416e-03   4.7844327e-03   8.6239000e-03   8.5671038e-03   9.9856257e-03   5.9819812e-03   5.1316831e-03   4.4550423e-03   1.1213210e-02   6.9205612e-03   3.8131057e-04   6.7888158e-03   7.3363003e-03   5.3694436e-03   6.8516623e-04   1.8924051e-03   2.8895525e-03   1.2584203e-03   3.1106561e-03   1.9851820e-03   2.2993360e-04   3.0705767e-04   1.8065799e-03   2.2681792e-03   1.9895114e-03   4.4561501e-04   3.2950230e-03   1.3536253e-03   1.9442033e-02   1.0986408e-02   7.5586116e-03   9.6139706e-03   1.2432885e-02   9.8077912e-03   1.4077548e-02   8.4254038e-03   1.0370813e-02   1.0478515e-02   5.7856689e-03   7.4894276e-03   6.8497491e-03   1.2864462e-02   1.7242156e-02   1.0603978e-02   6.8089917e-03   8.2980029e-03   1.5941696e-02   8.0862824e-03   8.7712331e-03   1.2406133e-02   1.0894930e-02   4.6791769e-03   8.5829625e-03   5.4740472e-03   4.5017704e-03   
5.7029968e-03   1.1309070e-02   4.0232333e-03   7.1989285e-03   4.2761837e-03   1.2289743e-02   4.0060693e-03   1.1002553e-02   7.0316526e-03   1.5021687e-02   7.5484658e-03   5.9167328e-03   5.5390768e-03   1.0934157e-02   6.4525090e-03   1.0986408e-02   1.0943118e-02   1.2502015e-02   7.9589531e-03   6.9802991e-03   6.2208864e-03   1.3663387e-02   8.8999016e-03   6.6062542e-03   5.5827314e-03   3.2137911e-03   2.4648342e-04   3.0882054e-03   1.4855278e-03   8.8238305e-04   3.0312113e-03   1.3048774e-03   1.7074342e-04   4.2409993e-05   1.2055084e-03   1.2882692e-03   1.0204762e-03   8.9780466e-05   2.0467514e-03   5.4370125e-04   1.7241595e-02   9.9776023e-03   7.2970604e-03   9.2205394e-03   1.1400153e-02   1.0313298e-02   1.2454804e-02   9.2538420e-03   1.1254653e-02   8.6534668e-03   4.2479840e-03   7.2108683e-03   6.0807502e-03   1.1908599e-02   1.4998922e-02   8.5435101e-03   6.3304436e-03   7.3992448e-03   1.7031719e-02   9.1810782e-03   7.4786833e-03   1.0592315e-02   1.2037933e-02   4.2382770e-03   7.2750145e-03   5.5074473e-03   3.6741461e-03   4.4146887e-03   1.0634443e-02   4.6823366e-03   8.0104480e-03   3.6842552e-03   1.1424845e-02   4.0945647e-03   1.1857190e-02   7.0169427e-03   1.2513676e-02   6.7752703e-03   4.4459599e-03   4.6022279e-03   9.3965771e-03   4.9465269e-03   9.9776023e-03   9.6902181e-03   1.0478640e-02   6.5221232e-03   6.8355801e-03   5.1573625e-03   1.1079009e-02   7.5518079e-03   2.6192775e-03   5.9216859e-03   4.3727502e-03   5.1296754e-03   5.3199310e-03   2.7670517e-03   1.1371035e-03   2.6020928e-03   5.1188394e-03   7.5382072e-03   2.4476107e-03   5.0616317e-03   3.9293810e-03   5.2302586e-03   1.5095801e-02   4.1435464e-03   4.4658108e-03   8.7058429e-04   4.4660793e-04   2.5555941e-04   1.2381894e-03   8.7244097e-04   2.0362511e-03   8.1311609e-04   1.5164656e-03   1.9481959e-03   1.9017569e-03   4.9004150e-04   8.1438359e-04   1.7141435e-03   4.7093504e-03   2.7065864e-03   6.0393489e-05   4.2915211e-04   3.9373776e-03   
1.5669301e-03   1.2950596e-03   2.1025536e-03   1.9522193e-03   1.0322026e-03   8.4218839e-04   9.2905490e-05   1.2346326e-03   1.1781032e-03   9.8002670e-04   7.9339756e-04   1.1328834e-03   6.2948320e-04   1.4629322e-03   3.9084846e-04   1.4164544e-03   1.5298834e-03   3.8721999e-03   2.7001180e-04   1.5287601e-03   1.2669024e-03   2.1037534e-03   3.7134859e-03   8.7058429e-04   1.1775044e-03   2.8495857e-03   2.7086756e-03   1.4023687e-03   1.0191451e-03   3.8647977e-03   9.3179883e-04   1.2988786e-03   3.9786961e-03   9.7659791e-03   1.8948352e-03   3.2059058e-03   1.9902291e-03   1.7004118e-03   5.2879179e-03   6.5830280e-03   1.9368261e-03   2.2268965e-03   1.8962421e-03   4.4774419e-03   1.1162650e-02   2.6447647e-03   4.9365528e-03   3.0323131e-03   3.9972109e-03   2.9343480e-03   3.7523678e-03   6.3980393e-03   2.7120874e-03   6.2404178e-03   8.0807935e-03   1.9416596e-03   1.3024398e-03   4.0245181e-03   3.0697725e-03   5.0283287e-03   5.8270256e-03   2.3505477e-03   2.0375391e-03   1.2652208e-03   1.2527532e-02   8.0556250e-03   2.6750700e-03   2.5225228e-03   9.0764566e-03   3.4699713e-03   1.3308823e-03   2.7664696e-03   2.4678015e-03   9.5729618e-04   4.3858320e-03   4.4729680e-03   6.8828714e-03   1.1547480e-03   4.8183810e-03   2.7179325e-03   6.3087897e-03   6.0843336e-03   2.6715726e-03   1.3701564e-03   8.5085504e-04   2.7129190e-03   3.6187454e-03   4.3357364e-03   3.0323131e-03   2.8888833e-03   3.1752051e-03   4.0026050e-03   5.6110347e-03   2.1520192e-03   2.0021167e-03   9.3001678e-04   2.6139835e-03   9.7625461e-03   4.3820793e-04   2.7906691e-03   3.4740135e-03   1.5471352e-03   3.7324822e-03   3.8774805e-03   1.8049144e-03   1.0166909e-03   9.6586952e-04   2.7620874e-03   5.6379477e-03   1.4267399e-03   9.9420936e-03   6.9534902e-03   7.2046628e-03   7.1611325e-03   8.0864542e-03   1.1050679e-02   7.0236664e-03   1.0684883e-02   1.2982713e-02   4.4982901e-03   2.1523458e-03   7.1621069e-03   5.3786825e-03   9.3055499e-03   9.5487861e-03   
4.3879387e-03   5.0693092e-03   4.4931267e-03   1.8805226e-02   1.2143973e-02   5.1616364e-03   5.8432704e-03   1.4239876e-02   5.0396297e-03   4.0643450e-03   5.6688069e-03   3.5911389e-03   2.3422208e-03   8.6067938e-03   6.9700590e-03   1.0540875e-02   2.8498735e-03   9.0488233e-03   4.9258801e-03   1.1781603e-02   8.6822745e-03   5.8089260e-03   4.4337196e-03   2.0267056e-03   4.1354243e-03   6.4148025e-03   4.4976834e-03   6.9534902e-03   6.4275292e-03   5.9584428e-03   5.3006657e-03   8.1437729e-03   3.9023880e-03   4.5299634e-03   3.7988456e-03   2.5123066e-03   1.1946309e-03   2.0097566e-04   1.7928786e-03   5.7143349e-04   1.2667176e-04   4.6644872e-04   4.4712161e-04   1.0683378e-03   5.7468992e-04   7.6554900e-05   3.3664265e-03   2.0582490e-04   1.3553574e-02   7.1051776e-03   4.8941009e-03   6.5257957e-03   8.3098959e-03   7.5373275e-03   9.3233389e-03   6.7621473e-03   8.4467156e-03   6.1291129e-03   2.5944515e-03   4.8248569e-03   3.9028342e-03   8.7515124e-03   1.1627531e-02   6.1178515e-03   4.1203288e-03   5.0572182e-03   1.3529172e-02   6.7984240e-03   5.0687125e-03   7.7357162e-03   9.2015610e-03   2.4873418e-03   4.9175701e-03   3.5355888e-03   2.0473249e-03   2.6605314e-03   7.6511613e-03   3.0628424e-03   5.7307078e-03   2.1169019e-03   8.3320524e-03   2.4574218e-03   9.1102030e-03   4.8098810e-03   9.5736130e-03   4.5225384e-03   2.7314130e-03   2.7690856e-03   6.7080946e-03   3.3506310e-03   7.1051776e-03   6.8798895e-03   7.7001955e-03   4.4569824e-03   4.6305164e-03   3.1848752e-03   8.4170139e-03   5.2045050e-03   6.9296246e-03   2.1360002e-03   3.7784686e-03   3.9888330e-03   1.9332806e-03   3.1165972e-03   3.4622240e-03   6.1843155e-03   4.9689115e-03   2.8276933e-03   8.4143362e-03   3.9488633e-03   1.7390613e-02   8.6823566e-03   4.7125346e-03   7.5297389e-03   9.6551734e-03   5.6073015e-03   1.2632980e-02   4.7967985e-03   5.4595549e-03   9.6779905e-03   6.1919843e-03   4.6437798e-03   4.8811137e-03   9.2634464e-03   1.4470481e-02   
9.9239882e-03   5.5116951e-03   7.7393895e-03   8.8498403e-03   3.6104684e-03   7.0913900e-03   1.1068993e-02   5.6668826e-03   3.0002631e-03   7.9032607e-03   4.0760726e-03   3.7279027e-03   5.9729084e-03   7.9919733e-03   2.3383845e-03   3.2272638e-03   4.6096764e-03   8.7690968e-03   3.1364844e-03   7.9578373e-03   3.3962632e-03   1.4631125e-02   6.8663759e-03   6.4834920e-03   4.3461235e-03   8.7377082e-03   5.5255283e-03   8.6823566e-03   8.8561968e-03   1.1024987e-02   6.2645608e-03   3.5745084e-03   5.1960404e-03   1.3983177e-02   8.7265781e-03   1.5360474e-03   2.2738132e-03   6.3957013e-04   1.8470485e-03   1.9436948e-03   8.2753134e-04   1.4870044e-04   1.8575051e-04   1.1511457e-03   4.1570185e-03   4.2481246e-04   1.2022631e-02   7.4558885e-03   6.7319225e-03   7.0097246e-03   8.7236579e-03   1.0059866e-02   8.2686681e-03   9.2543911e-03   1.1644804e-02   5.5866311e-03   2.5062451e-03   6.6946690e-03   5.2622870e-03   9.8836223e-03   1.1452219e-02   5.6355000e-03   4.7339621e-03   4.6436729e-03   1.7620106e-02   1.0378141e-02   5.6675320e-03   7.1003305e-03   1.2674455e-02   4.3836233e-03   4.5891805e-03   4.7259960e-03   3.2437023e-03   2.5249532e-03   8.8366518e-03   5.2434703e-03   8.9867759e-03   2.2946416e-03   9.5089828e-03   3.7537168e-03   1.0503141e-02   7.8427036e-03   7.7804598e-03   4.4127994e-03   2.3620086e-03   3.9941388e-03   7.2903223e-03   4.7424240e-03   7.4558885e-03   7.1215300e-03   7.3539364e-03   5.7328206e-03   7.4284923e-03   3.9643246e-03   6.4306545e-03   4.3729078e-03   1.0852360e-03   3.9664584e-04   4.5123208e-04   1.2414442e-03   2.3150826e-04   1.4183402e-03   7.0426023e-04   4.8163546e-04   5.0741189e-03   3.9595383e-04   1.0937070e-02   5.0339955e-03   3.1168119e-03   4.5426488e-03   6.0432452e-03   5.3182662e-03   7.1300333e-03   4.7641753e-03   6.1358685e-03   4.4885462e-03   1.6916118e-03   3.0636842e-03   2.3987749e-03   6.3937387e-03   9.2584560e-03   4.5899059e-03   2.5786937e-03   3.4750392e-03   1.0541835e-02   
4.8350707e-03   3.4370915e-03   5.7977832e-03   6.8277961e-03   1.3144796e-03   3.3638906e-03   2.1338739e-03   1.0504939e-03   1.6413828e-03   5.4173620e-03   1.8780432e-03   3.9014661e-03   1.1794316e-03   6.0207953e-03   1.3473818e-03   6.9135169e-03   3.1440431e-03   7.6724602e-03   2.9974710e-03   1.7751706e-03   1.5977690e-03   4.8531144e-03   2.4494451e-03   5.0339955e-03   4.8947958e-03   5.8464391e-03   3.1299587e-03   2.9881785e-03   1.9161069e-03   6.7804975e-03   3.6799724e-03   6.0827379e-04   2.1616612e-03   3.6536587e-03   6.0346559e-04   1.7560211e-03   1.3150048e-03   2.0808595e-03   9.6848049e-03   1.5050304e-03   8.5509882e-03   3.4934050e-03   2.3873070e-03   2.2266374e-03   4.3437966e-03   3.4432000e-03   4.8822411e-03   2.6118729e-03   4.2515298e-03   3.9280244e-03   2.1763551e-03   2.4215318e-03   2.3726469e-03   5.2469603e-03   8.8156975e-03   4.6543689e-03   1.1653780e-03   1.5634616e-03   8.3770205e-03   3.5034084e-03   3.2683461e-03   4.7014835e-03   4.7252691e-03   1.7941232e-03   2.2843067e-03   6.9328893e-04   1.6436639e-03   1.4866860e-03   4.0234517e-03   7.8970448e-04   2.8726545e-03   3.0564882e-04   4.8340126e-03   3.3681970e-04   3.2031086e-03   3.4962764e-03   6.5051951e-03   1.2935806e-03   1.7549327e-03   2.1673002e-03   4.7736712e-03   4.5514956e-03   3.4934050e-03   3.7661841e-03   5.5497035e-03   4.3927525e-03   3.3294822e-03   2.0638533e-03   5.8922322e-03   2.1421877e-03   1.0254014e-03   1.7967204e-03   2.4056491e-05   4.9606070e-04   1.5493043e-04   7.3786851e-04   5.7634641e-03   2.2821239e-04   9.8983432e-03   4.8234579e-03   3.6558248e-03   4.1003359e-03   5.8726550e-03   5.9033047e-03   6.1776943e-03   5.1708698e-03   7.0154090e-03   4.1004756e-03   1.5716690e-03   3.6383691e-03   2.8864008e-03   6.7180252e-03   9.3042807e-03   4.3918407e-03   2.3314998e-03   2.6748196e-03   1.1913776e-02   5.9266374e-03   3.6227612e-03   5.2997514e-03   7.7658365e-03   2.0655115e-03   2.8598574e-03   2.0567764e-03   1.5031818e-03   
1.3330059e-03   5.6389436e-03   2.2318728e-03   4.9186471e-03   7.3077456e-04   6.3335619e-03   1.3529510e-03   6.4463256e-03   4.4695359e-03   6.6841393e-03   2.3650796e-03   1.3974597e-03   2.1137539e-03   5.1383865e-03   3.4413190e-03   4.8234579e-03   4.7468419e-03   5.6953492e-03   3.9468482e-03   4.2095924e-03   2.1520165e-03   5.7168561e-03   2.8181362e-03   2.6785665e-04   8.6267880e-04   1.5080867e-03   1.0657562e-03   9.1259932e-05   3.2834346e-03   5.6400090e-04   1.5768182e-02   8.4481987e-03   5.6912110e-03   7.5460195e-03   9.7379656e-03   8.1209399e-03   1.1078194e-02   7.1125751e-03   8.8544675e-03   7.7247994e-03   3.7380313e-03   5.6213520e-03   4.8502413e-03   1.0137958e-02   1.3681745e-02   7.7719782e-03   4.9953780e-03   6.2009166e-03   1.4059422e-02   6.9488117e-03   6.3504999e-03   9.4504672e-03   9.5001122e-03   3.1213299e-03   6.2307927e-03   4.0875652e-03   2.8287725e-03   3.7400310e-03   8.8571631e-03   3.1845212e-03   5.9757813e-03   2.8285304e-03   9.6559261e-03   2.8616391e-03   9.5937311e-03   5.3787128e-03   1.1658427e-02   5.5682032e-03   3.8827352e-03   3.6680728e-03   8.1913042e-03   4.3904219e-03   8.4481987e-03   8.3060404e-03   9.4592722e-03   5.6422254e-03   5.2631423e-03   4.2070121e-03   1.0442544e-02   6.5544595e-03   1.6777879e-03   1.6905742e-03   1.4559893e-03   2.3696578e-04   1.7661444e-03   8.8626613e-04   1.8929605e-02   1.1198434e-02   8.2126916e-03   1.0357192e-02   1.2691977e-02   1.1286676e-02   1.3897779e-02   1.0124890e-02   1.2174180e-02   9.8437553e-03   5.0898821e-03   8.1181082e-03   6.9678076e-03   1.3160374e-02   1.6460126e-02   9.6980805e-03   7.2881507e-03   8.4988687e-03   1.8112744e-02   9.9154333e-03   8.5214392e-03   1.1906445e-02   1.2939064e-02   4.9240987e-03   8.3797448e-03   6.3232280e-03   4.3836801e-03   5.2916266e-03   1.1816149e-02   5.2785357e-03   8.7418147e-03   4.4568647e-03   1.2648763e-02   4.7832566e-03   1.2911612e-02   7.7565330e-03   1.3968530e-02   7.8153448e-03   5.3323341e-03   
5.3832666e-03   1.0542156e-02   5.6397042e-03   1.1198434e-02   1.0902962e-02   1.1744032e-02   7.3882415e-03   7.5970260e-03   6.0277743e-03   1.2454722e-02   8.6970669e-03   6.8936185e-04   2.4185836e-04   6.5755985e-04   5.6966351e-03   2.3035735e-04   9.8692354e-03   4.6426684e-03   3.3219514e-03   3.9792968e-03   5.6658817e-03   5.5182925e-03   6.1624599e-03   4.8454827e-03   6.5522795e-03   4.0009447e-03   1.4714328e-03   3.2975731e-03   2.5896752e-03   6.3872309e-03   9.0422907e-03   4.2582444e-03   2.2027730e-03   2.6736829e-03   1.1276803e-02   5.4428314e-03   3.3876877e-03   5.2005260e-03   7.2827649e-03   1.7342875e-03   2.7931367e-03   1.8984292e-03   1.2582586e-03   1.2661063e-03   5.3418398e-03   1.9890109e-03   4.4646783e-03   7.0136727e-04   6.0091268e-03   1.2000111e-03   6.3087140e-03   3.9600850e-03   6.7054154e-03   2.3290598e-03   1.3528267e-03   1.8394373e-03   4.8617299e-03   3.0806223e-03   4.6426684e-03   4.5595872e-03   5.5227532e-03   3.5904469e-03   3.7269772e-03   1.9354221e-03   5.7786823e-03   2.8379142e-03   1.6160265e-04   9.1316164e-04   4.5133200e-03   3.9636483e-04   1.2999342e-02   7.7990273e-03   6.7507696e-03   6.8565065e-03   9.1164934e-03   9.6187705e-03   8.8289255e-03   8.5171572e-03   1.1041936e-02   6.3870800e-03   3.1070828e-03   6.7336821e-03   5.5817760e-03   1.0377938e-02   1.2745315e-02   6.6393015e-03   4.6463353e-03   4.6865341e-03   1.7095703e-02   9.6546615e-03   6.2703784e-03   7.8942299e-03   1.1919237e-02   4.5195099e-03   4.9936591e-03   4.3235727e-03   3.5370732e-03   2.9138150e-03   9.0955590e-03   4.5162367e-03   8.4195838e-03   2.1592479e-03   9.9327314e-03   3.3056202e-03   9.5169563e-03   7.9097267e-03   8.9017922e-03   4.4124132e-03   2.8482486e-03   4.3995745e-03   8.1140558e-03   5.6448175e-03   7.7990273e-03   7.6396844e-03   8.3822247e-03   6.5839845e-03   7.5512097e-03   4.3854717e-03   7.5528676e-03   4.6977929e-03   6.1532842e-04   4.4755911e-03   9.8852555e-05   1.1287330e-02   6.2297232e-03   
5.1189561e-03   5.6158860e-03   7.4118621e-03   7.9100135e-03   7.4458980e-03   7.1082862e-03   9.2159095e-03   4.9487763e-03   1.9996447e-03   5.0869737e-03   4.0020507e-03   8.3714289e-03   1.0564909e-02   5.1004357e-03   3.5205852e-03   3.7366566e-03   1.4679778e-02   7.9564069e-03   4.6553854e-03   6.3553187e-03   1.0091302e-02   3.0597107e-03   3.7953264e-03   3.3093658e-03   2.2411736e-03   1.9075393e-03   7.2834604e-03   3.5416682e-03   6.7651117e-03   1.4482690e-03   7.9807479e-03   2.4205104e-03   8.4840253e-03   5.9704010e-03   7.4790367e-03   3.4333236e-03   1.8691676e-03   2.9457380e-03   6.2606998e-03   3.9378134e-03   6.2297232e-03   6.0227040e-03   6.6475915e-03   4.7553767e-03   5.6493596e-03   2.9997214e-03   6.2979764e-03   3.7014773e-03   2.9064840e-03   2.5951953e-04   1.5239927e-02   8.3975944e-03   6.0152535e-03   7.5736149e-03   9.7205279e-03   8.7194769e-03   1.0665619e-02   7.7107157e-03   9.6513260e-03   7.3495285e-03   3.4129734e-03   5.9493098e-03   4.9891659e-03   1.0300134e-02   1.3429483e-02   7.3762928e-03   4.9967161e-03   5.9349958e-03   1.5151045e-02   7.8143476e-03   6.2821596e-03   9.0873094e-03   1.0390318e-02   3.3768176e-03   5.9427925e-03   4.2522990e-03   2.8707879e-03   3.4399883e-03   9.0442431e-03   3.6087055e-03   6.7269934e-03   2.6557188e-03   9.8316941e-03   3.0241465e-03   9.9672648e-03   5.9844482e-03   1.0964814e-02   5.3765757e-03   3.5060112e-03   3.7251338e-03   8.1205003e-03   4.4033527e-03   8.3975944e-03   8.1986819e-03   9.1329776e-03   5.6810271e-03   5.8010063e-03   4.1618459e-03   9.6695634e-03   6.1457695e-03   3.7030792e-03   2.6425725e-02   1.8599298e-02   1.5467357e-02   1.8635118e-02   2.0442342e-02   2.0541696e-02   2.1259986e-02   1.9510831e-02   2.1931276e-02   1.5294793e-02   9.1682566e-03   1.5285848e-02   1.2953400e-02   2.0823480e-02   2.2513198e-02   1.4419732e-02   1.4350471e-02   1.5480442e-02   2.9209010e-02   1.9066274e-02   1.4266029e-02   1.8093335e-02   2.3152907e-02   1.0464507e-02   
1.4480769e-02   1.3685178e-02   9.2298310e-03   1.0125817e-02   1.9700965e-02   1.2730523e-02   1.7262155e-02   1.0225203e-02   2.0352884e-02   1.1535854e-02   2.3443654e-02   1.4503051e-02   1.9462802e-02   1.4723300e-02   9.8246219e-03   1.0395301e-02   1.6330792e-02   8.8773452e-03   1.8599298e-02   1.7720786e-02   1.7175716e-02   1.1774527e-02   1.4190018e-02   1.1287889e-02   1.7334951e-02   1.4959707e-02   1.2176711e-02   6.5819636e-03   5.0346277e-03   6.0397395e-03   7.7827629e-03   7.8599708e-03   8.1942510e-03   7.0899174e-03   9.0304038e-03   5.3331159e-03   2.1084043e-03   4.9833227e-03   3.9086435e-03   8.5223444e-03   1.0895531e-02   5.3819973e-03   3.7720778e-03   4.2725796e-03   1.4381123e-02   7.5978371e-03   4.7474248e-03   6.8472012e-03   9.8808760e-03   2.7678758e-03   4.1999833e-03   3.4360629e-03   2.0712504e-03   2.1185414e-03   7.4497841e-03   3.4016631e-03   6.4264729e-03   1.6858980e-03   8.1251590e-03   2.4514173e-03   8.9161864e-03   5.4840504e-03   8.2209587e-03   3.8745808e-03   2.1076714e-03   2.7824071e-03   6.3561971e-03   3.5186798e-03   6.5819636e-03   6.3377454e-03   6.9667686e-03   4.4964760e-03   5.2124475e-03   2.9917795e-03   7.0285944e-03   4.2716151e-03   1.5675754e-03   4.2437140e-03   2.9528525e-03   1.2801911e-03   5.5300675e-03   5.2313183e-04   7.0373258e-03   7.0028853e-03   1.5817688e-03   4.6017698e-03   4.2905415e-03   3.9763653e-03   1.7507004e-03   9.8088526e-04   2.0205251e-03   3.8651167e-03   2.8498196e-03   8.0083780e-03   8.7219602e-03   2.5429883e-03   8.6090275e-04   7.9906965e-03   6.1518285e-03   2.2097973e-03   5.7963314e-03   5.7460009e-03   4.2262969e-03   2.1872477e-03   8.9731860e-03   7.8793225e-03   5.8991938e-03   1.8999235e-03   7.1341168e-03   7.0170212e-03   6.4409068e-03   6.9062404e-04   3.2402506e-03   4.2346770e-03   4.9489360e-03   1.9844891e-03   6.6121605e-03   1.5675754e-03   1.4409377e-03   1.2937675e-03   4.5154908e-03   6.0031850e-03   4.0189646e-03   1.2526895e-03   2.2326743e-03   
6.7976748e-04   4.4251056e-04   5.1964468e-05   1.5314410e-03   4.7093005e-04   2.2872076e-03   2.4409680e-03   6.1996952e-04   1.8531872e-03   7.0675084e-04   7.4672595e-04   2.9977980e-04   1.6320684e-03   1.1279305e-03   6.4871929e-04   6.2074215e-04   4.0109945e-03   3.1610099e-03   4.6600326e-04   4.3207236e-04   3.1187047e-03   1.7495883e-03   3.8892953e-04   1.4972893e-03   1.7881991e-03   1.3932149e-03   1.6264024e-04   3.1616233e-03   2.5553184e-03   1.9652949e-03   2.4129313e-04   2.2301817e-03   3.0218268e-03   1.9168834e-03   1.5054952e-03   6.4024254e-04   1.6084351e-03   1.4082859e-03   5.9336508e-04   3.2376306e-03   1.1102230e-16   6.3653022e-05   8.5949029e-04   1.8263557e-03   1.6881339e-03   9.7411877e-04   1.7845049e-03   6.1330704e-04   7.1368649e-04   9.0151789e-04   6.1896245e-04   2.2461726e-03   1.0406196e-03   1.1172454e-03   1.6730261e-03   1.8311894e-03   1.8071232e-06   2.5364008e-04   8.3907159e-04   3.3874526e-03   2.1375798e-03   4.5487046e-04   1.1672957e-03   2.8115805e-03   1.2138761e-03   7.0446879e-04   1.8201572e-03   1.6098088e-03   5.2152529e-04   1.0325997e-03   6.5974667e-04   9.0603694e-04   1.4060118e-03   4.3618665e-04   1.3469029e-03   7.5850938e-04   1.4500880e-03   6.7416609e-04   1.0299923e-03   2.5247687e-03   4.1453726e-04   3.7399848e-03   9.2294426e-04   1.7551096e-03   7.2749277e-04   1.1614195e-03   2.4439916e-03   6.7976748e-04   8.1575962e-04   2.1038526e-03   1.4799615e-03   3.3653084e-04   6.6936205e-04   3.8899674e-03   1.5033836e-03   6.1892938e-04   8.9565267e-04   1.1187414e-03   1.0854623e-03   1.6238473e-03   1.6997610e-03   2.5416419e-03   7.7939556e-04   1.2163589e-03   1.1645635e-03   3.7479516e-03   2.5673270e-03   2.8413719e-04   3.4882680e-04   3.5538869e-03   2.1358644e-03   1.3529179e-03   1.5262264e-03   2.0757398e-03   1.9224937e-03   7.7565401e-04   6.3114517e-04   2.1204969e-03   1.7089017e-03   5.9516371e-04   1.8253945e-03   1.7466546e-03   1.3725400e-03   9.7938279e-04   1.2586511e-03   
1.2414457e-03   2.1946939e-03   3.0180258e-03   3.2655328e-04   2.0574339e-03   1.9692598e-03   1.9049224e-03   4.6812273e-03   4.4251056e-04   7.9383810e-04   2.4056659e-03   3.2171363e-03   2.0291670e-03   1.5121604e-03   3.2358269e-03   7.8092419e-04   1.6056499e-03   4.5150817e-04   2.5183573e-03   2.4854849e-03   8.1663583e-04   2.4301739e-03   9.3222022e-04   1.0572824e-03   1.7339381e-04   1.3855383e-03   1.3424761e-03   1.0242405e-03   9.7281350e-04   3.6981982e-03   3.3860618e-03   6.8052310e-04   4.8835842e-04   3.1532670e-03   2.2405021e-03   6.9163264e-04   1.9828394e-03   2.3495346e-03   1.9517373e-03   1.3548451e-04   3.7877283e-03   2.8247707e-03   2.6466102e-03   1.4866617e-04   2.8614588e-03   3.2185979e-03   2.1613637e-03   1.5430396e-03   1.0217871e-03   2.1845781e-03   1.8662401e-03   6.6257305e-04   3.7358116e-03   5.1964468e-05   1.2907411e-04   9.1062515e-04   2.1316335e-03   1.9388777e-03   1.3908595e-03   1.9526372e-03   9.5243046e-04   3.3534848e-03   2.1612689e-04   1.1409237e-04   3.7511533e-03   4.4135123e-03   6.6848080e-04   1.6638889e-03   1.5639090e-03   5.3797969e-03   4.6512834e-03   1.2304041e-03   2.1424312e-03   1.1209148e-03   3.9967200e-04   2.4397061e-03   3.5566606e-03   2.8773633e-04   1.9980195e-03   2.5921392e-03   1.0825130e-03   2.8538916e-03   3.5346603e-03   9.3770297e-04   1.4682668e-03   3.5169417e-04   2.8507687e-03   1.3414178e-03   1.7057263e-03   1.1395352e-03   1.1778138e-03   6.0686882e-03   1.8828714e-03   4.1140616e-03   2.6499598e-03   2.9837515e-03   5.4227271e-03   1.5314410e-03   2.0381041e-03   4.2816261e-03   3.9075302e-03   1.2227927e-03   2.5264748e-03   6.5268770e-03   2.9950818e-03   4.2523782e-03   4.6456758e-03   7.0284879e-04   2.6206169e-03   2.2973667e-03   2.1186617e-03   1.0662204e-03   1.4771683e-03   1.2714179e-03   1.6200996e-03   9.3400517e-04   6.3864823e-03   5.7928278e-03   1.2702417e-03   3.0680262e-04   5.4794582e-03   3.6431092e-03   7.0967201e-04   2.9368020e-03   3.3273895e-03   
2.1071133e-03   1.0635659e-03   5.3575283e-03   5.0378701e-03   2.9935636e-03   1.0701915e-03   3.8904013e-03   4.2643219e-03   4.2480632e-03   6.5030200e-04   1.1722241e-03   2.1985353e-03   2.8568074e-03   1.2361632e-03   4.8426872e-03   4.7093005e-04   4.8520973e-04   9.1253531e-04   3.1519557e-03   3.8819044e-03   2.1030647e-03   9.4974174e-04   6.5992898e-04   2.7415573e-04   4.6934818e-03   4.8265016e-03   1.1075187e-03   2.2246224e-03   2.7094103e-03   7.2547386e-03   5.7651516e-03   1.2879775e-03   2.2498412e-03   1.7969640e-03   2.9988677e-04   3.2873557e-03   4.6281528e-03   3.4411066e-04   2.2557578e-03   3.0860670e-03   7.9349579e-04   3.1195585e-03   3.7700064e-03   1.7729958e-03   8.2927671e-04   3.1955044e-04   2.5127017e-03   2.3924363e-03   1.2355886e-03   6.3306711e-04   1.7271405e-03   7.3517242e-03   1.9482896e-03   4.3986367e-03   3.1257329e-03   4.1788193e-03   6.3227646e-03   2.2872076e-03   2.9291555e-03   5.5970414e-03   4.9483109e-03   1.7952206e-03   3.0186434e-03   7.6597710e-03   3.3340783e-03   5.0895780e-03   5.6951678e-03   1.1683986e-03   2.4173290e-03   2.2930538e-03   6.6766405e-03   6.0598142e-03   2.0196993e-03   3.2103351e-03   7.1260012e-04   2.1040770e-04   3.4457175e-03   4.8676879e-03   4.7880714e-05   2.6160358e-03   3.7638559e-03   1.6534967e-03   3.7166920e-03   4.7397184e-03   1.5966719e-03   1.7314841e-03   3.1225254e-04   3.8216967e-03   2.0442457e-03   2.2822879e-03   1.3421268e-03   1.4265946e-03   7.7649269e-03   2.8740126e-03   5.4160448e-03   3.5155679e-03   4.0306705e-03   6.4534752e-03   2.4409680e-03   3.0445932e-03   5.6145787e-03   4.8826195e-03   1.5492752e-03   3.4872724e-03   8.3107496e-03   4.2635519e-03   8.0787158e-04   1.6588755e-03   9.5088926e-04   1.1215622e-03   1.1319394e-03   1.0749073e-04   1.3651148e-03   1.0419898e-03   7.3951172e-03   5.6721517e-03   2.8717807e-04   1.2167194e-04   6.0929710e-03   1.9467902e-03   2.7497575e-04   2.6021687e-03   1.4711384e-03   7.6653532e-04   1.1979861e-03   
4.6497866e-03   4.6528613e-03   2.0491908e-03   1.1214382e-03   3.1087470e-03   5.8331337e-03   2.9008505e-03   5.1851355e-04   1.1077981e-03   7.4180273e-04   1.1189833e-03   3.3213225e-04   1.9786092e-03   6.1996952e-04   3.2630432e-04   1.6788710e-04   1.0717507e-03   2.5327575e-03   6.9916673e-04   4.8860962e-04   5.0860173e-04   1.7852918e-03   8.1925898e-04   2.7637031e-03   3.4415701e-03   7.6048751e-04   1.3454814e-03   1.4168273e-03   9.1102129e-03   5.4634531e-03   7.0608839e-04   1.5253323e-03   6.6930140e-03   9.6900708e-04   7.0882160e-04   2.0455668e-03   3.9468549e-04   1.1422246e-04   2.4972040e-03   3.2985277e-03   4.3265068e-03   9.6084950e-04   2.6351726e-03   1.9457490e-03   6.5435669e-03   2.5549327e-03   2.2787630e-03   1.2770395e-03   6.6117888e-05   4.1362061e-04   1.2709485e-03   9.0111668e-04   1.8531872e-03   1.4879007e-03   1.4236785e-03   8.2396190e-04   2.2228636e-03   3.0260473e-04   1.7887502e-03   9.8733924e-04   2.2593145e-04   8.4529966e-04   3.3545284e-03   2.0967474e-03   4.8795197e-04   1.2181976e-03   2.8660735e-03   1.2480152e-03   6.7735579e-04   1.8245752e-03   1.6711447e-03   4.7937341e-04   1.0455121e-03   6.9960603e-04   8.5898853e-04   1.3874556e-03   4.5677202e-04   1.3792343e-03   7.7973864e-04   1.4683104e-03   6.8342524e-04   1.0561802e-03   2.6555506e-03   3.7342452e-04   3.7432085e-03   9.6667604e-04   1.7304819e-03   6.7810292e-04   1.1254700e-03   2.3291425e-03   7.0675084e-04   8.2471895e-04   2.0738735e-03   1.3951480e-03   2.9534051e-04   6.3569311e-04   3.8846725e-03   1.5360581e-03   9.9102840e-04   2.6998600e-03   1.1403522e-03   5.9435819e-04   1.1584769e-03   4.5439398e-03   2.4115910e-03   2.2376231e-04   1.3051908e-03   3.1170665e-03   2.4639357e-04   6.5093729e-04   1.0408914e-03   3.2170646e-04   6.8086675e-04   7.3723950e-04   1.9464154e-03   1.6938542e-03   1.2010351e-03   8.6567601e-04   1.2605984e-03   4.0415296e-03   6.0024917e-04   2.8241029e-03   9.3145876e-04   8.7987159e-04   1.5390142e-04   
6.1621791e-04   1.2031695e-03   7.4672595e-04   6.4561848e-04   1.3132954e-03   5.7671738e-04   4.3996999e-04   1.2646330e-04   2.7819647e-03   1.1571313e-03   1.1519638e-03   1.4945239e-03   1.5137092e-03   1.7542391e-03   3.0584682e-03   3.1624125e-03   6.9423087e-04   8.0741075e-04   2.9564986e-03   2.1264808e-03   1.1956222e-03   2.4834226e-03   2.4046990e-03   2.4389289e-03   1.1428932e-04   4.1572751e-03   2.6270682e-03   3.3826290e-03   1.0906814e-05   3.3779688e-03   3.9749243e-03   1.6265579e-03   2.0735613e-03   1.7238890e-03   2.6977636e-03   1.8191981e-03   5.1180139e-04   3.3068455e-03   2.9977980e-04   3.0495320e-04   9.6732841e-04   1.7461140e-03   1.4560452e-03   1.4920651e-03   2.5772801e-03   1.7019143e-03   1.0341988e-03   4.0085546e-03   3.6492138e-03   7.4340196e-03   7.9703529e-03   1.4966880e-03   7.8440192e-04   7.7789554e-03   4.4532033e-03   2.2360209e-03   5.9110335e-03   4.1723764e-03   3.6115483e-03   1.8645164e-03   8.6587941e-03   6.9790025e-03   5.9223579e-03   1.3508421e-03   6.9679643e-03   8.8100022e-03   4.3884451e-03   9.3956602e-04   3.8113149e-03   3.5744927e-03   3.3082295e-03   7.5316334e-04   3.6491972e-03   1.6320684e-03   1.1884181e-03   4.5803930e-04   2.1497659e-03   4.0210128e-03   2.8148066e-03   1.4099016e-03   2.8014283e-03   2.0075866e-03   1.7351697e-03   8.4094224e-03   6.5677927e-03   3.9418601e-04   3.5055970e-04   7.1718250e-03   2.0973679e-03   6.4851765e-04   3.3744442e-03   1.5292791e-03   9.4071576e-04   1.7144529e-03   5.4753391e-03   5.4188405e-03   2.5828658e-03   1.5301373e-03   3.8027206e-03   7.3328244e-03   3.1192913e-03   6.2731613e-04   1.7741918e-03   8.4678687e-04   1.1412463e-03   3.3683903e-04   1.4744098e-03   1.1279305e-03   6.7560808e-04   1.3395103e-04   7.7360874e-04   2.7301747e-03   8.0481079e-04   5.3215427e-04   1.0082007e-03   2.3386408e-04   4.5650335e-03   2.1085336e-03   8.9398060e-04   1.5597528e-03   2.5626398e-03   8.9643929e-04   4.6401620e-04   2.1474828e-04   9.4200330e-04   
7.4363388e-04   8.8544521e-04   1.1214375e-03   1.5513223e-03   4.7369920e-04   1.2949283e-03   5.0107186e-04   1.9686134e-03   1.5839083e-03   3.0734994e-03   1.1037013e-04   1.0184869e-03   9.4459481e-04   1.6215035e-03   3.1102930e-03   6.4871929e-04   8.4571319e-04   2.1956968e-03   2.1853516e-03   1.4074387e-03   6.7079422e-04   3.0128980e-03   5.5375407e-04   5.9745601e-03   3.5596915e-03   1.0825427e-03   1.1193941e-03   3.8619013e-03   1.7868782e-03   2.6699310e-04   7.4652731e-04   1.5644519e-03   7.6914730e-04   1.1868249e-03   2.1537215e-03   2.8904145e-03   6.7050048e-04   1.5688544e-03   1.1553603e-03   2.3985919e-03   2.8520067e-03   2.1226295e-03   2.6532317e-05   9.5443067e-04   1.5325109e-03   1.7403036e-03   3.8293675e-03   6.2074215e-04   7.8759073e-04   1.8993585e-03   2.7454611e-03   2.5765044e-03   1.0363780e-03   2.0443972e-03   1.4335859e-04   1.4016332e-03   5.4805806e-03   6.7405649e-03   6.4579738e-04   5.1748633e-03   6.3487765e-03   4.3874825e-03   6.7084849e-03   8.1049162e-03   2.6677096e-03   4.6345484e-03   1.7447861e-03   7.4893653e-03   2.9062036e-03   5.4610122e-03   3.0610024e-03   2.8530196e-03   9.9111331e-03   5.6471487e-03   8.9070297e-03   6.1633735e-03   5.5983777e-03   9.1911029e-03   4.0109945e-03   4.6927139e-03   7.3914201e-03   7.0233589e-03   3.0645626e-03   6.1283907e-03   1.0939049e-02   7.1096522e-03   3.7615860e-03   5.7439366e-03   2.3434979e-04   2.1625792e-03   4.1436319e-03   1.4386017e-03   3.2647240e-03   4.5819077e-03   2.2920937e-03   1.0610016e-03   6.9564930e-05   3.4218787e-03   2.8472107e-03   1.7665885e-03   1.7134963e-03   1.2022963e-03   8.8958832e-03   3.1032776e-03   5.2642601e-03   3.2432815e-03   4.6032265e-03   6.0005810e-03   3.1610099e-03   3.7408101e-03   6.4052558e-03   4.8371122e-03   1.3589516e-03   3.3936977e-03   9.2321743e-03   4.6963171e-03   5.0386940e-04   4.2984797e-03   8.6973796e-04   3.5526497e-04   1.7666127e-03   7.2192984e-04   6.5802622e-04   6.6868074e-04   3.2431480e-03   
2.9035420e-03   1.6594087e-03   6.4854242e-04   2.1612810e-03   4.9124735e-03   1.3725331e-03   1.5050690e-03   9.8777331e-04   7.5260407e-04   4.1403958e-04   1.4453279e-04   1.2645371e-03   4.6600326e-04   2.4792089e-04   4.5627999e-04   4.7560079e-04   1.1214322e-03   2.2091820e-04   1.5167980e-03   7.9735767e-04   5.8276719e-03   2.5886189e-03   4.3280198e-04   2.9205975e-03   2.2021650e-03   1.3597486e-03   9.5466012e-04   5.1915037e-03   4.8194588e-03   2.6659023e-03   8.3877743e-04   3.6595156e-03   5.4665486e-03   3.2603866e-03   3.5246614e-04   1.2546385e-03   1.3738455e-03   1.7317120e-03   4.0173517e-04   2.8852033e-03   4.3207236e-04   2.1872967e-04   1.7262684e-04   1.6274416e-03   2.8950267e-03   1.1933707e-03   5.1057942e-04   6.2761497e-04   3.2515433e-03   4.5844712e-03   2.0397580e-03   4.4892831e-03   5.6236454e-03   2.1663608e-03   1.9457792e-03   4.4715761e-04   4.4562273e-03   2.6795180e-03   2.6855442e-03   1.3224094e-03   1.8950095e-03   8.9432615e-03   3.4981521e-03   6.3671383e-03   4.3127165e-03   4.9533188e-03   7.5137514e-03   3.1187047e-03   3.8229023e-03   6.6814123e-03   5.8612561e-03   2.0597987e-03   4.3006086e-03   9.5397630e-03   5.0775992e-03   1.3692874e-03   9.6403546e-04   1.3966479e-04   8.6664284e-04   1.6597338e-03   1.3212159e-03   1.4578093e-03   1.0432500e-03   1.9220792e-03   8.7918940e-04   4.4168060e-03   5.0670419e-04   4.4696559e-03   1.4208846e-03   1.1004494e-03   1.6436932e-04   1.5507648e-03   1.1191058e-03   1.7495883e-03   1.6621469e-03   2.5414268e-03   8.8363295e-04   4.0096791e-04   3.3482530e-04   4.2345389e-03   1.9374158e-03   1.2770848e-03   1.0395507e-03   3.9095383e-04   9.2829494e-04   2.8992176e-03   3.2975300e-03   9.5393320e-04   1.0925514e-03   1.6776296e-03   3.8507253e-03   2.3805613e-03   1.2082502e-03   2.8135004e-04   4.7639114e-04   8.7356741e-04   7.4310336e-04   2.3860646e-03   3.8892953e-04   3.1610282e-04   8.0643344e-04   1.4728895e-03   2.0712058e-03   4.6512964e-04   1.1229948e-03   
8.8791739e-05   1.1956364e-03   1.3055469e-03   1.5870433e-03   3.7223827e-04   1.0219838e-03   4.8252621e-04   2.1760282e-03   1.1580239e-04   1.5036929e-03   1.5974284e-03   4.9006747e-03   5.1331960e-04   1.6812595e-03   1.3616634e-03   2.7816530e-03   3.8434871e-03   1.4972893e-03   1.8359326e-03   3.6884416e-03   3.0552141e-03   1.4996272e-03   1.2110783e-03   4.7697841e-03   1.3648602e-03   3.9191527e-04   1.9808749e-03   1.8113262e-03   2.3841094e-03   7.8154360e-04   2.2164780e-03   1.0439462e-03   5.1766229e-03   1.1157470e-03   3.6934914e-03   1.2626931e-03   5.1659909e-04   5.8959899e-05   1.4097594e-03   7.9470954e-04   1.7881991e-03   1.5872251e-03   2.1165612e-03   7.0368642e-04   9.2823209e-04   1.5533041e-04   3.3160103e-03   1.5048956e-03   2.0235000e-03   2.4966081e-03   3.5807846e-03   4.7284012e-04   2.2730788e-03   1.2853266e-03   5.0519729e-03   2.3762808e-03   2.2444800e-03   6.5500299e-04   2.4750944e-05   4.4815197e-04   1.3292061e-03   1.4780836e-03   1.3932149e-03   1.1924273e-03   1.5277304e-03   1.1896691e-03   2.0673918e-03   2.5400150e-04   1.8225589e-03   5.4227610e-04   2.9880363e-03   1.8314313e-03   2.5238977e-03   5.5757663e-05   2.3698503e-03   2.8213423e-03   1.3040881e-03   2.4190319e-03   1.1221589e-03   2.3332880e-03   1.5386845e-03   7.3215728e-04   3.3801838e-03   1.6264024e-04   2.8457958e-04   1.3051710e-03   1.8856170e-03   1.1599202e-03   1.2252261e-03   2.8396493e-03   1.3307965e-03   7.9081662e-04   1.1208231e-03   3.7513747e-03   2.2532467e-04   1.8915999e-03   1.8066444e-03   7.8138393e-03   1.7396624e-03   2.9887611e-03   2.1626603e-03   4.5716343e-03   4.7705983e-03   3.1616233e-03   3.6125170e-03   5.9695278e-03   4.2653532e-03   1.8208998e-03   2.2513824e-03   7.5822521e-03   3.0950854e-03   2.6638425e-03   2.3329339e-03   1.2833498e-03   1.8503904e-03   7.6221882e-04   7.7332789e-03   2.4545666e-03   4.1819304e-03   2.3678329e-03   3.7122984e-03   4.8118232e-03   2.5553184e-03   3.0167737e-03   5.3596333e-03   
3.8038292e-03   8.6990936e-04   2.5148680e-03   7.9707274e-03   3.8479364e-03   3.0845972e-03   3.4545687e-04   3.2912795e-03   2.5772857e-03   4.0614792e-03   4.9082550e-04   6.4992316e-04   1.0688017e-03   2.7640538e-03   2.9796108e-03   1.9652949e-03   2.0639503e-03   3.2701588e-03   2.6492756e-03   2.3500195e-03   8.8549039e-04   3.5901884e-03   9.2048730e-04   3.0276112e-03   3.6195589e-03   1.4632965e-03   2.1763560e-03   1.5224813e-03   2.5475111e-03   1.6768950e-03   5.4331181e-04   3.2527051e-03   2.4129313e-04   2.7799169e-04   1.0453548e-03   1.7267901e-03   1.3027778e-03   1.3633707e-03   2.6503970e-03   1.5759024e-03   2.1731558e-03   1.7887862e-03   5.7263386e-03   8.5148931e-04   1.6383081e-03   1.3458097e-03   3.3594814e-03   3.6389958e-03   2.2301817e-03   2.5225544e-03   4.3534713e-03   3.1787209e-03   1.6954559e-03   1.3061945e-03   5.4111697e-03   1.7803664e-03   4.1741292e-03   7.9197469e-03   2.2821957e-03   5.7150571e-03   5.2344879e-03   5.9478982e-03   9.4860449e-03   3.0218268e-03   3.9157169e-03   7.0424278e-03   7.6315025e-03   4.1845322e-03   4.7751866e-03   8.3019483e-03   3.6628005e-03   5.6294536e-03   2.4239872e-03   2.7647556e-03   9.2421646e-04   1.7874511e-03   2.0292522e-03   1.9168834e-03   1.9458286e-03   3.1481713e-03   1.3908818e-03   1.3324310e-05   1.1782972e-03   5.7639460e-03   3.2058410e-03   2.4140579e-03   2.0956665e-03   3.1463866e-03   1.2261330e-03   4.0134358e-03   1.5054952e-03   1.1240475e-03   4.7360391e-04   2.7309553e-03   5.1301270e-03   2.4137049e-03   8.9922816e-05   1.2592279e-03   8.6164876e-04   1.2616309e-03   1.6936265e-03   3.4914911e-03   6.4024254e-04   8.0798030e-04   1.9823430e-03   2.5037231e-03   2.1768005e-03   8.4515239e-04   2.3117307e-03   2.1993248e-04   5.6812764e-04   1.3931392e-03   1.4308209e-03   1.6084351e-03   1.3403960e-03   1.4811963e-03   1.2106398e-03   2.4235208e-03   3.5787719e-04   1.6247178e-03   6.1681362e-04   9.2162383e-04   6.4491663e-04   1.4082859e-03   1.1774797e-03   
1.5785221e-03   4.0122185e-04   7.3237681e-04   6.4530654e-05   2.8852212e-03   1.3991525e-03   1.5175131e-03   5.9336508e-04   2.9036861e-04   2.0099198e-04   5.5129609e-04   1.5179733e-03   6.8841417e-04   1.3866758e-03   1.2880812e-03   3.2376306e-03   2.6196338e-03   2.0860993e-03   2.5676122e-04   1.7880288e-03   9.1594192e-04   3.5976292e-03   3.2249197e-03   6.3653022e-05   8.5949029e-04   1.8263557e-03   1.6881339e-03   9.7411877e-04   1.7845049e-03   6.1330704e-04   4.6521065e-04   1.3489557e-03   1.6857799e-03   7.7549379e-04   1.3461987e-03   6.0478443e-04   1.0485877e-03   2.7784351e-03   1.1708179e-03   5.9587066e-04   1.2049026e-03   1.1592978e-03   4.9960727e-04   2.5830270e-03   2.2390567e-03   9.5053595e-04   5.2396832e-03   2.8622802e-03   2.1952710e-03   8.7416055e-04   1.1307517e-03
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-cosine-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-cosine-ml.txt
    deleted file mode 100644
    index 7c6b67fa43..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-cosine-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   2.5695885e-01   2.6882042e-01   2.3470353e-01   2.9299329e-01   2.2742702e-01   3.1253572e-01   2.4986352e-01   3.0770122e-01   2.5191977e-01   2.7931567e-01   2.8133743e-01   2.6316239e-01   2.6067201e-01   3.2982339e-01   2.8993002e-01   2.5506356e-01   2.8728051e-01   2.4952121e-01   2.8613379e-01   2.6894157e-01   2.3606353e-01   2.1670935e-01   2.3470242e-01   2.4294172e-01   2.4376454e-01   2.3228195e-01   2.3554918e-01   2.4851241e-01   2.0917546e-01   2.4971488e-01   2.4264224e-01   2.7405461e-01   1.9086415e-01   2.6346574e-01   2.5908801e-01   2.2138495e-01   2.2910721e-01   2.2169919e-01   2.0660065e-01   2.3207102e-01   2.5554688e-01   2.5153751e-01   2.6073682e-01   2.0919640e-01   3.3984433e-01   2.7503792e-01   2.1709889e-01   2.7068095e-01   3.0307041e-01   2.4529612e-01   2.2987015e-01   2.7736967e-01   3.0310708e-01   3.0544316e-01   1.9205388e-01   2.7098021e-01   2.0722466e-01   2.6387343e-01   2.8998308e-01   2.2633010e-01   2.5177075e-01   1.6347011e-01   2.4036389e-01   2.6485871e-01   2.8491965e-01   2.2273619e-01   2.4511873e-01   2.5930533e-01   2.6589995e-01   2.7797191e-01   2.3357373e-01   2.4279909e-01   2.3544532e-01   1.9447286e-01   2.3993534e-01   2.0856243e-01   2.2125251e-01   2.1988206e-01   2.0590152e-01   2.6441952e-01   2.0052739e-01   2.2978496e-01   2.4483670e-01   2.3879510e-01   2.9398425e-01   2.7541852e-01   2.3777469e-01   2.9151131e-01   2.0672752e-01   2.4584031e-01   2.7475025e-01   2.7064343e-01   2.5603684e-01   2.6165327e-01   2.4233155e-01   1.7892657e-01   2.6111203e-01   1.9965682e-01   2.4201634e-01   2.6281353e-01   3.1928221e-01   1.9731963e-01   2.7752862e-01   2.2633080e-01   2.6783167e-01   2.5447186e-01   2.6424243e-01   2.1960672e-01   2.2984242e-01   2.8788736e-01   2.8681630e-01   2.6949787e-01   2.3993685e-01   2.4440073e-01   2.5010397e-01   2.3230769e-01   2.9879682e-01   2.4200592e-01   2.6957748e-01   2.6073240e-01   2.6355347e-01   2.3403674e-01   2.2411413e-01   2.2956729e-01   
2.8105976e-01   2.2913304e-01   2.4898608e-01   2.3304000e-01   2.2692988e-01   2.3728251e-01   2.2552243e-01   2.0364084e-01   2.3359511e-01   2.6619167e-01   2.6666588e-01   2.3666880e-01   2.7239113e-01   2.0146697e-01   2.3045559e-01   2.1695523e-01   2.1387991e-01   2.2366404e-01   2.2809635e-01   2.0901297e-01   2.2441100e-01   2.3418882e-01   2.8552218e-01   2.4609015e-01   2.0282492e-01   2.5940295e-01   2.7407006e-01   2.3344890e-01   2.1179142e-01   2.7047821e-01   2.9832768e-01   2.0859082e-01   2.8881331e-01   1.8384598e-01   2.5286491e-01   2.2012615e-01   2.3615775e-01   2.6845565e-01   2.3356355e-01   2.7164193e-01   2.4179380e-01   2.5247973e-01   2.5637548e-01   3.2126483e-01   2.3100774e-01   2.8832546e-01   2.0043257e-01   2.7918333e-01   2.4884522e-01   2.2904723e-01   2.3738940e-01   2.9461278e-01   2.9782005e-01   3.0332073e-01   2.5175971e-01   3.1203784e-01   2.6611535e-01   2.3713507e-01   2.2203585e-01   2.3602325e-01   2.5093670e-01   2.6860434e-01   3.0137874e-01   2.3759606e-01   2.6840346e-01   1.9200556e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-double-inp.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-double-inp.txt
    deleted file mode 100644
    index 7a77021775..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-double-inp.txt
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -8.278938049410748956e-01 9.035293984476246987e-01 1.862188994679486731e-01 8.921151312310462433e-01 2.061859119379583216e-02 3.440636727385729676e-01 1.533779912830328662e-01 5.701372300009802663e-01 5.510020730211558915e-01 1.792362258426003496e-01 8.086175120876580857e-01 6.115487184317183189e-01 1.233471787164852618e-02 1.441643531871039663e-03 4.044309209045688913e-01 3.561398959499905148e-01 1.281985712929750720e-01 8.663300833847481508e-01 8.696027786291581352e-01 3.611727370363766454e-01 5.283537658772616830e-01 1.440241088090119526e-01 3.112457227138950566e-01 6.031280796897889873e-01 9.230324792742518047e-01 2.332121881136874908e-01 3.192652267403439659e-02 3.466206294995559656e-01 2.988687728046366399e-01 5.116749542048093513e-02 2.584975830914494344e-01 4.302023478042227289e-01 8.003972751713522849e-01 9.364931911368097328e-01 9.737098649964673891e-01 4.718038453972229762e-01 4.526591686607864817e-01 1.056485678520797666e-01 5.883019714285405710e-01 3.846092237676981274e-01 6.461500053435473845e-01 1.013239729848824933e-01 1.216151561651189761e-01 5.159668929484659827e-01 8.452074473510227115e-01 9.885170962247968873e-01 7.623883073490128615e-01 2.291163243615434997e-02 5.775530980802381364e-01 7.820699896828091635e-01 8.239186345842965942e-01 3.391800105260227571e-01 9.546318451614538292e-01 3.789677917867695367e-01 4.526533399649290690e-02 8.366786473238587707e-01 3.082636811049858094e-01 1.173936820793450853e-01 7.631994969169442200e-02 2.997416650722183329e-01 5.795208655160232203e-01 3.942350892542011431e-01 1.175126383297261379e-01 4.928232513950027149e-01 9.421293996225950096e-01 8.365391053841342295e-02 6.868059693571844093e-01 3.589527962429440722e-01 7.592939427166059962e-01 5.623849466131448649e-01 2.110746828032050715e-01 9.824683704668600859e-01 2.661230142246236996e-01 6.162272315007123469e-01 5.023254536607497656e-01 5.202854476669782624e-02 5.835090668842095596e-01 7.864642118889143552e-01 2.504012386867506823e-01 
6.728308641135989365e-01 4.610793534576096420e-01 4.820508770515909980e-01 9.720403251022265989e-01 3.100069285263498120e-01 7.681017126461753275e-01 7.956539306007082146e-02 2.593389637887737464e-01 1.137852590403054531e-01 3.885303073284454012e-01 8.599094660075957686e-01 5.215167875918280682e-02 1.620908248572288102e-01 1.859236090457663249e-01 6.247716512610480555e-01 3.415128495520775020e-01 7.034903368378029320e-01 6.037564640019568163e-01 2.338969434423310290e-01 1.002104885609900187e-02 7.866058403969036217e-01
    -8.033694116033356369e-01 8.653264545544031572e-01 7.468340410754038539e-01 6.362430919910603278e-01 5.120006306625468628e-02 9.503348372633585450e-01 4.697732609626817935e-01 4.221305288459429317e-01 3.153452119838391354e-01 2.991014843442657556e-01 1.190667967280257811e-01 3.486567714509342109e-01 8.289493649885054660e-01 8.454811050800014049e-01 9.149673018211901265e-01 7.708707837193897738e-01 2.640157732122547785e-01 2.107897022189605396e-01 4.207633055054439408e-01 6.719500284654699174e-01 1.458031684893063007e-01 1.800412735886125493e-02 8.402733435220011149e-02 4.206760156883160295e-02 1.376933515041314227e-01 1.716717341022133692e-01 1.788220727652158892e-01 8.224310433402118869e-01 7.729093666867475898e-01 2.064223621025984556e-01 9.592092036227207741e-01 8.312490243754996344e-01 6.673289360369902834e-01 4.632847903690773261e-02 7.643954098358983762e-01 9.359341525615098023e-01 1.914966319163026176e-01 4.536590469402868031e-01 8.640836016538007147e-01 3.941529178175462444e-02 5.602101995205478469e-01 9.263806161941660067e-01 1.555995325944817820e-01 6.172208102950116348e-01 6.335576752812099866e-01 9.766975460368043649e-02 4.475795689539874278e-02 3.248842796104995934e-01 5.700377122149502540e-01 9.066962967256807504e-01 5.458460621505676347e-01 6.833401285581487405e-01 2.887244409544044155e-01 1.316338647016834784e-01 2.325673305245992140e-01 4.150121963188406760e-01 3.834845466366055833e-01 8.149365773968725302e-01 1.867003849450201702e-01 3.170322173543018707e-01 6.832093662682684476e-01 1.729728518929105618e-01 9.236557359702636250e-01 9.152941252150086360e-01 7.224879983096620384e-01 8.557920626598064517e-01 5.344883059251644974e-01 4.876873274449112783e-01 8.308277804506420949e-01 3.916624489322212410e-01 3.459695122273966916e-01 4.033512499027409604e-01 6.555726444913008155e-01 7.138452409380238173e-01 1.683937314599968094e-01 1.769382143486440961e-01 7.588683655178136700e-01 3.750589892880819010e-01 7.525176245126207197e-01 
6.083961152538303052e-01 1.145972309907993258e-01 6.239614485809552580e-01 1.307655482065895880e-01 8.530458750670916190e-01 4.801602070124768584e-01 8.168122189863546989e-02 3.793139622744635675e-01 1.496986997776840189e-01 7.129023878302899186e-01 6.830979237438047358e-01 7.635375943876505644e-01 1.824004963251233402e-01 5.764695848992339444e-01 8.865113248731604223e-01 5.784337085544002388e-01 9.700026628755119562e-01 7.318207347905059112e-01 3.851401393936705331e-01 1.774291851193399161e-01 9.763423229242296220e-01
    -9.287178470949695175e-01 1.748282433617460718e-01 9.238531711586964734e-01 8.291274445125006443e-01 9.513259272578692416e-01 7.486316801165745494e-01 6.257378457524477300e-01 2.062711693536473101e-01 3.970721244184766130e-01 2.738325225026445597e-01 8.735038948299954642e-01 5.415282140033768066e-01 5.176317904298315398e-01 5.347036264518250093e-01 7.482056965410627258e-01 4.140672582824351800e-01 8.709067272363142376e-01 9.499605569181273079e-01 5.380266748336398619e-01 4.369252161707162241e-01 8.235722216228258397e-03 4.308187193646527691e-01 6.030581482859224129e-01 7.316831195156517920e-01 5.540499846834291420e-01 2.044203040111662872e-01 8.645251782981867583e-01 1.816095717570278545e-01 9.639119168018674966e-01 3.572031072322333634e-01 5.580226816834680248e-01 5.586629875016585478e-01 7.213854320902782780e-01 8.513998260042524580e-01 6.308764347277173723e-02 4.299855362100638567e-01 8.789303907444128150e-01 9.178850359236285783e-01 2.275205845091231582e-01 1.899395443939643213e-01 7.103070862773533944e-01 9.450015289553428399e-01 1.691856364522159595e-01 7.368719616877857925e-01 9.600189536623833231e-01 5.128846522932454244e-01 6.209162727118655578e-02 7.992250598838029907e-01 9.141050280518014937e-01 1.471297785256820978e-01 7.466162372930541524e-01 4.656107650642931084e-01 6.399324135161845728e-01 2.023617619481610230e-01 1.019104648900100996e-01 4.390693688536728700e-02 9.822620353006089600e-01 2.881951852926285529e-01 6.191575015960482098e-02 8.989580763251467932e-01 4.635958631890454429e-01 1.781973138114967270e-02 7.906911683818984571e-02 6.525270776225711167e-02 3.620583622807886925e-01 2.651673718940715796e-01 5.829372395929610651e-01 2.118159824373908595e-01 5.900287159143694504e-01 9.405929925178391215e-01 9.262415619063500971e-01 5.639581506302312475e-01 4.529556154689695635e-02 2.873819210518682166e-01 5.718545934306838996e-01 9.877670791317306742e-01 4.120364488714320927e-01 9.896078045634184583e-01 3.796586997026456523e-01 
1.178183652203194098e-01 6.641068305236120795e-01 4.045960610587706618e-03 2.262690437428437340e-01 7.839938005832693957e-01 7.695391333937223743e-01 3.713918392552509884e-01 4.245533341514018399e-01 1.475072494020331915e-01 6.011975181419888514e-01 5.158174017998343741e-01 1.788706151398071764e-01 8.880707130134481986e-01 6.463351030474082659e-01 6.499920635615744624e-01 8.570273676455353318e-01 6.055019270899113515e-01 2.123561211054603159e-02 2.027688787664126968e-01 1.930834215328548487e-01 5.131906052747271518e-01
    -2.599990881903107010e-01 6.767857524909899336e-01 7.188217446352963558e-01 3.037178903357997672e-01 4.252381412838680541e-01 4.070924411439535984e-02 8.426710493038247485e-02 8.301517457289483426e-01 8.254603255702420705e-01 7.258533909453509514e-01 9.958706809470796451e-01 1.323408451651194584e-01 8.523995455245143571e-01 2.572405385832454705e-02 4.715363690065482727e-01 7.920130365690022378e-01 7.613745641534582775e-01 5.108305991695683002e-01 7.908714335912382376e-01 4.641131983754837043e-01 3.112627109831845873e-01 4.218013908715474436e-01 3.291577909008427394e-01 2.538715054071232213e-01 1.362470842487485401e-01 2.716429790290709745e-01 1.485325814161112534e-01 4.514539027544387517e-01 6.900835128673067365e-01 7.793407072946112457e-02 5.938024345270752624e-01 1.497853829906865553e-01 5.399567982652856424e-01 1.419209916759478496e-03 7.719776132867679497e-01 3.130795105576239523e-01 6.670071611167494030e-01 8.900596881158256979e-01 8.011158503301568645e-01 7.089295605187424520e-01 4.671116382997058114e-01 6.682965170673403899e-01 6.524835265739736823e-02 5.454288420771494783e-01 7.751910790556310049e-01 8.192595541387335256e-01 3.098855848167891835e-01 3.689971355659119601e-01 8.666507475054133769e-01 2.749042684253171220e-01 3.566565602478318775e-01 4.838173174723044978e-01 1.032975933616413489e-01 5.063065339610417492e-02 5.791168455729079900e-01 3.573337411289496668e-01 6.714098909652352898e-01 2.917057662433912846e-01 2.654964332620638467e-01 7.171804039048814694e-01 3.314488637898249657e-01 5.230399837442840649e-01 6.866534136026025692e-02 1.252966394621071178e-01 5.349397882659551184e-01 2.841423847455760709e-01 4.158473635710734362e-01 7.197062989831272128e-01 5.123869045047864113e-01 8.675622821594339840e-01 8.097441845042540054e-01 7.317178252133832439e-01 3.300847596465853462e-01 5.922311859141077273e-01 8.852619511417836318e-02 2.673412917259408994e-01 6.878259052441990651e-01 3.223000927116328462e-01 8.859387123976615319e-01 
5.722722388300067742e-01 8.254877606669521750e-01 5.705299682290687624e-01 7.046478734972855262e-01 1.316324413616759559e-01 3.056358395675535800e-01 2.396516834600909140e-01 2.041201422493257311e-01 1.610755140653103989e-01 1.617012564641111538e-01 4.449920510036902144e-01 2.731012972755201274e-01 7.826874666257994662e-01 5.193612375350010746e-01 8.688804522977213729e-01 3.742157602758655610e-02 6.649628920608219307e-01 5.978149424619171315e-01 5.345645500553952711e-01 9.443202650415919441e-01 6.105837075491723498e-01
    -6.387761328141735584e-01 4.210087412162694109e-01 3.777306694964789324e-01 3.576349403292201634e-01 7.272699618880260619e-01 9.173392803607671731e-02 1.212535698300880593e-01 3.871229381194544183e-01 7.735150198351389284e-01 4.687200483013695962e-01 5.161778571874678923e-01 9.839646447226980674e-01 8.626932748911960713e-01 9.618485576577924245e-01 2.997996427525421170e-01 3.955404657388794654e-01 8.480126027102616870e-01 8.194992325050480808e-01 2.800213436873294492e-01 7.188391466620779324e-01 2.289766105875049584e-01 3.838547514028287644e-01 1.363553401061209369e-01 2.131328253542326134e-01 2.666779468144075960e-02 3.252883844200405994e-01 4.207860197469600605e-01 2.991365385037647595e-01 9.180779845534067229e-01 8.787338732192649937e-01 5.404510999105649471e-01 1.735493827761729335e-01 7.405224640747264386e-01 3.927355563629583157e-01 3.957109873399460298e-01 1.313029813325972128e-01 6.434498219738993274e-01 7.162213694578050127e-01 6.454998257494671821e-01 3.808124530008022424e-01 2.027201015737234435e-01 6.667632842770417900e-01 1.609491052365198405e-01 1.192413785409307536e-02 4.546773323526854815e-01 7.733541911050207940e-01 3.902525737195561284e-01 4.006023779897505133e-01 5.156517815815246930e-01 6.135685498584592112e-01 7.062153114980724844e-01 5.505858882117883324e-01 3.541308807182554919e-01 5.237151122342533771e-01 5.230649229131387745e-01 1.973541027697351957e-01 7.940327858595511712e-01 9.998588700623055603e-01 3.878271015153827994e-01 4.455006584967207139e-01 8.376414508056347907e-01 3.310833863524501597e-01 8.020469097392601832e-01 1.890327633084128989e-01 3.830289472395409511e-01 8.605040171046141051e-02 9.978185524023941433e-01 8.333890591892906263e-01 4.509013468741837061e-01 6.355778557686052599e-01 1.422515991097305088e-01 9.549891485963732940e-01 7.535776302868563148e-01 9.306005301880662106e-01 2.444330347211679522e-01 5.828218427569508142e-01 1.261938242968304591e-01 2.829188731405173352e-01 8.100246952078660190e-01 
2.032739130996042975e-01 3.997268448390065565e-01 3.882777703107541667e-01 1.102505652624736765e-01 5.826634725328041498e-01 6.508734477956333864e-01 1.777287661702166011e-01 4.857051012052149286e-02 6.850537712379254351e-01 5.012281307761055071e-01 3.329154880061502286e-01 5.006261767216675374e-01 4.542081454976160115e-01 6.777801995399822532e-01 4.271303586474960445e-01 7.820470659692947413e-01 5.143462618485082904e-01 4.071273891563575997e-02 8.503383643856671226e-01 6.877485768345151795e-01 6.498843855014626580e-01
    -5.539512747016193117e-01 6.329206647391879548e-01 2.798533500321682688e-01 4.825977295850051307e-01 7.625297023172977751e-01 9.081309101427640362e-01 4.124792086535029600e-01 3.647019658319609059e-01 7.529595202332928228e-02 3.072404010876803593e-01 7.890673660964639957e-01 4.079781478915127657e-01 1.440519120695739064e-01 2.538968953804546791e-01 1.595028243568367143e-01 9.066545851872198636e-02 6.367601114674349416e-01 7.622263643880089479e-02 3.015728236404162654e-01 2.424070469873378375e-01 5.711440390241000475e-01 5.717001375511508998e-01 2.237853674032181939e-01 7.112101625753678436e-01 4.321054197012103026e-01 2.505322169010260058e-02 5.877307077139551916e-01 4.415771174397812304e-01 3.766022855145171322e-01 9.803490652619811785e-01 1.229258314111529860e-01 8.108351868714478439e-01 8.558595456964329662e-01 2.168217533833206589e-01 2.034022719386595623e-01 8.687457137579783772e-01 9.013327195854559104e-01 8.156766512673154779e-01 2.717576187546973943e-01 1.756417893371479133e-01 7.555856977566548505e-01 6.708809351312817748e-01 8.998789237886926085e-01 1.936367585946979775e-01 7.949724635465026390e-01 3.164799312763589834e-01 5.493048513173155456e-01 1.608917269168268493e-01 3.048667492191803330e-01 5.599401537727016764e-01 5.779501360842279611e-01 1.296714605309662316e-01 9.160752328055997706e-01 8.058674476110374574e-01 4.385508937505578908e-01 9.212419718012100356e-01 2.249887451242467140e-01 6.283927745352599903e-01 3.778992451536005159e-01 3.571958698867505611e-03 7.276526470528231760e-01 9.051678673805297892e-01 8.465837072484881931e-01 4.548317505393462135e-02 3.189318261926020748e-01 4.446388607398673587e-01 4.292356336344156365e-01 4.203980977718795309e-01 4.698059253071955599e-01 6.151991200848159203e-01 8.479986139404802614e-01 9.870993262459623052e-01 3.164206525899861955e-01 6.464672171639846976e-01 8.508781429592480183e-01 4.733667503354813677e-01 8.076014176740163863e-01 6.671443255679101458e-01 6.639213267047979761e-01 
3.681688930741919830e-01 4.679870252651611162e-01 1.790041740686979521e-01 8.446070273663058847e-01 3.350737544979878191e-01 6.600272349677447359e-01 4.356083218487936115e-01 7.995134167346013010e-01 9.083660261041469619e-01 9.743975306734570241e-01 8.144839650654719376e-01 6.865011984586443239e-01 1.709747281999153268e-01 8.534933687161740945e-01 9.494753729726415070e-01 8.140124992294850426e-01 8.936241255316055287e-01 9.087976860818796077e-01 9.030687493451383663e-02 4.025785149840914734e-01 9.592005611533803711e-01
    -5.714058727476275523e-01 7.913573761505965365e-02 9.301773447377043036e-01 4.302822433307075256e-01 4.618892554175407783e-01 1.882471300213742760e-01 6.231472878215863487e-01 2.350437450940777717e-01 8.483410480771292894e-01 8.580803842040533036e-01 4.246398783388435350e-01 5.667321565946502604e-01 7.247417018955526480e-02 5.373984417482219333e-01 8.794242091541510931e-01 9.699025554453030162e-01 8.254197752548814160e-01 7.739723972867470492e-01 6.365819416181199841e-01 3.451230687021222820e-02 1.829102490094791644e-02 9.179618383026147965e-01 4.481667270072077214e-01 4.771270250445739380e-01 1.588469404953456454e-01 3.766332499200618633e-01 5.057026248713025751e-02 9.125900914275182352e-01 8.438133644246305076e-01 3.282972411719701222e-01 6.042003956122835584e-01 7.423456085393266290e-01 1.389012737541106546e-02 3.674754266702850991e-02 2.126646727703802586e-01 3.085666164246750887e-01 4.303440338750976757e-01 1.749037978865556342e-01 2.177699993322510519e-01 6.675614739991906355e-01 1.926533336347433512e-01 8.032010572660308600e-01 4.611412981769049679e-01 9.907201268457492827e-01 8.973785930837320235e-01 6.286342392657409128e-01 8.111266245859546364e-01 1.154230969025437092e-01 8.382880466301794176e-01 1.053753927827069115e-01 9.921712862234919328e-01 9.041662667920956631e-01 3.626267376021269362e-01 2.262225368932846425e-02 8.669003741626111204e-01 7.597054897704164089e-01 4.700318514995387442e-01 4.338185014241978665e-01 1.205425463362067573e-01 2.413879270602589111e-01 5.483334840461459025e-01 2.042653841254596925e-01 5.452588940366013270e-01 3.164646091706100339e-01 1.878958248945691301e-01 2.188622304737641855e-01 2.970982599823450698e-01 5.952148400199362976e-01 9.614251220149501176e-01 5.446813400697393392e-01 5.900748097930779146e-01 2.653062526715309621e-01 5.459933097767216692e-01 3.174185404661935550e-01 1.412133354129242457e-01 1.487441669790685594e-01 3.953776242211952674e-01 5.274261039692862418e-01 1.756132307607755072e-01 
4.481942852746899630e-01 6.390660088765629521e-01 2.860380430081067571e-01 5.866902519902850166e-03 3.026687645174785946e-02 1.952533570196290924e-01 2.154769096186736066e-01 8.920573593276575064e-01 5.644513191915436767e-01 5.551464696654353492e-01 4.378199413349500579e-01 8.685737643974280608e-01 7.493934764293597173e-02 9.556749726352036234e-01 6.386433482536227890e-01 8.714694524097754691e-02 1.722786161701279628e-01 6.526867532768643176e-01 8.950304705281527662e-01 6.158198776753203152e-01 9.587176904005377809e-01
    -7.705718397401561948e-01 3.165816092999733655e-01 4.334200859975760878e-01 8.639807015515663657e-01 5.576514209532534849e-03 2.456745447057938625e-01 1.664686313299922338e-01 9.637084729617834133e-01 1.083448720752323569e-01 1.865218070380464388e-01 3.730358890475884426e-01 5.015351872138350542e-01 7.420710795841709562e-01 4.919420674769692248e-01 3.426558201886464872e-02 8.669984854934246199e-01 2.204243734202966376e-01 4.109792246853891662e-01 4.361732572946559472e-01 6.819306998053020763e-02 9.986304248057148447e-01 4.119289455392274313e-01 8.533050041845835487e-01 3.416914861912183632e-01 6.522191951039880697e-01 4.162803668786793088e-01 9.051674379917418189e-02 4.552378661306888397e-02 2.122677193466918633e-01 7.461518531655018105e-01 4.654688019259497489e-01 7.877564083548750373e-01 4.518328005682387127e-01 7.173857464237374248e-01 6.940056370290903498e-02 2.804574410412373764e-01 6.095681113112718652e-01 3.680596478602831123e-01 1.814569150719304025e-01 6.505055517979729807e-01 2.759585245701871026e-01 1.429501104786028431e-01 7.813891153083207808e-02 8.925314279991185540e-01 6.692056941902108091e-01 1.915141341107173822e-01 5.750233129581091562e-01 2.051961006251528108e-01 3.849013692629975614e-01 9.503788222043518807e-01 7.690419386411734282e-01 9.978147530014782607e-01 1.719584162437415298e-01 4.890758882401113894e-01 7.195660736040896399e-01 2.485818040997200828e-01 9.706486601870933928e-01 5.182604282071262558e-01 8.082072245463804983e-01 4.889961284821118248e-01 8.042893959057633158e-01 3.200685313413229593e-01 8.983245016887355661e-01 2.811495336955205371e-01 3.986095833814048417e-01 8.607229214132059436e-01 4.827620119717191960e-01 6.715610252037491623e-01 9.330824374137768329e-01 7.537710530085762750e-01 9.840804224010484269e-01 2.319352541177217564e-01 9.569114943157627229e-01 5.821928104654411351e-01 6.700479524814679788e-01 5.663434680086896211e-01 8.851091082101365526e-01 6.800562815862243315e-01 3.578475213752868589e-01 
2.900164669281133367e-01 8.379170683569914235e-02 9.929972839740475177e-02 5.946248553621906741e-01 1.991332889320840405e-01 8.115065723822508792e-01 2.023388190440008616e-01 4.056545651129230823e-01 2.966825350250481552e-01 7.457176343507545546e-01 9.856015771246517954e-01 2.264338016147812160e-01 8.366528670045663141e-01 6.116829813603242849e-01 2.605933184296719274e-01 5.765962146558850643e-01 5.064075092266390188e-01 5.499615769589756287e-01 9.240234698632640020e-01 7.169900155229913530e-02 3.544181364560751168e-01
    -8.154844535553099627e-01 4.797965609394789777e-01 7.476703385713100447e-01 9.086708404761600910e-01 3.191752505450355937e-01 7.611128630021511965e-01 6.246790343299296611e-01 1.942001426217137006e-01 2.789860414631386565e-01 3.236359785042408621e-02 3.178191288741717413e-01 8.372264298357038337e-01 8.872692914664047636e-01 9.589758852077276963e-01 3.123722260380168425e-01 8.980164015338999439e-01 7.260784140459818348e-01 6.567013512265649222e-01 1.028743505926521529e-01 6.821705410750319443e-01 6.889838995316139858e-01 5.587525493094736007e-02 6.921487028366646310e-01 3.616312929861494885e-01 1.673758008792780583e-01 6.626504595920326146e-01 9.125680913222075086e-01 1.424077784972291871e-01 6.508496429060767197e-01 6.615417385775157477e-01 9.654167310675311198e-01 5.536662974550183858e-01 7.092622144968085962e-03 6.694595400455760625e-01 1.828533619119211417e-01 3.421514408394116247e-01 1.242580151818144518e-01 9.888774797458224075e-01 9.777955172739735135e-01 4.271370765628749178e-01 1.211608384809655936e-01 1.580132417172936954e-01 3.242705395708289640e-01 3.268994391754735940e-01 5.213767653645562383e-03 4.475169480357120699e-01 9.593245219293577986e-01 6.994304536782350867e-01 7.063863152769014331e-01 8.381620829497931080e-01 2.760441799736219615e-01 3.755200946645842475e-01 3.627729621737311172e-01 9.518310606719182498e-01 3.577273025276901386e-01 3.991159901003488164e-01 4.187060513068554535e-01 7.422605403637314581e-01 6.697944269780702342e-01 6.020599837037767799e-01 1.571185850817550245e-01 7.519860911185742847e-01 6.635775704496444938e-01 9.487848173531471252e-01 7.900030232338028924e-01 4.143783957270819052e-01 5.618429740858444932e-01 3.737804619062014000e-01 6.179941187802344693e-01 6.553638605616040058e-01 1.009709416658691739e-01 4.935037098582963910e-01 5.485489972455533936e-01 1.024147956480448984e-01 1.195764707555347917e-01 4.910516327810896531e-01 3.551185778630389089e-01 3.857601645798814927e-01 2.074975219600547760e-01 
2.084038664460790002e-01 5.268616653491025037e-01 6.948014877618717833e-01 6.179744044618615817e-01 7.063658085955483168e-01 7.925757227686872630e-01 6.199016959584816577e-01 1.163676037434490107e-01 7.425752264755586252e-01 5.403115665133301215e-01 2.546191951391015840e-01 6.961300925345208501e-01 4.003013072125547467e-01 5.906120962720950995e-02 5.879915846330325824e-01 1.213602408288709800e-01 3.801780679842765576e-01 1.731477742402802722e-01 4.624568816669496485e-01 3.304453744619206823e-01 8.810445876116090869e-02
    -5.140190515373614932e-01 1.419225260054487459e-01 7.777845802285945354e-01 3.327562899409282071e-01 8.916875699762913943e-01 7.212852862736146564e-01 5.727327199433507321e-01 5.897820225918504189e-01 7.318614954542906892e-01 7.393985144455500480e-01 4.531340740296823100e-01 9.903061584426188224e-01 4.213350938331624773e-01 4.542342471963995987e-01 9.788786426453045530e-01 1.881707000343846303e-02 8.005433413647761176e-01 1.523502822273363755e-01 5.630164732287495921e-01 5.946603842470724599e-01 1.225547698678740582e-01 1.531136594724622491e-01 8.157973612638946825e-02 2.752046015644330490e-01 6.809045821946161370e-01 6.455289724528190387e-01 3.830356726830793646e-01 4.446144649678575034e-01 4.969038423960672191e-01 5.497873820641221432e-01 9.471879627821714331e-01 5.933046675329255448e-01 4.099233758501530378e-02 5.790409810134594659e-01 9.546095885251496549e-01 2.608616052375664074e-01 6.910160339170060562e-01 1.293709850476291168e-01 6.407264616302255078e-03 6.186037089828009261e-01 5.537861302543241049e-01 3.527421038298221845e-01 8.033232052121624944e-01 8.128114152830284711e-01 8.319982582278713235e-01 5.939566376046836460e-01 2.291090283499520597e-01 5.438101817725821130e-01 6.881146379117278888e-01 2.421968586304659166e-01 5.874047918543783275e-01 6.210102709484541794e-01 7.041387566450251212e-01 6.959223476278774134e-01 9.133877300988062498e-01 9.230647706207778525e-01 6.856884219815310155e-01 6.997988808693775820e-01 6.177944932528769417e-01 5.512902545683161515e-01 5.818280341729102911e-01 6.538267999985679646e-01 6.946673485935980219e-01 4.817938258357623571e-02 9.352008817207906333e-01 4.774162142215661042e-01 5.768063588692976529e-01 4.589648891483899540e-02 7.998946815651652997e-01 4.434260476954369201e-01 9.850053510925722566e-01 6.648626681529369309e-01 4.606293826856903140e-01 3.309042418210563774e-01 1.438901922508034614e-01 7.986559119276418484e-01 7.037818421334554042e-01 3.605119534240813772e-01 3.785959549258922641e-01 
9.562491516841659100e-01 4.997955143590974147e-01 1.029540300938682762e-01 1.819017177001992502e-01 3.665425750262368831e-01 1.688063588370778412e-01 7.030735208313992901e-01 8.922375654244527610e-01 1.055706412056253152e-01 2.664739907746691561e-01 9.906029568647586325e-01 6.043845090140997911e-03 3.495786295043534775e-01 5.989441999519146131e-01 6.776147193866479679e-01 7.012991789852640601e-01 1.825838783477321536e-01 7.612293578749116385e-01 1.564769891240175292e-01 2.762157292905387251e-01 7.641900040015234818e-01
    -4.746013333880729768e-01 7.609202966712714788e-01 2.537820854162747830e-01 1.709362234877408460e-01 1.886635378734374813e-01 2.439567014093724229e-02 7.640304718272151741e-01 3.483216170435471382e-01 7.744289278738043514e-01 4.190437573644867353e-01 5.319091476394965934e-02 8.580130976087452233e-01 6.259446446786639529e-01 8.793213970773006150e-01 2.441023074890465994e-01 7.753405549489799098e-01 8.760187573193888300e-01 5.946480724009295393e-02 2.873093046571124631e-01 8.710837851946537924e-01 9.103181731924696596e-01 6.534637257615111272e-01 4.128420398577182793e-01 4.905858108576378607e-01 6.178275806701372108e-02 6.368043900016381320e-01 2.865296941219959148e-01 6.371773028539067241e-01 4.924322796636745325e-01 1.709313290387282080e-01 1.856892551689268700e-01 9.592782603102242289e-01 5.402593764193130976e-02 7.287312244390512506e-01 5.679467572000697073e-01 6.255587794305905724e-02 3.069660218141317953e-01 1.089960430557104232e-01 5.550748245336984965e-01 2.555948886689661803e-01 4.140925514039996980e-01 1.180376445052062628e-01 8.832322629884041820e-01 7.784546946701487169e-02 3.177678935473182698e-01 6.681804863429485764e-02 7.047099396645268854e-01 4.133897376851528582e-01 5.600656990480865627e-01 3.883995683475501837e-01 4.459430113152932362e-01 4.214077227574740681e-01 4.763369230200156235e-01 2.701480661168440545e-01 4.296286564389811824e-01 9.601402258758658936e-01 6.326999441846863359e-01 2.442086919688498670e-01 8.407708423957936938e-01 3.626867985638081437e-01 3.641441713291436733e-01 7.932397565989488530e-01 8.902073520619256941e-01 1.929173010337000838e-01 7.309376779324568973e-01 7.305852858337777977e-01 6.510197444582447313e-01 9.512661608643838695e-01 8.461467164366111016e-01 9.245490147941206605e-01 2.658844813385705663e-01 9.538758859344749208e-01 8.215517204998477041e-01 8.217795540390903097e-01 7.569662091300560780e-01 6.262685322871274218e-01 5.597770510574888725e-01 8.155720175123675197e-01 8.545688745180864965e-01 
8.986051518529034610e-01 2.477911506572628708e-01 8.462580108996445860e-01 6.065941220995090255e-01 6.500490804973033665e-01 1.120463882674053169e-01 9.299049132942927010e-02 1.388364074229719858e-02 5.901199124540731367e-01 2.795110110544174464e-02 1.644097083463245124e-01 5.341029647603202646e-01 5.276816677181681570e-01 5.439849107754858304e-01 5.371677986392331405e-02 4.515163125788429488e-01 5.036243367087100964e-01 5.721818679625961801e-01 5.271368612400184617e-03 7.720961020546839304e-01 9.015383457479009266e-01
    -8.301526916287945701e-01 8.704609696144033348e-01 2.955689129581380303e-01 1.762209253489944727e-01 2.698172933050072553e-01 1.138095349991521399e-01 4.092588531860634760e-01 8.202978121681584467e-01 2.822241377079557356e-01 6.117376205659387223e-01 7.169923068016897938e-01 9.310256256264415331e-02 3.989664052931106708e-01 1.651874953308862803e-02 7.890202597932294282e-02 9.068686774810821305e-01 5.203866694486933842e-01 4.297748572844445336e-01 5.208786995443430712e-01 2.163224881365530816e-01 7.274307306357226111e-01 1.675784956180090823e-01 5.969822786565782691e-01 8.959750832846602453e-02 1.253794151891943764e-01 5.352628522116801291e-01 2.562706125890066300e-01 6.030433202137867044e-01 8.330717547440393833e-01 9.603613683422040914e-02 7.569714244468559450e-01 3.184801677796517128e-01 1.667069341164499896e-01 3.132470247801235619e-01 6.417752836394801097e-01 6.433909425912354152e-02 4.056860213146201710e-01 3.166772891331335327e-01 9.574059746098845247e-01 1.492907964460536974e-01 8.311513764927496162e-01 6.652928354977717396e-01 2.396804722185036374e-01 5.812361618600220270e-01 9.724228681350225445e-01 2.853983236378453414e-01 5.337719354896472979e-01 6.779446197712412081e-01 5.485102006140557540e-01 9.010109155962182648e-01 5.724439967467525037e-01 5.965540527411405947e-01 1.598667990086183321e-01 1.363934512727023041e-01 5.327536522697270405e-01 4.123866715061276222e-01 4.617251396918636841e-01 6.935944951381239898e-01 4.300337419593377453e-01 1.892407993760835128e-01 1.666936825594794724e-01 4.625634184864588772e-01 4.805197636774838355e-02 7.003542850133466224e-01 2.130226006716084974e-03 8.678863343041013367e-01 4.874478520451258623e-01 7.043560228741558848e-01 6.317719270475393722e-01 5.372392256296196766e-01 2.982649812986511995e-01 1.272558612133412037e-01 2.467337555730741983e-01 6.546893200021091097e-01 6.291921159383098150e-01 8.505920470407707379e-01 4.046520490181828578e-01 3.875732096593392795e-01 8.551517214319142024e-01 
4.152602284179877090e-01 9.587779137989138611e-01 6.977437468944928112e-01 3.240620775541913634e-02 4.025873770391376061e-01 5.485549335619134270e-01 7.146066156157020455e-01 3.012702534568838519e-01 3.526414480395153594e-01 3.309707144485515284e-01 4.315687014460974913e-01 6.641934530697197747e-01 2.172886798352815507e-01 4.807480925564590057e-01 5.006795397998469177e-01 5.818100901154411586e-01 2.107716091585690732e-01 6.606606051140029301e-01 9.317629042790995797e-01 9.840326342340242061e-01 5.752000964817773898e-01
    -9.843444595454536872e-01 1.339523968066913540e-02 6.082172659959028671e-03 7.828244785439336662e-01 5.069653703872761819e-01 2.804896494365415327e-01 2.112385836660957139e-02 6.016479440778699228e-02 7.457477935084961818e-01 3.445503949245375397e-01 4.063494277166557200e-01 8.630275274433116817e-01 5.948396018456146850e-01 1.400867933474212457e-01 6.997522422654076646e-01 5.766519767930851081e-01 5.419976500582250889e-01 7.474121304089603735e-01 2.951600193008566686e-01 7.980170422334191827e-01 1.829036799578199757e-01 6.317636496261300749e-01 2.812612231140887431e-02 5.464747656105657381e-01 3.909873503320924204e-01 4.940850205957293406e-01 8.157850130814222611e-01 5.111092739445756150e-01 9.336823640685747439e-01 7.157105167170837445e-01 7.778989455994214097e-01 1.398722535910470466e-01 5.642653936300449091e-01 3.218717164845980028e-01 9.717427501967056402e-01 3.665791984428700134e-01 3.874321311211759156e-02 9.437600858738082188e-02 5.679526822961932231e-01 5.141385991358327079e-01 7.497840799582222715e-02 5.736515309094968318e-01 1.928132849879083954e-01 6.924244068001785823e-01 1.748389677952593146e-01 4.469577663506929532e-01 1.738527450963387455e-01 7.195287763517190793e-01 8.861150811892871682e-01 1.058443750714600506e-01 1.941789362229970894e-01 9.188374820700584422e-02 7.706736301449305104e-01 6.718642548609364828e-01 5.981029087121966237e-01 4.832880127232569434e-01 3.073688779938709148e-01 5.156312334804930009e-01 1.777418420119527553e-01 8.885462205165685079e-01 4.486254681289014723e-02 1.345398129556140132e-01 7.467627984379916484e-01 4.384565546058830643e-01 7.217750080760946263e-01 3.949550352625393890e-01 4.307950907642028593e-01 6.087680934849041270e-01 3.294516167246774874e-01 1.316682090209408962e-01 1.824857738754404046e-01 5.332379826483617524e-01 3.567136182864261151e-02 1.976220743086236631e-01 5.849349042822560296e-01 1.133174406357483344e-01 7.711522754393199675e-01 8.557306786807005183e-01 3.038353471344266143e-01 
4.422747047768413875e-01 2.537160404215925702e-01 2.372714099723788328e-01 5.906462765375103396e-01 4.849909323133470007e-01 2.692576210504484813e-01 4.540849506602829821e-01 9.664935719107857759e-01 2.044371576459835804e-01 4.505417469690352616e-01 7.110722322201217249e-01 3.051357995214963870e-01 8.978937034341526457e-01 6.090501112506481185e-01 6.595415779178889215e-01 6.565426836745864581e-01 6.565608489824376059e-01 2.679102664248229626e-01 3.819533138204529443e-01 6.609794961162380744e-01 2.289558446859882856e-01
    -9.274935298374649140e-01 1.174096651033715855e-01 3.030761852629033637e-01 1.605508209527917174e-01 9.601854834873225775e-01 4.341959513718630648e-01 6.320768160802121560e-01 4.213429090614078110e-01 3.695553969042019160e-01 5.965457437116089556e-01 3.520335041155040479e-01 7.702703502247409961e-01 8.571112772962534709e-01 7.904077282532658844e-01 2.247339318352784554e-01 6.823720204503556097e-01 5.883435710582129996e-02 6.786037033312407596e-01 9.721137137641507886e-01 2.042576970668320557e-01 8.394085754806240862e-01 7.433082729552867862e-01 4.072614159870893147e-01 7.451483066617257123e-01 1.699472962789440045e-01 1.753052015584344314e-01 2.255269204788400428e-01 7.794755643807432799e-01 8.407732260470973662e-01 9.301182862857163558e-01 3.701995309382508648e-01 4.481909027604019657e-01 1.261889085033987001e-01 5.600591735875248833e-01 8.244692493969552061e-01 8.969188061645969601e-01 4.802217973423368313e-01 3.556164122713412201e-02 3.393317823164623270e-01 2.491242957582864292e-01 9.863253789366602797e-01 5.585415885291432625e-01 3.702350606362231344e-01 6.766101432620400535e-01 6.999259389475386284e-01 6.676108316872160220e-01 7.870681827507105544e-01 8.746765411259082024e-01 9.125268371282718727e-01 6.638849997061806452e-01 3.253268113800632522e-01 7.968625619248901337e-01 7.584122525443606211e-01 9.028886850768532701e-01 5.381622293189292083e-02 8.097562873320752752e-01 7.092942088208666895e-01 9.915538877968065323e-01 4.319294903327922652e-01 4.307127933969153721e-01 2.768507739641907772e-01 8.076253078288621046e-01 2.569233696442670967e-01 7.595893829724666979e-01 5.768081727897018673e-01 2.537536777625452045e-01 8.874419624636734616e-01 5.091705681832693342e-01 4.811826624992353585e-01 2.794462461940371290e-01 3.846927898276129021e-01 5.129562951959991679e-01 8.515004062224775794e-01 7.103144978683579858e-01 9.526388607201888847e-01 2.367905569592337889e-01 9.137336039323161740e-01 5.722969943101696710e-02 2.019723935481291255e-01 
3.098764675203513619e-02 1.121146613918624357e-01 9.937693067724532314e-01 8.476717958861412772e-02 2.059652110343795917e-01 2.139791918759540446e-01 9.137860316709250919e-01 9.530862653366889425e-03 2.027843281683039400e-03 2.506229951837134484e-01 6.244523528392044165e-01 5.523937894075592325e-01 3.712168074031840792e-01 4.218847794299319665e-01 4.827576239387890711e-01 5.244634168840578425e-01 5.182241092381567604e-01 3.308639956263292881e-03 9.370528021570383448e-01 4.694554875029453012e-01 4.950447554541728135e-01
    -1.525818111800841814e-01 4.708012184002630107e-02 3.899035965341954846e-01 3.928304521031263929e-01 5.602286661727436945e-01 9.738256658043862313e-01 9.404465779766183475e-01 5.750862754958349088e-01 9.547546956257608741e-01 2.750275291553152535e-01 1.682773435862793265e-01 5.865928471016079726e-04 8.543378154943809255e-01 3.547649971465383079e-01 5.058056647397523031e-01 9.116332486700751137e-02 7.534666421106954726e-01 3.082429494433007733e-01 4.527145111847344916e-01 5.456680635225539255e-01 2.504131242494785914e-01 2.509240770568589296e-01 3.949236999582302898e-01 8.782959620323271821e-03 2.474641132111736752e-01 8.229417958971670943e-01 3.444225768479134420e-01 4.000027489436257522e-01 4.247741954177396417e-01 2.497745404169693373e-02 4.325768602588443423e-01 7.336592463477830117e-01 7.667663267650381975e-02 4.179022553581047683e-01 8.745172741480690126e-01 9.417705509525042817e-02 2.807522782799587446e-01 8.212710101351362590e-01 2.211181944001613386e-01 4.319929503523877168e-01 1.858636923768219873e-02 6.737037795085246694e-01 7.997187114913413275e-01 2.976552505976116647e-01 3.272347030789168887e-01 5.550935453236346406e-01 9.224109746648162522e-01 3.192827922106745708e-01 3.500098324549234530e-01 7.821988386980260888e-01 4.478417135239194380e-01 1.580956175222456572e-01 5.300807813550156844e-01 5.806154798468634581e-01 9.456842911054151868e-01 7.688127895655872956e-01 8.456527833650537840e-01 1.784229089865225770e-01 8.114517450321339087e-01 8.062506298824222428e-01 2.113482500442499523e-01 2.629226789210241666e-01 6.478686221690072022e-01 6.006672861605766300e-02 7.013679843242253131e-01 8.784753961212666828e-01 3.487138165323044880e-02 4.928426758517070461e-01 5.976224683315064512e-01 7.629063997052759616e-01 2.761721278953045422e-01 7.240740503283805696e-01 6.131065729985127888e-01 1.630885615792579957e-01 8.473783868551159060e-01 8.347614542399306448e-02 8.137265626844719657e-01 8.512508664918938539e-01 2.777097816703766320e-01 
1.729154355214796990e-01 2.203382750835449766e-01 6.134780912629795857e-01 3.524352564238901753e-01 5.370314860129862256e-01 8.013986113284543578e-02 2.555842138998117852e-01 6.553915758947851389e-01 9.679125599178584061e-01 2.549566319678178150e-01 4.008180804370896633e-01 9.145789951670967310e-01 2.787926039163850511e-01 8.599455912576436933e-02 9.637558000691170967e-02 8.274101203974880692e-01 1.803747268179315411e-01 2.175735407836230095e-01 7.825994939720237742e-01 7.928519890958951599e-02 8.707949373106749213e-01
    -6.398420210047787160e-01 5.739624494012524059e-01 3.359672805578653998e-01 1.130399363175038641e-02 3.349439685346782269e-01 2.315484030880912147e-01 4.575228302577399875e-01 1.149494135594463229e-01 2.888244352925943836e-01 3.625470995156252485e-01 3.795973190611611203e-01 6.567047810450010736e-01 1.484039742710284715e-01 9.273251916560719676e-01 4.334256728976307871e-01 6.734771102219323513e-01 9.125080197222198430e-01 4.974393931097168542e-01 8.301481563280355136e-01 4.526450714147856047e-01 2.414236092573898151e-01 8.070129698367667359e-02 7.260400697427102923e-01 1.396509691839398215e-02 2.496450588391967429e-01 4.335741205447194435e-01 3.089314419194891803e-01 9.543503534526003307e-01 5.457977547458532364e-01 3.139663643587058406e-01 5.034762326753475792e-01 4.756788330475764104e-01 6.849334942793482428e-01 3.880666613022351052e-01 6.483446580176778218e-01 5.217503801099343530e-01 5.371145824070304720e-01 3.121260159429154468e-01 8.314121854062171968e-01 4.538695969561833410e-01 8.598896961203845724e-01 9.961993522734106099e-01 8.865717795946430613e-01 7.828987966783660379e-01 3.412415531643435695e-01 7.421170530151157685e-01 4.484104178639959359e-01 6.793217012099640462e-01 3.756179958191659951e-01 7.821287098222597933e-01 6.227726265188193722e-02 8.552983413221663112e-01 4.824668768009222619e-01 2.241531065858231031e-01 4.939536577599041856e-01 5.129566641128722182e-01 1.057984177672518511e-01 9.541452507300716146e-01 3.396646181755047511e-01 7.452588103611947901e-01 5.315559265659929311e-01 5.493475179850665358e-01 5.214824278139198466e-01 5.150075718147916204e-01 1.196075368500321146e-01 9.035665331176232495e-01 7.522653903639873185e-01 6.638708679914825384e-01 5.584174553800479446e-01 5.015819402508836511e-01 5.507698483308445248e-01 5.978677577011723976e-01 8.450418028759657529e-01 3.266677322748618995e-01 1.321610045897971819e-01 2.394354042746985600e-01 2.723972163557076831e-01 5.523301747352814539e-01 5.518043850608547185e-01 
5.283968096837132755e-02 8.192733312104071297e-01 2.277106024970321219e-02 1.414998099027269252e-01 6.517281615256080851e-01 1.811694734825117781e-01 9.472370614713256920e-01 5.454497319021770485e-01 1.364119913158231556e-01 8.446142008509562871e-01 7.671725984742419069e-01 2.461161648406858804e-01 1.421724627107351369e-01 6.290652581179481118e-01 7.094144689448004248e-01 4.419656923472803367e-02 6.614741876652251440e-01 8.712193265403500586e-02 4.734931280852430202e-01 5.382037050480286133e-01 1.396459758005891283e-01
    -9.709329844415439670e-01 8.998575745276288229e-01 9.151313462895852568e-01 6.920489275523904471e-01 2.892231405199537919e-01 6.750679746268205550e-01 5.515642485826798280e-01 1.065253097812824956e-01 2.957026803465776510e-01 8.937347659632134400e-01 9.800016515925590310e-01 7.745900896182087436e-01 1.570977683146633774e-01 1.482028765821026273e-01 2.111147779712029271e-01 9.683759902485811200e-01 6.550951580826911425e-01 8.728324682592377703e-01 5.044803166579884257e-01 8.285704754811143991e-01 1.693574499337324735e-02 6.032669995180495182e-02 1.687026879086964692e-01 7.701554026145973619e-01 1.429888016593102718e-01 5.881172815379975827e-02 9.704206919487038396e-01 4.450807650730836951e-01 1.597445784258376689e-01 9.849229394397314152e-01 4.220083573536804744e-01 9.357693600374825671e-01 2.313199262338369033e-01 4.556443403861323294e-01 2.590791012828855822e-01 8.438664994487065085e-01 5.519045677502344427e-01 4.702170125676508050e-01 6.814723205638187897e-01 7.418295483665861001e-01 3.684921032028853904e-01 1.501895844844561845e-01 4.214513377519605308e-01 8.600279963652578408e-01 6.625616611189292238e-01 5.200151456470966105e-01 7.881072743086801058e-01 2.771703241081423519e-01 9.034135930616548071e-01 5.848441705791300738e-01 8.341698181274771473e-01 1.966638677318299777e-01 7.059747894371543042e-01 7.013854316067694716e-01 1.828430942760242983e-01 4.745548949934464966e-01 6.306422394641082452e-01 7.760751707194470939e-01 9.813187212598396547e-01 2.293595795266353266e-01 7.749261876107090830e-01 2.384106107787011819e-01 9.721209688979495223e-01 2.715569353686980714e-01 2.915573577694993146e-01 3.579601509630966349e-01 3.085697512342830962e-01 4.070219981627976047e-01 1.989632411372218579e-01 7.330003339460906542e-01 5.397259604481572381e-01 6.931009942216573849e-01 1.385457419653816080e-01 1.140339999976658358e-01 3.980752590866034613e-01 9.471822621683767540e-01 5.476643721405823895e-01 6.824131903515884279e-02 5.844099130744569992e-01 
2.346881692012994236e-01 9.436439228519653000e-01 4.855518260479008141e-02 8.157036123302675579e-01 1.169761256455048581e-01 5.532962903488753970e-01 1.100965596251435308e-01 9.789490602992410029e-01 8.433487462016989733e-01 1.272410782852178013e-01 2.885715258680641160e-01 7.990943955388217779e-01 1.565305358979097727e-01 9.160846960406943129e-02 8.521842244411678147e-01 4.474243106736998099e-01 3.843945818845087015e-01 4.710645906071458944e-01 2.398348154123419729e-01 6.435351435258193087e-01 7.656897921129046658e-01
    -4.894328120406804539e-01 7.881019629214267574e-01 6.974585354155089512e-01 2.023858939857701156e-01 1.660984914264745926e-01 4.854517801734643534e-01 2.789848572630315715e-01 2.311636522410289718e-01 9.821076233980715608e-01 1.220641257408076052e-01 2.614036146663852866e-01 7.657560715165320220e-01 3.968360577545695378e-01 4.566023622802184434e-02 1.049701948619241598e-02 9.281162949127452766e-01 4.490137965769909201e-01 2.095846458383606725e-01 9.195504656719085679e-01 9.683515436855471004e-01 9.800174878114910060e-01 5.517610861380117804e-01 6.711570559348770670e-01 5.125258050287277989e-01 2.105581493613526423e-01 8.281813206544574868e-01 4.964783994807770995e-01 7.284974208756571645e-01 1.320629592816270348e-01 6.652194518096135045e-01 9.430156297917950958e-01 7.477263137894260003e-01 2.054087806450300979e-01 4.248209124837907247e-01 7.657518666018259257e-02 1.031614100713345028e-01 4.122242287567021712e-01 4.919658859336810686e-01 3.752650167259050651e-01 4.175771429986683270e-01 6.131376293448997927e-01 5.463797405837259591e-01 3.119918548921774004e-01 6.331762507678504459e-01 5.484632429281035559e-01 6.815448032785871302e-01 8.065695507425107991e-02 8.720129122297424207e-01 8.318188557125294480e-03 2.199323537180564170e-02 8.933872719887463454e-01 1.953120287872067706e-02 2.478721941404590234e-01 5.994061179859005994e-01 6.588362611693047155e-01 6.332808851020984564e-01 3.823849348043323326e-01 5.111091324899629251e-01 7.034808459110406531e-01 4.347681568463539481e-01 4.316973576672314961e-01 9.620411080123215664e-01 6.247837467655984467e-01 8.196961678222113301e-01 5.574601810887074294e-01 8.800635018469276094e-01 8.772255241161972528e-01 5.075275933138404527e-01 8.022583187266906224e-01 2.320670802521890286e-01 1.165626629103270195e-01 4.623759662685936744e-01 7.938327000737943617e-02 7.986374689793115378e-01 6.728842751465858862e-01 8.133909095059230765e-01 1.202639390769081329e-01 1.052937257108800262e-01 8.717600467040409473e-02 
2.163819956545051104e-01 6.596483385763984852e-01 1.202843170392309258e-02 1.538789195854695091e-01 3.120247727263308901e-01 3.408168327248596308e-01 3.241861797851740556e-01 3.637074533655986208e-01 1.533669345890729119e-01 4.455921334699539660e-01 5.619140093874478437e-01 1.881731359879111887e-01 9.416670800570559052e-01 1.740018593664415247e-01 7.030242331869680505e-01 5.922055553954849172e-01 9.326211623391688077e-01 6.608322881013140027e-01 7.009721551241574478e-01 1.079126054675583202e-01 6.158176671761947940e-01
    -5.185079639625639336e-01 9.613742991518259284e-01 5.555312825626229634e-01 2.647628827924735084e-01 6.003697207460141350e-01 5.392112376769145898e-01 6.781186965667050925e-01 9.908971748181496508e-01 4.124155872095397468e-01 9.814941401724619485e-02 2.684237785531295994e-02 1.774652505962848181e-01 1.707589529595294753e-01 4.640932098465534450e-01 2.882179883914587348e-01 7.276822905806898945e-01 6.145789546745269449e-01 1.100959863917608805e-01 6.798859723042820491e-01 9.096984032948918220e-01 3.971368455178179158e-01 2.959494950971321980e-01 3.742088799298171065e-02 1.960739526210202310e-01 7.536102695342027369e-01 6.680915510628401277e-01 4.136507204312135366e-01 3.613996339406737590e-01 3.605422038261204554e-01 7.098503555159476619e-01 8.093719147087541366e-01 6.344097009128880638e-01 3.990082448083617228e-01 2.805918009906902544e-01 7.078488167363675698e-01 9.969917259866583059e-01 9.442054998992396309e-01 1.329075240769165278e-01 6.810681350588387861e-02 8.503491437913293094e-01 8.347117439165431252e-01 2.381858201903953587e-01 7.884260706938626129e-01 7.109907917419661105e-01 6.390916681983604963e-02 6.174365227062991179e-01 5.085733343630816083e-01 1.716846139694149231e-01 9.065664924270055991e-02 5.625330757196970177e-01 3.539663480209681579e-01 8.937139525947165319e-01 3.981380511900556307e-02 7.403597927449541150e-01 3.803872284089604427e-02 6.729519695709765825e-01 5.306080908840085097e-01 2.091237680402112664e-01 5.902903662907804661e-01 2.094778612095482551e-01 7.323447855684165342e-01 3.644574495843493356e-01 2.006215478057034041e-01 3.737617545555030896e-01 5.253471759602216240e-01 4.287889547869583318e-01 7.086098806190446187e-01 4.510792335515292351e-01 6.383187180169215269e-01 8.779355722397681472e-01 4.221338898667141848e-01 6.375840144651815367e-01 8.683057298299173832e-01 6.093730356952498095e-01 9.297141161056151626e-01 7.770838342807246946e-01 6.549661287008456956e-02 2.835060738158660110e-01 4.474138867374952699e-01 
8.530336387421445510e-01 3.160209657891883683e-01 8.301538680518486535e-01 6.646903097549101691e-01 7.187130118106234145e-01 1.651862041735395747e-01 9.578252676762609719e-01 6.490273812885494209e-02 9.777273484666341163e-01 8.930729829254262508e-01 9.851054752118463265e-01 4.094323402286751401e-01 1.139176240124337713e-01 7.612865863899589414e-01 2.266379302491570158e-01 6.998882496157835531e-01 9.945043379099228753e-01 7.111578056749194854e-01 7.806190603886183910e-01 3.410170920712443099e-01 9.446084168886822452e-01
    -5.015172758330755931e-01 5.569527971282052237e-01 1.122406928736449094e-01 8.960352822124777461e-01 6.049568585854003810e-02 1.202196001338627918e-01 1.870314295763603196e-01 9.017590029396971296e-01 3.597904628087450485e-01 2.130941062746317671e-01 2.556281834629479111e-01 5.123669364829196438e-01 4.754061129282013409e-01 9.764470380372083369e-01 8.038663983900646848e-01 6.960491266420890666e-01 2.940135977911654264e-01 2.857282759910040326e-03 4.599343225832352999e-02 5.597554495210212977e-01 7.445266674304001908e-01 3.387528030535971180e-01 6.429542922125383031e-01 2.123331785532429627e-01 5.302332654117811739e-01 7.262555377662539557e-01 3.982425859900724507e-01 3.243388301740235402e-01 6.191064123738921898e-01 8.988047781373914580e-01 7.819700328765150088e-01 7.664269102804815992e-01 6.734095355422575757e-03 2.904762329148526945e-01 5.097537644843168625e-01 9.524734606001823423e-01 4.812869576591960463e-01 6.236868013640477493e-01 1.459170943214320726e-01 9.874505139403206844e-01 7.561708982837871407e-01 3.798591332432484924e-01 6.056633451375117438e-01 7.935708170258731764e-01 1.458141583518740569e-01 7.082511296391911237e-01 1.098798009731616343e-02 3.655618484905173160e-01 9.551862303858617009e-01 8.148959351152762487e-02 4.739306219219985294e-02 7.963357515359494876e-01 6.208332695202813944e-01 3.884182264923189409e-01 4.589167647950288531e-01 6.496652974138312775e-01 2.467528128074852889e-01 5.309593064844935206e-01 5.364606369543487574e-01 2.421352989851309756e-01 3.776834556696828660e-02 1.564861233558080267e-01 5.197231021782636740e-01 8.725375120634637494e-01 2.441225493455024820e-01 2.320363366041028330e-01 5.026358683423555185e-01 7.035766000474735771e-01 8.347805591467084563e-01 2.303229841813967393e-01 6.908373419683054850e-01 2.646662377366995056e-01 1.259467197942290007e-01 9.372770922994989595e-01 6.674216272867254940e-01 1.027944489143072238e-01 5.686267290346079806e-01 3.948222804451942958e-01 4.689706944496729868e-01 
4.446117700449114807e-02 6.817992275557515081e-01 9.084821829413957106e-01 9.184021015315092518e-01 3.045815734169987632e-01 2.204958624923980537e-03 7.542672057172502553e-01 9.460844786545006269e-01 3.373139094575949848e-02 9.059565314915285494e-01 9.938525461318854504e-01 2.542072661725306437e-01 9.685734112479216229e-02 8.223629541824816203e-01 1.057429056898460118e-01 8.080679390260248063e-01 5.823014244609205914e-01 6.413551528031806725e-01 1.787341975438894170e-01 1.250471413912357388e-01 8.390281297596062782e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-euclidean-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-euclidean-ml-iris.txt
    deleted file mode 100644
    index 86de3c7592..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-euclidean-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   5.3851648e-01   5.0990195e-01   6.4807407e-01   1.4142136e-01   6.1644140e-01   5.1961524e-01   1.7320508e-01   9.2195445e-01   4.6904158e-01   3.7416574e-01   3.7416574e-01   5.9160798e-01   9.9498744e-01   8.8317609e-01   1.1045361e+00   5.4772256e-01   1.0000000e-01   7.4161985e-01   3.3166248e-01   4.3588989e-01   3.0000000e-01   6.4807407e-01   4.6904158e-01   5.9160798e-01   5.4772256e-01   3.1622777e-01   1.4142136e-01   1.4142136e-01   5.3851648e-01   5.3851648e-01   3.8729833e-01   6.2449980e-01   8.0622577e-01   4.6904158e-01   3.7416574e-01   4.1231056e-01   4.6904158e-01   8.6602540e-01   1.4142136e-01   1.7320508e-01   1.3490738e+00   7.6811457e-01   4.5825757e-01   6.1644140e-01   5.9160798e-01   3.6055513e-01   5.8309519e-01   3.0000000e-01   2.2360680e-01   4.0037482e+00   3.6166283e+00   4.1641326e+00   3.0935417e+00   3.7920970e+00   3.4161382e+00   3.7854986e+00   2.3452079e+00   3.7496667e+00   2.8879058e+00   2.7037012e+00   3.2280025e+00   3.1464265e+00   3.7000000e+00   2.5806976e+00   3.6276714e+00   3.4351128e+00   3.0099834e+00   3.7682887e+00   2.8827071e+00   3.8535698e+00   3.0757113e+00   4.0472213e+00   3.6578682e+00   3.4161382e+00   3.5972211e+00   4.0472213e+00   4.2449971e+00   3.5312887e+00   2.4939928e+00   2.8178006e+00   2.7018512e+00   2.8948230e+00   4.1352146e+00   3.4117444e+00   3.5199432e+00   3.9115214e+00   3.6180105e+00   3.0000000e+00   3.0215890e+00   3.3120990e+00   3.5958309e+00   3.0099834e+00   2.3874673e+00   3.1527766e+00   3.0740852e+00   3.1256999e+00   3.3451457e+00   2.0904545e+00   3.0577770e+00   5.2848841e+00   4.2083251e+00   5.3018865e+00   4.6904158e+00   5.0566788e+00   6.0950800e+00   3.5916570e+00   5.6364883e+00   5.0477718e+00   5.6391489e+00   4.3566042e+00   4.5199558e+00   4.8538644e+00   4.1904654e+00   4.4170126e+00   4.6260134e+00   4.6454279e+00   6.2401923e+00   6.4984614e+00   4.1412558e+00   5.1215232e+00   4.0286474e+00   6.2112801e+00   4.1097445e+00   4.9699095e+00   
5.3122500e+00   3.9774364e+00   4.0074930e+00   4.8404545e+00   5.0970580e+00   5.5461698e+00   6.0141500e+00   4.8805737e+00   4.1605288e+00   4.5705580e+00   5.7887823e+00   4.8918299e+00   4.6065171e+00   3.8961519e+00   4.7968740e+00   5.0199602e+00   4.6368092e+00   4.2083251e+00   5.2573758e+00   5.1361464e+00   4.6540305e+00   4.2766810e+00   4.4598206e+00   4.6508064e+00   4.1400483e+00   3.0000000e-01   3.3166248e-01   6.0827625e-01   1.0908712e+00   5.0990195e-01   4.2426407e-01   5.0990195e-01   1.7320508e-01   8.6602540e-01   4.5825757e-01   1.4142136e-01   6.7823300e-01   1.3601471e+00   1.6278821e+00   1.0535654e+00   5.4772256e-01   1.1747340e+00   8.3666003e-01   7.0710678e-01   7.6157731e-01   7.8102497e-01   5.5677644e-01   6.4807407e-01   2.2360680e-01   5.0000000e-01   5.9160798e-01   5.0000000e-01   3.4641016e-01   2.4494897e-01   6.7823300e-01   1.1489125e+00   1.3416408e+00   1.7320508e-01   3.0000000e-01   7.8740079e-01   1.7320508e-01   5.0990195e-01   4.5825757e-01   5.2915026e-01   8.1853528e-01   5.4772256e-01   6.7823300e-01   9.8488578e-01   1.4142136e-01   8.4852814e-01   3.6055513e-01   8.1240384e-01   3.1622777e-01   4.0963398e+00   3.6864617e+00   4.2367440e+00   2.9698485e+00   3.8118237e+00   3.3911650e+00   3.8600518e+00   2.1470911e+00   3.7881394e+00   2.8053520e+00   2.4617067e+00   3.2449961e+00   3.0413813e+00   3.7121422e+00   2.5592968e+00   3.7000000e+00   3.4336569e+00   2.9715316e+00   3.6918830e+00   2.7928480e+00   3.8935845e+00   3.0740852e+00   4.0187063e+00   3.6565011e+00   3.4467376e+00   3.6510273e+00   4.0804412e+00   4.2953463e+00   3.5383612e+00   2.4186773e+00   2.7000000e+00   2.5787594e+00   2.8548205e+00   4.1170378e+00   3.3985291e+00   3.5972211e+00   3.9786933e+00   3.5580894e+00   2.9983329e+00   2.9291637e+00   3.2434549e+00   3.6221541e+00   2.9546573e+00   2.1794495e+00   3.1032241e+00   3.0789609e+00   3.1144823e+00   3.3645208e+00   1.9131126e+00   3.0298515e+00   5.3385391e+00   4.1809090e+00   
5.3572381e+00   4.7085029e+00   5.0911688e+00   6.1595454e+00   3.4799425e+00   5.6868269e+00   5.0408333e+00   5.7471732e+00   4.4192760e+00   4.5210618e+00   4.9020404e+00   4.1340053e+00   4.4022721e+00   4.6808119e+00   4.6829478e+00   6.3694584e+00   6.5314623e+00   4.0620192e+00   5.1903757e+00   4.0024992e+00   6.2617889e+00   4.1060930e+00   5.0428167e+00   5.3898052e+00   3.9812058e+00   4.0311289e+00   4.8518038e+00   5.1584882e+00   5.5919585e+00   6.1546730e+00   4.8918299e+00   4.1689327e+00   4.5475268e+00   5.8600341e+00   4.9598387e+00   4.6508064e+00   3.9153544e+00   4.8600412e+00   5.0724747e+00   4.7021272e+00   4.1809090e+00   5.3207142e+00   5.2067264e+00   4.7000000e+00   4.2497059e+00   4.4988888e+00   4.7180504e+00   4.1533119e+00   2.4494897e-01   5.0990195e-01   1.0862780e+00   2.6457513e-01   4.1231056e-01   4.3588989e-01   3.1622777e-01   8.8317609e-01   3.7416574e-01   2.6457513e-01   5.0000000e-01   1.3638182e+00   1.5874508e+00   1.0099505e+00   5.1961524e-01   1.2369317e+00   7.5498344e-01   8.3066239e-01   7.0000000e-01   5.0990195e-01   6.4807407e-01   6.4031242e-01   4.6904158e-01   5.0990195e-01   6.1644140e-01   5.4772256e-01   3.0000000e-01   3.3166248e-01   7.8102497e-01   1.0535654e+00   1.2845233e+00   3.1622777e-01   3.1622777e-01   8.5440037e-01   3.1622777e-01   3.6055513e-01   4.8989795e-01   4.3588989e-01   9.2736185e-01   3.0000000e-01   6.5574385e-01   9.5916630e-01   2.6457513e-01   7.8102497e-01   1.4142136e-01   8.0622577e-01   3.3166248e-01   4.2766810e+00   3.8496753e+00   4.4158804e+00   3.1543621e+00   3.9974992e+00   3.5510562e+00   4.0112342e+00   2.3065125e+00   3.9749214e+00   2.9495762e+00   2.6476405e+00   3.4029399e+00   3.2588341e+00   3.8794329e+00   2.7202941e+00   3.8807216e+00   3.5749126e+00   3.1527766e+00   3.8961519e+00   2.9782545e+00   4.0311289e+00   3.2588341e+00   4.2071368e+00   3.8314488e+00   3.6318040e+00   3.8340579e+00   4.2731721e+00   4.4698993e+00   3.7027017e+00   2.6153394e+00   
2.8879058e+00   2.7712813e+00   3.0364453e+00   4.2825226e+00   3.5298725e+00   3.7322915e+00   4.1545156e+00   3.7669616e+00   3.1464265e+00   3.1032241e+00   3.4073450e+00   3.7854986e+00   3.1400637e+00   2.3537205e+00   3.2680269e+00   3.2326460e+00   3.2726136e+00   3.5425979e+00   2.0856654e+00   3.1953091e+00   5.4726593e+00   4.3347434e+00   5.5290144e+00   4.8682646e+00   5.2469038e+00   6.3364028e+00   3.6083237e+00   5.8660038e+00   5.2249402e+00   5.8940648e+00   4.5738387e+00   4.6936127e+00   5.0695167e+00   4.2918527e+00   4.5442271e+00   4.8270074e+00   4.8456166e+00   6.5207362e+00   6.7178866e+00   4.2508823e+00   5.3488316e+00   4.1436699e+00   6.4467046e+00   4.2813549e+00   5.1942276e+00   5.5587768e+00   4.1496988e+00   4.1856899e+00   5.0149776e+00   5.3385391e+00   5.7775427e+00   6.3126856e+00   5.0537115e+00   4.3416587e+00   4.7169906e+00   6.0406953e+00   5.0921508e+00   4.8062459e+00   4.0669399e+00   5.0269275e+00   5.2287666e+00   4.8682646e+00   4.3347434e+00   5.4753995e+00   5.3535035e+00   4.8641546e+00   4.4305756e+00   4.6615448e+00   4.8487112e+00   4.2988371e+00   6.4807407e-01   1.1661904e+00   3.3166248e-01   5.0000000e-01   3.0000000e-01   3.1622777e-01   1.0000000e+00   3.7416574e-01   2.6457513e-01   5.1961524e-01   1.5297059e+00   1.7146428e+00   1.1661904e+00   6.5574385e-01   1.3228757e+00   8.6602540e-01   8.7749644e-01   8.0622577e-01   7.0710678e-01   6.4807407e-01   5.3851648e-01   4.2426407e-01   5.4772256e-01   7.2111026e-01   6.7823300e-01   1.7320508e-01   2.2360680e-01   8.7749644e-01   1.1704700e+00   1.4247807e+00   3.1622777e-01   5.0990195e-01   1.0049876e+00   3.1622777e-01   3.0000000e-01   5.8309519e-01   6.0827625e-01   8.3666003e-01   3.0000000e-01   7.0000000e-01   9.6953597e-01   2.6457513e-01   8.6602540e-01   1.4142136e-01   9.2195445e-01   4.5825757e-01   4.1773197e+00   3.7336309e+00   4.3058100e+00   2.9849623e+00   3.8729833e+00   3.3926391e+00   3.8897301e+00   2.1118712e+00   3.8548671e+00   
2.7784888e+00   2.4515301e+00   3.2680269e+00   3.1080541e+00   3.7376463e+00   2.5806976e+00   3.7762415e+00   3.4205263e+00   3.0000000e+00   3.7496667e+00   2.8160256e+00   3.8923001e+00   3.1304952e+00   4.0620192e+00   3.6851052e+00   3.5114100e+00   3.7229021e+00   4.1545156e+00   4.3497126e+00   3.5623026e+00   2.4698178e+00   2.7202941e+00   2.6038433e+00   2.8913665e+00   4.1279535e+00   3.3674916e+00   3.6069378e+00   4.0422766e+00   3.6262929e+00   2.9966648e+00   2.9376862e+00   3.2357379e+00   3.6482873e+00   2.9899833e+00   2.1633308e+00   3.1080541e+00   3.0838288e+00   3.1224990e+00   3.4132096e+00   1.9157244e+00   3.0446675e+00   5.3357286e+00   4.1773197e+00   5.4064776e+00   4.7222876e+00   5.1097945e+00   6.2153037e+00   3.4205263e+00   5.7384667e+00   5.0813384e+00   5.7844619e+00   4.4519659e+00   4.5530210e+00   4.9457052e+00   4.1303753e+00   4.3965896e+00   4.7010637e+00   4.7095647e+00   6.4140471e+00   6.5901442e+00   4.0877867e+00   5.2297227e+00   3.9862263e+00   6.3229740e+00   4.1436699e+00   5.0695167e+00   5.4387499e+00   4.0124805e+00   4.0472213e+00   4.8733972e+00   5.2172790e+00   5.6550862e+00   6.2153037e+00   4.9132474e+00   4.1988094e+00   4.5552168e+00   5.9321160e+00   4.9628621e+00   4.6690470e+00   3.9268308e+00   4.9101935e+00   5.1048996e+00   4.7602521e+00   4.1773197e+00   5.3497664e+00   5.2325902e+00   4.7455242e+00   4.2883563e+00   4.5332108e+00   4.7191101e+00   4.1496988e+00   6.1644140e-01   4.5825757e-01   2.2360680e-01   9.2195445e-01   5.2915026e-01   4.2426407e-01   3.4641016e-01   6.4031242e-01   9.7467943e-01   9.1651514e-01   1.0862780e+00   5.4772256e-01   1.7320508e-01   7.9372539e-01   2.6457513e-01   5.3851648e-01   2.6457513e-01   5.6568542e-01   5.2915026e-01   5.7445626e-01   6.3245553e-01   3.4641016e-01   2.4494897e-01   2.8284271e-01   5.3851648e-01   5.7445626e-01   5.0000000e-01   5.5677644e-01   7.8102497e-01   5.2915026e-01   4.4721360e-01   5.1961524e-01   5.2915026e-01   8.5440037e-01   
2.4494897e-01   1.7320508e-01   1.4000000e+00   7.2801099e-01   4.5825757e-01   5.8309519e-01   6.4031242e-01   3.0000000e-01   5.6568542e-01   3.3166248e-01   3.0000000e-01   4.0607881e+00   3.6633318e+00   4.2190046e+00   3.1480152e+00   3.8496753e+00   3.4568772e+00   3.8249183e+00   2.3874673e+00   3.8078866e+00   2.9223278e+00   2.7586228e+00   3.2710854e+00   3.2186954e+00   3.7456642e+00   2.6267851e+00   3.6851052e+00   3.4669872e+00   3.0626786e+00   3.8340579e+00   2.9376862e+00   3.8845849e+00   3.1336879e+00   4.1036569e+00   3.7067506e+00   3.4741906e+00   3.6551334e+00   4.1085277e+00   4.2965102e+00   3.5763109e+00   2.5573424e+00   2.8740216e+00   2.7604347e+00   2.9495762e+00   4.1785165e+00   3.4380227e+00   3.5510562e+00   3.9648455e+00   3.6864617e+00   3.0364453e+00   3.0708305e+00   3.3541020e+00   3.6400549e+00   3.0659419e+00   2.4372115e+00   3.1968735e+00   3.1128765e+00   3.1670175e+00   3.3985291e+00   2.1424285e+00   3.1032241e+00   5.3131911e+00   4.2461747e+00   5.3507009e+00   4.7307505e+00   5.0960769e+00   6.1457302e+00   3.6166283e+00   5.6877060e+00   5.1009803e+00   5.6762664e+00   4.3977267e+00   4.5683695e+00   4.9010203e+00   4.2308392e+00   4.4508426e+00   4.6626173e+00   4.6882833e+00   6.2785349e+00   6.5536250e+00   4.1964271e+00   5.1643005e+00   4.0607881e+00   6.2657801e+00   4.1605288e+00   5.0079936e+00   5.3591044e+00   4.0249224e+00   4.0472213e+00   4.8836462e+00   5.1497573e+00   5.6017854e+00   6.0572271e+00   4.9234135e+00   4.2083251e+00   4.6141088e+00   5.8438001e+00   4.9203658e+00   4.6454279e+00   3.9344631e+00   4.8445846e+00   5.0616203e+00   4.6861498e+00   4.2461747e+00   5.2971691e+00   5.1730069e+00   4.7010637e+00   4.3301270e+00   4.5044423e+00   4.6786750e+00   4.1737274e+00   9.9498744e-01   7.0000000e-01   1.4594520e+00   1.0099505e+00   3.4641016e-01   8.1240384e-01   1.1618950e+00   1.5716234e+00   6.7823300e-01   6.1644140e-01   4.0000000e-01   5.9160798e-01   3.3166248e-01   3.8729833e-01   
5.3851648e-01   4.1231056e-01   1.1224972e+00   6.7823300e-01   8.3066239e-01   1.0099505e+00   6.4807407e-01   5.2915026e-01   6.4807407e-01   1.0148892e+00   1.0246951e+00   5.3851648e-01   4.5825757e-01   4.7958315e-01   1.0099505e+00   9.6953597e-01   6.0827625e-01   1.0099505e+00   1.4177447e+00   6.4807407e-01   7.0000000e-01   1.8814888e+00   1.3000000e+00   6.0827625e-01   3.7416574e-01   1.1269428e+00   3.8729833e-01   1.1224972e+00   3.6055513e-01   8.0622577e-01   3.6124784e+00   3.2465366e+00   3.7868192e+00   2.9444864e+00   3.4698703e+00   3.1543621e+00   3.4073450e+00   2.3280893e+00   3.4146742e+00   2.7055499e+00   2.7147744e+00   2.9189039e+00   2.9832868e+00   3.3896903e+00   2.3366643e+00   3.2588341e+00   3.1464265e+00   2.7784888e+00   3.5468296e+00   2.7073973e+00   3.5085610e+00   2.7928480e+00   3.7709415e+00   3.3674916e+00   3.0935417e+00   3.2465366e+00   3.7121422e+00   3.8832976e+00   3.2264532e+00   2.3194827e+00   2.6758176e+00   2.5729361e+00   2.6608269e+00   3.8470768e+00   3.1400637e+00   3.1448370e+00   3.5411862e+00   3.3867388e+00   2.7239677e+00   2.8407745e+00   3.1032241e+00   3.2726136e+00   2.7892651e+00   2.3748684e+00   2.9223278e+00   2.7910571e+00   2.8548205e+00   3.0347982e+00   2.0566964e+00   2.8053520e+00   4.9061186e+00   3.9255573e+00   4.9223978e+00   4.3566042e+00   4.6978719e+00   5.7052607e+00   3.4263683e+00   5.2659282e+00   4.7349762e+00   5.2057660e+00   3.9774364e+00   4.2011903e+00   4.4833024e+00   3.9370039e+00   4.1146081e+00   4.2497059e+00   4.2918527e+00   5.7913729e+00   6.1343296e+00   3.9179076e+00   4.7275787e+00   3.7483330e+00   5.8360946e+00   3.8013156e+00   4.5760245e+00   4.9173163e+00   3.6633318e+00   3.6742346e+00   4.5066617e+00   4.7222876e+00   5.1788030e+00   5.5596762e+00   4.5453273e+00   3.8457769e+00   4.2883563e+00   5.3916602e+00   4.5022217e+00   4.2473521e+00   3.5693137e+00   4.4124823e+00   4.6411206e+00   4.2497059e+00   3.9255573e+00   4.8682646e+00   4.7391982e+00   
4.2848571e+00   3.9887341e+00   4.1024383e+00   4.2649736e+00   3.8183766e+00   4.2426407e-01   5.4772256e-01   4.7958315e-01   8.6602540e-01   3.0000000e-01   4.8989795e-01   6.1644140e-01   1.3601471e+00   1.4933185e+00   9.5393920e-01   5.0990195e-01   1.2083046e+00   6.4807407e-01   8.6023253e-01   6.0000000e-01   4.5825757e-01   6.2449980e-01   5.4772256e-01   6.0827625e-01   4.5825757e-01   6.2449980e-01   6.0827625e-01   3.1622777e-01   4.2426407e-01   8.1240384e-01   9.4868330e-01   1.2083046e+00   4.7958315e-01   5.0000000e-01   9.1651514e-01   4.7958315e-01   4.6904158e-01   5.1961524e-01   4.2426407e-01   1.1090537e+00   3.1622777e-01   5.4772256e-01   8.1853528e-01   4.4721360e-01   6.7823300e-01   2.2360680e-01   7.7459667e-01   4.2426407e-01   4.2308392e+00   3.7854986e+00   4.3669211e+00   3.1272992e+00   3.9560081e+00   3.4899857e+00   3.9344631e+00   2.2781571e+00   3.9357337e+00   2.8827071e+00   2.6495283e+00   3.3361655e+00   3.2634338e+00   3.8209946e+00   2.6627054e+00   3.8353618e+00   3.4942810e+00   3.1160873e+00   3.8794329e+00   2.9495762e+00   3.9420807e+00   3.2202484e+00   4.1701319e+00   3.7828561e+00   3.5916570e+00   3.7907783e+00   4.2391037e+00   4.4147480e+00   3.6414283e+00   2.5980762e+00   2.8653098e+00   2.7549955e+00   2.9983329e+00   4.2225585e+00   3.4423829e+00   3.6414283e+00   4.1024383e+00   3.7549967e+00   3.0740852e+00   3.0626786e+00   3.3555923e+00   3.7229021e+00   3.1064449e+00   2.3388031e+00   3.2140317e+00   3.1654384e+00   3.2093613e+00   3.4957117e+00   2.0639767e+00   3.1400637e+00   5.3758720e+00   4.2638011e+00   5.4680892e+00   4.7989582e+00   5.1710734e+00   6.2801274e+00   3.5312887e+00   5.8137767e+00   5.1797683e+00   5.8077534e+00   4.4977772e+00   4.6368092e+00   5.0049975e+00   4.2272923e+00   4.4609416e+00   4.7423623e+00   4.7780749e+00   6.4397205e+00   6.6708320e+00   4.2190046e+00   5.2744668e+00   4.0620192e+00   6.3992187e+00   4.2284749e+00   5.1137071e+00   5.4963624e+00   4.0902323e+00   
4.1121770e+00   4.9477268e+00   5.2886671e+00   5.7314920e+00   6.2401923e+00   4.9849774e+00   4.2871902e+00   4.6626173e+00   5.9883220e+00   4.9939964e+00   4.7318073e+00   3.9912404e+00   4.9618545e+00   5.1526692e+00   4.8031240e+00   4.2638011e+00   5.3972215e+00   5.2678269e+00   4.7968740e+00   4.3840620e+00   4.5934736e+00   4.7497368e+00   4.2178193e+00   7.8740079e-01   3.3166248e-01   5.0000000e-01   2.2360680e-01   4.6904158e-01   9.0553851e-01   1.0440307e+00   1.2369317e+00   7.0000000e-01   2.0000000e-01   8.3666003e-01   4.2426407e-01   4.4721360e-01   3.7416574e-01   6.7082039e-01   3.8729833e-01   4.4721360e-01   4.1231056e-01   2.2360680e-01   2.2360680e-01   2.2360680e-01   3.7416574e-01   3.7416574e-01   4.4721360e-01   7.3484692e-01   9.4868330e-01   3.3166248e-01   3.6055513e-01   5.4772256e-01   3.3166248e-01   7.4833148e-01   1.0000000e-01   2.4494897e-01   1.2288206e+00   6.6332496e-01   4.2426407e-01   6.0827625e-01   4.6904158e-01   4.2426407e-01   4.5825757e-01   4.2426407e-01   1.4142136e-01   3.9648455e+00   3.5623026e+00   4.1170378e+00   2.9866369e+00   3.7296112e+00   3.3256578e+00   3.7282704e+00   2.2113344e+00   3.6918830e+00   2.7802878e+00   2.5690465e+00   3.1543621e+00   3.0545049e+00   3.6249138e+00   2.4959968e+00   3.5818989e+00   3.3481338e+00   2.9206164e+00   3.6837481e+00   2.7820855e+00   3.7815341e+00   3.0049958e+00   3.9686270e+00   3.5791060e+00   3.3555923e+00   3.5454196e+00   3.9912404e+00   4.1892720e+00   3.4554305e+00   2.4020824e+00   2.7110883e+00   2.5942244e+00   2.8089144e+00   4.0509258e+00   3.3181320e+00   3.4583233e+00   3.8613469e+00   3.5383612e+00   2.9137605e+00   2.9189039e+00   3.2093613e+00   3.5242020e+00   2.9206164e+00   2.2561028e+00   3.0577770e+00   2.9899833e+00   3.0397368e+00   3.2771939e+00   1.9697716e+00   2.9698485e+00   5.2191953e+00   4.1206796e+00   5.2478567e+00   4.6162756e+00   4.9899900e+00   6.0448325e+00   3.4741906e+00   5.5803226e+00   4.9749372e+00   5.5973208e+00   
4.3000000e+00   4.4474712e+00   4.7968740e+00   4.0975602e+00   4.3358967e+00   4.5661800e+00   4.5793013e+00   6.2040309e+00   6.4420494e+00   4.0472213e+00   5.0695167e+00   3.9395431e+00   6.1587336e+00   4.0373258e+00   4.9142650e+00   5.2621288e+00   3.9051248e+00   3.9357337e+00   4.7686476e+00   5.0447993e+00   5.4927225e+00   5.9849812e+00   4.8093659e+00   4.0865633e+00   4.4833024e+00   5.7463032e+00   4.8311489e+00   4.5398238e+00   3.8223030e+00   4.7455242e+00   4.9628621e+00   4.5902070e+00   4.1206796e+00   5.2009614e+00   5.0823223e+00   4.5989129e+00   4.2000000e+00   4.3977267e+00   4.5891176e+00   4.0607881e+00   5.5677644e-01   1.2845233e+00   6.7082039e-01   4.2426407e-01   3.4641016e-01   1.7916473e+00   1.9974984e+00   1.4317821e+00   9.2736185e-01   1.6124515e+00   1.1489125e+00   1.1575837e+00   1.0862780e+00   8.3066239e-01   9.1104336e-01   8.1240384e-01   6.4031242e-01   8.3066239e-01   1.0049876e+00   9.4339811e-01   4.6904158e-01   4.8989795e-01   1.1401754e+00   1.4491377e+00   1.7029386e+00   5.5677644e-01   7.0000000e-01   1.2569805e+00   5.5677644e-01   1.4142136e-01   8.6602540e-01   8.6023253e-01   6.2449980e-01   3.1622777e-01   9.5916630e-01   1.2609520e+00   4.2426407e-01   1.1575837e+00   3.6055513e-01   1.2083046e+00   7.2111026e-01   4.3794977e+00   3.9230090e+00   4.4977772e+00   3.0886890e+00   4.0435133e+00   3.5383612e+00   4.0767634e+00   2.1794495e+00   4.0360872e+00   2.8930952e+00   2.4939928e+00   3.4336569e+00   3.2326460e+00   3.9012818e+00   2.7367864e+00   3.9711459e+00   3.5707142e+00   3.1511903e+00   3.8768544e+00   2.9427878e+00   4.0570926e+00   3.2969683e+00   4.2083251e+00   3.8457769e+00   3.6905284e+00   3.9102430e+00   4.3324358e+00   4.5287967e+00   3.7229021e+00   2.6134269e+00   2.8337255e+00   2.7184554e+00   3.0413813e+00   4.2720019e+00   3.5085610e+00   3.7920970e+00   4.2320208e+00   3.7656341e+00   3.1543621e+00   3.0561414e+00   3.3615473e+00   3.8183766e+00   3.1320920e+00   2.2293497e+00   
3.2449961e+00   3.2465366e+00   3.2771939e+00   3.5860842e+00   2.0049938e+00   3.1937439e+00   5.4972721e+00   4.3104524e+00   5.5821143e+00   4.8795492e+00   5.2706736e+00   6.3953108e+00   3.5028560e+00   5.9143892e+00   5.2316345e+00   5.9757845e+00   4.6292548e+00   4.7053161e+00   5.1176166e+00   4.2485292e+00   4.5276926e+00   4.8692915e+00   4.8774994e+00   6.6174013e+00   6.7557383e+00   4.2071368e+00   5.4074023e+00   4.1158231e+00   6.4984614e+00   4.2965102e+00   5.2488094e+00   5.6258333e+00   4.1677332e+00   4.2083251e+00   5.0259327e+00   5.4009258e+00   5.8300943e+00   6.4265076e+00   5.0645829e+00   4.3588989e+00   4.6968074e+00   6.1155539e+00   5.1322510e+00   4.8383882e+00   4.0853396e+00   5.0892043e+00   5.2735187e+00   4.9386233e+00   4.3104524e+00   5.5235858e+00   5.4064776e+00   4.9142650e+00   4.4294469e+00   4.7010637e+00   4.8887626e+00   4.3023250e+00   7.8740079e-01   3.4641016e-01   1.7320508e-01   7.2801099e-01   1.3114877e+00   1.5556349e+00   1.0099505e+00   5.0000000e-01   1.1000000e+00   7.5498344e-01   6.2449980e-01   7.0000000e-01   7.7459667e-01   5.2915026e-01   5.1961524e-01   2.0000000e-01   4.4721360e-01   5.0990195e-01   4.4721360e-01   2.6457513e-01   1.7320508e-01   6.5574385e-01   1.0440307e+00   1.2609520e+00   0.0000000e+00   3.4641016e-01   7.5498344e-01   0.0000000e+00   5.5677644e-01   3.7416574e-01   5.0000000e-01   9.3808315e-01   5.5677644e-01   6.5574385e-01   8.8317609e-01   2.6457513e-01   7.4161985e-01   3.4641016e-01   7.2801099e-01   2.6457513e-01   4.0435133e+00   3.6359318e+00   4.1856899e+00   2.9478806e+00   3.7709415e+00   3.3421550e+00   3.8065733e+00   2.1307276e+00   3.7389838e+00   2.7748874e+00   2.4556058e+00   3.2031235e+00   3.0133038e+00   3.6619667e+00   2.5258662e+00   3.6523965e+00   3.3852622e+00   2.9223278e+00   3.6687873e+00   2.7586228e+00   3.8457769e+00   3.0364453e+00   3.9799497e+00   3.6027767e+00   3.4014703e+00   3.6055513e+00   4.0348482e+00   4.2497059e+00   3.4942810e+00   
2.3874673e+00   2.6720778e+00   2.5495098e+00   2.8178006e+00   4.0718546e+00   3.3496268e+00   3.5425979e+00   3.9293765e+00   3.5284558e+00   2.9495762e+00   2.9000000e+00   3.1984371e+00   3.5707142e+00   2.9189039e+00   2.1679483e+00   3.0626786e+00   3.0248967e+00   3.0675723e+00   3.3181320e+00   1.9104973e+00   2.9883106e+00   5.2924474e+00   4.1436699e+00   5.3113087e+00   4.6583259e+00   5.0467812e+00   6.1081912e+00   3.4525353e+00   5.6329388e+00   4.9979996e+00   5.6973678e+00   4.3749286e+00   4.4821870e+00   4.8600412e+00   4.1060930e+00   4.3760713e+00   4.6411206e+00   4.6324939e+00   6.3071388e+00   6.4876806e+00   4.0286474e+00   5.1468437e+00   3.9686270e+00   6.2112801e+00   4.0706265e+00   4.9919936e+00   5.3329167e+00   3.9446166e+00   3.9874804e+00   4.8114447e+00   5.1029403e+00   5.5443665e+00   6.0917978e+00   4.8538644e+00   4.1194660e+00   4.4933284e+00   5.8180753e+00   4.9142650e+00   4.5978256e+00   3.8729833e+00   4.8176758e+00   5.0338852e+00   4.6690470e+00   4.1436699e+00   5.2744668e+00   5.1652686e+00   4.6669048e+00   4.2201896e+00   4.4575778e+00   4.6722586e+00   4.1060930e+00   6.7823300e-01   9.3273791e-01   1.3674794e+00   5.8309519e-01   7.8740079e-01   3.4641016e-01   3.8729833e-01   3.8729833e-01   3.3166248e-01   3.6055513e-01   3.6055513e-01   9.4868330e-01   6.1644140e-01   7.8102497e-01   8.1240384e-01   5.4772256e-01   2.8284271e-01   3.7416574e-01   8.6602540e-01   8.5440037e-01   3.6055513e-01   4.5825757e-01   5.1961524e-01   7.8740079e-01   7.0710678e-01   3.0000000e-01   7.8740079e-01   1.2369317e+00   4.2426407e-01   5.0000000e-01   1.6792856e+00   1.1357817e+00   6.0827625e-01   5.4772256e-01   9.3273791e-01   3.3166248e-01   9.4868330e-01   1.0000000e-01   5.7445626e-01   3.8065733e+00   3.4554305e+00   3.9824616e+00   3.0708305e+00   3.6496575e+00   3.3331667e+00   3.6290495e+00   2.4124676e+00   3.5916570e+00   2.8705400e+00   2.7730849e+00   3.1176915e+00   3.0822070e+00   3.5791060e+00   2.5099801e+00   
3.4496377e+00   3.3496268e+00   2.9257478e+00   3.6851052e+00   2.8372522e+00   3.7349699e+00   2.9597297e+00   3.9370039e+00   3.5411862e+00   3.2695565e+00   3.4322005e+00   3.8858718e+00   4.0841156e+00   3.4190642e+00   2.4372115e+00   2.7928480e+00   2.6795522e+00   2.8142495e+00   4.0348482e+00   3.3436507e+00   3.3778692e+00   3.7389838e+00   3.5199432e+00   2.9154759e+00   2.9849623e+00   3.2603681e+00   3.4684290e+00   2.9359837e+00   2.4494897e+00   3.0886890e+00   2.9782545e+00   3.0380915e+00   3.2140317e+00   2.1424285e+00   2.9782545e+00   5.1487863e+00   4.1243181e+00   5.1332251e+00   4.5628938e+00   4.9183331e+00   5.9118525e+00   3.5972211e+00   5.4635154e+00   4.9173163e+00   5.4497706e+00   4.2023803e+00   4.3965896e+00   4.6968074e+00   4.1255303e+00   4.3324358e+00   4.4833024e+00   4.5011110e+00   6.0282667e+00   6.3300869e+00   4.0681691e+00   4.9547957e+00   3.9560081e+00   6.0315835e+00   3.9912404e+00   4.8062459e+00   5.1283526e+00   3.8600518e+00   3.8858718e+00   4.7148701e+00   4.9173163e+00   5.3721504e+00   5.7887823e+00   4.7560488e+00   4.0336088e+00   4.4665423e+00   5.5991071e+00   4.7486840e+00   4.4631827e+00   3.7815341e+00   4.6292548e+00   4.8682646e+00   4.4698993e+00   4.1243181e+00   5.0970580e+00   4.9779514e+00   4.5033321e+00   4.1701319e+00   4.3162484e+00   4.5110974e+00   4.0323690e+00   4.5825757e-01   8.1853528e-01   1.2328828e+00   1.3638182e+00   8.6023253e-01   3.8729833e-01   9.9498744e-01   5.1961524e-01   6.0827625e-01   4.7958315e-01   6.6332496e-01   4.4721360e-01   3.0000000e-01   4.4721360e-01   2.8284271e-01   4.2426407e-01   4.4721360e-01   2.2360680e-01   3.0000000e-01   6.4031242e-01   8.1853528e-01   1.0816654e+00   3.4641016e-01   4.8989795e-01   7.6811457e-01   3.4641016e-01   6.4031242e-01   3.1622777e-01   3.8729833e-01   1.1832160e+00   5.3851648e-01   4.5825757e-01   6.1644140e-01   4.5825757e-01   5.0000000e-01   3.4641016e-01   5.9160798e-01   3.0000000e-01   3.9912404e+00   3.5637059e+00   
4.1327957e+00   2.9444864e+00   3.7336309e+00   3.2848135e+00   3.7188708e+00   2.1307276e+00   3.7013511e+00   2.7166155e+00   2.5000000e+00   3.1336879e+00   3.0463092e+00   3.6041643e+00   2.4698178e+00   3.6027767e+00   3.3015148e+00   2.8948230e+00   3.6742346e+00   2.7477263e+00   3.7483330e+00   3.0033315e+00   3.9547440e+00   3.5580894e+00   3.3630343e+00   3.5608988e+00   4.0049969e+00   4.1928511e+00   3.4336569e+00   2.3874673e+00   2.6720778e+00   2.5573424e+00   2.7892651e+00   4.0174619e+00   3.2588341e+00   3.4365681e+00   3.8729833e+00   3.5369478e+00   2.8740216e+00   2.8757608e+00   3.1575307e+00   3.5057096e+00   2.8982753e+00   2.1863211e+00   3.0166206e+00   2.9546573e+00   3.0049958e+00   3.2726136e+00   1.9157244e+00   2.9376862e+00   5.1874849e+00   4.0779897e+00   5.2488094e+00   4.5891176e+00   4.9689033e+00   6.0506198e+00   3.3882149e+00   5.5812185e+00   4.9618545e+00   5.5982140e+00   4.2918527e+00   4.4305756e+00   4.7937459e+00   4.0521599e+00   4.2953463e+00   4.5497253e+00   4.5628938e+00   6.2112801e+00   6.4459289e+00   4.0162171e+00   5.0665570e+00   3.8897301e+00   6.1660360e+00   4.0236799e+00   4.9030603e+00   5.2649786e+00   3.8884444e+00   3.9115214e+00   4.7465777e+00   5.0517324e+00   5.5009090e+00   6.0041652e+00   4.7874837e+00   4.0681691e+00   4.4463468e+00   5.7645468e+00   4.8052055e+00   4.5188494e+00   3.7947332e+00   4.7486840e+00   4.9537864e+00   4.6000000e+00   4.0779897e+00   5.1903757e+00   5.0714889e+00   4.5978256e+00   4.1844952e+00   4.3874822e+00   4.5617979e+00   4.0224371e+00   5.8309519e-01   1.4317821e+00   1.6941074e+00   1.1269428e+00   6.1644140e-01   1.2569805e+00   8.8317609e-01   7.8740079e-01   8.2462113e-01   7.5498344e-01   6.5574385e-01   6.4807407e-01   3.0000000e-01   5.7445626e-01   6.5574385e-01   5.7445626e-01   3.1622777e-01   2.4494897e-01   7.8740079e-01   1.1747340e+00   1.3928388e+00   1.7320508e-01   3.6055513e-01   8.7177979e-01   1.7320508e-01   4.2426407e-01   5.1961524e-01   
5.8309519e-01   7.9372539e-01   4.6904158e-01   7.6157731e-01   1.0344080e+00   2.0000000e-01   8.8317609e-01   3.0000000e-01   8.7177979e-01   3.7416574e-01   4.1785165e+00   3.7643060e+00   4.3162484e+00   3.0298515e+00   3.8897301e+00   3.4496377e+00   3.9344631e+00   2.1886069e+00   3.8639358e+00   2.8618176e+00   2.5019992e+00   3.3181320e+00   3.1064449e+00   3.7788887e+00   2.6324893e+00   3.7828561e+00   3.4942810e+00   3.0315013e+00   3.7643060e+00   2.8530685e+00   3.9623226e+00   3.1511903e+00   4.0877867e+00   3.7188708e+00   3.5242020e+00   3.7322915e+00   4.1581246e+00   4.3737855e+00   3.6083237e+00   2.4879711e+00   2.7586228e+00   2.6362853e+00   2.9240383e+00   4.1797129e+00   3.4539832e+00   3.6687873e+00   4.0583248e+00   3.6304270e+00   3.0610456e+00   2.9899833e+00   3.2954514e+00   3.6905284e+00   3.0215890e+00   2.2248595e+00   3.1638584e+00   3.1400637e+00   3.1780497e+00   3.4380227e+00   1.9748418e+00   3.0951575e+00   5.4092513e+00   4.2449971e+00   5.4350713e+00   4.7738873e+00   5.1633323e+00   6.2353829e+00   3.5256205e+00   5.7584720e+00   5.1097945e+00   5.8283788e+00   4.4977772e+00   4.5934736e+00   4.9809638e+00   4.1988094e+00   4.4743715e+00   4.7592016e+00   4.7528939e+00   6.4459289e+00   6.6075714e+00   4.1231056e+00   5.2706736e+00   4.0669399e+00   6.3364028e+00   4.1809090e+00   5.1176166e+00   5.4635154e+00   4.0558600e+00   4.1024383e+00   4.9234135e+00   5.2316345e+00   5.6683331e+00   6.2337790e+00   4.9648766e+00   4.2355637e+00   4.6021734e+00   5.9447456e+00   5.0338852e+00   4.7191101e+00   3.9862263e+00   4.9416596e+00   5.1526692e+00   4.7906158e+00   4.2449971e+00   5.3972215e+00   5.2867760e+00   4.7843495e+00   4.3243497e+00   4.5760245e+00   4.7916594e+00   4.2178193e+00   1.8083141e+00   2.0420578e+00   1.4662878e+00   1.0099505e+00   1.7320508e+00   1.2165525e+00   1.3190906e+00   1.1747340e+00   6.8556546e-01   1.1180340e+00   1.0295630e+00   8.6602540e-01   9.9498744e-01   1.1090537e+00   1.0344080e+00   
6.7823300e-01   7.2111026e-01   1.2727922e+00   1.4764823e+00   1.7262677e+00   7.2801099e-01   7.4161985e-01   1.3190906e+00   7.2801099e-01   2.4494897e-01   9.8488578e-01   9.0553851e-01   7.8102497e-01   3.1622777e-01   1.1135529e+00   1.4177447e+00   6.1644140e-01   1.2409674e+00   4.7958315e-01   1.2884099e+00   8.2462113e-01   4.6882833e+00   4.2391037e+00   4.8135226e+00   3.4322005e+00   4.3692105e+00   3.8729833e+00   4.3931765e+00   2.5238859e+00   4.3577517e+00   3.2295511e+00   2.8390139e+00   3.7589892e+00   3.5707142e+00   4.2308392e+00   3.0643107e+00   4.2836900e+00   3.9000000e+00   3.4856850e+00   4.2154478e+00   3.2832910e+00   4.3794977e+00   3.6235342e+00   4.5442271e+00   4.1773197e+00   4.0124805e+00   4.2272923e+00   4.6551047e+00   4.8507731e+00   4.0521599e+00   2.9478806e+00   3.1764760e+00   3.0610456e+00   3.3749074e+00   4.6076024e+00   3.8379682e+00   4.1060930e+00   4.5486262e+00   4.1012193e+00   3.4828150e+00   3.3970576e+00   3.7013511e+00   4.1448764e+00   3.4684290e+00   2.5748786e+00   3.5818989e+00   3.5749126e+00   3.6083237e+00   3.9115214e+00   2.3452079e+00   3.5270384e+00   5.8189346e+00   4.6454279e+00   5.9059292e+00   5.2105662e+00   5.5982140e+00   6.7186308e+00   3.8379682e+00   6.2401923e+00   5.5668663e+00   6.2872888e+00   4.9487372e+00   5.0378567e+00   5.4415071e+00   4.5858478e+00   4.8559242e+00   5.1894123e+00   5.2048055e+00   6.9260378e+00   7.0851958e+00   4.5497253e+00   5.7271284e+00   4.4474712e+00   6.8242216e+00   4.6281746e+00   5.5686623e+00   5.9455866e+00   4.4977772e+00   4.5354162e+00   5.3572381e+00   5.7227616e+00   6.1554854e+00   6.7305275e+00   5.3953684e+00   4.6904158e+00   5.0338852e+00   6.4342832e+00   5.4497706e+00   5.1643005e+00   4.4124823e+00   5.4092513e+00   5.5955339e+00   5.2545219e+00   4.6454279e+00   5.8455111e+00   5.7245087e+00   5.2354560e+00   4.7644517e+00   5.0259327e+00   5.2057660e+00   4.6314145e+00   5.4772256e-01   4.6904158e-01   8.8881944e-01   5.5677644e-01   
7.9372539e-01   8.7749644e-01   8.4261498e-01   1.2806248e+00   1.1489125e+00   1.3601471e+00   1.3416408e+00   1.0954451e+00   8.3666003e-01   8.7177979e-01   1.4177447e+00   1.4035669e+00   8.0622577e-01   6.8556546e-01   4.1231056e-01   1.3114877e+00   1.1313708e+00   5.9160798e-01   1.3114877e+00   1.7233688e+00   9.6953597e-01   9.5393920e-01   2.1447611e+00   1.6155494e+00   1.1000000e+00   1.0295630e+00   1.4317821e+00   8.3066239e-01   1.4560220e+00   6.5574385e-01   1.0816654e+00   3.9711459e+00   3.6851052e+00   4.1713307e+00   3.4684290e+00   3.8961519e+00   3.6810325e+00   3.8665230e+00   2.9017236e+00   3.8236109e+00   3.2832910e+00   3.2511536e+00   3.4205263e+00   3.4292856e+00   3.8716921e+00   2.8670542e+00   3.6469165e+00   3.6905284e+00   3.2771939e+00   3.9974992e+00   3.2233523e+00   4.0211939e+00   3.2526912e+00   4.2284749e+00   3.8444766e+00   3.5199432e+00   3.6496575e+00   4.1036569e+00   4.3011626e+00   3.7188708e+00   2.8106939e+00   3.1968735e+00   3.0886890e+00   3.1591138e+00   4.3474130e+00   3.7067506e+00   3.6400549e+00   3.9446166e+00   3.8196859e+00   3.2649655e+00   3.3749074e+00   3.6455452e+00   3.7536649e+00   3.2863353e+00   2.9291637e+00   3.4554305e+00   3.3181320e+00   3.3808283e+00   3.4914181e+00   2.6057628e+00   3.3271610e+00   5.3916602e+00   4.4485953e+00   5.3282267e+00   4.8352870e+00   5.1623638e+00   6.0835845e+00   4.0249224e+00   5.6595053e+00   5.1749396e+00   5.6053546e+00   4.4249294e+00   4.6636895e+00   4.9091751e+00   4.4654227e+00   4.6357308e+00   4.7138095e+00   4.7476310e+00   6.1562976e+00   6.5169011e+00   4.4056782e+00   5.1487863e+00   4.2906876e+00   6.2080593e+00   4.2649736e+00   5.0159745e+00   5.3103672e+00   4.1376322e+00   4.1641326e+00   4.9769469e+00   5.1068581e+00   5.5587768e+00   5.8932164e+00   5.0159745e+00   4.3116122e+00   4.7801674e+00   5.7471732e+00   4.9809638e+00   4.7138095e+00   4.0693980e+00   4.8238988e+00   5.0813384e+00   4.6518813e+00   4.4485953e+00   5.3047149e+00   
5.1807335e+00   4.7138095e+00   4.4530888e+00   4.5530210e+00   4.7507894e+00   4.3335897e+00   6.1644140e-01   1.0908712e+00   6.4031242e-01   8.5440037e-01   1.0816654e+00   9.2195445e-01   1.4628739e+00   1.2727922e+00   1.4177447e+00   1.5811388e+00   1.2247449e+00   1.0488088e+00   1.1401754e+00   1.5779734e+00   1.5968719e+00   1.0440307e+00   6.5574385e-01   3.6055513e-01   1.5556349e+00   1.4352700e+00   9.6436508e-01   1.5556349e+00   1.9313208e+00   1.1832160e+00   1.1618950e+00   2.4289916e+00   1.7916473e+00   1.1618950e+00   9.3808315e-01   1.6703293e+00   8.7749644e-01   1.6431677e+00   8.3066239e-01   1.3228757e+00   3.7907783e+00   3.4842503e+00   3.9874804e+00   3.3926391e+00   3.7443290e+00   3.5171011e+00   3.6400549e+00   2.8705400e+00   3.6715120e+00   3.1464265e+00   3.2572995e+00   3.2403703e+00   3.3970576e+00   3.6945906e+00   2.7349589e+00   3.4785054e+00   3.4899857e+00   3.1654384e+00   3.9115214e+00   3.1416556e+00   3.7854986e+00   3.1272992e+00   4.0914545e+00   3.6878178e+00   3.3749074e+00   3.4899857e+00   3.9572718e+00   4.1109610e+00   3.5425979e+00   2.7568098e+00   3.1336879e+00   3.0397368e+00   3.0495901e+00   4.1689327e+00   3.5014283e+00   3.3955854e+00   3.7603191e+00   3.7403208e+00   3.0886890e+00   3.2726136e+00   3.5114100e+00   3.5679126e+00   3.1843367e+00   2.9154759e+00   3.3166248e+00   3.1448370e+00   3.2171416e+00   3.3391616e+00   2.5903668e+00   3.1827661e+00   5.1215232e+00   4.2555846e+00   5.1156622e+00   4.6238512e+00   4.9325450e+00   5.8711157e+00   3.8652296e+00   5.4598535e+00   5.0059964e+00   5.3347915e+00   4.1952354e+00   4.4799554e+00   4.6968074e+00   4.2918527e+00   4.4192760e+00   4.4698993e+00   4.5343136e+00   5.8855756e+00   6.3253458e+00   4.2883563e+00   4.9122296e+00   4.0853396e+00   6.0133186e+00   4.0951190e+00   4.7686476e+00   5.0892043e+00   3.9572718e+00   3.9547440e+00   4.7696960e+00   4.9132474e+00   5.3721504e+00   5.6364883e+00   4.8062459e+00   4.1340053e+00   4.6054316e+00   
5.5434646e+00   4.7085029e+00   4.4877611e+00   3.8600518e+00   4.6076024e+00   4.8476799e+00   4.4384682e+00   4.2555846e+00   5.0616203e+00   4.9254441e+00   4.5011110e+00   4.2976738e+00   4.3416587e+00   4.4799554e+00   4.1133928e+00   5.1961524e-01   5.1961524e-01   3.8729833e-01   6.7082039e-01   4.1231056e-01   9.2736185e-01   7.8740079e-01   1.0049876e+00   1.0488088e+00   7.0710678e-01   5.2915026e-01   5.8309519e-01   1.0535654e+00   1.0630146e+00   5.3851648e-01   4.5825757e-01   3.8729833e-01   1.0099505e+00   8.3666003e-01   4.5825757e-01   1.0099505e+00   1.3601471e+00   6.4807407e-01   5.7445626e-01   1.8384776e+00   1.2369317e+00   6.7082039e-01   6.7823300e-01   1.0908712e+00   4.7958315e-01   1.0862780e+00   3.6055513e-01   7.5498344e-01   3.9509493e+00   3.5972211e+00   4.1303753e+00   3.2664966e+00   3.8105118e+00   3.5142567e+00   3.7643060e+00   2.6191602e+00   3.7603191e+00   3.0397368e+00   2.9949958e+00   3.2680269e+00   3.3015148e+00   3.7483330e+00   2.6720778e+00   3.5972211e+00   3.5071356e+00   3.1304952e+00   3.8704005e+00   3.0413813e+00   3.8665230e+00   3.1304952e+00   4.1158231e+00   3.7282704e+00   3.4365681e+00   3.5860842e+00   4.0521599e+00   4.2284749e+00   3.5791060e+00   2.6419690e+00   3.0000000e+00   2.8948230e+00   3.0000000e+00   4.2047592e+00   3.5014283e+00   3.5057096e+00   3.8858718e+00   3.7134889e+00   3.0822070e+00   3.1733263e+00   3.4568772e+00   3.6318040e+00   3.1272992e+00   2.6608269e+00   3.2710854e+00   3.1543621e+00   3.2109189e+00   3.3837849e+00   2.3302360e+00   3.1543621e+00   5.2602281e+00   4.2766810e+00   5.2678269e+00   4.7180504e+00   5.0507425e+00   6.0522723e+00   3.7603191e+00   5.6187187e+00   5.0852729e+00   5.5479726e+00   4.3243497e+00   4.5486262e+00   4.8270074e+00   4.2778499e+00   4.4508426e+00   4.5934736e+00   4.6497312e+00   6.1400326e+00   6.4768820e+00   4.2602817e+00   5.0705029e+00   4.0951190e+00   6.1822326e+00   4.1436699e+00   4.9295030e+00   5.2706736e+00   4.0074930e+00   
4.0274061e+00   4.8569538e+00   5.0734604e+00   5.5226805e+00   5.9016947e+00   4.8928519e+00   4.2035699e+00   4.6551047e+00   5.7227616e+00   4.8528342e+00   4.6086874e+00   3.9217343e+00   4.7528939e+00   4.9819675e+00   4.5760245e+00   4.2766810e+00   5.2172790e+00   5.0813384e+00   4.6173586e+00   4.3255058e+00   4.4485953e+00   4.6162756e+00   4.1785165e+00   7.3484692e-01   3.1622777e-01   4.4721360e-01   2.4494897e-01   6.5574385e-01   4.1231056e-01   6.0000000e-01   5.5677644e-01   2.6457513e-01   1.7320508e-01   1.7320508e-01   5.4772256e-01   5.4772256e-01   3.4641016e-01   6.4807407e-01   8.1240384e-01   5.0000000e-01   3.8729833e-01   4.2426407e-01   5.0000000e-01   8.7177979e-01   1.7320508e-01   1.4142136e-01   1.3453624e+00   7.7459667e-01   3.7416574e-01   5.9160798e-01   5.8309519e-01   3.7416574e-01   5.9160798e-01   3.1622777e-01   2.4494897e-01   3.9749214e+00   3.5818989e+00   4.1340053e+00   3.0594117e+00   3.7589892e+00   3.3852622e+00   3.7496667e+00   2.3130067e+00   3.7215588e+00   2.8478062e+00   2.6758176e+00   3.1890437e+00   3.1224990e+00   3.6687873e+00   2.5396850e+00   3.5958309e+00   3.3985291e+00   2.9849623e+00   3.7349699e+00   2.8530685e+00   3.8131352e+00   3.0413813e+00   4.0162171e+00   3.6318040e+00   3.3852622e+00   3.5651087e+00   4.0187063e+00   4.2107007e+00   3.4957117e+00   2.4637370e+00   2.7874720e+00   2.6739484e+00   2.8618176e+00   4.1024383e+00   3.3749074e+00   3.4813790e+00   3.8794329e+00   3.5888717e+00   2.9647934e+00   2.9866369e+00   3.2832910e+00   3.5637059e+00   2.9782545e+00   2.3558438e+00   3.1192948e+00   3.0430248e+00   3.0919250e+00   3.3136083e+00   2.0493902e+00   3.0232433e+00   5.2421370e+00   4.1689327e+00   5.2668776e+00   4.6572524e+00   5.0179677e+00   6.0646517e+00   3.5510562e+00   5.6089215e+00   5.0169712e+00   5.5991071e+00   4.3162484e+00   4.4833024e+00   4.8155997e+00   4.1484937e+00   4.3680659e+00   4.5814845e+00   4.6119410e+00   6.2088646e+00   6.4668385e+00   4.1109610e+00   
5.0813384e+00   3.9849718e+00   6.1830413e+00   4.0718546e+00   4.9325450e+00   5.2829916e+00   3.9382737e+00   3.9686270e+00   4.8020829e+00   5.0705029e+00   5.5163394e+00   5.9849812e+00   4.8404545e+00   4.1303753e+00   4.5453273e+00   5.7532599e+00   4.8476799e+00   4.5727453e+00   3.8561639e+00   4.7581509e+00   4.9769469e+00   4.5923850e+00   4.1689327e+00   5.2182373e+00   5.0921508e+00   4.6097722e+00   4.2379240e+00   4.4204072e+00   4.6065171e+00   4.1024383e+00   6.3245553e-01   5.0990195e-01   6.4807407e-01   1.3228757e+00   8.0622577e-01   1.0099505e+00   1.0723805e+00   8.1853528e-01   6.2449980e-01   7.1414284e-01   1.1747340e+00   1.1489125e+00   5.4772256e-01   6.4807407e-01   5.4772256e-01   1.1000000e+00   1.0535654e+00   5.4772256e-01   1.1000000e+00   1.5811388e+00   7.5498344e-01   8.6023253e-01   1.9621417e+00   1.4899664e+00   8.2462113e-01   6.4031242e-01   1.2409674e+00   6.1644140e-01   1.2922848e+00   4.6904158e-01   9.1651514e-01   3.5014283e+00   3.1827661e+00   3.6891733e+00   2.9291637e+00   3.3896903e+00   3.1368774e+00   3.3615473e+00   2.3769729e+00   3.3211444e+00   2.7404379e+00   2.7313001e+00   2.8930952e+00   2.9034462e+00   3.3436507e+00   2.3302360e+00   3.1606961e+00   3.1511903e+00   2.7331301e+00   3.4770677e+00   2.6795522e+00   3.5014283e+00   2.7294688e+00   3.7054015e+00   3.3120990e+00   3.0099834e+00   3.1543621e+00   3.6097091e+00   3.8065733e+00   3.1906112e+00   2.2737634e+00   2.6551836e+00   2.5475478e+00   2.6210685e+00   3.8144462e+00   3.1638584e+00   3.1272992e+00   3.4539832e+00   3.3015148e+00   2.7221315e+00   2.8319605e+00   3.0951575e+00   3.2280025e+00   2.7477263e+00   2.4062419e+00   2.9103264e+00   2.7748874e+00   2.8390139e+00   2.9698485e+00   2.0928450e+00   2.7856777e+00   4.8928519e+00   3.9166312e+00   4.8456166e+00   4.3162484e+00   4.6583259e+00   5.6124861e+00   3.4828150e+00   5.1749396e+00   4.6636895e+00   5.1468437e+00   3.9306488e+00   4.1496988e+00   4.4192760e+00   3.9331921e+00   
4.1206796e+00   4.2201896e+00   4.2391037e+00   5.7105166e+00   6.0398675e+00   3.8704005e+00   4.6690470e+00   3.7603191e+00   5.7349804e+00   3.7496667e+00   4.5265881e+00   4.8321838e+00   3.6207734e+00   3.6455452e+00   4.4654227e+00   4.6249324e+00   5.0803543e+00   5.4607692e+00   4.5066617e+00   3.7894591e+00   4.2449971e+00   5.2915026e+00   4.4877611e+00   4.2035699e+00   3.5482390e+00   4.3428102e+00   4.5945620e+00   4.1821047e+00   3.9166312e+00   4.8176758e+00   4.7000000e+00   4.2296572e+00   3.9370039e+00   4.0521599e+00   4.2544095e+00   3.8065733e+00   5.4772256e-01   1.4142136e-01   7.4161985e-01   5.7445626e-01   6.4807407e-01   8.1853528e-01   4.3588989e-01   3.3166248e-01   4.3588989e-01   7.3484692e-01   7.7459667e-01   5.0990195e-01   3.7416574e-01   5.8309519e-01   7.5498344e-01   6.8556546e-01   5.4772256e-01   7.5498344e-01   1.0862780e+00   4.1231056e-01   3.7416574e-01   1.6278821e+00   9.4868330e-01   4.4721360e-01   4.1231056e-01   8.6023253e-01   1.4142136e-01   7.9372539e-01   2.4494897e-01   5.2915026e-01   3.9268308e+00   3.5341194e+00   4.0902323e+00   3.1080541e+00   3.7429935e+00   3.3704599e+00   3.6905284e+00   2.3937418e+00   3.6972963e+00   2.8618176e+00   2.7820855e+00   3.1638584e+00   3.1796226e+00   3.6414283e+00   2.5436195e+00   3.5594943e+00   3.3660065e+00   2.9916551e+00   3.7696154e+00   2.8879058e+00   3.7603191e+00   3.0413813e+00   4.0162171e+00   3.6124784e+00   3.3674916e+00   3.5369478e+00   3.9987498e+00   4.1725292e+00   3.4727511e+00   2.5079872e+00   2.8372522e+00   2.7294688e+00   2.8757608e+00   4.0828911e+00   3.3421550e+00   3.4146742e+00   3.8379682e+00   3.6193922e+00   2.9410882e+00   3.0166206e+00   3.2893768e+00   3.5298725e+00   2.9983329e+00   2.4474477e+00   3.1224990e+00   3.0166206e+00   3.0757113e+00   3.2954514e+00   2.1400935e+00   3.0199338e+00   5.1749396e+00   4.1496988e+00   5.2191953e+00   4.6162756e+00   4.9699095e+00   6.0116553e+00   3.5623026e+00   5.5623736e+00   4.9989999e+00   
5.5181519e+00   4.2626283e+00   4.4609416e+00   4.7717921e+00   4.1460825e+00   4.3428102e+00   4.5265881e+00   4.5661800e+00   6.1163715e+00   6.4311741e+00   4.1303753e+00   5.0239427e+00   3.9623226e+00   6.1392182e+00   4.0570926e+00   4.8672374e+00   5.2220686e+00   3.9179076e+00   3.9306488e+00   4.7686476e+00   5.0229473e+00   5.4781384e+00   5.8940648e+00   4.8072861e+00   4.1036569e+00   4.5232732e+00   5.7061370e+00   4.7770284e+00   4.5199558e+00   3.8196859e+00   4.7095647e+00   4.9264592e+00   4.5486262e+00   4.1496988e+00   5.1584882e+00   5.0289164e+00   4.5705580e+00   4.2355637e+00   4.3794977e+00   4.5365185e+00   4.0607881e+00   5.0990195e-01   1.0816654e+00   4.3588989e-01   6.3245553e-01   5.7445626e-01   4.5825757e-01   3.0000000e-01   3.6055513e-01   7.3484692e-01   6.7823300e-01   2.8284271e-01   7.6157731e-01   8.6023253e-01   6.2449980e-01   6.7082039e-01   4.2426407e-01   6.2449980e-01   1.1489125e+00   3.6055513e-01   5.8309519e-01   1.4798649e+00   1.0954451e+00   5.8309519e-01   5.7445626e-01   7.8740079e-01   5.0990195e-01   8.7749644e-01   3.7416574e-01   5.0990195e-01   3.6110940e+00   3.2511536e+00   3.7775654e+00   2.7784888e+00   3.4161382e+00   3.0822070e+00   3.4322005e+00   2.1095023e+00   3.3630343e+00   2.6095977e+00   2.4494897e+00   2.8896367e+00   2.7802878e+00   3.3436507e+00   2.2605309e+00   3.2419130e+00   3.1192948e+00   2.6551836e+00   3.4073450e+00   2.5495098e+00   3.5298725e+00   2.7110883e+00   3.6810325e+00   3.2939338e+00   3.0364453e+00   3.2140317e+00   3.6565011e+00   3.8716921e+00   3.1843367e+00   2.1470911e+00   2.4959968e+00   2.3769729e+00   2.5475478e+00   3.7907783e+00   3.1128765e+00   3.1874755e+00   3.5312887e+00   3.2434549e+00   2.6776856e+00   2.7055499e+00   2.9899833e+00   3.2403703e+00   2.6627054e+00   2.1377558e+00   2.8266588e+00   2.7386128e+00   2.7928480e+00   2.9765752e+00   1.8439089e+00   2.7239677e+00   4.9598387e+00   3.8858718e+00   4.9295030e+00   4.3393548e+00   4.7095647e+00   
5.7113921e+00   3.3391616e+00   5.2516664e+00   4.6765372e+00   5.2848841e+00   4.0062451e+00   4.1641326e+00   4.4911023e+00   3.8768544e+00   4.1133928e+00   4.2906876e+00   4.2860238e+00   5.8694122e+00   6.1139185e+00   3.7920970e+00   4.7644517e+00   3.7255872e+00   5.8215118e+00   3.7549967e+00   4.6162756e+00   4.9325450e+00   3.6290495e+00   3.6674242e+00   4.4922155e+00   4.7085029e+00   5.1584882e+00   5.6338264e+00   4.5354162e+00   3.7973675e+00   4.2166337e+00   5.4055527e+00   4.5672749e+00   4.2532341e+00   3.5623026e+00   4.4317040e+00   4.6722586e+00   4.2790186e+00   3.8858718e+00   4.9040799e+00   4.7947888e+00   4.3023250e+00   3.9242834e+00   4.1060930e+00   4.3289722e+00   3.8118237e+00   7.4161985e-01   4.5825757e-01   6.1644140e-01   7.4161985e-01   3.3166248e-01   3.0000000e-01   3.8729833e-01   6.7823300e-01   7.0710678e-01   4.2426407e-01   5.0990195e-01   6.7823300e-01   7.0000000e-01   6.2449980e-01   5.2915026e-01   7.0000000e-01   1.0295630e+00   3.6055513e-01   3.1622777e-01   1.5394804e+00   9.0553851e-01   3.1622777e-01   4.1231056e-01   7.7459667e-01   2.4494897e-01   7.4161985e-01   2.8284271e-01   4.6904158e-01   3.8858718e+00   3.4856850e+00   4.0459857e+00   3.0298515e+00   3.6864617e+00   3.3136083e+00   3.6441734e+00   2.3086793e+00   3.6482873e+00   2.7874720e+00   2.6944387e+00   3.1032241e+00   3.1096624e+00   3.5888717e+00   2.4718414e+00   3.5114100e+00   3.3090784e+00   2.9342802e+00   3.6972963e+00   2.8178006e+00   3.7067506e+00   2.9782545e+00   3.9560081e+00   3.5623026e+00   3.3136083e+00   3.4856850e+00   3.9484174e+00   4.1218928e+00   3.4146742e+00   2.4351591e+00   2.7622455e+00   2.6551836e+00   2.8089144e+00   4.0261644e+00   3.2848135e+00   3.3674916e+00   3.7907783e+00   3.5524639e+00   2.8827071e+00   2.9427878e+00   3.2280025e+00   3.4785054e+00   2.9308702e+00   2.3600847e+00   3.0577770e+00   2.9631065e+00   3.0166206e+00   3.2403703e+00   2.0445048e+00   2.9563491e+00   5.1244512e+00   4.0865633e+00   
5.1710734e+00   4.5661800e+00   4.9173163e+00   5.9699246e+00   3.4885527e+00   5.5208695e+00   4.9446941e+00   5.4763126e+00   4.2107007e+00   4.4022721e+00   4.7191101e+00   4.0755368e+00   4.2731721e+00   4.4710178e+00   4.5177428e+00   6.0868711e+00   6.3827894e+00   4.0644803e+00   4.9739320e+00   3.8961519e+00   6.0967204e+00   3.9949969e+00   4.8218254e+00   5.1836281e+00   3.8561639e+00   3.8742741e+00   4.7116876e+00   4.9829710e+00   5.4323107e+00   5.8668561e+00   4.7486840e+00   4.0521599e+00   4.4743715e+00   5.6586217e+00   4.7265209e+00   4.4732538e+00   3.7616486e+00   4.6583259e+00   4.8713448e+00   4.4911023e+00   4.0865633e+00   5.1097945e+00   4.9769469e+00   4.5110974e+00   4.1689327e+00   4.3243497e+00   4.4855323e+00   4.0062451e+00   9.5916630e-01   9.4339811e-01   9.3808315e-01   7.7459667e-01   7.8740079e-01   7.4833148e-01   7.2801099e-01   8.0622577e-01   9.8488578e-01   9.3273791e-01   1.1532563e+00   7.7459667e-01   6.0000000e-01   9.5393920e-01   7.7459667e-01   7.0000000e-01   7.3484692e-01   5.1961524e-01   1.3416408e+00   5.3851648e-01   8.3066239e-01   1.0677078e+00   7.5498344e-01   8.0622577e-01   5.6568542e-01   8.6602540e-01   6.4031242e-01   4.5880279e+00   4.1641326e+00   4.7370877e+00   3.5651087e+00   4.3474130e+00   3.9127995e+00   4.3162484e+00   2.7313001e+00   4.3197222e+00   3.3196385e+00   3.1000000e+00   3.7389838e+00   3.6823905e+00   4.2272923e+00   3.0757113e+00   4.2023803e+00   3.9115214e+00   3.5355339e+00   4.2965102e+00   3.3808283e+00   4.3416587e+00   3.6193922e+00   4.5825757e+00   4.1928511e+00   3.9786933e+00   4.1665333e+00   4.6216880e+00   4.7979162e+00   4.0484565e+00   3.0166206e+00   3.3015148e+00   3.1906112e+00   3.4146742e+00   4.6411206e+00   3.8652296e+00   4.0261644e+00   4.4766059e+00   4.1653331e+00   3.4899857e+00   3.4971417e+00   3.7907783e+00   4.1243181e+00   3.5270384e+00   2.7892651e+00   3.6414283e+00   3.5791060e+00   3.6262929e+00   3.8923001e+00   2.5039968e+00   3.5594943e+00   
5.7680153e+00   4.6850827e+00   5.8506410e+00   5.2057660e+00   5.5686623e+00   6.6580778e+00   3.9749214e+00   6.1991935e+00   5.5874860e+00   6.1692787e+00   4.8805737e+00   5.0428167e+00   5.3907328e+00   4.6540305e+00   4.8713448e+00   5.1283526e+00   5.1749396e+00   6.7926431e+00   7.0590368e+00   4.6486557e+00   5.6524331e+00   4.4821870e+00   6.7808554e+00   4.6335731e+00   5.4954527e+00   5.8719673e+00   4.4944410e+00   4.5144213e+00   5.3525695e+00   5.6674509e+00   6.1139185e+00   6.5825527e+00   5.3888774e+00   4.6936127e+00   5.0842895e+00   6.3553127e+00   5.3786615e+00   5.1283526e+00   4.3954522e+00   5.3394756e+00   5.5371473e+00   5.1730069e+00   4.6850827e+00   5.7810034e+00   5.6462377e+00   5.1788030e+00   4.7947888e+00   4.9849774e+00   5.1351728e+00   4.6281746e+00   4.7958315e-01   4.4721360e-01   2.0000000e-01   4.2426407e-01   4.4721360e-01   5.1961524e-01   4.7958315e-01   3.8729833e-01   9.2195445e-01   1.0723805e+00   5.2915026e-01   6.0000000e-01   6.7082039e-01   5.2915026e-01   9.1104336e-01   3.7416574e-01   5.0000000e-01   1.2489996e+00   8.6602540e-01   2.6457513e-01   5.4772256e-01   5.5677644e-01   5.9160798e-01   6.6332496e-01   5.7445626e-01   4.3588989e-01   3.6646964e+00   3.2465366e+00   3.8105118e+00   2.6627054e+00   3.4088121e+00   3.0149627e+00   3.4132096e+00   1.9131126e+00   3.3852622e+00   2.4535688e+00   2.2781571e+00   2.8248894e+00   2.7495454e+00   3.3120990e+00   2.1587033e+00   3.2710854e+00   3.0298515e+00   2.6191602e+00   3.3555923e+00   2.4677925e+00   3.4568772e+00   2.6795522e+00   3.6496575e+00   3.2771939e+00   3.0413813e+00   3.2310989e+00   3.6823905e+00   3.8704005e+00   3.1320920e+00   2.0832667e+00   2.3958297e+00   2.2847319e+00   2.4859606e+00   3.7336309e+00   3.0033315e+00   3.1416556e+00   3.5496479e+00   3.2202484e+00   2.5961510e+00   2.5942244e+00   2.9034462e+00   3.2109189e+00   2.6000000e+00   1.9544820e+00   2.7386128e+00   2.6814175e+00   2.7221315e+00   2.9614186e+00   1.6401219e+00   
2.6476405e+00   4.8918299e+00   3.7907783e+00   4.9284886e+00   4.3011626e+00   4.6636895e+00   5.7367238e+00   3.1559468e+00   5.2773099e+00   4.6583259e+00   5.2782573e+00   3.9724048e+00   4.1194660e+00   4.4698993e+00   3.7603191e+00   3.9887341e+00   4.2308392e+00   4.2638011e+00   5.9076222e+00   6.1261734e+00   3.7296112e+00   4.7423623e+00   3.6041643e+00   5.8532043e+00   3.7054015e+00   4.5956501e+00   4.9598387e+00   3.5721142e+00   3.6083237e+00   4.4395946e+00   4.7455242e+00   5.1826634e+00   5.6947344e+00   4.4766059e+00   3.7749172e+00   4.1844952e+00   5.4267854e+00   4.5022217e+00   4.2261093e+00   3.4928498e+00   4.4192760e+00   4.6281746e+00   4.2520583e+00   3.7907783e+00   4.8764741e+00   4.7497368e+00   4.2591079e+00   3.8639358e+00   4.0681691e+00   4.2602817e+00   3.7389838e+00   5.3851648e-01   4.1231056e-01   5.7445626e-01   6.4031242e-01   3.7416574e-01   4.2426407e-01   7.4833148e-01   9.0553851e-01   1.1747340e+00   5.1961524e-01   7.5498344e-01   9.2736185e-01   5.1961524e-01   8.2462113e-01   5.0000000e-01   6.4807407e-01   1.2922848e+00   7.4833148e-01   5.4772256e-01   5.3851648e-01   6.4807407e-01   5.8309519e-01   5.7445626e-01   7.0710678e-01   5.4772256e-01   3.7629775e+00   3.3241540e+00   3.8974351e+00   2.7055499e+00   3.4971417e+00   3.0232433e+00   3.4727511e+00   1.9000000e+00   3.4626579e+00   2.4677925e+00   2.2803509e+00   2.8896367e+00   2.8160256e+00   3.3496268e+00   2.2338308e+00   3.3749074e+00   3.0413813e+00   2.6400758e+00   3.4423829e+00   2.5019992e+00   3.4957117e+00   2.7694765e+00   3.7080992e+00   3.3000000e+00   3.1272992e+00   3.3301652e+00   3.7696154e+00   3.9534795e+00   3.1843367e+00   2.1563859e+00   2.4310492e+00   2.3173260e+00   2.5475478e+00   3.7589892e+00   2.9949958e+00   3.1874755e+00   3.6373067e+00   3.3045423e+00   2.6172505e+00   2.6305893e+00   2.8948230e+00   3.2526912e+00   2.6551836e+00   1.9621417e+00   2.7622455e+00   2.6944387e+00   2.7495454e+00   3.0298515e+00   1.7088007e+00   
2.6870058e+00   4.9355851e+00   3.8236109e+00   5.0059964e+00   4.3301270e+00   4.7180504e+00   5.8051701e+00   3.1352831e+00   5.3310412e+00   4.7106263e+00   5.3600373e+00   4.0509258e+00   4.1833001e+00   4.5530210e+00   3.8039453e+00   4.0546270e+00   4.3092923e+00   4.3092923e+00   5.9674115e+00   6.2016127e+00   3.7656341e+00   4.8270074e+00   3.6386811e+00   5.9203040e+00   3.7815341e+00   4.6551047e+00   5.0169712e+00   3.6455452e+00   3.6619667e+00   4.4966654e+00   4.8052055e+00   5.2583267e+00   5.7671483e+00   4.5398238e+00   3.8131352e+00   4.1785165e+00   5.5335341e+00   4.5585085e+00   4.2626283e+00   3.5454196e+00   4.5122057e+00   4.7148701e+00   4.3760713e+00   3.8236109e+00   4.9446941e+00   4.8321838e+00   4.3669211e+00   3.9446166e+00   4.1448764e+00   4.3150898e+00   3.7643060e+00   4.4721360e-01   5.4772256e-01   4.8989795e-01   3.6055513e-01   2.2360680e-01   6.0827625e-01   1.1269428e+00   1.3152946e+00   2.0000000e-01   4.4721360e-01   7.6811457e-01   2.0000000e-01   6.7082039e-01   4.2426407e-01   5.9160798e-01   9.1651514e-01   7.0000000e-01   6.4031242e-01   8.8317609e-01   3.0000000e-01   8.0622577e-01   4.8989795e-01   7.6811457e-01   3.6055513e-01   3.8845849e+00   3.4785054e+00   4.0249224e+00   2.7766887e+00   3.6027767e+00   3.1859065e+00   3.6537652e+00   1.9748418e+00   3.5749126e+00   2.6191602e+00   2.2912878e+00   3.0430248e+00   2.8354894e+00   3.5028560e+00   2.3622024e+00   3.4899857e+00   3.2341923e+00   2.7604347e+00   3.4899857e+00   2.5903668e+00   3.6945906e+00   2.8670542e+00   3.8105118e+00   3.4438351e+00   3.2357379e+00   3.4409301e+00   3.8678159e+00   4.0865633e+00   3.3331667e+00   2.2135944e+00   2.5019992e+00   2.3790755e+00   2.6495283e+00   3.9115214e+00   3.2031235e+00   3.3955854e+00   3.7682887e+00   3.3511192e+00   2.7964263e+00   2.7331301e+00   3.0413813e+00   3.4132096e+00   2.7495454e+00   2.0049938e+00   2.9017236e+00   2.8722813e+00   2.9103264e+00   3.1543621e+00   1.7406895e+00   2.8266588e+00   
5.1410116e+00   3.9837169e+00   5.1487863e+00   4.5011110e+00   4.8877398e+00   5.9472683e+00   3.3045423e+00   5.4726593e+00   4.8311489e+00   5.5443665e+00   4.2166337e+00   4.3162484e+00   4.6968074e+00   3.9420807e+00   4.2154478e+00   4.4833024e+00   4.4743715e+00   6.1595454e+00   6.3206012e+00   3.8587563e+00   4.9869831e+00   3.8118237e+00   6.0481402e+00   3.9025633e+00   4.8373546e+00   5.1768716e+00   3.7788887e+00   3.8288379e+00   4.6486557e+00   4.9436828e+00   5.3795911e+00   5.9439044e+00   4.6904158e+00   3.9585351e+00   4.3370497e+00   5.6524331e+00   4.7634021e+00   4.4429720e+00   3.7148351e+00   4.6551047e+00   4.8723711e+00   4.5033321e+00   3.9837169e+00   5.1166395e+00   5.0079936e+00   4.5011110e+00   4.0484565e+00   4.2953463e+00   4.5221676e+00   3.9522146e+00   3.1622777e-01   3.4641016e-01   4.1231056e-01   4.1231056e-01   4.1231056e-01   7.9372539e-01   9.8488578e-01   4.4721360e-01   4.8989795e-01   6.2449980e-01   4.4721360e-01   8.0622577e-01   2.4494897e-01   3.3166248e-01   1.2489996e+00   7.2801099e-01   2.2360680e-01   5.0990195e-01   5.0000000e-01   4.5825757e-01   5.2915026e-01   4.7958315e-01   3.0000000e-01   3.8275318e+00   3.4088121e+00   3.9749214e+00   2.8337255e+00   3.5805028e+00   3.1733263e+00   3.5707142e+00   2.0639767e+00   3.5524639e+00   2.6115130e+00   2.4351591e+00   2.9899833e+00   2.9257478e+00   3.4741906e+00   2.3280893e+00   3.4380227e+00   3.1843367e+00   2.7820855e+00   3.5355339e+00   2.6362853e+00   3.6124784e+00   2.8530685e+00   3.8209946e+00   3.4380227e+00   3.2109189e+00   3.4000000e+00   3.8522721e+00   4.0373258e+00   3.2969683e+00   2.2583180e+00   2.5651511e+00   2.4535688e+00   2.6570661e+00   3.8961519e+00   3.1527766e+00   3.2939338e+00   3.7148351e+00   3.3985291e+00   2.7531800e+00   2.7622455e+00   3.0610456e+00   3.3719431e+00   2.7712813e+00   2.1118712e+00   2.9017236e+00   2.8372522e+00   2.8827071e+00   3.1288976e+00   1.8083141e+00   2.8124722e+00   5.0467812e+00   3.9534795e+00   
5.0941143e+00   4.4609416e+00   4.8259714e+00   5.9000000e+00   3.3045423e+00   5.4396691e+00   4.8270074e+00   5.4350713e+00   4.1352146e+00   4.2883563e+00   4.6368092e+00   3.9268308e+00   4.1533119e+00   4.3931765e+00   4.4249294e+00   6.0580525e+00   6.2952363e+00   3.9000000e+00   4.9061186e+00   3.7643060e+00   6.0183054e+00   3.8768544e+00   4.7539457e+00   5.1185936e+00   3.7416574e+00   3.7709415e+00   4.6054316e+00   4.9071377e+00   5.3497664e+00   5.8455111e+00   4.6432747e+00   3.9382737e+00   4.3416587e+00   5.5955339e+00   4.6572524e+00   4.3840620e+00   3.6551334e+00   4.5858478e+00   4.7937459e+00   4.4226689e+00   3.9534795e+00   5.0378567e+00   4.9112117e+00   4.4294469e+00   4.0385641e+00   4.2343831e+00   4.4147480e+00   3.8961519e+00   1.4142136e-01   5.9160798e-01   5.7445626e-01   3.0000000e-01   6.0827625e-01   7.6811457e-01   5.0990195e-01   4.6904158e-01   3.6055513e-01   5.0990195e-01   9.6436508e-01   1.4142136e-01   3.0000000e-01   1.4071247e+00   8.7749644e-01   4.5825757e-01   5.4772256e-01   6.5574385e-01   3.3166248e-01   6.7823300e-01   2.2360680e-01   3.0000000e-01   3.8742741e+00   3.4957117e+00   4.0373258e+00   2.9983329e+00   3.6715120e+00   3.3090784e+00   3.6674242e+00   2.2759613e+00   3.6249138e+00   2.8000000e+00   2.6324893e+00   3.1176915e+00   3.0364453e+00   3.5846897e+00   2.4779023e+00   3.5014283e+00   3.3316662e+00   2.8982753e+00   3.6578682e+00   2.7802878e+00   3.7456642e+00   2.9597297e+00   3.9319207e+00   3.5411862e+00   3.2939338e+00   3.4727511e+00   3.9217343e+00   4.1231056e+00   3.4190642e+00   2.3874673e+00   2.7202941e+00   2.6038433e+00   2.7856777e+00   4.0249224e+00   3.3136083e+00   3.4073450e+00   3.7868192e+00   3.5028560e+00   2.8948230e+00   2.9240383e+00   3.2109189e+00   3.4799425e+00   2.9017236e+00   2.3151674e+00   3.0495901e+00   2.9647934e+00   3.0182777e+00   3.2264532e+00   2.0174241e+00   2.9512709e+00   5.1759057e+00   4.1048752e+00   5.1797683e+00   4.5760245e+00   4.9426713e+00   
5.9690870e+00   3.5128336e+00   5.5108983e+00   4.9295030e+00   5.5190579e+00   4.2402830e+00   4.4056782e+00   4.7349762e+00   4.0914545e+00   4.3185646e+00   4.5144213e+00   4.5276926e+00   6.1139185e+00   6.3741666e+00   4.0336088e+00   5.0029991e+00   3.9306488e+00   6.0844063e+00   3.9962482e+00   4.8518038e+00   5.1865210e+00   3.8652296e+00   3.8961519e+00   4.7275787e+00   4.9699095e+00   5.4203321e+00   5.8847260e+00   4.7686476e+00   4.0435133e+00   4.4575778e+00   5.6630381e+00   4.7822589e+00   4.4899889e+00   3.7868192e+00   4.6765372e+00   4.9050994e+00   4.5188494e+00   4.1048752e+00   5.1400389e+00   5.0219518e+00   4.5387223e+00   4.1653331e+00   4.3439613e+00   4.5420260e+00   4.0323690e+00   5.7445626e-01   5.3851648e-01   3.0000000e-01   7.1414284e-01   8.5440037e-01   4.4721360e-01   3.4641016e-01   3.3166248e-01   4.4721360e-01   9.0000000e-01   1.4142136e-01   2.6457513e-01   1.3114877e+00   8.3066239e-01   5.0000000e-01   6.7823300e-01   5.7445626e-01   4.5825757e-01   6.3245553e-01   3.3166248e-01   2.2360680e-01   3.9509493e+00   3.5749126e+00   4.1133928e+00   3.0446675e+00   3.7389838e+00   3.3808283e+00   3.7509999e+00   2.3108440e+00   3.6959437e+00   2.8600699e+00   2.6551836e+00   3.1906112e+00   3.0789609e+00   3.6592349e+00   2.5416530e+00   3.5749126e+00   3.4088121e+00   2.9631065e+00   3.7067506e+00   2.8337255e+00   3.8275318e+00   3.0232433e+00   3.9949969e+00   3.6138622e+00   3.3630343e+00   3.5440090e+00   3.9899875e+00   4.1976184e+00   3.4914181e+00   2.4372115e+00   2.7676705e+00   2.6495283e+00   2.8460499e+00   4.0963398e+00   3.3911650e+00   3.4942810e+00   3.8626416e+00   3.5538711e+00   2.9698485e+00   2.9782545e+00   3.2756679e+00   3.5566838e+00   2.9597297e+00   2.3452079e+00   3.1144823e+00   3.0413813e+00   3.0903074e+00   3.2969683e+00   2.0469489e+00   3.0182777e+00   5.2602281e+00   4.1749251e+00   5.2564246e+00   4.6540305e+00   5.0209561e+00   6.0473135e+00   3.5721142e+00   5.5883808e+00   4.9979996e+00   
5.6053546e+00   4.3197222e+00   4.4754888e+00   4.8104054e+00   4.1545156e+00   4.3874822e+00   4.5934736e+00   4.6065171e+00   6.2048368e+00   6.4459289e+00   4.0902323e+00   5.0823223e+00   4.0012498e+00   6.1595454e+00   4.0632499e+00   4.9355851e+00   5.2687759e+00   3.9344631e+00   3.9724048e+00   4.8010416e+00   5.0477718e+00   5.4936327e+00   5.9741108e+00   4.8414874e+00   4.1170378e+00   4.5310043e+00   5.7367238e+00   4.8672374e+00   4.5716518e+00   3.8626416e+00   4.7528939e+00   4.9819675e+00   4.5912961e+00   4.1749251e+00   5.2211110e+00   5.1029403e+00   4.6108568e+00   4.2272923e+00   4.4192760e+00   4.6270941e+00   4.1109610e+00   1.4142136e-01   7.6157731e-01   1.0392305e+00   1.2961481e+00   2.6457513e-01   5.0000000e-01   9.0553851e-01   2.6457513e-01   4.6904158e-01   4.5825757e-01   5.2915026e-01   9.7467943e-01   4.2426407e-01   5.8309519e-01   8.0622577e-01   3.1622777e-01   7.2111026e-01   2.2360680e-01   7.8740079e-01   3.7416574e-01   4.0422766e+00   3.6041643e+00   4.1749251e+00   2.9017236e+00   3.7536649e+00   3.2832910e+00   3.7603191e+00   2.0518285e+00   3.7296112e+00   2.6888659e+00   2.4041631e+00   3.1511903e+00   3.0149627e+00   3.6193922e+00   2.4718414e+00   3.6455452e+00   3.3090784e+00   2.8896367e+00   3.6537652e+00   2.7202941e+00   3.7735925e+00   3.0149627e+00   3.9534795e+00   3.5679126e+00   3.3882149e+00   3.5958309e+00   4.0311289e+00   4.2249260e+00   3.4467376e+00   2.3685439e+00   2.6324893e+00   2.5159491e+00   2.7838822e+00   4.0187063e+00   3.2603681e+00   3.4785054e+00   3.9127995e+00   3.5242020e+00   2.8827071e+00   2.8460499e+00   3.1368774e+00   3.5270384e+00   2.8861739e+00   2.1047565e+00   3.0049958e+00   2.9664794e+00   3.0099834e+00   3.2924155e+00   1.8493242e+00   2.9359837e+00   5.2172790e+00   4.0743098e+00   5.2820451e+00   4.6054316e+00   4.9919936e+00   6.0876925e+00   3.3451457e+00   5.6124861e+00   4.9689033e+00   5.6524331e+00   4.3278170e+00   4.4407207e+00   4.8238988e+00   4.0360872e+00   
4.2965102e+00   4.5814845e+00   4.5880279e+00   6.2745518e+00   6.4699304e+00   3.9924930e+00   5.1048996e+00   3.8858718e+00   6.1975802e+00   4.0323690e+00   4.9426713e+00   5.3075418e+00   3.9000000e+00   3.9306488e+00   4.7602521e+00   5.0882217e+00   5.5308227e+00   6.0728906e+00   4.8010416e+00   4.0816663e+00   4.4452222e+00   5.8051701e+00   4.8414874e+00   4.5464272e+00   3.8118237e+00   4.7853944e+00   4.9849774e+00   4.6378875e+00   4.0743098e+00   5.2258971e+00   5.1097945e+00   4.6270941e+00   4.1833001e+00   4.4136153e+00   4.5978256e+00   4.0360872e+00   7.0710678e-01   1.0862780e+00   1.3190906e+00   1.7320508e-01   4.5825757e-01   8.6023253e-01   1.7320508e-01   5.0990195e-01   4.3588989e-01   5.4772256e-01   9.1104336e-01   5.0990195e-01   6.0000000e-01   8.4261498e-01   2.4494897e-01   7.6157731e-01   3.0000000e-01   7.8740079e-01   3.4641016e-01   3.9874804e+00   3.5594943e+00   4.1218928e+00   2.8460499e+00   3.6972963e+00   3.2434549e+00   3.7229021e+00   2.0074860e+00   3.6728735e+00   2.6551836e+00   2.3452079e+00   3.1096624e+00   2.9410882e+00   3.5749126e+00   2.4269322e+00   3.5902646e+00   3.2787193e+00   2.8372522e+00   3.5874782e+00   2.6645825e+00   3.7443290e+00   2.9580399e+00   3.8974351e+00   3.5199432e+00   3.3316662e+00   3.5397740e+00   3.9711459e+00   4.1749251e+00   3.4029399e+00   2.3043437e+00   2.5748786e+00   2.4556058e+00   2.7294688e+00   3.9761791e+00   3.2357379e+00   3.4496377e+00   3.8613469e+00   3.4554305e+00   2.8478062e+00   2.7964263e+00   3.0951575e+00   3.4842503e+00   2.8301943e+00   2.0518285e+00   2.9614186e+00   2.9291637e+00   2.9698485e+00   3.2403703e+00   1.7944358e+00   2.8913665e+00   5.1903757e+00   4.0373258e+00   5.2345009e+00   4.5661800e+00   4.9537864e+00   6.0382117e+00   3.3211444e+00   5.5623736e+00   4.9162994e+00   5.6169387e+00   4.2883563e+00   4.3931765e+00   4.7780749e+00   3.9962482e+00   4.2638011e+00   4.5464272e+00   4.5464272e+00   6.2377881e+00   6.4156060e+00   3.9370039e+00   
5.0635956e+00   3.8548671e+00   6.1441029e+00   3.9824616e+00   4.9061186e+00   5.2621288e+00   3.8535698e+00   3.8923001e+00   4.7180504e+00   5.0368641e+00   5.4763126e+00   6.0315835e+00   4.7592016e+00   4.0348482e+00   4.4022721e+00   5.7515215e+00   4.8145612e+00   4.5088801e+00   3.7749172e+00   4.7391982e+00   4.9446941e+00   4.5902070e+00   4.0373258e+00   5.1874849e+00   5.0744458e+00   4.5814845e+00   4.1303753e+00   4.3703547e+00   4.5716518e+00   4.0037482e+00   7.8740079e-01   8.3666003e-01   6.5574385e-01   5.7445626e-01   3.1622777e-01   6.5574385e-01   1.1135529e+00   3.6055513e-01   4.6904158e-01   1.4387495e+00   1.0583005e+00   4.6904158e-01   6.4031242e-01   7.3484692e-01   5.4772256e-01   8.5440037e-01   3.7416574e-01   4.6904158e-01   3.7202150e+00   3.3541020e+00   3.8871583e+00   2.8774989e+00   3.5199432e+00   3.2031235e+00   3.5355339e+00   2.2022716e+00   3.4799425e+00   2.7000000e+00   2.5455844e+00   2.9849623e+00   2.9000000e+00   3.4612137e+00   2.3473389e+00   3.3451457e+00   3.2264532e+00   2.7874720e+00   3.5057096e+00   2.6645825e+00   3.6249138e+00   2.8124722e+00   3.7934153e+00   3.4249088e+00   3.1464265e+00   3.3181320e+00   3.7696154e+00   3.9736633e+00   3.2893768e+00   2.2561028e+00   2.6057628e+00   2.4919872e+00   2.6551836e+00   3.9051248e+00   3.2202484e+00   3.2863353e+00   3.6373067e+00   3.3526109e+00   2.7874720e+00   2.8071338e+00   3.1144823e+00   3.3555923e+00   2.7730849e+00   2.2293497e+00   2.9376862e+00   2.8600699e+00   2.9051678e+00   3.0886890e+00   1.9078784e+00   2.8319605e+00   5.0477718e+00   3.9824616e+00   5.0299105e+00   4.4530888e+00   4.8062459e+00   5.8223707e+00   3.4278273e+00   5.3721504e+00   4.7906158e+00   5.3712196e+00   4.0951190e+00   4.2638011e+00   4.5836667e+00   3.9635842e+00   4.1809090e+00   4.3692105e+00   4.3965896e+00   5.9774577e+00   6.2209324e+00   3.9064050e+00   4.8518038e+00   3.8105118e+00   5.9371710e+00   3.8496753e+00   4.7148701e+00   5.0487622e+00   3.7215588e+00   
3.7643060e+00   4.5891176e+00   4.8301139e+00   5.2697249e+00   5.7428216e+00   4.6270941e+00   3.9166312e+00   4.3520110e+00   5.4972721e+00   4.6497312e+00   4.3646306e+00   3.6565011e+00   4.5210618e+00   4.7528939e+00   4.3485630e+00   3.9824616e+00   4.9969991e+00   4.8733972e+00   4.3760713e+00   4.0149720e+00   4.1976184e+00   4.4113490e+00   3.9153544e+00   3.4641016e-01   1.0440307e+00   9.7467943e-01   7.0710678e-01   1.0440307e+00   1.3784049e+00   7.1414284e-01   6.9282032e-01   1.9519221e+00   1.2247449e+00   8.1240384e-01   5.9160798e-01   1.1916375e+00   3.4641016e-01   1.0908712e+00   4.2426407e-01   8.3666003e-01   3.9974992e+00   3.6345564e+00   4.1725292e+00   3.3196385e+00   3.8665230e+00   3.5185224e+00   3.7868192e+00   2.6514147e+00   3.8013156e+00   3.0675723e+00   3.0430248e+00   3.3090784e+00   3.3630343e+00   3.7656341e+00   2.7294688e+00   3.6537652e+00   3.5114100e+00   3.1448370e+00   3.9458839e+00   3.0789609e+00   3.8832976e+00   3.1921779e+00   4.1581246e+00   3.7349699e+00   3.4871192e+00   3.6428011e+00   4.1024383e+00   4.2743421e+00   3.6110940e+00   2.7037012e+00   3.0446675e+00   2.9376862e+00   3.0479501e+00   4.2201896e+00   3.4942810e+00   3.5185224e+00   3.9306488e+00   3.7815341e+00   3.0935417e+00   3.2155870e+00   3.4583233e+00   3.6496575e+00   3.1733263e+00   2.7073973e+00   3.2939338e+00   3.1559468e+00   3.2280025e+00   3.4234486e+00   2.4124676e+00   3.1843367e+00   5.2782573e+00   4.3034870e+00   5.3084838e+00   4.7275787e+00   5.0793700e+00   6.0811183e+00   3.7696154e+00   5.6373753e+00   5.1176166e+00   5.5830099e+00   4.3669211e+00   4.5912961e+00   4.8754487e+00   4.3208795e+00   4.5055521e+00   4.6400431e+00   4.6679760e+00   6.1473572e+00   6.5192024e+00   4.2965102e+00   5.1166395e+00   4.1255303e+00   6.2120850e+00   4.1976184e+00   4.9527770e+00   5.2867760e+00   4.0583248e+00   4.0583248e+00   4.8928519e+00   5.0941143e+00   5.5614746e+00   5.9160798e+00   4.9345719e+00   4.2213742e+00   4.6432747e+00   
5.7844619e+00   4.8785244e+00   4.6184413e+00   3.9534795e+00   4.8062459e+00   5.0348784e+00   4.6572524e+00   4.3034870e+00   5.2507142e+00   5.1273775e+00   4.6893496e+00   4.3886217e+00   4.4944410e+00   4.6411206e+00   4.1892720e+00   1.2609520e+00   1.1357817e+00   7.0710678e-01   1.2609520e+00   1.6309506e+00   9.0000000e-01   8.7177979e-01   2.1517435e+00   1.4899664e+00   9.6953597e-01   7.8102497e-01   1.3928388e+00   6.0000000e-01   1.3453624e+00   5.4772256e-01   1.0295630e+00   3.9471509e+00   3.6207734e+00   4.1364236e+00   3.4029399e+00   3.8587563e+00   3.5805028e+00   3.7815341e+00   2.8017851e+00   3.7881394e+00   3.1670175e+00   3.1843367e+00   3.3361655e+00   3.4132096e+00   3.7920970e+00   2.7838822e+00   3.6180105e+00   3.5707142e+00   3.2046841e+00   3.9736633e+00   3.1559468e+00   3.9089641e+00   3.2078030e+00   4.1797129e+00   3.7696154e+00   3.4813790e+00   3.6180105e+00   4.0804412e+00   4.2532341e+00   3.6386811e+00   2.7658633e+00   3.1320920e+00   3.0282008e+00   3.0967725e+00   4.2602817e+00   3.5707142e+00   3.5298725e+00   3.9025633e+00   3.8026307e+00   3.1543621e+00   3.2954514e+00   3.5440090e+00   3.6715120e+00   3.2264532e+00   2.8478062e+00   3.3630343e+00   3.2124757e+00   3.2832910e+00   3.4351128e+00   2.5337719e+00   3.2403703e+00   5.2820451e+00   4.3497126e+00   5.2782573e+00   4.7465777e+00   5.0793700e+00   6.0415230e+00   3.8871583e+00   5.6124861e+00   5.1234754e+00   5.5344376e+00   4.3508620e+00   4.6000000e+00   4.8528342e+00   4.3737855e+00   4.5365185e+00   4.6292548e+00   4.6701178e+00   6.0901560e+00   6.4853681e+00   4.3474130e+00   5.0852729e+00   4.1785165e+00   6.1749494e+00   4.2071368e+00   4.9345719e+00   5.2545219e+00   4.0706265e+00   4.0755368e+00   4.9010203e+00   5.0645829e+00   5.5272054e+00   5.8446557e+00   4.9406477e+00   4.2402830e+00   4.6904158e+00   5.7253821e+00   4.8744230e+00   4.6249324e+00   3.9761791e+00   4.7728398e+00   5.0129831e+00   4.6119410e+00   4.3497126e+00   5.2297227e+00   
5.1019604e+00   4.6615448e+00   4.4022721e+00   4.4855323e+00   4.6411206e+00   4.2249260e+00   3.4641016e-01   7.5498344e-01   0.0000000e+00   5.5677644e-01   3.7416574e-01   5.0000000e-01   9.3808315e-01   5.5677644e-01   6.5574385e-01   8.8317609e-01   2.6457513e-01   7.4161985e-01   3.4641016e-01   7.2801099e-01   2.6457513e-01   4.0435133e+00   3.6359318e+00   4.1856899e+00   2.9478806e+00   3.7709415e+00   3.3421550e+00   3.8065733e+00   2.1307276e+00   3.7389838e+00   2.7748874e+00   2.4556058e+00   3.2031235e+00   3.0133038e+00   3.6619667e+00   2.5258662e+00   3.6523965e+00   3.3852622e+00   2.9223278e+00   3.6687873e+00   2.7586228e+00   3.8457769e+00   3.0364453e+00   3.9799497e+00   3.6027767e+00   3.4014703e+00   3.6055513e+00   4.0348482e+00   4.2497059e+00   3.4942810e+00   2.3874673e+00   2.6720778e+00   2.5495098e+00   2.8178006e+00   4.0718546e+00   3.3496268e+00   3.5425979e+00   3.9293765e+00   3.5284558e+00   2.9495762e+00   2.9000000e+00   3.1984371e+00   3.5707142e+00   2.9189039e+00   2.1679483e+00   3.0626786e+00   3.0248967e+00   3.0675723e+00   3.3181320e+00   1.9104973e+00   2.9883106e+00   5.2924474e+00   4.1436699e+00   5.3113087e+00   4.6583259e+00   5.0467812e+00   6.1081912e+00   3.4525353e+00   5.6329388e+00   4.9979996e+00   5.6973678e+00   4.3749286e+00   4.4821870e+00   4.8600412e+00   4.1060930e+00   4.3760713e+00   4.6411206e+00   4.6324939e+00   6.3071388e+00   6.4876806e+00   4.0286474e+00   5.1468437e+00   3.9686270e+00   6.2112801e+00   4.0706265e+00   4.9919936e+00   5.3329167e+00   3.9446166e+00   3.9874804e+00   4.8114447e+00   5.1029403e+00   5.5443665e+00   6.0917978e+00   4.8538644e+00   4.1194660e+00   4.4933284e+00   5.8180753e+00   4.9142650e+00   4.5978256e+00   3.8729833e+00   4.8176758e+00   5.0338852e+00   4.6690470e+00   4.1436699e+00   5.2744668e+00   5.1652686e+00   4.6669048e+00   4.2201896e+00   4.4575778e+00   4.6722586e+00   4.1060930e+00   5.9160798e-01   3.4641016e-01   6.4031242e-01   3.7416574e-01   
3.3166248e-01   1.0392305e+00   6.0827625e-01   6.4031242e-01   9.4868330e-01   3.6055513e-01   7.2801099e-01   4.4721360e-01   6.5574385e-01   2.2360680e-01   4.2059482e+00   3.8131352e+00   4.3588989e+00   3.1796226e+00   3.9572718e+00   3.5707142e+00   3.9887341e+00   2.3874673e+00   3.9268308e+00   3.0033315e+00   2.7147744e+00   3.3970576e+00   3.2372828e+00   3.8716921e+00   2.7239677e+00   3.8183766e+00   3.6027767e+00   3.1527766e+00   3.8755645e+00   2.9916551e+00   4.0410395e+00   3.2280025e+00   4.1904654e+00   3.8236109e+00   3.5874782e+00   3.7788887e+00   4.2190046e+00   4.4294469e+00   3.6972963e+00   2.6038433e+00   2.9086079e+00   2.7892651e+00   3.0298515e+00   4.2918527e+00   3.5749126e+00   3.7269290e+00   4.1036569e+00   3.7349699e+00   3.1654384e+00   3.1288976e+00   3.4423829e+00   3.7749172e+00   3.1368774e+00   2.4207437e+00   3.2893768e+00   3.2449961e+00   3.2848135e+00   3.5142567e+00   2.1330729e+00   3.2046841e+00   5.4799635e+00   4.3577517e+00   5.4909016e+00   4.8682646e+00   5.2392748e+00   6.2904690e+00   3.6932371e+00   5.8266629e+00   5.2057660e+00   5.8566202e+00   4.5497253e+00   4.6808119e+00   5.0378567e+00   4.3197222e+00   4.5661800e+00   4.8145612e+00   4.8311489e+00   6.4730209e+00   6.6745786e+00   4.2579338e+00   5.3169540e+00   4.1773197e+00   6.3984373e+00   4.2649736e+00   5.1730069e+00   5.5172457e+00   4.1376322e+00   4.1833001e+00   5.0089919e+00   5.2915026e+00   5.7288742e+00   6.2489999e+00   5.0477718e+00   4.3301270e+00   4.7296934e+00   5.9791304e+00   5.0921508e+00   4.7979162e+00   4.0693980e+00   4.9869831e+00   5.2057660e+00   4.8207883e+00   4.3577517e+00   5.4534393e+00   5.3329167e+00   4.8311489e+00   4.4170126e+00   4.6400431e+00   4.8507731e+00   4.3150898e+00   7.5498344e-01   1.2083046e+00   4.5825757e-01   5.0990195e-01   1.5652476e+00   1.1401754e+00   7.0710678e-01   8.0622577e-01   8.7177979e-01   5.8309519e-01   9.5393920e-01   3.4641016e-01   5.4772256e-01   3.9166312e+00   3.5818989e+00   
4.0951190e+00   3.1527766e+00   3.7509999e+00   3.4612137e+00   3.7682887e+00   2.4919872e+00   3.6972963e+00   2.9883106e+00   2.8248894e+00   3.2419130e+00   3.1416556e+00   3.7040518e+00   2.6210685e+00   3.5566838e+00   3.4914181e+00   3.0347982e+00   3.7563280e+00   2.9291637e+00   3.8807216e+00   3.0577770e+00   4.0360872e+00   3.6619667e+00   3.3734256e+00   3.5369478e+00   3.9837169e+00   4.1988094e+00   3.5411862e+00   2.5159491e+00   2.8757608e+00   2.7586228e+00   2.9137605e+00   4.1581246e+00   3.4914181e+00   3.5298725e+00   3.8535698e+00   3.5916570e+00   3.0512293e+00   3.0822070e+00   3.3793490e+00   3.5972211e+00   3.0315013e+00   2.5159491e+00   3.2046841e+00   3.1144823e+00   3.1654384e+00   3.3256578e+00   2.2045408e+00   3.0951575e+00   5.2971691e+00   4.2497059e+00   5.2516664e+00   4.6957428e+00   5.0497525e+00   6.0299254e+00   3.7215588e+00   5.5821143e+00   5.0249378e+00   5.5883808e+00   4.3324358e+00   4.5099889e+00   4.8155997e+00   4.2391037e+00   4.4564560e+00   4.6162756e+00   4.6314145e+00   6.1717096e+00   6.4358372e+00   4.1617304e+00   5.0813384e+00   4.0865633e+00   6.1424751e+00   4.0987803e+00   4.9446941e+00   5.2564246e+00   3.9736633e+00   4.0162171e+00   4.8373546e+00   5.0348784e+00   5.4799635e+00   5.9245253e+00   4.8774994e+00   4.1545156e+00   4.5934736e+00   5.7043843e+00   4.8969378e+00   4.6010868e+00   3.9127995e+00   4.7476310e+00   4.9929951e+00   4.5793013e+00   4.2497059e+00   5.2297227e+00   5.1117512e+00   4.6162756e+00   4.2684892e+00   4.4384682e+00   4.6604721e+00   4.1725292e+00   5.5677644e-01   3.7416574e-01   5.0000000e-01   9.3808315e-01   5.5677644e-01   6.5574385e-01   8.8317609e-01   2.6457513e-01   7.4161985e-01   3.4641016e-01   7.2801099e-01   2.6457513e-01   4.0435133e+00   3.6359318e+00   4.1856899e+00   2.9478806e+00   3.7709415e+00   3.3421550e+00   3.8065733e+00   2.1307276e+00   3.7389838e+00   2.7748874e+00   2.4556058e+00   3.2031235e+00   3.0133038e+00   3.6619667e+00   2.5258662e+00   
3.6523965e+00   3.3852622e+00   2.9223278e+00   3.6687873e+00   2.7586228e+00   3.8457769e+00   3.0364453e+00   3.9799497e+00   3.6027767e+00   3.4014703e+00   3.6055513e+00   4.0348482e+00   4.2497059e+00   3.4942810e+00   2.3874673e+00   2.6720778e+00   2.5495098e+00   2.8178006e+00   4.0718546e+00   3.3496268e+00   3.5425979e+00   3.9293765e+00   3.5284558e+00   2.9495762e+00   2.9000000e+00   3.1984371e+00   3.5707142e+00   2.9189039e+00   2.1679483e+00   3.0626786e+00   3.0248967e+00   3.0675723e+00   3.3181320e+00   1.9104973e+00   2.9883106e+00   5.2924474e+00   4.1436699e+00   5.3113087e+00   4.6583259e+00   5.0467812e+00   6.1081912e+00   3.4525353e+00   5.6329388e+00   4.9979996e+00   5.6973678e+00   4.3749286e+00   4.4821870e+00   4.8600412e+00   4.1060930e+00   4.3760713e+00   4.6411206e+00   4.6324939e+00   6.3071388e+00   6.4876806e+00   4.0286474e+00   5.1468437e+00   3.9686270e+00   6.2112801e+00   4.0706265e+00   4.9919936e+00   5.3329167e+00   3.9446166e+00   3.9874804e+00   4.8114447e+00   5.1029403e+00   5.5443665e+00   6.0917978e+00   4.8538644e+00   4.1194660e+00   4.4933284e+00   5.8180753e+00   4.9142650e+00   4.5978256e+00   3.8729833e+00   4.8176758e+00   5.0338852e+00   4.6690470e+00   4.1436699e+00   5.2744668e+00   5.1652686e+00   4.6669048e+00   4.2201896e+00   4.4575778e+00   4.6722586e+00   4.1060930e+00   8.3066239e-01   7.8740079e-01   7.1414284e-01   2.0000000e-01   9.2736185e-01   1.2369317e+00   4.2426407e-01   1.1045361e+00   3.0000000e-01   1.1575837e+00   6.7823300e-01   4.4497191e+00   3.9962482e+00   4.5727453e+00   3.1937439e+00   4.1267421e+00   3.6304270e+00   4.1496988e+00   2.2912878e+00   4.1170378e+00   2.9883106e+00   2.6153394e+00   3.5142567e+00   3.3361655e+00   3.9874804e+00   2.8195744e+00   4.0435133e+00   3.6565011e+00   3.2449961e+00   3.9761791e+00   3.0430248e+00   4.1352146e+00   3.3808283e+00   4.3023250e+00   3.9357337e+00   3.7709415e+00   3.9862263e+00   4.4147480e+00   4.6076024e+00   3.8078866e+00   
2.7073973e+00   2.9376862e+00   2.8231188e+00   3.1320920e+00   4.3646306e+00   3.5958309e+00   3.8626416e+00   4.3069711e+00   3.8626416e+00   3.2388269e+00   3.1559468e+00   3.4612137e+00   3.9012818e+00   3.2264532e+00   2.3430749e+00   3.3391616e+00   3.3316662e+00   3.3645208e+00   3.6687873e+00   2.1071308e+00   3.2832910e+00   5.5749439e+00   4.4022721e+00   5.6621551e+00   4.9668904e+00   5.3535035e+00   6.4761099e+00   3.6041643e+00   5.9983331e+00   5.3244718e+00   6.0440053e+00   4.7042534e+00   4.7937459e+00   5.1971146e+00   4.3439613e+00   4.6130250e+00   4.9446941e+00   4.9608467e+00   6.6850580e+00   6.8425142e+00   4.3104524e+00   5.4827001e+00   4.2047592e+00   6.5825527e+00   4.3840620e+00   5.3244718e+00   5.7035077e+00   4.2532341e+00   4.2906876e+00   5.1127292e+00   5.4817880e+00   5.9135438e+00   6.4915329e+00   5.1507281e+00   4.4474712e+00   4.7937459e+00   6.1919302e+00   5.2057660e+00   4.9203658e+00   4.1677332e+00   5.1652686e+00   5.3507009e+00   5.0109879e+00   4.4022721e+00   5.6008928e+00   5.4799635e+00   4.9909919e+00   4.5210618e+00   4.7812132e+00   4.9618545e+00   4.3874822e+00   2.6457513e-01   1.2727922e+00   7.5498344e-01   4.3588989e-01   6.0000000e-01   5.1961524e-01   4.1231056e-01   5.4772256e-01   3.6055513e-01   1.7320508e-01   3.9153544e+00   3.5242020e+00   4.0718546e+00   2.9715316e+00   3.6905284e+00   3.3060551e+00   3.6945906e+00   2.2181073e+00   3.6496575e+00   2.7748874e+00   2.5709920e+00   3.1272992e+00   3.0232433e+00   3.5958309e+00   2.4738634e+00   3.5355339e+00   3.3316662e+00   2.8948230e+00   3.6523965e+00   2.7622455e+00   3.7589892e+00   2.9698485e+00   3.9370039e+00   3.5496479e+00   3.3151169e+00   3.5014283e+00   3.9471509e+00   4.1496988e+00   3.4278273e+00   2.3748684e+00   2.6944387e+00   2.5768197e+00   2.7820855e+00   4.0274061e+00   3.3075671e+00   3.4307434e+00   3.8183766e+00   3.5028560e+00   2.8948230e+00   2.9034462e+00   3.1953091e+00   3.4942810e+00   2.8948230e+00   2.2583180e+00   
3.0397368e+00   2.9681644e+00   3.0182777e+00   3.2419130e+00   1.9672316e+00   2.9478806e+00   5.1951901e+00   4.1024383e+00   5.2086467e+00   4.5891176e+00   4.9608467e+00   6.0024995e+00   3.4785054e+00   5.5398556e+00   4.9416596e+00   5.5587768e+00   4.2661458e+00   4.4170126e+00   4.7602521e+00   4.0816663e+00   4.3185646e+00   4.5365185e+00   4.5475268e+00   6.1611687e+00   6.4007812e+00   4.0236799e+00   5.0328918e+00   3.9255573e+00   6.1155539e+00   4.0062451e+00   4.8805737e+00   5.2211110e+00   3.8755645e+00   3.9089641e+00   4.7402532e+00   5.0019996e+00   5.4497706e+00   5.9371710e+00   4.7812132e+00   4.0558600e+00   4.4598206e+00   5.7000000e+00   4.8052055e+00   4.5099889e+00   3.7973675e+00   4.7063787e+00   4.9295030e+00   4.5497253e+00   4.1024383e+00   5.1672043e+00   5.0497525e+00   4.5628938e+00   4.1701319e+00   4.3646306e+00   4.5639895e+00   4.0398020e+00   1.3000000e+00   6.7823300e-01   4.2426407e-01   6.8556546e-01   5.4772256e-01   4.4721360e-01   5.1961524e-01   4.2426407e-01   2.4494897e-01   4.1060930e+00   3.7054015e+00   4.2626283e+00   3.1591138e+00   3.8820098e+00   3.4957117e+00   3.8704005e+00   2.3895606e+00   3.8483763e+00   2.9410882e+00   2.7531800e+00   3.3030289e+00   3.2357379e+00   3.7868192e+00   2.6476405e+00   3.7242449e+00   3.5057096e+00   3.1000000e+00   3.8483763e+00   2.9597297e+00   3.9242834e+00   3.1606961e+00   4.1340053e+00   3.7509999e+00   3.5099858e+00   3.6918830e+00   4.1460825e+00   4.3347434e+00   3.6110940e+00   2.5748786e+00   2.8896367e+00   2.7766887e+00   2.9748950e+00   4.2154478e+00   3.4770677e+00   3.5972211e+00   4.0062451e+00   3.7067506e+00   3.0740852e+00   3.0886890e+00   3.3882149e+00   3.6823905e+00   3.0903074e+00   2.4351591e+00   3.2264532e+00   3.1559468e+00   3.2031235e+00   3.4351128e+00   2.1307276e+00   3.1336879e+00   5.3535035e+00   4.2755117e+00   5.3907328e+00   4.7738873e+00   5.1341991e+00   6.1919302e+00   3.6345564e+00   5.7358522e+00   5.1371198e+00   5.7210139e+00   
4.4350874e+00   4.6000000e+00   4.9365980e+00   4.2508823e+00   4.4698993e+00   4.6957428e+00   4.7318073e+00   6.3364028e+00   6.5924199e+00   4.2213742e+00   5.2019227e+00   4.0865633e+00   6.3111013e+00   4.1880783e+00   5.0527220e+00   5.4101756e+00   4.0533936e+00   4.0828911e+00   4.9173163e+00   5.1990384e+00   5.6435804e+00   6.1155539e+00   4.9547957e+00   4.2497059e+00   4.6604721e+00   5.8804762e+00   4.9598387e+00   4.6914816e+00   3.9686270e+00   4.8805737e+00   5.0941143e+00   4.7127487e+00   4.2755117e+00   5.3376025e+00   5.2086467e+00   4.7275787e+00   4.3520110e+00   4.5387223e+00   4.7180504e+00   4.2130749e+00   9.1104336e-01   1.3674794e+00   1.7262677e+00   7.6811457e-01   1.6462078e+00   9.1651514e-01   1.6278821e+00   1.1269428e+00   4.4530888e+00   4.0124805e+00   4.5607017e+00   3.0479501e+00   4.0718546e+00   3.5958309e+00   4.1821047e+00   2.1587033e+00   4.0816663e+00   2.9359837e+00   2.3811762e+00   3.5071356e+00   3.1685959e+00   3.9610605e+00   2.8035692e+00   4.0373258e+00   3.6578682e+00   3.1906112e+00   3.8183766e+00   2.9410882e+00   4.1557190e+00   3.3316662e+00   4.2047592e+00   3.8961519e+00   3.7376463e+00   3.9648455e+00   4.3588989e+00   4.5803930e+00   3.7802116e+00   2.6191602e+00   2.8106939e+00   2.6944387e+00   3.0692019e+00   4.3058100e+00   3.6027767e+00   3.9230090e+00   4.2988371e+00   3.7215588e+00   3.2465366e+00   3.0545049e+00   3.3926391e+00   3.8923001e+00   3.1432467e+00   2.1771541e+00   3.2832910e+00   3.3391616e+00   3.3481338e+00   3.6400549e+00   1.9824228e+00   3.2449961e+00   5.5830099e+00   4.3416587e+00   5.6258333e+00   4.9335586e+00   5.3244718e+00   6.4366140e+00   3.5213634e+00   5.9539903e+00   5.2325902e+00   6.0712437e+00   4.7053161e+00   4.7254629e+00   5.1633323e+00   4.2497059e+00   4.5596052e+00   4.9416596e+00   4.9376108e+00   6.7275553e+00   6.7594378e+00   4.1701319e+00   5.4708317e+00   4.1605288e+00   6.5222695e+00   4.3139309e+00   5.3329167e+00   5.6956123e+00   4.2000000e+00   
4.2731721e+00   5.0586559e+00   5.4516053e+00   5.8532043e+00   6.5352888e+00   5.0950957e+00   4.4011362e+00   4.7275787e+00   6.1457302e+00   5.2297227e+00   4.9132474e+00   4.1521079e+00   5.1429563e+00   5.3272882e+00   4.9839743e+00   4.3416587e+00   5.5910643e+00   5.4808758e+00   4.9537864e+00   4.4192760e+00   4.7528939e+00   4.9909919e+00   4.3749286e+00   8.3666003e-01   1.1180340e+00   4.6904158e-01   9.6953597e-01   2.2360680e-01   1.0488088e+00   6.1644140e-01   4.4452222e+00   3.9912404e+00   4.5727453e+00   3.2434549e+00   4.1412558e+00   3.6469165e+00   4.1400483e+00   2.3515952e+00   4.1267421e+00   3.0149627e+00   2.6981475e+00   3.5199432e+00   3.3896903e+00   3.9974992e+00   2.8337255e+00   4.0435133e+00   3.6619667e+00   3.2695565e+00   4.0211939e+00   3.0822070e+00   4.1303753e+00   3.3985291e+00   4.3301270e+00   3.9509493e+00   3.7815341e+00   3.9912404e+00   4.4283180e+00   4.6119410e+00   3.8183766e+00   2.7440845e+00   2.9849623e+00   2.8722813e+00   3.1575307e+00   4.3829214e+00   3.6013886e+00   3.8470768e+00   4.3069711e+00   3.9038443e+00   3.2449961e+00   3.1937439e+00   3.4899857e+00   3.9064050e+00   3.2572995e+00   2.4103942e+00   3.3630343e+00   3.3376639e+00   3.3763886e+00   3.6796739e+00   2.1633308e+00   3.3015148e+00   5.5677644e+00   4.4204072e+00   5.6656862e+00   4.9749372e+00   5.3572381e+00   6.4791975e+00   3.6373067e+00   6.0049979e+00   5.3469618e+00   6.0274373e+00   4.7000000e+00   4.8104054e+00   5.2009614e+00   4.3714986e+00   4.6260134e+00   4.9406477e+00   4.9648766e+00   6.6640828e+00   6.8571131e+00   4.3520110e+00   5.4790510e+00   4.2190046e+00   6.5916614e+00   4.4022721e+00   5.3169540e+00   5.7000000e+00   4.2673177e+00   4.2953463e+00   5.1244512e+00   5.4854353e+00   5.9236813e+00   6.4699304e+00   5.1623638e+00   4.4609416e+00   4.8145612e+00   6.1951594e+00   5.1942276e+00   4.9203658e+00   4.1725292e+00   5.1652686e+00   5.3507009e+00   5.0109879e+00   4.4204072e+00   5.5973208e+00   5.4726593e+00   
4.9949975e+00   4.5475268e+00   4.7853944e+00   4.9497475e+00   4.3920383e+00   4.7958315e-01   6.4807407e-01   5.0990195e-01   6.7082039e-01   5.4772256e-01   4.8989795e-01   3.7868192e+00   3.3570821e+00   3.9331921e+00   2.8178006e+00   3.5425979e+00   3.1432467e+00   3.5128336e+00   2.0663978e+00   3.5227830e+00   2.5709920e+00   2.4535688e+00   2.9376862e+00   2.9342802e+00   3.4380227e+00   2.2825424e+00   3.3955854e+00   3.1352831e+00   2.7730849e+00   3.5142567e+00   2.6267851e+00   3.5468296e+00   2.8195744e+00   3.7934153e+00   3.4161382e+00   3.1780497e+00   3.3600595e+00   3.8223030e+00   3.9887341e+00   3.2526912e+00   2.2516660e+00   2.5592968e+00   2.4556058e+00   2.6324893e+00   3.8587563e+00   3.1032241e+00   3.2280025e+00   3.6701499e+00   3.3852622e+00   2.7110883e+00   2.7386128e+00   3.0430248e+00   3.3316662e+00   2.7513633e+00   2.1189620e+00   2.8722813e+00   2.8035692e+00   2.8460499e+00   3.0951575e+00   1.7944358e+00   2.7784888e+00   4.9699095e+00   3.9012818e+00   5.0398413e+00   4.4147480e+00   4.7644517e+00   5.8532043e+00   3.2603681e+00   5.4018515e+00   4.7927028e+00   5.3581713e+00   4.0681691e+00   4.2402830e+00   4.5771170e+00   3.8742741e+00   4.0767634e+00   4.3162484e+00   4.3760713e+00   5.9958319e+00   6.2513998e+00   3.8807216e+00   4.8373546e+00   3.7013511e+00   5.9791304e+00   3.8288379e+00   4.6893496e+00   5.0724747e+00   3.6891733e+00   3.7134889e+00   4.5497253e+00   4.8713448e+00   5.3094256e+00   5.7879185e+00   4.5836667e+00   3.9038443e+00   4.3197222e+00   5.5389530e+00   4.5760245e+00   4.3324358e+00   3.5958309e+00   4.5232732e+00   4.7212287e+00   4.3485630e+00   3.9012818e+00   4.9709154e+00   4.8321838e+00   4.3577517e+00   3.9924930e+00   4.1737274e+00   4.3335897e+00   3.8405729e+00   9.9498744e-01   3.6055513e-01   9.4868330e-01   5.0000000e-01   7.4161985e-01   3.5791060e+00   3.1654384e+00   3.7336309e+00   2.7622455e+00   3.3852622e+00   2.9883106e+00   3.3120990e+00   2.0784610e+00   3.3406586e+00   
2.4939928e+00   2.4839485e+00   2.7892651e+00   2.8530685e+00   3.2634338e+00   2.1817424e+00   3.2093613e+00   2.9765752e+00   2.6267851e+00   3.4263683e+00   2.5357445e+00   3.3719431e+00   2.6870058e+00   3.6523965e+00   3.2372828e+00   3.0116441e+00   3.1843367e+00   3.6469165e+00   3.8078866e+00   3.0967725e+00   2.1725561e+00   2.4939928e+00   2.3916521e+00   2.5179357e+00   3.7013511e+00   2.9495762e+00   3.0282008e+00   3.4785054e+00   3.2787193e+00   2.5573424e+00   2.6589472e+00   2.9137605e+00   3.1511903e+00   2.6419690e+00   2.1400935e+00   2.7495454e+00   2.6324893e+00   2.6962938e+00   2.9308702e+00   1.8411953e+00   2.6476405e+00   4.7864392e+00   3.7669616e+00   4.8507731e+00   4.2308392e+00   4.5880279e+00   5.6453521e+00   3.1906112e+00   5.1932649e+00   4.6281746e+00   5.1478151e+00   3.8884444e+00   4.0877867e+00   4.4022721e+00   3.7709415e+00   3.9661064e+00   4.1496988e+00   4.1856899e+00   5.7480431e+00   6.0671245e+00   3.7669616e+00   4.6529560e+00   3.5791060e+00   5.7758116e+00   3.6891733e+00   4.4877611e+00   4.8518038e+00   3.5468296e+00   3.5496479e+00   4.3897608e+00   4.6583259e+00   5.1166395e+00   5.5362442e+00   4.4294469e+00   3.7269290e+00   4.1388404e+00   5.3525695e+00   4.3920383e+00   4.1352146e+00   3.4380227e+00   4.3439613e+00   4.5541190e+00   4.1928511e+00   3.7669616e+00   4.7812132e+00   4.6540305e+00   4.2071368e+00   3.8716921e+00   4.0062451e+00   4.1509035e+00   3.6715120e+00   8.8317609e-01   3.0000000e-01   8.7177979e-01   3.7416574e-01   4.1206796e+00   3.6945906e+00   4.2555846e+00   2.9563491e+00   3.8223030e+00   3.3852622e+00   3.8626416e+00   2.1142375e+00   3.8065733e+00   2.7766887e+00   2.4372115e+00   3.2388269e+00   3.0545049e+00   3.7148351e+00   2.5475478e+00   3.7188708e+00   3.4190642e+00   2.9782545e+00   3.6945906e+00   2.7892651e+00   3.8807216e+00   3.0805844e+00   4.0236799e+00   3.6646964e+00   3.4612137e+00   3.6674242e+00   4.1000000e+00   4.3046487e+00   3.5355339e+00   2.4228083e+00   
2.6925824e+00   2.5748786e+00   2.8548205e+00   4.1121770e+00   3.3778692e+00   3.5916570e+00   3.9937451e+00   3.5693137e+00   2.9883106e+00   2.9154759e+00   3.2341923e+00   3.6249138e+00   2.9546573e+00   2.1517435e+00   3.0935417e+00   3.0757113e+00   3.1080541e+00   3.3734256e+00   1.8814888e+00   3.0232433e+00   5.3235327e+00   4.1641326e+00   5.3646994e+00   4.7063787e+00   5.0852729e+00   6.1741396e+00   3.4394767e+00   5.7026310e+00   5.0467812e+00   5.7489129e+00   4.4170126e+00   4.5188494e+00   4.9040799e+00   4.1121770e+00   4.3749286e+00   4.6701178e+00   4.6850827e+00   6.3835727e+00   6.5436993e+00   4.0595566e+00   5.1903757e+00   3.9774364e+00   6.2793312e+00   4.1036569e+00   5.0428167e+00   5.4046276e+00   3.9761791e+00   4.0236799e+00   4.8456166e+00   5.1778374e+00   5.6080300e+00   6.1757591e+00   4.8836462e+00   4.1737274e+00   4.5497253e+00   5.8736701e+00   4.9457052e+00   4.6508064e+00   3.9051248e+00   4.8641546e+00   5.0665570e+00   4.7021272e+00   4.1641326e+00   5.3188345e+00   5.1990384e+00   4.6957428e+00   4.2449971e+00   4.4966654e+00   4.7031904e+00   4.1412558e+00   8.0622577e-01   2.4494897e-01   5.4772256e-01   3.8755645e+00   3.4856850e+00   4.0385641e+00   3.0626786e+00   3.6945906e+00   3.3136083e+00   3.6414283e+00   2.3515952e+00   3.6428011e+00   2.8195744e+00   2.7386128e+00   3.1192948e+00   3.1256999e+00   3.5860842e+00   2.5039968e+00   3.5114100e+00   3.3151169e+00   2.9308702e+00   3.7242449e+00   2.8354894e+00   3.7148351e+00   2.9949958e+00   3.9635842e+00   3.5510562e+00   3.3166248e+00   3.4885527e+00   3.9458839e+00   4.1243181e+00   3.4234486e+00   2.4596748e+00   2.7874720e+00   2.6776856e+00   2.8266588e+00   4.0286474e+00   3.2908965e+00   3.3674916e+00   3.7881394e+00   3.5693137e+00   2.8896367e+00   2.9698485e+00   3.2310989e+00   3.4756294e+00   2.9478806e+00   2.4062419e+00   3.0708305e+00   2.9597297e+00   3.0232433e+00   3.2434549e+00   2.1118712e+00   2.9698485e+00   5.1322510e+00   4.1036569e+00   
5.1710734e+00   4.5617979e+00   4.9234135e+00   5.9581876e+00   3.5199432e+00   5.5045436e+00   4.9446941e+00   5.4763126e+00   4.2201896e+00   4.4136153e+00   4.7275787e+00   4.1048752e+00   4.3104524e+00   4.4888751e+00   4.5133136e+00   6.0638272e+00   6.3796552e+00   4.0767634e+00   4.9819675e+00   3.9217343e+00   6.0835845e+00   4.0124805e+00   4.8197510e+00   5.1662365e+00   3.8742741e+00   3.8845849e+00   4.7222876e+00   4.9648766e+00   5.4249424e+00   5.8412327e+00   4.7634021e+00   4.0472213e+00   4.4586994e+00   5.6621551e+00   4.7370877e+00   4.4665423e+00   3.7749172e+00   4.6669048e+00   4.8877398e+00   4.5155288e+00   4.1036569e+00   5.1137071e+00   4.9909919e+00   4.5354162e+00   4.1928511e+00   4.3358967e+00   4.4966654e+00   4.0112342e+00   8.6602540e-01   4.1231056e-01   4.2532341e+00   3.8131352e+00   4.3863424e+00   3.0967725e+00   3.9623226e+00   3.4914181e+00   3.9686270e+00   2.2315914e+00   3.9420807e+00   2.8809721e+00   2.5787594e+00   3.3555923e+00   3.2186954e+00   3.8301436e+00   2.6720778e+00   3.8548671e+00   3.5128336e+00   3.1016125e+00   3.8548671e+00   2.9240383e+00   3.9761791e+00   3.2218007e+00   4.1617304e+00   3.7815341e+00   3.5986108e+00   3.8052595e+00   4.2426407e+00   4.4339599e+00   3.6537652e+00   2.5729361e+00   2.8319605e+00   2.7166155e+00   2.9899833e+00   4.2261093e+00   3.4612137e+00   3.6837481e+00   4.1231056e+00   3.7296112e+00   3.0886890e+00   3.0446675e+00   3.3421550e+00   3.7376463e+00   3.0919250e+00   2.2847319e+00   3.2093613e+00   3.1764760e+00   3.2171416e+00   3.5028560e+00   2.0273135e+00   3.1416556e+00   5.4175640e+00   4.2743421e+00   5.4909016e+00   4.8145612e+00   5.1971146e+00   6.3000000e+00   3.5270384e+00   5.8266629e+00   5.1788030e+00   5.8566202e+00   4.5321077e+00   4.6465041e+00   5.0299105e+00   4.2308392e+00   4.4866469e+00   4.7812132e+00   4.7979162e+00   6.4853681e+00   6.6805688e+00   4.1964271e+00   5.3094256e+00   4.0804412e+00   6.4109282e+00   4.2367440e+00   5.1497573e+00   
5.5208695e+00   4.1036569e+00   4.1352146e+00   4.9648766e+00   5.3028294e+00   5.7428216e+00   6.2841069e+00   5.0039984e+00   4.2930176e+00   4.6572524e+00   6.0124870e+00   5.0408333e+00   4.7560488e+00   4.0149720e+00   4.9909919e+00   5.1865210e+00   4.8373546e+00   4.2743421e+00   5.4313902e+00   5.3103672e+00   4.8270074e+00   4.3852024e+00   4.6184413e+00   4.7968740e+00   4.2402830e+00   5.0990195e-01   3.8496753e+00   3.4856850e+00   4.0211939e+00   3.0757113e+00   3.6810325e+00   3.3436507e+00   3.6551334e+00   2.3937418e+00   3.6262929e+00   2.8653098e+00   2.7604347e+00   3.1352831e+00   3.1032241e+00   3.6000000e+00   2.5199206e+00   3.4885527e+00   3.3570821e+00   2.9410882e+00   3.7080992e+00   2.8460499e+00   3.7496667e+00   2.9849623e+00   3.9610605e+00   3.5623026e+00   3.3015148e+00   3.4684290e+00   3.9230090e+00   4.1170378e+00   3.4380227e+00   2.4515301e+00   2.7982137e+00   2.6851443e+00   2.8301943e+00   4.0509258e+00   3.3451457e+00   3.3970576e+00   3.7749172e+00   3.5468296e+00   2.9240383e+00   2.9899833e+00   3.2649655e+00   3.4899857e+00   2.9512709e+00   2.4351591e+00   3.0967725e+00   2.9899833e+00   3.0495901e+00   3.2403703e+00   2.1307276e+00   2.9899833e+00   5.1672043e+00   4.1352146e+00   5.1672043e+00   4.5836667e+00   4.9416596e+00   5.9497899e+00   3.5846897e+00   5.4990908e+00   4.9446941e+00   5.4836119e+00   4.2296572e+00   4.4204072e+00   4.7275787e+00   4.1340053e+00   4.3428102e+00   4.5066617e+00   4.5265881e+00   6.0671245e+00   6.3671030e+00   4.0841156e+00   4.9859803e+00   3.9623226e+00   6.0704201e+00   4.0149720e+00   4.8342528e+00   5.1643005e+00   3.8820098e+00   3.9051248e+00   4.7370877e+00   4.9547957e+00   5.4101756e+00   5.8326666e+00   4.7780749e+00   4.0570926e+00   4.4833024e+00   5.6409219e+00   4.7686476e+00   4.4866469e+00   3.7986840e+00   4.6626173e+00   4.8959167e+00   4.5044423e+00   4.1352146e+00   5.1254268e+00   5.0049975e+00   4.5332108e+00   4.1928511e+00   4.3428102e+00   4.5299007e+00   
4.0459857e+00   4.0422766e+00   3.6428011e+00   4.1940434e+00   3.0364453e+00   3.7986840e+00   3.4000000e+00   3.8131352e+00   2.2516660e+00   3.7643060e+00   2.8442925e+00   2.5961510e+00   3.2295511e+00   3.1000000e+00   3.7013511e+00   2.5632011e+00   3.6565011e+00   3.4278273e+00   2.9883106e+00   3.7349699e+00   2.8390139e+00   3.8652296e+00   3.0708305e+00   4.0336088e+00   3.6537652e+00   3.4263683e+00   3.6180105e+00   4.0607881e+00   4.2649736e+00   3.5298725e+00   2.4556058e+00   2.7622455e+00   2.6438608e+00   2.8722813e+00   4.1243181e+00   3.3985291e+00   3.5468296e+00   3.9382737e+00   3.5916570e+00   2.9916551e+00   2.9765752e+00   3.2771939e+00   3.6027767e+00   2.9816103e+00   2.2912878e+00   3.1256999e+00   3.0692019e+00   3.1144823e+00   3.3496268e+00   2.0049938e+00   3.0397368e+00   5.3047149e+00   4.1928511e+00   5.3254108e+00   4.6957428e+00   5.0695167e+00   6.1237244e+00   3.5369478e+00   5.6586217e+00   5.0447993e+00   5.6841886e+00   4.3806392e+00   4.5188494e+00   4.8733972e+00   4.1629317e+00   4.4068129e+00   4.6465041e+00   4.6593991e+00   6.2952363e+00   6.5145990e+00   4.1060930e+00   5.1497573e+00   4.0124805e+00   6.2345810e+00   4.1060930e+00   4.9989999e+00   5.3450912e+00   3.9761791e+00   4.0137264e+00   4.8435524e+00   5.1234754e+00   5.5668663e+00   6.0745370e+00   4.8836462e+00   4.1617304e+00   4.5585085e+00   5.8206529e+00   4.9173163e+00   4.6227697e+00   3.9000000e+00   4.8228622e+00   5.0408333e+00   4.6636895e+00   4.1928511e+00   5.2829916e+00   5.1643005e+00   4.6722586e+00   4.2638011e+00   4.4743715e+00   4.6754679e+00   4.1412558e+00   6.4031242e-01   2.6457513e-01   1.8867962e+00   6.5574385e-01   1.3784049e+00   7.3484692e-01   2.6776856e+00   5.1961524e-01   2.0322401e+00   2.6532998e+00   1.2288206e+00   1.6278821e+00   9.4868330e-01   1.8083141e+00   4.3588989e-01   1.4317821e+00   1.4866069e+00   1.3000000e+00   1.7832555e+00   1.1747340e+00   1.2124356e+00   1.0148892e+00   1.0049876e+00   7.8740079e-01   
5.3851648e-01   4.5825757e-01   5.5677644e-01   1.0677078e+00   1.9104973e+00   1.9467922e+00   2.0124612e+00   1.5394804e+00   1.2041595e+00   1.6278821e+00   1.0583005e+00   3.3166248e-01   1.1832160e+00   1.5394804e+00   1.8000000e+00   1.6552945e+00   9.2736185e-01   1.5264338e+00   2.6324893e+00   1.5716234e+00   1.4212670e+00   1.4282857e+00   9.4868330e-01   2.6608269e+00   1.4899664e+00   1.8439089e+00   1.4491377e+00   1.4071247e+00   1.2449900e+00   1.4628739e+00   2.1213203e+00   2.2427661e+00   1.7029386e+00   1.3964240e+00   1.8357560e+00   8.7749644e-01   1.1045361e+00   1.1000000e+00   1.6217275e+00   1.6613248e+00   1.2369317e+00   1.0440307e+00   2.3430749e+00   2.5495098e+00   1.4491377e+00   1.3490738e+00   1.5874508e+00   2.2383029e+00   9.6953597e-01   1.2609520e+00   1.3747727e+00   9.8488578e-01   1.0246951e+00   1.3490738e+00   1.1532563e+00   1.5905974e+00   2.1023796e+00   1.4035669e+00   9.0553851e-01   1.4071247e+00   1.8165902e+00   1.5297059e+00   1.0816654e+00   1.1000000e+00   1.0000000e+00   1.3820275e+00   9.9498744e-01   1.4491377e+00   1.5132746e+00   1.5198684e+00   1.0908712e+00   1.1489125e+00   9.4868330e-01   1.4071247e+00   1.2529964e+00   6.4807407e-01   1.3820275e+00   4.2426407e-01   8.3066239e-01   2.6457513e-01   2.1400935e+00   4.2426407e-01   1.4352700e+00   2.1563859e+00   6.1644140e-01   1.2884099e+00   4.7958315e-01   1.2569805e+00   3.4641016e-01   8.2462113e-01   1.0099505e+00   1.0198039e+00   1.2845233e+00   6.5574385e-01   7.3484692e-01   8.1240384e-01   6.1644140e-01   4.1231056e-01   3.1622777e-01   6.4807407e-01   6.4807407e-01   5.0000000e-01   1.4491377e+00   1.4491377e+00   1.5297059e+00   1.0295630e+00   8.8317609e-01   1.0198039e+00   4.5825757e-01   3.7416574e-01   9.3273791e-01   9.3808315e-01   1.2609520e+00   1.1269428e+00   3.8729833e-01   1.0295630e+00   2.1118712e+00   1.0099505e+00   8.4261498e-01   8.4261498e-01   4.5825757e-01   2.1424285e+00   9.2195445e-01   1.8083141e+00   1.0630146e+00   
1.6881943e+00   1.1832160e+00   1.4933185e+00   2.5000000e+00   1.6673332e+00   2.0566964e+00   1.5362291e+00   2.0880613e+00   7.8740079e-01   1.0246951e+00   1.2489996e+00   1.2165525e+00   1.3000000e+00   1.1313708e+00   1.0677078e+00   2.7166155e+00   2.9068884e+00   1.1874342e+00   1.5264338e+00   1.1000000e+00   2.6343880e+00   7.1414284e-01   1.3784049e+00   1.7262677e+00   6.1644140e-01   6.1644140e-01   1.3152946e+00   1.5427249e+00   1.9697716e+00   2.5436195e+00   1.3638182e+00   7.2801099e-01   1.2922848e+00   2.2203603e+00   1.4387495e+00   1.0488088e+00   6.1644140e-01   1.1958261e+00   1.4560220e+00   1.1224972e+00   1.0630146e+00   1.6613248e+00   1.5937377e+00   1.1224972e+00   9.5393920e-01   8.8881944e-01   1.2369317e+00   8.6023253e-01   1.8574176e+00   5.8309519e-01   1.3152946e+00   6.7082039e-01   2.7018512e+00   5.0990195e-01   2.0149442e+00   2.6514147e+00   1.2247449e+00   1.6370706e+00   8.5440037e-01   1.8601075e+00   5.4772256e-01   1.3638182e+00   1.5033296e+00   1.2083046e+00   1.7916473e+00   1.0535654e+00   1.2569805e+00   8.4852814e-01   9.2736185e-01   8.3066239e-01   6.0000000e-01   3.4641016e-01   3.1622777e-01   1.0049876e+00   1.9748418e+00   1.9544820e+00   2.0346990e+00   1.5684387e+00   1.0099505e+00   1.5556349e+00   1.0344080e+00   2.8284271e-01   1.1357817e+00   1.5427249e+00   1.7804494e+00   1.5968719e+00   8.6602540e-01   1.5362291e+00   2.6570661e+00   1.5427249e+00   1.4247807e+00   1.4177447e+00   9.6436508e-01   2.7147744e+00   1.4866069e+00   1.6155494e+00   1.2529964e+00   1.1874342e+00   9.8994949e-01   1.2124356e+00   1.9364917e+00   2.1354157e+00   1.5000000e+00   1.1401754e+00   1.6673332e+00   6.7823300e-01   8.5440037e-01   8.6023253e-01   1.4352700e+00   1.4662878e+00   1.0295630e+00   7.8740079e-01   2.2045408e+00   2.3515952e+00   1.2767145e+00   1.1357817e+00   1.4247807e+00   2.0542639e+00   7.8102497e-01   1.0392305e+00   1.1832160e+00   8.2462113e-01   8.6023253e-01   1.0908712e+00   9.5916630e-01   
1.3928388e+00   1.9974984e+00   1.1489125e+00   7.0000000e-01   1.1789826e+00   1.6522712e+00   1.3228757e+00   8.3666003e-01   9.5916630e-01   7.8102497e-01   1.1575837e+00   8.2462113e-01   1.2529964e+00   1.2884099e+00   1.3114877e+00   8.8317609e-01   9.4339811e-01   7.1414284e-01   1.2124356e+00   1.0677078e+00   1.2845233e+00   7.3484692e-01   1.4899664e+00   9.7467943e-01   1.3892444e+00   5.1961524e-01   8.2462113e-01   8.5440037e-01   5.9160798e-01   1.1045361e+00   7.2801099e-01   1.5000000e+00   8.8881944e-01   5.9160798e-01   8.8881944e-01   3.1622777e-01   1.3638182e+00   7.8102497e-01   1.2369317e+00   1.0535654e+00   1.1224972e+00   1.3674794e+00   1.6093477e+00   1.7578396e+00   9.4868330e-01   6.8556546e-01   3.0000000e-01   4.3588989e-01   5.1961524e-01   1.3076697e+00   8.8881944e-01   1.3416408e+00   1.6155494e+00   8.9442719e-01   7.1414284e-01   2.0000000e-01   5.0990195e-01   1.1045361e+00   4.3588989e-01   9.1104336e-01   4.5825757e-01   7.6157731e-01   6.6332496e-01   9.6953597e-01   1.1135529e+00   5.4772256e-01   2.6608269e+00   1.3490738e+00   2.7018512e+00   1.9519221e+00   2.3537205e+00   3.5071356e+00   9.0000000e-01   3.0232433e+00   2.2293497e+00   3.2295511e+00   1.8734994e+00   1.7378147e+00   2.2516660e+00   1.2529964e+00   1.6613248e+00   2.0760539e+00   1.9974984e+00   3.8974351e+00   3.7868192e+00   1.1401754e+00   2.5806976e+00   1.2489996e+00   3.5874782e+00   1.3638182e+00   2.4433583e+00   2.8195744e+00   1.2767145e+00   1.3820275e+00   2.0639767e+00   2.5903668e+00   2.9376862e+00   3.7762415e+00   2.1047565e+00   1.4628739e+00   1.7378147e+00   3.2771939e+00   2.3706539e+00   1.9874607e+00   1.2767145e+00   2.2803509e+00   2.4186773e+00   2.1931712e+00   1.3490738e+00   2.6664583e+00   2.6019224e+00   2.0904545e+00   1.4282857e+00   1.8493242e+00   2.1587033e+00   1.4525839e+00   8.3066239e-01   5.5677644e-01   2.1587033e+00   2.4494897e-01   1.4832397e+00   2.0856654e+00   7.4833148e-01   1.1045361e+00   4.3588989e-01   
1.3638182e+00   4.2426407e-01   9.2736185e-01   1.0000000e+00   6.7823300e-01   1.2449900e+00   8.0622577e-01   7.4833148e-01   4.6904158e-01   5.0990195e-01   3.8729833e-01   3.1622777e-01   3.7416574e-01   5.2915026e-01   5.1961524e-01   1.4628739e+00   1.4000000e+00   1.4899664e+00   1.0392305e+00   7.2111026e-01   1.1224972e+00   7.9372539e-01   3.7416574e-01   6.0827625e-01   1.0677078e+00   1.2206556e+00   1.0816654e+00   4.5825757e-01   9.8994949e-01   2.1071308e+00   1.0099505e+00   9.6436508e-01   9.2195445e-01   4.7958315e-01   2.1840330e+00   9.6436508e-01   1.8027756e+00   9.5393920e-01   1.5652476e+00   1.0677078e+00   1.4035669e+00   2.3685439e+00   1.6431677e+00   1.9052559e+00   1.2884099e+00   2.0928450e+00   8.1240384e-01   8.1853528e-01   1.1401754e+00   1.0677078e+00   1.2449900e+00   1.1401754e+00   9.6953597e-01   2.7092434e+00   2.7221315e+00   8.7749644e-01   1.4730920e+00   1.0723805e+00   2.4698178e+00   4.7958315e-01   1.3638182e+00   1.6431677e+00   4.6904158e-01   6.1644140e-01   1.1704700e+00   1.4071247e+00   1.7944358e+00   2.5396850e+00   1.2247449e+00   5.3851648e-01   1.1000000e+00   2.0904545e+00   1.4866069e+00   1.0000000e+00   6.4807407e-01   1.1180340e+00   1.3928388e+00   1.0677078e+00   9.5393920e-01   1.6062378e+00   1.5811388e+00   1.0392305e+00   6.7082039e-01   8.0622577e-01   1.3152946e+00   8.6023253e-01   8.6023253e-01   1.5264338e+00   9.1104336e-01   7.9372539e-01   1.4899664e+00   4.5825757e-01   8.8881944e-01   4.6904158e-01   9.1104336e-01   1.0535654e+00   3.0000000e-01   5.1961524e-01   8.0622577e-01   7.0710678e-01   7.3484692e-01   6.4031242e-01   8.0622577e-01   4.5825757e-01   7.3484692e-01   9.3273791e-01   1.1445523e+00   1.2041595e+00   3.7416574e-01   1.0630146e+00   8.5440037e-01   9.6436508e-01   6.2449980e-01   7.4161985e-01   4.1231056e-01   7.3484692e-01   1.0816654e+00   7.8740079e-01   4.5825757e-01   6.1644140e-01   3.1622777e-01   4.6904158e-01   5.5677644e-01   1.5066519e+00   3.3166248e-01   
3.7416574e-01   3.1622777e-01   5.4772256e-01   1.6552945e+00   4.0000000e-01   2.0736441e+00   8.6023253e-01   2.1447611e+00   1.3527749e+00   1.7832555e+00   2.9495762e+00   9.4339811e-01   2.4617067e+00   1.7406895e+00   2.6248809e+00   1.2845233e+00   1.2247449e+00   1.7000000e+00   9.1104336e-01   1.2569805e+00   1.5132746e+00   1.3892444e+00   3.2634338e+00   3.2863353e+00   8.6023253e-01   2.0099751e+00   8.1240384e-01   3.0545049e+00   8.8317609e-01   1.8248288e+00   2.2158520e+00   7.6811457e-01   7.8102497e-01   1.5297059e+00   2.0174241e+00   2.4103942e+00   3.1527766e+00   1.5842980e+00   8.7177979e-01   1.1916375e+00   2.7568098e+00   1.7720045e+00   1.3527749e+00   6.8556546e-01   1.7262677e+00   1.8734994e+00   1.7000000e+00   8.6023253e-01   2.0808652e+00   2.0322401e+00   1.5905974e+00   1.0295630e+00   1.2884099e+00   1.5556349e+00   8.3066239e-01   2.2561028e+00   5.9160798e-01   1.5000000e+00   2.2759613e+00   7.1414284e-01   1.4662878e+00   4.8989795e-01   1.3964240e+00   5.7445626e-01   7.9372539e-01   1.1532563e+00   1.1269428e+00   1.4212670e+00   4.6904158e-01   9.3273791e-01   8.3066239e-01   6.7082039e-01   6.4807407e-01   5.5677644e-01   7.4161985e-01   5.9160798e-01   5.4772256e-01   1.6278821e+00   1.5842980e+00   1.6763055e+00   1.1874342e+00   7.8102497e-01   9.7467943e-01   3.7416574e-01   4.5825757e-01   1.0862780e+00   1.0148892e+00   1.3638182e+00   1.1747340e+00   4.2426407e-01   1.1789826e+00   2.2383029e+00   1.0908712e+00   9.2736185e-01   9.2736185e-01   6.4807407e-01   2.2847319e+00   1.0295630e+00   1.5811388e+00   9.2736185e-01   1.5556349e+00   1.0049876e+00   1.3038405e+00   2.3748684e+00   1.6278821e+00   1.9390719e+00   1.4317821e+00   1.9157244e+00   6.0827625e-01   9.0553851e-01   1.1090537e+00   1.1180340e+00   1.1401754e+00   9.3273791e-01   9.0000000e-01   2.5632011e+00   2.7892651e+00   1.1832160e+00   1.3638182e+00   9.6953597e-01   2.5238859e+00   6.6332496e-01   1.1874342e+00   1.5968719e+00   5.5677644e-01   
4.5825757e-01   1.1489125e+00   1.4525839e+00   1.8734994e+00   2.4207437e+00   1.1958261e+00   6.4807407e-01   1.1747340e+00   2.1213203e+00   1.2083046e+00   8.5440037e-01   4.7958315e-01   1.0677078e+00   1.2845233e+00   1.0246951e+00   9.2736185e-01   1.4798649e+00   1.4035669e+00   9.9498744e-01   9.0553851e-01   7.3484692e-01   1.0000000e+00   6.7082039e-01   2.2181073e+00   8.3666003e-01   4.5825757e-01   1.5556349e+00   1.3190906e+00   1.9519221e+00   9.5916630e-01   2.2583180e+00   1.5937377e+00   1.2409674e+00   1.8493242e+00   9.3273791e-01   2.1283797e+00   1.4764823e+00   2.1863211e+00   1.8973666e+00   1.8947295e+00   2.1494185e+00   2.4859606e+00   2.6419690e+00   1.7748239e+00   8.4852814e-01   7.8740079e-01   7.2111026e-01   1.1401754e+00   2.2135944e+00   1.5165751e+00   2.0024984e+00   2.4372115e+00   1.8083141e+00   1.2569805e+00   9.7467943e-01   1.2845233e+00   1.9104973e+00   1.1747340e+00   1.4142136e-01   1.2165525e+00   1.3601471e+00   1.3379088e+00   1.7406895e+00   3.8729833e-01   1.2369317e+00   3.5085610e+00   2.2248595e+00   3.6290495e+00   2.8530685e+00   3.2572995e+00   4.4440972e+00   1.3928388e+00   3.9560081e+00   3.1843367e+00   4.1012193e+00   2.7276363e+00   2.6739484e+00   3.1654384e+00   2.1307276e+00   2.4839485e+00   2.9291637e+00   2.8982753e+00   4.7749346e+00   4.7465777e+00   2.0952327e+00   3.4770677e+00   2.0518285e+00   4.5343136e+00   2.2912878e+00   3.3196385e+00   3.7229021e+00   2.1771541e+00   2.2360680e+00   2.9849623e+00   3.5014283e+00   3.8807216e+00   4.6443514e+00   3.0232433e+00   2.3685439e+00   2.6324893e+00   4.2107007e+00   3.1953091e+00   2.8670542e+00   2.1118712e+00   3.1796226e+00   3.3136083e+00   3.0692019e+00   2.2248595e+00   3.5637059e+00   3.4727511e+00   2.9832868e+00   2.3811762e+00   2.7440845e+00   2.9647934e+00   2.2891046e+00   1.5811388e+00   2.1610183e+00   8.3666003e-01   1.1401754e+00   5.1961524e-01   1.4142136e+00   3.1622777e-01   1.0295630e+00   1.0099505e+00   8.3666003e-01   
1.3000000e+00   9.3273791e-01   7.8740079e-01   6.1644140e-01   5.2915026e-01   3.6055513e-01   2.4494897e-01   3.1622777e-01   5.8309519e-01   6.4031242e-01   1.4832397e+00   1.4628739e+00   1.5362291e+00   1.0862780e+00   8.6023253e-01   1.2247449e+00   8.4261498e-01   3.1622777e-01   7.0000000e-01   1.1224972e+00   1.3152946e+00   1.1618950e+00   5.1961524e-01   1.0488088e+00   2.1679483e+00   1.0954451e+00   9.9498744e-01   9.8488578e-01   5.0000000e-01   2.2383029e+00   1.0344080e+00   1.9104973e+00   1.1357817e+00   1.6093477e+00   1.1575837e+00   1.5066519e+00   2.3769729e+00   1.7944358e+00   1.9052559e+00   1.3638182e+00   2.1307276e+00   9.1651514e-01   9.6436508e-01   1.2247449e+00   1.2727922e+00   1.4525839e+00   1.2727922e+00   1.0392305e+00   2.6907248e+00   2.7549955e+00   1.0246951e+00   1.5459625e+00   1.2609520e+00   2.4738634e+00   6.8556546e-01   1.4212670e+00   1.6309506e+00   6.7823300e-01   7.7459667e-01   1.3000000e+00   1.3784049e+00   1.8055470e+00   2.4959968e+00   1.3638182e+00   6.2449980e-01   1.1618950e+00   2.1142375e+00   1.5968719e+00   1.0677078e+00   8.1240384e-01   1.1874342e+00   1.5033296e+00   1.1747340e+00   1.1357817e+00   1.6792856e+00   1.6792856e+00   1.1747340e+00   8.7749644e-01   9.3273791e-01   1.4317821e+00   1.0000000e+00   9.2195445e-01   8.2462113e-01   1.0295630e+00   1.2206556e+00   5.4772256e-01   1.6309506e+00   7.8740079e-01   7.4833148e-01   1.2727922e+00   5.3851648e-01   1.3076697e+00   9.1651514e-01   1.5033296e+00   1.2247449e+00   1.2845233e+00   1.5165751e+00   1.8384776e+00   1.9078784e+00   1.0246951e+00   7.6157731e-01   5.2915026e-01   6.1644140e-01   6.3245553e-01   1.4560220e+00   7.0710678e-01   1.2369317e+00   1.7492856e+00   1.2767145e+00   5.4772256e-01   3.8729833e-01   6.2449980e-01   1.1789826e+00   6.4807407e-01   8.4852814e-01   5.0990195e-01   6.8556546e-01   6.2449980e-01   1.1000000e+00   9.7467943e-01   5.5677644e-01   2.6814175e+00   1.4317821e+00   2.8618176e+00   2.0736441e+00   
2.4556058e+00   3.6918830e+00   7.6157731e-01   3.2202484e+00   2.4617067e+00   3.2954514e+00   1.9339080e+00   1.9104973e+00   2.3874673e+00   1.3638182e+00   1.6763055e+00   2.1118712e+00   2.1213203e+00   3.9924930e+00   4.0087405e+00   1.4525839e+00   2.6814175e+00   1.2369317e+00   3.8026307e+00   1.5394804e+00   2.5179357e+00   2.9698485e+00   1.4071247e+00   1.4352700e+00   2.1977261e+00   2.7820855e+00   3.1527766e+00   3.8871583e+00   2.2315914e+00   1.6340135e+00   1.9261360e+00   3.4626579e+00   2.3643181e+00   2.0784610e+00   1.3038405e+00   2.4062419e+00   2.5099801e+00   2.3021729e+00   1.4317821e+00   2.7604347e+00   2.6570661e+00   2.2000000e+00   1.6462078e+00   1.9570386e+00   2.1330729e+00   1.4764823e+00   1.5968719e+00   1.1357817e+00   1.9026298e+00   1.1269428e+00   2.2516660e+00   1.6155494e+00   1.2206556e+00   1.6522712e+00   8.8317609e-01   2.1400935e+00   1.4798649e+00   2.0371549e+00   1.8248288e+00   1.8708287e+00   2.1283797e+00   2.3937418e+00   2.5748786e+00   1.7492856e+00   9.2195445e-01   7.1414284e-01   6.7082039e-01   1.1532563e+00   2.1000000e+00   1.5524175e+00   2.0784610e+00   2.4062419e+00   1.6370706e+00   1.3453624e+00   9.1651514e-01   1.2083046e+00   1.8920888e+00   1.1357817e+00   3.6055513e-01   1.1958261e+00   1.4212670e+00   1.3711309e+00   1.7262677e+00   7.2111026e-01   1.2569805e+00   3.4467376e+00   2.1213203e+00   3.5185224e+00   2.7477263e+00   3.1591138e+00   4.3104524e+00   1.3228757e+00   3.8183766e+00   3.0116441e+00   4.0509258e+00   2.6925824e+00   2.5495098e+00   3.0740852e+00   1.9974984e+00   2.4083189e+00   2.8861739e+00   2.8089144e+00   4.7127487e+00   4.5716518e+00   1.8814888e+00   3.4029399e+00   1.9899749e+00   4.3783559e+00   2.1863211e+00   3.2603681e+00   3.6290495e+00   2.1000000e+00   2.1931712e+00   2.8670542e+00   3.3896903e+00   3.7376463e+00   4.5891176e+00   2.9068884e+00   2.2671568e+00   2.4779023e+00   4.0914545e+00   3.1654384e+00   2.7946377e+00   2.0808652e+00   3.1048349e+00   
3.2357379e+00   3.0116441e+00   2.1213203e+00   3.4828150e+00   3.4161382e+00   2.9103264e+00   2.2360680e+00   2.6720778e+00   2.9495762e+00   2.2383029e+00   9.6953597e-01   5.5677644e-01   7.0710678e-01   8.3666003e-01   4.2426407e-01   6.0000000e-01   9.0553851e-01   7.6811457e-01   7.0000000e-01   4.0000000e-01   9.4868330e-01   6.4807407e-01   5.5677644e-01   7.3484692e-01   1.1045361e+00   1.1489125e+00   3.3166248e-01   9.6953597e-01   9.1651514e-01   1.0099505e+00   5.2915026e-01   9.5916630e-01   5.8309519e-01   5.1961524e-01   9.4868330e-01   8.5440037e-01   3.7416574e-01   7.0000000e-01   6.7082039e-01   4.5825757e-01   5.4772256e-01   1.5362291e+00   4.6904158e-01   3.6055513e-01   3.0000000e-01   3.8729833e-01   1.5779734e+00   3.6055513e-01   2.1189620e+00   1.0344080e+00   2.1656408e+00   1.4899664e+00   1.8466185e+00   3.0016662e+00   1.1747340e+00   2.5436195e+00   1.8814888e+00   2.5806976e+00   1.2083046e+00   1.3076697e+00   1.6911535e+00   1.0862780e+00   1.2922848e+00   1.4628739e+00   1.4628739e+00   3.2588341e+00   3.3660065e+00   1.1357817e+00   1.9824228e+00   9.3273791e-01   3.1272992e+00   9.1104336e-01   1.8275667e+00   2.2494444e+00   7.6157731e-01   7.8740079e-01   1.6155494e+00   2.0639767e+00   2.4617067e+00   3.1192948e+00   1.6552945e+00   1.0049876e+00   1.4730920e+00   2.7367864e+00   1.7578396e+00   1.4282857e+00   6.7823300e-01   1.6763055e+00   1.8493242e+00   1.5684387e+00   1.0344080e+00   2.0928450e+00   1.9949937e+00   1.5099669e+00   1.1000000e+00   1.2688578e+00   1.5264338e+00   9.4868330e-01   1.0723805e+00   9.4868330e-01   1.2727922e+00   1.1401754e+00   5.4772256e-01   7.3484692e-01   5.1961524e-01   1.5132746e+00   6.7823300e-01   1.1135529e+00   9.4868330e-01   9.1104336e-01   1.1489125e+00   1.3416408e+00   1.6186414e+00   9.9498744e-01   7.0710678e-01   5.8309519e-01   6.1644140e-01   5.8309519e-01   1.3490738e+00   1.2247449e+00   1.4317821e+00   1.4282857e+00   5.9160798e-01   9.4868330e-01   6.5574385e-01   
7.8102497e-01   1.0816654e+00   4.8989795e-01   1.2247449e+00   7.3484692e-01   9.0000000e-01   8.4261498e-01   8.4261498e-01   1.3820275e+00   7.4161985e-01   2.7477263e+00   1.5198684e+00   2.5826343e+00   1.9442222e+00   2.3600847e+00   3.3421550e+00   1.4282857e+00   2.8478062e+00   2.1118712e+00   3.1717503e+00   1.8601075e+00   1.7058722e+00   2.1771541e+00   1.4764823e+00   1.8894444e+00   2.1307276e+00   1.9442222e+00   3.7656341e+00   3.6262929e+00   1.1180340e+00   2.5278449e+00   1.5264338e+00   3.3970576e+00   1.3379088e+00   2.4083189e+00   2.6608269e+00   1.2961481e+00   1.4491377e+00   2.0712315e+00   2.3832751e+00   2.7459060e+00   3.5958309e+00   2.1260292e+00   1.3820275e+00   1.7000000e+00   3.1032241e+00   2.4596748e+00   1.9646883e+00   1.3856406e+00   2.1886069e+00   2.4124676e+00   2.1260292e+00   1.5198684e+00   2.6343880e+00   2.6153394e+00   2.0639767e+00   1.4106736e+00   1.8248288e+00   2.2649503e+00   1.5811388e+00   1.2124356e+00   7.0000000e-01   5.5677644e-01   8.0622577e-01   7.4161985e-01   1.0677078e+00   5.4772256e-01   7.1414284e-01   5.0000000e-01   2.2360680e-01   5.0990195e-01   5.9160798e-01   7.1414284e-01   7.4161985e-01   2.4494897e-01   1.3601471e+00   1.2288206e+00   1.3304135e+00   9.0000000e-01   5.0000000e-01   7.4161985e-01   5.8309519e-01   6.4031242e-01   7.0710678e-01   7.9372539e-01   1.0099505e+00   7.6157731e-01   1.4142136e-01   8.4261498e-01   1.9209373e+00   7.4161985e-01   6.7823300e-01   6.4807407e-01   4.2426407e-01   2.0346990e+00   7.3484692e-01   1.7606817e+00   7.3484692e-01   1.7146428e+00   1.0049876e+00   1.4212670e+00   2.5219040e+00   1.3152946e+00   2.0396078e+00   1.3747727e+00   2.2068076e+00   8.7749644e-01   8.6023253e-01   1.2767145e+00   8.7749644e-01   1.1224972e+00   1.1618950e+00   9.8488578e-01   2.8301943e+00   2.8809721e+00   7.7459667e-01   1.5937377e+00   8.1240384e-01   2.6324893e+00   5.2915026e-01   1.4177447e+00   1.7748239e+00   4.3588989e-01   4.5825757e-01   1.1832160e+00   
1.5716234e+00   1.9773720e+00   2.7018512e+00   1.2449900e+00   4.6904158e-01   9.4868330e-01   2.3108440e+00   1.4491377e+00   9.6436508e-01   4.3588989e-01   1.2884099e+00   1.4866069e+00   1.2845233e+00   7.3484692e-01   1.6822604e+00   1.6522712e+00   1.1958261e+00   7.3484692e-01   8.8317609e-01   1.2489996e+00   6.0827625e-01   1.3784049e+00   9.2736185e-01   6.4807407e-01   1.3038405e+00   5.3851648e-01   1.3674794e+00   6.4807407e-01   1.5427249e+00   1.2165525e+00   1.0630146e+00   1.2884099e+00   1.7029386e+00   1.8275667e+00   1.0049876e+00   4.4721360e-01   5.8309519e-01   6.0000000e-01   4.2426407e-01   1.5937377e+00   9.4868330e-01   1.1445523e+00   1.5811388e+00   1.2206556e+00   5.0990195e-01   5.7445626e-01   8.6602540e-01   1.1269428e+00   5.4772256e-01   9.4868330e-01   6.3245553e-01   6.2449980e-01   6.0827625e-01   9.2195445e-01   9.0000000e-01   5.1961524e-01   2.8017851e+00   1.6401219e+00   2.8618176e+00   2.1771541e+00   2.5436195e+00   3.6945906e+00   1.2727922e+00   3.2295511e+00   2.5416530e+00   3.2771939e+00   1.9078784e+00   1.9824228e+00   2.3874673e+00   1.6186414e+00   1.8734994e+00   2.1494185e+00   2.1633308e+00   3.9547440e+00   4.0484565e+00   1.6278821e+00   2.6814175e+00   1.4798649e+00   3.8105118e+00   1.5716234e+00   2.5337719e+00   2.9427878e+00   1.4352700e+00   1.4832397e+00   2.3000000e+00   2.7386128e+00   3.1400637e+00   3.7986840e+00   2.3366643e+00   1.6703293e+00   2.0856654e+00   3.4161382e+00   2.4392622e+00   2.1307276e+00   1.3638182e+00   2.3685439e+00   2.5416530e+00   2.2315914e+00   1.6401219e+00   2.7964263e+00   2.6870058e+00   2.1863211e+00   1.7233688e+00   1.9672316e+00   2.2022716e+00   1.6124515e+00   1.1135529e+00   1.1045361e+00   1.0392305e+00   1.3820275e+00   9.8488578e-01   7.8740079e-01   8.8317609e-01   7.6157731e-01   3.8729833e-01   1.4142136e-01   5.0990195e-01   6.7823300e-01   7.4161985e-01   1.4899664e+00   1.5427249e+00   1.6062378e+00   1.1224972e+00   1.0862780e+00   1.3114877e+00   
7.9372539e-01   3.1622777e-01   9.0000000e-01   1.1489125e+00   1.4035669e+00   1.3152946e+00   6.4031242e-01   1.1224972e+00   2.2135944e+00   1.1916375e+00   1.0440307e+00   1.0440307e+00   5.5677644e-01   2.2293497e+00   1.0908712e+00   1.9924859e+00   1.3076697e+00   1.7058722e+00   1.3416408e+00   1.6278821e+00   2.4799194e+00   1.9235384e+00   2.0420578e+00   1.5748016e+00   2.1447611e+00   9.4868330e-01   1.1445523e+00   1.3114877e+00   1.4422205e+00   1.5459625e+00   1.3114877e+00   1.1916375e+00   2.7239677e+00   2.8827071e+00   1.2922848e+00   1.5968719e+00   1.3820275e+00   2.5961510e+00   8.5440037e-01   1.4899664e+00   1.7262677e+00   8.1240384e-01   8.8317609e-01   1.4525839e+00   1.5033296e+00   1.9287302e+00   2.5079872e+00   1.5033296e+00   8.6602540e-01   1.4317821e+00   2.1702534e+00   1.6401219e+00   1.2083046e+00   9.0553851e-01   1.2369317e+00   1.5620499e+00   1.1575837e+00   1.3076697e+00   1.7549929e+00   1.7146428e+00   1.2083046e+00   1.0630146e+00   1.0246951e+00   1.4662878e+00   1.1401754e+00   7.3484692e-01   1.0000000e+00   8.7749644e-01   5.5677644e-01   7.6157731e-01   9.4868330e-01   6.4807407e-01   8.5440037e-01   1.0099505e+00   1.2569805e+00   1.2247449e+00   4.1231056e-01   1.1916375e+00   1.0099505e+00   1.1224972e+00   7.6157731e-01   7.8740079e-01   2.0000000e-01   5.7445626e-01   1.1224972e+00   1.0148892e+00   4.4721360e-01   7.4161985e-01   5.1961524e-01   5.1961524e-01   7.3484692e-01   1.5937377e+00   4.6904158e-01   4.3588989e-01   3.8729833e-01   6.7082039e-01   1.7058722e+00   5.0000000e-01   1.9570386e+00   8.0622577e-01   2.1377558e+00   1.3416408e+00   1.7291616e+00   2.9614186e+00   8.8317609e-01   2.4959968e+00   1.8000000e+00   2.5455844e+00   1.2083046e+00   1.2369317e+00   1.6733201e+00   8.7177979e-01   1.1180340e+00   1.4000000e+00   1.3784049e+00   3.2218007e+00   3.3120990e+00   1.0246951e+00   1.9519221e+00   6.7082039e-01   3.0886890e+00   9.1104336e-01   1.7606817e+00   2.2226111e+00   7.6157731e-01   
7.0710678e-01   1.5000000e+00   2.0639767e+00   2.4494897e+00   3.1288976e+00   1.5427249e+00   9.4339811e-01   1.2767145e+00   2.7586228e+00   1.6340135e+00   1.3190906e+00   5.8309519e-01   1.6941074e+00   1.8000000e+00   1.6431677e+00   8.0622577e-01   2.0199010e+00   1.9339080e+00   1.5297059e+00   1.0723805e+00   1.2449900e+00   1.4035669e+00   7.3484692e-01   9.0553851e-01   3.6055513e-01   1.1789826e+00   4.4721360e-01   1.0862780e+00   7.0710678e-01   7.2801099e-01   9.8994949e-01   1.2884099e+00   1.4832397e+00   7.0000000e-01   6.1644140e-01   5.2915026e-01   5.8309519e-01   2.8284271e-01   1.1832160e+00   8.1240384e-01   1.0246951e+00   1.2569805e+00   7.6811457e-01   4.6904158e-01   4.7958315e-01   4.7958315e-01   7.6811457e-01   2.4494897e-01   1.2000000e+00   3.7416574e-01   3.8729833e-01   3.8729833e-01   5.7445626e-01   1.3228757e+00   3.3166248e-01   2.5436195e+00   1.3453624e+00   2.4959968e+00   1.7832555e+00   2.2158520e+00   3.2848135e+00   1.2247449e+00   2.7874720e+00   2.0928450e+00   3.0033315e+00   1.6552945e+00   1.6155494e+00   2.0639767e+00   1.3638182e+00   1.7233688e+00   1.9339080e+00   1.7832555e+00   3.6083237e+00   3.6262929e+00   1.1618950e+00   2.3895606e+00   1.3000000e+00   3.3734256e+00   1.2369317e+00   2.2226111e+00   2.5416530e+00   1.1401754e+00   1.2083046e+00   1.9570386e+00   2.3021729e+00   2.7166155e+00   3.4510868e+00   2.0149442e+00   1.2288206e+00   1.5842980e+00   3.0643107e+00   2.2248595e+00   1.7663522e+00   1.1224972e+00   2.0663978e+00   2.2759613e+00   2.0149442e+00   1.3453624e+00   2.4859606e+00   2.4454039e+00   1.9493589e+00   1.3820275e+00   1.6703293e+00   2.0074860e+00   1.3190906e+00   9.8488578e-01   1.1269428e+00   8.1240384e-01   5.0990195e-01   7.0710678e-01   7.8102497e-01   9.0553851e-01   9.0553851e-01   1.0862780e+00   7.2801099e-01   1.2884099e+00   1.0862780e+00   1.1916375e+00   9.2736185e-01   8.1240384e-01   1.1313708e+00   1.2206556e+00   1.0488088e+00   2.6457513e-01   1.0954451e+00   
9.3273791e-01   8.6602540e-01   8.1853528e-01   8.1240384e-01   1.7720045e+00   8.6023253e-01   1.0344080e+00   9.3273791e-01   7.5498344e-01   1.9261360e+00   9.0000000e-01   2.1142375e+00   9.6436508e-01   1.9416488e+00   1.3416408e+00   1.7058722e+00   2.7147744e+00   1.3490738e+00   2.2427661e+00   1.4560220e+00   2.5534291e+00   1.3038405e+00   1.0440307e+00   1.5362291e+00   9.1651514e-01   1.3000000e+00   1.5231546e+00   1.3490738e+00   3.1843367e+00   2.9681644e+00   5.3851648e-01   1.8894444e+00   1.0630146e+00   2.7748874e+00   7.1414284e-01   1.8055470e+00   2.0832667e+00   7.3484692e-01   9.4868330e-01   1.4035669e+00   1.8275667e+00   2.1260292e+00   3.0512293e+00   1.4491377e+00   8.5440037e-01   1.1789826e+00   2.4677925e+00   1.8627936e+00   1.3928388e+00   9.2736185e-01   1.5716234e+00   1.7549929e+00   1.5165751e+00   9.6436508e-01   1.9899749e+00   1.9748418e+00   1.4212670e+00   7.1414284e-01   1.2124356e+00   1.7000000e+00   1.0862780e+00   1.3711309e+00   6.2449980e-01   1.2845233e+00   9.9498744e-01   1.0000000e+00   1.2609520e+00   1.5588457e+00   1.7406895e+00   9.1651514e-01   4.3588989e-01   1.7320508e-01   2.6457513e-01   3.0000000e-01   1.3747727e+00   9.0000000e-01   1.2569805e+00   1.5394804e+00   9.0553851e-01   5.7445626e-01   2.4494897e-01   5.2915026e-01   1.0392305e+00   2.6457513e-01   8.7749644e-01   4.1231056e-01   6.0000000e-01   5.4772256e-01   8.4852814e-01   1.0295630e+00   4.2426407e-01   2.7386128e+00   1.4696938e+00   2.7386128e+00   2.0074860e+00   2.4248711e+00   3.5411862e+00   1.1000000e+00   3.0495901e+00   2.3043437e+00   3.2511536e+00   1.8841444e+00   1.8110770e+00   2.2912878e+00   1.4247807e+00   1.8055470e+00   2.1283797e+00   2.0273135e+00   3.8923001e+00   3.8548671e+00   1.2727922e+00   2.6191602e+00   1.3784049e+00   3.6262929e+00   1.4212670e+00   2.4677925e+00   2.8195744e+00   1.3228757e+00   1.4106736e+00   2.1494185e+00   2.5826343e+00   2.9681644e+00   3.7469988e+00   2.1977261e+00   1.4764823e+00   
1.8000000e+00   3.3075671e+00   2.4248711e+00   2.0124612e+00   1.3076697e+00   2.3021729e+00   2.4799194e+00   2.2203603e+00   1.4696938e+00   2.7147744e+00   2.6551836e+00   2.1424285e+00   1.5297059e+00   1.8867962e+00   2.2045408e+00   1.5066519e+00   1.0440307e+00   8.6602540e-01   7.5498344e-01   9.1651514e-01   9.2195445e-01   1.0630146e+00   8.5440037e-01   5.2915026e-01   1.6522712e+00   1.5132746e+00   1.6278821e+00   1.1958261e+00   6.2449980e-01   6.8556546e-01   4.2426407e-01   8.6602540e-01   1.1747340e+00   9.3273791e-01   1.2409674e+00   1.0198039e+00   5.2915026e-01   1.1704700e+00   2.1236761e+00   9.7467943e-01   8.9442719e-01   8.6023253e-01   8.2462113e-01   2.2045408e+00   9.6953597e-01   1.4491377e+00   6.0000000e-01   1.6673332e+00   9.4339811e-01   1.2489996e+00   2.5019992e+00   1.2609520e+00   2.0736441e+00   1.4594520e+00   2.0074860e+00   7.0000000e-01   8.7177979e-01   1.1958261e+00   7.8102497e-01   7.8740079e-01   8.6602540e-01   9.4339811e-01   2.7147744e+00   2.8740216e+00   1.0677078e+00   1.4352700e+00   5.4772256e-01   2.6551836e+00   6.4807407e-01   1.2449900e+00   1.7691806e+00   5.0000000e-01   3.0000000e-01   1.0677078e+00   1.6643317e+00   2.0273135e+00   2.6381812e+00   1.1000000e+00   7.0710678e-01   1.0954451e+00   2.2847319e+00   1.0954451e+00   8.6602540e-01   2.2360680e-01   1.2083046e+00   1.2845233e+00   1.1618950e+00   6.0000000e-01   1.5066519e+00   1.3964240e+00   1.0440307e+00   8.3666003e-01   7.7459667e-01   8.6023253e-01   3.6055513e-01   9.8994949e-01   7.0710678e-01   4.3588989e-01   6.7823300e-01   1.0677078e+00   1.2489996e+00   5.5677644e-01   7.3484692e-01   7.7459667e-01   8.3666003e-01   3.4641016e-01   1.1489125e+00   9.0553851e-01   8.4261498e-01   9.8994949e-01   6.7082039e-01   5.4772256e-01   6.7082039e-01   7.5498344e-01   6.4031242e-01   3.7416574e-01   1.4282857e+00   5.4772256e-01   5.0000000e-01   4.5825757e-01   3.3166248e-01   1.4594520e+00   4.1231056e-01   2.3937418e+00   1.2922848e+00   
2.3000000e+00   1.6911535e+00   2.0615528e+00   3.1128765e+00   1.3928388e+00   2.6438608e+00   1.9849433e+00   2.7748874e+00   1.4212670e+00   1.4662878e+00   1.8493242e+00   1.3190906e+00   1.5842980e+00   1.7146428e+00   1.6431677e+00   3.4146742e+00   3.4655447e+00   1.1874342e+00   2.1656408e+00   1.2449900e+00   3.2155870e+00   1.0535654e+00   2.0346990e+00   2.3706539e+00   9.4868330e-01   1.0488088e+00   1.8138357e+00   2.1400935e+00   2.5416530e+00   3.2388269e+00   1.8601075e+00   1.1357817e+00   1.6155494e+00   2.8301943e+00   2.0420578e+00   1.6370706e+00   9.6953597e-01   1.8248288e+00   2.0542639e+00   1.7146428e+00   1.2922848e+00   2.2934690e+00   2.2226111e+00   1.6852300e+00   1.2206556e+00   1.4594520e+00   1.8248288e+00   1.2409674e+00   5.0990195e-01   7.5498344e-01   7.7459667e-01   6.0000000e-01   6.7823300e-01   6.4031242e-01   1.6062378e+00   1.4212670e+00   1.5297059e+00   1.1747340e+00   4.2426407e-01   1.1045361e+00   1.0344080e+00   7.4833148e-01   5.7445626e-01   1.1916375e+00   1.2206556e+00   9.9498744e-01   6.2449980e-01   1.0770330e+00   2.1307276e+00   1.0295630e+00   1.0908712e+00   1.0246951e+00   7.5498344e-01   2.2825424e+00   1.0630146e+00   1.6881943e+00   7.0000000e-01   1.5000000e+00   8.6023253e-01   1.2609520e+00   2.2781571e+00   1.4696938e+00   1.7916473e+00   1.0295630e+00   2.1118712e+00   9.0553851e-01   6.0827625e-01   1.1045361e+00   7.8740079e-01   1.0908712e+00   1.1401754e+00   8.6023253e-01   2.7166155e+00   2.5709920e+00   4.3588989e-01   1.4594520e+00   9.1104336e-01   2.3537205e+00   3.6055513e-01   1.3416408e+00   1.6124515e+00   4.4721360e-01   6.1644140e-01   9.7467943e-01   1.3711309e+00   1.7029386e+00   2.5980762e+00   1.0392305e+00   3.6055513e-01   7.4161985e-01   2.0712315e+00   1.4525839e+00   9.0553851e-01   6.6332496e-01   1.1532563e+00   1.3490738e+00   1.1832160e+00   7.0000000e-01   1.5427249e+00   1.5620499e+00   1.0677078e+00   4.1231056e-01   7.9372539e-01   1.3076697e+00   7.3484692e-01   
5.1961524e-01   6.4807407e-01   7.3484692e-01   8.6023253e-01   3.8729833e-01   1.2961481e+00   1.1575837e+00   1.2489996e+00   8.6023253e-01   5.8309519e-01   8.1240384e-01   7.5498344e-01   7.3484692e-01   6.2449980e-01   8.1240384e-01   9.7467943e-01   7.0000000e-01   3.0000000e-01   7.8740079e-01   1.8601075e+00   7.2111026e-01   6.7082039e-01   6.5574385e-01   4.3588989e-01   1.9974984e+00   7.2801099e-01   1.9157244e+00   8.6602540e-01   1.8138357e+00   1.1045361e+00   1.5524175e+00   2.5903668e+00   1.3490738e+00   2.0904545e+00   1.4212670e+00   2.3452079e+00   1.0583005e+00   9.7467943e-01   1.4071247e+00   9.8994949e-01   1.3000000e+00   1.3490738e+00   1.0954451e+00   2.9257478e+00   2.9410882e+00   7.4161985e-01   1.7349352e+00   9.6436508e-01   2.6832816e+00   6.7082039e-01   1.5556349e+00   1.8493242e+00   6.1644140e-01   6.6332496e-01   1.3076697e+00   1.6186414e+00   2.0346990e+00   2.7874720e+00   1.3784049e+00   5.3851648e-01   9.4339811e-01   2.4020824e+00   1.6278821e+00   1.0862780e+00   6.4807407e-01   1.4247807e+00   1.6431677e+00   1.4491377e+00   8.6602540e-01   1.8165902e+00   1.8165902e+00   1.3638182e+00   8.4261498e-01   1.0440307e+00   1.4387495e+00   7.7459667e-01   2.6457513e-01   6.5574385e-01   8.6602540e-01   4.8989795e-01   1.1445523e+00   1.1618950e+00   1.2288206e+00   7.5498344e-01   9.6436508e-01   1.0440307e+00   7.3484692e-01   5.7445626e-01   6.1644140e-01   8.3066239e-01   1.0295630e+00   9.5916630e-01   4.4721360e-01   7.4161985e-01   1.8466185e+00   8.3066239e-01   7.2111026e-01   7.0710678e-01   2.0000000e-01   1.8920888e+00   7.3484692e-01   2.1213203e+00   1.1832160e+00   1.9235384e+00   1.3964240e+00   1.7549929e+00   2.7166155e+00   1.6155494e+00   2.2494444e+00   1.6583124e+00   2.4103942e+00   1.1090537e+00   1.1832160e+00   1.5000000e+00   1.2767145e+00   1.4899664e+00   1.4456832e+00   1.3076697e+00   3.0116441e+00   3.0886890e+00   1.0862780e+00   1.8165902e+00   1.2247449e+00   2.8195744e+00   8.1240384e-01   
1.6881943e+00   1.9672316e+00   7.4161985e-01   8.4261498e-01   1.5297059e+00   1.7291616e+00   2.1470911e+00   2.8213472e+00   1.5842980e+00   8.3666003e-01   1.3711309e+00   2.4372115e+00   1.7776389e+00   1.3152946e+00   8.1853528e-01   1.4628739e+00   1.7406895e+00   1.3892444e+00   1.1832160e+00   1.9519221e+00   1.9104973e+00   1.3820275e+00   1.0099505e+00   1.1489125e+00   1.5811388e+00   1.0723805e+00   4.8989795e-01   6.7823300e-01   6.2449980e-01   1.3928388e+00   1.4212670e+00   1.4899664e+00   1.0099505e+00   9.8994949e-01   1.2083046e+00   7.5498344e-01   3.4641016e-01   7.6811457e-01   1.0488088e+00   1.2767145e+00   1.1874342e+00   5.3851648e-01   1.0000000e+00   2.1023796e+00   1.0677078e+00   9.4339811e-01   9.3273791e-01   4.3588989e-01   2.1330729e+00   9.7467943e-01   1.9874607e+00   1.2124356e+00   1.7291616e+00   1.3038405e+00   1.6155494e+00   2.5159491e+00   1.8000000e+00   2.0663978e+00   1.5427249e+00   2.1954498e+00   9.4868330e-01   1.0908712e+00   1.3190906e+00   1.3341664e+00   1.4730920e+00   1.3038405e+00   1.1747340e+00   2.7892651e+00   2.9034462e+00   1.1704700e+00   1.6217275e+00   1.2845233e+00   2.6267851e+00   7.6811457e-01   1.5099669e+00   1.7663522e+00   7.2111026e-01   8.1240384e-01   1.4177447e+00   1.5362291e+00   1.9544820e+00   2.5865034e+00   1.4696938e+00   7.9372539e-01   1.3601471e+00   2.2158520e+00   1.6401219e+00   1.1916375e+00   8.2462113e-01   1.2609520e+00   1.5684387e+00   1.1832160e+00   1.2124356e+00   1.7720045e+00   1.7320508e+00   1.2083046e+00   9.7467943e-01   1.0049876e+00   1.4594520e+00   1.0677078e+00   4.2426407e-01   8.6602540e-01   1.7606817e+00   1.7146428e+00   1.7944358e+00   1.3638182e+00   8.8317609e-01   1.4491377e+00   1.0630146e+00   3.4641016e-01   8.1853528e-01   1.4071247e+00   1.5588457e+00   1.3892444e+00   7.5498344e-01   1.3114877e+00   2.4289916e+00   1.3490738e+00   1.2845233e+00   1.2609520e+00   7.9372539e-01   2.5119713e+00   1.3076697e+00   1.7748239e+00   1.1618950e+00   
1.3527749e+00   1.0295630e+00   1.3304135e+00   2.1000000e+00   1.9697716e+00   1.6340135e+00   1.1224972e+00   1.9235384e+00   8.3666003e-01   8.1853528e-01   1.0099505e+00   1.3038405e+00   1.4456832e+00   1.1747340e+00   8.8317609e-01   2.4617067e+00   2.4637370e+00   1.0246951e+00   1.3379088e+00   1.3453624e+00   2.1863211e+00   6.5574385e-01   1.2489996e+00   1.3856406e+00   7.2111026e-01   8.3666003e-01   1.1357817e+00   1.1135529e+00   1.5165751e+00   2.2649503e+00   1.2000000e+00   5.9160798e-01   1.0816654e+00   1.8303005e+00   1.5000000e+00   9.4868330e-01   9.1651514e-01   9.7467943e-01   1.3190906e+00   1.0000000e+00   1.1618950e+00   1.4764823e+00   1.5099669e+00   1.0099505e+00   7.9372539e-01   8.0622577e-01   1.3747727e+00   1.0488088e+00   8.8881944e-01   1.9748418e+00   1.8973666e+00   1.9949937e+00   1.5362291e+00   7.7459667e-01   1.4071247e+00   9.5393920e-01   3.7416574e-01   1.0816654e+00   1.4764823e+00   1.6881943e+00   1.4866069e+00   7.8102497e-01   1.4899664e+00   2.6000000e+00   1.4491377e+00   1.3747727e+00   1.3453624e+00   9.5393920e-01   2.6776856e+00   1.4177447e+00   1.3747727e+00   9.7467943e-01   1.0630146e+00   7.3484692e-01   9.6436508e-01   1.8788294e+00   1.9339080e+00   1.4387495e+00   9.4868330e-01   1.5684387e+00   4.2426407e-01   5.5677644e-01   6.4807407e-01   1.1575837e+00   1.1618950e+00   7.6157731e-01   5.4772256e-01   2.1863211e+00   2.2649503e+00   1.0816654e+00   9.6436508e-01   1.1618950e+00   2.0049938e+00   5.1961524e-01   8.6023253e-01   1.1401754e+00   5.8309519e-01   6.1644140e-01   8.0622577e-01   9.4868330e-01   1.3341664e+00   2.0322401e+00   8.6023253e-01   5.0000000e-01   9.8488578e-01   1.6031220e+00   1.0816654e+00   6.0000000e-01   7.3484692e-01   6.0827625e-01   9.2736185e-01   6.4807407e-01   9.7467943e-01   1.1045361e+00   1.1045361e+00   6.3245553e-01   6.7082039e-01   4.1231056e-01   9.6436508e-01   8.1240384e-01   1.1958261e+00   1.0723805e+00   1.1789826e+00   7.2801099e-01   6.4031242e-01   
6.0827625e-01   5.0990195e-01   7.5498344e-01   7.0710678e-01   6.0827625e-01   8.3666003e-01   6.6332496e-01   2.0000000e-01   6.8556546e-01   1.7464249e+00   5.7445626e-01   5.2915026e-01   4.6904158e-01   3.4641016e-01   1.8384776e+00   5.4772256e-01   1.8708287e+00   7.7459667e-01   1.8814888e+00   1.1789826e+00   1.5620499e+00   2.7092434e+00   1.1874342e+00   2.2405357e+00   1.5588457e+00   2.3430749e+00   9.7467943e-01   1.0000000e+00   1.4177447e+00   8.6602540e-01   1.1045361e+00   1.2369317e+00   1.1618950e+00   3.0049958e+00   3.0626786e+00   8.6023253e-01   1.7262677e+00   7.6157731e-01   2.8266588e+00   6.1644140e-01   1.5652476e+00   1.9672316e+00   4.7958315e-01   5.1961524e-01   1.3190906e+00   1.7748239e+00   2.1656408e+00   2.8774989e+00   1.3674794e+00   6.7823300e-01   1.1489125e+00   2.4698178e+00   1.5362291e+00   1.1357817e+00   4.3588989e-01   1.4212670e+00   1.5968719e+00   1.3601471e+00   7.7459667e-01   1.8248288e+00   1.7578396e+00   1.2767145e+00   8.1240384e-01   1.0000000e+00   1.3190906e+00   6.8556546e-01   4.2426407e-01   3.4641016e-01   4.6904158e-01   1.7378147e+00   1.2247449e+00   1.4456832e+00   1.7146428e+00   1.1618950e+00   7.8740079e-01   6.2449980e-01   9.4339811e-01   1.3000000e+00   5.4772256e-01   7.8740079e-01   7.7459667e-01   8.3066239e-01   8.1853528e-01   1.0344080e+00   7.9372539e-01   7.0000000e-01   3.0577770e+00   1.8411953e+00   3.0149627e+00   2.3452079e+00   2.7440845e+00   3.8196859e+00   1.4628739e+00   3.3361655e+00   2.6343880e+00   3.5014283e+00   2.1354157e+00   2.1330729e+00   2.5651511e+00   1.8055470e+00   2.1377558e+00   2.4041631e+00   2.3323808e+00   4.1376322e+00   4.1533119e+00   1.6583124e+00   2.8861739e+00   1.7349352e+00   3.9089641e+00   1.7233688e+00   2.7459060e+00   3.0822070e+00   1.6186414e+00   1.7088007e+00   2.4799194e+00   2.8390139e+00   3.2403703e+00   3.9610605e+00   2.5258662e+00   1.7916473e+00   2.1748563e+00   3.5510562e+00   2.7147744e+00   2.3194827e+00   1.6062378e+00   
2.5514702e+00   2.7604347e+00   2.4372115e+00   1.8411953e+00   3.0033315e+00   2.9291637e+00   2.3958297e+00   1.8520259e+00   2.1656408e+00   2.4879711e+00   1.8439089e+00   1.4142136e-01   4.4721360e-01   1.5099669e+00   1.0099505e+00   1.4106736e+00   1.7029386e+00   1.0246951e+00   7.0710678e-01   3.0000000e-01   6.4031242e-01   1.2041595e+00   4.2426407e-01   7.2111026e-01   5.4772256e-01   7.5498344e-01   7.0000000e-01   1.0148892e+00   9.0000000e-01   5.7445626e-01   2.8722813e+00   1.5842980e+00   2.8861739e+00   2.1494185e+00   2.5632011e+00   3.6891733e+00   1.1045361e+00   3.1984371e+00   2.4372115e+00   3.4029399e+00   2.0346990e+00   1.9467922e+00   2.4372115e+00   1.5165751e+00   1.9052559e+00   2.2671568e+00   2.1771541e+00   4.0521599e+00   3.9912404e+00   1.3747727e+00   2.7658633e+00   1.4798649e+00   3.7709415e+00   1.5588457e+00   2.6191602e+00   2.9765752e+00   1.4628739e+00   1.5556349e+00   2.2825424e+00   2.7386128e+00   3.1144823e+00   3.9102430e+00   2.3280893e+00   1.6278821e+00   1.9313208e+00   3.4539832e+00   2.5632011e+00   2.1633308e+00   1.4491377e+00   2.4515301e+00   2.6191602e+00   2.3622024e+00   1.5842980e+00   2.8600699e+00   2.7964263e+00   2.2803509e+00   1.6522712e+00   2.0322401e+00   2.3430749e+00   1.6431677e+00   5.0990195e-01   1.6309506e+00   1.1224972e+00   1.5000000e+00   1.7832555e+00   1.1090537e+00   7.8740079e-01   4.3588989e-01   7.5498344e-01   1.3000000e+00   5.0990195e-01   6.4807407e-01   6.6332496e-01   8.3066239e-01   7.9372539e-01   1.0908712e+00   8.1853528e-01   6.7082039e-01   2.9983329e+00   1.7175564e+00   2.9949958e+00   2.2671568e+00   2.6851443e+00   3.7934153e+00   1.2247449e+00   3.3000000e+00   2.5495098e+00   3.5128336e+00   2.1447611e+00   2.0663978e+00   2.5495098e+00   1.6552945e+00   2.0420578e+00   2.3874673e+00   2.2891046e+00   4.1521079e+00   4.1000000e+00   1.4933185e+00   2.8792360e+00   1.6155494e+00   3.8729833e+00   1.6763055e+00   2.7313001e+00   3.0757113e+00   1.5811388e+00   
1.6733201e+00   2.4062419e+00   2.8319605e+00   3.2155870e+00   4.0012498e+00   2.4535688e+00   1.7349352e+00   2.0420578e+00   3.5566838e+00   2.6851443e+00   2.2759613e+00   1.5684387e+00   2.5592968e+00   2.7386128e+00   2.4698178e+00   1.7175564e+00   2.9765752e+00   2.9154759e+00   2.3958297e+00   1.7748239e+00   2.1470911e+00   2.4637370e+00   1.7663522e+00   1.2806248e+00   8.3666003e-01   1.0246951e+00   1.3038405e+00   8.1853528e-01   4.2426407e-01   3.8729833e-01   5.9160798e-01   8.4261498e-01   1.4142136e-01   1.0954451e+00   3.7416574e-01   4.3588989e-01   3.8729833e-01   6.0827625e-01   1.1618950e+00   2.6457513e-01   2.5903668e+00   1.3892444e+00   2.5670995e+00   1.8814888e+00   2.2781571e+00   3.3808283e+00   1.2083046e+00   2.9000000e+00   2.1954498e+00   3.0495901e+00   1.6792856e+00   1.6763055e+00   2.1118712e+00   1.3784049e+00   1.7000000e+00   1.9442222e+00   1.8708287e+00   3.6959437e+00   3.7188708e+00   1.2609520e+00   2.4310492e+00   1.3000000e+00   3.4785054e+00   1.2688578e+00   2.2847319e+00   2.6419690e+00   1.1575837e+00   1.2409674e+00   2.0174241e+00   2.4124676e+00   2.8106939e+00   3.5369478e+00   2.0639767e+00   1.3379088e+00   1.7406895e+00   3.1224990e+00   2.2516660e+00   1.8547237e+00   1.1401754e+00   2.1047565e+00   2.3021729e+00   2.0049938e+00   1.3892444e+00   2.5416530e+00   2.4698178e+00   1.9493589e+00   1.4106736e+00   1.7058722e+00   2.0273135e+00   1.3784049e+00   9.0553851e-01   9.2195445e-01   9.0553851e-01   9.1104336e-01   1.1575837e+00   1.2609520e+00   9.5393920e-01   6.2449980e-01   1.1916375e+00   2.1817424e+00   1.0295630e+00   1.0723805e+00   1.0148892e+00   9.0000000e-01   2.3473389e+00   1.0908712e+00   1.4387495e+00   3.6055513e-01   1.4798649e+00   6.4807407e-01   1.0908712e+00   2.2693611e+00   1.2727922e+00   1.7916473e+00   1.0295630e+00   2.0149442e+00   8.1240384e-01   5.3851648e-01   1.0677078e+00   5.4772256e-01   8.3066239e-01   9.6953597e-01   7.3484692e-01   2.6495283e+00   2.5748786e+00   
5.1961524e-01   1.3820275e+00   6.0827625e-01   2.3706539e+00   4.1231056e-01   1.2083046e+00   1.5937377e+00   4.2426407e-01   4.2426407e-01   8.1853528e-01   1.4212670e+00   1.7492856e+00   2.5826343e+00   8.8317609e-01   3.3166248e-01   5.5677644e-01   2.1142375e+00   1.2124356e+00   7.2111026e-01   4.6904158e-01   1.1445523e+00   1.2409674e+00   1.2083046e+00   3.6055513e-01   1.4212670e+00   1.4212670e+00   1.0392305e+00   4.7958315e-01   7.1414284e-01   1.0535654e+00   3.7416574e-01   7.2801099e-01   1.3190906e+00   1.1618950e+00   4.8989795e-01   7.4161985e-01   5.1961524e-01   7.1414284e-01   8.1240384e-01   1.5297059e+00   5.0990195e-01   5.1961524e-01   4.7958315e-01   8.5440037e-01   1.6583124e+00   5.7445626e-01   2.0371549e+00   8.7749644e-01   2.2825424e+00   1.4560220e+00   1.8411953e+00   3.1000000e+00   7.3484692e-01   2.6362853e+00   1.9287302e+00   2.6758176e+00   1.3638182e+00   1.3747727e+00   1.8220867e+00   9.1651514e-01   1.1704700e+00   1.5231546e+00   1.5165751e+00   3.3555923e+00   3.4423829e+00   1.1180340e+00   2.0904545e+00   7.0000000e-01   3.2280025e+00   1.0723805e+00   1.8920888e+00   2.3706539e+00   9.2736185e-01   8.6023253e-01   1.6155494e+00   2.2226111e+00   2.6000000e+00   3.2787193e+00   1.6552945e+00   1.1000000e+00   1.3674794e+00   2.9137605e+00   1.7291616e+00   1.4491377e+00   7.3484692e-01   1.8520259e+00   1.9287302e+00   1.8055470e+00   8.7749644e-01   2.1447611e+00   2.0542639e+00   1.6792856e+00   1.2124356e+00   1.3964240e+00   1.5000000e+00   8.3666003e-01   7.9372539e-01   1.1832160e+00   7.5498344e-01   1.1832160e+00   1.0295630e+00   4.6904158e-01   1.0440307e+00   2.0024984e+00   9.1104336e-01   7.0710678e-01   7.2111026e-01   6.4807407e-01   2.0297783e+00   8.3666003e-01   1.7776389e+00   9.8994949e-01   1.8920888e+00   1.2609520e+00   1.5684387e+00   2.7166155e+00   1.4247807e+00   2.2847319e+00   1.7406895e+00   2.2022716e+00   9.0000000e-01   1.1747340e+00   1.4317821e+00   1.1445523e+00   1.1832160e+00   
1.1532563e+00   1.2041595e+00   2.8722813e+00   3.1272992e+00   1.3038405e+00   1.6673332e+00   9.1651514e-01   2.8722813e+00   8.8317609e-01   1.4798649e+00   1.9416488e+00   7.2801099e-01   6.0827625e-01   1.4071247e+00   1.8138357e+00   2.2293497e+00   2.7459060e+00   1.4456832e+00   9.0553851e-01   1.3784049e+00   2.4698178e+00   1.3928388e+00   1.1357817e+00   5.3851648e-01   1.4000000e+00   1.5588457e+00   1.3228757e+00   9.8994949e-01   1.7691806e+00   1.6583124e+00   1.2767145e+00   1.1135529e+00   1.0295630e+00   1.1575837e+00   7.5498344e-01   9.6436508e-01   1.2727922e+00   1.5264338e+00   1.3674794e+00   6.2449980e-01   1.2806248e+00   2.3958297e+00   1.2884099e+00   1.1618950e+00   1.1532563e+00   7.0000000e-01   2.4433583e+00   1.2206556e+00   1.7000000e+00   1.1357817e+00   1.4035669e+00   1.0488088e+00   1.3228757e+00   2.1886069e+00   1.9183326e+00   1.7464249e+00   1.2884099e+00   1.8601075e+00   6.7823300e-01   8.7749644e-01   1.0099505e+00   1.3038405e+00   1.3674794e+00   1.0488088e+00   8.8317609e-01   2.4454039e+00   2.5942244e+00   1.1789826e+00   1.3000000e+00   1.2609520e+00   2.3108440e+00   6.7082039e-01   1.1832160e+00   1.4282857e+00   6.6332496e-01   7.0710678e-01   1.1618950e+00   1.2165525e+00   1.6431677e+00   2.2516660e+00   1.2165525e+00   6.4031242e-01   1.1958261e+00   1.9000000e+00   1.3674794e+00   9.0553851e-01   7.7459667e-01   9.4339811e-01   1.2727922e+00   9.1651514e-01   1.1357817e+00   1.4491377e+00   1.4282857e+00   9.4868330e-01   8.7749644e-01   7.4161985e-01   1.2124356e+00   9.4868330e-01   1.0344080e+00   9.1651514e-01   8.6023253e-01   7.6157731e-01   7.1414284e-01   1.7291616e+00   8.3066239e-01   9.4868330e-01   8.7177979e-01   6.1644140e-01   1.8654758e+00   8.3666003e-01   2.2360680e+00   1.1224972e+00   2.0049938e+00   1.4317821e+00   1.8165902e+00   2.7676705e+00   1.4730920e+00   2.2847319e+00   1.5524175e+00   2.6134269e+00   1.3527749e+00   1.1575837e+00   1.6093477e+00   1.1180340e+00   1.4832397e+00   
1.6217275e+00   1.4106736e+00   3.2109189e+00   3.0495901e+00   7.0710678e-01   1.9646883e+00   1.2165525e+00   2.8266588e+00   8.1240384e-01   1.8681542e+00   2.1047565e+00   8.1853528e-01   1.0148892e+00   1.5297059e+00   1.8303005e+00   2.1702534e+00   3.0495901e+00   1.5842980e+00   8.8317609e-01   1.2569805e+00   2.5179357e+00   1.9646883e+00   1.4525839e+00   9.9498744e-01   1.6248077e+00   1.8574176e+00   1.5779734e+00   1.1224972e+00   2.0760539e+00   2.0712315e+00   1.5132746e+00   8.7177979e-01   1.2884099e+00   1.7944358e+00   1.1789826e+00   5.1961524e-01   5.1961524e-01   7.1414284e-01   4.6904158e-01   1.2569805e+00   3.1622777e-01   1.7320508e-01   1.7320508e-01   6.4031242e-01   1.3228757e+00   2.2360680e-01   2.3727621e+00   1.2206556e+00   2.4758837e+00   1.7320508e+00   2.1236761e+00   3.3000000e+00   1.0295630e+00   2.8266588e+00   2.1447611e+00   2.8913665e+00   1.5297059e+00   1.5905974e+00   2.0099751e+00   1.2489996e+00   1.5132746e+00   1.7663522e+00   1.7378147e+00   3.5524639e+00   3.6619667e+00   1.2845233e+00   2.3000000e+00   1.0816654e+00   3.4205263e+00   1.2124356e+00   2.1213203e+00   2.5416530e+00   1.0677078e+00   1.0677078e+00   1.8894444e+00   2.3537205e+00   2.7640550e+00   3.4219877e+00   1.9339080e+00   1.2529964e+00   1.6340135e+00   3.0675723e+00   2.0273135e+00   1.6911535e+00   9.4868330e-01   2.0074860e+00   2.1633308e+00   1.9235384e+00   1.2206556e+00   2.3916521e+00   2.3021729e+00   1.8493242e+00   1.3820275e+00   1.5842980e+00   1.7916473e+00   1.1575837e+00   4.2426407e-01   9.8994949e-01   3.3166248e-01   9.3273791e-01   3.0000000e-01   5.8309519e-01   4.8989795e-01   8.6023253e-01   1.0954451e+00   3.7416574e-01   2.5922963e+00   1.3038405e+00   2.6570661e+00   1.9000000e+00   2.3021729e+00   3.4727511e+00   8.7749644e-01   2.9899833e+00   2.2203603e+00   3.1543621e+00   1.7860571e+00   1.7029386e+00   2.1977261e+00   1.2369317e+00   1.6124515e+00   1.9974984e+00   1.9364917e+00   3.8249183e+00   3.7762415e+00   
1.1747340e+00   2.5179357e+00   1.1832160e+00   3.5651087e+00   1.3190906e+00   2.3685439e+00   2.7622455e+00   1.2124356e+00   1.2922848e+00   2.0248457e+00   2.5436195e+00   2.9103264e+00   3.7013511e+00   2.0663978e+00   1.4071247e+00   1.7146428e+00   3.2403703e+00   2.2847319e+00   1.9157244e+00   1.1789826e+00   2.2181073e+00   2.3600847e+00   2.1283797e+00   1.3038405e+00   2.6057628e+00   2.5317978e+00   2.0322401e+00   1.4142136e+00   1.7832555e+00   2.0639767e+00   1.3674794e+00   7.7459667e-01   5.0000000e-01   1.2609520e+00   2.6457513e-01   4.8989795e-01   4.2426407e-01   7.7459667e-01   1.4628739e+00   4.2426407e-01   2.3194827e+00   1.0392305e+00   2.4041631e+00   1.5905974e+00   2.0297783e+00   3.1968735e+00   7.9372539e-01   2.7018512e+00   1.9416488e+00   2.9103264e+00   1.5779734e+00   1.4560220e+00   1.9672316e+00   1.0246951e+00   1.4352700e+00   1.7860571e+00   1.6522712e+00   3.5454196e+00   3.5071356e+00   9.2736185e-01   2.2847319e+00   9.6953597e-01   3.2878564e+00   1.1224972e+00   2.1047565e+00   2.4839485e+00   1.0246951e+00   1.0630146e+00   1.7606817e+00   2.2737634e+00   2.6514147e+00   3.4409301e+00   1.8138357e+00   1.1224972e+00   1.3564660e+00   3.0166206e+00   2.0396078e+00   1.6217275e+00   9.6436508e-01   2.0049938e+00   2.1377558e+00   1.9773720e+00   1.0392305e+00   2.3473389e+00   2.3043437e+00   1.8574176e+00   1.2247449e+00   1.5620499e+00   1.8275667e+00   1.0816654e+00   8.0622577e-01   1.8841444e+00   7.1414284e-01   6.0000000e-01   5.8309519e-01   3.4641016e-01   1.9748418e+00   6.7823300e-01   1.8165902e+00   8.2462113e-01   1.7832555e+00   1.1000000e+00   1.4966630e+00   2.5961510e+00   1.3379088e+00   2.1213203e+00   1.4866069e+00   2.2427661e+00   9.0000000e-01   9.5916630e-01   1.3379088e+00   9.6436508e-01   1.1747340e+00   1.1958261e+00   1.0630146e+00   2.8722813e+00   2.9698485e+00   9.0553851e-01   1.6431677e+00   8.6023253e-01   2.7147744e+00   6.1644140e-01   1.4662878e+00   1.8357560e+00   5.0000000e-01   
5.0000000e-01   1.2727922e+00   1.6401219e+00   2.0566964e+00   2.7349589e+00   1.3304135e+00   5.8309519e-01   1.0770330e+00   2.3706539e+00   1.4832397e+00   1.0344080e+00   4.5825757e-01   1.3341664e+00   1.5394804e+00   1.3076697e+00   8.2462113e-01   1.7406895e+00   1.6941074e+00   1.2369317e+00   8.3666003e-01   9.3808315e-01   1.2727922e+00   6.7082039e-01   1.1224972e+00   3.1622777e-01   4.5825757e-01   3.8729833e-01   5.9160798e-01   1.2288206e+00   2.6457513e-01   2.5357445e+00   1.3076697e+00   2.5039968e+00   1.8055470e+00   2.2113344e+00   3.3120990e+00   1.1489125e+00   2.8266588e+00   2.1023796e+00   3.0099834e+00   1.6431677e+00   1.5968719e+00   2.0542639e+00   1.2884099e+00   1.6401219e+00   1.9026298e+00   1.8055470e+00   3.6523965e+00   3.6373067e+00   1.1357817e+00   2.3811762e+00   1.2369317e+00   3.4029399e+00   1.1958261e+00   2.2360680e+00   2.5845696e+00   1.0954451e+00   1.1916375e+00   1.9416488e+00   2.3494680e+00   2.7386128e+00   3.5000000e+00   1.9899749e+00   1.2609520e+00   1.6401219e+00   3.0643107e+00   2.2113344e+00   1.7944358e+00   1.0954451e+00   2.0566964e+00   2.2494444e+00   1.9697716e+00   1.3076697e+00   2.4859606e+00   2.4248711e+00   1.9026298e+00   1.3228757e+00   1.6522712e+00   1.9924859e+00   1.3190906e+00   1.1916375e+00   1.3527749e+00   1.3228757e+00   1.7000000e+00   3.8729833e-01   1.2124356e+00   3.4971417e+00   2.2022716e+00   3.5874782e+00   2.8248894e+00   3.2295511e+00   4.3988635e+00   1.4071247e+00   3.9102430e+00   3.1336879e+00   4.0767634e+00   2.7018512e+00   2.6324893e+00   3.1272992e+00   2.1023796e+00   2.4677925e+00   2.9086079e+00   2.8670542e+00   4.7476310e+00   4.6936127e+00   2.0371549e+00   3.4452866e+00   2.0420578e+00   4.4833024e+00   2.2472205e+00   3.2954514e+00   3.6851052e+00   2.1400935e+00   2.2135944e+00   2.9512709e+00   3.4554305e+00   3.8288379e+00   4.6119410e+00   2.9899833e+00   2.3302360e+00   2.5980762e+00   4.1605288e+00   3.1859065e+00   2.8425341e+00   2.0928450e+00   
3.1416556e+00   3.2832910e+00   3.0298515e+00   2.2022716e+00   3.5355339e+00   3.4496377e+00   2.9461840e+00   2.3302360e+00   2.7110883e+00   2.9580399e+00   2.2759613e+00   3.3166248e-01   2.2360680e-01   6.4031242e-01   1.3304135e+00   1.7320508e-01   2.3515952e+00   1.1000000e+00   2.4228083e+00   1.6552945e+00   2.0663978e+00   3.2388269e+00   8.8317609e-01   2.7549955e+00   2.0149442e+00   2.9017236e+00   1.5362291e+00   1.4866069e+00   1.9646883e+00   1.0862780e+00   1.4387495e+00   1.7606817e+00   1.6852300e+00   3.5608988e+00   3.5651087e+00   1.0440307e+00   2.2781571e+00   9.9498744e-01   3.3406586e+00   1.1090537e+00   2.1118712e+00   2.5099801e+00   9.8994949e-01   1.0392305e+00   1.8027756e+00   2.3021729e+00   2.6870058e+00   3.4394767e+00   1.8493242e+00   1.1618950e+00   1.4933185e+00   3.0182777e+00   2.0371549e+00   1.6552945e+00   9.2736185e-01   1.9824228e+00   2.1307276e+00   1.9131126e+00   1.1000000e+00   2.3622024e+00   2.2934690e+00   1.8165902e+00   1.2369317e+00   1.5459625e+00   1.8138357e+00   1.1135529e+00   1.4142136e-01   5.2915026e-01   1.4352700e+00   2.4494897e-01   2.3194827e+00   1.1832160e+00   2.3790755e+00   1.6401219e+00   2.0493902e+00   3.1906112e+00   1.1090537e+00   2.7092434e+00   2.0420578e+00   2.8124722e+00   1.4594520e+00   1.5099669e+00   1.9261360e+00   1.2369317e+00   1.5165751e+00   1.7175564e+00   1.6401219e+00   3.4481879e+00   3.5580894e+00   1.2083046e+00   2.2226111e+00   1.0862780e+00   3.3060551e+00   1.1401754e+00   2.0371549e+00   2.4269322e+00   1.0049876e+00   1.0049876e+00   1.8165902e+00   2.2293497e+00   2.6514147e+00   3.3105891e+00   1.8681542e+00   1.1401754e+00   1.5231546e+00   2.9698485e+00   1.9798990e+00   1.5968719e+00   9.0000000e-01   1.9235384e+00   2.1000000e+00   1.8627936e+00   1.1832160e+00   2.3130067e+00   2.2427661e+00   1.7916473e+00   1.3190906e+00   1.5099669e+00   1.7492856e+00   1.1000000e+00   5.0990195e-01   1.4142136e+00   1.4142136e-01   2.2803509e+00   1.1045361e+00   
2.3452079e+00   1.6031220e+00   2.0049938e+00   3.1654384e+00   1.0246951e+00   2.6870058e+00   1.9924859e+00   2.7910571e+00   1.4247807e+00   1.4491377e+00   1.8841444e+00   1.1357817e+00   1.4282857e+00   1.6703293e+00   1.6093477e+00   3.4452866e+00   3.5185224e+00   1.1224972e+00   2.1863211e+00   1.0000000e+00   3.2787193e+00   1.0677078e+00   2.0124612e+00   2.4145393e+00   9.3273791e-01   9.5393920e-01   1.7606817e+00   2.2158520e+00   2.6210685e+00   3.3136083e+00   1.8083141e+00   1.1045361e+00   1.4899664e+00   2.9359837e+00   1.9442222e+00   1.5716234e+00   8.4261498e-01   1.8867962e+00   2.0518285e+00   1.8138357e+00   1.1045361e+00   2.2781571e+00   2.2022716e+00   1.7349352e+00   1.2328828e+00   1.4628739e+00   1.7146428e+00   1.0535654e+00   1.7606817e+00   5.4772256e-01   2.1213203e+00   1.0954451e+00   2.0049938e+00   1.3964240e+00   1.7776389e+00   2.8106939e+00   1.4317821e+00   2.3366643e+00   1.7058722e+00   2.4839485e+00   1.1445523e+00   1.2000000e+00   1.5652476e+00   1.1789826e+00   1.4212670e+00   1.4594520e+00   1.3379088e+00   3.1032241e+00   3.1780497e+00   1.0295630e+00   1.8814888e+00   1.1045361e+00   2.9171904e+00   8.1240384e-01   1.7349352e+00   2.0566964e+00   7.1414284e-01   7.9372539e-01   1.5427249e+00   1.8303005e+00   2.2472205e+00   2.9325757e+00   1.5968719e+00   8.3666003e-01   1.3416408e+00   2.5495098e+00   1.7776389e+00   1.3304135e+00   7.4161985e-01   1.5427249e+00   1.7860571e+00   1.4730920e+00   1.0954451e+00   2.0024984e+00   1.9519221e+00   1.4387495e+00   1.0099505e+00   1.1832160e+00   1.5684387e+00   9.9498744e-01   1.3038405e+00   3.6110940e+00   2.3622024e+00   3.6959437e+00   2.9748950e+00   3.3555923e+00   4.5232732e+00   1.6278821e+00   4.0472213e+00   3.3000000e+00   4.1460825e+00   2.7694765e+00   2.7676705e+00   3.2233523e+00   2.2737634e+00   2.5845696e+00   2.9849623e+00   2.9916551e+00   4.8321838e+00   4.8394215e+00   2.2494444e+00   3.5298725e+00   2.1817424e+00   4.6206060e+00   2.3622024e+00   
3.3896903e+00   3.7934153e+00   2.2427661e+00   2.3130067e+00   3.0886890e+00   3.5707142e+00   3.9534795e+00   4.6797436e+00   3.1224990e+00   2.4698178e+00   2.8035692e+00   4.2497059e+00   3.2710854e+00   2.9647934e+00   2.1886069e+00   3.2186954e+00   3.3719431e+00   3.0740852e+00   2.3622024e+00   3.6373067e+00   3.5284558e+00   3.0149627e+00   2.4657656e+00   2.8035692e+00   3.0364453e+00   2.4062419e+00   2.3790755e+00   1.1747340e+00   2.4248711e+00   1.6941074e+00   2.0928450e+00   3.2465366e+00   1.0246951e+00   2.7676705e+00   2.0566964e+00   2.8861739e+00   1.5132746e+00   1.5165751e+00   1.9621417e+00   1.1789826e+00   1.4899664e+00   1.7578396e+00   1.7000000e+00   3.5454196e+00   3.5888717e+00   1.1401754e+00   2.2715633e+00   1.0677078e+00   3.3541020e+00   1.1224972e+00   2.1095023e+00   2.5039968e+00   9.9498744e-01   1.0440307e+00   1.8384776e+00   2.2956481e+00   2.6925824e+00   3.4088121e+00   1.8841444e+00   1.1832160e+00   1.5684387e+00   3.0066593e+00   2.0445048e+00   1.6703293e+00   9.3273791e-01   1.9646883e+00   2.1330729e+00   1.8788294e+00   1.1747340e+00   2.3685439e+00   2.2912878e+00   1.8027756e+00   1.2727922e+00   1.5427249e+00   1.8165902e+00   1.1532563e+00   1.3341664e+00   9.4868330e-01   9.0000000e-01   5.0990195e-01   1.5165751e+00   2.3430749e+00   1.3190906e+00   1.1532563e+00   9.5393920e-01   1.0535654e+00   1.1045361e+00   8.6602540e-01   1.5000000e+00   1.1489125e+00   7.4161985e-01   9.3273791e-01   1.6703293e+00   1.8165902e+00   1.8165902e+00   7.0710678e-01   1.4832397e+00   1.7175564e+00   1.4352700e+00   6.4031242e-01   1.1445523e+00   1.4798649e+00   1.3527749e+00   7.6157731e-01   1.3228757e+00   1.3527749e+00   1.7944358e+00   7.1414284e-01   1.4352700e+00   1.3784049e+00   1.4491377e+00   4.2426407e-01   8.8881944e-01   1.4525839e+00   9.5916630e-01   6.0827625e-01   1.1180340e+00   1.3341664e+00   5.5677644e-01   5.0000000e-01   9.6436508e-01   1.4142136e+00   1.0099505e+00   6.4807407e-01   1.2449900e+00   
1.5684387e+00   7.4161985e-01   1.0770330e+00   2.3706539e+00   1.1180340e+00   1.9339080e+00   1.1618950e+00   2.0322401e+00   8.6602540e-01   6.3245553e-01   1.1357817e+00   2.6457513e-01   5.0990195e-01   9.0000000e-01   8.6602540e-01   2.7331301e+00   2.6495283e+00   6.7823300e-01   1.4071247e+00   3.1622777e-01   2.4879711e+00   5.4772256e-01   1.2529964e+00   1.7406895e+00   5.1961524e-01   4.7958315e-01   8.1240384e-01   1.6217275e+00   1.8894444e+00   2.7055499e+00   8.4261498e-01   6.4807407e-01   7.7459667e-01   2.2045408e+00   1.1135529e+00   8.3066239e-01   4.7958315e-01   1.2247449e+00   1.2124356e+00   1.2369317e+00   0.0000000e+00   1.4317821e+00   1.3747727e+00   1.0344080e+00   5.4772256e-01   7.7459667e-01   9.4868330e-01   3.3166248e-01   9.1104336e-01   6.1644140e-01   8.6023253e-01   2.6851443e+00   5.4772256e-01   7.1414284e-01   7.5498344e-01   1.0246951e+00   9.8994949e-01   5.0000000e-01   1.7406895e+00   1.5684387e+00   9.6436508e-01   7.8102497e-01   1.2845233e+00   1.2489996e+00   1.7378147e+00   4.0000000e-01   1.8165902e+00   1.0246951e+00   1.3490738e+00   5.3851648e-01   3.8729833e-01   1.4662878e+00   1.4456832e+00   7.8740079e-01   5.1961524e-01   4.5825757e-01   1.2409674e+00   7.9372539e-01   1.2961481e+00   1.3190906e+00   6.6332496e-01   9.8994949e-01   8.6602540e-01   1.5842980e+00   5.4772256e-01   5.9160798e-01   8.5440037e-01   1.5684387e+00   4.1231056e-01   6.7082039e-01   8.3066239e-01   1.3190906e+00   9.2736185e-01   1.1224972e+00   1.4730920e+00   5.0000000e-01   1.6703293e+00   1.8275667e+00   1.2206556e+00   6.0000000e-01   1.4282857e+00   6.4807407e-01   3.8729833e-01   6.0000000e-01   9.5916630e-01   9.3273791e-01   6.6332496e-01   2.4494897e-01   2.0346990e+00   1.9974984e+00   1.0148892e+00   8.4261498e-01   1.0148892e+00   1.7944358e+00   7.2801099e-01   6.4807407e-01   1.0295630e+00   8.1240384e-01   7.3484692e-01   3.3166248e-01   9.4868330e-01   1.2165525e+00   2.0124612e+00   4.2426407e-01   5.9160798e-01   
5.3851648e-01   1.5716234e+00   7.8102497e-01   2.4494897e-01   8.6023253e-01   7.2801099e-01   7.4833148e-01   9.4868330e-01   7.4161985e-01   8.2462113e-01   9.0553851e-01   7.6157731e-01   7.2801099e-01   5.0000000e-01   7.4161985e-01   6.4807407e-01   1.3638182e+00   2.1794495e+00   1.0295630e+00   6.7082039e-01   1.0148892e+00   7.5498344e-01   6.6332496e-01   4.3588989e-01   1.2529964e+00   1.0295630e+00   5.5677644e-01   5.0000000e-01   1.7000000e+00   1.6792856e+00   1.4212670e+00   4.6904158e-01   1.3038405e+00   1.5264338e+00   1.0488088e+00   3.8729833e-01   8.5440037e-01   1.1357817e+00   1.0630146e+00   3.1622777e-01   9.2195445e-01   1.0148892e+00   1.7320508e+00   3.0000000e-01   1.0295630e+00   1.0000000e+00   1.2409674e+00   5.2915026e-01   5.1961524e-01   1.1874342e+00   5.8309519e-01   3.6055513e-01   8.1853528e-01   1.0770330e+00   3.8729833e-01   4.7958315e-01   6.4031242e-01   1.0099505e+00   6.3245553e-01   6.4807407e-01   1.0049876e+00   3.4799425e+00   5.2915026e-01   1.3379088e+00   9.6436508e-01   1.8734994e+00   1.8055470e+00   1.3601471e+00   2.5357445e+00   2.3706539e+00   1.7916473e+00   1.5842980e+00   8.1853528e-01   5.4772256e-01   2.4738634e+00   1.1747340e+00   2.6343880e+00   2.6457513e-01   2.1817424e+00   1.3076697e+00   8.0622577e-01   2.3086793e+00   2.2869193e+00   1.5748016e+00   1.0246951e+00   6.0827625e-01   8.8317609e-01   1.5779734e+00   2.0832667e+00   1.9748418e+00   5.4772256e-01   1.7146428e+00   1.6583124e+00   2.4269322e+00   1.3928388e+00   1.3820275e+00   1.6703293e+00   2.3706539e+00   1.1000000e+00   1.3674794e+00   1.6763055e+00   2.1307276e+00   1.7832555e+00   1.8973666e+00   2.2869193e+00   3.0282008e+00   2.2226111e+00   3.1144823e+00   1.8708287e+00   1.7233688e+00   2.2405357e+00   9.8994949e-01   1.3228757e+00   1.9339080e+00   1.9544820e+00   3.8236109e+00   3.7376463e+00   1.2609520e+00   2.5079872e+00   9.1104336e-01   3.5860842e+00   1.4730920e+00   2.3409400e+00   2.8354894e+00   1.3711309e+00   
1.3638182e+00   1.9261360e+00   2.6907248e+00   2.9899833e+00   3.7934153e+00   1.9493589e+00   1.5652476e+00   1.6583124e+00   3.3181320e+00   2.1142375e+00   1.9026298e+00   1.2489996e+00   2.3086793e+00   2.3021729e+00   2.2538855e+00   1.1180340e+00   2.5337719e+00   2.4413111e+00   2.0832667e+00   1.5000000e+00   1.8411953e+00   1.9157244e+00   1.2727922e+00   8.7749644e-01   1.0148892e+00   1.4866069e+00   1.3638182e+00   9.9498744e-01   2.1095023e+00   2.0149442e+00   1.4662878e+00   1.1357817e+00   1.1357817e+00   9.2736185e-01   1.9899749e+00   9.2736185e-01   2.2135944e+00   6.0827625e-01   1.7320508e+00   9.8488578e-01   4.3588989e-01   1.8627936e+00   1.8466185e+00   1.1832160e+00   5.5677644e-01   2.6457513e-01   1.1045361e+00   1.2124356e+00   1.5937377e+00   1.4764823e+00   6.7823300e-01   1.4491377e+00   1.2206556e+00   1.9874607e+00   1.0488088e+00   1.1180340e+00   1.3747727e+00   1.9339080e+00   8.6602540e-01   1.1704700e+00   1.3527749e+00   1.6911535e+00   1.3784049e+00   1.5874508e+00   1.8466185e+00   1.4282857e+00   1.0295630e+00   6.2449980e-01   6.6332496e-01   1.2961481e+00   1.3228757e+00   1.0392305e+00   6.1644140e-01   1.9131126e+00   1.5716234e+00   1.1445523e+00   8.8881944e-01   1.4662878e+00   1.3928388e+00   1.0049876e+00   8.6023253e-01   8.8317609e-01   1.1575837e+00   1.1916375e+00   5.5677644e-01   7.3484692e-01   8.2462113e-01   1.8788294e+00   6.1644140e-01   9.1104336e-01   7.5498344e-01   1.2609520e+00   1.1704700e+00   7.3484692e-01   1.3190906e+00   8.0622577e-01   8.7177979e-01   1.0677078e+00   1.1618950e+00   8.7177979e-01   1.0677078e+00   9.2736185e-01   9.0000000e-01   8.3066239e-01   1.2124356e+00   1.1747340e+00   1.3784049e+00   1.5652476e+00   1.0198039e+00   2.2181073e+00   1.9000000e+00   1.2165525e+00   1.3038405e+00   8.6023253e-01   1.3892444e+00   2.3685439e+00   6.7082039e-01   2.2113344e+00   1.2247449e+00   1.8841444e+00   8.1240384e-01   8.1240384e-01   1.9544820e+00   1.8708287e+00   1.3000000e+00   
1.1224972e+00   1.0198039e+00   9.3273791e-01   1.2727922e+00   1.8574176e+00   1.9157244e+00   8.0622577e-01   1.0535654e+00   1.3190906e+00   1.9949937e+00   9.9498744e-01   8.7177979e-01   1.1747340e+00   2.0322401e+00   6.3245553e-01   7.0710678e-01   1.2083046e+00   1.8947295e+00   1.3820275e+00   1.2529964e+00   1.8814888e+00   5.5677644e-01   5.4772256e-01   1.0677078e+00   9.0000000e-01   3.7416574e-01   4.8989795e-01   2.0976177e+00   2.2649503e+00   1.2288206e+00   7.8102497e-01   1.0049876e+00   2.0396078e+00   6.0827625e-01   6.4807407e-01   1.1575837e+00   6.1644140e-01   5.2915026e-01   6.5574385e-01   1.0862780e+00   1.4071247e+00   2.0024984e+00   6.7823300e-01   6.7082039e-01   1.0630146e+00   1.6031220e+00   7.0000000e-01   4.6904158e-01   6.4807407e-01   5.1961524e-01   6.7823300e-01   5.0990195e-01   8.6602540e-01   9.0553851e-01   8.1240384e-01   4.2426407e-01   7.4161985e-01   2.2360680e-01   5.5677644e-01   6.6332496e-01   5.7445626e-01   7.9372539e-01   8.1240384e-01   6.4031242e-01   3.8729833e-01   2.2248595e+00   2.1023796e+00   8.1240384e-01   9.0553851e-01   9.0553851e-01   1.9157244e+00   4.2426407e-01   8.0622577e-01   1.1789826e+00   5.5677644e-01   5.9160798e-01   3.7416574e-01   1.0344080e+00   1.2845233e+00   2.1633308e+00   4.3588989e-01   4.6904158e-01   6.6332496e-01   1.6062378e+00   9.1651514e-01   4.5825757e-01   7.1414284e-01   6.7823300e-01   7.6811457e-01   7.8102497e-01   6.3245553e-01   9.6436508e-01   9.8488578e-01   5.9160798e-01   3.7416574e-01   3.4641016e-01   8.3666003e-01   6.2449980e-01   1.3114877e+00   1.1357817e+00   5.2915026e-01   4.2426407e-01   1.7029386e+00   1.7233688e+00   1.3747727e+00   3.6055513e-01   1.3601471e+00   1.5165751e+00   8.8881944e-01   3.7416574e-01   7.3484692e-01   9.8994949e-01   9.6953597e-01   4.5825757e-01   7.0710678e-01   8.9442719e-01   1.6340135e+00   4.6904158e-01   9.0000000e-01   1.0723805e+00   1.1000000e+00   7.1414284e-01   5.0990195e-01   1.1045361e+00   1.7320508e-01   
3.4641016e-01   4.6904158e-01   1.1357817e+00   4.8989795e-01   5.4772256e-01   3.7416574e-01   8.8881944e-01   4.3588989e-01   7.5498344e-01   1.0295630e+00   5.1961524e-01   1.0770330e+00   1.0862780e+00   2.9359837e+00   2.7766887e+00   6.5574385e-01   1.5842980e+00   3.3166248e-01   2.6419690e+00   6.7082039e-01   1.4628739e+00   1.9442222e+00   6.4807407e-01   6.7823300e-01   9.7467943e-01   1.8165902e+00   2.0493902e+00   2.9137605e+00   9.8994949e-01   8.4261498e-01   9.4339811e-01   2.3558438e+00   1.3000000e+00   1.0677078e+00   6.4807407e-01   1.4035669e+00   1.3711309e+00   1.3784049e+00   2.6457513e-01   1.6124515e+00   1.5427249e+00   1.1747340e+00   6.0827625e-01   9.6436508e-01   1.1445523e+00   5.8309519e-01   7.5498344e-01   1.0246951e+00   2.6851443e+00   2.6267851e+00   1.1045361e+00   1.3190906e+00   4.8989795e-01   2.5159491e+00   8.1240384e-01   1.2288206e+00   1.8138357e+00   7.8102497e-01   7.2801099e-01   8.3666003e-01   1.7691806e+00   1.9519221e+00   2.6944387e+00   8.0622577e-01   1.0295630e+00   1.1747340e+00   2.1587033e+00   9.2736185e-01   9.8488578e-01   7.2801099e-01   1.2165525e+00   1.0723805e+00   1.1445523e+00   5.0990195e-01   1.3453624e+00   1.1958261e+00   9.3273791e-01   7.7459667e-01   8.3666003e-01   7.8740079e-01   6.4031242e-01   5.8309519e-01   2.0049938e+00   2.1470911e+00   1.3747727e+00   6.4031242e-01   1.0246951e+00   1.9748418e+00   8.1853528e-01   5.4772256e-01   1.1747340e+00   8.3666003e-01   7.3484692e-01   5.3851648e-01   1.1916375e+00   1.4000000e+00   1.9773720e+00   5.0990195e-01   9.2195445e-01   1.1618950e+00   1.5394804e+00   3.8729833e-01   5.4772256e-01   8.3666003e-01   5.5677644e-01   4.4721360e-01   5.4772256e-01   9.0000000e-01   7.2111026e-01   5.4772256e-01   3.7416574e-01   8.6602540e-01   3.8729833e-01   3.0000000e-01   7.6157731e-01   1.9183326e+00   1.9519221e+00   1.1090537e+00   7.0000000e-01   1.1180340e+00   1.7204651e+00   7.0000000e-01   5.0990195e-01   8.8317609e-01   7.8740079e-01   
7.2111026e-01   3.8729833e-01   7.8740079e-01   1.1045361e+00   1.8574176e+00   4.6904158e-01   5.7445626e-01   7.0000000e-01   1.4317821e+00   7.5498344e-01   1.4142136e-01   8.6023253e-01   5.1961524e-01   6.4807407e-01   7.6157731e-01   8.6602540e-01   7.3484692e-01   8.1240384e-01   6.1644140e-01   7.4161985e-01   3.6055513e-01   7.1414284e-01   7.2111026e-01   1.2206556e+00   2.9715316e+00   1.4177447e+00   2.9478806e+00   1.0198039e+00   2.5632011e+00   1.5033296e+00   1.1224972e+00   2.6495283e+00   2.5690465e+00   1.9773720e+00   1.4352700e+00   1.2409674e+00   4.1231056e-01   1.9748418e+00   2.4515301e+00   2.4186773e+00   1.0049876e+00   1.8357560e+00   1.9442222e+00   2.7018512e+00   1.6822604e+00   1.6552945e+00   1.9235384e+00   2.7331301e+00   1.3490738e+00   1.5297059e+00   1.9748418e+00   2.5748786e+00   2.0904545e+00   2.0273135e+00   2.5690465e+00   2.7018512e+00   1.5620499e+00   2.9223278e+00   4.1231056e-01   2.4939928e+00   1.7233688e+00   1.2922848e+00   2.6362853e+00   2.6400758e+00   1.8601075e+00   1.4525839e+00   9.6436508e-01   1.3490738e+00   1.8520259e+00   2.4248711e+00   2.2494444e+00   8.9442719e-01   2.0736441e+00   2.0371549e+00   2.7766887e+00   1.7832555e+00   1.7175564e+00   2.0322401e+00   2.6495283e+00   1.4730920e+00   1.7233688e+00   2.0124612e+00   2.3958297e+00   2.1400935e+00   2.2671568e+00   2.6248809e+00   1.7146428e+00   8.8317609e-01   2.5278449e+00   6.6332496e-01   1.5968719e+00   1.8788294e+00   7.2801099e-01   8.6602540e-01   1.1135529e+00   1.6522712e+00   1.9209373e+00   2.8948230e+00   1.1704700e+00   6.7823300e-01   7.3484692e-01   2.3194827e+00   1.6431677e+00   1.1445523e+00   8.7749644e-01   1.4628739e+00   1.5716234e+00   1.5066519e+00   6.7823300e-01   1.7578396e+00   1.7860571e+00   1.3453624e+00   5.8309519e-01   1.0862780e+00   1.5099669e+00   8.6602540e-01   1.6062378e+00   1.3747727e+00   1.2247449e+00   3.0000000e-01   6.5574385e-01   1.3076697e+00   1.2529964e+00   6.7823300e-01   7.9372539e-01   
8.5440037e-01   1.3928388e+00   6.5574385e-01   1.2328828e+00   1.3490738e+00   9.1651514e-01   6.4807407e-01   7.4161985e-01   1.3820275e+00   3.7416574e-01   2.6457513e-01   6.0827625e-01   1.4071247e+00   2.2360680e-01   3.0000000e-01   5.7445626e-01   1.2247449e+00   7.3484692e-01   7.8740079e-01   1.2845233e+00   2.7658633e+00   7.3484692e-01   1.4525839e+00   1.9924859e+00   6.4031242e-01   5.7445626e-01   1.0677078e+00   1.8894444e+00   2.1656408e+00   2.9223278e+00   1.0816654e+00   8.8317609e-01   1.0677078e+00   2.4454039e+00   1.2247449e+00   1.0630146e+00   5.0000000e-01   1.4282857e+00   1.3964240e+00   1.3820275e+00   3.1622777e-01   1.6401219e+00   1.5329710e+00   1.1958261e+00   7.7459667e-01   9.6953597e-01   1.0295630e+00   4.5825757e-01   2.2912878e+00   1.5033296e+00   9.6953597e-01   2.4289916e+00   2.4248711e+00   1.7058722e+00   1.1224972e+00   6.7823300e-01   1.0630146e+00   1.7146428e+00   2.1840330e+00   2.0420578e+00   7.0000000e-01   1.9209373e+00   1.8055470e+00   2.5651511e+00   1.5588457e+00   1.5684387e+00   1.8384776e+00   2.4879711e+00   1.3038405e+00   1.5811388e+00   1.8384776e+00   2.2248595e+00   1.9313208e+00   2.0952327e+00   2.4248711e+00   1.1180340e+00   1.5066519e+00   1.7320508e-01   3.6055513e-01   7.7459667e-01   1.3228757e+00   1.6340135e+00   2.4617067e+00   8.1853528e-01   3.7416574e-01   8.3666003e-01   1.9339080e+00   1.1575837e+00   7.2801099e-01   4.3588989e-01   9.2736185e-01   1.0816654e+00   9.0000000e-01   5.4772256e-01   1.3228757e+00   1.2845233e+00   7.6811457e-01   2.4494897e-01   5.0990195e-01   1.0000000e+00   5.3851648e-01   6.6332496e-01   1.1832160e+00   1.0862780e+00   5.9160798e-01   7.7459667e-01   9.6953597e-01   1.4798649e+00   6.0000000e-01   1.0630146e+00   1.1618950e+00   1.1357817e+00   5.1961524e-01   5.0990195e-01   1.2165525e+00   4.1231056e-01   3.7416574e-01   6.9282032e-01   1.2529964e+00   3.1622777e-01   4.0000000e-01   6.1644140e-01   1.1532563e+00   6.2449980e-01   6.2449980e-01   
1.0862780e+00   1.6124515e+00   1.5684387e+00   1.0246951e+00   3.4641016e-01   4.6904158e-01   1.0246951e+00   1.0583005e+00   1.3674794e+00   1.3747727e+00   7.4161985e-01   1.1704700e+00   9.4868330e-01   1.7088007e+00   7.4161985e-01   8.8317609e-01   1.0770330e+00   1.7406895e+00   6.4807407e-01   9.1651514e-01   1.0862780e+00   1.5198684e+00   1.1000000e+00   1.2845233e+00   1.5937377e+00   2.4494897e-01   8.7749644e-01   1.4422205e+00   1.7720045e+00   2.5475478e+00   9.1651514e-01   4.3588989e-01   9.2195445e-01   2.0566964e+00   1.1704700e+00   7.8740079e-01   2.8284271e-01   1.0148892e+00   1.1575837e+00   9.5916630e-01   5.1961524e-01   1.4071247e+00   1.3416408e+00   8.3666003e-01   3.8729833e-01   5.7445626e-01   9.8488578e-01   4.6904158e-01   8.4261498e-01   1.4352700e+00   1.7832555e+00   2.4839485e+00   8.8317609e-01   4.5825757e-01   9.0000000e-01   2.0615528e+00   1.0246951e+00   6.7823300e-01   1.4142136e-01   9.9498744e-01   1.1045361e+00   9.6953597e-01   4.7958315e-01   1.3341664e+00   1.2569805e+00   8.3666003e-01   5.5677644e-01   5.3851648e-01   8.1853528e-01   2.8284271e-01   9.8488578e-01   1.1357817e+00   1.9748418e+00   1.0000000e-01   7.8740079e-01   7.8740079e-01   1.4212670e+00   6.7823300e-01   4.3588989e-01   9.6436508e-01   6.1644140e-01   5.1961524e-01   7.9372539e-01   8.1240384e-01   6.7082039e-01   7.1414284e-01   5.7445626e-01   7.0710678e-01   4.6904158e-01   6.9282032e-01   7.9372539e-01   5.0990195e-01   1.2845233e+00   1.0392305e+00   1.1618950e+00   1.2041595e+00   9.1104336e-01   1.2845233e+00   8.8317609e-01   1.5748016e+00   7.1414284e-01   9.6953597e-01   1.0392305e+00   1.6217275e+00   8.3666003e-01   1.0770330e+00   1.0488088e+00   1.3379088e+00   1.0049876e+00   1.3453624e+00   1.4899664e+00   1.1618950e+00   1.1575837e+00   1.5394804e+00   1.4933185e+00   5.3851648e-01   1.4387495e+00   1.2083046e+00   1.9235384e+00   9.3273791e-01   1.0392305e+00   1.2247449e+00   1.8894444e+00   8.4852814e-01   1.1224972e+00   
1.2247449e+00   1.5842980e+00   1.2922848e+00   1.5652476e+00   1.8165902e+00   1.9824228e+00   2.3452079e+00   2.3832751e+00   9.2736185e-01   1.8761663e+00   1.8947295e+00   2.6172505e+00   1.5811388e+00   1.6522712e+00   1.8083141e+00   2.7055499e+00   1.3820275e+00   1.5588457e+00   1.9000000e+00   2.4939928e+00   2.0099751e+00   2.0346990e+00   2.5238859e+00   8.6602540e-01   8.7749644e-01   1.4106736e+00   6.4031242e-01   5.0990195e-01   1.0000000e+00   6.2449980e-01   4.6904158e-01   7.7459667e-01   8.4261498e-01   6.4807407e-01   6.6332496e-01   5.4772256e-01   7.4161985e-01   5.0000000e-01   6.7082039e-01   8.3666003e-01   5.8309519e-01   1.9078784e+00   1.1916375e+00   5.9160798e-01   5.5677644e-01   9.4868330e-01   1.1445523e+00   1.0440307e+00   6.4807407e-01   1.3000000e+00   1.3304135e+00   9.2195445e-01   5.0990195e-01   5.8309519e-01   1.0488088e+00   5.3851648e-01   1.9442222e+00   1.2961481e+00   7.1414284e-01   9.8488578e-01   1.1916375e+00   1.2688578e+00   1.3964240e+00   7.7459667e-01   1.3228757e+00   1.4387495e+00   1.2206556e+00   8.1240384e-01   9.1651514e-01   1.2247449e+00   7.8102497e-01   1.5427249e+00   1.5198684e+00   2.1977261e+00   1.0862780e+00   1.1269428e+00   1.2845233e+00   2.2045408e+00   9.4339811e-01   1.1357817e+00   1.3453624e+00   1.8920888e+00   1.5297059e+00   1.7029386e+00   2.1189620e+00   6.8556546e-01   1.1180340e+00   7.6157731e-01   5.0000000e-01   8.4261498e-01   1.1135529e+00   6.2449980e-01   4.3588989e-01   7.0000000e-01   1.1916375e+00   7.2111026e-01   2.4494897e-01   9.6436508e-01   8.1240384e-01   5.9160798e-01   6.7823300e-01   8.1240384e-01   8.3066239e-01   7.6157731e-01   8.1240384e-01   6.6332496e-01   7.9372539e-01   3.8729833e-01   6.2449980e-01   6.4807407e-01   1.1269428e+00   1.2247449e+00   1.0770330e+00   4.7958315e-01   1.4628739e+00   1.3711309e+00   9.4868330e-01   6.2449980e-01   6.7082039e-01   9.0000000e-01   3.1622777e-01   4.1231056e-01   3.6055513e-01   1.2247449e+00   5.5677644e-01   
5.7445626e-01   3.6055513e-01   9.5916630e-01   4.6904158e-01   7.8740079e-01   1.0908712e+00   5.4772256e-01   1.2124356e+00   3.4641016e-01   2.4494897e-01   4.2426407e-01   1.0630146e+00   6.0827625e-01   6.2449980e-01   1.1224972e+00   1.2369317e+00   8.1240384e-01   6.9282032e-01   2.4494897e-01   9.4339811e-01   5.1961524e-01   8.1853528e-01   1.1224972e+00   1.4317821e+00   1.3747727e+00   1.0344080e+00   5.4772256e-01   7.7459667e-01   9.4868330e-01   3.3166248e-01   3.1622777e-01   7.3484692e-01   1.3076697e+00   8.4261498e-01   8.0622577e-01   1.3190906e+00   6.1644140e-01   1.2845233e+00   7.9372539e-01   6.2449980e-01   1.2569805e+00   7.8102497e-01   3.6055513e-01   6.7082039e-01   9.4868330e-01   5.8309519e-01   1.0677078e+00   6.5574385e-01   6.1644140e-01   6.4031242e-01   7.6811457e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-euclidean-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-euclidean-ml.txt
    deleted file mode 100644
    index 1b7552021b..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-euclidean-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   4.0515260e+00   4.2121458e+00   3.7357405e+00   4.2313317e+00   3.9136009e+00   4.3843298e+00   3.9811426e+00   4.3624182e+00   4.0642508e+00   4.2105933e+00   4.0747226e+00   3.9068586e+00   4.1637004e+00   4.4303203e+00   4.1841564e+00   4.1063279e+00   4.1862390e+00   4.0719925e+00   4.2227579e+00   4.3173531e+00   3.8811067e+00   3.7577567e+00   4.0623722e+00   3.9882453e+00   4.0432671e+00   3.9085109e+00   4.0283414e+00   4.0846110e+00   3.6459235e+00   3.9544001e+00   4.1134244e+00   4.1805752e+00   3.5121011e+00   4.2747789e+00   4.1048323e+00   3.9269426e+00   3.8932032e+00   3.8281172e+00   3.7288430e+00   4.0863477e+00   4.1527428e+00   4.1646409e+00   4.2027433e+00   3.8441594e+00   4.8419117e+00   4.2455384e+00   3.7622220e+00   4.3967923e+00   4.4663183e+00   4.0435853e+00   4.0421692e+00   4.3124625e+00   4.6499961e+00   4.5595743e+00   3.4230430e+00   4.2612266e+00   3.5676603e+00   4.0866580e+00   4.2307103e+00   3.8521940e+00   3.9951183e+00   3.1022409e+00   3.7290193e+00   4.1931517e+00   4.1127027e+00   3.6633651e+00   4.0235815e+00   3.9729858e+00   4.1980132e+00   4.1579993e+00   3.9948955e+00   3.9081966e+00   3.9031152e+00   3.5069036e+00   4.0015727e+00   3.6763496e+00   3.6614339e+00   3.6227109e+00   3.7357992e+00   4.0170026e+00   3.5216829e+00   3.9322227e+00   3.9094621e+00   4.0170286e+00   4.3264246e+00   4.3435483e+00   4.0788635e+00   4.4761765e+00   3.8468186e+00   4.1490333e+00   4.2800007e+00   4.2260191e+00   4.3031858e+00   4.1897413e+00   4.0530244e+00   3.5893641e+00   4.2186615e+00   3.7979503e+00   4.0915473e+00   4.1343073e+00   4.5063851e+00   3.6394889e+00   4.2508448e+00   3.7160826e+00   4.0105262e+00   4.1578269e+00   4.0290590e+00   3.6971819e+00   3.9414087e+00   4.2522313e+00   4.4091714e+00   4.1542292e+00   3.9594691e+00   4.0923600e+00   4.0855497e+00   3.8253075e+00   4.3034717e+00   4.0976731e+00   4.1316523e+00   4.0872717e+00   4.2643353e+00   3.8887280e+00   3.9411273e+00   3.8848001e+00   
4.3481996e+00   3.8716733e+00   3.9084684e+00   3.7546361e+00   3.9354816e+00   3.8293694e+00   3.7568515e+00   3.7184961e+00   3.8404278e+00   4.2570811e+00   4.1423777e+00   4.0291411e+00   4.2094682e+00   3.6127418e+00   4.0459839e+00   3.7737985e+00   3.7647653e+00   3.9762006e+00   3.8999512e+00   3.8509090e+00   3.8975941e+00   3.8432839e+00   4.2109046e+00   4.1339124e+00   3.5898873e+00   4.0794519e+00   4.3504966e+00   3.8862612e+00   3.8332931e+00   4.2190310e+00   4.1366595e+00   3.7220268e+00   4.1250795e+00   3.3169452e+00   4.0757181e+00   3.6487114e+00   3.9513724e+00   4.0735549e+00   3.9137880e+00   3.9656942e+00   3.7724953e+00   4.0505153e+00   3.9062302e+00   4.5671852e+00   3.7542175e+00   4.3731708e+00   3.6733907e+00   4.4667545e+00   4.1004635e+00   4.0530038e+00   4.0346958e+00   4.2145752e+00   4.4298637e+00   4.2982360e+00   4.0878239e+00   4.4061563e+00   4.2115971e+00   3.8263277e+00   3.8603258e+00   3.8572375e+00   4.1051910e+00   4.3787786e+00   4.5309659e+00   4.0047055e+00   4.1308854e+00   3.6283561e+00
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-hamming-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-hamming-ml.txt
    deleted file mode 100644
    index bc4e1ddcb6..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-hamming-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   4.6000000e-01   4.3000000e-01   4.3000000e-01   5.4000000e-01   4.1000000e-01   5.3000000e-01   4.3000000e-01   5.9000000e-01   4.8000000e-01   4.7000000e-01   4.6000000e-01   4.9000000e-01   4.5000000e-01   5.5000000e-01   5.3000000e-01   4.5000000e-01   4.8000000e-01   4.7000000e-01   4.8000000e-01   5.1000000e-01   4.9000000e-01   4.4000000e-01   4.9000000e-01   4.7000000e-01   4.9000000e-01   4.7000000e-01   5.2000000e-01   4.7000000e-01   4.2000000e-01   4.9000000e-01   4.7000000e-01   5.5000000e-01   3.9000000e-01   5.5000000e-01   4.6000000e-01   4.5000000e-01   4.0000000e-01   4.8000000e-01   4.5000000e-01   4.8000000e-01   4.8000000e-01   5.0000000e-01   4.8000000e-01   4.5000000e-01   6.4000000e-01   5.7000000e-01   4.6000000e-01   5.4000000e-01   5.6000000e-01   4.8000000e-01   4.8000000e-01   5.3000000e-01   5.4000000e-01   5.3000000e-01   4.5000000e-01   5.8000000e-01   4.2000000e-01   5.4000000e-01   6.0000000e-01   5.1000000e-01   4.6000000e-01   4.1000000e-01   4.4000000e-01   5.6000000e-01   5.4000000e-01   4.8000000e-01   4.8000000e-01   5.1000000e-01   5.2000000e-01   5.5000000e-01   4.5000000e-01   4.3000000e-01   4.7000000e-01   4.7000000e-01   5.6000000e-01   4.9000000e-01   4.8000000e-01   4.5000000e-01   4.9000000e-01   4.7000000e-01   4.5000000e-01   4.5000000e-01   5.6000000e-01   4.9000000e-01   5.8000000e-01   5.4000000e-01   4.6000000e-01   5.8000000e-01   5.3000000e-01   5.4000000e-01   5.5000000e-01   5.0000000e-01   5.2000000e-01   4.8000000e-01   5.0000000e-01   3.8000000e-01   5.3000000e-01   4.8000000e-01   5.1000000e-01   4.8000000e-01   5.2000000e-01   4.7000000e-01   5.0000000e-01   4.3000000e-01   4.8000000e-01   5.2000000e-01   5.0000000e-01   4.2000000e-01   4.2000000e-01   4.7000000e-01   5.4000000e-01   5.1000000e-01   5.4000000e-01   5.1000000e-01   4.8000000e-01   4.7000000e-01   5.2000000e-01   5.2000000e-01   5.4000000e-01   5.4000000e-01   5.0000000e-01   4.5000000e-01   4.4000000e-01   4.1000000e-01   
5.7000000e-01   4.6000000e-01   5.1000000e-01   5.2000000e-01   5.0000000e-01   4.8000000e-01   5.0000000e-01   4.4000000e-01   5.3000000e-01   5.2000000e-01   4.9000000e-01   5.7000000e-01   5.8000000e-01   4.9000000e-01   5.1000000e-01   4.5000000e-01   5.3000000e-01   4.5000000e-01   4.4000000e-01   3.5000000e-01   4.2000000e-01   5.3000000e-01   5.2000000e-01   5.0000000e-01   3.8000000e-01   5.2000000e-01   5.6000000e-01   4.7000000e-01   4.4000000e-01   5.1000000e-01   5.7000000e-01   4.5000000e-01   5.7000000e-01   4.3000000e-01   5.1000000e-01   3.8000000e-01   5.3000000e-01   4.8000000e-01   4.4000000e-01   5.0000000e-01   4.8000000e-01   5.0000000e-01   4.7000000e-01   6.4000000e-01   4.9000000e-01   5.2000000e-01   4.8000000e-01   5.6000000e-01   4.3000000e-01   4.8000000e-01   4.7000000e-01   6.0000000e-01   5.4000000e-01   5.5000000e-01   4.0000000e-01   5.5000000e-01   5.6000000e-01   4.9000000e-01   5.0000000e-01   4.3000000e-01   5.7000000e-01   5.0000000e-01   5.7000000e-01   4.9000000e-01   4.2000000e-01   3.9000000e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-jaccard-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-jaccard-ml.txt
    deleted file mode 100644
    index a7570d8c3f..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-jaccard-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   6.5714286e-01   6.0563380e-01   6.3235294e-01   7.3972603e-01   6.0294118e-01   7.3611111e-01   6.4179104e-01   7.7631579e-01   6.4000000e-01   6.6197183e-01   6.6666667e-01   7.0000000e-01   6.4285714e-01   7.7464789e-01   7.1621622e-01   6.4285714e-01   6.8571429e-01   6.4383562e-01   6.6666667e-01   6.5384615e-01   6.6216216e-01   6.1971831e-01   6.5333333e-01   6.5277778e-01   6.7123288e-01   6.4383562e-01   6.5000000e-01   6.3513514e-01   6.0000000e-01   6.7123288e-01   6.3513514e-01   7.4324324e-01   5.5714286e-01   7.0512821e-01   6.3888889e-01   6.0000000e-01   5.6338028e-01   6.3157895e-01   6.0810811e-01   6.2337662e-01   6.4000000e-01   6.5789474e-01   6.3157895e-01   5.6962025e-01   7.5294118e-01   7.1250000e-01   6.2162162e-01   6.7500000e-01   7.2727273e-01   6.2337662e-01   6.2337662e-01   6.7948718e-01   6.5853659e-01   6.6250000e-01   6.3380282e-01   7.3417722e-01   6.0869565e-01   7.2000000e-01   7.5949367e-01   6.4556962e-01   6.3013699e-01   5.9420290e-01   6.2857143e-01   7.1794872e-01   7.3972603e-01   6.4864865e-01   6.4864865e-01   6.8918919e-01   6.6666667e-01   7.0512821e-01   6.2500000e-01   6.2318841e-01   6.6197183e-01   6.5277778e-01   6.9135802e-01   6.6216216e-01   6.6666667e-01   6.4285714e-01   6.6216216e-01   6.8115942e-01   6.2500000e-01   6.2500000e-01   7.3684211e-01   6.4473684e-01   7.3417722e-01   7.1052632e-01   6.3888889e-01   7.3417722e-01   6.5432099e-01   6.9230769e-01   7.1428571e-01   6.7567568e-01   6.7532468e-01   6.7605634e-01   6.5789474e-01   5.4285714e-01   6.9736842e-01   6.2337662e-01   6.6233766e-01   6.7605634e-01   7.0270270e-01   6.1842105e-01   6.7567568e-01   6.2318841e-01   6.7605634e-01   6.9333333e-01   7.1428571e-01   6.0000000e-01   6.0000000e-01   6.6197183e-01   6.9230769e-01   6.8000000e-01   7.2000000e-01   6.5384615e-01   6.5753425e-01   6.6197183e-01   7.1232877e-01   6.9333333e-01   7.5000000e-01   7.1052632e-01   6.7567568e-01   6.4285714e-01   6.0273973e-01   5.8571429e-01   
6.9512195e-01   6.3013699e-01   6.8918919e-01   7.0270270e-01   6.6666667e-01   6.8571429e-01   6.6666667e-01   6.1111111e-01   7.0666667e-01   6.6666667e-01   6.5333333e-01   6.8674699e-01   7.0731707e-01   6.3636364e-01   6.3750000e-01   6.1643836e-01   6.5432099e-01   5.8441558e-01   5.8666667e-01   4.7297297e-01   5.5263158e-01   6.9736842e-01   6.9333333e-01   6.5789474e-01   5.7575758e-01   6.7532468e-01   7.0886076e-01   6.4383562e-01   5.8666667e-01   6.6233766e-01   7.5000000e-01   6.2500000e-01   7.7027027e-01   6.0563380e-01   6.8000000e-01   5.6716418e-01   6.7948718e-01   6.4864865e-01   6.1971831e-01   7.1428571e-01   6.5753425e-01   6.7567568e-01   6.6197183e-01   7.7108434e-01   6.6216216e-01   7.1232877e-01   6.4000000e-01   7.0886076e-01   6.0563380e-01   6.2337662e-01   6.2666667e-01   7.7922078e-01   7.2972973e-01   7.5342466e-01   5.7971014e-01   7.3333333e-01   7.0886076e-01   6.6216216e-01   6.4102564e-01   5.8904110e-01   7.3076923e-01   6.4102564e-01   7.1250000e-01   6.4473684e-01   5.9154930e-01   5.3424658e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-3.2-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-3.2-ml-iris.txt
    deleted file mode 100644
    index dc396c8c16..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-3.2-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   5.0817745e-01   4.4535192e-01   5.6700421e-01   1.2418578e-01   4.8927739e-01   5.0180477e-01   1.4096146e-01   8.1242502e-01   4.1586001e-01   3.2586371e-01   3.2586371e-01   5.2942799e-01   8.6137722e-01   7.7039952e-01   9.7270522e-01   4.5581864e-01   1.0000000e-01   6.3861009e-01   3.0546431e-01   3.7427929e-01   2.5251796e-01   5.6700421e-01   3.8776762e-01   5.2942799e-01   5.0905001e-01   2.5651975e-01   1.2418578e-01   1.2418578e-01   4.5470518e-01   4.5470518e-01   3.2816937e-01   6.0181382e-01   7.3457830e-01   4.1586001e-01   3.2586371e-01   4.0147421e-01   4.1586001e-01   7.6752131e-01   1.2418578e-01   1.4096146e-01   1.2396136e+00   7.1462831e-01   4.1449626e-01   5.3588338e-01   5.2942799e-01   3.2352160e-01   5.2862779e-01   2.5251796e-01   2.0656129e-01   3.5031395e+00   3.2158090e+00   3.6682165e+00   2.7164367e+00   3.3288934e+00   3.1477087e+00   3.4033622e+00   2.0308266e+00   3.3209346e+00   2.5912926e+00   2.3257069e+00   2.8912179e+00   2.7273721e+00   3.3660466e+00   2.2876649e+00   3.1664710e+00   3.1642132e+00   2.7448172e+00   3.2474124e+00   2.5734684e+00   3.5025969e+00   2.6980573e+00   3.5983434e+00   3.3515288e+00   3.0113552e+00   3.1469325e+00   3.5526357e+00   3.7475562e+00   3.1812462e+00   2.1818668e+00   2.4927109e+00   2.3909738e+00   2.5729378e+00   3.7711998e+00   3.1620401e+00   3.1916270e+00   3.4478147e+00   3.1312883e+00   2.7541224e+00   2.6886547e+00   3.0483897e+00   3.2685282e+00   2.6752185e+00   2.0587064e+00   2.8619072e+00   2.8416143e+00   2.8554471e+00   2.9845926e+00   1.7697734e+00   2.7640668e+00   4.7690606e+00   3.8067806e+00   4.6866422e+00   4.2843668e+00   4.5417384e+00   5.4120246e+00   3.2161426e+00   5.0569442e+00   4.5165793e+00   4.9462324e+00   3.8595100e+00   4.0249346e+00   4.2787236e+00   3.7387507e+00   3.9160762e+00   4.0938708e+00   4.2028863e+00   5.5316487e+00   5.7297286e+00   3.6968486e+00   4.5074741e+00   3.6330985e+00   5.5146761e+00   3.6293227e+00   4.4495340e+00   
4.7599229e+00   3.5255287e+00   3.6076762e+00   4.3339547e+00   4.5590471e+00   4.8997298e+00   5.2856169e+00   4.3511402e+00   3.7760534e+00   4.2460554e+00   5.0103780e+00   4.3808704e+00   4.1939019e+00   3.5087649e+00   4.2018804e+00   4.4140402e+00   3.9807996e+00   3.8067806e+00   4.6775324e+00   4.5250934e+00   4.0376133e+00   3.7473276e+00   3.9523060e+00   4.1709262e+00   3.7872951e+00   2.5251796e-01   3.0546431e-01   6.0060595e-01   9.5035453e-01   4.4535192e-01   4.0293660e-01   5.0090417e-01   1.4096146e-01   7.6752131e-01   4.1449626e-01   1.2418578e-01   6.2024833e-01   1.1845977e+00   1.4700179e+00   9.4309624e-01   5.0905001e-01   1.0003617e+00   8.0358695e-01   5.8851328e-01   7.0826681e-01   6.6384020e-01   4.3456114e-01   5.6700421e-01   2.0656129e-01   4.2667565e-01   5.2942799e-01   4.4417983e-01   2.8192292e-01   2.1269358e-01   5.7324170e-01   1.1056650e+00   1.2393677e+00   1.4096146e-01   2.5251796e-01   6.8961791e-01   1.4096146e-01   5.0090417e-01   4.1449626e-01   5.0270183e-01   7.3535471e-01   5.0905001e-01   5.7324170e-01   8.5690100e-01   1.2418578e-01   8.0587320e-01   3.2352160e-01   7.3496673e-01   3.0275928e-01   3.5601468e+00   3.2472699e+00   3.7137483e+00   2.6693888e+00   3.3563815e+00   3.1472333e+00   3.4276314e+00   1.9506288e+00   3.3563695e+00   2.5739370e+00   2.1870094e+00   2.9033014e+00   2.6860278e+00   3.3789262e+00   2.2884830e+00   3.2153154e+00   3.1667333e+00   2.7423060e+00   3.2269725e+00   2.5465772e+00   3.5123782e+00   2.7147889e+00   3.6030381e+00   3.3619470e+00   3.0427908e+00   3.1888219e+00   3.5910272e+00   3.7805671e+00   3.1921903e+00   2.1611020e+00   2.4491518e+00   2.3430978e+00   2.5700421e+00   3.7741357e+00   3.1615131e+00   3.2084454e+00   3.4884789e+00   3.1228939e+00   2.7575407e+00   2.6617768e+00   3.0343591e+00   3.2842184e+00   2.6656374e+00   1.9595652e+00   2.8539100e+00   2.8474367e+00   2.8585579e+00   3.0059712e+00   1.6867642e+00   2.7634340e+00   4.7806735e+00   3.8055585e+00   
4.7194850e+00   4.2963997e+00   4.5579706e+00   5.4507801e+00   3.1945300e+00   5.0903533e+00   4.5297786e+00   4.9814379e+00   3.8841455e+00   4.0376849e+00   4.3069372e+00   3.7284750e+00   3.9173293e+00   4.1124749e+00   4.2221165e+00   5.5759608e+00   5.7633066e+00   3.6758942e+00   4.5370189e+00   3.6312130e+00   5.5536680e+00   3.6416405e+00   4.4736906e+00   4.7961103e+00   3.5380868e+00   3.6203213e+00   4.3467079e+00   4.5977693e+00   4.9380624e+00   5.3421274e+00   4.3637834e+00   3.7899304e+00   4.2477635e+00   5.0602038e+00   4.3953045e+00   4.2110583e+00   3.5192753e+00   4.2358121e+00   4.4378207e+00   4.0189525e+00   3.8055585e+00   4.7017335e+00   4.5483787e+00   4.0656879e+00   3.7516222e+00   3.9742971e+00   4.1845313e+00   3.7939847e+00   2.1269358e-01   4.4535192e-01   8.9366705e-01   2.1845981e-01   3.4378533e-01   3.7427929e-01   2.5651975e-01   7.7039952e-01   3.2586371e-01   2.1845981e-01   4.2667565e-01   1.2113327e+00   1.3801284e+00   8.7175869e-01   4.4651726e-01   1.0719360e+00   6.5223271e-01   7.3813096e-01   5.7867728e-01   4.4535192e-01   5.2655962e-01   6.0611244e-01   3.8776762e-01   4.0176783e-01   5.3588338e-01   5.0905001e-01   3.0000000e-01   3.0546431e-01   7.1169738e-01   9.4309624e-01   1.1327825e+00   2.5651975e-01   3.0275928e-01   8.1067767e-01   2.5651975e-01   3.2352160e-01   4.2538717e-01   3.7427929e-01   9.0252542e-01   3.0000000e-01   5.1138698e-01   7.7869083e-01   2.1845981e-01   6.6384020e-01   1.2418578e-01   6.9325418e-01   3.0546431e-01   3.7098973e+00   3.3770904e+00   3.8553941e+00   2.7868575e+00   3.4895316e+00   3.2571492e+00   3.5499573e+00   2.0646687e+00   3.4944845e+00   2.6743800e+00   2.3196869e+00   3.0181476e+00   2.8270253e+00   3.4973911e+00   2.3997585e+00   3.3600102e+00   3.2716172e+00   2.8619072e+00   3.3597438e+00   2.6649106e+00   3.6203213e+00   2.8440609e+00   3.7280682e+00   3.4822008e+00   3.1786890e+00   3.3296038e+00   3.7325066e+00   3.9121945e+00   3.3084060e+00   2.2888897e+00   
2.5683989e+00   2.4649412e+00   2.6906230e+00   3.8866112e+00   3.2625043e+00   3.3219248e+00   3.6264668e+00   3.2609948e+00   2.8656468e+00   2.7738624e+00   3.1430282e+00   3.4033622e+00   2.7865812e+00   2.0797392e+00   2.9638836e+00   2.9589097e+00   2.9695568e+00   3.1337459e+00   1.7991433e+00   2.8758936e+00   4.8875515e+00   3.9111857e+00   4.8490379e+00   4.4107143e+00   4.6725771e+00   5.5854254e+00   3.2933477e+00   5.2226262e+00   4.6541348e+00   5.1068487e+00   4.0049607e+00   4.1564977e+00   4.4321573e+00   3.8331006e+00   4.0161098e+00   4.2255639e+00   4.3417782e+00   5.7091264e+00   5.8970064e+00   3.7961619e+00   4.6611065e+00   3.7313856e+00   5.6903014e+00   3.7618406e+00   4.5942943e+00   4.9290197e+00   3.6553612e+00   3.7333492e+00   4.4613366e+00   4.7342792e+00   5.0749049e+00   5.4844039e+00   4.4774673e+00   3.9102500e+00   4.3611782e+00   5.2016658e+00   4.5034762e+00   4.3281161e+00   3.6300436e+00   4.3648112e+00   4.5562166e+00   4.1482002e+00   3.9111857e+00   4.8218416e+00   4.6648403e+00   4.1879434e+00   3.8717400e+00   4.0945154e+00   4.2919258e+00   3.9013483e+00   5.6700421e-01   9.9714776e-01   3.0546431e-01   4.4417983e-01   2.5251796e-01   3.0275928e-01   8.8835966e-01   3.2586371e-01   2.1845981e-01   4.4651726e-01   1.3360558e+00   1.5022608e+00   9.9714776e-01   5.6769031e-01   1.1765359e+00   7.6752131e-01   8.1354181e-01   6.9325418e-01   6.2092891e-01   5.4292906e-01   4.5470518e-01   4.0293660e-01   4.5581864e-01   6.4704320e-01   6.2024833e-01   1.4096146e-01   2.0656129e-01   8.1354181e-01   1.0574300e+00   1.2554784e+00   3.0275928e-01   4.4535192e-01   9.2264612e-01   3.0275928e-01   2.5251796e-01   5.2862779e-01   5.0592043e-01   8.0358695e-01   2.5251796e-01   5.6454040e-01   7.9878917e-01   2.1845981e-01   7.6752131e-01   1.2418578e-01   8.1242502e-01   4.1449626e-01   3.5875094e+00   3.2277825e+00   3.7190120e+00   2.6019240e+00   3.3414931e+00   3.0741797e+00   3.3904673e+00   1.8683030e+00   3.3506325e+00   
2.4892190e+00   2.1209506e+00   2.8530088e+00   2.6606291e+00   3.3264150e+00   2.2345869e+00   3.2325480e+00   3.0894572e+00   2.6859989e+00   3.1954750e+00   2.4836725e+00   3.4467337e+00   2.6928468e+00   3.5602810e+00   3.3090659e+00   3.0346426e+00   3.1953687e+00   3.5930845e+00   3.7635112e+00   3.1392617e+00   2.1242643e+00   2.3839455e+00   2.2806773e+00   2.5225548e+00   3.7070070e+00   3.0760590e+00   3.1551922e+00   3.4865435e+00   3.1033781e+00   2.6867856e+00   2.5906376e+00   2.9536363e+00   3.2348458e+00   2.6148507e+00   1.8841403e+00   2.7819255e+00   2.7801917e+00   2.7920574e+00   2.9774862e+00   1.6195190e+00   2.7001131e+00   4.7151191e+00   3.7310738e+00   4.6963107e+00   4.2348119e+00   4.5036643e+00   5.4345951e+00   3.1040223e+00   5.0660245e+00   4.4858951e+00   4.9576471e+00   3.8485743e+00   3.9894963e+00   4.2781102e+00   3.6535208e+00   3.8473084e+00   4.0656969e+00   4.1736133e+00   5.5611269e+00   5.7439963e+00   3.6142694e+00   4.5082936e+00   3.5527533e+00   5.5400450e+00   3.5988819e+00   4.4321573e+00   4.7754556e+00   3.4913787e+00   3.5638529e+00   4.2915574e+00   4.5844335e+00   4.9269527e+00   5.3501611e+00   4.3091163e+00   3.7395252e+00   4.1763853e+00   5.0687940e+00   4.3363292e+00   4.1568278e+00   3.4594086e+00   4.2175903e+00   4.4004449e+00   4.0139427e+00   3.7310738e+00   4.6611132e+00   4.5083524e+00   4.0415593e+00   3.7070350e+00   3.9354060e+00   4.1243443e+00   3.7225506e+00   4.8927739e-01   4.1449626e-01   2.0656129e-01   8.1242502e-01   5.0270183e-01   4.0293660e-01   2.8192292e-01   6.0611244e-01   8.2305664e-01   8.2899253e-01   9.3824087e-01   4.5581864e-01   1.4096146e-01   7.1840099e-01   2.1845981e-01   4.5470518e-01   2.1845981e-01   4.9674312e-01   4.2418962e-01   5.1607523e-01   6.0551856e-01   2.8192292e-01   2.1269358e-01   2.4837156e-01   4.5470518e-01   5.1607523e-01   4.2667565e-01   5.0991930e-01   6.8917100e-01   5.0270183e-01   4.1312257e-01   5.0180477e-01   5.0270183e-01   7.4549115e-01   
2.1269358e-01   1.4096146e-01   1.3190071e+00   6.4755655e-01   4.1449626e-01   5.1691876e-01   6.0611244e-01   2.5251796e-01   4.9674312e-01   3.0546431e-01   3.0000000e-01   3.5310961e+00   3.2313174e+00   3.6912396e+00   2.7363446e+00   3.3486156e+00   3.1550780e+00   3.4146950e+00   2.0587064e+00   3.3422688e+00   2.6000813e+00   2.3658814e+00   2.9005672e+00   2.7581254e+00   3.3764180e+00   2.2982662e+00   3.1914715e+00   3.1684808e+00   2.7581145e+00   3.2719146e+00   2.5906376e+00   3.5076679e+00   2.7164648e+00   3.6146980e+00   3.3629944e+00   3.0317688e+00   3.1699749e+00   3.5767944e+00   3.7653940e+00   3.1912695e+00   2.2046610e+00   2.5131017e+00   2.4132939e+00   2.5882494e+00   3.7797776e+00   3.1649733e+00   3.1986968e+00   3.4685882e+00   3.1575873e+00   2.7599092e+00   2.7031874e+00   3.0575551e+00   3.2787144e+00   2.6914804e+00   2.0914773e+00   2.8714673e+00   2.8482104e+00   2.8631525e+00   3.0002861e+00   1.8009624e+00   2.7738624e+00   4.7744685e+00   3.8132783e+00   4.7036953e+00   4.2925903e+00   4.5507995e+00   5.4317036e+00   3.2245243e+00   5.0748136e+00   4.5314818e+00   4.9621679e+00   3.8715927e+00   4.0372136e+00   4.2937599e+00   3.7469906e+00   3.9213497e+00   4.1030149e+00   4.2136261e+00   5.5512721e+00   5.7499082e+00   3.7127205e+00   4.5218897e+00   3.6377830e+00   5.5357771e+00   3.6429670e+00   4.4609633e+00   4.7775824e+00   3.5373240e+00   3.6158814e+00   4.3437318e+00   4.5790474e+00   4.9211035e+00   5.3110568e+00   4.3608329e+00   3.7876656e+00   4.2543813e+00   5.0356467e+00   4.3872625e+00   4.2028863e+00   3.5161021e+00   4.2189979e+00   4.4261470e+00   4.0000622e+00   3.8132783e+00   4.6893387e+00   4.5361087e+00   4.0527696e+00   3.7622948e+00   3.9645936e+00   4.1768667e+00   3.7924679e+00   8.6137722e-01   5.7867728e-01   1.2470767e+00   8.6361309e-01   2.8192292e-01   6.9369532e-01   9.8450810e-01   1.2949422e+00   5.7324170e-01   5.3588338e-01   4.0000000e-01   4.8135521e-01   3.0546431e-01   3.2816937e-01   
5.0817745e-01   3.4378533e-01   9.4558103e-01   6.2024833e-01   6.9728513e-01   9.2288144e-01   5.6700421e-01   4.3691963e-01   5.4292906e-01   8.7202528e-01   8.9095811e-01   5.0817745e-01   3.6171588e-01   3.8934542e-01   8.6361309e-01   7.9878917e-01   5.0592043e-01   8.6361309e-01   1.1959482e+00   5.4292906e-01   5.6454040e-01   1.6807352e+00   1.1055064e+00   5.0592043e-01   3.2586371e-01   9.7779835e-01   3.2816937e-01   9.4558103e-01   2.8507955e-01   6.6827038e-01   3.1533911e+00   2.8840079e+00   3.3274872e+00   2.5335921e+00   3.0169509e+00   2.8661222e+00   3.0732956e+00   1.9492232e+00   3.0013391e+00   2.3437032e+00   2.3116343e+00   2.5873149e+00   2.5591371e+00   3.0631725e+00   2.0220740e+00   2.8270253e+00   2.8656468e+00   2.4892190e+00   3.0178921e+00   2.3656538e+00   3.1846482e+00   2.4132559e+00   3.3163294e+00   3.0590735e+00   2.6993871e+00   2.8174914e+00   3.2310326e+00   3.4162231e+00   2.8802219e+00   1.9932786e+00   2.3173648e+00   2.2314118e+00   2.3212593e+00   3.4779999e+00   2.8654680e+00   2.8662571e+00   3.1113805e+00   2.8927401e+00   2.4634131e+00   2.4685230e+00   2.7948819e+00   2.9596963e+00   2.4341346e+00   2.0039447e+00   2.6000813e+00   2.5498770e+00   2.5700421e+00   2.6813098e+00   1.7123398e+00   2.4913669e+00   4.4418755e+00   3.5123791e+00   4.3488707e+00   3.9713081e+00   4.2172545e+00   5.0700045e+00   2.9631582e+00   4.7239900e+00   4.2113881e+00   4.5979409e+00   3.5255287e+00   3.7162377e+00   3.9448212e+00   3.4598280e+00   3.6097419e+00   3.7620043e+00   3.8810240e+00   5.1822310e+00   5.3953096e+00   3.4508156e+00   4.1665786e+00   3.3353616e+00   5.1763300e+00   3.3260356e+00   4.1143832e+00   4.4201622e+00   3.2188998e+00   3.2929599e+00   4.0183758e+00   4.2229849e+00   4.5637045e+00   4.9290256e+00   4.0343724e+00   3.4708900e+00   3.9559935e+00   4.6576736e+00   4.0502252e+00   3.8718131e+00   3.1963475e+00   3.8610636e+00   4.0785553e+00   3.6345765e+00   3.5123791e+00   4.3416284e+00   4.1864302e+00   
3.7018916e+00   3.4568305e+00   3.6254423e+00   3.8415026e+00   3.4775621e+00   4.0293660e-01   5.0905001e-01   3.8934542e-01   8.1130291e-01   2.5251796e-01   4.2538717e-01   4.8927739e-01   1.2406194e+00   1.3074132e+00   8.5233811e-01   5.0090417e-01   1.1185330e+00   5.6700421e-01   8.1099042e-01   5.3022554e-01   4.1449626e-01   5.3665999e-01   5.0905001e-01   5.0592043e-01   4.1449626e-01   6.0181382e-01   6.0060595e-01   2.5651975e-01   3.4583729e-01   8.0064372e-01   8.1558458e-01   1.0597541e+00   3.8934542e-01   4.2667565e-01   9.0074515e-01   3.8934542e-01   4.1586001e-01   5.0180477e-01   4.0293660e-01   1.1003197e+00   2.5651975e-01   4.5581864e-01   6.6539428e-01   4.1312257e-01   5.7324170e-01   2.0656129e-01   7.1504098e-01   4.0293660e-01   3.6583368e+00   3.3018939e+00   3.7934214e+00   2.7118627e+00   3.4196168e+00   3.1646752e+00   3.4663954e+00   1.9965608e+00   3.4302944e+00   2.5753574e+00   2.2837561e+00   2.9283888e+00   2.7788099e+00   3.4116298e+00   2.3101107e+00   3.3028359e+00   3.1719381e+00   2.7826178e+00   3.2957091e+00   2.5882494e+00   3.5217244e+00   2.7724782e+00   3.6509512e+00   3.3998935e+00   3.1126354e+00   3.2681313e+00   3.6719821e+00   3.8384263e+00   3.2202056e+00   2.2240476e+00   2.4960474e+00   2.3970928e+00   2.6119950e+00   3.7951491e+00   3.1592993e+00   3.2300555e+00   3.5604418e+00   3.2025128e+00   2.7701355e+00   2.6892823e+00   3.0524247e+00   3.3177721e+00   2.7092568e+00   2.0227167e+00   2.8731220e+00   2.8671099e+00   2.8775912e+00   3.0586720e+00   1.7332099e+00   2.7865812e+00   4.7866828e+00   3.8123695e+00   4.7714708e+00   4.3189924e+00   4.5796358e+00   5.5132325e+00   3.1921277e+00   5.1489022e+00   4.5737586e+00   5.0249531e+00   3.9185849e+00   4.0697987e+00   4.3502657e+00   3.7349501e+00   3.9102370e+00   4.1311343e+00   4.2548570e+00   5.6362307e+00   5.8240252e+00   3.7174048e+00   4.5773480e+00   3.6270581e+00   5.6209145e+00   3.6774027e+00   4.5072397e+00   4.8555167e+00   3.5675580e+00   
3.6401392e+00   4.3693804e+00   4.6657186e+00   5.0062061e+00   5.4227201e+00   4.3844239e+00   3.8261182e+00   4.2718480e+00   5.1373047e+00   4.4043123e+00   4.2383633e+00   3.5347392e+00   4.2868650e+00   4.4668117e+00   4.0716645e+00   3.8123695e+00   4.7338066e+00   4.5734052e+00   4.1036179e+00   3.7882079e+00   4.0078491e+00   4.1922661e+00   3.8027591e+00   6.8961791e-01   3.0546431e-01   4.4417983e-01   2.0656129e-01   4.1586001e-01   7.6625946e-01   8.9687438e-01   1.0919712e+00   5.7867728e-01   1.5422108e-01   7.3851529e-01   4.0293660e-01   4.1312257e-01   3.2586371e-01   5.7257017e-01   3.2816937e-01   4.1312257e-01   4.0147421e-01   2.0656129e-01   2.0656129e-01   2.0656129e-01   3.2586371e-01   3.2586371e-01   4.1312257e-01   7.0437330e-01   8.5205778e-01   3.0546431e-01   3.2352160e-01   5.0905001e-01   3.0546431e-01   6.5172743e-01   1.0000000e-01   2.1269358e-01   1.1283882e+00   6.1092863e-01   4.0293660e-01   5.0592043e-01   4.1586001e-01   4.0293660e-01   4.1449626e-01   3.7255734e-01   1.2418578e-01   3.4445326e+00   3.1392617e+00   3.6011035e+00   2.6118700e+00   3.2516941e+00   3.0511838e+00   3.3218097e+00   1.9189245e+00   3.2468925e+00   2.4924452e+00   2.2081024e+00   2.8038661e+00   2.6291264e+00   3.2767369e+00   2.1964719e+00   3.1025274e+00   3.0696611e+00   2.6485861e+00   3.1554034e+00   2.4715204e+00   3.4135983e+00   2.6151245e+00   3.5092032e+00   3.2604423e+00   2.9354140e+00   3.0782101e+00   3.4818889e+00   3.6726568e+00   3.0922811e+00   2.0843471e+00   2.3874354e+00   2.2845234e+00   2.4794505e+00   3.6775470e+00   3.0659000e+00   3.1055388e+00   3.3775462e+00   3.0430948e+00   2.6597612e+00   2.5873149e+00   2.9471553e+00   3.1807044e+00   2.5795723e+00   1.9450499e+00   2.7640668e+00   2.7473221e+00   2.7611864e+00   2.9015702e+00   1.6626642e+00   2.6693888e+00   4.6823704e+00   3.7130994e+00   4.6117428e+00   4.1946425e+00   4.4565357e+00   5.3399939e+00   3.1168466e+00   4.9805386e+00   4.4303862e+00   4.8738189e+00   
3.7806643e+00   3.9387918e+00   4.2018804e+00   3.6441274e+00   3.8290120e+00   4.0132700e+00   4.1177139e+00   5.4615788e+00   5.6559440e+00   3.5983434e+00   4.4321573e+00   3.5405803e+00   5.4429455e+00   3.5441556e+00   4.3687483e+00   4.6853394e+00   3.4399664e+00   3.5203203e+00   4.2473048e+00   4.4861009e+00   4.8281381e+00   5.2242271e+00   4.2652659e+00   3.6876909e+00   4.1503255e+00   4.9488209e+00   4.2966585e+00   4.1071698e+00   3.4205830e+00   4.1292490e+00   4.3363292e+00   3.9150359e+00   3.7130994e+00   4.5977729e+00   4.4473292e+00   3.9643224e+00   3.6603913e+00   3.8715927e+00   4.0861975e+00   3.6954796e+00   5.0991930e-01   1.1327825e+00   5.7257017e-01   4.0293660e-01   3.0811765e-01   1.5771666e+00   1.7488874e+00   1.2431040e+00   8.1273630e-01   1.4170618e+00   1.0106392e+00   1.0389435e+00   9.3824087e-01   7.3813096e-01   7.5976039e-01   6.6491075e-01   6.0611244e-01   6.9728513e-01   8.8861541e-01   8.5177726e-01   3.8776762e-01   4.2538717e-01   1.0346741e+00   1.2943100e+00   1.5015203e+00   5.0991930e-01   6.2482915e-01   1.1473003e+00   5.0991930e-01   1.2418578e-01   7.6752131e-01   7.4586719e-01   6.0181382e-01   3.0275928e-01   7.7869083e-01   1.0440187e+00   4.0293660e-01   1.0120221e+00   3.2352160e-01   1.0597541e+00   6.4704320e-01   3.7504939e+00   3.3717768e+00   3.8731169e+00   2.7062054e+00   3.4865562e+00   3.1921903e+00   3.5262546e+00   1.9522524e+00   3.5018009e+00   2.5914913e+00   2.1818668e+00   2.9807120e+00   2.7874290e+00   3.4557351e+00   2.3604042e+00   3.3915488e+00   3.2027420e+00   2.8150728e+00   3.3206640e+00   2.6018930e+00   3.5642457e+00   2.8360166e+00   3.6902583e+00   3.4394878e+00   3.1847477e+00   3.3503379e+00   3.7461474e+00   3.9068076e+00   3.2666666e+00   2.2590074e+00   2.4950353e+00   2.3935209e+00   2.6534332e+00   3.8259590e+00   3.1834936e+00   3.2834077e+00   3.6377049e+00   3.2390016e+00   2.8060305e+00   2.7012392e+00   3.0647279e+00   3.3658240e+00   2.7423171e+00   1.9645331e+00   
2.8984764e+00   2.9033203e+00   2.9139413e+00   3.1189900e+00   1.7118795e+00   2.8228127e+00   4.8290847e+00   3.8416142e+00   4.8350745e+00   4.3569606e+00   4.6261788e+00   5.5774554e+00   3.1958228e+00   5.2067803e+00   4.6153241e+00   5.0947934e+00   3.9803199e+00   4.1159553e+00   4.4131159e+00   3.7587872e+00   3.9513472e+00   4.1881498e+00   4.3024754e+00   5.7071195e+00   5.8839539e+00   3.7280682e+00   4.6419531e+00   3.6578722e+00   5.6843788e+00   3.7276068e+00   4.5625120e+00   4.9179194e+00   3.6182608e+00   3.6866535e+00   4.4136707e+00   4.7307689e+00   5.0723886e+00   5.5062533e+00   4.4301849e+00   3.8690719e+00   4.2943891e+00   5.2192815e+00   4.4536259e+00   4.2828634e+00   3.5797958e+00   4.3570079e+00   4.5278761e+00   4.1542558e+00   3.8416142e+00   4.7899951e+00   4.6341170e+00   4.1740602e+00   3.8316735e+00   4.0656969e+00   4.2413113e+00   3.8376713e+00   6.8961791e-01   3.0811765e-01   1.4096146e-01   6.4755655e-01   1.1229906e+00   1.3835747e+00   8.6361309e-01   4.2667565e-01   9.4009473e-01   7.0784540e-01   5.3665999e-01   6.2482915e-01   6.3977563e-01   4.3691963e-01   4.4651726e-01   1.5422108e-01   3.7598397e-01   4.4535192e-01   3.7598397e-01   2.1845981e-01   1.4096146e-01   5.5419992e-01   1.0065841e+00   1.1474460e+00   0.0000000e+00   3.0811765e-01   6.5223271e-01   0.0000000e+00   5.0991930e-01   3.2586371e-01   4.2667565e-01   8.3172002e-01   5.0991930e-01   5.6769031e-01   7.5082357e-01   2.1845981e-01   7.0479928e-01   3.0811765e-01   6.4755655e-01   2.1845981e-01   3.4865562e+00   3.1726595e+00   3.6377960e+00   2.5987470e+00   3.2814045e+00   3.0627375e+00   3.3515846e+00   1.8841865e+00   3.2769379e+00   2.5038079e+00   2.1311468e+00   2.8311678e+00   2.6104387e+00   3.2962520e+00   2.2214438e+00   3.1433122e+00   3.0878634e+00   2.6552472e+00   3.1570103e+00   2.4668912e+00   3.4394878e+00   2.6411293e+00   3.5233648e+00   3.2747247e+00   2.9659871e+00   3.1154783e+00   3.5134741e+00   3.7059620e+00   3.1148696e+00   
2.0851901e+00   2.3731428e+00   2.2655571e+00   2.4927109e+00   3.6920087e+00   3.0823446e+00   3.1337459e+00   3.4135200e+00   3.0481703e+00   2.6780487e+00   2.5874301e+00   2.9489507e+00   3.2027420e+00   2.5873149e+00   1.8973383e+00   2.7738355e+00   2.7632614e+00   2.7778954e+00   2.9269923e+00   1.6390769e+00   2.6848587e+00   4.7106706e+00   3.7313856e+00   4.6446321e+00   4.2142736e+00   4.4836580e+00   5.3716885e+00   3.1250284e+00   5.0074019e+00   4.4485220e+00   4.9128219e+00   3.8150636e+00   3.9624529e+00   4.2358121e+00   3.6602286e+00   3.8605980e+00   4.0488387e+00   4.1418643e+00   5.4970002e+00   5.6855224e+00   3.5964347e+00   4.4685630e+00   3.5634461e+00   5.4730406e+00   3.5693950e+00   4.3989089e+00   4.7150659e+00   3.4668130e+00   3.5464993e+00   4.2723380e+00   4.5155386e+00   4.8594290e+00   5.2647079e+00   4.2921213e+00   3.7064459e+00   4.1581964e+00   4.9913682e+00   4.3286007e+00   4.1303097e+00   3.4468286e+00   4.1669742e+00   4.3729308e+00   3.9624170e+00   3.7313856e+00   4.6297577e+00   4.4844827e+00   4.0056359e+00   3.6817961e+00   3.9035218e+00   4.1179678e+00   3.7164366e+00   6.2024833e-01   8.1304731e-01   1.1868139e+00   4.8036801e-01   7.1799256e-01   2.8192292e-01   3.2816937e-01   3.2816937e-01   3.0546431e-01   3.2352160e-01   3.2352160e-01   8.5205778e-01   4.8927739e-01   6.6384020e-01   7.3496673e-01   4.5581864e-01   2.4837156e-01   3.2586371e-01   7.6752131e-01   7.4549115e-01   3.2352160e-01   4.1449626e-01   5.0180477e-01   6.8961791e-01   5.8851328e-01   2.5251796e-01   6.8961791e-01   1.0919712e+00   3.7255734e-01   4.2667565e-01   1.4993782e+00   1.0344911e+00   5.0592043e-01   4.5581864e-01   8.1304731e-01   3.0546431e-01   8.5205778e-01   1.0000000e-01   4.9766035e-01   3.3472053e+00   3.0922811e+00   3.5254266e+00   2.6661987e+00   3.2094276e+00   3.0570957e+00   3.2869053e+00   2.0190980e+00   3.1913594e+00   2.5206151e+00   2.3403819e+00   2.7928582e+00   2.6680945e+00   3.2615924e+00   2.2070201e+00   
3.0233425e+00   3.0716969e+00   2.6575076e+00   3.1694367e+00   2.5088543e+00   3.4030318e+00   2.5954147e+00   3.4988409e+00   3.2483608e+00   2.8891737e+00   3.0123702e+00   3.4182420e+00   3.6203759e+00   3.0811775e+00   2.1190324e+00   2.4416796e+00   2.3440712e+00   2.4897570e+00   3.6753309e+00   3.0715435e+00   3.0851463e+00   3.3123070e+00   3.0424689e+00   2.6625505e+00   2.6241824e+00   2.9689697e+00   3.1616811e+00   2.5961850e+00   2.0559262e+00   2.7803619e+00   2.7462372e+00   2.7639489e+00   2.8736288e+00   1.7674365e+00   2.6773131e+00   4.6660957e+00   3.7173526e+00   4.5567672e+00   4.1782968e+00   4.4326194e+00   5.2720689e+00   3.1469325e+00   4.9232255e+00   4.4057732e+00   4.8164157e+00   3.7433882e+00   3.9194796e+00   4.1567419e+00   3.6582432e+00   3.8303544e+00   3.9861488e+00   4.0892044e+00   5.3882212e+00   5.5946413e+00   3.6180819e+00   4.3839191e+00   3.5469476e+00   5.3734444e+00   3.5262672e+00   4.3306501e+00   4.6237863e+00   3.4237160e+00   3.5051302e+00   4.2288456e+00   4.4201622e+00   4.7609637e+00   5.1280035e+00   4.2469785e+00   3.6684143e+00   4.1480002e+00   4.8602572e+00   4.2765700e+00   4.0824098e+00   3.4092877e+00   4.0737132e+00   4.2991233e+00   3.8524190e+00   3.7173526e+00   4.5590471e+00   4.4107160e+00   3.9202843e+00   3.6509512e+00   3.8388884e+00   4.0680120e+00   3.6894983e+00   4.1449626e-01   6.6539428e-01   1.0717668e+00   1.1847335e+00   7.0776547e-01   3.2816937e-01   9.2095040e-01   4.4651726e-01   6.0060595e-01   3.8934542e-01   6.1092863e-01   3.7598397e-01   3.0000000e-01   4.1312257e-01   2.4837156e-01   4.0293660e-01   4.1312257e-01   2.0656129e-01   3.0000000e-01   6.0611244e-01   7.3535471e-01   9.3801395e-01   3.0811765e-01   4.2538717e-01   7.1462831e-01   3.0811765e-01   5.2574978e-01   3.0275928e-01   3.2816937e-01   1.1107977e+00   4.5470518e-01   4.1449626e-01   4.8927739e-01   4.1449626e-01   4.4417983e-01   2.8192292e-01   5.2942799e-01   2.5251796e-01   3.4297053e+00   3.0906838e+00   
3.5704156e+00   2.5301680e+00   3.2062204e+00   2.9663489e+00   3.2615889e+00   1.8330979e+00   3.2074600e+00   2.4030878e+00   2.1292724e+00   2.7344480e+00   2.5716369e+00   3.2053511e+00   2.1242643e+00   3.0798277e+00   2.9831836e+00   2.5729378e+00   3.0964590e+00   2.3917863e+00   3.3353616e+00   2.5635110e+00   3.4441347e+00   3.1882407e+00   2.8938821e+00   3.0477086e+00   3.4484194e+00   3.6265826e+00   3.0209783e+00   2.0203134e+00   2.3063579e+00   2.2046610e+00   2.4100833e+00   3.5972040e+00   2.9748436e+00   3.0349291e+00   3.3414931e+00   2.9918962e+00   2.5764694e+00   2.5038051e+00   2.8573838e+00   3.1113597e+00   2.5079404e+00   1.8623849e+00   2.6799601e+00   2.6656374e+00   2.6804452e+00   2.8458006e+00   1.5870088e+00   2.5906376e+00   4.6056614e+00   3.6293396e+00   4.5625120e+00   4.1184849e+00   4.3862724e+00   5.2957861e+00   3.0253131e+00   4.9300368e+00   4.3656957e+00   4.8256905e+00   3.7228356e+00   3.8717400e+00   4.1487889e+00   3.5605424e+00   3.7509165e+00   3.9489970e+00   4.0502806e+00   5.4193574e+00   5.6096505e+00   3.5201263e+00   4.3797165e+00   3.4555095e+00   5.4004015e+00   3.4805320e+00   4.3069452e+00   4.6373516e+00   3.3738930e+00   3.4478147e+00   4.1762321e+00   4.4428877e+00   4.7870294e+00   5.1982218e+00   4.1948678e+00   3.6180819e+00   4.0668114e+00   4.9227056e+00   4.2245318e+00   4.0358897e+00   3.3459883e+00   4.0835979e+00   4.2783731e+00   3.8797354e+00   3.6293396e+00   4.5370189e+00   4.3879553e+00   3.9155334e+00   3.5955337e+00   3.8113970e+00   4.0131848e+00   3.6132595e+00   5.2862779e-01   1.2431040e+00   1.5013525e+00   9.7779835e-01   5.3588338e-01   1.0669582e+00   8.1385214e-01   6.6432544e-01   7.2823007e-01   6.5223271e-01   5.1138698e-01   5.6700421e-01   2.5251796e-01   4.6472023e-01   5.6769031e-01   4.9766035e-01   2.5651975e-01   2.1269358e-01   6.6432544e-01   1.1134787e+00   1.2632199e+00   1.4096146e-01   2.8507955e-01   7.6787403e-01   1.4096146e-01   4.0293660e-01   4.4651726e-01   
5.1691876e-01   7.1840099e-01   4.1586001e-01   6.3108414e-01   8.7021234e-01   2.0000000e-01   8.1385214e-01   2.5251796e-01   7.6787403e-01   3.2586371e-01   3.6025735e+00   3.2810515e+00   3.7511944e+00   2.6894009e+00   3.3904673e+00   3.1636869e+00   3.4574937e+00   1.9666356e+00   3.3893691e+00   2.5954173e+00   2.1997395e+00   2.9322283e+00   2.7092568e+00   3.4012145e+00   2.3186758e+00   3.2568914e+00   3.1861493e+00   2.7595194e+00   3.2561045e+00   2.5646808e+00   3.5381764e+00   2.7476411e+00   3.6278993e+00   3.3809159e+00   3.0768226e+00   3.2277675e+00   3.6265617e+00   3.8150532e+00   3.2176230e+00   2.1864840e+00   2.4668912e+00   2.3596992e+00   2.5949561e+00   3.7935487e+00   3.1789378e+00   3.2360886e+00   3.5252258e+00   3.1522058e+00   2.7777040e+00   2.6819136e+00   3.0473722e+00   3.3079290e+00   2.6886547e+00   1.9757309e+00   2.8726212e+00   2.8654680e+00   2.8788483e+00   3.0349462e+00   1.7160413e+00   2.7852734e+00   4.8087107e+00   3.8282466e+00   4.7531334e+00   4.3176393e+00   4.5857287e+00   5.4831923e+00   3.2147850e+00   5.1185883e+00   4.5544260e+00   5.0194259e+00   3.9185849e+00   4.0655452e+00   4.3416283e+00   3.7535680e+00   3.9509795e+00   4.1478442e+00   4.2472736e+00   5.6096505e+00   5.7957776e+00   3.6945993e+00   4.5734622e+00   3.6568202e+00   5.5854254e+00   3.6720840e+00   4.5038991e+00   4.8262859e+00   3.5684917e+00   3.6474985e+00   4.3740189e+00   4.6282931e+00   4.9713928e+00   5.3806679e+00   4.3928114e+00   3.8121990e+00   4.2612863e+00   5.1032991e+00   4.4267055e+00   4.2347444e+00   3.5464871e+00   4.2738510e+00   4.4745238e+00   4.0663411e+00   3.8282466e+00   4.7338066e+00   4.5852690e+00   4.1075310e+00   3.7823897e+00   4.0070636e+00   4.2156933e+00   3.8157950e+00   1.6177449e+00   1.7454671e+00   1.2604558e+00   8.6361309e-01   1.4955532e+00   1.0118409e+00   1.1594648e+00   9.6204649e-01   6.2081167e-01   9.1750357e-01   8.7504951e-01   7.6752131e-01   8.0660588e-01   9.5965467e-01   9.2859317e-01   
5.7324170e-01   6.2205176e-01   1.1313840e+00   1.2653669e+00   1.4930627e+00   6.4755655e-01   7.0479928e-01   1.2236003e+00   6.4755655e-01   2.1269358e-01   8.5105559e-01   7.7360126e-01   7.1169738e-01   2.5651975e-01   8.7229670e-01   1.1327578e+00   5.3588338e-01   1.0269295e+00   3.8934542e-01   1.1042097e+00   7.2823007e-01   4.0317004e+00   3.6659830e+00   4.1618561e+00   3.0123702e+00   3.7804276e+00   3.4970843e+00   3.8244351e+00   2.2591077e+00   3.7930789e+00   2.8953397e+00   2.4889124e+00   3.2809188e+00   3.0866488e+00   3.7578933e+00   2.6595288e+00   3.6754272e+00   3.5073435e+00   3.1173742e+00   3.6212723e+00   2.9065572e+00   3.8667462e+00   3.1302383e+00   3.9918403e+00   3.7416229e+00   3.4760444e+00   3.6375992e+00   4.0358101e+00   4.2016915e+00   3.5683934e+00   2.5569968e+00   2.8007817e+00   2.6989368e+00   2.9539253e+00   4.1306024e+00   3.4882801e+00   3.5831257e+00   3.9280671e+00   3.5362697e+00   3.1099883e+00   3.0065416e+00   3.3706887e+00   3.6672620e+00   3.0442126e+00   2.2719663e+00   3.2032390e+00   3.2071637e+00   3.2176230e+00   3.4155491e+00   2.0139971e+00   3.1260028e+00   5.1310217e+00   4.1456639e+00   5.1323742e+00   4.6609614e+00   4.9284761e+00   5.8739676e+00   3.4984873e+00   5.5048216e+00   4.9175276e+00   5.3898806e+00   4.2781762e+00   4.4176533e+00   4.7107211e+00   4.0621414e+00   4.2494597e+00   4.4865569e+00   4.6045396e+00   6.0012333e+00   6.1816086e+00   4.0339598e+00   4.9390284e+00   3.9601358e+00   5.9803696e+00   4.0277694e+00   4.8626276e+00   5.2147981e+00   3.9185849e+00   3.9887029e+00   4.7161140e+00   5.0257341e+00   5.3673499e+00   5.7939320e+00   4.7320534e+00   4.1713411e+00   4.5995433e+00   5.5085511e+00   4.7536729e+00   4.5857356e+00   3.8819510e+00   4.6519178e+00   4.8256399e+00   4.4430890e+00   4.1456639e+00   5.0898961e+00   4.9316646e+00   4.4680158e+00   4.1325542e+00   4.3648035e+00   4.5412859e+00   4.1418557e+00   4.5581864e-01   4.1586001e-01   7.7074935e-01   5.0991930e-01   
7.1840099e-01   7.2486328e-01   7.3145860e-01   1.2122249e+00   9.2112464e-01   1.1384810e+00   1.1451403e+00   9.1163729e-01   7.0386584e-01   7.4855857e-01   1.2220203e+00   1.1947245e+00   6.6827038e-01   6.2081167e-01   3.4378533e-01   1.1229906e+00   9.9348625e-01   5.2942799e-01   1.1229906e+00   1.5344133e+00   8.2275389e-01   8.5233811e-01   1.8985661e+00   1.4692412e+00   8.9653332e-01   8.7420176e-01   1.2431040e+00   7.3813096e-01   1.2951131e+00   5.5419992e-01   9.3801395e-01   3.5789198e+00   3.3663244e+00   3.7753619e+00   3.0049442e+00   3.4909841e+00   3.3695525e+00   3.5654259e+00   2.3989172e+00   3.4663502e+00   2.8427326e+00   2.7185849e+00   3.0894572e+00   3.0108764e+00   3.5617386e+00   2.5173832e+00   3.2758681e+00   3.3732554e+00   2.9816791e+00   3.4895316e+00   2.8451507e+00   3.6905956e+00   2.8989400e+00   3.8036776e+00   3.5549103e+00   3.1734856e+00   3.2772927e+00   3.6834126e+00   3.8868430e+00   3.3809159e+00   2.4588872e+00   2.7850016e+00   2.6918796e+00   2.8113773e+00   3.9804187e+00   3.3742776e+00   3.3692592e+00   3.5726491e+00   3.3587234e+00   2.9691171e+00   2.9539253e+00   3.2926883e+00   3.4584304e+00   2.9217347e+00   2.4321061e+00   3.0988783e+00   3.0546600e+00   3.0736357e+00   3.1699903e+00   2.1306832e+00   2.9913743e+00   4.9420128e+00   4.0177712e+00   4.8123874e+00   4.4703485e+00   4.7113827e+00   5.5147622e+00   3.4715574e+00   5.1811127e+00   4.6944291e+00   5.0587041e+00   4.0117533e+00   4.2085851e+00   4.4199792e+00   3.9616570e+00   4.1103933e+00   4.2537610e+00   4.3721243e+00   5.6218420e+00   5.8419148e+00   3.9412893e+00   4.6390186e+00   3.8408636e+00   5.6159291e+00   3.8175051e+00   4.5984929e+00   4.8771654e+00   3.7143727e+00   3.7943375e+00   4.5141564e+00   4.6734732e+00   5.0086627e+00   5.3396700e+00   4.5298770e+00   3.9647930e+00   4.4561969e+00   5.0778756e+00   4.5477422e+00   4.3671210e+00   3.6996802e+00   4.3269400e+00   4.5602465e+00   4.0901232e+00   4.0177712e+00   4.8233796e+00   
4.6689006e+00   4.1757336e+00   3.9466531e+00   4.1130674e+00   4.3408596e+00   3.9840684e+00   5.3588338e-01   9.7098574e-01   6.0611244e-01   7.4549115e-01   1.0101422e+00   8.1242502e-01   1.2342162e+00   1.1486378e+00   1.1959482e+00   1.4468211e+00   1.0906388e+00   9.4287188e-01   1.0346741e+00   1.3793330e+00   1.4148192e+00   1.0065841e+00   5.5419992e-01   2.8507955e-01   1.3835747e+00   1.2681309e+00   9.0679720e-01   1.3835747e+00   1.6801917e+00   1.0588560e+00   1.0122141e+00   2.2040881e+00   1.5564198e+00   1.0122141e+00   7.7553525e-01   1.4987155e+00   7.4893123e-01   1.4320120e+00   7.3813096e-01   1.1765359e+00   3.3186105e+00   3.0934278e+00   3.5115632e+00   2.9015832e+00   3.2557855e+00   3.1381850e+00   3.2787144e+00   2.3983798e+00   3.2261964e+00   2.6655261e+00   2.7738368e+00   2.8425716e+00   2.9377092e+00   3.3097860e+00   2.3365894e+00   3.0236933e+00   3.1147370e+00   2.7988444e+00   3.3431646e+00   2.7201960e+00   3.4033622e+00   2.7009102e+00   3.5863979e+00   3.3171611e+00   2.9439491e+00   3.0327979e+00   3.4475678e+00   3.6195561e+00   3.1337459e+00   2.3758157e+00   2.6957302e+00   2.6219409e+00   2.6429556e+00   3.7305206e+00   3.1152653e+00   3.0762634e+00   3.3088561e+00   3.2115055e+00   2.7318540e+00   2.8101506e+00   3.0967820e+00   3.1998457e+00   2.7619926e+00   2.4596921e+00   2.9002737e+00   2.8148869e+00   2.8449691e+00   2.9378173e+00   2.1723936e+00   2.7843048e+00   4.6358686e+00   3.7621042e+00   4.5316360e+00   4.1928583e+00   4.4221843e+00   5.2364242e+00   3.2682245e+00   4.9072991e+00   4.4428425e+00   4.7561724e+00   3.7219581e+00   3.9498605e+00   4.1390009e+00   3.7275066e+00   3.8377652e+00   3.9569614e+00   4.0917968e+00   5.3289557e+00   5.5738695e+00   3.7540308e+00   4.3457024e+00   3.5797958e+00   5.3465693e+00   3.5720882e+00   4.3012547e+00   4.5936468e+00   3.4616111e+00   3.5205889e+00   4.2389017e+00   4.4031390e+00   4.7432976e+00   5.0569442e+00   4.2531342e+00   3.7092459e+00   4.2038056e+00   
4.8064634e+00   4.2402361e+00   4.0806404e+00   3.4276314e+00   4.0438546e+00   4.2676463e+00   3.8085992e+00   3.7621042e+00   4.5272919e+00   4.3667579e+00   3.8946701e+00   3.7163265e+00   3.8338395e+00   4.0342445e+00   3.7061759e+00   4.4651726e-01   4.4651726e-01   3.2816937e-01   5.7257017e-01   3.4378533e-01   8.2384013e-01   6.6432544e-01   8.0758367e-01   9.3048953e-01   5.8851328e-01   4.3691963e-01   5.1691876e-01   8.8062848e-01   8.9917007e-01   5.0817745e-01   3.6171588e-01   3.2816937e-01   8.6361309e-01   7.3851529e-01   4.1449626e-01   8.6361309e-01   1.1845977e+00   5.4292906e-01   4.9766035e-01   1.6754036e+00   1.0919712e+00   5.3309112e-01   6.2024833e-01   9.7098574e-01   3.8934542e-01   9.3824087e-01   2.8507955e-01   6.5223271e-01   3.5185448e+00   3.2633258e+00   3.6996953e+00   2.8710255e+00   3.3892942e+00   3.2497279e+00   3.4561374e+00   2.2371784e+00   3.3772302e+00   2.7023432e+00   2.5704711e+00   2.9638836e+00   2.8904978e+00   3.4483274e+00   2.3826791e+00   3.1954061e+00   3.2493673e+00   2.8645447e+00   3.3669805e+00   2.7184506e+00   3.5654259e+00   2.7812639e+00   3.6908684e+00   3.4451701e+00   3.0736340e+00   3.1881331e+00   3.6017790e+00   3.7914024e+00   3.2604423e+00   2.3308454e+00   2.6548674e+00   2.5630676e+00   2.6859989e+00   3.8615219e+00   3.2492317e+00   3.2498302e+00   3.4856775e+00   3.2460061e+00   2.8456767e+00   2.8220742e+00   3.1709561e+00   3.3452644e+00   2.7965957e+00   2.2780262e+00   2.9733693e+00   2.9362769e+00   2.9511072e+00   3.0600825e+00   1.9688013e+00   2.8661222e+00   4.8176767e+00   3.8889176e+00   4.7230291e+00   4.3578295e+00   4.5962942e+00   5.4442203e+00   3.3242177e+00   5.1039076e+00   4.5914416e+00   4.9653393e+00   3.8994399e+00   4.0931001e+00   4.3174903e+00   3.8262108e+00   3.9673816e+00   4.1302370e+00   4.2654212e+00   5.5551457e+00   5.7673205e+00   3.8189943e+00   4.5366342e+00   3.7059360e+00   5.5500825e+00   3.6985459e+00   4.4934936e+00   4.7995387e+00   3.5922369e+00   
3.6724425e+00   4.3963259e+00   4.6010378e+00   4.9364818e+00   5.2936070e+00   4.4094664e+00   3.8558772e+00   4.3453589e+00   5.0159331e+00   4.4225169e+00   4.2579435e+00   3.5745624e+00   4.2301614e+00   4.4459076e+00   3.9875954e+00   3.8889176e+00   4.7169919e+00   4.5531173e+00   4.0619315e+00   3.8238093e+00   3.9999729e+00   4.2141826e+00   3.8611742e+00   6.3808075e-01   3.0275928e-01   3.7598397e-01   2.1269358e-01   5.6769031e-01   3.4378533e-01   5.3022554e-01   5.0991930e-01   2.1845981e-01   1.4096146e-01   1.4096146e-01   4.5581864e-01   4.5581864e-01   3.0811765e-01   6.0670504e-01   7.3496673e-01   4.2667565e-01   3.2816937e-01   4.0293660e-01   4.2667565e-01   7.6787403e-01   1.4096146e-01   1.2418578e-01   1.2394907e+00   7.1504098e-01   3.2586371e-01   5.2942799e-01   5.2862779e-01   3.2586371e-01   5.2942799e-01   2.5651975e-01   2.1269358e-01   3.4944845e+00   3.2032390e+00   3.6588207e+00   2.7040077e+00   3.3172489e+00   3.1387381e+00   3.3902207e+00   2.0195610e+00   3.3129652e+00   2.5744164e+00   2.3173648e+00   2.8753045e+00   2.7215057e+00   3.3565935e+00   2.2694598e+00   3.1556501e+00   3.1511848e+00   2.7390328e+00   3.2351115e+00   2.5646808e+00   3.4858646e+00   2.6854398e+00   3.5885398e+00   3.3452644e+00   3.0014619e+00   3.1359624e+00   3.5442447e+00   3.7351231e+00   3.1683717e+00   2.1722580e+00   2.4832809e+00   2.3831271e+00   2.5617005e+00   3.7607269e+00   3.1489919e+00   3.1764760e+00   3.4370400e+00   3.1222134e+00   2.7420671e+00   2.6759392e+00   3.0406669e+00   3.2584404e+00   2.6649106e+00   2.0477765e+00   2.8508344e+00   2.8325946e+00   2.8443188e+00   2.9745020e+00   1.7495699e+00   2.7521074e+00   4.7498176e+00   3.7908064e+00   4.6736601e+00   4.2736523e+00   4.5261084e+00   5.4025762e+00   3.1986968e+00   5.0495127e+00   4.5070435e+00   4.9284820e+00   3.8418637e+00   4.0108142e+00   4.2628455e+00   3.7198158e+00   3.8891307e+00   4.0719001e+00   4.1917075e+00   5.5215376e+00   5.7192826e+00   3.6876129e+00   
4.4897240e+00   3.6129201e+00   5.5066558e+00   3.6138577e+00   4.4349731e+00   4.7514299e+00   3.5090368e+00   3.5920050e+00   4.3185209e+00   4.5521574e+00   4.8905855e+00   5.2768100e+00   4.3339547e+00   3.7672401e+00   4.2403934e+00   4.9963306e+00   4.3598652e+00   4.1826701e+00   3.4920978e+00   4.1853524e+00   4.3933832e+00   3.9574195e+00   3.7908064e+00   4.6611791e+00   4.5034762e+00   4.0149574e+00   3.7307866e+00   3.9355645e+00   4.1498459e+00   3.7732223e+00   6.0551856e-01   4.4535192e-01   6.0670504e-01   1.1765359e+00   6.9325418e-01   9.2288144e-01   9.3637892e-01   7.3535471e-01   5.3665999e-01   5.8914551e-01   1.0576043e+00   1.0106392e+00   4.5581864e-01   5.4292906e-01   4.5581864e-01   9.4009473e-01   8.6290690e-01   4.5581864e-01   9.4009473e-01   1.3885563e+00   6.5223271e-01   7.4740267e-01   1.7041201e+00   1.3421549e+00   7.2823007e-01   6.0611244e-01   1.0653845e+00   6.0121055e-01   1.1521791e+00   4.1586001e-01   7.7919451e-01   3.1037808e+00   2.8727295e+00   3.2914954e+00   2.5112138e+00   2.9950832e+00   2.8632951e+00   3.0711321e+00   1.9332545e+00   2.9709745e+00   2.3448578e+00   2.2688022e+00   2.5914913e+00   2.5183808e+00   3.0579528e+00   2.0217308e+00   2.7906520e+00   2.8719896e+00   2.4738237e+00   2.9926636e+00   2.3440712e+00   3.1967616e+00   2.3980102e+00   3.3005331e+00   3.0483897e+00   2.6752185e+00   2.7868575e+00   3.1931777e+00   3.3968373e+00   2.8794765e+00   1.9640287e+00   2.2899742e+00   2.1990648e+00   2.3082381e+00   3.4762640e+00   2.8726212e+00   2.8752807e+00   3.0841055e+00   2.8599559e+00   2.4658566e+00   2.4543822e+00   2.7852734e+00   2.9558536e+00   2.4184985e+00   1.9730073e+00   2.5941661e+00   2.5490308e+00   2.5692104e+00   2.6676432e+00   1.6855044e+00   2.4875166e+00   4.4549901e+00   3.5192877e+00   4.3283747e+00   3.9701062e+00   4.2192020e+00   5.0364062e+00   2.9740218e+00   4.6944291e+00   4.1953299e+00   4.5849301e+00   3.5249823e+00   3.7114178e+00   3.9340128e+00   3.4663826e+00   
3.6308220e+00   3.7719045e+00   3.8752244e+00   5.1489675e+00   5.3620178e+00   3.4363773e+00   4.1584261e+00   3.3485848e+00   5.1375979e+00   3.3209346e+00   4.1101337e+00   4.3926615e+00   3.2186045e+00   3.2982828e+00   4.0192434e+00   4.1885158e+00   4.5274663e+00   4.8785522e+00   4.0373008e+00   3.4619693e+00   3.9494193e+00   4.6147130e+00   4.0642632e+00   3.8698115e+00   3.2042572e+00   3.8459316e+00   4.0795080e+00   3.6227082e+00   3.5192877e+00   4.3377723e+00   4.1907472e+00   3.6992844e+00   3.4507210e+00   3.6230931e+00   3.8568809e+00   3.4855556e+00   4.5581864e-01   1.2418578e-01   6.2660376e-01   5.1607523e-01   5.2655962e-01   8.0096515e-01   4.0438741e-01   3.0546431e-01   4.0438741e-01   6.4806901e-01   7.1504098e-01   4.4535192e-01   3.2586371e-01   4.9857388e-01   7.0784540e-01   6.2081167e-01   4.5581864e-01   7.0784540e-01   9.3824087e-01   4.0147421e-01   3.2586371e-01   1.5252485e+00   8.1558458e-01   3.7598397e-01   4.0147421e-01   8.1099042e-01   1.2418578e-01   6.9006418e-01   2.1269358e-01   5.0270183e-01   3.4104878e+00   3.1150013e+00   3.5735680e+00   2.6813198e+00   3.2413086e+00   3.0598576e+00   3.2985843e+00   2.0418831e+00   3.2329790e+00   2.5171713e+00   2.3816204e+00   2.7937685e+00   2.7102198e+00   3.2724336e+00   2.2054062e+00   3.0737397e+00   3.0650685e+00   2.6738621e+00   3.1983741e+00   2.5253420e+00   3.3951420e+00   2.6185785e+00   3.5201263e+00   3.2642011e+00   2.9245132e+00   3.0559405e+00   3.4672191e+00   3.6501426e+00   3.0869409e+00   2.1449779e+00   2.4611582e+00   2.3678836e+00   2.5038051e+00   3.6798850e+00   3.0627375e+00   3.0833417e+00   3.3518108e+00   3.0810282e+00   2.6595270e+00   2.6323570e+00   2.9747184e+00   3.1719581e+00   2.6119950e+00   2.0875308e+00   2.7837517e+00   2.7481947e+00   2.7653278e+00   2.8959432e+00   1.7918633e+00   2.6810089e+00   4.6579399e+00   3.7114178e+00   4.5860934e+00   4.1845117e+00   4.4368376e+00   5.3138890e+00   3.1392617e+00   4.9608959e+00   4.4280037e+00   
4.8387532e+00   3.7530908e+00   3.9304148e+00   4.1764880e+00   3.6510310e+00   3.8111653e+00   3.9838913e+00   4.1019789e+00   5.4302306e+00   5.6352651e+00   3.6334268e+00   4.4011799e+00   3.5332521e+00   5.4201202e+00   3.5378545e+00   4.3430510e+00   4.6603717e+00   3.4302376e+00   3.5053533e+00   4.2335883e+00   4.4640423e+00   4.8059592e+00   5.1881280e+00   4.2497073e+00   3.6834126e+00   4.1572779e+00   4.9128775e+00   4.2687104e+00   4.0908836e+00   3.4061169e+00   4.0989195e+00   4.3064904e+00   3.8759115e+00   3.7114178e+00   4.5707958e+00   4.4147019e+00   3.9326468e+00   3.6624594e+00   3.8494244e+00   4.0586633e+00   3.6843892e+00   4.0176783e-01   9.3801395e-01   3.7427929e-01   6.0551856e-01   4.9766035e-01   4.1449626e-01   2.5251796e-01   3.2352160e-01   7.0437330e-01   6.2024833e-01   2.4837156e-01   7.0826681e-01   8.1099042e-01   5.3665999e-01   5.7257017e-01   4.0293660e-01   5.3665999e-01   1.0321505e+00   3.2352160e-01   4.9857388e-01   1.2654843e+00   1.0181000e+00   4.9857388e-01   4.6472023e-01   6.6432544e-01   4.4535192e-01   8.1354181e-01   3.2586371e-01   4.4535192e-01   3.1652953e+00   2.9034751e+00   3.3383293e+00   2.4277398e+00   3.0113552e+00   2.8500355e+00   3.0981427e+00   1.7594421e+00   2.9944056e+00   2.3105335e+00   2.0559262e+00   2.5987706e+00   2.4171653e+00   3.0605340e+00   2.0054351e+00   2.8372675e+00   2.8748086e+00   2.4385827e+00   2.9411544e+00   2.2761106e+00   3.2149087e+00   2.3896630e+00   3.2878368e+00   3.0415738e+00   2.6906230e+00   2.8216976e+00   3.2222602e+00   3.4304873e+00   2.8822802e+00   1.8816502e+00   2.1994544e+00   2.0961718e+00   2.2729984e+00   3.4713427e+00   2.8746311e+00   2.8977380e+00   3.1239380e+00   2.8149627e+00   2.4626518e+00   2.3988202e+00   2.7512943e+00   2.9634715e+00   2.3744738e+00   1.7861398e+00   2.5679553e+00   2.5438761e+00   2.5602722e+00   2.6724144e+00   1.5132025e+00   2.4693940e+00   4.4818617e+00   3.5188715e+00   4.3693901e+00   3.9814082e+00   4.2430316e+00   
5.0846750e+00   2.9378173e+00   4.7308926e+00   4.2026915e+00   4.6369546e+00   3.5592955e+00   3.7219741e+00   3.9702025e+00   3.4565374e+00   3.6485303e+00   3.8059948e+00   3.8952692e+00   5.2045888e+00   5.4038637e+00   3.3942456e+00   4.2018404e+00   3.3550631e+00   5.1838582e+00   3.3280757e+00   4.1439346e+00   4.4349731e+00   3.2284912e+00   3.3133518e+00   4.0354963e+00   4.2293106e+00   4.5707958e+00   4.9486891e+00   4.0555804e+00   3.4667821e+00   3.9402495e+00   4.6817169e+00   4.0952984e+00   3.8891597e+00   3.2181054e+00   3.8906833e+00   4.1179678e+00   3.6792093e+00   3.5188715e+00   4.3738746e+00   4.2318888e+00   3.7415007e+00   3.4482727e+00   3.6509580e+00   3.8867812e+00   3.4956677e+00   6.2660376e-01   4.1449626e-01   4.8927739e-01   7.0479928e-01   3.0546431e-01   2.5251796e-01   3.2816937e-01   5.7324170e-01   6.2538346e-01   3.7255734e-01   4.4535192e-01   5.7324170e-01   6.2482915e-01   5.3665999e-01   4.3691963e-01   6.2482915e-01   8.7420176e-01   3.2352160e-01   2.5651975e-01   1.4293465e+00   7.7360126e-01   2.5651975e-01   4.0147421e-01   7.1504098e-01   2.1269358e-01   6.2660376e-01   2.4837156e-01   4.1586001e-01   3.4011512e+00   3.1015495e+00   3.5629124e+00   2.6446848e+00   3.2242409e+00   3.0444970e+00   3.2854349e+00   1.9922212e+00   3.2208624e+00   2.4875432e+00   2.3235341e+00   2.7738624e+00   2.6761538e+00   3.2590031e+00   2.1770137e+00   3.0609726e+00   3.0488669e+00   2.6564689e+00   3.1671679e+00   2.4967542e+00   3.3778209e+00   2.5968629e+00   3.5012162e+00   3.2523394e+00   2.9093820e+00   3.0417674e+00   3.4541339e+00   3.6357801e+00   3.0695657e+00   2.1117686e+00   2.4265922e+00   2.3324013e+00   2.4794505e+00   3.6641654e+00   3.0465086e+00   3.0686943e+00   3.3395333e+00   3.0541897e+00   2.6428278e+00   2.6018930e+00   2.9558536e+00   3.1589081e+00   2.5867906e+00   2.0334278e+00   2.7624503e+00   2.7347909e+00   2.7481947e+00   2.8804789e+00   1.7294430e+00   2.6604040e+00   4.6390099e+00   3.6904099e+00   
4.5721680e+00   4.1717456e+00   4.4201622e+00   5.3038306e+00   3.1101376e+00   4.9521292e+00   4.4131751e+00   4.8218478e+00   3.7351231e+00   3.9119157e+00   4.1593600e+00   3.6239123e+00   3.7806200e+00   3.9616940e+00   4.0893976e+00   5.4208566e+00   5.6225133e+00   3.6099406e+00   4.3833809e+00   3.5087649e+00   5.4106224e+00   3.5167400e+00   4.3287713e+00   4.6517741e+00   3.4091049e+00   3.4875352e+00   4.2154392e+00   4.4559638e+00   4.7948032e+00   5.1800729e+00   4.2298456e+00   3.6705550e+00   4.1464752e+00   4.8981257e+00   4.2482702e+00   4.0788778e+00   3.3871264e+00   4.0817153e+00   4.2852718e+00   3.8517038e+00   3.6904099e+00   4.5544260e+00   4.3933832e+00   3.9084933e+00   3.6377874e+00   3.8310704e+00   4.0381483e+00   3.6684338e+00   7.9016429e-01   9.0454394e-01   7.7553525e-01   6.5633874e-01   6.8961791e-01   6.5172743e-01   6.4755655e-01   6.9325418e-01   8.5690100e-01   7.5871717e-01   9.8800009e-01   6.3977563e-01   5.0503591e-01   9.0852141e-01   6.3977563e-01   6.2482915e-01   6.2605182e-01   4.4651726e-01   1.3039319e+00   4.5470518e-01   6.8801986e-01   9.4492923e-01   6.5223271e-01   6.9325418e-01   4.9674312e-01   7.6752131e-01   5.2574978e-01   3.9950977e+00   3.6682165e+00   4.1454421e+00   3.1171350e+00   3.7869887e+00   3.5623665e+00   3.8418637e+00   2.4093459e+00   3.7913585e+00   2.9787373e+00   2.6930133e+00   3.3123070e+00   3.1668302e+00   3.7985007e+00   2.6980573e+00   3.6472316e+00   3.5682291e+00   3.1756360e+00   3.6828708e+00   2.9891564e+00   3.9102500e+00   3.1455588e+00   4.0375495e+00   3.7881557e+00   3.4756264e+00   3.6204175e+00   4.0288578e+00   4.2043522e+00   3.6069560e+00   2.6127852e+00   2.9004348e+00   2.8010550e+00   3.0013391e+00   4.1901031e+00   3.5584876e+00   3.6126340e+00   3.9170234e+00   3.5823448e+00   3.1646752e+00   3.0923554e+00   3.4563987e+00   3.7021702e+00   3.1018442e+00   2.4341346e+00   3.2724336e+00   3.2604423e+00   3.2715632e+00   3.4335342e+00   2.1375243e+00   3.1807243e+00   
5.1732049e+00   4.2085267e+00   5.1402548e+00   4.7090394e+00   4.9640507e+00   5.8783901e+00   3.5970425e+00   5.5195747e+00   4.9586651e+00   5.3905797e+00   4.2919639e+00   4.4545908e+00   4.7214902e+00   4.1323720e+00   4.2962344e+00   4.5078390e+00   4.6379987e+00   5.9985593e+00   6.1927610e+00   4.1173292e+00   4.9467209e+00   4.0217355e+00   5.9855018e+00   4.0597680e+00   4.8845913e+00   5.2228901e+00   3.9505682e+00   4.0262006e+00   4.7557226e+00   5.0295898e+00   5.3695643e+00   5.7704875e+00   4.7697480e+00   4.2124808e+00   4.6690776e+00   5.4857949e+00   4.7867344e+00   4.6237863e+00   3.9220577e+00   4.6512574e+00   4.8397561e+00   4.4244727e+00   4.2085267e+00   5.1102370e+00   4.9464234e+00   4.4688286e+00   4.1733964e+00   4.3844239e+00   4.5752087e+00   4.1957914e+00   3.8934542e-01   3.7598397e-01   1.5422108e-01   3.4583729e-01   3.7598397e-01   4.4651726e-01   3.8934542e-01   3.2816937e-01   8.2929029e-01   9.3610001e-01   4.3691963e-01   5.3022554e-01   5.3309112e-01   4.3691963e-01   7.5976039e-01   3.2586371e-01   4.2667565e-01   1.0733200e+00   7.4777660e-01   2.1845981e-01   5.0905001e-01   4.3456114e-01   5.2942799e-01   5.5492130e-01   4.6472023e-01   3.7427929e-01   3.2191540e+00   2.9033203e+00   3.3725057e+00   2.3744738e+00   3.0162346e+00   2.8254861e+00   3.0850817e+00   1.6867642e+00   3.0206125e+00   2.2489449e+00   1.9859316e+00   2.5612285e+00   2.4037412e+00   3.0483897e+00   1.9482601e+00   2.8711841e+00   2.8361445e+00   2.4280197e+00   2.9151666e+00   2.2428341e+00   3.1709561e+00   2.3770599e+00   3.2772927e+00   3.0392407e+00   2.7044574e+00   2.8456337e+00   3.2543813e+00   3.4363773e+00   2.8560815e+00   1.8517858e+00   2.1570512e+00   2.0577182e+00   2.2449618e+00   3.4472201e+00   2.8333788e+00   2.8654874e+00   3.1455372e+00   2.8102985e+00   2.4278629e+00   2.3504126e+00   2.7240842e+00   2.9511072e+00   2.3468577e+00   1.7140774e+00   2.5325623e+00   2.5220817e+00   2.5303132e+00   2.6704164e+00   1.4096199e+00   
2.4355523e+00   4.4339908e+00   3.4713427e+00   4.3742064e+00   3.9639546e+00   4.2144772e+00   5.1104621e+00   2.8721481e+00   4.7555981e+00   4.1997133e+00   4.6264002e+00   3.5337158e+00   3.6986235e+00   3.9580816e+00   3.3951420e+00   3.5651172e+00   3.7578933e+00   3.8852294e+00   5.2312618e+00   5.4232729e+00   3.3678461e+00   4.1846468e+00   3.2909043e+00   5.2164277e+00   3.3005331e+00   4.1286019e+00   4.4583488e+00   3.1948184e+00   3.2785487e+00   4.0052019e+00   4.2627019e+00   4.5989546e+00   4.9978258e+00   4.0193656e+00   3.4602150e+00   3.9315374e+00   4.7096962e+00   4.0441539e+00   3.8751788e+00   3.1769821e+00   3.8841455e+00   4.0829381e+00   3.6559398e+00   3.4713427e+00   4.3535851e+00   4.1922661e+00   3.7063225e+00   3.4135466e+00   3.6262912e+00   3.8337160e+00   3.4586921e+00   4.5470518e-01   3.4378533e-01   4.9766035e-01   5.6631629e-01   3.2586371e-01   3.7255734e-01   6.5172743e-01   7.6625946e-01   9.7356960e-01   4.4651726e-01   7.0784540e-01   8.1273630e-01   4.4651726e-01   6.8757066e-01   4.4417983e-01   6.0670504e-01   1.1521791e+00   6.5172743e-01   4.5581864e-01   4.5470518e-01   5.6700421e-01   4.8036801e-01   5.1607523e-01   5.8851328e-01   5.0905001e-01   3.1972361e+00   2.8360340e+00   3.3243092e+00   2.2696891e+00   2.9531300e+00   2.6835197e+00   2.9981183e+00   1.5916843e+00   2.9546153e+00   2.1366783e+00   1.9099663e+00   2.4717637e+00   2.3219731e+00   2.9300203e+00   1.8705419e+00   2.8448793e+00   2.7044574e+00   2.2951567e+00   2.8430073e+00   2.1220080e+00   3.0654291e+00   2.3117864e+00   3.1749624e+00   2.9091307e+00   2.6433897e+00   2.8065044e+00   3.2000771e+00   3.3714688e+00   2.7511201e+00   1.7679293e+00   2.0426611e+00   1.9423536e+00   2.1457242e+00   3.3172489e+00   2.6940968e+00   2.7682296e+00   3.0935247e+00   2.7391698e+00   2.2996943e+00   2.2360451e+00   2.5729378e+00   2.8382774e+00   2.2413445e+00   1.6312555e+00   2.4031247e+00   2.3848740e+00   2.4037412e+00   2.5843380e+00   1.3803845e+00   
2.3178393e+00   4.3373464e+00   3.3555449e+00   4.3029534e+00   3.8394246e+00   4.1166152e+00   5.0345534e+00   2.7564428e+00   4.6628709e+00   4.0929284e+00   4.5724955e+00   3.4657942e+00   3.6038441e+00   3.8912788e+00   3.2935105e+00   3.4985926e+00   3.6938082e+00   3.7771525e+00   5.1600819e+00   5.3477989e+00   3.2453592e+00   4.1245629e+00   3.1885527e+00   5.1389533e+00   3.2183480e+00   4.0411872e+00   4.3734530e+00   3.1116430e+00   3.1793624e+00   3.9065553e+00   4.1815080e+00   4.5288943e+00   4.9506208e+00   3.9281139e+00   3.3421539e+00   3.7789119e+00   4.6812838e+00   3.9623295e+00   3.7603559e+00   3.0782101e+00   3.8325036e+00   4.0241284e+00   3.6474081e+00   3.3555449e+00   4.2739171e+00   4.1339606e+00   3.6720159e+00   3.3349237e+00   3.5512382e+00   3.7511837e+00   3.3364095e+00   4.1312257e-01   5.0905001e-01   4.2538717e-01   3.2352160e-01   2.0656129e-01   5.0592043e-01   1.1017858e+00   1.2234738e+00   1.5422108e-01   4.1312257e-01   6.3924842e-01   1.5422108e-01   6.1968386e-01   4.0293660e-01   5.2942799e-01   7.7919451e-01   6.2482915e-01   5.6631629e-01   8.1385214e-01   2.5251796e-01   8.0032200e-01   4.2538717e-01   7.1462831e-01   3.2352160e-01   3.3601225e+00   3.0492285e+00   3.5130686e+00   2.4784234e+00   3.1574358e+00   2.9495683e+00   3.2305582e+00   1.7639552e+00   3.1543365e+00   2.3872777e+00   2.0066796e+00   2.7104259e+00   2.4866453e+00   3.1794134e+00   2.0999661e+00   3.0164205e+00   2.9733298e+00   2.5409084e+00   3.0313923e+00   2.3496997e+00   3.3211033e+00   2.5173832e+00   3.4035007e+00   3.1599783e+00   2.8424094e+00   2.9896107e+00   3.3894792e+00   3.5821206e+00   2.9960387e+00   1.9633030e+00   2.2546134e+00   2.1472921e+00   2.3729779e+00   3.5766918e+00   2.9692947e+00   3.0147040e+00   3.2887803e+00   2.9233984e+00   2.5628362e+00   2.4694536e+00   2.8371552e+00   3.0850817e+00   2.4681135e+00   1.7749726e+00   2.6585961e+00   2.6493446e+00   2.6623632e+00   2.8060305e+00   1.5124582e+00   2.5679553e+00   
4.5912390e+00   3.6144502e+00   4.5212874e+00   4.0982070e+00   4.3637125e+00   5.2494547e+00   3.0086587e+00   4.8875515e+00   4.3294620e+00   4.7880147e+00   3.6912876e+00   3.8418637e+00   4.1117747e+00   3.5413188e+00   3.7389462e+00   3.9250135e+00   4.0233529e+00   5.3754694e+00   5.5627643e+00   3.4785161e+00   4.3436980e+00   3.4455964e+00   5.3512930e+00   3.4471566e+00   4.2776053e+00   4.5941143e+00   3.3449470e+00   3.4269051e+00   4.1524717e+00   4.3946634e+00   4.7366258e+00   5.1414650e+00   4.1713411e+00   3.5895201e+00   4.0467919e+00   4.8635964e+00   4.2075047e+00   4.0127353e+00   3.3273395e+00   4.0411872e+00   4.2480617e+00   3.8321687e+00   3.6144502e+00   4.5072985e+00   4.3598062e+00   3.8780834e+00   3.5586316e+00   3.7805671e+00   3.9971028e+00   3.6003219e+00   2.5651975e-01   2.8192292e-01   3.4378533e-01   3.4378533e-01   4.0147421e-01   7.1840099e-01   8.5690100e-01   3.7598397e-01   4.2538717e-01   5.3665999e-01   3.7598397e-01   6.6827038e-01   2.1269358e-01   3.0546431e-01   1.1320702e+00   6.2988288e-01   2.0656129e-01   4.4535192e-01   4.2667565e-01   4.1449626e-01   4.3691963e-01   3.8934542e-01   2.5251796e-01   3.3428183e+00   3.0232018e+00   3.4944845e+00   2.4950353e+00   3.1381402e+00   2.9363801e+00   3.2027420e+00   1.8084630e+00   3.1409294e+00   2.3642733e+00   2.1113036e+00   2.6784604e+00   2.5283251e+00   3.1625374e+00   2.0667297e+00   2.9950041e+00   2.9473422e+00   2.5410503e+00   3.0407257e+00   2.3596992e+00   3.2858223e+00   2.4986337e+00   3.3960124e+00   3.1519721e+00   2.8255193e+00   2.9686973e+00   3.3765772e+00   3.5575681e+00   2.9720515e+00   1.9732878e+00   2.2758449e+00   2.1765379e+00   2.3630256e+00   3.5606213e+00   2.9432282e+00   2.9813163e+00   3.2672636e+00   2.9349850e+00   2.5393641e+00   2.4678343e+00   2.8348218e+00   3.0655803e+00   2.4649412e+00   1.8381372e+00   2.6459992e+00   2.6324803e+00   2.6428278e+00   2.7886501e+00   1.5384446e+00   2.5500177e+00   4.5509522e+00   3.5863729e+00   
4.4953238e+00   4.0776551e+00   4.3319640e+00   5.2308062e+00   2.9880146e+00   4.8736353e+00   4.3174903e+00   4.7496159e+00   3.6545040e+00   3.8173510e+00   4.0797075e+00   3.5129177e+00   3.6850739e+00   3.8789949e+00   4.0011311e+00   5.3516635e+00   5.5447338e+00   3.4854203e+00   4.3070102e+00   3.4066692e+00   5.3366535e+00   3.4209331e+00   4.2472818e+00   4.5769661e+00   3.3144678e+00   3.3951450e+00   4.1229838e+00   4.3815083e+00   4.7200939e+00   5.1200990e+00   4.1381062e+00   3.5750015e+00   4.0416512e+00   4.8355880e+00   4.1628805e+00   3.9898962e+00   3.2934163e+00   4.0073765e+00   4.2053647e+00   3.7839552e+00   3.5863729e+00   4.4735121e+00   4.3145846e+00   3.8316735e+00   3.5355330e+00   3.7466071e+00   3.9521135e+00   3.5718733e+00   1.2418578e-01   5.2942799e-01   4.9766035e-01   2.5251796e-01   6.0060595e-01   7.1462831e-01   4.4535192e-01   3.8776762e-01   3.2352160e-01   4.4535192e-01   8.5434758e-01   1.2418578e-01   2.5251796e-01   1.2643026e+00   8.1354181e-01   4.1449626e-01   4.5581864e-01   5.6769031e-01   3.0546431e-01   6.2024833e-01   2.0656129e-01   2.5251796e-01   3.3898078e+00   3.1105347e+00   3.5575681e+00   2.6249526e+00   3.2229246e+00   3.0488669e+00   3.3004172e+00   1.9465831e+00   3.2118493e+00   2.4993166e+00   2.2473120e+00   2.7928582e+00   2.6296047e+00   3.2639046e+00   2.1933937e+00   3.0559188e+00   3.0673749e+00   2.6440180e+00   3.1486786e+00   2.4776707e+00   3.4056271e+00   2.5954147e+00   3.4958702e+00   3.2483608e+00   2.9041534e+00   3.0378828e+00   3.4425295e+00   3.6411503e+00   3.0811775e+00   2.0851901e+00   2.3997585e+00   2.2980893e+00   2.4741664e+00   3.6714798e+00   3.0661139e+00   3.0923102e+00   3.3390355e+00   3.0288896e+00   2.6566259e+00   2.5949561e+00   2.9511072e+00   3.1662394e+00   2.5768305e+00   1.9764051e+00   2.7650187e+00   2.7420671e+00   2.7570191e+00   2.8802219e+00   1.6913411e+00   2.6662783e+00   4.6723657e+00   3.7109297e+00   4.5802461e+00   4.1829734e+00   4.4414351e+00   
5.3024507e+00   3.1246867e+00   4.9479218e+00   4.4123750e+00   4.8421359e+00   3.7582429e+00   3.9238367e+00   4.1750713e+00   3.6455011e+00   3.8262353e+00   3.9966275e+00   4.0997246e+00   5.4219259e+00   5.6210533e+00   3.5985833e+00   4.4045665e+00   3.5402439e+00   5.4041845e+00   3.5288526e+00   4.3467079e+00   4.6509698e+00   3.4261305e+00   3.5087649e+00   4.2340479e+00   4.4487072e+00   4.7898591e+00   5.1726632e+00   4.2521321e+00   3.6728562e+00   4.1446028e+00   4.9002452e+00   4.2845042e+00   4.0915924e+00   3.4110551e+00   4.0971854e+00   4.3142588e+00   3.8789274e+00   3.7109297e+00   4.5753785e+00   4.4261431e+00   3.9377466e+00   3.6482464e+00   3.8509694e+00   4.0749843e+00   3.6894983e+00   5.1607523e-01   4.5470518e-01   2.5251796e-01   7.0086313e-01   8.1067767e-01   3.7598397e-01   2.8192292e-01   3.0546431e-01   3.7598397e-01   8.2654509e-01   1.2418578e-01   2.1845981e-01   1.1754055e+00   8.0326782e-01   4.2667565e-01   5.7324170e-01   4.9766035e-01   4.1449626e-01   6.0551856e-01   3.0546431e-01   2.0656129e-01   3.4780839e+00   3.2028668e+00   3.6478832e+00   2.7001131e+00   3.3123070e+00   3.1424188e+00   3.3940268e+00   2.0081113e+00   3.3027430e+00   2.5846998e+00   2.2899742e+00   2.8843735e+00   2.7014135e+00   3.3579678e+00   2.2804674e+00   3.1447330e+00   3.1614572e+00   2.7347909e+00   3.2266676e+00   2.5600441e+00   3.4989115e+00   2.6835197e+00   3.5848035e+00   3.3425343e+00   2.9944056e+00   3.1272113e+00   3.5315985e+00   3.7321938e+00   3.1736230e+00   2.1642821e+00   2.4763086e+00   2.3729779e+00   2.5613679e+00   3.7645411e+00   3.1602773e+00   3.1861493e+00   3.4298218e+00   3.1090174e+00   2.7503801e+00   2.6773131e+00   3.0414782e+00   3.2606191e+00   2.6626548e+00   2.0308266e+00   2.8549070e+00   2.8371552e+00   2.8500790e+00   2.9720515e+00   1.7439430e+00   2.7570191e+00   4.7646254e+00   3.8019108e+00   4.6714768e+00   4.2777154e+00   4.5341669e+00   5.3940437e+00   3.2096520e+00   5.0408889e+00   4.5037829e+00   
4.9318274e+00   3.8493108e+00   4.0147814e+00   4.2656755e+00   3.7323432e+00   3.9122146e+00   4.0862794e+00   4.1939019e+00   5.5136120e+00   5.7113100e+00   3.6836404e+00   4.4947801e+00   3.6298047e+00   5.4953655e+00   3.6181784e+00   4.4396174e+00   4.7440323e+00   3.5161021e+00   3.6013159e+00   4.3259034e+00   4.5411167e+00   4.8804165e+00   5.2620015e+00   4.3431589e+00   3.7666196e+00   4.2394195e+00   4.9871255e+00   4.3755778e+00   4.1864815e+00   3.5032323e+00   4.1868319e+00   4.4036244e+00   3.9638383e+00   3.8019108e+00   4.6672392e+00   4.5155386e+00   4.0245873e+00   3.7349501e+00   3.9420153e+00   4.1660972e+00   3.7835217e+00   1.2418578e-01   7.0826681e-01   9.4125538e-01   1.1340084e+00   2.1845981e-01   4.4417983e-01   8.2105460e-01   2.1845981e-01   3.8776762e-01   4.1449626e-01   4.2418962e-01   9.1075311e-01   3.7255734e-01   4.8036801e-01   6.6827038e-01   2.5651975e-01   6.4704320e-01   2.0656129e-01   6.8961791e-01   3.2586371e-01   3.4686627e+00   3.1154621e+00   3.6024772e+00   2.5108475e+00   3.2290313e+00   2.9704704e+00   3.2809332e+00   1.7903952e+00   3.2349033e+00   2.3960250e+00   2.0600195e+00   2.7476411e+00   2.5610667e+00   3.2181149e+00   2.1323638e+00   3.1154281e+00   2.9881598e+00   2.5786309e+00   3.0947136e+00   2.3839455e+00   3.3448197e+00   2.5821869e+00   3.4532666e+00   3.1998457e+00   2.9200439e+00   3.0794219e+00   3.4771054e+00   3.6510733e+00   3.0328831e+00   2.0197525e+00   2.2893157e+00   2.1858172e+00   2.4166535e+00   3.6030381e+00   2.9770247e+00   3.0492285e+00   3.3713329e+00   2.9974031e+00   2.5833316e+00   2.4944673e+00   2.8535197e+00   3.1260028e+00   2.5104998e+00   1.8109223e+00   2.6804452e+00   2.6742360e+00   2.6875169e+00   2.8656044e+00   1.5447938e+00   2.5961850e+00   4.6147552e+00   3.6320149e+00   4.5850999e+00   4.1288948e+00   4.3989089e+00   5.3208661e+00   3.0146208e+00   4.9525575e+00   4.3778283e+00   4.8485483e+00   3.7416375e+00   3.8836706e+00   4.1692394e+00   3.5584719e+00   
3.7545764e+00   3.9635178e+00   4.0653249e+00   5.4465238e+00   5.6318041e+00   3.5148434e+00   4.4004449e+00   3.4571917e+00   5.4256316e+00   3.4931266e+00   4.3243671e+00   4.6617210e+00   3.3863989e+00   3.4595139e+00   4.1872611e+00   4.4691596e+00   4.8126955e+00   5.2323771e+00   4.2057897e+00   3.6309135e+00   4.0714444e+00   4.9544547e+00   4.2355968e+00   4.0495222e+00   3.3563815e+00   4.1075398e+00   4.2957899e+00   3.9065020e+00   3.6320149e+00   4.5543025e+00   4.4046818e+00   3.9362563e+00   3.6038441e+00   3.8285790e+00   4.0238937e+00   3.6204282e+00   6.2538346e-01   1.0167353e+00   1.1763980e+00   1.4096146e-01   4.1449626e-01   7.4740267e-01   1.4096146e-01   4.4535192e-01   3.7427929e-01   4.5581864e-01   8.2135873e-01   4.4535192e-01   5.0503591e-01   7.3145860e-01   2.1269358e-01   7.1421512e-01   2.5251796e-01   6.8961791e-01   2.8192292e-01   3.4295980e+00   3.0905489e+00   3.5700123e+00   2.4944673e+00   3.2020292e+00   2.9613737e+00   3.2617087e+00   1.7750284e+00   3.2049793e+00   2.3909366e+00   2.0308266e+00   2.7326472e+00   2.5286673e+00   3.2028668e+00   2.1181082e+00   3.0792693e+00   2.9816970e+00   2.5624932e+00   3.0681391e+00   2.3677174e+00   3.3352475e+00   2.5566444e+00   3.4334259e+00   3.1839972e+00   2.8907702e+00   3.0462904e+00   3.4448498e+00   3.6256155e+00   3.0181476e+00   1.9946242e+00   2.2719663e+00   2.1665831e+00   2.3980102e+00   3.5922217e+00   2.9733478e+00   3.0355056e+00   3.3410264e+00   2.9673667e+00   2.5744164e+00   2.4820750e+00   2.8455141e+00   3.1100045e+00   2.4920874e+00   1.7903952e+00   2.6704164e+00   2.6637326e+00   2.6767607e+00   2.8425716e+00   1.5257596e+00   2.5839288e+00   4.6057175e+00   3.6244539e+00   4.5619285e+00   4.1170543e+00   4.3856360e+00   5.2953657e+00   3.0110441e+00   4.9290739e+00   4.3593510e+00   4.8266994e+00   3.7227460e+00   3.8675033e+00   4.1480696e+00   3.5505922e+00   3.7479505e+00   3.9489183e+00   4.0495222e+00   5.4213759e+00   5.6069704e+00   3.4988409e+00   
4.3796539e+00   3.4519560e+00   5.3990720e+00   3.4751739e+00   4.3070102e+00   4.6372963e+00   3.3701472e+00   3.4467337e+00   4.1738909e+00   4.4422690e+00   4.7852959e+00   5.2004339e+00   4.1925494e+00   3.6148707e+00   4.0613681e+00   4.9222119e+00   4.2248103e+00   4.0355816e+00   3.3448336e+00   4.0832977e+00   4.2781022e+00   3.8793995e+00   3.6244539e+00   4.5369609e+00   4.3880177e+00   3.9147164e+00   3.5857963e+00   3.8105301e+00   4.0134966e+00   3.6122845e+00   7.1799256e-01   8.0358695e-01   5.5419992e-01   4.6472023e-01   2.5651975e-01   5.5419992e-01   1.0198386e+00   3.2352160e-01   4.1586001e-01   1.2565757e+00   1.0054037e+00   4.1586001e-01   5.2574978e-01   6.4806901e-01   4.5581864e-01   8.0619006e-01   3.2586371e-01   4.1586001e-01   3.3274681e+00   3.0643176e+00   3.5031390e+00   2.5831315e+00   3.1734856e+00   3.0256804e+00   3.2593968e+00   1.9049236e+00   3.1662394e+00   2.4587503e+00   2.1948123e+00   2.7522526e+00   2.5874301e+00   3.2341367e+00   2.1493214e+00   2.9966141e+00   3.0389019e+00   2.6214821e+00   3.0978571e+00   2.4463233e+00   3.3673968e+00   2.5500177e+00   3.4578354e+00   3.2240818e+00   2.8578051e+00   2.9828212e+00   3.3905763e+00   3.5904118e+00   3.0455175e+00   2.0467937e+00   2.3640301e+00   2.2638728e+00   2.4385827e+00   3.6424937e+00   3.0387448e+00   3.0543001e+00   3.2867025e+00   2.9810914e+00   2.6291524e+00   2.5579619e+00   2.9291061e+00   3.1351569e+00   2.5421955e+00   1.9274228e+00   2.7359835e+00   2.7196686e+00   2.7293044e+00   2.8418790e+00   1.6234861e+00   2.6349941e+00   4.6268918e+00   3.6735954e+00   4.5276539e+00   4.1519514e+00   4.3981255e+00   5.2510005e+00   3.0847983e+00   4.9049143e+00   4.3738893e+00   4.7804645e+00   3.7059360e+00   3.8806135e+00   4.1210200e+00   3.6007179e+00   3.7674167e+00   3.9401211e+00   4.0632074e+00   5.3683319e+00   5.5675337e+00   3.5650560e+00   4.3467079e+00   3.4964385e+00   5.3534247e+00   3.4817971e+00   4.3007421e+00   4.6057628e+00   3.3796707e+00   
3.4684743e+00   4.1910954e+00   4.4033511e+00   4.7374076e+00   5.1107389e+00   4.2056854e+00   3.6417452e+00   4.1251907e+00   4.8290847e+00   4.2339606e+00   4.0576409e+00   3.3702841e+00   4.0376849e+00   4.2550416e+00   3.8015574e+00   3.6735954e+00   4.5249462e+00   4.3662620e+00   3.8699102e+00   3.5979726e+00   3.8007870e+00   4.0252278e+00   3.6568095e+00   3.0811765e-01   1.0065841e+00   9.1075311e-01   6.2538346e-01   1.0065841e+00   1.2125198e+00   7.0086313e-01   6.1623531e-01   1.8279039e+00   1.0613462e+00   6.9369532e-01   4.8135521e-01   1.1149070e+00   3.0811765e-01   9.7098574e-01   4.0293660e-01   8.0358695e-01   3.4154940e+00   3.1439160e+00   3.5872997e+00   2.8054691e+00   3.2839149e+00   3.1127876e+00   3.3274872e+00   2.2159139e+00   3.2598167e+00   2.6167778e+00   2.5758644e+00   2.8523754e+00   2.8256291e+00   3.3122081e+00   2.3003824e+00   3.0947136e+00   3.1160876e+00   2.7384939e+00   3.2940867e+00   2.6281170e+00   3.4401593e+00   2.6851662e+00   3.5758474e+00   3.3024121e+00   2.9636544e+00   3.0850893e+00   3.4935692e+00   3.6787177e+00   3.1382691e+00   2.2643860e+00   2.5838312e+00   2.4957147e+00   2.5876691e+00   3.7272092e+00   3.1148696e+00   3.1192689e+00   3.3731473e+00   3.1641238e+00   2.7164367e+00   2.7371177e+00   3.0433470e+00   3.2094276e+00   2.6989752e+00   2.2729593e+00   2.8576425e+00   2.7954161e+00   2.8234677e+00   2.9407805e+00   1.9980146e+00   2.7511201e+00   4.6993349e+00   3.7716613e+00   4.6090409e+00   4.2170163e+00   4.4740971e+00   5.3233695e+00   3.2312218e+00   4.9715358e+00   4.4649192e+00   4.8634712e+00   3.7907269e+00   3.9777091e+00   4.2102908e+00   3.7293867e+00   3.8887827e+00   4.0321207e+00   4.1304736e+00   5.4336694e+00   5.6535520e+00   3.7088191e+00   4.4333012e+00   3.6017014e+00   5.4287623e+00   3.5939935e+00   4.3696884e+00   4.6684884e+00   3.4865359e+00   3.5518450e+00   4.2777882e+00   4.4716883e+00   4.8204359e+00   5.1830573e+00   4.2975164e+00   3.7189281e+00   4.1915116e+00   
4.9282164e+00   4.3126073e+00   4.1182993e+00   3.4568393e+00   4.1297239e+00   4.3496867e+00   3.9207320e+00   3.7716613e+00   4.6018276e+00   4.4564872e+00   3.9827168e+00   3.7295388e+00   3.8905364e+00   4.1037903e+00   3.7281480e+00   1.1474460e+00   1.0344911e+00   7.0043186e-01   1.1474460e+00   1.4311891e+00   8.2654509e-01   7.6787403e-01   1.9730918e+00   1.3073038e+00   7.9878917e-01   6.2407309e-01   1.2632199e+00   5.0503591e-01   1.1833480e+00   5.0905001e-01   9.4080461e-01   3.4392518e+00   3.2008338e+00   3.6261197e+00   2.9076510e+00   3.3439089e+00   3.2073032e+00   3.3907558e+00   2.3329978e+00   3.3168472e+00   2.7081478e+00   2.6929215e+00   2.9283888e+00   2.9264154e+00   3.3942456e+00   2.3848677e+00   3.1312883e+00   3.2027420e+00   2.8385969e+00   3.3781905e+00   2.7324845e+00   3.5133135e+00   2.7602023e+00   3.6563535e+00   3.3905763e+00   3.0256209e+00   3.1312883e+00   3.5423793e+00   3.7299679e+00   3.2179031e+00   2.3661539e+00   2.6906231e+00   2.6054995e+00   2.6801752e+00   3.8138933e+00   3.2027420e+00   3.1888037e+00   3.4187265e+00   3.2459231e+00   2.8060305e+00   2.8359967e+00   3.1453916e+00   3.2886661e+00   2.7943622e+00   2.3874574e+00   2.9533314e+00   2.8877105e+00   2.9141136e+00   3.0147886e+00   2.0992326e+00   2.8425716e+00   4.7630756e+00   3.8535531e+00   4.6550014e+00   4.2947468e+00   4.5389196e+00   5.3632740e+00   3.3234239e+00   5.0232338e+00   4.5363757e+00   4.8985515e+00   3.8441304e+00   4.0476997e+00   4.2599433e+00   3.8106152e+00   3.9516603e+00   4.0850903e+00   4.1995425e+00   5.4671477e+00   5.6957748e+00   3.8037734e+00   4.4771379e+00   3.6783395e+00   5.4690202e+00   3.6632177e+00   4.4259826e+00   4.7160634e+00   3.5560806e+00   3.6239123e+00   4.3464973e+00   4.5187406e+00   4.8619146e+00   5.2001625e+00   4.3635746e+00   3.7979761e+00   4.2843668e+00   4.9451734e+00   4.3709882e+00   4.1899199e+00   3.5299194e+00   4.1707976e+00   4.3972344e+00   3.9457376e+00   3.8535531e+00   4.6546556e+00   
4.5024055e+00   4.0227020e+00   3.8004969e+00   3.9484773e+00   4.1635108e+00   3.8079860e+00   3.0811765e-01   6.5223271e-01   0.0000000e+00   5.0991930e-01   3.2586371e-01   4.2667565e-01   8.3172002e-01   5.0991930e-01   5.6769031e-01   7.5082357e-01   2.1845981e-01   7.0479928e-01   3.0811765e-01   6.4755655e-01   2.1845981e-01   3.4865562e+00   3.1726595e+00   3.6377960e+00   2.5987470e+00   3.2814045e+00   3.0627375e+00   3.3515846e+00   1.8841865e+00   3.2769379e+00   2.5038079e+00   2.1311468e+00   2.8311678e+00   2.6104387e+00   3.2962520e+00   2.2214438e+00   3.1433122e+00   3.0878634e+00   2.6552472e+00   3.1570103e+00   2.4668912e+00   3.4394878e+00   2.6411293e+00   3.5233648e+00   3.2747247e+00   2.9659871e+00   3.1154783e+00   3.5134741e+00   3.7059620e+00   3.1148696e+00   2.0851901e+00   2.3731428e+00   2.2655571e+00   2.4927109e+00   3.6920087e+00   3.0823446e+00   3.1337459e+00   3.4135200e+00   3.0481703e+00   2.6780487e+00   2.5874301e+00   2.9489507e+00   3.2027420e+00   2.5873149e+00   1.8973383e+00   2.7738355e+00   2.7632614e+00   2.7778954e+00   2.9269923e+00   1.6390769e+00   2.6848587e+00   4.7106706e+00   3.7313856e+00   4.6446321e+00   4.2142736e+00   4.4836580e+00   5.3716885e+00   3.1250284e+00   5.0074019e+00   4.4485220e+00   4.9128219e+00   3.8150636e+00   3.9624529e+00   4.2358121e+00   3.6602286e+00   3.8605980e+00   4.0488387e+00   4.1418643e+00   5.4970002e+00   5.6855224e+00   3.5964347e+00   4.4685630e+00   3.5634461e+00   5.4730406e+00   3.5693950e+00   4.3989089e+00   4.7150659e+00   3.4668130e+00   3.5464993e+00   4.2723380e+00   4.5155386e+00   4.8594290e+00   5.2647079e+00   4.2921213e+00   3.7064459e+00   4.1581964e+00   4.9913682e+00   4.3286007e+00   4.1303097e+00   3.4468286e+00   4.1669742e+00   4.3729308e+00   3.9624170e+00   3.7313856e+00   4.6297577e+00   4.4844827e+00   4.0056359e+00   3.6817961e+00   3.9035218e+00   4.1179678e+00   3.7164366e+00   5.2942799e-01   3.0811765e-01   6.0611244e-01   3.2586371e-01   
3.0546431e-01   9.4125538e-01   6.0060595e-01   5.2574978e-01   8.1558458e-01   2.8507955e-01   6.4755655e-01   4.1312257e-01   5.5419992e-01   2.0656129e-01   3.7045940e+00   3.4142500e+00   3.8690719e+00   2.8688189e+00   3.5226542e+00   3.3385842e+00   3.6010215e+00   2.1580776e+00   3.5196916e+00   2.7652601e+00   2.4083873e+00   3.0820950e+00   2.8783309e+00   3.5617386e+00   2.4693940e+00   3.3658240e+00   3.3558214e+00   2.9322808e+00   3.4112518e+00   2.7424260e+00   3.6945405e+00   2.8867565e+00   3.7848217e+00   3.5471494e+00   3.2076743e+00   3.3449470e+00   3.7498842e+00   3.9450818e+00   3.3735875e+00   2.3490515e+00   2.6490839e+00   2.5444216e+00   2.7549369e+00   3.9621873e+00   3.3527310e+00   3.3865418e+00   3.6475099e+00   3.3024121e+00   2.9459653e+00   2.8566322e+00   3.2311957e+00   3.4653703e+00   2.8535197e+00   2.1708533e+00   3.0455175e+00   3.0364473e+00   3.0465086e+00   3.1799184e+00   1.8842354e+00   2.9509414e+00   4.9594922e+00   3.9924292e+00   4.8844442e+00   4.4804205e+00   4.7354764e+00   5.6130421e+00   3.3873806e+00   5.2583931e+00   4.7090394e+00   5.1413739e+00   4.0532097e+00   4.2156327e+00   4.4735121e+00   3.9153764e+00   4.0928954e+00   4.2826383e+00   4.4004808e+00   5.7341534e+00   5.9271927e+00   3.8700742e+00   4.7016733e+00   3.8156003e+00   5.7155096e+00   3.8175051e+00   4.6462020e+00   4.9620553e+00   3.7143727e+00   3.8000004e+00   4.5254820e+00   4.7613782e+00   5.0991856e+00   5.4888822e+00   4.5411167e+00   3.9718373e+00   4.4397363e+00   5.2074442e+00   4.5701358e+00   4.3916992e+00   3.6996802e+00   4.3970196e+00   4.6045465e+00   4.1691696e+00   3.9924292e+00   4.8725396e+00   4.7150659e+00   4.2256400e+00   3.9292780e+00   4.1454529e+00   4.3599285e+00   3.9798670e+00   6.5223271e-01   1.1268457e+00   4.1449626e-01   5.0090417e-01   1.3784393e+00   1.1053488e+00   5.8851328e-01   6.6827038e-01   7.6787403e-01   4.8036801e-01   9.0852141e-01   2.8192292e-01   5.0905001e-01   3.5117473e+00   3.2719724e+00   
3.6961290e+00   2.8060101e+00   3.3799936e+00   3.2401159e+00   3.4709594e+00   2.1293854e+00   3.3643376e+00   2.6848587e+00   2.4115946e+00   2.9723937e+00   2.7984009e+00   3.4455106e+00   2.3749088e+00   3.1913397e+00   3.2574960e+00   2.8320532e+00   3.3150921e+00   2.6637326e+00   3.5883493e+00   2.7640668e+00   3.6695251e+00   3.4317240e+00   3.0617355e+00   3.1820648e+00   3.5855784e+00   3.7950050e+00   3.2620096e+00   2.2645766e+00   2.5831070e+00   2.4810276e+00   2.6563403e+00   3.8575846e+00   3.2574960e+00   3.2718360e+00   3.4856613e+00   3.1913594e+00   2.8466990e+00   2.7801709e+00   3.1437608e+00   3.3466419e+00   2.7595194e+00   2.1498672e+00   2.9543365e+00   2.9330570e+00   2.9459653e+00   3.0511838e+00   1.8555964e+00   2.8534301e+00   4.8490742e+00   3.8962785e+00   4.7308926e+00   4.3643842e+00   4.6144072e+00   5.4442665e+00   3.3129652e+00   5.0997539e+00   4.5816539e+00   4.9883395e+00   3.9213002e+00   4.0961125e+00   4.3313403e+00   3.8279951e+00   4.0003537e+00   4.1625074e+00   4.2731561e+00   5.5604940e+00   5.7635900e+00   3.7812981e+00   4.5580745e+00   3.7236571e+00   5.5437155e+00   3.6992145e+00   4.5120086e+00   4.8012240e+00   3.5989213e+00   3.6872980e+00   4.4084575e+00   4.5949523e+00   4.9306472e+00   5.2918657e+00   4.4250143e+00   3.8510143e+00   4.3338058e+00   5.0198276e+00   4.4571663e+00   4.2687771e+00   3.5910375e+00   4.2454653e+00   4.4729144e+00   4.0139676e+00   3.8962785e+00   4.7377574e+00   4.5853438e+00   4.0877338e+00   3.8179781e+00   4.0161568e+00   4.2490407e+00   3.8755800e+00   5.0991930e-01   3.2586371e-01   4.2667565e-01   8.3172002e-01   5.0991930e-01   5.6769031e-01   7.5082357e-01   2.1845981e-01   7.0479928e-01   3.0811765e-01   6.4755655e-01   2.1845981e-01   3.4865562e+00   3.1726595e+00   3.6377960e+00   2.5987470e+00   3.2814045e+00   3.0627375e+00   3.3515846e+00   1.8841865e+00   3.2769379e+00   2.5038079e+00   2.1311468e+00   2.8311678e+00   2.6104387e+00   3.2962520e+00   2.2214438e+00   
3.1433122e+00   3.0878634e+00   2.6552472e+00   3.1570103e+00   2.4668912e+00   3.4394878e+00   2.6411293e+00   3.5233648e+00   3.2747247e+00   2.9659871e+00   3.1154783e+00   3.5134741e+00   3.7059620e+00   3.1148696e+00   2.0851901e+00   2.3731428e+00   2.2655571e+00   2.4927109e+00   3.6920087e+00   3.0823446e+00   3.1337459e+00   3.4135200e+00   3.0481703e+00   2.6780487e+00   2.5874301e+00   2.9489507e+00   3.2027420e+00   2.5873149e+00   1.8973383e+00   2.7738355e+00   2.7632614e+00   2.7778954e+00   2.9269923e+00   1.6390769e+00   2.6848587e+00   4.7106706e+00   3.7313856e+00   4.6446321e+00   4.2142736e+00   4.4836580e+00   5.3716885e+00   3.1250284e+00   5.0074019e+00   4.4485220e+00   4.9128219e+00   3.8150636e+00   3.9624529e+00   4.2358121e+00   3.6602286e+00   3.8605980e+00   4.0488387e+00   4.1418643e+00   5.4970002e+00   5.6855224e+00   3.5964347e+00   4.4685630e+00   3.5634461e+00   5.4730406e+00   3.5693950e+00   4.3989089e+00   4.7150659e+00   3.4668130e+00   3.5464993e+00   4.2723380e+00   4.5155386e+00   4.8594290e+00   5.2647079e+00   4.2921213e+00   3.7064459e+00   4.1581964e+00   4.9913682e+00   4.3286007e+00   4.1303097e+00   3.4468286e+00   4.1669742e+00   4.3729308e+00   3.9624170e+00   3.7313856e+00   4.6297577e+00   4.4844827e+00   4.0056359e+00   3.6817961e+00   3.9035218e+00   4.1179678e+00   3.7164366e+00   7.3813096e-01   6.8961791e-01   7.0086313e-01   2.0000000e-01   7.3805807e-01   1.0030700e+00   4.0293660e-01   9.4352681e-01   2.5251796e-01   1.0120221e+00   6.2024833e-01   3.8265307e+00   3.4552560e+00   3.9537404e+00   2.8022534e+00   3.5701225e+00   3.2863508e+00   3.6126198e+00   2.0524386e+00   3.5845127e+00   2.6848587e+00   2.2888897e+00   3.0684384e+00   2.8791325e+00   3.5464993e+00   2.4469125e+00   3.4686755e+00   3.2961206e+00   2.9072057e+00   3.4107902e+00   2.6959009e+00   3.6545040e+00   2.9195876e+00   3.7806187e+00   3.5312474e+00   3.2669151e+00   3.4295849e+00   3.8277021e+00   3.9909030e+00   3.3562690e+00   
2.3465937e+00   2.5906376e+00   2.4892531e+00   2.7423171e+00   3.9193693e+00   3.2780620e+00   3.3708389e+00   3.7190229e+00   3.3268984e+00   2.8983020e+00   2.7954161e+00   3.1611584e+00   3.4557351e+00   2.8328337e+00   2.0658700e+00   2.9919517e+00   2.9960210e+00   3.0059712e+00   3.2048547e+00   1.8038968e+00   2.9141136e+00   4.9189452e+00   3.9342203e+00   4.9208723e+00   4.4494735e+00   4.7160542e+00   5.6635213e+00   3.2909043e+00   5.2946188e+00   4.7062325e+00   5.1779277e+00   4.0657707e+00   4.2053647e+00   4.4986279e+00   3.8509694e+00   4.0384339e+00   4.2739171e+00   4.3927415e+00   5.7909894e+00   5.9706827e+00   3.8237409e+00   4.7267599e+00   3.7490739e+00   5.7704875e+00   3.8154018e+00   4.6503268e+00   5.0044958e+00   3.7060524e+00   3.7762575e+00   4.5037232e+00   4.8164539e+00   5.1574084e+00   5.5860081e+00   4.5195237e+00   3.9601358e+00   4.3901702e+00   5.2992055e+00   4.5412859e+00   4.3739561e+00   3.6694918e+00   4.4403343e+00   4.6130415e+00   4.2323959e+00   3.9342203e+00   4.8773871e+00   4.7190595e+00   4.2559866e+00   3.9201797e+00   4.1523445e+00   4.3289315e+00   3.9302319e+00   2.1845981e-01   1.1486378e+00   7.0784540e-01   4.0438741e-01   5.0503591e-01   4.4651726e-01   4.0147421e-01   5.0905001e-01   3.2352160e-01   1.4096146e-01   3.4156574e+00   3.1235447e+00   3.5778307e+00   2.6097685e+00   3.2346686e+00   3.0478400e+00   3.3101102e+00   1.9193093e+00   3.2270948e+00   2.4922287e+00   2.2081369e+00   2.7965957e+00   2.6184141e+00   3.2685282e+00   2.1916898e+00   3.0773654e+00   3.0673749e+00   2.6423274e+00   3.1444983e+00   2.4678343e+00   3.4088888e+00   2.6016025e+00   3.4988409e+00   3.2521427e+00   2.9171710e+00   3.0559188e+00   3.4597125e+00   3.6553612e+00   3.0847983e+00   2.0765921e+00   2.3848740e+00   2.2817008e+00   2.4722095e+00   3.6724425e+00   3.0650478e+00   3.0981264e+00   3.3567173e+00   3.0288896e+00   2.6566259e+00   2.5851693e+00   2.9455446e+00   3.1719381e+00   2.5729378e+00   1.9450955e+00   
2.7611864e+00   2.7431084e+00   2.7570191e+00   2.8884401e+00   1.6625998e+00   2.6648989e+00   4.6768871e+00   3.7101281e+00   4.5948688e+00   4.1876541e+00   4.4480565e+00   5.3202405e+00   3.1169790e+00   4.9630576e+00   4.4189653e+00   4.8572313e+00   3.7684708e+00   3.9292780e+00   4.1872611e+00   3.6418662e+00   3.8262353e+00   4.0041417e+00   4.1076175e+00   5.4411191e+00   5.6370076e+00   3.5929878e+00   4.4174698e+00   3.5389106e+00   5.4223305e+00   3.5340177e+00   4.3569684e+00   4.6672392e+00   3.4309563e+00   3.5133135e+00   4.2392500e+00   4.4661721e+00   4.8075242e+00   5.1977119e+00   4.2572858e+00   3.6784045e+00   4.1454521e+00   4.9233665e+00   4.2900309e+00   4.0984960e+00   3.4145942e+00   4.1120703e+00   4.3243538e+00   3.8957047e+00   3.7101281e+00   4.5857922e+00   4.4360042e+00   3.9497198e+00   3.6509512e+00   3.8600234e+00   4.0800357e+00   3.6915258e+00   1.2223099e+00   6.2024833e-01   3.7255734e-01   6.2081167e-01   5.0905001e-01   3.7598397e-01   4.4651726e-01   3.4583729e-01   2.1269358e-01   3.6091470e+00   3.3105724e+00   3.7708932e+00   2.7979838e+00   3.4251430e+00   3.2391031e+00   3.4951642e+00   2.1075335e+00   3.4235403e+00   2.6687055e+00   2.3989464e+00   2.9762945e+00   2.8216056e+00   3.4603628e+00   2.3673218e+00   3.2680041e+00   3.2498302e+00   2.8414525e+00   3.3361912e+00   2.6626548e+00   3.5849794e+00   2.7906520e+00   3.6926714e+00   3.4497714e+00   3.1105675e+00   3.2468925e+00   3.6557661e+00   3.8432801e+00   3.2705166e+00   2.2719663e+00   2.5786309e+00   2.4784234e+00   2.6629602e+00   3.8619321e+00   3.2465133e+00   3.2780765e+00   3.5475145e+00   3.2266676e+00   2.8416143e+00   2.7719788e+00   3.1392934e+00   3.3624692e+00   2.7656089e+00   2.1335398e+00   2.9496515e+00   2.9338590e+00   2.9447165e+00   3.0808399e+00   1.8330979e+00   2.8520904e+00   4.8482992e+00   3.8884996e+00   4.7814945e+00   4.3763964e+00   4.6280797e+00   5.5131379e+00   3.2922206e+00   5.1594703e+00   4.6125121e+00   5.0342965e+00   
3.9453171e+00   4.1137002e+00   4.3683295e+00   3.8151836e+00   3.9816719e+00   4.1715493e+00   4.2963195e+00   5.6322635e+00   5.8289682e+00   3.7874513e+00   4.5945190e+00   3.7079546e+00   5.6180203e+00   3.7164611e+00   4.5394421e+00   4.8614468e+00   3.6107037e+00   3.6929889e+00   4.4201622e+00   4.6634723e+00   5.0015266e+00   5.3906681e+00   4.4348302e+00   3.8718490e+00   4.3427947e+00   5.1078309e+00   4.4583488e+00   4.2864208e+00   3.5920050e+00   4.2919639e+00   4.4953238e+00   4.0619633e+00   3.8884996e+00   4.7650352e+00   4.6046026e+00   4.1173867e+00   3.8320624e+00   4.0389546e+00   4.2480032e+00   3.8727359e+00   9.0049692e-01   1.2307737e+00   1.5483011e+00   7.1462831e-01   1.5272277e+00   9.0074515e-01   1.4700179e+00   1.0331736e+00   3.7896333e+00   3.4304205e+00   3.9179666e+00   2.7683644e+00   3.5321772e+00   3.2685282e+00   3.5962433e+00   2.0250421e+00   3.5485736e+00   2.6642702e+00   2.2244833e+00   3.0435803e+00   2.8325748e+00   3.5230967e+00   2.4205131e+00   3.4307077e+00   3.2815626e+00   2.8843735e+00   3.3658240e+00   2.6687055e+00   3.6389628e+00   2.8830783e+00   3.7490739e+00   3.5087649e+00   3.2305801e+00   3.3908902e+00   3.7876239e+00   3.9561876e+00   3.3306114e+00   2.3112968e+00   2.5604155e+00   2.4585271e+00   2.7122813e+00   3.8971376e+00   3.2667813e+00   3.3674720e+00   3.6849783e+00   3.2837729e+00   2.8840079e+00   2.7685572e+00   3.1442943e+00   3.4335342e+00   2.8028287e+00   2.0286682e+00   2.9704704e+00   2.9822537e+00   2.9868163e+00   3.1741954e+00   1.7644184e+00   2.8908296e+00   4.8985114e+00   3.9102500e+00   4.8863496e+00   4.4272873e+00   4.6888407e+00   5.6294495e+00   3.2706756e+00   5.2636641e+00   4.6765452e+00   5.1547576e+00   4.0378383e+00   4.1743230e+00   4.4638518e+00   3.8230269e+00   4.0056625e+00   4.2450961e+00   4.3676124e+00   5.7745449e+00   5.9344852e+00   3.7932062e+00   4.6942925e+00   3.7245053e+00   5.7354045e+00   3.7814598e+00   4.6271849e+00   4.9763401e+00   3.6736483e+00   
3.7511498e+00   4.4747039e+00   4.7841150e+00   5.1205765e+00   5.5660104e+00   4.4889834e+00   3.9348528e+00   4.3728356e+00   5.2548517e+00   4.5226167e+00   4.3526639e+00   3.6449787e+00   4.4041536e+00   4.5787890e+00   4.1879339e+00   3.9102500e+00   4.8490587e+00   4.6897155e+00   4.2150171e+00   3.8841455e+00   4.1203091e+00   4.3121165e+00   3.9109678e+00   6.7975091e-01   9.0056222e-01   4.1586001e-01   8.2275389e-01   2.0656129e-01   9.4287188e-01   6.0121055e-01   3.8264361e+00   3.4551376e+00   3.9537404e+00   2.8149627e+00   3.5710248e+00   3.2874334e+00   3.6122384e+00   2.0711789e+00   3.5849006e+00   2.6879715e+00   2.3281827e+00   3.0685922e+00   2.8946126e+00   3.5468964e+00   2.4478108e+00   3.4686755e+00   3.2962520e+00   2.9098194e+00   3.4214793e+00   2.7033034e+00   3.6543993e+00   2.9209919e+00   3.7841436e+00   3.5321717e+00   3.2673909e+00   3.4297053e+00   3.8284763e+00   3.9909892e+00   3.3567173e+00   2.3533545e+00   2.6019240e+00   2.5015675e+00   2.7452885e+00   3.9207248e+00   3.2781950e+00   3.3698144e+00   3.7190229e+00   3.3356294e+00   2.8984764e+00   2.8022534e+00   3.1646752e+00   3.4558535e+00   2.8373077e+00   2.0905239e+00   2.9944056e+00   2.9961831e+00   3.0065426e+00   3.2053509e+00   1.8216743e+00   2.9155237e+00   4.9187518e+00   3.9355645e+00   4.9209267e+00   4.4497146e+00   4.7161140e+00   5.6635613e+00   3.2956846e+00   5.2947833e+00   4.7084108e+00   5.1767384e+00   4.0656879e+00   4.2065257e+00   4.4986941e+00   3.8543544e+00   4.0391220e+00   4.2738429e+00   4.3928114e+00   5.7890564e+00   5.9715517e+00   3.8320624e+00   4.7267005e+00   3.7498842e+00   5.7708014e+00   3.8168398e+00   4.6501080e+00   5.0044433e+00   3.7068836e+00   3.7763550e+00   4.5042646e+00   4.8165110e+00   5.1578102e+00   5.5839155e+00   4.5200609e+00   3.9608542e+00   4.3918790e+00   5.2992517e+00   4.5407542e+00   4.3739561e+00   3.6695956e+00   4.4403343e+00   4.6130415e+00   4.2323959e+00   3.9355645e+00   4.8773316e+00   4.7188476e+00   
4.2560614e+00   3.9234348e+00   4.1524235e+00   4.3283407e+00   3.9303212e+00   3.8934542e-01   5.4292906e-01   4.4535192e-01   5.3309112e-01   4.5581864e-01   4.2538717e-01   3.3319064e+00   3.0058998e+00   3.4822680e+00   2.4967542e+00   3.1249915e+00   2.9284660e+00   3.1836200e+00   1.8265014e+00   3.1331426e+00   2.3481462e+00   2.1458939e+00   2.6572703e+00   2.5437119e+00   3.1519721e+00   2.0470220e+00   2.9815576e+00   2.9302134e+00   2.5421955e+00   3.0374850e+00   2.3632684e+00   3.2598750e+00   2.4873223e+00   3.3884269e+00   3.1477087e+00   2.8156804e+00   2.9556616e+00   3.3682605e+00   3.5401725e+00   2.9561205e+00   1.9790422e+00   2.2832934e+00   2.1885968e+00   2.3571494e+00   3.5486864e+00   2.9260462e+00   2.9587612e+00   3.2530866e+00   2.9361879e+00   2.5256527e+00   2.4631898e+00   2.8325946e+00   3.0534395e+00   2.4619105e+00   1.8618589e+00   2.6377354e+00   2.6235630e+00   2.6314198e+00   2.7785210e+00   1.5475473e+00   2.5392051e+00   4.5179415e+00   3.5641186e+00   4.4752183e+00   4.0626016e+00   4.3069105e+00   5.2164277e+00   2.9689697e+00   4.8634846e+00   4.3067423e+00   4.7194915e+00   3.6262912e+00   3.7979761e+00   4.0547736e+00   3.4875352e+00   3.6400497e+00   3.8418637e+00   3.9849753e+00   5.3352888e+00   5.5294519e+00   3.4830211e+00   4.2776053e+00   3.3760149e+00   5.3253207e+00   3.4004918e+00   4.2238184e+00   4.5645220e+00   3.2914954e+00   3.3718862e+00   4.0995928e+00   4.3725648e+00   4.7075069e+00   5.1063282e+00   4.1113292e+00   3.5651462e+00   4.0375056e+00   4.8132427e+00   4.1268905e+00   3.9732869e+00   3.2685282e+00   3.9810803e+00   4.1706050e+00   3.7449914e+00   3.5641186e+00   4.4464848e+00   4.2774083e+00   3.7941767e+00   3.5148434e+00   3.7206115e+00   3.9162695e+00   3.5510950e+00   8.6137722e-01   3.2352160e-01   7.6166891e-01   4.2667565e-01   6.2660376e-01   3.0634640e+00   2.7392828e+00   3.2125175e+00   2.3391296e+00   2.8734025e+00   2.6707502e+00   2.9145708e+00   1.7569738e+00   2.8671376e+00   
2.1479276e+00   2.1308063e+00   2.4108292e+00   2.3854031e+00   2.8851564e+00   1.8368900e+00   2.7201546e+00   2.6724144e+00   2.2982662e+00   2.8483417e+00   2.1701312e+00   3.0045018e+00   2.2531409e+00   3.1444983e+00   2.8783309e+00   2.5586145e+00   2.6967931e+00   3.1073497e+00   3.2779401e+00   2.7018605e+00   1.8104298e+00   2.1219691e+00   2.0368741e+00   2.1367260e+00   3.2905600e+00   2.6692615e+00   2.6939411e+00   2.9863438e+00   2.7320931e+00   2.2698938e+00   2.2722516e+00   2.5933163e+00   2.7845473e+00   2.2472326e+00   1.8195937e+00   2.4037412e+00   2.3571494e+00   2.3781826e+00   2.5200525e+00   1.5411691e+00   2.3001580e+00   4.2694227e+00   3.3237039e+00   4.2115652e+00   3.7930080e+00   4.0498216e+00   4.9425633e+00   2.7652100e+00   4.5847429e+00   4.0464464e+00   4.4666326e+00   3.3738930e+00   3.5479683e+00   3.8004969e+00   3.2711669e+00   3.4346585e+00   3.6043418e+00   3.7146255e+00   5.0600559e+00   5.2639977e+00   3.2609948e+00   4.0260071e+00   3.1481222e+00   5.0506841e+00   3.1599876e+00   3.9594081e+00   4.2851208e+00   3.0501817e+00   3.1181310e+00   3.8476631e+00   4.0931004e+00   4.4378867e+00   4.8317916e+00   3.8652515e+00   3.2970551e+00   3.7653550e+00   4.5583070e+00   3.8836720e+00   3.7008091e+00   3.0188386e+00   3.7282629e+00   3.9300286e+00   3.5186510e+00   3.3237039e+00   4.1891573e+00   4.0376133e+00   3.5648733e+00   3.2885200e+00   3.4693398e+00   3.6732275e+00   3.2917360e+00   8.1385214e-01   2.5251796e-01   7.6787403e-01   3.2586371e-01   3.5846101e+00   3.2546607e+00   3.7315988e+00   2.6609901e+00   3.3659358e+00   3.1439065e+00   3.4298218e+00   1.9383545e+00   3.3723944e+00   2.5580763e+00   2.1777421e+00   2.8983020e+00   2.6954219e+00   3.3808052e+00   2.2790175e+00   3.2344170e+00   3.1579733e+00   2.7462372e+00   3.2292608e+00   2.5444216e+00   3.5028333e+00   2.7205595e+00   3.6067948e+00   3.3670797e+00   3.0557792e+00   3.2048395e+00   3.6088607e+00   3.7891577e+00   3.1900581e+00   2.1641873e+00   
2.4447974e+00   2.3408917e+00   2.5700421e+00   3.7710363e+00   3.1506190e+00   3.2040124e+00   3.5027314e+00   3.1322650e+00   2.7512730e+00   2.6533250e+00   3.0299528e+00   3.2862185e+00   2.6656374e+00   1.9477421e+00   2.8481000e+00   2.8454952e+00   2.8544453e+00   3.0132514e+00   1.6658308e+00   2.7590026e+00   4.7688362e+00   3.7943375e+00   4.7263320e+00   4.2949506e+00   4.5532150e+00   5.4636648e+00   3.1768366e+00   5.1030228e+00   4.5342701e+00   4.9831843e+00   3.8820426e+00   4.0358897e+00   4.3088496e+00   3.7133362e+00   3.8949006e+00   4.1025163e+00   4.2237423e+00   5.5888590e+00   5.7742361e+00   3.6743314e+00   4.5370189e+00   3.6141649e+00   5.5687868e+00   3.6395551e+00   4.4736906e+00   4.8085584e+00   3.5338163e+00   3.6144782e+00   4.3417782e+00   4.6138035e+00   4.9524148e+00   5.3625968e+00   4.3570317e+00   3.7932922e+00   4.2488997e+00   5.0747397e+00   4.3832471e+00   4.2110583e+00   3.5113289e+00   4.2399031e+00   4.4320962e+00   4.0189525e+00   3.7943375e+00   4.7000551e+00   4.5409269e+00   4.0612006e+00   3.7475562e+00   3.9722979e+00   4.1719819e+00   3.7859347e+00   6.9325418e-01   2.1269358e-01   5.0905001e-01   3.3337914e+00   3.0376046e+00   3.4948415e+00   2.6099670e+00   3.1641230e+00   2.9745020e+00   3.2202056e+00   1.9796375e+00   3.1511769e+00   2.4469125e+00   2.3235342e+00   2.7196435e+00   2.6337042e+00   3.1881331e+00   2.1375243e+00   2.9985516e+00   2.9847499e+00   2.5867906e+00   3.1255136e+00   2.4464131e+00   3.3203249e+00   2.5432298e+00   3.4386456e+00   3.1757436e+00   2.8453413e+00   2.9797458e+00   3.3872988e+00   3.5731577e+00   3.0079222e+00   2.0710306e+00   2.3862284e+00   2.2923690e+00   2.4260574e+00   3.5964347e+00   2.9822786e+00   3.0067893e+00   3.2740296e+00   3.0040546e+00   2.5786309e+00   2.5579141e+00   2.8891491e+00   3.0885642e+00   2.5333642e+00   2.0283816e+00   2.7031874e+00   2.6626548e+00   2.6835197e+00   2.8149627e+00   1.7472675e+00   2.6016025e+00   4.5864727e+00   3.6358644e+00   
4.5092329e+00   4.1008711e+00   4.3608329e+00   5.2329560e+00   3.0685922e+00   4.8761643e+00   4.3448624e+00   4.7688607e+00   3.6817961e+00   3.8534030e+00   4.1033021e+00   3.5811154e+00   3.7530357e+00   3.9183591e+00   4.0199092e+00   5.3504583e+00   5.5556442e+00   3.5517562e+00   4.3306939e+00   3.4641481e+00   5.3377017e+00   3.4637450e+00   4.2663890e+00   4.5772917e+00   3.3571533e+00   3.4296698e+00   4.1575709e+00   4.3797497e+00   4.7253427e+00   5.1097682e+00   4.1763898e+00   3.5983434e+00   4.0666282e+00   4.8419079e+00   4.2005634e+00   4.0083071e+00   3.3318481e+00   4.0278210e+00   4.2396851e+00   3.8171357e+00   3.6358644e+00   4.4969460e+00   4.3490518e+00   3.8705614e+00   3.5905415e+00   3.7766172e+00   3.9906340e+00   3.6052686e+00   7.6752131e-01   4.0147421e-01   3.6660608e+00   3.3135273e+00   3.8017611e+00   2.7018605e+00   3.4275679e+00   3.1699903e+00   3.4789136e+00   1.9730403e+00   3.4358679e+00   2.5840983e+00   2.2331309e+00   2.9422991e+00   2.7581254e+00   3.4189217e+00   2.3233512e+00   3.3121677e+00   3.1836200e+00   2.7812918e+00   3.2896000e+00   2.5816639e+00   3.5379744e+00   2.7795823e+00   3.6532797e+00   3.4029480e+00   3.1195329e+00   3.2770643e+00   3.6772238e+00   3.8493740e+00   3.2305582e+00   2.2173292e+00   2.4840285e+00   2.3814525e+00   2.6148507e+00   3.8019334e+00   3.1710836e+00   3.2447990e+00   3.5700242e+00   3.1955870e+00   2.7803619e+00   2.6879415e+00   3.0521985e+00   3.3264150e+00   2.7089627e+00   1.9908167e+00   2.8775912e+00   2.8744403e+00   2.8857836e+00   3.0658390e+00   1.7171798e+00   2.7936066e+00   4.8056033e+00   3.8247106e+00   4.7834029e+00   4.3283723e+00   4.5943507e+00   5.5219241e+00   3.2001457e+00   5.1552806e+00   4.5786596e+00   5.0423887e+00   3.9353963e+00   4.0804944e+00   4.3648743e+00   3.7469906e+00   3.9346929e+00   4.1523445e+00   4.2650653e+00   5.6468786e+00   5.8321617e+00   3.7127205e+00   4.5943003e+00   3.6444925e+00   5.6275915e+00   3.6885686e+00   4.5212945e+00   
4.8635596e+00   3.5807904e+00   3.6545040e+00   4.3827087e+00   4.6717461e+00   5.0136174e+00   5.4320857e+00   4.3994790e+00   3.8323332e+00   4.2736523e+00   5.1501184e+00   4.4249262e+00   4.2490074e+00   3.5500567e+00   4.3022895e+00   4.4864974e+00   4.0933482e+00   3.8247106e+00   4.7495571e+00   4.5943072e+00   4.1245629e+00   3.7976741e+00   4.0232438e+00   4.2129603e+00   3.8158144e+00   4.4535192e-01   3.3681634e+00   3.1015495e+00   3.5417515e+00   2.6663854e+00   3.2198557e+00   3.0579528e+00   3.2934163e+00   2.0153916e+00   3.2040320e+00   2.5204039e+00   2.3388377e+00   2.7956674e+00   2.6725726e+00   3.2655357e+00   2.2078644e+00   3.0402181e+00   3.0721050e+00   2.6595270e+00   3.1749624e+00   2.5094912e+00   3.4048516e+00   2.6019240e+00   3.5045178e+00   3.2523394e+00   2.8999277e+00   3.0267460e+00   3.4333346e+00   3.6317584e+00   3.0844423e+00   2.1209506e+00   2.4419061e+00   2.3443191e+00   2.4920874e+00   3.6775470e+00   3.0715602e+00   3.0884019e+00   3.3261336e+00   3.0501817e+00   2.6631094e+00   2.6243758e+00   2.9691171e+00   3.1659032e+00   2.5983106e+00   2.0538718e+00   2.7808700e+00   2.7473221e+00   2.7650187e+00   2.8804789e+00   1.7660585e+00   2.6784604e+00   4.6691123e+00   3.7183181e+00   4.5689156e+00   4.1821417e+00   4.4377562e+00   5.2873851e+00   3.1455384e+00   4.9362269e+00   4.4131751e+00   4.8285697e+00   3.7508315e+00   3.9249912e+00   4.1665786e+00   3.6588207e+00   3.8312584e+00   3.9914600e+00   4.0953360e+00   5.4042844e+00   5.6094349e+00   3.6203759e+00   4.3940519e+00   3.5472449e+00   5.3896044e+00   3.5318477e+00   4.3383367e+00   4.6370771e+00   3.4283809e+00   3.5084967e+00   4.2335104e+00   4.4348302e+00   4.7765764e+00   5.1494118e+00   4.2515997e+00   3.6735311e+00   4.1503255e+00   4.8803856e+00   4.2802235e+00   4.0874500e+00   3.4119017e+00   4.0856134e+00   4.3069340e+00   3.8658667e+00   3.7183181e+00   4.5670800e+00   4.4180998e+00   3.9298460e+00   3.6561219e+00   3.8459316e+00   4.0712063e+00   
3.6910219e+00   3.5300704e+00   3.2300705e+00   3.6894189e+00   2.6906230e+00   3.3402581e+00   3.1455455e+00   3.4142500e+00   1.9871921e+00   3.3364095e+00   2.5801041e+00   2.2579027e+00   2.8953397e+00   2.7040361e+00   3.3706887e+00   2.2848507e+00   3.1889632e+00   3.1641787e+00   2.7405950e+00   3.2351115e+00   2.5567836e+00   3.5066271e+00   2.7031874e+00   3.5985833e+00   3.3547156e+00   3.0245025e+00   3.1656773e+00   3.5695690e+00   3.7624530e+00   3.1847809e+00   2.1665831e+00   2.4678343e+00   2.3636654e+00   2.5680682e+00   3.7710578e+00   3.1606607e+00   3.1985717e+00   3.4665002e+00   3.1244487e+00   2.7540755e+00   2.6724144e+00   3.0392407e+00   3.2747247e+00   2.6671530e+00   2.0066796e+00   2.8554471e+00   2.8427684e+00   2.8549070e+00   2.9928504e+00   1.7230625e+00   2.7611864e+00   4.7742557e+00   3.8047268e+00   4.7018935e+00   4.2892152e+00   4.5488617e+00   5.4303909e+00   3.2037606e+00   5.0724791e+00   4.5217060e+00   4.9623634e+00   3.8707551e+00   4.0296740e+00   4.2915574e+00   3.7321091e+00   3.9154512e+00   4.1022778e+00   4.2113304e+00   5.5520135e+00   5.7453700e+00   3.6849669e+00   4.5212945e+00   3.6308222e+00   5.5330176e+00   3.6335075e+00   4.4607162e+00   4.7770551e+00   3.5299194e+00   3.6126659e+00   4.3390241e+00   4.5771358e+00   4.9175276e+00   5.3118739e+00   4.3561658e+00   3.7812981e+00   4.2455646e+00   5.0340960e+00   4.3872001e+00   4.2015182e+00   3.5126820e+00   4.2176412e+00   4.4249262e+00   3.9985367e+00   3.8047268e+00   4.6887893e+00   4.5358705e+00   4.0502685e+00   3.7475470e+00   3.9619683e+00   4.1767972e+00   3.7895730e+00   6.0611244e-01   2.1845981e-01   1.6212669e+00   5.6769031e-01   1.3103855e+00   7.0437330e-01   2.2923690e+00   4.4651726e-01   1.8497891e+00   2.2196852e+00   1.1283882e+00   1.3099706e+00   9.0827783e-01   1.5790055e+00   3.7427929e-01   1.4018200e+00   1.2701139e+00   1.1341579e+00   1.5133392e+00   1.1134787e+00   1.0264409e+00   8.7202528e-01   9.2264612e-01   6.6432544e-01   
4.5470518e-01   4.1449626e-01   4.3456114e-01   1.0085601e+00   1.5838351e+00   1.6415861e+00   1.6742876e+00   1.3140585e+00   1.0496979e+00   1.6013574e+00   1.0054037e+00   3.0546431e-01   1.0168833e+00   1.4293465e+00   1.5774037e+00   1.5278635e+00   9.0252542e-01   1.2994764e+00   2.2231652e+00   1.4317371e+00   1.3207609e+00   1.3224963e+00   8.3649708e-01   2.2607507e+00   1.3421549e+00   1.5412452e+00   1.2539702e+00   1.2643026e+00   1.0324775e+00   1.2342162e+00   1.9387309e+00   2.1209313e+00   1.6105602e+00   1.1912106e+00   1.5832517e+00   7.2486328e-01   8.5585239e-01   9.4009473e-01   1.3873503e+00   1.3945703e+00   1.0313560e+00   8.7720955e-01   2.0658700e+00   2.2655571e+00   1.2460824e+00   1.1834841e+00   1.4368020e+00   2.0378171e+00   7.9878917e-01   1.0960883e+00   1.3102767e+00   8.5105559e-01   9.2480363e-01   1.0805899e+00   1.1043883e+00   1.4313279e+00   1.8006336e+00   1.1235486e+00   7.6625946e-01   1.1633029e+00   1.5390703e+00   1.2493717e+00   9.0965328e-01   1.0182895e+00   8.6983677e-01   1.1880428e+00   9.2095040e-01   1.2539702e+00   1.3335022e+00   1.3109705e+00   9.5035453e-01   9.2112464e-01   7.6166891e-01   1.1418127e+00   1.1276971e+00   5.6700421e-01   1.1449732e+00   4.0293660e-01   7.3813096e-01   2.1845981e-01   1.7551534e+00   3.4583729e-01   1.2603076e+00   1.7354460e+00   5.3588338e-01   1.0777972e+00   3.8934542e-01   1.0669582e+00   3.0811765e-01   8.0294841e-01   7.8768770e-01   1.0018083e+00   1.0175773e+00   5.5419992e-01   5.9426792e-01   7.3496673e-01   4.8927739e-01   3.4378533e-01   2.5651975e-01   5.2655962e-01   5.4292906e-01   4.4417983e-01   1.1634384e+00   1.1527805e+00   1.2020363e+00   8.1521713e-01   7.2526325e-01   1.0018083e+00   4.1449626e-01   3.2586371e-01   9.0277242e-01   8.3172002e-01   1.0440187e+00   9.7779835e-01   3.2816937e-01   8.1521713e-01   1.7083888e+00   8.6361309e-01   7.3145860e-01   7.3145860e-01   3.6171588e-01   1.7816674e+00   7.6914805e-01   1.6177449e+00   8.3060013e-01   
1.4732400e+00   1.1107977e+00   1.3546017e+00   2.2147080e+00   1.5404344e+00   1.8624350e+00   1.3603471e+00   1.7544191e+00   6.8961791e-01   8.7478495e-01   1.0733200e+00   9.5271386e-01   1.0466623e+00   9.9348625e-01   1.0085601e+00   2.3452277e+00   2.5288464e+00   1.0480665e+00   1.3130641e+00   8.9653332e-01   2.3282127e+00   5.8914551e-01   1.2436109e+00   1.5625142e+00   4.8927739e-01   4.8927739e-01   1.1593224e+00   1.3813076e+00   1.7138020e+00   2.1603815e+00   1.1866786e+00   6.4755655e-01   1.1521791e+00   1.8620175e+00   1.2565757e+00   1.0067784e+00   4.8927739e-01   1.0056742e+00   1.2594846e+00   9.3049742e-01   8.3060013e-01   1.4762619e+00   1.3817041e+00   9.4558103e-01   7.9613242e-01   7.7074935e-01   1.0627606e+00   7.0776547e-01   1.5593809e+00   4.8036801e-01   1.2165505e+00   6.1151102e-01   2.2871743e+00   4.0176783e-01   1.7963441e+00   2.1851225e+00   1.0906388e+00   1.2884575e+00   8.0619006e-01   1.6156775e+00   5.0905001e-01   1.3093850e+00   1.2434795e+00   1.0262547e+00   1.4875372e+00   1.0069726e+00   1.0669582e+00   7.4511469e-01   8.2384013e-01   6.9728513e-01   5.3022554e-01   3.0811765e-01   2.5651975e-01   9.2264612e-01   1.6478667e+00   1.6180636e+00   1.6694817e+00   1.3199714e+00   9.2288144e-01   1.5068702e+00   9.2859317e-01   2.4837156e-01   9.3238528e-01   1.3813076e+00   1.5238543e+00   1.4346522e+00   8.1130291e-01   1.2794849e+00   2.2234347e+00   1.3629833e+00   1.2671726e+00   1.2652657e+00   8.1810461e-01   2.3116343e+00   1.2988558e+00   1.3410314e+00   1.1276971e+00   1.0590298e+00   8.2552685e-01   1.0264409e+00   1.7485421e+00   2.0171203e+00   1.4118594e+00   9.7949166e-01   1.3980896e+00   5.7324170e-01   6.6317860e-01   7.4586719e-01   1.2603076e+00   1.2604558e+00   8.7504951e-01   6.6432544e-01   1.8915404e+00   2.0711789e+00   1.1178264e+00   9.9368623e-01   1.3223897e+00   1.8515012e+00   6.6384020e-01   8.9303452e-01   1.1107977e+00   7.2823007e-01   8.1099042e-01   8.7170815e-01   9.0876485e-01   
1.2370832e+00   1.6626615e+00   9.2112464e-01   6.2482915e-01   9.7377870e-01   1.3752391e+00   1.0720678e+00   7.0386584e-01   9.0876485e-01   6.8917100e-01   1.0120221e+00   8.0294841e-01   1.1276971e+00   1.1329323e+00   1.1353806e+00   8.1385214e-01   7.7588000e-01   5.8914551e-01   9.8054887e-01   1.0085601e+00   1.0879524e+00   6.2605182e-01   1.2079117e+00   8.2305664e-01   1.1903922e+00   4.4651726e-01   6.5648056e-01   7.4164639e-01   5.2942799e-01   8.9852394e-01   6.4755655e-01   1.3035649e+00   7.7074935e-01   4.8135521e-01   7.7074935e-01   2.5651975e-01   1.1022599e+00   6.8917100e-01   1.0627606e+00   8.6290690e-01   9.7759114e-01   1.1868139e+00   1.3969297e+00   1.4333755e+00   7.6166891e-01   5.6075294e-01   2.5251796e-01   3.7427929e-01   4.4651726e-01   1.1444449e+00   7.7074935e-01   1.1571858e+00   1.3491011e+00   8.2624515e-01   7.0086313e-01   2.0000000e-01   4.4535192e-01   8.9852394e-01   3.7427929e-01   7.7885297e-01   4.1449626e-01   7.0826681e-01   6.1092863e-01   8.2275389e-01   1.0198386e+00   5.0905001e-01   2.2002582e+00   1.1640914e+00   2.2347161e+00   1.6833015e+00   1.9584639e+00   2.9773446e+00   7.2852070e-01   2.5984158e+00   1.9494155e+00   2.5625921e+00   1.4644662e+00   1.4491244e+00   1.8190688e+00   1.0934620e+00   1.3861754e+00   1.6265426e+00   1.6626615e+00   3.1878246e+00   3.2549253e+00   1.0346741e+00   2.0606771e+00   1.0425476e+00   3.0882196e+00   1.1022599e+00   1.9692383e+00   2.3537589e+00   1.0082605e+00   1.0950112e+00   1.7332099e+00   2.1942739e+00   2.5032087e+00   3.0886055e+00   1.7538274e+00   1.2342162e+00   1.6237100e+00   2.7181432e+00   1.8926658e+00   1.6507294e+00   1.0082605e+00   1.8244836e+00   1.9254808e+00   1.7303440e+00   1.1640914e+00   2.1641182e+00   2.0565627e+00   1.6435752e+00   1.1782910e+00   1.4699978e+00   1.7142546e+00   1.2095267e+00   8.0326782e-01   5.0991930e-01   1.8350577e+00   2.1269358e-01   1.3537729e+00   1.7146525e+00   6.5172743e-01   8.5585239e-01   4.0438741e-01   
1.1847335e+00   3.4583729e-01   9.0252542e-01   8.2372435e-01   6.2024833e-01   1.0324775e+00   6.6827038e-01   6.5172743e-01   3.8776762e-01   4.4535192e-01   3.2816937e-01   2.5651975e-01   3.2586371e-01   4.3691963e-01   5.0180477e-01   1.2342162e+00   1.1573546e+00   1.2172454e+00   8.7848692e-01   6.2205176e-01   1.1016264e+00   6.9006418e-01   3.2586371e-01   5.2371571e-01   9.4492923e-01   1.0646687e+00   1.0101422e+00   4.1449626e-01   8.2552685e-01   1.7679545e+00   9.2288144e-01   8.3888121e-01   8.2929029e-01   3.8934542e-01   1.8776878e+00   8.5434758e-01   1.5481649e+00   7.9613242e-01   1.3657247e+00   1.0085601e+00   1.2641849e+00   2.1002817e+00   1.6030661e+00   1.7482192e+00   1.2100024e+00   1.7005893e+00   6.6491075e-01   7.3535471e-01   9.7949166e-01   8.8358844e-01   1.0423677e+00   9.5498315e-01   9.1051084e-01   2.2737459e+00   2.4086493e+00   7.2486328e-01   1.2326306e+00   9.4832302e-01   2.2096958e+00   3.8934542e-01   1.1729895e+00   1.4561933e+00   3.8776762e-01   4.8927739e-01   1.0574300e+00   1.2643026e+00   1.5918956e+00   2.0914667e+00   1.0906388e+00   5.0817745e-01   1.0182895e+00   1.7457596e+00   1.2250414e+00   9.1663180e-01   5.4292906e-01   9.1750357e-01   1.1891470e+00   8.8358844e-01   7.9613242e-01   1.3916739e+00   1.3267389e+00   8.9303452e-01   5.3309112e-01   6.9325418e-01   1.0574013e+00   7.0776547e-01   7.0776547e-01   1.3071453e+00   9.0049692e-01   6.9006418e-01   1.2079117e+00   3.6171588e-01   7.1791510e-01   4.1586001e-01   9.0049692e-01   1.0069726e+00   2.5251796e-01   4.4651726e-01   6.9325418e-01   6.2538346e-01   5.9426792e-01   5.6631629e-01   6.6827038e-01   4.1449626e-01   7.0437330e-01   9.0277242e-01   1.1055069e+00   1.0496979e+00   3.2586371e-01   1.0083666e+00   7.4164639e-01   8.3888121e-01   6.0181382e-01   6.3861009e-01   3.4378533e-01   6.3808075e-01   1.0101422e+00   6.8961791e-01   4.1449626e-01   5.3588338e-01   2.5651975e-01   4.1586001e-01   5.0991930e-01   1.2869134e+00   3.0546431e-01   
3.2586371e-01   3.0275928e-01   5.0905001e-01   1.5278635e+00   4.0000000e-01   1.7279861e+00   7.4586719e-01   1.7831878e+00   1.1718516e+00   1.4824233e+00   2.5111349e+00   8.3620494e-01   2.1256928e+00   1.4719311e+00   2.0814452e+00   1.0175773e+00   1.0014633e+00   1.3875139e+00   7.7885297e-01   1.1473003e+00   1.2144845e+00   1.1591754e+00   2.6773585e+00   2.7900071e+00   7.0776547e-01   1.6151673e+00   7.3496673e-01   2.6263773e+00   7.2526325e-01   1.4645804e+00   1.8755806e+00   6.3924842e-01   6.2407309e-01   1.2731262e+00   1.7507664e+00   2.0635966e+00   2.6116811e+00   1.3129189e+00   7.4855857e-01   1.1149070e+00   2.3160147e+00   1.4246028e+00   1.1229843e+00   5.6075294e-01   1.4120836e+00   1.5094575e+00   1.4108494e+00   7.4586719e-01   1.6884234e+00   1.6211869e+00   1.3017208e+00   8.1521713e-01   1.0401425e+00   1.2452704e+00   6.9728513e-01   1.8185955e+00   4.8135521e-01   1.2509218e+00   1.8049926e+00   5.8914551e-01   1.2205493e+00   4.2538717e-01   1.1912106e+00   4.6472023e-01   7.1840099e-01   8.9207714e-01   1.1017858e+00   1.1127329e+00   4.1586001e-01   7.8197925e-01   8.0326782e-01   5.7257017e-01   5.2655962e-01   4.3456114e-01   6.2660376e-01   4.8135521e-01   4.5581864e-01   1.3318128e+00   1.2468939e+00   1.3144065e+00   9.4935318e-01   6.6384020e-01   9.1075311e-01   3.2586371e-01   4.1449626e-01   1.0130748e+00   8.3280511e-01   1.0906119e+00   9.6204649e-01   3.4583729e-01   9.3296062e-01   1.7901543e+00   8.7170815e-01   7.3805807e-01   7.3805807e-01   5.2655962e-01   1.9041928e+00   8.1521713e-01   1.4138821e+00   7.3805807e-01   1.3166957e+00   9.2264612e-01   1.1533602e+00   2.0690479e+00   1.4700179e+00   1.7092525e+00   1.2231847e+00   1.5870088e+00   5.0592043e-01   7.5791688e-01   9.0575661e-01   9.1750357e-01   9.1802948e-01   8.1304731e-01   8.1638392e-01   2.1978861e+00   2.3802944e+00   1.1107977e+00   1.1386292e+00   7.9878917e-01   2.1900222e+00   6.1092863e-01   1.0480665e+00   1.4148192e+00   5.0991930e-01   
3.6171588e-01   9.7825559e-01   1.2593659e+00   1.5912764e+00   2.0615043e+00   1.0056742e+00   5.6700421e-01   1.0137836e+00   1.7695175e+00   1.0597541e+00   8.0619006e-01   3.8934542e-01   8.6513410e-01   1.0755693e+00   8.4050231e-01   7.3805807e-01   1.2832075e+00   1.1947245e+00   8.0660588e-01   8.2105460e-01   5.9426792e-01   8.6983677e-01   5.3309112e-01   1.9083318e+00   6.7975091e-01   4.1449626e-01   1.2452704e+00   1.1763980e+00   1.6420607e+00   7.9016429e-01   1.9365498e+00   1.3172979e+00   1.0653845e+00   1.5684812e+00   8.1304731e-01   1.7169601e+00   1.2768639e+00   1.8804140e+00   1.6311692e+00   1.6315809e+00   1.8424891e+00   2.1489929e+00   2.2038673e+00   1.4613032e+00   8.0587320e-01   6.8961791e-01   6.4704320e-01   9.7949166e-01   1.9250543e+00   1.2802798e+00   1.5824669e+00   2.0485534e+00   1.5790055e+00   1.0078327e+00   8.2305664e-01   1.1498269e+00   1.5838351e+00   1.0137836e+00   1.2418578e-01   1.0230441e+00   1.1119327e+00   1.0941064e+00   1.4719311e+00   3.2816937e-01   1.0165138e+00   2.9338155e+00   1.9158303e+00   3.0455280e+00   2.4635485e+00   2.7430309e+00   3.7921012e+00   1.2632199e+00   3.4105293e+00   2.7619926e+00   3.3261421e+00   2.2045198e+00   2.2598424e+00   2.6204307e+00   1.8330979e+00   2.0701646e+00   2.3622531e+00   2.4525409e+00   3.9619101e+00   4.0743074e+00   1.8315269e+00   2.8475224e+00   1.7388184e+00   3.9054939e+00   1.9111264e+00   2.7377517e+00   3.1510494e+00   1.7964653e+00   1.8350071e+00   2.5277506e+00   2.9970778e+00   3.3196868e+00   3.8532018e+00   2.5453122e+00   2.0312250e+00   2.3887539e+00   3.5269824e+00   2.5986705e+00   2.4210417e+00   1.7228354e+00   2.6125646e+00   2.7062349e+00   2.4839132e+00   1.9158303e+00   2.9502077e+00   2.8139128e+00   2.4180244e+00   1.9947426e+00   2.2550764e+00   2.3956104e+00   1.9332869e+00   1.4468211e+00   1.8027242e+00   7.3851529e-01   9.0658670e-01   5.0180477e-01   1.2418578e+00   2.5651975e-01   1.0022010e+00   8.6361309e-01   7.3851529e-01   
1.1055064e+00   7.8197925e-01   6.8961791e-01   4.8927739e-01   5.0270183e-01   3.2352160e-01   2.1269358e-01   2.5651975e-01   4.9857388e-01   6.0611244e-01   1.2633451e+00   1.2342162e+00   1.2794849e+00   9.3824087e-01   7.0776547e-01   1.2014753e+00   7.0429250e-01   2.5651975e-01   6.2482915e-01   1.0329901e+00   1.1593224e+00   1.1069580e+00   5.0180477e-01   8.9712482e-01   1.8394959e+00   1.0181000e+00   9.2095040e-01   9.2047746e-01   4.4417983e-01   1.9314297e+00   9.4103005e-01   1.6328100e+00   9.3238528e-01   1.3969297e+00   1.0389435e+00   1.3327491e+00   2.0961718e+00   1.7103548e+00   1.7405652e+00   1.2330392e+00   1.7474965e+00   7.7919451e-01   8.1810461e-01   1.0613462e+00   1.0417249e+00   1.2331989e+00   1.0974061e+00   9.4125538e-01   2.2568188e+00   2.4127176e+00   8.4050231e-01   1.3145067e+00   1.0960883e+00   2.1973666e+00   5.6075294e-01   1.2221471e+00   1.4467170e+00   5.7324170e-01   6.3977563e-01   1.1341579e+00   1.2436109e+00   1.5826476e+00   2.0476065e+00   1.1847335e+00   5.3665999e-01   1.0391247e+00   1.7521201e+00   1.3293211e+00   9.4492923e-01   6.9369532e-01   1.0019724e+00   1.3083079e+00   1.0406064e+00   9.3238528e-01   1.4580335e+00   1.4387122e+00   1.0576043e+00   7.0233835e-01   8.1304731e-01   1.1697902e+00   8.2372435e-01   7.6914805e-01   7.2823007e-01   8.7504951e-01   1.0611732e+00   4.5581864e-01   1.5204521e+00   6.6432544e-01   6.5172743e-01   1.0866092e+00   4.5470518e-01   1.0573285e+00   9.0074515e-01   1.3083079e+00   1.0613462e+00   1.2123540e+00   1.4190961e+00   1.6754036e+00   1.6596797e+00   8.9095811e-01   6.1947990e-01   4.2418962e-01   4.8927739e-01   6.0551856e-01   1.2951131e+00   6.2538346e-01   1.0030700e+00   1.5663312e+00   1.1396406e+00   4.5581864e-01   3.2816937e-01   5.3665999e-01   1.0166932e+00   6.0670504e-01   6.9167458e-01   4.4535192e-01   5.6075294e-01   5.3665999e-01   1.0182895e+00   9.1075311e-01   5.0991930e-01   2.2632657e+00   1.2601890e+00   2.4384530e+00   1.8269304e+00   
2.0927845e+00   3.1870761e+00   6.4290921e-01   2.8096725e+00   2.1462316e+00   2.6900593e+00   1.5901181e+00   1.6364474e+00   2.0101738e+00   1.1729895e+00   1.4078246e+00   1.7083888e+00   1.8278268e+00   3.3435703e+00   3.4604677e+00   1.2331989e+00   2.2206574e+00   1.0719360e+00   3.3068858e+00   1.3163598e+00   2.0994872e+00   2.5539296e+00   1.1948578e+00   1.1991899e+00   1.8828324e+00   2.4245766e+00   2.7358293e+00   3.2702869e+00   1.8959565e+00   1.4312787e+00   1.7665622e+00   2.9527671e+00   1.9255490e+00   1.7860690e+00   1.0796583e+00   2.0205937e+00   2.0647798e+00   1.9168750e+00   1.2601890e+00   2.3069539e+00   2.1608869e+00   1.8128438e+00   1.3838212e+00   1.6376058e+00   1.7220696e+00   1.2768639e+00   1.2687651e+00   1.0344911e+00   1.5320003e+00   9.7779835e-01   1.8837258e+00   1.2979752e+00   1.0012667e+00   1.3957794e+00   7.2526325e-01   1.6850672e+00   1.2372418e+00   1.7004805e+00   1.4979666e+00   1.5611922e+00   1.7745022e+00   2.0153916e+00   2.0815027e+00   1.3830210e+00   8.1242502e-01   5.8914551e-01   5.7257017e-01   9.5676647e-01   1.7518264e+00   1.2724737e+00   1.6669115e+00   1.9675324e+00   1.4200435e+00   1.1136605e+00   7.1881659e-01   1.0072663e+00   1.5134954e+00   9.3238528e-01   3.2352160e-01   9.5222919e-01   1.1681971e+00   1.1043332e+00   1.4120836e+00   6.2205176e-01   1.0078327e+00   2.8028143e+00   1.7525933e+00   2.8815987e+00   2.2944257e+00   2.5825907e+00   3.6132031e+00   1.1179743e+00   3.2286633e+00   2.5673494e+00   3.2133201e+00   2.1127170e+00   2.0867931e+00   2.4721080e+00   1.6626615e+00   1.9456450e+00   2.2607446e+00   2.2983453e+00   3.8335668e+00   3.8818411e+00   1.6299374e+00   2.7127377e+00   1.6132118e+00   3.7182722e+00   1.7559391e+00   2.6123294e+00   2.9966900e+00   1.6625128e+00   1.7303440e+00   2.3560577e+00   2.8340159e+00   3.1407514e+00   3.7366777e+00   2.3765195e+00   1.8701780e+00   2.1933937e+00   3.3657099e+00   2.5066503e+00   2.2797241e+00   1.6331631e+00   2.4808010e+00   
2.5698271e+00   2.3770285e+00   1.7525933e+00   2.8053367e+00   2.6963680e+00   2.2934334e+00   1.8202060e+00   2.1229819e+00   2.3231793e+00   1.8122257e+00   8.5462626e-01   5.0991930e-01   6.2538346e-01   8.0358695e-01   3.7255734e-01   5.3022554e-01   8.2105460e-01   6.0900723e-01   6.2482915e-01   3.0844217e-01   7.9580667e-01   5.4292906e-01   5.0991930e-01   7.0437330e-01   9.7270522e-01   9.9532071e-01   3.0546431e-01   7.9878917e-01   7.2343175e-01   7.8768770e-01   4.2418962e-01   9.0876485e-01   5.2862779e-01   4.4651726e-01   8.5205778e-01   7.4164639e-01   3.2586371e-01   5.7867728e-01   5.3309112e-01   4.1449626e-01   4.5581864e-01   1.2131545e+00   3.8776762e-01   3.2352160e-01   2.5251796e-01   3.2816937e-01   1.3221405e+00   2.8507955e-01   1.8873850e+00   9.2859317e-01   1.8730683e+00   1.4111029e+00   1.6550480e+00   2.6320302e+00   1.0406064e+00   2.2657813e+00   1.6658308e+00   2.1339968e+00   1.0072663e+00   1.1444449e+00   1.4417207e+00   8.9971984e-01   1.1192426e+00   1.2342162e+00   1.3367840e+00   2.7726042e+00   2.9277580e+00   9.9368623e-01   1.6694974e+00   7.8197925e-01   2.7493700e+00   7.5976039e-01   1.5849874e+00   1.9802196e+00   6.4290921e-01   7.1799256e-01   1.4445746e+00   1.8216794e+00   2.1462316e+00   2.6367554e+00   1.4616539e+00   9.2264612e-01   1.4088394e+00   2.3235034e+00   1.5120955e+00   1.3224963e+00   6.2024833e-01   1.4078246e+00   1.5587730e+00   1.2801437e+00   9.2859317e-01   1.8096161e+00   1.6710566e+00   1.2378278e+00   8.9653332e-01   1.0864449e+00   1.3071453e+00   9.0827783e-01   8.9159388e-01   7.7763126e-01   1.0417249e+00   9.1802948e-01   5.0905001e-01   6.2605182e-01   4.4651726e-01   1.2379511e+00   6.2024833e-01   9.5571254e-01   8.1558458e-01   7.5976039e-01   9.2944046e-01   1.0661822e+00   1.2662457e+00   8.2342214e-01   5.8851328e-01   5.1691876e-01   5.3588338e-01   5.1691876e-01   1.1717125e+00   9.6838716e-01   1.2601890e+00   1.1258723e+00   4.8135521e-01   8.3649708e-01   5.5419992e-01   
6.2407309e-01   9.0965328e-01   4.2538717e-01   1.0906388e+00   5.9426792e-01   8.1638392e-01   7.3145860e-01   7.3145860e-01   1.1880428e+00   6.3861009e-01   2.2927296e+00   1.2766755e+00   2.1156916e+00   1.6869465e+00   1.9835684e+00   2.8209672e+00   1.2028939e+00   2.4458200e+00   1.8683030e+00   2.5149752e+00   1.4746001e+00   1.4371043e+00   1.7501772e+00   1.2500343e+00   1.5991931e+00   1.7211928e+00   1.6271057e+00   3.0580852e+00   3.1168283e+00   1.0328064e+00   2.0203543e+00   1.2344562e+00   2.9146252e+00   1.0941064e+00   1.9515265e+00   2.2002582e+00   1.0531192e+00   1.1790011e+00   1.7600233e+00   1.9895190e+00   2.3106402e+00   2.8876509e+00   1.7983401e+00   1.1763719e+00   1.6118154e+00   2.5100676e+00   2.0039716e+00   1.6449456e+00   1.1276917e+00   1.7245185e+00   1.9495298e+00   1.6638124e+00   1.2766755e+00   2.1512175e+00   2.1034605e+00   1.6449189e+00   1.1924295e+00   1.4645804e+00   1.8408873e+00   1.3037063e+00   1.1269972e+00   6.2482915e-01   5.0991930e-01   6.6827038e-01   7.0479928e-01   8.8358844e-01   4.5581864e-01   7.0086313e-01   4.2667565e-01   2.0656129e-01   4.4535192e-01   5.2942799e-01   7.0086313e-01   6.3861009e-01   2.1269358e-01   1.2261087e+00   1.0119857e+00   1.1001291e+00   8.1638392e-01   4.2667565e-01   7.0479928e-01   5.1691876e-01   6.0611244e-01   6.2538346e-01   6.9006418e-01   8.3812833e-01   6.4290921e-01   1.2418578e-01   7.3145860e-01   1.6044563e+00   6.2660376e-01   5.7324170e-01   5.6700421e-01   4.0293660e-01   1.7981158e+00   6.4806901e-01   1.5090287e+00   5.9426792e-01   1.4258804e+00   9.2264612e-01   1.2221471e+00   2.1613095e+00   1.2165505e+00   1.7814077e+00   1.1712156e+00   1.7475837e+00   7.0233835e-01   7.0776547e-01   1.0386594e+00   7.0233835e-01   1.0228981e+00   9.8450810e-01   8.5105559e-01   2.3257048e+00   2.4547248e+00   7.1504098e-01   1.2838690e+00   6.9369532e-01   2.2753334e+00   4.3691963e-01   1.1508502e+00   1.5109753e+00   4.0438741e-01   4.1449626e-01   1.0168833e+00   
1.3670543e+00   1.6898941e+00   2.2253038e+00   1.0655560e+00   4.1586001e-01   9.0827783e-01   1.9262937e+00   1.2075315e+00   8.3888121e-01   4.0438741e-01   1.0401425e+00   1.2250414e+00   1.0755693e+00   5.9426792e-01   1.3866792e+00   1.3487634e+00   1.0056742e+00   5.9426792e-01   7.2526325e-01   1.0425476e+00   5.0592043e-01   1.2125198e+00   9.0252542e-01   5.4292906e-01   1.0679144e+00   4.5470518e-01   1.2307737e+00   5.6700421e-01   1.3629833e+00   1.1271488e+00   9.3592296e-01   1.1329323e+00   1.4903933e+00   1.5826638e+00   9.2264612e-01   3.7598397e-01   5.1691876e-01   5.3022554e-01   3.4583729e-01   1.5102079e+00   9.0478973e-01   9.6664346e-01   1.3678655e+00   1.0012667e+00   5.0090417e-01   4.9766035e-01   8.1130291e-01   1.0331736e+00   4.5581864e-01   7.6955924e-01   6.0551856e-01   6.0181382e-01   6.0060595e-01   8.1242502e-01   7.2852070e-01   5.0180477e-01   2.4944334e+00   1.5259640e+00   2.4897635e+00   2.0286682e+00   2.2758462e+00   3.2467454e+00   1.0417249e+00   2.8819633e+00   2.2804674e+00   2.7472449e+00   1.6234861e+00   1.7644184e+00   2.0587064e+00   1.4533724e+00   1.6559784e+00   1.8347926e+00   1.9605308e+00   3.3855928e+00   3.5452222e+00   1.4540815e+00   2.2852232e+00   1.3536716e+00   3.3617477e+00   1.3717027e+00   2.2096171e+00   2.5931780e+00   1.2603076e+00   1.3371180e+00   2.0643410e+00   2.4233813e+00   2.7521037e+00   3.2246201e+00   2.0784533e+00   1.5405106e+00   2.0088441e+00   2.9099860e+00   2.1140685e+00   1.9448322e+00   1.2330392e+00   2.0122105e+00   2.1687358e+00   1.8353933e+00   1.5259640e+00   2.4321505e+00   2.2783778e+00   1.8251179e+00   1.4795374e+00   1.7068208e+00   1.9049236e+00   1.5165339e+00   1.1004794e+00   9.4753140e-01   9.4125538e-01   1.1763719e+00   8.5105559e-01   6.6432544e-01   7.2526325e-01   6.4290921e-01   3.2816937e-01   1.2418578e-01   4.4535192e-01   6.2024833e-01   7.0479928e-01   1.2172454e+00   1.3021788e+00   1.3289150e+00   9.6141901e-01   8.9366705e-01   1.3003320e+00   
7.1840099e-01   3.0275928e-01   8.2654509e-01   1.1056650e+00   1.2497790e+00   1.2234738e+00   6.0611244e-01   9.6141901e-01   1.8661545e+00   1.1149070e+00   1.0038051e+00   1.0038051e+00   5.0991930e-01   1.8886923e+00   1.0132664e+00   1.7427900e+00   1.0573285e+00   1.5462225e+00   1.2230220e+00   1.4700179e+00   2.2554582e+00   1.8183902e+00   1.9191337e+00   1.4359851e+00   1.8399871e+00   8.1558458e-01   9.6664346e-01   1.1754055e+00   1.1548215e+00   1.2523175e+00   1.1229906e+00   1.1149070e+00   2.3868096e+00   2.5734684e+00   1.0665149e+00   1.4148192e+00   1.1763719e+00   2.3591336e+00   6.6317860e-01   1.3545005e+00   1.6178623e+00   6.3735887e-01   7.2526325e-01   1.2709820e+00   1.4169523e+00   1.7425222e+00   2.1449779e+00   1.3015611e+00   7.4777660e-01   1.2601890e+00   1.8513630e+00   1.3897316e+00   1.1185330e+00   7.6625946e-01   1.0919712e+00   1.3783420e+00   1.0120221e+00   1.0573285e+00   1.5860263e+00   1.5022608e+00   1.0597541e+00   8.3060013e-01   8.9095811e-01   1.2107055e+00   9.5498315e-01   5.9426792e-01   8.8835966e-01   7.2486328e-01   4.3456114e-01   6.3108414e-01   7.9580667e-01   5.4292906e-01   8.0619006e-01   1.0003942e+00   1.2057554e+00   1.1282371e+00   4.0147421e-01   1.0482443e+00   8.3812833e-01   9.3049742e-01   6.4290921e-01   6.6432544e-01   2.0000000e-01   4.9766035e-01   1.1016264e+00   8.7202528e-01   4.1312257e-01   6.2660376e-01   4.4651726e-01   5.0180477e-01   5.9426792e-01   1.3172979e+00   3.8776762e-01   3.7427929e-01   3.2816937e-01   6.1151102e-01   1.5338492e+00   4.2667565e-01   1.6536633e+00   6.6827038e-01   1.8195408e+00   1.1798960e+00   1.4588731e+00   2.5552364e+00   7.7039952e-01   2.1764356e+00   1.5179392e+00   2.0659196e+00   1.0072663e+00   1.0165138e+00   1.4077317e+00   7.0523271e-01   9.7441804e-01   1.1290808e+00   1.1879078e+00   2.7003420e+00   2.8251568e+00   8.7478495e-01   1.6113870e+00   5.7257017e-01   2.6756977e+00   7.5976039e-01   1.4611141e+00   1.9290721e+00   6.4290921e-01   
5.8851328e-01   1.2509218e+00   1.8216794e+00   2.1226924e+00   2.6556584e+00   1.2741904e+00   8.1527569e-01   1.1396406e+00   2.3658814e+00   1.3219975e+00   1.1377990e+00   4.8036801e-01   1.4418088e+00   1.4695582e+00   1.4097125e+00   6.6827038e-01   1.6762567e+00   1.5624022e+00   1.2731262e+00   8.4812820e-01   1.0423677e+00   1.1235486e+00   6.3808075e-01   7.0328431e-01   2.8507955e-01   9.7377870e-01   3.7598397e-01   8.9971984e-01   6.2538346e-01   6.2988288e-01   8.4591037e-01   1.1042097e+00   1.1949615e+00   5.7867728e-01   6.0121055e-01   4.2418962e-01   4.8036801e-01   2.4837156e-01   1.0588560e+00   6.3735887e-01   8.4050231e-01   1.0216438e+00   6.0900723e-01   3.8776762e-01   3.8934542e-01   3.8934542e-01   6.0900723e-01   2.1269358e-01   1.0100718e+00   3.2586371e-01   3.2816937e-01   3.2816937e-01   4.6472023e-01   1.1765359e+00   3.0546431e-01   2.1603815e+00   1.1833480e+00   2.0696037e+00   1.5733646e+00   1.8844302e+00   2.7913211e+00   1.0279631e+00   2.4069427e+00   1.8096161e+00   2.4013270e+00   1.3194762e+00   1.3641156e+00   1.6852518e+00   1.1847335e+00   1.5344133e+00   1.5901181e+00   1.5133392e+00   2.9601125e+00   3.0929882e+00   9.7994716e-01   1.9359434e+00   1.1341579e+00   2.8969791e+00   1.0267435e+00   1.8174459e+00   2.1353579e+00   9.5498315e-01   1.0067464e+00   1.6752254e+00   1.9600024e+00   2.3015655e+00   2.8161147e+00   1.7177705e+00   1.0636401e+00   1.5095556e+00   2.5229584e+00   1.8388413e+00   1.5016471e+00   9.4558103e-01   1.6620056e+00   1.8661202e+00   1.6246433e+00   1.1833480e+00   2.0524784e+00   1.9917352e+00   1.5896248e+00   1.1449732e+00   1.3635198e+00   1.6539414e+00   1.1377990e+00   7.8695083e-01   1.0194752e+00   6.9369532e-01   4.4535192e-01   6.2538346e-01   7.1169738e-01   8.2684479e-01   7.5791688e-01   8.9971984e-01   7.0394675e-01   1.0777972e+00   8.9366705e-01   9.7548738e-01   7.3805807e-01   6.9369532e-01   9.9348625e-01   1.2013436e+00   9.4287188e-01   2.1845981e-01   9.1163729e-01   
7.8197925e-01   7.4777660e-01   8.0096515e-01   6.3735887e-01   1.5043029e+00   7.0776547e-01   8.7021234e-01   7.8197925e-01   7.0784540e-01   1.6629594e+00   7.2852070e-01   1.7521201e+00   7.5705927e-01   1.5812904e+00   1.1798960e+00   1.4306494e+00   2.2994849e+00   1.3047221e+00   1.9342059e+00   1.3259654e+00   2.0165210e+00   1.0919404e+00   8.7720955e-01   1.2180145e+00   7.1881659e-01   1.0466623e+00   1.2389598e+00   1.1426203e+00   2.5872805e+00   2.5766735e+00   5.0817745e-01   1.4924169e+00   8.3060013e-01   2.3982377e+00   5.8914551e-01   1.4728952e+00   1.7209381e+00   6.3808075e-01   8.3649708e-01   1.1916257e+00   1.5183917e+00   1.7983401e+00   2.4620092e+00   1.2174316e+00   7.4549115e-01   1.1136343e+00   1.9965599e+00   1.5255331e+00   1.1891470e+00   8.2384013e-01   1.2304904e+00   1.3937115e+00   1.1842231e+00   7.5705927e-01   1.6132118e+00   1.5725854e+00   1.1127329e+00   5.8914551e-01   9.8054887e-01   1.4089719e+00   9.0521488e-01   1.1043332e+00   5.3665999e-01   1.1040512e+00   8.6137722e-01   8.5335130e-01   1.0692258e+00   1.3395518e+00   1.4121163e+00   7.2343175e-01   4.0438741e-01   1.4096146e-01   2.1845981e-01   2.5251796e-01   1.2340567e+00   7.2852070e-01   1.0216438e+00   1.2599182e+00   7.7360126e-01   5.1607523e-01   2.1269358e-01   5.0270183e-01   8.3345577e-01   2.1845981e-01   7.4893123e-01   3.4378533e-01   5.3022554e-01   4.5581864e-01   6.9167458e-01   9.4080461e-01   3.4583729e-01   2.3056888e+00   1.2961380e+00   2.2790932e+00   1.7645599e+00   2.0520955e+00   3.0186066e+00   8.9827435e-01   2.6386155e+00   2.0191749e+00   2.5992685e+00   1.4843324e+00   1.5325189e+00   1.8691652e+00   1.2554784e+00   1.5582387e+00   1.7070813e+00   1.7171798e+00   3.2008583e+00   3.3121677e+00   1.1313840e+00   2.1147926e+00   1.1879078e+00   3.1280700e+00   1.1681971e+00   2.0145868e+00   2.3728666e+00   1.0720678e+00   1.1437669e+00   1.8347926e+00   2.2027051e+00   2.5315934e+00   3.0688850e+00   1.8636112e+00   1.2768639e+00   
1.7126039e+00   2.7381221e+00   1.9739212e+00   1.7039473e+00   1.0573285e+00   1.8507968e+00   2.0095044e+00   1.7591313e+00   1.2961380e+00   2.2339736e+00   2.1358764e+00   1.7106141e+00   1.2731262e+00   1.5241199e+00   1.7828037e+00   1.2869134e+00   8.7720955e-01   7.4777660e-01   6.5223271e-01   7.1881659e-01   7.6914805e-01   9.3999899e-01   8.0619006e-01   4.2418962e-01   1.4104707e+00   1.2144845e+00   1.3128167e+00   1.0056742e+00   5.3665999e-01   5.6075294e-01   3.4583729e-01   8.1130291e-01   9.7730901e-01   7.8197925e-01   9.9089002e-01   8.0353565e-01   4.3691963e-01   9.6095130e-01   1.7110336e+00   7.7033318e-01   7.5196795e-01   7.0776547e-01   6.5648056e-01   1.8915404e+00   7.9878917e-01   1.2730931e+00   5.3022554e-01   1.4349259e+00   8.3620494e-01   1.0733200e+00   2.1767273e+00   1.0960883e+00   1.8048569e+00   1.2035173e+00   1.6539414e+00   6.2482915e-01   7.0523271e-01   1.0184370e+00   7.1169738e-01   6.6432544e-01   7.0480730e-01   8.1527569e-01   2.3116343e+00   2.4505705e+00   1.0085601e+00   1.2063335e+00   4.5581864e-01   2.3022338e+00   5.6700421e-01   1.0655560e+00   1.5550492e+00   4.4417983e-01   2.5251796e-01   8.8358844e-01   1.4559276e+00   1.7532140e+00   2.2755980e+00   8.9653332e-01   5.5160819e-01   9.1163729e-01   1.9862884e+00   9.1163729e-01   7.6752131e-01   2.0656129e-01   1.0632598e+00   1.0516761e+00   1.0391247e+00   5.3022554e-01   1.2756158e+00   1.1406052e+00   8.7720955e-01   7.3851529e-01   6.5633874e-01   7.0776547e-01   3.2352160e-01   9.1273187e-01   7.0043186e-01   3.7427929e-01   5.7324170e-01   9.3615100e-01   1.0733200e+00   5.0991930e-01   5.9426792e-01   6.5633874e-01   6.7975091e-01   3.0811765e-01   1.1056650e+00   7.7360126e-01   7.0429250e-01   8.2552685e-01   5.7257017e-01   5.0905001e-01   6.1968386e-01   6.5223271e-01   6.0611244e-01   3.2586371e-01   1.2028939e+00   5.0905001e-01   4.2667565e-01   4.1449626e-01   3.0546431e-01   1.2470767e+00   4.0147421e-01   2.1213832e+00   1.1521791e+00   
2.0070710e+00   1.6126950e+00   1.8637576e+00   2.7490677e+00   1.2370832e+00   2.3910690e+00   1.8274132e+00   2.3004229e+00   1.1979861e+00   1.3368881e+00   1.5972416e+00   1.1093572e+00   1.3693737e+00   1.4644753e+00   1.5211725e+00   2.9013543e+00   3.0559175e+00   1.0590298e+00   1.8374244e+00   1.0423677e+00   2.8588399e+00   9.4309624e-01   1.7735968e+00   2.0979142e+00   8.5205778e-01   9.4287188e-01   1.6546836e+00   1.9109434e+00   2.2424413e+00   2.7118839e+00   1.6774684e+00   1.1029298e+00   1.6007141e+00   2.3898698e+00   1.7557336e+00   1.5191033e+00   8.5462626e-01   1.5344007e+00   1.7571295e+00   1.3898545e+00   1.1521791e+00   1.9987470e+00   1.8815752e+00   1.4085850e+00   1.0646687e+00   1.2740417e+00   1.5577803e+00   1.1296247e+00   4.0176783e-01   6.5223271e-01   6.3977563e-01   5.3022554e-01   5.7324170e-01   5.2574978e-01   1.4438552e+00   1.2221471e+00   1.3131724e+00   1.0406064e+00   3.4583729e-01   9.5943875e-01   9.2859317e-01   6.5172743e-01   5.1607523e-01   9.7548738e-01   1.0611732e+00   8.6137722e-01   5.3665999e-01   9.4854455e-01   1.8311457e+00   8.7420176e-01   8.7170815e-01   8.4050231e-01   6.5223271e-01   2.0303919e+00   8.9917007e-01   1.3866318e+00   5.7867728e-01   1.2002762e+00   7.4740267e-01   1.0440187e+00   1.9213397e+00   1.4087466e+00   1.5433565e+00   9.2836103e-01   1.6392533e+00   7.7360126e-01   5.0592043e-01   8.5585239e-01   6.8961791e-01   9.5035453e-01   9.5498315e-01   7.0776547e-01   2.1812146e+00   2.2081369e+00   3.7427929e-01   1.1335961e+00   7.7885297e-01   2.0291151e+00   3.2352160e-01   1.0661822e+00   1.3165513e+00   3.7598397e-01   5.3588338e-01   8.2305664e-01   1.1437730e+00   1.4415965e+00   2.0902718e+00   8.7848692e-01   3.2352160e-01   7.0479928e-01   1.6865203e+00   1.1904611e+00   7.5791688e-01   5.5492130e-01   8.9207714e-01   1.0805899e+00   9.6271042e-01   5.7867728e-01   1.2256881e+00   1.2481462e+00   8.8358844e-01   4.0147421e-01   6.4405773e-01   1.0887986e+00   5.9426792e-01   
4.4651726e-01   5.4292906e-01   7.0437330e-01   7.0776547e-01   3.2816937e-01   1.2134101e+00   9.8820253e-01   1.0733200e+00   8.1099042e-01   4.9857388e-01   7.2172678e-01   6.5223271e-01   6.3808075e-01   5.3665999e-01   6.9369532e-01   8.2305664e-01   6.2482915e-01   2.5251796e-01   7.1799256e-01   1.5895397e+00   6.2205176e-01   5.7257017e-01   5.6769031e-01   4.0438741e-01   1.7935777e+00   6.4755655e-01   1.6267976e+00   7.4777660e-01   1.4807336e+00   9.7270522e-01   1.3173487e+00   2.1839601e+00   1.2277129e+00   1.7938033e+00   1.1948932e+00   1.8448199e+00   8.7383925e-01   8.2305664e-01   1.1418127e+00   8.4591037e-01   1.2153720e+00   1.1640914e+00   9.1163729e-01   2.3638833e+00   2.4815883e+00   6.3861009e-01   1.3946921e+00   8.5434758e-01   2.2902807e+00   6.1151102e-01   1.2452704e+00   1.5325394e+00   6.0121055e-01   6.1092863e-01   1.1228379e+00   1.3752705e+00   1.7103060e+00   2.2560685e+00   1.1879078e+00   4.5470518e-01   9.0454394e-01   1.9729066e+00   1.3650300e+00   9.0521488e-01   6.0670504e-01   1.1454006e+00   1.3674559e+00   1.2262704e+00   7.4777660e-01   1.4820085e+00   1.4947429e+00   1.1729895e+00   7.3145860e-01   8.7720955e-01   1.2163831e+00   6.5633874e-01   2.1845981e-01   5.6769031e-01   7.4777660e-01   4.2538717e-01   9.5099818e-01   9.7994716e-01   1.0119857e+00   6.5223271e-01   8.3888121e-01   1.0038051e+00   5.9426792e-01   4.6472023e-01   6.0121055e-01   8.0326782e-01   9.2836103e-01   9.0876485e-01   3.7598397e-01   6.3861009e-01   1.5602029e+00   8.0326782e-01   7.0129382e-01   7.0043186e-01   2.0000000e-01   1.6208239e+00   7.0437330e-01   1.8619092e+00   9.6271042e-01   1.6849072e+00   1.3188999e+00   1.5860263e+00   2.4084158e+00   1.5142414e+00   2.0543079e+00   1.5230852e+00   1.9980352e+00   9.4375082e-01   1.0588560e+00   1.3035649e+00   1.0035600e+00   1.2499342e+00   1.2459608e+00   1.2225634e+00   2.5586145e+00   2.7210925e+00   8.9366705e-01   1.5500052e+00   1.0014633e+00   2.5139485e+00   6.9369532e-01   
1.4790710e+00   1.7580510e+00   6.2660376e-01   7.0429250e-01   1.3804167e+00   1.5625881e+00   1.8966943e+00   2.3518757e+00   1.4139741e+00   8.0358695e-01   1.3075101e+00   2.0455018e+00   1.5153654e+00   1.2234738e+00   6.6539428e-01   1.2342162e+00   1.5049644e+00   1.1591754e+00   9.6271042e-01   1.7107332e+00   1.6328100e+00   1.1880428e+00   8.3812833e-01   1.0106392e+00   1.3267389e+00   8.9767734e-01   4.2538717e-01   6.2024833e-01   6.0181382e-01   1.1431021e+00   1.1948932e+00   1.2269747e+00   8.6361309e-01   8.2552685e-01   1.2002640e+00   6.5223271e-01   3.0811765e-01   7.1462831e-01   1.0067784e+00   1.1396406e+00   1.1147518e+00   5.0817745e-01   8.5335130e-01   1.7711504e+00   1.0085601e+00   9.0454394e-01   9.0277242e-01   4.0438741e-01   1.8140813e+00   9.1075311e-01   1.7412567e+00   9.8054887e-01   1.5527694e+00   1.2155004e+00   1.4692412e+00   2.2702600e+00   1.7126039e+00   1.9279661e+00   1.4238090e+00   1.8539569e+00   8.1558458e-01   9.5035453e-01   1.1763980e+00   1.0621081e+00   1.2047214e+00   1.1205013e+00   1.1134787e+00   2.4108292e+00   2.5851693e+00   9.6095130e-01   1.4178113e+00   1.0879524e+00   2.3751496e+00   6.0900723e-01   1.3570688e+00   1.6277043e+00   5.7015910e-01   6.6491075e-01   1.2652657e+00   1.4292566e+00   1.7566567e+00   2.1846001e+00   1.2961380e+00   7.1840099e-01   1.2329148e+00   1.8795815e+00   1.3897316e+00   1.1149070e+00   6.8757066e-01   1.0960883e+00   1.3785366e+00   1.0168833e+00   9.8054887e-01   1.5871961e+00   1.5043071e+00   1.0597541e+00   7.7033318e-01   8.8861541e-01   1.2058675e+00   8.9134001e-01   3.4583729e-01   8.1130291e-01   1.5090287e+00   1.4644753e+00   1.5150043e+00   1.1847335e+00   8.1385214e-01   1.4041085e+00   8.9917007e-01   3.0811765e-01   6.6539428e-01   1.2643026e+00   1.3836712e+00   1.3112758e+00   7.0784540e-01   1.1353806e+00   2.0777059e+00   1.2396136e+00   1.1498269e+00   1.1474460e+00   6.9006418e-01   2.1775976e+00   1.1752673e+00   1.4613032e+00   1.0391247e+00   
1.1810170e+00   8.7504951e-01   1.1390131e+00   1.8670836e+00   1.9048338e+00   1.5205305e+00   1.0228981e+00   1.5676403e+00   6.7975091e-01   6.6539428e-01   8.7175869e-01   1.1533602e+00   1.2459608e+00   9.7730901e-01   7.5082357e-01   2.0536508e+00   2.1838261e+00   8.9095811e-01   1.1306949e+00   1.2394907e+00   1.9665910e+00   5.6769031e-01   1.0425476e+00   1.2324706e+00   6.4704320e-01   7.3851529e-01   9.5476489e-01   1.0198386e+00   1.3510699e+00   1.8411319e+00   1.0100718e+00   5.2942799e-01   9.3801395e-01   1.5112621e+00   1.2002762e+00   7.7763126e-01   8.2899253e-01   8.2305664e-01   1.1377990e+00   9.1663180e-01   1.0391247e+00   1.2653669e+00   1.2757312e+00   9.2288144e-01   6.4405773e-01   6.6827038e-01   1.0851476e+00   9.3048953e-01   7.7074935e-01   1.6569692e+00   1.5391185e+00   1.6134578e+00   1.2794849e+00   7.1504098e-01   1.3197776e+00   7.9613242e-01   3.2586371e-01   8.6165877e-01   1.2653669e+00   1.4028652e+00   1.2701139e+00   6.6384020e-01   1.2172454e+00   2.1489775e+00   1.2262704e+00   1.1578646e+00   1.1452867e+00   7.9613242e-01   2.2808589e+00   1.1959482e+00   1.1500393e+00   9.1075311e-01   9.3999899e-01   6.4806901e-01   8.5434758e-01   1.6806723e+00   1.8184542e+00   1.3334814e+00   8.5205778e-01   1.2702636e+00   3.4583729e-01   4.3456114e-01   5.6700421e-01   1.0389435e+00   1.0122141e+00   6.4290921e-01   5.0905001e-01   1.8419636e+00   1.9902374e+00   9.3801395e-01   8.1810461e-01   1.1069580e+00   1.7940242e+00   4.4651726e-01   7.4740267e-01   1.0346741e+00   5.1691876e-01   6.0121055e-01   6.6827038e-01   8.5205778e-01   1.1776640e+00   1.6778021e+00   7.0776547e-01   4.2667565e-01   7.8695083e-01   1.3400806e+00   8.6165877e-01   5.3022554e-01   7.0437330e-01   5.0592043e-01   8.1273630e-01   6.0670504e-01   9.1075311e-01   9.7270522e-01   9.4352681e-01   6.0551856e-01   5.7257017e-01   3.4378533e-01   7.5705927e-01   8.0064372e-01   1.0450018e+00   8.4812820e-01   9.3847194e-01   6.2988288e-01   6.0611244e-01   
6.0060595e-01   5.0090417e-01   7.0784540e-01   6.2538346e-01   5.0592043e-01   6.6932542e-01   5.5492130e-01   1.5422108e-01   5.6075294e-01   1.4235605e+00   4.6472023e-01   4.2418962e-01   3.8776762e-01   2.8192292e-01   1.5978583e+00   4.5581864e-01   1.6256459e+00   6.5633874e-01   1.5986180e+00   1.1106412e+00   1.3708966e+00   2.3519748e+00   1.1147518e+00   1.9798165e+00   1.3654173e+00   1.8856245e+00   7.7033318e-01   8.5335130e-01   1.1771643e+00   6.8076724e-01   9.7270522e-01   1.0165138e+00   1.0391247e+00   2.5081056e+00   2.6437138e+00   7.6716823e-01   1.4120836e+00   6.1947990e-01   2.4692682e+00   4.8927739e-01   1.3077572e+00   1.7030709e+00   3.8934542e-01   4.4651726e-01   1.1594648e+00   1.5551984e+00   1.8760773e+00   2.3977345e+00   1.1868139e+00   6.2024833e-01   1.1056650e+00   2.0821572e+00   1.2794849e+00   1.0244319e+00   3.7427929e-01   1.1646003e+00   1.3139135e+00   1.1119327e+00   6.5633874e-01   1.5344007e+00   1.4333755e+00   1.0386594e+00   6.3735887e-01   8.2372435e-01   1.0901359e+00   6.2081167e-01   3.4583729e-01   2.8192292e-01   4.1586001e-01   1.6237100e+00   1.0540105e+00   1.1816401e+00   1.4110536e+00   9.8450810e-01   6.6432544e-01   5.3665999e-01   9.0454394e-01   1.1389644e+00   5.0905001e-01   7.1799256e-01   7.1504098e-01   7.3813096e-01   7.2783368e-01   8.7021234e-01   6.9006418e-01   6.2482915e-01   2.6619364e+00   1.6754669e+00   2.5821869e+00   2.1421834e+00   2.4107926e+00   3.3209792e+00   1.2036484e+00   2.9531363e+00   2.3720162e+00   2.8824828e+00   1.7671769e+00   1.8842354e+00   2.1714345e+00   1.6176764e+00   1.8723516e+00   2.0134817e+00   2.0676751e+00   3.4808493e+00   3.6264553e+00   1.5230852e+00   2.4135751e+00   1.5351194e+00   3.4281547e+00   1.4948868e+00   2.3378347e+00   2.6680945e+00   1.3977032e+00   1.4832928e+00   2.1976523e+00   2.4795532e+00   2.8157449e+00   3.2956170e+00   2.2214438e+00   1.6336229e+00   2.1064881e+00   2.9775369e+00   2.2994849e+00   2.0603946e+00   1.3916739e+00   
2.1189748e+00   2.3204945e+00   1.9672068e+00   1.6754669e+00   2.5635110e+00   2.4436082e+00   1.9753271e+00   1.6077195e+00   1.8374244e+00   2.0981613e+00   1.6585806e+00   1.2418578e-01   3.7598397e-01   1.3405045e+00   8.3812833e-01   1.1437669e+00   1.3915412e+00   8.9095811e-01   6.2538346e-01   2.5251796e-01   6.0611244e-01   9.6791960e-01   3.4583729e-01   6.2205176e-01   4.5581864e-01   6.5223271e-01   5.7867728e-01   8.2619017e-01   8.2654509e-01   4.6472023e-01   2.4061696e+00   1.3868130e+00   2.3985329e+00   1.8751958e+00   2.1591630e+00   3.1397173e+00   8.9852394e-01   2.7599154e+00   2.1335771e+00   2.7193241e+00   1.6021202e+00   1.6415861e+00   1.9851797e+00   1.3336069e+00   1.6224878e+00   1.8082080e+00   1.8350829e+00   3.3292261e+00   3.4297053e+00   1.2340567e+00   2.2298076e+00   1.2654843e+00   3.2489933e+00   1.2770118e+00   2.1337366e+00   2.4988032e+00   1.1786349e+00   1.2545301e+00   1.9393053e+00   2.3288114e+00   2.6536325e+00   3.2010862e+00   1.9648876e+00   1.3964978e+00   1.8188234e+00   2.8588023e+00   2.0766308e+00   1.8216743e+00   1.1634384e+00   1.9698860e+00   2.1147926e+00   1.8660999e+00   1.3868130e+00   2.3472906e+00   2.2417376e+00   1.8131734e+00   1.3752391e+00   1.6372749e+00   1.8856245e+00   1.3922071e+00   4.0176783e-01   1.4467170e+00   9.3049742e-01   1.2002762e+00   1.4411886e+00   9.4375082e-01   6.6432544e-01   3.7427929e-01   7.0784540e-01   1.0466623e+00   4.0176783e-01   5.6700421e-01   5.5492130e-01   6.9728513e-01   6.4405773e-01   8.7170815e-01   7.3535471e-01   5.3309112e-01   2.5193321e+00   1.5039793e+00   2.4895662e+00   1.9791576e+00   2.2673478e+00   3.2268480e+00   1.0014633e+00   2.8466240e+00   2.2303173e+00   2.8133325e+00   1.6962564e+00   1.7458338e+00   2.0805039e+00   1.4552205e+00   1.7454671e+00   1.9165907e+00   1.9332869e+00   3.4131779e+00   3.5208177e+00   1.3379696e+00   2.3275025e+00   1.3866044e+00   3.3340853e+00   1.3784233e+00   2.2317775e+00   2.5822073e+00   1.2828332e+00   
1.3595018e+00   2.0485193e+00   2.4057315e+00   2.7355566e+00   3.2722484e+00   2.0762069e+00   1.4905436e+00   1.9191337e+00   2.9375895e+00   2.1864773e+00   1.9213461e+00   1.2702636e+00   2.0582667e+00   2.2206505e+00   1.9521784e+00   1.5039793e+00   2.4497583e+00   2.3480580e+00   1.9124077e+00   1.4817438e+00   1.7372199e+00   1.9937367e+00   1.5016471e+00   1.2122249e+00   6.7975091e-01   8.4050231e-01   1.0796583e+00   6.6539428e-01   3.4583729e-01   3.2816937e-01   5.2942799e-01   7.3145860e-01   1.2418578e-01   9.1163729e-01   3.2586371e-01   3.7427929e-01   3.2816937e-01   5.0592043e-01   1.0122141e+00   2.1845981e-01   2.2481791e+00   1.2631020e+00   2.1874158e+00   1.7295419e+00   1.9965608e+00   2.9333950e+00   1.0072663e+00   2.5632712e+00   1.9670002e+00   2.4858980e+00   1.3655398e+00   1.4724669e+00   1.7716601e+00   1.2125198e+00   1.4903113e+00   1.6105641e+00   1.6572339e+00   3.0940465e+00   3.2344170e+00   1.1332978e+00   2.0129643e+00   1.1341579e+00   3.0445772e+00   1.0864449e+00   1.9287276e+00   2.2802541e+00   9.8820253e-01   1.0688498e+00   1.7838044e+00   2.1039999e+00   2.4365934e+00   2.9349538e+00   1.8084630e+00   1.2266837e+00   1.7026843e+00   2.6138892e+00   1.8911946e+00   1.6476803e+00   9.7949166e-01   1.7305789e+00   1.9168750e+00   1.6065247e+00   1.2631020e+00   2.1541966e+00   2.0395401e+00   1.5896248e+00   1.1996741e+00   1.4306494e+00   1.6928004e+00   1.2436109e+00   7.5791688e-01   8.1242502e-01   7.6625946e-01   7.5976039e-01   1.0289803e+00   1.1332978e+00   7.9613242e-01   5.3665999e-01   1.1149070e+00   1.9007091e+00   9.2836103e-01   9.3610001e-01   9.1858284e-01   8.1638392e-01   2.1493214e+00   1.0132664e+00   1.1680362e+00   3.2352160e-01   1.2372418e+00   5.4292906e-01   8.7170815e-01   1.9366254e+00   1.1486378e+00   1.5564198e+00   8.7420176e-01   1.5682049e+00   6.6491075e-01   4.5470518e-01   8.8358844e-01   4.5581864e-01   8.0326782e-01   7.9878917e-01   5.9426792e-01   2.1460410e+00   2.1931311e+00   
5.0180477e-01   1.0950112e+00   5.0592043e-01   2.0545952e+00   3.4378533e-01   9.3923979e-01   1.3512935e+00   3.4583729e-01   3.4583729e-01   6.6539428e-01   1.2670555e+00   1.5369942e+00   2.1465859e+00   7.2526325e-01   3.0546431e-01   5.0991930e-01   1.8206746e+00   9.8054887e-01   5.7015910e-01   3.8776762e-01   9.6664346e-01   9.9089002e-01   1.0262547e+00   3.2352160e-01   1.1127329e+00   1.1166017e+00   8.7848692e-01   3.8934542e-01   5.8914551e-01   8.8062848e-01   3.2586371e-01   6.4755655e-01   1.3011270e+00   1.0122141e+00   4.2538717e-01   6.2660376e-01   4.4651726e-01   7.0086313e-01   6.3735887e-01   1.2926374e+00   4.0176783e-01   4.2288438e-01   3.8934542e-01   8.0619006e-01   1.5230852e+00   4.6472023e-01   1.6933635e+00   7.0233835e-01   1.9584922e+00   1.2594846e+00   1.5411691e+00   2.6785768e+00   6.2605182e-01   2.3003744e+00   1.6284481e+00   2.1882128e+00   1.1729895e+00   1.1500393e+00   1.5577059e+00   7.1881659e-01   9.8985697e-01   1.2389598e+00   1.3108618e+00   2.8217641e+00   2.9357847e+00   9.3026633e-01   1.7457596e+00   5.7867728e-01   2.7994225e+00   9.3610001e-01   1.5801828e+00   2.0692197e+00   8.2384013e-01   7.4740267e-01   1.3410314e+00   1.9783833e+00   2.2683159e+00   2.8062463e+00   1.3610783e+00   9.7249562e-01   1.1868139e+00   2.5244698e+00   1.3852951e+00   1.2460824e+00   6.3808075e-01   1.6077195e+00   1.5873000e+00   1.5826476e+00   7.0233835e-01   1.7831878e+00   1.6667819e+00   1.4276261e+00   9.9519977e-01   1.1984588e+00   1.1903343e+00   7.0386584e-01   7.1840099e-01   1.1107977e+00   5.8624446e-01   9.8495853e-01   8.7504951e-01   4.1586001e-01   8.7720955e-01   1.5824669e+00   7.5976039e-01   5.5160819e-01   5.7741073e-01   5.4292906e-01   1.6736318e+00   6.7975091e-01   1.5883552e+00   8.2552685e-01   1.5948732e+00   1.1332978e+00   1.3595997e+00   2.3503762e+00   1.2554784e+00   1.9862884e+00   1.4596621e+00   1.8378727e+00   7.2852070e-01   9.6204649e-01   1.1697902e+00   9.6664346e-01   9.6271042e-01   
9.5676647e-01   1.0496979e+00   2.4751922e+00   2.6546397e+00   1.2224367e+00   1.3843268e+00   7.2343175e-01   2.4751922e+00   7.5082357e-01   1.2832075e+00   1.7000773e+00   6.2988288e-01   5.0592043e-01   1.1833351e+00   1.5613251e+00   1.8886923e+00   2.3645560e+00   1.2016233e+00   7.5791688e-01   1.2125198e+00   2.0748074e+00   1.2155370e+00   1.0244319e+00   4.5470518e-01   1.1485394e+00   1.2770118e+00   1.0720678e+00   8.2552685e-01   1.5113992e+00   1.3835368e+00   1.0035600e+00   9.5571254e-01   8.2234151e-01   1.0120221e+00   6.5223271e-01   8.3888121e-01   1.1486378e+00   1.2994764e+00   1.2307737e+00   6.0181382e-01   1.0483827e+00   1.9867752e+00   1.1408504e+00   1.0391247e+00   1.0361698e+00   5.7867728e-01   2.0669733e+00   1.0646687e+00   1.4623898e+00   9.5866719e-01   1.2497790e+00   9.3048953e-01   1.1765359e+00   1.9666356e+00   1.8175297e+00   1.6242657e+00   1.1520347e+00   1.5603665e+00   5.7324170e-01   7.0233835e-01   8.8887100e-01   1.0919404e+00   1.1355826e+00   8.9712482e-01   8.1385214e-01   2.1052360e+00   2.2845234e+00   1.0166932e+00   1.1341579e+00   1.1332978e+00   2.0738150e+00   5.3309112e-01   1.0588560e+00   1.3224963e+00   5.5492130e-01   6.2538346e-01   9.8450810e-01   1.1271488e+00   1.4561933e+00   1.8911946e+00   1.0230441e+00   5.2574978e-01   1.0056742e+00   1.5916843e+00   1.1355826e+00   8.2105460e-01   7.1504098e-01   8.1527569e-01   1.1176720e+00   8.2899253e-01   9.5866719e-01   1.2943100e+00   1.2429818e+00   8.5205778e-01   7.0233835e-01   6.2660376e-01   9.8054887e-01   8.3649708e-01   8.7822463e-01   8.2899253e-01   8.1099042e-01   7.0826681e-01   5.8914551e-01   1.5042268e+00   7.3813096e-01   8.1558458e-01   7.4855857e-01   6.0121055e-01   1.6260946e+00   7.0386584e-01   1.8605327e+00   8.8503502e-01   1.6493191e+00   1.2601890e+00   1.5390703e+00   2.3592515e+00   1.4088394e+00   1.9940473e+00   1.4245508e+00   2.0716002e+00   1.1004436e+00   9.8820253e-01   1.2927814e+00   9.0056222e-01   1.2208301e+00   
1.3194807e+00   1.1996741e+00   2.6153308e+00   2.6539963e+00   6.2538346e-01   1.5687169e+00   9.5271386e-01   2.4562038e+00   6.6491075e-01   1.5249255e+00   1.7538274e+00   6.6539428e-01   8.2619017e-01   1.3131724e+00   1.5409345e+00   1.8470010e+00   2.4533073e+00   1.3504603e+00   7.7039952e-01   1.2057554e+00   2.0352149e+00   1.6006330e+00   1.2331989e+00   8.0660588e-01   1.2747177e+00   1.5040391e+00   1.2426449e+00   8.8503502e-01   1.7019078e+00   1.6700310e+00   1.2144845e+00   7.4855857e-01   1.0401425e+00   1.4600567e+00   9.3296062e-01   5.0180477e-01   4.4651726e-01   6.2149089e-01   4.1586001e-01   1.0078327e+00   3.0275928e-01   1.4096146e-01   1.4096146e-01   6.0611244e-01   1.1536782e+00   2.0656129e-01   2.0491051e+00   1.0646687e+00   2.0979729e+00   1.5528443e+00   1.8279176e+00   2.8469870e+00   8.2234151e-01   2.4692682e+00   1.8399871e+00   2.3632803e+00   1.2493717e+00   1.3312249e+00   1.6756749e+00   1.0425476e+00   1.3092012e+00   1.4505265e+00   1.5123788e+00   2.9884772e+00   3.1361386e+00   1.0755693e+00   1.9017004e+00   9.3801395e-01   2.9635613e+00   9.8054887e-01   1.7833384e+00   2.1970231e+00   8.6513410e-01   8.9742724e-01   1.6159903e+00   2.0524973e+00   2.3760856e+00   2.8811560e+00   1.6399646e+00   1.0934620e+00   1.5205305e+00   2.5867433e+00   1.6928004e+00   1.4836711e+00   7.9580667e-01   1.6659943e+00   1.7839298e+00   1.5792930e+00   1.0646687e+00   2.0113485e+00   1.8901379e+00   1.5067717e+00   1.0950112e+00   1.3129189e+00   1.4875372e+00   1.0389435e+00   4.0293660e-01   8.0499049e-01   3.0546431e-01   7.8197925e-01   2.5251796e-01   5.1691876e-01   4.2538717e-01   7.4740267e-01   1.0181000e+00   3.2586371e-01   2.1717162e+00   1.1533602e+00   2.2234347e+00   1.6690840e+00   1.9433381e+00   2.9713636e+00   7.2486328e-01   2.5929835e+00   1.9489982e+00   2.5241649e+00   1.4089364e+00   1.4425304e+00   1.8012344e+00   1.0919712e+00   1.3726860e+00   1.5830057e+00   1.6408468e+00   3.1546522e+00   3.2544456e+00   
1.0406064e+00   2.0352149e+00   1.0168833e+00   3.0859269e+00   1.0901359e+00   1.9325796e+00   2.3348454e+00   9.8054887e-01   1.0379132e+00   1.7250039e+00   2.1825260e+00   2.4995667e+00   3.0529994e+00   1.7458338e+00   1.2167151e+00   1.6214915e+00   2.7108297e+00   1.8418195e+00   1.6195190e+00   9.3847194e-01   1.7995863e+00   1.9034198e+00   1.7022897e+00   1.1533602e+00   2.1413027e+00   2.0233319e+00   1.6211869e+00   1.1770266e+00   1.4411886e+00   1.6502968e+00   1.1644030e+00   6.5633874e-01   4.4417983e-01   1.1332978e+00   2.1845981e-01   4.2538717e-01   3.4583729e-01   7.1504098e-01   1.4080793e+00   3.4583729e-01   1.8866180e+00   8.7848692e-01   1.9819543e+00   1.3312249e+00   1.6523803e+00   2.6988671e+00   6.9006418e-01   2.3100474e+00   1.6455737e+00   2.2934334e+00   1.2426449e+00   1.1905954e+00   1.5932297e+00   8.9095811e-01   1.2681309e+00   1.4065584e+00   1.3487634e+00   2.8835141e+00   2.9705897e+00   7.3805807e-01   1.8205354e+00   8.5462626e-01   2.8117234e+00   9.3049742e-01   1.6679957e+00   2.0758969e+00   8.4050231e-01   8.3060013e-01   1.4419145e+00   1.9521697e+00   2.2599493e+00   2.8310619e+00   1.4807336e+00   9.4558103e-01   1.2404967e+00   2.5235709e+00   1.6070713e+00   1.3102444e+00   7.5705927e-01   1.6281130e+00   1.7021627e+00   1.6240596e+00   8.7848692e-01   1.8790831e+00   1.8155245e+00   1.5040391e+00   1.0014633e+00   1.2481462e+00   1.4334902e+00   8.6165877e-01   6.6827038e-01   1.5475692e+00   5.8914551e-01   5.0503591e-01   4.9857388e-01   3.0811765e-01   1.7160413e+00   5.7324170e-01   1.5795964e+00   6.5648056e-01   1.4967461e+00   1.0182895e+00   1.3034549e+00   2.2380042e+00   1.2266837e+00   1.8619092e+00   1.2701139e+00   1.8007564e+00   7.2852070e-01   7.9016429e-01   1.0989735e+00   7.5705927e-01   1.0406064e+00   1.0184370e+00   9.3999899e-01   2.3886514e+00   2.5357185e+00   8.2684479e-01   1.3424112e+00   7.0776547e-01   2.3522207e+00   4.8927739e-01   1.2205493e+00   1.5832517e+00   4.2667565e-01   
4.4417983e-01   1.0974061e+00   1.4319225e+00   1.7587110e+00   2.2711652e+00   1.1390131e+00   5.1691876e-01   1.0163549e+00   1.9782498e+00   1.2532075e+00   9.2859317e-01   4.1449626e-01   1.0852663e+00   1.2786117e+00   1.0887986e+00   6.5648056e-01   1.4596621e+00   1.3991741e+00   1.0313560e+00   6.6932542e-01   7.7553525e-01   1.0741917e+00   5.7257017e-01   9.4558103e-01   2.5651975e-01   4.1449626e-01   3.2816937e-01   4.8135521e-01   1.0908017e+00   2.1845981e-01   2.1701312e+00   1.1752673e+00   2.1084262e+00   1.6352583e+00   1.9101184e+00   2.8518881e+00   9.7825559e-01   2.4781934e+00   1.8745369e+00   2.4235816e+00   1.3078976e+00   1.3842113e+00   1.6965018e+00   1.1329323e+00   1.4319225e+00   1.5496439e+00   1.5691346e+00   3.0266197e+00   3.1503439e+00   1.0244319e+00   1.9425540e+00   1.0627606e+00   2.9623467e+00   1.0056742e+00   1.8539828e+00   2.2026387e+00   9.1163729e-01   9.9475949e-01   1.6952454e+00   2.0275673e+00   2.3581240e+00   2.8793190e+00   1.7227544e+00   1.1332978e+00   1.6029963e+00   2.5482247e+00   1.8278913e+00   1.5611067e+00   9.1163729e-01   1.6653066e+00   1.8462692e+00   1.5634147e+00   1.1752673e+00   2.0757295e+00   1.9739212e+00   1.5320003e+00   1.1179743e+00   1.3567326e+00   1.6362950e+00   1.1594648e+00   9.9475949e-01   1.1004436e+00   1.0720678e+00   1.4108494e+00   3.2816937e-01   9.8054887e-01   2.9240179e+00   1.9013501e+00   3.0016960e+00   2.4403742e+00   2.7185134e+00   3.7481490e+00   1.2643026e+00   3.3676733e+00   2.7249658e+00   3.2951180e+00   2.1701459e+00   2.2231652e+00   2.5778372e+00   1.8193838e+00   2.0594742e+00   2.3383666e+00   2.4210417e+00   3.9277558e+00   4.0319248e+00   1.8011138e+00   2.8105150e+00   1.7324239e+00   3.8595400e+00   1.8657887e+00   2.7098209e+00   3.1083486e+00   1.7551534e+00   1.8090259e+00   2.5002193e+00   2.9463843e+00   3.2688068e+00   3.8087204e+00   2.5182043e+00   1.9933741e+00   2.3692479e+00   3.4705738e+00   2.5885717e+00   2.3959721e+00   1.7005893e+00   
2.5655126e+00   2.6734142e+00   2.4311441e+00   1.9013501e+00   2.9205331e+00   2.7876433e+00   2.3735872e+00   1.9516947e+00   2.2170194e+00   2.3879674e+00   1.9213461e+00   3.0546431e-01   2.0656129e-01   6.0611244e-01   1.2246352e+00   1.4096146e-01   1.9777274e+00   9.7249562e-01   2.0297383e+00   1.4616539e+00   1.7458338e+00   2.7737198e+00   7.5082357e-01   2.3931826e+00   1.7478866e+00   2.3213742e+00   1.2131545e+00   1.2498134e+00   1.6130724e+00   9.3824087e-01   1.2565757e+00   1.4029855e+00   1.4325768e+00   2.9414941e+00   3.0577671e+00   8.7720955e-01   1.8438146e+00   8.6956871e-01   2.8895427e+00   9.1310225e-01   1.7228354e+00   2.1321061e+00   8.0499049e-01   8.3345577e-01   1.5318874e+00   1.9898963e+00   2.3090270e+00   2.8491218e+00   1.5587730e+00   1.0122141e+00   1.4162017e+00   2.5326059e+00   1.6463627e+00   1.4047678e+00   7.3805807e-01   1.6165635e+00   1.7228488e+00   1.5520745e+00   9.7249562e-01   1.9420274e+00   1.8372522e+00   1.4628493e+00   1.0030700e+00   1.2523175e+00   1.4531349e+00   9.5571254e-01   1.2418578e-01   5.0270183e-01   1.2603076e+00   2.1269358e-01   1.9932786e+00   1.0168833e+00   1.9946994e+00   1.4557537e+00   1.7495699e+00   2.7337358e+00   9.0575661e-01   2.3519748e+00   1.7324239e+00   2.2796281e+00   1.1801240e+00   1.2450709e+00   1.5872286e+00   1.0267435e+00   1.3336069e+00   1.4152303e+00   1.4096199e+00   2.8778917e+00   3.0268604e+00   1.0067464e+00   1.8215944e+00   9.3824087e-01   2.8471683e+00   9.0658670e-01   1.6933635e+00   2.0801243e+00   8.0758367e-01   8.3783744e-01   1.5390703e+00   1.9310038e+00   2.2599493e+00   2.7651778e+00   1.5728839e+00   9.7949166e-01   1.4165336e+00   2.4822593e+00   1.6510537e+00   1.3842113e+00   7.5755387e-01   1.5773217e+00   1.7253276e+00   1.5255331e+00   1.0168833e+00   1.9287244e+00   1.8366596e+00   1.4599710e+00   1.0339865e+00   1.2378278e+00   1.4537266e+00   9.7249562e-01   5.0090417e-01   1.2507669e+00   1.2418578e-01   1.9589833e+00   9.7270522e-01   
1.9792779e+00   1.4437673e+00   1.7230625e+00   2.7260686e+00   8.6012420e-01   2.3478326e+00   1.7190893e+00   2.2590861e+00   1.1454006e+00   1.2174316e+00   1.5614941e+00   9.5476489e-01   1.2555979e+00   1.3635198e+00   1.3969297e+00   2.8759951e+00   3.0161959e+00   9.4558103e-01   1.7925890e+00   8.6983677e-01   2.8416706e+00   8.6513410e-01   1.6742876e+00   2.0756986e+00   7.5871717e-01   7.9613242e-01   1.5107481e+00   1.9286915e+00   2.2531942e+00   2.7669732e+00   1.5384446e+00   9.7270522e-01   1.4111029e+00   2.4671050e+00   1.6105641e+00   1.3717027e+00   7.0429250e-01   1.5517600e+00   1.6837214e+00   1.4807336e+00   9.7270522e-01   1.9032219e+00   1.7953587e+00   1.4097072e+00   9.7855477e-01   1.2036484e+00   1.4110536e+00   9.4309624e-01   1.5090287e+00   5.0905001e-01   1.8619092e+00   9.1163729e-01   1.7230625e+00   1.3188999e+00   1.5883552e+00   2.4588872e+00   1.3193952e+00   2.0946464e+00   1.5338492e+00   2.0321740e+00   9.5099818e-01   1.0604511e+00   1.3277861e+00   9.3296062e-01   1.2221471e+00   1.2470767e+00   1.2266837e+00   2.6106370e+00   2.7667028e+00   8.7420176e-01   1.5746612e+00   8.9852394e-01   2.5679581e+00   6.9369532e-01   1.4905436e+00   1.8028753e+00   6.2149089e-01   6.9006418e-01   1.3813076e+00   1.6199747e+00   1.9552274e+00   2.4344864e+00   1.4148192e+00   8.0358695e-01   1.3039319e+00   2.1287551e+00   1.5153654e+00   1.2246352e+00   6.2660376e-01   1.2741904e+00   1.5160122e+00   1.2047214e+00   9.1163729e-01   1.7242097e+00   1.6420607e+00   1.2064640e+00   8.3812833e-01   1.0168833e+00   1.3257654e+00   8.6137722e-01   1.1533602e+00   3.1382691e+00   2.1485328e+00   3.1786790e+00   2.6799312e+00   2.9354140e+00   3.9353144e+00   1.5252485e+00   3.5658557e+00   2.9475994e+00   3.4455976e+00   2.3167786e+00   2.4320363e+00   2.7459122e+00   2.0598189e+00   2.2501104e+00   2.5013206e+00   2.6321552e+00   4.0910078e+00   4.2293986e+00   2.0521052e+00   2.9731112e+00   1.9619929e+00   4.0491656e+00   2.0481101e+00   
2.8946126e+00   3.2860707e+00   1.9342059e+00   2.0025214e+00   2.7210925e+00   3.1148548e+00   3.4424959e+00   3.9360563e+00   2.7333517e+00   2.2078200e+00   2.6383936e+00   3.6047462e+00   2.7713578e+00   2.6121617e+00   1.8925840e+00   2.7075477e+00   2.8419886e+00   2.5215069e+00   2.1485328e+00   3.1102248e+00   2.9517129e+00   2.5041493e+00   2.1435335e+00   2.3887866e+00   2.5633372e+00   2.1544995e+00   2.0467316e+00   1.0576043e+00   2.0528819e+00   1.5379283e+00   1.8096161e+00   2.8029161e+00   8.6012420e-01   2.4272793e+00   1.8028753e+00   2.3372930e+00   1.2144845e+00   1.2985682e+00   1.6312555e+00   1.0166932e+00   1.3073038e+00   1.4333755e+00   1.4843487e+00   2.9588514e+00   3.0950947e+00   9.7949166e-01   1.8647706e+00   9.3615100e-01   2.9181741e+00   9.3049742e-01   1.7594421e+00   2.1522124e+00   8.2342214e-01   8.7720955e-01   1.5965946e+00   1.9973159e+00   2.3235032e+00   2.8379387e+00   1.6211988e+00   1.0588560e+00   1.5076049e+00   2.5254157e+00   1.6945041e+00   1.4637418e+00   7.8197925e-01   1.6130724e+00   1.7539916e+00   1.5193574e+00   1.0576043e+00   1.9849009e+00   1.8691652e+00   1.4607586e+00   1.0375119e+00   1.2741904e+00   1.4947429e+00   1.0361698e+00   1.0621081e+00   8.3649708e-01   7.6590510e-01   4.0176783e-01   1.3455136e+00   1.8827665e+00   1.1093572e+00   9.5676647e-01   9.0852141e-01   9.4309624e-01   8.9852394e-01   6.8076724e-01   1.2002762e+00   9.7825559e-01   7.0479928e-01   7.8197925e-01   1.4637418e+00   1.5390703e+00   1.4628493e+00   6.2538346e-01   1.2208301e+00   1.4754770e+00   1.2162549e+00   5.2574978e-01   1.0104465e+00   1.2832075e+00   1.1810170e+00   6.1947990e-01   1.1242402e+00   1.1718516e+00   1.6295015e+00   5.8914551e-01   1.2063335e+00   1.1879206e+00   1.4041085e+00   4.0293660e-01   7.7074935e-01   1.2709820e+00   7.7869083e-01   5.0592043e-01   9.7441804e-01   1.0621081e+00   5.0991930e-01   4.4417983e-01   8.3888121e-01   1.1770266e+00   8.6361309e-01   6.0670504e-01   1.0324775e+00   
1.3844611e+00   6.2660376e-01   8.8695363e-01   2.0692197e+00   9.7441804e-01   1.6995747e+00   1.0122141e+00   1.6372749e+00   7.6752131e-01   6.0551856e-01   1.0244319e+00   2.1845981e-01   5.0090417e-01   7.2852070e-01   7.4777660e-01   2.2645802e+00   2.3019759e+00   5.7324170e-01   1.1833351e+00   2.5651975e-01   2.1907335e+00   5.0905001e-01   1.0330459e+00   1.5124582e+00   4.4651726e-01   3.8934542e-01   6.9369532e-01   1.4517959e+00   1.7036156e+00   2.3021295e+00   7.0429250e-01   5.6700421e-01   6.3977563e-01   1.9782093e+00   8.7229670e-01   6.8801986e-01   3.8934542e-01   1.1199472e+00   9.9519977e-01   1.1263042e+00   0.0000000e+00   1.1697902e+00   1.0851476e+00   9.2859317e-01   5.0905001e-01   7.1504098e-01   7.7763126e-01   3.0546431e-01   8.2135873e-01   6.0121055e-01   7.6716823e-01   2.3579605e+00   4.5581864e-01   5.8914551e-01   6.5223271e-01   8.9095811e-01   8.2552685e-01   4.4417983e-01   1.5124582e+00   1.3844611e+00   8.1810461e-01   6.6384020e-01   1.0516761e+00   1.0733200e+00   1.3725949e+00   3.0844217e-01   1.6183051e+00   8.9095811e-01   1.1426203e+00   4.5470518e-01   3.2816937e-01   1.2604558e+00   1.2459608e+00   7.1799256e-01   5.0180477e-01   3.6171588e-01   1.0269295e+00   7.1840099e-01   1.0531192e+00   1.1093572e+00   6.1092863e-01   8.4591037e-01   7.4777660e-01   1.3693737e+00   5.0905001e-01   4.8135521e-01   8.0619006e-01   1.3844611e+00   3.4378533e-01   5.3309112e-01   7.3813096e-01   1.0901359e+00   8.1273630e-01   9.6141901e-01   1.2978356e+00   4.2667565e-01   1.4573287e+00   1.5826638e+00   1.0904758e+00   5.0503591e-01   1.1258723e+00   5.4292906e-01   3.2816937e-01   5.3022554e-01   7.7869083e-01   7.5871717e-01   5.5492130e-01   2.1269358e-01   1.6596342e+00   1.6919202e+00   8.3280511e-01   7.0429250e-01   8.7202528e-01   1.5772389e+00   7.0394675e-01   5.2655962e-01   9.2836103e-01   8.0064372e-01   7.0437330e-01   3.0546431e-01   9.0478973e-01   1.1271488e+00   1.7235501e+00   4.0293660e-01   5.2942799e-01   
4.5470518e-01   1.4317371e+00   6.8917100e-01   2.1269358e-01   8.1099042e-01   6.2988288e-01   6.5172743e-01   7.6166891e-01   6.2660376e-01   6.5648056e-01   7.6625946e-01   6.1947990e-01   6.4755655e-01   4.2667565e-01   6.2660376e-01   5.6700421e-01   1.2113327e+00   1.8396098e+00   8.7504951e-01   5.7257017e-01   8.3280511e-01   7.0784540e-01   5.5492130e-01   3.7427929e-01   1.0284501e+00   8.7420176e-01   5.0991930e-01   4.4417983e-01   1.4089719e+00   1.4387122e+00   1.1127329e+00   4.1586001e-01   1.1205013e+00   1.3344634e+00   9.3048953e-01   3.2816937e-01   7.4164639e-01   1.0244319e+00   9.3999899e-01   2.5651975e-01   8.1242502e-01   9.1858284e-01   1.4955532e+00   2.5251796e-01   8.7420176e-01   8.5335130e-01   1.2045536e+00   4.3691963e-01   4.4651726e-01   1.0480665e+00   4.9857388e-01   2.8507955e-01   7.3535471e-01   8.8695363e-01   3.2816937e-01   3.8934542e-01   6.0611244e-01   8.6361309e-01   6.0551856e-01   5.2655962e-01   8.3783744e-01   3.0351721e+00   4.2418962e-01   1.0941064e+00   7.5705927e-01   1.6559784e+00   1.5582387e+00   1.2112034e+00   2.1967372e+00   2.0692197e+00   1.5564198e+00   1.3693737e+00   8.0096515e-01   4.5581864e-01   2.0330276e+00   1.0137836e+00   2.3142399e+00   2.1845981e-01   1.9017011e+00   1.1228379e+00   6.6827038e-01   2.0223026e+00   1.9969203e+00   1.3792358e+00   8.7478495e-01   5.2371571e-01   8.1385214e-01   1.3793330e+00   1.7664528e+00   1.6569692e+00   5.0905001e-01   1.4644753e+00   1.4341959e+00   2.1204309e+00   1.2632199e+00   1.1880428e+00   1.5405106e+00   2.0692197e+00   9.4009473e-01   1.1355826e+00   1.4992973e+00   1.8311457e+00   1.5765737e+00   1.6311692e+00   1.9969203e+00   2.6670272e+00   1.9783833e+00   2.5784641e+00   1.6572339e+00   1.5613865e+00   1.9842916e+00   8.6110333e-01   1.0720678e+00   1.6180482e+00   1.7140774e+00   3.2123303e+00   3.2542669e+00   1.1332978e+00   2.1449779e+00   7.5976039e-01   3.1540626e+00   1.4088394e+00   1.9797139e+00   2.4825886e+00   1.3075101e+00   
1.2330392e+00   1.6629594e+00   2.4148300e+00   2.6746409e+00   3.2573703e+00   1.6686069e+00   1.4322723e+00   1.4341959e+00   2.9471490e+00   1.6864366e+00   1.6385322e+00   1.1320702e+00   2.0632091e+00   1.9468380e+00   2.0389505e+00   9.7441804e-01   2.1303950e+00   2.0095672e+00   1.8517858e+00   1.4168607e+00   1.6483152e+00   1.5346983e+00   1.0866092e+00   7.2486328e-01   8.7202528e-01   1.2988558e+00   1.1847335e+00   8.6137722e-01   1.8265471e+00   1.7177705e+00   1.2107055e+00   9.9368623e-01   9.5866719e-01   7.3805807e-01   1.6506221e+00   7.3805807e-01   1.9449573e+00   5.0592043e-01   1.5350426e+00   7.8695083e-01   3.7427929e-01   1.6553809e+00   1.6249178e+00   1.0168833e+00   5.0991930e-01   2.1845981e-01   9.7270522e-01   1.0264409e+00   1.3817041e+00   1.2768639e+00   5.7324170e-01   1.1634384e+00   1.0611732e+00   1.7483574e+00   9.3048953e-01   9.0056222e-01   1.2340567e+00   1.6995747e+00   6.8076724e-01   9.1883539e-01   1.1718516e+00   1.4616896e+00   1.2125198e+00   1.2951888e+00   1.6249178e+00   1.2028939e+00   8.7420176e-01   5.3665999e-01   5.5492130e-01   1.1340084e+00   1.0720678e+00   8.3345577e-01   5.3588338e-01   1.5520745e+00   1.3258714e+00   9.5099818e-01   7.7074935e-01   1.2604558e+00   1.1891470e+00   9.2264612e-01   8.1099042e-01   7.7039952e-01   1.0389435e+00   1.0054794e+00   4.3456114e-01   6.2605182e-01   7.2823007e-01   1.5784191e+00   4.8927739e-01   7.5976039e-01   6.5223271e-01   1.0692258e+00   9.8985697e-01   6.3808075e-01   1.1178200e+00   6.6827038e-01   7.4855857e-01   8.6513410e-01   1.0122141e+00   7.6787403e-01   9.3615100e-01   7.5835500e-01   8.2654509e-01   6.9728513e-01   9.9519977e-01   9.7356960e-01   1.1306887e+00   1.2197188e+00   8.0353565e-01   1.7933375e+00   1.5916843e+00   1.0118409e+00   1.0089164e+00   7.0776547e-01   1.1591754e+00   1.8437762e+00   5.3309112e-01   1.8278913e+00   9.6838716e-01   1.4843324e+00   6.3735887e-01   7.3496673e-01   1.5570415e+00   1.5003972e+00   1.0421979e+00   
9.7759114e-01   8.9070384e-01   7.8197925e-01   1.0329598e+00   1.4388174e+00   1.5204340e+00   6.9325418e-01   9.4309624e-01   1.0339865e+00   1.6134578e+00   8.0660588e-01   7.0523271e-01   1.0406064e+00   1.6372749e+00   5.1303949e-01   5.8851328e-01   1.0072663e+00   1.4951106e+00   1.0950112e+00   1.0934620e+00   1.5213929e+00   5.0991930e-01   4.5581864e-01   9.3615100e-01   7.6590510e-01   3.2586371e-01   4.2538717e-01   1.7942496e+00   1.9566981e+00   1.0636401e+00   6.6384020e-01   9.2264612e-01   1.7814077e+00   5.2371571e-01   6.0670504e-01   1.0120221e+00   4.8927739e-01   4.3691963e-01   5.6769031e-01   8.9366705e-01   1.1948578e+00   1.6982795e+00   5.7324170e-01   5.7257017e-01   8.3060013e-01   1.3824965e+00   5.7867728e-01   4.1586001e-01   5.4292906e-01   4.4651726e-01   5.7324170e-01   4.4535192e-01   7.6752131e-01   8.2105460e-01   6.9369532e-01   3.4583729e-01   7.0479928e-01   2.0656129e-01   4.3456114e-01   6.1092863e-01   4.6472023e-01   7.1840099e-01   6.9369532e-01   5.6631629e-01   3.2816937e-01   1.8058693e+00   1.8261179e+00   6.3735887e-01   7.0328431e-01   8.2684479e-01   1.6791597e+00   4.0293660e-01   6.6827038e-01   9.7377870e-01   5.0991930e-01   4.8135521e-01   3.2586371e-01   8.7021234e-01   1.1327825e+00   1.7839298e+00   3.7427929e-01   4.1586001e-01   5.5492130e-01   1.3916739e+00   7.7919451e-01   4.1449626e-01   5.8914551e-01   5.7324170e-01   6.0900723e-01   6.2407309e-01   6.0551856e-01   7.5705927e-01   7.8695083e-01   4.8135521e-01   3.2586371e-01   3.0811765e-01   7.3851529e-01   5.3665999e-01   1.1524979e+00   1.0244319e+00   4.3691963e-01   3.7255734e-01   1.4090646e+00   1.5060944e+00   1.0810263e+00   2.8507955e-01   1.2406194e+00   1.3336069e+00   7.1791510e-01   3.2586371e-01   5.9426792e-01   8.2552685e-01   8.2275389e-01   4.1449626e-01   5.8851328e-01   7.5196795e-01   1.3415658e+00   4.1586001e-01   7.2852070e-01   8.9159388e-01   9.7249562e-01   5.8914551e-01   4.4535192e-01   9.4352681e-01   1.4096146e-01   
3.0811765e-01   4.1586001e-01   1.0244319e+00   4.2538717e-01   4.5581864e-01   3.2586371e-01   7.0869559e-01   3.7427929e-01   6.5223271e-01   9.2836103e-01   4.4651726e-01   8.8695363e-01   8.9971984e-01   2.4227359e+00   2.4243464e+00   5.5419992e-01   1.3235313e+00   3.0546431e-01   2.3149695e+00   6.1151102e-01   1.2036484e+00   1.6520677e+00   5.4292906e-01   5.7324170e-01   8.2305664e-01   1.5788188e+00   1.8238348e+00   2.4554026e+00   8.2552685e-01   7.0429250e-01   7.7588000e-01   2.0959492e+00   1.0466623e+00   8.6513410e-01   5.4292906e-01   1.2497790e+00   1.1215059e+00   1.2436109e+00   2.1845981e-01   1.3165513e+00   1.2256881e+00   1.0406064e+00   6.0060595e-01   8.5434758e-01   9.6664346e-01   5.1691876e-01   6.5223271e-01   8.4050231e-01   2.2451458e+00   2.2996030e+00   9.7270522e-01   1.1594648e+00   4.2538717e-01   2.1936248e+00   6.9369532e-01   1.0119857e+00   1.5297036e+00   6.6384020e-01   6.2988288e-01   7.0386584e-01   1.5113992e+00   1.7140171e+00   2.2868482e+00   6.9325418e-01   9.4080461e-01   1.0406064e+00   1.9734538e+00   7.5835500e-01   7.8695083e-01   6.2988288e-01   1.1158787e+00   9.4832302e-01   1.1055069e+00   5.0090417e-01   1.1452867e+00   1.0056742e+00   9.0277242e-01   6.3977563e-01   7.3851529e-01   6.6432544e-01   6.0611244e-01   5.1691876e-01   1.6983410e+00   1.8377590e+00   1.1500393e+00   5.6631629e-01   8.6012420e-01   1.6864433e+00   6.6539428e-01   4.5581864e-01   9.7356960e-01   6.6932542e-01   5.9426792e-01   4.5470518e-01   9.7548738e-01   1.1573546e+00   1.6772907e+00   4.4535192e-01   8.2929029e-01   9.8450810e-01   1.3812107e+00   3.2816937e-01   5.0905001e-01   6.6932542e-01   5.0991930e-01   3.7598397e-01   5.0905001e-01   7.2852070e-01   6.4704320e-01   4.5581864e-01   3.2586371e-01   7.4777660e-01   3.2816937e-01   2.5251796e-01   6.3108414e-01   1.5573817e+00   1.6420607e+00   9.0575661e-01   5.7867728e-01   9.7441804e-01   1.4917344e+00   6.2482915e-01   4.0176783e-01   7.7039952e-01   7.1799256e-01   
6.4704320e-01   3.2816937e-01   7.1799256e-01   9.7270522e-01   1.5593809e+00   4.1586001e-01   4.6472023e-01   5.6454040e-01   1.2601890e+00   6.5223271e-01   1.2418578e-01   7.6716823e-01   4.4651726e-01   6.0670504e-01   6.1947990e-01   7.4777660e-01   5.9426792e-01   7.2172678e-01   5.3588338e-01   6.2660376e-01   3.2352160e-01   5.8914551e-01   6.4704320e-01   1.2013436e+00   2.3665136e+00   1.1771643e+00   2.4806944e+00   1.0018083e+00   2.1098467e+00   1.2627078e+00   8.8503502e-01   2.2024869e+00   2.1511385e+00   1.6189643e+00   1.1368070e+00   1.0688498e+00   3.4378533e-01   1.6188960e+00   1.9698860e+00   1.9254808e+00   8.8861541e-01   1.5832517e+00   1.5978297e+00   2.2712062e+00   1.4277162e+00   1.3610783e+00   1.6849072e+00   2.2645802e+00   1.1106525e+00   1.2665468e+00   1.6689743e+00   2.0995265e+00   1.7457596e+00   1.7532140e+00   2.1511385e+00   2.2712062e+00   1.3276804e+00   2.5485519e+00   3.4378533e-01   2.1870851e+00   1.4266198e+00   1.0379132e+00   2.3072128e+00   2.2736138e+00   1.6156775e+00   1.2095267e+00   8.3888121e-01   1.2277129e+00   1.6151153e+00   2.0528819e+00   1.8792214e+00   8.2624515e-01   1.7265353e+00   1.7004805e+00   2.3953564e+00   1.5733646e+00   1.4691764e+00   1.8497891e+00   2.3019759e+00   1.2238809e+00   1.4266198e+00   1.7962897e+00   2.1027465e+00   1.8635467e+00   1.9008621e+00   2.2439391e+00   1.3353353e+00   7.2526325e-01   2.1293320e+00   5.5492130e-01   1.2776560e+00   1.5193574e+00   6.2988288e-01   8.1130291e-01   8.6912228e-01   1.3752391e+00   1.6044563e+00   2.3474075e+00   9.1883539e-01   6.2024833e-01   6.4806901e-01   1.9000365e+00   1.3674559e+00   9.6664346e-01   8.1354181e-01   1.1751082e+00   1.2304904e+00   1.2256933e+00   5.7324170e-01   1.3628690e+00   1.4089364e+00   1.0866132e+00   4.8036801e-01   8.9971984e-01   1.3044654e+00   8.1130291e-01   1.3916739e+00   1.1500393e+00   9.6838716e-01   2.5251796e-01   5.5419992e-01   1.0573285e+00   1.0284501e+00   5.7324170e-01   7.1840099e-01   
6.6317860e-01   1.1434428e+00   5.6769031e-01   9.7855477e-01   1.1106525e+00   8.2899253e-01   6.0670504e-01   6.2660376e-01   1.1449732e+00   3.2586371e-01   2.1845981e-01   6.0060595e-01   1.1833351e+00   2.0656129e-01   2.5251796e-01   5.1607523e-01   9.6324667e-01   5.9426792e-01   7.1799256e-01   1.0879524e+00   2.4372751e+00   7.0437330e-01   1.2331989e+00   1.7427900e+00   6.0611244e-01   5.1607523e-01   9.3615100e-01   1.6812503e+00   1.9411754e+00   2.5109747e+00   9.3801395e-01   7.7039952e-01   8.6513410e-01   2.2052183e+00   9.6324667e-01   8.9917007e-01   4.2667565e-01   1.3224963e+00   1.1912106e+00   1.3084046e+00   2.5651975e-01   1.3897316e+00   1.2541242e+00   1.1120775e+00   7.1504098e-01   9.1051084e-01   8.1521713e-01   3.6171588e-01   2.0209349e+00   1.2627078e+00   7.9878917e-01   2.1431239e+00   2.1198551e+00   1.5016009e+00   9.6141901e-01   6.2024833e-01   1.0083666e+00   1.5022608e+00   1.8803649e+00   1.7557336e+00   6.2482915e-01   1.6044563e+00   1.5582387e+00   2.2435182e+00   1.3836712e+00   1.3199714e+00   1.6568705e+00   2.1907335e+00   1.0796583e+00   1.2825987e+00   1.6205332e+00   1.9460721e+00   1.6995133e+00   1.7678302e+00   2.1198551e+00   9.1750357e-01   1.2756158e+00   1.4096146e-01   3.2352160e-01   7.1504098e-01   1.1242402e+00   1.4312787e+00   2.0223464e+00   7.3535471e-01   3.2586371e-01   7.3851529e-01   1.6386882e+00   9.4477932e-01   6.4755655e-01   3.7427929e-01   7.3805807e-01   8.6165877e-01   7.2852070e-01   5.0905001e-01   1.0922991e+00   1.0175773e+00   6.0900723e-01   2.1269358e-01   4.0176783e-01   8.2372435e-01   4.5470518e-01   5.5492130e-01   9.8495853e-01   9.0521488e-01   5.2942799e-01   6.3977563e-01   7.9878917e-01   1.2832075e+00   5.3022554e-01   8.3060013e-01   9.4500268e-01   1.0244319e+00   4.4651726e-01   4.0176783e-01   1.0230441e+00   3.4378533e-01   3.2586371e-01   6.1623531e-01   1.0330459e+00   2.5651975e-01   4.0000000e-01   5.3588338e-01   9.5676647e-01   5.3665999e-01   5.3665999e-01   
9.0521488e-01   1.3865084e+00   1.3669552e+00   8.6012420e-01   2.8192292e-01   4.1586001e-01   8.4050231e-01   8.7383925e-01   1.1355826e+00   1.1712156e+00   6.2660376e-01   9.8985697e-01   8.5205778e-01   1.4909823e+00   6.3861009e-01   7.2526325e-01   9.4854455e-01   1.5124582e+00   5.6700421e-01   7.7919451e-01   8.9971984e-01   1.2483814e+00   9.4009473e-01   1.0879524e+00   1.4147273e+00   2.1269358e-01   8.1354181e-01   1.2441035e+00   1.5551238e+00   2.1137172e+00   8.2899253e-01   3.7427929e-01   8.2929029e-01   1.7587110e+00   9.6095130e-01   7.1799256e-01   2.4837156e-01   8.3280511e-01   9.3797093e-01   7.9016429e-01   4.4651726e-01   1.1833351e+00   1.0724413e+00   6.6932542e-01   3.2816937e-01   4.6472023e-01   8.0467258e-01   3.8776762e-01   7.3145860e-01   1.2564564e+00   1.5558094e+00   2.0983278e+00   7.5082357e-01   3.6171588e-01   7.6590510e-01   1.7862655e+00   8.4050231e-01   6.2024833e-01   1.2418578e-01   8.6137722e-01   8.9852394e-01   8.5462626e-01   3.8934542e-01   1.1192362e+00   1.0078327e+00   7.0386584e-01   5.0991930e-01   4.5470518e-01   6.6539428e-01   2.4837156e-01   8.5690100e-01   1.0344911e+00   1.6689743e+00   1.0000000e-01   6.8961791e-01   7.1799256e-01   1.3207609e+00   6.2024833e-01   3.7427929e-01   8.3888121e-01   5.3588338e-01   4.2288438e-01   6.4405773e-01   6.9369532e-01   5.3309112e-01   5.8914551e-01   4.6472023e-01   6.2538346e-01   4.1586001e-01   6.1623531e-01   6.4405773e-01   4.0176783e-01   1.0175773e+00   8.9303452e-01   1.0122141e+00   1.1161766e+00   7.7885297e-01   1.0755693e+00   8.1385214e-01   1.3792358e+00   5.8914551e-01   8.5462626e-01   8.7848692e-01   1.4517959e+00   7.3851529e-01   9.4854455e-01   8.6263408e-01   1.0941064e+00   8.3783744e-01   1.1172689e+00   1.3545005e+00   1.0391247e+00   1.0389435e+00   1.3163598e+00   1.3379696e+00   4.5470518e-01   1.1951875e+00   1.0632598e+00   1.6796759e+00   7.8197925e-01   8.3345577e-01   1.0540105e+00   1.7036156e+00   6.9167458e-01   8.8503502e-01   
1.0279631e+00   1.3693737e+00   1.1192426e+00   1.3077572e+00   1.6183051e+00   1.6694974e+00   1.9094934e+00   1.9895190e+00   8.2384013e-01   1.6634400e+00   1.6218244e+00   2.2178691e+00   1.3008161e+00   1.3567326e+00   1.4994715e+00   2.3021295e+00   1.1763719e+00   1.3024224e+00   1.5535909e+00   2.0373882e+00   1.6756749e+00   1.7981158e+00   2.1739455e+00   7.6752131e-01   8.1354181e-01   1.3198846e+00   6.0611244e-01   4.4535192e-01   8.5335130e-01   5.3665999e-01   3.8776762e-01   6.3977563e-01   7.0429250e-01   5.2655962e-01   5.5492130e-01   4.5581864e-01   6.3861009e-01   4.2667565e-01   6.1151102e-01   6.6932542e-01   5.1691876e-01   1.5922648e+00   1.0054794e+00   4.8135521e-01   4.3456114e-01   7.6955924e-01   9.6664346e-01   8.9687438e-01   5.6700421e-01   1.0421979e+00   1.1001291e+00   8.2929029e-01   4.4535192e-01   5.1691876e-01   8.9712482e-01   4.5470518e-01   1.6914476e+00   1.1340084e+00   5.8914551e-01   8.5105559e-01   9.7548738e-01   1.0864449e+00   1.1160770e+00   6.3977563e-01   1.0720678e+00   1.2163831e+00   1.0047836e+00   6.9369532e-01   7.2343175e-01   1.0613462e+00   6.2407309e-01   1.4238090e+00   1.3511716e+00   1.9067300e+00   9.3824087e-01   1.0331736e+00   1.1327825e+00   1.9782093e+00   9.0454394e-01   1.0244319e+00   1.1833480e+00   1.5948732e+00   1.3360558e+00   1.5461469e+00   1.8900319e+00   6.2081167e-01   9.1750357e-01   6.4290921e-01   4.4417983e-01   7.0429250e-01   8.7229670e-01   5.3665999e-01   4.0438741e-01   5.6454040e-01   1.0054794e+00   5.7015910e-01   2.1269358e-01   7.5705927e-01   7.3496673e-01   5.2942799e-01   6.2024833e-01   6.6491075e-01   6.8801986e-01   6.1947990e-01   7.2172678e-01   5.5492130e-01   6.9006418e-01   3.2816937e-01   5.3665999e-01   5.6700421e-01   9.7779835e-01   1.0014633e+00   9.4854455e-01   3.8934542e-01   1.2342162e+00   1.1043332e+00   7.9580667e-01   5.3665999e-01   5.7257017e-01   7.2852070e-01   3.0275928e-01   3.4378533e-01   3.2352160e-01   1.1199472e+00   5.0991930e-01   
4.6472023e-01   2.8507955e-01   7.7869083e-01   4.1586001e-01   7.1799256e-01   1.0132664e+00   5.0905001e-01   9.9519977e-01   3.0811765e-01   2.1269358e-01   4.0293660e-01   8.3060013e-01   5.0592043e-01   5.3665999e-01   9.3049742e-01   1.1263042e+00   8.0064372e-01   6.1623531e-01   2.1269358e-01   7.7588000e-01   4.4651726e-01   7.2783368e-01   1.0329901e+00   1.1697902e+00   1.0851476e+00   9.2859317e-01   5.0905001e-01   7.1504098e-01   7.7763126e-01   3.0546431e-01   2.5651975e-01   7.0437330e-01   1.0573285e+00   7.3145860e-01   6.9325418e-01   1.0901359e+00   5.3588338e-01   1.0175773e+00   6.4405773e-01   5.3665999e-01   1.0078327e+00   6.2407309e-01   3.2352160e-01   5.7257017e-01   8.5205778e-01   5.1691876e-01   9.4022486e-01   5.6769031e-01   4.8927739e-01   6.0611244e-01   6.0900723e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-3.2-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-3.2-ml.txt
    deleted file mode 100644
    index daa81110a2..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-3.2-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   2.0215050e+00   2.0988154e+00   1.8614681e+00   2.0510161e+00   1.9210911e+00   2.1323516e+00   1.9565454e+00   2.1029889e+00   1.9617871e+00   2.0544792e+00   2.0357408e+00   1.8811414e+00   2.0694693e+00   2.1245977e+00   2.0632165e+00   2.0452823e+00   2.0249330e+00   1.9635489e+00   2.0508580e+00   2.0838578e+00   1.9324052e+00   1.8224609e+00   1.9795343e+00   1.9536534e+00   1.9694910e+00   1.9075569e+00   1.9590397e+00   2.0022087e+00   1.8814000e+00   1.8884208e+00   1.9961121e+00   2.0215351e+00   1.7515769e+00   2.0756437e+00   2.0109476e+00   1.9234849e+00   1.9160076e+00   1.8550862e+00   1.7733640e+00   2.0071906e+00   2.0209542e+00   2.0616569e+00   2.0565503e+00   1.9083573e+00   2.2732431e+00   1.9975503e+00   1.9080072e+00   2.1437809e+00   2.1296295e+00   1.9739085e+00   1.9834166e+00   2.1078664e+00   2.2016840e+00   2.2080962e+00   1.7340579e+00   2.0549287e+00   1.7331748e+00   1.9559688e+00   2.0343364e+00   1.8736929e+00   1.9730416e+00   1.5308944e+00   1.8421831e+00   2.0174240e+00   2.0137378e+00   1.7956151e+00   1.9606596e+00   1.9074857e+00   2.0413879e+00   2.0070305e+00   1.9584677e+00   1.8977851e+00   1.9176239e+00   1.7067419e+00   1.9461927e+00   1.8431700e+00   1.8284576e+00   1.7778704e+00   1.8350329e+00   2.0175415e+00   1.7459063e+00   1.9242505e+00   1.8757370e+00   1.9312506e+00   2.0574808e+00   2.0894636e+00   1.9780203e+00   2.1374036e+00   1.8900436e+00   2.0273032e+00   2.0681953e+00   2.0234699e+00   2.0666449e+00   2.0663485e+00   1.9281402e+00   1.7846314e+00   2.0372479e+00   1.8831230e+00   2.0186015e+00   2.0193231e+00   2.2022665e+00   1.8145737e+00   2.0466545e+00   1.8092421e+00   1.9600687e+00   2.0322961e+00   1.9556364e+00   1.8266422e+00   1.9950345e+00   2.1038429e+00   2.1164145e+00   2.0188062e+00   1.8863331e+00   2.0006971e+00   1.9971068e+00   1.8771862e+00   2.1148855e+00   1.9570638e+00   1.9859615e+00   2.0030854e+00   2.0737344e+00   1.9739259e+00   1.9266524e+00   1.9200535e+00   
2.1376689e+00   1.8944425e+00   1.9330553e+00   1.8561590e+00   1.9422954e+00   1.8874178e+00   1.8624808e+00   1.8265563e+00   1.8840519e+00   2.0515092e+00   2.0174226e+00   1.9771196e+00   2.0635988e+00   1.7334466e+00   1.9912604e+00   1.8915711e+00   1.8262636e+00   1.9369173e+00   1.9560446e+00   1.9549934e+00   1.9279230e+00   1.9021073e+00   2.0113391e+00   2.0305786e+00   1.8066806e+00   1.9656739e+00   2.1219217e+00   1.8820250e+00   1.8936826e+00   2.0565131e+00   1.9839441e+00   1.8553479e+00   1.9923760e+00   1.6393276e+00   1.9786440e+00   1.8274394e+00   1.9322611e+00   2.0404318e+00   1.9216532e+00   1.9361171e+00   1.8401373e+00   1.9908059e+00   1.9495117e+00   2.1975655e+00   1.8413913e+00   2.1528773e+00   1.8434374e+00   2.1668863e+00   2.0429273e+00   1.9980016e+00   1.9790129e+00   2.0264829e+00   2.1478843e+00   2.0899600e+00   2.0280670e+00   2.1210881e+00   1.9993891e+00   1.8646871e+00   1.9099983e+00   1.9263353e+00   2.0042495e+00   2.1365919e+00   2.1830279e+00   1.9631961e+00   2.0880004e+00   1.8348369e+00
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-5.8-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-5.8-ml-iris.txt
    deleted file mode 100644
    index aa26b0439f..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-minkowski-5.8-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   5.0042326e-01   4.1210927e-01   5.2133179e-01   1.1269424e-01   4.2362917e-01   5.0001522e-01   1.2085435e-01   7.4262850e-01   4.0127250e-01   3.0482299e-01   3.0482299e-01   5.0436965e-01   8.0923926e-01   7.1629168e-01   9.1424701e-01   4.1317535e-01   1.0000000e-01   6.0366256e-01   3.0017653e-01   3.3813251e-01   2.2573593e-01   5.2133179e-01   3.4080442e-01   5.0436965e-01   5.0043084e-01   2.2608083e-01   1.1269424e-01   1.1269424e-01   4.1315633e-01   4.1315633e-01   3.0490481e-01   6.0000952e-01   7.0462550e-01   4.0127250e-01   3.0482299e-01   4.0002221e-01   4.0127250e-01   7.1621748e-01   1.1269424e-01   1.2085435e-01   1.2036864e+00   7.0088477e-01   4.0125062e-01   5.0476836e-01   5.0436965e-01   3.0474106e-01   5.0436235e-01   2.2573593e-01   2.0061436e-01   3.3243227e+00   3.1068812e+00   3.5145413e+00   2.6080595e+00   3.2075731e+00   3.1014454e+00   3.3055260e+00   1.9156198e+00   3.2079238e+00   2.5066441e+00   2.1498493e+00   2.8059664e+00   2.6093989e+00   3.3021953e+00   2.2070266e+00   3.0158454e+00   3.1034764e+00   2.7009878e+00   3.1081779e+00   2.5032992e+00   3.4074959e+00   2.6050088e+00   3.5035589e+00   3.3011884e+00   2.9065890e+00   3.0117336e+00   3.4118782e+00   3.6094426e+00   3.1038958e+00   2.1042326e+00   2.4058620e+00   2.3063407e+00   2.5029614e+00   3.7025335e+00   3.1034636e+00   3.1057006e+00   3.3110189e+00   3.0065909e+00   2.7025941e+00   2.6047974e+00   3.0013665e+00   3.2025221e+00   2.6029242e+00   1.9242109e+00   2.8024935e+00   2.8013151e+00   2.8022622e+00   2.9036582e+00   1.6267693e+00   2.7028014e+00   4.6144526e+00   3.7071079e+00   4.5121787e+00   4.2031939e+00   4.4087839e+00   5.2153194e+00   3.1086291e+00   4.9093646e+00   4.4044245e+00   4.7202040e+00   3.7119486e+00   3.9066365e+00   4.1123628e+00   3.6114402e+00   3.7307413e+00   3.9194642e+00   4.1043951e+00   5.3177489e+00   5.5157728e+00   3.6035661e+00   4.3162097e+00   3.5127031e+00   5.3163123e+00   3.5077296e+00   4.3088507e+00   
4.6100803e+00   3.4082578e+00   3.5068380e+00   4.2080636e+00   4.4113183e+00   4.7149608e+00   5.0316727e+00   4.2105572e+00   3.7024462e+00   4.2007769e+00   4.7331529e+00   4.2173557e+00   4.1039096e+00   3.4076329e+00   4.0157626e+00   4.2194897e+00   3.7329396e+00   3.7071079e+00   4.5119962e+00   4.3218071e+00   3.8249612e+00   3.6093673e+00   3.8105293e+00   4.0166459e+00   3.7050109e+00   2.2573593e-01   3.0017653e-01   6.0000317e-01   9.0534502e-01   4.1210927e-01   4.0004442e-01   5.0000761e-01   1.2085435e-01   7.1621748e-01   4.0125062e-01   1.1269424e-01   6.0184622e-01   1.0776294e+00   1.4092540e+00   9.0508756e-01   5.0043084e-01   9.0181717e-01   8.0004602e-01   5.2491131e-01   7.0017011e-01   6.1119267e-01   3.6452132e-01   5.2133179e-01   2.0061436e-01   4.0246123e-01   5.0436965e-01   4.1209001e-01   2.4170870e-01   2.0121983e-01   5.2167829e-01   1.1001015e+00   1.2036862e+00   1.2085435e-01   2.2573593e-01   6.3164977e-01   1.2085435e-01   5.0000761e-01   4.0125062e-01   5.0002283e-01   7.0462844e-01   5.0043084e-01   5.2167829e-01   8.0888055e-01   1.1269424e-01   8.0008884e-01   3.0474106e-01   7.0462697e-01   3.0008832e-01   3.3416860e+00   3.1112912e+00   3.5249966e+00   2.6033557e+00   3.2127499e+00   3.1015178e+00   3.3078313e+00   1.9025708e+00   3.2150318e+00   2.5060738e+00   2.1061951e+00   2.8068283e+00   2.6040016e+00   3.3032134e+00   2.2072454e+00   3.0286102e+00   3.1035443e+00   2.7011973e+00   3.1070853e+00   2.5014549e+00   3.4078435e+00   2.6080511e+00   3.5048916e+00   3.3021665e+00   2.9125999e+00   3.0213627e+00   3.4211337e+00   3.6148618e+00   3.1047537e+00   2.1027003e+00   2.4016639e+00   2.3011929e+00   2.5032633e+00   3.7028303e+00   3.1034629e+00   3.1065984e+00   3.3192072e+00   3.0078209e+00   2.7027260e+00   2.6031664e+00   3.0009332e+00   3.2037232e+00   2.6027120e+00   1.9031578e+00   2.8022915e+00   2.8015662e+00   2.8024715e+00   2.9065359e+00   1.6099792e+00   2.7029416e+00   4.6149181e+00   3.7071538e+00   
4.5172866e+00   4.2039132e+00   4.4099272e+00   5.2224057e+00   3.1078968e+00   4.9146298e+00   4.4063795e+00   4.7253524e+00   3.7145622e+00   3.9080413e+00   4.1161770e+00   3.6111646e+00   3.7308314e+00   3.9209137e+00   4.1060063e+00   5.3254977e+00   5.5222404e+00   3.6024247e+00   4.3201293e+00   3.5126957e+00   5.3240486e+00   3.5093499e+00   4.3111749e+00   4.6158382e+00   3.4095576e+00   3.5076152e+00   4.2090727e+00   4.4184242e+00   4.7227808e+00   5.0458491e+00   4.2115634e+00   3.7037441e+00   4.2010125e+00   4.7466313e+00   4.2180733e+00   4.1050714e+00   3.4081972e+00   4.0212972e+00   4.2220584e+00   3.7407842e+00   3.7071538e+00   4.5144444e+00   4.3240980e+00   3.8290678e+00   3.6105228e+00   3.8128297e+00   4.0172657e+00   3.7052380e+00   2.0121983e-01   4.1210927e-01   7.9153339e-01   2.0181667e-01   3.0915245e-01   3.3813251e-01   2.2608083e-01   7.1629168e-01   3.0482299e-01   2.0181667e-01   4.0246123e-01   1.1281267e+00   1.2633045e+00   7.8890721e-01   4.1212852e-01   1.0095370e+00   6.0964891e-01   7.0470720e-01   5.2201750e-01   4.1210927e-01   4.5784410e-01   6.0017982e-01   3.4080442e-01   3.4342562e-01   5.0476836e-01   5.0043084e-01   3.0000000e-01   3.0017653e-01   7.0025283e-01   9.0508756e-01   1.0426513e+00   2.2608083e-01   3.0008832e-01   8.0046605e-01   2.2608083e-01   3.0474106e-01   4.0243965e-01   3.3813251e-01   9.0002570e-01   3.0000000e-01   4.3213914e-01   6.8170466e-01   2.0181667e-01   6.1119267e-01   1.1269424e-01   6.3178534e-01   3.0017653e-01   3.4595765e+00   3.2168311e+00   3.6364650e+00   2.7037323e+00   3.3192099e+00   3.2017763e+00   3.4107328e+00   2.0033798e+00   3.3237063e+00   2.6050967e+00   2.2121910e+00   2.9077087e+00   2.7085154e+00   3.4047917e+00   2.3071665e+00   3.1428042e+00   3.2033135e+00   2.8024935e+00   3.2103481e+00   2.6021247e+00   3.5076152e+00   2.7127272e+00   3.6073242e+00   3.4038884e+00   3.0203881e+00   3.1325879e+00   3.5317021e+00   3.7210979e+00   3.2059139e+00   2.2051638e+00   
2.5023084e+00   2.4021168e+00   2.6048201e+00   3.8033004e+00   3.2030448e+00   3.2074921e+00   3.4286399e+00   3.1131211e+00   2.8028008e+00   2.7031257e+00   3.1010004e+00   3.3055260e+00   2.7040740e+00   2.0050309e+00   2.9023862e+00   2.9020767e+00   2.9028421e+00   3.0107283e+00   1.7089863e+00   2.8033666e+00   4.7142986e+00   3.8066401e+00   4.6226512e+00   4.3047830e+00   4.5107876e+00   5.3296471e+00   3.2068572e+00   5.0203871e+00   4.5089338e+00   4.8299744e+00   3.8170042e+00   4.0095939e+00   4.2200398e+00   3.7100654e+00   3.8275330e+00   4.0209836e+00   4.2079639e+00   5.4332277e+00   5.6287689e+00   3.7032748e+00   4.4237036e+00   3.6112573e+00   5.4319232e+00   3.6111754e+00   4.4135512e+00   4.7221364e+00   3.5107924e+00   3.6081749e+00   4.3098514e+00   4.5261773e+00   4.8309399e+00   5.1593152e+00   4.3120751e+00   3.8056232e+00   4.3015640e+00   4.8592534e+00   4.3174320e+00   4.2064763e+00   3.5083248e+00   4.1268500e+00   4.3236383e+00   3.8471097e+00   3.8066401e+00   4.6166518e+00   4.4251081e+00   3.9318948e+00   3.7118930e+00   3.9150333e+00   4.1165034e+00   3.8051417e+00   5.2133179e-01   9.0160400e-01   3.0017653e-01   4.1209001e-01   2.2573593e-01   3.0008832e-01   8.2418002e-01   3.0482299e-01   2.0181667e-01   4.1212852e-01   1.2363278e+00   1.3741498e+00   9.0160400e-01   5.2133802e-01   1.1133986e+00   7.1621748e-01   8.0051036e-01   6.3178534e-01   5.6347121e-01   5.0517282e-01   4.1315633e-01   4.0004442e-01   4.1317535e-01   6.0948212e-01   6.0184622e-01   1.2085435e-01   2.0061436e-01   8.0051036e-01   1.0087250e+00   1.1527669e+00   3.0008832e-01   4.1210927e-01   9.0142636e-01   3.0008832e-01   2.2573593e-01   5.0436235e-01   4.5148429e-01   8.0004602e-01   2.2573593e-01   4.8342635e-01   7.2044167e-01   2.0181667e-01   7.1621748e-01   1.1269424e-01   7.4262850e-01   4.0125062e-01   3.2983364e+00   3.0300451e+00   3.4603347e+00   2.5053901e+00   3.1338090e+00   3.0030658e+00   3.2183845e+00   1.8040969e+00   3.1419971e+00   
2.4075162e+00   2.0123013e+00   2.7132680e+00   2.5163999e+00   3.2086215e+00   2.1132077e+00   2.9750754e+00   3.0049127e+00   2.6055197e+00   3.0177719e+00   2.4040962e+00   3.3110162e+00   2.5253371e+00   3.4126529e+00   3.2074182e+00   2.8380954e+00   2.9580787e+00   3.3536443e+00   3.5347730e+00   3.0101869e+00   2.0123796e+00   2.3038195e+00   2.2036797e+00   2.4099203e+00   3.6051707e+00   3.0042758e+00   3.0123228e+00   3.2490712e+00   2.9241808e+00   2.6047889e+00   2.5049231e+00   2.9016211e+00   3.1100277e+00   2.5081992e+00   1.8056342e+00   2.7040060e+00   2.7039988e+00   2.7050721e+00   2.8205713e+00   1.5147271e+00   2.6060742e+00   4.5183778e+00   3.6090052e+00   4.4337691e+00   4.1072664e+00   4.3151164e+00   5.1425125e+00   3.0092613e+00   4.8303615e+00   4.3139066e+00   4.6422789e+00   3.6259317e+00   3.8146285e+00   4.0301568e+00   3.5133848e+00   3.6358680e+00   3.8290678e+00   4.0124919e+00   5.2471177e+00   5.4403962e+00   3.5051114e+00   4.2343452e+00   3.4149831e+00   5.2455706e+00   3.4177035e+00   4.2200398e+00   4.5335328e+00   3.3168776e+00   3.4123846e+00   4.1140176e+00   4.3402553e+00   4.6459028e+00   4.9843016e+00   4.1167964e+00   3.6096226e+00   4.1026403e+00   4.6849407e+00   4.1230798e+00   4.0100505e+00   3.3123688e+00   3.9407837e+00   4.1330547e+00   3.6700537e+00   3.6090052e+00   4.4237036e+00   4.2343452e+00   3.7463488e+00   3.5181052e+00   3.7227931e+00   3.9220791e+00   3.6072781e+00   4.2362917e-01   4.0125062e-01   2.0061436e-01   7.4262850e-01   5.0002283e-01   4.0004442e-01   2.4170870e-01   6.0017982e-01   7.4329527e-01   8.0250123e-01   8.5406674e-01   4.1317535e-01   1.2085435e-01   7.0096858e-01   2.0181667e-01   4.1315633e-01   2.0181667e-01   4.5077696e-01   3.6259865e-01   5.0084481e-01   6.0017665e-01   2.4170870e-01   2.0121983e-01   2.2538848e-01   4.1315633e-01   5.0084481e-01   4.0246123e-01   5.0043842e-01   6.3164729e-01   5.0002283e-01   4.0122873e-01   5.0001522e-01   5.0002283e-01   6.7616723e-01   
2.0121983e-01   1.2085435e-01   1.3008771e+00   6.0948506e-01   4.0125062e-01   5.0085236e-01   6.0017982e-01   2.2573593e-01   4.5077696e-01   3.0017653e-01   3.0000000e-01   3.3320240e+00   3.1087192e+00   3.5191371e+00   2.6110181e+00   3.2098845e+00   3.1016129e+00   3.3064697e+00   1.9242109e+00   3.2110200e+00   2.5072065e+00   2.1702438e+00   2.8063347e+00   2.6144115e+00   3.3026483e+00   2.2074446e+00   3.0213781e+00   3.1035271e+00   2.7015967e+00   3.1108570e+00   2.5049231e+00   3.4076266e+00   2.6065485e+00   3.5045818e+00   3.3016829e+00   2.9091905e+00   3.0158857e+00   3.4160038e+00   3.6117923e+00   3.1042949e+00   2.1068047e+00   2.4087956e+00   2.3099309e+00   2.5038387e+00   3.7027671e+00   3.1034919e+00   3.1060428e+00   3.3145595e+00   3.0095593e+00   2.7026925e+00   2.6061038e+00   3.0017811e+00   3.2030205e+00   2.6039803e+00   1.9366876e+00   2.8028640e+00   2.8014482e+00   2.8024453e+00   2.9049136e+00   1.6388635e+00   2.7031257e+00   4.6146430e+00   3.7072412e+00   4.5144508e+00   4.2035048e+00   4.4092709e+00   5.2185448e+00   3.1091788e+00   4.9117351e+00   4.4054277e+00   4.7224997e+00   3.7130507e+00   3.9073151e+00   4.1140274e+00   3.6117351e+00   3.7308330e+00   3.9200674e+00   4.1050815e+00   5.3212796e+00   5.5187578e+00   3.6046347e+00   4.3179262e+00   3.5127783e+00   5.3198559e+00   3.5085510e+00   4.3098508e+00   4.6126513e+00   3.4088749e+00   3.5071604e+00   4.2085176e+00   4.4144980e+00   4.7185095e+00   5.0381903e+00   4.2110099e+00   3.7030413e+00   4.2009868e+00   4.7393218e+00   4.2176488e+00   4.1043951e+00   3.4078683e+00   4.0181902e+00   4.2205976e+00   3.7363838e+00   3.7072412e+00   4.5130595e+00   4.3227928e+00   3.8267408e+00   3.6102542e+00   3.8115096e+00   4.0168944e+00   3.7051079e+00   8.0923926e-01   5.2201750e-01   1.1270411e+00   8.0928056e-01   2.4170870e-01   6.3178782e-01   9.1471442e-01   1.1573074e+00   5.2167829e-01   5.0476836e-01   4.0000000e-01   4.2270142e-01   3.0017653e-01   3.0490481e-01   
5.0042326e-01   3.0915245e-01   8.5440680e-01   6.0184622e-01   6.3192325e-01   9.0142681e-01   5.2133179e-01   4.0363334e-01   5.0517282e-01   7.8890806e-01   8.2421923e-01   5.0042326e-01   3.1328089e-01   3.4085233e-01   8.0928056e-01   7.2044167e-01   4.5148429e-01   8.0928056e-01   1.0782211e+00   5.0517282e-01   4.8342635e-01   1.6097492e+00   1.0215068e+00   4.5148429e-01   3.0482299e-01   9.1446938e-01   3.0490481e-01   8.5440680e-01   2.4195741e-01   6.1135434e-01   3.0143288e+00   2.8035152e+00   3.2080663e+00   2.3476141e+00   2.9053991e+00   2.8028019e+00   3.0030626e+00   1.7519158e+00   2.9045816e+00   2.2149484e+00   2.0887699e+00   2.5048522e+00   2.3645147e+00   3.0018766e+00   1.9120303e+00   2.7085154e+00   2.8028008e+00   2.4075162e+00   2.8284908e+00   2.2272457e+00   3.1054022e+00   2.3075573e+00   3.2060163e+00   3.0018874e+00   2.6044486e+00   2.7064438e+00   3.1073418e+00   3.3054063e+00   2.8034238e+00   1.8447840e+00   2.1492024e+00   2.0607272e+00   2.2122063e+00   3.4028104e+00   2.8028007e+00   2.8036182e+00   3.0057998e+00   2.7234787e+00   2.4027927e+00   2.3234132e+00   2.7070699e+00   2.9017335e+00   2.3151346e+00   1.8036834e+00   2.5072065e+00   2.5017313e+00   2.5032633e+00   2.6031823e+00   1.5292174e+00   2.4058519e+00   4.3116266e+00   3.4064593e+00   4.2076930e+00   3.9021503e+00   4.1063936e+00   4.9099401e+00   2.8141516e+00   4.6055969e+00   4.1036742e+00   4.4145324e+00   3.4082578e+00   3.6052799e+00   3.8082804e+00   3.3123693e+00   3.4273179e+00   3.6154977e+00   3.8026444e+00   5.0117750e+00   5.2107474e+00   3.3130198e+00   4.0114753e+00   3.2109395e+00   5.0107787e+00   3.2067490e+00   4.0058313e+00   4.3058539e+00   3.1067996e+00   3.2049797e+00   3.9061098e+00   4.1066170e+00   4.4095056e+00   4.7221364e+00   3.9082316e+00   3.4019453e+00   3.9014304e+00   4.4232188e+00   3.9139973e+00   3.8023591e+00   3.1057392e+00   3.7104219e+00   3.9150553e+00   3.4248402e+00   3.4064593e+00   4.2084919e+00   4.0172759e+00   
3.5193527e+00   3.3100431e+00   3.5073655e+00   3.7133435e+00   3.4036743e+00   4.0004442e-01   5.0043084e-01   3.4085233e-01   8.0046764e-01   2.2573593e-01   4.0243965e-01   4.2362917e-01   1.2036925e+00   1.1896595e+00   8.0879776e-01   5.0000761e-01   1.1006371e+00   5.2133179e-01   8.0046685e-01   5.0437695e-01   4.0125062e-01   5.0477564e-01   5.0043084e-01   4.5148429e-01   4.0125062e-01   6.0000952e-01   6.0000317e-01   2.2608083e-01   3.0922892e-01   8.0000160e-01   7.4269314e-01   9.6572569e-01   3.4085233e-01   4.0246123e-01   9.0000136e-01   3.4085233e-01   4.0127250e-01   5.0001522e-01   4.0004442e-01   1.1000003e+00   2.2608083e-01   4.1317535e-01   5.7609230e-01   4.0122873e-01   5.2167829e-01   2.0061436e-01   7.0088627e-01   4.0004442e-01   3.3852404e+00   3.1245391e+00   3.5521657e+00   2.6057331e+00   3.2281303e+00   3.1021033e+00   3.3145497e+00   1.9088256e+00   3.2358110e+00   2.5040476e+00   2.1337832e+00   2.8091158e+00   2.6173653e+00   3.3068237e+00   2.2078368e+00   3.0635687e+00   3.1029264e+00   2.7045714e+00   3.1156892e+00   2.5038387e+00   3.4072735e+00   2.6199287e+00   3.5105217e+00   3.3061800e+00   2.9316687e+00   3.0488379e+00   3.4462681e+00   3.6292576e+00   3.1074604e+00   2.1103491e+00   2.4046650e+00   2.3052527e+00   2.5074705e+00   3.7037846e+00   3.1023805e+00   3.1087156e+00   3.3416864e+00   3.0212423e+00   2.7029308e+00   2.6036513e+00   3.0012006e+00   3.2078939e+00   2.6064541e+00   1.9145304e+00   2.8026114e+00   2.8028068e+00   2.8033825e+00   2.9167099e+00   1.6147493e+00   2.7040740e+00   4.6133719e+00   3.7058811e+00   4.5290217e+00   4.2056470e+00   4.4115634e+00   5.2381327e+00   3.1057013e+00   4.9271590e+00   4.4118721e+00   4.7354168e+00   3.7201124e+00   3.9113698e+00   4.1247181e+00   3.6087856e+00   3.7244383e+00   3.9212835e+00   4.1101783e+00   5.3422962e+00   5.5362181e+00   3.6046999e+00   4.3279835e+00   3.5095358e+00   5.3412086e+00   3.5135120e+00   4.3162096e+00   4.6297141e+00   3.4124092e+00   
3.5088081e+00   4.2105763e+00   4.4358170e+00   4.7408876e+00   5.0762364e+00   4.2125085e+00   3.7079173e+00   4.2021973e+00   4.7752666e+00   4.2166536e+00   4.1080028e+00   3.4084548e+00   4.0338654e+00   4.2256165e+00   3.7563734e+00   3.7058811e+00   4.5190617e+00   4.3264209e+00   3.8360186e+00   3.6136974e+00   3.8177300e+00   4.0156240e+00   3.7048582e+00   6.3164977e-01   3.0017653e-01   4.1209001e-01   2.0061436e-01   4.0127250e-01   7.0911112e-01   8.2458409e-01   1.0207396e+00   5.2201750e-01   1.2699992e-01   7.0470867e-01   4.0004442e-01   4.0122873e-01   3.0482299e-01   5.2167208e-01   3.0490481e-01   4.0122873e-01   4.0002221e-01   2.0061436e-01   2.0061436e-01   2.0061436e-01   3.0482299e-01   3.0482299e-01   4.0122873e-01   7.0008584e-01   8.0879701e-01   3.0017653e-01   3.0474106e-01   5.0043084e-01   3.0017653e-01   6.0964597e-01   1.0000000e-01   2.0121983e-01   1.1019599e+00   6.0035305e-01   4.0004442e-01   4.5148429e-01   4.0127250e-01   4.0004442e-01   4.0125062e-01   3.3808272e-01   1.1269424e-01   3.2369541e+00   3.0101869e+00   3.4219340e+00   2.5073576e+00   3.1113295e+00   3.0016913e+00   3.2074921e+00   1.8128536e+00   3.1127326e+00   2.4076937e+00   2.0429861e+00   2.7074657e+00   2.5087337e+00   3.2029987e+00   2.1087640e+00   2.9250474e+00   3.0040848e+00   2.6011837e+00   3.0090716e+00   2.4029250e+00   3.3087901e+00   2.5074281e+00   3.4046875e+00   3.2018065e+00   2.8107271e+00   2.9185950e+00   3.3183094e+00   3.5134617e+00   3.0049285e+00   2.0041542e+00   2.3049133e+00   2.2050331e+00   2.4035997e+00   3.6030023e+00   3.0040438e+00   3.0070658e+00   3.2168317e+00   2.9083216e+00   2.6031436e+00   2.5048522e+00   2.9013423e+00   3.1034810e+00   2.5032729e+00   1.8201043e+00   2.7028014e+00   2.7016556e+00   2.7027522e+00   2.8056775e+00   1.5256523e+00   2.6033557e+00   4.5162553e+00   3.6081006e+00   4.4160732e+00   4.1039121e+00   4.3103378e+00   5.1203327e+00   3.0096880e+00   4.8129366e+00   4.3058720e+00   4.6249088e+00   
3.6148619e+00   3.8081633e+00   4.0157626e+00   3.5129206e+00   3.6349703e+00   3.8226858e+00   4.0057080e+00   5.2232912e+00   5.4204287e+00   3.5035589e+00   4.2200398e+00   3.4145570e+00   5.2217206e+00   3.4096180e+00   4.2110197e+00   4.5140458e+00   3.3101076e+00   3.4081996e+00   4.1095117e+00   4.3161641e+00   4.6204721e+00   4.9419857e+00   4.1123051e+00   3.6033860e+00   4.1009647e+00   4.6434791e+00   4.1197833e+00   4.0049425e+00   3.3090452e+00   3.9205015e+00   4.1230798e+00   3.6413278e+00   3.6081006e+00   4.4145323e+00   4.2254713e+00   3.7302938e+00   3.5112285e+00   3.7130507e+00   3.9190472e+00   3.6058055e+00   5.0043842e-01   1.0426513e+00   5.2167208e-01   4.0004442e-01   3.0026460e-01   1.4542931e+00   1.5965783e+00   1.1269511e+00   7.4262964e-01   1.3253871e+00   9.3306807e-01   1.0032293e+00   8.5406674e-01   7.0470720e-01   7.0633229e-01   5.7608844e-01   6.0017982e-01   6.3192325e-01   8.2418071e-01   8.0879625e-01   3.4080442e-01   4.0243965e-01   1.0030871e+00   1.2189645e+00   1.3741465e+00   5.0043842e-01   6.0201716e-01   1.1055705e+00   5.0043842e-01   1.1269424e-01   7.1621748e-01   6.7616902e-01   6.0000952e-01   3.0008832e-01   6.8170466e-01   9.3735629e-01   4.0004442e-01   9.3308853e-01   3.0474106e-01   9.6572569e-01   6.0948212e-01   3.4311880e+00   3.1440065e+00   3.5828092e+00   2.6061623e+00   3.2490712e+00   3.1047537e+00   3.3265679e+00   1.9024467e+00   3.2610547e+00   2.5066443e+00   2.1042326e+00   2.8182771e+00   2.6268573e+00   3.3136174e+00   2.2177383e+00   3.1042292e+00   3.1056084e+00   2.7106185e+00   3.1258664e+00   2.5072166e+00   3.4123850e+00   2.6397031e+00   3.5191318e+00   3.3125861e+00   2.9569988e+00   3.0825000e+00   3.4750557e+00   3.6484459e+00   3.1148203e+00   2.1231535e+00   2.4058952e+00   2.3063814e+00   2.5167763e+00   3.7071732e+00   3.1042001e+00   3.1166462e+00   3.3690976e+00   3.0370401e+00   2.7067267e+00   2.6060811e+00   3.0024163e+00   3.2157547e+00   2.6139440e+00   1.9029771e+00   
2.8056558e+00   2.8068283e+00   2.8077255e+00   2.9323793e+00   1.6119586e+00   2.7091848e+00   4.6187512e+00   3.7092298e+00   4.5442576e+00   4.2099019e+00   4.4180513e+00   5.2548586e+00   3.1079005e+00   4.9407795e+00   4.4195588e+00   4.7516511e+00   3.7329384e+00   3.9191954e+00   4.1389224e+00   3.6127211e+00   3.7328453e+00   3.9318950e+00   4.1174259e+00   5.3601143e+00   5.5513974e+00   3.6073242e+00   4.3425018e+00   3.5138357e+00   5.3586950e+00   3.5235095e+00   4.3257995e+00   4.6452056e+00   3.4217238e+00   3.5154314e+00   4.2169032e+00   4.4544908e+00   4.7602896e+00   5.1057502e+00   4.2193718e+00   3.7147036e+00   4.2043114e+00   4.8058872e+00   4.2239687e+00   4.1138950e+00   3.4146523e+00   4.0526593e+00   4.2382079e+00   3.7847403e+00   3.7092298e+00   4.5291248e+00   4.3385161e+00   3.8547029e+00   3.6228903e+00   3.8290678e+00   4.0228342e+00   3.7082809e+00   6.3164977e-01   3.0026460e-01   1.2085435e-01   6.0948506e-01   1.0143978e+00   1.3131369e+00   8.0928056e-01   4.0246123e-01   8.5409862e-01   7.0016860e-01   5.0477564e-01   6.0201716e-01   5.6595908e-01   4.0363334e-01   4.1212852e-01   1.2699992e-01   3.3818226e-01   4.1210927e-01   3.3818226e-01   2.0181667e-01   1.2085435e-01   5.0855077e-01   1.0001598e+00   1.1055707e+00   0.0000000e+00   3.0026460e-01   6.0964891e-01   0.0000000e+00   5.0043842e-01   3.0482299e-01   4.0246123e-01   8.0254500e-01   5.0043842e-01   5.2133802e-01   7.0556260e-01   2.0181667e-01   7.0008735e-01   3.0026460e-01   6.0948506e-01   2.0181667e-01   3.2490712e+00   3.0153168e+00   3.4297841e+00   2.5067523e+00   3.1166337e+00   3.0027816e+00   3.2112793e+00   1.8068048e+00   3.1183051e+00   2.4116924e+00   2.0138832e+00   2.7116615e+00   2.5059537e+00   3.2048192e+00   2.1144760e+00   2.9351753e+00   3.0063019e+00   2.6019122e+00   3.0106587e+00   2.4030297e+00   3.3125861e+00   2.5120719e+00   3.4068163e+00   3.2029877e+00   2.8162444e+00   2.9267417e+00   3.3252407e+00   3.5189464e+00   3.0077107e+00   
2.0051350e+00   2.3037132e+00   2.2028146e+00   2.4058620e+00   3.6044981e+00   3.0062070e+00   3.0107283e+00   3.2237456e+00   2.9105093e+00   2.6052541e+00   2.5062865e+00   2.9018772e+00   3.1056084e+00   2.5048522e+00   1.8082911e+00   2.7043948e+00   2.7029415e+00   2.7046027e+00   2.8091099e+00   1.5248852e+00   2.6055127e+00   4.5209020e+00   3.6112573e+00   4.4212031e+00   4.1056541e+00   4.3138986e+00   5.1255338e+00   3.0133997e+00   4.8167235e+00   4.3081273e+00   4.6319211e+00   3.6205854e+00   3.8114965e+00   4.0212972e+00   3.5173798e+00   3.6449970e+00   3.8299342e+00   4.0081754e+00   5.2290121e+00   5.4254411e+00   3.5039202e+00   4.2264145e+00   3.4198378e+00   5.2270034e+00   3.4138008e+00   4.2149806e+00   4.5183778e+00   3.3145502e+00   3.4118179e+00   4.1129687e+00   4.3210760e+00   4.6261633e+00   4.9512603e+00   4.1165035e+00   3.6051692e+00   4.1014742e+00   4.6540056e+00   4.1257291e+00   4.0071257e+00   3.3129914e+00   3.9274863e+00   4.1301604e+00   3.6542046e+00   3.6112573e+00   4.4192311e+00   4.2328883e+00   3.7399948e+00   3.5155767e+00   3.7180846e+00   3.9250546e+00   3.6083191e+00   6.0184622e-01   7.4263078e-01   1.1138955e+00   4.2268438e-01   7.0096708e-01   2.4170870e-01   3.0490481e-01   3.0490481e-01   3.0017653e-01   3.0474106e-01   3.0474106e-01   8.0879701e-01   4.2362917e-01   6.1119267e-01   7.0462697e-01   4.1317535e-01   2.2538848e-01   3.0482299e-01   7.1621748e-01   6.7616723e-01   3.0474106e-01   4.0125062e-01   5.0001522e-01   6.3164977e-01   5.2491131e-01   2.2573593e-01   6.3164977e-01   1.0207396e+00   3.3808272e-01   4.0246123e-01   1.4180463e+00   1.0030868e+00   4.5148429e-01   4.1317535e-01   7.4263078e-01   3.0017653e-01   8.0879701e-01   1.0000000e-01   4.5078948e-01   3.2116783e+00   3.0049285e+00   3.4072983e+00   2.5182898e+00   3.1051604e+00   3.0020136e+00   3.2049016e+00   1.8469618e+00   3.1036832e+00   2.4099081e+00   2.1180493e+00   2.7068820e+00   2.5224740e+00   3.2021231e+00   2.1097449e+00   
2.9077617e+00   3.0041462e+00   2.6022422e+00   3.0134290e+00   2.4087504e+00   3.3085101e+00   2.5050799e+00   3.4038679e+00   3.2010814e+00   2.8036959e+00   2.9060895e+00   3.3058271e+00   3.5063866e+00   3.0043212e+00   2.0122773e+00   2.3159426e+00   2.2186306e+00   2.4051454e+00   3.6029749e+00   3.0041461e+00   3.0062373e+00   3.2059465e+00   2.9096170e+00   2.6032656e+00   2.5097004e+00   2.9028411e+00   3.1023606e+00   2.5057847e+00   1.8685354e+00   2.7039990e+00   2.7016498e+00   2.7029428e+00   2.8028074e+00   1.5747520e+00   2.6039937e+00   4.5157550e+00   3.6083209e+00   4.4088451e+00   4.1031691e+00   4.3089952e+00   5.1095334e+00   3.0117336e+00   4.8052574e+00   4.3035619e+00   4.6175091e+00   3.6116958e+00   3.8067089e+00   4.0107159e+00   3.5138361e+00   3.6350483e+00   3.8210210e+00   4.0037985e+00   5.2113565e+00   5.4105254e+00   3.5063553e+00   4.2147222e+00   3.4147657e+00   5.2097995e+00   3.4081036e+00   4.2080425e+00   4.5057296e+00   3.3089414e+00   3.4074852e+00   4.1084282e+00   4.3058539e+00   4.6088153e+00   4.9193995e+00   4.1112251e+00   3.6020843e+00   4.1009356e+00   4.6223848e+00   4.1190046e+00   4.0036188e+00   3.3085886e+00   3.9129256e+00   4.1197933e+00   3.6305006e+00   3.6083209e+00   4.4113183e+00   4.2225427e+00   3.7249938e+00   3.5105217e+00   3.7103007e+00   3.9184088e+00   3.6056580e+00   4.0125062e-01   5.7609230e-01   1.0095367e+00   1.0776296e+00   6.3322667e-01   3.0490481e-01   9.0140221e-01   4.1212852e-01   6.0000317e-01   3.4085233e-01   6.0035305e-01   3.3818226e-01   3.0000000e-01   4.0122873e-01   2.2538848e-01   4.0004442e-01   4.0122873e-01   2.0061436e-01   3.0000000e-01   6.0017982e-01   7.0462844e-01   8.5406616e-01   3.0026460e-01   4.0243965e-01   7.0088477e-01   3.0026460e-01   4.5783248e-01   3.0008832e-01   3.0490481e-01   1.1002025e+00   4.1315633e-01   4.0125062e-01   4.2362917e-01   4.0125062e-01   4.1209001e-01   2.4170870e-01   5.0436965e-01   2.2573593e-01   3.1712557e+00   2.9203034e+00   
3.3425817e+00   2.4092081e+00   3.0228582e+00   2.9024211e+00   3.1131137e+00   1.7168003e+00   3.0276611e+00   2.3094323e+00   1.9540727e+00   2.6109956e+00   2.4153242e+00   3.1056218e+00   2.0123796e+00   2.8520945e+00   2.9050328e+00   2.5029614e+00   2.9148948e+00   2.3042831e+00   3.2109395e+00   2.4161682e+00   3.3086859e+00   3.1042389e+00   2.7244207e+00   2.8394157e+00   3.2369857e+00   3.4247142e+00   2.9077271e+00   1.9085444e+00   2.2064916e+00   2.1068047e+00   2.3066817e+00   3.5042241e+00   2.9048033e+00   2.9102290e+00   3.1338090e+00   2.8169587e+00   2.5042601e+00   2.4061715e+00   2.8017212e+00   3.0065627e+00   2.4058322e+00   1.7261843e+00   2.6037439e+00   2.6027120e+00   2.6040234e+00   2.7127458e+00   1.4350761e+00   2.5049231e+00   4.4189015e+00   3.5095669e+00   4.3257995e+00   4.0057109e+00   4.2135057e+00   5.0324952e+00   2.9113810e+00   4.7221382e+00   4.2099962e+00   4.5353918e+00   3.5215862e+00   3.7118930e+00   3.9240025e+00   3.4150232e+00   3.5401623e+00   3.7282910e+00   3.9092259e+00   5.1365012e+00   5.3314853e+00   3.4049933e+00   4.1286955e+00   3.3168890e+00   5.1347989e+00   3.3143385e+00   4.1161770e+00   4.4243750e+00   3.2143454e+00   3.3110189e+00   4.0125032e+00   4.2289520e+00   4.5343227e+00   4.8661173e+00   4.0156353e+00   3.5063553e+00   4.0017163e+00   4.5675364e+00   4.0235140e+00   3.9076272e+00   3.2116700e+00   3.8321139e+00   4.0301570e+00   3.5598557e+00   3.5095669e+00   4.3201293e+00   4.1322798e+00   3.6413292e+00   3.4157005e+00   3.6188994e+00   3.8226858e+00   3.5071409e+00   5.0436235e-01   1.1269511e+00   1.4180734e+00   9.1446938e-01   5.0476836e-01   9.6593231e-01   8.0051115e-01   6.1119558e-01   7.0176271e-01   6.0964891e-01   4.3213914e-01   5.2133179e-01   2.2573593e-01   4.1420960e-01   5.2133802e-01   4.5078948e-01   2.2608083e-01   2.0121983e-01   6.1119558e-01   1.1005364e+00   1.2089192e+00   1.2085435e-01   2.4195741e-01   7.1621884e-01   1.2085435e-01   4.0004442e-01   4.1212852e-01   
5.0085236e-01   7.0096858e-01   4.0127250e-01   5.6394820e-01   8.0967961e-01   2.0000000e-01   8.0051115e-01   2.2573593e-01   7.1621884e-01   3.0482299e-01   3.3545239e+00   3.1166331e+00   3.5333785e+00   2.6054739e+00   3.2183845e+00   3.1025789e+00   3.3116521e+00   1.9046783e+00   3.2211369e+00   2.5096353e+00   2.1074907e+00   2.8107054e+00   2.6064541e+00   3.3051050e+00   2.2121875e+00   3.0393610e+00   3.1054994e+00   2.7022579e+00   3.1107490e+00   2.5027328e+00   3.4112739e+00   2.6129479e+00   3.5073688e+00   3.3035252e+00   2.9185900e+00   3.0300451e+00   3.4286400e+00   3.6205854e+00   3.1074470e+00   2.1053074e+00   2.4030297e+00   2.3022754e+00   2.5057763e+00   3.7043108e+00   3.1053329e+00   3.1100313e+00   3.3265652e+00   3.0118276e+00   2.7046025e+00   2.6052853e+00   3.0016501e+00   3.2059133e+00   2.6047974e+00   1.9052628e+00   2.8038694e+00   2.8028007e+00   2.8041967e+00   2.9102290e+00   1.6179159e+00   2.7049931e+00   4.6192199e+00   3.7100254e+00   4.5225779e+00   4.2056438e+00   4.4133506e+00   5.2278849e+00   3.1114444e+00   4.9186970e+00   4.4088300e+00   4.7323336e+00   3.7201124e+00   3.9113387e+00   4.1217116e+00   3.6152935e+00   3.7397620e+00   3.9276515e+00   4.1085246e+00   5.3314853e+00   5.5274937e+00   3.6037456e+00   4.3264210e+00   3.5173586e+00   5.3296471e+00   3.5134601e+00   4.3151165e+00   4.6204664e+00   3.4137985e+00   3.5110031e+00   4.2123903e+00   4.4237218e+00   4.7288133e+00   5.0555470e+00   4.2155430e+00   3.7056457e+00   4.2016096e+00   4.7574592e+00   4.2235569e+00   4.1072664e+00   3.4118179e+00   4.0283196e+00   4.2288238e+00   3.7532858e+00   3.7100254e+00   4.5190617e+00   4.3311362e+00   3.8383398e+00   3.6148683e+00   3.8177286e+00   4.0227665e+00   3.7075359e+00   1.5237054e+00   1.5778323e+00   1.1528553e+00   8.0928056e-01   1.4109657e+00   9.0296858e-01   1.1060939e+00   8.5617086e-01   6.0184934e-01   8.2671175e-01   8.1112984e-01   7.1621748e-01   7.2113820e-01   9.0642722e-01   9.0166476e-01   
5.2167829e-01   5.6347978e-01   1.1011719e+00   1.1531951e+00   1.3523685e+00   6.0948506e-01   7.0008735e-01   1.2012929e+00   6.0948506e-01   2.0121983e-01   8.0488008e-01   7.1636719e-01   7.0025283e-01   2.2608083e-01   7.4418186e-01   9.6702272e-01   5.0476836e-01   9.0657583e-01   3.4085233e-01   1.0214933e+00   7.0176271e-01   3.7102713e+00   3.4382051e+00   3.8712380e+00   2.9060895e+00   3.5425492e+00   3.4047913e+00   3.6240078e+00   2.2025238e+00   3.5521653e+00   2.8062729e+00   2.4042873e+00   3.1166330e+00   2.9229849e+00   3.6127194e+00   2.5155829e+00   3.3866455e+00   3.4056098e+00   3.0096888e+00   3.4232171e+00   2.8068501e+00   3.7118528e+00   2.9335034e+00   3.8176417e+00   3.6116893e+00   3.2480452e+00   3.3690976e+00   3.7643838e+00   3.9429243e+00   3.4137984e+00   2.4191998e+00   2.7057317e+00   2.6060678e+00   2.8148779e+00   4.0071259e+00   3.4042418e+00   3.4154455e+00   3.6592943e+00   3.3320889e+00   3.0065584e+00   2.9059779e+00   3.3025806e+00   3.5145393e+00   2.9126049e+00   2.2031052e+00   3.1056091e+00   3.1065947e+00   3.1074470e+00   3.2280982e+00   1.9100994e+00   3.0087060e+00   4.9179684e+00   4.0090033e+00   4.8406432e+00   4.5097222e+00   4.7173415e+00   5.5505575e+00   3.4073974e+00   5.2376127e+00   4.7184838e+00   5.0477042e+00   4.0301568e+00   4.2181242e+00   4.4357103e+00   3.9120615e+00   4.0296437e+00   4.2295175e+00   4.4165186e+00   5.6553854e+00   5.8478137e+00   3.9072483e+00   4.6391758e+00   3.8129545e+00   5.6540008e+00   3.8217034e+00   4.6242406e+00   4.9412981e+00   3.7201124e+00   3.8146271e+00   4.5162248e+00   4.7490801e+00   5.0547228e+00   5.3951639e+00   4.5184830e+00   4.0138270e+00   4.5043856e+00   5.0949990e+00   4.5225786e+00   4.4133506e+00   3.7138970e+00   4.3475342e+00   4.5353918e+00   4.0747727e+00   4.0090033e+00   4.8274110e+00   4.6357670e+00   4.1493188e+00   3.9212879e+00   4.1268500e+00   4.3214438e+00   4.0081754e+00   4.1317535e-01   4.0127250e-01   7.1629303e-01   5.0043842e-01   
7.0096858e-01   6.3912709e-01   7.0184453e-01   1.2003596e+00   7.9871893e-01   1.0286506e+00   1.0433442e+00   8.2635069e-01   6.3309012e-01   6.7626502e-01   1.1286016e+00   1.0782105e+00   6.1135434e-01   6.0184934e-01   3.0915245e-01   1.0143978e+00   9.0155393e-01   5.0436965e-01   1.0143978e+00   1.4324323e+00   7.4329414e-01   8.0879776e-01   1.7570482e+00   1.4092511e+00   8.1343016e-01   7.8895472e-01   1.1269511e+00   7.0470720e-01   1.2189701e+00   5.0855077e-01   8.5406616e-01   3.5025396e+00   3.3027388e+00   3.7022129e+00   2.8281704e+00   3.4036672e+00   3.3025779e+00   3.5030234e+00   2.1725076e+00   3.4018155e+00   2.7109019e+00   2.4518621e+00   3.0049127e+00   2.8364042e+00   3.5019450e+00   2.4088882e+00   3.2025657e+00   3.3031143e+00   2.9050277e+00   3.3192099e+00   2.7159616e+00   3.6057054e+00   2.8056568e+00   3.7048624e+00   3.5016345e+00   3.1026586e+00   3.2026875e+00   3.6024856e+00   3.8034160e+00   3.3035252e+00   2.3226028e+00   2.6270968e+00   2.5319718e+00   2.7081195e+00   3.9029099e+00   3.3031170e+00   3.3039553e+00   3.5023855e+00   3.2150432e+00   2.9028412e+00   2.8148779e+00   3.2051933e+00   3.4018781e+00   2.8098127e+00   2.1974660e+00   3.0055598e+00   3.0017653e+00   3.0030650e+00   3.1026235e+00   1.9002712e+00   2.9047832e+00   4.8115512e+00   3.9065683e+00   4.7047990e+00   4.4023911e+00   4.6064349e+00   5.4038141e+00   3.3119500e+00   5.1019028e+00   4.6029848e+00   4.9110422e+00   3.9076509e+00   4.1051819e+00   4.3067850e+00   3.8114951e+00   3.9246405e+00   4.1145310e+00   4.3025710e+00   5.5046681e+00   5.7049556e+00   3.8098343e+00   4.5095384e+00   3.7106240e+00   5.5035834e+00   3.7063915e+00   4.5052925e+00   4.8020893e+00   3.6066590e+00   3.7052383e+00   4.4062090e+00   4.6017113e+00   4.9033376e+00   5.2065497e+00   4.4082090e+00   3.9018737e+00   4.4013937e+00   4.9097097e+00   4.4135256e+00   4.3024877e+00   3.6059708e+00   4.2076410e+00   4.4136664e+00   3.9189006e+00   3.9065683e+00   4.7076752e+00   
4.5157700e+00   4.0166034e+00   3.8091065e+00   4.0069397e+00   4.2129114e+00   3.9040721e+00   5.0476836e-01   9.1422402e-01   6.0017982e-01   6.7616723e-01   1.0001903e+00   7.4262850e-01   1.1298636e+00   1.1055799e+00   1.0782211e+00   1.4043036e+00   1.0207260e+00   9.0508712e-01   1.0030871e+00   1.2632996e+00   1.3253497e+00   1.0001598e+00   5.0855077e-01   2.4195741e-01   1.3131369e+00   1.2089895e+00   9.0007572e-01   1.3131369e+00   1.5263518e+00   1.0087393e+00   9.3308891e-01   2.1138769e+00   1.4140515e+00   9.3308891e-01   6.8160885e-01   1.4180436e+00   6.7626681e-01   1.3018145e+00   7.0470720e-01   1.1133986e+00   3.2054626e+00   3.0041787e+00   3.4044439e+00   2.6382589e+00   3.1129223e+00   3.0138245e+00   3.2030205e+00   2.1566438e+00   3.1086927e+00   2.4554557e+00   2.5269479e+00   2.7127202e+00   2.6737930e+00   3.2074207e+00   2.1510232e+00   2.9068053e+00   3.0077106e+00   2.6369414e+00   3.0816280e+00   2.4971338e+00   3.3055260e+00   2.5325155e+00   3.4206210e+00   3.2100081e+00   2.8135926e+00   2.9088609e+00   3.3100014e+00   3.5053029e+00   3.0107283e+00   2.1554609e+00   2.4508792e+00   2.3794578e+00   2.4537333e+00   3.6090037e+00   3.0077114e+00   3.0034200e+00   3.2047284e+00   2.9729700e+00   2.6131590e+00   2.5821442e+00   2.9309340e+00   3.1060464e+00   2.5610212e+00   2.2285414e+00   2.7317035e+00   2.7106184e+00   2.7159615e+00   2.8134622e+00   1.9767345e+00   2.6270952e+00   4.5095106e+00   3.6117736e+00   4.4050179e+00   4.1034650e+00   4.3058769e+00   5.1048430e+00   3.0395885e+00   4.8030341e+00   4.3077256e+00   4.6095748e+00   3.6067573e+00   3.8091370e+00   4.0067454e+00   3.5235094e+00   3.6257071e+00   3.8125141e+00   4.0031827e+00   5.2054101e+00   5.4066794e+00   3.5404367e+00   4.2082468e+00   3.4146523e+00   5.2054259e+00   3.4138230e+00   4.2042865e+00   4.5025743e+00   3.3123786e+00   3.4067938e+00   4.1072911e+00   4.3032007e+00   4.6053784e+00   4.9093646e+00   4.1089590e+00   3.6062594e+00   4.1061436e+00   
4.6117560e+00   4.1111295e+00   4.0026055e+00   3.3078313e+00   3.9072844e+00   4.1120111e+00   3.6177796e+00   3.6117736e+00   4.4064445e+00   4.2133758e+00   3.7157992e+00   3.5215805e+00   3.7072609e+00   3.9105673e+00   3.6051689e+00   4.1212852e-01   4.1212852e-01   3.0490481e-01   5.2167208e-01   3.0915245e-01   8.0097499e-01   6.1119558e-01   6.9518117e-01   9.0168933e-01   5.2491131e-01   4.0363334e-01   5.0085236e-01   7.8940551e-01   8.2462252e-01   5.0042326e-01   3.1328089e-01   3.0490481e-01   8.0928056e-01   7.0470867e-01   4.0125062e-01   8.0928056e-01   1.0776294e+00   5.0517282e-01   4.5078948e-01   1.6096629e+00   1.0207396e+00   4.5847767e-01   6.0184622e-01   9.1422402e-01   3.4085233e-01   8.5406674e-01   2.4195741e-01   6.0964891e-01   3.4079041e+00   3.2018548e+00   3.6045966e+00   2.7227151e+00   3.3029106e+00   3.2014779e+00   3.4016816e+00   2.0608234e+00   3.3024690e+00   2.6067721e+00   2.3393392e+00   2.9023862e+00   2.7310942e+00   3.4010299e+00   2.3048574e+00   3.1044057e+00   3.2014774e+00   2.8036023e+00   3.2152055e+00   2.6124447e+00   3.5030234e+00   2.7035171e+00   3.6034258e+00   3.4010358e+00   3.0022434e+00   3.1033306e+00   3.5041120e+00   3.7031277e+00   3.2018065e+00   2.2177966e+00   2.5220687e+00   2.4265152e+00   2.6055197e+00   3.8016494e+00   3.2014773e+00   3.2019092e+00   3.4031881e+00   3.1122360e+00   2.8013347e+00   2.7110046e+00   3.1036553e+00   3.3009330e+00   2.7070770e+00   2.0855888e+00   2.9035486e+00   2.9008500e+00   2.9016034e+00   3.0016038e+00   1.7857124e+00   2.8028019e+00   4.7076061e+00   3.8037955e+00   4.6049801e+00   4.3013465e+00   4.5040960e+00   5.3068325e+00   3.2075036e+00   5.0037550e+00   4.5023523e+00   4.8096009e+00   3.8048551e+00   4.0031892e+00   4.2051335e+00   3.7071735e+00   3.8161632e+00   4.0093901e+00   4.2016368e+00   5.4081547e+00   5.6075434e+00   3.7075525e+00   4.4072835e+00   3.6062405e+00   5.4074634e+00   3.6038441e+00   4.4036959e+00   4.7038247e+00   3.5038075e+00   
3.6028345e+00   4.3038299e+00   4.5042394e+00   4.8062731e+00   5.1150203e+00   4.3051629e+00   3.8011413e+00   4.3008955e+00   4.8153681e+00   4.3087925e+00   4.2014602e+00   3.5032125e+00   4.1063865e+00   4.3094598e+00   3.8146853e+00   3.8037955e+00   4.6054982e+00   4.4109814e+00   3.9115832e+00   3.7058197e+00   3.9043917e+00   4.1081838e+00   3.8021570e+00   6.0365948e-01   3.0008832e-01   3.3818226e-01   2.0121983e-01   5.2133802e-01   3.0915245e-01   5.0437695e-01   5.0043842e-01   2.0181667e-01   1.2085435e-01   1.2085435e-01   4.1317535e-01   4.1317535e-01   3.0026460e-01   6.0018299e-01   7.0462697e-01   4.0246123e-01   3.0490481e-01   4.0004442e-01   4.0246123e-01   7.1621884e-01   1.2085435e-01   1.1269424e-01   1.2036863e+00   7.0088627e-01   3.0482299e-01   5.0436965e-01   5.0436235e-01   3.0482299e-01   5.0436965e-01   2.2608083e-01   2.0121983e-01   3.3237063e+00   3.1056091e+00   3.5138377e+00   2.6067805e+00   3.2064817e+00   3.1008890e+00   3.3041599e+00   1.9144935e+00   3.2074507e+00   2.5042498e+00   2.1492024e+00   2.8038903e+00   2.6091437e+00   3.3015588e+00   2.2041706e+00   3.0148613e+00   3.1021975e+00   2.7007716e+00   3.1069083e+00   2.5027328e+00   3.4052051e+00   2.6037226e+00   3.5028446e+00   3.3009330e+00   2.9058292e+00   3.0107430e+00   3.4113341e+00   3.6081814e+00   3.1026177e+00   2.1035154e+00   2.4051766e+00   2.3058791e+00   2.5019964e+00   3.7017412e+00   3.1021847e+00   3.1038570e+00   3.3100818e+00   3.0059450e+00   2.7015162e+00   2.6035107e+00   3.0009631e+00   3.2017847e+00   2.6021247e+00   1.9231085e+00   2.8015882e+00   2.8007533e+00   2.8013565e+00   2.9028946e+00   1.6222582e+00   2.7017239e+00   4.6112605e+00   3.7050457e+00   4.5107898e+00   4.2023583e+00   4.4067851e+00   5.2146266e+00   3.1060428e+00   4.9089682e+00   4.4037570e+00   4.7173415e+00   3.7092301e+00   3.9050336e+00   4.1101936e+00   3.6083379e+00   3.7235997e+00   3.9149881e+00   4.1034584e+00   5.3169366e+00   5.5149065e+00   3.6029421e+00   
4.3133953e+00   3.5091580e+00   5.3158283e+00   3.5057369e+00   4.3071174e+00   4.6095441e+00   3.4059695e+00   3.5048429e+00   4.2061215e+00   4.4109767e+00   4.7143113e+00   5.0310424e+00   4.2080636e+00   3.7018984e+00   4.2005765e+00   4.7313463e+00   4.2134002e+00   4.1029723e+00   3.4053426e+00   4.0133308e+00   4.2155438e+00   3.7272780e+00   3.7050457e+00   4.5097224e+00   4.3174320e+00   3.8199266e+00   3.6070223e+00   3.8081328e+00   4.0126677e+00   3.7034791e+00   6.0017665e-01   4.1210927e-01   6.0018299e-01   1.1133986e+00   6.3178534e-01   9.0142681e-01   8.5403486e-01   7.0462844e-01   5.0477564e-01   5.2491734e-01   1.0087252e+00   9.3306807e-01   4.1317535e-01   5.0517282e-01   4.1317535e-01   8.5409862e-01   7.5503094e-01   4.1317535e-01   8.5409862e-01   1.3133231e+00   6.0964891e-01   7.0548138e-01   1.5640758e+00   1.3027556e+00   7.0176271e-01   6.0017982e-01   9.6591433e-01   6.0000635e-01   1.1056693e+00   4.0127250e-01   7.1700909e-01   3.0056049e+00   2.8037508e+00   3.2038047e+00   2.3350905e+00   2.9043042e+00   2.8024566e+00   3.0040963e+00   1.7133143e+00   2.9021652e+00   2.2134864e+00   2.0299551e+00   2.5066443e+00   2.3464211e+00   3.0020171e+00   1.9120296e+00   2.7041827e+00   2.8038683e+00   2.4047867e+00   2.8219468e+00   2.2186306e+00   3.1079220e+00   2.3063026e+00   3.2048524e+00   3.0013665e+00   2.6029242e+00   2.7037323e+00   3.1033714e+00   3.3046348e+00   2.8041978e+00   1.8296471e+00   2.1344310e+00   2.0421601e+00   2.2088481e+00   3.4030563e+00   2.8038694e+00   2.8056176e+00   3.0035303e+00   2.7166861e+00   2.4032760e+00   2.3173405e+00   2.7049931e+00   2.9020943e+00   2.3107074e+00   1.7540491e+00   2.5057744e+00   2.5017294e+00   2.5032614e+00   2.6027348e+00   1.4738792e+00   2.4051328e+00   4.3150879e+00   3.4081972e+00   4.2065765e+00   3.9027794e+00   4.1082339e+00   4.9060122e+00   2.8144592e+00   4.6029848e+00   4.1031680e+00   4.4149701e+00   3.4105998e+00   3.6062867e+00   3.8091132e+00   3.3145497e+00   
3.4354252e+00   3.6203140e+00   3.8031346e+00   5.0073650e+00   5.2071845e+00   3.3100796e+00   4.0129286e+00   3.2145637e+00   5.0059527e+00   3.2079238e+00   4.0069158e+00   4.3033003e+00   3.1086424e+00   3.2069551e+00   3.9078313e+00   4.1030253e+00   4.4053246e+00   4.7120702e+00   3.9105941e+00   3.4019027e+00   3.9011588e+00   4.4155733e+00   3.9183552e+00   3.8030509e+00   3.1080886e+00   3.7106642e+00   3.9186175e+00   3.4279064e+00   3.4081972e+00   4.2100505e+00   4.0214626e+00   3.5236480e+00   3.3110446e+00   3.5093262e+00   3.7177968e+00   3.4052047e+00   4.1317535e-01   1.1269424e-01   5.6371422e-01   5.0084481e-01   4.5784410e-01   8.0000239e-01   4.0006662e-01   3.0017653e-01   4.0006662e-01   6.0948800e-01   7.0088627e-01   4.1210927e-01   3.0482299e-01   4.5080200e-01   7.0016860e-01   6.0184934e-01   4.1317535e-01   7.0016860e-01   8.5406674e-01   4.0002221e-01   3.0482299e-01   1.5012719e+00   7.4269314e-01   3.3818226e-01   4.0002221e-01   8.0046685e-01   1.1269424e-01   6.3165225e-01   2.0121983e-01   5.0002283e-01   3.2274206e+00   3.0066036e+00   3.4159337e+00   2.5238518e+00   3.1081931e+00   3.0018108e+00   3.2048307e+00   1.8672243e+00   3.1090333e+00   2.4088880e+00   2.1557835e+00   2.7050009e+00   2.5327574e+00   3.2021240e+00   2.1075767e+00   2.9175658e+00   3.0027966e+00   2.6034860e+00   3.0173373e+00   2.4124132e+00   3.3060311e+00   2.5063233e+00   3.4049933e+00   3.2016467e+00   2.8074882e+00   2.9128790e+00   3.3135401e+00   3.5094636e+00   3.0034944e+00   2.0185049e+00   2.3226176e+00   2.2272636e+00   2.4061715e+00   3.6025233e+00   3.0027816e+00   3.0045159e+00   3.2117473e+00   2.9147768e+00   2.6022650e+00   2.5117111e+00   2.9035536e+00   3.1022707e+00   2.5074705e+00   1.8959538e+00   2.7040250e+00   2.7012715e+00   2.7023320e+00   2.8040244e+00   1.6015419e+00   2.6035923e+00   4.5125057e+00   3.6062867e+00   4.4120448e+00   4.1027436e+00   4.3076126e+00   5.1160601e+00   3.0101869e+00   4.8099407e+00   4.3047534e+00   
4.6192050e+00   3.6105351e+00   3.8061121e+00   4.0115220e+00   3.5110245e+00   3.6271576e+00   3.8169672e+00   4.0039490e+00   5.2185416e+00   5.4163883e+00   3.5078417e+00   4.2149895e+00   3.4109302e+00   5.2173835e+00   3.4072913e+00   4.2079670e+00   4.5106052e+00   3.3073674e+00   3.4056857e+00   4.1070396e+00   4.3122866e+00   4.6159499e+00   4.9341410e+00   4.1092167e+00   3.6024856e+00   4.1011078e+00   4.6347105e+00   4.1150272e+00   4.0033723e+00   3.3063033e+00   3.9150644e+00   4.1174503e+00   3.6310621e+00   3.6062867e+00   4.4108290e+00   4.2194920e+00   3.7226812e+00   3.5095242e+00   3.7093173e+00   3.9142888e+00   3.6040604e+00   3.4342562e-01   8.5406616e-01   3.3813251e-01   6.0017665e-01   4.5078948e-01   4.0125062e-01   2.2573593e-01   3.0474106e-01   7.0008584e-01   6.0184622e-01   2.2538848e-01   7.0017011e-01   8.0046685e-01   5.0477564e-01   5.2167208e-01   4.0004442e-01   5.0477564e-01   1.0016896e+00   3.0474106e-01   4.5080200e-01   1.1531953e+00   1.0008617e+00   4.5080200e-01   4.1420960e-01   6.1119558e-01   4.1210927e-01   8.0051036e-01   3.0482299e-01   4.1210927e-01   3.0158412e+00   2.8068284e+00   3.2097099e+00   2.3108757e+00   2.9065890e+00   2.8022000e+00   3.0066661e+00   1.6225614e+00   2.9048017e+00   2.2116239e+00   1.8685354e+00   2.5096708e+00   2.3100354e+00   3.0026666e+00   1.9136652e+00   2.7108290e+00   2.8056165e+00   2.4010446e+00   2.8094419e+00   2.2042326e+00   3.1114445e+00   2.3060252e+00   3.2036636e+00   3.0010404e+00   2.6048201e+00   2.7083838e+00   3.1074856e+00   3.3083888e+00   2.8056953e+00   1.8055938e+00   2.1074903e+00   2.0078120e+00   2.2044098e+00   3.4034897e+00   2.8056164e+00   2.8086638e+00   3.0080453e+00   2.7058598e+00   2.4044765e+00   2.3071636e+00   2.7018643e+00   2.9031229e+00   2.3040302e+00   1.6345914e+00   2.5039390e+00   2.5021287e+00   2.5037126e+00   2.6035547e+00   1.3485963e+00   2.4045982e+00   4.3195471e+00   3.4105069e+00   4.2110205e+00   3.9039624e+00   4.1112641e+00   
4.9115281e+00   2.8134622e+00   4.6064153e+00   4.1040152e+00   4.4216160e+00   3.4153329e+00   3.6083651e+00   3.8136432e+00   3.3170079e+00   3.4454804e+00   3.6271127e+00   3.8048208e+00   5.0136911e+00   5.2125102e+00   3.3041886e+00   4.0185527e+00   3.2193583e+00   5.0117789e+00   3.2102572e+00   4.0101485e+00   4.3071174e+00   3.1116727e+00   3.2099137e+00   3.9105756e+00   4.1073261e+00   4.4108290e+00   4.7236350e+00   3.9141183e+00   3.4025037e+00   3.9008223e+00   4.4275986e+00   3.9240716e+00   3.8046112e+00   3.1114731e+00   3.7165788e+00   3.9250546e+00   3.4397950e+00   3.4105069e+00   4.2141201e+00   4.0283725e+00   3.5323888e+00   3.3126331e+00   3.5133654e+00   3.7236064e+00   3.4073749e+00   5.6371422e-01   4.0125062e-01   4.2362917e-01   7.0008735e-01   3.0017653e-01   2.2573593e-01   3.0490481e-01   5.2167829e-01   6.0202028e-01   3.3808272e-01   4.1210927e-01   5.2167829e-01   6.0201716e-01   5.0477564e-01   4.0363334e-01   6.0201716e-01   7.8895472e-01   3.0474106e-01   2.2608083e-01   1.4017696e+00   7.1636719e-01   2.2608083e-01   4.0002221e-01   7.0088627e-01   2.0121983e-01   5.6371422e-01   2.2538848e-01   4.0127250e-01   3.2269400e+00   3.0055754e+00   3.4153574e+00   2.5158464e+00   3.1070003e+00   3.0010043e+00   3.2037264e+00   1.8447764e+00   3.1084925e+00   2.4051329e+00   2.1169795e+00   2.7031257e+00   2.5230194e+00   3.2014729e+00   2.1040690e+00   2.9167440e+00   3.0016616e+00   2.6020655e+00   3.0122358e+00   2.4077390e+00   3.3040889e+00   2.5044039e+00   3.4036241e+00   3.2011770e+00   2.8066054e+00   2.9119764e+00   3.3128842e+00   3.5083774e+00   3.0022542e+00   2.0112442e+00   2.3146813e+00   2.2178117e+00   2.4035997e+00   3.6016272e+00   3.0016466e+00   3.0030180e+00   3.2109724e+00   2.9107838e+00   2.6012056e+00   2.5072166e+00   2.9020943e+00   3.1016037e+00   2.5045154e+00   1.8665804e+00   2.7022828e+00   2.7006623e+00   2.7012715e+00   2.8031364e+00   1.5665127e+00   2.6019940e+00   4.5096473e+00   3.6042722e+00   
4.4108394e+00   4.1020090e+00   4.3058539e+00   5.1154681e+00   3.0065585e+00   4.8095984e+00   4.3039464e+00   4.6166518e+00   3.6081814e+00   3.8045576e+00   4.0096177e+00   3.5076367e+00   3.6204901e+00   3.8129595e+00   4.0031500e+00   5.2178509e+00   5.4155855e+00   3.5053718e+00   4.2125027e+00   3.4076329e+00   5.2169557e+00   3.4052726e+00   4.2064771e+00   4.5101686e+00   3.3051940e+00   3.4039470e+00   4.1052782e+00   4.3120002e+00   4.6153660e+00   4.9336188e+00   4.1069501e+00   3.6018985e+00   4.1007374e+00   4.6331222e+00   4.1114855e+00   4.0025890e+00   3.3042986e+00   3.9129418e+00   4.1139050e+00   3.6259501e+00   3.6042722e+00   4.4088300e+00   4.2155438e+00   3.7181244e+00   3.5068261e+00   3.7072136e+00   3.9107458e+00   3.6027359e+00   7.1779518e-01   9.0005048e-01   6.8160885e-01   6.0980961e-01   6.3164977e-01   6.0964597e-01   6.0948506e-01   6.3178534e-01   8.0888055e-01   6.5712813e-01   9.1552331e-01   5.6595908e-01   4.5147187e-01   9.0026543e-01   5.6595908e-01   6.0201716e-01   5.6370994e-01   4.1212852e-01   1.3000455e+00   4.1315633e-01   6.1830764e-01   9.0511169e-01   6.0964891e-01   6.3178534e-01   4.5077696e-01   7.1621748e-01   4.5783248e-01   3.7510248e+00   3.5145413e+00   3.9319585e+00   3.0060350e+00   3.6168418e+00   3.5015800e+00   3.7092301e+00   2.3098782e+00   3.6209185e+00   2.9036019e+00   2.5319798e+00   3.2059465e+00   3.0125595e+00   3.7043511e+00   2.6050088e+00   3.4362995e+00   3.5023713e+00   3.1027850e+00   3.5112541e+00   2.9034027e+00   3.8056232e+00   3.0109656e+00   3.9070007e+00   3.7037948e+00   3.3177283e+00   3.4278716e+00   3.8279200e+00   4.0185639e+00   3.5049370e+00   2.5063489e+00   2.8048596e+00   2.7053910e+00   2.9045816e+00   4.1028806e+00   3.5020661e+00   3.5059137e+00   3.7249599e+00   3.4134555e+00   3.1021033e+00   3.0035422e+00   3.4012314e+00   3.6049329e+00   3.0042977e+00   2.3151346e+00   3.2021240e+00   3.2018065e+00   3.2023319e+00   3.3095196e+00   2.0139907e+00   3.1028259e+00   
5.0111326e+00   4.1049446e+00   4.9203200e+00   4.6042055e+00   4.8089555e+00   5.6273643e+00   3.5051449e+00   5.3190012e+00   4.8083867e+00   5.1260178e+00   4.1140184e+00   4.3082037e+00   4.5172930e+00   4.0074572e+00   4.1195081e+00   4.3162103e+00   4.5071301e+00   5.7305902e+00   5.9266306e+00   4.0041352e+00   4.7202045e+00   3.9078680e+00   5.7295930e+00   3.9093593e+00   4.7117434e+00   5.0203874e+00   3.8087102e+00   3.9064537e+00   4.6081315e+00   4.8239967e+00   5.1282911e+00   5.4538016e+00   4.6097450e+00   4.1052255e+00   4.6016323e+00   5.1527860e+00   4.6133719e+00   4.5057296e+00   3.8063301e+00   4.4231469e+00   4.6192070e+00   4.1384491e+00   4.1049446e+00   4.9142249e+00   4.7202041e+00   4.2256252e+00   4.0099717e+00   4.2125085e+00   4.4124585e+00   4.1039188e+00   3.4085233e-01   3.3818226e-01   1.2699992e-01   3.0922892e-01   3.3818226e-01   4.1212852e-01   3.4085233e-01   3.0490481e-01   8.0250202e-01   9.0192695e-01   4.0363334e-01   5.0437695e-01   4.5847767e-01   4.0363334e-01   7.0633229e-01   3.0482299e-01   4.0246123e-01   1.0095513e+00   7.0548283e-01   2.0181667e-01   5.0043084e-01   3.6452132e-01   5.0436965e-01   5.0855778e-01   4.1420960e-01   3.3813251e-01   3.0360001e+00   2.8068283e+00   3.2199554e+00   2.3040302e+00   2.9083228e+00   2.8004229e+00   3.0040677e+00   1.6099792e+00   2.9111118e+00   2.2023225e+00   1.8444678e+00   2.5026969e+00   2.3072196e+00   3.0013665e+00   1.9023442e+00   2.7227152e+00   2.8012528e+00   2.4005053e+00   2.8054838e+00   2.2013444e+00   3.1036553e+00   2.3040711e+00   3.2026875e+00   3.0010106e+00   2.6084695e+00   2.7159628e+00   3.1166009e+00   3.3100796e+00   2.8019019e+00   1.8020058e+00   2.1029252e+00   2.0034861e+00   2.2011906e+00   3.4011297e+00   2.8012319e+00   2.8028007e+00   3.0142197e+00   2.7060544e+00   2.4007552e+00   2.3017471e+00   2.7003774e+00   2.9016034e+00   2.3011979e+00   1.6179000e+00   2.5007284e+00   2.5003793e+00   2.5007008e+00   2.6035320e+00   1.3154932e+00   
2.4008859e+00   4.3091529e+00   3.4034897e+00   4.2123904e+00   3.9018704e+00   4.1056542e+00   4.9181670e+00   2.8038684e+00   4.6114520e+00   4.1039624e+00   4.4180514e+00   3.4084525e+00   3.6042874e+00   3.8104403e+00   3.3060311e+00   3.4198458e+00   3.6127194e+00   3.8032956e+00   5.0208687e+00   5.2178587e+00   3.3018328e+00   4.0133297e+00   3.2067991e+00   5.0200323e+00   3.2048524e+00   4.0067585e+00   4.3122441e+00   3.1047671e+00   3.2036090e+00   3.9049697e+00   4.1148052e+00   4.4184267e+00   4.7404155e+00   3.9065727e+00   3.4018864e+00   3.9004186e+00   4.4392802e+00   3.9110280e+00   3.8025990e+00   3.1038577e+00   3.7145622e+00   3.9140895e+00   3.4287259e+00   3.4034897e+00   4.2090841e+00   4.0156240e+00   3.5189468e+00   3.3056781e+00   3.5073617e+00   3.7102604e+00   3.4023494e+00   4.1315633e-01   3.0915245e-01   4.5078948e-01   5.2132556e-01   3.0482299e-01   3.3808272e-01   6.0964597e-01   7.0911112e-01   8.6051414e-01   4.1212852e-01   7.0016860e-01   7.4262964e-01   4.1212852e-01   6.1830489e-01   4.1209001e-01   6.0018299e-01   1.1056693e+00   6.0964597e-01   4.1317535e-01   4.1315633e-01   5.2133179e-01   4.2268438e-01   5.0084481e-01   5.2491131e-01   5.0043084e-01   2.9115264e+00   2.6338022e+00   3.0658361e+00   2.1172959e+00   2.7373419e+00   2.6040822e+00   2.8212040e+00   1.4407364e+00   2.7450517e+00   2.0182279e+00   1.7116683e+00   2.3196029e+00   2.1285888e+00   2.8091316e+00   1.7264084e+00   2.5863714e+00   2.6084695e+00   2.2054529e+00   2.6248910e+00   2.0083311e+00   2.9174382e+00   2.1301354e+00   3.0136614e+00   2.8068911e+00   2.4421128e+00   2.5659281e+00   2.9581275e+00   3.1380442e+00   2.6129786e+00   1.6191482e+00   1.9129988e+00   1.8141046e+00   2.0129571e+00   3.2064817e+00   2.6080847e+00   2.6171503e+00   2.8540078e+00   2.5288353e+00   2.2078337e+00   2.1116321e+00   2.5029614e+00   2.7108347e+00   2.1109969e+00   1.4620239e+00   2.3067198e+00   2.3048725e+00   2.3072196e+00   2.4221911e+00   1.1960519e+00   
2.2090466e+00   4.1263920e+00   3.2146438e+00   4.0362393e+00   3.7082866e+00   3.9191966e+00   4.7434452e+00   2.6190662e+00   4.4302264e+00   3.9142233e+00   4.2488425e+00   3.2328627e+00   3.4177609e+00   3.6349468e+00   3.1232336e+00   3.2606481e+00   3.4419771e+00   3.6135028e+00   4.8484858e+00   5.0414126e+00   3.1077602e+00   3.8409517e+00   3.0264502e+00   4.8462395e+00   3.0224855e+00   3.8231767e+00   4.1339850e+00   2.9228238e+00   3.0173096e+00   3.7181009e+00   3.9409635e+00   4.2473796e+00   4.5888422e+00   3.7226114e+00   3.2097424e+00   3.7024938e+00   4.2924779e+00   3.7339169e+00   3.6111693e+00   2.9185950e+00   3.5470876e+00   3.7434043e+00   3.2896416e+00   3.2146438e+00   4.0283196e+00   3.8460162e+00   3.3616756e+00   3.1242731e+00   3.3284650e+00   3.5333785e+00   3.2109426e+00   4.0122873e-01   5.0043084e-01   4.0243965e-01   3.0474106e-01   2.0061436e-01   4.5148429e-01   1.1000100e+00   1.2012928e+00   1.2699992e-01   4.0122873e-01   5.6595488e-01   1.2699992e-01   6.0184309e-01   4.0004442e-01   5.0436965e-01   7.1700909e-01   6.0201716e-01   5.2132556e-01   8.0051115e-01   2.2573593e-01   8.0000080e-01   4.0243965e-01   7.0088477e-01   3.0474106e-01   3.1428043e+00   2.9119661e+00   3.3252402e+00   2.4048325e+00   3.0131919e+00   2.9019362e+00   3.1087162e+00   1.7043719e+00   3.0148571e+00   2.3090280e+00   1.9099608e+00   2.6089255e+00   2.4039780e+00   3.1034773e+00   2.0109328e+00   2.8295063e+00   2.9047982e+00   2.5011632e+00   2.9079901e+00   2.3019339e+00   3.2101766e+00   2.4088882e+00   3.3051149e+00   3.1020645e+00   2.7127201e+00   2.8219255e+00   3.2211370e+00   3.4154432e+00   2.9057760e+00   1.9031964e+00   2.2023924e+00   2.1016800e+00   2.3040177e+00   3.5033831e+00   2.9047500e+00   2.9083094e+00   3.1195526e+00   2.8078790e+00   2.5037817e+00   2.4045555e+00   2.8012577e+00   3.0040677e+00   2.4032886e+00   1.7053664e+00   2.6031367e+00   2.6019751e+00   2.6032655e+00   2.7067267e+00   1.4186217e+00   2.5039390e+00   
4.4180854e+00   3.5092124e+00   4.3179254e+00   4.0043989e+00   4.2115634e+00   5.0223341e+00   2.9108451e+00   4.7142986e+00   4.2064794e+00   4.5276385e+00   3.5169841e+00   3.7092301e+00   3.9177719e+00   3.4145764e+00   3.5398518e+00   3.7257231e+00   3.9064409e+00   5.1255523e+00   5.3223081e+00   3.4028319e+00   4.1224590e+00   3.3167394e+00   5.1238108e+00   3.3110167e+00   4.1123595e+00   4.4156295e+00   3.2116669e+00   3.3094499e+00   4.0106882e+00   4.2180724e+00   4.5227110e+00   4.8462592e+00   4.0138270e+00   3.5038530e+00   4.0010263e+00   4.5481710e+00   4.0222346e+00   3.9055783e+00   3.2104685e+00   3.8231767e+00   4.0259306e+00   3.5470873e+00   3.5092124e+00   4.3162096e+00   4.1285334e+00   3.6344358e+00   3.4126369e+00   3.6148618e+00   3.8215373e+00   3.5066394e+00   2.2608083e-01   2.4170870e-01   3.0915245e-01   3.0915245e-01   4.0002221e-01   7.0096858e-01   8.0888055e-01   3.3818226e-01   4.0243965e-01   5.0477564e-01   3.3818226e-01   6.1135434e-01   2.0121983e-01   3.0017653e-01   1.1020506e+00   6.0219099e-01   2.0061436e-01   4.1210927e-01   4.0246123e-01   4.0125062e-01   4.0363334e-01   3.4085233e-01   2.2573593e-01   3.1414744e+00   2.9090612e+00   3.3237063e+00   2.4058952e+00   3.0107723e+00   2.9007492e+00   3.1056084e+00   1.7139238e+00   3.0138400e+00   2.3035512e+00   1.9525301e+00   2.6040007e+00   2.4100386e+00   3.1020779e+00   2.0037730e+00   2.8273039e+00   2.9018637e+00   2.5009579e+00   2.9077468e+00   2.3022754e+00   3.2048985e+00   2.4059812e+00   3.3038272e+00   3.1015567e+00   2.7110304e+00   2.8196992e+00   3.2199879e+00   3.4126307e+00   2.9028597e+00   1.9035634e+00   2.2044603e+00   2.1052067e+00   2.3021298e+00   3.5016873e+00   2.9018153e+00   2.9040211e+00   3.1174676e+00   2.8083845e+00   2.5012694e+00   2.4028385e+00   2.8006965e+00   3.0024198e+00   2.4021168e+00   1.7233814e+00   2.6012647e+00   2.6007117e+00   2.6012056e+00   2.7050190e+00   1.4220898e+00   2.5015263e+00   4.4109742e+00   3.5045842e+00   
4.3148937e+00   4.0025815e+00   4.2071337e+00   5.0208677e+00   2.9053047e+00   4.7134688e+00   4.2051335e+00   4.5213131e+00   3.5107904e+00   3.7056862e+00   3.9129303e+00   3.4076849e+00   3.5232606e+00   3.7154827e+00   3.9043905e+00   5.1238111e+00   5.3204854e+00   3.4027174e+00   4.1161770e+00   3.3085473e+00   5.1228006e+00   3.3065392e+00   4.1085246e+00   4.4144908e+00   3.2064337e+00   3.3048953e+00   4.0063737e+00   4.2173565e+00   4.5213359e+00   4.8449108e+00   4.0082529e+00   3.5026815e+00   4.0006690e+00   4.5442582e+00   4.0132894e+00   3.9035247e+00   3.2051958e+00   3.8177289e+00   4.0170262e+00   3.5340950e+00   3.5045842e+00   4.3111747e+00   4.1186693e+00   3.6228903e+00   3.4075338e+00   3.6094379e+00   3.8124787e+00   3.5031928e+00   1.1269424e-01   5.0436965e-01   4.5078948e-01   2.2573593e-01   6.0000317e-01   7.0088477e-01   4.1210927e-01   3.4080442e-01   3.0474106e-01   4.1210927e-01   8.0883841e-01   1.1269424e-01   2.2573593e-01   1.2089253e+00   8.0051036e-01   4.0125062e-01   4.1317535e-01   5.2133802e-01   3.0017653e-01   6.0184622e-01   2.0061436e-01   2.2573593e-01   3.2211375e+00   3.0065592e+00   3.4126307e+00   2.5097024e+00   3.1069750e+00   3.0016616e+00   3.2056674e+00   1.8201133e+00   3.1066333e+00   2.4080686e+00   2.0619186e+00   2.7068820e+00   2.5107632e+00   3.2022485e+00   2.1087015e+00   2.9137660e+00   3.0040552e+00   2.6010528e+00   3.0089164e+00   2.4039766e+00   3.3085605e+00   2.5050799e+00   3.4035383e+00   3.2010814e+00   2.8057187e+00   2.9102474e+00   3.3101488e+00   3.5088135e+00   3.0043212e+00   2.0051350e+00   2.3071665e+00   2.2078183e+00   2.4034084e+00   3.6027901e+00   3.0040510e+00   3.0064311e+00   3.2097125e+00   2.9065741e+00   2.6030847e+00   2.5057763e+00   2.9016034e+00   3.1025924e+00   2.5033702e+00   1.8310475e+00   2.7029487e+00   2.7015162e+00   2.7026433e+00   2.8034238e+00   1.5358653e+00   2.6032968e+00   4.5159027e+00   3.6080734e+00   4.4115652e+00   4.1033603e+00   4.3094200e+00   
5.1138780e+00   3.0100866e+00   4.8082318e+00   4.3041941e+00   4.6203464e+00   3.6127197e+00   3.8070338e+00   4.0124958e+00   3.5130651e+00   3.6349183e+00   3.8215362e+00   4.0044029e+00   5.2162094e+00   5.4145184e+00   3.5039680e+00   4.2166537e+00   3.4145702e+00   5.2146334e+00   3.4083285e+00   4.2090727e+00   4.5089181e+00   3.3091123e+00   3.4076329e+00   4.1087140e+00   4.3098021e+00   4.6133860e+00   4.9287684e+00   4.1115100e+00   3.6023704e+00   4.1007820e+00   4.6309835e+00   4.1192351e+00   4.0040240e+00   3.3086516e+00   3.9156759e+00   4.1209257e+00   3.6344375e+00   3.6080734e+00   4.4124586e+00   4.2235561e+00   3.7268101e+00   3.5102374e+00   3.7111714e+00   3.9185866e+00   3.6056580e+00   5.0084481e-01   4.1315633e-01   2.2573593e-01   7.0000303e-01   8.0046605e-01   3.3818226e-01   2.4170870e-01   3.0017653e-01   3.3818226e-01   8.0245824e-01   1.1269424e-01   2.0181667e-01   1.1133897e+00   8.0004523e-01   4.0246123e-01   5.2167829e-01   4.5078948e-01   4.0125062e-01   6.0017665e-01   3.0017653e-01   2.0061436e-01   3.3182813e+00   3.1056085e+00   3.5110034e+00   2.6060742e+00   3.2059465e+00   3.1013637e+00   3.3048926e+00   1.9099680e+00   3.2056789e+00   2.5063346e+00   2.1344310e+00   2.8057704e+00   2.6059875e+00   3.3019214e+00   2.2068463e+00   3.0117188e+00   3.1034568e+00   2.7006623e+00   3.1063566e+00   2.5023073e+00   3.4074246e+00   2.6040822e+00   3.5028878e+00   3.3008913e+00   2.9048017e+00   3.0087102e+00   3.4087684e+00   3.6077011e+00   3.1036688e+00   2.1027637e+00   2.4039664e+00   2.3040177e+00   2.5024922e+00   3.7023994e+00   3.1034532e+00   3.1054994e+00   3.3083865e+00   3.0045919e+00   2.7025560e+00   2.6039937e+00   3.0011260e+00   3.2022183e+00   2.6023240e+00   1.9156198e+00   2.8022964e+00   2.8012577e+00   2.8021795e+00   2.9028597e+00   1.6190551e+00   2.7026433e+00   4.6143249e+00   3.7070367e+00   4.5103890e+00   4.2029881e+00   4.4084395e+00   5.2126509e+00   3.1082889e+00   4.9074567e+00   4.4036930e+00   
4.7183734e+00   3.7111657e+00   3.9061763e+00   4.1111077e+00   3.6112621e+00   3.7306950e+00   3.9190472e+00   4.1039096e+00   5.3148048e+00   5.5132902e+00   3.6028435e+00   4.3148929e+00   3.5126667e+00   5.3133599e+00   3.5071916e+00   4.3081091e+00   4.6080296e+00   3.4078683e+00   3.5066415e+00   4.2077543e+00   4.4087821e+00   4.7120754e+00   5.0261502e+00   4.2102487e+00   3.7020547e+00   4.2006494e+00   4.7279956e+00   4.2171591e+00   4.1035746e+00   3.4074978e+00   4.0138994e+00   4.2186688e+00   3.7302926e+00   3.7070367e+00   4.5111938e+00   4.3210760e+00   3.8236433e+00   3.6087856e+00   3.8098356e+00   4.0164852e+00   3.7049593e+00   1.1269424e-01   7.0017011e-01   9.0506343e-01   1.0426636e+00   2.0181667e-01   4.1209001e-01   8.0093081e-01   2.0181667e-01   3.4080442e-01   4.0125062e-01   3.6259865e-01   9.0029064e-01   3.3808272e-01   4.2268438e-01   6.1135434e-01   2.2608083e-01   6.0948212e-01   2.0061436e-01   6.3164977e-01   3.0482299e-01   3.1902650e+00   2.9267417e+00   3.3545239e+00   2.4065479e+00   3.0300492e+00   2.9028462e+00   3.1166330e+00   1.7073273e+00   3.0369978e+00   2.3091363e+00   1.9242178e+00   2.6129479e+00   2.4148785e+00   3.1074477e+00   2.0138888e+00   2.8680310e+00   2.9053048e+00   2.5042875e+00   2.9165009e+00   2.3038195e+00   3.2116668e+00   2.4221589e+00   3.3110857e+00   3.1060464e+00   2.7333589e+00   2.8520935e+00   3.2480481e+00   3.4313924e+00   2.9094538e+00   1.9103596e+00   2.2042535e+00   2.1040084e+00   2.3086427e+00   3.5048916e+00   2.9048754e+00   2.9119661e+00   3.1440058e+00   2.8212158e+00   2.5048147e+00   2.4054865e+00   2.8016296e+00   3.0087060e+00   2.4071454e+00   1.7108740e+00   2.6040234e+00   2.6035022e+00   2.6047905e+00   2.7176633e+00   1.4222462e+00   2.5057847e+00   4.4195581e+00   3.5098289e+00   4.3311360e+00   4.0067587e+00   4.2149806e+00   5.0390074e+00   2.9109559e+00   4.7273232e+00   4.2124122e+00   4.5405876e+00   3.5250718e+00   3.7139027e+00   3.9284285e+00   3.4150426e+00   
3.5404383e+00   3.7302923e+00   3.9113385e+00   5.1434691e+00   5.3373009e+00   3.4049076e+00   4.1330547e+00   3.3170101e+00   5.1417717e+00   3.3168869e+00   4.1189487e+00   4.4302240e+00   3.2165105e+00   3.3123688e+00   4.0139003e+00   4.2362095e+00   4.5418862e+00   4.8784553e+00   4.0170271e+00   3.5083268e+00   4.0022121e+00   4.5797332e+00   4.0245435e+00   3.9092247e+00   3.2127499e+00   3.8383398e+00   4.0332236e+00   3.5687055e+00   3.5098289e+00   4.3229228e+00   4.1350002e+00   3.6463113e+00   3.4177609e+00   3.6219569e+00   3.8236419e+00   3.5076152e+00   6.0202028e-01   1.0008471e+00   1.1133984e+00   1.2085435e-01   4.0125062e-01   7.0548138e-01   1.2085435e-01   4.1210927e-01   3.3813251e-01   4.1317535e-01   8.0093160e-01   4.1210927e-01   4.5147187e-01   7.0184453e-01   2.0121983e-01   7.0088326e-01   2.2573593e-01   6.3164977e-01   2.4170870e-01   3.1712556e+00   2.9203033e+00   3.3425812e+00   2.4054865e+00   3.0228150e+00   2.9023686e+00   3.1131138e+00   1.7053664e+00   3.0276460e+00   2.3090554e+00   1.9156198e+00   2.6109872e+00   2.4094445e+00   3.1056085e+00   2.0122722e+00   2.8520934e+00   2.9050277e+00   2.5027053e+00   2.9125192e+00   2.3027405e+00   3.2109394e+00   2.4160415e+00   3.3084146e+00   3.1042008e+00   2.7243956e+00   2.8394100e+00   3.2369546e+00   3.4247119e+00   2.9077087e+00   1.9065546e+00   2.2031052e+00   2.1025721e+00   2.3063026e+00   3.5041732e+00   2.9047982e+00   2.9102300e+00   3.1338083e+00   2.8152056e+00   2.5042498e+00   2.4049187e+00   2.8014068e+00   3.0065584e+00   2.4051787e+00   1.7073273e+00   2.6035320e+00   2.6027034e+00   2.6039922e+00   2.7127202e+00   1.4197348e+00   2.5048165e+00   4.4189015e+00   3.5095163e+00   4.3257988e+00   4.0057069e+00   4.2135049e+00   5.0324949e+00   2.9108796e+00   4.7221364e+00   4.2099109e+00   4.5353940e+00   3.5215862e+00   3.7118544e+00   3.9240013e+00   3.4147901e+00   3.5401421e+00   3.7282909e+00   3.9092247e+00   5.1365094e+00   5.3314710e+00   3.4038679e+00   
4.1286955e+00   3.3168613e+00   5.1347955e+00   3.3142720e+00   4.1161770e+00   4.4243750e+00   3.2143132e+00   3.3110162e+00   4.0124921e+00   4.2289511e+00   4.5343165e+00   4.8661278e+00   4.0156242e+00   3.5063341e+00   4.0016595e+00   4.5675358e+00   4.0235142e+00   3.9076269e+00   3.2116668e+00   3.8321137e+00   4.0301568e+00   3.5598554e+00   3.5095163e+00   4.3201293e+00   4.1322798e+00   3.6413274e+00   3.4154677e+00   3.6188977e+00   3.8226861e+00   3.5071388e+00   7.0096708e-01   8.0004602e-01   5.0855077e-01   4.1420960e-01   2.2608083e-01   5.0855077e-01   1.0008768e+00   3.0474106e-01   4.0127250e-01   1.1527746e+00   1.0000457e+00   4.0127250e-01   4.5783248e-01   6.0948800e-01   4.1317535e-01   8.0008964e-01   3.0482299e-01   4.0127250e-01   3.2104685e+00   3.0024155e+00   3.4059092e+00   2.5048145e+00   3.1026586e+00   3.0005260e+00   3.2022151e+00   1.8108200e+00   3.1025924e+00   2.4028974e+00   2.0417688e+00   2.7025751e+00   2.5062865e+00   3.2007416e+00   2.1027376e+00   2.9057769e+00   3.0015388e+00   2.6003213e+00   3.0043084e+00   2.4017233e+00   3.3039365e+00   2.5015263e+00   3.4013673e+00   3.2002931e+00   2.8019179e+00   2.9040262e+00   3.3045112e+00   3.5038551e+00   3.0015958e+00   2.0020172e+00   2.3035509e+00   2.2041009e+00   2.4010446e+00   3.6011254e+00   3.0015387e+00   3.0025850e+00   3.2040849e+00   2.9029297e+00   2.6009614e+00   2.5022969e+00   2.9005699e+00   3.1008537e+00   2.5011717e+00   1.8179820e+00   2.7009799e+00   2.7004102e+00   2.7008225e+00   2.8010266e+00   1.5160787e+00   2.6010449e+00   4.5093548e+00   3.6039076e+00   4.4060847e+00   4.1014985e+00   4.3050076e+00   5.1081747e+00   3.0045274e+00   4.8044751e+00   4.3019076e+00   4.6117611e+00   3.6062405e+00   3.8032982e+00   4.0063634e+00   3.5066398e+00   3.6202705e+00   3.8119529e+00   4.0019489e+00   5.2097659e+00   5.4087506e+00   3.5019664e+00   4.2090727e+00   3.4073888e+00   5.2088330e+00   3.4037266e+00   4.2046087e+00   4.5046939e+00   3.3041077e+00   
3.4034672e+00   4.1044794e+00   4.3051857e+00   4.6074982e+00   4.9181673e+00   4.1061528e+00   3.6008588e+00   4.1002763e+00   4.6187512e+00   4.1110303e+00   4.0017844e+00   3.3039580e+00   3.9080413e+00   4.1118167e+00   3.6188744e+00   3.6039076e+00   4.4067826e+00   4.2136946e+00   3.7147052e+00   3.5048712e+00   3.7054764e+00   3.9103830e+00   3.6025973e+00   3.0026460e-01   1.0001598e+00   9.0029064e-01   6.0202028e-01   1.0001598e+00   1.1281352e+00   7.0000303e-01   6.0052920e-01   1.8012962e+00   9.6574369e-01   6.3178782e-01   4.2270142e-01   1.1005460e+00   3.0026460e-01   9.1422402e-01   4.0004442e-01   8.0004602e-01   3.2225450e+00   3.0091788e+00   3.4142737e+00   2.5659206e+00   3.1121190e+00   3.0065741e+00   3.2080663e+00   1.9795481e+00   3.1095927e+00   2.4291135e+00   2.3151880e+00   2.7129011e+00   2.5826416e+00   3.2051685e+00   2.1273517e+00   2.9165009e+00   3.0077149e+00   2.6132476e+00   3.0422268e+00   2.4403272e+00   3.3124044e+00   2.5166989e+00   3.4115621e+00   3.2044340e+00   2.8105357e+00   2.9137358e+00   3.3135329e+00   3.5115123e+00   3.0089448e+00   2.0634477e+00   2.3669955e+00   2.2798676e+00   2.4222743e+00   3.6065353e+00   3.0077107e+00   3.0095641e+00   3.2119165e+00   2.9352117e+00   2.6080595e+00   2.5371436e+00   2.9126008e+00   3.1051604e+00   2.5254396e+00   2.0316239e+00   2.7143597e+00   2.7050980e+00   2.7084026e+00   2.8082597e+00   1.7626307e+00   2.6129786e+00   4.5202811e+00   3.6136284e+00   4.4137920e+00   4.1051791e+00   4.3125133e+00   5.1149745e+00   3.0264326e+00   4.8090821e+00   4.3074243e+00   4.6242424e+00   3.6169385e+00   3.8113313e+00   4.0160029e+00   3.5235168e+00   3.6464145e+00   3.8279938e+00   4.0062032e+00   5.2173409e+00   5.4162239e+00   3.5202520e+00   4.2206877e+00   3.4219359e+00   5.2156030e+00   3.4146242e+00   4.2116109e+00   4.5097900e+00   3.3150972e+00   3.4115327e+00   4.1123767e+00   4.3106068e+00   4.6148396e+00   4.9296764e+00   4.1159139e+00   3.6049049e+00   4.1030781e+00   
4.6336876e+00   4.1247374e+00   4.0056652e+00   3.3131419e+00   3.9194404e+00   4.1265834e+00   3.6428011e+00   3.6136284e+00   4.4157045e+00   4.2295866e+00   3.7344540e+00   3.5196603e+00   3.7152542e+00   3.9242137e+00   3.6086340e+00   1.1055707e+00   1.0030868e+00   7.0000151e-01   1.1055707e+00   1.3018102e+00   8.0245824e-01   7.1621884e-01   1.9078389e+00   1.1896594e+00   7.2044167e-01   5.3943256e-01   1.2089192e+00   4.5147187e-01   1.0776188e+00   5.0043084e-01   9.0506254e-01   3.3079985e+00   3.1046072e+00   3.5056118e+00   2.6709345e+00   3.2081329e+00   3.1065948e+00   3.3043807e+00   2.0901204e+00   3.2052035e+00   2.5276373e+00   2.4267777e+00   2.8091158e+00   2.6904888e+00   3.3041886e+00   2.2241050e+00   3.0065909e+00   3.1056084e+00   2.7155799e+00   3.1440950e+00   2.5451785e+00   3.4078458e+00   2.6152930e+00   3.5111169e+00   3.3045112e+00   2.9070941e+00   3.0065909e+00   3.4069929e+00   3.6059670e+00   3.1068940e+00   2.1702441e+00   2.4737523e+00   2.3880607e+00   2.5238437e+00   3.7056514e+00   3.1056084e+00   3.1055129e+00   3.3051256e+00   3.0372254e+00   2.7067267e+00   2.6397031e+00   3.0142197e+00   3.2037566e+00   2.6278605e+00   2.1424915e+00   2.8148768e+00   2.8047553e+00   2.8077256e+00   2.9066659e+00   1.8682644e+00   2.7127202e+00   4.6142218e+00   3.7103348e+00   4.5074842e+00   4.2035307e+00   4.4083412e+00   5.2074280e+00   3.1239158e+00   4.9041928e+00   4.4055872e+00   4.7149533e+00   3.7103439e+00   3.9081758e+00   4.1095835e+00   3.6188977e+00   3.7328455e+00   3.9187196e+00   4.1037709e+00   5.3087451e+00   5.5089283e+00   3.6218973e+00   4.3127798e+00   3.5155554e+00   5.3076917e+00   3.5109043e+00   4.3070065e+00   4.6043043e+00   3.4107931e+00   3.5076367e+00   4.2085644e+00   4.4044360e+00   4.7071579e+00   5.0144130e+00   4.2110565e+00   3.7038321e+00   4.2031939e+00   4.7176313e+00   4.2169539e+00   4.1034568e+00   3.4087523e+00   4.0110693e+00   4.2176552e+00   3.7262659e+00   3.7103348e+00   4.5099841e+00   
4.3199893e+00   3.8223318e+00   3.6159248e+00   3.8096375e+00   4.0163546e+00   3.7058422e+00   3.0026460e-01   6.0964891e-01   0.0000000e+00   5.0043842e-01   3.0482299e-01   4.0246123e-01   8.0254500e-01   5.0043842e-01   5.2133802e-01   7.0556260e-01   2.0181667e-01   7.0008735e-01   3.0026460e-01   6.0948506e-01   2.0181667e-01   3.2490712e+00   3.0153168e+00   3.4297841e+00   2.5067523e+00   3.1166337e+00   3.0027816e+00   3.2112793e+00   1.8068048e+00   3.1183051e+00   2.4116924e+00   2.0138832e+00   2.7116615e+00   2.5059537e+00   3.2048192e+00   2.1144760e+00   2.9351753e+00   3.0063019e+00   2.6019122e+00   3.0106587e+00   2.4030297e+00   3.3125861e+00   2.5120719e+00   3.4068163e+00   3.2029877e+00   2.8162444e+00   2.9267417e+00   3.3252407e+00   3.5189464e+00   3.0077107e+00   2.0051350e+00   2.3037132e+00   2.2028146e+00   2.4058620e+00   3.6044981e+00   3.0062070e+00   3.0107283e+00   3.2237456e+00   2.9105093e+00   2.6052541e+00   2.5062865e+00   2.9018772e+00   3.1056084e+00   2.5048522e+00   1.8082911e+00   2.7043948e+00   2.7029415e+00   2.7046027e+00   2.8091099e+00   1.5248852e+00   2.6055127e+00   4.5209020e+00   3.6112573e+00   4.4212031e+00   4.1056541e+00   4.3138986e+00   5.1255338e+00   3.0133997e+00   4.8167235e+00   4.3081273e+00   4.6319211e+00   3.6205854e+00   3.8114965e+00   4.0212972e+00   3.5173798e+00   3.6449970e+00   3.8299342e+00   4.0081754e+00   5.2290121e+00   5.4254411e+00   3.5039202e+00   4.2264145e+00   3.4198378e+00   5.2270034e+00   3.4138008e+00   4.2149806e+00   4.5183778e+00   3.3145502e+00   3.4118179e+00   4.1129687e+00   4.3210760e+00   4.6261633e+00   4.9512603e+00   4.1165035e+00   3.6051692e+00   4.1014742e+00   4.6540056e+00   4.1257291e+00   4.0071257e+00   3.3129914e+00   3.9274863e+00   4.1301604e+00   3.6542046e+00   3.6112573e+00   4.4192311e+00   4.2328883e+00   3.7399948e+00   3.5155767e+00   3.7180846e+00   3.9250546e+00   3.6083191e+00   5.0436965e-01   3.0026460e-01   6.0017982e-01   3.0482299e-01   
3.0017653e-01   9.0506343e-01   6.0000317e-01   4.5783248e-01   7.4269314e-01   2.4195741e-01   6.0948506e-01   4.0122873e-01   5.0855077e-01   2.0061436e-01   3.5243030e+00   3.3064692e+00   3.7147036e+00   2.8028227e+00   3.4072759e+00   3.3010449e+00   3.5048841e+00   2.1026764e+00   3.4081977e+00   2.7042302e+00   2.3098753e+00   3.0045117e+00   2.8027924e+00   3.5019450e+00   2.4045982e+00   3.2157547e+00   3.3025861e+00   2.9005886e+00   3.3047156e+00   2.7010554e+00   3.6058037e+00   2.8042692e+00   3.7029937e+00   3.5011559e+00   3.1065954e+00   3.2116669e+00   3.6121048e+00   3.8091015e+00   3.3031148e+00   2.3014285e+00   2.6014645e+00   2.5011992e+00   2.7018905e+00   3.9020189e+00   3.3025600e+00   3.3044825e+00   3.5110031e+00   3.2044340e+00   2.9018587e+00   2.8023124e+00   3.2006932e+00   3.4022345e+00   2.8016296e+00   2.1039819e+00   3.0015958e+00   3.0009948e+00   3.0016466e+00   3.1034780e+00   1.8068049e+00   2.9019412e+00   4.8119571e+00   3.9055005e+00   4.7117433e+00   4.4027872e+00   4.6074923e+00   5.4154953e+00   3.3059205e+00   5.1096877e+00   4.6042055e+00   4.9184662e+00   3.9101579e+00   4.1056576e+00   4.3111747e+00   3.8086187e+00   3.9240074e+00   4.1158326e+00   4.3040379e+00   5.5178488e+00   5.7157885e+00   3.8018678e+00   4.5144444e+00   3.7097243e+00   5.5166376e+00   3.7063915e+00   4.5079295e+00   4.8103281e+00   3.6066590e+00   3.7054748e+00   4.4067833e+00   4.6117275e+00   4.9151622e+00   5.2317566e+00   4.4087821e+00   3.9022961e+00   4.4006562e+00   4.9323259e+00   4.4141504e+00   4.3034963e+00   3.6059708e+00   4.2144276e+00   4.4165186e+00   3.9284285e+00   3.9055005e+00   4.7106152e+00   4.5183778e+00   4.0209837e+00   3.8074712e+00   4.0090032e+00   4.2134002e+00   3.9039579e+00   6.0964891e-01   1.1019501e+00   4.0125062e-01   5.0000761e-01   1.2632947e+00   1.1001012e+00   5.2491131e-01   6.1135434e-01   7.1621884e-01   4.2268438e-01   9.0026543e-01   2.4170870e-01   5.0043084e-01   3.4064574e+00   3.2033141e+00   
3.6042703e+00   2.7067267e+00   3.3031847e+00   3.2012079e+00   3.4035361e+00   2.0125822e+00   3.3019706e+00   2.6055127e+00   2.2404570e+00   2.9047685e+00   2.7070958e+00   3.4014441e+00   2.3056303e+00   3.1043374e+00   3.2029748e+00   2.8006755e+00   3.2059945e+00   2.6027034e+00   3.5064150e+00   2.7028014e+00   3.6021536e+00   3.4005708e+00   3.0020583e+00   3.1034908e+00   3.5031921e+00   3.7043162e+00   3.2030081e+00   2.2031888e+00   2.5048145e+00   2.4051640e+00   2.6022353e+00   3.8020808e+00   3.2029748e+00   3.2045605e+00   3.4036086e+00   3.1036832e+00   2.8021575e+00   2.7039988e+00   3.1011640e+00   3.3016475e+00   2.7022579e+00   2.0191796e+00   2.9020892e+00   2.9010579e+00   2.9018587e+00   3.0016913e+00   1.7203066e+00   2.8022905e+00   4.7127831e+00   3.8062227e+00   4.6064153e+00   4.3024456e+00   4.5071341e+00   5.3066177e+00   3.2074507e+00   5.0034627e+00   4.5024143e+00   4.8135231e+00   3.8088290e+00   4.0049892e+00   4.2080447e+00   3.7100251e+00   3.8270894e+00   4.0163858e+00   4.2028588e+00   5.4079977e+00   5.6075453e+00   3.7029588e+00   4.4113160e+00   3.6111044e+00   5.4066727e+00   3.6058057e+00   4.4062018e+00   4.7037812e+00   3.5065182e+00   3.6056307e+00   4.3065776e+00   4.5036239e+00   4.8058364e+00   5.1131156e+00   4.3088094e+00   3.8014142e+00   4.3005452e+00   4.8156922e+00   4.3151195e+00   4.2027764e+00   3.5064276e+00   4.1095026e+00   4.3155223e+00   3.8226872e+00   3.8062227e+00   4.6088777e+00   4.4178507e+00   3.9190516e+00   3.7073875e+00   3.9078043e+00   4.1144923e+00   3.8043348e+00   5.0043842e-01   3.0482299e-01   4.0246123e-01   8.0254500e-01   5.0043842e-01   5.2133802e-01   7.0556260e-01   2.0181667e-01   7.0008735e-01   3.0026460e-01   6.0948506e-01   2.0181667e-01   3.2490712e+00   3.0153168e+00   3.4297841e+00   2.5067523e+00   3.1166337e+00   3.0027816e+00   3.2112793e+00   1.8068048e+00   3.1183051e+00   2.4116924e+00   2.0138832e+00   2.7116615e+00   2.5059537e+00   3.2048192e+00   2.1144760e+00   
2.9351753e+00   3.0063019e+00   2.6019122e+00   3.0106587e+00   2.4030297e+00   3.3125861e+00   2.5120719e+00   3.4068163e+00   3.2029877e+00   2.8162444e+00   2.9267417e+00   3.3252407e+00   3.5189464e+00   3.0077107e+00   2.0051350e+00   2.3037132e+00   2.2028146e+00   2.4058620e+00   3.6044981e+00   3.0062070e+00   3.0107283e+00   3.2237456e+00   2.9105093e+00   2.6052541e+00   2.5062865e+00   2.9018772e+00   3.1056084e+00   2.5048522e+00   1.8082911e+00   2.7043948e+00   2.7029415e+00   2.7046027e+00   2.8091099e+00   1.5248852e+00   2.6055127e+00   4.5209020e+00   3.6112573e+00   4.4212031e+00   4.1056541e+00   4.3138986e+00   5.1255338e+00   3.0133997e+00   4.8167235e+00   4.3081273e+00   4.6319211e+00   3.6205854e+00   3.8114965e+00   4.0212972e+00   3.5173798e+00   3.6449970e+00   3.8299342e+00   4.0081754e+00   5.2290121e+00   5.4254411e+00   3.5039202e+00   4.2264145e+00   3.4198378e+00   5.2270034e+00   3.4138008e+00   4.2149806e+00   4.5183778e+00   3.3145502e+00   3.4118179e+00   4.1129687e+00   4.3210760e+00   4.6261633e+00   4.9512603e+00   4.1165035e+00   3.6051692e+00   4.1014742e+00   4.6540056e+00   4.1257291e+00   4.0071257e+00   3.3129914e+00   3.9274863e+00   4.1301604e+00   3.6542046e+00   3.6112573e+00   4.4192311e+00   4.2328883e+00   3.7399948e+00   3.5155767e+00   3.7180846e+00   3.9250546e+00   3.6083191e+00   7.0470720e-01   6.3164977e-01   7.0000303e-01   2.0000000e-01   6.4049114e-01   8.7212232e-01   4.0004442e-01   8.5437440e-01   2.2573593e-01   9.3308853e-01   6.0184622e-01   3.5152865e+00   3.2379971e+00   3.6729302e+00   2.7052554e+00   3.3425813e+00   3.2040843e+00   3.4230889e+00   2.0021220e+00   3.3530528e+00   2.6055127e+00   2.2051638e+00   2.9154879e+00   2.7227228e+00   3.4118179e+00   2.3143923e+00   3.1902650e+00   3.2048191e+00   2.8089346e+00   3.2223766e+00   2.6060092e+00   3.5107904e+00   2.7333577e+00   3.6167495e+00   3.4109217e+00   3.0488339e+00   3.1712556e+00   3.5658221e+00   3.7426726e+00   3.2127498e+00   
2.2186491e+00   2.5049231e+00   2.4052960e+00   2.6139440e+00   3.8063158e+00   3.2036084e+00   3.2143157e+00   3.4603347e+00   3.1318587e+00   2.8056557e+00   2.7050980e+00   3.1020681e+00   3.3136174e+00   2.7116685e+00   2.0027889e+00   2.9047842e+00   2.9057760e+00   2.9065359e+00   3.0276459e+00   1.7091578e+00   2.8077256e+00   4.7169309e+00   3.8081280e+00   4.6399369e+00   4.3088507e+00   4.5162248e+00   5.3501952e+00   3.2067991e+00   5.0370912e+00   4.5175831e+00   4.8468186e+00   3.8290678e+00   4.0170262e+00   4.2347722e+00   3.7111714e+00   3.8289857e+00   4.0283196e+00   4.2155430e+00   5.4550926e+00   5.6472422e+00   3.7064735e+00   4.4381718e+00   3.6121030e+00   5.4538016e+00   3.6205857e+00   4.4231445e+00   4.7408825e+00   3.5189464e+00   3.6135011e+00   4.3151164e+00   4.5490925e+00   4.8546830e+00   5.1966534e+00   4.3173271e+00   3.8129545e+00   4.3038527e+00   4.8962803e+00   4.3214438e+00   4.2123903e+00   3.5127693e+00   4.1469662e+00   4.3342207e+00   3.8751227e+00   3.8081280e+00   4.6262568e+00   4.4345825e+00   3.9485180e+00   3.7201181e+00   3.9257254e+00   4.1203162e+00   3.8072915e+00   2.0181667e-01   1.1055799e+00   7.0016860e-01   4.0006662e-01   4.5147187e-01   4.1212852e-01   4.0002221e-01   5.0043084e-01   3.0474106e-01   1.2085435e-01   3.2280983e+00   3.0080445e+00   3.4166801e+00   2.5073304e+00   3.1087541e+00   3.0016255e+00   3.2064005e+00   1.8128544e+00   3.1091921e+00   2.4076934e+00   2.0429861e+00   2.7070770e+00   2.5077793e+00   3.2025221e+00   2.1086021e+00   2.9185909e+00   3.0040552e+00   2.6009247e+00   3.0080768e+00   2.4028385e+00   3.3086417e+00   2.5058827e+00   3.4038679e+00   3.2013290e+00   2.8077473e+00   2.9137660e+00   3.3136458e+00   3.5107924e+00   3.0045274e+00   2.0036964e+00   2.3048725e+00   2.2049827e+00   2.4032211e+00   3.6028345e+00   3.0040403e+00   3.0066661e+00   3.2127504e+00   2.9065741e+00   2.6030847e+00   2.5048249e+00   2.9013288e+00   3.1029264e+00   2.5029614e+00   1.8201043e+00   
2.7027522e+00   2.7015465e+00   2.7026433e+00   2.8042851e+00   1.5256523e+00   2.6032253e+00   4.5160443e+00   3.6080467e+00   4.4135519e+00   4.1035779e+00   4.3098000e+00   5.1168009e+00   3.0096881e+00   4.8103297e+00   4.3048676e+00   4.6223708e+00   3.6136097e+00   3.8074712e+00   4.0139003e+00   3.5128896e+00   3.6349183e+00   3.8220063e+00   4.0049433e+00   5.2194303e+00   5.4171979e+00   3.5033669e+00   4.2181241e+00   3.4145410e+00   5.2178541e+00   3.4088042e+00   4.2099019e+00   4.5111938e+00   3.3094784e+00   3.4078458e+00   4.1090316e+00   4.3126257e+00   4.6165627e+00   4.9348334e+00   4.1118265e+00   3.6027619e+00   4.1008192e+00   4.6366757e+00   4.1194553e+00   4.0043991e+00   3.3087928e+00   3.9177721e+00   4.1218428e+00   3.6374332e+00   3.6080467e+00   4.4133507e+00   4.2243717e+00   3.7282925e+00   3.5105217e+00   3.7119499e+00   3.9187675e+00   3.6057072e+00   1.2012865e+00   6.0184622e-01   3.3808272e-01   6.0184934e-01   5.0043084e-01   3.3818226e-01   4.1212852e-01   3.0922892e-01   2.0121983e-01   3.4273160e+00   3.2064011e+00   3.6161317e+00   2.7056828e+00   3.3075153e+00   3.2008118e+00   3.4044261e+00   2.0113827e+00   3.3090710e+00   2.6035236e+00   2.2398619e+00   2.9035670e+00   2.7083020e+00   3.4017079e+00   2.3034787e+00   3.1174706e+00   3.2019092e+00   2.8008297e+00   3.2066700e+00   2.6023240e+00   3.5046442e+00   2.7041827e+00   3.6031099e+00   3.4011658e+00   3.0071110e+00   3.1127326e+00   3.5134157e+00   3.7092355e+00   3.2025439e+00   2.2031052e+00   2.5042875e+00   2.4048325e+00   2.6019131e+00   3.8016620e+00   3.2018790e+00   3.2036084e+00   3.4118203e+00   3.1063566e+00   2.8013151e+00   2.7029498e+00   3.1008327e+00   3.3019518e+00   2.7019892e+00   2.0181988e+00   2.9013773e+00   2.9007142e+00   2.9012240e+00   3.0034647e+00   1.7168003e+00   2.8015399e+00   4.7103355e+00   3.8044833e+00   4.6117631e+00   4.3023732e+00   4.5065283e+00   5.3163061e+00   3.2051927e+00   5.0102923e+00   4.5041827e+00   4.8177760e+00   
3.8091017e+00   4.0050028e+00   4.2105704e+00   3.7073402e+00   3.8208501e+00   4.0138272e+00   4.2036880e+00   5.4187310e+00   5.6164039e+00   3.7027275e+00   4.4135513e+00   3.6080195e+00   5.4177192e+00   3.6056365e+00   4.4072751e+00   4.7109359e+00   3.5056751e+00   3.6045028e+00   4.3058539e+00   4.5127175e+00   4.8161488e+00   5.1342237e+00   4.3075896e+00   3.8021528e+00   4.3006308e+00   4.8339900e+00   4.3122441e+00   4.2030792e+00   3.5048429e+00   4.1140184e+00   4.3148937e+00   3.8271256e+00   3.8044833e+00   4.6097143e+00   4.4165186e+00   3.9191998e+00   3.7067060e+00   3.9080455e+00   4.1114854e+00   3.8031381e+00   9.0000091e-01   1.2014191e+00   1.5025345e+00   7.0088477e-01   1.5012926e+00   9.0000136e-01   1.4092540e+00   1.0030724e+00   3.4932678e+00   3.2284356e+00   3.6579694e+00   2.7029237e+00   3.3320310e+00   3.2025221e+00   3.4171529e+00   2.0008116e+00   3.3407229e+00   2.6032740e+00   2.2005685e+00   2.9103582e+00   2.7153679e+00   3.4082220e+00   2.3087475e+00   3.1706684e+00   3.2030687e+00   2.8057704e+00   3.2157547e+00   2.6035236e+00   3.5075879e+00   2.7233837e+00   3.6121030e+00   3.4076329e+00   3.0364247e+00   3.1548602e+00   3.5517240e+00   3.7328844e+00   3.2086545e+00   2.2116271e+00   2.5026949e+00   2.4028972e+00   2.6089340e+00   3.8042764e+00   3.2022967e+00   3.2108192e+00   3.4468872e+00   3.1231713e+00   2.8035152e+00   2.7029238e+00   3.1011647e+00   3.3095196e+00   2.7074599e+00   2.0008921e+00   2.9028462e+00   2.9036791e+00   2.9040745e+00   3.0197997e+00   1.7043730e+00   2.8047771e+00   4.7130344e+00   3.8056232e+00   4.6318991e+00   4.3063824e+00   4.5121922e+00   5.3416858e+00   3.2045522e+00   5.0302179e+00   4.5134527e+00   4.8380714e+00   3.8218532e+00   4.0124930e+00   4.2269486e+00   3.7079008e+00   3.8220112e+00   4.0214147e+00   4.2115853e+00   5.4465234e+00   5.6393868e+00   3.7043105e+00   4.4299700e+00   3.6085945e+00   5.4449998e+00   3.6148636e+00   4.4178341e+00   4.7331065e+00   3.5134671e+00   
3.6094821e+00   4.3111775e+00   4.5398308e+00   4.8449119e+00   5.1826640e+00   4.3129030e+00   3.8093617e+00   4.3026669e+00   4.8806417e+00   4.3164776e+00   4.2091204e+00   3.5088587e+00   4.1368711e+00   4.3264627e+00   3.8592376e+00   3.8056232e+00   4.6204067e+00   4.4269727e+00   3.9374314e+00   3.7145622e+00   3.9192263e+00   4.1154804e+00   3.8050056e+00   6.1288055e-01   7.7603846e-01   4.0127250e-01   7.4329414e-01   2.0061436e-01   9.0508712e-01   6.0000635e-01   3.5152864e+00   3.2379970e+00   3.6729302e+00   2.7058598e+00   3.3425838e+00   3.2040874e+00   3.4230885e+00   2.0034894e+00   3.3530533e+00   2.6055423e+00   2.2123846e+00   2.9154880e+00   2.7237438e+00   3.4118184e+00   2.3143951e+00   3.1902650e+00   3.2048192e+00   2.8089552e+00   3.2228316e+00   2.6061975e+00   3.5107904e+00   2.7333644e+00   3.6167885e+00   3.4109240e+00   3.0488346e+00   3.1712557e+00   3.5658240e+00   3.7426726e+00   3.2127504e+00   2.2188249e+00   2.5053901e+00   2.4058634e+00   2.6139732e+00   3.8063206e+00   3.2036085e+00   3.2143126e+00   3.4603347e+00   3.1321581e+00   2.8056558e+00   2.7052554e+00   3.1021033e+00   3.3136175e+00   2.7117356e+00   2.0053411e+00   2.9048017e+00   2.9057761e+00   2.9065368e+00   3.0276467e+00   1.7105814e+00   2.8077315e+00   4.7169308e+00   3.8081328e+00   4.6399369e+00   4.3088509e+00   4.5162248e+00   5.3501952e+00   3.2068686e+00   5.0370912e+00   4.5175965e+00   4.8468145e+00   3.8290678e+00   4.0170299e+00   4.2347722e+00   3.7112059e+00   3.8289870e+00   4.0283196e+00   4.2155430e+00   5.4550814e+00   5.6472442e+00   3.7067060e+00   4.4381718e+00   3.6121048e+00   5.4538018e+00   3.6205918e+00   4.4231444e+00   4.7408825e+00   3.5189484e+00   3.6135011e+00   4.3151171e+00   4.5490925e+00   4.8546834e+00   5.1966394e+00   4.3173279e+00   3.8129558e+00   4.3038600e+00   4.8962803e+00   4.3214431e+00   4.2123903e+00   3.5127694e+00   4.1469662e+00   4.3342207e+00   3.8751227e+00   3.8081328e+00   4.6262568e+00   4.4345823e+00   
3.9485180e+00   3.7201522e+00   3.9257254e+00   4.1203153e+00   3.8072915e+00   3.4085233e-01   5.0517282e-01   4.1210927e-01   4.5847767e-01   4.1317535e-01   4.0243965e-01   3.1409605e+00   2.9078360e+00   3.3230621e+00   2.4077390e+00   3.0097979e+00   2.9003941e+00   3.1042002e+00   1.7227897e+00   3.0135090e+00   2.3017319e+00   1.9755996e+00   2.6019350e+00   2.4142039e+00   3.1015567e+00   2.0014192e+00   2.8264551e+00   2.9006366e+00   2.5011717e+00   2.9082658e+00   2.3033727e+00   3.2022157e+00   2.4051094e+00   3.3034165e+00   3.1014454e+00   2.7104802e+00   2.8188502e+00   3.2195779e+00   3.4112824e+00   2.9016560e+00   1.9053006e+00   2.2068965e+00   2.1085395e+00   2.3018945e+00   3.5009585e+00   2.9005880e+00   2.9020766e+00   3.1165913e+00   2.8092629e+00   2.5004154e+00   2.4029432e+00   2.8007533e+00   3.0017918e+00   2.4022355e+00   1.7369589e+00   2.6007937e+00   2.6003442e+00   2.6005344e+00   2.7044628e+00   1.4329832e+00   2.5008032e+00   4.4064393e+00   3.5021596e+00   4.3131639e+00   4.0016670e+00   4.2045223e+00   5.0200323e+00   2.9028411e+00   4.7130517e+00   4.2044861e+00   4.5172866e+00   3.5073617e+00   3.7038321e+00   3.9101623e+00   3.4039470e+00   3.5128093e+00   3.7092301e+00   3.9033549e+00   5.1227972e+00   5.3193887e+00   3.4029615e+00   4.1123595e+00   3.3040255e+00   5.1222477e+00   3.3043123e+00   4.1063328e+00   4.4139149e+00   3.2038047e+00   3.3025879e+00   4.0039164e+00   4.2170349e+00   4.5206139e+00   4.8441812e+00   4.0049702e+00   3.5022104e+00   4.0005674e+00   4.5418878e+00   4.0077003e+00   3.9024858e+00   3.2025221e+00   3.8146102e+00   4.0114630e+00   3.5261349e+00   3.5021596e+00   4.3081194e+00   4.1123594e+00   3.6158324e+00   3.4049076e+00   3.6064417e+00   3.8069566e+00   3.5014492e+00   8.0923926e-01   3.0474106e-01   6.5724028e-01   4.0246123e-01   5.6371422e-01   2.8500310e+00   2.6110761e+00   3.0277528e+00   2.1510447e+00   2.7141512e+00   2.6027937e+00   2.8070667e+00   1.5787180e+00   2.7167342e+00   
2.0166118e+00   1.9318287e+00   2.3071806e+00   2.1715812e+00   2.8031215e+00   1.7145956e+00   2.5336675e+00   2.6035547e+00   2.2074446e+00   2.6319765e+00   2.0282636e+00   2.9076123e+00   2.1122780e+00   3.0080768e+00   2.8027924e+00   2.4144060e+00   2.5243969e+00   2.9241685e+00   3.1150226e+00   2.6049377e+00   1.6499744e+00   1.9530826e+00   1.8666228e+00   2.0130304e+00   3.2033374e+00   2.6035250e+00   2.6059866e+00   2.8207213e+00   2.5287261e+00   2.2032583e+00   2.1244184e+00   2.5066544e+00   2.7033232e+00   2.1157724e+00   1.6395342e+00   2.3072196e+00   2.3018945e+00   2.3035850e+00   2.4072314e+00   1.3788456e+00   2.2062031e+00   4.1150297e+00   3.2079717e+00   4.0170857e+00   3.7033716e+00   3.9093672e+00   4.7228082e+00   2.6158579e+00   4.4145659e+00   3.9067187e+00   4.2256164e+00   3.2143454e+00   3.4081069e+00   3.6159248e+00   3.1148584e+00   3.2358776e+00   3.4219579e+00   3.6052692e+00   4.8260874e+00   5.0225486e+00   3.1131211e+00   3.8201108e+00   3.0142352e+00   4.8248264e+00   3.0102191e+00   3.8104451e+00   4.1158426e+00   2.9100849e+00   3.0073054e+00   3.7087638e+00   3.9191144e+00   4.2237272e+00   4.5500784e+00   3.7114840e+00   3.2036323e+00   3.7015743e+00   4.2506870e+00   3.7186993e+00   3.6043148e+00   2.9081165e+00   3.5216379e+00   3.7226348e+00   3.2449757e+00   3.2079717e+00   4.0139105e+00   3.8249612e+00   3.3311289e+00   3.1134242e+00   3.3125194e+00   3.5179632e+00   3.2049019e+00   8.0051115e-01   2.2573593e-01   7.1621884e-01   3.0482299e-01   3.3530529e+00   3.1135637e+00   3.5317001e+00   2.6021962e+00   3.2157548e+00   3.1011640e+00   3.3083865e+00   1.9014069e+00   3.2199554e+00   2.5036852e+00   2.1054816e+00   2.8056557e+00   2.6057308e+00   3.3035252e+00   2.2049636e+00   3.0369970e+00   3.1023768e+00   2.7016498e+00   3.1076516e+00   2.5011992e+00   3.4059088e+00   2.6097152e+00   3.5056297e+00   3.3028597e+00   2.9166917e+00   3.0276459e+00   3.4273156e+00   3.6176286e+00   3.1043337e+00   2.1032882e+00   
2.4011650e+00   2.3009622e+00   2.5032633e+00   3.7024058e+00   3.1022094e+00   3.1056121e+00   3.3243222e+00   3.0101957e+00   2.7018643e+00   2.6020065e+00   3.0005955e+00   3.2040843e+00   2.6027120e+00   1.9019962e+00   2.8015673e+00   2.8013346e+00   2.8018959e+00   2.9083044e+00   1.6052507e+00   2.7022567e+00   4.6121162e+00   3.7052383e+00   4.5194334e+00   4.2036849e+00   4.4088275e+00   5.2263180e+00   3.1053076e+00   4.9177739e+00   4.4072684e+00   4.7260117e+00   3.7138970e+00   3.9076272e+00   4.1167962e+00   3.6081590e+00   3.7238338e+00   3.9176170e+00   4.1063328e+00   5.3296625e+00   5.5255577e+00   3.6022190e+00   4.3201293e+00   3.5092121e+00   5.3285444e+00   3.5088064e+00   4.3111749e+00   4.6192198e+00   3.4084525e+00   3.5063337e+00   4.2079639e+00   4.4229098e+00   4.7273231e+00   5.0541259e+00   4.2099019e+00   3.7043105e+00   4.2011108e+00   4.7534769e+00   4.2147199e+00   4.1050714e+00   3.4064570e+00   4.0228303e+00   4.2200398e+00   3.7407842e+00   3.7052383e+00   4.5139612e+00   4.3214432e+00   3.8271242e+00   3.6094426e+00   3.8122429e+00   4.0138280e+00   3.7039439e+00   6.3178534e-01   2.0121983e-01   5.0043084e-01   3.1326249e+00   2.9095058e+00   3.3192760e+00   2.4306373e+00   3.0110559e+00   2.9028946e+00   3.1074604e+00   1.7872757e+00   3.0111989e+00   2.3143923e+00   2.0898615e+00   2.6089349e+00   2.4398792e+00   3.1033306e+00   2.0139907e+00   2.8220753e+00   2.9050462e+00   2.5045154e+00   2.9220490e+00   2.3159976e+00   3.2100379e+00   2.4095513e+00   3.3067017e+00   3.1022615e+00   2.7099669e+00   2.8165723e+00   3.2163877e+00   3.4125151e+00   2.9058641e+00   1.9245956e+00   2.2287984e+00   2.1344529e+00   2.3089795e+00   3.5039202e+00   2.9050287e+00   2.9078401e+00   3.1149135e+00   2.8183214e+00   2.5042875e+00   2.4160514e+00   2.8047612e+00   3.0036601e+00   2.4102273e+00   1.8223604e+00   2.6061038e+00   2.6023240e+00   2.6040822e+00   2.7058598e+00   1.5384093e+00   2.5058827e+00   4.4178532e+00   3.5098740e+00   
4.3151587e+00   4.0041429e+00   4.2110099e+00   5.0184788e+00   2.9154880e+00   4.7114737e+00   4.2061525e+00   4.5248210e+00   3.5155767e+00   3.7089995e+00   3.9157422e+00   3.4167041e+00   3.5401921e+00   3.7249704e+00   3.9056466e+00   5.1213055e+00   5.3189433e+00   3.4098171e+00   4.1203252e+00   3.3172667e+00   5.1196436e+00   3.3110366e+00   4.1111102e+00   4.4124656e+00   3.2115770e+00   3.3091938e+00   4.0103680e+00   4.2141675e+00   4.5185015e+00   4.8383751e+00   4.0135080e+00   3.5035589e+00   4.0015001e+00   4.5406852e+00   4.0218666e+00   3.9049967e+00   3.2103515e+00   3.8201314e+00   4.0245707e+00   3.5427188e+00   3.5098740e+00   4.3149009e+00   4.1273075e+00   3.6322643e+00   3.4139979e+00   3.6137088e+00   3.8212216e+00   3.5066417e+00   7.1621748e-01   4.0002221e-01   3.3858048e+00   3.1257747e+00   3.5528332e+00   2.6049377e+00   3.2291581e+00   3.1026235e+00   3.3158954e+00   1.9043238e+00   3.2362541e+00   2.5062158e+00   2.1151984e+00   2.8111676e+00   2.6144115e+00   3.3074459e+00   2.2106051e+00   3.0644792e+00   3.1042002e+00   2.7046286e+00   3.1155579e+00   2.5035275e+00   3.4095576e+00   2.6210983e+00   3.5110555e+00   3.3064076e+00   2.9323802e+00   3.0497665e+00   3.4467650e+00   3.6304824e+00   3.1087162e+00   2.1099919e+00   2.4034951e+00   2.3034398e+00   2.5081992e+00   3.7045399e+00   3.1036554e+00   3.1105454e+00   3.3425812e+00   3.0208515e+00   2.7039990e+00   2.6042124e+00   3.0014077e+00   3.2086215e+00   2.6068616e+00   1.9064532e+00   2.8033825e+00   2.8033607e+00   2.8042643e+00   2.9174390e+00   1.6121856e+00   2.7050791e+00   4.6165570e+00   3.7079065e+00   4.5303834e+00   4.2064764e+00   4.4135512e+00   5.2388103e+00   3.1079784e+00   4.9275471e+00   4.4124760e+00   4.7382279e+00   3.7227931e+00   3.9129335e+00   4.1268500e+00   3.6117351e+00   3.7315577e+00   3.9257254e+00   4.1111068e+00   5.3430927e+00   5.5370582e+00   3.6046347e+00   4.3307527e+00   3.5130597e+00   5.3416790e+00   3.5154388e+00   4.3179254e+00   
4.6302391e+00   3.4146546e+00   3.5107904e+00   4.2125004e+00   4.4361489e+00   4.7415152e+00   5.0768435e+00   4.2149814e+00   3.7084460e+00   4.2023583e+00   4.7769945e+00   4.2205945e+00   4.1089343e+00   3.4107328e+00   4.0362383e+00   4.2295174e+00   3.7618281e+00   3.7079065e+00   4.5213131e+00   4.3307527e+00   3.8409517e+00   3.6158715e+00   3.8200965e+00   4.0195882e+00   3.7063858e+00   4.1210927e-01   3.2157661e+00   3.0055754e+00   3.4095823e+00   2.5182899e+00   3.1060146e+00   3.0020171e+00   3.2051958e+00   1.8468437e+00   3.1049591e+00   2.4099079e+00   2.1180305e+00   2.7069308e+00   2.5226256e+00   3.2022186e+00   2.1097489e+00   2.9102819e+00   3.0041469e+00   2.6022650e+00   3.0136614e+00   2.4087525e+00   3.3085287e+00   2.5053901e+00   3.4040883e+00   3.2011770e+00   2.8045980e+00   2.9078384e+00   3.3077457e+00   3.5074140e+00   3.0043867e+00   2.0123013e+00   2.3159429e+00   2.2186309e+00   2.4051787e+00   3.6030023e+00   3.0041461e+00   3.0063027e+00   3.2075252e+00   2.9100849e+00   2.6032671e+00   2.5097006e+00   2.9028412e+00   3.1024718e+00   2.5058120e+00   1.8685011e+00   2.7040002e+00   2.7016556e+00   2.7029487e+00   2.8031364e+00   1.5747356e+00   2.6040007e+00   4.5158117e+00   3.6083256e+00   4.4100325e+00   4.1032589e+00   4.3091726e+00   5.1114855e+00   3.0117223e+00   4.8065772e+00   4.3039464e+00   4.6187506e+00   3.6121095e+00   3.8069169e+00   4.0114753e+00   3.5138377e+00   3.6350529e+00   3.8212252e+00   4.0040508e+00   5.2135438e+00   5.4123528e+00   3.5063866e+00   4.2155461e+00   3.4147661e+00   5.2119900e+00   3.4083227e+00   4.2084707e+00   4.5071259e+00   3.3090897e+00   3.4075560e+00   4.1085724e+00   4.3075896e+00   4.6108642e+00   4.9236635e+00   4.1113689e+00   3.6022523e+00   4.1009647e+00   4.6262707e+00   4.1190929e+00   4.0037820e+00   3.3086298e+00   3.9141023e+00   4.1202674e+00   3.6321858e+00   3.6083256e+00   4.4117993e+00   4.2229640e+00   3.7257625e+00   3.5107118e+00   3.7106642e+00   3.9184747e+00   
3.6056703e+00   3.3320214e+00   3.1087156e+00   3.5191298e+00   2.6048201e+00   3.2097208e+00   3.1014199e+00   3.3064692e+00   1.9064149e+00   3.2109426e+00   2.5061784e+00   2.1231492e+00   2.8062729e+00   2.6052659e+00   3.3025806e+00   2.2069764e+00   3.0213628e+00   3.1034889e+00   2.7008785e+00   3.1069083e+00   2.5018386e+00   3.4076243e+00   2.6061038e+00   3.5039680e+00   3.3015400e+00   2.9090661e+00   3.0158419e+00   3.4158824e+00   3.6117739e+00   3.1042038e+00   2.1025721e+00   2.4028385e+00   2.3026344e+00   2.5028039e+00   3.7026090e+00   3.1034538e+00   3.1060427e+00   3.3145497e+00   3.0064351e+00   2.7026184e+00   2.6035547e+00   3.0010106e+00   3.2029877e+00   2.6024546e+00   1.9099608e+00   2.8022622e+00   2.8013859e+00   2.8022964e+00   2.9047883e+00   1.6144390e+00   2.7027522e+00   4.6146429e+00   3.7070840e+00   4.5144445e+00   4.2034836e+00   4.4092638e+00   5.2185417e+00   3.1080879e+00   4.9117250e+00   4.4052231e+00   4.7224998e+00   3.7130492e+00   3.9071930e+00   4.1140176e+00   3.6112039e+00   3.7307535e+00   3.9200662e+00   4.1050716e+00   5.3212806e+00   5.5187164e+00   3.6026912e+00   4.3179254e+00   3.5126721e+00   5.3198415e+00   3.5083463e+00   4.3098506e+00   4.6126508e+00   3.4087523e+00   3.5071392e+00   4.2084730e+00   4.4144909e+00   4.7184838e+00   5.0381916e+00   4.2109654e+00   3.7029588e+00   4.2008334e+00   4.7393168e+00   4.2176488e+00   4.1043916e+00   3.4078439e+00   4.0181863e+00   4.2205945e+00   3.7363783e+00   3.7070840e+00   4.5130589e+00   4.3227927e+00   3.8267268e+00   3.6097220e+00   3.8114954e+00   4.0168944e+00   3.7050916e+00   6.0017982e-01   2.0181667e-01   1.5160570e+00   5.2133802e-01   1.3002451e+00   7.0008584e-01   2.1344529e+00   4.1212852e-01   1.8029854e+00   2.0342311e+00   1.1019599e+00   1.1393620e+00   9.0026497e-01   1.4543172e+00   3.3813251e-01   1.4000061e+00   1.2053003e+00   1.0426638e+00   1.4134492e+00   1.1005364e+00   9.3424697e-01   7.8890806e-01   9.0142636e-01   6.1119558e-01   
4.1315633e-01   4.0125062e-01   3.6452132e-01   1.0001753e+00   1.4158897e+00   1.5195166e+00   1.5300146e+00   1.2201636e+00   1.0039209e+00   1.6000032e+00   1.0000457e+00   3.0017653e-01   9.3329055e-01   1.4017696e+00   1.5061610e+00   1.5012947e+00   9.0002570e-01   1.2124837e+00   2.0445123e+00   1.4012283e+00   1.3008855e+00   1.3009222e+00   8.0291749e-01   2.0440118e+00   1.3027556e+00   1.3788457e+00   1.2029161e+00   1.2089253e+00   9.3446811e-01   1.1298636e+00   1.9014076e+00   2.1006232e+00   1.6001224e+00   1.1139906e+00   1.4544336e+00   6.3912709e-01   7.1183012e-01   8.5409862e-01   1.3086191e+00   1.2638465e+00   9.2745734e-01   8.1117067e-01   2.0027889e+00   2.2028146e+00   1.1270327e+00   1.0776190e+00   1.4019372e+00   2.0011307e+00   7.2044167e-01   1.0208709e+00   1.3002450e+00   8.0488008e-01   9.0145141e-01   9.4622126e-01   1.1000289e+00   1.4009513e+00   1.7086186e+00   9.7694377e-01   7.0911112e-01   1.0224133e+00   1.4220925e+00   1.0923537e+00   8.2631334e-01   1.0008620e+00   7.8886139e-01   1.0777307e+00   9.0140221e-01   1.2029161e+00   1.2362755e+00   1.1897289e+00   9.0534502e-01   7.9871893e-01   6.5724028e-01   9.8998705e-01   1.1010807e+00   5.2133179e-01   1.0171340e+00   4.0004442e-01   7.0470720e-01   2.0181667e-01   1.5698091e+00   3.0922892e-01   1.2049541e+00   1.5104875e+00   5.0476836e-01   1.0069214e+00   3.4085233e-01   9.6593231e-01   3.0026460e-01   8.0004443e-01   6.6334810e-01   1.0000152e+00   8.7372177e-01   5.0855077e-01   5.2524663e-01   7.0462697e-01   4.2362917e-01   3.0915245e-01   2.2608083e-01   4.5784410e-01   5.0517282e-01   4.1209001e-01   1.0313359e+00   9.9085945e-01   1.0179856e+00   6.9600743e-01   6.3912943e-01   1.0000152e+00   4.0125062e-01   3.0482299e-01   9.0002615e-01   8.0254500e-01   9.3735629e-01   9.1446938e-01   3.0490481e-01   6.9600743e-01   1.4994060e+00   8.0928056e-01   7.0184453e-01   7.0184453e-01   3.1328089e-01   1.5989637e+00   7.0918894e-01   1.5237054e+00   6.9987517e-01   
1.4060443e+00   1.1002025e+00   1.3061181e+00   2.1141220e+00   1.5030978e+00   1.8055480e+00   1.3062025e+00   1.6223413e+00   6.3164977e-01   8.1112909e-01   1.0095513e+00   8.0713433e-01   9.2867113e-01   9.0155393e-01   1.0001753e+00   2.2182690e+00   2.4124980e+00   1.0039060e+00   1.2201577e+00   8.1343016e-01   2.2176846e+00   5.2491734e-01   1.2037520e+00   1.5066999e+00   4.2362917e-01   4.2362917e-01   1.1060937e+00   1.3130978e+00   1.6177611e+00   1.9760242e+00   1.1138953e+00   6.0948506e-01   1.1056693e+00   1.6779798e+00   1.1527746e+00   1.0001601e+00   4.2362917e-01   9.1892454e-01   1.1528477e+00   8.3183672e-01   6.9987517e-01   1.4094144e+00   1.2633467e+00   8.5440680e-01   7.2036951e-01   7.1629303e-01   9.6576136e-01   6.3322667e-01   1.4267554e+00   4.2268438e-01   1.2004262e+00   6.0035621e-01   2.0860325e+00   3.4342562e-01   1.7133162e+00   1.9641993e+00   1.0207260e+00   1.0897469e+00   8.0008964e-01   1.4650300e+00   5.0043084e-01   1.3002407e+00   1.1303267e+00   9.3424659e-01   1.3473688e+00   1.0001604e+00   9.6593231e-01   6.7616545e-01   8.0097499e-01   6.3192325e-01   5.0437695e-01   3.0026460e-01   2.2608083e-01   9.0142636e-01   1.4861824e+00   1.4580174e+00   1.4889602e+00   1.1900969e+00   9.0142681e-01   1.5001212e+00   9.0166476e-01   2.2538848e-01   8.3187290e-01   1.3130978e+00   1.4197078e+00   1.4012600e+00   8.0046764e-01   1.1544060e+00   2.0075255e+00   1.3063533e+00   1.2089835e+00   1.2089313e+00   7.4275547e-01   2.0887699e+00   1.2190319e+00   1.1935069e+00   1.1010807e+00   1.0087396e+00   7.4335736e-01   9.3424697e-01   1.7023957e+00   2.0003507e+00   1.4002035e+00   9.1449234e-01   1.2643523e+00   5.2167829e-01   5.5450500e-01   6.7616902e-01   1.2049541e+00   1.1528553e+00   8.1112984e-01   6.1119558e-01   1.8053679e+00   2.0034894e+00   1.0142484e+00   9.0155438e-01   1.3009222e+00   1.8029948e+00   6.1119267e-01   8.2425704e-01   1.1002025e+00   7.0176271e-01   8.0046685e-01   7.5564478e-01   9.0026588e-01   
1.2017042e+00   1.5269837e+00   7.9871893e-01   6.0201716e-01   8.6051471e-01   1.2366099e+00   9.4532171e-01   6.3309012e-01   9.0026588e-01   6.3164729e-01   9.3308853e-01   8.0004443e-01   1.1010807e+00   1.0426516e+00   1.0426760e+00   8.0051115e-01   6.8161057e-01   5.2491734e-01   8.6084272e-01   1.0001753e+00   1.0116865e+00   5.6370994e-01   1.0599087e+00   7.4329527e-01   1.1110092e+00   4.1212852e-01   5.6838732e-01   7.0478886e-01   5.0436965e-01   7.7598796e-01   6.0948506e-01   1.2192920e+00   7.1629303e-01   4.2270142e-01   7.1629303e-01   2.2608083e-01   9.7033357e-01   6.3164729e-01   9.6576136e-01   7.5503094e-01   9.1446896e-01   1.1138955e+00   1.3139296e+00   1.2705641e+00   6.5724028e-01   5.0894102e-01   2.2573593e-01   3.3813251e-01   4.1212852e-01   1.1025819e+00   7.1629303e-01   1.1039833e+00   1.2272550e+00   8.0245746e-01   7.0000303e-01   2.0000000e-01   4.1210927e-01   7.7598796e-01   3.3813251e-01   7.1700774e-01   4.0125062e-01   7.0017011e-01   6.0035305e-01   7.4329414e-01   1.0008768e+00   5.0043084e-01   2.0249458e+00   1.1061923e+00   2.0081838e+00   1.6061519e+00   1.8167511e+00   2.7171724e+00   6.3925756e-01   2.3875202e+00   1.8286180e+00   2.2239101e+00   1.2353587e+00   1.3278587e+00   1.6038059e+00   1.0207533e+00   1.2407946e+00   1.3868868e+00   1.5269837e+00   2.8396169e+00   2.9941208e+00   1.0030871e+00   1.8005082e+00   9.3733589e-01   2.8269381e+00   9.7033357e-01   1.7520952e+00   2.1193712e+00   8.6676847e-01   9.4912864e-01   1.6147493e+00   1.9768316e+00   2.2674825e+00   2.7199050e+00   1.6193612e+00   1.1298636e+00   1.6009488e+00   2.4288142e+00   1.6617386e+00   1.5199103e+00   8.6676847e-01   1.5881447e+00   1.6785116e+00   1.4886759e+00   1.1061923e+00   1.9457481e+00   1.7813291e+00   1.3946348e+00   1.0498347e+00   1.2771155e+00   1.4848797e+00   1.1157320e+00   8.0004523e-01   5.0043842e-01   1.6743483e+00   2.0121983e-01   1.3061139e+00   1.5464046e+00   6.0964597e-01   7.1183012e-01   4.0006662e-01   
1.0776296e+00   3.0922892e-01   9.0002570e-01   7.3084171e-01   6.0184622e-01   9.3446811e-01   6.1135434e-01   6.0964597e-01   3.4080442e-01   4.1210927e-01   3.0490481e-01   2.2608083e-01   3.0482299e-01   4.0363334e-01   5.0001522e-01   1.1298636e+00   1.0440350e+00   1.0803561e+00   7.8935898e-01   5.6347978e-01   1.1000098e+00   6.3165225e-01   3.0482299e-01   5.0126466e-01   9.0511169e-01   1.0088926e+00   1.0001903e+00   4.0125062e-01   7.4335736e-01   1.5972311e+00   9.0142681e-01   8.0296037e-01   8.0250202e-01   3.4085233e-01   1.7081446e+00   8.0883841e-01   1.4329858e+00   7.2036951e-01   1.3050153e+00   1.0001753e+00   1.2089252e+00   2.0109333e+00   1.6000184e+00   1.7036944e+00   1.2001396e+00   1.5327217e+00   5.7608844e-01   7.0462844e-01   9.1449234e-01   8.1156529e-01   9.3733552e-01   8.5583415e-01   9.0029018e-01   2.1191883e+00   2.3098756e+00   6.3912709e-01   1.1290757e+00   9.0532049e-01   2.1139617e+00   3.4085233e-01   1.1074834e+00   1.4044980e+00   3.4080442e-01   4.2362917e-01   1.0087250e+00   1.2089253e+00   1.5132032e+00   1.8748226e+00   1.0207260e+00   5.0042326e-01   1.0008620e+00   1.5694554e+00   1.0837679e+00   9.0053003e-01   5.0517282e-01   8.2671175e-01   1.0777411e+00   8.1156529e-01   7.2036951e-01   1.3133662e+00   1.1910068e+00   8.2425704e-01   4.5847767e-01   6.3178534e-01   9.1590889e-01   6.3322667e-01   6.3322667e-01   1.2193537e+00   9.0000091e-01   6.3165225e-01   1.0599087e+00   3.1328089e-01   6.3451734e-01   4.0127250e-01   9.0000091e-01   1.0001604e+00   2.2573593e-01   4.1212852e-01   6.3178534e-01   6.0202028e-01   5.2524663e-01   5.2132556e-01   6.1135434e-01   4.0125062e-01   7.0008584e-01   9.0002615e-01   1.1001014e+00   1.0039209e+00   3.0482299e-01   1.0001751e+00   7.0478886e-01   8.0296037e-01   6.0000952e-01   6.0366256e-01   3.0915245e-01   6.0365948e-01   1.0001903e+00   6.3164977e-01   4.0125062e-01   5.0476836e-01   2.2608083e-01   4.0127250e-01   5.0043842e-01   1.2102248e+00   3.0017653e-01   
3.0482299e-01   3.0008832e-01   5.0043084e-01   1.5012947e+00   4.0000000e-01   1.5653766e+00   6.7616902e-01   1.5829749e+00   1.1074742e+00   1.3373141e+00   2.2681751e+00   8.0291671e-01   1.9315820e+00   1.3458100e+00   1.7862938e+00   8.7372177e-01   8.7209348e-01   1.2093969e+00   7.1700774e-01   1.1055705e+00   1.0604287e+00   1.0451812e+00   2.3834499e+00   2.5286011e+00   6.3322667e-01   1.3903623e+00   7.0462697e-01   2.3796582e+00   6.3912943e-01   1.2792049e+00   1.6907308e+00   5.6595488e-01   5.3943256e-01   1.1400339e+00   1.5965952e+00   1.8639835e+00   2.3424496e+00   1.1635325e+00   6.7626502e-01   1.1005460e+00   2.0903382e+00   1.2459141e+00   1.0236548e+00   5.0894102e-01   1.2528590e+00   1.2949162e+00   1.2662318e+00   6.7616902e-01   1.4817248e+00   1.3908238e+00   1.1389163e+00   6.9600743e-01   8.9540816e-01   1.0858512e+00   6.3192325e-01   1.5890088e+00   4.2270142e-01   1.1330776e+00   1.5368468e+00   5.2491734e-01   1.1187430e+00   4.0243965e-01   1.1139906e+00   4.1420960e-01   7.0096858e-01   7.3895268e-01   1.1000100e+00   9.3861512e-01   4.0127250e-01   7.1708289e-01   8.0004523e-01   5.2167208e-01   4.5784410e-01   3.6452132e-01   5.6371422e-01   4.2270142e-01   4.1317535e-01   1.2159868e+00   1.0567817e+00   1.1138092e+00   8.3387677e-01   6.1119267e-01   9.0029064e-01   3.0482299e-01   4.0125062e-01   1.0003196e+00   7.4395693e-01   9.3459651e-01   8.5617086e-01   3.0922892e-01   8.0073117e-01   1.5493206e+00   7.5564478e-01   6.4049114e-01   6.4049114e-01   4.5784410e-01   1.7404389e+00   6.9600743e-01   1.3253457e+00   6.4049114e-01   1.2202193e+00   9.0142636e-01   1.1056785e+00   1.9348400e+00   1.4092540e+00   1.6176783e+00   1.1286101e+00   1.4350761e+00   4.5148429e-01   6.7720957e-01   8.1757693e-01   8.2671175e-01   8.1937731e-01   7.4263078e-01   8.0055465e-01   2.0418418e+00   2.2277117e+00   1.1002025e+00   1.0286508e+00   7.2044167e-01   2.0415798e+00   6.0035305e-01   1.0039060e+00   1.3253497e+00   5.0043842e-01   
3.1328089e-01   9.0999313e-01   1.1528476e+00   1.4548293e+00   1.8637334e+00   9.1892454e-01   5.2133179e-01   9.3310976e-01   1.5801693e+00   9.6572569e-01   8.0008964e-01   3.4085233e-01   7.5508853e-01   9.6674360e-01   7.4618926e-01   6.4049114e-01   1.2101609e+00   1.0782105e+00   7.2113820e-01   8.0093081e-01   5.2524663e-01   7.8886139e-01   4.5847767e-01   1.7572657e+00   6.1288055e-01   4.0125062e-01   1.0858512e+00   1.1133984e+00   1.4858469e+00   7.1779518e-01   1.8187119e+00   1.2137020e+00   9.6591433e-01   1.4146346e+00   7.4263078e-01   1.5359852e+00   1.2093243e+00   1.7083042e+00   1.4853863e+00   1.5241361e+00   1.7234436e+00   1.9756319e+00   1.9771636e+00   1.3035495e+00   8.0008884e-01   6.3164977e-01   6.0948212e-01   9.1449234e-01   1.8179429e+00   1.2062153e+00   1.3486924e+00   1.8673780e+00   1.4543172e+00   8.7240114e-01   7.4329527e-01   1.1055892e+00   1.4158897e+00   9.3310976e-01   1.1269424e-01   9.3351278e-01   9.7600992e-01   9.6953662e-01   1.3458100e+00   3.0490481e-01   9.0320459e-01   2.7259033e+00   1.8109877e+00   2.7506971e+00   2.3226569e+00   2.5372438e+00   3.4590995e+00   1.2089192e+00   3.1281646e+00   2.5610212e+00   2.9500632e+00   1.9406671e+00   2.0633570e+00   2.3443688e+00   1.7168003e+00   1.8708330e+00   2.0857354e+00   2.2573821e+00   3.5725062e+00   3.7336869e+00   1.7229558e+00   2.5362836e+00   1.6198349e+00   3.5690915e+00   1.7116793e+00   2.4776152e+00   2.8599555e+00   1.6016303e+00   1.6534235e+00   2.3372717e+00   2.7159844e+00   3.0094888e+00   3.4430661e+00   2.3406025e+00   1.8663319e+00   2.3090404e+00   3.1586433e+00   2.3454995e+00   2.2408459e+00   1.5471213e+00   2.3192230e+00   2.4059451e+00   2.1751377e+00   1.8109877e+00   2.6757243e+00   2.4966678e+00   2.1111734e+00   1.7901165e+00   2.0121175e+00   2.1471399e+00   1.8133657e+00   1.4043036e+00   1.6388784e+00   7.0470867e-01   7.7652636e-01   5.0001522e-01   1.1269424e+00   2.2608083e-01   1.0000158e+00   8.0928056e-01   7.0470867e-01   
1.0215068e+00   7.1708289e-01   6.3164977e-01   4.2362917e-01   5.0002283e-01   3.0474106e-01   2.0121983e-01   2.2608083e-01   4.5080200e-01   6.0017982e-01   1.1529284e+00   1.1298636e+00   1.1544060e+00   8.5406674e-01   6.3322667e-01   1.2000066e+00   6.3309258e-01   2.2608083e-01   6.0201716e-01   1.0030721e+00   1.1060937e+00   1.1001110e+00   5.0001522e-01   8.2458478e-01   1.6747799e+00   1.0008617e+00   9.0140221e-01   9.0140131e-01   4.1209001e-01   1.7511598e+00   9.0506299e-01   1.4854079e+00   8.3187290e-01   1.3139296e+00   1.0032293e+00   1.2362702e+00   2.0078120e+00   1.7001329e+00   1.7019430e+00   1.2016381e+00   1.5675442e+00   7.1700909e-01   7.4275547e-01   9.6574369e-01   9.3541878e-01   1.1298552e+00   1.0208844e+00   9.0506343e-01   2.1136134e+00   2.3085898e+00   7.4618926e-01   1.1897982e+00   1.0208709e+00   2.1090362e+00   5.0894102e-01   1.1286018e+00   1.4024091e+00   5.2167829e-01   5.6595908e-01   1.0426638e+00   1.2037520e+00   1.5079206e+00   1.8503663e+00   1.0776296e+00   5.0477564e-01   1.0032296e+00   1.5611241e+00   1.1910693e+00   9.0511169e-01   6.3178782e-01   9.0184172e-01   1.1896660e+00   1.0032443e+00   8.3187290e-01   1.3450688e+00   1.3020492e+00   1.0087252e+00   6.1990228e-01   7.4263078e-01   1.0458540e+00   7.3084171e-01   7.0918894e-01   7.0176271e-01   8.1112984e-01   9.6574336e-01   4.1317535e-01   1.5005626e+00   6.1119558e-01   6.0964597e-01   1.0116724e+00   4.1315633e-01   9.3848935e-01   9.0000136e-01   1.1896660e+00   9.6574369e-01   1.2003597e+00   1.4006465e+00   1.6096629e+00   1.5401713e+00   8.2421923e-01   5.3914287e-01   3.6259865e-01   4.2362917e-01   6.0017665e-01   1.2189701e+00   6.0202028e-01   8.7212232e-01   1.5067961e+00   1.1024820e+00   4.1317535e-01   3.0490481e-01   5.0477564e-01   9.3329017e-01   6.0018299e-01   6.1845783e-01   4.1210927e-01   5.0894102e-01   5.0477564e-01   1.0008620e+00   9.0029064e-01   5.0043842e-01   2.1169442e+00   1.2049539e+00   2.2014913e+00   1.7227908e+00   
1.9366943e+00   2.8973091e+00   6.0383105e-01   2.5621105e+00   1.9756002e+00   2.3854144e+00   1.4163126e+00   1.4857201e+00   1.8044011e+00   1.1074834e+00   1.2661803e+00   1.4994060e+00   1.6741010e+00   3.0107625e+00   3.1586112e+00   1.1298552e+00   1.9796588e+00   1.0095370e+00   3.0090568e+00   1.1900276e+00   1.8963668e+00   2.3135911e+00   1.0782107e+00   1.0783219e+00   1.7384347e+00   2.2009981e+00   2.4793129e+00   2.9421182e+00   1.7402224e+00   1.3018103e+00   1.7072540e+00   2.6745468e+00   1.7367211e+00   1.6485141e+00   9.6691372e-01   1.8209763e+00   1.8293641e+00   1.7435092e+00   1.2049539e+00   2.0881329e+00   1.9090398e+00   1.6063540e+00   1.2407432e+00   1.4664722e+00   1.5386307e+00   1.2093243e+00   1.0943600e+00   1.0030868e+00   1.3272142e+00   9.1446938e-01   1.7295996e+00   1.1336109e+00   8.7209296e-01   1.2643054e+00   6.3912943e-01   1.4396100e+00   1.1299441e+00   1.5271597e+00   1.3148232e+00   1.4267817e+00   1.6268514e+00   1.8468437e+00   1.8305154e+00   1.1759988e+00   7.4262850e-01   5.2491734e-01   5.2167208e-01   8.5586571e-01   1.6206300e+00   1.1291536e+00   1.4631182e+00   1.7576822e+00   1.3254284e+00   1.0172489e+00   6.0605366e-01   9.1894698e-01   1.2951152e+00   8.3187290e-01   3.0474106e-01   8.1500329e-01   1.0396215e+00   9.6150595e-01   1.2528590e+00   5.6347978e-01   8.7240114e-01   2.5401214e+00   1.6166178e+00   2.5672376e+00   2.1256636e+00   2.3434890e+00   3.2706021e+00   1.0235120e+00   2.9379053e+00   2.3650373e+00   2.7819820e+00   1.7939428e+00   1.8718670e+00   2.1669217e+00   1.5269837e+00   1.7152309e+00   1.9257519e+00   2.0672316e+00   3.3962101e+00   3.5413320e+00   1.5241169e+00   2.3604685e+00   1.4422764e+00   3.3805191e+00   1.5352907e+00   2.2985560e+00   2.6785349e+00   1.4314424e+00   1.4886759e+00   2.1422340e+00   2.5406863e+00   2.8290490e+00   3.2861997e+00   2.1472832e+00   1.6782365e+00   2.1087015e+00   2.9939447e+00   2.1830772e+00   2.0525906e+00   1.3935744e+00   2.1564350e+00   
2.2287876e+00   2.0426476e+00   1.6166178e+00   2.4889554e+00   2.3256006e+00   1.9561612e+00   1.6066555e+00   1.8386981e+00   2.0009986e+00   1.6313110e+00   8.0883916e-01   5.0043842e-01   6.0202028e-01   8.0004602e-01   3.3808272e-01   5.0437695e-01   8.0093081e-01   5.2838320e-01   6.0201716e-01   2.5399984e-01   7.2036819e-01   5.0517282e-01   5.0043842e-01   7.0008584e-01   9.1424701e-01   9.0157896e-01   3.0017653e-01   7.2044167e-01   6.2656178e-01   6.6334810e-01   3.6259865e-01   9.0026588e-01   5.0436235e-01   4.1212852e-01   8.0879701e-01   7.0478886e-01   3.0482299e-01   5.2201750e-01   4.5847767e-01   4.0125062e-01   4.1317535e-01   1.0363096e+00   3.4080442e-01   3.0474106e-01   2.2573593e-01   3.0490481e-01   1.2204839e+00   2.4195741e-01   1.8101835e+00   9.0166476e-01   1.7375279e+00   1.4002005e+00   1.6032003e+00   2.4532171e+00   1.0032443e+00   2.1331916e+00   1.6052507e+00   1.9422649e+00   9.1894698e-01   1.1025819e+00   1.3276411e+00   8.1719606e-01   1.0142626e+00   1.1298636e+00   1.3025621e+00   2.5612597e+00   2.7430487e+00   9.0155438e-01   1.5299064e+00   7.1708289e-01   2.5605373e+00   7.0633229e-01   1.5079428e+00   1.8443132e+00   6.0383105e-01   7.0096708e-01   1.4023806e+00   1.6740160e+00   1.9756002e+00   2.3801033e+00   1.4049093e+00   9.0142636e-01   1.4001717e+00   2.0898615e+00   1.4183606e+00   1.3009222e+00   6.0184622e-01   1.2661803e+00   1.4267527e+00   1.1084368e+00   9.0166476e-01   1.7108631e+00   1.5299252e+00   1.0782751e+00   8.1343016e-01   1.0116721e+00   1.2193537e+00   9.0026497e-01   7.9148746e-01   7.0993998e-01   9.3541878e-01   8.1937731e-01   5.0043084e-01   5.6370994e-01   4.1212852e-01   1.0782753e+00   6.0184622e-01   9.0557807e-01   7.4269314e-01   7.0633229e-01   8.2841920e-01   9.1695534e-01   1.0756891e+00   7.3084048e-01   5.2491131e-01   5.0085236e-01   5.0476836e-01   5.0085236e-01   1.1074740e+00   8.3916809e-01   1.2049539e+00   9.6501813e-01   4.2270142e-01   8.0291749e-01   5.0855077e-01   
5.3943256e-01   8.2631334e-01   4.0243965e-01   1.0207260e+00   5.2524663e-01   8.0055465e-01   7.0184453e-01   7.0184453e-01   1.0777307e+00   6.0366256e-01   2.0696799e+00   1.1543334e+00   1.9286352e+00   1.6071727e+00   1.8312163e+00   2.6295459e+00   1.1153247e+00   2.3155068e+00   1.8040969e+00   2.1901871e+00   1.2562955e+00   1.3263639e+00   1.5518083e+00   1.1271226e+00   1.4557657e+00   1.4915559e+00   1.5136393e+00   2.7555974e+00   2.9267466e+00   1.0030718e+00   1.7744103e+00   1.0843333e+00   2.7324083e+00   9.6953662e-01   1.7456060e+00   2.0249458e+00   9.1568820e-01   1.0151258e+00   1.6309461e+00   1.8315348e+00   2.1358791e+00   2.5304748e+00   1.6492450e+00   1.1075720e+00   1.6001777e+00   2.2141198e+00   1.7441970e+00   1.5196090e+00   9.6683480e-01   1.4838225e+00   1.7167914e+00   1.4122282e+00   1.1543334e+00   1.9438463e+00   1.8374400e+00   1.4268530e+00   1.0778421e+00   1.2792049e+00   1.5857302e+00   1.1532421e+00   1.1019503e+00   6.0201716e-01   5.0043842e-01   6.1135434e-01   7.0008735e-01   8.1156529e-01   4.1317535e-01   7.0000303e-01   4.0246123e-01   2.0061436e-01   4.1210927e-01   5.0436965e-01   7.0000303e-01   6.0366256e-01   2.0121983e-01   1.2007726e+00   9.1916394e-01   1.0124729e+00   8.0055465e-01   4.0246123e-01   7.0008735e-01   5.0085236e-01   6.0017982e-01   6.0202028e-01   6.3165225e-01   7.4612830e-01   6.0383105e-01   1.1269424e-01   7.0184453e-01   1.4559030e+00   5.6371422e-01   5.2167829e-01   5.2133179e-01   4.0004442e-01   1.7133283e+00   6.0948800e-01   1.3743342e+00   5.2524663e-01   1.2702954e+00   9.0142636e-01   1.1286018e+00   1.9763960e+00   1.2004262e+00   1.6484371e+00   1.1066159e+00   1.5033966e+00   6.1990228e-01   6.3322667e-01   8.9538275e-01   6.1990228e-01   1.0010060e+00   9.1471442e-01   8.0488008e-01   2.0894199e+00   2.2581358e+00   7.0088627e-01   1.1085342e+00   6.3178782e-01   2.0855639e+00   4.0363334e-01   1.0293900e+00   1.3743657e+00   4.0006662e-01   4.0125062e-01   9.3329055e-01   
1.2396422e+00   1.5267540e+00   1.9798779e+00   9.6591465e-01   4.0127250e-01   9.0026497e-01   1.7151603e+00   1.0797805e+00   8.0296037e-01   4.0006662e-01   8.9540816e-01   1.0837679e+00   9.6674360e-01   5.2524663e-01   1.2440789e+00   1.1938630e+00   9.1892454e-01   5.2524663e-01   6.3912943e-01   9.3733589e-01   4.5148429e-01   1.1281352e+00   9.0002570e-01   5.0517282e-01   9.4513210e-01   4.1315633e-01   1.2014191e+00   5.2133179e-01   1.3063533e+00   1.1019505e+00   8.5403370e-01   1.0426516e+00   1.3523310e+00   1.4544312e+00   9.0142636e-01   3.3818226e-01   5.0085236e-01   5.0437695e-01   3.0922892e-01   1.5001461e+00   9.0005094e-01   9.0668287e-01   1.2396475e+00   8.7209296e-01   5.0000761e-01   4.5078948e-01   8.0046764e-01   1.0030724e+00   4.1317535e-01   6.7824250e-01   6.0017665e-01   6.0000952e-01   6.0000317e-01   7.4262850e-01   6.3925756e-01   5.0001522e-01   2.4077059e+00   1.5012741e+00   2.3329496e+00   2.0008921e+00   2.2042323e+00   3.0476353e+00   9.3541878e-01   2.7309758e+00   2.2068463e+00   2.5373913e+00   1.5160787e+00   1.7043730e+00   1.9242109e+00   1.4044668e+00   1.5401330e+00   1.7168122e+00   1.9044144e+00   3.1543192e+00   3.3406961e+00   1.4044697e+00   2.1265131e+00   1.3061138e+00   3.1536525e+00   1.3069754e+00   2.1097680e+00   2.4379736e+00   1.2049541e+00   1.3017511e+00   2.0033792e+00   2.2562563e+00   2.5606009e+00   2.9377608e+00   2.0050253e+00   1.5030978e+00   2.0001168e+00   2.6390071e+00   2.0114908e+00   1.9023062e+00   1.2016381e+00   1.8467998e+00   2.0209800e+00   1.6143467e+00   1.5012741e+00   2.3121231e+00   2.1220697e+00   1.6461460e+00   1.4062065e+00   1.6118728e+00   1.8108200e+00   1.5004645e+00   1.1000005e+00   9.0305330e-01   9.0506343e-01   1.1075720e+00   8.0488008e-01   6.1119558e-01   6.3912943e-01   6.0383105e-01   3.0490481e-01   1.1269424e-01   4.1210927e-01   6.0184622e-01   7.0008735e-01   1.0803561e+00   1.2125410e+00   1.2178626e+00   9.0645118e-01   7.9153339e-01   1.3000002e+00   
7.0096858e-01   3.0008832e-01   8.0245824e-01   1.1001015e+00   1.2040344e+00   1.2012928e+00   6.0017982e-01   9.0645118e-01   1.7262450e+00   1.1005460e+00   1.0000307e+00   1.0000307e+00   5.0043842e-01   1.7087610e+00   1.0003198e+00   1.6300950e+00   9.3848935e-01   1.5032156e+00   1.2007124e+00   1.4092540e+00   2.2026134e+00   1.8005395e+00   1.9004485e+00   1.4019342e+00   1.7231818e+00   7.4269314e-01   9.0668287e-01   1.1133897e+00   1.0251597e+00   1.0924484e+00   1.0143978e+00   1.1005460e+00   2.3044111e+00   2.5032992e+00   9.4511250e-01   1.3253497e+00   1.1075720e+00   2.3033192e+00   5.5450500e-01   1.3061180e+00   1.6004128e+00   5.4219811e-01   6.3912943e-01   1.2090477e+00   1.4006179e+00   1.7019555e+00   2.0185049e+00   1.2190878e+00   7.0548283e-01   1.2049539e+00   1.7202439e+00   1.2636227e+00   1.1006371e+00   7.0911112e-01   1.0207396e+00   1.2632946e+00   9.3308853e-01   9.3848935e-01   1.5130871e+00   1.3741498e+00   9.6572569e-01   6.9987517e-01   8.2421923e-01   1.0798806e+00   8.5583415e-01   5.2524663e-01   8.2418002e-01   6.3912709e-01   3.6452132e-01   5.6394820e-01   7.2036819e-01   5.0517282e-01   8.0008964e-01   1.0000005e+00   1.2000731e+00   1.1019597e+00   4.0002221e-01   1.0039063e+00   7.4612830e-01   8.3183672e-01   6.0383105e-01   6.1119558e-01   2.0000000e-01   4.5078948e-01   1.1000098e+00   7.8890806e-01   4.0122873e-01   5.6371422e-01   4.1212852e-01   5.0001522e-01   5.2524663e-01   1.2137020e+00   3.4080442e-01   3.3813251e-01   3.0490481e-01   6.0035621e-01   1.5010034e+00   4.0246123e-01   1.5265987e+00   6.1135434e-01   1.6395342e+00   1.1134850e+00   1.3309249e+00   2.3136797e+00   7.1629168e-01   1.9760038e+00   1.3748534e+00   1.8136626e+00   9.1894698e-01   9.0320459e-01   1.2661802e+00   6.0427481e-01   9.1427000e-01   9.6685270e-01   1.0777305e+00   2.4270428e+00   2.5626268e+00   8.1112909e-01   1.4228744e+00   5.2167208e-01   2.4261071e+00   7.0633229e-01   1.3043552e+00   1.7511131e+00   6.0383105e-01   
5.2491131e-01   1.1330776e+00   1.6740160e+00   1.9314874e+00   2.4166999e+00   1.1400420e+00   7.4269200e-01   1.1024820e+00   2.1702438e+00   1.1639421e+00   1.0427822e+00   4.2268438e-01   1.3276412e+00   1.2710363e+00   1.3154933e+00   6.1135434e-01   1.4922566e+00   1.3466030e+00   1.1400339e+00   7.3461436e-01   9.3733552e-01   9.7694377e-01   6.0365948e-01   5.8750389e-01   2.4195741e-01   8.6051471e-01   3.3818226e-01   8.1719606e-01   6.0202028e-01   6.0219099e-01   8.0337471e-01   1.0214933e+00   1.0338224e+00   5.2201750e-01   6.0000635e-01   3.6259865e-01   4.2268438e-01   2.2538848e-01   1.0087393e+00   5.4219811e-01   7.4618926e-01   9.2019277e-01   5.2838320e-01   3.4080442e-01   3.4085233e-01   3.4085233e-01   5.2838320e-01   2.0121983e-01   9.0294373e-01   3.0482299e-01   3.0490481e-01   3.0490481e-01   4.1420960e-01   1.1133986e+00   3.0017653e-01   1.9760242e+00   1.0776188e+00   1.8598666e+00   1.5071120e+00   1.7384459e+00   2.5637810e+00   9.3426769e-01   2.2403929e+00   1.7108631e+00   2.0993246e+00   1.1405598e+00   1.2394690e+00   1.4816205e+00   1.0776296e+00   1.4324323e+00   1.4163126e+00   1.4134492e+00   2.6753615e+00   2.8540068e+00   9.1001664e-01   1.6986597e+00   1.0426638e+00   2.6697938e+00   9.0657539e-01   1.6396943e+00   1.9541963e+00   8.5583415e-01   9.0207914e-01   1.5412500e+00   1.7849153e+00   2.0880425e+00   2.4973149e+00   1.5650163e+00   1.0060994e+00   1.5001440e+00   2.2185588e+00   1.6410190e+00   1.4111252e+00   8.5440680e-01   1.4330979e+00   1.6474117e+00   1.4095656e+00   1.0776188e+00   1.8534896e+00   1.7579984e+00   1.3938438e+00   1.0171340e+00   1.1990152e+00   1.4686236e+00   1.0427822e+00   6.8261201e-01   1.0004792e+00   6.3178782e-01   4.1210927e-01   6.0202028e-01   7.0025283e-01   8.0245903e-01   6.7720957e-01   8.1719606e-01   7.0008432e-01   1.0069214e+00   7.9153339e-01   8.6054545e-01   6.4049114e-01   6.3178782e-01   9.0155393e-01   1.2000065e+00   9.0508712e-01   2.0181667e-01   8.2635069e-01   
7.1708289e-01   7.0548283e-01   8.0000239e-01   5.4219811e-01   1.3530568e+00   6.3322667e-01   8.0967961e-01   7.1708289e-01   7.0016860e-01   1.5402579e+00   6.3925756e-01   1.5611241e+00   6.4620889e-01   1.4283663e+00   1.1134850e+00   1.3189663e+00   2.1346646e+00   1.3000497e+00   1.8186729e+00   1.3009674e+00   1.7335369e+00   1.0118233e+00   8.1117067e-01   1.0567664e+00   6.0605366e-01   9.2867113e-01   1.0782857e+00   1.0429127e+00   2.2917679e+00   2.4270713e+00   5.0042326e-01   1.2848760e+00   6.9987517e-01   2.2396581e+00   5.2491734e-01   1.3051737e+00   1.5457820e+00   6.0365948e-01   8.0291749e-01   1.1110184e+00   1.3561934e+00   1.6492450e+00   2.1212048e+00   1.1186586e+00   6.7616723e-01   1.1005365e+00   1.7574668e+00   1.3269962e+00   1.0777411e+00   8.0097499e-01   1.0411548e+00   1.1972915e+00   9.9911696e-01   6.4620889e-01   1.4422764e+00   1.3473056e+00   9.3861512e-01   5.2491734e-01   8.6084272e-01   1.2528048e+00   8.2498722e-01   9.6150595e-01   5.0477564e-01   1.0214931e+00   8.0923926e-01   8.0492246e-01   1.0062544e+00   1.2363856e+00   1.2438823e+00   6.2656178e-01   4.0006662e-01   1.2085435e-01   2.0181667e-01   2.2573593e-01   1.2016443e+00   6.3925756e-01   9.2019277e-01   1.1335345e+00   7.1636719e-01   5.0084481e-01   2.0121983e-01   5.0002283e-01   7.3155911e-01   2.0181667e-01   6.7626681e-01   3.0915245e-01   5.0437695e-01   4.1317535e-01   6.1845783e-01   9.0506254e-01   3.0922892e-01   2.1350025e+00   1.2189760e+00   2.0658767e+00   1.7034615e+00   1.9177947e+00   2.7775988e+00   7.7598704e-01   2.4534018e+00   1.9144928e+00   2.2857680e+00   1.2749306e+00   1.4182222e+00   1.6639408e+00   1.1527669e+00   1.4140789e+00   1.4954274e+00   1.6121856e+00   2.8913337e+00   3.0644792e+00   1.1011719e+00   1.8708183e+00   1.0777305e+00   2.8852099e+00   1.0396215e+00   1.8297117e+00   2.1701542e+00   9.4532171e-01   1.0262619e+00   1.7168122e+00   2.0059655e+00   2.3063931e+00   2.7230908e+00   1.7261949e+00   1.2093243e+00   
1.7002548e+00   2.4331092e+00   1.7646791e+00   1.6080687e+00   9.3848935e-01   1.6152383e+00   1.7771159e+00   1.4971922e+00   1.2189760e+00   2.0349233e+00   1.8831259e+00   1.4665397e+00   1.1400339e+00   1.3492939e+00   1.5757399e+00   1.2102248e+00   8.1117067e-01   7.0548283e-01   6.0964891e-01   6.0605366e-01   7.0918894e-01   9.0279223e-01   8.0008964e-01   3.6259865e-01   1.3154973e+00   1.0604287e+00   1.1536694e+00   9.1892454e-01   5.0477564e-01   5.0894102e-01   3.0922892e-01   8.0046764e-01   9.0778124e-01   7.1708289e-01   8.6225026e-01   6.8685125e-01   4.0363334e-01   8.4536936e-01   1.5318139e+00   6.5832080e-01   6.7636452e-01   6.3322667e-01   5.6838732e-01   1.8053679e+00   7.2044167e-01   1.2092602e+00   5.0437695e-01   1.3018595e+00   8.0291671e-01   1.0095513e+00   1.9760044e+00   1.0208709e+00   1.6387179e+00   1.0597877e+00   1.4686236e+00   6.0201716e-01   6.0427481e-01   9.3331138e-01   7.0025283e-01   6.1119558e-01   6.0427175e-01   7.4269200e-01   2.0887699e+00   2.2281421e+00   1.0001753e+00   1.0797700e+00   4.1317535e-01   2.0885107e+00   5.2133179e-01   9.6591465e-01   1.4140457e+00   4.1209001e-01   2.2573593e-01   8.1156529e-01   1.3450340e+00   1.5966664e+00   2.0855643e+00   8.1343016e-01   4.6440171e-01   8.2635069e-01   1.8444686e+00   8.2635069e-01   7.1621748e-01   2.0061436e-01   1.0088783e+00   9.1566538e-01   1.0032296e+00   5.0437695e-01   1.1543257e+00   9.8997136e-01   8.1117067e-01   7.0470867e-01   6.0980961e-01   6.3322667e-01   3.0474106e-01   9.0031539e-01   7.0000151e-01   3.3813251e-01   5.2167829e-01   8.5403428e-01   1.0095513e+00   5.0043842e-01   5.2524663e-01   6.0980961e-01   6.1288055e-01   3.0026460e-01   1.1001015e+00   7.1636719e-01   6.3309258e-01   7.4335736e-01   5.2167208e-01   5.0043084e-01   6.0184309e-01   6.0964891e-01   6.0017982e-01   3.0482299e-01   1.1153247e+00   5.0043084e-01   4.0246123e-01   4.0125062e-01   3.0017653e-01   1.1270411e+00   4.0002221e-01   2.0175565e+00   1.1056693e+00   
1.9099615e+00   1.6003257e+00   1.8055799e+00   2.6186105e+00   1.2017042e+00   2.3090806e+00   1.8007233e+00   2.1233216e+00   1.1144002e+00   1.3025622e+00   1.5097103e+00   1.0216374e+00   1.2396937e+00   1.3452695e+00   1.5005647e+00   2.7241212e+00   2.9166918e+00   1.0087396e+00   1.7168636e+00   9.3733552e-01   2.7221286e+00   9.0508756e-01   1.7046111e+00   2.0107590e+00   8.0879701e-01   9.0508712e-01   1.6049314e+00   1.8174378e+00   2.1221146e+00   2.4750525e+00   1.6096791e+00   1.1000193e+00   1.6000016e+00   2.1732693e+00   1.6308665e+00   1.5004872e+00   8.0883916e-01   1.4182493e+00   1.6308803e+00   1.2094550e+00   1.1056693e+00   1.9088565e+00   1.7377460e+00   1.2661852e+00   1.0088926e+00   1.2092662e+00   1.4340155e+00   1.1019692e+00   3.4342562e-01   6.0964891e-01   5.6595908e-01   5.0437695e-01   5.2167829e-01   4.5783248e-01   1.4023777e+00   1.1286018e+00   1.2201578e+00   1.0032443e+00   3.0922892e-01   9.0642679e-01   9.0166476e-01   6.0964597e-01   5.0084481e-01   8.6054545e-01   9.6574336e-01   8.0923926e-01   5.0477564e-01   9.0532093e-01   1.6742781e+00   7.8895472e-01   7.5564478e-01   7.4618926e-01   6.0964891e-01   1.9222003e+00   8.2462252e-01   1.2093908e+00   5.2201750e-01   1.0522594e+00   7.0548138e-01   9.3735629e-01   1.7578497e+00   1.4001717e+00   1.4326118e+00   9.0166431e-01   1.3681502e+00   7.1636719e-01   4.5148429e-01   7.1183012e-01   6.3164977e-01   9.0534502e-01   8.5583415e-01   6.3322667e-01   1.9047821e+00   2.0429861e+00   3.3813251e-01   9.4634218e-01   7.1700774e-01   1.8662975e+00   3.0474106e-01   9.1695534e-01   1.1636098e+00   3.3818226e-01   5.0476836e-01   7.4329527e-01   1.0171202e+00   1.3020942e+00   1.8013674e+00   7.8935898e-01   3.0474106e-01   7.0008735e-01   1.4927071e+00   1.0336860e+00   6.7720957e-01   5.0855778e-01   7.3895268e-01   9.4622126e-01   8.4540285e-01   5.2201750e-01   1.0621172e+00   1.0788651e+00   8.1156529e-01   4.0002221e-01   5.6618864e-01   9.6935134e-01   5.2524663e-01   
4.1212852e-01   5.0517282e-01   7.0008584e-01   6.3322667e-01   3.0490481e-01   1.2003660e+00   9.1552373e-01   1.0095513e+00   8.0046685e-01   4.5080200e-01   7.0105084e-01   6.0964891e-01   6.0365948e-01   5.0477564e-01   6.3178782e-01   7.4329527e-01   6.0201716e-01   2.2573593e-01   7.0096708e-01   1.4548054e+00   5.6347978e-01   5.2167208e-01   5.2133802e-01   4.0006662e-01   1.7132643e+00   6.0948506e-01   1.4655221e+00   7.0548283e-01   1.2921474e+00   9.1424701e-01   1.1900342e+00   1.9791159e+00   1.2013591e+00   1.6491682e+00   1.1111057e+00   1.5689626e+00   8.0726668e-01   7.4329527e-01   9.8998705e-01   8.0337471e-01   1.2004198e+00   1.1061923e+00   8.2635069e-01   2.0953157e+00   2.2622460e+00   6.0366256e-01   1.2097311e+00   8.0883841e-01   2.0866883e+00   6.0035621e-01   1.0858512e+00   1.3762609e+00   6.0000635e-01   6.0035305e-01   1.0143975e+00   1.2399444e+00   1.5291965e+00   1.9842703e+00   1.0777305e+00   4.1315633e-01   9.0005048e-01   1.7303039e+00   1.2394744e+00   8.2498722e-01   6.0018299e-01   9.9013884e-01   1.2395260e+00   1.1286911e+00   7.0548283e-01   1.3081175e+00   1.3479052e+00   1.1074834e+00   7.0184453e-01   8.1117067e-01   1.1186499e+00   6.0980961e-01   2.0181667e-01   5.2133802e-01   7.0548283e-01   4.0243965e-01   8.5471446e-01   9.1001664e-01   9.1916394e-01   6.0964891e-01   8.0296037e-01   1.0000307e+00   5.2524663e-01   4.1420960e-01   6.0000635e-01   8.0004523e-01   9.0166431e-01   9.0026588e-01   3.3818226e-01   6.0366256e-01   1.4340438e+00   8.0004523e-01   7.0000454e-01   7.0000151e-01   2.0000000e-01   1.4651632e+00   7.0008584e-01   1.7369589e+00   8.4540285e-01   1.6071563e+00   1.3008770e+00   1.5130871e+00   2.3098753e+00   1.5002444e+00   2.0034559e+00   1.5005854e+00   1.8322392e+00   8.5437498e-01   1.0087393e+00   1.2192920e+00   8.4786353e-01   1.1330694e+00   1.1270325e+00   1.2012867e+00   2.4144060e+00   2.6097166e+00   7.9153339e-01   1.4330116e+00   8.7209348e-01   2.4119960e+00   6.3178782e-01   
1.4094452e+00   1.7039341e+00   5.6371422e-01   6.3309258e-01   1.3130937e+00   1.5066999e+00   1.8106410e+00   2.1515742e+00   1.3253458e+00   8.0004602e-01   1.3000908e+00   1.8533295e+00   1.3748188e+00   1.2012928e+00   5.7609230e-01   1.1298636e+00   1.3741846e+00   1.0451812e+00   8.4540285e-01   1.6176927e+00   1.4854079e+00   1.0777307e+00   7.4612830e-01   9.3306807e-01   1.1910068e+00   8.1715665e-01   4.0243965e-01   6.0184622e-01   6.0000952e-01   1.0158274e+00   1.1111057e+00   1.1191444e+00   8.0928056e-01   7.4335736e-01   1.2000002e+00   6.0964891e-01   3.0026460e-01   7.0088477e-01   1.0001601e+00   1.1024820e+00   1.1005458e+00   5.0042326e-01   8.0492246e-01   1.6321742e+00   1.0001753e+00   9.0005048e-01   9.0002615e-01   4.0006662e-01   1.6390068e+00   9.0029064e-01   1.6300430e+00   8.6084272e-01   1.5035329e+00   1.2004200e+00   1.4092511e+00   2.2043907e+00   1.7002548e+00   1.9010379e+00   1.4007831e+00   1.7240342e+00   7.4269314e-01   9.0534502e-01   1.1133984e+00   9.3184922e-01   1.0597992e+00   1.0142766e+00   1.1005364e+00   2.3071806e+00   2.5048249e+00   8.4536936e-01   1.3253910e+00   1.0116865e+00   2.3056305e+00   5.2838320e-01   1.3061582e+00   1.6010223e+00   4.8391482e-01   5.7608844e-01   1.2089313e+00   1.4017695e+00   1.7039229e+00   2.0293124e+00   1.2189760e+00   7.0096858e-01   1.2016380e+00   1.7295384e+00   1.2636227e+00   1.1005460e+00   6.1830489e-01   1.0208709e+00   1.2632948e+00   9.3329055e-01   8.6084272e-01   1.5130912e+00   1.3741813e+00   9.6572569e-01   6.5832080e-01   8.2418071e-01   1.0788007e+00   7.9148662e-01   3.0922892e-01   8.0046764e-01   1.3743342e+00   1.3452695e+00   1.3745152e+00   1.0776296e+00   8.0051115e-01   1.4000349e+00   8.2462252e-01   3.0026460e-01   5.7609230e-01   1.2089253e+00   1.3131370e+00   1.3002493e+00   7.0016860e-01   1.0426760e+00   1.8951252e+00   1.2036864e+00   1.1055892e+00   1.1055707e+00   6.3165225e-01   1.9760099e+00   1.1133895e+00   1.3035495e+00   1.0032296e+00   
1.1134939e+00   8.1112984e-01   1.0427944e+00   1.8040883e+00   1.9000220e+00   1.5005626e+00   1.0010060e+00   1.3844234e+00   6.1288055e-01   5.7609230e-01   7.8890721e-01   1.1056785e+00   1.1270325e+00   9.0778124e-01   7.0556260e-01   1.9141294e+00   2.1052841e+00   8.2421923e-01   1.0150395e+00   1.2036863e+00   1.9046783e+00   5.2133802e-01   9.3733589e-01   1.2010584e+00   6.0948212e-01   7.0470867e-01   8.5583357e-01   1.0008768e+00   1.3033860e+00   1.6469593e+00   9.0294373e-01   5.0436965e-01   8.5406616e-01   1.3485619e+00   1.0522594e+00   7.0993998e-01   8.0250123e-01   7.4329527e-01   1.0427822e+00   9.0053003e-01   1.0032296e+00   1.1531951e+00   1.1543259e+00   9.0142681e-01   5.6618864e-01   6.1135434e-01   9.3984267e-01   9.0168933e-01   7.1629303e-01   1.5266891e+00   1.3564850e+00   1.4198077e+00   1.1544060e+00   7.0088627e-01   1.3008812e+00   7.2036951e-01   3.0482299e-01   7.4954884e-01   1.1531951e+00   1.2645755e+00   1.2053003e+00   6.1119267e-01   1.0803561e+00   1.9177201e+00   1.1286911e+00   1.0451689e+00   1.0433444e+00   7.2036951e-01   2.0856547e+00   1.0782211e+00   1.0434746e+00   9.0029064e-01   9.0279223e-01   6.0948800e-01   8.0883841e-01   1.6097492e+00   1.8003682e+00   1.3025173e+00   8.0879701e-01   1.1347620e+00   3.0922892e-01   3.6452132e-01   5.2133179e-01   1.0032293e+00   9.3308891e-01   6.0383105e-01   5.0043084e-01   1.7170314e+00   1.9082779e+00   8.5406616e-01   7.4275547e-01   1.1001110e+00   1.7132654e+00   4.1212852e-01   7.0548138e-01   1.0030871e+00   5.0085236e-01   6.0000635e-01   6.1135434e-01   8.0879701e-01   1.1134075e+00   1.4922778e+00   6.3322667e-01   4.0246123e-01   6.8261201e-01   1.1935004e+00   7.4954884e-01   5.0437695e-01   7.0008584e-01   4.5148429e-01   7.4262964e-01   6.0018299e-01   9.0029064e-01   9.1424701e-01   8.5437440e-01   6.0017665e-01   5.2167208e-01   3.0915245e-01   6.4620889e-01   8.0000160e-01   1.0033867e+00   7.3461436e-01   8.2512420e-01   6.0219099e-01   6.0017982e-01   
6.0000317e-01   5.0000761e-01   7.0016860e-01   6.0202028e-01   4.5148429e-01   5.7630313e-01   5.0855778e-01   1.2699992e-01   5.0894102e-01   1.2671752e+00   4.1420960e-01   3.6259865e-01   3.4080442e-01   2.4170870e-01   1.5133193e+00   4.1317535e-01   1.5238388e+00   6.0980961e-01   1.4557632e+00   1.1002023e+00   1.3069713e+00   2.1693127e+00   1.1005458e+00   1.8443124e+00   1.3063934e+00   1.6655594e+00   6.5832080e-01   8.0492246e-01   1.0498228e+00   5.7832449e-01   9.1424701e-01   9.0320459e-01   1.0032296e+00   2.2802814e+00   2.4537354e+00   7.1621613e-01   1.2528590e+00   5.3914287e-01   2.2781292e+00   4.2362917e-01   1.2128138e+00   1.5640141e+00   3.4085233e-01   4.1212852e-01   1.1060939e+00   1.4140458e+00   1.7081323e+00   2.1436849e+00   1.1138955e+00   6.0184622e-01   1.1001015e+00   1.8659091e+00   1.1544060e+00   1.0010209e+00   3.3813251e-01   1.0224270e+00   1.1635398e+00   9.7600992e-01   6.0980961e-01   1.4182493e+00   1.2705641e+00   8.9538275e-01   5.4219811e-01   7.3084171e-01   9.6936870e-01   6.0184934e-01   3.0922892e-01   2.4170870e-01   4.0127250e-01   1.6009488e+00   1.0040629e+00   1.0499492e+00   1.2653025e+00   9.1471442e-01   6.1119558e-01   5.0477564e-01   9.0005048e-01   1.1016049e+00   5.0043084e-01   7.0096708e-01   7.0088627e-01   7.0470720e-01   7.0176121e-01   8.0967961e-01   6.3165225e-01   6.0201716e-01   2.5221737e+00   1.6096629e+00   2.4221589e+00   2.1015969e+00   2.3098905e+00   3.1317714e+00   1.0597879e+00   2.8188299e+00   2.3040148e+00   2.6373482e+00   1.6231306e+00   1.8068049e+00   2.0210084e+00   1.5237053e+00   1.7080686e+00   1.8459262e+00   2.0034094e+00   3.2387184e+00   3.4286399e+00   1.5005854e+00   2.2285172e+00   1.4324350e+00   3.2358000e+00   1.4109628e+00   2.2110849e+00   2.5224740e+00   1.3139336e+00   1.4095777e+00   2.1090366e+00   2.3323064e+00   2.6377472e+00   2.9966835e+00   2.1144760e+00   1.6012568e+00   2.1000482e+00   2.6968519e+00   2.1346646e+00   2.0025815e+00   1.3133662e+00   
1.9351024e+00   2.1377869e+00   1.7137965e+00   1.6096629e+00   2.4161682e+00   2.2434416e+00   1.7684500e+00   1.5143051e+00   1.7168636e+00   1.9368172e+00   1.6050040e+00   1.1269424e-01   3.3818226e-01   1.3017961e+00   7.4612830e-01   1.0262619e+00   1.2443200e+00   8.2421923e-01   6.0202028e-01   2.2573593e-01   6.0017982e-01   8.4572653e-01   3.0922892e-01   5.6347978e-01   4.1317535e-01   6.0964891e-01   5.2201750e-01   7.3090905e-01   8.0245824e-01   4.1420960e-01   2.2297880e+00   1.3131802e+00   2.1734835e+00   1.8042696e+00   2.0169191e+00   2.8857511e+00   7.7598796e-01   2.5607759e+00   2.0181988e+00   2.3909895e+00   1.3770846e+00   1.5195166e+00   1.7689720e+00   1.2362756e+00   1.4651863e+00   1.5800353e+00   1.7155605e+00   3.0010211e+00   3.1712557e+00   1.2016443e+00   1.9735224e+00   1.1531953e+00   2.9937162e+00   1.1401191e+00   1.9335528e+00   2.2793947e+00   1.0403116e+00   1.1237940e+00   1.8155572e+00   2.1170640e+00   2.4166673e+00   2.8369211e+00   1.8227568e+00   1.3135522e+00   1.8005404e+00   2.5443612e+00   1.8557670e+00   1.7105814e+00   1.0313359e+00   1.7226330e+00   1.8708183e+00   1.5881025e+00   1.3131802e+00   2.1363271e+00   1.9752912e+00   1.5530527e+00   1.2366099e+00   1.4501583e+00   1.6655594e+00   1.3088083e+00   3.4342562e-01   1.4024091e+00   8.3183672e-01   1.0522594e+00   1.2712749e+00   8.5437498e-01   6.1119558e-01   3.3813251e-01   7.0016860e-01   9.2867113e-01   3.4342562e-01   5.2133179e-01   5.0855778e-01   6.3192325e-01   5.6618864e-01   7.5564478e-01   7.0462844e-01   4.5847767e-01   2.3345511e+00   1.4181033e+00   2.2624227e+00   1.9044571e+00   2.1188381e+00   2.9740725e+00   8.7209348e-01   2.6511588e+00   2.1151751e+00   2.4832720e+00   1.4692287e+00   1.6190709e+00   1.8603115e+00   1.3450304e+00   1.5778323e+00   1.6856949e+00   1.8133657e+00   3.0879393e+00   3.2627356e+00   1.3017553e+00   2.0678448e+00   1.2635708e+00   3.0810441e+00   1.2366675e+00   2.0307764e+00   2.3657459e+00   1.1404856e+00   
1.2257611e+00   1.9176961e+00   2.1957105e+00   2.4980314e+00   2.9055191e+00   1.9262438e+00   1.4100098e+00   1.9004485e+00   2.6116431e+00   1.9609333e+00   1.8095574e+00   1.1347620e+00   1.8037909e+00   1.9725464e+00   1.6581521e+00   1.4181033e+00   2.2355461e+00   2.0784269e+00   1.6462102e+00   1.3373104e+00   1.5468618e+00   1.7698041e+00   1.4111252e+00   1.2003596e+00   6.1288055e-01   7.4618926e-01   9.6691372e-01   5.7609230e-01   3.0922892e-01   3.0490481e-01   5.0436965e-01   7.0184453e-01   1.1269424e-01   8.2635069e-01   3.0482299e-01   3.3813251e-01   3.0490481e-01   4.5148429e-01   9.3308891e-01   2.0181667e-01   2.1221982e+00   1.2089191e+00   2.0305682e+00   1.7009400e+00   1.9088256e+00   2.7434081e+00   9.1894698e-01   2.4265154e+00   1.9046790e+00   2.2453370e+00   1.2284047e+00   1.4060413e+00   1.6267848e+00   1.1281352e+00   1.3523310e+00   1.4562730e+00   1.6032169e+00   2.8519346e+00   3.0369970e+00   1.1020600e+00   1.8342569e+00   1.0426638e+00   2.8491513e+00   1.0116721e+00   1.8114932e+00   2.1335035e+00   9.1552373e-01   1.0090312e+00   1.7079369e+00   1.9522117e+00   2.2566913e+00   2.6406601e+00   1.7139238e+00   1.2013529e+00   1.7000137e+00   2.3442222e+00   1.7386523e+00   1.6019500e+00   9.1449234e-01   1.5517970e+00   1.7435092e+00   1.3757183e+00   1.2089191e+00   2.0167186e+00   1.8496945e+00   1.3938438e+00   1.1152390e+00   1.3189663e+00   1.5429659e+00   1.2037520e+00   6.7720957e-01   7.4262850e-01   7.0911112e-01   7.0633229e-01   1.0011648e+00   1.1020600e+00   7.2036951e-01   5.0477564e-01   1.1005460e+00   1.8106900e+00   9.0166431e-01   9.0192695e-01   9.0055475e-01   8.0055465e-01   2.1027376e+00   1.0003198e+00   1.0225570e+00   3.0474106e-01   1.1299441e+00   5.0517282e-01   7.5564478e-01   1.7513222e+00   1.1055799e+00   1.4140515e+00   7.8895472e-01   1.3181953e+00   5.7608844e-01   4.1315633e-01   8.1156529e-01   4.1317535e-01   8.0004523e-01   7.2044167e-01   5.2524663e-01   1.8787830e+00   1.9768256e+00   
5.0001522e-01   9.4912864e-01   4.5148429e-01   1.8635775e+00   3.0915245e-01   7.8611860e-01   1.2373911e+00   3.0922892e-01   3.0922892e-01   5.7609230e-01   1.2089834e+00   1.4324608e+00   1.9471600e+00   6.3912943e-01   3.0017653e-01   5.0043842e-01   1.7149040e+00   8.6084272e-01   4.8391482e-01   3.4080442e-01   9.0668287e-01   8.6225026e-01   9.3424659e-01   3.0474106e-01   9.3861512e-01   9.5646231e-01   7.8935898e-01   3.4085233e-01   5.2491734e-01   7.8940551e-01   3.0482299e-01   6.0948506e-01   1.3000044e+00   9.3308891e-01   4.0243965e-01   5.6371422e-01   4.1212852e-01   7.0000303e-01   5.4219811e-01   1.2105001e+00   3.4342562e-01   3.6256305e-01   3.4085233e-01   8.0008964e-01   1.5005854e+00   4.1420960e-01   1.5358856e+00   6.1990228e-01   1.7849054e+00   1.1528477e+00   1.3788456e+00   2.4261894e+00   5.6370994e-01   2.0884901e+00   1.4655452e+00   1.9390732e+00   1.1074834e+00   1.0434746e+00   1.4340155e+00   6.0605366e-01   9.1554656e-01   1.0782857e+00   1.1897288e+00   2.5394068e+00   2.6516318e+00   8.3183606e-01   1.5694554e+00   5.2201750e-01   2.5386539e+00   9.0192695e-01   1.4157600e+00   1.8949500e+00   8.0097499e-01   7.0548138e-01   1.1935069e+00   1.8443040e+00   2.0853276e+00   2.5816887e+00   1.1989547e+00   9.1424659e-01   1.1138955e+00   2.3468430e+00   1.1963432e+00   1.1270327e+00   6.0365948e-01   1.5143051e+00   1.3938114e+00   1.5079206e+00   6.1990228e-01   1.5829749e+00   1.4450801e+00   1.3189240e+00   9.1132198e-01   1.1152300e+00   1.0159134e+00   6.3309012e-01   7.0096858e-01   1.1002025e+00   4.8852375e-01   9.1024401e-01   8.1112984e-01   4.0127250e-01   8.1117067e-01   1.3486924e+00   7.0633229e-01   4.6440171e-01   5.1257987e-01   5.0517282e-01   1.5260594e+00   6.1288055e-01   1.5131090e+00   7.4335736e-01   1.4549432e+00   1.1020600e+00   1.3036236e+00   2.1691920e+00   1.1527669e+00   1.8444686e+00   1.3309288e+00   1.6567564e+00   6.3925756e-01   8.5617086e-01   1.0458540e+00   9.0668287e-01   8.4540285e-01   
8.5586571e-01   1.0039209e+00   2.2782572e+00   2.4540263e+00   1.2012866e+00   1.2440282e+00   6.2656178e-01   2.2782572e+00   7.0556260e-01   1.2101609e+00   1.5639802e+00   6.0219099e-01   4.5148429e-01   1.1079931e+00   1.4142064e+00   1.7087610e+00   2.1412345e+00   1.1115204e+00   6.7720957e-01   1.1281352e+00   1.8646736e+00   1.1282162e+00   1.0010209e+00   4.1315633e-01   1.0172673e+00   1.1401191e+00   9.4532171e-01   7.4335736e-01   1.4134218e+00   1.2440229e+00   8.4786353e-01   9.0557807e-01   7.2440846e-01   9.3308853e-01   6.0964891e-01   8.0296037e-01   1.1055799e+00   1.2124837e+00   1.2014191e+00   6.0000952e-01   9.3755356e-01   1.7874653e+00   1.1024913e+00   1.0032296e+00   1.0031018e+00   5.2201750e-01   1.8640262e+00   1.0088926e+00   1.3452347e+00   9.0417295e-01   1.2040344e+00   9.0168933e-01   1.1133986e+00   1.9046783e+00   1.8005318e+00   1.6009504e+00   1.1056691e+00   1.4335330e+00   5.2167829e-01   6.1990228e-01   8.2418141e-01   1.0118233e+00   1.0151880e+00   8.2458478e-01   8.0051115e-01   2.0076819e+00   2.2050331e+00   9.3329017e-01   1.0426638e+00   1.1020600e+00   2.0062587e+00   4.5847767e-01   1.0087393e+00   1.3009222e+00   5.0855778e-01   6.0202028e-01   9.1471442e-01   1.1019505e+00   1.4044980e+00   1.7386523e+00   9.3351278e-01   4.5783248e-01   9.1892454e-01   1.4407364e+00   1.0151880e+00   8.0093081e-01   7.0088627e-01   7.4269200e-01   1.0142482e+00   8.0250123e-01   9.0417295e-01   1.2189645e+00   1.1269510e+00   8.0879701e-01   6.1990228e-01   5.6371422e-01   8.6084272e-01   8.0291749e-01   7.8935813e-01   8.0250123e-01   8.0046685e-01   7.0017011e-01   5.2491734e-01   1.3741813e+00   7.0470720e-01   7.4269314e-01   6.7626502e-01   6.0000635e-01   1.4852616e+00   6.3309012e-01   1.6636721e+00   7.5826453e-01   1.5161847e+00   1.2049539e+00   1.4220925e+00   2.2191056e+00   1.4001717e+00   1.9083789e+00   1.4007861e+00   1.7945122e+00   9.6133119e-01   9.1552373e-01   1.1416778e+00   7.7603846e-01   1.1170561e+00   
1.1351073e+00   1.1152390e+00   2.3540839e+00   2.5167781e+00   6.0202028e-01   1.3687074e+00   8.0713433e-01   2.3222107e+00   5.7608844e-01   1.3563898e+00   1.6193612e+00   5.7609230e-01   7.3090905e-01   1.2201578e+00   1.4221192e+00   1.7236083e+00   2.1360791e+00   1.2373857e+00   7.1629168e-01   1.2000731e+00   1.7962160e+00   1.3755348e+00   1.1298552e+00   7.2113820e-01   1.0843962e+00   1.3150470e+00   1.0664292e+00   7.5826453e-01   1.5362595e+00   1.4451976e+00   1.0604287e+00   6.7626502e-01   8.9540816e-01   1.2552585e+00   8.0073117e-01   5.0001522e-01   4.1212852e-01   5.6347549e-01   4.0127250e-01   8.7240114e-01   3.0008832e-01   1.2085435e-01   1.2085435e-01   6.0017982e-01   1.1038933e+00   2.0061436e-01   1.9231154e+00   1.0088926e+00   1.8971338e+00   1.5035330e+00   1.7143629e+00   2.6071033e+00   7.2440846e-01   2.2781292e+00   1.7231818e+00   2.0998984e+00   1.0923537e+00   1.2224463e+00   1.4922544e+00   9.3733589e-01   1.1896725e+00   1.2782589e+00   1.4186216e+00   2.7177641e+00   2.8857013e+00   9.6674360e-01   1.6882618e+00   8.5406616e-01   2.7167827e+00   8.6084272e-01   1.6345263e+00   2.0058565e+00   7.5508853e-01   8.1715593e-01   1.5132180e+00   1.8635428e+00   2.1554613e+00   2.5926811e+00   1.5194972e+00   1.0207533e+00   1.5005626e+00   2.3165875e+00   1.5429659e+00   1.4098467e+00   7.2036819e-01   1.4724918e+00   1.5757929e+00   1.3838027e+00   1.0088926e+00   1.8378491e+00   1.6745686e+00   1.2948699e+00   9.4912864e-01   1.1635325e+00   1.3473688e+00   1.0032293e+00   4.0004442e-01   6.9509552e-01   3.0017653e-01   7.1708289e-01   2.2573593e-01   5.0085236e-01   4.0243965e-01   7.0548138e-01   1.0008617e+00   3.0482299e-01   2.0206913e+00   1.1056785e+00   2.0075255e+00   1.6053217e+00   1.8156855e+00   2.7170183e+00   6.3912709e-01   2.3873965e+00   1.8286172e+00   2.2132187e+00   1.2079042e+00   1.3276450e+00   1.6018644e+00   1.0207396e+00   1.2397507e+00   1.3715471e+00   1.5245240e+00   2.8327619e+00   2.9941199e+00   
1.0032443e+00   1.7962160e+00   9.3329055e-01   2.8269181e+00   9.6936870e-01   1.7435156e+00   2.1174156e+00   8.6084272e-01   9.2351241e-01   1.6144550e+00   1.9761215e+00   2.2674248e+00   2.7114616e+00   1.6190709e+00   1.1282247e+00   1.6009322e+00   2.4285500e+00   1.6432478e+00   1.5147271e+00   8.2512420e-01   1.5839544e+00   1.6753044e+00   1.4829434e+00   1.1056785e+00   1.9427965e+00   1.7734131e+00   1.3908238e+00   1.0498226e+00   1.2712749e+00   1.4523130e+00   1.1044111e+00   6.0980961e-01   4.1209001e-01   1.1020600e+00   2.0181667e-01   4.0243965e-01   3.0922892e-01   7.0088627e-01   1.4001688e+00   3.0922892e-01   1.6797901e+00   7.8935898e-01   1.7574606e+00   1.2224463e+00   1.4618181e+00   2.4274025e+00   6.3165225e-01   2.0887499e+00   1.4865883e+00   1.9561612e+00   1.0664292e+00   1.0336863e+00   1.3939836e+00   8.2421923e-01   1.2089895e+00   1.1997296e+00   1.1938630e+00   2.5462062e+00   2.6763758e+00   6.4049114e-01   1.5645185e+00   8.0883916e-01   2.5391583e+00   8.3183672e-01   1.4351453e+00   1.8644317e+00   7.4618926e-01   6.9987517e-01   1.2680580e+00   1.7844580e+00   2.0440071e+00   2.5329067e+00   1.2921474e+00   8.5440680e-01   1.2036924e+00   2.2838099e+00   1.3737025e+00   1.1587585e+00   6.4620889e-01   1.4491800e+00   1.4507713e+00   1.4583848e+00   7.8935898e-01   1.6277433e+00   1.5384791e+00   1.3150470e+00   8.7209348e-01   1.0788651e+00   1.2179890e+00   7.4954884e-01   6.1135434e-01   1.3790270e+00   5.2491734e-01   4.5147187e-01   4.5080200e-01   3.0026460e-01   1.6179159e+00   5.2167829e-01   1.4543196e+00   5.6838732e-01   1.3502290e+00   1.0008620e+00   1.2192919e+00   2.0611274e+00   1.2013529e+00   1.7369589e+00   1.2053003e+00   1.5767956e+00   6.3925756e-01   7.1779518e-01   9.6131279e-01   6.4620889e-01   1.0032443e+00   9.3331138e-01   9.0279223e-01   2.1713885e+00   2.3476282e+00   8.0245903e-01   1.1755517e+00   6.3322667e-01   2.1693131e+00   4.2362917e-01   1.1187430e+00   1.4544336e+00   4.0246123e-01   
4.1209001e-01   1.0208844e+00   1.3018144e+00   1.5969056e+00   2.0303761e+00   1.0427944e+00   5.0085236e-01   1.0008465e+00   1.7574039e+00   1.1274284e+00   9.0166476e-01   4.0125062e-01   9.3437551e-01   1.1319099e+00   9.6935134e-01   5.6838732e-01   1.3309288e+00   1.2428507e+00   9.2745734e-01   5.7630313e-01   6.8160885e-01   9.6672602e-01   5.2167208e-01   8.5440680e-01   2.2608083e-01   4.0125062e-01   3.0490481e-01   4.2270142e-01   1.0207262e+00   2.0181667e-01   2.0282636e+00   1.1133895e+00   1.9386586e+00   1.6012719e+00   1.8114342e+00   2.6515677e+00   9.0999313e-01   2.3322945e+00   1.8060515e+00   2.1578375e+00   1.1447365e+00   1.3085752e+00   1.5359734e+00   1.0426516e+00   1.3018144e+00   1.3779960e+00   1.5044724e+00   2.7627283e+00   2.9432651e+00   1.0010209e+00   1.7447170e+00   9.6576136e-01   2.7579768e+00   9.1892454e-01   1.7159977e+00   2.0420308e+00   8.2635069e-01   9.1576742e-01   1.6105700e+00   1.8662195e+00   2.1696258e+00   2.5680120e+00   1.6184785e+00   1.1020600e+00   1.6000183e+00   2.2731185e+00   1.6529028e+00   1.5029725e+00   8.2635069e-01   1.4699000e+00   1.6570292e+00   1.3301857e+00   1.1133895e+00   1.9214944e+00   1.7646791e+00   1.3272142e+00   1.0235120e+00   1.2275665e+00   1.4621584e+00   1.1060939e+00   9.1576742e-01   9.6133119e-01   9.4532171e-01   1.2662318e+00   3.0490481e-01   8.6084272e-01   2.7230933e+00   1.8083405e+00   2.7192500e+00   2.3152780e+00   2.5278895e+00   3.4307167e+00   1.2089253e+00   3.1023107e+00   2.5446557e+00   2.9238544e+00   1.9071235e+00   2.0445123e+00   2.3113306e+00   1.7148932e+00   1.8686471e+00   2.0692591e+00   2.2408459e+00   3.5448121e+00   3.7102716e+00   1.7134856e+00   2.5076804e+00   1.6187837e+00   3.5398901e+00   1.6780493e+00   2.4594169e+00   2.8277283e+00   1.5698091e+00   1.6365650e+00   2.3270565e+00   2.6739856e+00   2.9710335e+00   3.3954286e+00   2.3304578e+00   1.8446316e+00   2.3054869e+00   3.1050823e+00   2.3405162e+00   2.2288004e+00   1.5327217e+00   
2.2740189e+00   2.3840998e+00   2.1122339e+00   1.8083405e+00   2.6588338e+00   2.4791402e+00   2.0688078e+00   1.7632513e+00   1.9828965e+00   2.1428811e+00   1.8095574e+00   3.0017653e-01   2.0061436e-01   6.0017982e-01   1.2012991e+00   1.2085435e-01   1.8301371e+00   9.1424659e-01   1.8223693e+00   1.4049093e+00   1.6190709e+00   2.5271432e+00   7.0556260e-01   2.1953731e+00   1.6303102e+00   2.0261311e+00   1.0363096e+00   1.1330692e+00   1.4229011e+00   8.5406674e-01   1.1527746e+00   1.2106302e+00   1.3261864e+00   2.6410980e+00   2.8004332e+00   8.1117067e-01   1.6146557e+00   7.8886054e-01   2.6375763e+00   7.9824795e-01   1.5471213e+00   1.9317133e+00   6.9509552e-01   7.3155911e-01   1.4182194e+00   1.8031267e+00   2.0887452e+00   2.5422790e+00   1.4267527e+00   9.3308891e-01   1.4006149e+00   2.2706274e+00   1.4614246e+00   1.3141581e+00   6.4049114e-01   1.4230277e+00   1.5004249e+00   1.3669148e+00   9.1424659e-01   1.7490906e+00   1.5981930e+00   1.2553121e+00   8.7212232e-01   1.0924484e+00   1.2731059e+00   9.0557807e-01   1.1269424e-01   5.0002283e-01   1.2049541e+00   2.0121983e-01   1.8447840e+00   9.3329055e-01   1.7901165e+00   1.4035225e+00   1.6222582e+00   2.4980210e+00   8.1757693e-01   2.1693127e+00   1.6187837e+00   2.0049100e+00   1.0151397e+00   1.1261381e+00   1.3938113e+00   9.0657539e-01   1.2362756e+00   1.2472959e+00   1.3154932e+00   2.6088344e+00   2.7785257e+00   9.0207914e-01   1.5972548e+00   8.5406674e-01   2.6071034e+00   7.7652636e-01   1.5358856e+00   1.8953567e+00   6.9518117e-01   7.4612718e-01   1.4220925e+00   1.7511588e+00   2.0440071e+00   2.4804818e+00   1.4362913e+00   9.1449234e-01   1.4003402e+00   2.2077377e+00   1.4867147e+00   1.3085752e+00   6.7720780e-01   1.3734975e+00   1.5100598e+00   1.3269962e+00   9.3329055e-01   1.7441015e+00   1.6143613e+00   1.2552584e+00   8.7796615e-01   1.0782751e+00   1.3029195e+00   9.1424659e-01   5.0000761e-01   1.2040406e+00   1.1269424e-01   1.8289847e+00   9.1424701e-01   
1.7872748e+00   1.4023776e+00   1.6144390e+00   2.4974488e+00   8.0533198e-01   2.1691714e+00   1.6179842e+00   1.9948417e+00   9.9013884e-01   1.1186586e+00   1.3842454e+00   8.5583357e-01   1.1527671e+00   1.1990152e+00   1.3139296e+00   2.6085083e+00   2.7775771e+00   8.5440680e-01   1.5835478e+00   7.8886139e-01   2.6068470e+00   7.5508853e-01   1.5300146e+00   1.8950932e+00   6.5712813e-01   7.2036951e-01   1.4134189e+00   1.7511120e+00   2.0435901e+00   2.4807480e+00   1.4220898e+00   9.1424701e-01   1.4002005e+00   2.2048860e+00   1.4562730e+00   1.3069754e+00   6.3309258e-01   1.3632211e+00   1.4815986e+00   1.2921474e+00   9.1424701e-01   1.7351894e+00   1.5836236e+00   1.2085436e+00   8.4725834e-01   1.0597879e+00   1.2653025e+00   9.0508756e-01   1.3743342e+00   5.0043084e-01   1.7369589e+00   8.2635069e-01   1.6144390e+00   1.3008770e+00   1.5131090e+00   2.3226028e+00   1.3004854e+00   2.0107294e+00   1.5010034e+00   1.8390199e+00   8.5471446e-01   1.0087539e+00   1.2223855e+00   8.0073117e-01   1.1286018e+00   1.1270411e+00   1.2013529e+00   2.4290388e+00   2.6198428e+00   7.8895472e-01   1.4363167e+00   7.7598796e-01   2.4266979e+00   6.3178782e-01   1.4100098e+00   1.7134977e+00   5.6347549e-01   6.3165225e-01   1.3130978e+00   1.5237265e+00   1.8289379e+00   2.1979419e+00   1.3253497e+00   8.0004602e-01   1.3000455e+00   1.9028805e+00   1.3748188e+00   1.2012991e+00   5.6371422e-01   1.1400420e+00   1.3748220e+00   1.0597992e+00   8.2635069e-01   1.6184929e+00   1.4858469e+00   1.0797702e+00   7.4612830e-01   9.3329055e-01   1.1910002e+00   8.0923926e-01   1.1056785e+00   3.0089448e+00   2.1019570e+00   2.9563162e+00   2.6052626e+00   2.8107271e+00   3.6717374e+00   1.5012719e+00   3.3522198e+00   2.8186527e+00   3.1596417e+00   2.1362161e+00   2.3151198e+00   2.5461024e+00   2.0036629e+00   2.1224665e+00   2.3234228e+00   2.5150159e+00   3.7801779e+00   3.9623034e+00   2.0033816e+00   2.7467405e+00   1.9044216e+00   3.7784933e+00   1.9231092e+00   
2.7237438e+00   3.0623796e+00   1.8186729e+00   1.9089573e+00   2.6097166e+00   2.8846684e+00   3.1885494e+00   3.5706709e+00   2.6109888e+00   2.1139043e+00   2.6017555e+00   3.2706949e+00   2.6138780e+00   2.5099937e+00   1.8069857e+00   2.4748863e+00   2.6338874e+00   2.2385678e+00   2.1019570e+00   2.9251726e+00   2.7321685e+00   2.2661994e+00   2.0190735e+00   2.2288464e+00   2.4131370e+00   2.1020441e+00   1.9226845e+00   1.0087252e+00   1.8684939e+00   1.5017097e+00   1.7108631e+00   2.5816562e+00   8.0533198e-01   2.2563154e+00   1.7134977e+00   2.0770423e+00   1.0604287e+00   1.2124777e+00   1.4620239e+00   9.3329017e-01   1.1896594e+00   1.2705641e+00   1.4098496e+00   2.6923501e+00   2.8659663e+00   9.1449234e-01   1.6637458e+00   8.5403428e-01   2.6902417e+00   8.3183672e-01   1.6225614e+00   1.9757175e+00   7.3084048e-01   8.1117067e-01   1.5097082e+00   1.8197097e+00   2.1169795e+00   2.5408329e+00   1.5160570e+00   1.0087393e+00   1.5001233e+00   2.2573596e+00   1.5423651e+00   1.4049376e+00   7.1708289e-01   1.4229011e+00   1.5611429e+00   1.3142952e+00   1.0087252e+00   1.8271493e+00   1.6639408e+00   1.2552635e+00   9.2768675e-01   1.1400420e+00   1.3479052e+00   1.0031018e+00   9.3184922e-01   8.0291749e-01   7.0910969e-01   3.4342562e-01   1.3028005e+00   1.6474201e+00   1.0216374e+00   8.5586571e-01   9.0026543e-01   9.0508756e-01   7.7598796e-01   5.7832449e-01   1.0522594e+00   9.0999313e-01   7.0008735e-01   7.1708289e-01   1.4049376e+00   1.4220925e+00   1.2553121e+00   6.0202028e-01   1.1170561e+00   1.4055109e+00   1.1186497e+00   4.5783248e-01   9.3306769e-01   1.2101609e+00   1.1134939e+00   5.3914287e-01   1.0144117e+00   1.1074742e+00   1.6007365e+00   5.2491734e-01   1.0797700e+00   1.1139044e+00   1.4000349e+00   4.0004442e-01   7.1629303e-01   1.2090477e+00   6.8170466e-01   4.5148429e-01   9.1427000e-01   9.3184922e-01   5.0043842e-01   4.1209001e-01   8.0296037e-01   1.0498226e+00   8.0928056e-01   6.0018299e-01   9.3446811e-01   
1.3131410e+00   5.6371422e-01   7.8985507e-01   1.8949500e+00   9.1427000e-01   1.5639785e+00   9.3308891e-01   1.4501583e+00   7.1621748e-01   6.0017665e-01   1.0010209e+00   2.0181667e-01   5.0000761e-01   6.3925756e-01   7.0548283e-01   2.0162299e+00   2.0885102e+00   5.2167829e-01   1.1079931e+00   2.2608083e-01   2.0057464e+00   5.0043084e-01   9.2747919e-01   1.4186217e+00   4.1212852e-01   3.4085233e-01   6.3178782e-01   1.4043632e+00   1.6175925e+00   2.1298991e+00   6.3309258e-01   5.2133179e-01   5.6595908e-01   1.9078843e+00   7.4418186e-01   6.1830764e-01   3.4085233e-01   1.1006468e+00   9.1132198e-01   1.1010711e+00   0.0000000e+00   1.0458540e+00   9.3984267e-01   9.0166476e-01   5.0043084e-01   7.0088627e-01   7.0993998e-01   3.0017653e-01   8.0093160e-01   6.0000635e-01   7.1621613e-01   2.2268632e+00   4.1317535e-01   5.2491734e-01   6.0964891e-01   8.2421923e-01   7.4335736e-01   4.1209001e-01   1.4186217e+00   1.3131410e+00   7.4275547e-01   6.1119267e-01   9.1566538e-01   1.0095513e+00   1.1796101e+00   2.5399984e-01   1.5237074e+00   8.2421923e-01   1.0429127e+00   4.1315633e-01   3.0490481e-01   1.1528553e+00   1.1270325e+00   7.0096708e-01   5.0001522e-01   3.1328089e-01   9.0657583e-01   7.0096858e-01   9.1568820e-01   1.0216374e+00   6.0035305e-01   8.0337471e-01   7.0548283e-01   1.2396937e+00   5.0043084e-01   4.2270142e-01   8.0008964e-01   1.3131410e+00   3.0915245e-01   4.5847767e-01   7.0470720e-01   9.6936870e-01   7.4262964e-01   9.0645118e-01   1.2190260e+00   4.0246123e-01   1.3450652e+00   1.4544312e+00   1.0207258e+00   4.5147187e-01   9.6501813e-01   5.0517282e-01   3.0490481e-01   5.0437695e-01   6.8170466e-01   6.5712813e-01   5.0855778e-01   2.0121983e-01   1.4695463e+00   1.5267750e+00   7.4395693e-01   6.3309258e-01   7.8890806e-01   1.4542932e+00   7.0008432e-01   4.5784410e-01   9.0166431e-01   8.0000160e-01   7.0008584e-01   3.0017653e-01   9.0005094e-01   1.1019505e+00   1.6144405e+00   4.0004442e-01   5.0436965e-01   
4.1315633e-01   1.4012283e+00   6.3164729e-01   2.0121983e-01   8.0046685e-01   6.0219099e-01   6.0964597e-01   6.5724028e-01   5.6371422e-01   5.6838732e-01   7.0911112e-01   5.3914287e-01   6.0948506e-01   4.0246123e-01   5.6371422e-01   5.2133179e-01   1.1281267e+00   1.6745375e+00   8.1112984e-01   5.2167208e-01   7.4395693e-01   7.0016860e-01   5.0855778e-01   3.3813251e-01   9.0659977e-01   7.8895472e-01   5.0043842e-01   4.1209001e-01   1.2528048e+00   1.3020492e+00   9.3861512e-01   4.0127250e-01   1.0142766e+00   1.2362810e+00   9.0168933e-01   3.0490481e-01   7.0478886e-01   1.0010209e+00   9.0279223e-01   2.2608083e-01   7.4262850e-01   9.0055475e-01   1.4109657e+00   2.2573593e-01   7.8895472e-01   8.0492246e-01   1.2000668e+00   4.0363334e-01   4.1212852e-01   1.0039060e+00   4.5080200e-01   2.4195741e-01   7.0462844e-01   7.8985507e-01   3.0490481e-01   3.4085233e-01   6.0017982e-01   8.0928056e-01   6.0017665e-01   4.5784410e-01   7.4612718e-01   2.7992301e+00   3.6259865e-01   9.6953662e-01   6.4620889e-01   1.5401330e+00   1.4140789e+00   1.1281265e+00   2.0058560e+00   1.8949500e+00   1.4140515e+00   1.2396937e+00   8.0000239e-01   4.1317535e-01   1.8064092e+00   9.3310976e-01   2.1167364e+00   2.0181667e-01   1.7570696e+00   1.0143975e+00   6.1135434e-01   1.8661434e+00   1.8197089e+00   1.2632995e+00   8.1112909e-01   5.0126466e-01   8.0051115e-01   1.2632996e+00   1.5975200e+00   1.5266891e+00   5.0043084e-01   1.3452695e+00   1.3018553e+00   1.9314575e+00   1.2089192e+00   1.0777307e+00   1.5030978e+00   1.8949500e+00   8.5409862e-01   1.0151880e+00   1.4180463e+00   1.6742781e+00   1.4542907e+00   1.4853863e+00   1.8197089e+00   2.4725511e+00   1.8443040e+00   2.3518103e+00   1.6032169e+00   1.5066818e+00   1.9080163e+00   8.0923851e-01   9.4532171e-01   1.5109394e+00   1.6179000e+00   2.9132779e+00   2.9705619e+00   1.1020600e+00   2.0185049e+00   7.0633229e-01   2.9085831e+00   1.4001717e+00   1.8310931e+00   2.3325127e+00   1.3000908e+00   
1.2016381e+00   1.5402579e+00   2.3143334e+00   2.5314248e+00   3.0393618e+00   1.5405402e+00   1.4018011e+00   1.3018553e+00   2.8185850e+00   1.4728279e+00   1.5248833e+00   1.1020506e+00   2.0036931e+00   1.8191683e+00   2.0009584e+00   9.1427000e-01   1.9534067e+00   1.8336293e+00   1.8020058e+00   1.4006178e+00   1.6026130e+00   1.3506710e+00   1.0116724e+00   6.3912709e-01   7.8890806e-01   1.2190319e+00   1.0776296e+00   8.0923926e-01   1.6740888e+00   1.5650163e+00   1.0798806e+00   9.0155438e-01   9.0417295e-01   6.4049114e-01   1.4685147e+00   6.4049114e-01   1.7843537e+00   4.5148429e-01   1.4324350e+00   6.8261201e-01   3.3813251e-01   1.5401311e+00   1.4852570e+00   9.3329055e-01   5.0043842e-01   2.0181667e-01   9.1424701e-01   9.3424697e-01   1.2633467e+00   1.2093243e+00   5.2167829e-01   1.0313359e+00   9.6574336e-01   1.5965767e+00   9.0168933e-01   7.7603846e-01   1.2016443e+00   1.5639785e+00   5.7832449e-01   7.7882758e-01   1.1074742e+00   1.3452311e+00   1.1281352e+00   1.1558746e+00   1.4852570e+00   1.1153247e+00   7.8895472e-01   5.0477564e-01   5.0855778e-01   1.0426636e+00   9.4532171e-01   7.3155911e-01   5.0476836e-01   1.3669148e+00   1.1910003e+00   8.5471446e-01   7.1629303e-01   1.1528553e+00   1.0777411e+00   9.0142636e-01   8.0046685e-01   7.1629168e-01   1.0032293e+00   9.1892413e-01   3.6452132e-01   5.6370994e-01   7.0176271e-01   1.4157327e+00   4.2362917e-01   7.0633229e-01   6.0964891e-01   1.0062544e+00   9.1554656e-01   6.0365948e-01   1.0235118e+00   6.1135434e-01   6.7626502e-01   7.5508853e-01   9.3308891e-01   7.1621884e-01   8.5403428e-01   6.5712608e-01   8.0245824e-01   6.3192325e-01   9.1132198e-01   8.6051414e-01   1.0242692e+00   1.0232576e+00   6.8685125e-01   1.5761415e+00   1.4407364e+00   9.0296858e-01   8.3689956e-01   6.3322667e-01   1.0451812e+00   1.5490134e+00   4.5847767e-01   1.6529028e+00   8.3916809e-01   1.2749306e+00   5.4219811e-01   7.0462697e-01   1.3611955e+00   1.3103292e+00   9.0792879e-01   
9.1446896e-01   8.2421853e-01   7.1708289e-01   9.0683128e-01   1.1954899e+00   1.2957636e+00   6.3178534e-01   9.0508756e-01   8.7796615e-01   1.4198077e+00   7.2113820e-01   6.0427481e-01   1.0032443e+00   1.4501583e+00   4.5216167e-01   5.2491131e-01   9.1894698e-01   1.2738390e+00   9.4912864e-01   1.0207533e+00   1.3523292e+00   5.0043842e-01   4.1317535e-01   8.5403428e-01   7.0910969e-01   3.0482299e-01   4.0243965e-01   1.6491696e+00   1.8289467e+00   1.0060994e+00   6.1119267e-01   9.0142636e-01   1.6484371e+00   5.0126466e-01   6.0018299e-01   9.3308853e-01   4.2362917e-01   4.0363334e-01   5.2133802e-01   7.9153339e-01   1.0782107e+00   1.5275160e+00   5.2167829e-01   5.2167208e-01   6.9987517e-01   1.2633516e+00   5.2201750e-01   4.0127250e-01   5.0517282e-01   4.1212852e-01   5.2167829e-01   4.1210927e-01   7.1621748e-01   8.0093081e-01   6.3178782e-01   3.0922892e-01   7.0008735e-01   2.0061436e-01   3.6452132e-01   6.0035305e-01   4.1420960e-01   7.0096858e-01   6.3178782e-01   5.2132556e-01   3.0490481e-01   1.5634961e+00   1.6740875e+00   5.4219811e-01   5.8750389e-01   8.0245903e-01   1.5263478e+00   4.0004442e-01   6.1135434e-01   8.6051471e-01   5.0043842e-01   4.2270142e-01   3.0482299e-01   8.0967961e-01   1.0426513e+00   1.5757929e+00   3.3813251e-01   4.0127250e-01   5.0855778e-01   1.3133662e+00   7.1700909e-01   4.0125062e-01   5.2491734e-01   5.2167829e-01   5.2838320e-01   5.3943256e-01   6.0017665e-01   6.4620889e-01   6.8261201e-01   4.2270142e-01   3.0482299e-01   3.0026460e-01   7.0470867e-01   5.0477564e-01   1.1038840e+00   1.0010209e+00   4.0363334e-01   3.3808272e-01   1.2528049e+00   1.4182049e+00   9.2033101e-01   2.4195741e-01   1.2036925e+00   1.2362756e+00   6.3451734e-01   3.0482299e-01   5.2524663e-01   7.4335736e-01   7.4329414e-01   4.0125062e-01   5.2491131e-01   6.7636452e-01   1.1755449e+00   4.0127250e-01   6.3925756e-01   7.9148746e-01   9.1424659e-01   5.2491734e-01   4.1210927e-01   8.5437440e-01   1.2085435e-01   
3.0026460e-01   4.0127250e-01   1.0010209e+00   4.0243965e-01   4.1317535e-01   3.0482299e-01   6.0444249e-01   3.3813251e-01   6.0964891e-01   9.0166431e-01   4.1212852e-01   7.8985507e-01   8.1719606e-01   2.1378157e+00   2.2009978e+00   5.0855077e-01   1.2175952e+00   3.0017653e-01   2.1167404e+00   6.0035621e-01   1.0597879e+00   1.5265797e+00   5.0517282e-01   5.2167829e-01   7.4329527e-01   1.5072282e+00   1.7227391e+00   2.2434054e+00   7.4335736e-01   6.3309258e-01   6.8161057e-01   2.0107350e+00   9.2867113e-01   7.5508853e-01   5.0517282e-01   1.2040344e+00   1.0178820e+00   1.2037520e+00   2.0181667e-01   1.1636098e+00   1.0621172e+00   1.0032443e+00   6.0000317e-01   8.0883841e-01   9.0668287e-01   5.0085236e-01   6.0964891e-01   7.4618926e-01   2.0118073e+00   2.0884859e+00   9.1424701e-01   1.1060939e+00   4.0243965e-01   2.0057764e+00   6.3178782e-01   9.1916394e-01   1.4198627e+00   6.1119267e-01   6.0219099e-01   6.3309012e-01   1.4134218e+00   1.6179000e+00   2.1265315e+00   6.3178534e-01   9.0506254e-01   1.0032443e+00   1.9078396e+00   6.5712608e-01   6.8261201e-01   6.0219099e-01   1.1003034e+00   9.0532049e-01   1.1001014e+00   5.0000761e-01   1.0433444e+00   9.1892454e-01   9.0002615e-01   5.6595908e-01   7.0470867e-01   6.1119558e-01   6.0017982e-01   5.0085236e-01   1.5275160e+00   1.6747664e+00   1.0434746e+00   5.2132556e-01   8.0533198e-01   1.5264802e+00   5.7609230e-01   4.1317535e-01   8.6051414e-01   5.7630313e-01   5.2524663e-01   4.1315633e-01   8.6054545e-01   1.0440350e+00   1.5412701e+00   4.1210927e-01   8.0250202e-01   9.1471442e-01   1.3130978e+00   3.0490481e-01   5.0043084e-01   5.7630313e-01   5.0043842e-01   3.3818226e-01   5.0043084e-01   6.3925756e-01   6.0948212e-01   4.1317535e-01   3.0482299e-01   7.0548283e-01   3.0490481e-01   2.2573593e-01   5.6394820e-01   1.3634093e+00   1.4858469e+00   8.1757693e-01   5.2201750e-01   9.1427000e-01   1.3523380e+00   6.0201716e-01   3.4342562e-01   7.1629168e-01   7.0096708e-01   
6.0948212e-01   3.0490481e-01   7.0096708e-01   9.1424701e-01   1.4267554e+00   4.0127250e-01   4.1420960e-01   4.8342635e-01   1.2049539e+00   6.0964891e-01   1.1269424e-01   7.1621613e-01   4.1212852e-01   6.0018299e-01   5.3914287e-01   7.0548283e-01   5.2524663e-01   7.0105084e-01   5.0476836e-01   5.6371422e-01   3.0474106e-01   5.2491734e-01   6.0948212e-01   1.2000065e+00   2.0187441e+00   1.0498228e+00   2.2315584e+00   1.0000152e+00   1.8808952e+00   1.1286798e+00   7.5826453e-01   1.9821126e+00   1.9334872e+00   1.4094023e+00   9.7944085e-01   1.0090312e+00   3.0915245e-01   1.4094022e+00   1.7226330e+00   1.6785116e+00   8.2418071e-01   1.4544336e+00   1.4183053e+00   2.0448570e+00   1.3189240e+00   1.1989547e+00   1.6071563e+00   2.0162299e+00   9.7599312e-01   1.1287691e+00   1.5299044e+00   1.8304280e+00   1.5694554e+00   1.5966664e+00   1.9334872e+00   2.0448570e+00   1.2223853e+00   2.3135239e+00   3.0915245e-01   2.0415522e+00   1.2703001e+00   9.2351241e-01   2.1487275e+00   2.0854181e+00   1.4650300e+00   1.1157320e+00   8.0296037e-01   1.2013591e+00   1.4650276e+00   1.8684939e+00   1.6818191e+00   8.0245746e-01   1.5324961e+00   1.5271597e+00   2.1953922e+00   1.5071120e+00   1.3457716e+00   1.8029854e+00   2.0885102e+00   1.0837575e+00   1.2703001e+00   1.7133162e+00   1.9522053e+00   1.7369702e+00   1.6941963e+00   2.0286286e+00   1.1213597e+00   6.3912943e-01   1.9163315e+00   5.0855778e-01   1.1310337e+00   1.3142952e+00   6.0219099e-01   8.0046764e-01   7.2904264e-01   1.2366099e+00   1.4559030e+00   2.0467625e+00   7.7882758e-01   6.0184622e-01   6.0948800e-01   1.7296063e+00   1.2395260e+00   9.0668287e-01   8.0051036e-01   1.0231745e+00   1.0411548e+00   1.0543951e+00   5.2167829e-01   1.1356187e+00   1.2079042e+00   9.3439622e-01   4.2268438e-01   8.1719606e-01   1.2192978e+00   8.0046764e-01   1.3133662e+00   1.0434746e+00   8.3916809e-01   2.2573593e-01   5.0855077e-01   9.3848935e-01   9.0659977e-01   5.2167829e-01   7.0096858e-01   
5.5450500e-01   1.0287902e+00   5.2133802e-01   8.4725834e-01   9.7599312e-01   8.0250123e-01   6.0018299e-01   5.6371422e-01   1.0171340e+00   3.0482299e-01   2.0181667e-01   6.0000317e-01   1.1079931e+00   2.0061436e-01   2.2573593e-01   5.0084481e-01   8.1683095e-01   5.2524663e-01   7.0096708e-01   1.0116865e+00   2.2278855e+00   7.0008584e-01   1.1298552e+00   1.6300950e+00   6.0017982e-01   5.0084481e-01   8.5403428e-01   1.6097507e+00   1.8284464e+00   2.3350903e+00   8.5406616e-01   7.1629168e-01   7.5508853e-01   2.1138813e+00   8.1683095e-01   8.2462252e-01   4.0246123e-01   1.3009222e+00   1.1139906e+00   1.3000951e+00   2.2608083e-01   1.2636227e+00   1.1315710e+00   1.1002119e+00   7.0088627e-01   9.0029018e-01   6.9600743e-01   3.1328089e-01   1.8661354e+00   1.1286798e+00   7.2044167e-01   1.9755679e+00   1.9314520e+00   1.3741466e+00   9.0645118e-01   6.0184622e-01   1.0001751e+00   1.3741498e+00   1.7083042e+00   1.6308665e+00   6.0201716e-01   1.4559030e+00   1.4140789e+00   2.0433026e+00   1.3131370e+00   1.1900969e+00   1.6049479e+00   2.0057464e+00   9.6691372e-01   1.1304042e+00   1.5237285e+00   1.7843627e+00   1.5639785e+00   1.5975352e+00   1.9314520e+00   8.2671175e-01   1.1543257e+00   1.2085435e-01   3.0474106e-01   7.0088627e-01   1.0144117e+00   1.3018103e+00   1.7709153e+00   7.0462844e-01   3.0482299e-01   7.0470867e-01   1.4857440e+00   8.1457587e-01   6.0948506e-01   3.3813251e-01   6.4049114e-01   7.4954884e-01   6.3925756e-01   5.0043084e-01   1.0090834e+00   8.7372177e-01   5.2838320e-01   2.0121983e-01   3.4342562e-01   7.3084171e-01   4.1315633e-01   5.0855778e-01   9.1024401e-01   8.2498722e-01   5.0436965e-01   5.6595908e-01   7.2044167e-01   1.2101609e+00   5.0437695e-01   6.9987517e-01   8.1457660e-01   1.0010209e+00   4.1212852e-01   3.4342562e-01   9.3351278e-01   3.0915245e-01   3.0482299e-01   6.0052920e-01   9.2747919e-01   2.2608083e-01   4.0000000e-01   5.0476836e-01   8.5586571e-01   5.0477564e-01   5.0477564e-01   
8.2498722e-01   1.2635707e+00   1.2396421e+00   8.0533198e-01   2.4170870e-01   4.0127250e-01   7.4618926e-01   8.0726668e-01   1.0151880e+00   1.1066159e+00   5.6371422e-01   9.1554656e-01   8.0879701e-01   1.3523345e+00   6.0366256e-01   6.3912943e-01   9.0532093e-01   1.4186217e+00   5.2133179e-01   7.1700909e-01   8.1719606e-01   1.0923439e+00   8.5409862e-01   1.0116865e+00   1.3253496e+00   2.0121983e-01   8.0051036e-01   1.1269596e+00   1.4140457e+00   1.8721285e+00   8.0250123e-01   3.3813251e-01   8.0250202e-01   1.5969056e+00   8.4536936e-01   7.0096708e-01   2.2538848e-01   7.4395693e-01   8.3222261e-01   7.1779518e-01   4.1212852e-01   1.1079931e+00   9.4151244e-01   5.7630313e-01   3.0490481e-01   4.1420960e-01   6.9509395e-01   3.4080442e-01   7.0184453e-01   1.1527745e+00   1.4140486e+00   1.8971345e+00   7.0556260e-01   3.1328089e-01   7.0910969e-01   1.6486410e+00   7.4618926e-01   6.0184622e-01   1.1269424e-01   8.0923926e-01   7.7598796e-01   8.0883916e-01   3.4085233e-01   1.0235254e+00   8.7240114e-01   6.3309012e-01   5.0043842e-01   4.1315633e-01   5.7609230e-01   2.2538848e-01   8.0888055e-01   1.0030868e+00   1.5299044e+00   1.0000000e-01   6.3164977e-01   7.0096708e-01   1.3008855e+00   6.0184622e-01   3.3813251e-01   8.0296037e-01   5.0476836e-01   3.6256305e-01   5.6618864e-01   6.3178782e-01   4.5847767e-01   5.2491734e-01   4.1420960e-01   6.0202028e-01   4.0127250e-01   6.0052920e-01   5.6618864e-01   3.4342562e-01   8.7372177e-01   8.2425704e-01   9.3308891e-01   1.1005554e+00   7.1700774e-01   9.6674360e-01   8.0051115e-01   1.2632995e+00   5.2491734e-01   8.0883916e-01   7.8935898e-01   1.4043632e+00   7.0470867e-01   9.0532093e-01   7.5502989e-01   9.6953662e-01   7.4612718e-01   1.0222576e+00   1.3061180e+00   1.0032296e+00   1.0032293e+00   1.1900276e+00   1.3017553e+00   4.1315633e-01   1.1093621e+00   1.0088783e+00   1.5263498e+00   7.1708289e-01   7.3155911e-01   1.0040629e+00   1.6175925e+00   6.1845783e-01   7.5826453e-01   
9.3426769e-01   1.2396937e+00   1.0142626e+00   1.2128138e+00   1.5237074e+00   1.5299064e+00   1.6885111e+00   1.8315348e+00   8.0097499e-01   1.6050900e+00   1.5160591e+00   2.0074169e+00   1.1389082e+00   1.2275665e+00   1.3502668e+00   2.1298991e+00   1.1075720e+00   1.2113964e+00   1.3632538e+00   1.7639471e+00   1.4922544e+00   1.7133283e+00   2.0290146e+00   7.1621748e-01   8.0051036e-01   1.3008813e+00   6.0017982e-01   4.1210927e-01   8.0492246e-01   5.0477564e-01   3.4080442e-01   5.6595908e-01   6.3309258e-01   4.5784410e-01   5.0855778e-01   4.1317535e-01   6.0366256e-01   4.0246123e-01   6.0035621e-01   5.7630313e-01   5.0085236e-01   1.4407390e+00   9.1892413e-01   4.2270142e-01   3.6452132e-01   6.7824250e-01   9.0668287e-01   8.2458409e-01   5.2133179e-01   9.0792879e-01   1.0124729e+00   8.0250202e-01   4.1210927e-01   5.0085236e-01   8.2458478e-01   4.1315633e-01   1.6100639e+00   1.0426636e+00   5.2491734e-01   8.0488008e-01   8.6054545e-01   1.0116721e+00   9.7291273e-01   5.6595908e-01   9.4532171e-01   1.1186499e+00   9.1681464e-01   6.3178782e-01   6.2656178e-01   9.6574369e-01   5.3943256e-01   1.4007831e+00   1.3033860e+00   1.7572550e+00   8.5406674e-01   1.0030724e+00   1.0426513e+00   1.9078843e+00   9.0005048e-01   1.0010209e+00   1.0776188e+00   1.4549432e+00   1.2363278e+00   1.5032156e+00   1.8103044e+00   6.0184934e-01   8.2671175e-01   6.0383105e-01   4.1209001e-01   6.3309258e-01   7.4418186e-01   5.0477564e-01   4.0006662e-01   4.8342635e-01   9.1892413e-01   4.8391482e-01   2.0121983e-01   6.4620889e-01   7.0462697e-01   5.0436965e-01   6.0184622e-01   5.7608844e-01   6.1830764e-01   5.3914287e-01   7.0105084e-01   5.0855778e-01   6.3165225e-01   3.0490481e-01   5.0477564e-01   5.2133179e-01   9.1446938e-01   8.7209348e-01   9.0532093e-01   3.4085233e-01   1.1298636e+00   9.6150595e-01   7.2036819e-01   5.0477564e-01   5.2167208e-01   6.3925756e-01   3.0008832e-01   3.0915245e-01   3.0474106e-01   1.1006468e+00   5.0043842e-01   
4.1420960e-01   2.4195741e-01   6.8170466e-01   4.0127250e-01   7.0096708e-01   1.0003198e+00   5.0043084e-01   9.1132198e-01   3.0026460e-01   2.0121983e-01   4.0004442e-01   6.9987517e-01   4.5148429e-01   5.0477564e-01   8.3183672e-01   1.1010711e+00   8.0000160e-01   6.0052920e-01   2.0121983e-01   6.8161057e-01   4.1212852e-01   7.0176121e-01   1.0030721e+00   1.0458540e+00   9.3984267e-01   9.0166476e-01   5.0043084e-01   7.0088627e-01   7.0993998e-01   3.0017653e-01   2.2608083e-01   7.0008584e-01   9.3848935e-01   7.0184453e-01   6.3178534e-01   9.6936870e-01   5.0476836e-01   8.7372177e-01   5.6618864e-01   5.0477564e-01   8.7240114e-01   5.3943256e-01   3.0474106e-01   5.2167208e-01   8.0879701e-01   5.0085236e-01   9.0279268e-01   5.2133802e-01   4.2362917e-01   6.0017982e-01   5.2838320e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-seuclidean-ml-iris.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-seuclidean-ml-iris.txt
    deleted file mode 100644
    index 3e2759df30..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-seuclidean-ml-iris.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   1.1781739e+00   8.4573383e-01   1.1040164e+00   2.6033464e-01   1.0391769e+00   6.5951091e-01   2.6643250e-01   1.6215602e+00   9.6424206e-01   5.8926015e-01   4.4417668e-01   1.2158053e+00   1.5196051e+00   1.4342986e+00   2.2147971e+00   1.0267382e+00   1.3103399e-01   1.0246015e+00   7.0646671e-01   4.6190224e-01   5.3352899e-01   6.8496652e-01   6.2944414e-01   5.1453676e-01   1.1649855e+00   3.8639663e-01   1.3340137e-01   2.6033464e-01   8.5141186e-01   9.9757114e-01   5.0629646e-01   1.3963590e+00   1.6851313e+00   9.6424206e-01   7.1143905e-01   4.8636669e-01   9.6424206e-01   1.4309353e+00   2.3749211e-01   1.8699153e-01   2.8644037e+00   1.0938603e+00   5.4968254e-01   7.9227302e-01   1.2158053e+00   7.0111465e-01   9.1831777e-01   5.2374483e-01   4.7680727e-01   3.4225655e+00   2.9886388e+00   3.5231786e+00   3.4844976e+00   3.4140450e+00   2.8802393e+00   3.0292175e+00   2.9585178e+00   3.2500773e+00   2.8104851e+00   3.8076036e+00   2.7718515e+00   3.6661618e+00   3.0567513e+00   2.4313960e+00   3.1540282e+00   2.7718124e+00   2.7494216e+00   4.0917474e+00   3.0136333e+00   3.0855821e+00   2.8833448e+00   3.7756717e+00   3.0462644e+00   3.0262994e+00   3.1582448e+00   3.6064873e+00   3.6179250e+00   3.0140884e+00   2.7108793e+00   3.1480683e+00   3.0769261e+00   2.8006021e+00   3.5140011e+00   2.7293962e+00   2.7724816e+00   3.3142477e+00   3.8377035e+00   2.4725633e+00   3.1307104e+00   3.0248446e+00   2.9240118e+00   2.9852014e+00   3.1515797e+00   2.8921724e+00   2.4678110e+00   2.6524994e+00   2.9083443e+00   2.7444686e+00   2.7478281e+00   4.2652803e+00   3.6712837e+00   4.4571526e+00   3.7518856e+00   4.1563042e+00   5.0327536e+00   3.5110505e+00   4.5914343e+00   4.4347154e+00   4.7605836e+00   3.6465897e+00   3.9644198e+00   4.1403419e+00   3.9458900e+00   4.0035735e+00   3.9244083e+00   3.7394250e+00   5.1213512e+00   5.6085412e+00   4.1515197e+00   4.3260896e+00   3.5311267e+00   5.2010517e+00   3.7194928e+00   4.0104626e+00   
4.2547108e+00   3.5326627e+00   3.3344440e+00   4.1152831e+00   4.1647603e+00   4.7306329e+00   5.0503286e+00   4.1958529e+00   3.4649010e+00   3.7290073e+00   5.0848756e+00   4.0161825e+00   3.6208873e+00   3.2588014e+00   4.1126592e+00   4.3082433e+00   4.1887411e+00   3.6712837e+00   4.3324309e+00   4.3552675e+00   4.1561376e+00   4.0674499e+00   3.7933592e+00   3.8117183e+00   3.3250640e+00   5.2374483e-01   4.3319335e-01   1.3890415e+00   2.1841707e+00   9.9973471e-01   9.3211669e-01   6.4636269e-01   2.7124234e-01   1.7245677e+00   9.3727156e-01   1.7819563e-01   7.5570839e-01   2.5520912e+00   3.3809114e+00   2.1782802e+00   1.1854382e+00   2.0937104e+00   1.8662528e+00   1.1155937e+00   1.6542533e+00   1.4482752e+00   8.4881492e-01   9.7259094e-01   1.6562722e-01   9.7322023e-01   1.2100516e+00   9.9111027e-01   5.3286499e-01   2.8394141e-01   1.1346946e+00   2.5666454e+00   2.8608436e+00   2.7124234e-01   4.9009568e-01   1.3630799e+00   2.7124234e-01   6.0647055e-01   9.5529726e-01   1.1682143e+00   1.6911681e+00   7.6195008e-01   1.2774622e+00   1.9003949e+00   1.7819563e-01   1.8642334e+00   5.8652824e-01   1.6860841e+00   7.0235100e-01   3.5517185e+00   3.0793995e+00   3.5669733e+00   2.7166736e+00   3.1838913e+00   2.5120824e+00   3.1938169e+00   2.0428688e+00   3.1039816e+00   2.2561090e+00   2.8016158e+00   2.6226738e+00   2.9050141e+00   2.8502197e+00   2.0976260e+00   3.1846091e+00   2.5890531e+00   2.2584354e+00   3.4434621e+00   2.3329638e+00   3.1272801e+00   2.5616006e+00   3.3203580e+00   2.7436923e+00   2.8484236e+00   3.0948526e+00   3.4151452e+00   3.5708989e+00   2.7939968e+00   2.0736054e+00   2.3834491e+00   2.2886612e+00   2.3204706e+00   3.1632397e+00   2.5205525e+00   3.0112889e+00   3.3433635e+00   3.2300528e+00   2.2657938e+00   2.4705763e+00   2.4462190e+00   2.8038852e+00   2.4332562e+00   2.2089299e+00   2.4060762e+00   2.2734727e+00   2.3627180e+00   2.7012637e+00   1.8976741e+00   2.3590971e+00   4.3837112e+00   3.3195686e+00   
4.4453895e+00   3.6018522e+00   4.1012355e+00   5.0512932e+00   2.8774753e+00   4.5344585e+00   4.0827835e+00   5.0801762e+00   3.7291677e+00   3.6888814e+00   4.1064223e+00   3.4625304e+00   3.7552252e+00   3.9939605e+00   3.6781201e+00   5.5433523e+00   5.4381439e+00   3.4976392e+00   4.4223824e+00   3.2288234e+00   5.1217596e+00   3.4157742e+00   4.1643077e+00   4.3726405e+00   3.2842292e+00   3.2296198e+00   3.9190152e+00   4.1591876e+00   4.6244312e+00   5.4884429e+00   4.0035368e+00   3.2202996e+00   3.3301365e+00   5.1089381e+00   4.2054648e+00   3.6234874e+00   3.1421933e+00   4.1502382e+00   4.3306814e+00   4.2256435e+00   3.3195686e+00   4.4219948e+00   4.4973329e+00   4.1152664e+00   3.6487297e+00   3.7329401e+00   4.0033827e+00   3.2017663e+00   2.8394141e-01   9.9272943e-01   1.8549949e+00   4.9772204e-01   5.9738093e-01   7.8305765e-01   3.7622328e-01   1.4342986e+00   5.0621589e-01   4.9772204e-01   6.9001472e-01   2.2742100e+00   3.0330374e+00   1.8410898e+00   8.5582452e-01   1.8552074e+00   1.4758765e+00   9.8932340e-01   1.2824303e+00   9.4580058e-01   7.0175090e-01   5.8564715e-01   6.1067563e-01   6.6453319e-01   9.2528705e-01   7.6195008e-01   1.7002750e-01   3.1093967e-01   1.0044375e+00   2.1686473e+00   2.5011215e+00   3.7622328e-01   3.6669623e-01   1.1883075e+00   3.7622328e-01   5.8652824e-01   6.7745878e-01   7.9191984e-01   2.0937821e+00   3.6228991e-01   9.5582164e-01   1.5272557e+00   4.9772204e-01   1.4755007e+00   1.3340137e-01   1.3666101e+00   4.3319335e-01   3.7283414e+00   3.2257816e+00   3.7651559e+00   3.1082143e+00   3.4606263e+00   2.7705999e+00   3.2962379e+00   2.4179023e+00   3.3643791e+00   2.5175848e+00   3.2317517e+00   2.8135312e+00   3.3502574e+00   3.0859108e+00   2.3316915e+00   3.3832002e+00   2.7540885e+00   2.5906747e+00   3.8459512e+00   2.7110494e+00   3.2296198e+00   2.8510843e+00   3.6612067e+00   3.0231940e+00   3.1083626e+00   3.3221751e+00   3.6999785e+00   3.7824508e+00   3.0223050e+00   2.4549511e+00   
2.7813487e+00   2.6993734e+00   2.6424988e+00   3.4348309e+00   2.6680185e+00   3.0548262e+00   3.5357687e+00   3.6340473e+00   2.4474337e+00   2.8211532e+00   2.7662396e+00   3.0069386e+00   2.7817508e+00   2.6121651e+00   2.7000040e+00   2.4677010e+00   2.5915377e+00   2.9544131e+00   2.2712863e+00   2.6277952e+00   4.4682388e+00   3.5629824e+00   4.6484688e+00   3.8140427e+00   4.2790737e+00   5.2629823e+00   3.1332305e+00   4.7710813e+00   4.3977196e+00   5.1429155e+00   3.8634878e+00   3.9555042e+00   4.3021827e+00   3.7450218e+00   3.9451569e+00   4.1141318e+00   3.8729361e+00   5.5923916e+00   5.7171219e+00   3.8836634e+00   4.5660923e+00   3.4290419e+00   5.3764415e+00   3.6907517e+00   4.2782894e+00   4.5393827e+00   3.5302656e+00   3.4102235e+00   4.1476932e+00   4.3814983e+00   4.8831870e+00   5.5467550e+00   4.2276454e+00   3.4820326e+00   3.6311162e+00   5.3207972e+00   4.2656428e+00   3.7854499e+00   3.3178004e+00   4.3254709e+00   4.4873379e+00   4.3956810e+00   3.5629824e+00   4.5607328e+00   4.6030755e+00   4.3016138e+00   3.9622355e+00   3.9225802e+00   4.0577906e+00   3.3684813e+00   1.2515236e+00   2.1021589e+00   7.0646671e-01   8.4383266e-01   5.2374483e-01   3.8525820e-01   1.6876653e+00   7.3502408e-01   3.6319073e-01   5.0299964e-01   2.5372015e+00   3.2897546e+00   2.1021589e+00   1.1117653e+00   2.0978519e+00   1.7286097e+00   1.1937015e+00   1.5323598e+00   1.1874605e+00   8.6297946e-01   7.6710016e-01   5.3827772e-01   8.8540687e-01   1.1730565e+00   1.0034646e+00   2.6643250e-01   2.4808718e-01   1.2168625e+00   2.4209958e+00   2.7605308e+00   3.8525820e-01   5.6164055e-01   1.4300979e+00   3.8525820e-01   3.5266705e-01   9.1831777e-01   1.0556536e+00   1.8570904e+00   3.5266705e-01   1.1671832e+00   1.7581227e+00   3.6319073e-01   1.7245677e+00   2.3749211e-01   1.6215602e+00   6.7030885e-01   3.7702988e+00   3.2513048e+00   3.7854692e+00   2.9445918e+00   3.4252075e+00   2.6854877e+00   3.3289662e+00   2.2084365e+00   3.3482393e+00   
2.3872006e+00   3.0088381e+00   2.7858967e+00   3.2051999e+00   3.0423378e+00   2.2727200e+00   3.4066596e+00   2.7026316e+00   2.4942728e+00   3.7194777e+00   2.5718070e+00   3.2266664e+00   2.8009311e+00   3.5699473e+00   2.9607930e+00   3.0876835e+00   3.3257458e+00   3.6752903e+00   3.7792524e+00   2.9772187e+00   2.3405405e+00   2.6225184e+00   2.5379456e+00   2.5530959e+00   3.3522700e+00   2.6036900e+00   3.0973166e+00   3.5528019e+00   3.5210610e+00   2.4001125e+00   2.6797931e+00   2.6323855e+00   2.9822614e+00   2.6747723e+00   2.4035668e+00   2.5939621e+00   2.4241443e+00   2.5291529e+00   2.9226858e+00   2.0959349e+00   2.5480036e+00   4.4738080e+00   3.4750769e+00   4.6459778e+00   3.7712854e+00   4.2573656e+00   5.2660922e+00   2.9665251e+00   4.7582165e+00   4.3221591e+00   5.2027091e+00   3.8786505e+00   3.8957203e+00   4.2952898e+00   3.6300721e+00   3.8796854e+00   4.1217239e+00   3.8539421e+00   5.6722969e+00   5.6818418e+00   3.7421166e+00   4.5832488e+00   3.3486395e+00   5.3611963e+00   3.6296692e+00   4.3021827e+00   4.5620086e+00   3.4793229e+00   3.3827920e+00   4.0990020e+00   4.3836503e+00   4.8653269e+00   5.6359100e+00   4.1798856e+00   3.4290065e+00   3.5331571e+00   5.3326389e+00   4.2899049e+00   3.7762521e+00   3.2871170e+00   4.3357622e+00   4.4879065e+00   4.4101806e+00   3.4750769e+00   4.5719131e+00   4.6252914e+00   4.2958117e+00   3.8764095e+00   3.9087616e+00   4.0828629e+00   3.3281063e+00   8.9980139e-01   6.8064066e-01   4.6472955e-01   1.7695601e+00   1.1682143e+00   5.3827772e-01   5.3286499e-01   1.4108003e+00   1.6357068e+00   1.3406177e+00   2.0471148e+00   8.8540687e-01   2.9145160e-01   9.8663349e-01   4.9772204e-01   6.8921053e-01   3.7371902e-01   5.3360548e-01   8.2263932e-01   5.9279023e-01   1.3884168e+00   5.4248468e-01   3.3872939e-01   5.2066928e-01   9.9757114e-01   1.1836141e+00   7.1971771e-01   1.1867923e+00   1.5097838e+00   1.1682143e+00   9.2945909e-01   6.4884272e-01   1.1682143e+00   1.5630357e+00   
4.8016385e-01   2.7124234e-01   3.0617227e+00   1.1744248e+00   5.8374436e-01   6.1345624e-01   1.4108003e+00   4.9009568e-01   1.0413386e+00   4.3319335e-01   6.9189100e-01   3.5573943e+00   3.1141702e+00   3.6648465e+00   3.6881887e+00   3.5883824e+00   3.0468381e+00   3.1315658e+00   3.1515797e+00   3.4214871e+00   2.9743594e+00   4.0164863e+00   2.9182492e+00   3.8928105e+00   3.2158144e+00   2.6006889e+00   3.3027067e+00   2.9031809e+00   2.9465762e+00   4.3027855e+00   3.2186029e+00   3.1845052e+00   3.0688421e+00   3.9670252e+00   3.2223969e+00   3.2005819e+00   3.3183883e+00   3.7835219e+00   3.7624114e+00   3.1706932e+00   2.9238797e+00   3.3563322e+00   3.2896970e+00   2.9943889e+00   3.6782511e+00   2.8525048e+00   2.8501434e+00   3.4560405e+00   4.0524463e+00   2.6189855e+00   3.3240937e+00   3.2080454e+00   3.0726532e+00   3.1844624e+00   3.3537486e+00   3.0707196e+00   2.6200714e+00   2.8136838e+00   3.0798325e+00   2.9434145e+00   2.9219863e+00   4.3385668e+00   3.8211670e+00   4.5879449e+00   3.8900790e+00   4.2758494e+00   5.1630878e+00   3.6606996e+00   4.7359279e+00   4.6113949e+00   4.8204842e+00   3.7540483e+00   4.1248784e+00   4.2705921e+00   4.1081181e+00   4.1285849e+00   4.0208302e+00   3.8718630e+00   5.1706118e+00   5.7653526e+00   4.3529699e+00   4.4302351e+00   3.6643052e+00   5.3499285e+00   3.8863166e+00   4.1025634e+00   4.3705834e+00   3.6895783e+00   3.4655259e+00   4.2576017e+00   4.3078329e+00   4.8848930e+00   5.1059908e+00   4.3355275e+00   3.6287723e+00   3.9015858e+00   5.2167033e+00   4.0809175e+00   3.7394250e+00   3.3885059e+00   4.2346521e+00   4.4182506e+00   4.3085803e+00   3.8211670e+00   4.4331391e+00   4.4402220e+00   4.2825037e+00   4.2456731e+00   3.9239772e+00   3.8761056e+00   3.4480528e+00   1.5196051e+00   1.2824303e+00   2.6220224e+00   1.9839744e+00   5.4248468e-01   1.3880441e+00   2.2398377e+00   2.5185753e+00   6.5993495e-01   1.2140269e+00   2.2670334e-01   1.0140902e+00   4.4901474e-01   4.6310132e-01   
1.1825559e+00   5.9738093e-01   1.2799022e+00   1.4364110e+00   1.3915110e+00   2.1479410e+00   1.2515236e+00   9.9544409e-01   1.2188859e+00   1.8419620e+00   2.0002725e+00   1.1587093e+00   6.6217390e-01   7.6869104e-01   1.9839744e+00   1.7287715e+00   9.9282597e-01   1.9839744e+00   2.4262873e+00   1.2419907e+00   1.0737552e+00   3.8557204e+00   2.0456732e+00   1.0753036e+00   4.4417668e-01   2.2089621e+00   5.0629646e-01   1.9071648e+00   5.5576380e-01   1.4985933e+00   3.3087308e+00   2.9428879e+00   3.4716469e+00   4.0891690e+00   3.6027276e+00   3.2367228e+00   2.9085289e+00   3.7111791e+00   3.3928277e+00   3.3150290e+00   4.5928107e+00   2.9594198e+00   4.2678298e+00   3.2621413e+00   2.8156205e+00   3.1507918e+00   2.9937665e+00   3.2188610e+00   4.5717893e+00   3.5888229e+00   3.0697068e+00   3.2000980e+00   4.1198780e+00   3.3377308e+00   3.2155231e+00   3.2352945e+00   3.7547729e+00   3.6294383e+00   3.2310889e+00   3.2831808e+00   3.7736317e+00   3.7263104e+00   3.2475076e+00   3.7907966e+00   2.9840079e+00   2.6164035e+00   3.2920107e+00   4.3046992e+00   2.7582086e+00   3.6782988e+00   3.5276458e+00   3.0726914e+00   3.4670753e+00   3.9103065e+00   3.3340819e+00   2.7470239e+00   2.9746673e+00   3.1328218e+00   3.4555378e+00   3.1318122e+00   4.0752096e+00   3.9330937e+00   4.3762388e+00   3.8407425e+00   4.1274362e+00   4.9032073e+00   4.0261574e+00   4.5547766e+00   4.6534818e+00   4.3582690e+00   3.5326627e+00   4.1405270e+00   4.0947877e+00   4.2953569e+00   4.1533819e+00   3.7981559e+00   3.7518931e+00   4.6218430e+00   5.6203182e+00   4.6338556e+00   4.1503558e+00   3.7655153e+00   5.1552615e+00   3.9363994e+00   3.8053980e+00   4.0787299e+00   3.7177375e+00   3.4172455e+00   4.2121483e+00   4.1116484e+00   4.7277367e+00   4.5452377e+00   4.2828893e+00   3.6617103e+00   4.0381240e+00   4.9437127e+00   3.7768622e+00   3.5869495e+00   3.3594066e+00   4.0056296e+00   4.1979142e+00   4.0739556e+00   3.9330937e+00   4.1628496e+00   4.1341118e+00   
4.1117268e+00   4.3552101e+00   3.7951858e+00   3.5859295e+00   3.4280549e+00   5.0370871e-01   1.1854382e+00   8.2574748e-01   1.1968529e+00   2.9724335e-01   9.8896933e-01   1.0391769e+00   2.0112023e+00   2.6653431e+00   1.5111262e+00   6.4636269e-01   1.6262201e+00   1.1040164e+00   9.8966705e-01   9.2934901e-01   5.3040146e-01   7.1789533e-01   3.9472619e-01   1.0556536e+00   5.1318506e-01   7.7368489e-01   7.3633268e-01   5.0731024e-01   7.5303835e-01   9.7659801e-01   1.7897583e+00   2.1453760e+00   8.2574748e-01   6.9001472e-01   1.1202045e+00   8.2574748e-01   9.6424206e-01   6.2046469e-01   5.3827772e-01   2.5404386e+00   5.3988754e-01   6.7372733e-01   1.1459117e+00   9.5361455e-01   1.1160907e+00   4.7951153e-01   1.1016806e+00   5.5109043e-01   3.7667766e+00   3.2399456e+00   3.8211099e+00   3.3920086e+00   3.5974024e+00   2.9126202e+00   3.2661365e+00   2.7296888e+00   3.4884812e+00   2.6863536e+00   3.5939578e+00   2.8820992e+00   3.6783922e+00   3.1916608e+00   2.4616677e+00   3.4465420e+00   2.8051321e+00   2.8088029e+00   4.1173049e+00   2.9788024e+00   3.2021702e+00   3.0140681e+00   3.8639979e+00   3.1756883e+00   3.2362495e+00   3.4136564e+00   3.8424217e+00   3.8484723e+00   3.1221021e+00   2.7251978e+00   3.0739866e+00   3.0068046e+00   2.8468838e+00   3.5726594e+00   2.7099355e+00   2.9743925e+00   3.5889631e+00   3.9062347e+00   2.5235038e+00   3.0623697e+00   2.9777787e+00   3.0820765e+00   3.0110500e+00   2.9445347e+00   2.8809757e+00   2.5543631e+00   2.7073441e+00   3.0792230e+00   2.5679169e+00   2.7817508e+00   4.4017095e+00   3.6741422e+00   4.6939918e+00   3.8825162e+00   4.3049794e+00   5.3127345e+00   3.3002804e+00   4.8514877e+00   4.5630927e+00   5.0475012e+00   3.8518880e+00   4.0758615e+00   4.3442223e+00   3.8984747e+00   3.9980344e+00   4.0855291e+00   3.9215612e+00   5.4851910e+00   5.8312870e+00   4.1416475e+00   4.5535489e+00   3.5028870e+00   5.4694510e+00   3.8234999e+00   4.2411026e+00   4.5531893e+00   3.6365882e+00   
3.4540568e+00   4.2272070e+00   4.4531017e+00   4.9839411e+00   5.4520873e+00   4.3016933e+00   3.6054769e+00   3.7985950e+00   5.3693255e+00   4.1776636e+00   3.8035129e+00   3.3594554e+00   4.3469553e+00   4.4886880e+00   4.4112274e+00   3.6741422e+00   4.5435532e+00   4.5534752e+00   4.3346048e+00   4.1329859e+00   3.9643709e+00   3.9674740e+00   3.4024060e+00   1.3630799e+00   7.1446962e-01   8.4383266e-01   2.4808718e-01   9.6424206e-01   1.2783641e+00   1.6962086e+00   2.4702874e+00   1.2824303e+00   2.9691107e-01   1.2631980e+00   9.3957399e-01   4.9617437e-01   7.4965096e-01   7.2553812e-01   4.8492463e-01   3.3125444e-01   9.2426065e-01   2.6812643e-01   3.3395426e-01   2.4808718e-01   5.8926015e-01   7.3502408e-01   5.4956349e-01   1.6376300e+00   1.9421609e+00   7.1446962e-01   4.9160020e-01   6.5622658e-01   7.1446962e-01   1.1785203e+00   1.2076330e-01   2.8845946e-01   2.6135503e+00   8.6638670e-01   5.7543116e-01   9.9282597e-01   9.6424206e-01   9.3211669e-01   6.7030885e-01   7.8100392e-01   2.3749211e-01   3.4362741e+00   2.9772187e+00   3.5154539e+00   3.2993605e+00   3.3443673e+00   2.7564382e+00   3.0285957e+00   2.7337208e+00   3.1980682e+00   2.6433553e+00   3.5789723e+00   2.6973512e+00   3.4963203e+00   2.9759207e+00   2.3127671e+00   3.1412273e+00   2.6774449e+00   2.6095931e+00   3.9436181e+00   2.8415480e+00   3.0475523e+00   2.7865107e+00   3.6589666e+00   2.9471549e+00   2.9637920e+00   3.1238401e+00   3.5511257e+00   3.5866237e+00   2.9292978e+00   2.5500042e+00   2.9620296e+00   2.8874183e+00   2.6658729e+00   3.4048426e+00   2.6224103e+00   2.7775195e+00   3.2991484e+00   3.6986035e+00   2.3717154e+00   2.9594198e+00   2.8613259e+00   2.8592001e+00   2.8393888e+00   2.9284199e+00   2.7478281e+00   2.3715604e+00   2.5423572e+00   2.8329679e+00   2.5370255e+00   2.6226761e+00   4.2550363e+00   3.5587552e+00   4.4384175e+00   3.6863990e+00   4.1157774e+00   5.0262130e+00   3.3282378e+00   4.5651798e+00   4.3425674e+00   4.8115590e+00   
3.6359466e+00   3.8813909e+00   4.1126592e+00   3.8106374e+00   3.9142555e+00   3.9091502e+00   3.6969354e+00   5.1996380e+00   5.5654572e+00   3.9942942e+00   4.3261607e+00   3.4228883e+00   5.1764012e+00   3.6303894e+00   4.0165247e+00   4.2627936e+00   3.4508614e+00   3.2748167e+00   4.0461381e+00   4.1489951e+00   4.6983104e+00   5.1372583e+00   4.1280577e+00   3.3829235e+00   3.6112201e+00   5.0844328e+00   4.0217556e+00   3.5877667e+00   3.1942059e+00   4.1021303e+00   4.2899049e+00   4.1807095e+00   3.5587552e+00   4.3276502e+00   4.3608503e+00   4.1273623e+00   3.9585431e+00   3.7540483e+00   3.8154519e+00   3.2543470e+00   7.7313507e-01   2.2058495e+00   1.2553676e+00   5.5109043e-01   3.3742167e-01   3.0507869e+00   3.8084614e+00   2.6171176e+00   1.6268459e+00   2.6113513e+00   2.2457527e+00   1.6784057e+00   2.0471148e+00   1.6480463e+00   1.3225313e+00   1.2819528e+00   7.6880092e-01   1.3915110e+00   1.6886167e+00   1.5043671e+00   7.8918675e-01   6.7745878e-01   1.6911617e+00   2.9348176e+00   3.2792996e+00   7.7313507e-01   1.0082548e+00   1.9190366e+00   7.7313507e-01   2.3749211e-01   1.4309353e+00   1.5685186e+00   1.3963590e+00   6.9420840e-01   1.6514950e+00   2.2742046e+00   5.5109043e-01   2.2440749e+00   7.3283576e-01   2.1421205e+00   1.1730565e+00   4.0382971e+00   3.5072516e+00   4.0204750e+00   2.8157524e+00   3.5602797e+00   2.7716933e+00   3.6026548e+00   1.9881684e+00   3.5249607e+00   2.3719578e+00   2.7108793e+00   2.9588139e+00   3.1000099e+00   3.1914275e+00   2.3942229e+00   3.6456797e+00   2.8533918e+00   2.5518058e+00   3.6496659e+00   2.5198146e+00   3.4451131e+00   2.9183683e+00   3.5989425e+00   3.0794356e+00   3.2576813e+00   3.5320163e+00   3.8261152e+00   3.9741896e+00   3.1180182e+00   2.3364082e+00   2.5170134e+00   2.4274466e+00   2.6068690e+00   3.4218333e+00   2.7386417e+00   3.3934324e+00   3.7851452e+00   3.4854109e+00   2.5636830e+00   2.6200486e+00   2.6174942e+00   3.1669559e+00   2.6880360e+00   2.1675629e+00   
2.6284424e+00   2.5986852e+00   2.6571682e+00   3.0828753e+00   1.9438939e+00   2.6338308e+00   4.6899445e+00   3.5257224e+00   4.8360835e+00   3.9149039e+00   4.4239486e+00   5.4654338e+00   2.8575769e+00   4.9368840e+00   4.3795070e+00   5.4971381e+00   4.1073887e+00   3.9867349e+00   4.4778796e+00   3.6113294e+00   3.9521234e+00   4.3324165e+00   4.0348180e+00   6.0067497e+00   5.8007868e+00   3.6612067e+00   4.8067419e+00   3.4133837e+00   5.5245725e+00   3.7158962e+00   4.5501061e+00   4.8067452e+00   3.5898573e+00   3.5494058e+00   4.2132256e+00   4.5903044e+00   5.0235775e+00   5.9805492e+00   4.2919572e+00   3.5520531e+00   3.5821954e+00   5.5319518e+00   4.5355231e+00   3.9801247e+00   3.4489678e+00   4.5459210e+00   4.6801759e+00   4.6148655e+00   3.5257224e+00   4.7911899e+00   4.8567489e+00   4.4697071e+00   3.9039516e+00   4.0848538e+00   4.3320055e+00   3.4824517e+00   1.5154593e+00   7.1671402e-01   2.6643250e-01   7.9347379e-01   2.3528246e+00   3.1744385e+00   1.9839744e+00   9.9059199e-01   1.9029496e+00   1.6532822e+00   9.3451915e-01   1.4586696e+00   1.2483935e+00   7.4743804e-01   7.4957404e-01   2.9691107e-01   8.0686941e-01   9.9973471e-01   7.9394533e-01   3.6319073e-01   1.8699153e-01   9.9891776e-01   2.3345854e+00   2.6422396e+00   0.0000000e+00   3.3742167e-01   1.1857824e+00   0.0000000e+00   6.6918102e-01   7.4445830e-01   9.7322023e-01   1.9284841e+00   6.6918102e-01   1.1393372e+00   1.6942803e+00   3.7371902e-01   1.6386105e+00   4.5257749e-01   1.4715172e+00   4.9772204e-01   3.5602797e+00   3.0968979e+00   3.5933352e+00   2.8998722e+00   3.2656298e+00   2.6029746e+00   3.1974447e+00   2.2445103e+00   3.1601923e+00   2.3946216e+00   3.0209665e+00   2.6867317e+00   3.0775658e+00   2.9161244e+00   2.1946278e+00   3.2137635e+00   2.6502891e+00   2.3652711e+00   3.6096140e+00   2.4893065e+00   3.1578003e+00   2.6568473e+00   3.4426472e+00   2.8187901e+00   2.9128857e+00   3.1418202e+00   3.4847097e+00   3.6205958e+00   2.8694312e+00   
2.2223283e+00   2.5588203e+00   2.4651138e+00   2.4413293e+00   3.2621861e+00   2.5834128e+00   2.9995857e+00   3.3733791e+00   3.3817876e+00   2.3263008e+00   2.6305758e+00   2.5756071e+00   2.8533918e+00   2.5683063e+00   2.4187322e+00   2.5258216e+00   2.3250308e+00   2.4413618e+00   2.7691536e+00   2.1006933e+00   2.4608850e+00   4.4119896e+00   3.4290419e+00   4.4942656e+00   3.6650933e+00   4.1590662e+00   5.0899438e+00   3.0333619e+00   4.5799469e+00   4.1882413e+00   5.0726081e+00   3.7613721e+00   3.7859992e+00   4.1623715e+00   3.6029758e+00   3.8608065e+00   4.0352348e+00   3.7266849e+00   5.5043247e+00   5.5172732e+00   3.6569442e+00   4.4568115e+00   3.3324016e+00   5.1765224e+00   3.5192065e+00   4.1799642e+00   4.3857400e+00   3.3769078e+00   3.2906843e+00   4.0034548e+00   4.1917183e+00   4.6854597e+00   5.4444866e+00   4.0904298e+00   3.2962678e+00   3.4250783e+00   5.1569386e+00   4.2213314e+00   3.6582637e+00   3.2059261e+00   4.1937040e+00   4.3826532e+00   4.2786320e+00   3.4290419e+00   4.4549850e+00   4.5270304e+00   4.1816268e+00   3.7777251e+00   3.7924144e+00   4.0173731e+00   3.2613828e+00   1.0034646e+00   1.7753099e+00   2.1070188e+00   8.6079202e-01   1.6751898e+00   5.4248468e-01   6.0365341e-01   4.6310132e-01   4.4901474e-01   7.0111465e-01   4.4713936e-01   1.0328871e+00   1.0722301e+00   1.0271920e+00   1.6860841e+00   8.8540687e-01   5.2066928e-01   7.3502408e-01   1.4309353e+00   1.5630357e+00   7.3985997e-01   9.6257499e-01   1.1608422e+00   1.5154593e+00   1.2617482e+00   4.9009568e-01   1.5154593e+00   2.0192952e+00   7.8100392e-01   6.9001472e-01   3.4112480e+00   1.6736143e+00   8.5090098e-01   5.5183182e-01   1.7753099e+00   4.3319335e-01   1.5054343e+00   1.2076330e-01   1.0428797e+00   3.2901237e+00   2.9292978e+00   3.4367371e+00   3.8111737e+00   3.4729880e+00   3.0672734e+00   2.9473505e+00   3.3901879e+00   3.2662947e+00   3.1144880e+00   4.2413542e+00   2.8660589e+00   3.9495965e+00   3.1433256e+00   2.6375433e+00   
3.0908568e+00   2.9081458e+00   2.9702968e+00   4.3236374e+00   3.3103937e+00   3.0964304e+00   3.0179755e+00   3.9313682e+00   3.1669001e+00   3.0754584e+00   3.1432906e+00   3.6245464e+00   3.5873526e+00   3.1179878e+00   2.9918256e+00   3.4776059e+00   3.4142800e+00   3.0198617e+00   3.6568155e+00   2.8980988e+00   2.6944324e+00   3.2512254e+00   4.0479095e+00   2.6293791e+00   3.4291613e+00   3.2968205e+00   2.9799791e+00   3.2239661e+00   3.5774656e+00   3.1299499e+00   2.6069579e+00   2.8203994e+00   2.9888842e+00   3.1470877e+00   2.9476507e+00   4.1975965e+00   3.8311128e+00   4.3861256e+00   3.7921747e+00   4.1446583e+00   4.9211801e+00   3.8442384e+00   4.5238497e+00   4.5231439e+00   4.5451210e+00   3.5805575e+00   4.0469570e+00   4.0990882e+00   4.1579560e+00   4.1249171e+00   3.8727781e+00   3.7290616e+00   4.8292468e+00   5.5757877e+00   4.3965263e+00   4.2248397e+00   3.6936498e+00   5.1256160e+00   3.8221804e+00   3.8961870e+00   4.1176453e+00   3.6242665e+00   3.3807801e+00   4.1671042e+00   4.0787299e+00   4.6798442e+00   4.7374542e+00   4.2466909e+00   3.5432140e+00   3.8759165e+00   4.9689016e+00   3.9204411e+00   3.5927937e+00   3.3203721e+00   4.0348754e+00   4.2531600e+00   4.1147391e+00   3.8311128e+00   4.2401451e+00   4.2502727e+00   4.1279956e+00   4.2116129e+00   3.7856899e+00   3.7242025e+00   3.3954918e+00   9.3865015e-01   1.1459117e+00   1.8505741e+00   2.5636327e+00   1.3972701e+00   4.6310132e-01   1.4327294e+00   1.0013399e+00   7.2679299e-01   8.2574748e-01   6.2187934e-01   5.8496636e-01   1.7002750e-01   9.5361455e-01   3.5639126e-01   5.3827772e-01   4.9617437e-01   4.7680727e-01   6.9189100e-01   7.7259801e-01   1.6911681e+00   2.0326426e+00   7.1671402e-01   5.6788283e-01   8.9258315e-01   7.1671402e-01   1.0551281e+00   3.6669623e-01   3.9699460e-01   2.5716465e+00   6.8921053e-01   6.2148529e-01   1.0391769e+00   9.3865015e-01   9.9111027e-01   5.3286499e-01   9.2006504e-01   3.5266705e-01   3.5819899e+00   3.0902007e+00   
3.6482741e+00   3.3284223e+00   3.4528558e+00   2.8062636e+00   3.1283731e+00   2.7130802e+00   3.3201500e+00   2.6478976e+00   3.5696084e+00   2.7728704e+00   3.5649049e+00   3.0583917e+00   2.3718219e+00   3.2763163e+00   2.7180029e+00   2.6779045e+00   4.0150919e+00   2.8864805e+00   3.1083977e+00   2.8822332e+00   3.7402558e+00   3.0304088e+00   3.0793541e+00   3.2506893e+00   3.6756049e+00   3.7003058e+00   3.0054875e+00   2.6160903e+00   2.9965118e+00   2.9238797e+00   2.7351276e+00   3.4650507e+00   2.6418165e+00   2.8577585e+00   3.4252075e+00   3.7832878e+00   2.4227175e+00   2.9917855e+00   2.8903467e+00   2.9460322e+00   2.9034030e+00   2.9191699e+00   2.7908173e+00   2.4332562e+00   2.6000033e+00   2.9338362e+00   2.5416562e+00   2.6797931e+00   4.3169600e+00   3.6002350e+00   4.5501061e+00   3.7611228e+00   4.1952298e+00   5.1491201e+00   3.2996439e+00   4.6835622e+00   4.4311275e+00   4.9194005e+00   3.7316761e+00   3.9622355e+00   4.2152785e+00   3.8426553e+00   3.9520057e+00   3.9894324e+00   3.7877346e+00   5.3234168e+00   5.6801405e+00   4.0465336e+00   4.4289683e+00   3.4509848e+00   5.3007226e+00   3.7123045e+00   4.1128937e+00   4.3848871e+00   3.5295907e+00   3.3480190e+00   4.1214168e+00   4.2758432e+00   4.8208082e+00   5.2754050e+00   4.2018689e+00   3.4688327e+00   3.6716137e+00   5.2146461e+00   4.0903576e+00   3.6733277e+00   3.2612647e+00   4.2126998e+00   4.3809966e+00   4.2914999e+00   3.6002350e+00   4.4223824e+00   4.4497684e+00   4.2250047e+00   4.0330034e+00   3.8460049e+00   3.8818416e+00   3.3084835e+00   6.2729876e-01   2.6091054e+00   3.4299177e+00   2.2340939e+00   1.2368073e+00   2.1640372e+00   1.8992968e+00   1.1925354e+00   1.7015647e+00   1.4288989e+00   9.5582164e-01   9.7391954e-01   2.9724335e-01   1.0376697e+00   1.2583645e+00   1.0495503e+00   5.0731024e-01   2.8845946e-01   1.2384679e+00   2.5831347e+00   2.8967543e+00   2.6643250e-01   5.4873947e-01   1.4369223e+00   2.6643250e-01   5.0370871e-01   1.0013399e+00   
1.2082987e+00   1.6761482e+00   6.8299624e-01   1.3528452e+00   1.9417181e+00   2.6206799e-01   1.8882412e+00   5.3690447e-01   1.7295385e+00   7.4445830e-01   3.6974389e+00   3.2246529e+00   3.7127916e+00   2.8221999e+00   3.3289662e+00   2.6369282e+00   3.3348648e+00   2.1165503e+00   3.2465431e+00   2.3709411e+00   2.8608899e+00   2.7655496e+00   3.0110500e+00   2.9862341e+00   2.2391292e+00   3.3332541e+00   2.7176350e+00   2.3810734e+00   3.5657791e+00   2.4469788e+00   3.2638546e+00   2.7057900e+00   3.4512743e+00   2.8728052e+00   2.9934131e+00   3.2431147e+00   3.5582624e+00   3.7179545e+00   2.9335017e+00   2.1999209e+00   2.4893065e+00   2.3915367e+00   2.4540260e+00   3.2923304e+00   2.6414379e+00   3.1466196e+00   3.4901671e+00   3.3542627e+00   2.3973915e+00   2.5861640e+00   2.5561973e+00   2.9420425e+00   2.5609364e+00   2.2836399e+00   2.5303888e+00   2.4035745e+00   2.4950488e+00   2.8435005e+00   2.0000785e+00   2.4916202e+00   4.5218181e+00   3.4492861e+00   4.5921002e+00   3.7366932e+00   4.2432727e+00   5.1949299e+00   2.9709788e+00   4.6735988e+00   4.2160797e+00   5.2249989e+00   3.8759828e+00   3.8289542e+00   4.2545385e+00   3.5878027e+00   3.8924868e+00   4.1403048e+00   3.8179103e+00   5.6801405e+00   5.5805905e+00   3.6100547e+00   4.5709635e+00   3.3584734e+00   5.2629823e+00   3.5576748e+00   4.3070506e+00   4.5135384e+00   3.4273212e+00   3.3707040e+00   4.0596064e+00   4.2990937e+00   4.7676077e+00   5.6256469e+00   4.1454035e+00   3.3551224e+00   3.4472672e+00   5.2603070e+00   4.3452859e+00   3.7614312e+00   3.2825924e+00   4.3002370e+00   4.4796258e+00   4.3809022e+00   3.4492861e+00   4.5673965e+00   4.6446301e+00   4.2677070e+00   3.7864370e+00   3.8796124e+00   4.1423594e+00   3.3352922e+00   2.9361142e+00   3.6728262e+00   2.4980859e+00   1.5364600e+00   2.5390784e+00   2.1113072e+00   1.6578570e+00   1.9353585e+00   1.4375287e+00   1.3425463e+00   1.1993279e+00   9.0115406e-01   1.3418210e+00   1.6061161e+00   1.4416694e+00   
7.3727571e-01   7.1781501e-01   1.6797637e+00   2.7692440e+00   3.1313820e+00   7.9347379e-01   9.7352372e-01   1.8600648e+00   7.9347379e-01   2.1119253e-01   1.3612390e+00   1.4580439e+00   1.6571634e+00   5.0731024e-01   1.5980974e+00   2.1674065e+00   6.7984069e-01   2.1059482e+00   6.2457556e-01   2.0330443e+00   1.1132823e+00   4.2319020e+00   3.7044235e+00   4.2326669e+00   3.1432906e+00   3.8172627e+00   3.0425144e+00   3.7866079e+00   2.3206274e+00   3.7650177e+00   2.6608343e+00   3.0454230e+00   3.1914925e+00   3.4221447e+00   3.4413652e+00   2.6453561e+00   3.8539838e+00   3.0892079e+00   2.8357998e+00   3.9683086e+00   2.8336784e+00   3.6477040e+00   3.1799040e+00   3.8944724e+00   3.3434129e+00   3.4994776e+00   3.7569353e+00   4.0775935e+00   4.2049294e+00   3.3684490e+00   2.6363662e+00   2.8414019e+00   2.7526519e+00   2.8906655e+00   3.7008234e+00   2.9737492e+00   3.5555916e+00   3.9977110e+00   3.7960948e+00   2.7978670e+00   2.9332077e+00   2.9200513e+00   3.4002561e+00   2.9851921e+00   2.5032729e+00   2.9159414e+00   2.8324645e+00   2.9104902e+00   3.3286096e+00   2.2670901e+00   2.9042354e+00   4.8902416e+00   3.8029663e+00   5.0697571e+00   4.1657422e+00   4.6611282e+00   5.6979337e+00   3.1565039e+00   5.1794156e+00   4.6677357e+00   5.6656906e+00   4.3138249e+00   4.2590423e+00   4.7118516e+00   3.9079657e+00   4.2090892e+00   4.5409993e+00   4.2707579e+00   6.1569683e+00   6.0684263e+00   3.9837013e+00   5.0178221e+00   3.6761530e+00   5.7743610e+00   3.9890690e+00   4.7480354e+00   5.0151961e+00   3.8518880e+00   3.7849163e+00   4.4740109e+00   4.8191103e+00   5.2745801e+00   6.1258486e+00   4.5520039e+00   3.8145791e+00   3.8707244e+00   5.7618968e+00   4.7193263e+00   4.2030300e+00   3.6843246e+00   4.7664506e+00   4.9031551e+00   4.8333734e+00   3.8029663e+00   5.0038635e+00   5.0562579e+00   4.7021393e+00   4.1966653e+00   4.3193180e+00   4.5127918e+00   3.7195417e+00   9.8143688e-01   5.9868400e-01   1.4402716e+00   5.6992880e-01   
9.8663349e-01   1.4928150e+00   1.1361809e+00   1.7216149e+00   1.8856736e+00   1.8789959e+00   2.5107352e+00   1.7228721e+00   1.3724737e+00   1.5661153e+00   2.2847787e+00   2.4120925e+00   1.4985933e+00   7.9011741e-01   5.9738093e-01   2.3528246e+00   2.0826771e+00   1.2100516e+00   2.3528246e+00   2.8601865e+00   1.6304499e+00   1.5111262e+00   4.2257604e+00   2.5031609e+00   1.6091095e+00   1.0739839e+00   2.6091054e+00   9.8932340e-01   2.3488496e+00   9.3392552e-01   1.8848176e+00   3.4513181e+00   3.2138675e+00   3.6568023e+00   4.4832075e+00   3.8715598e+00   3.6399979e+00   3.2048569e+00   4.1609431e+00   3.6276988e+00   3.7852753e+00   5.0007603e+00   3.3356061e+00   4.5726588e+00   3.6020324e+00   3.2283316e+00   3.3543125e+00   3.4317803e+00   3.5762357e+00   4.8853618e+00   3.9697083e+00   3.4608105e+00   3.5194529e+00   4.4307529e+00   3.6664068e+00   3.4821665e+00   3.4661369e+00   3.9690303e+00   3.8732280e+00   3.5908374e+00   3.6384054e+00   4.1605480e+00   4.1054173e+00   3.6121763e+00   4.1591449e+00   3.4571840e+00   2.9726287e+00   3.5108834e+00   4.5938444e+00   3.1869025e+00   4.0859475e+00   3.9449709e+00   3.4111584e+00   3.8289196e+00   4.3382952e+00   3.7437946e+00   3.1530215e+00   3.3792174e+00   3.4400302e+00   3.8876642e+00   3.5288768e+00   4.4107365e+00   4.3401559e+00   4.5910423e+00   4.1731099e+00   4.4383007e+00   5.0605479e+00   4.5288382e+00   4.7400085e+00   4.9337127e+00   4.5282137e+00   3.8167418e+00   4.4582410e+00   4.3491396e+00   4.7099690e+00   4.5667632e+00   4.1110524e+00   4.0457882e+00   4.6970439e+00   5.8050200e+00   4.9831785e+00   4.3869525e+00   4.2045486e+00   5.3107398e+00   4.2598936e+00   4.0608563e+00   4.2495756e+00   4.0560071e+00   3.7740189e+00   4.5388860e+00   4.2824837e+00   4.9058468e+00   4.5708763e+00   4.6120616e+00   3.9763551e+00   4.3872260e+00   5.0860672e+00   4.0998055e+00   3.8946369e+00   3.7330702e+00   4.2352833e+00   4.4742220e+00   4.3047259e+00   4.3401559e+00   4.4192906e+00   
4.4017154e+00   4.3831142e+00   4.6832544e+00   4.0909816e+00   3.9225445e+00   3.8229302e+00   1.2140269e+00   2.2031378e+00   1.3945864e+00   1.5674943e+00   2.3519816e+00   1.7695601e+00   2.3060361e+00   2.6440626e+00   2.5730128e+00   3.3484034e+00   2.4570006e+00   2.1775427e+00   2.3990667e+00   3.0314484e+00   3.2003667e+00   2.3345854e+00   9.9891776e-01   5.8565201e-01   3.1744385e+00   2.9106021e+00   2.1090950e+00   3.1744385e+00   3.6015962e+00   2.4316107e+00   2.2478972e+00   5.0583620e+00   3.1946200e+00   2.2571919e+00   1.5783735e+00   3.4098353e+00   1.5848534e+00   3.0815481e+00   1.7053877e+00   2.6874763e+00   3.8897688e+00   3.6527400e+00   4.1085323e+00   5.1878354e+00   4.4401043e+00   4.2306533e+00   3.5668974e+00   4.8855250e+00   4.1984227e+00   4.3936084e+00   5.7667342e+00   3.8604223e+00   5.3386416e+00   4.1481805e+00   3.8457422e+00   3.8556395e+00   3.9253518e+00   4.2633467e+00   5.5746944e+00   4.6805795e+00   3.8185179e+00   4.1531225e+00   5.0514886e+00   4.2706189e+00   4.0732692e+00   4.0031242e+00   4.5383244e+00   4.3266944e+00   4.1312924e+00   4.3745457e+00   4.8862079e+00   4.8484299e+00   4.2820168e+00   4.7051757e+00   3.9401848e+00   3.2884177e+00   3.9767257e+00   5.2985037e+00   3.7419338e+00   4.7600849e+00   4.5926355e+00   3.9322406e+00   4.5116211e+00   5.0823619e+00   4.3725296e+00   3.7236864e+00   3.9623539e+00   4.0300759e+00   4.6141946e+00   4.1447443e+00   4.5866744e+00   4.8386745e+00   4.9461657e+00   4.6106150e+00   4.7813164e+00   5.3858108e+00   5.0919277e+00   5.1446449e+00   5.4739987e+00   4.5885042e+00   4.1414025e+00   4.9586480e+00   4.7213864e+00   5.2471037e+00   4.9661074e+00   4.3830009e+00   4.4568388e+00   4.6901031e+00   6.2154767e+00   5.6469307e+00   4.6501660e+00   4.6626232e+00   5.7036296e+00   4.7932843e+00   4.3038060e+00   4.5618709e+00   4.5655950e+00   4.2115551e+00   4.9692114e+00   4.7030193e+00   5.3377504e+00   4.5914343e+00   5.0293150e+00   4.5146706e+00   4.9581881e+00   
5.4087027e+00   4.2557771e+00   4.2671438e+00   4.1737149e+00   4.5756985e+00   4.7660397e+00   4.6314702e+00   4.8386745e+00   4.6734470e+00   4.5970177e+00   4.7412505e+00   5.2464126e+00   4.4890534e+00   4.0948317e+00   4.2440420e+00   1.0013399e+00   5.0299964e-01   4.6310132e-01   1.2040900e+00   5.9738093e-01   1.2286837e+00   1.4541908e+00   1.4279677e+00   2.1539145e+00   1.2617482e+00   9.9544409e-01   1.2082987e+00   1.8489243e+00   2.0066856e+00   1.1587093e+00   6.6217390e-01   7.5179033e-01   1.9839744e+00   1.7063292e+00   9.6659661e-01   1.9839744e+00   2.4156729e+00   1.2419907e+00   1.0495503e+00   3.8490499e+00   2.0330726e+00   1.0871867e+00   5.4779717e-01   2.2031378e+00   5.3106808e-01   1.9004159e+00   5.5576380e-01   1.4899949e+00   3.4307448e+00   3.0710756e+00   3.5952799e+00   4.1669813e+00   3.7116384e+00   3.3536981e+00   3.0466130e+00   3.7729829e+00   3.5082606e+00   3.4067800e+00   4.6484249e+00   3.0744089e+00   4.3424419e+00   3.3858347e+00   2.9098729e+00   3.2669110e+00   3.1198644e+00   3.3210229e+00   4.6553382e+00   3.6737423e+00   3.2048569e+00   3.2989479e+00   4.2245828e+00   3.4587219e+00   3.3255241e+00   3.3484846e+00   3.8660480e+00   3.7512964e+00   3.3482610e+00   3.3605387e+00   3.8511468e+00   3.8014113e+00   3.3411134e+00   3.9109127e+00   3.1105014e+00   2.7597977e+00   3.4146222e+00   4.3904048e+00   2.8767763e+00   3.7646132e+00   3.6317356e+00   3.1996946e+00   3.5585167e+00   3.9690108e+00   3.4365573e+00   2.8705339e+00   3.0890888e+00   3.2456270e+00   3.5108688e+00   3.2367228e+00   4.2147014e+00   4.0489906e+00   4.5035700e+00   3.9755362e+00   4.2591912e+00   5.0350768e+00   4.1207838e+00   4.6882252e+00   4.7707308e+00   4.4918348e+00   3.6612573e+00   4.2568131e+00   4.2184327e+00   4.3988058e+00   4.2632946e+00   3.9245996e+00   3.8864624e+00   4.7642090e+00   5.7424408e+00   4.7299069e+00   4.2784034e+00   3.8797952e+00   5.2832732e+00   4.0458553e+00   3.9446593e+00   4.2181053e+00   3.8300888e+00   
3.5427774e+00   4.3354099e+00   4.2438935e+00   4.8511407e+00   4.6817036e+00   4.4041714e+00   3.7859242e+00   4.1665370e+00   5.0618540e+00   3.9138566e+00   3.7274783e+00   3.4833346e+00   4.1288327e+00   4.3215818e+00   4.1859543e+00   4.0489906e+00   4.2965095e+00   4.2626474e+00   4.2257654e+00   4.4572702e+00   3.9184477e+00   3.7230473e+00   3.5604297e+00   1.0161882e+00   6.9420840e-01   4.8012872e-01   4.8284931e-01   6.9738730e-01   5.5709100e-01   5.3095950e-01   1.1723315e+00   3.1271814e-01   1.8699153e-01   2.9145160e-01   8.6143605e-01   1.0061402e+00   4.5257749e-01   1.4146831e+00   1.6902182e+00   9.9059199e-01   7.2340544e-01   5.0370871e-01   9.9059199e-01   1.4369223e+00   2.7124234e-01   1.3340137e-01   2.8614050e+00   1.1016806e+00   4.2656951e-01   7.5906970e-01   1.2087236e+00   7.1325427e-01   9.2761923e-01   5.3988754e-01   4.9448466e-01   3.3643791e+00   2.9159414e+00   3.4617249e+00   3.4323687e+00   3.3505903e+00   2.8169505e+00   2.9517065e+00   2.9146662e+00   3.1941250e+00   2.7393282e+00   3.7736317e+00   2.6933089e+00   3.6308668e+00   2.9914579e+00   2.3560812e+00   3.0907905e+00   2.6932687e+00   2.7021788e+00   4.0389540e+00   2.9648098e+00   2.9980910e+00   2.8201257e+00   3.7183934e+00   2.9922398e+00   2.9661287e+00   3.0950932e+00   3.5513156e+00   3.5484439e+00   2.9420200e+00   2.6629529e+00   3.1013619e+00   3.0347859e+00   2.7417410e+00   3.4474072e+00   2.6495954e+00   2.6875764e+00   3.2488445e+00   3.7904353e+00   2.3985415e+00   3.0725852e+00   2.9704304e+00   2.8556850e+00   2.9300511e+00   3.1104511e+00   2.8291506e+00   2.4008046e+00   2.5836379e+00   2.8456807e+00   2.6907656e+00   2.6814159e+00   4.1737238e+00   3.5932878e+00   4.3853076e+00   3.6802688e+00   4.0749525e+00   4.9692376e+00   3.4394110e+00   4.5331007e+00   4.3742923e+00   4.6787296e+00   3.5632387e+00   3.8923023e+00   4.0628985e+00   3.8689922e+00   3.9102807e+00   3.8336686e+00   3.6675649e+00   5.0555526e+00   5.5454277e+00   4.0994961e+00   
4.2439469e+00   3.4449831e+00   5.1429556e+00   3.6472400e+00   3.9304610e+00   4.1916938e+00   3.4565067e+00   3.2536517e+00   4.0373591e+00   4.1087274e+00   4.6703619e+00   4.9904781e+00   4.1152831e+00   3.4023949e+00   3.6756751e+00   5.0151763e+00   3.9231895e+00   3.5466262e+00   3.1760855e+00   4.0346845e+00   4.2216886e+00   4.1038501e+00   3.5932878e+00   4.2504107e+00   4.2656428e+00   4.0705667e+00   3.9971917e+00   3.7133040e+00   3.7182295e+00   3.2440381e+00   7.3339246e-01   9.9973471e-01   7.7988766e-01   1.4669572e+00   1.3868865e+00   1.4360884e+00   2.0344949e+00   1.2593779e+00   9.3451915e-01   1.1232628e+00   1.8421759e+00   1.9514085e+00   1.0061402e+00   9.6168382e-01   9.7747632e-01   1.9029496e+00   1.6513423e+00   7.7821113e-01   1.9029496e+00   2.4366790e+00   1.1857824e+00   1.1156669e+00   3.7575639e+00   2.1090460e+00   1.1623508e+00   7.4500632e-01   2.1481102e+00   7.3851064e-01   1.9301732e+00   5.6262711e-01   1.4458364e+00   3.0574507e+00   2.7604800e+00   3.2354442e+00   3.9296796e+00   3.3802783e+00   3.0910114e+00   2.7653977e+00   3.6086433e+00   3.1477156e+00   3.2299948e+00   4.4531271e+00   2.8182580e+00   4.0359059e+00   3.0838698e+00   2.6832031e+00   2.9127171e+00   2.8999239e+00   3.0235972e+00   4.3556993e+00   3.4142800e+00   2.9871882e+00   2.9947610e+00   3.9084388e+00   3.1359326e+00   2.9852014e+00   3.0007807e+00   3.4997296e+00   3.4243092e+00   3.0709062e+00   3.0889274e+00   3.6054231e+00   3.5510321e+00   3.0652992e+00   3.6307363e+00   2.9199707e+00   2.5302845e+00   3.0705222e+00   4.0683526e+00   2.6430958e+00   3.5303998e+00   3.3838093e+00   2.9011205e+00   3.2808511e+00   3.7876206e+00   3.1898591e+00   2.6081677e+00   2.8342553e+00   2.9259899e+00   3.3400468e+00   2.9809771e+00   4.0130133e+00   3.8156727e+00   4.1823301e+00   3.6854232e+00   3.9919300e+00   4.6844795e+00   3.9756960e+00   4.3245814e+00   4.4396397e+00   4.2453585e+00   3.3946374e+00   3.9634682e+00   3.9204865e+00   4.1772364e+00   
4.0766155e+00   3.6959934e+00   3.5831715e+00   4.4790872e+00   5.3894840e+00   4.4405497e+00   4.0027890e+00   3.6857786e+00   4.9137586e+00   3.7567964e+00   3.6729588e+00   3.8728151e+00   3.5543966e+00   3.2848126e+00   4.0598486e+00   3.8712880e+00   4.4886485e+00   4.3722180e+00   4.1373490e+00   3.4683949e+00   3.8543469e+00   4.7248681e+00   3.7193644e+00   3.4383872e+00   3.2381387e+00   3.8297356e+00   4.0647650e+00   3.9099360e+00   3.8156727e+00   4.0266221e+00   4.0296168e+00   3.9579549e+00   4.1722551e+00   3.6379295e+00   3.5328511e+00   3.3224979e+00   1.0061402e+00   2.6525508e-01   8.2148003e-01   1.1879760e+00   1.0251165e+00   1.8544941e+00   9.4128180e-01   7.1446962e-01   9.4128180e-01   1.4726083e+00   1.6607117e+00   9.9973471e-01   7.4965096e-01   1.0510795e+00   1.6532822e+00   1.4055304e+00   8.6143605e-01   1.6532822e+00   2.0368618e+00   9.3178083e-01   7.1143905e-01   3.5363390e+00   1.6307900e+00   8.0686941e-01   2.6184788e-01   1.8811296e+00   1.4276574e-01   1.5165187e+00   3.5874135e-01   1.1682143e+00   3.5420892e+00   3.1213639e+00   3.6765721e+00   3.9907084e+00   3.7063187e+00   3.2329517e+00   3.1017380e+00   3.5164906e+00   3.5204595e+00   3.2215483e+00   4.4016409e+00   3.0251724e+00   4.2008255e+00   3.3367044e+00   2.7940225e+00   3.3344791e+00   3.0219495e+00   3.1880051e+00   4.5546425e+00   3.5075399e+00   3.1952269e+00   3.2406785e+00   4.1563139e+00   3.3848806e+00   3.3178898e+00   3.3859281e+00   3.8870730e+00   3.7997125e+00   3.2944053e+00   3.2110142e+00   3.6683444e+00   3.6131226e+00   3.2236002e+00   3.8317071e+00   2.9830921e+00   2.7973167e+00   3.4787185e+00   4.2995699e+00   2.7671612e+00   3.5982071e+00   3.4619260e+00   3.1665431e+00   3.4310890e+00   3.7235030e+00   3.2953414e+00   2.7679629e+00   2.9819595e+00   3.2106653e+00   3.2879746e+00   3.1196883e+00   4.2713770e+00   3.9634682e+00   4.5846990e+00   3.9587038e+00   4.2895460e+00   5.1416821e+00   3.9119997e+00   4.7572039e+00   4.7460626e+00   
4.6638344e+00   3.7280454e+00   4.2349181e+00   4.2803486e+00   4.2908242e+00   4.2152718e+00   3.9857018e+00   3.9070797e+00   4.9741957e+00   5.8097018e+00   4.6049285e+00   4.3788345e+00   3.7893243e+00   5.3689314e+00   4.0140467e+00   4.0363955e+00   4.3259832e+00   3.8006490e+00   3.5269016e+00   4.3297076e+00   4.3216441e+00   4.9220177e+00   4.9100057e+00   4.4024600e+00   3.7489348e+00   4.0736926e+00   5.1891895e+00   3.9903212e+00   3.7514870e+00   3.4564041e+00   4.2166575e+00   4.3944655e+00   4.2851348e+00   3.9634682e+00   4.3836343e+00   4.3634474e+00   4.2898748e+00   4.4067690e+00   3.9524852e+00   3.7906894e+00   3.5162082e+00   8.3156200e-01   1.1417173e+00   5.8221430e-01   7.3339246e-01   1.0428797e+00   5.5247822e-01   3.5266705e-01   2.9537172e-01   9.6466498e-01   1.0034646e+00   2.8553149e-01   1.6415483e+00   1.8567917e+00   9.3451915e-01   7.2553812e-01   3.4520795e-01   9.3451915e-01   1.5364952e+00   3.7960845e-01   5.9589853e-01   2.7723424e+00   1.3124532e+00   7.5130648e-01   1.0314203e+00   1.1925354e+00   9.9272943e-01   1.0839891e+00   7.1143905e-01   5.6164055e-01   3.0511653e+00   2.6629268e+00   3.1545235e+00   3.1980310e+00   3.0467396e+00   2.5772061e+00   2.7369167e+00   2.7576827e+00   2.8651003e+00   2.5868532e+00   3.5774656e+00   2.4748633e+00   3.3139897e+00   2.7217211e+00   2.1506369e+00   2.7852281e+00   2.5158340e+00   2.4059801e+00   3.7433691e+00   2.7041074e+00   2.8389661e+00   2.5310559e+00   3.4176981e+00   2.6902386e+00   2.6527549e+00   2.7866168e+00   3.2144386e+00   3.2675625e+00   2.6971865e+00   2.3822357e+00   2.8532332e+00   2.7780124e+00   2.4721123e+00   3.1952919e+00   2.5042136e+00   2.5315299e+00   2.9556760e+00   3.4693709e+00   2.1993495e+00   2.8460127e+00   2.7344861e+00   2.5960616e+00   2.6558880e+00   2.9309653e+00   2.5980406e+00   2.1695355e+00   2.3550298e+00   2.5518802e+00   2.5245372e+00   2.4441489e+00   4.0319503e+00   3.3933783e+00   4.1146478e+00   3.4339804e+00   3.8578841e+00   
4.6712206e+00   3.3248410e+00   4.2174528e+00   4.0704050e+00   4.4988341e+00   3.3546526e+00   3.6317702e+00   3.8139412e+00   3.6743372e+00   3.7645284e+00   3.6614225e+00   3.4131399e+00   4.8439861e+00   5.2322637e+00   3.8189229e+00   4.0256031e+00   3.2902097e+00   4.8190347e+00   3.3870859e+00   3.7223145e+00   3.9080259e+00   3.2141230e+00   3.0414452e+00   3.8022687e+00   3.7869665e+00   4.3507688e+00   4.7565318e+00   3.8893282e+00   3.1162635e+00   3.3877625e+00   4.7282676e+00   3.7917280e+00   3.3122397e+00   2.9763122e+00   3.7889092e+00   4.0173731e+00   3.8788191e+00   3.3933783e+00   4.0384828e+00   4.0914753e+00   3.8500022e+00   3.7349483e+00   3.4804621e+00   3.5920363e+00   3.0535851e+00   7.5284003e-01   9.3865015e-01   8.5442446e-01   1.6409761e+00   7.0463400e-01   5.4408162e-01   7.5179033e-01   1.2786676e+00   1.4553344e+00   7.8100392e-01   1.0100290e+00   1.2786676e+00   1.4586696e+00   1.2008045e+00   7.2638147e-01   1.4586696e+00   1.8445759e+00   7.3985997e-01   5.0731024e-01   3.3136601e+00   1.4580439e+00   5.4702555e-01   3.2339566e-01   1.6607117e+00   3.5366952e-01   1.3290015e+00   3.5639126e-01   9.6825676e-01   3.4059851e+00   2.9602214e+00   3.5257340e+00   3.7492673e+00   3.5115913e+00   3.0191277e+00   2.9517483e+00   3.2720571e+00   3.3411331e+00   2.9834222e+00   4.1580731e+00   2.8211532e+00   3.9717534e+00   3.1414634e+00   2.5643903e+00   3.1728076e+00   2.8176969e+00   2.9703822e+00   4.3244606e+00   3.2734582e+00   3.0210021e+00   3.0274003e+00   3.9433841e+00   3.1866772e+00   3.1269680e+00   3.2103093e+00   3.7065013e+00   3.6299273e+00   3.0909484e+00   2.9772513e+00   3.4297321e+00   3.3756936e+00   2.9971174e+00   3.6243253e+00   2.7759818e+00   2.6501700e+00   3.3189004e+00   4.0764633e+00   2.5559925e+00   3.3602243e+00   3.2356862e+00   2.9780147e+00   3.2026716e+00   3.4783251e+00   3.0685582e+00   2.5635668e+00   2.7679629e+00   3.0129566e+00   3.0370166e+00   2.8975180e+00   4.1264564e+00   3.7496421e+00   
4.4295218e+00   3.7774558e+00   4.1191095e+00   5.0038078e+00   3.6756489e+00   4.6074750e+00   4.5494422e+00   4.5665604e+00   3.5702411e+00   4.0355009e+00   4.1137066e+00   4.0638414e+00   4.0067360e+00   3.8250619e+00   3.7375780e+00   4.9153359e+00   5.6444336e+00   4.3773916e+00   4.2331397e+00   3.5751580e+00   5.2199809e+00   3.8075775e+00   3.9003626e+00   4.1989415e+00   3.5967191e+00   3.3381510e+00   4.1394208e+00   4.1772604e+00   4.7627066e+00   4.8574416e+00   4.2113835e+00   3.5565415e+00   3.8744064e+00   5.0458107e+00   3.8530980e+00   3.5894641e+00   3.2635788e+00   4.0605147e+00   4.2327162e+00   4.1232607e+00   3.7496421e+00   4.2381044e+00   4.2216886e+00   4.1152818e+00   4.1901775e+00   3.7759339e+00   3.6506667e+00   3.3268510e+00   1.0748172e+00   7.2889003e-01   1.5046031e+00   7.9398919e-01   8.1148630e-01   8.8835337e-01   9.9058911e-01   1.2262672e+00   1.1380274e+00   1.3972288e+00   1.7741287e+00   1.2483935e+00   1.0474897e+00   1.1240042e+00   1.2483935e+00   1.4149548e+00   8.1096210e-01   5.7672351e-01   3.0082939e+00   9.6865373e-01   8.2273123e-01   9.5195566e-01   1.4288989e+00   8.3246212e-01   9.4996842e-01   9.2092295e-01   8.7375509e-01   4.0151215e+00   3.5231786e+00   4.1026785e+00   3.8908802e+00   3.9665585e+00   3.3438394e+00   3.5293285e+00   3.2540384e+00   3.8314935e+00   3.1634347e+00   4.1178317e+00   3.2512254e+00   4.1561446e+00   3.5717750e+00   2.8833448e+00   3.7345570e+00   3.1952822e+00   3.2548783e+00   4.5820687e+00   3.4621656e+00   3.5141919e+00   3.4137993e+00   4.2939653e+00   3.5777026e+00   3.5926398e+00   3.7328374e+00   4.1920808e+00   4.1652091e+00   3.5073274e+00   3.1921998e+00   3.5706840e+00   3.5044583e+00   3.2904241e+00   3.9914615e+00   3.1120432e+00   3.2204607e+00   3.8807669e+00   4.3582892e+00   2.9219284e+00   3.5476488e+00   3.4540639e+00   3.4397115e+00   3.4680001e+00   3.4670753e+00   3.3367044e+00   2.9471549e+00   3.1205392e+00   3.4518638e+00   3.0783517e+00   3.2145380e+00   
4.6697624e+00   4.0951447e+00   4.9940386e+00   4.2442248e+00   4.6312366e+00   5.5957028e+00   3.7901714e+00   5.1629764e+00   4.9662629e+00   5.2245876e+00   4.1326097e+00   4.4648550e+00   4.6557857e+00   4.3477764e+00   4.3833898e+00   4.3689111e+00   4.2520226e+00   5.6153369e+00   6.1715044e+00   4.6178873e+00   4.8201070e+00   3.9129644e+00   5.7808751e+00   4.2195149e+00   4.4949009e+00   4.8099428e+00   4.0213766e+00   3.8049151e+00   4.5961475e+00   4.7475868e+00   5.3061067e+00   5.5699347e+00   4.6684257e+00   3.9900166e+00   4.2272640e+00   5.6441645e+00   4.4197984e+00   4.1176453e+00   3.7157925e+00   4.6326704e+00   4.7820862e+00   4.6921349e+00   4.0951447e+00   4.8160040e+00   4.8050680e+00   4.6459078e+00   4.5554678e+00   4.2905569e+00   4.2115152e+00   3.7649212e+00   5.9314593e-01   8.0686941e-01   2.9691107e-01   6.2826980e-01   5.0121118e-01   6.6653737e-01   7.0834786e-01   4.6310132e-01   1.9251840e+00   2.1737519e+00   7.4743804e-01   5.5009731e-01   8.0748088e-01   7.4743804e-01   1.1828955e+00   4.6964680e-01   5.8942278e-01   2.4421558e+00   9.8677196e-01   4.9772204e-01   1.1660949e+00   8.4116354e-01   1.2196311e+00   7.7538587e-01   1.0376697e+00   4.4499696e-01   3.0983271e+00   2.5986852e+00   3.1534326e+00   2.8897192e+00   2.9336987e+00   2.3392252e+00   2.6586759e+00   2.3702978e+00   2.8165027e+00   2.2079130e+00   3.2363154e+00   2.2664200e+00   3.1218253e+00   2.5673178e+00   1.8638939e+00   2.7710337e+00   2.2535803e+00   2.2156046e+00   3.5264693e+00   2.4375344e+00   2.6410495e+00   2.3635223e+00   3.2419868e+00   2.5535068e+00   2.5663186e+00   2.7372400e+00   3.1657714e+00   3.1910277e+00   2.5035271e+00   2.1450705e+00   2.5644558e+00   2.5011730e+00   2.2417546e+00   2.9810976e+00   2.2012005e+00   2.4146140e+00   2.9247451e+00   3.2953953e+00   1.9474034e+00   2.5368532e+00   2.4541089e+00   2.4554575e+00   2.4210504e+00   2.5661600e+00   2.3207574e+00   1.9628165e+00   2.1171985e+00   2.4261018e+00   2.1366217e+00   
2.1917681e+00   3.8609963e+00   3.1157672e+00   4.0464741e+00   3.2769657e+00   3.7011972e+00   4.6584806e+00   2.9074576e+00   4.1962146e+00   3.9292452e+00   4.4717831e+00   3.2385313e+00   3.4507621e+00   3.7050324e+00   3.3601278e+00   3.4577371e+00   3.4991206e+00   3.2980589e+00   4.9174048e+00   5.1685262e+00   3.5822256e+00   3.9345678e+00   2.9743611e+00   4.8043725e+00   3.1946631e+00   3.6425792e+00   3.9147944e+00   3.0137993e+00   2.8509730e+00   3.6160190e+00   3.7930649e+00   4.3160863e+00   4.8705552e+00   3.6935350e+00   2.9765851e+00   3.2157655e+00   4.7030966e+00   3.6383061e+00   3.1964791e+00   2.7656084e+00   3.7055141e+00   3.8768834e+00   3.7701724e+00   3.1157672e+00   3.9366463e+00   3.9674740e+00   3.7027144e+00   3.5167570e+00   3.3369517e+00   3.4319544e+00   2.8332022e+00   9.6865373e-01   3.9487224e-01   5.8131330e-01   5.6003943e-01   5.0621589e-01   7.1247632e-01   8.0317491e-01   1.7053539e+00   2.0491684e+00   7.4957404e-01   6.5459290e-01   9.3991103e-01   7.4957404e-01   1.0954558e+00   4.2737382e-01   4.9430028e-01   2.5884539e+00   7.4949264e-01   6.4432393e-01   1.0251728e+00   9.7391954e-01   1.0055888e+00   5.9279023e-01   9.4588685e-01   4.3798311e-01   3.5017283e+00   3.0032209e+00   3.5640998e+00   3.2626300e+00   3.3723783e+00   2.7101865e+00   3.0361435e+00   2.6574564e+00   3.2363742e+00   2.5684615e+00   3.5220489e+00   2.6863775e+00   3.5035562e+00   2.9639854e+00   2.2954282e+00   3.1974234e+00   2.6186896e+00   2.5919605e+00   3.9485388e+00   2.8137879e+00   3.0123600e+00   2.8059985e+00   3.6581986e+00   2.9351026e+00   2.9984934e+00   3.1711590e+00   3.5947528e+00   3.6146776e+00   2.9159819e+00   2.5508141e+00   2.9298445e+00   2.8588898e+00   2.6582994e+00   3.3705985e+00   2.5395254e+00   2.7634723e+00   3.3411818e+00   3.7151762e+00   2.3273691e+00   2.9184140e+00   2.8006021e+00   2.8512853e+00   2.8277392e+00   2.8675465e+00   2.7048983e+00   2.3342128e+00   2.5075548e+00   2.8488482e+00   2.4938133e+00   
2.5939117e+00   4.2210242e+00   3.5094230e+00   4.4613495e+00   3.6611526e+00   4.1011462e+00   5.0575391e+00   3.2183295e+00   4.5889909e+00   4.3421582e+00   4.8334387e+00   3.6441411e+00   3.8749352e+00   4.1286607e+00   3.7602700e+00   3.8694583e+00   3.9027404e+00   3.6910974e+00   5.2330448e+00   5.5920875e+00   3.9683831e+00   4.3421746e+00   3.3618744e+00   5.2099570e+00   3.6296154e+00   4.0192804e+00   4.2904705e+00   3.4453138e+00   3.2560920e+00   4.0303932e+00   4.1835729e+00   4.7330562e+00   5.1897695e+00   4.1126264e+00   3.3744864e+00   3.5691373e+00   5.1336306e+00   3.9986271e+00   3.5735980e+00   3.1698618e+00   4.1283627e+00   4.2954773e+00   4.2156054e+00   3.5094230e+00   4.3310092e+00   4.3633885e+00   4.1455698e+00   3.9545856e+00   3.7585686e+00   3.7901495e+00   3.2094268e+00   9.5902306e-01   1.1795364e+00   9.6032771e-01   5.8652824e-01   3.3395426e-01   1.0753036e+00   2.5524007e+00   2.8349345e+00   2.9691107e-01   5.1396090e-01   1.3127309e+00   2.9691107e-01   7.4426155e-01   9.3211669e-01   1.1729612e+00   1.7369516e+00   8.7560645e-01   1.2666796e+00   1.8751947e+00   2.9724335e-01   1.8489906e+00   6.7745878e-01   1.6555341e+00   7.0111465e-01   3.4067014e+00   2.9452188e+00   3.4231096e+00   2.6265336e+00   3.0474186e+00   2.3887954e+00   3.0652160e+00   1.9891259e+00   2.9589070e+00   2.1699637e+00   2.7527251e+00   2.5008826e+00   2.7949298e+00   2.7160947e+00   1.9851008e+00   3.0428102e+00   2.4755098e+00   2.1256864e+00   3.3327734e+00   2.2236827e+00   3.0131023e+00   2.4300526e+00   3.1928299e+00   2.6040863e+00   2.7075499e+00   2.9536823e+00   3.2710263e+00   3.4338296e+00   2.6673396e+00   1.9555334e+00   2.2858019e+00   2.1897213e+00   2.1973377e+00   3.0392888e+00   2.4158794e+00   2.8941568e+00   3.2025759e+00   3.1091590e+00   2.1471303e+00   2.3710990e+00   2.3347284e+00   2.6698387e+00   2.3133518e+00   2.1525596e+00   2.2918773e+00   2.1454625e+00   2.2398142e+00   2.5636830e+00   1.8343082e+00   2.2388656e+00   
4.2714137e+00   3.2107728e+00   4.3091817e+00   3.4717121e+00   3.9768763e+00   4.9078859e+00   2.8122927e+00   4.3885241e+00   3.9504682e+00   4.9558940e+00   3.6044479e+00   3.5632387e+00   3.9760735e+00   3.3646188e+00   3.6594046e+00   3.8782141e+00   3.5443654e+00   5.4091146e+00   5.2988184e+00   3.3878489e+00   4.2952367e+00   3.1303129e+00   4.9761618e+00   3.2919446e+00   4.0362588e+00   4.2291285e+00   3.1618923e+00   3.1077588e+00   3.7959134e+00   4.0112445e+00   4.4810404e+00   5.3509795e+00   3.8831153e+00   3.0844802e+00   3.1980603e+00   4.9707249e+00   4.0945548e+00   3.4918172e+00   3.0237585e+00   4.0192804e+00   4.2092252e+00   4.1017980e+00   3.2107728e+00   4.2952415e+00   4.3790330e+00   3.9936934e+00   3.5312555e+00   3.6065699e+00   3.8937621e+00   3.0840989e+00   4.2827238e-01   3.7398306e-01   6.4241342e-01   7.7828522e-01   4.8636669e-01   1.6800011e+00   1.9622194e+00   8.0686941e-01   5.7691891e-01   7.1789533e-01   8.0686941e-01   1.2139401e+00   2.9406726e-01   3.1507080e-01   2.6166211e+00   9.1398375e-01   3.4909881e-01   9.4580058e-01   9.6922609e-01   9.6659661e-01   7.2638147e-01   8.2574748e-01   3.6704030e-01   3.2939549e+00   2.8018134e+00   3.3643791e+00   3.1688464e+00   3.1882120e+00   2.5926123e+00   2.8420400e+00   2.6229843e+00   3.0569434e+00   2.4659441e+00   3.4932809e+00   2.5062529e+00   3.4038365e+00   2.8103848e+00   2.1284730e+00   2.9880998e+00   2.4809350e+00   2.4830222e+00   3.8129321e+00   2.7155086e+00   2.8370039e+00   2.6306749e+00   3.5140670e+00   2.8045035e+00   2.8143558e+00   2.9698163e+00   3.4126571e+00   3.4177063e+00   2.7508387e+00   2.4282690e+00   2.8424700e+00   2.7781836e+00   2.5174968e+00   3.2360555e+00   2.4214382e+00   2.5753180e+00   3.1397228e+00   3.5790751e+00   2.1850441e+00   2.8131786e+00   2.7177153e+00   2.6876771e+00   2.6993734e+00   2.8253248e+00   2.5871836e+00   2.1990767e+00   2.3678133e+00   2.6762366e+00   2.4070540e+00   2.4551607e+00   4.0383784e+00   3.3671653e+00   
4.2642537e+00   3.5070157e+00   3.9193977e+00   4.8684861e+00   3.1505769e+00   4.4165051e+00   4.1898287e+00   4.6203725e+00   3.4386758e+00   3.7047820e+00   3.9273366e+00   3.6237778e+00   3.6947343e+00   3.6968840e+00   3.5190023e+00   5.0398879e+00   5.4089760e+00   3.8611646e+00   4.1322470e+00   3.2145601e+00   5.0295849e+00   3.4546083e+00   3.8248698e+00   4.1055247e+00   3.2664139e+00   3.0788010e+00   3.8567883e+00   4.0060320e+00   4.5478501e+00   4.9912214e+00   3.9339246e+00   3.2236553e+00   3.4677443e+00   4.9178815e+00   3.8042163e+00   3.4041321e+00   2.9939885e+00   3.9171296e+00   4.0866646e+00   3.9845548e+00   3.3671653e+00   4.1322520e+00   4.1520423e+00   3.9277271e+00   3.7880802e+00   3.5624203e+00   3.5967687e+00   3.0549169e+00   2.3749211e-01   9.2006504e-01   1.0428797e+00   4.2450569e-01   1.3899721e+00   1.6555341e+00   9.9973471e-01   7.5230154e-01   3.7960845e-01   9.9973471e-01   1.5086315e+00   2.6033464e-01   2.9724335e-01   2.8989712e+00   1.1937015e+00   5.7988427e-01   7.8318003e-01   1.2583645e+00   7.0463400e-01   1.0034646e+00   4.7680727e-01   5.2374483e-01   3.3114294e+00   2.8933417e+00   3.4177063e+00   3.4461307e+00   3.3255941e+00   2.8176969e+00   2.9380167e+00   2.9507452e+00   3.1524130e+00   2.7797208e+00   3.7960371e+00   2.6995806e+00   3.6095704e+00   2.9762135e+00   2.3753547e+00   3.0506196e+00   2.7121484e+00   2.6831858e+00   4.0299127e+00   2.9653560e+00   3.0144396e+00   2.8058449e+00   3.7011663e+00   2.9654419e+00   2.9344166e+00   3.0597490e+00   3.5085997e+00   3.5226725e+00   2.9395346e+00   2.6564538e+00   3.1076159e+00   3.0365838e+00   2.7379532e+00   3.4446760e+00   2.6796909e+00   2.6912430e+00   3.2129972e+00   3.7686900e+00   2.4108238e+00   3.0879511e+00   2.9762529e+00   2.8408428e+00   2.9254092e+00   3.1396427e+00   2.8384396e+00   2.3985415e+00   2.5881776e+00   2.8229620e+00   2.7289403e+00   2.6869860e+00   4.1910480e+00   3.6130663e+00   4.3602249e+00   3.6707779e+00   4.0745117e+00   
4.9277939e+00   3.4934873e+00   4.4880495e+00   4.3514534e+00   4.6654573e+00   3.5594056e+00   3.8864758e+00   4.0498126e+00   3.8963526e+00   3.9502563e+00   3.8456535e+00   3.6509387e+00   5.0146974e+00   5.5101576e+00   4.0937915e+00   4.2345704e+00   3.4807992e+00   5.0960662e+00   3.6438388e+00   3.9190152e+00   4.1487738e+00   3.4580678e+00   3.2588014e+00   4.0378648e+00   4.0580582e+00   4.6285945e+00   4.9381887e+00   4.1199488e+00   3.3816601e+00   3.6553790e+00   4.9813108e+00   3.9405186e+00   3.5335600e+00   3.1869497e+00   4.0186781e+00   4.2240093e+00   4.0988575e+00   3.6130663e+00   4.2429720e+00   4.2712088e+00   4.0719125e+00   3.9975817e+00   3.7087600e+00   3.7375363e+00   3.2561951e+00   7.6824760e-01   8.5141186e-01   3.6086962e-01   1.6207126e+00   1.8802756e+00   7.9394533e-01   5.3286499e-01   4.3319335e-01   7.9394533e-01   1.3370188e+00   1.3340137e-01   3.6319073e-01   2.6778759e+00   1.0720705e+00   6.3173774e-01   1.0072799e+00   1.0495503e+00   9.3727156e-01   8.5893964e-01   7.0463400e-01   3.3395426e-01   3.3027871e+00   2.8812178e+00   3.3955887e+00   3.2888081e+00   3.2512254e+00   2.7283479e+00   2.9463809e+00   2.7764635e+00   3.0911130e+00   2.6620270e+00   3.6054231e+00   2.6430457e+00   3.4442793e+00   2.9123088e+00   2.2793286e+00   3.0205073e+00   2.6595069e+00   2.5635668e+00   3.8866925e+00   2.8178210e+00   3.0060120e+00   2.7101865e+00   3.5930006e+00   2.8829084e+00   2.8651003e+00   3.0121201e+00   3.4400598e+00   3.4869142e+00   2.8725792e+00   2.5068325e+00   2.9480926e+00   2.8720011e+00   2.6183827e+00   3.3619078e+00   2.6263990e+00   2.7176350e+00   3.1874455e+00   3.6289342e+00   2.3459758e+00   2.9476507e+00   2.8536577e+00   2.7917808e+00   2.7959976e+00   2.9585178e+00   2.7268209e+00   2.3347284e+00   2.5080346e+00   2.7508387e+00   2.5565749e+00   2.5881776e+00   4.2068537e+00   3.5342439e+00   4.3380559e+00   3.6271373e+00   4.0499864e+00   4.9127682e+00   3.3748745e+00   4.4574738e+00   4.2666131e+00   
4.7143178e+00   3.5549829e+00   3.8149934e+00   4.0227420e+00   3.7946027e+00   3.8919837e+00   3.8432323e+00   3.6208873e+00   5.0849580e+00   5.4596452e+00   3.9569474e+00   4.2354064e+00   3.4126422e+00   5.0611947e+00   3.5638945e+00   3.9334644e+00   4.1519487e+00   3.3885059e+00   3.2191166e+00   3.9849073e+00   4.0334328e+00   4.5859724e+00   5.0075986e+00   4.0680600e+00   3.3134027e+00   3.5670952e+00   4.9632121e+00   3.9675062e+00   3.5176551e+00   3.1453377e+00   4.0038982e+00   4.2114761e+00   4.0820077e+00   3.5342439e+00   4.2453199e+00   4.2844704e+00   4.0426067e+00   3.8984747e+00   3.6765607e+00   3.7642725e+00   3.2184749e+00   2.6033464e-01   9.9962901e-01   2.1664244e+00   2.5030472e+00   3.6319073e-01   4.2737382e-01   1.2004100e+00   3.6319073e-01   6.1067563e-01   6.7030885e-01   8.0996690e-01   2.1006743e+00   4.0020411e-01   9.4057729e-01   1.4985933e+00   5.0731024e-01   1.4656715e+00   1.6562722e-01   1.3630799e+00   4.4417668e-01   3.6433721e+00   3.1333439e+00   3.6757970e+00   3.0281244e+00   3.3717708e+00   2.6624050e+00   3.1998148e+00   2.3430115e+00   3.2729115e+00   2.4219924e+00   3.1700354e+00   2.7177110e+00   3.2760900e+00   2.9826961e+00   2.2410752e+00   3.2981044e+00   2.6452183e+00   2.4901533e+00   3.7687554e+00   2.6225184e+00   3.1280668e+00   2.7635526e+00   3.5692464e+00   2.9177616e+00   3.0187150e+00   3.2354747e+00   3.6116754e+00   3.6909005e+00   2.9234404e+00   2.3731183e+00   2.6987010e+00   2.6178190e+00   2.5515904e+00   3.3308561e+00   2.5554841e+00   2.9570491e+00   3.4460544e+00   3.5549612e+00   2.3407691e+00   2.7326628e+00   2.6614903e+00   2.9042354e+00   2.6919656e+00   2.5430017e+00   2.6000033e+00   2.3578684e+00   2.4871797e+00   2.8599439e+00   2.2045434e+00   2.5287499e+00   4.3690091e+00   3.4628576e+00   4.5552847e+00   3.7077077e+00   4.1799642e+00   5.1678263e+00   3.0379779e+00   4.6720960e+00   4.3013447e+00   5.0550361e+00   3.7713495e+00   3.8605708e+00   4.2104897e+00   3.6525334e+00   
3.8549710e+00   4.0229434e+00   3.7708197e+00   5.5011831e+00   5.6245097e+00   3.7945557e+00   4.4755001e+00   3.3306776e+00   5.2815051e+00   3.5995462e+00   4.1814664e+00   4.4417427e+00   3.4376057e+00   3.3113002e+00   4.0501276e+00   4.2847585e+00   4.7905454e+00   5.4600808e+00   4.1319681e+00   3.3795104e+00   3.5192584e+00   5.2359042e+00   4.1708373e+00   3.6809073e+00   3.2190305e+00   4.2365575e+00   4.3973146e+00   4.3149219e+00   3.4628576e+00   4.4657183e+00   4.5132257e+00   4.2167698e+00   3.8749352e+00   3.8293474e+00   3.9628758e+00   3.2623926e+00   1.0371214e+00   2.3606689e+00   2.6764689e+00   1.8699153e-01   4.0363332e-01   1.2627589e+00   1.8699153e-01   5.6164055e-01   7.8305765e-01   9.7747632e-01   1.8924893e+00   5.6164055e-01   1.0881632e+00   1.6837963e+00   2.8845946e-01   1.6545637e+00   3.5266705e-01   1.5108472e+00   5.3286499e-01   3.5596461e+00   3.0642731e+00   3.5820652e+00   2.8366432e+00   3.2382208e+00   2.5375138e+00   3.1537738e+00   2.1559444e+00   3.1474432e+00   2.2926143e+00   2.9585178e+00   2.6250629e+00   3.0590120e+00   2.8699760e+00   2.1233327e+00   3.2024265e+00   2.5670381e+00   2.3272067e+00   3.5735096e+00   2.4368431e+00   3.0826231e+00   2.6212838e+00   3.4052824e+00   2.7833861e+00   2.8923043e+00   3.1255601e+00   3.4747564e+00   3.5908785e+00   2.8135312e+00   2.1839196e+00   2.5032729e+00   2.4158569e+00   2.3928312e+00   3.2017644e+00   2.4862299e+00   2.9403226e+00   3.3545989e+00   3.3587820e+00   2.2520446e+00   2.5607059e+00   2.5059290e+00   2.8073565e+00   2.5209771e+00   2.3430115e+00   2.4562939e+00   2.2633781e+00   2.3755041e+00   2.7368591e+00   2.0165442e+00   2.3969046e+00   4.3354025e+00   3.3475977e+00   4.4615703e+00   3.6095772e+00   4.0990362e+00   5.0710534e+00   2.9144612e+00   4.5627576e+00   4.1522674e+00   5.0316497e+00   3.7102339e+00   3.7341705e+00   4.1195531e+00   3.5174471e+00   3.7659275e+00   3.9693828e+00   3.6809073e+00   5.4858042e+00   5.4945039e+00   3.6088006e+00   
4.4109170e+00   3.2362256e+00   5.1634795e+00   3.4678413e+00   4.1322470e+00   4.3666536e+00   3.3199204e+00   3.2266664e+00   3.9433408e+00   4.1815045e+00   4.6694810e+00   5.4392260e+00   4.0273519e+00   3.2552513e+00   3.3773249e+00   5.1375752e+00   4.1484621e+00   3.6075786e+00   3.1365574e+00   4.1554935e+00   4.3260165e+00   4.2353581e+00   3.3475977e+00   4.4043042e+00   4.4676627e+00   4.1295047e+00   3.7244531e+00   3.7408419e+00   3.9430201e+00   3.1856251e+00   1.6790448e+00   1.8683303e+00   9.9891776e-01   7.3735391e-01   3.8639663e-01   9.9891776e-01   1.5462701e+00   4.4713936e-01   5.6262711e-01   2.7653818e+00   1.3238834e+00   5.9868400e-01   1.0167074e+00   1.1817121e+00   1.0267382e+00   1.1036371e+00   7.4965096e-01   5.9868400e-01   2.9920629e+00   2.5767485e+00   3.0904477e+00   3.1383073e+00   2.9738737e+00   2.5155127e+00   2.6450302e+00   2.7097016e+00   2.8120208e+00   2.4963677e+00   3.5442384e+00   2.3737852e+00   3.2878773e+00   2.6552959e+00   2.0482711e+00   2.7132601e+00   2.4244329e+00   2.3725931e+00   3.6825624e+00   2.6567419e+00   2.7277627e+00   2.4551607e+00   3.3586468e+00   2.6490703e+00   2.5878996e+00   2.7146857e+00   3.1604264e+00   3.1862677e+00   2.6121387e+00   2.3320406e+00   2.8060955e+00   2.7397840e+00   2.4059801e+00   3.1251810e+00   2.4123722e+00   2.4266062e+00   2.8827369e+00   3.4219145e+00   2.1146056e+00   2.7787333e+00   2.6868306e+00   2.5237903e+00   2.5969195e+00   2.8858666e+00   2.5292454e+00   2.1030528e+00   2.2789104e+00   2.4843930e+00   2.4502513e+00   2.3681813e+00   3.9129285e+00   3.2963379e+00   4.0307050e+00   3.3579713e+00   3.7573925e+00   4.6072225e+00   3.2350676e+00   4.1666050e+00   4.0096938e+00   4.3939440e+00   3.2458961e+00   3.5448948e+00   3.7163166e+00   3.5735211e+00   3.6303034e+00   3.5366397e+00   3.3347301e+00   4.7764597e+00   5.1656525e+00   3.7678733e+00   3.9190152e+00   3.1752055e+00   4.7655871e+00   3.2963860e+00   3.6257668e+00   3.8480918e+00   3.1163356e+00   
2.9401017e+00   3.7060704e+00   3.7400429e+00   4.2905130e+00   4.6982735e+00   3.7862785e+00   3.0555922e+00   3.3519252e+00   4.6433941e+00   3.6672706e+00   3.2313825e+00   2.8704346e+00   3.6888814e+00   3.9001229e+00   3.7578379e+00   3.2963379e+00   3.9355102e+00   3.9693842e+00   3.7298088e+00   3.6452459e+00   3.3776637e+00   3.4666091e+00   2.9570067e+00   4.5257749e-01   2.3345854e+00   2.1006743e+00   1.4408765e+00   2.3345854e+00   2.7201861e+00   1.6242170e+00   1.4334280e+00   4.2461520e+00   2.2960397e+00   1.5510150e+00   8.3619405e-01   2.5963945e+00   7.1671402e-01   2.2031378e+00   9.3957399e-01   1.8662528e+00   3.9018608e+00   3.5587525e+00   4.0758181e+00   4.6738622e+00   4.2315488e+00   3.8362958e+00   3.5101695e+00   4.2349456e+00   4.0096351e+00   3.8957954e+00   5.1177048e+00   3.5857491e+00   4.8511271e+00   3.8770675e+00   3.4324591e+00   3.7687554e+00   3.5952204e+00   3.8095276e+00   5.1880951e+00   4.1733990e+00   3.6719420e+00   3.8275992e+00   4.7391892e+00   3.9417326e+00   3.8406076e+00   3.8597390e+00   4.3729123e+00   4.2482658e+00   3.8534412e+00   3.8740219e+00   4.3496532e+00   4.2951960e+00   3.8572117e+00   4.4028225e+00   3.5707989e+00   3.2084035e+00   3.9057558e+00   4.9165227e+00   3.3635180e+00   4.2694314e+00   4.1082916e+00   3.6886188e+00   4.0716087e+00   4.4411156e+00   3.9335446e+00   3.3496034e+00   3.5830335e+00   3.7561390e+00   4.0088699e+00   3.7413425e+00   4.6436290e+00   4.5471223e+00   4.9787007e+00   4.4481204e+00   4.7341193e+00   5.4826137e+00   4.5863292e+00   5.1433212e+00   5.2725182e+00   4.8836508e+00   4.1393671e+00   4.7672599e+00   4.7092337e+00   4.9106562e+00   4.7707455e+00   4.3996661e+00   4.3591552e+00   5.0844032e+00   6.2257171e+00   5.2378683e+00   4.7433741e+00   4.3742533e+00   5.7435198e+00   4.5678545e+00   4.3840310e+00   4.6485090e+00   4.3482964e+00   4.0364176e+00   4.8328894e+00   4.6980887e+00   5.3298852e+00   5.0020990e+00   4.9051796e+00   4.2757519e+00   4.6314634e+00   
5.5369700e+00   4.3420582e+00   4.1857666e+00   3.9786340e+00   4.6138259e+00   4.8044656e+00   4.6911542e+00   4.5471223e+00   4.7508760e+00   4.7161034e+00   4.7355095e+00   4.9879154e+00   4.4154797e+00   4.1545903e+00   4.0343137e+00   2.6422396e+00   2.3867296e+00   1.6154069e+00   2.6422396e+00   3.0703842e+00   1.9080710e+00   1.7295385e+00   4.5475791e+00   2.6621202e+00   1.8051284e+00   1.1105716e+00   2.8967543e+00   1.0474897e+00   2.5495727e+00   1.1795364e+00   2.1617152e+00   3.8171826e+00   3.5339654e+00   4.0163479e+00   4.8425911e+00   4.2514283e+00   3.9557653e+00   3.4792404e+00   4.4740529e+00   4.0150475e+00   4.0717494e+00   5.3501548e+00   3.6486698e+00   4.9910943e+00   3.9350582e+00   3.5547141e+00   3.7282030e+00   3.6962934e+00   3.9420318e+00   5.2895497e+00   4.3341610e+00   3.6960949e+00   3.8986276e+00   4.8106103e+00   4.0206152e+00   3.8664495e+00   3.8454452e+00   4.3675713e+00   4.2173036e+00   3.9169317e+00   4.0237417e+00   4.5248905e+00   4.4756871e+00   3.9778974e+00   4.4827624e+00   3.6962934e+00   3.1970228e+00   3.8646917e+00   5.0103465e+00   3.4775294e+00   4.4295579e+00   4.2690345e+00   3.7344524e+00   4.1995700e+00   4.6716989e+00   4.0716455e+00   3.4573201e+00   3.6936958e+00   3.8056210e+00   4.2211876e+00   3.8604223e+00   4.5958210e+00   4.6323449e+00   4.9087471e+00   4.4703758e+00   4.7121623e+00   5.3828288e+00   4.7798674e+00   5.0815530e+00   5.2996504e+00   4.7231435e+00   4.0911974e+00   4.7955081e+00   4.6606898e+00   5.0156099e+00   4.8233009e+00   4.3540706e+00   4.3488974e+00   4.8785742e+00   6.1614901e+00   5.3577416e+00   4.6571075e+00   4.4651793e+00   5.6630236e+00   4.6077729e+00   4.3065165e+00   4.5525984e+00   4.3873290e+00   4.0638414e+00   4.8447047e+00   4.6322938e+00   5.2676175e+00   4.7796156e+00   4.9133278e+00   4.3194697e+00   4.7202167e+00   5.4208419e+00   4.2794874e+00   4.1728215e+00   4.0165591e+00   4.5422714e+00   4.7447407e+00   4.6112705e+00   4.6323449e+00   4.6754921e+00   
4.6293227e+00   4.6871898e+00   5.0428586e+00   4.3953592e+00   4.1024574e+00   4.0848110e+00   3.3742167e-01   1.1857824e+00   0.0000000e+00   6.6918102e-01   7.4445830e-01   9.7322023e-01   1.9284841e+00   6.6918102e-01   1.1393372e+00   1.6942803e+00   3.7371902e-01   1.6386105e+00   4.5257749e-01   1.4715172e+00   4.9772204e-01   3.5602797e+00   3.0968979e+00   3.5933352e+00   2.8998722e+00   3.2656298e+00   2.6029746e+00   3.1974447e+00   2.2445103e+00   3.1601923e+00   2.3946216e+00   3.0209665e+00   2.6867317e+00   3.0775658e+00   2.9161244e+00   2.1946278e+00   3.2137635e+00   2.6502891e+00   2.3652711e+00   3.6096140e+00   2.4893065e+00   3.1578003e+00   2.6568473e+00   3.4426472e+00   2.8187901e+00   2.9128857e+00   3.1418202e+00   3.4847097e+00   3.6205958e+00   2.8694312e+00   2.2223283e+00   2.5588203e+00   2.4651138e+00   2.4413293e+00   3.2621861e+00   2.5834128e+00   2.9995857e+00   3.3733791e+00   3.3817876e+00   2.3263008e+00   2.6305758e+00   2.5756071e+00   2.8533918e+00   2.5683063e+00   2.4187322e+00   2.5258216e+00   2.3250308e+00   2.4413618e+00   2.7691536e+00   2.1006933e+00   2.4608850e+00   4.4119896e+00   3.4290419e+00   4.4942656e+00   3.6650933e+00   4.1590662e+00   5.0899438e+00   3.0333619e+00   4.5799469e+00   4.1882413e+00   5.0726081e+00   3.7613721e+00   3.7859992e+00   4.1623715e+00   3.6029758e+00   3.8608065e+00   4.0352348e+00   3.7266849e+00   5.5043247e+00   5.5172732e+00   3.6569442e+00   4.4568115e+00   3.3324016e+00   5.1765224e+00   3.5192065e+00   4.1799642e+00   4.3857400e+00   3.3769078e+00   3.2906843e+00   4.0034548e+00   4.1917183e+00   4.6854597e+00   5.4444866e+00   4.0904298e+00   3.2962678e+00   3.4250783e+00   5.1569386e+00   4.2213314e+00   3.6582637e+00   3.2059261e+00   4.1937040e+00   4.3826532e+00   4.2786320e+00   3.4290419e+00   4.4549850e+00   4.5270304e+00   4.1816268e+00   3.7777251e+00   3.7924144e+00   4.0173731e+00   3.2613828e+00   9.2006504e-01   3.3742167e-01   8.6080744e-01   5.0621589e-01   
7.0646671e-01   2.1664244e+00   7.2679299e-01   8.9712099e-01   1.4681660e+00   5.4873947e-01   1.4074199e+00   4.9617437e-01   1.2206236e+00   2.5698045e-01   3.4986942e+00   3.0427234e+00   3.5520531e+00   3.0444864e+00   3.2783159e+00   2.6723101e+00   3.1333743e+00   2.4360210e+00   3.1627463e+00   2.4904253e+00   3.2338077e+00   2.6808015e+00   3.2240677e+00   2.9412073e+00   2.2206950e+00   3.1669559e+00   2.6716144e+00   2.4623998e+00   3.7173707e+00   2.6198784e+00   3.1208539e+00   2.6854361e+00   3.5171200e+00   2.8753360e+00   2.9157449e+00   3.1157529e+00   3.4945103e+00   3.5956983e+00   2.8873581e+00   2.3297122e+00   2.7075732e+00   2.6220688e+00   2.5143128e+00   3.3225169e+00   2.6164571e+00   2.9213819e+00   3.3323415e+00   3.4842326e+00   2.3487772e+00   2.7507828e+00   2.6991998e+00   2.8571158e+00   2.6614903e+00   2.6122501e+00   2.6121387e+00   2.3527202e+00   2.4822998e+00   2.7826627e+00   2.2477567e+00   2.5188545e+00   4.3590737e+00   3.4800724e+00   4.4652192e+00   3.6820633e+00   4.1423404e+00   5.0632361e+00   3.1594575e+00   4.5764429e+00   4.2442248e+00   4.9703970e+00   3.7054123e+00   3.8144340e+00   4.1322520e+00   3.6772717e+00   3.8704422e+00   3.9786899e+00   3.7187193e+00   5.3973273e+00   5.5276243e+00   3.7838435e+00   4.3978718e+00   3.3669786e+00   5.1732410e+00   3.5478651e+00   4.1195682e+00   4.3422158e+00   3.3925731e+00   3.2818178e+00   4.0157845e+00   4.1753467e+00   4.6824968e+00   5.3318394e+00   4.0983108e+00   3.3321312e+00   3.5171976e+00   5.1116177e+00   4.1480571e+00   3.6395566e+00   3.1983718e+00   4.1451783e+00   4.3355345e+00   4.2161052e+00   3.4800724e+00   4.4037157e+00   4.4559384e+00   4.1399085e+00   3.8303307e+00   3.7678377e+00   3.9434740e+00   3.2672961e+00   1.1857824e+00   1.7590894e+00   5.4715569e-01   6.1787077e-01   3.0224093e+00   1.4977817e+00   8.1744862e-01   9.4676850e-01   1.4369223e+00   8.6079202e-01   1.2896554e+00   5.3286499e-01   7.6195008e-01   3.1536923e+00   2.8019556e+00   
3.2823965e+00   3.4754319e+00   3.2348803e+00   2.8339836e+00   2.8678686e+00   3.0569237e+00   3.0422163e+00   2.8599505e+00   3.8711727e+00   2.6769819e+00   3.5769114e+00   2.9369339e+00   2.3887701e+00   2.9172679e+00   2.7450499e+00   2.6744413e+00   3.9868196e+00   2.9825819e+00   3.0070640e+00   2.7478281e+00   3.6492544e+00   2.9260177e+00   2.8398296e+00   2.9417237e+00   3.3879693e+00   3.4191352e+00   2.9103957e+00   2.6495864e+00   3.1359829e+00   3.0635119e+00   2.7246726e+00   3.4310966e+00   2.7450499e+00   2.6593850e+00   3.0929063e+00   3.7090709e+00   2.4372581e+00   3.1206171e+00   3.0186562e+00   2.7973689e+00   2.9151879e+00   3.2261028e+00   2.8631702e+00   2.4096686e+00   2.5984928e+00   2.7564382e+00   2.8056103e+00   2.6945402e+00   4.1622883e+00   3.6243461e+00   4.2495237e+00   3.6308369e+00   4.0200378e+00   4.7940036e+00   3.6050689e+00   4.3664479e+00   4.2800934e+00   4.5553898e+00   3.4840330e+00   3.8323637e+00   3.9571437e+00   3.9163572e+00   3.9605759e+00   3.7909588e+00   3.5846709e+00   4.8756388e+00   5.3862972e+00   4.0807979e+00   4.1385728e+00   3.5138167e+00   4.9592897e+00   3.5910983e+00   3.8379532e+00   4.0230039e+00   3.4134019e+00   3.2269518e+00   3.9906410e+00   3.9261146e+00   4.4982182e+00   4.7746016e+00   4.0736767e+00   3.3286256e+00   3.6393910e+00   4.8333249e+00   3.9033387e+00   3.4776516e+00   3.1661860e+00   3.9124707e+00   4.1473618e+00   3.9899548e+00   3.6243461e+00   4.1607944e+00   4.1969547e+00   3.9859042e+00   3.9511939e+00   3.6382505e+00   3.7066628e+00   3.2552942e+00   6.6918102e-01   7.4445830e-01   9.7322023e-01   1.9284841e+00   6.6918102e-01   1.1393372e+00   1.6942803e+00   3.7371902e-01   1.6386105e+00   4.5257749e-01   1.4715172e+00   4.9772204e-01   3.5602797e+00   3.0968979e+00   3.5933352e+00   2.8998722e+00   3.2656298e+00   2.6029746e+00   3.1974447e+00   2.2445103e+00   3.1601923e+00   2.3946216e+00   3.0209665e+00   2.6867317e+00   3.0775658e+00   2.9161244e+00   2.1946278e+00   
3.2137635e+00   2.6502891e+00   2.3652711e+00   3.6096140e+00   2.4893065e+00   3.1578003e+00   2.6568473e+00   3.4426472e+00   2.8187901e+00   2.9128857e+00   3.1418202e+00   3.4847097e+00   3.6205958e+00   2.8694312e+00   2.2223283e+00   2.5588203e+00   2.4651138e+00   2.4413293e+00   3.2621861e+00   2.5834128e+00   2.9995857e+00   3.3733791e+00   3.3817876e+00   2.3263008e+00   2.6305758e+00   2.5756071e+00   2.8533918e+00   2.5683063e+00   2.4187322e+00   2.5258216e+00   2.3250308e+00   2.4413618e+00   2.7691536e+00   2.1006933e+00   2.4608850e+00   4.4119896e+00   3.4290419e+00   4.4942656e+00   3.6650933e+00   4.1590662e+00   5.0899438e+00   3.0333619e+00   4.5799469e+00   4.1882413e+00   5.0726081e+00   3.7613721e+00   3.7859992e+00   4.1623715e+00   3.6029758e+00   3.8608065e+00   4.0352348e+00   3.7266849e+00   5.5043247e+00   5.5172732e+00   3.6569442e+00   4.4568115e+00   3.3324016e+00   5.1765224e+00   3.5192065e+00   4.1799642e+00   4.3857400e+00   3.3769078e+00   3.2906843e+00   4.0034548e+00   4.1917183e+00   4.6854597e+00   5.4444866e+00   4.0904298e+00   3.2962678e+00   3.4250783e+00   5.1569386e+00   4.2213314e+00   3.6582637e+00   3.2059261e+00   4.1937040e+00   4.3826532e+00   4.2786320e+00   3.4290419e+00   4.4549850e+00   4.5270304e+00   4.1816268e+00   3.7777251e+00   3.7924144e+00   4.0173731e+00   3.2613828e+00   1.2563834e+00   1.3681903e+00   1.6242170e+00   4.6126066e-01   1.4691503e+00   2.0743925e+00   5.0370871e-01   2.0365895e+00   5.2374483e-01   1.9494772e+00   1.0034646e+00   4.0320101e+00   3.4981749e+00   4.0289839e+00   2.9648238e+00   3.6116412e+00   2.8362334e+00   3.5807825e+00   2.1594400e+00   3.5619276e+00   2.4608850e+00   2.9150653e+00   2.9806848e+00   3.2524084e+00   3.2332049e+00   2.4351674e+00   3.6506644e+00   2.8794131e+00   2.6371069e+00   3.7842148e+00   2.6442387e+00   3.4386758e+00   2.9743384e+00   3.6958304e+00   3.1396988e+00   3.2947222e+00   3.5521670e+00   3.8756118e+00   3.9969338e+00   3.1587328e+00   
2.4432066e+00   2.6604221e+00   2.5745994e+00   2.6880360e+00   3.4951118e+00   2.7657429e+00   3.3524671e+00   3.7924883e+00   3.6104716e+00   2.5876530e+00   2.7410968e+00   2.7238850e+00   3.1914275e+00   2.7871336e+00   2.3484202e+00   2.7125180e+00   2.6235600e+00   2.7012637e+00   3.1219910e+00   2.0888843e+00   2.6969064e+00   4.6820911e+00   3.5968849e+00   4.8607426e+00   3.9563487e+00   4.4501699e+00   5.4913616e+00   2.9743611e+00   4.9743359e+00   4.4659463e+00   5.4618868e+00   4.1043393e+00   4.0513907e+00   4.5016468e+00   3.7087600e+00   4.0024694e+00   4.3310092e+00   4.0611789e+00   5.9599083e+00   5.8632763e+00   3.7995759e+00   4.8081465e+00   3.4697005e+00   5.5699347e+00   3.7817852e+00   4.5398887e+00   4.8101536e+00   3.6425657e+00   3.5739550e+00   4.2642554e+00   4.6155808e+00   5.0696209e+00   5.9318766e+00   4.3420618e+00   3.6079861e+00   3.6711700e+00   5.5546786e+00   4.5127918e+00   3.9935485e+00   3.4733020e+00   4.5569739e+00   4.6922818e+00   4.6236700e+00   3.5968849e+00   4.7939394e+00   4.8471780e+00   4.4913725e+00   3.9942507e+00   4.1085491e+00   4.3067090e+00   3.5093006e+00   3.1271814e-01   2.6440626e+00   9.6964683e-01   5.8796666e-01   9.8545402e-01   1.0013399e+00   9.2426065e-01   7.6195008e-01   7.3283576e-01   2.6643250e-01   3.3524935e+00   2.9103383e+00   3.4378505e+00   3.2794093e+00   3.2805279e+00   2.7218307e+00   2.9677934e+00   2.7417113e+00   3.1265865e+00   2.6350666e+00   3.5810092e+00   2.6509959e+00   3.4564670e+00   2.9240118e+00   2.2778214e+00   3.0636652e+00   2.6473174e+00   2.5673371e+00   3.9008585e+00   2.8131786e+00   3.0066012e+00   2.7310040e+00   3.6088006e+00   2.8947302e+00   2.8966017e+00   3.0506196e+00   3.4785143e+00   3.5188926e+00   2.8816131e+00   2.5125550e+00   2.9397900e+00   2.8645995e+00   2.6245231e+00   3.3639057e+00   2.6028733e+00   2.7271822e+00   3.2253861e+00   3.6489825e+00   2.3376510e+00   2.9371604e+00   2.8382974e+00   2.8051321e+00   2.8006021e+00   2.9309089e+00   
2.7184808e+00   2.3312464e+00   2.5047936e+00   2.7731354e+00   2.5341497e+00   2.5862794e+00   4.2119757e+00   3.5278864e+00   4.3705395e+00   3.6366115e+00   4.0640736e+00   4.9516709e+00   3.3348040e+00   4.4927271e+00   4.2867968e+00   4.7459453e+00   3.5773144e+00   3.8303307e+00   4.0501276e+00   3.7856794e+00   3.8862115e+00   3.8584574e+00   3.6392860e+00   5.1247727e+00   5.4955777e+00   3.9594564e+00   4.2633399e+00   3.3993739e+00   5.1011948e+00   3.5798230e+00   3.9561606e+00   4.1885925e+00   3.4019138e+00   3.2277183e+00   3.9971830e+00   4.0727212e+00   4.6247900e+00   5.0557045e+00   4.0800856e+00   3.3285999e+00   3.5685643e+00   5.0078455e+00   3.9761694e+00   3.5324648e+00   3.1505332e+00   4.0358238e+00   4.2334406e+00   4.1156691e+00   3.5278864e+00   4.2682695e+00   4.3053166e+00   4.0686429e+00   3.9122205e+00   3.6972894e+00   3.7712394e+00   3.2160302e+00   2.8326674e+00   1.0103954e+00   4.2829723e-01   7.9126749e-01   1.1795364e+00   7.3442235e-01   8.5582452e-01   6.1158310e-01   4.8284931e-01   3.4789406e+00   3.0164286e+00   3.5708825e+00   3.4760111e+00   3.4435701e+00   2.8856827e+00   3.0483404e+00   2.9286177e+00   3.2959553e+00   2.7769569e+00   3.7899650e+00   2.7721706e+00   3.6919547e+00   3.0773843e+00   2.4199353e+00   3.1984671e+00   2.7597977e+00   2.7743820e+00   4.1049898e+00   3.0189963e+00   3.0754044e+00   2.9033794e+00   3.7972498e+00   3.0781443e+00   3.0628742e+00   3.1980682e+00   3.6529321e+00   3.6479042e+00   3.0224061e+00   2.7237896e+00   3.1475538e+00   3.0809334e+00   2.8106441e+00   3.5217353e+00   2.7064382e+00   2.7753422e+00   3.3543209e+00   3.8636687e+00   2.4678110e+00   3.1212622e+00   3.0250044e+00   2.9444840e+00   2.9956969e+00   3.1281937e+00   2.8892226e+00   2.4772050e+00   2.6547819e+00   2.9364676e+00   2.7130802e+00   2.7488632e+00   4.2524457e+00   3.6566905e+00   4.4856620e+00   3.7659004e+00   4.1610154e+00   5.0768465e+00   3.4623926e+00   4.6393183e+00   4.4611186e+00   4.7773180e+00   
3.6552032e+00   3.9746118e+00   4.1574253e+00   3.9234139e+00   3.9686223e+00   3.9172103e+00   3.7603950e+00   5.1648090e+00   5.6463490e+00   4.1614237e+00   4.3393711e+00   3.5009132e+00   5.2503936e+00   3.7276020e+00   4.0260707e+00   4.3007127e+00   3.5361708e+00   3.3347521e+00   4.1191095e+00   4.2183675e+00   4.7752353e+00   5.1049558e+00   4.1955154e+00   3.4902431e+00   3.7536489e+00   5.1215318e+00   4.0036288e+00   3.6385337e+00   3.2536517e+00   4.1326097e+00   4.3100988e+00   4.1978681e+00   3.6566905e+00   4.3438151e+00   4.3538983e+00   4.1591001e+00   4.0714399e+00   3.8024850e+00   3.7974783e+00   3.3185266e+00   2.0833080e+00   2.8648636e+00   3.5532593e+00   1.6555341e+00   3.5410343e+00   2.0840787e+00   3.3747131e+00   2.3883072e+00   4.3833871e+00   3.9159762e+00   4.2941647e+00   2.3488350e+00   3.6240540e+00   2.9044888e+00   4.0815608e+00   1.5532921e+00   3.6825697e+00   2.4113529e+00   1.7998094e+00   3.2616916e+00   2.5529439e+00   3.3821744e+00   2.6637769e+00   3.9531209e+00   3.1831859e+00   2.5836708e+00   3.1669559e+00   2.2907828e+00   3.8684560e+00   3.0202406e+00   3.4019580e+00   3.1886068e+00   3.4332960e+00   3.7685816e+00   3.8803374e+00   4.1746389e+00   3.3102735e+00   2.2304221e+00   2.1489616e+00   2.0501444e+00   2.6225712e+00   3.4164974e+00   3.0901976e+00   3.9885258e+00   4.0802502e+00   3.0869095e+00   2.9336463e+00   2.3936974e+00   2.5327316e+00   3.4518638e+00   2.5837552e+00   1.5782205e+00   2.6521862e+00   2.9662386e+00   2.9040188e+00   3.2768109e+00   1.6628177e+00   2.7685987e+00   5.0448046e+00   3.5141919e+00   4.9824611e+00   4.0549341e+00   4.5981277e+00   5.5863508e+00   2.6647036e+00   5.0241554e+00   4.1998979e+00   5.9440534e+00   4.4432394e+00   3.9560997e+00   4.6422438e+00   3.4164839e+00   4.0005863e+00   4.6454886e+00   4.2390210e+00   6.5166388e+00   5.6880370e+00   3.1944389e+00   5.0789131e+00   3.4956324e+00   5.5310286e+00   3.6881384e+00   4.9152167e+00   5.0890922e+00   3.6527501e+00   
3.7902440e+00   4.2540354e+00   4.7585941e+00   5.0389487e+00   6.4918086e+00   4.3280601e+00   3.6284588e+00   3.4969965e+00   5.6399353e+00   4.9671290e+00   4.2659568e+00   3.6994310e+00   4.7714894e+00   4.8963175e+00   4.8281202e+00   3.5141919e+00   5.0683437e+00   5.1871515e+00   4.6280145e+00   3.7055141e+00   4.2764028e+00   4.7873094e+00   3.7371542e+00   1.1433971e+00   1.6774310e+00   6.8299624e-01   1.6304499e+00   2.4808718e-01   1.5886765e+00   7.6250797e-01   4.0055392e+00   3.4676312e+00   4.0289839e+00   3.2391776e+00   3.6989507e+00   2.9466089e+00   3.5208636e+00   2.4804256e+00   3.6211670e+00   2.6281173e+00   3.2921089e+00   3.0161637e+00   3.5345457e+00   3.2983536e+00   2.5210242e+00   3.6506644e+00   2.9161244e+00   2.7938108e+00   4.0292846e+00   2.8755116e+00   3.4075988e+00   3.0797683e+00   3.8646774e+00   3.2397520e+00   3.3586779e+00   3.5819899e+00   3.9571013e+00   4.0234614e+00   3.2253861e+00   2.6519927e+00   2.9269738e+00   2.8491914e+00   2.8419330e+00   3.6148101e+00   2.8039428e+00   3.2558795e+00   3.7924883e+00   3.8389577e+00   2.6284424e+00   2.9648238e+00   2.9126202e+00   3.2245885e+00   2.9718548e+00   2.6864788e+00   2.8651003e+00   2.6637996e+00   2.7789114e+00   3.1894122e+00   2.3748697e+00   2.8127546e+00   4.6364269e+00   3.7133040e+00   4.8825792e+00   4.0097653e+00   4.4740109e+00   5.5106999e+00   3.1817279e+00   5.0169254e+00   4.6066522e+00   5.3636182e+00   4.0783379e+00   4.1550948e+00   4.5252166e+00   3.8770438e+00   4.0814269e+00   4.3063766e+00   4.0872895e+00   5.8336247e+00   5.9533030e+00   4.0437148e+00   4.7859703e+00   3.5604924e+00   5.6269403e+00   3.8926783e+00   4.4927794e+00   4.7879866e+00   3.7291513e+00   3.6035976e+00   4.3384511e+00   4.6385717e+00   5.1321867e+00   5.8049832e+00   4.4149501e+00   3.6953819e+00   3.8133051e+00   5.5737973e+00   4.4415093e+00   3.9935485e+00   3.5037963e+00   4.5569739e+00   4.6922818e+00   4.6236700e+00   3.7133040e+00   4.7716971e+00   4.8030835e+00   
4.5149959e+00   4.1509766e+00   4.1343605e+00   4.2319568e+00   3.5394847e+00   7.6869104e-01   1.2471855e+00   8.7636491e-01   9.9981032e-01   7.8863556e-01   7.0733904e-01   3.2400577e+00   2.7256768e+00   3.3173156e+00   3.2734582e+00   3.1889456e+00   2.6198618e+00   2.7351941e+00   2.7665224e+00   3.0627699e+00   2.5021229e+00   3.6608924e+00   2.4643905e+00   3.5457670e+00   2.8045035e+00   2.1368327e+00   2.9466857e+00   2.4386380e+00   2.5729082e+00   3.8963334e+00   2.8235662e+00   2.7242811e+00   2.6575342e+00   3.5598437e+00   2.8418228e+00   2.8206835e+00   2.9462527e+00   3.4233881e+00   3.3667899e+00   2.7322904e+00   2.5411273e+00   2.9638750e+00   2.9140871e+00   2.5797070e+00   3.2425969e+00   2.3780833e+00   2.4351544e+00   3.0892387e+00   3.6720185e+00   2.1688001e+00   2.8939857e+00   2.7945402e+00   2.6616170e+00   2.7767058e+00   2.9769851e+00   2.6347557e+00   2.1986118e+00   2.3753308e+00   2.6828901e+00   2.5283291e+00   2.4839186e+00   3.8851614e+00   3.3427747e+00   4.1909067e+00   3.4628626e+00   3.8305140e+00   4.8043725e+00   3.1800308e+00   4.3815267e+00   4.2038608e+00   4.4513681e+00   3.3256952e+00   3.6826282e+00   3.8475722e+00   3.6210755e+00   3.6107649e+00   3.5632387e+00   3.4596612e+00   4.8847297e+00   5.3781989e+00   3.9435460e+00   4.0131264e+00   3.1614358e+00   4.9957986e+00   3.4408340e+00   3.7000441e+00   4.0284550e+00   3.2354442e+00   3.0107962e+00   3.8036057e+00   3.9713385e+00   4.5180638e+00   4.8486868e+00   3.8729425e+00   3.2243808e+00   3.5087561e+00   4.8402519e+00   3.6359784e+00   3.3268022e+00   2.9240118e+00   3.8232660e+00   3.9709252e+00   3.8746323e+00   3.3427747e+00   4.0131316e+00   4.0031779e+00   3.8300809e+00   3.7945557e+00   3.4841580e+00   3.4283673e+00   2.9863682e+00   1.9060194e+00   3.1239235e-01   1.5583422e+00   4.8124784e-01   1.2220171e+00   3.3785962e+00   2.9374280e+00   3.5071305e+00   3.8740792e+00   3.5491790e+00   3.0669573e+00   2.9018296e+00   3.4251046e+00   3.3648459e+00   
3.0744865e+00   4.3230411e+00   2.8485664e+00   4.1027662e+00   3.1626116e+00   2.6442554e+00   3.1724373e+00   2.8315630e+00   3.0534300e+00   4.4306138e+00   3.3882074e+00   2.9857887e+00   3.0959220e+00   4.0072094e+00   3.2240677e+00   3.1644964e+00   3.2264712e+00   3.7352584e+00   3.6230125e+00   3.1206853e+00   3.1023948e+00   3.5580276e+00   3.5096299e+00   3.0877778e+00   3.6577352e+00   2.7900553e+00   2.5838366e+00   3.3069107e+00   4.1792641e+00   2.5911812e+00   3.4684046e+00   3.3165070e+00   2.9868392e+00   3.2999163e+00   3.6373219e+00   3.1449351e+00   2.5937039e+00   2.8148578e+00   3.0518873e+00   3.1967428e+00   2.9647081e+00   4.0498613e+00   3.7819451e+00   4.3976398e+00   3.7644678e+00   4.0879497e+00   4.9574979e+00   3.7577430e+00   4.5772252e+00   4.5796941e+00   4.4589650e+00   3.5295907e+00   4.0592075e+00   4.0919364e+00   4.1226882e+00   4.0237849e+00   3.7803562e+00   3.7136035e+00   4.7772875e+00   5.6344258e+00   4.4679358e+00   4.1805116e+00   3.6013971e+00   5.1936459e+00   3.8460802e+00   3.8293150e+00   4.1365715e+00   3.6263469e+00   3.3344860e+00   4.1404384e+00   4.1465376e+00   4.7500857e+00   4.7258629e+00   4.2123837e+00   3.5757376e+00   3.9028467e+00   5.0127222e+00   3.7704782e+00   3.5495399e+00   3.2637691e+00   4.0284560e+00   4.1958516e+00   4.1011034e+00   3.7819451e+00   4.1793948e+00   4.1561376e+00   4.1029254e+00   4.2472742e+00   3.7624633e+00   3.5705606e+00   3.3154318e+00   1.8882412e+00   5.3690447e-01   1.7295385e+00   7.4445830e-01   3.5842571e+00   3.0831073e+00   3.5905412e+00   2.6850208e+00   3.1920496e+00   2.4895611e+00   3.1874455e+00   1.9825106e+00   3.1280291e+00   2.1902526e+00   2.7631963e+00   2.5991209e+00   2.9183874e+00   2.8448970e+00   2.0635463e+00   3.2072460e+00   2.5480786e+00   2.2627580e+00   3.4383056e+00   2.3172372e+00   3.0909340e+00   2.5623863e+00   3.3194064e+00   2.7506751e+00   2.8644451e+00   3.1134606e+00   3.4405052e+00   3.5767292e+00   2.7771563e+00   2.0712834e+00   
2.3618912e+00   2.2737657e+00   2.3098587e+00   3.1429166e+00   2.4666493e+00   2.9899331e+00   3.3598261e+00   3.2396917e+00   2.2342806e+00   2.4357274e+00   2.4181291e+00   2.7984743e+00   2.4231383e+00   2.1599940e+00   2.3764241e+00   2.2561857e+00   2.3387588e+00   2.7074009e+00   1.8390751e+00   2.3351007e+00   4.3436399e+00   3.2756710e+00   4.4477490e+00   3.5866422e+00   4.0782068e+00   5.0677790e+00   2.7922251e+00   4.5545199e+00   4.0836814e+00   5.0715858e+00   3.7130862e+00   3.6733277e+00   4.0983149e+00   3.4111720e+00   3.6933050e+00   3.9623038e+00   3.6711803e+00   5.5579136e+00   5.4498365e+00   3.4842014e+00   4.4103781e+00   3.1690867e+00   5.1441957e+00   3.3997317e+00   4.1528029e+00   4.3901202e+00   3.2630747e+00   3.2035560e+00   3.8955732e+00   4.1857725e+00   4.6435471e+00   5.5146776e+00   3.9762768e+00   3.2193184e+00   3.3255820e+00   5.1213824e+00   4.1678001e+00   3.6124079e+00   3.1107135e+00   4.1457358e+00   4.3076787e+00   4.2130787e+00   3.2756710e+00   4.4066811e+00   4.4713485e+00   4.0952473e+00   3.6289876e+00   3.7168749e+00   3.9644506e+00   3.1662754e+00   1.5140329e+00   3.3872939e-01   1.1649855e+00   3.5691650e+00   3.1595321e+00   3.7055656e+00   4.0160835e+00   3.7376603e+00   3.2592987e+00   3.1435650e+00   3.5370651e+00   3.5437638e+00   3.2591886e+00   4.4166410e+00   3.0676819e+00   4.2127293e+00   3.3654329e+00   2.8346837e+00   3.3660906e+00   3.0613575e+00   3.2026716e+00   4.5808841e+00   3.5275705e+00   3.2454510e+00   3.2718756e+00   4.1819826e+00   3.4031279e+00   3.3454884e+00   3.4170637e+00   3.9109404e+00   3.8358967e+00   3.3305911e+00   3.2315455e+00   3.6883726e+00   3.6296117e+00   3.2506700e+00   3.8623185e+00   3.0230066e+00   2.8458833e+00   3.5111771e+00   4.3201594e+00   2.8024863e+00   3.6263297e+00   3.4825375e+00   3.1978058e+00   3.4556048e+00   3.7429398e+00   3.3240937e+00   2.7959976e+00   3.0137031e+00   3.2391776e+00   3.3180586e+00   3.1510639e+00   4.3279818e+00   4.0059487e+00   
4.6233424e+00   3.9929211e+00   4.3355275e+00   5.1718231e+00   3.9512216e+00   4.7810147e+00   4.7732950e+00   4.7150495e+00   3.7777251e+00   4.2731987e+00   4.3246862e+00   4.3347988e+00   4.2753666e+00   4.0433740e+00   3.9425600e+00   5.0081332e+00   5.8406251e+00   4.6274156e+00   4.4284929e+00   3.8398842e+00   5.3940263e+00   4.0533472e+00   4.0818092e+00   4.3543675e+00   3.8429689e+00   3.5715666e+00   4.3728103e+00   4.3436347e+00   4.9498040e+00   4.9393850e+00   4.4487185e+00   3.7756717e+00   4.0901950e+00   5.2287042e+00   4.0497882e+00   3.7884247e+00   3.5028854e+00   4.2624115e+00   4.4485334e+00   4.3403092e+00   4.0059487e+00   4.4317896e+00   4.4210531e+00   4.3442496e+00   4.4457375e+00   3.9985746e+00   3.8504489e+00   3.5592028e+00   1.4309353e+00   5.3528567e-01   3.7908776e+00   3.2731841e+00   3.8215973e+00   3.1206853e+00   3.5080970e+00   2.7892862e+00   3.3363505e+00   2.4070515e+00   3.4174587e+00   2.5169099e+00   3.2261716e+00   2.8456035e+00   3.3834513e+00   3.1193847e+00   2.3599428e+00   3.4420978e+00   2.7676217e+00   2.6211360e+00   3.8782821e+00   2.7318604e+00   3.2516765e+00   2.8950591e+00   3.6956240e+00   3.0573546e+00   3.1595622e+00   3.3778208e+00   3.7543715e+00   3.8301935e+00   3.0538048e+00   2.4889600e+00   2.7975756e+00   2.7172725e+00   2.6747723e+00   3.4570094e+00   2.6710885e+00   3.0859941e+00   3.5894820e+00   3.6730945e+00   2.4678645e+00   2.8348872e+00   2.7756196e+00   3.0423378e+00   2.8112845e+00   2.6077230e+00   2.7173555e+00   2.4925318e+00   2.6151930e+00   2.9985224e+00   2.2768387e+00   2.6523384e+00   4.4886181e+00   3.5762214e+00   4.6936725e+00   3.8412437e+00   4.3086181e+00   5.3124523e+00   3.1125049e+00   4.8185220e+00   4.4330566e+00   5.1853832e+00   3.9019516e+00   3.9878172e+00   4.3438773e+00   3.7545919e+00   3.9571174e+00   4.1452084e+00   3.9080206e+00   5.6409887e+00   5.7635531e+00   3.9041153e+00   4.6071696e+00   3.4361835e+00   5.4269728e+00   3.7248960e+00   4.3153491e+00   
4.5881410e+00   3.5627565e+00   3.4386758e+00   4.1762134e+00   4.4334431e+00   4.9338087e+00   5.6026788e+00   4.2556298e+00   3.5163766e+00   3.6516985e+00   5.3754384e+00   4.2899814e+00   3.8175194e+00   3.3436392e+00   4.3710164e+00   4.5233951e+00   4.4426760e+00   3.5762214e+00   4.5972906e+00   4.6375405e+00   4.3421746e+00   3.9932553e+00   3.9596590e+00   4.0813696e+00   3.3867904e+00   9.9272943e-01   3.3624661e+00   2.9811147e+00   3.5018936e+00   3.8169093e+00   3.5209477e+00   3.0838698e+00   2.9939885e+00   3.3707744e+00   3.3216375e+00   3.1074562e+00   4.2293025e+00   2.8939100e+00   3.9735251e+00   3.1779321e+00   2.6513305e+00   3.1539115e+00   2.9206559e+00   2.9923096e+00   4.3522137e+00   3.3213891e+00   3.1222272e+00   3.0540027e+00   3.9664528e+00   3.2012517e+00   3.1248526e+00   3.2007609e+00   3.6824267e+00   3.6418210e+00   3.1482435e+00   3.0088381e+00   3.4838907e+00   3.4206811e+00   3.0415159e+00   3.6826470e+00   2.9006138e+00   2.7293873e+00   3.3112277e+00   4.0819926e+00   2.6432089e+00   3.4355346e+00   3.3034492e+00   3.0164601e+00   3.2442582e+00   3.5631690e+00   3.1415769e+00   2.6264645e+00   2.8384396e+00   3.0300747e+00   3.1354811e+00   2.9649167e+00   4.2304738e+00   3.8482047e+00   4.4439318e+00   3.8285351e+00   4.1849277e+00   4.9874125e+00   3.8271288e+00   4.5862820e+00   4.5664639e+00   4.6040986e+00   3.6270951e+00   4.0846199e+00   4.1503558e+00   4.1702140e+00   4.1407964e+00   3.9121183e+00   3.7737683e+00   4.8997002e+00   5.6369181e+00   4.4180349e+00   4.2780098e+00   3.7035075e+00   5.1920494e+00   3.8582580e+00   3.9463952e+00   4.1826549e+00   3.6583100e+00   3.4129797e+00   4.2036908e+00   4.1443501e+00   4.7432980e+00   4.8153136e+00   4.2825977e+00   3.5821023e+00   3.9040345e+00   5.0374022e+00   3.9556226e+00   3.6351652e+00   3.3487998e+00   4.0905154e+00   4.2992014e+00   4.1693135e+00   3.8482047e+00   4.2897273e+00   4.2963449e+00   4.1754173e+00   4.2443816e+00   3.8297356e+00   3.7573406e+00   
3.4190329e+00   3.4434283e+00   2.9833205e+00   3.5091456e+00   3.1516030e+00   3.2866494e+00   2.6849207e+00   3.0541761e+00   2.5654361e+00   3.1545670e+00   2.5403244e+00   3.3918434e+00   2.6608343e+00   3.3413616e+00   2.9302185e+00   2.2379234e+00   3.1290373e+00   2.6442995e+00   2.5077372e+00   3.8111267e+00   2.7069456e+00   3.0566678e+00   2.7098649e+00   3.5644689e+00   2.8826061e+00   2.9134932e+00   3.0944404e+00   3.4986589e+00   3.5664548e+00   2.8806273e+00   2.4158569e+00   2.8131786e+00   2.7333339e+00   2.5637470e+00   3.3370594e+00   2.5885603e+00   2.8220110e+00   3.2904739e+00   3.5710204e+00   2.3287218e+00   2.8315630e+00   2.7529707e+00   2.8293212e+00   2.7254528e+00   2.7527251e+00   2.6524994e+00   2.3299431e+00   2.4822440e+00   2.7803033e+00   2.3731496e+00   2.5423572e+00   4.2830420e+00   3.4939592e+00   4.4286678e+00   3.6575173e+00   4.1044791e+00   5.0220848e+00   3.2200733e+00   4.5468375e+00   4.2700249e+00   4.8698851e+00   3.6462342e+00   3.8237490e+00   4.0990020e+00   3.7208580e+00   3.8692104e+00   3.9203597e+00   3.6817365e+00   5.2775206e+00   5.5250867e+00   3.8676958e+00   4.3392543e+00   3.3693781e+00   5.1524083e+00   3.5650934e+00   4.0437994e+00   4.2783342e+00   3.3968462e+00   3.2517273e+00   4.0065881e+00   4.1377875e+00   4.6677357e+00   5.2142247e+00   4.0893000e+00   3.3307003e+00   3.5369003e+00   5.0771896e+00   4.0613196e+00   3.5869628e+00   3.1695162e+00   4.1006440e+00   4.2899814e+00   4.1769447e+00   3.4939592e+00   4.3422191e+00   4.3859843e+00   4.1114107e+00   3.8721945e+00   3.7365034e+00   3.8554667e+00   3.2330990e+00   7.4500632e-01   3.1271814e-01   2.7864553e+00   1.1117653e+00   1.8291315e+00   9.1459005e-01   3.2771828e+00   8.5582452e-01   2.5020950e+00   3.7722922e+00   1.4404415e+00   2.6850561e+00   1.2884095e+00   1.9346765e+00   4.6190224e-01   1.7610224e+00   1.9545276e+00   2.5064746e+00   2.4134734e+00   1.4291842e+00   1.4855627e+00   1.8305602e+00   1.4494865e+00   1.0355160e+00   
6.8921053e-01   9.5529726e-01   7.2626021e-01   1.4025367e+00   2.2620298e+00   2.6646285e+00   2.6984190e+00   1.9245986e+00   1.7053476e+00   1.9940477e+00   1.3238834e+00   4.4901474e-01   2.2514668e+00   1.7899689e+00   2.4621620e+00   2.3008240e+00   1.1820572e+00   2.0593667e+00   3.3235867e+00   2.0701817e+00   1.6811909e+00   1.7438018e+00   1.2168151e+00   2.9923086e+00   1.8570167e+00   1.8407084e+00   1.9774894e+00   1.2374249e+00   1.3146181e+00   1.4369760e+00   1.6548985e+00   3.0339990e+00   1.3065206e+00   1.8441719e+00   1.9017153e+00   1.0169098e+00   1.5490835e+00   1.1480416e+00   2.3912363e+00   2.1724397e+00   1.4252775e+00   1.0284221e+00   2.2390158e+00   2.3611228e+00   2.6121814e+00   1.3139868e+00   2.0833701e+00   1.8624251e+00   1.5270661e+00   1.1605968e+00   9.3589904e-01   1.4360842e+00   1.2967707e+00   1.5740302e+00   8.5349066e-01   1.4639724e+00   2.1546616e+00   1.6538197e+00   1.2783641e+00   1.8320269e+00   1.7168897e+00   1.7042715e+00   1.0288355e+00   1.3960908e+00   1.0327124e+00   1.4702446e+00   1.2287925e+00   1.9774894e+00   1.3826233e+00   1.6072393e+00   1.3472497e+00   1.9439880e+00   1.1295026e+00   1.6414265e+00   1.5177322e+00   6.8496652e-01   2.3745921e+00   9.3211669e-01   1.2784093e+00   3.1271814e-01   2.7526949e+00   7.8034610e-01   1.8874930e+00   3.3568278e+00   7.7863029e-01   2.4620981e+00   7.9999102e-01   1.3194463e+00   4.5257749e-01   1.0705713e+00   1.5282070e+00   2.3189157e+00   1.9824340e+00   7.4029244e-01   1.0636179e+00   1.6347187e+00   1.0722301e+00   7.4849274e-01   5.3988754e-01   1.0632334e+00   7.0213871e-01   8.4383266e-01   1.8384560e+00   2.2399960e+00   2.2847962e+00   1.4577178e+00   1.3022697e+00   1.2927254e+00   6.8064066e-01   4.4417668e-01   2.0964002e+00   1.1252542e+00   1.9840858e+00   1.8038514e+00   6.0365341e-01   1.6354514e+00   2.8387736e+00   1.5364600e+00   1.0539473e+00   1.1361809e+00   7.8649633e-01   2.4634199e+00   1.2983546e+00   1.5835083e+00   1.4983760e+00   
1.4748100e+00   1.0180846e+00   1.2694582e+00   2.0850659e+00   2.4405647e+00   1.6897529e+00   1.8533655e+00   2.0793529e+00   7.4797652e-01   1.3453828e+00   1.1770444e+00   1.9571621e+00   1.6977813e+00   1.1421260e+00   8.3850424e-01   2.6029823e+00   2.7071356e+00   2.3733266e+00   1.3878105e+00   1.5050081e+00   2.3020930e+00   1.2450968e+00   1.1247714e+00   1.3455945e+00   1.0453799e+00   7.4157869e-01   1.3630233e+00   1.3061954e+00   1.8456576e+00   2.6048102e+00   1.4425815e+00   9.9058911e-01   1.5658693e+00   2.1444356e+00   1.4166079e+00   7.2727886e-01   7.9343577e-01   1.1384575e+00   1.4013840e+00   1.2776135e+00   1.4983760e+00   1.4006413e+00   1.5375255e+00   1.2650236e+00   1.7250893e+00   9.0221296e-01   1.2767751e+00   9.2060977e-01   2.5673851e+00   8.6079202e-01   1.6428179e+00   8.7623959e-01   3.1131006e+00   6.6453319e-01   2.3246810e+00   3.5720588e+00   1.2918836e+00   2.4857868e+00   1.0845006e+00   1.8135469e+00   3.9472619e-01   1.6028858e+00   1.8029164e+00   2.2526468e+00   2.2305704e+00   1.2920175e+00   1.3194463e+00   1.5620078e+00   1.2567627e+00   8.7273869e-01   5.3095950e-01   7.1671402e-01   4.2827238e-01   1.2022652e+00   2.1186438e+00   2.4755072e+00   2.5212188e+00   1.7582453e+00   1.4360884e+00   1.8400908e+00   1.3147484e+00   2.6680274e-01   2.0194508e+00   1.6709595e+00   2.2587909e+00   2.1030957e+00   1.0161846e+00   1.8732616e+00   3.1496799e+00   1.8819614e+00   1.5700887e+00   1.5933926e+00   1.0543640e+00   2.8415314e+00   1.6890927e+00   1.6862498e+00   1.7038925e+00   1.0251132e+00   1.0245496e+00   1.1781513e+00   1.5212572e+00   2.8050734e+00   1.1091494e+00   1.5452835e+00   1.9080234e+00   8.5359653e-01   1.2416734e+00   8.9528108e-01   2.1088803e+00   1.9097018e+00   1.2522193e+00   7.4612152e-01   2.3284654e+00   2.1556564e+00   2.3436971e+00   1.1651790e+00   1.8364691e+00   1.6976628e+00   1.2371704e+00   1.0463225e+00   8.5302032e-01   1.1623508e+00   1.0682140e+00   1.2723284e+00   6.7955751e-01   
1.2572095e+00   2.2840066e+00   1.3572135e+00   1.0082548e+00   1.5613089e+00   1.5962380e+00   1.5974627e+00   7.9671887e-01   1.1799226e+00   8.3571552e-01   1.2674750e+00   1.0543826e+00   1.7038925e+00   1.2197800e+00   1.4811026e+00   1.1132425e+00   1.6485749e+00   8.6295295e-01   1.5402909e+00   1.2957413e+00   1.7240804e+00   1.2117746e+00   2.5620931e+00   9.4346743e-01   1.9481085e+00   1.0013399e+00   1.0383354e+00   1.7091506e+00   7.5651431e-01   1.6169211e+00   1.4074199e+00   2.3606801e+00   1.6642999e+00   1.0677270e+00   9.5748562e-01   5.4702555e-01   2.2752108e+00   1.3619011e+00   1.2144903e+00   1.4245490e+00   1.7677805e+00   2.1070188e+00   2.0042865e+00   2.3026776e+00   1.5583422e+00   8.7856768e-01   3.6704030e-01   4.8644514e-01   1.0013399e+00   1.3262124e+00   1.6642999e+00   2.6524441e+00   2.3938089e+00   9.9234874e-01   1.6199145e+00   4.6126066e-01   7.3978204e-01   1.8066960e+00   7.9191984e-01   8.2250769e-01   9.3727156e-01   1.6415483e+00   1.4092680e+00   1.6304499e+00   9.1432842e-01   1.1795364e+00   3.1638147e+00   1.4103498e+00   2.9322745e+00   2.0247895e+00   2.5487652e+00   3.5082844e+00   1.0453705e+00   2.9611604e+00   1.9449446e+00   4.1343567e+00   2.6451449e+00   1.7869811e+00   2.6253748e+00   1.1973458e+00   1.9817270e+00   2.7838011e+00   2.2840066e+00   4.7706180e+00   3.4576971e+00   8.9870984e-01   3.1324336e+00   1.5639220e+00   3.4016597e+00   1.5728443e+00   3.0734808e+00   3.1995683e+00   1.6368228e+00   1.9546803e+00   2.1052860e+00   2.8313078e+00   2.9375460e+00   4.8020420e+00   2.1735035e+00   1.6493848e+00   1.3576485e+00   3.5774884e+00   3.2045691e+00   2.3952974e+00   1.8988804e+00   2.8268459e+00   2.8989852e+00   2.8927951e+00   1.4103498e+00   3.1063891e+00   3.2893581e+00   2.6241058e+00   1.4441104e+00   2.3170196e+00   3.0817543e+00   1.9124815e+00   1.0026233e+00   1.1867923e+00   2.3572427e+00   3.6939647e-01   1.6408576e+00   2.7392425e+00   8.8835337e-01   1.6805749e+00   5.5399712e-01   
1.2745081e+00   7.5303835e-01   1.1820572e+00   1.1301977e+00   1.4315442e+00   1.4464138e+00   1.2423523e+00   6.4626422e-01   7.5230154e-01   6.2536527e-01   4.0664863e-01   5.0731024e-01   4.0158746e-01   6.2543628e-01   6.4884272e-01   1.4014424e+00   1.6702453e+00   1.7317202e+00   1.0390957e+00   7.1781501e-01   1.4073416e+00   1.5165187e+00   7.3502408e-01   1.2122797e+00   1.2421878e+00   1.4565053e+00   1.3559191e+00   6.8064066e-01   1.0943185e+00   2.3628816e+00   1.1638514e+00   1.1627754e+00   1.0519629e+00   5.3106808e-01   2.1057450e+00   1.0403581e+00   1.9325284e+00   1.0596309e+00   1.3779504e+00   7.6633520e-01   1.2315180e+00   1.9698667e+00   2.0697950e+00   1.4385383e+00   1.0743031e+00   2.5609592e+00   1.1664463e+00   7.0702759e-01   1.1055841e+00   1.3757605e+00   1.4784016e+00   1.4566739e+00   7.9213303e-01   3.1107848e+00   2.2607358e+00   1.5267093e+00   1.6037239e+00   1.2804073e+00   1.9864213e+00   5.4310586e-01   1.5475402e+00   1.5328931e+00   5.4647209e-01   7.9343577e-01   9.7668596e-01   1.1862065e+00   1.4760549e+00   3.1060327e+00   1.0849536e+00   3.7234239e-01   8.8571256e-01   2.0333305e+00   1.9196784e+00   9.5289573e-01   8.6297946e-01   1.2392529e+00   1.4996752e+00   1.3752205e+00   1.0596309e+00   1.6198849e+00   1.8691588e+00   1.2188552e+00   9.2906468e-01   8.7042892e-01   1.8304530e+00   9.8621003e-01   1.4220241e+00   1.5496729e+00   1.1125144e+00   7.4201890e-01   2.1434858e+00   6.0719477e-01   1.5102780e+00   5.6262711e-01   5.7267643e-01   1.3990971e+00   5.4408162e-01   5.2316125e-01   1.5323598e+00   8.2317311e-01   1.1694177e+00   5.6003943e-01   1.0600958e+00   5.1318506e-01   8.8354057e-01   1.1892978e+00   1.3456285e+00   1.4234329e+00   5.0311426e-01   8.2976237e-01   1.0655776e+00   1.1267154e+00   4.4786319e-01   6.7424840e-01   6.4241342e-01   1.4834540e+00   1.4207811e+00   1.3630799e+00   5.2795793e-01   7.8571751e-01   5.3988754e-01   6.8299624e-01   5.6992880e-01   1.6313928e+00   3.1093967e-01   
5.0876385e-01   2.8653046e-01   6.5622658e-01   1.3398293e+00   2.2670334e-01   2.2472150e+00   8.9528108e-01   2.1908074e+00   1.1815770e+00   1.7549185e+00   2.8271782e+00   1.2987661e+00   2.2927322e+00   1.7056353e+00   3.1591627e+00   1.6557083e+00   1.2615426e+00   1.8432274e+00   1.1833606e+00   1.4858600e+00   1.8676774e+00   1.3771658e+00   3.7547288e+00   3.1005581e+00   1.4815836e+00   2.2650937e+00   9.5252488e-01   2.8687133e+00   1.0290036e+00   2.0855599e+00   2.2987772e+00   9.0705646e-01   9.6267538e-01   1.4839640e+00   2.0473137e+00   2.3780534e+00   3.7918985e+00   1.5792523e+00   8.4221906e-01   9.2300698e-01   2.9301148e+00   2.2149712e+00   1.3941954e+00   8.9564079e-01   1.9843979e+00   2.0984088e+00   2.1003345e+00   8.9528108e-01   2.2276119e+00   2.3923111e+00   1.8829565e+00   1.3046645e+00   1.4645285e+00   2.0631582e+00   9.0331700e-01   2.9007820e+00   1.0677270e+00   1.9884030e+00   3.5404087e+00   8.9973730e-01   2.7097598e+00   9.8896933e-01   1.4521880e+00   7.3735391e-01   1.1060455e+00   1.7358574e+00   2.5457091e+00   2.1802781e+00   5.9868400e-01   1.3038475e+00   1.8531597e+00   1.2895008e+00   1.0351584e+00   8.4116354e-01   1.3290015e+00   8.7070822e-01   1.0061402e+00   2.0523180e+00   2.4354079e+00   2.4861842e+00   1.6612475e+00   1.4482752e+00   1.3000067e+00   4.4417668e-01   6.8064066e-01   2.3457352e+00   1.2097457e+00   2.1562626e+00   1.9604379e+00   7.8034610e-01   1.8447318e+00   3.0052273e+00   1.6924215e+00   1.1656549e+00   1.2692102e+00   1.0351584e+00   2.6195047e+00   1.4577178e+00   1.3905452e+00   1.5765058e+00   1.5178511e+00   1.0862363e+00   1.2425116e+00   2.1288976e+00   2.5085097e+00   1.7889699e+00   2.0235792e+00   1.9184220e+00   6.6154242e-01   1.4831058e+00   1.2157849e+00   2.0573833e+00   1.6866006e+00   1.0122929e+00   9.0072498e-01   2.4680266e+00   2.8037035e+00   2.5716465e+00   1.3193736e+00   1.5270661e+00   2.3974481e+00   1.4129334e+00   9.9186850e-01   1.3586792e+00   1.1900564e+00   
7.8649633e-01   1.4261046e+00   1.4313173e+00   1.9693923e+00   2.5032449e+00   1.4908532e+00   1.1825071e+00   1.7301809e+00   2.1927243e+00   1.1883807e+00   7.0823896e-01   8.2574748e-01   1.1508346e+00   1.3435624e+00   1.2769092e+00   1.5765058e+00   1.3121204e+00   1.3947465e+00   1.2781560e+00   1.8941016e+00   9.4449485e-01   1.0327124e+00   9.1221028e-01   2.4983699e+00   1.0001615e+00   9.3727156e-01   2.0156046e+00   1.4610933e+00   2.0818555e+00   1.4925824e+00   2.8275182e+00   1.8765007e+00   1.3658611e+00   1.8892371e+00   9.4900087e-01   2.5853758e+00   1.8063869e+00   2.0403844e+00   1.9103325e+00   2.2554051e+00   2.6063293e+00   2.6670659e+00   2.8999367e+00   1.9965452e+00   1.0765554e+00   7.8898008e-01   7.5921691e-01   1.3580560e+00   1.9753995e+00   1.7807988e+00   2.8573306e+00   2.8966014e+00   1.8587118e+00   1.7290357e+00   9.4346743e-01   1.0932187e+00   2.1982921e+00   1.2728402e+00   2.6033464e-01   1.2680818e+00   1.7824360e+00   1.6364088e+00   2.0664368e+00   3.9699460e-01   1.4644159e+00   3.6567369e+00   2.0227452e+00   3.6362580e+00   2.6431572e+00   3.1825084e+00   4.2569960e+00   1.1649315e+00   3.7040279e+00   2.8079889e+00   4.6643094e+00   3.1456885e+00   2.5368652e+00   3.2881355e+00   1.9057424e+00   2.5373943e+00   3.2972877e+00   2.8812938e+00   5.2957248e+00   4.3256332e+00   1.8261865e+00   3.7402681e+00   2.0260681e+00   4.2089146e+00   2.2931022e+00   3.6001832e+00   3.8156954e+00   2.2665638e+00   2.4364115e+00   2.8123267e+00   3.5007685e+00   3.7249152e+00   5.3249013e+00   2.8816817e+00   2.2758405e+00   2.0704519e+00   4.3322711e+00   3.6389537e+00   2.9225385e+00   2.3454418e+00   3.4545556e+00   3.5207953e+00   3.5188476e+00   2.0227452e+00   3.7070275e+00   3.8401809e+00   3.2712836e+00   2.2870689e+00   2.9197390e+00   3.4787880e+00   2.3479440e+00   1.8015956e+00   2.9300280e+00   9.4226820e-01   1.8443182e+00   6.2046469e-01   1.3340137e+00   5.0731024e-01   1.2583559e+00   1.1751408e+00   1.7063292e+00   
1.5923247e+00   1.2788332e+00   7.3035754e-01   1.0391769e+00   6.6194168e-01   2.9537172e-01   2.8845946e-01   3.7622328e-01   6.2760421e-01   7.7259801e-01   1.4843174e+00   1.8353890e+00   1.8732616e+00   1.1492120e+00   9.8621003e-01   1.4916922e+00   1.4186317e+00   5.4702555e-01   1.4349060e+00   1.2616939e+00   1.6526705e+00   1.5077694e+00   6.5951091e-01   1.2429329e+00   2.5190636e+00   1.3124532e+00   1.1415080e+00   1.1102613e+00   5.1210327e-01   2.2412909e+00   1.1466385e+00   2.0209769e+00   1.3581397e+00   1.4351001e+00   9.3899770e-01   1.3860326e+00   1.9736520e+00   2.3116417e+00   1.4395013e+00   1.3256797e+00   2.5152621e+00   1.1895067e+00   1.0230389e+00   1.2126763e+00   1.7102781e+00   1.7732497e+00   1.5528794e+00   8.7017583e-01   2.9799960e+00   2.3789847e+00   1.8031686e+00   1.6479163e+00   1.5433090e+00   2.0188390e+00   8.9564079e-01   1.5340057e+00   1.4361609e+00   8.5359653e-01   9.3591761e-01   1.2375842e+00   1.0932909e+00   1.5255827e+00   2.9419617e+00   1.3503714e+00   5.7743200e-01   1.0870568e+00   2.0633836e+00   1.9646340e+00   9.8006549e-01   1.0101003e+00   1.2839264e+00   1.6205305e+00   1.4633215e+00   1.3581397e+00   1.6723912e+00   1.9304834e+00   1.3785508e+00   1.2852279e+00   1.0122929e+00   1.8669942e+00   1.1301977e+00   1.7293858e+00   1.1132823e+00   1.5940674e+00   1.2647627e+00   7.0155617e-01   2.0524860e+00   9.1916314e-01   9.0143387e-01   1.7090768e+00   7.7500385e-01   1.6060095e+00   1.1202045e+00   1.5217697e+00   1.2283051e+00   1.5431751e+00   1.8486311e+00   2.0116712e+00   2.0744305e+00   1.1308980e+00   8.6249502e-01   8.7618973e-01   9.4738284e-01   7.7051641e-01   1.2102028e+00   8.1844705e-01   1.9297683e+00   2.0868978e+00   1.6471661e+00   8.6143605e-01   6.0365341e-01   5.7743200e-01   1.3481077e+00   8.0628657e-01   1.1400599e+00   5.2860161e-01   9.6999820e-01   7.8957903e-01   1.3189781e+00   8.0128554e-01   6.6918102e-01   2.6783589e+00   1.1902996e+00   2.8052881e+00   1.7833755e+00   
2.2807524e+00   3.4730319e+00   7.8369762e-01   2.9612706e+00   2.2200035e+00   3.7113566e+00   2.2079590e+00   1.7773274e+00   2.4240040e+00   1.2586273e+00   1.6606465e+00   2.3345591e+00   2.0100747e+00   4.3781379e+00   3.6673897e+00   1.6336953e+00   2.8241758e+00   1.1071867e+00   3.5077760e+00   1.5364148e+00   2.6605007e+00   2.9756588e+00   1.4305490e+00   1.5019762e+00   1.9806289e+00   2.7459951e+00   3.0159022e+00   4.4377151e+00   2.0446123e+00   1.5157660e+00   1.4706419e+00   3.5410473e+00   2.6488235e+00   2.0119986e+00   1.3953413e+00   2.5748429e+00   2.6034011e+00   2.6304123e+00   1.1902996e+00   2.7818749e+00   2.8834870e+00   2.3861430e+00   1.6719199e+00   2.0259174e+00   2.4855987e+00   1.3894554e+00   2.6621352e+00   1.3234208e+00   2.6096596e+00   2.2340939e+00   3.3444949e+00   2.5679785e+00   1.9118907e+00   1.7502251e+00   1.3868450e+00   3.2376571e+00   2.3245757e+00   2.2030084e+00   2.3874774e+00   2.7435279e+00   3.0963501e+00   2.9911365e+00   3.3313369e+00   2.5528922e+00   1.6215602e+00   1.1232628e+00   1.1083720e+00   1.9130507e+00   2.3463017e+00   2.5105454e+00   3.5809242e+00   3.3974315e+00   1.8325077e+00   2.4726944e+00   1.3889514e+00   1.6150266e+00   2.7833542e+00   1.7312416e+00   7.0111465e-01   1.8556044e+00   2.5019422e+00   2.3097506e+00   2.6016512e+00   1.2007565e+00   2.0949830e+00   4.1622891e+00   2.3984916e+00   3.9595754e+00   3.0477055e+00   3.5738045e+00   4.5102220e+00   1.5833139e+00   3.9547990e+00   2.8883510e+00   5.1681640e+00   3.6715203e+00   2.8100260e+00   3.6615020e+00   2.1175666e+00   2.9197870e+00   3.8026677e+00   3.3142297e+00   5.7988752e+00   4.3773719e+00   1.6802144e+00   4.1689985e+00   2.5051488e+00   4.3637125e+00   2.6075737e+00   4.1031841e+00   4.2218983e+00   2.6731954e+00   2.9685228e+00   3.1235748e+00   3.8333962e+00   3.9200272e+00   5.8238336e+00   3.1861618e+00   2.6684075e+00   2.3174914e+00   4.5851647e+00   4.2037872e+00   3.4173362e+00   2.9015754e+00   3.8649606e+00   
3.9284352e+00   3.9274419e+00   2.3984916e+00   4.1396215e+00   4.3152972e+00   3.6556493e+00   2.4306199e+00   3.3534589e+00   4.0726739e+00   2.9019830e+00   1.9649078e+00   4.5716421e-01   6.0725725e-01   1.0082512e+00   4.0020411e-01   9.6216255e-01   1.8879475e+00   1.3283978e+00   6.9493020e-01   5.9382214e-01   1.3116762e+00   7.1128716e-01   6.9976890e-01   8.6291569e-01   1.2356595e+00   1.0989171e+00   3.1093967e-01   1.2231205e+00   1.5729927e+00   1.6302590e+00   8.2263932e-01   8.7786730e-01   6.2729876e-01   9.5483435e-01   1.0328871e+00   1.7091506e+00   4.5071694e-01   1.2824303e+00   1.1188225e+00   3.5622944e-01   1.0163696e+00   2.1159028e+00   8.2380019e-01   4.6137216e-01   4.2450569e-01   5.0629646e-01   1.7321630e+00   5.8565201e-01   1.8627348e+00   1.0140018e+00   1.9095789e+00   1.0347180e+00   1.4794093e+00   2.5851550e+00   1.6987423e+00   2.1172382e+00   1.7999889e+00   2.6937126e+00   1.1946586e+00   1.2274756e+00   1.5304430e+00   1.4222936e+00   1.3705079e+00   1.4369760e+00   1.1056213e+00   3.3133435e+00   3.0027854e+00   1.9037709e+00   1.8688892e+00   9.6470639e-01   2.7156484e+00   1.0119180e+00   1.6591942e+00   1.9679139e+00   7.8369762e-01   6.0848963e-01   1.3509456e+00   1.8177289e+00   2.2200035e+00   3.3498688e+00   1.4311753e+00   8.4040822e-01   1.2474502e+00   2.6426508e+00   1.7620244e+00   1.0560148e+00   5.3362004e-01   1.6100417e+00   1.7340403e+00   1.6942922e+00   1.0140018e+00   1.8496575e+00   1.9626001e+00   1.5340961e+00   1.4294738e+00   1.1293709e+00   1.5949054e+00   6.4398240e-01   1.7472907e+00   1.7451622e+00   2.3128200e+00   2.0364367e+00   1.1795364e+00   7.5358247e-01   8.5582452e-01   2.5764453e+00   1.4435947e+00   1.1399118e+00   1.4681660e+00   1.7387082e+00   2.0628406e+00   1.8244206e+00   2.2981140e+00   1.7651851e+00   1.0308265e+00   7.7934221e-01   7.7863029e-01   1.2082987e+00   1.5285763e+00   2.1068341e+00   2.8909913e+00   2.3684734e+00   6.2479428e-01   1.9481438e+00   9.9891776e-01   
1.1557309e+00   1.9516972e+00   9.8896933e-01   1.2918836e+00   1.3154759e+00   1.9018319e+00   1.7043940e+00   1.6876317e+00   1.4136421e+00   1.4845363e+00   3.4227731e+00   1.7797546e+00   2.8993041e+00   2.1584174e+00   2.6985144e+00   3.3744038e+00   1.7790388e+00   2.8051892e+00   1.8256311e+00   4.2196161e+00   2.7909300e+00   1.8699505e+00   2.6716730e+00   1.6273208e+00   2.3931485e+00   2.9994905e+00   2.3643994e+00   4.7587356e+00   3.2663266e+00   8.6629251e-01   3.2140857e+00   2.0311002e+00   3.1918979e+00   1.6793067e+00   3.1869276e+00   3.1309476e+00   1.8104252e+00   2.1858235e+00   2.2467893e+00   2.6763967e+00   2.7532877e+00   4.7380019e+00   2.3330174e+00   1.6923429e+00   1.4009491e+00   3.4550204e+00   3.4609647e+00   2.5225714e+00   2.1699387e+00   2.8630132e+00   3.0349029e+00   2.9631214e+00   1.7797546e+00   3.2114945e+00   3.4557456e+00   2.7355167e+00   1.5237929e+00   2.4389172e+00   3.3539591e+00   2.2150193e+00   8.7774396e-01   8.7560645e-01   6.6918102e-01   8.5695467e-01   1.6281675e+00   1.2552875e+00   9.0276183e-01   4.7723749e-01   9.6922609e-01   3.4909881e-01   4.4701039e-01   6.6835176e-01   8.7807032e-01   8.7272262e-01   2.1119253e-01   1.2038778e+00   1.5064820e+00   1.5654738e+00   7.8630314e-01   5.8942278e-01   8.9320425e-01   1.1940983e+00   8.6887698e-01   1.4210091e+00   7.4201890e-01   1.2452417e+00   1.0494370e+00   2.3749211e-01   9.1435339e-01   2.1409786e+00   8.2148003e-01   6.5993495e-01   5.7516438e-01   2.8835410e-01   1.8418099e+00   6.4756318e-01   1.8787743e+00   9.0810653e-01   1.6779282e+00   7.7021931e-01   1.3319441e+00   2.3098596e+00   1.7659238e+00   1.7880416e+00   1.4280932e+00   2.6604707e+00   1.1753998e+00   9.4281519e-01   1.3471074e+00   1.3158313e+00   1.3974368e+00   1.4547739e+00   8.7568652e-01   3.2288696e+00   2.6753697e+00   1.6330922e+00   1.7674989e+00   1.0240850e+00   2.3852911e+00   7.4743804e-01   1.5932991e+00   1.7495490e+00   5.8796666e-01   5.8374436e-01   1.1339991e+00   
1.5083690e+00   1.8912106e+00   3.2526896e+00   1.2423778e+00   4.2436984e-01   8.5959137e-01   2.4097678e+00   1.8344669e+00   9.0791603e-01   5.8796666e-01   1.4645285e+00   1.6477112e+00   1.6088132e+00   9.0810653e-01   1.7454599e+00   1.9428935e+00   1.4315280e+00   1.1694177e+00   9.9244707e-01   1.7007353e+00   6.6154242e-01   1.4832888e+00   6.1810529e-01   7.1128716e-01   1.8601631e+00   9.7397874e-01   1.2254650e+00   6.8496652e-01   1.4755282e+00   9.0753778e-01   1.0443931e+00   1.3169341e+00   1.6226440e+00   1.6498870e+00   7.4980278e-01   8.0686941e-01   1.1940983e+00   1.2255953e+00   5.6318359e-01   1.1503759e+00   6.6361830e-01   1.4063472e+00   1.5603679e+00   1.6837563e+00   3.6536845e-01   9.5761359e-01   8.4619410e-01   8.6958016e-01   7.7821113e-01   1.6196626e+00   5.7306091e-01   4.4786319e-01   3.6086172e-01   8.2608188e-01   1.1831978e+00   3.8480889e-01   2.4265852e+00   1.2696247e+00   2.4764170e+00   1.5584328e+00   2.0444851e+00   3.1426915e+00   1.4493286e+00   2.6430316e+00   2.1446703e+00   3.2893516e+00   1.7955663e+00   1.6408995e+00   2.1004081e+00   1.5285733e+00   1.7064050e+00   2.0142932e+00   1.6802708e+00   3.9009617e+00   3.4821230e+00   1.8809382e+00   2.4651410e+00   1.1989033e+00   3.2268928e+00   1.3782117e+00   2.2651964e+00   2.5478630e+00   1.2124370e+00   1.1789342e+00   1.8358339e+00   2.3443222e+00   2.7210373e+00   3.9221023e+00   1.9136809e+00   1.2486828e+00   1.4646971e+00   3.1951870e+00   2.3252489e+00   1.6537705e+00   1.0855082e+00   2.1947734e+00   2.3108044e+00   2.2621105e+00   1.2696247e+00   2.4484679e+00   2.5504328e+00   2.0873736e+00   1.6773040e+00   1.7023842e+00   2.1476736e+00   1.1560388e+00   1.3558057e+00   1.5283845e+00   2.1664244e+00   1.9784646e+00   1.1457159e+00   1.0355160e+00   1.4985549e+00   1.0494370e+00   6.0365341e-01   2.6033464e-01   7.3803207e-01   5.6864482e-01   9.7352372e-01   1.8229204e+00   2.2308199e+00   2.2668270e+00   1.4769275e+00   1.3385535e+00   1.5931825e+00   
1.1248155e+00   2.1466080e-01   1.9117251e+00   1.3652496e+00   2.0207624e+00   1.8704283e+00   7.6880092e-01   1.6220723e+00   2.8778954e+00   1.6265611e+00   1.2621791e+00   1.3042843e+00   7.7313507e-01   2.5362196e+00   1.4082507e+00   1.8291996e+00   1.6183246e+00   1.3603639e+00   1.0878281e+00   1.3564590e+00   1.9053825e+00   2.6072470e+00   1.4737985e+00   1.6790332e+00   2.1679999e+00   9.4182667e-01   1.2929545e+00   1.1391970e+00   2.0265696e+00   1.8799960e+00   1.3547660e+00   8.8029208e-01   2.6196958e+00   2.4872661e+00   2.2706454e+00   1.4300844e+00   1.7151590e+00   2.0626281e+00   1.1997534e+00   1.2637010e+00   1.2307777e+00   1.0813975e+00   9.6603754e-01   1.3834169e+00   1.0564307e+00   1.5971466e+00   2.5708690e+00   1.4735640e+00   9.4160439e-01   1.5222760e+00   1.9572025e+00   1.7004687e+00   8.9142733e-01   1.0459007e+00   1.1049324e+00   1.4763267e+00   1.2674750e+00   1.6183246e+00   1.4769125e+00   1.6832034e+00   1.2843405e+00   1.6410601e+00   9.6706760e-01   1.5985259e+00   1.1910776e+00   1.0088064e+00   1.9822205e+00   1.3115314e+00   7.2626021e-01   8.5225534e-01   1.4476734e+00   8.6297946e-01   1.0334797e+00   1.2160426e+00   1.5358725e+00   1.3833366e+00   5.3528567e-01   1.2712561e+00   1.5367336e+00   1.6013312e+00   8.9845135e-01   9.1916314e-01   2.4152660e-01   1.0495503e+00   1.3530247e+00   1.8419620e+00   3.4651700e-01   1.2220171e+00   1.0116179e+00   6.2046469e-01   1.0696792e+00   2.0057768e+00   7.5914566e-01   4.4499696e-01   4.0664863e-01   8.1224041e-01   1.6406723e+00   5.8942278e-01   1.9060542e+00   9.6301827e-01   2.1281559e+00   1.1449868e+00   1.6017068e+00   2.8050285e+00   1.4536311e+00   2.3373419e+00   1.9472489e+00   2.8613983e+00   1.3924555e+00   1.3756347e+00   1.7433862e+00   1.3615778e+00   1.3332278e+00   1.5654312e+00   1.2872568e+00   3.4973752e+00   3.1986815e+00   1.9281666e+00   2.0588451e+00   8.3270853e-01   2.9373687e+00   1.1828955e+00   1.8231885e+00   2.1962402e+00   9.5979989e-01   
7.5532639e-01   1.4672797e+00   2.0720690e+00   2.4566102e+00   3.5648048e+00   1.5414664e+00   1.0212756e+00   1.2733735e+00   2.8900916e+00   1.8289569e+00   1.2092544e+00   6.4558417e-01   1.8428644e+00   1.8966444e+00   1.9319316e+00   9.6301827e-01   2.0102936e+00   2.1030669e+00   1.7380754e+00   1.5489952e+00   1.3296349e+00   1.6538197e+00   6.3357758e-01   1.4295948e+00   5.4873947e-01   1.6126413e+00   5.8496636e-01   1.1009910e+00   6.0725725e-01   9.5139638e-01   1.3098483e+00   1.3941599e+00   1.6617787e+00   8.6702860e-01   4.2826573e-01   8.0996690e-01   8.1324137e-01   2.8553149e-01   9.9883272e-01   1.0921061e+00   1.8259719e+00   1.6053711e+00   1.1828265e+00   8.3161134e-01   7.0834786e-01   5.3106808e-01   9.8233874e-01   3.5366952e-01   1.4106682e+00   4.6484021e-01   7.5179033e-01   6.2055338e-01   7.8324937e-01   1.1546456e+00   4.7149050e-01   2.7022699e+00   1.3084256e+00   2.4620452e+00   1.5488588e+00   2.1433843e+00   3.0477876e+00   1.5122060e+00   2.4794486e+00   1.8496575e+00   3.5092631e+00   2.0205369e+00   1.5421829e+00   2.1550478e+00   1.4847626e+00   1.9338323e+00   2.2845215e+00   1.7093196e+00   4.0428524e+00   3.2768847e+00   1.4413624e+00   2.6112105e+00   1.4262166e+00   3.0341947e+00   1.2919157e+00   2.4486747e+00   2.5390232e+00   1.2420951e+00   1.3836252e+00   1.8380693e+00   2.2098781e+00   2.5420974e+00   4.0353061e+00   1.9425259e+00   1.0808550e+00   1.0871507e+00   3.1511951e+00   2.6568698e+00   1.7619640e+00   1.3391481e+00   2.2882514e+00   2.4739376e+00   2.4163220e+00   1.3084256e+00   2.5943375e+00   2.7895659e+00   2.2249457e+00   1.4927500e+00   1.8163092e+00   2.5068377e+00   1.3832520e+00   1.1807138e+00   2.3735475e+00   1.4416726e+00   7.3803207e-01   1.4480380e+00   1.6571634e+00   1.9125650e+00   1.5766889e+00   1.9793333e+00   1.6323793e+00   1.4021778e+00   1.1659675e+00   1.2498767e+00   1.3539814e+00   1.2332482e+00   2.0826771e+00   2.7811716e+00   2.1646850e+00   3.7371902e-01   2.0122804e+00   
1.1585774e+00   1.3127802e+00   1.8544941e+00   1.1485726e+00   1.7450075e+00   1.3972701e+00   1.9880179e+00   1.7517164e+00   1.6394680e+00   1.8002228e+00   1.5490387e+00   2.9816674e+00   1.3976606e+00   2.4151949e+00   1.7787946e+00   2.2180206e+00   2.8804995e+00   1.7355261e+00   2.3592859e+00   1.2412454e+00   3.7977608e+00   2.4485045e+00   1.3668906e+00   2.2064746e+00   1.1631247e+00   1.9116990e+00   2.5849220e+00   2.0027932e+00   4.3925033e+00   2.6611027e+00   3.7234239e-01   2.7559143e+00   1.7089501e+00   2.6795765e+00   1.2450968e+00   2.8073641e+00   2.7667084e+00   1.4485479e+00   1.9038618e+00   1.7262603e+00   2.3286441e+00   2.2609611e+00   4.4068441e+00   1.7897439e+00   1.4300608e+00   1.1275945e+00   2.9337206e+00   3.0746426e+00   2.2005676e+00   1.9094387e+00   2.4292641e+00   2.5401664e+00   2.4975057e+00   1.3976606e+00   2.7518188e+00   2.9966927e+00   2.2416615e+00   9.2104245e-01   2.0302905e+00   3.0030765e+00   1.9507955e+00   1.9593598e+00   9.5666050e-01   1.1447875e+00   1.0324994e+00   1.3800294e+00   1.7386688e+00   1.7301705e+00   2.0251376e+00   1.2143895e+00   3.6924035e-01   2.6643250e-01   3.1271814e-01   5.3690447e-01   1.1566759e+00   1.3335853e+00   2.2553589e+00   2.0395552e+00   1.0374728e+00   1.1879760e+00   2.9406726e-01   4.0650681e-01   1.4164313e+00   3.6319073e-01   9.3305124e-01   5.5709100e-01   1.1791615e+00   9.8143688e-01   1.2231662e+00   7.9042934e-01   7.5817225e-01   2.9833953e+00   1.3537061e+00   2.7591591e+00   1.8262769e+00   2.3975382e+00   3.3499130e+00   1.2034779e+00   2.7851895e+00   1.9405021e+00   3.8845156e+00   2.3750632e+00   1.6954582e+00   2.4431790e+00   1.3394090e+00   1.9751740e+00   2.5771567e+00   2.0432035e+00   4.4739802e+00   3.4420978e+00   1.1727925e+00   2.9298786e+00   1.4800982e+00   3.2892623e+00   1.4456510e+00   2.8154123e+00   2.9321762e+00   1.4509441e+00   1.6902348e+00   2.0142932e+00   2.5791547e+00   2.8031074e+00   4.4835636e+00   2.1018908e+00   1.3894554e+00   
1.2250001e+00   3.4334168e+00   2.9754074e+00   2.1241116e+00   1.6323629e+00   2.6113665e+00   2.7403495e+00   2.7045382e+00   1.3537061e+00   2.9092469e+00   3.0943267e+00   2.4717839e+00   1.4839640e+00   2.1082363e+00   2.8334846e+00   1.6627952e+00   1.2426609e+00   1.7313027e+00   1.2372185e+00   1.1631247e+00   1.1195889e+00   1.5188976e+00   1.0845006e+00   8.2263932e-01   1.9012930e+00   2.1909047e+00   2.2638611e+00   1.4908532e+00   1.2008045e+00   8.7223523e-01   5.7002996e-01   1.0697164e+00   2.2410714e+00   9.6470639e-01   1.8639992e+00   1.6786018e+00   7.4743804e-01   1.6592561e+00   2.7039438e+00   1.4162972e+00   1.0024224e+00   1.0401603e+00   1.0580730e+00   2.3284654e+00   1.2231205e+00   1.2611129e+00   1.1791615e+00   1.6899776e+00   9.5793067e-01   1.1548640e+00   2.3712314e+00   2.0275068e+00   2.0149111e+00   1.9649183e+00   2.1679212e+00   7.8905316e-01   1.3385913e+00   1.3061285e+00   1.6571634e+00   1.2299006e+00   9.3495766e-01   9.4613565e-01   2.8415314e+00   2.9130399e+00   2.3454203e+00   1.4655406e+00   1.0267382e+00   2.6085350e+00   1.2515236e+00   1.1837505e+00   1.7109084e+00   9.9111027e-01   5.2374483e-01   1.2552875e+00   1.7513749e+00   2.1661990e+00   2.9392776e+00   1.3022811e+00   1.1259771e+00   1.5663601e+00   2.4310503e+00   1.1268523e+00   7.5840628e-01   4.7680727e-01   1.3348163e+00   1.3454539e+00   1.4034689e+00   1.1791615e+00   1.4139320e+00   1.4450127e+00   1.2754470e+00   1.6940148e+00   9.2620264e-01   9.4281519e-01   4.9160020e-01   9.3054395e-01   4.1781009e-01   4.6190224e-01   8.0369154e-01   9.6816967e-01   1.1548640e+00   4.6557224e-01   8.2518769e-01   1.2073068e+00   1.2487994e+00   4.5257749e-01   7.8164792e-01   1.0374728e+00   1.4711456e+00   1.1089653e+00   1.1997868e+00   7.6195008e-01   1.0018628e+00   8.9796526e-01   5.8785093e-01   6.0098693e-01   1.8456219e+00   6.5622658e-01   6.9001472e-01   5.4715569e-01   3.1093967e-01   1.5254459e+00   4.8636669e-01   2.2683520e+00   1.0914354e+00   
1.9823217e+00   1.1675117e+00   1.6963493e+00   2.6008457e+00   1.7128336e+00   2.0692339e+00   1.5728043e+00   3.0096252e+00   1.5213092e+00   1.1599200e+00   1.6580031e+00   1.3691582e+00   1.6116709e+00   1.8005954e+00   1.2641532e+00   3.5755981e+00   2.8921647e+00   1.5229350e+00   2.1046875e+00   1.2108278e+00   2.6299105e+00   8.9496218e-01   1.9702691e+00   2.0808148e+00   8.0585922e-01   9.4983854e-01   1.4326334e+00   1.7811974e+00   2.1211631e+00   3.5687116e+00   1.5311195e+00   7.1811205e-01   1.0257884e+00   2.6607812e+00   2.2075002e+00   1.3273841e+00   9.2853136e-01   1.7721541e+00   1.9757526e+00   1.8755628e+00   1.0914354e+00   2.1076593e+00   2.2924992e+00   1.7080157e+00   1.2150638e+00   1.3228669e+00   2.0678512e+00   1.0435585e+00   8.3930091e-01   1.0246689e+00   1.2483935e+00   9.2934901e-01   1.2786676e+00   1.0167074e+00   1.2794668e+00   1.2845002e+00   1.3705288e+00   1.0262066e+00   6.1158310e-01   1.6007620e+00   2.1232609e+00   1.4700482e+00   6.0145223e-01   1.5227019e+00   1.1234881e+00   1.1051628e+00   1.1975697e+00   9.1241332e-01   1.9821649e+00   1.0739839e+00   1.4719712e+00   1.2657553e+00   1.0246689e+00   1.8799916e+00   1.1304806e+00   2.3473055e+00   9.3001231e-01   1.7895396e+00   1.0784109e+00   1.5778477e+00   2.3110268e+00   1.7258314e+00   1.7588444e+00   8.0501785e-01   3.1299934e+00   1.7625999e+00   7.4394765e-01   1.5582385e+00   9.7850690e-01   1.4989725e+00   1.9419525e+00   1.2877346e+00   3.7053544e+00   2.3011615e+00   7.8305765e-01   2.1061327e+00   1.2737998e+00   2.1925140e+00   6.0604502e-01   2.1121593e+00   2.0810604e+00   8.0686941e-01   1.2420238e+00   1.1264142e+00   1.6698499e+00   1.7264467e+00   3.7248620e+00   1.2214818e+00   7.0111465e-01   5.3487449e-01   2.3978329e+00   2.4200364e+00   1.4831058e+00   1.2723027e+00   1.7715216e+00   1.9225896e+00   1.8845666e+00   9.3001231e-01   2.0954738e+00   2.3579846e+00   1.6403910e+00   5.2719130e-01   1.3587682e+00   2.3456726e+00   1.3154759e+00   
5.0299964e-01   8.2155022e-01   8.8684653e-01   1.0935878e+00   4.8492463e-01   9.8860056e-01   1.2858521e+00   1.3288928e+00   6.2451737e-01   6.2760421e-01   1.0463002e+00   1.4889605e+00   1.0762241e+00   1.1975697e+00   8.4271175e-01   1.0854927e+00   8.7560645e-01   5.3352899e-01   7.0810362e-01   1.9474744e+00   7.1781501e-01   7.2553812e-01   6.1968090e-01   3.6924035e-01   1.6978139e+00   6.0510141e-01   2.1983316e+00   1.0378652e+00   1.8773521e+00   9.9490014e-01   1.5974238e+00   2.4585483e+00   1.7380659e+00   1.8957007e+00   1.4179266e+00   2.9495957e+00   1.4948761e+00   1.0683666e+00   1.5886178e+00   1.3564059e+00   1.6294524e+00   1.7819921e+00   1.1268523e+00   3.4719349e+00   2.7528980e+00   1.4535731e+00   2.0452826e+00   1.2150379e+00   2.4732935e+00   8.6167901e-01   1.8885847e+00   1.9433611e+00   7.9744128e-01   9.1854596e-01   1.3349909e+00   1.6250498e+00   1.9838258e+00   3.4743868e+00   1.4520430e+00   5.1406096e-01   7.3595190e-01   2.5794085e+00   2.1692945e+00   1.1973560e+00   9.2123504e-01   1.7205327e+00   1.9329718e+00   1.8817619e+00   1.0378652e+00   2.0262673e+00   2.2533761e+00   1.7016580e+00   1.1862896e+00   1.2748646e+00   2.0406838e+00   9.6984925e-01   3.6319073e-01   6.1968090e-01   7.8521221e-01   5.6113157e-01   1.2463647e+00   1.6309592e+00   1.6676963e+00   8.9796526e-01   8.9789119e-01   1.2621791e+00   1.3154759e+00   6.8124108e-01   1.3901973e+00   9.9970024e-01   1.4357022e+00   1.2962951e+00   4.8012872e-01   1.0246015e+00   2.2910733e+00   1.0720705e+00   8.8779355e-01   8.4724089e-01   2.4152660e-01   1.9817257e+00   8.8354057e-01   2.0655284e+00   1.2495886e+00   1.6398109e+00   9.9332012e-01   1.4769125e+00   2.2251642e+00   2.1023706e+00   1.7015856e+00   1.4609179e+00   2.6557282e+00   1.2410479e+00   1.0733560e+00   1.3593949e+00   1.6013655e+00   1.6915504e+00   1.5864800e+00   9.7957718e-01   3.1644964e+00   2.6137665e+00   1.7509262e+00   1.7860234e+00   1.3941000e+00   2.2824049e+00   8.7876634e-01   
1.6464371e+00   1.6642217e+00   7.8808432e-01   8.5400786e-01   1.3018901e+00   1.3652161e+00   1.7805677e+00   3.1380968e+00   1.4095411e+00   5.8483448e-01   1.0816610e+00   2.2968622e+00   1.9911692e+00   1.0509799e+00   8.9223438e-01   1.4369760e+00   1.7217513e+00   1.5811148e+00   1.2495886e+00   1.8031513e+00   2.0209769e+00   1.4702446e+00   1.2810704e+00   1.0813343e+00   1.8691588e+00   1.0259679e+00   5.6788283e-01   5.3362004e-01   7.7368489e-01   1.6022591e+00   1.9873741e+00   2.0277089e+00   1.2494231e+00   1.1089653e+00   1.4561750e+00   1.2033093e+00   3.3742167e-01   1.6597443e+00   1.2265630e+00   1.7784712e+00   1.6384023e+00   6.1436388e-01   1.3800294e+00   2.6463489e+00   1.4025367e+00   1.1237500e+00   1.1244975e+00   5.5399712e-01   2.3227610e+00   1.2000527e+00   1.8734557e+00   1.4137602e+00   1.3887598e+00   9.6005858e-01   1.3202421e+00   1.9632584e+00   2.3879303e+00   1.4839475e+00   1.4995474e+00   2.3336107e+00   1.0014276e+00   1.1074656e+00   1.1350466e+00   1.8013325e+00   1.7379612e+00   1.3863777e+00   8.2339084e-01   2.8225734e+00   2.4523537e+00   2.0154418e+00   1.5091823e+00   1.5393373e+00   2.0723760e+00   9.8233874e-01   1.3702101e+00   1.3545502e+00   8.7875749e-01   8.4830222e-01   1.2549787e+00   1.1060185e+00   1.5823028e+00   2.7877979e+00   1.3537061e+00   7.2012544e-01   1.2954496e+00   2.0208193e+00   1.7781563e+00   8.8029208e-01   9.2256644e-01   1.1605968e+00   1.4991046e+00   1.3162835e+00   1.4137602e+00   1.5442127e+00   1.7645705e+00   1.2692218e+00   1.4162972e+00   9.1557526e-01   1.6722331e+00   1.0708496e+00   6.2826980e-01   1.0161846e+00   1.6718164e+00   1.9471640e+00   1.9947662e+00   1.3566251e+00   1.0412209e+00   1.7655766e+00   1.7163342e+00   7.1671402e-01   1.3277490e+00   1.5771462e+00   1.7793591e+00   1.6725709e+00   9.6964683e-01   1.3947746e+00   2.6556269e+00   1.5119726e+00   1.4702773e+00   1.3966512e+00   8.2199752e-01   2.4266623e+00   1.3925524e+00   2.0577807e+00   1.4034689e+00   
1.2545960e+00   9.4767129e-01   1.3281960e+00   1.7401681e+00   2.4345214e+00   1.1896374e+00   1.0436620e+00   2.5015865e+00   1.2764504e+00   8.9223438e-01   1.1006735e+00   1.6953806e+00   1.7900496e+00   1.5985782e+00   8.8098199e-01   2.9595238e+00   2.0497239e+00   1.6965355e+00   1.5863720e+00   1.6496643e+00   1.7201711e+00   8.3409556e-01   1.5639220e+00   1.3496867e+00   8.9427872e-01   1.0978602e+00   1.1314785e+00   9.1432842e-01   1.2235673e+00   2.9196060e+00   1.2400775e+00   6.4083823e-01   1.0643984e+00   1.8241883e+00   2.0498818e+00   1.0696576e+00   1.1919906e+00   1.2042673e+00   1.5543054e+00   1.3831011e+00   1.4034689e+00   1.6218749e+00   1.9188761e+00   1.2920921e+00   1.1337565e+00   1.0067405e+00   1.9865217e+00   1.3029486e+00   9.5748562e-01   1.9681165e+00   2.2573397e+00   2.3235953e+00   1.5741400e+00   1.1016806e+00   1.6166760e+00   1.2896217e+00   3.8830315e-01   1.7972266e+00   1.5164233e+00   2.0064286e+00   1.8697578e+00   8.5494999e-01   1.6681709e+00   2.9309853e+00   1.6503473e+00   1.4467905e+00   1.4113341e+00   9.2189946e-01   2.6393526e+00   1.4852749e+00   1.4601858e+00   1.3160132e+00   8.7649478e-01   6.4756318e-01   8.3256255e-01   1.5094087e+00   2.4769347e+00   1.0668784e+00   1.2459961e+00   1.9408738e+00   6.5485710e-01   8.4116354e-01   6.0795234e-01   1.7154199e+00   1.4961901e+00   9.9551062e-01   3.9472619e-01   2.4940166e+00   2.0216642e+00   2.0463301e+00   1.0230389e+00   1.4612117e+00   1.6595118e+00   8.5582452e-01   9.5437259e-01   9.5694342e-01   7.7934221e-01   7.3851064e-01   8.5695467e-01   7.6638235e-01   1.1767396e+00   2.5076596e+00   9.4281519e-01   7.1971771e-01   1.2830542e+00   1.5700842e+00   1.4287578e+00   5.3095950e-01   8.6291569e-01   6.6154242e-01   1.0050638e+00   8.5606908e-01   1.3160132e+00   1.0514970e+00   1.3171874e+00   7.9433323e-01   1.2774110e+00   4.7509249e-01   1.3730080e+00   9.7659801e-01   1.1663747e+00   1.4582411e+00   1.5261646e+00   7.3570584e-01   5.8785093e-01   
7.6039875e-01   1.1605726e+00   9.6964683e-01   1.4553344e+00   6.3765570e-01   1.1681709e+00   1.0005243e+00   2.9691107e-01   8.7856768e-01   2.0651943e+00   7.3735391e-01   6.0653347e-01   4.7837533e-01   3.7398306e-01   1.7406274e+00   5.5183182e-01   1.8498714e+00   8.1329726e-01   1.7508641e+00   8.2125107e-01   1.3423724e+00   2.4127395e+00   1.6384023e+00   1.9130927e+00   1.5043381e+00   2.6917823e+00   1.1782159e+00   9.6249568e-01   1.3877621e+00   1.2214135e+00   1.2719770e+00   1.4200371e+00   9.4526659e-01   3.3044115e+00   2.7645066e+00   1.6390945e+00   1.7948322e+00   8.7588404e-01   2.5003659e+00   7.4157869e-01   1.6267504e+00   1.8590427e+00   5.4310586e-01   5.2316125e-01   1.1372412e+00   1.6472029e+00   2.0021586e+00   3.3409572e+00   1.2314733e+00   5.4779717e-01   9.4822835e-01   2.4877874e+00   1.8001237e+00   9.6012811e-01   4.8644514e-01   1.5074309e+00   1.6452353e+00   1.6151033e+00   8.1329726e-01   1.7721541e+00   1.9352496e+00   1.4226963e+00   1.1564263e+00   1.0022114e+00   1.6574535e+00   5.8132667e-01   5.6318359e-01   5.3286499e-01   4.3341454e-01   1.2747045e+00   1.3163443e+00   2.1153648e+00   1.9183153e+00   1.1909838e+00   1.0657373e+00   5.8852220e-01   6.2225308e-01   1.3220343e+00   4.0443437e-01   1.0982562e+00   6.1619693e-01   1.0378442e+00   8.8917809e-01   1.0970024e+00   8.2199752e-01   6.9493020e-01   3.0003610e+00   1.5102474e+00   2.7635526e+00   1.8759428e+00   2.4405125e+00   3.3586044e+00   1.4659783e+00   2.7980993e+00   2.0759743e+00   3.8255754e+00   2.3211022e+00   1.7886572e+00   2.4450156e+00   1.5788967e+00   2.1011800e+00   2.5635734e+00   2.0416027e+00   4.3880097e+00   3.5282390e+00   1.4609179e+00   2.9105517e+00   1.6043433e+00   3.3245307e+00   1.5187698e+00   2.7743367e+00   2.8814354e+00   1.4896588e+00   1.6771528e+00   2.1027324e+00   2.5396337e+00   2.8265966e+00   4.3745133e+00   2.1946278e+00   1.4104380e+00   1.3873057e+00   3.4289479e+00   2.9514502e+00   2.1043046e+00   1.6198849e+00   
2.5820471e+00   2.7513626e+00   2.6746677e+00   1.5102474e+00   2.9036877e+00   3.0793854e+00   2.4777911e+00   1.6406409e+00   2.1046875e+00   2.7982275e+00   1.6824284e+00   1.4276574e-01   7.9394533e-01   1.3473710e+00   1.5367336e+00   2.5040512e+00   2.2893871e+00   1.0820670e+00   1.4237364e+00   3.6704030e-01   5.8785093e-01   1.6733127e+00   6.1158310e-01   7.1781501e-01   7.8318003e-01   1.4288989e+00   1.2280749e+00   1.4809953e+00   7.0150436e-01   1.0034788e+00   3.1877520e+00   1.5005648e+00   2.9634183e+00   2.0359721e+00   2.5953129e+00   3.5470571e+00   1.1634940e+00   2.9839272e+00   2.0686806e+00   4.1156592e+00   2.6069476e+00   1.8659064e+00   2.6504363e+00   1.4017266e+00   2.1040122e+00   2.7893850e+00   2.2677890e+00   4.7183507e+00   3.5819899e+00   1.1465705e+00   3.1455771e+00   1.6263647e+00   3.4643576e+00   1.6254447e+00   3.0471395e+00   3.1646326e+00   1.6517237e+00   1.9156890e+00   2.1886203e+00   2.8006555e+00   2.9856138e+00   4.7315684e+00   2.2694993e+00   1.6130651e+00   1.3903392e+00   3.6256164e+00   3.1929372e+00   2.3573820e+00   1.8552594e+00   2.8291427e+00   2.9408913e+00   2.9120555e+00   1.5005648e+00   3.1237582e+00   3.3065647e+00   2.6677639e+00   1.5962380e+00   2.3224070e+00   3.0542458e+00   1.8794605e+00   8.3156200e-01   1.4460310e+00   1.6013312e+00   2.5509456e+00   2.3359909e+00   1.1395071e+00   1.4612871e+00   4.8644514e-01   6.6244727e-01   1.7247525e+00   6.6453319e-01   6.8496652e-01   8.5330525e-01   1.4567673e+00   1.2739414e+00   1.5213580e+00   6.7904052e-01   1.0560797e+00   3.2869799e+00   1.6218234e+00   3.0463976e+00   2.1264009e+00   2.6948572e+00   3.6228821e+00   1.2747978e+00   3.0537173e+00   2.1607143e+00   4.1937512e+00   2.6849827e+00   1.9680122e+00   2.7382122e+00   1.5399252e+00   2.2309601e+00   2.8826191e+00   2.3479440e+00   4.7798805e+00   3.6690959e+00   1.2447718e+00   3.2325186e+00   1.7450415e+00   3.5380106e+00   1.7243835e+00   3.1258440e+00   3.2275370e+00   1.7473394e+00   
2.0003230e+00   2.2955340e+00   2.8573132e+00   3.0588804e+00   4.7837445e+00   2.3799967e+00   1.6861899e+00   1.4737985e+00   3.7047689e+00   3.2828775e+00   2.4345890e+00   1.9408738e+00   2.9104325e+00   3.0383020e+00   2.9993404e+00   1.6218234e+00   3.2132904e+00   3.3994957e+00   2.7639400e+00   1.7088499e+00   2.4110070e+00   3.1406474e+00   1.9689207e+00   8.9196595e-01   9.9107019e-01   1.7478610e+00   1.5467508e+00   1.1459117e+00   7.5303835e-01   6.0365341e-01   5.1453676e-01   9.1435339e-01   2.3749211e-01   1.4031123e+00   3.2313211e-01   7.2263841e-01   5.2290002e-01   7.1740234e-01   1.0975976e+00   3.1271814e-01   2.5686027e+00   1.1418735e+00   2.3704417e+00   1.4573208e+00   2.0173981e+00   2.9893605e+00   1.3924555e+00   2.4418079e+00   1.7809408e+00   3.4092863e+00   1.8988911e+00   1.4127713e+00   2.0371950e+00   1.3095379e+00   1.7286430e+00   2.1358640e+00   1.6228818e+00   3.9920035e+00   3.2072460e+00   1.3897098e+00   2.4925270e+00   1.2375842e+00   2.9891693e+00   1.1418958e+00   2.3510928e+00   2.4945648e+00   1.0792736e+00   1.2447083e+00   1.7021399e+00   2.1843621e+00   2.4864921e+00   3.9967418e+00   1.7954132e+00   1.0172824e+00   1.0869385e+00   3.0619758e+00   2.5242226e+00   1.6782397e+00   1.1896845e+00   2.1746675e+00   2.3309032e+00   2.2706367e+00   1.1418735e+00   2.4800313e+00   2.6530340e+00   2.0689155e+00   1.3443777e+00   1.6837594e+00   2.3748600e+00   1.2545769e+00   1.0660846e+00   1.6498377e+00   1.2783641e+00   1.1376397e+00   1.0898613e+00   1.0585628e+00   9.2189946e-01   8.0142393e-01   8.8029208e-01   1.9920533e+00   8.0501785e-01   1.0699859e+00   8.7105035e-01   7.9448303e-01   1.7999592e+00   8.1251986e-01   1.9227723e+00   4.6137216e-01   1.6965186e+00   7.0213871e-01   1.2723284e+00   2.3160615e+00   1.4526546e+00   1.7912701e+00   1.0739839e+00   2.8496420e+00   1.4032361e+00   6.3302304e-01   1.3757605e+00   7.8863556e-01   1.1001816e+00   1.5547583e+00   9.8152003e-01   3.4772360e+00   2.4799120e+00   
1.1619556e+00   1.8622587e+00   7.5769247e-01   2.3162319e+00   4.6128322e-01   1.7816685e+00   1.9387331e+00   4.5729032e-01   7.5817225e-01   8.9223438e-01   1.6541379e+00   1.8404767e+00   3.5381276e+00   9.9244707e-01   4.4901474e-01   4.6557224e-01   2.4199101e+00   1.9790803e+00   1.0974789e+00   7.5914566e-01   1.5781281e+00   1.6567524e+00   1.6951864e+00   4.6137216e-01   1.8193470e+00   2.0336808e+00   1.4275349e+00   7.0834786e-01   1.0588854e+00   1.8801322e+00   7.4965096e-01   1.1803522e+00   1.5908164e+00   1.9645622e+00   4.2238505e-01   1.2220171e+00   1.0116179e+00   8.5731385e-01   1.1485726e+00   1.9317000e+00   7.9664122e-01   5.6097460e-01   5.3106808e-01   1.0334797e+00   1.5679493e+00   6.8124108e-01   2.0247774e+00   1.0499569e+00   2.3371797e+00   1.3332950e+00   1.7744903e+00   3.0154970e+00   1.3277924e+00   2.5520970e+00   2.1193865e+00   3.0297355e+00   1.5881698e+00   1.5547948e+00   1.9487821e+00   1.4037678e+00   1.3973195e+00   1.7249900e+00   1.4967902e+00   3.6762761e+00   3.3933664e+00   2.0023741e+00   2.2484516e+00   8.6702860e-01   3.1482546e+00   1.3659877e+00   2.0060004e+00   2.4114658e+00   1.1530661e+00   9.5944179e-01   1.6364369e+00   2.2989490e+00   2.6726954e+00   3.7560452e+00   1.7032717e+00   1.2286922e+00   1.4040978e+00   3.1041910e+00   1.9523740e+00   1.4097206e+00   8.4169735e-01   2.0525205e+00   2.0729884e+00   2.1328505e+00   1.0499569e+00   2.1908074e+00   2.2633850e+00   1.9289706e+00   1.6929462e+00   1.5333884e+00   1.7729821e+00   7.9671887e-01   1.1060455e+00   2.5932658e+00   1.1359179e+00   2.2153658e+00   2.0116430e+00   9.6825676e-01   1.9538525e+00   2.9958431e+00   1.7387082e+00   1.1339874e+00   1.2823616e+00   1.2471855e+00   2.5771468e+00   1.5006766e+00   1.5158960e+00   1.7131342e+00   1.9169015e+00   1.3850497e+00   1.5416258e+00   2.5358032e+00   2.4678381e+00   2.2144600e+00   2.3737220e+00   2.1274158e+00   9.8372339e-01   1.7887913e+00   1.5921275e+00   2.1896791e+00   1.7854129e+00   
1.2218858e+00   1.2670969e+00   2.6904561e+00   3.2109839e+00   2.7851183e+00   1.6425353e+00   1.5729927e+00   2.8211634e+00   1.6904600e+00   1.2882518e+00   1.7618849e+00   1.4390193e+00   9.9282597e-01   1.7222401e+00   1.8692144e+00   2.3979391e+00   2.7477433e+00   1.7762263e+00   1.4761145e+00   1.9687854e+00   2.5941073e+00   1.2723200e+00   1.0497372e+00   9.7397874e-01   1.5327853e+00   1.6373339e+00   1.6177026e+00   1.7131342e+00   1.6177237e+00   1.6189835e+00   1.6013655e+00   2.1620604e+00   1.2836487e+00   1.0769609e+00   1.0246689e+00   1.9326437e+00   1.4149714e+00   2.0593667e+00   1.9008579e+00   7.7368489e-01   1.6801694e+00   2.9457974e+00   1.6627284e+00   1.3215146e+00   1.3491191e+00   8.3512263e-01   2.6175043e+00   1.4565053e+00   1.6449760e+00   1.5357227e+00   1.1692720e+00   9.2780124e-01   1.1582405e+00   1.7355630e+00   2.5925902e+00   1.3094338e+00   1.5678176e+00   2.0102053e+00   7.6952423e-01   1.1716038e+00   9.4417605e-01   1.9573929e+00   1.7612938e+00   1.1827746e+00   6.8675486e-01   2.4881500e+00   2.3327432e+00   2.2476505e+00   1.2375842e+00   1.6387331e+00   1.9108109e+00   1.1188225e+00   1.0733560e+00   1.0560148e+00   1.0005243e+00   8.6347207e-01   1.2199459e+00   9.0753778e-01   1.4483156e+00   2.4625089e+00   1.3082342e+00   8.7375509e-01   1.4601811e+00   1.8000065e+00   1.5372053e+00   7.0097130e-01   9.6204815e-01   9.1315231e-01   1.2848917e+00   1.0993651e+00   1.5357227e+00   1.2764003e+00   1.5003224e+00   1.1101208e+00   1.5658291e+00   7.8808432e-01   1.4489917e+00   1.0920053e+00   1.8302572e+00   1.0943114e+00   1.1955102e+00   1.6415483e+00   9.5491981e-01   1.7343175e+00   1.2563834e+00   1.7780218e+00   1.5661153e+00   1.3901973e+00   1.7352481e+00   1.3724737e+00   2.9349297e+00   1.4110819e+00   2.3154474e+00   1.6753059e+00   2.1644875e+00   2.7793057e+00   1.8300579e+00   2.2275691e+00   1.2267563e+00   3.6839155e+00   2.3163493e+00   1.3205795e+00   2.1115079e+00   1.3018219e+00   1.9822480e+00   
2.5100153e+00   1.8661672e+00   4.2327578e+00   2.6573893e+00   6.0725725e-01   2.6633209e+00   1.7222058e+00   2.5939799e+00   1.1664463e+00   2.6821825e+00   2.5963941e+00   1.3509199e+00   1.7816323e+00   1.7046308e+00   2.1381589e+00   2.1542571e+00   4.2222578e+00   1.7881987e+00   1.2473306e+00   1.0083490e+00   2.8478148e+00   2.9960208e+00   2.0583207e+00   1.7939407e+00   2.3128527e+00   2.4854841e+00   2.4090630e+00   1.4110819e+00   2.6669716e+00   2.9270626e+00   2.1822548e+00   9.7289027e-01   1.9265423e+00   2.9135583e+00   1.8510296e+00   1.1608422e+00   9.5483435e-01   6.7975587e-01   9.6424206e-01   1.8685422e+00   6.9420840e-01   1.8699153e-01   2.6643250e-01   7.6880092e-01   1.4668684e+00   4.7680727e-01   2.1966727e+00   1.2150638e+00   2.3282955e+00   1.3855601e+00   1.8709248e+00   2.9899796e+00   1.5396352e+00   2.5003659e+00   2.1099656e+00   3.0668593e+00   1.5989333e+00   1.5788417e+00   1.9566602e+00   1.5639220e+00   1.6339738e+00   1.8236402e+00   1.4967013e+00   3.6603010e+00   3.3937895e+00   1.9915789e+00   2.2840141e+00   1.1223478e+00   3.1075626e+00   1.3520885e+00   2.0407191e+00   2.3526669e+00   1.1508346e+00   9.9970980e-01   1.7227103e+00   2.1946041e+00   2.6155078e+00   3.6958832e+00   1.8054416e+00   1.1477199e+00   1.3984076e+00   3.0713671e+00   2.0894840e+00   1.4301682e+00   9.0552938e-01   2.0395038e+00   2.1489811e+00   2.1344915e+00   1.2150638e+00   2.2517887e+00   2.3533226e+00   1.9673072e+00   1.7095802e+00   1.5528301e+00   1.9068051e+00   9.3899770e-01   3.4893361e-01   1.4098163e+00   4.4901474e-01   9.4301660e-01   4.9009568e-01   1.1908452e+00   9.6032771e-01   1.2627589e+00   7.8945238e-01   7.3502408e-01   2.8451486e+00   1.1622402e+00   2.7058576e+00   1.7424022e+00   2.2846522e+00   3.3213689e+00   9.3810350e-01   2.7757276e+00   1.8894571e+00   3.8131048e+00   2.3010216e+00   1.5984421e+00   2.3698153e+00   1.1049324e+00   1.7539088e+00   2.4591578e+00   1.9849730e+00   4.4474865e+00   3.3956070e+00   
1.1104964e+00   2.8478148e+00   1.2628565e+00   3.2741783e+00   1.3548265e+00   2.7443454e+00   2.9214972e+00   1.3520885e+00   1.5950569e+00   1.8924015e+00   2.5961001e+00   2.7889301e+00   4.4811770e+00   1.9680122e+00   1.3672690e+00   1.1906665e+00   3.3943858e+00   2.8533575e+00   2.0610968e+00   1.5261646e+00   2.5498486e+00   2.6295980e+00   2.6227721e+00   1.1622402e+00   2.8191421e+00   2.9841287e+00   2.3684093e+00   1.3684638e+00   2.0228721e+00   2.7146999e+00   1.5430544e+00   1.2073068e+00   4.2737382e-01   1.1404637e+00   3.1271814e-01   9.6032771e-01   7.5303835e-01   1.1016806e+00   9.6606527e-01   5.6318359e-01   2.6951278e+00   1.0877340e+00   2.5880472e+00   1.5788417e+00   2.1577755e+00   3.1981141e+00   1.0053189e+00   2.6422640e+00   1.8441670e+00   3.6556493e+00   2.1516272e+00   1.5283932e+00   2.2572409e+00   1.1515368e+00   1.7244934e+00   2.3310687e+00   1.8210464e+00   4.2584241e+00   3.3382181e+00   1.2189366e+00   2.7191331e+00   1.1859692e+00   3.1732334e+00   1.2980649e+00   2.5768210e+00   2.7513616e+00   1.2636762e+00   1.4403062e+00   1.8020431e+00   2.4433699e+00   2.6920515e+00   4.2945779e+00   1.8903935e+00   1.2074964e+00   1.0277379e+00   3.3038558e+00   2.6967685e+00   1.8755883e+00   1.3730080e+00   2.4290237e+00   2.5228632e+00   2.5343900e+00   1.0877340e+00   2.6795155e+00   2.8549884e+00   2.2878474e+00   1.3941000e+00   1.9010194e+00   2.5529515e+00   1.3637808e+00   1.0801003e+00   2.2778358e+00   9.5491981e-01   5.9448670e-01   5.9589853e-01   3.3742167e-01   1.9403546e+00   7.3727571e-01   1.8011631e+00   1.0580730e+00   1.6859882e+00   8.4110582e-01   1.3396881e+00   2.3254107e+00   1.8940865e+00   1.8320164e+00   1.6099822e+00   2.5455416e+00   1.0698235e+00   1.0938968e+00   1.3476330e+00   1.4941922e+00   1.4633215e+00   1.3755629e+00   8.7649478e-01   3.1069376e+00   2.7702855e+00   1.8674396e+00   1.7104256e+00   1.1065179e+00   2.4455843e+00   9.1688392e-01   1.4945651e+00   1.6975565e+00   7.1757390e-01   
5.5102439e-01   1.2274184e+00   1.5152116e+00   1.9568855e+00   3.1286065e+00   1.3281960e+00   6.0709980e-01   1.0827099e+00   2.4180452e+00   1.7168537e+00   8.4814328e-01   5.4968254e-01   1.4259927e+00   1.6175327e+00   1.5676792e+00   1.0580730e+00   1.6914438e+00   1.8627823e+00   1.4252775e+00   1.3670172e+00   9.8340962e-01   1.5690664e+00   6.4292875e-01   1.2799022e+00   3.7622328e-01   9.3727156e-01   7.2340544e-01   8.7070822e-01   1.0517510e+00   4.9772204e-01   2.6753497e+00   1.1327780e+00   2.4219935e+00   1.5112031e+00   2.0792734e+00   3.0229728e+00   1.3206164e+00   2.4652397e+00   1.7009790e+00   3.5349296e+00   2.0290396e+00   1.4008516e+00   2.1030738e+00   1.2197800e+00   1.7532536e+00   2.2495068e+00   1.7048463e+00   4.1210195e+00   3.1691829e+00   1.1769133e+00   2.5856062e+00   1.2767751e+00   2.9863081e+00   1.1384575e+00   2.4711712e+00   2.5838439e+00   1.1268523e+00   1.3640383e+00   1.7178039e+00   2.2416335e+00   2.4908014e+00   4.1279448e+00   1.8102703e+00   1.0585628e+00   1.0110609e+00   3.0999848e+00   2.6577347e+00   1.7876312e+00   1.3164631e+00   2.2615790e+00   2.4095273e+00   2.3580977e+00   1.1327780e+00   2.5710650e+00   2.7600070e+00   2.1383264e+00   1.2571099e+00   1.7683536e+00   2.5188615e+00   1.3683626e+00   1.3381984e+00   1.9104439e+00   1.7447553e+00   2.1191178e+00   5.2290002e-01   1.5506355e+00   3.7401310e+00   2.0532672e+00   3.6450987e+00   2.6791066e+00   3.2198971e+00   4.2474168e+00   1.2374249e+00   3.6904578e+00   2.7448024e+00   4.7359552e+00   3.2167523e+00   2.5268732e+00   3.3111493e+00   1.8901504e+00   2.5824715e+00   3.3694826e+00   2.9225385e+00   5.3651760e+00   4.2632085e+00   1.6938497e+00   3.7848480e+00   2.0962051e+00   4.1703198e+00   2.2884248e+00   3.6689921e+00   3.8480511e+00   2.2915999e+00   2.5084155e+00   2.8222270e+00   3.5057930e+00   3.6931154e+00   5.3885667e+00   2.8913445e+00   2.2944283e+00   2.0536056e+00   4.3194837e+00   3.7370068e+00   2.9859935e+00   2.4261724e+00   
3.4875553e+00   3.5613793e+00   3.5512500e+00   2.0532672e+00   3.7558873e+00   3.9047630e+00   3.2988390e+00   2.2352837e+00   2.9604482e+00   3.5852990e+00   2.4345890e+00   7.1446962e-01   4.7680727e-01   8.6080744e-01   1.0528937e+00   2.6643250e-01   2.4784392e+00   9.6779954e-01   2.4056700e+00   1.4093245e+00   1.9680122e+00   3.0432377e+00   1.1095018e+00   2.5046513e+00   1.7969297e+00   3.4167115e+00   1.9006720e+00   1.3928921e+00   2.0543866e+00   1.1288261e+00   1.5650139e+00   2.0901630e+00   1.6223748e+00   4.0330923e+00   3.2470423e+00   1.3554912e+00   2.4968262e+00   1.0256272e+00   3.0550867e+00   1.1407226e+00   2.3454418e+00   2.5560105e+00   1.0597600e+00   1.1958054e+00   1.6477280e+00   2.2779375e+00   2.5604758e+00   4.0677826e+00   1.7340403e+00   1.0472149e+00   1.0317636e+00   3.1283758e+00   2.4552133e+00   1.6602736e+00   1.1211328e+00   2.2084219e+00   2.3071243e+00   2.3006257e+00   9.6779954e-01   2.4647768e+00   2.6219630e+00   2.0691920e+00   1.3232765e+00   1.6800415e+00   2.3045354e+00   1.1399118e+00   2.6525508e-01   6.6194168e-01   1.5279052e+00   4.8284931e-01   2.2240009e+00   1.2628565e+00   2.2754107e+00   1.3512603e+00   1.8635082e+00   2.9164540e+00   1.6496295e+00   2.4127395e+00   2.0563471e+00   3.0426144e+00   1.5827764e+00   1.5566996e+00   1.9230842e+00   1.6230251e+00   1.7204639e+00   1.8421714e+00   1.4471806e+00   3.6003163e+00   3.3322323e+00   1.9737130e+00   2.2612204e+00   1.2180372e+00   3.0253495e+00   1.3338820e+00   2.0126070e+00   2.2700141e+00   1.1450371e+00   1.0044165e+00   1.7168897e+00   2.0924575e+00   2.5354258e+00   3.6216411e+00   1.8094028e+00   1.0735412e+00   1.3351581e+00   3.0117528e+00   2.1161544e+00   1.3888000e+00   9.3005809e-01   2.0016409e+00   2.1479330e+00   2.1191972e+00   1.2628565e+00   2.2323235e+00   2.3582915e+00   1.9639632e+00   1.7034312e+00   1.5340961e+00   1.9379753e+00   9.6779954e-01   6.0647055e-01   1.3810470e+00   2.3749211e-01   2.2111682e+00   1.0514970e+00   
2.2223402e+00   1.2585091e+00   1.7887495e+00   2.8752401e+00   1.4450035e+00   2.3620441e+00   1.8870562e+00   3.0854059e+00   1.5854288e+00   1.3907384e+00   1.8599878e+00   1.3776606e+00   1.5509730e+00   1.8163092e+00   1.3995189e+00   3.6797126e+00   3.2203837e+00   1.7354649e+00   2.2402012e+00   1.0327124e+00   2.9556082e+00   1.1508346e+00   2.0324938e+00   2.2869296e+00   9.8115739e-01   9.3443769e-01   1.5799528e+00   2.0763861e+00   2.4587811e+00   3.7098479e+00   1.6697722e+00   9.5240225e-01   1.1656779e+00   2.9602833e+00   2.1358640e+00   1.3782117e+00   8.5400786e-01   1.9683111e+00   2.0924338e+00   2.0712315e+00   1.0514970e+00   2.2110297e+00   2.3461934e+00   1.8840843e+00   1.4831574e+00   1.4659783e+00   1.9682209e+00   8.9496218e-01   1.7964452e+00   6.5622658e-01   2.0655284e+00   1.1268523e+00   1.7764179e+00   9.9332012e-01   1.5158960e+00   2.3895003e+00   1.8982283e+00   1.8651393e+00   1.5387077e+00   2.7528000e+00   1.2871947e+00   1.1001946e+00   1.4627474e+00   1.4880729e+00   1.6030181e+00   1.6047598e+00   1.0374207e+00   3.2910073e+00   2.7655862e+00   1.7002168e+00   1.8814596e+00   1.2390194e+00   2.4548042e+00   8.7876634e-01   1.7158367e+00   1.8151170e+00   7.5016118e-01   7.8272551e-01   1.3241046e+00   1.5455843e+00   1.9524619e+00   3.2834453e+00   1.4300844e+00   5.8483448e-01   1.0263139e+00   2.4682518e+00   1.9911692e+00   1.0783755e+00   7.8808432e-01   1.5539983e+00   1.7882304e+00   1.6881750e+00   1.1268523e+00   1.8822939e+00   2.0779047e+00   1.5475657e+00   1.2810704e+00   1.1339991e+00   1.8534885e+00   9.0513514e-01   1.2087510e+00   3.4293561e+00   1.8554780e+00   3.4031864e+00   2.4420991e+00   2.9637920e+00   4.0403658e+00   1.1828717e+00   3.4998500e+00   2.6632870e+00   4.3954130e+00   2.8761203e+00   2.3399831e+00   3.0445122e+00   1.7890328e+00   2.3476777e+00   3.0401775e+00   2.6527529e+00   5.0321758e+00   4.1557153e+00   1.7943181e+00   3.4850530e+00   1.8421879e+00   4.0156620e+00   2.0769537e+00   
3.3460838e+00   3.5735182e+00   2.0311110e+00   2.1883894e+00   2.6137665e+00   3.2724268e+00   3.5184202e+00   5.0524110e+00   2.6818545e+00   2.0664108e+00   1.9589565e+00   4.0923995e+00   3.3884524e+00   2.6885740e+00   2.0959739e+00   3.1948685e+00   3.2743590e+00   3.2448452e+00   1.8554780e+00   3.4633688e+00   3.5839347e+00   3.0150212e+00   2.1174980e+00   2.6708841e+00   3.2242395e+00   2.1262653e+00   2.3423978e+00   1.0035466e+00   2.2827159e+00   1.3153660e+00   1.8615039e+00   2.9298417e+00   1.3184035e+00   2.4022001e+00   1.8151170e+00   3.2315412e+00   1.7166676e+00   1.3595814e+00   1.9250594e+00   1.2570691e+00   1.5534985e+00   1.9352496e+00   1.4849081e+00   3.8359772e+00   3.2064915e+00   1.5410939e+00   2.3431625e+00   1.0302848e+00   2.9742645e+00   1.1013771e+00   2.1700970e+00   2.3919091e+00   9.7531402e-01   1.0396764e+00   1.5925492e+00   2.1393811e+00   2.4733960e+00   3.8624002e+00   1.6816960e+00   9.5650957e-01   1.0890388e+00   3.0080097e+00   2.2891398e+00   1.5007156e+00   9.6470639e-01   2.0543866e+00   2.1765531e+00   2.1487165e+00   1.0035466e+00   2.3180617e+00   2.4663563e+00   1.9433990e+00   1.3718709e+00   1.5414664e+00   2.1305612e+00   1.0107221e+00   1.7770053e+00   1.3000021e+00   1.3205171e+00   8.3930091e-01   1.8258497e+00   2.8432746e+00   1.7831595e+00   2.1193624e+00   1.2896554e+00   8.9496218e-01   1.6446727e+00   1.0946825e+00   2.1632524e+00   1.4041749e+00   5.4207852e-01   1.2077572e+00   2.1213447e+00   2.4069921e+00   2.9335119e+00   8.2206766e-01   1.6918279e+00   2.1851365e+00   1.7733720e+00   7.3278119e-01   1.4407642e+00   1.6273345e+00   1.3293020e+00   1.2924610e+00   1.7503171e+00   1.9276214e+00   2.3545376e+00   1.2450968e+00   1.8184976e+00   2.1894327e+00   1.8463545e+00   3.4893361e-01   1.0719022e+00   1.3834169e+00   1.0621362e+00   7.1740234e-01   1.0327832e+00   1.7770053e+00   6.9976890e-01   5.1210327e-01   9.9313181e-01   2.0841099e+00   1.0825311e+00   5.0208681e-01   1.3466860e+00   
1.7937749e+00   8.2148003e-01   1.2268833e+00   2.4485240e+00   1.2563297e+00   1.9934469e+00   1.2524426e+00   2.8471336e+00   1.4358042e+00   7.3339246e-01   1.4342819e+00   4.9772204e-01   6.9457760e-01   1.4636741e+00   1.1233354e+00   3.5605638e+00   2.5755365e+00   1.2907457e+00   1.8667489e+00   3.7622328e-01   2.4814136e+00   6.2818221e-01   1.8112028e+00   2.1131807e+00   5.7672351e-01   7.9999102e-01   8.5275415e-01   1.9102507e+00   2.0267836e+00   3.6643554e+00   9.0168685e-01   8.3216780e-01   8.3306409e-01   2.5178144e+00   1.8656026e+00   1.2019259e+00   7.6362786e-01   1.6472011e+00   1.5943283e+00   1.7001179e+00   0.0000000e+00   1.8078806e+00   1.9570111e+00   1.3920954e+00   7.6195008e-01   1.1016806e+00   1.7729341e+00   7.1446962e-01   1.0816610e+00   7.3851064e-01   7.2248857e-01   3.0483776e+00   5.6342615e-01   1.3118081e+00   1.4889605e+00   9.8006369e-01   1.1737270e+00   4.2737382e-01   2.1131807e+00   1.7428500e+00   1.0543640e+00   8.5494999e-01   2.0376324e+00   1.3288928e+00   2.4590893e+00   5.9382214e-01   1.9576761e+00   9.8006369e-01   1.3739792e+00   8.5141186e-01   6.2055338e-01   1.3918500e+00   1.3907270e+00   9.7789352e-01   6.6861320e-01   6.5233704e-01   2.1059482e+00   9.8663349e-01   1.4035018e+00   1.7831595e+00   7.7880944e-01   1.4027992e+00   9.8677196e-01   1.5191564e+00   4.3798311e-01   6.8554305e-01   6.2111408e-01   1.7937749e+00   6.4241342e-01   9.9981032e-01   6.7780188e-01   1.6099640e+00   8.3640969e-01   1.4769275e+00   1.5684930e+00   6.3173774e-01   1.7302001e+00   2.0286216e+00   1.2711306e+00   1.0474897e+00   2.1700788e+00   8.2827027e-01   5.2290002e-01   7.5863433e-01   1.2491511e+00   1.0565061e+00   9.7542502e-01   3.3872939e-01   2.7982543e+00   2.0758695e+00   1.7342859e+00   1.1984110e+00   9.9693045e-01   1.8354727e+00   6.0840510e-01   1.1145077e+00   1.3082023e+00   5.2283051e-01   5.1857575e-01   4.7149050e-01   1.1471723e+00   1.3839439e+00   2.8837687e+00   5.8522871e-01   5.3667800e-01   
9.0098101e-01   1.8496383e+00   1.3956631e+00   4.8016385e-01   6.2451737e-01   9.5139638e-01   1.0316097e+00   1.1168387e+00   8.2148003e-01   1.1408175e+00   1.3888569e+00   8.7588404e-01   9.9189360e-01   4.8124784e-01   1.3365773e+00   6.0566865e-01   1.4097462e+00   2.4566860e+00   1.1582635e+00   1.2895008e+00   1.6771691e+00   6.6244727e-01   8.5330525e-01   4.2110953e-01   1.5929148e+00   1.0739839e+00   5.6992880e-01   5.5102439e-01   2.4009228e+00   1.8321979e+00   2.1944657e+00   6.8299624e-01   1.3125970e+00   1.6253273e+00   1.0353506e+00   7.4661256e-01   1.1022402e+00   9.6950963e-01   8.7649478e-01   5.0731024e-01   1.1544356e+00   1.2559800e+00   2.5390784e+00   4.9009568e-01   1.1268617e+00   1.4819274e+00   1.4649720e+00   9.9544409e-01   6.0942760e-01   9.8006526e-01   5.9589853e-01   4.3937875e-01   6.7904052e-01   1.2268833e+00   6.0365341e-01   8.3354038e-01   4.3719837e-01   1.3221954e+00   4.2932160e-01   1.0251165e+00   9.7833010e-01   3.6949435e+00   6.0653347e-01   1.6944472e+00   1.5821553e+00   1.6484241e+00   1.7861438e+00   1.1497964e+00   2.7265330e+00   2.4114658e+00   1.7100754e+00   1.5191564e+00   1.8544941e+00   9.8143688e-01   2.9288318e+00   1.1208167e+00   2.6441923e+00   4.9772204e-01   2.0065422e+00   1.3857067e+00   8.4632640e-01   2.0655380e+00   2.0890644e+00   1.6229726e+00   9.3175410e-01   6.4813570e-01   1.8882412e+00   1.6282537e+00   2.0045623e+00   2.3010727e+00   4.0443437e-01   1.9471640e+00   1.6420881e+00   2.2200703e+00   1.1092092e+00   1.3077539e+00   1.2486828e+00   2.4485240e+00   1.1714086e+00   1.4815200e+00   1.3709657e+00   2.1645801e+00   1.5528645e+00   2.0592947e+00   2.2565403e+00   3.2107953e+00   2.2989490e+00   4.0089941e+00   2.5709804e+00   1.9412285e+00   2.6814987e+00   1.0808305e+00   1.6177026e+00   2.5906314e+00   2.3241321e+00   4.7335798e+00   3.7356640e+00   1.5467170e+00   3.0855313e+00   1.1828955e+00   3.6907456e+00   1.7719327e+00   2.9776826e+00   3.3258154e+00   1.7290027e+00   
1.8703975e+00   2.1032002e+00   3.0991288e+00   3.2379874e+00   4.8403184e+00   2.1396216e+00   1.8765527e+00   1.6420881e+00   3.7688016e+00   2.8977291e+00   2.3525703e+00   1.7721385e+00   2.8780663e+00   2.8053505e+00   2.9124074e+00   1.2563297e+00   3.0197298e+00   3.1129968e+00   2.6135061e+00   1.7341866e+00   2.3184326e+00   2.7661123e+00   1.7090768e+00   1.2067996e+00   1.8641580e+00   1.3940244e+00   1.3162189e+00   8.8198158e-01   2.2794791e+00   2.1012392e+00   1.5525661e+00   1.0918469e+00   2.2063254e+00   1.1211328e+00   2.4017426e+00   1.1211328e+00   2.2284888e+00   6.3765570e-01   1.5168126e+00   1.2830542e+00   7.2263841e-01   1.5939137e+00   1.6681833e+00   1.2435436e+00   4.6557224e-01   3.1271814e-01   2.2147971e+00   1.2909648e+00   1.4589882e+00   1.7351918e+00   8.5359653e-01   1.8877628e+00   1.2647627e+00   1.8001617e+00   9.2780124e-01   1.2301583e+00   1.1566759e+00   1.9934469e+00   1.1506301e+00   1.5274241e+00   1.1815770e+00   1.6939440e+00   1.2016246e+00   1.9452063e+00   1.8368886e+00   2.7696320e+00   1.7002168e+00   6.6444642e-01   1.2360344e+00   1.3162958e+00   1.5606124e+00   1.8019802e+00   1.1903794e+00   3.3139779e+00   1.5262653e+00   1.2463647e+00   1.7598642e+00   1.6038123e+00   1.5053088e+00   8.4040822e-01   1.8873059e+00   1.7273593e+00   1.0791305e+00   1.4542898e+00   8.8167165e-01   1.3277924e+00   1.1132823e+00   3.3576107e+00   9.4738284e-01   1.0119180e+00   9.3046944e-01   1.8017473e+00   2.2743623e+00   1.4404916e+00   1.5380438e+00   1.4761813e+00   1.5955619e+00   1.5999471e+00   1.2524426e+00   1.7473897e+00   2.0612423e+00   1.3691763e+00   6.7534282e-01   1.2539581e+00   2.2701663e+00   1.5558007e+00   1.5218782e+00   2.4628184e+00   1.5932824e+00   3.2458126e+00   2.5692387e+00   1.4348047e+00   1.8937847e+00   9.2060977e-01   2.4408782e+00   3.8250534e+00   1.0499398e+00   2.8336242e+00   2.0769357e+00   2.6064495e+00   1.0813975e+00   1.3021456e+00   2.4993477e+00   2.2323451e+00   2.1662332e+00   
1.8260680e+00   2.0200580e+00   1.1770826e+00   2.1383117e+00   2.5736499e+00   3.0399892e+00   1.5323598e+00   1.2212784e+00   1.7944590e+00   2.3235953e+00   1.3759094e+00   1.3385913e+00   1.3604806e+00   2.8471336e+00   1.0797751e+00   9.4588685e-01   1.6150266e+00   2.9366827e+00   1.8217819e+00   1.3773939e+00   2.3541561e+00   1.1723315e+00   6.4232366e-01   1.8822595e+00   1.3566020e+00   4.2656951e-01   5.7691891e-01   2.2149281e+00   2.2825823e+00   2.4730728e+00   7.0958226e-01   1.4300979e+00   1.9425292e+00   1.2122797e+00   4.9430028e-01   1.0215032e+00   1.0391769e+00   7.2638147e-01   9.8137813e-01   1.1659675e+00   1.5397131e+00   2.3056726e+00   1.0072799e+00   1.1569911e+00   1.6871910e+00   1.6699010e+00   7.9127668e-01   4.3341454e-01   8.2155022e-01   5.7672351e-01   6.8304299e-01   6.6412342e-01   1.4358042e+00   7.0097130e-01   8.1019167e-01   6.5485710e-01   1.6386105e+00   4.6472955e-01   7.2626021e-01   8.9802947e-01   8.9083207e-01   9.8663349e-01   1.0101003e+00   1.2666796e+00   7.2340544e-01   3.1120413e+00   1.9012831e+00   1.3662822e+00   1.4214310e+00   1.0271885e+00   1.7789322e+00   2.8835410e-01   1.4717950e+00   1.5613089e+00   4.5716421e-01   8.2373020e-01   3.8830315e-01   1.2833190e+00   1.3103990e+00   3.1817011e+00   4.8644514e-01   5.9610506e-01   8.0162421e-01   1.8503155e+00   1.7547273e+00   9.3865015e-01   8.9973730e-01   1.1346946e+00   1.2001902e+00   1.2260535e+00   7.3339246e-01   1.3976606e+00   1.6479131e+00   9.4228329e-01   5.0621589e-01   7.1671402e-01   1.7153988e+00   9.3451915e-01   1.7865803e+00   1.3700593e+00   7.2638147e-01   5.3458689e-01   2.2505972e+00   1.6524504e+00   2.2440955e+00   5.5576380e-01   1.5638518e+00   1.3688560e+00   1.0552128e+00   7.1143905e-01   8.2518769e-01   1.0245496e+00   9.9235657e-01   6.7030885e-01   8.3156325e-01   9.6025744e-01   2.3337038e+00   6.8299624e-01   1.1166319e+00   1.5524781e+00   1.1685901e+00   1.1719135e+00   6.6412342e-01   1.1159239e+00   2.6643250e-01   
4.7488466e-01   4.3341454e-01   1.4342819e+00   5.7691891e-01   8.8366512e-01   3.3492202e-01   1.3576953e+00   4.2110953e-01   1.2033093e+00   1.1777985e+00   8.7819565e-01   1.8719964e+00   1.5530949e+00   3.9773949e+00   2.6834336e+00   1.0194189e+00   2.2401597e+00   7.0463400e-01   2.6908242e+00   8.9981614e-01   2.2443541e+00   2.5055082e+00   9.6168382e-01   1.2786676e+00   1.1515752e+00   2.2564140e+00   2.2581551e+00   4.0837847e+00   1.1737270e+00   1.1984110e+00   1.0100915e+00   2.7760526e+00   2.2855612e+00   1.6668656e+00   1.2419907e+00   2.0207624e+00   1.9399964e+00   2.0427084e+00   4.9772204e-01   2.1876191e+00   2.3343528e+00   1.7191609e+00   7.3633268e-01   1.5086315e+00   2.2088314e+00   1.2082987e+00   1.1857824e+00   1.2636762e+00   3.3874427e+00   2.5564450e+00   1.8349829e+00   1.6578570e+00   5.8813453e-01   2.5222553e+00   1.0240850e+00   1.6676963e+00   2.1419072e+00   9.3827844e-01   9.8741108e-01   8.7169308e-01   2.0802526e+00   2.1175243e+00   3.5451448e+00   8.2097460e-01   1.3248988e+00   1.4633215e+00   2.4116155e+00   1.5361480e+00   1.2935378e+00   9.5818710e-01   1.5578153e+00   1.3192053e+00   1.5035025e+00   6.9457760e-01   1.5912796e+00   1.6259926e+00   1.1892978e+00   1.1294987e+00   1.0978602e+00   1.4813076e+00   9.1948999e-01   8.1819403e-01   2.2419326e+00   2.2807501e+00   2.5846003e+00   6.4497192e-01   1.4107908e+00   2.0247998e+00   1.3509199e+00   5.5183182e-01   1.2328847e+00   1.1911894e+00   9.0810653e-01   9.7397874e-01   1.4379681e+00   1.6702453e+00   2.3957048e+00   9.4716675e-01   1.4061835e+00   1.8616601e+00   1.6979390e+00   5.2290002e-01   7.0376604e-01   9.7757519e-01   6.9976890e-01   4.8012872e-01   6.5622658e-01   1.4636741e+00   5.9074344e-01   5.5183182e-01   5.8926015e-01   1.7101283e+00   6.2055338e-01   5.2374483e-01   1.0096792e+00   2.4983023e+00   2.0024830e+00   2.0009022e+00   9.4244262e-01   1.2563297e+00   1.6864324e+00   8.0788963e-01   8.3930091e-01   1.0038277e+00   7.0810362e-01   
5.9074344e-01   6.2055338e-01   9.0121804e-01   1.2356595e+00   2.5673851e+00   7.1082758e-01   6.9066640e-01   1.1671832e+00   1.6263298e+00   1.2372185e+00   2.6033464e-01   7.2248857e-01   6.6653737e-01   8.5606908e-01   8.7588404e-01   1.1233354e+00   9.0810653e-01   1.1795009e+00   7.1867388e-01   1.2188386e+00   3.1239235e-01   1.1894366e+00   7.5921691e-01   2.7729820e+00   4.4273104e+00   1.7851048e+00   3.5860697e+00   2.3211451e+00   3.2572853e+00   1.7681972e+00   1.6466818e+00   3.1677578e+00   2.9074188e+00   2.8617360e+00   2.1557081e+00   2.3917474e+00   3.9487224e-01   2.8587345e+00   3.1370513e+00   3.5889276e+00   1.8806886e+00   2.0412779e+00   2.4100318e+00   3.0088533e+00   2.0247746e+00   2.1265123e+00   2.0926464e+00   3.5605638e+00   1.8217810e+00   1.8066213e+00   2.3669524e+00   3.5958907e+00   2.5091153e+00   2.1661990e+00   3.0374915e+00   2.7063291e+00   1.8195505e+00   2.8431665e+00   6.1655427e-01   2.1507483e+00   2.1438129e+00   1.7230435e+00   2.3108260e+00   2.5097011e+00   1.8135469e+00   1.5638528e+00   9.0791603e-01   2.8200316e+00   1.7992895e+00   2.2827159e+00   2.3805592e+00   1.0279218e+00   2.6120156e+00   2.2030084e+00   2.6289851e+00   1.7477225e+00   1.8297977e+00   1.8176515e+00   2.5755365e+00   1.8486085e+00   2.1438129e+00   1.7993706e+00   2.0846858e+00   2.0084695e+00   2.7218129e+00   2.6544629e+00   2.7850656e+00   1.6064410e+00   2.7362607e+00   1.2723027e+00   2.8153418e+00   2.8097763e+00   1.4630671e+00   1.8911656e+00   1.6976298e+00   2.3931138e+00   2.3316649e+00   4.4654565e+00   1.7621453e+00   1.4315442e+00   9.9921804e-01   3.0176875e+00   3.0491088e+00   2.1855415e+00   1.8898572e+00   2.4817766e+00   2.5552736e+00   2.5674482e+00   1.2907457e+00   2.7588865e+00   3.0041677e+00   2.2870308e+00   9.4057729e-01   2.0520412e+00   2.9779211e+00   1.8911656e+00   1.9172403e+00   1.5033800e+00   1.5778466e+00   4.2450569e-01   7.6773108e-01   1.5016932e+00   1.3345218e+00   1.1346946e+00   1.0902078e+00   
1.2416734e+00   1.9196757e+00   1.1117653e+00   1.6095257e+00   2.0596575e+00   1.0943114e+00   8.7072347e-01   9.2729770e-01   1.4434261e+00   3.8830315e-01   3.6319073e-01   4.1088655e-01   1.8667489e+00   1.6562722e-01   4.2450569e-01   5.9279023e-01   1.8877121e+00   8.2518769e-01   9.7789352e-01   1.4886316e+00   2.7335291e+00   9.1459005e-01   1.8213026e+00   2.2454046e+00   7.7259801e-01   8.0376328e-01   1.0525811e+00   2.1168634e+00   2.2814168e+00   3.7089872e+00   1.0767714e+00   1.0755005e+00   1.1631285e+00   2.6946772e+00   1.7497347e+00   1.2634840e+00   7.1971771e-01   1.7438018e+00   1.6356845e+00   1.7637315e+00   3.7622328e-01   1.8511762e+00   1.9311191e+00   1.4699785e+00   1.1016806e+00   1.1928774e+00   1.6354514e+00   6.5233704e-01   2.0052498e+00   1.7681972e+00   1.2007144e+00   2.1235855e+00   2.2484715e+00   1.6942544e+00   1.0546367e+00   5.1386894e-01   2.3251407e+00   1.7093881e+00   2.0273081e+00   2.2255325e+00   6.9493020e-01   2.3316649e+00   1.8640280e+00   2.3781796e+00   1.4043141e+00   1.6126002e+00   1.5456113e+00   2.4814136e+00   1.5467508e+00   1.8811164e+00   1.5963715e+00   2.0694478e+00   1.7422855e+00   2.4276706e+00   2.4143104e+00   1.5837613e+00   1.7028549e+00   2.6643250e-01   7.3283576e-01   6.1619693e-01   1.4102704e+00   1.5157660e+00   3.3107238e+00   7.0702759e-01   4.6964680e-01   7.3731902e-01   2.0564363e+00   1.8389778e+00   9.9058911e-01   7.8305765e-01   1.2692102e+00   1.3637808e+00   1.3483908e+00   6.2818221e-01   1.5635907e+00   1.7874832e+00   1.0817627e+00   4.8284931e-01   7.9664122e-01   1.7693113e+00   8.5141186e-01   7.7538587e-01   1.4522625e+00   1.1678338e+00   1.2100516e+00   1.1294987e+00   1.4712028e+00   1.8985225e+00   1.2171256e+00   1.5155373e+00   1.9939611e+00   1.4342819e+00   6.6653737e-01   7.1511757e-01   1.2680818e+00   5.4772790e-01   6.0868934e-01   6.7484334e-01   1.8112028e+00   3.8639663e-01   5.2413598e-01   7.9227302e-01   1.9656037e+00   7.9656884e-01   7.1789533e-01   
1.2970125e+00   1.6649242e+00   1.5382030e+00   1.4107908e+00   5.4248468e-01   9.6424206e-01   1.6581712e+00   1.4527629e+00   1.5643033e+00   2.0014001e+00   1.0048958e+00   1.4365091e+00   1.0328871e+00   1.6659456e+00   6.7424840e-01   1.0427348e+00   9.3481345e-01   2.1131807e+00   8.1596583e-01   1.1349095e+00   1.1009910e+00   2.0312552e+00   1.0961859e+00   1.4886316e+00   1.7139439e+00   4.8016385e-01   6.4687084e-01   1.4356300e+00   1.6309773e+00   3.2287360e+00   7.3391501e-01   4.4499696e-01   8.4121419e-01   2.1133414e+00   1.6592561e+00   8.3333283e-01   5.2066928e-01   1.2097457e+00   1.2911242e+00   1.2850973e+00   5.7672351e-01   1.4812088e+00   1.6720834e+00   1.0285902e+00   7.2340544e-01   6.8124108e-01   1.5683551e+00   6.1067563e-01   8.0990117e-01   1.4468934e+00   1.7768340e+00   2.9867606e+00   8.8098199e-01   6.6217390e-01   1.1327663e+00   2.1506380e+00   1.2980342e+00   5.4779717e-01   1.3340137e-01   1.1051628e+00   1.1634940e+00   1.1952607e+00   7.9999102e-01   1.2953104e+00   1.4320028e+00   9.9155078e-01   1.1867923e+00   5.7526462e-01   1.1726810e+00   2.6680274e-01   1.2602457e+00   1.2678174e+00   2.9703757e+00   1.3103399e-01   8.4439576e-01   1.0887336e+00   1.6811909e+00   1.4435947e+00   7.9778097e-01   8.9789119e-01   9.2528705e-01   8.7435479e-01   9.9613800e-01   8.5275415e-01   1.0871867e+00   1.3186900e+00   6.8124108e-01   8.2317311e-01   5.4397563e-01   1.4334280e+00   9.0121513e-01   6.7419212e-01   2.1234744e+00   1.3330747e+00   1.2524426e+00   1.6423187e+00   1.1112287e+00   1.7731481e+00   1.0412209e+00   1.5779602e+00   8.1552831e-01   1.2367326e+00   1.0877340e+00   1.9102507e+00   1.1360631e+00   1.4957547e+00   1.1495900e+00   1.6944472e+00   1.0511712e+00   1.7894533e+00   1.6403454e+00   2.3936810e+00   1.3012342e+00   1.5364148e+00   1.7852089e+00   7.8659640e-01   2.0467146e+00   1.4387140e+00   1.9055720e+00   1.0341095e+00   1.3049404e+00   1.1996837e+00   2.0267836e+00   1.2898173e+00   1.6473842e+00   
1.2092432e+00   1.6223502e+00   1.2928268e+00   2.1087983e+00   1.9576761e+00   2.9790337e+00   3.1661621e+00   3.6343153e+00   1.9094387e+00   2.2505084e+00   2.4932991e+00   3.0919113e+00   2.0983540e+00   2.2774299e+00   2.1822207e+00   3.6643554e+00   1.9784646e+00   2.0041121e+00   2.4741311e+00   3.6564145e+00   2.5932898e+00   2.3540393e+00   3.1383476e+00   9.6758101e-01   1.2012033e+00   1.6658010e+00   1.4135473e+00   8.6985276e-01   9.6249568e-01   9.3451915e-01   8.2380019e-01   9.6993876e-01   9.0168685e-01   1.0632334e+00   1.2723027e+00   6.4232366e-01   8.7376399e-01   5.8942278e-01   1.4153467e+00   9.6559725e-01   6.0709980e-01   2.1192618e+00   1.8400866e+00   8.3619405e-01   7.2626021e-01   1.2848171e+00   1.4775384e+00   1.4500356e+00   8.3216780e-01   1.5874797e+00   1.8427499e+00   1.2442620e+00   8.6985276e-01   8.3878265e-01   1.7484907e+00   7.7500385e-01   2.4608044e+00   2.2758533e+00   1.3186900e+00   1.1601403e+00   1.7655861e+00   1.8899115e+00   1.9324044e+00   8.3306409e-01   2.0122449e+00   2.2830055e+00   1.6787550e+00   8.1019167e-01   1.3243478e+00   2.1959912e+00   1.1244567e+00   1.9511379e+00   1.7500667e+00   2.2774574e+00   1.1011934e+00   1.2684800e+00   1.1435764e+00   2.5178144e+00   1.1861264e+00   1.4342819e+00   1.3109392e+00   2.2026274e+00   1.5858048e+00   2.0711809e+00   2.3400013e+00   1.0557584e+00   1.3438727e+00   1.0821769e+00   8.4383266e-01   1.0493821e+00   1.8656026e+00   7.8957903e-01   5.5399712e-01   1.0737552e+00   2.2030214e+00   1.1115276e+00   2.1119253e-01   1.3352177e+00   6.6627781e-01   7.2272795e-01   8.6751530e-01   9.1936743e-01   1.2019259e+00   8.7588404e-01   1.0946184e+00   8.0162421e-01   1.4236959e+00   4.0664863e-01   9.8463602e-01   6.8496652e-01   1.2266388e+00   1.2615426e+00   1.3010124e+00   7.6362786e-01   1.4014424e+00   1.5148689e+00   1.0932736e+00   1.2210779e+00   6.9618131e-01   1.2059294e+00   2.0855006e-01   4.7509249e-01   3.1239235e-01   1.6472011e+00   4.6557224e-01   
7.5810578e-01   4.3937875e-01   1.5999820e+00   5.6262711e-01   1.1233867e+00   1.3019241e+00   3.9472619e-01   1.5943283e+00   3.3742167e-01   4.8284931e-01   3.4893361e-01   1.6410601e+00   6.6154242e-01   9.3451915e-01   1.2980649e+00   1.7001179e+00   5.2283051e-01   6.7484334e-01   3.3872939e-01   1.6485749e+00   6.6653737e-01   1.1055440e+00   1.3931316e+00   1.8078806e+00   1.9570111e+00   1.3920954e+00   7.6195008e-01   1.1016806e+00   1.7729341e+00   7.1446962e-01   3.8639663e-01   6.2027457e-01   1.8723846e+00   8.0990117e-01   9.0447834e-01   1.4243850e+00   7.9227302e-01   2.1007225e+00   1.0230346e+00   7.1789533e-01   1.5391678e+00   1.3603920e+00   4.6137216e-01   1.1083720e+00   1.1686836e+00   1.1908452e+00   2.1561807e+00   1.2583645e+00   1.0722301e+00   7.7259801e-01   1.2001902e+00
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-seuclidean-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-seuclidean-ml.txt
    deleted file mode 100644
    index ce80cb1ead..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-seuclidean-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   1.4330520e+01   1.4635426e+01   1.3450855e+01   1.4761140e+01   1.3508642e+01   1.5434417e+01   1.3887693e+01   1.5166776e+01   1.3966038e+01   1.4950451e+01   1.4564587e+01   1.3834201e+01   1.4347008e+01   1.5641962e+01   1.4689053e+01   1.4418720e+01   1.4545856e+01   1.4151822e+01   1.4669017e+01   1.5150750e+01   1.3770166e+01   1.3288969e+01   1.4048191e+01   1.4049959e+01   1.4164158e+01   1.3727834e+01   1.4074687e+01   1.4321303e+01   1.2497330e+01   1.3820273e+01   1.4441030e+01   1.4780222e+01   1.2504339e+01   1.5022245e+01   1.4263650e+01   1.3704507e+01   1.3694385e+01   1.3667517e+01   1.3177468e+01   1.4391931e+01   1.4893903e+01   1.4475753e+01   1.4440707e+01   1.3603096e+01   1.6889651e+01   1.4731174e+01   1.3337775e+01   1.5187532e+01   1.5667271e+01   1.4226037e+01   1.4203554e+01   1.5272898e+01   1.6031460e+01   1.5991549e+01   1.1855060e+01   1.4844776e+01   1.2475182e+01   1.4408126e+01   1.4836870e+01   1.3472986e+01   1.4089281e+01   1.1018298e+01   1.3183296e+01   1.4590802e+01   1.4404230e+01   1.2717623e+01   1.3983283e+01   1.4017133e+01   1.4608005e+01   1.4402553e+01   1.3977803e+01   1.4091040e+01   1.3977459e+01   1.2630449e+01   1.4160109e+01   1.3029417e+01   1.2654432e+01   1.2794946e+01   1.3194978e+01   1.4378745e+01   1.2431908e+01   1.3852651e+01   1.3748358e+01   1.4003568e+01   1.5066681e+01   1.5192826e+01   1.4370013e+01   1.5792545e+01   1.3547546e+01   1.4411543e+01   1.4794215e+01   1.4924312e+01   1.4789153e+01   1.4875055e+01   1.4208537e+01   1.2786148e+01   1.4882476e+01   1.3302010e+01   1.4354774e+01   1.4542129e+01   1.5889633e+01   1.2928185e+01   1.4877868e+01   1.2890902e+01   1.4406165e+01   1.4498123e+01   1.4303273e+01   1.3207002e+01   1.3954732e+01   1.4841248e+01   1.5427799e+01   1.4363463e+01   1.3976277e+01   1.4284878e+01   1.4457991e+01   1.3369469e+01   1.5246610e+01   1.4487573e+01   1.4525176e+01   1.4505865e+01   1.5037347e+01   1.3834927e+01   1.3758988e+01   1.3424987e+01   
1.4914766e+01   1.3783923e+01   1.3434291e+01   1.2895927e+01   1.3870360e+01   1.3342977e+01   1.3094322e+01   1.3057847e+01   1.3322375e+01   1.4940650e+01   1.4476829e+01   1.4197503e+01   1.4597035e+01   1.2963234e+01   1.4011414e+01   1.3181409e+01   1.3339615e+01   1.3928735e+01   1.3508015e+01   1.3170749e+01   1.3529133e+01   1.3454724e+01   1.4883437e+01   1.4564565e+01   1.2474313e+01   1.4435790e+01   1.5285703e+01   1.3701736e+01   1.3578312e+01   1.4807311e+01   1.4281072e+01   1.2920213e+01   1.4427803e+01   1.1408611e+01   1.4097334e+01   1.2868115e+01   1.3903683e+01   1.3800332e+01   1.3439339e+01   1.4062651e+01   1.3242107e+01   1.4400424e+01   1.3826132e+01   1.5991146e+01   1.3118258e+01   1.5377390e+01   1.2858378e+01   1.5249567e+01   1.4081585e+01   1.4458052e+01   1.4175623e+01   1.4850069e+01   1.5506668e+01   1.5014770e+01   1.4337030e+01   1.5214705e+01   1.4803729e+01   1.3188675e+01   1.3437739e+01   1.3409394e+01   1.4607386e+01   1.5394271e+01   1.5946451e+01   1.3769364e+01   1.4181208e+01   1.2551765e+01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/pdist-spearman-ml.txt b/scipy-0.10.1/scipy/spatial/tests/pdist-spearman-ml.txt
    deleted file mode 100644
    index b50fe3af19..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/pdist-spearman-ml.txt
    +++ /dev/null
    @@ -1 +0,0 @@
    -   9.3540954e-01   9.7904590e-01   8.6703870e-01   1.1569997e+00   8.7174317e-01   1.0627183e+00   9.1272727e-01   1.1593999e+00   9.7573357e-01   1.0072127e+00   1.0536814e+00   9.6276028e-01   9.7700570e-01   1.1513951e+00   1.0719592e+00   9.2178818e-01   1.0004680e+00   9.3689769e-01   9.8205821e-01   1.0332673e+00   9.4517852e-01   8.9437744e-01   9.7556556e-01   9.0460246e-01   9.7210921e-01   9.2230423e-01   9.9605161e-01   9.6852085e-01   8.4162016e-01   9.6667267e-01   9.7759376e-01   9.9757576e-01   7.6992499e-01   1.0151695e+00   9.8691869e-01   9.0325833e-01   8.6665467e-01   8.8844884e-01   8.4553255e-01   9.7700570e-01   9.5159916e-01   9.8906691e-01   1.0551935e+00   9.1973597e-01   1.3266247e+00   1.0982778e+00   8.4531653e-01   1.0887369e+00   1.0984938e+00   9.9851185e-01   9.0701470e-01   1.0639304e+00   1.2392919e+00   1.1422502e+00   8.1725773e-01   1.1844944e+00   7.8219022e-01   1.0817162e+00   1.2196100e+00   1.0003120e+00   1.0164536e+00   7.0724272e-01   9.7981398e-01   1.1134953e+00   1.0671107e+00   9.3600960e-01   9.9984398e-01   1.0356916e+00   1.1248005e+00   1.0696310e+00   1.0634263e+00   9.6472847e-01   9.9365137e-01   8.5724572e-01   1.1257846e+00   8.9930993e-01   9.4903090e-01   9.0667867e-01   9.1231923e-01   1.0573777e+00   9.0105011e-01   9.5255926e-01   1.0177978e+00   1.0606901e+00   1.1966997e+00   1.0891929e+00   1.0085089e+00   1.2640264e+00   9.3246925e-01   1.0198020e+00   1.2055806e+00   1.1237924e+00   1.1060666e+00   1.0517252e+00   1.0684668e+00   7.6844884e-01   1.0572697e+00   8.7373537e-01   9.6283228e-01   9.9350735e-01   1.2412601e+00   7.6322832e-01   1.0298950e+00   8.6148215e-01   1.0042724e+00   9.7012901e-01   9.3712571e-01   8.5845785e-01   8.5862586e-01   1.0336634e+00   1.0955536e+00   9.5302730e-01   9.8696670e-01   1.0633063e+00   1.0026643e+00   9.6380438e-01   1.1711251e+00   9.9273927e-01   1.0260906e+00   1.0863966e+00   1.0482808e+00   9.0361836e-01   9.2358836e-01   8.7794779e-01   
1.2461206e+00   9.2985299e-01   1.0418962e+00   9.4660666e-01   9.5636364e-01   9.0646265e-01   9.9113111e-01   8.3027903e-01   9.3341734e-01   1.1378938e+00   1.0548215e+00   1.0086889e+00   1.1998920e+00   8.6063006e-01   1.0255506e+00   8.4786079e-01   1.0090729e+00   9.2542454e-01   9.5176718e-01   9.3477348e-01   9.0091809e-01   9.6404440e-01   1.1158716e+00   9.9614761e-01   7.7682568e-01   1.0605461e+00   1.0895650e+00   9.0065407e-01   8.7173117e-01   9.9821182e-01   1.2165617e+00   8.6127813e-01   1.1111071e+00   7.9015902e-01   1.0433843e+00   8.6510651e-01   1.0019202e+00   1.0154815e+00   9.4381038e-01   9.8646265e-01   1.0062526e+00   9.7426943e-01   9.8191419e-01   1.3038944e+00   8.6277828e-01   1.0830243e+00   8.6851485e-01   1.1192559e+00   9.9120312e-01   9.6540054e-01   9.1072307e-01   1.1775698e+00   1.1139154e+00   1.1083468e+00   9.9593159e-01   1.0825923e+00   1.1115032e+00   9.7430543e-01   9.5605161e-01   9.2800480e-01   9.4369037e-01   1.1136034e+00   1.1382898e+00   9.5937594e-01   9.8843084e-01   7.4563456e-01
    diff --git a/scipy-0.10.1/scipy/spatial/tests/random-bool-data.txt b/scipy-0.10.1/scipy/spatial/tests/random-bool-data.txt
    deleted file mode 100644
    index df0d838f51..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/random-bool-data.txt
    +++ /dev/null
    @@ -1,100 +0,0 @@
    -0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 1 0 0 1 1
    -1 1 1 1 1 1 1 0 0 1 1 1 0 0 0 0 1 0 1 0 1 1 1 0 1 0 1 1 1 1
    -0 1 0 1 1 0 0 1 1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 1 0 1 1 1 0 1
    -1 1 1 0 0 1 1 0 0 1 1 1 0 0 1 1 0 1 1 1 0 1 1 0 0 0 0 1 0 0
    -1 0 0 0 0 1 1 0 1 1 0 1 0 0 0 0 1 0 0 1 0 1 0 0 1 1 1 1 0 0
    -1 0 1 1 0 0 0 1 1 1 1 1 0 1 1 0 1 0 1 0 1 0 0 0 0 0 0 0 1 1
    -0 1 0 0 1 0 0 0 1 0 0 1 1 0 0 0 0 1 1 0 0 1 0 1 1 1 1 0 1 0
    -1 0 1 1 1 0 0 0 0 1 1 0 0 0 0 1 0 1 0 0 0 1 1 1 0 1 0 0 1 0
    -1 1 1 0 0 1 1 0 0 1 0 0 1 0 0 1 0 1 1 0 1 1 0 1 1 1 0 0 1 1
    -1 1 0 1 0 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0
    -1 0 1 0 1 1 0 1 1 0 1 1 0 1 1 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0
    -1 1 1 1 0 1 0 0 0 0 0 1 0 1 1 1 1 0 1 1 1 1 1 1 0 1 0 1 1 1
    -1 1 1 1 1 1 1 1 1 0 1 1 0 0 1 0 1 0 1 0 1 0 0 0 1 0 0 1 0 1
    -0 1 1 0 0 1 1 0 0 0 0 1 0 1 1 0 1 0 1 0 1 1 0 1 0 0 1 1 1 1
    -1 0 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 0 0 0 1 0 1 0 0 1 1 0 1 1
    -1 0 0 1 1 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 1 1 1 0 0
    -1 1 0 0 1 0 0 0 1 0 0 1 0 1 0 0 1 0 1 1 0 1 0 0 0 1 1 1 1 1
    -0 0 0 1 1 1 1 1 0 1 0 1 1 1 1 0 0 1 1 1 1 1 0 0 1 0 1 0 0 0
    -1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 0 1 1 0 1 1
    -0 0 0 0 1 0 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 1 1 1
    -0 1 0 0 1 1 0 0 1 1 1 0 0 0 1 0 0 0 0 1 1 0 0 1 0 1 1 0 1 0
    -1 0 1 0 1 1 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 0 0 1 1 1 0 1 1
    -0 0 1 0 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0 0 1 0
    -0 1 0 1 1 1 0 1 1 1 0 1 0 1 1 1 0 0 0 0 1 1 1 0 0 1 1 0 0 1
    -0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 1 1 0 1 1 0 0
    -1 0 0 0 1 0 1 0 0 1 0 1 1 0 1 0 1 0 1 0 1 1 1 0 0 0 1 1 1 0
    -1 0 0 0 1 1 1 0 0 1 0 1 1 1 0 0 0 1 1 1 0 0 0 0 1 0 0 0 1 1
    -0 1 0 0 0 1 1 1 0 1 1 1 0 1 0 0 1 1 1 1 0 1 0 1 0 1 1 0 1 1
    -0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 1 0 1 0 1 0 1
    -0 0 1 0 1 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 1 0 1 0
    -1 1 0 1 1 1 1 1 0 1 0 0 0 1 1 1 0 1 0 0 0 1 1 0 1 0 0 0 0 1
    -0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 1 1 1 0 0 1 1 0 1 1 0 0 1 0 1
    -1 1 0 0 0 0 0 1 1 0 1 1 0 0 1 0 1 1 0 0 0 1 0 1 0 1 0 1 0 1
    -1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1
    -0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1 1 0 0 1 0
    -1 1 1 1 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 0 1 1 0 0 1 0 1 0 0 0
    -0 0 0 0 1 1 1 0 1 1 0 0 1 1 1 1 0 1 0 1 1 1 1 1 1 0 0 0 0 0
    -0 1 1 1 0 0 0 1 1 1 0 1 0 0 1 1 1 1 1 0 1 0 0 1 0 0 0 0 1 1
    -0 1 0 0 1 1 1 1 0 0 1 0 1 0 1 1 0 0 1 0 0 1 1 0 0 0 0 1 0 0
    -1 1 0 1 0 0 1 1 0 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 1 0 1 0 0 1
    -0 1 1 0 1 0 1 1 0 0 0 1 1 0 0 0 0 0 0 1 0 0 1 1 0 1 0 0 1 1
    -0 0 1 1 1 0 1 0 0 1 1 0 0 0 1 1 1 0 1 0 0 0 0 1 1 0 1 1 0 0
    -1 0 1 1 1 1 1 1 1 1 0 1 0 0 0 1 0 1 0 0 0 1 1 0 0 1 0 0 0 0
    -1 0 1 1 1 0 1 1 1 1 0 0 1 0 1 1 1 0 0 0 0 1 1 1 1 1 0 1 0 0
    -1 0 0 0 1 1 1 0 1 1 0 0 1 1 1 0 1 0 0 1 0 1 0 1 1 1 0 0 0 1
    -1 0 1 0 1 0 0 0 1 0 0 1 1 0 1 1 0 0 0 1 0 1 1 0 1 0 0 1 0 0
    -0 1 1 0 1 0 1 1 1 1 1 0 0 0 0 1 0 1 0 0 1 1 1 1 0 1 0 1 1 1
    -0 1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 1 0 1 0 0 0 1 1 1 0 0 1 0 1
    -1 0 1 1 1 0 1 0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1
    -1 1 1 1 1 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1
    -1 1 1 1 0 0 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 0 0 0 1 0 1 0 0
    -1 0 1 0 0 1 1 1 1 0 1 1 0 0 1 0 0 1 1 0 1 1 1 1 1 1 0 0 0 0
    -0 1 1 0 0 1 0 0 0 0 0 1 0 1 0 0 1 1 0 1 0 1 0 0 0 1 0 0 1 0
    -0 0 0 1 0 0 0 1 1 1 1 1 0 0 0 1 1 0 0 0 1 1 1 0 1 0 1 1 1 0
    -1 1 0 0 0 0 1 1 1 0 1 0 1 1 1 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0
    -1 0 1 1 1 0 1 0 1 0 0 1 1 1 1 1 0 0 1 1 0 1 1 1 1 0 0 0 0 1
    -0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 1 0 0 1 0 0
    -0 0 1 1 1 1 1 0 1 0 1 0 0 1 1 1 1 0 0 0 1 0 1 1 0 1 1 1 0 0
    -0 0 0 0 0 1 0 0 1 1 0 1 1 0 0 0 0 1 0 1 1 0 0 1 0 0 1 0 1 0
    -1 0 0 1 0 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 1 0 1 0 0 0 1 1 1 1
    -0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 1 0 0 1 1 0 0 0
    -1 0 0 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 0 1 1 0 0 1 0 1 0
    -0 1 0 1 1 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 0 1 1 0 1 1 0 0 0 1
    -1 0 1 1 1 0 0 0 1 0 0 1 0 0 0 1 0 1 1 1 0 0 1 1 1 1 0 0 0 1
    -0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1
    -0 0 1 0 1 1 1 0 0 0 1 0 1 0 1 1 0 0 1 1 0 1 0 1 1 0 0 1 0 1
    -0 1 1 1 1 1 0 0 0 0 0 1 0 1 1 1 1 1 0 1 1 1 0 0 1 0 0 1 1 1
    -1 1 1 1 0 1 1 1 1 1 1 0 0 1 1 0 1 1 0 1 0 1 0 1 0 1 1 0 0 0
    -1 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 1 1 1 1 1 0 0 1 1 1 1 1 0
    -0 0 0 0 1 1 1 0 1 0 0 1 1 0 0 1 1 1 1 0 0 1 0 1 0 0 0 1 0 0
    -1 1 1 1 1 0 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 0 0 0 1 1 0 1 1 0
    -1 0 1 1 0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 0 1 1 0 0 1 1 0 1 0 1
    -1 1 1 1 1 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 1
    -0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 1 1
    -1 1 1 0 1 1 1 1 1 0 0 0 0 1 0 0 1 0 1 0 1 1 1 0 0 1 0 0 1 1
    -1 1 0 1 0 1 0 1 0 0 1 0 0 0 1 0 1 1 0 1 1 0 1 0 0 1 0 0 1 0
    -1 0 1 1 0 0 1 1 0 0 1 1 0 0 0 1 1 0 0 1 0 0 0 0 0 1 0 1 1 0
    -1 1 1 1 1 0 0 1 0 0 1 1 1 0 1 0 0 1 1 1 0 1 1 1 1 1 1 1 1 1
    -1 0 1 1 0 0 1 1 0 1 1 1 0 0 0 1 0 1 0 0 0 1 1 1 1 1 0 0 1 0
    -0 0 0 0 0 1 1 1 0 0 0 0 0 1 1 1 1 1 0 0 1 1 0 0 1 0 0 1 0 0
    -1 1 1 0 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 1 0 1 0
    -1 0 0 1 0 1 0 0 0 0 0 0 1 0 1 0 1 1 0 1 0 1 1 0 0 1 0 1 0 1
    -1 0 0 0 1 0 1 1 0 1 0 0 0 1 0 1 0 0 0 0 1 1 1 0 1 0 1 1 0 1
    -0 1 0 0 0 0 1 0 1 1 1 0 1 1 0 1 0 1 0 1 1 0 0 0 0 0 0 1 1 1
    -0 1 0 0 1 0 1 1 0 0 0 0 1 1 0 1 1 1 0 0 1 1 0 0 1 0 1 0 0 0
    -0 1 0 1 1 1 1 1 1 1 0 0 1 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0
    -0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 1 0 0
    -1 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 0
    -1 0 0 0 1 0 1 0 0 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1 1 0 1 1 1 0
    -0 0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 0 1 0 0 0 1 1 0
    -1 0 0 0 0 0 1 0 1 0 1 0 0 1 1 1 0 1 1 1 0 0 1 0 1 1 1 0 1 0
    -0 1 0 0 1 1 1 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 1 1 1 0 1
    -0 0 0 1 1 0 1 0 1 0 1 0 0 0 1 1 1 0 1 1 0 0 0 1 1 0 0 1 0 1
    -1 1 1 1 1 1 1 1 0 0 1 1 0 0 0 1 0 1 0 1 0 0 0 1 1 0 1 0 1 0
    -0 1 1 0 0 0 1 1 0 0 1 1 0 1 1 1 1 1 0 1 0 0 0 0 1 0 1 0 0 0
    -1 1 1 0 1 1 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 1 0 1 1 0 0 1
    -0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 1 0 1 0 0 0 1 1 0
    -1 1 1 0 1 1 0 1 1 0 1 1 0 1 0 0 1 0 0 0 1 1 1 1 0 1 1 0 1 1
    -0 0 1 1 1 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 0 1 0 0 0 1 1 0 0 1
    -0 0 0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 0 0 0 0 1 1 1 1 1 1 0 0
    diff --git a/scipy-0.10.1/scipy/spatial/tests/test_distance.py b/scipy-0.10.1/scipy/spatial/tests/test_distance.py
    deleted file mode 100644
    index 3c6068c3f2..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/test_distance.py
    +++ /dev/null
    @@ -1,1902 +0,0 @@
    -#! /usr/bin/env python
    -#
    -# Author: Damian Eads
    -# Date: April 17, 2008
    -#
    -# Copyright (C) 2008 Damian Eads
    -#
    -# Redistribution and use in source and binary forms, with or without
    -# modification, are permitted provided that the following conditions
    -# are met:
    -#
    -# 1. Redistributions of source code must retain the above copyright
    -#    notice, this list of conditions and the following disclaimer.
    -#
    -# 2. Redistributions in binary form must reproduce the above
    -#    copyright notice, this list of conditions and the following
    -#    disclaimer in the documentation and/or other materials provided
    -#    with the distribution.
    -#
    -# 3. The name of the author may not be used to endorse or promote
    -#    products derived from this software without specific prior
    -#    written permission.
    -#
    -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
    -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
    -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
    -# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
    -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    -
    -
    -import os.path
    -
    -import numpy as np
    -from numpy.linalg import norm
    -from numpy.testing import verbose, TestCase, run_module_suite, \
    -        assert_raises, assert_array_equal, assert_equal, assert_almost_equal
    -
    -from scipy.spatial.distance import squareform, pdist, cdist, matching, \
    -        jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule, \
    -        num_obs_y, num_obs_dm, is_valid_dm, is_valid_y, minkowski, wminkowski, \
    -        euclidean, sqeuclidean, cosine, correlation, mahalanobis, \
    -        canberra, braycurtis, sokalmichener, _validate_vector
    -
    -
    -_filenames = ["iris.txt",
    -              "cdist-X1.txt",
    -              "cdist-X2.txt",
    -              "pdist-hamming-ml.txt",
    -              "pdist-boolean-inp.txt",
    -              "pdist-jaccard-ml.txt",
    -              "pdist-cityblock-ml-iris.txt",
    -              "pdist-minkowski-3.2-ml-iris.txt",
    -              "pdist-cityblock-ml.txt",
    -              "pdist-correlation-ml-iris.txt",
    -              "pdist-minkowski-5.8-ml-iris.txt",
    -              "pdist-correlation-ml.txt",
    -              "pdist-minkowski-3.2-ml.txt",
    -              "pdist-cosine-ml-iris.txt",
    -              "pdist-seuclidean-ml-iris.txt",
    -              "pdist-cosine-ml.txt",
    -              "pdist-seuclidean-ml.txt",
    -              "pdist-double-inp.txt",
    -              "pdist-spearman-ml.txt",
    -              "pdist-euclidean-ml.txt",
    -              "pdist-euclidean-ml-iris.txt",
    -              "pdist-chebychev-ml.txt",
    -              "pdist-chebychev-ml-iris.txt",
    -              "random-bool-data.txt"]
    -
    -_tdist = np.array([[0,    662,  877,  255,  412,  996],
    -                      [662,  0,    295,  468,  268,  400],
    -                      [877,  295,  0,    754,  564,  138],
    -                      [255,  468,  754,  0,    219,  869],
    -                      [412,  268,  564,  219,  0,    669],
    -                      [996,  400,  138,  869,  669,  0  ]], dtype='double')
    -
    -_ytdist = squareform(_tdist)
    -
    -# A hashmap of expected output arrays for the tests. These arrays
    -# come from a list of text files, which are read prior to testing.
    -
    -eo = {}
    -
    -def load_testing_files():
    -    "Loading test data files for the scipy.spatial.distance tests."
    -    for fn in _filenames:
    -        name = fn.replace(".txt", "").replace("-ml", "")
    -        fqfn = os.path.join(os.path.dirname(__file__), fn)
    -        fp = open(fqfn)
    -        eo[name] = np.loadtxt(fp)
    -        fp.close()
    -        #print "%s: %s   %s" % (name, str(eo[name].shape), str(eo[name].dtype))
    -    eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
    -
    -load_testing_files()
    -
    -
    -class TestCdist(TestCase):
    -    """
    -    Test suite for the cdist function.
    -    """
    -
    -    def test_cdist_euclidean_random(self):
    -        "Tests cdist(X, 'euclidean') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'euclidean')
    -        Y2 = cdist(X1, X2, 'test_euclidean')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_euclidean_random_unicode(self):
    -        "Tests cdist(X, u'euclidean') using unicode metric string"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, u'euclidean')
    -        Y2 = cdist(X1, X2, u'test_euclidean')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_sqeuclidean_random(self):
    -        "Tests cdist(X, 'sqeuclidean') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'sqeuclidean')
    -        Y2 = cdist(X1, X2, 'test_sqeuclidean')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_cityblock_random(self):
    -        "Tests cdist(X, 'cityblock') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'cityblock')
    -        Y2 = cdist(X1, X2, 'test_cityblock')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_hamming_double_random(self):
    -        "Tests cdist(X, 'hamming') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'hamming')
    -        Y2 = cdist(X1, X2, 'test_hamming')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_hamming_bool_random(self):
    -        "Tests cdist(X, 'hamming') on random boolean data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'hamming')
    -        Y2 = cdist(X1, X2, 'test_hamming')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_jaccard_double_random(self):
    -        "Tests cdist(X, 'jaccard') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'jaccard')
    -        Y2 = cdist(X1, X2, 'test_jaccard')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_jaccard_bool_random(self):
    -        "Tests cdist(X, 'jaccard') on random boolean data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'jaccard')
    -        Y2 = cdist(X1, X2, 'test_jaccard')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_chebychev_random(self):
    -        "Tests cdist(X, 'chebychev') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'chebychev')
    -        Y2 = cdist(X1, X2, 'test_chebychev')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_minkowski_random_p3d8(self):
    -        "Tests cdist(X, 'minkowski') on random data. (p=3.8)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'minkowski', p=3.8)
    -        Y2 = cdist(X1, X2, 'test_minkowski', p=3.8)
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_minkowski_random_p4d6(self):
    -        "Tests cdist(X, 'minkowski') on random data. (p=4.6)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'minkowski', p=4.6)
    -        Y2 = cdist(X1, X2, 'test_minkowski', p=4.6)
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_minkowski_random_p1d23(self):
    -        "Tests cdist(X, 'minkowski') on random data. (p=1.23)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'minkowski', p=1.23)
    -        Y2 = cdist(X1, X2, 'test_minkowski', p=1.23)
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -
    -    def test_cdist_wminkowski_random_p3d8(self):
    -        "Tests cdist(X, 'wminkowski') on random data. (p=3.8)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        w = 1.0 / X1.std(axis=0)
    -        Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
    -        Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_wminkowski_random_p4d6(self):
    -        "Tests cdist(X, 'wminkowski') on random data. (p=4.6)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        w = 1.0 / X1.std(axis=0)
    -        Y1 = cdist(X1, X2, 'wminkowski', p=4.6, w=w)
    -        Y2 = cdist(X1, X2, 'test_wminkowski', p=4.6, w=w)
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_wminkowski_random_p1d23(self):
    -        "Tests cdist(X, 'wminkowski') on random data. (p=1.23)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        w = 1.0 / X1.std(axis=0)
    -        Y1 = cdist(X1, X2, 'wminkowski', p=1.23, w=w)
    -        Y2 = cdist(X1, X2, 'test_wminkowski', p=1.23, w=w)
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -
    -    def test_cdist_seuclidean_random(self):
    -        "Tests cdist(X, 'seuclidean') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'seuclidean')
    -        Y2 = cdist(X1, X2, 'test_seuclidean')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -
    -    def test_cdist_cosine_random(self):
    -        "Tests cdist(X, 'cosine') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'cosine')
    -        Y2 = cdist(X1, X2, 'test_cosine')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_correlation_random(self):
    -        "Tests cdist(X, 'correlation') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'correlation')
    -        Y2 = cdist(X1, X2, 'test_correlation')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_mahalanobis_random(self):
    -        "Tests cdist(X, 'mahalanobis') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1']
    -        X2 = eo['cdist-X2']
    -        Y1 = cdist(X1, X2, 'mahalanobis')
    -        Y2 = cdist(X1, X2, 'test_mahalanobis')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_canberra_random(self):
    -        "Tests cdist(X, 'canberra') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'canberra')
    -        Y2 = cdist(X1, X2, 'test_canberra')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_braycurtis_random(self):
    -        "Tests cdist(X, 'braycurtis') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'braycurtis')
    -        Y2 = cdist(X1, X2, 'test_braycurtis')
    -        if verbose > 2:
    -            print Y1, Y2
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_yule_random(self):
    -        "Tests cdist(X, 'yule') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'yule')
    -        Y2 = cdist(X1, X2, 'test_yule')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_matching_random(self):
    -        "Tests cdist(X, 'matching') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'matching')
    -        Y2 = cdist(X1, X2, 'test_matching')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_kulsinski_random(self):
    -        "Tests cdist(X, 'kulsinski') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'kulsinski')
    -        Y2 = cdist(X1, X2, 'test_kulsinski')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_dice_random(self):
    -        "Tests cdist(X, 'dice') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'dice')
    -        Y2 = cdist(X1, X2, 'test_dice')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_rogerstanimoto_random(self):
    -        "Tests cdist(X, 'rogerstanimoto') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'rogerstanimoto')
    -        Y2 = cdist(X1, X2, 'test_rogerstanimoto')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_russellrao_random(self):
    -        "Tests cdist(X, 'russellrao') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'russellrao')
    -        Y2 = cdist(X1, X2, 'test_russellrao')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_sokalmichener_random(self):
    -        "Tests cdist(X, 'sokalmichener') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'sokalmichener')
    -        Y2 = cdist(X1, X2, 'test_sokalmichener')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -    def test_cdist_sokalsneath_random(self):
    -        "Tests cdist(X, 'sokalsneath') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X1 = eo['cdist-X1'] < 0.5
    -        X2 = eo['cdist-X2'] < 0.5
    -        Y1 = cdist(X1, X2, 'sokalsneath')
    -        Y2 = cdist(X1, X2, 'test_sokalsneath')
    -        if verbose > 2:
    -            print (Y1-Y2).max()
    -        self.assertTrue(within_tol(Y1, Y2, eps))
    -
    -
    -class TestPdist(TestCase):
    -    """
    -    Test suite for the pdist function.
    -    """
    -
    -    ################### pdist: euclidean
    -    def test_pdist_euclidean_random(self):
    -        "Tests pdist(X, 'euclidean') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-euclidean']
    -
    -        Y_test1 = pdist(X, 'euclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_euclidean_random_u(self):
    -        "Tests pdist(X, 'euclidean') with unicode metric string"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-euclidean']
    -
    -        Y_test1 = pdist(X, u'euclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_euclidean_random_float32(self):
    -        "Tests pdist(X, 'euclidean') on random data (float32)."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-euclidean']
    -
    -        Y_test1 = pdist(X, 'euclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_euclidean_random_nonC(self):
    -        "Tests pdist(X, 'test_euclidean') [the non-C implementation] on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-euclidean']
    -        Y_test2 = pdist(X, 'test_euclidean')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_euclidean_iris_double(self):
    -        "Tests pdist(X, 'euclidean') on the Iris data set."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-euclidean-iris']
    -
    -        Y_test1 = pdist(X, 'euclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_euclidean_iris_float32(self):
    -        "Tests pdist(X, 'euclidean') on the Iris data set. (float32)"
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-euclidean-iris']
    -
    -        Y_test1 = pdist(X, 'euclidean')
    -        if verbose > 2:
    -            print np.abs(Y_right - Y_test1).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_euclidean_iris_nonC(self):
    -        "Tests pdist(X, 'test_euclidean') [the non-C implementation] on the Iris data set."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-euclidean-iris']
    -        Y_test2 = pdist(X, 'test_euclidean')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: seuclidean
    -    def test_pdist_seuclidean_random(self):
    -        "Tests pdist(X, 'seuclidean') on random data."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-seuclidean']
    -
    -        Y_test1 = pdist(X, 'seuclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_seuclidean_random_float32(self):
    -        "Tests pdist(X, 'seuclidean') on random data (float32)."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-seuclidean']
    -
    -        Y_test1 = pdist(X, 'seuclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_seuclidean_random_nonC(self):
    -        "Tests pdist(X, 'test_sqeuclidean') [the non-C implementation] on random data."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-seuclidean']
    -        Y_test2 = pdist(X, 'test_sqeuclidean')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_seuclidean_iris(self):
    -        "Tests pdist(X, 'seuclidean') on the Iris data set."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-seuclidean-iris']
    -
    -        Y_test1 = pdist(X, 'seuclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_seuclidean_iris_float32(self):
    -        "Tests pdist(X, 'seuclidean') on the Iris data set (float32)."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-seuclidean-iris']
    -
    -        Y_test1 = pdist(X, 'seuclidean')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_seuclidean_iris_nonC(self):
    -        "Tests pdist(X, 'test_seuclidean') [the non-C implementation] on the Iris data set."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-seuclidean-iris']
    -        Y_test2 = pdist(X, 'test_sqeuclidean')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: cosine
    -    def test_pdist_cosine_random(self):
    -        "Tests pdist(X, 'cosine') on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-cosine']
    -        Y_test1 = pdist(X, 'cosine')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_cosine_random_float32(self):
    -        "Tests pdist(X, 'cosine') on random data. (float32)"
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-cosine']
    -
    -        Y_test1 = pdist(X, 'cosine')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_cosine_random_nonC(self):
    -        "Tests pdist(X, 'test_cosine') [the non-C implementation] on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-cosine']
    -        Y_test2 = pdist(X, 'test_cosine')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_cosine_iris(self):
    -        "Tests pdist(X, 'cosine') on the Iris data set."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-cosine-iris']
    -
    -        Y_test1 = pdist(X, 'cosine')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -        #print "cosine-iris", np.abs(Y_test1 - Y_right).max()
    -
    -    def test_pdist_cosine_iris_float32(self):
    -        "Tests pdist(X, 'cosine') on the Iris data set."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-cosine-iris']
    -
    -        Y_test1 = pdist(X, 'cosine')
    -        if verbose > 2:
    -            print np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -        #print "cosine-iris", np.abs(Y_test1 - Y_right).max()
    -
    -    def test_pdist_cosine_iris_nonC(self):
    -        "Tests pdist(X, 'test_cosine') [the non-C implementation] on the Iris data set."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-cosine-iris']
    -        Y_test2 = pdist(X, 'test_cosine')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: cityblock
    -    def test_pdist_cityblock_random(self):
    -        "Tests pdist(X, 'cityblock') on random data."
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-cityblock']
    -        Y_test1 = pdist(X, 'cityblock')
    -        #print "cityblock", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_cityblock_random_float32(self):
    -        "Tests pdist(X, 'cityblock') on random data. (float32)"
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-cityblock']
    -        Y_test1 = pdist(X, 'cityblock')
    -        #print "cityblock", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_cityblock_random_nonC(self):
    -        "Tests pdist(X, 'test_cityblock') [the non-C implementation] on random data."
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-cityblock']
    -        Y_test2 = pdist(X, 'test_cityblock')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_cityblock_iris(self):
    -        "Tests pdist(X, 'cityblock') on the Iris data set."
    -        eps = 1e-14
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-cityblock-iris']
    -
    -        Y_test1 = pdist(X, 'cityblock')
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -        #print "cityblock-iris", np.abs(Y_test1 - Y_right).max()
    -
    -    def test_pdist_cityblock_iris_float32(self):
    -        "Tests pdist(X, 'cityblock') on the Iris data set. (float32)"
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-cityblock-iris']
    -
    -        Y_test1 = pdist(X, 'cityblock')
    -        if verbose > 2:
    -            print "cityblock-iris-float32", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_cityblock_iris_nonC(self):
    -        "Tests pdist(X, 'test_cityblock') [the non-C implementation] on the Iris data set."
    -        eps = 1e-14
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-cityblock-iris']
    -        Y_test2 = pdist(X, 'test_cityblock')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: correlation
    -    def test_pdist_correlation_random(self):
    -        "Tests pdist(X, 'correlation') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-correlation']
    -
    -        Y_test1 = pdist(X, 'correlation')
    -        #print "correlation", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_correlation_random_float32(self):
    -        "Tests pdist(X, 'correlation') on random data. (float32)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-correlation']
    -
    -        Y_test1 = pdist(X, 'correlation')
    -        #print "correlation", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_correlation_random_nonC(self):
    -        "Tests pdist(X, 'test_correlation') [the non-C implementation] on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-correlation']
    -        Y_test2 = pdist(X, 'test_correlation')
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_correlation_iris(self):
    -        "Tests pdist(X, 'correlation') on the Iris data set."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-correlation-iris']
    -
    -        Y_test1 = pdist(X, 'correlation')
    -        #print "correlation-iris", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_correlation_iris_float32(self):
    -        "Tests pdist(X, 'correlation') on the Iris data set. (float32)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = np.float32(eo['pdist-correlation-iris'])
    -
    -        Y_test1 = pdist(X, 'correlation')
    -        if verbose > 2:
    -            print "correlation-iris", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_correlation_iris_nonC(self):
    -        "Tests pdist(X, 'test_correlation') [the non-C implementation] on the Iris data set."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-correlation-iris']
    -        Y_test2 = pdist(X, 'test_correlation')
    -        #print "test-correlation-iris", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################# minkowski
    -
    -    def test_pdist_minkowski_random(self):
    -        "Tests pdist(X, 'minkowski') on random data."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-minkowski-3.2']
    -
    -        Y_test1 = pdist(X, 'minkowski', 3.2)
    -        #print "minkowski", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_minkowski_random_float32(self):
    -        "Tests pdist(X, 'minkowski') on random data. (float32)"
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-minkowski-3.2']
    -
    -        Y_test1 = pdist(X, 'minkowski', 3.2)
    -        #print "minkowski", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_minkowski_random_nonC(self):
    -        "Tests pdist(X, 'test_minkowski') [the non-C implementation] on random data."
    -        eps = 1e-05
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-minkowski-3.2']
    -        Y_test2 = pdist(X, 'test_minkowski', 3.2)
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_minkowski_3_2_iris(self):
    -        "Tests pdist(X, 'minkowski') on iris data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-minkowski-3.2-iris']
    -        Y_test1 = pdist(X, 'minkowski', 3.2)
    -        #print "minkowski-iris-3.2", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_minkowski_3_2_iris_float32(self):
    -        "Tests pdist(X, 'minkowski') on iris data. (float32)"
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-minkowski-3.2-iris']
    -        Y_test1 = pdist(X, 'minkowski', 3.2)
    -        #print "minkowski-iris-3.2", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_minkowski_3_2_iris_nonC(self):
    -        "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-minkowski-3.2-iris']
    -        Y_test2 = pdist(X, 'test_minkowski', 3.2)
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_minkowski_5_8_iris(self):
    -        "Tests pdist(X, 'minkowski') on iris data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-minkowski-5.8-iris']
    -        Y_test1 = pdist(X, 'minkowski', 5.8)
    -        #print "minkowski-iris-5.8", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_minkowski_5_8_iris_float32(self):
    -        "Tests pdist(X, 'minkowski') on iris data. (float32)"
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-minkowski-5.8-iris']
    -
    -        Y_test1 = pdist(X, 'minkowski', 5.8)
    -        if verbose > 2:
    -            print "minkowski-iris-5.8", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_minkowski_5_8_iris_nonC(self):
    -        "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-minkowski-5.8-iris']
    -        Y_test2 = pdist(X, 'test_minkowski', 5.8)
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################# wminkowski
    -
    -    def test_pdist_wminkowski(self):
    -        x = np.array([[0.0, 0.0, 0.0],
    -                      [1.0, 0.0, 0.0],
    -                      [0.0, 1.0, 0.0],
    -                      [1.0, 1.0, 1.0]])
    -
    -        p2_expected = [1.0, 1.0, np.sqrt(3),
    -                       np.sqrt(2), np.sqrt(2),
    -                       np.sqrt(2)]
    -        p1_expected = [0.5, 1.0, 3.5,
    -                       1.5, 3.0,
    -                       2.5]
    -        dist = pdist(x, metric=wminkowski, w=[1.0, 1.0, 1.0])
    -        assert_array_equal(dist, p2_expected)
    -
    -        dist = pdist(x, metric=wminkowski, w=[0.5, 1.0, 2.0], p=1)
    -        assert_array_equal(dist, p1_expected)
    -
    -        dist = pdist(x, metric='wminkowski', w=[1.0, 1.0, 1.0])
    -        assert_array_equal(dist, p2_expected)
    -
    -        dist = pdist(x, metric='wminkowski', w=[0.5, 1.0, 2.0], p=1)
    -        assert_array_equal(dist, p1_expected)
    -
    -    ################### pdist: hamming
    -    def test_pdist_hamming_random(self):
    -        "Tests pdist(X, 'hamming') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-boolean-inp']
    -        Y_right = eo['pdist-hamming']
    -
    -        Y_test1 = pdist(X, 'hamming')
    -        #print "hamming", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_hamming_random_float32(self):
    -        "Tests pdist(X, 'hamming') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-hamming']
    -
    -        Y_test1 = pdist(X, 'hamming')
    -        #print "hamming", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_hamming_random_nonC(self):
    -        "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-boolean-inp']
    -        Y_right = eo['pdist-hamming']
    -        Y_test2 = pdist(X, 'test_hamming')
    -        #print "test-hamming", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: hamming (double)
    -    def test_pdist_dhamming_random(self):
    -        "Tests pdist(X, 'hamming') on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float64(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-hamming']
    -        Y_test1 = pdist(X, 'hamming')
    -        #print "hamming", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_dhamming_random_float32(self):
    -        "Tests pdist(X, 'hamming') on random data. (float32)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-hamming']
    -        Y_test1 = pdist(X, 'hamming')
    -        #print "hamming", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_dhamming_random_nonC(self):
    -        "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data."
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float64(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-hamming']
    -        Y_test2 = pdist(X, 'test_hamming')
    -        #print "test-hamming", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: jaccard
    -    def test_pdist_jaccard_random(self):
    -        "Tests pdist(X, 'jaccard') on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-boolean-inp']
    -        Y_right = eo['pdist-jaccard']
    -
    -        Y_test1 = pdist(X, 'jaccard')
    -        #print "jaccard", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_jaccard_random_float32(self):
    -        "Tests pdist(X, 'jaccard') on random data. (float32)"
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-jaccard']
    -
    -        Y_test1 = pdist(X, 'jaccard')
    -        #print "jaccard", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_jaccard_random_nonC(self):
    -        "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-boolean-inp']
    -        Y_right = eo['pdist-jaccard']
    -        Y_test2 = pdist(X, 'test_jaccard')
    -        #print "test-jaccard", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: jaccard (double)
    -    def test_pdist_djaccard_random(self):
    -        "Tests pdist(X, 'jaccard') on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = np.float64(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-jaccard']
    -
    -        Y_test1 = pdist(X, 'jaccard')
    -        #print "jaccard", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_djaccard_random_float32(self):
    -        "Tests pdist(X, 'jaccard') on random data. (float32)"
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-jaccard']
    -
    -        Y_test1 = pdist(X, 'jaccard')
    -        #print "jaccard", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_djaccard_random_nonC(self):
    -        "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = np.float64(eo['pdist-boolean-inp'])
    -        Y_right = eo['pdist-jaccard']
    -        Y_test2 = pdist(X, 'test_jaccard')
    -        #print "test-jaccard", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    ################### pdist: chebychev
    -    def test_pdist_chebychev_random(self):
    -        "Tests pdist(X, 'chebychev') on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-chebychev']
    -
    -        Y_test1 = pdist(X, 'chebychev')
    -        #print "chebychev", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_chebychev_random_float32(self):
    -        "Tests pdist(X, 'chebychev') on random data. (float32)"
    -        eps = 1e-07
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['pdist-double-inp'])
    -        Y_right = eo['pdist-chebychev']
    -
    -        Y_test1 = pdist(X, 'chebychev')
    -        if verbose > 2:
    -            print "chebychev", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_chebychev_random_nonC(self):
    -        "Tests pdist(X, 'test_chebychev') [the non-C implementation] on random data."
    -        eps = 1e-08
    -        # Get the data: the input matrix and the right output.
    -        X = eo['pdist-double-inp']
    -        Y_right = eo['pdist-chebychev']
    -        Y_test2 = pdist(X, 'test_chebychev')
    -        #print "test-chebychev", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_chebychev_iris(self):
    -        "Tests pdist(X, 'chebychev') on the Iris data set."
    -        eps = 1e-15
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-chebychev-iris']
    -        Y_test1 = pdist(X, 'chebychev')
    -        #print "chebychev-iris", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_chebychev_iris_float32(self):
    -        "Tests pdist(X, 'chebychev') on the Iris data set. (float32)"
    -        eps = 1e-06
    -        # Get the data: the input matrix and the right output.
    -        X = np.float32(eo['iris'])
    -        Y_right = eo['pdist-chebychev-iris']
    -        Y_test1 = pdist(X, 'chebychev')
    -        if verbose > 2:
    -            print "chebychev-iris", np.abs(Y_test1 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test1, Y_right, eps))
    -
    -    def test_pdist_chebychev_iris_nonC(self):
    -        "Tests pdist(X, 'test_chebychev') [the non-C implementation] on the Iris data set."
    -        eps = 1e-15
    -        # Get the data: the input matrix and the right output.
    -        X = eo['iris']
    -        Y_right = eo['pdist-chebychev-iris']
    -        Y_test2 = pdist(X, 'test_chebychev')
    -        #print "test-chebychev-iris", np.abs(Y_test2 - Y_right).max()
    -        self.assertTrue(within_tol(Y_test2, Y_right, eps))
    -
    -    def test_pdist_matching_mtica1(self):
    -        "Tests matching(*,*) with mtica example #1 (nums)."
    -        m = matching(np.array([1, 0, 1, 1, 0]),
    -                     np.array([1, 1, 0, 1, 1]))
    -        m2 = matching(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                      np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        self.assertTrue(np.abs(m - 0.6) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - 0.6) <= 1e-10)
    -
    -    def test_pdist_matching_mtica2(self):
    -        "Tests matching(*,*) with mtica example #2."
    -        m = matching(np.array([1, 0, 1]),
    -                     np.array([1, 1, 0]))
    -        m2 = matching(np.array([1, 0, 1], dtype=np.bool),
    -                      np.array([1, 1, 0], dtype=np.bool))
    -        self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10)
    -
    -    def test_pdist_matching_match(self):
    -        "Tests pdist(X, 'matching') to see if the two implementations match on random boolean input data."
    -        D = eo['random-bool-data']
    -        B = np.bool_(D)
    -        if verbose > 2:
    -            print B.shape, B.dtype
    -        eps = 1e-10
    -        y1 = pdist(B, "matching")
    -        y2 = pdist(B, "test_matching")
    -        y3 = pdist(D, "test_matching")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y1-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_jaccard_mtica1(self):
    -        "Tests jaccard(*,*) with mtica example #1."
    -        m = jaccard(np.array([1, 0, 1, 1, 0]),
    -                    np.array([1, 1, 0, 1, 1]))
    -        m2 = jaccard(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                     np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        self.assertTrue(np.abs(m - 0.6) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - 0.6) <= 1e-10)
    -
    -    def test_pdist_jaccard_mtica2(self):
    -        "Tests jaccard(*,*) with mtica example #2."
    -        m = jaccard(np.array([1, 0, 1]),
    -                    np.array([1, 1, 0]))
    -        m2 = jaccard(np.array([1, 0, 1], dtype=np.bool),
    -                     np.array([1, 1, 0], dtype=np.bool))
    -        self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10)
    -
    -    def test_pdist_jaccard_match(self):
    -        "Tests pdist(X, 'jaccard') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "jaccard")
    -        y2 = pdist(D, "test_jaccard")
    -        y3 = pdist(np.bool_(D), "test_jaccard")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_yule_mtica1(self):
    -        "Tests yule(*,*) with mtica example #1."
    -        m = yule(np.array([1, 0, 1, 1, 0]),
    -                 np.array([1, 1, 0, 1, 1]))
    -        m2 = yule(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                  np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - 2.0) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - 2.0) <= 1e-10)
    -
    -    def test_pdist_yule_mtica2(self):
    -        "Tests yule(*,*) with mtica example #2."
    -        m = yule(np.array([1, 0, 1]),
    -                 np.array([1, 1, 0]))
    -        m2 = yule(np.array([1, 0, 1], dtype=np.bool),
    -                  np.array([1, 1, 0], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - 2.0) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - 2.0) <= 1e-10)
    -
    -    def test_pdist_yule_match(self):
    -        "Tests pdist(X, 'yule') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "yule")
    -        y2 = pdist(D, "test_yule")
    -        y3 = pdist(np.bool_(D), "test_yule")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_dice_mtica1(self):
    -        "Tests dice(*,*) with mtica example #1."
    -        m = dice(np.array([1, 0, 1, 1, 0]),
    -                 np.array([1, 1, 0, 1, 1]))
    -        m2 = dice(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                  np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (3.0/7.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (3.0/7.0)) <= 1e-10)
    -
    -    def test_pdist_dice_mtica2(self):
    -        "Tests dice(*,*) with mtica example #2."
    -        m = dice(np.array([1, 0, 1]),
    -                 np.array([1, 1, 0]))
    -        m2 = dice(np.array([1, 0, 1], dtype=np.bool),
    -                  np.array([1, 1, 0], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - 0.5) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - 0.5) <= 1e-10)
    -
    -    def test_pdist_dice_match(self):
    -        "Tests pdist(X, 'dice') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "dice")
    -        y2 = pdist(D, "test_dice")
    -        y3 = pdist(D, "test_dice")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_sokalsneath_mtica1(self):
    -        "Tests sokalsneath(*,*) with mtica example #1."
    -        m = sokalsneath(np.array([1, 0, 1, 1, 0]),
    -                        np.array([1, 1, 0, 1, 1]))
    -        m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                         np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (3.0/4.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (3.0/4.0)) <= 1e-10)
    -
    -    def test_pdist_sokalsneath_mtica2(self):
    -        "Tests sokalsneath(*,*) with mtica example #2."
    -        m = sokalsneath(np.array([1, 0, 1]),
    -                        np.array([1, 1, 0]))
    -        m2 = sokalsneath(np.array([1, 0, 1], dtype=np.bool),
    -                         np.array([1, 1, 0], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (4.0/5.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (4.0/5.0)) <= 1e-10)
    -
    -    def test_pdist_sokalsneath_match(self):
    -        "Tests pdist(X, 'sokalsneath') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "sokalsneath")
    -        y2 = pdist(D, "test_sokalsneath")
    -        y3 = pdist(np.bool_(D), "test_sokalsneath")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_rogerstanimoto_mtica1(self):
    -        "Tests rogerstanimoto(*,*) with mtica example #1."
    -        m = rogerstanimoto(np.array([1, 0, 1, 1, 0]),
    -                           np.array([1, 1, 0, 1, 1]))
    -        m2 = rogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                            np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (3.0/4.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (3.0/4.0)) <= 1e-10)
    -
    -    def test_pdist_rogerstanimoto_mtica2(self):
    -        "Tests rogerstanimoto(*,*) with mtica example #2."
    -        m = rogerstanimoto(np.array([1, 0, 1]),
    -                           np.array([1, 1, 0]))
    -        m2 = rogerstanimoto(np.array([1, 0, 1], dtype=np.bool),
    -                            np.array([1, 1, 0], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (4.0/5.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (4.0/5.0)) <= 1e-10)
    -
    -    def test_pdist_rogerstanimoto_match(self):
    -        "Tests pdist(X, 'rogerstanimoto') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "rogerstanimoto")
    -        y2 = pdist(D, "test_rogerstanimoto")
    -        y3 = pdist(np.bool_(D), "test_rogerstanimoto")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_russellrao_mtica1(self):
    -        "Tests russellrao(*,*) with mtica example #1."
    -        m = russellrao(np.array([1, 0, 1, 1, 0]),
    -                       np.array([1, 1, 0, 1, 1]))
    -        m2 = russellrao(np.array([1, 0, 1, 1, 0], dtype=np.bool),
    -                        np.array([1, 1, 0, 1, 1], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (3.0/5.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (3.0/5.0)) <= 1e-10)
    -
    -    def test_pdist_russellrao_mtica2(self):
    -        "Tests russellrao(*,*) with mtica example #2."
    -        m = russellrao(np.array([1, 0, 1]),
    -                       np.array([1, 1, 0]))
    -        m2 = russellrao(np.array([1, 0, 1], dtype=np.bool),
    -                        np.array([1, 1, 0], dtype=np.bool))
    -        if verbose > 2:
    -            print m
    -        self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10)
    -        self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10)
    -
    -    def test_pdist_russellrao_match(self):
    -        "Tests pdist(X, 'russellrao') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "russellrao")
    -        y2 = pdist(D, "test_russellrao")
    -        y3 = pdist(np.bool_(D), "test_russellrao")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_sokalmichener_match(self):
    -        "Tests pdist(X, 'sokalmichener') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "sokalmichener")
    -        y2 = pdist(D, "test_sokalmichener")
    -        y3 = pdist(np.bool_(D), "test_sokalmichener")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -            print np.abs(y2-y3).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -        self.assertTrue(within_tol(y2, y3, eps))
    -
    -    def test_pdist_kulsinski_match(self):
    -        "Tests pdist(X, 'kulsinski') to see if the two implementations match on random double input data."
    -        D = eo['random-bool-data']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "kulsinski")
    -        y2 = pdist(D, "test_kulsinski")
    -        y3 = pdist(np.bool_(D), "test_kulsinski")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -
    -    def test_pdist_canberra_match(self):
    -        "Tests pdist(X, 'canberra') to see if the two implementations match on the Iris data set."
    -        D = eo['iris']
    -        if verbose > 2:
    -            print D.shape, D.dtype
    -        eps = 1e-10
    -        y1 = pdist(D, "canberra")
    -        y2 = pdist(D, "test_canberra")
    -        if verbose > 2:
    -            print np.abs(y1-y2).max()
    -        self.assertTrue(within_tol(y1, y2, eps))
    -
    -    def test_pdist_canberra_ticket_711(self):
    -        "Tests pdist(X, 'canberra') to see if Canberra gives the right result as reported in Scipy bug report 711."
    -        eps = 1e-8
    -        pdist_y = pdist(([3.3], [3.4]), "canberra")
    -        right_y = 0.01492537
    -        if verbose > 2:
    -            print np.abs(pdist_y-right_y).max()
    -        self.assertTrue(within_tol(pdist_y, right_y, eps))
    -
    -
    -def within_tol(a, b, tol):
    -    return np.abs(a - b).max() < tol
    -
    -
    -class TestSomeDistanceFunctions(TestCase):
    -
    -    def setUp(self):
    -        # 1D arrays
    -        x = np.array([1.0, 2.0, 3.0])
    -        y = np.array([1.0, 1.0, 5.0])
    -        # 3x1 arrays
    -        x31 = x[:,np.newaxis]
    -        y31 = y[:,np.newaxis]
    -        # 1x3 arrays
    -        x13 = x31.T
    -        y13 = y31.T
    -
    -        self.cases = [(x,y), (x31, y31), (x13, y13)]
    -
    -    def test_minkowski(self):
    -        for x, y in self.cases:
    -            dist1 = minkowski(x, y, p=1)
    -            assert_almost_equal(dist1, 3.0)
    -            dist1p5 = minkowski(x, y, p=1.5)
    -            assert_almost_equal(dist1p5, (1.0+2.0**1.5)**(2./3))
    -            dist2 = minkowski(x, y, p=2)
    -            assert_almost_equal(dist2, np.sqrt(5))
    -
    -    def test_wminkowski(self):
    -        w = np.array([1.0, 2.0, 0.5])
    -        for x, y in self.cases:
    -            dist1 = wminkowski(x, y, p=1, w=w)
    -            assert_almost_equal(dist1, 3.0)
    -            dist1p5 = wminkowski(x, y, p=1.5, w=w)
    -            assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
    -            dist2 = wminkowski(x, y, p=2, w=w)
    -            assert_almost_equal(dist2, np.sqrt(5))
    -
    -    def test_euclidean(self):
    -        for x, y in self.cases:
    -            dist = euclidean(x, y)
    -            assert_almost_equal(dist, np.sqrt(5))
    -
    -    def test_sqeuclidean(self):
    -        for x, y in self.cases:
    -            dist = sqeuclidean(x, y)
    -            assert_almost_equal(dist, 5.0)
    -
    -    def test_cosine(self):
    -        for x, y in self.cases:
    -            dist = cosine(x, y)
    -            assert_almost_equal(dist, 1.0 - 18.0/(np.sqrt(14)*np.sqrt(27)))
    -
    -    def test_correlation(self):
    -        xm = np.array([-1.0, 0, 1.0])
    -        ym = np.array([-4.0/3, -4.0/3, 5.0-7.0/3])
    -        for x, y in self.cases:
    -            dist = correlation(x, y)
    -            assert_almost_equal(dist, 1.0 - np.dot(xm, ym)/(norm(xm)*norm(ym)))
    -
    -    def test_mahalanobis(self):
    -        x = np.array([1.0, 2.0, 3.0])
    -        y = np.array([1.0, 1.0, 5.0])
    -        vi = np.array([[2.0, 1.0, 0.0],[1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
    -        for x, y in self.cases:
    -            dist = mahalanobis(x, y, vi)
    -            assert_almost_equal(dist, np.sqrt(6.0))
    -
    -
    -class TestSquareForm(TestCase):
    -
    -    ################### squareform
    -    def test_squareform_empty_matrix(self):
    -        "Tests squareform on an empty matrix."
    -        A = np.zeros((0,0))
    -        rA = squareform(np.array(A, dtype='double'))
    -        self.assertTrue(rA.shape == (0,))
    -
    -    def test_squareform_empty_vector(self):
    -        "Tests squareform on an empty vector."
    -        v = np.zeros((0,))
    -        rv = squareform(np.array(v, dtype='double'))
    -        self.assertTrue(rv.shape == (1,1))
    -        self.assertTrue(rv[0, 0] == 0)
    -
    -    def test_squareform_1by1_matrix(self):
    -        "Tests squareform on a 1x1 matrix."
    -        A = np.zeros((1,1))
    -        rA = squareform(np.array(A, dtype='double'))
    -        self.assertTrue(rA.shape == (0,))
    -
    -    def test_squareform_one_vector(self):
    -        "Tests squareform on a 1-D array, length=1."
    -        v = np.ones((1,)) * 8.3
    -        rv = squareform(np.array(v, dtype='double'))
    -        self.assertTrue(rv.shape == (2,2))
    -        self.assertTrue(rv[0,1] == 8.3)
    -        self.assertTrue(rv[1,0] == 8.3)
    -
    -    def test_squareform_2by2_matrix(self):
    -        "Tests squareform on a 2x2 matrix."
    -        A = np.zeros((2,2))
    -        A[0,1]=0.8
    -        A[1,0]=0.8
    -        rA = squareform(np.array(A, dtype='double'))
    -        self.assertTrue(rA.shape == (1,))
    -        self.assertTrue(rA[0] == 0.8)
    -
    -    def test_squareform_multi_matrix(self):
    -        "Tests squareform on a square matrices of multiple sizes."
    -        for n in xrange(2, 5):
    -            yield self.check_squareform_multi_matrix(n)
    -
    -    def check_squareform_multi_matrix(self, n):
    -        X = np.random.rand(n, 4)
    -        Y = pdist(X)
    -        self.assertTrue(len(Y.shape) == 1)
    -        A = squareform(Y)
    -        Yr = squareform(A)
    -        s = A.shape
    -        k = 0
    -        if verbose >= 3:
    -            print A.shape, Y.shape, Yr.shape
    -        self.assertTrue(len(s) == 2)
    -        self.assertTrue(len(Yr.shape) == 1)
    -        self.assertTrue(s[0] == s[1])
    -        for i in xrange(0, s[0]):
    -            for j in xrange(i+1, s[1]):
    -                if i != j:
    -                    #print i, j, k, A[i, j], Y[k]
    -                    self.assertTrue(A[i, j] == Y[k])
    -                    k += 1
    -                else:
    -                    self.assertTrue(A[i, j] == 0)
    -
    -
    -class TestNumObsY(TestCase):
    -
    -    def test_num_obs_y_multi_matrix(self):
    -        "Tests num_obs_y with observation matrices of multiple sizes."
    -        for n in xrange(2, 10):
    -            X = np.random.rand(n, 4)
    -            Y = pdist(X)
    -            #print A.shape, Y.shape, Yr.shape
    -            self.assertTrue(num_obs_y(Y) == n)
    -
    -    def test_num_obs_y_1(self):
    -        "Tests num_obs_y(y) on a condensed distance matrix over 1 observations. Expecting exception."
    -        self.assertRaises(ValueError, self.check_y, 1)
    -
    -    def test_num_obs_y_2(self):
    -        "Tests num_obs_y(y) on a condensed distance matrix over 2 observations."
    -        self.assertTrue(self.check_y(2))
    -
    -    def test_num_obs_y_3(self):
    -        "Tests num_obs_y(y) on a condensed distance matrix over 3 observations."
    -        self.assertTrue(self.check_y(3))
    -
    -    def test_num_obs_y_4(self):
    -        "Tests num_obs_y(y) on a condensed distance matrix over 4 observations."
    -        self.assertTrue(self.check_y(4))
    -
    -    def test_num_obs_y_5_10(self):
    -        "Tests num_obs_y(y) on a condensed distance matrix between 5 and 15 observations."
    -        for i in xrange(5, 16):
    -            self.minit(i)
    -
    -    def test_num_obs_y_2_100(self):
    -        "Tests num_obs_y(y) on 100 improper condensed distance matrices. Expecting exception."
    -        a = set([])
    -        for n in xrange(2, 16):
    -            a.add(n*(n-1)/2)
    -        for i in xrange(5, 105):
    -            if i not in a:
    -                self.assertRaises(ValueError, self.bad_y, i)
    -
    -    def minit(self, n):
    -        self.assertTrue(self.check_y(n))
    -
    -    def bad_y(self, n):
    -        y = np.random.rand(n)
    -        return num_obs_y(y)
    -
    -    def check_y(self, n):
    -        return num_obs_y(self.make_y(n)) == n
    -
    -    def make_y(self, n):
    -        return np.random.rand((n*(n-1)/2))
    -
    -
    -class TestNumObsDM(TestCase):
    -
    -    ############## num_obs_dm
    -    def test_num_obs_dm_multi_matrix(self):
    -        "Tests num_obs_dm with observation matrices of multiple sizes."
    -        for n in xrange(1, 10):
    -            X = np.random.rand(n, 4)
    -            Y = pdist(X)
    -            A = squareform(Y)
    -            if verbose >= 3:
    -                print A.shape, Y.shape
    -            self.assertTrue(num_obs_dm(A) == n)
    -
    -    def test_num_obs_dm_0(self):
    -        "Tests num_obs_dm(D) on a 0x0 distance matrix. Expecting exception."
    -        self.assertTrue(self.check_D(0))
    -
    -    def test_num_obs_dm_1(self):
    -        "Tests num_obs_dm(D) on a 1x1 distance matrix."
    -        self.assertTrue(self.check_D(1))
    -
    -    def test_num_obs_dm_2(self):
    -        "Tests num_obs_dm(D) on a 2x2 distance matrix."
    -        self.assertTrue(self.check_D(2))
    -
    -    def test_num_obs_dm_3(self):
    -        "Tests num_obs_dm(D) on a 3x3 distance matrix."
    -        self.assertTrue(self.check_D(2))
    -
    -    def test_num_obs_dm_4(self):
    -        "Tests num_obs_dm(D) on a 4x4 distance matrix."
    -        self.assertTrue(self.check_D(4))
    -
    -    def check_D(self, n):
    -        return num_obs_dm(self.make_D(n)) == n
    -
    -    def make_D(self, n):
    -        return np.random.rand(n, n)
    -
    -
    -def is_valid_dm_throw(D):
    -    return is_valid_dm(D, throw=True)
    -
    -
    -class TestIsValidDM(TestCase):
    -
    -    def test_is_valid_dm_int16_array_E(self):
    -        "Tests is_valid_dm(*) on an int16 array. Exception expected."
    -        D = np.zeros((5, 5), dtype='i')
    -        self.assertRaises(TypeError, is_valid_dm_throw, (D))
    -
    -    def test_is_valid_dm_int16_array_F(self):
    -        "Tests is_valid_dm(*) on an int16 array. False expected."
    -        D = np.zeros((5, 5), dtype='i')
    -        self.assertTrue(is_valid_dm(D) == False)
    -
    -    def test_is_valid_dm_improper_shape_1D_E(self):
    -        "Tests is_valid_dm(*) on a 1D array. Exception expected."
    -        D = np.zeros((5,), dtype=np.double)
    -        self.assertRaises(ValueError, is_valid_dm_throw, (D))
    -
    -    def test_is_valid_dm_improper_shape_1D_F(self):
    -        "Tests is_valid_dm(*) on a 1D array. False expected."
    -        D = np.zeros((5,), dtype=np.double)
    -        self.assertTrue(is_valid_dm(D) == False)
    -
    -    def test_is_valid_dm_improper_shape_3D_E(self):
    -        "Tests is_valid_dm(*) on a 3D array. Exception expected."
    -        D = np.zeros((3,3,3), dtype=np.double)
    -        self.assertRaises(ValueError, is_valid_dm_throw, (D))
    -
    -    def test_is_valid_dm_improper_shape_3D_F(self):
    -        "Tests is_valid_dm(*) on a 3D array. False expected."
    -        D = np.zeros((3,3,3), dtype=np.double)
    -        self.assertTrue(is_valid_dm(D) == False)
    -
    -    def test_is_valid_dm_nonzero_diagonal_E(self):
    -        "Tests is_valid_dm(*) on a distance matrix with a nonzero diagonal. Exception expected."
    -        y = np.random.rand(10)
    -        D = squareform(y)
    -        for i in xrange(0, 5):
    -            D[i, i] = 2.0
    -        self.assertRaises(ValueError, is_valid_dm_throw, (D))
    -
    -    def test_is_valid_dm_nonzero_diagonal_F(self):
    -        "Tests is_valid_dm(*) on a distance matrix with a nonzero diagonal. False expected."
    -        y = np.random.rand(10)
    -        D = squareform(y)
    -        for i in xrange(0, 5):
    -            D[i, i] = 2.0
    -        self.assertTrue(is_valid_dm(D) == False)
    -
    -    def test_is_valid_dm_assymetric_E(self):
    -        "Tests is_valid_dm(*) on an assymetric distance matrix. Exception expected."
    -        y = np.random.rand(10)
    -        D = squareform(y)
    -        D[1,3] = D[3,1] + 1
    -        self.assertRaises(ValueError, is_valid_dm_throw, (D))
    -
    -    def test_is_valid_dm_assymetric_F(self):
    -        "Tests is_valid_dm(*) on an assymetric distance matrix. False expected."
    -        y = np.random.rand(10)
    -        D = squareform(y)
    -        D[1,3] = D[3,1] + 1
    -        self.assertTrue(is_valid_dm(D) == False)
    -
    -    def test_is_valid_dm_correct_1_by_1(self):
    -        "Tests is_valid_dm(*) on a correct 1x1. True expected."
    -        D = np.zeros((1,1), dtype=np.double)
    -        self.assertTrue(is_valid_dm(D) == True)
    -
    -    def test_is_valid_dm_correct_2_by_2(self):
    -        "Tests is_valid_dm(*) on a correct 2x2. True expected."
    -        y = np.random.rand(1)
    -        D = squareform(y)
    -        self.assertTrue(is_valid_dm(D) == True)
    -
    -    def test_is_valid_dm_correct_3_by_3(self):
    -        "Tests is_valid_dm(*) on a correct 3x3. True expected."
    -        y = np.random.rand(3)
    -        D = squareform(y)
    -        self.assertTrue(is_valid_dm(D) == True)
    -
    -    def test_is_valid_dm_correct_4_by_4(self):
    -        "Tests is_valid_dm(*) on a correct 4x4. True expected."
    -        y = np.random.rand(6)
    -        D = squareform(y)
    -        self.assertTrue(is_valid_dm(D) == True)
    -
    -    def test_is_valid_dm_correct_5_by_5(self):
    -        "Tests is_valid_dm(*) on a correct 5x5. True expected."
    -        y = np.random.rand(10)
    -        D = squareform(y)
    -        self.assertTrue(is_valid_dm(D) == True)
    -
    -
    -def is_valid_y_throw(y):
    -    return is_valid_y(y, throw=True)
    -
    -
    -class TestIsValidY(TestCase):
    -
    -    def test_is_valid_y_int16_array_E(self):
    -        "Tests is_valid_y(*) on an int16 array. Exception expected."
    -        y = np.zeros((10,), dtype='i')
    -        self.assertRaises(TypeError, is_valid_y_throw, (y))
    -
    -    def test_is_valid_y_int16_array_F(self):
    -        "Tests is_valid_y(*) on an int16 array. False expected."
    -        y = np.zeros((10,), dtype='i')
    -        self.assertTrue(is_valid_y(y) == False)
    -
    -    def test_is_valid_y_improper_shape_2D_E(self):
    -        "Tests is_valid_y(*) on a 2D array. Exception expected."
    -        y = np.zeros((3,3,), dtype=np.double)
    -        self.assertRaises(ValueError, is_valid_y_throw, (y))
    -
    -    def test_is_valid_y_improper_shape_2D_F(self):
    -        "Tests is_valid_y(*) on a 2D array. False expected."
    -        y = np.zeros((3,3,), dtype=np.double)
    -        self.assertTrue(is_valid_y(y) == False)
    -
    -    def test_is_valid_y_improper_shape_3D_E(self):
    -        "Tests is_valid_y(*) on a 3D array. Exception expected."
    -        y = np.zeros((3,3,3), dtype=np.double)
    -        self.assertRaises(ValueError, is_valid_y_throw, (y))
    -
    -    def test_is_valid_y_improper_shape_3D_F(self):
    -        "Tests is_valid_y(*) on a 3D array. False expected."
    -        y = np.zeros((3,3,3), dtype=np.double)
    -        self.assertTrue(is_valid_y(y) == False)
    -
    -    def test_is_valid_y_correct_2_by_2(self):
    -        "Tests is_valid_y(*) on a correct 2x2 condensed. True expected."
    -        y = self.correct_n_by_n(2)
    -        self.assertTrue(is_valid_y(y) == True)
    -
    -    def test_is_valid_y_correct_3_by_3(self):
    -        "Tests is_valid_y(*) on a correct 3x3 condensed. True expected."
    -        y = self.correct_n_by_n(3)
    -        self.assertTrue(is_valid_y(y) == True)
    -
    -    def test_is_valid_y_correct_4_by_4(self):
    -        "Tests is_valid_y(*) on a correct 4x4 condensed. True expected."
    -        y = self.correct_n_by_n(4)
    -        self.assertTrue(is_valid_y(y) == True)
    -
    -    def test_is_valid_y_correct_5_by_5(self):
    -        "Tests is_valid_y(*) on a correct 5x5 condensed. True expected."
    -        y = self.correct_n_by_n(5)
    -        self.assertTrue(is_valid_y(y) == True)
    -
    -    def test_is_valid_y_2_100(self):
    -        "Tests is_valid_y(*) on 100 improper condensed distance matrices. Expecting exception."
    -        a = set([])
    -        for n in xrange(2, 16):
    -            a.add(n*(n-1)/2)
    -        for i in xrange(5, 105):
    -            if i not in a:
    -                self.assertRaises(ValueError, self.bad_y, i)
    -
    -    def bad_y(self, n):
    -        y = np.random.rand(n)
    -        return is_valid_y(y, throw=True)
    -
    -    def correct_n_by_n(self, n):
    -        y = np.random.rand(n*(n-1)/2)
    -        return y
    -
    -
    -def test_bad_p():
    -    """Raise ValueError if p < 1."""
    -    p = 0.5
    -    assert_raises(ValueError, minkowski, [1, 2], [3, 4], p)
    -    assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
    -
    -
    -def test_sokalsneath_all_false():
    -    """Regression test for ticket #876"""
    -    assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
    -
    -
    -def test_canberra():
    -    """Regression test for ticket #1430."""
    -    assert_equal(canberra([1,2,3], [2,4,6]), 1)
    -    assert_equal(canberra([1,1,0,0], [1,0,1,0]), 2)
    -
    -
    -def test_braycurtis():
    -    """Regression test for ticket #1430."""
    -    assert_almost_equal(braycurtis([1,2,3], [2,4,6]), 1./3, decimal=15)
    -    assert_almost_equal(braycurtis([1,1,0,0], [1,0,1,0]), 0.5, decimal=15)
    -
    -
    -def test_euclideans():
    -    """Regression test for ticket #1328."""
    -    x1 = np.array([1, 1, 1])
    -    x2 = np.array([0, 0, 0])
    -
    -    # Basic test of the calculation.
    -    assert_almost_equal(sqeuclidean(x1, x2), 3.0, decimal=14)
    -    assert_almost_equal(euclidean(x1, x2), np.sqrt(3), decimal=14)
    -
    -    # Check flattening for (1, N) or (N, 1) inputs
    -    assert_almost_equal(euclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
    -                        np.sqrt(3), decimal=14)
    -    assert_almost_equal(sqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
    -                        3.0, decimal=14)
    -    assert_almost_equal(sqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
    -                        3.0, decimal=14)
    -
    -    # Distance metrics only defined for vectors (= 1-D)
    -    x = np.arange(4).reshape(2, 2)
    -    assert_raises(ValueError, euclidean, x, x)
    -    assert_raises(ValueError, sqeuclidean, x, x)
    -
    -    # Another check, with random data.
    -    rs = np.random.RandomState(1234567890)
    -    x = rs.rand(10)
    -    y = rs.rand(10)
    -    d1 = euclidean(x, y)
    -    d2 = sqeuclidean(x, y)
    -    assert_almost_equal(d1**2, d2, decimal=14)
    -
    -
    -def test_sokalmichener():
    -    """Test that sokalmichener has the same result for bool and int inputs."""
    -    p = [True, True, False]
    -    q = [True, False, True]
    -    x = [int(b) for b in p]
    -    y = [int(b) for b in q]
    -    dist1 = sokalmichener(p, q)
    -    dist2 = sokalmichener(x, y)
    -    # These should be exactly the same.
    -    assert_equal(dist1, dist2)
    -
    -
    -def test__validate_vector():
    -    """Assorted tests for _validate_vector."""
    -    x = [1, 2, 3]
    -    y = _validate_vector(x)
    -    assert_array_equal(y, x)
    -
    -    y = _validate_vector(x, dtype=np.float64)
    -    assert_array_equal(y, x)
    -    assert_equal(y.dtype, np.float64)
    -
    -    x = [1]
    -    y = _validate_vector(x)
    -    assert_equal(y.ndim, 1)
    -    assert_equal(y, x)
    -
    -    x = 1
    -    y = _validate_vector(x)
    -    assert_equal(y.ndim, 1)
    -    assert_equal(y, [x])
    -
    -    x = np.arange(5).reshape(1, -1, 1)
    -    y = _validate_vector(x)
    -    assert_equal(y.ndim, 1)
    -    assert_array_equal(y, x[0, :, 0])
    -
    -    x = [[1, 2], [3, 4]]
    -    assert_raises(ValueError, _validate_vector, x)
    -
    -
    -if __name__=="__main__":
    -    run_module_suite()
    diff --git a/scipy-0.10.1/scipy/spatial/tests/test_kdtree.py b/scipy-0.10.1/scipy/spatial/tests/test_kdtree.py
    deleted file mode 100644
    index dc988e205d..0000000000
    --- a/scipy-0.10.1/scipy/spatial/tests/test_kdtree.py
    +++ /dev/null
    @@ -1,487 +0,0 @@
    -# Copyright Anne M. Archibald 2008
    -# Released under the scipy license
    -
    -from numpy.testing import assert_equal, assert_array_equal, assert_almost_equal, \
    -        assert_, run_module_suite
    -
    -import numpy as np
    -from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
    -from scipy.spatial import minkowski_distance as distance
    -
    -class ConsistencyTests:
    -    def test_nearest(self):
    -        x = self.x
    -        d, i = self.kdtree.query(x, 1)
    -        assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
    -        eps = 1e-8
    -        assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1)>d**2-eps))
    -
    -    def test_m_nearest(self):
    -        x = self.x
    -        m = self.m
    -        dd, ii = self.kdtree.query(x, m)
    -        d = np.amax(dd)
    -        i = ii[np.argmax(dd)]
    -        assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
    -        eps = 1e-8
    -        assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1)=self.d/(1.+self.eps)))
    -
    -class test_random_ball(ball_consistency):
    -
    -    def setUp(self):
    -        n = 100
    -        m = 4
    -        self.data = np.random.randn(n,m)
    -        self.T = KDTree(self.data,leafsize=2)
    -        self.x = np.random.randn(m)
    -        self.p = 2.
    -        self.eps = 0
    -        self.d = 0.2
    -
    -class test_random_ball_approx(test_random_ball):
    -
    -    def setUp(self):
    -        test_random_ball.setUp(self)
    -        self.eps = 0.1
    -
    -class test_random_ball_far(test_random_ball):
    -
    -    def setUp(self):
    -        test_random_ball.setUp(self)
    -        self.d = 2.
    -
    -class test_random_ball_l1(test_random_ball):
    -
    -    def setUp(self):
    -        test_random_ball.setUp(self)
    -        self.p = 1
    -
    -class test_random_ball_linf(test_random_ball):
    -
    -    def setUp(self):
    -        test_random_ball.setUp(self)
    -        self.p = np.inf
    -
    -def test_random_ball_vectorized():
    -
    -    n = 20
    -    m = 5
    -    T = KDTree(np.random.randn(n,m))
    -
    -    r = T.query_ball_point(np.random.randn(2,3,m),1)
    -    assert_equal(r.shape,(2,3))
    -    assert_(isinstance(r[0,0],list))
    -
    -class two_trees_consistency:
    -
    -    def test_all_in_ball(self):
    -        r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
    -        for i, l in enumerate(r):
    -            for j in l:
    -                assert_(distance(self.data1[i],self.data2[j],self.p)<=self.d*(1.+self.eps))
    -    def test_found_all(self):
    -        r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
    -        for i, l in enumerate(r):
    -            c = np.ones(self.T2.n,dtype=np.bool)
    -            c[l] = False
    -            assert_(np.all(distance(self.data2[c],self.data1[i],self.p)>=self.d/(1.+self.eps)))
    -
    -class test_two_random_trees(two_trees_consistency):
    -
    -    def setUp(self):
    -        n = 50
    -        m = 4
    -        self.data1 = np.random.randn(n,m)
    -        self.T1 = KDTree(self.data1,leafsize=2)
    -        self.data2 = np.random.randn(n,m)
    -        self.T2 = KDTree(self.data2,leafsize=2)
    -        self.p = 2.
    -        self.eps = 0
    -        self.d = 0.2
    -
    -class test_two_random_trees_far(test_two_random_trees):
    -
    -    def setUp(self):
    -        test_two_random_trees.setUp(self)
    -        self.d = 2
    -
    -class test_two_random_trees_linf(test_two_random_trees):
    -
    -    def setUp(self):
    -        test_two_random_trees.setUp(self)
    -        self.p = np.inf
    -
    -
    -class test_rectangle:
    -
    -    def setUp(self):
    -        self.rect = Rectangle([0,0],[1,1])
    -
    -    def test_min_inside(self):
    -        assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0)
    -    def test_min_one_side(self):
    -        assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5)
    -    def test_min_two_sides(self):
    -        assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2))
    -    def test_max_inside(self):
    -        assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2))
    -    def test_max_one_side(self):
    -        assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5))
    -    def test_max_two_sides(self):
    -        assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2))
    -
    -    def test_split(self):
    -        less, greater = self.rect.split(0,0.1)
    -        assert_array_equal(less.maxes,[0.1,1])
    -        assert_array_equal(less.mins,[0,0])
    -        assert_array_equal(greater.maxes,[1,1])
    -        assert_array_equal(greater.mins,[0.1,0])
    -
    -
    -def test_distance_l2():
    -    assert_almost_equal(distance([0,0],[1,1],2),np.sqrt(2))
    -def test_distance_l1():
    -    assert_almost_equal(distance([0,0],[1,1],1),2)
    -def test_distance_linf():
    -    assert_almost_equal(distance([0,0],[1,1],np.inf),1)
    -def test_distance_vectorization():
    -    x = np.random.randn(10,1,3)
    -    y = np.random.randn(1,7,3)
    -    assert_equal(distance(x,y).shape,(10,7))
    -
    -class test_count_neighbors:
    -
    -    def setUp(self):
    -        n = 50
    -        m = 2
    -        self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
    -        self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
    -
    -    def test_one_radius(self):
    -        r = 0.2
    -        assert_equal(self.T1.count_neighbors(self.T2, r),
    -                np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
    -
    -    def test_large_radius(self):
    -        r = 1000
    -        assert_equal(self.T1.count_neighbors(self.T2, r),
    -                np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
    -
    -    def test_multiple_radius(self):
    -        rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
    -        results = self.T1.count_neighbors(self.T2, rs)
    -        assert_(np.all(np.diff(results)>=0))
    -        for r,result in zip(rs, results):
    -            assert_equal(self.T1.count_neighbors(self.T2, r), result)
    -
    -class test_sparse_distance_matrix:
    -    def setUp(self):
    -        n = 50
    -        m = 4
    -        self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
    -        self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
    -        self.r = 0.3
    -
    -    def test_consistency_with_neighbors(self):
    -        M = self.T1.sparse_distance_matrix(self.T2, self.r)
    -        r = self.T1.query_ball_tree(self.T2, self.r)
    -        for i,l in enumerate(r):
    -            for j in l:
    -                assert_equal(M[i,j],distance(self.T1.data[i],self.T2.data[j]))
    -        for ((i,j),d) in M.items():
    -            assert_(j in r[i])
    -
    -    def test_zero_distance(self):
    -        M = self.T1.sparse_distance_matrix(self.T1, self.r) # raises an exception for bug 870
    -
    -def test_distance_matrix():
    -    m = 10
    -    n = 11
    -    k = 4
    -    xs = np.random.randn(m,k)
    -    ys = np.random.randn(n,k)
    -    ds = distance_matrix(xs,ys)
    -    assert_equal(ds.shape, (m,n))
    -    for i in range(m):
    -        for j in range(n):
    -            assert_almost_equal(distance(xs[i],ys[j]),ds[i,j])
    -def test_distance_matrix_looping():
    -    m = 10
    -    n = 11
    -    k = 4
    -    xs = np.random.randn(m,k)
    -    ys = np.random.randn(n,k)
    -    ds = distance_matrix(xs,ys)
    -    dsl = distance_matrix(xs,ys,threshold=1)
    -    assert_equal(ds,dsl)
    -
    -def check_onetree_query(T,d):
    -    r = T.query_ball_tree(T, d)
    -    s = set()
    -    for i, l in enumerate(r):
    -        for j in l:
    -            if i 0
    -    env.DistutilsStaticExtLibrary(libname, source = src)
    -
    -# C libraries
    -build_lib('c_misc', '.c', 'sc_c_misc')
    -build_lib('cephes', '.c', 'sc_cephes')
    -
    -# F libraries
    -# XXX: handle no opt flags for mach
    -build_lib('mach', '.f', 'sc_mach')
    -build_lib('toms', '.f', 'sc_toms')
    -build_lib('amos', '.f', 'sc_amos')
    -build_lib('cdflib', '.f', 'sc_cdf')
    -build_lib('specfun', '.f', 'sc_specfunlib')
    -
    -math_info = get_pkg_info("npymath")
    -env.MergeFlags(math_info.cflags())
    -env.MergeFlags(math_info.libs())
    -env.PrependUnique(LIBPATH = ['.'])
    -
    -# orthogonal_eval extension
    -env.NumpyPythonExtension('orthogonal_eval', source = 'orthogonal_eval.c')
    -
    -# logit extension
    -logit_src = env.GenerateFromTemplate('_logit.c.src')
    -env.NumpyPythonExtension('_logit', source = logit_src)
    -
    -# lambertw extension
    -env.NumpyPythonExtension('lambertw', source = 'lambertw.c')
    -
    -# Cephes extension
    -src = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', \
    -       'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c']
    -
    -env.NumpyPythonExtension('_cephes', 
    -                         source = src, 
    -                         LIBS = ['sc_amos', 'sc_toms', 'sc_c_misc', 'sc_cephes', 'sc_mach',\
    -                                 'sc_cdf', 'sc_specfunlib']) 
    -
    -# Specfun extension
    -env.Prepend(LIBS = ['sc_specfunlib'])
    -env.NumpyPythonExtension('specfun', source = 'specfun.pyf',
    -                         F2PYOPTIONS = ["--no-wrap-functions"])
    diff --git a/scipy-0.10.1/scipy/special/SConstruct b/scipy-0.10.1/scipy/special/SConstruct
    deleted file mode 100644
    index a377d8391b..0000000000
    --- a/scipy-0.10.1/scipy/special/SConstruct
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -from numscons import GetInitEnvironment
    -GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
    diff --git a/scipy-0.10.1/scipy/special/__init__.py b/scipy-0.10.1/scipy/special/__init__.py
    deleted file mode 100644
    index b32109f256..0000000000
    --- a/scipy-0.10.1/scipy/special/__init__.py
    +++ /dev/null
    @@ -1,543 +0,0 @@
    -"""
    -========================================
    -Special functions (:mod:`scipy.special`)
    -========================================
    -
    -.. module:: scipy.special
    -
    -Nearly all of the functions below are universal functions and follow
    -broadcasting and automatic array-looping rules. Exceptions are noted.
    -
    -Error handling
    -==============
    -
    -Errors are handled by returning nans, or other appropriate values.
    -Some of the special function routines will emit warnings when an error
    -occurs.  By default this is disabled.  To enable such messages use
    -``errprint(1)``, and to disable such messages use ``errprint(0)``.
    -
    -Example:
    -
    -    >>> print scipy.special.bdtr(-1,10,0.3)
    -    >>> scipy.special.errprint(1)
    -    >>> print scipy.special.bdtr(-1,10,0.3)
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   errprint
    -
    -Available functions
    -===================
    -
    -Airy functions
    ---------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   airy     -- Airy functions and their derivatives.
    -   airye    -- Exponentially scaled Airy functions
    -   ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
    -   bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
    -
    -
    -Elliptic Functions and Integrals
    ---------------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   ellipj    -- Jacobian elliptic functions
    -   ellipk    -- Complete elliptic integral of the first kind.
    -   ellipkm1  -- ellipkm1(x) == ellipk(1 - x)
    -   ellipkinc -- Incomplete elliptic integral of the first kind.
    -   ellipe    -- Complete elliptic integral of the second kind.
    -   ellipeinc -- Incomplete elliptic integral of the second kind.
    -
    -Bessel Functions
    -----------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   jn       -- Bessel function of integer order and real argument.
    -   jv       -- Bessel function of real-valued order and complex argument.
    -   jve      -- Exponentially scaled Bessel function.
    -   yn       -- Bessel function of second kind (integer order).
    -   yv       -- Bessel function of the second kind (real-valued order).
    -   yve      -- Exponentially scaled Bessel function of the second kind.
    -   kn       -- Modified Bessel function of the second kind (integer order).
    -   kv       -- Modified Bessel function of the second kind (real order).
    -   kve      -- Exponentially scaled modified Bessel function of the second kind.
    -   iv       -- Modified Bessel function.
    -   ive      -- Exponentially scaled modified Bessel function.
    -   hankel1  -- Hankel function of the first kind.
    -   hankel1e -- Exponentially scaled Hankel function of the first kind.
    -   hankel2  -- Hankel function of the second kind.
    -   hankel2e -- Exponentially scaled Hankel function of the second kind.
    -
    -The following is not an universal function:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   lmbda       -- [+]Sequence of lambda functions with arbitrary order v.
    -
    -Zeros of Bessel Functions
    -^^^^^^^^^^^^^^^^^^^^^^^^^
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
    -   jnyn_zeros  -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
    -   jn_zeros    -- [+]Zeros of Jn(x)
    -   jnp_zeros   -- [+]Zeros of Jn'(x)
    -   yn_zeros    -- [+]Zeros of Yn(x)
    -   ynp_zeros   -- [+]Zeros of Yn'(x)
    -   y0_zeros    -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
    -   y1_zeros    -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
    -   y1p_zeros   -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
    -
    -Faster versions of common Bessel Functions
    -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   j0       -- Bessel function of order 0.
    -   j1       -- Bessel function of order 1.
    -   y0       -- Bessel function of second kind of order 0.
    -   y1       -- Bessel function of second kind of order 1.
    -   i0       -- Modified Bessel function of order 0.
    -   i0e      -- Exponentially scaled modified Bessel function of order 0.
    -   i1       -- Modified Bessel function of order 1.
    -   i1e      -- Exponentially scaled modified Bessel function of order 1.
    -   k0       -- Modified Bessel function of the second kind of order 0.
    -   k0e      -- Exponentially scaled modified Bessel function of the second kind of order 0.
    -   k1       -- Modified Bessel function of the second kind of order 1.
    -   k1e      -- Exponentially scaled modified Bessel function of the second kind of order 1.
    -
    -Integrals of Bessel Functions
    -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   itj0y0     -- Basic integrals of j0 and y0 from 0 to x.
    -   it2j0y0    -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
    -   iti0k0     -- Basic integrals of i0 and k0 from 0 to x.
    -   it2i0k0    -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
    -   besselpoly -- Integral of a bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.
    -
    -Derivatives of Bessel Functions
    -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   jvp     -- Nth derivative of Jv(v,z)
    -   yvp     -- Nth derivative of Yv(v,z)
    -   kvp     -- Nth derivative of Kv(v,z)
    -   ivp     -- Nth derivative of Iv(v,z)
    -   h1vp    -- Nth derivative of H1v(v,z)
    -   h2vp    -- Nth derivative of H2v(v,z)
    -
    -Spherical Bessel Functions
    -^^^^^^^^^^^^^^^^^^^^^^^^^^
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   sph_jn   -- [+]Sequence of spherical Bessel functions, jn(z)
    -   sph_yn   -- [+]Sequence of spherical Bessel functions, yn(z)
    -   sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
    -   sph_in   -- [+]Sequence of spherical Bessel functions, in(z)
    -   sph_kn   -- [+]Sequence of spherical Bessel functions, kn(z)
    -   sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
    -
    -Riccati-Bessel Functions
    -^^^^^^^^^^^^^^^^^^^^^^^^
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.
    -   riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.
    -
    -Struve Functions
    -----------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   struve       -- Struve function --- Hv(x)
    -   modstruve    -- Modified struve function --- Lv(x)
    -   itstruve0    -- Integral of H0(t) from 0 to x
    -   it2struve0   -- Integral of H0(t)/t from x to Inf.
    -   itmodstruve0 -- Integral of L0(t) from 0 to x.
    -
    -
    -Raw Statistical Functions
    --------------------------
    -
    -.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   bdtr       -- Sum of terms 0 through k of of the binomial pdf.
    -   bdtrc      -- Sum of terms k+1 through n of the binomial pdf.
    -   bdtri      -- Inverse of bdtr
    -   btdtr      -- Integral from 0 to x of beta pdf.
    -   btdtri     -- Quantiles of beta distribution
    -   fdtr       -- Integral from 0 to x of F pdf.
    -   fdtrc      -- Integral from x to infinity under F pdf.
    -   fdtri      -- Inverse of fdtrc
    -   gdtr       -- Integral from 0 to x of gamma pdf.
    -   gdtrc      -- Integral from x to infinity under gamma pdf.
    -   gdtria     --
    -   gdtrib     --
    -   gdtrix     --
    -   nbdtr      -- Sum of terms 0 through k of the negative binomial pdf.
    -   nbdtrc     -- Sum of terms k+1 to infinity under negative binomial pdf.
    -   nbdtri     -- Inverse of nbdtr
    -   pdtr       -- Sum of terms 0 through k of the Poisson pdf.
    -   pdtrc      -- Sum of terms k+1 to infinity of the Poisson pdf.
    -   pdtri      -- Inverse of pdtr
    -   stdtr      -- Integral from -infinity to t of the Student-t pdf.
    -   stdtridf   --
    -   stdtrit    --
    -   chdtr      -- Integral from 0 to x of the Chi-square pdf.
    -   chdtrc     -- Integral from x to infnity of Chi-square pdf.
    -   chdtri     -- Inverse of chdtrc.
    -   ndtr       -- Integral from -infinity to x of standard normal pdf
    -   ndtri      -- Inverse of ndtr (quantiles)
    -   smirnov    -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
    -   smirnovi   -- Inverse of smirnov.
    -   kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
    -   kolmogi    -- Inverse of kolmogorov
    -   tklmbda    -- Tukey-Lambda CDF
    -   logit      --
    -   expit      --
    -
    -Gamma and Related Functions
    ----------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   gamma        -- Gamma function.
    -   gammaln      -- Log of the absolute value of the gamma function.
    -   gammainc     -- Incomplete gamma integral.
    -   gammaincinv  -- Inverse of gammainc.
    -   gammaincc    -- Complemented incomplete gamma integral.
    -   gammainccinv -- Inverse of gammaincc.
    -   beta         -- Beta function.
    -   betaln       -- Log of the absolute value of the beta function.
    -   betainc      -- Incomplete beta integral.
    -   betaincinv   -- Inverse of betainc.
    -   psi          -- Logarithmic derivative of the gamma function.
    -   rgamma       -- One divided by the gamma function.
    -   polygamma    -- Nth derivative of psi function.
    -   multigammaln
    -
    -
    -Error Function and Fresnel Integrals
    -------------------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   erf           -- Error function.
    -   erfc          -- Complemented error function (1- erf(x))
    -   erfinv        -- Inverse of error function
    -   erfcinv       -- Inverse of erfc
    -   fresnel       -- Fresnel sine and cosine integrals.
    -   fresnel_zeros -- Complex zeros of both Fresnel integrals
    -   modfresnelp   -- Modified Fresnel integrals F_+(x) and K_+(x)
    -   modfresnelm   -- Modified Fresnel integrals F_-(x) and K_-(x)
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   erf_zeros     -- [+]Complex zeros of erf(z)
    -   fresnelc_zeros -- [+]Complex zeros of fresnel cosine integrals
    -   fresnels_zeros -- [+]Complex zeros of fresnel sine integrals
    -
    -Legendre Functions
    -------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   lpmv     -- Associated Legendre Function of arbitrary non-negative degree v.
    -   sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   lpn      -- [+]Legendre Functions (polynomials) of the first kind
    -   lqn      -- [+]Legendre Functions of the second kind.
    -   lpmn     -- [+]Associated Legendre Function of the first kind.
    -   lqmn     -- [+]Associated Legendre Function of the second kind.
    -
    -Orthogonal polynomials
    -----------------------
    -
    -The following functions evaluate values of orthogonal polynomials:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   eval_legendre
    -   eval_chebyt
    -   eval_chebyu
    -   eval_chebyc
    -   eval_chebys
    -   eval_jacobi
    -   eval_laguerre
    -   eval_genlaguerre
    -   eval_hermite
    -   eval_hermitenorm
    -   eval_gegenbauer
    -   eval_sh_legendre
    -   eval_sh_chebyt
    -   eval_sh_chebyu
    -   eval_sh_jacobi
    -
    -The functions below, in turn, return :ref:`orthopoly1d` objects, which
    -functions similarly as :ref:`numpy.poly1d`.  The :ref:`orthopoly1d`
    -class also has an attribute ``weights`` which returns the roots, weights,
    -and total weights for the appropriate form of Gaussian quadrature.
    -These are returned in an ``n x 3`` array with roots in the first column,
    -weights in the second column, and total weights in the final column.
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   legendre    -- [+]Legendre polynomial P_n(x) (lpn -- for function).
    -   chebyt      -- [+]Chebyshev polynomial T_n(x)
    -   chebyu      -- [+]Chebyshev polynomial U_n(x)
    -   chebyc      -- [+]Chebyshev polynomial C_n(x)
    -   chebys      -- [+]Chebyshev polynomial S_n(x)
    -   jacobi      -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
    -   laguerre    -- [+]Laguerre polynomial, L_n(x)
    -   genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
    -   hermite     -- [+]Hermite polynomial H_n(x)
    -   hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
    -   gegenbauer  -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
    -   sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
    -   sh_chebyt   -- [+]shifted Chebyshev polynomial, T*_n(x)
    -   sh_chebyu   -- [+]shifted Chebyshev polynomial, U*_n(x)
    -   sh_jacobi   -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
    -
    -.. warning::
    -
    -   Large-order polynomials obtained from these functions
    -   are numerically unstable.
    -
    -   ``orthopoly1d`` objects are converted to ``poly1d``, when doing
    -   arithmetic.  ``numpy.poly1d`` works in power basis and cannot
    -   represent high-order polynomials accurately, which can cause
    -   significant inaccuracy.
    -
    -
    -Hypergeometric Functions
    -------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   hyp2f1   -- Gauss hypergeometric function (2F1)
    -   hyp1f1   -- Confluent hypergeometric function (1F1)
    -   hyperu   -- Confluent hypergeometric function (U)
    -   hyp0f1   -- Confluent hypergeometric limit function (0F1)
    -   hyp2f0   -- Hypergeometric function (2F0)
    -   hyp1f2   -- Hypergeometric function (1F2)
    -   hyp3f0   -- Hypergeometric function (3F0)
    -
    -
    -Parabolic Cylinder Functions
    -----------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   pbdv     -- Parabolic cylinder function Dv(x) and derivative.
    -   pbvv     -- Parabolic cylinder function Vv(x) and derivative.
    -   pbwa     -- Parabolic cylinder function W(a,x) and derivative.
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
    -   pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
    -   pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
    -
    -Mathieu and Related Functions
    ------------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   mathieu_a       -- Characteristic values for even solution (ce_m)
    -   mathieu_b       -- Characteristic values for odd solution (se_m)
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
    -   mathieu_odd_coef  -- [+]sequence of expansion coefficients for odd solution
    -
    -The following return both function and first derivative:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   mathieu_cem     -- Even mathieu function
    -   mathieu_sem     -- Odd mathieu function
    -   mathieu_modcem1 -- Even modified mathieu function of the first kind
    -   mathieu_modcem2 -- Even modified mathieu function of the second kind
    -   mathieu_modsem1 -- Odd modified mathieu function of the first kind
    -   mathieu_modsem2 -- Odd modified mathieu function of the second kind
    -
    -Spheroidal Wave Functions
    --------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   pro_ang1   -- Prolate spheroidal angular function of the first kind
    -   pro_rad1   -- Prolate spheroidal radial function of the first kind
    -   pro_rad2   -- Prolate spheroidal radial function of the second kind
    -   obl_ang1   -- Oblate spheroidal angluar function of the first kind
    -   obl_rad1   -- Oblate spheroidal radial function of the first kind
    -   obl_rad2   -- Oblate spheroidal radial function of the second kind
    -   pro_cv     -- Compute characteristic value for prolate functions
    -   obl_cv     -- Compute characteristic value for oblate functions
    -   pro_cv_seq -- Compute sequence of prolate characteristic values
    -   obl_cv_seq -- Compute sequence of oblate characteristic values
    -
    -The following functions require pre-computed characteristic value:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   pro_ang1_cv -- Prolate spheroidal angular function of the first kind
    -   pro_rad1_cv -- Prolate spheroidal radial function of the first kind
    -   pro_rad2_cv -- Prolate spheroidal radial function of the second kind
    -   obl_ang1_cv -- Oblate spheroidal angluar function of the first kind
    -   obl_rad1_cv -- Oblate spheroidal radial function of the first kind
    -   obl_rad2_cv -- Oblate spheroidal radial function of the second kind
    -
    -Kelvin Functions
    -----------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   kelvin       -- All Kelvin functions (order 0) and derivatives.
    -   kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
    -   ber          -- Kelvin function ber x
    -   bei          -- Kelvin function bei x
    -   berp         -- Derivative of Kelvin function ber x
    -   beip         -- Derivative of Kelvin function bei x
    -   ker          -- Kelvin function ker x
    -   kei          -- Kelvin function kei x
    -   kerp         -- Derivative of Kelvin function ker x
    -   keip         -- Derivative of Kelvin function kei x
    -
    -These are not universal functions:
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   ber_zeros    -- [+]Zeros of Kelvin function bei x
    -   bei_zeros    -- [+]Zeros of Kelvin function ber x
    -   berp_zeros   -- [+]Zeros of derivative of Kelvin function ber x
    -   beip_zeros   -- [+]Zeros of derivative of Kelvin function bei x
    -   ker_zeros    -- [+]Zeros of Kelvin function kei x
    -   kei_zeros    -- [+]Zeros of Kelvin function ker x
    -   kerp_zeros   -- [+]Zeros of derivative of Kelvin function ker x
    -   keip_zeros   -- [+]Zeros of derivative of Kelvin function kei x
    -
    -Other Special Functions
    ------------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   expn         -- Exponential integral.
    -   exp1         -- Exponential integral of order 1 (for complex argument)
    -   expi         -- Another exponential integral -- Ei(x)
    -   wofz         -- Fadeeva function.
    -   dawsn        -- Dawson's integral.
    -   shichi       -- Hyperbolic sine and cosine integrals.
    -   sici         -- Integral of the sinc and "cosinc" functions.
    -   spence       -- Dilogarithm integral.
    -   lambertw     -- Lambert W function
    -   zeta         -- Riemann zeta function of two arguments.
    -   zetac        -- 1.0 - standard Riemann zeta function.
    -
    -Convenience Functions
    ----------------------
    -
    -.. autosummary::
    -   :toctree: generated/
    -
    -   cbrt     -- Cube root.
    -   exp10    -- 10 raised to the x power.
    -   exp2     -- 2 raised to the x power.
    -   radian   -- radian angle given degrees, minutes, and seconds.
    -   cosdg    -- cosine of the angle given in degrees.
    -   sindg    -- sine of the angle given in degrees.
    -   tandg    -- tangent of the angle given in degrees.
    -   cotdg    -- cotangent of the angle given in degrees.
    -   log1p    -- log(1+x)
    -   expm1    -- exp(x)-1
    -   cosm1    -- cos(x)-1
    -   round    -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
    -
    -.. [+] in the description indicates a function which is not a universal
    -.. function and does not follow broadcasting and automatic
    -.. array-looping rules.
    -
    -"""
    -
    -#from special_version import special_version as __version__
    -
    -from _cephes import *
    -
    -from basic import *
    -import specfun
    -import orthogonal
    -from orthogonal import *
    -from spfun_stats import multigammaln
    -from lambertw import lambertw
    -from _logit import logit, expit
    -import add_newdocs
    -
    -__all__ = filter(lambda s:not s.startswith('_'),dir())
    -
    -from numpy.dual import register_func
    -register_func('i0',i0)
    -del register_func
    -
    -from numpy.testing import Tester
    -test = Tester().test
    diff --git a/scipy-0.10.1/scipy/special/_cephesmodule.c b/scipy-0.10.1/scipy/special/_cephesmodule.c
    deleted file mode 100644
    index 9af50eb6e5..0000000000
    --- a/scipy-0.10.1/scipy/special/_cephesmodule.c
    +++ /dev/null
    @@ -1,1178 +0,0 @@
    -/* Cephes module version 1.5
    - *  This module defines the functions in the cephes and amos libraries as
    - *   Numerical python ufunc objects so that they can operate on arbitrary
    - *   NumPy arrays with broadcasting and typecasting rules implemented.
    - *
    - *  Copyright 1999  Travis E. Oliphant
    - * Revisions 2002 (added functions from cdflib)
    - */
    -#include 
    -
    -#include "Python.h"
    -#include "numpy/arrayobject.h"
    -#include "numpy/ufuncobject.h"
    -#include "ufunc_extras.h"
    -#include "abstract.h"
    -#include "cephes.h"
    -#include "amos_wrappers.h"
    -#include "toms_wrappers.h"
    -#include "cdf_wrappers.h"
    -#include "specfun_wrappers.h"
    -#include "c_misc/misc.h"
    -
    -/* Defined in mtherr in the cephes library */
    -extern int scipy_special_print_error_messages;
    -
    -#include "cephes_doc.h"
    -
    -#if PY_VERSION_HEX >= 0x03000000
    -#define PyInt_FromLong PyLong_FromLong
    -#define PyUString_FromString PyUnicode_FromString
    -#else
    -#define PyUString_FromString PyString_FromString
    -#endif
    -static PyUFuncGenericFunction cephes1_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes1rc_functions[] = { NULL, NULL, NULL, NULL};
    -static PyUFuncGenericFunction cephes1_2_functions[] = { NULL, NULL, NULL, NULL,};
    -static PyUFuncGenericFunction cephes1_2c_functions[] = { NULL, NULL,};
    -static PyUFuncGenericFunction cephes1c_4_functions[] = { NULL, NULL, NULL, NULL };
    -static PyUFuncGenericFunction cephes1cpb_4_functions[] = { NULL, NULL,};
    -static PyUFuncGenericFunction cephes2_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes2_2_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes2_4_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes2a_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes2c_functions[] = { NULL, NULL, NULL, NULL };
    -static PyUFuncGenericFunction cephes2cpp_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes3_functions[] = { NULL, NULL, NULL, NULL};
    -static PyUFuncGenericFunction cephes3a_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes3_2_functions[] = { NULL, NULL,};
    -static PyUFuncGenericFunction cephes4_functions[] = { NULL, NULL, NULL, NULL,};
    -static PyUFuncGenericFunction cephes4a_2_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes4_2_functions[] = { NULL, NULL, };
    -static PyUFuncGenericFunction cephes5_2_functions[] = { NULL, NULL, };
    -
    -static PyUFuncGenericFunction cephes1c_functions[] = { NULL, NULL, };
    -
    -static void * airy_data[] = { (void *)airy, (void *)airy, (void *)cairy_wrap, (void *)cairy_wrap,};
    -static void * airye_data[] = { (void *)cairy_wrap_e_real, (void *)cairy_wrap_e_real, (void *)cairy_wrap_e, (void *)cairy_wrap_e, };
    -static void * itairy_data[] = { (void *)itairy_wrap, (void *)itairy_wrap, };
    -
    -static void * kelvin_data[] = { (void *)kelvin_wrap, (void *)kelvin_wrap,};
    -static void * ber_data[] = { (void *)ber_wrap, (void *)ber_wrap,};
    -static void * bei_data[] = { (void *)bei_wrap, (void *)bei_wrap,};
    -static void * ker_data[] = { (void *)ker_wrap, (void *)ker_wrap,};
    -static void * kei_data[] = { (void *)kei_wrap, (void *)kei_wrap,};
    -static void * berp_data[] = { (void *)berp_wrap, (void *)berp_wrap,};
    -static void * beip_data[] = { (void *)beip_wrap, (void *)beip_wrap,};
    -static void * kerp_data[] = { (void *)kerp_wrap, (void *)kerp_wrap,};
    -static void * keip_data[] = { (void *)keip_wrap, (void *)keip_wrap,};
    -
    -static void * ellpj_data[] = { (void *)ellpj, (void *)ellpj,};
    -
    -static void * exp1_data[] = { (void *)exp1_wrap, (void *)exp1_wrap, (void *)cexp1_wrap, (void *)cexp1_wrap,};
    -static void * expi_data[] = { (void *)expi_wrap, (void *)expi_wrap, (void *)cexpi_wrap, (void *)cexpi_wrap,};
    -static void * expn_data[] = { (void *)expn, (void *)expn, };
    -static void * kn_data[] = { (void *)kn, (void *)kn, };
    -
    -static void * pdtrc_data[] = { (void *)pdtrc, (void *)pdtrc, };
    -static void * pdtr_data[] = { (void *)pdtr, (void *)pdtr, };
    -static void * pdtri_data[] = { (void *)pdtri, (void *)pdtri, };
    -
    -static void * fresnl_data[] = { (void *)fresnl, (void *)fresnl, (void *)cfresnl_wrap, (void *)cfresnl_wrap };
    -static void * shichi_data[] = { (void *)shichi, (void *)shichi, };
    -static void * sici_data[] = { (void *)sici, (void *)sici, };
    -
    -
    -static void * itj0y0_data[] = { (void *)it1j0y0_wrap, (void *)it1j0y0_wrap, };
    -static void * it2j0y0_data[] = { (void *)it2j0y0_wrap, (void *)it2j0y0_wrap, };
    -static void * iti0k0_data[] = { (void *)it1i0k0_wrap, (void *)it1i0k0_wrap, };
    -static void * it2i0k0_data[] = { (void *)it2i0k0_wrap, (void *)it2i0k0_wrap, };
    -
    -/*
    -static void * stdtr_data[] = { (void *)stdtr, (void *)stdtr, };
    -static void * stdtri_data[] = { (void *)stdtri, (void *)stdtri, };
    -*/
    -
    -static void * yn_data[] = { (void *)yn, (void *)yn, };
    -static void * smirnov_data[] = { (void *)smirnov, (void *)smirnov, };
    -static void * smirnovi_data[] = { (void *)smirnovi, (void *)smirnovi, };
    -
    -static void * bdtrc_data[] = { (void *)bdtrc, (void *)bdtrc, };
    -static void * bdtr_data[] = { (void *)bdtr, (void *)bdtr, };
    -static void * bdtri_data[] = { (void *)bdtri, (void *)bdtri, };
    -static void * btdtr_data[] = { (void *)btdtr, (void *)btdtr, };
    -static void * btdtri_data[] = { (void *)incbi, (void *)incbi, };
    -
    -static void * fdtrc_data[] = { (void *)fdtrc, (void *)fdtrc, };
    -static void * fdtr_data[] = { (void *)fdtr, (void *)fdtr, };
    -static void * fdtri_data[] = { (void *)fdtri, (void *)fdtri, };
    -
    -static void * gdtrc_data[] = { (void *)gdtrc, (void *)gdtrc, };
    -static void * gdtr_data[] = { (void *)gdtr, (void *)gdtr, };
    -/*
    -static void * gdtri_data[] = { (void *)gdtri, (void *)gdtri, };
    -*/
    -static void * hyp2f1_data[] = { (void *)hyp2f1, (void *)hyp2f1, (void *)chyp2f1_wrap, (void *)chyp2f1_wrap};
    -static void * hyp1f1_data[] = { (void *)hyp1f1_wrap, (void *)hyp1f1_wrap, (void *)chyp1f1_wrap, (void *)chyp1f1_wrap};
    -static void * hypU_data[] = { (void *)hypU_wrap, (void *)hypU_wrap, };
    -static void * hyp2f0_data[] = { (void *)hyp2f0, (void *)hyp2f0, };
    -static void * threef0_data[] = { (void *)threef0, (void *)threef0, };
    -static void * onef2_data[] = { (void *)onef2, (void *)onef2, };
    -
    -static void * incbet_data[] = { (void *)incbet, (void *)incbet, };
    -static void * incbi_data[] = { (void *)incbi, (void *)incbi, };
    -
    -static void * nbdtrc_data[] = { (void *)nbdtrc, (void *)nbdtrc, };
    -static void * nbdtr_data[] = { (void *)nbdtr, (void *)nbdtr, };
    -static void * nbdtri_data[] = { (void *)nbdtri, (void *)nbdtri, };
    -
    -static void * beta_data[] = { (void *)beta, (void *)beta, };
    -static void * lbeta_data[] = { (void *)lbeta, (void *)lbeta, };
    -static void * cbrt_data[] = { (void *)cbrt, (void *)cbrt, };
    -static void * chdtrc_data[] = { (void *)chdtrc, (void *)chdtrc, };
    -static void * chdtr_data[] = { (void *)chdtr, (void *)chdtr, };
    -static void * chdtri_data[] = { (void *)chdtri, (void *)chdtri, };
    -static void * dawsn_data[] = {  (void *)dawsn, (void *)dawsn, };
    -static void * ellie_data[] = { (void *)ellie, (void *)ellie, };
    -static void * ellik_data[] = { (void *)ellik, (void *)ellik, };
    -static void * ellpe_data[] = { (void *)ellpe, (void *)ellpe, };
    -static void * ellpk_data[] = { (void *)ellpk, (void *)ellpk, };
    -static void * exp10_data[] = { (void *)exp10, (void *)exp10, };
    -static void * exp2_data[] = { (void *)exp2, (void *)exp2, };
    -static void * Gamma_data[] = { (void *)Gamma, (void *)Gamma, (void *)cgamma_wrap, (void *)cgamma_wrap};
    -static void * lgam_data[] = { (void *)lgam, (void *)lgam, (void *)clngamma_wrap, (void *)clngamma_wrap};
    -static void * i0_data[] = { (void *)i0, (void *)i0, };
    -static void * i0e_data[] = { (void *)i0e, (void *)i0e, };
    -static void * i1_data[] = { (void *)i1, (void *)i1, };
    -static void * i1e_data[] = { (void *)i1e, (void *)i1e, };
    -static void * igamc_data[] = { (void *)igamc, (void *)igamc, };
    -static void * igam_data[] = { (void *)igam, (void *)igam, };
    -static void * igami_data[] = { (void *)igami, (void *)igami, };
    -static void * gammaincinv_data[] = { (void *)gammaincinv,
    -                                     (void *)gammaincinv, };
    -
    -static void * iv_data[] = { (void *)iv, (void *)iv, (void *)cbesi_wrap, (void *)cbesi_wrap,};
    -static void * ive_data[] = { (void *)cbesi_wrap_e_real, (void *)cbesi_wrap_e_real, (void *)cbesi_wrap_e, (void *)cbesi_wrap_e, };
    -static void * j0_data[] = { (void *)j0,  (void *)j0,  };
    -static void * y0_data[] = { (void *)y0, (void *)y0, };
    -static void * j1_data[] = { (void *)j1,  (void *)j1,  };
    -static void * y1_data[] = { (void *)y1, (void *)y1, };
    -static void * jv_data[] = { (void *)jv, (void *)jv, (void *)cbesj_wrap, (void *)cbesj_wrap,};
    -static void * jve_data[] = { (void *)cbesj_wrap_e_real, (void *)cbesj_wrap_e_real, (void *)cbesj_wrap_e, (void *)cbesj_wrap_e, };
    -static void * yv_data[] = { (void *)yv, (void *)yv, (void *)cbesy_wrap, (void *)cbesy_wrap,};
    -static void * yve_data[] = { (void *)cbesy_wrap_e_real, (void *)cbesy_wrap_e_real, (void *)cbesy_wrap_e, (void *)cbesy_wrap_e, };
    -
    -static void * k0_data[] = { (void *)k0, (void *)k0, };
    -static void * k0e_data[] = { (void *)k0e, (void *)k0e, };
    -static void * k1_data[] = { (void *)k1, (void *)k1, };
    -static void * k1e_data[] = { (void *)k1e, (void *)k1e, };
    -static void * kv_data[] = { (void *)cbesk_wrap_real, (void *)cbesk_wrap_real, (void *)cbesk_wrap, (void *)cbesk_wrap,};
    -static void * kve_data[] = { (void *)cbesk_wrap_e_real, (void *)cbesk_wrap_e_real, (void *)cbesk_wrap_e, (void *)cbesk_wrap_e,};
    -static void * hankel1_data[] = { (void *)cbesh_wrap1, (void *)cbesh_wrap1,};
    -static void * hankel1e_data[] = { (void *)cbesh_wrap1_e, (void *)cbesh_wrap1_e,};
    -static void * hankel2_data[] = { (void *)cbesh_wrap2, (void *)cbesh_wrap2,};
    -static void * hankel2e_data[] = { (void *)cbesh_wrap2_e, (void *)cbesh_wrap2_e,};
    -
    -static void * ndtr_data[] = { (void *)ndtr, (void *)ndtr, };
    -static void * erfc_data[] = { (void *)erfc, (void *)erfc, };
    -static void * erf_data[] = { (void *)erf, (void *)erf, (void *)cerf_wrap, (void *)cerf_wrap};
    -static void * ndtri_data[] = { (void *)ndtri, (void *)ndtri, };
    -
    -static void * psi_data[] = { (void *)psi, (void *)psi, (void *)cpsi_wrap, (void *)cpsi_wrap};
    -static void * rgamma_data[] = { (void *)rgamma, (void *)rgamma, (void *)crgamma_wrap, (void *)crgamma_wrap};
    -static void * round_data[] = { (void *)round, (void *)round, };
    -static void * sindg_data[] = { (void *)sindg, (void *)sindg, };
    -static void * cosdg_data[] = { (void *)cosdg, (void *)cosdg, };
    -static void * radian_data[] = { (void *)radian, (void *)radian, };
    -static void * tandg_data[] = { (void *)tandg, (void *)tandg, };
    -static void * cotdg_data[] = { (void *)cotdg, (void *)cotdg, };
    -static void * log1p_data[] = { (void *)log1p, (void *)log1p, };
    -static void * expm1_data[] = { (void *)expm1, (void *)expm1, };
    -static void * cosm1_data[] = { (void *)cosm1, (void *)cosm1, };
    -
    -static void * spence_data[] = { (void *)spence, (void *)spence, };
    -/* static void * struve_data[] = { (void *)struve, (void *)struve, };*/
    -static void * struve_data[] = { (void *)struve_wrap, (void *)struve_wrap, };
    -static void * modstruve_data[] = { (void *)modstruve_wrap, (void *)modstruve_wrap, };
    -static void * itmodstruve0_data[] = { (void *)itmodstruve0_wrap, (void *)itmodstruve0_wrap, };
    -static void * itstruve0_data[] = { (void *)itstruve0_wrap, (void *)itstruve0_wrap, };
    -static void * it2struve0_data[] = { (void *)it2struve0_wrap, (void *)it2struve0_wrap, };
    -
    -
    -static void * zeta_data[] = { (void *)zeta, (void *)zeta, };
    -static void * zetac_data[] = { (void *)zetac, (void *)zetac, };
    -
    -static void * kolmogorov_data[] = { (void *)kolmogorov, (void *)kolmogorov, };
    -static void * kolmogi_data[] = { (void *)kolmogi, (void *)kolmogi, };
    -
    -static void * wofz_data[] = { (void *)cwofz_wrap, (void *)cwofz_wrap, };
    -
    -static void * besselpoly_data[] = {(void *)besselpoly, (void *)besselpoly,};
    -
    -static void * cdfbet3_data[] = {(void *)cdfbet3_wrap, (void *)cdfbet3_wrap};
    -static void * cdfbet4_data[] = {(void *)cdfbet4_wrap, (void *)cdfbet4_wrap};
    -static void * cdfbin2_data[] = {(void *)cdfbin2_wrap, (void *)cdfbin2_wrap};
    -static void * cdfbin3_data[] = {(void *)cdfbin3_wrap, (void *)cdfbin3_wrap};
    -static void * cdfchi3_data[] = {(void *)cdfchi3_wrap, (void *)cdfchi3_wrap};
    -static void * cdfchn1_data[] = {(void *)cdfchn1_wrap, (void *)cdfchn1_wrap};
    -static void * cdfchn2_data[] = {(void *)cdfchn2_wrap, (void *)cdfchn2_wrap};
    -static void * cdfchn3_data[] = {(void *)cdfchn3_wrap, (void *)cdfchn3_wrap};
    -static void * cdfchn4_data[] = {(void *)cdfchn4_wrap, (void *)cdfchn4_wrap};
    -/*
    -static void * cdff1_data[] = {(void *)cdff1_wrap, (void *)cdff1_wrap};
    -static void * cdff2_data[] = {(void *)cdff2_wrap, (void *)cdff2_wrap};
    -static void * cdff3_data[] = {(void *)cdff3_wrap, (void *)cdff3_wrap};
    -*/
    -static void * cdff4_data[] = {(void *)cdff4_wrap, (void *)cdff4_wrap};
    -
    -static void * cdffnc1_data[] = {(void *)cdffnc1_wrap, (void *)cdffnc1_wrap};
    -static void * cdffnc2_data[] = {(void *)cdffnc2_wrap, (void *)cdffnc2_wrap};
    -static void * cdffnc3_data[] = {(void *)cdffnc3_wrap, (void *)cdffnc3_wrap};
    -static void * cdffnc4_data[] = {(void *)cdffnc4_wrap, (void *)cdffnc4_wrap};
    -static void * cdffnc5_data[] = {(void *)cdffnc5_wrap, (void *)cdffnc5_wrap};
    -/*
    -static void * cdfgam1_data[] = {(void *)cdfgam1_wrap, (void *)cdfgam1_wrap};
    -*/
    -static void * cdfgam2_data[] = {(void *)cdfgam2_wrap, (void *)cdfgam2_wrap};
    -static void * cdfgam3_data[] = {(void *)cdfgam3_wrap, (void *)cdfgam3_wrap};
    -static void * cdfgam4_data[] = {(void *)cdfgam4_wrap, (void *)cdfgam4_wrap};
    -
    -static void * cdfnbn2_data[] = {(void *)cdfnbn2_wrap, (void *)cdfnbn2_wrap};
    -static void * cdfnbn3_data[] = {(void *)cdfnbn3_wrap, (void *)cdfnbn3_wrap};
    -
    -static void * cdfnor3_data[] = {(void *)cdfnor3_wrap, (void *)cdfnor3_wrap};
    -static void * cdfnor4_data[] = {(void *)cdfnor4_wrap, (void *)cdfnor4_wrap};
    -
    -static void * cdfpoi2_data[] = {(void *)cdfpoi2_wrap, (void *)cdfpoi2_wrap};
    -
    -static void * cdft1_data[] = {(void *)cdft1_wrap, (void *)cdft1_wrap};
    -static void * cdft2_data[] = {(void *)cdft2_wrap, (void *)cdft2_wrap};
    -static void * cdft3_data[] = {(void *)cdft3_wrap, (void *)cdft3_wrap};
    -
    -static void * cdftnc1_data[] = {(void *)cdftnc1_wrap, (void *)cdftnc1_wrap};
    -static void * cdftnc2_data[] = {(void *)cdftnc2_wrap, (void *)cdftnc2_wrap};
    -static void * cdftnc3_data[] = {(void *)cdftnc3_wrap, (void *)cdftnc3_wrap};
    -static void * cdftnc4_data[] = {(void *)cdftnc4_wrap, (void *)cdftnc4_wrap};
    -
    -static void * tklambda_data[] = {(void *)tukeylambdacdf, (void *)tukeylambdacdf};
    -
    -static void * mathieu_a_data[] = {(void *)cem_cva_wrap, (void *)cem_cva_wrap};
    -static void * mathieu_b_data[] = {(void *)sem_cva_wrap, (void *)sem_cva_wrap};
    -static void * mathieu_cem_data[] = {(void *)cem_wrap, (void *)cem_wrap};
    -static void * mathieu_sem_data[] = {(void *)sem_wrap, (void *)sem_wrap};
    -static void * mathieu_mcem1_data[] = {(void *)mcm1_wrap, (void *)mcm1_wrap};
    -static void * mathieu_mcem2_data[] = {(void *)mcm2_wrap, (void *)mcm2_wrap};
    -static void * mathieu_msem1_data[] = {(void *)msm1_wrap, (void *)msm1_wrap};
    -static void * mathieu_msem2_data[] = {(void *)msm2_wrap, (void *)msm2_wrap};
    -
    -static void * lpmv_data[] = {(void *)pmv_wrap, (void *)pmv_wrap};
    -static void * pbwa_data[] = {(void *)pbwa_wrap, (void *)pbwa_wrap};
    -static void * pbdv_data[] = {(void *)pbdv_wrap, (void *)pbdv_wrap};
    -static void * pbvv_data[] = {(void *)pbvv_wrap, (void *)pbvv_wrap};
    -static void * prolate_aswfa_data[] = {(void *)prolate_aswfa_wrap, (void *)prolate_aswfa_wrap};
    -static void * prolate_radial1_data[] = {(void *)prolate_radial1_wrap, (void *)prolate_radial1_wrap};
    -static void * prolate_radial2_data[] = {(void *)prolate_radial2_wrap, (void *)prolate_radial2_wrap};
    -static void * oblate_aswfa_data[] = {(void *)oblate_aswfa_wrap, (void *)oblate_aswfa_wrap};
    -static void * oblate_radial1_data[] = {(void *)oblate_radial1_wrap, (void *)oblate_radial1_wrap};
    -static void * oblate_radial2_data[] = {(void *)oblate_radial2_wrap, (void *)oblate_radial2_wrap};
    -static void * prolate_aswfa_nocv_data[] = {(void *)prolate_aswfa_nocv_wrap, (void *)prolate_aswfa_nocv_wrap};
    -static void * prolate_radial1_nocv_data[] = {(void *)prolate_radial1_nocv_wrap, (void *)prolate_radial1_nocv_wrap};
    -static void * prolate_radial2_nocv_data[] = {(void *)prolate_radial2_nocv_wrap, (void *)prolate_radial2_nocv_wrap};
    -static void * oblate_aswfa_nocv_data[] = {(void *)oblate_aswfa_nocv_wrap, (void *)oblate_aswfa_nocv_wrap};
    -static void * oblate_radial1_nocv_data[] = {(void *)oblate_radial1_nocv_wrap, (void *)oblate_radial1_nocv_wrap};
    -static void * oblate_radial2_nocv_data[] = {(void *)oblate_radial2_nocv_wrap, (void *)oblate_radial2_nocv_wrap};
    -static void * prolate_segv_data[] = {(void *)prolate_segv_wrap, (void *)prolate_segv_wrap};
    -static void * oblate_segv_data[] = {(void *)oblate_segv_wrap, (void *)oblate_segv_wrap};
    -
    -static void * modfresnelp_data[] = {(void *)modified_fresnel_plus_wrap, (void *)modified_fresnel_plus_wrap};
    -static void * modfresnelm_data[] = {(void *)modified_fresnel_minus_wrap, (void *)modified_fresnel_minus_wrap};
    -
    -
    -static char cephes_7_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE,};
    -static char cephes_6_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE,};
    -static char cephes_5_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE,};
    -
    -static char cephes_5b2_types[] = { PyArray_FLOAT,  PyArray_CFLOAT,  PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_DOUBLE,  PyArray_CDOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE,};
    -
    -static char cephes_5c_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CDOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, };
    -
    -static char cephes_5c2_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, };
    -
    -static char cephes_4_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE,};
    -
    -static char cephes_4c_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_FLOAT, PyArray_FLOAT, PyArray_CFLOAT, PyArray_CFLOAT, PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE};
    -
    -static char cephes_3_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT,   PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, };
    -static char cephes_3_cmplx_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_FLOAT,   PyArray_DOUBLE,  PyArray_DOUBLE, PyArray_DOUBLE, PyArray_CFLOAT,  PyArray_CFLOAT,  PyArray_CFLOAT,   PyArray_CDOUBLE,  PyArray_CDOUBLE, PyArray_CDOUBLE, };
    -static char cephes_3c_types[] = { PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_FLOAT, PyArray_CFLOAT,  PyArray_CFLOAT, PyArray_DOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, };
    -static char cephes_3cp_types[] = { PyArray_FLOAT, PyArray_CFLOAT,  PyArray_CFLOAT, PyArray_DOUBLE, PyArray_CDOUBLE, PyArray_CDOUBLE, };
    -static char cephes_2_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_DOUBLE,  PyArray_DOUBLE,  };
    -static char cephes_1rc_types[] = { PyArray_FLOAT,  PyArray_FLOAT,  PyArray_DOUBLE,  PyArray_DOUBLE,  PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CDOUBLE, PyArray_CDOUBLE };
    -static char cephes_1c_types[] = { PyArray_CFLOAT, PyArray_CFLOAT, PyArray_CDOUBLE, PyArray_CDOUBLE, };
    -
    -
    -/* Some functions needed from ufunc object, so that Py_complex's aren't being returned
    -between code possibly compiled with different compilers.
    -*/
    -
    -typedef Py_complex ComplexUnaryFunc(Py_complex x);
    -
    -static void cephes_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) {
    -    int i; Py_complex x;
    -    char *ip1=args[0], *op=args[1];
    -    for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
    -	x.real = ((float *)ip1)[0]; x.imag = ((float *)ip1)[1];
    -	x = ((ComplexUnaryFunc *)func)(x);
    -	((float *)op)[0] = (float)x.real;
    -	((float *)op)[1] = (float)x.imag;
    -    }
    -}
    -
    -static void cephes_D_D(char **args, intp *dimensions, intp *steps, void *func) {
    -    int i; Py_complex x;
    -    char *ip1=args[0], *op=args[1];
    -    for(i=0; i<*dimensions; i++, ip1+=steps[0], op+=steps[1]) {
    -	x.real = ((double *)ip1)[0]; x.imag = ((double *)ip1)[1];
    -	x = ((ComplexUnaryFunc *)func)(x);
    -	((double *)op)[0] = x.real;
    -	((double *)op)[1] = x.imag;
    -    }
    -}
    -
    -static void Cephes_InitOperators(PyObject *dictionary) {
    -	PyObject *f;
    -
    -        cephes1_functions[0] = PyUFunc_f_f_As_d_d;
    -        cephes1_functions[1] = PyUFunc_d_d;
    -        cephes1c_functions[0] = cephes_F_F_As_D_D;
    -	cephes1c_functions[1] = cephes_D_D;
    -        cephes1rc_functions[0] = PyUFunc_f_f_As_d_d;
    -        cephes1rc_functions[1] = PyUFunc_d_d;
    -        cephes1rc_functions[2] = cephes_F_F_As_D_D;
    -	cephes1rc_functions[3] = cephes_D_D;
    -        cephes1_2_functions[0] = PyUFunc_f_ff_As_d_dd;
    -        cephes1_2_functions[1] = PyUFunc_d_dd;
    -        cephes1_2_functions[2] = PyUFunc_F_FF_As_D_DD;
    -        cephes1_2_functions[3] = PyUFunc_D_DD;
    -        cephes1_2c_functions[0] = PyUFunc_f_FF_As_d_DD;
    -        cephes1_2c_functions[1] = PyUFunc_d_DD;
    -        cephes1c_4_functions[0] = PyUFunc_f_ffff_As_d_dddd;
    -        cephes1c_4_functions[1] = PyUFunc_d_dddd;
    -        cephes1c_4_functions[2] = PyUFunc_F_FFFF_As_D_DDDD;
    -        cephes1c_4_functions[3] = PyUFunc_D_DDDD;
    -        cephes1cpb_4_functions[0] = PyUFunc_f_FFFF_As_d_DDDD;
    -        cephes1cpb_4_functions[1] = PyUFunc_d_DDDD;
    -        cephes2_functions[0] = PyUFunc_ff_f_As_dd_d;
    -        cephes2_functions[1] = PyUFunc_dd_d;
    -        cephes2_2_functions[0] = PyUFunc_ff_ff_As_dd_dd;
    -        cephes2_2_functions[1] = PyUFunc_dd_dd;
    -        cephes2a_functions[0] = PyUFunc_ff_f_As_id_d;
    -        cephes2a_functions[1] = PyUFunc_dd_d_As_id_d;
    -        cephes2c_functions[0] = PyUFunc_ff_f_As_dd_d;
    -        cephes2c_functions[1] = PyUFunc_dd_d;
    -        cephes2c_functions[2] = PyUFunc_fF_F_As_dD_D;
    -        cephes2c_functions[3] = PyUFunc_dD_D;
    -        cephes2cpp_functions[0] = PyUFunc_fF_F_As_dD_D;
    -        cephes2cpp_functions[1] = PyUFunc_dD_D;
    -        cephes2_4_functions[0] = PyUFunc_ff_ffff_As_dd_dddd;
    -        cephes2_4_functions[1] = PyUFunc_dd_dddd;
    -        cephes3_functions[0] = PyUFunc_fff_f_As_ddd_d;
    -        cephes3_functions[1] = PyUFunc_ddd_d;
    -        cephes3_functions[2] = PyUFunc_ffF_F_As_ddD_D;
    -        cephes3_functions[3] = PyUFunc_ddD_D;
    -        cephes3a_functions[0] = PyUFunc_fff_f_As_iid_d;
    -        cephes3a_functions[1] = PyUFunc_ddd_d_As_iid_d;
    -        cephes3_2_functions[0] = PyUFunc_fff_ff_As_ddd_dd;
    -        cephes3_2_functions[1] = PyUFunc_ddd_dd;
    -        cephes4_functions[0] = PyUFunc_ffff_f_As_dddd_d;
    -        cephes4_functions[1] = PyUFunc_dddd_d;
    -        cephes4_functions[2] = PyUFunc_fffF_F_As_dddD_D;
    -        cephes4_functions[3] = PyUFunc_dddD_D;
    -        cephes4_2_functions[0] = PyUFunc_ffff_ff_As_dddd_dd;
    -        cephes4_2_functions[1] = PyUFunc_dddd_dd;
    -        cephes4a_2_functions[0] = PyUFunc_ffff_ff_As_dddi_dd;
    -        cephes4a_2_functions[1] = PyUFunc_dddd_dd_As_dddi_dd;
    -        cephes5_2_functions[0] = PyUFunc_fffff_ff_As_ddddd_dd;
    -        cephes5_2_functions[1] = PyUFunc_ddddd_dd;
    -
    -	/* Create function objects for each function call and insert
    -	   them in the dictionary */
    -	f = PyUFunc_FromFuncAndData(cephes3a_functions, bdtrc_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "bdtrc", bdtrc_doc, 0);
    -	PyDict_SetItemString(dictionary, "bdtrc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3a_functions, bdtr_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "bdtr", bdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "bdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3a_functions, bdtri_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "bdtri", bdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "bdtri", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, btdtr_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "btdtr", btdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "btdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, btdtri_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "btdtri", btdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "btdtri", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, fdtrc_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtrc", fdtrc_doc, 0);
    -        PyDict_SetItemString(dictionary, "fdtrc", f);
    -        Py_DECREF(f);
    -        f = PyUFunc_FromFuncAndData(cephes3_functions, fdtr_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtr", fdtr_doc, 0);
    -        PyDict_SetItemString(dictionary, "fdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, fdtri_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtri", fdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "fdtri", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, gdtrc_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtrc", gdtrc_doc, 0);
    -	PyDict_SetItemString(dictionary, "gdtrc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, gdtr_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtr", gdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "gdtr", f);
    -	Py_DECREF(f);
    -        /* Use inverse from cdflib (a little faster)
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, gdtri_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtri", gdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "gdtri", f);
    -	Py_DECREF(f);
    -        */
    -
    -	f = PyUFunc_FromFuncAndData(cephes4_functions, hyp2f1_data, cephes_5c2_types, 4, 4, 1, PyUFunc_None, "hyp2f1", hyp2f1_doc, 0);
    -	PyDict_SetItemString(dictionary, "hyp2f1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, hyp1f1_data, cephes_4c_types, 4, 3, 1, PyUFunc_None, "hyp1f1", hyp1f1_doc, 0);
    -	PyDict_SetItemString(dictionary, "hyp1f1", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, hypU_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "hyperu", hyperu_doc, 0);
    -	PyDict_SetItemString(dictionary, "hyperu", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes4a_2_functions, hyp2f0_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "hyp2f0", hyp2f0_doc, 0);
    -	PyDict_SetItemString(dictionary, "hyp2f0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, onef2_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "hyp1f2", hyp1f2_doc, 0);
    -	PyDict_SetItemString(dictionary, "hyp1f2", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, threef0_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "hyp3f0", hyp3f0_doc, 0);
    -	PyDict_SetItemString(dictionary, "hyp3f0", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, incbet_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "betainc", betainc_doc, 0);
    -	PyDict_SetItemString(dictionary, "betainc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, incbi_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "betaincinv", betaincinv_doc, 0);
    -	PyDict_SetItemString(dictionary, "betaincinv", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3a_functions, nbdtrc_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nbdtrc", nbdtrc_doc, 0);
    -	PyDict_SetItemString(dictionary, "nbdtrc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3a_functions, nbdtr_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nbdtr", nbdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "nbdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3a_functions, nbdtri_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nbdtri", nbdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "nbdtri", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, beta_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "beta", beta_doc, 0);
    -	PyDict_SetItemString(dictionary, "beta", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, lbeta_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "betaln", betaln_doc, 0);
    -	PyDict_SetItemString(dictionary, "betaln", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, cbrt_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "cbrt", cbrt_doc, 0);
    -	PyDict_SetItemString(dictionary, "cbrt", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, chdtrc_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "chdtrc", chdtrc_doc, 0);
    -	PyDict_SetItemString(dictionary, "chdtrc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, chdtr_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "chdtr", chdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "chdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, chdtri_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "chdtri", chdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "chdtri", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, dawsn_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "dawsn", dawsn_doc, 0);
    -	PyDict_SetItemString(dictionary, "dawsn", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, ellie_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "ellipeinc", ellipeinc_doc, 0);
    -	PyDict_SetItemString(dictionary, "ellipeinc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, ellik_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "ellipkinc", ellipkinc_doc, 0);
    -	PyDict_SetItemString(dictionary, "ellipkinc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, ellpe_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "ellipe", ellipe_doc, 0);
    -	PyDict_SetItemString(dictionary, "ellipe", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, ellpk_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "ellipkm1", ellipkm1_doc, 0);
    -	PyDict_SetItemString(dictionary, "ellipkm1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, exp10_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "exp10", exp10_doc, 0);
    -	PyDict_SetItemString(dictionary, "exp10", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, exp2_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "exp2", exp2_doc, 0);
    -	PyDict_SetItemString(dictionary, "exp2", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, Gamma_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "gamma", gamma_doc, 0);
    -	PyDict_SetItemString(dictionary, "gamma", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, lgam_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "gammaln", gammaln_doc, 0);
    -	PyDict_SetItemString(dictionary, "gammaln", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, i0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "i0", i0_doc, 0);
    -	PyDict_SetItemString(dictionary, "i0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, i0e_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "i0e", i0e_doc, 0);
    -	PyDict_SetItemString(dictionary, "i0e", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, i1_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "i1", i1_doc, 0);
    -	PyDict_SetItemString(dictionary, "i1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, i1e_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "i1e", i1e_doc, 0);
    -	PyDict_SetItemString(dictionary, "i1e", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, igamc_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "gammaincc", gammaincc_doc, 0);
    -	PyDict_SetItemString(dictionary, "gammaincc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, igam_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "gammainc", gammainc_doc, 0);
    -	PyDict_SetItemString(dictionary, "gammainc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, igami_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "gammainccinv", gammainccinv_doc, 0);
    -	PyDict_SetItemString(dictionary, "gammainccinv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, gammaincinv_data,
    -                                    cephes_3_types, 2, 2, 1, PyUFunc_None,
    -                                    "gammaincinv", gammaincinv_doc, 0);
    -	PyDict_SetItemString(dictionary, "gammaincinv", f);
    -	Py_DECREF(f);
    -
    -        f = PyUFunc_FromFuncAndData(cephes2c_functions, iv_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "iv", iv_doc, 0);
    -	PyDict_SetItemString(dictionary, "iv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, ive_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "ive", ive_doc, 0);
    -	PyDict_SetItemString(dictionary, "ive", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_4_functions, ellpj_data, cephes_6_types, 2, 2, 4, PyUFunc_None, "ellipj", ellipj_doc, 0);
    -	PyDict_SetItemString(dictionary, "ellipj", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, expn_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "expn", expn_doc, 0);
    -	PyDict_SetItemString(dictionary, "expn", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, exp1_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "exp1", exp1_doc, 0);
    -	PyDict_SetItemString(dictionary, "exp1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, expi_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "expi", expi_doc, 0);
    -	PyDict_SetItemString(dictionary, "expi", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, kn_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "kn", kn_doc, 0);
    -	PyDict_SetItemString(dictionary, "kn", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, pdtrc_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "pdtrc", pdtrc_doc, 0);
    -	PyDict_SetItemString(dictionary, "pdtrc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, pdtr_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "pdtr", pdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "pdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, pdtri_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "pdtri", pdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "pdtri", f);
    -	Py_DECREF(f);
    -        /*  Use the student t library from cdflib (it supports doubles for
    -              degrees of freedom
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, stdtr_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "stdtr", stdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "stdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, stdtri_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "stdtri", stdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "stdtri", f);
    -	Py_DECREF(f);
    -        */
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, yn_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "yn", yn_doc, 0);
    -	PyDict_SetItemString(dictionary, "yn", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, smirnov_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "smirnov", smirnov_doc, 0);
    -	PyDict_SetItemString(dictionary, "smirnov", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2a_functions, smirnovi_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "smirnovi", smirnovi_doc, 0);
    -	PyDict_SetItemString(dictionary, "smirnovi", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1c_4_functions, airy_data, cephes_5c_types, 4, 1, 4, PyUFunc_None, "airy", airy_doc, 0);
    -	PyDict_SetItemString(dictionary, "airy", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1c_4_functions, itairy_data, cephes_5_types, 2, 1, 4, PyUFunc_None, "itairy", itairy_doc, 0);
    -	PyDict_SetItemString(dictionary, "itairy", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes1c_4_functions, airye_data, cephes_5c_types, 4, 1, 4, PyUFunc_None, "airye", airye_doc, 0);
    -	PyDict_SetItemString(dictionary, "airye", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, fresnl_data, cephes_3_cmplx_types, 4, 1, 2, PyUFunc_None, "fresnel", fresnel_doc, 0);
    -	PyDict_SetItemString(dictionary, "fresnel", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, shichi_data, cephes_3_types, 2, 1, 2, PyUFunc_None, "shichi", shichi_doc, 0);
    -	PyDict_SetItemString(dictionary, "shichi", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, sici_data, cephes_3_types, 2, 1, 2, PyUFunc_None, "sici", sici_doc, 0);
    -	PyDict_SetItemString(dictionary, "sici", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, itj0y0_data, cephes_3_types, 2, 1, 2, PyUFunc_None, "itj0y0", itj0y0_doc, 0);
    -	PyDict_SetItemString(dictionary, "itj0y0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, it2j0y0_data, cephes_3_types, 2, 1, 2, PyUFunc_None, "it2j0y0", it2j0y0_doc, 0);
    -	PyDict_SetItemString(dictionary, "it2j0y0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, iti0k0_data, cephes_3_types, 2, 1, 2, PyUFunc_None, "iti0k0", iti0k0_doc, 0);
    -	PyDict_SetItemString(dictionary, "iti0k0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_2_functions, it2i0k0_data, cephes_3_types, 2, 1, 2, PyUFunc_None, "it2i0k0", it2i0k0_doc, 0);
    -	PyDict_SetItemString(dictionary, "it2i0k0", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, j0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "j0", j0_doc, 0);
    -	PyDict_SetItemString(dictionary, "j0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, y0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "y0", y0_doc, 0);
    -	PyDict_SetItemString(dictionary, "y0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, j1_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "j1", j1_doc, 0);
    -	PyDict_SetItemString(dictionary, "j1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, y1_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "y1", y1_doc, 0);
    -	PyDict_SetItemString(dictionary, "y1", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, jv_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "jv", jv_doc, 0);
    -	PyDict_SetItemString(dictionary, "jv", f);
    -        /* cephes jn doesn't have any advantages over jv, and is less
    -           accurate. So we alias jv to jn */
    -        PyDict_SetItemString(dictionary, "jn", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, jve_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "jve", jve_doc, 0);
    -	PyDict_SetItemString(dictionary, "jve", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, yv_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "yv", yv_doc, 0);
    -	PyDict_SetItemString(dictionary, "yv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, yve_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "yve", yve_doc, 0);
    -	PyDict_SetItemString(dictionary, "yve", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, k0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "k0", k0_doc, 0);
    -	PyDict_SetItemString(dictionary, "k0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, k0e_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "k0e", k0e_doc, 0);
    -	PyDict_SetItemString(dictionary, "k0e", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, k1_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "k1", k1_doc, 0);
    -	PyDict_SetItemString(dictionary, "k1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, k1e_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "k1e", k1e_doc, 0);
    -	PyDict_SetItemString(dictionary, "k1e", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, kv_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "kv", kv_doc, 0);
    -	PyDict_SetItemString(dictionary, "kv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2c_functions, kve_data, cephes_3c_types, 4, 2, 1, PyUFunc_None, "kve", kve_doc, 0);
    -	PyDict_SetItemString(dictionary, "kve", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2cpp_functions, hankel1_data, cephes_3cp_types, 2, 2, 1, PyUFunc_None, "hankel1", hankel1_doc, 0);
    -	PyDict_SetItemString(dictionary, "hankel1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2cpp_functions, hankel1e_data, cephes_3cp_types, 2, 2, 1, PyUFunc_None, "hankel1e", hankel1e_doc, 0);
    -	PyDict_SetItemString(dictionary, "hankel1e", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2cpp_functions, hankel2_data, cephes_3cp_types, 2, 2, 1, PyUFunc_None, "hankel2", hankel2_doc, 0);
    -	PyDict_SetItemString(dictionary, "hankel2", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2cpp_functions, hankel2e_data, cephes_3cp_types, 2, 2, 1, PyUFunc_None, "hankel2e", hankel2e_doc, 0);
    -	PyDict_SetItemString(dictionary, "hankel2e", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, ndtr_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "ndtr", ndtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "ndtr", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, erfc_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "erfc", erfc_doc, 0);
    -	PyDict_SetItemString(dictionary, "erfc", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, erf_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "erf", erf_doc, 0);
    -	PyDict_SetItemString(dictionary, "erf", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, ndtri_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "ndtri", ndtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "ndtri", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, psi_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "psi", psi_doc, 0);
    -	PyDict_SetItemString(dictionary, "psi", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1rc_functions, rgamma_data, cephes_1rc_types, 4, 1, 1, PyUFunc_None, "rgamma", rgamma_doc, 0);
    -	PyDict_SetItemString(dictionary, "rgamma", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, round_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "round", round_doc, 0);
    -	PyDict_SetItemString(dictionary, "round", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, sindg_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "sindg", sindg_doc, 0);
    -	PyDict_SetItemString(dictionary, "sindg", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, cosdg_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "cosdg", cosdg_doc, 0);
    -	PyDict_SetItemString(dictionary, "cosdg", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, radian_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "radian", radian_doc, 0);
    -	PyDict_SetItemString(dictionary, "radian", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, tandg_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "tandg", tandg_doc, 0);
    -	PyDict_SetItemString(dictionary, "tandg", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, cotdg_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "cotdg", cotdg_doc, 0);
    -	PyDict_SetItemString(dictionary, "cotdg", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, log1p_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "log1p", log1p_doc, 0);
    -	PyDict_SetItemString(dictionary, "log1p", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, expm1_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "expm1", expm1_doc, 0);
    -	PyDict_SetItemString(dictionary, "expm1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, cosm1_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "cosm1", cosm1_doc, 0);
    -	PyDict_SetItemString(dictionary, "cosm1", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, spence_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "spence", spence_doc, 0);
    -	PyDict_SetItemString(dictionary, "spence", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, zetac_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "zetac", zetac_doc, 0);
    -	PyDict_SetItemString(dictionary, "zetac", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, struve_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "struve", struve_doc, 0);
    -	PyDict_SetItemString(dictionary, "struve", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, modstruve_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "modstruve", modstruve_doc, 0);
    -	PyDict_SetItemString(dictionary, "modstruve", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, itstruve0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "itstruve0", itstruve0_doc, 0);
    -	PyDict_SetItemString(dictionary, "itstruve0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, it2struve0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "it2struve0", it2struve0_doc, 0);
    -	PyDict_SetItemString(dictionary, "it2struve0", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, itmodstruve0_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "itmodstruve0", itmodstruve0_doc, 0);
    -	PyDict_SetItemString(dictionary, "itmodstruve0", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1cpb_4_functions, kelvin_data, cephes_5b2_types, 2, 1, 4, PyUFunc_None, "kelvin", kelvin_doc, 0);
    -	PyDict_SetItemString(dictionary, "kelvin", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, ber_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "ber", ber_doc, 0);
    -	PyDict_SetItemString(dictionary, "ber", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, bei_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "bei", bei_doc, 0);
    -	PyDict_SetItemString(dictionary, "bei", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, ker_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "ker", ker_doc, 0);
    -	PyDict_SetItemString(dictionary, "ker", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, kei_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "kei", kei_doc, 0);
    -	PyDict_SetItemString(dictionary, "kei", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, berp_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "berp", berp_doc, 0);
    -	PyDict_SetItemString(dictionary, "berp", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, beip_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "beip", beip_doc, 0);
    -	PyDict_SetItemString(dictionary, "beip", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, kerp_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "kerp", kerp_doc, 0);
    -	PyDict_SetItemString(dictionary, "kerp", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, keip_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "keip", keip_doc, 0);
    -	PyDict_SetItemString(dictionary, "keip", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, zeta_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "zeta", zeta_doc, 0);
    -	PyDict_SetItemString(dictionary, "zeta", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, kolmogorov_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "kolmogorov", kolmogorov_doc, 0);
    -	PyDict_SetItemString(dictionary, "kolmogorov", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_functions, kolmogi_data, cephes_2_types, 2, 1, 1, PyUFunc_None, "kolmogi", kolmogi_doc, 0);
    -	PyDict_SetItemString(dictionary, "kolmogi", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1c_functions, wofz_data, cephes_1c_types, 2, 1, 1, PyUFunc_None, "wofz", wofz_doc, 0);
    -	PyDict_SetItemString(dictionary, "wofz", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, besselpoly_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "besselpoly", besselpoly_doc, 0);
    -	PyDict_SetItemString(dictionary, "besselpoly", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfbet3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "btdtria", "", 0);
    -	PyDict_SetItemString(dictionary, "btdtria", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfbet4_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "btdtrib", "", 0);
    -	PyDict_SetItemString(dictionary, "btdtrib", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfbin2_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "bdtrik", "", 0);
    -	PyDict_SetItemString(dictionary, "bdtrik", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfbin3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "bdtrin", "", 0);
    -	PyDict_SetItemString(dictionary, "bdtrin", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, cdfchi3_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "chdtriv", "", 0);
    -	PyDict_SetItemString(dictionary, "chdtriv", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfchn1_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "chndtr", "", 0);
    -	PyDict_SetItemString(dictionary, "chndtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfchn2_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "chndtrix", "", 0);
    -	PyDict_SetItemString(dictionary, "chndtrix", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfchn3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "chndtridf", "", 0);
    -	PyDict_SetItemString(dictionary, "chndtridf", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfchn4_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "chndtrinc", "", 0);
    -	PyDict_SetItemString(dictionary, "chndtrinc", f);
    -	Py_DECREF(f);
    -
    -        /*
    -        f = PyUFunc_FromFuncAndData(cephes3_functions, cdff1_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtr", fdtr_doc, 0);
    -        PyDict_SetItemString(dictionary, "fdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdff2_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtrix", fdtri_doc, 0);
    -	PyDict_SetItemString(dictionary, "fdtrix", f);
    -	Py_DECREF(f);
    -        */
    -
    -        /*  The Fortran code for this one seems not to be working properly.
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdff3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtridfn", "", 0);
    -	PyDict_SetItemString(dictionary, "fdtridfn", f);
    -	Py_DECREF(f);
    -        */
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdff4_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "fdtridfd", "", 0);
    -	PyDict_SetItemString(dictionary, "fdtridfd", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes4_functions, cdffnc1_data, cephes_5_types, 2, 4, 1, PyUFunc_None, "ncfdtr", "", 0);
    -	PyDict_SetItemString(dictionary, "ncfdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_functions, cdffnc2_data, cephes_5_types, 2, 4, 1, PyUFunc_None, "ncfdtri", "", 0);
    -	PyDict_SetItemString(dictionary, "ncfdtri", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_functions, cdffnc3_data, cephes_5_types, 2, 4, 1, PyUFunc_None, "ncfdtridfn", "", 0);
    -	PyDict_SetItemString(dictionary, "ncfdtridfn", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes4_functions, cdffnc4_data, cephes_5_types, 2, 4, 1, PyUFunc_None, "ncfdtridfd", "", 0);
    -	PyDict_SetItemString(dictionary, "ncfdtridfd", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_functions, cdffnc5_data, cephes_5_types, 2, 4, 1, PyUFunc_None, "ncfdtrinc", "", 0);
    -	PyDict_SetItemString(dictionary, "ncfdtrinc", f);
    -	Py_DECREF(f);
    -
    -        /*
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfgam1_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtr2", "", 0);
    -	PyDict_SetItemString(dictionary, "gdtr2", f);
    -	Py_DECREF(f);
    -        */
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfgam2_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtrix", "", 0);
    -	PyDict_SetItemString(dictionary, "gdtrix", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfgam3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtrib", "", 0);
    -	PyDict_SetItemString(dictionary, "gdtrib", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfgam4_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "gdtria", "", 0);
    -	PyDict_SetItemString(dictionary, "gdtria", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfnbn2_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nbdtrik", "", 0);
    -	PyDict_SetItemString(dictionary, "nbdtrik", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfnbn3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nbdtrin", "", 0);
    -	PyDict_SetItemString(dictionary, "nbdtrin", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfnor3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nrdtrimn", "", 0);
    -	PyDict_SetItemString(dictionary, "nrdtrimn", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdfnor4_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nrdtrisd", "", 0);
    -	PyDict_SetItemString(dictionary, "nrdtrisd", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, cdfpoi2_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "pdtrik", "", 0);
    -	PyDict_SetItemString(dictionary, "pdtrik", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, cdft1_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "stdtr", stdtr_doc, 0);
    -	PyDict_SetItemString(dictionary, "stdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, cdft2_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "stdtrit", stdtrit_doc, 0);
    -	PyDict_SetItemString(dictionary, "stdtrit", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, cdft3_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "stdtridf", stdtridf_doc, 0);
    -	PyDict_SetItemString(dictionary, "stdtridf", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdftnc1_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nctdtr", "", 0);
    -	PyDict_SetItemString(dictionary, "nctdtr", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdftnc2_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nctdtrit", "", 0);
    -	PyDict_SetItemString(dictionary, "nctdtrit", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdftnc3_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nctdtridf", "", 0);
    -	PyDict_SetItemString(dictionary, "nctdtridf", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, cdftnc4_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "nctdtrinc", "", 0);
    -	PyDict_SetItemString(dictionary, "nctdtrinc", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, tklambda_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "tklmbda", "", 0);
    -	PyDict_SetItemString(dictionary, "tklmbda", f);
    -	Py_DECREF(f);
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, mathieu_a_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "mathieu_a", mathieu_a_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_a", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_functions, mathieu_b_data, cephes_3_types, 2, 2, 1, PyUFunc_None, "mathieu_b", mathieu_b_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_b", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_2_functions, mathieu_cem_data, cephes_5_types, 2, 3, 2, PyUFunc_None, "mathieu_cem", mathieu_cem_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_cem", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_2_functions, mathieu_sem_data, cephes_5_types, 2, 3, 2, PyUFunc_None, "mathieu_sem", mathieu_sem_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_sem", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_2_functions, mathieu_mcem1_data, cephes_5_types, 2, 3, 2, PyUFunc_None, "mathieu_modcem1", mathieu_modcem1_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_modcem1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_2_functions, mathieu_mcem2_data, cephes_5_types, 2, 3, 2, PyUFunc_None, "mathieu_modcem2", mathieu_modcem2_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_modcem2", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_2_functions, mathieu_msem1_data, cephes_5_types, 2, 3, 2, PyUFunc_None, "mathieu_modsem1", mathieu_modsem1_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_modsem1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_2_functions, mathieu_msem2_data, cephes_5_types, 2, 3, 2, PyUFunc_None, "mathieu_modsem2", mathieu_modsem2_doc, 0);
    -	PyDict_SetItemString(dictionary, "mathieu_modsem2", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, lpmv_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "lpmv", lpmv_doc, 0);
    -	PyDict_SetItemString(dictionary, "lpmv", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes2_2_functions, pbwa_data, cephes_4_types, 2, 2, 2, PyUFunc_None, "pbwa", pbwa_doc, 0);
    -	PyDict_SetItemString(dictionary, "pbwa", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_2_functions, pbdv_data, cephes_4_types, 2, 2, 2, PyUFunc_None, "pbdv", pbdv_doc, 0);
    -	PyDict_SetItemString(dictionary, "pbdv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes2_2_functions, pbvv_data, cephes_4_types, 2, 2, 2, PyUFunc_None, "pbvv", pbvv_doc, 0);
    -	PyDict_SetItemString(dictionary, "pbvv", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, prolate_segv_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "pro_cv", pro_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes3_functions, oblate_segv_data, cephes_4_types, 2, 3, 1, PyUFunc_None, "obl_cv", obl_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes5_2_functions, prolate_aswfa_data, cephes_7_types, 2, 5, 2, PyUFunc_None, "pro_ang1_cv", pro_ang1_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_ang1_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes5_2_functions, prolate_radial1_data, cephes_7_types, 2, 5, 2, PyUFunc_None, "pro_rad1_cv", pro_rad1_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_rad1_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes5_2_functions, prolate_radial2_data, cephes_7_types, 2, 5, 2, PyUFunc_None, "pro_rad2_cv", pro_rad2_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_rad2_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes5_2_functions, oblate_aswfa_data, cephes_7_types, 2, 5, 2, PyUFunc_None, "obl_ang1_cv", obl_ang1_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_ang1_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes5_2_functions, oblate_radial1_data, cephes_7_types, 2, 5, 2, PyUFunc_None, "obl_rad1_cv", obl_rad1_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_rad1_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes5_2_functions, oblate_radial2_data, cephes_7_types, 2, 5, 2, PyUFunc_None, "obl_rad2_cv", obl_rad2_cv_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_rad2_cv", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, prolate_aswfa_nocv_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "pro_ang1", pro_ang1_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_ang1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, prolate_radial1_nocv_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "pro_rad1", pro_rad1_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_rad1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, prolate_radial2_nocv_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "pro_rad2", pro_rad2_doc, 0);
    -	PyDict_SetItemString(dictionary, "pro_rad2", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, oblate_aswfa_nocv_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "obl_ang1", obl_ang1_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_ang1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, oblate_radial1_nocv_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "obl_rad1", obl_rad1_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_rad1", f);
    -	Py_DECREF(f);
    -	f = PyUFunc_FromFuncAndData(cephes4_2_functions, oblate_radial2_nocv_data, cephes_6_types, 2, 4, 2, PyUFunc_None, "obl_rad2", obl_rad2_doc, 0);
    -	PyDict_SetItemString(dictionary, "obl_rad2", f);
    -	Py_DECREF(f);
    -
    -
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_2c_functions, modfresnelp_data, cephes_3cp_types, 2, 1, 2, PyUFunc_None, "modfresnelp", modfresnelp_doc, 0);
    -	PyDict_SetItemString(dictionary, "modfresnelp", f);
    -	Py_DECREF(f);
    -
    -	f = PyUFunc_FromFuncAndData(cephes1_2c_functions, modfresnelm_data, cephes_3cp_types, 2, 1, 2, PyUFunc_None, "modfresnelm", modfresnelm_doc, 0);
    -	PyDict_SetItemString(dictionary, "modfresnelm", f);
    -	Py_DECREF(f);
    -
    -
    -
    -}
    -
    -static PyObject *scipy_special_SpecialFunctionWarning = NULL;
    -
    -void scipy_special_raise_warning(char *fmt, ...)
    -{
    -    char msg[1024];
    -    va_list ap;
    -    NPY_ALLOW_C_API_DEF;
    -
    -    va_start(ap, fmt);
    -    PyOS_vsnprintf(msg, 1024, fmt, ap);
    -    va_end(ap);
    -
    -    NPY_ALLOW_C_API;
    -    PyErr_Warn(scipy_special_SpecialFunctionWarning, msg);
    -    NPY_DISABLE_C_API;
    -}
    -
    -static char errprint_doc[] = \
    -"errprint({flag}) sets the error printing flag for special functions\n" \
    -"    (from the cephesmodule). The output is the previous state.\n" \
    -"    With errprint(0) no error messages are shown;\n" \
    -"    the default is errprint(1).\n" \
    -"    If no argument is given the current state of\n" \
    -"    the flag is returned and no change occurs.\n";
    -
    -
    -static PyObject *errprint_func(PyObject *self, PyObject *args)
    -{
    -  int inflag = -37;
    -  int oldflag = 0;
    -  if (!PyArg_ParseTuple ( args, "|i;cephes.errprint", &inflag)) return NULL;
    -
    -  oldflag = scipy_special_print_error_messages;
    -  if (inflag != -37) {
    -    scipy_special_print_error_messages = (inflag != 0);
    -  }
    -  return PyInt_FromLong((long) oldflag);
    -}
    -
    -
    -static struct PyMethodDef methods[] = {
    -  {"errprint", errprint_func, METH_VARARGS, errprint_doc},
    -  {NULL,		NULL, 0}		/* sentinel */
    -};
    -
    -#if PY_VERSION_HEX >= 0x03000000
    -static struct PyModuleDef moduledef = {
    -    PyModuleDef_HEAD_INIT,
    -    "_cephes",
    -    NULL,
    -    -1,
    -    methods,
    -    NULL,
    -    NULL,
    -    NULL,
    -    NULL
    -};
    -
    -PyObject *PyInit__cephes(void)
    -{
    -    PyObject *m, *s, *d;
    -
    -    m = PyModule_Create(&moduledef);
    -    import_array();
    -    import_ufunc();
    -
    -    /* Add some symbolic constants to the module */
    -    d = PyModule_GetDict(m);
    -
    -    s = PyUString_FromString("2.0");
    -    PyDict_SetItemString(d, "__version__", s);
    -    Py_DECREF(s);
    -
    -    /* Add scipy_special_print_error_message global variable */
    -    /*  No, instead acessible through errprint */
    -
    -    /* Load the cephes operators into the array module's namespace */
    -    Cephes_InitOperators(d);
    -
    -    /* Register and add the warning type object */
    -    scipy_special_SpecialFunctionWarning = PyErr_NewException(
    -            "scipy.special._cephes.SpecialFunctionWarning",
    -            PyExc_RuntimeWarning,
    -            NULL);
    -    PyModule_AddObject(m, "SpecialFunctionWarning",
    -            scipy_special_SpecialFunctionWarning);
    -
    -    /* Check for errors */
    -    if (PyErr_Occurred())
    -        Py_FatalError("can't initialize module _cephes");
    -    return m;
    -}
    -#else
    -PyMODINIT_FUNC init_cephes(void) {
    -  PyObject *m, *d, *s;
    -
    -  /* Create the module and add the functions */
    -  m = Py_InitModule("_cephes", methods);
    -
    -  /* Import the ufunc objects */
    -  import_array();
    -  import_ufunc();
    -
    -  /* Add some symbolic constants to the module */
    -  d = PyModule_GetDict(m);
    -
    -  s = PyString_FromString("2.0");
    -  PyDict_SetItemString(d, "__version__", s);
    -  Py_DECREF(s);
    -
    -  /* Add scipy_special_print_error_message global variable */
    -  /*  No, instead acessible through errprint */
    -
    -  /* Load the cephes operators into the array module's namespace */
    -  Cephes_InitOperators(d);
    -
    -  /* Register and add the warning type object */
    -  scipy_special_SpecialFunctionWarning = PyErr_NewException(
    -      "scipy.special._cephes.SpecialFunctionWarning",
    -      PyExc_RuntimeWarning,
    -      NULL);
    -  PyModule_AddObject(m, "SpecialFunctionWarning",
    -                     scipy_special_SpecialFunctionWarning);
    -
    -  /* Check for errors */
    -  if (PyErr_Occurred())
    -    Py_FatalError("can't initialize module _cephes");
    -}
    -#endif
    diff --git a/scipy-0.10.1/scipy/special/_logit.c.src b/scipy-0.10.1/scipy/special/_logit.c.src
    deleted file mode 100644
    index 237ca83da0..0000000000
    --- a/scipy-0.10.1/scipy/special/_logit.c.src
    +++ /dev/null
    @@ -1,166 +0,0 @@
    -/*-*-c-*-*/
    -
    -/*
    - * ufuncs to compute logit(p) = log(p/(1-p)) and
    - * expit(x) = 1/(1+exp(-x))
    - */
    -
    -#include 
    -#include 
    -
    -#include "numpy/npy_math.h"
    -#include "numpy/ndarraytypes.h"
    -#include "numpy/ufuncobject.h"
    -
    -/*
    - * Inner loops for logit and expit
    - */
    -
    -/**begin repeat
    - * #type = npy_float, npy_double, npy_longdouble#
    - * #c = f,,l#
    - */
    -
    -static void
    -logit_loop@c@(char **args, npy_intp *dimensions,
    -              npy_intp* steps, void* data)
    -{
    -    npy_intp i;
    -    npy_intp n = dimensions[0];
    -    char *in = args[0], *out = args[1];
    -    npy_intp in_step = steps[0], out_step = steps[1];
    -
    -    @type@ tmp;
    -
    -    for (i = 0; i < n; i++) {
    -        tmp = *(@type@ *)in;
    -        tmp /= 1 - tmp;
    -        *((@type@ *)out) = npy_log@c@(tmp);
    -
    -        in += in_step;
    -        out += out_step;
    -    }
    -}
    -
    -static void
    -expit_loop@c@(char **args, npy_intp *dimensions,
    -              npy_intp* steps, void* data)
    -{
    -    npy_intp i;
    -    npy_intp n = dimensions[0];
    -    char *in = args[0], *out = args[1];
    -    npy_intp in_step = steps[0], out_step = steps[1];
    -
    -    @type@ tmp;
    -
    -    for (i = 0; i < n; i++) {
    -        tmp = *(@type@ *)in;
    -        if (tmp > 0) {
    -            tmp = npy_exp@c@(tmp);
    -            *((@type@ *)out) = tmp / (1 + tmp);
    -        }
    -        else{
    -            *((@type@ *)out) = 1 / (1 + npy_exp@c@(-tmp));
    -        }
    -        in += in_step;
    -        out += out_step;
    -    }
    -}
    -
    -/**end repeat**/
    -
    -/*
    - * Definitions for the ufuncs.
    - */
    -
    -static PyUFuncGenericFunction expit_funcs[3] = {&expit_loopf,
    -                                                &expit_loop,
    -                                                &expit_loopl};
    -
    -static PyUFuncGenericFunction logit_funcs[3] = {&logit_loopf,
    -                                                &logit_loop,
    -                                                &logit_loopl};
    -
    -static char types[6] = {NPY_FLOAT, NPY_FLOAT,
    -                        NPY_DOUBLE, NPY_DOUBLE,
    -                        NPY_LONGDOUBLE, NPY_LONGDOUBLE};
    -
    -static void *data[3] = {NULL, NULL, NULL};
    -
    -/* Module definition */
    -
    -static PyMethodDef module_methods[] = {
    -    { NULL, NULL, 0, NULL }
    -};
    -
    -#if PY_VERSION_HEX >= 0x03000000
    -
    -static PyModuleDef moduledef = {
    -    PyModuleDef_HEAD_INIT,
    -    "_logit",
    -    NULL,
    -    -1,
    -    module_methods,
    -    NULL,
    -    NULL,
    -    NULL,
    -    NULL
    -};
    -
    -PyMODINIT_FUNC
    -PyInit__logit()
    -{
    -    PyObject *m, *f, *d;
    -    m = PyModule_Create(&moduledef);
    -    if (!m) {
    -        return NULL;
    -    }
    -
    -    import_array();
    -    import_umath();
    -
    -    d = PyModule_GetDict(m);
    -
    -    f = PyUFunc_FromFuncAndData(logit_funcs,data, types, 3, 1, 1,
    -                                PyUFunc_None, "logit",NULL , 0);
    -    PyDict_SetItemString(d, "logit", f);
    -    Py_DECREF(f);
    -
    -    f = PyUFunc_FromFuncAndData(expit_funcs,data, types, 3, 1, 1,
    -                                PyUFunc_None, "expit",NULL , 0);
    -    PyDict_SetItemString(d, "expit", f);
    -    Py_DECREF(f);
    -
    -    return m;
    -}
    -
    -#else
    -
    -PyMODINIT_FUNC
    -init_logit()
    -{
    -    PyObject *m, *f,  *d;
    -
    -    m  = Py_InitModule("_logit", module_methods);
    -    if (m == NULL) {
    -        return;
    -    }
    -
    -    d = PyModule_GetDict(m);
    -
    -    import_array();
    -    import_umath();
    -
    -    f = PyUFunc_FromFuncAndData(logit_funcs,data, types, 3, 1, 1,
    -                                PyUFunc_None, "logit",NULL , 0);
    -    PyDict_SetItemString(d , "logit", f);
    -    Py_DECREF(f);
    -
    -
    -    f = PyUFunc_FromFuncAndData(expit_funcs,data, types, 3, 1, 1,
    -                                PyUFunc_None, "expit",NULL , 0);
    -    PyDict_SetItemString(d , "expit", f);
    -    Py_DECREF(f);
    -}
    -
    -#endif
    diff --git a/scipy-0.10.1/scipy/special/_testutils.py b/scipy-0.10.1/scipy/special/_testutils.py
    deleted file mode 100644
    index bdab3877e4..0000000000
    --- a/scipy-0.10.1/scipy/special/_testutils.py
    +++ /dev/null
    @@ -1,236 +0,0 @@
    -import os
    -import warnings
    -
    -import numpy as np
    -from numpy.testing import assert_
    -from numpy.testing.noseclasses import KnownFailureTest
    -
    -import scipy.special as sc
    -
    -__all__ = ['with_special_errors', 'assert_tol_equal', 'assert_func_equal',
    -           'FuncData']
    -
    -#------------------------------------------------------------------------------
    -# Enable convergence and loss of precision warnings -- turn off one by one
    -#------------------------------------------------------------------------------
    -
    -def with_special_errors(func):
    -    """
    -    Enable special function errors (such as underflow, overflow,
    -    loss of precision, etc.)
    -    """
    -    def wrapper(*a, **kw):
    -        old_filters = list(getattr(warnings, 'filters', []))
    -        old_errprint = sc.errprint(1)
    -        warnings.filterwarnings("error", category=sc.SpecialFunctionWarning)
    -        try:
    -            return func(*a, **kw)
    -        finally:
    -            sc.errprint(old_errprint)
    -            setattr(warnings, 'filters', old_filters)
    -    wrapper.__name__ = func.__name__
    -    wrapper.__doc__ = func.__doc__
    -    return wrapper
    -
    -#------------------------------------------------------------------------------
    -# Comparing function values at many data points at once, with helpful
    -#------------------------------------------------------------------------------
    -
    -def assert_tol_equal(a, b, rtol=1e-7, atol=0, err_msg='', verbose=True):
    -    """Assert that `a` and `b` are equal to tolerance ``atol + rtol*abs(b)``"""
    -    def compare(x, y):
    -        return np.allclose(x, y, rtol=rtol, atol=atol)
    -    a, b = np.asanyarray(a), np.asanyarray(b)
    -    header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
    -    np.testing.utils.assert_array_compare(compare, a, b, err_msg=str(err_msg),
    -                                          verbose=verbose, header=header)
    -
    -#------------------------------------------------------------------------------
    -# Comparing function values at many data points at once, with helpful
    -# error reports
    -#------------------------------------------------------------------------------
    -
    -def assert_func_equal(func, results, points, rtol=None, atol=None,
    -                      param_filter=None, knownfailure=None,
    -                      vectorized=True, dtype=None):
    -    if hasattr(points, 'next'):
    -        # it's a generator
    -        points = list(points)
    -
    -    points = np.asarray(points)
    -    if points.ndim == 1:
    -        points = points[:,None]
    -
    -    if hasattr(results, '__name__'):
    -        # function
    -        if vectorized:
    -            results = results(*tuple(points.T))
    -        else:
    -            results = np.array([results(*tuple(p)) for p in points])
    -            if results.dtype == object:
    -                try:
    -                    results = results.astype(float)
    -                except TypeError:
    -                    results = results.astype(complex)
    -    else:
    -        results = np.asarray(results)
    -
    -    npoints = points.shape[1]
    -
    -    data = np.c_[points, results]
    -    fdata = FuncData(func, data, range(npoints), range(npoints, data.shape[1]),
    -                     rtol=rtol, atol=atol, param_filter=param_filter,
    -                     knownfailure=knownfailure)
    -    fdata.check()
    -
    -class FuncData(object):
    -    """
    -    Data set for checking a special function.
    -
    -    Parameters
    -    ----------
    -    func : function
    -        Function to test
    -    filename : str
    -        Input file name
    -    param_columns : int or tuple of ints
    -        Columns indices in which the parameters to `func` lie.
    -        Can be imaginary integers to indicate that the parameter
    -        should be cast to complex.
    -    result_columns : int or tuple of ints
    -        Column indices for expected results from `func`.
    -    rtol : float, optional
    -        Required relative tolerance. Default is 5*eps.
    -    atol : float, optional
    -        Required absolute tolerance. Default is 5*tiny.
    -    param_filter : function, or tuple of functions/Nones, optional
    -        Filter functions to exclude some parameter ranges.
    -        If omitted, no filtering is done.
    -    knownfailure : str, optional
    -        Known failure error message to raise when the test is run.
    -        If omitted, no exception is raised.
    -
    -    """
    -
    -    def __init__(self, func, data, param_columns, result_columns,
    -                 rtol=None, atol=None, param_filter=None, knownfailure=None,
    -                 dataname=None):
    -        self.func = func
    -        self.data = data
    -        self.dataname = dataname
    -        if not hasattr(param_columns, '__len__'):
    -            param_columns = (param_columns,)
    -        if not hasattr(result_columns, '__len__'):
    -            result_columns = (result_columns,)
    -        self.param_columns = tuple(param_columns)
    -        self.result_columns = tuple(result_columns)
    -        self.rtol = rtol
    -        self.atol = atol
    -        if not hasattr(param_filter, '__len__'):
    -            param_filter = (param_filter,)
    -        self.param_filter = param_filter
    -        self.knownfailure = knownfailure
    -
    -    def get_tolerances(self, dtype):
    -        info = np.finfo(dtype)
    -        rtol, atol = self.rtol, self.atol
    -        if rtol is None:
    -            rtol = 5*info.eps
    -        if atol is None:
    -            atol = 5*info.tiny
    -        return rtol, atol
    -
    -    def check(self, data=None, dtype=None):
    -        """Check the special function against the data."""
    -
    -        if self.knownfailure:
    -            raise KnownFailureTest(self.knownfailure)
    -
    -        if data is None:
    -            data = self.data
    -
    -        if dtype is None:
    -            dtype = data.dtype
    -        else:
    -            data = data.astype(dtype)
    -
    -        rtol, atol = self.get_tolerances(dtype)
    -
    -        # Apply given filter functions
    -        if self.param_filter:
    -            param_mask = np.ones((data.shape[0],), np.bool_)
    -            for j, filter in zip(self.param_columns, self.param_filter):
    -                if filter:
    -                    param_mask &= filter(data[:,j])
    -            data = data[param_mask]
    -
    -        # Pick parameters and results from the correct columns
    -        params = []
    -        for j in self.param_columns:
    -            if np.iscomplexobj(j):
    -                j = int(j.imag)
    -                params.append(data[:,j].astype(np.complex))
    -            else:
    -                params.append(data[:,j])
    -        wanted = tuple([data[:,j] for j in self.result_columns])
    -
    -        # Evaluate
    -        got = self.func(*params)
    -        if not isinstance(got, tuple):
    -            got = (got,)
    -
    -        # Check the validity of each output returned
    -
    -        assert_(len(got) == len(wanted))
    -
    -        for output_num, (x, y) in enumerate(zip(got, wanted)):
    -            pinf_x = np.isinf(x) & (x > 0)
    -            pinf_y = np.isinf(y) & (x > 0)
    -            minf_x = np.isinf(x) & (x < 0)
    -            minf_y = np.isinf(y) & (x < 0)
    -            nan_x = np.isnan(x)
    -            nan_y = np.isnan(y)
    -
    -            abs_y = np.absolute(y)
    -            abs_y[~np.isfinite(abs_y)] = 0
    -            diff = np.absolute(x - y)
    -            diff[~np.isfinite(diff)] = 0
    -
    -            rdiff = diff / np.absolute(y)
    -            rdiff[~np.isfinite(rdiff)] = 0
    -
    -            tol_mask = (diff < atol + rtol*abs_y)
    -            pinf_mask = (pinf_x == pinf_y)
    -            minf_mask = (minf_x == minf_y)
    -            nan_mask = (nan_x == nan_y)
    -
    -            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)
    -
    -            if np.any(bad_j):
    -                # Some bad results: inform what, where, and how bad
    -                msg = [""]
    -                msg.append("Max |adiff|: %g" % diff.max())
    -                msg.append("Max |rdiff|: %g" % rdiff.max())
    -                msg.append("Bad results for the following points (in output %d):"
    -                           % output_num)
    -                for j in np.where(bad_j)[0]:
    -                    j = int(j)
    -                    fmt = lambda x: "%30s" % np.array2string(x[j], precision=18)
    -                    a = "  ".join(map(fmt, params))
    -                    b = "  ".join(map(fmt, got))
    -                    c = "  ".join(map(fmt, wanted))
    -                    d = fmt(rdiff)
    -                    msg.append("%s => %s != %s  (rdiff %s)" % (a, b, c, d))
    -                assert_(False, "\n".join(msg))
    -
    -    def __repr__(self):
    -        """Pretty-printing, esp. for Nose output"""
    -        if np.any(map(np.iscomplexobj, self.param_columns)):
    -            is_complex = " (complex)"
    -        else:
    -            is_complex = ""
    -        if self.dataname:
    -            return "" % (self.func.__name__, is_complex,
    -                                            os.path.basename(self.dataname))
    -        else:
    -            return "" % (self.func.__name__, is_complex)
    diff --git a/scipy-0.10.1/scipy/special/add_newdocs.py b/scipy-0.10.1/scipy/special/add_newdocs.py
    deleted file mode 100644
    index 01ddb2d5ca..0000000000
    --- a/scipy-0.10.1/scipy/special/add_newdocs.py
    +++ /dev/null
    @@ -1,67 +0,0 @@
    -
    -# Adding documentation to the logit and expit ufuncs
    -
    -from _logit import logit, expit
    -
    -try:
    -    from numpy.lib import add_newdoc_ufunc
    -
    -    add_newdoc_ufunc(logit,
    -        """
    -        Logit ufunc for ndarrays.
    -
    -        The logit function is defined as logit(p) = log(p/(1-p)).
    -        Note that logit(0) = -inf, logit(1) = inf, and logit(p)
    -        for p<0 or p>1 yields nan.
    -
    -        Parameters
    -        ----------
    -        x : ndarray
    -            The ndarray to apply logit to element-wise.
    -
    -        Returns
    -        -------
    -        out : ndarray
    -            An ndarray of the same shape as x. Its entries
    -            are logit of the corresponding entry of x.
    -
    -        Notes
    -        -----
    -        As a ufunc logit takes a number of optional
    -        keywork arguments. For more information
    -        see `ufuncs `_
    -        """)
    -
    -    add_newdoc_ufunc(expit,
    -        """
    -        Expit ufunc for ndarrays.
    -
    -        The expit function is defined as expit(x) = 1/(1+exp(-x)).
    -        Note that expit is the inverse logit function.
    -
    -        Parameters
    -        ----------
    -        x : ndarray
    -            The ndarray to apply expit to element-wise.
    -
    -        Returns
    -        -------
    -        out : ndarray
    -            An ndarray of the same shape as x. Its entries
    -            are expit of the corresponding entry of x.
    -
    -        Notes
    -        -----
    -        As a ufunc logit takes a number of optional
    -        keywork arguments. For more information
    -        see `ufuncs `_
    -        """)
    -
    -except ImportError:
    -    pass
    -
    -
    -
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/special/amos/dgamln.f b/scipy-0.10.1/scipy/special/amos/dgamln.f
    deleted file mode 100644
    index 792014be51..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/dgamln.f
    +++ /dev/null
    @@ -1,189 +0,0 @@
    -      DOUBLE PRECISION FUNCTION DGAMLN(Z,IERR)
    -C***BEGIN PROLOGUE  DGAMLN
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  830501   (YYMMDD)
    -C***CATEGORY NO.  B5F
    -C***KEYWORDS  GAMMA FUNCTION,LOGARITHM OF GAMMA FUNCTION
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE THE LOGARITHM OF THE GAMMA FUNCTION
    -C***DESCRIPTION
    -C
    -C               **** A DOUBLE PRECISION ROUTINE ****
    -C         DGAMLN COMPUTES THE NATURAL LOG OF THE GAMMA FUNCTION FOR
    -C         Z.GT.0.  THE ASYMPTOTIC EXPANSION IS USED TO GENERATE VALUES
    -C         GREATER THAN ZMIN WHICH ARE ADJUSTED BY THE RECURSION
    -C         G(Z+1)=Z*G(Z) FOR Z.LE.ZMIN.  THE FUNCTION WAS MADE AS
    -C         PORTABLE AS POSSIBLE BY COMPUTIMG ZMIN FROM THE NUMBER OF BASE
    -C         10 DIGITS IN A WORD, RLN=AMAX1(-ALOG10(R1MACH(4)),0.5E-18)
    -C         LIMITED TO 18 DIGITS OF (RELATIVE) ACCURACY.
    -C
    -C         SINCE INTEGER ARGUMENTS ARE COMMON, A TABLE LOOK UP ON 100
    -C         VALUES IS USED FOR SPEED OF EXECUTION.
    -C
    -C     DESCRIPTION OF ARGUMENTS
    -C
    -C         INPUT      Z IS D0UBLE PRECISION
    -C           Z      - ARGUMENT, Z.GT.0.0D0
    -C
    -C         OUTPUT      DGAMLN IS DOUBLE PRECISION
    -C           DGAMLN  - NATURAL LOG OF THE GAMMA FUNCTION AT Z.NE.0.0D0
    -C           IERR    - ERROR FLAG
    -C                     IERR=0, NORMAL RETURN, COMPUTATION COMPLETED
    -C                     IERR=1, Z.LE.0.0D0,    NO COMPUTATION
    -C
    -C
    -C***REFERENCES  COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 BY D. E. AMOS, SAND83-0083, MAY, 1983.
    -C***ROUTINES CALLED  I1MACH,D1MACH
    -C***END PROLOGUE  DGAMLN
    -      DOUBLE PRECISION CF, CON, FLN, FZ, GLN, RLN, S, TLG, TRM, TST,
    -     * T1, WDTOL, Z, ZDMY, ZINC, ZM, ZMIN, ZP, ZSQ, D1MACH
    -      INTEGER I, IERR, I1M, K, MZ, NZ, I1MACH
    -      DIMENSION CF(22), GLN(100)
    -C           LNGAMMA(N), N=1,100
    -      DATA GLN(1), GLN(2), GLN(3), GLN(4), GLN(5), GLN(6), GLN(7),
    -     1     GLN(8), GLN(9), GLN(10), GLN(11), GLN(12), GLN(13), GLN(14),
    -     2     GLN(15), GLN(16), GLN(17), GLN(18), GLN(19), GLN(20),
    -     3     GLN(21), GLN(22)/
    -     4     0.00000000000000000D+00,     0.00000000000000000D+00,
    -     5     6.93147180559945309D-01,     1.79175946922805500D+00,
    -     6     3.17805383034794562D+00,     4.78749174278204599D+00,
    -     7     6.57925121201010100D+00,     8.52516136106541430D+00,
    -     8     1.06046029027452502D+01,     1.28018274800814696D+01,
    -     9     1.51044125730755153D+01,     1.75023078458738858D+01,
    -     A     1.99872144956618861D+01,     2.25521638531234229D+01,
    -     B     2.51912211827386815D+01,     2.78992713838408916D+01,
    -     C     3.06718601060806728D+01,     3.35050734501368889D+01,
    -     D     3.63954452080330536D+01,     3.93398841871994940D+01,
    -     E     4.23356164607534850D+01,     4.53801388984769080D+01/
    -      DATA GLN(23), GLN(24), GLN(25), GLN(26), GLN(27), GLN(28),
    -     1     GLN(29), GLN(30), GLN(31), GLN(32), GLN(33), GLN(34),
    -     2     GLN(35), GLN(36), GLN(37), GLN(38), GLN(39), GLN(40),
    -     3     GLN(41), GLN(42), GLN(43), GLN(44)/
    -     4     4.84711813518352239D+01,     5.16066755677643736D+01,
    -     5     5.47847293981123192D+01,     5.80036052229805199D+01,
    -     6     6.12617017610020020D+01,     6.45575386270063311D+01,
    -     7     6.78897431371815350D+01,     7.12570389671680090D+01,
    -     8     7.46582363488301644D+01,     7.80922235533153106D+01,
    -     9     8.15579594561150372D+01,     8.50544670175815174D+01,
    -     A     8.85808275421976788D+01,     9.21361756036870925D+01,
    -     B     9.57196945421432025D+01,     9.93306124547874269D+01,
    -     C     1.02968198614513813D+02,     1.06631760260643459D+02,
    -     D     1.10320639714757395D+02,     1.14034211781461703D+02,
    -     E     1.17771881399745072D+02,     1.21533081515438634D+02/
    -      DATA GLN(45), GLN(46), GLN(47), GLN(48), GLN(49), GLN(50),
    -     1     GLN(51), GLN(52), GLN(53), GLN(54), GLN(55), GLN(56),
    -     2     GLN(57), GLN(58), GLN(59), GLN(60), GLN(61), GLN(62),
    -     3     GLN(63), GLN(64), GLN(65), GLN(66)/
    -     4     1.25317271149356895D+02,     1.29123933639127215D+02,
    -     5     1.32952575035616310D+02,     1.36802722637326368D+02,
    -     6     1.40673923648234259D+02,     1.44565743946344886D+02,
    -     7     1.48477766951773032D+02,     1.52409592584497358D+02,
    -     8     1.56360836303078785D+02,     1.60331128216630907D+02,
    -     9     1.64320112263195181D+02,     1.68327445448427652D+02,
    -     A     1.72352797139162802D+02,     1.76395848406997352D+02,
    -     B     1.80456291417543771D+02,     1.84533828861449491D+02,
    -     C     1.88628173423671591D+02,     1.92739047287844902D+02,
    -     D     1.96866181672889994D+02,     2.01009316399281527D+02,
    -     E     2.05168199482641199D+02,     2.09342586752536836D+02/
    -      DATA GLN(67), GLN(68), GLN(69), GLN(70), GLN(71), GLN(72),
    -     1     GLN(73), GLN(74), GLN(75), GLN(76), GLN(77), GLN(78),
    -     2     GLN(79), GLN(80), GLN(81), GLN(82), GLN(83), GLN(84),
    -     3     GLN(85), GLN(86), GLN(87), GLN(88)/
    -     4     2.13532241494563261D+02,     2.17736934113954227D+02,
    -     5     2.21956441819130334D+02,     2.26190548323727593D+02,
    -     6     2.30439043565776952D+02,     2.34701723442818268D+02,
    -     7     2.38978389561834323D+02,     2.43268849002982714D+02,
    -     8     2.47572914096186884D+02,     2.51890402209723194D+02,
    -     9     2.56221135550009525D+02,     2.60564940971863209D+02,
    -     A     2.64921649798552801D+02,     2.69291097651019823D+02,
    -     B     2.73673124285693704D+02,     2.78067573440366143D+02,
    -     C     2.82474292687630396D+02,     2.86893133295426994D+02,
    -     D     2.91323950094270308D+02,     2.95766601350760624D+02,
    -     E     3.00220948647014132D+02,     3.04686856765668715D+02/
    -      DATA GLN(89), GLN(90), GLN(91), GLN(92), GLN(93), GLN(94),
    -     1     GLN(95), GLN(96), GLN(97), GLN(98), GLN(99), GLN(100)/
    -     2     3.09164193580146922D+02,     3.13652829949879062D+02,
    -     3     3.18152639620209327D+02,     3.22663499126726177D+02,
    -     4     3.27185287703775217D+02,     3.31717887196928473D+02,
    -     5     3.36261181979198477D+02,     3.40815058870799018D+02,
    -     6     3.45379407062266854D+02,     3.49954118040770237D+02,
    -     7     3.54539085519440809D+02,     3.59134205369575399D+02/
    -C             COEFFICIENTS OF ASYMPTOTIC EXPANSION
    -      DATA CF(1), CF(2), CF(3), CF(4), CF(5), CF(6), CF(7), CF(8),
    -     1     CF(9), CF(10), CF(11), CF(12), CF(13), CF(14), CF(15),
    -     2     CF(16), CF(17), CF(18), CF(19), CF(20), CF(21), CF(22)/
    -     3     8.33333333333333333D-02,    -2.77777777777777778D-03,
    -     4     7.93650793650793651D-04,    -5.95238095238095238D-04,
    -     5     8.41750841750841751D-04,    -1.91752691752691753D-03,
    -     6     6.41025641025641026D-03,    -2.95506535947712418D-02,
    -     7     1.79644372368830573D-01,    -1.39243221690590112D+00,
    -     8     1.34028640441683920D+01,    -1.56848284626002017D+02,
    -     9     2.19310333333333333D+03,    -3.61087712537249894D+04,
    -     A     6.91472268851313067D+05,    -1.52382215394074162D+07,
    -     B     3.82900751391414141D+08,    -1.08822660357843911D+10,
    -     C     3.47320283765002252D+11,    -1.23696021422692745D+13,
    -     D     4.88788064793079335D+14,    -2.13203339609193739D+16/
    -C
    -C             LN(2*PI)
    -      DATA CON                    /     1.83787706640934548D+00/
    -C
    -C***FIRST EXECUTABLE STATEMENT  DGAMLN
    -      IERR=0
    -      IF (Z.LE.0.0D0) GO TO 70
    -      IF (Z.GT.101.0D0) GO TO 10
    -      NZ = INT(SNGL(Z))
    -      FZ = Z - FLOAT(NZ)
    -      IF (FZ.GT.0.0D0) GO TO 10
    -      IF (NZ.GT.100) GO TO 10
    -      DGAMLN = GLN(NZ)
    -      RETURN
    -   10 CONTINUE
    -      WDTOL = D1MACH(4)
    -      WDTOL = DMAX1(WDTOL,0.5D-18)
    -      I1M = I1MACH(14)
    -      RLN = D1MACH(5)*FLOAT(I1M)
    -      FLN = DMIN1(RLN,20.0D0)
    -      FLN = DMAX1(FLN,3.0D0)
    -      FLN = FLN - 3.0D0
    -      ZM = 1.8000D0 + 0.3875D0*FLN
    -      MZ = INT(SNGL(ZM)) + 1
    -      ZMIN = FLOAT(MZ)
    -      ZDMY = Z
    -      ZINC = 0.0D0
    -      IF (Z.GE.ZMIN) GO TO 20
    -      ZINC = ZMIN - FLOAT(NZ)
    -      ZDMY = Z + ZINC
    -   20 CONTINUE
    -      ZP = 1.0D0/ZDMY
    -      T1 = CF(1)*ZP
    -      S = T1
    -      IF (ZP.LT.WDTOL) GO TO 40
    -      ZSQ = ZP*ZP
    -      TST = T1*WDTOL
    -      DO 30 K=2,22
    -        ZP = ZP*ZSQ
    -        TRM = CF(K)*ZP
    -        IF (DABS(TRM).LT.TST) GO TO 40
    -        S = S + TRM
    -   30 CONTINUE
    -   40 CONTINUE
    -      IF (ZINC.NE.0.0D0) GO TO 50
    -      TLG = DLOG(Z)
    -      DGAMLN = Z*(TLG-1.0D0) + 0.5D0*(CON-TLG) + S
    -      RETURN
    -   50 CONTINUE
    -      ZP = 1.0D0
    -      NZ = INT(SNGL(ZINC))
    -      DO 60 I=1,NZ
    -        ZP = ZP*(Z+FLOAT(I-1))
    -   60 CONTINUE
    -      TLG = DLOG(ZDMY)
    -      DGAMLN = ZDMY*(TLG-1.0D0) - DLOG(ZP) + 0.5D0*(CON-TLG) + S
    -      RETURN
    -C
    -C
    -   70 CONTINUE
    -      IERR=1
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/dsclmr.f b/scipy-0.10.1/scipy/special/amos/dsclmr.f
    deleted file mode 100644
    index d9d6334515..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/dsclmr.f
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -      SUBROUTINE DSCLMR
    -C   * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
    -C   *                 ISSUED BY SANDIA LABORATORIES,
    -C   *                   A PRIME CONTRACTOR TO THE
    -C   *               UNITED STATES DEPARTMENT OF ENERGY
    -C   * * * * * * * * * * * * * *  NOTICE   * * * * * * * * * * * * * * *
    -C   * THIS REPORT WAS PREPARED AS AN ACCOUNT OF WORK SPONSORED BY THE
    -C   * UNITED STATES GOVERNMENT.  NEITHER THE UNITED STATES NOR THE
    -C   * UNITED STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR
    -C   * EMPLOYEES, NOR ANY OF THEIR CONTRACTORS, SUBCONTRACTORS, OR THEIR
    -C   * EMPLOYEES, MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY
    -C   * LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS
    -C   * OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT OR PROCESS
    -C   * DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE
    -C   * PRIVATELY OWNED RIGHTS.
    -C   * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
    -C   * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
    -C   * THIS CODE HAS BEEN APPROVED FOR UNLIMITED RELEASE.
    -C   * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/fdump.f b/scipy-0.10.1/scipy/special/amos/fdump.f
    deleted file mode 100644
    index a67d54dd5b..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/fdump.f
    +++ /dev/null
    @@ -1,25 +0,0 @@
    -      SUBROUTINE FDUMP
    -C***BEGIN PROLOGUE  FDUMP
    -C***DATE WRITTEN   790801   (YYMMDD)
    -C***REVISION DATE  861211   (YYMMDD)
    -C***CATEGORY NO.  R3
    -C***KEYWORDS  LIBRARY=SLATEC(XERROR),TYPE=ALL(FDUMP-A),ERROR
    -C***AUTHOR  JONES, R. E., (SNLA)
    -C***PURPOSE  Symbolic dump (should be locally written).
    -C***DESCRIPTION
    -C
    -C        ***Note*** Machine Dependent Routine
    -C        FDUMP is intended to be replaced by a locally written
    -C        version which produces a symbolic dump.  Failing this,
    -C        it should be replaced by a version which prints the
    -C        subprogram nesting list.  Note that this dump must be
    -C        printed on each of up to five files, as indicated by the
    -C        XGETUA routine.  See XSETUA and XGETUA for details.
    -C
    -C     Written by Ron Jones, with SLATEC Common Math Library Subcommittee
    -C***REFERENCES  (NONE)
    -C***ROUTINES CALLED  (NONE)
    -C***END PROLOGUE  FDUMP
    -C***FIRST EXECUTABLE STATEMENT  FDUMP
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zabs.f b/scipy-0.10.1/scipy/special/amos/zabs.f
    deleted file mode 100644
    index 31514a2d6a..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zabs.f
    +++ /dev/null
    @@ -1,29 +0,0 @@
    -      DOUBLE PRECISION FUNCTION AZABS(ZR, ZI)
    -C***BEGIN PROLOGUE  AZABS
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZBESY,ZAIRY,ZBIRY
    -C
    -C     AZABS COMPUTES THE ABSOLUTE VALUE OR MAGNITUDE OF A DOUBLE
    -C     PRECISION COMPLEX VARIABLE CMPLX(ZR,ZI)
    -C
    -C***ROUTINES CALLED  (NONE)
    -C***END PROLOGUE  AZABS
    -      DOUBLE PRECISION ZR, ZI, U, V, Q, S
    -      U = DABS(ZR)
    -      V = DABS(ZI)
    -      S = U + V
    -C-----------------------------------------------------------------------
    -C     S*1.0D0 MAKES AN UNNORMALIZED UNDERFLOW ON CDC MACHINES INTO A
    -C     TRUE FLOATING ZERO
    -C-----------------------------------------------------------------------
    -      S = S*1.0D+0
    -      IF (S.EQ.0.0D+0) GO TO 20
    -      IF (U.GT.V) GO TO 10
    -      Q = U/V
    -      AZABS = V*DSQRT(1.D+0+Q*Q)
    -      RETURN
    -   10 Q = V/U
    -      AZABS = U*DSQRT(1.D+0+Q*Q)
    -      RETURN
    -   20 AZABS = 0.0D+0
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zacai.f b/scipy-0.10.1/scipy/special/amos/zacai.f
    deleted file mode 100644
    index 87eba88d26..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zacai.f
    +++ /dev/null
    @@ -1,99 +0,0 @@
    -      SUBROUTINE ZACAI(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, RL, TOL,
    -     * ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZACAI
    -C***REFER TO  ZAIRY
    -C
    -C     ZACAI APPLIES THE ANALYTIC CONTINUATION FORMULA
    -C
    -C         K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN)
    -C                 MP=PI*MR*CMPLX(0.0,1.0)
    -C
    -C     TO CONTINUE THE K FUNCTION FROM THE RIGHT HALF TO THE LEFT
    -C     HALF Z PLANE FOR USE WITH ZAIRY WHERE FNU=1/3 OR 2/3 AND N=1.
    -C     ZACAI IS THE SAME AS ZACON WITH THE PARTS FOR LARGER ORDERS AND
    -C     RECURRENCE REMOVED. A RECURSIVE CALL TO ZACON CAN RESULT IF ZACON
    -C     IS CALLED FROM ZAIRY.
    -C
    -C***ROUTINES CALLED  ZASYI,ZBKNU,ZMLRI,ZSERI,ZS1S2,D1MACH,AZABS
    -C***END PROLOGUE  ZACAI
    -C     COMPLEX CSGN,CSPN,C1,C2,Y,Z,ZN,CY
    -      DOUBLE PRECISION ALIM, ARG, ASCLE, AZ, CSGNR, CSGNI, CSPNR,
    -     * CSPNI, C1R, C1I, C2R, C2I, CYR, CYI, DFNU, ELIM, FMR, FNU, PI,
    -     * RL, SGN, TOL, YY, YR, YI, ZR, ZI, ZNR, ZNI, D1MACH, AZABS
    -      INTEGER INU, IUF, KODE, MR, N, NN, NW, NZ
    -      DIMENSION YR(N), YI(N), CYR(2), CYI(2)
    -      DATA PI / 3.14159265358979324D0 /
    -      NZ = 0
    -      ZNR = -ZR
    -      ZNI = -ZI
    -      AZ = AZABS(ZR,ZI)
    -      NN = N
    -      DFNU = FNU + DBLE(FLOAT(N-1))
    -      IF (AZ.LE.2.0D0) GO TO 10
    -      IF (AZ*AZ*0.25D0.GT.DFNU+1.0D0) GO TO 20
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     POWER SERIES FOR THE I FUNCTION
    -C-----------------------------------------------------------------------
    -      CALL ZSERI(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, TOL, ELIM, ALIM)
    -      GO TO 40
    -   20 CONTINUE
    -      IF (AZ.LT.RL) GO TO 30
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR LARGE Z FOR THE I FUNCTION
    -C-----------------------------------------------------------------------
    -      CALL ZASYI(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, RL, TOL, ELIM,
    -     * ALIM)
    -      IF (NW.LT.0) GO TO 80
    -      GO TO 40
    -   30 CONTINUE
    -C-----------------------------------------------------------------------
    -C     MILLER ALGORITHM NORMALIZED BY THE SERIES FOR THE I FUNCTION
    -C-----------------------------------------------------------------------
    -      CALL ZMLRI(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, TOL)
    -      IF(NW.LT.0) GO TO 80
    -   40 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ANALYTIC CONTINUATION TO THE LEFT HALF PLANE FOR THE K FUNCTION
    -C-----------------------------------------------------------------------
    -      CALL ZBKNU(ZNR, ZNI, FNU, KODE, 1, CYR, CYI, NW, TOL, ELIM, ALIM)
    -      IF (NW.NE.0) GO TO 80
    -      FMR = DBLE(FLOAT(MR))
    -      SGN = -DSIGN(PI,FMR)
    -      CSGNR = 0.0D0
    -      CSGNI = SGN
    -      IF (KODE.EQ.1) GO TO 50
    -      YY = -ZNI
    -      CSGNR = -CSGNI*DSIN(YY)
    -      CSGNI = CSGNI*DCOS(YY)
    -   50 CONTINUE
    -C-----------------------------------------------------------------------
    -C     CALCULATE CSPN=EXP(FNU*PI*I) TO MINIMIZE LOSSES OF SIGNIFICANCE
    -C     WHEN FNU IS LARGE
    -C-----------------------------------------------------------------------
    -      INU = INT(SNGL(FNU))
    -      ARG = (FNU-DBLE(FLOAT(INU)))*SGN
    -      CSPNR = DCOS(ARG)
    -      CSPNI = DSIN(ARG)
    -      IF (MOD(INU,2).EQ.0) GO TO 60
    -      CSPNR = -CSPNR
    -      CSPNI = -CSPNI
    -   60 CONTINUE
    -      C1R = CYR(1)
    -      C1I = CYI(1)
    -      C2R = YR(1)
    -      C2I = YI(1)
    -      IF (KODE.EQ.1) GO TO 70
    -      IUF = 0
    -      ASCLE = 1.0D+3*D1MACH(1)/TOL
    -      CALL ZS1S2(ZNR, ZNI, C1R, C1I, C2R, C2I, NW, ASCLE, ALIM, IUF)
    -      NZ = NZ + NW
    -   70 CONTINUE
    -      YR(1) = CSPNR*C1R - CSPNI*C1I + CSGNR*C2R - CSGNI*C2I
    -      YI(1) = CSPNR*C1I + CSPNI*C1R + CSGNR*C2I + CSGNI*C2R
    -      RETURN
    -   80 CONTINUE
    -      NZ = -1
    -      IF(NW.EQ.(-2)) NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zacon.f b/scipy-0.10.1/scipy/special/amos/zacon.f
    deleted file mode 100644
    index b0dbb913da..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zacon.f
    +++ /dev/null
    @@ -1,203 +0,0 @@
    -      SUBROUTINE ZACON(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, RL, FNUL,
    -     * TOL, ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZACON
    -C***REFER TO  ZBESK,ZBESH
    -C
    -C     ZACON APPLIES THE ANALYTIC CONTINUATION FORMULA
    -C
    -C         K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN)
    -C                 MP=PI*MR*CMPLX(0.0,1.0)
    -C
    -C     TO CONTINUE THE K FUNCTION FROM THE RIGHT HALF TO THE LEFT
    -C     HALF Z PLANE
    -C
    -C***ROUTINES CALLED  ZBINU,ZBKNU,ZS1S2,D1MACH,AZABS,ZMLT
    -C***END PROLOGUE  ZACON
    -C     COMPLEX CK,CONE,CSCL,CSCR,CSGN,CSPN,CY,CZERO,C1,C2,RZ,SC1,SC2,ST,
    -C    *S1,S2,Y,Z,ZN
    -      DOUBLE PRECISION ALIM, ARG, ASCLE, AS2, AZN, BRY, BSCLE, CKI,
    -     * CKR, CONER, CPN, CSCL, CSCR, CSGNI, CSGNR, CSPNI, CSPNR,
    -     * CSR, CSRR, CSSR, CYI, CYR, C1I, C1M, C1R, C2I, C2R, ELIM, FMR,
    -     * FN, FNU, FNUL, PI, PTI, PTR, RAZN, RL, RZI, RZR, SC1I, SC1R,
    -     * SC2I, SC2R, SGN, SPN, STI, STR, S1I, S1R, S2I, S2R, TOL, YI, YR,
    -     * YY, ZEROR, ZI, ZNI, ZNR, ZR, D1MACH, AZABS
    -      INTEGER I, INU, IUF, KFLAG, KODE, MR, N, NN, NW, NZ
    -      DIMENSION YR(N), YI(N), CYR(2), CYI(2), CSSR(3), CSRR(3), BRY(3)
    -      DATA PI / 3.14159265358979324D0 /
    -      DATA ZEROR,CONER / 0.0D0,1.0D0 /
    -      NZ = 0
    -      ZNR = -ZR
    -      ZNI = -ZI
    -      NN = N
    -      CALL ZBINU(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, RL, FNUL, TOL,
    -     * ELIM, ALIM)
    -      IF (NW.LT.0) GO TO 90
    -C-----------------------------------------------------------------------
    -C     ANALYTIC CONTINUATION TO THE LEFT HALF PLANE FOR THE K FUNCTION
    -C-----------------------------------------------------------------------
    -      NN = MIN0(2,N)
    -      CALL ZBKNU(ZNR, ZNI, FNU, KODE, NN, CYR, CYI, NW, TOL, ELIM, ALIM)
    -      IF (NW.NE.0) GO TO 90
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      FMR = DBLE(FLOAT(MR))
    -      SGN = -DSIGN(PI,FMR)
    -      CSGNR = ZEROR
    -      CSGNI = SGN
    -      IF (KODE.EQ.1) GO TO 10
    -      YY = -ZNI
    -      CPN = DCOS(YY)
    -      SPN = DSIN(YY)
    -      CALL ZMLT(CSGNR, CSGNI, CPN, SPN, CSGNR, CSGNI)
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     CALCULATE CSPN=EXP(FNU*PI*I) TO MINIMIZE LOSSES OF SIGNIFICANCE
    -C     WHEN FNU IS LARGE
    -C-----------------------------------------------------------------------
    -      INU = INT(SNGL(FNU))
    -      ARG = (FNU-DBLE(FLOAT(INU)))*SGN
    -      CPN = DCOS(ARG)
    -      SPN = DSIN(ARG)
    -      CSPNR = CPN
    -      CSPNI = SPN
    -      IF (MOD(INU,2).EQ.0) GO TO 20
    -      CSPNR = -CSPNR
    -      CSPNI = -CSPNI
    -   20 CONTINUE
    -      IUF = 0
    -      C1R = S1R
    -      C1I = S1I
    -      C2R = YR(1)
    -      C2I = YI(1)
    -      ASCLE = 1.0D+3*D1MACH(1)/TOL
    -      IF (KODE.EQ.1) GO TO 30
    -      CALL ZS1S2(ZNR, ZNI, C1R, C1I, C2R, C2I, NW, ASCLE, ALIM, IUF)
    -      NZ = NZ + NW
    -      SC1R = C1R
    -      SC1I = C1I
    -   30 CONTINUE
    -      CALL ZMLT(CSPNR, CSPNI, C1R, C1I, STR, STI)
    -      CALL ZMLT(CSGNR, CSGNI, C2R, C2I, PTR, PTI)
    -      YR(1) = STR + PTR
    -      YI(1) = STI + PTI
    -      IF (N.EQ.1) RETURN
    -      CSPNR = -CSPNR
    -      CSPNI = -CSPNI
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      C1R = S2R
    -      C1I = S2I
    -      C2R = YR(2)
    -      C2I = YI(2)
    -      IF (KODE.EQ.1) GO TO 40
    -      CALL ZS1S2(ZNR, ZNI, C1R, C1I, C2R, C2I, NW, ASCLE, ALIM, IUF)
    -      NZ = NZ + NW
    -      SC2R = C1R
    -      SC2I = C1I
    -   40 CONTINUE
    -      CALL ZMLT(CSPNR, CSPNI, C1R, C1I, STR, STI)
    -      CALL ZMLT(CSGNR, CSGNI, C2R, C2I, PTR, PTI)
    -      YR(2) = STR + PTR
    -      YI(2) = STI + PTI
    -      IF (N.EQ.2) RETURN
    -      CSPNR = -CSPNR
    -      CSPNI = -CSPNI
    -      AZN = AZABS(ZNR,ZNI)
    -      RAZN = 1.0D0/AZN
    -      STR = ZNR*RAZN
    -      STI = -ZNI*RAZN
    -      RZR = (STR+STR)*RAZN
    -      RZI = (STI+STI)*RAZN
    -      FN = FNU + 1.0D0
    -      CKR = FN*RZR
    -      CKI = FN*RZI
    -C-----------------------------------------------------------------------
    -C     SCALE NEAR EXPONENT EXTREMES DURING RECURRENCE ON K FUNCTIONS
    -C-----------------------------------------------------------------------
    -      CSCL = 1.0D0/TOL
    -      CSCR = TOL
    -      CSSR(1) = CSCL
    -      CSSR(2) = CONER
    -      CSSR(3) = CSCR
    -      CSRR(1) = CSCR
    -      CSRR(2) = CONER
    -      CSRR(3) = CSCL
    -      BRY(1) = ASCLE
    -      BRY(2) = 1.0D0/ASCLE
    -      BRY(3) = D1MACH(2)
    -      AS2 = AZABS(S2R,S2I)
    -      KFLAG = 2
    -      IF (AS2.GT.BRY(1)) GO TO 50
    -      KFLAG = 1
    -      GO TO 60
    -   50 CONTINUE
    -      IF (AS2.LT.BRY(2)) GO TO 60
    -      KFLAG = 3
    -   60 CONTINUE
    -      BSCLE = BRY(KFLAG)
    -      S1R = S1R*CSSR(KFLAG)
    -      S1I = S1I*CSSR(KFLAG)
    -      S2R = S2R*CSSR(KFLAG)
    -      S2I = S2I*CSSR(KFLAG)
    -      CSR = CSRR(KFLAG)
    -      DO 80 I=3,N
    -        STR = S2R
    -        STI = S2I
    -        S2R = CKR*STR - CKI*STI + S1R
    -        S2I = CKR*STI + CKI*STR + S1I
    -        S1R = STR
    -        S1I = STI
    -        C1R = S2R*CSR
    -        C1I = S2I*CSR
    -        STR = C1R
    -        STI = C1I
    -        C2R = YR(I)
    -        C2I = YI(I)
    -        IF (KODE.EQ.1) GO TO 70
    -        IF (IUF.LT.0) GO TO 70
    -        CALL ZS1S2(ZNR, ZNI, C1R, C1I, C2R, C2I, NW, ASCLE, ALIM, IUF)
    -        NZ = NZ + NW
    -        SC1R = SC2R
    -        SC1I = SC2I
    -        SC2R = C1R
    -        SC2I = C1I
    -        IF (IUF.NE.3) GO TO 70
    -        IUF = -4
    -        S1R = SC1R*CSSR(KFLAG)
    -        S1I = SC1I*CSSR(KFLAG)
    -        S2R = SC2R*CSSR(KFLAG)
    -        S2I = SC2I*CSSR(KFLAG)
    -        STR = SC2R
    -        STI = SC2I
    -   70   CONTINUE
    -        PTR = CSPNR*C1R - CSPNI*C1I
    -        PTI = CSPNR*C1I + CSPNI*C1R
    -        YR(I) = PTR + CSGNR*C2R - CSGNI*C2I
    -        YI(I) = PTI + CSGNR*C2I + CSGNI*C2R
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        CSPNR = -CSPNR
    -        CSPNI = -CSPNI
    -        IF (KFLAG.GE.3) GO TO 80
    -        PTR = DABS(C1R)
    -        PTI = DABS(C1I)
    -        C1M = DMAX1(PTR,PTI)
    -        IF (C1M.LE.BSCLE) GO TO 80
    -        KFLAG = KFLAG + 1
    -        BSCLE = BRY(KFLAG)
    -        S1R = S1R*CSR
    -        S1I = S1I*CSR
    -        S2R = STR
    -        S2I = STI
    -        S1R = S1R*CSSR(KFLAG)
    -        S1I = S1I*CSSR(KFLAG)
    -        S2R = S2R*CSSR(KFLAG)
    -        S2I = S2I*CSSR(KFLAG)
    -        CSR = CSRR(KFLAG)
    -   80 CONTINUE
    -      RETURN
    -   90 CONTINUE
    -      NZ = -1
    -      IF(NW.EQ.(-2)) NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zairy.f b/scipy-0.10.1/scipy/special/amos/zairy.f
    deleted file mode 100644
    index 484e64cd57..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zairy.f
    +++ /dev/null
    @@ -1,393 +0,0 @@
    -      SUBROUTINE ZAIRY(ZR, ZI, ID, KODE, AIR, AII, NZ, IERR)
    -C***BEGIN PROLOGUE  ZAIRY
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  AIRY FUNCTION,BESSEL FUNCTIONS OF ORDER ONE THIRD
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE AIRY FUNCTIONS AI(Z) AND DAI(Z) FOR COMPLEX Z
    -C***DESCRIPTION
    -C
    -C                      ***A DOUBLE PRECISION ROUTINE***
    -C         ON KODE=1, ZAIRY COMPUTES THE COMPLEX AIRY FUNCTION AI(Z) OR
    -C         ITS DERIVATIVE DAI(Z)/DZ ON ID=0 OR ID=1 RESPECTIVELY. ON
    -C         KODE=2, A SCALING OPTION CEXP(ZTA)*AI(Z) OR CEXP(ZTA)*
    -C         DAI(Z)/DZ IS PROVIDED TO REMOVE THE EXPONENTIAL DECAY IN
    -C         -PI/3.LT.ARG(Z).LT.PI/3 AND THE EXPONENTIAL GROWTH IN
    -C         PI/3.LT.ABS(ARG(Z)).LT.PI WHERE ZTA=(2/3)*Z*CSQRT(Z).
    -C
    -C         WHILE THE AIRY FUNCTIONS AI(Z) AND DAI(Z)/DZ ARE ANALYTIC IN
    -C         THE WHOLE Z PLANE, THE CORRESPONDING SCALED FUNCTIONS DEFINED
    -C         FOR KODE=2 HAVE A CUT ALONG THE NEGATIVE REAL AXIS.
    -C         DEFINTIONS AND NOTATION ARE FOUND IN THE NBS HANDBOOK OF
    -C         MATHEMATICAL FUNCTIONS (REF. 1).
    -C
    -C         INPUT      ZR,ZI ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI)
    -C           ID     - ORDER OF DERIVATIVE, ID=0 OR ID=1
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             AI=AI(Z)                ON ID=0 OR
    -C                             AI=DAI(Z)/DZ            ON ID=1
    -C                        = 2  RETURNS
    -C                             AI=CEXP(ZTA)*AI(Z)       ON ID=0 OR
    -C                             AI=CEXP(ZTA)*DAI(Z)/DZ   ON ID=1 WHERE
    -C                             ZTA=(2/3)*Z*CSQRT(Z)
    -C
    -C         OUTPUT     AIR,AII ARE DOUBLE PRECISION
    -C           AIR,AII- COMPLEX ANSWER DEPENDING ON THE CHOICES FOR ID AND
    -C                    KODE
    -C           NZ     - UNDERFLOW INDICATOR
    -C                    NZ= 0   , NORMAL RETURN
    -C                    NZ= 1   , AI=CMPLX(0.0D0,0.0D0) DUE TO UNDERFLOW IN
    -C                              -PI/3.LT.ARG(Z).LT.PI/3 ON KODE=1
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, REAL(ZTA)
    -C                            TOO LARGE ON KODE=1
    -C                    IERR=3, CABS(Z) LARGE      - COMPUTATION COMPLETED
    -C                            LOSSES OF SIGNIFCANCE BY ARGUMENT REDUCTION
    -C                            PRODUCE LESS THAN HALF OF MACHINE ACCURACY
    -C                    IERR=4, CABS(Z) TOO LARGE  - NO COMPUTATION
    -C                            COMPLETE LOSS OF ACCURACY BY ARGUMENT
    -C                            REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         AI AND DAI ARE COMPUTED FOR CABS(Z).GT.1.0 FROM THE K BESSEL
    -C         FUNCTIONS BY
    -C
    -C            AI(Z)=C*SQRT(Z)*K(1/3,ZTA) , DAI(Z)=-C*Z*K(2/3,ZTA)
    -C                           C=1.0/(PI*SQRT(3.0))
    -C                            ZTA=(2/3)*Z**(3/2)
    -C
    -C         WITH THE POWER SERIES FOR CABS(Z).LE.1.0.
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z IS LARGE, LOSSES
    -C         OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR. CONSEQUENTLY, IF
    -C         THE MAGNITUDE OF ZETA=(2/3)*Z**1.5 EXCEEDS U1=SQRT(0.5/UR),
    -C         THEN LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR
    -C         FLAG IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         ALSO, IF THE MAGNITUDE OF ZETA IS LARGER THAN U2=0.5/UR, THEN
    -C         ALL SIGNIFICANCE IS LOST AND IERR=4. IN ORDER TO USE THE INT
    -C         FUNCTION, ZETA MUST BE FURTHER RESTRICTED NOT TO EXCEED THE
    -C         LARGEST INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF ZETA
    -C         MUST BE RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2,
    -C         AND U3 ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE
    -C         PRECISION ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE
    -C         PRECISION ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMIT-
    -C         ING IN THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT THE MAG-
    -C         NITUDE OF Z CANNOT EXCEED 3.1E+4 IN SINGLE AND 2.1E+6 IN
    -C         DOUBLE PRECISION ARITHMETIC. THIS ALSO MEANS THAT ONE CAN
    -C         EXPECT TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES,
    -C         NO DIGITS IN SINGLE PRECISION AND ONLY 7 DIGITS IN DOUBLE
    -C         PRECISION ARITHMETIC. SIMILAR CONSIDERATIONS HOLD FOR OTHER
    -C         MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0E-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZACAI,ZBKNU,AZEXP,AZSQRT,I1MACH,D1MACH
    -C***END PROLOGUE  ZAIRY
    -C     COMPLEX AI,CONE,CSQ,CY,S1,S2,TRM1,TRM2,Z,ZTA,Z3
    -      DOUBLE PRECISION AA, AD, AII, AIR, AK, ALIM, ATRM, AZ, AZ3, BK,
    -     * CC, CK, COEF, CONEI, CONER, CSQI, CSQR, CYI, CYR, C1, C2, DIG,
    -     * DK, D1, D2, ELIM, FID, FNU, PTR, RL, R1M5, SFAC, STI, STR,
    -     * S1I, S1R, S2I, S2R, TOL, TRM1I, TRM1R, TRM2I, TRM2R, TTH, ZEROI,
    -     * ZEROR, ZI, ZR, ZTAI, ZTAR, Z3I, Z3R, D1MACH, AZABS, ALAZ, BB
    -      INTEGER ID, IERR, IFLAG, K, KODE, K1, K2, MR, NN, NZ, I1MACH
    -      DIMENSION CYR(1), CYI(1)
    -      DATA TTH, C1, C2, COEF /6.66666666666666667D-01,
    -     * 3.55028053887817240D-01,2.58819403792806799D-01,
    -     * 1.83776298473930683D-01/
    -      DATA ZEROR, ZEROI, CONER, CONEI /0.0D0,0.0D0,1.0D0,0.0D0/
    -C***FIRST EXECUTABLE STATEMENT  ZAIRY
    -      IERR = 0
    -      NZ=0
    -      IF (ID.LT.0 .OR. ID.GT.1) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (IERR.NE.0) RETURN
    -      AZ = AZABS(ZR,ZI)
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      FID = DBLE(FLOAT(ID))
    -      IF (AZ.GT.1.0D0) GO TO 70
    -C-----------------------------------------------------------------------
    -C     POWER SERIES FOR CABS(Z).LE.1.
    -C-----------------------------------------------------------------------
    -      S1R = CONER
    -      S1I = CONEI
    -      S2R = CONER
    -      S2I = CONEI
    -      IF (AZ.LT.TOL) GO TO 170
    -      AA = AZ*AZ
    -      IF (AA.LT.TOL/AZ) GO TO 40
    -      TRM1R = CONER
    -      TRM1I = CONEI
    -      TRM2R = CONER
    -      TRM2I = CONEI
    -      ATRM = 1.0D0
    -      STR = ZR*ZR - ZI*ZI
    -      STI = ZR*ZI + ZI*ZR
    -      Z3R = STR*ZR - STI*ZI
    -      Z3I = STR*ZI + STI*ZR
    -      AZ3 = AZ*AA
    -      AK = 2.0D0 + FID
    -      BK = 3.0D0 - FID - FID
    -      CK = 4.0D0 - FID
    -      DK = 3.0D0 + FID + FID
    -      D1 = AK*DK
    -      D2 = BK*CK
    -      AD = DMIN1(D1,D2)
    -      AK = 24.0D0 + 9.0D0*FID
    -      BK = 30.0D0 - 9.0D0*FID
    -      DO 30 K=1,25
    -        STR = (TRM1R*Z3R-TRM1I*Z3I)/D1
    -        TRM1I = (TRM1R*Z3I+TRM1I*Z3R)/D1
    -        TRM1R = STR
    -        S1R = S1R + TRM1R
    -        S1I = S1I + TRM1I
    -        STR = (TRM2R*Z3R-TRM2I*Z3I)/D2
    -        TRM2I = (TRM2R*Z3I+TRM2I*Z3R)/D2
    -        TRM2R = STR
    -        S2R = S2R + TRM2R
    -        S2I = S2I + TRM2I
    -        ATRM = ATRM*AZ3/AD
    -        D1 = D1 + AK
    -        D2 = D2 + BK
    -        AD = DMIN1(D1,D2)
    -        IF (ATRM.LT.TOL*AD) GO TO 40
    -        AK = AK + 18.0D0
    -        BK = BK + 18.0D0
    -   30 CONTINUE
    -   40 CONTINUE
    -      IF (ID.EQ.1) GO TO 50
    -      AIR = S1R*C1 - C2*(ZR*S2R-ZI*S2I)
    -      AII = S1I*C1 - C2*(ZR*S2I+ZI*S2R)
    -      IF (KODE.EQ.1) RETURN
    -      CALL AZSQRT(ZR, ZI, STR, STI)
    -      ZTAR = TTH*(ZR*STR-ZI*STI)
    -      ZTAI = TTH*(ZR*STI+ZI*STR)
    -      CALL AZEXP(ZTAR, ZTAI, STR, STI)
    -      PTR = AIR*STR - AII*STI
    -      AII = AIR*STI + AII*STR
    -      AIR = PTR
    -      RETURN
    -   50 CONTINUE
    -      AIR = -S2R*C2
    -      AII = -S2I*C2
    -      IF (AZ.LE.TOL) GO TO 60
    -      STR = ZR*S1R - ZI*S1I
    -      STI = ZR*S1I + ZI*S1R
    -      CC = C1/(1.0D0+FID)
    -      AIR = AIR + CC*(STR*ZR-STI*ZI)
    -      AII = AII + CC*(STR*ZI+STI*ZR)
    -   60 CONTINUE
    -      IF (KODE.EQ.1) RETURN
    -      CALL AZSQRT(ZR, ZI, STR, STI)
    -      ZTAR = TTH*(ZR*STR-ZI*STI)
    -      ZTAI = TTH*(ZR*STI+ZI*STR)
    -      CALL AZEXP(ZTAR, ZTAI, STR, STI)
    -      PTR = STR*AIR - STI*AII
    -      AII = STR*AII + STI*AIR
    -      AIR = PTR
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     CASE FOR CABS(Z).GT.1.0
    -C-----------------------------------------------------------------------
    -   70 CONTINUE
    -      FNU = (1.0D0+FID)/3.0D0
    -C-----------------------------------------------------------------------
    -C     SET PARAMETERS RELATED TO MACHINE CONSTANTS.
    -C     TOL IS THE APPROXIMATE UNIT ROUNDOFF LIMITED TO 1.0D-18.
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL OVER- AND UNDERFLOW LIMIT.
    -C     EXP(-ELIM).LT.EXP(-ALIM)=EXP(-ELIM)/TOL    AND
    -C     EXP(ELIM).GT.EXP(ALIM)=EXP(ELIM)*TOL       ARE INTERVALS NEAR
    -C     UNDERFLOW AND OVERFLOW LIMITS WHERE SCALED ARITHMETIC IS DONE.
    -C     RL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC EXPANSION FOR LARGE Z.
    -C     DIG = NUMBER OF BASE 10 DIGITS IN TOL = 10**(-DIG).
    -C-----------------------------------------------------------------------
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      R1M5 = D1MACH(5)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      K1 = I1MACH(14) - 1
    -      AA = R1M5*DBLE(FLOAT(K1))
    -      DIG = DMIN1(AA,18.0D0)
    -      AA = AA*2.303D0
    -      ALIM = ELIM + DMAX1(-AA,-41.45D0)
    -      RL = 1.2D0*DIG + 3.0D0
    -      ALAZ = DLOG(AZ)
    -C--------------------------------------------------------------------------
    -C     TEST FOR PROPER RANGE
    -C-----------------------------------------------------------------------
    -      AA=0.5D0/TOL
    -      BB=DBLE(FLOAT(I1MACH(9)))*0.5D0
    -      AA=DMIN1(AA,BB)
    -      AA=AA**TTH
    -      IF (AZ.GT.AA) GO TO 260
    -      AA=DSQRT(AA)
    -      IF (AZ.GT.AA) IERR=3
    -      CALL AZSQRT(ZR, ZI, CSQR, CSQI)
    -      ZTAR = TTH*(ZR*CSQR-ZI*CSQI)
    -      ZTAI = TTH*(ZR*CSQI+ZI*CSQR)
    -C-----------------------------------------------------------------------
    -C     RE(ZTA).LE.0 WHEN RE(Z).LT.0, ESPECIALLY WHEN IM(Z) IS SMALL
    -C-----------------------------------------------------------------------
    -      IFLAG = 0
    -      SFAC = 1.0D0
    -      AK = ZTAI
    -      IF (ZR.GE.0.0D0) GO TO 80
    -      BK = ZTAR
    -      CK = -DABS(BK)
    -      ZTAR = CK
    -      ZTAI = AK
    -   80 CONTINUE
    -      IF (ZI.NE.0.0D0) GO TO 90
    -      IF (ZR.GT.0.0D0) GO TO 90
    -      ZTAR = 0.0D0
    -      ZTAI = AK
    -   90 CONTINUE
    -      AA = ZTAR
    -      IF (AA.GE.0.0D0 .AND. ZR.GT.0.0D0) GO TO 110
    -      IF (KODE.EQ.2) GO TO 100
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST
    -C-----------------------------------------------------------------------
    -      IF (AA.GT.(-ALIM)) GO TO 100
    -      AA = -AA + 0.25D0*ALAZ
    -      IFLAG = 1
    -      SFAC = TOL
    -      IF (AA.GT.ELIM) GO TO 270
    -  100 CONTINUE
    -C-----------------------------------------------------------------------
    -C     CBKNU AND CACON RETURN EXP(ZTA)*K(FNU,ZTA) ON KODE=2
    -C-----------------------------------------------------------------------
    -      MR = 1
    -      IF (ZI.LT.0.0D0) MR = -1
    -      CALL ZACAI(ZTAR, ZTAI, FNU, KODE, MR, 1, CYR, CYI, NN, RL, TOL,
    -     * ELIM, ALIM)
    -      IF (NN.LT.0) GO TO 280
    -      NZ = NZ + NN
    -      GO TO 130
    -  110 CONTINUE
    -      IF (KODE.EQ.2) GO TO 120
    -C-----------------------------------------------------------------------
    -C     UNDERFLOW TEST
    -C-----------------------------------------------------------------------
    -      IF (AA.LT.ALIM) GO TO 120
    -      AA = -AA - 0.25D0*ALAZ
    -      IFLAG = 2
    -      SFAC = 1.0D0/TOL
    -      IF (AA.LT.(-ELIM)) GO TO 210
    -  120 CONTINUE
    -      CALL ZBKNU(ZTAR, ZTAI, FNU, KODE, 1, CYR, CYI, NZ, TOL, ELIM,
    -     * ALIM)
    -  130 CONTINUE
    -      S1R = CYR(1)*COEF
    -      S1I = CYI(1)*COEF
    -      IF (IFLAG.NE.0) GO TO 150
    -      IF (ID.EQ.1) GO TO 140
    -      AIR = CSQR*S1R - CSQI*S1I
    -      AII = CSQR*S1I + CSQI*S1R
    -      RETURN
    -  140 CONTINUE
    -      AIR = -(ZR*S1R-ZI*S1I)
    -      AII = -(ZR*S1I+ZI*S1R)
    -      RETURN
    -  150 CONTINUE
    -      S1R = S1R*SFAC
    -      S1I = S1I*SFAC
    -      IF (ID.EQ.1) GO TO 160
    -      STR = S1R*CSQR - S1I*CSQI
    -      S1I = S1R*CSQI + S1I*CSQR
    -      S1R = STR
    -      AIR = S1R/SFAC
    -      AII = S1I/SFAC
    -      RETURN
    -  160 CONTINUE
    -      STR = -(S1R*ZR-S1I*ZI)
    -      S1I = -(S1R*ZI+S1I*ZR)
    -      S1R = STR
    -      AIR = S1R/SFAC
    -      AII = S1I/SFAC
    -      RETURN
    -  170 CONTINUE
    -      AA = 1.0D+3*D1MACH(1)
    -      S1R = ZEROR
    -      S1I = ZEROI
    -      IF (ID.EQ.1) GO TO 190
    -      IF (AZ.LE.AA) GO TO 180
    -      S1R = C2*ZR
    -      S1I = C2*ZI
    -  180 CONTINUE
    -      AIR = C1 - S1R
    -      AII = -S1I
    -      RETURN
    -  190 CONTINUE
    -      AIR = -C2
    -      AII = 0.0D0
    -      AA = DSQRT(AA)
    -      IF (AZ.LE.AA) GO TO 200
    -      S1R = 0.5D0*(ZR*ZR-ZI*ZI)
    -      S1I = ZR*ZI
    -  200 CONTINUE
    -      AIR = AIR + C1*S1R
    -      AII = AII + C1*S1I
    -      RETURN
    -  210 CONTINUE
    -      NZ = 1
    -      AIR = ZEROR
    -      AII = ZEROI
    -      RETURN
    -  270 CONTINUE
    -      NZ = 0
    -      IERR=2
    -      RETURN
    -  280 CONTINUE
    -      IF(NN.EQ.(-1)) GO TO 270
    -      NZ=0
    -      IERR=5
    -      RETURN
    -  260 CONTINUE
    -      IERR=4
    -      NZ=0
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zasyi.f b/scipy-0.10.1/scipy/special/amos/zasyi.f
    deleted file mode 100644
    index 578136fb41..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zasyi.f
    +++ /dev/null
    @@ -1,165 +0,0 @@
    -      SUBROUTINE ZASYI(ZR, ZI, FNU, KODE, N, YR, YI, NZ, RL, TOL, ELIM,
    -     * ALIM)
    -C***BEGIN PROLOGUE  ZASYI
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZASYI COMPUTES THE I BESSEL FUNCTION FOR REAL(Z).GE.0.0 BY
    -C     MEANS OF THE ASYMPTOTIC EXPANSION FOR LARGE CABS(Z) IN THE
    -C     REGION CABS(Z).GT.MAX(RL,FNU*FNU/2). NZ=0 IS A NORMAL RETURN.
    -C     NZ.LT.0 INDICATES AN OVERFLOW ON KODE=1.
    -C
    -C***ROUTINES CALLED  D1MACH,AZABS,ZDIV,AZEXP,ZMLT,AZSQRT
    -C***END PROLOGUE  ZASYI
    -C     COMPLEX AK1,CK,CONE,CS1,CS2,CZ,CZERO,DK,EZ,P1,RZ,S2,Y,Z
    -      DOUBLE PRECISION AA, AEZ, AK, AK1I, AK1R, ALIM, ARG, ARM, ATOL,
    -     * AZ, BB, BK, CKI, CKR, CONEI, CONER, CS1I, CS1R, CS2I, CS2R, CZI,
    -     * CZR, DFNU, DKI, DKR, DNU2, ELIM, EZI, EZR, FDN, FNU, PI, P1I,
    -     * P1R, RAZ, RL, RTPI, RTR1, RZI, RZR, S, SGN, SQK, STI, STR, S2I,
    -     * S2R, TOL, TZI, TZR, YI, YR, ZEROI, ZEROR, ZI, ZR, D1MACH, AZABS
    -      INTEGER I, IB, IL, INU, J, JL, K, KODE, KODED, M, N, NN, NZ
    -      DIMENSION YR(N), YI(N)
    -      DATA PI, RTPI  /3.14159265358979324D0 , 0.159154943091895336D0 /
    -      DATA ZEROR,ZEROI,CONER,CONEI / 0.0D0, 0.0D0, 1.0D0, 0.0D0 /
    -C
    -      NZ = 0
    -      AZ = AZABS(ZR,ZI)
    -      ARM = 1.0D+3*D1MACH(1)
    -      RTR1 = DSQRT(ARM)
    -      IL = MIN0(2,N)
    -      DFNU = FNU + DBLE(FLOAT(N-IL))
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST
    -C-----------------------------------------------------------------------
    -      RAZ = 1.0D0/AZ
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      AK1R = RTPI*STR*RAZ
    -      AK1I = RTPI*STI*RAZ
    -      CALL AZSQRT(AK1R, AK1I, AK1R, AK1I)
    -      CZR = ZR
    -      CZI = ZI
    -      IF (KODE.NE.2) GO TO 10
    -      CZR = ZEROR
    -      CZI = ZI
    -   10 CONTINUE
    -      IF (DABS(CZR).GT.ELIM) GO TO 100
    -      DNU2 = DFNU + DFNU
    -      KODED = 1
    -      IF ((DABS(CZR).GT.ALIM) .AND. (N.GT.2)) GO TO 20
    -      KODED = 0
    -      CALL AZEXP(CZR, CZI, STR, STI)
    -      CALL ZMLT(AK1R, AK1I, STR, STI, AK1R, AK1I)
    -   20 CONTINUE
    -      FDN = 0.0D0
    -      IF (DNU2.GT.RTR1) FDN = DNU2*DNU2
    -      EZR = ZR*8.0D0
    -      EZI = ZI*8.0D0
    -C-----------------------------------------------------------------------
    -C     WHEN Z IS IMAGINARY, THE ERROR TEST MUST BE MADE RELATIVE TO THE
    -C     FIRST RECIPROCAL POWER SINCE THIS IS THE LEADING TERM OF THE
    -C     EXPANSION FOR THE IMAGINARY PART.
    -C-----------------------------------------------------------------------
    -      AEZ = 8.0D0*AZ
    -      S = TOL/AEZ
    -      JL = INT(SNGL(RL+RL)) + 2
    -      P1R = ZEROR
    -      P1I = ZEROI
    -      IF (ZI.EQ.0.0D0) GO TO 30
    -C-----------------------------------------------------------------------
    -C     CALCULATE EXP(PI*(0.5+FNU+N-IL)*I) TO MINIMIZE LOSSES OF
    -C     SIGNIFICANCE WHEN FNU OR N IS LARGE
    -C-----------------------------------------------------------------------
    -      INU = INT(SNGL(FNU))
    -      ARG = (FNU-DBLE(FLOAT(INU)))*PI
    -      INU = INU + N - IL
    -      AK = -DSIN(ARG)
    -      BK = DCOS(ARG)
    -      IF (ZI.LT.0.0D0) BK = -BK
    -      P1R = AK
    -      P1I = BK
    -      IF (MOD(INU,2).EQ.0) GO TO 30
    -      P1R = -P1R
    -      P1I = -P1I
    -   30 CONTINUE
    -      DO 70 K=1,IL
    -        SQK = FDN - 1.0D0
    -        ATOL = S*DABS(SQK)
    -        SGN = 1.0D0
    -        CS1R = CONER
    -        CS1I = CONEI
    -        CS2R = CONER
    -        CS2I = CONEI
    -        CKR = CONER
    -        CKI = CONEI
    -        AK = 0.0D0
    -        AA = 1.0D0
    -        BB = AEZ
    -        DKR = EZR
    -        DKI = EZI
    -        DO 40 J=1,JL
    -          CALL ZDIV(CKR, CKI, DKR, DKI, STR, STI)
    -          CKR = STR*SQK
    -          CKI = STI*SQK
    -          CS2R = CS2R + CKR
    -          CS2I = CS2I + CKI
    -          SGN = -SGN
    -          CS1R = CS1R + CKR*SGN
    -          CS1I = CS1I + CKI*SGN
    -          DKR = DKR + EZR
    -          DKI = DKI + EZI
    -          AA = AA*DABS(SQK)/BB
    -          BB = BB + AEZ
    -          AK = AK + 8.0D0
    -          SQK = SQK - AK
    -          IF (AA.LE.ATOL) GO TO 50
    -   40   CONTINUE
    -        GO TO 110
    -   50   CONTINUE
    -        S2R = CS1R
    -        S2I = CS1I
    -        IF (ZR+ZR.GE.ELIM) GO TO 60
    -        TZR = ZR + ZR
    -        TZI = ZI + ZI
    -        CALL AZEXP(-TZR, -TZI, STR, STI)
    -        CALL ZMLT(STR, STI, P1R, P1I, STR, STI)
    -        CALL ZMLT(STR, STI, CS2R, CS2I, STR, STI)
    -        S2R = S2R + STR
    -        S2I = S2I + STI
    -   60   CONTINUE
    -        FDN = FDN + 8.0D0*DFNU + 4.0D0
    -        P1R = -P1R
    -        P1I = -P1I
    -        M = N - IL + K
    -        YR(M) = S2R*AK1R - S2I*AK1I
    -        YI(M) = S2R*AK1I + S2I*AK1R
    -   70 CONTINUE
    -      IF (N.LE.2) RETURN
    -      NN = N
    -      K = NN - 2
    -      AK = DBLE(FLOAT(K))
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      RZR = (STR+STR)*RAZ
    -      RZI = (STI+STI)*RAZ
    -      IB = 3
    -      DO 80 I=IB,NN
    -        YR(K) = (AK+FNU)*(RZR*YR(K+1)-RZI*YI(K+1)) + YR(K+2)
    -        YI(K) = (AK+FNU)*(RZR*YI(K+1)+RZI*YR(K+1)) + YI(K+2)
    -        AK = AK - 1.0D0
    -        K = K - 1
    -   80 CONTINUE
    -      IF (KODED.EQ.0) RETURN
    -      CALL AZEXP(CZR, CZI, CKR, CKI)
    -      DO 90 I=1,NN
    -        STR = YR(I)*CKR - YI(I)*CKI
    -        YI(I) = YR(I)*CKI + YI(I)*CKR
    -        YR(I) = STR
    -   90 CONTINUE
    -      RETURN
    -  100 CONTINUE
    -      NZ = -1
    -      RETURN
    -  110 CONTINUE
    -      NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbesh.f b/scipy-0.10.1/scipy/special/amos/zbesh.f
    deleted file mode 100644
    index 5aacecf825..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbesh.f
    +++ /dev/null
    @@ -1,348 +0,0 @@
    -      SUBROUTINE ZBESH(ZR, ZI, FNU, KODE, M, N, CYR, CYI, NZ, IERR)
    -C***BEGIN PROLOGUE  ZBESH
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  H-BESSEL FUNCTIONS,BESSEL FUNCTIONS OF COMPLEX ARGUMENT,
    -C             BESSEL FUNCTIONS OF THIRD KIND,HANKEL FUNCTIONS
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE THE H-BESSEL FUNCTIONS OF A COMPLEX ARGUMENT
    -C***DESCRIPTION
    -C
    -C                      ***A DOUBLE PRECISION ROUTINE***
    -C         ON KODE=1, ZBESH COMPUTES AN N MEMBER SEQUENCE OF COMPLEX
    -C         HANKEL (BESSEL) FUNCTIONS CY(J)=H(M,FNU+J-1,Z) FOR KINDS M=1
    -C         OR 2, REAL, NONNEGATIVE ORDERS FNU+J-1, J=1,...,N, AND COMPLEX
    -C         Z.NE.CMPLX(0.0,0.0) IN THE CUT PLANE -PI.LT.ARG(Z).LE.PI.
    -C         ON KODE=2, ZBESH RETURNS THE SCALED HANKEL FUNCTIONS
    -C
    -C         CY(I)=EXP(-MM*Z*I)*H(M,FNU+J-1,Z)       MM=3-2*M,   I**2=-1.
    -C
    -C         WHICH REMOVES THE EXPONENTIAL BEHAVIOR IN BOTH THE UPPER AND
    -C         LOWER HALF PLANES. DEFINITIONS AND NOTATION ARE FOUND IN THE
    -C         NBS HANDBOOK OF MATHEMATICAL FUNCTIONS (REF. 1).
    -C
    -C         INPUT      ZR,ZI,FNU ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI), Z.NE.CMPLX(0.0D0,0.0D0),
    -C                    -PT.LT.ARG(Z).LE.PI
    -C           FNU    - ORDER OF INITIAL H FUNCTION, FNU.GE.0.0D0
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             CY(J)=H(M,FNU+J-1,Z),   J=1,...,N
    -C                        = 2  RETURNS
    -C                             CY(J)=H(M,FNU+J-1,Z)*EXP(-I*Z*(3-2M))
    -C                                  J=1,...,N  ,  I**2=-1
    -C           M      - KIND OF HANKEL FUNCTION, M=1 OR 2
    -C           N      - NUMBER OF MEMBERS IN THE SEQUENCE, N.GE.1
    -C
    -C         OUTPUT     CYR,CYI ARE DOUBLE PRECISION
    -C           CYR,CYI- DOUBLE PRECISION VECTORS WHOSE FIRST N COMPONENTS
    -C                    CONTAIN REAL AND IMAGINARY PARTS FOR THE SEQUENCE
    -C                    CY(J)=H(M,FNU+J-1,Z)  OR
    -C                    CY(J)=H(M,FNU+J-1,Z)*EXP(-I*Z*(3-2M))  J=1,...,N
    -C                    DEPENDING ON KODE, I**2=-1.
    -C           NZ     - NUMBER OF COMPONENTS SET TO ZERO DUE TO UNDERFLOW,
    -C                    NZ= 0   , NORMAL RETURN
    -C                    NZ.GT.0 , FIRST NZ COMPONENTS OF CY SET TO ZERO DUE
    -C                              TO UNDERFLOW, CY(J)=CMPLX(0.0D0,0.0D0)
    -C                              J=1,...,NZ WHEN Y.GT.0.0 AND M=1 OR
    -C                              Y.LT.0.0 AND M=2. FOR THE COMPLMENTARY
    -C                              HALF PLANES, NZ STATES ONLY THE NUMBER
    -C                              OF UNDERFLOWS.
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, FNU TOO
    -C                            LARGE OR CABS(Z) TOO SMALL OR BOTH
    -C                    IERR=3, CABS(Z) OR FNU+N-1 LARGE - COMPUTATION DONE
    -C                            BUT LOSSES OF SIGNIFCANCE BY ARGUMENT
    -C                            REDUCTION PRODUCE LESS THAN HALF OF MACHINE
    -C                            ACCURACY
    -C                    IERR=4, CABS(Z) OR FNU+N-1 TOO LARGE - NO COMPUTA-
    -C                            TION BECAUSE OF COMPLETE LOSSES OF SIGNIFI-
    -C                            CANCE BY ARGUMENT REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         THE COMPUTATION IS CARRIED OUT BY THE RELATION
    -C
    -C         H(M,FNU,Z)=(1/MP)*EXP(-MP*FNU)*K(FNU,Z*EXP(-MP))
    -C             MP=MM*HPI*I,  MM=3-2*M,  HPI=PI/2,  I**2=-1
    -C
    -C         FOR M=1 OR 2 WHERE THE K BESSEL FUNCTION IS COMPUTED FOR THE
    -C         RIGHT HALF PLANE RE(Z).GE.0.0. THE K FUNCTION IS CONTINUED
    -C         TO THE LEFT HALF PLANE BY THE RELATION
    -C
    -C         K(FNU,Z*EXP(MP)) = EXP(-MP*FNU)*K(FNU,Z)-MP*I(FNU,Z)
    -C         MP=MR*PI*I, MR=+1 OR -1, RE(Z).GT.0, I**2=-1
    -C
    -C         WHERE I(FNU,Z) IS THE I BESSEL FUNCTION.
    -C
    -C         EXPONENTIAL DECAY OF H(M,FNU,Z) OCCURS IN THE UPPER HALF Z
    -C         PLANE FOR M=1 AND THE LOWER HALF Z PLANE FOR M=2.  EXPONENTIAL
    -C         GROWTH OCCURS IN THE COMPLEMENTARY HALF PLANES.  SCALING
    -C         BY EXP(-MM*Z*I) REMOVES THE EXPONENTIAL BEHAVIOR IN THE
    -C         WHOLE Z PLANE FOR Z TO INFINITY.
    -C
    -C         FOR NEGATIVE ORDERS,THE FORMULAE
    -C
    -C               H(1,-FNU,Z) = H(1,FNU,Z)*CEXP( PI*FNU*I)
    -C               H(2,-FNU,Z) = H(2,FNU,Z)*CEXP(-PI*FNU*I)
    -C                         I**2=-1
    -C
    -C         CAN BE USED.
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z OR FNU+N-1 IS
    -C         LARGE, LOSSES OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR.
    -C         CONSEQUENTLY, IF EITHER ONE EXCEEDS U1=SQRT(0.5/UR), THEN
    -C         LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR FLAG
    -C         IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         IF EITHER IS LARGER THAN U2=0.5/UR, THEN ALL SIGNIFICANCE IS
    -C         LOST AND IERR=4. IN ORDER TO USE THE INT FUNCTION, ARGUMENTS
    -C         MUST BE FURTHER RESTRICTED NOT TO EXCEED THE LARGEST MACHINE
    -C         INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF Z AND FNU+N-1 IS
    -C         RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2, AND U3
    -C         ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE PRECISION
    -C         ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE PRECISION
    -C         ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMITING IN
    -C         THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT ONE CAN EXPECT
    -C         TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES, NO DIGITS
    -C         IN SINGLE AND ONLY 7 DIGITS IN DOUBLE PRECISION ARITHMETIC.
    -C         SIMILAR CONSIDERATIONS HOLD FOR OTHER MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0D-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 BY D. E. AMOS, SAND83-0083, MAY, 1983.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZACON,ZBKNU,ZBUNK,ZUOIK,AZABS,I1MACH,D1MACH
    -C***END PROLOGUE  ZBESH
    -C
    -C     COMPLEX CY,Z,ZN,ZT,CSGN
    -      DOUBLE PRECISION AA, ALIM, ALN, ARG, AZ, CYI, CYR, DIG, ELIM,
    -     * FMM, FN, FNU, FNUL, HPI, RHPI, RL, R1M5, SGN, STR, TOL, UFL, ZI,
    -     * ZNI, ZNR, ZR, ZTI, D1MACH, AZABS, BB, ASCLE, RTOL, ATOL, STI,
    -     * CSGNR, CSGNI
    -      INTEGER I, IERR, INU, INUH, IR, K, KODE, K1, K2, M,
    -     * MM, MR, N, NN, NUF, NW, NZ, I1MACH
    -      DIMENSION CYR(N), CYI(N)
    -C
    -      DATA HPI /1.57079632679489662D0/
    -C
    -C***FIRST EXECUTABLE STATEMENT  ZBESH
    -      IERR = 0
    -      NZ=0
    -      IF (ZR.EQ.0.0D0 .AND. ZI.EQ.0.0D0) IERR=1
    -      IF (FNU.LT.0.0D0) IERR=1
    -      IF (M.LT.1 .OR. M.GT.2) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (N.LT.1) IERR=1
    -      IF (IERR.NE.0) RETURN
    -      NN = N
    -C-----------------------------------------------------------------------
    -C     SET PARAMETERS RELATED TO MACHINE CONSTANTS.
    -C     TOL IS THE APPROXIMATE UNIT ROUNDOFF LIMITED TO 1.0E-18.
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL OVER- AND UNDERFLOW LIMIT.
    -C     EXP(-ELIM).LT.EXP(-ALIM)=EXP(-ELIM)/TOL    AND
    -C     EXP(ELIM).GT.EXP(ALIM)=EXP(ELIM)*TOL       ARE INTERVALS NEAR
    -C     UNDERFLOW AND OVERFLOW LIMITS WHERE SCALED ARITHMETIC IS DONE.
    -C     RL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC EXPANSION FOR LARGE Z.
    -C     DIG = NUMBER OF BASE 10 DIGITS IN TOL = 10**(-DIG).
    -C     FNUL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC SERIES FOR LARGE FNU
    -C-----------------------------------------------------------------------
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      R1M5 = D1MACH(5)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      K1 = I1MACH(14) - 1
    -      AA = R1M5*DBLE(FLOAT(K1))
    -      DIG = DMIN1(AA,18.0D0)
    -      AA = AA*2.303D0
    -      ALIM = ELIM + DMAX1(-AA,-41.45D0)
    -      FNUL = 10.0D0 + 6.0D0*(DIG-3.0D0)
    -      RL = 1.2D0*DIG + 3.0D0
    -      FN = FNU + DBLE(FLOAT(NN-1))
    -      MM = 3 - M - M
    -      FMM = DBLE(FLOAT(MM))
    -      ZNR = FMM*ZI
    -      ZNI = -FMM*ZR
    -C-----------------------------------------------------------------------
    -C     TEST FOR PROPER RANGE
    -C-----------------------------------------------------------------------
    -      AZ = AZABS(ZR,ZI)
    -      AA = 0.5D0/TOL
    -      BB=DBLE(FLOAT(I1MACH(9)))*0.5D0
    -      AA = DMIN1(AA,BB)
    -      IF (AZ.GT.AA) GO TO 260
    -      IF (FN.GT.AA) GO TO 260
    -      AA = DSQRT(AA)
    -      IF (AZ.GT.AA) IERR=3
    -      IF (FN.GT.AA) IERR=3
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST ON THE LAST MEMBER OF THE SEQUENCE
    -C-----------------------------------------------------------------------
    -      UFL = D1MACH(1)*1.0D+3
    -      IF (AZ.LT.UFL) GO TO 230
    -      IF (FNU.GT.FNUL) GO TO 90
    -      IF (FN.LE.1.0D0) GO TO 70
    -      IF (FN.GT.2.0D0) GO TO 60
    -      IF (AZ.GT.TOL) GO TO 70
    -      ARG = 0.5D0*AZ
    -      ALN = -FN*DLOG(ARG)
    -      IF (ALN.GT.ELIM) GO TO 230
    -      GO TO 70
    -   60 CONTINUE
    -      CALL ZUOIK(ZNR, ZNI, FNU, KODE, 2, NN, CYR, CYI, NUF, TOL, ELIM,
    -     * ALIM)
    -      IF (NUF.LT.0) GO TO 230
    -      NZ = NZ + NUF
    -      NN = NN - NUF
    -C-----------------------------------------------------------------------
    -C     HERE NN=N OR NN=0 SINCE NUF=0,NN, OR -1 ON RETURN FROM CUOIK
    -C     IF NUF=NN, THEN CY(I)=CZERO FOR ALL I
    -C-----------------------------------------------------------------------
    -      IF (NN.EQ.0) GO TO 140
    -   70 CONTINUE
    -      IF ((ZNR.LT.0.0D0) .OR. (ZNR.EQ.0.0D0 .AND. ZNI.LT.0.0D0 .AND.
    -     * M.EQ.2)) GO TO 80
    -C-----------------------------------------------------------------------
    -C     RIGHT HALF PLANE COMPUTATION, XN.GE.0. .AND. (XN.NE.0. .OR.
    -C     YN.GE.0. .OR. M=1)
    -C-----------------------------------------------------------------------
    -      CALL ZBKNU(ZNR, ZNI, FNU, KODE, NN, CYR, CYI, NZ, TOL, ELIM, ALIM)
    -      GO TO 110
    -C-----------------------------------------------------------------------
    -C     LEFT HALF PLANE COMPUTATION
    -C-----------------------------------------------------------------------
    -   80 CONTINUE
    -      MR = -MM
    -      CALL ZACON(ZNR, ZNI, FNU, KODE, MR, NN, CYR, CYI, NW, RL, FNUL,
    -     * TOL, ELIM, ALIM)
    -      IF (NW.LT.0) GO TO 240
    -      NZ=NW
    -      GO TO 110
    -   90 CONTINUE
    -C-----------------------------------------------------------------------
    -C     UNIFORM ASYMPTOTIC EXPANSIONS FOR FNU.GT.FNUL
    -C-----------------------------------------------------------------------
    -      MR = 0
    -      IF ((ZNR.GE.0.0D0) .AND. (ZNR.NE.0.0D0 .OR. ZNI.GE.0.0D0 .OR.
    -     * M.NE.2)) GO TO 100
    -      MR = -MM
    -      IF (ZNR.NE.0.0D0 .OR. ZNI.GE.0.0D0) GO TO 100
    -      ZNR = -ZNR
    -      ZNI = -ZNI
    -  100 CONTINUE
    -      CALL ZBUNK(ZNR, ZNI, FNU, KODE, MR, NN, CYR, CYI, NW, TOL, ELIM,
    -     * ALIM)
    -      IF (NW.LT.0) GO TO 240
    -      NZ = NZ + NW
    -  110 CONTINUE
    -C-----------------------------------------------------------------------
    -C     H(M,FNU,Z) = -FMM*(I/HPI)*(ZT**FNU)*K(FNU,-Z*ZT)
    -C
    -C     ZT=EXP(-FMM*HPI*I) = CMPLX(0.0,-FMM), FMM=3-2*M, M=1,2
    -C-----------------------------------------------------------------------
    -      SGN = DSIGN(HPI,-FMM)
    -C-----------------------------------------------------------------------
    -C     CALCULATE EXP(FNU*HPI*I) TO MINIMIZE LOSSES OF SIGNIFICANCE
    -C     WHEN FNU IS LARGE
    -C-----------------------------------------------------------------------
    -      INU = INT(SNGL(FNU))
    -      INUH = INU/2
    -      IR = INU - 2*INUH
    -      ARG = (FNU-DBLE(FLOAT(INU-IR)))*SGN
    -      RHPI = 1.0D0/SGN
    -C     ZNI = RHPI*DCOS(ARG)
    -C     ZNR = -RHPI*DSIN(ARG)
    -      CSGNI = RHPI*DCOS(ARG)
    -      CSGNR = -RHPI*DSIN(ARG)
    -      IF (MOD(INUH,2).EQ.0) GO TO 120
    -C     ZNR = -ZNR
    -C     ZNI = -ZNI
    -      CSGNR = -CSGNR
    -      CSGNI = -CSGNI
    -  120 CONTINUE
    -      ZTI = -FMM
    -      RTOL = 1.0D0/TOL
    -      ASCLE = UFL*RTOL
    -      DO 130 I=1,NN
    -C       STR = CYR(I)*ZNR - CYI(I)*ZNI
    -C       CYI(I) = CYR(I)*ZNI + CYI(I)*ZNR
    -C       CYR(I) = STR
    -C       STR = -ZNI*ZTI
    -C       ZNI = ZNR*ZTI
    -C       ZNR = STR
    -        AA = CYR(I)
    -        BB = CYI(I)
    -        ATOL = 1.0D0
    -        IF (DMAX1(DABS(AA),DABS(BB)).GT.ASCLE) GO TO 135
    -          AA = AA*RTOL
    -          BB = BB*RTOL
    -          ATOL = TOL
    -  135 CONTINUE
    -      STR = AA*CSGNR - BB*CSGNI
    -      STI = AA*CSGNI + BB*CSGNR
    -      CYR(I) = STR*ATOL
    -      CYI(I) = STI*ATOL
    -      STR = -CSGNI*ZTI
    -      CSGNI = CSGNR*ZTI
    -      CSGNR = STR
    -  130 CONTINUE
    -      RETURN
    -  140 CONTINUE
    -      IF (ZNR.LT.0.0D0) GO TO 230
    -      RETURN
    -  230 CONTINUE
    -      NZ=0
    -      IERR=2
    -      RETURN
    -  240 CONTINUE
    -      IF(NW.EQ.(-1)) GO TO 230
    -      NZ=0
    -      IERR=5
    -      RETURN
    -  260 CONTINUE
    -      NZ=0
    -      IERR=4
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbesi.f b/scipy-0.10.1/scipy/special/amos/zbesi.f
    deleted file mode 100644
    index a2ddd8c4f4..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbesi.f
    +++ /dev/null
    @@ -1,269 +0,0 @@
    -      SUBROUTINE ZBESI(ZR, ZI, FNU, KODE, N, CYR, CYI, NZ, IERR)
    -C***BEGIN PROLOGUE  ZBESI
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  I-BESSEL FUNCTION,COMPLEX BESSEL FUNCTION,
    -C             MODIFIED BESSEL FUNCTION OF THE FIRST KIND
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE I-BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C***DESCRIPTION
    -C
    -C                    ***A DOUBLE PRECISION ROUTINE***
    -C         ON KODE=1, ZBESI COMPUTES AN N MEMBER SEQUENCE OF COMPLEX
    -C         BESSEL FUNCTIONS CY(J)=I(FNU+J-1,Z) FOR REAL, NONNEGATIVE
    -C         ORDERS FNU+J-1, J=1,...,N AND COMPLEX Z IN THE CUT PLANE
    -C         -PI.LT.ARG(Z).LE.PI. ON KODE=2, ZBESI RETURNS THE SCALED
    -C         FUNCTIONS
    -C
    -C         CY(J)=EXP(-ABS(X))*I(FNU+J-1,Z)   J = 1,...,N , X=REAL(Z)
    -C
    -C         WITH THE EXPONENTIAL GROWTH REMOVED IN BOTH THE LEFT AND
    -C         RIGHT HALF PLANES FOR Z TO INFINITY. DEFINITIONS AND NOTATION
    -C         ARE FOUND IN THE NBS HANDBOOK OF MATHEMATICAL FUNCTIONS
    -C         (REF. 1).
    -C
    -C         INPUT      ZR,ZI,FNU ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI),  -PI.LT.ARG(Z).LE.PI
    -C           FNU    - ORDER OF INITIAL I FUNCTION, FNU.GE.0.0D0
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             CY(J)=I(FNU+J-1,Z), J=1,...,N
    -C                        = 2  RETURNS
    -C                             CY(J)=I(FNU+J-1,Z)*EXP(-ABS(X)), J=1,...,N
    -C           N      - NUMBER OF MEMBERS OF THE SEQUENCE, N.GE.1
    -C
    -C         OUTPUT     CYR,CYI ARE DOUBLE PRECISION
    -C           CYR,CYI- DOUBLE PRECISION VECTORS WHOSE FIRST N COMPONENTS
    -C                    CONTAIN REAL AND IMAGINARY PARTS FOR THE SEQUENCE
    -C                    CY(J)=I(FNU+J-1,Z)  OR
    -C                    CY(J)=I(FNU+J-1,Z)*EXP(-ABS(X))  J=1,...,N
    -C                    DEPENDING ON KODE, X=REAL(Z)
    -C           NZ     - NUMBER OF COMPONENTS SET TO ZERO DUE TO UNDERFLOW,
    -C                    NZ= 0   , NORMAL RETURN
    -C                    NZ.GT.0 , LAST NZ COMPONENTS OF CY SET TO ZERO
    -C                              TO UNDERFLOW, CY(J)=CMPLX(0.0D0,0.0D0)
    -C                              J = N-NZ+1,...,N
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, REAL(Z) TOO
    -C                            LARGE ON KODE=1
    -C                    IERR=3, CABS(Z) OR FNU+N-1 LARGE - COMPUTATION DONE
    -C                            BUT LOSSES OF SIGNIFCANCE BY ARGUMENT
    -C                            REDUCTION PRODUCE LESS THAN HALF OF MACHINE
    -C                            ACCURACY
    -C                    IERR=4, CABS(Z) OR FNU+N-1 TOO LARGE - NO COMPUTA-
    -C                            TION BECAUSE OF COMPLETE LOSSES OF SIGNIFI-
    -C                            CANCE BY ARGUMENT REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         THE COMPUTATION IS CARRIED OUT BY THE POWER SERIES FOR
    -C         SMALL CABS(Z), THE ASYMPTOTIC EXPANSION FOR LARGE CABS(Z),
    -C         THE MILLER ALGORITHM NORMALIZED BY THE WRONSKIAN AND A
    -C         NEUMANN SERIES FOR IMTERMEDIATE MAGNITUDES, AND THE
    -C         UNIFORM ASYMPTOTIC EXPANSIONS FOR I(FNU,Z) AND J(FNU,Z)
    -C         FOR LARGE ORDERS. BACKWARD RECURRENCE IS USED TO GENERATE
    -C         SEQUENCES OR REDUCE ORDERS WHEN NECESSARY.
    -C
    -C         THE CALCULATIONS ABOVE ARE DONE IN THE RIGHT HALF PLANE AND
    -C         CONTINUED INTO THE LEFT HALF PLANE BY THE FORMULA
    -C
    -C         I(FNU,Z*EXP(M*PI)) = EXP(M*PI*FNU)*I(FNU,Z)  REAL(Z).GT.0.0
    -C                       M = +I OR -I,  I**2=-1
    -C
    -C         FOR NEGATIVE ORDERS,THE FORMULA
    -C
    -C              I(-FNU,Z) = I(FNU,Z) + (2/PI)*SIN(PI*FNU)*K(FNU,Z)
    -C
    -C         CAN BE USED. HOWEVER,FOR LARGE ORDERS CLOSE TO INTEGERS, THE
    -C         THE FUNCTION CHANGES RADICALLY. WHEN FNU IS A LARGE POSITIVE
    -C         INTEGER,THE MAGNITUDE OF I(-FNU,Z)=I(FNU,Z) IS A LARGE
    -C         NEGATIVE POWER OF TEN. BUT WHEN FNU IS NOT AN INTEGER,
    -C         K(FNU,Z) DOMINATES IN MAGNITUDE WITH A LARGE POSITIVE POWER OF
    -C         TEN AND THE MOST THAT THE SECOND TERM CAN BE REDUCED IS BY
    -C         UNIT ROUNDOFF FROM THE COEFFICIENT. THUS, WIDE CHANGES CAN
    -C         OCCUR WITHIN UNIT ROUNDOFF OF A LARGE INTEGER FOR FNU. HERE,
    -C         LARGE MEANS FNU.GT.CABS(Z).
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z OR FNU+N-1 IS
    -C         LARGE, LOSSES OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR.
    -C         CONSEQUENTLY, IF EITHER ONE EXCEEDS U1=SQRT(0.5/UR), THEN
    -C         LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR FLAG
    -C         IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         IF EITHER IS LARGER THAN U2=0.5/UR, THEN ALL SIGNIFICANCE IS
    -C         LOST AND IERR=4. IN ORDER TO USE THE INT FUNCTION, ARGUMENTS
    -C         MUST BE FURTHER RESTRICTED NOT TO EXCEED THE LARGEST MACHINE
    -C         INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF Z AND FNU+N-1 IS
    -C         RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2, AND U3
    -C         ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE PRECISION
    -C         ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE PRECISION
    -C         ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMITING IN
    -C         THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT ONE CAN EXPECT
    -C         TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES, NO DIGITS
    -C         IN SINGLE AND ONLY 7 DIGITS IN DOUBLE PRECISION ARITHMETIC.
    -C         SIMILAR CONSIDERATIONS HOLD FOR OTHER MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0E-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 BY D. E. AMOS, SAND83-0083, MAY, 1983.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZBINU,I1MACH,D1MACH
    -C***END PROLOGUE  ZBESI
    -C     COMPLEX CONE,CSGN,CW,CY,CZERO,Z,ZN
    -      DOUBLE PRECISION AA, ALIM, ARG, CONEI, CONER, CSGNI, CSGNR, CYI,
    -     * CYR, DIG, ELIM, FNU, FNUL, PI, RL, R1M5, STR, TOL, ZI, ZNI, ZNR,
    -     * ZR, D1MACH, AZ, BB, FN, AZABS, ASCLE, RTOL, ATOL, STI
    -      INTEGER I, IERR, INU, K, KODE, K1,K2,N,NZ,NN, I1MACH
    -      DIMENSION CYR(N), CYI(N)
    -      DATA PI /3.14159265358979324D0/
    -      DATA CONER, CONEI /1.0D0,0.0D0/
    -C
    -C***FIRST EXECUTABLE STATEMENT  ZBESI
    -      IERR = 0
    -      NZ=0
    -      IF (FNU.LT.0.0D0) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (N.LT.1) IERR=1
    -      IF (IERR.NE.0) RETURN
    -C-----------------------------------------------------------------------
    -C     SET PARAMETERS RELATED TO MACHINE CONSTANTS.
    -C     TOL IS THE APPROXIMATE UNIT ROUNDOFF LIMITED TO 1.0E-18.
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL OVER- AND UNDERFLOW LIMIT.
    -C     EXP(-ELIM).LT.EXP(-ALIM)=EXP(-ELIM)/TOL    AND
    -C     EXP(ELIM).GT.EXP(ALIM)=EXP(ELIM)*TOL       ARE INTERVALS NEAR
    -C     UNDERFLOW AND OVERFLOW LIMITS WHERE SCALED ARITHMETIC IS DONE.
    -C     RL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC EXPANSION FOR LARGE Z.
    -C     DIG = NUMBER OF BASE 10 DIGITS IN TOL = 10**(-DIG).
    -C     FNUL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC SERIES FOR LARGE FNU.
    -C-----------------------------------------------------------------------
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      R1M5 = D1MACH(5)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      K1 = I1MACH(14) - 1
    -      AA = R1M5*DBLE(FLOAT(K1))
    -      DIG = DMIN1(AA,18.0D0)
    -      AA = AA*2.303D0
    -      ALIM = ELIM + DMAX1(-AA,-41.45D0)
    -      RL = 1.2D0*DIG + 3.0D0
    -      FNUL = 10.0D0 + 6.0D0*(DIG-3.0D0)
    -C-----------------------------------------------------------------------------
    -C     TEST FOR PROPER RANGE
    -C-----------------------------------------------------------------------
    -      AZ = AZABS(ZR,ZI)
    -      FN = FNU+DBLE(FLOAT(N-1))
    -      AA = 0.5D0/TOL
    -      BB=DBLE(FLOAT(I1MACH(9)))*0.5D0
    -      AA = DMIN1(AA,BB)
    -      IF (AZ.GT.AA) GO TO 260
    -      IF (FN.GT.AA) GO TO 260
    -      AA = DSQRT(AA)
    -      IF (AZ.GT.AA) IERR=3
    -      IF (FN.GT.AA) IERR=3
    -      ZNR = ZR
    -      ZNI = ZI
    -      CSGNR = CONER
    -      CSGNI = CONEI
    -      IF (ZR.GE.0.0D0) GO TO 40
    -      ZNR = -ZR
    -      ZNI = -ZI
    -C-----------------------------------------------------------------------
    -C     CALCULATE CSGN=EXP(FNU*PI*I) TO MINIMIZE LOSSES OF SIGNIFICANCE
    -C     WHEN FNU IS LARGE
    -C-----------------------------------------------------------------------
    -      INU = INT(SNGL(FNU))
    -      ARG = (FNU-DBLE(FLOAT(INU)))*PI
    -      IF (ZI.LT.0.0D0) ARG = -ARG
    -      CSGNR = DCOS(ARG)
    -      CSGNI = DSIN(ARG)
    -      IF (MOD(INU,2).EQ.0) GO TO 40
    -      CSGNR = -CSGNR
    -      CSGNI = -CSGNI
    -   40 CONTINUE
    -      CALL ZBINU(ZNR, ZNI, FNU, KODE, N, CYR, CYI, NZ, RL, FNUL, TOL,
    -     * ELIM, ALIM)
    -      IF (NZ.LT.0) GO TO 120
    -      IF (ZR.GE.0.0D0) RETURN
    -C-----------------------------------------------------------------------
    -C     ANALYTIC CONTINUATION TO THE LEFT HALF PLANE
    -C-----------------------------------------------------------------------
    -      NN = N - NZ
    -      IF (NN.EQ.0) RETURN
    -      RTOL = 1.0D0/TOL
    -      ASCLE = D1MACH(1)*RTOL*1.0D+3
    -      DO 50 I=1,NN
    -C       STR = CYR(I)*CSGNR - CYI(I)*CSGNI
    -C       CYI(I) = CYR(I)*CSGNI + CYI(I)*CSGNR
    -C       CYR(I) = STR
    -        AA = CYR(I)
    -        BB = CYI(I)
    -        ATOL = 1.0D0
    -        IF (DMAX1(DABS(AA),DABS(BB)).GT.ASCLE) GO TO 55
    -          AA = AA*RTOL
    -          BB = BB*RTOL
    -          ATOL = TOL
    -   55   CONTINUE
    -        STR = AA*CSGNR - BB*CSGNI
    -        STI = AA*CSGNI + BB*CSGNR
    -        CYR(I) = STR*ATOL
    -        CYI(I) = STI*ATOL
    -        CSGNR = -CSGNR
    -        CSGNI = -CSGNI
    -   50 CONTINUE
    -      RETURN
    -  120 CONTINUE
    -      IF(NZ.EQ.(-2)) GO TO 130
    -      NZ = 0
    -      IERR=2
    -      RETURN
    -  130 CONTINUE
    -      NZ=0
    -      IERR=5
    -      RETURN
    -  260 CONTINUE
    -      NZ=0
    -      IERR=4
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbesj.f b/scipy-0.10.1/scipy/special/amos/zbesj.f
    deleted file mode 100644
    index afe588f252..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbesj.f
    +++ /dev/null
    @@ -1,266 +0,0 @@
    -      SUBROUTINE ZBESJ(ZR, ZI, FNU, KODE, N, CYR, CYI, NZ, IERR)
    -C***BEGIN PROLOGUE  ZBESJ
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  J-BESSEL FUNCTION,BESSEL FUNCTION OF COMPLEX ARGUMENT,
    -C             BESSEL FUNCTION OF FIRST KIND
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE THE J-BESSEL FUNCTION OF A COMPLEX ARGUMENT
    -C***DESCRIPTION
    -C
    -C                      ***A DOUBLE PRECISION ROUTINE***
    -C         ON KODE=1, CBESJ COMPUTES AN N MEMBER  SEQUENCE OF COMPLEX
    -C         BESSEL FUNCTIONS CY(I)=J(FNU+I-1,Z) FOR REAL, NONNEGATIVE
    -C         ORDERS FNU+I-1, I=1,...,N AND COMPLEX Z IN THE CUT PLANE
    -C         -PI.LT.ARG(Z).LE.PI. ON KODE=2, CBESJ RETURNS THE SCALED
    -C         FUNCTIONS
    -C
    -C         CY(I)=EXP(-ABS(Y))*J(FNU+I-1,Z)   I = 1,...,N , Y=AIMAG(Z)
    -C
    -C         WHICH REMOVE THE EXPONENTIAL GROWTH IN BOTH THE UPPER AND
    -C         LOWER HALF PLANES FOR Z TO INFINITY. DEFINITIONS AND NOTATION
    -C         ARE FOUND IN THE NBS HANDBOOK OF MATHEMATICAL FUNCTIONS
    -C         (REF. 1).
    -C
    -C         INPUT      ZR,ZI,FNU ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI),  -PI.LT.ARG(Z).LE.PI
    -C           FNU    - ORDER OF INITIAL J FUNCTION, FNU.GE.0.0D0
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             CY(I)=J(FNU+I-1,Z), I=1,...,N
    -C                        = 2  RETURNS
    -C                             CY(I)=J(FNU+I-1,Z)EXP(-ABS(Y)), I=1,...,N
    -C           N      - NUMBER OF MEMBERS OF THE SEQUENCE, N.GE.1
    -C
    -C         OUTPUT     CYR,CYI ARE DOUBLE PRECISION
    -C           CYR,CYI- DOUBLE PRECISION VECTORS WHOSE FIRST N COMPONENTS
    -C                    CONTAIN REAL AND IMAGINARY PARTS FOR THE SEQUENCE
    -C                    CY(I)=J(FNU+I-1,Z)  OR
    -C                    CY(I)=J(FNU+I-1,Z)EXP(-ABS(Y))  I=1,...,N
    -C                    DEPENDING ON KODE, Y=AIMAG(Z).
    -C           NZ     - NUMBER OF COMPONENTS SET TO ZERO DUE TO UNDERFLOW,
    -C                    NZ= 0   , NORMAL RETURN
    -C                    NZ.GT.0 , LAST NZ COMPONENTS OF CY SET  ZERO DUE
    -C                              TO UNDERFLOW, CY(I)=CMPLX(0.0D0,0.0D0),
    -C                              I = N-NZ+1,...,N
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, AIMAG(Z)
    -C                            TOO LARGE ON KODE=1
    -C                    IERR=3, CABS(Z) OR FNU+N-1 LARGE - COMPUTATION DONE
    -C                            BUT LOSSES OF SIGNIFCANCE BY ARGUMENT
    -C                            REDUCTION PRODUCE LESS THAN HALF OF MACHINE
    -C                            ACCURACY
    -C                    IERR=4, CABS(Z) OR FNU+N-1 TOO LARGE - NO COMPUTA-
    -C                            TION BECAUSE OF COMPLETE LOSSES OF SIGNIFI-
    -C                            CANCE BY ARGUMENT REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         THE COMPUTATION IS CARRIED OUT BY THE FORMULA
    -C
    -C         J(FNU,Z)=EXP( FNU*PI*I/2)*I(FNU,-I*Z)    AIMAG(Z).GE.0.0
    -C
    -C         J(FNU,Z)=EXP(-FNU*PI*I/2)*I(FNU, I*Z)    AIMAG(Z).LT.0.0
    -C
    -C         WHERE I**2 = -1 AND I(FNU,Z) IS THE I BESSEL FUNCTION.
    -C
    -C         FOR NEGATIVE ORDERS,THE FORMULA
    -C
    -C              J(-FNU,Z) = J(FNU,Z)*COS(PI*FNU) - Y(FNU,Z)*SIN(PI*FNU)
    -C
    -C         CAN BE USED. HOWEVER,FOR LARGE ORDERS CLOSE TO INTEGERS, THE
    -C         THE FUNCTION CHANGES RADICALLY. WHEN FNU IS A LARGE POSITIVE
    -C         INTEGER,THE MAGNITUDE OF J(-FNU,Z)=J(FNU,Z)*COS(PI*FNU) IS A
    -C         LARGE NEGATIVE POWER OF TEN. BUT WHEN FNU IS NOT AN INTEGER,
    -C         Y(FNU,Z) DOMINATES IN MAGNITUDE WITH A LARGE POSITIVE POWER OF
    -C         TEN AND THE MOST THAT THE SECOND TERM CAN BE REDUCED IS BY
    -C         UNIT ROUNDOFF FROM THE COEFFICIENT. THUS, WIDE CHANGES CAN
    -C         OCCUR WITHIN UNIT ROUNDOFF OF A LARGE INTEGER FOR FNU. HERE,
    -C         LARGE MEANS FNU.GT.CABS(Z).
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z OR FNU+N-1 IS
    -C         LARGE, LOSSES OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR.
    -C         CONSEQUENTLY, IF EITHER ONE EXCEEDS U1=SQRT(0.5/UR), THEN
    -C         LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR FLAG
    -C         IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         IF EITHER IS LARGER THAN U2=0.5/UR, THEN ALL SIGNIFICANCE IS
    -C         LOST AND IERR=4. IN ORDER TO USE THE INT FUNCTION, ARGUMENTS
    -C         MUST BE FURTHER RESTRICTED NOT TO EXCEED THE LARGEST MACHINE
    -C         INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF Z AND FNU+N-1 IS
    -C         RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2, AND U3
    -C         ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE PRECISION
    -C         ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE PRECISION
    -C         ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMITING IN
    -C         THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT ONE CAN EXPECT
    -C         TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES, NO DIGITS
    -C         IN SINGLE AND ONLY 7 DIGITS IN DOUBLE PRECISION ARITHMETIC.
    -C         SIMILAR CONSIDERATIONS HOLD FOR OTHER MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0E-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 BY D. E. AMOS, SAND83-0083, MAY, 1983.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZBINU,I1MACH,D1MACH
    -C***END PROLOGUE  ZBESJ
    -C
    -C     COMPLEX CI,CSGN,CY,Z,ZN
    -      DOUBLE PRECISION AA, ALIM, ARG, CII, CSGNI, CSGNR, CYI, CYR, DIG,
    -     * ELIM, FNU, FNUL, HPI, RL, R1M5, STR, TOL, ZI, ZNI, ZNR, ZR,
    -     * D1MACH, BB, FN, AZ, AZABS, ASCLE, RTOL, ATOL, STI
    -      INTEGER I, IERR, INU, INUH, IR, K, KODE, K1, K2, N, NL, NZ, I1MACH
    -      DIMENSION CYR(N), CYI(N)
    -      DATA HPI /1.57079632679489662D0/
    -C
    -C***FIRST EXECUTABLE STATEMENT  ZBESJ
    -      IERR = 0
    -      NZ=0
    -      IF (FNU.LT.0.0D0) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (N.LT.1) IERR=1
    -      IF (IERR.NE.0) RETURN
    -C-----------------------------------------------------------------------
    -C     SET PARAMETERS RELATED TO MACHINE CONSTANTS.
    -C     TOL IS THE APPROXIMATE UNIT ROUNDOFF LIMITED TO 1.0E-18.
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL OVER- AND UNDERFLOW LIMIT.
    -C     EXP(-ELIM).LT.EXP(-ALIM)=EXP(-ELIM)/TOL    AND
    -C     EXP(ELIM).GT.EXP(ALIM)=EXP(ELIM)*TOL       ARE INTERVALS NEAR
    -C     UNDERFLOW AND OVERFLOW LIMITS WHERE SCALED ARITHMETIC IS DONE.
    -C     RL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC EXPANSION FOR LARGE Z.
    -C     DIG = NUMBER OF BASE 10 DIGITS IN TOL = 10**(-DIG).
    -C     FNUL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC SERIES FOR LARGE FNU.
    -C-----------------------------------------------------------------------
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      R1M5 = D1MACH(5)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      K1 = I1MACH(14) - 1
    -      AA = R1M5*DBLE(FLOAT(K1))
    -      DIG = DMIN1(AA,18.0D0)
    -      AA = AA*2.303D0
    -      ALIM = ELIM + DMAX1(-AA,-41.45D0)
    -      RL = 1.2D0*DIG + 3.0D0
    -      FNUL = 10.0D0 + 6.0D0*(DIG-3.0D0)
    -C-----------------------------------------------------------------------
    -C     TEST FOR PROPER RANGE
    -C-----------------------------------------------------------------------
    -      AZ = AZABS(ZR,ZI)
    -      FN = FNU+DBLE(FLOAT(N-1))
    -      AA = 0.5D0/TOL
    -      BB=DBLE(FLOAT(I1MACH(9)))*0.5D0
    -      AA = DMIN1(AA,BB)
    -      IF (AZ.GT.AA) GO TO 260
    -      IF (FN.GT.AA) GO TO 260
    -      AA = DSQRT(AA)
    -      IF (AZ.GT.AA) IERR=3
    -      IF (FN.GT.AA) IERR=3
    -C-----------------------------------------------------------------------
    -C     CALCULATE CSGN=EXP(FNU*HPI*I) TO MINIMIZE LOSSES OF SIGNIFICANCE
    -C     WHEN FNU IS LARGE
    -C-----------------------------------------------------------------------
    -      CII = 1.0D0
    -      INU = INT(SNGL(FNU))
    -      INUH = INU/2
    -      IR = INU - 2*INUH
    -      ARG = (FNU-DBLE(FLOAT(INU-IR)))*HPI
    -      CSGNR = DCOS(ARG)
    -      CSGNI = DSIN(ARG)
    -      IF (MOD(INUH,2).EQ.0) GO TO 40
    -      CSGNR = -CSGNR
    -      CSGNI = -CSGNI
    -   40 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ZN IS IN THE RIGHT HALF PLANE
    -C-----------------------------------------------------------------------
    -      ZNR = ZI
    -      ZNI = -ZR
    -      IF (ZI.GE.0.0D0) GO TO 50
    -      ZNR = -ZNR
    -      ZNI = -ZNI
    -      CSGNI = -CSGNI
    -      CII = -CII
    -   50 CONTINUE
    -      CALL ZBINU(ZNR, ZNI, FNU, KODE, N, CYR, CYI, NZ, RL, FNUL, TOL,
    -     * ELIM, ALIM)
    -      IF (NZ.LT.0) GO TO 130
    -      NL = N - NZ
    -      IF (NL.EQ.0) RETURN
    -      RTOL = 1.0D0/TOL
    -      ASCLE = D1MACH(1)*RTOL*1.0D+3
    -      DO 60 I=1,NL
    -C       STR = CYR(I)*CSGNR - CYI(I)*CSGNI
    -C       CYI(I) = CYR(I)*CSGNI + CYI(I)*CSGNR
    -C       CYR(I) = STR
    -        AA = CYR(I)
    -        BB = CYI(I)
    -        ATOL = 1.0D0
    -        IF (DMAX1(DABS(AA),DABS(BB)).GT.ASCLE) GO TO 55
    -          AA = AA*RTOL
    -          BB = BB*RTOL
    -          ATOL = TOL
    -   55   CONTINUE
    -        STR = AA*CSGNR - BB*CSGNI
    -        STI = AA*CSGNI + BB*CSGNR
    -        CYR(I) = STR*ATOL
    -        CYI(I) = STI*ATOL
    -        STR = -CSGNI*CII
    -        CSGNI = CSGNR*CII
    -        CSGNR = STR
    -   60 CONTINUE
    -      RETURN
    -  130 CONTINUE
    -      IF(NZ.EQ.(-2)) GO TO 140
    -      NZ = 0
    -      IERR = 2
    -      RETURN
    -  140 CONTINUE
    -      NZ=0
    -      IERR=5
    -      RETURN
    -  260 CONTINUE
    -      NZ=0
    -      IERR=4
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbesk.f b/scipy-0.10.1/scipy/special/amos/zbesk.f
    deleted file mode 100644
    index cd8eedac84..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbesk.f
    +++ /dev/null
    @@ -1,281 +0,0 @@
    -      SUBROUTINE ZBESK(ZR, ZI, FNU, KODE, N, CYR, CYI, NZ, IERR)
    -C***BEGIN PROLOGUE  ZBESK
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  K-BESSEL FUNCTION,COMPLEX BESSEL FUNCTION,
    -C             MODIFIED BESSEL FUNCTION OF THE SECOND KIND,
    -C             BESSEL FUNCTION OF THE THIRD KIND
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE K-BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C***DESCRIPTION
    -C
    -C                      ***A DOUBLE PRECISION ROUTINE***
    -C
    -C         ON KODE=1, CBESK COMPUTES AN N MEMBER SEQUENCE OF COMPLEX
    -C         BESSEL FUNCTIONS CY(J)=K(FNU+J-1,Z) FOR REAL, NONNEGATIVE
    -C         ORDERS FNU+J-1, J=1,...,N AND COMPLEX Z.NE.CMPLX(0.0,0.0)
    -C         IN THE CUT PLANE -PI.LT.ARG(Z).LE.PI. ON KODE=2, CBESK
    -C         RETURNS THE SCALED K FUNCTIONS,
    -C
    -C         CY(J)=EXP(Z)*K(FNU+J-1,Z) , J=1,...,N,
    -C
    -C         WHICH REMOVE THE EXPONENTIAL BEHAVIOR IN BOTH THE LEFT AND
    -C         RIGHT HALF PLANES FOR Z TO INFINITY. DEFINITIONS AND
    -C         NOTATION ARE FOUND IN THE NBS HANDBOOK OF MATHEMATICAL
    -C         FUNCTIONS (REF. 1).
    -C
    -C         INPUT      ZR,ZI,FNU ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI), Z.NE.CMPLX(0.0D0,0.0D0),
    -C                    -PI.LT.ARG(Z).LE.PI
    -C           FNU    - ORDER OF INITIAL K FUNCTION, FNU.GE.0.0D0
    -C           N      - NUMBER OF MEMBERS OF THE SEQUENCE, N.GE.1
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             CY(I)=K(FNU+I-1,Z), I=1,...,N
    -C                        = 2  RETURNS
    -C                             CY(I)=K(FNU+I-1,Z)*EXP(Z), I=1,...,N
    -C
    -C         OUTPUT     CYR,CYI ARE DOUBLE PRECISION
    -C           CYR,CYI- DOUBLE PRECISION VECTORS WHOSE FIRST N COMPONENTS
    -C                    CONTAIN REAL AND IMAGINARY PARTS FOR THE SEQUENCE
    -C                    CY(I)=K(FNU+I-1,Z), I=1,...,N OR
    -C                    CY(I)=K(FNU+I-1,Z)*EXP(Z), I=1,...,N
    -C                    DEPENDING ON KODE
    -C           NZ     - NUMBER OF COMPONENTS SET TO ZERO DUE TO UNDERFLOW.
    -C                    NZ= 0   , NORMAL RETURN
    -C                    NZ.GT.0 , FIRST NZ COMPONENTS OF CY SET TO ZERO DUE
    -C                              TO UNDERFLOW, CY(I)=CMPLX(0.0D0,0.0D0),
    -C                              I=1,...,N WHEN X.GE.0.0. WHEN X.LT.0.0
    -C                              NZ STATES ONLY THE NUMBER OF UNDERFLOWS
    -C                              IN THE SEQUENCE.
    -C
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, FNU IS
    -C                            TOO LARGE OR CABS(Z) IS TOO SMALL OR BOTH
    -C                    IERR=3, CABS(Z) OR FNU+N-1 LARGE - COMPUTATION DONE
    -C                            BUT LOSSES OF SIGNIFCANCE BY ARGUMENT
    -C                            REDUCTION PRODUCE LESS THAN HALF OF MACHINE
    -C                            ACCURACY
    -C                    IERR=4, CABS(Z) OR FNU+N-1 TOO LARGE - NO COMPUTA-
    -C                            TION BECAUSE OF COMPLETE LOSSES OF SIGNIFI-
    -C                            CANCE BY ARGUMENT REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         EQUATIONS OF THE REFERENCE ARE IMPLEMENTED FOR SMALL ORDERS
    -C         DNU AND DNU+1.0 IN THE RIGHT HALF PLANE X.GE.0.0. FORWARD
    -C         RECURRENCE GENERATES HIGHER ORDERS. K IS CONTINUED TO THE LEFT
    -C         HALF PLANE BY THE RELATION
    -C
    -C         K(FNU,Z*EXP(MP)) = EXP(-MP*FNU)*K(FNU,Z)-MP*I(FNU,Z)
    -C         MP=MR*PI*I, MR=+1 OR -1, RE(Z).GT.0, I**2=-1
    -C
    -C         WHERE I(FNU,Z) IS THE I BESSEL FUNCTION.
    -C
    -C         FOR LARGE ORDERS, FNU.GT.FNUL, THE K FUNCTION IS COMPUTED
    -C         BY MEANS OF ITS UNIFORM ASYMPTOTIC EXPANSIONS.
    -C
    -C         FOR NEGATIVE ORDERS, THE FORMULA
    -C
    -C                       K(-FNU,Z) = K(FNU,Z)
    -C
    -C         CAN BE USED.
    -C
    -C         CBESK ASSUMES THAT A SIGNIFICANT DIGIT SINH(X) FUNCTION IS
    -C         AVAILABLE.
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z OR FNU+N-1 IS
    -C         LARGE, LOSSES OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR.
    -C         CONSEQUENTLY, IF EITHER ONE EXCEEDS U1=SQRT(0.5/UR), THEN
    -C         LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR FLAG
    -C         IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         IF EITHER IS LARGER THAN U2=0.5/UR, THEN ALL SIGNIFICANCE IS
    -C         LOST AND IERR=4. IN ORDER TO USE THE INT FUNCTION, ARGUMENTS
    -C         MUST BE FURTHER RESTRICTED NOT TO EXCEED THE LARGEST MACHINE
    -C         INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF Z AND FNU+N-1 IS
    -C         RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2, AND U3
    -C         ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE PRECISION
    -C         ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE PRECISION
    -C         ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMITING IN
    -C         THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT ONE CAN EXPECT
    -C         TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES, NO DIGITS
    -C         IN SINGLE AND ONLY 7 DIGITS IN DOUBLE PRECISION ARITHMETIC.
    -C         SIMILAR CONSIDERATIONS HOLD FOR OTHER MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0E-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 BY D. E. AMOS, SAND83-0083, MAY, 1983.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983.
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZACON,ZBKNU,ZBUNK,ZUOIK,AZABS,I1MACH,D1MACH
    -C***END PROLOGUE  ZBESK
    -C
    -C     COMPLEX CY,Z
    -      DOUBLE PRECISION AA, ALIM, ALN, ARG, AZ, CYI, CYR, DIG, ELIM, FN,
    -     * FNU, FNUL, RL, R1M5, TOL, UFL, ZI, ZR, D1MACH, AZABS, BB
    -      INTEGER IERR, K, KODE, K1, K2, MR, N, NN, NUF, NW, NZ, I1MACH
    -      DIMENSION CYR(N), CYI(N)
    -C***FIRST EXECUTABLE STATEMENT  ZBESK
    -      IERR = 0
    -      NZ=0
    -      IF (ZI.EQ.0.0E0 .AND. ZR.EQ.0.0E0) IERR=1
    -      IF (FNU.LT.0.0D0) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (N.LT.1) IERR=1
    -      IF (IERR.NE.0) RETURN
    -      NN = N
    -C-----------------------------------------------------------------------
    -C     SET PARAMETERS RELATED TO MACHINE CONSTANTS.
    -C     TOL IS THE APPROXIMATE UNIT ROUNDOFF LIMITED TO 1.0E-18.
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL OVER- AND UNDERFLOW LIMIT.
    -C     EXP(-ELIM).LT.EXP(-ALIM)=EXP(-ELIM)/TOL    AND
    -C     EXP(ELIM).GT.EXP(ALIM)=EXP(ELIM)*TOL       ARE INTERVALS NEAR
    -C     UNDERFLOW AND OVERFLOW LIMITS WHERE SCALED ARITHMETIC IS DONE.
    -C     RL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC EXPANSION FOR LARGE Z.
    -C     DIG = NUMBER OF BASE 10 DIGITS IN TOL = 10**(-DIG).
    -C     FNUL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC SERIES FOR LARGE FNU
    -C-----------------------------------------------------------------------
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      R1M5 = D1MACH(5)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      K1 = I1MACH(14) - 1
    -      AA = R1M5*DBLE(FLOAT(K1))
    -      DIG = DMIN1(AA,18.0D0)
    -      AA = AA*2.303D0
    -      ALIM = ELIM + DMAX1(-AA,-41.45D0)
    -      FNUL = 10.0D0 + 6.0D0*(DIG-3.0D0)
    -      RL = 1.2D0*DIG + 3.0D0
    -C-----------------------------------------------------------------------------
    -C     TEST FOR PROPER RANGE
    -C-----------------------------------------------------------------------
    -      AZ = AZABS(ZR,ZI)
    -      FN = FNU + DBLE(FLOAT(NN-1))
    -      AA = 0.5D0/TOL
    -      BB=DBLE(FLOAT(I1MACH(9)))*0.5D0
    -      AA = DMIN1(AA,BB)
    -      IF (AZ.GT.AA) GO TO 260
    -      IF (FN.GT.AA) GO TO 260
    -      AA = DSQRT(AA)
    -      IF (AZ.GT.AA) IERR=3
    -      IF (FN.GT.AA) IERR=3
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST ON THE LAST MEMBER OF THE SEQUENCE
    -C-----------------------------------------------------------------------
    -C     UFL = DEXP(-ELIM)
    -      UFL = D1MACH(1)*1.0D+3
    -      IF (AZ.LT.UFL) GO TO 180
    -      IF (FNU.GT.FNUL) GO TO 80
    -      IF (FN.LE.1.0D0) GO TO 60
    -      IF (FN.GT.2.0D0) GO TO 50
    -      IF (AZ.GT.TOL) GO TO 60
    -      ARG = 0.5D0*AZ
    -      ALN = -FN*DLOG(ARG)
    -      IF (ALN.GT.ELIM) GO TO 180
    -      GO TO 60
    -   50 CONTINUE
    -      CALL ZUOIK(ZR, ZI, FNU, KODE, 2, NN, CYR, CYI, NUF, TOL, ELIM,
    -     * ALIM)
    -      IF (NUF.LT.0) GO TO 180
    -      NZ = NZ + NUF
    -      NN = NN - NUF
    -C-----------------------------------------------------------------------
    -C     HERE NN=N OR NN=0 SINCE NUF=0,NN, OR -1 ON RETURN FROM CUOIK
    -C     IF NUF=NN, THEN CY(I)=CZERO FOR ALL I
    -C-----------------------------------------------------------------------
    -      IF (NN.EQ.0) GO TO 100
    -   60 CONTINUE
    -      IF (ZR.LT.0.0D0) GO TO 70
    -C-----------------------------------------------------------------------
    -C     RIGHT HALF PLANE COMPUTATION, REAL(Z).GE.0.
    -C-----------------------------------------------------------------------
    -      CALL ZBKNU(ZR, ZI, FNU, KODE, NN, CYR, CYI, NW, TOL, ELIM, ALIM)
    -      IF (NW.LT.0) GO TO 200
    -      NZ=NW
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     LEFT HALF PLANE COMPUTATION
    -C     PI/2.LT.ARG(Z).LE.PI AND -PI.LT.ARG(Z).LT.-PI/2.
    -C-----------------------------------------------------------------------
    -   70 CONTINUE
    -      IF (NZ.NE.0) GO TO 180
    -      MR = 1
    -      IF (ZI.LT.0.0D0) MR = -1
    -      CALL ZACON(ZR, ZI, FNU, KODE, MR, NN, CYR, CYI, NW, RL, FNUL,
    -     * TOL, ELIM, ALIM)
    -      IF (NW.LT.0) GO TO 200
    -      NZ=NW
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     UNIFORM ASYMPTOTIC EXPANSIONS FOR FNU.GT.FNUL
    -C-----------------------------------------------------------------------
    -   80 CONTINUE
    -      MR = 0
    -      IF (ZR.GE.0.0D0) GO TO 90
    -      MR = 1
    -      IF (ZI.LT.0.0D0) MR = -1
    -   90 CONTINUE
    -      CALL ZBUNK(ZR, ZI, FNU, KODE, MR, NN, CYR, CYI, NW, TOL, ELIM,
    -     * ALIM)
    -      IF (NW.LT.0) GO TO 200
    -      NZ = NZ + NW
    -      RETURN
    -  100 CONTINUE
    -      IF (ZR.LT.0.0D0) GO TO 180
    -      RETURN
    -  180 CONTINUE
    -      NZ = 0
    -      IERR=2
    -      RETURN
    -  200 CONTINUE
    -      IF(NW.EQ.(-1)) GO TO 180
    -      NZ=0
    -      IERR=5
    -      RETURN
    -  260 CONTINUE
    -      NZ=0
    -      IERR=4
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbesy.f b/scipy-0.10.1/scipy/special/amos/zbesy.f
    deleted file mode 100644
    index 05ec40beef..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbesy.f
    +++ /dev/null
    @@ -1,244 +0,0 @@
    -      SUBROUTINE ZBESY(ZR, ZI, FNU, KODE, N, CYR, CYI, NZ, CWRKR, CWRKI,
    -     *                 IERR)
    -C***BEGIN PROLOGUE  ZBESY
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  Y-BESSEL FUNCTION,BESSEL FUNCTION OF COMPLEX ARGUMENT,
    -C             BESSEL FUNCTION OF SECOND KIND
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE THE Y-BESSEL FUNCTION OF A COMPLEX ARGUMENT
    -C***DESCRIPTION
    -C
    -C                      ***A DOUBLE PRECISION ROUTINE***
    -C
    -C         ON KODE=1, CBESY COMPUTES AN N MEMBER SEQUENCE OF COMPLEX
    -C         BESSEL FUNCTIONS CY(I)=Y(FNU+I-1,Z) FOR REAL, NONNEGATIVE
    -C         ORDERS FNU+I-1, I=1,...,N AND COMPLEX Z IN THE CUT PLANE
    -C         -PI.LT.ARG(Z).LE.PI. ON KODE=2, CBESY RETURNS THE SCALED
    -C         FUNCTIONS
    -C
    -C         CY(I)=EXP(-ABS(Y))*Y(FNU+I-1,Z)   I = 1,...,N , Y=AIMAG(Z)
    -C
    -C         WHICH REMOVE THE EXPONENTIAL GROWTH IN BOTH THE UPPER AND
    -C         LOWER HALF PLANES FOR Z TO INFINITY. DEFINITIONS AND NOTATION
    -C         ARE FOUND IN THE NBS HANDBOOK OF MATHEMATICAL FUNCTIONS
    -C         (REF. 1).
    -C
    -C         INPUT      ZR,ZI,FNU ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI), Z.NE.CMPLX(0.0D0,0.0D0),
    -C                    -PI.LT.ARG(Z).LE.PI
    -C           FNU    - ORDER OF INITIAL Y FUNCTION, FNU.GE.0.0D0
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             CY(I)=Y(FNU+I-1,Z), I=1,...,N
    -C                        = 2  RETURNS
    -C                             CY(I)=Y(FNU+I-1,Z)*EXP(-ABS(Y)), I=1,...,N
    -C                             WHERE Y=AIMAG(Z)
    -C           N      - NUMBER OF MEMBERS OF THE SEQUENCE, N.GE.1
    -C           CWRKR, - DOUBLE PRECISION WORK VECTORS OF DIMENSION AT
    -C           CWRKI    AT LEAST N
    -C
    -C         OUTPUT     CYR,CYI ARE DOUBLE PRECISION
    -C           CYR,CYI- DOUBLE PRECISION VECTORS WHOSE FIRST N COMPONENTS
    -C                    CONTAIN REAL AND IMAGINARY PARTS FOR THE SEQUENCE
    -C                    CY(I)=Y(FNU+I-1,Z)  OR
    -C                    CY(I)=Y(FNU+I-1,Z)*EXP(-ABS(Y))  I=1,...,N
    -C                    DEPENDING ON KODE.
    -C           NZ     - NZ=0 , A NORMAL RETURN
    -C                    NZ.GT.0 , NZ COMPONENTS OF CY SET TO ZERO DUE TO
    -C                    UNDERFLOW (GENERALLY ON KODE=2)
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, FNU IS
    -C                            TOO LARGE OR CABS(Z) IS TOO SMALL OR BOTH
    -C                    IERR=3, CABS(Z) OR FNU+N-1 LARGE - COMPUTATION DONE
    -C                            BUT LOSSES OF SIGNIFCANCE BY ARGUMENT
    -C                            REDUCTION PRODUCE LESS THAN HALF OF MACHINE
    -C                            ACCURACY
    -C                    IERR=4, CABS(Z) OR FNU+N-1 TOO LARGE - NO COMPUTA-
    -C                            TION BECAUSE OF COMPLETE LOSSES OF SIGNIFI-
    -C                            CANCE BY ARGUMENT REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         THE COMPUTATION IS CARRIED OUT BY THE FORMULA
    -C
    -C         Y(FNU,Z)=0.5*(H(1,FNU,Z)-H(2,FNU,Z))/I
    -C
    -C         WHERE I**2 = -1 AND THE HANKEL BESSEL FUNCTIONS H(1,FNU,Z)
    -C         AND H(2,FNU,Z) ARE CALCULATED IN CBESH.
    -C
    -C         FOR NEGATIVE ORDERS,THE FORMULA
    -C
    -C              Y(-FNU,Z) = Y(FNU,Z)*COS(PI*FNU) + J(FNU,Z)*SIN(PI*FNU)
    -C
    -C         CAN BE USED. HOWEVER,FOR LARGE ORDERS CLOSE TO HALF ODD
    -C         INTEGERS THE FUNCTION CHANGES RADICALLY. WHEN FNU IS A LARGE
    -C         POSITIVE HALF ODD INTEGER,THE MAGNITUDE OF Y(-FNU,Z)=J(FNU,Z)*
    -C         SIN(PI*FNU) IS A LARGE NEGATIVE POWER OF TEN. BUT WHEN FNU IS
    -C         NOT A HALF ODD INTEGER, Y(FNU,Z) DOMINATES IN MAGNITUDE WITH A
    -C         LARGE POSITIVE POWER OF TEN AND THE MOST THAT THE SECOND TERM
    -C         CAN BE REDUCED IS BY UNIT ROUNDOFF FROM THE COEFFICIENT. THUS,
    -C         WIDE CHANGES CAN OCCUR WITHIN UNIT ROUNDOFF OF A LARGE HALF
    -C         ODD INTEGER. HERE, LARGE MEANS FNU.GT.CABS(Z).
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z OR FNU+N-1 IS
    -C         LARGE, LOSSES OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR.
    -C         CONSEQUENTLY, IF EITHER ONE EXCEEDS U1=SQRT(0.5/UR), THEN
    -C         LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR FLAG
    -C         IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         IF EITHER IS LARGER THAN U2=0.5/UR, THEN ALL SIGNIFICANCE IS
    -C         LOST AND IERR=4. IN ORDER TO USE THE INT FUNCTION, ARGUMENTS
    -C         MUST BE FURTHER RESTRICTED NOT TO EXCEED THE LARGEST MACHINE
    -C         INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF Z AND FNU+N-1 IS
    -C         RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2, AND U3
    -C         ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE PRECISION
    -C         ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE PRECISION
    -C         ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMITING IN
    -C         THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT ONE CAN EXPECT
    -C         TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES, NO DIGITS
    -C         IN SINGLE AND ONLY 7 DIGITS IN DOUBLE PRECISION ARITHMETIC.
    -C         SIMILAR CONSIDERATIONS HOLD FOR OTHER MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0E-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 BY D. E. AMOS, SAND83-0083, MAY, 1983.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZBESH,I1MACH,D1MACH
    -C***END PROLOGUE  ZBESY
    -C
    -C     COMPLEX CWRK,CY,C1,C2,EX,HCI,Z,ZU,ZV
    -      DOUBLE PRECISION CWRKI, CWRKR, CYI, CYR, C1I, C1R, C2I, C2R,
    -     * ELIM, EXI, EXR, EY, FNU, HCII, STI, STR, TAY, ZI, ZR, DEXP,
    -     * D1MACH, ASCLE, RTOL, ATOL, AA, BB, TOL
    -      INTEGER I, IERR, K, KODE, K1, K2, N, NZ, NZ1, NZ2, I1MACH
    -      DIMENSION CYR(N), CYI(N), CWRKR(N), CWRKI(N)
    -C***FIRST EXECUTABLE STATEMENT  ZBESY
    -      IERR = 0
    -      NZ=0
    -      IF (ZR.EQ.0.0D0 .AND. ZI.EQ.0.0D0) IERR=1
    -      IF (FNU.LT.0.0D0) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (N.LT.1) IERR=1
    -      IF (IERR.NE.0) RETURN
    -      HCII = 0.5D0
    -      CALL ZBESH(ZR, ZI, FNU, KODE, 1, N, CYR, CYI, NZ1, IERR)
    -      IF (IERR.NE.0.AND.IERR.NE.3) GO TO 170
    -      CALL ZBESH(ZR, ZI, FNU, KODE, 2, N, CWRKR, CWRKI, NZ2, IERR)
    -      IF (IERR.NE.0.AND.IERR.NE.3) GO TO 170
    -      NZ = MIN0(NZ1,NZ2)
    -      IF (KODE.EQ.2) GO TO 60
    -      DO 50 I=1,N
    -        STR = CWRKR(I) - CYR(I)
    -        STI = CWRKI(I) - CYI(I)
    -        CYR(I) = -STI*HCII
    -        CYI(I) = STR*HCII
    -   50 CONTINUE
    -      RETURN
    -   60 CONTINUE
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      R1M5 = D1MACH(5)
    -C-----------------------------------------------------------------------
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL UNDER- AND OVERFLOW LIMIT
    -C-----------------------------------------------------------------------
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      EXR = DCOS(ZR)
    -      EXI = DSIN(ZR)
    -      EY = 0.0D0
    -      TAY = DABS(ZI+ZI)
    -      IF (TAY.LT.ELIM) EY = DEXP(-TAY)
    -      IF (ZI.LT.0.0D0) GO TO 90
    -      C1R = EXR*EY
    -      C1I = EXI*EY
    -      C2R = EXR
    -      C2I = -EXI
    -   70 CONTINUE
    -      NZ = 0
    -      RTOL = 1.0D0/TOL
    -      ASCLE = D1MACH(1)*RTOL*1.0D+3
    -      DO 80 I=1,N
    -C       STR = C1R*CYR(I) - C1I*CYI(I)
    -C       STI = C1R*CYI(I) + C1I*CYR(I)
    -C       STR = -STR + C2R*CWRKR(I) - C2I*CWRKI(I)
    -C       STI = -STI + C2R*CWRKI(I) + C2I*CWRKR(I)
    -C       CYR(I) = -STI*HCII
    -C       CYI(I) = STR*HCII
    -        AA = CWRKR(I)
    -        BB = CWRKI(I)
    -        ATOL = 1.0D0
    -        IF (DMAX1(DABS(AA),DABS(BB)).GT.ASCLE) GO TO 75
    -          AA = AA*RTOL
    -          BB = BB*RTOL
    -          ATOL = TOL
    -   75   CONTINUE
    -        STR = (AA*C2R - BB*C2I)*ATOL
    -        STI = (AA*C2I + BB*C2R)*ATOL
    -        AA = CYR(I)
    -        BB = CYI(I)
    -        ATOL = 1.0D0
    -        IF (DMAX1(DABS(AA),DABS(BB)).GT.ASCLE) GO TO 85
    -          AA = AA*RTOL
    -          BB = BB*RTOL
    -          ATOL = TOL
    -   85   CONTINUE
    -        STR = STR - (AA*C1R - BB*C1I)*ATOL
    -        STI = STI - (AA*C1I + BB*C1R)*ATOL
    -        CYR(I) = -STI*HCII
    -        CYI(I) =  STR*HCII
    -        IF (STR.EQ.0.0D0 .AND. STI.EQ.0.0D0 .AND. EY.EQ.0.0D0) NZ = NZ
    -     *   + 1
    -   80 CONTINUE
    -      RETURN
    -   90 CONTINUE
    -      C1R = EXR
    -      C1I = EXI
    -      C2R = EXR*EY
    -      C2I = -EXI*EY
    -      GO TO 70
    -  170 CONTINUE
    -      NZ = 0
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbinu.f b/scipy-0.10.1/scipy/special/amos/zbinu.f
    deleted file mode 100644
    index c76846a589..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbinu.f
    +++ /dev/null
    @@ -1,110 +0,0 @@
    -      SUBROUTINE ZBINU(ZR, ZI, FNU, KODE, N, CYR, CYI, NZ, RL, FNUL,
    -     * TOL, ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZBINU
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZAIRY,ZBIRY
    -C
    -C     ZBINU COMPUTES THE I FUNCTION IN THE RIGHT HALF Z PLANE
    -C
    -C***ROUTINES CALLED  AZABS,ZASYI,ZBUNI,ZMLRI,ZSERI,ZUOIK,ZWRSK
    -C***END PROLOGUE  ZBINU
    -      DOUBLE PRECISION ALIM, AZ, CWI, CWR, CYI, CYR, DFNU, ELIM, FNU,
    -     * FNUL, RL, TOL, ZEROI, ZEROR, ZI, ZR, AZABS
    -      INTEGER I, INW, KODE, N, NLAST, NN, NUI, NW, NZ
    -      DIMENSION CYR(N), CYI(N), CWR(2), CWI(2)
    -      DATA ZEROR,ZEROI / 0.0D0, 0.0D0 /
    -C
    -      NZ = 0
    -      AZ = AZABS(ZR,ZI)
    -      NN = N
    -      DFNU = FNU + DBLE(FLOAT(N-1))
    -      IF (AZ.LE.2.0D0) GO TO 10
    -      IF (AZ*AZ*0.25D0.GT.DFNU+1.0D0) GO TO 20
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     POWER SERIES
    -C-----------------------------------------------------------------------
    -      CALL ZSERI(ZR, ZI, FNU, KODE, NN, CYR, CYI, NW, TOL, ELIM, ALIM)
    -      INW = IABS(NW)
    -      NZ = NZ + INW
    -      NN = NN - INW
    -      IF (NN.EQ.0) RETURN
    -      IF (NW.GE.0) GO TO 120
    -      DFNU = FNU + DBLE(FLOAT(NN-1))
    -   20 CONTINUE
    -      IF (AZ.LT.RL) GO TO 40
    -      IF (DFNU.LE.1.0D0) GO TO 30
    -      IF (AZ+AZ.LT.DFNU*DFNU) GO TO 50
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR LARGE Z
    -C-----------------------------------------------------------------------
    -   30 CONTINUE
    -      CALL ZASYI(ZR, ZI, FNU, KODE, NN, CYR, CYI, NW, RL, TOL, ELIM,
    -     * ALIM)
    -      IF (NW.LT.0) GO TO 130
    -      GO TO 120
    -   40 CONTINUE
    -      IF (DFNU.LE.1.0D0) GO TO 70
    -   50 CONTINUE
    -C-----------------------------------------------------------------------
    -C     OVERFLOW AND UNDERFLOW TEST ON I SEQUENCE FOR MILLER ALGORITHM
    -C-----------------------------------------------------------------------
    -      CALL ZUOIK(ZR, ZI, FNU, KODE, 1, NN, CYR, CYI, NW, TOL, ELIM,
    -     * ALIM)
    -      IF (NW.LT.0) GO TO 130
    -      NZ = NZ + NW
    -      NN = NN - NW
    -      IF (NN.EQ.0) RETURN
    -      DFNU = FNU+DBLE(FLOAT(NN-1))
    -      IF (DFNU.GT.FNUL) GO TO 110
    -      IF (AZ.GT.FNUL) GO TO 110
    -   60 CONTINUE
    -      IF (AZ.GT.RL) GO TO 80
    -   70 CONTINUE
    -C-----------------------------------------------------------------------
    -C     MILLER ALGORITHM NORMALIZED BY THE SERIES
    -C-----------------------------------------------------------------------
    -      CALL ZMLRI(ZR, ZI, FNU, KODE, NN, CYR, CYI, NW, TOL)
    -      IF(NW.LT.0) GO TO 130
    -      GO TO 120
    -   80 CONTINUE
    -C-----------------------------------------------------------------------
    -C     MILLER ALGORITHM NORMALIZED BY THE WRONSKIAN
    -C-----------------------------------------------------------------------
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST ON K FUNCTIONS USED IN WRONSKIAN
    -C-----------------------------------------------------------------------
    -      CALL ZUOIK(ZR, ZI, FNU, KODE, 2, 2, CWR, CWI, NW, TOL, ELIM,
    -     * ALIM)
    -      IF (NW.GE.0) GO TO 100
    -      NZ = NN
    -      DO 90 I=1,NN
    -        CYR(I) = ZEROR
    -        CYI(I) = ZEROI
    -   90 CONTINUE
    -      RETURN
    -  100 CONTINUE
    -      IF (NW.GT.0) GO TO 130
    -      CALL ZWRSK(ZR, ZI, FNU, KODE, NN, CYR, CYI, NW, CWR, CWI, TOL,
    -     * ELIM, ALIM)
    -      IF (NW.LT.0) GO TO 130
    -      GO TO 120
    -  110 CONTINUE
    -C-----------------------------------------------------------------------
    -C     INCREMENT FNU+NN-1 UP TO FNUL, COMPUTE AND RECUR BACKWARD
    -C-----------------------------------------------------------------------
    -      NUI = INT(SNGL(FNUL-DFNU)) + 1
    -      NUI = MAX0(NUI,0)
    -      CALL ZBUNI(ZR, ZI, FNU, KODE, NN, CYR, CYI, NW, NUI, NLAST, FNUL,
    -     * TOL, ELIM, ALIM)
    -      IF (NW.LT.0) GO TO 130
    -      NZ = NZ + NW
    -      IF (NLAST.EQ.0) GO TO 120
    -      NN = NLAST
    -      GO TO 60
    -  120 CONTINUE
    -      RETURN
    -  130 CONTINUE
    -      NZ = -1
    -      IF(NW.EQ.(-2)) NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbiry.f b/scipy-0.10.1/scipy/special/amos/zbiry.f
    deleted file mode 100644
    index 94f32f6a00..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbiry.f
    +++ /dev/null
    @@ -1,364 +0,0 @@
    -      SUBROUTINE ZBIRY(ZR, ZI, ID, KODE, BIR, BII, IERR)
    -C***BEGIN PROLOGUE  ZBIRY
    -C***DATE WRITTEN   830501   (YYMMDD)
    -C***REVISION DATE  890801   (YYMMDD)
    -C***CATEGORY NO.  B5K
    -C***KEYWORDS  AIRY FUNCTION,BESSEL FUNCTIONS OF ORDER ONE THIRD
    -C***AUTHOR  AMOS, DONALD E., SANDIA NATIONAL LABORATORIES
    -C***PURPOSE  TO COMPUTE AIRY FUNCTIONS BI(Z) AND DBI(Z) FOR COMPLEX Z
    -C***DESCRIPTION
    -C
    -C                      ***A DOUBLE PRECISION ROUTINE***
    -C         ON KODE=1, CBIRY COMPUTES THE COMPLEX AIRY FUNCTION BI(Z) OR
    -C         ITS DERIVATIVE DBI(Z)/DZ ON ID=0 OR ID=1 RESPECTIVELY. ON
    -C         KODE=2, A SCALING OPTION CEXP(-AXZTA)*BI(Z) OR CEXP(-AXZTA)*
    -C         DBI(Z)/DZ IS PROVIDED TO REMOVE THE EXPONENTIAL BEHAVIOR IN
    -C         BOTH THE LEFT AND RIGHT HALF PLANES WHERE
    -C         ZTA=(2/3)*Z*CSQRT(Z)=CMPLX(XZTA,YZTA) AND AXZTA=ABS(XZTA).
    -C         DEFINTIONS AND NOTATION ARE FOUND IN THE NBS HANDBOOK OF
    -C         MATHEMATICAL FUNCTIONS (REF. 1).
    -C
    -C         INPUT      ZR,ZI ARE DOUBLE PRECISION
    -C           ZR,ZI  - Z=CMPLX(ZR,ZI)
    -C           ID     - ORDER OF DERIVATIVE, ID=0 OR ID=1
    -C           KODE   - A PARAMETER TO INDICATE THE SCALING OPTION
    -C                    KODE= 1  RETURNS
    -C                             BI=BI(Z)                 ON ID=0 OR
    -C                             BI=DBI(Z)/DZ             ON ID=1
    -C                        = 2  RETURNS
    -C                             BI=CEXP(-AXZTA)*BI(Z)     ON ID=0 OR
    -C                             BI=CEXP(-AXZTA)*DBI(Z)/DZ ON ID=1 WHERE
    -C                             ZTA=(2/3)*Z*CSQRT(Z)=CMPLX(XZTA,YZTA)
    -C                             AND AXZTA=ABS(XZTA)
    -C
    -C         OUTPUT     BIR,BII ARE DOUBLE PRECISION
    -C           BIR,BII- COMPLEX ANSWER DEPENDING ON THE CHOICES FOR ID AND
    -C                    KODE
    -C           IERR   - ERROR FLAG
    -C                    IERR=0, NORMAL RETURN - COMPUTATION COMPLETED
    -C                    IERR=1, INPUT ERROR   - NO COMPUTATION
    -C                    IERR=2, OVERFLOW      - NO COMPUTATION, REAL(Z)
    -C                            TOO LARGE ON KODE=1
    -C                    IERR=3, CABS(Z) LARGE      - COMPUTATION COMPLETED
    -C                            LOSSES OF SIGNIFCANCE BY ARGUMENT REDUCTION
    -C                            PRODUCE LESS THAN HALF OF MACHINE ACCURACY
    -C                    IERR=4, CABS(Z) TOO LARGE  - NO COMPUTATION
    -C                            COMPLETE LOSS OF ACCURACY BY ARGUMENT
    -C                            REDUCTION
    -C                    IERR=5, ERROR              - NO COMPUTATION,
    -C                            ALGORITHM TERMINATION CONDITION NOT MET
    -C
    -C***LONG DESCRIPTION
    -C
    -C         BI AND DBI ARE COMPUTED FOR CABS(Z).GT.1.0 FROM THE I BESSEL
    -C         FUNCTIONS BY
    -C
    -C                BI(Z)=C*SQRT(Z)*( I(-1/3,ZTA) + I(1/3,ZTA) )
    -C               DBI(Z)=C *  Z  * ( I(-2/3,ZTA) + I(2/3,ZTA) )
    -C                               C=1.0/SQRT(3.0)
    -C                             ZTA=(2/3)*Z**(3/2)
    -C
    -C         WITH THE POWER SERIES FOR CABS(Z).LE.1.0.
    -C
    -C         IN MOST COMPLEX VARIABLE COMPUTATION, ONE MUST EVALUATE ELE-
    -C         MENTARY FUNCTIONS. WHEN THE MAGNITUDE OF Z IS LARGE, LOSSES
    -C         OF SIGNIFICANCE BY ARGUMENT REDUCTION OCCUR. CONSEQUENTLY, IF
    -C         THE MAGNITUDE OF ZETA=(2/3)*Z**1.5 EXCEEDS U1=SQRT(0.5/UR),
    -C         THEN LOSSES EXCEEDING HALF PRECISION ARE LIKELY AND AN ERROR
    -C         FLAG IERR=3 IS TRIGGERED WHERE UR=DMAX1(D1MACH(4),1.0D-18) IS
    -C         DOUBLE PRECISION UNIT ROUNDOFF LIMITED TO 18 DIGITS PRECISION.
    -C         ALSO, IF THE MAGNITUDE OF ZETA IS LARGER THAN U2=0.5/UR, THEN
    -C         ALL SIGNIFICANCE IS LOST AND IERR=4. IN ORDER TO USE THE INT
    -C         FUNCTION, ZETA MUST BE FURTHER RESTRICTED NOT TO EXCEED THE
    -C         LARGEST INTEGER, U3=I1MACH(9). THUS, THE MAGNITUDE OF ZETA
    -C         MUST BE RESTRICTED BY MIN(U2,U3). ON 32 BIT MACHINES, U1,U2,
    -C         AND U3 ARE APPROXIMATELY 2.0E+3, 4.2E+6, 2.1E+9 IN SINGLE
    -C         PRECISION ARITHMETIC AND 1.3E+8, 1.8E+16, 2.1E+9 IN DOUBLE
    -C         PRECISION ARITHMETIC RESPECTIVELY. THIS MAKES U2 AND U3 LIMIT-
    -C         ING IN THEIR RESPECTIVE ARITHMETICS. THIS MEANS THAT THE MAG-
    -C         NITUDE OF Z CANNOT EXCEED 3.1E+4 IN SINGLE AND 2.1E+6 IN
    -C         DOUBLE PRECISION ARITHMETIC. THIS ALSO MEANS THAT ONE CAN
    -C         EXPECT TO RETAIN, IN THE WORST CASES ON 32 BIT MACHINES,
    -C         NO DIGITS IN SINGLE PRECISION AND ONLY 7 DIGITS IN DOUBLE
    -C         PRECISION ARITHMETIC. SIMILAR CONSIDERATIONS HOLD FOR OTHER
    -C         MACHINES.
    -C
    -C         THE APPROXIMATE RELATIVE ERROR IN THE MAGNITUDE OF A COMPLEX
    -C         BESSEL FUNCTION CAN BE EXPRESSED BY P*10**S WHERE P=MAX(UNIT
    -C         ROUNDOFF,1.0E-18) IS THE NOMINAL PRECISION AND 10**S REPRE-
    -C         SENTS THE INCREASE IN ERROR DUE TO ARGUMENT REDUCTION IN THE
    -C         ELEMENTARY FUNCTIONS. HERE, S=MAX(1,ABS(LOG10(CABS(Z))),
    -C         ABS(LOG10(FNU))) APPROXIMATELY (I.E. S=MAX(1,ABS(EXPONENT OF
    -C         CABS(Z),ABS(EXPONENT OF FNU)) ). HOWEVER, THE PHASE ANGLE MAY
    -C         HAVE ONLY ABSOLUTE ACCURACY. THIS IS MOST LIKELY TO OCCUR WHEN
    -C         ONE COMPONENT (IN ABSOLUTE VALUE) IS LARGER THAN THE OTHER BY
    -C         SEVERAL ORDERS OF MAGNITUDE. IF ONE COMPONENT IS 10**K LARGER
    -C         THAN THE OTHER, THEN ONE CAN EXPECT ONLY MAX(ABS(LOG10(P))-K,
    -C         0) SIGNIFICANT DIGITS; OR, STATED ANOTHER WAY, WHEN K EXCEEDS
    -C         THE EXPONENT OF P, NO SIGNIFICANT DIGITS REMAIN IN THE SMALLER
    -C         COMPONENT. HOWEVER, THE PHASE ANGLE RETAINS ABSOLUTE ACCURACY
    -C         BECAUSE, IN COMPLEX ARITHMETIC WITH PRECISION P, THE SMALLER
    -C         COMPONENT WILL NOT (AS A RULE) DECREASE BELOW P TIMES THE
    -C         MAGNITUDE OF THE LARGER COMPONENT. IN THESE EXTREME CASES,
    -C         THE PRINCIPAL PHASE ANGLE IS ON THE ORDER OF +P, -P, PI/2-P,
    -C         OR -PI/2+P.
    -C
    -C***REFERENCES  HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ
    -C                 AND I. A. STEGUN, NBS AMS SERIES 55, U.S. DEPT. OF
    -C                 COMMERCE, 1955.
    -C
    -C               COMPUTATION OF BESSEL FUNCTIONS OF COMPLEX ARGUMENT
    -C                 AND LARGE ORDER BY D. E. AMOS, SAND83-0643, MAY, 1983
    -C
    -C               A SUBROUTINE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, SAND85-
    -C                 1018, MAY, 1985
    -C
    -C               A PORTABLE PACKAGE FOR BESSEL FUNCTIONS OF A COMPLEX
    -C                 ARGUMENT AND NONNEGATIVE ORDER BY D. E. AMOS, TRANS.
    -C                 MATH. SOFTWARE, 1986
    -C
    -C***ROUTINES CALLED  ZBINU,AZABS,ZDIV,AZSQRT,D1MACH,I1MACH
    -C***END PROLOGUE  ZBIRY
    -C     COMPLEX BI,CONE,CSQ,CY,S1,S2,TRM1,TRM2,Z,ZTA,Z3
    -      DOUBLE PRECISION AA, AD, AK, ALIM, ATRM, AZ, AZ3, BB, BII, BIR,
    -     * BK, CC, CK, COEF, CONEI, CONER, CSQI, CSQR, CYI, CYR, C1, C2,
    -     * DIG, DK, D1, D2, EAA, ELIM, FID, FMR, FNU, FNUL, PI, RL, R1M5,
    -     * SFAC, STI, STR, S1I, S1R, S2I, S2R, TOL, TRM1I, TRM1R, TRM2I,
    -     * TRM2R, TTH, ZI, ZR, ZTAI, ZTAR, Z3I, Z3R, D1MACH, AZABS
    -      INTEGER ID, IERR, K, KODE, K1, K2, NZ, I1MACH
    -      DIMENSION CYR(2), CYI(2)
    -      DATA TTH, C1, C2, COEF, PI /6.66666666666666667D-01,
    -     * 6.14926627446000736D-01,4.48288357353826359D-01,
    -     * 5.77350269189625765D-01,3.14159265358979324D+00/
    -      DATA CONER, CONEI /1.0D0,0.0D0/
    -C***FIRST EXECUTABLE STATEMENT  ZBIRY
    -      IERR = 0
    -      NZ=0
    -      IF (ID.LT.0 .OR. ID.GT.1) IERR=1
    -      IF (KODE.LT.1 .OR. KODE.GT.2) IERR=1
    -      IF (IERR.NE.0) RETURN
    -      AZ = AZABS(ZR,ZI)
    -      TOL = DMAX1(D1MACH(4),1.0D-18)
    -      FID = DBLE(FLOAT(ID))
    -      IF (AZ.GT.1.0E0) GO TO 70
    -C-----------------------------------------------------------------------
    -C     POWER SERIES FOR CABS(Z).LE.1.
    -C-----------------------------------------------------------------------
    -      S1R = CONER
    -      S1I = CONEI
    -      S2R = CONER
    -      S2I = CONEI
    -      IF (AZ.LT.TOL) GO TO 130
    -      AA = AZ*AZ
    -      IF (AA.LT.TOL/AZ) GO TO 40
    -      TRM1R = CONER
    -      TRM1I = CONEI
    -      TRM2R = CONER
    -      TRM2I = CONEI
    -      ATRM = 1.0D0
    -      STR = ZR*ZR - ZI*ZI
    -      STI = ZR*ZI + ZI*ZR
    -      Z3R = STR*ZR - STI*ZI
    -      Z3I = STR*ZI + STI*ZR
    -      AZ3 = AZ*AA
    -      AK = 2.0D0 + FID
    -      BK = 3.0D0 - FID - FID
    -      CK = 4.0D0 - FID
    -      DK = 3.0D0 + FID + FID
    -      D1 = AK*DK
    -      D2 = BK*CK
    -      AD = DMIN1(D1,D2)
    -      AK = 24.0D0 + 9.0D0*FID
    -      BK = 30.0D0 - 9.0D0*FID
    -      DO 30 K=1,25
    -        STR = (TRM1R*Z3R-TRM1I*Z3I)/D1
    -        TRM1I = (TRM1R*Z3I+TRM1I*Z3R)/D1
    -        TRM1R = STR
    -        S1R = S1R + TRM1R
    -        S1I = S1I + TRM1I
    -        STR = (TRM2R*Z3R-TRM2I*Z3I)/D2
    -        TRM2I = (TRM2R*Z3I+TRM2I*Z3R)/D2
    -        TRM2R = STR
    -        S2R = S2R + TRM2R
    -        S2I = S2I + TRM2I
    -        ATRM = ATRM*AZ3/AD
    -        D1 = D1 + AK
    -        D2 = D2 + BK
    -        AD = DMIN1(D1,D2)
    -        IF (ATRM.LT.TOL*AD) GO TO 40
    -        AK = AK + 18.0D0
    -        BK = BK + 18.0D0
    -   30 CONTINUE
    -   40 CONTINUE
    -      IF (ID.EQ.1) GO TO 50
    -      BIR = C1*S1R + C2*(ZR*S2R-ZI*S2I)
    -      BII = C1*S1I + C2*(ZR*S2I+ZI*S2R)
    -      IF (KODE.EQ.1) RETURN
    -      CALL AZSQRT(ZR, ZI, STR, STI)
    -      ZTAR = TTH*(ZR*STR-ZI*STI)
    -      ZTAI = TTH*(ZR*STI+ZI*STR)
    -      AA = ZTAR
    -      AA = -DABS(AA)
    -      EAA = DEXP(AA)
    -      BIR = BIR*EAA
    -      BII = BII*EAA
    -      RETURN
    -   50 CONTINUE
    -      BIR = S2R*C2
    -      BII = S2I*C2
    -      IF (AZ.LE.TOL) GO TO 60
    -      CC = C1/(1.0D0+FID)
    -      STR = S1R*ZR - S1I*ZI
    -      STI = S1R*ZI + S1I*ZR
    -      BIR = BIR + CC*(STR*ZR-STI*ZI)
    -      BII = BII + CC*(STR*ZI+STI*ZR)
    -   60 CONTINUE
    -      IF (KODE.EQ.1) RETURN
    -      CALL AZSQRT(ZR, ZI, STR, STI)
    -      ZTAR = TTH*(ZR*STR-ZI*STI)
    -      ZTAI = TTH*(ZR*STI+ZI*STR)
    -      AA = ZTAR
    -      AA = -DABS(AA)
    -      EAA = DEXP(AA)
    -      BIR = BIR*EAA
    -      BII = BII*EAA
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     CASE FOR CABS(Z).GT.1.0
    -C-----------------------------------------------------------------------
    -   70 CONTINUE
    -      FNU = (1.0D0+FID)/3.0D0
    -C-----------------------------------------------------------------------
    -C     SET PARAMETERS RELATED TO MACHINE CONSTANTS.
    -C     TOL IS THE APPROXIMATE UNIT ROUNDOFF LIMITED TO 1.0E-18.
    -C     ELIM IS THE APPROXIMATE EXPONENTIAL OVER- AND UNDERFLOW LIMIT.
    -C     EXP(-ELIM).LT.EXP(-ALIM)=EXP(-ELIM)/TOL    AND
    -C     EXP(ELIM).GT.EXP(ALIM)=EXP(ELIM)*TOL       ARE INTERVALS NEAR
    -C     UNDERFLOW AND OVERFLOW LIMITS WHERE SCALED ARITHMETIC IS DONE.
    -C     RL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC EXPANSION FOR LARGE Z.
    -C     DIG = NUMBER OF BASE 10 DIGITS IN TOL = 10**(-DIG).
    -C     FNUL IS THE LOWER BOUNDARY OF THE ASYMPTOTIC SERIES FOR LARGE FNU.
    -C-----------------------------------------------------------------------
    -      K1 = I1MACH(15)
    -      K2 = I1MACH(16)
    -      R1M5 = D1MACH(5)
    -      K = MIN0(IABS(K1),IABS(K2))
    -      ELIM = 2.303D0*(DBLE(FLOAT(K))*R1M5-3.0D0)
    -      K1 = I1MACH(14) - 1
    -      AA = R1M5*DBLE(FLOAT(K1))
    -      DIG = DMIN1(AA,18.0D0)
    -      AA = AA*2.303D0
    -      ALIM = ELIM + DMAX1(-AA,-41.45D0)
    -      RL = 1.2D0*DIG + 3.0D0
    -      FNUL = 10.0D0 + 6.0D0*(DIG-3.0D0)
    -C-----------------------------------------------------------------------
    -C     TEST FOR RANGE
    -C-----------------------------------------------------------------------
    -      AA=0.5D0/TOL
    -      BB=DBLE(FLOAT(I1MACH(9)))*0.5D0
    -      AA=DMIN1(AA,BB)
    -      AA=AA**TTH
    -      IF (AZ.GT.AA) GO TO 260
    -      AA=DSQRT(AA)
    -      IF (AZ.GT.AA) IERR=3
    -      CALL AZSQRT(ZR, ZI, CSQR, CSQI)
    -      ZTAR = TTH*(ZR*CSQR-ZI*CSQI)
    -      ZTAI = TTH*(ZR*CSQI+ZI*CSQR)
    -C-----------------------------------------------------------------------
    -C     RE(ZTA).LE.0 WHEN RE(Z).LT.0, ESPECIALLY WHEN IM(Z) IS SMALL
    -C-----------------------------------------------------------------------
    -      SFAC = 1.0D0
    -      AK = ZTAI
    -      IF (ZR.GE.0.0D0) GO TO 80
    -      BK = ZTAR
    -      CK = -DABS(BK)
    -      ZTAR = CK
    -      ZTAI = AK
    -   80 CONTINUE
    -      IF (ZI.NE.0.0D0 .OR. ZR.GT.0.0D0) GO TO 90
    -      ZTAR = 0.0D0
    -      ZTAI = AK
    -   90 CONTINUE
    -      AA = ZTAR
    -      IF (KODE.EQ.2) GO TO 100
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST
    -C-----------------------------------------------------------------------
    -      BB = DABS(AA)
    -      IF (BB.LT.ALIM) GO TO 100
    -      BB = BB + 0.25D0*DLOG(AZ)
    -      SFAC = TOL
    -      IF (BB.GT.ELIM) GO TO 190
    -  100 CONTINUE
    -      FMR = 0.0D0
    -      IF (AA.GE.0.0D0 .AND. ZR.GT.0.0D0) GO TO 110
    -      FMR = PI
    -      IF (ZI.LT.0.0D0) FMR = -PI
    -      ZTAR = -ZTAR
    -      ZTAI = -ZTAI
    -  110 CONTINUE
    -C-----------------------------------------------------------------------
    -C     AA=FACTOR FOR ANALYTIC CONTINUATION OF I(FNU,ZTA)
    -C     KODE=2 RETURNS EXP(-ABS(XZTA))*I(FNU,ZTA) FROM CBESI
    -C-----------------------------------------------------------------------
    -      CALL ZBINU(ZTAR, ZTAI, FNU, KODE, 1, CYR, CYI, NZ, RL, FNUL, TOL,
    -     * ELIM, ALIM)
    -      IF (NZ.LT.0) GO TO 200
    -      AA = FMR*FNU
    -      Z3R = SFAC
    -      STR = DCOS(AA)
    -      STI = DSIN(AA)
    -      S1R = (STR*CYR(1)-STI*CYI(1))*Z3R
    -      S1I = (STR*CYI(1)+STI*CYR(1))*Z3R
    -      FNU = (2.0D0-FID)/3.0D0
    -      CALL ZBINU(ZTAR, ZTAI, FNU, KODE, 2, CYR, CYI, NZ, RL, FNUL, TOL,
    -     * ELIM, ALIM)
    -      CYR(1) = CYR(1)*Z3R
    -      CYI(1) = CYI(1)*Z3R
    -      CYR(2) = CYR(2)*Z3R
    -      CYI(2) = CYI(2)*Z3R
    -C-----------------------------------------------------------------------
    -C     BACKWARD RECUR ONE STEP FOR ORDERS -1/3 OR -2/3
    -C-----------------------------------------------------------------------
    -      CALL ZDIV(CYR(1), CYI(1), ZTAR, ZTAI, STR, STI)
    -      S2R = (FNU+FNU)*STR + CYR(2)
    -      S2I = (FNU+FNU)*STI + CYI(2)
    -      AA = FMR*(FNU-1.0D0)
    -      STR = DCOS(AA)
    -      STI = DSIN(AA)
    -      S1R = COEF*(S1R+S2R*STR-S2I*STI)
    -      S1I = COEF*(S1I+S2R*STI+S2I*STR)
    -      IF (ID.EQ.1) GO TO 120
    -      STR = CSQR*S1R - CSQI*S1I
    -      S1I = CSQR*S1I + CSQI*S1R
    -      S1R = STR
    -      BIR = S1R/SFAC
    -      BII = S1I/SFAC
    -      RETURN
    -  120 CONTINUE
    -      STR = ZR*S1R - ZI*S1I
    -      S1I = ZR*S1I + ZI*S1R
    -      S1R = STR
    -      BIR = S1R/SFAC
    -      BII = S1I/SFAC
    -      RETURN
    -  130 CONTINUE
    -      AA = C1*(1.0D0-FID) + FID*C2
    -      BIR = AA
    -      BII = 0.0D0
    -      RETURN
    -  190 CONTINUE
    -      IERR=2
    -      NZ=0
    -      RETURN
    -  200 CONTINUE
    -      IF(NZ.EQ.(-1)) GO TO 190
    -      NZ=0
    -      IERR=5
    -      RETURN
    -  260 CONTINUE
    -      IERR=4
    -      NZ=0
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbknu.f b/scipy-0.10.1/scipy/special/amos/zbknu.f
    deleted file mode 100644
    index a8eb50d7d7..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbknu.f
    +++ /dev/null
    @@ -1,568 +0,0 @@
    -      SUBROUTINE ZBKNU(ZR, ZI, FNU, KODE, N, YR, YI, NZ, TOL, ELIM,
    -     * ALIM)
    -C***BEGIN PROLOGUE  ZBKNU
    -C***REFER TO  ZBESI,ZBESK,ZAIRY,ZBESH
    -C
    -C     ZBKNU COMPUTES THE K BESSEL FUNCTION IN THE RIGHT HALF Z PLANE.
    -C
    -C***ROUTINES CALLED  DGAMLN,I1MACH,D1MACH,ZKSCL,ZSHCH,ZUCHK,AZABS,ZDIV,
    -C                    AZEXP,AZLOG,ZMLT,AZSQRT
    -C***END PROLOGUE  ZBKNU
    -C
    -      DOUBLE PRECISION AA, AK, ALIM, ASCLE, A1, A2, BB, BK, BRY, CAZ,
    -     * CBI, CBR, CC, CCHI, CCHR, CKI, CKR, COEFI, COEFR, CONEI, CONER,
    -     * CRSCR, CSCLR, CSHI, CSHR, CSI, CSR, CSRR, CSSR, CTWOR,
    -     * CZEROI, CZEROR, CZI, CZR, DNU, DNU2, DPI, ELIM, ETEST, FC, FHS,
    -     * FI, FK, FKS, FMUI, FMUR, FNU, FPI, FR, G1, G2, HPI, PI, PR, PTI,
    -     * PTR, P1I, P1R, P2I, P2M, P2R, QI, QR, RAK, RCAZ, RTHPI, RZI,
    -     * RZR, R1, S, SMUI, SMUR, SPI, STI, STR, S1I, S1R, S2I, S2R, TM,
    -     * TOL, TTH, T1, T2, YI, YR, ZI, ZR, DGAMLN, D1MACH, AZABS, ELM,
    -     * CELMR, ZDR, ZDI, AS, ALAS, HELIM, CYR, CYI
    -      INTEGER I, IFLAG, INU, K, KFLAG, KK, KMAX, KODE, KODED, N, NZ,
    -     * IDUM, I1MACH, J, IC, INUB, NW
    -      DIMENSION YR(N), YI(N), CC(8), CSSR(3), CSRR(3), BRY(3), CYR(2),
    -     * CYI(2)
    -C     COMPLEX Z,Y,A,B,RZ,SMU,FU,FMU,F,FLRZ,CZ,S1,S2,CSH,CCH
    -C     COMPLEX CK,P,Q,COEF,P1,P2,CBK,PT,CZERO,CONE,CTWO,ST,EZ,CS,DK
    -C
    -      DATA KMAX / 30 /
    -      DATA CZEROR,CZEROI,CONER,CONEI,CTWOR,R1/
    -     1  0.0D0 , 0.0D0 , 1.0D0 , 0.0D0 , 2.0D0 , 2.0D0 /
    -      DATA DPI, RTHPI, SPI ,HPI, FPI, TTH /
    -     1     3.14159265358979324D0,       1.25331413731550025D0,
    -     2     1.90985931710274403D0,       1.57079632679489662D0,
    -     3     1.89769999331517738D0,       6.66666666666666666D-01/
    -      DATA CC(1), CC(2), CC(3), CC(4), CC(5), CC(6), CC(7), CC(8)/
    -     1     5.77215664901532861D-01,    -4.20026350340952355D-02,
    -     2    -4.21977345555443367D-02,     7.21894324666309954D-03,
    -     3    -2.15241674114950973D-04,    -2.01348547807882387D-05,
    -     4     1.13302723198169588D-06,     6.11609510448141582D-09/
    -C
    -      CAZ = AZABS(ZR,ZI)
    -      CSCLR = 1.0D0/TOL
    -      CRSCR = TOL
    -      CSSR(1) = CSCLR
    -      CSSR(2) = 1.0D0
    -      CSSR(3) = CRSCR
    -      CSRR(1) = CRSCR
    -      CSRR(2) = 1.0D0
    -      CSRR(3) = CSCLR
    -      BRY(1) = 1.0D+3*D1MACH(1)/TOL
    -      BRY(2) = 1.0D0/BRY(1)
    -      BRY(3) = D1MACH(2)
    -      NZ = 0
    -      IFLAG = 0
    -      KODED = KODE
    -      RCAZ = 1.0D0/CAZ
    -      STR = ZR*RCAZ
    -      STI = -ZI*RCAZ
    -      RZR = (STR+STR)*RCAZ
    -      RZI = (STI+STI)*RCAZ
    -      INU = INT(SNGL(FNU+0.5D0))
    -      DNU = FNU - DBLE(FLOAT(INU))
    -      IF (DABS(DNU).EQ.0.5D0) GO TO 110
    -      DNU2 = 0.0D0
    -      IF (DABS(DNU).GT.TOL) DNU2 = DNU*DNU
    -      IF (CAZ.GT.R1) GO TO 110
    -C-----------------------------------------------------------------------
    -C     SERIES FOR CABS(Z).LE.R1
    -C-----------------------------------------------------------------------
    -      FC = 1.0D0
    -      CALL AZLOG(RZR, RZI, SMUR, SMUI, IDUM)
    -      FMUR = SMUR*DNU
    -      FMUI = SMUI*DNU
    -      CALL ZSHCH(FMUR, FMUI, CSHR, CSHI, CCHR, CCHI)
    -      IF (DNU.EQ.0.0D0) GO TO 10
    -      FC = DNU*DPI
    -      FC = FC/DSIN(FC)
    -      SMUR = CSHR/DNU
    -      SMUI = CSHI/DNU
    -   10 CONTINUE
    -      A2 = 1.0D0 + DNU
    -C-----------------------------------------------------------------------
    -C     GAM(1-Z)*GAM(1+Z)=PI*Z/SIN(PI*Z), T1=1/GAM(1-DNU), T2=1/GAM(1+DNU)
    -C-----------------------------------------------------------------------
    -      T2 = DEXP(-DGAMLN(A2,IDUM))
    -      T1 = 1.0D0/(T2*FC)
    -      IF (DABS(DNU).GT.0.1D0) GO TO 40
    -C-----------------------------------------------------------------------
    -C     SERIES FOR F0 TO RESOLVE INDETERMINACY FOR SMALL ABS(DNU)
    -C-----------------------------------------------------------------------
    -      AK = 1.0D0
    -      S = CC(1)
    -      DO 20 K=2,8
    -        AK = AK*DNU2
    -        TM = CC(K)*AK
    -        S = S + TM
    -        IF (DABS(TM).LT.TOL) GO TO 30
    -   20 CONTINUE
    -   30 G1 = -S
    -      GO TO 50
    -   40 CONTINUE
    -      G1 = (T1-T2)/(DNU+DNU)
    -   50 CONTINUE
    -      G2 = (T1+T2)*0.5D0
    -      FR = FC*(CCHR*G1+SMUR*G2)
    -      FI = FC*(CCHI*G1+SMUI*G2)
    -      CALL AZEXP(FMUR, FMUI, STR, STI)
    -      PR = 0.5D0*STR/T2
    -      PI = 0.5D0*STI/T2
    -      CALL ZDIV(0.5D0, 0.0D0, STR, STI, PTR, PTI)
    -      QR = PTR/T1
    -      QI = PTI/T1
    -      S1R = FR
    -      S1I = FI
    -      S2R = PR
    -      S2I = PI
    -      AK = 1.0D0
    -      A1 = 1.0D0
    -      CKR = CONER
    -      CKI = CONEI
    -      BK = 1.0D0 - DNU2
    -      IF (INU.GT.0 .OR. N.GT.1) GO TO 80
    -C-----------------------------------------------------------------------
    -C     GENERATE K(FNU,Z), 0.0D0 .LE. FNU .LT. 0.5D0 AND N=1
    -C-----------------------------------------------------------------------
    -      IF (CAZ.LT.TOL) GO TO 70
    -      CALL ZMLT(ZR, ZI, ZR, ZI, CZR, CZI)
    -      CZR = 0.25D0*CZR
    -      CZI = 0.25D0*CZI
    -      T1 = 0.25D0*CAZ*CAZ
    -   60 CONTINUE
    -      FR = (FR*AK+PR+QR)/BK
    -      FI = (FI*AK+PI+QI)/BK
    -      STR = 1.0D0/(AK-DNU)
    -      PR = PR*STR
    -      PI = PI*STR
    -      STR = 1.0D0/(AK+DNU)
    -      QR = QR*STR
    -      QI = QI*STR
    -      STR = CKR*CZR - CKI*CZI
    -      RAK = 1.0D0/AK
    -      CKI = (CKR*CZI+CKI*CZR)*RAK
    -      CKR = STR*RAK
    -      S1R = CKR*FR - CKI*FI + S1R
    -      S1I = CKR*FI + CKI*FR + S1I
    -      A1 = A1*T1*RAK
    -      BK = BK + AK + AK + 1.0D0
    -      AK = AK + 1.0D0
    -      IF (A1.GT.TOL) GO TO 60
    -   70 CONTINUE
    -      YR(1) = S1R
    -      YI(1) = S1I
    -      IF (KODED.EQ.1) RETURN
    -      CALL AZEXP(ZR, ZI, STR, STI)
    -      CALL ZMLT(S1R, S1I, STR, STI, YR(1), YI(1))
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     GENERATE K(DNU,Z) AND K(DNU+1,Z) FOR FORWARD RECURRENCE
    -C-----------------------------------------------------------------------
    -   80 CONTINUE
    -      IF (CAZ.LT.TOL) GO TO 100
    -      CALL ZMLT(ZR, ZI, ZR, ZI, CZR, CZI)
    -      CZR = 0.25D0*CZR
    -      CZI = 0.25D0*CZI
    -      T1 = 0.25D0*CAZ*CAZ
    -   90 CONTINUE
    -      FR = (FR*AK+PR+QR)/BK
    -      FI = (FI*AK+PI+QI)/BK
    -      STR = 1.0D0/(AK-DNU)
    -      PR = PR*STR
    -      PI = PI*STR
    -      STR = 1.0D0/(AK+DNU)
    -      QR = QR*STR
    -      QI = QI*STR
    -      STR = CKR*CZR - CKI*CZI
    -      RAK = 1.0D0/AK
    -      CKI = (CKR*CZI+CKI*CZR)*RAK
    -      CKR = STR*RAK
    -      S1R = CKR*FR - CKI*FI + S1R
    -      S1I = CKR*FI + CKI*FR + S1I
    -      STR = PR - FR*AK
    -      STI = PI - FI*AK
    -      S2R = CKR*STR - CKI*STI + S2R
    -      S2I = CKR*STI + CKI*STR + S2I
    -      A1 = A1*T1*RAK
    -      BK = BK + AK + AK + 1.0D0
    -      AK = AK + 1.0D0
    -      IF (A1.GT.TOL) GO TO 90
    -  100 CONTINUE
    -      KFLAG = 2
    -      A1 = FNU + 1.0D0
    -      AK = A1*DABS(SMUR)
    -      IF (AK.GT.ALIM) KFLAG = 3
    -      STR = CSSR(KFLAG)
    -      P2R = S2R*STR
    -      P2I = S2I*STR
    -      CALL ZMLT(P2R, P2I, RZR, RZI, S2R, S2I)
    -      S1R = S1R*STR
    -      S1I = S1I*STR
    -      IF (KODED.EQ.1) GO TO 210
    -      CALL AZEXP(ZR, ZI, FR, FI)
    -      CALL ZMLT(S1R, S1I, FR, FI, S1R, S1I)
    -      CALL ZMLT(S2R, S2I, FR, FI, S2R, S2I)
    -      GO TO 210
    -C-----------------------------------------------------------------------
    -C     IFLAG=0 MEANS NO UNDERFLOW OCCURRED
    -C     IFLAG=1 MEANS AN UNDERFLOW OCCURRED- COMPUTATION PROCEEDS WITH
    -C     KODED=2 AND A TEST FOR ON SCALE VALUES IS MADE DURING FORWARD
    -C     RECURSION
    -C-----------------------------------------------------------------------
    -  110 CONTINUE
    -      CALL AZSQRT(ZR, ZI, STR, STI)
    -      CALL ZDIV(RTHPI, CZEROI, STR, STI, COEFR, COEFI)
    -      KFLAG = 2
    -      IF (KODED.EQ.2) GO TO 120
    -      IF (ZR.GT.ALIM) GO TO 290
    -C     BLANK LINE
    -      STR = DEXP(-ZR)*CSSR(KFLAG)
    -      STI = -STR*DSIN(ZI)
    -      STR = STR*DCOS(ZI)
    -      CALL ZMLT(COEFR, COEFI, STR, STI, COEFR, COEFI)
    -  120 CONTINUE
    -      IF (DABS(DNU).EQ.0.5D0) GO TO 300
    -C-----------------------------------------------------------------------
    -C     MILLER ALGORITHM FOR CABS(Z).GT.R1
    -C-----------------------------------------------------------------------
    -      AK = DCOS(DPI*DNU)
    -      AK = DABS(AK)
    -      IF (AK.EQ.CZEROR) GO TO 300
    -      FHS = DABS(0.25D0-DNU2)
    -      IF (FHS.EQ.CZEROR) GO TO 300
    -C-----------------------------------------------------------------------
    -C     COMPUTE R2=F(E). IF CABS(Z).GE.R2, USE FORWARD RECURRENCE TO
    -C     DETERMINE THE BACKWARD INDEX K. R2=F(E) IS A STRAIGHT LINE ON
    -C     12.LE.E.LE.60. E IS COMPUTED FROM 2**(-E)=B**(1-I1MACH(14))=
    -C     TOL WHERE B IS THE BASE OF THE ARITHMETIC.
    -C-----------------------------------------------------------------------
    -      T1 = DBLE(FLOAT(I1MACH(14)-1))
    -      T1 = T1*D1MACH(5)*3.321928094D0
    -      T1 = DMAX1(T1,12.0D0)
    -      T1 = DMIN1(T1,60.0D0)
    -      T2 = TTH*T1 - 6.0D0
    -      IF (ZR.NE.0.0D0) GO TO 130
    -      T1 = HPI
    -      GO TO 140
    -  130 CONTINUE
    -      T1 = DATAN(ZI/ZR)
    -      T1 = DABS(T1)
    -  140 CONTINUE
    -      IF (T2.GT.CAZ) GO TO 170
    -C-----------------------------------------------------------------------
    -C     FORWARD RECURRENCE LOOP WHEN CABS(Z).GE.R2
    -C-----------------------------------------------------------------------
    -      ETEST = AK/(DPI*CAZ*TOL)
    -      FK = CONER
    -      IF (ETEST.LT.CONER) GO TO 180
    -      FKS = CTWOR
    -      CKR = CAZ + CAZ + CTWOR
    -      P1R = CZEROR
    -      P2R = CONER
    -      DO 150 I=1,KMAX
    -        AK = FHS/FKS
    -        CBR = CKR/(FK+CONER)
    -        PTR = P2R
    -        P2R = CBR*P2R - P1R*AK
    -        P1R = PTR
    -        CKR = CKR + CTWOR
    -        FKS = FKS + FK + FK + CTWOR
    -        FHS = FHS + FK + FK
    -        FK = FK + CONER
    -        STR = DABS(P2R)*FK
    -        IF (ETEST.LT.STR) GO TO 160
    -  150 CONTINUE
    -      GO TO 310
    -  160 CONTINUE
    -      FK = FK + SPI*T1*DSQRT(T2/CAZ)
    -      FHS = DABS(0.25D0-DNU2)
    -      GO TO 180
    -  170 CONTINUE
    -C-----------------------------------------------------------------------
    -C     COMPUTE BACKWARD INDEX K FOR CABS(Z).LT.R2
    -C-----------------------------------------------------------------------
    -      A2 = DSQRT(CAZ)
    -      AK = FPI*AK/(TOL*DSQRT(A2))
    -      AA = 3.0D0*T1/(1.0D0+CAZ)
    -      BB = 14.7D0*T1/(28.0D0+CAZ)
    -      AK = (DLOG(AK)+CAZ*DCOS(AA)/(1.0D0+0.008D0*CAZ))/DCOS(BB)
    -      FK = 0.12125D0*AK*AK/CAZ + 1.5D0
    -  180 CONTINUE
    -C-----------------------------------------------------------------------
    -C     BACKWARD RECURRENCE LOOP FOR MILLER ALGORITHM
    -C-----------------------------------------------------------------------
    -      K = INT(SNGL(FK))
    -      FK = DBLE(FLOAT(K))
    -      FKS = FK*FK
    -      P1R = CZEROR
    -      P1I = CZEROI
    -      P2R = TOL
    -      P2I = CZEROI
    -      CSR = P2R
    -      CSI = P2I
    -      DO 190 I=1,K
    -        A1 = FKS - FK
    -        AK = (FKS+FK)/(A1+FHS)
    -        RAK = 2.0D0/(FK+CONER)
    -        CBR = (FK+ZR)*RAK
    -        CBI = ZI*RAK
    -        PTR = P2R
    -        PTI = P2I
    -        P2R = (PTR*CBR-PTI*CBI-P1R)*AK
    -        P2I = (PTI*CBR+PTR*CBI-P1I)*AK
    -        P1R = PTR
    -        P1I = PTI
    -        CSR = CSR + P2R
    -        CSI = CSI + P2I
    -        FKS = A1 - FK + CONER
    -        FK = FK - CONER
    -  190 CONTINUE
    -C-----------------------------------------------------------------------
    -C     COMPUTE (P2/CS)=(P2/CABS(CS))*(CONJG(CS)/CABS(CS)) FOR BETTER
    -C     SCALING
    -C-----------------------------------------------------------------------
    -      TM = AZABS(CSR,CSI)
    -      PTR = 1.0D0/TM
    -      S1R = P2R*PTR
    -      S1I = P2I*PTR
    -      CSR = CSR*PTR
    -      CSI = -CSI*PTR
    -      CALL ZMLT(COEFR, COEFI, S1R, S1I, STR, STI)
    -      CALL ZMLT(STR, STI, CSR, CSI, S1R, S1I)
    -      IF (INU.GT.0 .OR. N.GT.1) GO TO 200
    -      ZDR = ZR
    -      ZDI = ZI
    -      IF(IFLAG.EQ.1) GO TO 270
    -      GO TO 240
    -  200 CONTINUE
    -C-----------------------------------------------------------------------
    -C     COMPUTE P1/P2=(P1/CABS(P2)*CONJG(P2)/CABS(P2) FOR SCALING
    -C-----------------------------------------------------------------------
    -      TM = AZABS(P2R,P2I)
    -      PTR = 1.0D0/TM
    -      P1R = P1R*PTR
    -      P1I = P1I*PTR
    -      P2R = P2R*PTR
    -      P2I = -P2I*PTR
    -      CALL ZMLT(P1R, P1I, P2R, P2I, PTR, PTI)
    -      STR = DNU + 0.5D0 - PTR
    -      STI = -PTI
    -      CALL ZDIV(STR, STI, ZR, ZI, STR, STI)
    -      STR = STR + 1.0D0
    -      CALL ZMLT(STR, STI, S1R, S1I, S2R, S2I)
    -C-----------------------------------------------------------------------
    -C     FORWARD RECURSION ON THE THREE TERM RECURSION WITH RELATION WITH
    -C     SCALING NEAR EXPONENT EXTREMES ON KFLAG=1 OR KFLAG=3
    -C-----------------------------------------------------------------------
    -  210 CONTINUE
    -      STR = DNU + 1.0D0
    -      CKR = STR*RZR
    -      CKI = STR*RZI
    -      IF (N.EQ.1) INU = INU - 1
    -      IF (INU.GT.0) GO TO 220
    -      IF (N.GT.1) GO TO 215
    -      S1R = S2R
    -      S1I = S2I
    -  215 CONTINUE
    -      ZDR = ZR
    -      ZDI = ZI
    -      IF(IFLAG.EQ.1) GO TO 270
    -      GO TO 240
    -  220 CONTINUE
    -      INUB = 1
    -      IF(IFLAG.EQ.1) GO TO 261
    -  225 CONTINUE
    -      P1R = CSRR(KFLAG)
    -      ASCLE = BRY(KFLAG)
    -      DO 230 I=INUB,INU
    -        STR = S2R
    -        STI = S2I
    -        S2R = CKR*STR - CKI*STI + S1R
    -        S2I = CKR*STI + CKI*STR + S1I
    -        S1R = STR
    -        S1I = STI
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        IF (KFLAG.GE.3) GO TO 230
    -        P2R = S2R*P1R
    -        P2I = S2I*P1R
    -        STR = DABS(P2R)
    -        STI = DABS(P2I)
    -        P2M = DMAX1(STR,STI)
    -        IF (P2M.LE.ASCLE) GO TO 230
    -        KFLAG = KFLAG + 1
    -        ASCLE = BRY(KFLAG)
    -        S1R = S1R*P1R
    -        S1I = S1I*P1R
    -        S2R = P2R
    -        S2I = P2I
    -        STR = CSSR(KFLAG)
    -        S1R = S1R*STR
    -        S1I = S1I*STR
    -        S2R = S2R*STR
    -        S2I = S2I*STR
    -        P1R = CSRR(KFLAG)
    -  230 CONTINUE
    -      IF (N.NE.1) GO TO 240
    -      S1R = S2R
    -      S1I = S2I
    -  240 CONTINUE
    -      STR = CSRR(KFLAG)
    -      YR(1) = S1R*STR
    -      YI(1) = S1I*STR
    -      IF (N.EQ.1) RETURN
    -      YR(2) = S2R*STR
    -      YI(2) = S2I*STR
    -      IF (N.EQ.2) RETURN
    -      KK = 2
    -  250 CONTINUE
    -      KK = KK + 1
    -      IF (KK.GT.N) RETURN
    -      P1R = CSRR(KFLAG)
    -      ASCLE = BRY(KFLAG)
    -      DO 260 I=KK,N
    -        P2R = S2R
    -        P2I = S2I
    -        S2R = CKR*P2R - CKI*P2I + S1R
    -        S2I = CKI*P2R + CKR*P2I + S1I
    -        S1R = P2R
    -        S1I = P2I
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        P2R = S2R*P1R
    -        P2I = S2I*P1R
    -        YR(I) = P2R
    -        YI(I) = P2I
    -        IF (KFLAG.GE.3) GO TO 260
    -        STR = DABS(P2R)
    -        STI = DABS(P2I)
    -        P2M = DMAX1(STR,STI)
    -        IF (P2M.LE.ASCLE) GO TO 260
    -        KFLAG = KFLAG + 1
    -        ASCLE = BRY(KFLAG)
    -        S1R = S1R*P1R
    -        S1I = S1I*P1R
    -        S2R = P2R
    -        S2I = P2I
    -        STR = CSSR(KFLAG)
    -        S1R = S1R*STR
    -        S1I = S1I*STR
    -        S2R = S2R*STR
    -        S2I = S2I*STR
    -        P1R = CSRR(KFLAG)
    -  260 CONTINUE
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     IFLAG=1 CASES, FORWARD RECURRENCE ON SCALED VALUES ON UNDERFLOW
    -C-----------------------------------------------------------------------
    -  261 CONTINUE
    -      HELIM = 0.5D0*ELIM
    -      ELM = DEXP(-ELIM)
    -      CELMR = ELM
    -      ASCLE = BRY(1)
    -      ZDR = ZR
    -      ZDI = ZI
    -      IC = -1
    -      J = 2
    -      DO 262 I=1,INU
    -        STR = S2R
    -        STI = S2I
    -        S2R = STR*CKR-STI*CKI+S1R
    -        S2I = STI*CKR+STR*CKI+S1I
    -        S1R = STR
    -        S1I = STI
    -        CKR = CKR+RZR
    -        CKI = CKI+RZI
    -        AS = AZABS(S2R,S2I)
    -        ALAS = DLOG(AS)
    -        P2R = -ZDR+ALAS
    -        IF(P2R.LT.(-ELIM)) GO TO 263
    -        CALL AZLOG(S2R,S2I,STR,STI,IDUM)
    -        P2R = -ZDR+STR
    -        P2I = -ZDI+STI
    -        P2M = DEXP(P2R)/TOL
    -        P1R = P2M*DCOS(P2I)
    -        P1I = P2M*DSIN(P2I)
    -        CALL ZUCHK(P1R,P1I,NW,ASCLE,TOL)
    -        IF(NW.NE.0) GO TO 263
    -        J = 3 - J
    -        CYR(J) = P1R
    -        CYI(J) = P1I
    -        IF(IC.EQ.(I-1)) GO TO 264
    -        IC = I
    -        GO TO 262
    -  263   CONTINUE
    -        IF(ALAS.LT.HELIM) GO TO 262
    -        ZDR = ZDR-ELIM
    -        S1R = S1R*CELMR
    -        S1I = S1I*CELMR
    -        S2R = S2R*CELMR
    -        S2I = S2I*CELMR
    -  262 CONTINUE
    -      IF(N.NE.1) GO TO 270
    -      S1R = S2R
    -      S1I = S2I
    -      GO TO 270
    -  264 CONTINUE
    -      KFLAG = 1
    -      INUB = I+1
    -      S2R = CYR(J)
    -      S2I = CYI(J)
    -      J = 3 - J
    -      S1R = CYR(J)
    -      S1I = CYI(J)
    -      IF(INUB.LE.INU) GO TO 225
    -      IF(N.NE.1) GO TO 240
    -      S1R = S2R
    -      S1I = S2I
    -      GO TO 240
    -  270 CONTINUE
    -      YR(1) = S1R
    -      YI(1) = S1I
    -      IF(N.EQ.1) GO TO 280
    -      YR(2) = S2R
    -      YI(2) = S2I
    -  280 CONTINUE
    -      ASCLE = BRY(1)
    -      CALL ZKSCL(ZDR,ZDI,FNU,N,YR,YI,NZ,RZR,RZI,ASCLE,TOL,ELIM)
    -      INU = N - NZ
    -      IF (INU.LE.0) RETURN
    -      KK = NZ + 1
    -      S1R = YR(KK)
    -      S1I = YI(KK)
    -      YR(KK) = S1R*CSRR(1)
    -      YI(KK) = S1I*CSRR(1)
    -      IF (INU.EQ.1) RETURN
    -      KK = NZ + 2
    -      S2R = YR(KK)
    -      S2I = YI(KK)
    -      YR(KK) = S2R*CSRR(1)
    -      YI(KK) = S2I*CSRR(1)
    -      IF (INU.EQ.2) RETURN
    -      T2 = FNU + DBLE(FLOAT(KK-1))
    -      CKR = T2*RZR
    -      CKI = T2*RZI
    -      KFLAG = 1
    -      GO TO 250
    -  290 CONTINUE
    -C-----------------------------------------------------------------------
    -C     SCALE BY DEXP(Z), IFLAG = 1 CASES
    -C-----------------------------------------------------------------------
    -      KODED = 2
    -      IFLAG = 1
    -      KFLAG = 2
    -      GO TO 120
    -C-----------------------------------------------------------------------
    -C     FNU=HALF ODD INTEGER CASE, DNU=-0.5
    -C-----------------------------------------------------------------------
    -  300 CONTINUE
    -      S1R = COEFR
    -      S1I = COEFI
    -      S2R = COEFR
    -      S2I = COEFI
    -      GO TO 210
    -C
    -C
    -  310 CONTINUE
    -      NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbuni.f b/scipy-0.10.1/scipy/special/amos/zbuni.f
    deleted file mode 100644
    index 965eddf7ed..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbuni.f
    +++ /dev/null
    @@ -1,174 +0,0 @@
    -      SUBROUTINE ZBUNI(ZR, ZI, FNU, KODE, N, YR, YI, NZ, NUI, NLAST,
    -     * FNUL, TOL, ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZBUNI
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZBUNI COMPUTES THE I BESSEL FUNCTION FOR LARGE CABS(Z).GT.
    -C     FNUL AND FNU+N-1.LT.FNUL. THE ORDER IS INCREASED FROM
    -C     FNU+N-1 GREATER THAN FNUL BY ADDING NUI AND COMPUTING
    -C     ACCORDING TO THE UNIFORM ASYMPTOTIC EXPANSION FOR I(FNU,Z)
    -C     ON IFORM=1 AND THE EXPANSION FOR J(FNU,Z) ON IFORM=2
    -C
    -C***ROUTINES CALLED  ZUNI1,ZUNI2,AZABS,D1MACH
    -C***END PROLOGUE  ZBUNI
    -C     COMPLEX CSCL,CSCR,CY,RZ,ST,S1,S2,Y,Z
    -      DOUBLE PRECISION ALIM, AX, AY, CSCLR, CSCRR, CYI, CYR, DFNU,
    -     * ELIM, FNU, FNUI, FNUL, GNU, RAZ, RZI, RZR, STI, STR, S1I, S1R,
    -     * S2I, S2R, TOL, YI, YR, ZI, ZR, AZABS, ASCLE, BRY, C1R, C1I, C1M,
    -     * D1MACH
    -      INTEGER I, IFLAG, IFORM, K, KODE, N, NL, NLAST, NUI, NW, NZ
    -      DIMENSION YR(N), YI(N), CYR(2), CYI(2), BRY(3)
    -      NZ = 0
    -      AX = DABS(ZR)*1.7321D0
    -      AY = DABS(ZI)
    -      IFORM = 1
    -      IF (AY.GT.AX) IFORM = 2
    -      IF (NUI.EQ.0) GO TO 60
    -      FNUI = DBLE(FLOAT(NUI))
    -      DFNU = FNU + DBLE(FLOAT(N-1))
    -      GNU = DFNU + FNUI
    -      IF (IFORM.EQ.2) GO TO 10
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR I(FNU,Z) FOR LARGE FNU APPLIED IN
    -C     -PI/3.LE.ARG(Z).LE.PI/3
    -C-----------------------------------------------------------------------
    -      CALL ZUNI1(ZR, ZI, GNU, KODE, 2, CYR, CYI, NW, NLAST, FNUL, TOL,
    -     * ELIM, ALIM)
    -      GO TO 20
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR J(FNU,Z*EXP(M*HPI)) FOR LARGE FNU
    -C     APPLIED IN PI/3.LT.ABS(ARG(Z)).LE.PI/2 WHERE M=+I OR -I
    -C     AND HPI=PI/2
    -C-----------------------------------------------------------------------
    -      CALL ZUNI2(ZR, ZI, GNU, KODE, 2, CYR, CYI, NW, NLAST, FNUL, TOL,
    -     * ELIM, ALIM)
    -   20 CONTINUE
    -      IF (NW.LT.0) GO TO 50
    -      IF (NW.NE.0) GO TO 90
    -      STR = AZABS(CYR(1),CYI(1))
    -C----------------------------------------------------------------------
    -C     SCALE BACKWARD RECURRENCE, BRY(3) IS DEFINED BUT NEVER USED
    -C----------------------------------------------------------------------
    -      BRY(1)=1.0D+3*D1MACH(1)/TOL
    -      BRY(2) = 1.0D0/BRY(1)
    -      BRY(3) = BRY(2)
    -      IFLAG = 2
    -      ASCLE = BRY(2)
    -      CSCLR = 1.0D0
    -      IF (STR.GT.BRY(1)) GO TO 21
    -      IFLAG = 1
    -      ASCLE = BRY(1)
    -      CSCLR = 1.0D0/TOL
    -      GO TO 25
    -   21 CONTINUE
    -      IF (STR.LT.BRY(2)) GO TO 25
    -      IFLAG = 3
    -      ASCLE=BRY(3)
    -      CSCLR = TOL
    -   25 CONTINUE
    -      CSCRR = 1.0D0/CSCLR
    -      S1R = CYR(2)*CSCLR
    -      S1I = CYI(2)*CSCLR
    -      S2R = CYR(1)*CSCLR
    -      S2I = CYI(1)*CSCLR
    -      RAZ = 1.0D0/AZABS(ZR,ZI)
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      RZR = (STR+STR)*RAZ
    -      RZI = (STI+STI)*RAZ
    -      DO 30 I=1,NUI
    -        STR = S2R
    -        STI = S2I
    -        S2R = (DFNU+FNUI)*(RZR*STR-RZI*STI) + S1R
    -        S2I = (DFNU+FNUI)*(RZR*STI+RZI*STR) + S1I
    -        S1R = STR
    -        S1I = STI
    -        FNUI = FNUI - 1.0D0
    -        IF (IFLAG.GE.3) GO TO 30
    -        STR = S2R*CSCRR
    -        STI = S2I*CSCRR
    -        C1R = DABS(STR)
    -        C1I = DABS(STI)
    -        C1M = DMAX1(C1R,C1I)
    -        IF (C1M.LE.ASCLE) GO TO 30
    -        IFLAG = IFLAG+1
    -        ASCLE = BRY(IFLAG)
    -        S1R = S1R*CSCRR
    -        S1I = S1I*CSCRR
    -        S2R = STR
    -        S2I = STI
    -        CSCLR = CSCLR*TOL
    -        CSCRR = 1.0D0/CSCLR
    -        S1R = S1R*CSCLR
    -        S1I = S1I*CSCLR
    -        S2R = S2R*CSCLR
    -        S2I = S2I*CSCLR
    -   30 CONTINUE
    -      YR(N) = S2R*CSCRR
    -      YI(N) = S2I*CSCRR
    -      IF (N.EQ.1) RETURN
    -      NL = N - 1
    -      FNUI = DBLE(FLOAT(NL))
    -      K = NL
    -      DO 40 I=1,NL
    -        STR = S2R
    -        STI = S2I
    -        S2R = (FNU+FNUI)*(RZR*STR-RZI*STI) + S1R
    -        S2I = (FNU+FNUI)*(RZR*STI+RZI*STR) + S1I
    -        S1R = STR
    -        S1I = STI
    -        STR = S2R*CSCRR
    -        STI = S2I*CSCRR
    -        YR(K) = STR
    -        YI(K) = STI
    -        FNUI = FNUI - 1.0D0
    -        K = K - 1
    -        IF (IFLAG.GE.3) GO TO 40
    -        C1R = DABS(STR)
    -        C1I = DABS(STI)
    -        C1M = DMAX1(C1R,C1I)
    -        IF (C1M.LE.ASCLE) GO TO 40
    -        IFLAG = IFLAG+1
    -        ASCLE = BRY(IFLAG)
    -        S1R = S1R*CSCRR
    -        S1I = S1I*CSCRR
    -        S2R = STR
    -        S2I = STI
    -        CSCLR = CSCLR*TOL
    -        CSCRR = 1.0D0/CSCLR
    -        S1R = S1R*CSCLR
    -        S1I = S1I*CSCLR
    -        S2R = S2R*CSCLR
    -        S2I = S2I*CSCLR
    -   40 CONTINUE
    -      RETURN
    -   50 CONTINUE
    -      NZ = -1
    -      IF(NW.EQ.(-2)) NZ=-2
    -      RETURN
    -   60 CONTINUE
    -      IF (IFORM.EQ.2) GO TO 70
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR I(FNU,Z) FOR LARGE FNU APPLIED IN
    -C     -PI/3.LE.ARG(Z).LE.PI/3
    -C-----------------------------------------------------------------------
    -      CALL ZUNI1(ZR, ZI, FNU, KODE, N, YR, YI, NW, NLAST, FNUL, TOL,
    -     * ELIM, ALIM)
    -      GO TO 80
    -   70 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR J(FNU,Z*EXP(M*HPI)) FOR LARGE FNU
    -C     APPLIED IN PI/3.LT.ABS(ARG(Z)).LE.PI/2 WHERE M=+I OR -I
    -C     AND HPI=PI/2
    -C-----------------------------------------------------------------------
    -      CALL ZUNI2(ZR, ZI, FNU, KODE, N, YR, YI, NW, NLAST, FNUL, TOL,
    -     * ELIM, ALIM)
    -   80 CONTINUE
    -      IF (NW.LT.0) GO TO 50
    -      NZ = NW
    -      RETURN
    -   90 CONTINUE
    -      NLAST = N
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zbunk.f b/scipy-0.10.1/scipy/special/amos/zbunk.f
    deleted file mode 100644
    index b20b79f304..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zbunk.f
    +++ /dev/null
    @@ -1,35 +0,0 @@
    -      SUBROUTINE ZBUNK(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, TOL, ELIM,
    -     * ALIM)
    -C***BEGIN PROLOGUE  ZBUNK
    -C***REFER TO  ZBESK,ZBESH
    -C
    -C     ZBUNK COMPUTES THE K BESSEL FUNCTION FOR FNU.GT.FNUL.
    -C     ACCORDING TO THE UNIFORM ASYMPTOTIC EXPANSION FOR K(FNU,Z)
    -C     IN ZUNK1 AND THE EXPANSION FOR H(2,FNU,Z) IN ZUNK2
    -C
    -C***ROUTINES CALLED  ZUNK1,ZUNK2
    -C***END PROLOGUE  ZBUNK
    -C     COMPLEX Y,Z
    -      DOUBLE PRECISION ALIM, AX, AY, ELIM, FNU, TOL, YI, YR, ZI, ZR
    -      INTEGER KODE, MR, N, NZ
    -      DIMENSION YR(N), YI(N)
    -      NZ = 0
    -      AX = DABS(ZR)*1.7321D0
    -      AY = DABS(ZI)
    -      IF (AY.GT.AX) GO TO 10
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR K(FNU,Z) FOR LARGE FNU APPLIED IN
    -C     -PI/3.LE.ARG(Z).LE.PI/3
    -C-----------------------------------------------------------------------
    -      CALL ZUNK1(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, TOL, ELIM, ALIM)
    -      GO TO 20
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR H(2,FNU,Z*EXP(M*HPI)) FOR LARGE FNU
    -C     APPLIED IN PI/3.LT.ABS(ARG(Z)).LE.PI/2 WHERE M=+I OR -I
    -C     AND HPI=PI/2
    -C-----------------------------------------------------------------------
    -      CALL ZUNK2(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, TOL, ELIM, ALIM)
    -   20 CONTINUE
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zdiv.f b/scipy-0.10.1/scipy/special/amos/zdiv.f
    deleted file mode 100644
    index f897f4ebf4..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zdiv.f
    +++ /dev/null
    @@ -1,19 +0,0 @@
    -      SUBROUTINE ZDIV(AR, AI, BR, BI, CR, CI)
    -C***BEGIN PROLOGUE  ZDIV
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZBESY,ZAIRY,ZBIRY
    -C
    -C     DOUBLE PRECISION COMPLEX DIVIDE C=A/B.
    -C
    -C***ROUTINES CALLED  AZABS
    -C***END PROLOGUE  ZDIV
    -      DOUBLE PRECISION AR, AI, BR, BI, CR, CI, BM, CA, CB, CC, CD
    -      DOUBLE PRECISION AZABS
    -      BM = 1.0D0/AZABS(BR,BI)
    -      CC = BR*BM
    -      CD = BI*BM
    -      CA = (AR*CC+AI*CD)*BM
    -      CB = (AI*CC-AR*CD)*BM
    -      CR = CA
    -      CI = CB
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zexp.f b/scipy-0.10.1/scipy/special/amos/zexp.f
    deleted file mode 100644
    index 8a34276226..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zexp.f
    +++ /dev/null
    @@ -1,16 +0,0 @@
    -      SUBROUTINE AZEXP(AR, AI, BR, BI)
    -C***BEGIN PROLOGUE  AZEXP
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZBESY,ZAIRY,ZBIRY
    -C
    -C     DOUBLE PRECISION COMPLEX EXPONENTIAL FUNCTION B=EXP(A)
    -C
    -C***ROUTINES CALLED  (NONE)
    -C***END PROLOGUE  AZEXP
    -      DOUBLE PRECISION AR, AI, BR, BI, ZM, CA, CB
    -      ZM = DEXP(AR)
    -      CA = ZM*DCOS(AI)
    -      CB = ZM*DSIN(AI)
    -      BR = CA
    -      BI = CB
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zkscl.f b/scipy-0.10.1/scipy/special/amos/zkscl.f
    deleted file mode 100644
    index 90df795bcf..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zkscl.f
    +++ /dev/null
    @@ -1,121 +0,0 @@
    -      SUBROUTINE ZKSCL(ZRR,ZRI,FNU,N,YR,YI,NZ,RZR,RZI,ASCLE,TOL,ELIM)
    -C***BEGIN PROLOGUE  ZKSCL
    -C***REFER TO  ZBESK
    -C
    -C     SET K FUNCTIONS TO ZERO ON UNDERFLOW, CONTINUE RECURRENCE
    -C     ON SCALED FUNCTIONS UNTIL TWO MEMBERS COME ON SCALE, THEN
    -C     RETURN WITH MIN(NZ+2,N) VALUES SCALED BY 1/TOL.
    -C
    -C***ROUTINES CALLED  ZUCHK,AZABS,AZLOG
    -C***END PROLOGUE  ZKSCL
    -C     COMPLEX CK,CS,CY,CZERO,RZ,S1,S2,Y,ZR,ZD,CELM
    -      DOUBLE PRECISION ACS, AS, ASCLE, CKI, CKR, CSI, CSR, CYI,
    -     * CYR, ELIM, FN, FNU, RZI, RZR, STR, S1I, S1R, S2I,
    -     * S2R, TOL, YI, YR, ZEROI, ZEROR, ZRI, ZRR, AZABS,
    -     * ZDR, ZDI, CELMR, ELM, HELIM, ALAS
    -      INTEGER I, IC, IDUM, KK, N, NN, NW, NZ
    -      DIMENSION YR(N), YI(N), CYR(2), CYI(2)
    -      DATA ZEROR,ZEROI / 0.0D0 , 0.0D0 /
    -C
    -      NZ = 0
    -      IC = 0
    -      NN = MIN0(2,N)
    -      DO 10 I=1,NN
    -        S1R = YR(I)
    -        S1I = YI(I)
    -        CYR(I) = S1R
    -        CYI(I) = S1I
    -        AS = AZABS(S1R,S1I)
    -        ACS = -ZRR + DLOG(AS)
    -        NZ = NZ + 1
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -        IF (ACS.LT.(-ELIM)) GO TO 10
    -        CALL AZLOG(S1R, S1I, CSR, CSI, IDUM)
    -        CSR = CSR - ZRR
    -        CSI = CSI - ZRI
    -        STR = DEXP(CSR)/TOL
    -        CSR = STR*DCOS(CSI)
    -        CSI = STR*DSIN(CSI)
    -        CALL ZUCHK(CSR, CSI, NW, ASCLE, TOL)
    -        IF (NW.NE.0) GO TO 10
    -        YR(I) = CSR
    -        YI(I) = CSI
    -        IC = I
    -        NZ = NZ - 1
    -   10 CONTINUE
    -      IF (N.EQ.1) RETURN
    -      IF (IC.GT.1) GO TO 20
    -      YR(1) = ZEROR
    -      YI(1) = ZEROI
    -      NZ = 2
    -   20 CONTINUE
    -      IF (N.EQ.2) RETURN
    -      IF (NZ.EQ.0) RETURN
    -      FN = FNU + 1.0D0
    -      CKR = FN*RZR
    -      CKI = FN*RZI
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      HELIM = 0.5D0*ELIM
    -      ELM = DEXP(-ELIM)
    -      CELMR = ELM
    -      ZDR = ZRR
    -      ZDI = ZRI
    -C
    -C     FIND TWO CONSECUTIVE Y VALUES ON SCALE. SCALE RECURRENCE IF
    -C     S2 GETS LARGER THAN EXP(ELIM/2)
    -C
    -      DO 30 I=3,N
    -        KK = I
    -        CSR = S2R
    -        CSI = S2I
    -        S2R = CKR*CSR - CKI*CSI + S1R
    -        S2I = CKI*CSR + CKR*CSI + S1I
    -        S1R = CSR
    -        S1I = CSI
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        AS = AZABS(S2R,S2I)
    -        ALAS = DLOG(AS)
    -        ACS = -ZDR + ALAS
    -        NZ = NZ + 1
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -        IF (ACS.LT.(-ELIM)) GO TO 25
    -        CALL AZLOG(S2R, S2I, CSR, CSI, IDUM)
    -        CSR = CSR - ZDR
    -        CSI = CSI - ZDI
    -        STR = DEXP(CSR)/TOL
    -        CSR = STR*DCOS(CSI)
    -        CSI = STR*DSIN(CSI)
    -        CALL ZUCHK(CSR, CSI, NW, ASCLE, TOL)
    -        IF (NW.NE.0) GO TO 25
    -        YR(I) = CSR
    -        YI(I) = CSI
    -        NZ = NZ - 1
    -        IF (IC.EQ.KK-1) GO TO 40
    -        IC = KK
    -        GO TO 30
    -   25   CONTINUE
    -        IF(ALAS.LT.HELIM) GO TO 30
    -        ZDR = ZDR - ELIM
    -        S1R = S1R*CELMR
    -        S1I = S1I*CELMR
    -        S2R = S2R*CELMR
    -        S2I = S2I*CELMR
    -   30 CONTINUE
    -      NZ = N
    -      IF(IC.EQ.N) NZ=N-1
    -      GO TO 45
    -   40 CONTINUE
    -      NZ = KK - 2
    -   45 CONTINUE
    -      DO 50 I=1,NZ
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -   50 CONTINUE
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zlog.f b/scipy-0.10.1/scipy/special/amos/zlog.f
    deleted file mode 100644
    index 403d8be074..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zlog.f
    +++ /dev/null
    @@ -1,41 +0,0 @@
    -      SUBROUTINE AZLOG(AR, AI, BR, BI, IERR)
    -C***BEGIN PROLOGUE  AZLOG
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZBESY,ZAIRY,ZBIRY
    -C
    -C     DOUBLE PRECISION COMPLEX LOGARITHM B=CLOG(A)
    -C     IERR=0,NORMAL RETURN      IERR=1, Z=CMPLX(0.0,0.0)
    -C***ROUTINES CALLED  AZABS
    -C***END PROLOGUE  AZLOG
    -      DOUBLE PRECISION AR, AI, BR, BI, ZM, DTHETA, DPI, DHPI
    -      DOUBLE PRECISION AZABS
    -      DATA DPI , DHPI  / 3.141592653589793238462643383D+0,
    -     1                   1.570796326794896619231321696D+0/
    -C
    -      IERR=0
    -      IF (AR.EQ.0.0D+0) GO TO 10
    -      IF (AI.EQ.0.0D+0) GO TO 20
    -      DTHETA = DATAN(AI/AR)
    -      IF (DTHETA.LE.0.0D+0) GO TO 40
    -      IF (AR.LT.0.0D+0) DTHETA = DTHETA - DPI
    -      GO TO 50
    -   10 IF (AI.EQ.0.0D+0) GO TO 60
    -      BI = DHPI
    -      BR = DLOG(DABS(AI))
    -      IF (AI.LT.0.0D+0) BI = -BI
    -      RETURN
    -   20 IF (AR.GT.0.0D+0) GO TO 30
    -      BR = DLOG(DABS(AR))
    -      BI = DPI
    -      RETURN
    -   30 BR = DLOG(AR)
    -      BI = 0.0D+0
    -      RETURN
    -   40 IF (AR.LT.0.0D+0) DTHETA = DTHETA + DPI
    -   50 ZM = AZABS(AR,AI)
    -      BR = DLOG(ZM)
    -      BI = DTHETA
    -      RETURN
    -   60 CONTINUE
    -      IERR=1
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zmlri.f b/scipy-0.10.1/scipy/special/amos/zmlri.f
    deleted file mode 100644
    index 5bc8d774fa..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zmlri.f
    +++ /dev/null
    @@ -1,204 +0,0 @@
    -      SUBROUTINE ZMLRI(ZR, ZI, FNU, KODE, N, YR, YI, NZ, TOL)
    -C***BEGIN PROLOGUE  ZMLRI
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZMLRI COMPUTES THE I BESSEL FUNCTION FOR RE(Z).GE.0.0 BY THE
    -C     MILLER ALGORITHM NORMALIZED BY A NEUMANN SERIES.
    -C
    -C***ROUTINES CALLED  DGAMLN,D1MACH,AZABS,AZEXP,AZLOG,ZMLT
    -C***END PROLOGUE  ZMLRI
    -C     COMPLEX CK,CNORM,CONE,CTWO,CZERO,PT,P1,P2,RZ,SUM,Y,Z
    -      DOUBLE PRECISION ACK, AK, AP, AT, AZ, BK, CKI, CKR, CNORMI,
    -     * CNORMR, CONEI, CONER, FKAP, FKK, FLAM, FNF, FNU, PTI, PTR, P1I,
    -     * P1R, P2I, P2R, RAZ, RHO, RHO2, RZI, RZR, SCLE, STI, STR, SUMI,
    -     * SUMR, TFNF, TOL, TST, YI, YR, ZEROI, ZEROR, ZI, ZR, DGAMLN,
    -     * D1MACH, AZABS
    -      INTEGER I, IAZ, IDUM, IFNU, INU, ITIME, K, KK, KM, KODE, M, N, NZ
    -      DIMENSION YR(N), YI(N)
    -      DATA ZEROR,ZEROI,CONER,CONEI / 0.0D0, 0.0D0, 1.0D0, 0.0D0 /
    -      SCLE = D1MACH(1)/TOL
    -      NZ=0
    -      AZ = AZABS(ZR,ZI)
    -      IAZ = INT(SNGL(AZ))
    -      IFNU = INT(SNGL(FNU))
    -      INU = IFNU + N - 1
    -      AT = DBLE(FLOAT(IAZ)) + 1.0D0
    -      RAZ = 1.0D0/AZ
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      CKR = STR*AT*RAZ
    -      CKI = STI*AT*RAZ
    -      RZR = (STR+STR)*RAZ
    -      RZI = (STI+STI)*RAZ
    -      P1R = ZEROR
    -      P1I = ZEROI
    -      P2R = CONER
    -      P2I = CONEI
    -      ACK = (AT+1.0D0)*RAZ
    -      RHO = ACK + DSQRT(ACK*ACK-1.0D0)
    -      RHO2 = RHO*RHO
    -      TST = (RHO2+RHO2)/((RHO2-1.0D0)*(RHO-1.0D0))
    -      TST = TST/TOL
    -C-----------------------------------------------------------------------
    -C     COMPUTE RELATIVE TRUNCATION ERROR INDEX FOR SERIES
    -C-----------------------------------------------------------------------
    -      AK = AT
    -      DO 10 I=1,80
    -        PTR = P2R
    -        PTI = P2I
    -        P2R = P1R - (CKR*PTR-CKI*PTI)
    -        P2I = P1I - (CKI*PTR+CKR*PTI)
    -        P1R = PTR
    -        P1I = PTI
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        AP = AZABS(P2R,P2I)
    -        IF (AP.GT.TST*AK*AK) GO TO 20
    -        AK = AK + 1.0D0
    -   10 CONTINUE
    -      GO TO 110
    -   20 CONTINUE
    -      I = I + 1
    -      K = 0
    -      IF (INU.LT.IAZ) GO TO 40
    -C-----------------------------------------------------------------------
    -C     COMPUTE RELATIVE TRUNCATION ERROR FOR RATIOS
    -C-----------------------------------------------------------------------
    -      P1R = ZEROR
    -      P1I = ZEROI
    -      P2R = CONER
    -      P2I = CONEI
    -      AT = DBLE(FLOAT(INU)) + 1.0D0
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      CKR = STR*AT*RAZ
    -      CKI = STI*AT*RAZ
    -      ACK = AT*RAZ
    -      TST = DSQRT(ACK/TOL)
    -      ITIME = 1
    -      DO 30 K=1,80
    -        PTR = P2R
    -        PTI = P2I
    -        P2R = P1R - (CKR*PTR-CKI*PTI)
    -        P2I = P1I - (CKR*PTI+CKI*PTR)
    -        P1R = PTR
    -        P1I = PTI
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        AP = AZABS(P2R,P2I)
    -        IF (AP.LT.TST) GO TO 30
    -        IF (ITIME.EQ.2) GO TO 40
    -        ACK = AZABS(CKR,CKI)
    -        FLAM = ACK + DSQRT(ACK*ACK-1.0D0)
    -        FKAP = AP/AZABS(P1R,P1I)
    -        RHO = DMIN1(FLAM,FKAP)
    -        TST = TST*DSQRT(RHO/(RHO*RHO-1.0D0))
    -        ITIME = 2
    -   30 CONTINUE
    -      GO TO 110
    -   40 CONTINUE
    -C-----------------------------------------------------------------------
    -C     BACKWARD RECURRENCE AND SUM NORMALIZING RELATION
    -C-----------------------------------------------------------------------
    -      K = K + 1
    -      KK = MAX0(I+IAZ,K+INU)
    -      FKK = DBLE(FLOAT(KK))
    -      P1R = ZEROR
    -      P1I = ZEROI
    -C-----------------------------------------------------------------------
    -C     SCALE P2 AND SUM BY SCLE
    -C-----------------------------------------------------------------------
    -      P2R = SCLE
    -      P2I = ZEROI
    -      FNF = FNU - DBLE(FLOAT(IFNU))
    -      TFNF = FNF + FNF
    -      BK = DGAMLN(FKK+TFNF+1.0D0,IDUM) - DGAMLN(FKK+1.0D0,IDUM) -
    -     * DGAMLN(TFNF+1.0D0,IDUM)
    -      BK = DEXP(BK)
    -      SUMR = ZEROR
    -      SUMI = ZEROI
    -      KM = KK - INU
    -      DO 50 I=1,KM
    -        PTR = P2R
    -        PTI = P2I
    -        P2R = P1R + (FKK+FNF)*(RZR*PTR-RZI*PTI)
    -        P2I = P1I + (FKK+FNF)*(RZI*PTR+RZR*PTI)
    -        P1R = PTR
    -        P1I = PTI
    -        AK = 1.0D0 - TFNF/(FKK+TFNF)
    -        ACK = BK*AK
    -        SUMR = SUMR + (ACK+BK)*P1R
    -        SUMI = SUMI + (ACK+BK)*P1I
    -        BK = ACK
    -        FKK = FKK - 1.0D0
    -   50 CONTINUE
    -      YR(N) = P2R
    -      YI(N) = P2I
    -      IF (N.EQ.1) GO TO 70
    -      DO 60 I=2,N
    -        PTR = P2R
    -        PTI = P2I
    -        P2R = P1R + (FKK+FNF)*(RZR*PTR-RZI*PTI)
    -        P2I = P1I + (FKK+FNF)*(RZI*PTR+RZR*PTI)
    -        P1R = PTR
    -        P1I = PTI
    -        AK = 1.0D0 - TFNF/(FKK+TFNF)
    -        ACK = BK*AK
    -        SUMR = SUMR + (ACK+BK)*P1R
    -        SUMI = SUMI + (ACK+BK)*P1I
    -        BK = ACK
    -        FKK = FKK - 1.0D0
    -        M = N - I + 1
    -        YR(M) = P2R
    -        YI(M) = P2I
    -   60 CONTINUE
    -   70 CONTINUE
    -      IF (IFNU.LE.0) GO TO 90
    -      DO 80 I=1,IFNU
    -        PTR = P2R
    -        PTI = P2I
    -        P2R = P1R + (FKK+FNF)*(RZR*PTR-RZI*PTI)
    -        P2I = P1I + (FKK+FNF)*(RZR*PTI+RZI*PTR)
    -        P1R = PTR
    -        P1I = PTI
    -        AK = 1.0D0 - TFNF/(FKK+TFNF)
    -        ACK = BK*AK
    -        SUMR = SUMR + (ACK+BK)*P1R
    -        SUMI = SUMI + (ACK+BK)*P1I
    -        BK = ACK
    -        FKK = FKK - 1.0D0
    -   80 CONTINUE
    -   90 CONTINUE
    -      PTR = ZR
    -      PTI = ZI
    -      IF (KODE.EQ.2) PTR = ZEROR
    -      CALL AZLOG(RZR, RZI, STR, STI, IDUM)
    -      P1R = -FNF*STR + PTR
    -      P1I = -FNF*STI + PTI
    -      AP = DGAMLN(1.0D0+FNF,IDUM)
    -      PTR = P1R - AP
    -      PTI = P1I
    -C-----------------------------------------------------------------------
    -C     THE DIVISION CEXP(PT)/(SUM+P2) IS ALTERED TO AVOID OVERFLOW
    -C     IN THE DENOMINATOR BY SQUARING LARGE QUANTITIES
    -C-----------------------------------------------------------------------
    -      P2R = P2R + SUMR
    -      P2I = P2I + SUMI
    -      AP = AZABS(P2R,P2I)
    -      P1R = 1.0D0/AP
    -      CALL AZEXP(PTR, PTI, STR, STI)
    -      CKR = STR*P1R
    -      CKI = STI*P1R
    -      PTR = P2R*P1R
    -      PTI = -P2I*P1R
    -      CALL ZMLT(CKR, CKI, PTR, PTI, CNORMR, CNORMI)
    -      DO 100 I=1,N
    -        STR = YR(I)*CNORMR - YI(I)*CNORMI
    -        YI(I) = YR(I)*CNORMI + YI(I)*CNORMR
    -        YR(I) = STR
    -  100 CONTINUE
    -      RETURN
    -  110 CONTINUE
    -      NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zmlt.f b/scipy-0.10.1/scipy/special/amos/zmlt.f
    deleted file mode 100644
    index 3bde7d3405..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zmlt.f
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -      SUBROUTINE ZMLT(AR, AI, BR, BI, CR, CI)
    -C***BEGIN PROLOGUE  ZMLT
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZBESY,ZAIRY,ZBIRY
    -C
    -C     DOUBLE PRECISION COMPLEX MULTIPLY, C=A*B.
    -C
    -C***ROUTINES CALLED  (NONE)
    -C***END PROLOGUE  ZMLT
    -      DOUBLE PRECISION AR, AI, BR, BI, CR, CI, CA, CB
    -      CA = AR*BR - AI*BI
    -      CB = AR*BI + AI*BR
    -      CR = CA
    -      CI = CB
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zrati.f b/scipy-0.10.1/scipy/special/amos/zrati.f
    deleted file mode 100644
    index d8ab777687..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zrati.f
    +++ /dev/null
    @@ -1,132 +0,0 @@
    -      SUBROUTINE ZRATI(ZR, ZI, FNU, N, CYR, CYI, TOL)
    -C***BEGIN PROLOGUE  ZRATI
    -C***REFER TO  ZBESI,ZBESK,ZBESH
    -C
    -C     ZRATI COMPUTES RATIOS OF I BESSEL FUNCTIONS BY BACKWARD
    -C     RECURRENCE.  THE STARTING INDEX IS DETERMINED BY FORWARD
    -C     RECURRENCE AS DESCRIBED IN J. RES. OF NAT. BUR. OF STANDARDS-B,
    -C     MATHEMATICAL SCIENCES, VOL 77B, P111-114, SEPTEMBER, 1973,
    -C     BESSEL FUNCTIONS I AND J OF COMPLEX ARGUMENT AND INTEGER ORDER,
    -C     BY D. J. SOOKNE.
    -C
    -C***ROUTINES CALLED  AZABS,ZDIV
    -C***END PROLOGUE  ZRATI
    -C     COMPLEX Z,CY(1),CONE,CZERO,P1,P2,T1,RZ,PT,CDFNU
    -      DOUBLE PRECISION AK, AMAGZ, AP1, AP2, ARG, AZ, CDFNUI, CDFNUR,
    -     * CONEI, CONER, CYI, CYR, CZEROI, CZEROR, DFNU, FDNU, FLAM, FNU,
    -     * FNUP, PTI, PTR, P1I, P1R, P2I, P2R, RAK, RAP1, RHO, RT2, RZI,
    -     * RZR, TEST, TEST1, TOL, TTI, TTR, T1I, T1R, ZI, ZR, AZABS
    -      INTEGER I, ID, IDNU, INU, ITIME, K, KK, MAGZ, N
    -      DIMENSION CYR(N), CYI(N)
    -      DATA CZEROR,CZEROI,CONER,CONEI,RT2/
    -     1 0.0D0, 0.0D0, 1.0D0, 0.0D0, 1.41421356237309505D0 /
    -      AZ = AZABS(ZR,ZI)
    -      INU = INT(SNGL(FNU))
    -      IDNU = INU + N - 1
    -      MAGZ = INT(SNGL(AZ))
    -      AMAGZ = DBLE(FLOAT(MAGZ+1))
    -      FDNU = DBLE(FLOAT(IDNU))
    -      FNUP = DMAX1(AMAGZ,FDNU)
    -      ID = IDNU - MAGZ - 1
    -      ITIME = 1
    -      K = 1
    -      PTR = 1.0D0/AZ
    -      RZR = PTR*(ZR+ZR)*PTR
    -      RZI = -PTR*(ZI+ZI)*PTR
    -      T1R = RZR*FNUP
    -      T1I = RZI*FNUP
    -      P2R = -T1R
    -      P2I = -T1I
    -      P1R = CONER
    -      P1I = CONEI
    -      T1R = T1R + RZR
    -      T1I = T1I + RZI
    -      IF (ID.GT.0) ID = 0
    -      AP2 = AZABS(P2R,P2I)
    -      AP1 = AZABS(P1R,P1I)
    -C-----------------------------------------------------------------------
    -C     THE OVERFLOW TEST ON K(FNU+I-1,Z) BEFORE THE CALL TO CBKNU
    -C     GUARANTEES THAT P2 IS ON SCALE. SCALE TEST1 AND ALL SUBSEQUENT
    -C     P2 VALUES BY AP1 TO ENSURE THAT AN OVERFLOW DOES NOT OCCUR
    -C     PREMATURELY.
    -C-----------------------------------------------------------------------
    -      ARG = (AP2+AP2)/(AP1*TOL)
    -      TEST1 = DSQRT(ARG)
    -      TEST = TEST1
    -      RAP1 = 1.0D0/AP1
    -      P1R = P1R*RAP1
    -      P1I = P1I*RAP1
    -      P2R = P2R*RAP1
    -      P2I = P2I*RAP1
    -      AP2 = AP2*RAP1
    -   10 CONTINUE
    -      K = K + 1
    -      AP1 = AP2
    -      PTR = P2R
    -      PTI = P2I
    -      P2R = P1R - (T1R*PTR-T1I*PTI)
    -      P2I = P1I - (T1R*PTI+T1I*PTR)
    -      P1R = PTR
    -      P1I = PTI
    -      T1R = T1R + RZR
    -      T1I = T1I + RZI
    -      AP2 = AZABS(P2R,P2I)
    -      IF (AP1.LE.TEST) GO TO 10
    -      IF (ITIME.EQ.2) GO TO 20
    -      AK = AZABS(T1R,T1I)*0.5D0
    -      FLAM = AK + DSQRT(AK*AK-1.0D0)
    -      RHO = DMIN1(AP2/AP1,FLAM)
    -      TEST = TEST1*DSQRT(RHO/(RHO*RHO-1.0D0))
    -      ITIME = 2
    -      GO TO 10
    -   20 CONTINUE
    -      KK = K + 1 - ID
    -      AK = DBLE(FLOAT(KK))
    -      T1R = AK
    -      T1I = CZEROI
    -      DFNU = FNU + DBLE(FLOAT(N-1))
    -      P1R = 1.0D0/AP2
    -      P1I = CZEROI
    -      P2R = CZEROR
    -      P2I = CZEROI
    -      DO 30 I=1,KK
    -        PTR = P1R
    -        PTI = P1I
    -        RAP1 = DFNU + T1R
    -        TTR = RZR*RAP1
    -        TTI = RZI*RAP1
    -        P1R = (PTR*TTR-PTI*TTI) + P2R
    -        P1I = (PTR*TTI+PTI*TTR) + P2I
    -        P2R = PTR
    -        P2I = PTI
    -        T1R = T1R - CONER
    -   30 CONTINUE
    -      IF (P1R.NE.CZEROR .OR. P1I.NE.CZEROI) GO TO 40
    -      P1R = TOL
    -      P1I = TOL
    -   40 CONTINUE
    -      CALL ZDIV(P2R, P2I, P1R, P1I, CYR(N), CYI(N))
    -      IF (N.EQ.1) RETURN
    -      K = N - 1
    -      AK = DBLE(FLOAT(K))
    -      T1R = AK
    -      T1I = CZEROI
    -      CDFNUR = FNU*RZR
    -      CDFNUI = FNU*RZI
    -      DO 60 I=2,N
    -        PTR = CDFNUR + (T1R*RZR-T1I*RZI) + CYR(K+1)
    -        PTI = CDFNUI + (T1R*RZI+T1I*RZR) + CYI(K+1)
    -        AK = AZABS(PTR,PTI)
    -        IF (AK.NE.CZEROR) GO TO 50
    -        PTR = TOL
    -        PTI = TOL
    -        AK = TOL*RT2
    -   50   CONTINUE
    -        RAK = CONER/AK
    -        CYR(K) = RAK*PTR*RAK
    -        CYI(K) = -RAK*PTI*RAK
    -        T1R = T1R - CONER
    -        K = K - 1
    -   60 CONTINUE
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zs1s2.f b/scipy-0.10.1/scipy/special/amos/zs1s2.f
    deleted file mode 100644
    index 194be444db..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zs1s2.f
    +++ /dev/null
    @@ -1,49 +0,0 @@
    -      SUBROUTINE ZS1S2(ZRR, ZRI, S1R, S1I, S2R, S2I, NZ, ASCLE, ALIM,
    -     * IUF)
    -C***BEGIN PROLOGUE  ZS1S2
    -C***REFER TO  ZBESK,ZAIRY
    -C
    -C     ZS1S2 TESTS FOR A POSSIBLE UNDERFLOW RESULTING FROM THE
    -C     ADDITION OF THE I AND K FUNCTIONS IN THE ANALYTIC CON-
    -C     TINUATION FORMULA WHERE S1=K FUNCTION AND S2=I FUNCTION.
    -C     ON KODE=1 THE I AND K FUNCTIONS ARE DIFFERENT ORDERS OF
    -C     MAGNITUDE, BUT FOR KODE=2 THEY CAN BE OF THE SAME ORDER
    -C     OF MAGNITUDE AND THE MAXIMUM MUST BE AT LEAST ONE
    -C     PRECISION ABOVE THE UNDERFLOW LIMIT.
    -C
    -C***ROUTINES CALLED  AZABS,AZEXP,AZLOG
    -C***END PROLOGUE  ZS1S2
    -C     COMPLEX CZERO,C1,S1,S1D,S2,ZR
    -      DOUBLE PRECISION AA, ALIM, ALN, ASCLE, AS1, AS2, C1I, C1R, S1DI,
    -     * S1DR, S1I, S1R, S2I, S2R, ZEROI, ZEROR, ZRI, ZRR, AZABS
    -      INTEGER IUF, IDUM, NZ
    -      DATA ZEROR,ZEROI  / 0.0D0 , 0.0D0 /
    -      NZ = 0
    -      AS1 = AZABS(S1R,S1I)
    -      AS2 = AZABS(S2R,S2I)
    -      IF (S1R.EQ.0.0D0 .AND. S1I.EQ.0.0D0) GO TO 10
    -      IF (AS1.EQ.0.0D0) GO TO 10
    -      ALN = -ZRR - ZRR + DLOG(AS1)
    -      S1DR = S1R
    -      S1DI = S1I
    -      S1R = ZEROR
    -      S1I = ZEROI
    -      AS1 = ZEROR
    -      IF (ALN.LT.(-ALIM)) GO TO 10
    -      CALL AZLOG(S1DR, S1DI, C1R, C1I, IDUM)
    -      C1R = C1R - ZRR - ZRR
    -      C1I = C1I - ZRI - ZRI
    -      CALL AZEXP(C1R, C1I, S1R, S1I)
    -      AS1 = AZABS(S1R,S1I)
    -      IUF = IUF + 1
    -   10 CONTINUE
    -      AA = DMAX1(AS1,AS2)
    -      IF (AA.GT.ASCLE) RETURN
    -      S1R = ZEROR
    -      S1I = ZEROI
    -      S2R = ZEROR
    -      S2I = ZEROI
    -      NZ = 1
    -      IUF = 0
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zseri.f b/scipy-0.10.1/scipy/special/amos/zseri.f
    deleted file mode 100644
    index 4c487f08b5..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zseri.f
    +++ /dev/null
    @@ -1,190 +0,0 @@
    -      SUBROUTINE ZSERI(ZR, ZI, FNU, KODE, N, YR, YI, NZ, TOL, ELIM,
    -     * ALIM)
    -C***BEGIN PROLOGUE  ZSERI
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZSERI COMPUTES THE I BESSEL FUNCTION FOR REAL(Z).GE.0.0 BY
    -C     MEANS OF THE POWER SERIES FOR LARGE CABS(Z) IN THE
    -C     REGION CABS(Z).LE.2*SQRT(FNU+1). NZ=0 IS A NORMAL RETURN.
    -C     NZ.GT.0 MEANS THAT THE LAST NZ COMPONENTS WERE SET TO ZERO
    -C     DUE TO UNDERFLOW. NZ.LT.0 MEANS UNDERFLOW OCCURRED, BUT THE
    -C     CONDITION CABS(Z).LE.2*SQRT(FNU+1) WAS VIOLATED AND THE
    -C     COMPUTATION MUST BE COMPLETED IN ANOTHER ROUTINE WITH N=N-ABS(NZ).
    -C
    -C***ROUTINES CALLED  DGAMLN,D1MACH,ZUCHK,AZABS,ZDIV,AZLOG,ZMLT
    -C***END PROLOGUE  ZSERI
    -C     COMPLEX AK1,CK,COEF,CONE,CRSC,CSCL,CZ,CZERO,HZ,RZ,S1,S2,Y,Z
    -      DOUBLE PRECISION AA, ACZ, AK, AK1I, AK1R, ALIM, ARM, ASCLE, ATOL,
    -     * AZ, CKI, CKR, COEFI, COEFR, CONEI, CONER, CRSCR, CZI, CZR, DFNU,
    -     * ELIM, FNU, FNUP, HZI, HZR, RAZ, RS, RTR1, RZI, RZR, S, SS, STI,
    -     * STR, S1I, S1R, S2I, S2R, TOL, YI, YR, WI, WR, ZEROI, ZEROR, ZI,
    -     * ZR, DGAMLN, D1MACH, AZABS
    -      INTEGER I, IB, IDUM, IFLAG, IL, K, KODE, L, M, N, NN, NZ, NW
    -      DIMENSION YR(N), YI(N), WR(2), WI(2)
    -      DATA ZEROR,ZEROI,CONER,CONEI / 0.0D0, 0.0D0, 1.0D0, 0.0D0 /
    -C
    -      NZ = 0
    -      AZ = AZABS(ZR,ZI)
    -      IF (AZ.EQ.0.0D0) GO TO 160
    -      ARM = 1.0D+3*D1MACH(1)
    -      RTR1 = DSQRT(ARM)
    -      CRSCR = 1.0D0
    -      IFLAG = 0
    -      IF (AZ.LT.ARM) GO TO 150
    -      HZR = 0.5D0*ZR
    -      HZI = 0.5D0*ZI
    -      CZR = ZEROR
    -      CZI = ZEROI
    -      IF (AZ.LE.RTR1) GO TO 10
    -      CALL ZMLT(HZR, HZI, HZR, HZI, CZR, CZI)
    -   10 CONTINUE
    -      ACZ = AZABS(CZR,CZI)
    -      NN = N
    -      CALL AZLOG(HZR, HZI, CKR, CKI, IDUM)
    -   20 CONTINUE
    -      DFNU = FNU + DBLE(FLOAT(NN-1))
    -      FNUP = DFNU + 1.0D0
    -C-----------------------------------------------------------------------
    -C     UNDERFLOW TEST
    -C-----------------------------------------------------------------------
    -      AK1R = CKR*DFNU
    -      AK1I = CKI*DFNU
    -      AK = DGAMLN(FNUP,IDUM)
    -      AK1R = AK1R - AK
    -      IF (KODE.EQ.2) AK1R = AK1R - ZR
    -      IF (AK1R.GT.(-ELIM)) GO TO 40
    -   30 CONTINUE
    -      NZ = NZ + 1
    -      YR(NN) = ZEROR
    -      YI(NN) = ZEROI
    -      IF (ACZ.GT.DFNU) GO TO 190
    -      NN = NN - 1
    -      IF (NN.EQ.0) RETURN
    -      GO TO 20
    -   40 CONTINUE
    -      IF (AK1R.GT.(-ALIM)) GO TO 50
    -      IFLAG = 1
    -      SS = 1.0D0/TOL
    -      CRSCR = TOL
    -      ASCLE = ARM*SS
    -   50 CONTINUE
    -      AA = DEXP(AK1R)
    -      IF (IFLAG.EQ.1) AA = AA*SS
    -      COEFR = AA*DCOS(AK1I)
    -      COEFI = AA*DSIN(AK1I)
    -      ATOL = TOL*ACZ/FNUP
    -      IL = MIN0(2,NN)
    -      DO 90 I=1,IL
    -        DFNU = FNU + DBLE(FLOAT(NN-I))
    -        FNUP = DFNU + 1.0D0
    -        S1R = CONER
    -        S1I = CONEI
    -        IF (ACZ.LT.TOL*FNUP) GO TO 70
    -        AK1R = CONER
    -        AK1I = CONEI
    -        AK = FNUP + 2.0D0
    -        S = FNUP
    -        AA = 2.0D0
    -   60   CONTINUE
    -        RS = 1.0D0/S
    -        STR = AK1R*CZR - AK1I*CZI
    -        STI = AK1R*CZI + AK1I*CZR
    -        AK1R = STR*RS
    -        AK1I = STI*RS
    -        S1R = S1R + AK1R
    -        S1I = S1I + AK1I
    -        S = S + AK
    -        AK = AK + 2.0D0
    -        AA = AA*ACZ*RS
    -        IF (AA.GT.ATOL) GO TO 60
    -   70   CONTINUE
    -        S2R = S1R*COEFR - S1I*COEFI
    -        S2I = S1R*COEFI + S1I*COEFR
    -        WR(I) = S2R
    -        WI(I) = S2I
    -        IF (IFLAG.EQ.0) GO TO 80
    -        CALL ZUCHK(S2R, S2I, NW, ASCLE, TOL)
    -        IF (NW.NE.0) GO TO 30
    -   80   CONTINUE
    -        M = NN - I + 1
    -        YR(M) = S2R*CRSCR
    -        YI(M) = S2I*CRSCR
    -        IF (I.EQ.IL) GO TO 90
    -        CALL ZDIV(COEFR, COEFI, HZR, HZI, STR, STI)
    -        COEFR = STR*DFNU
    -        COEFI = STI*DFNU
    -   90 CONTINUE
    -      IF (NN.LE.2) RETURN
    -      K = NN - 2
    -      AK = DBLE(FLOAT(K))
    -      RAZ = 1.0D0/AZ
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      RZR = (STR+STR)*RAZ
    -      RZI = (STI+STI)*RAZ
    -      IF (IFLAG.EQ.1) GO TO 120
    -      IB = 3
    -  100 CONTINUE
    -      DO 110 I=IB,NN
    -        YR(K) = (AK+FNU)*(RZR*YR(K+1)-RZI*YI(K+1)) + YR(K+2)
    -        YI(K) = (AK+FNU)*(RZR*YI(K+1)+RZI*YR(K+1)) + YI(K+2)
    -        AK = AK - 1.0D0
    -        K = K - 1
    -  110 CONTINUE
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     RECUR BACKWARD WITH SCALED VALUES
    -C-----------------------------------------------------------------------
    -  120 CONTINUE
    -C-----------------------------------------------------------------------
    -C     EXP(-ALIM)=EXP(-ELIM)/TOL=APPROX. ONE PRECISION ABOVE THE
    -C     UNDERFLOW LIMIT = ASCLE = D1MACH(1)*SS*1.0D+3
    -C-----------------------------------------------------------------------
    -      S1R = WR(1)
    -      S1I = WI(1)
    -      S2R = WR(2)
    -      S2I = WI(2)
    -      DO 130 L=3,NN
    -        CKR = S2R
    -        CKI = S2I
    -        S2R = S1R + (AK+FNU)*(RZR*CKR-RZI*CKI)
    -        S2I = S1I + (AK+FNU)*(RZR*CKI+RZI*CKR)
    -        S1R = CKR
    -        S1I = CKI
    -        CKR = S2R*CRSCR
    -        CKI = S2I*CRSCR
    -        YR(K) = CKR
    -        YI(K) = CKI
    -        AK = AK - 1.0D0
    -        K = K - 1
    -        IF (AZABS(CKR,CKI).GT.ASCLE) GO TO 140
    -  130 CONTINUE
    -      RETURN
    -  140 CONTINUE
    -      IB = L + 1
    -      IF (IB.GT.NN) RETURN
    -      GO TO 100
    -  150 CONTINUE
    -      NZ = N
    -      IF (FNU.EQ.0.0D0) NZ = NZ - 1
    -  160 CONTINUE
    -      YR(1) = ZEROR
    -      YI(1) = ZEROI
    -      IF (FNU.NE.0.0D0) GO TO 170
    -      YR(1) = CONER
    -      YI(1) = CONEI
    -  170 CONTINUE
    -      IF (N.EQ.1) RETURN
    -      DO 180 I=2,N
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -  180 CONTINUE
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     RETURN WITH NZ.LT.0 IF CABS(Z*Z/4).GT.FNU+N-NZ-1 COMPLETE
    -C     THE CALCULATION IN CBINU WITH N=N-IABS(NZ)
    -C-----------------------------------------------------------------------
    -  190 CONTINUE
    -      NZ = -NZ
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zshch.f b/scipy-0.10.1/scipy/special/amos/zshch.f
    deleted file mode 100644
    index 168e62e52f..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zshch.f
    +++ /dev/null
    @@ -1,22 +0,0 @@
    -      SUBROUTINE ZSHCH(ZR, ZI, CSHR, CSHI, CCHR, CCHI)
    -C***BEGIN PROLOGUE  ZSHCH
    -C***REFER TO  ZBESK,ZBESH
    -C
    -C     ZSHCH COMPUTES THE COMPLEX HYPERBOLIC FUNCTIONS CSH=SINH(X+I*Y)
    -C     AND CCH=COSH(X+I*Y), WHERE I**2=-1.
    -C
    -C***ROUTINES CALLED  (NONE)
    -C***END PROLOGUE  ZSHCH
    -C
    -      DOUBLE PRECISION CCHI, CCHR, CH, CN, CSHI, CSHR, SH, SN, ZI, ZR,
    -     * DCOSH, DSINH
    -      SH = DSINH(ZR)
    -      CH = DCOSH(ZR)
    -      SN = DSIN(ZI)
    -      CN = DCOS(ZI)
    -      CSHR = SH*CN
    -      CSHI = CH*SN
    -      CCHR = CH*CN
    -      CCHI = SH*SN
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zsqrt.f b/scipy-0.10.1/scipy/special/amos/zsqrt.f
    deleted file mode 100644
    index 43ba617f4e..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zsqrt.f
    +++ /dev/null
    @@ -1,44 +0,0 @@
    -      SUBROUTINE AZSQRT(AR, AI, BR, BI)
    -C***BEGIN PROLOGUE  AZSQRT
    -C***REFER TO  ZBESH,ZBESI,ZBESJ,ZBESK,ZBESY,ZAIRY,ZBIRY
    -C
    -C     DOUBLE PRECISION COMPLEX SQUARE ROOT, B=CSQRT(A)
    -C
    -C***ROUTINES CALLED  AZABS
    -C***END PROLOGUE  AZSQRT
    -      DOUBLE PRECISION AR, AI, BR, BI, ZM, DTHETA, DPI, DRT
    -      DOUBLE PRECISION AZABS
    -      DATA DRT , DPI / 7.071067811865475244008443621D-1,
    -     1                 3.141592653589793238462643383D+0/
    -      ZM = AZABS(AR,AI)
    -      ZM = DSQRT(ZM)
    -      IF (AR.EQ.0.0D+0) GO TO 10
    -      IF (AI.EQ.0.0D+0) GO TO 20
    -      DTHETA = DATAN(AI/AR)
    -      IF (DTHETA.LE.0.0D+0) GO TO 40
    -      IF (AR.LT.0.0D+0) DTHETA = DTHETA - DPI
    -      GO TO 50
    -   10 IF (AI.GT.0.0D+0) GO TO 60
    -      IF (AI.LT.0.0D+0) GO TO 70
    -      BR = 0.0D+0
    -      BI = 0.0D+0
    -      RETURN
    -   20 IF (AR.GT.0.0D+0) GO TO 30
    -      BR = 0.0D+0
    -      BI = DSQRT(DABS(AR))
    -      RETURN
    -   30 BR = DSQRT(AR)
    -      BI = 0.0D+0
    -      RETURN
    -   40 IF (AR.LT.0.0D+0) DTHETA = DTHETA + DPI
    -   50 DTHETA = DTHETA*0.5D+0
    -      BR = ZM*DCOS(DTHETA)
    -      BI = ZM*DSIN(DTHETA)
    -      RETURN
    -   60 BR = ZM*DRT
    -      BI = ZM*DRT
    -      RETURN
    -   70 BR = ZM*DRT
    -      BI = -ZM*DRT
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zuchk.f b/scipy-0.10.1/scipy/special/amos/zuchk.f
    deleted file mode 100644
    index d15dc841ed..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zuchk.f
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -      SUBROUTINE ZUCHK(YR, YI, NZ, ASCLE, TOL)
    -C***BEGIN PROLOGUE  ZUCHK
    -C***REFER TO ZSERI,ZUOIK,ZUNK1,ZUNK2,ZUNI1,ZUNI2,ZKSCL
    -C
    -C      Y ENTERS AS A SCALED QUANTITY WHOSE MAGNITUDE IS GREATER THAN
    -C      EXP(-ALIM)=ASCLE=1.0E+3*D1MACH(1)/TOL. THE TEST IS MADE TO SEE
    -C      IF THE MAGNITUDE OF THE REAL OR IMAGINARY PART WOULD UNDERFLOW
    -C      WHEN Y IS SCALED (BY TOL) TO ITS PROPER VALUE. Y IS ACCEPTED
    -C      IF THE UNDERFLOW IS AT LEAST ONE PRECISION BELOW THE MAGNITUDE
    -C      OF THE LARGEST COMPONENT; OTHERWISE THE PHASE ANGLE DOES NOT HAVE
    -C      ABSOLUTE ACCURACY AND AN UNDERFLOW IS ASSUMED.
    -C
    -C***ROUTINES CALLED  (NONE)
    -C***END PROLOGUE  ZUCHK
    -C
    -C     COMPLEX Y
    -      DOUBLE PRECISION ASCLE, SS, ST, TOL, WR, WI, YR, YI
    -      INTEGER NZ
    -      NZ = 0
    -      WR = DABS(YR)
    -      WI = DABS(YI)
    -      ST = DMIN1(WR,WI)
    -      IF (ST.GT.ASCLE) RETURN
    -      SS = DMAX1(WR,WI)
    -      ST = ST/TOL
    -      IF (SS.LT.ST) NZ = 1
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zunhj.f b/scipy-0.10.1/scipy/special/amos/zunhj.f
    deleted file mode 100644
    index e56e43c391..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zunhj.f
    +++ /dev/null
    @@ -1,714 +0,0 @@
    -      SUBROUTINE ZUNHJ(ZR, ZI, FNU, IPMTR, TOL, PHIR, PHII, ARGR, ARGI,
    -     * ZETA1R, ZETA1I, ZETA2R, ZETA2I, ASUMR, ASUMI, BSUMR, BSUMI)
    -C***BEGIN PROLOGUE  ZUNHJ
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     REFERENCES
    -C         HANDBOOK OF MATHEMATICAL FUNCTIONS BY M. ABRAMOWITZ AND I.A.
    -C         STEGUN, AMS55, NATIONAL BUREAU OF STANDARDS, 1965, CHAPTER 9.
    -C
    -C         ASYMPTOTICS AND SPECIAL FUNCTIONS BY F.W.J. OLVER, ACADEMIC
    -C         PRESS, N.Y., 1974, PAGE 420
    -C
    -C     ABSTRACT
    -C         ZUNHJ COMPUTES PARAMETERS FOR BESSEL FUNCTIONS C(FNU,Z) =
    -C         J(FNU,Z), Y(FNU,Z) OR H(I,FNU,Z) I=1,2 FOR LARGE ORDERS FNU
    -C         BY MEANS OF THE UNIFORM ASYMPTOTIC EXPANSION
    -C
    -C         C(FNU,Z)=C1*PHI*( ASUM*AIRY(ARG) + C2*BSUM*DAIRY(ARG) )
    -C
    -C         FOR PROPER CHOICES OF C1, C2, AIRY AND DAIRY WHERE AIRY IS
    -C         AN AIRY FUNCTION AND DAIRY IS ITS DERIVATIVE.
    -C
    -C               (2/3)*FNU*ZETA**1.5 = ZETA1-ZETA2,
    -C
    -C         ZETA1=0.5*FNU*CLOG((1+W)/(1-W)), ZETA2=FNU*W FOR SCALING
    -C         PURPOSES IN AIRY FUNCTIONS FROM CAIRY OR CBIRY.
    -C
    -C         MCONJ=SIGN OF AIMAG(Z), BUT IS AMBIGUOUS WHEN Z IS REAL AND
    -C         MUST BE SPECIFIED. IPMTR=0 RETURNS ALL PARAMETERS. IPMTR=
    -C         1 COMPUTES ALL EXCEPT ASUM AND BSUM.
    -C
    -C***ROUTINES CALLED  AZABS,ZDIV,AZLOG,AZSQRT,D1MACH
    -C***END PROLOGUE  ZUNHJ
    -C     COMPLEX ARG,ASUM,BSUM,CFNU,CONE,CR,CZERO,DR,P,PHI,PRZTH,PTFN,
    -C    *RFN13,RTZTA,RZTH,SUMA,SUMB,TFN,T2,UP,W,W2,Z,ZA,ZB,ZC,ZETA,ZETA1,
    -C    *ZETA2,ZTH
    -      DOUBLE PRECISION ALFA, ANG, AP, AR, ARGI, ARGR, ASUMI, ASUMR,
    -     * ATOL, AW2, AZTH, BETA, BR, BSUMI, BSUMR, BTOL, C, CONEI, CONER,
    -     * CRI, CRR, DRI, DRR, EX1, EX2, FNU, FN13, FN23, GAMA, GPI, HPI,
    -     * PHII, PHIR, PI, PP, PR, PRZTHI, PRZTHR, PTFNI, PTFNR, RAW, RAW2,
    -     * RAZTH, RFNU, RFNU2, RFN13, RTZTI, RTZTR, RZTHI, RZTHR, STI, STR,
    -     * SUMAI, SUMAR, SUMBI, SUMBR, TEST, TFNI, TFNR, THPI, TOL, TZAI,
    -     * TZAR, T2I, T2R, UPI, UPR, WI, WR, W2I, W2R, ZAI, ZAR, ZBI, ZBR,
    -     * ZCI, ZCR, ZEROI, ZEROR, ZETAI, ZETAR, ZETA1I, ZETA1R, ZETA2I,
    -     * ZETA2R, ZI, ZR, ZTHI, ZTHR, AZABS, AC, D1MACH
    -      INTEGER IAS, IBS, IPMTR, IS, J, JR, JU, K, KMAX, KP1, KS, L, LR,
    -     * LRP1, L1, L2, M, IDUM
    -      DIMENSION AR(14), BR(14), C(105), ALFA(180), BETA(210), GAMA(30),
    -     * AP(30), PR(30), PI(30), UPR(14), UPI(14), CRR(14), CRI(14),
    -     * DRR(14), DRI(14)
    -      DATA AR(1), AR(2), AR(3), AR(4), AR(5), AR(6), AR(7), AR(8),
    -     1     AR(9), AR(10), AR(11), AR(12), AR(13), AR(14)/
    -     2     1.00000000000000000D+00,     1.04166666666666667D-01,
    -     3     8.35503472222222222D-02,     1.28226574556327160D-01,
    -     4     2.91849026464140464D-01,     8.81627267443757652D-01,
    -     5     3.32140828186276754D+00,     1.49957629868625547D+01,
    -     6     7.89230130115865181D+01,     4.74451538868264323D+02,
    -     7     3.20749009089066193D+03,     2.40865496408740049D+04,
    -     8     1.98923119169509794D+05,     1.79190200777534383D+06/
    -      DATA BR(1), BR(2), BR(3), BR(4), BR(5), BR(6), BR(7), BR(8),
    -     1     BR(9), BR(10), BR(11), BR(12), BR(13), BR(14)/
    -     2     1.00000000000000000D+00,    -1.45833333333333333D-01,
    -     3    -9.87413194444444444D-02,    -1.43312053915895062D-01,
    -     4    -3.17227202678413548D-01,    -9.42429147957120249D-01,
    -     5    -3.51120304082635426D+00,    -1.57272636203680451D+01,
    -     6    -8.22814390971859444D+01,    -4.92355370523670524D+02,
    -     7    -3.31621856854797251D+03,    -2.48276742452085896D+04,
    -     8    -2.04526587315129788D+05,    -1.83844491706820990D+06/
    -      DATA C(1), C(2), C(3), C(4), C(5), C(6), C(7), C(8), C(9), C(10),
    -     1     C(11), C(12), C(13), C(14), C(15), C(16), C(17), C(18),
    -     2     C(19), C(20), C(21), C(22), C(23), C(24)/
    -     3     1.00000000000000000D+00,    -2.08333333333333333D-01,
    -     4     1.25000000000000000D-01,     3.34201388888888889D-01,
    -     5    -4.01041666666666667D-01,     7.03125000000000000D-02,
    -     6    -1.02581259645061728D+00,     1.84646267361111111D+00,
    -     7    -8.91210937500000000D-01,     7.32421875000000000D-02,
    -     8     4.66958442342624743D+00,    -1.12070026162229938D+01,
    -     9     8.78912353515625000D+00,    -2.36408691406250000D+00,
    -     A     1.12152099609375000D-01,    -2.82120725582002449D+01,
    -     B     8.46362176746007346D+01,    -9.18182415432400174D+01,
    -     C     4.25349987453884549D+01,    -7.36879435947963170D+00,
    -     D     2.27108001708984375D-01,     2.12570130039217123D+02,
    -     E    -7.65252468141181642D+02,     1.05999045252799988D+03/
    -      DATA C(25), C(26), C(27), C(28), C(29), C(30), C(31), C(32),
    -     1     C(33), C(34), C(35), C(36), C(37), C(38), C(39), C(40),
    -     2     C(41), C(42), C(43), C(44), C(45), C(46), C(47), C(48)/
    -     3    -6.99579627376132541D+02,     2.18190511744211590D+02,
    -     4    -2.64914304869515555D+01,     5.72501420974731445D-01,
    -     5    -1.91945766231840700D+03,     8.06172218173730938D+03,
    -     6    -1.35865500064341374D+04,     1.16553933368645332D+04,
    -     7    -5.30564697861340311D+03,     1.20090291321635246D+03,
    -     8    -1.08090919788394656D+02,     1.72772750258445740D+00,
    -     9     2.02042913309661486D+04,    -9.69805983886375135D+04,
    -     A     1.92547001232531532D+05,    -2.03400177280415534D+05,
    -     B     1.22200464983017460D+05,    -4.11926549688975513D+04,
    -     C     7.10951430248936372D+03,    -4.93915304773088012D+02,
    -     D     6.07404200127348304D+00,    -2.42919187900551333D+05,
    -     E     1.31176361466297720D+06,    -2.99801591853810675D+06/
    -      DATA C(49), C(50), C(51), C(52), C(53), C(54), C(55), C(56),
    -     1     C(57), C(58), C(59), C(60), C(61), C(62), C(63), C(64),
    -     2     C(65), C(66), C(67), C(68), C(69), C(70), C(71), C(72)/
    -     3     3.76327129765640400D+06,    -2.81356322658653411D+06,
    -     4     1.26836527332162478D+06,    -3.31645172484563578D+05,
    -     5     4.52187689813627263D+04,    -2.49983048181120962D+03,
    -     6     2.43805296995560639D+01,     3.28446985307203782D+06,
    -     7    -1.97068191184322269D+07,     5.09526024926646422D+07,
    -     8    -7.41051482115326577D+07,     6.63445122747290267D+07,
    -     9    -3.75671766607633513D+07,     1.32887671664218183D+07,
    -     A    -2.78561812808645469D+06,     3.08186404612662398D+05,
    -     B    -1.38860897537170405D+04,     1.10017140269246738D+02,
    -     C    -4.93292536645099620D+07,     3.25573074185765749D+08,
    -     D    -9.39462359681578403D+08,     1.55359689957058006D+09,
    -     E    -1.62108055210833708D+09,     1.10684281682301447D+09/
    -      DATA C(73), C(74), C(75), C(76), C(77), C(78), C(79), C(80),
    -     1     C(81), C(82), C(83), C(84), C(85), C(86), C(87), C(88),
    -     2     C(89), C(90), C(91), C(92), C(93), C(94), C(95), C(96)/
    -     3    -4.95889784275030309D+08,     1.42062907797533095D+08,
    -     4    -2.44740627257387285D+07,     2.24376817792244943D+06,
    -     5    -8.40054336030240853D+04,     5.51335896122020586D+02,
    -     6     8.14789096118312115D+08,    -5.86648149205184723D+09,
    -     7     1.86882075092958249D+10,    -3.46320433881587779D+10,
    -     8     4.12801855797539740D+10,    -3.30265997498007231D+10,
    -     9     1.79542137311556001D+10,    -6.56329379261928433D+09,
    -     A     1.55927986487925751D+09,    -2.25105661889415278D+08,
    -     B     1.73951075539781645D+07,    -5.49842327572288687D+05,
    -     C     3.03809051092238427D+03,    -1.46792612476956167D+10,
    -     D     1.14498237732025810D+11,    -3.99096175224466498D+11,
    -     E     8.19218669548577329D+11,    -1.09837515608122331D+12/
    -      DATA C(97), C(98), C(99), C(100), C(101), C(102), C(103), C(104),
    -     1     C(105)/
    -     2     1.00815810686538209D+12,    -6.45364869245376503D+11,
    -     3     2.87900649906150589D+11,    -8.78670721780232657D+10,
    -     4     1.76347306068349694D+10,    -2.16716498322379509D+09,
    -     5     1.43157876718888981D+08,    -3.87183344257261262D+06,
    -     6     1.82577554742931747D+04/
    -      DATA ALFA(1), ALFA(2), ALFA(3), ALFA(4), ALFA(5), ALFA(6),
    -     1     ALFA(7), ALFA(8), ALFA(9), ALFA(10), ALFA(11), ALFA(12),
    -     2     ALFA(13), ALFA(14), ALFA(15), ALFA(16), ALFA(17), ALFA(18),
    -     3     ALFA(19), ALFA(20), ALFA(21), ALFA(22)/
    -     4    -4.44444444444444444D-03,    -9.22077922077922078D-04,
    -     5    -8.84892884892884893D-05,     1.65927687832449737D-04,
    -     6     2.46691372741792910D-04,     2.65995589346254780D-04,
    -     7     2.61824297061500945D-04,     2.48730437344655609D-04,
    -     8     2.32721040083232098D-04,     2.16362485712365082D-04,
    -     9     2.00738858762752355D-04,     1.86267636637545172D-04,
    -     A     1.73060775917876493D-04,     1.61091705929015752D-04,
    -     B     1.50274774160908134D-04,     1.40503497391269794D-04,
    -     C     1.31668816545922806D-04,     1.23667445598253261D-04,
    -     D     1.16405271474737902D-04,     1.09798298372713369D-04,
    -     E     1.03772410422992823D-04,     9.82626078369363448D-05/
    -      DATA ALFA(23), ALFA(24), ALFA(25), ALFA(26), ALFA(27), ALFA(28),
    -     1     ALFA(29), ALFA(30), ALFA(31), ALFA(32), ALFA(33), ALFA(34),
    -     2     ALFA(35), ALFA(36), ALFA(37), ALFA(38), ALFA(39), ALFA(40),
    -     3     ALFA(41), ALFA(42), ALFA(43), ALFA(44)/
    -     4     9.32120517249503256D-05,     8.85710852478711718D-05,
    -     5     8.42963105715700223D-05,     8.03497548407791151D-05,
    -     6     7.66981345359207388D-05,     7.33122157481777809D-05,
    -     7     7.01662625163141333D-05,     6.72375633790160292D-05,
    -     8     6.93735541354588974D-04,     2.32241745182921654D-04,
    -     9    -1.41986273556691197D-05,    -1.16444931672048640D-04,
    -     A    -1.50803558053048762D-04,    -1.55121924918096223D-04,
    -     B    -1.46809756646465549D-04,    -1.33815503867491367D-04,
    -     C    -1.19744975684254051D-04,    -1.06184319207974020D-04,
    -     D    -9.37699549891194492D-05,    -8.26923045588193274D-05,
    -     E    -7.29374348155221211D-05,    -6.44042357721016283D-05/
    -      DATA ALFA(45), ALFA(46), ALFA(47), ALFA(48), ALFA(49), ALFA(50),
    -     1     ALFA(51), ALFA(52), ALFA(53), ALFA(54), ALFA(55), ALFA(56),
    -     2     ALFA(57), ALFA(58), ALFA(59), ALFA(60), ALFA(61), ALFA(62),
    -     3     ALFA(63), ALFA(64), ALFA(65), ALFA(66)/
    -     4    -5.69611566009369048D-05,    -5.04731044303561628D-05,
    -     5    -4.48134868008882786D-05,    -3.98688727717598864D-05,
    -     6    -3.55400532972042498D-05,    -3.17414256609022480D-05,
    -     7    -2.83996793904174811D-05,    -2.54522720634870566D-05,
    -     8    -2.28459297164724555D-05,    -2.05352753106480604D-05,
    -     9    -1.84816217627666085D-05,    -1.66519330021393806D-05,
    -     A    -1.50179412980119482D-05,    -1.35554031379040526D-05,
    -     B    -1.22434746473858131D-05,    -1.10641884811308169D-05,
    -     C    -3.54211971457743841D-04,    -1.56161263945159416D-04,
    -     D     3.04465503594936410D-05,     1.30198655773242693D-04,
    -     E     1.67471106699712269D-04,     1.70222587683592569D-04/
    -      DATA ALFA(67), ALFA(68), ALFA(69), ALFA(70), ALFA(71), ALFA(72),
    -     1     ALFA(73), ALFA(74), ALFA(75), ALFA(76), ALFA(77), ALFA(78),
    -     2     ALFA(79), ALFA(80), ALFA(81), ALFA(82), ALFA(83), ALFA(84),
    -     3     ALFA(85), ALFA(86), ALFA(87), ALFA(88)/
    -     4     1.56501427608594704D-04,     1.36339170977445120D-04,
    -     5     1.14886692029825128D-04,     9.45869093034688111D-05,
    -     6     7.64498419250898258D-05,     6.07570334965197354D-05,
    -     7     4.74394299290508799D-05,     3.62757512005344297D-05,
    -     8     2.69939714979224901D-05,     1.93210938247939253D-05,
    -     9     1.30056674793963203D-05,     7.82620866744496661D-06,
    -     A     3.59257485819351583D-06,     1.44040049814251817D-07,
    -     B    -2.65396769697939116D-06,    -4.91346867098485910D-06,
    -     C    -6.72739296091248287D-06,    -8.17269379678657923D-06,
    -     D    -9.31304715093561232D-06,    -1.02011418798016441D-05,
    -     E    -1.08805962510592880D-05,    -1.13875481509603555D-05/
    -      DATA ALFA(89), ALFA(90), ALFA(91), ALFA(92), ALFA(93), ALFA(94),
    -     1     ALFA(95), ALFA(96), ALFA(97), ALFA(98), ALFA(99), ALFA(100),
    -     2     ALFA(101), ALFA(102), ALFA(103), ALFA(104), ALFA(105),
    -     3     ALFA(106), ALFA(107), ALFA(108), ALFA(109), ALFA(110)/
    -     4    -1.17519675674556414D-05,    -1.19987364870944141D-05,
    -     5     3.78194199201772914D-04,     2.02471952761816167D-04,
    -     6    -6.37938506318862408D-05,    -2.38598230603005903D-04,
    -     7    -3.10916256027361568D-04,    -3.13680115247576316D-04,
    -     8    -2.78950273791323387D-04,    -2.28564082619141374D-04,
    -     9    -1.75245280340846749D-04,    -1.25544063060690348D-04,
    -     A    -8.22982872820208365D-05,    -4.62860730588116458D-05,
    -     B    -1.72334302366962267D-05,     5.60690482304602267D-06,
    -     C     2.31395443148286800D-05,     3.62642745856793957D-05,
    -     D     4.58006124490188752D-05,     5.24595294959114050D-05,
    -     E     5.68396208545815266D-05,     5.94349820393104052D-05/
    -      DATA ALFA(111), ALFA(112), ALFA(113), ALFA(114), ALFA(115),
    -     1     ALFA(116), ALFA(117), ALFA(118), ALFA(119), ALFA(120),
    -     2     ALFA(121), ALFA(122), ALFA(123), ALFA(124), ALFA(125),
    -     3     ALFA(126), ALFA(127), ALFA(128), ALFA(129), ALFA(130)/
    -     4     6.06478527578421742D-05,     6.08023907788436497D-05,
    -     5     6.01577894539460388D-05,     5.89199657344698500D-05,
    -     6     5.72515823777593053D-05,     5.52804375585852577D-05,
    -     7     5.31063773802880170D-05,     5.08069302012325706D-05,
    -     8     4.84418647620094842D-05,     4.60568581607475370D-05,
    -     9    -6.91141397288294174D-04,    -4.29976633058871912D-04,
    -     A     1.83067735980039018D-04,     6.60088147542014144D-04,
    -     B     8.75964969951185931D-04,     8.77335235958235514D-04,
    -     C     7.49369585378990637D-04,     5.63832329756980918D-04,
    -     D     3.68059319971443156D-04,     1.88464535514455599D-04/
    -      DATA ALFA(131), ALFA(132), ALFA(133), ALFA(134), ALFA(135),
    -     1     ALFA(136), ALFA(137), ALFA(138), ALFA(139), ALFA(140),
    -     2     ALFA(141), ALFA(142), ALFA(143), ALFA(144), ALFA(145),
    -     3     ALFA(146), ALFA(147), ALFA(148), ALFA(149), ALFA(150)/
    -     4     3.70663057664904149D-05,    -8.28520220232137023D-05,
    -     5    -1.72751952869172998D-04,    -2.36314873605872983D-04,
    -     6    -2.77966150694906658D-04,    -3.02079514155456919D-04,
    -     7    -3.12594712643820127D-04,    -3.12872558758067163D-04,
    -     8    -3.05678038466324377D-04,    -2.93226470614557331D-04,
    -     9    -2.77255655582934777D-04,    -2.59103928467031709D-04,
    -     A    -2.39784014396480342D-04,    -2.20048260045422848D-04,
    -     B    -2.00443911094971498D-04,    -1.81358692210970687D-04,
    -     C    -1.63057674478657464D-04,    -1.45712672175205844D-04,
    -     D    -1.29425421983924587D-04,    -1.14245691942445952D-04/
    -      DATA ALFA(151), ALFA(152), ALFA(153), ALFA(154), ALFA(155),
    -     1     ALFA(156), ALFA(157), ALFA(158), ALFA(159), ALFA(160),
    -     2     ALFA(161), ALFA(162), ALFA(163), ALFA(164), ALFA(165),
    -     3     ALFA(166), ALFA(167), ALFA(168), ALFA(169), ALFA(170)/
    -     4     1.92821964248775885D-03,     1.35592576302022234D-03,
    -     5    -7.17858090421302995D-04,    -2.58084802575270346D-03,
    -     6    -3.49271130826168475D-03,    -3.46986299340960628D-03,
    -     7    -2.82285233351310182D-03,    -1.88103076404891354D-03,
    -     8    -8.89531718383947600D-04,     3.87912102631035228D-06,
    -     9     7.28688540119691412D-04,     1.26566373053457758D-03,
    -     A     1.62518158372674427D-03,     1.83203153216373172D-03,
    -     B     1.91588388990527909D-03,     1.90588846755546138D-03,
    -     C     1.82798982421825727D-03,     1.70389506421121530D-03,
    -     D     1.55097127171097686D-03,     1.38261421852276159D-03/
    -      DATA ALFA(171), ALFA(172), ALFA(173), ALFA(174), ALFA(175),
    -     1     ALFA(176), ALFA(177), ALFA(178), ALFA(179), ALFA(180)/
    -     2     1.20881424230064774D-03,     1.03676532638344962D-03,
    -     3     8.71437918068619115D-04,     7.16080155297701002D-04,
    -     4     5.72637002558129372D-04,     4.42089819465802277D-04,
    -     5     3.24724948503090564D-04,     2.20342042730246599D-04,
    -     6     1.28412898401353882D-04,     4.82005924552095464D-05/
    -      DATA BETA(1), BETA(2), BETA(3), BETA(4), BETA(5), BETA(6),
    -     1     BETA(7), BETA(8), BETA(9), BETA(10), BETA(11), BETA(12),
    -     2     BETA(13), BETA(14), BETA(15), BETA(16), BETA(17), BETA(18),
    -     3     BETA(19), BETA(20), BETA(21), BETA(22)/
    -     4     1.79988721413553309D-02,     5.59964911064388073D-03,
    -     5     2.88501402231132779D-03,     1.80096606761053941D-03,
    -     6     1.24753110589199202D-03,     9.22878876572938311D-04,
    -     7     7.14430421727287357D-04,     5.71787281789704872D-04,
    -     8     4.69431007606481533D-04,     3.93232835462916638D-04,
    -     9     3.34818889318297664D-04,     2.88952148495751517D-04,
    -     A     2.52211615549573284D-04,     2.22280580798883327D-04,
    -     B     1.97541838033062524D-04,     1.76836855019718004D-04,
    -     C     1.59316899661821081D-04,     1.44347930197333986D-04,
    -     D     1.31448068119965379D-04,     1.20245444949302884D-04,
    -     E     1.10449144504599392D-04,     1.01828770740567258D-04/
    -      DATA BETA(23), BETA(24), BETA(25), BETA(26), BETA(27), BETA(28),
    -     1     BETA(29), BETA(30), BETA(31), BETA(32), BETA(33), BETA(34),
    -     2     BETA(35), BETA(36), BETA(37), BETA(38), BETA(39), BETA(40),
    -     3     BETA(41), BETA(42), BETA(43), BETA(44)/
    -     4     9.41998224204237509D-05,     8.74130545753834437D-05,
    -     5     8.13466262162801467D-05,     7.59002269646219339D-05,
    -     6     7.09906300634153481D-05,     6.65482874842468183D-05,
    -     7     6.25146958969275078D-05,     5.88403394426251749D-05,
    -     8    -1.49282953213429172D-03,    -8.78204709546389328D-04,
    -     9    -5.02916549572034614D-04,    -2.94822138512746025D-04,
    -     A    -1.75463996970782828D-04,    -1.04008550460816434D-04,
    -     B    -5.96141953046457895D-05,    -3.12038929076098340D-05,
    -     C    -1.26089735980230047D-05,    -2.42892608575730389D-07,
    -     D     8.05996165414273571D-06,     1.36507009262147391D-05,
    -     E     1.73964125472926261D-05,     1.98672978842133780D-05/
    -      DATA BETA(45), BETA(46), BETA(47), BETA(48), BETA(49), BETA(50),
    -     1     BETA(51), BETA(52), BETA(53), BETA(54), BETA(55), BETA(56),
    -     2     BETA(57), BETA(58), BETA(59), BETA(60), BETA(61), BETA(62),
    -     3     BETA(63), BETA(64), BETA(65), BETA(66)/
    -     4     2.14463263790822639D-05,     2.23954659232456514D-05,
    -     5     2.28967783814712629D-05,     2.30785389811177817D-05,
    -     6     2.30321976080909144D-05,     2.28236073720348722D-05,
    -     7     2.25005881105292418D-05,     2.20981015361991429D-05,
    -     8     2.16418427448103905D-05,     2.11507649256220843D-05,
    -     9     2.06388749782170737D-05,     2.01165241997081666D-05,
    -     A     1.95913450141179244D-05,     1.90689367910436740D-05,
    -     B     1.85533719641636667D-05,     1.80475722259674218D-05,
    -     C     5.52213076721292790D-04,     4.47932581552384646D-04,
    -     D     2.79520653992020589D-04,     1.52468156198446602D-04,
    -     E     6.93271105657043598D-05,     1.76258683069991397D-05/
    -      DATA BETA(67), BETA(68), BETA(69), BETA(70), BETA(71), BETA(72),
    -     1     BETA(73), BETA(74), BETA(75), BETA(76), BETA(77), BETA(78),
    -     2     BETA(79), BETA(80), BETA(81), BETA(82), BETA(83), BETA(84),
    -     3     BETA(85), BETA(86), BETA(87), BETA(88)/
    -     4    -1.35744996343269136D-05,    -3.17972413350427135D-05,
    -     5    -4.18861861696693365D-05,    -4.69004889379141029D-05,
    -     6    -4.87665447413787352D-05,    -4.87010031186735069D-05,
    -     7    -4.74755620890086638D-05,    -4.55813058138628452D-05,
    -     8    -4.33309644511266036D-05,    -4.09230193157750364D-05,
    -     9    -3.84822638603221274D-05,    -3.60857167535410501D-05,
    -     A    -3.37793306123367417D-05,    -3.15888560772109621D-05,
    -     B    -2.95269561750807315D-05,    -2.75978914828335759D-05,
    -     C    -2.58006174666883713D-05,    -2.41308356761280200D-05,
    -     D    -2.25823509518346033D-05,    -2.11479656768912971D-05,
    -     E    -1.98200638885294927D-05,    -1.85909870801065077D-05/
    -      DATA BETA(89), BETA(90), BETA(91), BETA(92), BETA(93), BETA(94),
    -     1     BETA(95), BETA(96), BETA(97), BETA(98), BETA(99), BETA(100),
    -     2     BETA(101), BETA(102), BETA(103), BETA(104), BETA(105),
    -     3     BETA(106), BETA(107), BETA(108), BETA(109), BETA(110)/
    -     4    -1.74532699844210224D-05,    -1.63997823854497997D-05,
    -     5    -4.74617796559959808D-04,    -4.77864567147321487D-04,
    -     6    -3.20390228067037603D-04,    -1.61105016119962282D-04,
    -     7    -4.25778101285435204D-05,     3.44571294294967503D-05,
    -     8     7.97092684075674924D-05,     1.03138236708272200D-04,
    -     9     1.12466775262204158D-04,     1.13103642108481389D-04,
    -     A     1.08651634848774268D-04,     1.01437951597661973D-04,
    -     B     9.29298396593363896D-05,     8.40293133016089978D-05,
    -     C     7.52727991349134062D-05,     6.69632521975730872D-05,
    -     D     5.92564547323194704D-05,     5.22169308826975567D-05,
    -     E     4.58539485165360646D-05,     4.01445513891486808D-05/
    -      DATA BETA(111), BETA(112), BETA(113), BETA(114), BETA(115),
    -     1     BETA(116), BETA(117), BETA(118), BETA(119), BETA(120),
    -     2     BETA(121), BETA(122), BETA(123), BETA(124), BETA(125),
    -     3     BETA(126), BETA(127), BETA(128), BETA(129), BETA(130)/
    -     4     3.50481730031328081D-05,     3.05157995034346659D-05,
    -     5     2.64956119950516039D-05,     2.29363633690998152D-05,
    -     6     1.97893056664021636D-05,     1.70091984636412623D-05,
    -     7     1.45547428261524004D-05,     1.23886640995878413D-05,
    -     8     1.04775876076583236D-05,     8.79179954978479373D-06,
    -     9     7.36465810572578444D-04,     8.72790805146193976D-04,
    -     A     6.22614862573135066D-04,     2.85998154194304147D-04,
    -     B     3.84737672879366102D-06,    -1.87906003636971558D-04,
    -     C    -2.97603646594554535D-04,    -3.45998126832656348D-04,
    -     D    -3.53382470916037712D-04,    -3.35715635775048757D-04/
    -      DATA BETA(131), BETA(132), BETA(133), BETA(134), BETA(135),
    -     1     BETA(136), BETA(137), BETA(138), BETA(139), BETA(140),
    -     2     BETA(141), BETA(142), BETA(143), BETA(144), BETA(145),
    -     3     BETA(146), BETA(147), BETA(148), BETA(149), BETA(150)/
    -     4    -3.04321124789039809D-04,    -2.66722723047612821D-04,
    -     5    -2.27654214122819527D-04,    -1.89922611854562356D-04,
    -     6    -1.55058918599093870D-04,    -1.23778240761873630D-04,
    -     7    -9.62926147717644187D-05,    -7.25178327714425337D-05,
    -     8    -5.22070028895633801D-05,    -3.50347750511900522D-05,
    -     9    -2.06489761035551757D-05,    -8.70106096849767054D-06,
    -     A     1.13698686675100290D-06,     9.16426474122778849D-06,
    -     B     1.56477785428872620D-05,     2.08223629482466847D-05,
    -     C     2.48923381004595156D-05,     2.80340509574146325D-05,
    -     D     3.03987774629861915D-05,     3.21156731406700616D-05/
    -      DATA BETA(151), BETA(152), BETA(153), BETA(154), BETA(155),
    -     1     BETA(156), BETA(157), BETA(158), BETA(159), BETA(160),
    -     2     BETA(161), BETA(162), BETA(163), BETA(164), BETA(165),
    -     3     BETA(166), BETA(167), BETA(168), BETA(169), BETA(170)/
    -     4    -1.80182191963885708D-03,    -2.43402962938042533D-03,
    -     5    -1.83422663549856802D-03,    -7.62204596354009765D-04,
    -     6     2.39079475256927218D-04,     9.49266117176881141D-04,
    -     7     1.34467449701540359D-03,     1.48457495259449178D-03,
    -     8     1.44732339830617591D-03,     1.30268261285657186D-03,
    -     9     1.10351597375642682D-03,     8.86047440419791759D-04,
    -     A     6.73073208165665473D-04,     4.77603872856582378D-04,
    -     B     3.05991926358789362D-04,     1.60315694594721630D-04,
    -     C     4.00749555270613286D-05,    -5.66607461635251611D-05,
    -     D    -1.32506186772982638D-04,    -1.90296187989614057D-04/
    -      DATA BETA(171), BETA(172), BETA(173), BETA(174), BETA(175),
    -     1     BETA(176), BETA(177), BETA(178), BETA(179), BETA(180),
    -     2     BETA(181), BETA(182), BETA(183), BETA(184), BETA(185),
    -     3     BETA(186), BETA(187), BETA(188), BETA(189), BETA(190)/
    -     4    -2.32811450376937408D-04,    -2.62628811464668841D-04,
    -     5    -2.82050469867598672D-04,    -2.93081563192861167D-04,
    -     6    -2.97435962176316616D-04,    -2.96557334239348078D-04,
    -     7    -2.91647363312090861D-04,    -2.83696203837734166D-04,
    -     8    -2.73512317095673346D-04,    -2.61750155806768580D-04,
    -     9     6.38585891212050914D-03,     9.62374215806377941D-03,
    -     A     7.61878061207001043D-03,     2.83219055545628054D-03,
    -     B    -2.09841352012720090D-03,    -5.73826764216626498D-03,
    -     C    -7.70804244495414620D-03,    -8.21011692264844401D-03,
    -     D    -7.65824520346905413D-03,    -6.47209729391045177D-03/
    -      DATA BETA(191), BETA(192), BETA(193), BETA(194), BETA(195),
    -     1     BETA(196), BETA(197), BETA(198), BETA(199), BETA(200),
    -     2     BETA(201), BETA(202), BETA(203), BETA(204), BETA(205),
    -     3     BETA(206), BETA(207), BETA(208), BETA(209), BETA(210)/
    -     4    -4.99132412004966473D-03,    -3.45612289713133280D-03,
    -     5    -2.01785580014170775D-03,    -7.59430686781961401D-04,
    -     6     2.84173631523859138D-04,     1.10891667586337403D-03,
    -     7     1.72901493872728771D-03,     2.16812590802684701D-03,
    -     8     2.45357710494539735D-03,     2.61281821058334862D-03,
    -     9     2.67141039656276912D-03,     2.65203073395980430D-03,
    -     A     2.57411652877287315D-03,     2.45389126236094427D-03,
    -     B     2.30460058071795494D-03,     2.13684837686712662D-03,
    -     C     1.95896528478870911D-03,     1.77737008679454412D-03,
    -     D     1.59690280765839059D-03,     1.42111975664438546D-03/
    -      DATA GAMA(1), GAMA(2), GAMA(3), GAMA(4), GAMA(5), GAMA(6),
    -     1     GAMA(7), GAMA(8), GAMA(9), GAMA(10), GAMA(11), GAMA(12),
    -     2     GAMA(13), GAMA(14), GAMA(15), GAMA(16), GAMA(17), GAMA(18),
    -     3     GAMA(19), GAMA(20), GAMA(21), GAMA(22)/
    -     4     6.29960524947436582D-01,     2.51984209978974633D-01,
    -     5     1.54790300415655846D-01,     1.10713062416159013D-01,
    -     6     8.57309395527394825D-02,     6.97161316958684292D-02,
    -     7     5.86085671893713576D-02,     5.04698873536310685D-02,
    -     8     4.42600580689154809D-02,     3.93720661543509966D-02,
    -     9     3.54283195924455368D-02,     3.21818857502098231D-02,
    -     A     2.94646240791157679D-02,     2.71581677112934479D-02,
    -     B     2.51768272973861779D-02,     2.34570755306078891D-02,
    -     C     2.19508390134907203D-02,     2.06210828235646240D-02,
    -     D     1.94388240897880846D-02,     1.83810633800683158D-02,
    -     E     1.74293213231963172D-02,     1.65685837786612353D-02/
    -      DATA GAMA(23), GAMA(24), GAMA(25), GAMA(26), GAMA(27), GAMA(28),
    -     1     GAMA(29), GAMA(30)/
    -     2     1.57865285987918445D-02,     1.50729501494095594D-02,
    -     3     1.44193250839954639D-02,     1.38184805735341786D-02,
    -     4     1.32643378994276568D-02,     1.27517121970498651D-02,
    -     5     1.22761545318762767D-02,     1.18338262398482403D-02/
    -      DATA EX1, EX2, HPI, GPI, THPI /
    -     1     3.33333333333333333D-01,     6.66666666666666667D-01,
    -     2     1.57079632679489662D+00,     3.14159265358979324D+00,
    -     3     4.71238898038468986D+00/
    -      DATA ZEROR,ZEROI,CONER,CONEI / 0.0D0, 0.0D0, 1.0D0, 0.0D0 /
    -C
    -      RFNU = 1.0D0/FNU
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST (Z/FNU TOO SMALL)
    -C-----------------------------------------------------------------------
    -      TEST = D1MACH(1)*1.0D+3
    -      AC = FNU*TEST
    -      IF (DABS(ZR).GT.AC .OR. DABS(ZI).GT.AC) GO TO 15
    -      ZETA1R = 2.0D0*DABS(DLOG(TEST))+FNU
    -      ZETA1I = 0.0D0
    -      ZETA2R = FNU
    -      ZETA2I = 0.0D0
    -      PHIR = 1.0D0
    -      PHII = 0.0D0
    -      ARGR = 1.0D0
    -      ARGI = 0.0D0
    -      RETURN
    -   15 CONTINUE
    -      ZBR = ZR*RFNU
    -      ZBI = ZI*RFNU
    -      RFNU2 = RFNU*RFNU
    -C-----------------------------------------------------------------------
    -C     COMPUTE IN THE FOURTH QUADRANT
    -C-----------------------------------------------------------------------
    -      FN13 = FNU**EX1
    -      FN23 = FN13*FN13
    -      RFN13 = 1.0D0/FN13
    -      W2R = CONER - ZBR*ZBR + ZBI*ZBI
    -      W2I = CONEI - ZBR*ZBI - ZBR*ZBI
    -      AW2 = AZABS(W2R,W2I)
    -      IF (AW2.GT.0.25D0) GO TO 130
    -C-----------------------------------------------------------------------
    -C     POWER SERIES FOR CABS(W2).LE.0.25D0
    -C-----------------------------------------------------------------------
    -      K = 1
    -      PR(1) = CONER
    -      PI(1) = CONEI
    -      SUMAR = GAMA(1)
    -      SUMAI = ZEROI
    -      AP(1) = 1.0D0
    -      IF (AW2.LT.TOL) GO TO 20
    -      DO 10 K=2,30
    -        PR(K) = PR(K-1)*W2R - PI(K-1)*W2I
    -        PI(K) = PR(K-1)*W2I + PI(K-1)*W2R
    -        SUMAR = SUMAR + PR(K)*GAMA(K)
    -        SUMAI = SUMAI + PI(K)*GAMA(K)
    -        AP(K) = AP(K-1)*AW2
    -        IF (AP(K).LT.TOL) GO TO 20
    -   10 CONTINUE
    -      K = 30
    -   20 CONTINUE
    -      KMAX = K
    -      ZETAR = W2R*SUMAR - W2I*SUMAI
    -      ZETAI = W2R*SUMAI + W2I*SUMAR
    -      ARGR = ZETAR*FN23
    -      ARGI = ZETAI*FN23
    -      CALL AZSQRT(SUMAR, SUMAI, ZAR, ZAI)
    -      CALL AZSQRT(W2R, W2I, STR, STI)
    -      ZETA2R = STR*FNU
    -      ZETA2I = STI*FNU
    -      STR = CONER + EX2*(ZETAR*ZAR-ZETAI*ZAI)
    -      STI = CONEI + EX2*(ZETAR*ZAI+ZETAI*ZAR)
    -      ZETA1R = STR*ZETA2R - STI*ZETA2I
    -      ZETA1I = STR*ZETA2I + STI*ZETA2R
    -      ZAR = ZAR + ZAR
    -      ZAI = ZAI + ZAI
    -      CALL AZSQRT(ZAR, ZAI, STR, STI)
    -      PHIR = STR*RFN13
    -      PHII = STI*RFN13
    -      IF (IPMTR.EQ.1) GO TO 120
    -C-----------------------------------------------------------------------
    -C     SUM SERIES FOR ASUM AND BSUM
    -C-----------------------------------------------------------------------
    -      SUMBR = ZEROR
    -      SUMBI = ZEROI
    -      DO 30 K=1,KMAX
    -        SUMBR = SUMBR + PR(K)*BETA(K)
    -        SUMBI = SUMBI + PI(K)*BETA(K)
    -   30 CONTINUE
    -      ASUMR = ZEROR
    -      ASUMI = ZEROI
    -      BSUMR = SUMBR
    -      BSUMI = SUMBI
    -      L1 = 0
    -      L2 = 30
    -      BTOL = TOL*(DABS(BSUMR)+DABS(BSUMI))
    -      ATOL = TOL
    -      PP = 1.0D0
    -      IAS = 0
    -      IBS = 0
    -      IF (RFNU2.LT.TOL) GO TO 110
    -      DO 100 IS=2,7
    -        ATOL = ATOL/RFNU2
    -        PP = PP*RFNU2
    -        IF (IAS.EQ.1) GO TO 60
    -        SUMAR = ZEROR
    -        SUMAI = ZEROI
    -        DO 40 K=1,KMAX
    -          M = L1 + K
    -          SUMAR = SUMAR + PR(K)*ALFA(M)
    -          SUMAI = SUMAI + PI(K)*ALFA(M)
    -          IF (AP(K).LT.ATOL) GO TO 50
    -   40   CONTINUE
    -   50   CONTINUE
    -        ASUMR = ASUMR + SUMAR*PP
    -        ASUMI = ASUMI + SUMAI*PP
    -        IF (PP.LT.TOL) IAS = 1
    -   60   CONTINUE
    -        IF (IBS.EQ.1) GO TO 90
    -        SUMBR = ZEROR
    -        SUMBI = ZEROI
    -        DO 70 K=1,KMAX
    -          M = L2 + K
    -          SUMBR = SUMBR + PR(K)*BETA(M)
    -          SUMBI = SUMBI + PI(K)*BETA(M)
    -          IF (AP(K).LT.ATOL) GO TO 80
    -   70   CONTINUE
    -   80   CONTINUE
    -        BSUMR = BSUMR + SUMBR*PP
    -        BSUMI = BSUMI + SUMBI*PP
    -        IF (PP.LT.BTOL) IBS = 1
    -   90   CONTINUE
    -        IF (IAS.EQ.1 .AND. IBS.EQ.1) GO TO 110
    -        L1 = L1 + 30
    -        L2 = L2 + 30
    -  100 CONTINUE
    -  110 CONTINUE
    -      ASUMR = ASUMR + CONER
    -      PP = RFNU*RFN13
    -      BSUMR = BSUMR*PP
    -      BSUMI = BSUMI*PP
    -  120 CONTINUE
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     CABS(W2).GT.0.25D0
    -C-----------------------------------------------------------------------
    -  130 CONTINUE
    -      CALL AZSQRT(W2R, W2I, WR, WI)
    -      IF (WR.LT.0.0D0) WR = 0.0D0
    -      IF (WI.LT.0.0D0) WI = 0.0D0
    -      STR = CONER + WR
    -      STI = WI
    -      CALL ZDIV(STR, STI, ZBR, ZBI, ZAR, ZAI)
    -      CALL AZLOG(ZAR, ZAI, ZCR, ZCI, IDUM)
    -      IF (ZCI.LT.0.0D0) ZCI = 0.0D0
    -      IF (ZCI.GT.HPI) ZCI = HPI
    -      IF (ZCR.LT.0.0D0) ZCR = 0.0D0
    -      ZTHR = (ZCR-WR)*1.5D0
    -      ZTHI = (ZCI-WI)*1.5D0
    -      ZETA1R = ZCR*FNU
    -      ZETA1I = ZCI*FNU
    -      ZETA2R = WR*FNU
    -      ZETA2I = WI*FNU
    -      AZTH = AZABS(ZTHR,ZTHI)
    -      ANG = THPI
    -      IF (ZTHR.GE.0.0D0 .AND. ZTHI.LT.0.0D0) GO TO 140
    -      ANG = HPI
    -      IF (ZTHR.EQ.0.0D0) GO TO 140
    -      ANG = DATAN(ZTHI/ZTHR)
    -      IF (ZTHR.LT.0.0D0) ANG = ANG + GPI
    -  140 CONTINUE
    -      PP = AZTH**EX2
    -      ANG = ANG*EX2
    -      ZETAR = PP*DCOS(ANG)
    -      ZETAI = PP*DSIN(ANG)
    -      IF (ZETAI.LT.0.0D0) ZETAI = 0.0D0
    -      ARGR = ZETAR*FN23
    -      ARGI = ZETAI*FN23
    -      CALL ZDIV(ZTHR, ZTHI, ZETAR, ZETAI, RTZTR, RTZTI)
    -      CALL ZDIV(RTZTR, RTZTI, WR, WI, ZAR, ZAI)
    -      TZAR = ZAR + ZAR
    -      TZAI = ZAI + ZAI
    -      CALL AZSQRT(TZAR, TZAI, STR, STI)
    -      PHIR = STR*RFN13
    -      PHII = STI*RFN13
    -      IF (IPMTR.EQ.1) GO TO 120
    -      RAW = 1.0D0/DSQRT(AW2)
    -      STR = WR*RAW
    -      STI = -WI*RAW
    -      TFNR = STR*RFNU*RAW
    -      TFNI = STI*RFNU*RAW
    -      RAZTH = 1.0D0/AZTH
    -      STR = ZTHR*RAZTH
    -      STI = -ZTHI*RAZTH
    -      RZTHR = STR*RAZTH*RFNU
    -      RZTHI = STI*RAZTH*RFNU
    -      ZCR = RZTHR*AR(2)
    -      ZCI = RZTHI*AR(2)
    -      RAW2 = 1.0D0/AW2
    -      STR = W2R*RAW2
    -      STI = -W2I*RAW2
    -      T2R = STR*RAW2
    -      T2I = STI*RAW2
    -      STR = T2R*C(2) + C(3)
    -      STI = T2I*C(2)
    -      UPR(2) = STR*TFNR - STI*TFNI
    -      UPI(2) = STR*TFNI + STI*TFNR
    -      BSUMR = UPR(2) + ZCR
    -      BSUMI = UPI(2) + ZCI
    -      ASUMR = ZEROR
    -      ASUMI = ZEROI
    -      IF (RFNU.LT.TOL) GO TO 220
    -      PRZTHR = RZTHR
    -      PRZTHI = RZTHI
    -      PTFNR = TFNR
    -      PTFNI = TFNI
    -      UPR(1) = CONER
    -      UPI(1) = CONEI
    -      PP = 1.0D0
    -      BTOL = TOL*(DABS(BSUMR)+DABS(BSUMI))
    -      KS = 0
    -      KP1 = 2
    -      L = 3
    -      IAS = 0
    -      IBS = 0
    -      DO 210 LR=2,12,2
    -        LRP1 = LR + 1
    -C-----------------------------------------------------------------------
    -C     COMPUTE TWO ADDITIONAL CR, DR, AND UP FOR TWO MORE TERMS IN
    -C     NEXT SUMA AND SUMB
    -C-----------------------------------------------------------------------
    -        DO 160 K=LR,LRP1
    -          KS = KS + 1
    -          KP1 = KP1 + 1
    -          L = L + 1
    -          ZAR = C(L)
    -          ZAI = ZEROI
    -          DO 150 J=2,KP1
    -            L = L + 1
    -            STR = ZAR*T2R - T2I*ZAI + C(L)
    -            ZAI = ZAR*T2I + ZAI*T2R
    -            ZAR = STR
    -  150     CONTINUE
    -          STR = PTFNR*TFNR - PTFNI*TFNI
    -          PTFNI = PTFNR*TFNI + PTFNI*TFNR
    -          PTFNR = STR
    -          UPR(KP1) = PTFNR*ZAR - PTFNI*ZAI
    -          UPI(KP1) = PTFNI*ZAR + PTFNR*ZAI
    -          CRR(KS) = PRZTHR*BR(KS+1)
    -          CRI(KS) = PRZTHI*BR(KS+1)
    -          STR = PRZTHR*RZTHR - PRZTHI*RZTHI
    -          PRZTHI = PRZTHR*RZTHI + PRZTHI*RZTHR
    -          PRZTHR = STR
    -          DRR(KS) = PRZTHR*AR(KS+2)
    -          DRI(KS) = PRZTHI*AR(KS+2)
    -  160   CONTINUE
    -        PP = PP*RFNU2
    -        IF (IAS.EQ.1) GO TO 180
    -        SUMAR = UPR(LRP1)
    -        SUMAI = UPI(LRP1)
    -        JU = LRP1
    -        DO 170 JR=1,LR
    -          JU = JU - 1
    -          SUMAR = SUMAR + CRR(JR)*UPR(JU) - CRI(JR)*UPI(JU)
    -          SUMAI = SUMAI + CRR(JR)*UPI(JU) + CRI(JR)*UPR(JU)
    -  170   CONTINUE
    -        ASUMR = ASUMR + SUMAR
    -        ASUMI = ASUMI + SUMAI
    -        TEST = DABS(SUMAR) + DABS(SUMAI)
    -        IF (PP.LT.TOL .AND. TEST.LT.TOL) IAS = 1
    -  180   CONTINUE
    -        IF (IBS.EQ.1) GO TO 200
    -        SUMBR = UPR(LR+2) + UPR(LRP1)*ZCR - UPI(LRP1)*ZCI
    -        SUMBI = UPI(LR+2) + UPR(LRP1)*ZCI + UPI(LRP1)*ZCR
    -        JU = LRP1
    -        DO 190 JR=1,LR
    -          JU = JU - 1
    -          SUMBR = SUMBR + DRR(JR)*UPR(JU) - DRI(JR)*UPI(JU)
    -          SUMBI = SUMBI + DRR(JR)*UPI(JU) + DRI(JR)*UPR(JU)
    -  190   CONTINUE
    -        BSUMR = BSUMR + SUMBR
    -        BSUMI = BSUMI + SUMBI
    -        TEST = DABS(SUMBR) + DABS(SUMBI)
    -        IF (PP.LT.BTOL .AND. TEST.LT.BTOL) IBS = 1
    -  200   CONTINUE
    -        IF (IAS.EQ.1 .AND. IBS.EQ.1) GO TO 220
    -  210 CONTINUE
    -  220 CONTINUE
    -      ASUMR = ASUMR + CONER
    -      STR = -BSUMR*RFN13
    -      STI = -BSUMI*RFN13
    -      CALL ZDIV(STR, STI, RTZTR, RTZTI, BSUMR, BSUMI)
    -      GO TO 120
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zuni1.f b/scipy-0.10.1/scipy/special/amos/zuni1.f
    deleted file mode 100644
    index c7173b3019..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zuni1.f
    +++ /dev/null
    @@ -1,204 +0,0 @@
    -      SUBROUTINE ZUNI1(ZR, ZI, FNU, KODE, N, YR, YI, NZ, NLAST, FNUL,
    -     * TOL, ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZUNI1
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZUNI1 COMPUTES I(FNU,Z)  BY MEANS OF THE UNIFORM ASYMPTOTIC
    -C     EXPANSION FOR I(FNU,Z) IN -PI/3.LE.ARG Z.LE.PI/3.
    -C
    -C     FNUL IS THE SMALLEST ORDER PERMITTED FOR THE ASYMPTOTIC
    -C     EXPANSION. NLAST=0 MEANS ALL OF THE Y VALUES WERE SET.
    -C     NLAST.NE.0 IS THE NUMBER LEFT TO BE COMPUTED BY ANOTHER
    -C     FORMULA FOR ORDERS FNU TO FNU+NLAST-1 BECAUSE FNU+NLAST-1.LT.FNUL.
    -C     Y(I)=CZERO FOR I=NLAST+1,N
    -C
    -C***ROUTINES CALLED  ZUCHK,ZUNIK,ZUOIK,D1MACH,AZABS
    -C***END PROLOGUE  ZUNI1
    -C     COMPLEX CFN,CONE,CRSC,CSCL,CSR,CSS,CWRK,CZERO,C1,C2,PHI,RZ,SUM,S1,
    -C    *S2,Y,Z,ZETA1,ZETA2
    -      DOUBLE PRECISION ALIM, APHI, ASCLE, BRY, CONER, CRSC,
    -     * CSCL, CSRR, CSSR, CWRKI, CWRKR, C1R, C2I, C2M, C2R, ELIM, FN,
    -     * FNU, FNUL, PHII, PHIR, RAST, RS1, RZI, RZR, STI, STR, SUMI,
    -     * SUMR, S1I, S1R, S2I, S2R, TOL, YI, YR, ZEROI, ZEROR, ZETA1I,
    -     * ZETA1R, ZETA2I, ZETA2R, ZI, ZR, CYR, CYI, D1MACH, AZABS
    -      INTEGER I, IFLAG, INIT, K, KODE, M, N, ND, NLAST, NN, NUF, NW, NZ
    -      DIMENSION BRY(3), YR(N), YI(N), CWRKR(16), CWRKI(16), CSSR(3),
    -     * CSRR(3), CYR(2), CYI(2)
    -      DATA ZEROR,ZEROI,CONER / 0.0D0, 0.0D0, 1.0D0 /
    -C
    -      NZ = 0
    -      ND = N
    -      NLAST = 0
    -C-----------------------------------------------------------------------
    -C     COMPUTED VALUES WITH EXPONENTS BETWEEN ALIM AND ELIM IN MAG-
    -C     NITUDE ARE SCALED TO KEEP INTERMEDIATE ARITHMETIC ON SCALE,
    -C     EXP(ALIM)=EXP(ELIM)*TOL
    -C-----------------------------------------------------------------------
    -      CSCL = 1.0D0/TOL
    -      CRSC = TOL
    -      CSSR(1) = CSCL
    -      CSSR(2) = CONER
    -      CSSR(3) = CRSC
    -      CSRR(1) = CRSC
    -      CSRR(2) = CONER
    -      CSRR(3) = CSCL
    -      BRY(1) = 1.0D+3*D1MACH(1)/TOL
    -C-----------------------------------------------------------------------
    -C     CHECK FOR UNDERFLOW AND OVERFLOW ON FIRST MEMBER
    -C-----------------------------------------------------------------------
    -      FN = DMAX1(FNU,1.0D0)
    -      INIT = 0
    -      CALL ZUNIK(ZR, ZI, FN, 1, 1, TOL, INIT, PHIR, PHII, ZETA1R,
    -     * ZETA1I, ZETA2R, ZETA2I, SUMR, SUMI, CWRKR, CWRKI)
    -      IF (KODE.EQ.1) GO TO 10
    -      STR = ZR + ZETA2R
    -      STI = ZI + ZETA2I
    -      RAST = FN/AZABS(STR,STI)
    -      STR = STR*RAST*RAST
    -      STI = -STI*RAST*RAST
    -      S1R = -ZETA1R + STR
    -      S1I = -ZETA1I + STI
    -      GO TO 20
    -   10 CONTINUE
    -      S1R = -ZETA1R + ZETA2R
    -      S1I = -ZETA1I + ZETA2I
    -   20 CONTINUE
    -      RS1 = S1R
    -      IF (DABS(RS1).GT.ELIM) GO TO 130
    -   30 CONTINUE
    -      NN = MIN0(2,ND)
    -      DO 80 I=1,NN
    -        FN = FNU + DBLE(FLOAT(ND-I))
    -        INIT = 0
    -        CALL ZUNIK(ZR, ZI, FN, 1, 0, TOL, INIT, PHIR, PHII, ZETA1R,
    -     *   ZETA1I, ZETA2R, ZETA2I, SUMR, SUMI, CWRKR, CWRKI)
    -        IF (KODE.EQ.1) GO TO 40
    -        STR = ZR + ZETA2R
    -        STI = ZI + ZETA2I
    -        RAST = FN/AZABS(STR,STI)
    -        STR = STR*RAST*RAST
    -        STI = -STI*RAST*RAST
    -        S1R = -ZETA1R + STR
    -        S1I = -ZETA1I + STI + ZI
    -        GO TO 50
    -   40   CONTINUE
    -        S1R = -ZETA1R + ZETA2R
    -        S1I = -ZETA1I + ZETA2I
    -   50   CONTINUE
    -C-----------------------------------------------------------------------
    -C     TEST FOR UNDERFLOW AND OVERFLOW
    -C-----------------------------------------------------------------------
    -        RS1 = S1R
    -        IF (DABS(RS1).GT.ELIM) GO TO 110
    -        IF (I.EQ.1) IFLAG = 2
    -        IF (DABS(RS1).LT.ALIM) GO TO 60
    -C-----------------------------------------------------------------------
    -C     REFINE  TEST AND SCALE
    -C-----------------------------------------------------------------------
    -        APHI = AZABS(PHIR,PHII)
    -        RS1 = RS1 + DLOG(APHI)
    -        IF (DABS(RS1).GT.ELIM) GO TO 110
    -        IF (I.EQ.1) IFLAG = 1
    -        IF (RS1.LT.0.0D0) GO TO 60
    -        IF (I.EQ.1) IFLAG = 3
    -   60   CONTINUE
    -C-----------------------------------------------------------------------
    -C     SCALE S1 IF CABS(S1).LT.ASCLE
    -C-----------------------------------------------------------------------
    -        S2R = PHIR*SUMR - PHII*SUMI
    -        S2I = PHIR*SUMI + PHII*SUMR
    -        STR = DEXP(S1R)*CSSR(IFLAG)
    -        S1R = STR*DCOS(S1I)
    -        S1I = STR*DSIN(S1I)
    -        STR = S2R*S1R - S2I*S1I
    -        S2I = S2R*S1I + S2I*S1R
    -        S2R = STR
    -        IF (IFLAG.NE.1) GO TO 70
    -        CALL ZUCHK(S2R, S2I, NW, BRY(1), TOL)
    -        IF (NW.NE.0) GO TO 110
    -   70   CONTINUE
    -        CYR(I) = S2R
    -        CYI(I) = S2I
    -        M = ND - I + 1
    -        YR(M) = S2R*CSRR(IFLAG)
    -        YI(M) = S2I*CSRR(IFLAG)
    -   80 CONTINUE
    -      IF (ND.LE.2) GO TO 100
    -      RAST = 1.0D0/AZABS(ZR,ZI)
    -      STR = ZR*RAST
    -      STI = -ZI*RAST
    -      RZR = (STR+STR)*RAST
    -      RZI = (STI+STI)*RAST
    -      BRY(2) = 1.0D0/BRY(1)
    -      BRY(3) = D1MACH(2)
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      C1R = CSRR(IFLAG)
    -      ASCLE = BRY(IFLAG)
    -      K = ND - 2
    -      FN = DBLE(FLOAT(K))
    -      DO 90 I=3,ND
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = S1R + (FNU+FN)*(RZR*C2R-RZI*C2I)
    -        S2I = S1I + (FNU+FN)*(RZR*C2I+RZI*C2R)
    -        S1R = C2R
    -        S1I = C2I
    -        C2R = S2R*C1R
    -        C2I = S2I*C1R
    -        YR(K) = C2R
    -        YI(K) = C2I
    -        K = K - 1
    -        FN = FN - 1.0D0
    -        IF (IFLAG.GE.3) GO TO 90
    -        STR = DABS(C2R)
    -        STI = DABS(C2I)
    -        C2M = DMAX1(STR,STI)
    -        IF (C2M.LE.ASCLE) GO TO 90
    -        IFLAG = IFLAG + 1
    -        ASCLE = BRY(IFLAG)
    -        S1R = S1R*C1R
    -        S1I = S1I*C1R
    -        S2R = C2R
    -        S2I = C2I
    -        S1R = S1R*CSSR(IFLAG)
    -        S1I = S1I*CSSR(IFLAG)
    -        S2R = S2R*CSSR(IFLAG)
    -        S2I = S2I*CSSR(IFLAG)
    -        C1R = CSRR(IFLAG)
    -   90 CONTINUE
    -  100 CONTINUE
    -      RETURN
    -C-----------------------------------------------------------------------
    -C     SET UNDERFLOW AND UPDATE PARAMETERS
    -C-----------------------------------------------------------------------
    -  110 CONTINUE
    -      IF (RS1.GT.0.0D0) GO TO 120
    -      YR(ND) = ZEROR
    -      YI(ND) = ZEROI
    -      NZ = NZ + 1
    -      ND = ND - 1
    -      IF (ND.EQ.0) GO TO 100
    -      CALL ZUOIK(ZR, ZI, FNU, KODE, 1, ND, YR, YI, NUF, TOL, ELIM, ALIM)
    -      IF (NUF.LT.0) GO TO 120
    -      ND = ND - NUF
    -      NZ = NZ + NUF
    -      IF (ND.EQ.0) GO TO 100
    -      FN = FNU + DBLE(FLOAT(ND-1))
    -      IF (FN.GE.FNUL) GO TO 30
    -      NLAST = ND
    -      RETURN
    -  120 CONTINUE
    -      NZ = -1
    -      RETURN
    -  130 CONTINUE
    -      IF (RS1.GT.0.0D0) GO TO 120
    -      NZ = N
    -      DO 140 I=1,N
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -  140 CONTINUE
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zuni2.f b/scipy-0.10.1/scipy/special/amos/zuni2.f
    deleted file mode 100644
    index 49061cb9aa..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zuni2.f
    +++ /dev/null
    @@ -1,267 +0,0 @@
    -      SUBROUTINE ZUNI2(ZR, ZI, FNU, KODE, N, YR, YI, NZ, NLAST, FNUL,
    -     * TOL, ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZUNI2
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZUNI2 COMPUTES I(FNU,Z) IN THE RIGHT HALF PLANE BY MEANS OF
    -C     UNIFORM ASYMPTOTIC EXPANSION FOR J(FNU,ZN) WHERE ZN IS Z*I
    -C     OR -Z*I AND ZN IS IN THE RIGHT HALF PLANE ALSO.
    -C
    -C     FNUL IS THE SMALLEST ORDER PERMITTED FOR THE ASYMPTOTIC
    -C     EXPANSION. NLAST=0 MEANS ALL OF THE Y VALUES WERE SET.
    -C     NLAST.NE.0 IS THE NUMBER LEFT TO BE COMPUTED BY ANOTHER
    -C     FORMULA FOR ORDERS FNU TO FNU+NLAST-1 BECAUSE FNU+NLAST-1.LT.FNUL.
    -C     Y(I)=CZERO FOR I=NLAST+1,N
    -C
    -C***ROUTINES CALLED  ZAIRY,ZUCHK,ZUNHJ,ZUOIK,D1MACH,AZABS
    -C***END PROLOGUE  ZUNI2
    -C     COMPLEX AI,ARG,ASUM,BSUM,CFN,CI,CID,CIP,CONE,CRSC,CSCL,CSR,CSS,
    -C    *CZERO,C1,C2,DAI,PHI,RZ,S1,S2,Y,Z,ZB,ZETA1,ZETA2,ZN
    -      DOUBLE PRECISION AARG, AIC, AII, AIR, ALIM, ANG, APHI, ARGI,
    -     * ARGR, ASCLE, ASUMI, ASUMR, BRY, BSUMI, BSUMR, CIDI, CIPI, CIPR,
    -     * CONER, CRSC, CSCL, CSRR, CSSR, C1R, C2I, C2M, C2R, DAII,
    -     * DAIR, ELIM, FN, FNU, FNUL, HPI, PHII, PHIR, RAST, RAZ, RS1, RZI,
    -     * RZR, STI, STR, S1I, S1R, S2I, S2R, TOL, YI, YR, ZBI, ZBR, ZEROI,
    -     * ZEROR, ZETA1I, ZETA1R, ZETA2I, ZETA2R, ZI, ZNI, ZNR, ZR, CYR,
    -     * CYI, D1MACH, AZABS, CAR, SAR
    -      INTEGER I, IFLAG, IN, INU, J, K, KODE, N, NAI, ND, NDAI, NLAST,
    -     * NN, NUF, NW, NZ, IDUM
    -      DIMENSION BRY(3), YR(N), YI(N), CIPR(4), CIPI(4), CSSR(3),
    -     * CSRR(3), CYR(2), CYI(2)
    -      DATA ZEROR,ZEROI,CONER / 0.0D0, 0.0D0, 1.0D0 /
    -      DATA CIPR(1),CIPI(1),CIPR(2),CIPI(2),CIPR(3),CIPI(3),CIPR(4),
    -     * CIPI(4)/ 1.0D0,0.0D0, 0.0D0,1.0D0, -1.0D0,0.0D0, 0.0D0,-1.0D0/
    -      DATA HPI, AIC  /
    -     1      1.57079632679489662D+00,     1.265512123484645396D+00/
    -C
    -      NZ = 0
    -      ND = N
    -      NLAST = 0
    -C-----------------------------------------------------------------------
    -C     COMPUTED VALUES WITH EXPONENTS BETWEEN ALIM AND ELIM IN MAG-
    -C     NITUDE ARE SCALED TO KEEP INTERMEDIATE ARITHMETIC ON SCALE,
    -C     EXP(ALIM)=EXP(ELIM)*TOL
    -C-----------------------------------------------------------------------
    -      CSCL = 1.0D0/TOL
    -      CRSC = TOL
    -      CSSR(1) = CSCL
    -      CSSR(2) = CONER
    -      CSSR(3) = CRSC
    -      CSRR(1) = CRSC
    -      CSRR(2) = CONER
    -      CSRR(3) = CSCL
    -      BRY(1) = 1.0D+3*D1MACH(1)/TOL
    -C-----------------------------------------------------------------------
    -C     ZN IS IN THE RIGHT HALF PLANE AFTER ROTATION BY CI OR -CI
    -C-----------------------------------------------------------------------
    -      ZNR = ZI
    -      ZNI = -ZR
    -      ZBR = ZR
    -      ZBI = ZI
    -      CIDI = -CONER
    -      INU = INT(SNGL(FNU))
    -      ANG = HPI*(FNU-DBLE(FLOAT(INU)))
    -      C2R = DCOS(ANG)
    -      C2I = DSIN(ANG)
    -      CAR = C2R
    -      SAR = C2I
    -      IN = INU + N - 1
    -      IN = MOD(IN,4) + 1
    -      STR = C2R*CIPR(IN) - C2I*CIPI(IN)
    -      C2I = C2R*CIPI(IN) + C2I*CIPR(IN)
    -      C2R = STR
    -      IF (ZI.GT.0.0D0) GO TO 10
    -      ZNR = -ZNR
    -      ZBI = -ZBI
    -      CIDI = -CIDI
    -      C2I = -C2I
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     CHECK FOR UNDERFLOW AND OVERFLOW ON FIRST MEMBER
    -C-----------------------------------------------------------------------
    -      FN = DMAX1(FNU,1.0D0)
    -      CALL ZUNHJ(ZNR, ZNI, FN, 1, TOL, PHIR, PHII, ARGR, ARGI, ZETA1R,
    -     * ZETA1I, ZETA2R, ZETA2I, ASUMR, ASUMI, BSUMR, BSUMI)
    -      IF (KODE.EQ.1) GO TO 20
    -      STR = ZBR + ZETA2R
    -      STI = ZBI + ZETA2I
    -      RAST = FN/AZABS(STR,STI)
    -      STR = STR*RAST*RAST
    -      STI = -STI*RAST*RAST
    -      S1R = -ZETA1R + STR
    -      S1I = -ZETA1I + STI
    -      GO TO 30
    -   20 CONTINUE
    -      S1R = -ZETA1R + ZETA2R
    -      S1I = -ZETA1I + ZETA2I
    -   30 CONTINUE
    -      RS1 = S1R
    -      IF (DABS(RS1).GT.ELIM) GO TO 150
    -   40 CONTINUE
    -      NN = MIN0(2,ND)
    -      DO 90 I=1,NN
    -        FN = FNU + DBLE(FLOAT(ND-I))
    -        CALL ZUNHJ(ZNR, ZNI, FN, 0, TOL, PHIR, PHII, ARGR, ARGI,
    -     *   ZETA1R, ZETA1I, ZETA2R, ZETA2I, ASUMR, ASUMI, BSUMR, BSUMI)
    -        IF (KODE.EQ.1) GO TO 50
    -        STR = ZBR + ZETA2R
    -        STI = ZBI + ZETA2I
    -        RAST = FN/AZABS(STR,STI)
    -        STR = STR*RAST*RAST
    -        STI = -STI*RAST*RAST
    -        S1R = -ZETA1R + STR
    -        S1I = -ZETA1I + STI + DABS(ZI)
    -        GO TO 60
    -   50   CONTINUE
    -        S1R = -ZETA1R + ZETA2R
    -        S1I = -ZETA1I + ZETA2I
    -   60   CONTINUE
    -C-----------------------------------------------------------------------
    -C     TEST FOR UNDERFLOW AND OVERFLOW
    -C-----------------------------------------------------------------------
    -        RS1 = S1R
    -        IF (DABS(RS1).GT.ELIM) GO TO 120
    -        IF (I.EQ.1) IFLAG = 2
    -        IF (DABS(RS1).LT.ALIM) GO TO 70
    -C-----------------------------------------------------------------------
    -C     REFINE  TEST AND SCALE
    -C-----------------------------------------------------------------------
    -C-----------------------------------------------------------------------
    -        APHI = AZABS(PHIR,PHII)
    -        AARG = AZABS(ARGR,ARGI)
    -        RS1 = RS1 + DLOG(APHI) - 0.25D0*DLOG(AARG) - AIC
    -        IF (DABS(RS1).GT.ELIM) GO TO 120
    -        IF (I.EQ.1) IFLAG = 1
    -        IF (RS1.LT.0.0D0) GO TO 70
    -        IF (I.EQ.1) IFLAG = 3
    -   70   CONTINUE
    -C-----------------------------------------------------------------------
    -C     SCALE S1 TO KEEP INTERMEDIATE ARITHMETIC ON SCALE NEAR
    -C     EXPONENT EXTREMES
    -C-----------------------------------------------------------------------
    -        CALL ZAIRY(ARGR, ARGI, 0, 2, AIR, AII, NAI, IDUM)
    -        CALL ZAIRY(ARGR, ARGI, 1, 2, DAIR, DAII, NDAI, IDUM)
    -        STR = DAIR*BSUMR - DAII*BSUMI
    -        STI = DAIR*BSUMI + DAII*BSUMR
    -        STR = STR + (AIR*ASUMR-AII*ASUMI)
    -        STI = STI + (AIR*ASUMI+AII*ASUMR)
    -        S2R = PHIR*STR - PHII*STI
    -        S2I = PHIR*STI + PHII*STR
    -        STR = DEXP(S1R)*CSSR(IFLAG)
    -        S1R = STR*DCOS(S1I)
    -        S1I = STR*DSIN(S1I)
    -        STR = S2R*S1R - S2I*S1I
    -        S2I = S2R*S1I + S2I*S1R
    -        S2R = STR
    -        IF (IFLAG.NE.1) GO TO 80
    -        CALL ZUCHK(S2R, S2I, NW, BRY(1), TOL)
    -        IF (NW.NE.0) GO TO 120
    -   80   CONTINUE
    -        IF (ZI.LE.0.0D0) S2I = -S2I
    -        STR = S2R*C2R - S2I*C2I
    -        S2I = S2R*C2I + S2I*C2R
    -        S2R = STR
    -        CYR(I) = S2R
    -        CYI(I) = S2I
    -        J = ND - I + 1
    -        YR(J) = S2R*CSRR(IFLAG)
    -        YI(J) = S2I*CSRR(IFLAG)
    -        STR = -C2I*CIDI
    -        C2I = C2R*CIDI
    -        C2R = STR
    -   90 CONTINUE
    -      IF (ND.LE.2) GO TO 110
    -      RAZ = 1.0D0/AZABS(ZR,ZI)
    -      STR = ZR*RAZ
    -      STI = -ZI*RAZ
    -      RZR = (STR+STR)*RAZ
    -      RZI = (STI+STI)*RAZ
    -      BRY(2) = 1.0D0/BRY(1)
    -      BRY(3) = D1MACH(2)
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      C1R = CSRR(IFLAG)
    -      ASCLE = BRY(IFLAG)
    -      K = ND - 2
    -      FN = DBLE(FLOAT(K))
    -      DO 100 I=3,ND
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = S1R + (FNU+FN)*(RZR*C2R-RZI*C2I)
    -        S2I = S1I + (FNU+FN)*(RZR*C2I+RZI*C2R)
    -        S1R = C2R
    -        S1I = C2I
    -        C2R = S2R*C1R
    -        C2I = S2I*C1R
    -        YR(K) = C2R
    -        YI(K) = C2I
    -        K = K - 1
    -        FN = FN - 1.0D0
    -        IF (IFLAG.GE.3) GO TO 100
    -        STR = DABS(C2R)
    -        STI = DABS(C2I)
    -        C2M = DMAX1(STR,STI)
    -        IF (C2M.LE.ASCLE) GO TO 100
    -        IFLAG = IFLAG + 1
    -        ASCLE = BRY(IFLAG)
    -        S1R = S1R*C1R
    -        S1I = S1I*C1R
    -        S2R = C2R
    -        S2I = C2I
    -        S1R = S1R*CSSR(IFLAG)
    -        S1I = S1I*CSSR(IFLAG)
    -        S2R = S2R*CSSR(IFLAG)
    -        S2I = S2I*CSSR(IFLAG)
    -        C1R = CSRR(IFLAG)
    -  100 CONTINUE
    -  110 CONTINUE
    -      RETURN
    -  120 CONTINUE
    -      IF (RS1.GT.0.0D0) GO TO 140
    -C-----------------------------------------------------------------------
    -C     SET UNDERFLOW AND UPDATE PARAMETERS
    -C-----------------------------------------------------------------------
    -      YR(ND) = ZEROR
    -      YI(ND) = ZEROI
    -      NZ = NZ + 1
    -      ND = ND - 1
    -      IF (ND.EQ.0) GO TO 110
    -      CALL ZUOIK(ZR, ZI, FNU, KODE, 1, ND, YR, YI, NUF, TOL, ELIM, ALIM)
    -      IF (NUF.LT.0) GO TO 140
    -      ND = ND - NUF
    -      NZ = NZ + NUF
    -      IF (ND.EQ.0) GO TO 110
    -      FN = FNU + DBLE(FLOAT(ND-1))
    -      IF (FN.LT.FNUL) GO TO 130
    -C      FN = CIDI
    -C      J = NUF + 1
    -C      K = MOD(J,4) + 1
    -C      S1R = CIPR(K)
    -C      S1I = CIPI(K)
    -C      IF (FN.LT.0.0D0) S1I = -S1I
    -C      STR = C2R*S1R - C2I*S1I
    -C      C2I = C2R*S1I + C2I*S1R
    -C      C2R = STR
    -      IN = INU + ND - 1
    -      IN = MOD(IN,4) + 1
    -      C2R = CAR*CIPR(IN) - SAR*CIPI(IN)
    -      C2I = CAR*CIPI(IN) + SAR*CIPR(IN)
    -      IF (ZI.LE.0.0D0) C2I = -C2I
    -      GO TO 40
    -  130 CONTINUE
    -      NLAST = ND
    -      RETURN
    -  140 CONTINUE
    -      NZ = -1
    -      RETURN
    -  150 CONTINUE
    -      IF (RS1.GT.0.0D0) GO TO 140
    -      NZ = N
    -      DO 160 I=1,N
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -  160 CONTINUE
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zunik.f b/scipy-0.10.1/scipy/special/amos/zunik.f
    deleted file mode 100644
    index 7f297c3dbe..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zunik.f
    +++ /dev/null
    @@ -1,211 +0,0 @@
    -      SUBROUTINE ZUNIK(ZRR, ZRI, FNU, IKFLG, IPMTR, TOL, INIT, PHIR,
    -     * PHII, ZETA1R, ZETA1I, ZETA2R, ZETA2I, SUMR, SUMI, CWRKR, CWRKI)
    -C***BEGIN PROLOGUE  ZUNIK
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C        ZUNIK COMPUTES PARAMETERS FOR THE UNIFORM ASYMPTOTIC
    -C        EXPANSIONS OF THE I AND K FUNCTIONS ON IKFLG= 1 OR 2
    -C        RESPECTIVELY BY
    -C
    -C        W(FNU,ZR) = PHI*EXP(ZETA)*SUM
    -C
    -C        WHERE       ZETA=-ZETA1 + ZETA2       OR
    -C                          ZETA1 - ZETA2
    -C
    -C        THE FIRST CALL MUST HAVE INIT=0. SUBSEQUENT CALLS WITH THE
    -C        SAME ZR AND FNU WILL RETURN THE I OR K FUNCTION ON IKFLG=
    -C        1 OR 2 WITH NO CHANGE IN INIT. CWRK IS A COMPLEX WORK
    -C        ARRAY. IPMTR=0 COMPUTES ALL PARAMETERS. IPMTR=1 COMPUTES PHI,
    -C        ZETA1,ZETA2.
    -C
    -C***ROUTINES CALLED  ZDIV,AZLOG,AZSQRT,D1MACH
    -C***END PROLOGUE  ZUNIK
    -C     COMPLEX CFN,CON,CONE,CRFN,CWRK,CZERO,PHI,S,SR,SUM,T,T2,ZETA1,
    -C    *ZETA2,ZN,ZR
    -      DOUBLE PRECISION AC, C, CON, CONEI, CONER, CRFNI, CRFNR, CWRKI,
    -     * CWRKR, FNU, PHII, PHIR, RFN, SI, SR, SRI, SRR, STI, STR, SUMI,
    -     * SUMR, TEST, TI, TOL, TR, T2I, T2R, ZEROI, ZEROR, ZETA1I, ZETA1R,
    -     * ZETA2I, ZETA2R, ZNI, ZNR, ZRI, ZRR, D1MACH
    -      INTEGER I, IDUM, IKFLG, INIT, IPMTR, J, K, L
    -      DIMENSION C(120), CWRKR(16), CWRKI(16), CON(2)
    -      DATA ZEROR,ZEROI,CONER,CONEI / 0.0D0, 0.0D0, 1.0D0, 0.0D0 /
    -      DATA CON(1), CON(2)  /
    -     1 3.98942280401432678D-01,  1.25331413731550025D+00 /
    -      DATA C(1), C(2), C(3), C(4), C(5), C(6), C(7), C(8), C(9), C(10),
    -     1     C(11), C(12), C(13), C(14), C(15), C(16), C(17), C(18),
    -     2     C(19), C(20), C(21), C(22), C(23), C(24)/
    -     3     1.00000000000000000D+00,    -2.08333333333333333D-01,
    -     4     1.25000000000000000D-01,     3.34201388888888889D-01,
    -     5    -4.01041666666666667D-01,     7.03125000000000000D-02,
    -     6    -1.02581259645061728D+00,     1.84646267361111111D+00,
    -     7    -8.91210937500000000D-01,     7.32421875000000000D-02,
    -     8     4.66958442342624743D+00,    -1.12070026162229938D+01,
    -     9     8.78912353515625000D+00,    -2.36408691406250000D+00,
    -     A     1.12152099609375000D-01,    -2.82120725582002449D+01,
    -     B     8.46362176746007346D+01,    -9.18182415432400174D+01,
    -     C     4.25349987453884549D+01,    -7.36879435947963170D+00,
    -     D     2.27108001708984375D-01,     2.12570130039217123D+02,
    -     E    -7.65252468141181642D+02,     1.05999045252799988D+03/
    -      DATA C(25), C(26), C(27), C(28), C(29), C(30), C(31), C(32),
    -     1     C(33), C(34), C(35), C(36), C(37), C(38), C(39), C(40),
    -     2     C(41), C(42), C(43), C(44), C(45), C(46), C(47), C(48)/
    -     3    -6.99579627376132541D+02,     2.18190511744211590D+02,
    -     4    -2.64914304869515555D+01,     5.72501420974731445D-01,
    -     5    -1.91945766231840700D+03,     8.06172218173730938D+03,
    -     6    -1.35865500064341374D+04,     1.16553933368645332D+04,
    -     7    -5.30564697861340311D+03,     1.20090291321635246D+03,
    -     8    -1.08090919788394656D+02,     1.72772750258445740D+00,
    -     9     2.02042913309661486D+04,    -9.69805983886375135D+04,
    -     A     1.92547001232531532D+05,    -2.03400177280415534D+05,
    -     B     1.22200464983017460D+05,    -4.11926549688975513D+04,
    -     C     7.10951430248936372D+03,    -4.93915304773088012D+02,
    -     D     6.07404200127348304D+00,    -2.42919187900551333D+05,
    -     E     1.31176361466297720D+06,    -2.99801591853810675D+06/
    -      DATA C(49), C(50), C(51), C(52), C(53), C(54), C(55), C(56),
    -     1     C(57), C(58), C(59), C(60), C(61), C(62), C(63), C(64),
    -     2     C(65), C(66), C(67), C(68), C(69), C(70), C(71), C(72)/
    -     3     3.76327129765640400D+06,    -2.81356322658653411D+06,
    -     4     1.26836527332162478D+06,    -3.31645172484563578D+05,
    -     5     4.52187689813627263D+04,    -2.49983048181120962D+03,
    -     6     2.43805296995560639D+01,     3.28446985307203782D+06,
    -     7    -1.97068191184322269D+07,     5.09526024926646422D+07,
    -     8    -7.41051482115326577D+07,     6.63445122747290267D+07,
    -     9    -3.75671766607633513D+07,     1.32887671664218183D+07,
    -     A    -2.78561812808645469D+06,     3.08186404612662398D+05,
    -     B    -1.38860897537170405D+04,     1.10017140269246738D+02,
    -     C    -4.93292536645099620D+07,     3.25573074185765749D+08,
    -     D    -9.39462359681578403D+08,     1.55359689957058006D+09,
    -     E    -1.62108055210833708D+09,     1.10684281682301447D+09/
    -      DATA C(73), C(74), C(75), C(76), C(77), C(78), C(79), C(80),
    -     1     C(81), C(82), C(83), C(84), C(85), C(86), C(87), C(88),
    -     2     C(89), C(90), C(91), C(92), C(93), C(94), C(95), C(96)/
    -     3    -4.95889784275030309D+08,     1.42062907797533095D+08,
    -     4    -2.44740627257387285D+07,     2.24376817792244943D+06,
    -     5    -8.40054336030240853D+04,     5.51335896122020586D+02,
    -     6     8.14789096118312115D+08,    -5.86648149205184723D+09,
    -     7     1.86882075092958249D+10,    -3.46320433881587779D+10,
    -     8     4.12801855797539740D+10,    -3.30265997498007231D+10,
    -     9     1.79542137311556001D+10,    -6.56329379261928433D+09,
    -     A     1.55927986487925751D+09,    -2.25105661889415278D+08,
    -     B     1.73951075539781645D+07,    -5.49842327572288687D+05,
    -     C     3.03809051092238427D+03,    -1.46792612476956167D+10,
    -     D     1.14498237732025810D+11,    -3.99096175224466498D+11,
    -     E     8.19218669548577329D+11,    -1.09837515608122331D+12/
    -      DATA C(97), C(98), C(99), C(100), C(101), C(102), C(103), C(104),
    -     1     C(105), C(106), C(107), C(108), C(109), C(110), C(111),
    -     2     C(112), C(113), C(114), C(115), C(116), C(117), C(118)/
    -     3     1.00815810686538209D+12,    -6.45364869245376503D+11,
    -     4     2.87900649906150589D+11,    -8.78670721780232657D+10,
    -     5     1.76347306068349694D+10,    -2.16716498322379509D+09,
    -     6     1.43157876718888981D+08,    -3.87183344257261262D+06,
    -     7     1.82577554742931747D+04,     2.86464035717679043D+11,
    -     8    -2.40629790002850396D+12,     9.10934118523989896D+12,
    -     9    -2.05168994109344374D+13,     3.05651255199353206D+13,
    -     A    -3.16670885847851584D+13,     2.33483640445818409D+13,
    -     B    -1.23204913055982872D+13,     4.61272578084913197D+12,
    -     C    -1.19655288019618160D+12,     2.05914503232410016D+11,
    -     D    -2.18229277575292237D+10,     1.24700929351271032D+09/
    -      DATA C(119), C(120)/
    -     1    -2.91883881222208134D+07,     1.18838426256783253D+05/
    -C
    -      IF (INIT.NE.0) GO TO 40
    -C-----------------------------------------------------------------------
    -C     INITIALIZE ALL VARIABLES
    -C-----------------------------------------------------------------------
    -      RFN = 1.0D0/FNU
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST (ZR/FNU TOO SMALL)
    -C-----------------------------------------------------------------------
    -      TEST = D1MACH(1)*1.0D+3
    -      AC = FNU*TEST
    -      IF (DABS(ZRR).GT.AC .OR. DABS(ZRI).GT.AC) GO TO 15
    -      ZETA1R = 2.0D0*DABS(DLOG(TEST))+FNU
    -      ZETA1I = 0.0D0
    -      ZETA2R = FNU
    -      ZETA2I = 0.0D0
    -      PHIR = 1.0D0
    -      PHII = 0.0D0
    -      RETURN
    -   15 CONTINUE
    -      TR = ZRR*RFN
    -      TI = ZRI*RFN
    -      SR = CONER + (TR*TR-TI*TI)
    -      SI = CONEI + (TR*TI+TI*TR)
    -      CALL AZSQRT(SR, SI, SRR, SRI)
    -      STR = CONER + SRR
    -      STI = CONEI + SRI
    -      CALL ZDIV(STR, STI, TR, TI, ZNR, ZNI)
    -      CALL AZLOG(ZNR, ZNI, STR, STI, IDUM)
    -      ZETA1R = FNU*STR
    -      ZETA1I = FNU*STI
    -      ZETA2R = FNU*SRR
    -      ZETA2I = FNU*SRI
    -      CALL ZDIV(CONER, CONEI, SRR, SRI, TR, TI)
    -      SRR = TR*RFN
    -      SRI = TI*RFN
    -      CALL AZSQRT(SRR, SRI, CWRKR(16), CWRKI(16))
    -      PHIR = CWRKR(16)*CON(IKFLG)
    -      PHII = CWRKI(16)*CON(IKFLG)
    -      IF (IPMTR.NE.0) RETURN
    -      CALL ZDIV(CONER, CONEI, SR, SI, T2R, T2I)
    -      CWRKR(1) = CONER
    -      CWRKI(1) = CONEI
    -      CRFNR = CONER
    -      CRFNI = CONEI
    -      AC = 1.0D0
    -      L = 1
    -      DO 20 K=2,15
    -        SR = ZEROR
    -        SI = ZEROI
    -        DO 10 J=1,K
    -          L = L + 1
    -          STR = SR*T2R - SI*T2I + C(L)
    -          SI = SR*T2I + SI*T2R
    -          SR = STR
    -   10   CONTINUE
    -        STR = CRFNR*SRR - CRFNI*SRI
    -        CRFNI = CRFNR*SRI + CRFNI*SRR
    -        CRFNR = STR
    -        CWRKR(K) = CRFNR*SR - CRFNI*SI
    -        CWRKI(K) = CRFNR*SI + CRFNI*SR
    -        AC = AC*RFN
    -        TEST = DABS(CWRKR(K)) + DABS(CWRKI(K))
    -        IF (AC.LT.TOL .AND. TEST.LT.TOL) GO TO 30
    -   20 CONTINUE
    -      K = 15
    -   30 CONTINUE
    -      INIT = K
    -   40 CONTINUE
    -      IF (IKFLG.EQ.2) GO TO 60
    -C-----------------------------------------------------------------------
    -C     COMPUTE SUM FOR THE I FUNCTION
    -C-----------------------------------------------------------------------
    -      SR = ZEROR
    -      SI = ZEROI
    -      DO 50 I=1,INIT
    -        SR = SR + CWRKR(I)
    -        SI = SI + CWRKI(I)
    -   50 CONTINUE
    -      SUMR = SR
    -      SUMI = SI
    -      PHIR = CWRKR(16)*CON(1)
    -      PHII = CWRKI(16)*CON(1)
    -      RETURN
    -   60 CONTINUE
    -C-----------------------------------------------------------------------
    -C     COMPUTE SUM FOR THE K FUNCTION
    -C-----------------------------------------------------------------------
    -      SR = ZEROR
    -      SI = ZEROI
    -      TR = CONER
    -      DO 70 I=1,INIT
    -        SR = SR + TR*CWRKR(I)
    -        SI = SI + TR*CWRKI(I)
    -        TR = -TR
    -   70 CONTINUE
    -      SUMR = SR
    -      SUMI = SI
    -      PHIR = CWRKR(16)*CON(2)
    -      PHII = CWRKI(16)*CON(2)
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zunk1.f b/scipy-0.10.1/scipy/special/amos/zunk1.f
    deleted file mode 100644
    index 5457d0763b..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zunk1.f
    +++ /dev/null
    @@ -1,426 +0,0 @@
    -      SUBROUTINE ZUNK1(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, TOL, ELIM,
    -     * ALIM)
    -C***BEGIN PROLOGUE  ZUNK1
    -C***REFER TO  ZBESK
    -C
    -C     ZUNK1 COMPUTES K(FNU,Z) AND ITS ANALYTIC CONTINUATION FROM THE
    -C     RIGHT HALF PLANE TO THE LEFT HALF PLANE BY MEANS OF THE
    -C     UNIFORM ASYMPTOTIC EXPANSION.
    -C     MR INDICATES THE DIRECTION OF ROTATION FOR ANALYTIC CONTINUATION.
    -C     NZ=-1 MEANS AN OVERFLOW WILL OCCUR
    -C
    -C***ROUTINES CALLED  ZKSCL,ZS1S2,ZUCHK,ZUNIK,D1MACH,AZABS
    -C***END PROLOGUE  ZUNK1
    -C     COMPLEX CFN,CK,CONE,CRSC,CS,CSCL,CSGN,CSPN,CSR,CSS,CWRK,CY,CZERO,
    -C    *C1,C2,PHI,PHID,RZ,SUM,SUMD,S1,S2,Y,Z,ZETA1,ZETA1D,ZETA2,ZETA2D,ZR
    -      DOUBLE PRECISION ALIM, ANG, APHI, ASC, ASCLE, BRY, CKI, CKR,
    -     * CONER, CRSC, CSCL, CSGNI, CSPNI, CSPNR, CSR, CSRR, CSSR,
    -     * CWRKI, CWRKR, CYI, CYR, C1I, C1R, C2I, C2M, C2R, ELIM, FMR, FN,
    -     * FNF, FNU, PHIDI, PHIDR, PHII, PHIR, PI, RAST, RAZR, RS1, RZI,
    -     * RZR, SGN, STI, STR, SUMDI, SUMDR, SUMI, SUMR, S1I, S1R, S2I,
    -     * S2R, TOL, YI, YR, ZEROI, ZEROR, ZETA1I, ZETA1R, ZETA2I, ZETA2R,
    -     * ZET1DI, ZET1DR, ZET2DI, ZET2DR, ZI, ZR, ZRI, ZRR, D1MACH, AZABS
    -      INTEGER I, IB, IFLAG, IFN, IL, INIT, INU, IUF, K, KDFLG, KFLAG,
    -     * KK, KODE, MR, N, NW, NZ, INITD, IC, IPARD, J
    -      DIMENSION BRY(3), INIT(2), YR(N), YI(N), SUMR(2), SUMI(2),
    -     * ZETA1R(2), ZETA1I(2), ZETA2R(2), ZETA2I(2), CYR(2), CYI(2),
    -     * CWRKR(16,3), CWRKI(16,3), CSSR(3), CSRR(3), PHIR(2), PHII(2)
    -      DATA ZEROR,ZEROI,CONER / 0.0D0, 0.0D0, 1.0D0 /
    -      DATA PI / 3.14159265358979324D0 /
    -C
    -      KDFLG = 1
    -      NZ = 0
    -C-----------------------------------------------------------------------
    -C     EXP(-ALIM)=EXP(-ELIM)/TOL=APPROX. ONE PRECISION GREATER THAN
    -C     THE UNDERFLOW LIMIT
    -C-----------------------------------------------------------------------
    -      CSCL = 1.0D0/TOL
    -      CRSC = TOL
    -      CSSR(1) = CSCL
    -      CSSR(2) = CONER
    -      CSSR(3) = CRSC
    -      CSRR(1) = CRSC
    -      CSRR(2) = CONER
    -      CSRR(3) = CSCL
    -      BRY(1) = 1.0D+3*D1MACH(1)/TOL
    -      BRY(2) = 1.0D0/BRY(1)
    -      BRY(3) = D1MACH(2)
    -      ZRR = ZR
    -      ZRI = ZI
    -      IF (ZR.GE.0.0D0) GO TO 10
    -      ZRR = -ZR
    -      ZRI = -ZI
    -   10 CONTINUE
    -      J = 2
    -      DO 70 I=1,N
    -C-----------------------------------------------------------------------
    -C     J FLIP FLOPS BETWEEN 1 AND 2 IN J = 3 - J
    -C-----------------------------------------------------------------------
    -        J = 3 - J
    -        FN = FNU + DBLE(FLOAT(I-1))
    -        INIT(J) = 0
    -        CALL ZUNIK(ZRR, ZRI, FN, 2, 0, TOL, INIT(J), PHIR(J), PHII(J),
    -     *   ZETA1R(J), ZETA1I(J), ZETA2R(J), ZETA2I(J), SUMR(J), SUMI(J),
    -     *   CWRKR(1,J), CWRKI(1,J))
    -        IF (KODE.EQ.1) GO TO 20
    -        STR = ZRR + ZETA2R(J)
    -        STI = ZRI + ZETA2I(J)
    -        RAST = FN/AZABS(STR,STI)
    -        STR = STR*RAST*RAST
    -        STI = -STI*RAST*RAST
    -        S1R = ZETA1R(J) - STR
    -        S1I = ZETA1I(J) - STI
    -        GO TO 30
    -   20   CONTINUE
    -        S1R = ZETA1R(J) - ZETA2R(J)
    -        S1I = ZETA1I(J) - ZETA2I(J)
    -   30   CONTINUE
    -        RS1 = S1R
    -C-----------------------------------------------------------------------
    -C     TEST FOR UNDERFLOW AND OVERFLOW
    -C-----------------------------------------------------------------------
    -        IF (DABS(RS1).GT.ELIM) GO TO 60
    -        IF (KDFLG.EQ.1) KFLAG = 2
    -        IF (DABS(RS1).LT.ALIM) GO TO 40
    -C-----------------------------------------------------------------------
    -C     REFINE  TEST AND SCALE
    -C-----------------------------------------------------------------------
    -        APHI = AZABS(PHIR(J),PHII(J))
    -        RS1 = RS1 + DLOG(APHI)
    -        IF (DABS(RS1).GT.ELIM) GO TO 60
    -        IF (KDFLG.EQ.1) KFLAG = 1
    -        IF (RS1.LT.0.0D0) GO TO 40
    -        IF (KDFLG.EQ.1) KFLAG = 3
    -   40   CONTINUE
    -C-----------------------------------------------------------------------
    -C     SCALE S1 TO KEEP INTERMEDIATE ARITHMETIC ON SCALE NEAR
    -C     EXPONENT EXTREMES
    -C-----------------------------------------------------------------------
    -        S2R = PHIR(J)*SUMR(J) - PHII(J)*SUMI(J)
    -        S2I = PHIR(J)*SUMI(J) + PHII(J)*SUMR(J)
    -        STR = DEXP(S1R)*CSSR(KFLAG)
    -        S1R = STR*DCOS(S1I)
    -        S1I = STR*DSIN(S1I)
    -        STR = S2R*S1R - S2I*S1I
    -        S2I = S1R*S2I + S2R*S1I
    -        S2R = STR
    -        IF (KFLAG.NE.1) GO TO 50
    -        CALL ZUCHK(S2R, S2I, NW, BRY(1), TOL)
    -        IF (NW.NE.0) GO TO 60
    -   50   CONTINUE
    -        CYR(KDFLG) = S2R
    -        CYI(KDFLG) = S2I
    -        YR(I) = S2R*CSRR(KFLAG)
    -        YI(I) = S2I*CSRR(KFLAG)
    -        IF (KDFLG.EQ.2) GO TO 75
    -        KDFLG = 2
    -        GO TO 70
    -   60   CONTINUE
    -        IF (RS1.GT.0.0D0) GO TO 300
    -C-----------------------------------------------------------------------
    -C     FOR ZR.LT.0.0, THE I FUNCTION TO BE ADDED WILL OVERFLOW
    -C-----------------------------------------------------------------------
    -        IF (ZR.LT.0.0D0) GO TO 300
    -        KDFLG = 1
    -        YR(I)=ZEROR
    -        YI(I)=ZEROI
    -        NZ=NZ+1
    -        IF (I.EQ.1) GO TO 70
    -        IF ((YR(I-1).EQ.ZEROR).AND.(YI(I-1).EQ.ZEROI)) GO TO 70
    -        YR(I-1)=ZEROR
    -        YI(I-1)=ZEROI
    -        NZ=NZ+1
    -   70 CONTINUE
    -      I = N
    -   75 CONTINUE
    -      RAZR = 1.0D0/AZABS(ZRR,ZRI)
    -      STR = ZRR*RAZR
    -      STI = -ZRI*RAZR
    -      RZR = (STR+STR)*RAZR
    -      RZI = (STI+STI)*RAZR
    -      CKR = FN*RZR
    -      CKI = FN*RZI
    -      IB = I + 1
    -      IF (N.LT.IB) GO TO 160
    -C-----------------------------------------------------------------------
    -C     TEST LAST MEMBER FOR UNDERFLOW AND OVERFLOW. SET SEQUENCE TO ZERO
    -C     ON UNDERFLOW.
    -C-----------------------------------------------------------------------
    -      FN = FNU + DBLE(FLOAT(N-1))
    -      IPARD = 1
    -      IF (MR.NE.0) IPARD = 0
    -      INITD = 0
    -      CALL ZUNIK(ZRR, ZRI, FN, 2, IPARD, TOL, INITD, PHIDR, PHIDI,
    -     * ZET1DR, ZET1DI, ZET2DR, ZET2DI, SUMDR, SUMDI, CWRKR(1,3),
    -     * CWRKI(1,3))
    -      IF (KODE.EQ.1) GO TO 80
    -      STR = ZRR + ZET2DR
    -      STI = ZRI + ZET2DI
    -      RAST = FN/AZABS(STR,STI)
    -      STR = STR*RAST*RAST
    -      STI = -STI*RAST*RAST
    -      S1R = ZET1DR - STR
    -      S1I = ZET1DI - STI
    -      GO TO 90
    -   80 CONTINUE
    -      S1R = ZET1DR - ZET2DR
    -      S1I = ZET1DI - ZET2DI
    -   90 CONTINUE
    -      RS1 = S1R
    -      IF (DABS(RS1).GT.ELIM) GO TO 95
    -      IF (DABS(RS1).LT.ALIM) GO TO 100
    -C----------------------------------------------------------------------------
    -C     REFINE ESTIMATE AND TEST
    -C-------------------------------------------------------------------------
    -      APHI = AZABS(PHIDR,PHIDI)
    -      RS1 = RS1+DLOG(APHI)
    -      IF (DABS(RS1).LT.ELIM) GO TO 100
    -   95 CONTINUE
    -      IF (DABS(RS1).GT.0.0D0) GO TO 300
    -C-----------------------------------------------------------------------
    -C     FOR ZR.LT.0.0, THE I FUNCTION TO BE ADDED WILL OVERFLOW
    -C-----------------------------------------------------------------------
    -      IF (ZR.LT.0.0D0) GO TO 300
    -      NZ = N
    -      DO 96 I=1,N
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -   96 CONTINUE
    -      RETURN
    -C---------------------------------------------------------------------------
    -C     FORWARD RECUR FOR REMAINDER OF THE SEQUENCE
    -C----------------------------------------------------------------------------
    -  100 CONTINUE
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      C1R = CSRR(KFLAG)
    -      ASCLE = BRY(KFLAG)
    -      DO 120 I=IB,N
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = CKR*C2R - CKI*C2I + S1R
    -        S2I = CKR*C2I + CKI*C2R + S1I
    -        S1R = C2R
    -        S1I = C2I
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        C2R = S2R*C1R
    -        C2I = S2I*C1R
    -        YR(I) = C2R
    -        YI(I) = C2I
    -        IF (KFLAG.GE.3) GO TO 120
    -        STR = DABS(C2R)
    -        STI = DABS(C2I)
    -        C2M = DMAX1(STR,STI)
    -        IF (C2M.LE.ASCLE) GO TO 120
    -        KFLAG = KFLAG + 1
    -        ASCLE = BRY(KFLAG)
    -        S1R = S1R*C1R
    -        S1I = S1I*C1R
    -        S2R = C2R
    -        S2I = C2I
    -        S1R = S1R*CSSR(KFLAG)
    -        S1I = S1I*CSSR(KFLAG)
    -        S2R = S2R*CSSR(KFLAG)
    -        S2I = S2I*CSSR(KFLAG)
    -        C1R = CSRR(KFLAG)
    -  120 CONTINUE
    -  160 CONTINUE
    -      IF (MR.EQ.0) RETURN
    -C-----------------------------------------------------------------------
    -C     ANALYTIC CONTINUATION FOR RE(Z).LT.0.0D0
    -C-----------------------------------------------------------------------
    -      NZ = 0
    -      FMR = DBLE(FLOAT(MR))
    -      SGN = -DSIGN(PI,FMR)
    -C-----------------------------------------------------------------------
    -C     CSPN AND CSGN ARE COEFF OF K AND I FUNCTIONS RESP.
    -C-----------------------------------------------------------------------
    -      CSGNI = SGN
    -      INU = INT(SNGL(FNU))
    -      FNF = FNU - DBLE(FLOAT(INU))
    -      IFN = INU + N - 1
    -      ANG = FNF*SGN
    -      CSPNR = DCOS(ANG)
    -      CSPNI = DSIN(ANG)
    -      IF (MOD(IFN,2).EQ.0) GO TO 170
    -      CSPNR = -CSPNR
    -      CSPNI = -CSPNI
    -  170 CONTINUE
    -      ASC = BRY(1)
    -      IUF = 0
    -      KK = N
    -      KDFLG = 1
    -      IB = IB - 1
    -      IC = IB - 1
    -      DO 270 K=1,N
    -        FN = FNU + DBLE(FLOAT(KK-1))
    -C-----------------------------------------------------------------------
    -C     LOGIC TO SORT OUT CASES WHOSE PARAMETERS WERE SET FOR THE K
    -C     FUNCTION ABOVE
    -C-----------------------------------------------------------------------
    -        M=3
    -        IF (N.GT.2) GO TO 175
    -  172   CONTINUE
    -        INITD = INIT(J)
    -        PHIDR = PHIR(J)
    -        PHIDI = PHII(J)
    -        ZET1DR = ZETA1R(J)
    -        ZET1DI = ZETA1I(J)
    -        ZET2DR = ZETA2R(J)
    -        ZET2DI = ZETA2I(J)
    -        SUMDR = SUMR(J)
    -        SUMDI = SUMI(J)
    -        M = J
    -        J = 3 - J
    -        GO TO 180
    -  175   CONTINUE
    -        IF ((KK.EQ.N).AND.(IB.LT.N)) GO TO 180
    -        IF ((KK.EQ.IB).OR.(KK.EQ.IC)) GO TO 172
    -        INITD = 0
    -  180   CONTINUE
    -        CALL ZUNIK(ZRR, ZRI, FN, 1, 0, TOL, INITD, PHIDR, PHIDI,
    -     *   ZET1DR, ZET1DI, ZET2DR, ZET2DI, SUMDR, SUMDI,
    -     *   CWRKR(1,M), CWRKI(1,M))
    -        IF (KODE.EQ.1) GO TO 200
    -        STR = ZRR + ZET2DR
    -        STI = ZRI + ZET2DI
    -        RAST = FN/AZABS(STR,STI)
    -        STR = STR*RAST*RAST
    -        STI = -STI*RAST*RAST
    -        S1R = -ZET1DR + STR
    -        S1I = -ZET1DI + STI
    -        GO TO 210
    -  200   CONTINUE
    -        S1R = -ZET1DR + ZET2DR
    -        S1I = -ZET1DI + ZET2DI
    -  210   CONTINUE
    -C-----------------------------------------------------------------------
    -C     TEST FOR UNDERFLOW AND OVERFLOW
    -C-----------------------------------------------------------------------
    -        RS1 = S1R
    -        IF (DABS(RS1).GT.ELIM) GO TO 260
    -        IF (KDFLG.EQ.1) IFLAG = 2
    -        IF (DABS(RS1).LT.ALIM) GO TO 220
    -C-----------------------------------------------------------------------
    -C     REFINE  TEST AND SCALE
    -C-----------------------------------------------------------------------
    -        APHI = AZABS(PHIDR,PHIDI)
    -        RS1 = RS1 + DLOG(APHI)
    -        IF (DABS(RS1).GT.ELIM) GO TO 260
    -        IF (KDFLG.EQ.1) IFLAG = 1
    -        IF (RS1.LT.0.0D0) GO TO 220
    -        IF (KDFLG.EQ.1) IFLAG = 3
    -  220   CONTINUE
    -        STR = PHIDR*SUMDR - PHIDI*SUMDI
    -        STI = PHIDR*SUMDI + PHIDI*SUMDR
    -        S2R = -CSGNI*STI
    -        S2I = CSGNI*STR
    -        STR = DEXP(S1R)*CSSR(IFLAG)
    -        S1R = STR*DCOS(S1I)
    -        S1I = STR*DSIN(S1I)
    -        STR = S2R*S1R - S2I*S1I
    -        S2I = S2R*S1I + S2I*S1R
    -        S2R = STR
    -        IF (IFLAG.NE.1) GO TO 230
    -        CALL ZUCHK(S2R, S2I, NW, BRY(1), TOL)
    -        IF (NW.EQ.0) GO TO 230
    -        S2R = ZEROR
    -        S2I = ZEROI
    -  230   CONTINUE
    -        CYR(KDFLG) = S2R
    -        CYI(KDFLG) = S2I
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = S2R*CSRR(IFLAG)
    -        S2I = S2I*CSRR(IFLAG)
    -C-----------------------------------------------------------------------
    -C     ADD I AND K FUNCTIONS, K SEQUENCE IN Y(I), I=1,N
    -C-----------------------------------------------------------------------
    -        S1R = YR(KK)
    -        S1I = YI(KK)
    -        IF (KODE.EQ.1) GO TO 250
    -        CALL ZS1S2(ZRR, ZRI, S1R, S1I, S2R, S2I, NW, ASC, ALIM, IUF)
    -        NZ = NZ + NW
    -  250   CONTINUE
    -        YR(KK) = S1R*CSPNR - S1I*CSPNI + S2R
    -        YI(KK) = CSPNR*S1I + CSPNI*S1R + S2I
    -        KK = KK - 1
    -        CSPNR = -CSPNR
    -        CSPNI = -CSPNI
    -        IF (C2R.NE.0.0D0 .OR. C2I.NE.0.0D0) GO TO 255
    -        KDFLG = 1
    -        GO TO 270
    -  255   CONTINUE
    -        IF (KDFLG.EQ.2) GO TO 275
    -        KDFLG = 2
    -        GO TO 270
    -  260   CONTINUE
    -        IF (RS1.GT.0.0D0) GO TO 300
    -        S2R = ZEROR
    -        S2I = ZEROI
    -        GO TO 230
    -  270 CONTINUE
    -      K = N
    -  275 CONTINUE
    -      IL = N - K
    -      IF (IL.EQ.0) RETURN
    -C-----------------------------------------------------------------------
    -C     RECUR BACKWARD FOR REMAINDER OF I SEQUENCE AND ADD IN THE
    -C     K FUNCTIONS, SCALING THE I SEQUENCE DURING RECURRENCE TO KEEP
    -C     INTERMEDIATE ARITHMETIC ON SCALE NEAR EXPONENT EXTREMES.
    -C-----------------------------------------------------------------------
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      CSR = CSRR(IFLAG)
    -      ASCLE = BRY(IFLAG)
    -      FN = DBLE(FLOAT(INU+IL))
    -      DO 290 I=1,IL
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = S1R + (FN+FNF)*(RZR*C2R-RZI*C2I)
    -        S2I = S1I + (FN+FNF)*(RZR*C2I+RZI*C2R)
    -        S1R = C2R
    -        S1I = C2I
    -        FN = FN - 1.0D0
    -        C2R = S2R*CSR
    -        C2I = S2I*CSR
    -        CKR = C2R
    -        CKI = C2I
    -        C1R = YR(KK)
    -        C1I = YI(KK)
    -        IF (KODE.EQ.1) GO TO 280
    -        CALL ZS1S2(ZRR, ZRI, C1R, C1I, C2R, C2I, NW, ASC, ALIM, IUF)
    -        NZ = NZ + NW
    -  280   CONTINUE
    -        YR(KK) = C1R*CSPNR - C1I*CSPNI + C2R
    -        YI(KK) = C1R*CSPNI + C1I*CSPNR + C2I
    -        KK = KK - 1
    -        CSPNR = -CSPNR
    -        CSPNI = -CSPNI
    -        IF (IFLAG.GE.3) GO TO 290
    -        C2R = DABS(CKR)
    -        C2I = DABS(CKI)
    -        C2M = DMAX1(C2R,C2I)
    -        IF (C2M.LE.ASCLE) GO TO 290
    -        IFLAG = IFLAG + 1
    -        ASCLE = BRY(IFLAG)
    -        S1R = S1R*CSR
    -        S1I = S1I*CSR
    -        S2R = CKR
    -        S2I = CKI
    -        S1R = S1R*CSSR(IFLAG)
    -        S1I = S1I*CSSR(IFLAG)
    -        S2R = S2R*CSSR(IFLAG)
    -        S2I = S2I*CSSR(IFLAG)
    -        CSR = CSRR(IFLAG)
    -  290 CONTINUE
    -      RETURN
    -  300 CONTINUE
    -      NZ = -1
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zunk2.f b/scipy-0.10.1/scipy/special/amos/zunk2.f
    deleted file mode 100644
    index 8ac2567e44..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zunk2.f
    +++ /dev/null
    @@ -1,505 +0,0 @@
    -      SUBROUTINE ZUNK2(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, TOL, ELIM,
    -     * ALIM)
    -C***BEGIN PROLOGUE  ZUNK2
    -C***REFER TO  ZBESK
    -C
    -C     ZUNK2 COMPUTES K(FNU,Z) AND ITS ANALYTIC CONTINUATION FROM THE
    -C     RIGHT HALF PLANE TO THE LEFT HALF PLANE BY MEANS OF THE
    -C     UNIFORM ASYMPTOTIC EXPANSIONS FOR H(KIND,FNU,ZN) AND J(FNU,ZN)
    -C     WHERE ZN IS IN THE RIGHT HALF PLANE, KIND=(3-MR)/2, MR=+1 OR
    -C     -1. HERE ZN=ZR*I OR -ZR*I WHERE ZR=Z IF Z IS IN THE RIGHT
    -C     HALF PLANE OR ZR=-Z IF Z IS IN THE LEFT HALF PLANE. MR INDIC-
    -C     ATES THE DIRECTION OF ROTATION FOR ANALYTIC CONTINUATION.
    -C     NZ=-1 MEANS AN OVERFLOW WILL OCCUR
    -C
    -C***ROUTINES CALLED  ZAIRY,ZKSCL,ZS1S2,ZUCHK,ZUNHJ,D1MACH,AZABS
    -C***END PROLOGUE  ZUNK2
    -C     COMPLEX AI,ARG,ARGD,ASUM,ASUMD,BSUM,BSUMD,CFN,CI,CIP,CK,CONE,CRSC,
    -C    *CR1,CR2,CS,CSCL,CSGN,CSPN,CSR,CSS,CY,CZERO,C1,C2,DAI,PHI,PHID,RZ,
    -C    *S1,S2,Y,Z,ZB,ZETA1,ZETA1D,ZETA2,ZETA2D,ZN,ZR
    -      DOUBLE PRECISION AARG, AIC, AII, AIR, ALIM, ANG, APHI, ARGDI,
    -     * ARGDR, ARGI, ARGR, ASC, ASCLE, ASUMDI, ASUMDR, ASUMI, ASUMR,
    -     * BRY, BSUMDI, BSUMDR, BSUMI, BSUMR, CAR, CIPI, CIPR, CKI, CKR,
    -     * CONER, CRSC, CR1I, CR1R, CR2I, CR2R, CSCL, CSGNI, CSI,
    -     * CSPNI, CSPNR, CSR, CSRR, CSSR, CYI, CYR, C1I, C1R, C2I, C2M,
    -     * C2R, DAII, DAIR, ELIM, FMR, FN, FNF, FNU, HPI, PHIDI, PHIDR,
    -     * PHII, PHIR, PI, PTI, PTR, RAST, RAZR, RS1, RZI, RZR, SAR, SGN,
    -     * STI, STR, S1I, S1R, S2I, S2R, TOL, YI, YR, YY, ZBI, ZBR, ZEROI,
    -     * ZEROR, ZETA1I, ZETA1R, ZETA2I, ZETA2R, ZET1DI, ZET1DR, ZET2DI,
    -     * ZET2DR, ZI, ZNI, ZNR, ZR, ZRI, ZRR, D1MACH, AZABS
    -      INTEGER I, IB, IFLAG, IFN, IL, IN, INU, IUF, K, KDFLG, KFLAG, KK,
    -     * KODE, MR, N, NAI, NDAI, NW, NZ, IDUM, J, IPARD, IC
    -      DIMENSION BRY(3), YR(N), YI(N), ASUMR(2), ASUMI(2), BSUMR(2),
    -     * BSUMI(2), PHIR(2), PHII(2), ARGR(2), ARGI(2), ZETA1R(2),
    -     * ZETA1I(2), ZETA2R(2), ZETA2I(2), CYR(2), CYI(2), CIPR(4),
    -     * CIPI(4), CSSR(3), CSRR(3)
    -      DATA ZEROR,ZEROI,CONER,CR1R,CR1I,CR2R,CR2I /
    -     1         0.0D0, 0.0D0, 1.0D0,
    -     1 1.0D0,1.73205080756887729D0 , -0.5D0,-8.66025403784438647D-01 /
    -      DATA HPI, PI, AIC /
    -     1     1.57079632679489662D+00,     3.14159265358979324D+00,
    -     1     1.26551212348464539D+00/
    -      DATA CIPR(1),CIPI(1),CIPR(2),CIPI(2),CIPR(3),CIPI(3),CIPR(4),
    -     * CIPI(4) /
    -     1  1.0D0,0.0D0 ,  0.0D0,-1.0D0 ,  -1.0D0,0.0D0 ,  0.0D0,1.0D0 /
    -C
    -      KDFLG = 1
    -      NZ = 0
    -C-----------------------------------------------------------------------
    -C     EXP(-ALIM)=EXP(-ELIM)/TOL=APPROX. ONE PRECISION GREATER THAN
    -C     THE UNDERFLOW LIMIT
    -C-----------------------------------------------------------------------
    -      CSCL = 1.0D0/TOL
    -      CRSC = TOL
    -      CSSR(1) = CSCL
    -      CSSR(2) = CONER
    -      CSSR(3) = CRSC
    -      CSRR(1) = CRSC
    -      CSRR(2) = CONER
    -      CSRR(3) = CSCL
    -      BRY(1) = 1.0D+3*D1MACH(1)/TOL
    -      BRY(2) = 1.0D0/BRY(1)
    -      BRY(3) = D1MACH(2)
    -      ZRR = ZR
    -      ZRI = ZI
    -      IF (ZR.GE.0.0D0) GO TO 10
    -      ZRR = -ZR
    -      ZRI = -ZI
    -   10 CONTINUE
    -      YY = ZRI
    -      ZNR = ZRI
    -      ZNI = -ZRR
    -      ZBR = ZRR
    -      ZBI = ZRI
    -      INU = INT(SNGL(FNU))
    -      FNF = FNU - DBLE(FLOAT(INU))
    -      ANG = -HPI*FNF
    -      CAR = DCOS(ANG)
    -      SAR = DSIN(ANG)
    -      C2R = HPI*SAR
    -      C2I = -HPI*CAR
    -      KK = MOD(INU,4) + 1
    -      STR = C2R*CIPR(KK) - C2I*CIPI(KK)
    -      STI = C2R*CIPI(KK) + C2I*CIPR(KK)
    -      CSR = CR1R*STR - CR1I*STI
    -      CSI = CR1R*STI + CR1I*STR
    -      IF (YY.GT.0.0D0) GO TO 20
    -      ZNR = -ZNR
    -      ZBI = -ZBI
    -   20 CONTINUE
    -C-----------------------------------------------------------------------
    -C     K(FNU,Z) IS COMPUTED FROM H(2,FNU,-I*Z) WHERE Z IS IN THE FIRST
    -C     QUADRANT. FOURTH QUADRANT VALUES (YY.LE.0.0E0) ARE COMPUTED BY
    -C     CONJUGATION SINCE THE K FUNCTION IS REAL ON THE POSITIVE REAL AXIS
    -C-----------------------------------------------------------------------
    -      J = 2
    -      DO 80 I=1,N
    -C-----------------------------------------------------------------------
    -C     J FLIP FLOPS BETWEEN 1 AND 2 IN J = 3 - J
    -C-----------------------------------------------------------------------
    -        J = 3 - J
    -        FN = FNU + DBLE(FLOAT(I-1))
    -        CALL ZUNHJ(ZNR, ZNI, FN, 0, TOL, PHIR(J), PHII(J), ARGR(J),
    -     *   ARGI(J), ZETA1R(J), ZETA1I(J), ZETA2R(J), ZETA2I(J), ASUMR(J),
    -     *   ASUMI(J), BSUMR(J), BSUMI(J))
    -        IF (KODE.EQ.1) GO TO 30
    -        STR = ZBR + ZETA2R(J)
    -        STI = ZBI + ZETA2I(J)
    -        RAST = FN/AZABS(STR,STI)
    -        STR = STR*RAST*RAST
    -        STI = -STI*RAST*RAST
    -        S1R = ZETA1R(J) - STR
    -        S1I = ZETA1I(J) - STI
    -        GO TO 40
    -   30   CONTINUE
    -        S1R = ZETA1R(J) - ZETA2R(J)
    -        S1I = ZETA1I(J) - ZETA2I(J)
    -   40   CONTINUE
    -C-----------------------------------------------------------------------
    -C     TEST FOR UNDERFLOW AND OVERFLOW
    -C-----------------------------------------------------------------------
    -        RS1 = S1R
    -        IF (DABS(RS1).GT.ELIM) GO TO 70
    -        IF (KDFLG.EQ.1) KFLAG = 2
    -        IF (DABS(RS1).LT.ALIM) GO TO 50
    -C-----------------------------------------------------------------------
    -C     REFINE  TEST AND SCALE
    -C-----------------------------------------------------------------------
    -        APHI = AZABS(PHIR(J),PHII(J))
    -        AARG = AZABS(ARGR(J),ARGI(J))
    -        RS1 = RS1 + DLOG(APHI) - 0.25D0*DLOG(AARG) - AIC
    -        IF (DABS(RS1).GT.ELIM) GO TO 70
    -        IF (KDFLG.EQ.1) KFLAG = 1
    -        IF (RS1.LT.0.0D0) GO TO 50
    -        IF (KDFLG.EQ.1) KFLAG = 3
    -   50   CONTINUE
    -C-----------------------------------------------------------------------
    -C     SCALE S1 TO KEEP INTERMEDIATE ARITHMETIC ON SCALE NEAR
    -C     EXPONENT EXTREMES
    -C-----------------------------------------------------------------------
    -        C2R = ARGR(J)*CR2R - ARGI(J)*CR2I
    -        C2I = ARGR(J)*CR2I + ARGI(J)*CR2R
    -        CALL ZAIRY(C2R, C2I, 0, 2, AIR, AII, NAI, IDUM)
    -        CALL ZAIRY(C2R, C2I, 1, 2, DAIR, DAII, NDAI, IDUM)
    -        STR = DAIR*BSUMR(J) - DAII*BSUMI(J)
    -        STI = DAIR*BSUMI(J) + DAII*BSUMR(J)
    -        PTR = STR*CR2R - STI*CR2I
    -        PTI = STR*CR2I + STI*CR2R
    -        STR = PTR + (AIR*ASUMR(J)-AII*ASUMI(J))
    -        STI = PTI + (AIR*ASUMI(J)+AII*ASUMR(J))
    -        PTR = STR*PHIR(J) - STI*PHII(J)
    -        PTI = STR*PHII(J) + STI*PHIR(J)
    -        S2R = PTR*CSR - PTI*CSI
    -        S2I = PTR*CSI + PTI*CSR
    -        STR = DEXP(S1R)*CSSR(KFLAG)
    -        S1R = STR*DCOS(S1I)
    -        S1I = STR*DSIN(S1I)
    -        STR = S2R*S1R - S2I*S1I
    -        S2I = S1R*S2I + S2R*S1I
    -        S2R = STR
    -        IF (KFLAG.NE.1) GO TO 60
    -        CALL ZUCHK(S2R, S2I, NW, BRY(1), TOL)
    -        IF (NW.NE.0) GO TO 70
    -   60   CONTINUE
    -        IF (YY.LE.0.0D0) S2I = -S2I
    -        CYR(KDFLG) = S2R
    -        CYI(KDFLG) = S2I
    -        YR(I) = S2R*CSRR(KFLAG)
    -        YI(I) = S2I*CSRR(KFLAG)
    -        STR = CSI
    -        CSI = -CSR
    -        CSR = STR
    -        IF (KDFLG.EQ.2) GO TO 85
    -        KDFLG = 2
    -        GO TO 80
    -   70   CONTINUE
    -        IF (RS1.GT.0.0D0) GO TO 320
    -C-----------------------------------------------------------------------
    -C     FOR ZR.LT.0.0, THE I FUNCTION TO BE ADDED WILL OVERFLOW
    -C-----------------------------------------------------------------------
    -        IF (ZR.LT.0.0D0) GO TO 320
    -        KDFLG = 1
    -        YR(I)=ZEROR
    -        YI(I)=ZEROI
    -        NZ=NZ+1
    -        STR = CSI
    -        CSI =-CSR
    -        CSR = STR
    -        IF (I.EQ.1) GO TO 80
    -        IF ((YR(I-1).EQ.ZEROR).AND.(YI(I-1).EQ.ZEROI)) GO TO 80
    -        YR(I-1)=ZEROR
    -        YI(I-1)=ZEROI
    -        NZ=NZ+1
    -   80 CONTINUE
    -      I = N
    -   85 CONTINUE
    -      RAZR = 1.0D0/AZABS(ZRR,ZRI)
    -      STR = ZRR*RAZR
    -      STI = -ZRI*RAZR
    -      RZR = (STR+STR)*RAZR
    -      RZI = (STI+STI)*RAZR
    -      CKR = FN*RZR
    -      CKI = FN*RZI
    -      IB = I + 1
    -      IF (N.LT.IB) GO TO 180
    -C-----------------------------------------------------------------------
    -C     TEST LAST MEMBER FOR UNDERFLOW AND OVERFLOW. SET SEQUENCE TO ZERO
    -C     ON UNDERFLOW.
    -C-----------------------------------------------------------------------
    -      FN = FNU + DBLE(FLOAT(N-1))
    -      IPARD = 1
    -      IF (MR.NE.0) IPARD = 0
    -      CALL ZUNHJ(ZNR, ZNI, FN, IPARD, TOL, PHIDR, PHIDI, ARGDR, ARGDI,
    -     * ZET1DR, ZET1DI, ZET2DR, ZET2DI, ASUMDR, ASUMDI, BSUMDR, BSUMDI)
    -      IF (KODE.EQ.1) GO TO 90
    -      STR = ZBR + ZET2DR
    -      STI = ZBI + ZET2DI
    -      RAST = FN/AZABS(STR,STI)
    -      STR = STR*RAST*RAST
    -      STI = -STI*RAST*RAST
    -      S1R = ZET1DR - STR
    -      S1I = ZET1DI - STI
    -      GO TO 100
    -   90 CONTINUE
    -      S1R = ZET1DR - ZET2DR
    -      S1I = ZET1DI - ZET2DI
    -  100 CONTINUE
    -      RS1 = S1R
    -      IF (DABS(RS1).GT.ELIM) GO TO 105
    -      IF (DABS(RS1).LT.ALIM) GO TO 120
    -C----------------------------------------------------------------------------
    -C     REFINE ESTIMATE AND TEST
    -C-------------------------------------------------------------------------
    -      APHI = AZABS(PHIDR,PHIDI)
    -      RS1 = RS1+DLOG(APHI)
    -      IF (DABS(RS1).LT.ELIM) GO TO 120
    -  105 CONTINUE
    -      IF (RS1.GT.0.0D0) GO TO 320
    -C-----------------------------------------------------------------------
    -C     FOR ZR.LT.0.0, THE I FUNCTION TO BE ADDED WILL OVERFLOW
    -C-----------------------------------------------------------------------
    -      IF (ZR.LT.0.0D0) GO TO 320
    -      NZ = N
    -      DO 106 I=1,N
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -  106 CONTINUE
    -      RETURN
    -  120 CONTINUE
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      C1R = CSRR(KFLAG)
    -      ASCLE = BRY(KFLAG)
    -      DO 130 I=IB,N
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = CKR*C2R - CKI*C2I + S1R
    -        S2I = CKR*C2I + CKI*C2R + S1I
    -        S1R = C2R
    -        S1I = C2I
    -        CKR = CKR + RZR
    -        CKI = CKI + RZI
    -        C2R = S2R*C1R
    -        C2I = S2I*C1R
    -        YR(I) = C2R
    -        YI(I) = C2I
    -        IF (KFLAG.GE.3) GO TO 130
    -        STR = DABS(C2R)
    -        STI = DABS(C2I)
    -        C2M = DMAX1(STR,STI)
    -        IF (C2M.LE.ASCLE) GO TO 130
    -        KFLAG = KFLAG + 1
    -        ASCLE = BRY(KFLAG)
    -        S1R = S1R*C1R
    -        S1I = S1I*C1R
    -        S2R = C2R
    -        S2I = C2I
    -        S1R = S1R*CSSR(KFLAG)
    -        S1I = S1I*CSSR(KFLAG)
    -        S2R = S2R*CSSR(KFLAG)
    -        S2I = S2I*CSSR(KFLAG)
    -        C1R = CSRR(KFLAG)
    -  130 CONTINUE
    -  180 CONTINUE
    -      IF (MR.EQ.0) RETURN
    -C-----------------------------------------------------------------------
    -C     ANALYTIC CONTINUATION FOR RE(Z).LT.0.0D0
    -C-----------------------------------------------------------------------
    -      NZ = 0
    -      FMR = DBLE(FLOAT(MR))
    -      SGN = -DSIGN(PI,FMR)
    -C-----------------------------------------------------------------------
    -C     CSPN AND CSGN ARE COEFF OF K AND I FUNCIONS RESP.
    -C-----------------------------------------------------------------------
    -      CSGNI = SGN
    -      IF (YY.LE.0.0D0) CSGNI = -CSGNI
    -      IFN = INU + N - 1
    -      ANG = FNF*SGN
    -      CSPNR = DCOS(ANG)
    -      CSPNI = DSIN(ANG)
    -      IF (MOD(IFN,2).EQ.0) GO TO 190
    -      CSPNR = -CSPNR
    -      CSPNI = -CSPNI
    -  190 CONTINUE
    -C-----------------------------------------------------------------------
    -C     CS=COEFF OF THE J FUNCTION TO GET THE I FUNCTION. I(FNU,Z) IS
    -C     COMPUTED FROM EXP(I*FNU*HPI)*J(FNU,-I*Z) WHERE Z IS IN THE FIRST
    -C     QUADRANT. FOURTH QUADRANT VALUES (YY.LE.0.0E0) ARE COMPUTED BY
    -C     CONJUGATION SINCE THE I FUNCTION IS REAL ON THE POSITIVE REAL AXIS
    -C-----------------------------------------------------------------------
    -      CSR = SAR*CSGNI
    -      CSI = CAR*CSGNI
    -      IN = MOD(IFN,4) + 1
    -      C2R = CIPR(IN)
    -      C2I = CIPI(IN)
    -      STR = CSR*C2R + CSI*C2I
    -      CSI = -CSR*C2I + CSI*C2R
    -      CSR = STR
    -      ASC = BRY(1)
    -      IUF = 0
    -      KK = N
    -      KDFLG = 1
    -      IB = IB - 1
    -      IC = IB - 1
    -      DO 290 K=1,N
    -        FN = FNU + DBLE(FLOAT(KK-1))
    -C-----------------------------------------------------------------------
    -C     LOGIC TO SORT OUT CASES WHOSE PARAMETERS WERE SET FOR THE K
    -C     FUNCTION ABOVE
    -C-----------------------------------------------------------------------
    -        IF (N.GT.2) GO TO 175
    -  172   CONTINUE
    -        PHIDR = PHIR(J)
    -        PHIDI = PHII(J)
    -        ARGDR = ARGR(J)
    -        ARGDI = ARGI(J)
    -        ZET1DR = ZETA1R(J)
    -        ZET1DI = ZETA1I(J)
    -        ZET2DR = ZETA2R(J)
    -        ZET2DI = ZETA2I(J)
    -        ASUMDR = ASUMR(J)
    -        ASUMDI = ASUMI(J)
    -        BSUMDR = BSUMR(J)
    -        BSUMDI = BSUMI(J)
    -        J = 3 - J
    -        GO TO 210
    -  175   CONTINUE
    -        IF ((KK.EQ.N).AND.(IB.LT.N)) GO TO 210
    -        IF ((KK.EQ.IB).OR.(KK.EQ.IC)) GO TO 172
    -        CALL ZUNHJ(ZNR, ZNI, FN, 0, TOL, PHIDR, PHIDI, ARGDR,
    -     *   ARGDI, ZET1DR, ZET1DI, ZET2DR, ZET2DI, ASUMDR,
    -     *   ASUMDI, BSUMDR, BSUMDI)
    -  210   CONTINUE
    -        IF (KODE.EQ.1) GO TO 220
    -        STR = ZBR + ZET2DR
    -        STI = ZBI + ZET2DI
    -        RAST = FN/AZABS(STR,STI)
    -        STR = STR*RAST*RAST
    -        STI = -STI*RAST*RAST
    -        S1R = -ZET1DR + STR
    -        S1I = -ZET1DI + STI
    -        GO TO 230
    -  220   CONTINUE
    -        S1R = -ZET1DR + ZET2DR
    -        S1I = -ZET1DI + ZET2DI
    -  230   CONTINUE
    -C-----------------------------------------------------------------------
    -C     TEST FOR UNDERFLOW AND OVERFLOW
    -C-----------------------------------------------------------------------
    -        RS1 = S1R
    -        IF (DABS(RS1).GT.ELIM) GO TO 280
    -        IF (KDFLG.EQ.1) IFLAG = 2
    -        IF (DABS(RS1).LT.ALIM) GO TO 240
    -C-----------------------------------------------------------------------
    -C     REFINE  TEST AND SCALE
    -C-----------------------------------------------------------------------
    -        APHI = AZABS(PHIDR,PHIDI)
    -        AARG = AZABS(ARGDR,ARGDI)
    -        RS1 = RS1 + DLOG(APHI) - 0.25D0*DLOG(AARG) - AIC
    -        IF (DABS(RS1).GT.ELIM) GO TO 280
    -        IF (KDFLG.EQ.1) IFLAG = 1
    -        IF (RS1.LT.0.0D0) GO TO 240
    -        IF (KDFLG.EQ.1) IFLAG = 3
    -  240   CONTINUE
    -        CALL ZAIRY(ARGDR, ARGDI, 0, 2, AIR, AII, NAI, IDUM)
    -        CALL ZAIRY(ARGDR, ARGDI, 1, 2, DAIR, DAII, NDAI, IDUM)
    -        STR = DAIR*BSUMDR - DAII*BSUMDI
    -        STI = DAIR*BSUMDI + DAII*BSUMDR
    -        STR = STR + (AIR*ASUMDR-AII*ASUMDI)
    -        STI = STI + (AIR*ASUMDI+AII*ASUMDR)
    -        PTR = STR*PHIDR - STI*PHIDI
    -        PTI = STR*PHIDI + STI*PHIDR
    -        S2R = PTR*CSR - PTI*CSI
    -        S2I = PTR*CSI + PTI*CSR
    -        STR = DEXP(S1R)*CSSR(IFLAG)
    -        S1R = STR*DCOS(S1I)
    -        S1I = STR*DSIN(S1I)
    -        STR = S2R*S1R - S2I*S1I
    -        S2I = S2R*S1I + S2I*S1R
    -        S2R = STR
    -        IF (IFLAG.NE.1) GO TO 250
    -        CALL ZUCHK(S2R, S2I, NW, BRY(1), TOL)
    -        IF (NW.EQ.0) GO TO 250
    -        S2R = ZEROR
    -        S2I = ZEROI
    -  250   CONTINUE
    -        IF (YY.LE.0.0D0) S2I = -S2I
    -        CYR(KDFLG) = S2R
    -        CYI(KDFLG) = S2I
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = S2R*CSRR(IFLAG)
    -        S2I = S2I*CSRR(IFLAG)
    -C-----------------------------------------------------------------------
    -C     ADD I AND K FUNCTIONS, K SEQUENCE IN Y(I), I=1,N
    -C-----------------------------------------------------------------------
    -        S1R = YR(KK)
    -        S1I = YI(KK)
    -        IF (KODE.EQ.1) GO TO 270
    -        CALL ZS1S2(ZRR, ZRI, S1R, S1I, S2R, S2I, NW, ASC, ALIM, IUF)
    -        NZ = NZ + NW
    -  270   CONTINUE
    -        YR(KK) = S1R*CSPNR - S1I*CSPNI + S2R
    -        YI(KK) = S1R*CSPNI + S1I*CSPNR + S2I
    -        KK = KK - 1
    -        CSPNR = -CSPNR
    -        CSPNI = -CSPNI
    -        STR = CSI
    -        CSI = -CSR
    -        CSR = STR
    -        IF (C2R.NE.0.0D0 .OR. C2I.NE.0.0D0) GO TO 255
    -        KDFLG = 1
    -        GO TO 290
    -  255   CONTINUE
    -        IF (KDFLG.EQ.2) GO TO 295
    -        KDFLG = 2
    -        GO TO 290
    -  280   CONTINUE
    -        IF (RS1.GT.0.0D0) GO TO 320
    -        S2R = ZEROR
    -        S2I = ZEROI
    -        GO TO 250
    -  290 CONTINUE
    -      K = N
    -  295 CONTINUE
    -      IL = N - K
    -      IF (IL.EQ.0) RETURN
    -C-----------------------------------------------------------------------
    -C     RECUR BACKWARD FOR REMAINDER OF I SEQUENCE AND ADD IN THE
    -C     K FUNCTIONS, SCALING THE I SEQUENCE DURING RECURRENCE TO KEEP
    -C     INTERMEDIATE ARITHMETIC ON SCALE NEAR EXPONENT EXTREMES.
    -C-----------------------------------------------------------------------
    -      S1R = CYR(1)
    -      S1I = CYI(1)
    -      S2R = CYR(2)
    -      S2I = CYI(2)
    -      CSR = CSRR(IFLAG)
    -      ASCLE = BRY(IFLAG)
    -      FN = DBLE(FLOAT(INU+IL))
    -      DO 310 I=1,IL
    -        C2R = S2R
    -        C2I = S2I
    -        S2R = S1R + (FN+FNF)*(RZR*C2R-RZI*C2I)
    -        S2I = S1I + (FN+FNF)*(RZR*C2I+RZI*C2R)
    -        S1R = C2R
    -        S1I = C2I
    -        FN = FN - 1.0D0
    -        C2R = S2R*CSR
    -        C2I = S2I*CSR
    -        CKR = C2R
    -        CKI = C2I
    -        C1R = YR(KK)
    -        C1I = YI(KK)
    -        IF (KODE.EQ.1) GO TO 300
    -        CALL ZS1S2(ZRR, ZRI, C1R, C1I, C2R, C2I, NW, ASC, ALIM, IUF)
    -        NZ = NZ + NW
    -  300   CONTINUE
    -        YR(KK) = C1R*CSPNR - C1I*CSPNI + C2R
    -        YI(KK) = C1R*CSPNI + C1I*CSPNR + C2I
    -        KK = KK - 1
    -        CSPNR = -CSPNR
    -        CSPNI = -CSPNI
    -        IF (IFLAG.GE.3) GO TO 310
    -        C2R = DABS(CKR)
    -        C2I = DABS(CKI)
    -        C2M = DMAX1(C2R,C2I)
    -        IF (C2M.LE.ASCLE) GO TO 310
    -        IFLAG = IFLAG + 1
    -        ASCLE = BRY(IFLAG)
    -        S1R = S1R*CSR
    -        S1I = S1I*CSR
    -        S2R = CKR
    -        S2I = CKI
    -        S1R = S1R*CSSR(IFLAG)
    -        S1I = S1I*CSSR(IFLAG)
    -        S2R = S2R*CSSR(IFLAG)
    -        S2I = S2I*CSSR(IFLAG)
    -        CSR = CSRR(IFLAG)
    -  310 CONTINUE
    -      RETURN
    -  320 CONTINUE
    -      NZ = -1
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zuoik.f b/scipy-0.10.1/scipy/special/amos/zuoik.f
    deleted file mode 100644
    index 5b05f965ee..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zuoik.f
    +++ /dev/null
    @@ -1,194 +0,0 @@
    -      SUBROUTINE ZUOIK(ZR, ZI, FNU, KODE, IKFLG, N, YR, YI, NUF, TOL,
    -     * ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZUOIK
    -C***REFER TO  ZBESI,ZBESK,ZBESH
    -C
    -C     ZUOIK COMPUTES THE LEADING TERMS OF THE UNIFORM ASYMPTOTIC
    -C     EXPANSIONS FOR THE I AND K FUNCTIONS AND COMPARES THEM
    -C     (IN LOGARITHMIC FORM) TO ALIM AND ELIM FOR OVER AND UNDERFLOW
    -C     WHERE ALIM.LT.ELIM. IF THE MAGNITUDE, BASED ON THE LEADING
    -C     EXPONENTIAL, IS LESS THAN ALIM OR GREATER THAN -ALIM, THEN
    -C     THE RESULT IS ON SCALE. IF NOT, THEN A REFINED TEST USING OTHER
    -C     MULTIPLIERS (IN LOGARITHMIC FORM) IS MADE BASED ON ELIM. HERE
    -C     EXP(-ELIM)=SMALLEST MACHINE NUMBER*1.0E+3 AND EXP(-ALIM)=
    -C     EXP(-ELIM)/TOL
    -C
    -C     IKFLG=1 MEANS THE I SEQUENCE IS TESTED
    -C          =2 MEANS THE K SEQUENCE IS TESTED
    -C     NUF = 0 MEANS THE LAST MEMBER OF THE SEQUENCE IS ON SCALE
    -C         =-1 MEANS AN OVERFLOW WOULD OCCUR
    -C     IKFLG=1 AND NUF.GT.0 MEANS THE LAST NUF Y VALUES WERE SET TO ZERO
    -C             THE FIRST N-NUF VALUES MUST BE SET BY ANOTHER ROUTINE
    -C     IKFLG=2 AND NUF.EQ.N MEANS ALL Y VALUES WERE SET TO ZERO
    -C     IKFLG=2 AND 0.LT.NUF.LT.N NOT CONSIDERED. Y MUST BE SET BY
    -C             ANOTHER ROUTINE
    -C
    -C***ROUTINES CALLED  ZUCHK,ZUNHJ,ZUNIK,D1MACH,AZABS,AZLOG
    -C***END PROLOGUE  ZUOIK
    -C     COMPLEX ARG,ASUM,BSUM,CWRK,CZ,CZERO,PHI,SUM,Y,Z,ZB,ZETA1,ZETA2,ZN,
    -C    *ZR
    -      DOUBLE PRECISION AARG, AIC, ALIM, APHI, ARGI, ARGR, ASUMI, ASUMR,
    -     * ASCLE, AX, AY, BSUMI, BSUMR, CWRKI, CWRKR, CZI, CZR, ELIM, FNN,
    -     * FNU, GNN, GNU, PHII, PHIR, RCZ, STR, STI, SUMI, SUMR, TOL, YI,
    -     * YR, ZBI, ZBR, ZEROI, ZEROR, ZETA1I, ZETA1R, ZETA2I, ZETA2R, ZI,
    -     * ZNI, ZNR, ZR, ZRI, ZRR, D1MACH, AZABS
    -      INTEGER I, IDUM, IFORM, IKFLG, INIT, KODE, N, NN, NUF, NW
    -      DIMENSION YR(N), YI(N), CWRKR(16), CWRKI(16)
    -      DATA ZEROR,ZEROI / 0.0D0, 0.0D0 /
    -      DATA AIC / 1.265512123484645396D+00 /
    -      NUF = 0
    -      NN = N
    -      ZRR = ZR
    -      ZRI = ZI
    -      IF (ZR.GE.0.0D0) GO TO 10
    -      ZRR = -ZR
    -      ZRI = -ZI
    -   10 CONTINUE
    -      ZBR = ZRR
    -      ZBI = ZRI
    -      AX = DABS(ZR)*1.7321D0
    -      AY = DABS(ZI)
    -      IFORM = 1
    -      IF (AY.GT.AX) IFORM = 2
    -      GNU = DMAX1(FNU,1.0D0)
    -      IF (IKFLG.EQ.1) GO TO 20
    -      FNN = DBLE(FLOAT(NN))
    -      GNN = FNU + FNN - 1.0D0
    -      GNU = DMAX1(GNN,FNN)
    -   20 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ONLY THE MAGNITUDE OF ARG AND PHI ARE NEEDED ALONG WITH THE
    -C     REAL PARTS OF ZETA1, ZETA2 AND ZB. NO ATTEMPT IS MADE TO GET
    -C     THE SIGN OF THE IMAGINARY PART CORRECT.
    -C-----------------------------------------------------------------------
    -      IF (IFORM.EQ.2) GO TO 30
    -      INIT = 0
    -      CALL ZUNIK(ZRR, ZRI, GNU, IKFLG, 1, TOL, INIT, PHIR, PHII,
    -     * ZETA1R, ZETA1I, ZETA2R, ZETA2I, SUMR, SUMI, CWRKR, CWRKI)
    -      CZR = -ZETA1R + ZETA2R
    -      CZI = -ZETA1I + ZETA2I
    -      GO TO 50
    -   30 CONTINUE
    -      ZNR = ZRI
    -      ZNI = -ZRR
    -      IF (ZI.GT.0.0D0) GO TO 40
    -      ZNR = -ZNR
    -   40 CONTINUE
    -      CALL ZUNHJ(ZNR, ZNI, GNU, 1, TOL, PHIR, PHII, ARGR, ARGI, ZETA1R,
    -     * ZETA1I, ZETA2R, ZETA2I, ASUMR, ASUMI, BSUMR, BSUMI)
    -      CZR = -ZETA1R + ZETA2R
    -      CZI = -ZETA1I + ZETA2I
    -      AARG = AZABS(ARGR,ARGI)
    -   50 CONTINUE
    -      IF (KODE.EQ.1) GO TO 60
    -      CZR = CZR - ZBR
    -      CZI = CZI - ZBI
    -   60 CONTINUE
    -      IF (IKFLG.EQ.1) GO TO 70
    -      CZR = -CZR
    -      CZI = -CZI
    -   70 CONTINUE
    -      APHI = AZABS(PHIR,PHII)
    -      RCZ = CZR
    -C-----------------------------------------------------------------------
    -C     OVERFLOW TEST
    -C-----------------------------------------------------------------------
    -      IF (RCZ.GT.ELIM) GO TO 210
    -      IF (RCZ.LT.ALIM) GO TO 80
    -      RCZ = RCZ + DLOG(APHI)
    -      IF (IFORM.EQ.2) RCZ = RCZ - 0.25D0*DLOG(AARG) - AIC
    -      IF (RCZ.GT.ELIM) GO TO 210
    -      GO TO 130
    -   80 CONTINUE
    -C-----------------------------------------------------------------------
    -C     UNDERFLOW TEST
    -C-----------------------------------------------------------------------
    -      IF (RCZ.LT.(-ELIM)) GO TO 90
    -      IF (RCZ.GT.(-ALIM)) GO TO 130
    -      RCZ = RCZ + DLOG(APHI)
    -      IF (IFORM.EQ.2) RCZ = RCZ - 0.25D0*DLOG(AARG) - AIC
    -      IF (RCZ.GT.(-ELIM)) GO TO 110
    -   90 CONTINUE
    -      DO 100 I=1,NN
    -        YR(I) = ZEROR
    -        YI(I) = ZEROI
    -  100 CONTINUE
    -      NUF = NN
    -      RETURN
    -  110 CONTINUE
    -      ASCLE = 1.0D+3*D1MACH(1)/TOL
    -      CALL AZLOG(PHIR, PHII, STR, STI, IDUM)
    -      CZR = CZR + STR
    -      CZI = CZI + STI
    -      IF (IFORM.EQ.1) GO TO 120
    -      CALL AZLOG(ARGR, ARGI, STR, STI, IDUM)
    -      CZR = CZR - 0.25D0*STR - AIC
    -      CZI = CZI - 0.25D0*STI
    -  120 CONTINUE
    -      AX = DEXP(RCZ)/TOL
    -      AY = CZI
    -      CZR = AX*DCOS(AY)
    -      CZI = AX*DSIN(AY)
    -      CALL ZUCHK(CZR, CZI, NW, ASCLE, TOL)
    -      IF (NW.NE.0) GO TO 90
    -  130 CONTINUE
    -      IF (IKFLG.EQ.2) RETURN
    -      IF (N.EQ.1) RETURN
    -C-----------------------------------------------------------------------
    -C     SET UNDERFLOWS ON I SEQUENCE
    -C-----------------------------------------------------------------------
    -  140 CONTINUE
    -      GNU = FNU + DBLE(FLOAT(NN-1))
    -      IF (IFORM.EQ.2) GO TO 150
    -      INIT = 0
    -      CALL ZUNIK(ZRR, ZRI, GNU, IKFLG, 1, TOL, INIT, PHIR, PHII,
    -     * ZETA1R, ZETA1I, ZETA2R, ZETA2I, SUMR, SUMI, CWRKR, CWRKI)
    -      CZR = -ZETA1R + ZETA2R
    -      CZI = -ZETA1I + ZETA2I
    -      GO TO 160
    -  150 CONTINUE
    -      CALL ZUNHJ(ZNR, ZNI, GNU, 1, TOL, PHIR, PHII, ARGR, ARGI, ZETA1R,
    -     * ZETA1I, ZETA2R, ZETA2I, ASUMR, ASUMI, BSUMR, BSUMI)
    -      CZR = -ZETA1R + ZETA2R
    -      CZI = -ZETA1I + ZETA2I
    -      AARG = AZABS(ARGR,ARGI)
    -  160 CONTINUE
    -      IF (KODE.EQ.1) GO TO 170
    -      CZR = CZR - ZBR
    -      CZI = CZI - ZBI
    -  170 CONTINUE
    -      APHI = AZABS(PHIR,PHII)
    -      RCZ = CZR
    -      IF (RCZ.LT.(-ELIM)) GO TO 180
    -      IF (RCZ.GT.(-ALIM)) RETURN
    -      RCZ = RCZ + DLOG(APHI)
    -      IF (IFORM.EQ.2) RCZ = RCZ - 0.25D0*DLOG(AARG) - AIC
    -      IF (RCZ.GT.(-ELIM)) GO TO 190
    -  180 CONTINUE
    -      YR(NN) = ZEROR
    -      YI(NN) = ZEROI
    -      NN = NN - 1
    -      NUF = NUF + 1
    -      IF (NN.EQ.0) RETURN
    -      GO TO 140
    -  190 CONTINUE
    -      ASCLE = 1.0D+3*D1MACH(1)/TOL
    -      CALL AZLOG(PHIR, PHII, STR, STI, IDUM)
    -      CZR = CZR + STR
    -      CZI = CZI + STI
    -      IF (IFORM.EQ.1) GO TO 200
    -      CALL AZLOG(ARGR, ARGI, STR, STI, IDUM)
    -      CZR = CZR - 0.25D0*STR - AIC
    -      CZI = CZI - 0.25D0*STI
    -  200 CONTINUE
    -      AX = DEXP(RCZ)/TOL
    -      AY = CZI
    -      CZR = AX*DCOS(AY)
    -      CZI = AX*DSIN(AY)
    -      CALL ZUCHK(CZR, CZI, NW, ASCLE, TOL)
    -      IF (NW.NE.0) GO TO 180
    -      RETURN
    -  210 CONTINUE
    -      NUF = -1
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos/zwrsk.f b/scipy-0.10.1/scipy/special/amos/zwrsk.f
    deleted file mode 100644
    index 397340f4b6..0000000000
    --- a/scipy-0.10.1/scipy/special/amos/zwrsk.f
    +++ /dev/null
    @@ -1,94 +0,0 @@
    -      SUBROUTINE ZWRSK(ZRR, ZRI, FNU, KODE, N, YR, YI, NZ, CWR, CWI,
    -     * TOL, ELIM, ALIM)
    -C***BEGIN PROLOGUE  ZWRSK
    -C***REFER TO  ZBESI,ZBESK
    -C
    -C     ZWRSK COMPUTES THE I BESSEL FUNCTION FOR RE(Z).GE.0.0 BY
    -C     NORMALIZING THE I FUNCTION RATIOS FROM ZRATI BY THE WRONSKIAN
    -C
    -C***ROUTINES CALLED  D1MACH,ZBKNU,ZRATI,AZABS
    -C***END PROLOGUE  ZWRSK
    -C     COMPLEX CINU,CSCL,CT,CW,C1,C2,RCT,ST,Y,ZR
    -      DOUBLE PRECISION ACT, ACW, ALIM, ASCLE, CINUI, CINUR, CSCLR, CTI,
    -     * CTR, CWI, CWR, C1I, C1R, C2I, C2R, ELIM, FNU, PTI, PTR, RACT,
    -     * STI, STR, TOL, YI, YR, ZRI, ZRR, AZABS, D1MACH
    -      INTEGER I, KODE, N, NW, NZ
    -      DIMENSION YR(N), YI(N), CWR(2), CWI(2)
    -C-----------------------------------------------------------------------
    -C     I(FNU+I-1,Z) BY BACKWARD RECURRENCE FOR RATIOS
    -C     Y(I)=I(FNU+I,Z)/I(FNU+I-1,Z) FROM CRATI NORMALIZED BY THE
    -C     WRONSKIAN WITH K(FNU,Z) AND K(FNU+1,Z) FROM CBKNU.
    -C-----------------------------------------------------------------------
    -      NZ = 0
    -      CALL ZBKNU(ZRR, ZRI, FNU, KODE, 2, CWR, CWI, NW, TOL, ELIM, ALIM)
    -      IF (NW.NE.0) GO TO 50
    -      CALL ZRATI(ZRR, ZRI, FNU, N, YR, YI, TOL)
    -C-----------------------------------------------------------------------
    -C     RECUR FORWARD ON I(FNU+1,Z) = R(FNU,Z)*I(FNU,Z),
    -C     R(FNU+J-1,Z)=Y(J),  J=1,...,N
    -C-----------------------------------------------------------------------
    -      CINUR = 1.0D0
    -      CINUI = 0.0D0
    -      IF (KODE.EQ.1) GO TO 10
    -      CINUR = DCOS(ZRI)
    -      CINUI = DSIN(ZRI)
    -   10 CONTINUE
    -C-----------------------------------------------------------------------
    -C     ON LOW EXPONENT MACHINES THE K FUNCTIONS CAN BE CLOSE TO BOTH
    -C     THE UNDER AND OVERFLOW LIMITS AND THE NORMALIZATION MUST BE
    -C     SCALED TO PREVENT OVER OR UNDERFLOW. CUOIK HAS DETERMINED THAT
    -C     THE RESULT IS ON SCALE.
    -C-----------------------------------------------------------------------
    -      ACW = AZABS(CWR(2),CWI(2))
    -      ASCLE = 1.0D+3*D1MACH(1)/TOL
    -      CSCLR = 1.0D0
    -      IF (ACW.GT.ASCLE) GO TO 20
    -      CSCLR = 1.0D0/TOL
    -      GO TO 30
    -   20 CONTINUE
    -      ASCLE = 1.0D0/ASCLE
    -      IF (ACW.LT.ASCLE) GO TO 30
    -      CSCLR = TOL
    -   30 CONTINUE
    -      C1R = CWR(1)*CSCLR
    -      C1I = CWI(1)*CSCLR
    -      C2R = CWR(2)*CSCLR
    -      C2I = CWI(2)*CSCLR
    -      STR = YR(1)
    -      STI = YI(1)
    -C-----------------------------------------------------------------------
    -C     CINU=CINU*(CONJG(CT)/CABS(CT))*(1.0D0/CABS(CT) PREVENTS
    -C     UNDER- OR OVERFLOW PREMATURELY BY SQUARING CABS(CT)
    -C-----------------------------------------------------------------------
    -      PTR = STR*C1R - STI*C1I
    -      PTI = STR*C1I + STI*C1R
    -      PTR = PTR + C2R
    -      PTI = PTI + C2I
    -      CTR = ZRR*PTR - ZRI*PTI
    -      CTI = ZRR*PTI + ZRI*PTR
    -      ACT = AZABS(CTR,CTI)
    -      RACT = 1.0D0/ACT
    -      CTR = CTR*RACT
    -      CTI = -CTI*RACT
    -      PTR = CINUR*RACT
    -      PTI = CINUI*RACT
    -      CINUR = PTR*CTR - PTI*CTI
    -      CINUI = PTR*CTI + PTI*CTR
    -      YR(1) = CINUR*CSCLR
    -      YI(1) = CINUI*CSCLR
    -      IF (N.EQ.1) RETURN
    -      DO 40 I=2,N
    -        PTR = STR*CINUR - STI*CINUI
    -        CINUI = STR*CINUI + STI*CINUR
    -        CINUR = PTR
    -        STR = YR(I)
    -        STI = YI(I)
    -        YR(I) = CINUR*CSCLR
    -        YI(I) = CINUI*CSCLR
    -   40 CONTINUE
    -      RETURN
    -   50 CONTINUE
    -      NZ = -1
    -      IF(NW.EQ.(-2)) NZ=-2
    -      RETURN
    -      END
    diff --git a/scipy-0.10.1/scipy/special/amos_wrappers.c b/scipy-0.10.1/scipy/special/amos_wrappers.c
    deleted file mode 100644
    index 767227e82b..0000000000
    --- a/scipy-0.10.1/scipy/special/amos_wrappers.c
    +++ /dev/null
    @@ -1,577 +0,0 @@
    -/* This file is a collection of wrappers around the
    - *  Amos Fortran library of functions that take complex
    - *  variables (see www.netlib.org) so that they can be called from
    - *  the cephes library of corresponding name but work with complex
    - *  arguments.
    - */
    -
    -#include "amos_wrappers.h"
    -
    -#define CADDR(z) (double *)(&((z).real)), (double*)(&((z).imag))
    -#define F2C_CST(z) (double *)&((z)->real), (double *)&((z)->imag)
    -
    -#if defined(NO_APPEND_FORTRAN)
    -#if defined(UPPERCASE_FORTRAN)
    -#define F_FUNC(f,F) F
    -#else
    -#define F_FUNC(f,F) f
    -#endif
    -#else
    -#if defined(UPPERCASE_FORTRAN)
    -#define F_FUNC(f,F) F##_
    -#else
    -#define F_FUNC(f,F) f##_
    -#endif
    -#endif
    -
    -extern int F_FUNC(zairy,ZAIRY)
    -     (double*, double*, int*, int*, double*, double*, int*, int*);
    -extern int F_FUNC(zbiry,ZBIRY)
    -     (double*, double*, int*, int*, double*, double*, int*);
    -extern int F_FUNC(zbesi,ZBESI)
    -     (double*, double*, double*, int*, int*, double*, double*, int*, int*);
    -extern int F_FUNC(zbesj,ZBESJ)
    -     (double*, double*, double*, int*, int*, double*, double*, int*, int*);
    -extern int F_FUNC(zbesk,ZBESK)
    -     (double*, double*, double*, int*, int*, double*, double*, int*, int*);
    -extern int F_FUNC(zbesy,ZBESY)
    -     (double*, double*, double*, int*, int*, double*, double*, int*, double*, double*, int*);
    -extern int F_FUNC(zbesh,ZBESH)
    -     (double*, double*, double*, int*, int*, int*, double*, double*, int*, int*);
    -
    -/* This must be linked with fortran
    - */
    -
    -int ierr_to_mtherr( int nz, int ierr) {
    -     /* Return mtherr equivalents for ierr values */
    -  
    -  if (nz != 0) return UNDERFLOW;
    -
    -  switch (ierr) {
    -  case 1:
    -    return DOMAIN;
    -  case 2:
    -    return OVERFLOW;
    -  case 3:
    -    return PLOSS;
    -  case 4:
    -    return TLOSS;
    -  case 5:   /* Algorithm termination condition not met */
    -    return TLOSS;    
    -  }
    -  return -1;
    -}
    -
    -void set_nan_if_no_computation_done(Py_complex *v, int ierr) {
    -  if (v != NULL && (ierr == 1 || ierr == 2 || ierr == 4 || ierr == 5)) {
    -    v->real = NPY_NAN;
    -    v->imag = NPY_NAN;
    -  }
    -}
    -
    -static Py_complex
    -rotate(Py_complex z, double v)
    -{
    -    Py_complex w;
    -    double c = cos(v * NPY_PI);
    -    double s = sin(v * NPY_PI);
    -    w.real = z.real*c - z.imag*s;
    -    w.imag = z.real*s + z.imag*c;
    -    return w;
    -}
    -
    -static Py_complex
    -rotate_jy(Py_complex j, Py_complex y, double v)
    -{
    -    Py_complex w;
    -    double c = cos(v * NPY_PI);
    -    double s = sin(v * NPY_PI);
    -    w.real = j.real * c - y.real * s;
    -    w.imag = j.imag * c - y.imag * s;
    -    return w;
    -}
    -
    -static int
    -reflect_jy(Py_complex *jy, double v)
    -{
    -    /* NB: Y_v may be huge near negative integers -- so handle exact
    -     *     integers carefully
    -     */
    -    int i;
    -    if (v != floor(v))
    -        return 0;
    -    
    -    i = v - 16384.0 * floor(v / 16384.0);
    -    if (i & 1) {
    -        jy->real = -jy->real;
    -        jy->imag = -jy->imag;
    -    }
    -    return 1;
    -}
    -
    -static int
    -reflect_i(Py_complex *ik, double v)
    -{
    -    if (v != floor(v))
    -        return 0;
    -    return 1; /* I is symmetric for integer v */
    -}
    -
    -static Py_complex
    -rotate_i(Py_complex i, Py_complex k, double v)
    -{
    -    Py_complex w;
    -    double s = sin(v * NPY_PI)*(2.0/NPY_PI);
    -    w.real = i.real + s*k.real;
    -    w.imag = i.imag + s*k.imag;
    -    return w;
    -}
    -
    -int cairy_wrap(Py_complex z, Py_complex *ai, Py_complex *aip, Py_complex *bi, Py_complex *bip) {
    -  int id = 0;
    -  int ierr = 0;
    -  int kode = 1;
    -  int nz;
    -
    -  F_FUNC(zairy,ZAIRY)(CADDR(z), &id, &kode, F2C_CST(ai), &nz, &ierr);
    -  DO_MTHERR("airy:", ai);
    -  nz = 0;
    -  F_FUNC(zbiry,ZBIRY)(CADDR(z), &id, &kode, F2C_CST(bi), &ierr);
    -  DO_MTHERR("airy:", bi);
    -  
    -  id = 1;
    -  F_FUNC(zairy,ZAIRY)(CADDR(z), &id, &kode, F2C_CST(aip), &nz, &ierr);
    -  DO_MTHERR("airy:", aip);
    -  nz = 0;
    -  F_FUNC(zbiry,ZBIRY)(CADDR(z), &id, &kode, F2C_CST(bip), &ierr);
    -  DO_MTHERR("airy:", bip);
    -  return 0;
    -}
    -
    -int cairy_wrap_e(Py_complex z, Py_complex *ai, Py_complex *aip, Py_complex *bi, Py_complex *bip) {
    -  int id = 0;
    -  int kode = 2;        /* Exponential scaling */
    -  int nz, ierr;
    -
    -  F_FUNC(zairy,ZAIRY)(CADDR(z), &id, &kode, F2C_CST(ai), &nz, &ierr);
    -  DO_MTHERR("airye:", ai);
    -  nz = 0;
    -  F_FUNC(zbiry,ZBIRY)(CADDR(z), &id, &kode, F2C_CST(bi), &ierr);
    -  DO_MTHERR("airye:", bi);
    -  
    -  id = 1;
    -  F_FUNC(zairy,ZAIRY)(CADDR(z), &id, &kode, F2C_CST(aip), &nz, &ierr);
    -  DO_MTHERR("airye:", aip);
    -  nz = 0;
    -  F_FUNC(zbiry,ZBIRY)(CADDR(z), &id, &kode, F2C_CST(bip), &ierr);
    -  DO_MTHERR("airye:", bip);
    -  return 0;
    -}
    -
    -int cairy_wrap_e_real(double z, double *ai, double *aip, double *bi, double *bip) {
    -  int id = 0;
    -  int kode = 2;        /* Exponential scaling */
    -  int nz, ierr;
    -  Py_complex cz, cai, caip, cbi, cbip;
    -
    -  cz.real = z;
    -  cz.imag = 0;
    -
    -  if (z < 0) {
    -      *ai = NPY_NAN;
    -  } else {
    -      F_FUNC(zairy,ZAIRY)(CADDR(cz), &id, &kode, CADDR(cai), &nz, &ierr);
    -      DO_MTHERR("airye:", &cai);
    -      *ai = cai.real;
    -  }
    -  nz = 0;
    -  F_FUNC(zbiry,ZBIRY)(CADDR(cz), &id, &kode, CADDR(cbi), &ierr);
    -  DO_MTHERR("airye:", &cbi);
    -  *bi = cbi.real;
    -  
    -  id = 1;
    -  if (z < 0) {
    -      *aip = NPY_NAN;
    -  } else {
    -      F_FUNC(zairy,ZAIRY)(CADDR(cz), &id, &kode, CADDR(caip), &nz, &ierr);
    -      DO_MTHERR("airye:", &caip);
    -      *aip = caip.real;
    -  }
    -  nz = 0;
    -  F_FUNC(zbiry,ZBIRY)(CADDR(cz), &id, &kode, CADDR(cbip), &ierr);
    -  DO_MTHERR("airye:", &cbip);
    -  *bip = cbip.real;
    -  return 0;
    -}
    -
    -Py_complex cbesi_wrap( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 1;
    -  int sign = 1;
    -  int nz, ierr;
    -  Py_complex cy, cy_k;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesi,ZBESI)(CADDR(z), &v,  &kode, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("iv:", &cy);
    -  if (ierr == 2) {
    -    /* overflow */
    -    if (z.imag == 0 && (z.real >= 0 || v == floor(v))) {
    -        if (z.real < 0 && v/2 != floor(v/2))
    -            cy.real = -NPY_INFINITY;
    -        else
    -            cy.real = NPY_INFINITY;
    -        cy.imag = 0;
    -    } else {
    -        cy = cbesi_wrap_e(v*sign, z);
    -        cy.real *= NPY_INFINITY;
    -        cy.imag *= NPY_INFINITY;
    -    }
    -  }
    -
    -  if (sign == -1) {
    -    if (!reflect_i(&cy, v)) {
    -      F_FUNC(zbesk,ZBESK)(CADDR(z), &v,  &kode, &n, CADDR(cy_k), &nz, &ierr);
    -      DO_MTHERR("iv(kv):", &cy_k);
    -      cy = rotate_i(cy, cy_k, v);
    -    }
    -  }
    -
    -  return cy;
    -}
    -
    -Py_complex cbesi_wrap_e( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 2;
    -  int sign = 1;
    -  int nz, ierr;
    -  Py_complex cy, cy_k;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesi,ZBESI)(CADDR(z), &v,  &kode, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("ive:", &cy);
    -
    -  if (sign == -1) {
    -    if (!reflect_i(&cy, v)) {
    -      F_FUNC(zbesk,ZBESK)(CADDR(z), &v,  &kode, &n, CADDR(cy_k), &nz, &ierr);
    -      DO_MTHERR("ive(kv):", &cy_k);
    -      /* adjust scaling to match zbesi */
    -      cy_k = rotate(cy_k, -z.imag/NPY_PI);
    -      if (z.real > 0) {
    -          cy_k.real *= exp(-2*z.real);
    -          cy_k.imag *= exp(-2*z.real);
    -      }
    -      /* v -> -v */
    -      cy = rotate_i(cy, cy_k, v);
    -    }
    -  }
    -
    -  return cy;
    -}
    -
    -double cbesi_wrap_e_real(double v, double z) {
    -  Py_complex cy, w;
    -  if (v != floor(v) && z < 0) {
    -    return NPY_NAN;
    -  } else {
    -    w.real = z;
    -    w.imag = 0;
    -    cy = cbesi_wrap_e(v, w);
    -    return cy.real;
    -  }
    -}
    -  
    -Py_complex cbesj_wrap( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 1;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy_j, cy_y, cwork;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesj,ZBESJ)(CADDR(z), &v,  &kode, &n, CADDR(cy_j), &nz, &ierr);
    -  DO_MTHERR("jv:", &cy_j);
    -  if (ierr == 2) {
    -    /* overflow */
    -    cy_j = cbesj_wrap_e(v, z);
    -    cy_j.real *= NPY_INFINITY;
    -    cy_j.imag *= NPY_INFINITY;
    -  }
    -
    -  if (sign == -1) {
    -    if (!reflect_jy(&cy_j, v)) {
    -      F_FUNC(zbesy,ZBESY)(CADDR(z), &v,  &kode, &n, CADDR(cy_y), &nz, CADDR(cwork), &ierr);
    -      DO_MTHERR("jv(yv):", &cy_y);
    -      cy_j = rotate_jy(cy_j, cy_y, v);
    -    }
    -  }
    -  return cy_j;
    -}
    -
    -Py_complex cbesj_wrap_e( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 2;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy_j, cy_y, cwork;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesj,ZBESJ)(CADDR(z), &v, &kode, &n, CADDR(cy_j), &nz, &ierr);
    -  DO_MTHERR("jve:", &cy_j);
    -  if (sign == -1) {
    -    if (!reflect_jy(&cy_j, v)) {
    -      F_FUNC(zbesy,ZBESY)(CADDR(z), &v,  &kode, &n, CADDR(cy_y), &nz, CADDR(cwork), &ierr);
    -      DO_MTHERR("jve(yve):", &cy_y);
    -      cy_j = rotate_jy(cy_j, cy_y, v);
    -    }
    -  }
    -  return cy_j;
    -}
    -
    -double cbesj_wrap_e_real(double v, double z) {
    -  Py_complex cy, w;
    -  if (v != floor(v) && z < 0) {
    -    return NPY_NAN;
    -  } else {
    -    w.real = z;
    -    w.imag = 0;
    -    cy = cbesj_wrap_e(v, w);
    -    return cy.real;
    -  }
    -}
    -  
    -Py_complex cbesy_wrap( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 1;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy_y, cy_j, cwork;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesy,ZBESY)(CADDR(z), &v,  &kode, &n, CADDR(cy_y), &nz, CADDR(cwork), &ierr);
    -  DO_MTHERR("yv:", &cy_y);
    -  if (ierr == 2) {
    -    if (z.real >= 0 && z.imag == 0) {
    -      /* overflow */
    -      cy_y.real = NPY_INFINITY;
    -      cy_y.imag = 0;
    -    }
    -  }
    -
    -  if (sign == -1) {
    -    if (!reflect_jy(&cy_y, v)) {
    -      F_FUNC(zbesj,ZBESJ)(CADDR(z), &v,  &kode, &n, CADDR(cy_j), &nz, &ierr);
    -      DO_MTHERR("yv(jv):", &cy_j);
    -      cy_y = rotate_jy(cy_y, cy_j, -v);
    -    }
    -  }
    -  return cy_y;
    -}
    -
    -Py_complex cbesy_wrap_e( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 2;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy_y, cy_j, cwork;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesy,ZBESY)(CADDR(z), &v, &kode, &n, CADDR(cy_y), &nz, CADDR(cwork), &ierr);
    -  DO_MTHERR("yve:", &cy_y);
    -  if (ierr == 2) {
    -    if (z.real >= 0 && z.imag == 0) {
    -      /* overflow */
    -      cy_y.real = NPY_INFINITY;
    -      cy_y.imag = 0;
    -    }
    -  }
    -
    -  if (sign == -1) {
    -    if (!reflect_jy(&cy_y, v)) {
    -      F_FUNC(zbesj,ZBESJ)(CADDR(z), &v,  &kode, &n, CADDR(cy_j), &nz, &ierr);
    -      DO_MTHERR("yv(jv):", &cy_j);
    -      cy_y = rotate_jy(cy_y, cy_j, -v);
    -    }
    -  }
    -  return cy_y;
    -}
    -
    -double cbesy_wrap_e_real(double v, double z) {
    -  Py_complex cy, w;
    -  if (z < 0) {
    -    return NPY_NAN;
    -  } else {
    -    w.real = z;
    -    w.imag = 0;
    -    cy = cbesy_wrap_e(v, w);
    -    return cy.real;
    -  }
    -}
    -  
    -Py_complex cbesk_wrap( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 1;
    -  int nz, ierr;
    -  Py_complex cy;
    -
    -  if (v < 0) {
    -    /* K_v == K_{-v} even for non-integer v */
    -    v = -v;
    -  }
    -  F_FUNC(zbesk,ZBESK)(CADDR(z), &v,  &kode, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("kv:", &cy);
    -  if (ierr == 2) {
    -    if (z.real >= 0 && z.imag == 0) {
    -      /* overflow */
    -      cy.real = NPY_INFINITY;
    -      cy.imag = 0;
    -    }
    -  }
    -
    -  return cy;
    -}
    -
    -Py_complex cbesk_wrap_e( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 2;
    -  int nz, ierr;
    -  Py_complex cy;
    -
    -  if (v < 0) {
    -    /* K_v == K_{-v} even for non-integer v */
    -    v = -v;
    -  }
    -  F_FUNC(zbesk,ZBESK)(CADDR(z), &v, &kode, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("kve:", &cy);
    -  if (ierr == 2) {
    -    if (z.real >= 0 && z.imag == 0) {
    -      /* overflow */
    -      cy.real = NPY_INFINITY;
    -      cy.imag = 0;
    -    }
    -  }
    -
    -  return cy;
    -}
    -  
    -double cbesk_wrap_real( double v, double z) {
    -  Py_complex cy, w;
    -  if (z < 0) {
    -    return NPY_NAN;
    -  } else {
    -    w.real = z;
    -    w.imag = 0;
    -    cy = cbesk_wrap(v, w);
    -    return cy.real;
    -  }
    -}
    -
    -double cbesk_wrap_e_real( double v, double z) {
    -  Py_complex cy, w;
    -  if (z < 0) {
    -    return NPY_NAN;
    -  } else {
    -    w.real = z;
    -    w.imag = 0;
    -    cy = cbesk_wrap_e(v, w);
    -    return cy.real;
    -  }
    -}
    -  
    -Py_complex cbesh_wrap1( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 1;
    -  int m = 1;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesh,ZBESH)(CADDR(z), &v,  &kode, &m, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("hankel1:", &cy);
    -  if (sign == -1) {
    -    cy = rotate(cy, v);
    -  }
    -  return cy;
    -}
    -
    -Py_complex cbesh_wrap1_e( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 2;
    -  int m = 1;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesh,ZBESH)(CADDR(z), &v, &kode, &m, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("hankel1e:", &cy);
    -  if (sign == -1) {
    -    cy = rotate(cy, v);
    -  }
    -  return cy;
    -}
    -  
    -Py_complex cbesh_wrap2( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 1;
    -  int m = 2;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesh,ZBESH)(CADDR(z), &v, &kode, &m, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("hankel2:", &cy);
    -  if (sign == -1) {
    -    cy = rotate(cy, -v);
    -  }
    -  return cy;
    -}
    -
    -Py_complex cbesh_wrap2_e( double v, Py_complex z) {
    -  int n = 1;
    -  int kode = 2;
    -  int m = 2;
    -  int nz, ierr;
    -  int sign = 1;
    -  Py_complex cy;
    -
    -  if (v < 0) {
    -    v = -v;
    -    sign = -1;
    -  }
    -  F_FUNC(zbesh,ZBESH)(CADDR(z), &v, &kode, &m, &n, CADDR(cy), &nz, &ierr);
    -  DO_MTHERR("hankel2e:", &cy);
    -  if (sign == -1) {
    -    cy = rotate(cy, -v);
    -  }
    -  return cy;
    -}
    diff --git a/scipy-0.10.1/scipy/special/amos_wrappers.h b/scipy-0.10.1/scipy/special/amos_wrappers.h
    deleted file mode 100644
    index 5487eb7e6e..0000000000
    --- a/scipy-0.10.1/scipy/special/amos_wrappers.h
    +++ /dev/null
    @@ -1,66 +0,0 @@
    -/* This file is a collection of wrappers around the
    - *  Amos Fortran library of functions that take complex
    - *  variables (see www.netlib.org) so that they can be called from
    - *  the cephes library of corresponding name but work with complex
    - *  arguments.
    - */
    -
    -#ifndef _AMOS_WRAPPERS_H
    -#define _AMOS_WRAPPERS_H
    -#include "Python.h"
    -#include "cephes/mconf.h"
    -
    -#include 
    -
    -#define DO_MTHERR(name, varp)                         \
    -    do {                                              \
    -      if (nz !=0 || ierr != 0) {                      \
    -        mtherr(name, ierr_to_mtherr(nz, ierr));       \
    -        set_nan_if_no_computation_done(varp, ierr);   \
    -      }                                               \
    -    } while (0)
    -
    -int ierr_to_mtherr( int nz, int ierr);
    -void set_nan_if_no_computation_done(Py_complex *var, int ierr);
    -int cairy_wrap(Py_complex z, Py_complex *ai, Py_complex *aip, Py_complex *bi, Py_complex *bip);
    -int cairy_wrap_e(Py_complex z, Py_complex *ai, Py_complex *aip, Py_complex *bi, Py_complex *bip);
    -int cairy_wrap_e_real(double z, double *ai, double *aip, double *bi, double *bip);
    -Py_complex cbesi_wrap( double v, Py_complex z);
    -Py_complex cbesi_wrap_e( double v, Py_complex z);
    -double cbesi_wrap_e_real( double v, double z);
    -Py_complex cbesj_wrap( double v, Py_complex z);
    -Py_complex cbesj_wrap_e( double v, Py_complex z);
    -double cbesj_wrap_e_real( double v, double z);
    -Py_complex cbesy_wrap( double v, Py_complex z);
    -Py_complex cbesy_wrap_e( double v, Py_complex z);
    -double cbesy_wrap_e_real( double v, double z);
    -Py_complex cbesk_wrap( double v, Py_complex z);
    -Py_complex cbesk_wrap_e( double v, Py_complex z);  
    -double cbesk_wrap_real( double v, double z);
    -double cbesk_wrap_e_real( double v, double z);
    -Py_complex cbesh_wrap1( double v, Py_complex z);
    -Py_complex cbesh_wrap1_e( double v, Py_complex z);  
    -Py_complex cbesh_wrap2( double v, Py_complex z);
    -Py_complex cbesh_wrap2_e( double v, Py_complex z);
    -/* 
    -int cairy_(double *, int *, int *, doublecomplex *, int *, int *);
    -int cbiry_(doublecomplex *, int *, int *, doublecomplex *, int *, int *);
    -int cbesi_(doublecomplex *, double *, int *, int *, doublecomplex *, int *, int *);
    -int cbesj_(doublecomplex *, double *, int *, int *, doublecomplex *, int *, int *);
    -int cbesk_(doublecomplex *, double *, int *, int *, doublecomplex *, int *, int *);
    -int cbesy_(doublecomplex *, double *, int *, int *, doublecomplex *, int *, doublecomplex *, int *);
    -int cbesh_(doublecomplex *, double *, int *, int *, int *, doublecomplex *, int *, int *);
    -*/
    -
    -#endif
    -
    -
    -
    -  
    -
    -
    -
    -
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/special/basic.py b/scipy-0.10.1/scipy/special/basic.py
    deleted file mode 100644
    index ff8c136199..0000000000
    --- a/scipy-0.10.1/scipy/special/basic.py
    +++ /dev/null
    @@ -1,876 +0,0 @@
    -#
    -# Author:  Travis Oliphant, 2002
    -#
    -
    -from numpy import pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, \
    -        where, mgrid, cos, sin, exp, place, seterr, issubdtype, extract, \
    -        complexfloating, less, vectorize, inexact, nan, zeros, sometrue
    -from _cephes import ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \
    -        hankel1, hankel2, yv, kv, gammaln, errprint, ndtri
    -import types
    -import specfun
    -import orthogonal
    -
    -__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
    -           'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
    -           'bi_zeros', 'digamma', 'diric', 'ellipk', 'erf_zeros', 'erfcinv',
    -           'erfinv', 'errprint', 'euler', 'fresnel_zeros',
    -           'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
    -           'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
    -           'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
    -           'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
    -           'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
    -           'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
    -           'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq',
    -           'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
    -           'sinc', 'sph_harm', 'sph_in', 'sph_inkn',
    -           'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
    -           'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta']
    -
    -
    -def sinc(x):
    -    """Returns sin(pi*x)/(pi*x) at all points of array x.
    -    """
    -    w = pi * asarray(x)
    -    # w might contain 0, and so temporarily turn off warnings
    -    # while calculating sin(w)/w.
    -    old_settings = seterr(all='ignore')
    -    s = sin(w) / w
    -    seterr(**old_settings)
    -    return where(x==0, 1.0, s)
    -
    -def diric(x,n):
    -    """Returns the periodic sinc function also called the dirichlet function:
    -
    -    diric(x) = sin(x *n / 2) / (n sin(x / 2))
    -
    -    where n is a positive integer.
    -    """
    -    x,n = asarray(x), asarray(n)
    -    n = asarray(n + (x-x))
    -    x = asarray(x + (n-n))
    -    if issubdtype(x.dtype, inexact):
    -        ytype = x.dtype
    -    else:
    -        ytype = float
    -    y = zeros(x.shape,ytype)
    -
    -    mask1 = (n <= 0) | (n <> floor(n))
    -    place(y,mask1,nan)
    -
    -    z = asarray(x / 2.0 / pi)
    -    mask2 = (1-mask1) & (z == floor(z))
    -    zsub = extract(mask2,z)
    -    nsub = extract(mask2,n)
    -    place(y,mask2,pow(-1,zsub*(nsub-1)))
    -
    -    mask = (1-mask1) & (1-mask2)
    -    xsub = extract(mask,x)
    -    nsub = extract(mask,n)
    -    place(y,mask,sin(nsub*xsub/2.0)/(nsub*sin(xsub/2.0)))
    -    return y
    -
    -
    -
    -def jnjnp_zeros(nt):
    -    """Compute nt (<=1200) zeros of the bessel functions Jn and Jn'
    -    and arange them in order of their magnitudes.
    -
    -    Returns
    -    -------
    -    zo[l-1] : ndarray
    -        Value of the lth zero of of Jn(x) and Jn'(x). Of length `nt`.
    -    n[l-1] : ndarray
    -        Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
    -    m[l-1] : ndarray
    -        Serial number of the zeros of Jn(x) or Jn'(x) associated
    -        with lth zero. Of length `nt`.
    -    t[l-1] : ndarray
    -        0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
    -        length `nt`.
    -
    -    See Also
    -    --------
    -    jn_zeros, jnp_zeros : to get separated arrays of zeros.
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt>1200):
    -        raise ValueError("Number must be integer <= 1200.")
    -    nt = int(nt)
    -    n,m,t,zo = specfun.jdzo(nt)
    -    return zo[1:nt+1],n[:nt],m[:nt],t[:nt]
    -
    -def jnyn_zeros(n,nt):
    -    """Compute nt zeros of the Bessel functions Jn(x), Jn'(x), Yn(x), and
    -    Yn'(x), respectively. Returns 4 arrays of length nt.
    -
    -    See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
    -    """
    -    if not (isscalar(nt) and isscalar(n)):
    -        raise ValueError("Arguments must be scalars.")
    -    if (floor(n)!=n) or (floor(nt)!=nt):
    -        raise ValueError("Arguments must be integers.")
    -    if (nt <=0):
    -        raise ValueError("nt > 0")
    -    return specfun.jyzo(abs(n),nt)
    -
    -def jn_zeros(n,nt):
    -    """Compute nt zeros of the Bessel function Jn(x).
    -    """
    -    return jnyn_zeros(n,nt)[0]
    -def jnp_zeros(n,nt):
    -    """Compute nt zeros of the Bessel function Jn'(x).
    -    """
    -    return jnyn_zeros(n,nt)[1]
    -def yn_zeros(n,nt):
    -    """Compute nt zeros of the Bessel function Yn(x).
    -    """
    -    return jnyn_zeros(n,nt)[2]
    -def ynp_zeros(n,nt):
    -    """Compute nt zeros of the Bessel function Yn'(x).
    -    """
    -    return jnyn_zeros(n,nt)[3]
    -
    -def y0_zeros(nt,complex=0):
    -    """Returns nt (complex or real) zeros of Y0(z), z0, and the value
    -    of Y0'(z0) = -Y1(z0) at each zero.
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt <=0):
    -        raise ValueError("Arguments must be scalar positive integer.")
    -    kf = 0
    -    kc = (complex != 1)
    -    return specfun.cyzo(nt,kf,kc)
    -
    -def y1_zeros(nt,complex=0):
    -    """Returns nt (complex or real) zeros of Y1(z), z1, and the value
    -    of Y1'(z1) = Y0(z1) at each zero.
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt <=0):
    -        raise ValueError("Arguments must be scalar positive integer.")
    -    kf = 1
    -    kc = (complex != 1)
    -    return specfun.cyzo(nt,kf,kc)
    -
    -def y1p_zeros(nt,complex=0):
    -    """Returns nt (complex or real) zeros of Y1'(z), z1', and the value
    -    of Y1(z1') at each zero.
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt <=0):
    -        raise ValueError("Arguments must be scalar positive integer.")
    -    kf = 2
    -    kc = (complex != 1)
    -    return specfun.cyzo(nt,kf,kc)
    -
    -def bessel_diff_formula(v, z, n, L, phase):
    -    # from AMS55.
    -    # L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
    -    # L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
    -    # For K, you can pull out the exp((v-k)*pi*i) into the caller
    -    p = 1.0
    -    s = L(v-n, z)
    -    for i in xrange(1, n+1):
    -        p = phase * (p * (n-i+1)) / i   # = choose(k, i)
    -        s += p*L(v-n + i*2, z)
    -    return s / (2.**n)
    -
    -def jvp(v,z,n=1):
    -    """Return the nth derivative of Jv(z) with respect to z.
    -    """
    -    if not isinstance(n,types.IntType) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if n == 0:
    -        return jv(v,z)
    -    else:
    -        return bessel_diff_formula(v, z, n, jv, -1)
    -#        return (jvp(v-1,z,n-1) - jvp(v+1,z,n-1))/2.0
    -
    -def yvp(v,z,n=1):
    -    """Return the nth derivative of Yv(z) with respect to z.
    -    """
    -    if not isinstance(n,types.IntType) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if n == 0:
    -        return yv(v,z)
    -    else:
    -        return bessel_diff_formula(v, z, n, yv, -1)
    -#        return (yvp(v-1,z,n-1) - yvp(v+1,z,n-1))/2.0
    -
    -def kvp(v,z,n=1):
    -    """Return the nth derivative of Kv(z) with respect to z.
    -    """
    -    if not isinstance(n,types.IntType) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if n == 0:
    -        return kv(v,z)
    -    else:
    -        return (-1)**n * bessel_diff_formula(v, z, n, kv, 1)
    -
    -def ivp(v,z,n=1):
    -    """Return the nth derivative of Iv(z) with respect to z.
    -    """
    -    if not isinstance(n,types.IntType) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if n == 0:
    -        return iv(v,z)
    -    else:
    -        return bessel_diff_formula(v, z, n, iv, 1)
    -
    -def h1vp(v,z,n=1):
    -    """Return the nth derivative of H1v(z) with respect to z.
    -    """
    -    if not isinstance(n,types.IntType) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if n == 0:
    -        return hankel1(v,z)
    -    else:
    -        return bessel_diff_formula(v, z, n, hankel1, -1)
    -#        return (h1vp(v-1,z,n-1) - h1vp(v+1,z,n-1))/2.0
    -
    -def h2vp(v,z,n=1):
    -    """Return the nth derivative of H2v(z) with respect to z.
    -    """
    -    if not isinstance(n,types.IntType) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if n == 0:
    -        return hankel2(v,z)
    -    else:
    -        return bessel_diff_formula(v, z, n, hankel2, -1)
    -#        return (h2vp(v-1,z,n-1) - h2vp(v+1,z,n-1))/2.0
    -
    -def sph_jn(n,z):
    -    """Compute the spherical Bessel function jn(z) and its derivative for
    -    all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z):
    -        nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
    -    else:
    -        nm,jn,jnp = specfun.sphj(n1,z)
    -    return jn[:(n+1)], jnp[:(n+1)]
    -
    -def sph_yn(n,z):
    -    """Compute the spherical Bessel function yn(z) and its derivative for
    -    all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z) or less(z,0):
    -        nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
    -    else:
    -        nm,yn,ynp = specfun.sphy(n1,z)
    -    return yn[:(n+1)], ynp[:(n+1)]
    -
    -def sph_jnyn(n,z):
    -    """Compute the spherical Bessel functions, jn(z) and yn(z) and their
    -    derivatives for all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z) or less(z,0):
    -        nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
    -    else:
    -        nm,yn,ynp = specfun.sphy(n1,z)
    -        nm,jn,jnp = specfun.sphj(n1,z)
    -    return jn[:(n+1)],jnp[:(n+1)],yn[:(n+1)],ynp[:(n+1)]
    -
    -def sph_in(n,z):
    -    """Compute the spherical Bessel function in(z) and its derivative for
    -    all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z):
    -        nm,In,Inp,kn,knp = specfun.csphik(n1,z)
    -    else:
    -        nm,In,Inp = specfun.sphi(n1,z)
    -    return In[:(n+1)], Inp[:(n+1)]
    -
    -def sph_kn(n,z):
    -    """Compute the spherical Bessel function kn(z) and its derivative for
    -    all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z) or less(z,0):
    -        nm,In,Inp,kn,knp = specfun.csphik(n1,z)
    -    else:
    -        nm,kn,knp = specfun.sphk(n1,z)
    -    return kn[:(n+1)], knp[:(n+1)]
    -
    -def sph_inkn(n,z):
    -    """Compute the spherical Bessel functions, in(z) and kn(z) and their
    -    derivatives for all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if iscomplex(z) or less(z,0):
    -        nm,In,Inp,kn,knp = specfun.csphik(n,z)
    -    else:
    -        nm,In,Inp = specfun.sphi(n,z)
    -        nm,kn,knp = specfun.sphk(n,z)
    -    return In,Inp,kn,knp
    -
    -def riccati_jn(n,x):
    -    """Compute the Ricatti-Bessel function of the first kind and its
    -    derivative for all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(x)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n == 0): n1 = 1
    -    else: n1 = n
    -    nm,jn,jnp = specfun.rctj(n1,x)
    -    return jn[:(n+1)],jnp[:(n+1)]
    -
    -def riccati_yn(n,x):
    -    """Compute the Ricatti-Bessel function of the second kind and its
    -    derivative for all orders up to and including n.
    -    """
    -    if not (isscalar(n) and isscalar(x)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n == 0): n1 = 1
    -    else: n1 = n
    -    nm,jn,jnp = specfun.rcty(n1,x)
    -    return jn[:(n+1)],jnp[:(n+1)]
    -
    -def _sph_harmonic(m,n,theta,phi):
    -    """Compute spherical harmonics.
    -
    -    This is a ufunc and may take scalar or array arguments like any
    -    other ufunc.  The inputs will be broadcasted against each other.
    -
    -    Parameters
    -    ----------
    -    m : int
    -       |m| <= n; the order of the harmonic.
    -    n : int
    -       where `n` >= 0; the degree of the harmonic.  This is often called
    -       ``l`` (lower case L) in descriptions of spherical harmonics.
    -    theta : float
    -       [0, 2*pi]; the azimuthal (longitudinal) coordinate.
    -    phi : float
    -       [0, pi]; the polar (colatitudinal) coordinate.
    -
    -    Returns
    -    -------
    -    y_mn : complex float
    -       The harmonic $Y^m_n$ sampled at `theta` and `phi`
    -
    -    Notes
    -    -----
    -    There are different conventions for the meaning of input arguments
    -    `theta` and `phi`.  We take `theta` to be the azimuthal angle and
    -    `phi` to be the polar angle.  It is common to see the opposite
    -    convention - that is `theta` as the polar angle and `phi` as the
    -    azimuthal angle.
    -    """
    -    x = cos(phi)
    -    m,n = int(m), int(n)
    -    Pmn,Pmn_deriv = lpmn(m,n,x)
    -    # Legendre call generates all orders up to m and degrees up to n
    -    val = Pmn[-1, -1]
    -    val *= sqrt((2*n+1)/4.0/pi)
    -    val *= exp(0.5*(gammaln(n-m+1)-gammaln(n+m+1)))
    -    val *= exp(1j*m*theta)
    -    return val
    -
    -sph_harm = vectorize(_sph_harmonic,'D')
    -
    -def erfinv(y):
    -    return ndtri((y+1)/2.0)/sqrt(2)
    -
    -def erfcinv(y):
    -    return ndtri((2-y)/2.0)/sqrt(2)
    -
    -def erf_zeros(nt):
    -    """Compute nt complex zeros of the error function erf(z).
    -    """
    -    if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
    -        raise ValueError("Argument must be positive scalar integer.")
    -    return specfun.cerzo(nt)
    -
    -def fresnelc_zeros(nt):
    -    """Compute nt complex zeros of the cosine fresnel integral C(z).
    -    """
    -    if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
    -        raise ValueError("Argument must be positive scalar integer.")
    -    return specfun.fcszo(1,nt)
    -
    -def fresnels_zeros(nt):
    -    """Compute nt complex zeros of the sine fresnel integral S(z).
    -    """
    -    if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
    -        raise ValueError("Argument must be positive scalar integer.")
    -    return specfun.fcszo(2,nt)
    -
    -def fresnel_zeros(nt):
    -    """Compute nt complex zeros of the sine and cosine fresnel integrals
    -    S(z) and C(z).
    -    """
    -    if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
    -        raise ValueError("Argument must be positive scalar integer.")
    -    return specfun.fcszo(2,nt), specfun.fcszo(1,nt)
    -
    -def hyp0f1(v,z):
    -    """Confluent hypergeometric limit function 0F1.
    -    Limit as q->infinity of 1F1(q;a;z/q)
    -    """
    -    z = asarray(z)
    -    if issubdtype(z.dtype, complexfloating):
    -        arg = 2*sqrt(abs(z))
    -        num = where(z>=0, iv(v-1,arg), jv(v-1,arg))
    -        den = abs(z)**((v-1.0)/2)
    -    else:
    -        num = iv(v-1,2*sqrt(z))
    -        den = z**((v-1.0)/2.0)
    -    num *= gamma(v)
    -    return where(z==0,1.0,num/ asarray(den))
    -
    -def assoc_laguerre(x,n,k=0.0):
    -    return orthogonal.eval_genlaguerre(n, k, x)
    -
    -digamma = psi
    -
    -def polygamma(n, x):
    -    """Polygamma function which is the nth derivative of the digamma (psi)
    -    function."""
    -    n, x = asarray(n), asarray(x)
    -    cond = (n==0)
    -    fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1,x)
    -    if sometrue(cond,axis=0):
    -        return where(cond, psi(x), fac2)
    -    return fac2
    -
    -def mathieu_even_coef(m,q):
    -    """Compute expansion coefficients for even mathieu functions and
    -    modified mathieu functions.
    -    """
    -    if not (isscalar(m) and isscalar(q)):
    -        raise ValueError("m and q must be scalars.")
    -    if (q < 0):
    -        raise ValueError("q >=0")
    -    if (m != floor(m)) or (m<0):
    -        raise ValueError("m must be an integer >=0.")
    -
    -    if (q <= 1):
    -        qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
    -    else:
    -        qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
    -    km = int(qm+0.5*m)
    -    if km > 251:
    -        print "Warning, too many predicted coefficients."
    -    kd = 1
    -    m = int(floor(m))
    -    if m % 2:
    -        kd = 2
    -
    -    a = mathieu_a(m,q)
    -    fc = specfun.fcoef(kd,m,q,a)
    -    return fc[:km]
    -
    -def mathieu_odd_coef(m,q):
    -    """Compute expansion coefficients for even mathieu functions and
    -    modified mathieu functions.
    -    """
    -    if not (isscalar(m) and isscalar(q)):
    -        raise ValueError("m and q must be scalars.")
    -    if (q < 0):
    -        raise ValueError("q >=0")
    -    if (m != floor(m)) or (m<=0):
    -        raise ValueError("m must be an integer > 0")
    -
    -    if (q <= 1):
    -        qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
    -    else:
    -        qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
    -    km = int(qm+0.5*m)
    -    if km > 251:
    -        print "Warning, too many predicted coefficients."
    -    kd = 4
    -    m = int(floor(m))
    -    if m % 2:
    -        kd = 3
    -
    -    b = mathieu_b(m,q)
    -    fc = specfun.fcoef(kd,m,q,b)
    -    return fc[:km]
    -
    -def lpmn(m,n,z):
    -    """Associated Legendre functions of the first kind, Pmn(z) and its
    -    derivative, Pmn'(z) of order m and degree n.  Returns two
    -    arrays of size (m+1,n+1) containing Pmn(z) and Pmn'(z) for
    -    all orders from 0..m and degrees from 0..n.
    -
    -    z can be complex.
    -
    -    Parameters
    -    ----------
    -    m : int
    -       |m| <= n; the order of the Legendre function
    -    n : int
    -       where `n` >= 0; the degree of the Legendre function.  Often
    -       called ``l`` (lower case L) in descriptions of the associated
    -       Legendre function
    -    z : float or complex
    -       input value
    -
    -    Returns
    -    -------
    -    Pmn_z : (m+1, n+1) array
    -       Values for all orders 0..m and degrees 0..n
    -    Pmn_d_z : (m+1, n+1) array
    -       Derivatives for all orders 0..m and degrees 0..n
    -    """
    -    if not isscalar(m) or (abs(m)>n):
    -        raise ValueError("m must be <= n.")
    -    if not isscalar(n) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if not isscalar(z):
    -        raise ValueError("z must be scalar.")
    -    if (m < 0):
    -        mp = -m
    -        mf,nf = mgrid[0:mp+1,0:n+1]
    -        sv = errprint(0)
    -        fixarr = where(mf>nf,0.0,(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
    -        sv = errprint(sv)
    -    else:
    -        mp = m
    -    if iscomplex(z):
    -        p,pd = specfun.clpmn(mp,n,real(z),imag(z))
    -    else:
    -        p,pd = specfun.lpmn(mp,n,z)
    -    if (m < 0):
    -        p = p * fixarr
    -        pd = pd * fixarr
    -    return p,pd
    -
    -
    -
    -def lqmn(m,n,z):
    -    """Associated Legendre functions of the second kind, Qmn(z) and its
    -    derivative, Qmn'(z) of order m and degree n.  Returns two
    -    arrays of size (m+1,n+1) containing Qmn(z) and Qmn'(z) for
    -    all orders from 0..m and degrees from 0..n.
    -
    -    z can be complex.
    -    """
    -    if not isscalar(m) or (m<0):
    -        raise ValueError("m must be a non-negative integer.")
    -    if not isscalar(n) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if not isscalar(z):
    -        raise ValueError("z must be scalar.")
    -    m = int(m)
    -    n = int(n)
    -
    -    # Ensure neither m nor n == 0
    -    mm = max(1,m)
    -    nn = max(1,n)
    -
    -    if iscomplex(z):
    -        q,qd = specfun.clqmn(mm,nn,z)
    -    else:
    -        q,qd = specfun.lqmn(mm,nn,z)
    -    return q[:(m+1),:(n+1)],qd[:(m+1),:(n+1)]
    -
    -
    -def bernoulli(n):
    -    """Return an array of the Bernoulli numbers B0..Bn
    -    """
    -    if not isscalar(n) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    n = int(n)
    -    if (n < 2): n1 = 2
    -    else: n1 = n
    -    return specfun.bernob(int(n1))[:(n+1)]
    -
    -def euler(n):
    -    """Return an array of the Euler numbers E0..En (inclusive)
    -    """
    -    if not isscalar(n) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    n = int(n)
    -    if (n < 2): n1 = 2
    -    else:  n1 = n
    -    return specfun.eulerb(n1)[:(n+1)]
    -
    -def lpn(n,z):
    -    """Compute sequence of Legendre functions of the first kind (polynomials),
    -    Pn(z) and derivatives for all degrees from 0 to n (inclusive).
    -
    -    See also special.legendre  for polynomial class.
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z):
    -        pn,pd = specfun.clpn(n1,z)
    -    else:
    -        pn,pd = specfun.lpn(n1,z)
    -    return pn[:(n+1)],pd[:(n+1)]
    -
    -## lpni
    -
    -def lqn(n,z):
    -    """Compute sequence of Legendre functions of the second kind,
    -    Qn(z) and derivatives for all degrees from 0 to n (inclusive).
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (n!= floor(n)) or (n<0):
    -        raise ValueError("n must be a non-negative integer.")
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    if iscomplex(z):
    -        qn,qd = specfun.clqn(n1,z)
    -    else:
    -        qn,qd = specfun.lqnb(n1,z)
    -    return qn[:(n+1)],qd[:(n+1)]
    -
    -def ai_zeros(nt):
    -    """Compute the zeros of Airy Functions Ai(x) and Ai'(x), a and a'
    -    respectively, and the associated values of Ai(a') and Ai'(a).
    -
    -    Returns
    -    -------
    -    a[l-1]   -- the lth zero of Ai(x)
    -    ap[l-1]  -- the lth zero of Ai'(x)
    -    ai[l-1]  -- Ai(ap[l-1])
    -    aip[l-1] -- Ai'(a[l-1])
    -    """
    -    kf = 1
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be a positive integer scalar.")
    -    return specfun.airyzo(nt,kf)
    -
    -def bi_zeros(nt):
    -    """Compute the zeros of Airy Functions Bi(x) and Bi'(x), b and b'
    -    respectively, and the associated values of Ai(b') and Ai'(b).
    -
    -    Returns
    -    -------
    -    b[l-1]   -- the lth zero of Bi(x)
    -    bp[l-1]  -- the lth zero of Bi'(x)
    -    bi[l-1]  -- Bi(bp[l-1])
    -    bip[l-1] -- Bi'(b[l-1])
    -    """
    -    kf = 2
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be a positive integer scalar.")
    -    return specfun.airyzo(nt,kf)
    -
    -def lmbda(v,x):
    -    """Compute sequence of lambda functions with arbitrary order v
    -    and their derivatives.  Lv0(x)..Lv(x) are computed with v0=v-int(v).
    -    """
    -    if not (isscalar(v) and isscalar(x)):
    -        raise ValueError("arguments must be scalars.")
    -    if (v<0):
    -        raise ValueError("argument must be > 0.")
    -    n = int(v)
    -    v0 = v - n
    -    if (n < 1): n1 = 1
    -    else: n1 = n
    -    v1 = n1 + v0
    -    if (v!=floor(v)):
    -        vm, vl, dl = specfun.lamv(v1,x)
    -    else:
    -        vm, vl, dl = specfun.lamn(v1,x)
    -    return vl[:(n+1)], dl[:(n+1)]
    -
    -def pbdv_seq(v,x):
    -    """Compute sequence of parabolic cylinder functions Dv(x) and
    -    their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
    -    """
    -    if not (isscalar(v) and isscalar(x)):
    -        raise ValueError("arguments must be scalars.")
    -    n = int(v)
    -    v0 = v-n
    -    if (n < 1): n1=1
    -    else: n1 = n
    -    v1 = n1 + v0
    -    dv,dp,pdf,pdd = specfun.pbdv(v1,x)
    -    return dv[:n1+1],dp[:n1+1]
    -
    -def pbvv_seq(v,x):
    -    """Compute sequence of parabolic cylinder functions Dv(x) and
    -    their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
    -    """
    -    if not (isscalar(v) and isscalar(x)):
    -        raise ValueError("arguments must be scalars.")
    -    n = int(v)
    -    v0 = v-n
    -    if (n <= 1): n1=1
    -    else: n1 = n
    -    v1 = n1 + v0
    -    dv,dp,pdf,pdd = specfun.pbvv(v1,x)
    -    return dv[:n1+1],dp[:n1+1]
    -
    -def pbdn_seq(n,z):
    -    """Compute sequence of parabolic cylinder functions Dn(z) and
    -    their derivatives for D0(z)..Dn(z).
    -    """
    -    if not (isscalar(n) and isscalar(z)):
    -        raise ValueError("arguments must be scalars.")
    -    if (floor(n)!=n):
    -        raise ValueError("n must be an integer.")
    -    if (abs(n) <= 1):
    -        n1 = 1
    -    else:
    -        n1 = n
    -    cpb,cpd = specfun.cpbdn(n1,z)
    -    return cpb[:n1+1],cpd[:n1+1]
    -
    -def ber_zeros(nt):
    -    """Compute nt zeros of the kelvin function ber x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,1)
    -
    -def bei_zeros(nt):
    -    """Compute nt zeros of the kelvin function bei x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,2)
    -
    -def ker_zeros(nt):
    -    """Compute nt zeros of the kelvin function ker x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,3)
    -
    -def kei_zeros(nt):
    -    """Compute nt zeros of the kelvin function kei x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,4)
    -
    -def berp_zeros(nt):
    -    """Compute nt zeros of the kelvin function ber' x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,5)
    -
    -def beip_zeros(nt):
    -    """Compute nt zeros of the kelvin function bei' x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,6)
    -
    -def kerp_zeros(nt):
    -    """Compute nt zeros of the kelvin function ker' x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,7)
    -
    -def keip_zeros(nt):
    -    """Compute nt zeros of the kelvin function kei' x
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,8)
    -
    -def kelvin_zeros(nt):
    -    """Compute nt zeros of all the kelvin functions returned in a
    -    length 8 tuple of arrays of length nt.
    -    The tuple containse the arrays of zeros of
    -    (ber, bei, ker, kei, ber', bei', ker', kei')
    -    """
    -    if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
    -        raise ValueError("nt must be positive integer scalar.")
    -    return specfun.klvnzo(nt,1), \
    -           specfun.klvnzo(nt,2), \
    -           specfun.klvnzo(nt,3), \
    -           specfun.klvnzo(nt,4), \
    -           specfun.klvnzo(nt,5), \
    -           specfun.klvnzo(nt,6), \
    -           specfun.klvnzo(nt,7), \
    -           specfun.klvnzo(nt,8)
    -
    -def pro_cv_seq(m,n,c):
    -    """Compute a sequence of characteristic values for the prolate
    -    spheroidal wave functions for mode m and n'=m..n and spheroidal
    -    parameter c.
    -    """
    -    if not (isscalar(m) and isscalar(n) and isscalar(c)):
    -        raise ValueError("Arguments must be scalars.")
    -    if (n!=floor(n)) or (m!=floor(m)):
    -        raise ValueError("Modes must be integers.")
    -    if (n-m > 199):
    -        raise ValueError("Difference between n and m is too large.")
    -    maxL = n-m+1
    -    return specfun.segv(m,n,c,1)[1][:maxL]
    -
    -def obl_cv_seq(m,n,c):
    -    """Compute a sequence of characteristic values for the oblate
    -    spheroidal wave functions for mode m and n'=m..n and spheroidal
    -    parameter c.
    -    """
    -    if not (isscalar(m) and isscalar(n) and isscalar(c)):
    -        raise ValueError("Arguments must be scalars.")
    -    if (n!=floor(n)) or (m!=floor(m)):
    -        raise ValueError("Modes must be integers.")
    -    if (n-m > 199):
    -        raise ValueError("Difference between n and m is too large.")
    -    maxL = n-m+1
    -    return specfun.segv(m,n,c,-1)[1][:maxL]
    -
    -def ellipk(m):
    -    """y=ellipk(m) returns the complete integral of the first kind:
    -    integral(1/sqrt(1-m*sin(t)**2),t=0..pi/2)
    -
    -    This function is rather imprecise around m==1. For more precision
    -    around this point, use ellipkm1."""
    -    return ellipkm1(1 - asarray(m))
    -
    -def agm(a,b):
    -    """Arithmetic, Geometric Mean
    -
    -    Start with a_0=a and b_0=b and iteratively compute
    -
    -    a_{n+1} = (a_n+b_n)/2
    -    b_{n+1} = sqrt(a_n*b_n)
    -
    -    until a_n=b_n.   The result is agm(a,b)
    -
    -    agm(a,b)=agm(b,a)
    -    agm(a,a) = a
    -    min(a,b) < agm(a,b) < max(a,b)
    -    """
    -    s = a + b + 0.0
    -    return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
    diff --git a/scipy-0.10.1/scipy/special/bento.info b/scipy-0.10.1/scipy/special/bento.info
    deleted file mode 100644
    index 482f8ef30d..0000000000
    --- a/scipy-0.10.1/scipy/special/bento.info
    +++ /dev/null
    @@ -1,49 +0,0 @@
    -HookFile: bscript
    -
    -Library:
    -    CompiledLibrary: sc_c_misc
    -        Sources:
    -            c_misc/besselpoly.c,
    -            c_misc/fsolve.c,
    -            c_misc/gammaincinv.c
    -    CompiledLibrary: sc_cephes
    -        Sources:
    -            cephes/*.c
    -    CompiledLibrary: sc_mach
    -        Sources:
    -            mach/d1mach.f,
    -            mach/i1mach.f,
    -            mach/r1mach.f,
    -            mach/xerror.f
    -    CompiledLibrary: sc_toms
    -        Sources:
    -            toms/wofz.f
    -    CompiledLibrary: sc_amos
    -        Sources:
    -            amos/*.f
    -    CompiledLibrary: sc_cdf
    -        Sources:
    -            cdflib/*.f
    -    CompiledLibrary: sc_specfunlib
    -        Sources:
    -            specfun/specfun.f
    -    Extension: orthogonal_eval
    -        Sources:
    -            orthogonal_eval.c
    -    Extension: lambertw
    -        Sources:
    -            lambertw.c
    -    Extension: _logit
    -        Sources:
    -            _logit.c.src
    -    Extension: _cephes
    -        Sources:
    -            _cephesmodule.c,
    -            amos_wrappers.c,
    -            specfun_wrappers.c,
    -            toms_wrappers.c,
    -            cdf_wrappers.c,
    -            ufunc_extras.c
    -    Extension: specfun
    -         Sources:
    -            specfun.pyf
    diff --git a/scipy-0.10.1/scipy/special/bscript b/scipy-0.10.1/scipy/special/bscript
    deleted file mode 100644
    index dfae871900..0000000000
    --- a/scipy-0.10.1/scipy/special/bscript
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -from bento.commands import hooks
    -
    -@hooks.pre_build
    -def pre_build(context):
    -    default_builder = context.default_builder
    -
    -    def builder(extension):
    -        return default_builder(extension,  use="NPYMATH")
    -    context.register_builder("orthogonal_eval", builder)
    -    context.register_builder("lambertw", builder)
    -    context.register_builder("_logit", builder)
    -
    -    def builder(extension):
    -        return default_builder(extension,
    -                               features="c pyext cshlib bento f2py",
    -                               use="sc_specfunlib NPYMATH CLIB")
    -    context.register_builder("specfun", builder)
    -
    -    def builder(extension):
    -        return default_builder(extension, 
    -                               use="sc_amos sc_toms sc_c_misc sc_cephes sc_mach " \
    -                                   "sc_cdf sc_specfunlib NPYMATH CLIB")
    -    context.register_builder("_cephes", builder)
    diff --git a/scipy-0.10.1/scipy/special/c_misc/besselpoly.c b/scipy-0.10.1/scipy/special/c_misc/besselpoly.c
    deleted file mode 100644
    index b4356bae35..0000000000
    --- a/scipy-0.10.1/scipy/special/c_misc/besselpoly.c
    +++ /dev/null
    @@ -1,43 +0,0 @@
    -
    -#include 
    -extern double cephes_Gamma (double x);
    -
    -
    -#define EPS 1.0e-17
    -
    -double besselpoly(double a, double lambda, double nu) {
    -
    -  int m, factor=0;
    -  double Sm, relerr, Sol;
    -  double sum=0.0;
    -
    -  /* Special handling for a = 0.0 */
    -  if (a == 0.0) {
    -    if (nu == 0.0) return 1.0/(lambda + 1);
    -    else return 0.0;
    -  }
    -  /* Special handling for negative and integer nu */
    -  if ((nu < 0) && (floor(nu)==nu)) {
    -    nu = -nu;
    -    factor = ((int) nu) % 2;
    -  }    
    -  Sm = exp(nu*log(a))/(cephes_Gamma(nu+1)*(lambda+nu+1));
    -  m = 0;
    -  do {
    -    sum += Sm;
    -    Sol = Sm;
    -    Sm *= -a*a*(lambda+nu+1+2*m)/((nu+m+1)*(m+1)*(lambda+nu+1+2*m+2));
    -    m++;
    -    relerr = fabs((Sm-Sol)/Sm);
    -  } while (relerr > EPS && m < 1000);
    -  if (!factor)
    -    return sum;
    -  else
    -    return -sum;
    -}
    -
    -
    -
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/special/c_misc/fsolve.c b/scipy-0.10.1/scipy/special/c_misc/fsolve.c
    deleted file mode 100644
    index b6a2e0b001..0000000000
    --- a/scipy-0.10.1/scipy/special/c_misc/fsolve.c
    +++ /dev/null
    @@ -1,168 +0,0 @@
    -#include "misc.h"
    -#include 
    -
    -#define MAX_ITERATIONS              100
    -#define FP_CMP_WITH_BISECT_NITER    4
    -#define FP_CMP_WITH_BISECT_WIDTH    4.0
    -
    -static double
    -max(double a, double b)
    -{
    -    return (a > b ? a : b);
    -}
    -
    -/*
    -   Use a combination of bisection and false position to find a root
    -   of a function within a given interval. This is guaranteed to converge,
    -   and always keeps a bounding interval, unlike Newton's method.
    -
    -   The false position steps are either unmodified, or modified with
    -   the Anderson-Bjorck method as appropiate. Theoretically, this has
    -   a "speed of convergence" of 1.7 (bisection is 1, Newton is 2).
    -
    -   Input
    -   -----
    -    a, b:   initial bounding interval
    -    fa, fb: value of f() at a and b
    -    f, f_extra: function to find root of is f(x, f_extra)
    -    abserr, relerr: absolute and relative errors on the bounding interval
    -    bisect_til: If > 0.0, perform bisection until the width of the
    -                bounding interval is less than this.
    -
    -   Output
    -   ------
    -    a, b, fa, fb: Final bounding interval and function values
    -    best_x, best_f: Best root approximation and the function value there
    -
    -   Returns
    -   -------
    -    FSOLVE_CONVERGED: Bounding interval is smaller than required error.
    -    FSOLVE_NOT_BRACKET: Initial interval is not a bounding interval.
    -    FSOLVE_EXACT: An exact root was found (best_f = 0)
    -
    -
    -   Note that this routine was designed initially to work with gammaincinv, so
    -   it may not be tuned right for other problems. Don't use it blindly.
    - */
    -fsolve_result_t
    -false_position(double *a, double *fa, double *b, double *fb,
    -               objective_function f, void *f_extra,
    -               double abserr, double relerr, double bisect_til,
    -               double *best_x, double *best_f, double *errest)
    -{
    -    double x1=*a, f1=*fa, x2=*b, f2=*fb;
    -    fsolve_result_t r = FSOLVE_CONVERGED;
    -    double gamma = 1.0;
    -    enum {bisect, falsep} state = bisect;
    -    int n_falsep = 0;
    -    double x3, f3;
    -    double w, last_bisect_width;
    -    double tol;
    -    int niter;
    -
    -    if (f1*f2 >= 0.0) {
    -        return FSOLVE_NOT_BRACKET;
    -    }
    -    if (bisect_til > 0.0) {
    -        state = bisect;
    -    } else {
    -        state = falsep;
    -    }
    -    w = fabs(x2 - x1);
    -    last_bisect_width = w;
    -    for (niter=0; niter < MAX_ITERATIONS; niter++) {
    -        switch (state) {
    -        case bisect: {
    -            x3 = 0.5 * (x1 + x2);
    -            if (x3 == x1 || x3 == x2) {
    -                /* i.e., x1 and x2 are successive floating-point numbers. */
    -                *best_x = x3;
    -                *best_f = (x3==x1) ? f1 : f2;
    -                goto finish;
    -            }
    -            f3 = f(x3, f_extra);
    -            if (f3 == 0.0) {
    -                goto exact_soln;
    -            }
    -            if (f3*f2 < 0.0) {
    -                x1 = x2; f1 = f2;
    -            }
    -            x2 = x3; f2 = f3;
    -            w = fabs(x2 - x1);
    -            last_bisect_width = w;
    -            if (bisect_til > 0.0) {
    -                if (w < bisect_til) {
    -                    bisect_til = -1.0;
    -                    gamma = 1.0;
    -                    n_falsep = 0;
    -                    state = falsep;
    -                }
    -            } else {
    -                gamma = 1.0;
    -                n_falsep = 0;
    -                state = falsep;
    -            }
    -            break;
    -        }
    -        case falsep: {
    -            double s12 = (f2 - gamma*f1) / (x2 - x1);
    -            x3 = x2 - f2/s12;
    -            f3 = f(x3, f_extra);
    -            if (f3 == 0.0) {
    -                goto exact_soln;
    -            }
    -            n_falsep += 1;
    -            if (f3*f2 < 0.0) {
    -                gamma = 1.0;
    -                x1 = x2; f1 = f2;
    -            } else {
    -                /* Anderson-Bjorck method */
    -                double g = 1.0 - f3 / f2;
    -                if (g <= 0.0) { g = 0.5; }
    -                /* It's not really clear from the sources I've looked at,
    -                   but I believe this is *= instead of =. */
    -                gamma *= g;
    -            }
    -            x2 = x3; f2 = f3;
    -            w = fabs(x2 - x1);
    -            /* Sanity check. For every 4 false position checks, see if we
    -               really are decreasing the interval by comparing to what
    -               bisection would have achieved (or, rather, a bit more lenient
    -               than that -- interval decreased by 4 instead of by 16, as
    -               the fp could be decreasing gamma for a bit).
    -
    -               Note that this should guarantee convergence, as it makes
    -               sure that we always end up decreasing the interval width
    -               with a bisection.
    -             */
    -            if (n_falsep > FP_CMP_WITH_BISECT_NITER) {
    -                if (w*FP_CMP_WITH_BISECT_WIDTH > last_bisect_width) {
    -                    state = bisect;
    -                }
    -                n_falsep = 0;
    -                last_bisect_width = w;
    -            }
    -            break;
    -        }
    -        }
    -        tol = abserr + relerr*max(max(fabs(x1), fabs(x2)), 1.0);
    -        if (w <= tol) {
    -            if (fabs(f1) < fabs(f2)) {
    -                *best_x = x1; *best_f = f1;
    -            } else {
    -                *best_x = x2; *best_f = f2;
    -            }
    -            goto finish;
    -        }
    -    }
    -    r = FSOLVE_MAX_ITERATIONS;
    -    *best_x = x3; *best_f = f3;
    -    goto finish;
    -exact_soln:
    -    *best_x = x3; *best_f = 0.0;
    -    r = FSOLVE_EXACT;
    -finish:
    -    *a = x1; *fa = f1; *b = x2; *fb = f2;
    -    *errest = w;
    -    return r;
    -}
    diff --git a/scipy-0.10.1/scipy/special/c_misc/gammaincinv.c b/scipy-0.10.1/scipy/special/c_misc/gammaincinv.c
    deleted file mode 100644
    index cdcaefb605..0000000000
    --- a/scipy-0.10.1/scipy/special/c_misc/gammaincinv.c
    +++ /dev/null
    @@ -1,73 +0,0 @@
    -#include 
    -#include 
    -
    -#include 
    -#include 
    -
    -#include "../cephes.h"
    -#undef fabs
    -#include "misc.h"
    -
    -/* Limits after which to issue warnings about non-convergence */
    -#define ALLOWED_ATOL (1e-306)
    -#define ALLOWED_RTOL (1e-6)
    -
    -void scipy_special_raise_warning(char *fmt, ...);
    -
    -/*
    -  Inverse of the (regularised) incomplete Gamma integral.
    -
    -  Given a, find x such that igam(a, x) = y.
    -  For y not small, we just use igami(a, 1-y) (inverse of the complemented
    -  incomplete Gamma integral). For y small, however, 1-y is about 1, and we
    -  lose digits.
    -
    -*/
    -
    -extern double MACHEP, MAXNUM;
    -
    -static double
    -gammainc(double x, double params[2])
    -{
    -    return cephes_igam(params[0], x) - params[1];
    -}
    -
    -double
    -gammaincinv(double a, double y)
    -{
    -    double lo = 0.0, hi;
    -    double flo = -y, fhi = 0.25 - y;
    -    double params[2];
    -    double best_x, best_f, errest;
    -    fsolve_result_t r;
    -
    -    if (a <= 0.0 || y <= 0.0 || y >= 0.25) {
    -        return cephes_igami(a, 1-y);
    -    }
    -
    -    /* Note: flo and fhi must have different signs (and be != 0),
    -     *       otherwise fsolve terminates with an error.
    -     */
    -
    -    params[0] = a;
    -    params[1] = y;
    -    hi = cephes_igami(a, 0.75);
    -    /* I found Newton to be unreliable. Also, after we generate a small
    -       interval by bisection above, false position will do a large step
    -       from an interval of width ~1e-4 to ~1e-14 in one step (a=10, x=0.05,
    -       but similiar for other values).
    -     */
    -
    -    r = false_position(&lo, &flo, &hi, &fhi,
    -                       (objective_function)gammainc, params,
    -                       2*MACHEP, 2*MACHEP, 1e-2*a,
    -                       &best_x, &best_f, &errest);
    -    if (!(r == FSOLVE_CONVERGED || r == FSOLVE_EXACT) &&
    -            errest > ALLOWED_ATOL + ALLOWED_RTOL*fabs(best_x)) {
    -        scipy_special_raise_warning(
    -            "gammaincinv: failed to converge at (a, y) = (%.20g, %.20g): got %g +- %g, code %d\n",
    -            a, y, best_x, errest, r);
    -        best_x = NPY_NAN;
    -    }
    -    return best_x;
    -}
    diff --git a/scipy-0.10.1/scipy/special/c_misc/misc.h b/scipy-0.10.1/scipy/special/c_misc/misc.h
    deleted file mode 100644
    index 95f94770cb..0000000000
    --- a/scipy-0.10.1/scipy/special/c_misc/misc.h
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -#ifndef C_MISC_MISC_H
    -#define C_MISC_MISC_H
    -
    -typedef enum {
    -  /* An exact solution was found, in which case the first point
    -     on the interval is the value */
    -  FSOLVE_EXACT,
    -  /* Interval width is less than the tolerance */
    -  FSOLVE_CONVERGED,
    -  /* Not a bracket */
    -  FSOLVE_NOT_BRACKET,
    -  /* Root-finding didn't converge in a set number of iterations. */
    -  FSOLVE_MAX_ITERATIONS
    -} fsolve_result_t;
    -
    -typedef double (*objective_function)(double, void *);
    -
    -fsolve_result_t false_position(double *a, double *fa, double *b, double *fb,
    -                       objective_function f, void *f_extra,
    -                       double abserr, double relerr, double bisect_til,
    -                       double *best_x, double *best_f, double *errest);
    -
    -double besselpoly(double a, double lambda, double nu);
    -double gammaincinv(double a, double x);
    -
    -#define gammaincinv_doc """gammaincinv(a, y) returns x such that gammainc(a, x) = y."""
    -
    -#endif /* C_MISC_MISC_H */
    diff --git a/scipy-0.10.1/scipy/special/cdf_wrappers.c b/scipy-0.10.1/scipy/special/cdf_wrappers.c
    deleted file mode 100644
    index 36665158a4..0000000000
    --- a/scipy-0.10.1/scipy/special/cdf_wrappers.c
    +++ /dev/null
    @@ -1,550 +0,0 @@
    -/* This file is a collection (more can be added) of wrappers around some
    - *  CDF Fortran algorithms, so that they can be called from
    - *  cephesmodule.so
    - */
    -
    -#include "cdf_wrappers.h"
    -
    -#if defined(NO_APPEND_FORTRAN)
    -#if defined(UPPERCASE_FORTRAN)
    -#define F_FUNC(f,F) F
    -#else
    -#define F_FUNC(f,F) f
    -#endif
    -#else
    -#if defined(UPPERCASE_FORTRAN)
    -#define F_FUNC(f,F) F##_
    -#else
    -#define F_FUNC(f,F) f##_
    -#endif
    -#endif
    -
    -/* This must be linked with fortran
    - */
    -
    -extern int scipy_special_print_error_messages;
    -
    -/* Notice q and p are used in reverse from their meanings in distributions.py
    - */
    -
    -static void show_error( int status, int bound) {
    -  /* show_error message */
    -
    -  if (status < 0) {
    -    printf("(Fortran) input parameter %d is out of range.\n", (-status));
    -  }
    -  else {
    -    switch (status) {
    -    case 1:
    -      printf("Answer appears to be lower than lowest search bound (%d).\n", bound);
    -      break;
    -    case 2:
    -      printf("Answer appears to be higher than highest search bound (%d).\n", bound);
    -      break;
    -    case 3:
    -    case 4:
    -      printf("Two parameters that should sum to 1.0 do not.\n");
    -      break;
    -    case 10:
    -      printf("Computational error.\n");
    -      break;
    -    default:
    -      printf("Unknown error.\n");
    -    }
    -  }
    -}
    -
    -extern void F_FUNC(cdfbet,CDFBET)(int*,double*,double*,double*,double*,double*,double*,int*,double*);
    -
    -double cdfbet3_wrap(double p, double b, double x) {
    -  int which=3;
    -  double q=1.0-p, y=1.0-x, a, bound;
    -  int status;  
    -  
    -  F_FUNC(cdfbet,CDFBET)(&which, &p, &q, &x, &y, &a, &b, &status, &bound);
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return a;
    -}
    -
    -double cdfbet4_wrap(double a, double p, double x) {
    -  int which=4;
    -  double q=1.0-p, y=1.0-x, b, bound;
    -  int status;  
    -  
    -  F_FUNC(cdfbet,CDFBET)(&which, &p, &q, &x, &y, &a, &b, &status, &bound);
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return b;
    -}
    -
    -
    -extern void F_FUNC(cdfbin,CDFBIN)(int*,double*,double*,double*,double*,double*,double*,int*,double*);
    -
    -double cdfbin2_wrap(double p, double xn, double pr) {
    -  int which=2;
    -  double q=1.0-p, s, ompr=1.0-pr, bound;
    -  int status;  
    -  
    -  F_FUNC(cdfbin,CDFBIN)(&which, &p, &q, &s, &xn, &pr, &ompr, &status, &bound);
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return s;
    -}
    -
    -double cdfbin3_wrap(double s, double p, double pr) {
    -  int which=3;
    -  double q=1.0-p, xn, ompr=1.0-pr, bound;
    -  int status;  
    -
    -  F_FUNC(cdfbin,CDFBIN)(&which, &p, &q, &s, &xn, &pr, &ompr, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return xn;
    -}
    -
    -extern void F_FUNC(cdfchi,CDFCHI)(int*,double*,double*,double*,double*,int*,double*);
    -double cdfchi3_wrap(double p, double x){
    -  int which=3;
    -  double q=1.0-p, df, bound;
    -  int status;  
    -
    -  F_FUNC(cdfchi,CDFCHI)(&which, &p, &q, &x, &df, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return df;
    -}
    -
    -extern void F_FUNC(cdfchn,CDFCHN)(int*,double*,double*,double*,double*,double*,int*,double*);
    -double cdfchn1_wrap(double x, double df, double nc) {
    -  int which=1;
    -  double q, p, bound;
    -  int status;  
    -
    -  F_FUNC(cdfchn,CDFCHN)(&which, &p, &q, &x, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return p;
    -}
    -
    -double cdfchn2_wrap(double p, double df, double nc) {
    -  int which=2;
    -  double q=1.0-p, x, bound;
    -  int status;  
    -
    -  F_FUNC(cdfchn,CDFCHN)(&which, &p, &q, &x, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -  }
    -  return x;
    -}
    -
    -double cdfchn3_wrap(double x, double p, double nc) {
    -  int which=3;
    -  double q=1.0-p, df, bound;
    -  int status;  
    -
    -  F_FUNC(cdfchn,CDFCHN)(&which, &p, &q, &x, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return df;
    -}
    -
    -double cdfchn4_wrap(double x, double df, double p) {
    -  int which=4;
    -  double q=1.0-p, nc, bound;
    -  int status;  
    -
    -  F_FUNC(cdfchn,CDFCHN)(&which, &p, &q, &x, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return nc;
    -}
    -
    -extern void F_FUNC(cdff,CDFF)(int*,double*,double*,double*,double*,double*,int*,double*);
    -/*
    -double cdff1_wrap(double dfn, double dfd, double f) {
    -  int which=1;
    -  double q, p, bound;
    -  int status;
    -
    -  F_FUNC(cdff,CDFF)(&which, &p, &q, &f, &dfn, &dfd, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -  }
    -  return p;
    -}
    -
    -double cdff2_wrap(double dfn, double dfd, double p) {
    -  int which=2;
    -  double q=1.0-p, f, bound;
    -  int status;
    -
    -  F_FUNC(cdff,CDFF)(&which, &p, &q, &f, &dfn, &dfd, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -  }
    -  return f;
    -}
    -*/
    -
    -/* This seem to give some trouble.  No idea why... */
    -double cdff3_wrap(double p, double dfd, double f) {
    -  int which=3;
    -  double q=1.0-p, dfn, bound;
    -  int status;
    -
    -  F_FUNC(cdff,CDFF)(&which, &p, &q, &f, &dfn, &dfd, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return dfn;
    -}
    -
    -double cdff4_wrap(double dfn, double p, double f) {
    -  int which=4;
    -  double q=1.0-p, dfd, bound;
    -  int status;  
    -
    -  F_FUNC(cdff,CDFF)(&which, &p, &q, &f, &dfn, &dfd, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return dfd;
    -}
    -
    -
    -extern void F_FUNC(cdffnc,CDFFNC)(int*,double*,double*,double*,double*,double*,double*,int*,double*);
    -double cdffnc1_wrap(double dfn, double dfd, double nc, double f) {
    -  int which=1;
    -  double q, p, bound;
    -  int status;
    -
    -  F_FUNC(cdffnc,CDFFNC)(&which, &p, &q, &f, &dfn, &dfd, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -  }
    -  return p;
    -}
    -
    -double cdffnc2_wrap(double dfn, double dfd, double nc, double p) {
    -  int which=2;
    -  double q=1.0-p, f, bound;
    -  int status;
    -
    -  F_FUNC(cdffnc,CDFFNC)(&which, &p, &q, &f, &dfn, &dfd, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return f;
    -}
    -
    -
    -double cdffnc3_wrap(double p, double dfd, double nc, double f) {
    -  int which=3;
    -  double q=1.0-p, dfn, bound;
    -  int status;
    -
    -  F_FUNC(cdffnc,CDFFNC)(&which, &p, &q, &f, &dfn, &dfd, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return dfn;
    -}
    -double cdffnc4_wrap(double dfn, double p, double nc, double f) {
    -  int which=4;
    -  double q=1.0-p, dfd, bound;
    -  int status;
    -
    -  F_FUNC(cdffnc,CDFFNC)(&which, &p, &q, &f, &dfn, &dfd, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return dfd;
    -}
    -
    -double cdffnc5_wrap(double dfn, double dfd, double p, double f) {
    -  int which=5;
    -  double q=1.0-p, nc, bound;
    -  int status;
    -
    -  F_FUNC(cdffnc,CDFFNC)(&which, &p, &q, &f, &dfn, &dfd, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return nc;
    -}
    -
    -/* scl == a in gdtr
    -   shp == b in gdtr
    -*/ 
    -extern void F_FUNC(cdfgam,CDFGAM)(int*,double*,double*,double*,double*,double*,int*,double*);
    -double cdfgam1_wrap(double scl, double shp, double x) {
    -  int which=1;
    -  double q, p, bound;
    -  int status;
    -
    -  F_FUNC(cdfgam,CDFGAM)(&which, &p, &q, &x, &shp, &scl, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -  }
    -  return p;
    -}
    -
    -double cdfgam2_wrap(double scl, double shp, double p) {
    -  int which=2;
    -  double q=1.0-p, x, bound;
    -  int status;
    -
    -  F_FUNC(cdfgam,CDFGAM)(&which, &p, &q, &x, &shp, &scl,  &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return x;
    -}
    -
    -double cdfgam3_wrap(double scl, double p, double x) {
    -  int which=3;
    -  double q=1.0-p, shp, bound;
    -  int status;
    -
    -  F_FUNC(cdfgam,CDFGAM)(&which, &p, &q, &x, &shp, &scl, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return shp;
    -}
    -
    -double cdfgam4_wrap(double p, double shp, double x) {
    -  int which=4;
    -  double q=1.0-p, scl, bound;
    -  int status;
    -
    -  F_FUNC(cdfgam,CDFGAM)(&which, &p, &q, &x, &shp, &scl, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return scl;
    -}
    -
    -extern void F_FUNC(cdfnbn,CDFNBN)(int*,double*,double*,double*,double*,double*,double*,int*,double*);
    -double cdfnbn2_wrap(double p, double xn, double pr) {
    -  int which=2;
    -  double q=1.0-p, s, ompr=1.0-pr, bound;
    -  int status;  
    -  
    -  F_FUNC(cdfnbn,CDFNBN)(&which, &p, &q, &s, &xn, &pr, &ompr, &status, &bound);
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return s;
    -}
    -
    -double cdfnbn3_wrap(double s, double p, double pr) {
    -  int which=3;
    -  double q=1.0-p, xn, ompr=1.0-pr, bound;
    -  int status;  
    -
    -  F_FUNC(cdfnbn,CDFNBN)(&which, &p, &q, &s, &xn, &pr, &ompr, &status, &bound);
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return xn;
    -}
    -
    -extern void F_FUNC(cdfnor,CDFNOR)(int*,double*,double*,double*,double*,double*,int*,double*);
    -double cdfnor3_wrap(double p, double std, double x) {
    -  int which=3;
    -  double q=1.0-p, mn, bound;
    -  int status;  
    -
    -  F_FUNC(cdfnor,CDFNOR)(&which, &p, &q, &x, &mn, &std, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return mn;
    -}
    -
    -double cdfnor4_wrap(double mn, double p, double x) {
    -  int which=4;
    -  double q=1.0-p, std, bound;
    -  int status;  
    -
    -  F_FUNC(cdfnor,CDFNOR)(&which, &p, &q, &x, &mn, &std, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return std;
    -}
    -
    -extern void F_FUNC(cdfpoi,CDFPOI)(int*,double*,double*,double*,double*,int*,double*);
    -double cdfpoi2_wrap(double p, double xlam){
    -  int which=2;
    -  double q=1.0-p, s, bound;
    -  int status;  
    -
    -  F_FUNC(cdfpoi,CDFPOI)(&which, &p, &q, &s, &xlam, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return s;
    -}
    -
    -extern void F_FUNC(cdft,CDFT)(int*,double*,double*,double*,double*,int*,double*);
    -double cdft1_wrap(double df, double t){
    -  int which=1;
    -  double q, p, bound;
    -  int status;  
    -
    -  F_FUNC(cdft,CDFT)(&which, &p, &q, &t, &df, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -  }
    -  return p;
    -}
    -
    -double cdft2_wrap(double df, double p){
    -  int which=2;
    -  double q=1.0-p, t, bound;
    -  int status;  
    -
    -  F_FUNC(cdft,CDFT)(&which, &p, &q, &t, &df, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return t;
    -}
    -
    -double cdft3_wrap(double p, double t){
    -  int which=3;
    -  double q=1.0-p, df, bound;
    -  int status;  
    -
    -  F_FUNC(cdft,CDFT)(&which, &p, &q, &t, &df, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return df;
    -}
    -
    -extern void F_FUNC(cdftnc,CDFTNC)(int*,double*,double*,double*,double*,double*,int*,double*);
    -double cdftnc1_wrap(double df, double nc, double t) {
    -  int which=1;
    -  double q, p, bound;
    -  int status;  
    -
    -  F_FUNC(cdftnc,CDFTNC)(&which, &p, &q, &t, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return p;
    -}
    -
    -double cdftnc2_wrap(double df, double nc, double p) {
    -  int which=2;
    -  double q=1.0-p, t, bound;
    -  int status;  
    -
    -  F_FUNC(cdftnc,CDFTNC)(&which, &p, &q, &t, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return t;
    -}
    -
    -double cdftnc3_wrap(double p, double nc, double t) {
    -  int which=3;
    -  double q=1.0-p, df, bound;
    -  int status;  
    -
    -  F_FUNC(cdftnc,CDFTNC)(&which, &p, &q, &t, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -  }
    -  return df;
    -}
    -
    -double cdftnc4_wrap(double df, double p, double t) {
    -  int which=4;
    -  double q=1.0-p, nc, bound;
    -  int status;  
    -
    -  F_FUNC(cdftnc,CDFTNC)(&which, &p, &q, &t, &df, &nc, &status, &bound); 
    -  if (status) {
    -    if (scipy_special_print_error_messages) show_error(status, bound);
    -    if ((status < 0) || (status==3) || (status==4)) return (NPY_NAN);
    -    if ((status == 1) || (status == 2)) return bound;
    -
    -  }
    -  return nc;
    -}
    -
    -
    diff --git a/scipy-0.10.1/scipy/special/cdf_wrappers.h b/scipy-0.10.1/scipy/special/cdf_wrappers.h
    deleted file mode 100644
    index 8cf7d786dd..0000000000
    --- a/scipy-0.10.1/scipy/special/cdf_wrappers.h
    +++ /dev/null
    @@ -1,62 +0,0 @@
    -/* This file is a collection of wrappers around the
    - *  Amos Fortran library of functions that take complex
    - *  variables (see www.netlib.org) so that they can be called from
    - *  the cephes library of corresponding name but work with complex
    - *  arguments.
    - */
    -
    -#ifndef _CDF_WRAPPERS_H
    -#define _CDF_WRAPPERS_H
    -#ifndef _AMOS_WRAPPERS_H
    -#include "Python.h"
    -#include "cephes/mconf.h"
    -#endif
    -
    -#include 
    -
    -extern double cdfbet3_wrap(double p, double x, double b);
    -extern double cdfbet4_wrap(double p, double x, double a);
    -
    -extern double cdfbin2_wrap(double p, double xn, double pr);
    -extern double cdfbin3_wrap(double p, double s, double pr);
    -
    -extern double cdfchi3_wrap(double p, double x);
    -
    -extern double cdfchn1_wrap(double x, double df, double nc);
    -extern double cdfchn2_wrap(double p, double df, double nc);
    -extern double cdfchn3_wrap(double p, double x, double nc);
    -extern double cdfchn4_wrap(double p, double x, double df);
    -
    -extern double cdff3_wrap(double p, double f, double dfd);
    -extern double cdff4_wrap(double p, double f, double dfn);
    -
    -extern double cdffnc1_wrap(double f, double dfn, double dfd, double nc);
    -extern double cdffnc2_wrap(double p, double dfn, double dfd, double nc);
    -extern double cdffnc3_wrap(double p, double f, double dfd, double nc);
    -extern double cdffnc4_wrap(double p, double f, double dfn, double nc);
    -extern double cdffnc5_wrap(double p, double f, double dfn, double dfd);
    -
    -extern double cdfgam1_wrap(double p, double x, double scl);
    -extern double cdfgam2_wrap(double p, double x, double shp);
    -extern double cdfgam3_wrap(double p, double x, double scl);
    -extern double cdfgam4_wrap(double p, double x, double shp);
    -
    -extern double cdfnbn2_wrap(double p, double xn, double pr);
    -extern double cdfnbn3_wrap(double p, double s, double pr);
    -
    -extern double cdfnor3_wrap(double p, double x, double std);
    -extern double cdfnor4_wrap(double p, double x, double mn);
    -
    -extern double cdfpoi2_wrap(double p, double xlam);
    -
    -extern double cdft1_wrap(double p, double t);
    -extern double cdft2_wrap(double p, double t);
    -extern double cdft3_wrap(double p, double t);
    -
    -extern double cdftnc1_wrap(double df, double nc, double t);
    -extern double cdftnc2_wrap(double df, double nc, double p);
    -extern double cdftnc3_wrap(double p, double nc, double t);
    -extern double cdftnc4_wrap(double df, double p, double t);
    -
    -extern double tukeylambdacdf(double x, double lambda);
    -#endif
    diff --git a/scipy-0.10.1/scipy/special/cdflib/algdiv.f b/scipy-0.10.1/scipy/special/cdflib/algdiv.f
    deleted file mode 100644
    index 6fab9b490c..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/algdiv.f
    +++ /dev/null
    @@ -1,71 +0,0 @@
    -      DOUBLE PRECISION FUNCTION algdiv(a,b)
    -C-----------------------------------------------------------------------
    -C
    -C     COMPUTATION OF LN(GAMMA(B)/GAMMA(A+B)) WHEN B .GE. 8
    -C
    -C                         --------
    -C
    -C     IN THIS ALGORITHM, DEL(X) IS THE FUNCTION DEFINED BY
    -C     LN(GAMMA(X)) = (X - 0.5)*LN(X) - X + 0.5*LN(2*PI) + DEL(X).
    -C
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION c,c0,c1,c2,c3,c4,c5,d,h,s11,s3,s5,s7,s9,t,u,v,w,
    -     +                 x,x2
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION alnrel
    -      EXTERNAL alnrel
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dlog
    -C     ..
    -C     .. Data statements ..
    -      DATA c0/.833333333333333D-01/,c1/-.277777777760991D-02/,
    -     +     c2/.793650666825390D-03/,c3/-.595202931351870D-03/,
    -     +     c4/.837308034031215D-03/,c5/-.165322962780713D-02/
    -C     ..
    -C     .. Executable Statements ..
    -C------------------------
    -      IF (a.LE.b) GO TO 10
    -      h = b/a
    -      c = 1.0D0/ (1.0D0+h)
    -      x = h/ (1.0D0+h)
    -      d = a + (b-0.5D0)
    -      GO TO 20
    -
    -   10 h = a/b
    -      c = h/ (1.0D0+h)
    -      x = 1.0D0/ (1.0D0+h)
    -      d = b + (a-0.5D0)
    -C
    -C                SET SN = (1 - X**N)/(1 - X)
    -C
    -   20 x2 = x*x
    -      s3 = 1.0D0 + (x+x2)
    -      s5 = 1.0D0 + (x+x2*s3)
    -      s7 = 1.0D0 + (x+x2*s5)
    -      s9 = 1.0D0 + (x+x2*s7)
    -      s11 = 1.0D0 + (x+x2*s9)
    -C
    -C                SET W = DEL(B) - DEL(A + B)
    -C
    -      t = (1.0D0/b)**2
    -      w = ((((c5*s11*t+c4*s9)*t+c3*s7)*t+c2*s5)*t+c1*s3)*t + c0
    -      w = w* (c/b)
    -C
    -C                    COMBINE THE RESULTS
    -C
    -      u = d*alnrel(a/b)
    -      v = a* (dlog(b)-1.0D0)
    -      IF (u.LE.v) GO TO 30
    -      algdiv = (w-v) - u
    -      RETURN
    -
    -   30 algdiv = (w-u) - v
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/alngam.f b/scipy-0.10.1/scipy/special/cdflib/alngam.f
    deleted file mode 100644
    index 0c61726ab6..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/alngam.f
    +++ /dev/null
    @@ -1,128 +0,0 @@
    -      DOUBLE PRECISION FUNCTION alngam(x)
    -C**********************************************************************
    -C
    -C     DOUBLE PRECISION FUNCTION ALNGAM(X)
    -C                 double precision LN of the GAMma function
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns the natural logarithm of GAMMA(X).
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     X --> value at which scaled log gamma is to be returned
    -C                    X is DOUBLE PRECISION
    -C
    -C
    -C                              Method
    -C
    -C
    -C     If X .le. 6.0, then use recursion to get X below 3
    -C     then apply rational approximation number 5236 of
    -C     Hart et al, Computer Approximations, John Wiley and
    -C     Sons, NY, 1968.
    -C
    -C     If X .gt. 6.0, then use recursion to get X to at least 12 and
    -C     then use formula 5423 of the same source.
    -C
    -C**********************************************************************
    -C
    -C     .. Parameters ..
    -      DOUBLE PRECISION hln2pi
    -      PARAMETER (hln2pi=0.91893853320467274178D0)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION offset,prod,xx
    -      INTEGER i,n
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION coef(5),scoefd(4),scoefn(9)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION devlpl
    -      EXTERNAL devlpl
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC log,dble,int
    -C     ..
    -C     .. Data statements ..
    -      DATA scoefn(1)/0.62003838007127258804D2/,
    -     +     scoefn(2)/0.36036772530024836321D2/,
    -     +     scoefn(3)/0.20782472531792126786D2/,
    -     +     scoefn(4)/0.6338067999387272343D1/,
    -     +     scoefn(5)/0.215994312846059073D1/,
    -     +     scoefn(6)/0.3980671310203570498D0/,
    -     +     scoefn(7)/0.1093115956710439502D0/,
    -     +     scoefn(8)/0.92381945590275995D-2/,
    -     +     scoefn(9)/0.29737866448101651D-2/
    -      DATA scoefd(1)/0.62003838007126989331D2/,
    -     +     scoefd(2)/0.9822521104713994894D1/,
    -     +     scoefd(3)/-0.8906016659497461257D1/,
    -     +     scoefd(4)/0.1000000000000000000D1/
    -      DATA coef(1)/0.83333333333333023564D-1/,
    -     +     coef(2)/-0.27777777768818808D-2/,
    -     +     coef(3)/0.79365006754279D-3/,coef(4)/-0.594997310889D-3/,
    -     +     coef(5)/0.8065880899D-3/
    -C     ..
    -C     .. Executable Statements ..
    -      IF (.NOT. (x.LE.6.0D0)) GO TO 70
    -      prod = 1.0D0
    -      xx = x
    -      IF (.NOT. (x.GT.3.0D0)) GO TO 30
    -   10 IF (.NOT. (xx.GT.3.0D0)) GO TO 20
    -      xx = xx - 1.0D0
    -      prod = prod*xx
    -      GO TO 10
    -
    -   20 CONTINUE
    -   30 IF (.NOT. (x.LT.2.0D0)) GO TO 60
    -   40 IF (.NOT. (xx.LT.2.0D0)) GO TO 50
    -      prod = prod/xx
    -      xx = xx + 1.0D0
    -      GO TO 40
    -
    -   50 CONTINUE
    -   60 alngam = devlpl(scoefn,9,xx-2.0D0)/devlpl(scoefd,4,xx-2.0D0)
    -C
    -C
    -C     COMPUTE RATIONAL APPROXIMATION TO GAMMA(X)
    -C
    -C
    -      alngam = alngam*prod
    -      alngam = log(alngam)
    -      GO TO 110
    -
    -   70 offset = hln2pi
    -C
    -C
    -C     IF NECESSARY MAKE X AT LEAST 12 AND CARRY CORRECTION IN OFFSET
    -C
    -C
    -      n = int(12.0D0-x)
    -      IF (.NOT. (n.GT.0)) GO TO 90
    -      prod = 1.0D0
    -      DO 80,i = 1,n
    -          prod = prod* (x+dble(i-1))
    -   80 CONTINUE
    -      offset = offset - log(prod)
    -      xx = x + dble(n)
    -      GO TO 100
    -
    -   90 xx = x
    -C
    -C
    -C     COMPUTE POWER SERIES
    -C
    -C
    -  100 alngam = devlpl(coef,5,1.0D0/ (xx**2))/xx
    -      alngam = alngam + offset + (xx-0.5D0)*log(xx) - xx
    -  110 RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/alnrel.f b/scipy-0.10.1/scipy/special/cdflib/alnrel.f
    deleted file mode 100644
    index bc9af9adf8..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/alnrel.f
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -      DOUBLE PRECISION FUNCTION alnrel(a)
    -C-----------------------------------------------------------------------
    -C            EVALUATION OF THE FUNCTION LN(1 + A)
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION p1,p2,p3,q1,q2,q3,t,t2,w,x
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog
    -C     ..
    -C     .. Data statements ..
    -      DATA p1/-.129418923021993D+01/,p2/.405303492862024D+00/,
    -     +     p3/-.178874546012214D-01/
    -      DATA q1/-.162752256355323D+01/,q2/.747811014037616D+00/,
    -     +     q3/-.845104217945565D-01/
    -C     ..
    -C     .. Executable Statements ..
    -C--------------------------
    -      IF (abs(a).GT.0.375D0) GO TO 10
    -      t = a/ (a+2.0D0)
    -      t2 = t*t
    -      w = (((p3*t2+p2)*t2+p1)*t2+1.0D0)/ (((q3*t2+q2)*t2+q1)*t2+1.0D0)
    -      alnrel = 2.0D0*t*w
    -      RETURN
    -C
    -   10 x = 1.D0 + dble(a)
    -      alnrel = dlog(x)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/apser.f b/scipy-0.10.1/scipy/special/cdflib/apser.f
    deleted file mode 100644
    index 1c15ce5de8..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/apser.f
    +++ /dev/null
    @@ -1,46 +0,0 @@
    -      DOUBLE PRECISION FUNCTION apser(a,b,x,eps)
    -C-----------------------------------------------------------------------
    -C     APSER YIELDS THE INCOMPLETE BETA RATIO I(SUB(1-X))(B,A) FOR
    -C     A .LE. MIN(EPS,EPS*B), B*X .LE. 1, AND X .LE. 0.5. USED WHEN
    -C     A IS VERY SMALL. USE ONLY IF ABOVE INEQUALITIES ARE SATISFIED.
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION aj,bx,c,g,j,s,t,tol
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION psi
    -      EXTERNAL psi
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dlog
    -C     ..
    -C     .. Data statements ..
    -C--------------------
    -      DATA g/.577215664901533D0/
    -C     ..
    -C     .. Executable Statements ..
    -C--------------------
    -      bx = b*x
    -      t = x - bx
    -      IF (b*eps.GT.2.D-2) GO TO 10
    -      c = dlog(x) + psi(b) + g + t
    -      GO TO 20
    -
    -   10 c = dlog(bx) + g + t
    -C
    -   20 tol = 5.0D0*eps*abs(c)
    -      j = 1.0D0
    -      s = 0.0D0
    -   30 j = j + 1.0D0
    -      t = t* (x-bx/j)
    -      aj = t/j
    -      s = s + aj
    -      IF (abs(aj).GT.tol) GO TO 30
    -C
    -      apser = -a* (c+s)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/basym.f b/scipy-0.10.1/scipy/special/cdflib/basym.f
    deleted file mode 100644
    index 356da173be..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/basym.f
    +++ /dev/null
    @@ -1,120 +0,0 @@
    -      DOUBLE PRECISION FUNCTION basym(a,b,lambda,eps)
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR IX(A,B) FOR LARGE A AND B.
    -C     LAMBDA = (A + B)*Y - B  AND EPS IS THE TOLERANCE USED.
    -C     IT IS ASSUMED THAT LAMBDA IS NONNEGATIVE AND THAT
    -C     A AND B ARE GREATER THAN OR EQUAL TO 15.
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,lambda
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION bsum,dsum,e0,e1,f,h,h2,hn,j0,j1,r,r0,r1,s,sum,t,
    -     +                 t0,t1,u,w,w0,z,z0,z2,zn,znm1
    -      INTEGER i,im1,imj,j,m,mm1,mmj,n,np1,num
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION a0(21),b0(21),c(21),d(21)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION bcorr,erfc1,rlog1
    -      EXTERNAL bcorr,erfc1,rlog1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,exp,sqrt
    -C     ..
    -C     .. Data statements ..
    -C------------------------
    -C     ****** NUM IS THE MAXIMUM VALUE THAT N CAN TAKE IN THE DO LOOP
    -C            ENDING AT STATEMENT 50. IT IS REQUIRED THAT NUM BE EVEN.
    -C            THE ARRAYS A0, B0, C, D HAVE DIMENSION NUM + 1.
    -C
    -C------------------------
    -C     E0 = 2/SQRT(PI)
    -C     E1 = 2**(-3/2)
    -C------------------------
    -      DATA num/20/
    -      DATA e0/1.12837916709551D0/,e1/.353553390593274D0/
    -C     ..
    -C     .. Executable Statements ..
    -C------------------------
    -      basym = 0.0D0
    -      IF (a.GE.b) GO TO 10
    -      h = a/b
    -      r0 = 1.0D0/ (1.0D0+h)
    -      r1 = (b-a)/b
    -      w0 = 1.0D0/sqrt(a* (1.0D0+h))
    -      GO TO 20
    -
    -   10 h = b/a
    -      r0 = 1.0D0/ (1.0D0+h)
    -      r1 = (b-a)/a
    -      w0 = 1.0D0/sqrt(b* (1.0D0+h))
    -C
    -   20 f = a*rlog1(-lambda/a) + b*rlog1(lambda/b)
    -      t = exp(-f)
    -      IF (t.EQ.0.0D0) RETURN
    -      z0 = sqrt(f)
    -      z = 0.5D0* (z0/e1)
    -      z2 = f + f
    -C
    -      a0(1) = (2.0D0/3.0D0)*r1
    -      c(1) = -0.5D0*a0(1)
    -      d(1) = -c(1)
    -      j0 = (0.5D0/e0)*erfc1(1,z0)
    -      j1 = e1
    -      sum = j0 + d(1)*w0*j1
    -C
    -      s = 1.0D0
    -      h2 = h*h
    -      hn = 1.0D0
    -      w = w0
    -      znm1 = z
    -      zn = z2
    -      DO 70 n = 2,num,2
    -          hn = h2*hn
    -          a0(n) = 2.0D0*r0* (1.0D0+h*hn)/ (n+2.0D0)
    -          np1 = n + 1
    -          s = s + hn
    -          a0(np1) = 2.0D0*r1*s/ (n+3.0D0)
    -C
    -          DO 60 i = n,np1
    -              r = -0.5D0* (i+1.0D0)
    -              b0(1) = r*a0(1)
    -              DO 40 m = 2,i
    -                  bsum = 0.0D0
    -                  mm1 = m - 1
    -                  DO 30 j = 1,mm1
    -                      mmj = m - j
    -                      bsum = bsum + (j*r-mmj)*a0(j)*b0(mmj)
    -   30             CONTINUE
    -                  b0(m) = r*a0(m) + bsum/m
    -   40         CONTINUE
    -              c(i) = b0(i)/ (i+1.0D0)
    -C
    -              dsum = 0.0D0
    -              im1 = i - 1
    -              DO 50 j = 1,im1
    -                  imj = i - j
    -                  dsum = dsum + d(imj)*c(j)
    -   50         CONTINUE
    -              d(i) = - (dsum+c(i))
    -   60     CONTINUE
    -C
    -          j0 = e1*znm1 + (n-1.0D0)*j0
    -          j1 = e1*zn + n*j1
    -          znm1 = z2*znm1
    -          zn = z2*zn
    -          w = w0*w
    -          t0 = d(n)*w*j0
    -          w = w0*w
    -          t1 = d(np1)*w*j1
    -          sum = sum + (t0+t1)
    -          IF ((abs(t0)+abs(t1)).LE.eps*sum) GO TO 80
    -   70 CONTINUE
    -C
    -   80 u = exp(-bcorr(a,b))
    -      basym = e0*t*u*sum
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/bcorr.f b/scipy-0.10.1/scipy/special/cdflib/bcorr.f
    deleted file mode 100644
    index 381de8b62a..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/bcorr.f
    +++ /dev/null
    @@ -1,54 +0,0 @@
    -      DOUBLE PRECISION FUNCTION bcorr(a0,b0)
    -C-----------------------------------------------------------------------
    -C
    -C     EVALUATION OF  DEL(A0) + DEL(B0) - DEL(A0 + B0)  WHERE
    -C     LN(GAMMA(A)) = (A - 0.5)*LN(A) - A + 0.5*LN(2*PI) + DEL(A).
    -C     IT IS ASSUMED THAT A0 .GE. 8 AND B0 .GE. 8.
    -C
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a0,b0
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a,b,c,c0,c1,c2,c3,c4,c5,h,s11,s3,s5,s7,s9,t,w,x,
    -     +                 x2
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dmax1,dmin1
    -C     ..
    -C     .. Data statements ..
    -      DATA c0/.833333333333333D-01/,c1/-.277777777760991D-02/,
    -     +     c2/.793650666825390D-03/,c3/-.595202931351870D-03/,
    -     +     c4/.837308034031215D-03/,c5/-.165322962780713D-02/
    -C     ..
    -C     .. Executable Statements ..
    -C------------------------
    -      a = dmin1(a0,b0)
    -      b = dmax1(a0,b0)
    -C
    -      h = a/b
    -      c = h/ (1.0D0+h)
    -      x = 1.0D0/ (1.0D0+h)
    -      x2 = x*x
    -C
    -C                SET SN = (1 - X**N)/(1 - X)
    -C
    -      s3 = 1.0D0 + (x+x2)
    -      s5 = 1.0D0 + (x+x2*s3)
    -      s7 = 1.0D0 + (x+x2*s5)
    -      s9 = 1.0D0 + (x+x2*s7)
    -      s11 = 1.0D0 + (x+x2*s9)
    -C
    -C                SET W = DEL(B) - DEL(A + B)
    -C
    -      t = (1.0D0/b)**2
    -      w = ((((c5*s11*t+c4*s9)*t+c3*s7)*t+c2*s5)*t+c1*s3)*t + c0
    -      w = w* (c/b)
    -C
    -C                   COMPUTE  DEL(A) + W
    -C
    -      t = (1.0D0/a)**2
    -      bcorr = (((((c5*t+c4)*t+c3)*t+c2)*t+c1)*t+c0)/a + w
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/betaln.f b/scipy-0.10.1/scipy/special/cdflib/betaln.f
    deleted file mode 100644
    index d9a49b6c4f..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/betaln.f
    +++ /dev/null
    @@ -1,103 +0,0 @@
    -      DOUBLE PRECISION FUNCTION betaln(a0,b0)
    -C-----------------------------------------------------------------------
    -C     EVALUATION OF THE LOGARITHM OF THE BETA FUNCTION
    -C-----------------------------------------------------------------------
    -C     E = 0.5*LN(2*PI)
    -C--------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a0,b0
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a,b,c,e,h,u,v,w,z
    -      INTEGER i,n
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION algdiv,alnrel,bcorr,gamln,gsumln
    -      EXTERNAL algdiv,alnrel,bcorr,gamln,gsumln
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dlog,dmax1,dmin1
    -C     ..
    -C     .. Data statements ..
    -      DATA e/.918938533204673D0/
    -C     ..
    -C     .. Executable Statements ..
    -C--------------------------
    -      a = dmin1(a0,b0)
    -      b = dmax1(a0,b0)
    -      IF (a.GE.8.0D0) GO TO 100
    -      IF (a.GE.1.0D0) GO TO 20
    -C-----------------------------------------------------------------------
    -C                   PROCEDURE WHEN A .LT. 1
    -C-----------------------------------------------------------------------
    -      IF (b.GE.8.0D0) GO TO 10
    -      betaln = gamln(a) + (gamln(b)-gamln(a+b))
    -      RETURN
    -
    -   10 betaln = gamln(a) + algdiv(a,b)
    -      RETURN
    -C-----------------------------------------------------------------------
    -C                PROCEDURE WHEN 1 .LE. A .LT. 8
    -C-----------------------------------------------------------------------
    -   20 IF (a.GT.2.0D0) GO TO 40
    -      IF (b.GT.2.0D0) GO TO 30
    -      betaln = gamln(a) + gamln(b) - gsumln(a,b)
    -      RETURN
    -
    -   30 w = 0.0D0
    -      IF (b.LT.8.0D0) GO TO 60
    -      betaln = gamln(a) + algdiv(a,b)
    -      RETURN
    -C
    -C                REDUCTION OF A WHEN B .LE. 1000
    -C
    -   40 IF (b.GT.1000.0D0) GO TO 80
    -      n = a - 1.0D0
    -      w = 1.0D0
    -      DO 50 i = 1,n
    -          a = a - 1.0D0
    -          h = a/b
    -          w = w* (h/ (1.0D0+h))
    -   50 CONTINUE
    -      w = dlog(w)
    -      IF (b.LT.8.0D0) GO TO 60
    -      betaln = w + gamln(a) + algdiv(a,b)
    -      RETURN
    -C
    -C                 REDUCTION OF B WHEN B .LT. 8
    -C
    -   60 n = b - 1.0D0
    -      z = 1.0D0
    -      DO 70 i = 1,n
    -          b = b - 1.0D0
    -          z = z* (b/ (a+b))
    -   70 CONTINUE
    -      betaln = w + dlog(z) + (gamln(a)+ (gamln(b)-gsumln(a,b)))
    -      RETURN
    -C
    -C                REDUCTION OF A WHEN B .GT. 1000
    -C
    -   80 n = a - 1.0D0
    -      w = 1.0D0
    -      DO 90 i = 1,n
    -          a = a - 1.0D0
    -          w = w* (a/ (1.0D0+a/b))
    -   90 CONTINUE
    -      betaln = (dlog(w)-n*dlog(b)) + (gamln(a)+algdiv(a,b))
    -      RETURN
    -C-----------------------------------------------------------------------
    -C                   PROCEDURE WHEN A .GE. 8
    -C-----------------------------------------------------------------------
    -  100 w = bcorr(a,b)
    -      h = a/b
    -      c = h/ (1.0D0+h)
    -      u = - (a-0.5D0)*dlog(c)
    -      v = b*alnrel(h)
    -      IF (u.LE.v) GO TO 110
    -      betaln = (((-0.5D0*dlog(b)+e)+w)-v) - u
    -      RETURN
    -
    -  110 betaln = (((-0.5D0*dlog(b)+e)+w)-u) - v
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/bfrac.f b/scipy-0.10.1/scipy/special/cdflib/bfrac.f
    deleted file mode 100644
    index 1557eca8dd..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/bfrac.f
    +++ /dev/null
    @@ -1,77 +0,0 @@
    -      DOUBLE PRECISION FUNCTION bfrac(a,b,x,y,lambda,eps)
    -C-----------------------------------------------------------------------
    -C     CONTINUED FRACTION EXPANSION FOR IX(A,B) WHEN A,B .GT. 1.
    -C     IT IS ASSUMED THAT  LAMBDA = (A + B)*Y - B.
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,lambda,x,y
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION alpha,an,anp1,beta,bn,bnp1,c,c0,c1,e,n,p,r,r0,s,
    -     +                 t,w,yp1
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION brcomp
    -      EXTERNAL brcomp
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -C     .. Executable Statements ..
    -C--------------------
    -      bfrac = brcomp(a,b,x,y)
    -      IF (bfrac.EQ.0.0D0) RETURN
    -C
    -      c = 1.0D0 + lambda
    -      c0 = b/a
    -      c1 = 1.0D0 + 1.0D0/a
    -      yp1 = y + 1.0D0
    -C
    -      n = 0.0D0
    -      p = 1.0D0
    -      s = a + 1.0D0
    -      an = 0.0D0
    -      bn = 1.0D0
    -      anp1 = 1.0D0
    -      bnp1 = c/c1
    -      r = c1/c
    -C
    -C        CONTINUED FRACTION CALCULATION
    -C
    -   10 n = n + 1.0D0
    -      t = n/a
    -      w = n* (b-n)*x
    -      e = a/s
    -      alpha = (p* (p+c0)*e*e)* (w*x)
    -      e = (1.0D0+t)/ (c1+t+t)
    -      beta = n + w/s + e* (c+n*yp1)
    -      p = 1.0D0 + t
    -      s = s + 2.0D0
    -C
    -C        UPDATE AN, BN, ANP1, AND BNP1
    -C
    -      t = alpha*an + beta*anp1
    -      an = anp1
    -      anp1 = t
    -      t = alpha*bn + beta*bnp1
    -      bn = bnp1
    -      bnp1 = t
    -C
    -      r0 = r
    -      r = anp1/bnp1
    -      IF (abs(r-r0).LE.eps*r) GO TO 20
    -C
    -C        RESCALE AN, BN, ANP1, AND BNP1
    -C
    -      an = an/bnp1
    -      bn = bn/bnp1
    -      anp1 = r
    -      bnp1 = 1.0D0
    -      GO TO 10
    -C
    -C                 TERMINATION
    -C
    -   20 bfrac = bfrac*r
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/bgrat.f b/scipy-0.10.1/scipy/special/cdflib/bgrat.f
    deleted file mode 100644
    index e6a707b659..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/bgrat.f
    +++ /dev/null
    @@ -1,93 +0,0 @@
    -      SUBROUTINE bgrat(a,b,x,y,w,eps,ierr)
    -C-----------------------------------------------------------------------
    -C     ASYMPTOTIC EXPANSION FOR IX(A,B) WHEN A IS LARGER THAN B.
    -C     THE RESULT OF THE EXPANSION IS ADDED TO W. IT IS ASSUMED
    -C     THAT A .GE. 15 AND B .LE. 1.  EPS IS THE TOLERANCE USED.
    -C     IERR IS A VARIABLE THAT REPORTS THE STATUS OF THE RESULTS.
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,w,x,y
    -      INTEGER ierr
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION bm1,bp2n,cn,coef,dj,j,l,lnx,n2,nu,p,q,r,s,sum,t,
    -     +                 t2,u,v,z
    -      INTEGER i,n,nm1
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION c(30),d(30)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION algdiv,alnrel,gam1
    -      EXTERNAL algdiv,alnrel,gam1
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL grat1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dlog,exp
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      bm1 = (b-0.5D0) - 0.5D0
    -      nu = a + 0.5D0*bm1
    -      IF (y.GT.0.375D0) GO TO 10
    -      lnx = alnrel(-y)
    -      GO TO 20
    -
    -   10 lnx = dlog(x)
    -   20 z = -nu*lnx
    -      IF (b*z.EQ.0.0D0) GO TO 70
    -C
    -C                 COMPUTATION OF THE EXPANSION
    -C                 SET R = EXP(-Z)*Z**B/GAMMA(B)
    -C
    -      r = b* (1.0D0+gam1(b))*exp(b*dlog(z))
    -      r = r*exp(a*lnx)*exp(0.5D0*bm1*lnx)
    -      u = algdiv(b,a) + b*dlog(nu)
    -      u = r*exp(-u)
    -      IF (u.EQ.0.0D0) GO TO 70
    -      CALL grat1(b,z,r,p,q,eps)
    -C
    -      v = 0.25D0* (1.0D0/nu)**2
    -      t2 = 0.25D0*lnx*lnx
    -      l = w/u
    -      j = q/r
    -      sum = j
    -      t = 1.0D0
    -      cn = 1.0D0
    -      n2 = 0.0D0
    -      DO 50 n = 1,30
    -          bp2n = b + n2
    -          j = (bp2n* (bp2n+1.0D0)*j+ (z+bp2n+1.0D0)*t)*v
    -          n2 = n2 + 2.0D0
    -          t = t*t2
    -          cn = cn/ (n2* (n2+1.0D0))
    -          c(n) = cn
    -          s = 0.0D0
    -          IF (n.EQ.1) GO TO 40
    -          nm1 = n - 1
    -          coef = b - n
    -          DO 30 i = 1,nm1
    -              s = s + coef*c(i)*d(n-i)
    -              coef = coef + b
    -   30     CONTINUE
    -   40     d(n) = bm1*cn + s/n
    -          dj = d(n)*j
    -          sum = sum + dj
    -          IF (sum.LE.0.0D0) GO TO 70
    -          IF (abs(dj).LE.eps* (sum+l)) GO TO 60
    -   50 CONTINUE
    -C
    -C                    ADD THE RESULTS TO W
    -C
    -   60 ierr = 0
    -      w = w + u*sum
    -      RETURN
    -C
    -C               THE EXPANSION CANNOT BE COMPUTED
    -C
    -   70 ierr = 1
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/bpser.f b/scipy-0.10.1/scipy/special/cdflib/bpser.f
    deleted file mode 100644
    index 802f0a6db5..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/bpser.f
    +++ /dev/null
    @@ -1,99 +0,0 @@
    -      DOUBLE PRECISION FUNCTION bpser(a,b,x,eps)
    -C-----------------------------------------------------------------------
    -C     POWER SERIES EXPANSION FOR EVALUATING IX(A,B) WHEN B .LE. 1
    -C     OR B*X .LE. 0.7.  EPS IS THE TOLERANCE USED.
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a0,apb,b0,c,n,sum,t,tol,u,w,z
    -      INTEGER i,m
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION algdiv,betaln,gam1,gamln1
    -      EXTERNAL algdiv,betaln,gam1,gamln1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog,dmax1,dmin1,exp
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      bpser = 0.0D0
    -      IF (x.EQ.0.0D0) RETURN
    -C-----------------------------------------------------------------------
    -C            COMPUTE THE FACTOR X**A/(A*BETA(A,B))
    -C-----------------------------------------------------------------------
    -      a0 = dmin1(a,b)
    -      IF (a0.LT.1.0D0) GO TO 10
    -      z = a*dlog(x) - betaln(a,b)
    -      bpser = exp(z)/a
    -      GO TO 100
    -
    -   10 b0 = dmax1(a,b)
    -      IF (b0.GE.8.0D0) GO TO 90
    -      IF (b0.GT.1.0D0) GO TO 40
    -C
    -C            PROCEDURE FOR A0 .LT. 1 AND B0 .LE. 1
    -C
    -      bpser = x**a
    -      IF (bpser.EQ.0.0D0) RETURN
    -C
    -      apb = a + b
    -      IF (apb.GT.1.0D0) GO TO 20
    -      z = 1.0D0 + gam1(apb)
    -      GO TO 30
    -
    -   20 u = dble(a) + dble(b) - 1.D0
    -      z = (1.0D0+gam1(u))/apb
    -C
    -   30 c = (1.0D0+gam1(a))* (1.0D0+gam1(b))/z
    -      bpser = bpser*c* (b/apb)
    -      GO TO 100
    -C
    -C         PROCEDURE FOR A0 .LT. 1 AND 1 .LT. B0 .LT. 8
    -C
    -   40 u = gamln1(a0)
    -      m = b0 - 1.0D0
    -      IF (m.LT.1) GO TO 60
    -      c = 1.0D0
    -      DO 50 i = 1,m
    -          b0 = b0 - 1.0D0
    -          c = c* (b0/ (a0+b0))
    -   50 CONTINUE
    -      u = dlog(c) + u
    -C
    -   60 z = a*dlog(x) - u
    -      b0 = b0 - 1.0D0
    -      apb = a0 + b0
    -      IF (apb.GT.1.0D0) GO TO 70
    -      t = 1.0D0 + gam1(apb)
    -      GO TO 80
    -
    -   70 u = dble(a0) + dble(b0) - 1.D0
    -      t = (1.0D0+gam1(u))/apb
    -   80 bpser = exp(z)* (a0/a)* (1.0D0+gam1(b0))/t
    -      GO TO 100
    -C
    -C            PROCEDURE FOR A0 .LT. 1 AND B0 .GE. 8
    -C
    -   90 u = gamln1(a0) + algdiv(a0,b0)
    -      z = a*dlog(x) - u
    -      bpser = (a0/a)*exp(z)
    -  100 IF (bpser.EQ.0.0D0 .OR. a.LE.0.1D0*eps) RETURN
    -C-----------------------------------------------------------------------
    -C                     COMPUTE THE SERIES
    -C-----------------------------------------------------------------------
    -      sum = 0.0D0
    -      n = 0.0D0
    -      c = 1.0D0
    -      tol = eps/a
    -  110 n = n + 1.0D0
    -      c = c* (0.5D0+ (0.5D0-b/n))*x
    -      w = c/ (a+n)
    -      sum = sum + w
    -      IF (abs(w).GT.tol) GO TO 110
    -      bpser = bpser* (1.0D0+a*sum)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/bratio.f b/scipy-0.10.1/scipy/special/cdflib/bratio.f
    deleted file mode 100644
    index 7cd451126a..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/bratio.f
    +++ /dev/null
    @@ -1,236 +0,0 @@
    -      SUBROUTINE bratio(a,b,x,y,w,w1,ierr)
    -C-----------------------------------------------------------------------
    -C
    -C            EVALUATION OF THE INCOMPLETE BETA FUNCTION IX(A,B)
    -C
    -C                     --------------------
    -C
    -C     IT IS ASSUMED THAT A AND B ARE NONNEGATIVE, AND THAT X .LE. 1
    -C     AND Y = 1 - X.  BRATIO ASSIGNS W AND W1 THE VALUES
    -C
    -C                      W  = IX(A,B)
    -C                      W1 = 1 - IX(A,B)
    -C
    -C     IERR IS A VARIABLE THAT REPORTS THE STATUS OF THE RESULTS.
    -C     IF NO INPUT ERRORS ARE DETECTED THEN IERR IS SET TO 0 AND
    -C     W AND W1 ARE COMPUTED. OTHERWISE, IF AN ERROR IS DETECTED,
    -C     THEN W AND W1 ARE ASSIGNED THE VALUE 0 AND IERR IS SET TO
    -C     ONE OF THE FOLLOWING VALUES ...
    -C
    -C        IERR = 1  IF A OR B IS NEGATIVE
    -C        IERR = 2  IF A = B = 0
    -C        IERR = 3  IF X .LT. 0 OR X .GT. 1
    -C        IERR = 4  IF Y .LT. 0 OR Y .GT. 1
    -C        IERR = 5  IF X + Y .NE. 1
    -C        IERR = 6  IF X = A = 0
    -C        IERR = 7  IF Y = B = 0
    -C
    -C--------------------
    -C     WRITTEN BY ALFRED H. MORRIS, JR.
    -C        NAVAL SURFACE WARFARE CENTER
    -C        DAHLGREN, VIRGINIA
    -C     REVISED ... NOV 1991
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,w,w1,x,y
    -      INTEGER ierr
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a0,b0,eps,lambda,t,x0,y0,z
    -      INTEGER ierr1,ind,n
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION apser,basym,bfrac,bpser,bup,fpser,spmpar
    -      EXTERNAL apser,basym,bfrac,bpser,bup,fpser,spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL bgrat
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dmax1,dmin1
    -C     ..
    -C     .. Executable Statements ..
    -C-----------------------------------------------------------------------
    -C
    -C     ****** EPS IS A MACHINE DEPENDENT CONSTANT. EPS IS THE SMALLEST
    -C            FLOATING POINT NUMBER FOR WHICH 1.0 + EPS .GT. 1.0
    -C
    -      eps = spmpar(1)
    -C
    -C-----------------------------------------------------------------------
    -      w = 0.0D0
    -      w1 = 0.0D0
    -      IF (a.LT.0.0D0 .OR. b.LT.0.0D0) GO TO 270
    -      IF (a.EQ.0.0D0 .AND. b.EQ.0.0D0) GO TO 280
    -      IF (x.LT.0.0D0 .OR. x.GT.1.0D0) GO TO 290
    -      IF (y.LT.0.0D0 .OR. y.GT.1.0D0) GO TO 300
    -      z = ((x+y)-0.5D0) - 0.5D0
    -      IF (abs(z).GT.3.0D0*eps) GO TO 310
    -C
    -      ierr = 0
    -      IF (x.EQ.0.0D0) GO TO 210
    -      IF (y.EQ.0.0D0) GO TO 230
    -      IF (a.EQ.0.0D0) GO TO 240
    -      IF (b.EQ.0.0D0) GO TO 220
    -C
    -      eps = dmax1(eps,1.D-15)
    -      IF (dmax1(a,b).LT.1.D-3*eps) GO TO 260
    -C
    -      ind = 0
    -      a0 = a
    -      b0 = b
    -      x0 = x
    -      y0 = y
    -      IF (dmin1(a0,b0).GT.1.0D0) GO TO 40
    -C
    -C             PROCEDURE FOR A0 .LE. 1 OR B0 .LE. 1
    -C
    -      IF (x.LE.0.5D0) GO TO 10
    -      ind = 1
    -      a0 = b
    -      b0 = a
    -      x0 = y
    -      y0 = x
    -C
    -   10 IF (b0.LT.dmin1(eps,eps*a0)) GO TO 90
    -      IF (a0.LT.dmin1(eps,eps*b0) .AND. b0*x0.LE.1.0D0) GO TO 100
    -      IF (dmax1(a0,b0).GT.1.0D0) GO TO 20
    -      IF (a0.GE.dmin1(0.2D0,b0)) GO TO 110
    -      IF (x0**a0.LE.0.9D0) GO TO 110
    -      IF (x0.GE.0.3D0) GO TO 120
    -      n = 20
    -      GO TO 140
    -C
    -   20 IF (b0.LE.1.0D0) GO TO 110
    -      IF (x0.GE.0.3D0) GO TO 120
    -      IF (x0.GE.0.1D0) GO TO 30
    -      IF ((x0*b0)**a0.LE.0.7D0) GO TO 110
    -   30 IF (b0.GT.15.0D0) GO TO 150
    -      n = 20
    -      GO TO 140
    -C
    -C             PROCEDURE FOR A0 .GT. 1 AND B0 .GT. 1
    -C
    -   40 IF (a.GT.b) GO TO 50
    -      lambda = a - (a+b)*x
    -      GO TO 60
    -
    -   50 lambda = (a+b)*y - b
    -   60 IF (lambda.GE.0.0D0) GO TO 70
    -      ind = 1
    -      a0 = b
    -      b0 = a
    -      x0 = y
    -      y0 = x
    -      lambda = abs(lambda)
    -C
    -   70 IF (b0.LT.40.0D0 .AND. b0*x0.LE.0.7D0) GO TO 110
    -      IF (b0.LT.40.0D0) GO TO 160
    -      IF (a0.GT.b0) GO TO 80
    -      IF (a0.LE.100.0D0) GO TO 130
    -      IF (lambda.GT.0.03D0*a0) GO TO 130
    -      GO TO 200
    -
    -   80 IF (b0.LE.100.0D0) GO TO 130
    -      IF (lambda.GT.0.03D0*b0) GO TO 130
    -      GO TO 200
    -C
    -C            EVALUATION OF THE APPROPRIATE ALGORITHM
    -C
    -   90 w = fpser(a0,b0,x0,eps)
    -      w1 = 0.5D0 + (0.5D0-w)
    -      GO TO 250
    -C
    -  100 w1 = apser(a0,b0,x0,eps)
    -      w = 0.5D0 + (0.5D0-w1)
    -      GO TO 250
    -C
    -  110 w = bpser(a0,b0,x0,eps)
    -      w1 = 0.5D0 + (0.5D0-w)
    -      GO TO 250
    -C
    -  120 w1 = bpser(b0,a0,y0,eps)
    -      w = 0.5D0 + (0.5D0-w1)
    -      GO TO 250
    -C
    -  130 w = bfrac(a0,b0,x0,y0,lambda,15.0D0*eps)
    -      w1 = 0.5D0 + (0.5D0-w)
    -      GO TO 250
    -C
    -  140 w1 = bup(b0,a0,y0,x0,n,eps)
    -      b0 = b0 + n
    -  150 CALL bgrat(b0,a0,y0,x0,w1,15.0D0*eps,ierr1)
    -      w = 0.5D0 + (0.5D0-w1)
    -      GO TO 250
    -C
    -  160 n = b0
    -      b0 = b0 - n
    -      IF (b0.NE.0.0D0) GO TO 170
    -      n = n - 1
    -      b0 = 1.0D0
    -  170 w = bup(b0,a0,y0,x0,n,eps)
    -      IF (x0.GT.0.7D0) GO TO 180
    -      w = w + bpser(a0,b0,x0,eps)
    -      w1 = 0.5D0 + (0.5D0-w)
    -      GO TO 250
    -C
    -  180 IF (a0.GT.15.0D0) GO TO 190
    -      n = 20
    -      w = w + bup(a0,b0,x0,y0,n,eps)
    -      a0 = a0 + n
    -  190 CALL bgrat(a0,b0,x0,y0,w,15.0D0*eps,ierr1)
    -      w1 = 0.5D0 + (0.5D0-w)
    -      GO TO 250
    -C
    -  200 w = basym(a0,b0,lambda,100.0D0*eps)
    -      w1 = 0.5D0 + (0.5D0-w)
    -      GO TO 250
    -C
    -C               TERMINATION OF THE PROCEDURE
    -C
    -  210 IF (a.EQ.0.0D0) GO TO 320
    -  220 w = 0.0D0
    -      w1 = 1.0D0
    -      RETURN
    -C
    -  230 IF (b.EQ.0.0D0) GO TO 330
    -  240 w = 1.0D0
    -      w1 = 0.0D0
    -      RETURN
    -C
    -  250 IF (ind.EQ.0) RETURN
    -      t = w
    -      w = w1
    -      w1 = t
    -      RETURN
    -C
    -C           PROCEDURE FOR A AND B .LT. 1.E-3*EPS
    -C
    -  260 w = b/ (a+b)
    -      w1 = a/ (a+b)
    -      RETURN
    -C
    -C                       ERROR RETURN
    -C
    -  270 ierr = 1
    -      RETURN
    -
    -  280 ierr = 2
    -      RETURN
    -
    -  290 ierr = 3
    -      RETURN
    -
    -  300 ierr = 4
    -      RETURN
    -
    -  310 ierr = 5
    -      RETURN
    -
    -  320 ierr = 6
    -      RETURN
    -
    -  330 ierr = 7
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/brcmp1.f b/scipy-0.10.1/scipy/special/cdflib/brcmp1.f
    deleted file mode 100644
    index ae3b412c46..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/brcmp1.f
    +++ /dev/null
    @@ -1,136 +0,0 @@
    -      DOUBLE PRECISION FUNCTION brcmp1(mu,a,b,x,y)
    -C-----------------------------------------------------------------------
    -C          EVALUATION OF  EXP(MU) * (X**A*Y**B/BETA(A,B))
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,x,y
    -      INTEGER mu
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a0,apb,b0,c,const,e,h,lambda,lnx,lny,t,u,v,x0,y0,
    -     +                 z
    -      INTEGER i,n
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION algdiv,alnrel,bcorr,betaln,esum,gam1,gamln1,rlog1
    -      EXTERNAL algdiv,alnrel,bcorr,betaln,esum,gam1,gamln1,rlog1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog,dmax1,dmin1,exp,sqrt
    -C     ..
    -C     .. Data statements ..
    -C-----------------
    -C     CONST = 1/SQRT(2*PI)
    -C-----------------
    -      DATA const/.398942280401433D0/
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      a0 = dmin1(a,b)
    -      IF (a0.GE.8.0D0) GO TO 130
    -C
    -      IF (x.GT.0.375D0) GO TO 10
    -      lnx = dlog(x)
    -      lny = alnrel(-x)
    -      GO TO 30
    -
    -   10 IF (y.GT.0.375D0) GO TO 20
    -      lnx = alnrel(-y)
    -      lny = dlog(y)
    -      GO TO 30
    -
    -   20 lnx = dlog(x)
    -      lny = dlog(y)
    -C
    -   30 z = a*lnx + b*lny
    -      IF (a0.LT.1.0D0) GO TO 40
    -      z = z - betaln(a,b)
    -      brcmp1 = esum(mu,z)
    -      RETURN
    -C-----------------------------------------------------------------------
    -C              PROCEDURE FOR A .LT. 1 OR B .LT. 1
    -C-----------------------------------------------------------------------
    -   40 b0 = dmax1(a,b)
    -      IF (b0.GE.8.0D0) GO TO 120
    -      IF (b0.GT.1.0D0) GO TO 70
    -C
    -C                   ALGORITHM FOR B0 .LE. 1
    -C
    -      brcmp1 = esum(mu,z)
    -      IF (brcmp1.EQ.0.0D0) RETURN
    -C
    -      apb = a + b
    -      IF (apb.GT.1.0D0) GO TO 50
    -      z = 1.0D0 + gam1(apb)
    -      GO TO 60
    -
    -   50 u = dble(a) + dble(b) - 1.D0
    -      z = (1.0D0+gam1(u))/apb
    -C
    -   60 c = (1.0D0+gam1(a))* (1.0D0+gam1(b))/z
    -      brcmp1 = brcmp1* (a0*c)/ (1.0D0+a0/b0)
    -      RETURN
    -C
    -C                ALGORITHM FOR 1 .LT. B0 .LT. 8
    -C
    -   70 u = gamln1(a0)
    -      n = b0 - 1.0D0
    -      IF (n.LT.1) GO TO 90
    -      c = 1.0D0
    -      DO 80 i = 1,n
    -          b0 = b0 - 1.0D0
    -          c = c* (b0/ (a0+b0))
    -   80 CONTINUE
    -      u = dlog(c) + u
    -C
    -   90 z = z - u
    -      b0 = b0 - 1.0D0
    -      apb = a0 + b0
    -      IF (apb.GT.1.0D0) GO TO 100
    -      t = 1.0D0 + gam1(apb)
    -      GO TO 110
    -
    -  100 u = dble(a0) + dble(b0) - 1.D0
    -      t = (1.0D0+gam1(u))/apb
    -  110 brcmp1 = a0*esum(mu,z)* (1.0D0+gam1(b0))/t
    -      RETURN
    -C
    -C                   ALGORITHM FOR B0 .GE. 8
    -C
    -  120 u = gamln1(a0) + algdiv(a0,b0)
    -      brcmp1 = a0*esum(mu,z-u)
    -      RETURN
    -C-----------------------------------------------------------------------
    -C              PROCEDURE FOR A .GE. 8 AND B .GE. 8
    -C-----------------------------------------------------------------------
    -  130 IF (a.GT.b) GO TO 140
    -      h = a/b
    -      x0 = h/ (1.0D0+h)
    -      y0 = 1.0D0/ (1.0D0+h)
    -      lambda = a - (a+b)*x
    -      GO TO 150
    -
    -  140 h = b/a
    -      x0 = 1.0D0/ (1.0D0+h)
    -      y0 = h/ (1.0D0+h)
    -      lambda = (a+b)*y - b
    -C
    -  150 e = -lambda/a
    -      IF (abs(e).GT.0.6D0) GO TO 160
    -      u = rlog1(e)
    -      GO TO 170
    -
    -  160 u = e - dlog(x/x0)
    -C
    -  170 e = lambda/b
    -      IF (abs(e).GT.0.6D0) GO TO 180
    -      v = rlog1(e)
    -      GO TO 190
    -
    -  180 v = e - dlog(y/y0)
    -C
    -  190 z = esum(mu,- (a*u+b*v))
    -      brcmp1 = const*sqrt(b*x0)*z*exp(-bcorr(a,b))
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/brcomp.f b/scipy-0.10.1/scipy/special/cdflib/brcomp.f
    deleted file mode 100644
    index f54cfd145b..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/brcomp.f
    +++ /dev/null
    @@ -1,137 +0,0 @@
    -      DOUBLE PRECISION FUNCTION brcomp(a,b,x,y)
    -C-----------------------------------------------------------------------
    -C               EVALUATION OF X**A*Y**B/BETA(A,B)
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,x,y
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a0,apb,b0,c,const,e,h,lambda,lnx,lny,t,u,v,x0,y0,
    -     +                 z
    -      INTEGER i,n
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION algdiv,alnrel,bcorr,betaln,gam1,gamln1,rlog1
    -      EXTERNAL algdiv,alnrel,bcorr,betaln,gam1,gamln1,rlog1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog,dmax1,dmin1,exp,sqrt
    -C     ..
    -C     .. Data statements ..
    -C-----------------
    -C     CONST = 1/SQRT(2*PI)
    -C-----------------
    -      DATA const/.398942280401433D0/
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      brcomp = 0.0D0
    -      IF (x.EQ.0.0D0 .OR. y.EQ.0.0D0) RETURN
    -      a0 = dmin1(a,b)
    -      IF (a0.GE.8.0D0) GO TO 130
    -C
    -      IF (x.GT.0.375D0) GO TO 10
    -      lnx = dlog(x)
    -      lny = alnrel(-x)
    -      GO TO 30
    -
    -   10 IF (y.GT.0.375D0) GO TO 20
    -      lnx = alnrel(-y)
    -      lny = dlog(y)
    -      GO TO 30
    -
    -   20 lnx = dlog(x)
    -      lny = dlog(y)
    -C
    -   30 z = a*lnx + b*lny
    -      IF (a0.LT.1.0D0) GO TO 40
    -      z = z - betaln(a,b)
    -      brcomp = exp(z)
    -      RETURN
    -C-----------------------------------------------------------------------
    -C              PROCEDURE FOR A .LT. 1 OR B .LT. 1
    -C-----------------------------------------------------------------------
    -   40 b0 = dmax1(a,b)
    -      IF (b0.GE.8.0D0) GO TO 120
    -      IF (b0.GT.1.0D0) GO TO 70
    -C
    -C                   ALGORITHM FOR B0 .LE. 1
    -C
    -      brcomp = exp(z)
    -      IF (brcomp.EQ.0.0D0) RETURN
    -C
    -      apb = a + b
    -      IF (apb.GT.1.0D0) GO TO 50
    -      z = 1.0D0 + gam1(apb)
    -      GO TO 60
    -
    -   50 u = dble(a) + dble(b) - 1.D0
    -      z = (1.0D0+gam1(u))/apb
    -C
    -   60 c = (1.0D0+gam1(a))* (1.0D0+gam1(b))/z
    -      brcomp = brcomp* (a0*c)/ (1.0D0+a0/b0)
    -      RETURN
    -C
    -C                ALGORITHM FOR 1 .LT. B0 .LT. 8
    -C
    -   70 u = gamln1(a0)
    -      n = b0 - 1.0D0
    -      IF (n.LT.1) GO TO 90
    -      c = 1.0D0
    -      DO 80 i = 1,n
    -          b0 = b0 - 1.0D0
    -          c = c* (b0/ (a0+b0))
    -   80 CONTINUE
    -      u = dlog(c) + u
    -C
    -   90 z = z - u
    -      b0 = b0 - 1.0D0
    -      apb = a0 + b0
    -      IF (apb.GT.1.0D0) GO TO 100
    -      t = 1.0D0 + gam1(apb)
    -      GO TO 110
    -
    -  100 u = dble(a0) + dble(b0) - 1.D0
    -      t = (1.0D0+gam1(u))/apb
    -  110 brcomp = a0*exp(z)* (1.0D0+gam1(b0))/t
    -      RETURN
    -C
    -C                   ALGORITHM FOR B0 .GE. 8
    -C
    -  120 u = gamln1(a0) + algdiv(a0,b0)
    -      brcomp = a0*exp(z-u)
    -      RETURN
    -C-----------------------------------------------------------------------
    -C              PROCEDURE FOR A .GE. 8 AND B .GE. 8
    -C-----------------------------------------------------------------------
    -  130 IF (a.GT.b) GO TO 140
    -      h = a/b
    -      x0 = h/ (1.0D0+h)
    -      y0 = 1.0D0/ (1.0D0+h)
    -      lambda = a - (a+b)*x
    -      GO TO 150
    -
    -  140 h = b/a
    -      x0 = 1.0D0/ (1.0D0+h)
    -      y0 = h/ (1.0D0+h)
    -      lambda = (a+b)*y - b
    -C
    -  150 e = -lambda/a
    -      IF (abs(e).GT.0.6D0) GO TO 160
    -      u = rlog1(e)
    -      GO TO 170
    -
    -  160 u = e - dlog(x/x0)
    -C
    -  170 e = lambda/b
    -      IF (abs(e).GT.0.6D0) GO TO 180
    -      v = rlog1(e)
    -      GO TO 190
    -
    -  180 v = e - dlog(y/y0)
    -C
    -  190 z = exp(- (a*u+b*v))
    -      brcomp = const*sqrt(b*x0)*z*exp(-bcorr(a,b))
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/bup.f b/scipy-0.10.1/scipy/special/cdflib/bup.f
    deleted file mode 100644
    index 2df254e84d..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/bup.f
    +++ /dev/null
    @@ -1,81 +0,0 @@
    -      DOUBLE PRECISION FUNCTION bup(a,b,x,y,n,eps)
    -C-----------------------------------------------------------------------
    -C     EVALUATION OF IX(A,B) - IX(A+N,B) WHERE N IS A POSITIVE INTEGER.
    -C     EPS IS THE TOLERANCE USED.
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,x,y
    -      INTEGER n
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ap1,apb,d,l,r,t,w
    -      INTEGER i,k,kp1,mu,nm1
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION brcmp1,exparg
    -      EXTERNAL brcmp1,exparg
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,exp
    -C     ..
    -C     .. Executable Statements ..
    -C
    -C          OBTAIN THE SCALING FACTOR EXP(-MU) AND
    -C             EXP(MU)*(X**A*Y**B/BETA(A,B))/A
    -C
    -      apb = a + b
    -      ap1 = a + 1.0D0
    -      mu = 0
    -      d = 1.0D0
    -      IF (n.EQ.1 .OR. a.LT.1.0D0) GO TO 10
    -      IF (apb.LT.1.1D0*ap1) GO TO 10
    -      mu = abs(exparg(1))
    -      k = exparg(0)
    -      IF (k.LT.mu) mu = k
    -      t = mu
    -      d = exp(-t)
    -C
    -   10 bup = brcmp1(mu,a,b,x,y)/a
    -      IF (n.EQ.1 .OR. bup.EQ.0.0D0) RETURN
    -      nm1 = n - 1
    -      w = d
    -C
    -C          LET K BE THE INDEX OF THE MAXIMUM TERM
    -C
    -      k = 0
    -      IF (b.LE.1.0D0) GO TO 50
    -      IF (y.GT.1.D-4) GO TO 20
    -      k = nm1
    -      GO TO 30
    -
    -   20 r = (b-1.0D0)*x/y - a
    -      IF (r.LT.1.0D0) GO TO 50
    -      k = nm1
    -      t = nm1
    -      IF (r.LT.t) k = r
    -C
    -C          ADD THE INCREASING TERMS OF THE SERIES
    -C
    -   30 DO 40 i = 1,k
    -          l = i - 1
    -          d = ((apb+l)/ (ap1+l))*x*d
    -          w = w + d
    -   40 CONTINUE
    -      IF (k.EQ.nm1) GO TO 70
    -C
    -C          ADD THE REMAINING TERMS OF THE SERIES
    -C
    -   50 kp1 = k + 1
    -      DO 60 i = kp1,nm1
    -          l = i - 1
    -          d = ((apb+l)/ (ap1+l))*x*d
    -          w = w + d
    -          IF (d.LE.eps*w) GO TO 70
    -   60 CONTINUE
    -C
    -C               TERMINATE THE PROCEDURE
    -C
    -   70 bup = bup*w
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfbet.f b/scipy-0.10.1/scipy/special/cdflib/cdfbet.f
    deleted file mode 100644
    index 22fe497a5f..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfbet.f
    +++ /dev/null
    @@ -1,319 +0,0 @@
    -      SUBROUTINE cdfbet(which,p,q,x,y,a,b,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFBET( WHICH, P, Q, X, Y, A, B, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C                         BETa Distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the beta distribution given
    -C     values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next four argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..4
    -C               iwhich = 1 : Calculate P and Q from X,Y,A and B
    -C               iwhich = 2 : Calculate X and Y from P,Q,A and B
    -C               iwhich = 3 : Calculate A from P,Q,X,Y and B
    -C               iwhich = 4 : Calculate B from P,Q,X,Y and A
    -C
    -C                    INTEGER WHICH
    -C
    -C     P <--> The integral from 0 to X of the chi-square
    -C            distribution.
    -C            Input range: [0, 1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: [0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C     X <--> Upper limit of integration of beta density.
    -C            Input range: [0,1].
    -C            Search range: [0,1]
    -C                    DOUBLE PRECISION X
    -C
    -C     Y <--> 1-X.
    -C            Input range: [0,1].
    -C            Search range: [0,1]
    -C            X + Y = 1.0.
    -C                    DOUBLE PRECISION Y
    -C
    -C     A <--> The first parameter of the beta density.
    -C            Input range: (0, +infinity).
    -C            Search range: [1D-100,1D100]
    -C                    DOUBLE PRECISION A
    -C
    -C     B <--> The second parameter of the beta density.
    -C            Input range: (0, +infinity).
    -C            Search range: [1D-100,1D100]
    -C                    DOUBLE PRECISION B
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                4 if X + Y .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Cumulative distribution function  (P)  is calculated directly by
    -C     code associated with the following reference.
    -C
    -C     DiDinato, A. R. and Morris,  A.   H.  Algorithm 708: Significant
    -C     Digit Computation of the Incomplete  Beta  Function Ratios.  ACM
    -C     Trans. Math.  Softw. 18 (1993), 360-373.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C
    -C                              Note
    -C
    -C
    -C     The beta density is proportional to
    -C               t^(A-1) * (1-t)^(B-1)
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,inf
    -      PARAMETER (zero=1.0D-100,inf=1.0D100)
    -      DOUBLE PRECISION one
    -      PARAMETER (one=1.0D0)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,bound,p,q,x,y
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,pq,xhi,xlo,xy
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumbet,dinvr,dstinv,dstzr,dzror
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LT.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LT.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.2) GO TO 150
    -      IF (.NOT. ((x.LT.0.0D0).OR. (x.GT.1.0D0))) GO TO 140
    -      IF (.NOT. (x.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      GO TO 130
    -
    -  120 bound = 1.0D0
    -  130 status = -4
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.2) GO TO 190
    -      IF (.NOT. ((y.LT.0.0D0).OR. (y.GT.1.0D0))) GO TO 180
    -      IF (.NOT. (y.LT.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      GO TO 170
    -
    -  160 bound = 1.0D0
    -  170 status = -5
    -      RETURN
    -
    -  180 CONTINUE
    -  190 IF (which.EQ.3) GO TO 210
    -      IF (.NOT. (a.LE.0.0D0)) GO TO 200
    -      bound = 0.0D0
    -      status = -6
    -      RETURN
    -
    -  200 CONTINUE
    -  210 IF (which.EQ.4) GO TO 230
    -      IF (.NOT. (b.LE.0.0D0)) GO TO 220
    -      bound = 0.0D0
    -      status = -7
    -      RETURN
    -
    -  220 CONTINUE
    -  230 IF (which.EQ.1) GO TO 270
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 260
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 240
    -      bound = 0.0D0
    -      GO TO 250
    -
    -  240 bound = 1.0D0
    -  250 status = 3
    -      RETURN
    -
    -  260 CONTINUE
    -  270 IF (which.EQ.2) GO TO 310
    -      xy = x + y
    -      IF (.NOT. (abs(((xy)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 300
    -      IF (.NOT. (xy.LT.0.0D0)) GO TO 280
    -      bound = 0.0D0
    -      GO TO 290
    -
    -  280 bound = 1.0D0
    -  290 status = 4
    -      RETURN
    -
    -  300 CONTINUE
    -  310 IF (.NOT. (which.EQ.1)) qporq = p .LE. q
    -      IF ((1).EQ. (which)) THEN
    -          CALL cumbet(x,y,a,b,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          CALL dstzr(0.0D0,1.0D0,atol,tol)
    -          IF (.NOT. (qporq)) GO TO 340
    -          status = 0
    -          CALL dzror(status,x,fx,xlo,xhi,qleft,qhi)
    -          y = one - x
    -  320     IF (.NOT. (status.EQ.1)) GO TO 330
    -          CALL cumbet(x,y,a,b,cum,ccum)
    -          fx = cum - p
    -          CALL dzror(status,x,fx,xlo,xhi,qleft,qhi)
    -          y = one - x
    -          GO TO 320
    -
    -  330     GO TO 370
    -
    -  340     status = 0
    -          CALL dzror(status,y,fx,xlo,xhi,qleft,qhi)
    -          x = one - y
    -  350     IF (.NOT. (status.EQ.1)) GO TO 360
    -          CALL cumbet(x,y,a,b,cum,ccum)
    -          fx = ccum - q
    -          CALL dzror(status,y,fx,xlo,xhi,qleft,qhi)
    -          x = one - y
    -          GO TO 350
    -
    -  360     CONTINUE
    -  370     IF (.NOT. (status.EQ.-1)) GO TO 400
    -          IF (.NOT. (qleft)) GO TO 380
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 390
    -
    -  380     status = 2
    -          bound = 1.0D0
    -  390     CONTINUE
    -  400     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          a = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,a,fx,qleft,qhi)
    -  410     IF (.NOT. (status.EQ.1)) GO TO 440
    -          CALL cumbet(x,y,a,b,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 420
    -          fx = cum - p
    -          GO TO 430
    -
    -  420     fx = ccum - q
    -  430     CALL dinvr(status,a,fx,qleft,qhi)
    -          GO TO 410
    -
    -  440     IF (.NOT. (status.EQ.-1)) GO TO 470
    -          IF (.NOT. (qleft)) GO TO 450
    -          status = 1
    -          bound = zero
    -          GO TO 460
    -
    -  450     status = 2
    -          bound = inf
    -  460     CONTINUE
    -  470     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          b = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,b,fx,qleft,qhi)
    -  480     IF (.NOT. (status.EQ.1)) GO TO 510
    -          CALL cumbet(x,y,a,b,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 490
    -          fx = cum - p
    -          GO TO 500
    -
    -  490     fx = ccum - q
    -  500     CALL dinvr(status,b,fx,qleft,qhi)
    -          GO TO 480
    -
    -  510     IF (.NOT. (status.EQ.-1)) GO TO 540
    -          IF (.NOT. (qleft)) GO TO 520
    -          status = 1
    -          bound = zero
    -          GO TO 530
    -
    -  520     status = 2
    -          bound = inf
    -  530     CONTINUE
    -  540 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfbin.f b/scipy-0.10.1/scipy/special/cdflib/cdfbin.f
    deleted file mode 100644
    index 85d56734ab..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfbin.f
    +++ /dev/null
    @@ -1,315 +0,0 @@
    -      SUBROUTINE cdfbin(which,p,q,s,xn,pr,ompr,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFBIN ( WHICH, P, Q, S, XN, PR, OMPR, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C                         BINomial distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the binomial
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next four argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..4
    -C               iwhich = 1 : Calculate P and Q from S,XN,PR and OMPR
    -C               iwhich = 2 : Calculate S from P,Q,XN,PR and OMPR
    -C               iwhich = 3 : Calculate XN from P,Q,S,PR and OMPR
    -C               iwhich = 4 : Calculate PR and OMPR from P,Q,S and XN
    -C                    INTEGER WHICH
    -C
    -C     P <--> The cumulation from 0 to S of the binomial distribution.
    -C            (Probablility of S or fewer successes in XN trials each
    -C            with probability of success PR.)
    -C            Input range: [0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: [0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C     S <--> The number of successes observed.
    -C            Input range: [0, XN]
    -C            Search range: [0, XN]
    -C                    DOUBLE PRECISION S
    -C
    -C     XN  <--> The number of binomial trials.
    -C              Input range: (0, +infinity).
    -C              Search range: [1E-100, 1E100]
    -C                    DOUBLE PRECISION XN
    -C
    -C     PR  <--> The probability of success in each binomial trial.
    -C              Input range: [0,1].
    -C              Search range: [0,1]
    -C                    DOUBLE PRECISION PR
    -C
    -C     OMPR  <--> 1-PR
    -C              Input range: [0,1].
    -C              Search range: [0,1]
    -C              PR + OMPR = 1.0
    -C                    DOUBLE PRECISION OMPR
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                4 if PR + OMPR .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.5.24    of   Abramowitz  and    Stegun,  Handbook   of
    -C     Mathematical   Functions (1966) is   used  to reduce the  binomial
    -C     distribution  to  the  cumulative incomplete    beta distribution.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION zero,inf
    -      PARAMETER (zero=1.0D-100,inf=1.0D100)
    -      DOUBLE PRECISION one
    -      PARAMETER (one=1.0D0)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,ompr,p,pr,q,s,xn
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,pq,prompr,xhi,xlo
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumbin,dinvr,dstinv,dstzr,dzror
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).AND. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LT.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LT.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.3) GO TO 130
    -      IF (.NOT. (xn.LE.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.2) GO TO 170
    -      IF (.NOT. ((s.LT.0.0D0).OR. ((which.NE.3).AND.
    -     +    (s.GT.xn)))) GO TO 160
    -      IF (.NOT. (s.LT.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      GO TO 150
    -
    -  140 bound = xn
    -  150 status = -4
    -      RETURN
    -
    -  160 CONTINUE
    -  170 IF (which.EQ.4) GO TO 210
    -      IF (.NOT. ((pr.LT.0.0D0).OR. (pr.GT.1.0D0))) GO TO 200
    -      IF (.NOT. (pr.LT.0.0D0)) GO TO 180
    -      bound = 0.0D0
    -      GO TO 190
    -
    -  180 bound = 1.0D0
    -  190 status = -6
    -      RETURN
    -
    -  200 CONTINUE
    -  210 IF (which.EQ.4) GO TO 250
    -      IF (.NOT. ((ompr.LT.0.0D0).OR. (ompr.GT.1.0D0))) GO TO 240
    -      IF (.NOT. (ompr.LT.0.0D0)) GO TO 220
    -      bound = 0.0D0
    -      GO TO 230
    -
    -  220 bound = 1.0D0
    -  230 status = -7
    -      RETURN
    -
    -  240 CONTINUE
    -  250 IF (which.EQ.1) GO TO 290
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 280
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 260
    -      bound = 0.0D0
    -      GO TO 270
    -
    -  260 bound = 1.0D0
    -  270 status = 3
    -      RETURN
    -
    -  280 CONTINUE
    -  290 IF (which.EQ.4) GO TO 330
    -      prompr = pr + ompr
    -      IF (.NOT. (abs(((prompr)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 320
    -      IF (.NOT. (prompr.LT.0.0D0)) GO TO 300
    -      bound = 0.0D0
    -      GO TO 310
    -
    -  300 bound = 1.0D0
    -  310 status = 4
    -      RETURN
    -
    -  320 CONTINUE
    -  330 IF (.NOT. (which.EQ.1)) qporq = p .LE. q
    -      IF ((1).EQ. (which)) THEN
    -          CALL cumbin(s,xn,pr,ompr,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          s = xn/2.0D0
    -          CALL dstinv(0.0D0,xn,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,s,fx,qleft,qhi)
    -  340     IF (.NOT. (status.EQ.1)) GO TO 370
    -          CALL cumbin(s,xn,pr,ompr,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 350
    -          fx = cum - p
    -          GO TO 360
    -
    -  350     fx = ccum - q
    -  360     CALL dinvr(status,s,fx,qleft,qhi)
    -          GO TO 340
    -
    -  370     IF (.NOT. (status.EQ.-1)) GO TO 400
    -          IF (.NOT. (qleft)) GO TO 380
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 390
    -
    -  380     status = 2
    -          bound = xn
    -  390     CONTINUE
    -  400     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          xn = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,xn,fx,qleft,qhi)
    -  410     IF (.NOT. (status.EQ.1)) GO TO 440
    -          CALL cumbin(s,xn,pr,ompr,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 420
    -          fx = cum - p
    -          GO TO 430
    -
    -  420     fx = ccum - q
    -  430     CALL dinvr(status,xn,fx,qleft,qhi)
    -          GO TO 410
    -
    -  440     IF (.NOT. (status.EQ.-1)) GO TO 470
    -          IF (.NOT. (qleft)) GO TO 450
    -          status = 1
    -          bound = zero
    -          GO TO 460
    -
    -  450     status = 2
    -          bound = inf
    -  460     CONTINUE
    -  470     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          CALL dstzr(0.0D0,1.0D0,atol,tol)
    -          IF (.NOT. (qporq)) GO TO 500
    -          status = 0
    -          CALL dzror(status,pr,fx,xlo,xhi,qleft,qhi)
    -          ompr = one - pr
    -  480     IF (.NOT. (status.EQ.1)) GO TO 490
    -          CALL cumbin(s,xn,pr,ompr,cum,ccum)
    -          fx = cum - p
    -          CALL dzror(status,pr,fx,xlo,xhi,qleft,qhi)
    -          ompr = one - pr
    -          GO TO 480
    -
    -  490     GO TO 530
    -
    -  500     status = 0
    -          CALL dzror(status,ompr,fx,xlo,xhi,qleft,qhi)
    -          pr = one - ompr
    -  510     IF (.NOT. (status.EQ.1)) GO TO 520
    -          CALL cumbin(s,xn,pr,ompr,cum,ccum)
    -          fx = ccum - q
    -          CALL dzror(status,ompr,fx,xlo,xhi,qleft,qhi)
    -          pr = one - ompr
    -          GO TO 510
    -
    -  520     CONTINUE
    -  530     IF (.NOT. (status.EQ.-1)) GO TO 560
    -          IF (.NOT. (qleft)) GO TO 540
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 550
    -
    -  540     status = 2
    -          bound = 1.0D0
    -  550     CONTINUE
    -  560 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfchi.f b/scipy-0.10.1/scipy/special/cdflib/cdfchi.f
    deleted file mode 100644
    index 58d4c4935d..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfchi.f
    +++ /dev/null
    @@ -1,245 +0,0 @@
    -      SUBROUTINE cdfchi(which,p,q,x,df,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFCHI( WHICH, P, Q, X, DF, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               CHI-Square distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the chi-square
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next three argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..3
    -C               iwhich = 1 : Calculate P and Q from X and DF
    -C               iwhich = 2 : Calculate X from P,Q and DF
    -C               iwhich = 3 : Calculate DF from P,Q and X
    -C                    INTEGER WHICH
    -C
    -C     P <--> The integral from 0 to X of the chi-square
    -C            distribution.
    -C            Input range: [0, 1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: (0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C     X <--> Upper limit of integration of the non-central
    -C            chi-square distribution.
    -C            Input range: [0, +infinity).
    -C            Search range: [0,1E100]
    -C                    DOUBLE PRECISION X
    -C
    -C     DF <--> Degrees of freedom of the
    -C             chi-square distribution.
    -C             Input range: (0, +infinity).
    -C             Search range: [ 1E-100, 1E100]
    -C                    DOUBLE PRECISION DF
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C               10 indicates error returned from cumgam.  See
    -C                  references in cdfgam
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula    26.4.19   of Abramowitz  and     Stegun, Handbook  of
    -C     Mathematical Functions   (1966) is used   to reduce the chisqure
    -C     distribution to the incomplete distribution.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,inf
    -      PARAMETER (zero=1.0D-100,inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,df,p,q,x
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,porq,pq
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumchi,dinvr,dstinv
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.3))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 3.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.2) GO TO 130
    -      IF (.NOT. (x.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.3) GO TO 150
    -      IF (.NOT. (df.LE.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.1) GO TO 190
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 180
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      GO TO 170
    -
    -  160 bound = 1.0D0
    -  170 status = 3
    -      RETURN
    -
    -  180 CONTINUE
    -  190 IF (which.EQ.1) GO TO 220
    -      qporq = p .LE. q
    -      IF (.NOT. (qporq)) GO TO 200
    -      porq = p
    -      GO TO 210
    -
    -  200 porq = q
    -  210 CONTINUE
    -  220 IF ((1).EQ. (which)) THEN
    -          status = 0
    -          CALL cumchi(x,df,p,q)
    -          IF (porq.GT.1.5D0) THEN
    -              status = 10
    -              RETURN
    -
    -          END IF
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          x = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,x,fx,qleft,qhi)
    -  230     IF (.NOT. (status.EQ.1)) GO TO 270
    -          CALL cumchi(x,df,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 240
    -          fx = cum - p
    -          GO TO 250
    -
    -  240     fx = ccum - q
    -  250     IF (.NOT. ((fx+porq).GT.1.5D0)) GO TO 260
    -          status = 10
    -          RETURN
    -
    -  260     CALL dinvr(status,x,fx,qleft,qhi)
    -          GO TO 230
    -
    -  270     IF (.NOT. (status.EQ.-1)) GO TO 300
    -          IF (.NOT. (qleft)) GO TO 280
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 290
    -
    -  280     status = 2
    -          bound = inf
    -  290     CONTINUE
    -  300     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          df = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,df,fx,qleft,qhi)
    -  310     IF (.NOT. (status.EQ.1)) GO TO 350
    -          CALL cumchi(x,df,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 320
    -          fx = cum - p
    -          GO TO 330
    -
    -  320     fx = ccum - q
    -  330     IF (.NOT. ((fx+porq).GT.1.5D0)) GO TO 340
    -          status = 10
    -          RETURN
    -
    -  340     CALL dinvr(status,df,fx,qleft,qhi)
    -          GO TO 310
    -
    -  350     IF (.NOT. (status.EQ.-1)) GO TO 380
    -          IF (.NOT. (qleft)) GO TO 360
    -          status = 1
    -          bound = zero
    -          GO TO 370
    -
    -  360     status = 2
    -          bound = inf
    -  370     CONTINUE
    -  380 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfchn.f b/scipy-0.10.1/scipy/special/cdflib/cdfchn.f
    deleted file mode 100644
    index 7559a8355b..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfchn.f
    +++ /dev/null
    @@ -1,228 +0,0 @@
    -      SUBROUTINE cdfchn(which,p,q,x,df,pnonc,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFCHN( WHICH, P, Q, X, DF, PNONC, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               Non-central Chi-Square
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the non-central chi-square
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next three argument
    -C               values is to be calculated from the others.
    -C               Input range: 1..4
    -C               iwhich = 1 : Calculate P and Q from X and DF
    -C               iwhich = 2 : Calculate X from P,DF and PNONC
    -C               iwhich = 3 : Calculate DF from P,X and PNONC
    -C               iwhich = 3 : Calculate PNONC from P,X and DF
    -C                    INTEGER WHICH
    -C
    -C     P <--> The integral from 0 to X of the non-central chi-square
    -C            distribution.
    -C            Input range: [0, 1-1E-16).
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Q is not used by this subroutine and is only included
    -C            for similarity with other cdf* routines.
    -C                    DOUBLE PRECISION Q
    -C
    -C     X <--> Upper limit of integration of the non-central
    -C            chi-square distribution.
    -C            Input range: [0, +infinity).
    -C            Search range: [0,1E100]
    -C                    DOUBLE PRECISION X
    -C
    -C     DF <--> Degrees of freedom of the non-central
    -C             chi-square distribution.
    -C             Input range: (0, +infinity).
    -C             Search range: [ 1E-100, 1E100]
    -C                    DOUBLE PRECISION DF
    -C
    -C     PNONC <--> Non-centrality parameter of the non-central
    -C                chi-square distribution.
    -C                Input range: [0, +infinity).
    -C                Search range: [0,1E4]
    -C                    DOUBLE PRECISION PNONC
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.4.25   of   Abramowitz   and   Stegun,  Handbook  of
    -C     Mathematical  Functions (1966) is used to compute the cumulative
    -C     distribution function.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C
    -C                            WARNING
    -C
    -C     The computation time  required for this  routine is proportional
    -C     to the noncentrality  parameter  (PNONC).  Very large  values of
    -C     this parameter can consume immense  computer resources.  This is
    -C     why the search range is bounded by 10,000.
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tent4
    -      PARAMETER (tent4=1.0D4)
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,one,inf
    -      PARAMETER (zero=1.0D-100,one=1.0D0-1.0D-16,inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,df,p,pnonc,q,x
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx
    -      LOGICAL qhi,qleft
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumchn,dinvr,dstinv
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.one))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = one
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.2) GO TO 90
    -      IF (.NOT. (x.LT.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -   80 CONTINUE
    -   90 IF (which.EQ.3) GO TO 110
    -      IF (.NOT. (df.LE.0.0D0)) GO TO 100
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.4) GO TO 130
    -      IF (.NOT. (pnonc.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -6
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF ((1).EQ. (which)) THEN
    -          CALL cumchn(x,df,pnonc,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          x = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,x,fx,qleft,qhi)
    -  140     IF (.NOT. (status.EQ.1)) GO TO 150
    -          CALL cumchn(x,df,pnonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,x,fx,qleft,qhi)
    -          GO TO 140
    -
    -  150     IF (.NOT. (status.EQ.-1)) GO TO 180
    -          IF (.NOT. (qleft)) GO TO 160
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 170
    -
    -  160     status = 2
    -          bound = inf
    -  170     CONTINUE
    -  180     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          df = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,df,fx,qleft,qhi)
    -  190     IF (.NOT. (status.EQ.1)) GO TO 200
    -          CALL cumchn(x,df,pnonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,df,fx,qleft,qhi)
    -          GO TO 190
    -
    -  200     IF (.NOT. (status.EQ.-1)) GO TO 230
    -          IF (.NOT. (qleft)) GO TO 210
    -          status = 1
    -          bound = zero
    -          GO TO 220
    -
    -  210     status = 2
    -          bound = inf
    -  220     CONTINUE
    -  230     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          pnonc = 5.0D0
    -          CALL dstinv(0.0D0,tent4,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,pnonc,fx,qleft,qhi)
    -  240     IF (.NOT. (status.EQ.1)) GO TO 250
    -          CALL cumchn(x,df,pnonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,pnonc,fx,qleft,qhi)
    -          GO TO 240
    -
    -  250     IF (.NOT. (status.EQ.-1)) GO TO 280
    -          IF (.NOT. (qleft)) GO TO 260
    -          status = 1
    -          bound = zero
    -          GO TO 270
    -
    -  260     status = 2
    -          bound = tent4
    -  270     CONTINUE
    -  280 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdff.f b/scipy-0.10.1/scipy/special/cdflib/cdff.f
    deleted file mode 100644
    index 9e6a6f0be9..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdff.f
    +++ /dev/null
    @@ -1,267 +0,0 @@
    -      SUBROUTINE cdff(which,p,q,f,dfn,dfd,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFF( WHICH, P, Q, F, DFN, DFD, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               F distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the F distribution
    -C     given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next four argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..4
    -C               iwhich = 1 : Calculate P and Q from F,DFN and DFD
    -C               iwhich = 2 : Calculate F from P,Q,DFN and DFD
    -C               iwhich = 3 : Calculate DFN from P,Q,F and DFD
    -C               iwhich = 4 : Calculate DFD from P,Q,F and DFN
    -C                    INTEGER WHICH
    -C
    -C       P <--> The integral from 0 to F of the f-density.
    -C              Input range: [0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C       Q <--> 1-P.
    -C              Input range: (0, 1].
    -C              P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C       F <--> Upper limit of integration of the f-density.
    -C              Input range: [0, +infinity).
    -C              Search range: [0,1E100]
    -C                    DOUBLE PRECISION F
    -C
    -C     DFN < --> Degrees of freedom of the numerator sum of squares.
    -C               Input range: (0, +infinity).
    -C               Search range: [ 1E-100, 1E100]
    -C                    DOUBLE PRECISION DFN
    -C
    -C     DFD < --> Degrees of freedom of the denominator sum of squares.
    -C               Input range: (0, +infinity).
    -C               Search range: [ 1E-100, 1E100]
    -C                    DOUBLE PRECISION DFD
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula   26.6.2   of   Abramowitz   and   Stegun,  Handbook  of
    -C     Mathematical  Functions (1966) is used to reduce the computation
    -C     of the  cumulative  distribution function for the  F  variate to
    -C     that of an incomplete beta.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C                              WARNING
    -C
    -C     The value of the  cumulative  F distribution is  not necessarily
    -C     monotone in  either degrees of freedom.  There  thus may  be two
    -C     values  that  provide a given CDF  value.   This routine assumes
    -C     monotonicity and will find an arbitrary one of the two values.
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,inf
    -      PARAMETER (zero=1.0D-100,inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,dfd,dfn,f,p,q
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,pq
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumf,dinvr,dstinv
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.2) GO TO 130
    -      IF (.NOT. (f.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.3) GO TO 150
    -      IF (.NOT. (dfn.LE.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.4) GO TO 170
    -      IF (.NOT. (dfd.LE.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      status = -6
    -      RETURN
    -
    -  160 CONTINUE
    -  170 IF (which.EQ.1) GO TO 210
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 200
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 180
    -      bound = 0.0D0
    -      GO TO 190
    -
    -  180 bound = 1.0D0
    -  190 status = 3
    -      RETURN
    -
    -  200 CONTINUE
    -  210 IF (.NOT. (which.EQ.1)) qporq = p .LE. q
    -      IF ((1).EQ. (which)) THEN
    -          CALL cumf(f,dfn,dfd,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          f = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,f,fx,qleft,qhi)
    -  220     IF (.NOT. (status.EQ.1)) GO TO 250
    -          CALL cumf(f,dfn,dfd,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 230
    -          fx = cum - p
    -          GO TO 240
    -
    -  230     fx = ccum - q
    -  240     CALL dinvr(status,f,fx,qleft,qhi)
    -          GO TO 220
    -
    -  250     IF (.NOT. (status.EQ.-1)) GO TO 280
    -          IF (.NOT. (qleft)) GO TO 260
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 270
    -
    -  260     status = 2
    -          bound = inf
    -  270     CONTINUE
    -  280     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          dfn = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,dfn,fx,qleft,qhi)
    -  290     IF (.NOT. (status.EQ.1)) GO TO 320
    -          CALL cumf(f,dfn,dfd,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 300
    -          fx = cum - p
    -          GO TO 310
    -
    -  300     fx = ccum - q
    -  310     CALL dinvr(status,dfn,fx,qleft,qhi)
    -          GO TO 290
    -
    -  320     IF (.NOT. (status.EQ.-1)) GO TO 350
    -          IF (.NOT. (qleft)) GO TO 330
    -          status = 1
    -          bound = zero
    -          GO TO 340
    -
    -  330     status = 2
    -          bound = inf
    -  340     CONTINUE
    -  350     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          dfd = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,dfd,fx,qleft,qhi)
    -  360     IF (.NOT. (status.EQ.1)) GO TO 390
    -          CALL cumf(f,dfn,dfd,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 370
    -          fx = cum - p
    -          GO TO 380
    -
    -  370     fx = ccum - q
    -  380     CALL dinvr(status,dfd,fx,qleft,qhi)
    -          GO TO 360
    -
    -  390     IF (.NOT. (status.EQ.-1)) GO TO 420
    -          IF (.NOT. (qleft)) GO TO 400
    -          status = 1
    -          bound = zero
    -          GO TO 410
    -
    -  400     status = 2
    -          bound = inf
    -  410     CONTINUE
    -  420 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdffnc.f b/scipy-0.10.1/scipy/special/cdflib/cdffnc.f
    deleted file mode 100644
    index f122182938..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdffnc.f
    +++ /dev/null
    @@ -1,268 +0,0 @@
    -      SUBROUTINE cdffnc(which,p,q,f,dfn,dfd,phonc,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFFNC( WHICH, P, Q, F, DFN, DFD, PNONC, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               Non-central F distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the Non-central F
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next five argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..5
    -C               iwhich = 1 : Calculate P and Q from F,DFN,DFD and PNONC
    -C               iwhich = 2 : Calculate F from P,Q,DFN,DFD and PNONC
    -C               iwhich = 3 : Calculate DFN from P,Q,F,DFD and PNONC
    -C               iwhich = 4 : Calculate DFD from P,Q,F,DFN and PNONC
    -C               iwhich = 5 : Calculate PNONC from P,Q,F,DFN and DFD
    -C                    INTEGER WHICH
    -C
    -C       P <--> The integral from 0 to F of the non-central f-density.
    -C              Input range: [0,1-1E-16).
    -C                    DOUBLE PRECISION P
    -C
    -C       Q <--> 1-P.
    -C            Q is not used by this subroutine and is only included
    -C            for similarity with other cdf* routines.
    -C                    DOUBLE PRECISION Q
    -C
    -C       F <--> Upper limit of integration of the non-central f-density.
    -C              Input range: [0, +infinity).
    -C              Search range: [0,1E100]
    -C                    DOUBLE PRECISION F
    -C
    -C     DFN < --> Degrees of freedom of the numerator sum of squares.
    -C               Input range: (0, +infinity).
    -C               Search range: [ 1E-100, 1E100]
    -C                    DOUBLE PRECISION DFN
    -C
    -C     DFD < --> Degrees of freedom of the denominator sum of squares.
    -C               Must be in range: (0, +infinity).
    -C               Input range: (0, +infinity).
    -C               Search range: [ 1E-100, 1E100]
    -C                    DOUBLE PRECISION DFD
    -C
    -C     PNONC <-> The non-centrality parameter
    -C               Input range: [0,infinity)
    -C               Search range: [0,1E4]
    -C                    DOUBLE PRECISION PHONC
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.6.20   of   Abramowitz   and   Stegun,  Handbook  of
    -C     Mathematical  Functions (1966) is used to compute the cumulative
    -C     distribution function.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C                            WARNING
    -C
    -C     The computation time  required for this  routine is proportional
    -C     to the noncentrality  parameter  (PNONC).  Very large  values of
    -C     this parameter can consume immense  computer resources.  This is
    -C     why the search range is bounded by 10,000.
    -C
    -C                              WARNING
    -C
    -C     The  value  of the  cumulative  noncentral F distribution is not
    -C     necessarily monotone in either degrees  of freedom.  There  thus
    -C     may be two values that provide a given  CDF value.  This routine
    -C     assumes monotonicity  and will find  an arbitrary one of the two
    -C     values.
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tent4
    -      PARAMETER (tent4=1.0D4)
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,one,inf
    -      PARAMETER (zero=1.0D-100,one=1.0D0-1.0D-16,inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,dfd,dfn,f,p,phonc,q
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx
    -      LOGICAL qhi,qleft
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumfnc,dinvr,dstinv
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.5))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 5.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.one))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = one
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.2) GO TO 90
    -      IF (.NOT. (f.LT.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -   80 CONTINUE
    -   90 IF (which.EQ.3) GO TO 110
    -      IF (.NOT. (dfn.LE.0.0D0)) GO TO 100
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.4) GO TO 130
    -      IF (.NOT. (dfd.LE.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -6
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.5) GO TO 150
    -      IF (.NOT. (phonc.LT.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      status = -7
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF ((1).EQ. (which)) THEN
    -          CALL cumfnc(f,dfn,dfd,phonc,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          f = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,f,fx,qleft,qhi)
    -  160     IF (.NOT. (status.EQ.1)) GO TO 170
    -          CALL cumfnc(f,dfn,dfd,phonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,f,fx,qleft,qhi)
    -          GO TO 160
    -
    -  170     IF (.NOT. (status.EQ.-1)) GO TO 200
    -          IF (.NOT. (qleft)) GO TO 180
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 190
    -
    -  180     status = 2
    -          bound = inf
    -  190     CONTINUE
    -  200     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          dfn = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,dfn,fx,qleft,qhi)
    -  210     IF (.NOT. (status.EQ.1)) GO TO 220
    -          CALL cumfnc(f,dfn,dfd,phonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,dfn,fx,qleft,qhi)
    -          GO TO 210
    -
    -  220     IF (.NOT. (status.EQ.-1)) GO TO 250
    -          IF (.NOT. (qleft)) GO TO 230
    -          status = 1
    -          bound = zero
    -          GO TO 240
    -
    -  230     status = 2
    -          bound = inf
    -  240     CONTINUE
    -  250     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          dfd = 5.0D0
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,dfd,fx,qleft,qhi)
    -  260     IF (.NOT. (status.EQ.1)) GO TO 270
    -          CALL cumfnc(f,dfn,dfd,phonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,dfd,fx,qleft,qhi)
    -          GO TO 260
    -
    -  270     IF (.NOT. (status.EQ.-1)) GO TO 300
    -          IF (.NOT. (qleft)) GO TO 280
    -          status = 1
    -          bound = zero
    -          GO TO 290
    -
    -  280     status = 2
    -          bound = inf
    -  290     CONTINUE
    -  300     CONTINUE
    -
    -      ELSE IF ((5).EQ. (which)) THEN
    -          phonc = 5.0D0
    -          CALL dstinv(0.0D0,tent4,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,phonc,fx,qleft,qhi)
    -  310     IF (.NOT. (status.EQ.1)) GO TO 320
    -          CALL cumfnc(f,dfn,dfd,phonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,phonc,fx,qleft,qhi)
    -          GO TO 310
    -
    -  320     IF (.NOT. (status.EQ.-1)) GO TO 350
    -          IF (.NOT. (qleft)) GO TO 330
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 340
    -
    -  330     status = 2
    -          bound = tent4
    -  340     CONTINUE
    -  350 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfgam.f b/scipy-0.10.1/scipy/special/cdflib/cdfgam.f
    deleted file mode 100644
    index f54de1f23c..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfgam.f
    +++ /dev/null
    @@ -1,264 +0,0 @@
    -      SUBROUTINE cdfgam(which,p,q,x,shape,scale,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFGAM( WHICH, P, Q, X, SHAPE, SCALE, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C                         GAMma Distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the gamma
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next four argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..4
    -C               iwhich = 1 : Calculate P and Q from X,SHAPE and SCALE
    -C               iwhich = 2 : Calculate X from P,Q,SHAPE and SCALE
    -C               iwhich = 3 : Calculate SHAPE from P,Q,X and SCALE
    -C               iwhich = 4 : Calculate SCALE from P,Q,X and SHAPE
    -C                    INTEGER WHICH
    -C
    -C     P <--> The integral from 0 to X of the gamma density.
    -C            Input range: [0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: (0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C
    -C     X <--> The upper limit of integration of the gamma density.
    -C            Input range: [0, +infinity).
    -C            Search range: [0,1E100]
    -C                    DOUBLE PRECISION X
    -C
    -C     SHAPE <--> The shape parameter of the gamma density.
    -C                Input range: (0, +infinity).
    -C                Search range: [1E-100,1E100]
    -C                  DOUBLE PRECISION SHAPE
    -C
    -C
    -C     SCALE <--> The scale parameter of the gamma density.
    -C                Input range: (0, +infinity).
    -C                Search range: (1E-100,1E100]
    -C                   DOUBLE PRECISION SCALE
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                10 if the gamma or inverse gamma routine cannot
    -C                   compute the answer.  Usually happens only for
    -C                   X and SHAPE very large (gt 1E10 or more)
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Cumulative distribution function (P) is calculated directly by
    -C     the code associated with:
    -C
    -C     DiDinato, A. R. and Morris, A. H. Computation of the  incomplete
    -C     gamma function  ratios  and their  inverse.   ACM  Trans.  Math.
    -C     Softw. 12 (1986), 377-393.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C
    -C                              Note
    -C
    -C
    -C
    -C     The gamma density is proportional to
    -C       T**(SHAPE - 1) * EXP(- SCALE * T)
    -C
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,inf
    -      PARAMETER (zero=1.0D-100,inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,p,q,scale,shape,x
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,porq,pq,xscale,xx
    -      INTEGER ierr
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumgam,dinvr,dstinv,gaminv
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0d0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.2) GO TO 130
    -      IF (.NOT. (x.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.3) GO TO 150
    -      IF (.NOT. (shape.LE.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.4) GO TO 170
    -      IF (.NOT. (scale.LE.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      status = -6
    -      RETURN
    -
    -  160 CONTINUE
    -  170 IF (which.EQ.1) GO TO 210
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 200
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 180
    -      bound = 0.0D0
    -      GO TO 190
    -
    -  180 bound = 1.0D0
    -  190 status = 3
    -      RETURN
    -
    -  200 CONTINUE
    -  210 IF (which.EQ.1) GO TO 240
    -      qporq = p .LE. q
    -      IF (.NOT. (qporq)) GO TO 220
    -      porq = p
    -      GO TO 230
    -
    -  220 porq = q
    -  230 CONTINUE
    -  240 IF ((1).EQ. (which)) THEN
    -          status = 0
    -          xscale = x*scale
    -          CALL cumgam(xscale,shape,p,q)
    -          IF (p.GT.1.5D0) status = 10
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          CALL gaminv(shape,xx,-1.0D0,p,q,ierr)
    -          IF (ierr.LT.0.0D0) THEN
    -              status = 10
    -              RETURN
    -
    -          ELSE
    -              x = xx/scale
    -              status = 0
    -          END IF
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          shape = 5.0D0
    -          xscale = x*scale
    -          CALL dstinv(zero,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,shape,fx,qleft,qhi)
    -  250     IF (.NOT. (status.EQ.1)) GO TO 290
    -          CALL cumgam(xscale,shape,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 260
    -          fx = cum - p
    -          GO TO 270
    -
    -  260     fx = ccum - q
    -  270     IF (.NOT. ((qporq.AND. (cum.GT.1.5D0)).OR.
    -     +        ((.NOT.qporq).AND. (ccum.GT.1.5D0)))) GO TO 280
    -          status = 10
    -          RETURN
    -
    -  280     CALL dinvr(status,shape,fx,qleft,qhi)
    -          GO TO 250
    -
    -  290     IF (.NOT. (status.EQ.-1)) GO TO 320
    -          IF (.NOT. (qleft)) GO TO 300
    -          status = 1
    -          bound = zero
    -          GO TO 310
    -
    -  300     status = 2
    -          bound = inf
    -  310     CONTINUE
    -  320     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          CALL gaminv(shape,xx,-1.0D0,p,q,ierr)
    -          IF (ierr.LT.0.0D0) THEN
    -              status = 10
    -              RETURN
    -
    -          ELSE
    -              scale = xx/x
    -              status = 0
    -          END IF
    -
    -      END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfnbn.f b/scipy-0.10.1/scipy/special/cdflib/cdfnbn.f
    deleted file mode 100644
    index 50d54c1bf1..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfnbn.f
    +++ /dev/null
    @@ -1,320 +0,0 @@
    -      SUBROUTINE cdfnbn(which,p,q,s,xn,pr,ompr,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFNBN ( WHICH, P, S, XN, PR, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               Negative BiNomial distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the negative binomial
    -C     distribution given values for the others.
    -C
    -C     The  cumulative  negative   binomial  distribution  returns  the
    -C     probability that there  will be  F or fewer failures before  the
    -C     XNth success in binomial trials each of which has probability of
    -C     success PR.
    -C
    -C     The individual term of the negative binomial is the probability of
    -C     S failures before XN successes and is
    -C          Choose( S, XN+S-1 ) * PR^(XN) * (1-PR)^S
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which of the next four argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..4
    -C               iwhich = 1 : Calculate P and Q from S,XN,PR and OMPR
    -C               iwhich = 2 : Calculate S from P,Q,XN,PR and OMPR
    -C               iwhich = 3 : Calculate XN from P,Q,S,PR and OMPR
    -C               iwhich = 4 : Calculate PR and OMPR from P,Q,S and XN
    -C                    INTEGER WHICH
    -C
    -C     P <--> The cumulation from 0 to S of the  negative
    -C            binomial distribution.
    -C            Input range: [0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: (0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C     S <--> The upper limit of cumulation of the binomial distribution.
    -C            There are F or fewer failures before the XNth success.
    -C            Input range: [0, +infinity).
    -C            Search range: [0, 1E100]
    -C                    DOUBLE PRECISION S
    -C
    -C     XN  <--> The number of successes.
    -C              Input range: [0, +infinity).
    -C              Search range: [0, 1E100]
    -C                    DOUBLE PRECISION XN
    -C
    -C     PR  <--> The probability of success in each binomial trial.
    -C              Input range: [0,1].
    -C              Search range: [0,1].
    -C                    DOUBLE PRECISION PR
    -C
    -C     OMPR  <--> 1-PR
    -C              Input range: [0,1].
    -C              Search range: [0,1]
    -C              PR + OMPR = 1.0
    -C                    DOUBLE PRECISION OMPR
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                4 if PR + OMPR .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula   26.5.26   of   Abramowitz  and  Stegun,  Handbook   of
    -C     Mathematical Functions (1966) is used  to  reduce calculation of
    -C     the cumulative distribution  function to that of  an  incomplete
    -C     beta.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION inf
    -      PARAMETER (inf=1.0D100)
    -      DOUBLE PRECISION one
    -      PARAMETER (one=1.0D0)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,ompr,p,pr,q,s,xn
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,pq,prompr,xhi,xlo
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumnbn,dinvr,dstinv,dstzr,dzror
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.2) GO TO 130
    -      IF (.NOT. (s.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.3) GO TO 150
    -      IF (.NOT. (xn.LT.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.4) GO TO 190
    -      IF (.NOT. ((pr.LT.0.0D0).OR. (pr.GT.1.0D0))) GO TO 180
    -      IF (.NOT. (pr.LT.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      GO TO 170
    -
    -  160 bound = 1.0D0
    -  170 status = -6
    -      RETURN
    -
    -  180 CONTINUE
    -  190 IF (which.EQ.4) GO TO 230
    -      IF (.NOT. ((ompr.LT.0.0D0).OR. (ompr.GT.1.0D0))) GO TO 220
    -      IF (.NOT. (ompr.LT.0.0D0)) GO TO 200
    -      bound = 0.0D0
    -      GO TO 210
    -
    -  200 bound = 1.0D0
    -  210 status = -7
    -      RETURN
    -
    -  220 CONTINUE
    -  230 IF (which.EQ.1) GO TO 270
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 260
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 240
    -      bound = 0.0D0
    -      GO TO 250
    -
    -  240 bound = 1.0D0
    -  250 status = 3
    -      RETURN
    -
    -  260 CONTINUE
    -  270 IF (which.EQ.4) GO TO 310
    -      prompr = pr + ompr
    -      IF (.NOT. (abs(((prompr)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 300
    -      IF (.NOT. (prompr.LT.0.0D0)) GO TO 280
    -      bound = 0.0D0
    -      GO TO 290
    -
    -  280 bound = 1.0D0
    -  290 status = 4
    -      RETURN
    -
    -  300 CONTINUE
    -  310 IF (.NOT. (which.EQ.1)) qporq = p .LE. q
    -      IF ((1).EQ. (which)) THEN
    -          CALL cumnbn(s,xn,pr,ompr,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          s = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,s,fx,qleft,qhi)
    -  320     IF (.NOT. (status.EQ.1)) GO TO 350
    -          CALL cumnbn(s,xn,pr,ompr,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 330
    -          fx = cum - p
    -          GO TO 340
    -
    -  330     fx = ccum - q
    -  340     CALL dinvr(status,s,fx,qleft,qhi)
    -          GO TO 320
    -
    -  350     IF (.NOT. (status.EQ.-1)) GO TO 380
    -          IF (.NOT. (qleft)) GO TO 360
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 370
    -
    -  360     status = 2
    -          bound = inf
    -  370     CONTINUE
    -  380     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          xn = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,xn,fx,qleft,qhi)
    -  390     IF (.NOT. (status.EQ.1)) GO TO 420
    -          CALL cumnbn(s,xn,pr,ompr,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 400
    -          fx = cum - p
    -          GO TO 410
    -
    -  400     fx = ccum - q
    -  410     CALL dinvr(status,xn,fx,qleft,qhi)
    -          GO TO 390
    -
    -  420     IF (.NOT. (status.EQ.-1)) GO TO 450
    -          IF (.NOT. (qleft)) GO TO 430
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 440
    -
    -  430     status = 2
    -          bound = inf
    -  440     CONTINUE
    -  450     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          CALL dstzr(0.0D0,1.0D0,atol,tol)
    -          IF (.NOT. (qporq)) GO TO 480
    -          status = 0
    -          CALL dzror(status,pr,fx,xlo,xhi,qleft,qhi)
    -          ompr = one - pr
    -  460     IF (.NOT. (status.EQ.1)) GO TO 470
    -          CALL cumnbn(s,xn,pr,ompr,cum,ccum)
    -          fx = cum - p
    -          CALL dzror(status,pr,fx,xlo,xhi,qleft,qhi)
    -          ompr = one - pr
    -          GO TO 460
    -
    -  470     GO TO 510
    -
    -  480     status = 0
    -          CALL dzror(status,ompr,fx,xlo,xhi,qleft,qhi)
    -          pr = one - ompr
    -  490     IF (.NOT. (status.EQ.1)) GO TO 500
    -          CALL cumnbn(s,xn,pr,ompr,cum,ccum)
    -          fx = ccum - q
    -          CALL dzror(status,ompr,fx,xlo,xhi,qleft,qhi)
    -          pr = one - ompr
    -          GO TO 490
    -
    -  500     CONTINUE
    -  510     IF (.NOT. (status.EQ.-1)) GO TO 540
    -          IF (.NOT. (qleft)) GO TO 520
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 530
    -
    -  520     status = 2
    -          bound = 1.0D0
    -  530     CONTINUE
    -  540 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfnor.f b/scipy-0.10.1/scipy/special/cdflib/cdfnor.f
    deleted file mode 100644
    index f610d8504c..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfnor.f
    +++ /dev/null
    @@ -1,188 +0,0 @@
    -      SUBROUTINE cdfnor(which,p,q,x,mean,sd,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFNOR( WHICH, P, Q, X, MEAN, SD, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               NORmal distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the normal
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH  --> Integer indicating  which of the  next  parameter
    -C     values is to be calculated using values  of the others.
    -C     Legal range: 1..4
    -C               iwhich = 1 : Calculate P and Q from X,MEAN and SD
    -C               iwhich = 2 : Calculate X from P,Q,MEAN and SD
    -C               iwhich = 3 : Calculate MEAN from P,Q,X and SD
    -C               iwhich = 4 : Calculate SD from P,Q,X and MEAN
    -C                    INTEGER WHICH
    -C
    -C     P <--> The integral from -infinity to X of the normal density.
    -C            Input range: (0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: (0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C     X < --> Upper limit of integration of the normal-density.
    -C             Input range: ( -infinity, +infinity)
    -C                    DOUBLE PRECISION X
    -C
    -C     MEAN <--> The mean of the normal density.
    -C               Input range: (-infinity, +infinity)
    -C                    DOUBLE PRECISION MEAN
    -C
    -C     SD <--> Standard Deviation of the normal density.
    -C             Input range: (0, +infinity).
    -C                    DOUBLE PRECISION SD
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C
    -C
    -C     A slightly modified version of ANORM from
    -C
    -C     Cody, W.D. (1993). "ALGORITHM 715: SPECFUN - A Portabel FORTRAN
    -C     Package of Special Function Routines and Test Drivers"
    -C     acm Transactions on Mathematical Software. 19, 22-32.
    -C
    -C     is used to calulate the  cumulative standard normal distribution.
    -C
    -C     The rational functions from pages  90-95  of Kennedy and Gentle,
    -C     Statistical  Computing,  Marcel  Dekker, NY,  1980 are  used  as
    -C     starting values to Newton's Iterations which compute the inverse
    -C     standard normal.  Therefore no  searches  are necessary for  any
    -C     parameter.
    -C
    -C     For X < -15, the asymptotic expansion for the normal is used  as
    -C     the starting value in finding the inverse standard normal.
    -C     This is formula 26.2.12 of Abramowitz and Stegun.
    -C
    -C
    -C                              Note
    -C
    -C
    -C      The normal density is proportional to
    -C      exp( - 0.5 * (( X - MEAN)/SD)**2)
    -C
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,mean,p,q,sd,x
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION pq,z
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION dinvnr,spmpar
    -      EXTERNAL dinvnr,spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumnor
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      status = 0
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 4.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LE.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LE.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.1) GO TO 150
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 140
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      GO TO 130
    -
    -  120 bound = 1.0D0
    -  130 status = 3
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.4) GO TO 170
    -      IF (.NOT. (sd.LE.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      status = -6
    -      RETURN
    -
    -  160 CONTINUE
    -  170 IF ((1).EQ. (which)) THEN
    -          z = (x-mean)/sd
    -          CALL cumnor(z,p,q)
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          z = dinvnr(p,q)
    -          x = sd*z + mean
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          z = dinvnr(p,q)
    -          mean = x - sd*z
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          z = dinvnr(p,q)
    -          sd = (x-mean)/z
    -      END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdfpoi.f b/scipy-0.10.1/scipy/special/cdflib/cdfpoi.f
    deleted file mode 100644
    index 7bcb20cc81..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdfpoi.f
    +++ /dev/null
    @@ -1,223 +0,0 @@
    -      SUBROUTINE cdfpoi(which,p,q,s,xlam,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFPOI( WHICH, P, Q, S, XLAM, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C               POIsson distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the Poisson
    -C     distribution given values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which  argument
    -C               value is to be calculated from the others.
    -C               Legal range: 1..3
    -C               iwhich = 1 : Calculate P and Q from S and XLAM
    -C               iwhich = 2 : Calculate A from P,Q and XLAM
    -C               iwhich = 3 : Calculate XLAM from P,Q and S
    -C                    INTEGER WHICH
    -C
    -C        P <--> The cumulation from 0 to S of the poisson density.
    -C               Input range: [0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C        Q <--> 1-P.
    -C               Input range: (0, 1].
    -C               P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C        S <--> Upper limit of cumulation of the Poisson.
    -C               Input range: [0, +infinity).
    -C               Search range: [0,1E100]
    -C                    DOUBLE PRECISION S
    -C
    -C     XLAM <--> Mean of the Poisson distribution.
    -C               Input range: [0, +infinity).
    -C               Search range: [0,1E100]
    -C                    DOUBLE PRECISION XLAM
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula   26.4.21  of   Abramowitz  and   Stegun,   Handbook  of
    -C     Mathematical Functions (1966) is used  to reduce the computation
    -C     of  the cumulative distribution function to that  of computing a
    -C     chi-square, hence an incomplete gamma function.
    -C
    -C     Cumulative  distribution function  (P) is  calculated  directly.
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired value of  P.   The  search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION inf
    -      PARAMETER (inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,p,q,s,xlam
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,pq
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumpoi,dinvr,dstinv
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.3))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 3.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.2) GO TO 130
    -      IF (.NOT. (s.LT.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -4
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.3) GO TO 150
    -      IF (.NOT. (xlam.LT.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  140 CONTINUE
    -  150 IF (which.EQ.1) GO TO 190
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 180
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 160
    -      bound = 0.0D0
    -      GO TO 170
    -
    -  160 bound = 1.0D0
    -  170 status = 3
    -      RETURN
    -
    -  180 CONTINUE
    -  190 IF (.NOT. (which.EQ.1)) qporq = p .LE. q
    -      IF ((1).EQ. (which)) THEN
    -          CALL cumpoi(s,xlam,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          s = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,s,fx,qleft,qhi)
    -  200     IF (.NOT. (status.EQ.1)) GO TO 230
    -          CALL cumpoi(s,xlam,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 210
    -          fx = cum - p
    -          GO TO 220
    -
    -  210     fx = ccum - q
    -  220     CALL dinvr(status,s,fx,qleft,qhi)
    -          GO TO 200
    -
    -  230     IF (.NOT. (status.EQ.-1)) GO TO 260
    -          IF (.NOT. (qleft)) GO TO 240
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 250
    -
    -  240     status = 2
    -          bound = inf
    -  250     CONTINUE
    -  260     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          xlam = 5.0D0
    -          CALL dstinv(0.0D0,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,xlam,fx,qleft,qhi)
    -  270     IF (.NOT. (status.EQ.1)) GO TO 300
    -          CALL cumpoi(s,xlam,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 280
    -          fx = cum - p
    -          GO TO 290
    -
    -  280     fx = ccum - q
    -  290     CALL dinvr(status,xlam,fx,qleft,qhi)
    -          GO TO 270
    -
    -  300     IF (.NOT. (status.EQ.-1)) GO TO 330
    -          IF (.NOT. (qleft)) GO TO 310
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 320
    -
    -  310     status = 2
    -          bound = inf
    -  320     CONTINUE
    -  330 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdft.f b/scipy-0.10.1/scipy/special/cdflib/cdft.f
    deleted file mode 100644
    index af5c421fdb..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdft.f
    +++ /dev/null
    @@ -1,218 +0,0 @@
    -      SUBROUTINE cdft(which,p,q,t,df,status,bound)
    -C**********************************************************************
    -C
    -C      SUBROUTINE CDFT( WHICH, P, Q, T, DF, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C                         T distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates any one parameter of the t distribution given
    -C     values for the others.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     WHICH --> Integer indicating which  argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..3
    -C               iwhich = 1 : Calculate P and Q from T and DF
    -C               iwhich = 2 : Calculate T from P,Q and DF
    -C               iwhich = 3 : Calculate DF from P,Q and T
    -C                    INTEGER WHICH
    -C
    -C        P <--> The integral from -infinity to t of the t-density.
    -C              Input range: (0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: (0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C        T <--> Upper limit of integration of the t-density.
    -C               Input range: ( -infinity, +infinity).
    -C               Search range: [ -1E100, 1E100 ]
    -C                    DOUBLE PRECISION T
    -C
    -C        DF <--> Degrees of freedom of the t-distribution.
    -C                Input range: (0 , +infinity).
    -C                Search range: [1e-100, 1E10]
    -C                    DOUBLE PRECISION DF
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.5.27  of   Abramowitz   and  Stegun,   Handbook   of
    -C     Mathematical Functions  (1966) is used to reduce the computation
    -C     of the cumulative distribution function to that of an incomplete
    -C     beta.
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,inf
    -      PARAMETER (zero=1.0D-100,inf=1.0D100)
    -      DOUBLE PRECISION rtinf
    -      PARAMETER (rtinf=1.0D100)
    -      DOUBLE PRECISION maxdf
    -      PARAMETER (maxdf=1.0d10)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,df,p,q,t
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx,pq
    -      LOGICAL qhi,qleft,qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION dt1,spmpar
    -      EXTERNAL dt1,spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumt,dinvr,dstinv
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.3))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 3.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LE.0.0D0).OR. (p.GT.1.0D0))) GO TO 60
    -      IF (.NOT. (p.LE.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = 1.0D0
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.1) GO TO 110
    -      IF (.NOT. ((q.LE.0.0D0).OR. (q.GT.1.0D0))) GO TO 100
    -      IF (.NOT. (q.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      GO TO 90
    -
    -   80 bound = 1.0D0
    -   90 status = -3
    -      RETURN
    -
    -  100 CONTINUE
    -  110 IF (which.EQ.3) GO TO 130
    -      IF (.NOT. (df.LE.0.0D0)) GO TO 120
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -  120 CONTINUE
    -  130 IF (which.EQ.1) GO TO 170
    -      pq = p + q
    -      IF (.NOT. (abs(((pq)-0.5D0)-0.5D0).GT.
    -     +    (3.0D0*spmpar(1)))) GO TO 160
    -      IF (.NOT. (pq.LT.0.0D0)) GO TO 140
    -      bound = 0.0D0
    -      GO TO 150
    -
    -  140 bound = 1.0D0
    -  150 status = 3
    -      RETURN
    -
    -  160 CONTINUE
    -  170 IF (.NOT. (which.EQ.1)) qporq = p .LE. q
    -      IF ((1).EQ. (which)) THEN
    -          CALL cumt(t,df,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          t = dt1(p,q,df)
    -          CALL dstinv(-rtinf,rtinf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,t,fx,qleft,qhi)
    -  180     IF (.NOT. (status.EQ.1)) GO TO 210
    -          CALL cumt(t,df,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 190
    -          fx = cum - p
    -          GO TO 200
    -
    -  190     fx = ccum - q
    -  200     CALL dinvr(status,t,fx,qleft,qhi)
    -          GO TO 180
    -
    -  210     IF (.NOT. (status.EQ.-1)) GO TO 240
    -          IF (.NOT. (qleft)) GO TO 220
    -          status = 1
    -          bound = -rtinf
    -          GO TO 230
    -
    -  220     status = 2
    -          bound = rtinf
    -  230     CONTINUE
    -  240     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          df = 5.0D0
    -          CALL dstinv(zero,maxdf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,df,fx,qleft,qhi)
    -  250     IF (.NOT. (status.EQ.1)) GO TO 280
    -          CALL cumt(t,df,cum,ccum)
    -          IF (.NOT. (qporq)) GO TO 260
    -          fx = cum - p
    -          GO TO 270
    -
    -  260     fx = ccum - q
    -  270     CALL dinvr(status,df,fx,qleft,qhi)
    -          GO TO 250
    -
    -  280     IF (.NOT. (status.EQ.-1)) GO TO 310
    -          IF (.NOT. (qleft)) GO TO 290
    -          status = 1
    -          bound = zero
    -          GO TO 300
    -
    -  290     status = 2
    -          bound = maxdf
    -  300     CONTINUE
    -  310 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cdftnc.f b/scipy-0.10.1/scipy/special/cdflib/cdftnc.f
    deleted file mode 100644
    index a01ffd8962..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cdftnc.f
    +++ /dev/null
    @@ -1,197 +0,0 @@
    -      SUBROUTINE cdftnc(which,p,q,t,df,pnonc,status,bound)
    -C***********************************************************************
    -C
    -C      SUBROUTINE CDFTNC( WHICH, P, Q, T, DF, PNONC, STATUS, BOUND )
    -C               Cumulative Distribution Function
    -C                  Non-Central T distribution
    -C
    -C                               Function
    -C
    -C     Calculates any one parameter of the noncentral t distribution give
    -C     values for the others.
    -C
    -C                               Arguments
    -C
    -C     WHICH --> Integer indicating which  argument
    -C               values is to be calculated from the others.
    -C               Legal range: 1..3
    -C               iwhich = 1 : Calculate P and Q from T,DF,PNONC
    -C               iwhich = 2 : Calculate T from P,Q,DF,PNONC
    -C               iwhich = 3 : Calculate DF from P,Q,T
    -C               iwhich = 4 : Calculate PNONC from P,Q,DF,T
    -C                    INTEGER WHICH
    -C
    -C        P <--> The integral from -infinity to t of the noncentral t-den
    -C              Input range: (0,1].
    -C                    DOUBLE PRECISION P
    -C
    -C     Q <--> 1-P.
    -C            Input range: (0, 1].
    -C            P + Q = 1.0.
    -C                    DOUBLE PRECISION Q
    -C
    -C        T <--> Upper limit of integration of the noncentral t-density.
    -C               Input range: ( -infinity, +infinity).
    -C               Search range: [ -1E100, 1E100 ]
    -C                    DOUBLE PRECISION T
    -C
    -C        DF <--> Degrees of freedom of the noncentral t-distribution.
    -C                Input range: (0 , +infinity).
    -C                Search range: [1e-100, 1E10]
    -C                    DOUBLE PRECISION DF
    -C
    -C     PNONC <--> Noncentrality parameter of the noncentral t-distributio
    -C                Input range: [-infinity , +infinity).
    -C                Search range: [-1e4, 1E4]
    -C
    -C     STATUS <-- 0 if calculation completed correctly
    -C               -I if input parameter number I is out of range
    -C                1 if answer appears to be lower than lowest
    -C                  search bound
    -C                2 if answer appears to be higher than greatest
    -C                  search bound
    -C                3 if P + Q .ne. 1
    -C                    INTEGER STATUS
    -C
    -C     BOUND <-- Undefined if STATUS is 0
    -C
    -C               Bound exceeded by parameter number I if STATUS
    -C               is negative.
    -C
    -C               Lower search bound if STATUS is 1.
    -C
    -C               Upper search bound if STATUS is 2.
    -C
    -C                                Method
    -C
    -C     Upper tail    of  the  cumulative  noncentral t is calculated usin
    -C     formulae  from page 532  of Johnson, Kotz,  Balakrishnan, Coninuou
    -C     Univariate Distributions, Vol 2, 2nd Edition.  Wiley (1995)
    -C
    -C     Computation of other parameters involve a seach for a value that
    -C     produces  the desired  value  of P.   The search relies  on  the
    -C     monotinicity of P with the other parameter.
    -C
    -C***********************************************************************
    -C     .. Parameters ..
    -      DOUBLE PRECISION tent4
    -      PARAMETER (tent4=1.0D4)
    -      DOUBLE PRECISION tol
    -      PARAMETER (tol=1.0D-8)
    -      DOUBLE PRECISION atol
    -      PARAMETER (atol=1.0D-50)
    -      DOUBLE PRECISION zero,one,inf
    -      PARAMETER (zero=1.0D-100,one=1.0D0-1.0D-16,inf=1.0D100)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION bound,df,p,pnonc,q,t
    -      INTEGER status,which
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ccum,cum,fx
    -      LOGICAL qhi,qleft
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumtnc,dinvr,dstinv
    -C     ..
    -      IF (.NOT. ((which.LT.1).OR. (which.GT.4))) GO TO 30
    -      IF (.NOT. (which.LT.1)) GO TO 10
    -      bound = 1.0D0
    -      GO TO 20
    -
    -   10 bound = 5.0D0
    -   20 status = -1
    -      RETURN
    -
    -   30 IF (which.EQ.1) GO TO 70
    -      IF (.NOT. ((p.LT.0.0D0).OR. (p.GT.one))) GO TO 60
    -      IF (.NOT. (p.LT.0.0D0)) GO TO 40
    -      bound = 0.0D0
    -      GO TO 50
    -
    -   40 bound = one
    -   50 status = -2
    -      RETURN
    -
    -   60 CONTINUE
    -   70 IF (which.EQ.3) GO TO 90
    -      IF (.NOT. (df.LE.0.0D0)) GO TO 80
    -      bound = 0.0D0
    -      status = -5
    -      RETURN
    -
    -   80 CONTINUE
    -   90 IF (which.EQ.4) GO TO 100
    -  100 IF ((1).EQ. (which)) THEN
    -          CALL cumtnc(t,df,pnonc,p,q)
    -          status = 0
    -
    -      ELSE IF ((2).EQ. (which)) THEN
    -          t = 5.0D0
    -          CALL dstinv(-inf,inf,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,t,fx,qleft,qhi)
    -  110     IF (.NOT. (status.EQ.1)) GO TO 120
    -          CALL cumtnc(t,df,pnonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,t,fx,qleft,qhi)
    -          GO TO 110
    -
    -  120     IF (.NOT. (status.EQ.-1)) GO TO 150
    -          IF (.NOT. (qleft)) GO TO 130
    -          status = 1
    -          bound = -inf
    -          GO TO 140
    -
    -  130     status = 2
    -          bound = inf
    -  140     CONTINUE
    -  150     CONTINUE
    -
    -      ELSE IF ((3).EQ. (which)) THEN
    -          df = 5.0D0
    -          CALL dstinv(zero,tent4,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,df,fx,qleft,qhi)
    -  160     IF (.NOT. (status.EQ.1)) GO TO 170
    -          CALL cumtnc(t,df,pnonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,df,fx,qleft,qhi)
    -          GO TO 160
    -
    -  170     IF (.NOT. (status.EQ.-1)) GO TO 200
    -          IF (.NOT. (qleft)) GO TO 180
    -          status = 1
    -          bound = zero
    -          GO TO 190
    -
    -  180     status = 2
    -          bound = inf
    -  190     CONTINUE
    -  200     CONTINUE
    -
    -      ELSE IF ((4).EQ. (which)) THEN
    -          pnonc = 5.0D0
    -          CALL dstinv(-tent4,tent4,0.5D0,0.5D0,5.0D0,atol,tol)
    -          status = 0
    -          CALL dinvr(status,pnonc,fx,qleft,qhi)
    -  210     IF (.NOT. (status.EQ.1)) GO TO 220
    -          CALL cumtnc(t,df,pnonc,cum,ccum)
    -          fx = cum - p
    -          CALL dinvr(status,pnonc,fx,qleft,qhi)
    -          GO TO 210
    -
    -  220     IF (.NOT. (status.EQ.-1)) GO TO 250
    -          IF (.NOT. (qleft)) GO TO 230
    -          status = 1
    -          bound = 0.0D0
    -          GO TO 240
    -
    -  230     status = 2
    -          bound = tent4
    -  240     CONTINUE
    -  250 END IF
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumbet.f b/scipy-0.10.1/scipy/special/cdflib/cumbet.f
    deleted file mode 100644
    index b7a1242ca2..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumbet.f
    +++ /dev/null
    @@ -1,78 +0,0 @@
    -      SUBROUTINE cumbet(x,y,a,b,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMBET(X,Y,A,B,CUM,CCUM)
    -C          Double precision cUMulative incomplete BETa distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates the cdf to X of the incomplete beta distribution
    -C     with parameters a and b.  This is the integral from 0 to x
    -C     of (1/B(a,b))*f(t)) where f(t) = t**(a-1) * (1-t)**(b-1)
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     X --> Upper limit of integration.
    -C                                        X is DOUBLE PRECISION
    -C
    -C     Y --> 1 - X.
    -C                                        Y is DOUBLE PRECISION
    -C
    -C     A --> First parameter of the beta distribution.
    -C                                        A is DOUBLE PRECISION
    -C
    -C     B --> Second parameter of the beta distribution.
    -C                                        B is DOUBLE PRECISION
    -C
    -C     CUM <-- Cumulative incomplete beta distribution.
    -C                                        CUM is DOUBLE PRECISION
    -C
    -C     CCUM <-- Compliment of Cumulative incomplete beta distribution.
    -C                                        CCUM is DOUBLE PRECISION
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Calls the routine BRATIO.
    -C
    -C                                   References
    -C
    -C     Didonato, Armido R. and Morris, Alfred H. Jr. (1992) Algorithim
    -C     708 Significant Digit Computation of the Incomplete Beta Function
    -C     Ratios. ACM ToMS, Vol.18, No. 3, Sept. 1992, 360-373.
    -C
    -C**********************************************************************
    -
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x,y,a,b,cum,ccum
    -C     ..
    -C     .. Local Scalars ..
    -      INTEGER ierr
    -C     ..
    -C     .. External Routines ..
    -      EXTERNAL bratio
    -C     ..
    -C     .. Executable Statements ..
    -      IF (.NOT. (x.LE.0.0D0)) GO TO 10
    -      cum = 0.0D0
    -      ccum = 1.0D0
    -      RETURN
    -
    -   10 IF (.NOT. (y.LE.0.0D0)) GO TO 20
    -      cum = 1.0D0
    -      ccum = 0.0D0
    -      RETURN
    -
    -   20 CALL bratio(a,b,x,y,cum,ccum,ierr)
    -
    -C     Call bratio routine
    -
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumbin.f b/scipy-0.10.1/scipy/special/cdflib/cumbin.f
    deleted file mode 100644
    index 7fc721e57f..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumbin.f
    +++ /dev/null
    @@ -1,61 +0,0 @@
    -      SUBROUTINE cumbin(s,xn,pr,ompr,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMBIN(S,XN,PBIN,OMPR,CUM,CCUM)
    -C                    CUmulative BINomial distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns the probability   of 0  to  S  successes in  XN   binomial
    -C     trials, each of which has a probability of success, PBIN.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     S --> The upper limit of cumulation of the binomial distribution.
    -C                                                  S is DOUBLE PRECISION
    -C
    -C     XN --> The number of binomial trials.
    -C                                                  XN is DOUBLE PRECISIO
    -C
    -C     PBIN --> The probability of success in each binomial trial.
    -C                                                  PBIN is DOUBLE PRECIS
    -C
    -C     OMPR --> 1 - PBIN
    -C                                                  OMPR is DOUBLE PRECIS
    -C
    -C     CUM <-- Cumulative binomial distribution.
    -C                                                  CUM is DOUBLE PRECISI
    -C
    -C     CCUM <-- Compliment of Cumulative binomial distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.5.24    of   Abramowitz  and    Stegun,  Handbook   of
    -C     Mathematical   Functions (1966) is   used  to reduce the  binomial
    -C     distribution  to  the  cumulative    beta distribution.
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION pr,ompr,s,xn,cum,ccum
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumbet
    -C     ..
    -C     .. Executable Statements ..
    -      IF (.NOT. (s.LT.xn)) GO TO 10
    -      CALL cumbet(pr,ompr,s+1.0D0,xn-s,ccum,cum)
    -      GO TO 20
    -
    -   10 cum = 1.0D0
    -      ccum = 0.0D0
    -   20 RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumchi.f b/scipy-0.10.1/scipy/special/cdflib/cumchi.f
    deleted file mode 100644
    index 5471b81209..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumchi.f
    +++ /dev/null
    @@ -1,53 +0,0 @@
    -      SUBROUTINE cumchi(x,df,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE FUNCTION CUMCHI(X,DF,CUM,CCUM)
    -C             CUMulative of the CHi-square distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Calculates the cumulative chi-square distribution.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     X       --> Upper limit of integration of the
    -C                 chi-square distribution.
    -C                                                 X is DOUBLE PRECISION
    -C
    -C     DF      --> Degrees of freedom of the
    -C                 chi-square distribution.
    -C                                                 DF is DOUBLE PRECISION
    -C
    -C     CUM <-- Cumulative chi-square distribution.
    -C                                                 CUM is DOUBLE PRECISIO
    -C
    -C     CCUM <-- Compliment of Cumulative chi-square distribution.
    -C                                                 CCUM is DOUBLE PRECISI
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Calls incomplete gamma function (CUMGAM)
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION df,x,cum,ccum
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a,xx
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumgam
    -C     ..
    -C     .. Executable Statements ..
    -      a = df*0.5D0
    -      xx = x*0.5D0
    -      CALL cumgam(xx,a,cum,ccum)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumchn.f b/scipy-0.10.1/scipy/special/cdflib/cumchn.f
    deleted file mode 100644
    index f2fbbb68e3..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumchn.f
    +++ /dev/null
    @@ -1,220 +0,0 @@
    -      SUBROUTINE cumchn(x,df,pnonc,cum,ccum)
    -C***********************************************************************
    -C
    -C     SUBROUTINE CUMCHN(X,DF,PNONC,CUM,CCUM)
    -C             CUMulative of the Non-central CHi-square distribution
    -C
    -C                               Function
    -C
    -C     Calculates     the       cumulative      non-central    chi-square
    -C     distribution, i.e.,  the probability   that  a   random   variable
    -C     which    follows  the  non-central chi-square  distribution,  with
    -C     non-centrality  parameter    PNONC  and   continuous  degrees   of
    -C     freedom DF, is less than or equal to X.
    -C
    -C                              Arguments
    -C
    -C     X       --> Upper limit of integration of the non-central
    -C                 chi-square distribution.
    -C                                                 X is DOUBLE PRECISION
    -C
    -C     DF      --> Degrees of freedom of the non-central
    -C                 chi-square distribution.
    -C                                                 DF is DOUBLE PRECISION
    -C
    -C     PNONC   --> Non-centrality parameter of the non-central
    -C                 chi-square distribution.
    -C                                                 PNONC is DOUBLE PRECIS
    -C
    -C     CUM <-- Cumulative non-central chi-square distribution.
    -C                                                 CUM is DOUBLE PRECISIO
    -C
    -C     CCUM <-- Compliment of Cumulative non-central chi-square distribut
    -C                                                 CCUM is DOUBLE PRECISI
    -C
    -C
    -C                                Method
    -C
    -C     Uses  formula  26.4.25   of  Abramowitz  and  Stegun, Handbook  of
    -C     Mathematical    Functions,  US   NBS   (1966)    to calculate  the
    -C     non-central chi-square.
    -C
    -C                                Variables
    -C
    -C     EPS     --- Convergence criterion.  The sum stops when a
    -C                 term is less than EPS*SUM.
    -C                                                 EPS is DOUBLE PRECISIO
    -C
    -C     CCUM <-- Compliment of Cumulative non-central
    -C              chi-square distribution.
    -C                                                 CCUM is DOUBLE PRECISI
    -C
    -C***********************************************************************
    -C
    -C
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION ccum,cum,df,pnonc,x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION adj,centaj,centwt,chid2,dfd2,eps,lcntaj,lcntwt,
    -     +                 lfact,pcent,pterm,sum,sumadj,term,wt,xnonc,xx
    -      INTEGER i,icent
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION alngam
    -      EXTERNAL alngam
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumchi
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble,exp,int,log
    -C     ..
    -C     .. Statement Functions ..
    -      DOUBLE PRECISION dg
    -      LOGICAL qsmall
    -C     ..
    -C     .. Data statements ..
    -      DATA eps/1.0D-5/
    -C     ..
    -C     .. Statement Function definitions ..
    -      qsmall(xx) = sum .LT. 1.0D-20 .OR. xx .LT. eps*sum
    -      dg(i) = df + 2.0D0*dble(i)
    -C     ..
    -C
    -      IF (.NOT. (x.LE.0.0D0)) GO TO 10
    -      cum = 0.0D0
    -      ccum = 1.0D0
    -      RETURN
    -
    -   10 IF (.NOT. (pnonc.LE.1.0D-10)) GO TO 20
    -C
    -C
    -C     When non-centrality parameter is (essentially) zero,
    -C     use cumulative chi-square distribution
    -C
    -C
    -      CALL cumchi(x,df,cum,ccum)
    -      RETURN
    -
    -   20 xnonc = pnonc/2.0D0
    -C***********************************************************************
    -C
    -C     The following code calcualtes the weight, chi-square, and
    -C     adjustment term for the central term in the infinite series.
    -C     The central term is the one in which the poisson weight is
    -C     greatest.  The adjustment term is the amount that must
    -C     be subtracted from the chi-square to move up two degrees
    -C     of freedom.
    -C
    -C***********************************************************************
    -      icent = int(xnonc)
    -      IF (icent.EQ.0) icent = 1
    -      chid2 = x/2.0D0
    -C
    -C
    -C     Calculate central weight term
    -C
    -C
    -      lfact = alngam(dble(icent+1))
    -      lcntwt = -xnonc + icent*log(xnonc) - lfact
    -      centwt = exp(lcntwt)
    -C
    -C
    -C     Calculate central chi-square
    -C
    -C
    -      CALL cumchi(x,dg(icent),pcent,ccum)
    -C
    -C
    -C     Calculate central adjustment term
    -C
    -C
    -      dfd2 = dg(icent)/2.0D0
    -      lfact = alngam(1.0D0+dfd2)
    -      lcntaj = dfd2*log(chid2) - chid2 - lfact
    -      centaj = exp(lcntaj)
    -      sum = centwt*pcent
    -C***********************************************************************
    -C
    -C     Sum backwards from the central term towards zero.
    -C     Quit whenever either
    -C     (1) the zero term is reached, or
    -C     (2) the term gets small relative to the sum, or
    -C
    -C***********************************************************************
    -      sumadj = 0.0D0
    -      adj = centaj
    -      wt = centwt
    -      i = icent
    -C
    -      GO TO 40
    -
    -   30 IF (qsmall(term) .OR. i.EQ.0) GO TO 50
    -   40 dfd2 = dg(i)/2.0D0
    -C
    -C
    -C     Adjust chi-square for two fewer degrees of freedom.
    -C     The adjusted value ends up in PTERM.
    -C
    -C
    -      adj = adj*dfd2/chid2
    -      sumadj = sumadj + adj
    -      pterm = pcent + sumadj
    -C
    -C
    -C     Adjust poisson weight for J decreased by one
    -C
    -C
    -      wt = wt* (i/xnonc)
    -      term = wt*pterm
    -      sum = sum + term
    -      i = i - 1
    -      GO TO 30
    -
    -   50 sumadj = centaj
    -C***********************************************************************
    -C
    -C     Now sum forward from the central term towards infinity.
    -C     Quit when either
    -C     (1) the term gets small relative to the sum, or
    -C
    -C***********************************************************************
    -      adj = centaj
    -      wt = centwt
    -      i = icent
    -C
    -      GO TO 70
    -
    -   60 IF (qsmall(term)) GO TO 80
    -C
    -C
    -C     Update weights for next higher J
    -C
    -C
    -   70 wt = wt* (xnonc/ (i+1))
    -C
    -C
    -C     Calculate PTERM and add term to sum
    -C
    -C
    -      pterm = pcent - sumadj
    -      term = wt*pterm
    -      sum = sum + term
    -C
    -C
    -C     Update adjustment term for DF for next iteration
    -C
    -C
    -      i = i + 1
    -      dfd2 = dg(i)/2.0D0
    -      adj = adj*chid2/dfd2
    -      sumadj = sumadj + adj
    -      GO TO 60
    -
    -   80 cum = sum
    -      ccum = 0.5D0 + (0.5D0-cum)
    -C
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumf.f b/scipy-0.10.1/scipy/special/cdflib/cumf.f
    deleted file mode 100644
    index 84c758bab8..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumf.f
    +++ /dev/null
    @@ -1,93 +0,0 @@
    -      SUBROUTINE cumf(f,dfn,dfd,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMF(F,DFN,DFD,CUM,CCUM)
    -C                    CUMulative F distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Computes  the  integral from  0  to  F of  the f-density  with DFN
    -C     and DFD degrees of freedom.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     F --> Upper limit of integration of the f-density.
    -C                                                  F is DOUBLE PRECISION
    -C
    -C     DFN --> Degrees of freedom of the numerator sum of squares.
    -C                                                  DFN is DOUBLE PRECISI
    -C
    -C     DFD --> Degrees of freedom of the denominator sum of squares.
    -C                                                  DFD is DOUBLE PRECISI
    -C
    -C     CUM <-- Cumulative f distribution.
    -C                                                  CUM is DOUBLE PRECISI
    -C
    -C     CCUM <-- Compliment of Cumulative f distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.5.28 of  Abramowitz and   Stegun   is  used to  reduce
    -C     the cumulative F to a cumulative beta distribution.
    -C
    -C
    -C                              Note
    -C
    -C
    -C     If F is less than or equal to 0, 0 is returned.
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION dfd,dfn,f,cum,ccum
    -C     ..
    -C     .. Local Scalars ..
    -
    -      DOUBLE PRECISION dsum,prod,xx,yy
    -      INTEGER ierr
    -C     ..
    -C     .. Parameters ..
    -      DOUBLE PRECISION half
    -      PARAMETER (half=0.5D0)
    -      DOUBLE PRECISION done
    -      PARAMETER (done=1.0D0)
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL bratio
    -C     ..
    -C     .. Executable Statements ..
    -
    -      IF (.NOT. (f.LE.0.0D0)) GO TO 10
    -      cum = 0.0D0
    -      ccum = 1.0D0
    -      RETURN
    -
    -   10 prod = dfn*f
    -C
    -C     XX is such that the incomplete beta with parameters
    -C     DFD/2 and DFN/2 evaluated at XX is 1 - CUM or CCUM
    -C
    -C     YY is 1 - XX
    -C
    -C     Calculate the smaller of XX and YY accurately
    -C
    -      dsum = dfd + prod
    -      xx = dfd/dsum
    -      IF (xx.GT.half) THEN
    -          yy = prod/dsum
    -          xx = done - yy
    -
    -      ELSE
    -          yy = done - xx
    -      END IF
    -
    -      CALL bratio(dfd*half,dfn*half,xx,yy,ccum,cum,ierr)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumfnc.f b/scipy-0.10.1/scipy/special/cdflib/cumfnc.f
    deleted file mode 100644
    index 6d86754459..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumfnc.f
    +++ /dev/null
    @@ -1,189 +0,0 @@
    -      SUBROUTINE cumfnc(f,dfn,dfd,pnonc,cum,ccum)
    -C**********************************************************************
    -C
    -C               F -NON- -C-ENTRAL F DISTRIBUTION
    -C
    -C
    -C
    -C                              Function
    -C
    -C
    -C     COMPUTES NONCENTRAL F DISTRIBUTION WITH DFN AND DFD
    -C     DEGREES OF FREEDOM AND NONCENTRALITY PARAMETER PNONC
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     X --> UPPER LIMIT OF INTEGRATION OF NONCENTRAL F IN EQUATION
    -C
    -C     DFN --> DEGREES OF FREEDOM OF NUMERATOR
    -C
    -C     DFD -->  DEGREES OF FREEDOM OF DENOMINATOR
    -C
    -C     PNONC --> NONCENTRALITY PARAMETER.
    -C
    -C     CUM <-- CUMULATIVE NONCENTRAL F DISTRIBUTION
    -C
    -C     CCUM <-- COMPLIMENT OF CUMMULATIVE
    -C
    -C
    -C                              Method
    -C
    -C
    -C     USES FORMULA 26.6.20 OF REFERENCE FOR INFINITE SERIES.
    -C     SERIES IS CALCULATED BACKWARD AND FORWARD FROM J = LAMBDA/2
    -C     (THIS IS THE TERM WITH THE LARGEST POISSON WEIGHT) UNTIL
    -C     THE CONVERGENCE CRITERION IS MET.
    -C
    -C     FOR SPEED, THE INCOMPLETE BETA FUNCTIONS ARE EVALUATED
    -C     BY FORMULA 26.5.16.
    -C
    -C
    -C               REFERENCE
    -C
    -C
    -C     HANDBOOD OF MATHEMATICAL FUNCTIONS
    -C     EDITED BY MILTON ABRAMOWITZ AND IRENE A. STEGUN
    -C     NATIONAL BUREAU OF STANDARDS APPLIED MATEMATICS SERIES - 55
    -C     MARCH 1965
    -C     P 947, EQUATIONS 26.6.17, 26.6.18
    -C
    -C
    -C                              Note
    -C
    -C
    -C     THE SUM CONTINUES UNTIL A SUCCEEDING TERM IS LESS THAN EPS
    -C     TIMES THE SUM (OR THE SUM IS LESS THAN 1.0E-20).  EPS IS
    -C     SET TO 1.0E-4 IN A DATA STATEMENT WHICH CAN BE CHANGED.
    -C
    -C**********************************************************************
    -
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION dfd,dfn,pnonc,f,cum,ccum
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION dsum,dummy,prod,xx,yy
    -      DOUBLE PRECISION adn,aup,b,betdn,betup,centwt,dnterm,eps,sum,
    -     +                 upterm,xmult,xnonc,x
    -      INTEGER i,icent,ierr
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION alngam
    -      EXTERNAL alngam
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC log,dble,exp
    -C     ..
    -C     .. Statement Functions ..
    -      LOGICAL qsmall
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL bratio,cumf
    -C     ..
    -C     .. Parameters ..
    -      DOUBLE PRECISION half
    -      PARAMETER (half=0.5D0)
    -      DOUBLE PRECISION done
    -      PARAMETER (done=1.0D0)
    -C     ..
    -C     .. Data statements ..
    -      DATA eps/1.0D-4/
    -C     ..
    -C     .. Statement Function definitions ..
    -      qsmall(x) = sum .LT. 1.0D-20 .OR. x .LT. eps*sum
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      IF (.NOT. (f.LE.0.0D0)) GO TO 10
    -      cum = 0.0D0
    -      ccum = 1.0D0
    -      RETURN
    -
    -   10 IF (.NOT. (pnonc.LT.1.0D-10)) GO TO 20
    -C
    -C     Handle case in which the non-centrality parameter is
    -C     (essentially) zero.
    -
    -      CALL cumf(f,dfn,dfd,cum,ccum)
    -      RETURN
    -
    -   20 xnonc = pnonc/2.0D0
    -
    -C     Calculate the central term of the poisson weighting factor.
    -
    -      icent = xnonc
    -      IF (icent.EQ.0) icent = 1
    -
    -C     Compute central weight term
    -
    -      centwt = exp(-xnonc+icent*log(xnonc)-alngam(dble(icent+1)))
    -
    -C     Compute central incomplete beta term
    -C     Assure that minimum of arg to beta and 1 - arg is computed
    -C          accurately.
    -
    -      prod = dfn*f
    -      dsum = dfd + prod
    -      yy = dfd/dsum
    -      IF (yy.GT.half) THEN
    -          xx = prod/dsum
    -          yy = done - xx
    -
    -      ELSE
    -          xx = done - yy
    -      END IF
    -
    -      CALL bratio(dfn*half+dble(icent),dfd*half,xx,yy,betdn,dummy,ierr)
    -      adn = dfn/2.0D0 + dble(icent)
    -      aup = adn
    -      b = dfd/2.0D0
    -      betup = betdn
    -      sum = centwt*betdn
    -
    -C     Now sum terms backward from icent until convergence or all done
    -
    -      xmult = centwt
    -      i = icent
    -      dnterm = exp(alngam(adn+b)-alngam(adn+1.0D0)-alngam(b)+
    -     +         adn*log(xx)+b*log(yy))
    -   30 IF (qsmall(xmult*betdn) .OR. i.LE.0) GO TO 40
    -      xmult = xmult* (i/xnonc)
    -      i = i - 1
    -      adn = adn - 1
    -      dnterm = (adn+1)/ ((adn+b)*xx)*dnterm
    -      betdn = betdn + dnterm
    -      sum = sum + xmult*betdn
    -      GO TO 30
    -
    -   40 i = icent + 1
    -
    -C     Now sum forwards until convergence
    -
    -      xmult = centwt
    -      IF ((aup-1+b).EQ.0) THEN
    -          upterm = exp(-alngam(aup)-alngam(b)+ (aup-1)*log(xx)+
    -     +             b*log(yy))
    -
    -      ELSE
    -          upterm = exp(alngam(aup-1+b)-alngam(aup)-alngam(b)+
    -     +             (aup-1)*log(xx)+b*log(yy))
    -      END IF
    -
    -      GO TO 60
    -
    -   50 IF (qsmall(xmult*betup)) GO TO 70
    -   60 xmult = xmult* (xnonc/i)
    -      i = i + 1
    -      aup = aup + 1
    -      upterm = (aup+b-2.0D0)*xx/ (aup-1)*upterm
    -      betup = betup - upterm
    -      sum = sum + xmult*betup
    -      GO TO 50
    -
    -   70 cum = sum
    -
    -      ccum = 0.5D0 + (0.5D0-cum)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumgam.f b/scipy-0.10.1/scipy/special/cdflib/cumgam.f
    deleted file mode 100644
    index 25484c56cb..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumgam.f
    +++ /dev/null
    @@ -1,61 +0,0 @@
    -      SUBROUTINE cumgam(x,a,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMGAM(X,A,CUM,CCUM)
    -C           Double precision cUMulative incomplete GAMma distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Computes   the  cumulative        of    the     incomplete   gamma
    -C     distribution, i.e., the integral from 0 to X of
    -C          (1/GAM(A))*EXP(-T)*T**(A-1) DT
    -C     where GAM(A) is the complete gamma function of A, i.e.,
    -C          GAM(A) = integral from 0 to infinity of
    -C                    EXP(-T)*T**(A-1) DT
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     X --> The upper limit of integration of the incomplete gamma.
    -C                                                X is DOUBLE PRECISION
    -C
    -C     A --> The shape parameter of the incomplete gamma.
    -C                                                A is DOUBLE PRECISION
    -C
    -C     CUM <-- Cumulative incomplete gamma distribution.
    -C                                        CUM is DOUBLE PRECISION
    -C
    -C     CCUM <-- Compliment of Cumulative incomplete gamma distribution.
    -C                                                CCUM is DOUBLE PRECISIO
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Calls the routine GRATIO.
    -C
    -C**********************************************************************
    -C
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,x,cum,ccum
    -C     ..
    -C     .. External Routines ..
    -      EXTERNAL gratio
    -C     ..
    -C     .. Executable Statements ..
    -      IF (.NOT. (x.LE.0.0D0)) GO TO 10
    -      cum = 0.0D0
    -      ccum = 1.0D0
    -      RETURN
    -
    -   10 CALL gratio(a,x,cum,ccum,0)
    -
    -C     Call gratio routine
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumnbn.f b/scipy-0.10.1/scipy/special/cdflib/cumnbn.f
    deleted file mode 100644
    index 969b14a3ff..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumnbn.f
    +++ /dev/null
    @@ -1,61 +0,0 @@
    -      SUBROUTINE cumnbn(s,xn,pr,ompr,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMNNBN(S,XN,PR,OMPR,CUM,CCUM)
    -C                    CUmulative Negative BINomial distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns the probability that it there will be S or fewer failures
    -C     before there are XN successes, with each binomial trial having
    -C     a probability of success PR.
    -C
    -C     Prob(# failures = S | XN successes, PR)  =
    -C                        ( XN + S - 1 )
    -C                        (            ) * PR^XN * (1-PR)^S
    -C                        (      S     )
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     S --> The number of failures
    -C                                                  S is DOUBLE PRECISION
    -C
    -C     XN --> The number of successes
    -C                                                  XN is DOUBLE PRECISIO
    -C
    -C     PR --> The probability of success in each binomial trial.
    -C                                                  PR is DOUBLE PRECISIO
    -C
    -C     OMPR --> 1 - PR
    -C                                                  OMPR is DOUBLE PRECIS
    -C
    -C     CUM <-- Cumulative negative binomial distribution.
    -C                                                  CUM is DOUBLE PRECISI
    -C
    -C     CCUM <-- Compliment of Cumulative negative binomial distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula  26.5.26    of   Abramowitz  and    Stegun,  Handbook   of
    -C     Mathematical   Functions (1966) is   used  to reduce the  negative
    -C     binomial distribution to the cumulative beta distribution.
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION pr,ompr,s,xn,cum,ccum
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumbet
    -C     ..
    -C     .. Executable Statements ..
    -      CALL cumbet(pr,ompr,xn,s+1.D0,cum,ccum)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumnor.f b/scipy-0.10.1/scipy/special/cdflib/cumnor.f
    deleted file mode 100644
    index 32ada82fb7..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumnor.f
    +++ /dev/null
    @@ -1,213 +0,0 @@
    -      SUBROUTINE cumnor(arg,result,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUINE CUMNOR(X,RESULT,CCUM)
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Computes the cumulative  of    the  normal   distribution,   i.e.,
    -C     the integral from -infinity to x of
    -C          (1/sqrt(2*pi)) exp(-u*u/2) du
    -C
    -C     X --> Upper limit of integration.
    -C                                        X is DOUBLE PRECISION
    -C
    -C     RESULT <-- Cumulative normal distribution.
    -C                                        RESULT is DOUBLE PRECISION
    -C
    -C     CCUM <-- Compliment of Cumulative normal distribution.
    -C                                        CCUM is DOUBLE PRECISION
    -C
    -C
    -C     Renaming of function ANORM from:
    -C
    -C     Cody, W.D. (1993). "ALGORITHM 715: SPECFUN - A Portabel FORTRAN
    -C     Package of Special Function Routines and Test Drivers"
    -C     acm Transactions on Mathematical Software. 19, 22-32.
    -C
    -C     with slight modifications to return ccum and to deal with
    -C     machine constants.
    -C
    -C**********************************************************************
    -C
    -C
    -C Original Comments:
    -C------------------------------------------------------------------
    -C
    -C This function evaluates the normal distribution function:
    -C
    -C                              / x
    -C                     1       |       -t*t/2
    -C          P(x) = ----------- |      e       dt
    -C                 sqrt(2 pi)  |
    -C                             /-oo
    -C
    -C   The main computation evaluates near-minimax approximations
    -C   derived from those in "Rational Chebyshev approximations for
    -C   the error function" by W. J. Cody, Math. Comp., 1969, 631-637.
    -C   This transportable program uses rational functions that
    -C   theoretically approximate the normal distribution function to
    -C   at least 18 significant decimal digits.  The accuracy achieved
    -C   depends on the arithmetic system, the compiler, the intrinsic
    -C   functions, and proper selection of the machine-dependent
    -C   constants.
    -C
    -C*******************************************************************
    -C*******************************************************************
    -C
    -C Explanation of machine-dependent constants.
    -C
    -C   MIN   = smallest machine representable number.
    -C
    -C   EPS   = argument below which anorm(x) may be represented by
    -C           0.5  and above which  x*x  will not underflow.
    -C           A conservative value is the largest machine number X
    -C           such that   1.0 + X = 1.0   to machine precision.
    -C*******************************************************************
    -C*******************************************************************
    -C
    -C Error returns
    -C
    -C  The program returns  ANORM = 0     for  ARG .LE. XLOW.
    -C
    -C
    -C Intrinsic functions required are:
    -C
    -C     ABS, AINT, EXP
    -C
    -C
    -C  Author: W. J. Cody
    -C          Mathematics and Computer Science Division
    -C          Argonne National Laboratory
    -C          Argonne, IL 60439
    -C
    -C  Latest modification: March 15, 1992
    -C
    -C------------------------------------------------------------------
    -      INTEGER i
    -      DOUBLE PRECISION a,arg,b,c,d,del,eps,half,p,one,q,result,sixten,
    -     +                 temp,sqrpi,thrsh,root32,x,xden,xnum,y,xsq,zero,
    -     +                 min,ccum
    -      DIMENSION a(5),b(4),c(9),d(8),p(6),q(5)
    -C------------------------------------------------------------------
    -C  External Function
    -C------------------------------------------------------------------
    -      DOUBLE PRECISION spmpar
    -      EXTERNAL spmpar
    -C------------------------------------------------------------------
    -C  Mathematical constants
    -C
    -C  SQRPI = 1 / sqrt(2*pi), ROOT32 = sqrt(32), and
    -C  THRSH is the argument for which anorm = 0.75.
    -C------------------------------------------------------------------
    -      DATA one,half,zero,sixten/1.0D0,0.5D0,0.0D0,1.60D0/,
    -     +     sqrpi/3.9894228040143267794D-1/,thrsh/0.66291D0/,
    -     +     root32/5.656854248D0/
    -C------------------------------------------------------------------
    -C  Coefficients for approximation in first interval
    -C------------------------------------------------------------------
    -      DATA a/2.2352520354606839287D00,1.6102823106855587881D02,
    -     +     1.0676894854603709582D03,1.8154981253343561249D04,
    -     +     6.5682337918207449113D-2/
    -      DATA b/4.7202581904688241870D01,9.7609855173777669322D02,
    -     +     1.0260932208618978205D04,4.5507789335026729956D04/
    -C------------------------------------------------------------------
    -C  Coefficients for approximation in second interval
    -C------------------------------------------------------------------
    -      DATA c/3.9894151208813466764D-1,8.8831497943883759412D00,
    -     +     9.3506656132177855979D01,5.9727027639480026226D02,
    -     +     2.4945375852903726711D03,6.8481904505362823326D03,
    -     +     1.1602651437647350124D04,9.8427148383839780218D03,
    -     +     1.0765576773720192317D-8/
    -      DATA d/2.2266688044328115691D01,2.3538790178262499861D02,
    -     +     1.5193775994075548050D03,6.4855582982667607550D03,
    -     +     1.8615571640885098091D04,3.4900952721145977266D04,
    -     +     3.8912003286093271411D04,1.9685429676859990727D04/
    -C------------------------------------------------------------------
    -C  Coefficients for approximation in third interval
    -C------------------------------------------------------------------
    -      DATA p/2.1589853405795699D-1,1.274011611602473639D-1,
    -     +     2.2235277870649807D-2,1.421619193227893466D-3,
    -     +     2.9112874951168792D-5,2.307344176494017303D-2/
    -      DATA q/1.28426009614491121D00,4.68238212480865118D-1,
    -     +     6.59881378689285515D-2,3.78239633202758244D-3,
    -     +     7.29751555083966205D-5/
    -C------------------------------------------------------------------
    -C  Machine dependent constants
    -C------------------------------------------------------------------
    -      eps = spmpar(1)*0.5D0
    -      min = spmpar(2)
    -C------------------------------------------------------------------
    -      x = arg
    -      y = abs(x)
    -      IF (y.LE.thrsh) THEN
    -C------------------------------------------------------------------
    -C  Evaluate  anorm  for  |X| <= 0.66291
    -C------------------------------------------------------------------
    -          xsq = zero
    -          IF (y.GT.eps) xsq = x*x
    -          xnum = a(5)*xsq
    -          xden = xsq
    -          DO 10 i = 1,3
    -              xnum = (xnum+a(i))*xsq
    -              xden = (xden+b(i))*xsq
    -   10     CONTINUE
    -          result = x* (xnum+a(4))/ (xden+b(4))
    -          temp = result
    -          result = half + temp
    -          ccum = half - temp
    -C------------------------------------------------------------------
    -C  Evaluate  anorm  for 0.66291 <= |X| <= sqrt(32)
    -C------------------------------------------------------------------
    -      ELSE IF (y.LE.root32) THEN
    -          xnum = c(9)*y
    -          xden = y
    -          DO 20 i = 1,7
    -              xnum = (xnum+c(i))*y
    -              xden = (xden+d(i))*y
    -   20     CONTINUE
    -          result = (xnum+c(8))/ (xden+d(8))
    -          xsq = aint(y*sixten)/sixten
    -          del = (y-xsq)* (y+xsq)
    -          result = exp(-xsq*xsq*half)*exp(-del*half)*result
    -          ccum = one - result
    -          IF (x.GT.zero) THEN
    -              temp = result
    -              result = ccum
    -              ccum = temp
    -          END IF
    -C------------------------------------------------------------------
    -C  Evaluate  anorm  for |X| > sqrt(32)
    -C------------------------------------------------------------------
    -      ELSE
    -          result = zero
    -          xsq = one/ (x*x)
    -          xnum = p(6)*xsq
    -          xden = xsq
    -          DO 30 i = 1,4
    -              xnum = (xnum+p(i))*xsq
    -              xden = (xden+q(i))*xsq
    -   30     CONTINUE
    -          result = xsq* (xnum+p(5))/ (xden+q(5))
    -          result = (sqrpi-result)/y
    -          xsq = aint(x*sixten)/sixten
    -          del = (x-xsq)* (x+xsq)
    -          result = exp(-xsq*xsq*half)*exp(-del*half)*result
    -          ccum = one - result
    -          IF (x.GT.zero) THEN
    -              temp = result
    -              result = ccum
    -              ccum = temp
    -          END IF
    -
    -      END IF
    -
    -      IF (result.LT.min) result = 0.0D0
    -      IF (ccum.LT.min) ccum = 0.0D0
    -C------------------------------------------------------------------
    -C  Fix up for negative argument, erf, etc.
    -C------------------------------------------------------------------
    -C----------Last card of ANORM ----------
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumpoi.f b/scipy-0.10.1/scipy/special/cdflib/cumpoi.f
    deleted file mode 100644
    index b6736816da..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumpoi.f
    +++ /dev/null
    @@ -1,54 +0,0 @@
    -      SUBROUTINE cumpoi(s,xlam,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMPOI(S,XLAM,CUM,CCUM)
    -C                    CUMulative POIsson distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns the  probability  of  S   or  fewer events in  a   Poisson
    -C     distribution with mean XLAM.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     S --> Upper limit of cumulation of the Poisson.
    -C                                                  S is DOUBLE PRECISION
    -C
    -C     XLAM --> Mean of the Poisson distribution.
    -C                                                  XLAM is DOUBLE PRECIS
    -C
    -C     CUM <-- Cumulative poisson distribution.
    -C                                        CUM is DOUBLE PRECISION
    -C
    -C     CCUM <-- Compliment of Cumulative poisson distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Uses formula  26.4.21   of   Abramowitz and  Stegun,  Handbook  of
    -C     Mathematical   Functions  to reduce   the   cumulative Poisson  to
    -C     the cumulative chi-square distribution.
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION s,xlam,cum,ccum
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION chi,df
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumchi
    -C     ..
    -C     .. Executable Statements ..
    -      df = 2.0D0* (s+1.0D0)
    -      chi = 2.0D0*xlam
    -      CALL cumchi(chi,df,ccum,cum)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumt.f b/scipy-0.10.1/scipy/special/cdflib/cumt.f
    deleted file mode 100644
    index c69d71847a..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumt.f
    +++ /dev/null
    @@ -1,63 +0,0 @@
    -      SUBROUTINE cumt(t,df,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMT(T,DF,CUM,CCUM)
    -C                    CUMulative T-distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Computes the integral from -infinity to T of the t-density.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     T --> Upper limit of integration of the t-density.
    -C                                                  T is DOUBLE PRECISION
    -C
    -C     DF --> Degrees of freedom of the t-distribution.
    -C                                                  DF is DOUBLE PRECISIO
    -C
    -C     CUM <-- Cumulative t-distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C     CCUM <-- Compliment of Cumulative t-distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Formula 26.5.27   of     Abramowitz  and   Stegun,    Handbook  of
    -C     Mathematical Functions  is   used   to  reduce the  t-distribution
    -C     to an incomplete beta.
    -C
    -C**********************************************************************
    -
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION df,t,cum,ccum
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION xx,a,oma,tt,yy,dfptt
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumbet
    -C     ..
    -C     .. Executable Statements ..
    -      tt = t*t
    -      dfptt = df + tt
    -      xx = df/dfptt
    -      yy = tt/dfptt
    -      CALL cumbet(xx,yy,0.5D0*df,0.5D0,a,oma)
    -      IF (.NOT. (t.LE.0.0D0)) GO TO 10
    -      cum = 0.5D0*a
    -      ccum = oma + cum
    -      GO TO 20
    -
    -   10 ccum = 0.5D0*a
    -      cum = oma + ccum
    -   20 RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/cumtnc.f b/scipy-0.10.1/scipy/special/cdflib/cumtnc.f
    deleted file mode 100644
    index edfc621696..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/cumtnc.f
    +++ /dev/null
    @@ -1,276 +0,0 @@
    -      SUBROUTINE cumtnc(t,df,pnonc,cum,ccum)
    -C**********************************************************************
    -C
    -C     SUBROUTINE CUMTNC(T,DF,PNONC,CUM,CCUM)
    -C
    -C                 CUMulative Non-Central T-distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Computes the integral from -infinity to T of the non-central
    -C     t-density.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     T --> Upper limit of integration of the non-central t-density.
    -C                                                  T is DOUBLE PRECISION
    -C
    -C     DF --> Degrees of freedom of the non-central t-distribution.
    -C                                                  DF is DOUBLE PRECISIO
    -C
    -C     PNONC --> Non-centrality parameter of the non-central t distibutio
    -C                                                  PNONC is DOUBLE PRECI
    -C
    -C     CUM <-- Cumulative t-distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C     CCUM <-- Compliment of Cumulative t-distribution.
    -C                                                  CCUM is DOUBLE PRECIS
    -C
    -C
    -C                              Method
    -C
    -C     Upper tail    of  the  cumulative  noncentral t   using
    -C     formulae from page 532  of Johnson, Kotz,  Balakrishnan, Coninuous
    -C     Univariate Distributions, Vol 2, 2nd Edition.  Wiley (1995)
    -C
    -C     This implementation starts the calculation at i = lambda,
    -C     which is near the largest Di.  It then sums forward and backward.
    -C***********************************************************************
    -C     .. Parameters ..
    -
    -      DOUBLE PRECISION one,zero,half,two,onep5
    -      PARAMETER (one=1.0d0,zero=0.0d0,half=0.5d0,two=2.0d0,onep5=1.5d0)
    -      DOUBLE PRECISION conv
    -      PARAMETER (conv=1.0d-7)
    -      DOUBLE PRECISION tiny
    -      PARAMETER (tiny=1.0d-10)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION ccum,cum,df,pnonc,t
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION alghdf,b,bb,bbcent,bcent,cent,d,dcent,dpnonc,
    -     +                 dum1,dum2,e,ecent,halfdf,lambda,lnomx,lnx,omx,
    -     +                 pnonc2,s,scent,ss,sscent,t2,term,tt,twoi,x,
    -     +                 xi,xlnd,xlne
    -      INTEGER ierr
    -      LOGICAL qrevs
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION gamln
    -      EXTERNAL gamln
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL bratio,cumnor,cumt
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,exp,int,log,max,min
    -C     ..
    -
    -C     Case pnonc essentially zero
    -
    -      IF (abs(pnonc).LE.tiny) THEN
    -          CALL cumt(t,df,cum,ccum)
    -          RETURN
    -
    -      END IF
    -
    -      qrevs = t .LT. zero
    -      IF (qrevs) THEN
    -          tt = -t
    -          dpnonc = -pnonc
    -
    -      ELSE
    -          tt = t
    -          dpnonc = pnonc
    -      END IF
    -
    -      pnonc2 = dpnonc*dpnonc
    -      t2 = tt*tt
    -
    -      IF (abs(tt).LE.tiny) THEN
    -          CALL cumnor(-pnonc,cum,ccum)
    -          RETURN
    -
    -      END IF
    -
    -      lambda = half*pnonc2
    -      x = df/ (df+t2)
    -      omx = one - x
    -
    -      lnx = log(x)
    -      lnomx = log(omx)
    -
    -      halfdf = half*df
    -      alghdf = gamln(halfdf)
    -
    -C     ******************** Case i = lambda
    -
    -      cent = int(lambda)
    -
    -      IF (cent.LT.one) cent = one
    -
    -C     Compute d=T(2i) in log space and offset by exp(-lambda)
    -
    -      xlnd = cent*log(lambda) - gamln(cent+one) - lambda
    -
    -      dcent = exp(xlnd)
    -
    -C     Compute e=t(2i+1) in log space offset by exp(-lambda)
    -
    -      xlne = (cent+half)*log(lambda) - gamln(cent+onep5) - lambda
    -      ecent = exp(xlne)
    -
    -      IF (dpnonc.LT.zero) ecent = -ecent
    -
    -C     Compute bcent=B(2*cent)
    -
    -      CALL bratio(halfdf,cent+half,x,omx,bcent,dum1,ierr)
    -
    -C     compute bbcent=B(2*cent+1)
    -
    -      CALL bratio(halfdf,cent+one,x,omx,bbcent,dum2,ierr)
    -
    -C     Case bcent and bbcent are essentially zero
    -C     Thus t is effectively infinite
    -
    -      IF ((bcent+bbcent).LT.tiny) THEN
    -          IF (qrevs) THEN
    -              cum = zero
    -              ccum = one
    -
    -          ELSE
    -              cum = one
    -              ccum = zero
    -          END IF
    -
    -          RETURN
    -
    -      END IF
    -
    -C     Case bcent and bbcent are essentially one
    -C     Thus t is effectively zero
    -
    -      IF ((dum1+dum2).LT.tiny) THEN
    -          CALL cumnor(-pnonc,cum,ccum)
    -          RETURN
    -
    -      END IF
    -
    -C     First term in ccum is D*B + E*BB
    -
    -      ccum = dcent*bcent + ecent*bbcent
    -
    -C     compute s(cent) = B(2*(cent+1)) - B(2*cent))
    -
    -      scent = gamln(halfdf+cent+half) - gamln(cent+onep5) - alghdf +
    -     +        halfdf*lnx + (cent+half)*lnomx
    -      scent = exp(scent)
    -
    -C     compute ss(cent) = B(2*cent+3) - B(2*cent+1)
    -
    -      sscent = gamln(halfdf+cent+one) - gamln(cent+two) - alghdf +
    -     +         halfdf*lnx + (cent+one)*lnomx
    -      sscent = exp(sscent)
    -
    -C     ******************** Sum Forward
    -
    -      xi = cent + one
    -      twoi = two*xi
    -
    -      d = dcent
    -
    -      e = ecent
    -
    -      b = bcent
    -
    -      bb = bbcent
    -
    -      s = scent
    -
    -      ss = sscent
    -
    -   10 b = b + s
    -      bb = bb + ss
    -
    -      d = (lambda/xi)*d
    -      e = (lambda/ (xi+half))*e
    -
    -      term = d*b + e*bb
    -
    -      ccum = ccum + term
    -
    -      s = s*omx* (df+twoi-one)/ (twoi+one)
    -
    -      ss = ss*omx* (df+twoi)/ (twoi+two)
    -
    -      xi = xi + one
    -      twoi = two*xi
    -
    -      IF (abs(term).GT.conv*ccum) GO TO 10
    -
    -C     ******************** Sum Backward
    -
    -      xi = cent
    -      twoi = two*xi
    -
    -      d = dcent
    -
    -      e = ecent
    -
    -      b = bcent
    -
    -      bb = bbcent
    -
    -      s = scent* (one+twoi)/ ((df+twoi-one)*omx)
    -
    -      ss = sscent* (two+twoi)/ ((df+twoi)*omx)
    -
    -   20 b = b - s
    -      bb = bb - ss
    -
    -      d = d* (xi/lambda)
    -
    -      e = e* ((xi+half)/lambda)
    -
    -      term = d*b + e*bb
    -
    -      ccum = ccum + term
    -
    -      xi = xi - one
    -
    -      IF (xi.LT.half) GO TO 30
    -
    -      twoi = two*xi
    -
    -      s = s* (one+twoi)/ ((df+twoi-one)*omx)
    -
    -      ss = ss* (two+twoi)/ ((df+twoi)*omx)
    -
    -      IF (abs(term).GT.conv*ccum) GO TO 20
    -
    -   30 CONTINUE
    -
    -      IF (qrevs) THEN
    -          cum = half*ccum
    -          ccum = one - cum
    -
    -      ELSE
    -          ccum = half*ccum
    -          cum = one - ccum
    -      END IF
    -
    -C     Due to roundoff error the answer may not lie between zero and one
    -C     Force it to do so
    -
    -      cum = max(min(cum,one),zero)
    -      ccum = max(min(ccum,one),zero)
    -
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/devlpl.f b/scipy-0.10.1/scipy/special/cdflib/devlpl.f
    deleted file mode 100644
    index 2220143110..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/devlpl.f
    +++ /dev/null
    @@ -1,48 +0,0 @@
    -      DOUBLE PRECISION FUNCTION devlpl(a,n,x)
    -C**********************************************************************
    -C
    -C     DOUBLE PRECISION FUNCTION DEVLPL(A,N,X)
    -C              Double precision EVALuate a PoLynomial at X
    -C
    -C
    -C                              Function
    -C
    -C
    -C     returns
    -C          A(1) + A(2)*X + ... + A(N)*X**(N-1)
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     A --> Array of coefficients of the polynomial.
    -C                                        A is DOUBLE PRECISION(N)
    -C
    -C     N --> Length of A, also degree of polynomial - 1.
    -C                                        N is INTEGER
    -C
    -C     X --> Point at which the polynomial is to be evaluated.
    -C                                        X is DOUBLE PRECISION
    -C
    -C**********************************************************************
    -C
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -      INTEGER n
    -C     ..
    -C     .. Array Arguments ..
    -      DOUBLE PRECISION a(n)
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION term
    -      INTEGER i
    -C     ..
    -C     .. Executable Statements ..
    -      term = a(n)
    -      DO 10,i = n - 1,1,-1
    -          term = a(i) + term*x
    -   10 CONTINUE
    -      devlpl = term
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/dinvnr.f b/scipy-0.10.1/scipy/special/cdflib/dinvnr.f
    deleted file mode 100644
    index 2639ef8ad0..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/dinvnr.f
    +++ /dev/null
    @@ -1,106 +0,0 @@
    -      DOUBLE PRECISION FUNCTION dinvnr(p,q)
    -C**********************************************************************
    -C
    -C     DOUBLE PRECISION FUNCTION DINVNR(P,Q)
    -C     Double precision NoRmal distribution INVerse
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns X  such that CUMNOR(X)  =   P,  i.e., the  integral from -
    -C     infinity to X of (1/SQRT(2*PI)) EXP(-U*U/2) dU is P
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     P --> The probability whose normal deviate is sought.
    -C                    P is DOUBLE PRECISION
    -C
    -C     Q --> 1-P
    -C                    P is DOUBLE PRECISION
    -C
    -C
    -C                              Method
    -C
    -C
    -C     The  rational   function   on  page 95    of Kennedy  and  Gentle,
    -C     Statistical Computing, Marcel Dekker, NY , 1980 is used as a start
    -C     value for the Newton method of finding roots.
    -C
    -C
    -C                              Note
    -C
    -C
    -C     If P or Q .lt. machine EPS returns +/- DINVNR(EPS)
    -C
    -C**********************************************************************
    -C     .. Parameters ..
    -      INTEGER maxit
    -      PARAMETER (maxit=100)
    -      DOUBLE PRECISION eps
    -      PARAMETER (eps=1.0D-13)
    -      DOUBLE PRECISION r2pi
    -      PARAMETER (r2pi=0.3989422804014326D0)
    -      DOUBLE PRECISION nhalf
    -      PARAMETER (nhalf=-0.5D0)
    -C     ..
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION p,q
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION strtx,xcur,cum,ccum,pp,dx
    -      INTEGER i
    -      LOGICAL qporq
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION stvaln
    -      EXTERNAL stvaln
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL cumnor
    -C     ..
    -C     .. Statement Functions ..
    -      DOUBLE PRECISION dennor,x
    -
    -      dennor(x) = r2pi*exp(nhalf*x*x)
    -C     ..
    -C     .. Executable Statements ..
    -C
    -C     FIND MINIMUM OF P AND Q
    -C
    -      qporq = p .LE. q
    -      IF (.NOT. (qporq)) GO TO 10
    -      pp = p
    -      GO TO 20
    -
    -   10 pp = q
    -C
    -C     INITIALIZATION STEP
    -C
    -   20 strtx = stvaln(pp)
    -      xcur = strtx
    -C
    -C     NEWTON INTERATIONS
    -C
    -      DO 30,i = 1,maxit
    -          CALL cumnor(xcur,cum,ccum)
    -          dx = (cum-pp)/dennor(xcur)
    -          xcur = xcur - dx
    -          IF (abs(dx/xcur).LT.eps) GO TO 40
    -   30 CONTINUE
    -      dinvnr = strtx
    -C
    -C     IF WE GET HERE, NEWTON HAS FAILED
    -C
    -      IF (.NOT.qporq) dinvnr = -dinvnr
    -      RETURN
    -C
    -C     IF WE GET HERE, NEWTON HAS SUCCEDED
    -C
    -   40 dinvnr = xcur
    -      IF (.NOT.qporq) dinvnr = -dinvnr
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/dinvr.f b/scipy-0.10.1/scipy/special/cdflib/dinvr.f
    deleted file mode 100644
    index b8ef994598..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/dinvr.f
    +++ /dev/null
    @@ -1,348 +0,0 @@
    -      SUBROUTINE dinvr(status,x,fx,qleft,qhi)
    -C**********************************************************************
    -C
    -C     SUBROUTINE DINVR(STATUS, X, FX, QLEFT, QHI)
    -C          Double precision
    -C          bounds the zero of the function and invokes zror
    -C                    Reverse Communication
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Bounds the    function  and  invokes  ZROR   to perform the   zero
    -C     finding.  STINVR  must  have   been  called  before this   routine
    -C     in order to set its parameters.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     STATUS <--> At the beginning of a zero finding problem, STATUS
    -C                 should be set to 0 and INVR invoked.  (The value
    -C                 of parameters other than X will be ignored on this cal
    -C
    -C                 When INVR needs the function evaluated, it will set
    -C                 STATUS to 1 and return.  The value of the function
    -C                 should be set in FX and INVR again called without
    -C                 changing any of its other parameters.
    -C
    -C                 When INVR has finished without error, it will return
    -C                 with STATUS 0.  In that case X is approximately a root
    -C                 of F(X).
    -C
    -C                 If INVR cannot bound the function, it returns status
    -C                 -1 and sets QLEFT and QHI.
    -C                         INTEGER STATUS
    -C
    -C     X <-- The value of X at which F(X) is to be evaluated.
    -C                         DOUBLE PRECISION X
    -C
    -C     FX --> The value of F(X) calculated when INVR returns with
    -C            STATUS = 1.
    -C                         DOUBLE PRECISION FX
    -C
    -C     QLEFT <-- Defined only if QMFINV returns .FALSE.  In that
    -C          case it is .TRUE. If the stepping search terminated
    -C          unsucessfully at SMALL.  If it is .FALSE. the search
    -C          terminated unsucessfully at BIG.
    -C                    QLEFT is LOGICAL
    -C
    -C     QHI <-- Defined only if QMFINV returns .FALSE.  In that
    -C          case it is .TRUE. if F(X) .GT. Y at the termination
    -C          of the search and .FALSE. if F(X) .LT. Y at the
    -C          termination of the search.
    -C                    QHI is LOGICAL
    -
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION fx,x,zabsst,zabsto,zbig,zrelst,zrelto,zsmall,
    -     +                 zstpmu
    -      INTEGER status
    -      LOGICAL qhi,qleft
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION absstp,abstol,big,fbig,fsmall,relstp,reltol,
    -     +                 small,step,stpmul,xhi,xlb,xlo,xsave,xub,yy,zx,zy,
    -     +                 zz
    -      INTEGER i99999
    -      LOGICAL qbdd,qcond,qdum1,qdum2,qincr,qlim,qok,qup
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL dstzr,dzror
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,max,min
    -C     ..
    -C     .. Statement Functions ..
    -      LOGICAL qxmon
    -C     ..
    -C     .. Save statement ..
    -      SAVE
    -C     ..
    -C     .. Statement Function definitions ..
    -      qxmon(zx,zy,zz) = zx .LE. zy .AND. zy .LE. zz
    -C     ..
    -C     .. Executable Statements ..
    -
    -      IF (status.GT.0) GO TO 310
    -
    -      qcond = .NOT. qxmon(small,x,big)
    -      IF (qcond) STOP ' SMALL, X, BIG not monotone in INVR'
    -      xsave = x
    -C
    -C     See that SMALL and BIG bound the zero and set QINCR
    -C
    -      x = small
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 10 TO i99999
    -      GO TO 300
    -
    -   10 fsmall = fx
    -      x = big
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 20 TO i99999
    -      GO TO 300
    -
    -   20 fbig = fx
    -      qincr = fbig .GT. fsmall
    -      IF (.NOT. (qincr)) GO TO 50
    -      IF (fsmall.LE.0.0D0) GO TO 30
    -      status = -1
    -      qleft = .TRUE.
    -      qhi = .TRUE.
    -      RETURN
    -
    -   30 IF (fbig.GE.0.0D0) GO TO 40
    -      status = -1
    -      qleft = .FALSE.
    -      qhi = .FALSE.
    -      RETURN
    -
    -   40 GO TO 80
    -
    -   50 IF (fsmall.GE.0.0D0) GO TO 60
    -      status = -1
    -      qleft = .TRUE.
    -      qhi = .FALSE.
    -      RETURN
    -
    -   60 IF (fbig.LE.0.0D0) GO TO 70
    -      status = -1
    -      qleft = .FALSE.
    -      qhi = .TRUE.
    -      RETURN
    -
    -   70 CONTINUE
    -   80 x = xsave
    -      step = max(absstp,relstp*abs(x))
    -C      YY = F(X) - Y
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 90 TO i99999
    -      GO TO 300
    -
    -   90 yy = fx
    -      IF (.NOT. (yy.EQ.0.0D0)) GO TO 100
    -      status = 0
    -      qok = .TRUE.
    -      RETURN
    -
    -  100 qup = (qincr .AND. (yy.LT.0.0D0)) .OR.
    -     +      (.NOT.qincr .AND. (yy.GT.0.0D0))
    -C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    -C
    -C     HANDLE CASE IN WHICH WE MUST STEP HIGHER
    -C
    -C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    -      IF (.NOT. (qup)) GO TO 170
    -      xlb = xsave
    -      xub = min(xlb+step,big)
    -      GO TO 120
    -
    -  110 IF (qcond) GO TO 150
    -C      YY = F(XUB) - Y
    -  120 x = xub
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 130 TO i99999
    -      GO TO 300
    -
    -  130 yy = fx
    -      qbdd = (qincr .AND. (yy.GE.0.0D0)) .OR.
    -     +       (.NOT.qincr .AND. (yy.LE.0.0D0))
    -      qlim = xub .GE. big
    -      qcond = qbdd .OR. qlim
    -      IF (qcond) GO TO 140
    -      step = stpmul*step
    -      xlb = xub
    -      xub = min(xlb+step,big)
    -  140 GO TO 110
    -
    -  150 IF (.NOT. (qlim.AND..NOT.qbdd)) GO TO 160
    -      status = -1
    -      qleft = .FALSE.
    -      qhi = .NOT. qincr
    -      x = big
    -      RETURN
    -
    -  160 GO TO 240
    -C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    -C
    -C     HANDLE CASE IN WHICH WE MUST STEP LOWER
    -C
    -C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    -  170 xub = xsave
    -      xlb = max(xub-step,small)
    -      GO TO 190
    -
    -  180 IF (qcond) GO TO 220
    -C      YY = F(XLB) - Y
    -  190 x = xlb
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 200 TO i99999
    -      GO TO 300
    -
    -  200 yy = fx
    -      qbdd = (qincr .AND. (yy.LE.0.0D0)) .OR.
    -     +       (.NOT.qincr .AND. (yy.GE.0.0D0))
    -      qlim = xlb .LE. small
    -      qcond = qbdd .OR. qlim
    -      IF (qcond) GO TO 210
    -      step = stpmul*step
    -      xub = xlb
    -      xlb = max(xub-step,small)
    -  210 GO TO 180
    -
    -  220 IF (.NOT. (qlim.AND..NOT.qbdd)) GO TO 230
    -      status = -1
    -      qleft = .TRUE.
    -      qhi = qincr
    -      x = small
    -      RETURN
    -
    -  230 CONTINUE
    -  240 CALL dstzr(xlb,xub,abstol,reltol)
    -C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    -C
    -C     IF WE REACH HERE, XLB AND XUB BOUND THE ZERO OF F.
    -C
    -C++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    -      status = 0
    -      GO TO 260
    -
    -  250 IF (.NOT. (status.EQ.1)) GO TO 290
    -  260 CALL dzror(status,x,fx,xlo,xhi,qdum1,qdum2)
    -      IF (.NOT. (status.EQ.1)) GO TO 280
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 270 TO i99999
    -      GO TO 300
    -
    -  270 CONTINUE
    -  280 GO TO 250
    -
    -  290 x = xlo
    -      status = 0
    -      RETURN
    -
    -      ENTRY dstinv(zsmall,zbig,zabsst,zrelst,zstpmu,zabsto,zrelto)
    -C**********************************************************************
    -C
    -C      SUBROUTINE DSTINV( SMALL, BIG, ABSSTP, RELSTP, STPMUL,
    -C     +                   ABSTOL, RELTOL )
    -C      Double Precision - SeT INverse finder - Reverse Communication
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Concise Description - Given a monotone function F finds X
    -C     such that F(X) = Y.  Uses Reverse communication -- see invr.
    -C     This routine sets quantities needed by INVR.
    -C
    -C          More Precise Description of INVR -
    -C
    -C     F must be a monotone function, the results of QMFINV are
    -C     otherwise undefined.  QINCR must be .TRUE. if F is non-
    -C     decreasing and .FALSE. if F is non-increasing.
    -C
    -C     QMFINV will return .TRUE. if and only if F(SMALL) and
    -C     F(BIG) bracket Y, i. e.,
    -C          QINCR is .TRUE. and F(SMALL).LE.Y.LE.F(BIG) or
    -C          QINCR is .FALSE. and F(BIG).LE.Y.LE.F(SMALL)
    -C
    -C     if QMFINV returns .TRUE., then the X returned satisfies
    -C     the following condition.  let
    -C               TOL(X) = MAX(ABSTOL,RELTOL*ABS(X))
    -C     then if QINCR is .TRUE.,
    -C          F(X-TOL(X)) .LE. Y .LE. F(X+TOL(X))
    -C     and if QINCR is .FALSE.
    -C          F(X-TOL(X)) .GE. Y .GE. F(X+TOL(X))
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     SMALL --> The left endpoint of the interval to be
    -C          searched for a solution.
    -C                    SMALL is DOUBLE PRECISION
    -C
    -C     BIG --> The right endpoint of the interval to be
    -C          searched for a solution.
    -C                    BIG is DOUBLE PRECISION
    -C
    -C     ABSSTP, RELSTP --> The initial step size in the search
    -C          is MAX(ABSSTP,RELSTP*ABS(X)). See algorithm.
    -C                    ABSSTP is DOUBLE PRECISION
    -C                    RELSTP is DOUBLE PRECISION
    -C
    -C     STPMUL --> When a step doesn't bound the zero, the step
    -C                size is multiplied by STPMUL and another step
    -C                taken.  A popular value is 2.0
    -C                    DOUBLE PRECISION STPMUL
    -C
    -C     ABSTOL, RELTOL --> Two numbers that determine the accuracy
    -C          of the solution.  See function for a precise definition.
    -C                    ABSTOL is DOUBLE PRECISION
    -C                    RELTOL is DOUBLE PRECISION
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Compares F(X) with Y for the input value of X then uses QINCR
    -C     to determine whether to step left or right to bound the
    -C     desired x.  the initial step size is
    -C          MAX(ABSSTP,RELSTP*ABS(S)) for the input value of X.
    -C     Iteratively steps right or left until it bounds X.
    -C     At each step which doesn't bound X, the step size is doubled.
    -C     The routine is careful never to step beyond SMALL or BIG.  If
    -C     it hasn't bounded X at SMALL or BIG, QMFINV returns .FALSE.
    -C     after setting QLEFT and QHI.
    -C
    -C     If X is successfully bounded then Algorithm R of the paper
    -C     'Two Efficient Algorithms with Guaranteed Convergence for
    -C     Finding a Zero of a Function' by J. C. P. Bus and
    -C     T. J. Dekker in ACM Transactions on Mathematical
    -C     Software, Volume 1, No. 4 page 330 (DEC. '75) is employed
    -C     to find the zero of the function F(X)-Y. This is routine
    -C     QRZERO.
    -C
    -C**********************************************************************
    -      small = zsmall
    -      big = zbig
    -      absstp = zabsst
    -      relstp = zrelst
    -      stpmul = zstpmu
    -      abstol = zabsto
    -      reltol = zrelto
    -      RETURN
    -
    -      STOP '*** EXECUTION FLOWING INTO FLECS PROCEDURES ***'
    -C     TO GET-FUNCTION-VALUE
    -  300 status = 1
    -      RETURN
    -
    -  310 CONTINUE
    -      GO TO i99999
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/dt1.f b/scipy-0.10.1/scipy/special/cdflib/dt1.f
    deleted file mode 100644
    index 3a940381a5..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/dt1.f
    +++ /dev/null
    @@ -1,76 +0,0 @@
    -      DOUBLE PRECISION FUNCTION dt1(p,q,df)
    -C**********************************************************************
    -C
    -C     DOUBLE PRECISION FUNCTION DT1(P,Q,DF)
    -C     Double precision Initalize Approximation to
    -C           INVerse of the cumulative T distribution
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns  the  inverse   of  the T   distribution   function, i.e.,
    -C     the integral from 0 to INVT of the T density is P. This is an
    -C     initial approximation
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     P --> The p-value whose inverse from the T distribution is
    -C          desired.
    -C                    P is DOUBLE PRECISION
    -C
    -C     Q --> 1-P.
    -C                    Q is DOUBLE PRECISION
    -C
    -C     DF --> Degrees of freedom of the T distribution.
    -C                    DF is DOUBLE PRECISION
    -C
    -C**********************************************************************
    -C
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION df,p,q
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION denpow,sum,term,x,xp,xx
    -      INTEGER i
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION coef(5,4),denom(4)
    -      INTEGER ideg(4)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION dinvnr,devlpl
    -      EXTERNAL dinvnr,devlpl
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs
    -C     ..
    -C     .. Data statements ..
    -      DATA (coef(i,1),i=1,5)/1.0D0,1.0D0,3*0.0D0/
    -      DATA (coef(i,2),i=1,5)/3.0D0,16.0D0,5.0D0,2*0.0D0/
    -      DATA (coef(i,3),i=1,5)/-15.0D0,17.0D0,19.0D0,3.0D0,0.0D0/
    -      DATA (coef(i,4),i=1,5)/-945.0D0,-1920.0D0,1482.0D0,776.0D0,79.0D0/
    -      DATA ideg/2,3,4,5/
    -      DATA denom/4.0D0,96.0D0,384.0D0,92160.0D0/
    -C     ..
    -C     .. Executable Statements ..
    -      x = abs(dinvnr(p,q))
    -      xx = x*x
    -      sum = x
    -      denpow = 1.0D0
    -      DO 10,i = 1,4
    -          term = devlpl(coef(1,i),ideg(i),xx)*x
    -          denpow = denpow*df
    -          sum = sum + term/ (denpow*denom(i))
    -   10 CONTINUE
    -      IF (.NOT. (p.GE.0.5D0)) GO TO 20
    -      xp = sum
    -      GO TO 30
    -
    -   20 xp = -sum
    -   30 dt1 = xp
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/dzror.f b/scipy-0.10.1/scipy/special/cdflib/dzror.f
    deleted file mode 100644
    index c12c9a72c8..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/dzror.f
    +++ /dev/null
    @@ -1,283 +0,0 @@
    -      SUBROUTINE dzror(status,x,fx,xlo,xhi,qleft,qhi)
    -C**********************************************************************
    -C
    -C     SUBROUTINE DZROR(STATUS, X, FX, XLO, XHI, QLEFT, QHI)
    -C     Double precision ZeRo of a function -- Reverse Communication
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Performs the zero finding.  STZROR must have been called before
    -C     this routine in order to set its parameters.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     STATUS <--> At the beginning of a zero finding problem, STATUS
    -C                 should be set to 0 and ZROR invoked.  (The value
    -C                 of other parameters will be ignored on this call.)
    -C
    -C                 When ZROR needs the function evaluated, it will set
    -C                 STATUS to 1 and return.  The value of the function
    -C                 should be set in FX and ZROR again called without
    -C                 changing any of its other parameters.
    -C
    -C                 When ZROR has finished without error, it will return
    -C                 with STATUS 0.  In that case (XLO,XHI) bound the answe
    -C
    -C                 If ZROR finds an error (which implies that F(XLO)-Y an
    -C                 F(XHI)-Y have the same sign, it returns STATUS -1.  In
    -C                 this case, XLO and XHI are undefined.
    -C                         INTEGER STATUS
    -C
    -C     X <-- The value of X at which F(X) is to be evaluated.
    -C                         DOUBLE PRECISION X
    -C
    -C     FX --> The value of F(X) calculated when ZROR returns with
    -C            STATUS = 1.
    -C                         DOUBLE PRECISION FX
    -C
    -C     XLO <-- When ZROR returns with STATUS = 0, XLO bounds the
    -C             inverval in X containing the solution below.
    -C                         DOUBLE PRECISION XLO
    -C
    -C     XHI <-- When ZROR returns with STATUS = 0, XHI bounds the
    -C             inverval in X containing the solution above.
    -C                         DOUBLE PRECISION XHI
    -C
    -C     QLEFT <-- .TRUE. if the stepping search terminated unsucessfully
    -C                at XLO.  If it is .FALSE. the search terminated
    -C                unsucessfully at XHI.
    -C                    QLEFT is LOGICAL
    -C
    -C     QHI <-- .TRUE. if F(X) .GT. Y at the termination of the
    -C              search and .FALSE. if F(X) .LT. Y at the
    -C              termination of the search.
    -C                    QHI is LOGICAL
    -C
    -C**********************************************************************
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION fx,x,xhi,xlo,zabstl,zreltl,zxhi,zxlo
    -      INTEGER status
    -      LOGICAL qhi,qleft
    -C     ..
    -C     .. Save statement ..
    -      SAVE
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a,abstol,b,c,d,fa,fb,fc,fd,fda,fdb,m,mb,p,q,
    -     +                 reltol,tol,w,xxhi,xxlo,zx
    -      INTEGER ext,i99999
    -      LOGICAL first,qrzero
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,max,sign
    -C     ..
    -C     .. Statement Functions ..
    -      DOUBLE PRECISION ftol
    -C     ..
    -C     .. Statement Function definitions ..
    -      ftol(zx) = 0.5D0*max(abstol,reltol*abs(zx))
    -C     ..
    -C     .. Executable Statements ..
    -
    -      IF (status.GT.0) GO TO 280
    -      xlo = xxlo
    -      xhi = xxhi
    -      b = xlo
    -      x = xlo
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 10 TO i99999
    -      GO TO 270
    -
    -   10 fb = fx
    -      xlo = xhi
    -      a = xlo
    -      x = xlo
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 20 TO i99999
    -      GO TO 270
    -C
    -C     Check that F(ZXLO) < 0 < F(ZXHI)  or
    -C                F(ZXLO) > 0 > F(ZXHI)
    -C
    -   20 IF (.NOT. (fb.LT.0.0D0)) GO TO 40
    -      IF (.NOT. (fx.LT.0.0D0)) GO TO 30
    -      status = -1
    -      qleft = fx .LT. fb
    -      qhi = .FALSE.
    -      RETURN
    -
    -   30 CONTINUE
    -   40 IF (.NOT. (fb.GT.0.0D0)) GO TO 60
    -      IF (.NOT. (fx.GT.0.0D0)) GO TO 50
    -      status = -1
    -      qleft = fx .GT. fb
    -      qhi = .TRUE.
    -      RETURN
    -
    -   50 CONTINUE
    -   60 fa = fx
    -C
    -      first = .TRUE.
    -   70 c = a
    -      fc = fa
    -      ext = 0
    -   80 IF (.NOT. (abs(fc).LT.abs(fb))) GO TO 100
    -      IF (.NOT. (c.NE.a)) GO TO 90
    -      d = a
    -      fd = fa
    -   90 a = b
    -      fa = fb
    -      xlo = c
    -      b = xlo
    -      fb = fc
    -      c = a
    -      fc = fa
    -  100 tol = ftol(xlo)
    -      m = (c+b)*.5D0
    -      mb = m - b
    -      IF (.NOT. (abs(mb).GT.tol)) GO TO 240
    -      IF (.NOT. (ext.GT.3)) GO TO 110
    -      w = mb
    -      GO TO 190
    -
    -  110 tol = sign(tol,mb)
    -      p = (b-a)*fb
    -      IF (.NOT. (first)) GO TO 120
    -      q = fa - fb
    -      first = .FALSE.
    -      GO TO 130
    -
    -  120 fdb = (fd-fb)/ (d-b)
    -      fda = (fd-fa)/ (d-a)
    -      p = fda*p
    -      q = fdb*fa - fda*fb
    -  130 IF (.NOT. (p.LT.0.0D0)) GO TO 140
    -      p = -p
    -      q = -q
    -  140 IF (ext.EQ.3) p = p*2.0D0
    -      IF (.NOT. ((p*1.0D0).EQ.0.0D0.OR.p.LE. (q*tol))) GO TO 150
    -      w = tol
    -      GO TO 180
    -
    -  150 IF (.NOT. (p.LT. (mb*q))) GO TO 160
    -      w = p/q
    -      GO TO 170
    -
    -  160 w = mb
    -  170 CONTINUE
    -  180 CONTINUE
    -  190 d = a
    -      fd = fa
    -      a = b
    -      fa = fb
    -      b = b + w
    -      xlo = b
    -      x = xlo
    -C     GET-FUNCTION-VALUE
    -      ASSIGN 200 TO i99999
    -      GO TO 270
    -
    -  200 fb = fx
    -      IF (.NOT. ((fc*fb).GE.0.0D0)) GO TO 210
    -      GO TO 70
    -
    -  210 IF (.NOT. (w.EQ.mb)) GO TO 220
    -      ext = 0
    -      GO TO 230
    -
    -  220 ext = ext + 1
    -  230 GO TO 80
    -
    -  240 xhi = c
    -      qrzero = (fc.GE.0.0D0 .AND. fb.LE.0.0D0) .OR.
    -     +         (fc.LT.0.0D0 .AND. fb.GE.0.0D0)
    -      IF (.NOT. (qrzero)) GO TO 250
    -      status = 0
    -      GO TO 260
    -
    -  250 status = -1
    -  260 RETURN
    -
    -      ENTRY dstzr(zxlo,zxhi,zabstl,zreltl)
    -C**********************************************************************
    -C
    -C     SUBROUTINE DSTZR( XLO, XHI, ABSTOL, RELTOL )
    -C     Double precision SeT ZeRo finder - Reverse communication version
    -C
    -C
    -C                              Function
    -C
    -C
    -C
    -C     Sets quantities needed by ZROR.  The function of ZROR
    -C     and the quantities set is given here.
    -C
    -C     Concise Description - Given a function F
    -C     find XLO such that F(XLO) = 0.
    -C
    -C          More Precise Description -
    -C
    -C     Input condition. F is a double precision function of a single
    -C     double precision argument and XLO and XHI are such that
    -C          F(XLO)*F(XHI)  .LE.  0.0
    -C
    -C     If the input condition is met, QRZERO returns .TRUE.
    -C     and output values of XLO and XHI satisfy the following
    -C          F(XLO)*F(XHI)  .LE. 0.
    -C          ABS(F(XLO)  .LE. ABS(F(XHI)
    -C          ABS(XLO-XHI)  .LE. TOL(X)
    -C     where
    -C          TOL(X) = MAX(ABSTOL,RELTOL*ABS(X))
    -C
    -C     If this algorithm does not find XLO and XHI satisfying
    -C     these conditions then QRZERO returns .FALSE.  This
    -C     implies that the input condition was not met.
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     XLO --> The left endpoint of the interval to be
    -C           searched for a solution.
    -C                    XLO is DOUBLE PRECISION
    -C
    -C     XHI --> The right endpoint of the interval to be
    -C           for a solution.
    -C                    XHI is DOUBLE PRECISION
    -C
    -C     ABSTOL, RELTOL --> Two numbers that determine the accuracy
    -C                      of the solution.  See function for a
    -C                      precise definition.
    -C                    ABSTOL is DOUBLE PRECISION
    -C                    RELTOL is DOUBLE PRECISION
    -C
    -C
    -C                              Method
    -C
    -C
    -C     Algorithm R of the paper 'Two Efficient Algorithms with
    -C     Guaranteed Convergence for Finding a Zero of a Function'
    -C     by J. C. P. Bus and T. J. Dekker in ACM Transactions on
    -C     Mathematical Software, Volume 1, no. 4 page 330
    -C     (Dec. '75) is employed to find the zero of F(X)-Y.
    -C
    -C**********************************************************************
    -      xxlo = zxlo
    -      xxhi = zxhi
    -      abstol = zabstl
    -      reltol = zreltl
    -      RETURN
    -
    -      STOP '*** EXECUTION FLOWING INTO FLECS PROCEDURES ***'
    -C     TO GET-FUNCTION-VALUE
    -  270 status = 1
    -      RETURN
    -
    -  280 CONTINUE
    -      GO TO i99999
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/erf.f b/scipy-0.10.1/scipy/special/cdflib/erf.f
    deleted file mode 100644
    index 98b32c56af..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/erf.f
    +++ /dev/null
    @@ -1,74 +0,0 @@
    -      DOUBLE PRECISION FUNCTION erf(x)
    -C-----------------------------------------------------------------------
    -C             EVALUATION OF THE REAL ERROR FUNCTION
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ax,bot,c,t,top,x2
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION a(5),b(3),p(8),q(8),r(5),s(4)
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,exp,sign
    -C     ..
    -C     .. Data statements ..
    -C-------------------------
    -C-------------------------
    -C-------------------------
    -C-------------------------
    -      DATA c/.564189583547756D0/
    -      DATA a(1)/.771058495001320D-04/,a(2)/-.133733772997339D-02/,
    -     +     a(3)/.323076579225834D-01/,a(4)/.479137145607681D-01/,
    -     +     a(5)/.128379167095513D+00/
    -      DATA b(1)/.301048631703895D-02/,b(2)/.538971687740286D-01/,
    -     +     b(3)/.375795757275549D+00/
    -      DATA p(1)/-1.36864857382717D-07/,p(2)/5.64195517478974D-01/,
    -     +     p(3)/7.21175825088309D+00/,p(4)/4.31622272220567D+01/,
    -     +     p(5)/1.52989285046940D+02/,p(6)/3.39320816734344D+02/,
    -     +     p(7)/4.51918953711873D+02/,p(8)/3.00459261020162D+02/
    -      DATA q(1)/1.00000000000000D+00/,q(2)/1.27827273196294D+01/,
    -     +     q(3)/7.70001529352295D+01/,q(4)/2.77585444743988D+02/,
    -     +     q(5)/6.38980264465631D+02/,q(6)/9.31354094850610D+02/,
    -     +     q(7)/7.90950925327898D+02/,q(8)/3.00459260956983D+02/
    -      DATA r(1)/2.10144126479064D+00/,r(2)/2.62370141675169D+01/,
    -     +     r(3)/2.13688200555087D+01/,r(4)/4.65807828718470D+00/,
    -     +     r(5)/2.82094791773523D-01/
    -      DATA s(1)/9.41537750555460D+01/,s(2)/1.87114811799590D+02/,
    -     +     s(3)/9.90191814623914D+01/,s(4)/1.80124575948747D+01/
    -C     ..
    -C     .. Executable Statements ..
    -C-------------------------
    -      ax = abs(x)
    -      IF (ax.GT.0.5D0) GO TO 10
    -      t = x*x
    -      top = ((((a(1)*t+a(2))*t+a(3))*t+a(4))*t+a(5)) + 1.0D0
    -      bot = ((b(1)*t+b(2))*t+b(3))*t + 1.0D0
    -      erf = x* (top/bot)
    -      RETURN
    -C
    -   10 IF (ax.GT.4.0D0) GO TO 20
    -      top = ((((((p(1)*ax+p(2))*ax+p(3))*ax+p(4))*ax+p(5))*ax+p(6))*ax+
    -     +      p(7))*ax + p(8)
    -      bot = ((((((q(1)*ax+q(2))*ax+q(3))*ax+q(4))*ax+q(5))*ax+q(6))*ax+
    -     +      q(7))*ax + q(8)
    -      erf = 0.5D0 + (0.5D0-exp(-x*x)*top/bot)
    -      IF (x.LT.0.0D0) erf = -erf
    -      RETURN
    -C
    -   20 IF (ax.GE.5.8D0) GO TO 30
    -      x2 = x*x
    -      t = 1.0D0/x2
    -      top = (((r(1)*t+r(2))*t+r(3))*t+r(4))*t + r(5)
    -      bot = (((s(1)*t+s(2))*t+s(3))*t+s(4))*t + 1.0D0
    -      erf = (c-top/ (x2*bot))/ax
    -      erf = 0.5D0 + (0.5D0-exp(-x2)*erf)
    -      IF (x.LT.0.0D0) erf = -erf
    -      RETURN
    -C
    -   30 erf = sign(1.0D0,x)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/erfc1.f b/scipy-0.10.1/scipy/special/cdflib/erfc1.f
    deleted file mode 100644
    index 53d6023064..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/erfc1.f
    +++ /dev/null
    @@ -1,111 +0,0 @@
    -      DOUBLE PRECISION FUNCTION erfc1(ind,x)
    -C-----------------------------------------------------------------------
    -C         EVALUATION OF THE COMPLEMENTARY ERROR FUNCTION
    -C
    -C          ERFC1(IND,X) = ERFC(X)            IF IND = 0
    -C          ERFC1(IND,X) = EXP(X*X)*ERFC(X)   OTHERWISE
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -      INTEGER ind
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION ax,bot,c,e,t,top,w
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION a(5),b(3),p(8),q(8),r(5),s(4)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION exparg
    -      EXTERNAL exparg
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,exp
    -C     ..
    -C     .. Data statements ..
    -C-------------------------
    -C-------------------------
    -C-------------------------
    -C-------------------------
    -      DATA c/.564189583547756D0/
    -      DATA a(1)/.771058495001320D-04/,a(2)/-.133733772997339D-02/,
    -     +     a(3)/.323076579225834D-01/,a(4)/.479137145607681D-01/,
    -     +     a(5)/.128379167095513D+00/
    -      DATA b(1)/.301048631703895D-02/,b(2)/.538971687740286D-01/,
    -     +     b(3)/.375795757275549D+00/
    -      DATA p(1)/-1.36864857382717D-07/,p(2)/5.64195517478974D-01/,
    -     +     p(3)/7.21175825088309D+00/,p(4)/4.31622272220567D+01/,
    -     +     p(5)/1.52989285046940D+02/,p(6)/3.39320816734344D+02/,
    -     +     p(7)/4.51918953711873D+02/,p(8)/3.00459261020162D+02/
    -      DATA q(1)/1.00000000000000D+00/,q(2)/1.27827273196294D+01/,
    -     +     q(3)/7.70001529352295D+01/,q(4)/2.77585444743988D+02/,
    -     +     q(5)/6.38980264465631D+02/,q(6)/9.31354094850610D+02/,
    -     +     q(7)/7.90950925327898D+02/,q(8)/3.00459260956983D+02/
    -      DATA r(1)/2.10144126479064D+00/,r(2)/2.62370141675169D+01/,
    -     +     r(3)/2.13688200555087D+01/,r(4)/4.65807828718470D+00/,
    -     +     r(5)/2.82094791773523D-01/
    -      DATA s(1)/9.41537750555460D+01/,s(2)/1.87114811799590D+02/,
    -     +     s(3)/9.90191814623914D+01/,s(4)/1.80124575948747D+01/
    -C     ..
    -C     .. Executable Statements ..
    -C-------------------------
    -C
    -C                     ABS(X) .LE. 0.5
    -C
    -      ax = abs(x)
    -      IF (ax.GT.0.5D0) GO TO 10
    -      t = x*x
    -      top = ((((a(1)*t+a(2))*t+a(3))*t+a(4))*t+a(5)) + 1.0D0
    -      bot = ((b(1)*t+b(2))*t+b(3))*t + 1.0D0
    -      erfc1 = 0.5D0 + (0.5D0-x* (top/bot))
    -      IF (ind.NE.0) erfc1 = exp(t)*erfc1
    -      RETURN
    -C
    -C                  0.5 .LT. ABS(X) .LE. 4
    -C
    -   10 IF (ax.GT.4.0D0) GO TO 20
    -      top = ((((((p(1)*ax+p(2))*ax+p(3))*ax+p(4))*ax+p(5))*ax+p(6))*ax+
    -     +      p(7))*ax + p(8)
    -      bot = ((((((q(1)*ax+q(2))*ax+q(3))*ax+q(4))*ax+q(5))*ax+q(6))*ax+
    -     +      q(7))*ax + q(8)
    -      erfc1 = top/bot
    -      GO TO 40
    -C
    -C                      ABS(X) .GT. 4
    -C
    -   20 IF (x.LE.-5.6D0) GO TO 60
    -      IF (ind.NE.0) GO TO 30
    -      IF (x.GT.100.0D0) GO TO 70
    -      IF (x*x.GT.-exparg(1)) GO TO 70
    -C
    -   30 t = (1.0D0/x)**2
    -      top = (((r(1)*t+r(2))*t+r(3))*t+r(4))*t + r(5)
    -      bot = (((s(1)*t+s(2))*t+s(3))*t+s(4))*t + 1.0D0
    -      erfc1 = (c-t*top/bot)/ax
    -C
    -C                      FINAL ASSEMBLY
    -C
    -   40 IF (ind.EQ.0) GO TO 50
    -      IF (x.LT.0.0D0) erfc1 = 2.0D0*exp(x*x) - erfc1
    -      RETURN
    -
    -   50 w = dble(x)*dble(x)
    -      t = w
    -      e = w - dble(t)
    -      erfc1 = ((0.5D0+ (0.5D0-e))*exp(-t))*erfc1
    -      IF (x.LT.0.0D0) erfc1 = 2.0D0 - erfc1
    -      RETURN
    -C
    -C             LIMIT VALUE FOR LARGE NEGATIVE X
    -C
    -   60 erfc1 = 2.0D0
    -      IF (ind.NE.0) erfc1 = 2.0D0*exp(x*x)
    -      RETURN
    -C
    -C             LIMIT VALUE FOR LARGE POSITIVE X
    -C                       WHEN IND = 0
    -C
    -   70 erfc1 = 0.0D0
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/esum.f b/scipy-0.10.1/scipy/special/cdflib/esum.f
    deleted file mode 100644
    index b463a0b89b..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/esum.f
    +++ /dev/null
    @@ -1,35 +0,0 @@
    -      DOUBLE PRECISION FUNCTION esum(mu,x)
    -C-----------------------------------------------------------------------
    -C                    EVALUATION OF EXP(MU + X)
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -      INTEGER mu
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION w
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC exp
    -C     ..
    -C     .. Executable Statements ..
    -
    -      IF (x.GT.0.0D0) GO TO 10
    -C
    -      IF (mu.LT.0) GO TO 20
    -      w = mu + x
    -      IF (w.GT.0.0D0) GO TO 20
    -      esum = exp(w)
    -      RETURN
    -C
    -   10 IF (mu.GT.0) GO TO 20
    -      w = mu + x
    -      IF (w.LT.0.0D0) GO TO 20
    -      esum = exp(w)
    -      RETURN
    -C
    -   20 w = mu
    -      esum = exp(w)*exp(x)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/exparg.f b/scipy-0.10.1/scipy/special/cdflib/exparg.f
    deleted file mode 100644
    index fa7a4cd103..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/exparg.f
    +++ /dev/null
    @@ -1,51 +0,0 @@
    -      DOUBLE PRECISION FUNCTION exparg(l)
    -C--------------------------------------------------------------------
    -C     IF L = 0 THEN  EXPARG(L) = THE LARGEST POSITIVE W FOR WHICH
    -C     EXP(W) CAN BE COMPUTED.
    -C
    -C     IF L IS NONZERO THEN  EXPARG(L) = THE LARGEST NEGATIVE W FOR
    -C     WHICH THE COMPUTED VALUE OF EXP(W) IS NONZERO.
    -C
    -C     NOTE... ONLY AN APPROXIMATE VALUE FOR EXPARG(L) IS NEEDED.
    -C--------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      INTEGER l
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION lnb
    -      INTEGER b,m
    -C     ..
    -C     .. External Functions ..
    -      INTEGER ipmpar
    -      EXTERNAL ipmpar
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble,dlog
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      b = ipmpar(4)
    -      IF (b.NE.2) GO TO 10
    -      lnb = .69314718055995D0
    -      GO TO 40
    -
    -   10 IF (b.NE.8) GO TO 20
    -      lnb = 2.0794415416798D0
    -      GO TO 40
    -
    -   20 IF (b.NE.16) GO TO 30
    -      lnb = 2.7725887222398D0
    -      GO TO 40
    -
    -   30 lnb = dlog(dble(b))
    -C
    -   40 IF (l.EQ.0) GO TO 50
    -      m = ipmpar(9) - 1
    -      exparg = 0.99999D0* (m*lnb)
    -      RETURN
    -
    -   50 m = ipmpar(10)
    -      exparg = 0.99999D0* (m*lnb)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/fpser.f b/scipy-0.10.1/scipy/special/cdflib/fpser.f
    deleted file mode 100644
    index ddbabb1a33..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/fpser.f
    +++ /dev/null
    @@ -1,51 +0,0 @@
    -      DOUBLE PRECISION FUNCTION fpser(a,b,x,eps)
    -C-----------------------------------------------------------------------
    -C
    -C                 EVALUATION OF I (A,B)
    -C                                X
    -C
    -C          FOR B .LT. MIN(EPS,EPS*A) AND X .LE. 0.5.
    -C
    -C-----------------------------------------------------------------------
    -C
    -C                  SET  FPSER = X**A
    -C
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b,eps,x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION an,c,s,t,tol
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION exparg
    -      EXTERNAL exparg
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dlog,exp
    -C     ..
    -C     .. Executable Statements ..
    -
    -      fpser = 1.0D0
    -      IF (a.LE.1.D-3*eps) GO TO 10
    -      fpser = 0.0D0
    -      t = a*dlog(x)
    -      IF (t.LT.exparg(1)) RETURN
    -      fpser = exp(t)
    -C
    -C                NOTE THAT 1/B(A,B) = B
    -C
    -   10 fpser = (b/a)*fpser
    -      tol = eps/a
    -      an = a + 1.0D0
    -      t = x
    -      s = t/an
    -   20 an = an + 1.0D0
    -      t = x*t
    -      c = t/an
    -      s = s + c
    -      IF (abs(c).GT.tol) GO TO 20
    -C
    -      fpser = fpser* (1.0D0+a*s)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gam1.f b/scipy-0.10.1/scipy/special/cdflib/gam1.f
    deleted file mode 100644
    index 642a71e595..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gam1.f
    +++ /dev/null
    @@ -1,66 +0,0 @@
    -      DOUBLE PRECISION FUNCTION gam1(a)
    -C     ------------------------------------------------------------------
    -C     COMPUTATION OF 1/GAMMA(A+1) - 1  FOR -0.5 .LE. A .LE. 1.5
    -C     ------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION bot,d,s1,s2,t,top,w
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION p(7),q(5),r(9)
    -C     ..
    -C     .. Data statements ..
    -C     -------------------
    -C     -------------------
    -C     -------------------
    -C     -------------------
    -      DATA p(1)/.577215664901533D+00/,p(2)/-.409078193005776D+00/,
    -     +     p(3)/-.230975380857675D+00/,p(4)/.597275330452234D-01/,
    -     +     p(5)/.766968181649490D-02/,p(6)/-.514889771323592D-02/,
    -     +     p(7)/.589597428611429D-03/
    -      DATA q(1)/.100000000000000D+01/,q(2)/.427569613095214D+00/,
    -     +     q(3)/.158451672430138D+00/,q(4)/.261132021441447D-01/,
    -     +     q(5)/.423244297896961D-02/
    -      DATA r(1)/-.422784335098468D+00/,r(2)/-.771330383816272D+00/,
    -     +     r(3)/-.244757765222226D+00/,r(4)/.118378989872749D+00/,
    -     +     r(5)/.930357293360349D-03/,r(6)/-.118290993445146D-01/,
    -     +     r(7)/.223047661158249D-02/,r(8)/.266505979058923D-03/,
    -     +     r(9)/-.132674909766242D-03/
    -      DATA s1/.273076135303957D+00/,s2/.559398236957378D-01/
    -C     ..
    -C     .. Executable Statements ..
    -C     -------------------
    -      t = a
    -      d = a - 0.5D0
    -      IF (d.GT.0.0D0) t = d - 0.5D0
    -      IF (t.lt.0) GO TO 40
    -      IF (t.eq.0) GO TO 10
    -      GO TO 20
    -C
    -   10 gam1 = 0.0D0
    -      RETURN
    -C
    -   20 top = (((((p(7)*t+p(6))*t+p(5))*t+p(4))*t+p(3))*t+p(2))*t + p(1)
    -      bot = (((q(5)*t+q(4))*t+q(3))*t+q(2))*t + 1.0D0
    -      w = top/bot
    -      IF (d.GT.0.0D0) GO TO 30
    -      gam1 = a*w
    -      RETURN
    -
    -   30 gam1 = (t/a)* ((w-0.5D0)-0.5D0)
    -      RETURN
    -C
    -   40 top = (((((((r(9)*t+r(8))*t+r(7))*t+r(6))*t+r(5))*t+r(4))*t+r(3))*
    -     +      t+r(2))*t + r(1)
    -      bot = (s2*t+s1)*t + 1.0D0
    -      w = top/bot
    -      IF (d.GT.0.0D0) GO TO 50
    -      gam1 = a* ((w+0.5D0)+0.5D0)
    -      RETURN
    -
    -   50 gam1 = t*w/a
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gaminv.f b/scipy-0.10.1/scipy/special/cdflib/gaminv.f
    deleted file mode 100644
    index 9f57477a99..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gaminv.f
    +++ /dev/null
    @@ -1,355 +0,0 @@
    -      SUBROUTINE gaminv(a,x,x0,p,q,ierr)
    -C ----------------------------------------------------------------------
    -C            INVERSE INCOMPLETE GAMMA RATIO FUNCTION
    -C
    -C     GIVEN POSITIVE A, AND NONEGATIVE P AND Q WHERE P + Q = 1.
    -C     THEN X IS COMPUTED WHERE P(A,X) = P AND Q(A,X) = Q. SCHRODER
    -C     ITERATION IS EMPLOYED. THE ROUTINE ATTEMPTS TO COMPUTE X
    -C     TO 10 SIGNIFICANT DIGITS IF THIS IS POSSIBLE FOR THE
    -C     PARTICULAR COMPUTER ARITHMETIC BEING USED.
    -C
    -C                      ------------
    -C
    -C     X IS A VARIABLE. IF P = 0 THEN X IS ASSIGNED THE VALUE 0,
    -C     AND IF Q = 0 THEN X IS SET TO THE LARGEST FLOATING POINT
    -C     NUMBER AVAILABLE. OTHERWISE, GAMINV ATTEMPTS TO OBTAIN
    -C     A SOLUTION FOR P(A,X) = P AND Q(A,X) = Q. IF THE ROUTINE
    -C     IS SUCCESSFUL THEN THE SOLUTION IS STORED IN X.
    -C
    -C     X0 IS AN OPTIONAL INITIAL APPROXIMATION FOR X. IF THE USER
    -C     DOES NOT WISH TO SUPPLY AN INITIAL APPROXIMATION, THEN SET
    -C     X0 .LE. 0.
    -C
    -C     IERR IS A VARIABLE THAT REPORTS THE STATUS OF THE RESULTS.
    -C     WHEN THE ROUTINE TERMINATES, IERR HAS ONE OF THE FOLLOWING
    -C     VALUES ...
    -C
    -C       IERR =  0    THE SOLUTION WAS OBTAINED. ITERATION WAS
    -C                    NOT USED.
    -C       IERR.GT.0    THE SOLUTION WAS OBTAINED. IERR ITERATIONS
    -C                    WERE PERFORMED.
    -C       IERR = -2    (INPUT ERROR) A .LE. 0
    -C       IERR = -3    NO SOLUTION WAS OBTAINED. THE RATIO Q/A
    -C                    IS TOO LARGE.
    -C       IERR = -4    (INPUT ERROR) P + Q .NE. 1
    -C       IERR = -6    20 ITERATIONS WERE PERFORMED. THE MOST
    -C                    RECENT VALUE OBTAINED FOR X IS GIVEN.
    -C                    THIS CANNOT OCCUR IF X0 .LE. 0.
    -C       IERR = -7    ITERATION FAILED. NO VALUE IS GIVEN FOR X.
    -C                    THIS MAY OCCUR WHEN X IS APPROXIMATELY 0.
    -C       IERR = -8    A VALUE FOR X HAS BEEN OBTAINED, BUT THE
    -C                    ROUTINE IS NOT CERTAIN OF ITS ACCURACY.
    -C                    ITERATION CANNOT BE PERFORMED IN THIS
    -C                    CASE. IF X0 .LE. 0, THIS CAN OCCUR ONLY
    -C                    WHEN P OR Q IS APPROXIMATELY 0. IF X0 IS
    -C                    POSITIVE THEN THIS CAN OCCUR WHEN A IS
    -C                    EXCEEDINGLY CLOSE TO X AND A IS EXTREMELY
    -C                    LARGE (SAY A .GE. 1.E20).
    -C ----------------------------------------------------------------------
    -C     WRITTEN BY ALFRED H. MORRIS, JR.
    -C        NAVAL SURFACE WEAPONS CENTER
    -C        DAHLGREN, VIRGINIA
    -C     -------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,p,q,x,x0
    -      INTEGER ierr
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a0,a1,a2,a3,am1,amax,ap1,ap2,ap3,apn,b,b1,b2,b3,
    -     +                 b4,c,c1,c2,c3,c4,c5,d,e,e2,eps,g,h,ln10,pn,qg,qn,
    -     +                 r,rta,s,s2,sum,t,tol,u,w,xmax,xmin,xn,y,z
    -      INTEGER iop
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION amin(2),bmin(2),dmin(2),emin(2),eps0(2)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION alnrel,gamln,gamln1,gamma,rcomp,spmpar
    -      EXTERNAL alnrel,gamln,gamln1,gamma,rcomp,spmpar
    -C     ..
    -C     .. External Subroutines ..
    -      EXTERNAL gratio
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog,dmax1,exp,sqrt
    -C     ..
    -C     .. Data statements ..
    -C     -------------------
    -C     LN10 = LN(10)
    -C     C = EULER CONSTANT
    -C     -------------------
    -C     -------------------
    -C     -------------------
    -C     -------------------
    -      DATA ln10/2.302585D0/
    -      DATA c/.577215664901533D0/
    -      DATA a0/3.31125922108741D0/,a1/11.6616720288968D0/,
    -     +     a2/4.28342155967104D0/,a3/.213623493715853D0/
    -      DATA b1/6.61053765625462D0/,b2/6.40691597760039D0/,
    -     +     b3/1.27364489782223D0/,b4/.036117081018842D0/
    -      DATA eps0(1)/1.D-10/,eps0(2)/1.D-08/
    -      DATA amin(1)/500.0D0/,amin(2)/100.0D0/
    -      DATA bmin(1)/1.D-28/,bmin(2)/1.D-13/
    -      DATA dmin(1)/1.D-06/,dmin(2)/1.D-04/
    -      DATA emin(1)/2.D-03/,emin(2)/6.D-03/
    -      DATA tol/1.D-5/
    -C     ..
    -C     .. Executable Statements ..
    -C     -------------------
    -C     ****** E, XMIN, AND XMAX ARE MACHINE DEPENDENT CONSTANTS.
    -C            E IS THE SMALLEST NUMBER FOR WHICH 1.0 + E .GT. 1.0.
    -C            XMIN IS THE SMALLEST POSITIVE NUMBER AND XMAX IS THE
    -C            LARGEST POSITIVE NUMBER.
    -C
    -      e = spmpar(1)
    -      xmin = spmpar(2)
    -      xmax = spmpar(3)
    -C     -------------------
    -      x = 0.0D0
    -      IF (a.LE.0.0D0) GO TO 300
    -      t = dble(p) + dble(q) - 1.D0
    -      IF (abs(t).GT.e) GO TO 320
    -C
    -      ierr = 0
    -      IF (p.EQ.0.0D0) RETURN
    -      IF (q.EQ.0.0D0) GO TO 270
    -      IF (a.EQ.1.0D0) GO TO 280
    -C
    -      e2 = 2.0D0*e
    -      amax = 0.4D-10/ (e*e)
    -      iop = 1
    -      IF (e.GT.1.D-10) iop = 2
    -      eps = eps0(iop)
    -      xn = x0
    -      IF (x0.GT.0.0D0) GO TO 160
    -C
    -C        SELECTION OF THE INITIAL APPROXIMATION XN OF X
    -C                       WHEN A .LT. 1
    -C
    -      IF (a.GT.1.0D0) GO TO 80
    -      g = gamma(a+1.0D0)
    -      qg = q*g
    -      IF (qg.EQ.0.0D0) GO TO 360
    -      b = qg/a
    -      IF (qg.GT.0.6D0*a) GO TO 40
    -      IF (a.GE.0.30D0 .OR. b.LT.0.35D0) GO TO 10
    -      t = exp(- (b+c))
    -      u = t*exp(t)
    -      xn = t*exp(u)
    -      GO TO 160
    -C
    -   10 IF (b.GE.0.45D0) GO TO 40
    -      IF (b.EQ.0.0D0) GO TO 360
    -      y = -dlog(b)
    -      s = 0.5D0 + (0.5D0-a)
    -      z = dlog(y)
    -      t = y - s*z
    -      IF (b.LT.0.15D0) GO TO 20
    -      xn = y - s*dlog(t) - dlog(1.0D0+s/ (t+1.0D0))
    -      GO TO 220
    -
    -   20 IF (b.LE.0.01D0) GO TO 30
    -      u = ((t+2.0D0* (3.0D0-a))*t+ (2.0D0-a)* (3.0D0-a))/
    -     +    ((t+ (5.0D0-a))*t+2.0D0)
    -      xn = y - s*dlog(t) - dlog(u)
    -      GO TO 220
    -
    -   30 c1 = -s*z
    -      c2 = -s* (1.0D0+c1)
    -      c3 = s* ((0.5D0*c1+ (2.0D0-a))*c1+ (2.5D0-1.5D0*a))
    -      c4 = -s* (((c1/3.0D0+ (2.5D0-1.5D0*a))*c1+ ((a-6.0D0)*a+7.0D0))*
    -     +     c1+ ((11.0D0*a-46)*a+47.0D0)/6.0D0)
    -      c5 = -s* ((((-c1/4.0D0+ (11.0D0*a-17.0D0)/6.0D0)*c1+ ((-3.0D0*a+
    -     +     13.0D0)*a-13.0D0))*c1+0.5D0* (((2.0D0*a-25.0D0)*a+72.0D0)*a-
    -     +     61.0D0))*c1+ (((25.0D0*a-195.0D0)*a+477.0D0)*a-379.0D0)/
    -     +     12.0D0)
    -      xn = ((((c5/y+c4)/y+c3)/y+c2)/y+c1) + y
    -      IF (a.GT.1.0D0) GO TO 220
    -      IF (b.GT.bmin(iop)) GO TO 220
    -      x = xn
    -      RETURN
    -C
    -   40 IF (b*q.GT.1.D-8) GO TO 50
    -      xn = exp(- (q/a+c))
    -      GO TO 70
    -
    -   50 IF (p.LE.0.9D0) GO TO 60
    -      xn = exp((alnrel(-q)+gamln1(a))/a)
    -      GO TO 70
    -
    -   60 xn = exp(dlog(p*g)/a)
    -   70 IF (xn.EQ.0.0D0) GO TO 310
    -      t = 0.5D0 + (0.5D0-xn/ (a+1.0D0))
    -      xn = xn/t
    -      GO TO 160
    -C
    -C        SELECTION OF THE INITIAL APPROXIMATION XN OF X
    -C                       WHEN A .GT. 1
    -C
    -   80 IF (q.LE.0.5D0) GO TO 90
    -      w = dlog(p)
    -      GO TO 100
    -
    -   90 w = dlog(q)
    -  100 t = sqrt(-2.0D0*w)
    -      s = t - (((a3*t+a2)*t+a1)*t+a0)/ ((((b4*t+b3)*t+b2)*t+b1)*t+1.0D0)
    -      IF (q.GT.0.5D0) s = -s
    -C
    -      rta = sqrt(a)
    -      s2 = s*s
    -      xn = a + s*rta + (s2-1.0D0)/3.0D0 + s* (s2-7.0D0)/ (36.0D0*rta) -
    -     +     ((3.0D0*s2+7.0D0)*s2-16.0D0)/ (810.0D0*a) +
    -     +     s* ((9.0D0*s2+256.0D0)*s2-433.0D0)/ (38880.0D0*a*rta)
    -      xn = dmax1(xn,0.0D0)
    -      IF (a.LT.amin(iop)) GO TO 110
    -      x = xn
    -      d = 0.5D0 + (0.5D0-x/a)
    -      IF (abs(d).LE.dmin(iop)) RETURN
    -C
    -  110 IF (p.LE.0.5D0) GO TO 130
    -      IF (xn.LT.3.0D0*a) GO TO 220
    -      y = - (w+gamln(a))
    -      d = dmax1(2.0D0,a* (a-1.0D0))
    -      IF (y.LT.ln10*d) GO TO 120
    -      s = 1.0D0 - a
    -      z = dlog(y)
    -      GO TO 30
    -
    -  120 t = a - 1.0D0
    -      xn = y + t*dlog(xn) - alnrel(-t/ (xn+1.0D0))
    -      xn = y + t*dlog(xn) - alnrel(-t/ (xn+1.0D0))
    -      GO TO 220
    -C
    -  130 ap1 = a + 1.0D0
    -      IF (xn.GT.0.70D0*ap1) GO TO 170
    -      w = w + gamln(ap1)
    -      IF (xn.GT.0.15D0*ap1) GO TO 140
    -      ap2 = a + 2.0D0
    -      ap3 = a + 3.0D0
    -      x = exp((w+x)/a)
    -      x = exp((w+x-dlog(1.0D0+ (x/ap1)* (1.0D0+x/ap2)))/a)
    -      x = exp((w+x-dlog(1.0D0+ (x/ap1)* (1.0D0+x/ap2)))/a)
    -      x = exp((w+x-dlog(1.0D0+ (x/ap1)* (1.0D0+ (x/ap2)* (1.0D0+
    -     +    x/ap3))))/a)
    -      xn = x
    -      IF (xn.GT.1.D-2*ap1) GO TO 140
    -      IF (xn.LE.emin(iop)*ap1) RETURN
    -      GO TO 170
    -C
    -  140 apn = ap1
    -      t = xn/apn
    -      sum = 1.0D0 + t
    -  150 apn = apn + 1.0D0
    -      t = t* (xn/apn)
    -      sum = sum + t
    -      IF (t.GT.1.D-4) GO TO 150
    -      t = w - dlog(sum)
    -      xn = exp((xn+t)/a)
    -      xn = xn* (1.0D0- (a*dlog(xn)-xn-t)/ (a-xn))
    -      GO TO 170
    -C
    -C                 SCHRODER ITERATION USING P
    -C
    -  160 IF (p.GT.0.5D0) GO TO 220
    -  170 IF (p.LE.1.D10*xmin) GO TO 350
    -      am1 = (a-0.5D0) - 0.5D0
    -  180 IF (a.LE.amax) GO TO 190
    -      d = 0.5D0 + (0.5D0-xn/a)
    -      IF (abs(d).LE.e2) GO TO 350
    -C
    -  190 IF (ierr.GE.20) GO TO 330
    -      ierr = ierr + 1
    -      CALL gratio(a,xn,pn,qn,0)
    -      IF (pn.EQ.0.0D0 .OR. qn.EQ.0.0D0) GO TO 350
    -      r = rcomp(a,xn)
    -      IF (r.EQ.0.0D0) GO TO 350
    -      t = (pn-p)/r
    -      w = 0.5D0* (am1-xn)
    -      IF (abs(t).LE.0.1D0 .AND. abs(w*t).LE.0.1D0) GO TO 200
    -      x = xn* (1.0D0-t)
    -      IF (x.LE.0.0D0) GO TO 340
    -      d = abs(t)
    -      GO TO 210
    -C
    -  200 h = t* (1.0D0+w*t)
    -      x = xn* (1.0D0-h)
    -      IF (x.LE.0.0D0) GO TO 340
    -      IF (abs(w).GE.1.0D0 .AND. abs(w)*t*t.LE.eps) RETURN
    -      d = abs(h)
    -  210 xn = x
    -      IF (d.GT.tol) GO TO 180
    -      IF (d.LE.eps) RETURN
    -      IF (abs(p-pn).LE.tol*p) RETURN
    -      GO TO 180
    -C
    -C                 SCHRODER ITERATION USING Q
    -C
    -  220 IF (q.LE.1.D10*xmin) GO TO 350
    -      am1 = (a-0.5D0) - 0.5D0
    -  230 IF (a.LE.amax) GO TO 240
    -      d = 0.5D0 + (0.5D0-xn/a)
    -      IF (abs(d).LE.e2) GO TO 350
    -C
    -  240 IF (ierr.GE.20) GO TO 330
    -      ierr = ierr + 1
    -      CALL gratio(a,xn,pn,qn,0)
    -      IF (pn.EQ.0.0D0 .OR. qn.EQ.0.0D0) GO TO 350
    -      r = rcomp(a,xn)
    -      IF (r.EQ.0.0D0) GO TO 350
    -      t = (q-qn)/r
    -      w = 0.5D0* (am1-xn)
    -      IF (abs(t).LE.0.1D0 .AND. abs(w*t).LE.0.1D0) GO TO 250
    -      x = xn* (1.0D0-t)
    -      IF (x.LE.0.0D0) GO TO 340
    -      d = abs(t)
    -      GO TO 260
    -C
    -  250 h = t* (1.0D0+w*t)
    -      x = xn* (1.0D0-h)
    -      IF (x.LE.0.0D0) GO TO 340
    -      IF (abs(w).GE.1.0D0 .AND. abs(w)*t*t.LE.eps) RETURN
    -      d = abs(h)
    -  260 xn = x
    -      IF (d.GT.tol) GO TO 230
    -      IF (d.LE.eps) RETURN
    -      IF (abs(q-qn).LE.tol*q) RETURN
    -      GO TO 230
    -C
    -C                       SPECIAL CASES
    -C
    -  270 x = xmax
    -      RETURN
    -C
    -  280 IF (q.LT.0.9D0) GO TO 290
    -      x = -alnrel(-p)
    -      RETURN
    -
    -  290 x = -dlog(q)
    -      RETURN
    -C
    -C                       ERROR RETURN
    -C
    -  300 ierr = -2
    -      RETURN
    -C
    -  310 ierr = -3
    -      RETURN
    -C
    -  320 ierr = -4
    -      RETURN
    -C
    -  330 ierr = -6
    -      RETURN
    -C
    -  340 ierr = -7
    -      RETURN
    -C
    -  350 x = xn
    -      ierr = -8
    -      RETURN
    -C
    -  360 x = xmax
    -      ierr = -8
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gamln.f b/scipy-0.10.1/scipy/special/cdflib/gamln.f
    deleted file mode 100644
    index 7d8c889d55..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gamln.f
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -      DOUBLE PRECISION FUNCTION gamln(a)
    -C-----------------------------------------------------------------------
    -C            EVALUATION OF LN(GAMMA(A)) FOR POSITIVE A
    -C-----------------------------------------------------------------------
    -C     WRITTEN BY ALFRED H. MORRIS
    -C          NAVAL SURFACE WARFARE CENTER
    -C          DAHLGREN, VIRGINIA
    -C--------------------------
    -C     D = 0.5*(LN(2*PI) - 1)
    -C--------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION c0,c1,c2,c3,c4,c5,d,t,w
    -      INTEGER i,n
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION gamln1
    -      EXTERNAL gamln1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dlog
    -C     ..
    -C     .. Data statements ..
    -C--------------------------
    -      DATA d/.418938533204673D0/
    -      DATA c0/.833333333333333D-01/,c1/-.277777777760991D-02/,
    -     +     c2/.793650666825390D-03/,c3/-.595202931351870D-03/,
    -     +     c4/.837308034031215D-03/,c5/-.165322962780713D-02/
    -C     ..
    -C     .. Executable Statements ..
    -C-----------------------------------------------------------------------
    -      IF (a.GT.0.8D0) GO TO 10
    -      gamln = gamln1(a) - dlog(a)
    -      RETURN
    -
    -   10 IF (a.GT.2.25D0) GO TO 20
    -      t = (a-0.5D0) - 0.5D0
    -      gamln = gamln1(t)
    -      RETURN
    -C
    -   20 IF (a.GE.10.0D0) GO TO 40
    -      n = a - 1.25D0
    -      t = a
    -      w = 1.0D0
    -      DO 30 i = 1,n
    -          t = t - 1.0D0
    -          w = t*w
    -   30 CONTINUE
    -      gamln = gamln1(t-1.0D0) + dlog(w)
    -      RETURN
    -C
    -   40 t = (1.0D0/a)**2
    -      w = (((((c5*t+c4)*t+c3)*t+c2)*t+c1)*t+c0)/a
    -      gamln = (d+w) + (a-0.5D0)* (dlog(a)-1.0D0)
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gamln1.f b/scipy-0.10.1/scipy/special/cdflib/gamln1.f
    deleted file mode 100644
    index 4bf55bf96b..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gamln1.f
    +++ /dev/null
    @@ -1,42 +0,0 @@
    -      DOUBLE PRECISION FUNCTION gamln1(a)
    -C-----------------------------------------------------------------------
    -C     EVALUATION OF LN(GAMMA(1 + A)) FOR -0.2 .LE. A .LE. 1.25
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION p0,p1,p2,p3,p4,p5,p6,q1,q2,q3,q4,q5,q6,r0,r1,r2,
    -     +                 r3,r4,r5,s1,s2,s3,s4,s5,w,x
    -C     ..
    -C     .. Data statements ..
    -C----------------------
    -      DATA p0/.577215664901533D+00/,p1/.844203922187225D+00/,
    -     +     p2/-.168860593646662D+00/,p3/-.780427615533591D+00/,
    -     +     p4/-.402055799310489D+00/,p5/-.673562214325671D-01/,
    -     +     p6/-.271935708322958D-02/
    -      DATA q1/.288743195473681D+01/,q2/.312755088914843D+01/,
    -     +     q3/.156875193295039D+01/,q4/.361951990101499D+00/,
    -     +     q5/.325038868253937D-01/,q6/.667465618796164D-03/
    -      DATA r0/.422784335098467D+00/,r1/.848044614534529D+00/,
    -     +     r2/.565221050691933D+00/,r3/.156513060486551D+00/,
    -     +     r4/.170502484022650D-01/,r5/.497958207639485D-03/
    -      DATA s1/.124313399877507D+01/,s2/.548042109832463D+00/,
    -     +     s3/.101552187439830D+00/,s4/.713309612391000D-02/,
    -     +     s5/.116165475989616D-03/
    -C     ..
    -C     .. Executable Statements ..
    -C----------------------
    -      IF (a.GE.0.6D0) GO TO 10
    -      w = ((((((p6*a+p5)*a+p4)*a+p3)*a+p2)*a+p1)*a+p0)/
    -     +    ((((((q6*a+q5)*a+q4)*a+q3)*a+q2)*a+q1)*a+1.0D0)
    -      gamln1 = -a*w
    -      RETURN
    -C
    -   10 x = (a-0.5D0) - 0.5D0
    -      w = (((((r5*x+r4)*x+r3)*x+r2)*x+r1)*x+r0)/
    -     +    (((((s5*x+s4)*x+s3)*x+s2)*x+s1)*x+1.0D0)
    -      gamln1 = x*w
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gamma_fort.f b/scipy-0.10.1/scipy/special/cdflib/gamma_fort.f
    deleted file mode 100644
    index bafcb48afa..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gamma_fort.f
    +++ /dev/null
    @@ -1,152 +0,0 @@
    -      DOUBLE PRECISION FUNCTION gamma(a)
    -C-----------------------------------------------------------------------
    -C
    -C         EVALUATION OF THE GAMMA FUNCTION FOR REAL ARGUMENTS
    -C
    -C                           -----------
    -C
    -C     GAMMA(A) IS ASSIGNED THE VALUE 0 WHEN THE GAMMA FUNCTION CANNOT
    -C     BE COMPUTED.
    -C
    -C-----------------------------------------------------------------------
    -C     WRITTEN BY ALFRED H. MORRIS, JR.
    -C          NAVAL SURFACE WEAPONS CENTER
    -C          DAHLGREN, VIRGINIA
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION bot,d,g,lnx,pi,r1,r2,r3,r4,r5,s,t,top,w,x,z
    -      INTEGER i,j,m,n
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION p(7),q(7)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION exparg,spmpar
    -      EXTERNAL exparg,spmpar
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog,exp,int,mod,sin
    -C     ..
    -C     .. Data statements ..
    -C--------------------------
    -C     D = 0.5*(LN(2*PI) - 1)
    -C--------------------------
    -C--------------------------
    -C--------------------------
    -      DATA pi/3.1415926535898D0/
    -      DATA d/.41893853320467274178D0/
    -      DATA p(1)/.539637273585445D-03/,p(2)/.261939260042690D-02/,
    -     +     p(3)/.204493667594920D-01/,p(4)/.730981088720487D-01/,
    -     +     p(5)/.279648642639792D+00/,p(6)/.553413866010467D+00/,
    -     +     p(7)/1.0D0/
    -      DATA q(1)/-.832979206704073D-03/,q(2)/.470059485860584D-02/,
    -     +     q(3)/.225211131035340D-01/,q(4)/-.170458969313360D+00/,
    -     +     q(5)/-.567902761974940D-01/,q(6)/.113062953091122D+01/,
    -     +     q(7)/1.0D0/
    -      DATA r1/.820756370353826D-03/,r2/-.595156336428591D-03/,
    -     +     r3/.793650663183693D-03/,r4/-.277777777770481D-02/,
    -     +     r5/.833333333333333D-01/
    -C     ..
    -C     .. Executable Statements ..
    -C--------------------------
    -      gamma = 0.0D0
    -      x = a
    -      IF (abs(a).GE.15.0D0) GO TO 110
    -C-----------------------------------------------------------------------
    -C            EVALUATION OF GAMMA(A) FOR ABS(A) .LT. 15
    -C-----------------------------------------------------------------------
    -      t = 1.0D0
    -      m = int(a) - 1
    -C
    -C     LET T BE THE PRODUCT OF A-J WHEN A .GE. 2
    -C
    -      IF (m.lt.0) GO TO 40
    -      IF (m.eq.0) GO TO 30
    -      GO TO 10
    -   10 DO 20 j = 1,m
    -          x = x - 1.0D0
    -          t = x*t
    -   20 CONTINUE
    -   30 x = x - 1.0D0
    -      GO TO 80
    -C
    -C     LET T BE THE PRODUCT OF A+J WHEN A .LT. 1
    -C
    -   40 t = a
    -      IF (a.GT.0.0D0) GO TO 70
    -      m = -m - 1
    -      IF (m.EQ.0) GO TO 60
    -      DO 50 j = 1,m
    -          x = x + 1.0D0
    -          t = x*t
    -   50 CONTINUE
    -   60 x = (x+0.5D0) + 0.5D0
    -      t = x*t
    -      IF (t.EQ.0.0D0) RETURN
    -C
    -   70 CONTINUE
    -C
    -C     THE FOLLOWING CODE CHECKS IF 1/T CAN OVERFLOW. THIS
    -C     CODE MAY BE OMITTED IF DESIRED.
    -C
    -      IF (abs(t).GE.1.D-30) GO TO 80
    -      IF (abs(t)*spmpar(3).LE.1.0001D0) RETURN
    -      gamma = 1.0D0/t
    -      RETURN
    -C
    -C     COMPUTE GAMMA(1 + X) FOR  0 .LE. X .LT. 1
    -C
    -   80 top = p(1)
    -      bot = q(1)
    -      DO 90 i = 2,7
    -          top = p(i) + x*top
    -          bot = q(i) + x*bot
    -   90 CONTINUE
    -      gamma = top/bot
    -C
    -C     TERMINATION
    -C
    -      IF (a.LT.1.0D0) GO TO 100
    -      gamma = gamma*t
    -      RETURN
    -
    -  100 gamma = gamma/t
    -      RETURN
    -C-----------------------------------------------------------------------
    -C            EVALUATION OF GAMMA(A) FOR ABS(A) .GE. 15
    -C-----------------------------------------------------------------------
    -  110 IF (abs(a).GE.1.D3) RETURN
    -      IF (a.GT.0.0D0) GO TO 120
    -      x = -a
    -      n = x
    -      t = x - n
    -      IF (t.GT.0.9D0) t = 1.0D0 - t
    -      s = sin(pi*t)/pi
    -      IF (mod(n,2).EQ.0) s = -s
    -      IF (s.EQ.0.0D0) RETURN
    -C
    -C     COMPUTE THE MODIFIED ASYMPTOTIC SUM
    -C
    -  120 t = 1.0D0/ (x*x)
    -      g = ((((r1*t+r2)*t+r3)*t+r4)*t+r5)/x
    -C
    -C     ONE MAY REPLACE THE NEXT STATEMENT WITH  LNX = ALOG(X)
    -C     BUT LESS ACCURACY WILL NORMALLY BE OBTAINED.
    -C
    -      lnx = dlog(x)
    -C
    -C     FINAL ASSEMBLY
    -C
    -      z = x
    -      g = (d+g) + (z-0.5D0)* (lnx-1.D0)
    -      w = g
    -      t = g - dble(w)
    -      IF (w.GT.0.99999D0*exparg(0)) RETURN
    -      gamma = exp(w)* (1.0D0+t)
    -      IF (a.LT.0.0D0) gamma = (1.0D0/ (gamma*s))/x
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/grat1.f b/scipy-0.10.1/scipy/special/cdflib/grat1.f
    deleted file mode 100644
    index 7968af16f7..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/grat1.f
    +++ /dev/null
    @@ -1,105 +0,0 @@
    -      SUBROUTINE grat1(a,x,r,p,q,eps)
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,eps,p,q,r,x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a2n,a2nm1,am0,an,an0,b2n,b2nm1,c,cma,g,h,j,l,sum,
    -     +                 t,tol,w,z
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION erf,erfc1,gam1,rexp
    -      EXTERNAL erf,erfc1,gam1,rexp
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dlog,exp,sqrt
    -C     ..
    -C     .. Executable Statements ..
    -C-----------------------------------------------------------------------
    -C        EVALUATION OF THE INCOMPLETE GAMMA RATIO FUNCTIONS
    -C                      P(A,X) AND Q(A,X)
    -C
    -C     IT IS ASSUMED THAT A .LE. 1.  EPS IS THE TOLERANCE TO BE USED.
    -C     THE INPUT ARGUMENT R HAS THE VALUE E**(-X)*X**A/GAMMA(A).
    -C-----------------------------------------------------------------------
    -      IF (a*x.EQ.0.0D0) GO TO 120
    -      IF (a.EQ.0.5D0) GO TO 100
    -      IF (x.LT.1.1D0) GO TO 10
    -      GO TO 60
    -C
    -C             TAYLOR SERIES FOR P(A,X)/X**A
    -C
    -   10 an = 3.0D0
    -      c = x
    -      sum = x/ (a+3.0D0)
    -      tol = 0.1D0*eps/ (a+1.0D0)
    -   20 an = an + 1.0D0
    -      c = -c* (x/an)
    -      t = c/ (a+an)
    -      sum = sum + t
    -      IF (abs(t).GT.tol) GO TO 20
    -      j = a*x* ((sum/6.0D0-0.5D0/ (a+2.0D0))*x+1.0D0/ (a+1.0D0))
    -C
    -      z = a*dlog(x)
    -      h = gam1(a)
    -      g = 1.0D0 + h
    -      IF (x.LT.0.25D0) GO TO 30
    -      IF (a.LT.x/2.59D0) GO TO 50
    -      GO TO 40
    -
    -   30 IF (z.GT.-.13394D0) GO TO 50
    -C
    -   40 w = exp(z)
    -      p = w*g* (0.5D0+ (0.5D0-j))
    -      q = 0.5D0 + (0.5D0-p)
    -      RETURN
    -C
    -   50 l = rexp(z)
    -      w = 0.5D0 + (0.5D0+l)
    -      q = (w*j-l)*g - h
    -      IF (q.LT.0.0D0) GO TO 90
    -      p = 0.5D0 + (0.5D0-q)
    -      RETURN
    -C
    -C              CONTINUED FRACTION EXPANSION
    -C
    -   60 a2nm1 = 1.0D0
    -      a2n = 1.0D0
    -      b2nm1 = x
    -      b2n = x + (1.0D0-a)
    -      c = 1.0D0
    -   70 a2nm1 = x*a2n + c*a2nm1
    -      b2nm1 = x*b2n + c*b2nm1
    -      am0 = a2nm1/b2nm1
    -      c = c + 1.0D0
    -      cma = c - a
    -      a2n = a2nm1 + cma*a2n
    -      b2n = b2nm1 + cma*b2n
    -      an0 = a2n/b2n
    -      IF (abs(an0-am0).GE.eps*an0) GO TO 70
    -      q = r*an0
    -      p = 0.5D0 + (0.5D0-q)
    -      RETURN
    -C
    -C                SPECIAL CASES
    -C
    -   80 p = 0.0D0
    -      q = 1.0D0
    -      RETURN
    -C
    -   90 p = 1.0D0
    -      q = 0.0D0
    -      RETURN
    -C
    -  100 IF (x.GE.0.25D0) GO TO 110
    -      p = erf(sqrt(x))
    -      q = 0.5D0 + (0.5D0-p)
    -      RETURN
    -
    -  110 q = erfc1(0,sqrt(x))
    -      p = 0.5D0 + (0.5D0-q)
    -      RETURN
    -C
    -  120 IF (x.LE.a) GO TO 80
    -      GO TO 90
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gratio.f b/scipy-0.10.1/scipy/special/cdflib/gratio.f
    deleted file mode 100644
    index d8be40d32b..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gratio.f
    +++ /dev/null
    @@ -1,420 +0,0 @@
    -      SUBROUTINE gratio(a,x,ans,qans,ind)
    -C ----------------------------------------------------------------------
    -C        EVALUATION OF THE INCOMPLETE GAMMA RATIO FUNCTIONS
    -C                      P(A,X) AND Q(A,X)
    -C
    -C                        ----------
    -C
    -C     IT IS ASSUMED THAT A AND X ARE NONNEGATIVE, WHERE A AND X
    -C     ARE NOT BOTH 0.
    -C
    -C     ANS AND QANS ARE VARIABLES. GRATIO ASSIGNS ANS THE VALUE
    -C     P(A,X) AND QANS THE VALUE Q(A,X). IND MAY BE ANY INTEGER.
    -C     IF IND = 0 THEN THE USER IS REQUESTING AS MUCH ACCURACY AS
    -C     POSSIBLE (UP TO 14 SIGNIFICANT DIGITS). OTHERWISE, IF
    -C     IND = 1 THEN ACCURACY IS REQUESTED TO WITHIN 1 UNIT OF THE
    -C     6-TH SIGNIFICANT DIGIT, AND IF IND .NE. 0,1 THEN ACCURACY
    -C     IS REQUESTED TO WITHIN 1 UNIT OF THE 3RD SIGNIFICANT DIGIT.
    -C
    -C     ERROR RETURN ...
    -C        ANS IS ASSIGNED THE VALUE 2 WHEN A OR X IS NEGATIVE,
    -C     WHEN A*X = 0, OR WHEN P(A,X) AND Q(A,X) ARE INDETERMINANT.
    -C     P(A,X) AND Q(A,X) ARE COMPUTATIONALLY INDETERMINANT WHEN
    -C     X IS EXCEEDINGLY CLOSE TO A AND A IS EXTREMELY LARGE.
    -C ----------------------------------------------------------------------
    -C     WRITTEN BY ALFRED H. MORRIS, JR.
    -C        NAVAL SURFACE WEAPONS CENTER
    -C        DAHLGREN, VIRGINIA
    -C     --------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,ans,qans,x
    -      INTEGER ind
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a2n,a2nm1,acc,alog10,am0,amn,an,an0,apn,b2n,
    -     +                 b2nm1,c,c0,c1,c2,c3,c4,c5,c6,cma,d10,d20,d30,d40,
    -     +                 d50,d60,d70,e,e0,g,h,j,l,r,rt2pin,rta,rtpi,rtx,s,
    -     +                 sum,t,t1,third,tol,twoa,u,w,x0,y,z
    -      INTEGER i,iop,m,max,n
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION acc0(3),big(3),d0(13),d1(12),d2(10),d3(8),d4(6),
    -     +                 d5(4),d6(2),e00(3),wk(20),x00(3)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION erf,erfc1,gam1,gamma,rexp,rlog,spmpar
    -      EXTERNAL erf,erfc1,gam1,gamma,rexp,rlog,spmpar
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,dble,dlog,dmax1,exp,int,sqrt
    -C     ..
    -C     .. Data statements ..
    -C     --------------------
    -C     --------------------
    -C     ALOG10 = LN(10)
    -C     RT2PIN = 1/SQRT(2*PI)
    -C     RTPI   = SQRT(PI)
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -C     --------------------
    -      DATA acc0(1)/5.D-15/,acc0(2)/5.D-7/,acc0(3)/5.D-4/
    -      DATA big(1)/20.0D0/,big(2)/14.0D0/,big(3)/10.0D0/
    -      DATA e00(1)/.25D-3/,e00(2)/.25D-1/,e00(3)/.14D0/
    -      DATA x00(1)/31.0D0/,x00(2)/17.0D0/,x00(3)/9.7D0/
    -      DATA alog10/2.30258509299405D0/
    -      DATA rt2pin/.398942280401433D0/
    -      DATA rtpi/1.77245385090552D0/
    -      DATA third/.333333333333333D0/
    -      DATA d0(1)/.833333333333333D-01/,d0(2)/-.148148148148148D-01/,
    -     +     d0(3)/.115740740740741D-02/,d0(4)/.352733686067019D-03/,
    -     +     d0(5)/-.178755144032922D-03/,d0(6)/.391926317852244D-04/,
    -     +     d0(7)/-.218544851067999D-05/,d0(8)/-.185406221071516D-05/,
    -     +     d0(9)/.829671134095309D-06/,d0(10)/-.176659527368261D-06/,
    -     +     d0(11)/.670785354340150D-08/,d0(12)/.102618097842403D-07/,
    -     +     d0(13)/-.438203601845335D-08/
    -      DATA d10/-.185185185185185D-02/,d1(1)/-.347222222222222D-02/,
    -     +     d1(2)/.264550264550265D-02/,d1(3)/-.990226337448560D-03/,
    -     +     d1(4)/.205761316872428D-03/,d1(5)/-.401877572016461D-06/,
    -     +     d1(6)/-.180985503344900D-04/,d1(7)/.764916091608111D-05/,
    -     +     d1(8)/-.161209008945634D-05/,d1(9)/.464712780280743D-08/,
    -     +     d1(10)/.137863344691572D-06/,d1(11)/-.575254560351770D-07/,
    -     +     d1(12)/.119516285997781D-07/
    -      DATA d20/.413359788359788D-02/,d2(1)/-.268132716049383D-02/,
    -     +     d2(2)/.771604938271605D-03/,d2(3)/.200938786008230D-05/,
    -     +     d2(4)/-.107366532263652D-03/,d2(5)/.529234488291201D-04/,
    -     +     d2(6)/-.127606351886187D-04/,d2(7)/.342357873409614D-07/,
    -     +     d2(8)/.137219573090629D-05/,d2(9)/-.629899213838006D-06/,
    -     +     d2(10)/.142806142060642D-06/
    -      DATA d30/.649434156378601D-03/,d3(1)/.229472093621399D-03/,
    -     +     d3(2)/-.469189494395256D-03/,d3(3)/.267720632062839D-03/,
    -     +     d3(4)/-.756180167188398D-04/,d3(5)/-.239650511386730D-06/,
    -     +     d3(6)/.110826541153473D-04/,d3(7)/-.567495282699160D-05/,
    -     +     d3(8)/.142309007324359D-05/
    -      DATA d40/-.861888290916712D-03/,d4(1)/.784039221720067D-03/,
    -     +     d4(2)/-.299072480303190D-03/,d4(3)/-.146384525788434D-05/,
    -     +     d4(4)/.664149821546512D-04/,d4(5)/-.396836504717943D-04/,
    -     +     d4(6)/.113757269706784D-04/
    -      DATA d50/-.336798553366358D-03/,d5(1)/-.697281375836586D-04/,
    -     +     d5(2)/.277275324495939D-03/,d5(3)/-.199325705161888D-03/,
    -     +     d5(4)/.679778047793721D-04/
    -      DATA d60/.531307936463992D-03/,d6(1)/-.592166437353694D-03/,
    -     +     d6(2)/.270878209671804D-03/
    -      DATA d70/.344367606892378D-03/
    -C     ..
    -C     .. Executable Statements ..
    -C     --------------------
    -C     ****** E IS A MACHINE DEPENDENT CONSTANT. E IS THE SMALLEST
    -C            FLOATING POINT NUMBER FOR WHICH 1.0 + E .GT. 1.0 .
    -C
    -      e = spmpar(1)
    -C
    -C     --------------------
    -      IF (a.LT.0.0D0 .OR. x.LT.0.0D0) GO TO 430
    -      IF (a.EQ.0.0D0 .AND. x.EQ.0.0D0) GO TO 430
    -      IF (a*x.EQ.0.0D0) GO TO 420
    -C
    -      iop = ind + 1
    -      IF (iop.NE.1 .AND. iop.NE.2) iop = 3
    -      acc = dmax1(acc0(iop),e)
    -      e0 = e00(iop)
    -      x0 = x00(iop)
    -C
    -C            SELECT THE APPROPRIATE ALGORITHM
    -C
    -      IF (a.GE.1.0D0) GO TO 10
    -      IF (a.EQ.0.5D0) GO TO 390
    -      IF (x.LT.1.1D0) GO TO 160
    -      t1 = a*dlog(x) - x
    -      u = a*exp(t1)
    -      IF (u.EQ.0.0D0) GO TO 380
    -      r = u* (1.0D0+gam1(a))
    -      GO TO 250
    -C
    -   10 IF (a.GE.big(iop)) GO TO 30
    -      IF (a.GT.x .OR. x.GE.x0) GO TO 20
    -      twoa = a + a
    -      m = int(twoa)
    -      IF (twoa.NE.dble(m)) GO TO 20
    -      i = m/2
    -      IF (a.EQ.dble(i)) GO TO 210
    -      GO TO 220
    -
    -   20 t1 = a*dlog(x) - x
    -      r = exp(t1)/gamma(a)
    -      GO TO 40
    -C
    -   30 l = x/a
    -      IF (l.EQ.0.0D0) GO TO 370
    -      s = 0.5D0 + (0.5D0-l)
    -      z = rlog(l)
    -      IF (z.GE.700.0D0/a) GO TO 410
    -      y = a*z
    -      rta = sqrt(a)
    -      IF (abs(s).LE.e0/rta) GO TO 330
    -      IF (abs(s).LE.0.4D0) GO TO 270
    -C
    -      t = (1.0D0/a)**2
    -      t1 = (((0.75D0*t-1.0D0)*t+3.5D0)*t-105.0D0)/ (a*1260.0D0)
    -      t1 = t1 - y
    -      r = rt2pin*rta*exp(t1)
    -C
    -   40 IF (r.EQ.0.0D0) GO TO 420
    -      IF (x.LE.dmax1(a,alog10)) GO TO 50
    -      IF (x.LT.x0) GO TO 250
    -      GO TO 100
    -C
    -C                 TAYLOR SERIES FOR P/R
    -C
    -   50 apn = a + 1.0D0
    -      t = x/apn
    -      wk(1) = t
    -      DO 60 n = 2,20
    -          apn = apn + 1.0D0
    -          t = t* (x/apn)
    -          IF (t.LE.1.D-3) GO TO 70
    -          wk(n) = t
    -   60 CONTINUE
    -      n = 20
    -C
    -   70 sum = t
    -      tol = 0.5D0*acc
    -   80 apn = apn + 1.0D0
    -      t = t* (x/apn)
    -      sum = sum + t
    -      IF (t.GT.tol) GO TO 80
    -C
    -      max = n - 1
    -      DO 90 m = 1,max
    -          n = n - 1
    -          sum = sum + wk(n)
    -   90 CONTINUE
    -      ans = (r/a)* (1.0D0+sum)
    -      qans = 0.5D0 + (0.5D0-ans)
    -      RETURN
    -C
    -C                 ASYMPTOTIC EXPANSION
    -C
    -  100 amn = a - 1.0D0
    -      t = amn/x
    -      wk(1) = t
    -      DO 110 n = 2,20
    -          amn = amn - 1.0D0
    -          t = t* (amn/x)
    -          IF (abs(t).LE.1.D-3) GO TO 120
    -          wk(n) = t
    -  110 CONTINUE
    -      n = 20
    -C
    -  120 sum = t
    -  130 IF (abs(t).LE.acc) GO TO 140
    -      amn = amn - 1.0D0
    -      t = t* (amn/x)
    -      sum = sum + t
    -      GO TO 130
    -C
    -  140 max = n - 1
    -      DO 150 m = 1,max
    -          n = n - 1
    -          sum = sum + wk(n)
    -  150 CONTINUE
    -      qans = (r/x)* (1.0D0+sum)
    -      ans = 0.5D0 + (0.5D0-qans)
    -      RETURN
    -C
    -C             TAYLOR SERIES FOR P(A,X)/X**A
    -C
    -  160 an = 3.0D0
    -      c = x
    -      sum = x/ (a+3.0D0)
    -      tol = 3.0D0*acc/ (a+1.0D0)
    -  170 an = an + 1.0D0
    -      c = -c* (x/an)
    -      t = c/ (a+an)
    -      sum = sum + t
    -      IF (abs(t).GT.tol) GO TO 170
    -      j = a*x* ((sum/6.0D0-0.5D0/ (a+2.0D0))*x+1.0D0/ (a+1.0D0))
    -C
    -      z = a*dlog(x)
    -      h = gam1(a)
    -      g = 1.0D0 + h
    -      IF (x.LT.0.25D0) GO TO 180
    -      IF (a.LT.x/2.59D0) GO TO 200
    -      GO TO 190
    -
    -  180 IF (z.GT.-.13394D0) GO TO 200
    -C
    -  190 w = exp(z)
    -      ans = w*g* (0.5D0+ (0.5D0-j))
    -      qans = 0.5D0 + (0.5D0-ans)
    -      RETURN
    -C
    -  200 l = rexp(z)
    -      w = 0.5D0 + (0.5D0+l)
    -      qans = (w*j-l)*g - h
    -      IF (qans.LT.0.0D0) GO TO 380
    -      ans = 0.5D0 + (0.5D0-qans)
    -      RETURN
    -C
    -C             FINITE SUMS FOR Q WHEN A .GE. 1
    -C                 AND 2*A IS AN INTEGER
    -C
    -  210 sum = exp(-x)
    -      t = sum
    -      n = 1
    -      c = 0.0D0
    -      GO TO 230
    -C
    -  220 rtx = sqrt(x)
    -      sum = erfc1(0,rtx)
    -      t = exp(-x)/ (rtpi*rtx)
    -      n = 0
    -      c = -0.5D0
    -C
    -  230 IF (n.EQ.i) GO TO 240
    -      n = n + 1
    -      c = c + 1.0D0
    -      t = (x*t)/c
    -      sum = sum + t
    -      GO TO 230
    -
    -  240 qans = sum
    -      ans = 0.5D0 + (0.5D0-qans)
    -      RETURN
    -C
    -C              CONTINUED FRACTION EXPANSION
    -C
    -  250 tol = dmax1(5.0D0*e,acc)
    -      a2nm1 = 1.0D0
    -      a2n = 1.0D0
    -      b2nm1 = x
    -      b2n = x + (1.0D0-a)
    -      c = 1.0D0
    -  260 a2nm1 = x*a2n + c*a2nm1
    -      b2nm1 = x*b2n + c*b2nm1
    -      am0 = a2nm1/b2nm1
    -      c = c + 1.0D0
    -      cma = c - a
    -      a2n = a2nm1 + cma*a2n
    -      b2n = b2nm1 + cma*b2n
    -      an0 = a2n/b2n
    -      IF (abs(an0-am0).GE.tol*an0) GO TO 260
    -C
    -      qans = r*an0
    -      ans = 0.5D0 + (0.5D0-qans)
    -      RETURN
    -C
    -C                GENERAL TEMME EXPANSION
    -C
    -  270 IF (abs(s).LE.2.0D0*e .AND. a*e*e.GT.3.28D-3) GO TO 430
    -      c = exp(-y)
    -      w = 0.5D0*erfc1(1,sqrt(y))
    -      u = 1.0D0/a
    -      z = sqrt(z+z)
    -      IF (l.LT.1.0D0) z = -z
    -      IF (iop.lt.2) GO TO 280
    -      IF (iop.eq.2) GO TO 290
    -      GO TO 300
    -C
    -  280 IF (abs(s).LE.1.D-3) GO TO 340
    -      c0 = ((((((((((((d0(13)*z+d0(12))*z+d0(11))*z+d0(10))*z+d0(9))*z+
    -     +     d0(8))*z+d0(7))*z+d0(6))*z+d0(5))*z+d0(4))*z+d0(3))*z+d0(2))*
    -     +     z+d0(1))*z - third
    -      c1 = (((((((((((d1(12)*z+d1(11))*z+d1(10))*z+d1(9))*z+d1(8))*z+
    -     +     d1(7))*z+d1(6))*z+d1(5))*z+d1(4))*z+d1(3))*z+d1(2))*z+d1(1))*
    -     +     z + d10
    -      c2 = (((((((((d2(10)*z+d2(9))*z+d2(8))*z+d2(7))*z+d2(6))*z+
    -     +     d2(5))*z+d2(4))*z+d2(3))*z+d2(2))*z+d2(1))*z + d20
    -      c3 = (((((((d3(8)*z+d3(7))*z+d3(6))*z+d3(5))*z+d3(4))*z+d3(3))*z+
    -     +     d3(2))*z+d3(1))*z + d30
    -      c4 = (((((d4(6)*z+d4(5))*z+d4(4))*z+d4(3))*z+d4(2))*z+d4(1))*z +
    -     +     d40
    -      c5 = (((d5(4)*z+d5(3))*z+d5(2))*z+d5(1))*z + d50
    -      c6 = (d6(2)*z+d6(1))*z + d60
    -      t = ((((((d70*u+c6)*u+c5)*u+c4)*u+c3)*u+c2)*u+c1)*u + c0
    -      GO TO 310
    -C
    -  290 c0 = (((((d0(6)*z+d0(5))*z+d0(4))*z+d0(3))*z+d0(2))*z+d0(1))*z -
    -     +     third
    -      c1 = (((d1(4)*z+d1(3))*z+d1(2))*z+d1(1))*z + d10
    -      c2 = d2(1)*z + d20
    -      t = (c2*u+c1)*u + c0
    -      GO TO 310
    -C
    -  300 t = ((d0(3)*z+d0(2))*z+d0(1))*z - third
    -C
    -  310 IF (l.LT.1.0D0) GO TO 320
    -      qans = c* (w+rt2pin*t/rta)
    -      ans = 0.5D0 + (0.5D0-qans)
    -      RETURN
    -
    -  320 ans = c* (w-rt2pin*t/rta)
    -      qans = 0.5D0 + (0.5D0-ans)
    -      RETURN
    -C
    -C               TEMME EXPANSION FOR L = 1
    -C
    -  330 IF (a*e*e.GT.3.28D-3) GO TO 430
    -      c = 0.5D0 + (0.5D0-y)
    -      w = (0.5D0-sqrt(y)* (0.5D0+ (0.5D0-y/3.0D0))/rtpi)/c
    -      u = 1.0D0/a
    -      z = sqrt(z+z)
    -      IF (l.LT.1.0D0) z = -z
    -      IF (iop.lt.2) GO TO 340
    -      IF (iop.eq.2) GO TO 350
    -      GO TO 360
    -C
    -  340 c0 = ((((((d0(7)*z+d0(6))*z+d0(5))*z+d0(4))*z+d0(3))*z+d0(2))*z+
    -     +     d0(1))*z - third
    -      c1 = (((((d1(6)*z+d1(5))*z+d1(4))*z+d1(3))*z+d1(2))*z+d1(1))*z +
    -     +     d10
    -      c2 = ((((d2(5)*z+d2(4))*z+d2(3))*z+d2(2))*z+d2(1))*z + d20
    -      c3 = (((d3(4)*z+d3(3))*z+d3(2))*z+d3(1))*z + d30
    -      c4 = (d4(2)*z+d4(1))*z + d40
    -      c5 = (d5(2)*z+d5(1))*z + d50
    -      c6 = d6(1)*z + d60
    -      t = ((((((d70*u+c6)*u+c5)*u+c4)*u+c3)*u+c2)*u+c1)*u + c0
    -      GO TO 310
    -C
    -  350 c0 = (d0(2)*z+d0(1))*z - third
    -      c1 = d1(1)*z + d10
    -      t = (d20*u+c1)*u + c0
    -      GO TO 310
    -C
    -  360 t = d0(1)*z - third
    -      GO TO 310
    -C
    -C                     SPECIAL CASES
    -C
    -  370 ans = 0.0D0
    -      qans = 1.0D0
    -      RETURN
    -C
    -  380 ans = 1.0D0
    -      qans = 0.0D0
    -      RETURN
    -C
    -  390 IF (x.GE.0.25D0) GO TO 400
    -      ans = erf(sqrt(x))
    -      qans = 0.5D0 + (0.5D0-ans)
    -      RETURN
    -
    -  400 qans = erfc1(0,sqrt(x))
    -      ans = 0.5D0 + (0.5D0-qans)
    -      RETURN
    -C
    -  410 IF (abs(s).LE.2.0D0*e) GO TO 430
    -  420 IF (x.LE.a) GO TO 370
    -      GO TO 380
    -C
    -C                     ERROR RETURN
    -C
    -  430 ans = 2.0D0
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/gsumln.f b/scipy-0.10.1/scipy/special/cdflib/gsumln.f
    deleted file mode 100644
    index 6eec02559d..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/gsumln.f
    +++ /dev/null
    @@ -1,32 +0,0 @@
    -      DOUBLE PRECISION FUNCTION gsumln(a,b)
    -C-----------------------------------------------------------------------
    -C          EVALUATION OF THE FUNCTION LN(GAMMA(A + B))
    -C          FOR 1 .LE. A .LE. 2  AND  1 .LE. B .LE. 2
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,b
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION x
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION alnrel,gamln1
    -      EXTERNAL alnrel,gamln1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble,dlog
    -C     ..
    -C     .. Executable Statements ..
    -      x = dble(a) + dble(b) - 2.D0
    -      IF (x.GT.0.25D0) GO TO 10
    -      gsumln = gamln1(1.0D0+x)
    -      RETURN
    -
    -   10 IF (x.GT.1.25D0) GO TO 20
    -      gsumln = gamln1(x) + alnrel(x)
    -      RETURN
    -
    -   20 gsumln = gamln1(x-1.0D0) + dlog(x* (1.0D0+x))
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/ipmpar.f b/scipy-0.10.1/scipy/special/cdflib/ipmpar.f
    deleted file mode 100644
    index a52a8d77fe..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/ipmpar.f
    +++ /dev/null
    @@ -1,429 +0,0 @@
    -      INTEGER FUNCTION ipmpar(i)
    -C-----------------------------------------------------------------------
    -C
    -C     IPMPAR PROVIDES THE INTEGER MACHINE CONSTANTS FOR THE COMPUTER
    -C     THAT IS USED. IT IS ASSUMED THAT THE ARGUMENT I IS AN INTEGER
    -C     HAVING ONE OF THE VALUES 1-10. IPMPAR(I) HAS THE VALUE ...
    -C
    -C  INTEGERS.
    -C
    -C     ASSUME INTEGERS ARE REPRESENTED IN THE N-DIGIT, BASE-A FORM
    -C
    -C               SIGN ( X(N-1)*A**(N-1) + ... + X(1)*A + X(0) )
    -C
    -C               WHERE 0 .LE. X(I) .LT. A FOR I=0,...,N-1.
    -C
    -C     IPMPAR(1) = A, THE BASE.
    -C
    -C     IPMPAR(2) = N, THE NUMBER OF BASE-A DIGITS.
    -C
    -C     IPMPAR(3) = A**N - 1, THE LARGEST MAGNITUDE.
    -C
    -C  FLOATING-POINT NUMBERS.
    -C
    -C     IT IS ASSUMED THAT THE SINGLE AND DOUBLE PRECISION FLOATING
    -C     POINT ARITHMETICS HAVE THE SAME BASE, SAY B, AND THAT THE
    -C     NONZERO NUMBERS ARE REPRESENTED IN THE FORM
    -C
    -C               SIGN (B**E) * (X(1)/B + ... + X(M)/B**M)
    -C
    -C               WHERE X(I) = 0,1,...,B-1 FOR I=1,...,M,
    -C               X(1) .GE. 1, AND EMIN .LE. E .LE. EMAX.
    -C
    -C     IPMPAR(4) = B, THE BASE.
    -C
    -C  SINGLE-PRECISION
    -C
    -C     IPMPAR(5) = M, THE NUMBER OF BASE-B DIGITS.
    -C
    -C     IPMPAR(6) = EMIN, THE SMALLEST EXPONENT E.
    -C
    -C     IPMPAR(7) = EMAX, THE LARGEST EXPONENT E.
    -C
    -C  DOUBLE-PRECISION
    -C
    -C     IPMPAR(8) = M, THE NUMBER OF BASE-B DIGITS.
    -C
    -C     IPMPAR(9) = EMIN, THE SMALLEST EXPONENT E.
    -C
    -C     IPMPAR(10) = EMAX, THE LARGEST EXPONENT E.
    -C
    -C-----------------------------------------------------------------------
    -C
    -C     TO DEFINE THIS FUNCTION FOR THE COMPUTER BEING USED, ACTIVATE
    -C     THE DATA STATMENTS FOR THE COMPUTER BY REMOVING THE C FROM
    -C     COLUMN 1. (ALL THE OTHER DATA STATEMENTS SHOULD HAVE C IN
    -C     COLUMN 1.)
    -C
    -C-----------------------------------------------------------------------
    -C
    -C     IPMPAR IS AN ADAPTATION OF THE FUNCTION I1MACH, WRITTEN BY
    -C     P.A. FOX, A.D. HALL, AND N.L. SCHRYER (BELL LABORATORIES).
    -C     IPMPAR WAS FORMED BY A.H. MORRIS (NSWC). THE CONSTANTS ARE
    -C     FROM BELL LABORATORIES, NSWC, AND OTHER SOURCES.
    -C
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      INTEGER i
    -C     ..
    -C     .. Local Arrays ..
    -      INTEGER imach(10)
    -C     ..
    -C     .. Data statements ..
    -C
    -C     MACHINE CONSTANTS FOR AMDAHL MACHINES.
    -C
    -C     DATA IMACH( 1) /   2 /
    -C     DATA IMACH( 2) /  31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /  16 /
    -C     DATA IMACH( 5) /   6 /
    -C     DATA IMACH( 6) / -64 /
    -C     DATA IMACH( 7) /  63 /
    -C     DATA IMACH( 8) /  14 /
    -C     DATA IMACH( 9) / -64 /
    -C     DATA IMACH(10) /  63 /
    -C
    -C     MACHINE CONSTANTS FOR THE AT&T 3B SERIES, AT&T
    -C     PC 7300, AND AT&T 6300.
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    24 /
    -C     DATA IMACH( 6) /  -125 /
    -C     DATA IMACH( 7) /   128 /
    -C     DATA IMACH( 8) /    53 /
    -C     DATA IMACH( 9) / -1021 /
    -C     DATA IMACH(10) /  1024 /
    -C
    -C     MACHINE CONSTANTS FOR THE BURROUGHS 1700 SYSTEM.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   33 /
    -C     DATA IMACH( 3) / 8589934591 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   24 /
    -C     DATA IMACH( 6) / -256 /
    -C     DATA IMACH( 7) /  255 /
    -C     DATA IMACH( 8) /   60 /
    -C     DATA IMACH( 9) / -256 /
    -C     DATA IMACH(10) /  255 /
    -C
    -C     MACHINE CONSTANTS FOR THE BURROUGHS 5700 SYSTEM.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   39 /
    -C     DATA IMACH( 3) / 549755813887 /
    -C     DATA IMACH( 4) /    8 /
    -C     DATA IMACH( 5) /   13 /
    -C     DATA IMACH( 6) /  -50 /
    -C     DATA IMACH( 7) /   76 /
    -C     DATA IMACH( 8) /   26 /
    -C     DATA IMACH( 9) /  -50 /
    -C     DATA IMACH(10) /   76 /
    -C
    -C     MACHINE CONSTANTS FOR THE BURROUGHS 6700/7700 SYSTEMS.
    -C
    -C     DATA IMACH( 1) /      2 /
    -C     DATA IMACH( 2) /     39 /
    -C     DATA IMACH( 3) / 549755813887 /
    -C     DATA IMACH( 4) /      8 /
    -C     DATA IMACH( 5) /     13 /
    -C     DATA IMACH( 6) /    -50 /
    -C     DATA IMACH( 7) /     76 /
    -C     DATA IMACH( 8) /     26 /
    -C     DATA IMACH( 9) / -32754 /
    -C     DATA IMACH(10) /  32780 /
    -C
    -C     MACHINE CONSTANTS FOR THE CDC 6000/7000 SERIES
    -C     60 BIT ARITHMETIC, AND THE CDC CYBER 995 64 BIT
    -C     ARITHMETIC (NOS OPERATING SYSTEM).
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   48 /
    -C     DATA IMACH( 3) / 281474976710655 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   48 /
    -C     DATA IMACH( 6) / -974 /
    -C     DATA IMACH( 7) / 1070 /
    -C     DATA IMACH( 8) /   95 /
    -C     DATA IMACH( 9) / -926 /
    -C     DATA IMACH(10) / 1070 /
    -C
    -C     MACHINE CONSTANTS FOR THE CDC CYBER 995 64 BIT
    -C     ARITHMETIC (NOS/VE OPERATING SYSTEM).
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    63 /
    -C     DATA IMACH( 3) / 9223372036854775807 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    48 /
    -C     DATA IMACH( 6) / -4096 /
    -C     DATA IMACH( 7) /  4095 /
    -C     DATA IMACH( 8) /    96 /
    -C     DATA IMACH( 9) / -4096 /
    -C     DATA IMACH(10) /  4095 /
    -C
    -C     MACHINE CONSTANTS FOR THE CRAY 1, XMP, 2, AND 3.
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    63 /
    -C     DATA IMACH( 3) / 9223372036854775807 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    47 /
    -C     DATA IMACH( 6) / -8189 /
    -C     DATA IMACH( 7) /  8190 /
    -C     DATA IMACH( 8) /    94 /
    -C     DATA IMACH( 9) / -8099 /
    -C     DATA IMACH(10) /  8190 /
    -C
    -C     MACHINE CONSTANTS FOR THE DATA GENERAL ECLIPSE S/200.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   15 /
    -C     DATA IMACH( 3) / 32767 /
    -C     DATA IMACH( 4) /   16 /
    -C     DATA IMACH( 5) /    6 /
    -C     DATA IMACH( 6) /  -64 /
    -C     DATA IMACH( 7) /   63 /
    -C     DATA IMACH( 8) /   14 /
    -C     DATA IMACH( 9) /  -64 /
    -C     DATA IMACH(10) /   63 /
    -C
    -C     MACHINE CONSTANTS FOR THE HARRIS 220.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   23 /
    -C     DATA IMACH( 3) / 8388607 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   23 /
    -C     DATA IMACH( 6) / -127 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   38 /
    -C     DATA IMACH( 9) / -127 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE HONEYWELL 600/6000
    -C     AND DPS 8/70 SERIES.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   35 /
    -C     DATA IMACH( 3) / 34359738367 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   27 /
    -C     DATA IMACH( 6) / -127 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   63 /
    -C     DATA IMACH( 9) / -127 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE HP 2100
    -C     3 WORD DOUBLE PRECISION OPTION WITH FTN4
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   15 /
    -C     DATA IMACH( 3) / 32767 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   23 /
    -C     DATA IMACH( 6) / -128 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   39 /
    -C     DATA IMACH( 9) / -128 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE HP 2100
    -C     4 WORD DOUBLE PRECISION OPTION WITH FTN4
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   15 /
    -C     DATA IMACH( 3) / 32767 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   23 /
    -C     DATA IMACH( 6) / -128 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   55 /
    -C     DATA IMACH( 9) / -128 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE HP 9000.
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    24 /
    -C     DATA IMACH( 6) /  -126 /
    -C     DATA IMACH( 7) /   128 /
    -C     DATA IMACH( 8) /    53 /
    -C     DATA IMACH( 9) / -1021 /
    -C     DATA IMACH(10) /  1024 /
    -C
    -C     MACHINE CONSTANTS FOR THE IBM 360/370 SERIES,
    -C     THE ICL 2900, THE ITEL AS/6, THE XEROX SIGMA
    -C     5/7/9 AND THE SEL SYSTEMS 85/86.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /   16 /
    -C     DATA IMACH( 5) /    6 /
    -C     DATA IMACH( 6) /  -64 /
    -C     DATA IMACH( 7) /   63 /
    -C     DATA IMACH( 8) /   14 /
    -C     DATA IMACH( 9) /  -64 /
    -C     DATA IMACH(10) /   63 /
    -C
    -C     MACHINE CONSTANTS FOR THE IBM PC.
    -C
    -C      DATA imach(1)/2/
    -C      DATA imach(2)/31/
    -C      DATA imach(3)/2147483647/
    -C      DATA imach(4)/2/
    -C      DATA imach(5)/24/
    -C      DATA imach(6)/-125/
    -C      DATA imach(7)/128/
    -C      DATA imach(8)/53/
    -C      DATA imach(9)/-1021/
    -C      DATA imach(10)/1024/
    -C
    -C     MACHINE CONSTANTS FOR THE MACINTOSH II - ABSOFT
    -C     MACFORTRAN II.
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    24 /
    -C     DATA IMACH( 6) /  -125 /
    -C     DATA IMACH( 7) /   128 /
    -C     DATA IMACH( 8) /    53 /
    -C     DATA IMACH( 9) / -1021 /
    -C     DATA IMACH(10) /  1024 /
    -C
    -C     MACHINE CONSTANTS FOR THE MICROVAX - VMS FORTRAN.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   24 /
    -C     DATA IMACH( 6) / -127 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   56 /
    -C     DATA IMACH( 9) / -127 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE PDP-10 (KA PROCESSOR).
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   35 /
    -C     DATA IMACH( 3) / 34359738367 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   27 /
    -C     DATA IMACH( 6) / -128 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   54 /
    -C     DATA IMACH( 9) / -101 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE PDP-10 (KI PROCESSOR).
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   35 /
    -C     DATA IMACH( 3) / 34359738367 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   27 /
    -C     DATA IMACH( 6) / -128 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   62 /
    -C     DATA IMACH( 9) / -128 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE PDP-11 FORTRAN SUPPORTING
    -C     32-BIT INTEGER ARITHMETIC.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   24 /
    -C     DATA IMACH( 6) / -127 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   56 /
    -C     DATA IMACH( 9) / -127 /
    -C     DATA IMACH(10) /  127 /
    -C
    -C     MACHINE CONSTANTS FOR THE SEQUENT BALANCE 8000.
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    24 /
    -C     DATA IMACH( 6) /  -125 /
    -C     DATA IMACH( 7) /   128 /
    -C     DATA IMACH( 8) /    53 /
    -C     DATA IMACH( 9) / -1021 /
    -C     DATA IMACH(10) /  1024 /
    -C
    -C     MACHINE CONSTANTS FOR THE SILICON GRAPHICS IRIS-4D
    -C     SERIES (MIPS R3000 PROCESSOR).
    -C
    -C     DATA IMACH( 1) /     2 /
    -C     DATA IMACH( 2) /    31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /     2 /
    -C     DATA IMACH( 5) /    24 /
    -C     DATA IMACH( 6) /  -125 /
    -C     DATA IMACH( 7) /   128 /
    -C     DATA IMACH( 8) /    53 /
    -C     DATA IMACH( 9) / -1021 /
    -C     DATA IMACH(10) /  1024 /
    -C
    -C     MACHINE CONSTANTS FOR IEEE ARITHMETIC MACHINES, SUCH AS THE AT&T
    -C     3B SERIES, MOTOROLA 68000 BASED MACHINES (E.G. SUN 3 AND AT&T
    -C     PC 7300), AND 8087 BASED MICROS (E.G. IBM PC AND AT&T 6300).
    -C
    -      DATA IMACH( 1) /     2 /
    -      DATA IMACH( 2) /    31 /
    -      DATA IMACH( 3) / 2147483647 /
    -      DATA IMACH( 4) /     2 /
    -      DATA IMACH( 5) /    24 /
    -      DATA IMACH( 6) /  -125 /
    -      DATA IMACH( 7) /   128 /
    -      DATA IMACH( 8) /    53 /
    -      DATA IMACH( 9) / -1021 /
    -      DATA IMACH(10) /  1024 /
    -C
    -C     MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   35 /
    -C     DATA IMACH( 3) / 34359738367 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   27 /
    -C     DATA IMACH( 6) / -128 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   60 /
    -C     DATA IMACH( 9) /-1024 /
    -C     DATA IMACH(10) / 1023 /
    -C
    -C     MACHINE CONSTANTS FOR THE VAX 11/780.
    -C
    -C     DATA IMACH( 1) /    2 /
    -C     DATA IMACH( 2) /   31 /
    -C     DATA IMACH( 3) / 2147483647 /
    -C     DATA IMACH( 4) /    2 /
    -C     DATA IMACH( 5) /   24 /
    -C     DATA IMACH( 6) / -127 /
    -C     DATA IMACH( 7) /  127 /
    -C     DATA IMACH( 8) /   56 /
    -C     DATA IMACH( 9) / -127 /
    -C     DATA IMACH(10) /  127 /
    -C
    -      ipmpar = imach(i)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/psi_fort.f b/scipy-0.10.1/scipy/special/cdflib/psi_fort.f
    deleted file mode 100644
    index 95d74fc4ca..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/psi_fort.f
    +++ /dev/null
    @@ -1,193 +0,0 @@
    -      DOUBLE PRECISION FUNCTION psi(xx)
    -C---------------------------------------------------------------------
    -C
    -C                 EVALUATION OF THE DIGAMMA FUNCTION
    -C
    -C                           -----------
    -C
    -C     PSI(XX) IS ASSIGNED THE VALUE 0 WHEN THE DIGAMMA FUNCTION CANNOT
    -C     BE COMPUTED.
    -C
    -C     THE MAIN COMPUTATION INVOLVES EVALUATION OF RATIONAL CHEBYSHEV
    -C     APPROXIMATIONS PUBLISHED IN MATH. COMP. 27, 123-127(1973) BY
    -C     CODY, STRECOK AND THACHER.
    -C
    -C---------------------------------------------------------------------
    -C     PSI WAS WRITTEN AT ARGONNE NATIONAL LABORATORY FOR THE FUNPACK
    -C     PACKAGE OF SPECIAL FUNCTION SUBROUTINES. PSI WAS MODIFIED BY
    -C     A.H. MORRIS (NSWC).
    -C---------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION xx
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION aug,den,dx0,piov4,sgn,upper,w,x,xmax1,xmx0,
    -     +                 xsmall,z
    -      INTEGER i,m,n,nq
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION p1(7),p2(4),q1(6),q2(4)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION spmpar
    -      INTEGER ipmpar
    -      EXTERNAL spmpar,ipmpar
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,cos,dble,dlog,dmin1,int,sin
    -C     ..
    -C     .. Data statements ..
    -C---------------------------------------------------------------------
    -C
    -C     PIOV4 = PI/4
    -C     DX0 = ZERO OF PSI TO EXTENDED PRECISION
    -C
    -C---------------------------------------------------------------------
    -C---------------------------------------------------------------------
    -C
    -C     COEFFICIENTS FOR RATIONAL APPROXIMATION OF
    -C     PSI(X) / (X - X0),  0.5 .LE. X .LE. 3.0
    -C
    -C---------------------------------------------------------------------
    -C---------------------------------------------------------------------
    -C
    -C     COEFFICIENTS FOR RATIONAL APPROXIMATION OF
    -C     PSI(X) - LN(X) + 1 / (2*X),  X .GT. 3.0
    -C
    -C---------------------------------------------------------------------
    -      DATA piov4/.785398163397448D0/
    -      DATA dx0/1.461632144968362341262659542325721325D0/
    -      DATA p1(1)/.895385022981970D-02/,p1(2)/.477762828042627D+01/,
    -     +     p1(3)/.142441585084029D+03/,p1(4)/.118645200713425D+04/,
    -     +     p1(5)/.363351846806499D+04/,p1(6)/.413810161269013D+04/,
    -     +     p1(7)/.130560269827897D+04/
    -      DATA q1(1)/.448452573429826D+02/,q1(2)/.520752771467162D+03/,
    -     +     q1(3)/.221000799247830D+04/,q1(4)/.364127349079381D+04/,
    -     +     q1(5)/.190831076596300D+04/,q1(6)/.691091682714533D-05/
    -      DATA p2(1)/-.212940445131011D+01/,p2(2)/-.701677227766759D+01/,
    -     +     p2(3)/-.448616543918019D+01/,p2(4)/-.648157123766197D+00/
    -      DATA q2(1)/.322703493791143D+02/,q2(2)/.892920700481861D+02/,
    -     +     q2(3)/.546117738103215D+02/,q2(4)/.777788548522962D+01/
    -C     ..
    -C     .. Executable Statements ..
    -C---------------------------------------------------------------------
    -C
    -C     MACHINE DEPENDENT CONSTANTS ...
    -C
    -C        XMAX1  = THE SMALLEST POSITIVE FLOATING POINT CONSTANT
    -C                 WITH ENTIRELY INTEGER REPRESENTATION.  ALSO USED
    -C                 AS NEGATIVE OF LOWER BOUND ON ACCEPTABLE NEGATIVE
    -C                 ARGUMENTS AND AS THE POSITIVE ARGUMENT BEYOND WHICH
    -C                 PSI MAY BE REPRESENTED AS ALOG(X).
    -C
    -C        XSMALL = ABSOLUTE ARGUMENT BELOW WHICH PI*COTAN(PI*X)
    -C                 MAY BE REPRESENTED BY 1/X.
    -C
    -C---------------------------------------------------------------------
    -      xmax1 = ipmpar(3)
    -      xmax1 = dmin1(xmax1,1.0D0/spmpar(1))
    -      xsmall = 1.D-9
    -C---------------------------------------------------------------------
    -      x = xx
    -      aug = 0.0D0
    -      IF (x.GE.0.5D0) GO TO 50
    -C---------------------------------------------------------------------
    -C     X .LT. 0.5,  USE REFLECTION FORMULA
    -C     PSI(1-X) = PSI(X) + PI * COTAN(PI*X)
    -C---------------------------------------------------------------------
    -      IF (abs(x).GT.xsmall) GO TO 10
    -      IF (x.EQ.0.0D0) GO TO 100
    -C---------------------------------------------------------------------
    -C     0 .LT. ABS(X) .LE. XSMALL.  USE 1/X AS A SUBSTITUTE
    -C     FOR  PI*COTAN(PI*X)
    -C---------------------------------------------------------------------
    -      aug = -1.0D0/x
    -      GO TO 40
    -C---------------------------------------------------------------------
    -C     REDUCTION OF ARGUMENT FOR COTAN
    -C---------------------------------------------------------------------
    -   10 w = -x
    -      sgn = piov4
    -      IF (w.GT.0.0D0) GO TO 20
    -      w = -w
    -      sgn = -sgn
    -C---------------------------------------------------------------------
    -C     MAKE AN ERROR EXIT IF X .LE. -XMAX1
    -C---------------------------------------------------------------------
    -   20 IF (w.GE.xmax1) GO TO 100
    -      nq = int(w)
    -      w = w - dble(nq)
    -      nq = int(w*4.0D0)
    -      w = 4.0D0* (w-dble(nq)*.25D0)
    -C---------------------------------------------------------------------
    -C     W IS NOW RELATED TO THE FRACTIONAL PART OF  4.0 * X.
    -C     ADJUST ARGUMENT TO CORRESPOND TO VALUES IN FIRST
    -C     QUADRANT AND DETERMINE SIGN
    -C---------------------------------------------------------------------
    -      n = nq/2
    -      IF ((n+n).NE.nq) w = 1.0D0 - w
    -      z = piov4*w
    -      m = n/2
    -      IF ((m+m).NE.n) sgn = -sgn
    -C---------------------------------------------------------------------
    -C     DETERMINE FINAL VALUE FOR  -PI*COTAN(PI*X)
    -C---------------------------------------------------------------------
    -      n = (nq+1)/2
    -      m = n/2
    -      m = m + m
    -      IF (m.NE.n) GO TO 30
    -C---------------------------------------------------------------------
    -C     CHECK FOR SINGULARITY
    -C---------------------------------------------------------------------
    -      IF (z.EQ.0.0D0) GO TO 100
    -C---------------------------------------------------------------------
    -C     USE COS/SIN AS A SUBSTITUTE FOR COTAN, AND
    -C     SIN/COS AS A SUBSTITUTE FOR TAN
    -C---------------------------------------------------------------------
    -      aug = sgn* ((cos(z)/sin(z))*4.0D0)
    -      GO TO 40
    -
    -   30 aug = sgn* ((sin(z)/cos(z))*4.0D0)
    -   40 x = 1.0D0 - x
    -   50 IF (x.GT.3.0D0) GO TO 70
    -C---------------------------------------------------------------------
    -C     0.5 .LE. X .LE. 3.0
    -C---------------------------------------------------------------------
    -      den = x
    -      upper = p1(1)*x
    -C
    -      DO 60 i = 1,5
    -          den = (den+q1(i))*x
    -          upper = (upper+p1(i+1))*x
    -   60 CONTINUE
    -C
    -      den = (upper+p1(7))/ (den+q1(6))
    -      xmx0 = dble(x) - dx0
    -      psi = den*xmx0 + aug
    -      RETURN
    -C---------------------------------------------------------------------
    -C     IF X .GE. XMAX1, PSI = LN(X)
    -C---------------------------------------------------------------------
    -   70 IF (x.GE.xmax1) GO TO 90
    -C---------------------------------------------------------------------
    -C     3.0 .LT. X .LT. XMAX1
    -C---------------------------------------------------------------------
    -      w = 1.0D0/ (x*x)
    -      den = w
    -      upper = p2(1)*w
    -C
    -      DO 80 i = 1,3
    -          den = (den+q2(i))*w
    -          upper = (upper+p2(i+1))*w
    -   80 CONTINUE
    -C
    -      aug = upper/ (den+q2(4)) - 0.5D0/x + aug
    -   90 psi = aug + dlog(x)
    -      RETURN
    -C---------------------------------------------------------------------
    -C     ERROR RETURN
    -C---------------------------------------------------------------------
    -  100 psi = 0.0D0
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/rcomp.f b/scipy-0.10.1/scipy/special/cdflib/rcomp.f
    deleted file mode 100644
    index 55d2c7edbe..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/rcomp.f
    +++ /dev/null
    @@ -1,43 +0,0 @@
    -      DOUBLE PRECISION FUNCTION rcomp(a,x)
    -C     -------------------
    -C     EVALUATION OF EXP(-X)*X**A/GAMMA(A)
    -C     -------------------
    -C     RT2PIN = 1/SQRT(2*PI)
    -C     -------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION a,x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION rt2pin,t,t1,u
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION gam1,gamma,rlog
    -      EXTERNAL gam1,gamma,rlog
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dlog,exp,sqrt
    -C     ..
    -C     .. Data statements ..
    -      DATA rt2pin/.398942280401433D0/
    -C     ..
    -C     .. Executable Statements ..
    -C     -------------------
    -      rcomp = 0.0D0
    -      IF (a.GE.20.0D0) GO TO 20
    -      t = a*dlog(x) - x
    -      IF (a.GE.1.0D0) GO TO 10
    -      rcomp = (a*exp(t))* (1.0D0+gam1(a))
    -      RETURN
    -
    -   10 rcomp = exp(t)/gamma(a)
    -      RETURN
    -C
    -   20 u = x/a
    -      IF (u.EQ.0.0D0) RETURN
    -      t = (1.0D0/a)**2
    -      t1 = (((0.75D0*t-1.0D0)*t+3.5D0)*t-105.0D0)/ (a*1260.0D0)
    -      t1 = t1 - a*rlog(u)
    -      rcomp = rt2pin*sqrt(a)*exp(t1)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/rexp.f b/scipy-0.10.1/scipy/special/cdflib/rexp.f
    deleted file mode 100644
    index cc29c414f4..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/rexp.f
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -      DOUBLE PRECISION FUNCTION rexp(x)
    -C-----------------------------------------------------------------------
    -C            EVALUATION OF THE FUNCTION EXP(X) - 1
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION p1,p2,q1,q2,q3,q4,w
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC abs,exp
    -C     ..
    -C     .. Data statements ..
    -      DATA p1/.914041914819518D-09/,p2/.238082361044469D-01/,
    -     +     q1/-.499999999085958D+00/,q2/.107141568980644D+00/,
    -     +     q3/-.119041179760821D-01/,q4/.595130811860248D-03/
    -C     ..
    -C     .. Executable Statements ..
    -C-----------------------
    -      IF (abs(x).GT.0.15D0) GO TO 10
    -      rexp = x* (((p2*x+p1)*x+1.0D0)/ ((((q4*x+q3)*x+q2)*x+q1)*x+1.0D0))
    -      RETURN
    -C
    -   10 w = exp(x)
    -      IF (x.GT.0.0D0) GO TO 20
    -      rexp = (w-0.5D0) - 0.5D0
    -      RETURN
    -
    -   20 rexp = w* (0.5D0+ (0.5D0-1.0D0/w))
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/rlog.f b/scipy-0.10.1/scipy/special/cdflib/rlog.f
    deleted file mode 100644
    index 94faa6c3e1..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/rlog.f
    +++ /dev/null
    @@ -1,55 +0,0 @@
    -      DOUBLE PRECISION FUNCTION rlog(x)
    -C     -------------------
    -C     COMPUTATION OF  X - 1 - LN(X)
    -C     -------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a,b,p0,p1,p2,q1,q2,r,t,u,w,w1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble,dlog
    -C     ..
    -C     .. Data statements ..
    -C     -------------------
    -      DATA a/.566749439387324D-01/
    -      DATA b/.456512608815524D-01/
    -      DATA p0/.333333333333333D+00/,p1/-.224696413112536D+00/,
    -     +     p2/.620886815375787D-02/
    -      DATA q1/-.127408923933623D+01/,q2/.354508718369557D+00/
    -C     ..
    -C     .. Executable Statements ..
    -C     -------------------
    -      IF (x.LT.0.61D0 .OR. x.GT.1.57D0) GO TO 40
    -      IF (x.LT.0.82D0) GO TO 10
    -      IF (x.GT.1.18D0) GO TO 20
    -C
    -C              ARGUMENT REDUCTION
    -C
    -      u = (x-0.5D0) - 0.5D0
    -      w1 = 0.0D0
    -      GO TO 30
    -C
    -   10 u = dble(x) - 0.7D0
    -      u = u/0.7D0
    -      w1 = a - u*0.3D0
    -      GO TO 30
    -C
    -   20 u = 0.75D0*dble(x) - 1.D0
    -      w1 = b + u/3.0D0
    -C
    -C               SERIES EXPANSION
    -C
    -   30 r = u/ (u+2.0D0)
    -      t = r*r
    -      w = ((p2*t+p1)*t+p0)/ ((q2*t+q1)*t+1.0D0)
    -      rlog = 2.0D0*t* (1.0D0/ (1.0D0-r)-r*w) + w1
    -      RETURN
    -C
    -C
    -   40 r = (x-0.5D0) - 0.5D0
    -      rlog = r - dlog(x)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/rlog1.f b/scipy-0.10.1/scipy/special/cdflib/rlog1.f
    deleted file mode 100644
    index 8b215eba85..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/rlog1.f
    +++ /dev/null
    @@ -1,55 +0,0 @@
    -      DOUBLE PRECISION FUNCTION rlog1(x)
    -C-----------------------------------------------------------------------
    -C             EVALUATION OF THE FUNCTION X - LN(1 + X)
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION x
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION a,b,h,p0,p1,p2,q1,q2,r,t,w,w1
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble,dlog
    -C     ..
    -C     .. Data statements ..
    -C------------------------
    -      DATA a/.566749439387324D-01/
    -      DATA b/.456512608815524D-01/
    -      DATA p0/.333333333333333D+00/,p1/-.224696413112536D+00/,
    -     +     p2/.620886815375787D-02/
    -      DATA q1/-.127408923933623D+01/,q2/.354508718369557D+00/
    -C     ..
    -C     .. Executable Statements ..
    -C------------------------
    -      IF (x.LT.-0.39D0 .OR. x.GT.0.57D0) GO TO 40
    -      IF (x.LT.-0.18D0) GO TO 10
    -      IF (x.GT.0.18D0) GO TO 20
    -C
    -C              ARGUMENT REDUCTION
    -C
    -      h = x
    -      w1 = 0.0D0
    -      GO TO 30
    -C
    -   10 h = dble(x) + 0.3D0
    -      h = h/0.7D0
    -      w1 = a - h*0.3D0
    -      GO TO 30
    -C
    -   20 h = 0.75D0*dble(x) - 0.25D0
    -      w1 = b + h/3.0D0
    -C
    -C               SERIES EXPANSION
    -C
    -   30 r = h/ (h+2.0D0)
    -      t = r*r
    -      w = ((p2*t+p1)*t+p0)/ ((q2*t+q1)*t+1.0D0)
    -      rlog1 = 2.0D0*t* (1.0D0/ (1.0D0-r)-r*w) + w1
    -      RETURN
    -C
    -C
    -   40 w = (x+0.5D0) + 0.5D0
    -      rlog1 = x - dlog(w)
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/spmpar.f b/scipy-0.10.1/scipy/special/cdflib/spmpar.f
    deleted file mode 100644
    index 4cfafb1a3b..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/spmpar.f
    +++ /dev/null
    @@ -1,72 +0,0 @@
    -      DOUBLE PRECISION FUNCTION spmpar(i)
    -C-----------------------------------------------------------------------
    -C
    -C     SPMPAR PROVIDES THE SINGLE PRECISION MACHINE CONSTANTS FOR
    -C     THE COMPUTER BEING USED. IT IS ASSUMED THAT THE ARGUMENT
    -C     I IS AN INTEGER HAVING ONE OF THE VALUES 1, 2, OR 3. IF THE
    -C     SINGLE PRECISION ARITHMETIC BEING USED HAS M BASE B DIGITS AND
    -C     ITS SMALLEST AND LARGEST EXPONENTS ARE EMIN AND EMAX, THEN
    -C
    -C        SPMPAR(1) = B**(1 - M), THE MACHINE PRECISION,
    -C
    -C        SPMPAR(2) = B**(EMIN - 1), THE SMALLEST MAGNITUDE,
    -C
    -C        SPMPAR(3) = B**EMAX*(1 - B**(-M)), THE LARGEST MAGNITUDE.
    -C
    -C-----------------------------------------------------------------------
    -C     WRITTEN BY
    -C        ALFRED H. MORRIS, JR.
    -C        NAVAL SURFACE WARFARE CENTER
    -C        DAHLGREN VIRGINIA
    -C-----------------------------------------------------------------------
    -C-----------------------------------------------------------------------
    -C     MODIFIED BY BARRY W. BROWN TO RETURN DOUBLE PRECISION MACHINE
    -C     CONSTANTS FOR THE COMPUTER BEING USED.  THIS MODIFICATION WAS
    -C     MADE AS PART OF CONVERTING BRATIO TO DOUBLE PRECISION
    -C-----------------------------------------------------------------------
    -C     .. Scalar Arguments ..
    -      INTEGER i
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION b,binv,bm1,one,w,z
    -      INTEGER emax,emin,ibeta,m
    -C     ..
    -C     .. External Functions ..
    -      INTEGER ipmpar
    -      EXTERNAL ipmpar
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble
    -C     ..
    -C     .. Executable Statements ..
    -C
    -      IF (i.GT.1) GO TO 10
    -      b = ipmpar(4)
    -      m = ipmpar(8)
    -      spmpar = b** (1-m)
    -      RETURN
    -C
    -   10 IF (i.GT.2) GO TO 20
    -      b = ipmpar(4)
    -      emin = ipmpar(9)
    -      one = dble(1)
    -      binv = one/b
    -      w = b** (emin+2)
    -      spmpar = ((w*binv)*binv)*binv
    -      RETURN
    -C
    -   20 ibeta = ipmpar(4)
    -      m = ipmpar(8)
    -      emax = ipmpar(10)
    -C
    -      b = ibeta
    -      bm1 = ibeta - 1
    -      one = dble(1)
    -      z = b** (m-1)
    -      w = ((z-one)*b+bm1)/ (b*z)
    -C
    -      z = b** (emax-2)
    -      spmpar = ((w*z)*b)*b
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cdflib/stvaln.f b/scipy-0.10.1/scipy/special/cdflib/stvaln.f
    deleted file mode 100644
    index 51d1526ca2..0000000000
    --- a/scipy-0.10.1/scipy/special/cdflib/stvaln.f
    +++ /dev/null
    @@ -1,67 +0,0 @@
    -      DOUBLE PRECISION FUNCTION stvaln(p)
    -C
    -C**********************************************************************
    -C
    -C     DOUBLE PRECISION FUNCTION STVALN(P)
    -C                    STarting VALue for Neton-Raphon
    -C                calculation of Normal distribution Inverse
    -C
    -C
    -C                              Function
    -C
    -C
    -C     Returns X  such that CUMNOR(X)  =   P,  i.e., the  integral from -
    -C     infinity to X of (1/SQRT(2*PI)) EXP(-U*U/2) dU is P
    -C
    -C
    -C                              Arguments
    -C
    -C
    -C     P --> The probability whose normal deviate is sought.
    -C                    P is DOUBLE PRECISION
    -C
    -C
    -C                              Method
    -C
    -C
    -C     The  rational   function   on  page 95    of Kennedy  and  Gentle,
    -C     Statistical Computing, Marcel Dekker, NY , 1980.
    -C
    -C**********************************************************************
    -C
    -C     .. Scalar Arguments ..
    -      DOUBLE PRECISION p
    -C     ..
    -C     .. Local Scalars ..
    -      DOUBLE PRECISION sign,y,z
    -C     ..
    -C     .. Local Arrays ..
    -      DOUBLE PRECISION xden(5),xnum(5)
    -C     ..
    -C     .. External Functions ..
    -      DOUBLE PRECISION devlpl
    -      EXTERNAL devlpl
    -C     ..
    -C     .. Intrinsic Functions ..
    -      INTRINSIC dble,log,sqrt
    -C     ..
    -C     .. Data statements ..
    -      DATA xnum/-0.322232431088D0,-1.000000000000D0,-0.342242088547D0,
    -     +     -0.204231210245D-1,-0.453642210148D-4/
    -      DATA xden/0.993484626060D-1,0.588581570495D0,0.531103462366D0,
    -     +     0.103537752850D0,0.38560700634D-2/
    -C     ..
    -C     .. Executable Statements ..
    -      IF (.NOT. (p.LE.0.5D0)) GO TO 10
    -      sign = -1.0D0
    -      z = p
    -      GO TO 20
    -
    -   10 sign = 1.0D0
    -      z = 1.0D0 - p
    -   20 y = sqrt(-2.0D0*log(z))
    -      stvaln = y + devlpl(xnum,5,y)/devlpl(xden,5,y)
    -      stvaln = sign*stvaln
    -      RETURN
    -
    -      END
    diff --git a/scipy-0.10.1/scipy/special/cephes.h b/scipy-0.10.1/scipy/special/cephes.h
    deleted file mode 100644
    index 5d89eef4c7..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes.h
    +++ /dev/null
    @@ -1,200 +0,0 @@
    - /*
    - *   This file was automatically generated by version 1.7 of cextract.
    - *   Manual editing not recommended.
    - * 
    - *   Edited for use by cephesmodule.c by Travis Oliphant
    - *
    - *   Created: Fri Mar 31 19:17:33 1995
    - */
    -
    -#ifndef CEPHES_H
    -#define CEPHES_H
    -
    -#include "cephes/cephes_names.h"
    -
    -extern int airy ( double x, double *ai, double *aip, double *bi, double *bip );
    -
    -extern double bdtrc ( int k, int n, double p ); 
    -extern double bdtr ( int k, int n, double p ); 
    -extern double bdtri ( int k, int n, double y ); 
    -
    -extern double beta ( double a, double b );
    -extern double lbeta ( double a, double b );
    -
    -extern double btdtr ( double a, double b, double x ); 
    -
    -extern double cbrt ( double x );
    -/*
    -extern double chbevl ( double x, void *P, int n );
    -*/
    -extern double chdtrc ( double df, double x );
    -extern double chdtr ( double df, double x );
    -extern double chdtri ( double df, double y );
    -extern double dawsn ( double xx );
    -/*
    -extern void eigens ( double A[], double RR[], double E[], int N );
    -*/
    -extern double ellie ( double phi, double m );
    -extern double ellik ( double phi, double m );
    -extern double ellpe ( double x );
    -
    -extern int ellpj ( double u, double m, double *sn, double *cn, double *dn, double *ph );
    -extern double ellpk ( double x );
    -extern double exp10 ( double x );
    -extern double exp1m ( double x );
    -extern double exp2 ( double x );
    -
    -extern double expn ( int n, double x );
    -
    -/*
    -extern double fabs ( double x );
    -extern double fac ( int i );
    -*/
    -
    -extern double fdtrc ( double a, double b, double x ); 
    -extern double fdtr ( double a, double b, double x ); 
    -extern double fdtri ( double a, double b, double y ); 
    -
    -/*
    -extern int fftr ( double x[], int m0, double sine[] );
    -extern double frexp ( double x, int *pw2 );
    -*/
    -/*
    -extern double ldexp ( double x, int pw2 );
    -*/
    -
    -extern int fresnl ( double xxa, double *ssa, double *cca );
    -extern double Gamma ( double x );
    -extern double lgam ( double x );
    -
    -extern double gdtr ( double a, double b, double x ); 
    -extern double gdtrc ( double a, double b, double x ); 
    -extern double gdtri ( double a, double b, double y ); 
    -
    -/*
    -extern int gels ( double A[], double R[], int M, double EPS, double AUX[] );
    -*/
    -extern double hyp2f1 ( double a, double b, double c, double x ); 
    -extern double hyperg ( double a, double b, double x ); 
    -extern double hyp2f0 ( double a, double b, double x, int type, double *err );
    -extern double onef2 ( double a, double b, double c, double x, double *err ); 
    -extern double threef0 ( double a, double b, double c, double x, double *err );
    -
    -
    -extern double i0 ( double x );
    -extern double i0e ( double x );
    -extern double i1 ( double x );
    -extern double i1e ( double x );
    -extern double igamc ( double a, double x );
    -extern double igam ( double a, double x );
    -extern double igami ( double a, double y0 );
    -
    -extern double incbet ( double aa, double bb, double xx ); 
    -extern double incbi ( double aa, double bb, double yy0 ); 
    -
    -extern double iv ( double v, double x );
    -extern double j0 ( double x );
    -extern double y0 ( double x );
    -extern double j1 ( double x );
    -extern double y1 ( double x );
    -
    -extern double jn ( int n, double x );
    -extern double jv ( double n, double x );
    -extern double k0 ( double x );
    -extern double k0e ( double x );
    -extern double k1 ( double x );
    -extern double k1e ( double x );
    -extern double kn ( int nn, double x );
    -/*
    -extern int levnsn ( int n, double r[], double a[], double e[], double refl[] );
    -#ifndef log2
    -extern double log2 ( double x );
    -#endif
    -*/
    -/*
    -extern long lrand ( void );
    -extern long lsqrt ( long x );
    -extern int minv ( double A[], double X[], int n, double B[], int IPS[] );
    -extern int mmmpy ( int r, int c, double *A, double *B, double *Y );
    -extern int mtherr ( char *name, int code );
    -extern double polevl ( double x, void *P, int N );
    -extern double p1evl ( double x, void *P, int N );
    -extern int mtransp ( int n, double *A, double *T );
    -extern int mvmpy ( int r, int c, double *A, double *V, double *Y );
    -*/
    -extern double nbdtrc ( int k, int n, double p ); 
    -extern double nbdtr ( int k, int n, double p ); 
    -extern double nbdtri ( int k, int n, double p ); 
    -
    -extern double ndtr ( double a );
    -extern double erfc ( double a );
    -extern double erf ( double x );
    -extern double ndtri ( double y0 );
    -
    -extern double pdtrc ( int k, double m );
    -extern double pdtr ( int k, double m );
    -extern double pdtri ( int k, double y );
    -/*
    -extern double pow ( double x, double y );
    -extern double powi ( double x, int nn );
    -*/
    -extern double psi ( double x );
    -/* 
    -extern void revers ( double y[], double x[], int n );
    - */
    -extern double rgamma ( double x );
    -extern double round ( double x );
    -
    -/*
    -extern int sprec ( void );
    -extern int dprec ( void );
    -extern int ldprec ( void );
    -*/ 
    -extern int shichi ( double x, double *si, double *ci );
    -extern int sici ( double x, double *si, double *ci );
    -/*
    -extern double simpsn ( double f[], double delta );
    -extern int simq ( double A[], double B[], double X[], int n, int flag, int IPS[] );
    -*/
    -extern double radian ( double d, double m, double s );
    -/*
    -extern int sincos ( double x, double *s, double *c, int flg );
    -*/
    -extern double sindg ( double x ); 
    -extern double cosdg ( double x );
    -/*
    -extern double sinh ( double x );
    -*/
    -extern double spence ( double x );
    -/*
    -extern double sqrt ( double x );
    -*/
    -extern double stdtr ( int k, double t );
    -extern double stdtri ( int k, double p );
    -
    -
    -extern double struve ( double v, double x );
    -extern double yv ( double v, double x); 
    -/*
    -extern double tan ( double x );
    -extern double cot ( double x );
    -*/
    -extern double tandg ( double x );
    -extern double cotdg ( double x );
    -/*
    -extern double tanh ( double x );
    -*/
    -extern double log1p ( double x ); 
    -extern double expm1 ( double x ); 
    -extern double cosm1 ( double x ); 
    -
    -extern double yn ( int n, double x );
    -extern double zeta ( double x, double q );
    -extern double zetac ( double x );
    -
    -extern double smirnov (int n, double e );
    -extern double smirnovi (int n, double p );
    -extern double kolmogorov ( double x );
    -extern double kolmogi ( double p );
    -
    -#endif /* CEPHES_H */
    diff --git a/scipy-0.10.1/scipy/special/cephes/airy.c b/scipy-0.10.1/scipy/special/cephes/airy.c
    deleted file mode 100644
    index c4c1315261..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/airy.c
    +++ /dev/null
    @@ -1,952 +0,0 @@
    -/*							airy.c
    - *
    - *	Airy function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, ai, aip, bi, bip;
    - * int airy();
    - *
    - * airy( x, _&ai, _&aip, _&bi, _&bip );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Solution of the differential equation
    - *
    - *	y"(x) = xy.
    - *
    - * The function returns the two independent solutions Ai, Bi
    - * and their first derivatives Ai'(x), Bi'(x).
    - *
    - * Evaluation is by power series summation for small x,
    - * by rational minimax approximations for large x.
    - *
    - *
    - *
    - * ACCURACY:
    - * Error criterion is absolute when function <= 1, relative
    - * when function > 1, except * denotes relative error criterion.
    - * For large negative x, the absolute error increases as x^1.5.
    - * For large positive x, the relative error increases as x^1.5.
    - *
    - * Arithmetic  domain   function  # trials      peak         rms
    - * IEEE        -10, 0     Ai        10000       1.6e-15     2.7e-16
    - * IEEE          0, 10    Ai        10000       2.3e-14*    1.8e-15*
    - * IEEE        -10, 0     Ai'       10000       4.6e-15     7.6e-16
    - * IEEE          0, 10    Ai'       10000       1.8e-14*    1.5e-15*
    - * IEEE        -10, 10    Bi        30000       4.2e-15     5.3e-16
    - * IEEE        -10, 10    Bi'       30000       4.9e-15     7.3e-16
    - * DEC         -10, 0     Ai         5000       1.7e-16     2.8e-17
    - * DEC           0, 10    Ai         5000       2.1e-15*    1.7e-16*
    - * DEC         -10, 0     Ai'        5000       4.7e-16     7.8e-17
    - * DEC           0, 10    Ai'       12000       1.8e-15*    1.5e-16*
    - * DEC         -10, 10    Bi        10000       5.5e-16     6.8e-17
    - * DEC         -10, 10    Bi'        7000       5.3e-16     8.7e-17
    - *
    - */
    -/*							airy.c */
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -static double c1 = 0.35502805388781723926;
    -static double c2 = 0.258819403792806798405;
    -static double sqrt3 = 1.732050807568877293527;
    -static double sqpii = 5.64189583547756286948E-1;
    -extern double PI;
    -
    -extern double MAXNUM, MACHEP;
    -#ifdef UNK
    -#define MAXAIRY 25.77
    -#endif
    -#ifdef DEC
    -#define MAXAIRY 25.77
    -#endif
    -#ifdef IBMPC
    -#define MAXAIRY 103.892
    -#endif
    -#ifdef MIEEE
    -#define MAXAIRY 103.892
    -#endif
    -
    -
    -#ifdef UNK
    -static double AN[8] = {
    -  3.46538101525629032477E-1,
    -  1.20075952739645805542E1,
    -  7.62796053615234516538E1,
    -  1.68089224934630576269E2,
    -  1.59756391350164413639E2,
    -  7.05360906840444183113E1,
    -  1.40264691163389668864E1,
    -  9.99999999999999995305E-1,
    -};
    -static double AD[8] = {
    -  5.67594532638770212846E-1,
    -  1.47562562584847203173E1,
    -  8.45138970141474626562E1,
    -  1.77318088145400459522E2,
    -  1.64234692871529701831E2,
    -  7.14778400825575695274E1,
    -  1.40959135607834029598E1,
    -  1.00000000000000000470E0,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short AN[32] = {
    -0037661,0066561,0024675,0131301,
    -0041100,0017434,0034324,0101466,
    -0041630,0107450,0067427,0007430,
    -0042050,0013327,0071000,0034737,
    -0042037,0140642,0156417,0167366,
    -0041615,0011172,0075147,0051165,
    -0041140,0066152,0160520,0075146,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short AD[32] = {
    -0040021,0046740,0011422,0064606,
    -0041154,0014640,0024631,0062450,
    -0041651,0003435,0101152,0106401,
    -0042061,0050556,0034605,0136602,
    -0042044,0036024,0152377,0151414,
    -0041616,0172247,0072216,0115374,
    -0041141,0104334,0124154,0166007,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short AN[32] = {
    -0xb658,0x2537,0x2dae,0x3fd6,
    -0x9067,0x871a,0x03e3,0x4028,
    -0xe1e3,0x0de2,0x11e5,0x4053,
    -0x073c,0xee40,0x02da,0x4065,
    -0xfddf,0x5ba1,0xf834,0x4063,
    -0xea4f,0x4f4c,0xa24f,0x4051,
    -0x0f4d,0x5c2a,0x0d8d,0x402c,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short AD[32] = {
    -0x4d31,0x0262,0x29bc,0x3fe2,
    -0x2ca5,0x0533,0x8334,0x402d,
    -0x51a0,0xb04d,0x20e3,0x4055,
    -0xb7b0,0xc730,0x2a2d,0x4066,
    -0xfa61,0x9a9f,0x8782,0x4064,
    -0xd35f,0xee91,0xde94,0x4051,
    -0x9d81,0x950d,0x311b,0x402c,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short AN[32] = {
    -0x3fd6,0x2dae,0x2537,0xb658,
    -0x4028,0x03e3,0x871a,0x9067,
    -0x4053,0x11e5,0x0de2,0xe1e3,
    -0x4065,0x02da,0xee40,0x073c,
    -0x4063,0xf834,0x5ba1,0xfddf,
    -0x4051,0xa24f,0x4f4c,0xea4f,
    -0x402c,0x0d8d,0x5c2a,0x0f4d,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short AD[32] = {
    -0x3fe2,0x29bc,0x0262,0x4d31,
    -0x402d,0x8334,0x0533,0x2ca5,
    -0x4055,0x20e3,0xb04d,0x51a0,
    -0x4066,0x2a2d,0xc730,0xb7b0,
    -0x4064,0x8782,0x9a9f,0xfa61,
    -0x4051,0xde94,0xee91,0xd35f,
    -0x402c,0x311b,0x950d,0x9d81,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double APN[8] = {
    -  6.13759184814035759225E-1,
    -  1.47454670787755323881E1,
    -  8.20584123476060982430E1,
    -  1.71184781360976385540E2,
    -  1.59317847137141783523E2,
    -  6.99778599330103016170E1,
    -  1.39470856980481566958E1,
    -  1.00000000000000000550E0,
    -};
    -static double APD[8] = {
    -  3.34203677749736953049E-1,
    -  1.11810297306158156705E1,
    -  7.11727352147859965283E1,
    -  1.58778084372838313640E2,
    -  1.53206427475809220834E2,
    -  6.86752304592780337944E1,
    -  1.38498634758259442477E1,
    -  9.99999999999999994502E-1,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short APN[32] = {
    -0040035,0017522,0065145,0054755,
    -0041153,0166556,0161471,0057174,
    -0041644,0016750,0034445,0046462,
    -0042053,0027515,0152316,0046717,
    -0042037,0050536,0067023,0023264,
    -0041613,0172252,0007240,0131055,
    -0041137,0023503,0052472,0002305,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short APD[32] = {
    -0037653,0016276,0112106,0126625,
    -0041062,0162577,0067111,0111761,
    -0041616,0054160,0140004,0137455,
    -0042036,0143460,0104626,0157206,
    -0042031,0032330,0067131,0114260,
    -0041611,0054667,0147207,0134564,
    -0041135,0114412,0070653,0146015,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short APN[32] = {
    -0xab3e,0x4d4c,0xa3ea,0x3fe3,
    -0x2bcf,0xdc67,0x7dad,0x402d,
    -0xa9a6,0x0724,0x83bd,0x4054,
    -0xc9ba,0xba99,0x65e9,0x4065,
    -0x64d7,0xcdc2,0xea2b,0x4063,
    -0x1646,0x41d4,0x7e95,0x4051,
    -0x4099,0x6aa7,0xe4e8,0x402b,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short APD[32] = {
    -0xd5b3,0xd288,0x6397,0x3fd5,
    -0x327e,0xedc9,0x5caf,0x4026,
    -0x97e6,0x1800,0xcb0e,0x4051,
    -0xdbd1,0x1132,0xd8e6,0x4063,
    -0x3316,0x0dcb,0x269b,0x4063,
    -0xf72f,0xf9d0,0x2b36,0x4051,
    -0x7982,0x4e35,0xb321,0x402b,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short APN[32] = {
    -0x3fe3,0xa3ea,0x4d4c,0xab3e,
    -0x402d,0x7dad,0xdc67,0x2bcf,
    -0x4054,0x83bd,0x0724,0xa9a6,
    -0x4065,0x65e9,0xba99,0xc9ba,
    -0x4063,0xea2b,0xcdc2,0x64d7,
    -0x4051,0x7e95,0x41d4,0x1646,
    -0x402b,0xe4e8,0x6aa7,0x4099,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short APD[32] = {
    -0x3fd5,0x6397,0xd288,0xd5b3,
    -0x4026,0x5caf,0xedc9,0x327e,
    -0x4051,0xcb0e,0x1800,0x97e6,
    -0x4063,0xd8e6,0x1132,0xdbd1,
    -0x4063,0x269b,0x0dcb,0x3316,
    -0x4051,0x2b36,0xf9d0,0xf72f,
    -0x402b,0xb321,0x4e35,0x7982,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double BN16[5] = {
    --2.53240795869364152689E-1,
    - 5.75285167332467384228E-1,
    --3.29907036873225371650E-1,
    - 6.44404068948199951727E-2,
    --3.82519546641336734394E-3,
    -};
    -static double BD16[5] = {
    -/* 1.00000000000000000000E0,*/
    --7.15685095054035237902E0,
    - 1.06039580715664694291E1,
    --5.23246636471251500874E0,
    - 9.57395864378383833152E-1,
    --5.50828147163549611107E-2,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short BN16[20] = {
    -0137601,0124307,0010213,0035210,
    -0040023,0042743,0101621,0016031,
    -0137650,0164623,0036056,0074511,
    -0037203,0174525,0000473,0142474,
    -0136172,0130041,0066726,0064324,
    -};
    -static unsigned short BD16[20] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0140745,0002354,0044335,0055276,
    -0041051,0124717,0170130,0104013,
    -0140647,0070135,0046473,0103501,
    -0040165,0013745,0033324,0127766,
    -0137141,0117204,0076164,0033107,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short BN16[20] = {
    -0x6751,0xe211,0x3518,0xbfd0,
    -0x2383,0x7072,0x68bc,0x3fe2,
    -0xcf29,0x6785,0x1d32,0xbfd5,
    -0x78a8,0xa027,0x7f2a,0x3fb0,
    -0xcd1b,0x2dba,0x5604,0xbf6f,
    -};
    -static unsigned short BD16[20] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xab58,0x891b,0xa09d,0xc01c,
    -0x1101,0xfe0b,0x3539,0x4025,
    -0x70e8,0xa9a7,0xee0b,0xc014,
    -0x95ff,0xa6da,0xa2fc,0x3fee,
    -0x86c9,0x8f8e,0x33d0,0xbfac,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short BN16[20] = {
    -0xbfd0,0x3518,0xe211,0x6751,
    -0x3fe2,0x68bc,0x7072,0x2383,
    -0xbfd5,0x1d32,0x6785,0xcf29,
    -0x3fb0,0x7f2a,0xa027,0x78a8,
    -0xbf6f,0x5604,0x2dba,0xcd1b,
    -};
    -static unsigned short BD16[20] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0xc01c,0xa09d,0x891b,0xab58,
    -0x4025,0x3539,0xfe0b,0x1101,
    -0xc014,0xee0b,0xa9a7,0x70e8,
    -0x3fee,0xa2fc,0xa6da,0x95ff,
    -0xbfac,0x33d0,0x8f8e,0x86c9,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double BPPN[5] = {
    - 4.65461162774651610328E-1,
    --1.08992173800493920734E0,
    - 6.38800117371827987759E-1,
    --1.26844349553102907034E-1,
    - 7.62487844342109852105E-3,
    -};
    -static double BPPD[5] = {
    -/* 1.00000000000000000000E0,*/
    --8.70622787633159124240E0,
    - 1.38993162704553213172E1,
    --7.14116144616431159572E0,
    - 1.34008595960680518666E0,
    --7.84273211323341930448E-2,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short BPPN[20] = {
    -0037756,0050354,0167531,0135731,
    -0140213,0101216,0032767,0020375,
    -0040043,0104147,0106312,0177632,
    -0137401,0161574,0032015,0043714,
    -0036371,0155035,0143165,0142262,
    -};
    -static unsigned short BPPD[20] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0141013,0046265,0115005,0161053,
    -0041136,0061631,0072445,0156131,
    -0140744,0102145,0001127,0065304,
    -0040253,0103757,0146453,0102513,
    -0137240,0117200,0155402,0113500,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short BPPN[20] = {
    -0x377b,0x9deb,0xca1d,0x3fdd,
    -0xe420,0xc6be,0x7051,0xbff1,
    -0x5ff3,0xf199,0x710c,0x3fe4,
    -0xa8fa,0x8681,0x3c6f,0xbfc0,
    -0xb896,0xb8ce,0x3b43,0x3f7f,
    -};
    -static unsigned short BPPD[20] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xbc45,0xb340,0x6996,0xc021,
    -0xbb8b,0x2ea4,0xcc73,0x402b,
    -0xed59,0xa04a,0x908c,0xc01c,
    -0x70a9,0xf9a5,0x70fd,0x3ff5,
    -0x52e8,0x1b60,0x13d0,0xbfb4,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short BPPN[20] = {
    -0x3fdd,0xca1d,0x9deb,0x377b,
    -0xbff1,0x7051,0xc6be,0xe420,
    -0x3fe4,0x710c,0xf199,0x5ff3,
    -0xbfc0,0x3c6f,0x8681,0xa8fa,
    -0x3f7f,0x3b43,0xb8ce,0xb896,
    -};
    -static unsigned short BPPD[20] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0xc021,0x6996,0xb340,0xbc45,
    -0x402b,0xcc73,0x2ea4,0xbb8b,
    -0xc01c,0x908c,0xa04a,0xed59,
    -0x3ff5,0x70fd,0xf9a5,0x70a9,
    -0xbfb4,0x13d0,0x1b60,0x52e8,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double AFN[9] = {
    --1.31696323418331795333E-1,
    --6.26456544431912369773E-1,
    --6.93158036036933542233E-1,
    --2.79779981545119124951E-1,
    --4.91900132609500318020E-2,
    --4.06265923594885404393E-3,
    --1.59276496239262096340E-4,
    --2.77649108155232920844E-6,
    --1.67787698489114633780E-8,
    -};
    -static double AFD[9] = {
    -/* 1.00000000000000000000E0,*/
    - 1.33560420706553243746E1,
    - 3.26825032795224613948E1,
    - 2.67367040941499554804E1,
    - 9.18707402907259625840E0,
    - 1.47529146771666414581E0,
    - 1.15687173795188044134E-1,
    - 4.40291641615211203805E-3,
    - 7.54720348287414296618E-5,
    - 4.51850092970580378464E-7,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short AFN[36] = {
    -0137406,0155546,0124127,0033732,
    -0140040,0057564,0141263,0041222,
    -0140061,0071316,0013674,0175754,
    -0137617,0037522,0056637,0120130,
    -0137111,0075567,0121755,0166122,
    -0136205,0020016,0043317,0002201,
    -0135047,0001565,0075130,0002334,
    -0133472,0051700,0165021,0131551,
    -0131620,0020347,0132165,0013215,
    -};
    -static unsigned short AFD[36] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041125,0131131,0025627,0067623,
    -0041402,0135342,0021703,0154315,
    -0041325,0162305,0016671,0120175,
    -0041022,0177101,0053114,0141632,
    -0040274,0153131,0147364,0114306,
    -0037354,0166545,0120042,0150530,
    -0036220,0043127,0000727,0130273,
    -0034636,0043275,0075667,0034733,
    -0032762,0112715,0146250,0142474,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short AFN[36] = {
    -0xe6fb,0xd50a,0xdb6c,0xbfc0,
    -0x6852,0x9856,0x0bee,0xbfe4,
    -0x9f7d,0xc2f7,0x2e59,0xbfe6,
    -0xf40b,0x4bb3,0xe7ea,0xbfd1,
    -0xbd8a,0xf47d,0x2f6e,0xbfa9,
    -0xe090,0xc8d9,0xa401,0xbf70,
    -0x009c,0xaf4b,0xe06e,0xbf24,
    -0x366d,0x1d42,0x4a78,0xbec7,
    -0xa2d2,0xf68e,0x041c,0xbe52,
    -};
    -static unsigned short AFD[36] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xedf2,0x2572,0xb64b,0x402a,
    -0x7b1a,0x4478,0x575c,0x4040,
    -0x3410,0xa3b7,0xbc98,0x403a,
    -0x9873,0x2ac9,0x5fc8,0x4022,
    -0x9319,0x39de,0x9acb,0x3ff7,
    -0x5a2b,0xb404,0x9dac,0x3fbd,
    -0xf617,0xe03a,0x08ca,0x3f72,
    -0xe73b,0xaf76,0xc8d7,0x3f13,
    -0x18a7,0xb995,0x52b9,0x3e9e,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short AFN[36] = {
    -0xbfc0,0xdb6c,0xd50a,0xe6fb,
    -0xbfe4,0x0bee,0x9856,0x6852,
    -0xbfe6,0x2e59,0xc2f7,0x9f7d,
    -0xbfd1,0xe7ea,0x4bb3,0xf40b,
    -0xbfa9,0x2f6e,0xf47d,0xbd8a,
    -0xbf70,0xa401,0xc8d9,0xe090,
    -0xbf24,0xe06e,0xaf4b,0x009c,
    -0xbec7,0x4a78,0x1d42,0x366d,
    -0xbe52,0x041c,0xf68e,0xa2d2,
    -};
    -static unsigned short AFD[36] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x402a,0xb64b,0x2572,0xedf2,
    -0x4040,0x575c,0x4478,0x7b1a,
    -0x403a,0xbc98,0xa3b7,0x3410,
    -0x4022,0x5fc8,0x2ac9,0x9873,
    -0x3ff7,0x9acb,0x39de,0x9319,
    -0x3fbd,0x9dac,0xb404,0x5a2b,
    -0x3f72,0x08ca,0xe03a,0xf617,
    -0x3f13,0xc8d7,0xaf76,0xe73b,
    -0x3e9e,0x52b9,0xb995,0x18a7,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double AGN[11] = {
    -  1.97339932091685679179E-2,
    -  3.91103029615688277255E-1,
    -  1.06579897599595591108E0,
    -  9.39169229816650230044E-1,
    -  3.51465656105547619242E-1,
    -  6.33888919628925490927E-2,
    -  5.85804113048388458567E-3,
    -  2.82851600836737019778E-4,
    -  6.98793669997260967291E-6,
    -  8.11789239554389293311E-8,
    -  3.41551784765923618484E-10,
    -};
    -static double AGD[10] = {
    -/*  1.00000000000000000000E0,*/
    -  9.30892908077441974853E0,
    -  1.98352928718312140417E1,
    -  1.55646628932864612953E1,
    -  5.47686069422975497931E0,
    -  9.54293611618961883998E-1,
    -  8.64580826352392193095E-2,
    -  4.12656523824222607191E-3,
    -  1.01259085116509135510E-4,
    -  1.17166733214413521882E-6,
    -  4.91834570062930015649E-9,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short AGN[44] = {
    -0036641,0124456,0167175,0157354,
    -0037710,0037250,0001441,0136671,
    -0040210,0066031,0150401,0123532,
    -0040160,0066545,0003570,0153133,
    -0037663,0171516,0072507,0170345,
    -0037201,0151011,0007510,0045702,
    -0036277,0172317,0104572,0101030,
    -0035224,0045663,0000160,0136422,
    -0033752,0074753,0047702,0135160,
    -0032256,0052225,0156550,0107103,
    -0030273,0142443,0166277,0071720,
    -};
    -static unsigned short AGD[40] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041024,0170537,0117253,0055003,
    -0041236,0127256,0003570,0143240,
    -0041171,0004333,0172476,0160645,
    -0040657,0041161,0055716,0157161,
    -0040164,0046226,0006257,0063431,
    -0037261,0010357,0065445,0047563,
    -0036207,0034043,0057434,0116732,
    -0034724,0055416,0130035,0026377,
    -0033235,0041056,0154071,0023502,
    -0031250,0177071,0167254,0047242,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short AGN[44] = {
    -0xbbde,0xddcf,0x3525,0x3f94,
    -0x37b7,0x0064,0x07d5,0x3fd9,
    -0x34eb,0x3a20,0x0d83,0x3ff1,
    -0x1acb,0xa0ef,0x0dac,0x3fee,
    -0xfe1d,0xcea8,0x7e69,0x3fd6,
    -0x0978,0x21e9,0x3a41,0x3fb0,
    -0x5043,0xf12f,0xfe99,0x3f77,
    -0x17a2,0x600e,0x8976,0x3f32,
    -0x574e,0x69f8,0x4f3d,0x3edd,
    -0x11c8,0xbbad,0xca92,0x3e75,
    -0xee7a,0x7d97,0x78a4,0x3df7,
    -};
    -static unsigned short AGD[40] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x6b40,0xf3d5,0x9e2b,0x4022,
    -0x18d4,0xc0ef,0xd5d5,0x4033,
    -0xdc35,0x7ea7,0x211b,0x402f,
    -0xdbce,0x2b79,0xe84e,0x4015,
    -0xece3,0xc195,0x8992,0x3fee,
    -0xa9ee,0xed64,0x221d,0x3fb6,
    -0x93bb,0x6be3,0xe704,0x3f70,
    -0xa5a0,0xd603,0x8b61,0x3f1a,
    -0x24e8,0xdb07,0xa845,0x3eb3,
    -0x89d4,0x3dd5,0x1fc7,0x3e35,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short AGN[44] = {
    -0x3f94,0x3525,0xddcf,0xbbde,
    -0x3fd9,0x07d5,0x0064,0x37b7,
    -0x3ff1,0x0d83,0x3a20,0x34eb,
    -0x3fee,0x0dac,0xa0ef,0x1acb,
    -0x3fd6,0x7e69,0xcea8,0xfe1d,
    -0x3fb0,0x3a41,0x21e9,0x0978,
    -0x3f77,0xfe99,0xf12f,0x5043,
    -0x3f32,0x8976,0x600e,0x17a2,
    -0x3edd,0x4f3d,0x69f8,0x574e,
    -0x3e75,0xca92,0xbbad,0x11c8,
    -0x3df7,0x78a4,0x7d97,0xee7a,
    -};
    -static unsigned short AGD[40] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4022,0x9e2b,0xf3d5,0x6b40,
    -0x4033,0xd5d5,0xc0ef,0x18d4,
    -0x402f,0x211b,0x7ea7,0xdc35,
    -0x4015,0xe84e,0x2b79,0xdbce,
    -0x3fee,0x8992,0xc195,0xece3,
    -0x3fb6,0x221d,0xed64,0xa9ee,
    -0x3f70,0xe704,0x6be3,0x93bb,
    -0x3f1a,0x8b61,0xd603,0xa5a0,
    -0x3eb3,0xa845,0xdb07,0x24e8,
    -0x3e35,0x1fc7,0x3dd5,0x89d4,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double APFN[9] = {
    -  1.85365624022535566142E-1,
    -  8.86712188052584095637E-1,
    -  9.87391981747398547272E-1,
    -  4.01241082318003734092E-1,
    -  7.10304926289631174579E-2,
    -  5.90618657995661810071E-3,
    -  2.33051409401776799569E-4,
    -  4.08718778289035454598E-6,
    -  2.48379932900442457853E-8,
    -};
    -static double APFD[9] = {
    -/*  1.00000000000000000000E0,*/
    -  1.47345854687502542552E1,
    -  3.75423933435489594466E1,
    -  3.14657751203046424330E1,
    -  1.09969125207298778536E1,
    -  1.78885054766999417817E0,
    -  1.41733275753662636873E-1,
    -  5.44066067017226003627E-3,
    -  9.39421290654511171663E-5,
    -  5.65978713036027009243E-7,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short APFN[36] = {
    -0037475,0150174,0071752,0166651,
    -0040142,0177621,0164246,0101757,
    -0040174,0142670,0106760,0006573,
    -0037715,0067570,0116274,0022404,
    -0037221,0074157,0053341,0117207,
    -0036301,0104257,0015075,0004777,
    -0035164,0057502,0164034,0001313,
    -0033611,0022254,0176000,0112565,
    -0031725,0055523,0025153,0166057,
    -};
    -static unsigned short APFD[36] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041153,0140334,0130506,0061402,
    -0041426,0025551,0024440,0070611,
    -0041373,0134750,0047147,0176702,
    -0041057,0171532,0105430,0017674,
    -0040344,0174416,0001726,0047754,
    -0037421,0021207,0020167,0136264,
    -0036262,0043621,0151321,0124324,
    -0034705,0001313,0163733,0016407,
    -0033027,0166702,0150440,0170561,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short APFN[36] = {
    -0x5db5,0x8e7d,0xba0f,0x3fc7,
    -0xd07e,0x3d14,0x5ff2,0x3fec,
    -0x01af,0x11be,0x98b7,0x3fef,
    -0x84a1,0x1397,0xadef,0x3fd9,
    -0x33d1,0xeadc,0x2f0d,0x3fb2,
    -0xa140,0xe347,0x3115,0x3f78,
    -0x8059,0x5d03,0x8be8,0x3f2e,
    -0x12af,0x9f80,0x2495,0x3ed1,
    -0x7d86,0x654d,0xab6a,0x3e5a,
    -};
    -static unsigned short APFD[36] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xcc60,0x9628,0x781b,0x402d,
    -0x0e31,0x2524,0xc56d,0x4042,
    -0xffb8,0x09cc,0x773d,0x403f,
    -0x03f7,0x5163,0xfe6b,0x4025,
    -0xc9fd,0xc07a,0x9f21,0x3ffc,
    -0xf796,0xe40e,0x2450,0x3fc2,
    -0x351a,0x3a5a,0x48f2,0x3f76,
    -0x63a1,0x7cfb,0xa059,0x3f18,
    -0x1e2e,0x5a24,0xfdb8,0x3ea2,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short APFN[36] = {
    -0x3fc7,0xba0f,0x8e7d,0x5db5,
    -0x3fec,0x5ff2,0x3d14,0xd07e,
    -0x3fef,0x98b7,0x11be,0x01af,
    -0x3fd9,0xadef,0x1397,0x84a1,
    -0x3fb2,0x2f0d,0xeadc,0x33d1,
    -0x3f78,0x3115,0xe347,0xa140,
    -0x3f2e,0x8be8,0x5d03,0x8059,
    -0x3ed1,0x2495,0x9f80,0x12af,
    -0x3e5a,0xab6a,0x654d,0x7d86,
    -};
    -static unsigned short APFD[36] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x402d,0x781b,0x9628,0xcc60,
    -0x4042,0xc56d,0x2524,0x0e31,
    -0x403f,0x773d,0x09cc,0xffb8,
    -0x4025,0xfe6b,0x5163,0x03f7,
    -0x3ffc,0x9f21,0xc07a,0xc9fd,
    -0x3fc2,0x2450,0xe40e,0xf796,
    -0x3f76,0x48f2,0x3a5a,0x351a,
    -0x3f18,0xa059,0x7cfb,0x63a1,
    -0x3ea2,0xfdb8,0x5a24,0x1e2e,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double APGN[11] = {
    --3.55615429033082288335E-2,
    --6.37311518129435504426E-1,
    --1.70856738884312371053E0,
    --1.50221872117316635393E0,
    --5.63606665822102676611E-1,
    --1.02101031120216891789E-1,
    --9.48396695961445269093E-3,
    --4.60325307486780994357E-4,
    --1.14300836484517375919E-5,
    --1.33415518685547420648E-7,
    --5.63803833958893494476E-10,
    -};
    -static double APGD[11] = {
    -/*  1.00000000000000000000E0,*/
    -  9.85865801696130355144E0,
    -  2.16401867356585941885E1,
    -  1.73130776389749389525E1,
    -  6.17872175280828766327E0,
    -  1.08848694396321495475E0,
    -  9.95005543440888479402E-2,
    -  4.78468199683886610842E-3,
    -  1.18159633322838625562E-4,
    -  1.37480673554219441465E-6,
    -  5.79912514929147598821E-9,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short APGN[44] = {
    -0137021,0124372,0176075,0075331,
    -0140043,0023330,0177672,0161655,
    -0140332,0131126,0010413,0171112,
    -0140300,0044263,0175560,0054070,
    -0140020,0044206,0142603,0073324,
    -0137321,0015130,0066144,0144033,
    -0136433,0061243,0175542,0103373,
    -0135361,0053721,0020441,0053203,
    -0134077,0141725,0160277,0130612,
    -0132417,0040372,0100363,0060200,
    -0130432,0175052,0171064,0034147,
    -};
    -static unsigned short APGD[40] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041035,0136420,0030124,0140220,
    -0041255,0017432,0034447,0162256,
    -0041212,0100456,0154544,0006321,
    -0040705,0134026,0127154,0123414,
    -0040213,0051612,0044470,0172607,
    -0037313,0143362,0053273,0157051,
    -0036234,0144322,0054536,0007264,
    -0034767,0146170,0054265,0170342,
    -0033270,0102777,0167362,0073631,
    -0031307,0040644,0167103,0021763,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short APGN[44] = {
    -0xaf5b,0x5f87,0x351f,0xbfa2,
    -0x5c76,0x1ff7,0x64db,0xbfe4,
    -0x7e49,0xc221,0x564a,0xbffb,
    -0x0b07,0x7f6e,0x0916,0xbff8,
    -0x6edb,0xd8b0,0x0910,0xbfe2,
    -0x9903,0x0d8c,0x234b,0xbfba,
    -0x50df,0x7f6c,0x6c54,0xbf83,
    -0x2ad0,0x2424,0x2afa,0xbf3e,
    -0xf631,0xbc17,0xf87a,0xbee7,
    -0x6c10,0x501e,0xe81f,0xbe81,
    -0x870d,0x5e46,0x5f45,0xbe03,
    -};
    -static unsigned short APGD[40] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x9812,0x060a,0xb7a2,0x4023,
    -0xfc96,0x4724,0xa3e3,0x4035,
    -0x819a,0xdb2c,0x5025,0x4031,
    -0x94e2,0xd5cd,0xb702,0x4018,
    -0x1eb1,0x4927,0x6a71,0x3ff1,
    -0x7bc5,0x4ad7,0x78de,0x3fb9,
    -0xc1d7,0x4b2b,0x991a,0x3f73,
    -0xbe1c,0x0b16,0xf98f,0x3f1e,
    -0x4ef3,0xfdde,0x10bf,0x3eb7,
    -0x647e,0x9dc8,0xe834,0x3e38,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short APGN[44] = {
    -0xbfa2,0x351f,0x5f87,0xaf5b,
    -0xbfe4,0x64db,0x1ff7,0x5c76,
    -0xbffb,0x564a,0xc221,0x7e49,
    -0xbff8,0x0916,0x7f6e,0x0b07,
    -0xbfe2,0x0910,0xd8b0,0x6edb,
    -0xbfba,0x234b,0x0d8c,0x9903,
    -0xbf83,0x6c54,0x7f6c,0x50df,
    -0xbf3e,0x2afa,0x2424,0x2ad0,
    -0xbee7,0xf87a,0xbc17,0xf631,
    -0xbe81,0xe81f,0x501e,0x6c10,
    -0xbe03,0x5f45,0x5e46,0x870d,
    -};
    -static unsigned short APGD[40] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4023,0xb7a2,0x060a,0x9812,
    -0x4035,0xa3e3,0x4724,0xfc96,
    -0x4031,0x5025,0xdb2c,0x819a,
    -0x4018,0xb702,0xd5cd,0x94e2,
    -0x3ff1,0x6a71,0x4927,0x1eb1,
    -0x3fb9,0x78de,0x4ad7,0x7bc5,
    -0x3f73,0x991a,0x4b2b,0xc1d7,
    -0x3f1e,0xf98f,0x0b16,0xbe1c,
    -0x3eb7,0x10bf,0xfdde,0x4ef3,
    -0x3e38,0xe834,0x9dc8,0x647e,
    -};
    -#endif
    -
    -int airy( x, ai, aip, bi, bip )
    -double x, *ai, *aip, *bi, *bip;
    -{
    -double z, zz, t, f, g, uf, ug, k, zeta, theta;
    -int domflg;
    -
    -domflg = 0;
    -if( x > MAXAIRY )
    -	{
    -	*ai = 0;
    -	*aip = 0;
    -	*bi = MAXNUM;
    -	*bip = MAXNUM;
    -	return(-1);
    -	}
    -
    -if( x < -2.09 )
    -	{
    -	domflg = 15;
    -	t = sqrt(-x);
    -	zeta = -2.0 * x * t / 3.0;
    -	t = sqrt(t);
    -	k = sqpii / t;
    -	z = 1.0/zeta;
    -	zz = z * z;
    -	uf = 1.0 + zz * polevl( zz, AFN, 8 ) / p1evl( zz, AFD, 9 );
    -	ug = z * polevl( zz, AGN, 10 ) / p1evl( zz, AGD, 10 );
    -	theta = zeta + 0.25 * PI;
    -	f = sin( theta );
    -	g = cos( theta );
    -	*ai = k * (f * uf - g * ug);
    -	*bi = k * (g * uf + f * ug);
    -	uf = 1.0 + zz * polevl( zz, APFN, 8 ) / p1evl( zz, APFD, 9 );
    -	ug = z * polevl( zz, APGN, 10 ) / p1evl( zz, APGD, 10 );
    -	k = sqpii * t;
    -	*aip = -k * (g * uf + f * ug);
    -	*bip = k * (f * uf - g * ug);
    -	return(0);
    -	}
    -
    -if( x >= 2.09 )	/* cbrt(9) */
    -	{
    -	domflg = 5;
    -	t = sqrt(x);
    -	zeta = 2.0 * x * t / 3.0;
    -	g = exp( zeta );
    -	t = sqrt(t);
    -	k = 2.0 * t * g;
    -	z = 1.0/zeta;
    -	f = polevl( z, AN, 7 ) / polevl( z, AD, 7 );
    -	*ai = sqpii * f / k;
    -	k = -0.5 * sqpii * t / g;
    -	f = polevl( z, APN, 7 ) / polevl( z, APD, 7 );
    -	*aip = f * k;
    -
    -	if( x > 8.3203353 )	/* zeta > 16 */
    -		{
    -		f = z * polevl( z, BN16, 4 ) / p1evl( z, BD16, 5 );
    -		k = sqpii * g;
    -		*bi = k * (1.0 + f) / t;
    -		f = z * polevl( z, BPPN, 4 ) / p1evl( z, BPPD, 5 );
    -		*bip = k * t * (1.0 + f);
    -		return(0);
    -		}
    -	}
    -
    -f = 1.0;
    -g = x;
    -t = 1.0;
    -uf = 1.0;
    -ug = x;
    -k = 1.0;
    -z = x * x * x;
    -while( t > MACHEP )
    -	{
    -	uf *= z;
    -	k += 1.0;
    -	uf /=k;
    -	ug *= z;
    -	k += 1.0;
    -	ug /=k;
    -	uf /=k;
    -	f += uf;
    -	k += 1.0;
    -	ug /=k;
    -	g += ug;
    -	t = fabs(uf/f);
    -	}
    -uf = c1 * f;
    -ug = c2 * g;
    -if( (domflg & 1) == 0 )
    -	*ai = uf - ug;
    -if( (domflg & 2) == 0 )
    -	*bi = sqrt3 * (uf + ug);
    -
    -/* the deriviative of ai */
    -k = 4.0;
    -uf = x * x/2.0;
    -ug = z/3.0;
    -f = uf;
    -g = 1.0 + ug;
    -uf /= 3.0;
    -t = 1.0;
    -
    -while( t > MACHEP )
    -	{
    -	uf *= z;
    -	ug /=k;
    -	k += 1.0;
    -	ug *= z;
    -	uf /=k;
    -	f += uf;
    -	k += 1.0;
    -	ug /=k;
    -	uf /=k;
    -	g += ug;
    -	k += 1.0;
    -	t = fabs(ug/g);
    -	}
    -
    -uf = c1 * f;
    -ug = c2 * g;
    -if( (domflg & 4) == 0 )
    -	*aip = uf - ug;
    -if( (domflg & 8) == 0 )
    -	*bip = sqrt3 * (uf + ug);
    -return(0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/bdtr.c b/scipy-0.10.1/scipy/special/cephes/bdtr.c
    deleted file mode 100644
    index aa06ce0f82..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/bdtr.c
    +++ /dev/null
    @@ -1,254 +0,0 @@
    -/*							bdtr.c
    - *
    - *	Binomial distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k, n;
    - * double p, y, bdtr();
    - *
    - * y = bdtr( k, n, p );
    - *
    - * DESCRIPTION:
    - *
    - * Returns the sum of the terms 0 through k of the Binomial
    - * probability density:
    - *
    - *   k
    - *   --  ( n )   j      n-j
    - *   >   (   )  p  (1-p)
    - *   --  ( j )
    - *  j=0
    - *
    - * The terms are not summed directly; instead the incomplete
    - * beta integral is employed, according to the formula
    - *
    - * y = bdtr( k, n, p ) = incbet( n-k, k+1, 1-p ).
    - *
    - * The arguments must be positive, with p ranging from 0 to 1.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,p), with p between 0 and 1.
    - *
    - *               a,b                     Relative error:
    - * arithmetic  domain     # trials      peak         rms
    - *  For p between 0.001 and 1:
    - *    IEEE     0,100       100000      4.3e-15     2.6e-16
    - * See also incbet.c.
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * bdtr domain         k < 0            0.0
    - *                     n < k
    - *                     x < 0, x > 1
    - */
    -/*							bdtrc()
    - *
    - *	Complemented binomial distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k, n;
    - * double p, y, bdtrc();
    - *
    - * y = bdtrc( k, n, p );
    - *
    - * DESCRIPTION:
    - *
    - * Returns the sum of the terms k+1 through n of the Binomial
    - * probability density:
    - *
    - *   n
    - *   --  ( n )   j      n-j
    - *   >   (   )  p  (1-p)
    - *   --  ( j )
    - *  j=k+1
    - *
    - * The terms are not summed directly; instead the incomplete
    - * beta integral is employed, according to the formula
    - *
    - * y = bdtrc( k, n, p ) = incbet( k+1, n-k, p ).
    - *
    - * The arguments must be positive, with p ranging from 0 to 1.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,p).
    - *
    - *               a,b                     Relative error:
    - * arithmetic  domain     # trials      peak         rms
    - *  For p between 0.001 and 1:
    - *    IEEE     0,100       100000      6.7e-15     8.2e-16
    - *  For p between 0 and .001:
    - *    IEEE     0,100       100000      1.5e-13     2.7e-15
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * bdtrc domain      x<0, x>1, n 1
    - */
    -
    -/*								bdtr() */
    -
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -double bdtrc( k, n, p )
    -int k, n;
    -double p;
    -{
    -double dk, dn;
    -
    -if( (p < 0.0) || (p > 1.0) )
    -	goto domerr;
    -if( k < 0 )
    -	return( 1.0 );
    -
    -if( n < k )
    -	{
    -domerr:
    -	mtherr( "bdtrc", DOMAIN );
    -	return( NPY_NAN);
    -	}
    -
    -if( k == n )
    -	return( 0.0 );
    -dn = n - k;
    -if( k == 0 )
    -	{
    -	if( p < .01 )
    -		dk = -expm1( dn * log1p(-p) );
    -	else
    -		dk = 1.0 - pow( 1.0-p, dn );
    -	}
    -else
    -	{
    -	dk = k + 1;
    -	dk = incbet( dk, dn, p );
    -	}
    -return( dk );
    -}
    -
    -
    -
    -double bdtr( k, n, p )
    -int k, n;
    -double p;
    -{
    -double dk, dn;
    -
    -if( (p < 0.0) || (p > 1.0) )
    -	goto domerr;
    -if( (k < 0) || (n < k) )
    -	{
    -domerr:
    -	mtherr( "bdtr", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -
    -if( k == n )
    -	return( 1.0 );
    -
    -dn = n - k;
    -if( k == 0 )
    -	{
    -	dk = pow( 1.0-p, dn );
    -	}
    -else
    -	{
    -	dk = k + 1;
    -	dk = incbet( dn, dk, 1.0 - p );
    -	}
    -return( dk );
    -}
    -
    -
    -double bdtri( k, n, y )
    -int k, n;
    -double y;
    -{
    -double dk, dn, p;
    -
    -if( (y < 0.0) || (y > 1.0) )
    -	goto domerr;
    -if( (k < 0) || (n <= k) )
    -	{
    -domerr:
    -	mtherr( "bdtri", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -
    -dn = n - k;
    -if( k == 0 )
    -	{
    -	if( y > 0.8 )
    -		p = -expm1( log1p(y-1.0) / dn );
    -	else
    -		p = 1.0 - pow( y, 1.0/dn );
    -	}
    -else
    -	{
    -	dk = k + 1;
    -	p = incbet( dn, dk, 0.5 );
    -	if( p > 0.5 )
    -		p = incbi( dk, dn, 1.0-y );
    -	else
    -		p = 1.0 - incbi( dn, dk, y );
    -	}
    -return( p );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/beta.c b/scipy-0.10.1/scipy/special/cephes/beta.c
    deleted file mode 100644
    index 0b059ce1aa..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/beta.c
    +++ /dev/null
    @@ -1,191 +0,0 @@
    -/*							beta.c
    - *
    - *	Beta function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, y, beta();
    - *
    - * y = beta( a, b );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - *                   -     -
    - *                  | (a) | (b)
    - * beta( a, b )  =  -----------.
    - *                     -
    - *                    | (a+b)
    - *
    - * For large arguments the logarithm of the function is
    - * evaluated using lgam(), then exponentiated.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC        0,30        1700       7.7e-15     1.5e-15
    - *    IEEE       0,30       30000       8.1e-14     1.1e-14
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition          value returned
    - * beta overflow    log(beta) > MAXLOG       0.0
    - *                  a or b <0 integer        0.0
    - *
    - */
    -
    -/*							beta.c	*/
    -
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -#define MAXGAM 34.84425627277176174
    -#endif
    -#ifdef DEC
    -#define MAXGAM 34.84425627277176174
    -#endif
    -#ifdef IBMPC
    -#define MAXGAM 171.624376956302725
    -#endif
    -#ifdef MIEEE
    -#define MAXGAM 171.624376956302725
    -#endif
    -
    -extern double MAXLOG, MAXNUM;
    -extern int sgngam;
    -
    -double beta( a, b )
    -double a, b;
    -{
    -double y;
    -int sign;
    -
    -sign = 1;
    -
    -if( a <= 0.0 )
    -	{
    -	if( a == floor(a) )
    -		goto over;
    -	}
    -if( b <= 0.0 )
    -	{
    -	if( b == floor(b) )
    -		goto over;
    -	}
    -
    -
    -y = a + b;
    -if( fabs(y) > MAXGAM )
    -	{
    -	y = lgam(y);
    -	sign *= sgngam; /* keep track of the sign */
    -	y = lgam(b) - y;
    -	sign *= sgngam;
    -	y = lgam(a) + y;
    -	sign *= sgngam;
    -	if( y > MAXLOG )
    -		{
    -over:
    -		mtherr( "beta", OVERFLOW );
    -		return( sign * MAXNUM );
    -		}
    -	return( sign * exp(y) );
    -	}
    -
    -y = Gamma(y);
    -if( y == 0.0 )
    -	goto over;
    -
    -if( a > b )
    -	{
    -	y = Gamma(a)/y;
    -	y *= Gamma(b);
    -	}
    -else
    -	{
    -	y = Gamma(b)/y;
    -	y *= Gamma(a);
    -	}
    -
    -return(y);
    -}
    -
    -
    -
    -/* Natural log of |beta|.  Return the sign of beta in sgngam.  */
    -
    -double lbeta( a, b )
    -double a, b;
    -{
    -double y;
    -int sign;
    -
    -sign = 1;
    -
    -if( a <= 0.0 )
    -	{
    -	if( a == floor(a) )
    -		goto over;
    -	}
    -if( b <= 0.0 )
    -	{
    -	if( b == floor(b) )
    -		goto over;
    -	}
    -
    -
    -y = a + b;
    -if( fabs(y) > MAXGAM )
    -	{
    -	y = lgam(y);
    -	sign *= sgngam; /* keep track of the sign */
    -	y = lgam(b) - y;
    -	sign *= sgngam;
    -	y = lgam(a) + y;
    -	sign *= sgngam;
    -	sgngam = sign;
    -	return( y );
    -	}
    -
    -y = Gamma(y);
    -if( y == 0.0 )
    -	{
    -over:
    -	mtherr( "lbeta", OVERFLOW );
    -	return( sign * MAXNUM );
    -	}
    -
    -if( a > b )
    -	{
    -	y = Gamma(a)/y;
    -	y *= Gamma(b);
    -	}
    -else
    -	{
    -	y = Gamma(b)/y;
    -	y *= Gamma(a);
    -	}
    -
    -if( y < 0 )
    -  {
    -    sgngam = -1;
    -    y = -y;
    -  }
    -else
    -  sgngam = 1;
    -
    -return( log(y) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/btdtr.c b/scipy-0.10.1/scipy/special/cephes/btdtr.c
    deleted file mode 100644
    index c44ef16486..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/btdtr.c
    +++ /dev/null
    @@ -1,60 +0,0 @@
    -
    -/*							btdtr.c
    - *
    - *	Beta distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, x, y, btdtr();
    - *
    - * y = btdtr( a, b, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the area from zero to x under the beta density
    - * function:
    - *
    - *
    - *                          x
    - *            -             -
    - *           | (a+b)       | |  a-1      b-1
    - * P(x)  =  ----------     |   t    (1-t)    dt
    - *           -     -     | |
    - *          | (a) | (b)   -
    - *                         0
    - *
    - *
    - * This function is identical to the incomplete beta
    - * integral function incbet(a, b, x).
    - *
    - * The complemented function is
    - *
    - * 1 - P(1-x)  =  incbet( b, a, x );
    - *
    - *
    - * ACCURACY:
    - *
    - * See incbet.c.
    - *
    - */
    -
    -/*								btdtr()	*/
    -
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -double btdtr( a, b, x )
    -double a, b, x;
    -{
    -
    -return( incbet( a, b, x ) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/cbrt.c b/scipy-0.10.1/scipy/special/cephes/cbrt.c
    deleted file mode 100644
    index c1c8b3540f..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/cbrt.c
    +++ /dev/null
    @@ -1,126 +0,0 @@
    -/*							cbrt.c
    - *
    - *	Cube root
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, cbrt();
    - *
    - * y = cbrt( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the cube root of the argument, which may be negative.
    - *
    - * Range reduction involves determining the power of 2 of
    - * the argument.  A polynomial of degree 2 applied to the
    - * mantissa, and multiplication by the cube root of 1, 2, or 4
    - * approximates the root to within about 0.1%.  Then Newton's
    - * iteration is used three times to converge to an accurate
    - * result.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC        -10,10     200000      1.8e-17     6.2e-18
    - *    IEEE       0,1e308     30000      1.5e-16     5.0e-17
    - *
    - */
    -/*							cbrt.c  */
    -
    -/*
    -Cephes Math Library Release 2.2:  January, 1991
    -Copyright 1984, 1991 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -
    -#include "mconf.h"
    -
    -static double CBRT2  = 1.2599210498948731647672;
    -static double CBRT4  = 1.5874010519681994747517;
    -static double CBRT2I = 0.79370052598409973737585;
    -static double CBRT4I = 0.62996052494743658238361;
    -
    -double cbrt(double x)
    -{
    -int e, rem, sign;
    -double z;
    -
    -if( !npy_isfinite(x) )
    -  return x;
    -if( x == 0 )
    -	return( x );
    -if( x > 0 )
    -	sign = 1;
    -else
    -	{
    -	sign = -1;
    -	x = -x;
    -	}
    -
    -z = x;
    -/* extract power of 2, leaving
    - * mantissa between 0.5 and 1
    - */
    -x = frexp( x, &e );
    -
    -/* Approximate cube root of number between .5 and 1,
    - * peak relative error = 9.2e-6
    - */
    -x = (((-1.3466110473359520655053e-1  * x
    -      + 5.4664601366395524503440e-1) * x
    -      - 9.5438224771509446525043e-1) * x
    -      + 1.1399983354717293273738e0 ) * x
    -      + 4.0238979564544752126924e-1;
    -
    -/* exponent divided by 3 */
    -if( e >= 0 )
    -	{
    -	rem = e;
    -	e /= 3;
    -	rem -= 3*e;
    -	if( rem == 1 )
    -		x *= CBRT2;
    -	else if( rem == 2 )
    -		x *= CBRT4;
    -	}
    -
    -
    -/* argument less than 1 */
    -
    -else
    -	{
    -	e = -e;
    -	rem = e;
    -	e /= 3;
    -	rem -= 3*e;
    -	if( rem == 1 )
    -		x *= CBRT2I;
    -	else if( rem == 2 )
    -		x *= CBRT4I;
    -	e = -e;
    -	}
    -
    -/* multiply by power of 2 */
    -x = ldexp( x, e );
    -
    -/* Newton iteration */
    -x -= ( x - (z/(x*x)) )*0.33333333333333333333;
    -#ifdef DEC
    -x -= ( x - (z/(x*x)) )/3.0;
    -#else
    -x -= ( x - (z/(x*x)) )*0.33333333333333333333;
    -#endif
    -
    -if( sign < 0 )
    -	x = -x;
    -return(x);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/cephes_names.h b/scipy-0.10.1/scipy/special/cephes/cephes_names.h
    deleted file mode 100644
    index b8e99809bb..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/cephes_names.h
    +++ /dev/null
    @@ -1,98 +0,0 @@
    -#ifndef CEPHES_NAMES_H
    -#define CEPHES_NAMES_H
    -
    -#define airy cephes_airy
    -#define bdtrc cephes_bdtrc
    -#define bdtr cephes_bdtr
    -#define bdtri cephes_bdtri
    -#define beta cephes_beta
    -#define lbeta cephes_lbeta
    -#define btdtr cephes_btdtr
    -#define cbrt cephes_cbrt
    -#define chdtrc cephes_chdtrc
    -#define chdtr cephes_chdtr
    -#define chdtri cephes_chdtri
    -#define dawsn cephes_dawsn
    -#define ellie cephes_ellie
    -#define ellik cephes_ellik
    -#define ellpe cephes_ellpe
    -#define ellpj cephes_ellpj
    -#define ellpk cephes_ellpk
    -#define exp10 cephes_exp10
    -#define exp1m cephes_exp1m
    -#define exp2 cephes_exp2
    -#define expn cephes_expn
    -#define fabs cephes_fabs
    -#define fdtrc cephes_fdtrc
    -#define fdtr cephes_fdtr
    -#define fdtri cephes_fdtri
    -#define fresnl cephes_fresnl
    -#define Gamma cephes_Gamma
    -#define lgam cephes_lgam
    -#define gdtr cephes_gdtr
    -#define gdtrc cephes_gdtrc
    -#define gdtri cephes_gdtri
    -#define hyp2f1 cephes_hyp2f1
    -#define hyperg cephes_hyperg
    -#define hyp2f0 cephes_hyp2f0
    -#define onef2 cephes_onef2
    -#define threef0 cephes_threef0
    -#define i0 cephes_i0
    -#define i0e cephes_i0e
    -#define i1 cephes_i1
    -#define i1e cephes_i1e
    -#define igamc cephes_igamc
    -#define igam cephes_igam
    -#define igami cephes_igami
    -#define incbet cephes_incbet
    -#define incbi cephes_incbi
    -#define iv cephes_iv
    -#define j0 cephes_j0
    -#define y0 cephes_y0
    -#define j1 cephes_j1
    -#define y1 cephes_y1
    -#define jn cephes_jn
    -#define jv cephes_jv
    -#define k0 cephes_k0
    -#define k0e cephes_k0e
    -#define k1 cephes_k1
    -#define k1e cephes_k1e
    -#define kn cephes_kn
    -#define nbdtrc cephes_nbdtrc
    -#define nbdtr cephes_nbdtr
    -#define nbdtri cephes_nbdtri
    -#define ndtr cephes_ndtr
    -#define erfc cephes_erfc
    -#define erf cephes_erf
    -#define ndtri cephes_ndtri
    -#define pdtrc cephes_pdtrc
    -#define pdtr cephes_pdtr
    -#define pdtri cephes_pdtri
    -#define psi cephes_psi
    -#define rgamma cephes_rgamma
    -#define round cephes_round
    -#define shichi cephes_shichi
    -#define sici cephes_sici
    -#define radian cephes_radian
    -#define sindg cephes_sindg
    -#define cosdg cephes_cosdg
    -#define sincos cephes_sincos
    -#define spence cephes_spence
    -#define stdtr cephes_stdtr
    -#define stdtri cephes_stdtri
    -#define struve cephes_struve
    -#define yv cephes_yv
    -#define tandg cephes_tandg
    -#define cotdg cephes_cotdg
    -#define log1p cephes_log1p
    -#define expm1 cephes_expm1
    -#define cosm1 cephes_cosm1
    -#define yn cephes_yn
    -#define zeta cephes_zeta
    -#define zetac cephes_zetac
    -#define smirnov cephes_smirnov
    -#define smirnovi cephes_smirnovi
    -#define kolmogorov cephes_kolmogorov
    -#define kolmogi cephes_kolmogi
    -
    -#endif
    diff --git a/scipy-0.10.1/scipy/special/cephes/chbevl.c b/scipy-0.10.1/scipy/special/cephes/chbevl.c
    deleted file mode 100644
    index c383246322..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/chbevl.c
    +++ /dev/null
    @@ -1,85 +0,0 @@
    -/*							chbevl.c
    - *
    - *	Evaluate Chebyshev series
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int N;
    - * double x, y, coef[N], chebevl();
    - *
    - * y = chbevl( x, coef, N );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Evaluates the series
    - *
    - *        N-1
    - *         - '
    - *  y  =   >   coef[i] T (x/2)
    - *         -            i
    - *        i=0
    - *
    - * of Chebyshev polynomials Ti at argument x/2.
    - *
    - * Coefficients are stored in reverse order, i.e. the zero
    - * order term is last in the array.  Note N is the number of
    - * coefficients, not the order.
    - *
    - * If coefficients are for the interval a to b, x must
    - * have been transformed to x -> 2(2x - b - a)/(b-a) before
    - * entering the routine.  This maps x from (a, b) to (-1, 1),
    - * over which the Chebyshev polynomials are defined.
    - *
    - * If the coefficients are for the inverted interval, in
    - * which (a, b) is mapped to (1/b, 1/a), the transformation
    - * required is x -> 2(2ab/x - b - a)/(b-a).  If b is infinity,
    - * this becomes x -> 4a/x - 1.
    - *
    - *
    - *
    - * SPEED:
    - *
    - * Taking advantage of the recurrence properties of the
    - * Chebyshev polynomials, the routine requires one more
    - * addition per loop than evaluating a nested polynomial of
    - * the same degree.
    - *
    - */
    -/*							chbevl.c	*/
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1985, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include 
    -#include "protos.h"
    -
    -double chbevl( x, array, n )
    -double x;
    -double array[];
    -int n;
    -{
    -double b0, b1, b2, *p;
    -int i;
    -
    -p = array;
    -b0 = *p++;
    -b1 = 0.0;
    -i = n - 1;
    -
    -do
    -	{
    -	b2 = b1;
    -	b1 = b0;
    -	b0 = x * b1  -  b2  + *p++;
    -	}
    -while( --i );
    -
    -return( 0.5*(b0-b2) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/chdtr.c b/scipy-0.10.1/scipy/special/cephes/chdtr.c
    deleted file mode 100644
    index 704d8673fa..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/chdtr.c
    +++ /dev/null
    @@ -1,190 +0,0 @@
    -/*							chdtr.c
    - *
    - *	Chi-square distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double df, x, y, chdtr();
    - *
    - * y = chdtr( df, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the area under the left hand tail (from 0 to x)
    - * of the Chi square probability density function with
    - * v degrees of freedom.
    - *
    - *
    - *                                  inf.
    - *                                    -
    - *                        1          | |  v/2-1  -t/2
    - *  P( x | v )   =   -----------     |   t      e     dt
    - *                    v/2  -       | |
    - *                   2    | (v/2)   -
    - *                                   x
    - *
    - * where x is the Chi-square variable.
    - *
    - * The incomplete Gamma integral is used, according to the
    - * formula
    - *
    - *	y = chdtr( v, x ) = igam( v/2.0, x/2.0 ).
    - *
    - *
    - * The arguments must both be positive.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * See igam().
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * chdtr domain   x < 0 or v < 1        0.0
    - */
    -/*							chdtrc()
    - *
    - *	Complemented Chi-square distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double v, x, y, chdtrc();
    - *
    - * y = chdtrc( v, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the area under the right hand tail (from x to
    - * infinity) of the Chi square probability density function
    - * with v degrees of freedom:
    - *
    - *
    - *                                  inf.
    - *                                    -
    - *                        1          | |  v/2-1  -t/2
    - *  P( x | v )   =   -----------     |   t      e     dt
    - *                    v/2  -       | |
    - *                   2    | (v/2)   -
    - *                                   x
    - *
    - * where x is the Chi-square variable.
    - *
    - * The incomplete Gamma integral is used, according to the
    - * formula
    - *
    - *	y = chdtr( v, x ) = igamc( v/2.0, x/2.0 ).
    - *
    - *
    - * The arguments must both be positive.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * See igamc().
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * chdtrc domain  x < 0 or v < 1        0.0
    - */
    -/*							chdtri()
    - *
    - *	Inverse of complemented Chi-square distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double df, x, y, chdtri();
    - *
    - * x = chdtri( df, y );
    - *
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Finds the Chi-square argument x such that the integral
    - * from x to infinity of the Chi-square density is equal
    - * to the given cumulative probability y.
    - *
    - * This is accomplished using the inverse Gamma integral
    - * function and the relation
    - *
    - *    x/2 = igami( df/2, y );
    - *
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * See igami.c.
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * chdtri domain   y < 0 or y > 1        0.0
    - *                     v < 1
    - *
    - */
    -
    -/*								chdtr() */
    -
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -double chdtrc(df,x)
    -double df, x;
    -{
    -
    -if (x < 0.0) return 1.0;   /* modified by T. Oliphant */
    -return( igamc( df/2.0, x/2.0 ) );
    -}
    -
    -
    -
    -double chdtr(df,x)
    -double df, x;
    -{
    -
    -if( (x < 0.0))  /* || (df < 1.0) ) */
    -	{
    -	mtherr( "chdtr", DOMAIN );
    -	return(NPY_NAN);
    -	}
    -return( igam( df/2.0, x/2.0 ) );
    -}
    -
    -
    -
    -double chdtri( df, y )
    -double df, y;
    -{
    -double x;
    -
    -if( (y < 0.0) || (y > 1.0)) /* || (df < 1.0) ) */
    -	{
    -	mtherr( "chdtri", DOMAIN );
    -	return(NPY_NAN);
    -	}
    -
    -x = igami( 0.5 * df, y );
    -return( 2.0 * x );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/const.c b/scipy-0.10.1/scipy/special/cephes/const.c
    deleted file mode 100644
    index f1c048f903..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/const.c
    +++ /dev/null
    @@ -1,213 +0,0 @@
    -/*							const.c
    - *
    - *	Globally declared constants
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * extern double nameofconstant;
    - *
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * This file contains a number of mathematical constants and
    - * also some needed size parameters of the computer arithmetic.
    - * The values are supplied as arrays of hexadecimal integers
    - * for IEEE arithmetic; arrays of octal constants for DEC
    - * arithmetic; and in a normal decimal scientific notation for
    - * other machines.  The particular notation used is determined
    - * by a symbol (DEC, IBMPC, or UNK) defined in the include file
    - * mconf.h.
    - *
    - * The default size parameters are as follows.
    - *
    - * For DEC and UNK modes:
    - * MACHEP =  1.38777878078144567553E-17       2**-56
    - * MAXLOG =  8.8029691931113054295988E1       log(2**127)
    - * MINLOG = -8.872283911167299960540E1        log(2**-128)
    - * MAXNUM =  1.701411834604692317316873e38    2**127
    - *
    - * For IEEE arithmetic (IBMPC):
    - * MACHEP =  1.11022302462515654042E-16       2**-53
    - * MAXLOG =  7.09782712893383996843E2         log(2**1024)
    - * MINLOG = -7.08396418532264106224E2         log(2**-1022)
    - * MAXNUM =  1.7976931348623158E308           2**1024
    - *
    - * The global symbols for mathematical constants are
    - * PI     =  3.14159265358979323846           pi
    - * PIO2   =  1.57079632679489661923           pi/2
    - * SQRT2  =  1.41421356237309504880           sqrt(2)
    - * SQRTH  =  7.07106781186547524401E-1        sqrt(2)/2
    - * LOG2E  =  1.4426950408889634073599         1/log(2)
    - * SQ2OPI =  7.9788456080286535587989E-1      sqrt( 2/pi )
    - * LOGE2  =  6.93147180559945309417E-1        log(2)
    - * LOGSQ2 =  3.46573590279972654709E-1        log(2)/2
    - * THPIO4 =  2.35619449019234492885           3*pi/4
    - * TWOOPI =  6.36619772367581343075535E-1     2/pi
    - *
    - * These lists are subject to change.
    - */
    -
    -/*							const.c */
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -double EULER  = 0.577215664901532860606512090082402;        /* Euler constant */
    -
    -#ifdef UNK
    -#if 1
    -double MACHEP =  1.11022302462515654042E-16;   /* 2**-53 */
    -#else
    -double MACHEP =  1.38777878078144567553E-17;   /* 2**-56 */
    -#endif
    -double UFLOWTHRESH =  2.22507385850720138309E-308; /* 2**-1022 */
    -#ifdef DENORMAL
    -double MAXLOG =  7.09782712893383996732E2;     /* log(MAXNUM) */
    -/* double MINLOG = -7.44440071921381262314E2; */     /* log(2**-1074) */
    -double MINLOG = -7.451332191019412076235E2;     /* log(2**-1075) */
    -#else
    -double MAXLOG =  7.08396418532264106224E2;     /* log 2**1022 */
    -double MINLOG = -7.08396418532264106224E2;     /* log 2**-1022 */
    -#endif
    -double MAXNUM =  1.79769313486231570815E308;    /* 2**1024*(1-MACHEP) */
    -double PI     =  3.14159265358979323846;       /* pi */
    -double PIO2   =  1.57079632679489661923;       /* pi/2 */
    -double SQRT2  =  1.41421356237309504880;       /* sqrt(2) */
    -double SQRTH  =  7.07106781186547524401E-1;    /* sqrt(2)/2 */
    -double LOG2E  =  1.4426950408889634073599;     /* 1/log(2) */
    -double SQ2OPI =  7.9788456080286535587989E-1;  /* sqrt( 2/pi ) */
    -double LOGE2  =  6.93147180559945309417E-1;    /* log(2) */
    -double LOGSQ2 =  3.46573590279972654709E-1;    /* log(2)/2 */
    -double THPIO4 =  2.35619449019234492885;       /* 3*pi/4 */
    -double TWOOPI =  6.36619772367581343075535E-1; /* 2/pi */
    -#ifdef MINUSZERO
    -double NEGZERO = -0.0;
    -#else
    -double NEGZERO = 0.0;
    -#endif
    -#endif
    -
    -#ifdef IBMPC
    -			/* 2**-53 =  1.11022302462515654042E-16 */
    -unsigned short MACHEP[4] = {0x0000,0x0000,0x0000,0x3ca0};
    -unsigned short UFLOWTHRESH[4] = {0x0000,0x0000,0x0000,0x0010};
    -#ifdef DENORMAL
    -			/* log(MAXNUM) =  7.09782712893383996732224E2 */
    -unsigned short MAXLOG[4] = {0x39ef,0xfefa,0x2e42,0x4086};
    -			/* log(2**-1074) = - -7.44440071921381262314E2 */
    -/*unsigned short MINLOG[4] = {0x71c3,0x446d,0x4385,0xc087};*/
    -unsigned short MINLOG[4] = {0x3052,0xd52d,0x4910,0xc087};
    -#else
    -			/* log(2**1022) =   7.08396418532264106224E2 */
    -unsigned short MAXLOG[4] = {0xbcd2,0xdd7a,0x232b,0x4086};
    -			/* log(2**-1022) = - 7.08396418532264106224E2 */
    -unsigned short MINLOG[4] = {0xbcd2,0xdd7a,0x232b,0xc086};
    -#endif
    -			/* 2**1024*(1-MACHEP) =  1.7976931348623158E308 */
    -unsigned short MAXNUM[4] = {0xffff,0xffff,0xffff,0x7fef};
    -unsigned short PI[4]     = {0x2d18,0x5444,0x21fb,0x4009};
    -unsigned short PIO2[4]   = {0x2d18,0x5444,0x21fb,0x3ff9};
    -unsigned short SQRT2[4]  = {0x3bcd,0x667f,0xa09e,0x3ff6};
    -unsigned short SQRTH[4]  = {0x3bcd,0x667f,0xa09e,0x3fe6};
    -unsigned short LOG2E[4]  = {0x82fe,0x652b,0x1547,0x3ff7};
    -unsigned short SQ2OPI[4] = {0x3651,0x33d4,0x8845,0x3fe9};
    -unsigned short LOGE2[4]  = {0x39ef,0xfefa,0x2e42,0x3fe6};
    -unsigned short LOGSQ2[4] = {0x39ef,0xfefa,0x2e42,0x3fd6};
    -unsigned short THPIO4[4] = {0x21d2,0x7f33,0xd97c,0x4002};
    -unsigned short TWOOPI[4] = {0xc883,0x6dc9,0x5f30,0x3fe4};
    -#ifdef MINUSZERO
    -unsigned short NEGZERO[4] = {0x0000,0x0000,0x0000,0x8000};
    -#else
    -unsigned short NEGZERO[4] = {0x0000,0x0000,0x0000,0x0000};
    -#endif
    -#endif
    -
    -#ifdef MIEEE
    -			/* 2**-53 =  1.11022302462515654042E-16 */
    -unsigned short MACHEP[4] = {0x3ca0,0x0000,0x0000,0x0000};
    -unsigned short UFLOWTHRESH[4] = {0x0010,0x0000,0x0000,0x0000};
    -#ifdef DENORMAL
    -			/* log(2**1024) =   7.09782712893383996843E2 */
    -unsigned short MAXLOG[4] = {0x4086,0x2e42,0xfefa,0x39ef};
    -			/* log(2**-1074) = - -7.44440071921381262314E2 */
    -/* unsigned short MINLOG[4] = {0xc087,0x4385,0x446d,0x71c3}; */
    -unsigned short MINLOG[4] = {0xc087,0x4910,0xd52d,0x3052};
    -#else
    -			/* log(2**1022) =  7.08396418532264106224E2 */
    -unsigned short MAXLOG[4] = {0x4086,0x232b,0xdd7a,0xbcd2};
    -			/* log(2**-1022) = - 7.08396418532264106224E2 */
    -unsigned short MINLOG[4] = {0xc086,0x232b,0xdd7a,0xbcd2};
    -#endif
    -			/* 2**1024*(1-MACHEP) =  1.7976931348623158E308 */
    -unsigned short MAXNUM[4] = {0x7fef,0xffff,0xffff,0xffff};
    -unsigned short PI[4]     = {0x4009,0x21fb,0x5444,0x2d18};
    -unsigned short PIO2[4]   = {0x3ff9,0x21fb,0x5444,0x2d18};
    -unsigned short SQRT2[4]  = {0x3ff6,0xa09e,0x667f,0x3bcd};
    -unsigned short SQRTH[4]  = {0x3fe6,0xa09e,0x667f,0x3bcd};
    -unsigned short LOG2E[4]  = {0x3ff7,0x1547,0x652b,0x82fe};
    -unsigned short SQ2OPI[4] = {0x3fe9,0x8845,0x33d4,0x3651};
    -unsigned short LOGE2[4]  = {0x3fe6,0x2e42,0xfefa,0x39ef};
    -unsigned short LOGSQ2[4] = {0x3fd6,0x2e42,0xfefa,0x39ef};
    -unsigned short THPIO4[4] = {0x4002,0xd97c,0x7f33,0x21d2};
    -unsigned short TWOOPI[4] = {0x3fe4,0x5f30,0x6dc9,0xc883};
    -#ifdef MINUSZERO
    -unsigned short NEGZERO[4] = {0x8000,0x0000,0x0000,0x0000};
    -#else
    -unsigned short NEGZERO[4] = {0x0000,0x0000,0x0000,0x0000};
    -#endif
    -#endif
    -
    -#ifdef DEC
    -			/* 2**-56 =  1.38777878078144567553E-17 */
    -unsigned short MACHEP[4] = {0022200,0000000,0000000,0000000};
    -unsigned short UFLOWTHRESH[4] = {0x0080,0x0000,0x0000,0x0000};
    -			/* log 2**127 = 88.029691931113054295988 */
    -unsigned short MAXLOG[4] = {041660,007463,0143742,025733,};
    -			/* log 2**-128 = -88.72283911167299960540 */
    -unsigned short MINLOG[4] = {0141661,071027,0173721,0147572,};
    -			/* 2**127 = 1.701411834604692317316873e38 */
    -unsigned short MAXNUM[4] = {077777,0177777,0177777,0177777,};
    -unsigned short PI[4]     = {040511,007732,0121041,064302,};
    -unsigned short PIO2[4]   = {040311,007732,0121041,064302,};
    -unsigned short SQRT2[4]  = {040265,002363,031771,0157145,};
    -unsigned short SQRTH[4]  = {040065,002363,031771,0157144,};
    -unsigned short LOG2E[4]  = {040270,0125073,024534,013761,};
    -unsigned short SQ2OPI[4] = {040114,041051,0117241,0131204,};
    -unsigned short LOGE2[4]  = {040061,071027,0173721,0147572,};
    -unsigned short LOGSQ2[4] = {037661,071027,0173721,0147572,};
    -unsigned short THPIO4[4] = {040426,0145743,0174631,007222,};
    -unsigned short TWOOPI[4] = {040042,0174603,067116,042025,};
    -#ifdef MINUSZERO
    -unsigned short NEGZERO[4] = {0000000,0000000,0000000,0100000};
    -#else
    -unsigned short NEGZERO[4] = {0000000,0000000,0000000,0000000};
    -#endif
    -#endif
    -
    -#ifndef UNK
    -extern unsigned short MACHEP[];
    -extern unsigned short UFLOWTHRESH[];
    -extern unsigned short MAXLOG[];
    -extern unsigned short UNDLOG[];
    -extern unsigned short MINLOG[];
    -extern unsigned short MAXNUM[];
    -extern unsigned short PI[];
    -extern unsigned short PIO2[];
    -extern unsigned short SQRT2[];
    -extern unsigned short SQRTH[];
    -extern unsigned short LOG2E[];
    -extern unsigned short SQ2OPI[];
    -extern unsigned short LOGE2[];
    -extern unsigned short LOGSQ2[];
    -extern unsigned short THPIO4[];
    -extern unsigned short TWOOPI[];
    -extern unsigned short NEGZERO[];
    -#endif
    diff --git a/scipy-0.10.1/scipy/special/cephes/cpmul.c b/scipy-0.10.1/scipy/special/cephes/cpmul.c
    deleted file mode 100644
    index c9c0b6567f..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/cpmul.c
    +++ /dev/null
    @@ -1,106 +0,0 @@
    -/*							cpmul.c
    - *
    - *	Multiply two polynomials with complex coefficients
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * typedef struct
    - *		{
    - *		double r;
    - *		double i;
    - *		}cmplx;
    - *
    - * cmplx a[], b[], c[];
    - * int da, db, dc;
    - *
    - * cpmul( a, da, b, db, c, &dc );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * The two argument polynomials are multiplied together, and
    - * their product is placed in c.
    - *
    - * Each polynomial is represented by its coefficients stored
    - * as an array of complex number structures (see the typedef).
    - * The degree of a is da, which must be passed to the routine
    - * as an argument; similarly the degree db of b is an argument.
    - * Array a has da + 1 elements and array b has db + 1 elements.
    - * Array c must have storage allocated for at least da + db + 1
    - * elements.  The value da + db is returned in dc; this is
    - * the degree of the product polynomial.
    - *
    - * Polynomial coefficients are stored in ascending order; i.e.,
    - * a(x) = a[0]*x**0 + a[1]*x**1 + ... + a[da]*x**da.
    - *
    - *
    - * If desired, c may be the same as either a or b, in which
    - * case the input argument array is replaced by the product
    - * array (but only up to terms of degree da + db).
    - *
    - */
    -
    -/*							cpmul	*/
    -
    -typedef struct
    -	{
    -	double r;
    -	double i;
    -	}cmplx;
    -
    -void cpmul( cmplx*, int, cmplx*, int, cmplx*, int* );
    -
    -void
    -cpmul( a, da, b, db, c, dc )
    -cmplx *a, *b, *c;
    -int da, db;
    -int *dc;
    -{
    -int i, j, k;
    -cmplx y;
    -register cmplx *pa, *pb, *pc;
    -
    -if( da > db )	/* Know which polynomial has higher degree */
    -	{
    -	i = da;	/* Swapping is OK because args are on the stack */
    -	da = db;
    -	db = i;
    -	pa = a;
    -	a = b;
    -	b = pa;
    -	}
    -	
    -k = da + db;
    -*dc = k;		/* Output the degree of the product */
    -pc = &c[db+1];
    -for( i=db+1; i<=k; i++ )	/* Clear high order terms of output */
    -	{
    -	pc->r = 0;
    -	pc->i = 0;
    -	pc++;
    -	}
    -/* To permit replacement of input, work backward from highest degree */
    -pb = &b[db];
    -for( j=0; j<=db; j++ )
    -	{
    -	pa = &a[da];
    -	pc = &c[k-j];
    -	for( i=0; ir * pb->r  -  pa->i * pb->i;	/* cmpx multiply */
    -		y.i = pa->r * pb->i  +  pa->i * pb->r;
    -		pc->r += y.r;	/* accumulate partial product */
    -		pc->i += y.i;
    -		pa--;
    -		pc--;
    -		}
    -	y.r = pa->r * pb->r  -  pa->i * pb->i;	/* replace last term,	*/
    -	y.i = pa->r * pb->i  +  pa->i * pb->r;	/* ...do not accumulate	*/
    -	pc->r = y.r;
    -	pc->i = y.i;
    -	pb--;
    -	}
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/dawsn.c b/scipy-0.10.1/scipy/special/cephes/dawsn.c
    deleted file mode 100644
    index ca541f9927..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/dawsn.c
    +++ /dev/null
    @@ -1,384 +0,0 @@
    -/*							dawsn.c
    - *
    - *	Dawson's Integral
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, dawsn();
    - *
    - * y = dawsn( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Approximates the integral
    - *
    - *                             x
    - *                             -
    - *                      2     | |        2
    - *  dawsn(x)  =  exp( -x  )   |    exp( t  ) dt
    - *                          | |
    - *                           -
    - *                           0
    - *
    - * Three different rational approximations are employed, for
    - * the intervals 0 to 3.25; 3.25 to 6.25; and 6.25 up.
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0,10        10000       6.9e-16     1.0e-16
    - *    DEC       0,10         6000       7.4e-17     1.4e-17
    - *
    - *
    - */
    -
    -/*							dawsn.c */
    -
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -/* Dawson's integral, interval 0 to 3.25 */
    -#ifdef UNK
    -static double AN[10] = {
    - 1.13681498971755972054E-11,
    - 8.49262267667473811108E-10,
    - 1.94434204175553054283E-8,
    - 9.53151741254484363489E-7,
    - 3.07828309874913200438E-6,
    - 3.52513368520288738649E-4,
    --8.50149846724410912031E-4,
    - 4.22618223005546594270E-2,
    --9.17480371773452345351E-2,
    - 9.99999999999999994612E-1,
    -};
    -static double AD[11] = {
    - 2.40372073066762605484E-11,
    - 1.48864681368493396752E-9,
    - 5.21265281010541664570E-8,
    - 1.27258478273186970203E-6,
    - 2.32490249820789513991E-5,
    - 3.25524741826057911661E-4,
    - 3.48805814657162590916E-3,
    - 2.79448531198828973716E-2,
    - 1.58874241960120565368E-1,
    - 5.74918629489320327824E-1,
    - 1.00000000000000000539E0,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short AN[40] = {
    -0027107,0176630,0075752,0107612,
    -0030551,0070604,0166707,0127727,
    -0031647,0002210,0117120,0056376,
    -0033177,0156026,0141275,0140627,
    -0033516,0112200,0037035,0165515,
    -0035270,0150613,0016423,0105634,
    -0135536,0156227,0023515,0044413,
    -0037055,0015273,0105147,0064025,
    -0137273,0163145,0014460,0166465,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short AD[44] = {
    -0027323,0067372,0115566,0131320,
    -0030714,0114432,0074206,0006637,
    -0032137,0160671,0044203,0026344,
    -0033252,0146656,0020247,0100231,
    -0034303,0003346,0123260,0022433,
    -0035252,0125460,0173041,0155415,
    -0036144,0113747,0125203,0124617,
    -0036744,0166232,0143671,0133670,
    -0037442,0127755,0162625,0000100,
    -0040023,0026736,0003604,0106265,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short AN[40] = {
    -0x51f1,0x0f7d,0xffb3,0x3da8,
    -0xf5fb,0x9db8,0x2e30,0x3e0d,
    -0x0ba0,0x13ca,0xe091,0x3e54,
    -0xb833,0xd857,0xfb82,0x3eaf,
    -0xbd6a,0x07c3,0xd290,0x3ec9,
    -0x7174,0x63a2,0x1a31,0x3f37,
    -0xa921,0xe4e9,0xdb92,0xbf4b,
    -0xed03,0x714c,0xa357,0x3fa5,
    -0x1da7,0xa326,0x7ccc,0xbfb7,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short AD[44] = {
    -0xd65a,0x536e,0x6ddf,0x3dba,
    -0xc1b4,0x4f10,0x9323,0x3e19,
    -0x659c,0x2910,0xfc37,0x3e6b,
    -0xf013,0xc414,0x59b5,0x3eb5,
    -0x04a3,0xd4d6,0x60dc,0x3ef8,
    -0x3b62,0x1ec4,0x5566,0x3f35,
    -0x7532,0xf550,0x92fc,0x3f6c,
    -0x36f7,0x58f7,0x9d93,0x3f9c,
    -0xa008,0xbcb2,0x55fd,0x3fc4,
    -0x9197,0xc0f0,0x65bb,0x3fe2,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short AN[40] = {
    -0x3da8,0xffb3,0x0f7d,0x51f1,
    -0x3e0d,0x2e30,0x9db8,0xf5fb,
    -0x3e54,0xe091,0x13ca,0x0ba0,
    -0x3eaf,0xfb82,0xd857,0xb833,
    -0x3ec9,0xd290,0x07c3,0xbd6a,
    -0x3f37,0x1a31,0x63a2,0x7174,
    -0xbf4b,0xdb92,0xe4e9,0xa921,
    -0x3fa5,0xa357,0x714c,0xed03,
    -0xbfb7,0x7ccc,0xa326,0x1da7,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short AD[44] = {
    -0x3dba,0x6ddf,0x536e,0xd65a,
    -0x3e19,0x9323,0x4f10,0xc1b4,
    -0x3e6b,0xfc37,0x2910,0x659c,
    -0x3eb5,0x59b5,0xc414,0xf013,
    -0x3ef8,0x60dc,0xd4d6,0x04a3,
    -0x3f35,0x5566,0x1ec4,0x3b62,
    -0x3f6c,0x92fc,0xf550,0x7532,
    -0x3f9c,0x9d93,0x58f7,0x36f7,
    -0x3fc4,0x55fd,0xbcb2,0xa008,
    -0x3fe2,0x65bb,0xc0f0,0x9197,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -/* interval 3.25 to 6.25 */
    -#ifdef UNK
    -static double BN[11] = {
    - 5.08955156417900903354E-1,
    --2.44754418142697847934E-1,
    - 9.41512335303534411857E-2,
    --2.18711255142039025206E-2,
    - 3.66207612329569181322E-3,
    --4.23209114460388756528E-4,
    - 3.59641304793896631888E-5,
    --2.14640351719968974225E-6,
    - 9.10010780076391431042E-8,
    --2.40274520828250956942E-9,
    - 3.59233385440928410398E-11,
    -};
    -static double BD[10] = {
    -/*  1.00000000000000000000E0,*/
    --6.31839869873368190192E-1,
    - 2.36706788228248691528E-1,
    --5.31806367003223277662E-2,
    - 8.48041718586295374409E-3,
    --9.47996768486665330168E-4,
    - 7.81025592944552338085E-5,
    --4.55875153252442634831E-6,
    - 1.89100358111421846170E-7,
    --4.91324691331920606875E-9,
    - 7.18466403235734541950E-11,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short BN[44] = {
    -0040002,0045342,0113762,0004360,
    -0137572,0120346,0172745,0144046,
    -0037300,0151134,0123440,0117047,
    -0136663,0025423,0014755,0046026,
    -0036157,0177561,0027535,0046744,
    -0135335,0161052,0071243,0146535,
    -0034426,0154060,0164506,0135625,
    -0133420,0005356,0100017,0151334,
    -0032303,0066137,0024013,0046212,
    -0131045,0016612,0066270,0047574,
    -0027435,0177025,0060625,0116363,
    -};
    -static unsigned short BD[40] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0140041,0140101,0174552,0037073,
    -0037562,0061503,0124271,0160756,
    -0137131,0151760,0073210,0110534,
    -0036412,0170562,0117017,0155377,
    -0135570,0101374,0074056,0037276,
    -0034643,0145376,0001516,0060636,
    -0133630,0173540,0121344,0155231,
    -0032513,0005602,0134516,0007144,
    -0131250,0150540,0075747,0105341,
    -0027635,0177020,0012465,0125402,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short BN[44] = {
    -0x411e,0x52fe,0x495c,0x3fe0,
    -0xb905,0xdebc,0x541c,0xbfcf,
    -0x13c5,0x94e4,0x1a4b,0x3fb8,
    -0xa983,0x633d,0x6562,0xbf96,
    -0xa9bd,0x25eb,0xffee,0x3f6d,
    -0x79ac,0x4e54,0xbc45,0xbf3b,
    -0xd773,0x1d28,0xdb06,0x3f02,
    -0xfa5b,0xd001,0x015d,0xbec2,
    -0x6991,0xe501,0x6d8b,0x3e78,
    -0x09f0,0x4d97,0xa3b1,0xbe24,
    -0xb39e,0xac32,0xbfc2,0x3dc3,
    -};
    -static unsigned short BD[40] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x47c7,0x3f2d,0x3808,0xbfe4,
    -0x3c3e,0x7517,0x4c68,0x3fce,
    -0x122b,0x0ed1,0x3a7e,0xbfab,
    -0xfb60,0x53c1,0x5e2e,0x3f81,
    -0xc7d8,0x8f05,0x105f,0xbf4f,
    -0xcc34,0xc069,0x795f,0x3f14,
    -0x9b53,0x145c,0x1eec,0xbed3,
    -0xc1cd,0x5729,0x6170,0x3e89,
    -0xf15c,0x0f7c,0x1a2c,0xbe35,
    -0xb560,0x02a6,0xbfc2,0x3dd3,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short BN[44] = {
    -0x3fe0,0x495c,0x52fe,0x411e,
    -0xbfcf,0x541c,0xdebc,0xb905,
    -0x3fb8,0x1a4b,0x94e4,0x13c5,
    -0xbf96,0x6562,0x633d,0xa983,
    -0x3f6d,0xffee,0x25eb,0xa9bd,
    -0xbf3b,0xbc45,0x4e54,0x79ac,
    -0x3f02,0xdb06,0x1d28,0xd773,
    -0xbec2,0x015d,0xd001,0xfa5b,
    -0x3e78,0x6d8b,0xe501,0x6991,
    -0xbe24,0xa3b1,0x4d97,0x09f0,
    -0x3dc3,0xbfc2,0xac32,0xb39e,
    -};
    -static unsigned short BD[40] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0xbfe4,0x3808,0x3f2d,0x47c7,
    -0x3fce,0x4c68,0x7517,0x3c3e,
    -0xbfab,0x3a7e,0x0ed1,0x122b,
    -0x3f81,0x5e2e,0x53c1,0xfb60,
    -0xbf4f,0x105f,0x8f05,0xc7d8,
    -0x3f14,0x795f,0xc069,0xcc34,
    -0xbed3,0x1eec,0x145c,0x9b53,
    -0x3e89,0x6170,0x5729,0xc1cd,
    -0xbe35,0x1a2c,0x0f7c,0xf15c,
    -0x3dd3,0xbfc2,0x02a6,0xb560,
    -};
    -#endif
    -
    -/* 6.25 to infinity */
    -#ifdef UNK
    -static double CN[5] = {
    --5.90592860534773254987E-1,
    - 6.29235242724368800674E-1,
    --1.72858975380388136411E-1,
    - 1.64837047825189632310E-2,
    --4.86827613020462700845E-4,
    -};
    -static double CD[5] = {
    -/* 1.00000000000000000000E0,*/
    --2.69820057197544900361E0,
    - 1.73270799045947845857E0,
    --3.93708582281939493482E-1,
    - 3.44278924041233391079E-2,
    --9.73655226040941223894E-4,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short CN[20] = {
    -0140027,0030427,0176477,0074402,
    -0040041,0012617,0112375,0162657,
    -0137461,0000761,0074120,0135160,
    -0036607,0004325,0117246,0115525,
    -0135377,0036345,0064750,0047732,
    -};
    -static unsigned short CD[20] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0140454,0127521,0071653,0133415,
    -0040335,0144540,0016105,0045241,
    -0137711,0112053,0155034,0062237,
    -0037015,0002102,0177442,0074546,
    -0135577,0036345,0064750,0052152,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short CN[20] = {
    -0xef20,0xffa7,0xe622,0xbfe2,
    -0xbcb6,0xf29f,0x22b1,0x3fe4,
    -0x174e,0x2f0a,0x203e,0xbfc6,
    -0xd36b,0xb3d4,0xe11a,0x3f90,
    -0x09fb,0xad3d,0xe79c,0xbf3f,
    -};
    -static unsigned short CD[20] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x76e2,0x2e75,0x95ea,0xc005,
    -0xa954,0x0388,0xb92c,0x3ffb,
    -0x8c94,0x7b43,0x3285,0xbfd9,
    -0x4f2d,0x5fe4,0xa088,0x3fa1,
    -0x0a8d,0xad3d,0xe79c,0xbf4f,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short CN[20] = {
    -0xbfe2,0xe622,0xffa7,0xef20,
    -0x3fe4,0x22b1,0xf29f,0xbcb6,
    -0xbfc6,0x203e,0x2f0a,0x174e,
    -0x3f90,0xe11a,0xb3d4,0xd36b,
    -0xbf3f,0xe79c,0xad3d,0x09fb,
    -};
    -static unsigned short CD[20] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0xc005,0x95ea,0x2e75,0x76e2,
    -0x3ffb,0xb92c,0x0388,0xa954,
    -0xbfd9,0x3285,0x7b43,0x8c94,
    -0x3fa1,0xa088,0x5fe4,0x4f2d,
    -0xbf4f,0xe79c,0xad3d,0x0a8d,
    -};
    -#endif
    -
    -extern double PI, MACHEP;
    -
    -double dawsn( xx )
    -double xx;
    -{
    -double x, y;
    -int sign;
    -
    -
    -sign = 1;
    -if( xx < 0.0 )
    -	{
    -	sign = -1;
    -	xx = -xx;
    -	}
    -
    -if( xx < 3.25 )
    -{
    -x = xx*xx;
    -y = xx * polevl( x, AN, 9 )/polevl( x, AD, 10 );
    -return( sign * y );
    -}
    -
    -
    -x = 1.0/(xx*xx);
    -
    -if( xx < 6.25 )
    -	{
    -	y = 1.0/xx + x * polevl( x, BN, 10) / (p1evl( x, BD, 10) * xx);
    -	return( sign * 0.5 * y );
    -	}
    -
    -
    -if( xx > 1.0e9 )
    -	return( (sign * 0.5)/xx );
    -
    -/* 6.25 to infinity */
    -y = 1.0/xx + x * polevl( x, CN, 4) / (p1evl( x, CD, 5) * xx);
    -return( sign * 0.5 * y );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ellie.c b/scipy-0.10.1/scipy/special/cephes/ellie.c
    deleted file mode 100644
    index b60d3ca754..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ellie.c
    +++ /dev/null
    @@ -1,136 +0,0 @@
    -/*							ellie.c
    - *
    - *	Incomplete elliptic integral of the second kind
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double phi, m, y, ellie();
    - *
    - * y = ellie( phi, m );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Approximates the integral
    - *
    - *
    - *                phi
    - *                 -
    - *                | |
    - *                |                   2
    - * E(phi_\m)  =    |    sqrt( 1 - m sin t ) dt
    - *                |
    - *              | |    
    - *               -
    - *                0
    - *
    - * of amplitude phi and modulus m, using the arithmetic -
    - * geometric mean algorithm.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * Tested at random arguments with phi in [-10, 10] and m in
    - * [0, 1].
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC        0,2         2000       1.9e-16     3.4e-17
    - *    IEEE     -10,10      150000       3.3e-15     1.4e-16
    - *
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987, 1993 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -/*	Incomplete elliptic integral of second kind	*/
    -
    -#include "mconf.h"
    -
    -extern double PI, PIO2, MACHEP;
    -
    -double ellie( phi, m )
    -double phi, m;
    -{
    -double a, b, c, e, temp;
    -double lphi, t, E;
    -int d, mod, npio2, sign;
    -
    -if( m == 0.0 )
    -	return( phi );
    -lphi = phi;
    -npio2 = floor( lphi/PIO2 );
    -if( npio2 & 1 )
    -	npio2 += 1;
    -lphi = lphi - npio2 * PIO2;
    -if( lphi < 0.0 )
    -	{
    -	lphi = -lphi;
    -	sign = -1;
    -	}
    -else
    -	{
    -	sign = 1;
    -	}
    -a = 1.0 - m;
    -E = ellpe( m );
    -if( a == 0.0 )
    -	{
    -	temp = sin( lphi );
    -	goto done;
    -	}
    -t = tan( lphi );
    -b = sqrt(a);
    -/* Thanks to Brian Fitzgerald 
    -   for pointing out an instability near odd multiples of pi/2.  */
    -if( fabs(t) > 10.0 )
    -	{
    -	/* Transform the amplitude */
    -	e = 1.0/(b*t);
    -	/* ... but avoid multiple recursions.  */
    -	if( fabs(e) < 10.0 )
    -		{
    -		e = atan(e);
    -		temp = E + m * sin( lphi ) * sin( e ) - ellie( e, m );
    -		goto done;
    -		}
    -	}
    -c = sqrt(m);
    -a = 1.0;
    -d = 1;
    -e = 0.0;
    -mod = 0;
    -
    -while( fabs(c/a) > MACHEP )
    -	{
    -	temp = b/a;
    -	lphi = lphi + atan(t*temp) + mod * PI;
    -	mod = (lphi + PIO2)/PI;
    -	t = t * ( 1.0 + temp )/( 1.0 - temp * t * t );
    -	c = ( a - b )/2.0;
    -	temp = sqrt( a * b );
    -	a = ( a + b )/2.0;
    -	b = temp;
    -	d += d;
    -	e += c * sin(lphi);
    -	}
    -
    -temp = E / ellpk( 1.0 - m ); 
    -temp *= (atan(t) + mod * PI)/(d * a);
    -temp += e;
    -
    -done:
    -
    -if( sign < 0 )
    -	temp = -temp;
    -temp += npio2 * E;
    -return( temp );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ellik.c b/scipy-0.10.1/scipy/special/cephes/ellik.c
    deleted file mode 100644
    index 5970b1b47c..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ellik.c
    +++ /dev/null
    @@ -1,136 +0,0 @@
    -/*							ellik.c
    - *
    - *	Incomplete elliptic integral of the first kind
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double phi, m, y, ellik();
    - *
    - * y = ellik( phi, m );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Approximates the integral
    - *
    - *
    - *
    - *                phi
    - *                 -
    - *                | |
    - *                |           dt
    - * F(phi | m) =   |    ------------------
    - *                |                   2
    - *              | |    sqrt( 1 - m sin t )
    - *               -
    - *                0
    - *
    - * of amplitude phi and modulus m, using the arithmetic -
    - * geometric mean algorithm.
    - *
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * Tested at random points with m in [0, 1] and phi as indicated.
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE     -10,10       200000      7.4e-16     1.0e-16
    - *
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -/*	Incomplete elliptic integral of first kind	*/
    -
    -#include "mconf.h"
    -extern double PI, PIO2, MACHEP, MAXNUM;
    -
    -double ellik( phi, m )
    -double phi, m;
    -{
    -double a, b, c, e, temp, t, K;
    -int d, mod, sign, npio2;
    -
    -if( m == 0.0 )
    -	return( phi );
    -a = 1.0 - m;
    -if( a == 0.0 )
    -	{
    -	if( fabs(phi) >= PIO2 )
    -		{
    -		mtherr( "ellik", SING );
    -		return( MAXNUM );
    -		}
    -	return(  log(  tan( (PIO2 + phi)/2.0 )  )   );
    -	}
    -npio2 = floor( phi/PIO2 );
    -if( npio2 & 1 )
    -	npio2 += 1;
    -if( npio2 )
    -	{
    -	K = ellpk( a ); 
    -	phi = phi - npio2 * PIO2;
    -	}
    -else
    -	K = 0.0;
    -if( phi < 0.0 )
    -	{
    -	phi = -phi;
    -	sign = -1;
    -	}
    -else
    -	sign = 0;
    -b = sqrt(a);
    -t = tan( phi );
    -if( fabs(t) > 10.0 )
    -	{
    -	/* Transform the amplitude */
    -	e = 1.0/(b*t);
    -	/* ... but avoid multiple recursions.  */
    -	if( fabs(e) < 10.0 )
    -		{
    -		e = atan(e);
    -		if( npio2 == 0 )
    -		    K = ellpk( a ); 
    -		temp = K - ellik( e, m );
    -		goto done;
    -		}
    -	}
    -a = 1.0;
    -c = sqrt(m);
    -d = 1;
    -mod = 0;
    -
    -while( fabs(c/a) > MACHEP )
    -	{
    -	temp = b/a;
    -	phi = phi + atan(t*temp) + mod * PI;
    -	mod = (phi + PIO2)/PI;
    -	t = t * ( 1.0 + temp )/( 1.0 - temp * t * t );
    -	c = ( a - b )/2.0;
    -	temp = sqrt( a * b );
    -	a = ( a + b )/2.0;
    -	b = temp;
    -	d += d;
    -	}
    -
    -temp = (atan(t) + mod * PI)/(d * a);
    -
    -done:
    -if( sign < 0 )
    -	temp = -temp;
    -temp += npio2 * K;
    -return( temp );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ellpe.c b/scipy-0.10.1/scipy/special/cephes/ellpe.c
    deleted file mode 100644
    index beb98ac028..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ellpe.c
    +++ /dev/null
    @@ -1,193 +0,0 @@
    -/*							ellpe.c
    - *
    - *	Complete elliptic integral of the second kind
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double m, y, ellpe();
    - *
    - * y = ellpe( m );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Approximates the integral
    - *
    - *
    - *            pi/2
    - *             -
    - *            | |                 2
    - * E(m)  =    |    sqrt( 1 - m sin t ) dt
    - *          | |    
    - *           -
    - *            0
    - *
    - * Where m = 1 - m1, using the approximation
    - *
    - *      P(x)  -  x log x Q(x).
    - *
    - * Though there are no singularities, the argument m1 is used
    - * internally rather than m for compatibility with ellpk().
    - *
    - * E(1) = 1; E(0) = pi/2.
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC        0, 1       13000       3.1e-17     9.4e-18
    - *    IEEE       0, 1       10000       2.1e-16     7.3e-17
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * ellpe domain      x<0, x>1            0.0
    - *
    - */
    -
    -/*							ellpe.c		*/
    -
    -/* Elliptic integral of second kind */
    -
    -/*
    -Cephes Math Library, Release 2.1:  February, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -
    -Feb, 2002:  altered by Travis Oliphant
    -            so that it is called with argument m 
    -            (which gets immediately converted to m1 = 1-m)
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double P[] = {
    -  1.53552577301013293365E-4,
    -  2.50888492163602060990E-3,
    -  8.68786816565889628429E-3,
    -  1.07350949056076193403E-2,
    -  7.77395492516787092951E-3,
    -  7.58395289413514708519E-3,
    -  1.15688436810574127319E-2,
    -  2.18317996015557253103E-2,
    -  5.68051945617860553470E-2,
    -  4.43147180560990850618E-1,
    -  1.00000000000000000299E0
    -};
    -static double Q[] = {
    -  3.27954898576485872656E-5,
    -  1.00962792679356715133E-3,
    -  6.50609489976927491433E-3,
    -  1.68862163993311317300E-2,
    -  2.61769742454493659583E-2,
    -  3.34833904888224918614E-2,
    -  4.27180926518931511717E-2,
    -  5.85936634471101055642E-2,
    -  9.37499997197644278445E-2,
    -  2.49999999999888314361E-1
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short P[] = {
    -0035041,0001364,0141572,0117555,
    -0036044,0066032,0130027,0033404,
    -0036416,0053617,0064456,0102632,
    -0036457,0161100,0061177,0122612,
    -0036376,0136251,0012403,0124162,
    -0036370,0101316,0151715,0131613,
    -0036475,0105477,0050317,0133272,
    -0036662,0154232,0024645,0171552,
    -0037150,0126220,0047054,0030064,
    -0037742,0162057,0167645,0165612,
    -0040200,0000000,0000000,0000000
    -};
    -static unsigned short Q[] = {
    -0034411,0106743,0115771,0055462,
    -0035604,0052575,0155171,0045540,
    -0036325,0030424,0064332,0167756,
    -0036612,0052366,0063006,0115175,
    -0036726,0070430,0004533,0124654,
    -0037011,0022741,0030675,0030711,
    -0037056,0174452,0127062,0132122,
    -0037157,0177750,0142041,0072523,
    -0037277,0177777,0173137,0002627,
    -0037577,0177777,0177777,0101101
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P[] = {
    -0x53ee,0x986f,0x205e,0x3f24,
    -0xe6e0,0x5602,0x8d83,0x3f64,
    -0xd0b3,0xed25,0xcaf1,0x3f81,
    -0xf4b1,0x0c4f,0xfc48,0x3f85,
    -0x750e,0x22a0,0xd795,0x3f7f,
    -0xb671,0xda79,0x1059,0x3f7f,
    -0xf6d7,0xea19,0xb167,0x3f87,
    -0xbe6d,0x4534,0x5b13,0x3f96,
    -0x8607,0x09c5,0x1592,0x3fad,
    -0xbd71,0xfdf4,0x5c85,0x3fdc,
    -0x0000,0x0000,0x0000,0x3ff0
    -};
    -static unsigned short Q[] = {
    -0x2b66,0x737f,0x31bc,0x3f01,
    -0x296c,0xbb4f,0x8aaf,0x3f50,
    -0x5dfe,0x8d1b,0xa622,0x3f7a,
    -0xd350,0xccc0,0x4a9e,0x3f91,
    -0x7535,0x012b,0xce23,0x3f9a,
    -0xa639,0x2637,0x24bc,0x3fa1,
    -0x568a,0x55c6,0xdf25,0x3fa5,
    -0x2eaa,0x1884,0xfffd,0x3fad,
    -0xe0b3,0xfecb,0xffff,0x3fb7,
    -0xf048,0xffff,0xffff,0x3fcf
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short P[] = {
    -0x3f24,0x205e,0x986f,0x53ee,
    -0x3f64,0x8d83,0x5602,0xe6e0,
    -0x3f81,0xcaf1,0xed25,0xd0b3,
    -0x3f85,0xfc48,0x0c4f,0xf4b1,
    -0x3f7f,0xd795,0x22a0,0x750e,
    -0x3f7f,0x1059,0xda79,0xb671,
    -0x3f87,0xb167,0xea19,0xf6d7,
    -0x3f96,0x5b13,0x4534,0xbe6d,
    -0x3fad,0x1592,0x09c5,0x8607,
    -0x3fdc,0x5c85,0xfdf4,0xbd71,
    -0x3ff0,0x0000,0x0000,0x0000
    -};
    -static unsigned short Q[] = {
    -0x3f01,0x31bc,0x737f,0x2b66,
    -0x3f50,0x8aaf,0xbb4f,0x296c,
    -0x3f7a,0xa622,0x8d1b,0x5dfe,
    -0x3f91,0x4a9e,0xccc0,0xd350,
    -0x3f9a,0xce23,0x012b,0x7535,
    -0x3fa1,0x24bc,0x2637,0xa639,
    -0x3fa5,0xdf25,0x55c6,0x568a,
    -0x3fad,0xfffd,0x1884,0x2eaa,
    -0x3fb7,0xffff,0xfecb,0xe0b3,
    -0x3fcf,0xffff,0xffff,0xf048
    -};
    -#endif
    -
    -double ellpe(x)
    -double x;
    -{
    -x = 1.0-x;
    -if( (x <= 0.0) || (x > 1.0) )
    -	{
    -	if( x == 0.0 )
    -		return( 1.0 );
    -	mtherr( "ellpe", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -return( polevl(x,P,10) - log(x) * (x * polevl(x,Q,9)) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ellpj.c b/scipy-0.10.1/scipy/special/cephes/ellpj.c
    deleted file mode 100644
    index 6e405cc7c0..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ellpj.c
    +++ /dev/null
    @@ -1,157 +0,0 @@
    -/*							ellpj.c
    - *
    - *	Jacobian Elliptic Functions
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double u, m, sn, cn, dn, phi;
    - * int ellpj();
    - *
    - * ellpj( u, m, _&sn, _&cn, _&dn, _&phi );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - *
    - * Evaluates the Jacobian elliptic functions sn(u|m), cn(u|m),
    - * and dn(u|m) of parameter m between 0 and 1, and real
    - * argument u.
    - *
    - * These functions are periodic, with quarter-period on the
    - * real axis equal to the complete elliptic integral
    - * ellpk(m).
    - *
    - * Relation to incomplete elliptic integral:
    - * If u = ellik(phi,m), then sn(u|m) = sin(phi),
    - * and cn(u|m) = cos(phi).  Phi is called the amplitude of u.
    - *
    - * Computation is by means of the arithmetic-geometric mean
    - * algorithm, except when m is within 1e-9 of 0 or 1.  In the
    - * latter case with m close to 1, the approximation applies
    - * only for phi < pi/2.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points with u between 0 and 10, m between
    - * 0 and 1.
    - *
    - *            Absolute error (* = relative error):
    - * arithmetic   function   # trials      peak         rms
    - *    DEC       sn           1800       4.5e-16     8.7e-17
    - *    IEEE      phi         10000       9.2e-16*    1.4e-16*
    - *    IEEE      sn          50000       4.1e-15     4.6e-16
    - *    IEEE      cn          40000       3.6e-15     4.4e-16
    - *    IEEE      dn          10000       1.3e-12     1.8e-14
    - *
    - *  Peak error observed in consistency check using addition
    - * theorem for sn(u+v) was 4e-16 (absolute).  Also tested by
    - * the above relation to the incomplete elliptic integral.
    - * Accuracy deteriorates when u is large.
    - *
    - */
    -
    -/*							ellpj.c		*/
    -
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -extern double PIO2, MACHEP;
    -
    -int ellpj( u, m, sn, cn, dn, ph )
    -double u, m;
    -double *sn, *cn, *dn, *ph;
    -{
    -double ai, b, phi, t, twon;
    -double a[9], c[9];
    -int i;
    -
    -
    -/* Check for special cases */
    -
    -if( m < 0.0 || m > 1.0  || npy_isnan(m))
    -	{
    -	mtherr( "ellpj", DOMAIN );
    -	*sn = NPY_NAN;
    -	*cn = NPY_NAN;
    -	*ph = NPY_NAN;
    -	*dn = NPY_NAN;
    -	return(-1);
    -	}
    -if( m < 1.0e-9 )
    -	{
    -	t = sin(u);
    -	b = cos(u);
    -	ai = 0.25 * m * (u - t*b);
    -	*sn = t - ai*b;
    -	*cn = b + ai*t;
    -	*ph = u - ai;
    -	*dn = 1.0 - 0.5*m*t*t;
    -	return(0);
    -	}
    -
    -if( m >= 0.9999999999 )
    -	{
    -	ai = 0.25 * (1.0-m);
    -	b = cosh(u);
    -	t = tanh(u);
    -	phi = 1.0/b;
    -	twon = b * sinh(u);
    -	*sn = t + ai * (twon - u)/(b*b);
    -	*ph = 2.0*atan(exp(u)) - PIO2 + ai*(twon - u)/b;
    -	ai *= t * phi;
    -	*cn = phi - ai * (twon - u);
    -	*dn = phi + ai * (twon + u);
    -	return(0);
    -	}
    -
    -
    -/*	A. G. M. scale		*/
    -a[0] = 1.0;
    -b = sqrt(1.0 - m);
    -c[0] = sqrt(m);
    -twon = 1.0;
    -i = 0;
    -
    -while( fabs(c[i]/a[i]) > MACHEP )
    -	{
    -	if( i > 7 )
    -		{
    -		mtherr( "ellpj", OVERFLOW );
    -		goto done;
    -		}
    -	ai = a[i];
    -	++i;
    -	c[i] = ( ai - b )/2.0;
    -	t = sqrt( ai * b );
    -	a[i] = ( ai + b )/2.0;
    -	b = t;
    -	twon *= 2.0;
    -	}
    -
    -done:
    -
    -/* backward recurrence */
    -phi = twon * a[i] * u;
    -do
    -	{
    -	t = c[i] * sin(phi) / a[i];
    -	b = phi;
    -	phi = (asin(t) + phi)/2.0;
    -	}
    -while( --i );
    -
    -*sn = sin(phi);
    -t = cos(phi);
    -*cn = t;
    -*dn = t/cos(phi-b);
    -*ph = phi;
    -return(0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ellpk.c b/scipy-0.10.1/scipy/special/cephes/ellpk.c
    deleted file mode 100644
    index bfe1217d83..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ellpk.c
    +++ /dev/null
    @@ -1,232 +0,0 @@
    -/*							ellpk.c
    - *
    - *	Complete elliptic integral of the first kind
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double m, y, ellpk();
    - *
    - * y = ellpk( m ); 
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Approximates the integral
    - *
    - *
    - *
    - *            pi/2
    - *             -
    - *            | |
    - *            |           dt
    - * K(m)  =    |    ------------------
    - *            |                   2
    - *          | |    sqrt( 1 - m sin t )
    - *           -
    - *            0
    - *
    - * where m = 1 - m1, using the approximation
    - *
    - *     P(x)  -  log x Q(x).
    - *
    - * The argument m1 is used internally rather than m so that the logarithmic
    - * singularity at m = 1 will be shifted to the origin; this
    - * preserves maximum accuracy.
    - *
    - * K(0) = pi/2.
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC        0,1        16000       3.5e-17     1.1e-17
    - *    IEEE       0,1        30000       2.5e-16     6.8e-17
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * ellpk domain       x<0, x>1           0.0
    - *
    - */
    -
    -/*							ellpk.c */
    -
    -
    -/*
    -Cephes Math Library, Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -
    -Feb, 2002:  altered by Travis Oliphant 
    -            so that it is called with argument m 
    -            (which gets immediately converted to m1 = 1-m)
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef DEC
    -static unsigned short P[] =
    -{
    -0035020,0127576,0040430,0051544,
    -0036025,0070136,0042703,0153716,
    -0036402,0122614,0062555,0077777,
    -0036441,0102130,0072334,0025172,
    -0036341,0043320,0117242,0172076,
    -0036312,0146456,0077242,0154141,
    -0036420,0003467,0013727,0035407,
    -0036564,0137263,0110651,0020237,
    -0036775,0001330,0144056,0020305,
    -0037305,0144137,0157521,0141734,
    -0040261,0071027,0173721,0147572
    -};
    -static unsigned short Q[] =
    -{
    -0034366,0130371,0103453,0077633,
    -0035557,0122745,0173515,0113016,
    -0036302,0124470,0167304,0074473,
    -0036575,0132403,0117226,0117576,
    -0036703,0156271,0047124,0147733,
    -0036766,0137465,0002053,0157312,
    -0037031,0014423,0154274,0176515,
    -0037107,0177747,0143216,0016145,
    -0037217,0177777,0172621,0074000,
    -0037377,0177777,0177776,0156435,
    -0040000,0000000,0000000,0000000
    -};
    -static unsigned short ac1[] = {0040261,0071027,0173721,0147572};
    -#define C1 (*(double *)ac1)
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P[] =
    -{
    -0x0a6d,0xc823,0x15ef,0x3f22,
    -0x7afa,0xc8b8,0xae0b,0x3f62,
    -0xb000,0x8cad,0x54b1,0x3f80,
    -0x854f,0x0e9b,0x308b,0x3f84,
    -0x5e88,0x13d4,0x28da,0x3f7c,
    -0x5b0c,0xcfd4,0x59a5,0x3f79,
    -0xe761,0xe2fa,0x00e6,0x3f82,
    -0x2414,0x7235,0x97d6,0x3f8e,
    -0xc419,0x1905,0xa05b,0x3f9f,
    -0x387c,0xfbea,0xb90b,0x3fb8,
    -0x39ef,0xfefa,0x2e42,0x3ff6
    -};
    -static unsigned short Q[] =
    -{
    -0x6ff3,0x30e5,0xd61f,0x3efe,
    -0xb2c2,0xbee9,0xf4bc,0x3f4d,
    -0x8f27,0x1dd8,0x5527,0x3f78,
    -0xd3f0,0x73d2,0xb6a0,0x3f8f,
    -0x99fb,0x29ca,0x7b97,0x3f98,
    -0x7bd9,0xa085,0xd7e6,0x3f9e,
    -0x9faa,0x7b17,0x2322,0x3fa3,
    -0xc38d,0xf8d1,0xfffc,0x3fa8,
    -0x2f00,0xfeb2,0xffff,0x3fb1,
    -0xdba4,0xffff,0xffff,0x3fbf,
    -0x0000,0x0000,0x0000,0x3fe0
    -};
    -static unsigned short ac1[] = {0x39ef,0xfefa,0x2e42,0x3ff6};
    -#define C1 (*(double *)ac1)
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short P[] =
    -{
    -0x3f22,0x15ef,0xc823,0x0a6d,
    -0x3f62,0xae0b,0xc8b8,0x7afa,
    -0x3f80,0x54b1,0x8cad,0xb000,
    -0x3f84,0x308b,0x0e9b,0x854f,
    -0x3f7c,0x28da,0x13d4,0x5e88,
    -0x3f79,0x59a5,0xcfd4,0x5b0c,
    -0x3f82,0x00e6,0xe2fa,0xe761,
    -0x3f8e,0x97d6,0x7235,0x2414,
    -0x3f9f,0xa05b,0x1905,0xc419,
    -0x3fb8,0xb90b,0xfbea,0x387c,
    -0x3ff6,0x2e42,0xfefa,0x39ef
    -};
    -static unsigned short Q[] =
    -{
    -0x3efe,0xd61f,0x30e5,0x6ff3,
    -0x3f4d,0xf4bc,0xbee9,0xb2c2,
    -0x3f78,0x5527,0x1dd8,0x8f27,
    -0x3f8f,0xb6a0,0x73d2,0xd3f0,
    -0x3f98,0x7b97,0x29ca,0x99fb,
    -0x3f9e,0xd7e6,0xa085,0x7bd9,
    -0x3fa3,0x2322,0x7b17,0x9faa,
    -0x3fa8,0xfffc,0xf8d1,0xc38d,
    -0x3fb1,0xffff,0xfeb2,0x2f00,
    -0x3fbf,0xffff,0xffff,0xdba4,
    -0x3fe0,0x0000,0x0000,0x0000
    -};
    -static unsigned short ac1[] = {
    -0x3ff6,0x2e42,0xfefa,0x39ef
    -};
    -#define C1 (*(double *)ac1)
    -#endif
    -
    -#ifdef UNK
    -static double P[] =
    -{
    - 1.37982864606273237150E-4,
    - 2.28025724005875567385E-3,
    - 7.97404013220415179367E-3,
    - 9.85821379021226008714E-3,
    - 6.87489687449949877925E-3,
    - 6.18901033637687613229E-3,
    - 8.79078273952743772254E-3,
    - 1.49380448916805252718E-2,
    - 3.08851465246711995998E-2,
    - 9.65735902811690126535E-2,
    - 1.38629436111989062502E0
    -};
    -
    -static double Q[] =
    -{
    - 2.94078955048598507511E-5,
    - 9.14184723865917226571E-4,
    - 5.94058303753167793257E-3,
    - 1.54850516649762399335E-2,
    - 2.39089602715924892727E-2,
    - 3.01204715227604046988E-2,
    - 3.73774314173823228969E-2,
    - 4.88280347570998239232E-2,
    - 7.03124996963957469739E-2,
    - 1.24999999999870820058E-1,
    - 4.99999999999999999821E-1
    -};
    -static double C1 = 1.3862943611198906188E0; /* log(4) */
    -#endif
    -
    -extern double MACHEP, MAXNUM;
    -
    -double ellpk(x)
    -double x;
    -{
    -
    -if( (x < 0.0) || (x > 1.0) )
    -	{
    -	mtherr( "ellpk", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -
    -if( x > MACHEP )
    -	{
    -	return( polevl(x,P,10) - log(x) * polevl(x,Q,10) );
    -	}
    -else
    -	{
    -	if( x == 0.0 )
    -		{
    -		mtherr( "ellpk", SING );
    -		return( MAXNUM );
    -		}
    -	else
    -		{
    -		return( C1 - 0.5 * log(x) );
    -		}
    -	}
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/euclid.c b/scipy-0.10.1/scipy/special/cephes/euclid.c
    deleted file mode 100644
    index 4460fa39be..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/euclid.c
    +++ /dev/null
    @@ -1,250 +0,0 @@
    -/*							euclid.c
    - *
    - *	Rational arithmetic routines
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * 
    - * typedef struct
    - *      {
    - *      double n;  numerator
    - *      double d;  denominator
    - *      }fract;
    - *
    - * radd( a, b, c )      c = b + a
    - * rsub( a, b, c )      c = b - a
    - * rmul( a, b, c )      c = b * a
    - * rdiv( a, b, c )      c = b / a
    - * euclid( &n, &d )     Reduce n/d to lowest terms,
    - *                      return greatest common divisor.
    - *
    - * Arguments of the routines are pointers to the structures.
    - * The double precision numbers are assumed, without checking,
    - * to be integer valued.  Overflow conditions are reported.
    - */
    - 
    -
    -#include "mconf.h"
    -
    -extern double MACHEP;
    -#define BIG (1.0/MACHEP)
    -
    -double euclid(double* num, double* den );
    -
    -typedef struct
    -	{
    -	double n; /* numerator */
    -	double d; /* denominator */
    -	} fract;
    -
    -/* Add fractions. */
    -static void radd(fract*,fract*,fract*);
    -static void rsub(fract*,fract*,fract*);
    -static void rmul(fract*,fract*,fract*);
    -static void rdiv(fract*,fract*,fract*);
    -
    -void radd( f1, f2, f3 )
    -fract *f1, *f2, *f3;
    -{
    -double gcd, d1, d2, gcn, n1, n2;
    -
    -n1 = f1->n;
    -d1 = f1->d;
    -n2 = f2->n;
    -d2 = f2->d;
    -if( n1 == 0.0 )
    -	{
    -	f3->n = n2;
    -	f3->d = d2;
    -	return;
    -	}
    -if( n2 == 0.0 )
    -	{
    -	f3->n = n1;
    -	f3->d = d1;
    -	return;
    -	}
    -
    -gcd = euclid( &d1, &d2 ); /* common divisors of denominators */
    -gcn = euclid( &n1, &n2 ); /* common divisors of numerators */
    -/* Note, factoring the numerators
    - * makes overflow slightly less likely.
    - */
    -f3->n = ( n1 * d2 + n2 * d1) * gcn;
    -f3->d = d1 * d2 * gcd;
    -euclid( &f3->n, &f3->d );
    -}
    -
    -
    -/* Subtract fractions. */
    -
    -void rsub( f1, f2, f3 )
    -fract *f1, *f2, *f3;
    -{
    -double gcd, d1, d2, gcn, n1, n2;
    -
    -n1 = f1->n;
    -d1 = f1->d;
    -n2 = f2->n;
    -d2 = f2->d;
    -if( n1 == 0.0 )
    -	{
    -	f3->n = n2;
    -	f3->d = d2;
    -	return;
    -	}
    -if( n2 == 0.0 )
    -	{
    -	f3->n = -n1;
    -	f3->d = d1;
    -	return;
    -	}
    -
    -gcd = euclid( &d1, &d2 );
    -gcn = euclid( &n1, &n2 );
    -f3->n = (n2 * d1 - n1 * d2) * gcn;
    -f3->d = d1 * d2 * gcd;
    -euclid( &f3->n, &f3->d );
    -}
    -
    -
    -
    -
    -/* Multiply fractions. */
    -
    -void rmul( ff1, ff2, ff3 )
    -fract *ff1, *ff2, *ff3;
    -{
    -double d1, d2, n1, n2;
    -
    -n1 = ff1->n;
    -d1 = ff1->d;
    -n2 = ff2->n;
    -d2 = ff2->d;
    -
    -if( (n1 == 0.0) || (n2 == 0.0) )
    -	{
    -	ff3->n = 0.0;
    -	ff3->d = 1.0;
    -	return;
    -	}
    -euclid( &n1, &d2 ); /* cross cancel common divisors */
    -euclid( &n2, &d1 );
    -ff3->n = n1 * n2;
    -ff3->d = d1 * d2;
    -/* Report overflow. */
    -if( (fabs(ff3->n) >= BIG) || (fabs(ff3->d) >= BIG) )
    -	{
    -	mtherr( "rmul", OVERFLOW );
    -	return;
    -	}
    -/* euclid( &ff3->n, &ff3->d );*/
    -}
    -
    -
    -
    -/* Divide fractions. */
    -
    -void rdiv( ff1, ff2, ff3 )
    -fract *ff1, *ff2, *ff3;
    -{
    -double d1, d2, n1, n2;
    -
    -n1 = ff1->d;	/* Invert ff1, then multiply */
    -d1 = ff1->n;
    -if( d1 < 0.0 )
    -	{ /* keep denominator positive */
    -	n1 = -n1;
    -	d1 = -d1;
    -	}
    -n2 = ff2->n;
    -d2 = ff2->d;
    -if( (n1 == 0.0) || (n2 == 0.0) )
    -	{
    -	ff3->n = 0.0;
    -	ff3->d = 1.0;
    -	return;
    -	}
    -
    -euclid( &n1, &d2 ); /* cross cancel any common divisors */
    -euclid( &n2, &d1 );
    -ff3->n = n1 * n2;
    -ff3->d = d1 * d2;
    -/* Report overflow. */
    -if( (fabs(ff3->n) >= BIG) || (fabs(ff3->d) >= BIG) )
    -	{
    -	mtherr( "rdiv", OVERFLOW );
    -	return;
    -	}
    -/* euclid( &ff3->n, &ff3->d );*/
    -}
    -
    -
    -
    -
    -
    -/* Euclidean algorithm
    - *   reduces fraction to lowest terms,
    - *   returns greatest common divisor.
    - */
    -
    -
    -double euclid( num, den )
    -double *num, *den;
    -{
    -double n, d, q, r;
    -
    -n = *num; /* Numerator. */
    -d = *den; /* Denominator. */
    -
    -/* Make numbers positive, locally. */
    -if( n < 0.0 )
    -	n = -n;
    -if( d < 0.0 )
    -	d = -d;
    -
    -/* Abort if numbers are too big for integer arithmetic. */
    -if( (n >= BIG) || (d >= BIG) )
    -	{
    -	mtherr( "euclid", OVERFLOW );
    -	return(1.0);
    -	}
    -
    -/* Divide by zero, gcd = 1. */
    -if(d == 0.0)
    -	return( 1.0 );
    -
    -/* Zero. Return 0/1, gcd = denominator. */
    -if(n == 0.0)
    -	{
    -/*
    -	if( *den < 0.0 )
    -		*den = -1.0;
    -	else
    -		*den = 1.0;
    -*/
    -	*den = 1.0;
    -	return( d );
    -	}
    -
    -while( d > 0.5 )
    -	{
    -/* Find integer part of n divided by d. */
    -	q = floor( n/d );
    -/* Find remainder after dividing n by d. */
    -	r = n - d * q;
    -/* The next fraction is d/r. */
    -	n = d;
    -	d = r;
    -	}
    -
    -if( n < 0.0 )
    -	mtherr( "euclid", UNDERFLOW );
    -
    -*num /= n;
    -*den /= n;
    -return( n );
    -}
    -
    diff --git a/scipy-0.10.1/scipy/special/cephes/exp10.c b/scipy-0.10.1/scipy/special/cephes/exp10.c
    deleted file mode 100644
    index 99e62d852b..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/exp10.c
    +++ /dev/null
    @@ -1,200 +0,0 @@
    -/*							exp10.c
    - *
    - *	Base 10 exponential function
    - *      (Common antilogarithm)
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, exp10();
    - *
    - * y = exp10( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns 10 raised to the x power.
    - *
    - * Range reduction is accomplished by expressing the argument
    - * as 10**x = 2**n 10**f, with |f| < 0.5 log10(2).
    - * The Pade' form
    - *
    - *    1 + 2x P(x**2)/( Q(x**2) - P(x**2) )
    - *
    - * is used to approximate 10**f.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE     -307,+307    30000       2.2e-16     5.5e-17
    - * Test result from an earlier version (2.1):
    - *    DEC       -38,+38     70000       3.1e-17     7.0e-18
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * exp10 underflow    x < -MAXL10        0.0
    - * exp10 overflow     x > MAXL10       MAXNUM
    - *
    - * DEC arithmetic: MAXL10 = 38.230809449325611792.
    - * IEEE arithmetic: MAXL10 = 308.2547155599167.
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.2:  January, 1991
    -Copyright 1984, 1991 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double P[] = {
    - 4.09962519798587023075E-2,
    - 1.17452732554344059015E1,
    - 4.06717289936872725516E2,
    - 2.39423741207388267439E3,
    -};
    -static double Q[] = {
    -/* 1.00000000000000000000E0,*/
    - 8.50936160849306532625E1,
    - 1.27209271178345121210E3,
    - 2.07960819286001865907E3,
    -};
    -/* static double LOG102 = 3.01029995663981195214e-1; */
    -static double LOG210 = 3.32192809488736234787e0;
    -static double LG102A = 3.01025390625000000000E-1;
    -static double LG102B = 4.60503898119521373889E-6;
    -/* static double MAXL10 = 38.230809449325611792; */
    -static double MAXL10 = 308.2547155599167;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short P[] = {
    -0037047,0165657,0114061,0067234,
    -0041073,0166243,0123052,0144643,
    -0042313,0055720,0024032,0047443,
    -0043025,0121714,0070232,0050007,
    -};
    -static unsigned short Q[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041652,0027756,0071216,0050075,
    -0042637,0001367,0077263,0136017,
    -0043001,0174673,0024157,0133416,
    -};
    -/*
    -static unsigned short L102[] = {0037632,0020232,0102373,0147770};
    -#define LOG102 *(double *)L102
    -*/
    -static unsigned short L210[] = {0040524,0115170,0045715,0015613};
    -#define LOG210 *(double *)L210
    -static unsigned short L102A[] = {0037632,0020000,0000000,0000000,};
    -#define LG102A *(double *)L102A
    -static unsigned short L102B[] = {0033632,0102373,0147767,0114220,};
    -#define LG102B *(double *)L102B
    -static unsigned short MXL[] = {0041430,0166131,0047761,0154130,};
    -#define MAXL10 ( *(double *)MXL )
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P[] = {
    -0x2dd4,0xf306,0xfd75,0x3fa4,
    -0x5934,0x74c5,0x7d94,0x4027,
    -0x49e4,0x0503,0x6b7a,0x4079,
    -0x4a01,0x8e13,0xb479,0x40a2,
    -};
    -static unsigned short Q[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xca08,0xce51,0x45fd,0x4055,
    -0x7782,0xefd6,0xe05e,0x4093,
    -0xf6e2,0x650d,0x3f37,0x40a0,
    -};
    -/*
    -static unsigned short L102[] = {0x79ff,0x509f,0x4413,0x3fd3};
    -#define LOG102 *(double *)L102
    -*/
    -static unsigned short L210[] = {0xa371,0x0979,0x934f,0x400a};
    -#define LOG210 *(double *)L210
    -static unsigned short L102A[] = {0x0000,0x0000,0x4400,0x3fd3,};
    -#define LG102A *(double *)L102A
    -static unsigned short L102B[] = {0xf312,0x79fe,0x509f,0x3ed3,};
    -#define LG102B *(double *)L102B
    -static double MAXL10 = 308.2547155599167;
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short P[] = {
    -0x3fa4,0xfd75,0xf306,0x2dd4,
    -0x4027,0x7d94,0x74c5,0x5934,
    -0x4079,0x6b7a,0x0503,0x49e4,
    -0x40a2,0xb479,0x8e13,0x4a01,
    -};
    -static unsigned short Q[] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4055,0x45fd,0xce51,0xca08,
    -0x4093,0xe05e,0xefd6,0x7782,
    -0x40a0,0x3f37,0x650d,0xf6e2,
    -};
    -/*
    -static unsigned short L102[] = {0x3fd3,0x4413,0x509f,0x79ff};
    -#define LOG102 *(double *)L102
    -*/
    -static unsigned short L210[] = {0x400a,0x934f,0x0979,0xa371};
    -#define LOG210 *(double *)L210
    -static unsigned short L102A[] = {0x3fd3,0x4400,0x0000,0x0000,};
    -#define LG102A *(double *)L102A
    -static unsigned short L102B[] = {0x3ed3,0x509f,0x79fe,0xf312,};
    -#define LG102B *(double *)L102B
    -static double MAXL10 = 308.2547155599167;
    -#endif
    -
    -extern double MAXNUM;
    -
    -double exp10(double x)
    -{
    -double px, xx;
    -short n;
    -
    -if( npy_isnan(x) )
    -	return(x);
    -if( x > MAXL10 )
    -	{
    -	return( NPY_INFINITY );
    -	}
    -
    -if( x < -MAXL10 )	/* Would like to use MINLOG but can't */
    -	{
    -	mtherr( "exp10", UNDERFLOW );
    -	return(0.0);
    -	}
    -
    -/* Express 10**x = 10**g 2**n
    - *   = 10**g 10**( n log10(2) )
    - *   = 10**( g + n log10(2) )
    - */
    -px = floor( LOG210 * x + 0.5 );
    -n = px;
    -x -= px * LG102A;
    -x -= px * LG102B;
    -
    -/* rational approximation for exponential
    - * of the fractional part:
    - * 10**x = 1 + 2x P(x**2)/( Q(x**2) - P(x**2) )
    - */
    -xx = x * x;
    -px = x * polevl( xx, P, 3 );
    -x =  px/( p1evl( xx, Q, 3 ) - px );
    -x = 1.0 + ldexp( x, 1 );
    -
    -/* multiply by power of 2 */
    -x = ldexp( x, n );
    -
    -return(x);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/exp2.c b/scipy-0.10.1/scipy/special/cephes/exp2.c
    deleted file mode 100644
    index c3945ff6d8..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/exp2.c
    +++ /dev/null
    @@ -1,158 +0,0 @@
    -/*							exp2.c
    - *
    - *	Base 2 exponential function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, exp2();
    - *
    - * y = exp2( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns 2 raised to the x power.
    - *
    - * Range reduction is accomplished by separating the argument
    - * into an integer k and fraction f such that
    - *     x    k  f
    - *    2  = 2  2.
    - *
    - * A Pade' form
    - *
    - *   1 + 2x P(x**2) / (Q(x**2) - x P(x**2) )
    - *
    - * approximates 2**x in the basic range [-0.5, 0.5].
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE    -1022,+1024   30000       1.8e-16     5.4e-17
    - *
    - *
    - * See exp.c for comments on error amplification.
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * exp underflow    x < -MAXL2        0.0
    - * exp overflow     x > MAXL2         MAXNUM
    - *
    - * For DEC arithmetic, MAXL2 = 127.
    - * For IEEE arithmetic, MAXL2 = 1024.
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1995 by Stephen L. Moshier
    -*/
    -
    -
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double P[] = {
    - 2.30933477057345225087E-2,
    - 2.02020656693165307700E1,
    - 1.51390680115615096133E3,
    -};
    -static double Q[] = {
    -/* 1.00000000000000000000E0,*/
    - 2.33184211722314911771E2,
    - 4.36821166879210612817E3,
    -};
    -#define MAXL2 1024.0
    -#define MINL2 -1024.0
    -#endif
    -
    -#ifdef DEC
    -static unsigned short P[] = {
    -0036675,0027102,0122327,0053227,
    -0041241,0116724,0115412,0157355,
    -0042675,0036404,0101733,0132226,
    -};
    -static unsigned short Q[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0042151,0027450,0077732,0160744,
    -0043210,0100661,0077550,0056560,
    -};
    -#define MAXL2 127.0
    -#define MINL2 -127.0
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P[] = {
    -0xead3,0x549a,0xa5c8,0x3f97,
    -0x5bde,0x9361,0x33ba,0x4034,
    -0x7693,0x907b,0xa7a0,0x4097,
    -};
    -static unsigned short Q[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x5c3c,0x0ffb,0x25e5,0x406d,
    -0x0bae,0x2fed,0x1036,0x40b1,
    -};
    -#define MAXL2 1024.0
    -#define MINL2 -1022.0
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short P[] = {
    -0x3f97,0xa5c8,0x549a,0xead3,
    -0x4034,0x33ba,0x9361,0x5bde,
    -0x4097,0xa7a0,0x907b,0x7693,
    -};
    -static unsigned short Q[] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x406d,0x25e5,0x0ffb,0x5c3c,
    -0x40b1,0x1036,0x2fed,0x0bae,
    -};
    -#define MAXL2 1024.0
    -#define MINL2 -1022.0
    -#endif
    -
    -extern double MAXNUM;
    -
    -double exp2(double x)
    -{
    -double px, xx;
    -short n;
    -
    -if( npy_isnan(x) )
    -	return(x);
    -if( x > MAXL2)
    -	{
    -	return( NPY_INFINITY );
    -	}
    -
    -if( x < MINL2 )
    -	{
    -	return(0.0);
    -	}
    -
    -xx = x;	/* save x */
    -/* separate into integer and fractional parts */
    -px = floor(x+0.5);
    -n = px;
    -x = x - px;
    -
    -/* rational approximation
    - * exp2(x) = 1 +  2xP(xx)/(Q(xx) - P(xx))
    - * where xx = x**2
    - */
    -xx = x * x;
    -px = x * polevl( xx, P, 2 );
    -x =  px / ( p1evl( xx, Q, 2 ) - px );
    -x = 1.0 + ldexp( x, 1 );
    -
    -/* scale by power of 2 */
    -x = ldexp( x, n );
    -return(x);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/expn.c b/scipy-0.10.1/scipy/special/cephes/expn.c
    deleted file mode 100644
    index f0be347beb..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/expn.c
    +++ /dev/null
    @@ -1,200 +0,0 @@
    -/*							expn.c
    - *
    - *		Exponential integral En
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int n;
    - * double x, y, expn();
    - *
    - * y = expn( n, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Evaluates the exponential integral
    - *
    - *                 inf.
    - *                   -
    - *                  | |   -xt
    - *                  |    e
    - *      E (x)  =    |    ----  dt.
    - *       n          |      n
    - *                | |     t
    - *                 -
    - *                  1
    - *
    - *
    - * Both n and x must be nonnegative.
    - *
    - * The routine employs either a power series, a continued
    - * fraction, or an asymptotic formula depending on the
    - * relative values of n and x.
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30        5000       2.0e-16     4.6e-17
    - *    IEEE      0, 30       10000       1.7e-15     3.6e-16
    - *
    - */
    -
    -/*							expn.c	*/
    -
    -/* Cephes Math Library Release 1.1:  March, 1985
    - * Copyright 1985 by Stephen L. Moshier
    - * Direct inquiries to 30 Frost Street, Cambridge, MA 02140 */
    -
    -#include "mconf.h"
    -#define EUL 0.57721566490153286060
    -#define BIG  1.44115188075855872E+17
    -extern double MAXNUM, MACHEP, MAXLOG;
    -
    -double expn( n, x )
    -int n;
    -double x;
    -{
    -double ans, r, t, yk, xk;
    -double pk, pkm1, pkm2, qk, qkm1, qkm2;
    -double psi, z;
    -int i, k;
    -static double big = BIG;
    -
    -if( n < 0 )
    -	goto domerr;
    -
    -if( x < 0 )
    -	{
    -domerr:	mtherr( "expn", DOMAIN );
    -	return( MAXNUM );
    -	}
    -
    -if( x > MAXLOG )
    -	return( 0.0 );
    -
    -if( x == 0.0 )
    -	{
    -	if( n < 2 )
    -		{
    -		mtherr( "expn", SING );
    -		return( MAXNUM );
    -		}
    -	else
    -		return( 1.0/(n-1.0) );
    -	}
    -
    -if( n == 0 )
    -	return( exp(-x)/x );
    -
    -/*							expn.c	*/
    -/*		Expansion for large n		*/
    -
    -if( n > 5000 )
    -	{
    -	xk = x + n;
    -	yk = 1.0 / (xk * xk);
    -	t = n;
    -	ans = yk * t * (6.0 * x * x  -  8.0 * t * x  +  t * t);
    -	ans = yk * (ans + t * (t  -  2.0 * x));
    -	ans = yk * (ans + t);
    -	ans = (ans + 1.0) * exp( -x ) / xk;
    -	goto done;
    -	}
    -
    -if( x > 1.0 )
    -	goto cfrac;
    -
    -/*							expn.c	*/
    -
    -/*		Power series expansion		*/
    -
    -psi = -EUL - log(x);
    -for( i=1; i MACHEP );
    -k = xk;
    -t = n;
    -r = n - 1;
    -ans = (pow(z, r) * psi / Gamma(t)) - ans;
    -goto done;
    -
    -/*							expn.c	*/
    -/*		continued fraction		*/
    -cfrac:
    -k = 1;
    -pkm2 = 1.0;
    -qkm2 = x;
    -pkm1 = 1.0;
    -qkm1 = x + n;
    -ans = pkm1/qkm1;
    -
    -do
    -	{
    -	k += 1;
    -	if( k & 1 )
    -		{
    -		yk = 1.0;
    -		xk = n + (k-1)/2;
    -		}
    -	else
    -		{
    -		yk = x;
    -		xk = k/2;
    -		}
    -	pk = pkm1 * yk  +  pkm2 * xk;
    -	qk = qkm1 * yk  +  qkm2 * xk;
    -	if( qk != 0 )
    -		{
    -		r = pk/qk;
    -		t = fabs( (ans - r)/r );
    -		ans = r;
    -		}
    -	else
    -		t = 1.0;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -if( fabs(pk) > big )
    -		{
    -		pkm2 /= big;
    -		pkm1 /= big;
    -		qkm2 /= big;
    -		qkm1 /= big;
    -		}
    -	}
    -while( t > MACHEP );
    -
    -ans *= exp( -x );
    -
    -done:
    -return( ans );
    -}
    -
    diff --git a/scipy-0.10.1/scipy/special/cephes/fabs.c b/scipy-0.10.1/scipy/special/cephes/fabs.c
    deleted file mode 100644
    index 3fe828f47b..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/fabs.c
    +++ /dev/null
    @@ -1,55 +0,0 @@
    -/*							fabs.c
    - *
    - *		Absolute value
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y;
    - *
    - * y = fabs( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - * 
    - * Returns the absolute value of the argument.
    - *
    - */
    -
    -
    -#include "mconf.h"
    -/* Avoid using UNK if possible.  */
    -#ifdef UNK
    -#if BIGENDIAN
    -#define MIEEE 1
    -#else
    -#define IBMPC 1
    -#endif
    -#endif
    -
    -double fabs(double x)
    -{
    -union
    -  {
    -    double d;
    -    short i[4];
    -  } u;
    -
    -u.d = x;
    -#ifdef IBMPC
    -    u.i[3] &= 0x7fff;
    -#endif
    -#ifdef MIEEE
    -    u.i[0] &= 0x7fff;
    -#endif
    -#ifdef DEC
    -    u.i[3] &= 0x7fff;
    -#endif
    -#ifdef UNK
    -if( u.d < 0 )
    -   u.d = -u.d;
    -#endif
    -return( u.d );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/fdtr.c b/scipy-0.10.1/scipy/special/cephes/fdtr.c
    deleted file mode 100644
    index 3f727fd3e5..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/fdtr.c
    +++ /dev/null
    @@ -1,226 +0,0 @@
    -/*							fdtr.c
    - *
    - *	F distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double df1, df2;
    - * double x, y, fdtr();
    - *
    - * y = fdtr( df1, df2, x );
    - *
    - * DESCRIPTION:
    - *
    - * Returns the area from zero to x under the F density
    - * function (also known as Snedcor's density or the
    - * variance ratio density).  This is the density
    - * of x = (u1/df1)/(u2/df2), where u1 and u2 are random
    - * variables having Chi square distributions with df1
    - * and df2 degrees of freedom, respectively.
    - *
    - * The incomplete beta integral is used, according to the
    - * formula
    - *
    - *	P(x) = incbet( df1/2, df2/2, (df1*x/(df2 + df1*x) ).
    - *
    - *
    - * The arguments a and b are greater than zero, and x is
    - * nonnegative.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,x).
    - *
    - *                x     a,b                     Relative error:
    - * arithmetic  domain  domain     # trials      peak         rms
    - *    IEEE      0,1    0,100       100000      9.8e-15     1.7e-15
    - *    IEEE      1,5    0,100       100000      6.5e-15     3.5e-16
    - *    IEEE      0,1    1,10000     100000      2.2e-11     3.3e-12
    - *    IEEE      1,5    1,10000     100000      1.1e-11     1.7e-13
    - * See also incbet.c.
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * fdtr domain     a<0, b<0, x<0         0.0
    - *
    - */
    -/*							fdtrc()
    - *
    - *	Complemented F distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double df1, df2;
    - * double x, y, fdtrc();
    - *
    - * y = fdtrc( df1, df2, x );
    - *
    - * DESCRIPTION:
    - *
    - * Returns the area from x to infinity under the F density
    - * function (also known as Snedcor's density or the
    - * variance ratio density).
    - *
    - *
    - *                      inf.
    - *                       -
    - *              1       | |  a-1      b-1
    - * 1-P(x)  =  ------    |   t    (1-t)    dt
    - *            B(a,b)  | |
    - *                     -
    - *                      x
    - *
    - *
    - * The incomplete beta integral is used, according to the
    - * formula
    - *
    - *	P(x) = incbet( df2/2, df1/2, (df2/(df2 + df1*x) ).
    - *
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,x) in the indicated intervals.
    - *                x     a,b                     Relative error:
    - * arithmetic  domain  domain     # trials      peak         rms
    - *    IEEE      0,1    1,100       100000      3.7e-14     5.9e-16
    - *    IEEE      1,5    1,100       100000      8.0e-15     1.6e-15
    - *    IEEE      0,1    1,10000     100000      1.8e-11     3.5e-13
    - *    IEEE      1,5    1,10000     100000      2.0e-11     3.0e-12
    - * See also incbet.c.
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * fdtrc domain    a<0, b<0, x<0         0.0
    - *
    - */
    -/*							fdtri()
    - *
    - *	Inverse of F distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double df1, df2;
    - * double x, p, fdtri();
    - *
    - * x = fdtri( df1, df2, p );
    - *
    - * DESCRIPTION:
    - *
    - * Finds the F density argument x such that the integral
    - * from -infinity to x of the F density is equal to the
    - * given probability p.
    - *
    - * This is accomplished using the inverse beta integral
    - * function and the relations
    - *
    - *      z = incbi( df2/2, df1/2, p )
    - *      x = df2 (1-z) / (df1 z).
    - *
    - * Note: the following relations hold for the inverse of
    - * the uncomplemented F distribution:
    - *
    - *      z = incbi( df1/2, df2/2, p )
    - *      x = df2 z / (df1 (1-z)).
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,p).
    - *
    - *              a,b                     Relative error:
    - * arithmetic  domain     # trials      peak         rms
    - *  For p between .001 and 1:
    - *    IEEE     1,100       100000      8.3e-15     4.7e-16
    - *    IEEE     1,10000     100000      2.1e-11     1.4e-13
    - *  For p between 10^-6 and 10^-3:
    - *    IEEE     1,100        50000      1.3e-12     8.4e-15
    - *    IEEE     1,10000      50000      3.0e-12     4.8e-14
    - * See also fdtrc.c.
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * fdtri domain   p <= 0 or p > 1       NaN
    - *                     v < 1
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -
    -#include "mconf.h"
    -
    -double fdtrc( a, b, x )
    -double a, b;
    -double x;
    -{
    -double w;
    -
    -if( (a < 1.0) || (b < 1.0) || (x < 0.0) )
    -	{
    -	mtherr( "fdtrc", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -w = b / (b + a * x);
    -return( incbet( 0.5*b, 0.5*a, w ) );
    -}
    -
    -double fdtr( a, b, x )
    -double a, b;
    -double x;
    -{
    -double w;
    -
    -if( (a < 1.0) || (b < 1.0) || (x < 0.0) )
    -	{
    -	mtherr( "fdtr", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -w = a * x;
    -w = w / (b + w);
    -return( incbet(0.5*a, 0.5*b, w) );
    -}
    -
    -
    -double fdtri( a, b, y )
    -double a, b;
    -double y;
    -{
    -double w, x;
    -
    -if( (a < 1.0) || (b < 1.0) || (y <= 0.0) || (y > 1.0) )
    -	{
    -	mtherr( "fdtri", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -y = 1.0-y;
    -a = a;
    -b = b;
    -/* Compute probability for x = 0.5.  */
    -w = incbet( 0.5*b, 0.5*a, 0.5 );
    -/* If that is greater than y, then the solution w < .5.
    -   Otherwise, solve at 1-y to remove cancellation in (b - b*w).  */
    -if( w > y || y < 0.001)
    -	{
    -	w = incbi( 0.5*b, 0.5*a, y );
    -	x = (b - b*w)/(a*w);
    -	}
    -else
    -	{
    -	w = incbi( 0.5*a, 0.5*b, 1.0-y );
    -	x = b*w/(a*(1.0-w));
    -	}
    -return(x);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/fresnl.c b/scipy-0.10.1/scipy/special/cephes/fresnl.c
    deleted file mode 100644
    index a751e48bda..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/fresnl.c
    +++ /dev/null
    @@ -1,507 +0,0 @@
    -/*							fresnl.c
    - *
    - *	Fresnel integral
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, S, C;
    - * void fresnl();
    - *
    - * fresnl( x, _&S, _&C );
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Evaluates the Fresnel integrals
    - *
    - *           x
    - *           -
    - *          | |
    - * C(x) =   |   cos(pi/2 t**2) dt,
    - *        | |
    - *         -
    - *          0
    - *
    - *           x
    - *           -
    - *          | |
    - * S(x) =   |   sin(pi/2 t**2) dt.
    - *        | |
    - *         -
    - *          0
    - *
    - *
    - * The integrals are evaluated by a power series for x < 1.
    - * For x >= 1 auxiliary functions f(x) and g(x) are employed
    - * such that
    - *
    - * C(x) = 0.5 + f(x) sin( pi/2 x**2 ) - g(x) cos( pi/2 x**2 )
    - * S(x) = 0.5 - f(x) cos( pi/2 x**2 ) - g(x) sin( pi/2 x**2 )
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *  Relative error.
    - *
    - * Arithmetic  function   domain     # trials      peak         rms
    - *   IEEE       S(x)      0, 10       10000       2.0e-15     3.2e-16
    - *   IEEE       C(x)      0, 10       10000       1.8e-15     3.3e-16
    - *   DEC        S(x)      0, 10        6000       2.2e-16     3.9e-17
    - *   DEC        C(x)      0, 10        5000       2.3e-16     3.9e-17
    - */
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -/* S(x) for small x */
    -#ifdef UNK
    -static double sn[6] = {
    --2.99181919401019853726E3,
    - 7.08840045257738576863E5,
    --6.29741486205862506537E7,
    - 2.54890880573376359104E9,
    --4.42979518059697779103E10,
    - 3.18016297876567817986E11,
    -};
    -static double sd[6] = {
    -/* 1.00000000000000000000E0,*/
    - 2.81376268889994315696E2,
    - 4.55847810806532581675E4,
    - 5.17343888770096400730E6,
    - 4.19320245898111231129E8,
    - 2.24411795645340920940E10,
    - 6.07366389490084639049E11,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short sn[24] = {
    -0143072,0176433,0065455,0127034,
    -0045055,0007200,0134540,0026661,
    -0146560,0035061,0023667,0127545,
    -0050027,0166503,0002673,0153756,
    -0151045,0002721,0121737,0102066,
    -0051624,0013177,0033451,0021271,
    -};
    -static unsigned short sd[24] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0042214,0130051,0112070,0101617,
    -0044062,0010307,0172346,0152510,
    -0045635,0160575,0143200,0136642,
    -0047307,0171215,0127457,0052361,
    -0050647,0031447,0032621,0013510,
    -0052015,0064733,0117362,0012653,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short sn[24] = {
    -0xb5c3,0x6d65,0x5fa3,0xc0a7,
    -0x05b6,0x172c,0xa1d0,0x4125,
    -0xf5ed,0x24f6,0x0746,0xc18e,
    -0x7afe,0x60b7,0xfda8,0x41e2,
    -0xf087,0x347b,0xa0ba,0xc224,
    -0x2457,0xe6e5,0x82cf,0x4252,
    -};
    -static unsigned short sd[24] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x1072,0x3287,0x9605,0x4071,
    -0xdaa9,0xfe9c,0x4218,0x40e6,
    -0x17b4,0xb8d0,0xbc2f,0x4153,
    -0xea9e,0xb5e5,0xfe51,0x41b8,
    -0x22e9,0xe6b2,0xe664,0x4214,
    -0x42b5,0x73de,0xad3b,0x4261,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short sn[24] = {
    -0xc0a7,0x5fa3,0x6d65,0xb5c3,
    -0x4125,0xa1d0,0x172c,0x05b6,
    -0xc18e,0x0746,0x24f6,0xf5ed,
    -0x41e2,0xfda8,0x60b7,0x7afe,
    -0xc224,0xa0ba,0x347b,0xf087,
    -0x4252,0x82cf,0xe6e5,0x2457,
    -};
    -static unsigned short sd[24] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4071,0x9605,0x3287,0x1072,
    -0x40e6,0x4218,0xfe9c,0xdaa9,
    -0x4153,0xbc2f,0xb8d0,0x17b4,
    -0x41b8,0xfe51,0xb5e5,0xea9e,
    -0x4214,0xe664,0xe6b2,0x22e9,
    -0x4261,0xad3b,0x73de,0x42b5,
    -};
    -#endif
    -
    -/* C(x) for small x */
    -#ifdef UNK
    -static double cn[6] = {
    --4.98843114573573548651E-8,
    - 9.50428062829859605134E-6,
    --6.45191435683965050962E-4,
    - 1.88843319396703850064E-2,
    --2.05525900955013891793E-1,
    - 9.99999999999999998822E-1,
    -};
    -static double cd[7] = {
    - 3.99982968972495980367E-12,
    - 9.15439215774657478799E-10,
    - 1.25001862479598821474E-7,
    - 1.22262789024179030997E-5,
    - 8.68029542941784300606E-4,
    - 4.12142090722199792936E-2,
    - 1.00000000000000000118E0,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short cn[24] = {
    -0132126,0040141,0063733,0013231,
    -0034037,0072223,0010200,0075637,
    -0135451,0021020,0073264,0036057,
    -0036632,0131520,0101316,0060233,
    -0137522,0072541,0136124,0132202,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short cd[28] = {
    -0026614,0135503,0051776,0032631,
    -0030573,0121116,0154033,0126712,
    -0032406,0034100,0012442,0106212,
    -0034115,0017567,0150520,0164623,
    -0035543,0106171,0177336,0146351,
    -0037050,0150073,0000607,0171635,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short cn[24] = {
    -0x62d3,0x2cfb,0xc80c,0xbe6a,
    -0x0f74,0x6210,0xee92,0x3ee3,
    -0x8786,0x0ed6,0x2442,0xbf45,
    -0xcc13,0x1059,0x566a,0x3f93,
    -0x9690,0x378a,0x4eac,0xbfca,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short cd[28] = {
    -0xc6b3,0x6a7f,0x9768,0x3d91,
    -0x75b9,0xdb03,0x7449,0x3e0f,
    -0x5191,0x02a4,0xc708,0x3e80,
    -0x1d32,0xfa2a,0xa3ee,0x3ee9,
    -0xd99d,0x3fdb,0x718f,0x3f4c,
    -0xfe74,0x6030,0x1a07,0x3fa5,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short cn[24] = {
    -0xbe6a,0xc80c,0x2cfb,0x62d3,
    -0x3ee3,0xee92,0x6210,0x0f74,
    -0xbf45,0x2442,0x0ed6,0x8786,
    -0x3f93,0x566a,0x1059,0xcc13,
    -0xbfca,0x4eac,0x378a,0x9690,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short cd[28] = {
    -0x3d91,0x9768,0x6a7f,0xc6b3,
    -0x3e0f,0x7449,0xdb03,0x75b9,
    -0x3e80,0xc708,0x02a4,0x5191,
    -0x3ee9,0xa3ee,0xfa2a,0x1d32,
    -0x3f4c,0x718f,0x3fdb,0xd99d,
    -0x3fa5,0x1a07,0x6030,0xfe74,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -/* Auxiliary function f(x) */
    -#ifdef UNK
    -static double fn[10] = {
    -  4.21543555043677546506E-1,
    -  1.43407919780758885261E-1,
    -  1.15220955073585758835E-2,
    -  3.45017939782574027900E-4,
    -  4.63613749287867322088E-6,
    -  3.05568983790257605827E-8,
    -  1.02304514164907233465E-10,
    -  1.72010743268161828879E-13,
    -  1.34283276233062758925E-16,
    -  3.76329711269987889006E-20,
    -};
    -static double fd[10] = {
    -/*  1.00000000000000000000E0,*/
    -  7.51586398353378947175E-1,
    -  1.16888925859191382142E-1,
    -  6.44051526508858611005E-3,
    -  1.55934409164153020873E-4,
    -  1.84627567348930545870E-6,
    -  1.12699224763999035261E-8,
    -  3.60140029589371370404E-11,
    -  5.88754533621578410010E-14,
    -  4.52001434074129701496E-17,
    -  1.25443237090011264384E-20,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short fn[40] = {
    -0037727,0152216,0106601,0016214,
    -0037422,0154606,0112710,0071355,
    -0036474,0143453,0154253,0166545,
    -0035264,0161606,0022250,0073743,
    -0033633,0110036,0024653,0136246,
    -0032003,0036652,0041164,0036413,
    -0027740,0174122,0046305,0036726,
    -0025501,0125270,0121317,0167667,
    -0023032,0150555,0076175,0047443,
    -0020061,0133570,0070130,0027657,
    -};
    -static unsigned short fd[40] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040100,0063767,0054413,0151452,
    -0037357,0061566,0007243,0065754,
    -0036323,0005365,0033552,0133625,
    -0035043,0101123,0000275,0165402,
    -0033367,0146614,0110623,0023647,
    -0031501,0116644,0125222,0144263,
    -0027436,0062051,0117235,0001411,
    -0025204,0111543,0056370,0036201,
    -0022520,0071351,0015227,0122144,
    -0017554,0172240,0112713,0005006,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short fn[40] = {
    -0x2391,0xd1b0,0xfa91,0x3fda,
    -0x0e5e,0xd2b9,0x5b30,0x3fc2,
    -0x7dad,0x7b15,0x98e5,0x3f87,
    -0x0efc,0xc495,0x9c70,0x3f36,
    -0x7795,0xc535,0x7203,0x3ed3,
    -0x87a1,0x484e,0x67b5,0x3e60,
    -0xa7bb,0x4998,0x1f0a,0x3ddc,
    -0xfdf7,0x1459,0x3557,0x3d48,
    -0xa9e4,0xaf8f,0x5a2d,0x3ca3,
    -0x05f6,0x0e0b,0x36ef,0x3be6,
    -};
    -static unsigned short fd[40] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x7a65,0xeb21,0x0cfe,0x3fe8,
    -0x6d7d,0xc1d4,0xec6e,0x3fbd,
    -0x56f3,0xa6ed,0x615e,0x3f7a,
    -0xbd60,0x6017,0x704a,0x3f24,
    -0x64f5,0x9232,0xf9b1,0x3ebe,
    -0x5916,0x9552,0x33b4,0x3e48,
    -0xa061,0x33d3,0xcc85,0x3dc3,
    -0x0790,0x6b9f,0x926c,0x3d30,
    -0xf48d,0x2352,0x0e5d,0x3c8a,
    -0x6141,0x12b9,0x9e94,0x3bcd,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short fn[40] = {
    -0x3fda,0xfa91,0xd1b0,0x2391,
    -0x3fc2,0x5b30,0xd2b9,0x0e5e,
    -0x3f87,0x98e5,0x7b15,0x7dad,
    -0x3f36,0x9c70,0xc495,0x0efc,
    -0x3ed3,0x7203,0xc535,0x7795,
    -0x3e60,0x67b5,0x484e,0x87a1,
    -0x3ddc,0x1f0a,0x4998,0xa7bb,
    -0x3d48,0x3557,0x1459,0xfdf7,
    -0x3ca3,0x5a2d,0xaf8f,0xa9e4,
    -0x3be6,0x36ef,0x0e0b,0x05f6,
    -};
    -static unsigned short fd[40] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x3fe8,0x0cfe,0xeb21,0x7a65,
    -0x3fbd,0xec6e,0xc1d4,0x6d7d,
    -0x3f7a,0x615e,0xa6ed,0x56f3,
    -0x3f24,0x704a,0x6017,0xbd60,
    -0x3ebe,0xf9b1,0x9232,0x64f5,
    -0x3e48,0x33b4,0x9552,0x5916,
    -0x3dc3,0xcc85,0x33d3,0xa061,
    -0x3d30,0x926c,0x6b9f,0x0790,
    -0x3c8a,0x0e5d,0x2352,0xf48d,
    -0x3bcd,0x9e94,0x12b9,0x6141,
    -};
    -#endif
    -
    -
    -/* Auxiliary function g(x) */
    -#ifdef UNK
    -static double gn[11] = {
    -  5.04442073643383265887E-1,
    -  1.97102833525523411709E-1,
    -  1.87648584092575249293E-2,
    -  6.84079380915393090172E-4,
    -  1.15138826111884280931E-5,
    -  9.82852443688422223854E-8,
    -  4.45344415861750144738E-10,
    -  1.08268041139020870318E-12,
    -  1.37555460633261799868E-15,
    -  8.36354435630677421531E-19,
    -  1.86958710162783235106E-22,
    -};
    -static double gd[11] = {
    -/*  1.00000000000000000000E0,*/
    -  1.47495759925128324529E0,
    -  3.37748989120019970451E-1,
    -  2.53603741420338795122E-2,
    -  8.14679107184306179049E-4,
    -  1.27545075667729118702E-5,
    -  1.04314589657571990585E-7,
    -  4.60680728146520428211E-10,
    -  1.10273215066240270757E-12,
    -  1.38796531259578871258E-15,
    -  8.39158816283118707363E-19,
    -  1.86958710162783236342E-22,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short gn[44] = {
    -0040001,0021435,0120406,0053123,
    -0037511,0152523,0037703,0122011,
    -0036631,0134302,0122721,0110235,
    -0035463,0051712,0043215,0114732,
    -0034101,0025677,0147725,0057630,
    -0032323,0010342,0067523,0002206,
    -0030364,0152247,0110007,0054107,
    -0026230,0057654,0035464,0047124,
    -0023706,0036401,0167705,0045440,
    -0021166,0154447,0105632,0142461,
    -0016142,0002353,0011175,0170530,
    -};
    -static unsigned short gd[44] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040274,0145551,0016742,0127005,
    -0037654,0166557,0076416,0015165,
    -0036717,0140217,0030675,0050111,
    -0035525,0110060,0076405,0070502,
    -0034125,0176061,0060120,0031730,
    -0032340,0001615,0054343,0120501,
    -0030375,0041414,0070747,0107060,
    -0026233,0031034,0160757,0074526,
    -0023710,0003341,0137100,0144664,
    -0021167,0126414,0023774,0015435,
    -0016142,0002353,0011175,0170530,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short gn[44] = {
    -0xcaca,0xb420,0x2463,0x3fe0,
    -0x7481,0x67f8,0x3aaa,0x3fc9,
    -0x3214,0x54ba,0x3718,0x3f93,
    -0xb33b,0x48d1,0x6a79,0x3f46,
    -0xabf3,0xf9fa,0x2577,0x3ee8,
    -0x6091,0x4dea,0x621c,0x3e7a,
    -0xeb09,0xf200,0x9a94,0x3dfe,
    -0x89cb,0x8766,0x0bf5,0x3d73,
    -0xa964,0x3df8,0xc7a0,0x3cd8,
    -0x58a6,0xf173,0xdb24,0x3c2e,
    -0xbe2b,0x624f,0x409d,0x3b6c,
    -};
    -static unsigned short gd[44] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x55c1,0x23bc,0x996d,0x3ff7,
    -0xc34f,0xefa1,0x9dad,0x3fd5,
    -0xaa09,0xe637,0xf811,0x3f99,
    -0xae28,0x0fa0,0xb206,0x3f4a,
    -0x067b,0x2c0a,0xbf86,0x3eea,
    -0x7428,0xab1c,0x0071,0x3e7c,
    -0xf1c6,0x8e3c,0xa861,0x3dff,
    -0xef2b,0x9c3d,0x6643,0x3d73,
    -0x1936,0x37c8,0x00dc,0x3cd9,
    -0x8364,0x84ff,0xf5a1,0x3c2e,
    -0xbe2b,0x624f,0x409d,0x3b6c,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short gn[44] = {
    -0x3fe0,0x2463,0xb420,0xcaca,
    -0x3fc9,0x3aaa,0x67f8,0x7481,
    -0x3f93,0x3718,0x54ba,0x3214,
    -0x3f46,0x6a79,0x48d1,0xb33b,
    -0x3ee8,0x2577,0xf9fa,0xabf3,
    -0x3e7a,0x621c,0x4dea,0x6091,
    -0x3dfe,0x9a94,0xf200,0xeb09,
    -0x3d73,0x0bf5,0x8766,0x89cb,
    -0x3cd8,0xc7a0,0x3df8,0xa964,
    -0x3c2e,0xdb24,0xf173,0x58a6,
    -0x3b6c,0x409d,0x624f,0xbe2b,
    -};
    -static unsigned short gd[44] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x3ff7,0x996d,0x23bc,0x55c1,
    -0x3fd5,0x9dad,0xefa1,0xc34f,
    -0x3f99,0xf811,0xe637,0xaa09,
    -0x3f4a,0xb206,0x0fa0,0xae28,
    -0x3eea,0xbf86,0x2c0a,0x067b,
    -0x3e7c,0x0071,0xab1c,0x7428,
    -0x3dff,0xa861,0x8e3c,0xf1c6,
    -0x3d73,0x6643,0x9c3d,0xef2b,
    -0x3cd9,0x00dc,0x37c8,0x1936,
    -0x3c2e,0xf5a1,0x84ff,0x8364,
    -0x3b6c,0x409d,0x624f,0xbe2b,
    -};
    -#endif
    -
    -extern double PI, PIO2, MACHEP;
    -
    -int fresnl( xxa, ssa, cca )
    -double xxa, *ssa, *cca;
    -{
    -double f, g, cc, ss, c, s, t, u;
    -double x, x2;
    -
    -x = fabs(xxa);
    -x2 = x * x;
    -if( x2 < 2.5625 )
    -	{
    -	t = x2 * x2;
    -	ss = x * x2 * polevl( t, sn, 5)/p1evl( t, sd, 6 );
    -	cc = x * polevl( t, cn, 5)/polevl(t, cd, 6 );
    -	goto done;
    -	}
    -
    -
    -
    -
    -
    -
    -if( x > 36974.0 )
    -	{
    -	cc = 0.5;
    -	ss = 0.5;
    -	goto done;
    -	}
    -
    -
    -/*		Asymptotic power series auxiliary functions
    - *		for large argument
    - */
    -	x2 = x * x;
    -	t = PI * x2;
    -	u = 1.0/(t * t);
    -	t = 1.0/t;
    -	f = 1.0 - u * polevl( u, fn, 9)/p1evl(u, fd, 10);
    -	g = t * polevl( u, gn, 10)/p1evl(u, gd, 11);
    -
    -	t = PIO2 * x2;
    -	c = cos(t);
    -	s = sin(t);
    -	t = PI * x;
    -	cc = 0.5  +  (f * s  -  g * c)/t;
    -	ss = 0.5  -  (f * c  +  g * s)/t;
    -
    -done:
    -if( xxa < 0.0 )
    -	{
    -	cc = -cc;
    -	ss = -ss;
    -	}
    -
    -*cca = cc;
    -*ssa = ss;
    -return(0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/gamma.c b/scipy-0.10.1/scipy/special/cephes/gamma.c
    deleted file mode 100644
    index 9ed77edf94..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/gamma.c
    +++ /dev/null
    @@ -1,615 +0,0 @@
    -/*							Gamma.c
    - *
    - *	Gamma function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, Gamma();
    - * extern int sgngam;
    - *
    - * y = Gamma( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Gamma function of the argument.  The result is
    - * correctly signed, and the sign (+1 or -1) is also
    - * returned in a global (extern) variable named sgngam.
    - * This variable is also filled in by the logarithmic Gamma
    - * function lgam().
    - *
    - * Arguments |x| <= 34 are reduced by recurrence and the function
    - * approximated by a rational function of degree 6/7 in the
    - * interval (2,3).  Large arguments are handled by Stirling's
    - * formula. Large negative arguments are made positive using
    - * a reflection formula.  
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC      -34, 34      10000       1.3e-16     2.5e-17
    - *    IEEE    -170,-33      20000       2.3e-15     3.3e-16
    - *    IEEE     -33,  33     20000       9.4e-16     2.2e-16
    - *    IEEE      33, 171.6   20000       2.3e-15     3.2e-16
    - *
    - * Error for arguments outside the test range will be larger
    - * owing to error amplification by the exponential function.
    - *
    - */
    -/*							lgam()
    - *
    - *	Natural logarithm of Gamma function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, lgam();
    - * extern int sgngam;
    - *
    - * y = lgam( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the base e (2.718...) logarithm of the absolute
    - * value of the Gamma function of the argument.
    - * The sign (+1 or -1) of the Gamma function is returned in a
    - * global (extern) variable named sgngam.
    - *
    - * For arguments greater than 13, the logarithm of the Gamma
    - * function is approximated by the logarithmic version of
    - * Stirling's formula using a polynomial approximation of
    - * degree 4. Arguments between -33 and +33 are reduced by
    - * recurrence to the interval [2,3] of a rational approximation.
    - * The cosecant reflection formula is employed for arguments
    - * less than -33.
    - *
    - * Arguments greater than MAXLGM return MAXNUM and an error
    - * message.  MAXLGM = 2.035093e36 for DEC
    - * arithmetic or 2.556348e305 for IEEE arithmetic.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *
    - * arithmetic      domain        # trials     peak         rms
    - *    DEC     0, 3                  7000     5.2e-17     1.3e-17
    - *    DEC     2.718, 2.035e36       5000     3.9e-17     9.9e-18
    - *    IEEE    0, 3                 28000     5.4e-16     1.1e-16
    - *    IEEE    2.718, 2.556e305     40000     3.5e-16     8.3e-17
    - * The error criterion was relative when the function magnitude
    - * was greater than one but absolute when it was less than one.
    - *
    - * The following test used the relative error criterion, though
    - * at certain points the relative error could be much higher than
    - * indicated.
    - *    IEEE    -200, -4             10000     4.8e-16     1.3e-16
    - *
    - */
    -
    -/*							Gamma.c	*/
    -/*	Gamma function	*/
    -
    -/*
    -Cephes Math Library Release 2.2:  July, 1992
    -Copyright 1984, 1987, 1989, 1992 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double P[] = {
    -  1.60119522476751861407E-4,
    -  1.19135147006586384913E-3,
    -  1.04213797561761569935E-2,
    -  4.76367800457137231464E-2,
    -  2.07448227648435975150E-1,
    -  4.94214826801497100753E-1,
    -  9.99999999999999996796E-1
    -};
    -static double Q[] = {
    --2.31581873324120129819E-5,
    - 5.39605580493303397842E-4,
    --4.45641913851797240494E-3,
    - 1.18139785222060435552E-2,
    - 3.58236398605498653373E-2,
    --2.34591795718243348568E-1,
    - 7.14304917030273074085E-2,
    - 1.00000000000000000320E0
    -};
    -#define MAXGAM 171.624376956302725
    -static double LOGPI = 1.14472988584940017414;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short P[] = {
    -0035047,0162701,0146301,0005234,
    -0035634,0023437,0032065,0176530,
    -0036452,0137157,0047330,0122574,
    -0037103,0017310,0143041,0017232,
    -0037524,0066516,0162563,0164605,
    -0037775,0004671,0146237,0014222,
    -0040200,0000000,0000000,0000000
    -};
    -static unsigned short Q[] = {
    -0134302,0041724,0020006,0116565,
    -0035415,0072121,0044251,0025634,
    -0136222,0003447,0035205,0121114,
    -0036501,0107552,0154335,0104271,
    -0037022,0135717,0014776,0171471,
    -0137560,0034324,0165024,0037021,
    -0037222,0045046,0047151,0161213,
    -0040200,0000000,0000000,0000000
    -};
    -#define MAXGAM 34.84425627277176174
    -static unsigned short LPI[4] = {
    -0040222,0103202,0043475,0006750,
    -};
    -#define LOGPI *(double *)LPI
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P[] = {
    -0x2153,0x3998,0xfcb8,0x3f24,
    -0xbfab,0xe686,0x84e3,0x3f53,
    -0x14b0,0xe9db,0x57cd,0x3f85,
    -0x23d3,0x18c4,0x63d9,0x3fa8,
    -0x7d31,0xdcae,0x8da9,0x3fca,
    -0xe312,0x3993,0xa137,0x3fdf,
    -0x0000,0x0000,0x0000,0x3ff0
    -};
    -static unsigned short Q[] = {
    -0xd3af,0x8400,0x487a,0xbef8,
    -0x2573,0x2915,0xae8a,0x3f41,
    -0xb44a,0xe750,0x40e4,0xbf72,
    -0xb117,0x5b1b,0x31ed,0x3f88,
    -0xde67,0xe33f,0x5779,0x3fa2,
    -0x87c2,0x9d42,0x071a,0xbfce,
    -0x3c51,0xc9cd,0x4944,0x3fb2,
    -0x0000,0x0000,0x0000,0x3ff0
    -};
    -#define MAXGAM 171.624376956302725
    -static unsigned short LPI[4] = {
    -0xa1bd,0x48e7,0x50d0,0x3ff2,
    -};
    -#define LOGPI *(double *)LPI
    -#endif 
    -
    -#ifdef MIEEE
    -static unsigned short P[] = {
    -0x3f24,0xfcb8,0x3998,0x2153,
    -0x3f53,0x84e3,0xe686,0xbfab,
    -0x3f85,0x57cd,0xe9db,0x14b0,
    -0x3fa8,0x63d9,0x18c4,0x23d3,
    -0x3fca,0x8da9,0xdcae,0x7d31,
    -0x3fdf,0xa137,0x3993,0xe312,
    -0x3ff0,0x0000,0x0000,0x0000
    -};
    -static unsigned short Q[] = {
    -0xbef8,0x487a,0x8400,0xd3af,
    -0x3f41,0xae8a,0x2915,0x2573,
    -0xbf72,0x40e4,0xe750,0xb44a,
    -0x3f88,0x31ed,0x5b1b,0xb117,
    -0x3fa2,0x5779,0xe33f,0xde67,
    -0xbfce,0x071a,0x9d42,0x87c2,
    -0x3fb2,0x4944,0xc9cd,0x3c51,
    -0x3ff0,0x0000,0x0000,0x0000
    -};
    -#define MAXGAM 171.624376956302725
    -static unsigned short LPI[4] = {
    -0x3ff2,0x50d0,0x48e7,0xa1bd,
    -};
    -#define LOGPI *(double *)LPI
    -#endif 
    -
    -/* Stirling's formula for the Gamma function */
    -#if UNK
    -static double STIR[5] = {
    - 7.87311395793093628397E-4,
    --2.29549961613378126380E-4,
    --2.68132617805781232825E-3,
    - 3.47222221605458667310E-3,
    - 8.33333333333482257126E-2,
    -};
    -#define MAXSTIR 143.01608
    -static double SQTPI = 2.50662827463100050242E0;
    -#endif
    -#if DEC
    -static unsigned short STIR[20] = {
    -0035516,0061622,0144553,0112224,
    -0135160,0131531,0037460,0165740,
    -0136057,0134460,0037242,0077270,
    -0036143,0107070,0156306,0027751,
    -0037252,0125252,0125252,0146064,
    -};
    -#define MAXSTIR 26.77
    -static unsigned short SQT[4] = {
    -0040440,0066230,0177661,0034055,
    -};
    -#define SQTPI *(double *)SQT
    -#endif
    -#if IBMPC
    -static unsigned short STIR[20] = {
    -0x7293,0x592d,0xcc72,0x3f49,
    -0x1d7c,0x27e6,0x166b,0xbf2e,
    -0x4fd7,0x07d4,0xf726,0xbf65,
    -0xc5fd,0x1b98,0x71c7,0x3f6c,
    -0x5986,0x5555,0x5555,0x3fb5,
    -};
    -#define MAXSTIR 143.01608
    -static unsigned short SQT[4] = {
    -0x2706,0x1ff6,0x0d93,0x4004,
    -};
    -#define SQTPI *(double *)SQT
    -#endif
    -#if MIEEE
    -static unsigned short STIR[20] = {
    -0x3f49,0xcc72,0x592d,0x7293,
    -0xbf2e,0x166b,0x27e6,0x1d7c,
    -0xbf65,0xf726,0x07d4,0x4fd7,
    -0x3f6c,0x71c7,0x1b98,0xc5fd,
    -0x3fb5,0x5555,0x5555,0x5986,
    -};
    -#define MAXSTIR 143.01608
    -static unsigned short SQT[4] = {
    -0x4004,0x0d93,0x1ff6,0x2706,
    -};
    -#define SQTPI *(double *)SQT
    -#endif
    -
    -int sgngam = 0;
    -extern int sgngam;
    -extern double MAXLOG, MAXNUM, PI;
    -static double stirf(double);
    -
    -/* Gamma function computed by Stirling's formula.
    - * The polynomial STIR is valid for 33 <= x <= 172.
    - */
    -static double stirf(double x)
    -{
    -double y, w, v;
    -
    -if (x >= MAXGAM) {
    -	return (NPY_INFINITY);
    -}
    -w = 1.0/x;
    -w = 1.0 + w * polevl( w, STIR, 4 );
    -y = exp(x);
    -if( x > MAXSTIR )
    -	{ /* Avoid overflow in pow() */
    -	v = pow( x, 0.5 * x - 0.25 );
    -	y = v * (v / y);
    -	}
    -else
    -	{
    -	y = pow( x, x - 0.5 ) / y;
    -	}
    -y = SQTPI * y * w;
    -return( y );
    -}
    -
    -
    -
    -double Gamma(double x)
    -{
    -double p, q, z;
    -int i;
    -
    -sgngam = 1;
    -if (!npy_isfinite(x)) {
    -        return x;
    -}
    -q = fabs(x);
    -
    -if( q > 33.0 )
    -	{
    -	if( x < 0.0 )
    -		{
    -		p = floor(q);
    -		if( p == q )
    -			{
    -gamnan:
    -			mtherr( "Gamma", OVERFLOW );
    -			return (MAXNUM);
    -			}
    -		i = p;
    -		if( (i & 1) == 0 )
    -			sgngam = -1;
    -		z = q - p;
    -		if( z > 0.5 )
    -			{
    -			p += 1.0;
    -			z = q - p;
    -			}
    -		z = q * sin( PI * z );
    -		if( z == 0.0 )
    -			{
    -			return( sgngam * NPY_INFINITY);
    -			}
    -		z = fabs(z);
    -		z = PI/(z * stirf(q) );
    -		}
    -	else
    -		{
    -		z = stirf(x);
    -		}
    -	return( sgngam * z );
    -	}
    -
    -z = 1.0;
    -while( x >= 3.0 )
    -	{
    -	x -= 1.0;
    -	z *= x;
    -	}
    -
    -while( x < 0.0 )
    -	{
    -	if( x > -1.E-9 )
    -		goto small;
    -	z /= x;
    -	x += 1.0;
    -	}
    -
    -while( x < 2.0 )
    -	{
    -	if( x < 1.e-9 )
    -		goto small;
    -	z /= x;
    -	x += 1.0;
    -	}
    -
    -if( x == 2.0 )
    -	return(z);
    -
    -x -= 2.0;
    -p = polevl( x, P, 6 );
    -q = polevl( x, Q, 7 );
    -return( z * p / q );
    -
    -small:
    -if( x == 0.0 )
    -	{
    -	  goto gamnan;
    -	}
    -else
    -	return( z/((1.0 + 0.5772156649015329 * x) * x) );
    -}
    -
    -
    -
    -/* A[]: Stirling's formula expansion of log Gamma
    - * B[], C[]: log Gamma function between 2 and 3
    - */
    -#ifdef UNK
    -static double A[] = {
    - 8.11614167470508450300E-4,
    --5.95061904284301438324E-4,
    - 7.93650340457716943945E-4,
    --2.77777777730099687205E-3,
    - 8.33333333333331927722E-2
    -};
    -static double B[] = {
    --1.37825152569120859100E3,
    --3.88016315134637840924E4,
    --3.31612992738871184744E5,
    --1.16237097492762307383E6,
    --1.72173700820839662146E6,
    --8.53555664245765465627E5
    -};
    -static double C[] = {
    -/* 1.00000000000000000000E0, */
    --3.51815701436523470549E2,
    --1.70642106651881159223E4,
    --2.20528590553854454839E5,
    --1.13933444367982507207E6,
    --2.53252307177582951285E6,
    --2.01889141433532773231E6
    -};
    -/* log( sqrt( 2*pi ) ) */
    -static double LS2PI  =  0.91893853320467274178;
    -#define MAXLGM 2.556348e305
    -#endif
    -
    -#ifdef DEC
    -static unsigned short A[] = {
    -0035524,0141201,0034633,0031405,
    -0135433,0176755,0126007,0045030,
    -0035520,0006371,0003342,0172730,
    -0136066,0005540,0132605,0026407,
    -0037252,0125252,0125252,0125132
    -};
    -static unsigned short B[] = {
    -0142654,0044014,0077633,0035410,
    -0144027,0110641,0125335,0144760,
    -0144641,0165637,0142204,0047447,
    -0145215,0162027,0146246,0155211,
    -0145322,0026110,0010317,0110130,
    -0145120,0061472,0120300,0025363
    -};
    -static unsigned short C[] = {
    -/*0040200,0000000,0000000,0000000*/
    -0142257,0164150,0163630,0112622,
    -0143605,0050153,0156116,0135272,
    -0144527,0056045,0145642,0062332,
    -0145213,0012063,0106250,0001025,
    -0145432,0111254,0044577,0115142,
    -0145366,0071133,0050217,0005122
    -};
    -/* log( sqrt( 2*pi ) ) */
    -static unsigned short LS2P[] = {040153,037616,041445,0172645,};
    -#define LS2PI *(double *)LS2P
    -#define MAXLGM 2.035093e36
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short A[] = {
    -0x6661,0x2733,0x9850,0x3f4a,
    -0xe943,0xb580,0x7fbd,0xbf43,
    -0x5ebb,0x20dc,0x019f,0x3f4a,
    -0xa5a1,0x16b0,0xc16c,0xbf66,
    -0x554b,0x5555,0x5555,0x3fb5
    -};
    -static unsigned short B[] = {
    -0x6761,0x8ff3,0x8901,0xc095,
    -0xb93e,0x355b,0xf234,0xc0e2,
    -0x89e5,0xf890,0x3d73,0xc114,
    -0xdb51,0xf994,0xbc82,0xc131,
    -0xf20b,0x0219,0x4589,0xc13a,
    -0x055e,0x5418,0x0c67,0xc12a
    -};
    -static unsigned short C[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x12b2,0x1cf3,0xfd0d,0xc075,
    -0xd757,0x7b89,0xaa0d,0xc0d0,
    -0x4c9b,0xb974,0xeb84,0xc10a,
    -0x0043,0x7195,0x6286,0xc131,
    -0xf34c,0x892f,0x5255,0xc143,
    -0xe14a,0x6a11,0xce4b,0xc13e
    -};
    -/* log( sqrt( 2*pi ) ) */
    -static unsigned short LS2P[] = {
    -0xbeb5,0xc864,0x67f1,0x3fed
    -};
    -#define LS2PI *(double *)LS2P
    -#define MAXLGM 2.556348e305
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short A[] = {
    -0x3f4a,0x9850,0x2733,0x6661,
    -0xbf43,0x7fbd,0xb580,0xe943,
    -0x3f4a,0x019f,0x20dc,0x5ebb,
    -0xbf66,0xc16c,0x16b0,0xa5a1,
    -0x3fb5,0x5555,0x5555,0x554b
    -};
    -static unsigned short B[] = {
    -0xc095,0x8901,0x8ff3,0x6761,
    -0xc0e2,0xf234,0x355b,0xb93e,
    -0xc114,0x3d73,0xf890,0x89e5,
    -0xc131,0xbc82,0xf994,0xdb51,
    -0xc13a,0x4589,0x0219,0xf20b,
    -0xc12a,0x0c67,0x5418,0x055e
    -};
    -static unsigned short C[] = {
    -0xc075,0xfd0d,0x1cf3,0x12b2,
    -0xc0d0,0xaa0d,0x7b89,0xd757,
    -0xc10a,0xeb84,0xb974,0x4c9b,
    -0xc131,0x6286,0x7195,0x0043,
    -0xc143,0x5255,0x892f,0xf34c,
    -0xc13e,0xce4b,0x6a11,0xe14a
    -};
    -/* log( sqrt( 2*pi ) ) */
    -static unsigned short LS2P[] = {
    -0x3fed,0x67f1,0xc864,0xbeb5
    -};
    -#define LS2PI *(double *)LS2P
    -#define MAXLGM 2.556348e305
    -#endif
    -
    -
    -/* Logarithm of Gamma function */
    -
    -
    -double lgam(double x)
    -{
    -double p, q, u, w, z;
    -int i;
    -
    -sgngam = 1;
    -
    -if( !npy_isfinite(x) )
    -        return x;
    -
    -if( x < -34.0 )
    -	{
    -	q = -x;
    -	w = lgam(q); /* note this modifies sgngam! */
    -	p = floor(q);
    -	if( p == q )
    -		{
    -lgsing:
    -		mtherr( "lgam", SING );
    -		return (NPY_INFINITY);
    -		}
    -	i = p;
    -	if( (i & 1) == 0 )
    -		sgngam = -1;
    -	else
    -		sgngam = 1;
    -	z = q - p;
    -	if( z > 0.5 )
    -		{
    -		p += 1.0;
    -		z = p - q;
    -		}
    -	z = q * sin( PI * z );
    -	if( z == 0.0 )
    -		goto lgsing;
    -/*	z = log(PI) - log( z ) - w;*/
    -	z = LOGPI - log( z ) - w;
    -	return( z );
    -	}
    -
    -if( x < 13.0 )
    -	{
    -	z = 1.0;
    -	p = 0.0;
    -	u = x;
    -	while( u >= 3.0 )
    -		{
    -		p -= 1.0;
    -		u = x + p;
    -		z *= u;
    -		}
    -	while( u < 2.0 )
    -		{
    -		if( u == 0.0 )
    -			goto lgsing;
    -		z /= u;
    -		p += 1.0;
    -		u = x + p;
    -		}
    -	if( z < 0.0 )
    -		{
    -		sgngam = -1;
    -		z = -z;
    -		}
    -	else
    -		sgngam = 1;
    -	if( u == 2.0 )
    -		return( log(z) );
    -	p -= 2.0;
    -	x = x + p;
    -	p = x * polevl( x, B, 5 ) / p1evl( x, C, 6);
    -	return( log(z) + p );
    -	}
    -
    -if( x > MAXLGM )
    -	{
    -	return( sgngam * NPY_INFINITY );
    -	}
    -
    -q = ( x - 0.5 ) * log(x) - x + LS2PI;
    -if( x > 1.0e8 )
    -	return( q );
    -
    -p = 1.0/(x*x);
    -if( x >= 1000.0 )
    -	q += ((   7.9365079365079365079365e-4 * p
    -		- 2.7777777777777777777778e-3) *p
    -		+ 0.0833333333333333333333) / x;
    -else
    -	q += polevl( p, A, 4 ) / x;
    -return( q );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/gdtr.c b/scipy-0.10.1/scipy/special/cephes/gdtr.c
    deleted file mode 100644
    index 3ac4c68724..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/gdtr.c
    +++ /dev/null
    @@ -1,138 +0,0 @@
    -/*							gdtr.c
    - *
    - *	Gamma distribution function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, x, y, gdtr();
    - *
    - * y = gdtr( a, b, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the integral from zero to x of the Gamma probability
    - * density function:
    - *
    - *
    - *                x
    - *        b       -
    - *       a       | |   b-1  -at
    - * y =  -----    |    t    e    dt
    - *       -     | |
    - *      | (b)   -
    - *               0
    - *
    - *  The incomplete Gamma integral is used, according to the
    - * relation
    - *
    - * y = igam( b, ax ).
    - *
    - *
    - * ACCURACY:
    - *
    - * See igam().
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * gdtr domain         x < 0            0.0
    - *
    - */
    -/*							gdtrc.c
    - *
    - *	Complemented Gamma distribution function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, x, y, gdtrc();
    - *
    - * y = gdtrc( a, b, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the integral from x to infinity of the Gamma
    - * probability density function:
    - *
    - *
    - *               inf.
    - *        b       -
    - *       a       | |   b-1  -at
    - * y =  -----    |    t    e    dt
    - *       -     | |
    - *      | (b)   -
    - *               x
    - *
    - *  The incomplete Gamma integral is used, according to the
    - * relation
    - *
    - * y = igamc( b, ax ).
    - *
    - *
    - * ACCURACY:
    - *
    - * See igamc().
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * gdtrc domain         x < 0            0.0
    - *
    - */
    -
    -/*							gdtr()  */
    -
    -
    -/*
    -Cephes Math Library Release 2.3:  March,1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -double gdtri(double,double,double);
    -
    -double gdtr( a, b, x )
    -double a, b, x;
    -{
    -
    -if( x < 0.0 )
    -	{
    -	mtherr( "gdtr", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -return(  igam( b, a * x )  );
    -}
    -
    -
    -double gdtrc( a, b, x )
    -double a, b, x;
    -{
    -
    -if( x < 0.0 )
    -	{
    -	mtherr( "gdtrc", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -return(  igamc( b, a * x )  );
    -}
    -
    -
    -double gdtri( a, b, y)
    -double a, b, y;
    -{
    -
    -if ((y < 0.0) || (y > 1.0) || (a <= 0.0) || (b < 0.0))
    -  {
    -    mtherr("gdtri", DOMAIN);
    -    return( NPY_NAN );
    -  }
    -
    -return ( igami (b, 1.0-y) / a);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/gels.c b/scipy-0.10.1/scipy/special/cephes/gels.c
    deleted file mode 100644
    index 1781f59a41..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/gels.c
    +++ /dev/null
    @@ -1,228 +0,0 @@
    -/*
    -C
    -C     ..................................................................
    -C
    -C        SUBROUTINE GELS
    -C
    -C        PURPOSE
    -C           TO SOLVE A SYSTEM OF SIMULTANEOUS LINEAR EQUATIONS WITH
    -C           SYMMETRIC COEFFICIENT MATRIX UPPER TRIANGULAR PART OF WHICH
    -C           IS ASSUMED TO BE STORED COLUMNWISE.
    -C
    -C        USAGE
    -C           CALL GELS(R,A,M,N,EPS,IER,AUX)
    -C
    -C        DESCRIPTION OF PARAMETERS
    -C           R      - M BY N RIGHT HAND SIDE MATRIX.  (DESTROYED)
    -C                    ON RETURN R CONTAINS THE SOLUTION OF THE EQUATIONS.
    -C           A      - UPPER TRIANGULAR PART OF THE SYMMETRIC
    -C                    M BY M COEFFICIENT MATRIX.  (DESTROYED)
    -C           M      - THE NUMBER OF EQUATIONS IN THE SYSTEM.
    -C           N      - THE NUMBER OF RIGHT HAND SIDE VECTORS.
    -C           EPS    - AN INPUT CONSTANT WHICH IS USED AS RELATIVE
    -C                    TOLERANCE FOR TEST ON LOSS OF SIGNIFICANCE.
    -C           IER    - RESULTING ERROR PARAMETER CODED AS FOLLOWS
    -C                    IER=0  - NO ERROR,
    -C                    IER=-1 - NO RESULT BECAUSE OF M LESS THAN 1 OR
    -C                             PIVOT ELEMENT AT ANY ELIMINATION STEP
    -C                             EQUAL TO 0,
    -C                    IER=K  - WARNING DUE TO POSSIBLE LOSS OF SIGNIFI-
    -C                             CANCE INDICATED AT ELIMINATION STEP K+1,
    -C                             WHERE PIVOT ELEMENT WAS LESS THAN OR
    -C                             EQUAL TO THE INTERNAL TOLERANCE EPS TIMES
    -C                             ABSOLUTELY GREATEST MAIN DIAGONAL
    -C                             ELEMENT OF MATRIX A.
    -C           AUX    - AN AUXILIARY STORAGE ARRAY WITH DIMENSION M-1.
    -C
    -C        REMARKS
    -C           UPPER TRIANGULAR PART OF MATRIX A IS ASSUMED TO BE STORED
    -C           COLUMNWISE IN M*(M+1)/2 SUCCESSIVE STORAGE LOCATIONS, RIGHT
    -C           HAND SIDE MATRIX R COLUMNWISE IN N*M SUCCESSIVE STORAGE
    -C           LOCATIONS. ON RETURN SOLUTION MATRIX R IS STORED COLUMNWISE
    -C           TOO.
    -C           THE PROCEDURE GIVES RESULTS IF THE NUMBER OF EQUATIONS M IS
    -C           GREATER THAN 0 AND PIVOT ELEMENTS AT ALL ELIMINATION STEPS
    -C           ARE DIFFERENT FROM 0. HOWEVER WARNING IER=K - IF GIVEN -
    -C           INDICATES POSSIBLE LOSS OF SIGNIFICANCE. IN CASE OF A WELL
    -C           SCALED MATRIX A AND APPROPRIATE TOLERANCE EPS, IER=K MAY BE
    -C           INTERPRETED THAT MATRIX A HAS THE RANK K. NO WARNING IS
    -C           GIVEN IN CASE M=1.
    -C           ERROR PARAMETER IER=-1 DOES NOT NECESSARILY MEAN THAT
    -C           MATRIX A IS SINGULAR, AS ONLY MAIN DIAGONAL ELEMENTS
    -C           ARE USED AS PIVOT ELEMENTS. POSSIBLY SUBROUTINE GELG (WHICH
    -C           WORKS WITH TOTAL PIVOTING) WOULD BE ABLE TO FIND A SOLUTION.
    -C
    -C        SUBROUTINES AND FUNCTION SUBPROGRAMS REQUIRED
    -C           NONE
    -C
    -C        METHOD
    -C           SOLUTION IS DONE BY MEANS OF GAUSS-ELIMINATION WITH
    -C           PIVOTING IN MAIN DIAGONAL, IN ORDER TO PRESERVE
    -C           SYMMETRY IN REMAINING COEFFICIENT MATRICES.
    -C
    -C     ..................................................................
    -C
    -*/
    -#include "protos.h"
    -
    -int
    -gels( A, R, M, EPS, AUX )
    -double A[],R[];
    -int M;
    -double EPS;
    -double AUX[];
    -{
    -int I = 0, J = 0, K, L, IER;
    -int II, LL, LLD, LR, LT, LST, LLST, LEND;
    -double tb, piv, tol, pivi;
    -
    -if( M <= 0 )
    -	{
    -fatal:
    -	IER = -1;
    -	goto done;
    -	}
    -/* SEARCH FOR GREATEST MAIN DIAGONAL ELEMENT */
    -
    -/*  Diagonal elements are at A(i,i) = 1, 3, 6, 10, ...
    - *  A(i,j) = A( i(i-1)/2 + j )
    - */
    -IER = 0;
    -piv = 0.0;
    -L = 0;
    -for( K=1; K<=M; K++ )
    -	{
    -	L += K;
    -	tb = fabs( A[L-1] );
    -	if( tb > piv )
    -		{
    -		piv = tb;
    -		I = L;
    -		J = K;
    -		}
    -	}
    -tol = EPS * piv;
    -
    -/*
    -C     MAIN DIAGONAL ELEMENT A(I)=A(J,J) IS FIRST PIVOT ELEMENT.
    -C     PIV CONTAINS THE ABSOLUTE VALUE OF A(I).
    -*/
    -
    -/*     START ELIMINATION LOOP */
    -LST = 0;
    -LEND = M - 1;
    -for( K=1; K<=M; K++ )
    -	{
    -/*     TEST ON USEFULNESS OF SYMMETRIC ALGORITHM */
    -	if( piv <= 0.0 )
    -		goto fatal;
    -	if( IER == 0 )
    -		{
    -		if( piv <= tol )
    -			{
    -			IER = K - 1;
    -			}
    -		}
    -	LT = J - K;
    -	LST += K;
    -
    -/*  PIVOT ROW REDUCTION AND ROW INTERCHANGE IN RIGHT HAND SIDE R */
    -	pivi = 1.0 / A[I-1];
    -	L = K;
    -	LL = L + LT;
    -	tb = pivi * R[LL-1];
    -	R[LL-1] = R[L-1];
    -	R[L-1] = tb;
    -/* IS ELIMINATION TERMINATED */
    -	if( K >= M )
    -		break;
    -/*
    -C     ROW AND COLUMN INTERCHANGE AND PIVOT ROW REDUCTION IN MATRIX A.
    -C     ELEMENTS OF PIVOT COLUMN ARE SAVED IN AUXILIARY VECTOR AUX.
    -*/
    -	LR = LST + (LT*(K+J-1))/2;
    -	LL = LR;
    -	L=LST;
    -	for( II=K; II<=LEND; II++ )
    -		{
    -		L += II;
    -		LL += 1;
    -		if( L == LR )
    -			{
    -			A[LL-1] = A[LST-1];
    -			tb = A[L-1];
    -			goto lab13;
    -			}
    -		if( L > LR )
    -			LL = L + LT;
    -
    -		tb = A[LL-1];
    -		A[LL-1] = A[L-1];
    -lab13:
    -		AUX[II-1] = tb;
    -		A[L-1] = pivi * tb;
    -		}
    -/* SAVE COLUMN INTERCHANGE INFORMATION */
    -	A[LST-1] = LT;
    -/* ELEMENT REDUCTION AND SEARCH FOR NEXT PIVOT */
    -	piv = 0.0;
    -	LLST = LST;
    -	LT = 0;
    -	for( II=K; II<=LEND; II++ )
    -		{
    -		pivi = -AUX[II-1];
    -		LL = LLST;
    -		LT += 1;
    -		for( LLD=II; LLD<=LEND; LLD++ )
    -			{
    -			LL += LLD;
    -			L = LL + LT;
    -			A[L-1] += pivi * A[LL-1];
    -			}
    -		LLST += II;
    -		LR = LLST + LT;
    -		tb =fabs( A[LR-1] );
    -		if( tb > piv )
    -			{
    -			piv = tb;
    -			I = LR;
    -			J = II + 1;
    -			}
    -		LR = K;
    -		LL = LR + LT;
    -		R[LL-1] += pivi * R[LR-1];
    -		}
    -	}
    -/* END OF ELIMINATION LOOP */
    -
    -/* BACK SUBSTITUTION AND BACK INTERCHANGE */
    -
    -if( LEND <= 0 )
    -	{
    -	if( LEND < 0 )
    -		goto fatal;
    -	goto done;
    -	}
    -II = M;
    -for( I=2; I<=M; I++ )
    -	{
    -	LST -= II;
    -	II -= 1;
    -	L = A[LST-1] + 0.5;
    -	J = II;
    -	tb = R[J-1];
    -	LL = J;
    -	K = LST;
    -	for( LT=II; LT<=LEND; LT++ )
    -		{
    -		LL += 1;
    -		K += LT;
    -		tb -= A[K-1] * R[LL-1];
    -		}
    -	K = J + L;
    -	R[J-1] = R[K-1];
    -	R[K-1] = tb;
    -	}
    -done:
    -return( IER );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/hyp2f1.c b/scipy-0.10.1/scipy/special/cephes/hyp2f1.c
    deleted file mode 100644
    index c54a989d23..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/hyp2f1.c
    +++ /dev/null
    @@ -1,569 +0,0 @@
    -/*                                                      hyp2f1.c
    - *
    - *      Gauss hypergeometric function   F
    - *                                     2 1
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, c, x, y, hyp2f1();
    - *
    - * y = hyp2f1( a, b, c, x );
    - *
    - *
    - * DESCRIPTION:
    - *
    - *
    - *  hyp2f1( a, b, c, x )  =   F ( a, b; c; x )
    - *                           2 1
    - *
    - *           inf.
    - *            -   a(a+1)...(a+k) b(b+1)...(b+k)   k+1
    - *   =  1 +   >   -----------------------------  x   .
    - *            -         c(c+1)...(c+k) (k+1)!
    - *          k = 0
    - *
    - *  Cases addressed are
    - *      Tests and escapes for negative integer a, b, or c
    - *      Linear transformation if c - a or c - b negative integer
    - *      Special case c = a or c = b
    - *      Linear transformation for  x near +1
    - *      Transformation for x < -0.5
    - *      Psi function expansion if x > 0.5 and c - a - b integer
    - *      Conditionally, a recurrence on c to make c-a-b > 0
    - *
    - *      x < -1  AMS 15.3.7 transformation applied (Travis Oliphant)
    - *         valid for b,a,c,(b-a) != integer and (c-a),(c-b) != negative integer
    - *
    - * x >= 1 is rejected (unless special cases are present)
    - *
    - * The parameters a, b, c are considered to be integer
    - * valued if they are within 1.0e-14 of the nearest integer
    - * (1.0e-13 for IEEE arithmetic).
    - *
    - * ACCURACY:
    - *
    - *
    - *               Relative error (-1 < x < 1):
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      -1,7        230000      1.2e-11     5.2e-14
    - *
    - * Several special cases also tested with a, b, c in
    - * the range -7 to 7.
    - *
    - * ERROR MESSAGES:
    - *
    - * A "partial loss of precision" message is printed if
    - * the internally estimated relative error exceeds 1^-12.
    - * A "singularity" message is printed on overflow or
    - * in cases not addressed (such as x < -1).
    - */
    -
    -/*                                                      hyp2f1  */
    -
    -
    -/*
    - * Cephes Math Library Release 2.8:  June, 2000
    - * Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier
    - */
    -
    -#include 
    -#include "mconf.h"
    -
    -#ifdef DEC
    -#define EPS 1.0e-14
    -#define EPS2 1.0e-11
    -#endif
    -
    -#ifdef IBMPC
    -#define EPS 1.0e-13
    -#define EPS2 1.0e-10
    -#endif
    -
    -#ifdef MIEEE
    -#define EPS 1.0e-13
    -#define EPS2 1.0e-10
    -#endif
    -
    -#ifdef UNK
    -#define EPS 1.0e-13
    -#define EPS2 1.0e-10
    -#endif
    -
    -#define ETHRESH 1.0e-12
    -
    -extern double MACHEP;
    -
    -static double hyt2f1(double a, double b, double c, double x, double *loss);
    -static double hys2f1(double a, double b, double c, double x, double *loss);
    -static double hyp2f1ra(double a, double b, double c, double x, double* loss);
    -
    -double hyp2f1(a, b, c, x)
    -double a, b, c, x;
    -{
    -    double d, d1, d2, e;
    -    double p, q, r, s, y, ax;
    -    double ia, ib, ic, id, err;
    -    double t1;
    -    int i, aid;
    -    int neg_int_a = 0, neg_int_b = 0;
    -    int neg_int_ca_or_cb = 0;
    -
    -    err = 0.0;
    -    ax = fabs(x);
    -    s = 1.0 - x;
    -    ia = round(a);              /* nearest integer to a */
    -    ib = round(b);
    -
    -    if (x == 0.0) {
    -        return 1.0;
    -    }
    -
    -    d = c - a - b;
    -    id = round(d);
    -
    -    if ((a == 0 || b == 0) && c != 0) {
    -        return 1.0;
    -    }
    -
    -    if (a <= 0 && fabs(a - ia) < EPS) { /* a is a negative integer */
    -        neg_int_a = 1;
    -    }
    -
    -    if (b <= 0 && fabs(b - ib) < EPS) { /* b is a negative integer */
    -        neg_int_b = 1;
    -    }
    -
    -    if (d <= -1 && !(fabs(d-id) > EPS && s < 0) && !(neg_int_a || neg_int_b)) {
    -        return pow(s, d) * hyp2f1(c - a, c - b, c, x);
    -    }
    -    if (d <= 0 && x == 1 && !(neg_int_a || neg_int_b))
    -        goto hypdiv;
    -
    -    if (ax < 1.0 || x == -1.0) {
    -        /* 2F1(a,b;b;x) = (1-x)**(-a) */
    -        if (fabs(b - c) < EPS) {        /* b = c */
    -            y = pow(s, -a);     /* s to the -a power */
    -            goto hypdon;
    -        }
    -        if (fabs(a - c) < EPS) {        /* a = c */
    -            y = pow(s, -b);     /* s to the -b power */
    -            goto hypdon;
    -        }
    -    }
    -
    -
    -
    -    if (c <= 0.0) {
    -        ic = round(c);          /* nearest integer to c */
    -        if (fabs(c - ic) < EPS) {       /* c is a negative integer */
    -            /* check if termination before explosion */
    -            if (neg_int_a && (ia > ic))
    -                goto hypok;
    -            if (neg_int_b && (ib > ic))
    -                goto hypok;
    -            goto hypdiv;
    -        }
    -    }
    -
    -    if (neg_int_a || neg_int_b) /* function is a polynomial */
    -        goto hypok;
    -
    -    t1 = fabs(b - a);
    -    if (x < -2.0 && fabs(t1 - round(t1)) > EPS) {
    -        /* This transform has a pole for b-a integer, and
    -         * may produce large cancellation errors for |1/x| close 1
    -         */
    -        p = hyp2f1(a, 1 - c + a, 1 - b + a, 1.0 / x);
    -        q = hyp2f1(b, 1 - c + b, 1 - a + b, 1.0 / x);
    -        p *= pow(-x, -a);
    -        q *= pow(-x, -b);
    -        t1 = gamma(c);
    -        s = t1 * gamma(b - a) / (gamma(b) * gamma(c - a));
    -        y = t1 * gamma(a - b) / (gamma(a) * gamma(c - b));
    -        return s * p + y * q;
    -    } else if (x < -1.0) {
    -        if (fabs(a) < fabs(b)) {
    -            return pow(s, -a) * hyp2f1(a, c-b, c, x/(x-1));
    -        } else {
    -            return pow(s, -b) * hyp2f1(b, c-a, c, x/(x-1));
    -        }
    -    }
    -
    -    if (ax > 1.0)               /* series diverges  */
    -        goto hypdiv;
    -
    -    p = c - a;
    -    ia = round(p);              /* nearest integer to c-a */
    -    if ((ia <= 0.0) && (fabs(p - ia) < EPS))    /* negative int c - a */
    -        neg_int_ca_or_cb = 1;
    -
    -    r = c - b;
    -    ib = round(r);              /* nearest integer to c-b */
    -    if ((ib <= 0.0) && (fabs(r - ib) < EPS))    /* negative int c - b */
    -        neg_int_ca_or_cb = 1;
    -
    -    id = round(d);              /* nearest integer to d */
    -    q = fabs(d - id);
    -
    -    /* Thanks to Christian Burger 
    -     * for reporting a bug here.  */
    -    if (fabs(ax - 1.0) < EPS) { /* |x| == 1.0   */
    -        if (x > 0.0) {
    -            if (neg_int_ca_or_cb) {
    -                if (d >= 0.0)
    -                    goto hypf;
    -                else
    -                    goto hypdiv;
    -            }
    -            if (d <= 0.0)
    -                goto hypdiv;
    -            y = gamma(c) * gamma(d) / (gamma(p) * gamma(r));
    -            goto hypdon;
    -        }
    -        if (d <= -1.0)
    -            goto hypdiv;
    -    }
    -
    -    /* Conditionally make d > 0 by recurrence on c
    -     * AMS55 #15.2.27
    -     */
    -    if (d < 0.0) {
    -        /* Try the power series first */
    -        y = hyt2f1(a, b, c, x, &err);
    -        if (err < ETHRESH)
    -            goto hypdon;
    -        /* Apply the recurrence if power series fails */
    -        err = 0.0;
    -        aid = 2 - id;
    -        e = c + aid;
    -        d2 = hyp2f1(a, b, e, x);
    -        d1 = hyp2f1(a, b, e + 1.0, x);
    -        q = a + b + 1.0;
    -        for (i = 0; i < aid; i++) {
    -            r = e - 1.0;
    -            y = (e * (r - (2.0 * e - q) * x) * d2 +
    -                 (e - a) * (e - b) * x * d1) / (e * r * s);
    -            e = r;
    -            d1 = d2;
    -            d2 = y;
    -        }
    -        goto hypdon;
    -    }
    -
    -
    -    if (neg_int_ca_or_cb)
    -        goto hypf;              /* negative integer c-a or c-b */
    -
    -  hypok:
    -    y = hyt2f1(a, b, c, x, &err);
    -
    -
    -  hypdon:
    -    if (err > ETHRESH) {
    -        mtherr("hyp2f1", PLOSS);
    -        /*      printf( "Estimated err = %.2e\n", err ); */
    -    }
    -    return (y);
    -
    -/* The transformation for c-a or c-b negative integer
    - * AMS55 #15.3.3
    - */
    -  hypf:
    -    y = pow(s, d) * hys2f1(c - a, c - b, c, x, &err);
    -    goto hypdon;
    -
    -/* The alarm exit */
    -  hypdiv:
    -    mtherr("hyp2f1", OVERFLOW);
    -    return NPY_INFINITY;
    -}
    -
    -
    -
    -
    -
    -
    -/* Apply transformations for |x| near 1
    - * then call the power series
    - */
    -static double hyt2f1(a, b, c, x, loss)
    -double a, b, c, x;
    -double *loss;
    -{
    -    double p, q, r, s, t, y, d, err, err1;
    -    double ax, id, d1, d2, e, y1;
    -    int i, aid;
    -
    -    int ia, ib, neg_int_a = 0, neg_int_b = 0;
    -
    -    ia = round(a);
    -    ib = round(b);
    -
    -    if (a <= 0 && fabs(a - ia) < EPS) { /* a is a negative integer */
    -        neg_int_a = 1;
    -    }
    -
    -    if (b <= 0 && fabs(b - ib) < EPS) { /* b is a negative integer */
    -        neg_int_b = 1;
    -    }
    -
    -    err = 0.0;
    -    s = 1.0 - x;
    -    if (x < -0.5 && !(neg_int_a || neg_int_b)) {
    -        if (b > a)
    -            y = pow(s, -a) * hys2f1(a, c - b, c, -x / s, &err);
    -
    -        else
    -            y = pow(s, -b) * hys2f1(c - a, b, c, -x / s, &err);
    -
    -        goto done;
    -    }
    -
    -    d = c - a - b;
    -    id = round(d);              /* nearest integer to d */
    -
    -    if (x > 0.9 && !(neg_int_a || neg_int_b)) {
    -        if (fabs(d - id) > EPS) {
    -            /* test for integer c-a-b */
    -            /* Try the power series first */
    -            y = hys2f1(a, b, c, x, &err);
    -            if (err < ETHRESH)
    -                goto done;
    -            /* If power series fails, then apply AMS55 #15.3.6 */
    -            q = hys2f1(a, b, 1.0 - d, s, &err);
    -            q *= gamma(d) / (gamma(c - a) * gamma(c - b));
    -            r = pow(s, d) * hys2f1(c - a, c - b, d + 1.0, s, &err1);
    -            r *= gamma(-d) / (gamma(a) * gamma(b));
    -            y = q + r;
    -
    -            q = fabs(q); /* estimate cancellation error */
    -            r = fabs(r);
    -            if (q > r)
    -                r = q;
    -            err += err1 + (MACHEP * r) / y;
    -
    -            y *= gamma(c);
    -            goto done;
    -        } else {
    -            /* Psi function expansion, AMS55 #15.3.10, #15.3.11, #15.3.12
    -             *
    -             * Although AMS55 does not explicitly state it, this expansion fails
    -             * for negative integer a or b, since the psi and Gamma functions
    -             * involved have poles.
    -             */
    -
    -            if (id >= 0.0) {
    -                e = d;
    -                d1 = d;
    -                d2 = 0.0;
    -                aid = id;
    -            } else {
    -                e = -d;
    -                d1 = 0.0;
    -                d2 = d;
    -                aid = -id;
    -            }
    -
    -            ax = log(s);
    -
    -            /* sum for t = 0 */
    -            y = psi(1.0) + psi(1.0 + e) - psi(a + d1) - psi(b + d1) - ax;
    -            y /= gamma(e + 1.0);
    -
    -            p = (a + d1) * (b + d1) * s / gamma(e + 2.0); /* Poch for t=1 */
    -            t = 1.0;
    -            do {
    -                r = psi(1.0 + t) + psi(1.0 + t + e) - psi(a + t + d1)
    -                    - psi(b + t + d1) - ax;
    -                q = p * r;
    -                y += q;
    -                p *= s * (a + t + d1) / (t + 1.0);
    -                p *= (b + t + d1) / (t + 1.0 + e);
    -                t += 1.0;
    -                if (t > 10000) {      /* should never happen */
    -                    mtherr("hyp2f1", TOOMANY);
    -                    *loss = 1.0;
    -                    return NPY_NAN;
    -                }
    -            }
    -            while (y == 0 || fabs(q / y) > EPS);
    -
    -            if (id == 0.0) {
    -                y *= gamma(c) / (gamma(a) * gamma(b));
    -                goto psidon;
    -            }
    -
    -            y1 = 1.0;
    -
    -            if (aid == 1)
    -                goto nosum;
    -
    -            t = 0.0;
    -            p = 1.0;
    -            for (i = 1; i < aid; i++) {
    -                r = 1.0 - e + t;
    -                p *= s * (a + t + d2) * (b + t + d2) / r;
    -                t += 1.0;
    -                p /= t;
    -                y1 += p;
    -            }
    -          nosum:
    -            p = gamma(c);
    -            y1 *= gamma(e) * p / (gamma(a + d1) * gamma(b + d1));
    -
    -            y *= p / (gamma(a + d2) * gamma(b + d2));
    -            if ((aid & 1) != 0)
    -                y = -y;
    -
    -            q = pow(s, id);     /* s to the id power */
    -            if (id > 0.0)
    -                y *= q;
    -            else
    -                y1 *= q;
    -
    -            y += y1;
    -          psidon:
    -            goto done;
    -        }
    -
    -    }
    -
    -/* Use defining power series if no special cases */
    -    y = hys2f1(a, b, c, x, &err);
    -
    -  done:
    -    *loss = err;
    -    return (y);
    -}
    -
    -
    -
    -
    -
    -/* Defining power series expansion of Gauss hypergeometric function */
    -
    -static double hys2f1(a, b, c, x, loss)
    -double a, b, c, x;
    -double *loss;                   /* estimates loss of significance */
    -{
    -    double f, g, h, k, m, s, u, umax, t;
    -    int i;
    -    int ia, ib, intflag = 0;
    -
    -    if (fabs(b) > fabs(a)) {
    -        /* Ensure that |a| > |b| ... */
    -        f = b;
    -        b = a;
    -        a = f;
    -    }
    -
    -    ia = round(a);
    -    ib = round(b);
    -
    -    if (fabs(b-ib) < EPS && ib <= 0 && fabs(b) < fabs(a)) {
    -        /* .. except when `b` is a smaller negative integer */
    -        f = b;
    -        b = a;
    -        a = f;
    -        intflag = 1;
    -    }
    -
    -    if ((fabs(a) > fabs(c) + 1 || intflag) && fabs(c-a) > 2 && fabs(a) > 2) {
    -        /* |a| >> |c| implies that large cancellation error is to be expected.
    -         *
    -         * We try to reduce it with the recurrence relations
    -         */
    -        return hyp2f1ra(a, b, c, x, loss);
    -    }
    -
    -    i = 0;
    -    umax = 0.0;
    -    f = a;
    -    g = b;
    -    h = c;
    -    s = 1.0;
    -    u = 1.0;
    -    k = 0.0;
    -    do {
    -        if (fabs(h) < EPS) {
    -            *loss = 1.0;
    -            return NPY_INFINITY;
    -        }
    -        m = k + 1.0;
    -        u = u * ((f + k) * (g + k) * x / ((h + k) * m));
    -        s += u;
    -        k = fabs(u);            /* remember largest term summed */
    -        if (k > umax)
    -            umax = k;
    -        k = m;
    -        if (++i > 10000) {      /* should never happen */
    -            *loss = 1.0;
    -            return (s);
    -        }
    -    }
    -    while (s == 0 || fabs(u / s) > MACHEP);
    -
    -    /* return estimated relative error */
    -    *loss = (MACHEP * umax) / fabs(s) + (MACHEP * i);
    -
    -    return (s);
    -}
    -
    -
    -/*
    - * Evaluate hypergeometric function by two-term recurrence in `a`.
    - *
    - * This avoids some of the loss of precision in the strongly alternating
    - * hypergeometric series, and can be used to reduce the `a` and `b` parameters
    - * to smaller values.
    - *
    - * AMS55 #15.2.10
    - */
    -static double hyp2f1ra(double a, double b, double c, double x,
    -                       double* loss)
    -{
    -    double f2, f1, f0;
    -    int n, m, da;
    -    double t, err;
    -
    -    /* Don't cross c or zero */
    -    if ((c < 0 && a <= c) || (c >= 0 && a >= c)) {
    -        da = round(a - c);
    -    } else {
    -        da = round(a);
    -    }
    -    t = a - da;
    -
    -    *loss = 0;
    -
    -    assert(da != 0);
    -
    -    if (da < 0) {
    -        /* Recurse down */
    -        f2 = 0;
    -        f1 = hys2f1(t, b, c, x, &err); *loss += err;
    -        f0 = hys2f1(t-1, b, c, x, &err); *loss += err;
    -        t -= 1;
    -        for (n = 1; n < -da; ++n) {
    -            f2 = f1;
    -            f1 = f0;
    -            f0 = -(2*t-c-t*x+b*x)/(c-t)*f1 - t*(x-1)/(c-t)*f2;
    -            t -= 1;
    -        }
    -    } else {
    -        /* Recurse up */
    -        f2 = 0;
    -        f1 = hys2f1(t, b, c, x, &err); *loss += err;
    -        f0 = hys2f1(t+1, b, c, x, &err); *loss += err;
    -        t += 1;
    -        for (n = 1; n < da; ++n) {
    -            f2 = f1;
    -            f1 = f0;
    -            f0 = -((2*t-c-t*x+b*x)*f1 + (c-t)*f2)/(t*(x-1));
    -            t += 1;
    -        }
    -    }
    -
    -    return f0;
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/hyperg.c b/scipy-0.10.1/scipy/special/cephes/hyperg.c
    deleted file mode 100644
    index 3ed79fc874..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/hyperg.c
    +++ /dev/null
    @@ -1,395 +0,0 @@
    -/*							hyperg.c
    - *
    - *	Confluent hypergeometric function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, x, y, hyperg();
    - *
    - * y = hyperg( a, b, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Computes the confluent hypergeometric function
    - *
    - *                          1           2
    - *                       a x    a(a+1) x
    - *   F ( a,b;x )  =  1 + ---- + --------- + ...
    - *  1 1                  b 1!   b(b+1) 2!
    - *
    - * Many higher transcendental functions are special cases of
    - * this power series.
    - *
    - * As is evident from the formula, b must not be a negative
    - * integer or zero unless a is an integer with 0 >= a > b.
    - *
    - * The routine attempts both a direct summation of the series
    - * and an asymptotic expansion.  In each case error due to
    - * roundoff, cancellation, and nonconvergence is estimated.
    - * The result with smaller estimated error is returned.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a, b, x), all three variables
    - * ranging from 0 to 30.
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0,30         2000       1.2e-15     1.3e-16
    - qtst1:
    - 21800   max =  1.4200E-14   rms =  1.0841E-15  ave = -5.3640E-17 
    - ltstd:
    - 25500   max = 1.2759e-14   rms = 3.7155e-16  ave = 1.5384e-18 
    - *    IEEE      0,30        30000       1.8e-14     1.1e-15
    - *
    - * Larger errors can be observed when b is near a negative
    - * integer or zero.  Certain combinations of arguments yield
    - * serious cancellation error in the power series summation
    - * and also are not in the region of near convergence of the
    - * asymptotic series.  An error message is printed if the
    - * self-estimated relative error is greater than 1.0e-12.
    - *
    - */
    -
    -/*							hyperg.c */
    -
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1988, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -extern double MAXNUM, MACHEP;
    -
    -static double hy1f1p(double a, double b, double x, double *acanc );
    -static double hy1f1a(double a, double b, double x, double *acanc );
    -
    -double hyperg( a, b, x)
    -double a, b, x;
    -{
    -double asum, psum, acanc, pcanc, temp;
    -
    -/* See if a Kummer transformation will help */
    -temp = b - a;
    -if( fabs(temp) < 0.001 * fabs(a) )
    -	return( exp(x) * hyperg( temp, b, -x )  );
    -
    -
    -/* Try power & asymptotic series, starting from the one that is likely OK */
    -if (fabs(x) < 10 + fabs(a) + fabs(b))
    -        {
    -        psum = hy1f1p( a, b, x, &pcanc );
    -        if( pcanc < 1.0e-15 )
    - 	        goto done;
    -        asum = hy1f1a( a, b, x, &acanc );
    -        }
    -else
    -        {
    -        psum = hy1f1a( a, b, x, &pcanc );
    -        if( pcanc < 1.0e-15 )
    - 	        goto done;
    -        asum = hy1f1p( a, b, x, &acanc );
    -        }
    -
    -/* Pick the result with less estimated error */
    -
    -if( acanc < pcanc )
    -	{
    -	pcanc = acanc;
    -	psum = asum;
    -	}
    -
    -done:
    -if( pcanc > 1.0e-12 )
    -	mtherr( "hyperg", PLOSS );
    -
    -return( psum );
    -}
    -
    -
    -
    -
    -/* Power series summation for confluent hypergeometric function		*/
    -
    -
    -static double hy1f1p( a, b, x, err )
    -double a, b, x;
    -double *err;
    -{
    -double n, a0, sum, t, u, temp, maxn;
    -double an, bn, maxt;
    -double y, c, sumc;
    -
    -
    -/* set up for power series summation */
    -an = a;
    -bn = b;
    -a0 = 1.0;
    -sum = 1.0;
    -c = 0.0;
    -n = 1.0;
    -t = 1.0;
    -maxt = 0.0;
    -*err = 1.0;
    -
    -maxn = 200.0 + 2*fabs(a) + 2*fabs(b);
    -
    -while( t > MACHEP )
    -	{
    -	if( bn == 0 )			/* check bn first since if both	*/
    -		{
    -		mtherr( "hyperg", SING );
    -		return( MAXNUM );	/* an and bn are zero it is	*/
    -		}
    -	if( an == 0 )			/* a singularity		*/
    -		return( sum );
    -	if( n > maxn )
    -                {
    -                /* too many terms; take the last one as error estimate */
    -                c = fabs(c) + fabs(t)*50.0;
    -		goto pdone;
    -                }
    -	u = x * ( an / (bn * n) );
    -
    -	/* check for blowup */
    -	temp = fabs(u);
    -	if( (temp > 1.0 ) && (maxt > (MAXNUM/temp)) )
    -		{
    -		*err = 1.0;	/* blowup: estimate 100% error */
    -                return sum;
    -		}
    -
    -	a0 *= u;
    -
    -	y = a0 - c;
    -	sumc = sum + y;
    -	c = (sumc - sum) - y;
    -	sum = sumc;
    -
    -	t = fabs(a0);
    -
    -	an += 1.0;
    -	bn += 1.0;
    -	n += 1.0;
    -	}
    -
    -pdone:
    -
    -/* estimate error due to roundoff and cancellation */
    -if (sum != 0.0) {
    -	*err = fabs(c / sum);
    -} else {
    -	*err = fabs(c);
    -}
    -
    -if (*err != *err) {
    -	/* nan */
    -	*err = 1.0;
    -}
    -
    -return( sum );
    -}
    -
    -
    -/*							hy1f1a()	*/
    -/* asymptotic formula for hypergeometric function:
    - *
    - *        (    -a                         
    - *  --    ( |z|                           
    - * |  (b) ( -------- 2f0( a, 1+a-b, -1/x )
    - *        (  --                           
    - *        ( |  (b-a)                      
    - *
    - *
    - *                                x    a-b                     )
    - *                               e  |x|                        )
    - *                             + -------- 2f0( b-a, 1-a, 1/x ) )
    - *                                --                           )
    - *                               |  (a)                        )
    - */
    -
    -static double hy1f1a( a, b, x, err )
    -double a, b, x;
    -double *err;
    -{
    -double h1, h2, t, u, temp, acanc, asum, err1, err2;
    -
    -if( x == 0 )
    -	{
    -	acanc = 1.0;
    -	asum = MAXNUM;
    -	goto adone;
    -	}
    -temp = log( fabs(x) );
    -t = x + temp * (a-b);
    -u = -temp * a;
    -
    -if( b > 0 )
    -	{
    -	temp = lgam(b);
    -	t += temp;
    -	u += temp;
    -	}
    -
    -h1 = hyp2f0( a, a-b+1, -1.0/x, 1, &err1 );
    -
    -temp = exp(u) / gamma(b-a);
    -h1 *= temp;
    -err1 *= temp;
    -
    -h2 = hyp2f0( b-a, 1.0-a, 1.0/x, 2, &err2 );
    -
    -if( a < 0 )
    -	temp = exp(t) / gamma(a);
    -else
    -	temp = exp( t - lgam(a) );
    -
    -h2 *= temp;
    -err2 *= temp;
    -
    -if( x < 0.0 )
    -	asum = h1;
    -else
    -	asum = h2;
    -
    -acanc = fabs(err1) + fabs(err2);
    -
    -if( b < 0 )
    -	{
    -	temp = gamma(b);
    -	asum *= temp;
    -	acanc *= fabs(temp);
    -	}
    -
    -
    -if( asum != 0.0 )
    -	acanc /= fabs(asum);
    -
    -if (acanc != acanc)
    -	/* nan */
    -	acanc = 1.0;
    -
    -if (asum == NPY_INFINITY || asum == -NPY_INFINITY)
    -        /* infinity */
    -        acanc = 0;
    -
    -acanc *= 30.0;	/* fudge factor, since error of asymptotic formula
    -		 * often seems this much larger than advertised */
    -
    -adone:
    -
    -
    -*err = acanc;
    -return( asum );
    -}
    -
    -/*							hyp2f0()	*/
    -
    -double hyp2f0( a, b, x, type, err )
    -double a, b, x;
    -int type;	/* determines what converging factor to use */
    -double *err;
    -{
    -double a0, alast, t, tlast, maxt;
    -double n, an, bn, u, sum, temp;
    -
    -an = a;
    -bn = b;
    -a0 = 1.0e0;
    -alast = 1.0e0;
    -sum = 0.0;
    -n = 1.0e0;
    -t = 1.0e0;
    -tlast = 1.0e9;
    -maxt = 0.0;
    -
    -do
    -	{
    -	if( an == 0 )
    -		goto pdone;
    -	if( bn == 0 )
    -		goto pdone;
    -
    -	u = an * (bn * x / n);
    -
    -	/* check for blowup */
    -	temp = fabs(u);
    -	if( (temp > 1.0 ) && (maxt > (MAXNUM/temp)) )
    -		goto error;
    -
    -	a0 *= u;
    -	t = fabs(a0);
    -
    -	/* terminating condition for asymptotic series:
    -         * the series is divergent (if a or b is not a negative integer),
    -         * but its leading part can be used as an asymptotic expansion
    -         */
    -        if( t > tlast )
    -                goto ndone;
    -
    -	tlast = t;
    -	sum += alast;	/* the sum is one term behind */
    -	alast = a0;
    -
    -	if( n > 200 )
    -		goto ndone;
    -
    -	an += 1.0e0;
    -	bn += 1.0e0;
    -	n += 1.0e0;
    -	if( t > maxt )
    -		maxt = t;
    -	}
    -while( t > MACHEP );
    -
    -
    -pdone:	/* series converged! */
    -
    -/* estimate error due to roundoff and cancellation */
    -*err = fabs(  MACHEP * (n + maxt)  );
    -
    -alast = a0;
    -goto done;
    -
    -ndone:	/* series did not converge */
    -
    -/* The following "Converging factors" are supposed to improve accuracy,
    - * but do not actually seem to accomplish very much. */
    -
    -n -= 1.0;
    -x = 1.0/x;
    -
    -switch( type )	/* "type" given as subroutine argument */
    -{
    -case 1:
    -	alast *= ( 0.5 + (0.125 + 0.25*b - 0.5*a + 0.25*x - 0.25*n)/x );
    -	break;
    -
    -case 2:
    -	alast *= 2.0/3.0 - b + 2.0*a + x - n;
    -	break;
    -
    -default:
    -	;
    -}
    -
    -/* estimate error due to roundoff, cancellation, and nonconvergence */
    -*err = MACHEP * (n + maxt)  +  fabs ( a0 );
    -
    -done:
    -sum += alast;
    -return( sum );
    -
    -/* series blew up: */
    -error:
    -*err = MAXNUM;
    -mtherr( "hyperg", TLOSS );
    -return( sum );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/i0.c b/scipy-0.10.1/scipy/special/cephes/i0.c
    deleted file mode 100644
    index 25214d8660..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/i0.c
    +++ /dev/null
    @@ -1,389 +0,0 @@
    -/*							i0.c
    - *
    - *	Modified Bessel function of order zero
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, i0();
    - *
    - * y = i0( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns modified Bessel function of order zero of the
    - * argument.
    - *
    - * The function is defined as i0(x) = j0( ix ).
    - *
    - * The range is partitioned into the two intervals [0,8] and
    - * (8, infinity).  Chebyshev polynomial expansions are employed
    - * in each interval.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0,30         6000       8.2e-17     1.9e-17
    - *    IEEE      0,30        30000       5.8e-16     1.4e-16
    - *
    - */
    -/*							i0e.c
    - *
    - *	Modified Bessel function of order zero,
    - *	exponentially scaled
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, i0e();
    - *
    - * y = i0e( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns exponentially scaled modified Bessel function
    - * of order zero of the argument.
    - *
    - * The function is defined as i0e(x) = exp(-|x|) j0( ix ).
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0,30        30000       5.4e-16     1.2e-16
    - * See i0().
    - *
    - */
    -
    -/*							i0.c		*/
    -
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -/* Chebyshev coefficients for exp(-x) I0(x)
    - * in the interval [0,8].
    - *
    - * lim(x->0){ exp(-x) I0(x) } = 1.
    - */
    -
    -#ifdef UNK
    -static double A[] =
    -{
    --4.41534164647933937950E-18,
    - 3.33079451882223809783E-17,
    --2.43127984654795469359E-16,
    - 1.71539128555513303061E-15,
    --1.16853328779934516808E-14,
    - 7.67618549860493561688E-14,
    --4.85644678311192946090E-13,
    - 2.95505266312963983461E-12,
    --1.72682629144155570723E-11,
    - 9.67580903537323691224E-11,
    --5.18979560163526290666E-10,
    - 2.65982372468238665035E-9,
    --1.30002500998624804212E-8,
    - 6.04699502254191894932E-8,
    --2.67079385394061173391E-7,
    - 1.11738753912010371815E-6,
    --4.41673835845875056359E-6,
    - 1.64484480707288970893E-5,
    --5.75419501008210370398E-5,
    - 1.88502885095841655729E-4,
    --5.76375574538582365885E-4,
    - 1.63947561694133579842E-3,
    --4.32430999505057594430E-3,
    - 1.05464603945949983183E-2,
    --2.37374148058994688156E-2,
    - 4.93052842396707084878E-2,
    --9.49010970480476444210E-2,
    - 1.71620901522208775349E-1,
    --3.04682672343198398683E-1,
    - 6.76795274409476084995E-1
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short A[] = {
    -0121642,0162671,0004646,0103567,
    -0022431,0115424,0135755,0026104,
    -0123214,0023533,0110365,0156635,
    -0023767,0033304,0117662,0172716,
    -0124522,0100426,0012277,0157531,
    -0025254,0155062,0054461,0030465,
    -0126010,0131143,0013560,0153604,
    -0026517,0170577,0006336,0114437,
    -0127227,0162253,0152243,0052734,
    -0027724,0142766,0061641,0160200,
    -0130416,0123760,0116564,0125262,
    -0031066,0144035,0021246,0054641,
    -0131537,0053664,0060131,0102530,
    -0032201,0155664,0165153,0020652,
    -0132617,0061434,0074423,0176145,
    -0033225,0174444,0136147,0122542,
    -0133624,0031576,0056453,0020470,
    -0034211,0175305,0172321,0041314,
    -0134561,0054462,0147040,0165315,
    -0035105,0124333,0120203,0162532,
    -0135427,0013750,0174257,0055221,
    -0035726,0161654,0050220,0100162,
    -0136215,0131361,0000325,0041110,
    -0036454,0145417,0117357,0017352,
    -0136702,0072367,0104415,0133574,
    -0037111,0172126,0072505,0014544,
    -0137302,0055601,0120550,0033523,
    -0037457,0136543,0136544,0043002,
    -0137633,0177536,0001276,0066150,
    -0040055,0041164,0100655,0010521
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short A[] = {
    -0xd0ef,0x2134,0x5cb7,0xbc54,
    -0xa589,0x977d,0x3362,0x3c83,
    -0xbbb4,0x721e,0x84eb,0xbcb1,
    -0x5eba,0x93f6,0xe6d8,0x3cde,
    -0xfbeb,0xc297,0x5022,0xbd0a,
    -0x2627,0x4b26,0x9b46,0x3d35,
    -0x1af0,0x62ee,0x164c,0xbd61,
    -0xd324,0xe19b,0xfe2f,0x3d89,
    -0x6abc,0x7a94,0xfc95,0xbdb2,
    -0x3c10,0xcc74,0x98be,0x3dda,
    -0x9556,0x13ae,0xd4fe,0xbe01,
    -0xcb34,0xa454,0xd903,0x3e26,
    -0x30ab,0x8c0b,0xeaf6,0xbe4b,
    -0x6435,0x9d4d,0x3b76,0x3e70,
    -0x7f8d,0x8f22,0xec63,0xbe91,
    -0xf4ac,0x978c,0xbf24,0x3eb2,
    -0x6427,0xcba5,0x866f,0xbed2,
    -0x2859,0xbe9a,0x3f58,0x3ef1,
    -0x1d5a,0x59c4,0x2b26,0xbf0e,
    -0x7cab,0x7410,0xb51b,0x3f28,
    -0xeb52,0x1f15,0xe2fd,0xbf42,
    -0x100e,0x8a12,0xdc75,0x3f5a,
    -0xa849,0x201a,0xb65e,0xbf71,
    -0xe3dd,0xf3dd,0x9961,0x3f85,
    -0xb6f0,0xf121,0x4e9e,0xbf98,
    -0xa32d,0xcea8,0x3e8a,0x3fa9,
    -0x06ea,0x342d,0x4b70,0xbfb8,
    -0x88c0,0x77ac,0xf7ac,0x3fc5,
    -0xcd8d,0xc057,0x7feb,0xbfd3,
    -0xa22a,0x9035,0xa84e,0x3fe5,
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short A[] = {
    -0xbc54,0x5cb7,0x2134,0xd0ef,
    -0x3c83,0x3362,0x977d,0xa589,
    -0xbcb1,0x84eb,0x721e,0xbbb4,
    -0x3cde,0xe6d8,0x93f6,0x5eba,
    -0xbd0a,0x5022,0xc297,0xfbeb,
    -0x3d35,0x9b46,0x4b26,0x2627,
    -0xbd61,0x164c,0x62ee,0x1af0,
    -0x3d89,0xfe2f,0xe19b,0xd324,
    -0xbdb2,0xfc95,0x7a94,0x6abc,
    -0x3dda,0x98be,0xcc74,0x3c10,
    -0xbe01,0xd4fe,0x13ae,0x9556,
    -0x3e26,0xd903,0xa454,0xcb34,
    -0xbe4b,0xeaf6,0x8c0b,0x30ab,
    -0x3e70,0x3b76,0x9d4d,0x6435,
    -0xbe91,0xec63,0x8f22,0x7f8d,
    -0x3eb2,0xbf24,0x978c,0xf4ac,
    -0xbed2,0x866f,0xcba5,0x6427,
    -0x3ef1,0x3f58,0xbe9a,0x2859,
    -0xbf0e,0x2b26,0x59c4,0x1d5a,
    -0x3f28,0xb51b,0x7410,0x7cab,
    -0xbf42,0xe2fd,0x1f15,0xeb52,
    -0x3f5a,0xdc75,0x8a12,0x100e,
    -0xbf71,0xb65e,0x201a,0xa849,
    -0x3f85,0x9961,0xf3dd,0xe3dd,
    -0xbf98,0x4e9e,0xf121,0xb6f0,
    -0x3fa9,0x3e8a,0xcea8,0xa32d,
    -0xbfb8,0x4b70,0x342d,0x06ea,
    -0x3fc5,0xf7ac,0x77ac,0x88c0,
    -0xbfd3,0x7feb,0xc057,0xcd8d,
    -0x3fe5,0xa84e,0x9035,0xa22a
    -};
    -#endif
    -
    -
    -/* Chebyshev coefficients for exp(-x) sqrt(x) I0(x)
    - * in the inverted interval [8,infinity].
    - *
    - * lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi).
    - */
    -
    -#ifdef UNK
    -static double B[] =
    -{
    --7.23318048787475395456E-18,
    --4.83050448594418207126E-18,
    - 4.46562142029675999901E-17,
    - 3.46122286769746109310E-17,
    --2.82762398051658348494E-16,
    --3.42548561967721913462E-16,
    - 1.77256013305652638360E-15,
    - 3.81168066935262242075E-15,
    --9.55484669882830764870E-15,
    --4.15056934728722208663E-14,
    - 1.54008621752140982691E-14,
    - 3.85277838274214270114E-13,
    - 7.18012445138366623367E-13,
    --1.79417853150680611778E-12,
    --1.32158118404477131188E-11,
    --3.14991652796324136454E-11,
    - 1.18891471078464383424E-11,
    - 4.94060238822496958910E-10,
    - 3.39623202570838634515E-9,
    - 2.26666899049817806459E-8,
    - 2.04891858946906374183E-7,
    - 2.89137052083475648297E-6,
    - 6.88975834691682398426E-5,
    - 3.36911647825569408990E-3,
    - 8.04490411014108831608E-1
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short B[] = {
    -0122005,0066672,0123124,0054311,
    -0121662,0033323,0030214,0104602,
    -0022515,0170300,0113314,0020413,
    -0022437,0117350,0035402,0007146,
    -0123243,0000135,0057220,0177435,
    -0123305,0073476,0144106,0170702,
    -0023777,0071755,0017527,0154373,
    -0024211,0052214,0102247,0033270,
    -0124454,0017763,0171453,0012322,
    -0125072,0166316,0075505,0154616,
    -0024612,0133770,0065376,0025045,
    -0025730,0162143,0056036,0001632,
    -0026112,0015077,0150464,0063542,
    -0126374,0101030,0014274,0065457,
    -0127150,0077271,0125763,0157617,
    -0127412,0104350,0040713,0120445,
    -0027121,0023765,0057500,0001165,
    -0030407,0147146,0003643,0075644,
    -0031151,0061445,0044422,0156065,
    -0031702,0132224,0003266,0125551,
    -0032534,0000076,0147153,0005555,
    -0033502,0004536,0004016,0026055,
    -0034620,0076433,0142314,0171215,
    -0036134,0146145,0013454,0101104,
    -0040115,0171425,0062500,0047133
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short B[] = {
    -0x8b19,0x54ca,0xadb7,0xbc60,
    -0x9130,0x6611,0x46da,0xbc56,
    -0x8421,0x12d9,0xbe18,0x3c89,
    -0x41cd,0x0760,0xf3dd,0x3c83,
    -0x1fe4,0xabd2,0x600b,0xbcb4,
    -0xde38,0xd908,0xaee7,0xbcb8,
    -0xfb1f,0xa3ea,0xee7d,0x3cdf,
    -0xe6d7,0x9094,0x2a91,0x3cf1,
    -0x629a,0x7e65,0x83fe,0xbd05,
    -0xbb32,0xcf68,0x5d99,0xbd27,
    -0xc545,0x0d5f,0x56ff,0x3d11,
    -0xc073,0x6b83,0x1c8c,0x3d5b,
    -0x8cec,0xfa26,0x4347,0x3d69,
    -0x8d66,0x0317,0x9043,0xbd7f,
    -0x7bf2,0x357e,0x0fd7,0xbdad,
    -0x7425,0x0839,0x511d,0xbdc1,
    -0x004f,0xabe8,0x24fe,0x3daa,
    -0x6f75,0xc0f4,0xf9cc,0x3e00,
    -0x5b87,0xa922,0x2c64,0x3e2d,
    -0xd56d,0x80d6,0x5692,0x3e58,
    -0x616e,0xd9cd,0x8007,0x3e8b,
    -0xc586,0xc101,0x412b,0x3ec8,
    -0x9e52,0x7899,0x0fa3,0x3f12,
    -0x9049,0xa2e5,0x998c,0x3f6b,
    -0x09cb,0xaca8,0xbe62,0x3fe9
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short B[] = {
    -0xbc60,0xadb7,0x54ca,0x8b19,
    -0xbc56,0x46da,0x6611,0x9130,
    -0x3c89,0xbe18,0x12d9,0x8421,
    -0x3c83,0xf3dd,0x0760,0x41cd,
    -0xbcb4,0x600b,0xabd2,0x1fe4,
    -0xbcb8,0xaee7,0xd908,0xde38,
    -0x3cdf,0xee7d,0xa3ea,0xfb1f,
    -0x3cf1,0x2a91,0x9094,0xe6d7,
    -0xbd05,0x83fe,0x7e65,0x629a,
    -0xbd27,0x5d99,0xcf68,0xbb32,
    -0x3d11,0x56ff,0x0d5f,0xc545,
    -0x3d5b,0x1c8c,0x6b83,0xc073,
    -0x3d69,0x4347,0xfa26,0x8cec,
    -0xbd7f,0x9043,0x0317,0x8d66,
    -0xbdad,0x0fd7,0x357e,0x7bf2,
    -0xbdc1,0x511d,0x0839,0x7425,
    -0x3daa,0x24fe,0xabe8,0x004f,
    -0x3e00,0xf9cc,0xc0f4,0x6f75,
    -0x3e2d,0x2c64,0xa922,0x5b87,
    -0x3e58,0x5692,0x80d6,0xd56d,
    -0x3e8b,0x8007,0xd9cd,0x616e,
    -0x3ec8,0x412b,0xc101,0xc586,
    -0x3f12,0x0fa3,0x7899,0x9e52,
    -0x3f6b,0x998c,0xa2e5,0x9049,
    -0x3fe9,0xbe62,0xaca8,0x09cb
    -};
    -#endif
    -
    -double i0(x)
    -double x;
    -{
    -double y;
    -
    -if( x < 0 )
    -	x = -x;
    -if( x <= 8.0 )
    -	{
    -	y = (x/2.0) - 2.0;
    -	return( exp(x) * chbevl( y, A, 30 ) );
    -	}
    -
    -return(  exp(x) * chbevl( 32.0/x - 2.0, B, 25 ) / sqrt(x) );
    -
    -}
    -
    -
    -
    -
    -double i0e( x )
    -double x;
    -{
    -double y;
    -
    -if( x < 0 )
    -	x = -x;
    -if( x <= 8.0 )
    -	{
    -	y = (x/2.0) - 2.0;
    -	return( chbevl( y, A, 30 ) );
    -	}
    -
    -return(  chbevl( 32.0/x - 2.0, B, 25 ) / sqrt(x) );
    -
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/i1.c b/scipy-0.10.1/scipy/special/cephes/i1.c
    deleted file mode 100644
    index 864ec62c3c..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/i1.c
    +++ /dev/null
    @@ -1,394 +0,0 @@
    -/*							i1.c
    - *
    - *	Modified Bessel function of order one
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, i1();
    - *
    - * y = i1( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns modified Bessel function of order one of the
    - * argument.
    - *
    - * The function is defined as i1(x) = -i j1( ix ).
    - *
    - * The range is partitioned into the two intervals [0,8] and
    - * (8, infinity).  Chebyshev polynomial expansions are employed
    - * in each interval.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30        3400       1.2e-16     2.3e-17
    - *    IEEE      0, 30       30000       1.9e-15     2.1e-16
    - *
    - *
    - */
    -/*							i1e.c
    - *
    - *	Modified Bessel function of order one,
    - *	exponentially scaled
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, i1e();
    - *
    - * y = i1e( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns exponentially scaled modified Bessel function
    - * of order one of the argument.
    - *
    - * The function is defined as i1(x) = -i exp(-|x|) j1( ix ).
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0, 30       30000       2.0e-15     2.0e-16
    - * See i1().
    - *
    - */
    -
    -/*							i1.c 2		*/
    -
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1985, 1987, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -/* Chebyshev coefficients for exp(-x) I1(x) / x
    - * in the interval [0,8].
    - *
    - * lim(x->0){ exp(-x) I1(x) / x } = 1/2.
    - */
    -
    -#ifdef UNK
    -static double A[] =
    -{
    - 2.77791411276104639959E-18,
    --2.11142121435816608115E-17,
    - 1.55363195773620046921E-16,
    --1.10559694773538630805E-15,
    - 7.60068429473540693410E-15,
    --5.04218550472791168711E-14,
    - 3.22379336594557470981E-13,
    --1.98397439776494371520E-12,
    - 1.17361862988909016308E-11,
    --6.66348972350202774223E-11,
    - 3.62559028155211703701E-10,
    --1.88724975172282928790E-9,
    - 9.38153738649577178388E-9,
    --4.44505912879632808065E-8,
    - 2.00329475355213526229E-7,
    --8.56872026469545474066E-7,
    - 3.47025130813767847674E-6,
    --1.32731636560394358279E-5,
    - 4.78156510755005422638E-5,
    --1.61760815825896745588E-4,
    - 5.12285956168575772895E-4,
    --1.51357245063125314899E-3,
    - 4.15642294431288815669E-3,
    --1.05640848946261981558E-2,
    - 2.47264490306265168283E-2,
    --5.29459812080949914269E-2,
    - 1.02643658689847095384E-1,
    --1.76416518357834055153E-1,
    - 2.52587186443633654823E-1
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short A[] = {
    -0021514,0174520,0060742,0000241,
    -0122302,0137206,0016120,0025663,
    -0023063,0017437,0026235,0176536,
    -0123637,0052523,0170150,0125632,
    -0024410,0165770,0030251,0044134,
    -0125143,0012160,0162170,0054727,
    -0025665,0075702,0035716,0145247,
    -0126413,0116032,0176670,0015462,
    -0027116,0073425,0110351,0105242,
    -0127622,0104034,0137530,0037364,
    -0030307,0050645,0120776,0175535,
    -0131001,0130331,0043523,0037455,
    -0031441,0026160,0010712,0100174,
    -0132076,0164761,0022706,0017500,
    -0032527,0015045,0115076,0104076,
    -0133146,0001714,0015434,0144520,
    -0033550,0161166,0124215,0077050,
    -0134136,0127715,0143365,0157170,
    -0034510,0106652,0013070,0064130,
    -0135051,0117126,0117264,0123761,
    -0035406,0045355,0133066,0175751,
    -0135706,0061420,0054746,0122440,
    -0036210,0031232,0047235,0006640,
    -0136455,0012373,0144235,0011523,
    -0036712,0107437,0036731,0015111,
    -0137130,0156742,0115744,0172743,
    -0037322,0033326,0124667,0124740,
    -0137464,0123210,0021510,0144556,
    -0037601,0051433,0111123,0177721
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short A[] = {
    -0x4014,0x0c3c,0x9f2a,0x3c49,
    -0x0576,0xc38a,0x57d0,0xbc78,
    -0xbfac,0xe593,0x63e3,0x3ca6,
    -0x1573,0x7e0d,0xeaaa,0xbcd3,
    -0x290c,0x0615,0x1d7f,0x3d01,
    -0x0b3b,0x1c8f,0x628e,0xbd2c,
    -0xd955,0x4779,0xaf78,0x3d56,
    -0x0366,0x5fb7,0x7383,0xbd81,
    -0x3154,0xb21d,0xcee2,0x3da9,
    -0x07de,0x97eb,0x5103,0xbdd2,
    -0xdf6c,0xb43f,0xea34,0x3df8,
    -0x67e6,0x28ea,0x361b,0xbe20,
    -0x5010,0x0239,0x258e,0x3e44,
    -0xc3e8,0x24b8,0xdd3e,0xbe67,
    -0xd108,0xb347,0xe344,0x3e8a,
    -0x992a,0x8363,0xc079,0xbeac,
    -0xafc5,0xd511,0x1c4e,0x3ecd,
    -0xbbcf,0xb8de,0xd5f9,0xbeeb,
    -0x0d0b,0x42c7,0x11b5,0x3f09,
    -0x94fe,0xd3d6,0x33ca,0xbf25,
    -0xdf7d,0xb6c6,0xc95d,0x3f40,
    -0xd4a4,0x0b3c,0xcc62,0xbf58,
    -0xa1b4,0x49d3,0x0653,0x3f71,
    -0xa26a,0x7913,0xa29f,0xbf85,
    -0x2349,0xe7bb,0x51e3,0x3f99,
    -0x9ebc,0x537c,0x1bbc,0xbfab,
    -0xf53c,0xd536,0x46da,0x3fba,
    -0x192e,0x0469,0x94d1,0xbfc6,
    -0x7ffa,0x724a,0x2a63,0x3fd0
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short A[] = {
    -0x3c49,0x9f2a,0x0c3c,0x4014,
    -0xbc78,0x57d0,0xc38a,0x0576,
    -0x3ca6,0x63e3,0xe593,0xbfac,
    -0xbcd3,0xeaaa,0x7e0d,0x1573,
    -0x3d01,0x1d7f,0x0615,0x290c,
    -0xbd2c,0x628e,0x1c8f,0x0b3b,
    -0x3d56,0xaf78,0x4779,0xd955,
    -0xbd81,0x7383,0x5fb7,0x0366,
    -0x3da9,0xcee2,0xb21d,0x3154,
    -0xbdd2,0x5103,0x97eb,0x07de,
    -0x3df8,0xea34,0xb43f,0xdf6c,
    -0xbe20,0x361b,0x28ea,0x67e6,
    -0x3e44,0x258e,0x0239,0x5010,
    -0xbe67,0xdd3e,0x24b8,0xc3e8,
    -0x3e8a,0xe344,0xb347,0xd108,
    -0xbeac,0xc079,0x8363,0x992a,
    -0x3ecd,0x1c4e,0xd511,0xafc5,
    -0xbeeb,0xd5f9,0xb8de,0xbbcf,
    -0x3f09,0x11b5,0x42c7,0x0d0b,
    -0xbf25,0x33ca,0xd3d6,0x94fe,
    -0x3f40,0xc95d,0xb6c6,0xdf7d,
    -0xbf58,0xcc62,0x0b3c,0xd4a4,
    -0x3f71,0x0653,0x49d3,0xa1b4,
    -0xbf85,0xa29f,0x7913,0xa26a,
    -0x3f99,0x51e3,0xe7bb,0x2349,
    -0xbfab,0x1bbc,0x537c,0x9ebc,
    -0x3fba,0x46da,0xd536,0xf53c,
    -0xbfc6,0x94d1,0x0469,0x192e,
    -0x3fd0,0x2a63,0x724a,0x7ffa
    -};
    -#endif
    -
    -/*							i1.c	*/
    -
    -/* Chebyshev coefficients for exp(-x) sqrt(x) I1(x)
    - * in the inverted interval [8,infinity].
    - *
    - * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi).
    - */
    -
    -#ifdef UNK
    -static double B[] =
    -{
    - 7.51729631084210481353E-18,
    - 4.41434832307170791151E-18,
    --4.65030536848935832153E-17,
    --3.20952592199342395980E-17,
    - 2.96262899764595013876E-16,
    - 3.30820231092092828324E-16,
    --1.88035477551078244854E-15,
    --3.81440307243700780478E-15,
    - 1.04202769841288027642E-14,
    - 4.27244001671195135429E-14,
    --2.10154184277266431302E-14,
    --4.08355111109219731823E-13,
    --7.19855177624590851209E-13,
    - 2.03562854414708950722E-12,
    - 1.41258074366137813316E-11,
    - 3.25260358301548823856E-11,
    --1.89749581235054123450E-11,
    --5.58974346219658380687E-10,
    --3.83538038596423702205E-9,
    --2.63146884688951950684E-8,
    --2.51223623787020892529E-7,
    --3.88256480887769039346E-6,
    --1.10588938762623716291E-4,
    --9.76109749136146840777E-3,
    - 7.78576235018280120474E-1
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short B[] = {
    -0022012,0125555,0115227,0043456,
    -0021642,0156127,0052075,0145203,
    -0122526,0072435,0111231,0011664,
    -0122424,0001544,0161671,0114403,
    -0023252,0144257,0163532,0142121,
    -0023276,0132162,0174045,0013204,
    -0124007,0077154,0057046,0110517,
    -0124211,0066650,0116127,0157073,
    -0024473,0133413,0130551,0107504,
    -0025100,0064741,0032631,0040364,
    -0124675,0045101,0071551,0012400,
    -0125745,0161054,0071637,0011247,
    -0126112,0117410,0035525,0122231,
    -0026417,0037237,0131034,0176427,
    -0027170,0100373,0024742,0025725,
    -0027417,0006417,0105303,0141446,
    -0127246,0163716,0121202,0060137,
    -0130431,0123122,0120436,0166000,
    -0131203,0144134,0153251,0124500,
    -0131742,0005234,0122732,0033006,
    -0132606,0157751,0072362,0121031,
    -0133602,0043372,0047120,0015626,
    -0134747,0165774,0001125,0046462,
    -0136437,0166402,0117746,0155137,
    -0040107,0050305,0125330,0124241
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short B[] = {
    -0xe8e6,0xb352,0x556d,0x3c61,
    -0xb950,0xea87,0x5b8a,0x3c54,
    -0x2277,0xb253,0xcea3,0xbc8a,
    -0x3320,0x9c77,0x806c,0xbc82,
    -0x588a,0xfceb,0x5915,0x3cb5,
    -0xa2d1,0x5f04,0xd68e,0x3cb7,
    -0xd22a,0x8bc4,0xefcd,0xbce0,
    -0xfbc7,0x138a,0x2db5,0xbcf1,
    -0x31e8,0x762d,0x76e1,0x3d07,
    -0x281e,0x26b3,0x0d3c,0x3d28,
    -0x22a0,0x2e6d,0xa948,0xbd17,
    -0xe255,0x8e73,0xbc45,0xbd5c,
    -0xb493,0x076a,0x53e1,0xbd69,
    -0x9fa3,0xf643,0xe7d3,0x3d81,
    -0x457b,0x653c,0x101f,0x3daf,
    -0x7865,0xf158,0xe1a1,0x3dc1,
    -0x4c0c,0xd450,0xdcf9,0xbdb4,
    -0xdd80,0x5423,0x34ca,0xbe03,
    -0x3528,0x9ad5,0x790b,0xbe30,
    -0x46c1,0x94bb,0x4153,0xbe5c,
    -0x5443,0x2e9e,0xdbfd,0xbe90,
    -0x0373,0x49ca,0x48df,0xbed0,
    -0xa9a6,0x804a,0xfd7f,0xbf1c,
    -0xdb4c,0x53fc,0xfda0,0xbf83,
    -0x1514,0xb55b,0xea18,0x3fe8
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short B[] = {
    -0x3c61,0x556d,0xb352,0xe8e6,
    -0x3c54,0x5b8a,0xea87,0xb950,
    -0xbc8a,0xcea3,0xb253,0x2277,
    -0xbc82,0x806c,0x9c77,0x3320,
    -0x3cb5,0x5915,0xfceb,0x588a,
    -0x3cb7,0xd68e,0x5f04,0xa2d1,
    -0xbce0,0xefcd,0x8bc4,0xd22a,
    -0xbcf1,0x2db5,0x138a,0xfbc7,
    -0x3d07,0x76e1,0x762d,0x31e8,
    -0x3d28,0x0d3c,0x26b3,0x281e,
    -0xbd17,0xa948,0x2e6d,0x22a0,
    -0xbd5c,0xbc45,0x8e73,0xe255,
    -0xbd69,0x53e1,0x076a,0xb493,
    -0x3d81,0xe7d3,0xf643,0x9fa3,
    -0x3daf,0x101f,0x653c,0x457b,
    -0x3dc1,0xe1a1,0xf158,0x7865,
    -0xbdb4,0xdcf9,0xd450,0x4c0c,
    -0xbe03,0x34ca,0x5423,0xdd80,
    -0xbe30,0x790b,0x9ad5,0x3528,
    -0xbe5c,0x4153,0x94bb,0x46c1,
    -0xbe90,0xdbfd,0x2e9e,0x5443,
    -0xbed0,0x48df,0x49ca,0x0373,
    -0xbf1c,0xfd7f,0x804a,0xa9a6,
    -0xbf83,0xfda0,0x53fc,0xdb4c,
    -0x3fe8,0xea18,0xb55b,0x1514
    -};
    -#endif
    -
    -/*							i1.c	*/
    -
    -double i1(x)
    -double x;
    -{ 
    -double y, z;
    -
    -z = fabs(x);
    -if( z <= 8.0 )
    -	{
    -	y = (z/2.0) - 2.0;
    -	z = chbevl( y, A, 29 ) * z * exp(z);
    -	}
    -else
    -	{
    -	z = exp(z) * chbevl( 32.0/z - 2.0, B, 25 ) / sqrt(z);
    -	}
    -if( x < 0.0 )
    -	z = -z;
    -return( z );
    -}
    -
    -/*							i1e()	*/
    -
    -double i1e( x )
    -double x;
    -{ 
    -double y, z;
    -
    -z = fabs(x);
    -if( z <= 8.0 )
    -	{
    -	y = (z/2.0) - 2.0;
    -	z = chbevl( y, A, 29 ) * z;
    -	}
    -else
    -	{
    -	z = chbevl( 32.0/z - 2.0, B, 25 ) / sqrt(z);
    -	}
    -if( x < 0.0 )
    -	z = -z;
    -return( z );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/igam.c b/scipy-0.10.1/scipy/special/cephes/igam.c
    deleted file mode 100644
    index 4850ac2258..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/igam.c
    +++ /dev/null
    @@ -1,211 +0,0 @@
    -/*							igam.c
    - *
    - *	Incomplete Gamma integral
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, x, y, igam();
    - *
    - * y = igam( a, x );
    - *
    - * DESCRIPTION:
    - *
    - * The function is defined by
    - *
    - *                           x
    - *                            -
    - *                   1       | |  -t  a-1
    - *  igam(a,x)  =   -----     |   e   t   dt.
    - *                  -      | |
    - *                 | (a)    -
    - *                           0
    - *
    - *
    - * In this implementation both arguments must be positive.
    - * The integral is evaluated by either a power series or
    - * continued fraction expansion, depending on the relative
    - * values of a and x.
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0,30       200000       3.6e-14     2.9e-15
    - *    IEEE      0,100      300000       9.9e-14     1.5e-14
    - */
    -/*							igamc()
    - *
    - *	Complemented incomplete Gamma integral
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, x, y, igamc();
    - *
    - * y = igamc( a, x );
    - *
    - * DESCRIPTION:
    - *
    - * The function is defined by
    - *
    - *
    - *  igamc(a,x)   =   1 - igam(a,x)
    - *
    - *                            inf.
    - *                              -
    - *                     1       | |  -t  a-1
    - *               =   -----     |   e   t   dt.
    - *                    -      | |
    - *                   | (a)    -
    - *                             x
    - *
    - *
    - * In this implementation both arguments must be positive.
    - * The integral is evaluated by either a power series or
    - * continued fraction expansion, depending on the relative
    - * values of a and x.
    - *
    - * ACCURACY:
    - *
    - * Tested at random a, x.
    - *                a         x                      Relative error:
    - * arithmetic   domain   domain     # trials      peak         rms
    - *    IEEE     0.5,100   0,100      200000       1.9e-14     1.7e-15
    - *    IEEE     0.01,0.5  0,100      200000       1.4e-13     1.6e-15
    - */
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1985, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -extern double MACHEP, MAXLOG;
    -static double big = 4.503599627370496e15;
    -static double biginv =  2.22044604925031308085e-16;
    -
    -double igamc( a, x )
    -double a, x;
    -{
    -double ans, ax, c, yc, r, t, y, z;
    -double pk, pkm1, pkm2, qk, qkm1, qkm2;
    -
    -if( (x < 0) || ( a <= 0) )
    -	{
    -	mtherr("gammaincc", DOMAIN);
    -	return( NPY_NAN );
    -	}
    -
    -if( (x < 1.0) || (x < a) )
    -	return( 1.0 - igam(a,x) );
    -
    -ax = a * log(x) - x - lgam(a);
    -if( ax < -MAXLOG )
    -	{
    -	mtherr( "igamc", UNDERFLOW );
    -	return( 0.0 );
    -	}
    -ax = exp(ax);
    -
    -/* continued fraction */
    -y = 1.0 - a;
    -z = x + y + 1.0;
    -c = 0.0;
    -pkm2 = 1.0;
    -qkm2 = x;
    -pkm1 = x + 1.0;
    -qkm1 = z * x;
    -ans = pkm1/qkm1;
    -
    -do
    -	{
    -	c += 1.0;
    -	y += 1.0;
    -	z += 2.0;
    -	yc = y * c;
    -	pk = pkm1 * z  -  pkm2 * yc;
    -	qk = qkm1 * z  -  qkm2 * yc;
    -	if( qk != 0 )
    -		{
    -		r = pk/qk;
    -		t = fabs( (ans - r)/r );
    -		ans = r;
    -		}
    -	else
    -		t = 1.0;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -	if( fabs(pk) > big )
    -		{
    -		pkm2 *= biginv;
    -		pkm1 *= biginv;
    -		qkm2 *= biginv;
    -		qkm1 *= biginv;
    -		}
    -	}
    -while( t > MACHEP );
    -
    -return( ans * ax );
    -}
    -
    -
    -
    -/* left tail of incomplete Gamma function:
    - *
    - *          inf.      k
    - *   a  -x   -       x
    - *  x  e     >   ----------
    - *           -     -
    - *          k=0   | (a+k+1)
    - *
    - */
    -
    -double igam( a, x )
    -double a, x;
    -{
    -double ans, ax, c, r;
    -
    -/* Check zero integration limit first */
    -if( x == 0 )
    -    return ( 0.0 );
    -
    -if( (x < 0) || ( a <= 0) )
    -	{
    -	mtherr("gammainc", DOMAIN);
    -	return( NPY_NAN );
    -	}
    -
    -if( (x > 1.0) && (x > a ) )
    -	return( 1.0 - igamc(a,x) );
    -
    -/* Compute  x**a * exp(-x) / Gamma(a)  */
    -ax = a * log(x) - x - lgam(a);
    -if( ax < -MAXLOG )
    -	{
    -        mtherr( "igam", UNDERFLOW );
    -	return( 0.0 );
    -	}
    -ax = exp(ax);
    -
    -/* power series */
    -r = a;
    -c = 1.0;
    -ans = 1.0;
    -
    -do
    -	{
    -	r += 1.0;
    -	c *= x/r;
    -	ans += c;
    -	}
    -while( c/ans > MACHEP );
    -
    -return( ans * ax/a );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/igami.c b/scipy-0.10.1/scipy/special/cephes/igami.c
    deleted file mode 100644
    index f3ef286a82..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/igami.c
    +++ /dev/null
    @@ -1,190 +0,0 @@
    -/*							igami()
    - *
    - *      Inverse of complemented imcomplete Gamma integral
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, x, p, igami();
    - *
    - * x = igami( a, p );
    - *
    - * DESCRIPTION:
    - *
    - * Given p, the function finds x such that
    - *
    - *  igamc( a, x ) = p.
    - *
    - * Starting with the approximate value
    - *
    - *         3
    - *  x = a t
    - *
    - *  where
    - *
    - *  t = 1 - d - ndtri(p) sqrt(d)
    - * 
    - * and
    - *
    - *  d = 1/9a,
    - *
    - * the routine performs up to 10 Newton iterations to find the
    - * root of igamc(a,x) - p = 0.
    - *
    - * ACCURACY:
    - *
    - * Tested at random a, p in the intervals indicated.
    - *
    - *                a        p                      Relative error:
    - * arithmetic   domain   domain     # trials      peak         rms
    - *    IEEE     0.5,100   0,0.5       100000       1.0e-14     1.7e-15
    - *    IEEE     0.01,0.5  0,0.5       100000       9.0e-14     3.4e-15
    - *    IEEE    0.5,10000  0,0.5        20000       2.3e-13     3.8e-14
    - */
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -#include 
    -
    -extern double MACHEP, MAXNUM, MAXLOG, MINLOG;
    -
    -double igami( a, y0 )
    -double a, y0;
    -{
    -double x0, x1, x, yl, yh, y, d, lgm, dithresh;
    -int i, dir;
    -
    -/* bound the solution */
    -x0 = MAXNUM;
    -yl = 0;
    -x1 = 0;
    -yh = 1.0;
    -dithresh = 5.0 * MACHEP;
    -
    -if ((y0<0.0) || (y0>1.0) || (a<=0)) {
    -   mtherr("igami", DOMAIN);
    -   return(NPY_NAN);
    -}
    -
    -if (y0==0.0) {
    -  return(MAXNUM);
    -}
    -
    -if (y0==1.0){
    -   return 0.0;
    -}
    -
    -/* approximation to inverse function */
    -d = 1.0/(9.0*a);
    -y = ( 1.0 - d - ndtri(y0) * sqrt(d) );
    -x = a * y * y * y;
    -
    -lgm = lgam(a);
    -
    -for( i=0; i<10; i++ )
    -	{
    -	if( x > x0 || x < x1 )
    -		goto ihalve;
    -	y = igamc(a,x);
    -	if( y < yl || y > yh )
    -		goto ihalve;
    -	if( y < y0 )
    -		{
    -		x0 = x;
    -		yl = y;
    -		}
    -	else
    -		{
    -		x1 = x;
    -		yh = y;
    -		}
    -/* compute the derivative of the function at this point */
    -	d = (a - 1.0) * log(x) - x - lgm;
    -	if( d < -MAXLOG )
    -		goto ihalve;
    -	d = -exp(d);
    -/* compute the step to the next approximation of x */
    -	d = (y - y0)/d;
    -	if( fabs(d/x) < MACHEP )
    -		goto done;
    -	x = x - d;
    -	}
    -
    -/* Resort to interval halving if Newton iteration did not converge. */
    -ihalve:
    -
    -d = 0.0625;
    -if( x0 == MAXNUM )
    -	{
    -	if( x <= 0.0 )
    -		x = 1.0;
    -	while( x0 == MAXNUM )
    -		{
    -		x = (1.0 + d) * x;
    -		y = igamc( a, x );
    -		if( y < y0 )
    -			{
    -			x0 = x;
    -			yl = y;
    -			break;
    -			}
    -		d = d + d;
    -		}
    -	}
    -d = 0.5;
    -dir = 0;
    -
    -for( i=0; i<400; i++ )
    -	{
    -	x = x1  +  d * (x0 - x1);
    -	y = igamc( a, x );
    -	lgm = (x0 - x1)/(x1 + x0);
    -	if( fabs(lgm) < dithresh )
    -		break;
    -	lgm = (y - y0)/y0;
    -	if( fabs(lgm) < dithresh )
    -		break;
    -	if( x <= 0.0 )
    -		break;
    -	if( y >= y0 )
    -		{
    -		x1 = x;
    -		yh = y;
    -		if( dir < 0 )
    -			{
    -			dir = 0;
    -			d = 0.5;
    -			}
    -		else if( dir > 1 )
    -			d = 0.5 * d + 0.5; 
    -		else
    -			d = (y0 - yl)/(yh - yl);
    -		dir += 1;
    -		}
    -	else
    -		{
    -		x0 = x;
    -		yl = y;
    -		if( dir > 0 )
    -			{
    -			dir = 0;
    -			d = 0.5;
    -			}
    -		else if( dir < -1 )
    -			d = 0.5 * d;
    -		else
    -			d = (y0 - yl)/(yh - yl);
    -		dir -= 1;
    -		}
    -	}
    -if( x == 0.0 )
    -	mtherr( "igami", UNDERFLOW );
    -
    -done:
    -return( x );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/incbet.c b/scipy-0.10.1/scipy/special/cephes/incbet.c
    deleted file mode 100644
    index 1260f27374..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/incbet.c
    +++ /dev/null
    @@ -1,398 +0,0 @@
    -/*							incbet.c
    - *
    - *	Incomplete beta integral
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, x, y, incbet();
    - *
    - * y = incbet( a, b, x );
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns incomplete beta integral of the arguments, evaluated
    - * from zero to x.  The function is defined as
    - *
    - *                  x
    - *     -            -
    - *    | (a+b)      | |  a-1     b-1
    - *  -----------    |   t   (1-t)   dt.
    - *   -     -     | |
    - *  | (a) | (b)   -
    - *                 0
    - *
    - * The domain of definition is 0 <= x <= 1.  In this
    - * implementation a and b are restricted to positive values.
    - * The integral from x to 1 may be obtained by the symmetry
    - * relation
    - *
    - *    1 - incbet( a, b, x )  =  incbet( b, a, 1-x ).
    - *
    - * The integral is evaluated by a continued fraction expansion
    - * or, when b*x is small, by a power series.
    - *
    - * ACCURACY:
    - *
    - * Tested at uniformly distributed random points (a,b,x) with a and b
    - * in "domain" and x between 0 and 1.
    - *                                        Relative error
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0,5         10000       6.9e-15     4.5e-16
    - *    IEEE      0,85       250000       2.2e-13     1.7e-14
    - *    IEEE      0,1000      30000       5.3e-12     6.3e-13
    - *    IEEE      0,10000    250000       9.3e-11     7.1e-12
    - *    IEEE      0,100000    10000       8.7e-10     4.8e-11
    - * Outputs smaller than the IEEE gradual underflow threshold
    - * were excluded from these statistics.
    - *
    - * ERROR MESSAGES:
    - *   message         condition      value returned
    - * incbet domain      x<0, x>1          0.0
    - * incbet underflow                     0.0
    - */
    -
    -
    -/*
    -Cephes Math Library, Release 2.3:  March, 1995
    -Copyright 1984, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef DEC
    -#define MAXGAM 34.84425627277176174
    -#else
    -#define MAXGAM 171.624376956302725
    -#endif
    -
    -extern double MACHEP, MINLOG, MAXLOG;
    -
    -static double big = 4.503599627370496e15;
    -static double biginv =  2.22044604925031308085e-16;
    -
    -static double incbcf(double a, double b, double x );
    -static double incbd(double a, double b, double x );
    -static double pseries(double a, double b, double x);
    -
    -double incbet( aa, bb, xx )
    -double aa, bb, xx;
    -{
    -double a, b, t, x, xc, w, y;
    -int flag;
    -
    -if( aa <= 0.0 || bb <= 0.0 )
    -	goto domerr;
    -
    -if( (xx <= 0.0) || ( xx >= 1.0) )
    -	{
    -	if( xx == 0.0 )
    -		return(0.0);
    -	if( xx == 1.0 )
    -		return( 1.0 );
    -domerr:
    -	mtherr( "incbet", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -
    -flag = 0;
    -if( (bb * xx) <= 1.0 && xx <= 0.95)
    -	{
    -	t = pseries(aa, bb, xx);
    -		goto done;
    -	}
    -
    -w = 1.0 - xx;
    -
    -/* Reverse a and b if x is greater than the mean. */
    -if( xx > (aa/(aa+bb)) )
    -	{
    -	flag = 1;
    -	a = bb;
    -	b = aa;
    -	xc = xx;
    -	x = w;
    -	}
    -else
    -	{
    -	a = aa;
    -	b = bb;
    -	xc = w;
    -	x = xx;
    -	}
    -
    -if( flag == 1 && (b * x) <= 1.0 && x <= 0.95)
    -	{
    -	t = pseries(a, b, x);
    -	goto done;
    -	}
    -
    -/* Choose expansion for better convergence. */
    -y = x * (a+b-2.0) - (a-1.0);
    -if( y < 0.0 )
    -	w = incbcf( a, b, x );
    -else
    -	w = incbd( a, b, x ) / xc;
    -
    -/* Multiply w by the factor
    -     a      b   _             _     _
    -    x  (1-x)   | (a+b) / ( a | (a) | (b) ) .   */
    -
    -y = a * log(x);
    -t = b * log(xc);
    -if( (a+b) < MAXGAM && fabs(y) < MAXLOG && fabs(t) < MAXLOG )
    -	{
    -	t = pow(xc,b);
    -	t *= pow(x,a);
    -	t /= a;
    -	t *= w;
    -	t *= Gamma(a+b) / (Gamma(a) * Gamma(b));
    -	goto done;
    -	}
    -/* Resort to logarithms.  */
    -y += t + lgam(a+b) - lgam(a) - lgam(b);
    -y += log(w/a);
    -if( y < MINLOG )
    -	t = 0.0;
    -else
    -	t = exp(y);
    -
    -done:
    -
    -if( flag == 1 )
    -	{
    -	if( t <= MACHEP )
    -		t = 1.0 - MACHEP;
    -	else
    -		t = 1.0 - t;
    -	}
    -return( t );
    -}
    -
    -/* Continued fraction expansion #1
    - * for incomplete beta integral
    - */
    -
    -static double incbcf( a, b, x )
    -double a, b, x;
    -{
    -double xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
    -double k1, k2, k3, k4, k5, k6, k7, k8;
    -double r, t, ans, thresh;
    -int n;
    -
    -k1 = a;
    -k2 = a + b;
    -k3 = a;
    -k4 = a + 1.0;
    -k5 = 1.0;
    -k6 = b - 1.0;
    -k7 = k4;
    -k8 = a + 2.0;
    -
    -pkm2 = 0.0;
    -qkm2 = 1.0;
    -pkm1 = 1.0;
    -qkm1 = 1.0;
    -ans = 1.0;
    -r = 1.0;
    -n = 0;
    -thresh = 3.0 * MACHEP;
    -do
    -	{
    -	
    -	xk = -( x * k1 * k2 )/( k3 * k4 );
    -	pk = pkm1 +  pkm2 * xk;
    -	qk = qkm1 +  qkm2 * xk;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -
    -	xk = ( x * k5 * k6 )/( k7 * k8 );
    -	pk = pkm1 +  pkm2 * xk;
    -	qk = qkm1 +  qkm2 * xk;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -
    -	if( qk != 0 )
    -		r = pk/qk;
    -	if( r != 0 )
    -		{
    -		t = fabs( (ans - r)/r );
    -		ans = r;
    -		}
    -	else
    -		t = 1.0;
    -
    -	if( t < thresh )
    -		goto cdone;
    -
    -	k1 += 1.0;
    -	k2 += 1.0;
    -	k3 += 2.0;
    -	k4 += 2.0;
    -	k5 += 1.0;
    -	k6 -= 1.0;
    -	k7 += 2.0;
    -	k8 += 2.0;
    -
    -	if( (fabs(qk) + fabs(pk)) > big )
    -		{
    -		pkm2 *= biginv;
    -		pkm1 *= biginv;
    -		qkm2 *= biginv;
    -		qkm1 *= biginv;
    -		}
    -	if( (fabs(qk) < biginv) || (fabs(pk) < biginv) )
    -		{
    -		pkm2 *= big;
    -		pkm1 *= big;
    -		qkm2 *= big;
    -		qkm1 *= big;
    -		}
    -	}
    -while( ++n < 300 );
    -
    -cdone:
    -return(ans);
    -}
    -
    -
    -/* Continued fraction expansion #2
    - * for incomplete beta integral
    - */
    -
    -static double incbd( a, b, x )
    -double a, b, x;
    -{
    -double xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
    -double k1, k2, k3, k4, k5, k6, k7, k8;
    -double r, t, ans, z, thresh;
    -int n;
    -
    -k1 = a;
    -k2 = b - 1.0;
    -k3 = a;
    -k4 = a + 1.0;
    -k5 = 1.0;
    -k6 = a + b;
    -k7 = a + 1.0;;
    -k8 = a + 2.0;
    -
    -pkm2 = 0.0;
    -qkm2 = 1.0;
    -pkm1 = 1.0;
    -qkm1 = 1.0;
    -z = x / (1.0-x);
    -ans = 1.0;
    -r = 1.0;
    -n = 0;
    -thresh = 3.0 * MACHEP;
    -do
    -	{
    -	
    -	xk = -( z * k1 * k2 )/( k3 * k4 );
    -	pk = pkm1 +  pkm2 * xk;
    -	qk = qkm1 +  qkm2 * xk;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -
    -	xk = ( z * k5 * k6 )/( k7 * k8 );
    -	pk = pkm1 +  pkm2 * xk;
    -	qk = qkm1 +  qkm2 * xk;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -
    -	if( qk != 0 )
    -		r = pk/qk;
    -	if( r != 0 )
    -		{
    -		t = fabs( (ans - r)/r );
    -		ans = r;
    -		}
    -	else
    -		t = 1.0;
    -
    -	if( t < thresh )
    -		goto cdone;
    -
    -	k1 += 1.0;
    -	k2 -= 1.0;
    -	k3 += 2.0;
    -	k4 += 2.0;
    -	k5 += 1.0;
    -	k6 += 1.0;
    -	k7 += 2.0;
    -	k8 += 2.0;
    -
    -	if( (fabs(qk) + fabs(pk)) > big )
    -		{
    -		pkm2 *= biginv;
    -		pkm1 *= biginv;
    -		qkm2 *= biginv;
    -		qkm1 *= biginv;
    -		}
    -	if( (fabs(qk) < biginv) || (fabs(pk) < biginv) )
    -		{
    -		pkm2 *= big;
    -		pkm1 *= big;
    -		qkm2 *= big;
    -		qkm1 *= big;
    -		}
    -	}
    -while( ++n < 300 );
    -cdone:
    -return(ans);
    -}
    -
    -/* Power series for incomplete beta integral.
    -   Use when b*x is small and x not too close to 1.  */
    -
    -static double pseries( a, b, x )
    -double a, b, x;
    -{
    -double s, t, u, v, n, t1, z, ai;
    -
    -ai = 1.0 / a;
    -u = (1.0 - b) * x;
    -v = u / (a + 1.0);
    -t1 = v;
    -t = u;
    -n = 2.0;
    -s = 0.0;
    -z = MACHEP * ai;
    -while( fabs(v) > z )
    -	{
    -	u = (n - b) * x / n;
    -	t *= u;
    -	v = t / (a + n);
    -	s += v; 
    -	n += 1.0;
    -	}
    -s += t1;
    -s += ai;
    -
    -u = a * log(x);
    -if( (a+b) < MAXGAM && fabs(u) < MAXLOG )
    -	{
    -	t = Gamma(a+b)/(Gamma(a)*Gamma(b));
    -	s = s * t * pow(x,a);
    -	}
    -else
    -	{
    -	t = lgam(a+b) - lgam(a) - lgam(b) + u + log(s);
    -	if( t < MINLOG )
    -		s = 0.0;
    -	else
    -	s = exp(t);
    -	}
    -return(s);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/incbi.c b/scipy-0.10.1/scipy/special/cephes/incbi.c
    deleted file mode 100644
    index 3c989013e5..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/incbi.c
    +++ /dev/null
    @@ -1,302 +0,0 @@
    -/*							incbi()
    - *
    - *      Inverse of imcomplete beta integral
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double a, b, x, y, incbi();
    - *
    - * x = incbi( a, b, y );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Given y, the function finds x such that
    - *
    - *  incbet( a, b, x ) = y .
    - *
    - * The routine performs interval halving or Newton iterations to find the
    - * root of incbet(a,b,x) - y = 0.
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - *                x     a,b
    - * arithmetic   domain  domain  # trials    peak       rms
    - *    IEEE      0,1    .5,10000   50000    5.8e-12   1.3e-13
    - *    IEEE      0,1   .25,100    100000    1.8e-13   3.9e-15
    - *    IEEE      0,1     0,5       50000    1.1e-12   5.5e-15
    - *    VAX       0,1    .5,100     25000    3.5e-14   1.1e-15
    - * With a and b constrained to half-integer or integer values:
    - *    IEEE      0,1    .5,10000   50000    5.8e-12   1.1e-13
    - *    IEEE      0,1    .5,100    100000    1.7e-14   7.9e-16
    - * With a = .5, b constrained to half-integer or integer values:
    - *    IEEE      0,1    .5,10000   10000    8.3e-11   1.0e-11
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.4:  March,1996
    -Copyright 1984, 1996 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -extern double MACHEP, MAXNUM, MAXLOG, MINLOG;
    -
    -double incbi( aa, bb, yy0 )
    -double aa, bb, yy0;
    -{
    -double a, b, y0, d, y, x, x0, x1, lgm, yp, di, dithresh, yl, yh, xt;
    -int i, rflg, dir, nflg;
    -
    -
    -i = 0;
    -if( yy0 <= 0 )
    -	return(0.0);
    -if( yy0 >= 1.0 )
    -	return(1.0);
    -x0 = 0.0;
    -yl = 0.0;
    -x1 = 1.0;
    -yh = 1.0;
    -nflg = 0;
    -
    -if( aa <= 1.0 || bb <= 1.0 )
    -	{
    -	dithresh = 1.0e-6;
    -	rflg = 0;
    -	a = aa;
    -	b = bb;
    -	y0 = yy0;
    -	x = a/(a+b);
    -	y = incbet( a, b, x );
    -	goto ihalve;
    -	}
    -else
    -	{
    -	dithresh = 1.0e-4;
    -	}
    -/* approximation to inverse function */
    -
    -yp = -ndtri(yy0);
    -
    -if( yy0 > 0.5 )
    -	{
    -	rflg = 1;
    -	a = bb;
    -	b = aa;
    -	y0 = 1.0 - yy0;
    -	yp = -yp;
    -	}
    -else
    -	{
    -	rflg = 0;
    -	a = aa;
    -	b = bb;
    -	y0 = yy0;
    -	}
    -
    -lgm = (yp * yp - 3.0)/6.0;
    -x = 2.0/( 1.0/(2.0*a-1.0)  +  1.0/(2.0*b-1.0) );
    -d = yp * sqrt( x + lgm ) / x
    -	- ( 1.0/(2.0*b-1.0) - 1.0/(2.0*a-1.0) )
    -	* (lgm + 5.0/6.0 - 2.0/(3.0*x));
    -d = 2.0 * d;
    -if( d < MINLOG )
    -	{
    -	x = 1.0;
    -	goto under;
    -	}
    -x = a/( a + b * exp(d) );
    -y = incbet( a, b, x );
    -yp = (y - y0)/y0;
    -if( fabs(yp) < 0.2 )
    -	goto newt;
    -
    -/* Resort to interval halving if not close enough. */
    -ihalve:
    -
    -dir = 0;
    -di = 0.5;
    -for( i=0; i<100; i++ )
    -	{
    -	if( i != 0 )
    -		{
    -		x = x0  +  di * (x1 - x0);
    -		if( x == 1.0 )
    -			x = 1.0 - MACHEP;
    -		if( x == 0.0 )
    -			{
    -			di = 0.5;
    -			x = x0  +  di * (x1 - x0);
    -			if( x == 0.0 )
    -				goto under;
    -			}
    -		y = incbet( a, b, x );
    -		yp = (x1 - x0)/(x1 + x0);
    -		if( fabs(yp) < dithresh )
    -			goto newt;
    -		yp = (y-y0)/y0;
    -		if( fabs(yp) < dithresh )
    -			goto newt;
    -		}
    -	if( y < y0 )
    -		{
    -		x0 = x;
    -		yl = y;
    -		if( dir < 0 )
    -			{
    -			dir = 0;
    -			di = 0.5;
    -			}
    -		else if( dir > 3 )
    -			di = 1.0 - (1.0 - di) * (1.0 - di);
    -		else if( dir > 1 )
    -			di = 0.5 * di + 0.5; 
    -		else
    -			di = (y0 - y)/(yh - yl);
    -		dir += 1;
    -		if( x0 > 0.75 )
    -			{
    -			if( rflg == 1 )
    -				{
    -				rflg = 0;
    -				a = aa;
    -				b = bb;
    -				y0 = yy0;
    -				}
    -			else
    -				{
    -				rflg = 1;
    -				a = bb;
    -				b = aa;
    -				y0 = 1.0 - yy0;
    -				}
    -			x = 1.0 - x;
    -			y = incbet( a, b, x );
    -			x0 = 0.0;
    -			yl = 0.0;
    -			x1 = 1.0;
    -			yh = 1.0;
    -			goto ihalve;
    -			}
    -		}
    -	else
    -		{
    -		x1 = x;
    -		if( rflg == 1 && x1 < MACHEP )
    -			{
    -			x = 0.0;
    -			goto done;
    -			}
    -		yh = y;
    -		if( dir > 0 )
    -			{
    -			dir = 0;
    -			di = 0.5;
    -			}
    -		else if( dir < -3 )
    -			di = di * di;
    -		else if( dir < -1 )
    -			di = 0.5 * di;
    -		else
    -			di = (y - y0)/(yh - yl);
    -		dir -= 1;
    -		}
    -	}
    -mtherr( "incbi", PLOSS );
    -if( x0 >= 1.0 )
    -	{
    -	x = 1.0 - MACHEP;
    -	goto done;
    -	}
    -if( x <= 0.0 )
    -	{
    -under:
    -	mtherr( "incbi", UNDERFLOW );
    -	x = 0.0;
    -	goto done;
    -	}
    -
    -newt:
    -
    -if( nflg )
    -	goto done;
    -nflg = 1;
    -lgm = lgam(a+b) - lgam(a) - lgam(b);
    -
    -for( i=0; i<8; i++ )
    -	{
    -	/* Compute the function at this point. */
    -	if( i != 0 )
    -		y = incbet(a,b,x);
    -	if( y < yl )
    -		{
    -		x = x0;
    -		y = yl;
    -		}
    -	else if( y > yh )
    -		{
    -		x = x1;
    -		y = yh;
    -		}
    -	else if( y < y0 )
    -		{
    -		x0 = x;
    -		yl = y;
    -		}
    -	else
    -		{
    -		x1 = x;
    -		yh = y;
    -		}
    -	if( x == 1.0 || x == 0.0 )
    -		break;
    -	/* Compute the derivative of the function at this point. */
    -	d = (a - 1.0) * log(x) + (b - 1.0) * log(1.0-x) + lgm;
    -	if( d < MINLOG )
    -		goto done;
    -	if( d > MAXLOG )
    -		break;
    -	d = exp(d);
    -	/* Compute the step to the next approximation of x. */
    -	d = (y - y0)/d;
    -	xt = x - d;
    -	if( xt <= x0 )
    -		{
    -		y = (x - x0) / (x1 - x0);
    -		xt = x0 + 0.5 * y * (x - x0);
    -		if( xt <= 0.0 )
    -			break;
    -		}
    -	if( xt >= x1 )
    -		{
    -		y = (x1 - x) / (x1 - x0);
    -		xt = x1 - 0.5 * y * (x1 - x);
    -		if( xt >= 1.0 )
    -			break;
    -		}
    -	x = xt;
    -	if( fabs(d/x) < 128.0 * MACHEP )
    -		goto done;
    -	}
    -/* Did not converge.  */
    -dithresh = 256.0 * MACHEP;
    -goto ihalve;
    -
    -done:
    -
    -if( rflg )
    -	{
    -	if( x <= MACHEP )
    -		x = 1.0 - MACHEP;
    -	else
    -		x = 1.0 - x;
    -	}
    -return( x );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/j0.c b/scipy-0.10.1/scipy/special/cephes/j0.c
    deleted file mode 100644
    index 484d532e47..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/j0.c
    +++ /dev/null
    @@ -1,532 +0,0 @@
    -/*							j0.c
    - *
    - *	Bessel function of order zero
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, j0();
    - *
    - * y = j0( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Bessel function of order zero of the argument.
    - *
    - * The domain is divided into the intervals [0, 5] and
    - * (5, infinity). In the first interval the following rational
    - * approximation is used:
    - *
    - *
    - *        2         2
    - * (w - r  ) (w - r  ) P (w) / Q (w)
    - *       1         2    3       8
    - *
    - *            2
    - * where w = x  and the two r's are zeros of the function.
    - *
    - * In the second interval, the Hankel asymptotic expansion
    - * is employed with two rational functions of degree 6/6
    - * and 7/7.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Absolute error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30       10000       4.4e-17     6.3e-18
    - *    IEEE      0, 30       60000       4.2e-16     1.1e-16
    - *
    - */
    -/*							y0.c
    - *
    - *	Bessel function of the second kind, order zero
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, y0();
    - *
    - * y = y0( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Bessel function of the second kind, of order
    - * zero, of the argument.
    - *
    - * The domain is divided into the intervals [0, 5] and
    - * (5, infinity). In the first interval a rational approximation
    - * R(x) is employed to compute
    - *   y0(x)  = R(x)  +   2 * log(x) * j0(x) / PI.
    - * Thus a call to j0() is required.
    - *
    - * In the second interval, the Hankel asymptotic expansion
    - * is employed with two rational functions of degree 6/6
    - * and 7/7.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *  Absolute error, when y0(x) < 1; else relative error:
    - *
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30        9400       7.0e-17     7.9e-18
    - *    IEEE      0, 30       30000       1.3e-15     1.6e-16
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier
    -*/
    -
    -/* Note: all coefficients satisfy the relative error criterion
    - * except YP, YQ which are designed for absolute error. */
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double PP[7] = {
    -  7.96936729297347051624E-4,
    -  8.28352392107440799803E-2,
    -  1.23953371646414299388E0,
    -  5.44725003058768775090E0,
    -  8.74716500199817011941E0,
    -  5.30324038235394892183E0,
    -  9.99999999999999997821E-1,
    -};
    -static double PQ[7] = {
    -  9.24408810558863637013E-4,
    -  8.56288474354474431428E-2,
    -  1.25352743901058953537E0,
    -  5.47097740330417105182E0,
    -  8.76190883237069594232E0,
    -  5.30605288235394617618E0,
    -  1.00000000000000000218E0,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short PP[28] = {
    -0035520,0164604,0140733,0054470,
    -0037251,0122605,0115356,0107170,
    -0040236,0124412,0071500,0056303,
    -0040656,0047737,0045720,0045263,
    -0041013,0172143,0045004,0142103,
    -0040651,0132045,0026241,0026406,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short PQ[28] = {
    -0035562,0052006,0070034,0134666,
    -0037257,0057055,0055242,0123424,
    -0040240,0071626,0046630,0032371,
    -0040657,0011077,0032013,0012731,
    -0041014,0030307,0050331,0006414,
    -0040651,0145457,0065021,0150304,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short PP[28] = {
    -0x6b27,0x983b,0x1d30,0x3f4a,
    -0xd1cf,0xb35d,0x34b0,0x3fb5,
    -0x0b98,0x4e68,0xd521,0x3ff3,
    -0x0956,0xe97a,0xc9fb,0x4015,
    -0x9888,0x6940,0x7e8c,0x4021,
    -0x25a1,0xa594,0x3684,0x4015,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short PQ[28] = {
    -0x9737,0xce03,0x4a80,0x3f4e,
    -0x54e3,0xab54,0xebc5,0x3fb5,
    -0x069f,0xc9b3,0x0e72,0x3ff4,
    -0x62bb,0xe681,0xe247,0x4015,
    -0x21a1,0xea1b,0x8618,0x4021,
    -0x3a19,0xed42,0x3965,0x4015,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short PP[28] = {
    -0x3f4a,0x1d30,0x983b,0x6b27,
    -0x3fb5,0x34b0,0xb35d,0xd1cf,
    -0x3ff3,0xd521,0x4e68,0x0b98,
    -0x4015,0xc9fb,0xe97a,0x0956,
    -0x4021,0x7e8c,0x6940,0x9888,
    -0x4015,0x3684,0xa594,0x25a1,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short PQ[28] = {
    -0x3f4e,0x4a80,0xce03,0x9737,
    -0x3fb5,0xebc5,0xab54,0x54e3,
    -0x3ff4,0x0e72,0xc9b3,0x069f,
    -0x4015,0xe247,0xe681,0x62bb,
    -0x4021,0x8618,0xea1b,0x21a1,
    -0x4015,0x3965,0xed42,0x3a19,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double QP[8] = {
    --1.13663838898469149931E-2,
    --1.28252718670509318512E0,
    --1.95539544257735972385E1,
    --9.32060152123768231369E1,
    --1.77681167980488050595E2,
    --1.47077505154951170175E2,
    --5.14105326766599330220E1,
    --6.05014350600728481186E0,
    -};
    -static double QQ[7] = {
    -/*  1.00000000000000000000E0,*/
    -  6.43178256118178023184E1,
    -  8.56430025976980587198E2,
    -  3.88240183605401609683E3,
    -  7.24046774195652478189E3,
    -  5.93072701187316984827E3,
    -  2.06209331660327847417E3,
    -  2.42005740240291393179E2,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short QP[32] = {
    -0136472,0035021,0142451,0141115,
    -0140244,0024731,0150620,0105642,
    -0141234,0067177,0124161,0060141,
    -0141672,0064572,0151557,0043036,
    -0142061,0127141,0003127,0043517,
    -0142023,0011727,0060271,0144544,
    -0141515,0122142,0126620,0143150,
    -0140701,0115306,0106715,0007344,
    -};
    -static unsigned short QQ[28] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041600,0121272,0004741,0026544,
    -0042526,0015605,0105654,0161771,
    -0043162,0123155,0165644,0062645,
    -0043342,0041675,0167576,0130756,
    -0043271,0052720,0165631,0154214,
    -0043000,0160576,0034614,0172024,
    -0042162,0000570,0030500,0051235,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short QP[32] = {
    -0x384a,0x38a5,0x4742,0xbf87,
    -0x1174,0x3a32,0x853b,0xbff4,
    -0x2c0c,0xf50e,0x8dcf,0xc033,
    -0xe8c4,0x5a6d,0x4d2f,0xc057,
    -0xe8ea,0x20ca,0x35cc,0xc066,
    -0x392d,0xec17,0x627a,0xc062,
    -0x18cd,0x55b2,0xb48c,0xc049,
    -0xa1dd,0xd1b9,0x3358,0xc018,
    -};
    -static unsigned short QQ[28] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x25ac,0x413c,0x1457,0x4050,
    -0x9c7f,0xb175,0xc370,0x408a,
    -0x8cb5,0xbd74,0x54cd,0x40ae,
    -0xd63e,0xbdef,0x4877,0x40bc,
    -0x3b11,0x1d73,0x2aba,0x40b7,
    -0x9e82,0xc731,0x1c2f,0x40a0,
    -0x0a54,0x0628,0x402f,0x406e,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short QP[32] = {
    -0xbf87,0x4742,0x38a5,0x384a,
    -0xbff4,0x853b,0x3a32,0x1174,
    -0xc033,0x8dcf,0xf50e,0x2c0c,
    -0xc057,0x4d2f,0x5a6d,0xe8c4,
    -0xc066,0x35cc,0x20ca,0xe8ea,
    -0xc062,0x627a,0xec17,0x392d,
    -0xc049,0xb48c,0x55b2,0x18cd,
    -0xc018,0x3358,0xd1b9,0xa1dd,
    -};
    -static unsigned short QQ[28] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4050,0x1457,0x413c,0x25ac,
    -0x408a,0xc370,0xb175,0x9c7f,
    -0x40ae,0x54cd,0xbd74,0x8cb5,
    -0x40bc,0x4877,0xbdef,0xd63e,
    -0x40b7,0x2aba,0x1d73,0x3b11,
    -0x40a0,0x1c2f,0xc731,0x9e82,
    -0x406e,0x402f,0x0628,0x0a54,
    -};
    -#endif
    -
    -
    -#ifdef UNK
    -static double YP[8] = {
    - 1.55924367855235737965E4,
    --1.46639295903971606143E7,
    - 5.43526477051876500413E9,
    --9.82136065717911466409E11,
    - 8.75906394395366999549E13,
    --3.46628303384729719441E15,
    - 4.42733268572569800351E16,
    --1.84950800436986690637E16,
    -};
    -static double YQ[7] = {
    -/* 1.00000000000000000000E0,*/
    - 1.04128353664259848412E3,
    - 6.26107330137134956842E5,
    - 2.68919633393814121987E8,
    - 8.64002487103935000337E10,
    - 2.02979612750105546709E13,
    - 3.17157752842975028269E15,
    - 2.50596256172653059228E17,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short YP[32] = {
    -0043563,0120677,0042264,0046166,
    -0146137,0140371,0113444,0042260,
    -0050241,0175707,0100502,0063344,
    -0152144,0125737,0007265,0164526,
    -0053637,0051621,0163035,0060546,
    -0155105,0004416,0107306,0060023,
    -0056035,0045133,0030132,0000024,
    -0155603,0065132,0144061,0131732,
    -};
    -static unsigned short YQ[28] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0042602,0024422,0135557,0162663,
    -0045030,0155665,0044075,0160135,
    -0047200,0035432,0105446,0104005,
    -0051240,0167331,0056063,0022743,
    -0053223,0127746,0025764,0012160,
    -0055064,0044206,0177532,0145545,
    -0056536,0111375,0163715,0127201,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short YP[32] = {
    -0x898f,0xe896,0x7437,0x40ce,
    -0x8896,0x32e4,0xf81f,0xc16b,
    -0x4cdd,0xf028,0x3f78,0x41f4,
    -0xbd2b,0xe1d6,0x957b,0xc26c,
    -0xac2d,0x3cc3,0xea72,0x42d3,
    -0xcc02,0xd1d8,0xa121,0xc328,
    -0x4003,0x660b,0xa94b,0x4363,
    -0x367b,0x5906,0x6d4b,0xc350,
    -};
    -static unsigned short YQ[28] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xfcb6,0x576d,0x4522,0x4090,
    -0xbc0c,0xa907,0x1b76,0x4123,
    -0xd101,0x5164,0x0763,0x41b0,
    -0x64bc,0x2b86,0x1ddb,0x4234,
    -0x828e,0xc57e,0x75fc,0x42b2,
    -0x596d,0xdfeb,0x8910,0x4326,
    -0xb5d0,0xbcf9,0xd25f,0x438b,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short YP[32] = {
    -0x40ce,0x7437,0xe896,0x898f,
    -0xc16b,0xf81f,0x32e4,0x8896,
    -0x41f4,0x3f78,0xf028,0x4cdd,
    -0xc26c,0x957b,0xe1d6,0xbd2b,
    -0x42d3,0xea72,0x3cc3,0xac2d,
    -0xc328,0xa121,0xd1d8,0xcc02,
    -0x4363,0xa94b,0x660b,0x4003,
    -0xc350,0x6d4b,0x5906,0x367b,
    -};
    -static unsigned short YQ[28] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4090,0x4522,0x576d,0xfcb6,
    -0x4123,0x1b76,0xa907,0xbc0c,
    -0x41b0,0x0763,0x5164,0xd101,
    -0x4234,0x1ddb,0x2b86,0x64bc,
    -0x42b2,0x75fc,0xc57e,0x828e,
    -0x4326,0x8910,0xdfeb,0x596d,
    -0x438b,0xd25f,0xbcf9,0xb5d0,
    -};
    -#endif
    -
    -#ifdef UNK
    -/*  5.783185962946784521175995758455807035071 */
    -static double DR1 = 5.78318596294678452118E0;
    -/* 30.47126234366208639907816317502275584842 */
    -static double DR2 = 3.04712623436620863991E1;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short R1[] = {0040671,0007734,0001061,0056734};
    -#define DR1 *(double *)R1
    -static unsigned short R2[] = {0041363,0142445,0030416,0165567};
    -#define DR2 *(double *)R2
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short R1[] = {0x2bbb,0x8046,0x21fb,0x4017};
    -#define DR1 *(double *)R1
    -static unsigned short R2[] = {0xdd6f,0xa621,0x78a4,0x403e};
    -#define DR2 *(double *)R2
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short R1[] = {0x4017,0x21fb,0x8046,0x2bbb};
    -#define DR1 *(double *)R1
    -static unsigned short R2[] = {0x403e,0x78a4,0xa621,0xdd6f};
    -#define DR2 *(double *)R2
    -#endif
    -
    -#ifdef UNK
    -static double RP[4] = {
    --4.79443220978201773821E9,
    - 1.95617491946556577543E12,
    --2.49248344360967716204E14,
    - 9.70862251047306323952E15,
    -};
    -static double RQ[8] = {
    -/* 1.00000000000000000000E0,*/
    - 4.99563147152651017219E2,
    - 1.73785401676374683123E5,
    - 4.84409658339962045305E7,
    - 1.11855537045356834862E10,
    - 2.11277520115489217587E12,
    - 3.10518229857422583814E14,
    - 3.18121955943204943306E16,
    - 1.71086294081043136091E18,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short RP[16] = {
    -0150216,0161235,0064344,0014450,
    -0052343,0135216,0035624,0144153,
    -0154142,0130247,0003310,0003667,
    -0055411,0173703,0047772,0176635,
    -};
    -static unsigned short RQ[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0042371,0144025,0032265,0136137,
    -0044451,0133131,0132420,0151466,
    -0046470,0144641,0072540,0030636,
    -0050446,0126600,0045042,0044243,
    -0052365,0172633,0110301,0071063,
    -0054215,0032424,0062272,0043513,
    -0055742,0005013,0171731,0072335,
    -0057275,0170646,0036663,0013134,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short RP[16] = {
    -0x8325,0xad1c,0xdc53,0xc1f1,
    -0x990d,0xc772,0x7751,0x427c,
    -0x00f7,0xe0d9,0x5614,0xc2ec,
    -0x5fb4,0x69ff,0x3ef8,0x4341,
    -};
    -static unsigned short RQ[32] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xb78c,0xa696,0x3902,0x407f,
    -0x1a67,0x36a2,0x36cb,0x4105,
    -0x0634,0x2eac,0x1934,0x4187,
    -0x4914,0x0944,0xd5b0,0x4204,
    -0x2e46,0x7218,0xbeb3,0x427e,
    -0x48e9,0x8c97,0xa6a2,0x42f1,
    -0x2e9c,0x7e7b,0x4141,0x435c,
    -0x62cc,0xc7b6,0xbe34,0x43b7,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short RP[16] = {
    -0xc1f1,0xdc53,0xad1c,0x8325,
    -0x427c,0x7751,0xc772,0x990d,
    -0xc2ec,0x5614,0xe0d9,0x00f7,
    -0x4341,0x3ef8,0x69ff,0x5fb4,
    -};
    -static unsigned short RQ[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x407f,0x3902,0xa696,0xb78c,
    -0x4105,0x36cb,0x36a2,0x1a67,
    -0x4187,0x1934,0x2eac,0x0634,
    -0x4204,0xd5b0,0x0944,0x4914,
    -0x427e,0xbeb3,0x7218,0x2e46,
    -0x42f1,0xa6a2,0x8c97,0x48e9,
    -0x435c,0x4141,0x7e7b,0x2e9c,
    -0x43b7,0xbe34,0xc7b6,0x62cc,
    -};
    -#endif
    -
    -extern double TWOOPI, SQ2OPI;
    -
    -double j0(x)
    -double x;
    -{
    -double w, z, p, q, xn;
    -
    -if( x < 0 )
    -	x = -x;
    -
    -if( x <= 5.0 )
    -	{
    -	z = x * x;
    -	if( x < 1.0e-5 )
    -		return( 1.0 - z/4.0 );
    -
    -	p = (z - DR1) * (z - DR2);
    -	p = p * polevl( z, RP, 3)/p1evl( z, RQ, 8 );
    -	return( p );
    -	}
    -
    -w = 5.0/x;
    -q = 25.0/(x*x);
    -p = polevl( q, PP, 6)/polevl( q, PQ, 6 );
    -q = polevl( q, QP, 7)/p1evl( q, QQ, 7 );
    -xn = x - NPY_PI_4;
    -p = p * cos(xn) - w * q * sin(xn);
    -return( p * SQ2OPI / sqrt(x) );
    -}
    -
    -/*							y0() 2	*/
    -/* Bessel function of second kind, order zero	*/
    -
    -/* Rational approximation coefficients YP[], YQ[] are used here.
    - * The function computed is  y0(x)  -  2 * log(x) * j0(x) / PI,
    - * whose value at x = 0 is  2 * ( log(0.5) + EUL ) / PI
    - * = 0.073804295108687225.
    - */
    -
    -/*
    -#define NPY_PI_4 .78539816339744830962
    -#define SQ2OPI .79788456080286535588
    -*/
    -
    -double y0(x)
    -double x;
    -{
    -double w, z, p, q, xn;
    -
    -if( x <= 5.0 )
    -	{
    -	if (x == 0.0) {
    -		mtherr("y0", SING);
    -		return -NPY_INFINITY;
    -	} else if (x < 0.0) {
    -		mtherr("y0", DOMAIN);
    -		return NPY_NAN;
    -	}
    -	z = x * x;
    -	w = polevl( z, YP, 7) / p1evl( z, YQ, 7 );
    -	w += TWOOPI * log(x) * j0(x);
    -	return( w );
    -	}
    -
    -w = 5.0/x;
    -z = 25.0 / (x * x);
    -p = polevl( z, PP, 6)/polevl( z, PQ, 6 );
    -q = polevl( z, QP, 7)/p1evl( z, QQ, 7 );
    -xn = x - NPY_PI_4;
    -p = p * sin(xn) + w * q * cos(xn);
    -return( p * SQ2OPI / sqrt(x) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/j1.c b/scipy-0.10.1/scipy/special/cephes/j1.c
    deleted file mode 100644
    index 422a15657e..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/j1.c
    +++ /dev/null
    @@ -1,503 +0,0 @@
    -/*							j1.c
    - *
    - *	Bessel function of order one
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, j1();
    - *
    - * y = j1( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Bessel function of order one of the argument.
    - *
    - * The domain is divided into the intervals [0, 8] and
    - * (8, infinity). In the first interval a 24 term Chebyshev
    - * expansion is used. In the second, the asymptotic
    - * trigonometric representation is employed using two
    - * rational functions of degree 5/5.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Absolute error:
    - * arithmetic   domain      # trials      peak         rms
    - *    DEC       0, 30       10000       4.0e-17     1.1e-17
    - *    IEEE      0, 30       30000       2.6e-16     1.1e-16
    - *
    - *
    - */
    -/*							y1.c
    - *
    - *	Bessel function of second kind of order one
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, y1();
    - *
    - * y = y1( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Bessel function of the second kind of order one
    - * of the argument.
    - *
    - * The domain is divided into the intervals [0, 8] and
    - * (8, infinity). In the first interval a 25 term Chebyshev
    - * expansion is used, and a call to j1() is required.
    - * In the second, the asymptotic trigonometric representation
    - * is employed using two rational functions of degree 5/5.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Absolute error:
    - * arithmetic   domain      # trials      peak         rms
    - *    DEC       0, 30       10000       8.6e-17     1.3e-17
    - *    IEEE      0, 30       30000       1.0e-15     1.3e-16
    - *
    - * (error criterion relative when |y1| > 1).
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier
    -*/
    -
    -/*
    -#define PIO4 .78539816339744830962
    -#define THPIO4 2.35619449019234492885
    -#define SQ2OPI .79788456080286535588
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double RP[4] = {
    --8.99971225705559398224E8,
    - 4.52228297998194034323E11,
    --7.27494245221818276015E13,
    - 3.68295732863852883286E15,
    -};
    -static double RQ[8] = {
    -/* 1.00000000000000000000E0,*/
    - 6.20836478118054335476E2,
    - 2.56987256757748830383E5,
    - 8.35146791431949253037E7,
    - 2.21511595479792499675E10,
    - 4.74914122079991414898E12,
    - 7.84369607876235854894E14,
    - 8.95222336184627338078E16,
    - 5.32278620332680085395E18,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short RP[16] = {
    -0147526,0110742,0063322,0077052,
    -0051722,0112720,0065034,0061530,
    -0153604,0052227,0033147,0105650,
    -0055121,0055025,0032276,0022015,
    -};
    -static unsigned short RQ[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0042433,0032610,0155604,0033473,
    -0044572,0173320,0067270,0006616,
    -0046637,0045246,0162225,0006606,
    -0050645,0004773,0157577,0053004,
    -0052612,0033734,0001667,0176501,
    -0054462,0054121,0173147,0121367,
    -0056237,0002777,0121451,0176007,
    -0057623,0136253,0131601,0044710,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short RP[16] = {
    -0x4fc5,0x4cda,0xd23c,0xc1ca,
    -0x8c6b,0x0d43,0x52ba,0x425a,
    -0xf175,0xe6cc,0x8a92,0xc2d0,
    -0xc482,0xa697,0x2b42,0x432a,
    -};
    -static unsigned short RQ[32] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x86e7,0x1b70,0x66b1,0x4083,
    -0x01b2,0x0dd7,0x5eda,0x410f,
    -0xa1b1,0xdc92,0xe954,0x4193,
    -0xeac1,0x7bef,0xa13f,0x4214,
    -0xffa8,0x8076,0x46fb,0x4291,
    -0xf45f,0x3ecc,0x4b0a,0x4306,
    -0x3f81,0xf465,0xe0bf,0x4373,
    -0x2939,0x7670,0x7795,0x43d2,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short RP[16] = {
    -0xc1ca,0xd23c,0x4cda,0x4fc5,
    -0x425a,0x52ba,0x0d43,0x8c6b,
    -0xc2d0,0x8a92,0xe6cc,0xf175,
    -0x432a,0x2b42,0xa697,0xc482,
    -};
    -static unsigned short RQ[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4083,0x66b1,0x1b70,0x86e7,
    -0x410f,0x5eda,0x0dd7,0x01b2,
    -0x4193,0xe954,0xdc92,0xa1b1,
    -0x4214,0xa13f,0x7bef,0xeac1,
    -0x4291,0x46fb,0x8076,0xffa8,
    -0x4306,0x4b0a,0x3ecc,0xf45f,
    -0x4373,0xe0bf,0xf465,0x3f81,
    -0x43d2,0x7795,0x7670,0x2939,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double PP[7] = {
    - 7.62125616208173112003E-4,
    - 7.31397056940917570436E-2,
    - 1.12719608129684925192E0,
    - 5.11207951146807644818E0,
    - 8.42404590141772420927E0,
    - 5.21451598682361504063E0,
    - 1.00000000000000000254E0,
    -};
    -static double PQ[7] = {
    - 5.71323128072548699714E-4,
    - 6.88455908754495404082E-2,
    - 1.10514232634061696926E0,
    - 5.07386386128601488557E0,
    - 8.39985554327604159757E0,
    - 5.20982848682361821619E0,
    - 9.99999999999999997461E-1,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short PP[28] = {
    -0035507,0144542,0061543,0024326,
    -0037225,0145105,0017766,0022661,
    -0040220,0043766,0010254,0133255,
    -0040643,0113047,0142611,0151521,
    -0041006,0144344,0055351,0074261,
    -0040646,0156520,0120574,0006416,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short PQ[28] = {
    -0035425,0142330,0115041,0165514,
    -0037214,0177352,0145105,0052026,
    -0040215,0072515,0141207,0073255,
    -0040642,0056427,0137222,0106405,
    -0041006,0062716,0166427,0165450,
    -0040646,0133352,0035425,0123304,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short PP[28] = {
    -0x651b,0x4c6c,0xf92c,0x3f48,
    -0xc4b6,0xa3fe,0xb948,0x3fb2,
    -0x96d6,0xc215,0x08fe,0x3ff2,
    -0x3a6a,0xf8b1,0x72c4,0x4014,
    -0x2f16,0x8b5d,0xd91c,0x4020,
    -0x81a2,0x142f,0xdbaa,0x4014,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short PQ[28] = {
    -0x3d69,0x1344,0xb89b,0x3f42,
    -0xaa83,0x5948,0x9fdd,0x3fb1,
    -0xeed6,0xb850,0xaea9,0x3ff1,
    -0x51a1,0xf7d2,0x4ba2,0x4014,
    -0xfd65,0xdda2,0xccb9,0x4020,
    -0xb4d9,0x4762,0xd6dd,0x4014,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short PP[28] = {
    -0x3f48,0xf92c,0x4c6c,0x651b,
    -0x3fb2,0xb948,0xa3fe,0xc4b6,
    -0x3ff2,0x08fe,0xc215,0x96d6,
    -0x4014,0x72c4,0xf8b1,0x3a6a,
    -0x4020,0xd91c,0x8b5d,0x2f16,
    -0x4014,0xdbaa,0x142f,0x81a2,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short PQ[28] = {
    -0x3f42,0xb89b,0x1344,0x3d69,
    -0x3fb1,0x9fdd,0x5948,0xaa83,
    -0x3ff1,0xaea9,0xb850,0xeed6,
    -0x4014,0x4ba2,0xf7d2,0x51a1,
    -0x4020,0xccb9,0xdda2,0xfd65,
    -0x4014,0xd6dd,0x4762,0xb4d9,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double QP[8] = {
    - 5.10862594750176621635E-2,
    - 4.98213872951233449420E0,
    - 7.58238284132545283818E1,
    - 3.66779609360150777800E2,
    - 7.10856304998926107277E2,
    - 5.97489612400613639965E2,
    - 2.11688757100572135698E2,
    - 2.52070205858023719784E1,
    -};
    -static double QQ[7] = {
    -/* 1.00000000000000000000E0,*/
    - 7.42373277035675149943E1,
    - 1.05644886038262816351E3,
    - 4.98641058337653607651E3,
    - 9.56231892404756170795E3,
    - 7.99704160447350683650E3,
    - 2.82619278517639096600E3,
    - 3.36093607810698293419E2,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short QP[32] = {
    -0037121,0037723,0055605,0151004,
    -0040637,0066656,0031554,0077264,
    -0041627,0122714,0153170,0161466,
    -0042267,0061712,0036520,0140145,
    -0042461,0133315,0131573,0071176,
    -0042425,0057525,0147500,0013201,
    -0042123,0130122,0061245,0154131,
    -0041311,0123772,0064254,0172650,
    -};
    -static unsigned short QQ[28] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041624,0074603,0002112,0101670,
    -0042604,0007135,0010162,0175565,
    -0043233,0151510,0157757,0172010,
    -0043425,0064506,0112006,0104276,
    -0043371,0164125,0032271,0164242,
    -0043060,0121425,0122750,0136013,
    -0042250,0005773,0053472,0146267,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short QP[32] = {
    -0xba40,0x6b70,0x27fa,0x3faa,
    -0x8fd6,0xc66d,0xedb5,0x4013,
    -0x1c67,0x9acf,0xf4b9,0x4052,
    -0x180d,0x47aa,0xec79,0x4076,
    -0x6e50,0xb66f,0x36d9,0x4086,
    -0x02d0,0xb9e8,0xabea,0x4082,
    -0xbb0b,0x4c54,0x760a,0x406a,
    -0x9eb5,0x4d15,0x34ff,0x4039,
    -};
    -static unsigned short QQ[28] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x5077,0x6089,0x8f30,0x4052,
    -0x5f6f,0xa20e,0x81cb,0x4090,
    -0xfe81,0x1bfd,0x7a69,0x40b3,
    -0xd118,0xd280,0xad28,0x40c2,
    -0x3d14,0xa697,0x3d0a,0x40bf,
    -0x1781,0xb4bd,0x1462,0x40a6,
    -0x5997,0x6ae7,0x017f,0x4075,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short QP[32] = {
    -0x3faa,0x27fa,0x6b70,0xba40,
    -0x4013,0xedb5,0xc66d,0x8fd6,
    -0x4052,0xf4b9,0x9acf,0x1c67,
    -0x4076,0xec79,0x47aa,0x180d,
    -0x4086,0x36d9,0xb66f,0x6e50,
    -0x4082,0xabea,0xb9e8,0x02d0,
    -0x406a,0x760a,0x4c54,0xbb0b,
    -0x4039,0x34ff,0x4d15,0x9eb5,
    -};
    -static unsigned short QQ[28] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4052,0x8f30,0x6089,0x5077,
    -0x4090,0x81cb,0xa20e,0x5f6f,
    -0x40b3,0x7a69,0x1bfd,0xfe81,
    -0x40c2,0xad28,0xd280,0xd118,
    -0x40bf,0x3d0a,0xa697,0x3d14,
    -0x40a6,0x1462,0xb4bd,0x1781,
    -0x4075,0x017f,0x6ae7,0x5997,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double YP[6] = {
    - 1.26320474790178026440E9,
    --6.47355876379160291031E11,
    - 1.14509511541823727583E14,
    --8.12770255501325109621E15,
    - 2.02439475713594898196E17,
    --7.78877196265950026825E17,
    -};
    -static double YQ[8] = {
    -/* 1.00000000000000000000E0,*/
    - 5.94301592346128195359E2,
    - 2.35564092943068577943E5,
    - 7.34811944459721705660E7,
    - 1.87601316108706159478E10,
    - 3.88231277496238566008E12,
    - 6.20557727146953693363E14,
    - 6.87141087355300489866E16,
    - 3.97270608116560655612E18,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short YP[24] = {
    -0047626,0112763,0013715,0133045,
    -0152026,0134552,0142033,0024411,
    -0053720,0045245,0102210,0077565,
    -0155347,0000321,0136415,0102031,
    -0056463,0146550,0055633,0032605,
    -0157054,0171012,0167361,0054265,
    -};
    -static unsigned short YQ[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0042424,0111515,0044773,0153014,
    -0044546,0005405,0171307,0075774,
    -0046614,0023575,0047105,0063556,
    -0050613,0143034,0101533,0156026,
    -0052541,0175367,0166514,0114257,
    -0054415,0014466,0134350,0171154,
    -0056164,0017436,0025075,0022101,
    -0057534,0103614,0103663,0121772,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short YP[24] = {
    -0xb6c5,0x62f9,0xd2be,0x41d2,
    -0x6521,0x5883,0xd72d,0xc262,
    -0x0fef,0xb091,0x0954,0x42da,
    -0xb083,0x37a1,0xe01a,0xc33c,
    -0x66b1,0x0b73,0x79ad,0x4386,
    -0x2b17,0x5dde,0x9e41,0xc3a5,
    -};
    -static unsigned short YQ[32] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x7ac2,0xa93f,0x9269,0x4082,
    -0xef7f,0xbe58,0xc160,0x410c,
    -0xacee,0xa9c8,0x84ef,0x4191,
    -0x7b83,0x906b,0x78c3,0x4211,
    -0x9316,0xfda9,0x3f5e,0x428c,
    -0x1e4e,0xd71d,0xa326,0x4301,
    -0xa488,0xc547,0x83e3,0x436e,
    -0x747f,0x90f6,0x90f1,0x43cb,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short YP[24] = {
    -0x41d2,0xd2be,0x62f9,0xb6c5,
    -0xc262,0xd72d,0x5883,0x6521,
    -0x42da,0x0954,0xb091,0x0fef,
    -0xc33c,0xe01a,0x37a1,0xb083,
    -0x4386,0x79ad,0x0b73,0x66b1,
    -0xc3a5,0x9e41,0x5dde,0x2b17,
    -};
    -static unsigned short YQ[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4082,0x9269,0xa93f,0x7ac2,
    -0x410c,0xc160,0xbe58,0xef7f,
    -0x4191,0x84ef,0xa9c8,0xacee,
    -0x4211,0x78c3,0x906b,0x7b83,
    -0x428c,0x3f5e,0xfda9,0x9316,
    -0x4301,0xa326,0xd71d,0x1e4e,
    -0x436e,0x83e3,0xc547,0xa488,
    -0x43cb,0x90f1,0x90f6,0x747f,
    -};
    -#endif
    -
    -
    -#ifdef UNK
    -static double Z1 = 1.46819706421238932572E1;
    -static double Z2 = 4.92184563216946036703E1;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short DZ1[] = {0041152,0164532,0006114,0010540};
    -static unsigned short DZ2[] = {0041504,0157663,0001625,0020621};
    -#define Z1 (*(double *)DZ1)
    -#define Z2 (*(double *)DZ2)
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short DZ1[] = {0x822c,0x4189,0x5d2b,0x402d};
    -static unsigned short DZ2[] = {0xa432,0x6072,0x9bf6,0x4048};
    -#define Z1 (*(double *)DZ1)
    -#define Z2 (*(double *)DZ2)
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short DZ1[] = {0x402d,0x5d2b,0x4189,0x822c};
    -static unsigned short DZ2[] = {0x4048,0x9bf6,0x6072,0xa432};
    -#define Z1 (*(double *)DZ1)
    -#define Z2 (*(double *)DZ2)
    -#endif
    -
    -extern double TWOOPI, THPIO4, SQ2OPI;
    -
    -double j1(x)
    -double x;
    -{
    -double w, z, p, q, xn;
    -
    -w = x;
    -if( x < 0 )
    -        return -j1(-x);
    -
    -if( w <= 5.0 )
    -	{
    -	z = x * x;	
    -	w = polevl( z, RP, 3 ) / p1evl( z, RQ, 8 );
    -	w = w * x * (z - Z1) * (z - Z2);
    -	return( w );
    -	}
    -
    -w = 5.0/x;
    -z = w * w;
    -p = polevl( z, PP, 6)/polevl( z, PQ, 6 );
    -q = polevl( z, QP, 7)/p1evl( z, QQ, 7 );
    -xn = x - THPIO4;
    -p = p * cos(xn) - w * q * sin(xn);
    -return( p * SQ2OPI / sqrt(x) );
    -}
    -
    -
    -double y1(x)
    -double x;
    -{
    -double w, z, p, q, xn;
    -
    -if( x <= 5.0 )
    -	{
    -	if (x == 0.0) {
    -		mtherr("y1", SING);
    -		return -NPY_INFINITY;
    -	} else if (x <= 0.0) {
    -		mtherr("y1", DOMAIN);
    -		return NPY_NAN;
    -	}
    -	z = x * x;
    -	w = x * (polevl( z, YP, 5 ) / p1evl( z, YQ, 8 ));
    -	w += TWOOPI * ( j1(x) * log(x)  -  1.0/x );
    -	return( w );
    -	}
    -
    -w = 5.0/x;
    -z = w * w;
    -p = polevl( z, PP, 6)/polevl( z, PQ, 6 );
    -q = polevl( z, QP, 7)/p1evl( z, QQ, 7 );
    -xn = x - THPIO4;
    -p = p * sin(xn) + w * q * cos(xn);
    -return( p * SQ2OPI / sqrt(x) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/jv.c b/scipy-0.10.1/scipy/special/cephes/jv.c
    deleted file mode 100644
    index 2729b7ef87..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/jv.c
    +++ /dev/null
    @@ -1,822 +0,0 @@
    -/*							jv.c
    - *
    - *	Bessel function of noninteger order
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double v, x, y, jv();
    - *
    - * y = jv( v, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Bessel function of order v of the argument,
    - * where v is real.  Negative x is allowed if v is an integer.
    - *
    - * Several expansions are included: the ascending power
    - * series, the Hankel expansion, and two transitional
    - * expansions for large v.  If v is not too large, it
    - * is reduced by recurrence to a region of best accuracy.
    - * The transitional expansions give 12D accuracy for v > 500.
    - *
    - *
    - *
    - * ACCURACY:
    - * Results for integer v are indicated by *, where x and v
    - * both vary from -125 to +125.  Otherwise,
    - * x ranges from 0 to 125, v ranges as indicated by "domain."
    - * Error criterion is absolute, except relative when |jv()| > 1.
    - *
    - * arithmetic  v domain  x domain    # trials      peak       rms
    - *    IEEE      0,125     0,125      100000      4.6e-15    2.2e-16
    - *    IEEE   -125,0       0,125       40000      5.4e-11    3.7e-13
    - *    IEEE      0,500     0,500       20000      4.4e-15    4.0e-16
    - * Integer v:
    - *    IEEE   -125,125   -125,125      50000      3.5e-15*   1.9e-16*
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
    -*/
    -
    -
    -#include "mconf.h"
    -#define CEPHES_DEBUG 0
    -
    -#if CEPHES_DEBUG
    -#include 
    -#endif
    -
    -#ifdef DEC
    -#define MAXGAM 34.84425627277176174
    -#else
    -#define MAXGAM 171.624376956302725
    -#endif
    -
    -extern double MAXNUM, MACHEP, MINLOG, MAXLOG;
    -#define BIG  1.44115188075855872E+17
    -
    -static double jvs(double n, double x);
    -static double hankel(double n, double x);
    -static double recur(double *n, double x, double *newn, int cancel);
    -static double jnx(double n, double x);
    -static double jnt(double n, double x);
    -
    -double jv(double n, double x)
    -{
    -    double k, q, t, y, an;
    -    int i, sign, nint;
    -
    -    nint = 0;			/* Flag for integer n */
    -    sign = 1;			/* Flag for sign inversion */
    -    an = fabs(n);
    -    y = floor(an);
    -    if (y == an) {
    -	nint = 1;
    -	i = an - 16384.0 * floor(an / 16384.0);
    -	if (n < 0.0) {
    -	    if (i & 1)
    -		sign = -sign;
    -	    n = an;
    -	}
    -	if (x < 0.0) {
    -	    if (i & 1)
    -		sign = -sign;
    -	    x = -x;
    -	}
    -	if (n == 0.0)
    -	    return (j0(x));
    -	if (n == 1.0)
    -	    return (sign * j1(x));
    -    }
    -
    -    if ((x < 0.0) && (y != an)) {
    -	mtherr("Jv", DOMAIN);
    -	y = NPY_NAN;
    -	goto done;
    -    }
    -
    -    y = fabs(x);
    -
    -    if (y * y < fabs(n + 1) * MACHEP) {
    -	return pow(0.5 * x, n) / gamma(n + 1);
    -    }
    -
    -    k = 3.6 * sqrt(y);
    -    t = 3.6 * sqrt(an);
    -    if ((y < t) && (an > 21.0))
    -	return (sign * jvs(n, x));
    -    if ((an < k) && (y > 21.0))
    -	return (sign * hankel(n, x));
    -
    -    if (an < 500.0) {
    -    /* Note: if x is too large, the continued fraction will fail; but then the
    -       Hankel expansion can be used. */
    -	if (nint != 0) {
    -	    k = 0.0;
    -	    q = recur(&n, x, &k, 1);
    -	    if (k == 0.0) {
    -		y = j0(x) / q;
    -		goto done;
    -	    }
    -	    if (k == 1.0) {
    -		y = j1(x) / q;
    -		goto done;
    -	    }
    -	}
    -
    -	if (an > 2.0 * y)
    -	    goto rlarger;
    -
    -	if ((n >= 0.0) && (n < 20.0)
    -	    && (y > 6.0) && (y < 20.0)) {
    -            /* Recur backwards from a larger value of n */
    -rlarger:
    -	    k = n;
    -
    -	    y = y + an + 1.0;
    -	    if (y < 30.0)
    -		y = 30.0;
    -	    y = n + floor(y - n);
    -	    q = recur(&y, x, &k, 0);
    -	    y = jvs(y, x) * q;
    -	    goto done;
    -	}
    -
    -	if (k <= 30.0) {
    -	    k = 2.0;
    -	} else if (k < 90.0) {
    -	    k = (3 * k) / 4;
    -	}
    -	if (an > (k + 3.0)) {
    -	    if (n < 0.0)
    -		k = -k;
    -	    q = n - floor(n);
    -	    k = floor(k) + q;
    -	    if (n > 0.0)
    -		q = recur(&n, x, &k, 1);
    -	    else {
    -		t = k;
    -		k = n;
    -		q = recur(&t, x, &k, 1);
    -		k = t;
    -	    }
    -	    if (q == 0.0) {
    -	      underf:
    -		y = 0.0;
    -		goto done;
    -	    }
    -	} else {
    -	    k = n;
    -	    q = 1.0;
    -	}
    -
    -/* boundary between convergence of
    - * power series and Hankel expansion
    - */
    -	y = fabs(k);
    -	if (y < 26.0)
    -	    t = (0.0083 * y + 0.09) * y + 12.9;
    -	else
    -	    t = 0.9 * y;
    -
    -	if (x > t)
    -	    y = hankel(k, x);
    -	else
    -	    y = jvs(k, x);
    -#if CEPHES_DEBUG
    -	printf("y = %.16e, recur q = %.16e\n", y, q);
    -#endif
    -	if (n > 0.0)
    -	    y /= q;
    -	else
    -	    y *= q;
    -    }
    -
    -    else {
    -        /* For large n, use the uniform expansion or the transitional expansion.
    -           But if x is of the order of n**2, these may blow up, whereas the
    -           Hankel expansion will then work.
    -        */
    -	if (n < 0.0) {
    -	    mtherr("Jv", TLOSS);
    -	    y = NPY_NAN;
    -	    goto done;
    -	}
    -	t = x / n;
    -	t /= n;
    -	if (t > 0.3)
    -	    y = hankel(n, x);
    -	else
    -	    y = jnx(n, x);
    -    }
    -
    -  done:return (sign * y);
    -}
    -
    -/* Reduce the order by backward recurrence.
    - * AMS55 #9.1.27 and 9.1.73.
    - */
    -
    -static double recur(double *n, double x, double *newn, int cancel)
    -{
    -    double pkm2, pkm1, pk, qkm2, qkm1;
    -/* double pkp1; */
    -    double k, ans, qk, xk, yk, r, t, kf;
    -    static double big = BIG;
    -    int nflag, ctr;
    -    int miniter, maxiter;
    -
    -/* Continued fraction for Jn(x)/Jn-1(x)
    - * AMS 9.1.73
    - *
    - *    x       -x^2      -x^2
    - * ------  ---------  ---------   ...
    - * 2 n +   2(n+1) +   2(n+2) +
    - *
    - * Compute it with the simplest possible algorithm.
    - *
    - * This continued fraction starts to converge when (|n| + m) > |x|.
    - * Hence, at least |x|-|n| iterations are necessary before convergence is
    - * achieved. There is a hard limit set below, m <= 30000, which is chosen
    - * so that no branch in `jv` requires more iterations to converge.
    - * The exact maximum number is (500/3.6)^2 - 500 ~ 19000
    - */
    -
    -    maxiter = 22000;
    -    miniter = fabs(x) - fabs(*n);
    -    if (miniter < 1)
    -        miniter = 1;
    -
    -    if (*n < 0.0)
    -	nflag = 1;
    -    else
    -	nflag = 0;
    -
    -  fstart:
    -
    -#if CEPHES_DEBUG
    -    printf("recur: n = %.6e, newn = %.6e, cfrac = ", *n, *newn);
    -#endif
    -
    -    pkm2 = 0.0;
    -    qkm2 = 1.0;
    -    pkm1 = x;
    -    qkm1 = *n + *n;
    -    xk = -x * x;
    -    yk = qkm1;
    -    ans = 0.0; /* ans=0.0 ensures that t=1.0 in the first iteration */
    -    ctr = 0;
    -    do {
    -	yk += 2.0;
    -	pk = pkm1 * yk + pkm2 * xk;
    -	qk = qkm1 * yk + qkm2 * xk;
    -	pkm2 = pkm1;
    -	pkm1 = pk;
    -	qkm2 = qkm1;
    -	qkm1 = qk;
    -
    -	/* check convergence */
    -	if (qk != 0 && ctr > miniter)
    -	    r = pk / qk;
    -	else
    -	    r = 0.0;
    -
    -	if (r != 0) {
    -	    t = fabs((ans - r) / r);
    -	    ans = r;
    -	} else {
    -	    t = 1.0;
    -	}
    -
    -	if (++ctr > maxiter) {
    -	    mtherr("jv", UNDERFLOW);
    -	    goto done;
    -	}
    -	if (t < MACHEP)
    -	    goto done;
    -
    -	/* renormalize coefficients */
    -	if (fabs(pk) > big) {
    -	    pkm2 /= big;
    -	    pkm1 /= big;
    -	    qkm2 /= big;
    -	    qkm1 /= big;
    -	}
    -    }
    -    while (t > MACHEP);
    -
    -  done:
    -    if (ans == 0)
    -        ans = 1.0;
    -
    -#if CEPHES_DEBUG
    -    printf("%.6e\n", ans);
    -#endif
    -
    -    /* Change n to n-1 if n < 0 and the continued fraction is small */
    -    if (nflag > 0) {
    -	if (fabs(ans) < 0.125) {
    -	    nflag = -1;
    -	    *n = *n - 1.0;
    -	    goto fstart;
    -	}
    -    }
    -
    -
    -    kf = *newn;
    -
    -/* backward recurrence
    - *              2k
    - *  J   (x)  =  --- J (x)  -  J   (x)
    - *   k-1         x   k         k+1
    - */
    -
    -    pk = 1.0;
    -    pkm1 = 1.0 / ans;
    -    k = *n - 1.0;
    -    r = 2 * k;
    -    do {
    -	pkm2 = (pkm1 * r - pk * x) / x;
    -	/*      pkp1 = pk; */
    -	pk = pkm1;
    -	pkm1 = pkm2;
    -	r -= 2.0;
    -/*
    -	t = fabs(pkp1) + fabs(pk);
    -	if( (k > (kf + 2.5)) && (fabs(pkm1) < 0.25*t) )
    -		{
    -		k -= 1.0;
    -		t = x*x;
    -		pkm2 = ( (r*(r+2.0)-t)*pk - r*x*pkp1 )/t;
    -		pkp1 = pk;
    -		pk = pkm1;
    -		pkm1 = pkm2;
    -		r -= 2.0;
    -		}
    -*/
    -	k -= 1.0;
    -    }
    -    while (k > (kf + 0.5));
    -
    -/* Take the larger of the last two iterates
    - * on the theory that it may have less cancellation error.
    - */
    -
    -    if (cancel) {
    -	if ((kf >= 0.0) && (fabs(pk) > fabs(pkm1))) {
    -	    k += 1.0;
    -	    pkm2 = pk;
    -	}
    -    }
    -    *newn = k;
    -#if CEPHES_DEBUG
    -    printf("newn %.6e rans %.6e\n", k, pkm2);
    -#endif
    -    return (pkm2);
    -}
    -
    -
    -
    -/* Ascending power series for Jv(x).
    - * AMS55 #9.1.10.
    - */
    -
    -extern double PI;
    -extern int sgngam;
    -
    -static double jvs(double n, double x)
    -{
    -    double t, u, y, z, k;
    -    int ex;
    -
    -    z = -x * x / 4.0;
    -    u = 1.0;
    -    y = u;
    -    k = 1.0;
    -    t = 1.0;
    -
    -    while (t > MACHEP) {
    -	u *= z / (k * (n + k));
    -	y += u;
    -	k += 1.0;
    -	if (y != 0)
    -	    t = fabs(u / y);
    -    }
    -#if CEPHES_DEBUG
    -    printf("power series=%.5e ", y);
    -#endif
    -    t = frexp(0.5 * x, &ex);
    -    ex = ex * n;
    -    if ((ex > -1023)
    -	&& (ex < 1023)
    -	&& (n > 0.0)
    -	&& (n < (MAXGAM - 1.0))) {
    -	t = pow(0.5 * x, n) / gamma(n + 1.0);
    -#if CEPHES_DEBUG
    -	printf("pow(.5*x, %.4e)/gamma(n+1)=%.5e\n", n, t);
    -#endif
    -	y *= t;
    -    } else {
    -#if CEPHES_DEBUG
    -	z = n * log(0.5 * x);
    -	k = lgam(n + 1.0);
    -	t = z - k;
    -	printf("log pow=%.5e, lgam(%.4e)=%.5e\n", z, n + 1.0, k);
    -#else
    -	t = n * log(0.5 * x) - lgam(n + 1.0);
    -#endif
    -	if (y < 0) {
    -	    sgngam = -sgngam;
    -	    y = -y;
    -	}
    -	t += log(y);
    -#if CEPHES_DEBUG
    -	printf("log y=%.5e\n", log(y));
    -#endif
    -	if (t < -MAXLOG) {
    -	    return (0.0);
    -	}
    -	if (t > MAXLOG) {
    -	    mtherr("Jv", OVERFLOW);
    -	    return (MAXNUM);
    -	}
    -	y = sgngam * exp(t);
    -    }
    -    return (y);
    -}
    -
    -/* Hankel's asymptotic expansion
    - * for large x.
    - * AMS55 #9.2.5.
    - */
    -
    -static double hankel(double n, double x)
    -{
    -    double t, u, z, k, sign, conv;
    -    double p, q, j, m, pp, qq;
    -    int flag;
    -
    -    m = 4.0 * n * n;
    -    j = 1.0;
    -    z = 8.0 * x;
    -    k = 1.0;
    -    p = 1.0;
    -    u = (m - 1.0) / z;
    -    q = u;
    -    sign = 1.0;
    -    conv = 1.0;
    -    flag = 0;
    -    t = 1.0;
    -    pp = 1.0e38;
    -    qq = 1.0e38;
    -
    -    while (t > MACHEP) {
    -	k += 2.0;
    -	j += 1.0;
    -	sign = -sign;
    -	u *= (m - k * k) / (j * z);
    -	p += sign * u;
    -	k += 2.0;
    -	j += 1.0;
    -	u *= (m - k * k) / (j * z);
    -	q += sign * u;
    -	t = fabs(u / p);
    -	if (t < conv) {
    -	    conv = t;
    -	    qq = q;
    -	    pp = p;
    -	    flag = 1;
    -	}
    -/* stop if the terms start getting larger */
    -	if ((flag != 0) && (t > conv)) {
    -#if CEPHES_DEBUG
    -	    printf("Hankel: convergence to %.4E\n", conv);
    -#endif
    -	    goto hank1;
    -	}
    -    }
    -
    -  hank1:
    -    u = x - (0.5 * n + 0.25) * PI;
    -    t = sqrt(2.0 / (PI * x)) * (pp * cos(u) - qq * sin(u));
    -#if CEPHES_DEBUG
    -    printf("hank: %.6e\n", t);
    -#endif
    -    return (t);
    -}
    -
    -
    -/* Asymptotic expansion for large n.
    - * AMS55 #9.3.35.
    - */
    -
    -static double lambda[] = {
    -    1.0,
    -    1.041666666666666666666667E-1,
    -    8.355034722222222222222222E-2,
    -    1.282265745563271604938272E-1,
    -    2.918490264641404642489712E-1,
    -    8.816272674437576524187671E-1,
    -    3.321408281862767544702647E+0,
    -    1.499576298686255465867237E+1,
    -    7.892301301158651813848139E+1,
    -    4.744515388682643231611949E+2,
    -    3.207490090890661934704328E+3
    -};
    -static double mu[] = {
    -    1.0,
    -    -1.458333333333333333333333E-1,
    -    -9.874131944444444444444444E-2,
    -    -1.433120539158950617283951E-1,
    -    -3.172272026784135480967078E-1,
    -    -9.424291479571202491373028E-1,
    -    -3.511203040826354261542798E+0,
    -    -1.572726362036804512982712E+1,
    -    -8.228143909718594444224656E+1,
    -    -4.923553705236705240352022E+2,
    -    -3.316218568547972508762102E+3
    -};
    -static double P1[] = {
    -    -2.083333333333333333333333E-1,
    -    1.250000000000000000000000E-1
    -};
    -static double P2[] = {
    -    3.342013888888888888888889E-1,
    -    -4.010416666666666666666667E-1,
    -    7.031250000000000000000000E-2
    -};
    -static double P3[] = {
    -    -1.025812596450617283950617E+0,
    -    1.846462673611111111111111E+0,
    -    -8.912109375000000000000000E-1,
    -    7.324218750000000000000000E-2
    -};
    -static double P4[] = {
    -    4.669584423426247427983539E+0,
    -    -1.120700261622299382716049E+1,
    -    8.789123535156250000000000E+0,
    -    -2.364086914062500000000000E+0,
    -    1.121520996093750000000000E-1
    -};
    -static double P5[] = {
    -    -2.8212072558200244877E1,
    -    8.4636217674600734632E1,
    -    -9.1818241543240017361E1,
    -    4.2534998745388454861E1,
    -    -7.3687943594796316964E0,
    -    2.27108001708984375E-1
    -};
    -static double P6[] = {
    -    2.1257013003921712286E2,
    -    -7.6525246814118164230E2,
    -    1.0599904525279998779E3,
    -    -6.9957962737613254123E2,
    -    2.1819051174421159048E2,
    -    -2.6491430486951555525E1,
    -    5.7250142097473144531E-1
    -};
    -static double P7[] = {
    -    -1.9194576623184069963E3,
    -    8.0617221817373093845E3,
    -    -1.3586550006434137439E4,
    -    1.1655393336864533248E4,
    -    -5.3056469786134031084E3,
    -    1.2009029132163524628E3,
    -    -1.0809091978839465550E2,
    -    1.7277275025844573975E0
    -};
    -
    -
    -static double jnx(double n, double x)
    -{
    -    double zeta, sqz, zz, zp, np;
    -    double cbn, n23, t, z, sz;
    -    double pp, qq, z32i, zzi;
    -    double ak, bk, akl, bkl;
    -    int sign, doa, dob, nflg, k, s, tk, tkp1, m;
    -    static double u[8];
    -    static double ai, aip, bi, bip;
    -
    -    /* Test for x very close to n. Use expansion for transition region if so. */
    -    cbn = cbrt(n);
    -    z = (x - n) / cbn;
    -    if (fabs(z) <= 0.7)
    -	return (jnt(n, x));
    -
    -    z = x / n;
    -    zz = 1.0 - z * z;
    -    if (zz == 0.0)
    -	return (0.0);
    -
    -    if (zz > 0.0) {
    -	sz = sqrt(zz);
    -	t = 1.5 * (log((1.0 + sz) / z) - sz);	/* zeta ** 3/2          */
    -	zeta = cbrt(t * t);
    -	nflg = 1;
    -    } else {
    -	sz = sqrt(-zz);
    -	t = 1.5 * (sz - acos(1.0 / z));
    -	zeta = -cbrt(t * t);
    -	nflg = -1;
    -    }
    -    z32i = fabs(1.0 / t);
    -    sqz = cbrt(t);
    -
    -    /* Airy function */
    -    n23 = cbrt(n * n);
    -    t = n23 * zeta;
    -
    -#if CEPHES_DEBUG
    -    printf("zeta %.5E, Airy(%.5E)\n", zeta, t);
    -#endif
    -    airy(t, &ai, &aip, &bi, &bip);
    -
    -    /* polynomials in expansion */
    -    u[0] = 1.0;
    -    zzi = 1.0 / zz;
    -    u[1] = polevl(zzi, P1, 1) / sz;
    -    u[2] = polevl(zzi, P2, 2) / zz;
    -    u[3] = polevl(zzi, P3, 3) / (sz * zz);
    -    pp = zz * zz;
    -    u[4] = polevl(zzi, P4, 4) / pp;
    -    u[5] = polevl(zzi, P5, 5) / (pp * sz);
    -    pp *= zz;
    -    u[6] = polevl(zzi, P6, 6) / pp;
    -    u[7] = polevl(zzi, P7, 7) / (pp * sz);
    -
    -#if CEPHES_DEBUG
    -    for (k = 0; k <= 7; k++)
    -	printf("u[%d] = %.5E\n", k, u[k]);
    -#endif
    -
    -    pp = 0.0;
    -    qq = 0.0;
    -    np = 1.0;
    -    /* flags to stop when terms get larger */
    -    doa = 1;
    -    dob = 1;
    -    akl = MAXNUM;
    -    bkl = MAXNUM;
    -
    -    for (k = 0; k <= 3; k++) {
    -	tk = 2 * k;
    -	tkp1 = tk + 1;
    -	zp = 1.0;
    -	ak = 0.0;
    -	bk = 0.0;
    -	for (s = 0; s <= tk; s++) {
    -	    if (doa) {
    -		if ((s & 3) > 1)
    -		    sign = nflg;
    -		else
    -		    sign = 1;
    -		ak += sign * mu[s] * zp * u[tk - s];
    -	    }
    -
    -	    if (dob) {
    -		m = tkp1 - s;
    -		if (((m + 1) & 3) > 1)
    -		    sign = nflg;
    -		else
    -		    sign = 1;
    -		bk += sign * lambda[s] * zp * u[m];
    -	    }
    -	    zp *= z32i;
    -	}
    -
    -	if (doa) {
    -	    ak *= np;
    -	    t = fabs(ak);
    -	    if (t < akl) {
    -		akl = t;
    -		pp += ak;
    -	    } else
    -		doa = 0;
    -	}
    -
    -	if (dob) {
    -	    bk += lambda[tkp1] * zp * u[0];
    -	    bk *= -np / sqz;
    -	    t = fabs(bk);
    -	    if (t < bkl) {
    -		bkl = t;
    -		qq += bk;
    -	    } else
    -		dob = 0;
    -	}
    -#if CEPHES_DEBUG
    -	printf("a[%d] %.5E, b[%d] %.5E\n", k, ak, k, bk);
    -#endif
    -	if (np < MACHEP)
    -	    break;
    -	np /= n * n;
    -    }
    -
    -    /* normalizing factor ( 4*zeta/(1 - z**2) )**1/4	*/
    -    t = 4.0 * zeta / zz;
    -    t = sqrt(sqrt(t));
    -
    -    t *= ai * pp / cbrt(n) + aip * qq / (n23 * n);
    -    return (t);
    -}
    -
    -/* Asymptotic expansion for transition region,
    - * n large and x close to n.
    - * AMS55 #9.3.23.
    - */
    -
    -static double PF2[] = {
    -    -9.0000000000000000000e-2,
    -    8.5714285714285714286e-2
    -};
    -static double PF3[] = {
    -    1.3671428571428571429e-1,
    -    -5.4920634920634920635e-2,
    -    -4.4444444444444444444e-3
    -};
    -static double PF4[] = {
    -    1.3500000000000000000e-3,
    -    -1.6036054421768707483e-1,
    -    4.2590187590187590188e-2,
    -    2.7330447330447330447e-3
    -};
    -static double PG1[] = {
    -    -2.4285714285714285714e-1,
    -    1.4285714285714285714e-2
    -};
    -static double PG2[] = {
    -    -9.0000000000000000000e-3,
    -    1.9396825396825396825e-1,
    -    -1.1746031746031746032e-2
    -};
    -static double PG3[] = {
    -    1.9607142857142857143e-2,
    -    -1.5983694083694083694e-1,
    -    6.3838383838383838384e-3
    -};
    -
    -
    -static double jnt(double n, double x)
    -{
    -    double z, zz, z3;
    -    double cbn, n23, cbtwo;
    -    double ai, aip, bi, bip;	/* Airy functions */
    -    double nk, fk, gk, pp, qq;
    -    double F[5], G[4];
    -    int k;
    -
    -    cbn = cbrt(n);
    -    z = (x - n) / cbn;
    -    cbtwo = cbrt(2.0);
    -
    -    /* Airy function */
    -    zz = -cbtwo * z;
    -    airy(zz, &ai, &aip, &bi, &bip);
    -
    -    /* polynomials in expansion */
    -    zz = z * z;
    -    z3 = zz * z;
    -    F[0] = 1.0;
    -    F[1] = -z / 5.0;
    -    F[2] = polevl(z3, PF2, 1) * zz;
    -    F[3] = polevl(z3, PF3, 2);
    -    F[4] = polevl(z3, PF4, 3) * z;
    -    G[0] = 0.3 * zz;
    -    G[1] = polevl(z3, PG1, 1);
    -    G[2] = polevl(z3, PG2, 2) * z;
    -    G[3] = polevl(z3, PG3, 2) * zz;
    -#if CEPHES_DEBUG
    -    for (k = 0; k <= 4; k++)
    -	printf("F[%d] = %.5E\n", k, F[k]);
    -    for (k = 0; k <= 3; k++)
    -	printf("G[%d] = %.5E\n", k, G[k]);
    -#endif
    -    pp = 0.0;
    -    qq = 0.0;
    -    nk = 1.0;
    -    n23 = cbrt(n * n);
    -
    -    for (k = 0; k <= 4; k++) {
    -	fk = F[k] * nk;
    -	pp += fk;
    -	if (k != 4) {
    -	    gk = G[k] * nk;
    -	    qq += gk;
    -	}
    -#if CEPHES_DEBUG
    -	printf("fk[%d] %.5E, gk[%d] %.5E\n", k, fk, k, gk);
    -#endif
    -	nk /= n23;
    -    }
    -
    -    fk = cbtwo * ai * pp / cbn + cbrt(4.0) * aip * qq / n;
    -    return (fk);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/k0.c b/scipy-0.10.1/scipy/special/cephes/k0.c
    deleted file mode 100644
    index 172825306e..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/k0.c
    +++ /dev/null
    @@ -1,327 +0,0 @@
    -/*							k0.c
    - *
    - *	Modified Bessel function, third kind, order zero
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, k0();
    - *
    - * y = k0( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns modified Bessel function of the third kind
    - * of order zero of the argument.
    - *
    - * The range is partitioned into the two intervals [0,8] and
    - * (8, infinity).  Chebyshev polynomial expansions are employed
    - * in each interval.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * Tested at 2000 random points between 0 and 8.  Peak absolute
    - * error (relative when K0 > 1) was 1.46e-14; rms, 4.26e-15.
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30        3100       1.3e-16     2.1e-17
    - *    IEEE      0, 30       30000       1.2e-15     1.6e-16
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - *  K0 domain          x <= 0          MAXNUM
    - *
    - */
    -/*							k0e()
    - *
    - *	Modified Bessel function, third kind, order zero,
    - *	exponentially scaled
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, k0e();
    - *
    - * y = k0e( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns exponentially scaled modified Bessel function
    - * of the third kind of order zero of the argument.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0, 30       30000       1.4e-15     1.4e-16
    - * See k0().
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -/* Chebyshev coefficients for K0(x) + log(x/2) I0(x)
    - * in the interval [0,2].  The odd order coefficients are all
    - * zero; only the even order coefficients are listed.
    - * 
    - * lim(x->0){ K0(x) + log(x/2) I0(x) } = -EUL.
    - */
    -
    -#ifdef UNK
    -static double A[] =
    -{
    - 1.37446543561352307156E-16,
    - 4.25981614279661018399E-14,
    - 1.03496952576338420167E-11,
    - 1.90451637722020886025E-9,
    - 2.53479107902614945675E-7,
    - 2.28621210311945178607E-5,
    - 1.26461541144692592338E-3,
    - 3.59799365153615016266E-2,
    - 3.44289899924628486886E-1,
    --5.35327393233902768720E-1
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short A[] = {
    -0023036,0073417,0032477,0165673,
    -0025077,0154126,0016046,0012517,
    -0027066,0011342,0035211,0005041,
    -0031002,0160233,0037454,0050224,
    -0032610,0012747,0037712,0173741,
    -0034277,0144007,0172147,0162375,
    -0035645,0140563,0125431,0165626,
    -0037023,0057662,0125124,0102051,
    -0037660,0043304,0004411,0166707,
    -0140011,0005467,0047227,0130370
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short A[] = {
    -0xfd77,0xe6a7,0xcee1,0x3ca3,
    -0xc2aa,0xc384,0xfb0a,0x3d27,
    -0x2144,0x4751,0xc25c,0x3da6,
    -0x8a13,0x67e5,0x5c13,0x3e20,
    -0x5efc,0xe7f9,0x02bc,0x3e91,
    -0xfca0,0xfe8c,0xf900,0x3ef7,
    -0x3d73,0x7563,0xb82e,0x3f54,
    -0x9085,0x554a,0x6bf6,0x3fa2,
    -0x3db9,0x8121,0x08d8,0x3fd6,
    -0xf61f,0xe9d2,0x2166,0xbfe1
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short A[] = {
    -0x3ca3,0xcee1,0xe6a7,0xfd77,
    -0x3d27,0xfb0a,0xc384,0xc2aa,
    -0x3da6,0xc25c,0x4751,0x2144,
    -0x3e20,0x5c13,0x67e5,0x8a13,
    -0x3e91,0x02bc,0xe7f9,0x5efc,
    -0x3ef7,0xf900,0xfe8c,0xfca0,
    -0x3f54,0xb82e,0x7563,0x3d73,
    -0x3fa2,0x6bf6,0x554a,0x9085,
    -0x3fd6,0x08d8,0x8121,0x3db9,
    -0xbfe1,0x2166,0xe9d2,0xf61f
    -};
    -#endif
    -
    -
    -
    -/* Chebyshev coefficients for exp(x) sqrt(x) K0(x)
    - * in the inverted interval [2,infinity].
    - * 
    - * lim(x->inf){ exp(x) sqrt(x) K0(x) } = sqrt(pi/2).
    - */
    -
    -#ifdef UNK
    -static double B[] = {
    - 5.30043377268626276149E-18,
    --1.64758043015242134646E-17,
    - 5.21039150503902756861E-17,
    --1.67823109680541210385E-16,
    - 5.51205597852431940784E-16,
    --1.84859337734377901440E-15,
    - 6.34007647740507060557E-15,
    --2.22751332699166985548E-14,
    - 8.03289077536357521100E-14,
    --2.98009692317273043925E-13,
    - 1.14034058820847496303E-12,
    --4.51459788337394416547E-12,
    - 1.85594911495471785253E-11,
    --7.95748924447710747776E-11,
    - 3.57739728140030116597E-10,
    --1.69753450938905987466E-9,
    - 8.57403401741422608519E-9,
    --4.66048989768794782956E-8,
    - 2.76681363944501510342E-7,
    --1.83175552271911948767E-6,
    - 1.39498137188764993662E-5,
    --1.28495495816278026384E-4,
    - 1.56988388573005337491E-3,
    --3.14481013119645005427E-2,
    - 2.44030308206595545468E0
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short B[] = {
    -0021703,0106456,0076144,0173406,
    -0122227,0173144,0116011,0030033,
    -0022560,0044562,0006506,0067642,
    -0123101,0076243,0123273,0131013,
    -0023436,0157713,0056243,0141331,
    -0124005,0032207,0063726,0164664,
    -0024344,0066342,0051756,0162300,
    -0124710,0121365,0154053,0077022,
    -0025264,0161166,0066246,0077420,
    -0125647,0141671,0006443,0103212,
    -0026240,0076431,0077147,0160445,
    -0126636,0153741,0174002,0105031,
    -0027243,0040102,0035375,0163073,
    -0127656,0176256,0113476,0044653,
    -0030304,0125544,0006377,0130104,
    -0130751,0047257,0110537,0127324,
    -0031423,0046400,0014772,0012164,
    -0132110,0025240,0155247,0112570,
    -0032624,0105314,0007437,0021574,
    -0133365,0155243,0174306,0116506,
    -0034152,0004776,0061643,0102504,
    -0135006,0136277,0036104,0175023,
    -0035715,0142217,0162474,0115022,
    -0137000,0147671,0065177,0134356,
    -0040434,0026754,0175163,0044070
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short B[] = {
    -0x9ee1,0xcf8c,0x71a5,0x3c58,
    -0x2603,0x9381,0xfecc,0xbc72,
    -0xcdf4,0x41a8,0x092e,0x3c8e,
    -0x7641,0x74d7,0x2f94,0xbca8,
    -0x785b,0x6b94,0xdbf9,0x3cc3,
    -0xdd36,0xecfa,0xa690,0xbce0,
    -0xdc98,0x4a7d,0x8d9c,0x3cfc,
    -0x6fc2,0xbb05,0x145e,0xbd19,
    -0xcfe2,0xcd94,0x9c4e,0x3d36,
    -0x70d1,0x21a4,0xf877,0xbd54,
    -0xfc25,0x2fcc,0x0fa3,0x3d74,
    -0x5143,0x3f00,0xdafc,0xbd93,
    -0xbcc7,0x475f,0x6808,0x3db4,
    -0xc935,0xd2e7,0xdf95,0xbdd5,
    -0xf608,0x819f,0x956c,0x3df8,
    -0xf5db,0xf22b,0x29d5,0xbe1d,
    -0x428e,0x033f,0x69a0,0x3e42,
    -0xf2af,0x1b54,0x0554,0xbe69,
    -0xe46f,0x81e3,0x9159,0x3e92,
    -0xd3a9,0x7f18,0xbb54,0xbebe,
    -0x70a9,0xcc74,0x413f,0x3eed,
    -0x9f42,0xe788,0xd797,0xbf20,
    -0x9342,0xfca7,0xb891,0x3f59,
    -0xf71e,0x2d4f,0x19f7,0xbfa0,
    -0x6907,0x9f4e,0x85bd,0x4003
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short B[] = {
    -0x3c58,0x71a5,0xcf8c,0x9ee1,
    -0xbc72,0xfecc,0x9381,0x2603,
    -0x3c8e,0x092e,0x41a8,0xcdf4,
    -0xbca8,0x2f94,0x74d7,0x7641,
    -0x3cc3,0xdbf9,0x6b94,0x785b,
    -0xbce0,0xa690,0xecfa,0xdd36,
    -0x3cfc,0x8d9c,0x4a7d,0xdc98,
    -0xbd19,0x145e,0xbb05,0x6fc2,
    -0x3d36,0x9c4e,0xcd94,0xcfe2,
    -0xbd54,0xf877,0x21a4,0x70d1,
    -0x3d74,0x0fa3,0x2fcc,0xfc25,
    -0xbd93,0xdafc,0x3f00,0x5143,
    -0x3db4,0x6808,0x475f,0xbcc7,
    -0xbdd5,0xdf95,0xd2e7,0xc935,
    -0x3df8,0x956c,0x819f,0xf608,
    -0xbe1d,0x29d5,0xf22b,0xf5db,
    -0x3e42,0x69a0,0x033f,0x428e,
    -0xbe69,0x0554,0x1b54,0xf2af,
    -0x3e92,0x9159,0x81e3,0xe46f,
    -0xbebe,0xbb54,0x7f18,0xd3a9,
    -0x3eed,0x413f,0xcc74,0x70a9,
    -0xbf20,0xd797,0xe788,0x9f42,
    -0x3f59,0xb891,0xfca7,0x9342,
    -0xbfa0,0x19f7,0x2d4f,0xf71e,
    -0x4003,0x85bd,0x9f4e,0x6907
    -};
    -#endif
    -
    -/*							k0.c	*/
    -extern double PI;
    -
    -double k0(x)
    -double x;
    -{
    -double y, z;
    -
    -if (x == 0.0) {
    -	mtherr("k0", SING);
    -	return NPY_INFINITY;
    -} else if (x < 0.0) {
    -	mtherr("k0", DOMAIN);
    -	return NPY_NAN;
    -}
    -
    -if( x <= 2.0 )
    -	{
    -	y = x * x - 2.0;
    -	y = chbevl( y, A, 10 ) - log( 0.5 * x ) * i0(x);
    -	return( y );
    -	}
    -z = 8.0/x - 2.0;
    -y = exp(-x) * chbevl( z, B, 25 ) / sqrt(x);
    -return(y);
    -}
    -
    -
    -
    -
    -double k0e( x )
    -double x;
    -{
    -double y;
    -
    -if (x == 0.0) {
    -	mtherr("k0e", SING);
    -	return NPY_INFINITY;
    -} else if (x < 0.0) {
    -	mtherr( "k0e", DOMAIN );
    -	return NPY_NAN;
    -}
    -
    -if( x <= 2.0 )
    -	{
    -	y = x * x - 2.0;
    -	y = chbevl( y, A, 10 ) - log( 0.5 * x ) * i0(x);
    -	return( y * exp(x) );
    -	}
    -
    -y = chbevl( 8.0/x - 2.0, B, 25 ) / sqrt(x);
    -return(y);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/k1.c b/scipy-0.10.1/scipy/special/cephes/k1.c
    deleted file mode 100644
    index 20f5649cfb..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/k1.c
    +++ /dev/null
    @@ -1,330 +0,0 @@
    -/*							k1.c
    - *
    - *	Modified Bessel function, third kind, order one
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, k1();
    - *
    - * y = k1( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Computes the modified Bessel function of the third kind
    - * of order one of the argument.
    - *
    - * The range is partitioned into the two intervals [0,2] and
    - * (2, infinity).  Chebyshev polynomial expansions are employed
    - * in each interval.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30        3300       8.9e-17     2.2e-17
    - *    IEEE      0, 30       30000       1.2e-15     1.6e-16
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * k1 domain          x <= 0          MAXNUM
    - *
    - */
    -/*							k1e.c
    - *
    - *	Modified Bessel function, third kind, order one,
    - *	exponentially scaled
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, k1e();
    - *
    - * y = k1e( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns exponentially scaled modified Bessel function
    - * of the third kind of order one of the argument:
    - *
    - *      k1e(x) = exp(x) * k1(x).
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0, 30       30000       7.8e-16     1.2e-16
    - * See k1().
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -/* Chebyshev coefficients for x(K1(x) - log(x/2) I1(x))
    - * in the interval [0,2].
    - * 
    - * lim(x->0){ x(K1(x) - log(x/2) I1(x)) } = 1.
    - */
    -
    -#ifdef UNK
    -static double A[] =
    -{
    --7.02386347938628759343E-18,
    --2.42744985051936593393E-15,
    --6.66690169419932900609E-13,
    --1.41148839263352776110E-10,
    --2.21338763073472585583E-8,
    --2.43340614156596823496E-6,
    --1.73028895751305206302E-4,
    --6.97572385963986435018E-3,
    --1.22611180822657148235E-1,
    --3.53155960776544875667E-1,
    - 1.52530022733894777053E0
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short A[] = {
    -0122001,0110501,0164746,0151255,
    -0124056,0165213,0150034,0147377,
    -0126073,0124026,0167207,0001044,
    -0130033,0030735,0141061,0033116,
    -0131676,0020350,0121341,0107175,
    -0133443,0046631,0062031,0070716,
    -0135065,0067427,0026435,0164022,
    -0136344,0112234,0165752,0006222,
    -0137373,0015622,0017016,0155636,
    -0137664,0150333,0125730,0067240,
    -0040303,0036411,0130200,0043120
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short A[] = {
    -0xda56,0x3d3c,0x3228,0xbc60,
    -0x99e0,0x7a03,0xdd51,0xbce5,
    -0xe045,0xddd0,0x7502,0xbd67,
    -0x26ca,0xb846,0x663b,0xbde3,
    -0x31d0,0x145c,0xc41d,0xbe57,
    -0x2e3a,0x2c83,0x69b3,0xbec4,
    -0xbd02,0xe5a3,0xade2,0xbf26,
    -0x4192,0x9d7d,0x9293,0xbf7c,
    -0xdb74,0x43c1,0x6372,0xbfbf,
    -0x0dd4,0x757b,0x9a1b,0xbfd6,
    -0x08ca,0x3610,0x67a1,0x3ff8
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short A[] = {
    -0xbc60,0x3228,0x3d3c,0xda56,
    -0xbce5,0xdd51,0x7a03,0x99e0,
    -0xbd67,0x7502,0xddd0,0xe045,
    -0xbde3,0x663b,0xb846,0x26ca,
    -0xbe57,0xc41d,0x145c,0x31d0,
    -0xbec4,0x69b3,0x2c83,0x2e3a,
    -0xbf26,0xade2,0xe5a3,0xbd02,
    -0xbf7c,0x9293,0x9d7d,0x4192,
    -0xbfbf,0x6372,0x43c1,0xdb74,
    -0xbfd6,0x9a1b,0x757b,0x0dd4,
    -0x3ff8,0x67a1,0x3610,0x08ca
    -};
    -#endif
    -
    -
    -
    -/* Chebyshev coefficients for exp(x) sqrt(x) K1(x)
    - * in the interval [2,infinity].
    - *
    - * lim(x->inf){ exp(x) sqrt(x) K1(x) } = sqrt(pi/2).
    - */
    -
    -#ifdef UNK
    -static double B[] =
    -{
    --5.75674448366501715755E-18,
    - 1.79405087314755922667E-17,
    --5.68946255844285935196E-17,
    - 1.83809354436663880070E-16,
    --6.05704724837331885336E-16,
    - 2.03870316562433424052E-15,
    --7.01983709041831346144E-15,
    - 2.47715442448130437068E-14,
    --8.97670518232499435011E-14,
    - 3.34841966607842919884E-13,
    --1.28917396095102890680E-12,
    - 5.13963967348173025100E-12,
    --2.12996783842756842877E-11,
    - 9.21831518760500529508E-11,
    --4.19035475934189648750E-10,
    - 2.01504975519703286596E-9,
    --1.03457624656780970260E-8,
    - 5.74108412545004946722E-8,
    --3.50196060308781257119E-7,
    - 2.40648494783721712015E-6,
    --1.93619797416608296024E-5,
    - 1.95215518471351631108E-4,
    --2.85781685962277938680E-3,
    - 1.03923736576817238437E-1,
    - 2.72062619048444266945E0
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short B[] = {
    -0121724,0061352,0013041,0150076,
    -0022245,0074324,0016172,0173232,
    -0122603,0030250,0135670,0165221,
    -0023123,0165362,0023561,0060124,
    -0123456,0112436,0141654,0073623,
    -0024022,0163557,0077564,0006753,
    -0124374,0165221,0131014,0026524,
    -0024737,0017512,0144250,0175451,
    -0125312,0021456,0123136,0076633,
    -0025674,0077720,0020125,0102607,
    -0126265,0067543,0007744,0043701,
    -0026664,0152702,0033002,0074202,
    -0127273,0055234,0120016,0071733,
    -0027712,0133200,0042441,0075515,
    -0130346,0057000,0015456,0074470,
    -0031012,0074441,0051636,0111155,
    -0131461,0136444,0177417,0002101,
    -0032166,0111743,0032176,0021410,
    -0132674,0001224,0076555,0027060,
    -0033441,0077430,0135226,0106663,
    -0134242,0065610,0167155,0113447,
    -0035114,0131304,0043664,0102163,
    -0136073,0045065,0171465,0122123,
    -0037324,0152767,0147401,0017732,
    -0040456,0017275,0050061,0062120,
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short B[] = {
    -0x3a08,0x42c4,0x8c5d,0xbc5a,
    -0x5ed3,0x838f,0xaf1a,0x3c74,
    -0x1d52,0x1777,0x6615,0xbc90,
    -0x2c0b,0x44ee,0x7d5e,0x3caa,
    -0x8ef2,0xd875,0xd2a3,0xbcc5,
    -0x81bd,0xefee,0x5ced,0x3ce2,
    -0x85ab,0x3641,0x9d52,0xbcff,
    -0x1f65,0x5915,0xe3e9,0x3d1b,
    -0xcfb3,0xd4cb,0x4465,0xbd39,
    -0xb0b1,0x040a,0x8ffa,0x3d57,
    -0x88f8,0x61fc,0xadec,0xbd76,
    -0x4f10,0x46c0,0x9ab8,0x3d96,
    -0xce7b,0x9401,0x6b53,0xbdb7,
    -0x2f6a,0x08a4,0x56d0,0x3dd9,
    -0xcf27,0x0365,0xcbc0,0xbdfc,
    -0xd24e,0x2a73,0x4f24,0x3e21,
    -0xe088,0x9fe1,0x37a4,0xbe46,
    -0xc461,0x668f,0xd27c,0x3e6e,
    -0xa5c6,0x8fad,0x8052,0xbe97,
    -0xd1b6,0x1752,0x2fe3,0x3ec4,
    -0xb2e5,0x1dcd,0x4d71,0xbef4,
    -0x908e,0x88f6,0x9658,0x3f29,
    -0xb48a,0xbe66,0x6946,0xbf67,
    -0x23fb,0xf9e0,0x9abe,0x3fba,
    -0x2c8a,0xaa06,0xc3d7,0x4005
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short B[] = {
    -0xbc5a,0x8c5d,0x42c4,0x3a08,
    -0x3c74,0xaf1a,0x838f,0x5ed3,
    -0xbc90,0x6615,0x1777,0x1d52,
    -0x3caa,0x7d5e,0x44ee,0x2c0b,
    -0xbcc5,0xd2a3,0xd875,0x8ef2,
    -0x3ce2,0x5ced,0xefee,0x81bd,
    -0xbcff,0x9d52,0x3641,0x85ab,
    -0x3d1b,0xe3e9,0x5915,0x1f65,
    -0xbd39,0x4465,0xd4cb,0xcfb3,
    -0x3d57,0x8ffa,0x040a,0xb0b1,
    -0xbd76,0xadec,0x61fc,0x88f8,
    -0x3d96,0x9ab8,0x46c0,0x4f10,
    -0xbdb7,0x6b53,0x9401,0xce7b,
    -0x3dd9,0x56d0,0x08a4,0x2f6a,
    -0xbdfc,0xcbc0,0x0365,0xcf27,
    -0x3e21,0x4f24,0x2a73,0xd24e,
    -0xbe46,0x37a4,0x9fe1,0xe088,
    -0x3e6e,0xd27c,0x668f,0xc461,
    -0xbe97,0x8052,0x8fad,0xa5c6,
    -0x3ec4,0x2fe3,0x1752,0xd1b6,
    -0xbef4,0x4d71,0x1dcd,0xb2e5,
    -0x3f29,0x9658,0x88f6,0x908e,
    -0xbf67,0x6946,0xbe66,0xb48a,
    -0x3fba,0x9abe,0xf9e0,0x23fb,
    -0x4005,0xc3d7,0xaa06,0x2c8a
    -};
    -#endif
    -
    -extern double PI;
    -extern double MINLOG;
    -
    -double k1(x)
    -double x;
    -{
    -double y, z;
    -
    -if (x == 0.0) {
    -	mtherr("k1", SING);
    -	return NPY_INFINITY;
    -} else if (x < 0.0) {
    -	mtherr("k1", DOMAIN);
    -	return NPY_NAN;
    -}
    -z = 0.5 * x;
    -
    -if( x <= 2.0 )
    -	{
    -	y = x * x - 2.0;
    -	y =  log(z) * i1(x)  +  chbevl( y, A, 11 ) / x;
    -	return( y );
    -	}
    -
    -return(  exp(-x) * chbevl( 8.0/x - 2.0, B, 25 ) / sqrt(x) );
    -}
    -
    -
    -
    -
    -double k1e( x )
    -double x;
    -{
    -double y;
    -
    -if (x == 0.0) {
    -	mtherr("k1e", SING);
    -	return NPY_INFINITY;
    -} else if (x < 0.0) {
    -	mtherr("k1e", DOMAIN);
    -	return NPY_NAN;
    -}
    -
    -if( x <= 2.0 )
    -	{
    -	y = x * x - 2.0;
    -	y =  log( 0.5 * x ) * i1(x)  +  chbevl( y, A, 11 ) / x;
    -	return( y * exp(x) );
    -	}
    -
    -return(  chbevl( 8.0/x - 2.0, B, 25 ) / sqrt(x) );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/kn.c b/scipy-0.10.1/scipy/special/cephes/kn.c
    deleted file mode 100644
    index fed5560524..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/kn.c
    +++ /dev/null
    @@ -1,248 +0,0 @@
    -/*							kn.c
    - *
    - *	Modified Bessel function, third kind, integer order
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, kn();
    - * int n;
    - *
    - * y = kn( n, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns modified Bessel function of the third kind
    - * of order n of the argument.
    - *
    - * The range is partitioned into the two intervals [0,9.55] and
    - * (9.55, infinity).  An ascending power series is used in the
    - * low range, and an asymptotic expansion in the high range.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0,30         3000       1.3e-9      5.8e-11
    - *    IEEE      0,30        90000       1.8e-8      3.0e-10
    - *
    - *  Error is high only near the crossover point x = 9.55
    - * between the two expansions used.
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1988, 2000 by Stephen L. Moshier
    -*/
    -
    -
    -/*
    -Algorithm for Kn.
    -                       n-1 
    -                   -n   -  (n-k-1)!    2   k
    -K (x)  =  0.5 (x/2)     >  -------- (-x /4)
    - n                      -     k!
    -                       k=0
    -
    -                    inf.                                   2   k
    -       n         n   -                                   (x /4)
    - + (-1)  0.5(x/2)    >  {p(k+1) + p(n+k+1) - 2log(x/2)} ---------
    -                     -                                  k! (n+k)!
    -                    k=0
    -
    -where  p(m) is the psi function: p(1) = -EUL and
    -
    -                      m-1
    -                       -
    -      p(m)  =  -EUL +  >  1/k
    -                       -
    -                      k=1
    -
    -For large x,
    -                                         2        2     2
    -                                      u-1     (u-1 )(u-3 )
    -K (z)  =  sqrt(pi/2z) exp(-z) { 1 + ------- + ------------ + ...}
    - v                                        1            2
    -                                    1! (8z)     2! (8z)
    -asymptotically, where
    -
    -           2
    -    u = 4 v .
    -
    -*/
    -
    -#include "mconf.h"
    -
    -#define EUL 5.772156649015328606065e-1
    -#define MAXFAC 31
    -extern double MACHEP, MAXNUM, MAXLOG, PI;
    -
    -double kn( nn, x )
    -int nn;
    -double x;
    -{
    -double k, kf, nk1f, nkf, zn, t, s, z0, z;
    -double ans, fn, pn, pk, zmn, tlg, tox;
    -int i, n;
    -
    -if( nn < 0 )
    -	n = -nn;
    -else
    -	n = nn;
    -
    -if( n > MAXFAC )
    -	{
    -overf:
    -	mtherr( "kn", OVERFLOW );
    -	return( MAXNUM );
    -	}
    -
    -if(x <= 0.0) {
    -	if( x < 0.0 ) {
    -		mtherr("kn", DOMAIN);
    -                return NPY_NAN;
    -	} else {
    -		mtherr("kn", SING);
    -		return NPY_INFINITY;
    -	}
    -}
    -
    -
    -if( x > 9.55 )
    -	goto asymp;
    -
    -ans = 0.0;
    -z0 = 0.25 * x * x;
    -fn = 1.0;
    -pn = 0.0;
    -zmn = 1.0;
    -tox = 2.0/x;
    -
    -if( n > 0 )
    -	{
    -	/* compute factorial of n and psi(n) */
    -	pn = -EUL;
    -	k = 1.0;
    -	for( i=1; i 1.0) && ((MAXNUM/tox) < zmn) )
    -				goto overf;
    -			zmn *= tox;
    -			}
    -		s *= 0.5;
    -		t = fabs(s);
    -		if( (zmn > 1.0) && ((MAXNUM/zmn) < t) )
    -			goto overf;
    -		if( (t > 1.0) && ((MAXNUM/t) < zmn) )
    -			goto overf;
    -		ans = s * zmn;
    -		}
    -	}
    -
    -
    -tlg = 2.0 * log( 0.5 * x );
    -pk = -EUL;
    -if( n == 0 )
    -	{
    -	pn = pk;
    -	t = 1.0;
    -	}
    -else
    -	{
    -	pn = pn + 1.0/n;
    -	t = 1.0/fn;
    -	}
    -s = (pk+pn-tlg)*t;
    -k = 1.0;
    -do
    -	{
    -	t *= z0 / (k * (k+n));
    -	pk += 1.0/k;
    -	pn += 1.0/(k+n);
    -	s += (pk+pn-tlg)*t;
    -	k += 1.0;
    -	}
    -while( fabs(t/s) > MACHEP );
    -
    -s = 0.5 * s / zmn;
    -if( n & 1 )
    -	s = -s;
    -ans += s;
    -
    -return(ans);
    -
    -
    -
    -/* Asymptotic expansion for Kn(x) */
    -/* Converges to 1.4e-17 for x > 18.4 */
    -
    -asymp:
    -
    -if( x > MAXLOG )
    -	{
    -	mtherr( "kn", UNDERFLOW );
    -	return(0.0);
    -	}
    -k = n;
    -pn = 4.0 * k * k;
    -pk = 1.0;
    -z0 = 8.0 * x;
    -fn = 1.0;
    -t = 1.0;
    -s = t;
    -nkf = MAXNUM;
    -i = 0;
    -do
    -	{
    -	z = pn - pk * pk;
    -	t = t * z /(fn * z0);
    -	nk1f = fabs(t);
    -	if( (i >= n) && (nk1f > nkf) )
    -		{
    -		goto adone;
    -		}
    -	nkf = nk1f;
    -	s += t;
    -	fn += 1.0;
    -	pk += 2.0;
    -	i += 1;
    -	}
    -while( fabs(t/s) > MACHEP );
    -
    -adone:
    -ans = exp(-x) * sqrt( PI/(2.0*x) ) * s;
    -return(ans);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/kolmogorov.c b/scipy-0.10.1/scipy/special/cephes/kolmogorov.c
    deleted file mode 100644
    index f587b6af28..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/kolmogorov.c
    +++ /dev/null
    @@ -1,253 +0,0 @@
    -/* File altered for inclusion in cephes module for Python:
    -   Main loop commented out.... */
    -/*  Travis Oliphant Nov. 1998 */
    -
    -
    -/* Re Kolmogorov statistics, here is Birnbaum and Tingey's formula for the
    -   distribution of D+, the maximum of all positive deviations between a
    -   theoretical distribution function P(x) and an empirical one Sn(x)
    -   from n samples.
    -
    -     +
    -    D  =         sup        [ P(x) - Sn(x) ]
    -     n     -inf < x < inf
    -
    -
    -                  [n(1-e)]
    -        +            -                    v-1              n-v
    -    Pr{D   > e} =    >    C    e (e + v/n)    (1 - e - v/n)
    -        n            -   n v
    -                    v=0
    -
    -    [n(1-e)] is the largest integer not exceeding n(1-e).
    -    nCv is the number of combinations of n things taken v at a time.  */
    -
    -
    -#include "mconf.h"
    -extern double MAXLOG;
    -
    -/* Exact Smirnov statistic, for one-sided test.  */
    -double
    -smirnov (n, e)
    -     int n;
    -     double e;
    -{
    -  int v, nn;
    -  double evn, omevn, p, t, c, lgamnp1;
    -
    -  if (n <= 0 || e < 0.0 || e > 1.0)
    -    return (NPY_NAN);
    -  if (e == 0.0) return 1.0;
    -  nn = (int) (floor ((double) n * (1.0 - e)));
    -  p = 0.0;
    -  if (n < 1013)
    -    {
    -      c = 1.0;
    -      for (v = 0; v <= nn; v++)
    -	{
    -	  evn = e + ((double) v) / n;
    -	  p += c * pow (evn, (double) (v - 1))
    -	    * pow (1.0 - evn, (double) (n - v));
    -	  /* Next combinatorial term; worst case error = 4e-15.  */
    -	  c *= ((double) (n - v)) / (v + 1);
    -	}
    -    }
    -  else
    -    {
    -      lgamnp1 = lgam ((double) (n + 1));
    -      for (v = 0; v <= nn; v++)
    -	{
    -	  evn = e + ((double) v) / n;
    -	  omevn = 1.0 - evn;
    -	  if (fabs (omevn) > 0.0)
    -	    {
    -	      t = lgamnp1
    -		- lgam ((double) (v + 1))
    -		- lgam ((double) (n - v + 1))
    -		+ (v - 1) * log (evn)
    -		+ (n - v) * log (omevn);
    -	      if (t > -MAXLOG)
    -		p += exp (t);
    -	    }
    -	}
    -    }
    -  return (p * e);
    -}
    -
    -
    -/* Kolmogorov's limiting distribution of two-sided test, returns
    -   probability that sqrt(n) * max deviation > y,
    -   or that max deviation > y/sqrt(n).
    -   The approximation is useful for the tail of the distribution
    -   when n is large.  */
    -double
    -kolmogorov (y)
    -     double y;
    -{
    -  double p, t, r, sign, x;
    -
    -  if ( y < 1.1e-16 ) 
    -    return 1.0;
    -  x = -2.0 * y * y;
    -  sign = 1.0;
    -  p = 0.0;
    -  r = 1.0;
    -  do
    -    {
    -      t = exp (x * r * r);
    -      p += sign * t;
    -      if (t == 0.0)
    -	break;
    -      r += 1.0;
    -      sign = -sign;
    -    }
    -  while ((t / p) > 1.1e-16);
    -  return (p + p);
    -}
    -
    -/* Functional inverse of Smirnov distribution
    -   finds e such that smirnov(n,e) = p.  */
    -double
    -smirnovi (n, p)
    -     int n;
    -     double p;
    -{
    -  double e, t, dpde;
    -  int iterations;
    -
    -  if (p <= 0.0 || p > 1.0)
    -    {
    -      mtherr ("smirnovi", DOMAIN);
    -      return (NPY_NAN);
    -    }
    -  /* Start with approximation p = exp(-2 n e^2).  */
    -  e = sqrt (-log (p) / (2.0 * n));
    -  iterations = 0;
    -  do
    -    {
    -      /* Use approximate derivative in Newton iteration. */
    -      t = -2.0 * n * e;
    -      dpde = 2.0 * t * exp (t * e);
    -      if (fabs (dpde) > 0.0)
    -	t = (p - smirnov (n, e)) / dpde;
    -      else
    -	{
    -	  mtherr ("smirnovi", UNDERFLOW);
    -	  return 0.0;
    -	}
    -      e = e + t;
    -      if (e >= 1.0 || e <= 0.0)
    -	{
    -	  mtherr ("smirnovi", OVERFLOW);
    -	  return 0.0;
    -	}
    -      if (++iterations > MAXITER) 
    -        {
    -          mtherr ("smirnovi", TOOMANY);
    -          return (e);          
    -        }
    -    }
    -  while (fabs (t / e) > 1e-10);
    -  return (e);
    -}
    -
    -
    -/* Functional inverse of Kolmogorov statistic for two-sided test.
    -   Finds y such that kolmogorov(y) = p.
    -   If e = smirnovi (n,p), then kolmogi(2 * p) / sqrt(n) should
    -   be close to e.  */
    -double
    -kolmogi (p)
    -     double p;
    -{
    -  double y, t, dpdy;
    -  int iterations;
    -
    -  if (p <= 0.0 || p > 1.0)
    -    {
    -      mtherr ("kolmogi", DOMAIN);
    -      return (NPY_NAN);
    -    }
    -  if ( (1.0 - p ) < 1e-16) return 0.0;
    -  /* Start with approximation p = 2 exp(-2 y^2).  */
    -  y = sqrt (-0.5 * log (0.5 * p));
    -  iterations = 0;
    -  do
    -    {
    -      /* Use approximate derivative in Newton iteration. */
    -      t = -2.0 * y;
    -      dpdy = 4.0 * t * exp (t * y);
    -      if (fabs (dpdy) > 0.0)
    -	t = (p - kolmogorov (y)) / dpdy;
    -      else
    -	{
    -	  mtherr ("kolmogi", UNDERFLOW);
    -	  return 0.0;
    -	}
    -      y = y + t;
    -      if (++iterations > MAXITER) 
    -        {
    -          mtherr ("kolmogi", TOOMANY);
    -          return (y);          
    -        }      
    -    }
    -  while (fabs (t / y) > 1.0e-10);
    -  return (y);
    -}
    -
    -
    -/* Type in a number.  */
    -/* void
    -getnum (s, px)
    -     char *s;
    -     double *px;
    -{
    -  char str[30];
    -
    -  printf (" %s (%.15e) ? ", s, *px);
    -  gets (str);
    -  if (str[0] == '\0' || str[0] == '\n')
    -    return;
    -  sscanf (str, "%lf", px);
    -  printf ("%.15e\n", *px);
    -}
    -*/
    -/* Type in values, get answers.  */
    -/*
    -void
    -main ()
    -{
    -  int n;
    -  double e, p, ps, pk, ek, y;
    -
    -  n = 5;
    -  e = 0.0;
    -  p = 0.1;
    -loop:
    -  ps = n;
    -  getnum ("n", &ps);
    -  n = ps;
    -  if (n <= 0)
    -    {
    -      printf ("? Operator error.\n");
    -      goto loop;
    -    }
    -*/
    -  /*
    -  getnum ("e", &e);
    -  ps = smirnov (n, e);
    -  y = sqrt ((double) n) * e;
    -  printf ("y = %.4e\n", y);
    -  pk = kolmogorov (y);
    -  printf ("Smirnov = %.15e, Kolmogorov/2 = %.15e\n", ps, pk / 2.0);
    -*/
    -/*
    -  getnum ("p", &p);
    -  e = smirnovi (n, p);
    -  printf ("Smirnov e = %.15e\n", e);
    -  y = kolmogi (2.0 * p);
    -  ek = y / sqrt ((double) n);
    -  printf ("Kolmogorov e = %.15e\n", ek);
    -  goto loop;
    -}
    -*/
    diff --git a/scipy-0.10.1/scipy/special/cephes/mconf.h b/scipy-0.10.1/scipy/special/cephes/mconf.h
    deleted file mode 100644
    index 0cae946f74..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/mconf.h
    +++ /dev/null
    @@ -1,149 +0,0 @@
    -/*							mconf.h
    - *
    - *	Common include file for math routines
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * #include "mconf.h"
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * This file contains definitions for error codes that are
    - * passed to the common error handling routine mtherr()
    - * (which see).
    - *
    - * The file also includes a conditional assembly definition
    - * for the type of computer arithmetic (IEEE, DEC, Motorola
    - * IEEE, or UNKnown).
    - * 
    - * For Digital Equipment PDP-11 and VAX computers, certain
    - * IBM systems, and others that use numbers with a 56-bit
    - * significand, the symbol DEC should be defined.  In this
    - * mode, most floating point constants are given as arrays
    - * of octal integers to eliminate decimal to binary conversion
    - * errors that might be introduced by the compiler.
    - *
    - * For little-endian computers, such as IBM PC, that follow the
    - * IEEE Standard for Binary Floating Point Arithmetic (ANSI/IEEE
    - * Std 754-1985), the symbol IBMPC should be defined.  These
    - * numbers have 53-bit significands.  In this mode, constants
    - * are provided as arrays of hexadecimal 16 bit integers.
    - *
    - * Big-endian IEEE format is denoted MIEEE.  On some RISC
    - * systems such as Sun SPARC, double precision constants
    - * must be stored on 8-byte address boundaries.  Since integer
    - * arrays may be aligned differently, the MIEEE configuration
    - * may fail on such machines.
    - *
    - * To accommodate other types of computer arithmetic, all
    - * constants are also provided in a normal decimal radix
    - * which one can hope are correctly converted to a suitable
    - * format by the available C language compiler.  To invoke
    - * this mode, define the symbol UNK.
    - *
    - * An important difference among these modes is a predefined
    - * set of machine arithmetic constants for each.  The numbers
    - * MACHEP (the machine roundoff error), MAXNUM (largest number
    - * represented), and several other parameters are preset by
    - * the configuration symbol.  Check the file const.c to
    - * ensure that these values are correct for your computer.
    - *
    - * Configurations NANS, INFINITIES, MINUSZERO, and DENORMAL
    - * may fail on many systems.  Verify that they are supposed
    - * to work on your computer.
    - */
    -
    -/*
    -Cephes Math Library Release 2.3:  June, 1995
    -Copyright 1984, 1987, 1989, 1995 by Stephen L. Moshier
    -*/
    -
    -#ifndef CEPHES_MCONF_H
    -#define CEPHES_MCONF_H
    -
    -#include 
    -#include 
    -
    -#include "cephes_names.h"
    -#include "protos.h"
    -
    -/* Constant definitions for math error conditions
    - */
    -
    -#define DOMAIN		1	/* argument domain error */
    -#define SING		2	/* argument singularity */
    -#define OVERFLOW	3	/* overflow range error */
    -#define UNDERFLOW	4	/* underflow range error */
    -#define TLOSS		5	/* total loss of precision */
    -#define PLOSS		6	/* partial loss of precision */
    -#define TOOMANY         7       /* too many iterations */
    -#define MAXITER        500
    -
    -#define EDOM		33
    -#define ERANGE		34
    -
    -/* Long double complex numeral.  */
    -/*
    -typedef struct
    -	{
    -	long double r;
    -	long double i;
    -	} cmplxl;
    -*/
    -
    -/* Type of computer arithmetic */
    -
    -/* UNKnown arithmetic, invokes coefficients given in
    - * normal decimal format.  Beware of range boundary
    - * problems (MACHEP, MAXLOG, etc. in const.c) and
    - * roundoff problems in pow.c:
    - * (Sun SPARCstation)
    - */
    -
    -/* SciPy note: by defining UNK, we prevent the compiler from
    - * casting integers to floating point numbers.  If the Endianness
    - * is detected incorrectly, this causes problems on some platforms.
    - */
    -#define UNK 1
    -
    -/* Define this `volatile' if your compiler thinks
    - * that floating point arithmetic obeys the associative
    - * and distributive laws.  It will defeat some optimizations
    - * (but probably not enough of them).
    - *
    - * #define VOLATILE volatile
    - */
    -#define VOLATILE
    -
    -/* For 12-byte long doubles on an i386, pad a 16-bit short 0
    - * to the end of real constants initialized by integer arrays.
    - *
    - * #define XPD 0,
    - *
    - * Otherwise, the type is 10 bytes long and XPD should be
    - * defined blank (e.g., Microsoft C).
    - *
    - * #define XPD
    - */
    -#define XPD 0,
    -
    -/* Define to support tiny denormal numbers, else undefine. */
    -#define DENORMAL 1
    -
    -/* Define to distinguish between -0.0 and +0.0.  */
    -#define MINUSZERO 1
    -
    -/* Define 1 for ANSI C atan2() function
    -   See atan.c and clog.c. */
    -#define ANSIC 1
    -
    -/* Variable for error reporting.  See mtherr.c.  */
    -extern int merror;
    -
    -#define gamma Gamma
    -
    -#endif /* CEPHES_MCONF_H */
    diff --git a/scipy-0.10.1/scipy/special/cephes/mmmpy.c b/scipy-0.10.1/scipy/special/cephes/mmmpy.c
    deleted file mode 100644
    index 674a5086d4..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/mmmpy.c
    +++ /dev/null
    @@ -1,62 +0,0 @@
    -/*							mmmpy.c
    - *
    - *	Matrix multiply
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int r, c;
    - * double A[r*c], B[c*r], Y[r*r];
    - *
    - * mmmpy( r, c, A, B, Y );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Y = A B
    - *              c-1
    - *              --
    - * Y[i][j]  =   >   A[i][k] B[k][j]
    - *              --
    - *              k=0
    - *
    - * Multiplies an r (rows) by c (columns) matrix A on the left
    - * by a c (rows) by r (columns) matrix B on the right
    - * to produce an r by r matrix Y.
    - *
    - *
    - */
    -
    -#include "protos.h"
    -
    -void
    -mmmpy( r, c, A, B, Y )
    -int r, c;
    -double *A, *B, *Y;
    -{
    -register double s;
    -double *pA, *pB, *pY, *pt;
    -int i, j, k;
    -
    -pY = Y;
    -pB = B;
    -for( i=0; i
    -#include "mconf.h"
    -
    -void scipy_special_raise_warning(char *fmt, ...);
    -int scipy_special_print_error_messages = 0;
    -
    -int merror = 0;
    -
    -/* Notice: the order of appearance of the following
    - * messages is bound to the error codes defined
    - * in mconf.h.
    - */
    -static char *ermsg[8] = {
    -    "unknown",			/* error code 0 */
    -    "domain",			/* error code 1 */
    -    "singularity",		/* et seq.      */
    -    "overflow",
    -    "underflow",
    -    "total loss of precision",
    -    "partial loss of precision",
    -    "too many iterations"
    -};
    -
    -
    -int mtherr(char *name, int code)
    -{
    -    /* Display string passed by calling program,
    -     * which is supposed to be the name of the
    -     * function in which the error occurred:
    -     */
    -
    -    /* Set global error message word */
    -    merror = code;
    -
    -    /* Display error message defined
    -     * by the code argument.
    -     */
    -    if ((code <= 0) || (code >= 8))
    -	code = 0;
    -
    -    if (scipy_special_print_error_messages) {
    -        scipy_special_raise_warning("%s: %s error", name, ermsg[code]);
    -    }
    -
    -    /* Return to calling
    -     * program
    -     */
    -    return (0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/mtransp.c b/scipy-0.10.1/scipy/special/cephes/mtransp.c
    deleted file mode 100644
    index e1a48e7e34..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/mtransp.c
    +++ /dev/null
    @@ -1,64 +0,0 @@
    -/*							mtransp.c
    - *
    - *	Matrix transpose
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int n;
    - * double A[n*n], T[n*n];
    - *
    - * mtransp( n, A, T );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - *
    - * T[r][c] = A[c][r]
    - *
    - *
    - * Transposes the n by n square matrix A and puts the result in T.
    - * The output, T, may occupy the same storage as A.
    - *
    - *
    - *
    - */
    -
    -
    -void mtransp( int,double*,double* );
    -
    -void
    -mtransp( n, A, T )
    -int n;
    -double *A, *T;
    -{
    -int i, j, np1;
    -double *pAc, *pAr, *pTc, *pTr, *pA0, *pT0;
    -double x;
    -
    -np1 = n+1;
    -pA0 = A;
    -pT0 = T;
    -for( i=0; i   A[j][k] V[k] ,  j = 1, ..., r
    - *          --
    - *          k=0
    - *
    - * Multiplies the r (rows) by c (columns) matrix A on the left
    - * by column vector V of dimension c on the right
    - * to produce a (column) vector Y output of dimension r.
    - *
    - *
    - *
    - *
    - */
    -
    -
    -void
    -mvmpy( r, c, A, V, Y )
    -int r, c;
    -double *A, *V, *Y;
    -{
    -register double s;
    -double *pA, *pV, *pY;
    -int i, j;
    -
    -pA = A;
    -pY = Y;
    -for( i=0; i   (       )  p  (1-p)
    - *   --  (   j   )
    - *  j=0
    - *
    - * In a sequence of Bernoulli trials, this is the probability
    - * that k or fewer failures precede the nth success.
    - *
    - * The terms are not computed individually; instead the incomplete
    - * beta integral is employed, according to the formula
    - *
    - * y = nbdtr( k, n, p ) = incbet( n, k+1, p ).
    - *
    - * The arguments must be positive, with p ranging from 0 to 1.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,p), with p between 0 and 1.
    - *
    - *               a,b                     Relative error:
    - * arithmetic  domain     # trials      peak         rms
    - *    IEEE     0,100       100000      1.7e-13     8.8e-15
    - * See also incbet.c.
    - *
    - */
    -/*							nbdtrc.c
    - *
    - *	Complemented negative binomial distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k, n;
    - * double p, y, nbdtrc();
    - *
    - * y = nbdtrc( k, n, p );
    - *
    - * DESCRIPTION:
    - *
    - * Returns the sum of the terms k+1 to infinity of the negative
    - * binomial distribution:
    - *
    - *   inf
    - *   --  ( n+j-1 )   n      j
    - *   >   (       )  p  (1-p)
    - *   --  (   j   )
    - *  j=k+1
    - *
    - * The terms are not computed individually; instead the incomplete
    - * beta integral is employed, according to the formula
    - *
    - * y = nbdtrc( k, n, p ) = incbet( k+1, n, 1-p ).
    - *
    - * The arguments must be positive, with p ranging from 0 to 1.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,p), with p between 0 and 1.
    - *
    - *               a,b                     Relative error:
    - * arithmetic  domain     # trials      peak         rms
    - *    IEEE     0,100       100000      1.7e-13     8.8e-15
    - * See also incbet.c.
    - */
    -
    -/*							nbdtrc
    - *
    - *	Complemented negative binomial distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k, n;
    - * double p, y, nbdtrc();
    - *
    - * y = nbdtrc( k, n, p );
    - *
    - * DESCRIPTION:
    - *
    - * Returns the sum of the terms k+1 to infinity of the negative
    - * binomial distribution:
    - *
    - *   inf
    - *   --  ( n+j-1 )   n      j
    - *   >   (       )  p  (1-p)
    - *   --  (   j   )
    - *  j=k+1
    - *
    - * The terms are not computed individually; instead the incomplete
    - * beta integral is employed, according to the formula
    - *
    - * y = nbdtrc( k, n, p ) = incbet( k+1, n, 1-p ).
    - *
    - * The arguments must be positive, with p ranging from 0 to 1.
    - *
    - * ACCURACY:
    - *
    - * See incbet.c.
    - */
    -/*							nbdtri
    - *
    - *	Functional inverse of negative binomial distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k, n;
    - * double p, y, nbdtri();
    - *
    - * p = nbdtri( k, n, y );
    - *
    - * DESCRIPTION:
    - *
    - * Finds the argument p such that nbdtr(k,n,p) is equal to y.
    - *
    - * ACCURACY:
    - *
    - * Tested at random points (a,b,y), with y between 0 and 1.
    - *
    - *               a,b                     Relative error:
    - * arithmetic  domain     # trials      peak         rms
    - *    IEEE     0,100       100000      1.5e-14     8.5e-16
    - * See also incbi.c.
    - */
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -double nbdtrc( k, n, p )
    -int k, n;
    -double p;
    -{
    -double dk, dn;
    -
    -if( (p < 0.0) || (p > 1.0) )
    -	goto domerr;
    -if( k < 0 )
    -	{
    -domerr:
    -	mtherr( "nbdtr", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -
    -dk = k+1;
    -dn = n;
    -return( incbet( dk, dn, 1.0 - p ) );
    -}
    -
    -
    -
    -double nbdtr( k, n, p )
    -int k, n;
    -double p;
    -{
    -double dk, dn;
    -
    -if( (p < 0.0) || (p > 1.0) )
    -	goto domerr;
    -if( k < 0 )
    -	{
    -domerr:
    -	mtherr( "nbdtr", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -dk = k+1;
    -dn = n;
    -return( incbet( dn, dk, p ) );
    -}
    -
    -
    -
    -double nbdtri( k, n, p )
    -int k, n;
    -double p;
    -{
    -double dk, dn, w;
    -
    -if( (p < 0.0) || (p > 1.0) )
    -	goto domerr;
    -if( k < 0 )
    -	{
    -domerr:
    -	mtherr( "nbdtri", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -dk = k+1;
    -dn = n;
    -w = incbi( dn, dk, p );
    -return( w );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ndtr.c b/scipy-0.10.1/scipy/special/cephes/ndtr.c
    deleted file mode 100644
    index 1b5fbfa248..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ndtr.c
    +++ /dev/null
    @@ -1,480 +0,0 @@
    -/*							ndtr.c
    - *
    - *	Normal distribution function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, ndtr();
    - *
    - * y = ndtr( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the area under the Gaussian probability density
    - * function, integrated from minus infinity to x:
    - *
    - *                            x
    - *                             -
    - *                   1        | |          2
    - *    ndtr(x)  = ---------    |    exp( - t /2 ) dt
    - *               sqrt(2pi)  | |
    - *                           -
    - *                          -inf.
    - *
    - *             =  ( 1 + erf(z) ) / 2
    - *             =  erfc(z) / 2
    - *
    - * where z = x/sqrt(2). Computation is via the functions
    - * erf and erfc.
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC      -13,0         8000       2.1e-15     4.8e-16
    - *    IEEE     -13,0        30000       3.4e-14     6.7e-15
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition         value returned
    - * erfc underflow    x > 37.519379347       0.0
    - *
    - */
    -/*							erf.c
    - *
    - *	Error function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, erf();
    - *
    - * y = erf( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * The integral is
    - *
    - *                           x 
    - *                            -
    - *                 2         | |          2
    - *   erf(x)  =  --------     |    exp( - t  ) dt.
    - *              sqrt(pi)   | |
    - *                          -
    - *                           0
    - *
    - * The magnitude of x is limited to 9.231948545 for DEC
    - * arithmetic; 1 or -1 is returned outside this range.
    - *
    - * For 0 <= |x| < 1, erf(x) = x * P4(x**2)/Q5(x**2); otherwise
    - * erf(x) = 1 - erfc(x).
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0,1         14000       4.7e-17     1.5e-17
    - *    IEEE      0,1         30000       3.7e-16     1.0e-16
    - *
    - */
    -/*							erfc.c
    - *
    - *	Complementary error function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, erfc();
    - *
    - * y = erfc( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - *
    - *  1 - erf(x) =
    - *
    - *                           inf. 
    - *                             -
    - *                  2         | |          2
    - *   erfc(x)  =  --------     |    exp( - t  ) dt
    - *               sqrt(pi)   | |
    - *                           -
    - *                            x
    - *
    - *
    - * For small x, erfc(x) = 1 - erf(x); otherwise rational
    - * approximations are computed.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 9.2319   12000       5.1e-16     1.2e-16
    - *    IEEE      0,26.6417   30000       5.7e-14     1.5e-14
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition              value returned
    - * erfc underflow    x > 9.231948545 (DEC)       0.0
    - *
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.2:  June, 1992
    -Copyright 1984, 1987, 1988, 1992 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -
    -#include "mconf.h"
    -
    -extern double SQRTH;
    -extern double MAXLOG;
    -
    -#ifdef UNK
    -static double P[] = {
    - 2.46196981473530512524E-10,
    - 5.64189564831068821977E-1,
    - 7.46321056442269912687E0,
    - 4.86371970985681366614E1,
    - 1.96520832956077098242E2,
    - 5.26445194995477358631E2,
    - 9.34528527171957607540E2,
    - 1.02755188689515710272E3,
    - 5.57535335369399327526E2
    -};
    -static double Q[] = {
    -/* 1.00000000000000000000E0,*/
    - 1.32281951154744992508E1,
    - 8.67072140885989742329E1,
    - 3.54937778887819891062E2,
    - 9.75708501743205489753E2,
    - 1.82390916687909736289E3,
    - 2.24633760818710981792E3,
    - 1.65666309194161350182E3,
    - 5.57535340817727675546E2
    -};
    -static double R[] = {
    - 5.64189583547755073984E-1,
    - 1.27536670759978104416E0,
    - 5.01905042251180477414E0,
    - 6.16021097993053585195E0,
    - 7.40974269950448939160E0,
    - 2.97886665372100240670E0
    -};
    -static double S[] = {
    -/* 1.00000000000000000000E0,*/
    - 2.26052863220117276590E0,
    - 9.39603524938001434673E0,
    - 1.20489539808096656605E1,
    - 1.70814450747565897222E1,
    - 9.60896809063285878198E0,
    - 3.36907645100081516050E0
    -};
    -static double T[] = {
    - 9.60497373987051638749E0,
    - 9.00260197203842689217E1,
    - 2.23200534594684319226E3,
    - 7.00332514112805075473E3,
    - 5.55923013010394962768E4
    -};
    -static double U[] = {
    -/* 1.00000000000000000000E0,*/
    - 3.35617141647503099647E1,
    - 5.21357949780152679795E2,
    - 4.59432382970980127987E3,
    - 2.26290000613890934246E4,
    - 4.92673942608635921086E4
    -};
    -
    -#define UTHRESH 37.519379347
    -#endif
    -
    -#ifdef DEC
    -static unsigned short P[] = {
    -0030207,0054445,0011173,0021706,
    -0040020,0067272,0030661,0122075,
    -0040756,0151236,0173053,0067042,
    -0041502,0106175,0062555,0151457,
    -0042104,0102525,0047401,0003667,
    -0042403,0116176,0011446,0075303,
    -0042551,0120723,0061641,0123275,
    -0042600,0070651,0007264,0134516,
    -0042413,0061102,0167507,0176625
    -};
    -static unsigned short Q[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041123,0123257,0165741,0017142,
    -0041655,0065027,0173413,0115450,
    -0042261,0074011,0021573,0004150,
    -0042563,0166530,0013662,0007200,
    -0042743,0176427,0162443,0105214,
    -0043014,0062546,0153727,0123772,
    -0042717,0012470,0006227,0067424,
    -0042413,0061103,0003042,0013254
    -};
    -static unsigned short R[] = {
    -0040020,0067272,0101024,0155421,
    -0040243,0037467,0056706,0026462,
    -0040640,0116017,0120665,0034315,
    -0040705,0020162,0143350,0060137,
    -0040755,0016234,0134304,0130157,
    -0040476,0122700,0051070,0015473
    -};
    -static unsigned short S[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040420,0126200,0044276,0070413,
    -0041026,0053051,0007302,0063746,
    -0041100,0144203,0174051,0061151,
    -0041210,0123314,0126343,0177646,
    -0041031,0137125,0051431,0033011,
    -0040527,0117362,0152661,0066201
    -};
    -static unsigned short T[] = {
    -0041031,0126770,0170672,0166101,
    -0041664,0006522,0072360,0031770,
    -0043013,0100025,0162641,0126671,
    -0043332,0155231,0161627,0076200,
    -0044131,0024115,0021020,0117343
    -};
    -static unsigned short U[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041406,0037461,0177575,0032714,
    -0042402,0053350,0123061,0153557,
    -0043217,0111227,0032007,0164217,
    -0043660,0145000,0004013,0160114,
    -0044100,0071544,0167107,0125471
    -};
    -#define UTHRESH 14.0
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P[] = {
    -0x6479,0xa24f,0xeb24,0x3df0,
    -0x3488,0x4636,0x0dd7,0x3fe2,
    -0x6dc4,0xdec5,0xda53,0x401d,
    -0xba66,0xacad,0x518f,0x4048,
    -0x20f7,0xa9e0,0x90aa,0x4068,
    -0xcf58,0xc264,0x738f,0x4080,
    -0x34d8,0x6c74,0x343a,0x408d,
    -0x972a,0x21d6,0x0e35,0x4090,
    -0xffb3,0x5de8,0x6c48,0x4081
    -};
    -static unsigned short Q[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x23cc,0xfd7c,0x74d5,0x402a,
    -0x7365,0xfee1,0xad42,0x4055,
    -0x610d,0x246f,0x2f01,0x4076,
    -0x41d0,0x02f6,0x7dab,0x408e,
    -0x7151,0xfca4,0x7fa2,0x409c,
    -0xf4ff,0xdafa,0x8cac,0x40a1,
    -0xede2,0x0192,0xe2a7,0x4099,
    -0x42d6,0x60c4,0x6c48,0x4081
    -};
    -static unsigned short R[] = {
    -0x9b62,0x5042,0x0dd7,0x3fe2,
    -0xc5a6,0xebb8,0x67e6,0x3ff4,
    -0xa71a,0xf436,0x1381,0x4014,
    -0x0c0c,0x58dd,0xa40e,0x4018,
    -0x960e,0x9718,0xa393,0x401d,
    -0x0367,0x0a47,0xd4b8,0x4007
    -};
    -static unsigned short S[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xce21,0x0917,0x1590,0x4002,
    -0x4cfd,0x21d8,0xcac5,0x4022,
    -0x2c4d,0x7f05,0x1910,0x4028,
    -0x7ff5,0x959c,0x14d9,0x4031,
    -0x26c1,0xaa63,0x37ca,0x4023,
    -0x2d90,0x5ab6,0xf3de,0x400a
    -};
    -static unsigned short T[] = {
    -0x5d88,0x1e37,0x35bf,0x4023,
    -0x067f,0x4e9e,0x81aa,0x4056,
    -0x35b7,0xbcb4,0x7002,0x40a1,
    -0xef90,0x3c72,0x5b53,0x40bb,
    -0x13dc,0xa442,0x2509,0x40eb
    -};
    -static unsigned short U[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xa6ba,0x3fef,0xc7e6,0x4040,
    -0x3aee,0x14c6,0x4add,0x4080,
    -0xfd12,0xe680,0xf252,0x40b1,
    -0x7c0a,0x0101,0x1940,0x40d6,
    -0xf567,0x9dc8,0x0e6c,0x40e8
    -};
    -#define UTHRESH 37.519379347
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short P[] = {
    -0x3df0,0xeb24,0xa24f,0x6479,
    -0x3fe2,0x0dd7,0x4636,0x3488,
    -0x401d,0xda53,0xdec5,0x6dc4,
    -0x4048,0x518f,0xacad,0xba66,
    -0x4068,0x90aa,0xa9e0,0x20f7,
    -0x4080,0x738f,0xc264,0xcf58,
    -0x408d,0x343a,0x6c74,0x34d8,
    -0x4090,0x0e35,0x21d6,0x972a,
    -0x4081,0x6c48,0x5de8,0xffb3
    -};
    -static unsigned short Q[] = {
    -0x402a,0x74d5,0xfd7c,0x23cc,
    -0x4055,0xad42,0xfee1,0x7365,
    -0x4076,0x2f01,0x246f,0x610d,
    -0x408e,0x7dab,0x02f6,0x41d0,
    -0x409c,0x7fa2,0xfca4,0x7151,
    -0x40a1,0x8cac,0xdafa,0xf4ff,
    -0x4099,0xe2a7,0x0192,0xede2,
    -0x4081,0x6c48,0x60c4,0x42d6
    -};
    -static unsigned short R[] = {
    -0x3fe2,0x0dd7,0x5042,0x9b62,
    -0x3ff4,0x67e6,0xebb8,0xc5a6,
    -0x4014,0x1381,0xf436,0xa71a,
    -0x4018,0xa40e,0x58dd,0x0c0c,
    -0x401d,0xa393,0x9718,0x960e,
    -0x4007,0xd4b8,0x0a47,0x0367
    -};
    -static unsigned short S[] = {
    -0x4002,0x1590,0x0917,0xce21,
    -0x4022,0xcac5,0x21d8,0x4cfd,
    -0x4028,0x1910,0x7f05,0x2c4d,
    -0x4031,0x14d9,0x959c,0x7ff5,
    -0x4023,0x37ca,0xaa63,0x26c1,
    -0x400a,0xf3de,0x5ab6,0x2d90
    -};
    -static unsigned short T[] = {
    -0x4023,0x35bf,0x1e37,0x5d88,
    -0x4056,0x81aa,0x4e9e,0x067f,
    -0x40a1,0x7002,0xbcb4,0x35b7,
    -0x40bb,0x5b53,0x3c72,0xef90,
    -0x40eb,0x2509,0xa442,0x13dc
    -};
    -static unsigned short U[] = {
    -0x4040,0xc7e6,0x3fef,0xa6ba,
    -0x4080,0x4add,0x14c6,0x3aee,
    -0x40b1,0xf252,0xe680,0xfd12,
    -0x40d6,0x1940,0x0101,0x7c0a,
    -0x40e8,0x0e6c,0x9dc8,0xf567
    -};
    -#define UTHRESH 37.519379347
    -#endif
    -
    -double ndtr(double a)
    -{
    -double x, y, z;
    -
    -if (npy_isnan(a)) {
    -  mtherr("ndtr", DOMAIN);
    -  return (NPY_NAN);
    -}
    -
    -x = a * SQRTH;
    -z = fabs(x);
    -
    -if( z < SQRTH )
    -	y = 0.5 + 0.5 * erf(x);
    -
    -else
    -	{
    -	y = 0.5 * erfc(z);
    -
    -	if( x > 0 )
    -		y = 1.0 - y;
    -	}
    -
    -return(y);
    -}
    -
    -
    -double erfc(double a)
    -{
    -double p,q,x,y,z;
    -
    -if (npy_isnan(a)) {
    -  mtherr("erfc", DOMAIN);
    -  return (NPY_NAN);
    -}
    -
    -if( a < 0.0 )
    -	x = -a;
    -else
    -	x = a;
    -
    -if( x < 1.0 )
    -	return( 1.0 - erf(a) );
    -
    -z = -a * a;
    -
    -if( z < -MAXLOG )
    -	{
    -under:
    -	mtherr( "erfc", UNDERFLOW );
    -	if( a < 0 )
    -		return( 2.0 );
    -	else
    -		return( 0.0 );
    -	}
    -
    -z = exp(z);
    -
    -if( x < 8.0 )
    -	{
    -	p = polevl( x, P, 8 );
    -	q = p1evl( x, Q, 8 );
    -	}
    -else
    -	{
    -	p = polevl( x, R, 5 );
    -	q = p1evl( x, S, 6 );
    -	}
    -y = (z * p)/q;
    -
    -if( a < 0 )
    -	y = 2.0 - y;
    -
    -if( y == 0.0 )
    -	goto under;
    -
    -return(y);
    -}
    -
    -
    -
    -double erf(double x)
    -{
    -double y, z;
    -
    -if (npy_isnan(x)) {
    -  mtherr("erf", DOMAIN);
    -  return (NPY_NAN);
    -}
    -
    -if( fabs(x) > 1.0 )
    -	return( 1.0 - erfc(x) );
    -z = x * x;
    -
    -y = x * polevl( z, T, 4 ) / p1evl( z, U, 5 );
    -return( y );
    -
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/ndtri.c b/scipy-0.10.1/scipy/special/cephes/ndtri.c
    deleted file mode 100644
    index d3b05c749e..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/ndtri.c
    +++ /dev/null
    @@ -1,409 +0,0 @@
    -/*							ndtri.c
    - *
    - *	Inverse of Normal distribution function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, ndtri();
    - *
    - * x = ndtri( y );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the argument, x, for which the area under the
    - * Gaussian probability density function (integrated from
    - * minus infinity to x) is equal to y.
    - *
    - *
    - * For small arguments 0 < y < exp(-2), the program computes
    - * z = sqrt( -2.0 * log(y) );  then the approximation is
    - * x = z - log(z)/z  - (1/z) P(1/z) / Q(1/z).
    - * There are two rational functions P/Q, one for 0 < y < exp(-32)
    - * and the other for y up to exp(-2).  For larger arguments,
    - * w = y - 0.5, and  x/sqrt(2pi) = w + w**3 R(w**2)/S(w**2)).
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain        # trials      peak         rms
    - *    DEC      0.125, 1         5500       9.5e-17     2.1e-17
    - *    DEC      6e-39, 0.135     3500       5.7e-17     1.3e-17
    - *    IEEE     0.125, 1        20000       7.2e-16     1.3e-16
    - *    IEEE     3e-308, 0.135   50000       4.6e-16     9.8e-17
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition    value returned
    - * ndtri domain       x <= 0        -MAXNUM
    - * ndtri domain       x >= 1         MAXNUM
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -extern double MAXNUM;
    -
    -#ifdef UNK
    -/* sqrt(2pi) */
    -static double s2pi = 2.50662827463100050242E0;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short s2p[] = {0040440,0066230,0177661,0034055};
    -#define s2pi *(double *)s2p
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short s2p[] = {0x2706,0x1ff6,0x0d93,0x4004};
    -#define s2pi *(double *)s2p
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short s2p[] = {
    -0x4004,0x0d93,0x1ff6,0x2706
    -};
    -#define s2pi *(double *)s2p
    -#endif
    -
    -/* approximation for 0 <= |y - 0.5| <= 3/8 */
    -#ifdef UNK
    -static double P0[5] = {
    --5.99633501014107895267E1,
    - 9.80010754185999661536E1,
    --5.66762857469070293439E1,
    - 1.39312609387279679503E1,
    --1.23916583867381258016E0,
    -};
    -static double Q0[8] = {
    -/* 1.00000000000000000000E0,*/
    - 1.95448858338141759834E0,
    - 4.67627912898881538453E0,
    - 8.63602421390890590575E1,
    --2.25462687854119370527E2,
    - 2.00260212380060660359E2,
    --8.20372256168333339912E1,
    - 1.59056225126211695515E1,
    --1.18331621121330003142E0,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short P0[20] = {
    -0141557,0155170,0071360,0120550,
    -0041704,0000214,0172417,0067307,
    -0141542,0132204,0040066,0156723,
    -0041136,0163161,0157276,0007747,
    -0140236,0116374,0073666,0051764,
    -};
    -static unsigned short Q0[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040372,0026256,0110403,0123707,
    -0040625,0122024,0020277,0026661,
    -0041654,0134161,0124134,0007244,
    -0142141,0073162,0133021,0131371,
    -0042110,0041235,0043516,0057767,
    -0141644,0011417,0036155,0137305,
    -0041176,0076556,0004043,0125430,
    -0140227,0073347,0152776,0067251,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short P0[20] = {
    -0x142d,0x0e5e,0xfb4f,0xc04d,
    -0xedd9,0x9ea1,0x8011,0x4058,
    -0xdbba,0x8806,0x5690,0xc04c,
    -0xc1fd,0x3bd7,0xdcce,0x402b,
    -0xca7e,0x8ef6,0xd39f,0xbff3,
    -};
    -static unsigned short Q0[36] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x74f9,0xd220,0x4595,0x3fff,
    -0xe5b6,0x8417,0xb482,0x4012,
    -0x81d4,0x350b,0x970e,0x4055,
    -0x365f,0x56c2,0x2ece,0xc06c,
    -0xcbff,0xa8e9,0x0853,0x4069,
    -0xb7d9,0xe78d,0x8261,0xc054,
    -0x7563,0xc104,0xcfad,0x402f,
    -0xcdd5,0xfabf,0xeedc,0xbff2,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short P0[20] = {
    -0xc04d,0xfb4f,0x0e5e,0x142d,
    -0x4058,0x8011,0x9ea1,0xedd9,
    -0xc04c,0x5690,0x8806,0xdbba,
    -0x402b,0xdcce,0x3bd7,0xc1fd,
    -0xbff3,0xd39f,0x8ef6,0xca7e,
    -};
    -static unsigned short Q0[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x3fff,0x4595,0xd220,0x74f9,
    -0x4012,0xb482,0x8417,0xe5b6,
    -0x4055,0x970e,0x350b,0x81d4,
    -0xc06c,0x2ece,0x56c2,0x365f,
    -0x4069,0x0853,0xa8e9,0xcbff,
    -0xc054,0x8261,0xe78d,0xb7d9,
    -0x402f,0xcfad,0xc104,0x7563,
    -0xbff2,0xeedc,0xfabf,0xcdd5,
    -};
    -#endif
    -
    -
    -/* Approximation for interval z = sqrt(-2 log y ) between 2 and 8
    - * i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14.
    - */
    -#ifdef UNK
    -static double P1[9] = {
    - 4.05544892305962419923E0,
    - 3.15251094599893866154E1,
    - 5.71628192246421288162E1,
    - 4.40805073893200834700E1,
    - 1.46849561928858024014E1,
    - 2.18663306850790267539E0,
    --1.40256079171354495875E-1,
    --3.50424626827848203418E-2,
    --8.57456785154685413611E-4,
    -};
    -static double Q1[8] = {
    -/*  1.00000000000000000000E0,*/
    - 1.57799883256466749731E1,
    - 4.53907635128879210584E1,
    - 4.13172038254672030440E1,
    - 1.50425385692907503408E1,
    - 2.50464946208309415979E0,
    --1.42182922854787788574E-1,
    --3.80806407691578277194E-2,
    --9.33259480895457427372E-4,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short P1[36] = {
    -0040601,0143074,0150744,0073326,
    -0041374,0031554,0113253,0146016,
    -0041544,0123272,0012463,0176771,
    -0041460,0051160,0103560,0156511,
    -0041152,0172624,0117772,0030755,
    -0040413,0170713,0151545,0176413,
    -0137417,0117512,0022154,0131671,
    -0137017,0104257,0071432,0007072,
    -0135540,0143363,0063137,0036166,
    -};
    -static unsigned short Q1[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041174,0075325,0004736,0120326,
    -0041465,0110044,0047561,0045567,
    -0041445,0042321,0012142,0030340,
    -0041160,0127074,0166076,0141051,
    -0040440,0046055,0040745,0150400,
    -0137421,0114146,0067330,0010621,
    -0137033,0175162,0025555,0114351,
    -0135564,0122773,0145750,0030357,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short P1[36] = {
    -0x8edb,0x9a3c,0x38c7,0x4010,
    -0x7982,0x92d5,0x866d,0x403f,
    -0x7fbf,0x42a6,0x94d7,0x404c,
    -0x1ba9,0x10ee,0x0a4e,0x4046,
    -0x463e,0x93ff,0x5eb2,0x402d,
    -0xbfa1,0x7a6c,0x7e39,0x4001,
    -0x9677,0x448d,0xf3e9,0xbfc1,
    -0x41c7,0xee63,0xf115,0xbfa1,
    -0xe78f,0x6ccb,0x18de,0xbf4c,
    -};
    -static unsigned short Q1[32] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xd41b,0xa13b,0x8f5a,0x402f,
    -0x296f,0x89ee,0xb204,0x4046,
    -0x461c,0x228c,0xa89a,0x4044,
    -0xd845,0x9d87,0x15c7,0x402e,
    -0xba20,0xa83c,0x0985,0x4004,
    -0x0232,0xcddb,0x330c,0xbfc2,
    -0xb31d,0x456d,0x7f4e,0xbfa3,
    -0x061e,0x797d,0x94bf,0xbf4e,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short P1[36] = {
    -0x4010,0x38c7,0x9a3c,0x8edb,
    -0x403f,0x866d,0x92d5,0x7982,
    -0x404c,0x94d7,0x42a6,0x7fbf,
    -0x4046,0x0a4e,0x10ee,0x1ba9,
    -0x402d,0x5eb2,0x93ff,0x463e,
    -0x4001,0x7e39,0x7a6c,0xbfa1,
    -0xbfc1,0xf3e9,0x448d,0x9677,
    -0xbfa1,0xf115,0xee63,0x41c7,
    -0xbf4c,0x18de,0x6ccb,0xe78f,
    -};
    -static unsigned short Q1[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x402f,0x8f5a,0xa13b,0xd41b,
    -0x4046,0xb204,0x89ee,0x296f,
    -0x4044,0xa89a,0x228c,0x461c,
    -0x402e,0x15c7,0x9d87,0xd845,
    -0x4004,0x0985,0xa83c,0xba20,
    -0xbfc2,0x330c,0xcddb,0x0232,
    -0xbfa3,0x7f4e,0x456d,0xb31d,
    -0xbf4e,0x94bf,0x797d,0x061e,
    -};
    -#endif
    -
    -/* Approximation for interval z = sqrt(-2 log y ) between 8 and 64
    - * i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890.
    - */
    -
    -#ifdef UNK
    -static double P2[9] = {
    -  3.23774891776946035970E0,
    -  6.91522889068984211695E0,
    -  3.93881025292474443415E0,
    -  1.33303460815807542389E0,
    -  2.01485389549179081538E-1,
    -  1.23716634817820021358E-2,
    -  3.01581553508235416007E-4,
    -  2.65806974686737550832E-6,
    -  6.23974539184983293730E-9,
    -};
    -static double Q2[8] = {
    -/*  1.00000000000000000000E0,*/
    -  6.02427039364742014255E0,
    -  3.67983563856160859403E0,
    -  1.37702099489081330271E0,
    -  2.16236993594496635890E-1,
    -  1.34204006088543189037E-2,
    -  3.28014464682127739104E-4,
    -  2.89247864745380683936E-6,
    -  6.79019408009981274425E-9,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short P2[36] = {
    -0040517,0033507,0036236,0125641,
    -0040735,0044616,0014473,0140133,
    -0040574,0012567,0114535,0102541,
    -0040252,0120340,0143474,0150135,
    -0037516,0051057,0115361,0031211,
    -0036512,0131204,0101511,0125144,
    -0035236,0016627,0043160,0140216,
    -0033462,0060512,0060141,0010641,
    -0031326,0062541,0101304,0077706,
    -};
    -static unsigned short Q2[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040700,0143322,0132137,0040501,
    -0040553,0101155,0053221,0140257,
    -0040260,0041071,0052573,0010004,
    -0037535,0066472,0177261,0162330,
    -0036533,0160475,0066666,0036132,
    -0035253,0174533,0027771,0044027,
    -0033502,0016147,0117666,0063671,
    -0031351,0047455,0141663,0054751,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short P2[36] = {
    -0xd574,0xe793,0xe6e8,0x4009,
    -0x780b,0xc327,0xa931,0x401b,
    -0xb0ac,0xf32b,0x82ae,0x400f,
    -0x9a0c,0x18e7,0x541c,0x3ff5,
    -0x2651,0xf35e,0xca45,0x3fc9,
    -0x354d,0x9069,0x5650,0x3f89,
    -0x1812,0xe8ce,0xc3b2,0x3f33,
    -0x2234,0x4c0c,0x4c29,0x3ec6,
    -0x8ff9,0x3058,0xccac,0x3e3a,
    -};
    -static unsigned short Q2[32] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xe828,0x568b,0x18da,0x4018,
    -0x3816,0xaad2,0x704d,0x400d,
    -0x6200,0x2aaf,0x0847,0x3ff6,
    -0x3c9b,0x5fd6,0xada7,0x3fcb,
    -0xc78b,0xadb6,0x7c27,0x3f8b,
    -0x2903,0x65ff,0x7f2b,0x3f35,
    -0xccf7,0xf3f6,0x438c,0x3ec8,
    -0x6b3d,0xb876,0x29e5,0x3e3d,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short P2[36] = {
    -0x4009,0xe6e8,0xe793,0xd574,
    -0x401b,0xa931,0xc327,0x780b,
    -0x400f,0x82ae,0xf32b,0xb0ac,
    -0x3ff5,0x541c,0x18e7,0x9a0c,
    -0x3fc9,0xca45,0xf35e,0x2651,
    -0x3f89,0x5650,0x9069,0x354d,
    -0x3f33,0xc3b2,0xe8ce,0x1812,
    -0x3ec6,0x4c29,0x4c0c,0x2234,
    -0x3e3a,0xccac,0x3058,0x8ff9,
    -};
    -static unsigned short Q2[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4018,0x18da,0x568b,0xe828,
    -0x400d,0x704d,0xaad2,0x3816,
    -0x3ff6,0x0847,0x2aaf,0x6200,
    -0x3fcb,0xada7,0x5fd6,0x3c9b,
    -0x3f8b,0x7c27,0xadb6,0xc78b,
    -0x3f35,0x7f2b,0x65ff,0x2903,
    -0x3ec8,0x438c,0xf3f6,0xccf7,
    -0x3e3d,0x29e5,0xb876,0x6b3d,
    -};
    -#endif
    -
    -double ndtri(y0)
    -double y0;
    -{
    -double x, y, z, y2, x0, x1;
    -int code;
    -
    -if( y0 <= 0.0 )
    -	{
    -	mtherr( "ndtri", DOMAIN );
    -	return( -MAXNUM );
    -	}
    -if( y0 >= 1.0 )
    -	{
    -	mtherr( "ndtri", DOMAIN );
    -	return( MAXNUM );
    -	}
    -code = 1;
    -y = y0;
    -if( y > (1.0 - 0.13533528323661269189) ) /* 0.135... = exp(-2) */
    -	{
    -	y = 1.0 - y;
    -	code = 0;
    -	}
    -
    -if( y > 0.13533528323661269189 )
    -	{
    -	y = y - 0.5;
    -	y2 = y * y;
    -	x = y + y * (y2 * polevl( y2, P0, 4)/p1evl( y2, Q0, 8 ));
    -	x = x * s2pi; 
    -	return(x);
    -	}
    -
    -x = sqrt( -2.0 * log(y) );
    -x0 = x - log(x)/x;
    -
    -z = 1.0/x;
    -if( x < 8.0 ) /* y > exp(-32) = 1.2664165549e-14 */
    -	x1 = z * polevl( z, P1, 8 )/p1evl( z, Q1, 8 );
    -else
    -	x1 = z * polevl( z, P2, 8 )/p1evl( z, Q2, 8 );
    -x = x0 - x1;
    -if( code != 0 )
    -	x = -x;
    -return( x );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/pdtr.c b/scipy-0.10.1/scipy/special/cephes/pdtr.c
    deleted file mode 100644
    index 97ebfa00e3..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/pdtr.c
    +++ /dev/null
    @@ -1,177 +0,0 @@
    -/*							pdtr.c
    - *
    - *	Poisson distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k;
    - * double m, y, pdtr();
    - *
    - * y = pdtr( k, m );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the sum of the first k terms of the Poisson
    - * distribution:
    - *
    - *   k         j
    - *   --   -m  m
    - *   >   e    --
    - *   --       j!
    - *  j=0
    - *
    - * The terms are not summed directly; instead the incomplete
    - * Gamma integral is employed, according to the relation
    - *
    - * y = pdtr( k, m ) = igamc( k+1, m ).
    - *
    - * The arguments must both be positive.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * See igamc().
    - *
    - */
    -/*							pdtrc()
    - *
    - *	Complemented poisson distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k;
    - * double m, y, pdtrc();
    - *
    - * y = pdtrc( k, m );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the sum of the terms k+1 to infinity of the Poisson
    - * distribution:
    - *
    - *  inf.       j
    - *   --   -m  m
    - *   >   e    --
    - *   --       j!
    - *  j=k+1
    - *
    - * The terms are not summed directly; instead the incomplete
    - * Gamma integral is employed, according to the formula
    - *
    - * y = pdtrc( k, m ) = igam( k+1, m ).
    - *
    - * The arguments must both be positive.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * See igam.c.
    - *
    - */
    -/*							pdtri()
    - *
    - *	Inverse Poisson distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int k;
    - * double m, y, pdtr();
    - *
    - * m = pdtri( k, y );
    - *
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Finds the Poisson variable x such that the integral
    - * from 0 to x of the Poisson density is equal to the
    - * given probability y.
    - *
    - * This is accomplished using the inverse Gamma integral
    - * function and the relation
    - *
    - *    m = igami( k+1, y ).
    - *
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * See igami.c.
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * pdtri domain    y < 0 or y >= 1       0.0
    - *                     k < 0
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -double pdtrc( k, m )
    -int k;
    -double m;
    -{
    -double v;
    -
    -if( (k < 0) || (m <= 0.0) )
    -	{
    -	mtherr( "pdtrc", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -v = k+1;
    -return( igam( v, m ) );
    -}
    -
    -
    -
    -double pdtr( k, m )
    -int k;
    -double m;
    -{
    -double v;
    -
    -if( (k < 0) || (m <= 0.0) )
    -	{
    -	mtherr( "pdtr", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -v = k+1;
    -return( igamc( v, m ) );
    -}
    -
    -
    -double pdtri( k, y )
    -int k;
    -double y;
    -{
    -double v;
    -
    -if( (k < 0) || (y < 0.0) || (y >= 1.0) )
    -	{
    -	mtherr( "pdtri", DOMAIN );
    -	return( NPY_NAN );
    -	}
    -v = k+1;
    -v = igami( v, y );
    -return( v );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/polevl.c b/scipy-0.10.1/scipy/special/cephes/polevl.c
    deleted file mode 100644
    index 3bc3448d08..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/polevl.c
    +++ /dev/null
    @@ -1,97 +0,0 @@
    -/*							polevl.c
    - *							p1evl.c
    - *
    - *	Evaluate polynomial
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * int N;
    - * double x, y, coef[N+1], polevl[];
    - *
    - * y = polevl( x, coef, N );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Evaluates polynomial of degree N:
    - *
    - *                     2          N
    - * y  =  C  + C x + C x  +...+ C x
    - *        0    1     2          N
    - *
    - * Coefficients are stored in reverse order:
    - *
    - * coef[0] = C  , ..., coef[N] = C  .
    - *            N                   0
    - *
    - *  The function p1evl() assumes that coef[N] = 1.0 and is
    - * omitted from the array.  Its calling arguments are
    - * otherwise the same as polevl().
    - *
    - *
    - * SPEED:
    - *
    - * In the interest of speed, there are no checks for out
    - * of bounds arithmetic.  This routine is used by most of
    - * the functions in the library.  Depending on available
    - * equipment features, the user may wish to rewrite the
    - * program in microcode or assembly language.
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.1:  December, 1988
    -Copyright 1984, 1987, 1988 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -#include "protos.h"
    -
    -double polevl( x, coef, N )
    -double x;
    -double coef[];
    -int N;
    -{
    -double ans;
    -int i;
    -double *p;
    -
    -p = coef;
    -ans = *p++;
    -i = N;
    -
    -do
    -	ans = ans * x  +  *p++;
    -while( --i );
    -
    -return( ans );
    -}
    -
    -/*							p1evl()	*/
    -/*                                          N
    - * Evaluate polynomial when coefficient of x  is 1.0.
    - * Otherwise same as polevl.
    - */
    -
    -double p1evl( x, coef, N )
    -double x;
    -double coef[];
    -int N;
    -{
    -double ans;
    -double *p;
    -int i;
    -
    -p = coef;
    -ans = x + *p++;
    -i = N-1;
    -
    -do
    -	ans = ans * x  + *p++;
    -while( --i );
    -
    -return( ans );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/polmisc.c b/scipy-0.10.1/scipy/special/cephes/polmisc.c
    deleted file mode 100644
    index 4078298654..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/polmisc.c
    +++ /dev/null
    @@ -1,287 +0,0 @@
    -
    -/* Square root, sine, cosine, and arctangent of polynomial.
    - * See polyn.c for data structures and discussion.
    - */
    -
-#include <stdio.h>
-#include <stdlib.h>
    -#include "mconf.h"
    -
    -/* Highest degree of polynomial to be handled
    -   by the polyn.c subroutine package.  */
    -#define N 16
    -/* Highest degree actually initialized at runtime.  */
    -extern int MAXPOL;
    -
    -/* Taylor series coefficients for various functions
    - */
    -double patan[N+1] = {
    -  0.0,     1.0,      0.0, -1.0/3.0,     0.0,
    -  1.0/5.0, 0.0, -1.0/7.0,      0.0, 1.0/9.0, 0.0, -1.0/11.0,
    -  0.0, 1.0/13.0, 0.0, -1.0/15.0, 0.0 };
    -
    -double psin[N+1] = {
    -  0.0, 1.0, 0.0,   -1.0/6.0,  0.0, 1.0/120.0,  0.0,
    -  -1.0/5040.0, 0.0, 1.0/362880.0, 0.0, -1.0/39916800.0,
    -  0.0, 1.0/6227020800.0, 0.0, -1.0/1.307674368e12, 0.0};
    -
    -double pcos[N+1] = {
    -  1.0, 0.0,   -1.0/2.0,  0.0, 1.0/24.0,  0.0,
    -  -1.0/720.0, 0.0, 1.0/40320.0, 0.0, -1.0/3628800.0, 0.0,
    -  1.0/479001600.0, 0.0, -1.0/8.7179291e10, 0.0, 1.0/2.0922789888e13};
    -
    -double pasin[N+1] = {
    -  0.0,     1.0,  0.0, 1.0/6.0,  0.0,
    -  3.0/40.0, 0.0, 15.0/336.0, 0.0, 105.0/3456.0, 0.0, 945.0/42240.0,
    -  0.0, 10395.0/599040.0 , 0.0, 135135.0/9676800.0 , 0.0
    -};
    -
    -/* Square root of 1 + x.  */
    -double psqrt[N+1] = {
    -  1.0, 1./2., -1./8., 1./16., -5./128., 7./256., -21./1024., 33./2048.,
    -  -429./32768., 715./65536., -2431./262144., 4199./524288., -29393./4194304.,
    -  52003./8388608., -185725./33554432., 334305./67108864.,
    -  -9694845./2147483648.};
    -
    -/* Arctangent of the ratio num/den of two polynomials.
    - */
    -void
    -polatn( num, den, ans, nn )
    -     double num[], den[], ans[];
    -     int nn;
    -{
    -  double a, t;
    -  double *polq, *polu, *polt;
    -  int i;
    -
    -  if (nn > N)
    -    {
    -      mtherr ("polatn", OVERFLOW);
    -      return;
    -    }
    -  /* arctan( a + b ) = arctan(a) + arctan( b/(1 + ab + a**2) ) */
    -  t = num[0];
    -  a = den[0];
    -  if( (t == 0.0) && (a == 0.0 ) )
    -    {
    -      t = num[1];
    -      a = den[1];
    -    }
    -  t = atan2( t, a );  /* arctan(num/den), the ANSI argument order */
    -  polq = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  polu = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  polt = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  polclr( polq, MAXPOL );
    -  i = poldiv( den, nn, num, nn, polq );
    -  a = polq[0]; /* a */
    -  polq[0] = 0.0; /* b */
    -  polmov( polq, nn, polu ); /* b */
    -  /* Form the polynomial
    -     1 + ab + a**2
    -     where a is a scalar.  */
    -  for( i=0; i<=nn; i++ )
    -    polu[i] *= a;
    -  polu[0] += 1.0 + a * a;
    -  poldiv( polu, nn, polq, nn, polt ); /* divide into b */
    -  polsbt( polt, nn, patan, nn, polu ); /* arctan(b)  */
    -  polu[0] += t; /* plus arctan(a) */
    -  polmov( polu, nn, ans );
    -  free( polt );
    -  free( polu );
    -  free( polq );
    -}
    -
    -
    -
    -/* Square root of a polynomial.
    - * Assumes the lowest degree nonzero term is dominant
    - * and of even degree.  An error message is given
    - * if the Newton iteration does not converge.
    - */
    -void
    -polsqt( pol, ans, nn )
    -     double pol[], ans[];
    -     int nn;
    -{
    -  double t;
    -  double *x, *y;
    -  int i, n;
    -#if 0
    -  double z[N+1];
    -  double u;
    -#endif
    -
    -  if (nn > N)
    -    {
    -      mtherr ("polatn", OVERFLOW);
    -      return;
    -    }
    -  x = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  y = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  polmov( pol, nn, x );
    -  polclr( y, MAXPOL );
    -
    -  /* Find lowest degree nonzero term.  */
    -  t = 0.0;
-  for (n = 0; n < nn; n++)
-    {
-      if (x[n] != 0.0)
-	goto nzero;
-    }
-  polmov( y, nn, ans );
-  return;
-
-nzero:
-
-  if (n > 0)
    -    {
    -      if (n & 1)
    -        {
    -	  printf("error, sqrt of odd polynomial\n");
    -	  return;
    -	}
    -      /* Divide by x^n.  */
    -      y[n] = x[n];
    -      poldiv (y, nn, pol, N, x);
    -    }
    -
    -  t = x[0];
    -  for( i=1; i<=nn; i++ )
    -    x[i] /= t;
    -  x[0] = 0.0;
    -  /* series development sqrt(1+x) = 1  +  x / 2  -  x**2 / 8  +  x**3 / 16
    -     hopes that first (constant) term is greater than what follows   */
    -  polsbt( x, nn, psqrt, nn, y);
    -  t = sqrt( t );
    -  for( i=0; i<=nn; i++ )
    -    y[i] *= t;
    -
    -  /* If first nonzero coefficient was at degree n > 0, multiply by
    -     x^(n/2).  */
    -  if (n > 0)
    -    {
    -      polclr (x, MAXPOL);
    -      x[n/2] = 1.0;
    -      polmul (x, nn, y, nn, y);
    -    }
    -#if 0
    -/* Newton iterations */
    -for( n=0; n<10; n++ )
    -	{
    -	poldiv( y, nn, pol, nn, z );
    -	poladd( y, nn, z, nn, y );
    -	for( i=0; i<=nn; i++ )
    -		y[i] *= 0.5;
    -	for( i=0; i<=nn; i++ )
    -		{
    -		u = fabs( y[i] - z[i] );
    -		if( u > 1.0e-15 )
    -			goto more;
    -		}
    -	goto done;
    -more:	;
    -	}
    -printf( "square root did not converge\n" );
    -done:
    -#endif /* 0 */
    -
    -polmov( y, nn, ans );
    -free( y );
    -free( x );
    -}
    -
    -
    -
    -/* Sine of a polynomial.
    - * The computation uses
    - *     sin(a+b) = sin(a) cos(b) + cos(a) sin(b)
    - * where a is the constant term of the polynomial and
    - * b is the sum of the rest of the terms.
    - * Since sin(b) and cos(b) are computed by series expansions,
    - * the value of b should be small.
    - */
    -void
    -polsin( x, y, nn )
    -     double x[], y[];
    -     int nn;
    -{
    -  double a, sc;
    -  double *w, *c;
    -  int i;
    -
    -  if (nn > N)
    -    {
    -      mtherr ("polatn", OVERFLOW);
    -      return;
    -    }
    -  w = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  c = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  polmov( x, nn, w );
    -  polclr( c, MAXPOL );
    -  polclr( y, nn );
    -  /* a, in the description, is x[0].  b is the polynomial x - x[0].  */
    -  a = w[0];
    -  /* c = cos (b) */
    -  w[0] = 0.0;
    -  polsbt( w, nn, pcos, nn, c );
    -  sc = sin(a);
    -  /* sin(a) cos (b) */
    -  for( i=0; i<=nn; i++ )
    -    c[i] *= sc;
    -  /* y = sin (b)  */
    -  polsbt( w, nn, psin, nn, y );
    -  sc = cos(a);
    -  /* cos(a) sin(b) */
    -  for( i=0; i<=nn; i++ )
    -    y[i] *= sc;
    -  poladd( c, nn, y, nn, y );
    -  free( c );
    -  free( w );
    -}
    -
    -
    -/* Cosine of a polynomial.
    - * The computation uses
    - *     cos(a+b) = cos(a) cos(b) - sin(a) sin(b)
    - * where a is the constant term of the polynomial and
    - * b is the sum of the rest of the terms.
    - * Since sin(b) and cos(b) are computed by series expansions,
    - * the value of b should be small.
    - */
    -void
    -polcos( x, y, nn )
    -     double x[], y[];
    -     int nn;
    -{
    -  double a, sc;
    -  double *w, *c;
    -  int i;
    -
    -  if (nn > N)
    -    {
    -      mtherr ("polatn", OVERFLOW);
    -      return;
    -    }
    -  w = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  c = (double * )malloc( (MAXPOL+1) * sizeof (double) );
    -  polmov( x, nn, w );
    -  polclr( c, MAXPOL );
    -  polclr( y, nn );
    -  a = w[0];
    -  w[0] = 0.0;
    -  /* c = cos(b)  */
    -  polsbt( w, nn, pcos, nn, c );
    -  sc = cos(a);
    -  /* cos(a) cos(b)  */
    -  for( i=0; i<=nn; i++ )
    -    c[i] *= sc;
    -  /* y = sin(b) */
    -  polsbt( w, nn, psin, nn, y );
    -  sc = sin(a);
    -  /* sin(a) sin(b) */
    -  for( i=0; i<=nn; i++ )
    -    y[i] *= sc;
    -  polsub( y, nn, c, nn, y );
    -  free( c );
    -  free( w );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/polrt.c b/scipy-0.10.1/scipy/special/cephes/polrt.c
    deleted file mode 100644
    index d12408687d..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/polrt.c
    +++ /dev/null
    @@ -1,220 +0,0 @@
    -/*							polrt.c
    - *
    - *	Find roots of a polynomial
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * typedef struct
    - *	{
    - *	double r;
    - *	double i;
    - *	}cmplx;
    - *
    - * double xcof[], cof[];
    - * int m;
    - * cmplx root[];
    - *
    - * polrt( xcof, cof, m, root )
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Iterative determination of the roots of a polynomial of
    - * degree m whose coefficient vector is xcof[].  The
    - * coefficients are arranged in ascending order; i.e., the
    - * coefficient of x**m is xcof[m].
    - *
    - * The array cof[] is working storage the same size as xcof[].
    - * root[] is the output array containing the complex roots.
    - *
    - *
    - * ACCURACY:
    - *
    - * Termination depends on evaluation of the polynomial at
    - * the trial values of the roots.  The values of multiple roots
    - * or of roots that are nearly equal may have poor relative
    - * accuracy after the first root in the neighborhood has been
    - * found.
    - *
    - */
    -
    -/*							polrt	*/
    -/* Complex roots of real polynomial */
    -/* number of coefficients is m + 1 ( i.e., m is degree of polynomial) */
    -
    -#include "mconf.h"
    -/*
    -typedef struct
    -	{
    -	double r;
    -	double i;
    -	}cmplx;
    -*/
    -
    -int polrt( xcof, cof, m, root )
    -double xcof[], cof[];
    -int m;
    -cmplx root[];
    -{
    -register double *p, *q;
    -int i, j, nsav, n, n1, n2, nroot, iter, retry;
    -int final;
    -double mag, cofj;
    -cmplx x0, x, xsav, dx, t, t1, u, ud;
    -
    -final = 0;
    -n = m;
    -if( n <= 0 )
    -	return(1);
    -if( n > 36 )
    -	return(2);
    -if( xcof[m] == 0.0 )
    -	return(4);
    -
    -n1 = n;
    -n2 = n;
    -nroot = 0;
    -nsav = n;
    -q = &xcof[0];
    -p = &cof[n];
    -for( j=0; j<=nsav; j++ )
    -	*p-- = *q++;	/*	cof[ n-j ] = xcof[j];*/
    -
    -nxtrut:
    -x0.r = 0.00500101;
    -x0.i = 0.01000101;
    -retry = 0;
    -
    -tryagn:
    -retry += 1;
    -x.r = x0.r;
    -
    -x0.r = -10.0 * x0.i;
    -x0.i = -10.0 * x.r;
    -
    -x.r = x0.r;
    -x.i = x0.i;
    -
    -finitr:
    -iter = 0;
    -
    -while( iter < 500 )
    -{
    -u.r = cof[n];
    -if( u.r == 0.0 )
    -	{		/* this root is zero */
    -	x.r = 0;
    -	n1 -= 1;
    -	n2 -= 1;
    -	goto zerrut;
    -	}
    -u.i = 0;
    -ud.r = 0;
    -ud.i = 0;
    -t.r = 1.0;
    -t.i = 0;
    -p = &cof[n-1];
    -for( i=0; i= 1.0e-5 )
    -	{
    -	cofj = x.r + x.r;
    -	mag = x.r * x.r  +  x.i * x.i;
    -	n -= 2;
    -	}
    -else
    -	{		/* root is real */
    -zerrut:
    -	x.i = 0;
    -	cofj = x.r;
    -	mag = 0;
    -	n -= 1;
    -	}
    -/* divide working polynomial cof(z) by z - x */
    -p = &cof[1];
    -*p += cofj * *(p-1);
    -for( j=1; j 0 )
    -	goto nxtrut;
    -return(0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/polyn.c b/scipy-0.10.1/scipy/special/cephes/polyn.c
    deleted file mode 100644
    index e13df4cc62..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/polyn.c
    +++ /dev/null
    @@ -1,455 +0,0 @@
    -/*							polyn.c
    - *							polyr.c
    - * Arithmetic operations on polynomials
    - *
    - * In the following descriptions a, b, c are polynomials of degree
    - * na, nb, nc respectively.  The degree of a polynomial cannot
    - * exceed a run-time value MAXPOL.  An operation that attempts
    - * to use or generate a polynomial of higher degree may produce a
    - * result that suffers truncation at degree MAXPOL.  The value of
    - * MAXPOL is set by calling the function
    - *
    - *     polini( maxpol );
    - *
    - * where maxpol is the desired maximum degree.  This must be
    - * done prior to calling any of the other functions in this module.
    - * Memory for internal temporary polynomial storage is allocated
    - * by polini().
    - *
    - * Each polynomial is represented by an array containing its
    - * coefficients, together with a separately declared integer equal
    - * to the degree of the polynomial.  The coefficients appear in
    - * ascending order; that is,
    - *
    - *                                        2                      na
    - * a(x)  =  a[0]  +  a[1] * x  +  a[2] * x   +  ...  +  a[na] * x  .
    - *
    - *
    - *
    - * sum = poleva( a, na, x );	Evaluate polynomial a(t) at t = x.
    - * polprt( a, na, D );		Print the coefficients of a to D digits.
    - * polclr( a, na );		Set a identically equal to zero, up to a[na].
    - * polmov( a, na, b );		Set b = a.
    - * poladd( a, na, b, nb, c );	c = b + a, nc = max(na,nb)
    - * polsub( a, na, b, nb, c );	c = b - a, nc = max(na,nb)
    - * polmul( a, na, b, nb, c );	c = b * a, nc = na+nb
    - *
    - *
    - * Division:
    - *
    - * i = poldiv( a, na, b, nb, c );	c = b / a, nc = MAXPOL
    - *
    - * returns i = the degree of the first nonzero coefficient of a.
    - * The computed quotient c must be divided by x^i.  An error message
    - * is printed if a is identically zero.
    - *
    - *
    - * Change of variables:
    - * If a and b are polynomials, and t = a(x), then
    - *     c(t) = b(a(x))
    - * is a polynomial found by substituting a(x) for t.  The
    - * subroutine call for this is
    - *
    - * polsbt( a, na, b, nb, c );
    - *
    - *
    - * Notes:
    - * poldiv() is an integer routine; poleva() is double.
    - * Any of the arguments a, b, c may refer to the same array.
    - *
    - */
    -
-#include <stdio.h>
-#include <stdlib.h>
    -#include "mconf.h"
    -
    -/* near pointer version of malloc() */
    -/*
    -#define malloc _nmalloc
    -#define free _nfree
    -*/
    -
    -/* Pointers to internal arrays.  Note poldiv() allocates
    - * and deallocates some temporary arrays every time it is called.
    - */
    -static double *pt1 = 0;
    -static double *pt2 = 0;
    -static double *pt3 = 0;
    -
    -/* Maximum degree of polynomial. */
    -int MAXPOL = 0;
    -extern int MAXPOL;
    -
    -/* Number of bytes (chars) in maximum size polynomial. */
    -static int psize = 0;
    -
    -
    -/* Initialize max degree of polynomials
    - * and allocate temporary storage.
    - */
    -void polini( maxdeg )
    -int maxdeg;
    -{
    -
    -MAXPOL = maxdeg;
    -psize = (maxdeg + 1) * sizeof(double);
    -
    -/* Release previously allocated memory, if any. */
    -if( pt3 )
    -	free(pt3);
    -if( pt2 )
    -	free(pt2);
    -if( pt1 )
    -	free(pt1);
    -
    -/* Allocate new arrays */
    -pt1 = (double * )malloc(psize); /* used by polsbt */
    -pt2 = (double * )malloc(psize); /* used by polsbt */
    -pt3 = (double * )malloc(psize); /* used by polmul */
    -
    -/* Report if failure */
    -if( (pt1 == NULL) || (pt2 == NULL) || (pt3 == NULL) )
    -	{
    -	mtherr( "polini", ERANGE );
    -	exit(1);
    -	}
    -}
    -
    -
    -
    -/* Print the coefficients of a, with d decimal precision.
    - */
    -static char *form = "abcdefghijk";
    -
    -void polprt( a, na, d )
    -double a[];
    -int na, d;
    -{
    -int i, j, d1;
    -char *p;
    -
    -/* Create format descriptor string for the printout.
    - * Do this partly by hand, since sprintf() may be too
    - * bug-ridden to accomplish this feat by itself.
    - */
    -p = form;
    -*p++ = '%';
    -d1 = d + 8;
    -sprintf( p, "%d ", d1 );
    -p += 1;
    -if( d1 >= 10 )
    -	p += 1;
    -*p++ = '.';
    -sprintf( p, "%d ", d );
    -p += 1;
    -if( d >= 10 )
    -	p += 1;
    -*p++ = 'e';
    -*p++ = ' ';
    -*p++ = '\0';
    -
    -
    -/* Now do the printing.
    - */
    -d1 += 1;
    -j = 0;
    -for( i=0; i<=na; i++ )
    -	{
    -/* Detect end of available line */
    -	j += d1;
    -	if( j >= 78 )
    -		{
    -		printf( "\n" );
    -		j = d1;
    -		}
    -	printf( form, a[i] );
    -	}
    -printf( "\n" );
    -}
    -
    -
    -
    -/* Set a = 0.
    - */
    -void polclr( a, n )
    -register double *a;
    -int n;
    -{
    -int i;
    -
    -if( n > MAXPOL )
    -	n = MAXPOL;
    -for( i=0; i<=n; i++ )
    -	*a++ = 0.0;
    -}
    -
    -
    -
    -/* Set b = a.
    - */
    -void polmov( a, na, b )
    -register double *a, *b;
    -int na;
    -{
    -int i;
    -
    -if( na > MAXPOL )
    -	na = MAXPOL;
    -
    -for( i=0; i<= na; i++ )
    -	{
    -	*b++ = *a++;
    -	}
    -}
    -
    -
    -/* c = b * a.
    - */
    -void polmul( a, na, b, nb, c )
    -double a[], b[], c[];
    -int na, nb;
    -{
    -int i, j, k, nc;
    -double x;
    -
    -nc = na + nb;
    -polclr( pt3, MAXPOL );
    -
    -for( i=0; i<=na; i++ )
    -	{
    -	x = a[i];
    -	for( j=0; j<=nb; j++ )
    -		{
    -		k = i + j;
    -		if( k > MAXPOL )
    -			break;
    -		pt3[k] += x * b[j];
    -		}
    -	}
    -
    -if( nc > MAXPOL )
    -	nc = MAXPOL;
    -for( i=0; i<=nc; i++ )
    -	c[i] = pt3[i];
    -}
    -
    -
    -
    - 
    -/* c = b + a.
    - */
    -void poladd( a, na, b, nb, c )
    -double a[], b[], c[];
    -int na, nb;
    -{
    -int i, n;
    -
    -
    -if( na > nb )
    -	n = na;
    -else
    -	n = nb;
    -
    -if( n > MAXPOL )
    -	n = MAXPOL;
    -
    -for( i=0; i<=n; i++ )
    -	{
    -	if( i > na )
    -		c[i] = b[i];
    -	else if( i > nb )
    -		c[i] = a[i];
    -	else
    -		c[i] = b[i] + a[i];
    -	}
    -}
    -
    -/* c = b - a.
    - */
    -void polsub( a, na, b, nb, c )
    -double a[], b[], c[];
    -int na, nb;
    -{
    -int i, n;
    -
    -
    -if( na > nb )
    -	n = na;
    -else
    -	n = nb;
    -
    -if( n > MAXPOL )
    -	n = MAXPOL;
    -
    -for( i=0; i<=n; i++ )
    -	{
    -	if( i > na )
    -		c[i] = b[i];
    -	else if( i > nb )
    -		c[i] = -a[i];
    -	else
    -		c[i] = b[i] - a[i];
    -	}
    -}
    -
    -
    -
    -/* c = b/a
    - */
    -int poldiv( a, na, b, nb, c )
    -double a[], b[], c[];
    -int na, nb;
    -{
    -double quot;
    -double *ta, *tb, *tq;
    -int i, j, k, sing;
    -
    -sing = 0;
    -
    -/* Allocate temporary arrays.  This would be quicker
    - * if done automatically on the stack, but stack space
    - * may be hard to obtain on a small computer.
    - */
    -ta = (double * )malloc( psize );
    -polclr( ta, MAXPOL );
    -polmov( a, na, ta );
    -
    -tb = (double * )malloc( psize );
    -polclr( tb, MAXPOL );
    -polmov( b, nb, tb );
    -
    -tq = (double * )malloc( psize );
    -polclr( tq, MAXPOL );
    -
    -/* What to do if leading (constant) coefficient
    - * of denominator is zero.
    - */
    -if( a[0] == 0.0 )
    -	{
    -	for( i=0; i<=na; i++ )
    -		{
    -		if( ta[i] != 0.0 )
    -			goto nzero;
    -		}
    -	mtherr( "poldiv", SING );
    -	goto done;
    -
    -nzero:
    -/* Reduce the degree of the denominator. */
    -	for( i=0; i MAXPOL )
    -			break;
    -		tb[k] -= quot * ta[j];
    -		}
    -	tq[i] = quot;
    -	}
    -/* Send quotient to output array. */
    -polmov( tq, MAXPOL, c );
    -
    -done:
    -
    -/* Restore allocated memory. */
    -free(tq);
    -free(tb);
    -free(ta);
    -return( sing );
    -}
    -
    -
    -
    -
    -/* Change of variables
    - * Substitute a(y) for the variable x in b(x).
    - * x = a(y)
    - * c(x) = b(x) = b(a(y)).
    - */
    -
    -void polsbt( a, na, b, nb, c )
    -double a[], b[], c[];
    -int na, nb;
    -{
    -int i, j, k, n2;
    -double x;
    -
    -/* 0th degree term:
    - */
    -polclr( pt1, MAXPOL );
    -pt1[0] = b[0];
    -
    -polclr( pt2, MAXPOL );
    -pt2[0] = 1.0;
    -n2 = 0;
    -
    -for( i=1; i<=nb; i++ )
    -	{
    -/* Form ith power of a. */
    -	polmul( a, na, pt2, n2, pt2 );
    -	n2 += na;
    -	x = b[i];
    -/* Add the ith coefficient of b times the ith power of a. */
    -	for( j=0; j<=n2; j++ )
    -		{
    -		if( j > MAXPOL )
    -			break;
    -		pt1[j] += x * pt2[j];
    -		}
    -	}
    -
    -k = n2 + nb;
    -if( k > MAXPOL )
    -	k = MAXPOL;
    -for( i=0; i<=k; i++ )
    -	c[i] = pt1[i];
    -}
    -
    -
    -
    -
    -/* Evaluate polynomial a(t) at t = x.
    - */
    -double poleva( a, na, x )
    -double a[];
    -int na;
    -double x;
    -{
    -double s;
    -int i;
    -
    -s = a[na];
    -for( i=na-1; i>=0; i-- )
    -	{
    -	s = s * x + a[i];
    -	}
    -return(s);
    -}
    -
    diff --git a/scipy-0.10.1/scipy/special/cephes/powi.c b/scipy-0.10.1/scipy/special/cephes/powi.c
    deleted file mode 100644
    index 622740b129..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/powi.c
    +++ /dev/null
    @@ -1,178 +0,0 @@
    -/*							powi.c
    - *
    - *	Real raised to integer power
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, powi();
    - * int n;
    - *
    - * y = powi( x, n );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns argument x raised to the nth power.
    - * The routine efficiently decomposes n as a sum of powers of
    - * two. The desired power is a product of two-to-the-kth
    - * powers of x.  Thus to compute the 32767 power of x requires
    - * 28 multiplications instead of 32767 multiplications.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *
    - *                      Relative error:
    - * arithmetic   x domain   n domain  # trials      peak         rms
    - *    DEC       .04,26     -26,26    100000       2.7e-16     4.3e-17
    - *    IEEE      .04,26     -26,26     50000       2.0e-15     3.8e-16
    - *    IEEE        1,2    -1022,1023   50000       8.6e-14     1.6e-14
    - *
    - * Returns MAXNUM on overflow, zero on underflow.
    - *
    - */
    -
    -/*							powi.c	*/
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -extern double NEGZERO, MAXNUM, MAXLOG, MINLOG, LOGE2;
    -
    -double powi( x, nn )
    -double x;
    -int nn;
    -{
    -int n, e, sign, asign, lx;
    -double w, y, s;
    -
    -/* See pow.c for these tests.  */
    -if( x == 0.0 )
    -	{
    -	if( nn == 0 )
    -		return( 1.0 );
    -	else if( nn < 0 )
    -	    return( NPY_INFINITY );
    -	else
    -	  {
    -	    if( nn & 1 )
    -	      return( x );
    -	    else
    -	      return( 0.0 );
    -	  }
    -	}
    -
    -if( nn == 0 )
    -	return( 1.0 );
    -
    -if( nn == -1 )
    -	return( 1.0/x );
    -
    -if( x < 0.0 )
    -	{
    -	asign = -1;
    -	x = -x;
    -	}
    -else
    -	asign = 0;
    -
    -
    -if( nn < 0 )
    -	{
    -	sign = -1;
    -	n = -nn;
    -	}
    -else
    -	{
    -	sign = 1;
    -	n = nn;
    -	}
    -
    -/* Even power will be positive. */
    -if( (n & 1) == 0 )
    -	asign = 0;
    -
    -/* Overflow detection */
    -
    -/* Calculate approximate logarithm of answer */
    -s = frexp( x, &lx );
    -e = (lx - 1)*n;
    -if( (e == 0) || (e > 64) || (e < -64) )
    -	{
    -	s = (s - 7.0710678118654752e-1) / (s +  7.0710678118654752e-1);
    -	s = (2.9142135623730950 * s - 0.5 + lx) * nn * LOGE2;
    -	}
    -else
    -	{
    -	s = LOGE2 * e;
    -	}
    -
    -if( s > MAXLOG )
    -	{
    -	mtherr( "powi", OVERFLOW );
    -	y = NPY_INFINITY;
    -	goto done;
    -	}
    -
    -#if DENORMAL
    -if( s < MINLOG )
    -	{
    -	y = 0.0;
    -	goto done;
    -	}
    -
    -/* Handle tiny denormal answer, but with less accuracy
    - * since roundoff error in 1.0/x will be amplified.
    - * The precise demarcation should be the gradual underflow threshold.
    - */
    -if( (s < (-MAXLOG+2.0)) && (sign < 0) )
    -	{
    -	x = 1.0/x;
    -	sign = -sign;
    -	}
    -#else
    -/* do not produce denormal answer */
    -if( s < -MAXLOG )
    -	return(0.0);
    -#endif
    -
    -
    -/* First bit of the power */
    -if( n & 1 )
    -	y = x;
    -		
    -else
    -	y = 1.0;
    -
    -w = x;
    -n >>= 1;
    -while( n )
    -	{
    -	w = w * w;	/* arg to the 2-to-the-kth power */
    -	if( n & 1 )	/* if that bit is set, then include in product */
    -		y *= w;
    -	n >>= 1;
    -	}
    -
    -if( sign < 0 )
    -	y = 1.0/y;
    -
    -done:
    -
    -if( asign )
    -	{
    -	/* odd power of negative number */
    -	if( y == 0.0 )
    -		y = NEGZERO;
    -	else
    -		y = -y;
    -	}
    -return(y);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/protos.h b/scipy-0.10.1/scipy/special/cephes/protos.h
    deleted file mode 100644
    index 2fb3b86906..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/protos.h
    +++ /dev/null
    @@ -1,190 +0,0 @@
    -#ifndef __SCIPY_SPECIAL_CEPHES
    -#define __SCIPY_SPECIAL_CEPHES
    -
    -/* Complex numeral.  */
    -typedef struct
    -	{
    -	double r;
    -	double i;
    -	} cmplx;
    -
    -extern double acosh ( double x );
    -extern int airy ( double x, double *ai, double *aip, double *bi, double *bip );
    -extern double asin ( double x );
    -extern double acos ( double x );
    -extern double asinh ( double x );
    -extern double atan ( double x );
    -extern double atan2 ( double y, double x );
    -extern double atanh ( double x );
    -extern double bdtrc ( int k, int n, double p );
    -extern double bdtr ( int k, int n, double p );
    -extern double bdtri ( int k, int n, double y );
    -extern double beta ( double a, double b );
    -extern double lbeta ( double a, double b );
    -extern double btdtr ( double a, double b, double x );
    -extern double cbrt ( double x );
    -extern double chbevl ( double x, double P[], int n );
    -extern double chdtrc ( double df, double x );
    -extern double chdtr ( double df, double x );
    -extern double chdtri ( double df, double y );
    -/*
    -extern void clog ( cmplx *z, cmplx *w );
    -extern void cexp ( cmplx *z, cmplx *w );
    -extern void csin ( cmplx *z, cmplx *w );
    -extern void ccos ( cmplx *z, cmplx *w );
    -extern void ctan ( cmplx *z, cmplx *w );
    -extern void ccot ( cmplx *z, cmplx *w );
    -extern void casin ( cmplx *z, cmplx *w );
    -extern void cacos ( cmplx *z, cmplx *w );
    -extern void catan ( cmplx *z, cmplx *w );
    -extern void cadd ( cmplx *a, cmplx *b, cmplx *c );
    -extern void csub ( cmplx *a, cmplx *b, cmplx *c );
    -extern void cmul ( cmplx *a, cmplx *b, cmplx *c );
    -extern void cdiv ( cmplx *a, cmplx *b, cmplx *c );
    -extern void cmov ( void *a, void *b );
    -extern void cneg ( cmplx *a );
    -*/
    -/*extern double cabs ( cmplx *z );*/
    -/* extern void csqrt ( cmplx *z, cmplx *w );*/
    -extern double cosh ( double x );
    -extern double dawsn ( double xx );
    -extern void eigens ( double A[], double RR[], double E[], int N );
    -extern double ellie ( double phi, double m );
    -extern double ellik ( double phi, double m );
    -extern double ellpe ( double x );
    -extern int ellpj ( double u, double m, double *sn, double *cn, double *dn, double *ph );
    -extern double ellpk ( double x );
    -extern double exp ( double x );
    -extern double exp10 ( double x );
    -extern double exp1m ( double x );
    -extern double exp2 ( double x );
    -extern double expn ( int n, double x );
    -extern double fabs ( double x );
    -extern double fac ( int i );
    -extern double fdtrc ( double a, double b, double x );
    -extern double fdtr ( double a, double b, double x );
    -extern double fdtri ( double a, double b, double y );
    -/*
    -extern int fftr ( double x[], int m0, double sine[] );
    -*/
    -extern int fresnl ( double xxa, double *ssa, double *cca );
    -extern double Gamma ( double x );
    -extern double lgam ( double x );
    -extern double gdtr ( double a, double b, double x );
    -extern double gdtrc ( double a, double b, double x );
    -extern int gels ( double A[], double R[], int M, double EPS, double AUX[] );
    -extern double hyp2f1 ( double a, double b, double c, double x );
    -extern double hyperg ( double a, double b, double x );
    -extern double hyp2f0 ( double a, double b, double x, int type, double *err );
    -extern double i0 ( double x );
    -extern double i0e ( double x );
    -extern double i1 ( double x );
    -extern double i1e ( double x );
    -extern double igamc ( double a, double x );
    -extern double igam ( double a, double x );
    -extern double igami ( double a, double y0 );
    -extern double incbet ( double aa, double bb, double xx );
    -extern double incbi ( double aa, double bb, double yy0 );
    -extern double iv ( double v, double x );
    -extern double j0 ( double x );
    -extern double y0 ( double x );
    -extern double j1 ( double x );
    -extern double y1 ( double x );
    -extern double jn ( int n, double x );
    -extern double jv ( double n, double x );
    -extern double k0 ( double x );
    -extern double k0e ( double x );
    -extern double k1 ( double x );
    -extern double k1e ( double x );
    -extern double kn ( int nn, double x );
    -/*
    -extern int levnsn ( int n, double r[], double a[], double e[], double refl[] );
    -*/
    -extern double log ( double x );
    -extern double log10 ( double x );
    -/*
    -extern double log2 ( double x );
    -*/
    -extern long lrand ( void );
    -extern long lsqrt ( long x );
    -extern int minv ( double A[], double X[], int n, double B[], int IPS[] );
    -extern void mmmpy ( int r, int c, double *A, double *B, double *Y );
    -extern int mtherr ( char *name, int code );
    -extern double polevl ( double x, double *P, int N );
    -extern double p1evl ( double x, double *P, int N );
    -extern void mtransp ( int n, double *A, double *T );
    -extern void mvmpy ( int r, int c, double *A, double *V, double *Y );
    -extern double nbdtrc ( int k, int n, double p );
    -extern double nbdtr ( int k, int n, double p );
    -extern double nbdtri ( int k, int n, double p );
    -extern double ndtr ( double a );
    -extern double erfc ( double a );
    -extern double erf ( double x );
    -extern double ndtri ( double y0 );
    -extern double pdtrc ( int k, double m );
    -extern double pdtr ( int k, double m );
    -extern double pdtri ( int k, double y );
    -extern double pow ( double x, double y );
    -extern double powi ( double x, int nn );
    -extern double psi ( double x );
    -extern void revers ( double y[], double x[], int n );
    -extern double rgamma ( double x );
    -extern double round ( double x );
    -extern int sprec ( void );
    -extern int dprec ( void );
    -extern int ldprec ( void );
    -extern int shichi ( double x, double *si, double *ci );
    -extern int sici ( double x, double *si, double *ci );
    -extern double simpsn ( double f[], double delta );
    -extern int simq ( double A[], double B[], double X[], int n, int flag, int IPS[] );
    -extern double sin ( double x );
    -extern double cos ( double x );
    -extern double radian ( double d, double m, double s );
    -/*
    -extern void sincos ( double x, double *s, double *c, int flg );
    -*/
    -extern double sindg ( double x );
    -extern double cosdg ( double x );
    -extern double sinh ( double x );
    -extern double spence ( double x );
    -extern double sqrt ( double x );
    -extern double stdtr ( int k, double t );
    -extern double stdtri ( int k, double p );
    -extern double onef2 ( double a, double b, double c, double x, double *err );
    -extern double threef0 ( double a, double b, double c, double x, double *err );
    -extern double struve ( double v, double x );
    -extern double tan ( double x );
    -extern double cot ( double x );
    -extern double tandg ( double x );
    -extern double cotdg ( double x );
    -extern double tanh ( double x );
    -extern double log1p ( double x );
    -extern double expm1 ( double x );
    -extern double cosm1 ( double x );
    -extern double yn ( int n, double x );
    -extern double zeta ( double x, double q );
    -extern double zetac ( double x );
    -extern int drand ( double *a );
    -
    -/* polyn.c */
    -extern void polini ( int maxdeg );
    -extern void polprt ( double a[], int na, int d );
    -extern void polclr ( double *a, int n );
    -extern void polmov ( double *a, int na, double *b );
    -extern void polmul ( double a[], int na, double b[], int nb, double c[] );
    -extern void poladd ( double a[], int na, double b[], int nb, double c[] );
    -extern void polsub ( double a[], int na, double b[], int nb, double c[] );
    -extern int poldiv ( double a[], int na, double b[], int nb, double c[] );
    -extern void polsbt ( double a[], int na, double b[], int nb, double c[] );
    -extern double poleva ( double a[], int na, double x );
    -/* polmisc.c */
    -extern void polatn ( double num[], double den[], double ans[], int nn );
    -extern void polsqt ( double pol[], double ans[], int nn );
    -extern void polsin ( double x[], double y[], int nn );
    -extern void polcos ( double x[], double y[], int nn );
    -
    -/* polrt.c */
    -int polrt( double [], double [], int, cmplx []);
    -
    -double yv(double v, double x );
    -#endif
    diff --git a/scipy-0.10.1/scipy/special/cephes/psi.c b/scipy-0.10.1/scipy/special/cephes/psi.c
    deleted file mode 100644
    index 6596876fe5..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/psi.c
    +++ /dev/null
    @@ -1,193 +0,0 @@
    -/*							psi.c
    - *
    - *	Psi (digamma) function
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, psi();
    - *
    - * y = psi( x );
    - *
    - *
    - * DESCRIPTION:
    - *
    - *              d      -
    - *   psi(x)  =  -- ln | (x)
    - *              dx
    - *
    - * is the logarithmic derivative of the gamma function.
    - * For integer x,
    - *                   n-1
    - *                    -
    - * psi(n) = -EUL  +   >  1/k.
    - *                    -
    - *                   k=1
    - *
    - * This formula is used for 0 < n <= 10.  If x is negative, it
    - * is transformed to a positive argument by the reflection
    - * formula  psi(1-x) = psi(x) + pi cot(pi x).
    - * For general positive x, the argument is made greater than 10
    - * using the recurrence  psi(x+1) = psi(x) + 1/x.
    - * Then the following asymptotic expansion is applied:
    - *
    - *                           inf.   B
    - *                            -      2k
    - * psi(x) = log(x) - 1/2x -   >   -------
    - *                            -        2k
    - *                           k=1   2k x
    - *
    - * where the B2k are Bernoulli numbers.
    - *
    - * ACCURACY:
    - *    Relative error (except absolute when |psi| < 1):
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0,30         2500       1.7e-16     2.0e-17
    - *    IEEE      0,30        30000       1.3e-15     1.4e-16
    - *    IEEE      -30,0       40000       1.5e-15     2.2e-16
    - *
    - * ERROR MESSAGES:
    - *     message         condition      value returned
    - * psi singularity    x integer <=0      MAXNUM
    - */
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double A[] = {
    - 8.33333333333333333333E-2,
    --2.10927960927960927961E-2,
    - 7.57575757575757575758E-3,
    --4.16666666666666666667E-3,
    - 3.96825396825396825397E-3,
    --8.33333333333333333333E-3,
    - 8.33333333333333333333E-2
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short A[] = {
    -0037252,0125252,0125252,0125253,
    -0136654,0145314,0126312,0146255,
    -0036370,0037017,0101740,0174076,
    -0136210,0104210,0104210,0104211,
    -0036202,0004040,0101010,0020202,
    -0136410,0104210,0104210,0104211,
    -0037252,0125252,0125252,0125253
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short A[] = {
    -0x5555,0x5555,0x5555,0x3fb5,
    -0x5996,0x9599,0x9959,0xbf95,
    -0x1f08,0xf07c,0x07c1,0x3f7f,
    -0x1111,0x1111,0x1111,0xbf71,
    -0x0410,0x1041,0x4104,0x3f70,
    -0x1111,0x1111,0x1111,0xbf81,
    -0x5555,0x5555,0x5555,0x3fb5
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short A[] = {
    -0x3fb5,0x5555,0x5555,0x5555,
    -0xbf95,0x9959,0x9599,0x5996,
    -0x3f7f,0x07c1,0xf07c,0x1f08,
    -0xbf71,0x1111,0x1111,0x1111,
    -0x3f70,0x4104,0x1041,0x0410,
    -0xbf81,0x1111,0x1111,0x1111,
    -0x3fb5,0x5555,0x5555,0x5555
    -};
    -#endif
    -
    -#define EUL 0.57721566490153286061
    -
    -extern double PI, MAXNUM;
    -
    -
    -double psi(x)
    -double x;
    -{
    -double p, q, nz, s, w, y, z;
    -int i, n, negative;
    -
    -negative = 0;
    -nz = 0.0;
    -
    -if( x <= 0.0 )
    -	{
    -	negative = 1;
    -	q = x;
    -	p = floor(q);
    -	if( p == q )
    -		{
    -		mtherr( "psi", SING );
    -		return( MAXNUM );
    -		}
    -/* Remove the zeros of tan(PI x)
    - * by subtracting the nearest integer from x
    - */
    -	nz = q - p;
    -	if( nz != 0.5 )
    -		{
    -		if( nz > 0.5 )
    -			{
    -			p += 1.0;
    -			nz = q - p;
    -			}
    -		nz = PI/tan(PI*nz);
    -		}
    -	else
    -		{
    -		nz = 0.0;
    -		}
    -	x = 1.0 - x;
    -	}
    -
    -/* check for positive integer up to 10 */
    -if( (x <= 10.0) && (x == floor(x)) )
    -	{
    -	y = 0.0;
    -	n = x;
    -	for( i=1; i 34.84425627277176174)
    -	{
    -	mtherr( name, UNDERFLOW );
    -	return(1.0/MAXNUM);
    -	}
    -if( x < -34.034 )
    -	{
    -	w = -x;
    -	z = sin( PI*w );
    -	if( z == 0.0 )
    -		return(0.0);
    -	if( z < 0.0 )
    -		{
    -		sign = 1;
    -		z = -z;
    -		}
    -	else
    -		sign = -1;
    -
    -	y = log( w * z ) - log(PI) + lgam(w);
    -	if( y < -MAXLOG )
    -		{
    -		mtherr( name, UNDERFLOW );
    -		return( sign * 1.0 / MAXNUM );
    -		}
    -	if( y > MAXLOG )
    -		{
    -		mtherr( name, OVERFLOW );
    -		return( sign * MAXNUM );
    -		}
    -	return( sign * exp(y));
    -	}
    -z = 1.0;
    -w = x;
    -
    -while( w > 1.0 )	/* Downward recurrence */
    -	{
    -	w -= 1.0;
    -	z *= w;
    -	}
    -while( w < 0.0 )	/* Upward recurrence */
    -	{
    -	z /= w;
    -	w += 1.0;
    -	}
    -if( w == 0.0 )		/* Nonpositive integer */
    -	return(0.0);
    -if( w == 1.0 )		/* Other integer */
    -	return( 1.0/z );
    -
    -y = w * ( 1.0 + chbevl( 4.0*w-2.0, R, 16 ) ) / z;
    -return(y);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/round.c b/scipy-0.10.1/scipy/special/cephes/round.c
    deleted file mode 100644
    index a845a66f51..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/round.c
    +++ /dev/null
    @@ -1,65 +0,0 @@
    -/*							round.c
    - *
    - *	Round double to nearest or even integer valued double
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, round();
    - *
    - * y = round(x);
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the nearest integer to x as a double precision
    - * floating point result.  If x ends in 0.5 exactly, the
    - * nearest even integer is chosen.
    - * 
    - *
    - *
    - * ACCURACY:
    - *
    - * If x is greater than 1/(2*MACHEP), its closest machine
    - * representation is already an integer, so rounding does
    - * not change it.
    - */
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -double round(double x)
    -{
    -double y, r;
    -
    -/* Largest integer <= x */
    -y = floor(x);
    -
    -/* Fractional part */
    -r = x - y;
    -
    -/* Round up to nearest. */
    -if( r > 0.5 )
    -	goto rndup;
    -
    -/* Round to even */
    -if( r == 0.5 )
    -	{
    -	r = y - 2.0 * floor( 0.5 * y );
    -	if( r == 1.0 )
    -		{
    -rndup:
    -		y += 1.0;
    -		}
    -	}
    -
    -/* Else round down. */
    -return(y);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/scipy_iv.c b/scipy-0.10.1/scipy/special/cephes/scipy_iv.c
    deleted file mode 100644
    index 53659c6e7e..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/scipy_iv.c
    +++ /dev/null
    @@ -1,642 +0,0 @@
    -/*							iv.c
    - *
    - *	Modified Bessel function of noninteger order
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double v, x, y, iv();
    - *
    - * y = iv( v, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns modified Bessel function of order v of the
    - * argument.  If x is negative, v must be integer valued.
    - *
    - */
    -/*							iv.c	*/
    -/*	Modified Bessel function of noninteger order		*/
    -/* If x < 0, then v must be an integer. */
    -
    -
    -/*
    - * Parts of the code are copyright:
    - *
    - *     Cephes Math Library Release 2.8:  June, 2000
    - *     Copyright 1984, 1987, 1988, 2000 by Stephen L. Moshier
    - *
    - * And other parts:
    - *
    - *     Copyright (c) 2006 Xiaogang Zhang
    - *     Use, modification and distribution are subject to the
    - *     Boost Software License, Version 1.0.
    - *
    - *     Boost Software License - Version 1.0 - August 17th, 2003
    - * 
    - *     Permission is hereby granted, free of charge, to any person or
    - *     organization obtaining a copy of the software and accompanying
    - *     documentation covered by this license (the "Software") to use, reproduce,
    - *     display, distribute, execute, and transmit the Software, and to prepare
    - *     derivative works of the Software, and to permit third-parties to whom the
    - *     Software is furnished to do so, all subject to the following:
    - *     
    - *     The copyright notices in the Software and this entire statement,
    - *     including the above license grant, this restriction and the following
    - *     disclaimer, must be included in all copies of the Software, in whole or
    - *     in part, and all derivative works of the Software, unless such copies or
    - *     derivative works are solely in the form of machine-executable object code
    - *     generated by a source language processor.
    - *     
    - *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
    - *     OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    - *     MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
    - *     NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
    - *     DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
    - *     WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    - *     CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    - *     SOFTWARE.
    - *
    - * And the rest are:
    - *
    - *     Copyright (C) 2009 Pauli Virtanen
    - *     Distributed under the same license as Scipy.
    - *
    - */
    -
    -#include 
    -#include "mconf.h"
    -extern double MACHEP, MAXNUM, PI, EULER;
    -
    -static double iv_asymptotic(double v, double x);
    -void ikv_asymptotic_uniform(double v, double x, double *Iv, double *Kv);
    -void ikv_temme(double v, double x, double *Iv, double *Kv);
    -
    -double iv(double v, double x)
    -{
    -    int sign;
    -    double t, vp, ax, res;
    -
    -    /* If v is a negative integer, invoke symmetry */
    -    t = floor(v);
    -    if (v < 0.0) {
    -	if (t == v) {
    -	    v = -v;		/* symmetry */
    -	    t = -t;
    -	}
    -    }
    -    /* If x is negative, require v to be an integer */
    -    sign = 1;
    -    if (x < 0.0) {
    -	if (t != v) {
    -	    mtherr("iv", DOMAIN);
    -	    return (NPY_NAN);
    -	}
    -	if (v != 2.0 * floor(v / 2.0)) {
    -	    sign = -1;
    -        }
    -    }
    -
    -    /* Avoid logarithm singularity */
    -    if (x == 0.0) {
    -	if (v == 0.0) {
    -	    return 1.0;
    -        }
    -	if (v < 0.0) {
    -	    mtherr("iv", OVERFLOW);
    -	    return MAXNUM;
    -	} else
    -	    return 0.0;
    -    }
    -
    -    ax = fabs(x);
    -    if (fabs(v) > 50) {
    -        /*
    -         * Uniform asymptotic expansion for large orders.
    -         *
    -         * This appears to overflow slightly later than the Boost
    -         * implementation of Temme's method.
    -         */
    -        ikv_asymptotic_uniform(v, ax, &res, NULL);
    -    }
    -    else {
    -        /* Otherwise: Temme's method */
    -        ikv_temme(v, ax, &res, NULL);
    -    }
    -    res *= sign;
    -    return res;
    -}
    -
    -
    -/*
    - * Compute Iv from (AMS5 9.7.1), asymptotic expansion for large |z|
    - * Iv ~ exp(x)/sqrt(2 pi x) ( 1 + (4*v*v-1)/8x + (4*v*v-1)(4*v*v-9)/8x/2! + ...)
    - */
    -static double iv_asymptotic(double v, double x)
    -{
    -    double mu, mup;
    -    double sum, term, prefactor, factor;
    -    int k;
    -
    -    prefactor = exp(x) / sqrt(2 * PI * x);
    -
    -    if (prefactor == NPY_INFINITY) {
    -	return prefactor;
    -    }
    -
    -    mu = 4 * v * v;
    -    sum = 1.0;
    -    term = 1.0;
    -    k = 1;
    -
    -    do {
    -	factor = (mu - (2 * k - 1) * (2 * k - 1)) / (8 * x) / k;
    -	if (k > 100) {
    -	    /* didn't converge */
    -	    mtherr("iv(iv_asymptotic)", TLOSS);
    -	    break;
    -	}
    -	term *= -factor;
    -	sum += term;
    -	++k;
    -    } while (fabs(term) > MACHEP * fabs(sum));
    -    return sum * prefactor;
    -}
    -
    -
    -/*
    - * Uniform asymptotic expansion factors, (AMS5 9.3.9; AMS5 9.3.10)
    - *
    - * Computed with:
    - * --------------------
    -   import numpy as np
    -   t = np.poly1d([1,0])
    -   def up1(p):
    -       return .5*t*t*(1-t*t)*p.deriv() + 1/8. * ((1-5*t*t)*p).integ()
    -   us = [np.poly1d([1])]
    -   for k in range(10):
    -       us.append(up1(us[-1]))
    -   n = us[-1].order
    -   for p in us:
    -       print "{" + ", ".join(["0"]*(n-p.order) + map(repr, p)) + "},"
    -   print "N_UFACTORS", len(us)
    -   print "N_UFACTOR_TERMS", us[-1].order + 1
    - * --------------------
    - */
    -#define N_UFACTORS 11
    -#define N_UFACTOR_TERMS 31
    -static const double asymptotic_ufactors[N_UFACTORS][N_UFACTOR_TERMS] = {
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -     0, 0, 0, 0, 0, 0, 0, 1},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -     0, 0, 0, 0, -0.20833333333333334, 0.0, 0.125, 0.0},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -     0, 0.3342013888888889, 0.0, -0.40104166666666669, 0.0, 0.0703125, 0.0,
    -     0.0},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -     -1.0258125964506173, 0.0, 1.8464626736111112, 0.0,
    -     -0.89121093750000002, 0.0, 0.0732421875, 0.0, 0.0, 0.0},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -     4.6695844234262474, 0.0, -11.207002616222995, 0.0, 8.78912353515625,
    -     0.0, -2.3640869140624998, 0.0, 0.112152099609375, 0.0, 0.0, 0.0, 0.0},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -28.212072558200244, 0.0,
    -     84.636217674600744, 0.0, -91.818241543240035, 0.0, 42.534998745388457,
    -     0.0, -7.3687943594796312, 0.0, 0.22710800170898438, 0.0, 0.0, 0.0,
    -     0.0, 0.0},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 212.5701300392171, 0.0,
    -     -765.25246814118157, 0.0, 1059.9904525279999, 0.0,
    -     -699.57962737613275, 0.0, 218.19051174421159, 0.0,
    -     -26.491430486951554, 0.0, 0.57250142097473145, 0.0, 0.0, 0.0, 0.0,
    -     0.0, 0.0},
    -    {0, 0, 0, 0, 0, 0, 0, 0, 0, -1919.4576623184068, 0.0,
    -     8061.7221817373083, 0.0, -13586.550006434136, 0.0, 11655.393336864536,
    -     0.0, -5305.6469786134048, 0.0, 1200.9029132163525, 0.0,
    -     -108.09091978839464, 0.0, 1.7277275025844574, 0.0, 0.0, 0.0, 0.0, 0.0,
    -     0.0, 0.0},
    -    {0, 0, 0, 0, 0, 0, 20204.291330966149, 0.0, -96980.598388637503, 0.0,
    -     192547.0012325315, 0.0, -203400.17728041555, 0.0, 122200.46498301747,
    -     0.0, -41192.654968897557, 0.0, 7109.5143024893641, 0.0,
    -     -493.915304773088, 0.0, 6.074042001273483, 0.0, 0.0, 0.0, 0.0, 0.0,
    -     0.0, 0.0, 0.0},
    -    {0, 0, 0, -242919.18790055133, 0.0, 1311763.6146629769, 0.0,
    -     -2998015.9185381061, 0.0, 3763271.2976564039, 0.0,
    -     -2813563.2265865342, 0.0, 1268365.2733216248, 0.0,
    -     -331645.17248456361, 0.0, 45218.768981362737, 0.0,
    -     -2499.8304818112092, 0.0, 24.380529699556064, 0.0, 0.0, 0.0, 0.0, 0.0,
    -     0.0, 0.0, 0.0, 0.0},
    -    {3284469.8530720375, 0.0, -19706819.11843222, 0.0, 50952602.492664628,
    -     0.0, -74105148.211532637, 0.0, 66344512.274729028, 0.0,
    -     -37567176.660763353, 0.0, 13288767.166421819, 0.0,
    -     -2785618.1280864552, 0.0, 308186.40461266245, 0.0,
    -     -13886.089753717039, 0.0, 110.01714026924674, 0.0, 0.0, 0.0, 0.0, 0.0,
    -     0.0, 0.0, 0.0, 0.0, 0.0}
    -};
    -
    -
    -/*
    - * Compute Iv, Kv from (AMS5 9.7.7 + 9.7.8), asymptotic expansion for large v
    - */
    -void ikv_asymptotic_uniform(double v, double x,
    -                            double *i_value, double *k_value)
    -{
    -    double i_prefactor, k_prefactor;
    -    double t, t2, eta, z;
    -    double i_sum, k_sum, term, divisor;
    -    int k, n;
    -    int sign = 1;
    -
    -    if (v < 0) {
    -	/* Negative v; compute I_{-v} and K_{-v} and use (AMS 9.6.2) */
    -	sign = -1;
    -	v = -v;
    -    }
    -
    -    z = x / v;
    -    t = 1 / sqrt(1 + z * z);
    -    t2 = t * t;
    -    eta = sqrt(1 + z * z) + log(z / (1 + 1 / t));
    -
    -    i_prefactor = sqrt(t / (2 * PI * v)) * exp(v * eta);
    -    i_sum = 1.0;
    -
    -    k_prefactor = sqrt(PI * t / (2 * v)) * exp(-v * eta);
    -    k_sum = 1.0;
    -
    -    divisor = v;
    -    for (n = 1; n < N_UFACTORS; ++n) {
    -	/*
    -         * Evaluate u_k(t) with Horner's scheme;
    -	 * (using the knowledge about which coefficients are zero)
    -	 */
    -	term = 0;
    -	for (k = N_UFACTOR_TERMS - 1 - 3 * n;
    -	     k < N_UFACTOR_TERMS - n; k += 2) {
    -	    term *= t2;
    -	    term += asymptotic_ufactors[n][k];
    -	}
    -	for (k = 1; k < n; k += 2) {
    -	    term *= t2;
    -        }
    -	if (n % 2 == 1) {
    -	    term *= t;
    -        }
    -
    -	/* Sum terms */
    -	term /= divisor;
    -	i_sum += term;
    -        k_sum += (n % 2 == 0) ? term : -term;
    -
    -	/* Check convergence */
    -	if (fabs(term) < MACHEP) {
    -	    break;
    -        }
    -
    -	divisor *= v;
    -    }
    -
    -    if (fabs(term) > 1e-3*fabs(i_sum)) {
    -	/* Didn't converge */
    -	mtherr("ikv_asymptotic_uniform", TLOSS);
    -    }
    -    if (fabs(term) > MACHEP*fabs(i_sum)) {
    -	/* Some precision lost */
    -	mtherr("ikv_asymptotic_uniform", PLOSS);
    -    }
    -
    -    if (k_value != NULL) {
    -        /* symmetric in v */
    -        *k_value = k_prefactor * k_sum;
    -    }
    -
    -    if (i_value != NULL) {
    -        if (sign == 1) {
    -            *i_value = i_prefactor * i_sum;
    -        }
    -        else {
    -            /* (AMS 9.6.2) */
    -            *i_value = (i_prefactor * i_sum
    -                        + (2/PI) * sin(PI*v) * k_prefactor * k_sum);
    -        }
    -    }
    -}
    -
    -
    -/*
    - * The following code originates from the Boost C++ library,
    - * from file `boost/math/special_functions/detail/bessel_ik.hpp`,
    - * converted from C++ to C.
    - */
    -
    -#ifdef DEBUG
    -#define BOOST_ASSERT(a) assert(a)
    -#else
    -#define BOOST_ASSERT(a)
    -#endif
    -
    -/*
    - * Modified Bessel functions of the first and second kind of fractional order
    - *
    - * Calculate K(v, x) and K(v+1, x) by method analogous to
    - * Temme, Journal of Computational Physics, vol 21, 343 (1976)
    - */
    -static int temme_ik_series(double v, double x, double *K, double *K1)
    -{
    -    double f, h, p, q, coef, sum, sum1, tolerance;
    -    double a, b, c, d, sigma, gamma1, gamma2;
    -    unsigned long k;
    -    double gp;
    -    double gm;
    -
    -
    -    /*
    -     * |x| <= 2, Temme series converge rapidly
    -     * |x| > 2, the larger the |x|, the slower the convergence
    -     */
    -    BOOST_ASSERT(fabs(x) <= 2);
    -    BOOST_ASSERT(fabs(v) <= 0.5f);
    -
    -    gp = gamma(v + 1) - 1;
    -    gm = gamma(-v + 1) - 1;
    -
    -    a = log(x / 2);
    -    b = exp(v * a);
    -    sigma = -a * v;
    -    c = fabs(v) < MACHEP ? 1 : sin(PI * v) / (v * PI);
    -    d = fabs(sigma) < MACHEP ? 1 : sinh(sigma) / sigma;
    -    gamma1 = fabs(v) < MACHEP ? -EULER : (0.5f / v) * (gp - gm) * c;
    -    gamma2 = (2 + gp + gm) * c / 2;
    -
    -    /* initial values */
    -    p = (gp + 1) / (2 * b);
    -    q = (1 + gm) * b / 2;
    -    f = (cosh(sigma) * gamma1 + d * (-a) * gamma2) / c;
    -    h = p;
    -    coef = 1;
    -    sum = coef * f;
    -    sum1 = coef * h;
    -
    -    /* series summation */
    -    tolerance = MACHEP;
    -    for (k = 1; k < MAXITER; k++) {
    -	f = (k * f + p + q) / (k * k - v * v);
    -	p /= k - v;
    -	q /= k + v;
    -	h = p - k * f;
    -	coef *= x * x / (4 * k);
    -	sum += coef * f;
    -	sum1 += coef * h;
    -	if (fabs(coef * f) < fabs(sum) * tolerance) {
    -	    break;
    -	}
    -    }
    -    if (k == MAXITER) {
    -	mtherr("ikv_temme(temme_ik_series)", TLOSS);
    -    }
    -
    -    *K = sum;
    -    *K1 = 2 * sum1 / x;
    -
    -    return 0;
    -}
    -
    -/* Evaluate continued fraction fv = I_(v+1) / I_v, derived from
    - * Abramowitz and Stegun, Handbook of Mathematical Functions, 1972, 9.1.73 */
    -static int CF1_ik(double v, double x, double *fv)
    -{
    -    double C, D, f, a, b, delta, tiny, tolerance;
    -    unsigned long k;
    -
    -
    -    /*
    -     * |x| <= |v|, CF1_ik converges rapidly
    -     * |x| > |v|, CF1_ik needs O(|x|) iterations to converge
    -     */
    -
    -    /*
    -     * modified Lentz's method, see
    -     * Lentz, Applied Optics, vol 15, 668 (1976)
    -     */
    -    tolerance = 2 * MACHEP;
    -    tiny = 1 / sqrt(MAXNUM);
    -    C = f = tiny;		/* b0 = 0, replace with tiny */
    -    D = 0;
    -    for (k = 1; k < MAXITER; k++) {
    -	a = 1;
    -	b = 2 * (v + k) / x;
    -	C = b + a / C;
    -	D = b + a * D;
    -	if (C == 0) {
    -	    C = tiny;
    -	}
    -	if (D == 0) {
    -	    D = tiny;
    -	}
    -	D = 1 / D;
    -	delta = C * D;
    -	f *= delta;
    -	if (fabs(delta - 1) <= tolerance) {
    -	    break;
    -	}
    -    }
    -    if (k == MAXITER) {
    -	mtherr("ikv_temme(CF1_ik)", TLOSS);
    -    }
    -
    -    *fv = f;
    -
    -    return 0;
    -}
    -
    -/*
    - * Calculate K(v, x) and K(v+1, x) by evaluating continued fraction
    - * z1 / z0 = U(v+1.5, 2v+1, 2x) / U(v+0.5, 2v+1, 2x), see
    - * Thompson and Barnett, Computer Physics Communications, vol 47, 245 (1987)
    - */
    -static int CF2_ik(double v, double x, double *Kv, double *Kv1)
    -{
    -
    -    double S, C, Q, D, f, a, b, q, delta, tolerance, current, prev;
    -    unsigned long k;
    -
    -    /*
    -     * |x| >= |v|, CF2_ik converges rapidly
    -     * |x| -> 0, CF2_ik fails to converge
    -     */
    -
    -    BOOST_ASSERT(fabs(x) > 1);
    -
    -    /*
    -     * Steed's algorithm, see Thompson and Barnett,
    -     * Journal of Computational Physics, vol 64, 490 (1986)
    -     */
    -    tolerance = MACHEP;
    -    a = v * v - 0.25f;
    -    b = 2 * (x + 1);		/* b1 */
    -    D = 1 / b;			/* D1 = 1 / b1 */
    -    f = delta = D;		/* f1 = delta1 = D1, coincidence */
    -    prev = 0;			/* q0 */
    -    current = 1;		/* q1 */
    -    Q = C = -a;			/* Q1 = C1 because q1 = 1 */
    -    S = 1 + Q * delta;		/* S1 */
    -    for (k = 2; k < MAXITER; k++)	/* starting from 2 */
    -    {
    -	/* continued fraction f = z1 / z0 */
    -	a -= 2 * (k - 1);
    -	b += 2;
    -	D = 1 / (b + a * D);
    -	delta *= b * D - 1;
    -	f += delta;
    -
    -	/* series summation S = 1 + \sum_{n=1}^{\infty} C_n * z_n / z_0 */
    -	q = (prev - (b - 2) * current) / a;
    -	prev = current;
    -	current = q;		/* forward recurrence for q */
    -	C *= -a / k;
    -	Q += C * q;
    -	S += Q * delta;
    -
    -	/* S converges slower than f */
    -	if (fabs(Q * delta) < fabs(S) * tolerance) {
    -	    break;
    -	}
    -    }
    -    if (k == MAXITER) {
    -	mtherr("ikv_temme(CF2_ik)", TLOSS);
    -    }
    -
    -    *Kv = sqrt(PI / (2 * x)) * exp(-x) / S;
    -    *Kv1 = *Kv * (0.5f + v + x + (v * v - 0.25f) * f) / x;
    -
    -    return 0;
    -}
    -
    -/* Flags for what to compute */
    -enum {
    -    need_i = 0x1,
    -    need_k = 0x2
    -};
    -
    -/*
    - * Compute I(v, x) and K(v, x) simultaneously by Temme's method, see
    - * Temme, Journal of Computational Physics, vol 19, 324 (1975)
    - */
    -void ikv_temme(double v, double x, double *Iv_p, double *Kv_p)
    -{
    -    /* Kv1 = K_(v+1), fv = I_(v+1) / I_v */
    -    /* Ku1 = K_(u+1), fu = I_(u+1) / I_u */
    -    double u, Iv, Kv, Kv1, Ku, Ku1, fv;
    -    double W, current, prev, next;
    -    int reflect = 0;
    -    unsigned n, k;
    -    int kind;
    -
    -    kind = 0;
    -    if (Iv_p != NULL) {
    -	kind |= need_i;
    -    }
    -    if (Kv_p != NULL) {
    -	kind |= need_k;
    -    }
    -
    -    if (v < 0) {
    -	reflect = 1;
    -	v = -v;			/* v is non-negative from here */
    -	kind |= need_k;
    -    }
    -    n = round(v);
    -    u = v - n;			/* -1/2 <= u < 1/2 */
    -
    -    if (x < 0) {
    -        if (Iv_p != NULL) *Iv_p = NPY_NAN;
    -        if (Kv_p != NULL) *Kv_p = NPY_NAN;
    -	mtherr("ikv_temme", DOMAIN);
    -	return;
    -    }
    -    if (x == 0) {
    -	Iv = (v == 0) ? 1 : 0;
    -	if (kind & need_k) {
    -	    mtherr("ikv_temme", OVERFLOW);
    -	    Kv = NPY_INFINITY;
    -	}
    -        else {
    -	    Kv = NPY_NAN;		/* any value will do */
    -	}
    -
    -	if (reflect && (kind & need_i)) {
    -	    double z = (u + n % 2);
    -	    Iv = sin(PI * z) == 0 ? Iv : NPY_INFINITY;
    -	    if (Iv == NPY_INFINITY || Iv == -NPY_INFINITY) {
    -		mtherr("ikv_temme", OVERFLOW);
    -            }
    -	}
    -
    -	if (Iv_p != NULL) {
    -            *Iv_p = Iv;
    -        }
    -	if (Kv_p != NULL) {
    -            *Kv_p = Kv;
    -        }
    -	return;
    -    }
    -    /* x is positive until reflection */
    -    W = 1 / x;			/* Wronskian */
    -    if (x <= 2) {		/* x in (0, 2] */
    -	temme_ik_series(u, x, &Ku, &Ku1);	/* Temme series */
    -    }
    -    else {		                 	/* x in (2, \infty) */
    -	CF2_ik(u, x, &Ku, &Ku1);	/* continued fraction CF2_ik */
    -    }
    -    prev = Ku;
    -    current = Ku1;
    -    for (k = 1; k <= n; k++) {	/* forward recurrence for K */
    -	next = 2 * (u + k) * current / x + prev;
    -	prev = current;
    -	current = next;
    -    }
    -    Kv = prev;
    -    Kv1 = current;
    -    if (kind & need_i) {
    -	double lim = (4 * v * v + 10) / (8 * x);
    -	lim *= lim;
    -	lim *= lim;
    -	lim /= 24;
    -	if ((lim < MACHEP * 10) && (x > 100)) {
    -	    /*
    -             * x is huge compared to v, CF1 may be very slow 
    -	     * to converge so use asymptotic expansion for large
    -	     * x case instead.  Note that the asymptotic expansion
    -	     * isn't very accurate - so it's deliberately very hard
    -	     * to get here - probably we're going to overflow:
    -             */
    -	    Iv = iv_asymptotic(v, x);
    -	}
    -        else {
    -	    CF1_ik(v, x, &fv);	/* continued fraction CF1_ik */
    -	    Iv = W / (Kv * fv + Kv1);	/* Wronskian relation */
    -	}
    -    }
    -    else {
    -	Iv = NPY_NAN;		/* any value will do */
    -    }
    -
    -    if (reflect) {
    -	double z = (u + n % 2);
    -	if (Iv_p != NULL) {
    -            *Iv_p = Iv + (2 / PI) * sin(PI * z) * Kv;	/* reflection formula */
    -        }
    -	if (Kv_p != NULL) {
    -            *Kv_p = Kv;
    -        }
    -    } else {
    -	if (Iv_p != NULL) {
    -            *Iv_p = Iv;
    -        }
    -	if (Kv_p != NULL) {
    -            *Kv_p = Kv;
    -        }
    -    }
    -    return;
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/setprec.c b/scipy-0.10.1/scipy/special/cephes/setprec.c
    deleted file mode 100644
    index bee098cda5..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/setprec.c
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -/* Null stubs for coprocessor precision settings */
    -
    -int
    -sprec(void) {return 0; }
    -
    -int
    -dprec(void) {return 0; }
    -
    -int
    -ldprec(void) {return 0; }
    diff --git a/scipy-0.10.1/scipy/special/cephes/shichi.c b/scipy-0.10.1/scipy/special/cephes/shichi.c
    deleted file mode 100644
    index 05ed5752aa..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/shichi.c
    +++ /dev/null
    @@ -1,592 +0,0 @@
    -/*							shichi.c
    - *
    - *	Hyperbolic sine and cosine integrals
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, Chi, Shi, shichi();
    - *
    - * shichi( x, &Chi, &Shi );
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Approximates the integrals
    - *
    - *                            x
    - *                            -
    - *                           | |   cosh t - 1
    - *   Chi(x) = eul + ln x +   |    -----------  dt,
    - *                         | |          t
    - *                          -
    - *                          0
    - *
    - *               x
    - *               -
    - *              | |  sinh t
    - *   Shi(x) =   |    ------  dt
    - *            | |       t
    - *             -
    - *             0
    - *
    - * where eul = 0.57721566490153286061 is Euler's constant.
    - * The integrals are evaluated by power series for x < 8
    - * and by Chebyshev expansions for x between 8 and 88.
    - * For large x, both functions approach exp(x)/2x.
    - * Arguments greater than 88 in magnitude return MAXNUM.
    - *
    - *
    - * ACCURACY:
    - *
    - * Test interval 0 to 88.
    - *                      Relative error:
    - * arithmetic   function  # trials      peak         rms
    - *    DEC          Shi       3000       9.1e-17
    - *    IEEE         Shi      30000       6.9e-16     1.6e-16
    - *        Absolute error, except relative when |Chi| > 1:
    - *    DEC          Chi       2500       9.3e-17
    - *    IEEE         Chi      30000       8.4e-16     1.4e-16
    - */
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -/* x exp(-x) shi(x), inverted interval 8 to 18 */
    -static double S1[] = {
    - 1.83889230173399459482E-17,
    --9.55485532279655569575E-17,
    - 2.04326105980879882648E-16,
    - 1.09896949074905343022E-15,
    --1.31313534344092599234E-14,
    - 5.93976226264314278932E-14,
    --3.47197010497749154755E-14,
    --1.40059764613117131000E-12,
    - 9.49044626224223543299E-12,
    --1.61596181145435454033E-11,
    --1.77899784436430310321E-10,
    - 1.35455469767246947469E-9,
    --1.03257121792819495123E-9,
    --3.56699611114982536845E-8,
    - 1.44818877384267342057E-7,
    - 7.82018215184051295296E-7,
    --5.39919118403805073710E-6,
    --3.12458202168959833422E-5,
    - 8.90136741950727517826E-5,
    - 2.02558474743846862168E-3,
    - 2.96064440855633256972E-2,
    - 1.11847751047257036625E0
    -};
    -
    -/* x exp(-x) shi(x), inverted interval 18 to 88 */
    -static double S2[] = {
    --1.05311574154850938805E-17,
    - 2.62446095596355225821E-17,
    - 8.82090135625368160657E-17,
    --3.38459811878103047136E-16,
    --8.30608026366935789136E-16,
    - 3.93397875437050071776E-15,
    - 1.01765565969729044505E-14,
    --4.21128170307640802703E-14,
    --1.60818204519802480035E-13,
    - 3.34714954175994481761E-13,
    - 2.72600352129153073807E-12,
    - 1.66894954752839083608E-12,
    --3.49278141024730899554E-11,
    --1.58580661666482709598E-10,
    --1.79289437183355633342E-10,
    - 1.76281629144264523277E-9,
    - 1.69050228879421288846E-8,
    - 1.25391771228487041649E-7,
    - 1.16229947068677338732E-6,
    - 1.61038260117376323993E-5,
    - 3.49810375601053973070E-4,
    - 1.28478065259647610779E-2,
    - 1.03665722588798326712E0
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short S1[] = {
    -0022251,0115635,0165120,0006574,
    -0122734,0050751,0020305,0101356,
    -0023153,0111154,0011103,0177462,
    -0023636,0060321,0060253,0124246,
    -0124554,0106655,0152525,0166400,
    -0025205,0140145,0171006,0106556,
    -0125034,0056427,0004205,0176022,
    -0126305,0016731,0025011,0134453,
    -0027046,0172453,0112604,0116235,
    -0127216,0022071,0116600,0137667,
    -0130103,0115126,0071104,0052535,
    -0030672,0025450,0010071,0141414,
    -0130615,0165136,0132137,0177737,
    -0132031,0031611,0074436,0175407,
    -0032433,0077602,0104345,0060076,
    -0033121,0165741,0167177,0172433,
    -0133665,0025262,0174621,0022612,
    -0134403,0006761,0124566,0145405,
    -0034672,0126332,0034737,0116744,
    -0036004,0137654,0037332,0131766,
    -0036762,0104466,0121445,0124326,
    -0040217,0025105,0062145,0042640
    -};
    -
    -static unsigned short S2[] = {
    -0122102,0041774,0016051,0055137,
    -0022362,0010125,0007651,0015773,
    -0022713,0062551,0040227,0071645,
    -0123303,0015732,0025731,0146570,
    -0123557,0064016,0002067,0067711,
    -0024215,0136214,0132374,0124234,
    -0024467,0051425,0071066,0064210,
    -0125075,0124305,0135123,0024170,
    -0125465,0010261,0005560,0034232,
    -0025674,0066602,0030724,0174557,
    -0026477,0151520,0051510,0067250,
    -0026352,0161076,0113154,0116271,
    -0127431,0116470,0177465,0127274,
    -0130056,0056174,0170315,0013321,
    -0130105,0020575,0075327,0036710,
    -0030762,0043625,0113046,0125035,
    -0031621,0033211,0154354,0022077,
    -0032406,0121555,0074270,0041141,
    -0033234,0000116,0041611,0173743,
    -0034207,0013263,0174715,0115563,
    -0035267,0063300,0175753,0117266,
    -0036522,0077633,0033255,0136200,
    -0040204,0130457,0014454,0166254
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short S1[] = {
    -0x01b0,0xbd4a,0x3373,0x3c75,
    -0xb05e,0x2418,0x8a3d,0xbc9b,
    -0x7fe6,0x8248,0x724d,0x3cad,
    -0x7515,0x2c15,0xcc1a,0x3cd3,
    -0xbda0,0xbaaa,0x91b5,0xbd0d,
    -0xd1ae,0xbe40,0xb80c,0x3d30,
    -0xbf82,0xe110,0x8ba2,0xbd23,
    -0x3725,0x2541,0xa3bb,0xbd78,
    -0x9394,0x72b0,0xdea5,0x3da4,
    -0x17f7,0x33b0,0xc487,0xbdb1,
    -0x8aac,0xce48,0x734a,0xbde8,
    -0x3862,0x0207,0x4565,0x3e17,
    -0xfffc,0xd68b,0xbd4b,0xbe11,
    -0xdf61,0x2f23,0x2671,0xbe63,
    -0xac08,0x511c,0x6ff0,0x3e83,
    -0xfea3,0x3dcf,0x3d7c,0x3eaa,
    -0x24b1,0x5f32,0xa556,0xbed6,
    -0xd961,0x352e,0x61be,0xbf00,
    -0xf3bd,0x473b,0x559b,0x3f17,
    -0x567f,0x87db,0x97f5,0x3f60,
    -0xb51b,0xd464,0x5126,0x3f9e,
    -0xa8b4,0xac8c,0xe548,0x3ff1
    -};
    -
    -static unsigned short S2[] = {
    -0x2b4c,0x8385,0x487f,0xbc68,
    -0x237f,0xa1f5,0x420a,0x3c7e,
    -0xee75,0x2812,0x6cad,0x3c99,
    -0x39af,0x457b,0x637b,0xbcb8,
    -0xedf9,0xc086,0xed01,0xbccd,
    -0x9513,0x969f,0xb791,0x3cf1,
    -0xcd11,0xae46,0xea62,0x3d06,
    -0x650f,0xb74a,0xb518,0xbd27,
    -0x0713,0x216e,0xa216,0xbd46,
    -0x9f2e,0x463a,0x8db0,0x3d57,
    -0x0dd5,0x0a69,0xfa6a,0x3d87,
    -0x9397,0xd2cd,0x5c47,0x3d7d,
    -0xb5d8,0x1fe6,0x33a7,0xbdc3,
    -0xa2da,0x9e19,0xcb8f,0xbde5,
    -0xe7b9,0xaf5a,0xa42f,0xbde8,
    -0xd544,0xb2c4,0x48f2,0x3e1e,
    -0x8488,0x3b1d,0x26d1,0x3e52,
    -0x084c,0xaf17,0xd46d,0x3e80,
    -0x3efc,0xc871,0x8009,0x3eb3,
    -0xb36e,0x7f39,0xe2d6,0x3ef0,
    -0x73d7,0x1f7d,0xecd8,0x3f36,
    -0xb790,0x66d5,0x4ff3,0x3f8a,
    -0x9d96,0xe325,0x9625,0x3ff0
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short S1[] = {
    -0x3c75,0x3373,0xbd4a,0x01b0,
    -0xbc9b,0x8a3d,0x2418,0xb05e,
    -0x3cad,0x724d,0x8248,0x7fe6,
    -0x3cd3,0xcc1a,0x2c15,0x7515,
    -0xbd0d,0x91b5,0xbaaa,0xbda0,
    -0x3d30,0xb80c,0xbe40,0xd1ae,
    -0xbd23,0x8ba2,0xe110,0xbf82,
    -0xbd78,0xa3bb,0x2541,0x3725,
    -0x3da4,0xdea5,0x72b0,0x9394,
    -0xbdb1,0xc487,0x33b0,0x17f7,
    -0xbde8,0x734a,0xce48,0x8aac,
    -0x3e17,0x4565,0x0207,0x3862,
    -0xbe11,0xbd4b,0xd68b,0xfffc,
    -0xbe63,0x2671,0x2f23,0xdf61,
    -0x3e83,0x6ff0,0x511c,0xac08,
    -0x3eaa,0x3d7c,0x3dcf,0xfea3,
    -0xbed6,0xa556,0x5f32,0x24b1,
    -0xbf00,0x61be,0x352e,0xd961,
    -0x3f17,0x559b,0x473b,0xf3bd,
    -0x3f60,0x97f5,0x87db,0x567f,
    -0x3f9e,0x5126,0xd464,0xb51b,
    -0x3ff1,0xe548,0xac8c,0xa8b4
    -};
    -
    -static unsigned short S2[] = {
    -0xbc68,0x487f,0x8385,0x2b4c,
    -0x3c7e,0x420a,0xa1f5,0x237f,
    -0x3c99,0x6cad,0x2812,0xee75,
    -0xbcb8,0x637b,0x457b,0x39af,
    -0xbccd,0xed01,0xc086,0xedf9,
    -0x3cf1,0xb791,0x969f,0x9513,
    -0x3d06,0xea62,0xae46,0xcd11,
    -0xbd27,0xb518,0xb74a,0x650f,
    -0xbd46,0xa216,0x216e,0x0713,
    -0x3d57,0x8db0,0x463a,0x9f2e,
    -0x3d87,0xfa6a,0x0a69,0x0dd5,
    -0x3d7d,0x5c47,0xd2cd,0x9397,
    -0xbdc3,0x33a7,0x1fe6,0xb5d8,
    -0xbde5,0xcb8f,0x9e19,0xa2da,
    -0xbde8,0xa42f,0xaf5a,0xe7b9,
    -0x3e1e,0x48f2,0xb2c4,0xd544,
    -0x3e52,0x26d1,0x3b1d,0x8488,
    -0x3e80,0xd46d,0xaf17,0x084c,
    -0x3eb3,0x8009,0xc871,0x3efc,
    -0x3ef0,0xe2d6,0x7f39,0xb36e,
    -0x3f36,0xecd8,0x1f7d,0x73d7,
    -0x3f8a,0x4ff3,0x66d5,0xb790,
    -0x3ff0,0x9625,0xe325,0x9d96
    -};
    -#endif
    -
    -
    -#ifdef UNK
    -/* x exp(-x) chin(x), inverted interval 8 to 18 */
    -static double C1[] = {
    --8.12435385225864036372E-18,
    - 2.17586413290339214377E-17,
    - 5.22624394924072204667E-17,
    --9.48812110591690559363E-16,
    - 5.35546311647465209166E-15,
    --1.21009970113732918701E-14,
    --6.00865178553447437951E-14,
    - 7.16339649156028587775E-13,
    --2.93496072607599856104E-12,
    --1.40359438136491256904E-12,
    - 8.76302288609054966081E-11,
    --4.40092476213282340617E-10,
    --1.87992075640569295479E-10,
    - 1.31458150989474594064E-8,
    --4.75513930924765465590E-8,
    --2.21775018801848880741E-7,
    - 1.94635531373272490962E-6,
    - 4.33505889257316408893E-6,
    --6.13387001076494349496E-5,
    --3.13085477492997465138E-4,
    - 4.97164789823116062801E-4,
    - 2.64347496031374526641E-2,
    - 1.11446150876699213025E0
    -};
    -
    -/* x exp(-x) chin(x), inverted interval 18 to 88 */
    -static double C2[] = {
    - 8.06913408255155572081E-18,
    --2.08074168180148170312E-17,
    --5.98111329658272336816E-17,
    - 2.68533951085945765591E-16,
    - 4.52313941698904694774E-16,
    --3.10734917335299464535E-15,
    --4.42823207332531972288E-15,
    - 3.49639695410806959872E-14,
    - 6.63406731718911586609E-14,
    --3.71902448093119218395E-13,
    --1.27135418132338309016E-12,
    - 2.74851141935315395333E-12,
    - 2.33781843985453438400E-11,
    - 2.71436006377612442764E-11,
    --2.56600180000355990529E-10,
    --1.61021375163803438552E-9,
    --4.72543064876271773512E-9,
    --3.00095178028681682282E-9,
    - 7.79387474390914922337E-8,
    - 1.06942765566401507066E-6,
    - 1.59503164802313196374E-5,
    - 3.49592575153777996871E-4,
    - 1.28475387530065247392E-2,
    - 1.03665693917934275131E0
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short C1[] = {
    -0122025,0157055,0021702,0021427,
    -0022310,0130043,0123265,0022340,
    -0022561,0002231,0017746,0013043,
    -0123610,0136375,0002352,0024467,
    -0024300,0171555,0141300,0000446,
    -0124531,0176777,0126210,0035616,
    -0125207,0046604,0167760,0077132,
    -0026111,0120666,0026606,0064143,
    -0126516,0103615,0054127,0005436,
    -0126305,0104721,0025415,0004134,
    -0027700,0131556,0164725,0157553,
    -0130361,0170602,0077274,0055406,
    -0130116,0131420,0125472,0017231,
    -0031541,0153747,0177312,0056304,
    -0132114,0035517,0041545,0043151,
    -0132556,0020415,0110044,0172442,
    -0033402,0117041,0031152,0010364,
    -0033621,0072737,0050647,0013720,
    -0134600,0121366,0140010,0063265,
    -0135244,0022637,0013756,0044742,
    -0035402,0052052,0006523,0043564,
    -0036730,0106660,0020277,0162146,
    -0040216,0123254,0135147,0005724
    -};
    -
    -static unsigned short C2[] = {
    -0022024,0154550,0104311,0144257,
    -0122277,0165037,0133443,0155601,
    -0122611,0165102,0157053,0055252,
    -0023232,0146235,0153511,0113222,
    -0023402,0057340,0145304,0010471,
    -0124137,0164171,0113071,0100002,
    -0124237,0105473,0056130,0022022,
    -0025035,0073266,0056746,0164433,
    -0025225,0061313,0055600,0165407,
    -0125721,0056312,0107613,0051215,
    -0126262,0166534,0115336,0066653,
    -0026501,0064307,0127442,0065573,
    -0027315,0121375,0142020,0045356,
    -0027356,0140764,0070641,0046570,
    -0130215,0010503,0146335,0177737,
    -0130735,0047134,0015215,0163665,
    -0131242,0056523,0155276,0050053,
    -0131116,0034515,0050707,0163512,
    -0032247,0057507,0107545,0032007,
    -0033217,0104501,0021706,0025047,
    -0034205,0146413,0033746,0076562,
    -0035267,0044605,0065355,0002772,
    -0036522,0077173,0130716,0170304,
    -0040204,0130454,0130571,0027270
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short C1[] = {
    -0x4463,0xa478,0xbbc5,0xbc62,
    -0xa49c,0x74d6,0x1604,0x3c79,
    -0xc2c4,0x23fc,0x2093,0x3c8e,
    -0x4527,0xa09d,0x179f,0xbcd1,
    -0x0025,0xb858,0x1e6d,0x3cf8,
    -0x0772,0xf591,0x3fbf,0xbd0b,
    -0x0fcb,0x9dfe,0xe9b0,0xbd30,
    -0xcd0c,0xc5b0,0x3436,0x3d69,
    -0xe164,0xab0a,0xd0f1,0xbd89,
    -0xa10c,0x2561,0xb13a,0xbd78,
    -0xbbed,0xdd3a,0x166d,0x3dd8,
    -0x8b61,0x4fd7,0x3e30,0xbdfe,
    -0x43d3,0x1567,0xd662,0xbde9,
    -0x4b98,0xffd9,0x3afc,0x3e4c,
    -0xa8cd,0xe86c,0x8769,0xbe69,
    -0x9ea4,0xb204,0xc421,0xbe8d,
    -0x421f,0x264d,0x53c4,0x3ec0,
    -0xe2fa,0xea34,0x2ebb,0x3ed2,
    -0x0cd7,0xd801,0x145e,0xbf10,
    -0xc93c,0xe2fd,0x84b3,0xbf34,
    -0x68ef,0x41aa,0x4a85,0x3f40,
    -0xfc8d,0x0417,0x11b6,0x3f9b,
    -0xe17b,0x974c,0xd4d5,0x3ff1
    -};
    -
    -static unsigned short C2[] = {
    -0x3916,0x1119,0x9b2d,0x3c62,
    -0x7b70,0xf6e4,0xfd43,0xbc77,
    -0x6b55,0x5bc5,0x3d48,0xbc91,
    -0x32d2,0xbae9,0x5993,0x3cb3,
    -0x8227,0x1958,0x4bdc,0x3cc0,
    -0x3000,0x32c7,0xfd0f,0xbceb,
    -0x0482,0x6b8b,0xf167,0xbcf3,
    -0xdd23,0xcbbc,0xaed6,0x3d23,
    -0x1d61,0x6b70,0xac59,0x3d32,
    -0x6a52,0x51f1,0x2b99,0xbd5a,
    -0xcdb5,0x935b,0x5dab,0xbd76,
    -0x4d6f,0xf5e4,0x2d18,0x3d88,
    -0x095e,0xb882,0xb45f,0x3db9,
    -0x29af,0x8e34,0xd83e,0x3dbd,
    -0xbffc,0x799b,0xa228,0xbdf1,
    -0xbcf7,0x8351,0xa9cb,0xbe1b,
    -0xca05,0x7b57,0x4baa,0xbe34,
    -0xfce9,0xaa38,0xc729,0xbe29,
    -0xa681,0xf1ec,0xebe8,0x3e74,
    -0xc545,0x2478,0xf128,0x3eb1,
    -0xcfae,0x66fc,0xb9a1,0x3ef0,
    -0xa0bf,0xad5d,0xe930,0x3f36,
    -0xde19,0x7639,0x4fcf,0x3f8a,
    -0x25d7,0x962f,0x9625,0x3ff0
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short C1[] = {
    -0xbc62,0xbbc5,0xa478,0x4463,
    -0x3c79,0x1604,0x74d6,0xa49c,
    -0x3c8e,0x2093,0x23fc,0xc2c4,
    -0xbcd1,0x179f,0xa09d,0x4527,
    -0x3cf8,0x1e6d,0xb858,0x0025,
    -0xbd0b,0x3fbf,0xf591,0x0772,
    -0xbd30,0xe9b0,0x9dfe,0x0fcb,
    -0x3d69,0x3436,0xc5b0,0xcd0c,
    -0xbd89,0xd0f1,0xab0a,0xe164,
    -0xbd78,0xb13a,0x2561,0xa10c,
    -0x3dd8,0x166d,0xdd3a,0xbbed,
    -0xbdfe,0x3e30,0x4fd7,0x8b61,
    -0xbde9,0xd662,0x1567,0x43d3,
    -0x3e4c,0x3afc,0xffd9,0x4b98,
    -0xbe69,0x8769,0xe86c,0xa8cd,
    -0xbe8d,0xc421,0xb204,0x9ea4,
    -0x3ec0,0x53c4,0x264d,0x421f,
    -0x3ed2,0x2ebb,0xea34,0xe2fa,
    -0xbf10,0x145e,0xd801,0x0cd7,
    -0xbf34,0x84b3,0xe2fd,0xc93c,
    -0x3f40,0x4a85,0x41aa,0x68ef,
    -0x3f9b,0x11b6,0x0417,0xfc8d,
    -0x3ff1,0xd4d5,0x974c,0xe17b
    -};
    -
    -static unsigned short C2[] = {
    -0x3c62,0x9b2d,0x1119,0x3916,
    -0xbc77,0xfd43,0xf6e4,0x7b70,
    -0xbc91,0x3d48,0x5bc5,0x6b55,
    -0x3cb3,0x5993,0xbae9,0x32d2,
    -0x3cc0,0x4bdc,0x1958,0x8227,
    -0xbceb,0xfd0f,0x32c7,0x3000,
    -0xbcf3,0xf167,0x6b8b,0x0482,
    -0x3d23,0xaed6,0xcbbc,0xdd23,
    -0x3d32,0xac59,0x6b70,0x1d61,
    -0xbd5a,0x2b99,0x51f1,0x6a52,
    -0xbd76,0x5dab,0x935b,0xcdb5,
    -0x3d88,0x2d18,0xf5e4,0x4d6f,
    -0x3db9,0xb45f,0xb882,0x095e,
    -0x3dbd,0xd83e,0x8e34,0x29af,
    -0xbdf1,0xa228,0x799b,0xbffc,
    -0xbe1b,0xa9cb,0x8351,0xbcf7,
    -0xbe34,0x4baa,0x7b57,0xca05,
    -0xbe29,0xc729,0xaa38,0xfce9,
    -0x3e74,0xebe8,0xf1ec,0xa681,
    -0x3eb1,0xf128,0x2478,0xc545,
    -0x3ef0,0xb9a1,0x66fc,0xcfae,
    -0x3f36,0xe930,0xad5d,0xa0bf,
    -0x3f8a,0x4fcf,0x7639,0xde19,
    -0x3ff0,0x9625,0x962f,0x25d7
    -};
    -#endif
    -
    -
    -
    -/* Sine and cosine integrals */
    -
    -#define EUL 0.57721566490153286061
    -extern double MACHEP, MAXNUM, PIO2;
    -
    -int shichi( x, si, ci )
    -double x;
    -double *si, *ci;
    -{
    -double k, z, c, s, a;
    -short sign;
    -
    -if( x < 0.0 )
    -	{
    -	sign = -1;
    -	x = -x;
    -	}
    -else
    -	sign = 0;
    -
    -
    -if( x == 0.0 )
    -	{
    -	*si = 0.0;
    -	*ci = -MAXNUM;
    -	return( 0 );
    -	}
    -
    -if( x >= 8.0 )
    -	goto chb;
    -
    -z = x * x;
    -
    -/*	Direct power series expansion	*/
    -
    -a = 1.0;
    -s = 1.0;
    -c = 0.0;
    -k = 2.0;
    -
    -do
    -	{
    -	a *= z/k;
    -	c += a/k;
    -	k += 1.0;
    -	a /= k;
    -	s += a/k;
    -	k += 1.0;
    -	}
    -while( fabs(a/s) > MACHEP );
    -
    -s *= x;
    -goto done;
    -
    -
    -chb:
    -
    -if( x < 18.0 )
    -	{
    -	a = (576.0/x - 52.0)/10.0;
    -	k = exp(x) / x;
    -	s = k * chbevl( a, S1, 22 );
    -	c = k * chbevl( a, C1, 23 );
    -	goto done;
    -	}
    -
    -if( x <= 88.0 )
    -	{
    -	a = (6336.0/x - 212.0)/70.0;
    -	k = exp(x) / x;
    -	s = k * chbevl( a, S2, 23 );
    -	c = k * chbevl( a, C2, 24 );
    -	goto done;
    -	}
    -else
    -	{
    -	if( sign )
    -		*si = -MAXNUM;
    -	else
    -		*si = MAXNUM;
    -	*ci = MAXNUM;
    -	return(0);
    -	}
    -done:
    -if( sign )
    -	s = -s;
    -
    -*si = s;
    -
    -*ci = EUL + log(x) + c;
    -return(0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/sici.c b/scipy-0.10.1/scipy/special/cephes/sici.c
    deleted file mode 100644
    index ab5a11b544..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/sici.c
    +++ /dev/null
    @@ -1,676 +0,0 @@
    -/*							sici.c
    - *
    - *	Sine and cosine integrals
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, Ci, Si, sici();
    - *
    - * sici( x, &Si, &Ci );
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Evaluates the integrals
    - *
    - *                          x
    - *                          -
    - *                         |  cos t - 1
    - *   Ci(x) = eul + ln x +  |  --------- dt,
    - *                         |      t
    - *                        -
    - *                         0
    - *             x
    - *             -
    - *            |  sin t
    - *   Si(x) =  |  ----- dt
    - *            |    t
    - *           -
    - *            0
    - *
    - * where eul = 0.57721566490153286061 is Euler's constant.
    - * The integrals are approximated by rational functions.
    - * For x > 8 auxiliary functions f(x) and g(x) are employed
    - * such that
    - *
    - * Ci(x) = f(x) sin(x) - g(x) cos(x)
    - * Si(x) = pi/2 - f(x) cos(x) - g(x) sin(x)
    - *
    - *
    - * ACCURACY:
    - *    Test interval = [0,50].
    - * Absolute error, except relative when > 1:
    - * arithmetic   function   # trials      peak         rms
    - *    IEEE        Si        30000       4.4e-16     7.3e-17
    - *    IEEE        Ci        30000       6.9e-16     5.1e-17
    - *    DEC         Si         5000       4.4e-17     9.0e-18
    - *    DEC         Ci         5300       7.9e-17     5.2e-18
    - */
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -#include 
    -#include 
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double SN[] = {
    --8.39167827910303881427E-11,
    - 4.62591714427012837309E-8,
    --9.75759303843632795789E-6,
    - 9.76945438170435310816E-4,
    --4.13470316229406538752E-2,
    - 1.00000000000000000302E0,
    -};
    -static double SD[] = {
    -  2.03269266195951942049E-12,
    -  1.27997891179943299903E-9,
    -  4.41827842801218905784E-7,
    -  9.96412122043875552487E-5,
    -  1.42085239326149893930E-2,
    -  9.99999999999999996984E-1,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short SN[] = {
    -0127670,0104362,0167505,0035161,
    -0032106,0127177,0032131,0056461,
    -0134043,0132213,0000476,0172351,
    -0035600,0006331,0064761,0032665,
    -0137051,0055601,0044667,0017645,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short SD[] = {
    -0026417,0004674,0052064,0001573,
    -0030657,0165501,0014666,0131526,
    -0032755,0032133,0034147,0024124,
    -0034720,0173167,0166624,0154477,
    -0036550,0145336,0063534,0063220,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short SN[] = {
    -0xa74e,0x5de8,0x111e,0xbdd7,
    -0x2ba6,0xe68b,0xd5cf,0x3e68,
    -0xde9d,0x6027,0x7691,0xbee4,
    -0x26b7,0x2d3e,0x019b,0x3f50,
    -0xe3f5,0x2936,0x2b70,0xbfa5,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short SD[] = {
    -0x806f,0x8a86,0xe137,0x3d81,
    -0xd66b,0x2336,0xfd68,0x3e15,
    -0xe50a,0x670c,0xa68b,0x3e9d,
    -0x9b28,0xfdb2,0x1ece,0x3f1a,
    -0x8cd2,0xcceb,0x195b,0x3f8d,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short SN[] = {
    -0xbdd7,0x111e,0x5de8,0xa74e,
    -0x3e68,0xd5cf,0xe68b,0x2ba6,
    -0xbee4,0x7691,0x6027,0xde9d,
    -0x3f50,0x019b,0x2d3e,0x26b7,
    -0xbfa5,0x2b70,0x2936,0xe3f5,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short SD[] = {
    -0x3d81,0xe137,0x8a86,0x806f,
    -0x3e15,0xfd68,0x2336,0xd66b,
    -0x3e9d,0xa68b,0x670c,0xe50a,
    -0x3f1a,0x1ece,0xfdb2,0x9b28,
    -0x3f8d,0x195b,0xcceb,0x8cd2,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -#ifdef UNK
    -static double CN[] = {
    - 2.02524002389102268789E-11,
    --1.35249504915790756375E-8,
    - 3.59325051419993077021E-6,
    --4.74007206873407909465E-4,
    - 2.89159652607555242092E-2,
    --1.00000000000000000080E0,
    -};
    -static double CD[] = {
    -  4.07746040061880559506E-12,
    -  3.06780997581887812692E-9,
    -  1.23210355685883423679E-6,
    -  3.17442024775032769882E-4,
    -  5.10028056236446052392E-2,
    -  4.00000000000000000080E0,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short CN[] = {
    -0027262,0022131,0160257,0020166,
    -0131550,0055534,0077637,0000557,
    -0033561,0021622,0161463,0026575,
    -0135370,0102053,0116333,0000466,
    -0036754,0160454,0122022,0024622,
    -0140200,0000000,0000000,0000000,
    -};
    -static unsigned short CD[] = {
    -0026617,0073177,0107543,0104425,
    -0031122,0150573,0156453,0041517,
    -0033245,0057301,0077706,0110510,
    -0035246,0067130,0165424,0044543,
    -0037120,0164121,0061206,0053657,
    -0040600,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short CN[] = {
    -0xe40f,0x3c15,0x448b,0x3db6,
    -0xe02e,0x8ff3,0x0b6b,0xbe4d,
    -0x65b0,0x5c66,0x2472,0x3ece,
    -0x6027,0x739b,0x1085,0xbf3f,
    -0x4532,0x9482,0x9c25,0x3f9d,
    -0x0000,0x0000,0x0000,0xbff0,
    -};
    -static unsigned short CD[] = {
    -0x7123,0xf1ec,0xeecf,0x3d91,
    -0x686a,0x7ba5,0x5a2f,0x3e2a,
    -0xd229,0x2ff8,0xabd8,0x3eb4,
    -0x892c,0x1d62,0xcdcb,0x3f34,
    -0xcaf6,0x2c50,0x1d0a,0x3faa,
    -0x0000,0x0000,0x0000,0x4010,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short CN[] = {
    -0x3db6,0x448b,0x3c15,0xe40f,
    -0xbe4d,0x0b6b,0x8ff3,0xe02e,
    -0x3ece,0x2472,0x5c66,0x65b0,
    -0xbf3f,0x1085,0x739b,0x6027,
    -0x3f9d,0x9c25,0x9482,0x4532,
    -0xbff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short CD[] = {
    -0x3d91,0xeecf,0xf1ec,0x7123,
    -0x3e2a,0x5a2f,0x7ba5,0x686a,
    -0x3eb4,0xabd8,0x2ff8,0xd229,
    -0x3f34,0xcdcb,0x1d62,0x892c,
    -0x3faa,0x1d0a,0x2c50,0xcaf6,
    -0x4010,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -
    -#ifdef UNK
    -static double FN4[] = {
    -  4.23612862892216586994E0,
    -  5.45937717161812843388E0,
    -  1.62083287701538329132E0,
    -  1.67006611831323023771E-1,
    -  6.81020132472518137426E-3,
    -  1.08936580650328664411E-4,
    -  5.48900223421373614008E-7,
    -};
    -static double FD4[] = {
    -/*  1.00000000000000000000E0,*/
    -  8.16496634205391016773E0,
    -  7.30828822505564552187E0,
    -  1.86792257950184183883E0,
    -  1.78792052963149907262E-1,
    -  7.01710668322789753610E-3,
    -  1.10034357153915731354E-4,
    -  5.48900252756255700982E-7,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short FN4[] = {
    -0040607,0107135,0120133,0153471,
    -0040656,0131467,0140424,0017567,
    -0040317,0073563,0121610,0002511,
    -0037453,0001710,0000040,0006334,
    -0036337,0024033,0176003,0171425,
    -0034744,0072341,0121657,0126035,
    -0033023,0054042,0154652,0000451,
    -};
    -static unsigned short FD4[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041002,0121663,0137500,0177450,
    -0040751,0156577,0042213,0061552,
    -0040357,0014026,0045465,0147265,
    -0037467,0012503,0110413,0131772,
    -0036345,0167701,0155706,0160551,
    -0034746,0141076,0162250,0123547,
    -0033023,0054043,0056706,0151050,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short FN4[] = {
    -0x7ae7,0xb40b,0xf1cb,0x4010,
    -0x83ef,0xf822,0xd666,0x4015,
    -0x00a9,0x7471,0xeeee,0x3ff9,
    -0x019c,0x0004,0x6079,0x3fc5,
    -0x7e63,0x7f80,0xe503,0x3f7b,
    -0xf584,0x3475,0x8e9c,0x3f1c,
    -0x4025,0x5b35,0x6b04,0x3ea2,
    -};
    -static unsigned short FD4[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x1fe5,0x77e8,0x5476,0x4020,
    -0x6c6d,0xe891,0x3baf,0x401d,
    -0xb9d7,0xc966,0xe302,0x3ffd,
    -0x767f,0x7221,0xe2a8,0x3fc6,
    -0xdc2d,0x3b78,0xbdf8,0x3f7c,
    -0x14ed,0xdc95,0xd847,0x3f1c,
    -0xda45,0x6bb8,0x6b04,0x3ea2,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short FN4[] = {
    -0x4010,0xf1cb,0xb40b,0x7ae7,
    -0x4015,0xd666,0xf822,0x83ef,
    -0x3ff9,0xeeee,0x7471,0x00a9,
    -0x3fc5,0x6079,0x0004,0x019c,
    -0x3f7b,0xe503,0x7f80,0x7e63,
    -0x3f1c,0x8e9c,0x3475,0xf584,
    -0x3ea2,0x6b04,0x5b35,0x4025,
    -};
    -static unsigned short FD4[] = {
    -/* 0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4020,0x5476,0x77e8,0x1fe5,
    -0x401d,0x3baf,0xe891,0x6c6d,
    -0x3ffd,0xe302,0xc966,0xb9d7,
    -0x3fc6,0xe2a8,0x7221,0x767f,
    -0x3f7c,0xbdf8,0x3b78,0xdc2d,
    -0x3f1c,0xd847,0xdc95,0x14ed,
    -0x3ea2,0x6b04,0x6bb8,0xda45,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double FN8[] = {
    -  4.55880873470465315206E-1,
    -  7.13715274100146711374E-1,
    -  1.60300158222319456320E-1,
    -  1.16064229408124407915E-2,
    -  3.49556442447859055605E-4,
    -  4.86215430826454749482E-6,
    -  3.20092790091004902806E-8,
    -  9.41779576128512936592E-11,
    -  9.70507110881952024631E-14,
    -};
    -static double FD8[] = {
    -/*  1.00000000000000000000E0,*/
    -  9.17463611873684053703E-1,
    -  1.78685545332074536321E-1,
    -  1.22253594771971293032E-2,
    -  3.58696481881851580297E-4,
    -  4.92435064317881464393E-6,
    -  3.21956939101046018377E-8,
    -  9.43720590350276732376E-11,
    -  9.70507110881952025725E-14,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short FN8[] = {
    -0037751,0064467,0142332,0164573,
    -0040066,0133013,0050352,0071102,
    -0037444,0022671,0102157,0013535,
    -0036476,0024335,0136423,0146444,
    -0035267,0042253,0164110,0110460,
    -0033643,0022626,0062535,0060320,
    -0032011,0075223,0010110,0153413,
    -0027717,0014572,0011360,0014034,
    -0025332,0104755,0004563,0152354,
    -};
    -static unsigned short FD8[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040152,0157345,0030104,0075616,
    -0037466,0174527,0172740,0071060,
    -0036510,0046337,0144272,0156552,
    -0035274,0007555,0042537,0015572,
    -0033645,0035731,0112465,0026474,
    -0032012,0043612,0030613,0030123,
    -0027717,0103277,0004564,0151000,
    -0025332,0104755,0004563,0152354,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short FN8[] = {
    -0x5d2f,0xf89b,0x2d26,0x3fdd,
    -0x4e48,0x6a1d,0xd6c1,0x3fe6,
    -0xe2ec,0x308d,0x84b7,0x3fc4,
    -0x79a4,0xb7a2,0xc51b,0x3f87,
    -0x1226,0x7d09,0xe895,0x3f36,
    -0xac1a,0xccab,0x64b2,0x3ed4,
    -0x1ae1,0x6209,0x2f52,0x3e61,
    -0x0304,0x425e,0xe32f,0x3dd9,
    -0x7a9d,0xa12e,0x513d,0x3d3b,
    -};
    -static unsigned short FD8[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x8f72,0xa608,0x5bdc,0x3fed,
    -0x0e46,0xfebc,0xdf2a,0x3fc6,
    -0x5bad,0xf917,0x099b,0x3f89,
    -0xe36f,0xa8ab,0x81ed,0x3f37,
    -0xa5a8,0x32a6,0xa77b,0x3ed4,
    -0x660a,0x4631,0x48f1,0x3e61,
    -0x9a40,0xe12e,0xf0d7,0x3dd9,
    -0x7a9d,0xa12e,0x513d,0x3d3b,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short FN8[] = {
    -0x3fdd,0x2d26,0xf89b,0x5d2f,
    -0x3fe6,0xd6c1,0x6a1d,0x4e48,
    -0x3fc4,0x84b7,0x308d,0xe2ec,
    -0x3f87,0xc51b,0xb7a2,0x79a4,
    -0x3f36,0xe895,0x7d09,0x1226,
    -0x3ed4,0x64b2,0xccab,0xac1a,
    -0x3e61,0x2f52,0x6209,0x1ae1,
    -0x3dd9,0xe32f,0x425e,0x0304,
    -0x3d3b,0x513d,0xa12e,0x7a9d,
    -};
    -static unsigned short FD8[] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x3fed,0x5bdc,0xa608,0x8f72,
    -0x3fc6,0xdf2a,0xfebc,0x0e46,
    -0x3f89,0x099b,0xf917,0x5bad,
    -0x3f37,0x81ed,0xa8ab,0xe36f,
    -0x3ed4,0xa77b,0x32a6,0xa5a8,
    -0x3e61,0x48f1,0x4631,0x660a,
    -0x3dd9,0xf0d7,0xe12e,0x9a40,
    -0x3d3b,0x513d,0xa12e,0x7a9d,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double GN4[] = {
    -  8.71001698973114191777E-2,
    -  6.11379109952219284151E-1,
    -  3.97180296392337498885E-1,
    -  7.48527737628469092119E-2,
    -  5.38868681462177273157E-3,
    -  1.61999794598934024525E-4,
    -  1.97963874140963632189E-6,
    -  7.82579040744090311069E-9,
    -};
    -static double GD4[] = {
    -/*  1.00000000000000000000E0,*/
    -  1.64402202413355338886E0,
    -  6.66296701268987968381E-1,
    -  9.88771761277688796203E-2,
    -  6.22396345441768420760E-3,
    -  1.73221081474177119497E-4,
    -  2.02659182086343991969E-6,
    -  7.82579218933534490868E-9,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short GN4[] = {
    -0037262,0060622,0164572,0157515,
    -0040034,0101527,0061263,0147204,
    -0037713,0055467,0037475,0144512,
    -0037231,0046151,0035234,0045261,
    -0036260,0111624,0150617,0053536,
    -0035051,0157175,0016675,0155456,
    -0033404,0154757,0041211,0000055,
    -0031406,0071060,0130322,0033322,
    -};
    -static unsigned short GD4[] = {
    -/* 0040200,0000000,0000000,0000000,*/
    -0040322,0067520,0046707,0053275,
    -0040052,0111153,0126542,0005516,
    -0037312,0100035,0167121,0014552,
    -0036313,0171143,0137176,0014213,
    -0035065,0121256,0012033,0150603,
    -0033410,0000225,0013121,0071643,
    -0031406,0071062,0131152,0150454,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short GN4[] = {
    -0x5bea,0x5d2f,0x4c32,0x3fb6,
    -0x79d1,0xec56,0x906a,0x3fe3,
    -0xb929,0xe7e7,0x6b66,0x3fd9,
    -0x8956,0x2753,0x298d,0x3fb3,
    -0xeaec,0x9a31,0x1272,0x3f76,
    -0xbb66,0xa3b7,0x3bcf,0x3f25,
    -0x2006,0xe851,0x9b3d,0x3ec0,
    -0x46da,0x161a,0xce46,0x3e40,
    -};
    -static unsigned short GD4[] = {
    -/* 0x0000,0x0000,0x0000,0x3ff0,*/
    -0xead8,0x09b8,0x4dea,0x3ffa,
    -0x416a,0x75ac,0x524d,0x3fe5,
    -0x232d,0xbdca,0x5003,0x3fb9,
    -0xc311,0x77cf,0x7e4c,0x3f79,
    -0x7a30,0xc283,0xb455,0x3f26,
    -0x2e74,0xa2ca,0x0012,0x3ec1,
    -0x5a26,0x564d,0xce46,0x3e40,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short GN4[] = {
    -0x3fb6,0x4c32,0x5d2f,0x5bea,
    -0x3fe3,0x906a,0xec56,0x79d1,
    -0x3fd9,0x6b66,0xe7e7,0xb929,
    -0x3fb3,0x298d,0x2753,0x8956,
    -0x3f76,0x1272,0x9a31,0xeaec,
    -0x3f25,0x3bcf,0xa3b7,0xbb66,
    -0x3ec0,0x9b3d,0xe851,0x2006,
    -0x3e40,0xce46,0x161a,0x46da,
    -};
    -static unsigned short GD4[] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x3ffa,0x4dea,0x09b8,0xead8,
    -0x3fe5,0x524d,0x75ac,0x416a,
    -0x3fb9,0x5003,0xbdca,0x232d,
    -0x3f79,0x7e4c,0x77cf,0xc311,
    -0x3f26,0xb455,0xc283,0x7a30,
    -0x3ec1,0x0012,0xa2ca,0x2e74,
    -0x3e40,0xce46,0x564d,0x5a26,
    -};
    -#endif
    -
    -#ifdef UNK
    -static double GN8[] = {
    -  6.97359953443276214934E-1,
    -  3.30410979305632063225E-1,
    -  3.84878767649974295920E-2,
    -  1.71718239052347903558E-3,
    -  3.48941165502279436777E-5,
    -  3.47131167084116673800E-7,
    -  1.70404452782044526189E-9,
    -  3.85945925430276600453E-12,
    -  3.14040098946363334640E-15,
    -};
    -static double GD8[] = {
    -/*  1.00000000000000000000E0,*/
    -  1.68548898811011640017E0,
    -  4.87852258695304967486E-1,
    -  4.67913194259625806320E-2,
    -  1.90284426674399523638E-3,
    -  3.68475504442561108162E-5,
    -  3.57043223443740838771E-7,
    -  1.72693748966316146736E-9,
    -  3.87830166023954706752E-12,
    -  3.14040098946363335242E-15,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short GN8[] = {
    -0040062,0103056,0110624,0033123,
    -0037651,0025640,0136266,0145647,
    -0037035,0122566,0137770,0061777,
    -0035741,0011424,0065311,0013370,
    -0034422,0055505,0134324,0016755,
    -0032672,0056530,0022565,0014747,
    -0030752,0031674,0114735,0013162,
    -0026607,0145353,0022020,0123625,
    -0024142,0045054,0060033,0016505,
    -};
    -static unsigned short GD8[] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0040327,0137032,0064331,0136425,
    -0037771,0143705,0070300,0105711,
    -0037077,0124101,0025275,0035356,
    -0035771,0064333,0145103,0105357,
    -0034432,0106301,0105311,0010713,
    -0032677,0127645,0120034,0157551,
    -0030755,0054466,0010743,0105566,
    -0026610,0072242,0142530,0135744,
    -0024142,0045054,0060033,0016505,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short GN8[] = {
    -0x86ca,0xd232,0x50c5,0x3fe6,
    -0xd975,0x1796,0x2574,0x3fd5,
    -0x0c80,0xd7ff,0xb4ae,0x3fa3,
    -0x22df,0x8d59,0x2262,0x3f5c,
    -0x83be,0xb71a,0x4b68,0x3f02,
    -0xa33d,0x04ae,0x4bab,0x3e97,
    -0xa2ce,0x933b,0x4677,0x3e1d,
    -0x14f3,0x6482,0xf95d,0x3d90,
    -0x63a9,0x8c03,0x4945,0x3cec,
    -};
    -static unsigned short GD8[] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x37a3,0x4d1b,0xf7c3,0x3ffa,
    -0x1179,0xae18,0x38f8,0x3fdf,
    -0xa75e,0x2557,0xf508,0x3fa7,
    -0x715e,0x7948,0x2d1b,0x3f5f,
    -0x2239,0x3159,0x5198,0x3f03,
    -0x9bed,0xb403,0xf5f4,0x3e97,
    -0x716f,0xc23c,0xab26,0x3e1d,
    -0x177c,0x58ab,0x0e94,0x3d91,
    -0x63a9,0x8c03,0x4945,0x3cec,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short GN8[] = {
    -0x3fe6,0x50c5,0xd232,0x86ca,
    -0x3fd5,0x2574,0x1796,0xd975,
    -0x3fa3,0xb4ae,0xd7ff,0x0c80,
    -0x3f5c,0x2262,0x8d59,0x22df,
    -0x3f02,0x4b68,0xb71a,0x83be,
    -0x3e97,0x4bab,0x04ae,0xa33d,
    -0x3e1d,0x4677,0x933b,0xa2ce,
    -0x3d90,0xf95d,0x6482,0x14f3,
    -0x3cec,0x4945,0x8c03,0x63a9,
    -};
    -static unsigned short GD8[] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x3ffa,0xf7c3,0x4d1b,0x37a3,
    -0x3fdf,0x38f8,0xae18,0x1179,
    -0x3fa7,0xf508,0x2557,0xa75e,
    -0x3f5f,0x2d1b,0x7948,0x715e,
    -0x3f03,0x5198,0x3159,0x2239,
    -0x3e97,0xf5f4,0xb403,0x9bed,
    -0x3e1d,0xab26,0xc23c,0x716f,
    -0x3d91,0x0e94,0x58ab,0x177c,
    -0x3cec,0x4945,0x8c03,0x63a9,
    -};
    -#endif
    -
    -#define EUL 0.57721566490153286061
    -extern double MAXNUM, PIO2, MACHEP;
    -
    -
    -int sici( x, si, ci )
    -double x;
    -double *si, *ci;
    -{
    -double z, c, s, f, g;
    -short sign;
    -
    -if( x < 0.0 )
    -	{
    -	sign = -1;
    -	x = -x;
    -	}
    -else
    -	sign = 0;
    -
    -
    -if( x == 0.0 )
    -	{
    -	*si = 0.0;
    -	*ci = -MAXNUM;
    -	return( 0 );
    -	}
    -
    -
    -if( x > 1.0e9 ) {
    -        if (npy_isinf(x)) {
    -                if (sign == -1) {
    -                        *si = -PIO2;
    -                        *ci = NPY_NAN;
    -                } else {
    -                        *si = PIO2;
    -                        *ci = 0;
    -                }
    -                return 0;
    -        }
    -	*si = PIO2 - cos(x)/x;
    -	*ci = sin(x)/x;
    -}
    -
    -
    -
    -if( x > 4.0 )
    -	goto asympt;
    -
    -z = x * x;
    -s = x * polevl( z, SN, 5 ) / polevl( z, SD, 5 );
    -c = z * polevl( z, CN, 5 ) / polevl( z, CD, 5 );
    -
    -if( sign )
    -	s = -s;
    -*si = s;
    -*ci = EUL + log(x) + c;	/* real part if x < 0 */
    -return(0);
    -
    -
    -
    -/* The auxiliary functions are:
    - *
    - *
    - * *si = *si - PIO2;
    - * c = cos(x);
    - * s = sin(x);
    - *
    - * t = *ci * s - *si * c;
    - * a = *ci * c + *si * s;
    - *
    - * *si = t;
    - * *ci = -a;
    - */
    -
    -
    -asympt:
    -
    -s = sin(x);
    -c = cos(x);
    -z = 1.0/(x*x);
    -if( x < 8.0 )
    -	{
    -	f = polevl( z, FN4, 6 ) / (x * p1evl( z, FD4, 7 ));
    -	g = z * polevl( z, GN4, 7 ) / p1evl( z, GD4, 7 );
    -	}
    -else
    -	{
    -	f = polevl( z, FN8, 8 ) / (x * p1evl( z, FD8, 8 ));
    -	g = z * polevl( z, GN8, 8 ) / p1evl( z, GD8, 9 );
    -	}
    -*si = PIO2 - f * c - g * s;
    -if( sign )
    -	*si = -( *si );
    -*ci = f * s - g * c;
    -
    -return(0);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/simpsn.c b/scipy-0.10.1/scipy/special/cephes/simpsn.c
    deleted file mode 100644
    index ce5f00c938..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/simpsn.c
    +++ /dev/null
    @@ -1,84 +0,0 @@
    -/*							simpsn.c	*/
    -/* simpsn.c
    - * Numerical integration of function tabulated
    - * at equally spaced arguments
    - */
    -
    -/* Coefficients for Cote integration formulas */
    -
    -/* Note: these numbers were computed using 40-decimal precision. */
    -
    -#define NCOTE 8
    -
    -/* 6th order formula */
    -/*
    -static double simcon[] =
    -{
    -  4.88095238095238095E-2,
    -  2.57142857142857142857E-1,
    -  3.2142857142857142857E-2,
    -  3.2380952380952380952E-1,
    -};
    -*/
    -
    -/* 8th order formula */
    -static double simcon[] =
    -{
    -  3.488536155202821869E-2,
    -  2.076895943562610229E-1,
    - -3.27336860670194003527E-2,
    -  3.7022927689594356261E-1,
    - -1.6014109347442680776E-1,
    -};
    -
    -/* 10th order formula */
    -/*
    -static double simcon[] =
    -{
    -  2.68341483619261397039E-2,
    -  1.77535941424830313719E-1,
    - -8.1043570626903960237E-2,
    -  4.5494628827962161295E-1,
    - -4.3515512265512265512E-1,
    -  7.1376463043129709796E-1,
    -};
    -*/
    -
    -/*							simpsn.c 2	*/
    -/* 20th order formula */
    -/*
    -static double simcon[] =
    -{
    -  1.182527324903160319E-2,
    -  1.14137717644606974987E-1,
    - -2.36478370511426964E-1,
    -  1.20618689348187566E+0,
    - -3.7710317267153304677E+0,
    -  1.03367982199398011435E+1,
    - -2.270881584397951229796E+1,
    -  4.1828057422193554603E+1,
    - -6.4075279490154004651555E+1,
    -  8.279728347247285172085E+1,
    - -9.0005367135242894657916E+1,
    -};
    -*/
    -
    -/*							simpsn.c 3	*/
    -
    -double simpsn( double [], double );
    -
    -double simpsn( f, delta )
    -double f[];	/* tabulated function */
    -double delta;	/* spacing of arguments */
    -{
    -extern double simcon[];
    -double ans;
    -int i;
    -
    -
    -ans = simcon[NCOTE/2] * f[NCOTE/2];
    -for( i=0; i < NCOTE/2; i++ )
    -	ans += simcon[i] * ( f[i] + f[NCOTE-i] );
    -
    -return( ans * delta * NCOTE );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/simq.c b/scipy-0.10.1/scipy/special/cephes/simq.c
    deleted file mode 100644
    index 6a3b8fedd8..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/simq.c
    +++ /dev/null
    @@ -1,182 +0,0 @@
    -/*							simq.c
    - *
    - *	Solution of simultaneous linear equations AX = B
    - *	by Gaussian elimination with partial pivoting
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double A[n*n], B[n], X[n];
    - * int n, flag;
    - * int IPS[];
    - * int simq();
    - *
    - * ercode = simq( A, B, X, n, flag, IPS );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * B, X, IPS are vectors of length n.
    - * A is an n x n matrix (i.e., a vector of length n*n),
    - * stored row-wise: that is, A(i,j) = A[ij],
    - * where ij = i*n + j, which is the transpose of the normal
    - * column-wise storage.
    - *
    - * The contents of matrix A are destroyed.
    - *
    - * Set flag=0 to solve.
    - * Set flag=-1 to do a new back substitution for different B vector
    - * using the same A matrix previously reduced when flag=0.
    - *
    - * The routine returns nonzero on error; messages are printed.
    - *
    - *
    - * ACCURACY:
    - *
    - * Depends on the conditioning (range of eigenvalues) of matrix A.
    - *
    - *
    - * REFERENCE:
    - *
    - * Computer Solution of Linear Algebraic Systems,
    - * by George E. Forsythe and Cleve B. Moler; Prentice-Hall, 1967.
    - *
    - */
    -
    -/*							simq	2 */
    -
    -#include 
    -int simq(double [], double [], double [], int, int, int [] );
    -
    -#define fabs(x) ((x) < 0 ? -(x) : (x))
    -
    -int simq( A, B, X, n, flag, IPS )
    -double A[], B[], X[];
    -int n, flag;
    -int IPS[];
    -{
    -int i, j, ij, ip, ipj, ipk, ipn;
    -int idxpiv, iback;
    -int k, kp, kp1, kpk, kpn;
    -int nip, nkp, nm1;
    -double em, q, rownrm, big, size, pivot, sum;
    -
    -nm1 = n-1;
    -if( flag < 0 )
    -	goto solve;
    -
    -/*	Initialize IPS and X	*/
    -
    -ij=0;
    -for( i=0; i big )
    -			{
    -			big = size;
    -			idxpiv = i;
    -			}
    -		}
    -
    -	if( big == 0.0 )
    -		{
    -		puts( "SIMQ BIG=0" );
    -		return(2);
    -		}
    -	if( idxpiv != k )
    -		{
    -		j = IPS[k];
    -		IPS[k] = IPS[idxpiv];
    -		IPS[idxpiv] = j;
    -		}
    -	kp = IPS[k];
    -	kpk = n*kp + k;
    -	pivot = A[kpk];
    -	kp1 = k+1;
    -	for( i=kp1; i 90 )
    -	{
    -	csign = -csign;
    -	ix = 180 - ix;
    -	}
    -
    -sx = sintbl[ix];
    -if( ssign < 0 )
    -	sx = -sx;
    -cx = sintbl[ 90-ix ];
    -if( csign < 0 )
    -	cx = -cx;
    -
    -/* If the flag argument is set, then just return
    - * the tabulated values for arg to the nearest whole degree.
    - */
    -if( flg )
    -	{
    -#if LINTERP
    -	y = sx + 1.74531263774940077459e-2 * z * cx;
    -	cx -= 1.74531263774940077459e-2 * z * sx;
    -	sx = y;
    -#endif
    -	if( xsign < 0 )
    -		sx = -sx;
    -	*s = sx;	/* sine */
    -	*c = cx;	/* cosine */
    -	return;
    -	}
    -
    -
    -if( ssign < 0 )
    -	sx = -sx;
    -if( csign < 0 )
    -	cx = -cx;
    -
    -/* Find sine and cosine
    - * of the residual angle between -0.5 and +0.5 degree.
    - */
    -#if ACC5
    -#if ABSERR
    -/* absolute error = 2.769e-8: */
    -sz = 1.74531263774940077459e-2 * z;
    -/* absolute error = 4.146e-11: */
    -cz = 1.0 - 1.52307909153324666207e-4 * z * z;
    -#else
    -/* relative error = 6.346e-6: */
    -sz = 1.74531817576426662296e-2 * z;
    -/* relative error = 3.173e-6: */
    -cz = 1.0 - 1.52308226602566149927e-4 * z * z;
    -#endif
    -#else
    -y = z * z;
    -#endif
    -
    -
    -#if ACC11
    -sz = ( -8.86092781698004819918e-7 * y
    -      + 1.74532925198378577601e-2     ) * z;
    -
    -cz = 1.0 - ( -3.86631403698859047896e-9 * y
    -            + 1.52308709893047593702e-4     ) * y;
    -#endif
    -
    -
    -#if ACC17
    -sz = ((  1.34959795251974073996e-11 * y
    -       - 8.86096155697856783296e-7     ) * y
    -       + 1.74532925199432957214e-2          ) * z;
    -
    -cz = 1.0 - ((  3.92582397764340914444e-14 * y
    -             - 3.86632385155548605680e-9     ) * y
    -             + 1.52308709893354299569e-4          ) * y;
    -#endif
    -
    -
    -/* Combine the tabulated part and the calculated part
    - * by trigonometry.
    - */
    -y = sx * cz  +  cx * sz;
    -if( xsign < 0 )
    -	y = - y;
    -*s = y; /* sine */
    -
    -*c = cx * cz  -  sx * sz; /* cosine */
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/sindg.c b/scipy-0.10.1/scipy/special/cephes/sindg.c
    deleted file mode 100644
    index 6dbf8f10fd..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/sindg.c
    +++ /dev/null
    @@ -1,311 +0,0 @@
    -/*							sindg.c
    - *
    - *	Circular sine of angle in degrees
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, sindg();
    - *
    - * y = sindg( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Range reduction is into intervals of 45 degrees.
    - *
    - * Two polynomial approximating functions are employed.
    - * Between 0 and pi/4 the sine is approximated by
    - *      x  +  x**3 P(x**2).
    - * Between pi/4 and pi/2 the cosine is represented as
    - *      1  -  x**2 P(x**2).
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain      # trials      peak         rms
    - *    DEC       +-1000        3100      3.3e-17      9.0e-18
    - *    IEEE      +-1000       30000      2.3e-16      5.6e-17
    - * 
    - * ERROR MESSAGES:
    - *
    - *   message           condition        value returned
    - * sindg total loss   x > 8.0e14 (DEC)      0.0
    - *                    x > 1.0e14 (IEEE)
    - *
    - */
    -/*							cosdg.c
    - *
    - *	Circular cosine of angle in degrees
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, cosdg();
    - *
    - * y = cosdg( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Range reduction is into intervals of 45 degrees.
    - *
    - * Two polynomial approximating functions are employed.
    - * Between 0 and pi/4 the cosine is approximated by
    - *      1  -  x**2 P(x**2).
    - * Between pi/4 and pi/2 the sine is represented as
    - *      x  +  x**3 P(x**2).
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain      # trials      peak         rms
    - *    DEC      +-1000         3400       3.5e-17     9.1e-18
    - *    IEEE     +-1000        30000       2.1e-16     5.7e-17
    - *  See also sin().
    - *
    - */
    -
    -/* Cephes Math Library Release 2.0:  April, 1987
    - * Copyright 1985, 1987 by Stephen L. Moshier
    - * Direct inquiries to 30 Frost Street, Cambridge, MA 02140 */
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double sincof[] = {
    - 1.58962301572218447952E-10,
    --2.50507477628503540135E-8,
    - 2.75573136213856773549E-6,
    --1.98412698295895384658E-4,
    - 8.33333333332211858862E-3,
    --1.66666666666666307295E-1
    -};
    -static double coscof[] = {
    - 1.13678171382044553091E-11,
    --2.08758833757683644217E-9,
    - 2.75573155429816611547E-7,
    --2.48015872936186303776E-5,
    - 1.38888888888806666760E-3,
    --4.16666666666666348141E-2,
    - 4.99999999999999999798E-1
    -};
    -static double PI180 = 1.74532925199432957692E-2; /* pi/180 */
    -static double lossth = 1.0e14;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short sincof[] = {
    -0030056,0143750,0177170,0073013,
    -0131727,0027455,0044510,0132205,
    -0033470,0167432,0131752,0042263,
    -0135120,0006400,0146776,0174027,
    -0036410,0104210,0104207,0137202,
    -0137452,0125252,0125252,0125103
    -};
    -static unsigned short coscof[] = {
    -0027107,0176030,0153315,0110312,
    -0131017,0072476,0007450,0123243,
    -0032623,0171174,0070066,0146445,
    -0134320,0006400,0147355,0163313,
    -0035666,0005540,0133012,0165067,
    -0137052,0125252,0125252,0125206,
    -0040000,0000000,0000000,0000000
    -};
    -static unsigned short P1[] = {0036616,0175065,0011224,0164711};
    -#define PI180 *(double *)P1
    -static double lossth = 8.0e14;
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short sincof[] = {
    -0x0ec1,0x1fcf,0xd8fd,0x3de5,
    -0x1691,0xa929,0xe5e5,0xbe5a,
    -0x4896,0x567d,0x1de3,0x3ec7,
    -0xdf03,0x19bf,0x01a0,0xbf2a,
    -0xf7d0,0x1110,0x1111,0x3f81,
    -0x5548,0x5555,0x5555,0xbfc5
    -};
    -static unsigned short coscof[] = {
    -0xb219,0x1ad9,0xff83,0x3da8,
    -0x14d4,0xc1e5,0xeea7,0xbe21,
    -0xd9a5,0x8e06,0x7e4f,0x3e92,
    -0xbcd9,0x19dd,0x01a0,0xbefa,
    -0x5d47,0x16c1,0xc16c,0x3f56,
    -0x5551,0x5555,0x5555,0xbfa5,
    -0x0000,0x0000,0x0000,0x3fe0
    -};
    -
    -static unsigned short P1[] = {0x9d39,0xa252,0xdf46,0x3f91};
    -#define PI180 *(double *)P1
    -static double lossth = 1.0e14;
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short sincof[] = {
    -0x3de5,0xd8fd,0x1fcf,0x0ec1,
    -0xbe5a,0xe5e5,0xa929,0x1691,
    -0x3ec7,0x1de3,0x567d,0x4896,
    -0xbf2a,0x01a0,0x19bf,0xdf03,
    -0x3f81,0x1111,0x1110,0xf7d0,
    -0xbfc5,0x5555,0x5555,0x5548
    -};
    -static unsigned short coscof[] = {
    -0x3da8,0xff83,0x1ad9,0xb219,
    -0xbe21,0xeea7,0xc1e5,0x14d4,
    -0x3e92,0x7e4f,0x8e06,0xd9a5,
    -0xbefa,0x01a0,0x19dd,0xbcd9,
    -0x3f56,0xc16c,0x16c1,0x5d47,
    -0xbfa5,0x5555,0x5555,0x5551,
    -0x3fe0,0x0000,0x0000,0x0000
    -};
    -
    -static unsigned short P1[] = {
    -0x3f91,0xdf46,0xa252,0x9d39
    -};
    -#define PI180 *(double *)P1
    -static double lossth = 1.0e14;
    -#endif
    -
    -double sindg(x)
    -double x;
    -{
    -double y, z, zz;
    -int j, sign;
    -
    -/* make argument positive but save the sign */
    -sign = 1;
    -if( x < 0 )
    -	{
    -	x = -x;
    -	sign = -1;
    -	}
    -
    -if( x > lossth )
    -	{
    -	mtherr( "sindg", TLOSS );
    -	return(0.0);
    -	}
    -
    -y = floor( x/45.0 ); /* integer part of x/NPY_PI_4 */
    -
    -/* strip high bits of integer part to prevent integer overflow */
    -z = ldexp( y, -4 );
    -z = floor(z);           /* integer part of y/8 */
    -z = y - ldexp( z, 4 );  /* y - 16 * (y/16) */
    -
    -j = z; /* convert to integer for tests on the phase angle */
    -/* map zeros to origin */
    -if( j & 1 )
    -	{
    -	j += 1;
    -	y += 1.0;
    -	}
    -j = j & 07; /* octant modulo 360 degrees */
    -/* reflect in x axis */
    -if( j > 3)
    -	{
    -	sign = -sign;
    -	j -= 4;
    -	}
    -
    -z = x - y * 45.0; /* x mod 45 degrees */
    -z *= PI180;	/* multiply by pi/180 to convert to radians */
    -zz = z * z;
    -
    -if( (j==1) || (j==2) )
    -	{
    -	y = 1.0 - zz * polevl( zz, coscof, 6 );
    -	}
    -else
    -	{
    -	y = z  +  z * (zz * polevl( zz, sincof, 5 ));
    -	}
    -
    -if(sign < 0)
    -	y = -y;
    -
    -return(y);
    -}
    -
    -
    -double cosdg(x)
    -double x;
    -{
    -double y, z, zz;
    -int j, sign;
    -
    -/* make argument positive */
    -sign = 1;
    -if( x < 0 )
    -	x = -x;
    -
    -if( x > lossth )
    -	{
    -	mtherr( "cosdg", TLOSS );
    -	return(0.0);
    -	}
    -
    -y = floor( x/45.0 );
    -z = ldexp( y, -4 );
    -z = floor(z);		/* integer part of y/8 */
    -z = y - ldexp( z, 4 );  /* y - 16 * (y/16) */
    -
    -/* integer and fractional part modulo one octant */
    -j = z;
    -if( j & 1 )	/* map zeros to origin */
    -	{
    -	j += 1;
    -	y += 1.0;
    -	}
    -j = j & 07;
    -if( j > 3)
    -	{
    -	j -=4;
    -	sign = -sign;
    -	}
    -
    -if( j > 1 )
    -	sign = -sign;
    -
    -z = x - y * 45.0; /* x mod 45 degrees */
    -z *= PI180;	/* multiply by pi/180 to convert to radians */
    -
    -zz = z * z;
    -
    -if( (j==1) || (j==2) )
    -	{
    -	y = z  +  z * (zz * polevl( zz, sincof, 5 ));
    -	}
    -else
    -	{
    -	y = 1.0 - zz * polevl( zz, coscof, 6 );
    -	}
    -
    -if(sign < 0)
    -	y = -y;
    -
    -return(y);
    -}
    -
    -
    -/* Degrees, minutes, seconds to radians: */
    -
    -/* 1 arc second, in radians = 4.848136811095359935899141023579479759563533023727e-6 */
    -static double P64800 = 4.848136811095359935899141023579479759563533023727e-6;
    -
    -double radian(d,m,s)
    -double d,m,s;
    -{
    -return( ((d*60.0 + m)*60.0 + s)*P64800 );
    -}
    -
    -
    -
    diff --git a/scipy-0.10.1/scipy/special/cephes/spence.c b/scipy-0.10.1/scipy/special/cephes/spence.c
    deleted file mode 100644
    index 671636d2c3..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/spence.c
    +++ /dev/null
    @@ -1,199 +0,0 @@
    -/*							spence.c
    - *
    - *	Dilogarithm
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, spence();
    - *
    - * y = spence( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Computes the integral
    - *
    - *                    x
    - *                    -
    - *                   | | log t
    - * spence(x)  =  -   |   ----- dt
    - *                 | |   t - 1
    - *                  -
    - *                  1
    - *
    - * for x >= 0.  A rational approximation gives the integral in
    - * the interval (0.5, 1.5).  Transformation formulas for 1/x
    - * and 1-x are employed outside the basic expansion range.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      0,4         30000       3.9e-15     5.4e-16
    - *    DEC       0,4          3000       2.5e-16     4.5e-17
    - *
    - *
    - */
    -
    -/*							spence.c */
    -
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1985, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double A[8] = {
    -  4.65128586073990045278E-5,
    -  7.31589045238094711071E-3,
    -  1.33847639578309018650E-1,
    -  8.79691311754530315341E-1,
    -  2.71149851196553469920E0,
    -  4.25697156008121755724E0,
    -  3.29771340985225106936E0,
    -  1.00000000000000000126E0,
    -};
    -static double B[8] = {
    -  6.90990488912553276999E-4,
    -  2.54043763932544379113E-2,
    -  2.82974860602568089943E-1,
    -  1.41172597751831069617E0,
    -  3.63800533345137075418E0,
    -  5.03278880143316990390E0,
    -  3.54771340985225096217E0,
    -  9.99999999999999998740E-1,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short A[32] = {
    -0034503,0013315,0034120,0157771,
    -0036357,0135043,0016766,0150637,
    -0037411,0007533,0005212,0161475,
    -0040141,0031563,0023217,0120331,
    -0040455,0104461,0007002,0155522,
    -0040610,0034434,0065721,0120465,
    -0040523,0006674,0105671,0054427,
    -0040200,0000000,0000000,0000000,
    -};
    -static unsigned short B[32] = {
    -0035465,0021626,0032367,0144157,
    -0036720,0016326,0134431,0000406,
    -0037620,0161024,0133701,0120766,
    -0040264,0131557,0152055,0064512,
    -0040550,0152424,0051166,0034272,
    -0040641,0006233,0014672,0111572,
    -0040543,0006674,0105671,0054425,
    -0040200,0000000,0000000,0000000,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short A[32] = {
    -0x1bff,0xa70a,0x62d9,0x3f08,
    -0xda34,0x63be,0xf744,0x3f7d,
    -0x5c68,0x6151,0x21eb,0x3fc1,
    -0xf41b,0x64d1,0x266e,0x3fec,
    -0x5b6a,0x21c0,0xb126,0x4005,
    -0x3427,0x8d7a,0x0723,0x4011,
    -0x2b23,0x9177,0x61b7,0x400a,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -static unsigned short B[32] = {
    -0xf90e,0xc69e,0xa472,0x3f46,
    -0x2021,0xd723,0x039a,0x3f9a,
    -0x343f,0x96f8,0x1c42,0x3fd2,
    -0xad29,0xfa85,0x966d,0x3ff6,
    -0xc717,0x8a4e,0x1aa2,0x400d,
    -0x526f,0x6337,0x2193,0x4014,
    -0x2b23,0x9177,0x61b7,0x400c,
    -0x0000,0x0000,0x0000,0x3ff0,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short A[32] = {
    -0x3f08,0x62d9,0xa70a,0x1bff,
    -0x3f7d,0xf744,0x63be,0xda34,
    -0x3fc1,0x21eb,0x6151,0x5c68,
    -0x3fec,0x266e,0x64d1,0xf41b,
    -0x4005,0xb126,0x21c0,0x5b6a,
    -0x4011,0x0723,0x8d7a,0x3427,
    -0x400a,0x61b7,0x9177,0x2b23,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -static unsigned short B[32] = {
    -0x3f46,0xa472,0xc69e,0xf90e,
    -0x3f9a,0x039a,0xd723,0x2021,
    -0x3fd2,0x1c42,0x96f8,0x343f,
    -0x3ff6,0x966d,0xfa85,0xad29,
    -0x400d,0x1aa2,0x8a4e,0xc717,
    -0x4014,0x2193,0x6337,0x526f,
    -0x400c,0x61b7,0x9177,0x2b23,
    -0x3ff0,0x0000,0x0000,0x0000,
    -};
    -#endif
    -
    -extern double PI, MACHEP;
    -
    -double spence(x)
    -double x;
    -{
    -double w, y, z;
    -int flag;
    -
    -if( x < 0.0 )
    -	{
    -	mtherr( "spence", DOMAIN );
    -	return(NPY_NAN);
    -	}
    -
    -if( x == 1.0 )
    -	return( 0.0 );
    -
    -if( x == 0.0 )
    -	return( PI*PI/6.0 );
    -
    -flag = 0;
    -
    -if( x > 2.0 )
    -	{
    -	x = 1.0/x;
    -	flag |= 2;
    -	}
    -
    -if( x > 1.5 )
    -	{
    -	w = (1.0/x) - 1.0;
    -	flag |= 2;
    -	}
    -
    -else if( x < 0.5 )
    -	{
    -	w = -x;
    -	flag |= 1;
    -	}
    -
    -else
    -	w = x - 1.0;
    -
    -
    -y = -w * polevl( w, A, 7) / polevl( w, B, 7 );
    -
    -if( flag & 1 )
    -	y = (PI * PI)/6.0  - log(x) * log(1.0-x) - y;
    -
    -if( flag & 2 )
    -	{
    -	z = log(x);
    -	y = -0.5 * z * z  -  y;
    -	}
    -
    -return( y );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/stdtr.c b/scipy-0.10.1/scipy/special/cephes/stdtr.c
    deleted file mode 100644
    index 2973c2fa5c..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/stdtr.c
    +++ /dev/null
    @@ -1,216 +0,0 @@
    -/*							stdtr.c
    - *
    - *	Student's t distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double t, stdtr();
    - * short k;
    - *
    - * y = stdtr( k, t );
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Computes the integral from minus infinity to t of the Student
    - * t distribution with integer k > 0 degrees of freedom:
    - *
    - *                                      t
    - *                                      -
    - *                                     | |
    - *              -                      |         2   -(k+1)/2
    - *             | ( (k+1)/2 )           |  (     x   )
    - *       ----------------------        |  ( 1 + --- )        dx
    - *                     -               |  (      k  )
    - *       sqrt( k pi ) | ( k/2 )        |
    - *                                   | |
    - *                                    -
    - *                                   -inf.
    - * 
    - * Relation to incomplete beta integral:
    - *
    - *        1 - stdtr(k,t) = 0.5 * incbet( k/2, 1/2, z )
    - * where
    - *        z = k/(k + t**2).
    - *
    - * For t < -2, this is the method of computation.  For higher t,
    - * a direct method is derived from integration by parts.
    - * Since the function is symmetric about t=0, the area under the
    - * right tail of the density is found by calling the function
    - * with -t instead of t.
    - * 
    - * ACCURACY:
    - *
    - * Tested at random 1 <= k <= 25.  The "domain" refers to t.
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE     -100,-2      50000       5.9e-15     1.4e-15
    - *    IEEE     -2,100      500000       2.7e-15     4.9e-17
    - */
    -
    -/*							stdtri.c
    - *
    - *	Functional inverse of Student's t distribution
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double p, t, stdtri();
    - * int k;
    - *
    - * t = stdtri( k, p );
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Given probability p, finds the argument t such that stdtr(k,t)
    - * is equal to p.
    - * 
    - * ACCURACY:
    - *
    - * Tested at random 1 <= k <= 100.  The "domain" refers to p:
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE    .001,.999     25000       5.7e-15     8.0e-16
    - *    IEEE    10^-6,.001    25000       2.0e-12     2.9e-14
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.3:  March, 1995
    -Copyright 1984, 1987, 1995 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -
    -extern double PI, MACHEP, MAXNUM;
    -
    -double stdtr( k, t )
    -int k;
    -double t;
    -{
    -double x, rk, z, f, tz, p, xsqk;
    -int j;
    -
    -if( k <= 0 )
    -	{
    -	mtherr( "stdtr", DOMAIN );
    -	return(NPY_NAN);
    -	}
    -
    -if( t == 0 )
    -	return( 0.5 );
    -
    -if( t < -2.0 )
    -	{
    -	rk = k;
    -	z = rk / (rk + t * t);
    -	p = 0.5 * incbet( 0.5*rk, 0.5, z );
    -	return( p );
    -	}
    -
    -/*	compute integral from -t to + t */
    -
    -if( t < 0 )
    -	x = -t;
    -else
    -	x = t;
    -
    -rk = k;	/* degrees of freedom */
    -z = 1.0 + ( x * x )/rk;
    -
    -/* test if k is odd or even */
    -if( (k & 1) != 0)
    -	{
    -
    -	/*	computation for odd k	*/
    -
    -	xsqk = x/sqrt(rk);
    -	p = atan( xsqk );
    -	if( k > 1 )
    -		{
    -		f = 1.0;
    -		tz = 1.0;
    -		j = 3;
    -		while(  (j<=(k-2)) && ( (tz/f) > MACHEP )  )
    -			{
    -			tz *= (j-1)/( z * j );
    -			f += tz;
    -			j += 2;
    -			}
    -		p += f * xsqk/z;
    -		}
    -	p *= 2.0/PI;
    -	}
    -
    -
    -else
    -	{
    -
    -	/*	computation for even k	*/
    -
    -	f = 1.0;
    -	tz = 1.0;
    -	j = 2;
    -
    -	while(  ( j <= (k-2) ) && ( (tz/f) > MACHEP )  )
    -		{
    -		tz *= (j - 1)/( z * j );
    -		f += tz;
    -		j += 2;
    -		}
    -	p = f * x/sqrt(z*rk);
    -	}
    -
    -/*	common exit	*/
    -
    -
    -if( t < 0 )
    -	p = -p;	/* note destruction of relative accuracy */
    -
    -	p = 0.5 + 0.5 * p;
    -return(p);
    -}
    -
    -double stdtri( k, p )
    -int k;
    -double p;
    -{
    -double t, rk, z;
    -int rflg;
    -
    -if( k <= 0 || p <= 0.0 || p >= 1.0 )
    -	{
    -	mtherr( "stdtri", DOMAIN );
    -	return(NPY_NAN);
    -	}
    -
    -rk = k;
    -
    -if( p > 0.25 && p < 0.75 )
    -	{
    -	if( p == 0.5 )
    -		return( 0.0 );
    -	z = 1.0 - 2.0 * p;
    -	z = incbi( 0.5, 0.5*rk, fabs(z) );
    -	t = sqrt( rk*z/(1.0-z) );
    -	if( p < 0.5 )
    -		t = -t;
    -	return( t );
    -	}
    -rflg = -1;
    -if( p >= 0.5)
    -	{
    -	p = 1.0 - p;
    -	rflg = 1;
    -	}
    -z = incbi( 0.5*rk, 0.5, 2.0*p );
    -
    -if( MAXNUM * z < rk )
    -	return(rflg* MAXNUM);
    -t = sqrt( rk/z - rk );
    -return( rflg * t );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/struve.c b/scipy-0.10.1/scipy/special/cephes/struve.c
    deleted file mode 100644
    index ee75ced96a..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/struve.c
    +++ /dev/null
    @@ -1,305 +0,0 @@
    -/*							struve.c
    - *
    - *      Struve function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double v, x, y, struve();
    - *
    - * y = struve( v, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Computes the Struve function Hv(x) of order v, argument x.
    - * Negative x is rejected unless v is an integer.
    - *
    - * This module also contains the hypergeometric functions 1F2
    - * and 3F0 and a routine for the Bessel function Yv(x) with
    - * noninteger v.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - * Not accurately characterized, but spot checked against tables.
    - *
    - */
    -
    -
    -/*
    -Cephes Math Library Release 2.81:  June, 2000
    -Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier
    -*/
    -#include "mconf.h"
    -#define DEBUG 0
    -static double stop = 1.37e-17;
    -extern double MACHEP;
    -
    -double onef2( a, b, c, x, err )
    -double a, b, c, x;
    -double *err;
    -{
    -double n, a0, sum, t;
    -double an, bn, cn, max, z;
    -
    -an = a;
    -bn = b;
    -cn = c;
    -a0 = 1.0;
    -sum = 1.0;
    -n = 1.0;
    -t = 1.0;
    -max = 0.0;
    -
    -do
    -	{
    -	if( an == 0 )
    -		goto done;
    -	if( bn == 0 )
    -		goto error;
    -	if( cn == 0 )
    -		goto error;
    -	if( (a0 > 1.0e34) || (n > 200) )
    -		goto error;
    -	a0 *= (an * x) / (bn * cn * n);
    -	sum += a0;
    -	an += 1.0;
    -	bn += 1.0;
    -	cn += 1.0;
    -	n += 1.0;
    -	z = fabs( a0 );
    -	if( z > max )
    -		max = z;
    -	if( sum != 0 )
    -		t = fabs( a0 / sum );
    -	else
    -		t = z;
    -	}
    -while( t > stop );
    -
    -done:
    -
    -*err = fabs( MACHEP*max /sum );
    -
    -#if DEBUG
    -	printf(" onef2 cancellation error %.5E\n", *err );
    -#endif
    -
    -goto xit;
    -
    -error:
    -#if DEBUG
    -printf("onef2 does not converge\n");
    -#endif
    -*err = 1.0e38;
    -
    -xit:
    -
    -#if DEBUG
    -printf("onef2( %.2E %.2E %.2E %.5E ) =  %.3E  %.6E\n", a, b, c, x, n, sum);
    -#endif
    -return(sum);
    -}
    -
    -
    -
    -
    -double threef0( a, b, c, x, err )
    -double a, b, c, x;
    -double *err;
    -{
    -double n, a0, sum, t, conv, conv1;
    -double an, bn, cn, max, z;
    -
    -an = a;
    -bn = b;
    -cn = c;
    -a0 = 1.0;
    -sum = 1.0;
    -n = 1.0;
    -t = 1.0;
    -max = 0.0;
    -conv = 1.0e38;
    -conv1 = conv;
    -
    -do
    -	{
    -	if( an == 0.0 )
    -		goto done;
    -	if( bn == 0.0 )
    -		goto done;
    -	if( cn == 0.0 )
    -		goto done;
    -	if( (a0 > 1.0e34) || (n > 200) )
    -		goto error;
    -	a0 *= (an * bn * cn * x) / n;
    -	an += 1.0;
    -	bn += 1.0;
    -	cn += 1.0;
    -	n += 1.0;
    -	z = fabs( a0 );
    -	if( z > max )
    -		max = z;
    -	if( z >= conv )
    -		{
    -		if( (z < max) && (z > conv1) )
    -			goto done;
    -		}
    -	conv1 = conv;
    -	conv = z;
    -	sum += a0;
    -	if( sum != 0 )
    -		t = fabs( a0 / sum );
    -	else
    -		t = z;
    -	}
    -while( t > stop );
    -
    -done:
    -
    -t = fabs( MACHEP*max/sum );
    -#if DEBUG
    -	printf(" threef0 cancellation error %.5E\n", t );
    -#endif
    -
    -max = fabs( conv/sum );
    -if( max > t )
    -	t = max;
    -#if DEBUG
    -	printf(" threef0 convergence %.5E\n", max );
    -#endif
    -
    -goto xit;
    -
    -error:
    -#if DEBUG
    -printf("threef0 does not converge\n");
    -#endif
    -t = 1.0e38;
    -
    -xit:
    -
    -#if DEBUG
    -printf("threef0( %.2E %.2E %.2E %.5E ) =  %.3E  %.6E\n", a, b, c, x, n, sum);
    -#endif
    -
    -*err = t;
    -return(sum);
    -}
    -
    -
    -
    -
    -extern double PI;
    -
    -double struve( v, x )
    -double v, x;
    -{
    -double y, ya, f, g, h, t;
    -double onef2err, threef0err;
    -
    -if (x == 0.0) {
    -    if (v > -1) {
    -        return 0.0;
    -    } else if (v < -1) {
    -        if ((int)(floor(0.5-v)-1) % 2) return -NPY_INFINITY;
    -        else return NPY_INFINITY;
    -    } else {
    -        return 2.0/PI;
    -    }
    -}
    -
    -f = floor(v);
    -if( (v < 0) && ( v-f == 0.5 ) )
    -	{
    -	y = jv( -v, x );
    -	f = 1.0 - f;
    -	g =  2.0 * floor(f/2.0);
    -	if( g != f )
    -		y = -y;
    -	return(y);
    -	}
    -t = 0.25*x*x;
    -f = fabs(x);
    -g = 1.5 * fabs(v);
    -if( (f > 30.0) && (f > g) )
    -	{
    -	onef2err = 1.0e38;
    -	y = 0.0;
    -	}
    -else
    -	{
    -	y = onef2( 1.0, 1.5, 1.5+v, -t, &onef2err );
    -	}
    -
    -if( (f < 18.0) || (x < 0.0) )
    -	{
    -	threef0err = 1.0e38;
    -	ya = 0.0;
    -	}
    -else
    -	{
    -	ya = threef0( 1.0, 0.5, 0.5-v, -1.0/t, &threef0err );
    -	}
    -
    -f = sqrt( PI );
    -h = pow( 0.5*x, v-1.0 );
    -
    -if( onef2err <= threef0err )
    -	{
    -	g = gamma( v + 1.5 );
    -	y = y * h * t / ( 0.5 * f * g );
    -	return(y);
    -	}
    -else
    -	{
    -	g = gamma( v + 0.5 );
    -	ya = ya * h / ( f * g );
    -	ya = ya + yv( v, x );
    -	return(ya);
    -	}
    -}
    -
    -
    -
    -
    -/* Bessel function of noninteger order
    - */
    -
    -double yv( v, x )
    -double v, x;
    -{
    -double y, t;
    -int n;
    -
    -y = floor( v );
    -if( y == v )
    -	{
    -	n = v;
    -	y = yn( n, x );
    -	return( y );
    -	}
    -t = PI * v;
    -y = (cos(t) * jv( v, x ) - jv( -v, x ))/sin(t);
    -return( y );
    -}
    -
    -/* Crossover points between ascending series and asymptotic series
    - * for Struve function
    - *
    - *	 v	 x
    - * 
    - *	 0	19.2
    - *	 1	18.95
    - *	 2	19.15
    - *	 3	19.3
    - *	 5	19.7
    - *	10	21.35
    - *	20	26.35
    - *	30	32.31
    - *	40	40.0
    - */
    diff --git a/scipy-0.10.1/scipy/special/cephes/tandg.c b/scipy-0.10.1/scipy/special/cephes/tandg.c
    deleted file mode 100644
    index 2cb6d74d66..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/tandg.c
    +++ /dev/null
    @@ -1,165 +0,0 @@
    -/*							tandg.c
    - *
    - *	Circular tangent of argument in degrees
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, tandg();
    - *
    - * y = tandg( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the circular tangent of the argument x in degrees.
    - *
    - * Range reduction is modulo pi/4.  A rational function
    - *       x + x**3 P(x**2)/Q(x**2)
    - * is employed in the basic interval [0, pi/4].
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC      0,10          8000      3.4e-17      1.2e-17
    - *    IEEE     0,10         30000      3.2e-16      8.4e-17
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition          value returned
    - * tandg total loss   x > 8.0e14 (DEC)      0.0
    - *                    x > 1.0e14 (IEEE)
    - * tandg singularity  x = 180 k  +  90     MAXNUM
    - */
    -/*							cotdg.c
    - *
    - *	Circular cotangent of argument in degrees
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, cotdg();
    - *
    - * y = cotdg( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns the circular cotangent of the argument x in degrees.
    - *
    - * Range reduction is modulo pi/4.  A rational function
    - *       x + x**3 P(x**2)/Q(x**2)
    - * is employed in the basic interval [0, pi/4].
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition          value returned
    - * cotdg total loss   x > 8.0e14 (DEC)      0.0
    - *                    x > 1.0e14 (IEEE)
    - * cotdg singularity  x = 180 k            MAXNUM
    - */
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -#ifdef UNK
    -static double PI180 = 1.74532925199432957692E-2;
    -static double lossth = 1.0e14;
    -#endif
    -
    -#ifdef DEC
    -static unsigned short P1[] = {0036616,0175065,0011224,0164711};
    -#define PI180 *(double *)P1
    -static double lossth = 8.0e14;
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short P1[] = {0x9d39,0xa252,0xdf46,0x3f91};
    -#define PI180 *(double *)P1
    -static double lossth = 1.0e14;
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short P1[] = {
    -0x3f91,0xdf46,0xa252,0x9d39
    -};
    -#define PI180 *(double *)P1
    -static double lossth = 1.0e14;
    -#endif
    -
    -static double tancot(double, int);
    -extern double MAXNUM;
    -
    -double
    -tandg(double x)
    -{
    -    return( tancot(x,0) );
    -}
    -
    -
    -double
    -cotdg(double x)
    -{
    -    return( tancot(x,1) );
    -}
    -
    -
    -static double
    -tancot(double xx, int cotflg)
    -{
    -    double x;
    -    int sign;
    -
    -    /* make argument positive but save the sign */
    -    if( xx < 0 ) {
    -        x = -xx;
    -        sign = -1;
    -    } else {
    -        x = xx;
    -        sign = 1;
    -    }
    -
    -    if( x > lossth ) {
    -        mtherr("tandg", TLOSS);
    -        return 0.0;
    -    }
    -
    -    /* modulo 180 */
    -    x = x - 180.0*floor(x/180.0);
    -    if (cotflg) {
    -        if (x <= 90.0) {
    -            x = 90.0 - x;
    -        } else {
    -            x = x - 90.0;
    -            sign *= -1;
    -        }
    -    } else {
    -        if (x > 90.0) {
    -            x = 180.0 - x;
    -            sign *= -1;
    -        }
    -    }
    -    if (x == 0.0) {
    -        return 0.0;
    -    } else if (x == 45.0) {
    -        return sign*1.0;
    -    } else if (x == 90.0) {
    -        mtherr( (cotflg ? "cotdg" : "tandg"), SING );
    -        return MAXNUM;
    -    }
    -    /* x is now transformed into [0, 90) */
    -    return sign * tan(x*PI180);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/tukey.c b/scipy-0.10.1/scipy/special/cephes/tukey.c
    deleted file mode 100644
    index 3c6c668647..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/tukey.c
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -
    -/* Compute the CDF of the Tukey-Lambda distribution 
    -    using a braketing search with special checks
    - 
    -   The PPF of the Tukey-lambda distribution is 
    -   G(p) = p**lam + (1-p)**lam / lam
    -
    -   Author:  Travis Oliphant 
    -*/
    -
    -#include 
    -
    -#define SMALLVAL 1e-4
    -#define EPS 1.0e-14
    -#define MAXCOUNT 60
    -
    -double tukeylambdacdf(double x, double lmbda)
    -{
    -  double pmin, pmid, pmax, plow, phigh, xeval;
    -  int count;
    -
    -  xeval = 1.0/lmbda;
    -  if (lmbda > 0.0) {
    -    if (x < (-xeval)) return 0.0;
    -    if (x > xeval) return 1.0;
    -  }
    -
    -  if ((-SMALLVAL < lmbda) && (lmbda < SMALLVAL)) {
    -    if (x >= 0) return 1.0/(1.0+exp(-x));
    -    else return exp(x) / (1.0+exp(x));
    -  }
    -
    -  pmin = 0.0;
    -  pmid = 0.5;
    -  pmax = 1.0;
    -  plow = pmin;
    -  phigh = pmax;
    -  count = 0;
    -
    -  while ((count < MAXCOUNT) && (fabs(pmid-plow) > EPS)) {
    -    xeval = (pow(pmid, lmbda) - pow(1.0-pmid,lmbda))/lmbda;
    -    if (xeval == x) return pmid;
    -    if (xeval > x) {
    -      phigh = pmid;
    -      pmid = (pmid + plow)/2.0;
    -    }
    -    else {
    -      plow = pmid;
    -      pmid = (pmid + phigh)/2.0;
    -    }
    -    count++;
    -  }
    -  return pmid;
    -}
    -
    -
    -  
    diff --git a/scipy-0.10.1/scipy/special/cephes/unity.c b/scipy-0.10.1/scipy/special/cephes/unity.c
    deleted file mode 100644
    index 9e0bf88f21..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/unity.c
    +++ /dev/null
    @@ -1,118 +0,0 @@
    -/*							unity.c
    - *
    - * Relative error approximations for function arguments near
    - * unity.
    - *
    - *    log1p(x) = log(1+x)
    - *    expm1(x) = exp(x) - 1
    - *    cosm1(x) = cos(x) - 1
    - *
    - */
    -
    -#include "mconf.h"
    -/* log1p(x) = log(1 + x)  */
    -
    -/* Coefficients for log(1+x) = x - x**2/2 + x**3 P(x)/Q(x)
    - * 1/sqrt(2) <= x < sqrt(2)
    - * Theoretical peak relative error = 2.32e-20
    - */
    -static double LP[] = {
    - 4.5270000862445199635215E-5,
    - 4.9854102823193375972212E-1,
    - 6.5787325942061044846969E0,
    - 2.9911919328553073277375E1,
    - 6.0949667980987787057556E1,
    - 5.7112963590585538103336E1,
    - 2.0039553499201281259648E1,
    -};
    -static double LQ[] = {
    -/* 1.0000000000000000000000E0,*/
    - 1.5062909083469192043167E1,
    - 8.3047565967967209469434E1,
    - 2.2176239823732856465394E2,
    - 3.0909872225312059774938E2,
    - 2.1642788614495947685003E2,
    - 6.0118660497603843919306E1,
    -};
    -
    -#define SQRTH 0.70710678118654752440
    -#define SQRT2 1.41421356237309504880
    -
    -double log1p(double x)
    -{
    -double z;
    -
    -z = 1.0 + x;
    -if( (z < SQRTH) || (z > SQRT2) )
    -	return( log(z) );
    -z = x*x;
    -z = -0.5 * z + x * ( z * polevl( x, LP, 6 ) / p1evl( x, LQ, 6 ) );
    -return (x + z);
    -}
    -
    -
    -
    -/* expm1(x) = exp(x) - 1  */
    -
    -/*  e^x =  1 + 2x P(x^2)/( Q(x^2) - P(x^2) )
    - * -0.5 <= x <= 0.5
    - */
    -
    -static double EP[3] = {
    - 1.2617719307481059087798E-4,
    - 3.0299440770744196129956E-2,
    - 9.9999999999999999991025E-1,
    -};
    -static double EQ[4] = {
    - 3.0019850513866445504159E-6,
    - 2.5244834034968410419224E-3,
    - 2.2726554820815502876593E-1,
    - 2.0000000000000000000897E0,
    -};
    -
    -double expm1(double x)
    -{
    -double r, xx;
    -
    -if (!npy_isfinite(x)) {
    -        if (npy_isnan(x)) {
    -                return x;
    -        } else if (x > 0) {
    -                return x;
    -        } else {
    -                return -1.0;
    -        }
    -
    -}
    -if( (x < -0.5) || (x > 0.5) )
    -	return( exp(x) - 1.0 );
    -xx = x * x;
    -r = x * polevl( xx, EP, 2 );
    -r = r/( polevl( xx, EQ, 3 ) - r );
    -return (r + r);
    -}
    -
    -
    -
    -/* cosm1(x) = cos(x) - 1  */
    -
    -static double coscof[7] = {
    - 4.7377507964246204691685E-14,
    --1.1470284843425359765671E-11,
    - 2.0876754287081521758361E-9,
    --2.7557319214999787979814E-7,
    - 2.4801587301570552304991E-5,
    --1.3888888888888872993737E-3,
    - 4.1666666666666666609054E-2,
    -};
    -
    -double cosm1(double x)
    -{
    -double xx;
    -
    -if( (x < -NPY_PI_4) || (x > NPY_PI_4) )
    -	return( cos(x) - 1.0 );
    -xx = x * x;
    -xx = -0.5*xx + xx * xx * polevl( xx, coscof, 6 );
    -return xx;
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/yn.c b/scipy-0.10.1/scipy/special/cephes/yn.c
    deleted file mode 100644
    index d7e213431c..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/yn.c
    +++ /dev/null
    @@ -1,109 +0,0 @@
    -/*							yn.c
    - *
    - *	Bessel function of second kind of integer order
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, yn();
    - * int n;
    - *
    - * y = yn( n, x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - * Returns Bessel function of order n, where n is a
    - * (possibly negative) integer.
    - *
    - * The function is evaluated by forward recurrence on
    - * n, starting with values computed by the routines
    - * y0() and y1().
    - *
    - * If n = 0 or 1 the routine for y0 or y1 is called
    - * directly.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *
    - *                      Absolute error, except relative
    - *                      when y > 1:
    - * arithmetic   domain     # trials      peak         rms
    - *    DEC       0, 30        2200       2.9e-16     5.3e-17
    - *    IEEE      0, 30       30000       3.4e-15     4.3e-16
    - *
    - *
    - * ERROR MESSAGES:
    - *
    - *   message         condition      value returned
    - * yn singularity   x = 0              MAXNUM
    - * yn overflow                         MAXNUM
    - *
    - * Spot checked against tables for x, n between 0 and 100.
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.8:  June, 2000
    -Copyright 1984, 1987, 2000 by Stephen L. Moshier
    -*/
    -
    -#include "mconf.h"
    -extern double MAXNUM, MAXLOG;
    -
    -double yn( n, x )
    -int n;
    -double x;
    -{
    -double an, anm1, anm2, r;
    -int k, sign;
    -
    -if( n < 0 )
    -	{
    -	n = -n;
    -	if( (n & 1) == 0 )	/* -1**n */
    -		sign = 1;
    -	else
    -		sign = -1;
    -	}
    -else
    -	sign = 1;
    -
    -
    -if( n == 0 )
    -	return( sign * y0(x) );
    -if( n == 1 )
    -	return( sign * y1(x) );
    -
    -/* test for overflow */
    -if (x == 0.0) {
    -	mtherr("yn", SING);
    -	return -NPY_INFINITY;
    -} else if (x < 0.0) {
    -	mtherr("yn", DOMAIN);
    -        return NPY_NAN;
    -}
    -
    -/* forward recurrence on n */
    -
    -anm2 = y0(x);
    -anm1 = y1(x);
    -k = 1;
    -r = 2 * k;
    -do
    -	{
    -	an = r * anm1 / x  -  anm2;
    -	anm2 = anm1;
    -	anm1 = an;
    -	r += 2.0;
    -	++k;
    -	}
    -while( k < n );
    -
    -
    -return( sign * an );
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/zeta.c b/scipy-0.10.1/scipy/special/cephes/zeta.c
    deleted file mode 100644
    index 7db84d8ea1..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/zeta.c
    +++ /dev/null
    @@ -1,183 +0,0 @@
    -/*							zeta.c
    - *
    - *	Riemann zeta function of two arguments
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, q, y, zeta();
    - *
    - * y = zeta( x, q );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - *
    - *
    - *                 inf.
    - *                  -        -x
    - *   zeta(x,q)  =   >   (k+q)  
    - *                  -
    - *                 k=0
    - *
    - * where x > 1 and q is not a negative integer or zero.
    - * The Euler-Maclaurin summation formula is used to obtain
    - * the expansion
    - *
    - *                n         
    - *                -       -x
    - * zeta(x,q)  =   >  (k+q)  
    - *                -         
    - *               k=1        
    - *
    - *           1-x                 inf.  B   x(x+1)...(x+2j)
    - *      (n+q)           1         -     2j
    - *  +  ---------  -  -------  +   >    --------------------
    - *        x-1              x      -                   x+2j+1
    - *                   2(n+q)      j=1       (2j)! (n+q)
    - *
    - * where the B2j are Bernoulli numbers.  Note that (see zetac.c)
    - * zeta(x,1) = zetac(x) + 1.
    - *
    - *
    - *
    - * ACCURACY:
    - *
    - *
    - *
    - * REFERENCE:
    - *
    - * Gradshteyn, I. S., and I. M. Ryzhik, Tables of Integrals,
    - * Series, and Products, p. 1073; Academic Press, 1980.
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.0:  April, 1987
    -Copyright 1984, 1987 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -extern double MAXNUM, MACHEP;
    -
    -/* Expansion coefficients
    - * for Euler-Maclaurin summation formula
    - * (2k)! / B2k
    - * where B2k are Bernoulli numbers
    - */
    -static double A[] = {
    -12.0,
    --720.0,
    -30240.0,
    --1209600.0,
    -47900160.0,
    --1.8924375803183791606e9, /*1.307674368e12/691*/
    -7.47242496e10,
    --2.950130727918164224e12, /*1.067062284288e16/3617*/
    -1.1646782814350067249e14, /*5.109094217170944e18/43867*/
    --4.5979787224074726105e15, /*8.028576626982912e20/174611*/
    -1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/
    --7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/
    -};
    -/* 30 Nov 86 -- error in third coefficient fixed */
    -
    -
    -double zeta(x,q)
    -double x,q;
    -{
    -int i;
    -double a, b, k, s, t, w;
    -
    -if( x == 1.0 )
    -	goto retinf;
    -
    -if( x < 1.0 )
    -	{
    -domerr:
    -	mtherr( "zeta", DOMAIN );
    -	return(NPY_NAN);
    -	}
    -
    -if( q <= 0.0 )
    -	{
    -	if(q == floor(q))
    -		{
    -		mtherr( "zeta", SING );
    -retinf:
    -		return( MAXNUM );
    -		}
    -	if( x != floor(x) )
    -		goto domerr; /* because q^-x not defined */
    -	}
    -
    -/* Euler-Maclaurin summation formula */
    -/*
    -if( x < 25.0 )
    -*/
    -{
    -/* Permit negative q but continue sum until n+q > +9 .
    - * This case should be handled by a reflection formula.
    - * If q<0 and x is an integer, there is a relation to
    - * the polyGamma function.
    - */
    -s = pow( q, -x );
    -a = q;
    -i = 0;
    -b = 0.0;
    -while( (i < 9) || (a <= 9.0) )
    -	{
    -	i += 1;
    -	a += 1.0;
    -	b = pow( a, -x );
    -	s += b;
    -	if( fabs(b/s) < MACHEP )
    -		goto done;
    -	}
    -
    -w = a;
    -s += b*w/(x-1.0);
    -s -= 0.5 * b;
    -a = 1.0;
    -k = 0.0;
    -for( i=0; i<12; i++ )
    -	{
    -	a *= x + k;
    -	b /= w;
    -	t = a*b/A[i];
    -	s = s + t;
    -	t = fabs(t/s);
    -	if( t < MACHEP )
    -		goto done;
    -	k += 1.0;
    -	a *= x + k;
    -	b /= w;
    -	k += 1.0;
    -	}
    -done:
    -return(s);
    -}
    -
    -
    -
    -/* Basic sum of inverse powers */
    -/*
    -pseres:
    -
    -s = pow( q, -x );
    -a = q;
    -do
    -	{
    -	a += 2.0;
    -	b = pow( a, -x );
    -	s += b;
    -	}
    -while( b/s > MACHEP );
    -
    -b = pow( 2.0, -x );
    -s = (s + b)/(1.0-b);
    -return(s);
    -*/
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes/zetac.c b/scipy-0.10.1/scipy/special/cephes/zetac.c
    deleted file mode 100644
    index d521d6afbd..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes/zetac.c
    +++ /dev/null
    @@ -1,583 +0,0 @@
    - /*							zetac.c
    - *
    - *	Riemann zeta function
    - *
    - *
    - *
    - * SYNOPSIS:
    - *
    - * double x, y, zetac();
    - *
    - * y = zetac( x );
    - *
    - *
    - *
    - * DESCRIPTION:
    - *
    - *
    - *
    - *                inf.
    - *                 -    -x
    - *   zetac(x)  =   >   k   ,   x > 1,
    - *                 -
    - *                k=2
    - *
    - * is related to the Riemann zeta function by
    - *
    - *	Riemann zeta(x) = zetac(x) + 1.
    - *
    - * Extension of the function definition for x < 1 is implemented.
    - * Zero is returned for x > log2(MAXNUM).
    - *
    - * An overflow error may occur for large negative x, due to the
    - * Gamma function in the reflection formula.
    - *
    - * ACCURACY:
    - *
    - * Tabulated values have full machine accuracy.
    - *
    - *                      Relative error:
    - * arithmetic   domain     # trials      peak         rms
    - *    IEEE      1,50        10000       9.8e-16	    1.3e-16
    - *    DEC       1,50         2000       1.1e-16     1.9e-17
    - *
    - *
    - */
    -
    -/*
    -Cephes Math Library Release 2.1:  January, 1989
    -Copyright 1984, 1987, 1989 by Stephen L. Moshier
    -Direct inquiries to 30 Frost Street, Cambridge, MA 02140
    -*/
    -
    -#include "mconf.h"
    -
    -extern double MAXNUM, PI;
    -
    -/* Riemann zeta(x) - 1
    - * for integer arguments between 0 and 30.
    - */
    -#ifdef UNK
    -static double azetac[] = {
    --1.50000000000000000000E0,
    - 1.70141183460469231730E38, /* infinity. */
    - 6.44934066848226436472E-1,
    - 2.02056903159594285400E-1,
    - 8.23232337111381915160E-2,
    - 3.69277551433699263314E-2,
    - 1.73430619844491397145E-2,
    - 8.34927738192282683980E-3,
    - 4.07735619794433937869E-3,
    - 2.00839282608221441785E-3,
    - 9.94575127818085337146E-4,
    - 4.94188604119464558702E-4,
    - 2.46086553308048298638E-4,
    - 1.22713347578489146752E-4,
    - 6.12481350587048292585E-5,
    - 3.05882363070204935517E-5,
    - 1.52822594086518717326E-5,
    - 7.63719763789976227360E-6,
    - 3.81729326499983985646E-6,
    - 1.90821271655393892566E-6,
    - 9.53962033872796113152E-7,
    - 4.76932986787806463117E-7,
    - 2.38450502727732990004E-7,
    - 1.19219925965311073068E-7,
    - 5.96081890512594796124E-8,
    - 2.98035035146522801861E-8,
    - 1.49015548283650412347E-8,
    - 7.45071178983542949198E-9,
    - 3.72533402478845705482E-9,
    - 1.86265972351304900640E-9,
    - 9.31327432419668182872E-10
    -};
    -#endif
    -
    -#ifdef DEC
    -static unsigned short azetac[] = {
    -0140300,0000000,0000000,0000000,
    -0077777,0177777,0177777,0177777,
    -0040045,0015146,0022460,0076462,
    -0037516,0164001,0036001,0104116,
    -0037250,0114425,0061754,0022033,
    -0037027,0040616,0145174,0146670,
    -0036616,0011411,0100444,0104437,
    -0036410,0145550,0051474,0161067,
    -0036205,0115527,0141434,0133506,
    -0036003,0117475,0100553,0053403,
    -0035602,0056147,0045567,0027703,
    -0035401,0106157,0111054,0145242,
    -0035201,0002455,0113151,0101015,
    -0035000,0126235,0004273,0157260,
    -0034600,0071127,0112647,0005261,
    -0034400,0045736,0057610,0157550,
    -0034200,0031146,0172621,0074172,
    -0034000,0020603,0115503,0032007,
    -0033600,0013114,0124672,0023135,
    -0033400,0007330,0043715,0151117,
    -0033200,0004742,0145043,0033514,
    -0033000,0003225,0152624,0004411,
    -0032600,0002143,0033166,0035746,
    -0032400,0001354,0074234,0026143,
    -0032200,0000762,0147776,0170220,
    -0032000,0000514,0072452,0130631,
    -0031600,0000335,0114266,0063315,
    -0031400,0000223,0132710,0041045,
    -0031200,0000142,0073202,0153426,
    -0031000,0000101,0121400,0152065,
    -0030600,0000053,0140525,0072761
    -};
    -#endif
    -
    -#ifdef IBMPC
    -static unsigned short azetac[] = {
    -0x0000,0x0000,0x0000,0xbff8,
    -0xffff,0xffff,0xffff,0x7fef,
    -0x0fa6,0xc4a6,0xa34c,0x3fe4,
    -0x310a,0x2780,0xdd00,0x3fc9,
    -0x8483,0xac7d,0x1322,0x3fb5,
    -0x99b7,0xd94f,0xe831,0x3fa2,
    -0x9124,0x3024,0xc261,0x3f91,
    -0x9c47,0x0a67,0x196d,0x3f81,
    -0x96e9,0xf863,0xb36a,0x3f70,
    -0x6ae0,0xb02d,0x73e7,0x3f60,
    -0xe5f8,0xe96e,0x4b8c,0x3f50,
    -0x9954,0xf245,0x318d,0x3f40,
    -0x3042,0xb2cd,0x20a5,0x3f30,
    -0x7bd6,0xa117,0x1593,0x3f20,
    -0xe156,0xf2b4,0x0e4a,0x3f10,
    -0x1bed,0xcbf1,0x097b,0x3f00,
    -0x2f0f,0xdeb2,0x064c,0x3ef0,
    -0x6681,0x7368,0x0430,0x3ee0,
    -0x44cc,0x9537,0x02c9,0x3ed0,
    -0xba4a,0x08f9,0x01db,0x3ec0,
    -0x66ea,0x5944,0x013c,0x3eb0,
    -0x8121,0xbab2,0x00d2,0x3ea0,
    -0xc77d,0x66ce,0x008c,0x3e90,
    -0x858c,0x8f13,0x005d,0x3e80,
    -0xde12,0x59ff,0x003e,0x3e70,
    -0x5633,0x8ea5,0x0029,0x3e60,
    -0xccda,0xb316,0x001b,0x3e50,
    -0x0845,0x76b9,0x0012,0x3e40,
    -0x5ae3,0x4ed0,0x000c,0x3e30,
    -0x1a87,0x3460,0x0008,0x3e20,
    -0xaebe,0x782a,0x0005,0x3e10
    -};
    -#endif
    -
    -#ifdef MIEEE
    -static unsigned short azetac[] = {
    -0xbff8,0x0000,0x0000,0x0000,
    -0x7fef,0xffff,0xffff,0xffff,
    -0x3fe4,0xa34c,0xc4a6,0x0fa6,
    -0x3fc9,0xdd00,0x2780,0x310a,
    -0x3fb5,0x1322,0xac7d,0x8483,
    -0x3fa2,0xe831,0xd94f,0x99b7,
    -0x3f91,0xc261,0x3024,0x9124,
    -0x3f81,0x196d,0x0a67,0x9c47,
    -0x3f70,0xb36a,0xf863,0x96e9,
    -0x3f60,0x73e7,0xb02d,0x6ae0,
    -0x3f50,0x4b8c,0xe96e,0xe5f8,
    -0x3f40,0x318d,0xf245,0x9954,
    -0x3f30,0x20a5,0xb2cd,0x3042,
    -0x3f20,0x1593,0xa117,0x7bd6,
    -0x3f10,0x0e4a,0xf2b4,0xe156,
    -0x3f00,0x097b,0xcbf1,0x1bed,
    -0x3ef0,0x064c,0xdeb2,0x2f0f,
    -0x3ee0,0x0430,0x7368,0x6681,
    -0x3ed0,0x02c9,0x9537,0x44cc,
    -0x3ec0,0x01db,0x08f9,0xba4a,
    -0x3eb0,0x013c,0x5944,0x66ea,
    -0x3ea0,0x00d2,0xbab2,0x8121,
    -0x3e90,0x008c,0x66ce,0xc77d,
    -0x3e80,0x005d,0x8f13,0x858c,
    -0x3e70,0x003e,0x59ff,0xde12,
    -0x3e60,0x0029,0x8ea5,0x5633,
    -0x3e50,0x001b,0xb316,0xccda,
    -0x3e40,0x0012,0x76b9,0x0845,
    -0x3e30,0x000c,0x4ed0,0x5ae3,
    -0x3e20,0x0008,0x3460,0x1a87,
    -0x3e10,0x0005,0x782a,0xaebe
    -};
    -#endif
    -
    -
    -/* 2**x (1 - 1/x) (zeta(x) - 1) = P(1/x)/Q(1/x), 1 <= x <= 10 */
    -#ifdef UNK
    -static double P[9] = {
    -  5.85746514569725319540E11,
    -  2.57534127756102572888E11,
    -  4.87781159567948256438E10,
    -  5.15399538023885770696E9,
    -  3.41646073514754094281E8,
    -  1.60837006880656492731E7,
    -  5.92785467342109522998E5,
    -  1.51129169964938823117E4,
    -  2.01822444485997955865E2,
    -};
    -static double Q[8] = {
    -/*  1.00000000000000000000E0,*/
    -  3.90497676373371157516E11,
    -  5.22858235368272161797E10,
    -  5.64451517271280543351E9,
    -  3.39006746015350418834E8,
    -  1.79410371500126453702E7,
    -  5.66666825131384797029E5,
    -  1.60382976810944131506E4,
    -  1.96436237223387314144E2,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short P[36] = {
    -0052010,0060466,0101211,0134657,
    -0051557,0154353,0135060,0064411,
    -0051065,0133157,0133514,0133633,
    -0050231,0114735,0035036,0111344,
    -0047242,0164327,0146036,0033545,
    -0046165,0065364,0130045,0011005,
    -0045020,0134427,0075073,0134107,
    -0043554,0021653,0000440,0177426,
    -0042111,0151213,0134312,0021402,
    -};
    -static unsigned short Q[32] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0051665,0153363,0054252,0137010,
    -0051102,0143645,0121415,0036107,
    -0050250,0034073,0131133,0036465,
    -0047241,0123250,0150037,0070012,
    -0046210,0160426,0111463,0116507,
    -0045012,0054255,0031674,0173612,
    -0043572,0114460,0151520,0012221,
    -0042104,0067655,0037037,0137421,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short P[36] = {
    -0x3736,0xd051,0x0c26,0x4261,
    -0x0d21,0x7746,0xfb1d,0x424d,
    -0x96f3,0xf6e9,0xb6cd,0x4226,
    -0xd25c,0xa743,0x333b,0x41f3,
    -0xc6ed,0xf983,0x5d1a,0x41b4,
    -0xa241,0x9604,0xad5e,0x416e,
    -0x7709,0xef47,0x1722,0x4122,
    -0x1fe3,0x6024,0x8475,0x40cd,
    -0x4460,0x7719,0x3a51,0x4069,
    -};
    -static unsigned short Q[32] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x57c1,0x6b15,0xbade,0x4256,
    -0xa789,0xb461,0x58f4,0x4228,
    -0x67a7,0x764b,0x0707,0x41f5,
    -0xee01,0x1a03,0x34d5,0x41b4,
    -0x73a9,0xd266,0x1c22,0x4171,
    -0x9ef1,0xa677,0x4b15,0x4121,
    -0x0292,0x1a6a,0x5326,0x40cf,
    -0xf7e2,0xa7c3,0x8df5,0x4068,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short P[36] = {
    -0x4261,0x0c26,0xd051,0x3736,
    -0x424d,0xfb1d,0x7746,0x0d21,
    -0x4226,0xb6cd,0xf6e9,0x96f3,
    -0x41f3,0x333b,0xa743,0xd25c,
    -0x41b4,0x5d1a,0xf983,0xc6ed,
    -0x416e,0xad5e,0x9604,0xa241,
    -0x4122,0x1722,0xef47,0x7709,
    -0x40cd,0x8475,0x6024,0x1fe3,
    -0x4069,0x3a51,0x7719,0x4460,
    -};
    -static unsigned short Q[32] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4256,0xbade,0x6b15,0x57c1,
    -0x4228,0x58f4,0xb461,0xa789,
    -0x41f5,0x0707,0x764b,0x67a7,
    -0x41b4,0x34d5,0x1a03,0xee01,
    -0x4171,0x1c22,0xd266,0x73a9,
    -0x4121,0x4b15,0xa677,0x9ef1,
    -0x40cf,0x5326,0x1a6a,0x0292,
    -0x4068,0x8df5,0xa7c3,0xf7e2,
    -};
    -#endif
    -
    -/* log(zeta(x) - 1 - 2**-x), 10 <= x <= 50 */
    -#ifdef UNK
    -static double A[11] = {
    - 8.70728567484590192539E6,
    - 1.76506865670346462757E8,
    - 2.60889506707483264896E10,
    - 5.29806374009894791647E11,
    - 2.26888156119238241487E13,
    - 3.31884402932705083599E14,
    - 5.13778997975868230192E15,
    --1.98123688133907171455E15,
    --9.92763810039983572356E16,
    - 7.82905376180870586444E16,
    - 9.26786275768927717187E16,
    -};
    -static double B[10] = {
    -/* 1.00000000000000000000E0,*/
    --7.92625410563741062861E6,
    --1.60529969932920229676E8,
    --2.37669260975543221788E10,
    --4.80319584350455169857E11,
    --2.07820961754173320170E13,
    --2.96075404507272223680E14,
    --4.86299103694609136686E15,
    - 5.34589509675789930199E15,
    - 5.71464111092297631292E16,
    --1.79915597658676556828E16,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short A[44] = {
    -0046004,0156325,0126302,0131567,
    -0047050,0052177,0015271,0136466,
    -0050702,0060271,0070727,0171112,
    -0051766,0132727,0064363,0145042,
    -0053245,0012466,0056000,0117230,
    -0054226,0166155,0174275,0170213,
    -0055222,0003127,0112544,0101322,
    -0154741,0036625,0010346,0053767,
    -0156260,0054653,0154052,0031113,
    -0056213,0011152,0021000,0007111,
    -0056244,0120534,0040576,0163262,
    -};
    -static unsigned short B[40] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0145761,0161734,0033026,0015520,
    -0147031,0013743,0017355,0036703,
    -0150661,0011720,0061061,0136402,
    -0151737,0125216,0070274,0164414,
    -0153227,0032653,0127211,0145250,
    -0154206,0121666,0123774,0042035,
    -0155212,0033352,0125154,0132533,
    -0055227,0170201,0110775,0072132,
    -0056113,0003133,0127132,0122303,
    -0155577,0126351,0141462,0171037,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short A[44] = {
    -0x566f,0xb598,0x9b9a,0x4160,
    -0x37a7,0xe357,0x0a8f,0x41a5,
    -0xfe49,0x2e3a,0x4c17,0x4218,
    -0x7944,0xed1e,0xd6ba,0x425e,
    -0x13d3,0xcb80,0xa2a6,0x42b4,
    -0xbe11,0xbf17,0xdd8d,0x42f2,
    -0x905a,0xf2ac,0x40ca,0x4332,
    -0xcaff,0xa21c,0x27b2,0xc31c,
    -0x4649,0x7b05,0x0b35,0xc376,
    -0x01c9,0x4440,0x624d,0x4371,
    -0xdcd6,0x882f,0x942b,0x4374,
    -};
    -static unsigned short B[40] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0xc36a,0x86c2,0x3c7b,0xc15e,
    -0xa7b8,0x63dd,0x22fc,0xc1a3,
    -0x37a0,0x0c46,0x227a,0xc216,
    -0x9d22,0xce17,0xf551,0xc25b,
    -0x3955,0x75d1,0xe6b5,0xc2b2,
    -0x8884,0xd4ff,0xd476,0xc2f0,
    -0x96ab,0x554d,0x46dd,0xc331,
    -0xae8b,0x323f,0xfe10,0x4332,
    -0x5498,0x75cb,0x60cb,0x4369,
    -0x5e44,0x3866,0xf59d,0xc34f,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short A[44] = {
    -0x4160,0x9b9a,0xb598,0x566f,
    -0x41a5,0x0a8f,0xe357,0x37a7,
    -0x4218,0x4c17,0x2e3a,0xfe49,
    -0x425e,0xd6ba,0xed1e,0x7944,
    -0x42b4,0xa2a6,0xcb80,0x13d3,
    -0x42f2,0xdd8d,0xbf17,0xbe11,
    -0x4332,0x40ca,0xf2ac,0x905a,
    -0xc31c,0x27b2,0xa21c,0xcaff,
    -0xc376,0x0b35,0x7b05,0x4649,
    -0x4371,0x624d,0x4440,0x01c9,
    -0x4374,0x942b,0x882f,0xdcd6,
    -};
    -static unsigned short B[40] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0xc15e,0x3c7b,0x86c2,0xc36a,
    -0xc1a3,0x22fc,0x63dd,0xa7b8,
    -0xc216,0x227a,0x0c46,0x37a0,
    -0xc25b,0xf551,0xce17,0x9d22,
    -0xc2b2,0xe6b5,0x75d1,0x3955,
    -0xc2f0,0xd476,0xd4ff,0x8884,
    -0xc331,0x46dd,0x554d,0x96ab,
    -0x4332,0xfe10,0x323f,0xae8b,
    -0x4369,0x60cb,0x75cb,0x5498,
    -0xc34f,0xf59d,0x3866,0x5e44,
    -};
    -#endif
    -
    -/* (1-x) (zeta(x) - 1), 0 <= x <= 1 */
    -
    -#ifdef UNK
    -static double R[6] = {
    --3.28717474506562731748E-1,
    - 1.55162528742623950834E1,
    --2.48762831680821954401E2,
    - 1.01050368053237678329E3,
    - 1.26726061410235149405E4,
    --1.11578094770515181334E5,
    -};
    -static double S[5] = {
    -/* 1.00000000000000000000E0,*/
    - 1.95107674914060531512E1,
    - 3.17710311750646984099E2,
    - 3.03835500874445748734E3,
    - 2.03665876435770579345E4,
    - 7.43853965136767874343E4,
    -};
    -#endif
    -#ifdef DEC
    -static unsigned short R[24] = {
    -0137650,0046650,0022502,0040316,
    -0041170,0041222,0057666,0142216,
    -0142170,0141510,0167741,0075646,
    -0042574,0120074,0046505,0106053,
    -0043506,0001154,0130073,0101413,
    -0144331,0166414,0020560,0131652,
    -};
    -static unsigned short S[20] = {
    -/*0040200,0000000,0000000,0000000,*/
    -0041234,0013015,0042073,0113570,
    -0042236,0155353,0077325,0077445,
    -0043075,0162656,0016646,0031723,
    -0043637,0016454,0157636,0071126,
    -0044221,0044262,0140365,0146434,
    -};
    -#endif
    -#ifdef IBMPC
    -static unsigned short R[24] = {
    -0x481a,0x04a8,0x09b5,0xbfd5,
    -0xd892,0x4bf6,0x0852,0x402f,
    -0x2f75,0x1dfc,0x1869,0xc06f,
    -0xb185,0x89a8,0x9407,0x408f,
    -0x7061,0x9607,0xc04d,0x40c8,
    -0x1675,0x842e,0x3da1,0xc0fb,
    -};
    -static unsigned short S[20] = {
    -/*0x0000,0x0000,0x0000,0x3ff0,*/
    -0x72ef,0xa887,0x82c1,0x4033,
    -0xafe5,0x6fda,0xdb5d,0x4073,
    -0xc67a,0xc3b4,0xbcb5,0x40a7,
    -0xce4b,0x9bf3,0xe3a5,0x40d3,
    -0xb9a3,0x581e,0x2916,0x40f2,
    -};
    -#endif
    -#ifdef MIEEE
    -static unsigned short R[24] = {
    -0xbfd5,0x09b5,0x04a8,0x481a,
    -0x402f,0x0852,0x4bf6,0xd892,
    -0xc06f,0x1869,0x1dfc,0x2f75,
    -0x408f,0x9407,0x89a8,0xb185,
    -0x40c8,0xc04d,0x9607,0x7061,
    -0xc0fb,0x3da1,0x842e,0x1675,
    -};
    -static unsigned short S[20] = {
    -/*0x3ff0,0x0000,0x0000,0x0000,*/
    -0x4033,0x82c1,0xa887,0x72ef,
    -0x4073,0xdb5d,0x6fda,0xafe5,
    -0x40a7,0xbcb5,0xc3b4,0xc67a,
    -0x40d3,0xe3a5,0x9bf3,0xce4b,
    -0x40f2,0x2916,0x581e,0xb9a3,
    -};
    -#endif
    -
    -#define MAXL2 127
    -
    -/*
    - * Riemann zeta function, minus one
    - */
    -extern double MACHEP;
    -
    -double zetac(x)
    -double x;
    -{
    -int i;
    -double a, b, s, w;
    -
    -if( x < 0.0 )
    -	{
    -	if( x < -30.8148 )
    -		{
    -		mtherr( "zetac", OVERFLOW );
    -		return(0.0);
    -		}
    -	s = 1.0 - x;
    -	w = zetac( s );
    -	b = sin(0.5*PI*x) * pow(2.0*PI, x) * Gamma(s) * (1.0 + w) / PI;
    -	return(b - 1.0);
    -	}
    -
    -if( x >= MAXL2 )
    -	return(0.0);	/* because first term is 2**-x */
    -
    -/* Tabulated values for integer argument */
    -w = floor(x);
    -if( w == x )
    -	{
    -	i = x;
    -	if( i < 31 )
    -		{
    -#ifdef UNK
    -		return( azetac[i] );
    -#else
    -		return( *(double *)&azetac[4*i]  );
    -#endif
    -		}
    -	}
    -
    -
    -if( x < 1.0 )
    -	{
    -	w = 1.0 - x;
    -	a = polevl( x, R, 5 ) / ( w * p1evl( x, S, 5 ));
    -	return( a );
    -	}
    -
    -if( x == 1.0 )
    -	{
    -	mtherr( "zetac", SING );
    -	return( MAXNUM );
    -	}
    -
    -if( x <= 10.0 )
    -	{
    -	b = pow( 2.0, x ) * (x - 1.0);
    -	w = 1.0/x;
    -	s = (x * polevl( w, P, 8 )) / (b * p1evl( w, Q, 8 ));
    -	return( s );
    -	}
    -
    -if( x <= 50.0 )
    -	{
    -	b = pow( 2.0, -x );
    -	w = polevl( x, A, 10 ) / p1evl( x, B, 10 );
    -	w = exp(w) + b;
    -	return(w);
    -	}
    -
    -
    -/* Basic sum of inverse powers */
    -
    -
    -s = 0.0;
    -a = 1.0;
    -do
    -	{
    -	a += 2.0;
    -	b = pow( a, -x );
    -	s += b;
    -	}
    -while( b/s > MACHEP );
    -
    -b = pow( 2.0, -x );
    -s = (s + b)/(1.0-b);
    -return(s);
    -}
    diff --git a/scipy-0.10.1/scipy/special/cephes_doc.h b/scipy-0.10.1/scipy/special/cephes_doc.h
    deleted file mode 100644
    index 29a91e3c3a..0000000000
    --- a/scipy-0.10.1/scipy/special/cephes_doc.h
    +++ /dev/null
    @@ -1,170 +0,0 @@
    -#ifndef CEPHES_DOC_H
    -#define CEPHES_DOC_H
    -#define airy_doc "(Ai,Aip,Bi,Bip)=airy(z) calculates the Airy functions and their derivatives\nevaluated at real or complex number z.  The Airy functions Ai and Bi \nare two independent solutions of y''(x)=xy.  Aip and Bip are the first derivatives\nevaluated at x of Ai and Bi respectively."
    -#define airye_doc "(Aie,Aipe,Bie,Bipe)=airye(z) calculates the exponentially scaled Airy functions and \ntheir derivatives evaluated at real or complex number z.  \nairye(z)[0:1] = airy(z)[0:1] * exp(2.0/3.0*z*sqrt(z))\nairye(z)[2:3] = airy(z)[2:3] * exp(-abs((2.0/3.0*z*sqrt(z)).real))"
    -#define bdtr_doc "y=bdtr(k,n,p) returns the sum of the terms 0 through k of the\nBinomial probability density:  sum(nCj p**j (1-p)**(n-j),j=0..k)"
    -#define bdtrc_doc "y=bdtrc(k,n,p) returns the sum of the terms k+1 through n of the\nBinomial probability density: sum(nCj p**j (1-p)**(n-j), j=k+1..n)"
    -#define bdtri_doc "p=bdtri(k,n,y) finds the probability p such that the sum of the\nterms 0 through k of the Binomial probability density is equal to the\ngiven cumulative probability y."
    -#define bei_doc "y=bei(x) returns the Kelvin function bei x"
    -#define beip_doc "y=beip(x) returns the derivative of the Kelvin function bei x"
    -#define ber_doc "y=ber(x) returns the Kelvin function ber x"
    -#define berp_doc "y=berp(x) returns the derivative of the Kelvin function ber x"
    -#define besselpoly_doc "y=besselpoly(a,lam,nu) returns the value of the integral:\nintegral(x**lam * jv(nu,2*a*x),x=0..1)."
    -#define beta_doc "y=beta(a,b) returns gamma(a) * gamma(b) / gamma(a+b)"
    -#define betainc_doc "y=betainc(a,b,x) returns the incomplete beta integral of the\n" \
    -                "arguments, evaluated from zero to x: \n\n" \
    -                "gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).\n" \
    -                "\n" \
    -                "Note\n" \
    -                "----\n" \
    -                "The incomplete beta is also sometimes defined without the terms\n" \
    -                "in gamma, in which case the above definition is the so-called regularized\n" \
    -                "incomplete beta. Under this definition, you can get the incomplete beta by\n" \
    -                "multiplying the result of the scipy function by beta(a, b)."
    -#define betaincinv_doc "x=betaincinv(a,b,y) returns x such that betainc(a,b,x) = y."
    -#define betaln_doc "y=betaln(a,b) returns the natural logarithm of the absolute value of\nbeta: ln(|beta(x)|)."
    -#define btdtr_doc "y=btdtr(a,b,x) returns the area from zero to x under the beta\ndensity function: gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1)\n(1-t)**(b-1), t=0..x).  SEE ALSO betainc"
    -#define btdtri_doc "x=btdtri(a,b,p) returns the pth quantile of the beta distribution.  It is\neffectively the inverse of btdtr returning the value of x for which \nbtdtr(a,b,x) = p.   SEE ALSO betaincinv"
    -#define cbrt_doc "y=cbrt(x) returns the real cube root of x."
    -#define chdtr_doc "p=chdtr(v,x) Returns the area under the left hand tail (from 0 to x) of the Chi\nsquare probability density function with v degrees of freedom:\n1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)"
    -#define chdtrc_doc "p=chdtrc(v,x) returns the area under the right hand tail (from x to\ninfinity) of the Chi square probability density function with v\ndegrees of freedom:\n1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)"
    -#define chdtri_doc "x=chdtri(v,p) returns the argument x such that chdtrc(v,x) is equal\nto p."
    -#define cosdg_doc "y=cosdg(x) calculates the cosine of the angle x given in degrees."
    -#define cosm1_doc "y=calculates cos(x) - 1 for use when x is near zero."
    -#define cotdg_doc "y=cotdg(x) calculates the cotangent of the angle x given in degrees."
    -#define dawsn_doc "y=dawsn(x) returns dawson's integral: exp(-x**2) *\nintegral(exp(t**2),t=0..x)."
    -#define ellipe_doc "y=ellipe(m) returns the complete integral of the second kind:\nintegral(sqrt(1-m*sin(t)**2),t=0..pi/2)"
    -#define ellipeinc_doc "y=ellipeinc(phi,m) returns the incomplete elliptic integral of the\nsecond kind: integral(sqrt(1-m*sin(t)**2),t=0..phi)"
    -#define ellipj_doc "(sn,cn,dn,ph)=ellipj(u,m) calculates the Jacobian elliptic functions of\nparameter m between 0 and 1, and real u.  The returned functions are\noften written sn(u|m), cn(u|m), and dn(u|m).  The value of ph is such\nthat if u = ellik(ph,m), then sn(u|m) = sin(ph) and cn(u|m) = cos(ph)."
    -#define ellipkm1_doc "y=ellipkm1(1 - m) returns the complete integral of the first kind:\nintegral(1/sqrt(1-m*sin(t)**2),t=0..pi/2)"
    -#define ellipkinc_doc "y=ellipkinc(phi,m) returns the incomplete elliptic integral of the first\nkind: integral(1/sqrt(1-m*sin(t)**2),t=0..phi)"
    -#define erf_doc "y=erf(z) returns the error function of complex argument defined as\nas 2/sqrt(pi)*integral(exp(-t**2),t=0..z)"
    -#define erfc_doc "y=erfc(x) returns 1 - erf(x)."
    -#define exp1_doc "y=exp1(z) returns the exponential integral (n=1) of complex argument\nz: integral(exp(-z*t)/t,t=1..inf)."
    -#define exp10_doc "y=exp10(x) returns 10 raised to the x power."
    -#define exp2_doc "y=exp2(x) returns 2 raised to the x power."
    -#define expi_doc "y=expi(x) returns an exponential integral of argument x defined as\nintegral(exp(t)/t,t=-inf..x).  See expn for a different exponential\nintegral."
    -#define expm1_doc "y=expm1(x) calculates exp(x) - 1 for use when x is near zero."
    -#define expn_doc "y=expn(n,x) returns the exponential integral for integer n and\nnon-negative x and n: integral(exp(-x*t) / t**n, t=1..inf)."
    -#define fdtr_doc "y=fdtr(dfn,dfd,x) returns the area from zero to x under the F density\nfunction (also known as Snedcor's density or the variance ratio\ndensity).  This is the density of X = (unum/dfn)/(uden/dfd), where unum and\nuden are random variables having Chi square distributions with dfn and\ndfd degrees of freedom, respectively."
    -#define fdtrc_doc "y=fdtrc(dfn,dfd,x) returns the complemented F distribution function."
    -#define fdtri_doc "x=fdtri(dfn,dfd,p) finds the F density argument x such that \nfdtr(dfn,dfd,x)=p."
    -#define fdtridfd_doc "x=fdtridfd(dfn,p,x) finds the F density argument dfd such that \nfdtr(dfn,dfd,x)=p."
    -#define fdtridfn_doc "x=fdtridfn(p,dfd,x) finds the F density argument dfn such that \nfdtr(dfn,dfd,x)=p."
    -#define fresnel_doc "(ssa,cca)=fresnel(z) returns the fresnel sin and cos integrals: integral(sin(pi/2\n* t**2),t=0..z) and integral(cos(pi/2 * t**2),t=0..z) for real or \ncomplex z."
    -#define gamma_doc "y=gamma(z) returns the gamma function of the argument.  The gamma\nfunction is often referred to as the generalized factorial since \nz*gamma(z) = gamma(z+1) and gamma(n+1) = n! for natural number n."
    -#define gammainc_doc "y=gammainc(a,x) returns the incomplete gamma integral defined as\n1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x).  a must be\npositive and x must be >= 0."
    -#define gammaincc_doc "y=gammaincc(a,x) returns the complemented incomplete gamma integral\ndefined as 1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 -\ngammainc(a,x).  a must be positive and x must be >= 0."
    -#define gammainccinv_doc "x=gammainccinv(a,y) returns x such that gammaincc(a,x) = y."
    -#define gammaln_doc "y=gammaln(z) returns the base e logarithm of the absolute value of the\ngamma function of z: ln(|gamma(z)|)"
    -#define gdtr_doc "y=gdtr(a,b,x) returns the integral from zero to x of the gamma\nprobability density function: a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).\nThe arguments a and b are used differently here than in other definitions."
    -#define gdtrc_doc "y=gdtrc(a,b,x) returns the integral from x to infinity of the gamma\nprobability density function.  SEE gdtr, gdtri"
    -#define gdtri_doc "x=gdtri(a,b,p) returns pth quantile of the gamma distribution.  It is \nthe inverse of the gamma cdf returning the value of x for which \ngdtr(b,a,x) = p."
    -#define hankel1_doc "y=hankel1(v,z) returns the Hankel function of the first kind for real order v and complex argument z."
    -#define hankel1e_doc "y=hankel1e(v,z) returns the exponentially scaled Hankel function of the first\nkind for real order v and complex argument z:\nhankel1e(v,z) = hankel1(v,z) * exp(-1j * z)"
    -#define hankel2_doc "y=hankel2(v,z) returns the Hankel function of the second kind for real order v and complex argument z."
    -#define hankel2e_doc "y=hankel2e(v,z) returns the exponentially scaled Hankel function of the second\nkind for real order v and complex argument z:\nhankel1e(v,z) = hankel1(v,z) * exp(1j * z)"
    -#define hyp1f1_doc "y=hyp1f1(a,b,x) returns the confluent hypergeometeric function\n( 1F1(a,b;x) ) evaluated at the values a, b, and x."
    -#define hyp1f2_doc "(y,err)=hyp1f2(a,b,c,x) returns (y,err) with the hypergeometric function 1F2 in y and an error estimate in err."
    -#define hyp2f0_doc "(y,err)=hyp2f0(a,b,x,type) returns (y,err) with the hypergeometric function 2F0 in y and an error estimate in err.  The input type determines a convergence factor and\ncan be either 1 or 2."
    -#define hyp2f1_doc "y=hyp2f1(a,b,c,z) returns the gauss hypergeometric function\n( 2F1(a,b;c;z) )."
    -#define hyp3f0_doc "(y,err)=hyp3f0(a,b,c,x) returns (y,err) with the hypergeometric function 3F0 in y and an error estimate in err."
    -#define hyperu_doc "y=hyperu(a,b,x) returns the confluent hypergeometric function of the\nsecond kind U(a,b,x)."
    -#define i0_doc "y=i0(x) returns the modified Bessel function of order 0 at x."
    -#define i0e_doc "y=i0e(x) returns the exponentially scaled modified Bessel function\nof order 0 at x.  i0e(x) = exp(-|x|) * i0(x)."
    -#define i1_doc "y=i1(x) returns the modified Bessel function of order 1 at x."
    -#define i1e_doc "y=i1e(x) returns the exponentially scaled modified Bessel function\nof order 0 at x.  i1e(x) = exp(-|x|) * i1(x)."
    -#define it2i0k0_doc "(ii0,ik0)=it2i0k0(x) returns the integrals int((i0(t)-1)/t,t=0..x) and \nint(k0(t)/t,t=x..infinitity)."
    -#define it2j0y0_doc "(ij0,iy0)=it2j0y0(x) returns the integrals int((1-j0(t))/t,t=0..x) and \nint(y0(t)/t,t=x..infinitity)."
    -#define it2struve0_doc "y=it2struve0(x) returns the integral of the Struve function of order 0 \ndivided by t from x to infinity:  integral(H0(t)/t, t=x..inf)."
    -#define itairy_doc "(Apt,Bpt,Ant,Bnt)=itairy(x) calculates the integral of Airy functions from 0 to x\nfor positive (Apt, Bpt) and negative (Ant, Bnt) arguments."
    -#define iti0k0_doc "(ii0,ik0)=iti0k0(x) returns simple integrals from 0 to x of the zeroth order \nmodified bessel functions i0 and k0."
    -#define itj0y0_doc "(ij0,iy0)=itj0y0(x) returns simple integrals from 0 to x of the zeroth order \nbessel functions j0 and y0."
    -#define itmodstruve0_doc "y=itmodstruve0(x) returns the integral of the modified Struve function\nof order 0 from 0 to x:  integral(L0(t), t=0..x)."
    -#define itstruve0_doc "y=itstruve0(x) returns the integral of the Struve function of order 0 \nfrom 0 to x:  integral(H0(t), t=0..x)."
    -#define iv_doc "y=iv(v,z) returns the modified Bessel function of real order v of\nz.  If z is of real type and negative, v must be integer valued."
    -#define ive_doc "y=ive(v,z) returns the exponentially scaled modified Bessel function of \nreal order v and complex z: ive(v,z) = iv(v,z) * exp(-abs(z.real))"
    -#define j0_doc "y=j0(x) returns the Bessel function of order 0 at x."
    -#define j1_doc "y=j1(x) returns the Bessel function of order 1 at x."
    -#define jn_doc "y=jn(n,x) returns the Bessel function of integer order n at  x."
    -#define jv_doc "y=jv(v,z) returns the Bessel function of real order v at complex z."
    -#define jve_doc "y=jve(v,z) returns the exponentially scaled Bessel function of real order\nv at complex z: jve(v,z) = jv(v,z) * exp(-abs(z.imag))"
    -#define k0_doc "y=k0(x) returns the modified Bessel function of the second kind (sometimes called the third kind) of\norder 0 at x."
    -#define k0e_doc "y=k0e(x) returns the exponentially scaled modified Bessel function\nof the second kind (sometimes called the third kind) of order 0 at x.  k0e(x) = exp(x) * k0(x)."
    -#define k1_doc "y=i1(x) returns the modified Bessel function of the second kind (sometimes called the third kind) of\norder 1 at x."
    -#define k1e_doc "y=k1e(x) returns the exponentially scaled modified Bessel function\nof the second kind (sometimes called the third kind) of order 1 at x.  k1e(x) = exp(x) * k1(x)"
    -#define kei_doc "y=kei(x) returns the Kelvin function ker x"
    -#define keip_doc "y=keip(x) returns the derivative of the Kelvin function kei x"
    -#define kelvin_doc "(Be, Ke, Bep, Kep)=kelvin(x) returns the tuple (Be, Ke, Bep, Kep) which containes \ncomplex numbers representing the real and imaginary Kelvin functions \nand their derivatives evaluated at x.  For example, \nkelvin(x)[0].real = ber x and kelvin(x)[0].imag = bei x with similar \nrelationships for ker and kei."
    -#define ker_doc "y=ker(x) returns the Kelvin function ker x"
    -#define kerp_doc "y=kerp(x) returns the derivative of the Kelvin function ker x"
    -#define kn_doc "y=kn(n,x) returns the modified Bessel function of the second kind (sometimes called the third kind) for\ninteger order n at x."
    -#define kolmogi_doc "y=kolmogi(p) returns y such that kolmogorov(y) = p"
    -#define kolmogorov_doc "p=kolmogorov(y) returns the complementary cumulative distribution \nfunction of Kolmogorov's limiting distribution (Kn* for large n) \nof a two-sided test for equality between an empirical and a theoretical \ndistribution. It is equal to the (limit as n->infinity of the) probability \nthat sqrt(n) * max absolute deviation > y."
    -#define kv_doc "y=kv(v,z) returns the modified Bessel function of the second kind (sometimes called the third kind) for\nreal order v at complex z."
    -#define kve_doc "y=kve(v,z) returns the exponentially scaled, modified Bessel function\nof the second kind (sometimes called the third kind) for real order v at complex z: kve(v,z) = kv(v,z) * exp(z)"
    -#define log1p_doc "y=log1p(x) calculates log(1+x) for use when x is near zero."
    -#define lpmv_doc "y=lpmv(m,v,x) returns the associated legendre function of integer order\nm and real degree v (s.t. v>-m-1 or v=0 and n>=m, spheroidal parameter c and |x|<1.0."
    -#define obl_ang1_cv_doc "(s,sp)=obl_ang1_cv(m,n,c,cv,x) computes the oblate sheroidal angular function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0. Requires pre-computed\ncharacteristic value."
    -#define obl_cv_doc "cv=obl_cv(m,n,c) computes the characteristic value of oblate spheroidal \nwave functions of order m,n (n>=m) and spheroidal parameter c."
    -#define obl_rad1_doc "(s,sp)=obl_rad1(m,n,c,x) computes the oblate sheroidal radial function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0."
    -#define obl_rad1_cv_doc "(s,sp)=obl_rad1_cv(m,n,c,cv,x) computes the oblate sheroidal radial function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0. Requires pre-computed\ncharacteristic value."
    -#define obl_rad2_doc "(s,sp)=obl_rad2(m,n,c,x) computes the oblate sheroidal radial function \nof the second kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0."
    -#define obl_rad2_cv_doc "(s,sp)=obl_rad2_cv(m,n,c,cv,x) computes the oblate sheroidal radial function \nof the second kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0. Requires pre-computed\ncharacteristic value."
    -#define pbdv_doc "(d,dp)=pbdv(v,x) returns (d,dp) with the parabolic cylinder function Dv(x) in \nd and the derivative, Dv'(x) in dp."
    -#define pbvv_doc "(v,vp)=pbvv(v,x) returns (v,vp) with the parabolic cylinder function Vv(x) in \nv and the derivative, Vv'(x) in vp."
    -#define pbwa_doc "(w,wp)=pbwa(a,x) returns (w,wp) with the parabolic cylinder function W(a,x) in \nw and the derivative, W'(a,x) in wp.  May not be accurate for large (>5) \narguments in a and/or x."
    -#define pdtr_doc "y=pdtr(k,m) returns the sum of the first k terms of the Poisson\ndistribution: sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m).\nArguments must both be positive and k an integer."
    -#define pdtrc_doc "y=pdtrc(k,m) returns the sum of the terms from k+1 to infinity of the\nPoisson distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc( k+1, m).\nArguments must both be positive and k an integer."
    -#define pdtri_doc "m=pdtri(k,y) returns the Poisson variable m such that the sum\nfrom 0 to k of the Poisson density is equal to the given probability\ny:  calculated by gammaincinv( k+1, y).  k must be a nonnegative integer and\ny between 0 and 1."
    -#define pdtrik_doc "k=pdtrik(p,m) returns the quantile k such that pdtr(k,m)=p"
    -#define pro_ang1_doc "(s,sp)=pro_ang1(m,n,c,x) computes the prolate sheroidal angular function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0."
    -#define pro_ang1_cv_doc "(s,sp)=pro_ang1_cv(m,n,c,cv,x) computes the prolate sheroidal angular function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0. Requires pre-computed\ncharacteristic value."
    -#define pro_cv_doc "cv=pro_cv(m,n,c) computes the characteristic value of prolate spheroidal \nwave functions of order m,n (n>=m) and spheroidal parameter c."
    -#define pro_rad1_doc "(s,sp)=pro_rad1(m,n,c,x) computes the prolate sheroidal radial function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0."
    -#define pro_rad1_cv_doc "(s,sp)=pro_rad1_cv(m,n,c,cv,x) computes the prolate sheroidal radial function \nof the first kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0. Requires pre-computed\ncharacteristic value."
    -#define pro_rad2_doc "(s,sp)=pro_rad2(m,n,c,x) computes the prolate sheroidal radial function \nof the second kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0."
    -#define pro_rad2_cv_doc "(s,sp)=pro_rad2_cv(m,n,c,cv,x) computes the prolate sheroidal radial function \nof the second kind and its derivative (with respect to x) for mode paramters\nm>=0 and n>=m, spheroidal parameter c and |x|<1.0. Requires pre-computed\ncharacteristic value."
    -#define psi_doc "y=psi(z) is the derivative of the logarithm of the gamma function\nevaluated at z (also called the digamma function)."
    -#define radian_doc "y=radian(d,m,s) returns the angle given in (d)egrees, (m)inutes, and\n(s)econds in radians."
    -#define rgamma_doc "y=rgamma(z) returns one divided by the gamma function of x."
    -#define round_doc "y=Returns the nearest integer to x as a double precision\nfloating point result.  If x ends in 0.5 exactly, the\nnearest even integer is chosen."
    -#define shichi_doc "(shi,chi)=shichi(x) returns the hyperbolic sine and cosine integrals:\nintegral(sinh(t)/t,t=0..x) and eul + ln x +\nintegral((cosh(t)-1)/t,t=0..x) where eul is Euler's Constant."
    -#define sici_doc "(si,ci)=sici(x) returns in si the integral of the sinc function from 0 to x:\nintegral(sin(t)/t,t=0..x).  It returns in ci the cosine integral: eul + ln x +\nintegral((cos(t) - 1)/t,t=0..x)."
    -#define sindg_doc "y=sindg(x) calculates the sine of the angle x given in degrees."
    -#define smirnov_doc "y=smirnov(n,e) returns the exact Kolmogorov-Smirnov complementary \ncumulative distribution function (Dn+ or Dn-) for a one-sided test of \nequality between an empirical and a theoretical distribution. It is equal \nto the probability that the maximum difference between a theoretical \ndistribution and an empirical one based on n samples is greater than e."
    -#define smirnovi_doc "e=smirnovi(n,y) returns e such that smirnov(n,e) = y."
    -#define spence_doc "y=spence(x) returns the dilogarithm integral: -integral(log t /\n(t-1),t=1..x)"
    -#define stdtr_doc "p=stdtr(df,t) returns the integral from minus infinity to t of the Student t\ndistribution with df > 0 degrees of freedom:\ngamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) * integral((1+x**2/df)**(-df/2-1/2),\nx=-inf..t)"
    -#define stdtridf_doc "t=stdtridf(p,t) returns the argument df such that stdtr(df,t) is equal to p."
    -#define stdtrit_doc "t=stdtrit(df,p) returns the argument t such that stdtr(df,t) is equal to p."
    -#define struve_doc "y=struve(v,x) returns the Struve function Hv(x) of order v at x, x\nmust be positive unless v is an integer."
    -#define tandg_doc "y=tandg(x) calculates the tangent of the angle x given in degrees."
    -#define wofz_doc "y=wofz(z) returns the value of the fadeeva function for complex argument\nz: exp(-z**2)*erfc(-i*z)"
    -#define y0_doc "y=y0(x) returns the Bessel function of the second kind of order 0 at x."
    -#define y1_doc "y=y1(x) returns the Bessel function of the second kind of order 1 at x."
    -#define yn_doc "y=yn(n,x) returns the Bessel function of the second kind of integer\norder n at x."
    -#define yv_doc "y=yv(v,z) returns the Bessel function of the second kind of real\norder v at complex z."
    -#define yve_doc "y=yve(v,z) returns the exponentially scaled Bessel function of the second \nkind of real order v at complex z: yve(v,z) = yv(v,z) * exp(-abs(z.imag))"
    -#define zeta_doc "y=zeta(x,q) returns the Riemann zeta function of two arguments:\nsum((k+q)**(-x),k=0..inf)"
    -#define zetac_doc "y=zetac(x) returns 1.0 - the Riemann zeta function: sum(k**(-x), k=2..inf)"
    -#endif /* CEPHES_DOC_H */
    diff --git a/scipy-0.10.1/scipy/special/gendoc.py b/scipy-0.10.1/scipy/special/gendoc.py
    deleted file mode 100644
    index 935a62017d..0000000000
    --- a/scipy-0.10.1/scipy/special/gendoc.py
    +++ /dev/null
    @@ -1,43 +0,0 @@
    -#!/usr/bin/env python
    -
    -"""generate cephes_doc.h from included_functions.html"""
    -
    -
    -def parse(infile):
    -    d={}
    -    key=None
    -    val=''
    -    prev_line = ''
    -    for line in infile.readlines():
    -        if not line.strip():
    -            continue
    -        if line[0]=='<':
    -            if key and val:
    -                d[key]=val.strip()
    -                key,val=None,None
    -            if line[:4]=='
    ': - tok=line.split() - tok=tok[-1].split('(') - key=tok[0] - elif line[:4]=='
    ' and key: - prev_line = prev_line[4:] - tok = prev_line.split(' = ') - val=tok[0]+'='+line[4:] - else: - if val: - val=val+line - prev_line = line - - return d - -if __name__=="__main__": - d = parse(open("docs/included_functions.html",'r')) - keys = d.keys() - keys.sort() - ofile=open("cephes_doc.h",'w') - ofile.write('#ifndef CEPHES_DOC_H\n') - ofile.write('#define CEPHES_DOC_H\n') - for key in keys: - ofile.write('#define %s_doc "%s"\n'%(key,repr(d[key])[1:-1])) - ofile.write('#endif /* CEPHES_DOC_H */\n') - ofile.close() diff --git a/scipy-0.10.1/scipy/special/lambertw.c b/scipy-0.10.1/scipy/special/lambertw.c deleted file mode 100644 index acfd733065..0000000000 --- a/scipy-0.10.1/scipy/special/lambertw.c +++ /dev/null @@ -1,2774 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:19:53 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define 
PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat 
PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - 
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__special__lambertw -#define __PYX_HAVE_API__scipy__special__lambertw -#include "math.h" -#include "numpy/npy_math.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ 
((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... */ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... 
*/ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "lambertw.pyx", -}; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -/*--- Type declarations ---*/ - - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) 
__Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -#ifndef __PYX_FORCE_INIT_THREADS - #if PY_VERSION_HEX < 0x02040200 - #define __PYX_FORCE_INIT_THREADS 1 - #else - #define __PYX_FORCE_INIT_THREADS 0 - #endif -#endif - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define 
__Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -#define __pyx_PyComplex_FromComplex(z) \ - PyComplex_FromDoubles((double)__Pyx_CREAL(z), \ - (double)__Pyx_CIMAG(z)) - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_intp(npy_intp); - -static CYTHON_INLINE npy_intp __Pyx_PyInt_from_py_npy_intp(PyObject *); - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename); /*proto*/ - -static int __Pyx_check_binary_version(void); - -static void 
__Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'cython.cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'scipy.special.lambertw' */ -static PyUFuncGenericFunction __pyx_v_5scipy_7special_8lambertw__loop_funcs[1]; -static char __pyx_v_5scipy_7special_8lambertw__inp_outp_types[4]; -static void *__pyx_v_5scipy_7special_8lambertw_the_func_to_apply[1]; -static CYTHON_INLINE int __pyx_f_5scipy_7special_8lambertw_zisnan(__pyx_t_double_complex); /*proto*/ -static CYTHON_INLINE double __pyx_f_5scipy_7special_8lambertw_zabs(__pyx_t_double_complex); /*proto*/ -static CYTHON_INLINE __pyx_t_double_complex __pyx_f_5scipy_7special_8lambertw_zlog(__pyx_t_double_complex); /*proto*/ -static CYTHON_INLINE __pyx_t_double_complex __pyx_f_5scipy_7special_8lambertw_zexp(__pyx_t_double_complex); /*proto*/ -static void __pyx_f_5scipy_7special_8lambertw_lambertw_raise_warning(__pyx_t_double_complex); /*proto*/ -static __pyx_t_double_complex __pyx_f_5scipy_7special_8lambertw_lambertw_scalar(__pyx_t_double_complex, long, double); /*proto*/ -static void __pyx_f_5scipy_7special_8lambertw__apply_func_to_1d_vec(char **, npy_intp *, npy_intp *, void *); /*proto*/ -#define __Pyx_MODULE_NAME "scipy.special.lambertw" -int __pyx_module_is_main_scipy__special__lambertw = 0; - -/* Implementation of 'scipy.special.lambertw' */ -static PyObject *__pyx_builtin_range; -static char __pyx_k_1[] = "Lambert W iteration failed to converge: %r"; -static char __pyx_k_3[] = ""; -static char __pyx_k_4[] = "scipy.special.lambertw"; -static char __pyx_k_5[] = "lambertw (line 193)"; -static char __pyx_k_6[] = "\n lambertw(z, k=0, tol=1e-8)\n\n Lambert W function.\n\n The Lambert W function `W(z)` is defined as the inverse function\n of :math:`w \\exp(w)`. 
In other words, the value of :math:`W(z)` is\n such that :math:`z = W(z) \\exp(W(z))` for any complex number\n :math:`z`.\n\n The Lambert W function is a multivalued function with infinitely\n many branches. Each branch gives a separate solution of the\n equation :math:`w \\exp(w)`. Here, the branches are indexed by the\n integer `k`.\n \n Parameters\n ----------\n z : array_like\n Input argument\n k : integer, optional\n Branch index\n tol : float\n Evaluation tolerance\n\n Notes\n -----\n All branches are supported by `lambertw`:\n\n * ``lambertw(z)`` gives the principal solution (branch 0)\n * ``lambertw(z, k)`` gives the solution on branch `k`\n\n The Lambert W function has two partially real branches: the\n principal branch (`k = 0`) is real for real `z > -1/e`, and the\n `k = -1` branch is real for `-1/e < z < 0`. All branches except\n `k = 0` have a logarithmic singularity at `z = 0`.\n\n .. rubric:: Possible issues\n \n The evaluation can become inaccurate very close to the branch point\n at `-1/e`. In some corner cases, :func:`lambertw` might currently\n fail to converge, or can end up on the wrong branch.\n\n .. rubric:: Algorithm\n\n Halley's iteration is used to invert `w \\exp(w)`, using a first-order\n asymptotic approximation (`O(\\log(w))` or `O(w)`) as the initial\n estimate.\n\n The definition, implementation and choice of branches is based\n on Corless et al, \"On the Lambert W function\", Adv. Comp. Math. 
5\n (1996) 329-359, available online here:\n http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf\n \n TODO: use a series expansion when extremely close to the branch point\n at `-1/e` and make sure that the proper branch is chosen there\n\n Examples""\n --------\n The Lambert W function is the inverse of `w \\exp(w)`::\n\n >>> from scipy.special import lambertw\n >>> w = lambertw(1)\n >>> w\n 0.56714329040978387299996866221035555\n >>> w*exp(w)\n 1.0\n\n Any branch gives a valid inverse::\n\n >>> w = lambertw(1, k=3)\n >>> w\n (-2.8535817554090378072068187234910812 +\n 17.113535539412145912607826671159289j)\n >>> w*exp(w)\n (1.0 + 3.5075477124212226194278700785075126e-36j)\n\n .. rubric:: Applications to equation-solving\n\n The Lambert W function may be used to solve various kinds of\n equations, such as finding the value of the infinite power\n tower `z^{z^{z^{\\ldots}}}`::\n\n >>> def tower(z, n):\n ... if n == 0:\n ... return z\n ... return z ** tower(z, n-1)\n ...\n >>> tower(0.5, 100)\n 0.641185744504986\n >>> -lambertw(-log(0.5))/log(0.5)\n 0.6411857445049859844862004821148236665628209571911\n\n .. rubric:: Properties\n\n The Lambert W function grows roughly like the natural logarithm\n for large arguments::\n\n >>> lambertw(1000)\n 5.2496028524016\n >>> log(1000)\n 6.90775527898214\n >>> lambertw(10**100)\n 224.843106445119\n >>> log(10**100)\n 230.258509299405\n \n The principal branch of the Lambert W function has a rational\n Taylor series expansion around `z = 0`::\n \n >>> nprint(taylor(lambertw, 0, 6), 10)\n [0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]\n \n Some special values and limits are::\n \n >>> lambertw(0)\n 0.0\n >>> lambertw(1)\n 0.567143290409784\n >>> lambertw(e)\n 1.0\n >>> lambertw(inf)\n +inf\n >>> lambertw(0, k=-1)\n -inf\n >>> lambertw(0, k=3)\n -inf\n >>> lambertw(inf, k=3)\n (+inf + 18.8495559215388j)\n\n The `k = 0` and `k = -1` branches join at `z = -1/e` where\n `W(z) = -1` for both branches. 
Since `-1/e` can only be represented\n approximately with mpmath numbers, evaluatin""g the Lambert W function\n at this point only gives `-1` approximately::\n\n >>> lambertw(-1/e, 0)\n -0.999999999999837133022867\n >>> lambertw(-1/e, -1)\n -1.00000000000016286697718\n \n If `-1/e` happens to round in the negative direction, there might be\n a small imaginary part::\n \n >>> lambertw(-1/e)\n (-1.0 + 8.22007971511612e-9j)\n\n "; -static char __pyx_k__k[] = "k"; -static char __pyx_k__z[] = "z"; -static char __pyx_k__tol[] = "tol"; -static char __pyx_k__warn[] = "warn"; -static char __pyx_k__range[] = "range"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__lambertw[] = "lambertw"; -static char __pyx_k__warnings[] = "warnings"; -static char __pyx_k___lambertw[] = "_lambertw"; -static PyObject *__pyx_kp_s_1; -static PyObject *__pyx_n_s_4; -static PyObject *__pyx_kp_u_5; -static PyObject *__pyx_kp_u_6; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s___lambertw; -static PyObject *__pyx_n_s__k; -static PyObject *__pyx_n_s__lambertw; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__tol; -static PyObject *__pyx_n_s__warn; -static PyObject *__pyx_n_s__warnings; -static PyObject *__pyx_n_s__z; -static PyObject *__pyx_int_0; -static PyObject *__pyx_k_2; - -/* "scipy/special/lambertw.pyx":44 - * double NPY_PI - * - * cdef inline bint zisnan(double complex x) nogil: # <<<<<<<<<<<<<< - * return npy_isnan(x.real) or npy_isnan(x.imag) - * - */ - -static CYTHON_INLINE int __pyx_f_5scipy_7special_8lambertw_zisnan(__pyx_t_double_complex __pyx_v_x) { - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - /* "scipy/special/lambertw.pyx":45 - * - * cdef inline bint zisnan(double complex x) nogil: - * return npy_isnan(x.real) or npy_isnan(x.imag) # <<<<<<<<<<<<<< - * - * cdef inline double zabs(double complex x) nogil: - */ - 
__pyx_t_1 = npy_isnan(__Pyx_CREAL(__pyx_v_x)); - if (!__pyx_t_1) { - __pyx_t_2 = npy_isnan(__Pyx_CIMAG(__pyx_v_x)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - __pyx_r = __pyx_t_3; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "scipy/special/lambertw.pyx":47 - * return npy_isnan(x.real) or npy_isnan(x.imag) - * - * cdef inline double zabs(double complex x) nogil: # <<<<<<<<<<<<<< - * cdef double r - * r = npy_cabs((&x)[0]) - */ - -static CYTHON_INLINE double __pyx_f_5scipy_7special_8lambertw_zabs(__pyx_t_double_complex __pyx_v_x) { - double __pyx_v_r; - double __pyx_r; - - /* "scipy/special/lambertw.pyx":49 - * cdef inline double zabs(double complex x) nogil: - * cdef double r - * r = npy_cabs((&x)[0]) # <<<<<<<<<<<<<< - * return r - * - */ - __pyx_v_r = npy_cabs((((npy_cdouble *)(&__pyx_v_x))[0])); - - /* "scipy/special/lambertw.pyx":50 - * cdef double r - * r = npy_cabs((&x)[0]) - * return r # <<<<<<<<<<<<<< - * - * cdef inline double complex zlog(double complex x) nogil: - */ - __pyx_r = __pyx_v_r; - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "scipy/special/lambertw.pyx":52 - * return r - * - * cdef inline double complex zlog(double complex x) nogil: # <<<<<<<<<<<<<< - * cdef npy_cdouble r - * r = npy_clog((&x)[0]) - */ - -static CYTHON_INLINE __pyx_t_double_complex __pyx_f_5scipy_7special_8lambertw_zlog(__pyx_t_double_complex __pyx_v_x) { - npy_cdouble __pyx_v_r; - __pyx_t_double_complex __pyx_r; - - /* "scipy/special/lambertw.pyx":54 - * cdef inline double complex zlog(double complex x) nogil: - * cdef npy_cdouble r - * r = npy_clog((&x)[0]) # <<<<<<<<<<<<<< - * return (&r)[0] - * - */ - __pyx_v_r = npy_clog((((npy_cdouble *)(&__pyx_v_x))[0])); - - /* "scipy/special/lambertw.pyx":55 - * cdef npy_cdouble r - * r = npy_clog((&x)[0]) - * return (&r)[0] # <<<<<<<<<<<<<< - * - * cdef inline double complex zexp(double complex x) nogil: - */ - __pyx_r = (((__pyx_t_double_complex 
*)(&__pyx_v_r))[0]); - goto __pyx_L0; - - __pyx_r = __pyx_t_double_complex_from_parts(0, 0); - __pyx_L0:; - return __pyx_r; -} - -/* "scipy/special/lambertw.pyx":57 - * return (&r)[0] - * - * cdef inline double complex zexp(double complex x) nogil: # <<<<<<<<<<<<<< - * cdef npy_cdouble r - * r = npy_cexp((&x)[0]) - */ - -static CYTHON_INLINE __pyx_t_double_complex __pyx_f_5scipy_7special_8lambertw_zexp(__pyx_t_double_complex __pyx_v_x) { - npy_cdouble __pyx_v_r; - __pyx_t_double_complex __pyx_r; - - /* "scipy/special/lambertw.pyx":59 - * cdef inline double complex zexp(double complex x) nogil: - * cdef npy_cdouble r - * r = npy_cexp((&x)[0]) # <<<<<<<<<<<<<< - * return (&r)[0] - * - */ - __pyx_v_r = npy_cexp((((npy_cdouble *)(&__pyx_v_x))[0])); - - /* "scipy/special/lambertw.pyx":60 - * cdef npy_cdouble r - * r = npy_cexp((&x)[0]) - * return (&r)[0] # <<<<<<<<<<<<<< - * - * cdef void lambertw_raise_warning(double complex z) with gil: - */ - __pyx_r = (((__pyx_t_double_complex *)(&__pyx_v_r))[0]); - goto __pyx_L0; - - __pyx_r = __pyx_t_double_complex_from_parts(0, 0); - __pyx_L0:; - return __pyx_r; -} - -/* "scipy/special/lambertw.pyx":62 - * return (&r)[0] - * - * cdef void lambertw_raise_warning(double complex z) with gil: # <<<<<<<<<<<<<< - * warnings.warn("Lambert W iteration failed to converge: %r" % z) - * - */ - -static void __pyx_f_5scipy_7special_8lambertw_lambertw_raise_warning(__pyx_t_double_complex __pyx_v_z) { - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("lambertw_raise_warning"); - - /* "scipy/special/lambertw.pyx":63 - * - * cdef void lambertw_raise_warning(double complex z) with gil: - * warnings.warn("Lambert W iteration failed to converge: %r" % z) # <<<<<<<<<<<<<< - * - * 
# Heavy lifting is here: - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__warnings); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__warn); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_z); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_WriteUnraisable("scipy.special.lambertw.lambertw_raise_warning", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - 
PyGILState_Release(__pyx_gilstate_save); - #endif -} - -/* "scipy/special/lambertw.pyx":68 - * - * @cython.cdivision(True) - * cdef double complex lambertw_scalar(double complex z, long k, double tol) nogil: # <<<<<<<<<<<<<< - * """ - * This is just the implementation of W for a single input z. - */ - -static __pyx_t_double_complex __pyx_f_5scipy_7special_8lambertw_lambertw_scalar(__pyx_t_double_complex __pyx_v_z, long __pyx_v_k, double __pyx_v_tol) { - __pyx_t_double_complex __pyx_v_w; - double __pyx_v_u; - double __pyx_v_absz; - __pyx_t_double_complex __pyx_v_ew; - __pyx_t_double_complex __pyx_v_wew; - __pyx_t_double_complex __pyx_v_wewz; - __pyx_t_double_complex __pyx_v_wn; - int __pyx_v_i; - __pyx_t_double_complex __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - - /* "scipy/special/lambertw.pyx":74 - * """ - * # Comments copied verbatim from [2] are marked with '>' - * if zisnan(z): # <<<<<<<<<<<<<< - * return z - * - */ - __pyx_t_1 = __pyx_f_5scipy_7special_8lambertw_zisnan(__pyx_v_z); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":75 - * # Comments copied verbatim from [2] are marked with '>' - * if zisnan(z): - * return z # <<<<<<<<<<<<<< - * - * # Return value: - */ - __pyx_r = __pyx_v_z; - goto __pyx_L0; - goto __pyx_L3; - } - __pyx_L3:; - - /* "scipy/special/lambertw.pyx":82 - * #> We must be extremely careful near the singularities at -1/e and 0 - * cdef double u - * u = exp(-1) # <<<<<<<<<<<<<< - * - * cdef double absz - */ - __pyx_v_u = exp(-1.0); - - /* "scipy/special/lambertw.pyx":85 - * - * cdef double absz - * absz = zabs(z) # <<<<<<<<<<<<<< - * if absz <= u: - * if z == 0: - */ - __pyx_v_absz = __pyx_f_5scipy_7special_8lambertw_zabs(__pyx_v_z); - - /* "scipy/special/lambertw.pyx":86 - * cdef double absz - * absz = zabs(z) - * if absz <= u: # <<<<<<<<<<<<<< - * if z == 0: - * #> w(0,0) = 0; for all other branches we hit the pole - */ - __pyx_t_1 = (__pyx_v_absz <= __pyx_v_u); - if (__pyx_t_1) { 
- - /* "scipy/special/lambertw.pyx":87 - * absz = zabs(z) - * if absz <= u: - * if z == 0: # <<<<<<<<<<<<<< - * #> w(0,0) = 0; for all other branches we hit the pole - * if k == 0: - */ - __pyx_t_1 = (__Pyx_c_eq(__pyx_v_z, __pyx_t_double_complex_from_parts(0, 0))); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":89 - * if z == 0: - * #> w(0,0) = 0; for all other branches we hit the pole - * if k == 0: # <<<<<<<<<<<<<< - * return z - * return -NPY_INFINITY - */ - __pyx_t_1 = (__pyx_v_k == 0); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":90 - * #> w(0,0) = 0; for all other branches we hit the pole - * if k == 0: - * return z # <<<<<<<<<<<<<< - * return -NPY_INFINITY - * - */ - __pyx_r = __pyx_v_z; - goto __pyx_L0; - goto __pyx_L6; - } - __pyx_L6:; - - /* "scipy/special/lambertw.pyx":91 - * if k == 0: - * return z - * return -NPY_INFINITY # <<<<<<<<<<<<<< - * - * if k == 0: - */ - __pyx_r = __pyx_t_double_complex_from_parts((-NPY_INFINITY), 0); - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "scipy/special/lambertw.pyx":93 - * return -NPY_INFINITY - * - * if k == 0: # <<<<<<<<<<<<<< - * w = z # Initial guess for iteration - * #> For small real z < 0, the -1 branch beaves roughly like log(-z) - */ - __pyx_t_1 = (__pyx_v_k == 0); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":94 - * - * if k == 0: - * w = z # Initial guess for iteration # <<<<<<<<<<<<<< - * #> For small real z < 0, the -1 branch beaves roughly like log(-z) - * elif k == -1 and z.imag ==0 and z.real < 0: - */ - __pyx_v_w = __pyx_v_z; - goto __pyx_L7; - } - - /* "scipy/special/lambertw.pyx":96 - * w = z # Initial guess for iteration - * #> For small real z < 0, the -1 branch beaves roughly like log(-z) - * elif k == -1 and z.imag ==0 and z.real < 0: # <<<<<<<<<<<<<< - * w = log(-z.real) - * #> Use a simple asymptotic approximation. 
- */ - __pyx_t_1 = (__pyx_v_k == -1); - if (__pyx_t_1) { - __pyx_t_2 = (__Pyx_CIMAG(__pyx_v_z) == 0.0); - if (__pyx_t_2) { - __pyx_t_3 = (__Pyx_CREAL(__pyx_v_z) < 0.0); - __pyx_t_4 = __pyx_t_3; - } else { - __pyx_t_4 = __pyx_t_2; - } - __pyx_t_2 = __pyx_t_4; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (__pyx_t_2) { - - /* "scipy/special/lambertw.pyx":97 - * #> For small real z < 0, the -1 branch beaves roughly like log(-z) - * elif k == -1 and z.imag ==0 and z.real < 0: - * w = log(-z.real) # <<<<<<<<<<<<<< - * #> Use a simple asymptotic approximation. - * else: - */ - __pyx_v_w = __pyx_t_double_complex_from_parts(log((-__Pyx_CREAL(__pyx_v_z))), 0); - goto __pyx_L7; - } - /*else*/ { - - /* "scipy/special/lambertw.pyx":100 - * #> Use a simple asymptotic approximation. - * else: - * w = zlog(z) # <<<<<<<<<<<<<< - * #> The branches are roughly logarithmic. This approximation - * #> gets better for large |k|; need to check that this always - */ - __pyx_v_w = __pyx_f_5scipy_7special_8lambertw_zlog(__pyx_v_z); - - /* "scipy/special/lambertw.pyx":104 - * #> gets better for large |k|; need to check that this always - * #> works for k ~= -1, 0, 1. 
- * if k: w = w + k*2*NPY_PI*1j # <<<<<<<<<<<<<< - * - * elif k == 0 and z.imag and zabs(z) <= 0.7: - */ - if (__pyx_v_k) { - __pyx_v_w = __Pyx_c_sum(__pyx_v_w, __Pyx_c_prod(__pyx_t_double_complex_from_parts(((__pyx_v_k * 2) * NPY_PI), 0), __pyx_t_double_complex_from_parts(0, 1.0))); - goto __pyx_L8; - } - __pyx_L8:; - } - __pyx_L7:; - goto __pyx_L4; - } - - /* "scipy/special/lambertw.pyx":106 - * if k: w = w + k*2*NPY_PI*1j - * - * elif k == 0 and z.imag and zabs(z) <= 0.7: # <<<<<<<<<<<<<< - * #> Both the W(z) ~= z and W(z) ~= ln(z) approximations break - * #> down around z ~= -0.5 (converging to the wrong branch), so patch - */ - __pyx_t_2 = (__pyx_v_k == 0); - if (__pyx_t_2) { - if ((__Pyx_CIMAG(__pyx_v_z) != 0)) { - __pyx_t_1 = (__pyx_f_5scipy_7special_8lambertw_zabs(__pyx_v_z) <= 0.7); - __pyx_t_4 = __pyx_t_1; - } else { - __pyx_t_4 = (__Pyx_CIMAG(__pyx_v_z) != 0); - } - __pyx_t_1 = __pyx_t_4; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":110 - * #> down around z ~= -0.5 (converging to the wrong branch), so patch - * #> with a constant approximation (adjusted for sign) - * if zabs(z+0.5) < 0.1: # <<<<<<<<<<<<<< - * if z.imag > 0: - * w = 0.7 + 0.7j - */ - __pyx_t_1 = (__pyx_f_5scipy_7special_8lambertw_zabs(__Pyx_c_sum(__pyx_v_z, __pyx_t_double_complex_from_parts(0.5, 0))) < 0.1); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":111 - * #> with a constant approximation (adjusted for sign) - * if zabs(z+0.5) < 0.1: - * if z.imag > 0: # <<<<<<<<<<<<<< - * w = 0.7 + 0.7j - * else: - */ - __pyx_t_1 = (__Pyx_CIMAG(__pyx_v_z) > 0.0); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":112 - * if zabs(z+0.5) < 0.1: - * if z.imag > 0: - * w = 0.7 + 0.7j # <<<<<<<<<<<<<< - * else: - * w = 0.7 - 0.7j - */ - __pyx_v_w = __Pyx_c_sum(__pyx_t_double_complex_from_parts(0.7, 0), __pyx_t_double_complex_from_parts(0, 0.69999999999999996)); - goto __pyx_L10; - } - /*else*/ { - - /* "scipy/special/lambertw.pyx":114 - * w = 
0.7 + 0.7j - * else: - * w = 0.7 - 0.7j # <<<<<<<<<<<<<< - * else: - * w = z - */ - __pyx_v_w = __Pyx_c_diff(__pyx_t_double_complex_from_parts(0.7, 0), __pyx_t_double_complex_from_parts(0, 0.69999999999999996)); - } - __pyx_L10:; - goto __pyx_L9; - } - /*else*/ { - - /* "scipy/special/lambertw.pyx":116 - * w = 0.7 - 0.7j - * else: - * w = z # <<<<<<<<<<<<<< - * - * else: - */ - __pyx_v_w = __pyx_v_z; - } - __pyx_L9:; - goto __pyx_L4; - } - /*else*/ { - - /* "scipy/special/lambertw.pyx":119 - * - * else: - * if z.real == NPY_INFINITY: # <<<<<<<<<<<<<< - * if k == 0: - * return z - */ - __pyx_t_1 = (__Pyx_CREAL(__pyx_v_z) == NPY_INFINITY); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":120 - * else: - * if z.real == NPY_INFINITY: - * if k == 0: # <<<<<<<<<<<<<< - * return z - * else: - */ - __pyx_t_1 = (__pyx_v_k == 0); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":121 - * if z.real == NPY_INFINITY: - * if k == 0: - * return z # <<<<<<<<<<<<<< - * else: - * return z + 2*k*NPY_PI*1j - */ - __pyx_r = __pyx_v_z; - goto __pyx_L0; - goto __pyx_L12; - } - /*else*/ { - - /* "scipy/special/lambertw.pyx":123 - * return z - * else: - * return z + 2*k*NPY_PI*1j # <<<<<<<<<<<<<< - * - * if z.real == -NPY_INFINITY: - */ - __pyx_r = __Pyx_c_sum(__pyx_v_z, __Pyx_c_prod(__pyx_t_double_complex_from_parts(((2 * __pyx_v_k) * NPY_PI), 0), __pyx_t_double_complex_from_parts(0, 1.0))); - goto __pyx_L0; - } - __pyx_L12:; - goto __pyx_L11; - } - __pyx_L11:; - - /* "scipy/special/lambertw.pyx":125 - * return z + 2*k*NPY_PI*1j - * - * if z.real == -NPY_INFINITY: # <<<<<<<<<<<<<< - * return (-z) + (2*k+1)*NPY_PI*1j - * - */ - __pyx_t_1 = (__Pyx_CREAL(__pyx_v_z) == (-NPY_INFINITY)); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":126 - * - * if z.real == -NPY_INFINITY: - * return (-z) + (2*k+1)*NPY_PI*1j # <<<<<<<<<<<<<< - * - * #> Simple asymptotic approximation as above - */ - __pyx_r = __Pyx_c_sum(__Pyx_c_neg(__pyx_v_z), 
__Pyx_c_prod(__pyx_t_double_complex_from_parts((((2 * __pyx_v_k) + 1) * NPY_PI), 0), __pyx_t_double_complex_from_parts(0, 1.0))); - goto __pyx_L0; - goto __pyx_L13; - } - __pyx_L13:; - - /* "scipy/special/lambertw.pyx":129 - * - * #> Simple asymptotic approximation as above - * w = zlog(z) # <<<<<<<<<<<<<< - * if k: w = w + k*2*NPY_PI*1j - * - */ - __pyx_v_w = __pyx_f_5scipy_7special_8lambertw_zlog(__pyx_v_z); - - /* "scipy/special/lambertw.pyx":130 - * #> Simple asymptotic approximation as above - * w = zlog(z) - * if k: w = w + k*2*NPY_PI*1j # <<<<<<<<<<<<<< - * - * #> Use Halley iteration to solve w*exp(w) = z - */ - if (__pyx_v_k) { - __pyx_v_w = __Pyx_c_sum(__pyx_v_w, __Pyx_c_prod(__pyx_t_double_complex_from_parts(((__pyx_v_k * 2) * NPY_PI), 0), __pyx_t_double_complex_from_parts(0, 1.0))); - goto __pyx_L14; - } - __pyx_L14:; - } - __pyx_L4:; - - /* "scipy/special/lambertw.pyx":135 - * cdef double complex ew, wew, wewz, wn - * cdef int i - * for i in range(100): # <<<<<<<<<<<<<< - * ew = zexp(w) - * wew = w*ew - */ - for (__pyx_t_5 = 0; __pyx_t_5 < 100; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "scipy/special/lambertw.pyx":136 - * cdef int i - * for i in range(100): - * ew = zexp(w) # <<<<<<<<<<<<<< - * wew = w*ew - * wewz = wew-z - */ - __pyx_v_ew = __pyx_f_5scipy_7special_8lambertw_zexp(__pyx_v_w); - - /* "scipy/special/lambertw.pyx":137 - * for i in range(100): - * ew = zexp(w) - * wew = w*ew # <<<<<<<<<<<<<< - * wewz = wew-z - * wn = w - wewz / (wew + ew - (w + 2)*wewz/(2*w + 2)) - */ - __pyx_v_wew = __Pyx_c_prod(__pyx_v_w, __pyx_v_ew); - - /* "scipy/special/lambertw.pyx":138 - * ew = zexp(w) - * wew = w*ew - * wewz = wew-z # <<<<<<<<<<<<<< - * wn = w - wewz / (wew + ew - (w + 2)*wewz/(2*w + 2)) - * if zabs(wn-w) < tol*zabs(wn): - */ - __pyx_v_wewz = __Pyx_c_diff(__pyx_v_wew, __pyx_v_z); - - /* "scipy/special/lambertw.pyx":139 - * wew = w*ew - * wewz = wew-z - * wn = w - wewz / (wew + ew - (w + 2)*wewz/(2*w + 2)) # <<<<<<<<<<<<<< - * if zabs(wn-w) < 
tol*zabs(wn): - * return wn - */ - __pyx_v_wn = __Pyx_c_diff(__pyx_v_w, __Pyx_c_quot(__pyx_v_wewz, __Pyx_c_diff(__Pyx_c_sum(__pyx_v_wew, __pyx_v_ew), __Pyx_c_quot(__Pyx_c_prod(__Pyx_c_sum(__pyx_v_w, __pyx_t_double_complex_from_parts(2, 0)), __pyx_v_wewz), __Pyx_c_sum(__Pyx_c_prod(__pyx_t_double_complex_from_parts(2, 0), __pyx_v_w), __pyx_t_double_complex_from_parts(2, 0)))))); - - /* "scipy/special/lambertw.pyx":140 - * wewz = wew-z - * wn = w - wewz / (wew + ew - (w + 2)*wewz/(2*w + 2)) - * if zabs(wn-w) < tol*zabs(wn): # <<<<<<<<<<<<<< - * return wn - * else: - */ - __pyx_t_1 = (__pyx_f_5scipy_7special_8lambertw_zabs(__Pyx_c_diff(__pyx_v_wn, __pyx_v_w)) < (__pyx_v_tol * __pyx_f_5scipy_7special_8lambertw_zabs(__pyx_v_wn))); - if (__pyx_t_1) { - - /* "scipy/special/lambertw.pyx":141 - * wn = w - wewz / (wew + ew - (w + 2)*wewz/(2*w + 2)) - * if zabs(wn-w) < tol*zabs(wn): - * return wn # <<<<<<<<<<<<<< - * else: - * w = wn - */ - __pyx_r = __pyx_v_wn; - goto __pyx_L0; - goto __pyx_L17; - } - /*else*/ { - - /* "scipy/special/lambertw.pyx":143 - * return wn - * else: - * w = wn # <<<<<<<<<<<<<< - * - * lambertw_raise_warning(z) - */ - __pyx_v_w = __pyx_v_wn; - } - __pyx_L17:; - } - - /* "scipy/special/lambertw.pyx":145 - * w = wn - * - * lambertw_raise_warning(z) # <<<<<<<<<<<<<< - * return wn - * - */ - __pyx_f_5scipy_7special_8lambertw_lambertw_raise_warning(__pyx_v_z); - - /* "scipy/special/lambertw.pyx":146 - * - * lambertw_raise_warning(z) - * return wn # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_wn; - goto __pyx_L0; - - __pyx_r = __pyx_t_double_complex_from_parts(0, 0); - __pyx_L0:; - return __pyx_r; -} - -/* "scipy/special/lambertw.pyx":166 - * int identity, char* name, char* doc, int c) - * - * cdef void _apply_func_to_1d_vec(char **args, npy_intp *dimensions, npy_intp *steps, # <<<<<<<<<<<<<< - * void *func) nogil: - * cdef npy_intp i - */ - -static void __pyx_f_5scipy_7special_8lambertw__apply_func_to_1d_vec(char **__pyx_v_args, npy_intp 
*__pyx_v_dimensions, npy_intp *__pyx_v_steps, void *__pyx_v_func) { - npy_intp __pyx_v_i; - char *__pyx_v_ip1; - char *__pyx_v_ip2; - char *__pyx_v_ip3; - char *__pyx_v_op; - npy_intp __pyx_t_1; - npy_intp __pyx_t_2; - - /* "scipy/special/lambertw.pyx":169 - * void *func) nogil: - * cdef npy_intp i - * cdef char *ip1=args[0], *ip2=args[1], *ip3=args[2], *op=args[3] # <<<<<<<<<<<<<< - * for i in range(0, dimensions[0]): - * (op)[0] = (func)( - */ - __pyx_v_ip1 = (__pyx_v_args[0]); - __pyx_v_ip2 = (__pyx_v_args[1]); - __pyx_v_ip3 = (__pyx_v_args[2]); - __pyx_v_op = (__pyx_v_args[3]); - - /* "scipy/special/lambertw.pyx":170 - * cdef npy_intp i - * cdef char *ip1=args[0], *ip2=args[1], *ip3=args[2], *op=args[3] - * for i in range(0, dimensions[0]): # <<<<<<<<<<<<<< - * (op)[0] = (func)( - * (ip1)[0], (ip2)[0], (ip3)[0]) - */ - __pyx_t_1 = (__pyx_v_dimensions[0]); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - /* "scipy/special/lambertw.pyx":171 - * cdef char *ip1=args[0], *ip2=args[1], *ip3=args[2], *op=args[3] - * for i in range(0, dimensions[0]): - * (op)[0] = (func)( # <<<<<<<<<<<<<< - * (ip1)[0], (ip2)[0], (ip3)[0]) - * ip1 += steps[0]; ip2 += steps[1]; ip3 += steps[2]; op += steps[3] - */ - (((__pyx_t_double_complex *)__pyx_v_op)[0]) = ((__pyx_t_double_complex (*)(__pyx_t_double_complex, long, double))__pyx_v_func)((((__pyx_t_double_complex *)__pyx_v_ip1)[0]), (((long *)__pyx_v_ip2)[0]), (((double *)__pyx_v_ip3)[0])); - - /* "scipy/special/lambertw.pyx":173 - * (op)[0] = (func)( - * (ip1)[0], (ip2)[0], (ip3)[0]) - * ip1 += steps[0]; ip2 += steps[1]; ip3 += steps[2]; op += steps[3] # <<<<<<<<<<<<<< - * - * cdef PyUFuncGenericFunction _loop_funcs[1] - */ - __pyx_v_ip1 = (__pyx_v_ip1 + (__pyx_v_steps[0])); - __pyx_v_ip2 = (__pyx_v_ip2 + (__pyx_v_steps[1])); - __pyx_v_ip3 = (__pyx_v_ip3 + (__pyx_v_steps[2])); - __pyx_v_op = (__pyx_v_op + (__pyx_v_steps[3])); - } - -} - -/* "scipy/special/lambertw.pyx":193 - * _inp_outp_types, 
1, 3, 1, 0, "", "", 0) - * - * def lambertw(z, k=0, tol=1e-8): # <<<<<<<<<<<<<< - * r""" - * lambertw(z, k=0, tol=1e-8) - */ - -static PyObject *__pyx_pf_5scipy_7special_8lambertw_lambertw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_8lambertw_lambertw[] = "\n lambertw(z, k=0, tol=1e-8)\n\n Lambert W function.\n\n The Lambert W function `W(z)` is defined as the inverse function\n of :math:`w \\exp(w)`. In other words, the value of :math:`W(z)` is\n such that :math:`z = W(z) \\exp(W(z))` for any complex number\n :math:`z`.\n\n The Lambert W function is a multivalued function with infinitely\n many branches. Each branch gives a separate solution of the\n equation :math:`w \\exp(w)`. Here, the branches are indexed by the\n integer `k`.\n \n Parameters\n ----------\n z : array_like\n Input argument\n k : integer, optional\n Branch index\n tol : float\n Evaluation tolerance\n\n Notes\n -----\n All branches are supported by `lambertw`:\n\n * ``lambertw(z)`` gives the principal solution (branch 0)\n * ``lambertw(z, k)`` gives the solution on branch `k`\n\n The Lambert W function has two partially real branches: the\n principal branch (`k = 0`) is real for real `z > -1/e`, and the\n `k = -1` branch is real for `-1/e < z < 0`. All branches except\n `k = 0` have a logarithmic singularity at `z = 0`.\n\n .. rubric:: Possible issues\n \n The evaluation can become inaccurate very close to the branch point\n at `-1/e`. In some corner cases, :func:`lambertw` might currently\n fail to converge, or can end up on the wrong branch.\n\n .. rubric:: Algorithm\n\n Halley's iteration is used to invert `w \\exp(w)`, using a first-order\n asymptotic approximation (`O(\\log(w))` or `O(w)`) as the initial\n estimate.\n\n The definition, implementation and choice of branches is based\n on Corless et al, \"On the Lambert W function\", Adv. Comp. Math. 
5\n (1996) 329-359, available online here:\n http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf\n \n TODO: use a series expansion when extremely close to the branch point\n at `-1/e` and make sure that the proper branch is chosen there\n\n Examples""\n --------\n The Lambert W function is the inverse of `w \\exp(w)`::\n\n >>> from scipy.special import lambertw\n >>> w = lambertw(1)\n >>> w\n 0.56714329040978387299996866221035555\n >>> w*exp(w)\n 1.0\n\n Any branch gives a valid inverse::\n\n >>> w = lambertw(1, k=3)\n >>> w\n (-2.8535817554090378072068187234910812 +\n 17.113535539412145912607826671159289j)\n >>> w*exp(w)\n (1.0 + 3.5075477124212226194278700785075126e-36j)\n\n .. rubric:: Applications to equation-solving\n\n The Lambert W function may be used to solve various kinds of\n equations, such as finding the value of the infinite power\n tower `z^{z^{z^{\\ldots}}}`::\n\n >>> def tower(z, n):\n ... if n == 0:\n ... return z\n ... return z ** tower(z, n-1)\n ...\n >>> tower(0.5, 100)\n 0.641185744504986\n >>> -lambertw(-log(0.5))/log(0.5)\n 0.6411857445049859844862004821148236665628209571911\n\n .. rubric:: Properties\n\n The Lambert W function grows roughly like the natural logarithm\n for large arguments::\n\n >>> lambertw(1000)\n 5.2496028524016\n >>> log(1000)\n 6.90775527898214\n >>> lambertw(10**100)\n 224.843106445119\n >>> log(10**100)\n 230.258509299405\n \n The principal branch of the Lambert W function has a rational\n Taylor series expansion around `z = 0`::\n \n >>> nprint(taylor(lambertw, 0, 6), 10)\n [0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]\n \n Some special values and limits are::\n \n >>> lambertw(0)\n 0.0\n >>> lambertw(1)\n 0.567143290409784\n >>> lambertw(e)\n 1.0\n >>> lambertw(inf)\n +inf\n >>> lambertw(0, k=-1)\n -inf\n >>> lambertw(0, k=3)\n -inf\n >>> lambertw(inf, k=3)\n (+inf + 18.8495559215388j)\n\n The `k = 0` and `k = -1` branches join at `z = -1/e` where\n `W(z) = -1` for both branches. 
Since `-1/e` can only be represented\n approximately with mpmath numbers, evaluatin""g the Lambert W function\n at this point only gives `-1` approximately::\n\n >>> lambertw(-1/e, 0)\n -0.999999999999837133022867\n >>> lambertw(-1/e, -1)\n -1.00000000000016286697718\n \n If `-1/e` happens to round in the negative direction, there might be\n a small imaginary part::\n \n >>> lambertw(-1/e)\n (-1.0 + 8.22007971511612e-9j)\n\n "; -static PyMethodDef __pyx_mdef_5scipy_7special_8lambertw_lambertw = {__Pyx_NAMESTR("lambertw"), (PyCFunction)__pyx_pf_5scipy_7special_8lambertw_lambertw, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_8lambertw_lambertw)}; -static PyObject *__pyx_pf_5scipy_7special_8lambertw_lambertw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_z = 0; - PyObject *__pyx_v_k = 0; - PyObject *__pyx_v_tol = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__z,&__pyx_n_s__k,&__pyx_n_s__tol,0}; - __Pyx_RefNannySetupContext("lambertw"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[1] = ((PyObject *)__pyx_int_0); - values[2] = __pyx_k_2; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); 
- if (value) { values[1] = value; kw_args--; } - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tol); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "lambertw") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_z = values[0]; - __pyx_v_k = values[1]; - __pyx_v_tol = values[2]; - } else { - __pyx_v_k = ((PyObject *)__pyx_int_0); - __pyx_v_tol = __pyx_k_2; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: __pyx_v_tol = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: __pyx_v_k = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: __pyx_v_z = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("lambertw", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.lambertw.lambertw", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/lambertw.pyx":340 - * - * """ - * return _lambertw(z, k, tol) # <<<<<<<<<<<<<< - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___lambertw); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_z); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_z); - __Pyx_GIVEREF(__pyx_v_z); - 
__Pyx_INCREF(__pyx_v_k); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_k); - __Pyx_GIVEREF(__pyx_v_k); - __Pyx_INCREF(__pyx_v_tol); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_tol); - __Pyx_GIVEREF(__pyx_v_tol); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.lambertw.lambertw", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("lambertw"), - 0, /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, - {&__pyx_n_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 1}, - {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, - {&__pyx_kp_u_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 1, 0, 0}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s___lambertw, __pyx_k___lambertw, sizeof(__pyx_k___lambertw), 0, 0, 1, 1}, - {&__pyx_n_s__k, __pyx_k__k, sizeof(__pyx_k__k), 0, 0, 1, 1}, - {&__pyx_n_s__lambertw, __pyx_k__lambertw, 
sizeof(__pyx_k__lambertw), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__tol, __pyx_k__tol, sizeof(__pyx_k__tol), 0, 0, 1, 1}, - {&__pyx_n_s__warn, __pyx_k__warn, sizeof(__pyx_k__warn), 0, 0, 1, 1}, - {&__pyx_n_s__warnings, __pyx_k__warnings, sizeof(__pyx_k__warnings), 0, 0, 1, 1}, - {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initlambertw(void); /*proto*/ -PyMODINIT_FUNC initlambertw(void) -#else -PyMODINIT_FUNC PyInit_lambertw(void); /*proto*/ -PyMODINIT_FUNC PyInit_lambertw(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_lambertw(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("lambertw"), __pyx_methods, 0, 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. 
---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__special__lambertw) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - /*--- Type import code ---*/ - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/special/lambertw.pyx":24 - * - * import cython - * import warnings # <<<<<<<<<<<<<< - * - * cdef extern from "math.h": - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__warnings), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__warnings, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/lambertw.pyx":176 - * - * cdef PyUFuncGenericFunction _loop_funcs[1] - * _loop_funcs[0] = _apply_func_to_1d_vec # <<<<<<<<<<<<<< - * - * cdef char _inp_outp_types[4] - */ - (__pyx_v_5scipy_7special_8lambertw__loop_funcs[0]) = __pyx_f_5scipy_7special_8lambertw__apply_func_to_1d_vec; - - /* "scipy/special/lambertw.pyx":179 - * - * cdef char _inp_outp_types[4] - * _inp_outp_types[0] = NPY_CDOUBLE # <<<<<<<<<<<<<< - * 
_inp_outp_types[1] = NPY_LONG - * _inp_outp_types[2] = NPY_DOUBLE - */ - (__pyx_v_5scipy_7special_8lambertw__inp_outp_types[0]) = NPY_CDOUBLE; - - /* "scipy/special/lambertw.pyx":180 - * cdef char _inp_outp_types[4] - * _inp_outp_types[0] = NPY_CDOUBLE - * _inp_outp_types[1] = NPY_LONG # <<<<<<<<<<<<<< - * _inp_outp_types[2] = NPY_DOUBLE - * _inp_outp_types[3] = NPY_CDOUBLE - */ - (__pyx_v_5scipy_7special_8lambertw__inp_outp_types[1]) = NPY_LONG; - - /* "scipy/special/lambertw.pyx":181 - * _inp_outp_types[0] = NPY_CDOUBLE - * _inp_outp_types[1] = NPY_LONG - * _inp_outp_types[2] = NPY_DOUBLE # <<<<<<<<<<<<<< - * _inp_outp_types[3] = NPY_CDOUBLE - * - */ - (__pyx_v_5scipy_7special_8lambertw__inp_outp_types[2]) = NPY_DOUBLE; - - /* "scipy/special/lambertw.pyx":182 - * _inp_outp_types[1] = NPY_LONG - * _inp_outp_types[2] = NPY_DOUBLE - * _inp_outp_types[3] = NPY_CDOUBLE # <<<<<<<<<<<<<< - * - * import_array() - */ - (__pyx_v_5scipy_7special_8lambertw__inp_outp_types[3]) = NPY_CDOUBLE; - - /* "scipy/special/lambertw.pyx":184 - * _inp_outp_types[3] = NPY_CDOUBLE - * - * import_array() # <<<<<<<<<<<<<< - * import_ufunc() - * - */ - import_array(); - - /* "scipy/special/lambertw.pyx":185 - * - * import_array() - * import_ufunc() # <<<<<<<<<<<<<< - * - * # The actual ufunc declaration: - */ - import_ufunc(); - - /* "scipy/special/lambertw.pyx":189 - * # The actual ufunc declaration: - * cdef void *the_func_to_apply[1] - * the_func_to_apply[0] = lambertw_scalar # <<<<<<<<<<<<<< - * _lambertw = PyUFunc_FromFuncAndData(_loop_funcs, the_func_to_apply, - * _inp_outp_types, 1, 3, 1, 0, "", "", 0) - */ - (__pyx_v_5scipy_7special_8lambertw_the_func_to_apply[0]) = ((void *)__pyx_f_5scipy_7special_8lambertw_lambertw_scalar); - - /* "scipy/special/lambertw.pyx":191 - * the_func_to_apply[0] = lambertw_scalar - * _lambertw = PyUFunc_FromFuncAndData(_loop_funcs, the_func_to_apply, - * _inp_outp_types, 1, 3, 1, 0, "", "", 0) # <<<<<<<<<<<<<< - * - * def lambertw(z, k=0, tol=1e-8): - */ - 
__pyx_t_1 = PyUFunc_FromFuncAndData(__pyx_v_5scipy_7special_8lambertw__loop_funcs, __pyx_v_5scipy_7special_8lambertw_the_func_to_apply, __pyx_v_5scipy_7special_8lambertw__inp_outp_types, 1, 3, 1, 0, __pyx_k_3, __pyx_k_3, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___lambertw, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/lambertw.pyx":193 - * _inp_outp_types, 1, 3, 1, 0, "", "", 0) - * - * def lambertw(z, k=0, tol=1e-8): # <<<<<<<<<<<<<< - * r""" - * lambertw(z, k=0, tol=1e-8) - */ - __pyx_t_1 = PyFloat_FromDouble(1e-8); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_k_2 = __pyx_t_1; - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_8lambertw_lambertw, NULL, __pyx_n_s_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__lambertw, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/lambertw.pyx":1 - * # Implementation of the Lambert W function [1]. Based on the MPMath # <<<<<<<<<<<<<< - * # implementation [2], and documentaion [3]. 
- * # - */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_5), ((PyObject *)__pyx_kp_u_6)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.special.lambertw", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.special.lambertw"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if 
PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - 
PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return 
__Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_intp(npy_intp val) { - const npy_intp neg_one = (npy_intp)-1, const_zero = (npy_intp)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(npy_intp) == sizeof(char)) || - (sizeof(npy_intp) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(npy_intp) == sizeof(int)) || - 
(sizeof(npy_intp) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(npy_intp) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(npy_intp), - little, !is_unsigned); - } -} - -static CYTHON_INLINE npy_intp __Pyx_PyInt_from_py_npy_intp(PyObject* x) { - const npy_intp neg_one = (npy_intp)-1, const_zero = (npy_intp)0; - const int is_unsigned = const_zero < neg_one; - if (sizeof(npy_intp) == sizeof(char)) { - if (is_unsigned) - return (npy_intp)__Pyx_PyInt_AsUnsignedChar(x); - else - return (npy_intp)__Pyx_PyInt_AsSignedChar(x); - } else if (sizeof(npy_intp) == sizeof(short)) { - if (is_unsigned) - return (npy_intp)__Pyx_PyInt_AsUnsignedShort(x); - else - return (npy_intp)__Pyx_PyInt_AsSignedShort(x); - } else if (sizeof(npy_intp) == sizeof(int)) { - if (is_unsigned) - return (npy_intp)__Pyx_PyInt_AsUnsignedInt(x); - else - return (npy_intp)__Pyx_PyInt_AsSignedInt(x); - } else if (sizeof(npy_intp) == sizeof(long)) { - if (is_unsigned) - return (npy_intp)__Pyx_PyInt_AsUnsignedLong(x); - else - return (npy_intp)__Pyx_PyInt_AsSignedLong(x); - } else if (sizeof(npy_intp) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return (npy_intp)__Pyx_PyInt_AsUnsignedLongLong(x); - else - return (npy_intp)__Pyx_PyInt_AsSignedLongLong(x); - } else { - npy_intp val; - PyObject *v = __Pyx_PyNumber_Int(x); - #if PY_VERSION_HEX < 0x03000000 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = 
_PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (npy_intp)-1; - } -} - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return 
(PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename) { - PyObject *old_exc, *old_val, *old_tb; - PyObject *ctx; - __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); - #if PY_MAJOR_VERSION < 3 - ctx = PyString_FromString(name); - #else - ctx = PyUnicode_FromString(name); - #endif - __Pyx_ErrRestore(old_exc, old_val, old_tb); - if (!ctx) { - PyErr_WriteUnraisable(Py_None); - } else { - PyErr_WriteUnraisable(ctx); - Py_DECREF(ctx); - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does 
not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - 
py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - 
PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/special/mach/d1mach.f b/scipy-0.10.1/scipy/special/mach/d1mach.f deleted file mode 100644 index bda4529c9e..0000000000 --- a/scipy-0.10.1/scipy/special/mach/d1mach.f +++ /dev/null @@ -1,209 +0,0 @@ - DOUBLE PRECISION FUNCTION D1MACH(I) - INTEGER I -C -C DOUBLE-PRECISION MACHINE CONSTANTS -C D1MACH( 1) = B**(EMIN-1), THE SMALLEST POSITIVE MAGNITUDE. -C D1MACH( 2) = B**EMAX*(1 - B**(-T)), THE LARGEST MAGNITUDE. -C D1MACH( 3) = B**(-T), THE SMALLEST RELATIVE SPACING. -C D1MACH( 4) = B**(1-T), THE LARGEST RELATIVE SPACING. 
-C D1MACH( 5) = LOG10(B) -C - INTEGER SMALL(2) - INTEGER LARGE(2) - INTEGER RIGHT(2) - INTEGER DIVER(2) - INTEGER LOG10(2) - INTEGER SC, CRAY1(38), J - COMMON /D9MACH/ CRAY1 - SAVE SMALL, LARGE, RIGHT, DIVER, LOG10, SC - DOUBLE PRECISION DMACH(5) - EQUIVALENCE (DMACH(1),SMALL(1)) - EQUIVALENCE (DMACH(2),LARGE(1)) - EQUIVALENCE (DMACH(3),RIGHT(1)) - EQUIVALENCE (DMACH(4),DIVER(1)) - EQUIVALENCE (DMACH(5),LOG10(1)) -C THIS VERSION ADAPTS AUTOMATICALLY TO MOST CURRENT MACHINES. -C R1MACH CAN HANDLE AUTO-DOUBLE COMPILING, BUT THIS VERSION OF -C D1MACH DOES NOT, BECAUSE WE DO NOT HAVE QUAD CONSTANTS FOR -C MANY MACHINES YET. -C TO COMPILE ON OLDER MACHINES, ADD A C IN COLUMN 1 -C ON THE NEXT LINE - DATA SC/0/ -C AND REMOVE THE C FROM COLUMN 1 IN ONE OF THE SECTIONS BELOW. -C CONSTANTS FOR EVEN OLDER MACHINES CAN BE OBTAINED BY -C mail netlib@research.bell-labs.com -C send old1mach from blas -C PLEASE SEND CORRECTIONS TO dmg OR ehg@bell-labs.com. -C -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES. -C DATA SMALL(1),SMALL(2) / O402400000000, O000000000000 / -C DATA LARGE(1),LARGE(2) / O376777777777, O777777777777 / -C DATA RIGHT(1),RIGHT(2) / O604400000000, O000000000000 / -C DATA DIVER(1),DIVER(2) / O606400000000, O000000000000 / -C DATA LOG10(1),LOG10(2) / O776464202324, O117571775714 /, SC/987/ -C -C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING -C 32-BIT INTEGERS. -C DATA SMALL(1),SMALL(2) / 8388608, 0 / -C DATA LARGE(1),LARGE(2) / 2147483647, -1 / -C DATA RIGHT(1),RIGHT(2) / 612368384, 0 / -C DATA DIVER(1),DIVER(2) / 620756992, 0 / -C DATA LOG10(1),LOG10(2) / 1067065498, -2063872008 /, SC/987/ -C -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES. 
-C DATA SMALL(1),SMALL(2) / O000040000000, O000000000000 / -C DATA LARGE(1),LARGE(2) / O377777777777, O777777777777 / -C DATA RIGHT(1),RIGHT(2) / O170540000000, O000000000000 / -C DATA DIVER(1),DIVER(2) / O170640000000, O000000000000 / -C DATA LOG10(1),LOG10(2) / O177746420232, O411757177572 /, SC/987/ -C -C ON FIRST CALL, IF NO DATA UNCOMMENTED, TEST MACHINE TYPES. - IF (SC .NE. 987) THEN - DMACH(1) = 1.D13 - IF ( SMALL(1) .EQ. 1117925532 - * .AND. SMALL(2) .EQ. -448790528) THEN -* *** IEEE BIG ENDIAN *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2146435071 - LARGE(2) = -1 - RIGHT(1) = 1017118720 - RIGHT(2) = 0 - DIVER(1) = 1018167296 - DIVER(2) = 0 - LOG10(1) = 1070810131 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(2) .EQ. 1117925532 - * .AND. SMALL(1) .EQ. -448790528) THEN -* *** IEEE LITTLE ENDIAN *** - SMALL(2) = 1048576 - SMALL(1) = 0 - LARGE(2) = 2146435071 - LARGE(1) = -1 - RIGHT(2) = 1017118720 - RIGHT(1) = 0 - DIVER(2) = 1018167296 - DIVER(1) = 0 - LOG10(2) = 1070810131 - LOG10(1) = 1352628735 - ELSE IF ( SMALL(1) .EQ. -2065213935 - * .AND. SMALL(2) .EQ. 10752) THEN -* *** VAX WITH D_FLOATING *** - SMALL(1) = 128 - SMALL(2) = 0 - LARGE(1) = -32769 - LARGE(2) = -1 - RIGHT(1) = 9344 - RIGHT(2) = 0 - DIVER(1) = 9472 - DIVER(2) = 0 - LOG10(1) = 546979738 - LOG10(2) = -805796613 - ELSE IF ( SMALL(1) .EQ. 1267827943 - * .AND. SMALL(2) .EQ. 704643072) THEN -* *** IBM MAINFRAME *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2147483647 - LARGE(2) = -1 - RIGHT(1) = 856686592 - RIGHT(2) = 0 - DIVER(1) = 873463808 - DIVER(2) = 0 - LOG10(1) = 1091781651 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(1) .EQ. 1120022684 - * .AND. SMALL(2) .EQ. -448790528) THEN -* *** CONVEX C-1 *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2147483647 - LARGE(2) = -1 - RIGHT(1) = 1019215872 - RIGHT(2) = 0 - DIVER(1) = 1020264448 - DIVER(2) = 0 - LOG10(1) = 1072907283 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(1) .EQ. 815547074 - * .AND. SMALL(2) .EQ. 
58688) THEN -* *** VAX G-FLOATING *** - SMALL(1) = 16 - SMALL(2) = 0 - LARGE(1) = -32769 - LARGE(2) = -1 - RIGHT(1) = 15552 - RIGHT(2) = 0 - DIVER(1) = 15568 - DIVER(2) = 0 - LOG10(1) = 1142112243 - LOG10(2) = 2046775455 - ELSE - DMACH(2) = 1.D27 + 1 - DMACH(3) = 1.D27 - LARGE(2) = LARGE(2) - RIGHT(2) - IF (LARGE(2) .EQ. 64 .AND. SMALL(2) .EQ. 0) THEN - CRAY1(1) = 67291416 - DO 10 J = 1, 20 - CRAY1(J+1) = CRAY1(J) + CRAY1(J) - 10 CONTINUE - CRAY1(22) = CRAY1(21) + 321322 - DO 20 J = 22, 37 - CRAY1(J+1) = CRAY1(J) + CRAY1(J) - 20 CONTINUE - IF (CRAY1(38) .EQ. SMALL(1)) THEN -* *** CRAY *** - CALL I1MCRY(SMALL(1), J, 8285, 8388608, 0) - SMALL(2) = 0 - CALL I1MCRY(LARGE(1), J, 24574, 16777215, 16777215) - CALL I1MCRY(LARGE(2), J, 0, 16777215, 16777214) - CALL I1MCRY(RIGHT(1), J, 16291, 8388608, 0) - RIGHT(2) = 0 - CALL I1MCRY(DIVER(1), J, 16292, 8388608, 0) - DIVER(2) = 0 - CALL I1MCRY(LOG10(1), J, 16383, 10100890, 8715215) - CALL I1MCRY(LOG10(2), J, 0, 16226447, 9001388) - ELSE - WRITE(*,9000) - STOP 779 - END IF - ELSE - WRITE(*,9000) - STOP 779 - END IF - END IF - SC = 987 - END IF -* SANITY CHECK - IF (DMACH(4) .GE. 1.0D0) STOP 778 - IF (I .LT. 1 .OR. I .GT. 5) THEN - WRITE(*,*) 'D1MACH(I): I =',I,' is out of bounds.' 
- STOP - END IF - D1MACH = DMACH(I) - RETURN - 9000 FORMAT(/' Adjust D1MACH by uncommenting data statements'/ - *' appropriate for your machine.') -* /* Standard C source for D1MACH -- remove the * in column 1 */ -*#include -*#include -*#include -*double d1mach_(long *i) -*{ -* switch(*i){ -* case 1: return DBL_MIN; -* case 2: return DBL_MAX; -* case 3: return DBL_EPSILON/FLT_RADIX; -* case 4: return DBL_EPSILON; -* case 5: return log10(FLT_RADIX); -* } -* fprintf(stderr, "invalid argument: d1mach(%ld)\n", *i); -* exit(1); return 0; /* some compilers demand return values */ -*} - END - SUBROUTINE I1MCRY(A, A1, B, C, D) -**** SPECIAL COMPUTATION FOR OLD CRAY MACHINES **** - INTEGER A, A1, B, C, D - A1 = 16777216*B + C - A = 16777216*A1 + D - END diff --git a/scipy-0.10.1/scipy/special/mach/i1mach.f b/scipy-0.10.1/scipy/special/mach/i1mach.f deleted file mode 100644 index 1d6f7fc6bb..0000000000 --- a/scipy-0.10.1/scipy/special/mach/i1mach.f +++ /dev/null @@ -1,291 +0,0 @@ - INTEGER FUNCTION I1MACH(I) - INTEGER I -C -C I1MACH( 1) = THE STANDARD INPUT UNIT. -C I1MACH( 2) = THE STANDARD OUTPUT UNIT. -C I1MACH( 3) = THE STANDARD PUNCH UNIT. -C I1MACH( 4) = THE STANDARD ERROR MESSAGE UNIT. -C I1MACH( 5) = THE NUMBER OF BITS PER INTEGER STORAGE UNIT. -C I1MACH( 6) = THE NUMBER OF CHARACTERS PER CHARACTER STORAGE UNIT. -C INTEGERS HAVE FORM SIGN ( X(S-1)*A**(S-1) + ... + X(1)*A + X(0) ) -C I1MACH( 7) = A, THE BASE. -C I1MACH( 8) = S, THE NUMBER OF BASE-A DIGITS. -C I1MACH( 9) = A**S - 1, THE LARGEST MAGNITUDE. -C FLOATS HAVE FORM SIGN (B**E)*( (X(1)/B) + ... + (X(T)/B**T) ) -C WHERE EMIN .LE. E .LE. EMAX. -C I1MACH(10) = B, THE BASE. -C SINGLE-PRECISION -C I1MACH(11) = T, THE NUMBER OF BASE-B DIGITS. -C I1MACH(12) = EMIN, THE SMALLEST EXPONENT E. -C I1MACH(13) = EMAX, THE LARGEST EXPONENT E. -C DOUBLE-PRECISION -C I1MACH(14) = T, THE NUMBER OF BASE-B DIGITS. -C I1MACH(15) = EMIN, THE SMALLEST EXPONENT E. -C I1MACH(16) = EMAX, THE LARGEST EXPONENT E. 
-C - INTEGER IMACH(16), OUTPUT, SC, SMALL(2) - SAVE IMACH, SC - REAL RMACH - EQUIVALENCE (IMACH(4),OUTPUT), (RMACH,SMALL(1)) - INTEGER I3, J, K, T3E(3) - DATA T3E(1) / 9777664 / - DATA T3E(2) / 5323660 / - DATA T3E(3) / 46980 / -C THIS VERSION ADAPTS AUTOMATICALLY TO MOST CURRENT MACHINES, -C INCLUDING AUTO-DOUBLE COMPILERS. -C TO COMPILE ON OLDER MACHINES, ADD A C IN COLUMN 1 -C ON THE NEXT LINE - DATA SC/0/ -C AND REMOVE THE C FROM COLUMN 1 IN ONE OF THE SECTIONS BELOW. -C CONSTANTS FOR EVEN OLDER MACHINES CAN BE OBTAINED BY -C mail netlib@research.bell-labs.com -C send old1mach from blas -C PLEASE SEND CORRECTIONS TO dmg OR ehg@bell-labs.com. -C -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES. -C -C DATA IMACH( 1) / 5 / -C DATA IMACH( 2) / 6 / -C DATA IMACH( 3) / 43 / -C DATA IMACH( 4) / 6 / -C DATA IMACH( 5) / 36 / -C DATA IMACH( 6) / 4 / -C DATA IMACH( 7) / 2 / -C DATA IMACH( 8) / 35 / -C DATA IMACH( 9) / O377777777777 / -C DATA IMACH(10) / 2 / -C DATA IMACH(11) / 27 / -C DATA IMACH(12) / -127 / -C DATA IMACH(13) / 127 / -C DATA IMACH(14) / 63 / -C DATA IMACH(15) / -127 / -C DATA IMACH(16) / 127 /, SC/987/ -C -C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING -C 32-BIT INTEGER ARITHMETIC. -C -C DATA IMACH( 1) / 5 / -C DATA IMACH( 2) / 6 / -C DATA IMACH( 3) / 7 / -C DATA IMACH( 4) / 6 / -C DATA IMACH( 5) / 32 / -C DATA IMACH( 6) / 4 / -C DATA IMACH( 7) / 2 / -C DATA IMACH( 8) / 31 / -C DATA IMACH( 9) / 2147483647 / -C DATA IMACH(10) / 2 / -C DATA IMACH(11) / 24 / -C DATA IMACH(12) / -127 / -C DATA IMACH(13) / 127 / -C DATA IMACH(14) / 56 / -C DATA IMACH(15) / -127 / -C DATA IMACH(16) / 127 /, SC/987/ -C -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES. -C -C NOTE THAT THE PUNCH UNIT, I1MACH(3), HAS BEEN SET TO 7 -C WHICH IS APPROPRIATE FOR THE UNIVAC-FOR SYSTEM. -C IF YOU HAVE THE UNIVAC-FTN SYSTEM, SET IT TO 1. 
-C -C DATA IMACH( 1) / 5 / -C DATA IMACH( 2) / 6 / -C DATA IMACH( 3) / 7 / -C DATA IMACH( 4) / 6 / -C DATA IMACH( 5) / 36 / -C DATA IMACH( 6) / 6 / -C DATA IMACH( 7) / 2 / -C DATA IMACH( 8) / 35 / -C DATA IMACH( 9) / O377777777777 / -C DATA IMACH(10) / 2 / -C DATA IMACH(11) / 27 / -C DATA IMACH(12) / -128 / -C DATA IMACH(13) / 127 / -C DATA IMACH(14) / 60 / -C DATA IMACH(15) /-1024 / -C DATA IMACH(16) / 1023 /, SC/987/ -C - IF (SC .NE. 987) THEN -* *** CHECK FOR AUTODOUBLE *** - SMALL(2) = 0 - RMACH = 1E13 - IF (SMALL(2) .NE. 0) THEN -* *** AUTODOUBLED *** - IF ( (SMALL(1) .EQ. 1117925532 - * .AND. SMALL(2) .EQ. -448790528) - * .OR. (SMALL(2) .EQ. 1117925532 - * .AND. SMALL(1) .EQ. -448790528)) THEN -* *** IEEE *** - IMACH(10) = 2 - IMACH(14) = 53 - IMACH(15) = -1021 - IMACH(16) = 1024 - ELSE IF ( SMALL(1) .EQ. -2065213935 - * .AND. SMALL(2) .EQ. 10752) THEN -* *** VAX WITH D_FLOATING *** - IMACH(10) = 2 - IMACH(14) = 56 - IMACH(15) = -127 - IMACH(16) = 127 - ELSE IF ( SMALL(1) .EQ. 1267827943 - * .AND. SMALL(2) .EQ. 704643072) THEN -* *** IBM MAINFRAME *** - IMACH(10) = 16 - IMACH(14) = 14 - IMACH(15) = -64 - IMACH(16) = 63 - ELSE - WRITE(*,9010) - STOP 777 - END IF - IMACH(11) = IMACH(14) - IMACH(12) = IMACH(15) - IMACH(13) = IMACH(16) - ELSE - RMACH = 1234567. - IF (SMALL(1) .EQ. 1234613304) THEN -* *** IEEE *** - IMACH(10) = 2 - IMACH(11) = 24 - IMACH(12) = -125 - IMACH(13) = 128 - IMACH(14) = 53 - IMACH(15) = -1021 - IMACH(16) = 1024 - SC = 987 - ELSE IF (SMALL(1) .EQ. -1271379306) THEN -* *** VAX *** - IMACH(10) = 2 - IMACH(11) = 24 - IMACH(12) = -127 - IMACH(13) = 127 - IMACH(14) = 56 - IMACH(15) = -127 - IMACH(16) = 127 - SC = 987 - ELSE IF (SMALL(1) .EQ. 1175639687) THEN -* *** IBM MAINFRAME *** - IMACH(10) = 16 - IMACH(11) = 6 - IMACH(12) = -64 - IMACH(13) = 63 - IMACH(14) = 14 - IMACH(15) = -64 - IMACH(16) = 63 - SC = 987 - ELSE IF (SMALL(1) .EQ. 
1251390520) THEN -* *** CONVEX C-1 *** - IMACH(10) = 2 - IMACH(11) = 24 - IMACH(12) = -128 - IMACH(13) = 127 - IMACH(14) = 53 - IMACH(15) = -1024 - IMACH(16) = 1023 - ELSE - DO 10 I3 = 1, 3 - J = SMALL(1) / 10000000 - K = SMALL(1) - 10000000*J - IF (K .NE. T3E(I3)) GO TO 20 - SMALL(1) = J - 10 CONTINUE -* *** CRAY T3E *** - IMACH( 1) = 5 - IMACH( 2) = 6 - IMACH( 3) = 0 - IMACH( 4) = 0 - IMACH( 5) = 64 - IMACH( 6) = 8 - IMACH( 7) = 2 - IMACH( 8) = 63 - CALL I1MCR1(IMACH(9), K, 32767, 16777215, 16777215) - IMACH(10) = 2 - IMACH(11) = 53 - IMACH(12) = -1021 - IMACH(13) = 1024 - IMACH(14) = 53 - IMACH(15) = -1021 - IMACH(16) = 1024 - GO TO 35 - 20 CALL I1MCR1(J, K, 16405, 9876536, 0) - IF (SMALL(1) .NE. J) THEN - WRITE(*,9020) - STOP 777 - END IF -* *** CRAY 1, XMP, 2, AND 3 *** - IMACH(1) = 5 - IMACH(2) = 6 - IMACH(3) = 102 - IMACH(4) = 6 - IMACH(5) = 46 - IMACH(6) = 8 - IMACH(7) = 2 - IMACH(8) = 45 - CALL I1MCR1(IMACH(9), K, 0, 4194303, 16777215) - IMACH(10) = 2 - IMACH(11) = 47 - IMACH(12) = -8188 - IMACH(13) = 8189 - IMACH(14) = 94 - IMACH(15) = -8141 - IMACH(16) = 8189 - GO TO 35 - END IF - END IF - IMACH( 1) = 5 - IMACH( 2) = 6 - IMACH( 3) = 7 - IMACH( 4) = 6 - IMACH( 5) = 32 - IMACH( 6) = 4 - IMACH( 7) = 2 - IMACH( 8) = 31 - IMACH( 9) = 2147483647 - 35 SC = 987 - END IF - 9010 FORMAT(/' Adjust autodoubled I1MACH by uncommenting data'/ - * ' statements appropriate for your machine and setting'/ - * ' IMACH(I) = IMACH(I+3) for I = 11, 12, and 13.') - 9020 FORMAT(/' Adjust I1MACH by uncommenting data statements'/ - * ' appropriate for your machine.') - IF (I .LT. 1 .OR. I .GT. 16) GO TO 40 - I1MACH = IMACH(I) - RETURN - 40 WRITE(*,*) 'I1MACH(I): I =',I,' is out of bounds.' - STOP -* /* C source for I1MACH -- remove the * in column 1 */ -* /* Note that some values may need changing. 
*/ -*#include -*#include -*#include -*#include -* -*long i1mach_(long *i) -*{ -* switch(*i){ -* case 1: return 5; /* standard input */ -* case 2: return 6; /* standard output */ -* case 3: return 7; /* standard punch */ -* case 4: return 0; /* standard error */ -* case 5: return 32; /* bits per integer */ -* case 6: return sizeof(int); -* case 7: return 2; /* base for integers */ -* case 8: return 31; /* digits of integer base */ -* case 9: return LONG_MAX; -* case 10: return FLT_RADIX; -* case 11: return FLT_MANT_DIG; -* case 12: return FLT_MIN_EXP; -* case 13: return FLT_MAX_EXP; -* case 14: return DBL_MANT_DIG; -* case 15: return DBL_MIN_EXP; -* case 16: return DBL_MAX_EXP; -* } -* fprintf(stderr, "invalid argument: i1mach(%ld)\n", *i); -* exit(1);return 0; /* some compilers demand return values */ -*} - END - SUBROUTINE I1MCR1(A, A1, B, C, D) -**** SPECIAL COMPUTATION FOR OLD CRAY MACHINES **** - INTEGER A, A1, B, C, D - A1 = 16777216*B + C - A = 16777216*A1 + D - END diff --git a/scipy-0.10.1/scipy/special/mach/r1mach.f b/scipy-0.10.1/scipy/special/mach/r1mach.f deleted file mode 100644 index 204530e35e..0000000000 --- a/scipy-0.10.1/scipy/special/mach/r1mach.f +++ /dev/null @@ -1,222 +0,0 @@ - REAL FUNCTION R1MACH(I) - INTEGER I -C -C SINGLE-PRECISION MACHINE CONSTANTS -C R1MACH(1) = B**(EMIN-1), THE SMALLEST POSITIVE MAGNITUDE. -C R1MACH(2) = B**EMAX*(1 - B**(-T)), THE LARGEST MAGNITUDE. -C R1MACH(3) = B**(-T), THE SMALLEST RELATIVE SPACING. -C R1MACH(4) = B**(1-T), THE LARGEST RELATIVE SPACING. -C R1MACH(5) = LOG10(B) -C - INTEGER SMALL(2) - INTEGER LARGE(2) - INTEGER RIGHT(2) - INTEGER DIVER(2) - INTEGER LOG10(2) -C needs to be (2) for AUTODOUBLE, HARRIS SLASH 6, ... 
- INTEGER SC - SAVE SMALL, LARGE, RIGHT, DIVER, LOG10, SC - REAL RMACH(5) - EQUIVALENCE (RMACH(1),SMALL(1)) - EQUIVALENCE (RMACH(2),LARGE(1)) - EQUIVALENCE (RMACH(3),RIGHT(1)) - EQUIVALENCE (RMACH(4),DIVER(1)) - EQUIVALENCE (RMACH(5),LOG10(1)) - INTEGER J, K, L, T3E(3) - DATA T3E(1) / 9777664 / - DATA T3E(2) / 5323660 / - DATA T3E(3) / 46980 / -C THIS VERSION ADAPTS AUTOMATICALLY TO MOST CURRENT MACHINES, -C INCLUDING AUTO-DOUBLE COMPILERS. -C TO COMPILE ON OLDER MACHINES, ADD A C IN COLUMN 1 -C ON THE NEXT LINE - DATA SC/0/ -C AND REMOVE THE C FROM COLUMN 1 IN ONE OF THE SECTIONS BELOW. -C CONSTANTS FOR EVEN OLDER MACHINES CAN BE OBTAINED BY -C mail netlib@research.bell-labs.com -C send old1mach from blas -C PLEASE SEND CORRECTIONS TO dmg OR ehg@bell-labs.com. -C -C MACHINE CONSTANTS FOR THE HONEYWELL DPS 8/70 SERIES. -C DATA RMACH(1) / O402400000000 / -C DATA RMACH(2) / O376777777777 / -C DATA RMACH(3) / O714400000000 / -C DATA RMACH(4) / O716400000000 / -C DATA RMACH(5) / O776464202324 /, SC/987/ -C -C MACHINE CONSTANTS FOR PDP-11 FORTRANS SUPPORTING -C 32-BIT INTEGERS (EXPRESSED IN INTEGER AND OCTAL). -C DATA SMALL(1) / 8388608 / -C DATA LARGE(1) / 2147483647 / -C DATA RIGHT(1) / 880803840 / -C DATA DIVER(1) / 889192448 / -C DATA LOG10(1) / 1067065499 /, SC/987/ -C DATA RMACH(1) / O00040000000 / -C DATA RMACH(2) / O17777777777 / -C DATA RMACH(3) / O06440000000 / -C DATA RMACH(4) / O06500000000 / -C DATA RMACH(5) / O07746420233 /, SC/987/ -C -C MACHINE CONSTANTS FOR THE UNIVAC 1100 SERIES. -C DATA RMACH(1) / O000400000000 / -C DATA RMACH(2) / O377777777777 / -C DATA RMACH(3) / O146400000000 / -C DATA RMACH(4) / O147400000000 / -C DATA RMACH(5) / O177464202324 /, SC/987/ -C - IF (SC .NE. 987) THEN -* *** CHECK FOR AUTODOUBLE *** - SMALL(2) = 0 - RMACH(1) = 1E13 - IF (SMALL(2) .NE. 0) THEN -* *** AUTODOUBLED *** - IF ( SMALL(1) .EQ. 1117925532 - * .AND. SMALL(2) .EQ. 
-448790528) THEN -* *** IEEE BIG ENDIAN *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2146435071 - LARGE(2) = -1 - RIGHT(1) = 1017118720 - RIGHT(2) = 0 - DIVER(1) = 1018167296 - DIVER(2) = 0 - LOG10(1) = 1070810131 - LOG10(2) = 1352628735 - ELSE IF ( SMALL(2) .EQ. 1117925532 - * .AND. SMALL(1) .EQ. -448790528) THEN -* *** IEEE LITTLE ENDIAN *** - SMALL(2) = 1048576 - SMALL(1) = 0 - LARGE(2) = 2146435071 - LARGE(1) = -1 - RIGHT(2) = 1017118720 - RIGHT(1) = 0 - DIVER(2) = 1018167296 - DIVER(1) = 0 - LOG10(2) = 1070810131 - LOG10(1) = 1352628735 - ELSE IF ( SMALL(1) .EQ. -2065213935 - * .AND. SMALL(2) .EQ. 10752) THEN -* *** VAX WITH D_FLOATING *** - SMALL(1) = 128 - SMALL(2) = 0 - LARGE(1) = -32769 - LARGE(2) = -1 - RIGHT(1) = 9344 - RIGHT(2) = 0 - DIVER(1) = 9472 - DIVER(2) = 0 - LOG10(1) = 546979738 - LOG10(2) = -805796613 - ELSE IF ( SMALL(1) .EQ. 1267827943 - * .AND. SMALL(2) .EQ. 704643072) THEN -* *** IBM MAINFRAME *** - SMALL(1) = 1048576 - SMALL(2) = 0 - LARGE(1) = 2147483647 - LARGE(2) = -1 - RIGHT(1) = 856686592 - RIGHT(2) = 0 - DIVER(1) = 873463808 - DIVER(2) = 0 - LOG10(1) = 1091781651 - LOG10(2) = 1352628735 - ELSE - WRITE(*,9010) - STOP 777 - END IF - ELSE - RMACH(1) = 1234567. - IF (SMALL(1) .EQ. 1234613304) THEN -* *** IEEE *** - SMALL(1) = 8388608 - LARGE(1) = 2139095039 - RIGHT(1) = 864026624 - DIVER(1) = 872415232 - LOG10(1) = 1050288283 - ELSE IF (SMALL(1) .EQ. -1271379306) THEN -* *** VAX *** - SMALL(1) = 128 - LARGE(1) = -32769 - RIGHT(1) = 13440 - DIVER(1) = 13568 - LOG10(1) = 547045274 - ELSE IF (SMALL(1) .EQ. 1175639687) THEN -* *** IBM MAINFRAME *** - SMALL(1) = 1048576 - LARGE(1) = 2147483647 - RIGHT(1) = 990904320 - DIVER(1) = 1007681536 - LOG10(1) = 1091781651 - ELSE IF (SMALL(1) .EQ. 1251390520) THEN -* *** CONVEX C-1 *** - SMALL(1) = 8388608 - LARGE(1) = 2147483647 - RIGHT(1) = 880803840 - DIVER(1) = 889192448 - LOG10(1) = 1067065499 - ELSE - DO 10 L = 1, 3 - J = SMALL(1) / 10000000 - K = SMALL(1) - 10000000*J - IF (K .NE. 
T3E(L)) GO TO 20 - SMALL(1) = J - 10 CONTINUE -* *** CRAY T3E *** - CALL I1MCRA(SMALL, K, 16, 0, 0) - CALL I1MCRA(LARGE, K, 32751, 16777215, 16777215) - CALL I1MCRA(RIGHT, K, 15520, 0, 0) - CALL I1MCRA(DIVER, K, 15536, 0, 0) - CALL I1MCRA(LOG10, K, 16339, 4461392, 10451455) - GO TO 30 - 20 CALL I1MCRA(J, K, 16405, 9876536, 0) - IF (SMALL(1) .NE. J) THEN - WRITE(*,9020) - STOP 777 - END IF -* *** CRAY 1, XMP, 2, AND 3 *** - CALL I1MCRA(SMALL(1), K, 8195, 8388608, 1) - CALL I1MCRA(LARGE(1), K, 24574, 16777215, 16777214) - CALL I1MCRA(RIGHT(1), K, 16338, 8388608, 0) - CALL I1MCRA(DIVER(1), K, 16339, 8388608, 0) - CALL I1MCRA(LOG10(1), K, 16383, 10100890, 8715216) - END IF - END IF - 30 SC = 987 - END IF -* SANITY CHECK - IF (RMACH(4) .GE. 1.0) STOP 776 - IF (I .LT. 1 .OR. I .GT. 5) THEN - WRITE(*,*) 'R1MACH(I): I =',I,' is out of bounds.' - STOP - END IF - R1MACH = RMACH(I) - RETURN - 9010 FORMAT(/' Adjust autodoubled R1MACH by getting data'/ - *' appropriate for your machine from D1MACH.') - 9020 FORMAT(/' Adjust R1MACH by uncommenting data statements'/ - *' appropriate for your machine.') -* /* C source for R1MACH -- remove the * in column 1 */ -*#include -*#include -*#include -*float r1mach_(long *i) -*{ -* switch(*i){ -* case 1: return FLT_MIN; -* case 2: return FLT_MAX; -* case 3: return FLT_EPSILON/FLT_RADIX; -* case 4: return FLT_EPSILON; -* case 5: return log10(FLT_RADIX); -* } -* fprintf(stderr, "invalid argument: r1mach(%ld)\n", *i); -* exit(1); return 0; /* else complaint of missing return value */ -*} - END - SUBROUTINE I1MCRA(A, A1, B, C, D) -**** SPECIAL COMPUTATION FOR CRAY MACHINES **** - INTEGER A, A1, B, C, D - A1 = 16777216*B + C - A = 16777216*A1 + D - END diff --git a/scipy-0.10.1/scipy/special/mach/xerror.f b/scipy-0.10.1/scipy/special/mach/xerror.f deleted file mode 100644 index baa55067ba..0000000000 --- a/scipy-0.10.1/scipy/special/mach/xerror.f +++ /dev/null @@ -1,22 +0,0 @@ - SUBROUTINE XERROR(MESS,NMESS,L1,L2) -C -C THIS IS A DUMMY XERROR 
ROUTINE TO PRINT ERROR MESSAGES WITH NMESS -C CHARACTERS. L1 AND L2 ARE DUMMY PARAMETERS TO MAKE THIS CALL -C COMPATIBLE WITH THE SLATEC XERROR ROUTINE. THIS IS A FORTRAN 77 -C ROUTINE. -C - CHARACTER*(*) MESS - NN=NMESS/70 - NR=NMESS-70*NN - IF(NR.NE.0) NN=NN+1 - K=1 - PRINT 900 - 900 FORMAT(/) - DO 10 I=1,NN - KMIN=MIN0(K+69,NMESS) - PRINT *, MESS(K:KMIN) - K=K+70 - 10 CONTINUE - PRINT 900 - RETURN - END diff --git a/scipy-0.10.1/scipy/special/orthogonal.py b/scipy-0.10.1/scipy/special/orthogonal.py deleted file mode 100644 index 539b2483f2..0000000000 --- a/scipy-0.10.1/scipy/special/orthogonal.py +++ /dev/null @@ -1,701 +0,0 @@ -""" -A collection of functions to find the weights and abscissas for -Gaussian Quadrature. - -These calculations are done by finding the eigenvalues of a -tridiagonal matrix whose entries are dependent on the coefficients -in the recursion formula for the orthogonal polynomials with the -corresponding weighting function over the interval. - -Many recursion relations for orthogonal polynomials are given: - -.. math:: - - a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x) - -The recursion relation of interest is - -.. math:: - - P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x) - -where :math:`P` has a different normalization than :math:`f`. - -The coefficients can be found as: - -.. math:: - - A_n = -a2n / a3n - \\qquad - B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2 - -where - -.. math:: - - h_n = \\int_a^b w(x) f_n(x)^2 - -assume: - -.. math:: - - P_0 (x) = 1 - \\qquad - P_{-1} (x) == 0 - -For the mathematical background, see [golub.welsch-1969-mathcomp]_ and -[abramowitz.stegun-1965]_. - -Functions:: - - gen_roots_and_weights -- Generic roots and weights. 
- j_roots -- Jacobi - js_roots -- Shifted Jacobi - la_roots -- Generalized Laguerre - h_roots -- Hermite - he_roots -- Hermite (unit-variance) - cg_roots -- Ultraspherical (Gegenbauer) - t_roots -- Chebyshev of the first kind - u_roots -- Chebyshev of the second kind - c_roots -- Chebyshev of the first kind ([-2,2] interval) - s_roots -- Chebyshev of the second kind ([-2,2] interval) - ts_roots -- Shifted Chebyshev of the first kind. - us_roots -- Shifted Chebyshev of the second kind. - p_roots -- Legendre - ps_roots -- Shifted Legendre - l_roots -- Laguerre - - -.. [golub.welsch-1969-mathcomp] - Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss - Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10. - -.. [abramowitz.stegun-1965] - Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of - Mathematical Functions: with Formulas, Graphs, and Mathematical - Tables*. Gaithersburg, MD: National Bureau of Standards. - http://www.math.sfu.ca/~cbm/aands/ - -""" -# -# Author: Travis Oliphant 2000 -# Updated Sep. 2003 (fixed bugs --- tested to be accurate) - -# Scipy imports. -import numpy as np -from numpy import all, any, exp, inf, pi, sqrt -from numpy.dual import eig - -# Local imports. 
-import _cephes as cephes -_gam = cephes.gamma - -__all__ = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys', - 'jacobi', 'laguerre', 'genlaguerre', 'hermite', 'hermitenorm', - 'gegenbauer', 'sh_legendre', 'sh_chebyt', 'sh_chebyu', 'sh_jacobi', - 'p_roots', 'ps_roots', 'j_roots', 'js_roots', 'l_roots', 'la_roots', - 'he_roots', 'ts_roots', 'us_roots', 's_roots', 't_roots', 'u_roots', - 'c_roots', 'cg_roots', 'h_roots', - 'eval_legendre', 'eval_chebyt', 'eval_chebyu', 'eval_chebyc', - 'eval_chebys', 'eval_jacobi', 'eval_laguerre', 'eval_genlaguerre', - 'eval_hermite', 'eval_hermitenorm', 'eval_gegenbauer', - 'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu', - 'eval_sh_jacobi', 'poch', 'binom'] - -def poch(z, m): - """Pochhammer symbol (z)_m = (z)(z+1)....(z+m-1) = gamma(z+m)/gamma(z)""" - return _gam(z+m) / _gam(z) - -class orthopoly1d(np.poly1d): - def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None, limits=None, monic=0,eval_func=None): - np.poly1d.__init__(self, roots, r=1) - equiv_weights = [weights[k] / wfunc(roots[k]) for k in range(len(roots))] - self.__dict__['weights'] = np.array(zip(roots,weights,equiv_weights)) - self.__dict__['weight_func'] = wfunc - self.__dict__['limits'] = limits - mu = sqrt(hn) - if monic: - evf = eval_func - if evf: - eval_func = lambda x: evf(x)/kn - mu = mu / abs(kn) - kn = 1.0 - self.__dict__['normcoef'] = mu - self.__dict__['coeffs'] *= kn - - # Note: eval_func will be discarded on arithmetic - self.__dict__['_eval_func'] = eval_func - - def __call__(self, v): - if self._eval_func and (isinstance(v, np.ndarray) or np.isscalar(v)): - return self._eval_func(v) - else: - return np.poly1d.__call__(self, v) - - def _scale(self, p): - if p == 1.0: - return - self.__dict__['coeffs'] *= p - evf = self.__dict__['_eval_func'] - if evf: - self.__dict__['_eval_func'] = lambda x: evf(x) * p - self.__dict__['normcoef'] *= p - -def gen_roots_and_weights(n, an_func, sqrt_bn_func, mu): - """[x,w] = 
gen_roots_and_weights(n,an_func,sqrt_bn_func,mu) - - Returns the roots (x) of an nth order orthogonal polynomial, - and weights (w) to use in appropriate Gaussian quadrature with that - orthogonal polynomial. - - The polynomials have the recurrence relation - P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x) - - an_func(n) should return A_n - sqrt_bn_func(n) should return sqrt(B_n) - mu ( = h_0 ) is the integral of the weight over the orthogonal interval - """ - nn = np.arange(1.0,n) - sqrt_bn = sqrt_bn_func(nn) - an = an_func(np.concatenate(([0], nn))) - x, v = eig((np.diagflat(an) + - np.diagflat(sqrt_bn,1) + - np.diagflat(sqrt_bn,-1))) - answer = [] - sortind = x.real.argsort() - answer.append(x[sortind]) - answer.append((mu*v[0]**2)[sortind]) - return answer - -# Jacobi Polynomials 1 P^(alpha,beta)_n(x) -def j_roots(n, alpha, beta, mu=0): - """[x,w] = j_roots(n,alpha,beta) - - Returns the roots (x) of the nth order Jacobi polynomial, P^(alpha,beta)_n(x) - and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting - function (1-x)**alpha (1+x)**beta with alpha,beta > -1. - """ - if any(alpha <= -1) or any(beta <= -1): - raise ValueError("alpha and beta must be greater than -1.") - if n < 1: - raise ValueError("n must be positive.") - - olderr = np.seterr(all='ignore') - try: - (p,q) = (alpha,beta) - # from recurrence relations - sbn_J = lambda k: 2.0/(2.0*k+p+q)*sqrt((k+p)*(k+q)/(2*k+q+p+1)) * \ - (np.where(k==1,1.0,sqrt(k*(k+p+q)/(2.0*k+p+q-1)))) - if any(p == q): # XXX any or all??? 
- an_J = lambda k: 0.0*k - else: - an_J = lambda k: np.where(k==0,(q-p)/(p+q+2.0), - (q*q - p*p)/((2.0*k+p+q)*(2.0*k+p+q+2))) - g = cephes.gamma - mu0 = 2.0**(p+q+1)*g(p+1)*g(q+1)/(g(p+q+2)) - val = gen_roots_and_weights(n,an_J,sbn_J,mu0) - finally: - np.seterr(**olderr) - - if mu: - return val + [mu0] - else: - return val - -def jacobi(n, alpha, beta, monic=0): - """Returns the nth order Jacobi polynomial, P^(alpha,beta)_n(x) - orthogonal over [-1,1] with weighting function - (1-x)**alpha (1+x)**beta with alpha,beta > -1. - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - wfunc = lambda x: (1-x)**alpha * (1+x)**beta - if n==0: - return orthopoly1d([],[],1.0,1.0,wfunc,(-1,1),monic, - eval_func=np.ones_like) - x,w,mu = j_roots(n,alpha,beta,mu=1) - ab1 = alpha+beta+1.0 - hn = 2**ab1/(2*n+ab1)*_gam(n+alpha+1) - hn *= _gam(n+beta+1.0) / _gam(n+1) / _gam(n+ab1) - kn = _gam(2*n+ab1)/2.0**n / _gam(n+1) / _gam(n+ab1) - # here kn = coefficient on x^n term - p = orthopoly1d(x,w,hn,kn,wfunc,(-1,1),monic, - lambda x: eval_jacobi(n,alpha,beta,x)) - return p - -# Jacobi Polynomials shifted G_n(p,q,x) -def js_roots(n, p1, q1, mu=0): - """[x,w] = js_roots(n,p,q) - - Returns the roots (x) of the nth order shifted Jacobi polynomial, G_n(p,q,x), - and weights (w) to use in Gaussian Quadrature over [0,1] with weighting - function (1-x)**(p-q) x**(q-1) with p-q > -1 and q > 0. 
- """ - # from recurrence relation - if not ( any((p1 - q1) > -1) and any(q1 > 0) ): - raise ValueError("(p - q) > -1 and q > 0 please.") - if n <= 0: - raise ValueError("n must be positive.") - - p,q = p1,q1 - - sbn_Js = lambda k: sqrt(np.where(k==1,q*(p-q+1.0)/(p+2.0), \ - k*(k+q-1.0)*(k+p-1.0)*(k+p-q) \ - / ((2.0*k+p-2) * (2.0*k+p))))/(2*k+p-1.0) - an_Js = lambda k: np.where(k==0,q/(p+1.0),(2.0*k*(k+p)+q*(p-1.0)) / ((2.0*k+p+1.0)*(2*k+p-1.0))) - - # could also use definition - # Gn(p,q,x) = constant_n * P^(p-q,q-1)_n(2x-1) - # so roots of Gn(p,q,x) are (roots of P^(p-q,q-1)_n + 1) / 2.0 - g = _gam - # integral of weight over interval - mu0 = g(q)*g(p-q+1)/g(p+1) - val = gen_roots_and_weights(n,an_Js,sbn_Js,mu0) - if mu: - return val + [mu0] - else: - return val - # What code would look like using jacobi polynomial roots - #if mu: - # [x,w,mut] = j_roots(n,p-q,q-1,mu=1) - # return [(x+1)/2.0,w,mu0] - #else: - # [x,w] = j_roots(n,p-q,q-1,mu=0) - # return [(x+1)/2.0,w] - -def sh_jacobi(n, p, q, monic=0): - """Returns the nth order Jacobi polynomial, G_n(p,q,x) - orthogonal over [0,1] with weighting function - (1-x)**(p-q) (x)**(q-1) with p>q-1 and q > 0. - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - wfunc = lambda x: (1.0-x)**(p-q) * (x)**(q-1.) - if n==0: - return orthopoly1d([],[],1.0,1.0,wfunc,(-1,1),monic, - eval_func=np.ones_like) - n1 = n - x,w,mu0 = js_roots(n1,p,q,mu=1) - hn = _gam(n+1)*_gam(n+q)*_gam(n+p)*_gam(n+p-q+1) - hn /= (2*n+p)*(_gam(2*n+p)**2) - # kn = 1.0 in standard form so monic is redundant. Kept for compatibility. 
- kn = 1.0 - pp = orthopoly1d(x,w,hn,kn,wfunc=wfunc,limits=(0,1),monic=monic, - eval_func=lambda x: eval_sh_jacobi(n, p, q, x)) - return pp - -# Generalized Laguerre L^(alpha)_n(x) -def la_roots(n, alpha, mu=0): - """[x,w] = la_roots(n,alpha) - - Returns the roots (x) of the nth order generalized (associated) Laguerre - polynomial, L^(alpha)_n(x), and weights (w) to use in Gaussian quadrature over - [0,inf] with weighting function exp(-x) x**alpha with alpha > -1. - """ - if not all(alpha > -1): - raise ValueError("alpha > -1") - if n < 1: - raise ValueError("n must be positive.") - - (p,q) = (alpha,0.0) - sbn_La = lambda k: -sqrt(k*(k + p)) # from recurrence relation - an_La = lambda k: 2*k + p + 1 - mu0 = cephes.gamma(alpha+1) # integral of weight over interval - val = gen_roots_and_weights(n,an_La,sbn_La,mu0) - if mu: - return val + [mu0] - else: - return val - -def genlaguerre(n, alpha, monic=0): - """Returns the nth order generalized (associated) Laguerre polynomial, - L^(alpha)_n(x), orthogonal over [0,inf) with weighting function - exp(-x) x**alpha with alpha > -1 - """ - if any(alpha <= -1): - raise ValueError("alpha must be > -1") - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = la_roots(n1,alpha,mu=1) - wfunc = lambda x: exp(-x) * x**alpha - if n==0: x,w = [],[] - hn = _gam(n+alpha+1)/_gam(n+1) - kn = (-1)**n / _gam(n+1) - p = orthopoly1d(x,w,hn,kn,wfunc,(0,inf),monic, - lambda x: eval_genlaguerre(n,alpha,x)) - return p - -# Laguerre L_n(x) -def l_roots(n, mu=0): - """[x,w] = l_roots(n) - - Returns the roots (x) of the nth order Laguerre polynomial, L_n(x), - and weights (w) to use in Gaussian Quadrature over [0,inf] with weighting - function exp(-x). 
- """ - return la_roots(n,0.0,mu=mu) - -def laguerre(n, monic=0): - """Return the nth order Laguerre polynoimal, L_n(x), orthogonal over - [0,inf) with weighting function exp(-x) - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = l_roots(n1,mu=1) - if n==0: x,w = [],[] - hn = 1.0 - kn = (-1)**n / _gam(n+1) - p = orthopoly1d(x,w,hn,kn,lambda x: exp(-x),(0,inf),monic, - lambda x: eval_laguerre(n,x)) - return p - - -# Hermite 1 H_n(x) -def h_roots(n, mu=0): - """[x,w] = h_roots(n) - - Returns the roots (x) of the nth order Hermite polynomial, - H_n(x), and weights (w) to use in Gaussian Quadrature over - [-inf,inf] with weighting function exp(-x**2). - """ - if n < 1: - raise ValueError("n must be positive.") - - sbn_H = lambda k: sqrt(k/2) # from recurrence relation - an_H = lambda k: 0*k - mu0 = sqrt(pi) # integral of weight over interval - val = gen_roots_and_weights(n,an_H,sbn_H,mu0) - if mu: - return val + [mu0] - else: - return val - -def hermite(n, monic=0): - """Return the nth order Hermite polynomial, H_n(x), orthogonal over - (-inf,inf) with weighting function exp(-x**2) - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = h_roots(n1,mu=1) - wfunc = lambda x: exp(-x*x) - if n==0: x,w = [],[] - hn = 2**n * _gam(n+1)*sqrt(pi) - kn = 2**n - p = orthopoly1d(x,w,hn,kn,wfunc,(-inf,inf),monic, - lambda x: eval_hermite(n,x)) - return p - -# Hermite 2 He_n(x) -def he_roots(n, mu=0): - """[x,w] = he_roots(n) - - Returns the roots (x) of the nth order Hermite polynomial, - He_n(x), and weights (w) to use in Gaussian Quadrature over - [-inf,inf] with weighting function exp(-(x/2)**2). 
- """ - if n < 1: - raise ValueError("n must be positive.") - - sbn_He = lambda k: sqrt(k) # from recurrence relation - an_He = lambda k: 0*k - mu0 = sqrt(2*pi) # integral of weight over interval - val = gen_roots_and_weights(n,an_He,sbn_He,mu0) - if mu: - return val + [mu0] - else: - return val - -def hermitenorm(n, monic=0): - """Return the nth order normalized Hermite polynomial, He_n(x), orthogonal - over (-inf,inf) with weighting function exp(-(x/2)**2) - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = he_roots(n1,mu=1) - wfunc = lambda x: exp(-x*x/4.0) - if n==0: x,w = [],[] - hn = sqrt(2*pi)*_gam(n+1) - kn = 1.0 - p = orthopoly1d(x,w,hn,kn,wfunc=wfunc,limits=(-inf,inf),monic=monic, - eval_func=lambda x: eval_hermitenorm(n,x)) - return p - -## The remainder of the polynomials can be derived from the ones above. - -# Ultraspherical (Gegenbauer) C^(alpha)_n(x) -def cg_roots(n, alpha, mu=0): - """[x,w] = cg_roots(n,alpha) - - Returns the roots (x) of the nth order Ultraspherical (Gegenbauer) - polynomial, C^(alpha)_n(x), and weights (w) to use in Gaussian Quadrature - over [-1,1] with weighting function (1-x**2)**(alpha-1/2) with alpha>-1/2. - """ - return j_roots(n,alpha-0.5,alpha-0.5,mu=mu) - -def gegenbauer(n, alpha, monic=0): - """Return the nth order Gegenbauer (ultraspherical) polynomial, - C^(alpha)_n(x), orthogonal over [-1,1] with weighting function - (1-x**2)**(alpha-1/2) with alpha > -1/2 - """ - base = jacobi(n,alpha-0.5,alpha-0.5,monic=monic) - if monic: - return base - # Abrahmowitz and Stegan 22.5.20 - factor = _gam(2*alpha+n)*_gam(alpha+0.5) / _gam(2*alpha) / _gam(alpha+0.5+n) - base._scale(factor) - return base - -# Chebyshev of the first kind: T_n(x) = n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x) -# Computed anew. 
-def t_roots(n, mu=0): - """[x,w] = t_roots(n) - - Returns the roots (x) of the nth order Chebyshev (of the first kind) - polynomial, T_n(x), and weights (w) to use in Gaussian Quadrature - over [-1,1] with weighting function (1-x**2)**(-1/2). - """ - if n < 1: - raise ValueError("n must be positive.") - - # from recurrence relation - sbn_J = lambda k: np.where(k==1,sqrt(2)/2.0,0.5) - an_J = lambda k: 0.0*k - g = cephes.gamma - mu0 = pi - val = gen_roots_and_weights(n,an_J,sbn_J,mu0) - if mu: - return val + [mu0] - else: - return val - -def chebyt(n, monic=0): - """Return nth order Chebyshev polynomial of first kind, Tn(x). Orthogonal - over [-1,1] with weight function (1-x**2)**(-1/2). - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - wfunc = lambda x: 1.0/sqrt(1-x*x) - if n==0: - return orthopoly1d([],[],pi,1.0,wfunc,(-1,1),monic, - lambda x: eval_chebyt(n,x)) - n1 = n - x,w,mu = t_roots(n1,mu=1) - hn = pi/2 - kn = 2**(n-1) - p = orthopoly1d(x,w,hn,kn,wfunc,(-1,1),monic, - lambda x: eval_chebyt(n,x)) - return p - -# Chebyshev of the second kind -# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x) -def u_roots(n, mu=0): - """[x,w] = u_roots(n) - - Returns the roots (x) of the nth order Chebyshev (of the second kind) - polynomial, U_n(x), and weights (w) to use in Gaussian Quadrature - over [-1,1] with weighting function (1-x**2)**1/2. - """ - return j_roots(n,0.5,0.5,mu=mu) - -def chebyu(n, monic=0): - """Return nth order Chebyshev polynomial of second kind, Un(x). Orthogonal - over [-1,1] with weight function (1-x**2)**(1/2). 
- """ - base = jacobi(n,0.5,0.5,monic=monic) - if monic: - return base - factor = sqrt(pi)/2.0*_gam(n+2) / _gam(n+1.5) - base._scale(factor) - return base - -# Chebyshev of the first kind C_n(x) -def c_roots(n, mu=0): - """[x,w] = c_roots(n) - - Returns the roots (x) of the nth order Chebyshev (of the first kind) - polynomial, C_n(x), and weights (w) to use in Gaussian Quadrature - over [-2,2] with weighting function (1-(x/2)**2)**(-1/2). - """ - if mu: - [x,w,mu0] = j_roots(n,-0.5,-0.5,mu=1) - return [x*2,w,mu0] - else: - [x,w] = j_roots(n,-0.5,-0.5,mu=0) - return [x*2,w] - -def chebyc(n, monic=0): - """Return nth order Chebyshev polynomial of first kind, Cn(x). Orthogonal - over [-2,2] with weight function (1-(x/2)**2)**(-1/2). - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = c_roots(n1,mu=1) - if n==0: x,w = [],[] - hn = 4*pi * ((n==0)+1) - kn = 1.0 - p = orthopoly1d(x,w,hn,kn,wfunc=lambda x: 1.0/sqrt(1-x*x/4.0),limits=(-2,2),monic=monic) - if not monic: - p._scale(2.0/p(2)) - p.__dict__['_eval_func'] = lambda x: eval_chebyc(n,x) - return p - -# Chebyshev of the second kind S_n(x) -def s_roots(n, mu=0): - """[x,w] = s_roots(n) - - Returns the roots (x) of the nth order Chebyshev (of the second kind) - polynomial, S_n(x), and weights (w) to use in Gaussian Quadrature - over [-2,2] with weighting function (1-(x/2)**2)**1/2. - """ - if mu: - [x,w,mu0] = j_roots(n,0.5,0.5,mu=1) - return [x*2,w,mu0] - else: - [x,w] = j_roots(n,0.5,0.5,mu=0) - return [x*2,w] - -def chebys(n, monic=0): - """Return nth order Chebyshev polynomial of second kind, Sn(x). Orthogonal - over [-2,2] with weight function (1-(x/)**2)**(1/2). 
- """ - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = s_roots(n1,mu=1) - if n==0: x,w = [],[] - hn = pi - kn = 1.0 - p = orthopoly1d(x,w,hn,kn,wfunc=lambda x: sqrt(1-x*x/4.0),limits=(-2,2),monic=monic) - if not monic: - factor = (n+1.0)/p(2) - p._scale(factor) - p.__dict__['_eval_func'] = lambda x: eval_chebys(n,x) - return p - -# Shifted Chebyshev of the first kind T^*_n(x) -def ts_roots(n, mu=0): - """[x,w] = ts_roots(n) - - Returns the roots (x) of the nth order shifted Chebyshev (of the first kind) - polynomial, T^*_n(x), and weights (w) to use in Gaussian Quadrature - over [0,1] with weighting function (x-x**2)**(-1/2). - """ - return js_roots(n,0.0,0.5,mu=mu) - -def sh_chebyt(n, monic=0): - """Return nth order shifted Chebyshev polynomial of first kind, Tn(x). - Orthogonal over [0,1] with weight function (x-x**2)**(-1/2). - """ - base = sh_jacobi(n,0.0,0.5,monic=monic) - if monic: - return base - if n > 0: - factor = 4**n / 2.0 - else: - factor = 1.0 - base._scale(factor) - return base - - -# Shifted Chebyshev of the second kind U^*_n(x) -def us_roots(n, mu=0): - """[x,w] = us_roots(n) - - Returns the roots (x) of the nth order shifted Chebyshev (of the second kind) - polynomial, U^*_n(x), and weights (w) to use in Gaussian Quadrature - over [0,1] with weighting function (x-x**2)**1/2. - """ - return js_roots(n,2.0,1.5,mu=mu) - -def sh_chebyu(n, monic=0): - """Return nth order shifted Chebyshev polynomial of second kind, Un(x). - Orthogonal over [0,1] with weight function (x-x**2)**(1/2). - """ - base = sh_jacobi(n,2.0,1.5,monic=monic) - if monic: return base - factor = 4**n - base._scale(factor) - return base - -# Legendre -def p_roots(n, mu=0): - """[x,w] = p_roots(n) - - Returns the roots (x) of the nth order Legendre polynomial, P_n(x), - and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting - function 1. 
- """ - return j_roots(n,0.0,0.0,mu=mu) - -def legendre(n, monic=0): - """Returns the nth order Legendre polynomial, P_n(x), orthogonal over - [-1,1] with weight function 1. - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - if n==0: n1 = n+1 - else: n1 = n - x,w,mu0 = p_roots(n1,mu=1) - if n==0: x,w = [],[] - hn = 2.0/(2*n+1) - kn = _gam(2*n+1)/_gam(n+1)**2 / 2.0**n - p = orthopoly1d(x,w,hn,kn,wfunc=lambda x: 1.0,limits=(-1,1),monic=monic, - eval_func=lambda x: eval_legendre(n,x)) - return p - -# Shifted Legendre P^*_n(x) -def ps_roots(n, mu=0): - """[x,w] = ps_roots(n) - - Returns the roots (x) of the nth order shifted Legendre polynomial, P^*_n(x), - and weights (w) to use in Gaussian Quadrature over [0,1] with weighting - function 1. - """ - return js_roots(n,1.0,1.0,mu=mu) - -def sh_legendre(n, monic=0): - """Returns the nth order shifted Legendre polynomial, P^*_n(x), orthogonal - over [0,1] with weighting function 1. - """ - if n < 0: - raise ValueError("n must be nonnegative.") - - wfunc = lambda x: 0.0*x + 1.0 - if n==0: return orthopoly1d([],[],1.0,1.0,wfunc,(0,1),monic, - lambda x: eval_sh_legendre(n,x)) - x,w,mu0 = ps_roots(n,mu=1) - hn = 1.0/(2*n+1.0) - kn = _gam(2*n+1)/_gam(n+1)**2 - p = orthopoly1d(x,w,hn,kn,wfunc,limits=(0,1),monic=monic, - eval_func=lambda x: eval_sh_legendre(n,x)) - return p - -#------------------------------------------------------------------------------ -# Vectorized functions for evaluation -#------------------------------------------------------------------------------ -from orthogonal_eval import \ - binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer, eval_chebyt, \ - eval_chebyu, eval_chebys, eval_chebyc, eval_sh_chebyt, eval_sh_chebyu, \ - eval_legendre, eval_sh_legendre, eval_genlaguerre, eval_laguerre, \ - eval_hermite, eval_hermitenorm diff --git a/scipy-0.10.1/scipy/special/orthogonal_eval.c b/scipy-0.10.1/scipy/special/orthogonal_eval.c deleted file mode 100644 index 32f8e1bc85..0000000000 --- 
a/scipy-0.10.1/scipy/special/orthogonal_eval.c +++ /dev/null @@ -1,5436 +0,0 @@ -/* Generated by Cython 0.15 on Tue Nov 1 18:20:03 2011 */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#else - -#include /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define 
PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define 
PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__special__orthogonal_eval -#define 
__PYX_HAVE_API__scipy__special__orthogonal_eval -#include "math.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... */ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... */ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -static const char *__pyx_f[] = { - "orthogonal_eval.pyx", -}; - -/*--- Type declarations ---*/ - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), 
__LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_intp(npy_intp); - -static 
CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static int __Pyx_check_binary_version(void); - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'scipy.special.orthogonal_eval' */ -static char __pyx_v_5scipy_7special_15orthogonal_eval__id_d_types[3]; -static PyUFuncGenericFunction __pyx_v_5scipy_7special_15orthogonal_eval__id_d_funcs[1]; -static void *__pyx_v_5scipy_7special_15orthogonal_eval_chebyt_data[1]; -static double __pyx_f_5scipy_7special_15orthogonal_eval_eval_poly_chebyt(long, double); /*proto*/ -static void __pyx_f_5scipy_7special_15orthogonal_eval__loop_id_d(char **, npy_intp *, npy_intp *, void *); /*proto*/ -#define __Pyx_MODULE_NAME "scipy.special.orthogonal_eval" -int 
__pyx_module_is_main_scipy__special__orthogonal_eval = 0; - -/* Implementation of 'scipy.special.orthogonal_eval' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ValueError; -static char __pyx_k_1[] = "Order must be integer"; -static char __pyx_k_3[] = "\nEvaluate orthogonal polynomial values using recurrence relations\nor by calling special functions.\n\nReferences\n----------\n\n.. [AMS55] Abramowitz & Stegun, Section 22.5.\n\n.. [MH] Mason & Handscombe, Chebyshev Polynomials, CRC Press (2003).\n\n"; -static char __pyx_k_4[] = ""; -static char __pyx_k_5[] = "scipy.special._cephes"; -static char __pyx_k_6[] = "scipy.special.orthogonal_eval"; -static char __pyx_k__k[] = "k"; -static char __pyx_k__n[] = "n"; -static char __pyx_k__p[] = "p"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__x[] = "x"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__any[] = "any"; -static char __pyx_k__exp[] = "exp"; -static char __pyx_k__out[] = "out"; -static char __pyx_k__beta[] = "beta"; -static char __pyx_k__alpha[] = "alpha"; -static char __pyx_k__binom[] = "binom"; -static char __pyx_k__gamma[] = "gamma"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__range[] = "range"; -static char __pyx_k__hyp1f1[] = "hyp1f1"; -static char __pyx_k__hyp2f1[] = "hyp2f1"; -static char __pyx_k__gammaln[] = "gammaln"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k__atleast_1d[] = "atleast_1d"; -static char __pyx_k__zeros_like[] = "zeros_like"; -static char __pyx_k__eval_chebyc[] = "eval_chebyc"; -static char __pyx_k__eval_chebys[] = "eval_chebys"; -static char __pyx_k__eval_chebyt[] = "eval_chebyt"; -static char __pyx_k__eval_chebyu[] = "eval_chebyu"; -static char __pyx_k__eval_jacobi[] = "eval_jacobi"; -static char __pyx_k___eval_chebyt[] = "_eval_chebyt"; -static char __pyx_k__eval_hermite[] = "eval_hermite"; -static char 
__pyx_k__eval_laguerre[] = "eval_laguerre"; -static char __pyx_k__eval_legendre[] = "eval_legendre"; -static char __pyx_k__eval_sh_chebyt[] = "eval_sh_chebyt"; -static char __pyx_k__eval_sh_chebyu[] = "eval_sh_chebyu"; -static char __pyx_k__eval_sh_jacobi[] = "eval_sh_jacobi"; -static char __pyx_k__eval_gegenbauer[] = "eval_gegenbauer"; -static char __pyx_k__broadcast_arrays[] = "broadcast_arrays"; -static char __pyx_k__eval_genlaguerre[] = "eval_genlaguerre"; -static char __pyx_k__eval_hermitenorm[] = "eval_hermitenorm"; -static char __pyx_k__eval_sh_legendre[] = "eval_sh_legendre"; -static PyObject *__pyx_kp_s_1; -static PyObject *__pyx_n_s_5; -static PyObject *__pyx_n_s_6; -static PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s___eval_chebyt; -static PyObject *__pyx_n_s__alpha; -static PyObject *__pyx_n_s__any; -static PyObject *__pyx_n_s__atleast_1d; -static PyObject *__pyx_n_s__beta; -static PyObject *__pyx_n_s__binom; -static PyObject *__pyx_n_s__broadcast_arrays; -static PyObject *__pyx_n_s__eval_chebyc; -static PyObject *__pyx_n_s__eval_chebys; -static PyObject *__pyx_n_s__eval_chebyt; -static PyObject *__pyx_n_s__eval_chebyu; -static PyObject *__pyx_n_s__eval_gegenbauer; -static PyObject *__pyx_n_s__eval_genlaguerre; -static PyObject *__pyx_n_s__eval_hermite; -static PyObject *__pyx_n_s__eval_hermitenorm; -static PyObject *__pyx_n_s__eval_jacobi; -static PyObject *__pyx_n_s__eval_laguerre; -static PyObject *__pyx_n_s__eval_legendre; -static PyObject *__pyx_n_s__eval_sh_chebyt; -static PyObject *__pyx_n_s__eval_sh_chebyu; -static PyObject *__pyx_n_s__eval_sh_jacobi; -static PyObject *__pyx_n_s__eval_sh_legendre; -static PyObject *__pyx_n_s__exp; -static PyObject *__pyx_n_s__gamma; -static PyObject *__pyx_n_s__gammaln; -static PyObject *__pyx_n_s__hyp1f1; -static PyObject *__pyx_n_s__hyp2f1; -static PyObject *__pyx_n_s__k; -static PyObject *__pyx_n_s__n; -static 
PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__out; -static PyObject *__pyx_n_s__p; -static PyObject *__pyx_n_s__q; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__x; -static PyObject *__pyx_n_s__zeros_like; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_neg_1; -static PyObject *__pyx_k_tuple_2; - -/* "scipy/special/orthogonal_eval.pyx":25 - * double sqrt(double x) nogil - * - * cdef double eval_poly_chebyt(long k, double x) nogil: # <<<<<<<<<<<<<< - * # Use Chebyshev T recurrence directly, see [MH] - * cdef long m - */ - -static double __pyx_f_5scipy_7special_15orthogonal_eval_eval_poly_chebyt(long __pyx_v_k, double __pyx_v_x) { - long __pyx_v_m; - double __pyx_v_b2; - double __pyx_v_b1; - double __pyx_v_b0; - double __pyx_r; - long __pyx_t_1; - long __pyx_t_2; - - /* "scipy/special/orthogonal_eval.pyx":30 - * cdef double b2, b1, b0 - * - * b2 = 0 # <<<<<<<<<<<<<< - * b1 = -1 - * b0 = 0 - */ - __pyx_v_b2 = 0.0; - - /* "scipy/special/orthogonal_eval.pyx":31 - * - * b2 = 0 - * b1 = -1 # <<<<<<<<<<<<<< - * b0 = 0 - * x = 2*x - */ - __pyx_v_b1 = -1.0; - - /* "scipy/special/orthogonal_eval.pyx":32 - * b2 = 0 - * b1 = -1 - * b0 = 0 # <<<<<<<<<<<<<< - * x = 2*x - * for m in range(k+1): - */ - __pyx_v_b0 = 0.0; - - /* "scipy/special/orthogonal_eval.pyx":33 - * b1 = -1 - * b0 = 0 - * x = 2*x # <<<<<<<<<<<<<< - * for m in range(k+1): - * b2 = b1 - */ - __pyx_v_x = (2.0 * __pyx_v_x); - - /* "scipy/special/orthogonal_eval.pyx":34 - * b0 = 0 - * x = 2*x - * for m in range(k+1): # <<<<<<<<<<<<<< - * b2 = b1 - * b1 = b0 - */ - __pyx_t_1 = (__pyx_v_k + 1); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_m = __pyx_t_2; - - /* "scipy/special/orthogonal_eval.pyx":35 - * x = 2*x - * for m in range(k+1): - * b2 = b1 # <<<<<<<<<<<<<< - * b1 = b0 - * b0 = x*b1 - b2 - */ - __pyx_v_b2 = __pyx_v_b1; - - /* 
"scipy/special/orthogonal_eval.pyx":36 - * for m in range(k+1): - * b2 = b1 - * b1 = b0 # <<<<<<<<<<<<<< - * b0 = x*b1 - b2 - * return (b0 - b2)/2.0 - */ - __pyx_v_b1 = __pyx_v_b0; - - /* "scipy/special/orthogonal_eval.pyx":37 - * b2 = b1 - * b1 = b0 - * b0 = x*b1 - b2 # <<<<<<<<<<<<<< - * return (b0 - b2)/2.0 - * - */ - __pyx_v_b0 = ((__pyx_v_x * __pyx_v_b1) - __pyx_v_b2); - } - - /* "scipy/special/orthogonal_eval.pyx":38 - * b1 = b0 - * b0 = x*b1 - b2 - * return (b0 - b2)/2.0 # <<<<<<<<<<<<<< - * - * #------------------------------------------------------------------------------ - */ - __pyx_r = ((__pyx_v_b0 - __pyx_v_b2) / 2.0); - goto __pyx_L0; - - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":58 - * int identity, char* name, char* doc, int c) - * - * cdef void _loop_id_d(char **args, npy_intp *dimensions, npy_intp *steps, # <<<<<<<<<<<<<< - * void *func) nogil: - * cdef int i - */ - -static void __pyx_f_5scipy_7special_15orthogonal_eval__loop_id_d(char **__pyx_v_args, npy_intp *__pyx_v_dimensions, npy_intp *__pyx_v_steps, void *__pyx_v_func) { - int __pyx_v_i; - char *__pyx_v_ip1; - char *__pyx_v_ip2; - char *__pyx_v_op; - npy_intp __pyx_t_1; - int __pyx_t_2; - - /* "scipy/special/orthogonal_eval.pyx":62 - * cdef int i - * cdef double x - * cdef char *ip1=args[0], *ip2=args[1], *op=args[2] # <<<<<<<<<<<<<< - * for i in range(0, dimensions[0]): - * (op)[0] = (func)( - */ - __pyx_v_ip1 = (__pyx_v_args[0]); - __pyx_v_ip2 = (__pyx_v_args[1]); - __pyx_v_op = (__pyx_v_args[2]); - - /* "scipy/special/orthogonal_eval.pyx":63 - * cdef double x - * cdef char *ip1=args[0], *ip2=args[1], *op=args[2] - * for i in range(0, dimensions[0]): # <<<<<<<<<<<<<< - * (op)[0] = (func)( - * (ip1)[0], (ip2)[0]) - */ - __pyx_t_1 = (__pyx_v_dimensions[0]); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_v_i = __pyx_t_2; - - /* "scipy/special/orthogonal_eval.pyx":64 - * cdef char *ip1=args[0], *ip2=args[1], *op=args[2] - * 
for i in range(0, dimensions[0]): - * (op)[0] = (func)( # <<<<<<<<<<<<<< - * (ip1)[0], (ip2)[0]) - * ip1 += steps[0]; ip2 += steps[1]; op += steps[2] - */ - (((double *)__pyx_v_op)[0]) = ((double (*)(long, double))__pyx_v_func)((((long *)__pyx_v_ip1)[0]), (((double *)__pyx_v_ip2)[0])); - - /* "scipy/special/orthogonal_eval.pyx":66 - * (op)[0] = (func)( - * (ip1)[0], (ip2)[0]) - * ip1 += steps[0]; ip2 += steps[1]; op += steps[2] # <<<<<<<<<<<<<< - * - * cdef char _id_d_types[3] - */ - __pyx_v_ip1 = (__pyx_v_ip1 + (__pyx_v_steps[0])); - __pyx_v_ip2 = (__pyx_v_ip2 + (__pyx_v_steps[1])); - __pyx_v_op = (__pyx_v_op + (__pyx_v_steps[2])); - } - -} - -/* "scipy/special/orthogonal_eval.pyx":97 - * from numpy import exp - * - * def binom(n, k): # <<<<<<<<<<<<<< - * """Binomial coefficient""" - * return np.exp(gammaln(1+n) - gammaln(1+k) - gammaln(1+n-k)) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_binom(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_binom[] = "Binomial coefficient"; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_binom = {__Pyx_NAMESTR("binom"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_binom, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_binom)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_binom(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_k = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__k,0}; - __Pyx_RefNannySetupContext("binom"); - __pyx_self = __pyx_self; - if 
(unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("binom", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "binom") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_k = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_k = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("binom", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.binom", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":99 - * def binom(n, k): - * """Binomial coefficient""" - * return np.exp(gammaln(1+n) - gammaln(1+k) - gammaln(1+n-k)) # <<<<<<<<<<<<<< - * - * def eval_jacobi(n, alpha, beta, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = 
__Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__exp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Add(__pyx_int_1, __pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyNumber_Add(__pyx_int_1, __pyx_v_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Subtract(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Add(__pyx_int_1, __pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Subtract(__pyx_t_3, __pyx_v_k); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject 
*)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.binom", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":101 - * return np.exp(gammaln(1+n) - gammaln(1+k) - gammaln(1+n-k)) - * - * def eval_jacobi(n, alpha, beta, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Jacobi polynomial at a point.""" - * d = binom(n+alpha, n) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_1eval_jacobi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_1eval_jacobi[] = "Evaluate Jacobi polynomial at a point."; -static PyMethodDef 
__pyx_mdef_5scipy_7special_15orthogonal_eval_1eval_jacobi = {__Pyx_NAMESTR("eval_jacobi"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_1eval_jacobi, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_1eval_jacobi)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_1eval_jacobi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_alpha = 0; - PyObject *__pyx_v_beta = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_d = NULL; - PyObject *__pyx_v_a = NULL; - PyObject *__pyx_v_b = NULL; - PyObject *__pyx_v_c = NULL; - PyObject *__pyx_v_g = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__alpha,&__pyx_n_s__beta,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_jacobi"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[5] = {0,0,0,0,0}; - values[4] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_jacobi", 
0, 4, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__beta); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_jacobi", 0, 4, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[3])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_jacobi", 0, 4, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 4: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_jacobi") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_alpha = values[1]; - __pyx_v_beta = values[2]; - __pyx_v_x = values[3]; - __pyx_v_out = values[4]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 3); - __pyx_v_beta = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_alpha = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_jacobi", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_jacobi", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":103 - * def eval_jacobi(n, alpha, beta, x, out=None): - * """Evaluate Jacobi polynomial at a point.""" - * d = binom(n+alpha, n) # <<<<<<<<<<<<<< - * a = -n - * b = n + alpha + beta + 1 - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__binom); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Add(__pyx_v_n, __pyx_v_alpha); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_d = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":104 - * """Evaluate Jacobi polynomial at a point.""" - * d = binom(n+alpha, n) - * a = -n # <<<<<<<<<<<<<< - * b = n + alpha + beta + 1 - * c = alpha + 1 - */ - __pyx_t_2 = PyNumber_Negative(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_a = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":105 - * d = 
binom(n+alpha, n) - * a = -n - * b = n + alpha + beta + 1 # <<<<<<<<<<<<<< - * c = alpha + 1 - * g = (1-x)/2.0 - */ - __pyx_t_2 = PyNumber_Add(__pyx_v_n, __pyx_v_alpha); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_v_beta); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_b = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":106 - * a = -n - * b = n + alpha + beta + 1 - * c = alpha + 1 # <<<<<<<<<<<<<< - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d - */ - __pyx_t_2 = PyNumber_Add(__pyx_v_alpha, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_c = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":107 - * b = n + alpha + beta + 1 - * c = alpha + 1 - * g = (1-x)/2.0 # <<<<<<<<<<<<<< - * return hyp2f1(a, b, c, g) * d - * - */ - __pyx_t_2 = PyNumber_Subtract(__pyx_int_1, __pyx_v_x); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_g = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":108 - * c = alpha + 1 - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d # <<<<<<<<<<<<<< - * - * def eval_sh_jacobi(n, p, q, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__hyp2f1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_a); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_a); - __Pyx_GIVEREF(__pyx_v_a); - __Pyx_INCREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_b); - __Pyx_GIVEREF(__pyx_v_b); - __Pyx_INCREF(__pyx_v_c); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_c); - __Pyx_GIVEREF(__pyx_v_c); - __Pyx_INCREF(__pyx_v_g); - PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_g); - __Pyx_GIVEREF(__pyx_v_g); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_v_d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - 
__Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_jacobi", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_d); - __Pyx_XDECREF(__pyx_v_a); - __Pyx_XDECREF(__pyx_v_b); - __Pyx_XDECREF(__pyx_v_c); - __Pyx_XDECREF(__pyx_v_g); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":110 - * return hyp2f1(a, b, c, g) * d - * - * def eval_sh_jacobi(n, p, q, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Jacobi polynomial at a point.""" - * factor = np.exp(gammaln(1+n) + gammaln(n+p) - gammaln(2*n+p)) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi[] = "Evaluate shifted Jacobi polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi = {__Pyx_NAMESTR("eval_sh_jacobi"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_p = 0; - PyObject *__pyx_v_q = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_factor = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__p,&__pyx_n_s__q,&__pyx_n_s__x,&__pyx_n_s__out,0}; - 
__Pyx_RefNannySetupContext("eval_sh_jacobi"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[5] = {0,0,0,0,0}; - values[4] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_sh_jacobi", 0, 4, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__q); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_sh_jacobi", 0, 4, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[3])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_sh_jacobi", 0, 4, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 4: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_sh_jacobi") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; 
goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_p = values[1]; - __pyx_v_q = values[2]; - __pyx_v_x = values[3]; - __pyx_v_out = values[4]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 4); - case 4: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 3); - __pyx_v_q = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_p = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_sh_jacobi", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_jacobi", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":112 - * def eval_sh_jacobi(n, p, q, x, out=None): - * """Evaluate shifted Jacobi polynomial at a point.""" - * factor = np.exp(gammaln(1+n) + gammaln(n+p) - gammaln(2*n+p)) # <<<<<<<<<<<<<< - * return factor * eval_jacobi(n, p-q, q-1, 2*x-1) - * - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__exp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - 
__pyx_t_3 = PyNumber_Add(__pyx_int_1, __pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyNumber_Add(__pyx_v_n, __pyx_v_p); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Add(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(__pyx_int_2, __pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Add(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject 
*)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_v_factor = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/special/orthogonal_eval.pyx":113 - * """Evaluate shifted Jacobi polynomial at a point.""" - * factor = np.exp(gammaln(1+n) + gammaln(n+p) - gammaln(2*n+p)) - * return factor * eval_jacobi(n, p-q, q-1, 2*x-1) # <<<<<<<<<<<<<< - * - * def eval_gegenbauer(n, alpha, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_jacobi); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Subtract(__pyx_v_p, __pyx_v_q); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyNumber_Subtract(__pyx_v_q, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyNumber_Multiply(__pyx_int_2, __pyx_v_x); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyNumber_Subtract(__pyx_t_5, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_n); - 
__Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_4 = 0; - __pyx_t_2 = 0; - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Multiply(__pyx_v_factor, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_jacobi", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_factor); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":115 - * return factor * eval_jacobi(n, p-q, q-1, 2*x-1) - * - * def eval_gegenbauer(n, alpha, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Gegenbauer polynomial at a point.""" - * d = gamma(n+2*alpha)/gamma(1+n)/gamma(2*alpha) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_3eval_gegenbauer(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_3eval_gegenbauer[] = "Evaluate Gegenbauer polynomial at a point."; -static PyMethodDef 
__pyx_mdef_5scipy_7special_15orthogonal_eval_3eval_gegenbauer = {__Pyx_NAMESTR("eval_gegenbauer"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_3eval_gegenbauer, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_3eval_gegenbauer)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_3eval_gegenbauer(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_alpha = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_d = NULL; - PyObject *__pyx_v_a = NULL; - PyObject *__pyx_v_b = NULL; - PyObject *__pyx_v_c = NULL; - PyObject *__pyx_v_g = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__alpha,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_gegenbauer"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[4] = {0,0,0,0}; - values[3] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_gegenbauer", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_gegenbauer", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[3] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_gegenbauer") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_alpha = values[1]; - __pyx_v_x = values[2]; - __pyx_v_out = values[3]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_alpha = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_gegenbauer", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_gegenbauer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":117 - * def eval_gegenbauer(n, alpha, x, out=None): - * """Evaluate Gegenbauer polynomial at a point.""" - * d = gamma(n+2*alpha)/gamma(1+n)/gamma(2*alpha) # <<<<<<<<<<<<<< - * a = -n - * b = n + 2*alpha - */ - __pyx_t_1 = 
__Pyx_GetName(__pyx_m, __pyx_n_s__gamma); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_v_alpha); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Add(__pyx_v_n, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__gamma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyNumber_Add(__pyx_int_1, __pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject 
*)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__gamma); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(__pyx_int_2, __pyx_v_alpha); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_d = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":118 - * """Evaluate Gegenbauer 
polynomial at a point.""" - * d = gamma(n+2*alpha)/gamma(1+n)/gamma(2*alpha) - * a = -n # <<<<<<<<<<<<<< - * b = n + 2*alpha - * c = alpha + 0.5 - */ - __pyx_t_2 = PyNumber_Negative(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_a = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":119 - * d = gamma(n+2*alpha)/gamma(1+n)/gamma(2*alpha) - * a = -n - * b = n + 2*alpha # <<<<<<<<<<<<<< - * c = alpha + 0.5 - * g = (1-x)/2.0 - */ - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_v_alpha); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Add(__pyx_v_n, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_b = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/special/orthogonal_eval.pyx":120 - * a = -n - * b = n + 2*alpha - * c = alpha + 0.5 # <<<<<<<<<<<<<< - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d - */ - __pyx_t_3 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyNumber_Add(__pyx_v_alpha, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_c = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":121 - * b = n + 2*alpha - * c = alpha + 0.5 - * g = (1-x)/2.0 # <<<<<<<<<<<<<< - * return hyp2f1(a, b, c, g) * d - * - */ - __pyx_t_2 = PyNumber_Subtract(__pyx_int_1, __pyx_v_x); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_g = __pyx_t_4; - __pyx_t_4 = 0; - - /* "scipy/special/orthogonal_eval.pyx":122 - * c = alpha + 0.5 - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d # <<<<<<<<<<<<<< - * - * def eval_chebyt(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__hyp2f1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_a); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_a); - __Pyx_GIVEREF(__pyx_v_a); - __Pyx_INCREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_b); - __Pyx_GIVEREF(__pyx_v_b); - __Pyx_INCREF(__pyx_v_c); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_c); - __Pyx_GIVEREF(__pyx_v_c); - __Pyx_INCREF(__pyx_v_g); - PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_g); - __Pyx_GIVEREF(__pyx_v_g); - __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = 
PyNumber_Multiply(__pyx_t_2, __pyx_v_d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_gegenbauer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_d); - __Pyx_XDECREF(__pyx_v_a); - __Pyx_XDECREF(__pyx_v_b); - __Pyx_XDECREF(__pyx_v_c); - __Pyx_XDECREF(__pyx_v_g); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":124 - * return hyp2f1(a, b, c, g) * d - * - * def eval_chebyt(n, x, out=None): # <<<<<<<<<<<<<< - * """ - * Evaluate Chebyshev T polynomial at a point. 
- */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_4eval_chebyt(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_4eval_chebyt[] = "\n Evaluate Chebyshev T polynomial at a point.\n\n This routine is numerically stable for `x` in ``[-1, 1]`` at least\n up to order ``10000``.\n "; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_4eval_chebyt = {__Pyx_NAMESTR("eval_chebyt"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_4eval_chebyt, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_4eval_chebyt)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_4eval_chebyt(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_chebyt"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - 
__Pyx_RaiseArgtupleInvalid("eval_chebyt", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_chebyt") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_chebyt", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebyt", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":131 - * up to order ``10000``. 
- * """ - * return _eval_chebyt(n, x, out) # <<<<<<<<<<<<<< - * - * def eval_chebyu(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___eval_chebyt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __Pyx_INCREF(__pyx_v_out); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_out); - __Pyx_GIVEREF(__pyx_v_out); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebyt", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":133 - * return _eval_chebyt(n, x, out) - * - * def eval_chebyu(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Chebyshev U polynomial at a point.""" - * d = n+1 - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_5eval_chebyu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char 
__pyx_doc_5scipy_7special_15orthogonal_eval_5eval_chebyu[] = "Evaluate Chebyshev U polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_5eval_chebyu = {__Pyx_NAMESTR("eval_chebyu"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_5eval_chebyu, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_5eval_chebyu)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_5eval_chebyu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_d = NULL; - PyObject *__pyx_v_a = NULL; - PyObject *__pyx_v_b = NULL; - double __pyx_v_c; - PyObject *__pyx_v_g = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_chebyu"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_chebyu", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_chebyu") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_chebyu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebyu", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":135 - * def eval_chebyu(n, x, out=None): - * """Evaluate Chebyshev U polynomial at a point.""" - * d = n+1 # <<<<<<<<<<<<<< - * a = -n - * b = n+2 - */ - __pyx_t_1 = PyNumber_Add(__pyx_v_n, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_d = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":136 - * """Evaluate Chebyshev U polynomial at a point.""" - * d = n+1 - * a = -n # <<<<<<<<<<<<<< - * b = n+2 - * c = 1.5 - */ - __pyx_t_1 = PyNumber_Negative(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_a = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":137 - * d = n+1 - * a = -n - * b = n+2 # <<<<<<<<<<<<<< - * c = 1.5 - * g = (1-x)/2.0 - */ - __pyx_t_1 = PyNumber_Add(__pyx_v_n, __pyx_int_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_b = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":138 - * a = -n - * b = n+2 - * c = 1.5 # <<<<<<<<<<<<<< - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d - */ - __pyx_v_c = 1.5; - - /* "scipy/special/orthogonal_eval.pyx":139 - * b = n+2 - * c = 1.5 - * g = (1-x)/2.0 # <<<<<<<<<<<<<< - * return hyp2f1(a, b, c, g) * d - * - */ - __pyx_t_1 = PyNumber_Subtract(__pyx_int_1, __pyx_v_x); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_g = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/special/orthogonal_eval.pyx":140 - * c = 1.5 - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d # <<<<<<<<<<<<<< - * - * def eval_chebys(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__hyp2f1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - 
__pyx_t_2 = PyFloat_FromDouble(__pyx_v_c); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_a); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_a); - __Pyx_GIVEREF(__pyx_v_a); - __Pyx_INCREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_b); - __Pyx_GIVEREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_g); - PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_g); - __Pyx_GIVEREF(__pyx_v_g); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_t_2, __pyx_v_d); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebyu", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_d); - __Pyx_XDECREF(__pyx_v_a); - __Pyx_XDECREF(__pyx_v_b); - __Pyx_XDECREF(__pyx_v_g); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":142 - * return hyp2f1(a, b, c, g) * d - * - * def 
eval_chebys(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Chebyshev S polynomial at a point.""" - * return eval_chebyu(n, x/2, out=out) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_6eval_chebys(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_6eval_chebys[] = "Evaluate Chebyshev S polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_6eval_chebys = {__Pyx_NAMESTR("eval_chebys"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_6eval_chebys, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_6eval_chebys)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_6eval_chebys(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_chebys"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_chebys", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_chebys") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_chebys", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebys", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":144 - * def eval_chebys(n, x, out=None): - * """Evaluate Chebyshev S polynomial at a point.""" - * return eval_chebyu(n, x/2, out=out) # <<<<<<<<<<<<<< - * - * def eval_chebyc(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_chebyu); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = 
__Pyx_PyNumber_Divide(__pyx_v_x, __pyx_int_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__out), __pyx_v_out) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebys", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":146 - * return eval_chebyu(n, x/2, out=out) - * - * def eval_chebyc(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate 
Chebyshev C polynomial at a point.""" - * return 2*eval_chebyt(n, x/2.0, out) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_7eval_chebyc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_7eval_chebyc[] = "Evaluate Chebyshev C polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_7eval_chebyc = {__Pyx_NAMESTR("eval_chebyc"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_7eval_chebyc, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_7eval_chebyc)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_7eval_chebyc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_chebyc"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - 
__Pyx_RaiseArgtupleInvalid("eval_chebyc", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_chebyc") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_chebyc", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebyc", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":148 - * def eval_chebyc(n, x, out=None): - * """Evaluate Chebyshev C polynomial at a point.""" - * return 2*eval_chebyt(n, x/2.0, out) # <<<<<<<<<<<<<< - * - * def eval_sh_chebyt(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_chebyt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_v_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_out); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_out); - __Pyx_GIVEREF(__pyx_v_out); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_chebyc", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":150 - * return 2*eval_chebyt(n, x/2.0, out) - * - * def eval_sh_chebyt(n, x, out=None): # 
<<<<<<<<<<<<<< - * """Evaluate shifted Chebyshev T polynomial at a point.""" - * return eval_chebyt(n, 2*x-1, out=out) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt[] = "Evaluate shifted Chebyshev T polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt = {__Pyx_NAMESTR("eval_sh_chebyt"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_sh_chebyt"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_sh_chebyt", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_sh_chebyt") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_sh_chebyt", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_chebyt", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":152 - * def eval_sh_chebyt(n, x, out=None): - * """Evaluate shifted Chebyshev T polynomial at a point.""" - * return eval_chebyt(n, 2*x-1, out=out) # <<<<<<<<<<<<<< - * - * def eval_sh_chebyu(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_chebyt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_v_x); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__out), __pyx_v_out) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_chebyt", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":154 - * return eval_chebyt(n, 2*x-1, out=out) - * - * def eval_sh_chebyu(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Chebyshev U polynomial at a point.""" - * return eval_chebyu(n, 2*x-1, out=out) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu[] = "Evaluate shifted Chebyshev U polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu = {__Pyx_NAMESTR("eval_sh_chebyu"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_sh_chebyu"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_sh_chebyu", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_sh_chebyu") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_sh_chebyu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_chebyu", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":156 - * def eval_sh_chebyu(n, x, out=None): - * """Evaluate shifted Chebyshev U polynomial at a point.""" - * return 
eval_chebyu(n, 2*x-1, out=out) # <<<<<<<<<<<<<< - * - * def eval_legendre(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_chebyu); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_v_x); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__out), __pyx_v_out) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; 
- goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_chebyu", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":158 - * return eval_chebyu(n, 2*x-1, out=out) - * - * def eval_legendre(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Legendre polynomial at a point.""" - * d = 1 - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_10eval_legendre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_10eval_legendre[] = "Evaluate Legendre polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_10eval_legendre = {__Pyx_NAMESTR("eval_legendre"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_10eval_legendre, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_10eval_legendre)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_10eval_legendre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_d = NULL; - PyObject *__pyx_v_a = NULL; - PyObject *__pyx_v_b = NULL; - long __pyx_v_c; - PyObject *__pyx_v_g = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_legendre"); - __pyx_self = __pyx_self; - if 
(unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_legendre", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_legendre") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_legendre", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - 
__Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_legendre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":160 - * def eval_legendre(n, x, out=None): - * """Evaluate Legendre polynomial at a point.""" - * d = 1 # <<<<<<<<<<<<<< - * a = -n - * b = n+1 - */ - __Pyx_INCREF(__pyx_int_1); - __pyx_v_d = __pyx_int_1; - - /* "scipy/special/orthogonal_eval.pyx":161 - * """Evaluate Legendre polynomial at a point.""" - * d = 1 - * a = -n # <<<<<<<<<<<<<< - * b = n+1 - * c = 1 - */ - __pyx_t_1 = PyNumber_Negative(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_a = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":162 - * d = 1 - * a = -n - * b = n+1 # <<<<<<<<<<<<<< - * c = 1 - * g = (1-x)/2.0 - */ - __pyx_t_1 = PyNumber_Add(__pyx_v_n, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_b = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":163 - * a = -n - * b = n+1 - * c = 1 # <<<<<<<<<<<<<< - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d - */ - __pyx_v_c = 1; - - /* "scipy/special/orthogonal_eval.pyx":164 - * b = n+1 - * c = 1 - * g = (1-x)/2.0 # <<<<<<<<<<<<<< - * return hyp2f1(a, b, c, g) * d - * - */ - __pyx_t_1 = PyNumber_Subtract(__pyx_int_1, __pyx_v_x); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_2); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_g = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/special/orthogonal_eval.pyx":165 - * c = 1 - * g = (1-x)/2.0 - * return hyp2f1(a, b, c, g) * d # <<<<<<<<<<<<<< - * - * def eval_sh_legendre(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__hyp2f1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyInt_FromLong(__pyx_v_c); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_a); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_a); - __Pyx_GIVEREF(__pyx_v_a); - __Pyx_INCREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_b); - __Pyx_GIVEREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_g); - PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_g); - __Pyx_GIVEREF(__pyx_v_g); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_t_2, __pyx_v_d); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_legendre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_d); - __Pyx_XDECREF(__pyx_v_a); - __Pyx_XDECREF(__pyx_v_b); - __Pyx_XDECREF(__pyx_v_g); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":167 - * return hyp2f1(a, b, c, g) * d - * - * def eval_sh_legendre(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Legendre polynomial at a point.""" - * return eval_legendre(n, 2*x-1, out=out) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_11eval_sh_legendre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_11eval_sh_legendre[] = "Evaluate shifted Legendre polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_11eval_sh_legendre = {__Pyx_NAMESTR("eval_sh_legendre"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_11eval_sh_legendre, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_11eval_sh_legendre)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_11eval_sh_legendre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_sh_legendre"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_sh_legendre", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_sh_legendre") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_sh_legendre", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_legendre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":169 - * def eval_sh_legendre(n, x, out=None): - * """Evaluate shifted Legendre polynomial at a point.""" - * return eval_legendre(n, 2*x-1, out=out) # <<<<<<<<<<<<<< - * - * def eval_genlaguerre(n, alpha, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_legendre); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_v_x); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__out), __pyx_v_out) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_sh_legendre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":171 - * return eval_legendre(n, 2*x-1, out=out) - * - * def eval_genlaguerre(n, alpha, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate generalized Laguerre polynomial at a point.""" - * d = binom(n+alpha, n) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_12eval_genlaguerre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_12eval_genlaguerre[] = "Evaluate generalized Laguerre polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_12eval_genlaguerre = {__Pyx_NAMESTR("eval_genlaguerre"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_12eval_genlaguerre, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_12eval_genlaguerre)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_12eval_genlaguerre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_alpha = 0; - PyObject *__pyx_v_x = 0; - 
PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_d = NULL; - PyObject *__pyx_v_a = NULL; - PyObject *__pyx_v_b = NULL; - PyObject *__pyx_v_g = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__alpha,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_genlaguerre"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[4] = {0,0,0,0}; - values[3] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_genlaguerre", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_genlaguerre", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[3] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_genlaguerre") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_alpha = values[1]; - __pyx_v_x = values[2]; - __pyx_v_out = values[3]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 2); - __pyx_v_alpha = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_genlaguerre", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_genlaguerre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":173 - * def eval_genlaguerre(n, alpha, x, out=None): - * """Evaluate generalized Laguerre polynomial at a point.""" - * d = binom(n+alpha, n) # <<<<<<<<<<<<<< - * a = -n - * b = alpha + 1 - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__binom); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Add(__pyx_v_n, __pyx_v_alpha); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_d = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":174 - * """Evaluate generalized Laguerre polynomial at a point.""" - * d = binom(n+alpha, n) - * a = -n # <<<<<<<<<<<<<< - * b = alpha + 1 - * g = x - */ - __pyx_t_2 = PyNumber_Negative(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_a = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":175 - * d = binom(n+alpha, n) - * a = -n - * b = alpha + 1 # <<<<<<<<<<<<<< - * g = x - * return hyp1f1(a, b, g) * d - */ - __pyx_t_2 = PyNumber_Add(__pyx_v_alpha, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_b = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":176 - * a = -n - * b = alpha + 1 - * g = x # <<<<<<<<<<<<<< - * return hyp1f1(a, b, g) * d - * - */ - __Pyx_INCREF(__pyx_v_x); - __pyx_v_g = __pyx_v_x; - - /* "scipy/special/orthogonal_eval.pyx":177 - * b = alpha + 1 - * g = x - * return hyp1f1(a, b, g) * d # <<<<<<<<<<<<<< - * - * def eval_laguerre(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__hyp1f1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_a); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_a); - __Pyx_GIVEREF(__pyx_v_a); - __Pyx_INCREF(__pyx_v_b); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_b); - __Pyx_GIVEREF(__pyx_v_b); - __Pyx_INCREF(__pyx_v_g); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_g); - __Pyx_GIVEREF(__pyx_v_g); - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_v_d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_genlaguerre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_d); - __Pyx_XDECREF(__pyx_v_a); - __Pyx_XDECREF(__pyx_v_b); - __Pyx_XDECREF(__pyx_v_g); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":179 - * return hyp1f1(a, b, g) * d - * - * def eval_laguerre(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Laguerre polynomial at a point.""" - * return eval_genlaguerre(n, 0., x, out=out) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_13eval_laguerre(PyObject 
*__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_13eval_laguerre[] = "Evaluate Laguerre polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_13eval_laguerre = {__Pyx_NAMESTR("eval_laguerre"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_13eval_laguerre, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_13eval_laguerre)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_13eval_laguerre(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_laguerre"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_laguerre", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_laguerre") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_laguerre", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_laguerre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":181 - * def eval_laguerre(n, x, out=None): - * """Evaluate Laguerre polynomial at a point.""" - * return eval_genlaguerre(n, 0., x, out=out) # <<<<<<<<<<<<<< - * - * def eval_hermite(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_genlaguerre); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyFloat_FromDouble(0.); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = 
PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_2 = 0; - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__out), __pyx_v_out) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_laguerre", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":183 - * return eval_genlaguerre(n, 0., x, out=out) - * - * def eval_hermite(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Hermite polynomial at a point.""" - * n, x = np.broadcast_arrays(n, x) - */ - -static PyObject 
*__pyx_pf_5scipy_7special_15orthogonal_eval_14eval_hermite(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_14eval_hermite[] = "Evaluate Hermite polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_14eval_hermite = {__Pyx_NAMESTR("eval_hermite"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_14eval_hermite, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_14eval_hermite)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_14eval_hermite(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_v_even = NULL; - PyObject *__pyx_v_m = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *(*__pyx_t_5)(PyObject *); - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_hermite"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_hermite", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_hermite") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_hermite", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_hermite", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_n); - __Pyx_INCREF(__pyx_v_x); - __Pyx_INCREF(__pyx_v_out); - - /* "scipy/special/orthogonal_eval.pyx":185 - * def eval_hermite(n, x, out=None): - * """Evaluate Hermite polynomial at a point.""" - * n, x = np.broadcast_arrays(n, x) # <<<<<<<<<<<<<< - * n, x = np.atleast_1d(n, x) - * - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__broadcast_arrays); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { - PyObject* sequence = __pyx_t_3; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_2 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); 
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; - index = 0; __pyx_t_1 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_1)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - index = 1; __pyx_t_2 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_2)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L7_unpacking_done; - __pyx_L6_unpacking_failed:; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L7_unpacking_done:; - } - __Pyx_DECREF(__pyx_v_n); - __pyx_v_n = __pyx_t_1; - __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":186 - * """Evaluate Hermite polynomial at a point.""" - * n, x = np.broadcast_arrays(n, x) - * n, x = np.atleast_1d(n, x) # <<<<<<<<<<<<<< - * - * if out is None: - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__atleast_1d); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { - PyObject* sequence = __pyx_t_1; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyList_GET_ITEM(sequence, 0); - __pyx_t_2 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_4 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; - index = 0; __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) goto __pyx_L8_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - index = 1; __pyx_t_2 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_2)) goto __pyx_L8_unpacking_failed; - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L9_unpacking_done; - __pyx_L8_unpacking_failed:; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L9_unpacking_done:; - } - __Pyx_DECREF(__pyx_v_n); - __pyx_v_n = __pyx_t_3; - __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":188 - * n, x = np.atleast_1d(n, x) - * - * if out is None: # <<<<<<<<<<<<<< - * out = np.zeros_like(0*n + 0*x) - * if (n % 1 != 0).any(): - */ - __pyx_t_6 = (__pyx_v_out == Py_None); - if (__pyx_t_6) { - - /* "scipy/special/orthogonal_eval.pyx":189 - * - * if out is None: - * out = np.zeros_like(0*n + 0*x) # <<<<<<<<<<<<<< - * if (n % 1 != 0).any(): - * raise ValueError("Order must be integer") - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros_like); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_int_0, __pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(__pyx_int_0, __pyx_v_x); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Add(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_out); - __pyx_v_out = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L10; - } - __pyx_L10:; - - /* "scipy/special/orthogonal_eval.pyx":190 - * if out is None: - * out = np.zeros_like(0*n + 0*x) - * if (n % 1 != 0).any(): # <<<<<<<<<<<<<< - * raise ValueError("Order must be integer") - * - */ - __pyx_t_4 = PyNumber_Remainder(__pyx_v_n, __pyx_int_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_int_0, Py_NE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - - /* "scipy/special/orthogonal_eval.pyx":191 - * out = np.zeros_like(0*n + 0*x) - * if (n % 1 != 0).any(): - * raise ValueError("Order must be integer") # <<<<<<<<<<<<<< - * - * even = (n % 2 == 0) - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L11; - } - __pyx_L11:; - - /* "scipy/special/orthogonal_eval.pyx":193 - * raise ValueError("Order must be integer") - * - * even = (n % 2 == 0) # <<<<<<<<<<<<<< - * - * m = n[even]/2 - */ - __pyx_t_3 = PyNumber_Remainder(__pyx_v_n, __pyx_int_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_int_0, Py_EQ); 
if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_even = __pyx_t_4; - __pyx_t_4 = 0; - - /* "scipy/special/orthogonal_eval.pyx":195 - * even = (n % 2 == 0) - * - * m = n[even]/2 # <<<<<<<<<<<<<< - * out[even] = ((-1)**m * 2**(2*m) * gamma(1+m) - * * eval_genlaguerre(m, -0.5, x[even]**2)) - */ - __pyx_t_4 = PyObject_GetItem(__pyx_v_n, __pyx_v_even); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_int_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_m = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/special/orthogonal_eval.pyx":196 - * - * m = n[even]/2 - * out[even] = ((-1)**m * 2**(2*m) * gamma(1+m) # <<<<<<<<<<<<<< - * * eval_genlaguerre(m, -0.5, x[even]**2)) - * - */ - __pyx_t_3 = PyNumber_Power(__pyx_int_neg_1, __pyx_v_m, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Multiply(__pyx_int_2, __pyx_v_m); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyNumber_Power(__pyx_int_2, __pyx_t_4, Py_None); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__gamma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Add(__pyx_int_1, __pyx_v_m); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "scipy/special/orthogonal_eval.pyx":197 - * m = n[even]/2 - * out[even] = ((-1)**m * 2**(2*m) * gamma(1+m) - * * eval_genlaguerre(m, -0.5, x[even]**2)) # <<<<<<<<<<<<<< - * - * m = (n[~even]-1)/2 - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_genlaguerre); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyFloat_FromDouble(-0.5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyObject_GetItem(__pyx_v_x, __pyx_v_even); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = PyNumber_Power(__pyx_t_2, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_m); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_m); - __Pyx_GIVEREF(__pyx_v_m); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_4 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_7); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "scipy/special/orthogonal_eval.pyx":196 - * - * m = n[even]/2 - * out[even] = ((-1)**m * 2**(2*m) * gamma(1+m) # <<<<<<<<<<<<<< - * * eval_genlaguerre(m, -0.5, x[even]**2)) - * - */ - if (PyObject_SetItem(__pyx_v_out, __pyx_v_even, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* 
"scipy/special/orthogonal_eval.pyx":199 - * * eval_genlaguerre(m, -0.5, x[even]**2)) - * - * m = (n[~even]-1)/2 # <<<<<<<<<<<<<< - * out[~even] = ((-1)**m * 2**(2*m+1) * gamma(1+m) - * * x[~even] * eval_genlaguerre(m, 0.5, x[~even]**2)) - */ - __pyx_t_2 = PyNumber_Invert(__pyx_v_even); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = PyObject_GetItem(__pyx_v_n, __pyx_t_2); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Subtract(__pyx_t_7, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_int_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_m); - __pyx_v_m = __pyx_t_7; - __pyx_t_7 = 0; - - /* "scipy/special/orthogonal_eval.pyx":200 - * - * m = (n[~even]-1)/2 - * out[~even] = ((-1)**m * 2**(2*m+1) * gamma(1+m) # <<<<<<<<<<<<<< - * * x[~even] * eval_genlaguerre(m, 0.5, x[~even]**2)) - * - */ - __pyx_t_7 = PyNumber_Power(__pyx_int_neg_1, __pyx_v_m, Py_None); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_v_m); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Power(__pyx_int_2, __pyx_t_1, Py_None); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__gamma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = PyNumber_Add(__pyx_int_1, __pyx_v_m); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_7); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* 
"scipy/special/orthogonal_eval.pyx":201 - * m = (n[~even]-1)/2 - * out[~even] = ((-1)**m * 2**(2*m+1) * gamma(1+m) - * * x[~even] * eval_genlaguerre(m, 0.5, x[~even]**2)) # <<<<<<<<<<<<<< - * - * return out - */ - __pyx_t_7 = PyNumber_Invert(__pyx_v_even); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_1 = PyObject_GetItem(__pyx_v_x, __pyx_t_7); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyNumber_Multiply(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_genlaguerre); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyNumber_Invert(__pyx_v_even); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyObject_GetItem(__pyx_v_x, __pyx_t_2); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Power(__pyx_t_4, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __Pyx_INCREF(__pyx_v_m); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_m); - __Pyx_GIVEREF(__pyx_v_m); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_3 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyNumber_Multiply(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":200 - * - * m = (n[~even]-1)/2 - * out[~even] = ((-1)**m * 2**(2*m+1) * gamma(1+m) # <<<<<<<<<<<<<< - * * x[~even] * eval_genlaguerre(m, 0.5, x[~even]**2)) - * - */ - __pyx_t_2 = PyNumber_Invert(__pyx_v_even); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetItem(__pyx_v_out, __pyx_t_2, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "scipy/special/orthogonal_eval.pyx":203 - * * x[~even] * eval_genlaguerre(m, 0.5, x[~even]**2)) - * - * return out # <<<<<<<<<<<<<< - * - * def eval_hermitenorm(n, x, out=None): - */ - __Pyx_XDECREF(__pyx_r); - 
__Pyx_INCREF(__pyx_v_out); - __pyx_r = __pyx_v_out; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_hermite", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_even); - __Pyx_XDECREF(__pyx_v_m); - __Pyx_XDECREF(__pyx_v_n); - __Pyx_XDECREF(__pyx_v_x); - __Pyx_XDECREF(__pyx_v_out); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/special/orthogonal_eval.pyx":205 - * return out - * - * def eval_hermitenorm(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate normalized Hermite polynomial at a point.""" - * return eval_hermite(n, x/sqrt(2)) * 2**(-n/2.0) - */ - -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_15eval_hermitenorm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_5scipy_7special_15orthogonal_eval_15eval_hermitenorm[] = "Evaluate normalized Hermite polynomial at a point."; -static PyMethodDef __pyx_mdef_5scipy_7special_15orthogonal_eval_15eval_hermitenorm = {__Pyx_NAMESTR("eval_hermitenorm"), (PyCFunction)__pyx_pf_5scipy_7special_15orthogonal_eval_15eval_hermitenorm, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5scipy_7special_15orthogonal_eval_15eval_hermitenorm)}; -static PyObject *__pyx_pf_5scipy_7special_15orthogonal_eval_15eval_hermitenorm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_n = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_out = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__x,&__pyx_n_s__out,0}; - __Pyx_RefNannySetupContext("eval_hermitenorm"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("eval_hermitenorm", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__out); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "eval_hermitenorm") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_n = values[0]; - __pyx_v_x = values[1]; - __pyx_v_out = values[2]; - } else { - __pyx_v_out = ((PyObject *)Py_None); - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: - __pyx_v_out = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_n = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("eval_hermitenorm", 0, 2, 3, 
PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_hermitenorm", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* "scipy/special/orthogonal_eval.pyx":207 - * def eval_hermitenorm(n, x, out=None): - * """Evaluate normalized Hermite polynomial at a point.""" - * return eval_hermite(n, x/sqrt(2)) * 2**(-n/2.0) # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__eval_hermite); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyFloat_FromDouble(sqrt(2.0)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_v_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n); - __Pyx_GIVEREF(__pyx_v_n); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = 
PyNumber_Negative(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Power(__pyx_int_2, __pyx_t_4, Py_None); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("scipy.special.orthogonal_eval.eval_hermitenorm", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("orthogonal_eval"), - __Pyx_DOCSTR(__pyx_k_3), /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload 
*/ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, - {&__pyx_n_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 1}, - {&__pyx_n_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s___eval_chebyt, __pyx_k___eval_chebyt, sizeof(__pyx_k___eval_chebyt), 0, 0, 1, 1}, - {&__pyx_n_s__alpha, __pyx_k__alpha, sizeof(__pyx_k__alpha), 0, 0, 1, 1}, - {&__pyx_n_s__any, __pyx_k__any, sizeof(__pyx_k__any), 0, 0, 1, 1}, - {&__pyx_n_s__atleast_1d, __pyx_k__atleast_1d, sizeof(__pyx_k__atleast_1d), 0, 0, 1, 1}, - {&__pyx_n_s__beta, __pyx_k__beta, sizeof(__pyx_k__beta), 0, 0, 1, 1}, - {&__pyx_n_s__binom, __pyx_k__binom, sizeof(__pyx_k__binom), 0, 0, 1, 1}, - {&__pyx_n_s__broadcast_arrays, __pyx_k__broadcast_arrays, sizeof(__pyx_k__broadcast_arrays), 0, 0, 1, 1}, - {&__pyx_n_s__eval_chebyc, __pyx_k__eval_chebyc, sizeof(__pyx_k__eval_chebyc), 0, 0, 1, 1}, - {&__pyx_n_s__eval_chebys, __pyx_k__eval_chebys, sizeof(__pyx_k__eval_chebys), 0, 0, 1, 1}, - {&__pyx_n_s__eval_chebyt, __pyx_k__eval_chebyt, sizeof(__pyx_k__eval_chebyt), 0, 0, 1, 1}, - {&__pyx_n_s__eval_chebyu, __pyx_k__eval_chebyu, sizeof(__pyx_k__eval_chebyu), 0, 0, 1, 1}, - {&__pyx_n_s__eval_gegenbauer, __pyx_k__eval_gegenbauer, sizeof(__pyx_k__eval_gegenbauer), 0, 0, 1, 1}, - {&__pyx_n_s__eval_genlaguerre, __pyx_k__eval_genlaguerre, sizeof(__pyx_k__eval_genlaguerre), 0, 0, 1, 1}, - {&__pyx_n_s__eval_hermite, __pyx_k__eval_hermite, sizeof(__pyx_k__eval_hermite), 0, 0, 1, 1}, - {&__pyx_n_s__eval_hermitenorm, __pyx_k__eval_hermitenorm, sizeof(__pyx_k__eval_hermitenorm), 0, 0, 1, 1}, - {&__pyx_n_s__eval_jacobi, __pyx_k__eval_jacobi, 
sizeof(__pyx_k__eval_jacobi), 0, 0, 1, 1}, - {&__pyx_n_s__eval_laguerre, __pyx_k__eval_laguerre, sizeof(__pyx_k__eval_laguerre), 0, 0, 1, 1}, - {&__pyx_n_s__eval_legendre, __pyx_k__eval_legendre, sizeof(__pyx_k__eval_legendre), 0, 0, 1, 1}, - {&__pyx_n_s__eval_sh_chebyt, __pyx_k__eval_sh_chebyt, sizeof(__pyx_k__eval_sh_chebyt), 0, 0, 1, 1}, - {&__pyx_n_s__eval_sh_chebyu, __pyx_k__eval_sh_chebyu, sizeof(__pyx_k__eval_sh_chebyu), 0, 0, 1, 1}, - {&__pyx_n_s__eval_sh_jacobi, __pyx_k__eval_sh_jacobi, sizeof(__pyx_k__eval_sh_jacobi), 0, 0, 1, 1}, - {&__pyx_n_s__eval_sh_legendre, __pyx_k__eval_sh_legendre, sizeof(__pyx_k__eval_sh_legendre), 0, 0, 1, 1}, - {&__pyx_n_s__exp, __pyx_k__exp, sizeof(__pyx_k__exp), 0, 0, 1, 1}, - {&__pyx_n_s__gamma, __pyx_k__gamma, sizeof(__pyx_k__gamma), 0, 0, 1, 1}, - {&__pyx_n_s__gammaln, __pyx_k__gammaln, sizeof(__pyx_k__gammaln), 0, 0, 1, 1}, - {&__pyx_n_s__hyp1f1, __pyx_k__hyp1f1, sizeof(__pyx_k__hyp1f1), 0, 0, 1, 1}, - {&__pyx_n_s__hyp2f1, __pyx_k__hyp2f1, sizeof(__pyx_k__hyp2f1), 0, 0, 1, 1}, - {&__pyx_n_s__k, __pyx_k__k, sizeof(__pyx_k__k), 0, 0, 1, 1}, - {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__out, __pyx_k__out, sizeof(__pyx_k__out), 0, 0, 1, 1}, - {&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1}, - {&__pyx_n_s__q, __pyx_k__q, sizeof(__pyx_k__q), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, - {&__pyx_n_s__zeros_like, __pyx_k__zeros_like, sizeof(__pyx_k__zeros_like), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "scipy/special/orthogonal_eval.pyx":191 - * out = np.zeros_like(0*n + 0*x) - * if (n % 1 != 0).any(): - * raise ValueError("Order must be integer") # <<<<<<<<<<<<<< - * - * even = (n % 2 == 0) - */ - __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); - PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; 
-} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initorthogonal_eval(void); /*proto*/ -PyMODINIT_FUNC initorthogonal_eval(void) -#else -PyMODINIT_FUNC PyInit_orthogonal_eval(void); /*proto*/ -PyMODINIT_FUNC PyInit_orthogonal_eval(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_orthogonal_eval(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? 
*/ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("orthogonal_eval"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_3), 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. ---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__special__orthogonal_eval) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - /*--- Type import code ---*/ - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/special/orthogonal_eval.pyx":72 - * cdef PyUFuncGenericFunction _id_d_funcs[1] - * - * _id_d_types[0] = NPY_LONG # 
<<<<<<<<<<<<<< - * _id_d_types[1] = NPY_DOUBLE - * _id_d_types[2] = NPY_DOUBLE - */ - (__pyx_v_5scipy_7special_15orthogonal_eval__id_d_types[0]) = NPY_LONG; - - /* "scipy/special/orthogonal_eval.pyx":73 - * - * _id_d_types[0] = NPY_LONG - * _id_d_types[1] = NPY_DOUBLE # <<<<<<<<<<<<<< - * _id_d_types[2] = NPY_DOUBLE - * - */ - (__pyx_v_5scipy_7special_15orthogonal_eval__id_d_types[1]) = NPY_DOUBLE; - - /* "scipy/special/orthogonal_eval.pyx":74 - * _id_d_types[0] = NPY_LONG - * _id_d_types[1] = NPY_DOUBLE - * _id_d_types[2] = NPY_DOUBLE # <<<<<<<<<<<<<< - * - * _id_d_funcs[0] = _loop_id_d - */ - (__pyx_v_5scipy_7special_15orthogonal_eval__id_d_types[2]) = NPY_DOUBLE; - - /* "scipy/special/orthogonal_eval.pyx":76 - * _id_d_types[2] = NPY_DOUBLE - * - * _id_d_funcs[0] = _loop_id_d # <<<<<<<<<<<<<< - * - * import_array() - */ - (__pyx_v_5scipy_7special_15orthogonal_eval__id_d_funcs[0]) = __pyx_f_5scipy_7special_15orthogonal_eval__loop_id_d; - - /* "scipy/special/orthogonal_eval.pyx":78 - * _id_d_funcs[0] = _loop_id_d - * - * import_array() # <<<<<<<<<<<<<< - * import_ufunc() - * - */ - import_array(); - - /* "scipy/special/orthogonal_eval.pyx":79 - * - * import_array() - * import_ufunc() # <<<<<<<<<<<<<< - * - * #-- - */ - import_ufunc(); - - /* "scipy/special/orthogonal_eval.pyx":84 - * - * cdef void *chebyt_data[1] - * chebyt_data[0] = eval_poly_chebyt # <<<<<<<<<<<<<< - * _eval_chebyt = PyUFunc_FromFuncAndData(_id_d_funcs, chebyt_data, - * _id_d_types, 1, 2, 1, 0, "", "", 0) - */ - (__pyx_v_5scipy_7special_15orthogonal_eval_chebyt_data[0]) = ((void *)__pyx_f_5scipy_7special_15orthogonal_eval_eval_poly_chebyt); - - /* "scipy/special/orthogonal_eval.pyx":86 - * chebyt_data[0] = eval_poly_chebyt - * _eval_chebyt = PyUFunc_FromFuncAndData(_id_d_funcs, chebyt_data, - * _id_d_types, 1, 2, 1, 0, "", "", 0) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = PyUFunc_FromFuncAndData(__pyx_v_5scipy_7special_15orthogonal_eval__id_d_funcs, 
__pyx_v_5scipy_7special_15orthogonal_eval_chebyt_data, __pyx_v_5scipy_7special_15orthogonal_eval__id_d_types, 1, 2, 1, 0, __pyx_k_4, __pyx_k_4, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___eval_chebyt, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":93 - * #------------------------------------------------------------------------------ - * - * import numpy as np # <<<<<<<<<<<<<< - * from scipy.special._cephes import gamma, hyp2f1, hyp1f1, gammaln - * from numpy import exp - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":94 - * - * import numpy as np - * from scipy.special._cephes import gamma, hyp2f1, hyp1f1, gammaln # <<<<<<<<<<<<<< - * from numpy import exp - * - */ - __pyx_t_1 = PyList_New(4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__gamma)); - PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__gamma)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__gamma)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__hyp2f1)); - PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__hyp2f1)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__hyp2f1)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__hyp1f1)); - PyList_SET_ITEM(__pyx_t_1, 2, 
((PyObject *)__pyx_n_s__hyp1f1)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__hyp1f1)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__gammaln)); - PyList_SET_ITEM(__pyx_t_1, 3, ((PyObject *)__pyx_n_s__gammaln)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__gammaln)); - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_5), ((PyObject *)__pyx_t_1), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__gamma); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gamma, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__hyp2f1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__hyp2f1, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__hyp1f1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__hyp1f1, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__gammaln); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gammaln, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/special/orthogonal_eval.pyx":95 - * import numpy as np - * from scipy.special._cephes import gamma, hyp2f1, hyp1f1, gammaln - * from numpy import exp # <<<<<<<<<<<<<< - * - * def binom(n, k): - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__exp)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s__exp)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__exp)); - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__exp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__exp, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":97 - * from numpy import exp - * - * def binom(n, k): # <<<<<<<<<<<<<< - * """Binomial coefficient""" - * return np.exp(gammaln(1+n) - gammaln(1+k) - gammaln(1+n-k)) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_binom, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__binom, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":101 - * return np.exp(gammaln(1+n) - gammaln(1+k) - gammaln(1+n-k)) - * - * def eval_jacobi(n, alpha, beta, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Jacobi polynomial at a point.""" - * d = binom(n+alpha, n) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_1eval_jacobi, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_jacobi, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":110 - * return hyp2f1(a, b, c, g) * d - * - * def eval_sh_jacobi(n, p, q, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Jacobi polynomial at a point.""" - * factor = np.exp(gammaln(1+n) + gammaln(n+p) - gammaln(2*n+p)) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_2eval_sh_jacobi, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_sh_jacobi, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":115 - * return factor * eval_jacobi(n, p-q, q-1, 2*x-1) - * - * def eval_gegenbauer(n, alpha, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Gegenbauer polynomial at a point.""" - * d = 
gamma(n+2*alpha)/gamma(1+n)/gamma(2*alpha) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_3eval_gegenbauer, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_gegenbauer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":124 - * return hyp2f1(a, b, c, g) * d - * - * def eval_chebyt(n, x, out=None): # <<<<<<<<<<<<<< - * """ - * Evaluate Chebyshev T polynomial at a point. - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_4eval_chebyt, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_chebyt, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":133 - * return _eval_chebyt(n, x, out) - * - * def eval_chebyu(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Chebyshev U polynomial at a point.""" - * d = n+1 - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_5eval_chebyu, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_chebyu, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":142 - * return hyp2f1(a, b, c, g) * d - * - * def eval_chebys(n, 
x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Chebyshev S polynomial at a point.""" - * return eval_chebyu(n, x/2, out=out) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_6eval_chebys, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_chebys, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":146 - * return eval_chebyu(n, x/2, out=out) - * - * def eval_chebyc(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Chebyshev C polynomial at a point.""" - * return 2*eval_chebyt(n, x/2.0, out) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_7eval_chebyc, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_chebyc, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":150 - * return 2*eval_chebyt(n, x/2.0, out) - * - * def eval_sh_chebyt(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Chebyshev T polynomial at a point.""" - * return eval_chebyt(n, 2*x-1, out=out) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_8eval_sh_chebyt, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_sh_chebyt, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":154 - * return eval_chebyt(n, 2*x-1, out=out) - * - * def eval_sh_chebyu(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Chebyshev U polynomial at a point.""" - * return eval_chebyu(n, 2*x-1, out=out) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_9eval_sh_chebyu, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_sh_chebyu, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":158 - * return eval_chebyu(n, 2*x-1, out=out) - * - * def eval_legendre(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Legendre polynomial at a point.""" - * d = 1 - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_10eval_legendre, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_legendre, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":167 - * return hyp2f1(a, b, c, g) * d - * - * def eval_sh_legendre(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate shifted Legendre polynomial at a point.""" - * return eval_legendre(n, 2*x-1, out=out) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_11eval_sh_legendre, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_sh_legendre, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":171 - * return eval_legendre(n, 2*x-1, out=out) - * - * def eval_genlaguerre(n, alpha, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate generalized Laguerre polynomial at a point.""" - * d = binom(n+alpha, n) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_12eval_genlaguerre, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_genlaguerre, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":179 - * return hyp1f1(a, b, g) * d - * - * def eval_laguerre(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Laguerre polynomial at a point.""" - * return eval_genlaguerre(n, 0., x, out=out) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_13eval_laguerre, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_laguerre, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":183 - * return eval_genlaguerre(n, 0., x, out=out) - * - * def eval_hermite(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate Hermite polynomial at a point.""" - * n, x = np.broadcast_arrays(n, x) - */ - __pyx_t_1 = 
PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_14eval_hermite, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_hermite, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":205 - * return out - * - * def eval_hermitenorm(n, x, out=None): # <<<<<<<<<<<<<< - * """Evaluate normalized Hermite polynomial at a point.""" - * return eval_hermite(n, x/sqrt(2)) * 2**(-n/2.0) - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5scipy_7special_15orthogonal_eval_15eval_hermitenorm, NULL, __pyx_n_s_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__eval_hermitenorm, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/special/orthogonal_eval.pyx":1 - * """ # <<<<<<<<<<<<<< - * Evaluate orthogonal polynomial values using recurrence relations - * or by calling special functions. 
- */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.special.orthogonal_eval", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.special.orthogonal_eval"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } 
else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - 
PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } else if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. 
*/ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. */ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if 
(!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); 
- #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_intp(npy_intp val) { - const npy_intp neg_one = (npy_intp)-1, const_zero = (npy_intp)0; - const int is_unsigned = const_zero < neg_one; - if ((sizeof(npy_intp) == sizeof(char)) || - (sizeof(npy_intp) == sizeof(short))) { - return PyInt_FromLong((long)val); - } else if ((sizeof(npy_intp) == sizeof(int)) || - (sizeof(npy_intp) == sizeof(long))) { - if (is_unsigned) - return PyLong_FromUnsignedLong((unsigned long)val); - else - return PyInt_FromLong((long)val); - } else if (sizeof(npy_intp) == sizeof(PY_LONG_LONG)) { - if (is_unsigned) - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); - else - return PyLong_FromLongLong((PY_LONG_LONG)val); - } else { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - return _PyLong_FromByteArray(bytes, sizeof(npy_intp), - little, !is_unsigned); - } -} - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = 
neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; 
-#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > 
const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = 
PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if 
(t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) 
&ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/special/setup.py b/scipy-0.10.1/scipy/special/setup.py deleted file mode 100755 index b0e1f4698f..0000000000 --- a/scipy-0.10.1/scipy/special/setup.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -from os.path import join -from distutils.sysconfig import get_python_inc -import numpy -from numpy.distutils.misc_util import get_numpy_include_dirs - -try: - from numpy.distutils.misc_util import get_info -except ImportError: - raise ValueError("numpy >= 1.4 is required (detected %s from %s)" % \ - (numpy.__version__, numpy.__file__)) - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('special', parent_package, top_path) - - define_macros = [] - if sys.platform=='win32': -# define_macros.append(('NOINFINITIES',None)) -# define_macros.append(('NONANS',None)) - define_macros.append(('_USE_MATH_DEFINES',None)) - - # C libraries - config.add_library('sc_c_misc',sources=[join('c_misc','*.c')], - include_dirs=[get_python_inc(), get_numpy_include_dirs()], - macros=define_macros) - config.add_library('sc_cephes',sources=[join('cephes','*.c')], - include_dirs=[get_python_inc(), get_numpy_include_dirs()], - macros=define_macros) - - # Fortran libraries - 
config.add_library('sc_mach',sources=[join('mach','*.f')], - config_fc={'noopt':(__file__,1)}) - config.add_library('sc_toms',sources=[join('amos','*.f')]) - config.add_library('sc_amos',sources=[join('toms','*.f')]) - config.add_library('sc_cdf',sources=[join('cdflib','*.f')]) - config.add_library('sc_specfun',sources=[join('specfun','*.f')]) - - # Extension _cephes - sources = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', - 'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c'] - config.add_extension('_cephes', sources=sources, - libraries=['sc_amos','sc_toms','sc_c_misc','sc_cephes','sc_mach', - 'sc_cdf', 'sc_specfun'], - depends=["ufunc_extras.h", "cephes.h", - "amos_wrappers.h", "toms_wrappers.h", - "cdf_wrappers.h", "specfun_wrappers.h", - "c_misc/misc.h", "cephes_doc.h", - "cephes/mconf.h", "cephes/cephes_names.h"], - define_macros = define_macros, - extra_info=get_info("npymath") - ) - - # Extension specfun - config.add_extension('specfun', - sources=['specfun.pyf'], - f2py_options=['--no-wrap-functions'], - define_macros=[], - libraries=['sc_specfun']) - - # Extension orthogonal_eval - config.add_extension('orthogonal_eval', - sources=['orthogonal_eval.c'], - define_macros=[], - extra_info=get_info("npymath")) - - # Extension lambertw - config.add_extension('lambertw', - sources=['lambertw.c'], - define_macros=[], - extra_info=get_info("npymath")) - - # Extension _logit - config.add_extension('_logit', - sources=['_logit.c.src'], - extra_info=get_info("npymath")) - - config.add_data_files('tests/*.py') - config.add_data_files('tests/data/README') - config.add_data_files('tests/data/*.npz') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/special/setupscons.py b/scipy-0.10.1/scipy/special/setupscons.py deleted file mode 100755 index 7114f22a39..0000000000 --- a/scipy-0.10.1/scipy/special/setupscons.py +++ /dev/null @@ -1,19 
+0,0 @@ -#!/usr/bin/env python - -import os -import sys -from os.path import join -from distutils.sysconfig import get_python_inc - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('special', parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/special/specfun.pyf b/scipy-0.10.1/scipy/special/specfun.pyf deleted file mode 100644 index a84aebba74..0000000000 --- a/scipy-0.10.1/scipy/special/specfun.pyf +++ /dev/null @@ -1,492 +0,0 @@ -!%f90 -*- f90 -*- -python module specfun ! in - interface ! in :specfun - ! cpdsa - ! cfs - subroutine clqmn(mm,m,n,z,cqm,cqd) ! in :specfun:specfun.f - callstatement (*f2py_func)(&mm,&m,&n,&(z.r),&(z.i),cqm,cqd) - callprotoargument int*,int*,int*,double*,double*,complex_double*,complex_double* - integer intent(hide),depend(m) :: mm=m - integer intent(in), check(m>=1) :: m - integer intent(in), check(n>=1) :: n - complex*16 intent(in) :: z - complex*16 intent(out),dimension(0:mm,0:n),depend(mm,n) :: cqm - complex*16 intent(out),dimension(0:mm,0:n),depend(mm,n) :: cqd - end subroutine clqmn - subroutine lqmn(mm,m,n,x,qm,qd) ! in :specfun:specfun.f - integer intent(hide), depend(m) :: mm=m - integer intent(in), check(m>=1) :: m - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - double precision intent(out),dimension(0:mm,0:n),depend(mm,n) :: qm - double precision intent(out),dimension(0:mm,0:n),depend(mm,n) :: qd - end subroutine lqmn - subroutine clpmn(mm,m,n,x,y,cpm,cpd) ! 
in :specfun:specfun.f - integer intent(hide), depend(m) :: mm=m - integer intent(in), check(m>=0) :: m - integer intent(in), check(n>=0) :: n - double precision intent(in) :: x - double precision intent(in) :: y - complex*16 intent(out),dimension(0:m,0:n),depend(m,n) :: cpm - complex*16 intent(out),dimension(0:m,0:n),depend(m,n) :: cpd - end subroutine clpmn - ! vvsa - - subroutine jdzo(nt,n,m,pcode,zo) ! in :specfun:specfun.f - integer intent(in), check((nt>0)&&(nt<=1200)) :: nt - integer depend(nt), intent(out), dimension(1400) :: n - integer depend(nt), intent(out), dimension(1400) :: m - integer depend(nt), intent(out), dimension(1400) :: pcode - double precision intent(out), depend(nt), dimension(0:1400) :: zo - end subroutine jdzo - - ! cbk - ! cjy01 - ! rmn2sp - subroutine bernob(n,bn) ! in :specfun:specfun.f - integer intent(in), check(n>=2) :: n - double precision intent(out),depend(n),dimension(n+1) :: bn - end subroutine bernob - subroutine bernoa(n,bn) ! in :specfun:specfun.f - integer intent(in), check(n>=0) :: n - double precision intent(out),depend(n),dimension(n+1) :: bn - end subroutine bernoa - ! qstar - ! cv0 - ! cvqm - ! cvql - subroutine csphjy(n,z,nm,csj,cdj,csy,cdy) ! in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - complex*16 intent(in) :: z - integer intent(out) :: nm - complex*16 intent(out), dimension(n + 1),depend(n) :: csj - complex*16 intent(out), dimension(n + 1),depend(n) :: cdj - complex*16 intent(out), dimension(n + 1),depend(n) :: csy - complex*16 intent(out), dimension(n + 1),depend(n) :: cdy - end subroutine csphjy - - ! ittjyb - - ! ittjya - - ! msta1 - ! msta2 - - ! cjylv - ! rmn2l - ! psi (psi_spec) - !subroutine cva2(kd,m,q,a) ! in :specfun:specfun.f - ! integer :: kd - ! integer :: m - ! double precision :: q - ! double precision :: a - !end subroutine cva2 - subroutine lpmns(m,n,x,pm,pd) ! 
in :specfun:specfun.f - integer intent(in), depend(n), check((m>=0) && (m<=n)):: m - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - double precision intent(out),depend(n),dimension(n+1) :: pm - double precision intent(out),depend(n),dimension(n+1) :: pd - end subroutine lpmns - ! rswfp - ! jyndd - ! gam0 - ! cisib - subroutine eulera(n,en) ! in :specfun:specfun.f - integer intent(in), check(n>=0) :: n - double precision intent(out),depend(n),dimension(n+1) :: en - end subroutine eulera - ! refine - ! cisia - - ! itsl0 - ! stvl0 - ! stvl1 - - subroutine clqn(n,z,cqn,cqd) ! in :specfun:specfun.f - callstatement (*f2py_func)(&n,&(z.r),&(z.i),cqn,cqd) - callprotoargument int*,double*,double*,complex_double*,complex_double* - integer intent(in), check(n>=1) :: n - complex*16 intent(in) :: z - complex*16 intent(out),dimension(n+1),depend(n) :: cqn - complex*16 intent(out),dimension(n+1),depend(n) :: cqd - end subroutine clqn - - ! stvl0 - - subroutine airyzo(nt,kf,xa,xb,xc,xd) ! in :specfun:specfun.f - integer intent(in),check(nt>0) :: nt - integer optional,intent(in) :: kf=1 - double precision intent(out),depend(nt),dimension(nt) :: xa - double precision intent(out),depend(nt),dimension(nt) :: xb - double precision intent(out),depend(nt),dimension(nt) :: xc - double precision intent(out),depend(nt),dimension(nt) :: xd - end subroutine airyzo - ! error - ! cerror - - subroutine eulerb(n,en) ! in :specfun:specfun. - integer intent(in), check(n>=2) :: n - double precision intent(out),depend(n),dimension(n+1) :: en - end subroutine eulerb - subroutine cva1(kd,m,q,cv) ! in :specfun:specfun.f - integer intent(in) :: kd - integer intent(in), check(m<=200) :: m - double precision intent(in), check(q>=0) :: q - double precision intent(out), depend(m), dimension(m) :: cv - end subroutine cva1 - ! ittikb - subroutine lqnb(n,x,qn,qd) ! 
in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - double precision intent(out),depend(n),dimension(n+1) :: qn - double precision intent(out),depend(n),dimension(n+1) :: qd - end subroutine lqnb - ! cjk - - ! ittika - - subroutine lamv(v,x,vm,vl,dl) ! in :specfun:specfun.f - double precision intent(in), check(v>=1) :: v - double precision intent(in) :: x - double precision intent(out) :: vm - double precision intent(out),depend(v),dimension((int)v+1) :: vl - double precision intent(out),depend(v),dimension((int)v+1) :: dl - end subroutine lamv - ! chguit - ! kmn - subroutine lagzo(n,x,w) ! in :specfun:specfun.f - integer intent(in),check(n>0) :: n - double precision intent(out),depend(n),dimension(n) :: x - double precision intent(out),dimension(n),depend(n) :: w - end subroutine lagzo - ! vvla - - ! cjyva - ! cjyvb - - ! jy01a - ! incog - - ! itika - ! itikb - - ! jyv - ! jynb - ! stvh1 - - subroutine legzo(n,x,w) ! in :specfun:specfun.f - integer intent(in),check(n>0) :: n - double precision intent(out),depend(n),dimension(n) :: x - double precision intent(out),dimension(n),depend(n) :: w - end subroutine legzo - ! aswfa - ! jyna - - subroutine pbdv(v,x,dv,dp,pdf,pdd) ! in :specfun:specfun.f - double precision intent(in),check((abs((int)v)+2)>=2) :: v - double precision intent(in) :: x - double precision intent(out),depend(v),dimension(abs((int)v)+2) :: dv - double precision intent(out),depend(v),dimension(abs((int)v)+2) :: dp - double precision intent(out) :: pdf - double precision intent(out) :: pdd - end subroutine pbdv - - ! itsh0 - - subroutine cerzo(nt,zo) ! in :specfun:specfun.f - integer intent(in), check(nt>0) :: nt - complex*16 intent(out), depend(nt), dimension(nt) :: zo - end subroutine cerzo - - ! gamma2 - - ! chgu - subroutine lamn(n,x,nm,bl,dl) ! 
in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - integer intent(out) :: nm - double precision intent(out),depend(n),dimension(n+1) :: bl - double precision intent(out),depend(n),dimension(n+1) :: dl - end subroutine lamn - ! comelp - ! incob - !subroutine cvf(kd,m,q,a,mj,f) ! in :specfun:specfun.f - ! integer :: kd - ! integer :: m - ! double precision :: q - ! double precision :: a - ! integer :: mj - ! double precision :: f - !end subroutine cvf - subroutine clpn(n,z,cpn,cpd) ! in :specfun:specfun.f - callstatement (*f2py_func)(&n,&(z.r),&(z.i),cpn,cpd) - callprotoargument int*,double*,double*,complex_double*,complex_double* - integer intent(in), check(n>=1) :: n - complex*16 intent(in) :: z - complex*16 intent(out),depend(n),dimension(n+1) :: cpn - complex*16 intent(out),depend(n),dimension(n+1) :: cpd - end subroutine clpn - - subroutine lqmns(m,n,x,qm,qd) ! in :specfun:specfun.f - integer intent(in), check(m>=0) :: m - integer intent(in), check(n>=1) :: n, - double precision intent(in) :: x - double precision intent(out),depend(n),dimension(n+1) :: qm - double precision intent(out),depend(n),dimension(n+1) :: qd - end subroutine lqmns - ! ciklv - ! elit - ! elit3 - - ! eix - ! e1xb - - subroutine chgm(a,b,x,hg) ! in :specfun:specfun.f - double precision intent(in) :: a - double precision intent(in) :: b - double precision intent(in) :: x - double precision intent(out) :: hg - end subroutine chgm - - ! stvh0 - - ! hygfx - ! cchg - ! hygfz - ! itairy - ! airya - ! airyb - - ! ikna - ! cjyna - ! cjynb - ! iknb - subroutine lpmn(mm,m,n,x,pm,pd) ! in :specfun:specfun.f - integer intent(hide) :: mm=m - integer intent(in), depend(n), check((m>=0) && (m<=n)) :: m - integer intent(in), check((n>=0)) :: n - double precision intent(in) :: x - double precision intent(out),depend(m,n),dimension(m+1,n+1) :: pm - double precision intent(out),dimension(m+1,n+1),depend(m,n) :: pd - end subroutine lpmn - ! mtu0 - ! cy01 - ! 
ffk - ! scka - ! sckb - ! cpdla - subroutine fcszo(kf,nt,zo) ! in :specfun:specfun.f - integer intent(in), check((kf==1)||(kf==2)) :: kf - integer intent(in), check(nt>0) :: nt - complex*16 intent(out), depend(nt), dimension(nt) :: zo - end subroutine fcszo - ! e1xa - ! lpmv - - ! cgama - - subroutine aswfb(m,n,c,x,kd,cv,s1f,s1d) ! in :specfun:specfun.f - integer intent(in), check(m>=0) :: m - integer intent(in), check(n>=m) :: n - double precision intent(in) :: c - double precision intent(in), check(fabs(x)<1) :: x - integer intent(in), check((kd==-1)||(kd==1)) :: kd - double precision intent(in) :: cv - double precision intent(out) :: s1f - double precision intent(out) :: s1d - end subroutine aswfb - - ! chgu - - ! itth0 - - ! lgama - - subroutine lqna(n,x,qn,qd) ! in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in), check(fabs(x)<1) :: x - double precision intent(out),depend(n),dimension(n+1) :: qn - double precision intent(out),dimension(n+1),depend(n) :: qd - end subroutine lqna - ! dvla - ! ik01a - subroutine cpbdn(n,z,cpb,cpd) ! in :specfun:specfun.f - integer intent(in), check((abs(n)) >= 1) :: n - complex*16 intent(in) :: z - complex*16 depend(n), intent(out), dimension(abs(n)+2) :: cpb - complex*16 depend(n), intent(out), dimension(abs(n)+2) :: cpd - end subroutine cpbdn - ! ik01b - ! beta - subroutine lpn(n,x,pn,pd) ! in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - double precision intent(out),dimension(n+1),depend(n) :: pn - double precision intent(out),dimension(n+1),depend(n) :: pd - end subroutine lpn - subroutine fcoef(kd,m,q,a,fc) ! in :specfun:specfun.f - integer intent(in), check((kd>0) && (kd<5)) :: kd - integer intent(in) :: m - double precision intent(in), check(q>=0) :: q - double precision intent(in) :: a - double precision intent(out),dimension(251) :: fc - end subroutine fcoef - subroutine sphi(n,x,nm,si,di) ! 
in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - integer intent(out) :: nm - double precision intent(out),dimension(n + 1),depend(n) :: si - double precision intent(out),dimension(n + 1),depend(n) :: di - end subroutine sphi - ! pbwa - ! rmn1 - ! dvsa - - ! e1z - - ! itjyb - ! chgul - ! gmn - ! itjya - ! stvlv - - subroutine rcty(n,x,nm,ry,dy) ! in :specfun:specfun.f - integer intent(in), check(n>0) :: n - double precision intent(in) :: x - integer intent(out) :: nm - double precision intent(out),dimension(n+1),depend(n) :: ry - double precision intent(out),dimension(n+1),depend(n) :: dy - end subroutine rcty - subroutine lpni(n,x,pn,pd,pl) ! in :specfun:specfun.f - integer intent(in),check(n>0) :: n - double precision intent(in) :: x - double precision intent(out), depend(n), dimension(n+1) :: pn - double precision intent(out), depend(n), dimension(n+1) :: pd - double precision intent(out), depend(n), dimension(n+1) :: pl - end subroutine lpni - - ! klvna - - ! chgubi - subroutine cyzo(nt,kf,kc,zo,zv) ! in :specfun:specfun.f - integer intent(in), check(nt>0) :: nt - integer intent(in), check((kf>=0)&&(kf<=2)) :: kf - integer intent(in), check((kc==0)||(kc==1)) :: kc - complex*16 intent(out),depend(nt),dimension(nt) :: zo - complex*16 intent(out),dimension(nt),depend(nt) :: zv - end subroutine cyzo - ! klvnb - ! rmn2so - subroutine csphik(n,z,nm,csi,cdi,csk,cdk) ! in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - complex*16 intent(in) :: z - integer intent(out) :: nm - complex*16 intent(out),dimension(n+1),depend(n) :: csi - complex*16 intent(out),dimension(n+1),depend(n) :: cdi - complex*16 intent(out),dimension(n+1),depend(n) :: csk - complex*16 intent(out),dimension(n+1),depend(n) :: cdk - end subroutine csphik - ! bjndd - subroutine sphj(n,x,nm,sj,dj) ! 
in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in) :: x - integer intent(out) :: nm - double precision intent(out),dimension(n+1),depend(n) :: sj - double precision intent(out),dimension(n+1),depend(n) :: dj - end subroutine sphj - subroutine othpl(kf,n,x,pl,dpl) ! in :specfun:specfun.f - integer intent(in), check((kf>0)&&(kf<5)) :: kf - integer intent(in), check(n>0) :: n - double precision intent(in) :: x - double precision intent(out),dimension(n+1),depend(n) :: pl - double precision intent(out),dimension(n+1),depend(n) :: dpl - end subroutine othpl - subroutine klvnzo(nt,kd,zo) ! in :specfun:specfun.f - integer intent(in), check(nt>0) :: nt - integer intent(in), check(kd>=1 || kd<=8) :: kd - double precision intent(out), depend(nt), dimension(nt) :: zo - end subroutine klvnzo - ! rswfo - ! ch12n - subroutine jyzo(n,nt,rj0,rj1,ry0,ry1) ! in :specfun:specfun.f - integer intent(in), check(n>=0) :: n - integer intent(in), check(nt>0) :: nt - double precision intent(out),dimension(nt),depend(nt) :: rj0 - double precision intent(out),dimension(nt),depend(nt) :: rj1 - double precision intent(out),dimension(nt),depend(nt) :: ry0 - double precision intent(out),dimension(nt),depend(nt) :: ry1 - end subroutine jyzo - - ! ikv - - ! sdmn - ! ajyik - ! cikvb - - ! cikva - ! cfc - - ! fcs - subroutine rctj(n,x,nm,rj,dj) ! in :specfun:specfun.f - integer intent(in), check(n>0) :: n - double precision intent(in) :: x - integer intent(out) :: nm - double precision intent(out),dimension(n+1),depend(n) :: rj - double precision intent(out),dimension(n+1),depend(n) :: dj - end subroutine rctj - subroutine herzo(n,x,w) ! in :specfun:specfun.f - integer intent(in), check(n>0) :: n - double precision intent(out),dimension(n),depend(n) :: x - double precision intent(out),dimension(n),depend(n) :: w - end subroutine herzo - ! jy01b - ! enxb - subroutine sphk(n,x,nm,sk,dk) ! 
in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in), check(x>=0) :: x - integer intent(out) :: nm - double precision intent(out), dimension(n+1),depend(n) :: sk - double precision intent(out), dimension(n+1),depend(n) :: dk - end subroutine sphk - ! enxa - ! gaih - subroutine pbvv(v,x,vv,vp,pvf,pvd) ! in :specfun:specfun.f - double precision intent(in), check((abs((int)v)+2)>=2) :: v - double precision intent(in) :: x - double precision intent(out),depend(v),dimension(abs((int)v)+2) :: vv - double precision intent(out),depend(v),dimension(abs((int)v)+2) :: vp - double precision intent(out) :: pvf - double precision intent(out) :: pvd - end subroutine pbvv - subroutine segv(m,n,c,kd,cv,eg) ! in :specfun:specfun.f - integer intent(in) :: m - integer intent(in),depend(m),check((n>=m) && ((n-m)<199)) :: n - double precision intent(in) :: c - integer intent(in), check((kd==-1) || (kd==1)) :: kd - double precision intent(out) :: cv - double precision intent(out),dimension(n-m+2) :: eg - end subroutine segv - ! ciknb - ! cikna - ! mtu12 - ! cik01 - - ! cpsi - - subroutine sphy(n,x,nm,sy,dy) ! in :specfun:specfun.f - integer intent(in), check(n>=1) :: n - double precision intent(in), check(x>=0) :: x - integer intent(out) :: nm - double precision intent(out),dimension(n+1),depend(n) :: sy - double precision intent(out),dimension(n+1),depend(n) :: dy - end subroutine sphy - ! jelp - - ! stvhv - end interface -end python module specfun - -! This file was auto-generated with f2py (version:2.13.175-1239). -! and then heavily modified..... -! 
See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/special/specfun/specfun.f b/scipy-0.10.1/scipy/special/specfun/specfun.f deleted file mode 100644 index 159fb3a3fe..0000000000 --- a/scipy-0.10.1/scipy/special/specfun/specfun.f +++ /dev/null @@ -1,12947 +0,0 @@ -C COMPUTATION OF SPECIAL FUNCTIONS -C -C Shanjie Zhang and Jianming Jin -C -C Copyrighted but permission granted to use code in programs. -C Buy their book "Computation of Special Functions", 1996, John Wiley & Sons, Inc. -C -C -C Compiled into a single source file and changed REAL To DBLE throughout. -C -C Changed according to ERRATA also. -C -C Changed GAMMA to GAMMA2 and PSI to PSI_SPEC to avoid potential conflicts. -C - - SUBROUTINE CPDSA(N,Z,CDN) -C -C =========================================================== -C Purpose: Compute complex parabolic cylinder function Dn(z) -C for small argument -C Input: z --- complex argument of D(z) -C n --- Order of D(z) (n = 0,-1,-2,...) -C Output: CDN --- Dn(z) -C Routine called: GAIH for computing Г(x), x=n/2 (n=1,2,...) 
-C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - EPS=1.0D-15 - PI=3.141592653589793D0 - SQ2=DSQRT(2.0D0) - CA0=CDEXP(-.25D0*Z*Z) - VA0=0.5D0*(1.0D0-N) - IF (N.EQ.0.0) THEN - CDN=CA0 - ELSE - IF (CDABS(Z).EQ.0.0) THEN - IF (VA0.LE.0.0.AND.VA0.EQ.INT(VA0)) THEN - CDN=0.0D0 - ELSE - CALL GAIH(VA0,GA0) - PD=DSQRT(PI)/(2.0D0**(-.5D0*N)*GA0) - CDN=CMPLX(PD,0.0D0) - ENDIF - ELSE - XN=-N - CALL GAIH(XN,G1) - CB0=2.0D0**(-0.5D0*N-1.0D0)*CA0/G1 - VT=-.5D0*N - CALL GAIH(VT,G0) - CDN=CMPLX(G0,0.0D0) - CR=(1.0D0,0.0D0) - DO 10 M=1,250 - VM=.5D0*(M-N) - CALL GAIH(VM,GM) - CR=-CR*SQ2*Z/M - CDW=GM*CR - CDN=CDN+CDW - IF (CDABS(CDW).LT.CDABS(CDN)*EPS) GO TO 20 -10 CONTINUE -20 CDN=CB0*CDN - ENDIF - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CFS(Z,ZF,ZD) -C -C ========================================================= -C Purpose: Compute complex Fresnel Integral S(z) and S'(z) -C Input : z --- Argument of S(z) -C Output: ZF --- S(z) -C ZD --- S'(z) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (E,P,W) - IMPLICIT COMPLEX *16 (C,S,Z) - EPS=1.0D-14 - PI=3.141592653589793D0 - W0=CDABS(Z) - ZP=0.5D0*PI*Z*Z - ZP2=ZP*ZP - Z0=(0.0D0,0.0D0) - IF (Z.EQ.Z0) THEN - S=Z0 - ELSE IF (W0.LE.2.5) THEN - S=Z*ZP/3.0D0 - CR=S - WB0=0.0D0 - DO 10 K=1,80 - CR=-.5D0*CR*(4.0D0*K-1.0D0)/K/(2.0D0*K+1.0D0) - & /(4.0D0*K+3.0D0)*ZP2 - S=S+CR - WB=CDABS(S) - IF (DABS(WB-WB0).LT.EPS.AND.K.GT.10) GO TO 30 -10 WB0=WB - ELSE IF (W0.GT.2.5.AND.W0.LT.4.5) THEN - M=85 - S=Z0 - CF1=Z0 - CF0=(1.0D-100,0.0D0) - DO 15 K=M,0,-1 - CF=(2.0D0*K+3.0D0)*CF0/ZP-CF1 - IF (K.NE.INT(K/2)*2) S=S+CF - CF1=CF0 -15 CF0=CF - S=CDSQRT(2.0D0/(PI*ZP))*CDSIN(ZP)/CF*S - ELSE - CR=(1.0D0,0.0D0) - CF=(1.0D0,0.0D0) - DO 20 K=1,20 - CR=-.25D0*CR*(4.0D0*K-1.0D0)*(4.0D0*K-3.0D0)/ZP2 -20 CF=CF+CR - CR=1.0D0 - CG=CR - DO 25 K=1,12 - CR=-.25D0*CR*(4.0D0*K+1.0D0)*(4.0D0*K-1.0D0)/ZP2 
-25 CG=CG+CR - CG = CG/(PI*Z*Z) - S=.5D0-(CF*CDCOS(ZP)+CG*CDSIN(ZP))/(PI*Z) - ENDIF -30 ZF=S - ZD=CDSIN(0.5*PI*Z*Z) - RETURN - END - -C ********************************** - - SUBROUTINE LQMN(MM,M,N,X,QM,QD) -C -C ========================================================== -C Purpose: Compute the associated Legendre functions of the -C second kind, Qmn(x) and Qmn'(x) -C Input : x --- Argument of Qmn(x) -C m --- Order of Qmn(x) ( m = 0,1,2,… ) -C n --- Degree of Qmn(x) ( n = 0,1,2,… ) -C mm --- Physical dimension of QM and QD -C Output: QM(m,n) --- Qmn(x) -C QD(m,n) --- Qmn'(x) -C ========================================================== -C - IMPLICIT DOUBLE PRECISION (Q,X) - DIMENSION QM(0:MM,0:N),QD(0:MM,0:N) - IF (DABS(X).EQ.1.0D0) THEN - DO 10 I=0,M - DO 10 J=0,N - QM(I,J)=1.0D+300 - QD(I,J)=1.0D+300 -10 CONTINUE - RETURN - ENDIF - LS=1 - IF (DABS(X).GT.1.0D0) LS=-1 - XS=LS*(1.0D0-X*X) - XQ=DSQRT(XS) - Q0=0.5D0*DLOG(DABS((X+1.0D0)/(X-1.0D0))) - IF (DABS(X).LT.1.0001D0) THEN - QM(0,0)=Q0 - QM(0,1)=X*Q0-1.0D0 - QM(1,0)=-1.0D0/XQ - QM(1,1)=-XQ*(Q0+X/(1.0D0-X*X)) - DO 15 I=0,1 - DO 15 J=2,N - QM(I,J)=((2.0D0*J-1.0D0)*X*QM(I,J-1) - & -(J+I-1.0D0)*QM(I,J-2))/(J-I) -15 CONTINUE - DO 20 J=0,N - DO 20 I=2,M - QM(I,J)=-2.0D0*(I-1.0D0)*X/XQ*QM(I-1,J)-LS* - & (J+I-1.0D0)*(J-I+2.0D0)*QM(I-2,J) -20 CONTINUE - ELSE - IF (DABS(X).GT.1.1D0) THEN - KM=40+M+N - ELSE - KM=(40+M+N)*INT(-1.0-1.8*LOG(X-1.0)) - ENDIF - QF2=0.0D0 - QF1=1.0D0 - QF0=0.0D0 - DO 25 K=KM,0,-1 - QF0=((2*K+3.0D0)*X*QF1-(K+2.0D0)*QF2)/(K+1.0D0) - IF (K.LE.N) QM(0,K)=QF0 - QF2=QF1 -25 QF1=QF0 - DO 30 K=0,N -30 QM(0,K)=Q0*QM(0,K)/QF0 - QF2=0.0D0 - QF1=1.0D0 - DO 35 K=KM,0,-1 - QF0=((2*K+3.0D0)*X*QF1-(K+1.0D0)*QF2)/(K+2.0D0) - IF (K.LE.N) QM(1,K)=QF0 - QF2=QF1 -35 QF1=QF0 - Q10=-1.0D0/XQ - DO 40 K=0,N -40 QM(1,K)=Q10*QM(1,K)/QF0 - DO 45 J=0,N - Q0=QM(0,J) - Q1=QM(1,J) - DO 45 I=0,M-2 - QF=-2.0D0*(I+1)*X/XQ*Q1+(J-I)*(J+I+1.0D0)*Q0 - QM(I+2,J)=QF - Q0=Q1 - Q1=QF -45 CONTINUE - ENDIF - QD(0,0)=LS/XS - DO 50 J=1,N -50 
QD(0,J)=LS*J*(QM(0,J-1)-X*QM(0,J))/XS - DO 55 J=0,N - DO 55 I=1,M - QD(I,J)=LS*I*X/XS*QM(I,J)+(I+J)*(J-I+1.0D0)/XQ*QM(I-1,J) -55 CONTINUE - RETURN - END - -C ********************************** - - SUBROUTINE CLPMN(MM,M,N,X,Y,CPM,CPD) -C -C ========================================================= -C Purpose: Compute the associated Legendre functions Pmn(z) -C and their derivatives Pmn'(z) for a complex -C argument -C Input : x --- Real part of z -C y --- Imaginary part of z -C m --- Order of Pmn(z), m = 0,1,2,...,n -C n --- Degree of Pmn(z), n = 0,1,2,...,N -C mm --- Physical dimension of CPM and CPD -C Output: CPM(m,n) --- Pmn(z) -C CPD(m,n) --- Pmn'(z) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (X,Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CPM(0:MM,0:N),CPD(0:MM,0:N) - Z=CMPLX(X,Y) - DO 10 I=0,N - DO 10 J=0,M - CPM(J,I)=(0.0D0,0.0D0) -10 CPD(J,I)=(0.0D0,0.0D0) - CPM(0,0)=(1.0D0,0.0D0) - IF (N.EQ.0) RETURN - IF (DABS(X).EQ.1.0D0.AND.Y.EQ.0.0D0) THEN - DO 15 I=1,N - CPM(0,I)=X**I -15 CPD(0,I)=0.5D0*I*(I+1)*X**(I+1) - DO 20 J=1,N - DO 20 I=1,M - IF (I.EQ.1) THEN - CPD(I,J)=(1.0D+300,0.0D0) - ELSE IF (I.EQ.2) THEN - CPD(I,J)=-0.25D0*(J+2)*(J+1)*J*(J-1)*X**(J+1) - ENDIF -20 CONTINUE - RETURN - ENDIF - LS=1 - IF (CDABS(Z).GT.1.0D0) LS=-1 - ZQ=CDSQRT(LS*(1.0D0-Z*Z)) - ZS=LS*(1.0D0-Z*Z) - DO 25 I=1,M -25 CPM(I,I)=-LS*(2.0D0*I-1.0D0)*ZQ*CPM(I-1,I-1) - DO 30 I=0,M -30 CPM(I,I+1)=(2.0D0*I+1.0D0)*Z*CPM(I,I) - DO 35 I=0,M - DO 35 J=I+2,N - CPM(I,J)=((2.0D0*J-1.0D0)*Z*CPM(I,J-1)-(I+J- - & 1.0D0)*CPM(I,J-2))/(J-I) -35 CONTINUE - CPD(0,0)=(0.0D0,0.0D0) - DO 40 J=1,N -40 CPD(0,J)=LS*J*(CPM(0,J-1)-Z*CPM(0,J))/ZS - DO 45 I=1,M - DO 45 J=I,N - CPD(I,J)=LS*I*Z*CPM(I,J)/ZS+(J+I)*(J-I+1.0D0) - & /ZQ*CPM(I-1,J) -45 CONTINUE - RETURN - END - -C ********************************** - - SUBROUTINE VVSA(VA,X,PV) -C -C =================================================== -C Purpose: Compute parabolic cylinder function Vv(x) -C for small argument 
-C Input: x --- Argument -C va --- Order -C Output: PV --- Vv(x) -C Routine called : GAMMA2 for computing Г(x) -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - EPS=1.0D-15 - PI=3.141592653589793D0 - EP=DEXP(-.25D0*X*X) - VA0=1.0D0+0.5D0*VA - IF (X.EQ.0.0) THEN - IF (VA0.LE.0.0.AND.VA0.EQ.INT(VA0).OR.VA.EQ.0.0) THEN - PV=0.0D0 - ELSE - VB0=-0.5D0*VA - SV0=DSIN(VA0*PI) - CALL GAMMA2(VA0,GA0) - PV=2.0D0**VB0*SV0/GA0 - ENDIF - ELSE - SQ2=DSQRT(2.0D0) - A0=2.0D0**(-.5D0*VA)*EP/(2.0D0*PI) - SV=DSIN(-(VA+.5D0)*PI) - V1=-.5D0*VA - CALL GAMMA2(V1,G1) - PV=(SV+1.0D0)*G1 - R=1.0D0 - FAC=1.0D0 - DO 10 M=1,250 - VM=.5D0*(M-VA) - CALL GAMMA2(VM,GM) - R=R*SQ2*X/M - FAC=-FAC - GW=FAC*SV+1.0D0 - R1=GW*R*GM - PV=PV+R1 - IF (DABS(R1/PV).LT.EPS.AND.GW.NE.0.0) GO TO 15 -10 CONTINUE -15 PV=A0*PV - ENDIF - RETURN - END - - - -C ********************************** -C SciPy: Changed P from a character array to an integer array. - SUBROUTINE JDZO(NT,N,M,P,ZO) -C -C =========================================================== -C Purpose: Compute the zeros of Bessel functions Jn(x) and -C Jn'(x), and arrange them in the order of their -C magnitudes -C Input : NT --- Number of total zeros ( NT ≤ 1200 ) -C Output: ZO(L) --- Value of the L-th zero of Jn(x) -C and Jn'(x) -C N(L) --- n, order of Jn(x) or Jn'(x) associated -C with the L-th zero -C M(L) --- m, serial number of the zeros of Jn(x) -C or Jn'(x) associated with the L-th zero -C ( L is the serial number of all the -C zeros of Jn(x) and Jn'(x) ) -C P(L) --- 0 (TM) or 1 (TE), a code for designating the -C zeros of Jn(x) or Jn'(x). 
-C In the waveguide applications, the zeros -C of Jn(x) correspond to TM modes and -C those of Jn'(x) correspond to TE modes -C Routine called: BJNDD for computing Jn(x), Jn'(x) and -C Jn''(x) -C ============================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - INTEGER P(1400), P1(70) - DIMENSION N(1400),M(1400),ZO(0:1400),N1(70),M1(70), - & ZOC(0:70),BJ(101),DJ(101),FJ(101) - X = 0 - ZOC(0) = 0 - IF (NT.LT.600) THEN - XM=-1.0+2.248485*NT**0.5-.0159382*NT+3.208775E-4 - & *NT**1.5 - NM=INT(14.5+.05875*NT) - MM=INT(.02*NT)+6 - ELSE - XM=5.0+1.445389*NT**.5+.01889876*NT-2.147763E-4 - & *NT**1.5 - NM=INT(27.8+.0327*NT) - MM=INT(.01088*NT)+10 - ENDIF - L0=0 - DO 45 I=1,NM - X1=.407658+.4795504*(I-1)**.5+.983618*(I-1) - X2=1.99535+.8333883*(I-1)**.5+.984584*(I-1) - L1=0 - DO 30 J=1,MM - IF (I.EQ.1.AND.J.EQ.1) GO TO 15 - X=X1 -10 CALL BJNDD(I,X,BJ,DJ,FJ) - X0=X - X=X-DJ(I)/FJ(I) - IF (X1.GT.XM) GO TO 20 - IF (DABS(X-X0).GT.1.0D-10) GO TO 10 -15 L1=L1+1 - N1(L1)=I-1 - M1(L1)=J - IF (I.EQ.1) M1(L1)=J-1 - P1(L1)=1 - ZOC(L1)=X - IF (I.LE.15) THEN - X1=X+3.057+.0122*(I-1)+(1.555+.41575*(I-1))/(J+1)**2 - ELSE - X1=X+2.918+.01924*(I-1)+(6.26+.13205*(I-1))/(J+1)**2 - ENDIF -20 X=X2 -25 CALL BJNDD(I,X,BJ,DJ,FJ) - X0=X - X=X-BJ(I)/DJ(I) - IF (X.GT.XM) GO TO 30 - IF (DABS(X-X0).GT.1.0D-10) GO TO 25 - L1=L1+1 - N1(L1)=I-1 - M1(L1)=J - P1(L1)=0 - ZOC(L1)=X - IF (I.LE.15) THEN - X2=X+3.11+.0138*(I-1)+(.04832+.2804*(I-1))/(J+1)**2 - ELSE - X2=X+3.001+.0105*(I-1)+(11.52+.48525*(I-1))/(J+3)**2 - ENDIF -30 CONTINUE - L=L0+L1 - L2=L -35 IF (L0.EQ.0) THEN - DO 40 K=1,L - ZO(K)=ZOC(K) - N(K)=N1(K) - M(K)=M1(K) -40 P(K)=P1(K) - L1=0 - ELSE IF (L0.NE.0) THEN - IF (ZO(L0).GE.ZOC(L1)) THEN - ZO(L0+L1)=ZO(L0) - N(L0+L1)=N(L0) - M(L0+L1)=M(L0) - P(L0+L1)=P(L0) - L0=L0-1 - ELSE - ZO(L0+L1)=ZOC(L1) - N(L0+L1)=N1(L1) - M(L0+L1)=M1(L1) - P(L0+L1)=P1(L1) - L1=L1-1 - ENDIF - ENDIF - IF (L1.NE.0) GO TO 35 -45 L0=L2 - RETURN - END - - - -C 
********************************** - - SUBROUTINE CBK(M,N,C,CV,QT,CK,BK) -C -C ===================================================== -C Purpose: Compute coefficient Bk's for oblate radial -C functions with a small argument -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BK(200),CK(200),U(200),V(200),W(200) - EPS=1.0D-14 - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - NM=25+INT(0.5*(N-M)+C) - U(1)=0.0D0 - N2=NM-2 - DO 10 J=2,N2 -10 U(J)=C*C - DO 15 J=1,N2 -15 V(J)=(2.0*J-1.0-IP)*(2.0*(J-M)-IP)+M*(M-1.0)-CV - DO 20 J=1,NM-1 -20 W(J)=(2.0*J-IP)*(2.0*J+1.0-IP) - IF (IP.EQ.0) THEN - SW=0.0D0 - DO 40 K=0,N2-1 - S1=0.0D0 - I1=K-M+1 - DO 30 I=I1,NM - IF (I.LT.0) GO TO 30 - R1=1.0D0 - DO 25 J=1,K -25 R1=R1*(I+M-J)/J - S1=S1+CK(I+1)*(2.0*I+M)*R1 - IF (DABS(S1-SW).LT.DABS(S1)*EPS) GO TO 35 - SW=S1 -30 CONTINUE -35 BK(K+1)=QT*S1 -40 CONTINUE - ELSE IF (IP.EQ.1) THEN - SW=0.0D0 - DO 60 K=0,N2-1 - S1=0.0D0 - I1=K-M+1 - DO 50 I=I1,NM - IF (I.LT.0) GO TO 50 - R1=1.0D0 - DO 45 J=1,K -45 R1=R1*(I+M-J)/J - IF (I.GT.0) S1=S1+CK(I)*(2.0*I+M-1)*R1 - S1=S1-CK(I+1)*(2.0*I+M)*R1 - IF (DABS(S1-SW).LT.DABS(S1)*EPS) GO TO 55 - SW=S1 -50 CONTINUE -55 BK(K+1)=QT*S1 -60 CONTINUE - ENDIF - W(1)=W(1)/V(1) - BK(1)=BK(1)/V(1) - DO 65 K=2,N2 - T=V(K)-W(K-1)*U(K) - W(K)=W(K)/T -65 BK(K)=(BK(K)-BK(K-1)*U(K))/T - DO 70 K=N2-1,1,-1 -70 BK(K)=BK(K)-W(K)*BK(K+1) - RETURN - END - - - -C ********************************** - - SUBROUTINE CJY01(Z,CBJ0,CDJ0,CBJ1,CDJ1,CBY0,CDY0,CBY1,CDY1) -C -C ======================================================= -C Purpose: Compute Bessel functions J0(z), J1(z), Y0(z), -C Y1(z), and their derivatives for a complex -C argument -C Input : z --- Complex argument -C Output: CBJ0 --- J0(z) -C CDJ0 --- J0'(z) -C CBJ1 --- J1(z) -C CDJ1 --- J1'(z) -C CBY0 --- Y0(z) -C CDY0 --- Y0'(z) -C CBY1 --- Y1(z) -C CDY1 --- Y1'(z) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A,B,E,P,R,W) - 
IMPLICIT COMPLEX*16 (C,Z) - DIMENSION A(12),B(12),A1(12),B1(12) - PI=3.141592653589793D0 - EL=0.5772156649015329D0 - RP2=2.0D0/PI - CI=(0.0D0,1.0D0) - A0=CDABS(Z) - Z2=Z*Z - Z1=Z - IF (A0.EQ.0.0D0) THEN - CBJ0=(1.0D0,0.0D0) - CBJ1=(0.0D0,0.0D0) - CDJ0=(0.0D0,0.0D0) - CDJ1=(0.5D0,0.0D0) - CBY0=-(1.0D300,0.0D0) - CBY1=-(1.0D300,0.0D0) - CDY0=(1.0D300,0.0D0) - CDY1=(1.0D300,0.0D0) - RETURN - ENDIF - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (A0.LE.12.0) THEN - CBJ0=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 10 K=1,40 - CR=-0.25D0*CR*Z2/(K*K) - CBJ0=CBJ0+CR - IF (CDABS(CR).LT.CDABS(CBJ0)*1.0D-15) GO TO 15 -10 CONTINUE -15 CBJ1=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 20 K=1,40 - CR=-0.25D0*CR*Z2/(K*(K+1.0D0)) - CBJ1=CBJ1+CR - IF (CDABS(CR).LT.CDABS(CBJ1)*1.0D-15) GO TO 25 -20 CONTINUE -25 CBJ1=0.5D0*Z1*CBJ1 - W0=0.0D0 - CR=(1.0D0,0.0D0) - CS=(0.0D0,0.0D0) - DO 30 K=1,40 - W0=W0+1.0D0/K - CR=-0.25D0*CR/(K*K)*Z2 - CP=CR*W0 - CS=CS+CP - IF (CDABS(CP).LT.CDABS(CS)*1.0D-15) GO TO 35 -30 CONTINUE -35 CBY0=RP2*(CDLOG(Z1/2.0D0)+EL)*CBJ0-RP2*CS - W1=0.0D0 - CR=(1.0D0,0.0D0) - CS=(1.0D0,0.0D0) - DO 40 K=1,40 - W1=W1+1.0D0/K - CR=-0.25D0*CR/(K*(K+1))*Z2 - CP=CR*(2.0D0*W1+1.0D0/(K+1.0D0)) - CS=CS+CP - IF (CDABS(CP).LT.CDABS(CS)*1.0D-15) GO TO 45 -40 CONTINUE -45 CBY1=RP2*((CDLOG(Z1/2.0D0)+EL)*CBJ1-1.0D0/Z1-.25D0*Z1*CS) - ELSE - DATA A/-.703125D-01,.112152099609375D+00, - & -.5725014209747314D+00,.6074042001273483D+01, - & -.1100171402692467D+03,.3038090510922384D+04, - & -.1188384262567832D+06,.6252951493434797D+07, - & -.4259392165047669D+09,.3646840080706556D+11, - & -.3833534661393944D+13,.4854014686852901D+15/ - DATA B/ .732421875D-01,-.2271080017089844D+00, - & .1727727502584457D+01,-.2438052969955606D+02, - & .5513358961220206D+03,-.1825775547429318D+05, - & .8328593040162893D+06,-.5006958953198893D+08, - & .3836255180230433D+10,-.3649010818849833D+12, - & .4218971570284096D+14,-.5827244631566907D+16/ - DATA A1/.1171875D+00,-.144195556640625D+00, - & 
.6765925884246826D+00,-.6883914268109947D+01, - & .1215978918765359D+03,-.3302272294480852D+04, - & .1276412726461746D+06,-.6656367718817688D+07, - & .4502786003050393D+09,-.3833857520742790D+11, - & .4011838599133198D+13,-.5060568503314727D+15/ - DATA B1/-.1025390625D+00,.2775764465332031D+00, - & -.1993531733751297D+01,.2724882731126854D+02, - & -.6038440767050702D+03,.1971837591223663D+05, - & -.8902978767070678D+06,.5310411010968522D+08, - & -.4043620325107754D+10,.3827011346598605D+12, - & -.4406481417852278D+14,.6065091351222699D+16/ - K0=12 - IF (A0.GE.35.0) K0=10 - IF (A0.GE.50.0) K0=8 - CT1=Z1-.25D0*PI - CP0=(1.0D0,0.0D0) - DO 50 K=1,K0 -50 CP0=CP0+A(K)*Z1**(-2*K) - CQ0=-0.125D0/Z1 - DO 55 K=1,K0 -55 CQ0=CQ0+B(K)*Z1**(-2*K-1) - CU=CDSQRT(RP2/Z1) - CBJ0=CU*(CP0*CDCOS(CT1)-CQ0*CDSIN(CT1)) - CBY0=CU*(CP0*CDSIN(CT1)+CQ0*CDCOS(CT1)) - CT2=Z1-.75D0*PI - CP1=(1.0D0,0.0D0) - DO 60 K=1,K0 -60 CP1=CP1+A1(K)*Z1**(-2*K) - CQ1=0.375D0/Z1 - DO 65 K=1,K0 -65 CQ1=CQ1+B1(K)*Z1**(-2*K-1) - CBJ1=CU*(CP1*CDCOS(CT2)-CQ1*CDSIN(CT2)) - CBY1=CU*(CP1*CDSIN(CT2)+CQ1*CDCOS(CT2)) - ENDIF - IF (DBLE(Z).LT.0.0) THEN - IF (DIMAG(Z).LT.0.0) CBY0=CBY0-2.0D0*CI*CBJ0 - IF (DIMAG(Z).GT.0.0) CBY0=CBY0+2.0D0*CI*CBJ0 - IF (DIMAG(Z).LT.0.0) CBY1=-(CBY1-2.0D0*CI*CBJ1) - IF (DIMAG(Z).GT.0.0) CBY1=-(CBY1+2.0D0*CI*CBJ1) - CBJ1=-CBJ1 - ENDIF - CDJ0=-CBJ1 - CDJ1=CBJ0-1.0D0/Z*CBJ1 - CDY0=-CBY1 - CDY1=CBY0-1.0D0/Z*CBY1 - RETURN - END - -C ********************************** - - SUBROUTINE RMN2SP(M,N,C,X,CV,DF,KD,R2F,R2D) -C -C ====================================================== -C Purpose: Compute prolate spheroidal radial function -C of the second kind with a small argument -C Routines called: -C (1) LPMNS for computing the associated Legendre -C functions of the first kind -C (2) LQMNS for computing the associated Legendre -C functions of the second kind -C (3) KMN for computing expansion coefficients -C and joining factors -C ====================================================== -C - IMPLICIT 
DOUBLE PRECISION (A-H,O-Z) - DIMENSION PM(0:251),PD(0:251),QM(0:251),QD(0:251), - & DN(200),DF(200) - IF (DABS(DF(1)).LT.1.0D-280) THEN - R2F=1.0D+300 - R2D=1.0D+300 - RETURN - ENDIF - EPS=1.0D-14 - IP=1 - NM1=INT((N-M)/2) - IF (N-M.EQ.2*NM1) IP=0 - NM=25+NM1+INT(C) - NM2=2*NM+M - CALL KMN(M,N,C,CV,KD,DF,DN,CK1,CK2) - CALL LPMNS(M,NM2,X,PM,PD) - CALL LQMNS(M,NM2,X,QM,QD) - SU0=0.0D0 - SW=0.0D0 - DO 10 K=1,NM - J=2*K-2+M+IP - SU0=SU0+DF(K)*QM(J) - IF (K.GT.NM1.AND.DABS(SU0-SW).LT.DABS(SU0)*EPS) GO TO 15 -10 SW=SU0 -15 SD0=0.0D0 - DO 20 K=1,NM - J=2*K-2+M+IP - SD0=SD0+DF(K)*QD(J) - IF (K.GT.NM1.AND.DABS(SD0-SW).LT.DABS(SD0)*EPS) GO TO 25 -20 SW=SD0 -25 SU1=0.0D0 - SD1=0.0D0 - DO 30 K=1,M - J=M-2*K+IP - IF (J.LT.0) J=-J-1 - SU1=SU1+DN(K)*QM(J) -30 SD1=SD1+DN(K)*QD(J) - GA=((X-1.0D0)/(X+1.0D0))**(0.5D0*M) - DO 55 K=1,M - J=M-2*K+IP - IF (J.GE.0) GO TO 55 - IF (J.LT.0) J=-J-1 - R1=1.0D0 - DO 35 J1=1,J -35 R1=(M+J1)*R1 - R2=1.0D0 - DO 40 J2=1,M-J-2 -40 R2=J2*R2 - R3=1.0D0 - SF=1.0D0 - DO 45 L1=1,J - R3=0.5D0*R3*(-J+L1-1.0)*(J+L1)/((M+L1)*L1)*(1.0-X) -45 SF=SF+R3 - IF (M-J.GE.2) GB=(M-J-1.0D0)*R2 - IF (M-J.LE.1) GB=1.0D0 - SPL=R1*GA*GB*SF - SU1=SU1+(-1)**(J+M)*DN(K)*SPL - SPD1=M/(X*X-1.0D0)*SPL - GC=0.5D0*J*(J+1.0)/(M+1.0) - SD=1.0D0 - R4=1.0D0 - DO 50 L1=1,J-1 - R4=0.5D0*R4*(-J+L1)*(J+L1+1.0)/((M+L1+1.0)*L1) - & *(1.0-X) -50 SD=SD+R4 - SPD2=R1*GA*GB*GC*SD - SD1=SD1+(-1)**(J+M)*DN(K)*(SPD1+SPD2) -55 CONTINUE - SU2=0.0D0 - KI=(2*M+1+IP)/2 - NM3=NM+KI - DO 60 K=KI,NM3 - J=2*K-1-M-IP - SU2=SU2+DN(K)*PM(J) - IF (J.GT.M.AND.DABS(SU2-SW).LT.DABS(SU2)*EPS) GO TO 65 -60 SW=SU2 -65 SD2=0.0D0 - DO 70 K=KI,NM3 - J=2*K-1-M-IP - SD2=SD2+DN(K)*PD(J) - IF (J.GT.M.AND.DABS(SD2-SW).LT.DABS(SD2)*EPS) GO TO 75 -70 SW=SD2 -75 SUM=SU0+SU1+SU2 - SDM=SD0+SD1+SD2 - R2F=SUM/CK2 - R2D=SDM/CK2 - RETURN - END - - - -C ********************************** - - SUBROUTINE BERNOB(N,BN) -C -C ====================================== -C Purpose: Compute Bernoulli number Bn -C Input : n --- Serial number -C 
Output: BN(n) --- Bn -C ====================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BN(0:N) - TPI=6.283185307179586D0 - BN(0)=1.0D0 - BN(1)=-0.5D0 - BN(2)=1.0D0/6.0D0 - R1=(2.0D0/TPI)**2 - DO 20 M=4,N,2 - R1=-R1*(M-1)*M/(TPI*TPI) - R2=1.0D0 - DO 10 K=2,10000 - S=(1.0D0/K)**M - R2=R2+S - IF (S.LT.1.0D-15) GOTO 20 -10 CONTINUE -20 BN(M)=R1*R2 - RETURN - END - -C ********************************** - - SUBROUTINE BERNOA(N,BN) -C -C ====================================== -C Purpose: Compute Bernoulli number Bn -C Input : n --- Serial number -C Output: BN(n) --- Bn -C ====================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BN(0:N) - BN(0)=1.0D0 - BN(1)=-0.5D0 - DO 30 M=2,N - S=-(1.0D0/(M+1.0D0)-0.5D0) - DO 20 K=2,M-1 - R=1.0D0 - DO 10 J=2,K -10 R=R*(J+M-K)/J -20 S=S-R*BN(K) -30 BN(M)=S - DO 40 M=3,N,2 -40 BN(M)=0.0D0 - RETURN - END - -C ********************************** - - SUBROUTINE QSTAR(M,N,C,CK,CK1,QS,QT) -C -C ========================================================= -C Purpose: Compute Q*mn(-ic) for oblate radial functions -C with a small argument -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION AP(200),CK(200) - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - R=1.0D0/CK(1)**2 - AP(1)=R - DO 20 I=1,M - S=0.0D0 - DO 15 L=1,I - SK=0.0D0 - DO 10 K=0,L -10 SK=SK+CK(K+1)*CK(L-K+1) -15 S=S+SK*AP(I-L+1) -20 AP(I+1)=-R*S - QS0=AP(M+1) - DO 30 L=1,M - R=1.0D0 - DO 25 K=1,L -25 R=R*(2.0D0*K+IP)*(2.0D0*K-1.0D0+IP)/(2.0D0*K)**2 -30 QS0=QS0+AP(M-L+1)*R - QS=(-1)**IP*CK1*(CK1*QS0)/C - QT=-2.0D0/CK1*QS - RETURN - END - - - -C ********************************** - - SUBROUTINE CV0(KD,M,Q,A0) -C -C ===================================================== -C Purpose: Compute the initial characteristic value of -C Mathieu functions for m ≤ 12 or q ≤ 300 or -C q ≥ m*m -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C Output: 
A0 --- Characteristic value -C Routines called: -C (1) CVQM for computing initial characteristic -C value for q ≤ 3*m -C (2) CVQL for computing initial characteristic -C value for q ≥ m*m -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - Q2=Q*Q - IF (M.EQ.0) THEN - IF (Q.LE.1.0) THEN - A0=(((.0036392*Q2-.0125868)*Q2+.0546875)*Q2-.5)*Q2 - ELSE IF (Q.LE.10.0) THEN - A0=((3.999267D-3*Q-9.638957D-2)*Q-.88297)*Q - & +.5542818 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.1) THEN - IF (Q.LE.1.0.AND.KD.EQ.2) THEN - A0=(((-6.51E-4*Q-.015625)*Q-.125)*Q+1.0)*Q+1.0 - ELSE IF (Q.LE.1.0.AND.KD.EQ.3) THEN - A0=(((-6.51E-4*Q+.015625)*Q-.125)*Q-1.0)*Q+1.0 - ELSE IF (Q.LE.10.0.AND. KD.EQ.2) THEN - A0=(((-4.94603D-4*Q+1.92917D-2)*Q-.3089229) - & *Q+1.33372)*Q+.811752 - ELSE IF (Q.LE.10.0.AND.KD.EQ.3) THEN - A0=((1.971096D-3*Q-5.482465D-2)*Q-1.152218) - & *Q+1.10427 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.2) THEN - IF (Q.LE.1.0.AND.KD.EQ.1) THEN - A0=(((-.0036391*Q2+.0125888)*Q2-.0551939)*Q2 - & +.416667)*Q2+4.0 - ELSE IF (Q.LE.1.0.AND.KD.EQ.4) THEN - A0=(.0003617*Q2-.0833333)*Q2+4.0 - ELSE IF (Q.LE.15.AND.KD.EQ.1) THEN - A0=(((3.200972D-4*Q-8.667445D-3)*Q - & -1.829032D-4)*Q+.9919999)*Q+3.3290504 - ELSE IF (Q.LE.10.0.AND.KD.EQ.4) THEN - A0=((2.38446D-3*Q-.08725329)*Q-4.732542D-3) - & *Q+4.00909 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.3) THEN - IF (Q.LE.1.0.AND.KD.EQ.2) THEN - A0=((6.348E-4*Q+.015625)*Q+.0625)*Q2+9.0 - ELSE IF (Q.LE.1.0.AND.KD.EQ.3) THEN - A0=((6.348E-4*Q-.015625)*Q+.0625)*Q2+9.0 - ELSE IF (Q.LE.20.0.AND.KD.EQ.2) THEN - A0=(((3.035731D-4*Q-1.453021D-2)*Q - & +.19069602)*Q-.1039356)*Q+8.9449274 - ELSE IF (Q.LE.15.0.AND.KD.EQ.3) THEN - A0=((9.369364D-5*Q-.03569325)*Q+.2689874)*Q - & +8.771735 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.4) THEN - IF (Q.LE.1.0.AND.KD.EQ.1) THEN - A0=((-2.1E-6*Q2+5.012E-4)*Q2+.0333333)*Q2+16.0 - ELSE IF (Q.LE.1.0.AND.KD.EQ.4) THEN - 
A0=((3.7E-6*Q2-3.669E-4)*Q2+.0333333)*Q2+16.0 - ELSE IF (Q.LE.25.0.AND.KD.EQ.1) THEN - A0=(((1.076676D-4*Q-7.9684875D-3)*Q - & +.17344854)*Q-.5924058)*Q+16.620847 - ELSE IF (Q.LE.20.0.AND.KD.EQ.4) THEN - A0=((-7.08719D-4*Q+3.8216144D-3)*Q - & +.1907493)*Q+15.744 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.5) THEN - IF (Q.LE.1.0.AND.KD.EQ.2) THEN - A0=((6.8E-6*Q+1.42E-5)*Q2+.0208333)*Q2+25.0 - ELSE IF (Q.LE.1.0.AND.KD.EQ.3) THEN - A0=((-6.8E-6*Q+1.42E-5)*Q2+.0208333)*Q2+25.0 - ELSE IF (Q.LE.35.0.AND.KD.EQ.2) THEN - A0=(((2.238231D-5*Q-2.983416D-3)*Q - & +.10706975)*Q-.600205)*Q+25.93515 - ELSE IF (Q.LE.25.0.AND.KD.EQ.3) THEN - A0=((-7.425364D-4*Q+2.18225D-2)*Q - & +4.16399D-2)*Q+24.897 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.6) THEN - IF (Q.LE.1.0) THEN - A0=(.4D-6*Q2+.0142857)*Q2+36.0 - ELSE IF (Q.LE.40.0.AND.KD.EQ.1) THEN - A0=(((-1.66846D-5*Q+4.80263D-4)*Q - & +2.53998D-2)*Q-.181233)*Q+36.423 - ELSE IF (Q.LE.35.0.AND.KD.EQ.4) THEN - A0=((-4.57146D-4*Q+2.16609D-2)*Q-2.349616D-2)*Q - & +35.99251 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.EQ.7) THEN - IF (Q.LE.10.0) THEN - CALL CVQM(M,Q,A0) - ELSE IF (Q.LE.50.0.AND.KD.EQ.2) THEN - A0=(((-1.411114D-5*Q+9.730514D-4)*Q - & -3.097887D-3)*Q+3.533597D-2)*Q+49.0547 - ELSE IF (Q.LE.40.0.AND.KD.EQ.3) THEN - A0=((-3.043872D-4*Q+2.05511D-2)*Q - & -9.16292D-2)*Q+49.19035 - ELSE - CALL CVQL(KD,M,Q,A0) - ENDIF - ELSE IF (M.GE.8) THEN - IF (Q.LE.3.*M) THEN - CALL CVQM(M,Q,A0) - ELSE IF (Q.GT.M*M) THEN - CALL CVQL(KD,M,Q,A0) - ELSE - IF (M.EQ.8.AND.KD.EQ.1) THEN - A0=(((8.634308D-6*Q-2.100289D-3)*Q+.169072)*Q - & -4.64336)*Q+109.4211 - ELSE IF (M.EQ.8.AND.KD.EQ.4) THEN - A0=((-6.7842D-5*Q+2.2057D-3)*Q+.48296)*Q+56.59 - ELSE IF (M.EQ.9.AND.KD.EQ.2) THEN - A0=(((2.906435D-6*Q-1.019893D-3)*Q+.1101965)*Q - & -3.821851)*Q+127.6098 - ELSE IF (M.EQ.9.AND.KD.EQ.3) THEN - A0=((-9.577289D-5*Q+.01043839)*Q+.06588934)*Q - & +78.0198 - ELSE IF (M.EQ.10.AND.KD.EQ.1) THEN - 
A0=(((5.44927D-7*Q-3.926119D-4)*Q+.0612099)*Q - & -2.600805)*Q+138.1923 - ELSE IF (M.EQ.10.AND.KD.EQ.4) THEN - A0=((-7.660143D-5*Q+.01132506)*Q-.09746023)*Q - & +99.29494 - ELSE IF (M.EQ.11.AND.KD.EQ.2) THEN - A0=(((-5.67615D-7*Q+7.152722D-6)*Q+.01920291)*Q - & -1.081583)*Q+140.88 - ELSE IF (M.EQ.11.AND.KD.EQ.3) THEN - A0=((-6.310551D-5*Q+.0119247)*Q-.2681195)*Q - & +123.667 - ELSE IF (M.EQ.12.AND.KD.EQ.1) THEN - A0=(((-2.38351D-7*Q-2.90139D-5)*Q+.02023088)*Q - & -1.289)*Q+171.2723 - ELSE IF (M.EQ.12.AND.KD.EQ.4) THEN - A0=(((3.08902D-7*Q-1.577869D-4)*Q+.0247911)*Q - & -1.05454)*Q+161.471 - ENDIF - ENDIF - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CVQM(M,Q,A0) -C -C ===================================================== -C Purpose: Compute the characteristic value of Mathieu -C functions for q ≤ m*m -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C Output: A0 --- Initial characteristic value -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - HM1=.5*Q/(M*M-1.0) - HM3=.25*HM1**3/(M*M-4.0) - HM5=HM1*HM3*Q/((M*M-1.0)*(M*M-9.0)) - A0=M*M+Q*(HM1+(5.0*M*M+7.0)*HM3 - & +(9.0*M**4+58.0*M*M+29.0)*HM5) - RETURN - END - -C ********************************** - - SUBROUTINE CVQL(KD,M,Q,A0) -C -C ======================================================== -C Purpose: Compute the characteristic value of Mathieu -C functions for q ≥ 3m -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C Output: A0 --- Initial characteristic value -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - W=0.0D0 - IF (KD.EQ.1.OR.KD.EQ.2) W=2.0D0*M+1.0D0 - IF (KD.EQ.3.OR.KD.EQ.4) W=2.0D0*M-1.0D0 - W2=W*W - W3=W*W2 - W4=W2*W2 - W6=W2*W4 - D1=5.0+34.0/W2+9.0/W4 - D2=(33.0+410.0/W2+405.0/W4)/W - D3=(63.0+1260.0/W2+2943.0/W4+486.0/W6)/W2 - D4=(527.0+15617.0/W2+69001.0/W4+41607.0/W6)/W3 - C1=128.0 - P2=Q/W4 - 
P1=DSQRT(P2) - CV1=-2.0*Q+2.0*W*DSQRT(Q)-(W2+1.0)/8.0 - CV2=(W+3.0/W)+D1/(32.0*P1)+D2/(8.0*C1*P2) - CV2=CV2+D3/(64.0*C1*P1*P2)+D4/(16.0*C1*C1*P2*P2) - A0=CV1-CV2/(C1*P1) - RETURN - END - - - -C ********************************** - - SUBROUTINE CSPHJY(N,Z,NM,CSJ,CDJ,CSY,CDY) -C -C ========================================================== -C Purpose: Compute spherical Bessel functions jn(z) & yn(z) -C and their derivatives for a complex argument -C Input : z --- Complex argument -C n --- Order of jn(z) & yn(z) ( n = 0,1,2,... ) -C Output: CSJ(n) --- jn(z) -C CDJ(n) --- jn'(z) -C CSY(n) --- yn(z) -C CDY(n) --- yn'(z) -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ========================================================== -C - IMPLICIT COMPLEX*16 (C,Z) - DOUBLE PRECISION A0 - DIMENSION CSJ(0:N),CDJ(0:N),CSY(0:N),CDY(0:N) - A0=CDABS(Z) - NM=N - IF (A0.LT.1.0D-60) THEN - DO 10 K=0,N - CSJ(K)=0.0D0 - CDJ(K)=0.0D0 - CSY(K)=-1.0D+300 -10 CDY(K)=1.0D+300 - CSJ(0)=(1.0D0,0.0D0) - IF (N.GT.0) THEN - CDJ(1)=(.333333333333333D0,0.0D0) - ENDIF - RETURN - ENDIF - CSJ(0)=CDSIN(Z)/Z - CDJ(0)=(CDCOS(Z)-CDSIN(Z)/Z)/Z - CSY(0)=-CDCOS(Z)/Z - CDY(0)=(CDSIN(Z)+CDCOS(Z)/Z)/Z - IF (N.LT.1) THEN - RETURN - ENDIF - CSJ(1)=(CSJ(0)-CDCOS(Z))/Z - IF (N.GE.2) THEN - CSA=CSJ(0) - CSB=CSJ(1) - M=MSTA1(A0,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(A0,N,15) - ENDIF - CF0=0.0D0 - CF1=1.0D0-100 - DO 15 K=M,0,-1 - CF=(2.0D0*K+3.0D0)*CF1/Z-CF0 - IF (K.LE.NM) CSJ(K)=CF - CF0=CF1 -15 CF1=CF - IF (CDABS(CSA).GT.CDABS(CSB)) CS=CSA/CF1 - IF (CDABS(CSA).LE.CDABS(CSB)) CS=CSB/CF0 - DO 20 K=0,NM -20 CSJ(K)=CS*CSJ(K) - ENDIF - DO 25 K=1,NM -25 CDJ(K)=CSJ(K-1)-(K+1.0D0)*CSJ(K)/Z - CSY(1)=(CSY(0)-CDSIN(Z))/Z - CDY(1)=(2.0D0*CDY(0)-CDCOS(Z))/Z - DO 30 K=2,NM - IF (CDABS(CSJ(K-1)).GT.CDABS(CSJ(K-2))) THEN - CSY(K)=(CSJ(K)*CSY(K-1)-1.0D0/(Z*Z))/CSJ(K-1) - ELSE - CSY(K)=(CSJ(K)*CSY(K-2)-(2.0D0*K-1.0D0)/Z**3)/CSJ(K-2) - ENDIF 
-30 CONTINUE - DO 35 K=2,NM -35 CDY(K)=CSY(K-1)-(K+1.0D0)*CSY(K)/Z - RETURN - END - - - INTEGER FUNCTION MSTA1(X,MP) -C -C =================================================== -C Purpose: Determine the starting point for backward -C recurrence such that the magnitude of -C Jn(x) at that point is about 10^(-MP) -C Input : x --- Argument of Jn(x) -C MP --- Value of magnitude -C Output: MSTA1 --- Starting point -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - A0=DABS(X) - N0=INT(1.1D0*A0)+1 - F0=ENVJ(N0,A0)-MP - N1=N0+5 - F1=ENVJ(N1,A0)-MP - DO 10 IT=1,20 - NN=N1-(N1-N0)/(1.0D0-F0/F1) - F=ENVJ(NN,A0)-MP - IF(ABS(NN-N1).LT.1) GO TO 20 - N0=N1 - F0=F1 - N1=NN - 10 F1=F - 20 MSTA1=NN - RETURN - END - - - INTEGER FUNCTION MSTA2(X,N,MP) -C -C =================================================== -C Purpose: Determine the starting point for backward -C recurrence such that all Jn(x) has MP -C significant digits -C Input : x --- Argument of Jn(x) -C n --- Order of Jn(x) -C MP --- Significant digit -C Output: MSTA2 --- Starting point -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - A0=DABS(X) - HMP=0.5D0*MP - EJN=ENVJ(N,A0) - IF (EJN.LE.HMP) THEN - OBJ=MP - N0=INT(1.1*A0)+1 - ELSE - OBJ=HMP+EJN - N0=N - ENDIF - F0=ENVJ(N0,A0)-OBJ - N1=N0+5 - F1=ENVJ(N1,A0)-OBJ - DO 10 IT=1,20 - NN=N1-(N1-N0)/(1.0D0-F0/F1) - F=ENVJ(NN,A0)-OBJ - IF (ABS(NN-N1).LT.1) GO TO 20 - N0=N1 - F0=F1 - N1=NN -10 F1=F -20 MSTA2=NN+10 - RETURN - END - - REAL*8 FUNCTION ENVJ(N,X) - DOUBLE PRECISION X - ENVJ=0.5D0*DLOG10(6.28D0*N)-N*DLOG10(1.36D0*X/N) - RETURN - END - -C ********************************** - - SUBROUTINE ITTJYB(X,TTJ,TTY) -C -C ========================================================== -C Purpose: Integrate [1-J0(t)]/t with respect to t from 0 -C to x, and Y0(t)/t with respect to t from x to ∞ -C Input : x --- Variable in the limits ( x ≥ 0 ) -C Output: TTJ --- Integration of [1-J0(t)]/t from 0 to x 
-C TTY --- Integration of Y0(t)/t from x to ∞ -C ========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - IF (X.EQ.0.0D0) THEN - TTJ=0.0D0 - TTY=-1.0D+300 - ELSE IF (X.LE.4.0D0) THEN - X1=X/4.0D0 - T=X1*X1 - TTJ=((((((.35817D-4*T-.639765D-3)*T+.7092535D-2)*T - & -.055544803D0)*T+.296292677D0)*T-.999999326D0) - & *T+1.999999936D0)*T - TTY=(((((((-.3546D-5*T+.76217D-4)*T-.1059499D-2)*T - & +.010787555D0)*T-.07810271D0)*T+.377255736D0) - & *T-1.114084491D0)*T+1.909859297D0)*T - E0=EL+DLOG(X/2.0D0) - TTY=PI/6.0D0+E0/PI*(2.0D0*TTJ-E0)-TTY - ELSE IF (X.LE.8.0D0) THEN - XT=X+.25D0*PI - T1=4.0D0/X - T=T1*T1 - F0=(((((.0145369D0*T-.0666297D0)*T+.1341551D0)*T - & -.1647797D0)*T+.1608874D0)*T-.2021547D0)*T - & +.7977506D0 - G0=((((((.0160672D0*T-.0759339D0)*T+.1576116D0)*T - & -.1960154D0)*T+.1797457D0)*T-.1702778D0)*T - & +.3235819D0)*T1 - TTJ=(F0*DCOS(XT)+G0*DSIN(XT))/(DSQRT(X)*X) - TTJ=TTJ+EL+DLOG(X/2.0D0) - TTY=(F0*DSIN(XT)-G0*DCOS(XT))/(DSQRT(X)*X) - ELSE - T=8.0D0/X - XT=X+.25D0*PI - F0=(((((.18118D-2*T-.91909D-2)*T+.017033D0)*T - & -.9394D-3)*T-.051445D0)*T-.11D-5)*T+.7978846D0 - G0=(((((-.23731D-2*T+.59842D-2)*T+.24437D-2)*T - & -.0233178D0)*T+.595D-4)*T+.1620695D0)*T - TTJ=(F0*DCOS(XT)+G0*DSIN(XT))/(DSQRT(X)*X) - & +EL+DLOG(X/2.0D0) - TTY=(F0*DSIN(XT)-G0*DCOS(XT))/(DSQRT(X)*X) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE ITTJYA(X,TTJ,TTY) -C -C ========================================================= -C Purpose: Integrate [1-J0(t)]/t with respect to t from 0 -C to x, and Y0(t)/t with respect to t from x to ∞ -C Input : x --- Variable in the limits ( x ≥ 0 ) -C Output: TTJ --- Integration of [1-J0(t)]/t from 0 to x -C TTY --- Integration of Y0(t)/t from x to ∞ -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - IF (X.EQ.0.0D0) THEN - 
TTJ=0.0D0 - TTY=-1.0D+300 - ELSE IF (X.LE.20.0D0) THEN - TTJ=1.0D0 - R=1.0D0 - DO 10 K=2,100 - R=-.25D0*R*(K-1.0D0)/(K*K*K)*X*X - TTJ=TTJ+R - IF (DABS(R).LT.DABS(TTJ)*1.0D-12) GO TO 15 -10 CONTINUE -15 TTJ=TTJ*.125D0*X*X - E0=.5D0*(PI*PI/6.0D0-EL*EL)-(.5D0*DLOG(X/2.0D0)+EL) - & *DLOG(X/2.0D0) - B1=EL+DLOG(X/2.0D0)-1.5D0 - RS=1.0D0 - R=-1.0D0 - DO 20 K=2,100 - R=-.25D0*R*(K-1.0D0)/(K*K*K)*X*X - RS=RS+1.0D0/K - R2=R*(RS+1.0D0/(2.0D0*K)-(EL+DLOG(X/2.0D0))) - B1=B1+R2 - IF (DABS(R2).LT.DABS(B1)*1.0D-12) GO TO 25 -20 CONTINUE -25 TTY=2.0D0/PI*(E0+.125D0*X*X*B1) - ELSE - A0=DSQRT(2.0D0/(PI*X)) - BJ0=0.0D0 - BY0=0.0D0 - BJ1=0.0D0 - DO 50 L=0,1 - VT=4.0D0*L*L - PX=1.0D0 - R=1.0D0 - DO 30 K=1,14 - R=-.0078125D0*R*(VT-(4.0D0*K-3.0D0)**2) - & /(X*K)*(VT-(4.0D0*K-1.0D0)**2) - & /((2.0D0*K-1.0D0)*X) - PX=PX+R - IF (DABS(R).LT.DABS(PX)*1.0D-12) GO TO 35 -30 CONTINUE -35 QX=1.0D0 - R=1.0D0 - DO 40 K=1,14 - R=-.0078125D0*R*(VT-(4.0D0*K-1.0D0)**2) - & /(X*K)*(VT-(4.0D0*K+1.0D0)**2) - & /(2.0D0*K+1.0D0)/X - QX=QX+R - IF (DABS(R).LT.DABS(QX)*1.0D-12) GO TO 45 -40 CONTINUE -45 QX=.125D0*(VT-1.0D0)/X*QX - XK=X-(.25D0+.5D0*L)*PI - BJ1=A0*(PX*DCOS(XK)-QX*DSIN(XK)) - BY1=A0*(PX*DSIN(XK)+QX*DCOS(XK)) - IF (L.EQ.0) THEN - BJ0=BJ1 - BY0=BY1 - ENDIF -50 CONTINUE - T=2.0D0/X - G0=1.0D0 - R0=1.0D0 - DO 55 K=1,10 - R0=-K*K*T*T*R0 -55 G0=G0+R0 - G1=1.0D0 - R1=1.0D0 - DO 60 K=1,10 - R1=-K*(K+1.0D0)*T*T*R1 -60 G1=G1+R1 - TTJ=2.0D0*G1*BJ0/(X*X)-G0*BJ1/X+EL+DLOG(X/2.0D0) - TTY=2.0D0*G1*BY0/(X*X)-G0*BY1/X - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CJYLV(V,Z,CBJV,CDJV,CBYV,CDYV) -C -C =================================================== -C Purpose: Compute Bessel functions Jv(z) and Yv(z) -C and their derivatives with a complex -C argument and a large order -C Input: v --- Order of Jv(z) and Yv(z) -C z --- Complex argument -C Output: CBJV --- Jv(z) -C CDJV --- Jv'(z) -C CBYV --- Yv(z) -C CDYV --- Yv'(z) -C Routine called: -C CJK to compute the expansion coefficients -C 
=================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CF(12),A(91) - KM=12 - CALL CJK(KM,A) - PI=3.141592653589793D0 - DO 30 L=1,0,-1 - V0=V-L - CWS=CDSQRT(1.0D0-(Z/V0)*(Z/V0)) - CETA=CWS+CDLOG(Z/V0/(1.0D0+CWS)) - CT=1.0D0/CWS - CT2=CT*CT - DO 15 K=1,KM - L0=K*(K+1)/2+1 - LF=L0+K - CF(K)=A(LF) - DO 10 I=LF-1,L0,-1 -10 CF(K)=CF(K)*CT2+A(I) -15 CF(K)=CF(K)*CT**K - VR=1.0D0/V0 - CSJ=(1.0D0,0.0D0) - DO 20 K=1,KM -20 CSJ=CSJ+CF(K)*VR**K - CBJV=CDSQRT(CT/(2.0D0*PI*V0))*CDEXP(V0*CETA)*CSJ - IF (L.EQ.1) CFJ=CBJV - CSY=(1.0D0,0.0D0) - DO 25 K=1,KM -25 CSY=CSY+(-1)**K*CF(K)*VR**K - CBYV=-CDSQRT(2.0D0*CT/(PI*V0))*CDEXP(-V0*CETA)*CSY - IF (L.EQ.1) CFY=CBYV -30 CONTINUE - CDJV=-V/Z*CBJV+CFJ - CDYV=-V/Z*CBYV+CFY - RETURN - END - - - -C ********************************** - - SUBROUTINE RMN2L(M,N,C,X,DF,KD,R2F,R2D,ID) -C -C ======================================================== -C Purpose: Compute prolate and oblate spheroidal radial -C functions of the second kind for given m, n, -C c and a large cx -C Routine called: -C SPHY for computing the spherical Bessel -C functions of the second kind -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DF(200),SY(0:251),DY(0:251) - EPS=1.0D-14 - IP=1 - NM1=INT((N-M)/2) - IF (N-M.EQ.2*NM1) IP=0 - NM=25+NM1+INT(C) - REG=1.0D0 - IF (M+NM.GT.80) REG=1.0D-200 - NM2=2*NM+M - CX=C*X - CALL SPHY(NM2,CX,NM2,SY,DY) - R0=REG - DO 10 J=1,2*M+IP -10 R0=R0*J - R=R0 - SUC=R*DF(1) - SW=0.0D0 - DO 15 K=2,NM - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - SUC=SUC+R*DF(K) - IF (K.GT.NM1.AND.DABS(SUC-SW).LT.DABS(SUC)*EPS) GO TO 20 -15 SW=SUC -20 A0=(1.0D0-KD/(X*X))**(0.5D0*M)/SUC - R2F=0.0D0 - EPS1=0.0D0 - NP=0 - DO 50 K=1,NM - L=2*K+M-N-2+IP - LG=1 - IF (L.NE.4*INT(L/4)) LG=-1 - IF (K.EQ.1) THEN - R=R0 - ELSE - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - ENDIF - NP=M+2*K-2+IP - 
R2F=R2F+LG*R*(DF(K)*SY(NP)) - EPS1=DABS(R2F-SW) - IF (K.GT.NM1.AND.EPS1.LT.DABS(R2F)*EPS) GO TO 55 -50 SW=R2F -55 ID1=INT(LOG10(EPS1/DABS(R2F)+EPS)) - R2F=R2F*A0 - IF (NP.GE.NM2) THEN - ID=10 - RETURN - ENDIF - B0=KD*M/X**3.0D0/(1.0-KD/(X*X))*R2F - SUD=0.0D0 - EPS2=0.0D0 - DO 60 K=1,NM - L=2*K+M-N-2+IP - LG=1 - IF (L.NE.4*INT(L/4)) LG=-1 - IF (K.EQ.1) THEN - R=R0 - ELSE - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - ENDIF - NP=M+2*K-2+IP - SUD=SUD+LG*R*(DF(K)*DY(NP)) - EPS2=DABS(SUD-SW) - IF (K.GT.NM1.AND.EPS2.LT.DABS(SUD)*EPS) GO TO 65 -60 SW=SUD -65 R2D=B0+A0*C*SUD - ID2=INT(LOG10(EPS2/DABS(SUD)+EPS)) - ID=MAX(ID1,ID2) - RETURN - END - - - -C ********************************** - - SUBROUTINE PSI_SPEC(X,PS) -C -C ====================================== -C Purpose: Compute Psi function -C Input : x --- Argument of psi(x) -C Output: PS --- psi(x) -C ====================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - XA=DABS(X) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - S=0.0D0 - IF (X.EQ.INT(X).AND.X.LE.0.0) THEN - PS=1.0D+300 - RETURN - ELSE IF (XA.EQ.INT(XA)) THEN - N=XA - DO 10 K=1 ,N-1 -10 S=S+1.0D0/K - PS=-EL+S - ELSE IF (XA+.5.EQ.INT(XA+.5)) THEN - N=XA-.5 - DO 20 K=1,N -20 S=S+1.0/(2.0D0*K-1.0D0) - PS=-EL+2.0D0*S-1.386294361119891D0 - ELSE - IF (XA.LT.10.0) THEN - N=10-INT(XA) - DO 30 K=0,N-1 -30 S=S+1.0D0/(XA+K) - XA=XA+N - ENDIF - X2=1.0D0/(XA*XA) - A1=-.8333333333333D-01 - A2=.83333333333333333D-02 - A3=-.39682539682539683D-02 - A4=.41666666666666667D-02 - A5=-.75757575757575758D-02 - A6=.21092796092796093D-01 - A7=-.83333333333333333D-01 - A8=.4432598039215686D0 - PS=DLOG(XA)-.5D0/XA+X2*(((((((A8*X2+A7)*X2+ - & A6)*X2+A5)*X2+A4)*X2+A3)*X2+A2)*X2+A1) - PS=PS-S - ENDIF - IF (X.LT.0.0) PS=PS-PI*DCOS(PI*X)/DSIN(PI*X)-1.0D0/X - RETURN - END - -C ********************************** - - SUBROUTINE CVA2(KD,M,Q,A) -C -C ====================================================== -C Purpose: Calculate a specific characteristic value of -C 
Mathieu functions -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C KD --- Case code -C KD=1 for cem(x,q) ( m = 0,2,4,...) -C KD=2 for cem(x,q) ( m = 1,3,5,...) -C KD=3 for sem(x,q) ( m = 1,3,5,...) -C KD=4 for sem(x,q) ( m = 2,4,6,...) -C Output: A --- Characteristic value -C Routines called: -C (1) REFINE for finding accurate characteristic -C value using an iteration method -C (2) CV0 for finding initial characteristic -C values using polynomial approximation -C (3) CVQM for computing initial characteristic -C values for q ≤ 3*m -C (3) CVQL for computing initial characteristic -C values for q ≥ m*m -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (M.LE.12.OR.Q.LE.3.0*M.OR.Q.GT.M*M) THEN - CALL CV0(KD,M,Q,A) - IF (Q.NE.0.0D0) CALL REFINE(KD,M,Q,A) - ELSE - NDIV=10 - DELTA=(M-3.0)*M/NDIV - IF ((Q-3.0*M).LE.(M*M-Q)) THEN -5 NN=INT((Q-3.0*M)/DELTA)+1 - DELTA=(Q-3.0*M)/NN - Q1=2.0*M - CALL CVQM(M,Q1,A1) - Q2=3.0*M - CALL CVQM(M,Q2,A2) - QQ=3.0*M - DO 10 I=1,NN - QQ=QQ+DELTA - A=(A1*Q2-A2*Q1+(A2-A1)*QQ)/(Q2-Q1) - IFLAG=1 - IF (I.EQ.NN) IFLAG=-1 - CALL REFINE(KD,M,QQ,A) - Q1=Q2 - Q2=QQ - A1=A2 - A2=A -10 CONTINUE - IF (IFLAG.EQ.-10) THEN - NDIV=NDIV*2 - DELTA=(M-3.0)*M/NDIV - GO TO 5 - ENDIF - ELSE -15 NN=INT((M*M-Q)/DELTA)+1 - DELTA=(M*M-Q)/NN - Q1=M*(M-1.0) - CALL CVQL(KD,M,Q1,A1) - Q2=M*M - CALL CVQL(KD,M,Q2,A2) - QQ=M*M - DO 20 I=1,NN - QQ=QQ-DELTA - A=(A1*Q2-A2*Q1+(A2-A1)*QQ)/(Q2-Q1) - IFLAG=1 - IF (I.EQ.NN) IFLAG=-1 - CALL REFINE(KD,M,QQ,A) - Q1=Q2 - Q2=QQ - A1=A2 - A2=A -20 CONTINUE - IF (IFLAG.EQ.-10) THEN - NDIV=NDIV*2 - DELTA=(M-3.0)*M/NDIV - GO TO 15 - ENDIF - ENDIF - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE LPMNS(M,N,X,PM,PD) -C -C ======================================================== -C Purpose: Compute associated Legendre functions Pmn(x) -C and Pmn'(x) for a given order -C Input : x --- Argument of Pmn(x) -C m --- Order of 
Pmn(x), m = 0,1,2,...,n -C n --- Degree of Pmn(x), n = 0,1,2,...,N -C Output: PM(n) --- Pmn(x) -C PD(n) --- Pmn'(x) -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION PM(0:N),PD(0:N) - DO 10 K=0,N - PM(K)=0.0D0 -10 PD(K)=0.0D0 - IF (DABS(X).EQ.1.0D0) THEN - DO 15 K=0,N - IF (M.EQ.0) THEN - PM(K)=1.0D0 - PD(K)=0.5D0*K*(K+1.0) - IF (X.LT.0.0) THEN - PM(K)=(-1)**K*PM(K) - PD(K)=(-1)**(K+1)*PD(K) - ENDIF - ELSE IF (M.EQ.1) THEN - PD(K)=1.0D+300 - ELSE IF (M.EQ.2) THEN - PD(K)=-0.25D0*(K+2.0)*(K+1.0)*K*(K-1.0) - IF (X.LT.0.0) PD(K)=(-1)**(K+1)*PD(K) - ENDIF -15 CONTINUE - RETURN - ENDIF - X0=DABS(1.0D0-X*X) - PM0=1.0D0 - PMK=PM0 - DO 20 K=1,M - PMK=(2.0D0*K-1.0D0)*DSQRT(X0)*PM0 -20 PM0=PMK - PM1=(2.0D0*M+1.0D0)*X*PM0 - PM(M)=PMK - PM(M+1)=PM1 - DO 25 K=M+2,N - PM2=((2.0D0*K-1.0D0)*X*PM1-(K+M-1.0D0)*PMK)/(K-M) - PM(K)=PM2 - PMK=PM1 -25 PM1=PM2 - PD(0)=((1.0D0-M)*PM(1)-X*PM(0))/(X*X-1.0) - DO 30 K=1,N -30 PD(K)=(K*X*PM(K)-(K+M)*PM(K-1))/(X*X-1.0D0) - DO 35 K=1,N - PM(K)=(-1)**M*PM(K) -35 PD(K)=(-1)**M*PD(K) - RETURN - END - -C ********************************** - - SUBROUTINE CERF(Z,CER,CDER) -C -C ========================================================== -C Purpose: Compute complex Error function erf(z) & erf'(z) -C Input: z --- Complex argument of erf(z) -C x --- Real part of z -C y --- Imaginary part of z -C Output: CER --- erf(z) -C CDER --- erf'(z) -C ========================================================== - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - COMPLEX *16 Z,CER,CDER - EPS=1.0D-12 - PI=3.141592653589793D0 - X=DBLE(Z) - Y=DIMAG(Z) - X2=X*X - IF (X.LE.3.5D0) THEN - ER=1.0D0 - R=1.0D0 - W=0.0D0 - DO 10 K=1,100 - R=R*X2/(K+0.5D0) - ER=ER+R - IF (DABS(ER-W).LE.EPS*DABS(ER)) GO TO 15 -10 W=ER -15 C0=2.0D0/DSQRT(PI)*X*DEXP(-X2) - ER0=C0*ER - ELSE - ER=1.0D0 - R=1.0D0 - DO 20 K=1,12 - R=-R*(K-0.5D0)/X2 -20 ER=ER+R - C0=DEXP(-X2)/(X*DSQRT(PI)) - ER0=1.0D0-C0*ER - ENDIF - IF (Y.EQ.0.0D0) THEN - ERR=ER0 - 
ERI=0.0D0 - ELSE - CS=DCOS(2.0D0*X*Y) - SS=DSIN(2.0D0*X*Y) - ER1=DEXP(-X2)*(1.0D0-CS)/(2.0D0*PI*X) - EI1=DEXP(-X2)*SS/(2.0D0*PI*X) - ER2=0.0D0 - W1=0.0D0 - DO 25 N=1,100 - ER2=ER2+DEXP(-.25D0*N*N)/(N*N+4.0D0*X2)*(2.0D0*X - & -2.0D0*X*DCOSH(N*Y)*CS+N*DSINH(N*Y)*SS) - IF (DABS((ER2-W1)/ER2).LT.EPS) GO TO 30 -25 W1=ER2 -30 C0=2.0D0*DEXP(-X2)/PI - ERR=ER0+ER1+C0*ER2 - EI2=0.0D0 - W2=0.0D0 - DO 35 N=1,100 - EI2=EI2+DEXP(-.25D0*N*N)/(N*N+4.0D0*X2)*(2.0D0*X - & *DCOSH(N*Y)*SS+N*DSINH(N*Y)*CS) - IF (DABS((EI2-W2)/EI2).LT.EPS) GO TO 40 -35 W2=EI2 -40 ERI=EI1+C0*EI2 - ENDIF - CER=CMPLX(ERR,ERI) - CDER=2.0D0/DSQRT(PI)*CDEXP(-Z*Z) - RETURN - END - -C ********************************** - - SUBROUTINE RSWFP(M,N,C,X,CV,KF,R1F,R1D,R2F,R2D) -C -C ============================================================== -C Purpose: Compute prolate spheriodal radial functions of the -C first and second kinds, and their derivatives -C Input : m --- Mode parameter, m = 0,1,2,... -C n --- Mode parameter, n = m,m+1,m+2,... 
-C c --- Spheroidal parameter -C x --- Argument of radial function ( x > 1.0 ) -C cv --- Characteristic value -C KF --- Function code -C KF=1 for the first kind -C KF=2 for the second kind -C KF=3 for both the first and second kinds -C Output: R1F --- Radial function of the first kind -C R1D --- Derivative of the radial function of -C the first kind -C R2F --- Radial function of the second kind -C R2D --- Derivative of the radial function of -C the second kind -C Routines called: -C (1) SDMN for computing expansion coefficients dk -C (2) RMN1 for computing prolate and oblate radial -C functions of the first kind -C (3) RMN2L for computing prolate and oblate radial -C functions of the second kind for a large argument -C (4) RMN2SP for computing the prolate radial function -C of the second kind for a small argument -C ============================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DF(200) - KD=1 - CALL SDMN(M,N,C,CV,KD,DF) - IF (KF.NE.2) THEN - CALL RMN1(M,N,C,X,DF,KD,R1F,R1D) - ENDIF - IF (KF.GT.1) THEN - CALL RMN2L(M,N,C,X,DF,KD,R2F,R2D,ID) - IF (ID.GT.-8) THEN - CALL RMN2SP(M,N,C,X,CV,DF,KD,R2F,R2D) - ENDIF - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE JYNDD(N,X,BJN,DJN,FJN,BYN,DYN,FYN) -C -C =========================================================== -C Purpose: Compute Bessel functions Jn(x) and Yn(x), and -C their first and second derivatives -C Input: x --- Argument of Jn(x) and Yn(x) ( x > 0 ) -C n --- Order of Jn(x) and Yn(x) -C Output: BJN --- Jn(x) -C DJN --- Jn'(x) -C FJN --- Jn"(x) -C BYN --- Yn(x) -C DYN --- Yn'(x) -C FYN --- Yn"(x) -C Routines called: -C JYNBH to compute Jn and Yn -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BJ(2),BY(2) - CALL JYNBH(N+1,N,X,NM,BJ,BY) -C Compute derivatives by differentiation formulas - BJN=BJ(1) - BYN=BY(1) - DJN=-BJ(2)+N*BJ(1)/X - DYN=-BY(2)+N*BY(1)/X - 
FJN=(N*N/(X*X)-1.0D0)*BJN-DJN/X - FYN=(N*N/(X*X)-1.0D0)*BYN-DYN/X - RETURN - END - - -C ********************************** - - SUBROUTINE GAM0 (X,GA) -C -C ================================================ -C Purpose: Compute gamma function Г(x) -C Input : x --- Argument of Г(x) ( |x| ≤ 1 ) -C Output: GA --- Г(x) -C ================================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION G(25) - DATA G/1.0D0,0.5772156649015329D0, - & -0.6558780715202538D0, -0.420026350340952D-1, - & 0.1665386113822915D0, -.421977345555443D-1, - & -.96219715278770D-2, .72189432466630D-2, - & -.11651675918591D-2, -.2152416741149D-3, - & .1280502823882D-3, -.201348547807D-4, - & -.12504934821D-5, .11330272320D-5, - & -.2056338417D-6, .61160950D-8, - & .50020075D-8, -.11812746D-8, - & .1043427D-9, .77823D-11, - & -.36968D-11, .51D-12, - & -.206D-13, -.54D-14, .14D-14/ - GR=(25) - DO 20 K=24,1,-1 -20 GR=GR*X+G(K) - GA=1.0D0/(GR*X) - RETURN - END - - -C ********************************** - - SUBROUTINE CISIB(X,CI,SI) -C -C ============================================= -C Purpose: Compute cosine and sine integrals -C Si(x) and Ci(x) ( x ≥ 0 ) -C Input : x --- Argument of Ci(x) and Si(x) -C Output: CI --- Ci(x) -C SI --- Si(x) -C ============================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - X2=X*X - IF (X.EQ.0.0) THEN - CI=-1.0D+300 - SI=0.0D0 - ELSE IF (X.LE.1.0D0) THEN - CI=((((-3.0D-8*X2+3.10D-6)*X2-2.3148D-4) - & *X2+1.041667D-2)*X2-0.25)*X2+0.577215665D0+LOG(X) - SI=((((3.1D-7*X2-2.834D-5)*X2+1.66667D-003) - & *X2-5.555556D-002)*X2+1.0)*X - ELSE - FX=((((X2+38.027264D0)*X2+265.187033D0)*X2 - & +335.67732D0)*X2+38.102495D0)/((((X2 - & +40.021433D0)*X2+322.624911D0)*X2 - & +570.23628D0)*X2+157.105423D0) - GX=((((X2+42.242855D0)*X2+302.757865D0)*X2 - & +352.018498D0)*X2+21.821899D0)/((((X2 - & +48.196927D0)*X2+482.485984D0)*X2 - & +1114.978885D0)*X2+449.690326D0)/X - CI=FX*SIN(X)/X-GX*COS(X)/X - 
SI=1.570796327D0-FX*COS(X)/X-GX*SIN(X)/X - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE EULERA(N,EN) -C -C ====================================== -C Purpose: Compute Euler number En -C Input : n --- Serial number -C Output: EN(n) --- En -C ====================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION EN(0:N) - EN(0)=1.0D0 - DO 30 M=1,N/2 - S=1.0D0 - DO 20 K=1,M-1 - R=1.0D0 - DO 10 J=1,2*K -10 R=R*(2.0D0*M-2.0D0*K+J)/J -20 S=S+R*EN(2*K) -30 EN(2*M)=-S - RETURN - END - -C ********************************** - - SUBROUTINE REFINE(KD,M,Q,A) -C -C ===================================================== -C Purpose: calculate the accurate characteristic value -C by the secant method -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C A --- Initial characteristic value -C Output: A --- Refineed characteristic value -C Routine called: CVF for computing the value of F for -C characteristic equation -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - EPS=1.0D-14 - MJ=10+M - CA=A - DELTA=0.0D0 - X0=A - CALL CVF(KD,M,Q,X0,MJ,F0) - X1=1.002*A - CALL CVF(KD,M,Q,X1,MJ,F1) - DO 10 IT=1,100 - MJ=MJ+1 - X=X1-(X1-X0)/(1.0D0-F0/F1) - CALL CVF(KD,M,Q,X,MJ,F) - IF (ABS(1.0-X1/X).LT.EPS.OR.F.EQ.0.0) GO TO 15 - X0=X1 - F0=F1 - X1=X -10 F1=F -15 A=X - RETURN - END - - - -C ********************************** - - SUBROUTINE CISIA(X,CI,SI) -C -C ============================================= -C Purpose: Compute cosine and sine integrals -C Si(x) and Ci(x) ( x ≥ 0 ) -C Input : x --- Argument of Ci(x) and Si(x) -C Output: CI --- Ci(x) -C SI --- Si(x) -C ============================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BJ(101) - P2=1.570796326794897D0 - EL=.5772156649015329D0 - EPS=1.0D-15 - X2=X*X - IF (X.EQ.0.0D0) THEN - CI=-1.0D+300 - SI=0.0D0 - ELSE IF (X.LE.16.0D0) THEN - XR=-.25D0*X2 - CI=EL+DLOG(X)+XR - DO 10 K=2,40 
- XR=-.5D0*XR*(K-1)/(K*K*(2*K-1))*X2 - CI=CI+XR - IF (DABS(XR).LT.DABS(CI)*EPS) GO TO 15 -10 CONTINUE -15 XR=X - SI=X - DO 20 K=1,40 - XR=-.5D0*XR*(2*K-1)/K/(4*K*K+4*K+1)*X2 - SI=SI+XR - IF (DABS(XR).LT.DABS(SI)*EPS) RETURN -20 CONTINUE - ELSE IF (X.LE.32.0D0) THEN - M=INT(47.2+.82*X) - XA1=0.0D0 - XA0=1.0D-100 - DO 25 K=M,1,-1 - XA=4.0D0*K*XA0/X-XA1 - BJ(K)=XA - XA1=XA0 -25 XA0=XA - XS=BJ(1) - DO 30 K=3,M,2 -30 XS=XS+2.0D0*BJ(K) - BJ(1)=BJ(1)/XS - DO 35 K=2,M -35 BJ(K)=BJ(K)/XS - XR=1.0D0 - XG1=BJ(1) - DO 40 K=2,M - XR=.25D0*XR*(2.0*K-3.0)**2/((K-1.0)*(2.0*K-1.0)**2)*X -40 XG1=XG1+BJ(K)*XR - XR=1.0D0 - XG2=BJ(1) - DO 45 K=2,M - XR=.25D0*XR*(2.0*K-5.0)**2/((K-1.0)*(2.0*K-3.0)**2)*X -45 XG2=XG2+BJ(K)*XR - XCS=DCOS(X/2.0D0) - XSS=DSIN(X/2.0D0) - CI=EL+DLOG(X)-X*XSS*XG1+2*XCS*XG2-2*XCS*XCS - SI=X*XCS*XG1+2*XSS*XG2-DSIN(X) - ELSE - XR=1.0D0 - XF=1.0D0 - DO 50 K=1,9 - XR=-2.0D0*XR*K*(2*K-1)/X2 -50 XF=XF+XR - XR=1.0D0/X - XG=XR - DO 55 K=1,8 - XR=-2.0D0*XR*(2*K+1)*K/X2 -55 XG=XG+XR - CI=XF*DSIN(X)/X-XG*DCOS(X)/X - SI=P2-XF*DCOS(X)/X-XG*DSIN(X)/X - ENDIF - RETURN - END - - -C ********************************** - - SUBROUTINE ITSL0(X,TL0) -C -C =========================================================== -C Purpose: Evaluate the integral of modified Struve function -C L0(t) with respect to t from 0 to x -C Input : x --- Upper limit ( x ≥ 0 ) -C Output: TL0 --- Integration of L0(t) from 0 to x -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(18) - PI=3.141592653589793D0 - R=1.0D0 - IF (X.LE.20.0) THEN - S=0.5D0 - DO 10 K=1,100 - RD=1.0D0 - IF (K.EQ.1) RD=0.5D0 - R=R*RD*K/(K+1.0D0)*(X/(2.0D0*K+1.0D0))**2 - S=S+R - IF (DABS(R/S).LT.1.0D-12) GO TO 15 -10 CONTINUE -15 TL0=2.0D0/PI*X*X*S - ELSE - S=1.0D0 - DO 20 K=1,10 - R=R*K/(K+1.0D0)*((2.0D0*K+1.0D0)/X)**2 - S=S+R - IF (DABS(R/S).LT.1.0D-12) GO TO 25 -20 CONTINUE -25 EL=.57721566490153D0 - S0=-S/(PI*X*X)+2.0D0/PI*(DLOG(2.0D0*X)+EL) - A0=1.0D0 - A1=5.0D0/8.0D0 
- A(1)=A1 - DO 30 K=1,10 - AF=((1.5D0*(K+.50D0)*(K+5.0D0/6.0D0)*A1-.5D0* - & (K+.5D0)**2*(K-.5D0)*A0))/(K+1.0D0) - A(K+1)=AF - A0=A1 -30 A1=AF - TI=1.0D0 - R=1.0D0 - DO 35 K=1,11 - R=R/X -35 TI=TI+A(K)*R - TL0=TI/DSQRT(2*PI*X)*DEXP(X)+S0 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE STVL1(X,SL1) -C -C ================================================ -C Purpose: Compute modified Struve function L1(x) -C Input : x --- Argument of L1(x) ( x ≥ 0 ) -C Output: SL1 --- L1(x) -C ================================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - R=1.0D0 - IF (X.LE.20.0D0) THEN - S=0.0D0 - DO 10 K=1,60 - R=R*X*X/(4.0D0*K*K-1.0D0) - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 15 -10 CONTINUE -15 SL1=2.0D0/PI*S - ELSE - S=1.0D0 - KM=INT(.50*X) - IF (X.GT.50) KM=25 - DO 20 K=1,KM - R=R*(2.0D0*K+3.0D0)*(2.0D0*K+1.0D0)/(X*X) - S=S+R - IF (DABS(R/S).LT.1.0D-12) GO TO 25 -20 CONTINUE -25 SL1=2.0D0/PI*(-1.0D0+1.0D0/(X*X)+3.0D0*S/X**4) - A1=DEXP(X)/DSQRT(2.0D0*PI*X) - R=1.0D0 - BI1=1.0D0 - DO 30 K=1,16 - R=-0.125D0*R*(4.0D0-(2.0D0*K-1.0D0)**2)/(K*X) - BI1=BI1+R - IF (DABS(R/BI1).LT.1.0D-12) GO TO 35 -30 CONTINUE -35 SL1=SL1+A1*BI1 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CLQN(N,X,Y,CQN,CQD) -C -C ================================================== -C Purpose: Compute the Legendre functions Qn(z) and -C their derivatives Qn'(z) for a complex -C argument -C Input : x --- Real part of z -C y --- Imaginary part of z -C n --- Degree of Qn(z), n = 0,1,2,... 
-C Output: CQN(n) --- Qn(z) -C CQD(n) --- Qn'(z) -C ================================================== -C - IMPLICIT DOUBLE PRECISION (X,Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CQN(0:N),CQD(0:N) - Z=CMPLX(X,Y) - IF (Z.EQ.1.0D0) THEN - DO 10 K=0,N - CQN(K)=(1.0D+300,0.0D0) -10 CQD(K)=(1.0D+300,0.0D0) - RETURN - ENDIF - LS=1 - IF (CDABS(Z).GT.1.0D0) LS=-1 - CQ0=0.5D0*CDLOG(LS*(1.0D0+Z)/(1.0D0-Z)) - CQ1=Z*CQ0-1.0D0 - CQN(0)=CQ0 - CQN(1)=CQ1 - IF (CDABS(Z).LT.1.0001D0) THEN - CQF0=CQ0 - CQF1=CQ1 - DO 15 K=2,N - CQF2=((2.0D0*K-1.0D0)*Z*CQF1-(K-1.0D0)*CQF0)/K - CQN(K)=CQF2 - CQF0=CQF1 -15 CQF1=CQF2 - ELSE - IF (CDABS(Z).GT.1.1D0) THEN - KM=40+N - ELSE - KM=(40+N)*INT(-1.0-1.8*LOG(CDABS(Z-1.0))) - ENDIF - CQF2=0.0D0 - CQF1=1.0D0 - DO 20 K=KM,0,-1 - CQF0=((2*K+3.0D0)*Z*CQF1-(K+2.0D0)*CQF2)/(K+1.0D0) - IF (K.LE.N) CQN(K)=CQF0 - CQF2=CQF1 -20 CQF1=CQF0 - DO 25 K=0,N -25 CQN(K)=CQN(K)*CQ0/CQF0 - ENDIF - CQD(0)=(CQN(1)-Z*CQN(0))/(Z*Z-1.0D0) - DO 30 K=1,N -30 CQD(K)=(K*Z*CQN(K)-K*CQN(K-1))/(Z*Z-1.0D0) - RETURN - END - -C ********************************** - - SUBROUTINE STVL0(X,SL0) -C -C ================================================ -C Purpose: Compute modified Struve function L0(x) -C Input : x --- Argument of L0(x) ( x ≥ 0 ) -C Output: SL0 --- L0(x) -C ================================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - S=1.0D0 - R=1.0D0 - IF (X.LE.20.0D0) THEN - A0=2.0D0*X/PI - DO 10 K=1,60 - R=R*(X/(2.0D0*K+1.0D0))**2 - S=S+R - IF (DABS(R/S).LT.1.0D-12) GO TO 15 -10 CONTINUE -15 SL0=A0*S - ELSE - KM=INT(.5*(X+1.0)) - IF (X.GE.50.0) KM=25 - DO 20 K=1,KM - R=R*((2.0D0*K-1.0D0)/X)**2 - S=S+R - IF (DABS(R/S).LT.1.0D-12) GO TO 25 -20 CONTINUE -25 A1=DEXP(X)/DSQRT(2.0D0*PI*X) - R=1.0D0 - BI0=1.0D0 - DO 30 K=1,16 - R=0.125D0*R*(2.0D0*K-1.0D0)**2/(K*X) - BI0=BI0+R - IF (DABS(R/BI0).LT.1.0D-12) GO TO 35 -30 CONTINUE -35 BI0=A1*BI0 - SL0=-2.0D0/(PI*X)*S+BI0 - ENDIF - RETURN - END - -C ********************************** - - 
SUBROUTINE AIRYZO(NT,KF,XA,XB,XC,XD) -C -C ======================================================== -C Purpose: Compute the first NT zeros of Airy functions -C Ai(x) and Ai'(x), a and a', and the associated -C values of Ai(a') and Ai'(a); and the first NT -C zeros of Airy functions Bi(x) and Bi'(x), b and -C b', and the associated values of Bi(b') and -C Bi'(b) -C Input : NT --- Total number of zeros -C KF --- Function code -C KF=1 for Ai(x) and Ai'(x) -C KF=2 for Bi(x) and Bi'(x) -C Output: XA(m) --- a, the m-th zero of Ai(x) or -C b, the m-th zero of Bi(x) -C XB(m) --- a', the m-th zero of Ai'(x) or -C b', the m-th zero of Bi'(x) -C XC(m) --- Ai(a') or Bi(b') -C XD(m) --- Ai'(a) or Bi'(b) -C ( m --- Serial number of zeros ) -C Routine called: AIRYB for computing Airy functions and -C their derivatives -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION XA(NT),XB(NT),XC(NT),XD(NT) - PI=3.141592653589793D0 - RT0=0.0D0 - RT=0.0D0 - DO 15 I=1,NT - IF (KF.EQ.1) THEN - U=3.0*PI*(4.0*I-1)/8.0D0 - U1=1/(U*U) - RT0=-(U*U)**(1.0/3.0)*((((-15.5902*U1+.929844)*U1 - & -.138889)*U1+.10416667D0)*U1+1.0D0) - ELSE IF (KF.EQ.2) THEN - IF (I.EQ.1) THEN - RT0=-1.17371 - ELSE - U=3.0*PI*(4.0*I-3.0)/8.0 - U1=1.0D0/(U*U) - RT0=-(U*U)**(1.0/3.0)*((((-15.5902*U1+.929844)*U1 - & -.138889)*U1+.10416667)*U1+1.0) - ENDIF - ENDIF -10 X=RT0 - CALL AIRYB(X,AI,BI,AD,BD) - IF (KF.EQ.1) RT=RT0-AI/AD - IF (KF.EQ.2) RT=RT0-BI/BD - IF (DABS((RT-RT0)/RT).GT.1.D-9) THEN - RT0=RT - GOTO 10 - ELSE - XA(I)=RT - IF (KF.EQ.1) XD(I)=AD - IF (KF.EQ.2) XD(I)=BD - ENDIF -15 CONTINUE - DO 25 I=1,NT - IF (KF.EQ.1) THEN - IF (I.EQ.1) THEN - RT0=-1.01879 - ELSE - U=3.0*PI*(4.0*I-3.0)/8.0 - U1=1/(U*U) - RT0=-(U*U)**(1.0/3.0)*((((15.0168*U1-.873954) - & *U1+.121528)*U1-.145833D0)*U1+1.0D0) - ENDIF - ELSE IF (KF.EQ.2) THEN - IF (I.EQ.1) THEN - RT0=-2.29444 - ELSE - U=3.0*PI*(4.0*I-1.0)/8.0 - U1=1.0/(U*U) - RT0=-(U*U)**(1.0/3.0)*((((15.0168*U1-.873954) - & 
*U1+.121528)*U1-.145833)*U1+1.0) - ENDIF - ENDIF -20 X=RT0 - CALL AIRYB(X,AI,BI,AD,BD) - IF (KF.EQ.1) RT=RT0-AD/(AI*X) - IF (KF.EQ.2) RT=RT0-BD/(BI*X) - IF (DABS((RT-RT0)/RT).GT.1.0D-9) THEN - RT0=RT - GOTO 20 - ELSE - XB(I)=RT - IF (KF.EQ.1) XC(I)=AI - IF (KF.EQ.2) XC(I)=BI - ENDIF -25 CONTINUE - RETURN - END - - - -C ********************************** - - SUBROUTINE ERROR(X,ERR) -C -C ========================================= -C Purpose: Compute error function erf(x) -C Input: x --- Argument of erf(x) -C Output: ERR --- erf(x) -C ========================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - EPS=1.0D-15 - PI=3.141592653589793D0 - X2=X*X - IF (DABS(X).LT.3.5D0) THEN - ER=1.0D0 - R=1.0D0 - DO 10 K=1,50 - R=R*X2/(K+0.5D0) - ER=ER+R - IF (DABS(R).LE.DABS(ER)*EPS) GO TO 15 -10 CONTINUE -15 C0=2.0D0/DSQRT(PI)*X*DEXP(-X2) - ERR=C0*ER - ELSE - ER=1.0D0 - R=1.0D0 - DO 20 K=1,12 - R=-R*(K-0.5D0)/X2 -20 ER=ER+R - C0=DEXP(-X2)/(DABS(X)*DSQRT(PI)) - ERR=1.0D0-C0*ER - IF (X.LT.0.0) ERR=-ERR - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CERROR(Z,CER) -C -C ==================================================== -C Purpose: Compute error function erf(z) for a complex -C argument (z=x+iy) -C Input : z --- Complex argument -C Output: CER --- erf(z) -C ==================================================== -C - IMPLICIT COMPLEX *16 (C,Z) - DOUBLE PRECISION A0,PI - A0=CDABS(Z) - C0=CDEXP(-Z*Z) - PI=3.141592653589793D0 - Z1=Z - IF (DBLE(Z).LT.0.0) THEN - Z1=-Z - ENDIF -C -C Cutoff radius R = 4.36; determined by balancing rounding error -C and asymptotic expansion error, see below. 
-C -C The resulting maximum global accuracy expected is around 1e-8 -C - IF (A0.LE.4.36D0) THEN -C -C Rounding error in the Taylor expansion is roughly -C -C ~ R*R * EPSILON * R**(2 R**2) / (2 R**2 Gamma(R**2 + 1/2)) -C - CS=Z1 - CR=Z1 - DO 10 K=1,120 - CR=CR*Z1*Z1/(K+0.5D0) - CS=CS+CR - IF (CDABS(CR/CS).LT.1.0D-15) GO TO 15 -10 CONTINUE -15 CER=2.0D0*C0*CS/DSQRT(PI) - ELSE - CL=1.0D0/Z1 - CR=CL -C -C Asymptotic series; maximum K must be at most ~ R^2. -C -C The maximum accuracy obtainable from this expansion is roughly -C -C ~ Gamma(2R**2 + 2) / ( -C (2 R**2)**(R**2 + 1/2) Gamma(R**2 + 3/2) 2**(R**2 + 1/2)) -C - DO 20 K=1,20 - CR=-CR*(K-0.5D0)/(Z1*Z1) - CL=CL+CR - IF (CDABS(CR/CL).LT.1.0D-15) GO TO 25 -20 CONTINUE -25 CER=1.0D0-C0*CL/DSQRT(PI) - ENDIF - IF (DBLE(Z).LT.0.0) THEN - CER=-CER - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE EULERB(N,EN) -C -C ====================================== -C Purpose: Compute Euler number En -C Input : n --- Serial number -C Output: EN(n) --- En -C ====================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION EN(0:N) - HPI=2.0D0/3.141592653589793D0 - EN(0)=1.0D0 - EN(2)=-1.0D0 - R1=-4.0D0*HPI**3 - DO 20 M=4,N,2 - R1=-R1*(M-1)*M*HPI*HPI - R2=1.0D0 - ISGN=1.0D0 - DO 10 K=3,1000,2 - ISGN=-ISGN - S=(1.0D0/K)**(M+1) - R2=R2+ISGN*S - IF (S.LT.1.0D-15) GOTO 20 -10 CONTINUE -20 EN(M)=R1*R2 - RETURN - END - -C ********************************** - - SUBROUTINE CVA1(KD,M,Q,CV) -C -C ============================================================ -C Purpose: Compute a sequence of characteristic values of -C Mathieu functions -C Input : M --- Maximum order of Mathieu functions -C q --- Parameter of Mathieu functions -C KD --- Case code -C KD=1 for cem(x,q) ( m = 0,2,4,… ) -C KD=2 for cem(x,q) ( m = 1,3,5,… ) -C KD=3 for sem(x,q) ( m = 1,3,5,… ) -C KD=4 for sem(x,q) ( m = 2,4,6,… ) -C Output: CV(I) --- Characteristic values; I = 1,2,3,... 
-C For KD=1, CV(1), CV(2), CV(3),..., correspond to -C the characteristic values of cem for m = 0,2,4,... -C For KD=2, CV(1), CV(2), CV(3),..., correspond to -C the characteristic values of cem for m = 1,3,5,... -C For KD=3, CV(1), CV(2), CV(3),..., correspond to -C the characteristic values of sem for m = 1,3,5,... -C For KD=4, CV(1), CV(2), CV(3),..., correspond to -C the characteristic values of sem for m = 0,2,4,... -C ============================================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION G(200),H(200),D(500),E(500),F(500),CV(200) - EPS=1.0D-14 - ICM=INT(M/2)+1 - IF (KD.EQ.4) ICM=M/2 - IF (Q.EQ.0.0D0) THEN - IF (KD.EQ.1) THEN - DO 10 IC=1,ICM -10 CV(IC)=4.0D0*(IC-1.0D0)**2 - ELSE IF (KD.NE.4) THEN - DO 15 IC=1,ICM -15 CV(IC)=(2.0D0*IC-1.0D0)**2 - ELSE - DO 20 IC=1,ICM -20 CV(IC)=4.0D0*IC*IC - ENDIF - ELSE - NM=INT(10+1.5*M+0.5*Q) - E(1)=0.0D0 - F(1)=0.0D0 - IF (KD.EQ.1) THEN - D(1)=0.0D0 - DO 25 I=2,NM - D(I)=4.0D0*(I-1.0D0)**2 - E(I)=Q -25 F(I)=Q*Q - E(2)=DSQRT(2.0D0)*Q - F(2)=2.0D0*Q*Q - ELSE IF (KD.NE.4) THEN - D(1)=1.0D0+(-1)**KD*Q - DO 30 I=2,NM - D(I)=(2.0D0*I-1.0D0)**2 - E(I)=Q -30 F(I)=Q*Q - ELSE - D(1)=4.0D0 - DO 35 I=2,NM - D(I)=4.0D0*I*I - E(I)=Q -35 F(I)=Q*Q - ENDIF - XA=D(NM)+DABS(E(NM)) - XB=D(NM)-DABS(E(NM)) - NM1=NM-1 - DO 40 I=1,NM1 - T=DABS(E(I))+DABS(E(I+1)) - T1=D(I)+T - IF (XA.LT.T1) XA=T1 - T1=D(I)-T - IF (T1.LT.XB) XB=T1 -40 CONTINUE - DO 45 I=1,ICM - G(I)=XA -45 H(I)=XB - DO 75 K=1,ICM - DO 50 K1=K,ICM - IF (G(K1).LT.G(K)) THEN - G(K)=G(K1) - GO TO 55 - ENDIF -50 CONTINUE -55 IF (K.NE.1.AND.H(K).LT.H(K-1)) H(K)=H(K-1) -60 X1=(G(K)+H(K))/2.0D0 - CV(K)=X1 - IF (DABS((G(K)-H(K))/X1).LT.EPS) GO TO 70 - J=0 - S=1.0D0 - DO 65 I=1,NM - IF (S.EQ.0.0D0) S=S+1.0D-30 - T=F(I)/S - S=D(I)-T-X1 - IF (S.LT.0.0) J=J+1 -65 CONTINUE - IF (J.LT.K) THEN - H(K)=X1 - ELSE - G(K)=X1 - IF (J.GE.ICM) THEN - G(ICM)=X1 - ELSE - IF (H(J+1).LT.X1) H(J+1)=X1 - IF (X1.LT.G(J)) G(J)=X1 - ENDIF - ENDIF - GO TO 60 -70 CV(K)=X1 
-75 CONTINUE - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE ITTIKB(X,TTI,TTK) -C -C ========================================================= -C Purpose: Integrate [I0(t)-1]/t with respect to t from 0 -C to x, and K0(t)/t with respect to t from x to ∞ -C Input : x --- Variable in the limits ( x ≥ 0 ) -C Output: TTI --- Integration of [I0(t)-1]/t from 0 to x -C TTK --- Integration of K0(t)/t from x to ∞ -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - IF (X.EQ.0.0D0) THEN - TTI=0.0D0 - ELSE IF (X.LE.5.0D0) THEN - X1=X/5.0D0 - T=X1*X1 - TTI=(((((((.1263D-3*T+.96442D-3)*T+.968217D-2)*T - & +.06615507D0)*T+.33116853D0)*T+1.13027241D0) - & *T+2.44140746D0)*T+3.12499991D0)*T - ELSE - T=5.0D0/X - TTI=(((((((((2.1945464D0*T-3.5195009D0)*T - & -11.9094395D0)*T+40.394734D0)*T-48.0524115D0) - & *T+28.1221478D0)*T-8.6556013D0)*T+1.4780044D0) - & *T-.0493843D0)*T+.1332055D0)*T+.3989314D0 - TTI=TTI*DEXP(X)/(DSQRT(X)*X) - ENDIF - IF (X.EQ.0.0D0) THEN - TTK=1.0D+300 - ELSE IF (X.LE.2.0D0) THEN - T1=X/2.0D0 - T=T1*T1 - TTK=(((((.77D-6*T+.1544D-4)*T+.48077D-3)*T - & +.925821D-2)*T+.10937537D0)*T+.74999993D0)*T - E0=EL+DLOG(X/2.0D0) - TTK=PI*PI/24.0D0+E0*(.5D0*E0+TTI)-TTK - ELSE IF (X.LE.4.0D0) THEN - T=2.0D0/X - TTK=(((.06084D0*T-.280367D0)*T+.590944D0)*T - & -.850013D0)*T+1.234684D0 - TTK=TTK*DEXP(-X)/(DSQRT(X)*X) - ELSE - T=4.0D0/X - TTK=(((((.02724D0*T-.1110396D0)*T+.2060126D0)*T - & -.2621446D0)*T+.3219184D0)*T-.5091339D0)*T - & +1.2533141D0 - TTK=TTK*DEXP(-X)/(DSQRT(X)*X) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE LQNB(N,X,QN,QD) -C -C ==================================================== -C Purpose: Compute Legendre functions Qn(x) & Qn'(x) -C Input : x --- Argument of Qn(x) -C n --- Degree of Qn(x) ( n = 0,1,2,…) -C Output: QN(n) --- Qn(x) -C QD(n) --- Qn'(x) -C 
==================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION QN(0:N),QD(0:N) - EPS=1.0D-14 - IF (DABS(X).EQ.1.0D0) THEN - DO 10 K=0,N - QN(K)=1.0D+300 -10 QD(K)=1.0D+300 - RETURN - ENDIF - IF (X.LE.1.021D0) THEN - X2=DABS((1.0D0+X)/(1.0D0-X)) - Q0=0.5D0*DLOG(X2) - Q1=X*Q0-1.0D0 - QN(0)=Q0 - QN(1)=Q1 - QD(0)=1.0D0/(1.0D0-X*X) - QD(1)=QN(0)+X*QD(0) - DO 15 K=2,N - QF=((2.0D0*K-1.0D0)*X*Q1-(K-1.0D0)*Q0)/K - QN(K)=QF - QD(K)=(QN(K-1)-X*QF)*K/(1.0D0-X*X) - Q0=Q1 -15 Q1=QF - ELSE - QC1=0.0D0 - QC2=1.0D0/X - DO 20 J=1,N - QC2=QC2*J/((2.0*J+1.0D0)*X) - IF (J.EQ.N-1) QC1=QC2 -20 CONTINUE - DO 35 L=0,1 - NL=N+L - QF=1.0D0 - QR=1.0D0 - DO 25 K=1,500 - QR=QR*(0.5D0*NL+K-1.0D0)*(0.5D0*(NL-1)+K) - & /((NL+K-0.5D0)*K*X*X) - QF=QF+QR - IF (DABS(QR/QF).LT.EPS) GO TO 30 -25 CONTINUE -30 IF (L.EQ.0) THEN - QN(N-1)=QF*QC1 - ELSE - QN(N)=QF*QC2 - ENDIF -35 CONTINUE - QF2=QN(N) - QF1=QN(N-1) - DO 40 K=N,2,-1 - QF0=((2*K-1.0D0)*X*QF1-K*QF2)/(K-1.0D0) - QN(K-2)=QF0 - QF2=QF1 -40 QF1=QF0 - QD(0)=1.0D0/(1.0D0-X*X) - DO 45 K=1,N -45 QD(K)=K*(QN(K-1)-X*QN(K))/(1.0D0-X*X) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CJK(KM,A) -C -C ======================================================== -C Purpose: Compute the expansion coefficients for the -C asymptotic expansion of Bessel functions -C with large orders -C Input : Km --- Maximum k -C Output: A(L) --- Cj(k) where j and k are related to L -C by L=j+1+[k*(k+1)]/2; j,k=0,1,...,Km -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(*) - A(1)=1.0D0 - F0=1.0D0 - G0=1.0D0 - DO 10 K=0,KM-1 - L1=(K+1)*(K+2)/2+1 - L2=(K+1)*(K+2)/2+K+2 - F=(0.5D0*K+0.125D0/(K+1))*F0 - G=-(1.5D0*K+0.625D0/(3.0*(K+1.0D0)))*G0 - A(L1)=F - A(L2)=G - F0=F -10 G0=G - DO 15 K=1,KM-1 - DO 15 J=1,K - L3=K*(K+1)/2+J+1 - L4=(K+1)*(K+2)/2+J+1 - A(L4)=(J+0.5D0*K+0.125D0/(2.0*J+K+1.0))*A(L3) - & -(J+0.5D0*K-1.0+0.625D0/(2.0*J+K+1.0))*A(L3-1) -15 
CONTINUE - RETURN - END - - -C ********************************** - - SUBROUTINE ITTIKA(X,TTI,TTK) -C -C ========================================================= -C Purpose: Integrate [I0(t)-1]/t with respect to t from 0 -C to x, and K0(t)/t with respect to t from x to ∞ -C Input : x --- Variable in the limits ( x ≥ 0 ) -C Output: TTI --- Integration of [I0(t)-1]/t from 0 to x -C TTK --- Integration of K0(t)/t from x to ∞ -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION C(8) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - DATA C/1.625D0,4.1328125D0, - & 1.45380859375D+1,6.553353881835D+1, - & 3.6066157150269D+2,2.3448727161884D+3, - & 1.7588273098916D+4,1.4950639538279D+5/ - IF (X.EQ.0.0D0) THEN - TTI=0.0D0 - TTK=1.0D+300 - RETURN - ENDIF - IF (X.LT.40.0D0) THEN - TTI=1.0D0 - R=1.0D0 - DO 10 K=2,50 - R=.25D0*R*(K-1.0D0)/(K*K*K)*X*X - TTI=TTI+R - IF (DABS(R/TTI).LT.1.0D-12) GO TO 15 -10 CONTINUE -15 TTI=TTI*.125D0*X*X - ELSE - TTI=1.0D0 - R=1.0D0 - DO 20 K=1,8 - R=R/X -20 TTI=TTI+C(K)*R - RC=X*DSQRT(2.0D0*PI*X) - TTI=TTI*DEXP(X)/RC - ENDIF - IF (X.LE.12.0D0) THEN - E0=(.5D0*DLOG(X/2.0D0)+EL)*DLOG(X/2.0D0) - & +PI*PI/24.0D0+.5D0*EL*EL - B1=1.5D0-(EL+DLOG(X/2.0D0)) - RS=1.0D0 - R=1.0D0 - DO 25 K=2,50 - R=.25D0*R*(K-1.0D0)/(K*K*K)*X*X - RS=RS+1.0D0/K - R2=R*(RS+1.0D0/(2.0D0*K)-(EL+DLOG(X/2.0D0))) - B1=B1+R2 - IF (DABS(R2/B1).LT.1.0D-12) GO TO 30 -25 CONTINUE -30 TTK=E0-.125D0*X*X*B1 - ELSE - TTK=1.0D0 - R=1.0D0 - DO 35 K=1,8 - R=-R/X -35 TTK=TTK+C(K)*R - RC=X*DSQRT(2.0D0/PI*X) - TTK=TTK*DEXP(-X)/RC - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE LAMV(V,X,VM,VL,DL) -C -C ========================================================= -C Purpose: Compute lambda function with arbitrary order v, -C and their derivative -C Input : x --- Argument of lambda function -C v --- Order of lambda function -C Output: VL(n) --- Lambda function of order n+v0 -C DL(n) --- Derivative of lambda 
function -C VM --- Highest order computed -C Routines called: -C (1) MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C (2) GAM0 for computing gamma function (|x| ≤ 1) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION VL(0:*),DL(0:*) - PI=3.141592653589793D0 - RP2=0.63661977236758D0 - X=DABS(X) - X2=X*X - N=INT(V) - V0=V-N - VM=V - IF (X.LE.12.0D0) THEN - DO 25 K=0,N - VK=V0+K - BK=1.0D0 - R=1.0D0 - DO 10 I=1,50 - R=-0.25D0*R*X2/(I*(I+VK)) - BK=BK+R - IF (DABS(R).LT.DABS(BK)*1.0D-15) GO TO 15 -10 CONTINUE -15 VL(K)=BK - UK=1.0D0 - R=1.0D0 - DO 20 I=1,50 - R=-0.25D0*R*X2/(I*(I+VK+1.0D0)) - UK=UK+R - IF (DABS(R).LT.DABS(UK)*1.0D-15) GO TO 25 -20 CONTINUE -25 DL(K)=-0.5D0*X/(VK+1.0D0)*UK - RETURN - ENDIF - K0=11 - IF (X.GE.35.0D0) K0=10 - IF (X.GE.50.0D0) K0=8 - BJV0=0.0D0 - BJV1=0.0D0 - DO 40 J=0,1 - VV=4.0D0*(J+V0)*(J+V0) - PX=1.0D0 - RP=1.0D0 - DO 30 K=1,K0 - RP=-0.78125D-2*RP*(VV-(4.0*K-3.0)**2.0)*(VV- - & (4.0*K-1.0)**2.0)/(K*(2.0*K-1.0)*X2) -30 PX=PX+RP - QX=1.0D0 - RQ=1.0D0 - DO 35 K=1,K0 - RQ=-0.78125D-2*RQ*(VV-(4.0*K-1.0)**2.0)*(VV- - & (4.0*K+1.0)**2.0)/(K*(2.0*K+1.0)*X2) -35 QX=QX+RQ - QX=0.125D0*(VV-1.0D0)*QX/X - XK=X-(0.5D0*(J+V0)+0.25D0)*PI - A0=DSQRT(RP2/X) - CK=DCOS(XK) - SK=DSIN(XK) - IF (J.EQ.0) BJV0=A0*(PX*CK-QX*SK) - IF (J.EQ.1) BJV1=A0*(PX*CK-QX*SK) -40 CONTINUE - IF (V0.EQ.0.0D0) THEN - GA=1.0D0 - ELSE - CALL GAM0(V0,GA) - GA=V0*GA - ENDIF - FAC=(2.0D0/X)**V0*GA - VL(0)=BJV0 - DL(0)=-BJV1+V0/X*BJV0 - VL(1)=BJV1 - DL(1)=BJV0-(1.0D0+V0)/X*BJV1 - R0=2.0D0*(1.0D0+V0)/X - IF (N.LE.1) THEN - VL(0)=FAC*VL(0) - DL(0)=FAC*DL(0)-V0/X*VL(0) - VL(1)=FAC*R0*VL(1) - DL(1)=FAC*R0*DL(1)-(1.0D0+V0)/X*VL(1) - RETURN - ENDIF - IF (N.GE.2.AND.N.LE.INT(0.9*X)) THEN - F0=BJV0 - F1=BJV1 - DO 45 K=2,N - F=2.0D0*(K+V0-1.0D0)/X*F1-F0 - F0=F1 - F1=F -45 VL(K)=F - ELSE IF (N.GE.2) THEN - M=MSTA1(X,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F=0.0D0 - F2=0.0D0 - 
F1=1.0D-100
-           DO 50 K=M,0,-1
-              F=2.0D0*(V0+K+1.0D0)/X*F1-F2
-              IF (K.LE.N) VL(K)=F
-              F2=F1
-50         F1=F
-C          Normalize the backward recurrence against the asymptotic
-C          values, scaling by whichever of BJV0/BJV1 is larger in
-C          magnitude for numerical stability (same construct as JYV).
-           CS=0.0D0
-           IF (DABS(BJV0).GT.DABS(BJV1)) THEN
-              CS=BJV0/F
-           ELSE
-              CS=BJV1/F2
-           ENDIF
-           DO 55 K=0,N
-55         VL(K)=CS*VL(K)
-        ENDIF
-        VL(0)=FAC*VL(0)
-        DO 65 J=1,N
-           RC=FAC*R0
-           VL(J)=RC*VL(J)
-           DL(J-1)=-0.5D0*X/(J+V0)*VL(J)
-65      R0=2.0D0*(J+V0+1)/X*R0
-        DL(N)=2.0D0*(V0+N)*(VL(N-1)-VL(N))/X
-        VM=N+V0
-        RETURN
-        END
-
-
-
-C       **********************************
-
-        SUBROUTINE CHGUIT(A,B,X,HU,ID)
-C
-C       ======================================================
-C       Purpose: Compute hypergeometric function U(a,b,x) by
-C                using Gaussian-Legendre integration (n=60)
-C       Input  : a  --- Parameter ( a > 0 )
-C                b  --- Parameter
-C                x  --- Argument ( x > 0 )
-C       Output:  HU --- U(a,b,z)
-C                ID --- Estimated number of significant digits
-C       Routine called: GAMMA2 for computing Г(x)
-C       ======================================================
-C
-        IMPLICIT DOUBLE PRECISION (A-H,O-Z)
-        DIMENSION T(30),W(30)
-        DATA T/ .259597723012478D-01, .778093339495366D-01,
-     &          .129449135396945D+00, .180739964873425D+00,
-     &          .231543551376029D+00, .281722937423262D+00,
-     &          .331142848268448D+00, .379670056576798D+00,
-     &          .427173741583078D+00, .473525841761707D+00,
-     &          .518601400058570D+00, .562278900753945D+00,
-     &          .604440597048510D+00, .644972828489477D+00,
-     &          .683766327381356D+00, .720716513355730D+00,
-     &          .755723775306586D+00, .788693739932264D+00,
-     &          .819537526162146D+00, .848171984785930D+00,
-     &          .874519922646898D+00, .898510310810046D+00,
-     &          .920078476177628D+00, .939166276116423D+00,
-     &          .955722255839996D+00, .969701788765053D+00,
-     &          .981067201752598D+00, .989787895222222D+00,
-     &          .995840525118838D+00, .999210123227436D+00/
-        DATA W/ .519078776312206D-01, .517679431749102D-01,
-     &          .514884515009810D-01, .510701560698557D-01,
-     &          .505141845325094D-01, .498220356905502D-01,
-     &          .489955754557568D-01, .480370318199712D-01,
-     &          .469489888489122D-01, .457343797161145D-01,
-     &          .443964787957872D-01, .429388928359356D-01,
-     & 
.413655512355848D-01, .396806954523808D-01, - & .378888675692434D-01, .359948980510845D-01, - & .340038927249464D-01, .319212190192963D-01, - & .297524915007890D-01, .275035567499248D-01, - & .251804776215213D-01, .227895169439978D-01, - & .203371207294572D-01, .178299010142074D-01, - & .152746185967848D-01, .126781664768159D-01, - & .100475571822880D-01, .738993116334531D-02, - & .471272992695363D-02, .202681196887362D-02/ - ID=7 - A1=A-1.0D0 - B1=B-A-1.0D0 - C=12.0D0/X - HU0=0.0D0 - DO 20 M=10,100,5 - HU1=0.0D0 - G=0.5D0*C/M - D=G - DO 15 J=1,M - S=0.0D0 - DO 10 K=1,30 - T1=D+G*T(K) - T2=D-G*T(K) - F1=DEXP(-X*T1)*T1**A1*(1.0D0+T1)**B1 - F2=DEXP(-X*T2)*T2**A1*(1.0D0+T2)**B1 - S=S+W(K)*(F1+F2) -10 CONTINUE - HU1=HU1+S*G - D=D+2.0D0*G -15 CONTINUE - IF (DABS(1.0D0-HU0/HU1).LT.1.0D-7) GO TO 25 - HU0=HU1 -20 CONTINUE -25 CALL GAMMA2(A,GA) - HU1=HU1/GA - DO 40 M=2,10,2 - HU2=0.0D0 - G=0.5D0/M - D=G - DO 35 J=1,M - S=0.0D0 - DO 30 K=1,30 - T1=D+G*T(K) - T2=D-G*T(K) - T3=C/(1.0D0-T1) - T4=C/(1.0D0-T2) - F1=T3*T3/C*DEXP(-X*T3)*T3**A1*(1.0D0+T3)**B1 - F2=T4*T4/C*DEXP(-X*T4)*T4**A1*(1.0D0+T4)**B1 - S=S+W(K)*(F1+F2) -30 CONTINUE - HU2=HU2+S*G - D=D+2.0D0*G -35 CONTINUE - IF (DABS(1.0D0-HU0/HU2).LT.1.0D-7) GO TO 45 - HU0=HU2 -40 CONTINUE -45 CALL GAMMA2(A,GA) - HU2=HU2/GA - HU=HU1+HU2 - RETURN - END - - - -C ********************************** - - SUBROUTINE KMN(M,N,C,CV,KD,DF,DN,CK1,CK2) -C -C =================================================== -C Purpose: Compute the expansion coefficients of the -C prolate and oblate spheroidal functions -C and joining factors -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION U(200),V(200),W(200),DF(200),DN(200), - & TP(200),RK(200) - NM=25+INT(0.5*(N-M)+C) - NN=NM+M - CS=C*C*KD - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - K=0 - DO 10 I=1,NN+3 - IF (IP.EQ.0) K=-2*(I-1) - IF (IP.EQ.1) K=-(2*I-3) - GK0=2.0D0*M+K - GK1=(M+K)*(M+K+1.0D0) - GK2=2.0D0*(M+K)-1.0D0 - GK3=2.0D0*(M+K)+3.0D0 - 
U(I)=GK0*(GK0-1.0D0)*CS/(GK2*(GK2+2.0D0)) - V(I)=GK1-CV+(2.0D0*(GK1-M*M)-1.0D0)*CS/(GK2*GK3) -10 W(I)=(K+1.0D0)*(K+2.0D0)*CS/((GK2+2.0D0)*GK3) - DO 20 K=1,M - T=V(M+1) - DO 15 L=0,M-K-1 -15 T=V(M-L)-W(M-L+1)*U(M-L)/T -20 RK(K)=-U(K)/T - R=1.0D0 - DO 25 K=1,M - R=R*RK(K) -25 DN(K)=DF(1)*R - TP(NN)=V(NN+1) - DO 30 K=NN-1,M+1,-1 - TP(K)=V(K+1)-W(K+2)*U(K+1)/TP(K+1) - IF (K.GT.M+1) RK(K)=-U(K)/TP(K) -30 CONTINUE - IF (M.EQ.0) DNP=DF(1) - IF (M.NE.0) DNP=DN(M) - DN(M+1)=(-1)**IP*DNP*CS/((2.0*M-1.0)*(2.0*M+1.0-4.0*IP) - & *TP(M+1)) - DO 35 K=M+2,NN -35 DN(K)=RK(K)*DN(K-1) - R1=1.0D0 - DO 40 J=1,(N+M+IP)/2 -40 R1=R1*(J+0.5D0*(N+M+IP)) - NM1=(N-M)/2 - R=1.0D0 - DO 45 J=1,2*M+IP -45 R=R*J - SU0=R*DF(1) - SW=0.0D0 - DO 50 K=2,NM - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - SU0=SU0+R*DF(K) - IF (K.GT.NM1.AND.DABS((SU0-SW)/SU0).LT.1.0D-14) GO TO 55 -50 SW=SU0 -55 IF (KD.EQ.1) GOTO 70 - R2=1.0D0 - DO 60 J=1,M -60 R2=2.0D0*C*R2*J - R3=1.0D0 - DO 65 J=1,(N-M-IP)/2 -65 R3=R3*J - SA0=(2.0*(M+IP)+1.0)*R1/(2.0**N*C**IP*R2*R3*DF(1)) - CK1=SA0*SU0 - IF (KD.EQ.-1) RETURN -70 R4=1.0D0 - DO 75 J=1,(N-M-IP)/2 -75 R4=4.0D0*R4*J - R5=1.0D0 - DO 80 J=1,M -80 R5=R5*(J+M)/C - G0=DN(M) - IF (M.EQ.0) G0=DF(1) - SB0=(IP+1.0)*C**(IP+1)/(2.0*IP*(M-2.0)+1.0)/(2.0*M-1.0) - CK2=(-1)**IP*SB0*R4*R5*G0/R1*SU0 - RETURN - END - - - -C ********************************** - - SUBROUTINE LAGZO(N,X,W) -C -C ========================================================= -C Purpose : Compute the zeros of Laguerre polynomial Ln(x) -C in the interval [0,∞], and the corresponding -C weighting coefficients for Gauss-Laguerre -C integration -C Input : n --- Order of the Laguerre polynomial -C X(n) --- Zeros of the Laguerre polynomial -C W(n) --- Corresponding weighting coefficients -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(N),W(N) - HN=1.0D0/N - PF=0.0D0 - PD=0.0D0 - DO 35 NR=1,N - Z=HN - IF (NR.GT.1) Z=X(NR-1)+HN*NR**1.27 - IT=0 -10 
IT=IT+1 - Z0=Z - P=1.0D0 - DO 15 I=1,NR-1 -15 P=P*(Z-X(I)) - F0=1.0D0 - F1=1.0D0-Z - DO 20 K=2,N - PF=((2.0D0*K-1.0D0-Z)*F1-(K-1.0D0)*F0)/K - PD=K/Z*(PF-F1) - F0=F1 -20 F1=PF - FD=PF/P - Q=0.0D0 - DO 30 I=1,NR-1 - WP=1.0D0 - DO 25 J=1,NR-1 - IF (J.EQ.I) GO TO 25 - WP=WP*(Z-X(J)) -25 CONTINUE - Q=Q+WP -30 CONTINUE - GD=(PD-Q*FD)/P - Z=Z-FD/GD - IF (IT.LE.40.AND.DABS((Z-Z0)/Z).GT.1.0D-15) GO TO 10 - X(NR)=Z - W(NR)=1.0D0/(Z*PD*PD) -35 CONTINUE - RETURN - END - -C ********************************** - - SUBROUTINE VVLA(VA,X,PV) -C -C =================================================== -C Purpose: Compute parabolic cylinder function Vv(x) -C for large argument -C Input: x --- Argument -C va --- Order -C Output: PV --- Vv(x) -C Routines called: -C (1) DVLA for computing Dv(x) for large |x| -C (2) GAMMA2 for computing Г(x) -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EPS=1.0D-12 - QE=DEXP(0.25*X*X) - A0=DABS(X)**(-VA-1.0D0)*DSQRT(2.0D0/PI)*QE - R=1.0D0 - PV=1.0D0 - DO 10 K=1,18 - R=0.5D0*R*(2.0*K+VA-1.0)*(2.0*K+VA)/(K*X*X) - PV=PV+R - IF (DABS(R/PV).LT.EPS) GO TO 15 -10 CONTINUE -15 PV=A0*PV - IF (X.LT.0.0D0) THEN - X1=-X - CALL DVLA(VA,X1,PDL) - CALL GAMMA2(-VA,GL) - DSL=DSIN(PI*VA)*DSIN(PI*VA) - PV=DSL*GL/PI*PDL-DCOS(PI*VA)*PV - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CJYVA(V,Z,VM,CBJ,CDJ,CBY,CDY) -C -C =========================================================== -C Purpose: Compute Bessel functions Jv(z), Yv(z) and their -C derivatives for a complex argument -C Input : z --- Complex argument -C v --- Order of Jv(z) and Yv(z) -C ( v = n+v0, n = 0,1,2,..., 0 ≤ v0 < 1 ) -C Output: CBJ(n) --- Jn+v0(z) -C CDJ(n) --- Jn+v0'(z) -C CBY(n) --- Yn+v0(z) -C CDY(n) --- Yn+v0'(z) -C VM --- Highest order computed -C Routines called: -C (1) GAMMA2 for computing the gamma function -C (2) MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C 
=========================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,G,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBJ(0:*),CDJ(0:*),CBY(0:*),CDY(0:*) - PI=3.141592653589793D0 - RP2=.63661977236758D0 - CI=(0.0D0,1.0D0) - A0=CDABS(Z) - Z1=Z - Z2=Z*Z - N=INT(V) - V0=V-N - PV0=PI*V0 - PV1=PI*(1.0D0+V0) - IF (A0.LT.1.0D-100) THEN - DO 10 K=0,N - CBJ(K)=(0.0D0,0.0D0) - CDJ(K)=(0.0D0,0.0D0) - CBY(K)=-(1.0D+300,0.0D0) -10 CDY(K)=(1.0D+300,0.0D0) - IF (V0.EQ.0.0) THEN - CBJ(0)=(1.0D0,0.0D0) - CDJ(1)=(0.5D0,0.0D0) - ELSE - CDJ(0)=(1.0D+300,0.0D0) - ENDIF - VM=V - RETURN - ENDIF - LB0=0.0D0 - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (A0.LE.12.0) THEN - DO 25 L=0,1 - VL=V0+L - CJVL=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 15 K=1,40 - CR=-0.25D0*CR*Z2/(K*(K+VL)) - CJVL=CJVL+CR - IF (CDABS(CR).LT.CDABS(CJVL)*1.0D-15) GO TO 20 -15 CONTINUE -20 VG=1.0D0+VL - CALL GAMMA2(VG,GA) - CA=(0.5D0*Z1)**VL/GA - IF (L.EQ.0) CJV0=CJVL*CA - IF (L.EQ.1) CJV1=CJVL*CA -25 CONTINUE - ELSE - K0=11 - IF (A0.GE.35.0) K0=10 - IF (A0.GE.50.0) K0=8 - DO 40 J=0,1 - VV=4.0D0*(J+V0)*(J+V0) - CPZ=(1.0D0,0.0D0) - CRP=(1.0D0,0.0D0) - DO 30 K=1,K0 - CRP=-0.78125D-2*CRP*(VV-(4.0*K-3.0)**2.0)*(VV- - & (4.0*K-1.0)**2.0)/(K*(2.0*K-1.0)*Z2) -30 CPZ=CPZ+CRP - CQZ=(1.0D0,0.0D0) - CRQ=(1.0D0,0.0D0) - DO 35 K=1,K0 - CRQ=-0.78125D-2*CRQ*(VV-(4.0*K-1.0)**2.0)*(VV- - & (4.0*K+1.0)**2.0)/(K*(2.0*K+1.0)*Z2) -35 CQZ=CQZ+CRQ - CQZ=0.125D0*(VV-1.0)*CQZ/Z1 - ZK=Z1-(0.5D0*(J+V0)+0.25D0)*PI - CA0=CDSQRT(RP2/Z1) - CCK=CDCOS(ZK) - CSK=CDSIN(ZK) - IF (J.EQ.0) THEN - CJV0=CA0*(CPZ*CCK-CQZ*CSK) - CYV0=CA0*(CPZ*CSK+CQZ*CCK) - ELSE IF (J.EQ.1) THEN - CJV1=CA0*(CPZ*CCK-CQZ*CSK) - CYV1=CA0*(CPZ*CSK+CQZ*CCK) - ENDIF -40 CONTINUE - ENDIF - IF (A0.LE.12.0) THEN - IF (V0.NE.0.0) THEN - DO 55 L=0,1 - VL=V0+L - CJVL=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 45 K=1,40 - CR=-0.25D0*CR*Z2/(K*(K-VL)) - CJVL=CJVL+CR - IF (CDABS(CR).LT.CDABS(CJVL)*1.0D-15) GO TO 50 -45 CONTINUE -50 VG=1.0D0-VL - CALL GAMMA2(VG,GB) - CB=(2.0D0/Z1)**VL/GB - 
IF (L.EQ.0) CJU0=CJVL*CB - IF (L.EQ.1) CJU1=CJVL*CB -55 CONTINUE - CYV0=(CJV0*DCOS(PV0)-CJU0)/DSIN(PV0) - CYV1=(CJV1*DCOS(PV1)-CJU1)/DSIN(PV1) - ELSE - CEC=CDLOG(Z1/2.0D0)+.5772156649015329D0 - CS0=(0.0D0,0.0D0) - W0=0.0D0 - CR0=(1.0D0,0.0D0) - DO 60 K=1,30 - W0=W0+1.0D0/K - CR0=-0.25D0*CR0/(K*K)*Z2 -60 CS0=CS0+CR0*W0 - CYV0=RP2*(CEC*CJV0-CS0) - CS1=(1.0D0,0.0D0) - W1=0.0D0 - CR1=(1.0D0,0.0D0) - DO 65 K=1,30 - W1=W1+1.0D0/K - CR1=-0.25D0*CR1/(K*(K+1))*Z2 -65 CS1=CS1+CR1*(2.0D0*W1+1.0D0/(K+1.0D0)) - CYV1=RP2*(CEC*CJV1-1.0D0/Z1-0.25D0*Z1*CS1) - ENDIF - ENDIF - IF (DBLE(Z).LT.0.0D0) THEN - CFAC0=CDEXP(PV0*CI) - CFAC1=CDEXP(PV1*CI) - IF (DIMAG(Z).LT.0.0D0) THEN - CYV0=CFAC0*CYV0-2.0D0*CI*DCOS(PV0)*CJV0 - CYV1=CFAC1*CYV1-2.0D0*CI*DCOS(PV1)*CJV1 - CJV0=CJV0/CFAC0 - CJV1=CJV1/CFAC1 - ELSE IF (DIMAG(Z).GT.0.0D0) THEN - CYV0=CYV0/CFAC0+2.0D0*CI*DCOS(PV0)*CJV0 - CYV1=CYV1/CFAC1+2.0D0*CI*DCOS(PV1)*CJV1 - CJV0=CFAC0*CJV0 - CJV1=CFAC1*CJV1 - ENDIF - ENDIF - CBJ(0)=CJV0 - CBJ(1)=CJV1 - IF (N.GE.2.AND.N.LE.INT(0.25*A0)) THEN - CF0=CJV0 - CF1=CJV1 - DO 70 K=2,N - CF=2.0D0*(K+V0-1.0D0)/Z*CF1-CF0 - CBJ(K)=CF - CF0=CF1 -70 CF1=CF - ELSE IF (N.GE.2) THEN - M=MSTA1(A0,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(A0,N,15) - ENDIF - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 75 K=M,0,-1 - CF=2.0D0*(V0+K+1.0D0)/Z*CF1-CF2 - IF (K.LE.N) CBJ(K)=CF - CF2=CF1 -75 CF1=CF - IF (CDABS(CJV0).GT.CDABS(CJV1)) CS=CJV0/CF - IF (CDABS(CJV0).LE.CDABS(CJV1)) CS=CJV1/CF2 - DO 80 K=0,N -80 CBJ(K)=CS*CBJ(K) - ENDIF - CDJ(0)=V0/Z*CBJ(0)-CBJ(1) - DO 85 K=1,N -85 CDJ(K)=-(K+V0)/Z*CBJ(K)+CBJ(K-1) - CBY(0)=CYV0 - CBY(1)=CYV1 - YA0=CDABS(CYV0) - LB=0 - CG0=CYV0 - CG1=CYV1 - DO 90 K=2,N - CYK=2.0D0*(V0+K-1.0D0)/Z*CG1-CG0 - IF (CDABS(CYK).GT.1.0D+290) GO TO 90 - YAK=CDABS(CYK) - YA1=CDABS(CG0) - IF (YAK.LT.YA0.AND.YAK.LT.YA1) LB=K - CBY(K)=CYK - CG0=CG1 - CG1=CYK -90 CONTINUE - IF (LB.LE.4.OR.DIMAG(Z).EQ.0.0D0) GO TO 125 -95 IF (LB.EQ.LB0) GO TO 125 - CH2=(1.0D0,0.0D0) - CH1=(0.0D0,0.0D0) - LB0=LB - DO 100 
K=LB,1,-1 - CH0=2.0D0*(K+V0)/Z*CH1-CH2 - CH2=CH1 -100 CH1=CH0 - CP12=CH0 - CP22=CH2 - CH2=(0.0D0,0.0D0) - CH1=(1.0D0,0.0D0) - DO 105 K=LB,1,-1 - CH0=2.0D0*(K+V0)/Z*CH1-CH2 - CH2=CH1 -105 CH1=CH0 - CP11=CH0 - CP21=CH2 - IF (LB.EQ.N) CBJ(LB+1)=2.0D0*(LB+V0)/Z*CBJ(LB)-CBJ(LB-1) - IF (CDABS(CBJ(0)).GT.CDABS(CBJ(1))) THEN - CBY(LB+1)=(CBJ(LB+1)*CYV0-2.0D0*CP11/(PI*Z))/CBJ(0) - CBY(LB)=(CBJ(LB)*CYV0+2.0D0*CP12/(PI*Z))/CBJ(0) - ELSE - CBY(LB+1)=(CBJ(LB+1)*CYV1-2.0D0*CP21/(PI*Z))/CBJ(1) - CBY(LB)=(CBJ(LB)*CYV1+2.0D0*CP22/(PI*Z))/CBJ(1) - ENDIF - CYL2=CBY(LB+1) - CYL1=CBY(LB) - DO 110 K=LB-1,0,-1 - CYLK=2.0D0*(K+V0+1.0D0)/Z*CYL1-CYL2 - CBY(K)=CYLK - CYL2=CYL1 -110 CYL1=CYLK - CYL1=CBY(LB) - CYL2=CBY(LB+1) - DO 115 K=LB+1,N-1 - CYLK=2.0D0*(K+V0)/Z*CYL2-CYL1 - CBY(K+1)=CYLK - CYL1=CYL2 -115 CYL2=CYLK - DO 120 K=2,N - WA=CDABS(CBY(K)) - IF (WA.LT.CDABS(CBY(K-1))) LB=K -120 CONTINUE - GO TO 95 -125 CDY(0)=V0/Z*CBY(0)-CBY(1) - DO 130 K=1,N -130 CDY(K)=CBY(K-1)-(K+V0)/Z*CBY(K) - VM=N+V0 - RETURN - END - - - -C ********************************** - - SUBROUTINE CJYVB(V,Z,VM,CBJ,CDJ,CBY,CDY) -C -C =========================================================== -C Purpose: Compute Bessel functions Jv(z), Yv(z) and their -C derivatives for a complex argument -C Input : z --- Complex argument -C v --- Order of Jv(z) and Yv(z) -C ( v = n+v0, n = 0,1,2,..., 0 ≤ v0 < 1 ) -C Output: CBJ(n) --- Jn+v0(z) -C CDJ(n) --- Jn+v0'(z) -C CBY(n) --- Yn+v0(z) -C CDY(n) --- Yn+v0'(z) -C VM --- Highest order computed -C Routines called: -C (1) GAMMA2 for computing the gamma function -C (2) MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,G,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBJ(0:*),CDJ(0:*),CBY(0:*),CDY(0:*) - PI=3.141592653589793D0 - RP2=.63661977236758D0 - CI=(0.0D0,1.0D0) - A0=CDABS(Z) - Z1=Z - Z2=Z*Z - N=INT(V) - V0=V-N - PV0=PI*V0 - IF (A0.LT.1.0D-100) THEN - DO 10 
K=0,N - CBJ(K)=(0.0D0,0.0D0) - CDJ(K)=(0.0D0,0.0D0) - CBY(K)=-(1.0D+300,0.0D0) -10 CDY(K)=(1.0D+300,0.0D0) - IF (V0.EQ.0.0) THEN - CBJ(0)=(1.0D0,0.0D0) - CDJ(1)=(0.5D0,0.0D0) - ELSE - CDJ(0)=(1.0D+300,0.0D0) - ENDIF - VM=V - RETURN - ENDIF - IF (DBLE(Z).LT.0.0D0) Z1=-Z - IF (A0.LE.12.0) THEN - CJV0=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 15 K=1,40 - CR=-0.25D0*CR*Z2/(K*(K+V0)) - CJV0=CJV0+CR - IF (CDABS(CR).LT.CDABS(CJV0)*1.0D-15) GO TO 20 -15 CONTINUE -20 VG=1.0D0+V0 - CALL GAMMA2(VG,GA) - CA=(0.5D0*Z1)**V0/GA - CJV0=CJV0*CA - ELSE - K0=11 - IF (A0.GE.35.0) K0=10 - IF (A0.GE.50.0) K0=8 - VV=4.0D0*V0*V0 - CPZ=(1.0D0,0.0D0) - CRP=(1.0D0,0.0D0) - DO 25 K=1,K0 - CRP=-0.78125D-2*CRP*(VV-(4.0*K-3.0)**2.0)*(VV- - & (4.0*K-1.0)**2.0)/(K*(2.0*K-1.0)*Z2) -25 CPZ=CPZ+CRP - CQZ=(1.0D0,0.0D0) - CRQ=(1.0D0,0.0D0) - DO 30 K=1,K0 - CRQ=-0.78125D-2*CRQ*(VV-(4.0*K-1.0)**2.0)*(VV- - & (4.0*K+1.0)**2.0)/(K*(2.0*K+1.0)*Z2) -30 CQZ=CQZ+CRQ - CQZ=0.125D0*(VV-1.0)*CQZ/Z1 - ZK=Z1-(0.5D0*V0+0.25D0)*PI - CA0=CDSQRT(RP2/Z1) - CCK=CDCOS(ZK) - CSK=CDSIN(ZK) - CJV0=CA0*(CPZ*CCK-CQZ*CSK) - CYV0=CA0*(CPZ*CSK+CQZ*CCK) - ENDIF - IF (A0.LE.12.0) THEN - IF (V0.NE.0.0) THEN - CJVN=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 35 K=1,40 - CR=-0.25D0*CR*Z2/(K*(K-V0)) - CJVN=CJVN+CR - IF (CDABS(CR).LT.CDABS(CJVN)*1.0D-15) GO TO 40 -35 CONTINUE -40 VG=1.0D0-V0 - CALL GAMMA2(VG,GB) - CB=(2.0D0/Z1)**V0/GB - CJU0=CJVN*CB - CYV0=(CJV0*DCOS(PV0)-CJU0)/DSIN(PV0) - ELSE - CEC=CDLOG(Z1/2.0D0)+.5772156649015329D0 - CS0=(0.0D0,0.0D0) - W0=0.0D0 - CR0=(1.0D0,0.0D0) - DO 45 K=1,30 - W0=W0+1.0D0/K - CR0=-0.25D0*CR0/(K*K)*Z2 -45 CS0=CS0+CR0*W0 - CYV0=RP2*(CEC*CJV0-CS0) - ENDIF - ENDIF - IF (N.EQ.0) N=1 - M=MSTA1(A0,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(A0,N,15) - ENDIF - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 50 K=M,0,-1 - CF=2.0D0*(V0+K+1.0D0)/Z1*CF1-CF2 - IF (K.LE.N) CBJ(K)=CF - CF2=CF1 -50 CF1=CF - CS=CJV0/CF - DO 55 K=0,N -55 CBJ(K)=CS*CBJ(K) - IF (DBLE(Z).LT.0.0D0) THEN - CFAC0=CDEXP(PV0*CI) - IF 
(DIMAG(Z).LT.0.0D0) THEN - CYV0=CFAC0*CYV0-2.0D0*CI*DCOS(PV0)*CJV0 - ELSE IF (DIMAG(Z).GT.0.0D0) THEN - CYV0=CYV0/CFAC0+2.0D0*CI*DCOS(PV0)*CJV0 - ENDIF - DO 60 K=0,N - IF (DIMAG(Z).LT.0.0D0) THEN - CBJ(K)=CDEXP(-PI*(K+V0)*CI)*CBJ(K) - ELSE IF (DIMAG(Z).GT.0.0D0) THEN - CBJ(K)=CDEXP(PI*(K+V0)*CI)*CBJ(K) - ENDIF -60 CONTINUE - Z1=Z1 - ENDIF - CBY(0)=CYV0 - DO 65 K=1,N - CYY=(CBJ(K)*CBY(K-1)-2.0D0/(PI*Z))/CBJ(K-1) - CBY(K)=CYY -65 CONTINUE - CDJ(0)=V0/Z*CBJ(0)-CBJ(1) - DO 70 K=1,N -70 CDJ(K)=-(K+V0)/Z*CBJ(K)+CBJ(K-1) - CDY(0)=V0/Z*CBY(0)-CBY(1) - DO 75 K=1,N -75 CDY(K)=CBY(K-1)-(K+V0)/Z*CBY(K) - VM=N+V0 - RETURN - END - - - -C ********************************** - - SUBROUTINE JY01A(X,BJ0,DJ0,BJ1,DJ1,BY0,DY0,BY1,DY1) -C -C ======================================================= -C Purpose: Compute Bessel functions J0(x), J1(x), Y0(x), -C Y1(x), and their derivatives -C Input : x --- Argument of Jn(x) & Yn(x) ( x ≥ 0 ) -C Output: BJ0 --- J0(x) -C DJ0 --- J0'(x) -C BJ1 --- J1(x) -C DJ1 --- J1'(x) -C BY0 --- Y0(x) -C DY0 --- Y0'(x) -C BY1 --- Y1(x) -C DY1 --- Y1'(x) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(12),B(12),A1(12),B1(12) - PI=3.141592653589793D0 - RP2=0.63661977236758D0 - X2=X*X - IF (X.EQ.0.0D0) THEN - BJ0=1.0D0 - BJ1=0.0D0 - DJ0=0.0D0 - DJ1=0.5D0 - BY0=-1.0D+300 - BY1=-1.0D+300 - DY0=1.0D+300 - DY1=1.0D+300 - RETURN - ENDIF - IF (X.LE.12.0D0) THEN - BJ0=1.0D0 - R=1.0D0 - DO 5 K=1,30 - R=-0.25D0*R*X2/(K*K) - BJ0=BJ0+R - IF (DABS(R).LT.DABS(BJ0)*1.0D-15) GO TO 10 -5 CONTINUE -10 BJ1=1.0D0 - R=1.0D0 - DO 15 K=1,30 - R=-0.25D0*R*X2/(K*(K+1.0D0)) - BJ1=BJ1+R - IF (DABS(R).LT.DABS(BJ1)*1.0D-15) GO TO 20 -15 CONTINUE -20 BJ1=0.5D0*X*BJ1 - EC=DLOG(X/2.0D0)+0.5772156649015329D0 - CS0=0.0D0 - W0=0.0D0 - R0=1.0D0 - DO 25 K=1,30 - W0=W0+1.0D0/K - R0=-0.25D0*R0/(K*K)*X2 - R=R0*W0 - CS0=CS0+R - IF (DABS(R).LT.DABS(CS0)*1.0D-15) GO TO 30 -25 CONTINUE -30 BY0=RP2*(EC*BJ0-CS0) - CS1=1.0D0 - W1=0.0D0 - 
R1=1.0D0 - DO 35 K=1,30 - W1=W1+1.0D0/K - R1=-0.25D0*R1/(K*(K+1))*X2 - R=R1*(2.0D0*W1+1.0D0/(K+1.0D0)) - CS1=CS1+R - IF (DABS(R).LT.DABS(CS1)*1.0D-15) GO TO 40 -35 CONTINUE -40 BY1=RP2*(EC*BJ1-1.0D0/X-0.25D0*X*CS1) - ELSE - DATA A/-.7031250000000000D-01,.1121520996093750D+00, - & -.5725014209747314D+00,.6074042001273483D+01, - & -.1100171402692467D+03,.3038090510922384D+04, - & -.1188384262567832D+06,.6252951493434797D+07, - & -.4259392165047669D+09,.3646840080706556D+11, - & -.3833534661393944D+13,.4854014686852901D+15/ - DATA B/ .7324218750000000D-01,-.2271080017089844D+00, - & .1727727502584457D+01,-.2438052969955606D+02, - & .5513358961220206D+03,-.1825775547429318D+05, - & .8328593040162893D+06,-.5006958953198893D+08, - & .3836255180230433D+10,-.3649010818849833D+12, - & .4218971570284096D+14,-.5827244631566907D+16/ - DATA A1/.1171875000000000D+00,-.1441955566406250D+00, - & .6765925884246826D+00,-.6883914268109947D+01, - & .1215978918765359D+03,-.3302272294480852D+04, - & .1276412726461746D+06,-.6656367718817688D+07, - & .4502786003050393D+09,-.3833857520742790D+11, - & .4011838599133198D+13,-.5060568503314727D+15/ - DATA B1/-.1025390625000000D+00,.2775764465332031D+00, - & -.1993531733751297D+01,.2724882731126854D+02, - & -.6038440767050702D+03,.1971837591223663D+05, - & -.8902978767070678D+06,.5310411010968522D+08, - & -.4043620325107754D+10,.3827011346598605D+12, - & -.4406481417852278D+14,.6065091351222699D+16/ - K0=12 - IF (X.GE.35.0) K0=10 - IF (X.GE.50.0) K0=8 - T1=X-0.25D0*PI - P0=1.0D0 - Q0=-0.125D0/X - DO 45 K=1,K0 - P0=P0+A(K)*X**(-2*K) -45 Q0=Q0+B(K)*X**(-2*K-1) - CU=DSQRT(RP2/X) - BJ0=CU*(P0*DCOS(T1)-Q0*DSIN(T1)) - BY0=CU*(P0*DSIN(T1)+Q0*DCOS(T1)) - T2=X-0.75D0*PI - P1=1.0D0 - Q1=0.375D0/X - DO 50 K=1,K0 - P1=P1+A1(K)*X**(-2*K) -50 Q1=Q1+B1(K)*X**(-2*K-1) - CU=DSQRT(RP2/X) - BJ1=CU*(P1*DCOS(T2)-Q1*DSIN(T2)) - BY1=CU*(P1*DSIN(T2)+Q1*DCOS(T2)) - ENDIF - DJ0=-BJ1 - DJ1=BJ0-BJ1/X - DY0=-BY1 - DY1=BY0-BY1/X - RETURN - END - -C 
********************************** - - SUBROUTINE INCOG(A,X,GIN,GIM,GIP) -C -C =================================================== -C Purpose: Compute the incomplete gamma function -C r(a,x), Г(a,x) and P(a,x) -C Input : a --- Parameter ( a ≤ 170 ) -C x --- Argument -C Output: GIN --- r(a,x) -C GIM --- Г(a,x) -C GIP --- P(a,x) -C Routine called: GAMMA2 for computing Г(x) -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - XAM=-X+A*DLOG(X) - IF (XAM.GT.700.0.OR.A.GT.170.0) THEN - WRITE(*,*)'a and/or x too large' - STOP - ENDIF - IF (X.EQ.0.0) THEN - GIN=0.0 - CALL GAMMA2(A,GA) - GIM=GA - GIP=0.0 - ELSE IF (X.LE.1.0+A) THEN - S=1.0D0/A - R=S - DO 10 K=1,60 - R=R*X/(A+K) - S=S+R - IF (DABS(R/S).LT.1.0D-15) GO TO 15 -10 CONTINUE -15 GIN=DEXP(XAM)*S - CALL GAMMA2(A,GA) - GIP=GIN/GA - GIM=GA-GIN - ELSE IF (X.GT.1.0+A) THEN - T0=0.0D0 - DO 20 K=60,1,-1 - T0=(K-A)/(1.0D0+K/(X+T0)) -20 CONTINUE - GIM=DEXP(XAM)/(X+T0) - CALL GAMMA2(A,GA) - GIN=GA-GIM - GIP=1.0D0-GIM/GA - ENDIF - END - - - -C ********************************** - - SUBROUTINE ITIKB(X,TI,TK) -C -C ======================================================= -C Purpose: Integrate Bessel functions I0(t) and K0(t) -C with respect to t from 0 to x -C Input : x --- Upper limit of the integral ( x ≥ 0 ) -C Output: TI --- Integration of I0(t) from 0 to x -C TK --- Integration of K0(t) from 0 to x -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.0.0D0) THEN - TI=0.0D0 - ELSE IF (X.LT.5.0D0) THEN - T1=X/5.0D0 - T=T1*T1 - TI=((((((((.59434D-3*T+.4500642D-2)*T - & +.044686921D0)*T+.300704878D0)*T+1.471860153D0) - & *T+4.844024624D0)*T+9.765629849D0)*T - & +10.416666367D0)*T+5.0D0)*T1 - ELSE IF (X.GE.5.0.AND.X.LE.8.0D0) THEN - T=5.0D0/X - TI=(((-.015166D0*T-.0202292D0)*T+.1294122D0)*T - & -.0302912D0)*T+.4161224D0 - TI=TI*DEXP(X)/DSQRT(X) - ELSE - T=8.0D0/X - 
TI=(((((-.0073995D0*T+.017744D0)*T-.0114858D0)*T - & +.55956D-2)*T+.59191D-2)*T+.0311734D0)*T - & +.3989423D0 - TI=TI*DEXP(X)/DSQRT(X) - ENDIF - IF (X.EQ.0.0D0) THEN - TK=0.0D0 - ELSE IF (X.LE.2.0D0) THEN - T1=X/2.0D0 - T=T1*T1 - TK=((((((.116D-5*T+.2069D-4)*T+.62664D-3)*T - & +.01110118D0)*T+.11227902D0)*T+.50407836D0)*T - & +.84556868D0)*T1 - TK=TK-DLOG(X/2.0D0)*TI - ELSE IF (X.GT.2.0.AND.X.LE.4.0D0) THEN - T=2.0D0/X - TK=(((.0160395D0*T-.0781715D0)*T+.185984D0)*T - & -.3584641D0)*T+1.2494934D0 - TK=PI/2.0D0-TK*DEXP(-X)/DSQRT(X) - ELSE IF (X.GT.4.0.AND.X.LE.7.0D0) THEN - T=4.0D0/X - TK=(((((.37128D-2*T-.0158449D0)*T+.0320504D0)*T - & -.0481455D0)*T+.0787284D0)*T-.1958273D0)*T - & +1.2533141D0 - TK=PI/2.0D0-TK*DEXP(-X)/DSQRT(X) - ELSE - T=7.0D0/X - TK=(((((.33934D-3*T-.163271D-2)*T+.417454D-2)*T - & -.933944D-2)*T+.02576646D0)*T-.11190289D0)*T - & +1.25331414D0 - TK=PI/2.0D0-TK*DEXP(-X)/DSQRT(X) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE ITIKA(X,TI,TK) -C -C ======================================================= -C Purpose: Integrate modified Bessel functions I0(t) and -C K0(t) with respect to t from 0 to x -C Input : x --- Upper limit of the integral ( x ≥ 0 ) -C Output: TI --- Integration of I0(t) from 0 to x -C TK --- Integration of K0(t) from 0 to x -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(10) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - DATA A/.625D0,1.0078125D0, - & 2.5927734375D0,9.1868591308594D0, - & 4.1567974090576D+1,2.2919635891914D+2, - & 1.491504060477D+3,1.1192354495579D+4, - & 9.515939374212D+4,9.0412425769041D+5/ - IF (X.EQ.0.0D0) THEN - TI=0.0D0 - TK=0.0D0 - RETURN - ELSE IF (X.LT.20.0D0) THEN - X2=X*X - TI=1.0D0 - R=1.0D0 - DO 10 K=1,50 - R=.25D0*R*(2*K-1.0D0)/(2*K+1.0D0)/(K*K)*X2 - TI=TI+R - IF (DABS(R/TI).LT.1.0D-12) GO TO 15 -10 CONTINUE -15 TI=TI*X - ELSE - X2=0.0D0 - TI=1.0D0 - R=1.0D0 - DO 20 K=1,10 - R=R/X -20 TI=TI+A(K)*R - 
RC1=1.0D0/DSQRT(2.0D0*PI*X) - TI=RC1*DEXP(X)*TI - ENDIF - IF (X.LT.12.0D0) THEN - E0=EL+DLOG(X/2.0D0) - B1=1.0D0-E0 - B2=0.0D0 - RS=0.0D0 - R=1.0D0 - TW=0.0D0 - DO 25 K=1,50 - R=.25D0*R*(2*K-1.0D0)/(2*K+1.0D0)/(K*K)*X2 - B1=B1+R*(1.0D0/(2*K+1)-E0) - RS=RS+1.0D0/K - B2=B2+R*RS - TK=B1+B2 - IF (DABS((TK-TW)/TK).LT.1.0D-12) GO TO 30 -25 TW=TK -30 TK=TK*X - ELSE - TK=1.0D0 - R=1.0D0 - DO 35 K=1,10 - R=-R/X -35 TK=TK+A(K)*R - RC2=DSQRT(PI/(2.0D0*X)) - TK=PI/2.0D0-RC2*TK*DEXP(-X) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE JYV(V,X,VM,BJ,DJ,BY,DY) -C -C ======================================================= -C Purpose: Compute Bessel functions Jv(x) and Yv(x) -C and their derivatives -C Input : x --- Argument of Jv(x) and Yv(x) -C v --- Order of Jv(x) and Yv(x) -C ( v = n+v0, 0 ≤ v0 < 1, n = 0,1,2,... ) -C Output: BJ(n) --- Jn+v0(x) -C DJ(n) --- Jn+v0'(x) -C BY(n) --- Yn+v0(x) -C DY(n) --- Yn+v0'(x) -C VM --- Highest order computed -C Routines called: -C (1) GAMMA2 for computing gamma function -C (2) MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BJ(0:*),DJ(0:*),BY(0:*),DY(0:*) - EL=.5772156649015329D0 - PI=3.141592653589793D0 - RP2=.63661977236758D0 - X2=X*X - N=INT(V) - V0=V-N - IF (X.LT.1.0D-100) THEN - DO 10 K=0,N - BJ(K)=0.0D0 - DJ(K)=0.0D0 - BY(K)=-1.0D+300 -10 DY(K)=1.0D+300 - IF (V0.EQ.0.0) THEN - BJ(0)=1.0D0 - DJ(1)=0.5D0 - ELSE - DJ(0)=1.0D+300 - ENDIF - VM=V - RETURN - ENDIF - BJV0=0.0D0 - BJV1=0.0D0 - BYV0=0.0D0 - BYV1=0.0D0 - IF (X.LE.12.0) THEN - DO 25 L=0,1 - VL=V0+L - BJVL=1.0D0 - R=1.0D0 - DO 15 K=1,40 - R=-0.25D0*R*X2/(K*(K+VL)) - BJVL=BJVL+R - IF (DABS(R).LT.DABS(BJVL)*1.0D-15) GO TO 20 -15 CONTINUE -20 VG=1.0D0+VL - CALL GAMMA2(VG,GA) - A=(0.5D0*X)**VL/GA - IF (L.EQ.0) BJV0=BJVL*A - IF (L.EQ.1) BJV1=BJVL*A -25 CONTINUE - ELSE - K0=11 - IF (X.GE.35.0) K0=10 - IF (X.GE.50.0) K0=8 - 
DO 40 J=0,1 - VV=4.0D0*(J+V0)*(J+V0) - PX=1.0D0 - RP=1.0D0 - DO 30 K=1,K0 - RP=-0.78125D-2*RP*(VV-(4.0*K-3.0)**2.0)*(VV- - & (4.0*K-1.0)**2.0)/(K*(2.0*K-1.0)*X2) -30 PX=PX+RP - QX=1.0D0 - RQ=1.0D0 - DO 35 K=1,K0 - RQ=-0.78125D-2*RQ*(VV-(4.0*K-1.0)**2.0)*(VV- - & (4.0*K+1.0)**2.0)/(K*(2.0*K+1.0)*X2) -35 QX=QX+RQ - QX=0.125D0*(VV-1.0)*QX/X - XK=X-(0.5D0*(J+V0)+0.25D0)*PI - A0=DSQRT(RP2/X) - CK=DCOS(XK) - SK=DSIN(XK) - IF (J.EQ.0) THEN - BJV0=A0*(PX*CK-QX*SK) - BYV0=A0*(PX*SK+QX*CK) - ELSE IF (J.EQ.1) THEN - BJV1=A0*(PX*CK-QX*SK) - BYV1=A0*(PX*SK+QX*CK) - ENDIF -40 CONTINUE - ENDIF - BJ(0)=BJV0 - BJ(1)=BJV1 - DJ(0)=V0/X*BJ(0)-BJ(1) - DJ(1)=-(1.0D0+V0)/X*BJ(1)+BJ(0) - IF (N.GE.2.AND.N.LE.INT(0.9*X)) THEN - F0=BJV0 - F1=BJV1 - DO 45 K=2,N - F=2.0D0*(K+V0-1.0D0)/X*F1-F0 - BJ(K)=F - F0=F1 -45 F1=F - ELSE IF (N.GE.2) THEN - M=MSTA1(X,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F=0.0D0 - F2=0.0D0 - F1=1.0D-100 - DO 50 K=M,0,-1 - F=2.0D0*(V0+K+1.0D0)/X*F1-F2 - IF (K.LE.N) BJ(K)=F - F2=F1 -50 F1=F - IF (DABS(BJV0).GT.DABS(BJV1)) THEN - CS=BJV0/F - ELSE - CS=BJV1/F2 - ENDIF - DO 55 K=0,N -55 BJ(K)=CS*BJ(K) - ENDIF - DO 60 K=2,N -60 DJ(K)=-(K+V0)/X*BJ(K)+BJ(K-1) - IF (X.LE.12.0D0) THEN - IF (V0.NE.0.0) THEN - BJU0=0.0D0 - BJU1=0.0D0 - DO 75 L=0,1 - VL=V0+L - BJVL=1.0D0 - R=1.0D0 - DO 65 K=1,40 - R=-0.25D0*R*X2/(K*(K-VL)) - BJVL=BJVL+R - IF (DABS(R).LT.DABS(BJVL)*1.0D-15) GO TO 70 -65 CONTINUE -70 VG=1.0D0-VL - CALL GAMMA2(VG,GB) - B=(2.0D0/X)**VL/GB - IF (L.EQ.0) BJU0=BJVL*B - IF (L.EQ.1) BJU1=BJVL*B -75 CONTINUE - PV0=PI*V0 - PV1=PI*(1.0D0+V0) - BYV0=(BJV0*DCOS(PV0)-BJU0)/DSIN(PV0) - BYV1=(BJV1*DCOS(PV1)-BJU1)/DSIN(PV1) - ELSE - EC=DLOG(X/2.0D0)+EL - CS0=0.0D0 - W0=0.0D0 - R0=1.0D0 - DO 80 K=1,30 - W0=W0+1.0D0/K - R0=-0.25D0*R0/(K*K)*X2 -80 CS0=CS0+R0*W0 - BYV0=RP2*(EC*BJV0-CS0) - CS1=1.0D0 - W1=0.0D0 - R1=1.0D0 - DO 85 K=1,30 - W1=W1+1.0D0/K - R1=-0.25D0*R1/(K*(K+1))*X2 -85 CS1=CS1+R1*(2.0D0*W1+1.0D0/(K+1.0D0)) - BYV1=RP2*(EC*BJV1-1.0D0/X-0.25D0*X*CS1) - 
ENDIF - ENDIF - BY(0)=BYV0 - BY(1)=BYV1 - DO 90 K=2,N - BYVK=2.0D0*(V0+K-1.0D0)/X*BYV1-BYV0 - BY(K)=BYVK - BYV0=BYV1 -90 BYV1=BYVK - DY(0)=V0/X*BY(0)-BY(1) - DO 95 K=1,N -95 DY(K)=-(K+V0)/X*BY(K)+BY(K-1) - VM=N+V0 - RETURN - END - - - -C ********************************** - - SUBROUTINE JYNB(N,X,NM,BJ,DJ,BY,DY) -C -C ===================================================== -C Purpose: Compute Bessel functions Jn(x), Yn(x) and -C their derivatives -C Input : x --- Argument of Jn(x) and Yn(x) ( x ≥ 0 ) -C n --- Order of Jn(x) and Yn(x) -C Output: BJ(n) --- Jn(x) -C DJ(n) --- Jn'(x) -C BY(n) --- Yn(x) -C DY(n) --- Yn'(x) -C NM --- Highest order computed -C Routines called: -C JYNBH to calculate the Jn and Yn -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BJ(0:N),DJ(0:N),BY(0:N),DY(0:N) - CALL JYNBH(N,0,X,NM,BJ,BY) -C Compute derivatives by differentiation formulas - IF (X.LT.1.0D-100) THEN - DO 10 K=0,N - DJ(K) = 0.0D0 - 10 DY(K) = 1.0D+300 - DJ(1)=0.5D0 - ELSE - DJ(0)=-BJ(1) - DO 40 K=1,NM - 40 DJ(K)=BJ(K-1)-K/X*BJ(K) - DY(0)=-BY(1) - DO 50 K=1,NM - 50 DY(K)=BY(K-1)-K*BY(K)/X - END IF - RETURN - END - - -C ********************************** - - SUBROUTINE JYNBH(N,NMIN,X,NM,BJ,BY) -C -C ===================================================== -C Purpose: Compute Bessel functions Jn(x), Yn(x) -C Input : x --- Argument of Jn(x) and Yn(x) ( x ≥ 0 ) -C n --- Highest order of Jn(x) and Yn(x) computed ( n ≥ 0 ) -C nmin -- Lowest order computed ( nmin ≥ 0 ) -C Output: BJ(n-NMIN) --- Jn(x) ; if indexing starts at 0 -C BY(n-NMIN) --- Yn(x) ; if indexing starts at 0 -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 to calculate the starting -C point for backward recurrence -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BJ(0:N-NMIN),BY(0:N-NMIN),A(4),B(4),A1(4),B1(4) - PI=3.141592653589793D0 - R2P=.63661977236758D0 - NM=N - IF 
(X.LT.1.0D-100) THEN - DO 10 K=NMIN,N - BJ(K-NMIN)=0.0D0 -10 BY(K-NMIN)=-1.0D+300 - IF (NMIN.EQ.0) BJ(0)=1.0D0 - RETURN - ENDIF - IF (X.LE.300.0.OR.N.GT.INT(0.9*X)) THEN -C Backward recurrence for Jn - IF (N.EQ.0) NM=1 - M=MSTA1(X,200) - IF (M.LT.NM) THEN - NM=M - ELSE - M=MSTA2(X,NM,15) - ENDIF - BS=0.0D0 - SU=0.0D0 - SV=0.0D0 - F2=0.0D0 - F1=1.0D-100 - F=0.0D0 - DO 15 K=M,0,-1 - F=2.0D0*(K+1.0D0)/X*F1-F2 - IF (K.LE.NM .AND. K.GE.NMIN) BJ(K-NMIN)=F - IF (K.EQ.2*INT(K/2).AND.K.NE.0) THEN - BS=BS+2.0D0*F - SU=SU+(-1)**(K/2)*F/K - ELSE IF (K.GT.1) THEN - SV=SV+(-1)**(K/2)*K/(K*K-1.0D0)*F - ENDIF - F2=F1 -15 F1=F - S0=BS+F - DO 20 K=NMIN,NM -20 BJ(K-NMIN)=BJ(K-NMIN)/S0 -C Estimates for Yn at start of recurrence - BJ0 = F1 / S0 - BJ1 = F2 / S0 - EC=DLOG(X/2.0D0)+0.5772156649015329D0 - BY0=R2P*(EC*BJ0-4.0D0*SU/S0) - BY1=R2P*((EC-1.0D0)*BJ1-BJ0/X-4.0D0*SV/S0) - IF (0.GE.NMIN) BY(0-NMIN)=BY0 - IF (1.GE.NMIN) BY(1-NMIN)=BY1 - KY=2 - ELSE -C Hankel expansion - DATA A/-.7031250000000000D-01,.1121520996093750D+00, - & -.5725014209747314D+00,.6074042001273483D+01/ - DATA B/ .7324218750000000D-01,-.2271080017089844D+00, - & .1727727502584457D+01,-.2438052969955606D+02/ - DATA A1/.1171875000000000D+00,-.1441955566406250D+00, - & .6765925884246826D+00,-.6883914268109947D+01/ - DATA B1/-.1025390625000000D+00,.2775764465332031D+00, - & -.1993531733751297D+01,.2724882731126854D+02/ - T1=X-0.25D0*PI - P0=1.0D0 - Q0=-0.125D0/X - DO 25 K=1,4 - P0=P0+A(K)*X**(-2*K) -25 Q0=Q0+B(K)*X**(-2*K-1) - CU=DSQRT(R2P/X) - BJ0=CU*(P0*DCOS(T1)-Q0*DSIN(T1)) - BY0=CU*(P0*DSIN(T1)+Q0*DCOS(T1)) - IF (0.GE.NMIN) BJ(0-NMIN)=BJ0 - IF (0.GE.NMIN) BY(0-NMIN)=BY0 - T2=X-0.75D0*PI - P1=1.0D0 - Q1=0.375D0/X - DO 30 K=1,4 - P1=P1+A1(K)*X**(-2*K) -30 Q1=Q1+B1(K)*X**(-2*K-1) - BJ1=CU*(P1*DCOS(T2)-Q1*DSIN(T2)) - BY1=CU*(P1*DSIN(T2)+Q1*DCOS(T2)) - IF (1.GE.NMIN) BJ(1-NMIN)=BJ1 - IF (1.GE.NMIN) BY(1-NMIN)=BY1 - DO 35 K=2,NM - BJK=2.0D0*(K-1.0D0)/X*BJ1-BJ0 - IF (K.GE.NMIN) BJ(K-NMIN)=BJK - BJ0=BJ1 -35 BJ1=BJK - KY=2 - 
ENDIF -C Forward recurrence for Yn - DO 45 K=KY,NM - BYK=2.0D0*(K-1.0D0)*BY1/X-BY0 - IF (K.GE.NMIN) BY(K-NMIN)=BYK - BY0=BY1 -45 BY1=BYK - RETURN - END - - -C ********************************** - - SUBROUTINE STVH1(X,SH1) -C -C ============================================= -C Purpose: Compute Struve function H1(x) -C Input : x --- Argument of H1(x) ( x ≥ 0 ) -C Output: SH1 --- H1(x) -C ============================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - R=1.0D0 - IF (X.LE.20.0D0) THEN - S=0.0D0 - A0=-2.0D0/PI - DO 10 K=1,60 - R=-R*X*X/(4.0D0*K*K-1.0D0) - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 15 -10 CONTINUE -15 SH1=A0*S - ELSE - S=1.0D0 - KM=INT(.5*X) - IF (X.GT.50.D0) KM=25 - DO 20 K=1,KM - R=-R*(4.0D0*K*K-1.0D0)/(X*X) - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 25 -20 CONTINUE -25 T=4.0D0/X - T2=T*T - P1=((((.42414D-5*T2-.20092D-4)*T2+.580759D-4)*T2 - & -.223203D-3)*T2+.29218256D-2)*T2+.3989422819D0 - Q1=T*(((((-.36594D-5*T2+.1622D-4)*T2-.398708D-4)* - & T2+.1064741D-3)*T2-.63904D-3)*T2+.0374008364D0) - TA1=X-.75D0*PI - BY1=2.0D0/DSQRT(X)*(P1*DSIN(TA1)+Q1*DCOS(TA1)) - SH1=2.0/PI*(1.0D0+S/(X*X))+BY1 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE LEGZO(N,X,W) -C -C ========================================================= -C Purpose : Compute the zeros of Legendre polynomial Pn(x) -C in the interval [-1,1], and the corresponding -C weighting coefficients for Gauss-Legendre -C integration -C Input : n --- Order of the Legendre polynomial -C Output: X(n) --- Zeros of the Legendre polynomial -C W(n) --- Corresponding weighting coefficients -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(N),W(N) - N0=(N+1)/2 - PF=0.0D0 - PD=0.0D0 - DO 45 NR=1,N0 - Z=DCOS(3.1415926D0*(NR-0.25D0)/N) -10 Z0=Z - P=1.0D0 - DO 15 I=1,NR-1 -15 P=P*(Z-X(I)) - F0=1.0D0 - IF (NR.EQ.N0.AND.N.NE.2*INT(N/2)) Z=0.0D0 - F1=Z - DO 20 K=2,N - 
PF=(2.0D0-1.0D0/K)*Z*F1-(1.0D0-1.0D0/K)*F0 - PD=K*(F1-Z*PF)/(1.0D0-Z*Z) - F0=F1 -20 F1=PF - IF (Z.EQ.0.0) GO TO 40 - FD=PF/P - Q=0.0D0 - DO 35 I=1,NR - WP=1.0D0 - DO 30 J=1,NR - IF (J.NE.I) WP=WP*(Z-X(J)) -30 CONTINUE -35 Q=Q+WP - GD=(PD-Q*FD)/P - Z=Z-FD/GD - IF (DABS(Z-Z0).GT.DABS(Z)*1.0D-15) GO TO 10 -40 X(NR)=Z - X(N+1-NR)=-Z - W(NR)=2.0D0/((1.0D0-Z*Z)*PD*PD) -45 W(N+1-NR)=W(NR) - RETURN - END - -C ********************************** - - SUBROUTINE ASWFA(M,N,C,X,KD,CV,S1F,S1D) -C -C =========================================================== -C Purpose: Compute the prolate and oblate spheroidal angular -C functions of the first kind and their derivatives -C Input : m --- Mode parameter, m = 0,1,2,... -C n --- Mode parameter, n = m,m+1,... -C c --- Spheroidal parameter -C x --- Argument of angular function, |x| < 1.0 -C KD --- Function code -C KD=1 for prolate; KD=-1 for oblate -C cv --- Characteristic value -C Output: S1F --- Angular function of the first kind -C S1D --- Derivative of the angular function of -C the first kind -C Routine called: -C SCKB for computing expansion coefficients ck -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CK(200),DF(200) - EPS=1.0D-14 - X0=X - X=DABS(X) - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - NM=40+INT((N-M)/2+C) - NM2=NM/2-2 - CALL SDMN(M,N,C,CV,KD,DF) - CALL SCKB(M,N,C,DF,CK) - X1=1.0D0-X*X - IF (M.EQ.0.AND.X1.EQ.0.0D0) THEN - A0=1.0D0 - ELSE - A0=X1**(0.5D0*M) - ENDIF - SU1=CK(1) - DO 10 K=1,NM2 - R=CK(K+1)*X1**K - SU1=SU1+R - IF (K.GE.10.AND.DABS(R/SU1).LT.EPS) GO TO 15 -10 CONTINUE -15 S1F=A0*X**IP*SU1 - IF (X.EQ.1.0D0) THEN - IF (M.EQ.0) S1D=IP*CK(1)-2.0D0*CK(2) - IF (M.EQ.1) S1D=-1.0D+100 - IF (M.EQ.2) S1D=-2.0D0*CK(1) - IF (M.GE.3) S1D=0.0D0 - ELSE - D0=IP-M/X1*X**(IP+1.0D0) - D1=-2.0D0*A0*X**(IP+1.0D0) - SU2=CK(2) - DO 20 K=2,NM2 - R=K*CK(K+1)*X1**(K-1.0D0) - SU2=SU2+R - IF (K.GE.10.AND.DABS(R/SU2).LT.EPS) GO TO 25 -20 CONTINUE -25 
S1D=D0*A0*SU1+D1*SU2 - ENDIF - IF (X0.LT.0.0D0.AND.IP.EQ.0) S1D=-S1D - IF (X0.LT.0.0D0.AND.IP.EQ.1) S1F=-S1F - X=X0 - RETURN - END - - - -C ********************************** - - SUBROUTINE JYNA(N,X,NM,BJ,DJ,BY,DY) -C -C ========================================================== -C Purpose: Compute Bessel functions Jn(x) & Yn(x) and -C their derivatives -C Input : x --- Argument of Jn(x) & Yn(x) ( x ≥ 0 ) -C n --- Order of Jn(x) & Yn(x) -C Output: BJ(n) --- Jn(x) -C DJ(n) --- Jn'(x) -C BY(n) --- Yn(x) -C DY(n) --- Yn'(x) -C NM --- Highest order computed -C Routines called: -C (1) JY01B to calculate J0(x), J1(x), Y0(x) & Y1(x) -C (2) MSTA1 and MSTA2 to calculate the starting -C point for backward recurrence -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BJ(0:N),BY(0:N),DJ(0:N),DY(0:N) - NM=N - IF (X.LT.1.0D-100) THEN - DO 10 K=0,N - BJ(K)=0.0D0 - DJ(K)=0.0D0 - BY(K)=-1.0D+300 -10 DY(K)=1.0D+300 - BJ(0)=1.0D0 - DJ(1)=0.5D0 - RETURN - ENDIF - CALL JY01B(X,BJ0,DJ0,BJ1,DJ1,BY0,DY0,BY1,DY1) - BJ(0)=BJ0 - BJ(1)=BJ1 - BY(0)=BY0 - BY(1)=BY1 - DJ(0)=DJ0 - DJ(1)=DJ1 - DY(0)=DY0 - DY(1)=DY1 - IF (N.LE.1) RETURN - IF (N.LT.INT(0.9*X)) THEN - DO 20 K=2,N - BJK=2.0D0*(K-1.0D0)/X*BJ1-BJ0 - BJ(K)=BJK - BJ0=BJ1 -20 BJ1=BJK - ELSE - M=MSTA1(X,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F2=0.0D0 - F1=1.0D-100 - F=0.0D0 - DO 30 K=M,0,-1 - F=2.0D0*(K+1.0D0)/X*F1-F2 - IF (K.LE.NM) BJ(K)=F - F2=F1 -30 F1=F - IF (DABS(BJ0).GT.DABS(BJ1)) THEN - CS=BJ0/F - ELSE - CS=BJ1/F2 - ENDIF - DO 40 K=0,NM -40 BJ(K)=CS*BJ(K) - ENDIF - DO 50 K=2,NM -50 DJ(K)=BJ(K-1)-K/X*BJ(K) - F0=BY(0) - F1=BY(1) - DO 60 K=2,NM - F=2.0D0*(K-1.0D0)/X*F1-F0 - BY(K)=F - F0=F1 -60 F1=F - DO 70 K=2,NM -70 DY(K)=BY(K-1)-K*BY(K)/X - RETURN - END - - - -C ********************************** - - SUBROUTINE PBDV(V,X,DV,DP,PDF,PDD) -C -C ==================================================== -C Purpose: Compute parabolic cylinder functions Dv(x) 
-C and their derivatives -C Input: x --- Argument of Dv(x) -C v --- Order of Dv(x) -C Output: DV(na) --- Dn+v0(x) -C DP(na) --- Dn+v0'(x) -C ( na = |n|, v0 = v-n, |v0| < 1, -C n = 0,±1,±2,… ) -C PDF --- Dv(x) -C PDD --- Dv'(x) -C Routines called: -C (1) DVSA for computing Dv(x) for small |x| -C (2) DVLA for computing Dv(x) for large |x| -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DV(0:*),DP(0:*) - XA=DABS(X) - VH=V - V=V+DSIGN(1.0D0,V) - NV=INT(V) - V0=V-NV - NA=ABS(NV) - EP=DEXP(-.25D0*X*X) - JA=0 - IF (NA.GE.1) JA=1 - IF (V.GE.0.0) THEN - IF (V0.EQ.0.0) THEN - PD0=EP - PD1=X*EP - ELSE - DO 10 L=0,JA - V1=V0+L - IF (XA.LE.5.8) CALL DVSA(V1,X,PD1) - IF (XA.GT.5.8) CALL DVLA(V1,X,PD1) - IF (L.EQ.0) PD0=PD1 -10 CONTINUE - ENDIF - DV(0)=PD0 - DV(1)=PD1 - DO 15 K=2,NA - PDF=X*PD1-(K+V0-1.0D0)*PD0 - DV(K)=PDF - PD0=PD1 -15 PD1=PDF - ELSE - IF (X.LE.0.0) THEN - IF (XA.LE.5.8D0) THEN - CALL DVSA(V0,X,PD0) - V1=V0-1.0D0 - CALL DVSA(V1,X,PD1) - ELSE - CALL DVLA(V0,X,PD0) - V1=V0-1.0D0 - CALL DVLA(V1,X,PD1) - ENDIF - DV(0)=PD0 - DV(1)=PD1 - DO 20 K=2,NA - PD=(-X*PD1+PD0)/(K-1.0D0-V0) - DV(K)=PD - PD0=PD1 -20 PD1=PD - ELSE IF (X.LE.2.0) THEN - V2=NV+V0 - IF (NV.EQ.0) V2=V2-1.0D0 - NK=INT(-V2) - CALL DVSA(V2,X,F1) - V1=V2+1.0D0 - CALL DVSA(V1,X,F0) - DV(NK)=F1 - DV(NK-1)=F0 - DO 25 K=NK-2,0,-1 - F=X*F0+(K-V0+1.0D0)*F1 - DV(K)=F - F1=F0 -25 F0=F - ELSE - IF (XA.LE.5.8) CALL DVSA(V0,X,PD0) - IF (XA.GT.5.8) CALL DVLA(V0,X,PD0) - DV(0)=PD0 - M=100+NA - F1=0.0D0 - F0=1.0D-30 - F=0.0D0 - DO 30 K=M,0,-1 - F=X*F0+(K-V0+1.0D0)*F1 - IF (K.LE.NA) DV(K)=F - F1=F0 -30 F0=F - S0=PD0/F - DO 35 K=0,NA -35 DV(K)=S0*DV(K) - ENDIF - ENDIF - DO 40 K=0,NA-1 - V1=ABS(V0)+K - IF (V.GE.0.0D0) THEN - DP(K)=0.5D0*X*DV(K)-DV(K+1) - ELSE - DP(K)=-0.5D0*X*DV(K)-V1*DV(K+1) - ENDIF -40 CONTINUE - PDF=DV(NA-1) - PDD=DP(NA-1) - V=VH - RETURN - END - - - -C ********************************** - - SUBROUTINE ITSH0(X,TH0) -C -C 
=================================================== -C Purpose: Evaluate the integral of Struve function -C H0(t) with respect to t from 0 and x -C Input : x --- Upper limit ( x ≥ 0 ) -C Output: TH0 --- Integration of H0(t) from 0 and x -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(25) - PI=3.141592653589793D0 - R=1.0D0 - IF (X.LE.30.0) THEN - S=0.5D0 - DO 10 K=1,100 - RD=1.0D0 - IF (K.EQ.1) RD=0.5D0 - R=-R*RD*K/(K+1.0D0)*(X/(2.0D0*K+1.0D0))**2 - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 15 -10 CONTINUE -15 TH0=2.0D0/PI*X*X*S - ELSE - S=1.0D0 - DO 20 K=1,12 - R=-R*K/(K+1.0D0)*((2.0D0*K+1.0D0)/X)**2 - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 25 -20 CONTINUE -25 EL=.57721566490153D0 - S0=S/(PI*X*X)+2.0D0/PI*(DLOG(2.0D0*X)+EL) - A0=1.0D0 - A1=5.0D0/8.0D0 - A(1)=A1 - DO 30 K=1,20 - AF=((1.5D0*(K+.5D0)*(K+5.0D0/6.0D0)*A1-.5D0 - & *(K+.5D0)*(K+.5D0)*(K-.5D0)*A0))/(K+1.0D0) - A(K+1)=AF - A0=A1 -30 A1=AF - BF=1.0D0 - R=1.0D0 - DO 35 K=1,10 - R=-R/(X*X) -35 BF=BF+A(2*K)*R - BG=A(1)/X - R=1.0D0/X - DO 40 K=1,10 - R=-R/(X*X) -40 BG=BG+A(2*K+1)*R - XP=X+.25D0*PI - TY=DSQRT(2.0D0/(PI*X))*(BG*DCOS(XP)-BF*DSIN(XP)) - TH0=TY+S0 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CERZO(NT,ZO) -C -C =============================================================== -C Purpose : Evaluate the complex zeros of error function erf(z) -C using the modified Newton's iteration method -C Input : NT --- Total number of zeros -C Output: ZO(L) --- L-th zero of erf(z), L=1,2,...,NT -C Routine called: CERF for computing erf(z) and erf'(z) -C =============================================================== -C - IMPLICIT DOUBLE PRECISION (E,P,W) - IMPLICIT COMPLEX *16 (C,Z) - DIMENSION ZO(NT) - PI=3.141592653589793D0 - W=0.0D0 - DO 35 NR=1,NT - PU=DSQRT(PI*(4.0D0*NR-0.5D0)) - PV=PI*DSQRT(2.0D0*NR-0.25D0) - PX=0.5*PU-0.5*DLOG(PV)/PU - PY=0.5*PU+0.5*DLOG(PV)/PU - Z=CMPLX(PX,PY) - IT=0 -15 IT=IT+1 - CALL 
CERF(Z,ZF,ZD) - ZP=(1.0D0,0.0D0) - DO 20 I=1,NR-1 -20 ZP=ZP*(Z-ZO(I)) - ZFD=ZF/ZP - ZQ=(0.0D0,0.0D0) - DO 30 I=1,NR-1 - ZW=(1.0D0,0.0D0) - DO 25 J=1,NR-1 - IF (J.EQ.I) GO TO 25 - ZW=ZW*(Z-ZO(J)) -25 CONTINUE -30 ZQ=ZQ+ZW - ZGD=(ZD-ZQ*ZFD)/ZP - Z=Z-ZFD/ZGD - W0=W - W=CDABS(Z) - IF (IT.LE.50.AND.DABS((W-W0)/W).GT.1.0D-11) GO TO 15 -35 ZO(NR)=Z - RETURN - END - - - -C ********************************** - - SUBROUTINE GAMMA2(X,GA) -C -C ================================================== -C Purpose: Compute gamma function Г(x) -C Input : x --- Argument of Г(x) -C ( x is not equal to 0,-1,-2,…) -C Output: GA --- Г(x) -C ================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION G(26) - PI=3.141592653589793D0 - IF (X.EQ.INT(X)) THEN - IF (X.GT.0.0D0) THEN - GA=1.0D0 - M1=X-1 - DO 10 K=2,M1 -10 GA=GA*K - ELSE - GA=1.0D+300 - ENDIF - ELSE - R=1.0D0 - IF (DABS(X).GT.1.0D0) THEN - Z=DABS(X) - M=INT(Z) - DO 15 K=1,M -15 R=R*(Z-K) - Z=Z-M - ELSE - Z=X - ENDIF - DATA G/1.0D0,0.5772156649015329D0, - & -0.6558780715202538D0, -0.420026350340952D-1, - & 0.1665386113822915D0,-.421977345555443D-1, - & -.96219715278770D-2, .72189432466630D-2, - & -.11651675918591D-2, -.2152416741149D-3, - & .1280502823882D-3, -.201348547807D-4, - & -.12504934821D-5, .11330272320D-5, - & -.2056338417D-6, .61160950D-8, - & .50020075D-8, -.11812746D-8, - & .1043427D-9, .77823D-11, - & -.36968D-11, .51D-12, - & -.206D-13, -.54D-14, .14D-14, .1D-15/ - GR=G(26) - DO 20 K=25,1,-1 -20 GR=GR*Z+G(K) - GA=1.0D0/(GR*Z) - IF (DABS(X).GT.1.0D0) THEN - GA=GA*R - IF (X.LT.0.0D0) GA=-PI/(X*GA*DSIN(PI*X)) - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CHGU(A,B,X,HU,MD) -C -C ======================================================= -C Purpose: Compute the confluent hypergeometric function -C U(a,b,x) -C Input : a --- Parameter -C b --- Parameter -C x --- Argument ( x > 0 ) -C Output: HU --- U(a,b,x) -C MD --- Method code -C Routines 
called: -C (1) CHGUS for small x ( MD=1 ) -C (2) CHGUL for large x ( MD=2 ) -C (3) CHGUBI for integer b ( MD=3 ) -C (4) CHGUIT for numerical integration ( MD=4 ) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - LOGICAL IL1,IL2,IL3,BL1,BL2,BL3,BN - AA=A-B+1.0D0 - IL1=A.EQ.INT(A).AND.A.LE.0.0 - IL2=AA.EQ.INT(AA).AND.AA.LE.0.0 - IL3=ABS(A*(A-B+1.0))/X.LE.2.0 - BL1=X.LE.5.0.OR.(X.LE.10.0.AND.A.LE.2.0) - BL2=(X.GT.5.0.AND.X.LE.12.5).AND.(A.GE.1.0.AND.B.GE.A+4.0) - BL3=X.GT.12.5.AND.A.GE.5.0.AND.B.GE.A+5.0 - BN=B.EQ.INT(B).AND.B.NE.0.0 - ID1=-100 - HU1=0.0D0 - IF (B.NE.INT(B)) THEN - CALL CHGUS(A,B,X,HU,ID1) - MD=1 - IF (ID1.GE.6) RETURN - HU1=HU - ENDIF - IF (IL1.OR.IL2.OR.IL3) THEN - CALL CHGUL(A,B,X,HU,ID) - MD=2 - IF (ID.GE.6) RETURN - IF (ID1.GT.ID) THEN - MD=1 - ID=ID1 - HU=HU1 - ENDIF - ENDIF - IF (A.GE.0.0) THEN - IF (BN.AND.(BL1.OR.BL2.OR.BL3)) THEN - CALL CHGUBI(A,B,X,HU,ID) - MD=3 - ELSE - CALL CHGUIT(A,B,X,HU,ID) - MD=4 - ENDIF - ELSE - IF (B.LE.A) THEN - A00=A - B00=B - A=A-B+1.0D0 - B=2.0D0-B - CALL CHGUIT(A,B,X,HU,ID) - HU=X**(1.0D0-B00)*HU - A=A00 - B=B00 - MD=4 - ELSE IF (BN.AND.(.NOT.IL1)) THEN - CALL CHGUBI(A,B,X,HU,ID) - MD=3 - ENDIF - ENDIF - IF (ID.LT.6) WRITE(*,*)'No accurate result obtained' - RETURN - END - - - -C ********************************** - - SUBROUTINE LAMN(N,X,NM,BL,DL) -C -C ========================================================= -C Purpose: Compute lambda functions and their derivatives -C Input: x --- Argument of lambda function -C n --- Order of lambda function -C Output: BL(n) --- Lambda function of order n -C DL(n) --- Derivative of lambda function -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the start -C point for backward recurrence -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BL(0:N),DL(0:N) - NM=N - IF (DABS(X).LT.1.0D-100) THEN - DO 10 K=0,N - BL(K)=0.0D0 -10 
DL(K)=0.0D0 - BL(0)=1.0D0 - DL(1)=0.5D0 - RETURN - ENDIF - IF (X.LE.12.0D0) THEN - X2=X*X - DO 25 K=0,N - BK=1.0D0 - R=1.0D0 - DO 15 I=1,50 - R=-0.25D0*R*X2/(I*(I+K)) - BK=BK+R - IF (DABS(R).LT.DABS(BK)*1.0D-15) GO TO 20 -15 CONTINUE -20 BL(K)=BK -25 IF (K.GE.1) DL(K-1)=-0.5D0*X/K*BK - UK=1.0D0 - R=1.0D0 - DO 30 I=1,50 - R=-0.25D0*R*X2/(I*(I+N+1.0D0)) - UK=UK+R - IF (DABS(R).LT.DABS(UK)*1.0D-15) GO TO 35 -30 CONTINUE -35 DL(N)=-0.5D0*X/(N+1.0D0)*UK - RETURN - ENDIF - IF (N.EQ.0) NM=1 - M=MSTA1(X,200) - IF (M.LT.NM) THEN - NM=M - ELSE - M=MSTA2(X,NM,15) - ENDIF - BS=0.0D0 - F=0.0D0 - F0=0.0D0 - F1=1.0D-100 - DO 40 K=M,0,-1 - F=2.0D0*(K+1.0D0)*F1/X-F0 - IF (K.LE.NM) BL(K)=F - IF (K.EQ.2*INT(K/2)) BS=BS+2.0D0*F - F0=F1 -40 F1=F - BG=BS-F - DO 45 K=0,NM -45 BL(K)=BL(K)/BG - R0=1.0D0 - DO 50 K=1,NM - R0=2.0D0*R0*K/X -50 BL(K)=R0*BL(K) - DL(0)=-0.5D0*X*BL(1) - DO 55 K=1,NM -55 DL(K)=2.0D0*K/X*(BL(K-1)-BL(K)) - RETURN - END - - - -C ********************************** - - SUBROUTINE COMELP(HK,CK,CE) -C -C ================================================== -C Purpose: Compute complete elliptic integrals K(k) -C and E(k) -C Input : K --- Modulus k ( 0 ≤ k ≤ 1 ) -C Output : CK --- K(k) -C CE --- E(k) -C ================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PK=1.0D0-HK*HK - IF (HK.EQ.1.0) THEN - CK=1.0D+300 - CE=1.0D0 - ELSE - AK=(((.01451196212D0*PK+.03742563713D0)*PK - & +.03590092383D0)*PK+.09666344259D0)*PK+ - & 1.38629436112D0 - BK=(((.00441787012D0*PK+.03328355346D0)*PK+ - & .06880248576D0)*PK+.12498593597D0)*PK+.5D0 - CK=AK-BK*DLOG(PK) - AE=(((.01736506451D0*PK+.04757383546D0)*PK+ - & .0626060122D0)*PK+.44325141463D0)*PK+1.0D0 - BE=(((.00526449639D0*PK+.04069697526D0)*PK+ - & .09200180037D0)*PK+.2499836831D0)*PK - CE=AE-BE*DLOG(PK) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE INCOB(A,B,X,BIX) -C -C ======================================================== -C Purpose: Compute the incomplete beta 
function Ix(a,b) -C Input : a --- Parameter -C b --- Parameter -C x --- Argument ( 0 ≤ x ≤ 1 ) -C Output: BIX --- Ix(a,b) -C Routine called: BETA for computing beta function B(p,q) -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DK(51),FK(51) - S0=(A+1.0D0)/(A+B+2.0D0) - CALL BETA(A,B,BT) - IF (X.LE.S0) THEN - DO 10 K=1,20 -10 DK(2*K)=K*(B-K)*X/(A+2.0D0*K-1.0D0)/(A+2.0D0*K) - DO 15 K=0,20 -15 DK(2*K+1)=-(A+K)*(A+B+K)*X/(A+2.D0*K)/(A+2.0*K+1.0) - T1=0.0D0 - DO 20 K=20,1,-1 -20 T1=DK(K)/(1.0D0+T1) - TA=1.0D0/(1.0D0+T1) - BIX=X**A*(1.0D0-X)**B/(A*BT)*TA - ELSE - DO 25 K=1,20 -25 FK(2*K)=K*(A-K)*(1.0D0-X)/(B+2.*K-1.0)/(B+2.0*K) - DO 30 K=0,20 -30 FK(2*K+1)=-(B+K)*(A+B+K)*(1.D0-X)/ - & (B+2.D0*K)/(B+2.D0*K+1.D0) - T2=0.0D0 - DO 35 K=20,1,-1 -35 T2=FK(K)/(1.0D0+T2) - TB=1.0D0/(1.0D0+T2) - BIX=1.0D0-X**A*(1.0D0-X)**B/(B*BT)*TB - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CVF(KD,M,Q,A,MJ,F) -C -C ====================================================== -C Purpose: Compute the value of F for characteristic -C equation of Mathieu functions -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C A --- Characteristic value -C Output: F --- Value of F for characteristic equation -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - B=A - IC=INT(M/2) - L=0 - L0=0 - J0=2 - JF=IC - IF (KD.EQ.1) L0=2 - IF (KD.EQ.1) J0=3 - IF (KD.EQ.2.OR.KD.EQ.3) L=1 - IF (KD.EQ.4) JF=IC-1 - T1=0.0D0 - DO 10 J=MJ,IC+1,-1 -10 T1=-Q*Q/((2.0D0*J+L)**2-B+T1) - IF (M.LE.2) THEN - T2=0.0D0 - IF (KD.EQ.1.AND.M.EQ.0) T1=T1+T1 - IF (KD.EQ.1.AND.M.EQ.2) T1=-2.0*Q*Q/(4.0-B+T1)-4.0 - IF (KD.EQ.2.AND.M.EQ.1) T1=T1+Q - IF (KD.EQ.3.AND.M.EQ.1) T1=T1-Q - ELSE - T0=0.0D0 - IF (KD.EQ.1) T0=4.0D0-B+2.0D0*Q*Q/B - IF (KD.EQ.2) T0=1.0D0-B+Q - IF (KD.EQ.3) T0=1.0D0-B-Q - IF (KD.EQ.4) T0=4.0D0-B - T2=-Q*Q/T0 - DO 15 J=J0,JF -15 
T2=-Q*Q/((2.0D0*J-L-L0)**2-B+T2) - ENDIF - F=(2.0D0*IC+L)**2+T1+T2-B - RETURN - END - - - -C ********************************** - - SUBROUTINE CLPN(N,X,Y,CPN,CPD) -C -C ================================================== -C Purpose: Compute Legendre polynomials Pn(z) and -C their derivatives Pn'(z) for a complex -C argument -C Input : x --- Real part of z -C y --- Imaginary part of z -C n --- Degree of Pn(z), n = 0,1,2,... -C Output: CPN(n) --- Pn(z) -C CPD(n) --- Pn'(z) -C ================================================== -C - IMPLICIT DOUBLE PRECISION (X,Y) - IMPLICIT COMPLEX *16 (C,Z) - DIMENSION CPN(0:N),CPD(0:N) - Z=CMPLX(X,Y) - CPN(0)=(1.0D0,0.0D0) - CPN(1)=Z - CPD(0)=(0.0D0,0.0D0) - CPD(1)=(1.0D0,0.0D0) - CP0=(1.0D0,0.0D0) - CP1=Z - DO 10 K=2,N - CPF=(2.0D0*K-1.0D0)/K*Z*CP1-(K-1.0D0)/K*CP0 - CPN(K)=CPF - IF (DABS(X).EQ.1.0D0.AND.Y.EQ.0.0D0) THEN - CPD(K)=0.5D0*X**(K+1)*K*(K+1.0D0) - ELSE - CPD(K)=K*(CP1-Z*CPF)/(1.0D0-Z*Z) - ENDIF - CP0=CP1 -10 CP1=CPF - RETURN - END - -C ********************************** - - SUBROUTINE LQMNS(M,N,X,QM,QD) -C -C ======================================================== -C Purpose: Compute associated Legendre functions Qmn(x) -C and Qmn'(x) for a given order -C Input : x --- Argument of Qmn(x) -C m --- Order of Qmn(x), m = 0,1,2,... -C n --- Degree of Qmn(x), n = 0,1,2,... 
-C Output: QM(n) --- Qmn(x) -C QD(n) --- Qmn'(x) -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION QM(0:N),QD(0:N) - DO 10 K=0,N - QM(K)=0.0D0 -10 QD(K)=0.0D0 - IF (DABS(X).EQ.1.0D0) THEN - DO 15 K=0,N - QM(K)=1.0D+300 -15 QD(K)=1.0D+300 - RETURN - ENDIF - LS=1 - IF (DABS(X).GT.1.0D0) LS=-1 - XQ=DSQRT(LS*(1.0D0-X*X)) - Q0=0.5D0*DLOG(DABS((X+1.0)/(X-1.0))) - Q00=Q0 - Q10=-1.0D0/XQ - Q01=X*Q0-1.0D0 - Q11=-LS*XQ*(Q0+X/(1.0D0-X*X)) - QF0=Q00 - QF1=Q10 - QM0=0.0D0 - QM1=0.0D0 - DO 20 K=2,M - QM0=-2.0D0*(K-1.0)/XQ*X*QF1-LS*(K-1.0)*(2.0-K)*QF0 - QF0=QF1 -20 QF1=QM0 - IF (M.EQ.0) QM0=Q00 - IF (M.EQ.1) QM0=Q10 - QM(0)=QM0 - IF (DABS(X).LT.1.0001D0) THEN - IF (M.EQ.0.AND.N.GT.0) THEN - QF0=Q00 - QF1=Q01 - DO 25 K=2,N - QF2=((2.0*K-1.0D0)*X*QF1-(K-1.0)*QF0)/K - QM(K)=QF2 - QF0=QF1 -25 QF1=QF2 - ENDIF - QG0=Q01 - QG1=Q11 - DO 30 K=2,M - QM1=-2.0D0*(K-1.0)/XQ*X*QG1-LS*K*(3.0-K)*QG0 - QG0=QG1 -30 QG1=QM1 - IF (M.EQ.0) QM1=Q01 - IF (M.EQ.1) QM1=Q11 - QM(1)=QM1 - IF (M.EQ.1.AND.N.GT.1) THEN - QH0=Q10 - QH1=Q11 - DO 35 K=2,N - QH2=((2.0*K-1.0D0)*X*QH1-K*QH0)/(K-1.0) - QM(K)=QH2 - QH0=QH1 -35 QH1=QH2 - ELSE IF (M.GE.2) THEN - QG0=Q00 - QG1=Q01 - QH0=Q10 - QH1=Q11 - QMK=0.0D0 - DO 45 L=2,N - Q0L=((2.0D0*L-1.0D0)*X*QG1-(L-1.0D0)*QG0)/L - Q1L=((2.0*L-1.0D0)*X*QH1-L*QH0)/(L-1.0D0) - QF0=Q0L - QF1=Q1L - DO 40 K=2,M - QMK=-2.0D0*(K-1.0)/XQ*X*QF1-LS*(K+L-1.0)* - & (L+2.0-K)*QF0 - QF0=QF1 -40 QF1=QMK - QM(L)=QMK - QG0=QG1 - QG1=Q0L - QH0=QH1 -45 QH1=Q1L - ENDIF - ELSE - IF (DABS(X).GT.1.1) THEN - KM=40+M+N - ELSE - KM=(40+M+N)*INT(-1.0-1.8*LOG(X-1.0)) - ENDIF - QF2=0.0D0 - QF1=1.0D0 - DO 50 K=KM,0,-1 - QF0=((2.0*K+3.0D0)*X*QF1-(K+2.0-M)*QF2)/(K+M+1.0) - IF (K.LE.N) QM(K)=QF0 - QF2=QF1 -50 QF1=QF0 - DO 55 K=0,N -55 QM(K)=QM(K)*QM0/QF0 - ENDIF - IF (DABS(X).LT.1.0D0) THEN - DO 60 K=0,N -60 QM(K)=(-1)**M*QM(K) - ENDIF - QD(0)=((1.0D0-M)*QM(1)-X*QM(0))/(X*X-1.0) - DO 65 K=1,N -65 QD(K)=(K*X*QM(K)-(K+M)*QM(K-1))/(X*X-1.0) - RETURN - 
END - -C ********************************** - - SUBROUTINE CIKLV(V,Z,CBIV,CDIV,CBKV,CDKV) -C -C ===================================================== -C Purpose: Compute modified Bessel functions Iv(z) and -C Kv(z) and their derivatives with a complex -C argument and a large order -C Input: v --- Order of Iv(z) and Kv(z) -C z --- Complex argument -C Output: CBIV --- Iv(z) -C CDIV --- Iv'(z) -C CBKV --- Kv(z) -C CDKV --- Kv'(z) -C Routine called: -C CJK to compute the expansion coefficients -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CF(12),A(91) - PI=3.141592653589793D0 - KM=12 - CALL CJK(KM,A) - DO 30 L=1,0,-1 - V0=V-L - CWS=CDSQRT(1.0D0+(Z/V0)*(Z/V0)) - CETA=CWS+CDLOG(Z/V0/(1.0D0+CWS)) - CT=1.0D0/CWS - CT2=CT*CT - DO 15 K=1,KM - L0=K*(K+1)/2+1 - LF=L0+K - CF(K)=A(LF) - DO 10 I=LF-1,L0,-1 -10 CF(K)=CF(K)*CT2+A(I) -15 CF(K)=CF(K)*CT**K - VR=1.0D0/V0 - CSI=(1.0D0,0.0D0) - DO 20 K=1,KM -20 CSI=CSI+CF(K)*VR**K - CBIV=CDSQRT(CT/(2.0D0*PI*V0))*CDEXP(V0*CETA)*CSI - IF (L.EQ.1) CFI=CBIV - CSK=(1.0D0,0.0D0) - DO 25 K=1,KM -25 CSK=CSK+(-1)**K*CF(K)*VR**K - CBKV=CDSQRT(PI*CT/(2.0D0*V0))*CDEXP(-V0*CETA)*CSK - IF (L.EQ.1) CFK=CBKV -30 CONTINUE - CDIV=CFI-V/Z*CBIV - CDKV=-CFK-V/Z*CBKV - RETURN - END - - - -C ********************************** - - SUBROUTINE ELIT(HK,PHI,FE,EE) -C -C ================================================== -C Purpose: Compute complete and incomplete elliptic -C integrals F(k,phi) and E(k,phi) -C Input : HK --- Modulus k ( 0 ≤ k ≤ 1 ) -C Phi --- Argument ( in degrees ) -C Output : FE --- F(k,phi) -C EE --- E(k,phi) -C ================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - G=0.0D0 - PI=3.14159265358979D0 - A0=1.0D0 - B0=DSQRT(1.0D0-HK*HK) - D0=(PI/180.0D0)*PHI - R=HK*HK - IF (HK.EQ.1.0D0.AND.PHI.EQ.90.0D0) THEN - FE=1.0D+300 - EE=1.0D0 - ELSE IF (HK.EQ.1.0D0) THEN - FE=DLOG((1.0D0+DSIN(D0))/DCOS(D0)) - EE=DSIN(D0) - ELSE 
- FAC=1.0D0 - D=0.0D0 - DO 10 N=1,40 - A=(A0+B0)/2.0D0 - B=DSQRT(A0*B0) - C=(A0-B0)/2.0D0 - FAC=2.0D0*FAC - R=R+FAC*C*C - IF (PHI.NE.90.0D0) THEN - D=D0+DATAN((B0/A0)*DTAN(D0)) - G=G+C*DSIN(D) - D0=D+PI*INT(D/PI+.5D0) - ENDIF - A0=A - B0=B - IF (C.LT.1.0D-7) GO TO 15 -10 CONTINUE -15 CK=PI/(2.0D0*A) - CE=PI*(2.0D0-R)/(4.0D0*A) - IF (PHI.EQ.90.0D0) THEN - FE=CK - EE=CE - ELSE - FE=D/(FAC*A) - EE=FE*CE/CK+G - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE ELIT3(PHI,HK,C,EL3) -C -C ========================================================= -C Purpose: Compute the elliptic integral of the third kind -C using Gauss-Legendre quadrature -C Input : Phi --- Argument ( in degrees ) -C k --- Modulus ( 0 ≤ k ≤ 1.0 ) -C c --- Parameter ( 0 ≤ c ≤ 1.0 ) -C Output: EL3 --- Value of the elliptic integral of the -C third kind -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION T(10),W(10) - LOGICAL LB1,LB2 - DATA T/.9931285991850949D0,.9639719272779138D0, - & .9122344282513259D0,.8391169718222188D0, - & .7463319064601508D0,.6360536807265150D0, - & .5108670019508271D0,.3737060887154195D0, - & .2277858511416451D0,.7652652113349734D-1/ - DATA W/.1761400713915212D-1,.4060142980038694D-1, - & .6267204833410907D-1,.8327674157670475D-1, - & .1019301198172404D0,.1181945319615184D0, - & .1316886384491766D0,.1420961093183820D0, - & .1491729864726037D0,.1527533871307258D0/ - LB1=HK.EQ.1.0D0.AND.DABS(PHI-90.0).LE.1.0D-8 - LB2=C.EQ.1.0D0.AND.DABS(PHI-90.0).LE.1.0D-8 - IF (LB1.OR.LB2) THEN - EL3=1.0D+300 - RETURN - ENDIF - C1=0.87266462599716D-2*PHI - C2=C1 - EL3=0.0D0 - DO 10 I=1,10 - C0=C2*T(I) - T1=C1+C0 - T2=C1-C0 - F1=1.0D0/((1.0D0-C*DSIN(T1)*DSIN(T1))* - & DSQRT(1.0D0-HK*HK*DSIN(T1)*DSIN(T1))) - F2=1.0D0/((1.0D0-C*DSIN(T2)*DSIN(T2))* - & DSQRT(1.0D0-HK*HK*DSIN(T2)*DSIN(T2))) -10 EL3=EL3+W(I)*(F1+F2) - EL3=C1*EL3 - RETURN - END - -C ********************************** - - SUBROUTINE EIX(X,EI) -C 
-C ============================================ -C Purpose: Compute exponential integral Ei(x) -C Input : x --- Argument of Ei(x) -C Output: EI --- Ei(x) -C ============================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (X.EQ.0.0) THEN - EI=-1.0D+300 - ELSE IF (X .LT. 0) THEN - CALL E1XB(-X, EI) - EI = -EI - ELSE IF (DABS(X).LE.40.0) THEN -C Power series around x=0 - EI=1.0D0 - R=1.0D0 - DO 15 K=1,100 - R=R*K*X/(K+1.0D0)**2 - EI=EI+R - IF (DABS(R/EI).LE.1.0D-15) GO TO 20 -15 CONTINUE -20 GA=0.5772156649015328D0 - EI=GA+DLOG(X)+X*EI - ELSE -C Asymptotic expansion (the series is not convergent) - EI=1.0D0 - R=1.0D0 - DO 25 K=1,20 - R=R*K/X -25 EI=EI+R - EI=DEXP(X)/X*EI - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE EIXZ(Z,CEI) -C -C ============================================ -C Purpose: Compute exponential integral Ei(x) -C Input : x --- Complex argument of Ei(x) -C Output: EI --- Ei(x) -C ============================================ -C - IMPLICIT NONE - DOUBLE COMPLEX Z, CEI - CALL E1Z(-Z, CEI) - CEI = -CEI + (CDLOG(Z) - CDLOG(1D0/Z))/2D0 - CDLOG(-Z) - RETURN - END - -C ********************************** - - SUBROUTINE E1XB(X,E1) -C -C ============================================ -C Purpose: Compute exponential integral E1(x) -C Input : x --- Argument of E1(x) -C Output: E1 --- E1(x) ( x > 0 ) -C ============================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (X.EQ.0.0) THEN - E1=1.0D+300 - ELSE IF (X.LE.1.0) THEN - E1=1.0D0 - R=1.0D0 - DO 10 K=1,25 - R=-R*K*X/(K+1.0D0)**2 - E1=E1+R - IF (DABS(R).LE.DABS(E1)*1.0D-15) GO TO 15 -10 CONTINUE -15 GA=0.5772156649015328D0 - E1=-GA-DLOG(X)+X*E1 - ELSE - M=20+INT(80.0/X) - T0=0.0D0 - DO 20 K=M,1,-1 - T0=K/(1.0D0+K/(X+T0)) -20 CONTINUE - T=1.0D0/(X+T0) - E1=DEXP(-X)*T - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CHGM(A,B,X,HG) -C -C =================================================== -C Purpose: Compute 
confluent hypergeometric function -C M(a,b,x) -C Input : a --- Parameter -C b --- Parameter ( b <> 0,-1,-2,... ) -C x --- Argument -C Output: HG --- M(a,b,x) -C Routine called: GAMMA2 for computing Г(x) -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - A0=A - A1=A - X0=X - HG=0.0D0 - IF (B.EQ.0.0D0.OR.B.EQ.-ABS(INT(B))) THEN - HG=1.0D+300 - ELSE IF (A.EQ.0.0D0.OR.X.EQ.0.0D0) THEN - HG=1.0D0 - ELSE IF (A.EQ.-1.0D0) THEN - HG=1.0D0-X/B - ELSE IF (A.EQ.B) THEN - HG=DEXP(X) - ELSE IF (A-B.EQ.1.0D0) THEN - HG=(1.0D0+X/B)*DEXP(X) - ELSE IF (A.EQ.1.0D0.AND.B.EQ.2.0D0) THEN - HG=(DEXP(X)-1.0D0)/X - ELSE IF (A.EQ.INT(A).AND.A.LT.0.0D0) THEN - M=INT(-A) - R=1.0D0 - HG=1.0D0 - DO 10 K=1,M - R=R*(A+K-1.0D0)/K/(B+K-1.0D0)*X -10 HG=HG+R - ENDIF - IF (HG.NE.0.0D0) RETURN - IF (X.LT.0.0D0) THEN - A=B-A - A0=A - X=DABS(X) - ENDIF - NL=0 - LA=0 - IF (A.GE.2.0D0) THEN - NL=1 - LA=INT(A) - A=A-LA-1.0D0 - ENDIF - Y0=0.0D0 - Y1=0.0D0 - DO 30 N=0,NL - IF (A0.GE.2.0D0) A=A+1.0D0 - IF (X.LE.30.0D0+DABS(B).OR.A.LT.0.0D0) THEN - HG=1.0D0 - RG=1.0D0 - DO 15 J=1,500 - RG=RG*(A+J-1.0D0)/(J*(B+J-1.0D0))*X - HG=HG+RG - IF (HG.NE.0D0.AND.DABS(RG/HG).LT.1.0D-15) GO TO 25 -15 CONTINUE - ELSE - CALL GAMMA2(A,TA) - CALL GAMMA2(B,TB) - XG=B-A - CALL GAMMA2(XG,TBA) - SUM1=1.0D0 - SUM2=1.0D0 - R1=1.0D0 - R2=1.0D0 - DO 20 I=1,8 - R1=-R1*(A+I-1.0D0)*(A-B+I)/(X*I) - R2=-R2*(B-A+I-1.0D0)*(A-I)/(X*I) - SUM1=SUM1+R1 -20 SUM2=SUM2+R2 - HG1=TB/TBA*X**(-A)*DCOS(PI*A)*SUM1 - HG2=TB/TA*DEXP(X)*X**(A-B)*SUM2 - HG=HG1+HG2 - ENDIF -25 IF (N.EQ.0) Y0=HG - IF (N.EQ.1) Y1=HG -30 CONTINUE - IF (A0.GE.2.0D0) THEN - DO 35 I=1,LA-1 - HG=((2.0D0*A-B+X)*Y1+(B-A)*Y0)/A - Y0=Y1 - Y1=HG -35 A=A+1.0D0 - ENDIF - IF (X0.LT.0.0D0) HG=HG*DEXP(X0) - A=A1 - X=X0 - RETURN - END - - - -C ********************************** - - SUBROUTINE STVH0(X,SH0) -C -C ============================================= -C Purpose: Compute Struve function H0(x) -C Input : x --- Argument 
of H0(x) ( x ≥ 0 ) -C Output: SH0 --- H0(x) -C ============================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - S=1.0D0 - R=1.0D0 - IF (X.LE.20.0D0) THEN - A0=2.0*X/PI - DO 10 K=1,60 - R=-R*X/(2.0D0*K+1.0D0)*X/(2.0D0*K+1.0D0) - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 15 -10 CONTINUE -15 SH0=A0*S - ELSE - KM=INT(.5*(X+1.0)) - IF (X.GE.50.0) KM=25 - DO 20 K=1,KM - R=-R*((2.0D0*K-1.0D0)/X)**2 - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 25 -20 CONTINUE -25 T=4.0D0/X - T2=T*T - P0=((((-.37043D-5*T2+.173565D-4)*T2-.487613D-4) - & *T2+.17343D-3)*T2-.1753062D-2)*T2+.3989422793D0 - Q0=T*(((((.32312D-5*T2-.142078D-4)*T2+.342468D-4)* - & T2-.869791D-4)*T2+.4564324D-3)*T2-.0124669441D0) - TA0=X-.25D0*PI - BY0=2.0D0/DSQRT(X)*(P0*DSIN(TA0)+Q0*DCOS(TA0)) - SH0=2.0D0/(PI*X)*S+BY0 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE HYGFX(A,B,C,X,HF) -C -C ==================================================== -C Purpose: Compute hypergeometric function F(a,b,c,x) -C Input : a --- Parameter -C b --- Parameter -C c --- Parameter, c <> 0,-1,-2,... 
-C x --- Argument ( x < 1 ) -C Output: HF --- F(a,b,c,x) -C Routines called: -C (1) GAMMA2 for computing gamma function -C (2) PSI_SPEC for computing psi function -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - LOGICAL L0,L1,L2,L3,L4,L5 - PI=3.141592653589793D0 - EL=.5772156649015329D0 - L0=C.EQ.INT(C).AND.C.LT.0.0 - L1=1.0D0-X.LT.1.0D-15.AND.C-A-B.LE.0.0 - L2=A.EQ.INT(A).AND.A.LT.0.0 - L3=B.EQ.INT(B).AND.B.LT.0.0 - L4=C-A.EQ.INT(C-A).AND.C-A.LE.0.0 - L5=C-B.EQ.INT(C-B).AND.C-B.LE.0.0 - IF (L0.OR.L1) THEN - WRITE(*,*)'The hypergeometric series is divergent' - RETURN - ENDIF - EPS=1.0D-15 - IF (X.GT.0.95) EPS=1.0D-8 - IF (X.EQ.0.0.OR.A.EQ.0.0.OR.B.EQ.0.0) THEN - HF=1.0D0 - RETURN - ELSE IF (1.0D0-X.EQ.EPS.AND.C-A-B.GT.0.0) THEN - CALL GAMMA2(C,GC) - CALL GAMMA2(C-A-B,GCAB) - CALL GAMMA2(C-A,GCA) - CALL GAMMA2(C-B,GCB) - HF=GC*GCAB/(GCA*GCB) - RETURN - ELSE IF (1.0D0+X.LE.EPS.AND.DABS(C-A+B-1.0).LE.EPS) THEN - G0=DSQRT(PI)*2.0D0**(-A) - CALL GAMMA2(C,G1) - CALL GAMMA2(1.0D0+A/2.0-B,G2) - CALL GAMMA2(0.5D0+0.5*A,G3) - HF=G0*G1/(G2*G3) - RETURN - ELSE IF (L2.OR.L3) THEN - IF (L2) NM=INT(ABS(A)) - IF (L3) NM=INT(ABS(B)) - HF=1.0D0 - R=1.0D0 - DO 10 K=1,NM - R=R*(A+K-1.0D0)*(B+K-1.0D0)/(K*(C+K-1.0D0))*X -10 HF=HF+R - RETURN - ELSE IF (L4.OR.L5) THEN - IF (L4) NM=INT(ABS(C-A)) - IF (L5) NM=INT(ABS(C-B)) - HF=1.0D0 - R=1.0D0 - DO 15 K=1,NM - R=R*(C-A+K-1.0D0)*(C-B+K-1.0D0)/(K*(C+K-1.0D0))*X -15 HF=HF+R - HF=(1.0D0-X)**(C-A-B)*HF - RETURN - ENDIF - AA=A - BB=B - X1=X - IF (X.LT.0.0D0) THEN - X=X/(X-1.0D0) - IF (C.GT.A.AND.B.LT.A.AND.B.GT.0.0) THEN - A=BB - B=AA - ENDIF - B=C-B - ENDIF - HW=0.0D0 - IF (X.GE.0.75D0) THEN - GM=0.0D0 - IF (DABS(C-A-B-INT(C-A-B)).LT.1.0D-15) THEN - M=INT(C-A-B) - CALL GAMMA2(A,GA) - CALL GAMMA2(B,GB) - CALL GAMMA2(C,GC) - CALL GAMMA2(A+M,GAM) - CALL GAMMA2(B+M,GBM) - CALL PSI_SPEC(A,PA) - CALL PSI_SPEC(B,PB) - IF (M.NE.0) GM=1.0D0 - DO 30 J=1,ABS(M)-1 -30 GM=GM*J - RM=1.0D0 - DO 35 J=1,ABS(M) 
-35 RM=RM*J - F0=1.0D0 - R0=1.0D0 - R1=1.0D0 - SP0=0.D0 - SP=0.0D0 - IF (M.GE.0) THEN - C0=GM*GC/(GAM*GBM) - C1=-GC*(X-1.0D0)**M/(GA*GB*RM) - DO 40 K=1,M-1 - R0=R0*(A+K-1.0D0)*(B+K-1.0)/(K*(K-M))*(1.0-X) -40 F0=F0+R0 - DO 45 K=1,M -45 SP0=SP0+1.0D0/(A+K-1.0)+1.0/(B+K-1.0)-1.0/K - F1=PA+PB+SP0+2.0D0*EL+DLOG(1.0D0-X) - DO 55 K=1,250 - SP=SP+(1.0D0-A)/(K*(A+K-1.0))+(1.0-B)/(K*(B+K-1.0)) - SM=0.0D0 - DO 50 J=1,M -50 SM=SM+(1.0D0-A)/((J+K)*(A+J+K-1.0))+1.0/ - & (B+J+K-1.0) - RP=PA+PB+2.0D0*EL+SP+SM+DLOG(1.0D0-X) - R1=R1*(A+M+K-1.0D0)*(B+M+K-1.0)/(K*(M+K))*(1.0-X) - F1=F1+R1*RP - IF (DABS(F1-HW).LT.DABS(F1)*EPS) GO TO 60 -55 HW=F1 -60 HF=F0*C0+F1*C1 - ELSE IF (M.LT.0) THEN - M=-M - C0=GM*GC/(GA*GB*(1.0D0-X)**M) - C1=-(-1)**M*GC/(GAM*GBM*RM) - DO 65 K=1,M-1 - R0=R0*(A-M+K-1.0D0)*(B-M+K-1.0)/(K*(K-M))*(1.0-X) -65 F0=F0+R0 - DO 70 K=1,M -70 SP0=SP0+1.0D0/K - F1=PA+PB-SP0+2.0D0*EL+DLOG(1.0D0-X) - DO 80 K=1,250 - SP=SP+(1.0D0-A)/(K*(A+K-1.0))+(1.0-B)/(K*(B+K-1.0)) - SM=0.0D0 - DO 75 J=1,M -75 SM=SM+1.0D0/(J+K) - RP=PA+PB+2.0D0*EL+SP-SM+DLOG(1.0D0-X) - R1=R1*(A+K-1.0D0)*(B+K-1.0)/(K*(M+K))*(1.0-X) - F1=F1+R1*RP - IF (DABS(F1-HW).LT.DABS(F1)*EPS) GO TO 85 -80 HW=F1 -85 HF=F0*C0+F1*C1 - ENDIF - ELSE - CALL GAMMA2(A,GA) - CALL GAMMA2(B,GB) - CALL GAMMA2(C,GC) - CALL GAMMA2(C-A,GCA) - CALL GAMMA2(C-B,GCB) - CALL GAMMA2(C-A-B,GCAB) - CALL GAMMA2(A+B-C,GABC) - C0=GC*GCAB/(GCA*GCB) - C1=GC*GABC/(GA*GB)*(1.0D0-X)**(C-A-B) - HF=0.0D0 - R0=C0 - R1=C1 - DO 90 K=1,250 - R0=R0*(A+K-1.0D0)*(B+K-1.0)/(K*(A+B-C+K))*(1.0-X) - R1=R1*(C-A+K-1.0D0)*(C-B+K-1.0)/(K*(C-A-B+K)) - & *(1.0-X) - HF=HF+R0+R1 - IF (DABS(HF-HW).LT.DABS(HF)*EPS) GO TO 95 -90 HW=HF -95 HF=HF+C0+C1 - ENDIF - ELSE - A0=1.0D0 - IF (C.GT.A.AND.C.LT.2.0D0*A.AND. 
- & C.GT.B.AND.C.LT.2.0D0*B) THEN - A0=(1.0D0-X)**(C-A-B) - A=C-A - B=C-B - ENDIF - HF=1.0D0 - R=1.0D0 - DO 100 K=1,250 - R=R*(A+K-1.0D0)*(B+K-1.0D0)/(K*(C+K-1.0D0))*X - HF=HF+R - IF (DABS(HF-HW).LE.DABS(HF)*EPS) GO TO 105 -100 HW=HF -105 HF=A0*HF - ENDIF - IF (X1.LT.0.0D0) THEN - X=X1 - C0=1.0D0/(1.0D0-X)**AA - HF=C0*HF - ENDIF - A=AA - B=BB - IF (K.GT.120) WRITE(*,115) -115 FORMAT(1X,'Warning! You should check the accuracy') - RETURN - END - - - -C ********************************** - - SUBROUTINE CCHG(A,B,Z,CHG) -C -C =================================================== -C Purpose: Compute confluent hypergeometric function -C M(a,b,z) with real parameters a, b and a -C complex argument z -C Input : a --- Parameter -C b --- Parameter -C z --- Complex argument -C Output: CHG --- M(a,b,z) -C Routine called: GAMMA2 for computing gamma function -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX *16 (C,Z) - PI=3.141592653589793D0 - CI=(0.0D0,1.0D0) - A0=A - A1=A - Z0=Z - IF (B.EQ.0.0.OR.B.EQ.-INT(ABS(B))) THEN - CHG=(1.0D+300,0.0D0) - ELSE IF (A.EQ.0.0D0.OR.Z.EQ.0.0D0) THEN - CHG=(1.0D0,0.0D0) - ELSE IF (A.EQ.-1.0D0) THEN - CHG=1.0D0-Z/B - ELSE IF (A.EQ.B) THEN - CHG=CDEXP(Z) - ELSE IF (A-B.EQ.1.0D0) THEN - CHG=(1.0D0+Z/B)*CDEXP(Z) - ELSE IF (A.EQ.1.0D0.AND.B.EQ.2.0D0) THEN - CHG=(CDEXP(Z)-1.0D0)/Z - ELSE IF (A.EQ.INT(A).AND.A.LT.0.0D0) THEN - M=INT(-A) - CR=(1.0D0,0.0D0) - CHG=(1.0D0,0.0D0) - DO 10 K=1,M - CR=CR*(A+K-1.0D0)/K/(B+K-1.0D0)*Z -10 CHG=CHG+CR - ELSE - X0=DBLE(Z) - IF (X0.LT.0.0D0) THEN - A=B-A - A0=A - Z=-Z - ENDIF - NL=0 - LA=0 - IF (A.GE.2.0D0) THEN - NL=1 - LA=INT(A) - A=A-LA-1.0D0 - ENDIF - NS=0 - DO 30 N=0,NL - IF (A0.GE.2.0D0) A=A+1.0D0 - IF (CDABS(Z).LT.20.0D0+ABS(B).OR.A.LT.0.0D0) THEN - CHG=(1.0D0,0.0D0) - CRG=(1.0D0,0.0D0) - DO 15 J=1,500 - CRG=CRG*(A+J-1.0D0)/(J*(B+J-1.0D0))*Z - CHG=CHG+CRG - IF (CDABS((CHG-CHW)/CHG).LT.1.D-15) GO TO 25 - CHW=CHG -15 CONTINUE - ELSE - CALL 
GAMMA2(A,G1) - CALL GAMMA2(B,G2) - BA=B-A - CALL GAMMA2(BA,G3) - CS1=(1.0D0,0.0D0) - CS2=(1.0D0,0.0D0) - CR1=(1.0D0,0.0D0) - CR2=(1.0D0,0.0D0) - DO 20 I=1,8 - CR1=-CR1*(A+I-1.0D0)*(A-B+I)/(Z*I) - CR2=CR2*(B-A+I-1.0D0)*(I-A)/(Z*I) - CS1=CS1+CR1 -20 CS2=CS2+CR2 - X=DBLE(Z) - Y=DIMAG(Z) - IF (X.EQ.0.0.AND.Y.GE.0.0) THEN - PHI=0.5D0*PI - ELSE IF (X.EQ.0.0.AND.Y.LE.0.0) THEN - PHI=-0.5D0*PI - ELSE - PHI=DATAN(Y/X) - ENDIF - IF (PHI.GT.-0.5*PI.AND.PHI.LT.1.5*PI) NS=1 - IF (PHI.GT.-1.5*PI.AND.PHI.LE.-0.5*PI) NS=-1 - CFAC=CDEXP(NS*CI*PI*A) - IF (Y.EQ.0.0D0) CFAC=DCOS(PI*A) - CHG1=G2/G3*Z**(-A)*CFAC*CS1 - CHG2=G2/G1*CDEXP(Z)*Z**(A-B)*CS2 - CHG=CHG1+CHG2 - ENDIF -25 IF (N.EQ.0) CY0=CHG - IF (N.EQ.1) CY1=CHG -30 CONTINUE - IF (A0.GE.2.0D0) THEN - DO 35 I=1,LA-1 - CHG=((2.0D0*A-B+Z)*CY1+(B-A)*CY0)/A - CY0=CY1 - CY1=CHG -35 A=A+1.0D0 - ENDIF - IF (X0.LT.0.0D0) CHG=CHG*CDEXP(-Z) - ENDIF - A=A1 - Z=Z0 - RETURN - END - - - -C ********************************** - - SUBROUTINE HYGFZ(A,B,C,Z,ZHF) -C -C ====================================================== -C Purpose: Compute the hypergeometric function for a -C complex argument, F(a,b,c,z) -C Input : a --- Parameter -C b --- Parameter -C c --- Parameter, c <> 0,-1,-2,... 
-C z --- Complex argument -C Output: ZHF --- F(a,b,c,z) -C Routines called: -C (1) GAMMA2 for computing gamma function -C (2) PSI_SPEC for computing psi function -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Y) - IMPLICIT COMPLEX *16 (Z) - LOGICAL L0,L1,L2,L3,L4,L5,L6 - X=DBLE(Z) - Y=DIMAG(Z) - EPS=1.0D-15 - L0=C.EQ.INT(C).AND.C.LT.0.0D0 - L1=DABS(1.0D0-X).LT.EPS.AND.Y.EQ.0.0D0.AND.C-A-B.LE.0.0D0 - L2=CDABS(Z+1.0D0).LT.EPS.AND.DABS(C-A+B-1.0D0).LT.EPS - L3=A.EQ.INT(A).AND.A.LT.0.0D0 - L4=B.EQ.INT(B).AND.B.LT.0.0D0 - L5=C-A.EQ.INT(C-A).AND.C-A.LE.0.0D0 - L6=C-B.EQ.INT(C-B).AND.C-B.LE.0.0D0 - AA=A - BB=B - A0=CDABS(Z) - IF (A0.GT.0.95D0) EPS=1.0D-8 - PI=3.141592653589793D0 - EL=.5772156649015329D0 - IF (L0.OR.L1) THEN -C WRITE(*,*)'The hypergeometric series is divergent' - ZHF = 1.0D300 - RETURN - ENDIF - NM=0 - IF (A0.EQ.0.0D0.OR.A.EQ.0.0D0.OR.B.EQ.0.0D0) THEN - ZHF=(1.0D0,0.0D0) - ELSE IF (Z.EQ.1.0D0.AND.C-A-B.GT.0.0D0) THEN - CALL GAMMA2(C,GC) - CALL GAMMA2(C-A-B,GCAB) - CALL GAMMA2(C-A,GCA) - CALL GAMMA2(C-B,GCB) - ZHF=GC*GCAB/(GCA*GCB) - ELSE IF (L2) THEN - G0=DSQRT(PI)*2.0D0**(-A) - CALL GAMMA2(C,G1) - CALL GAMMA2(1.0D0+A/2.0D0-B,G2) - CALL GAMMA2(0.5D0+0.5D0*A,G3) - ZHF=G0*G1/(G2*G3) - ELSE IF (L3.OR.L4) THEN - IF (L3) NM=INT(ABS(A)) - IF (L4) NM=INT(ABS(B)) - ZHF=(1.0D0,0.0D0) - ZR=(1.0D0,0.0D0) - DO 10 K=1,NM - ZR=ZR*(A+K-1.0D0)*(B+K-1.0D0)/(K*(C+K-1.0D0))*Z -10 ZHF=ZHF+ZR - ELSE IF (L5.OR.L6) THEN - IF (L5) NM=INT(ABS(C-A)) - IF (L6) NM=INT(ABS(C-B)) - ZHF=(1.0D0,0.0D0) - ZR=(1.0D0,0.0D0) - DO 15 K=1,NM - ZR=ZR*(C-A+K-1.0D0)*(C-B+K-1.0D0)/(K*(C+K-1.0D0))*Z -15 ZHF=ZHF+ZR - ZHF=(1.0D0-Z)**(C-A-B)*ZHF - ELSE IF (A0.LE.1.0D0) THEN - IF (X.LT.0.0D0) THEN - Z1=Z/(Z-1.0D0) - IF (C.GT.A.AND.B.LT.A.AND.B.GT.0.0) THEN - A=BB - B=AA - ENDIF - ZC0=1.0D0/((1.0D0-Z)**A) - ZHF=(1.0D0,0.0D0) - ZR0=(1.0D0,0.0D0) - DO 20 K=1,500 - ZR0=ZR0*(A+K-1.0D0)*(C-B+K-1.0D0)/(K*(C+K-1.0D0))*Z1 - ZHF=ZHF+ZR0 - IF 
(CDABS(ZHF-ZW).LT.CDABS(ZHF)*EPS) GO TO 25 -20 ZW=ZHF -25 ZHF=ZC0*ZHF - ELSE IF (A0.GE.0.90D0) THEN - GM=0.0D0 - MCAB=INT(C-A-B+EPS*DSIGN(1.0D0,C-A-B)) - IF (DABS(C-A-B-MCAB).LT.EPS) THEN - M=INT(C-A-B) - CALL GAMMA2(A,GA) - CALL GAMMA2(B,GB) - CALL GAMMA2(C,GC) - CALL GAMMA2(A+M,GAM) - CALL GAMMA2(B+M,GBM) - CALL PSI_SPEC(A,PA) - CALL PSI_SPEC(B,PB) - IF (M.NE.0) GM=1.0D0 - DO 30 J=1,ABS(M)-1 -30 GM=GM*J - RM=1.0D0 - DO 35 J=1,ABS(M) -35 RM=RM*J - ZF0=(1.0D0,0.0D0) - ZR0=(1.0D0,0.0D0) - ZR1=(1.0D0,0.0D0) - SP0=0.D0 - SP=0.0D0 - IF (M.GE.0) THEN - ZC0=GM*GC/(GAM*GBM) - ZC1=-GC*(Z-1.0D0)**M/(GA*GB*RM) - DO 40 K=1,M-1 - ZR0=ZR0*(A+K-1.D0)*(B+K-1.D0)/(K*(K-M))*(1.D0-Z) -40 ZF0=ZF0+ZR0 - DO 45 K=1,M -45 SP0=SP0+1.0D0/(A+K-1.0D0)+1.0/(B+K-1.0D0)-1.D0/K - ZF1=PA+PB+SP0+2.0D0*EL+CDLOG(1.0D0-Z) - DO 55 K=1,500 - SP=SP+(1.0D0-A)/(K*(A+K-1.0D0))+(1.0D0-B)/ - & (K*(B+K-1.0D0)) - SM=0.0D0 - DO 50 J=1,M - SM=SM+(1.0D0-A)/((J+K)*(A+J+K-1.0D0)) - & +1.0D0/(B+J+K-1.0D0) -50 CONTINUE - ZP=PA+PB+2.0D0*EL+SP+SM+CDLOG(1.0D0-Z) - ZR1=ZR1*(A+M+K-1.0D0)*(B+M+K-1.0D0)/(K*(M+K)) - & *(1.0D0-Z) - ZF1=ZF1+ZR1*ZP - IF (CDABS(ZF1-ZW).LT.CDABS(ZF1)*EPS) GO TO 60 -55 ZW=ZF1 -60 ZHF=ZF0*ZC0+ZF1*ZC1 - ELSE IF (M.LT.0) THEN - M=-M - ZC0=GM*GC/(GA*GB*(1.0D0-Z)**M) - ZC1=-(-1)**M*GC/(GAM*GBM*RM) - DO 65 K=1,M-1 - ZR0=ZR0*(A-M+K-1.0D0)*(B-M+K-1.0D0)/(K*(K-M)) - & *(1.0D0-Z) -65 ZF0=ZF0+ZR0 - DO 70 K=1,M -70 SP0=SP0+1.0D0/K - ZF1=PA+PB-SP0+2.0D0*EL+CDLOG(1.0D0-Z) - DO 80 K=1,500 - SP=SP+(1.0D0-A)/(K*(A+K-1.0D0))+(1.0D0-B)/(K* - & (B+K-1.0D0)) - SM=0.0D0 - DO 75 J=1,M -75 SM=SM+1.0D0/(J+K) - ZP=PA+PB+2.0D0*EL+SP-SM+CDLOG(1.0D0-Z) - ZR1=ZR1*(A+K-1.D0)*(B+K-1.D0)/(K*(M+K))*(1.D0-Z) - ZF1=ZF1+ZR1*ZP - IF (CDABS(ZF1-ZW).LT.CDABS(ZF1)*EPS) GO TO 85 -80 ZW=ZF1 -85 ZHF=ZF0*ZC0+ZF1*ZC1 - ENDIF - ELSE - CALL GAMMA2(A,GA) - CALL GAMMA2(B,GB) - CALL GAMMA2(C,GC) - CALL GAMMA2(C-A,GCA) - CALL GAMMA2(C-B,GCB) - CALL GAMMA2(C-A-B,GCAB) - CALL GAMMA2(A+B-C,GABC) - ZC0=GC*GCAB/(GCA*GCB) - 
ZC1=GC*GABC/(GA*GB)*(1.0D0-Z)**(C-A-B) - ZHF=(0.0D0,0.0D0) - ZR0=ZC0 - ZR1=ZC1 - DO 90 K=1,500 - ZR0=ZR0*(A+K-1.D0)*(B+K-1.D0)/(K*(A+B-C+K))*(1.D0-Z) - ZR1=ZR1*(C-A+K-1.0D0)*(C-B+K-1.0D0)/(K*(C-A-B+K)) - & *(1.0D0-Z) - ZHF=ZHF+ZR0+ZR1 - IF (CDABS(ZHF-ZW).LT.CDABS(ZHF)*EPS) GO TO 95 -90 ZW=ZHF -95 ZHF=ZHF+ZC0+ZC1 - ENDIF - ELSE - Z00=(1.0D0,0.0D0) - IF (C-A.LT.A.AND.C-B.LT.B) THEN - Z00=(1.0D0-Z)**(C-A-B) - A=C-A - B=C-B - ENDIF - ZHF=(1.0D0,0.D0) - ZR=(1.0D0,0.0D0) - DO 100 K=1,1500 - ZR=ZR*(A+K-1.0D0)*(B+K-1.0D0)/(K*(C+K-1.0D0))*Z - ZHF=ZHF+ZR - IF (CDABS(ZHF-ZW).LE.CDABS(ZHF)*EPS) GO TO 105 -100 ZW=ZHF -105 ZHF=Z00*ZHF - ENDIF - ELSE IF (A0.GT.1.0D0) THEN - MAB=INT(A-B+EPS*DSIGN(1.0D0,A-B)) - IF (DABS(A-B-MAB).LT.EPS.AND.A0.LE.1.1D0) B=B+EPS - IF (DABS(A-B-MAB).GT.EPS) THEN - CALL GAMMA2(A,GA) - CALL GAMMA2(B,GB) - CALL GAMMA2(C,GC) - CALL GAMMA2(A-B,GAB) - CALL GAMMA2(B-A,GBA) - CALL GAMMA2(C-A,GCA) - CALL GAMMA2(C-B,GCB) - ZC0=GC*GBA/(GCA*GB*(-Z)**A) - ZC1=GC*GAB/(GCB*GA*(-Z)**B) - ZR0=ZC0 - ZR1=ZC1 - ZHF=(0.0D0,0.0D0) - DO 110 K=1,500 - ZR0=ZR0*(A+K-1.0D0)*(A-C+K)/((A-B+K)*K*Z) - ZR1=ZR1*(B+K-1.0D0)*(B-C+K)/((B-A+K)*K*Z) - ZHF=ZHF+ZR0+ZR1 - IF (CDABS((ZHF-ZW)/ZHF).LE.EPS) GO TO 115 -110 ZW=ZHF -115 ZHF=ZHF+ZC0+ZC1 - ELSE - IF (A-B.LT.0.0D0) THEN - A=BB - B=AA - ENDIF - CA=C-A - CB=C-B - NCA=INT(CA+EPS*DSIGN(1.0D0,CA)) - NCB=INT(CB+EPS*DSIGN(1.0D0,CB)) - IF (DABS(CA-NCA).LT.EPS.OR.DABS(CB-NCB).LT.EPS) C=C+EPS - CALL GAMMA2(A,GA) - CALL GAMMA2(C,GC) - CALL GAMMA2(C-B,GCB) - CALL PSI_SPEC(A,PA) - CALL PSI_SPEC(C-A,PCA) - CALL PSI_SPEC(A-C,PAC) - MAB=INT(A-B+EPS) - ZC0=GC/(GA*(-Z)**B) - CALL GAMMA2(A-B,GM) - ZF0=GM/GCB*ZC0 - ZR=ZC0 - DO 120 K=1,MAB-1 - ZR=ZR*(B+K-1.0D0)/(K*Z) - T0=A-B-K - CALL GAMMA2(T0,G0) - CALL GAMMA2(C-B-K,GCBK) -120 ZF0=ZF0+ZR*G0/GCBK - IF (MAB.EQ.0) ZF0=(0.0D0,0.0D0) - ZC1=GC/(GA*GCB*(-Z)**A) - SP=-2.0D0*EL-PA-PCA - DO 125 J=1,MAB -125 SP=SP+1.0D0/J - ZP0=SP+CDLOG(-Z) - SQ=1.0D0 - DO 130 J=1,MAB -130 SQ=SQ*(B+J-1.0D0)*(B-C+J)/J - 
ZF1=(SQ*ZP0)*ZC1 - ZR=ZC1 - RK1=1.0D0 - SJ1=0.0D0 - W0=0.0D0 - DO 145 K=1,10000 - ZR=ZR/Z - RK1=RK1*(B+K-1.0D0)*(B-C+K)/(K*K) - RK2=RK1 - DO 135 J=K+1,K+MAB -135 RK2=RK2*(B+J-1.0D0)*(B-C+J)/J - SJ1=SJ1+(A-1.0D0)/(K*(A+K-1.0D0))+(A-C-1.0D0)/ - & (K*(A-C+K-1.0D0)) - SJ2=SJ1 - DO 140 J=K+1,K+MAB -140 SJ2=SJ2+1.0D0/J - ZP=-2.0D0*EL-PA-PAC+SJ2-1.0D0/(K+A-C) - & -PI/DTAN(PI*(K+A-C))+CDLOG(-Z) - ZF1=ZF1+RK2*ZR*ZP - WS=CDABS(ZF1) - IF (DABS((WS-W0)/WS).LT.EPS) GO TO 150 -145 W0=WS -150 ZHF=ZF0+ZF1 - ENDIF - ENDIF - A=AA - B=BB - IF (K.GT.150) WRITE(*,160) -160 FORMAT(1X,'Warning! You should check the accuracy') - RETURN - END - - - -C ********************************** - - SUBROUTINE ITAIRY(X,APT,BPT,ANT,BNT) -C -C ====================================================== -C Purpose: Compute the integrals of Airy fnctions with -C respect to t from 0 and x ( x ≥ 0 ) -C Input : x --- Upper limit of the integral -C Output : APT --- Integration of Ai(t) from 0 and x -C BPT --- Integration of Bi(t) from 0 and x -C ANT --- Integration of Ai(-t) from 0 and x -C BNT --- Integration of Bi(-t) from 0 and x -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(16) - EPS=1.0D-15 - PI=3.141592653589793D0 - C1=.355028053887817D0 - C2=.258819403792807D0 - SR3=1.732050807568877D0 - IF (X.EQ.0.0D0) THEN - APT=0.0D0 - BPT=0.0D0 - ANT=0.0D0 - BNT=0.0D0 - ELSE - IF (DABS(X).LE.9.25D0) THEN - DO 30 L=0,1 - X=(-1)**L*X - FX=X - R=X - DO 10 K=1,40 - R=R*(3.0*K-2.0D0)/(3.0*K+1.0D0)*X/(3.0*K) - & *X/(3.0*K-1.0D0)*X - FX=FX+R - IF (DABS(R).LT.DABS(FX)*EPS) GO TO 15 -10 CONTINUE -15 GX=.5D0*X*X - R=GX - DO 20 K=1,40 - R=R*(3.0*K-1.0D0)/(3.0*K+2.0D0)*X/(3.0*K) - & *X/(3.0*K+1.0D0)*X - GX=GX+R - IF (DABS(R).LT.DABS(GX)*EPS) GO TO 25 -20 CONTINUE -25 ANT=C1*FX-C2*GX - BNT=SR3*(C1*FX+C2*GX) - IF (L.EQ.0) THEN - APT=ANT - BPT=BNT - ELSE - ANT=-ANT - BNT=-BNT - X=-X - ENDIF -30 CONTINUE - ELSE - DATA A/.569444444444444D0,.891300154320988D0, - & 
.226624344493027D+01,.798950124766861D+01, - & .360688546785343D+02,.198670292131169D+03, - & .129223456582211D+04,.969483869669600D+04, - & .824184704952483D+05,.783031092490225D+06, - & .822210493622814D+07,.945557399360556D+08, - & .118195595640730D+10,.159564653040121D+11, - & .231369166433050D+12,.358622522796969D+13/ - Q2=1.414213562373095D0 - Q0=.3333333333333333D0 - Q1=.6666666666666667D0 - XE=X*DSQRT(X)/1.5D0 - XP6=1.0D0/DSQRT(6.0D0*PI*XE) - SU1=1.0D0 - R=1.0D0 - XR1=1.0D0/XE - DO 35 K=1,16 - R=-R*XR1 -35 SU1=SU1+A(K)*R - SU2=1.0D0 - R=1.0D0 - DO 40 K=1,16 - R=R*XR1 -40 SU2=SU2+A(K)*R - APT=Q0-DEXP(-XE)*XP6*SU1 - BPT=2.0D0*DEXP(XE)*XP6*SU2 - SU3=1.0D0 - R=1.0D0 - XR2=1.0D0/(XE*XE) - DO 45 K=1,8 - R=-R*XR2 -45 SU3=SU3+A(2*K)*R - SU4=A(1)*XR1 - R=XR1 - DO 50 K=1,7 - R=-R*XR2 -50 SU4=SU4+A(2*K+1)*R - SU5=SU3+SU4 - SU6=SU3-SU4 - ANT=Q1-Q2*XP6*(SU5*DCOS(XE)-SU6*DSIN(XE)) - BNT=Q2*XP6*(SU5*DSIN(XE)+SU6*DCOS(XE)) - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE IKNA(N,X,NM,BI,DI,BK,DK) -C -C ======================================================== -C Purpose: Compute modified Bessel functions In(x) and -C Kn(x), and their derivatives -C Input: x --- Argument of In(x) and Kn(x) ( x ≥ 0 ) -C n --- Order of In(x) and Kn(x) -C Output: BI(n) --- In(x) -C DI(n) --- In'(x) -C BK(n) --- Kn(x) -C DK(n) --- Kn'(x) -C NM --- Highest order computed -C Routines called: -C (1) IK01A for computing I0(x),I1(x),K0(x) & K1(x) -C (2) MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BI(0:N),DI(0:N),BK(0:N),DK(0:N) - NM=N - IF (X.LE.1.0D-100) THEN - DO 10 K=0,N - BI(K)=0.0D0 - DI(K)=0.0D0 - BK(K)=1.0D+300 -10 DK(K)=-1.0D+300 - BI(0)=1.0D0 - DI(1)=0.5D0 - RETURN - ENDIF - CALL IK01A(X,BI0,DI0,BI1,DI1,BK0,DK0,BK1,DK1) - BI(0)=BI0 - BI(1)=BI1 - BK(0)=BK0 - BK(1)=BK1 - DI(0)=DI0 - DI(1)=DI1 - DK(0)=DK0 - DK(1)=DK1 
- IF (N.LE.1) RETURN - IF (X.GT.40.0.AND.N.LT.INT(0.25*X)) THEN - H0=BI0 - H1=BI1 - DO 15 K=2,N - H=-2.0D0*(K-1.0D0)/X*H1+H0 - BI(K)=H - H0=H1 -15 H1=H - ELSE - M=MSTA1(X,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F0=0.0D0 - F1=1.0D-100 - F=0.0D0 - DO 20 K=M,0,-1 - F=2.0D0*(K+1.0D0)*F1/X+F0 - IF (K.LE.NM) BI(K)=F - F0=F1 -20 F1=F - S0=BI0/F - DO 25 K=0,NM -25 BI(K)=S0*BI(K) - ENDIF - G0=BK0 - G1=BK1 - DO 30 K=2,NM - G=2.0D0*(K-1.0D0)/X*G1+G0 - BK(K)=G - G0=G1 -30 G1=G - DO 40 K=2,NM - DI(K)=BI(K-1)-K/X*BI(K) -40 DK(K)=-BK(K-1)-K/X*BK(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE CJYNA(N,Z,NM,CBJ,CDJ,CBY,CDY) -C -C ======================================================= -C Purpose: Compute Bessel functions Jn(z), Yn(z) and -C their derivatives for a complex argument -C Input : z --- Complex argument of Jn(z) and Yn(z) -C n --- Order of Jn(z) and Yn(z) -C Output: CBJ(n) --- Jn(z) -C CDJ(n) --- Jn'(z) -C CBY(n) --- Yn(z) -C CDY(n) --- Yn'(z) -C NM --- Highest order computed -C Rouitines called: -C (1) CJY01 to calculate J0(z), J1(z), Y0(z), Y1(z) -C (2) MSTA1 and MSTA2 to calculate the starting -C point for backward recurrence -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A,B,E,P,R,W,Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBJ(0:N),CDJ(0:N),CBY(0:N),CDY(0:N) - PI=3.141592653589793D0 - A0=CDABS(Z) - NM=N - IF (A0.LT.1.0D-100) THEN - DO 5 K=0,N - CBJ(K)=(0.0D0,0.0D0) - CDJ(K)=(0.0D0,0.0D0) - CBY(K)=-(1.0D+300,0.0D0) -5 CDY(K)=(1.0D+300,0.0D0) - CBJ(0)=(1.0D0,0.0D0) - CDJ(1)=(0.5D0,0.0D0) - RETURN - ENDIF - CALL CJY01(Z,CBJ0,CDJ0,CBJ1,CDJ1,CBY0,CDY0,CBY1,CDY1) - CBJ(0)=CBJ0 - CBJ(1)=CBJ1 - CBY(0)=CBY0 - CBY(1)=CBY1 - CDJ(0)=CDJ0 - CDJ(1)=CDJ1 - CDY(0)=CDY0 - CDY(1)=CDY1 - IF (N.LE.1) RETURN - IF (N.LT.INT(0.25*A0)) THEN - CJ0=CBJ0 - CJ1=CBJ1 - DO 70 K=2,N - CJK=2.0D0*(K-1.0D0)/Z*CJ1-CJ0 - CBJ(K)=CJK - CJ0=CJ1 -70 CJ1=CJK - ELSE - M=MSTA1(A0,200) - IF (M.LT.N) THEN - NM=M 
- ELSE - M=MSTA2(A0,N,15) - ENDIF - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 75 K=M,0,-1 - CF=2.0D0*(K+1.0D0)/Z*CF1-CF2 - IF (K.LE.NM) CBJ(K)=CF - CF2=CF1 -75 CF1=CF - IF (CDABS(CBJ0).GT.CDABS(CBJ1)) THEN - CS=CBJ0/CF - ELSE - CS=CBJ1/CF2 - ENDIF - DO 80 K=0,NM -80 CBJ(K)=CS*CBJ(K) - ENDIF - DO 85 K=2,NM -85 CDJ(K)=CBJ(K-1)-K/Z*CBJ(K) - YA0=CDABS(CBY0) - LB=0 - LB0=0 - CG0=CBY0 - CG1=CBY1 - DO 90 K=2,NM - CYK=2.0D0*(K-1.0D0)/Z*CG1-CG0 - IF (CDABS(CYK).GT.1.0D+290) GO TO 90 - YAK=CDABS(CYK) - YA1=CDABS(CG0) - IF (YAK.LT.YA0.AND.YAK.LT.YA1) LB=K - CBY(K)=CYK - CG0=CG1 - CG1=CYK -90 CONTINUE - IF (LB.LE.4.OR.DIMAG(Z).EQ.0.0D0) GO TO 125 -95 IF (LB.EQ.LB0) GO TO 125 - CH2=(1.0D0,0.0D0) - CH1=(0.0D0,0.0D0) - LB0=LB - DO 100 K=LB,1,-1 - CH0=2.0D0*K/Z*CH1-CH2 - CH2=CH1 -100 CH1=CH0 - CP12=CH0 - CP22=CH2 - CH2=(0.0D0,0.0D0) - CH1=(1.0D0,0.0D0) - DO 105 K=LB,1,-1 - CH0=2.0D0*K/Z*CH1-CH2 - CH2=CH1 -105 CH1=CH0 - CP11=CH0 - CP21=CH2 - IF (LB.EQ.NM) CBJ(LB+1)=2.0D0*LB/Z*CBJ(LB)-CBJ(LB-1) - IF (CDABS(CBJ(0)).GT.CDABS(CBJ(1))) THEN - CBY(LB+1)=(CBJ(LB+1)*CBY0-2.0D0*CP11/(PI*Z))/CBJ(0) - CBY(LB)=(CBJ(LB)*CBY0+2.0D0*CP12/(PI*Z))/CBJ(0) - ELSE - CBY(LB+1)=(CBJ(LB+1)*CBY1-2.0D0*CP21/(PI*Z))/CBJ(1) - CBY(LB)=(CBJ(LB)*CBY1+2.0D0*CP22/(PI*Z))/CBJ(1) - ENDIF - CYL2=CBY(LB+1) - CYL1=CBY(LB) - DO 110 K=LB-1,0,-1 - CYLK=2.0D0*(K+1.0D0)/Z*CYL1-CYL2 - CBY(K)=CYLK - CYL2=CYL1 -110 CYL1=CYLK - CYL1=CBY(LB) - CYL2=CBY(LB+1) - DO 115 K=LB+1,NM-1 - CYLK=2.0D0*K/Z*CYL2-CYL1 - CBY(K+1)=CYLK - CYL1=CYL2 -115 CYL2=CYLK - DO 120 K=2,NM - WA=CDABS(CBY(K)) - IF (WA.LT.CDABS(CBY(K-1))) LB=K -120 CONTINUE - GO TO 95 -125 CONTINUE - DO 130 K=2,NM -130 CDY(K)=CBY(K-1)-K/Z*CBY(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE CJYNB(N,Z,NM,CBJ,CDJ,CBY,CDY) -C -C ======================================================= -C Purpose: Compute Bessel functions Jn(z), Yn(z) and -C their derivatives for a complex argument -C Input : z --- Complex argument of Jn(z) and Yn(z) -C n --- 
Order of Jn(z) and Yn(z) -C Output: CBJ(n) --- Jn(z) -C CDJ(n) --- Jn'(z) -C CBY(n) --- Yn(z) -C CDY(n) --- Yn'(z) -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 to calculate the starting -C point for backward recurrence -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBJ(0:N),CDJ(0:N),CBY(0:N),CDY(0:N), - & A(4),B(4),A1(4),B1(4) - EL=0.5772156649015329D0 - PI=3.141592653589793D0 - R2P=.63661977236758D0 - Y0=DABS(DIMAG(Z)) - A0=CDABS(Z) - NM=N - IF (A0.LT.1.0D-100) THEN - DO 10 K=0,N - CBJ(K)=(0.0D0,0.0D0) - CDJ(K)=(0.0D0,0.0D0) - CBY(K)=-(1.0D+300,0.0D0) -10 CDY(K)=(1.0D+300,0.0D0) - CBJ(0)=(1.0D0,0.0D0) - CDJ(1)=(0.5D0,0.0D0) - RETURN - ENDIF - IF (A0.LE.300.D0.OR.N.GT.80) THEN - IF (N.EQ.0) NM=1 - M=MSTA1(A0,200) - IF (M.LT.NM) THEN - NM=M - ELSE - M=MSTA2(A0,NM,15) - ENDIF - CBS=(0.0D0,0.0D0) - CSU=(0.0D0,0.0D0) - CSV=(0.0D0,0.0D0) - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 15 K=M,0,-1 - CF=2.0D0*(K+1.0D0)/Z*CF1-CF2 - IF (K.LE.NM) CBJ(K)=CF - IF (K.EQ.2*INT(K/2).AND.K.NE.0) THEN - IF (Y0.LE.1.0D0) THEN - CBS=CBS+2.0D0*CF - ELSE - CBS=CBS+(-1)**(K/2)*2.0D0*CF - ENDIF - CSU=CSU+(-1)**(K/2)*CF/K - ELSE IF (K.GT.1) THEN - CSV=CSV+(-1)**(K/2)*K/(K*K-1.0D0)*CF - ENDIF - CF2=CF1 -15 CF1=CF - IF (Y0.LE.1.0D0) THEN - CS0=CBS+CF - ELSE - CS0=(CBS+CF)/CDCOS(Z) - ENDIF - DO 20 K=0,NM -20 CBJ(K)=CBJ(K)/CS0 - CE=CDLOG(Z/2.0D0)+EL - CBY(0)=R2P*(CE*CBJ(0)-4.0D0*CSU/CS0) - CBY(1)=R2P*(-CBJ(0)/Z+(CE-1.0D0)*CBJ(1)-4.0D0*CSV/CS0) - ELSE - DATA A/-.7031250000000000D-01,.1121520996093750D+00, - & -.5725014209747314D+00,.6074042001273483D+01/ - DATA B/ .7324218750000000D-01,-.2271080017089844D+00, - & .1727727502584457D+01,-.2438052969955606D+02/ - DATA A1/.1171875000000000D+00,-.1441955566406250D+00, - & .6765925884246826D+00,-.6883914268109947D+01/ - DATA B1/-.1025390625000000D+00,.2775764465332031D+00, - & -.1993531733751297D+01,.2724882731126854D+02/ - 
CT1=Z-0.25D0*PI - CP0=(1.0D0,0.0D0) - DO 25 K=1,4 -25 CP0=CP0+A(K)*Z**(-2*K) - CQ0=-0.125D0/Z - DO 30 K=1,4 -30 CQ0=CQ0+B(K)*Z**(-2*K-1) - CU=CDSQRT(R2P/Z) - CBJ0=CU*(CP0*CDCOS(CT1)-CQ0*CDSIN(CT1)) - CBY0=CU*(CP0*CDSIN(CT1)+CQ0*CDCOS(CT1)) - CBJ(0)=CBJ0 - CBY(0)=CBY0 - CT2=Z-0.75D0*PI - CP1=(1.0D0,0.0D0) - DO 35 K=1,4 -35 CP1=CP1+A1(K)*Z**(-2*K) - CQ1=0.375D0/Z - DO 40 K=1,4 -40 CQ1=CQ1+B1(K)*Z**(-2*K-1) - CBJ1=CU*(CP1*CDCOS(CT2)-CQ1*CDSIN(CT2)) - CBY1=CU*(CP1*CDSIN(CT2)+CQ1*CDCOS(CT2)) - CBJ(1)=CBJ1 - CBY(1)=CBY1 - DO 45 K=2,NM - CBJK=2.0D0*(K-1.0D0)/Z*CBJ1-CBJ0 - CBJ(K)=CBJK - CBJ0=CBJ1 -45 CBJ1=CBJK - ENDIF - CDJ(0)=-CBJ(1) - DO 50 K=1,NM -50 CDJ(K)=CBJ(K-1)-K/Z*CBJ(K) - IF (CDABS(CBJ(0)).GT.1.0D0) THEN - CBY(1)=(CBJ(1)*CBY(0)-2.0D0/(PI*Z))/CBJ(0) - ENDIF - DO 55 K=2,NM - IF (CDABS(CBJ(K-1)).GE.CDABS(CBJ(K-2))) THEN - CYY=(CBJ(K)*CBY(K-1)-2.0D0/(PI*Z))/CBJ(K-1) - ELSE - CYY=(CBJ(K)*CBY(K-2)-4.0D0*(K-1.0D0)/(PI*Z*Z))/CBJ(K-2) - ENDIF - CBY(K)=CYY -55 CONTINUE - CDY(0)=-CBY(1) - DO 60 K=1,NM -60 CDY(K)=CBY(K-1)-K/Z*CBY(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE IKNB(N,X,NM,BI,DI,BK,DK) -C -C ============================================================ -C Purpose: Compute modified Bessel functions In(x) and Kn(x), -C and their derivatives -C Input: x --- Argument of In(x) and Kn(x) ( 0 ≤ x ≤ 700 ) -C n --- Order of In(x) and Kn(x) -C Output: BI(n) --- In(x) -C DI(n) --- In'(x) -C BK(n) --- Kn(x) -C DK(n) --- Kn'(x) -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the starting point -C for backward recurrence -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BI(0:N),DI(0:N),BK(0:N),DK(0:N) - PI=3.141592653589793D0 - EL=0.5772156649015329D0 - NM=N - IF (X.LE.1.0D-100) THEN - DO 10 K=0,N - BI(K)=0.0D0 - DI(K)=0.0D0 - BK(K)=1.0D+300 -10 DK(K)=-1.0D+300 - BI(0)=1.0D0 - DI(1)=0.5D0 - RETURN - ENDIF - IF (N.EQ.0) NM=1 - M=MSTA1(X,200) - 
IF (M.LT.NM) THEN - NM=M - ELSE - M=MSTA2(X,NM,15) - ENDIF - BS=0.0D0 - SK0=0.0D0 - F=0.0D0 - F0=0.0D0 - F1=1.0D-100 - DO 15 K=M,0,-1 - F=2.0D0*(K+1.0D0)/X*F1+F0 - IF (K.LE.NM) BI(K)=F - IF (K.NE.0.AND.K.EQ.2*INT(K/2)) SK0=SK0+4.0D0*F/K - BS=BS+2.0D0*F - F0=F1 -15 F1=F - S0=DEXP(X)/(BS-F) - DO 20 K=0,NM -20 BI(K)=S0*BI(K) - IF (X.LE.8.0D0) THEN - BK(0)=-(DLOG(0.5D0*X)+EL)*BI(0)+S0*SK0 - BK(1)=(1.0D0/X-BI(1)*BK(0))/BI(0) - ELSE - A0=DSQRT(PI/(2.0D0*X))*DEXP(-X) - K0=16 - IF (X.GE.25.0) K0=10 - IF (X.GE.80.0) K0=8 - IF (X.GE.200.0) K0=6 - DO 30 L=0,1 - BKL=1.0D0 - VT=4.0D0*L - R=1.0D0 - DO 25 K=1,K0 - R=0.125D0*R*(VT-(2.0*K-1.0)**2)/(K*X) -25 BKL=BKL+R - BK(L)=A0*BKL -30 CONTINUE - ENDIF - G0=BK(0) - G1=BK(1) - DO 35 K=2,NM - G=2.0D0*(K-1.0D0)/X*G1+G0 - BK(K)=G - G0=G1 -35 G1=G - DI(0)=BI(1) - DK(0)=-BK(1) - DO 40 K=1,NM - DI(K)=BI(K-1)-K/X*BI(K) -40 DK(K)=-BK(K-1)-K/X*BK(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE LPMN(MM,M,N,X,PM,PD) -C -C ===================================================== -C Purpose: Compute the associated Legendre functions -C Pmn(x) and their derivatives Pmn'(x) -C Input : x --- Argument of Pmn(x) -C m --- Order of Pmn(x), m = 0,1,2,...,n -C n --- Degree of Pmn(x), n = 0,1,2,...,N -C mm --- Physical dimension of PM and PD -C Output: PM(m,n) --- Pmn(x) -C PD(m,n) --- Pmn'(x) -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (P,X) - DIMENSION PM(0:MM,0:N),PD(0:MM,0:N) - INTRINSIC MIN - DO 10 I=0,N - DO 10 J=0,M - PM(J,I)=0.0D0 -10 PD(J,I)=0.0D0 - PM(0,0)=1.0D0 - IF (N.EQ.0) RETURN - IF (DABS(X).EQ.1.0D0) THEN - DO 15 I=1,N - PM(0,I)=X**I -15 PD(0,I)=0.5D0*I*(I+1.0D0)*X**(I+1) - DO 20 J=1,N - DO 20 I=1,M - IF (I.EQ.1) THEN - PD(I,J)=1.0D+300 - ELSE IF (I.EQ.2) THEN - PD(I,J)=-0.25D0*(J+2)*(J+1)*J*(J-1)*X**(J+1) - ENDIF -20 CONTINUE - RETURN - ENDIF - LS=1 - IF (DABS(X).GT.1.0D0) LS=-1 - XQ=DSQRT(LS*(1.0D0-X*X)) - XS=LS*(1.0D0-X*X) - DO 30 I=1,M -30 
PM(I,I)=-LS*(2.0D0*I-1.0D0)*XQ*PM(I-1,I-1) - DO 35 I=0,MIN(M,N-1) -35 PM(I,I+1)=(2.0D0*I+1.0D0)*X*PM(I,I) - DO 40 I=0,M - DO 40 J=I+2,N - PM(I,J)=((2.0D0*J-1.0D0)*X*PM(I,J-1)- - & (I+J-1.0D0)*PM(I,J-2))/(J-I) -40 CONTINUE - PD(0,0)=0.0D0 - DO 45 J=1,N -45 PD(0,J)=LS*J*(PM(0,J-1)-X*PM(0,J))/XS - DO 50 I=1,M - DO 50 J=I,N - PD(I,J)=LS*I*X*PM(I,J)/XS+(J+I) - & *(J-I+1.0D0)/XQ*PM(I-1,J) -50 CONTINUE - RETURN - END - -C ********************************** - - SUBROUTINE MTU0(KF,M,Q,X,CSF,CSD) -C -C =============================================================== -C Purpose: Compute Mathieu functions cem(x,q) and sem(x,q) -C and their derivatives ( q ≥ 0 ) -C Input : KF --- Function code -C KF=1 for computing cem(x,q) and cem'(x,q) -C KF=2 for computing sem(x,q) and sem'(x,q) -C m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C x --- Argument of Mathieu functions (in degrees) -C Output: CSF --- cem(x,q) or sem(x,q) -C CSD --- cem'x,q) or sem'x,q) -C Routines called: -C (1) CVA2 for computing the characteristic values -C (2) FCOEF for computing the expansion coefficients -C =============================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION FG(251) - EPS=1.0D-14 - IF (KF.EQ.1.AND.M.EQ.2*INT(M/2)) KD=1 - IF (KF.EQ.1.AND.M.NE.2*INT(M/2)) KD=2 - IF (KF.EQ.2.AND.M.NE.2*INT(M/2)) KD=3 - IF (KF.EQ.2.AND.M.EQ.2*INT(M/2)) KD=4 - CALL CVA2(KD,M,Q,A) - IF (Q.LE.1.0D0) THEN - QM=7.5+56.1*SQRT(Q)-134.7*Q+90.7*SQRT(Q)*Q - ELSE - QM=17.0+3.1*SQRT(Q)-.126*Q+.0037*SQRT(Q)*Q - ENDIF - KM=INT(QM+0.5*M) - CALL FCOEF(KD,M,Q,A,FG) - IC=INT(M/2)+1 - RD=1.74532925199433D-2 - XR=X*RD - CSF=0.0D0 - DO 10 K=1,KM - IF (KD.EQ.1) THEN - CSF=CSF+FG(K)*DCOS((2*K-2)*XR) - ELSE IF (KD.EQ.2) THEN - CSF=CSF+FG(K)*DCOS((2*K-1)*XR) - ELSE IF (KD.EQ.3) THEN - CSF=CSF+FG(K)*DSIN((2*K-1)*XR) - ELSE IF (KD.EQ.4) THEN - CSF=CSF+FG(K)*DSIN(2*K*XR) - ENDIF - IF (K.GE.IC.AND.DABS(FG(K)).LT.DABS(CSF)*EPS) GO TO 15 -10 CONTINUE -15 CSD=0.0D0 - DO 
20 K=1,KM - IF (KD.EQ.1) THEN - CSD=CSD-(2*K-2)*FG(K)*DSIN((2*K-2)*XR) - ELSE IF (KD.EQ.2) THEN - CSD=CSD-(2*K-1)*FG(K)*DSIN((2*K-1)*XR) - ELSE IF (KD.EQ.3) THEN - CSD=CSD+(2*K-1)*FG(K)*DCOS((2*K-1)*XR) - ELSE IF (KD.EQ.4) THEN - CSD=CSD+2.0D0*K*FG(K)*DCOS(2*K*XR) - ENDIF - IF (K.GE.IC.AND.DABS(FG(K)).LT.DABS(CSD)*EPS) GO TO 25 -20 CONTINUE -25 RETURN - END - - - -C ********************************** - - SUBROUTINE CY01(KF,Z,ZF,ZD) -C -C =========================================================== -C Purpose: Compute complex Bessel functions Y0(z), Y1(z) -C and their derivatives -C Input : z --- Complex argument of Yn(z) ( n=0,1 ) -C KF --- Function choice code -C KF=0 for ZF=Y0(z) and ZD=Y0'(z) -C KF=1 for ZF=Y1(z) and ZD=Y1'(z) -C KF=2 for ZF=Y1'(z) and ZD=Y1''(z) -C Output: ZF --- Y0(z) or Y1(z) or Y1'(z) -C ZD --- Y0'(z) or Y1'(z) or Y1''(z) -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,E,P,R,W) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION A(12),B(12),A1(12),B1(12) - PI=3.141592653589793D0 - EL=0.5772156649015329D0 - RP2=2.0D0/PI - CI=(0.0D0,1.0D0) - A0=CDABS(Z) - Z2=Z*Z - Z1=Z - IF (A0.EQ.0.0D0) THEN - CBJ0=(1.0D0,0.0D0) - CBJ1=(0.0D0,0.0D0) - CBY0=-(1.0D300,0.0D0) - CBY1=-(1.0D300,0.0D0) - CDY0=(1.0D300,0.0D0) - CDY1=(1.0D300,0.0D0) - GO TO 70 - ENDIF - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (A0.LE.12.0) THEN - CBJ0=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 10 K=1,40 - CR=-0.25D0*CR*Z2/(K*K) - CBJ0=CBJ0+CR - IF (CDABS(CR).LT.CDABS(CBJ0)*1.0D-15) GO TO 15 -10 CONTINUE -15 CBJ1=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 20 K=1,40 - CR=-0.25D0*CR*Z2/(K*(K+1.0D0)) - CBJ1=CBJ1+CR - IF (CDABS(CR).LT.CDABS(CBJ1)*1.0D-15) GO TO 25 -20 CONTINUE -25 CBJ1=0.5D0*Z1*CBJ1 - W0=0.0D0 - CR=(1.0D0,0.0D0) - CS=(0.0D0,0.0D0) - DO 30 K=1,40 - W0=W0+1.0D0/K - CR=-0.25D0*CR/(K*K)*Z2 - CP=CR*W0 - CS=CS+CP - IF (CDABS(CP).LT.CDABS(CS)*1.0D-15) GO TO 35 -30 CONTINUE -35 CBY0=RP2*(CDLOG(Z1/2.0D0)+EL)*CBJ0-RP2*CS - W1=0.0D0 - CR=(1.0D0,0.0D0) - 
CS=(1.0D0,0.0D0) - DO 40 K=1,40 - W1=W1+1.0D0/K - CR=-0.25D0*CR/(K*(K+1))*Z2 - CP=CR*(2.0D0*W1+1.0D0/(K+1.0D0)) - CS=CS+CP - IF (CDABS(CP).LT.CDABS(CS)*1.0D-15) GO TO 45 -40 CONTINUE -45 CBY1=RP2*((CDLOG(Z1/2.0D0)+EL)*CBJ1-1.0D0/Z1-.25D0*Z1*CS) - ELSE - DATA A/-.703125D-01,.112152099609375D+00, - & -.5725014209747314D+00,.6074042001273483D+01, - & -.1100171402692467D+03,.3038090510922384D+04, - & -.1188384262567832D+06,.6252951493434797D+07, - & -.4259392165047669D+09,.3646840080706556D+11, - & -.3833534661393944D+13,.4854014686852901D+15/ - DATA B/ .732421875D-01,-.2271080017089844D+00, - & .1727727502584457D+01,-.2438052969955606D+02, - & .5513358961220206D+03,-.1825775547429318D+05, - & .8328593040162893D+06,-.5006958953198893D+08, - & .3836255180230433D+10,-.3649010818849833D+12, - & .4218971570284096D+14,-.5827244631566907D+16/ - DATA A1/.1171875D+00,-.144195556640625D+00, - & .6765925884246826D+00,-.6883914268109947D+01, - & .1215978918765359D+03,-.3302272294480852D+04, - & .1276412726461746D+06,-.6656367718817688D+07, - & .4502786003050393D+09,-.3833857520742790D+11, - & .4011838599133198D+13,-.5060568503314727D+15/ - DATA B1/-.1025390625D+00,.2775764465332031D+00, - & -.1993531733751297D+01,.2724882731126854D+02, - & -.6038440767050702D+03,.1971837591223663D+05, - & -.8902978767070678D+06,.5310411010968522D+08, - & -.4043620325107754D+10,.3827011346598605D+12, - & -.4406481417852278D+14,.6065091351222699D+16/ - K0=12 - IF (A0.GE.35.0) K0=10 - IF (A0.GE.50.0) K0=8 - CT1=Z1-.25D0*PI - CP0=(1.0D0,0.0D0) - DO 50 K=1,K0 -50 CP0=CP0+A(K)*Z1**(-2*K) - CQ0=-0.125D0/Z1 - DO 55 K=1,K0 -55 CQ0=CQ0+B(K)*Z1**(-2*K-1) - CU=CDSQRT(RP2/Z1) - CBJ0=CU*(CP0*CDCOS(CT1)-CQ0*CDSIN(CT1)) - CBY0=CU*(CP0*CDSIN(CT1)+CQ0*CDCOS(CT1)) - CT2=Z1-.75D0*PI - CP1=(1.0D0,0.0D0) - DO 60 K=1,K0 -60 CP1=CP1+A1(K)*Z1**(-2*K) - CQ1=0.375D0/Z1 - DO 65 K=1,K0 -65 CQ1=CQ1+B1(K)*Z1**(-2*K-1) - CBJ1=CU*(CP1*CDCOS(CT2)-CQ1*CDSIN(CT2)) - CBY1=CU*(CP1*CDSIN(CT2)+CQ1*CDCOS(CT2)) - ENDIF - IF 
(DBLE(Z).LT.0.0) THEN - IF (DIMAG(Z).LT.0.0) CBY0=CBY0-2.0D0*CI*CBJ0 - IF (DIMAG(Z).GT.0.0) CBY0=CBY0+2.0D0*CI*CBJ0 - IF (DIMAG(Z).LT.0.0) CBY1=-(CBY1-2.0D0*CI*CBJ1) - IF (DIMAG(Z).GT.0.0) CBY1=-(CBY1+2.0D0*CI*CBJ1) - CBJ1=-CBJ1 - ENDIF - CDY0=-CBY1 - CDY1=CBY0-1.0D0/Z*CBY1 -70 IF (KF.EQ.0) THEN - ZF=CBY0 - ZD=CDY0 - ELSE IF (KF.EQ.1) THEN - ZF=CBY1 - ZD=CDY1 - ELSE IF (KF.EQ.2) THEN - ZF=CDY1 - ZD=-CDY1/Z-(1.0D0-1.0D0/(Z*Z))*CBY1 - ENDIF - RETURN - END - - -C ********************************** - - SUBROUTINE FFK(KS,X,FR,FI,FM,FA,GR,GI,GM,GA) -C -C ======================================================= -C Purpose: Compute modified Fresnel integrals F±(x) -C and K±(x) -C Input : x --- Argument of F±(x) and K±(x) -C KS --- Sign code -C KS=0 for calculating F+(x) and K+(x) -C KS=1 for calculating F_(x) and K_(x) -C Output: FR --- Re[F±(x)] -C FI --- Im[F±(x)] -C FM --- |F±(x)| -C FA --- Arg[F±(x)] (Degs.) -C GR --- Re[K±(x)] -C GI --- Im[K±(x)] -C GM --- |K±(x)| -C GA --- Arg[K±(x)] (Degs.) 
-C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - SRD= 57.29577951308233D0 - EPS=1.0D-15 - PI=3.141592653589793D0 - PP2=1.2533141373155D0 - P2P=.7978845608028654D0 - XA=DABS(X) - X2=X*X - X4=X2*X2 - IF (X.EQ.0.0D0) THEN - FR=.5D0*DSQRT(0.5D0*PI) - FI=(-1)**KS*FR - FM=DSQRT(0.25D0*PI) - FA=(-1)**KS*45.0D0 - GR=.5D0 - GI=0.0D0 - GM=.5D0 - GA=0.0D0 - ELSE - IF (XA.LE.2.5D0) THEN - XR=P2P*XA - C1=XR - DO 10 K=1,50 - XR=-.5D0*XR*(4.0D0*K-3.0D0)/K/(2.0D0*K-1.0D0) - & /(4.0D0*K+1.0D0)*X4 - C1=C1+XR - IF (DABS(XR/C1).LT.EPS) GO TO 15 -10 CONTINUE -15 S1=P2P*XA*XA*XA/3.0D0 - XR=S1 - DO 20 K=1,50 - XR=-.5D0*XR*(4.0D0*K-1.0D0)/K/(2.0D0*K+1.0D0) - & /(4.0D0*K+3.0D0)*X4 - S1=S1+XR - IF (DABS(XR/S1).LT.EPS) GO TO 40 -20 CONTINUE - ELSE IF (XA.LT.5.5D0) THEN - M=INT(42+1.75*X2) - XSU=0.0D0 - XC=0.0D0 - XS=0.0D0 - XF1=0.0D0 - XF0=1D-100 - DO 25 K=M,0,-1 - XF=(2.0D0*K+3.0D0)*XF0/X2-XF1 - IF (K.EQ.2*INT(K/2)) THEN - XC=XC+XF - ELSE - XS=XS+XF - ENDIF - XSU=XSU+(2.0D0*K+1.0D0)*XF*XF - XF1=XF0 -25 XF0=XF - XQ=DSQRT(XSU) - XW=P2P*XA/XQ - C1=XC*XW - S1=XS*XW - ELSE - XR=1.0D0 - XF=1.0D0 - DO 30 K=1,12 - XR=-.25D0*XR*(4.0D0*K-1.0D0)*(4.0D0*K-3.0D0)/X4 -30 XF=XF+XR - XR=1.0D0/(2.0D0*XA*XA) - XG=XR - DO 35 K=1,12 - XR=-.25D0*XR*(4.0D0*K+1.0D0)*(4.0D0*K-1.0D0)/X4 -35 XG=XG+XR - C1=.5D0+(XF*DSIN(X2)-XG*DCOS(X2))/DSQRT(2.0D0*PI)/XA - S1=.5D0-(XF*DCOS(X2)+XG*DSIN(X2))/DSQRT(2.0D0*PI)/XA - ENDIF -40 FR=PP2*(.5D0-C1) - FI0=PP2*(.5D0-S1) - FI=(-1)**KS*FI0 - FM=DSQRT(FR*FR+FI*FI) - IF (FR.GE.0.0) THEN - FA=SRD*DATAN(FI/FR) - ELSE IF (FI.GT.0.0) THEN - FA=SRD*(DATAN(FI/FR)+PI) - ELSE IF (FI.LT.0.0) THEN - FA=SRD*(DATAN(FI/FR)-PI) - ENDIF - XP=X*X+PI/4.0D0 - CS=DCOS(XP) - SS=DSIN(XP) - XQ2=1.0D0/DSQRT(PI) - GR=XQ2*(FR*CS+FI0*SS) - GI=(-1)**KS*XQ2*(FI0*CS-FR*SS) - GM=DSQRT(GR*GR+GI*GI) - IF (GR.GE.0.0) THEN - GA=SRD*DATAN(GI/GR) - ELSE IF (GI.GT.0.0) THEN - GA=SRD*(DATAN(GI/GR)+PI) - ELSE IF (GI.LT.0.0) THEN - GA=SRD*(DATAN(GI/GR)-PI) - ENDIF - IF 
(X.LT.0.0D0) THEN - FR=PP2-FR - FI=(-1)**KS*PP2-FI - FM=DSQRT(FR*FR+FI*FI) - FA=SRD*DATAN(FI/FR) - GR=DCOS(X*X)-GR - GI=-(-1)**KS*DSIN(X*X)-GI - GM=DSQRT(GR*GR+GI*GI) - GA=SRD*DATAN(GI/GR) - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE AIRYA(X,AI,BI,AD,BD) -C -C ====================================================== -C Purpose: Compute Airy functions and their derivatives -C Input: x --- Argument of Airy function -C Output: AI --- Ai(x) -C BI --- Bi(x) -C AD --- Ai'(x) -C BD --- Bi'(x) -C Routine called: -C AJYIK for computing Jv(x), Yv(x), Iv(x) and -C Kv(x) with v=1/3 and 2/3 -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - XA=DABS(X) - PIR=0.318309886183891D0 - C1=0.355028053887817D0 - C2=0.258819403792807D0 - SR3=1.732050807568877D0 - Z=XA**1.5/1.5D0 - XQ=DSQRT(XA) - CALL AJYIK(Z,VJ1,VJ2,VY1,VY2,VI1,VI2,VK1,VK2) - IF (X.EQ.0.0D0) THEN - AI=C1 - BI=SR3*C1 - AD=-C2 - BD=SR3*C2 - ELSE IF (X.GT.0.0D0) THEN - AI=PIR*XQ/SR3*VK1 - BI=XQ*(PIR*VK1+2.0D0/SR3*VI1) - AD=-XA/SR3*PIR*VK2 - BD=XA*(PIR*VK2+2.0D0/SR3*VI2) - ELSE - AI=0.5D0*XQ*(VJ1-VY1/SR3) - BI=-0.5D0*XQ*(VJ1/SR3+VY1) - AD=0.5D0*XA*(VJ2+VY2/SR3) - BD=0.5D0*XA*(VJ2/SR3-VY2) - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE AIRYB(X,AI,BI,AD,BD) -C -C ======================================================= -C Purpose: Compute Airy functions and their derivatives -C Input: x --- Argument of Airy function -C Output: AI --- Ai(x) -C BI --- Bi(x) -C AD --- Ai'(x) -C BD --- Bi'(x) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CK(41),DK(41) - EPS=1.0D-15 - PI=3.141592653589793D0 - C1=0.355028053887817D0 - C2=0.258819403792807D0 - SR3=1.732050807568877D0 - XA=DABS(X) - XQ=DSQRT(XA) - XM=8.0D0 - IF (X.GT.0.0D0) XM=5.0D0 - IF (X.EQ.0.0D0) THEN - AI=C1 - BI=SR3*C1 - AD=-C2 - BD=SR3*C2 - RETURN - ENDIF - IF (XA.LE.XM) THEN - FX=1.0D0 - 
R=1.0D0 - DO 10 K=1,40 - R=R*X/(3.0D0*K)*X/(3.0D0*K-1.0D0)*X - FX=FX+R - IF (DABS(R).LT.DABS(FX)*EPS) GO TO 15 -10 CONTINUE -15 GX=X - R=X - DO 20 K=1,40 - R=R*X/(3.0D0*K)*X/(3.0D0*K+1.0D0)*X - GX=GX+R - IF (DABS(R).LT.DABS(GX)*EPS) GO TO 25 -20 CONTINUE -25 AI=C1*FX-C2*GX - BI=SR3*(C1*FX+C2*GX) - DF=0.5D0*X*X - R=DF - DO 30 K=1,40 - R=R*X/(3.0D0*K)*X/(3.0D0*K+2.0D0)*X - DF=DF+R - IF (DABS(R).LT.DABS(DF)*EPS) GO TO 35 -30 CONTINUE -35 DG=1.0D0 - R=1.0D0 - DO 40 K=1,40 - R=R*X/(3.0D0*K)*X/(3.0D0*K-2.0D0)*X - DG=DG+R - IF (DABS(R).LT.DABS(DG)*EPS) GO TO 45 -40 CONTINUE -45 AD=C1*DF-C2*DG - BD=SR3*(C1*DF+C2*DG) - ELSE - XE=XA*XQ/1.5D0 - XR1=1.0D0/XE - XAR=1.0D0/XQ - XF=DSQRT(XAR) - RP=0.5641895835477563D0 - R=1.0D0 - DO 50 K=1,40 - R=R*(6.0D0*K-1.0D0)/216.0D0*(6.0D0*K-3.0D0) - & /K*(6.0D0*K-5.0D0)/(2.0D0*K-1.0D0) - CK(K)=R -50 DK(K)=-(6.0D0*K+1.0D0)/(6.0D0*K-1.0D0)*CK(K) - KM=INT(24.5-XA) - IF (XA.LT.6.0) KM=14 - IF (XA.GT.15.0) KM=10 - IF (X.GT.0.0D0) THEN - SAI=1.0D0 - SAD=1.0D0 - R=1.0D0 - DO 55 K=1,KM - R=-R*XR1 - SAI=SAI+CK(K)*R -55 SAD=SAD+DK(K)*R - SBI=1.0D0 - SBD=1.0D0 - R=1.0D0 - DO 60 K=1,KM - R=R*XR1 - SBI=SBI+CK(K)*R -60 SBD=SBD+DK(K)*R - XP1=DEXP(-XE) - AI=0.5D0*RP*XF*XP1*SAI - BI=RP*XF/XP1*SBI - AD=-.5D0*RP/XF*XP1*SAD - BD=RP/XF/XP1*SBD - ELSE - XCS=DCOS(XE+PI/4.0D0) - XSS=DSIN(XE+PI/4.0D0) - SSA=1.0D0 - SDA=1.0D0 - R=1.0D0 - XR2=1.0D0/(XE*XE) - DO 65 K=1,KM - R=-R*XR2 - SSA=SSA+CK(2*K)*R -65 SDA=SDA+DK(2*K)*R - SSB=CK(1)*XR1 - SDB=DK(1)*XR1 - R=XR1 - DO 70 K=1,KM - R=-R*XR2 - SSB=SSB+CK(2*K+1)*R -70 SDB=SDB+DK(2*K+1)*R - AI=RP*XF*(XSS*SSA-XCS*SSB) - BI=RP*XF*(XCS*SSA+XSS*SSB) - AD=-RP/XF*(XCS*SDA+XSS*SDB) - BD=RP/XF*(XSS*SDA-XCS*SDB) - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE SCKA(M,N,C,CV,KD,CK) -C -C ====================================================== -C Purpose: Compute the expansion coefficients of the -C prolate and oblate spheroidal functions, c2k -C Input : m --- Mode parameter -C n --- Mode parameter 
-C c --- Spheroidal parameter -C cv --- Characteristic value -C KD --- Function code -C KD=1 for prolate; KD=-1 for oblate -C Output: CK(k) --- Expansion coefficients ck; -C CK(1), CK(2),... correspond to -C c0, c2,... -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CK(200) - IF (C.LE.1.0D-10) C=1.0D-10 - NM=25+INT((N-M)/2+C) - CS=C*C*KD - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - FS=1.0D0 - F1=0.0D0 - F0=1.0D-100 - KB=0 - CK(NM+1)=0.0D0 - FL=0.0D0 - DO 15 K=NM,1,-1 - F=(((2.0D0*K+M+IP)*(2.0D0*K+M+1.0D0+IP)-CV+CS)*F0 - & -4.0D0*(K+1.0D0)*(K+M+1.0D0)*F1)/CS - IF (DABS(F).GT.DABS(CK(K+1))) THEN - CK(K)=F - F1=F0 - F0=F - IF (DABS(F).GT.1.0D+100) THEN - DO 5 K1=NM,K,-1 -5 CK(K1)=CK(K1)*1.0D-100 - F1=F1*1.0D-100 - F0=F0*1.0D-100 - ENDIF - ELSE - KB=K - FL=CK(K+1) - F1=1.0D0 - F2=0.25D0*((M+IP)*(M+IP+1.0)-CV+CS)/(M+1.0)*F1 - CK(1)=F1 - IF (KB.EQ.1) THEN - FS=F2 - ELSE IF (KB.EQ.2) THEN - CK(2)=F2 - FS=0.125D0*(((M+IP+2.0)*(M+IP+3.0)-CV+CS)*F2 - & -CS*F1)/(M+2.0) - ELSE - CK(2)=F2 - DO 10 J=3,KB+1 - F=0.25D0*(((2.0*J+M+IP-4.0)*(2.0*J+M+IP- - & 3.0)-CV+CS)*F2-CS*F1)/((J-1.0)*(J+M-1.0)) - IF (J.LE.KB) CK(J)=F - F1=F2 -10 F2=F - FS=F - ENDIF - GO TO 20 - ENDIF -15 CONTINUE -20 SU1=0.0D0 - DO 25 K=1,KB -25 SU1=SU1+CK(K) - SU2=0.0D0 - DO 30 K=KB+1,NM -30 SU2=SU2+CK(K) - R1=1.0D0 - DO 35 J=1,(N+M+IP)/2 -35 R1=R1*(J+0.5D0*(N+M+IP)) - R2=1.0D0 - DO 40 J=1,(N-M-IP)/2 -40 R2=-R2*J - IF (KB.EQ.0) THEN - S0=R1/(2.0D0**N*R2*SU2) - ELSE - S0=R1/(2.0D0**N*R2*(FL/FS*SU1+SU2)) - ENDIF - DO 45 K=1,KB -45 CK(K)=FL/FS*S0*CK(K) - DO 50 K=KB+1,NM -50 CK(K)=S0*CK(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE SCKB(M,N,C,DF,CK) -C -C ====================================================== -C Purpose: Compute the expansion coefficients of the -C prolate and oblate spheroidal functions -C Input : m --- Mode parameter -C n --- Mode parameter -C c --- Spheroidal parameter -C DF(k) --- Expansion coefficients 
dk -C Output: CK(k) --- Expansion coefficients ck; -C CK(1), CK(2), ... correspond to -C c0, c2, ... -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DF(200),CK(200) - IF (C.LE.1.0D-10) C=1.0D-10 - NM=25+INT(0.5*(N-M)+C) - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - REG=1.0D0 - IF (M+NM.GT.80) REG=1.0D-200 - FAC=-0.5D0**M - SW=0.0D0 - DO 35 K=0,NM-1 - FAC=-FAC - I1=2*K+IP+1 - R=REG - DO 10 I=I1,I1+2*M-1 -10 R=R*I - I2=K+M+IP - DO 15 I=I2,I2+K-1 -15 R=R*(I+0.5D0) - SUM=R*DF(K+1) - DO 20 I=K+1,NM - D1=2.0D0*I+IP - D2=2.0D0*M+D1 - D3=I+M+IP-0.5D0 - R=R*D2*(D2-1.0D0)*I*(D3+K)/(D1*(D1-1.0D0)*(I-K)*D3) - SUM=SUM+R*DF(I+1) - IF (DABS(SW-SUM).LT.DABS(SUM)*1.0D-14) GOTO 25 -20 SW=SUM -25 R1=REG - DO 30 I=2,M+K -30 R1=R1*I -35 CK(K+1)=FAC*SUM/R1 - RETURN - END - - - -C ********************************** - - SUBROUTINE CPDLA(N,Z,CDN) -C -C =========================================================== -C Purpose: Compute complex parabolic cylinder function Dn(z) -C for large argument -C Input: z --- Complex argument of Dn(z) -C n --- Order of Dn(z) (n = 0,±1,±2,…) -C Output: CDN --- Dn(z) -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - CB0=Z**N*CDEXP(-.25D0*Z*Z) - CR=(1.0D0,0.0D0) - CDN=(1.0D0,0.0D0) - DO 10 K=1,16 - CR=-0.5D0*CR*(2.0*K-N-1.0)*(2.0*K-N-2.0)/(K*Z*Z) - CDN=CDN+CR - IF (CDABS(CR).LT.CDABS(CDN)*1.0D-12) GO TO 15 -10 CONTINUE -15 CDN=CB0*CDN - RETURN - END - - - -C ********************************** - - SUBROUTINE FCSZO(KF,NT,ZO) -C -C =============================================================== -C Purpose: Compute the complex zeros of Fresnel integral C(z) -C or S(z) using modified Newton's iteration method -C Input : KF --- Function code -C KF=1 for C(z) or KF=2 for S(z) -C NT --- Total number of zeros -C Output: ZO(L) --- L-th zero of C(z) or S(z) -C Routines called: -C (1) CFC for computing Fresnel integral C(z) 
-C (2) CFS for computing Fresnel integral S(z) -C ============================================================== -C - IMPLICIT DOUBLE PRECISION (E,P,W) - IMPLICIT COMPLEX *16 (C,Z) - DIMENSION ZO(NT) - PI=3.141592653589793D0 - PSQ=0.0D0 - W=0.0D0 - DO 35 NR=1,NT - IF (KF.EQ.1) PSQ=DSQRT(4.0D0*NR-1.0D0) - IF (KF.EQ.2) PSQ=2.0D0*NR**(0.5) - PX=PSQ-DLOG(PI*PSQ)/(PI*PI*PSQ**3.0) - PY=DLOG(PI*PSQ)/(PI*PSQ) - Z=CMPLX(PX,PY) - IF (KF.EQ.2) THEN - IF (NR.EQ.2) Z=(2.8334,0.2443) - IF (NR.EQ.3) Z=(3.4674,0.2185) - IF (NR.EQ.4) Z=(4.0025,0.2008) - ENDIF - IT=0 -15 IT=IT+1 - IF (KF.EQ.1) CALL CFC(Z,ZF,ZD) - IF (KF.EQ.2) CALL CFS(Z,ZF,ZD) - ZP=(1.0D0,0.0D0) - DO 20 I=1,NR-1 -20 ZP=ZP*(Z-ZO(I)) - ZFD=ZF/ZP - ZQ=(0.0D0,0.0D0) - DO 30 I=1,NR-1 - ZW=(1.0D0,0.0D0) - DO 25 J=1,NR-1 - IF (J.EQ.I) GO TO 25 - ZW=ZW*(Z-ZO(J)) -25 CONTINUE -30 ZQ=ZQ+ZW - ZGD=(ZD-ZQ*ZFD)/ZP - Z=Z-ZFD/ZGD - W0=W - W=CDABS(Z) - IF (IT.LE.50.AND.DABS((W-W0)/W).GT.1.0D-12) GO TO 15 -35 ZO(NR)=Z - RETURN - END - - - -C ********************************** - - SUBROUTINE E1XA(X,E1) -C -C ============================================ -C Purpose: Compute exponential integral E1(x) -C Input : x --- Argument of E1(x) -C Output: E1 --- E1(x) ( x > 0 ) -C ============================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (X.EQ.0.0) THEN - E1=1.0D+300 - ELSE IF (X.LE.1.0) THEN - E1=-DLOG(X)+((((1.07857D-3*X-9.76004D-3)*X+5.519968D-2)*X - & -0.24991055D0)*X+0.99999193D0)*X-0.57721566D0 - ELSE - ES1=(((X+8.5733287401D0)*X+18.059016973D0)*X - & +8.6347608925D0)*X+0.2677737343D0 - ES2=(((X+9.5733223454D0)*X+25.6329561486D0)*X - & +21.0996530827D0)*X+3.9584969228D0 - E1=DEXP(-X)/X*ES1/ES2 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE LPMV0(V,M,X,PMV) -C -C ======================================================= -C Purpose: Compute the associated Legendre function -C Pmv(x) with an integer order and an arbitrary -C nonnegative degree v -C Input : x --- Argument of Pm(x) ( 
-1 ≤ x ≤ 1 ) -C m --- Order of Pmv(x) -C v --- Degree of Pmv(x) -C Output: PMV --- Pmv(x) -C Routine called: PSI_SPEC for computing Psi function -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - EPS=1.0D-14 - NV=INT(V) - V0=V-NV - IF (X.EQ.-1.0D0.AND.V.NE.NV) THEN - IF (M.EQ.0) PMV=-1.0D+300 - IF (M.NE.0) PMV=1.0D+300 - RETURN - ENDIF - C0=1.0D0 - IF (M.NE.0) THEN - RG=V*(V+M) - DO 10 J=1,M-1 -10 RG=RG*(V*V-J*J) - XQ=DSQRT(1.0D0-X*X) - R0=1.0D0 - DO 15 J=1,M -15 R0=.5D0*R0*XQ/J - C0=R0*RG - ENDIF - IF (V0.EQ.0.0D0) THEN - PMV=1.0D0 - R=1.0D0 - DO 20 K=1,NV-M - R=0.5D0*R*(-NV+M+K-1.0D0)*(NV+M+K)/(K*(K+M)) - & *(1.0D0+X) -20 PMV=PMV+R - PMV=(-1)**NV*C0*PMV - ELSE - IF (X.GE.-0.35D0) THEN - PMV=1.0D0 - R=1.0D0 - DO 25 K=1,100 - R=0.5D0*R*(-V+M+K-1.0D0)*(V+M+K)/(K*(M+K))*(1.0D0-X) - PMV=PMV+R - IF (K.GT.12.AND.DABS(R/PMV).LT.EPS) GO TO 30 -25 CONTINUE -30 PMV=(-1)**M*C0*PMV - ELSE - VS=DSIN(V*PI)/PI - PV0=0.0D0 - IF (M.NE.0) THEN - QR=DSQRT((1.0D0-X)/(1.0D0+X)) - R2=1.0D0 - DO 35 J=1,M -35 R2=R2*QR*J - S0=1.0D0 - R1=1.0D0 - DO 40 K=1,M-1 - R1=0.5D0*R1*(-V+K-1)*(V+K)/(K*(K-M))*(1.0D0+X) -40 S0=S0+R1 - PV0=-VS*R2/M*S0 - ENDIF - CALL PSI_SPEC(V,PSV) - PA=2.0D0*(PSV+EL)+PI/DTAN(PI*V)+1.0D0/V - S1=0.0D0 - DO 45 J=1,M -45 S1=S1+(J*J+V*V)/(J*(J*J-V*V)) - PMV=PA+S1-1.0D0/(M-V)+DLOG(0.5D0*(1.0D0+X)) - R=1.0D0 - DO 60 K=1,100 - R=0.5D0*R*(-V+M+K-1.0D0)*(V+M+K)/(K*(K+M))*(1.0D0+X) - S=0.0D0 - DO 50 J=1,M -50 S=S+((K+J)**2+V*V)/((K+J)*((K+J)**2-V*V)) - S2=0.0D0 - DO 55 J=1,K -55 S2=S2+1.0D0/(J*(J*J-V*V)) - PSS=PA+S+2.0D0*V*V*S2-1.0D0/(M+K-V) - & +DLOG(0.5D0*(1.0D0+X)) - R2=PSS*R - PMV=PMV+R2 - IF (DABS(R2/PMV).LT.EPS) GO TO 65 -60 CONTINUE -65 PMV=PV0+PMV*VS*C0 - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE LPMV(V,M,X,PMV) -C -C ======================================================= -C Purpose: Compute the associated Legendre function -C 
Pmv(x) with an integer order and an arbitrary -C degree v, using down-recursion for large degrees -C Input : x --- Argument of Pm(x) ( -1 ≤ x ≤ 1 ) -C m --- Order of Pmv(x) -C v --- Degree of Pmv(x) -C Output: PMV --- Pmv(x) -C Routine called: LPMV0 -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (X.EQ.-1.0D0.AND.V.NE.INT(V)) THEN - IF (M.EQ.0) PMV=-1.0D+300 - IF (M.NE.0) PMV=1.0D+300 - RETURN - ENDIF - VX=V - MX=M - IF (V.LT.0) THEN - VX=-VX-1 - ENDIF - NEG_M=0 - IF (M.LT.0.AND.(VX+M+1).GT.0D0) THEN -C XXX: does not handle the cases where AMS 8.2.5 -C does not help - NEG_M=1 - MX=-M - ENDIF - NV=INT(VX) - V0=VX-NV - IF (NV.GT.2.AND.NV.GT.MX) THEN -C Up-recursion on degree, AMS 8.5.3 - CALL LPMV0(V0+MX, MX, X, P0) - CALL LPMV0(V0+MX+1, MX, X, P1) - PMV = P1 - DO 10 J=MX+2,NV - PMV = ((2*(V0+J)-1)*X*P1 - (V0+J-1+MX)*P0) / (V0+J-MX) - P0 = P1 - P1 = PMV -10 CONTINUE - ELSE - CALL LPMV0(VX, MX, X, PMV) - ENDIF - IF (NEG_M.NE.0.AND.ABS(PMV).LT.1.0D+300) THEN -C AMS 8.2.5, for integer order - CALL GAMMA2(VX-MX+1, G1) - CALL GAMMA2(VX+MX+1, G2) - PMV = PMV*G1/G2 * (-1)**MX - ENDIF - END - - -C ********************************** - - SUBROUTINE CGAMA(X,Y,KF,GR,GI) -C -C ========================================================= -C Purpose: Compute the gamma function Г(z) or ln[Г(z)] -C for a complex argument -C Input : x --- Real part of z -C y --- Imaginary part of z -C KF --- Function code -C KF=0 for ln[Г(z)] -C KF=1 for Г(z) -C Output: GR --- Real part of ln[Г(z)] or Г(z) -C GI --- Imaginary part of ln[Г(z)] or Г(z) -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(10) - PI=3.141592653589793D0 - DATA A/8.333333333333333D-02,-2.777777777777778D-03, - & 7.936507936507937D-04,-5.952380952380952D-04, - & 8.417508417508418D-04,-1.917526917526918D-03, - & 6.410256410256410D-03,-2.955065359477124D-02, - & 1.796443723688307D-01,-1.39243221690590D+00/ - IF 
(Y.EQ.0.0D0.AND.X.EQ.INT(X).AND.X.LE.0.0D0) THEN - GR=1.0D+300 - GI=0.0D0 - RETURN - ELSE IF (X.LT.0.0D0) THEN - X1=X - Y1=Y - X=-X - Y=-Y - ELSE - Y1=0.0D0 - X1=X - ENDIF - X0=X - NA=0 - IF (X.LE.7.0) THEN - NA=INT(7-X) - X0=X+NA - ENDIF - Z1=DSQRT(X0*X0+Y*Y) - TH=DATAN(Y/X0) - GR=(X0-.5D0)*DLOG(Z1)-TH*Y-X0+0.5D0*DLOG(2.0D0*PI) - GI=TH*(X0-0.5D0)+Y*DLOG(Z1)-Y - DO 10 K=1,10 - T=Z1**(1-2*K) - GR=GR+A(K)*T*DCOS((2.0D0*K-1.0D0)*TH) -10 GI=GI-A(K)*T*DSIN((2.0D0*K-1.0D0)*TH) - IF (X.LE.7.0) THEN - GR1=0.0D0 - GI1=0.0D0 - DO 15 J=0,NA-1 - GR1=GR1+.5D0*DLOG((X+J)**2+Y*Y) -15 GI1=GI1+DATAN(Y/(X+J)) - GR=GR-GR1 - GI=GI-GI1 - ENDIF - IF (X1.LT.0.0D0) THEN - Z1=DSQRT(X*X+Y*Y) - TH1=DATAN(Y/X) - SR=-DSIN(PI*X)*DCOSH(PI*Y) - SI=-DCOS(PI*X)*DSINH(PI*Y) - Z2=DSQRT(SR*SR+SI*SI) - TH2=DATAN(SI/SR) - IF (SR.LT.0.0D0) TH2=PI+TH2 - GR=DLOG(PI/(Z1*Z2))-GR - GI=-TH1-TH2-GI - X=X1 - Y=Y1 - ENDIF - IF (KF.EQ.1) THEN - G0=DEXP(GR) - GR=G0*DCOS(GI) - GI=G0*DSIN(GI) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE ASWFB(M,N,C,X,KD,CV,S1F,S1D) -C -C =========================================================== -C Purpose: Compute the prolate and oblate spheroidal angular -C functions of the first kind and their derivatives -C Input : m --- Mode parameter, m = 0,1,2,... -C n --- Mode parameter, n = m,m+1,... 
-C c --- Spheroidal parameter -C x --- Argument of angular function, |x| < 1.0 -C KD --- Function code -C KD=1 for prolate; KD=-1 for oblate -C cv --- Characteristic value -C Output: S1F --- Angular function of the first kind -C S1D --- Derivative of the angular function of -C the first kind -C Routines called: -C (1) SDMN for computing expansion coefficients dk -C (2) LPMNS for computing associated Legendre function -C of the first kind Pmn(x) -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DF(200),PM(0:251),PD(0:251) - EPS=1.0D-14 - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - NM=25+INT((N-M)/2+C) - NM2=2*NM+M - CALL SDMN(M,N,C,CV,KD,DF) - CALL LPMNS(M,NM2,X,PM,PD) - SW=0.0D0 - SU1=0.0D0 - DO 10 K=1,NM - MK=M+2*(K-1)+IP - SU1=SU1+DF(K)*PM(MK) - IF (DABS(SW-SU1).LT.DABS(SU1)*EPS) GOTO 15 -10 SW=SU1 -15 S1F=(-1)**M*SU1 - SU1=0.0D0 - DO 20 K=1,NM - MK=M+2*(K-1)+IP - SU1=SU1+DF(K)*PD(MK) - IF (DABS(SW-SU1).LT.DABS(SU1)*EPS) GOTO 25 -20 SW=SU1 -25 S1D=(-1)**M*SU1 - RETURN - END - - - -C ********************************** - - SUBROUTINE CHGUS(A,B,X,HU,ID) -C -C ====================================================== -C Purpose: Compute confluent hypergeometric function -C U(a,b,x) for small argument x -C Input : a --- Parameter -C b --- Parameter ( b <> 0,-1,-2,...) 
-C x --- Argument -C Output: HU --- U(a,b,x) -C ID --- Estimated number of significant digits -C Routine called: GAMMA2 for computing gamma function -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - ID=-100 - PI=3.141592653589793D0 - CALL GAMMA2(A,GA) - CALL GAMMA2(B,GB) - XG1=1.0D0+A-B - CALL GAMMA2(XG1,GAB) - XG2=2.0D0-B - CALL GAMMA2(XG2,GB2) - HU0=PI/DSIN(PI*B) - R1=HU0/(GAB*GB) - R2=HU0*X**(1.0D0-B)/(GA*GB2) - HU=R1-R2 - HMAX=0.0D0 - HMIN=1.0D+300 - H0=0.0D0 - DO 10 J=1,150 - R1=R1*(A+J-1.0D0)/(J*(B+J-1.0D0))*X - R2=R2*(A-B+J)/(J*(1.0D0-B+J))*X - HU=HU+R1-R2 - HUA=DABS(HU) - IF (HUA.GT.HMAX) HMAX=HUA - IF (HUA.LT.HMIN) HMIN=HUA - IF (DABS(HU-H0).LT.DABS(HU)*1.0D-15) GO TO 15 -10 H0=HU -15 D1=LOG10(HMAX) - D2=0.0D0 - IF (HMIN.NE.0.0) D2=LOG10(HMIN) - ID=15-ABS(D1-D2) - RETURN - END - - - -C ********************************** - - SUBROUTINE ITTH0(X,TTH) -C -C =========================================================== -C Purpose: Evaluate the integral H0(t)/t with respect to t -C from x to infinity -C Input : x --- Lower limit ( x ≥ 0 ) -C Output: TTH --- Integration of H0(t)/t from x to infinity -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - S=1.0D0 - R=1.0D0 - IF (X.LT.24.5D0) THEN - DO 10 K=1,60 - R=-R*X*X*(2.0*K-1.0D0)/(2.0*K+1.0D0)**3 - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 15 -10 CONTINUE -15 TTH=PI/2.0D0-2.0D0/PI*X*S - ELSE - DO 20 K=1,10 - R=-R*(2.0*K-1.0D0)**3/((2.0*K+1.0D0)*X*X) - S=S+R - IF (DABS(R).LT.DABS(S)*1.0D-12) GO TO 25 -20 CONTINUE -25 TTH=2.0D0/(PI*X)*S - T=8.0D0/X - XT=X+.25D0*PI - F0=(((((.18118D-2*T-.91909D-2)*T+.017033D0)*T - & -.9394D-3)*T-.051445D0)*T-.11D-5)*T+.7978846D0 - G0=(((((-.23731D-2*T+.59842D-2)*T+.24437D-2)*T - & -.0233178D0)*T+.595D-4)*T+.1620695D0)*T - TTY=(F0*DSIN(XT)-G0*DCOS(XT))/(DSQRT(X)*X) - TTH=TTH+TTY - ENDIF - RETURN - END - -C ********************************** - - 
SUBROUTINE LGAMA(KF,X,GL) -C -C ================================================== -C Purpose: Compute gamma function Г(x) or ln[Г(x)] -C Input: x --- Argument of Г(x) ( x > 0 ) -C KF --- Function code -C KF=1 for Г(x); KF=0 for ln[Г(x)] -C Output: GL --- Г(x) or ln[Г(x)] -C ================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(10) - DATA A/8.333333333333333D-02,-2.777777777777778D-03, - & 7.936507936507937D-04,-5.952380952380952D-04, - & 8.417508417508418D-04,-1.917526917526918D-03, - & 6.410256410256410D-03,-2.955065359477124D-02, - & 1.796443723688307D-01,-1.39243221690590D+00/ - X0=X - N=0 - IF (X.EQ.1.0.OR.X.EQ.2.0) THEN - GL=0.0D0 - GO TO 20 - ELSE IF (X.LE.7.0) THEN - N=INT(7-X) - X0=X+N - ENDIF - X2=1.0D0/(X0*X0) - XP=6.283185307179586477D0 - GL0=A(10) - DO 10 K=9,1,-1 -10 GL0=GL0*X2+A(K) - GL=GL0/X0+0.5D0*DLOG(XP)+(X0-.5D0)*DLOG(X0)-X0 - IF (X.LE.7.0) THEN - DO 15 K=1,N - GL=GL-DLOG(X0-1.0D0) -15 X0=X0-1.0D0 - ENDIF -20 IF (KF.EQ.1) GL=DEXP(GL) - RETURN - END - -C ********************************** - - SUBROUTINE LQNA(N,X,QN,QD) -C -C ===================================================== -C Purpose: Compute Legendre functions Qn(x) and Qn'(x) -C Input : x --- Argument of Qn(x) ( -1 ≤ x ≤ 1 ) -C n --- Degree of Qn(x) ( n = 0,1,2,… ) -C Output: QN(n) --- Qn(x) -C QD(n) --- Qn'(x) -C ( 1.0D+300 stands for infinity ) -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (Q,X) - DIMENSION QN(0:N),QD(0:N) - IF (DABS(X).EQ.1.0D0) THEN - DO 10 K=0,N - QN(K)=1.0D+300 - QD(K)=-1.0D+300 -10 CONTINUE - ELSE IF (DABS(X).LT.1.0D0) THEN - Q0=0.5D0*DLOG((1.0D0+X)/(1.0D0-X)) - Q1=X*Q0-1.0D0 - QN(0)=Q0 - QN(1)=Q1 - QD(0)=1.0D0/(1.0D0-X*X) - QD(1)=QN(0)+X*QD(0) - DO 15 K=2,N - QF=((2*K-1)*X*Q1-(K-1)*Q0)/K - QN(K)=QF - QD(K)=(QN(K-1)-X*QF)*K/(1.0D0-X*X) - Q0=Q1 -15 Q1=QF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE DVLA(VA,X,PD) -C -C 
==================================================== -C Purpose: Compute parabolic cylinder functions Dv(x) -C for large argument -C Input: x --- Argument -C va --- Order -C Output: PD --- Dv(x) -C Routines called: -C (1) VVLA for computing Vv(x) for large |x| -C (2) GAMMA2 for computing Г(x) -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EPS=1.0D-12 - EP=DEXP(-.25*X*X) - A0=DABS(X)**VA*EP - R=1.0D0 - PD=1.0D0 - DO 10 K=1,16 - R=-0.5D0*R*(2.0*K-VA-1.0)*(2.0*K-VA-2.0)/(K*X*X) - PD=PD+R - IF (DABS(R/PD).LT.EPS) GO TO 15 -10 CONTINUE -15 PD=A0*PD - IF (X.LT.0.0D0) THEN - X1=-X - CALL VVLA(VA,X1,VL) - CALL GAMMA2(-VA,GL) - PD=PI*VL/GL+DCOS(PI*VA)*PD - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE IK01A(X,BI0,DI0,BI1,DI1,BK0,DK0,BK1,DK1) -C -C ========================================================= -C Purpose: Compute modified Bessel functions I0(x), I1(1), -C K0(x) and K1(x), and their derivatives -C Input : x --- Argument ( x ≥ 0 ) -C Output: BI0 --- I0(x) -C DI0 --- I0'(x) -C BI1 --- I1(x) -C DI1 --- I1'(x) -C BK0 --- K0(x) -C DK0 --- K0'(x) -C BK1 --- K1(x) -C DK1 --- K1'(x) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(12),B(12),A1(8) - PI=3.141592653589793D0 - EL=0.5772156649015329D0 - X2=X*X - IF (X.EQ.0.0D0) THEN - BI0=1.0D0 - BI1=0.0D0 - BK0=1.0D+300 - BK1=1.0D+300 - DI0=0.0D0 - DI1=0.5D0 - DK0=-1.0D+300 - DK1=-1.0D+300 - RETURN - ELSE IF (X.LE.18.0D0) THEN - BI0=1.0D0 - R=1.0D0 - DO 15 K=1,50 - R=0.25D0*R*X2/(K*K) - BI0=BI0+R - IF (DABS(R/BI0).LT.1.0D-15) GO TO 20 -15 CONTINUE -20 BI1=1.0D0 - R=1.0D0 - DO 25 K=1,50 - R=0.25D0*R*X2/(K*(K+1)) - BI1=BI1+R - IF (DABS(R/BI1).LT.1.0D-15) GO TO 30 -25 CONTINUE -30 BI1=0.5D0*X*BI1 - ELSE - DATA A/0.125D0,7.03125D-2, - & 7.32421875D-2,1.1215209960938D-1, - & 2.2710800170898D-1,5.7250142097473D-1, - & 1.7277275025845D0,6.0740420012735D0, - 
& 2.4380529699556D01,1.1001714026925D02, - & 5.5133589612202D02,3.0380905109224D03/ - DATA B/-0.375D0,-1.171875D-1, - & -1.025390625D-1,-1.4419555664063D-1, - & -2.7757644653320D-1,-6.7659258842468D-1, - & -1.9935317337513D0,-6.8839142681099D0, - & -2.7248827311269D01,-1.2159789187654D02, - & -6.0384407670507D02,-3.3022722944809D03/ - K0=12 - IF (X.GE.35.0) K0=9 - IF (X.GE.50.0) K0=7 - CA=DEXP(X)/DSQRT(2.0D0*PI*X) - BI0=1.0D0 - XR=1.0D0/X - DO 35 K=1,K0 -35 BI0=BI0+A(K)*XR**K - BI0=CA*BI0 - BI1=1.0D0 - DO 40 K=1,K0 -40 BI1=BI1+B(K)*XR**K - BI1=CA*BI1 - ENDIF - WW=0.0D0 - IF (X.LE.9.0D0) THEN - CT=-(DLOG(X/2.0D0)+EL) - BK0=0.0D0 - W0=0.0D0 - R=1.0D0 - DO 65 K=1,50 - W0=W0+1.0D0/K - R=0.25D0*R/(K*K)*X2 - BK0=BK0+R*(W0+CT) - IF (DABS((BK0-WW)/BK0).LT.1.0D-15) GO TO 70 -65 WW=BK0 -70 BK0=BK0+CT - ELSE - DATA A1/0.125D0,0.2109375D0, - & 1.0986328125D0,1.1775970458984D01, - & 2.1461706161499D02,5.9511522710323D03, - & 2.3347645606175D05,1.2312234987631D07/ - CB=0.5D0/X - XR2=1.0D0/X2 - BK0=1.0D0 - DO 75 K=1,8 -75 BK0=BK0+A1(K)*XR2**K - BK0=CB*BK0/BI0 - ENDIF - BK1=(1.0D0/X-BI1*BK0)/BI0 - DI0=BI1 - DI1=BI0-BI1/X - DK0=-BK1 - DK1=-BK0-BK1/X - RETURN - END - -C ********************************** - - SUBROUTINE CPBDN(N,Z,CPB,CPD) -C -C ================================================== -C Purpose: Compute the parabolic cylinder functions -C Dn(z) and Dn'(z) for a complex argument -C Input: z --- Complex argument of Dn(z) -C n --- Order of Dn(z) ( n=0,±1,±2,… ) -C Output: CPB(|n|) --- Dn(z) -C CPD(|n|) --- Dn'(z) -C Routines called: -C (1) CPDSA for computing Dn(z) for a small |z| -C (2) CPDLA for computing Dn(z) for a large |z| -C ================================================== -C - IMPLICIT DOUBLE PRECISION (A-B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CPB(0:*),CPD(0:*) - PI=3.141592653589793D0 - X=DBLE(Z) - A0=CDABS(Z) - C0=(0.0D0,0.0D0) - CA0=CDEXP(-0.25D0*Z*Z) - N0=0 - IF (N.GE.0) THEN - CF0=CA0 - CF1=Z*CA0 - CPB(0)=CF0 - CPB(1)=CF1 - DO 10 K=2,N - 
CF=Z*CF1-(K-1.0D0)*CF0 - CPB(K)=CF - CF0=CF1 -10 CF1=CF - ELSE - N0=-N - IF (X.LE.0.0.OR.CDABS(Z).EQ.0.0) THEN - CF0=CA0 - CPB(0)=CF0 - Z1=-Z - IF (A0.LE.7.0) THEN - CALL CPDSA(-1,Z1,CF1) - ELSE - CALL CPDLA(-1,Z1,CF1) - ENDIF - CF1=DSQRT(2.0D0*PI)/CA0-CF1 - CPB(1)=CF1 - DO 15 K=2,N0 - CF=(-Z*CF1+CF0)/(K-1.0D0) - CPB(K)=CF - CF0=CF1 -15 CF1=CF - ELSE - IF (A0.LE.3.0) THEN - CALL CPDSA(-N0,Z,CFA) - CPB(N0)=CFA - N1=N0+1 - CALL CPDSA(-N1,Z,CFB) - CPB(N1)=CFB - NM1=N0-1 - DO 20 K=NM1,0,-1 - CF=Z*CFA+(K+1.0D0)*CFB - CPB(K)=CF - CFB=CFA -20 CFA=CF - ELSE - M=100+ABS(N) - CFA=C0 - CFB=(1.0D-30,0.0D0) - DO 25 K=M,0,-1 - CF=Z*CFB+(K+1.0D0)*CFA - IF (K.LE.N0) CPB(K)=CF - CFA=CFB -25 CFB=CF - CS0=CA0/CF - DO 30 K=0,N0 -30 CPB(K)=CS0*CPB(K) - ENDIF - ENDIF - ENDIF - CPD(0)=-0.5D0*Z*CPB(0) - IF (N.GE.0) THEN - DO 35 K=1,N -35 CPD(K)=-0.5D0*Z*CPB(K)+K*CPB(K-1) - ELSE - DO 40 K=1,N0 -40 CPD(K)=0.5D0*Z*CPB(K)-CPB(K-1) - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE IK01B(X,BI0,DI0,BI1,DI1,BK0,DK0,BK1,DK1) -C -C ========================================================= -C Purpose: Compute modified Bessel functions I0(x), I1(1), -C K0(x) and K1(x), and their derivatives -C Input : x --- Argument ( x ≥ 0 ) -C Output: BI0 --- I0(x) -C DI0 --- I0'(x) -C BI1 --- I1(x) -C DI1 --- I1'(x) -C BK0 --- K0(x) -C DK0 --- K0'(x) -C BK1 --- K1(x) -C DK1 --- K1'(x) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (X.EQ.0.0D0) THEN - BI0=1.0D0 - BI1=0.0D0 - BK0=1.0D+300 - BK1=1.0D+300 - DI0=0.0D0 - DI1=0.5D0 - DK0=-1.0D+300 - DK1=-1.0D+300 - RETURN - ELSE IF (X.LE.3.75D0) THEN - T=X/3.75D0 - T2=T*T - BI0=(((((.0045813D0*T2+.0360768D0)*T2+.2659732D0) - & *T2+1.2067492D0)*T2+3.0899424D0)*T2 - & +3.5156229D0)*T2+1.0D0 - BI1=X*((((((.00032411D0*T2+.00301532D0)*T2 - & +.02658733D0)*T2+.15084934D0)*T2+.51498869D0) - & *T2+.87890594D0)*T2+.5D0) - ELSE - T=3.75D0/X - BI0=((((((((.00392377D0*T-.01647633D0)*T - & 
+.02635537D0)*T-.02057706D0)*T+.916281D-2)*T - & -.157565D-2)*T+.225319D-2)*T+.01328592D0)*T - & +.39894228D0)*DEXP(X)/DSQRT(X) - BI1=((((((((-.420059D-2*T+.01787654D0)*T - & -.02895312D0)*T+.02282967D0)*T-.01031555D0)*T - & +.163801D-2)*T-.00362018D0)*T-.03988024D0)*T - & +.39894228D0)*DEXP(X)/DSQRT(X) - ENDIF - IF (X.LE.2.0D0) THEN - T=X/2.0D0 - T2=T*T - BK0=(((((.0000074D0*T2+.0001075D0)*T2+.00262698D0) - & *T2+.0348859D0)*T2+.23069756D0)*T2+.4227842D0) - & *T2-.57721566D0-BI0*DLOG(T) - BK1=((((((-.00004686D0*T2-.00110404D0)*T2 - & -.01919402D0)*T2-.18156897D0)*T2-.67278579D0) - & *T2+.15443144D0)*T2+1.0D0)/X+BI1*DLOG(T) - ELSE - T=2.0D0/X - T2=T*T - BK0=((((((.00053208D0*T-.0025154D0)*T+.00587872D0) - & *T-.01062446D0)*T+.02189568D0)*T-.07832358D0) - & *T+1.25331414D0)*DEXP(-X)/DSQRT(X) - BK1=((((((-.00068245D0*T+.00325614D0)*T - & -.00780353D0)*T+.01504268D0)*T-.0365562D0)*T+ - & .23498619D0)*T+1.25331414D0)*DEXP(-X)/DSQRT(X) - ENDIF - DI0=BI1 - DI1=BI0-BI1/X - DK0=-BK1 - DK1=-BK0-BK1/X - RETURN - END - -C ********************************** - - SUBROUTINE BETA(P,Q,BT) -C -C ========================================== -C Purpose: Compute the beta function B(p,q) -C Input : p --- Parameter ( p > 0 ) -C q --- Parameter ( q > 0 ) -C Output: BT --- B(p,q) -C Routine called: GAMMA2 for computing Г(x) -C ========================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - CALL GAMMA2(P,GP) - CALL GAMMA2(Q,GQ) - PPQ=P+Q - CALL GAMMA2(PPQ,GPQ) - BT=GP*GQ/GPQ - RETURN - END - - - -C ********************************** - - SUBROUTINE LPN(N,X,PN,PD) -C -C =============================================== -C Purpose: Compute Legendre polynomials Pn(x) -C and their derivatives Pn'(x) -C Input : x --- Argument of Pn(x) -C n --- Degree of Pn(x) ( n = 0,1,...) 
-C Output: PN(n) --- Pn(x) -C PD(n) --- Pn'(x) -C =============================================== -C - IMPLICIT DOUBLE PRECISION (P,X) - DIMENSION PN(0:N),PD(0:N) - PN(0)=1.0D0 - PN(1)=X - PD(0)=0.0D0 - PD(1)=1.0D0 - P0=1.0D0 - P1=X - DO 10 K=2,N - PF=(2.0D0*K-1.0D0)/K*X*P1-(K-1.0D0)/K*P0 - PN(K)=PF - IF (DABS(X).EQ.1.0D0) THEN - PD(K)=0.5D0*X**(K+1)*K*(K+1.0D0) - ELSE - PD(K)=K*(P1-X*PF)/(1.0D0-X*X) - ENDIF - P0=P1 -10 P1=PF - RETURN - END - -C ********************************** - - SUBROUTINE FCOEF(KD,M,Q,A,FC) -C -C ===================================================== -C Purpose: Compute expansion coefficients for Mathieu -C functions and modified Mathieu functions -C Input : m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions -C KD --- Case code -C KD=1 for cem(x,q) ( m = 0,2,4,...) -C KD=2 for cem(x,q) ( m = 1,3,5,...) -C KD=3 for sem(x,q) ( m = 1,3,5,...) -C KD=4 for sem(x,q) ( m = 2,4,6,...) -C A --- Characteristic value of Mathieu -C functions for given m and q -C Output: FC(k) --- Expansion coefficients of Mathieu -C functions ( k= 1,2,...,KM ) -C FC(1),FC(2),FC(3),... correspond to -C A0,A2,A4,... for KD=1 case, A1,A3, -C A5,... for KD=2 case, B1,B3,B5,... -C for KD=3 case and B2,B4,B6,... 
for -C KD=4 case -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION FC(251) - DO 5 I=1,251 -5 FC(I)=0.0D0 - IF (Q.LE.1.0D0) THEN - QM=7.5+56.1*SQRT(Q)-134.7*Q+90.7*SQRT(Q)*Q - ELSE - QM=17.0+3.1*SQRT(Q)-.126*Q+.0037*SQRT(Q)*Q - ENDIF - KM=INT(QM+0.5*M) - IF (Q.EQ.0.0D0) THEN - DO 10 K=1,KM -10 FC(K)=0.0D0 - IF (KD.EQ.1) THEN - FC((M+2)/2)=1.0D0 - IF (M.EQ.0) FC(1)=1.0D0/DSQRT(2.0D0) - ELSE IF (KD.EQ.4) THEN - FC(M/2)=1.0D0 - ELSE - FC((M+1)/2)=1.0D0 - ENDIF - RETURN - ENDIF - KB=0 - S=0.0D0 - F=1.0D-100 - U=0.0D0 - FC(KM)=0.0D0 - F2=0.0D0 - IF (KD.EQ.1) THEN - DO 25 K=KM,3,-1 - V=U - U=F - F=(A-4.0D0*K*K)*U/Q-V - IF (DABS(F).LT.DABS(FC(K+1))) THEN - KB=K - FC(1)=1.0D-100 - SP=0.0D0 - F3=FC(K+1) - FC(2)=A/Q*FC(1) - FC(3)=(A-4.0D0)*FC(2)/Q-2.0D0*FC(1) - U=FC(2) - F1=FC(3) - DO 15 I=3,KB - V=U - U=F1 - F1=(A-4.0D0*(I-1.0D0)**2)*U/Q-V - FC(I+1)=F1 - IF (I.EQ.KB) F2=F1 - IF (I.NE.KB) SP=SP+F1*F1 -15 CONTINUE - SP=SP+2.0D0*FC(1)**2+FC(2)**2+FC(3)**2 - SS=S+SP*(F3/F2)**2 - S0=DSQRT(1.0D0/SS) - DO 20 J=1,KM - IF (J.LE.KB+1) THEN - FC(J)=S0*FC(J)*F3/F2 - ELSE - FC(J)=S0*FC(J) - ENDIF -20 CONTINUE - GO TO 85 - ELSE - FC(K)=F - S=S+F*F - ENDIF -25 CONTINUE - FC(2)=Q*FC(3)/(A-4.0D0-2.0D0*Q*Q/A) - FC(1)=Q/A*FC(2) - S=S+2.0D0*FC(1)**2+FC(2)**2 - S0=DSQRT(1.0D0/S) - DO 30 K=1,KM -30 FC(K)=S0*FC(K) - ELSE IF (KD.EQ.2.OR.KD.EQ.3) THEN - DO 35 K=KM,3,-1 - V=U - U=F - F=(A-(2.0D0*K-1)**2)*U/Q-V - IF (DABS(F).GE.DABS(FC(K))) THEN - FC(K-1)=F - S=S+F*F - ELSE - KB=K - F3=FC(K) - GO TO 45 - ENDIF -35 CONTINUE - FC(1)=Q/(A-1.0D0-(-1)**KD*Q)*FC(2) - S=S+FC(1)*FC(1) - S0=DSQRT(1.0D0/S) - DO 40 K=1,KM -40 FC(K)=S0*FC(K) - GO TO 85 -45 FC(1)=1.0D-100 - FC(2)=(A-1.0D0-(-1)**KD*Q)/Q*FC(1) - SP=0.0D0 - U=FC(1) - F1=FC(2) - DO 50 I=2,KB-1 - V=U - U=F1 - F1=(A-(2.0D0*I-1.0D0)**2)*U/Q-V - IF (I.NE.KB-1) THEN - FC(I+1)=F1 - SP=SP+F1*F1 - ELSE - F2=F1 - ENDIF -50 CONTINUE - SP=SP+FC(1)**2+FC(2)**2 - SS=S+SP*(F3/F2)**2 - S0=1.0D0/DSQRT(SS) - 
DO 55 J=1,KM - IF (J.LT.KB) FC(J)=S0*FC(J)*F3/F2 - IF (J.GE.KB) FC(J)=S0*FC(J) -55 CONTINUE - ELSE IF (KD.EQ.4) THEN - DO 60 K=KM,3,-1 - V=U - U=F - F=(A-4.0D0*K*K)*U/Q-V - IF (DABS(F).GE.DABS(FC(K))) THEN - FC(K-1)=F - S=S+F*F - ELSE - KB=K - F3=FC(K) - GO TO 70 - ENDIF -60 CONTINUE - FC(1)=Q/(A-4.0D0)*FC(2) - S=S+FC(1)*FC(1) - S0=DSQRT(1.0D0/S) - DO 65 K=1,KM -65 FC(K)=S0*FC(K) - GO TO 85 -70 FC(1)=1.0D-100 - FC(2)=(A-4.0D0)/Q*FC(1) - SP=0.0D0 - U=FC(1) - F1=FC(2) - DO 75 I=2,KB-1 - V=U - U=F1 - F1=(A-4.0D0*I*I)*U/Q-V - IF (I.NE.KB-1) THEN - FC(I+1)=F1 - SP=SP+F1*F1 - ELSE - F2=F1 - ENDIF -75 CONTINUE - SP=SP+FC(1)**2+FC(2)**2 - SS=S+SP*(F3/F2)**2 - S0=1.0D0/DSQRT(SS) - DO 80 J=1,KM - IF (J.LT.KB) FC(J)=S0*FC(J)*F3/F2 - IF (J.GE.KB) FC(J)=S0*FC(J) -80 CONTINUE - ENDIF -85 IF (FC(1).LT.0.0D0) THEN - DO 90 J=1,KM -90 FC(J)=-FC(J) - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE SPHI(N,X,NM,SI,DI) -C -C ======================================================== -C Purpose: Compute modified spherical Bessel functions -C of the first kind, in(x) and in'(x) -C Input : x --- Argument of in(x) -C n --- Order of in(x) ( n = 0,1,2,... 
) -C Output: SI(n) --- in(x) -C DI(n) --- in'(x) -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION SI(0:N),DI(0:N) - NM=N - IF (DABS(X).LT.1.0D-100) THEN - DO 10 K=0,N - SI(K)=0.0D0 -10 DI(K)=0.0D0 - SI(0)=1.0D0 - DI(1)=0.333333333333333D0 - RETURN - ENDIF - SI(0)=DSINH(X)/X - SI(1)=-(DSINH(X)/X-DCOSH(X))/X - SI0=SI(0) - IF (N.GE.2) THEN - M=MSTA1(X,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F=0.0D0 - F0=0.0D0 - F1=1.0D0-100 - DO 15 K=M,0,-1 - F=(2.0D0*K+3.0D0)*F1/X+F0 - IF (K.LE.NM) SI(K)=F - F0=F1 -15 F1=F - CS=SI0/F - DO 20 K=0,NM -20 SI(K)=CS*SI(K) - ENDIF - DI(0)=SI(1) - DO 25 K=1,NM -25 DI(K)=SI(K-1)-(K+1.0D0)/X*SI(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE PBWA(A,X,W1F,W1D,W2F,W2D) -C -C ====================================================== -C Purpose: Compute parabolic cylinder functions W(a,±x) -C and their derivatives -C Input : a --- Parameter ( 0 ≤ |a| ≤ 5 ) -C x --- Argument of W(a,±x) ( 0 ≤ |x| ≤ 5 ) -C Output : W1F --- W(a,x) -C W1D --- W'(a,x) -C W2F --- W(a,-x) -C W2D --- W'(a,-x) -C Routine called: -C CGAMA for computing complex gamma function -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX *16 (C,Z) - DIMENSION H(100),D(100) - EPS=1.0D-15 - P0=0.59460355750136D0 - IF (A.EQ.0.0D0) THEN - G1=3.625609908222D0 - G2=1.225416702465D0 - ELSE - X1=0.25D0 - Y1=0.5D0*A - CALL CGAMA(X1,Y1,1,UGR,UGI) - G1=DSQRT(UGR*UGR+UGI*UGI) - X2=0.75D0 - CALL CGAMA(X2,Y1,1,VGR,VGI) - G2=DSQRT(VGR*VGR+VGI*VGI) - ENDIF - F1=DSQRT(G1/G2) - F2=DSQRT(2.0D0*G2/G1) - H0=1.0D0 - H1=A - H(1)=A - DO 10 L1=4,200,2 - M=L1/2 - HL=A*H1-0.25D0*(L1-2.0D0)*(L1-3.0D0)*H0 - H(M)=HL - H0=H1 -10 H1=HL - Y1F=1.0D0 - R=1.0D0 - DO 15 K=1,100 - 
R=0.5D0*R*X*X/(K*(2.0D0*K-1.0D0)) - R1=H(K)*R - Y1F=Y1F+R1 - IF (DABS(R1/Y1F).LE.EPS.AND.K.GT.30) GO TO 20 -15 CONTINUE -20 Y1D=A - R=1.0D0 - DO 25 K=1,100 - R=0.5D0*R*X*X/(K*(2.0D0*K+1.0D0)) - R1=H(K+1)*R - Y1D=Y1D+R1 - IF (DABS(R1/Y1D).LE.EPS.AND.K.GT.30) GO TO 30 -25 CONTINUE -30 Y1D=X*Y1D - D1=1.0D0 - D2=A - D(1)=1.0D0 - D(2)=A - DO 40 L2=5,160,2 - M=(L2+1)/2 - DL=A*D2-0.25D0*(L2-2.0D0)*(L2-3.0D0)*D1 - D(M)=DL - D1=D2 -40 D2=DL - Y2F=1.0D0 - R=1.0D0 - DO 45 K=1,100 - R=0.5D0*R*X*X/(K*(2.0D0*K+1.0D0)) - R1=D(K+1)*R - Y2F=Y2F+R1 - IF (DABS(R1/Y2F).LE.EPS.AND.K.GT.30) GO TO 50 -45 CONTINUE -50 Y2F=X*Y2F - Y2D=1.0D0 - R=1.0D0 - DO 55 K=1,100 - R=0.5D0*R*X*X/(K*(2.0D0*K-1.0D0)) - R1=D(K+1)*R - Y2D=Y2D+R1 - IF (DABS(R1/Y2D).LE.EPS.AND.K.GT.30) GO TO 60 -55 CONTINUE -60 W1F=P0*(F1*Y1F-F2*Y2F) - W2F=P0*(F1*Y1F+F2*Y2F) - W1D=P0*(F1*Y1D-F2*Y2D) - W2D=P0*(F1*Y1D+F2*Y2D) - RETURN - END - - - -C ********************************** - - SUBROUTINE RMN1(M,N,C,X,DF,KD,R1F,R1D) -C -C ======================================================= -C Purpose: Compute prolate and oblate spheroidal radial -C functions of the first kind for given m, n, -C c and x -C Routines called: -C (1) SCKB for computing expansion coefficients c2k -C (2) SPHJ for computing the spherical Bessel -C functions of the first kind -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION CK(200),DF(200),SJ(0:251),DJ(0:251) - EPS=1.0D-14 - IP=1 - NM1=INT((N-M)/2) - IF (N-M.EQ.2*NM1) IP=0 - NM=25+NM1+INT(C) - REG=1.0D0 - IF (M+NM.GT.80) REG=1.0D-200 - R0=REG - DO 10 J=1,2*M+IP -10 R0=R0*J - R=R0 - SUC=R*DF(1) - SW=0.0D0 - DO 15 K=2,NM - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - SUC=SUC+R*DF(K) - IF (K.GT.NM1.AND.DABS(SUC-SW).LT.DABS(SUC)*EPS) GO TO 20 -15 SW=SUC -20 CONTINUE - IF (X.EQ.0.0) THEN - CALL SCKB(M,N,C,DF,CK) - SUM=0.0D0 - SW1=0.0D0 - DO 25 J=1,NM - SUM=SUM+CK(J) - IF (DABS(SUM-SW1).LT.DABS(SUM)*EPS) GO TO 30 -25 SW1=SUM -30 R1=1.0D0 - 
DO 35 J=1,(N+M+IP)/2 -35 R1=R1*(J+0.5D0*(N+M+IP)) - R2=1.0D0 - DO 40 J=1,M -40 R2=2.0D0*C*R2*J - R3=1.0D0 - DO 45 J=1,(N-M-IP)/2 -45 R3=R3*J - SA0=(2.0*(M+IP)+1.0)*R1/(2.0**N*C**IP*R2*R3) - IF (IP.EQ.0) THEN - R1F=SUM/(SA0*SUC)*DF(1)*REG - R1D=0.0D0 - ELSE IF (IP.EQ.1) THEN - R1F=0.0D0 - R1D=SUM/(SA0*SUC)*DF(1)*REG - ENDIF - RETURN - ENDIF - CX=C*X - NM2=2*NM+M - CALL SPHJ(NM2,CX,NM2,SJ,DJ) - A0=(1.0D0-KD/(X*X))**(0.5D0*M)/SUC - R1F=0.0D0 - SW=0.0D0 - LG=0 - DO 50 K=1,NM - L=2*K+M-N-2+IP - IF (L.EQ.4*INT(L/4)) LG=1 - IF (L.NE.4*INT(L/4)) LG=-1 - IF (K.EQ.1) THEN - R=R0 - ELSE - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - ENDIF - NP=M+2*K-2+IP - R1F=R1F+LG*R*DF(K)*SJ(NP) - IF (K.GT.NM1.AND.DABS(R1F-SW).LT.DABS(R1F)*EPS) GO TO 55 -50 SW=R1F -55 R1F=R1F*A0 - B0=KD*M/X**3.0D0/(1.0-KD/(X*X))*R1F - SUD=0.0D0 - SW=0.0D0 - DO 60 K=1,NM - L=2*K+M-N-2+IP - IF (L.EQ.4*INT(L/4)) LG=1 - IF (L.NE.4*INT(L/4)) LG=-1 - IF (K.EQ.1) THEN - R=R0 - ELSE - R=R*(M+K-1.0)*(M+K+IP-1.5D0)/(K-1.0D0)/(K+IP-1.5D0) - ENDIF - NP=M+2*K-2+IP - SUD=SUD+LG*R*DF(K)*DJ(NP) - IF (K.GT.NM1.AND.DABS(SUD-SW).LT.DABS(SUD)*EPS) GO TO 65 -60 SW=SUD -65 R1D=B0+A0*C*SUD - RETURN - END - - - -C ********************************** - - SUBROUTINE DVSA(VA,X,PD) -C -C =================================================== -C Purpose: Compute parabolic cylinder function Dv(x) -C for small argument -C Input: x --- Argument -C va --- Order -C Output: PD --- Dv(x) -C Routine called: GAMMA2 for computing Г(x) -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - EPS=1.0D-15 - PI=3.141592653589793D0 - SQ2=DSQRT(2.0D0) - EP=DEXP(-.25D0*X*X) - VA0=0.5D0*(1.0D0-VA) - IF (VA.EQ.0.0) THEN - PD=EP - ELSE - IF (X.EQ.0.0) THEN - IF (VA0.LE.0.0.AND.VA0.EQ.INT(VA0)) THEN - PD=0.0D0 - ELSE - CALL GAMMA2(VA0,GA0) - PD=DSQRT(PI)/(2.0D0**(-.5D0*VA)*GA0) - ENDIF - ELSE - CALL GAMMA2(-VA,G1) - A0=2.0D0**(-0.5D0*VA-1.0D0)*EP/G1 - VT=-.5D0*VA - CALL GAMMA2(VT,G0) - PD=G0 - R=1.0D0 - DO 10 
M=1,250 - VM=.5D0*(M-VA) - CALL GAMMA2(VM,GM) - R=-R*SQ2*X/M - R1=GM*R - PD=PD+R1 - IF (DABS(R1).LT.DABS(PD)*EPS) GO TO 15 -10 CONTINUE -15 PD=A0*PD - ENDIF - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE E1Z(Z,CE1) -C -C ==================================================== -C Purpose: Compute complex exponential integral E1(z) -C Input : z --- Argument of E1(z) -C Output: CE1 --- E1(z) -C ==================================================== -C - IMPLICIT COMPLEX*16 (C,Z) - IMPLICIT DOUBLE PRECISION (A,D-H,O-Y) - PI=3.141592653589793D0 - EL=0.5772156649015328D0 - X=DBLE(Z) - A0=CDABS(Z) - IF (A0.EQ.0.0D0) THEN - CE1=(1.0D+300,0.0D0) - ELSE IF (A0.LE.10.0.OR.X.LT.0.0.AND.A0.LT.20.0) THEN - CE1=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 10 K=1,150 - CR=-CR*K*Z/(K+1.0D0)**2 - CE1=CE1+CR - IF (CDABS(CR).LE.CDABS(CE1)*1.0D-15) GO TO 15 -10 CONTINUE -15 CE1=-EL-CDLOG(Z)+Z*CE1 - ELSE - CT0=(0.0D0,0.0D0) - DO 20 K=120,1,-1 - CT0=K/(1.0D0+K/(Z+CT0)) -20 CONTINUE - CT=1.0D0/(Z+CT0) - CE1=CDEXP(-Z)*CT - IF (X.LE.0.0.AND.DIMAG(Z).EQ.0.0) CE1=CE1-PI*(0.0D0,1.0D0) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE ITJYB(X,TJ,TY) -C -C ======================================================= -C Purpose: Integrate Bessel functions J0(t) and Y0(t) -C with respect to t from 0 to x ( x ≥ 0 ) -C Input : x --- Upper limit of the integral -C Output: TJ --- Integration of J0(t) from 0 to x -C TY --- Integration of Y0(t) from 0 to x -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.0.0D0) THEN - TJ=0.0D0 - TY=0.0D0 - ELSE IF (X.LE.4.0D0) THEN - X1=X/4.0D0 - T=X1*X1 - TJ=(((((((-.133718D-3*T+.2362211D-2)*T - & -.025791036D0)*T+.197492634D0)*T-1.015860606D0) - & *T+3.199997842D0)*T-5.333333161D0)*T+4.0D0)*X1 - TY=((((((((.13351D-4*T-.235002D-3)*T+.3034322D-2)* - & T-.029600855D0)*T+.203380298D0)*T-.904755062D0) - & *T+2.287317974D0)*T-2.567250468D0)*T 
- & +1.076611469D0)*X1 - TY=2.0D0/PI*DLOG(X/2.0D0)*TJ-TY - ELSE IF (X.LE.8.0D0) THEN - XT=X-.25D0*PI - T=16.0D0/(X*X) - F0=((((((.1496119D-2*T-.739083D-2)*T+.016236617D0) - & *T-.022007499D0)*T+.023644978D0) - & *T-.031280848D0)*T+.124611058D0)*4.0D0/X - G0=(((((.1076103D-2*T-.5434851D-2)*T+.01242264D0) - & *T-.018255209)*T+.023664841D0)*T-.049635633D0) - & *T+.79784879D0 - TJ=1.0D0-(F0*DCOS(XT)-G0*DSIN(XT))/DSQRT(X) - TY=-(F0*DSIN(XT)+G0*DCOS(XT))/DSQRT(X) - ELSE - T=64.0D0/(X*X) - XT=X-.25D0*PI - F0=(((((((-.268482D-4*T+.1270039D-3)*T - & -.2755037D-3)*T+.3992825D-3)*T-.5366169D-3)*T - & +.10089872D-2)*T-.40403539D-2)*T+.0623347304D0) - & *8.0D0/X - G0=((((((-.226238D-4*T+.1107299D-3)*T-.2543955D-3) - & *T+.4100676D-3)*T-.6740148D-3)*T+.17870944D-2) - & *T-.01256424405D0)*T+.79788456D0 - TJ=1.0D0-(F0*DCOS(XT)-G0*DSIN(XT))/DSQRT(X) - TY=-(F0*DSIN(XT)+G0*DCOS(XT))/DSQRT(X) - ENDIF - RETURN - END - - -C ********************************** - - SUBROUTINE CHGUL(A,B,X,HU,ID) -C -C ======================================================= -C Purpose: Compute the confluent hypergeometric function -C U(a,b,x) for large argument x -C Input : a --- Parameter -C b --- Parameter -C x --- Argument -C Output: HU --- U(a,b,x) -C ID --- Estimated number of significant digits -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - LOGICAL IL1,IL2 - ID=-100 - AA=A-B+1.0D0 - IL1=A.EQ.INT(A).AND.A.LE.0.0 - IL2=AA.EQ.INT(AA).AND.AA.LE.0.0 - NM=0 - IF (IL1) NM=ABS(A) - IF (IL2) NM=ABS(AA) - IF (IL1.OR.IL2) THEN - HU=1.0D0 - R=1.0D0 - DO 10 K=1,NM - R=-R*(A+K-1.0D0)*(A-B+K)/(K*X) - HU=HU+R -10 CONTINUE - HU=X**(-A)*HU - ID=10 - ELSE - HU=1.0D0 - R=1.0D0 - DO 15 K=1,25 - R=-R*(A+K-1.0D0)*(A-B+K)/(K*X) - RA=DABS(R) - IF (K.GT.5.AND.RA.GE.R0.OR.RA.LT.1.0D-15) GO TO 20 - R0=RA -15 HU=HU+R -20 ID=ABS(LOG10(RA)) - HU=X**(-A)*HU - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE GMN(M,N,C,X,BK,GF,GD) -C -C 
=========================================================== -C Purpose: Compute gmn(-ic,ix) and its derivative for oblate -C radial functions with a small argument -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BK(200) - EPS=1.0D-14 - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - NM=25+INT(0.5*(N-M)+C) - XM=(1.0D0+X*X)**(-0.5D0*M) - GF0=0.0D0 - GW=0.0D0 - DO 10 K=1,NM - GF0=GF0+BK(K)*X**(2.0*K-2.0) - IF (DABS((GF0-GW)/GF0).LT.EPS.AND.K.GE.10) GO TO 15 -10 GW=GF0 -15 GF=XM*GF0*X**(1-IP) - GD1=-M*X/(1.0D0+X*X)*GF - GD0=0.0D0 - DO 20 K=1,NM - IF (IP.EQ.0) THEN - GD0=GD0+(2.0D0*K-1.0)*BK(K)*X**(2.0*K-2.0) - ELSE - GD0=GD0+2.0D0*K*BK(K+1)*X**(2.0*K-1.0) - ENDIF - IF (DABS((GD0-GW)/GD0).LT.EPS.AND.K.GE.10) GO TO 25 -20 GW=GD0 -25 GD=GD1+XM*GD0 - RETURN - END - - - -C ********************************** - - SUBROUTINE ITJYA(X,TJ,TY) -C -C ========================================================== -C Purpose: Integrate Bessel functions J0(t) & Y0(t) with -C respect to t from 0 to x -C Input : x --- Upper limit of the integral ( x >= 0 ) -C Output: TJ --- Integration of J0(t) from 0 to x -C TY --- Integration of Y0(t) from 0 to x -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(18) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - EPS=1.0D-12 - IF (X.EQ.0.0D0) THEN - TJ=0.0D0 - TY=0.0D0 - ELSE IF (X.LE.20.0D0) THEN - X2=X*X - TJ=X - R=X - DO 10 K=1,60 - R=-.25D0*R*(2*K-1.0D0)/(2*K+1.0D0)/(K*K)*X2 - TJ=TJ+R - IF (DABS(R).LT.DABS(TJ)*EPS) GO TO 15 -10 CONTINUE -15 TY1=(EL+DLOG(X/2.0D0))*TJ - RS=0.0D0 - TY2=1.0D0 - R=1.0D0 - DO 20 K=1,60 - R=-.25D0*R*(2*K-1.0D0)/(2*K+1.0D0)/(K*K)*X2 - RS=RS+1.0D0/K - R2=R*(RS+1.0D0/(2.0D0*K+1.0D0)) - TY2=TY2+R2 - IF (DABS(R2).LT.DABS(TY2)*EPS) GO TO 25 -20 CONTINUE -25 TY=(TY1-X*TY2)*2.0D0/PI - ELSE - A0=1.0D0 - A1=5.0D0/8.0D0 - A(1)=A1 - DO 30 K=1,16 - AF=((1.5D0*(K+.5D0)*(K+5.0D0/6.0D0)*A1-.5D0 - & 
*(K+.5D0)*(K+.5D0)*(K-.5D0)*A0))/(K+1.0D0) - A(K+1)=AF - A0=A1 -30 A1=AF - BF=1.0D0 - R=1.0D0 - DO 35 K=1,8 - R=-R/(X*X) -35 BF=BF+A(2*K)*R - BG=A(1)/X - R=1.0D0/X - DO 40 K=1,8 - R=-R/(X*X) -40 BG=BG+A(2*K+1)*R - XP=X+.25D0*PI - RC=DSQRT(2.0D0/(PI*X)) - TJ=1.0D0-RC*(BF*DCOS(XP)+BG*DSIN(XP)) - TY=RC*(BG*DCOS(XP)-BF*DSIN(XP)) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE STVLV(V,X,SLV) -C -C ====================================================== -C Purpose: Compute modified Struve function Lv(x) with -C an arbitrary order v -C Input : v --- Order of Lv(x) ( |v| ≤ 20 ) -C x --- Argument of Lv(x) ( x ≥ 0 ) -C Output: SLV --- Lv(x) -C Routine called: GAMMA2 to compute the gamma function -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.0.0D0) THEN - IF (V.GT.-1.0.OR.INT(V)-V.EQ.0.5D0) THEN - SLV=0.0D0 - ELSE IF (V.LT.-1.0D0) THEN - SLV=(-1)**(INT(0.5D0-V)-1)*1.0D+300 - ELSE IF (V.EQ.-1.0D0) THEN - SLV=2.0D0/PI - ENDIF - RETURN - ENDIF - IF (X.LE.40.0D0) THEN - V0=V+1.5D0 - CALL GAMMA2(V0,GA) - S=2.0D0/(DSQRT(PI)*GA) - R1=1.0D0 - DO 10 K=1,100 - VA=K+1.5D0 - CALL GAMMA2(VA,GA) - VB=V+K+1.5D0 - CALL GAMMA2(VB,GB) - R1=R1*(0.5D0*X)**2 - R2=R1/(GA*GB) - S=S+R2 - IF (DABS(R2/S).LT.1.0D-12) GO TO 15 -10 CONTINUE -15 SLV=(0.5D0*X)**(V+1.0D0)*S - ELSE - SA=-1.0D0/PI*(0.5D0*X)**(V-1.0) - V0=V+0.5D0 - CALL GAMMA2(V0,GA) - S=-DSQRT(PI)/GA - R1=-1.0D0 - DO 20 K=1,12 - VA=K+0.5D0 - CALL GAMMA2(VA,GA) - VB=-K+V+0.5D0 - CALL GAMMA2(VB,GB) - R1=-R1/(0.5D0*X)**2 - S=S+R1*GA/GB -20 CONTINUE - S0=SA*S - U=DABS(V) - N=INT(U) - U0=U-N - BIV0=0.0D0 - DO 35 L=0,1 - VT=U0+L - R=1.0D0 - BIV=1.0D0 - DO 25 K=1,16 - R=-0.125*R*(4.0*VT*VT-(2.0*K-1.0D0)**2)/(K*X) - BIV=BIV+R - IF (DABS(R/BIV).LT.1.0D-12) GO TO 30 -25 CONTINUE -30 IF (L.EQ.0) BIV0=BIV -35 CONTINUE - BF=0.0D0 - BF0=BIV0 - BF1=BIV - DO 40 K=2,N - BF=-2.0D0*(K-1.0+U0)/X*BF1+BF0 - BF0=BF1 -40 BF1=BF - IF (N.EQ.0) BIV=BIV0 
- IF (N.GT.1) BIV=BF - SLV=DEXP(X)/DSQRT(2.0D0*PI*X)*BIV+S0 - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE RCTY(N,X,NM,RY,DY) -C -C ======================================================== -C Purpose: Compute Riccati-Bessel functions of the second -C kind and their derivatives -C Input: x --- Argument of Riccati-Bessel function -C n --- Order of yn(x) -C Output: RY(n) --- x·yn(x) -C DY(n) --- [x·yn(x)]' -C NM --- Highest order computed -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION RY(0:N),DY(0:N) - NM=N - IF (X.LT.1.0D-60) THEN - DO 10 K=0,N - RY(K)=-1.0D+300 -10 DY(K)=1.0D+300 - RY(0)=-1.0D0 - DY(0)=0.0D0 - RETURN - ENDIF - RY(0)=-DCOS(X) - RY(1)=RY(0)/X-DSIN(X) - RF0=RY(0) - RF1=RY(1) - DO 15 K=2,N - RF2=(2.0D0*K-1.0D0)*RF1/X-RF0 - IF (DABS(RF2).GT.1.0D+300) GO TO 20 - RY(K)=RF2 - RF0=RF1 -15 RF1=RF2 -20 NM=K-1 - DY(0)=DSIN(X) - DO 25 K=1,NM -25 DY(K)=-K*RY(K)/X+RY(K-1) - RETURN - END - -C ********************************** - - SUBROUTINE LPNI(N,X,PN,PD,PL) -C -C ===================================================== -C Purpose: Compute Legendre polynomials Pn(x), Pn'(x) -C and the integral of Pn(t) from 0 to x -C Input : x --- Argument of Pn(x) -C n --- Degree of Pn(x) ( n = 0,1,... 
) -C Output: PN(n) --- Pn(x) -C PD(n) --- Pn'(x) -C PL(n) --- Integral of Pn(t) from 0 to x -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (P,R,X) - DIMENSION PN(0:N),PD(0:N),PL(0:N) - PN(0)=1.0D0 - PN(1)=X - PD(0)=0.0D0 - PD(1)=1.0D0 - PL(0)=X - PL(1)=0.5D0*X*X - P0=1.0D0 - P1=X - DO 15 K=2,N - PF=(2.0D0*K-1.0D0)/K*X*P1-(K-1.0D0)/K*P0 - PN(K)=PF - IF (DABS(X).EQ.1.0D0) THEN - PD(K)=0.5D0*X**(K+1)*K*(K+1.0D0) - ELSE - PD(K)=K*(P1-X*PF)/(1.0D0-X*X) - ENDIF - PL(K)=(X*PN(K)-PN(K-1))/(K+1.0D0) - P0=P1 - P1=PF - IF (K.EQ.2*INT(K/2)) GO TO 15 - R=1.0D0/(K+1.0D0) - N1=(K-1)/2 - DO 10 J=1,N1 -10 R=(0.5D0/J-1.0D0)*R - PL(K)=PL(K)+R -15 CONTINUE - RETURN - END - -C ********************************** - - SUBROUTINE KLVNA(X,BER,BEI,GER,GEI,DER,DEI,HER,HEI) -C -C ====================================================== -C Purpose: Compute Kelvin functions ber x, bei x, ker x -C and kei x, and their derivatives ( x > 0 ) -C Input : x --- Argument of Kelvin functions -C Output: BER --- ber x -C BEI --- bei x -C GER --- ker x -C GEI --- kei x -C DER --- ber'x -C DEI --- bei'x -C HER --- ker'x -C HEI --- kei'x -C ================================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - EL=.5772156649015329D0 - EPS=1.0D-15 - IF (X.EQ.0.0D0) THEN - BER=1.0D0 - BEI=0.0D0 - GER=1.0D+300 - GEI=-0.25D0*PI - DER=0.0D0 - DEI=0.0D0 - HER=-1.0D+300 - HEI=0.0D0 - RETURN - ENDIF - X2=0.25D0*X*X - X4=X2*X2 - IF (DABS(X).LT.10.0D0) THEN - BER=1.0D0 - R=1.0D0 - DO 10 M=1,60 - R=-0.25D0*R/(M*M)/(2.0D0*M-1.0D0)**2*X4 - BER=BER+R - IF (DABS(R).LT.DABS(BER)*EPS) GO TO 15 -10 CONTINUE -15 BEI=X2 - R=X2 - DO 20 M=1,60 - R=-0.25D0*R/(M*M)/(2.0D0*M+1.0D0)**2*X4 - BEI=BEI+R - IF (DABS(R).LT.DABS(BEI)*EPS) GO TO 25 -20 CONTINUE -25 GER=-(DLOG(X/2.0D0)+EL)*BER+0.25D0*PI*BEI - R=1.0D0 - GS=0.0D0 - DO 30 M=1,60 - R=-0.25D0*R/(M*M)/(2.0D0*M-1.0D0)**2*X4 - GS=GS+1.0D0/(2.0D0*M-1.0D0)+1.0D0/(2.0D0*M) - GER=GER+R*GS - IF 
(DABS(R*GS).LT.DABS(GER)*EPS) GO TO 35 -30 CONTINUE -35 GEI=X2-(DLOG(X/2.0D0)+EL)*BEI-0.25D0*PI*BER - R=X2 - GS=1.0D0 - DO 40 M=1,60 - R=-0.25D0*R/(M*M)/(2.0D0*M+1.0D0)**2*X4 - GS=GS+1.0D0/(2.0D0*M)+1.0D0/(2.0D0*M+1.0D0) - GEI=GEI+R*GS - IF (DABS(R*GS).LT.DABS(GEI)*EPS) GO TO 45 -40 CONTINUE -45 DER=-0.25D0*X*X2 - R=DER - DO 50 M=1,60 - R=-0.25D0*R/M/(M+1.0D0)/(2.0D0*M+1.0D0)**2*X4 - DER=DER+R - IF (DABS(R).LT.DABS(DER)*EPS) GO TO 55 -50 CONTINUE -55 DEI=0.5D0*X - R=DEI - DO 60 M=1,60 - R=-0.25D0*R/(M*M)/(2.D0*M-1.D0)/(2.D0*M+1.D0)*X4 - DEI=DEI+R - IF (DABS(R).LT.DABS(DEI)*EPS) GO TO 65 -60 CONTINUE -65 R=-0.25D0*X*X2 - GS=1.5D0 - HER=1.5D0*R-BER/X-(DLOG(X/2.D0)+EL)*DER+0.25*PI*DEI - DO 70 M=1,60 - R=-0.25D0*R/M/(M+1.0D0)/(2.0D0*M+1.0D0)**2*X4 - GS=GS+1.0D0/(2*M+1.0D0)+1.0D0/(2*M+2.0D0) - HER=HER+R*GS - IF (DABS(R*GS).LT.DABS(HER)*EPS) GO TO 75 -70 CONTINUE -75 R=0.5D0*X - GS=1.0D0 - HEI=0.5D0*X-BEI/X-(DLOG(X/2.D0)+EL)*DEI-0.25*PI*DER - DO 80 M=1,60 - R=-0.25D0*R/(M*M)/(2*M-1.0D0)/(2*M+1.0D0)*X4 - GS=GS+1.0D0/(2.0D0*M)+1.0D0/(2*M+1.0D0) - HEI=HEI+R*GS - IF (DABS(R*GS).LT.DABS(HEI)*EPS) RETURN -80 CONTINUE - ELSE - PP0=1.0D0 - PN0=1.0D0 - QP0=0.0D0 - QN0=0.0D0 - R0=1.0D0 - KM=18 - IF (DABS(X).GE.40.0) KM=10 - FAC=1.0D0 - DO 85 K=1,KM - FAC=-FAC - XT=0.25D0*K*PI-INT(0.125D0*K)*2.0D0*PI - CS=COS(XT) - SS=SIN(XT) - R0=0.125D0*R0*(2.0D0*K-1.0D0)**2/K/X - RC=R0*CS - RS=R0*SS - PP0=PP0+RC - PN0=PN0+FAC*RC - QP0=QP0+RS -85 QN0=QN0+FAC*RS - XD=X/DSQRT(2.0D0) - XE1=DEXP(XD) - XE2=DEXP(-XD) - XC1=1.D0/DSQRT(2.0D0*PI*X) - XC2=DSQRT(.5D0*PI/X) - CP0=DCOS(XD+0.125D0*PI) - CN0=DCOS(XD-0.125D0*PI) - SP0=DSIN(XD+0.125D0*PI) - SN0=DSIN(XD-0.125D0*PI) - GER=XC2*XE2*(PN0*CP0-QN0*SP0) - GEI=XC2*XE2*(-PN0*SP0-QN0*CP0) - BER=XC1*XE1*(PP0*CN0+QP0*SN0)-GEI/PI - BEI=XC1*XE1*(PP0*SN0-QP0*CN0)+GER/PI - PP1=1.0D0 - PN1=1.0D0 - QP1=0.0D0 - QN1=0.0D0 - R1=1.0D0 - FAC=1.0D0 - DO 90 K=1,KM - FAC=-FAC - XT=0.25D0*K*PI-INT(0.125D0*K)*2.0D0*PI - CS=DCOS(XT) - SS=DSIN(XT) - 
R1=0.125D0*R1*(4.D0-(2.0D0*K-1.0D0)**2)/K/X - RC=R1*CS - RS=R1*SS - PP1=PP1+FAC*RC - PN1=PN1+RC - QP1=QP1+FAC*RS - QN1=QN1+RS -90 CONTINUE - HER=XC2*XE2*(-PN1*CN0+QN1*SN0) - HEI=XC2*XE2*(PN1*SN0+QN1*CN0) - DER=XC1*XE1*(PP1*CP0+QP1*SP0)-HEI/PI - DEI=XC1*XE1*(PP1*SP0-QP1*CP0)+HER/PI - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE CHGUBI(A,B,X,HU,ID) -C -C ====================================================== -C Purpose: Compute confluent hypergeometric function -C U(a,b,x) with integer b ( b = ±1,±2,... ) -C Input : a --- Parameter -C b --- Parameter -C x --- Argument -C Output: HU --- U(a,b,x) -C ID --- Estimated number of significant digits -C Routines called: -C (1) GAMMA2 for computing gamma function Г(x) -C (2) PSI_SPEC for computing psi function -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - ID=-100 - EL=0.5772156649015329D0 - N=ABS(B-1) - RN1=1.0D0 - RN=1.0D0 - DO 10 J=1,N - RN=RN*J - IF (J.EQ.N-1) RN1=RN -10 CONTINUE - CALL PSI_SPEC(A,PS) - CALL GAMMA2(A,GA) - IF (B.GT.0.0) THEN - A0=A - A1=A-N - A2=A1 - CALL GAMMA2(A1,GA1) - UA=(-1)**(N-1)/(RN*GA1) - UB=RN1/GA*X**(-N) - ELSE - A0=A+N - A1=A0 - A2=A - CALL GAMMA2(A1,GA1) - UA=(-1)**(N-1)/(RN*GA)*X**N - UB=RN1/GA1 - ENDIF - HM1=1.0D0 - R=1.0D0 - HMAX=0.0D0 - HMIN=1.0D+300 - H0=0D0 - DO 15 K=1,150 - R=R*(A0+K-1.0D0)*X/((N+K)*K) - HM1=HM1+R - HU1=DABS(HM1) - IF (HU1.GT.HMAX) HMAX=HU1 - IF (HU1.LT.HMIN) HMIN=HU1 - IF (DABS(HM1-H0).LT.DABS(HM1)*1.0D-15) GO TO 20 -15 H0=HM1 -20 DA1=LOG10(HMAX) - DA2=0.0D0 - IF (HMIN.NE.0.0) DA2=LOG10(HMIN) - ID=15-ABS(DA1-DA2) - HM1=HM1*DLOG(X) - S0=0.0D0 - DO 25 M=1,N - IF (B.GE.0.0) S0=S0-1.0D0/M -25 IF (B.LT.0.0) S0=S0+(1.0D0-A)/(M*(A+M-1.0D0)) - HM2=PS+2.0D0*EL+S0 - R=1.0D0 - HMAX=0.0D0 - HMIN=1.0D+300 - DO 50 K=1,150 - S1=0.0D0 - S2=0.0D0 - IF (B.GT.0.0) THEN - DO 30 M=1,K -30 S1=S1-(M+2.0D0*A-2.0D0)/(M*(M+A-1.0D0)) - DO 35 M=1,N -35 S2=S2+1.0D0/(K+M) - ELSE - DO 40 M=1,K+N -40 
S1=S1+(1.0D0-A)/(M*(M+A-1.0D0)) - DO 45 M=1,K -45 S2=S2+1.0D0/M - ENDIF - HW=2.0D0*EL+PS+S1-S2 - R=R*(A0+K-1.0D0)*X/((N+K)*K) - HM2=HM2+R*HW - HU2=DABS(HM2) - IF (HU2.GT.HMAX) HMAX=HU2 - IF (HU2.LT.HMIN) HMIN=HU2 - IF (DABS((HM2-H0)/HM2).LT.1.0D-15) GO TO 55 -50 H0=HM2 -55 DB1=LOG10(HMAX) - DB2=0.0D0 - IF (HMIN.NE.0.0) DB2=LOG10(HMIN) - ID1=15-ABS(DB1-DB2) - IF (ID1.LT.ID) ID=ID1 - HM3=1.0D0 - IF (N.EQ.0) HM3=0.0D0 - R=1.0D0 - DO 60 K=1,N-1 - R=R*(A2+K-1.0D0)/((K-N)*K)*X -60 HM3=HM3+R - SA=UA*(HM1+HM2) - SB=UB*HM3 - HU=SA+SB - ID2=0.0D0 - IF (SA.NE.0.0) ID1=INT(LOG10(ABS(SA))) - IF (HU.NE.0.0) ID2=INT(LOG10(ABS(HU))) - IF (SA*SB.LT.0.0) ID=ID-ABS(ID1-ID2) - RETURN - END - - - -C ********************************** - - SUBROUTINE CYZO(NT,KF,KC,ZO,ZV) -C -C =========================================================== -C Purpose : Compute the complex zeros of Y0(z), Y1(z) and -C Y1'(z), and their associated values at the zeros -C using the modified Newton's iteration method -C Input: NT --- Total number of zeros/roots -C KF --- Function choice code -C KF=0 for Y0(z) & Y1(z0) -C KF=1 for Y1(z) & Y0(z1) -C KF=2 for Y1'(z) & Y1(z1') -C KC --- Choice code -C KC=0 for complex roots -C KC=1 for real roots -C Output: ZO(L) --- L-th zero of Y0(z) or Y1(z) or Y1'(z) -C ZV(L) --- Value of Y0'(z) or Y1'(z) or Y1(z) -C at the L-th zero -C Routine called: CY01 for computing Y0(z) and Y1(z), and -C their derivatives -C =========================================================== - IMPLICIT DOUBLE PRECISION (H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION ZO(NT),ZV(NT) - X=0.0D0 - Y=0.0D0 - H=0.0D0 - IF (KC.EQ.0) THEN - X=-2.4D0 - Y=0.54D0 - H=3.14D0 - ELSE IF (KC.EQ.1) THEN - X=0.89 - Y=0.0 - H=-3.14 - ENDIF - IF (KF.EQ.1) X=-0.503 - IF (KF.EQ.2) X=0.577 - ZERO=CMPLX(X,Y) - Z=ZERO - W=0.0D0 - DO 35 NR=1,NT - IF (NR.NE.1) Z=ZO(NR-1)-H - IT=0 -15 IT=IT+1 - CALL CY01(KF,Z,ZF,ZD) - ZP=(1.0D0,0.0D0) - DO 20 I=1,NR-1 -20 ZP=ZP*(Z-ZO(I)) - ZFD=ZF/ZP - ZQ=(0.0D0,0.0D0) - DO 30 I=1,NR-1 - 
ZW=(1.0D0,0.0D0) - DO 25 J=1,NR-1 - IF (J.EQ.I) GO TO 25 - ZW=ZW*(Z-ZO(J)) -25 CONTINUE - ZQ=ZQ+ZW -30 CONTINUE - ZGD=(ZD-ZQ*ZFD)/ZP - Z=Z-ZFD/ZGD - W0=W - W=CDABS(Z) - IF (IT.LE.50.AND.DABS((W-W0)/W).GT.1.0D-12) GO TO 15 - ZO(NR)=Z -35 CONTINUE - DO 40 I=1,NT - Z=ZO(I) - IF (KF.EQ.0.OR.KF.EQ.2) THEN - CALL CY01(1,Z,ZF,ZD) - ZV(I)=ZF - ELSE IF (KF.EQ.1) THEN - CALL CY01(0,Z,ZF,ZD) - ZV(I)=ZF - ENDIF -40 CONTINUE - RETURN - END - - - -C ********************************** - - SUBROUTINE KLVNB(X,BER,BEI,GER,GEI,DER,DEI,HER,HEI) -C -C ====================================================== -C Purpose: Compute Kelvin functions ber x, bei x, ker x -C and kei x, and their derivatives ( x > 0 ) -C Input : x --- Argument of Kelvin functions -C Output: BER --- ber x -C BEI --- bei x -C GER --- ker x -C GEI --- kei x -C DER --- ber'x -C DEI --- bei'x -C HER --- ker'x -C HEI --- kei'x -C ================================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.0.0D0) THEN - BER=1.0D0 - BEI=0.0D0 - GER=1.0D+300 - GEI=-.25D0*PI - DER=0.0D0 - DEI=0.0D0 - HER=-1.0D+300 - HEI=0.0D0 - ELSE IF (X.LT.8.0D0) THEN - T=X/8.0D0 - T2=T*T - U=T2*T2 - BER=((((((-.901D-5*U+.122552D-2)*U-.08349609D0)*U - & +2.64191397D0)*U-32.36345652D0)*U - & +113.77777774D0)*U-64.0D0)*U+1.0D0 - BEI=T*T*((((((.11346D-3*U-.01103667D0)*U - & +.52185615D0)*U-10.56765779D0)*U - & +72.81777742D0)*U-113.77777774D0)*U+16.0D0) - GER=((((((-.2458D-4*U+.309699D-2)*U-.19636347D0) - & *U+5.65539121D0)*U-60.60977451D0)*U+ - & 171.36272133D0)*U-59.05819744D0)*U-.57721566D0 - GER=GER-DLOG(.5D0*X)*BER+.25D0*PI*BEI - GEI=T2*((((((.29532D-3*U-.02695875D0)*U - & +1.17509064D0)*U-21.30060904D0)*U - & +124.2356965D0)*U-142.91827687D0)*U - & +6.76454936D0) - GEI=GEI-DLOG(.5D0*X)*BEI-.25D0*PI*BER - DER=X*T2*((((((-.394D-5*U+.45957D-3)*U - & -.02609253D0)*U+.66047849D0)*U-6.0681481D0)*U - & +14.22222222D0)*U-4.0D0) - DEI=X*((((((.4609D-4*U-.379386D-2)*U+.14677204D0) - & 
*U-2.31167514D0)*U+11.37777772D0)*U - & -10.66666666D0)*U+.5D0) - HER=X*T2*((((((-.1075D-4*U+.116137D-2)*U - & -.06136358D0)*U+1.4138478D0)*U-11.36433272D0) - & *U+21.42034017D0)*U-3.69113734D0) - HER=HER-DLOG(.5D0*X)*DER-BER/X+.25D0*PI*DEI - HEI=X*((((((.11997D-3*U-.926707D-2)*U - & +.33049424D0)*U-4.65950823D0)*U+19.41182758D0) - & *U-13.39858846D0)*U+.21139217D0) - HEI=HEI-DLOG(.5D0*X)*DEI-BEI/X-.25D0*PI*DER - ELSE - T=8.0D0/X - TNR=0.0D0 - TNI=0.0D0 - DO 10 L=1,2 - V=(-1)**L*T - TPR=((((.6D-6*V-.34D-5)*V-.252D-4)*V-.906D-4) - & *V*V+.0110486D0)*V - TPI=((((.19D-5*V+.51D-5)*V*V-.901D-4)*V - & -.9765D-3)*V-.0110485D0)*V-.3926991D0 - IF (L.EQ.1) THEN - TNR=TPR - TNI=TPI - ENDIF -10 CONTINUE - YD=X/DSQRT(2.0D0) - YE1=DEXP(YD+TPR) - YE2=DEXP(-YD+TNR) - YC1=1.0D0/DSQRT(2.0D0*PI*X) - YC2=DSQRT(PI/(2.0D0*X)) - CSP=DCOS(YD+TPI) - SSP=DSIN(YD+TPI) - CSN=DCOS(-YD+TNI) - SSN=DSIN(-YD+TNI) - GER=YC2*YE2*CSN - GEI=YC2*YE2*SSN - FXR=YC1*YE1*CSP - FXI=YC1*YE1*SSP - BER=FXR-GEI/PI - BEI=FXI+GER/PI - PNR=0.0D0 - PNI=0.0D0 - DO 15 L=1,2 - V=(-1)**L*T - PPR=(((((.16D-5*V+.117D-4)*V+.346D-4)*V+.5D-6) - & *V-.13813D-2)*V-.0625001D0)*V+.7071068D0 - PPI=(((((-.32D-5*V-.24D-5)*V+.338D-4)*V+ - & .2452D-3)*V+.13811D-2)*V-.1D-6)*V+.7071068D0 - IF (L.EQ.1) THEN - PNR=PPR - PNI=PPI - ENDIF -15 CONTINUE - HER=GEI*PNI-GER*PNR - HEI=-(GEI*PNR+GER*PNI) - DER=FXR*PPR-FXI*PPI-HEI/PI - DEI=FXI*PPR+FXR*PPI+HER/PI - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE RMN2SO(M,N,C,X,CV,DF,KD,R2F,R2D) -C -C ============================================================= -C Purpose: Compute oblate radial functions of the second kind -C with a small argument, Rmn(-ic,ix) & Rmn'(-ic,ix) -C Routines called: -C (1) SCKB for computing the expansion coefficients c2k -C (2) KMN for computing the joining factors -C (3) QSTAR for computing the factor defined in (15.7.3) -C (4) CBK for computing the the expansion coefficient -C defined in (15.7.6) -C (5) GMN for computing the function 
defined in (15.7.4) -C (6) RMN1 for computing the radial function of the first -C kind -C ============================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BK(200),CK(200),DF(200),DN(200) - IF (DABS(DF(1)).LE.1.0D-280) THEN - R2F=1.0D+300 - R2D=1.0D+300 - RETURN - ENDIF - EPS=1.0D-14 - PI=3.141592653589793D0 - NM=25+INT((N-M)/2+C) - IP=1 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - CALL SCKB(M,N,C,DF,CK) - CALL KMN(M,N,C,CV,KD,DF,DN,CK1,CK2) - CALL QSTAR(M,N,C,CK,CK1,QS,QT) - CALL CBK(M,N,C,CV,QT,CK,BK) - IF (X.EQ.0.0D0) THEN - SUM=0.0D0 - SW=0.0D0 - DO 10 J=1,NM - SUM=SUM+CK(J) - IF (DABS(SUM-SW).LT.DABS(SUM)*EPS) GO TO 15 -10 SW=SUM -15 IF (IP.EQ.0) THEN - R1F=SUM/CK1 - R2F=-0.5D0*PI*QS*R1F - R2D=QS*R1F+BK(1) - ELSE IF (IP.EQ.1) THEN - R1D=SUM/CK1 - R2F=BK(1) - R2D=-0.5D0*PI*QS*R1D - ENDIF - RETURN - ELSE - CALL GMN(M,N,C,X,BK,GF,GD) - CALL RMN1(M,N,C,X,DF,KD,R1F,R1D) - H0=DATAN(X)-0.5D0*PI - R2F=QS*R1F*H0+GF - R2D=QS*(R1D*H0+R1F/(1.0D0+X*X))+GD - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CSPHIK(N,Z,NM,CSI,CDI,CSK,CDK) -C -C ======================================================= -C Purpose: Compute modified spherical Bessel functions -C and their derivatives for a complex argument -C Input : z --- Complex argument -C n --- Order of in(z) & kn(z) ( n = 0,1,2,... 
) -C Output: CSI(n) --- in(z) -C CDI(n) --- in'(z) -C CSK(n) --- kn(z) -C CDK(n) --- kn'(z) -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ======================================================= -C - IMPLICIT COMPLEX*16 (C,Z) - DOUBLE PRECISION A0,PI - DIMENSION CSI(0:N),CDI(0:N),CSK(0:N),CDK(0:N) - PI=3.141592653589793D0 - A0=CDABS(Z) - NM=N - IF (A0.LT.1.0D-60) THEN - DO 10 K=0,N - CSI(K)=0.0D0 - CDI(K)=0.0D0 - CSK(K)=1.0D+300 -10 CDK(K)=-1.0D+300 - CSI(0)=1.0D0 - CDI(1)=0.3333333333333333D0 - RETURN - ENDIF - CI=CMPLX(0.0D0,1.0D0) - CSINH=CDSIN(CI*Z)/CI - CCOSH=CDCOS(CI*Z) - CSI0=CSINH/Z - CSI1=(-CSINH/Z+CCOSH)/Z - CSI(0)=CSI0 - CSI(1)=CSI1 - IF (N.GE.2) THEN - M=MSTA1(A0,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(A0,N,15) - ENDIF - CF0=0.0D0 - CF1=1.0D0-100 - DO 15 K=M,0,-1 - CF=(2.0D0*K+3.0D0)*CF1/Z+CF0 - IF (K.LE.NM) CSI(K)=CF - CF0=CF1 -15 CF1=CF - IF (CDABS(CSI0).GT.CDABS(CSI1)) CS=CSI0/CF - IF (CDABS(CSI0).LE.CDABS(CSI1)) CS=CSI1/CF0 - DO 20 K=0,NM -20 CSI(K)=CS*CSI(K) - ENDIF - CDI(0)=CSI(1) - DO 25 K=1,NM -25 CDI(K)=CSI(K-1)-(K+1.0D0)*CSI(K)/Z - CSK(0)=0.5D0*PI/Z*CDEXP(-Z) - CSK(1)=CSK(0)*(1.0D0+1.0D0/Z) - DO 30 K=2,NM - IF (CDABS(CSI(K-1)).GT.CDABS(CSI(K-2))) THEN - CSK(K)=(0.5D0*PI/(Z*Z)-CSI(K)*CSK(K-1))/CSI(K-1) - ELSE - CSK(K)=(CSI(K)*CSK(K-2)+(K-0.5D0)*PI/Z**3)/CSI(K-2) - ENDIF -30 CONTINUE - CDK(0)=-CSK(1) - DO 35 K=1,NM -35 CDK(K)=-CSK(K-1)-(K+1.0D0)*CSK(K)/Z - RETURN - END - - - -C ********************************** - - SUBROUTINE BJNDD(N,X,BJ,DJ,FJ) -C -C ===================================================== -C Purpose: Compute Bessel functions Jn(x) and their -C first and second derivatives ( n= 0,1,… ) -C Input: x --- Argument of Jn(x) ( x ≥ 0 ) -C n --- Order of Jn(x) -C Output: BJ(n+1) --- Jn(x) -C DJ(n+1) --- Jn'(x) -C FJ(n+1) --- Jn"(x) -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION 
BJ(101),DJ(101),FJ(101) - DO 10 NT=1,900 - MT=INT(0.5*LOG10(6.28*NT)-NT*LOG10(1.36*DABS(X)/NT)) - IF (MT.GT.20) GO TO 15 -10 CONTINUE -15 M=NT - BS=0.0D0 - F=0.0D0 - F0=0.0D0 - F1=1.0D-35 - DO 20 K=M,0,-1 - F=2.0D0*(K+1.0D0)*F1/X-F0 - IF (K.LE.N) BJ(K+1)=F - IF (K.EQ.2*INT(K/2)) BS=BS+2.0D0*F - F0=F1 -20 F1=F - DO 25 K=0,N -25 BJ(K+1)=BJ(K+1)/(BS-F) - DJ(1)=-BJ(2) - FJ(1)=-1.0D0*BJ(1)-DJ(1)/X - DO 30 K=1,N - DJ(K+1)=BJ(K)-K*BJ(K+1)/X -30 FJ(K+1)=(K*K/(X*X)-1.0D0)*BJ(K+1)-DJ(K+1)/X - RETURN - END - -C ********************************** - - - SUBROUTINE SPHJ(N,X,NM,SJ,DJ) -C MODIFIED to ALLOW N=0 CASE (ALSO IN CSPHJY, SPHY) -C -C ======================================================= -C Purpose: Compute spherical Bessel functions jn(x) and -C their derivatives -C Input : x --- Argument of jn(x) -C n --- Order of jn(x) ( n = 0,1,… ) -C Output: SJ(n) --- jn(x) -C DJ(n) --- jn'(x) -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION SJ(0:N),DJ(0:N) - NM=N - IF (DABS(X).LT.1.0D-100) THEN - DO 10 K=0,N - SJ(K)=0.0D0 -10 DJ(K)=0.0D0 - SJ(0)=1.0D0 - IF (N.GT.0) THEN - DJ(1)=.3333333333333333D0 - ENDIF - RETURN - ENDIF - SJ(0)=DSIN(X)/X - DJ(0)=(DCOS(X)-DSIN(X)/X)/X - IF (N.LT.1) THEN - RETURN - ENDIF - SJ(1)=(SJ(0)-DCOS(X))/X - IF (N.GE.2) THEN - SA=SJ(0) - SB=SJ(1) - M=MSTA1(X,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F=0.0D0 - F0=0.0D0 - F1=1.0D0-100 - DO 15 K=M,0,-1 - F=(2.0D0*K+3.0D0)*F1/X-F0 - IF (K.LE.NM) SJ(K)=F - F0=F1 -15 F1=F - CS=0.0D0 - IF (DABS(SA).GT.DABS(SB)) CS=SA/F - IF (DABS(SA).LE.DABS(SB)) CS=SB/F0 - DO 20 K=0,NM -20 SJ(K)=CS*SJ(K) - ENDIF - DO 25 K=1,NM -25 DJ(K)=SJ(K-1)-(K+1.0D0)*SJ(K)/X - RETURN - END - - - -C ********************************** - - SUBROUTINE OTHPL(KF,N,X,PL,DPL) -C -C 
========================================================== -C Purpose: Compute orthogonal polynomials: Tn(x) or Un(x), -C or Ln(x) or Hn(x), and their derivatives -C Input : KF --- Function code -C KF=1 for Chebyshev polynomial Tn(x) -C KF=2 for Chebyshev polynomial Un(x) -C KF=3 for Laguerre polynomial Ln(x) -C KF=4 for Hermite polynomial Hn(x) -C n --- Order of orthogonal polynomials -C x --- Argument of orthogonal polynomials -C Output: PL(n) --- Tn(x) or Un(x) or Ln(x) or Hn(x) -C DPL(n)--- Tn'(x) or Un'(x) or Ln'(x) or Hn'(x) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION PL(0:N),DPL(0:N) - A=2.0D0 - B=0.0D0 - C=1.0D0 - Y0=1.0D0 - Y1=2.0D0*X - DY0=0.0D0 - DY1=2.0D0 - PL(0)=1.0D0 - PL(1)=2.0D0*X - DPL(0)=0.0D0 - DPL(1)=2.0D0 - IF (KF.EQ.1) THEN - Y1=X - DY1=1.0D0 - PL(1)=X - DPL(1)=1.0D0 - ELSE IF (KF.EQ.3) THEN - Y1=1.0D0-X - DY1=-1.0D0 - PL(1)=1.0D0-X - DPL(1)=-1.0D0 - ENDIF - DO 10 K=2,N - IF (KF.EQ.3) THEN - A=-1.0D0/K - B=2.0D0+A - C=1.0D0+A - ELSE IF (KF.EQ.4) THEN - C=2.0D0*(K-1.0D0) - ENDIF - YN=(A*X+B)*Y1-C*Y0 - DYN=A*Y1+(A*X+B)*DY1-C*DY0 - PL(K)=YN - DPL(K)=DYN - Y0=Y1 - Y1=YN - DY0=DY1 -10 DY1=DYN - RETURN - END - -C ********************************** - - SUBROUTINE KLVNZO(NT,KD,ZO) -C -C ==================================================== -C Purpose: Compute the zeros of Kelvin functions -C Input : NT --- Total number of zeros -C KD --- Function code -C KD=1 to 8 for ber x, bei x, ker x, kei x, -C ber'x, bei'x, ker'x and kei'x, -C respectively. 
-C Output: ZO(M) --- the M-th zero of Kelvin function -C for code KD -C Routine called: -C KLVNA for computing Kelvin functions and -C their derivatives -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION ZO(NT),RT0(8) - RT0(1)=2.84891 - RT0(2)=5.02622 - RT0(3)=1.71854 - RT0(4)=3.91467 - RT0(5)=6.03871 - RT0(6)=3.77268 - RT0(7)=2.66584 - RT0(8)=4.93181 - RT=RT0(KD) - DO 15 M=1,NT -10 CALL KLVNA(RT,BER,BEI,GER,GEI,DER,DEI,HER,HEI) - IF (KD.EQ.1) THEN - RT=RT-BER/DER - ELSE IF (KD.EQ.2) THEN - RT=RT-BEI/DEI - ELSE IF (KD.EQ.3) THEN - RT=RT-GER/HER - ELSE IF (KD.EQ.4) THEN - RT=RT-GEI/HEI - ELSE IF (KD.EQ.5) THEN - DDR=-BEI-DER/RT - RT=RT-DER/DDR - ELSE IF (KD.EQ.6) THEN - DDI=BER-DEI/RT - RT=RT-DEI/DDI - ELSE IF (KD.EQ.7) THEN - GDR=-GEI-HER/RT - RT=RT-HER/GDR - ELSE - GDI=GER-HEI/RT - RT=RT-HEI/GDI - ENDIF - IF (DABS(RT-RT0(KD)).GT.5.0D-10) THEN - RT0(KD)=RT - GO TO 10 - ENDIF - ZO(M)=RT -15 RT=RT+4.44D0 - RETURN - END - - - -C ********************************** - - SUBROUTINE RSWFO(M,N,C,X,CV,KF,R1F,R1D,R2F,R2D) -C -C ========================================================== -C Purpose: Compute oblate radial functions of the first -C and second kinds, and their derivatives -C Input : m --- Mode parameter, m = 0,1,2,... -C n --- Mode parameter, n = m,m+1,m+2,... 
-C c --- Spheroidal parameter -C x --- Argument (x ≥ 0) -C cv --- Characteristic value -C KF --- Function code -C KF=1 for the first kind -C KF=2 for the second kind -C KF=3 for both the first and second kinds -C Output: R1F --- Radial function of the first kind -C R1D --- Derivative of the radial function of -C the first kind -C R2F --- Radial function of the second kind -C R2D --- Derivative of the radial function of -C the second kind -C Routines called: -C (1) SDMN for computing expansion coefficients dk -C (2) RMN1 for computing prolate or oblate radial -C function of the first kind -C (3) RMN2L for computing prolate or oblate radial -C function of the second kind for a large argument -C (4) RMN2SO for computing oblate radial functions of -C the second kind for a small argument -C ========================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION DF(200) - KD=-1 - CALL SDMN(M,N,C,CV,KD,DF) - IF (KF.NE.2) THEN - CALL RMN1(M,N,C,X,DF,KD,R1F,R1D) - ENDIF - IF (KF.GT.1) THEN - ID=10 - IF (X.GT.1.0D-8) THEN - CALL RMN2L(M,N,C,X,DF,KD,R2F,R2D,ID) - ENDIF - IF (ID.GT.-1) THEN - CALL RMN2SO(M,N,C,X,CV,DF,KD,R2F,R2D) - ENDIF - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CH12N(N,Z,NM,CHF1,CHD1,CHF2,CHD2) -C -C ==================================================== -C Purpose: Compute Hankel functions of the first and -C second kinds and their derivatives for a -C complex argument -C Input : z --- Complex argument -C n --- Order of Hn(1)(z) and Hn(2)(z) -C Output: CHF1(n) --- Hn(1)(z) -C CHD1(n) --- Hn(1)'(z) -C CHF2(n) --- Hn(2)(z) -C CHD2(n) --- Hn(2)'(z) -C NM --- Highest order computed -C Routines called: -C (1) CJYNB for computing Jn(z) and Yn(z) -C (2) CIKNB for computing In(z) and Kn(z) -C ==================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBJ(0:250),CDJ(0:250),CBY(0:250),CDY(0:250), - & 
CBI(0:250),CDI(0:250),CBK(0:250),CDK(0:250) - DIMENSION CHF1(0:N),CHD1(0:N),CHF2(0:N),CHD2(0:N) - CI=(0.0D0,1.0D0) - PI=3.141592653589793D0 - IF (DIMAG(Z).LT.0.0D0) THEN - CALL CJYNB(N,Z,NM,CBJ,CDJ,CBY,CDY) - DO 10 K=0,NM - CHF1(K)=CBJ(K)+CI*CBY(K) -10 CHD1(K)=CDJ(K)+CI*CDY(K) - ZI=CI*Z - CALL CIKNB(N,ZI,NM,CBI,CDI,CBK,CDK) - CFAC=-2.0D0/(PI*CI) - DO 15 K=0,NM - CHF2(K)=CFAC*CBK(K) - CHD2(K)=CFAC*CI*CDK(K) -15 CFAC=CFAC*CI - ELSE IF (DIMAG(Z).GT.0.0D0) THEN - ZI=-CI*Z - CALL CIKNB(N,ZI,NM,CBI,CDI,CBK,CDK) - CF1=-CI - CFAC=2.0D0/(PI*CI) - DO 20 K=0,NM - CHF1(K)=CFAC*CBK(K) - CHD1(K)=-CFAC*CI*CDK(K) -20 CFAC=CFAC*CF1 - CALL CJYNB(N,Z,NM,CBJ,CDJ,CBY,CDY) - DO 25 K=0,NM - CHF2(K)=CBJ(K)-CI*CBY(K) -25 CHD2(K)=CDJ(K)-CI*CDY(K) - ELSE - CALL CJYNB(N,Z,NM,CBJ,CDJ,CBY,CDY) - DO 30 K=0,NM - CHF1(K)=CBJ(K)+CI*CBY(K) - CHD1(K)=CDJ(K)+CI*CDY(K) - CHF2(K)=CBJ(K)-CI*CBY(K) -30 CHD2(K)=CDJ(K)-CI*CDY(K) - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE JYZO(N,NT,RJ0,RJ1,RY0,RY1) -C -C ====================================================== -C Purpose: Compute the zeros of Bessel functions Jn(x), -C Yn(x), and their derivatives -C Input : n --- Order of Bessel functions (n >= 0) -C NT --- Number of zeros (roots) -C Output: RJ0(L) --- L-th zero of Jn(x), L=1,2,...,NT -C RJ1(L) --- L-th zero of Jn'(x), L=1,2,...,NT -C RY0(L) --- L-th zero of Yn(x), L=1,2,...,NT -C RY1(L) --- L-th zero of Yn'(x), L=1,2,...,NT -C Routine called: JYNDD for computing Jn(x), Yn(x), and -C their first and second derivatives -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION RJ0(NT),RJ1(NT),RY0(NT),RY1(NT) - PI=3.141592653589793D0 -C -- Newton method for j_{N,L} -C 1) initial guess for j_{N,1} - IF (N.LE.20) THEN - X=2.82141+1.15859*N - ELSE -C Abr & Stg (9.5.14) - X=N+1.85576*N**0.33333+1.03315/N**0.33333 - ENDIF - L=0 -C 2) iterate - XGUESS=X -10 X0=X - CALL JYNDD(N,X,BJN,DJN,FJN,BYN,DYN,FYN) - X=X-BJN/DJN - IF 
(X-X0.LT.-1) X=X0-1 - IF (X-X0.GT.1) X=X0+1 - IF (DABS(X-X0).GT.1.0D-11) GO TO 10 -C 3) initial guess for j_{N,L+1} - IF (L.GE.1 .AND. X.LE.RJ0(L)+0.5) THEN - X=XGUESS+PI - XGUESS=X - GO TO 10 - END IF - L=L+1 - RJ0(L)=X -C XXX: should have a better initial guess for large N ~> 100 here - X=X+PI+MAX((0.0972d0+0.0679*N-0.000354*N**2)/L, 0d0) - IF (L.LT.NT) GO TO 10 -C -- Newton method for j_{N,L}' - IF (N.LE.20) THEN - X=0.961587+1.07703*N - ELSE - X=N+0.80861*N**0.33333+0.07249/N**0.33333 - ENDIF - IF (N.EQ.0) X=3.8317 - L=0 - XGUESS=X -15 X0=X - CALL JYNDD(N,X,BJN,DJN,FJN,BYN,DYN,FYN) - X=X-DJN/FJN - IF (X-X0.LT.-1) X=X0-1 - IF (X-X0.GT.1) X=X0+1 - IF (DABS(X-X0).GT.1.0D-11) GO TO 15 - IF (L.GE.1 .AND. X.LE.RJ1(L)+0.5) THEN - X=XGUESS+PI - XGUESS=X - GO TO 15 - END IF - L=L+1 - RJ1(L)=X -C XXX: should have a better initial guess for large N ~> 100 here - X=X+PI+MAX((0.4955d0+0.0915*N-0.000435*N**2)/L, 0d0) - IF (L.LT.NT) GO TO 15 -C -- Newton method for y_{N,L} - IF (N.LE.20) THEN - X=1.19477+1.08933*N - ELSE - X=N+0.93158*N**0.33333+0.26035/N**0.33333 - ENDIF - L=0 - XGUESS=X -20 X0=X - CALL JYNDD(N,X,BJN,DJN,FJN,BYN,DYN,FYN) - X=X-BYN/DYN - IF (X-X0.LT.-1) X=X0-1 - IF (X-X0.GT.1) X=X0+1 - IF (DABS(X-X0).GT.1.0D-11) GO TO 20 - IF (L.GE.1 .AND. X.LE.RY0(L)+0.5) THEN - X=XGUESS+PI - XGUESS=X - GO TO 20 - END IF - L=L+1 - RY0(L)=X -C XXX: should have a better initial guess for large N ~> 100 here - X=X+PI+MAX((0.312d0+0.0852*N-0.000403*N**2)/L,0d0) - IF (L.LT.NT) GO TO 20 -C -- Newton method for y_{N,L}' - IF (N.LE.20) THEN - X=2.67257+1.16099*N - ELSE - X=N+1.8211*N**0.33333+0.94001/N**0.33333 - ENDIF - L=0 - XGUESS=X -25 X0=X - CALL JYNDD(N,X,BJN,DJN,FJN,BYN,DYN,FYN) - X=X-DYN/FYN - IF (DABS(X-X0).GT.1.0D-11) GO TO 25 - IF (L.GE.1 .AND. 
X.LE.RY1(L)+0.5) THEN - X=XGUESS+PI - XGUESS=X - GO TO 25 - END IF - L=L+1 - RY1(L)=X -C XXX: should have a better initial guess for large N ~> 100 here - X=X+PI+MAX((0.197d0+0.0643*N-0.000286*N**2)/L,0d0) - IF (L.LT.NT) GO TO 25 - RETURN - END - - - -C ********************************** - - SUBROUTINE IKV(V,X,VM,BI,DI,BK,DK) -C -C ======================================================= -C Purpose: Compute modified Bessel functions Iv(x) and -C Kv(x), and their derivatives -C Input : x --- Argument ( x ≥ 0 ) -C v --- Order of Iv(x) and Kv(x) -C ( v = n+v0, n = 0,1,2,..., 0 ≤ v0 < 1 ) -C Output: BI(n) --- In+v0(x) -C DI(n) --- In+v0'(x) -C BK(n) --- Kn+v0(x) -C DK(n) --- Kn+v0'(x) -C VM --- Highest order computed -C Routines called: -C (1) GAMMA2 for computing the gamma function -C (2) MSTA1 and MSTA2 to compute the starting -C point for backward recurrence -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION BI(0:*),DI(0:*),BK(0:*),DK(0:*) - PI=3.141592653589793D0 - X2=X*X - N=INT(V) - V0=V-N - IF (N.EQ.0) N=1 - IF (X.LT.1.0D-100) THEN - DO 10 K=0,N - BI(K)=0.0D0 - DI(K)=0.0D0 - BK(K)=-1.0D+300 -10 DK(K)=1.0D+300 - IF (V.EQ.0.0) THEN - BI(0)=1.0D0 - DI(1)=0.5D0 - ENDIF - VM=V - RETURN - ENDIF - PIV=PI*V0 - VT=4.0D0*V0*V0 - IF (V0.EQ.0.0D0) THEN - A1=1.0D0 - ELSE - V0P=1.0D0+V0 - CALL GAMMA2(V0P,GAP) - A1=(0.5D0*X)**V0/GAP - ENDIF - K0=14 - IF (X.GE.35.0) K0=10 - IF (X.GE.50.0) K0=8 - IF (X.LE.18.0) THEN - BI0=1.0D0 - R=1.0D0 - DO 15 K=1,30 - R=0.25D0*R*X2/(K*(K+V0)) - BI0=BI0+R - IF (DABS(R/BI0).LT.1.0D-15) GO TO 20 -15 CONTINUE -20 BI0=BI0*A1 - ELSE - CA=DEXP(X)/DSQRT(2.0D0*PI*X) - SUM=1.0D0 - R=1.0D0 - DO 25 K=1,K0 - R=-0.125D0*R*(VT-(2.0D0*K-1.0D0)**2.0)/(K*X) -25 SUM=SUM+R - BI0=CA*SUM - ENDIF - M=MSTA1(X,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F=0.0D0 - F2=0.0D0 - F1=1.0D-100 - WW=0.0D0 - DO 30 K=M,0,-1 - F=2.0D0*(V0+K+1.0D0)/X*F1+F2 - IF (K.LE.N) BI(K)=F - F2=F1 -30 F1=F - 
CS=BI0/F - DO 35 K=0,N -35 BI(K)=CS*BI(K) - DI(0)=V0/X*BI(0)+BI(1) - DO 40 K=1,N -40 DI(K)=-(K+V0)/X*BI(K)+BI(K-1) - IF (X.LE.9.0D0) THEN - IF (V0.EQ.0.0D0) THEN - CT=-DLOG(0.5D0*X)-0.5772156649015329D0 - CS=0.0D0 - W0=0.0D0 - R=1.0D0 - DO 45 K=1,50 - W0=W0+1.0D0/K - R=0.25D0*R/(K*K)*X2 - CS=CS+R*(W0+CT) - WA=DABS(CS) - IF (DABS((WA-WW)/WA).LT.1.0D-15) GO TO 50 -45 WW=WA -50 BK0=CT+CS - ELSE - V0N=1.0D0-V0 - CALL GAMMA2(V0N,GAN) - A2=1.0D0/(GAN*(0.5D0*X)**V0) - A1=(0.5D0*X)**V0/GAP - SUM=A2-A1 - R1=1.0D0 - R2=1.0D0 - DO 55 K=1,120 - R1=0.25D0*R1*X2/(K*(K-V0)) - R2=0.25D0*R2*X2/(K*(K+V0)) - SUM=SUM+A2*R1-A1*R2 - WA=DABS(SUM) - IF (DABS((WA-WW)/WA).LT.1.0D-15) GO TO 60 -55 WW=WA -60 BK0=0.5D0*PI*SUM/DSIN(PIV) - ENDIF - ELSE - CB=DEXP(-X)*DSQRT(0.5D0*PI/X) - SUM=1.0D0 - R=1.0D0 - DO 65 K=1,K0 - R=0.125D0*R*(VT-(2.0*K-1.0)**2.0)/(K*X) -65 SUM=SUM+R - BK0=CB*SUM - ENDIF - BK1=(1.0D0/X-BI(1)*BK0)/BI(0) - BK(0)=BK0 - BK(1)=BK1 - DO 70 K=2,N - BK2=2.0D0*(V0+K-1.0D0)/X*BK1+BK0 - BK(K)=BK2 - BK0=BK1 -70 BK1=BK2 - DK(0)=V0/X*BK(0)-BK(1) - DO 80 K=1,N -80 DK(K)=-(K+V0)/X*BK(K)-BK(K-1) - VM=N+V0 - RETURN - END - - - -C ********************************** - - SUBROUTINE SDMN(M,N,C,CV,KD,DF) -C -C ===================================================== -C Purpose: Compute the expansion coefficients of the -C prolate and oblate spheroidal functions, dk -C Input : m --- Mode parameter -C n --- Mode parameter -C c --- Spheroidal parameter -C cv --- Characteristic value -C KD --- Function code -C KD=1 for prolate; KD=-1 for oblate -C Output: DF(k) --- Expansion coefficients dk; -C DF(1), DF(2), ... correspond to -C d0, d2, ... for even n-m and d1, -C d3, ... 
for odd n-m -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(200),D(200),G(200),DF(200) - NM=25+INT(0.5*(N-M)+C) - IF (C.LT.1.0D-10) THEN - DO 5 I=1,NM -5 DF(I)=0D0 - DF((N-M)/2+1)=1.0D0 - RETURN - ENDIF - CS=C*C*KD - IP=1 - K=0 - IF (N-M.EQ.2*INT((N-M)/2)) IP=0 - DO 10 I=1,NM+2 - IF (IP.EQ.0) K=2*(I-1) - IF (IP.EQ.1) K=2*I-1 - DK0=M+K - DK1=M+K+1 - DK2=2*(M+K) - D2K=2*M+K - A(I)=(D2K+2.0)*(D2K+1.0)/((DK2+3.0)*(DK2+5.0))*CS - D(I)=DK0*DK1+(2.0*DK0*DK1-2.0*M*M-1.0)/((DK2-1.0) - & *(DK2+3.0))*CS - G(I)=K*(K-1.0)/((DK2-3.0)*(DK2-1.0))*CS -10 CONTINUE - FS=1.0D0 - F1=0.0D0 - F0=1.0D-100 - KB=0 - DF(NM+1)=0.0D0 - FL=0.0D0 - DO 30 K=NM,1,-1 - F=-((D(K+1)-CV)*F0+A(K+1)*F1)/G(K+1) - IF (DABS(F).GT.DABS(DF(K+1))) THEN - DF(K)=F - F1=F0 - F0=F - IF (DABS(F).GT.1.0D+100) THEN - DO 12 K1=K,NM -12 DF(K1)=DF(K1)*1.0D-100 - F1=F1*1.0D-100 - F0=F0*1.0D-100 - ENDIF - ELSE - KB=K - FL=DF(K+1) - F1=1.0D-100 - F2=-(D(1)-CV)/A(1)*F1 - DF(1)=F1 - IF (KB.EQ.1) THEN - FS=F2 - ELSE IF (KB.EQ.2) THEN - DF(2)=F2 - FS=-((D(2)-CV)*F2+G(2)*F1)/A(2) - ELSE - DF(2)=F2 - DO 20 J=3,KB+1 - F=-((D(J-1)-CV)*F2+G(J-1)*F1)/A(J-1) - IF (J.LE.KB) DF(J)=F - IF (DABS(F).GT.1.0D+100) THEN - DO 15 K1=1,J -15 DF(K1)=DF(K1)*1.0D-100 - F=F*1.0D-100 - F2=F2*1.0D-100 - ENDIF - F1=F2 -20 F2=F - FS=F - ENDIF - GO TO 35 - ENDIF -30 CONTINUE -35 SU1=0.0D0 - R1=1.0D0 - DO 40 J=M+IP+1,2*(M+IP) -40 R1=R1*J - SU1=DF(1)*R1 - DO 45 K=2,KB - R1=-R1*(K+M+IP-1.5D0)/(K-1.0D0) -45 SU1=SU1+R1*DF(K) - SU2=0.0D0 - SW=0.0D0 - DO 50 K=KB+1,NM - IF (K.NE.1) R1=-R1*(K+M+IP-1.5D0)/(K-1.0D0) - SU2=SU2+R1*DF(K) - IF (DABS(SW-SU2).LT.DABS(SU2)*1.0D-14) GOTO 55 -50 SW=SU2 -55 R3=1.0D0 - DO 60 J=1,(M+N+IP)/2 -60 R3=R3*(J+0.5D0*(N+M+IP)) - R4=1.0D0 - DO 65 J=1,(N-M-IP)/2 -65 R4=-4.0D0*R4*J - S0=R3/(FL*(SU1/FS)+SU2)/R4 - DO 70 K=1,KB -70 DF(K)=FL/FS*S0*DF(K) - DO 75 K=KB+1,NM -75 DF(K)=S0*DF(K) - RETURN - END - - - - -C ********************************** - - SUBROUTINE 
AJYIK(X,VJ1,VJ2,VY1,VY2,VI1,VI2,VK1,VK2) -C -C ======================================================= -C Purpose: Compute Bessel functions Jv(x) and Yv(x), -C and modified Bessel functions Iv(x) and -C Kv(x), and their derivatives with v=1/3,2/3 -C Input : x --- Argument of Jv(x),Yv(x),Iv(x) and -C Kv(x) ( x ≥ 0 ) -C Output: VJ1 --- J1/3(x) -C VJ2 --- J2/3(x) -C VY1 --- Y1/3(x) -C VY2 --- Y2/3(x) -C VI1 --- I1/3(x) -C VI2 --- I2/3(x) -C VK1 --- K1/3(x) -C VK2 --- K2/3(x) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - IF (X.EQ.0.0D0) THEN - VJ1=0.0D0 - VJ2=0.0D0 - VY1=-1.0D+300 - VY2=1.0D+300 - VI1=0.0D0 - VI2=0.0D0 - VK1=-1.0D+300 - VK2=-1.0D+300 - RETURN - ENDIF - PI=3.141592653589793D0 - RP2=.63661977236758D0 - GP1=.892979511569249D0 - GP2=.902745292950934D0 - GN1=1.3541179394264D0 - GN2=2.678938534707747D0 - VV0=0.444444444444444D0 - UU0=1.1547005383793D0 - X2=X*X - K0=12 - IF (X.GE.35.0) K0=10 - IF (X.GE.50.0) K0=8 - IF (X.LE.12.0) THEN - DO 25 L=1,2 - VL=L/3.0D0 - VJL=1.0D0 - R=1.0D0 - DO 15 K=1,40 - R=-0.25D0*R*X2/(K*(K+VL)) - VJL=VJL+R - IF (DABS(R).LT.1.0D-15) GO TO 20 -15 CONTINUE -20 A0=(0.5D0*X)**VL - IF (L.EQ.1) VJ1=A0/GP1*VJL - IF (L.EQ.2) VJ2=A0/GP2*VJL -25 CONTINUE - ELSE - DO 40 L=1,2 - VV=VV0*L*L - PX=1.0D0 - RP=1.0D0 - DO 30 K=1,K0 - RP=-0.78125D-2*RP*(VV-(4.0*K-3.0)**2.0)*(VV- - & (4.0*K-1.0)**2.0)/(K*(2.0*K-1.0)*X2) -30 PX=PX+RP - QX=1.0D0 - RQ=1.0D0 - DO 35 K=1,K0 - RQ=-0.78125D-2*RQ*(VV-(4.0*K-1.0)**2.0)*(VV- - & (4.0*K+1.0)**2.0)/(K*(2.0*K+1.0)*X2) -35 QX=QX+RQ - QX=0.125D0*(VV-1.0)*QX/X - XK=X-(0.5D0*L/3.0D0+0.25D0)*PI - A0=DSQRT(RP2/X) - CK=DCOS(XK) - SK=DSIN(XK) - IF (L.EQ.1) THEN - VJ1=A0*(PX*CK-QX*SK) - VY1=A0*(PX*SK+QX*CK) - ELSE IF (L.EQ.2) THEN - VJ2=A0*(PX*CK-QX*SK) - VY2=A0*(PX*SK+QX*CK) - ENDIF -40 CONTINUE - ENDIF - IF (X.LE.12.0D0) THEN - UJ1=0.0D0 - UJ2=0.0D0 - DO 55 L=1,2 - VL=L/3.0D0 - VJL=1.0D0 - R=1.0D0 - DO 45 K=1,40 - R=-0.25D0*R*X2/(K*(K-VL)) - VJL=VJL+R - IF 
(DABS(R).LT.1.0D-15) GO TO 50 -45 CONTINUE -50 B0=(2.0D0/X)**VL - IF (L.EQ.1) UJ1=B0*VJL/GN1 - IF (L.EQ.2) UJ2=B0*VJL/GN2 -55 CONTINUE - PV1=PI/3.0D0 - PV2=PI/1.5D0 - VY1=UU0*(VJ1*DCOS(PV1)-UJ1) - VY2=UU0*(VJ2*DCOS(PV2)-UJ2) - ENDIF - IF (X.LE.18.0) THEN - DO 70 L=1,2 - VL=L/3.0D0 - VIL=1.0D0 - R=1.0D0 - DO 60 K=1,40 - R=0.25D0*R*X2/(K*(K+VL)) - VIL=VIL+R - IF (DABS(R).LT.1.0D-15) GO TO 65 -60 CONTINUE -65 A0=(0.5D0*X)**VL - IF (L.EQ.1) VI1=A0/GP1*VIL - IF (L.EQ.2) VI2=A0/GP2*VIL -70 CONTINUE - ELSE - C0=DEXP(X)/DSQRT(2.0D0*PI*X) - DO 80 L=1,2 - VV=VV0*L*L - VSL=1.0D0 - R=1.0D0 - DO 75 K=1,K0 - R=-0.125D0*R*(VV-(2.0D0*K-1.0D0)**2.0)/(K*X) -75 VSL=VSL+R - IF (L.EQ.1) VI1=C0*VSL - IF (L.EQ.2) VI2=C0*VSL -80 CONTINUE - ENDIF - IF (X.LE.9.0D0) THEN - GN=0.0D0 - DO 95 L=1,2 - VL=L/3.0D0 - IF (L.EQ.1) GN=GN1 - IF (L.EQ.2) GN=GN2 - A0=(2.0D0/X)**VL/GN - SUM=1.0D0 - R=1.0D0 - DO 85 K=1,60 - R=0.25D0*R*X2/(K*(K-VL)) - SUM=SUM+R - IF (DABS(R).LT.1.0D-15) GO TO 90 -85 CONTINUE -90 IF (L.EQ.1) VK1=0.5D0*UU0*PI*(SUM*A0-VI1) - IF (L.EQ.2) VK2=0.5D0*UU0*PI*(SUM*A0-VI2) -95 CONTINUE - ELSE - C0=DEXP(-X)*DSQRT(0.5D0*PI/X) - DO 105 L=1,2 - VV=VV0*L*L - SUM=1.0D0 - R=1.0D0 - DO 100 K=1,K0 - R=0.125D0*R*(VV-(2.0*K-1.0)**2.0)/(K*X) -100 SUM=SUM+R - IF (L.EQ.1) VK1=C0*SUM - IF (L.EQ.2) VK2=C0*SUM -105 CONTINUE - ENDIF - RETURN - END - - - -C ********************************** - - SUBROUTINE CIKVB(V,Z,VM,CBI,CDI,CBK,CDK) -C -C =========================================================== -C Purpose: Compute the modified Bessel functions Iv(z), Kv(z) -C and their derivatives for an arbitrary order and -C complex argument -C Input : z --- Complex argument z -C v --- Real order of Iv(z) and Kv(z) -C ( v =n+v0, n = 0,1,2,..., 0 ≤ v0 < 1 ) -C Output: CBI(n) --- In+v0(z) -C CDI(n) --- In+v0'(z) -C CBK(n) --- Kn+v0(z) -C CDK(n) --- Kn+v0'(z) -C VM --- Highest order computed -C Routines called: -C (1) GAMMA2 for computing the gamma function -C (2) MSTA1 and MSTA2 for computing the starting -C 
point for backward recurrence -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBI(0:*),CDI(0:*),CBK(0:*),CDK(0:*) - Z1=Z - Z2=Z*Z - A0=CDABS(Z) - PI=3.141592653589793D0 - CI=(0.0D0,1.0D0) - N=INT(V) - V0=V-N - PIV=PI*V0 - VT=4.0D0*V0*V0 - IF (N.EQ.0) N=1 - IF (A0.LT.1.0D-100) THEN - DO 10 K=0,N - CBI(K)=0.0D0 - CDI(K)=0.0D0 - CBK(K)=-1.0D+300 -10 CDK(K)=1.0D+300 - IF (V0.EQ.0.0) THEN - CBI(0)=(1.0D0,0.0D0) - CDI(1)=(0.5D0,0.0D0) - ENDIF - VM=V - RETURN - ENDIF - K0=14 - IF (A0.GE.35.0) K0=10 - IF (A0.GE.50.0) K0=8 - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (A0.LT.18.0) THEN - IF (V0.EQ.0.0) THEN - CA1=(1.0D0,0.0D0) - ELSE - V0P=1.0D0+V0 - CALL GAMMA2(V0P,GAP) - CA1=(0.5D0*Z1)**V0/GAP - ENDIF - CI0=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 15 K=1,50 - CR=0.25D0*CR*Z2/(K*(K+V0)) - CI0=CI0+CR - IF (CDABS(CR/CI0).LT.1.0D-15) GO TO 20 -15 CONTINUE -20 CBI0=CI0*CA1 - ELSE - CA=CDEXP(Z1)/CDSQRT(2.0D0*PI*Z1) - CS=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 25 K=1,K0 - CR=-0.125D0*CR*(VT-(2.0D0*K-1.0D0)**2.0)/(K*Z1) -25 CS=CS+CR - CBI0=CA*CS - ENDIF - M=MSTA1(A0,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(A0,N,15) - ENDIF - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 30 K=M,0,-1 - CF=2.0D0*(V0+K+1.0D0)/Z1*CF1+CF2 - IF (K.LE.N) CBI(K)=CF - CF2=CF1 -30 CF1=CF - CS=CBI0/CF - DO 35 K=0,N -35 CBI(K)=CS*CBI(K) - IF (A0.LE.9.0) THEN - IF (V0.EQ.0.0) THEN - CT=-CDLOG(0.5D0*Z1)-0.5772156649015329D0 - CS=(0.0D0,0.0D0) - W0=0.0D0 - CR=(1.0D0,0.0D0) - DO 40 K=1,50 - W0=W0+1.0D0/K - CR=0.25D0*CR/(K*K)*Z2 - CP=CR*(W0+CT) - CS=CS+CP - IF (K.GE.10.AND.CDABS(CP/CS).LT.1.0D-15) GO TO 45 -40 CONTINUE -45 CBK0=CT+CS - ELSE - V0N=1.0D0-V0 - CALL GAMMA2(V0N,GAN) - CA2=1.0D0/(GAN*(0.5D0*Z1)**V0) - CA1=(0.5D0*Z1)**V0/GAP - CSU=CA2-CA1 - CR1=(1.0D0,0.0D0) - CR2=(1.0D0,0.0D0) - DO 50 K=1,50 - CR1=0.25D0*CR1*Z2/(K*(K-V0)) - CR2=0.25D0*CR2*Z2/(K*(K+V0)) - CP=CA2*CR1-CA1*CR2 - CSU=CSU+CP - IF 
(K.GE.10.AND.CDABS(CP/CSU).LT.1.0D-15) GO TO 55 -50 CONTINUE -55 CBK0=0.5D0*PI*CSU/DSIN(PIV) - ENDIF - ELSE - CB=CDEXP(-Z1)*CDSQRT(0.5D0*PI/Z1) - CS=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 60 K=1,K0 - CR=0.125D0*CR*(VT-(2.0D0*K-1.0D0)**2.0)/(K*Z1) -60 CS=CS+CR - CBK0=CB*CS - ENDIF - CBK(0)=CBK0 - IF (DBLE(Z).LT.0.0) THEN - DO 65 K=0,N - CVK=CDEXP((K+V0)*PI*CI) - IF (DIMAG(Z).LT.0.0D0) THEN - CBK(K)=CVK*CBK(K)+PI*CI*CBI(K) - CBI(K)=CBI(K)/CVK - ELSE IF (DIMAG(Z).GT.0.0) THEN - CBK(K)=CBK(K)/CVK-PI*CI*CBI(K) - CBI(K)=CVK*CBI(K) - ENDIF -65 CONTINUE - ENDIF - DO 70 K=1,N - CKK=(1.0D0/Z-CBI(K)*CBK(K-1))/CBI(K-1) - CBK(K)=CKK -70 CONTINUE - CDI(0)=V0/Z*CBI(0)+CBI(1) - CDK(0)=V0/Z*CBK(0)-CBK(1) - DO 80 K=1,N - CDI(K)=-(K+V0)/Z*CBI(K)+CBI(K-1) -80 CDK(K)=-(K+V0)/Z*CBK(K)-CBK(K-1) - VM=N+V0 - RETURN - END - - - -C ********************************** - - SUBROUTINE CIKVA(V,Z,VM,CBI,CDI,CBK,CDK) -C -C ============================================================ -C Purpose: Compute the modified Bessel functions Iv(z), Kv(z) -C and their derivatives for an arbitrary order and -C complex argument -C Input : z --- Complex argument -C v --- Real order of Iv(z) and Kv(z) -C ( v = n+v0, n = 0,1,2,…, 0 ≤ v0 < 1 ) -C Output: CBI(n) --- In+v0(z) -C CDI(n) --- In+v0'(z) -C CBK(n) --- Kn+v0(z) -C CDK(n) --- Kn+v0'(z) -C VM --- Highest order computed -C Routines called: -C (1) GAMMA2 for computing the gamma function -C (2) MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ============================================================ -C - IMPLICIT DOUBLE PRECISION (A,G,P,R,V,W) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBI(0:*),CDI(0:*),CBK(0:*),CDK(0:*) - PI=3.141592653589793D0 - CI=(0.0D0,1.0D0) - A0=CDABS(Z) - Z1=Z - Z2=Z*Z - N=INT(V) - V0=V-N - PIV=PI*V0 - VT=4.0D0*V0*V0 - IF (N.EQ.0) N=1 - IF (A0.LT.1.0D-100) THEN - DO 10 K=0,N - CBI(K)=0.0D0 - CDI(K)=0.0D0 - CBK(K)=-1.0D+300 -10 CDK(K)=1.0D+300 - IF (V0.EQ.0.0) THEN - CBI(0)=(1.0D0,0.0D0) - 
CDI(1)=(0.5D0,0.0D0) - ENDIF - VM=V - RETURN - ENDIF - K0=14 - IF (A0.GE.35.0) K0=10 - IF (A0.GE.50.0) K0=8 - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (A0.LT.18.0) THEN - IF (V0.EQ.0.0) THEN - CA1=(1.0D0,0.0D0) - ELSE - V0P=1.0D0+V0 - CALL GAMMA2(V0P,GAP) - CA1=(0.5D0*Z1)**V0/GAP - ENDIF - CI0=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 15 K=1,50 - CR=0.25D0*CR*Z2/(K*(K+V0)) - CI0=CI0+CR - IF (CDABS(CR).LT.CDABS(CI0)*1.0D-15) GO TO 20 -15 CONTINUE -20 CBI0=CI0*CA1 - ELSE - CA=CDEXP(Z1)/CDSQRT(2.0D0*PI*Z1) - CS=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 25 K=1,K0 - CR=-0.125D0*CR*(VT-(2.0D0*K-1.0D0)**2.0)/(K*Z1) -25 CS=CS+CR - CBI0=CA*CS - ENDIF - M=MSTA1(A0,200) - IF (M.LT.N) THEN - N=M - ELSE - M=MSTA2(A0,N,15) - ENDIF - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 30 K=M,0,-1 - CF=2.0D0*(V0+K+1.0D0)/Z1*CF1+CF2 - IF (K.LE.N) CBI(K)=CF - CF2=CF1 -30 CF1=CF - CS=CBI0/CF - DO 35 K=0,N -35 CBI(K)=CS*CBI(K) - IF (A0.LE.9.0) THEN - IF (V0.EQ.0.0) THEN - CT=-CDLOG(0.5D0*Z1)-0.5772156649015329D0 - CS=(0.0D0,0.0D0) - W0=0.0D0 - CR=(1.0D0,0.0D0) - DO 40 K=1,50 - W0=W0+1.0D0/K - CR=0.25D0*CR/(K*K)*Z2 - CP=CR*(W0+CT) - CS=CS+CP - IF (K.GE.10.AND.CDABS(CP/CS).LT.1.0D-15) GO TO 45 -40 CONTINUE -45 CBK0=CT+CS - ELSE - V0N=1.0D0-V0 - CALL GAMMA2(V0N,GAN) - CA2=1.0D0/(GAN*(0.5D0*Z1)**V0) - CA1=(0.5D0*Z1)**V0/GAP - CSU=CA2-CA1 - CR1=(1.0D0,0.0D0) - CR2=(1.0D0,0.0D0) - WS0=0.0D0 - DO 50 K=1,50 - CR1=0.25D0*CR1*Z2/(K*(K-V0)) - CR2=0.25D0*CR2*Z2/(K*(K+V0)) - CSU=CSU+CA2*CR1-CA1*CR2 - WS=CDABS(CSU) - IF (K.GE.10.AND.DABS(WS-WS0)/WS.LT.1.0D-15) GO TO 55 - WS0=WS -50 CONTINUE -55 CBK0=0.5D0*PI*CSU/DSIN(PIV) - ENDIF - ELSE - CB=CDEXP(-Z1)*CDSQRT(0.5D0*PI/Z1) - CS=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 60 K=1,K0 - CR=0.125D0*CR*(VT-(2.0D0*K-1.0D0)**2.0)/(K*Z1) -60 CS=CS+CR - CBK0=CB*CS - ENDIF - CBK1=(1.0D0/Z1-CBI(1)*CBK0)/CBI(0) - CBK(0)=CBK0 - CBK(1)=CBK1 - CG0=CBK0 - CG1=CBK1 - DO 65 K=2,N - CGK=2.0D0*(V0+K-1.0D0)/Z1*CG1+CG0 - CBK(K)=CGK - CG0=CG1 -65 CG1=CGK - IF (DBLE(Z).LT.0.0) THEN - DO 70 K=0,N - 
CVK=CDEXP((K+V0)*PI*CI) - IF (DIMAG(Z).LT.0.0D0) THEN - CBK(K)=CVK*CBK(K)+PI*CI*CBI(K) - CBI(K)=CBI(K)/CVK - ELSE IF (DIMAG(Z).GT.0.0) THEN - CBK(K)=CBK(K)/CVK-PI*CI*CBI(K) - CBI(K)=CVK*CBI(K) - ENDIF -70 CONTINUE - ENDIF - CDI(0)=V0/Z*CBI(0)+CBI(1) - CDK(0)=V0/Z*CBK(0)-CBK(1) - DO 75 K=1,N - CDI(K)=-(K+V0)/Z*CBI(K)+CBI(K-1) -75 CDK(K)=-(K+V0)/Z*CBK(K)-CBK(K-1) - VM=N+V0 - RETURN - END - - - -C ********************************** - - SUBROUTINE CFC(Z,ZF,ZD) -C -C ========================================================= -C Purpose: Compute complex Fresnel integral C(z) and C'(z) -C Input : z --- Argument of C(z) -C Output: ZF --- C(z) -C ZD --- C'(z) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (E,P,W) - IMPLICIT COMPLEX *16 (C,S,Z) - EPS=1.0D-14 - PI=3.141592653589793D0 - W0=CDABS(Z) - ZP=0.5D0*PI*Z*Z - ZP2=ZP*ZP - Z0=(0.0D0,0.0D0) - IF (Z.EQ.Z0) THEN - C=Z0 - ELSE IF (W0.LE.2.5) THEN - CR=Z - C=CR - WA0=0.0D0 - DO 10 K=1,80 - CR=-.5D0*CR*(4.0D0*K-3.0D0)/K/(2.0D0*K-1.0D0) - & /(4.0D0*K+1.0D0)*ZP2 - C=C+CR - WA=CDABS(C) - IF (DABS((WA-WA0)/WA).LT.EPS.AND.K.GT.10) GO TO 30 -10 WA0=WA - ELSE IF (W0.GT.2.5.AND.W0.LT.4.5) THEN - M=85 - C=Z0 - CF1=Z0 - CF0=(1.0D-100,0.0D0) - DO 15 K=M,0,-1 - CF=(2.0D0*K+3.0D0)*CF0/ZP-CF1 - IF (K.EQ.INT(K/2)*2) C=C+CF - CF1=CF0 -15 CF0=CF - C=CDSQRT(2.0D0/(PI*ZP))*CDSIN(ZP)/CF*C - ELSE - CR=(1.0D0,0.0D0) - CF=(1.0D0,0.0D0) - DO 20 K=1,20 - CR=-.25D0*CR*(4.0D0*K-1.0D0)*(4.0D0*K-3.0D0)/ZP2 -20 CF=CF+CR - CR=1.0D0/(PI*Z*Z) - CG=CR - DO 25 K=1,12 - CR=-.25D0*CR*(4.0D0*K+1.0D0)*(4.0D0*K-1.0D0)/ZP2 -25 CG=CG+CR - C=.5D0+(CF*CDSIN(ZP)-CG*CDCOS(ZP))/(PI*Z) - ENDIF -30 ZF=C - ZD=CDCOS(0.5*PI*Z*Z) - RETURN - END - - - -C ********************************** - - SUBROUTINE FCS(X,C,S) -C -C ================================================= -C Purpose: Compute Fresnel integrals C(x) and S(x) -C Input : x --- Argument of C(x) and S(x) -C Output: C --- C(x) -C S --- S(x) -C 
================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - EPS=1.0D-15 - PI=3.141592653589793D0 - XA=DABS(X) - PX=PI*XA - T=.5D0*PX*XA - T2=T*T - IF (XA.EQ.0.0) THEN - C=0.0D0 - S=0.0D0 - ELSE IF (XA.LT.2.5D0) THEN - R=XA - C=R - DO 10 K=1,50 - R=-.5D0*R*(4.0D0*K-3.0D0)/K/(2.0D0*K-1.0D0) - & /(4.0D0*K+1.0D0)*T2 - C=C+R - IF (DABS(R).LT.DABS(C)*EPS) GO TO 15 -10 CONTINUE -15 S=XA*T/3.0D0 - R=S - DO 20 K=1,50 - R=-.5D0*R*(4.0D0*K-1.0D0)/K/(2.0D0*K+1.0D0) - & /(4.0D0*K+3.0D0)*T2 - S=S+R - IF (DABS(R).LT.DABS(S)*EPS) GO TO 40 -20 CONTINUE - ELSE IF (XA.LT.4.5D0) THEN - M=INT(42.0+1.75*T) - SU=0.0D0 - C=0.0D0 - S=0.0D0 - F1=0.0D0 - F0=1.0D-100 - DO 25 K=M,0,-1 - F=(2.0D0*K+3.0D0)*F0/T-F1 - IF (K.EQ.INT(K/2)*2) THEN - C=C+F - ELSE - S=S+F - ENDIF - SU=SU+(2.0D0*K+1.0D0)*F*F - F1=F0 -25 F0=F - Q=DSQRT(SU) - C=C*XA/Q - S=S*XA/Q - ELSE - R=1.0D0 - F=1.0D0 - DO 30 K=1,20 - R=-.25D0*R*(4.0D0*K-1.0D0)*(4.0D0*K-3.0D0)/T2 -30 F=F+R - R=1.0D0/(PX*XA) - G=R - DO 35 K=1,12 - R=-.25D0*R*(4.0D0*K+1.0D0)*(4.0D0*K-1.0D0)/T2 -35 G=G+R - T0=T-INT(T/(2.0D0*PI))*2.0D0*PI - C=.5D0+(F*DSIN(T0)-G*DCOS(T0))/PX - S=.5D0-(F*DCOS(T0)+G*DSIN(T0))/PX - ENDIF -40 IF (X.LT.0.0D0) THEN - C=-C - S=-S - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE RCTJ(N,X,NM,RJ,DJ) -C -C ======================================================== -C Purpose: Compute Riccati-Bessel functions of the first -C kind and their derivatives -C Input: x --- Argument of Riccati-Bessel function -C n --- Order of jn(x) ( n = 0,1,2,... 
) -C Output: RJ(n) --- x·jn(x) -C DJ(n) --- [x·jn(x)]' -C NM --- Highest order computed -C Routines called: -C MSTA1 and MSTA2 for computing the starting -C point for backward recurrence -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION RJ(0:N),DJ(0:N) - NM=N - IF (DABS(X).LT.1.0D-100) THEN - DO 10 K=0,N - RJ(K)=0.0D0 -10 DJ(K)=0.0D0 - DJ(0)=1.0D0 - RETURN - ENDIF - RJ(0)=DSIN(X) - RJ(1)=RJ(0)/X-DCOS(X) - RJ0=RJ(0) - RJ1=RJ(1) - CS=0.0D0 - F=0.0D0 - IF (N.GE.2) THEN - M=MSTA1(X,200) - IF (M.LT.N) THEN - NM=M - ELSE - M=MSTA2(X,N,15) - ENDIF - F0=0.0D0 - F1=1.0D-100 - DO 15 K=M,0,-1 - F=(2.0D0*K+3.0D0)*F1/X-F0 - IF (K.LE.NM) RJ(K)=F - F0=F1 -15 F1=F - IF (DABS(RJ0).GT.DABS(RJ1)) CS=RJ0/F - IF (DABS(RJ0).LE.DABS(RJ1)) CS=RJ1/F0 - DO 20 K=0,NM -20 RJ(K)=CS*RJ(K) - ENDIF - DJ(0)=DCOS(X) - DO 25 K=1,NM -25 DJ(K)=-K*RJ(K)/X+RJ(K-1) - RETURN - END - - - -C ********************************** - - SUBROUTINE HERZO(N,X,W) -C -C ======================================================== -C Purpose : Compute the zeros of Hermite polynomial Ln(x) -C in the interval [-∞,∞], and the corresponding -C weighting coefficients for Gauss-Hermite -C integration -C Input : n --- Order of the Hermite polynomial -C X(n) --- Zeros of the Hermite polynomial -C W(n) --- Corresponding weighting coefficients -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION X(N),W(N) - HN=1.0D0/N - ZL=-1.1611D0+1.46D0*N**0.5 - Z=0.0D0 - HF=0.0D0 - HD=0.0D0 - DO 40 NR=1,N/2 - IF (NR.EQ.1) Z=ZL - IF (NR.NE.1) Z=Z-HN*(N/2+1-NR) - IT=0 -10 IT=IT+1 - Z0=Z - F0=1.0D0 - F1=2.0D0*Z - DO 15 K=2,N - HF=2.0D0*Z*F1-2.0D0*(K-1.0D0)*F0 - HD=2.0D0*K*F1 - F0=F1 -15 F1=HF - P=1.0D0 - DO 20 I=1,NR-1 -20 P=P*(Z-X(I)) - FD=HF/P - Q=0.0D0 - DO 30 I=1,NR-1 - WP=1.0D0 - DO 25 J=1,NR-1 - IF (J.EQ.I) GO TO 25 - WP=WP*(Z-X(J)) -25 CONTINUE -30 Q=Q+WP - GD=(HD-Q*FD)/P - Z=Z-FD/GD - IF 
(IT.LE.40.AND.DABS((Z-Z0)/Z).GT.1.0D-15) GO TO 10 - X(NR)=Z - X(N+1-NR)=-Z - R=1.0D0 - DO 35 K=1,N -35 R=2.0D0*R*K - W(NR)=3.544907701811D0*R/(HD*HD) -40 W(N+1-NR)=W(NR) - IF (N.NE.2*INT(N/2)) THEN - R1=1.0D0 - R2=1.0D0 - DO 45 J=1,N - R1=2.0D0*R1*J - IF (J.GE.(N+1)/2) R2=R2*J -45 CONTINUE - W(N/2+1)=0.88622692545276D0*R1/(R2*R2) - X(N/2+1)=0.0D0 - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE JY01B(X,BJ0,DJ0,BJ1,DJ1,BY0,DY0,BY1,DY1) -C -C ======================================================= -C Purpose: Compute Bessel functions J0(x), J1(x), Y0(x), -C Y1(x), and their derivatives -C Input : x --- Argument of Jn(x) & Yn(x) ( x ≥ 0 ) -C Output: BJ0 --- J0(x) -C DJ0 --- J0'(x) -C BJ1 --- J1(x) -C DJ1 --- J1'(x) -C BY0 --- Y0(x) -C DY0 --- Y0'(x) -C BY1 --- Y1(x) -C DY1 --- Y1'(x) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.0.0D0) THEN - BJ0=1.0D0 - BJ1=0.0D0 - DJ0=0.0D0 - DJ1=0.5D0 - BY0=-1.0D+300 - BY1=-1.0D+300 - DY0=1.0D+300 - DY1=1.0D+300 - RETURN - ELSE IF (X.LE.4.0D0) THEN - T=X/4.0D0 - T2=T*T - BJ0=((((((-.5014415D-3*T2+.76771853D-2)*T2 - & -.0709253492D0)*T2+.4443584263D0)*T2 - & -1.7777560599D0)*T2+3.9999973021D0) - & *T2-3.9999998721D0)*T2+1.0D0 - BJ1=T*(((((((-.1289769D-3*T2+.22069155D-2) - & *T2-.0236616773D0)*T2+.1777582922D0)*T2 - & -.8888839649D0)*T2+2.6666660544D0)*T2 - & -3.9999999710D0)*T2+1.9999999998D0) - BY0=(((((((-.567433D-4*T2+.859977D-3)*T2 - & -.94855882D-2)*T2+.0772975809D0)*T2 - & -.4261737419D0)*T2+1.4216421221D0)*T2 - & -2.3498519931D0)*T2+1.0766115157D0)*T2 - & +.3674669052D0 - BY0=2.0D0/PI*DLOG(X/2.0D0)*BJ0+BY0 - BY1=((((((((.6535773D-3*T2-.0108175626D0)*T2 - & +.107657606D0)*T2-.7268945577D0)*T2 - & +3.1261399273D0)*T2-7.3980241381D0)*T2 - & +6.8529236342D0)*T2+.3932562018D0)*T2 - & -.6366197726D0)/X - BY1=2.0D0/PI*DLOG(X/2.0D0)*BJ1+BY1 - ELSE - T=4.0D0/X - T2=T*T - A0=DSQRT(2.0D0/(PI*X)) - 
P0=((((-.9285D-5*T2+.43506D-4)*T2-.122226D-3)*T2 - & +.434725D-3)*T2-.4394275D-2)*T2+.999999997D0 - Q0=T*(((((.8099D-5*T2-.35614D-4)*T2+.85844D-4)*T2 - & -.218024D-3)*T2+.1144106D-2)*T2-.031249995D0) - TA0=X-.25D0*PI - BJ0=A0*(P0*DCOS(TA0)-Q0*DSIN(TA0)) - BY0=A0*(P0*DSIN(TA0)+Q0*DCOS(TA0)) - P1=((((.10632D-4*T2-.50363D-4)*T2+.145575D-3)*T2 - & -.559487D-3)*T2+.7323931D-2)*T2+1.000000004D0 - Q1=T*(((((-.9173D-5*T2+.40658D-4)*T2-.99941D-4)*T2 - & +.266891D-3)*T2-.1601836D-2)*T2+.093749994D0) - TA1=X-.75D0*PI - BJ1=A0*(P1*DCOS(TA1)-Q1*DSIN(TA1)) - BY1=A0*(P1*DSIN(TA1)+Q1*DCOS(TA1)) - ENDIF - DJ0=-BJ1 - DJ1=BJ0-BJ1/X - DY0=-BY1 - DY1=BY0-BY1/X - RETURN - END - -C ********************************** - - SUBROUTINE ENXB(N,X,EN) -C -C =============================================== -C Purpose: Compute exponential integral En(x) -C Input : x --- Argument of En(x) -C n --- Order of En(x) (n = 0,1,2,...) -C Output: EN(n) --- En(x) -C =============================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION EN(0:N) - IF (X.EQ.0.0) THEN - EN(0)=1.0D+300 - EN(1)=1.0D+300 - DO 10 K=2,N -10 EN(K)=1.0D0/(K-1.0) - RETURN - ELSE IF (X.LE.1.0) THEN - EN(0)=DEXP(-X)/X - S0=0.0D0 - DO 40 L=1,N - RP=1.0D0 - DO 15 J=1,L-1 -15 RP=-RP*X/J - PS=-0.5772156649015328D0 - DO 20 M=1,L-1 -20 PS=PS+1.0D0/M - ENS=RP*(-DLOG(X)+PS) - S=0.0D0 - DO 30 M=0,20 - IF (M.EQ.L-1) GO TO 30 - R=1.0D0 - DO 25 J=1,M -25 R=-R*X/J - S=S+R/(M-L+1.0D0) - IF (DABS(S-S0).LT.DABS(S)*1.0D-15) GO TO 35 - S0=S -30 CONTINUE -35 EN(L)=ENS-S -40 CONTINUE - ELSE - EN(0)=DEXP(-X)/X - M=15+INT(100.0/X) - DO 50 L=1,N - T0=0.0D0 - DO 45 K=M,1,-1 -45 T0=(L+K-1.0D0)/(1.0D0+K/(X+T0)) - T=1.0D0/(X+T0) -50 EN(L)=DEXP(-X)*T - ENDIF - END - -C ********************************** - - SUBROUTINE SPHK(N,X,NM,SK,DK) -C -C ===================================================== -C Purpose: Compute modified spherical Bessel functions -C of the second kind, kn(x) and kn'(x) -C Input : x --- Argument of kn(x) ( x ≥ 0 ) -C 
n --- Order of kn(x) ( n = 0,1,2,... ) -C Output: SK(n) --- kn(x) -C DK(n) --- kn'(x) -C NM --- Highest order computed -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION SK(0:N),DK(0:N) - PI=3.141592653589793D0 - NM=N - IF (X.LT.1.0D-60) THEN - DO 10 K=0,N - SK(K)=1.0D+300 -10 DK(K)=-1.0D+300 - RETURN - ENDIF - SK(0)=0.5D0*PI/X*DEXP(-X) - SK(1)=SK(0)*(1.0D0+1.0D0/X) - F0=SK(0) - F1=SK(1) - DO 15 K=2,N - F=(2.0D0*K-1.0D0)*F1/X+F0 - SK(K)=F - IF (DABS(F).GT.1.0D+300) GO TO 20 - F0=F1 -15 F1=F -20 NM=K-1 - DK(0)=-SK(1) - DO 25 K=1,NM -25 DK(K)=-SK(K-1)-(K+1.0D0)/X*SK(K) - RETURN - END - -C ********************************** - - SUBROUTINE ENXA(N,X,EN) -C -C ============================================ -C Purpose: Compute exponential integral En(x) -C Input : x --- Argument of En(x) ( x ≤ 20 ) -C n --- Order of En(x) -C Output: EN(n) --- En(x) -C Routine called: E1XB for computing E1(x) -C ============================================ -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION EN(0:N) - EN(0)=DEXP(-X)/X - CALL E1XB(X,E1) - EN(1)=E1 - DO 10 K=2,N - EK=(DEXP(-X)-X*E1)/(K-1.0D0) - EN(K)=EK -10 E1=EK - RETURN - END - - - -C ********************************** - - SUBROUTINE GAIH(X,GA) -C -C ===================================================== -C Purpose: Compute gamma function Г(x) -C Input : x --- Argument of Г(x), x = n/2, n=1,2,… -C Output: GA --- Г(x) -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.INT(X).AND.X.GT.0.0) THEN - GA=1.0D0 - M1=INT(X-1.0) - DO 10 K=2,M1 -10 GA=GA*K - ELSE IF (X+.5D0.EQ.INT(X+.5D0).AND.X.GT.0.0) THEN - M=INT(X) - GA=DSQRT(PI) - DO 15 K=1,M -15 GA=0.5D0*GA*(2.0D0*K-1.0D0) - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE PBVV(V,X,VV,VP,PVF,PVD) -C -C =================================================== -C Purpose: Compute parabolic cylinder functions Vv(x) -C and 
their derivatives -C Input: x --- Argument of Vv(x) -C v --- Order of Vv(x) -C Output: VV(na) --- Vv(x) -C VP(na) --- Vv'(x) -C ( na = |n|, v = n+v0, |v0| < 1 -C n = 0,±1,±2,… ) -C PVF --- Vv(x) -C PVD --- Vv'(x) -C Routines called: -C (1) VVSA for computing Vv(x) for small |x| -C (2) VVLA for computing Vv(x) for large |x| -C =================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION VV(0:*),VP(0:*) - PI=3.141592653589793D0 - XA=DABS(X) - VH=V - V=V+DSIGN(1.0D0,V) - NV=INT(V) - V0=V-NV - NA=ABS(NV) - QE=DEXP(0.25D0*X*X) - Q2P=DSQRT(2.0D0/PI) - JA=0 - IF (NA.GE.1) JA=1 - F=0.0D0 - IF (V.LE.0.0) THEN - IF (V0.EQ.0.0) THEN - IF (XA.LE.7.5) CALL VVSA(V0,X,PV0) - IF (XA.GT.7.5) CALL VVLA(V0,X,PV0) - F0=Q2P*QE - F1=X*F0 - VV(0)=PV0 - VV(1)=F0 - VV(2)=F1 - ELSE - DO 10 L=0,JA - V1=V0-L - IF (XA.LE.7.5) CALL VVSA(V1,X,F1) - IF (XA.GT.7.5) CALL VVLA(V1,X,F1) - IF (L.EQ.0) F0=F1 -10 CONTINUE - VV(0)=F0 - VV(1)=F1 - ENDIF - KV=2 - IF (V0.EQ.0.0) KV=3 - DO 15 K=KV,NA - F=X*F1+(K-V0-2.0D0)*F0 - VV(K)=F - F0=F1 -15 F1=F - ELSE - IF (X.GE.0.0.AND.X.LE.7.5D0) THEN - V2=V - IF (V2.LT.1.0) V2=V2+1.0D0 - CALL VVSA(V2,X,F1) - V1=V2-1.0D0 - KV=INT(V2) - CALL VVSA(V1,X,F0) - VV(KV)=F1 - VV(KV-1)=F0 - DO 20 K=KV-2,0,-1 - F=X*F0-(K+V0+2.0D0)*F1 - IF (K.LE.NA) VV(K)=F - F1=F0 -20 F0=F - ELSE IF (X.GT.7.5D0) THEN - CALL VVLA(V0,X,PV0) - M=100+ABS(NA) - VV(1)=PV0 - F1=0.0D0 - F0=1.0D-40 - DO 25 K=M,0,-1 - F=X*F0-(K+V0+2.0D0)*F1 - IF (K.LE.NA) VV(K)=F - F1=F0 -25 F0=F - S0=PV0/F - DO 30 K=0,NA -30 VV(K)=S0*VV(K) - ELSE - IF (XA.LE.7.5D0) THEN - CALL VVSA(V0,X,F0) - V1=V0+1.0 - CALL VVSA(V1,X,F1) - ELSE - CALL VVLA(V0,X,F0) - V1=V0+1.0D0 - CALL VVLA(V1,X,F1) - ENDIF - VV(0)=F0 - VV(1)=F1 - DO 35 K=2,NA - F=(X*F1-F0)/(K+V0) - VV(K)=F - F0=F1 -35 F1=F - ENDIF - ENDIF - DO 40 K=0,NA-1 - V1=V0+K - IF (V.GE.0.0D0) THEN - VP(K)=0.5D0*X*VV(K)-(V1+1.0D0)*VV(K+1) - ELSE - VP(K)=-0.5D0*X*VV(K)+VV(K+1) - ENDIF -40 CONTINUE - PVF=VV(NA-1) - PVD=VP(NA-1) - V=VH 
- RETURN - END - - - -C ********************************** - - SUBROUTINE CLQMN(MM,M,N,X,Y,CQM,CQD) -C -C ======================================================= -C Purpose: Compute the associated Legendre functions of -C the second kind, Qmn(z) and Qmn'(z), for a -C complex argument -C Input : x --- Real part of z -C y --- Imaginary part of z -C m --- Order of Qmn(z) ( m = 0,1,2,… ) -C n --- Degree of Qmn(z) ( n = 0,1,2,… ) -C mm --- Physical dimension of CQM and CQD -C Output: CQM(m,n) --- Qmn(z) -C CQD(m,n) --- Qmn'(z) -C ======================================================= -C - IMPLICIT DOUBLE PRECISION (X,Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CQM(0:MM,0:N),CQD(0:MM,0:N) - Z=CMPLX(X,Y) - IF (DABS(X).EQ.1.0D0.AND.Y.EQ.0.0D0) THEN - DO 10 I=0,M - DO 10 J=0,N - CQM(I,J)=(1.0D+300,0.0D0) - CQD(I,J)=(1.0D+300,0.0D0) -10 CONTINUE - RETURN - ENDIF - XC=CDABS(Z) - LS=0 - IF (DIMAG(Z).EQ.0.0D0.OR.XC.LT.1.0D0) LS=1 - IF (XC.GT.1.0D0) LS=-1 - ZQ=CDSQRT(LS*(1.0D0-Z*Z)) - ZS=LS*(1.0D0-Z*Z) - CQ0=0.5D0*CDLOG(LS*(1.0D0+Z)/(1.0D0-Z)) - IF (XC.LT.1.0001D0) THEN - CQM(0,0)=CQ0 - CQM(0,1)=Z*CQ0-1.0D0 - CQM(1,0)=-1.0D0/ZQ - CQM(1,1)=-ZQ*(CQ0+Z/(1.0D0-Z*Z)) - DO 15 I=0,1 - DO 15 J=2,N - CQM(I,J)=((2.0D0*J-1.0D0)*Z*CQM(I,J-1) - & -(J+I-1.0D0)*CQM(I,J-2))/(J-I) -15 CONTINUE - DO 20 J=0,N - DO 20 I=2,M - CQM(I,J)=-2.0D0*(I-1.0D0)*Z/ZQ*CQM(I-1,J)-LS* - & (J+I-1.0D0)*(J-I+2.0D0)*CQM(I-2,J) -20 CONTINUE - ELSE - IF (XC.GT.1.1) THEN - KM=40+M+N - ELSE - KM=(40+M+N)*INT(-1.0-1.8*LOG(XC-1.0)) - ENDIF - CQF2=(0.0D0,0.0D0) - CQF1=(1.0D0,0.0D0) - DO 25 K=KM,0,-1 - CQF0=((2*K+3.0D0)*Z*CQF1-(K+2.0D0)*CQF2)/(K+1.0D0) - IF (K.LE.N) CQM(0,K)=CQF0 - CQF2=CQF1 -25 CQF1=CQF0 - DO 30 K=0,N -30 CQM(0,K)=CQ0*CQM(0,K)/CQF0 - CQF2=0.0D0 - CQF1=1.0D0 - DO 35 K=KM,0,-1 - CQF0=((2*K+3.0D0)*Z*CQF1-(K+1.0D0)*CQF2)/(K+2.0D0) - IF (K.LE.N) CQM(1,K)=CQF0 - CQF2=CQF1 -35 CQF1=CQF0 - CQ10=-1.0D0/ZQ - DO 40 K=0,N -40 CQM(1,K)=CQ10*CQM(1,K)/CQF0 - DO 45 J=0,N - CQ0=CQM(0,J) - CQ1=CQM(1,J) - DO 45 I=0,M-2 - 
CQF=-2.0D0*(I+1)*Z/ZQ*CQ1+(J-I)*(J+I+1.0D0)*CQ0 - CQM(I+2,J)=CQF - CQ0=CQ1 - CQ1=CQF -45 CONTINUE - ENDIF - CQD(0,0)=LS/ZS - DO 50 J=1,N -50 CQD(0,J)=LS*J*(CQM(0,J-1)-Z*CQM(0,J))/ZS - DO 55 J=0,N - DO 55 I=1,M - CQD(I,J)=LS*I*Z/ZS*CQM(I,J)+(I+J)*(J-I+1.0D0) - & /ZQ*CQM(I-1,J) -55 CONTINUE - RETURN - END - - -C ********************************** - - SUBROUTINE SEGV(M,N,C,KD,CV,EG) -C -C ========================================================= -C Purpose: Compute the characteristic values of spheroidal -C wave functions -C Input : m --- Mode parameter -C n --- Mode parameter -C c --- Spheroidal parameter -C KD --- Function code -C KD=1 for Prolate; KD=-1 for Oblate -C Output: CV --- Characteristic value for given m, n and c -C EG(L) --- Characteristic value for mode m and n' -C ( L = n' - m + 1 ) -C ========================================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION B(100),H(100),D(300),E(300),F(300),CV0(100), - & A(300),G(300),EG(200) - IF (C.LT.1.0D-10) THEN - DO 5 I=1,N-M+1 -5 EG(I)=(I+M)*(I+M-1.0D0) - GO TO 70 - ENDIF - ICM=(N-M+2)/2 - NM=10+INT(0.5*(N-M)+C) - CS=C*C*KD - K=0 - DO 60 L=0,1 - DO 10 I=1,NM - IF (L.EQ.0) K=2*(I-1) - IF (L.EQ.1) K=2*I-1 - DK0=M+K - DK1=M+K+1 - DK2=2*(M+K) - D2K=2*M+K - A(I)=(D2K+2.0)*(D2K+1.0)/((DK2+3.0)*(DK2+5.0))*CS - D(I)=DK0*DK1+(2.0*DK0*DK1-2.0*M*M-1.0)/((DK2-1.0) - & *(DK2+3.0))*CS -10 G(I)=K*(K-1.0)/((DK2-3.0)*(DK2-1.0))*CS - DO 15 K=2,NM - E(K)=DSQRT(A(K-1)*G(K)) -15 F(K)=E(K)*E(K) - F(1)=0.0D0 - E(1)=0.0D0 - XA=D(NM)+DABS(E(NM)) - XB=D(NM)-DABS(E(NM)) - NM1=NM-1 - DO 20 I=1,NM1 - T=DABS(E(I))+DABS(E(I+1)) - T1=D(I)+T - IF (XA.LT.T1) XA=T1 - T1=D(I)-T - IF (T1.LT.XB) XB=T1 -20 CONTINUE - DO 25 I=1,ICM - B(I)=XA -25 H(I)=XB - DO 55 K=1,ICM - DO 30 K1=K,ICM - IF (B(K1).LT.B(K)) THEN - B(K)=B(K1) - GO TO 35 - ENDIF -30 CONTINUE -35 IF (K.NE.1.AND.H(K).LT.H(K-1)) H(K)=H(K-1) -40 X1=(B(K)+H(K))/2.0D0 - CV0(K)=X1 - IF (DABS((B(K)-H(K))/X1).LT.1.0D-14) GO TO 50 - J=0 - S=1.0D0 - DO 45 
I=1,NM - IF (S.EQ.0.0D0) S=S+1.0D-30 - T=F(I)/S - S=D(I)-T-X1 - IF (S.LT.0.0D0) J=J+1 -45 CONTINUE - IF (J.LT.K) THEN - H(K)=X1 - ELSE - B(K)=X1 - IF (J.GE.ICM) THEN - B(ICM)=X1 - ELSE - IF (H(J+1).LT.X1) H(J+1)=X1 - IF (X1.LT.B(J)) B(J)=X1 - ENDIF - ENDIF - GO TO 40 -50 CV0(K)=X1 - IF (L.EQ.0) EG(2*K-1)=CV0(K) - IF (L.EQ.1) EG(2*K)=CV0(K) -55 CONTINUE -60 CONTINUE -70 CV=EG(N-M+1) - RETURN - END - - -C ********************************** - - SUBROUTINE CIKNB(N,Z,NM,CBI,CDI,CBK,CDK) -C -C ============================================================ -C Purpose: Compute modified Bessel functions In(z) and Kn(z), -C and their derivatives for a complex argument -C Input: z --- Complex argument -C n --- Order of In(z) and Kn(z) -C Output: CBI(n) --- In(z) -C CDI(n) --- In'(z) -C CBK(n) --- Kn(z) -C CDK(n) --- Kn'(z) -C NM --- Highest order computed -C Routones called: -C MSTA1 and MSTA2 to compute the starting point for -C backward recurrence -C =========================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBI(0:N),CDI(0:N),CBK(0:N),CDK(0:N) - PI=3.141592653589793D0 - EL=0.57721566490153D0 - A0=CDABS(Z) - NM=N - IF (A0.LT.1.0D-100) THEN - DO 10 K=0,N - CBI(K)=(0.0D0,0.0D0) - CBK(K)=(1.0D+300,0.0D0) - CDI(K)=(0.0D0,0.0D0) -10 CDK(K)=-(1.0D+300,0.0D0) - CBI(0)=(1.0D0,0.0D0) - CDI(1)=(0.5D0,0.0D0) - RETURN - ENDIF - Z1=Z - CI=(0.0D0,1.0D0) - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (N.EQ.0) NM=1 - M=MSTA1(A0,200) - IF (M.LT.NM) THEN - NM=M - ELSE - M=MSTA2(A0,NM,15) - ENDIF - CBS=0.0D0 - CSK0=0.0D0 - CF0=0.0D0 - CF1=1.0D-100 - DO 15 K=M,0,-1 - CF=2.0D0*(K+1.0D0)*CF1/Z1+CF0 - IF (K.LE.NM) CBI(K)=CF - IF (K.NE.0.AND.K.EQ.2*INT(K/2)) CSK0=CSK0+4.0D0*CF/K - CBS=CBS+2.0D0*CF - CF0=CF1 -15 CF1=CF - CS0=CDEXP(Z1)/(CBS-CF) - DO 20 K=0,NM -20 CBI(K)=CS0*CBI(K) - IF (A0.LE.9.0) THEN - CBK(0)=-(CDLOG(0.5D0*Z1)+EL)*CBI(0)+CS0*CSK0 - CBK(1)=(1.0D0/Z1-CBI(1)*CBK(0))/CBI(0) - ELSE - 
CA0=CDSQRT(PI/(2.0D0*Z1))*CDEXP(-Z1) - K0=16 - IF (A0.GE.25.0) K0=10 - IF (A0.GE.80.0) K0=8 - IF (A0.GE.200.0) K0=6 - DO 30 L=0,1 - CBKL=1.0D0 - VT=4.0D0*L - CR=(1.0D0,0.0D0) - DO 25 K=1,K0 - CR=0.125D0*CR*(VT-(2.0*K-1.0)**2)/(K*Z1) -25 CBKL=CBKL+CR - CBK(L)=CA0*CBKL -30 CONTINUE - ENDIF - CG0=CBK(0) - CG1=CBK(1) - DO 35 K=2,NM - CG=2.0D0*(K-1.0D0)/Z1*CG1+CG0 - CBK(K)=CG - CG0=CG1 -35 CG1=CG - IF (DBLE(Z).LT.0.0) THEN - FAC=1.0D0 - DO 45 K=0,NM - IF (DIMAG(Z).LT.0.0) THEN - CBK(K)=FAC*CBK(K)+CI*PI*CBI(K) - ELSE - CBK(K)=FAC*CBK(K)-CI*PI*CBI(K) - ENDIF - CBI(K)=FAC*CBI(K) - FAC=-FAC -45 CONTINUE - ENDIF - CDI(0)=CBI(1) - CDK(0)=-CBK(1) - DO 50 K=1,NM - CDI(K)=CBI(K-1)-K/Z*CBI(K) -50 CDK(K)=-CBK(K-1)-K/Z*CBK(K) - RETURN - END - - -C ********************************** - - SUBROUTINE CIKNA(N,Z,NM,CBI,CDI,CBK,CDK) -C -C ======================================================== -C Purpose: Compute modified Bessel functions In(z), Kn(x) -C and their derivatives for a complex argument -C Input : z --- Complex argument of In(z) and Kn(z) -C n --- Order of In(z) and Kn(z) -C Output: CBI(n) --- In(z) -C CDI(n) --- In'(z) -C CBK(n) --- Kn(z) -C CDK(n) --- Kn'(z) -C NM --- Highest order computed -C Routines called: -C (1) CIK01 to compute I0(z), I1(z) K0(z) & K1(z) -C (2) MSTA1 and MSTA2 to compute the starting -C point for backward recurrence -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,P,W,X,Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION CBI(0:N),CDI(0:N),CBK(0:N),CDK(0:N) - A0=CDABS(Z) - NM=N - IF (A0.LT.1.0D-100) THEN - DO 10 K=0,N - CBI(K)=(0.0D0,0.0D0) - CDI(K)=(0.0D0,0.0D0) - CBK(K)=-(1.0D+300,0.0D0) -10 CDK(K)=(1.0D+300,0.0D0) - CBI(0)=(1.0D0,0.0D0) - CDI(1)=(0.5D0,0.0D0) - RETURN - ENDIF - CALL CIK01(Z,CBI0,CDI0,CBI1,CDI1,CBK0,CDK0,CBK1,CDK1) - CBI(0)=CBI0 - CBI(1)=CBI1 - CBK(0)=CBK0 - CBK(1)=CBK1 - CDI(0)=CDI0 - CDI(1)=CDI1 - CDK(0)=CDK0 - CDK(1)=CDK1 - IF (N.LE.1) RETURN - M=MSTA1(A0,200) - IF (M.LT.N) THEN - NM=M - ELSE 
- M=MSTA2(A0,N,15) - ENDIF - CF2=(0.0D0,0.0D0) - CF1=(1.0D-100,0.0D0) - DO 45 K=M,0,-1 - CF=2.0D0*(K+1.0D0)/Z*CF1+CF2 - IF (K.LE.NM) CBI(K)=CF - CF2=CF1 -45 CF1=CF - CS=CBI0/CF - DO 50 K=0,NM -50 CBI(K)=CS*CBI(K) - DO 60 K=2,NM - IF (CDABS(CBI(K-1)).GT.CDABS(CBI(K-2))) THEN - CKK=(1.0D0/Z-CBI(K)*CBK(K-1))/CBI(K-1) - ELSE - CKK=(CBI(K)*CBK(K-2)+2.0D0*(K-1.0D0)/(Z*Z))/CBI(K-2) - ENDIF -60 CBK(K)=CKK - DO 70 K=2,NM - CDI(K)=CBI(K-1)-K/Z*CBI(K) -70 CDK(K)=-CBK(K-1)-K/Z*CBK(K) - RETURN - END - - - -C ********************************** - - SUBROUTINE MTU12(KF,KC,M,Q,X,F1R,D1R,F2R,D2R) -C -C ============================================================== -C Purpose: Compute modified Mathieu functions of the first and -C second kinds, Mcm(1)(2)(x,q) and Msm(1)(2)(x,q), -C and their derivatives -C Input: KF --- Function code -C KF=1 for computing Mcm(x,q) -C KF=2 for computing Msm(x,q) -C KC --- Function Code -C KC=1 for computing the first kind -C KC=2 for computing the second kind -C or Msm(2)(x,q) and Msm(2)'(x,q) -C KC=3 for computing both the first -C and second kinds -C m --- Order of Mathieu functions -C q --- Parameter of Mathieu functions ( q ≥ 0 ) -C x --- Argument of Mathieu functions -C Output: F1R --- Mcm(1)(x,q) or Msm(1)(x,q) -C D1R --- Derivative of Mcm(1)(x,q) or Msm(1)(x,q) -C F2R --- Mcm(2)(x,q) or Msm(2)(x,q) -C D2R --- Derivative of Mcm(2)(x,q) or Msm(2)(x,q) -C Routines called: -C (1) CVA2 for computing the characteristic values -C (2) FCOEF for computing expansion coefficients -C (3) JYNB for computing Jn(x), Yn(x) and their -C derivatives -C ============================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION FG(251),BJ1(0:251),DJ1(0:251),BJ2(0:251),DJ2(0:251), - & BY1(0:251),DY1(0:251),BY2(0:251),DY2(0:251) - EPS=1.0D-14 - IF (KF.EQ.1.AND.M.EQ.2*INT(M/2)) KD=1 - IF (KF.EQ.1.AND.M.NE.2*INT(M/2)) KD=2 - IF (KF.EQ.2.AND.M.NE.2*INT(M/2)) KD=3 - IF (KF.EQ.2.AND.M.EQ.2*INT(M/2)) KD=4 - CALL CVA2(KD,M,Q,A) - IF 
(Q.LE.1.0D0) THEN - QM=7.5+56.1*SQRT(Q)-134.7*Q+90.7*SQRT(Q)*Q - ELSE - QM=17.0+3.1*SQRT(Q)-.126*Q+.0037*SQRT(Q)*Q - ENDIF - KM=INT(QM+0.5*M) - CALL FCOEF(KD,M,Q,A,FG) - IC=INT(M/2)+1 - IF (KD.EQ.4) IC=M/2 - C1=DEXP(-X) - C2=DEXP(X) - U1=DSQRT(Q)*C1 - U2=DSQRT(Q)*C2 - CALL JYNB(KM,U1,NM,BJ1,DJ1,BY1,DY1) - CALL JYNB(KM,U2,NM,BJ2,DJ2,BY2,DY2) - W1=0.0D0 - W2=0.0D0 - IF (KC.EQ.2) GO TO 50 - F1R=0.0D0 - DO 30 K=1,KM - IF (KD.EQ.1) THEN - F1R=F1R+(-1)**(IC+K)*FG(K)*BJ1(K-1)*BJ2(K-1) - ELSE IF (KD.EQ.2.OR.KD.EQ.3) THEN - F1R=F1R+(-1)**(IC+K)*FG(K)*(BJ1(K-1)*BJ2(K) - & +(-1)**KD*BJ1(K)*BJ2(K-1)) - ELSE - F1R=F1R+(-1)**(IC+K)*FG(K)*(BJ1(K-1)*BJ2(K+1) - & -BJ1(K+1)*BJ2(K-1)) - ENDIF - IF (K.GE.5.AND.DABS(F1R-W1).LT.DABS(F1R)*EPS) GO TO 35 -30 W1=F1R -35 F1R=F1R/FG(1) - D1R=0.0D0 - DO 40 K=1,KM - IF (KD.EQ.1) THEN - D1R=D1R+(-1)**(IC+K)*FG(K)*(C2*BJ1(K-1)*DJ2(K-1) - & -C1*DJ1(K-1)*BJ2(K-1)) - ELSE IF (KD.EQ.2.OR.KD.EQ.3) THEN - D1R=D1R+(-1)**(IC+K)*FG(K)*(C2*(BJ1(K-1)*DJ2(K) - & +(-1)**KD*BJ1(K)*DJ2(K-1))-C1*(DJ1(K-1)*BJ2(K) - & +(-1)**KD*DJ1(K)*BJ2(K-1))) - ELSE - D1R=D1R+(-1)**(IC+K)*FG(K)*(C2*(BJ1(K-1)*DJ2(K+1) - & -BJ1(K+1)*DJ2(K-1))-C1*(DJ1(K-1)*BJ2(K+1) - & -DJ1(K+1)*BJ2(K-1))) - ENDIF - IF (K.GE.5.AND.DABS(D1R-W2).LT.DABS(D1R)*EPS) GO TO 45 -40 W2=D1R -45 D1R=D1R*DSQRT(Q)/FG(1) - IF (KC.EQ.1) RETURN -50 F2R=0.0D0 - DO 55 K=1,KM - IF (KD.EQ.1) THEN - F2R=F2R+(-1)**(IC+K)*FG(K)*BJ1(K-1)*BY2(K-1) - ELSE IF (KD.EQ.2.OR.KD.EQ.3) THEN - F2R=F2R+(-1)**(IC+K)*FG(K)*(BJ1(K-1)*BY2(K) - & +(-1)**KD*BJ1(K)*BY2(K-1)) - ELSE - F2R=F2R+(-1)**(IC+K)*FG(K)*(BJ1(K-1)*BY2(K+1) - & -BJ1(K+1)*BY2(K-1)) - ENDIF - IF (K.GE.5.AND.DABS(F2R-W1).LT.DABS(F2R)*EPS) GO TO 60 -55 W1=F2R -60 F2R=F2R/FG(1) - D2R=0.0D0 - DO 65 K=1,KM - IF (KD.EQ.1) THEN - D2R=D2R+(-1)**(IC+K)*FG(K)*(C2*BJ1(K-1)*DY2(K-1) - & -C1*DJ1(K-1)*BY2(K-1)) - ELSE IF (KD.EQ.2.OR.KD.EQ.3) THEN - D2R=D2R+(-1)**(IC+K)*FG(K)*(C2*(BJ1(K-1)*DY2(K) - & +(-1)**KD*BJ1(K)*DY2(K-1))-C1*(DJ1(K-1)*BY2(K) - & +(-1)**KD*DJ1(K)*BY2(K-1))) - 
ELSE - D2R=D2R+(-1)**(IC+K)*FG(K)*(C2*(BJ1(K-1)*DY2(K+1) - & -BJ1(K+1)*DY2(K-1))-C1*(DJ1(K-1)*BY2(K+1) - & -DJ1(K+1)*BY2(K-1))) - ENDIF - IF (K.GE.5.AND.DABS(D2R-W2).LT.DABS(D2R)*EPS) GO TO 70 -65 W2=D2R -70 D2R=D2R*DSQRT(Q)/FG(1) - RETURN - END - - - -C ********************************** - - SUBROUTINE CIK01(Z,CBI0,CDI0,CBI1,CDI1,CBK0,CDK0,CBK1,CDK1) -C -C ========================================================== -C Purpose: Compute modified Bessel functions I0(z), I1(z), -C K0(z), K1(z), and their derivatives for a -C complex argument -C Input : z --- Complex argument -C Output: CBI0 --- I0(z) -C CDI0 --- I0'(z) -C CBI1 --- I1(z) -C CDI1 --- I1'(z) -C CBK0 --- K0(z) -C CDK0 --- K0'(z) -C CBK1 --- K1(z) -C CDK1 --- K1'(z) -C ========================================================== -C - IMPLICIT DOUBLE PRECISION (A,B,D-H,O-Y) - IMPLICIT COMPLEX*16 (C,Z) - DIMENSION A(12),B(12),A1(10) - PI=3.141592653589793D0 - CI=(0.0D0,1.0D0) - A0=CDABS(Z) - Z2=Z*Z - Z1=Z - IF (A0.EQ.0.0D0) THEN - CBI0=(1.0D0,0.0D0) - CBI1=(0.0D0,0.0D0) - CDI0=(0.0D0,0.0D0) - CDI1=(0.5D0,0.0D0) - CBK0=(1.0D+300,0.0D0) - CBK1=(1.0D+300,0.0D0) - CDK0=-(1.0D+300,0.0D0) - CDK1=-(1.0D+300,0.0D0) - RETURN - ENDIF - IF (DBLE(Z).LT.0.0) Z1=-Z - IF (A0.LE.18.0) THEN - CBI0=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 10 K=1,50 - CR=0.25D0*CR*Z2/(K*K) - CBI0=CBI0+CR - IF (CDABS(CR/CBI0).LT.1.0D-15) GO TO 15 -10 CONTINUE -15 CBI1=(1.0D0,0.0D0) - CR=(1.0D0,0.0D0) - DO 20 K=1,50 - CR=0.25D0*CR*Z2/(K*(K+1)) - CBI1=CBI1+CR - IF (CDABS(CR/CBI1).LT.1.0D-15) GO TO 25 -20 CONTINUE -25 CBI1=0.5D0*Z1*CBI1 - ELSE - DATA A/0.125D0,7.03125D-2, - & 7.32421875D-2,1.1215209960938D-1, - & 2.2710800170898D-1,5.7250142097473D-1, - & 1.7277275025845D0,6.0740420012735D0, - & 2.4380529699556D01,1.1001714026925D02, - & 5.5133589612202D02,3.0380905109224D03/ - DATA B/-0.375D0,-1.171875D-1, - & -1.025390625D-1,-1.4419555664063D-1, - & -2.7757644653320D-1,-6.7659258842468D-1, - & -1.9935317337513D0,-6.8839142681099D0, - & 
-2.7248827311269D01,-1.2159789187654D02, - & -6.0384407670507D02,-3.3022722944809D03/ - K0=12 - IF (A0.GE.35.0) K0=9 - IF (A0.GE.50.0) K0=7 - CA=CDEXP(Z1)/CDSQRT(2.0D0*PI*Z1) - CBI0=(1.0D0,0.0D0) - ZR=1.0D0/Z1 - DO 30 K=1,K0 -30 CBI0=CBI0+A(K)*ZR**K - CBI0=CA*CBI0 - CBI1=(1.0D0,0.0D0) - DO 35 K=1,K0 -35 CBI1=CBI1+B(K)*ZR**K - CBI1=CA*CBI1 - ENDIF - IF (A0.LE.9.0) THEN - CS=(0.0D0,0.0D0) - CT=-CDLOG(0.5D0*Z1)-0.5772156649015329D0 - W0=0.0D0 - CR=(1.0D0,0.0D0) - DO 40 K=1,50 - W0=W0+1.0D0/K - CR=0.25D0*CR/(K*K)*Z2 - CS=CS+CR*(W0+CT) - IF (CDABS((CS-CW)/CS).LT.1.0D-15) GO TO 45 -40 CW=CS -45 CBK0=CT+CS - ELSE - DATA A1/0.125D0,0.2109375D0, - & 1.0986328125D0,1.1775970458984D01, - & 2.1461706161499D02,5.9511522710323D03, - & 2.3347645606175D05,1.2312234987631D07, - & 8.401390346421D08,7.2031420482627D10/ - CB=0.5D0/Z1 - ZR2=1.0D0/Z2 - CBK0=(1.0D0,0.0D0) - DO 50 K=1,10 -50 CBK0=CBK0+A1(K)*ZR2**K - CBK0=CB*CBK0/CBI0 - ENDIF - CBK1=(1.0D0/Z1-CBI1*CBK0)/CBI0 - IF (DBLE(Z).LT.0.0) THEN - IF (DIMAG(Z).LT.0.0) CBK0=CBK0+CI*PI*CBI0 - IF (DIMAG(Z).GT.0.0) CBK0=CBK0-CI*PI*CBI0 - IF (DIMAG(Z).LT.0.0) CBK1=-CBK1+CI*PI*CBI1 - IF (DIMAG(Z).GT.0.0) CBK1=-CBK1-CI*PI*CBI1 - CBI1=-CBI1 - ENDIF - CDI0=CBI1 - CDI1=CBI0-1.0D0/Z*CBI1 - CDK0=-CBK1 - CDK1=-CBK0-1.0D0/Z*CBK1 - RETURN - END - -C ********************************** - - SUBROUTINE CPSI(X,Y,PSR,PSI) -C -C ============================================= -C Purpose: Compute the psi function for a -C complex argument -C Input : x --- Real part of z -C y --- Imaginary part of z -C Output: PSR --- Real part of psi(z) -C PSI --- Imaginary part of psi(z) -C ============================================= -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION A(8) - DATA A/-.8333333333333D-01,.83333333333333333D-02, - & -.39682539682539683D-02,.41666666666666667D-02, - & -.75757575757575758D-02,.21092796092796093D-01, - & -.83333333333333333D-01,.4432598039215686D0/ - PI=3.141592653589793D0 - IF (Y.EQ.0.0D0.AND.X.EQ.INT(X).AND.X.LE.0.0D0) THEN - 
PSR=1.0D+300 - PSI=0.0D0 - ELSE - X1=X - Y1=Y - IF (X.LT.0.0D0) THEN - X=-X - Y=-Y - ENDIF - X0=X - N=0 - IF (X.LT.8.0D0) THEN - N=8-INT(X) - X0=X+N - ENDIF - TH=0.0D0 - IF (X0.EQ.0.0D0.AND.Y.NE.0.0D0) TH=0.5D0*PI - IF (X0.NE.0.0D0) TH=DATAN(Y/X0) - Z2=X0*X0+Y*Y - Z0=DSQRT(Z2) - PSR=DLOG(Z0)-0.5D0*X0/Z2 - PSI=TH+0.5D0*Y/Z2 - DO 10 K=1,8 - PSR=PSR+A(K)*Z2**(-K)*DCOS(2.0D0*K*TH) -10 PSI=PSI-A(K)*Z2**(-K)*DSIN(2.0D0*K*TH) - IF (X.LT.8.0D0) THEN - RR=0.0D0 - RI=0.0D0 - DO 20 K=1,N - RR=RR+(X0-K)/((X0-K)**2.0D0+Y*Y) -20 RI=RI+Y/((X0-K)**2.0D0+Y*Y) - PSR=PSR-RR - PSI=PSI+RI - ENDIF - IF (X1.LT.0.0D0) THEN - TN=DTAN(PI*X) - TM=DTANH(PI*Y) - CT2=TN*TN+TM*TM - PSR=PSR+X/(X*X+Y*Y)+PI*(TN-TN*TM*TM)/CT2 - PSI=PSI-Y/(X*X+Y*Y)-PI*TM*(1.0D0+TN*TN)/CT2 - X=X1 - Y=Y1 - ENDIF - ENDIF - RETURN - END - -C ********************************** - - SUBROUTINE SPHY(N,X,NM,SY,DY) -C -C ====================================================== -C Purpose: Compute spherical Bessel functions yn(x) and -C their derivatives -C Input : x --- Argument of yn(x) ( x ≥ 0 ) -C n --- Order of yn(x) ( n = 0,1,… ) -C Output: SY(n) --- yn(x) -C DY(n) --- yn'(x) -C NM --- Highest order computed -C ====================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION SY(0:N),DY(0:N) - NM=N - IF (X.LT.1.0D-60) THEN - DO 10 K=0,N - SY(K)=-1.0D+300 -10 DY(K)=1.0D+300 - RETURN - ENDIF - SY(0)=-DCOS(X)/X - F0=SY(0) - DY(0)=(DSIN(X)+DCOS(X)/X)/X - IF (N.LT.1) THEN - RETURN - ENDIF - SY(1)=(SY(0)-DSIN(X))/X - F1=SY(1) - DO 15 K=2,N - F=(2.0D0*K-1.0D0)*F1/X-F0 - SY(K)=F - IF (DABS(F).GE.1.0D+300) GO TO 20 - F0=F1 -15 F1=F -20 NM=K-1 - DO 25 K=1,NM -25 DY(K)=SY(K-1)-(K+1.0D0)*SY(K)/X - RETURN - END - - - -C ********************************** - - SUBROUTINE JELP(U,HK,ESN,ECN,EDN,EPH) -C -C ======================================================== -C Purpose: Compute Jacobian elliptic functions sn u, cn u -C and dn u -C Input : u --- Argument of Jacobian elliptic fuctions -C Hk --- 
Modulus k ( 0 ≤ k ≤ 1 ) -C Output : ESN --- sn u -C ECN --- cn u -C EDN --- dn u -C EPH --- phi ( in degrees ) -C ======================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - DIMENSION R(40) - PI=3.14159265358979D0 - A0=1.0D0 - B0=DSQRT(1.0D0-HK*HK) - DO 10 N=1,40 - A=(A0+B0)/2.0D0 - B=DSQRT(A0*B0) - C=(A0-B0)/2.0D0 - R(N)=C/A - IF (C.LT.1.0D-7) GO TO 15 - A0=A -10 B0=B -15 DN=2.0D0**N*A*U - D=0.0D0 - DO 20 J=N,1,-1 - T=R(J)*DSIN(DN) - SA=DATAN(T/DSQRT(DABS(1.0D0-T*T))) - D=.5D0*(DN+SA) -20 DN=D - EPH=D*180.0D0/PI - ESN=DSIN(D) - ECN=DCOS(D) - EDN=DSQRT(1.0D0-HK*HK*ESN*ESN) - RETURN - END - -C ********************************** - - SUBROUTINE STVHV(V,X,HV) -C -C ===================================================== -C Purpose: Compute Struve function Hv(x) with an -C arbitrary order v -C Input : v --- Order of Hv(x) ( -8.0 ≤ v ≤ 12.5 ) -C x --- Argument of Hv(x) ( x ≥ 0 ) -C Output: HV --- Hv(x) -C Note: numerically unstable away from the above range for `v` -C Routine called: GAMMA2 to compute the gamma function -C ===================================================== -C - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - PI=3.141592653589793D0 - IF (X.EQ.0.0D0) THEN - IF (V.GT.-1.0.OR.INT(V)-V.EQ.0.5D0) THEN - HV=0.0D0 - ELSE IF (V.LT.-1.0D0) THEN - HV=(-1)**(INT(0.5D0-V)-1)*1.0D+300 - ELSE IF (V.EQ.-1.0D0) THEN - HV=2.0D0/PI - ENDIF - RETURN - ENDIF - BYV=0.0D0 - BF=0.0D0 - QU0=0.0D0 - PU0=0.0D0 - IF (X.LE.20.0D0) THEN -C Power series for Hv (Abramowitz & Stegun 12.1.3) - V0=V+1.5D0 - CALL GAMMA2(V0,GA) - S=2.0D0/(DSQRT(PI)*GA) - R1=1.0D0 - DO 10 K=1,100 - VA=K+1.5D0 - CALL GAMMA2(VA,GA) - VB=V+K+1.5D0 - CALL GAMMA2(VB,GB) - R1=-R1*(0.5D0*X)**2 - R2=R1/(GA*GB) - S=S+R2 - IF (DABS(R2).LT.DABS(S)*1.0D-12) GO TO 15 -10 CONTINUE -15 HV=(0.5D0*X)**(V+1.0D0)*S - ELSE -C Asymptotic large |z| expansion for Hv - Yv (Abm & Stg 12.1.29) - SA=(0.5D0*X)**(V-1.0)/PI - V0=V+0.5D0 - CALL GAMMA2(V0,GA) - S=DSQRT(PI)/GA - R1=1.0D0 - DO 20 K=1,12 - 
VA=K+0.5D0 - CALL GAMMA2(VA,GA) - VB=-K+V+0.5D0 - CALL GAMMA2(VB,GB) - R1=R1/(0.5D0*X)**2 - S=S+R1*GA/GB -20 CONTINUE - S0=SA*S - -C Compute Y_(|v|-N) (Abm & Stg 9.2.6) - U=DABS(V) - N=INT(U) - U0=U-N - DO 35 L=0,1 - VT=4.0D0*(U0+L)**2 - R1=1.0D0 - PU1=1.0D0 - DO 25 K=1,12 - R1=-0.0078125D0*R1*(VT-(4.0*K-3.0D0)**2)* - & (VT-(4.0D0*K-1.0)**2)/((2.0D0*K-1.0)*K*X*X) - PU1=PU1+R1 -25 CONTINUE - QU1=1.0D0 - R2=1.0D0 - DO 30 K=1,12 - R2=-0.0078125D0*R2*(VT-(4.0D0*K-1.0)**2)* - & (VT-(4.0D0*K+1.0)**2)/((2.0D0*K+1.0)*K*X*X) - QU1=QU1+R2 -30 CONTINUE - QU1=0.125D0*(VT-1.0D0)/X*QU1 - IF (L.EQ.0) THEN - PU0=PU1 - QU0=QU1 - ENDIF -35 CONTINUE - T0=X-(0.5*U0+0.25D0)*PI - T1=X-(0.5*U0+0.75D0)*PI - SR=DSQRT(2.0D0/(PI*X)) - BY0=SR*(PU0*DSIN(T0)+QU0*DCOS(T0)) - BY1=SR*(PU1*DSIN(T1)+QU1*DCOS(T1)) - -C Compute Y_|v| (Abm & Stg 9.1.27) - BF0=BY0 - BF1=BY1 - DO 40 K=2,N - BF=2.0D0*(K-1.0+U0)/X*BF1-BF0 - BF0=BF1 -40 BF1=BF - IF (N.EQ.0) BYV=BY0 - IF (N.EQ.1) BYV=BY1 - IF (N.GT.1) BYV=BF - -C Compute Y_v (handle the case v < 0 appropriately) - IF (V .LT. 0) THEN - IF (U0 .EQ. 
0) THEN -C Use symmetry (Abm & Stg 9.1.5) - BYV=(-1)**N*BYV - ELSE -C Use relation between Yv & Jv (Abm & Stg 9.1.6) - -C Compute J_(|v|-N) (Abm & Stg 9.2.5) - BJ0=SR*(PU0*DCOS(T0)-QU0*DSIN(T0)) - BJ1=SR*(PU1*DCOS(T1)-QU1*DSIN(T1)) -C Forward recurrence for J_|v| (Abm & Stg 9.1.27) -C It's OK for the limited range -8.0 ≤ v ≤ 12.5, -C since x >= 20 here; but would be unstable for v <~ -20 - BF0=BJ0 - BF1=BJ1 - DO 50 K=2,N - BF=2.0D0*(K-1.0+U0)/X*BF1-BF0 - BF0=BF1 -50 BF1=BF - IF (N.EQ.0) BJV=BJ0 - IF (N.EQ.1) BJV=BJ1 - IF (N.GT.1) BJV=BF - -C Compute Y_v (Abm & Stg 9.1.6) - BYV = DCOS(V*PI)*BYV + DSIN(-V*PI)*BJV - END IF - END IF - -C Compute H_v - HV=BYV+S0 - ENDIF - RETURN - END - - - -C ********************************** diff --git a/scipy-0.10.1/scipy/special/specfun_wrappers.c b/scipy-0.10.1/scipy/special/specfun_wrappers.c deleted file mode 100644 index d922f53176..0000000000 --- a/scipy-0.10.1/scipy/special/specfun_wrappers.c +++ /dev/null @@ -1,998 +0,0 @@ -/* This file is a collection of wrappers around the - * Specfun Fortran library of functions - */ - -#include "specfun_wrappers.h" -#include - -#define CADDR(z) (double *)(&((z).real)), (double*)(&((z).imag)) -#define F2C_CST(z) (double *)&((z)->real), (double *)&((z)->imag) - -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif - -extern double cephes_psi(double); -extern double cephes_struve(double, double); - -extern void F_FUNC(cgama,CGAMA)(double*,double*,int*,double*,double*); -extern void F_FUNC(cpsi,CPSI)(double*,double*,double*,double*); -extern void F_FUNC(hygfz,HYGFZ)(double*,double*,double*,Py_complex*,Py_complex*); -extern void F_FUNC(cchg,CCHG)(double*,double*,Py_complex*,Py_complex*); -extern void F_FUNC(chgm,CHGM)(double*,double*,double*,double*); -extern void 
F_FUNC(chgu,CHGU)(double*,double*,double*,double*,int*); -extern void F_FUNC(itairy,ITAIRY)(double*,double*,double*,double*,double*); -extern void F_FUNC(e1xb,E1XB)(double*,double*); -extern void F_FUNC(e1z,E1Z)(Py_complex*,Py_complex*); -extern void F_FUNC(eix,EIX)(double*,double*); -extern void F_FUNC(cerror,CERROR)(Py_complex*,Py_complex*); -extern void F_FUNC(stvh0,STVH0)(double*,double*); -extern void F_FUNC(stvh1,STVH1)(double*,double*); -extern void F_FUNC(stvhv,STVHV)(double*,double*,double*); -extern void F_FUNC(stvl0,STVL0)(double*,double*); -extern void F_FUNC(stvl1,STVL1)(double*,double*); -extern void F_FUNC(stvlv,STVLV)(double*,double*,double*); -extern void F_FUNC(itsh0,ITSH0)(double*,double*); -extern void F_FUNC(itth0,ITTH0)(double*,double*); -extern void F_FUNC(itsl0,ITSL0)(double*,double*); -extern void F_FUNC(klvna,KLVNA)(double*,double*,double*,double*,double*,double*,double*,double*,double*); -extern void F_FUNC(itjya,ITJYA)(double*,double*,double*); -extern void F_FUNC(ittjya,ITTJYA)(double*,double*,double*); -extern void F_FUNC(itika,ITIKA)(double*,double*,double*); -extern void F_FUNC(ittika,ITTIKA)(double*,double*,double*); -extern void F_FUNC(cfc,CFC)(Py_complex*,Py_complex*,Py_complex*); -extern void F_FUNC(cfs,CFS)(Py_complex*,Py_complex*,Py_complex*); -extern void F_FUNC(cva2,CVA2)(int*,int*,double*,double*); -extern void F_FUNC(mtu0,MTU0)(int*,int*,double*,double*,double*,double*); -extern void F_FUNC(mtu12,MTU12)(int*,int*,int*,double*,double*,double*,double*,double*,double*); -extern void F_FUNC(lpmv,LPMV)(double*,int*,double*,double*); -extern void F_FUNC(pbwa,PBWA)(double*,double*,double*,double*,double*,double*); -extern void F_FUNC(pbdv,PBDV)(double*,double*,double*,double*,double*,double*); -extern void F_FUNC(pbvv,PBVV)(double*,double*,double*,double*,double*,double*); -extern void F_FUNC(segv,SEGV)(int*,int*,double*,int*,double*,double*); -extern void 
F_FUNC(aswfa,ASWFA)(int*,int*,double*,double*,int*,double*,double*,double*); -extern void F_FUNC(rswfp,RSWFP)(int*,int*,double*,double*,double*,int*,double*,double*,double*,double*); -extern void F_FUNC(rswfo,RSWFO)(int*,int*,double*,double*,double*,int*,double*,double*,double*,double*); -extern void F_FUNC(ffk,FFK)(int*,double*,double*,double*,double*,double*,double*,double*,double*,double*); - - -/* This must be linked with fortran - */ - -Py_complex cgamma_wrap( Py_complex z) { - int kf = 1; - Py_complex cy; - - F_FUNC(cgama,CGAMA)(CADDR(z), &kf, CADDR(cy)); - return cy; -} - -Py_complex clngamma_wrap( Py_complex z) { - int kf = 0; - Py_complex cy; - - F_FUNC(cgama,CGAMA)(CADDR(z), &kf, CADDR(cy)); - return cy; -} - -Py_complex cpsi_wrap( Py_complex z) { - Py_complex cy; - - if (IMAG(z)==0.0) { - REAL(cy) = cephes_psi(REAL(z)); - IMAG(cy) = 0.0; - } - else { - F_FUNC(cpsi,CPSI)(CADDR(z), CADDR(cy)); - } - return cy; -} - -Py_complex crgamma_wrap( Py_complex z) { - int kf = 1; - Py_complex cy; - Py_complex cy2; - double magsq; - - F_FUNC(cgama,CGAMA)(CADDR(z), &kf, CADDR(cy)); - magsq = ABSQ(cy); - REAL(cy2) = REAL(cy) / magsq; - IMAG(cy2) = -IMAG(cy) / magsq; - return cy2; -} - -Py_complex chyp2f1_wrap( double a, double b, double c, Py_complex z) { - Py_complex outz; - int l1, l0; - - - l0 = ((c == floor(c)) && (c < 0)); - l1 = ((fabs(1-REAL(z)) < 1e-15) && (IMAG(z) == 0) && (c-a-b <= 0)); - if (l0 || l1) { - REAL(outz) = NPY_INFINITY; - IMAG(outz) = 0.0; - return outz; - } - F_FUNC(hygfz, HYGFZ)(&a, &b, &c, &z, &outz); - return outz; -} - -Py_complex chyp1f1_wrap(double a, double b, Py_complex z) { - Py_complex outz; - - F_FUNC(cchg,CCHG)(&a, &b, &z, &outz); - if (REAL(outz) == 1e300) { - REAL(outz) = NPY_INFINITY; - } - return outz; -} - - -double hypU_wrap(double a, double b, double x) { - double out; - int md; /* method code --- not returned */ - - F_FUNC(chgu,CHGU)(&a, &b, &x, &out, &md); - if (out == 1e300) out = NPY_INFINITY; - return out; - -} - -double 
hyp1f1_wrap(double a, double b, double x) { - double outy; - - F_FUNC(chgm,CHGM)(&a, &b, &x, &outy); - if (outy == 1e300) { - outy = NPY_INFINITY; - } - return outy; -} - -int itairy_wrap(double x, double *apt, double *bpt, double *ant, double *bnt) { - double tmp; - int flag = 0; - - if (x < 0) { - x = -x; - flag = 1; - } - F_FUNC(itairy,ITAIRY)(&x, apt, bpt, ant, bnt); - if (flag) { /* negative limit -- switch signs and roles */ - tmp = *apt; - *apt = -*ant; - *ant = -tmp; - tmp = *bpt; - *bpt = -*bnt; - *bnt = -tmp; - } - return 0; -} - - -double exp1_wrap(double x) { - double out; - - F_FUNC(e1xb,E1XB)(&x, &out); - CONVINF(out); - return out; -} - -Py_complex cexp1_wrap(Py_complex z) { - Py_complex outz; - - F_FUNC(e1z,E1Z)(&z, &outz); - ZCONVINF(outz); - return outz; -} - -double expi_wrap(double x) { - double out; - - F_FUNC(eix,EIX)(&x, &out); - CONVINF(out); - return out; -} - -Py_complex cexpi_wrap(Py_complex z) { - Py_complex outz; - - F_FUNC(eixz,EIXZ)(&z, &outz); - ZCONVINF(outz); - return outz; -} - -Py_complex cerf_wrap(Py_complex z) { - Py_complex outz; - - F_FUNC(cerror,CERROR)(&z, &outz); - return outz; -} - -double struve_wrap(double v, double x) { - double out; - double rem; - int flag=0; - - if (x < 0) { - rem = fmod(v, 2.0); - if (rem == 0) { - x = -x; - flag = 1; - } else if (rem == 1 || rem == -1) { - x = -x; - flag = 0; - } else { - /* non-integer v and x < 0 => complex-valued */ - return NPY_NAN; - } - } - - if ((v<-8.0) || (v>12.5)) { - out = cephes_struve(v, x); /* from cephes */ - } - else if (v==0.0) { - F_FUNC(stvh0,STVH0)(&x,&out); - CONVINF(out); - } - else if (v==1.0) { - F_FUNC(stvh1,STVH1)(&x,&out); - CONVINF(out); - } - else { - F_FUNC(stvhv,STVHV)(&v,&x,&out); - CONVINF(out); - } - if (flag) out = -out; - return out; -} - -double modstruve_wrap(double v, double x) { - double out; - int flag=0; - - if ((x < 0) & (floor(v)!=v)) return NPY_NAN; - if (v==0.0) { - if (x < 0) {x = -x; flag=1;} - F_FUNC(stvl0,STVL0)(&x,&out); - 
CONVINF(out); - if (flag) out = -out; - return out; - } - if (v==1.0) { - if (x < 0) x=-x; - F_FUNC(stvl1,STVL1)(&x,&out); - CONVINF(out); - return out; - } - if (x<0) { - x = -x; - flag = 1; - } - F_FUNC(stvlv,STVLV)(&v,&x,&out); - CONVINF(out); - if (flag && (!((int)floor(v) % 2))) out = -out; - return out; -} - -double itstruve0_wrap(double x) { - double out; - - if (x<0) x=-x; - F_FUNC(itsh0,ITSH0)(&x,&out); - CONVINF(out); - return out; -} - -double it2struve0_wrap(double x) { - double out; - int flag=0; - - if (x<0) {x=-x; flag=1;} - F_FUNC(itth0,ITTH0)(&x,&out); - CONVINF(out); - if (flag) { - out = PI - out; - } - return out; -} - -double itmodstruve0_wrap(double x) { - double out; - - if (x<0) x=-x; - F_FUNC(itsl0,ITSL0)(&x,&out); - CONVINF(out); - return out; -} - - -double ber_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - - if (x<0) x=-x; - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Be); - return REAL(Be); -} - -double bei_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - - if (x<0) x=-x; - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Be); - return IMAG(Be); -} - -double ker_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - - if (x<0) return NPY_NAN; - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Ke); - return REAL(Ke); -} - -double kei_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - - if (x<0) return NPY_NAN; - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Ke); - return IMAG(Ke); -} - -double berp_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - int flag = 0; - - if (x<0) {x=-x; flag=1;} - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Bep); - if (flag) return -REAL(Bep); - return REAL(Bep); -} - -double beip_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - int flag = 0; - - if (x<0) {x=-x; flag=1;} - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), 
CADDR(Kep)); - ZCONVINF(Bep); - if (flag) return -IMAG(Bep); - return IMAG(Bep); -} - -double kerp_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - - if (x<0) return NPY_NAN; - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Kep); - return REAL(Kep); -} - -double keip_wrap(double x) -{ - Py_complex Be, Ke, Bep, Kep; - - if (x<0) return NPY_NAN; - F_FUNC(klvna,KLVNA)(&x, CADDR(Be), CADDR(Ke), CADDR(Bep), CADDR(Kep)); - ZCONVINF(Kep); - return IMAG(Kep); -} - - -int kelvin_wrap(double x, Py_complex *Be, Py_complex *Ke, Py_complex *Bep, Py_complex *Kep) { - int flag = 0; - - if (x<0) {x=-x; flag=1;} - F_FUNC(klvna,KLVNA)(&x, F2C_CST(Be), F2C_CST(Ke), F2C_CST(Bep), F2C_CST(Kep)); - ZCONVINF(*Be); - ZCONVINF(*Ke); - ZCONVINF(*Bep); - ZCONVINF(*Kep); - if (flag) { - REAL(*Bep) = -REAL(*Bep); - IMAG(*Bep) = -IMAG(*Bep); - REAL(*Ke) = NPY_NAN; - IMAG(*Ke) = NPY_NAN; - REAL(*Kep) = NPY_NAN; - IMAG(*Kep) = NPY_NAN; - } - return 0; -} - -/* Integrals of bessel functions */ - -/* int(j0(t),t=0..x) */ -/* int(y0(t),t=0..x) */ - -int it1j0y0_wrap(double x, double *j0int, double *y0int) -{ - int flag = 0; - - if (x < 0) {x = -x; flag=1;} - F_FUNC(itjya, ITJYA)(&x, j0int, y0int); - if (flag) { - *j0int = -(*j0int); - *y0int = NPY_NAN; /* domain error */ - } - return 0; -} - -/* int((1-j0(t))/t,t=0..x) */ -/* int(y0(t)/t,t=x..inf) */ - -int it2j0y0_wrap(double x, double *j0int, double *y0int) -{ - int flag = 0; - - if (x < 0) {x=-x; flag=1;} - F_FUNC(ittjya, ITTJYA)(&x, j0int, y0int); - if (flag) { - *y0int = NPY_NAN; /* domain error */ - } - return 0; -} - -/* Integrals of modified bessel functions */ - -int it1i0k0_wrap(double x, double *i0int, double *k0int) -{ - int flag = 0; - - if (x < 0) {x = -x; flag=1;} - F_FUNC(itika, ITIKA)(&x, i0int, k0int); - if (flag) { - *i0int = -(*i0int); - *k0int = NPY_NAN; /* domain error */ - } - return 0; -} - -int it2i0k0_wrap(double x, double *i0int, double *k0int) -{ - int flag = 0; - - if (x < 0) {x=-x; 
flag=1;} - F_FUNC(ittika, ITTIKA)(&x, i0int, k0int); - if (flag) { - *k0int = NPY_NAN; /* domain error */ - } - return 0; -} - - -/* Fresnel integrals of complex numbers */ - -int cfresnl_wrap(Py_complex z, Py_complex *zfs, Py_complex *zfc) -{ - Py_complex zfd; - F_FUNC(cfs,CFS)(&z,zfs,&zfd); - F_FUNC(cfc,CFC)(&z,zfc,&zfd); - return 0; -} - -/* Mathieu functions */ -/* Characteristic values */ -double cem_cva_wrap(double m, double q) { - int int_m, kd=1; - double out; - - if ((m < 0) || (m != floor(m))) - return NPY_NAN; - int_m = (int )m; - if (int_m % 2) kd=2; - F_FUNC(cva2,CVA2)(&kd, &int_m, &q, &out); - return out; -} - -double sem_cva_wrap(double m, double q) { - int int_m, kd=4; - double out; - - if ((m < 1) || (m != floor(m))) - return NPY_NAN; - int_m = (int )m; - if (int_m % 2) kd=3; - F_FUNC(cva2,CVA2)(&kd, &int_m, &q, &out); - return out; -} - -/* Mathieu functions */ -int cem_wrap(double m, double q, double x, double *csf, double *csd) -{ - int int_m, kf=1; - if ((m < 1) || (m != floor(m)) || (q<0)) { - *csf = NPY_NAN; - *csd = NPY_NAN; - } - int_m = (int )m; - F_FUNC(mtu0,MTU0)(&kf,&int_m, &q, &x, csf, csd); - return 0; -} - -int sem_wrap(double m, double q, double x, double *csf, double *csd) -{ - int int_m, kf=2; - if ((m < 1) || (m != floor(m)) || (q<0)) { - *csf = NPY_NAN; - *csd = NPY_NAN; - } - int_m = (int )m; - F_FUNC(mtu0,MTU0)(&kf,&int_m, &q, &x, csf, csd); - return 0; -} - - -int mcm1_wrap(double m, double q, double x, double *f1r, double *d1r) -{ - int int_m, kf=1, kc=1; - double f2r, d2r; - - if ((m < 1) || (m != floor(m)) || (q<0)) { - *f1r = NPY_NAN; - *d1r = NPY_NAN; - } - int_m = (int )m; - F_FUNC(mtu12,MTU12)(&kf,&kc,&int_m, &q, &x, f1r, d1r, &f2r, &d2r); - return 0; -} - -int msm1_wrap(double m, double q, double x, double *f1r, double *d1r) -{ - int int_m, kf=2, kc=1; - double f2r, d2r; - - if ((m < 1) || (m != floor(m)) || (q<0)) { - *f1r = NPY_NAN; - *d1r = NPY_NAN; - } - int_m = (int )m; - F_FUNC(mtu12,MTU12)(&kf,&kc,&int_m, &q, 
&x, f1r, d1r, &f2r, &d2r); - return 0; -} - -int mcm2_wrap(double m, double q, double x, double *f2r, double *d2r) -{ - int int_m, kf=1, kc=2; - double f1r, d1r; - - if ((m < 1) || (m != floor(m)) || (q<0)) { - *f2r = NPY_NAN; - *d2r = NPY_NAN; - } - int_m = (int )m; - F_FUNC(mtu12,MTU12)(&kf,&kc,&int_m, &q, &x, &f1r, &d1r, f2r, d2r); - return 0; -} - -int msm2_wrap(double m, double q, double x, double *f2r, double *d2r) -{ - int int_m, kf=2, kc=2; - double f1r, d1r; - - if ((m < 1) || (m != floor(m)) || (q<0)) { - *f2r = NPY_NAN; - *d2r = NPY_NAN; - } - int_m = (int )m; - F_FUNC(mtu12,MTU12)(&kf,&kc,&int_m, &q, &x, &f1r, &d1r, f2r, d2r); - return 0; -} - - -double pmv_wrap(double m, double v, double x){ - int int_m; - double out; - - if (m != floor(m)) return NPY_NAN; - int_m = (int ) m; - F_FUNC(lpmv,LPMV)(&v, &int_m, &x, &out); - CONVINF(out); - return out; -} - - -/* if x > 0 return w1f and w1d. - otherwise return w2f and w2d (after abs(x)) -*/ -int pbwa_wrap(double a, double x, double *wf, double *wd) { - int flag = 0; - double w1f, w1d, w2f, w2d; - - if (x < 0) {x=-x; flag=1;} - F_FUNC(pbwa,PBWA)(&a, &x, &w1f, &w1d, &w2f, &w2d); - if (flag) { - *wf = w2f; - *wd = w2d; - } - else { - *wf = w1f; - *wd = w1d; - } - return 0; -} - -int pbdv_wrap(double v, double x, double *pdf, double *pdd) { - - double *dv; - double *dp; - int num; - - /* NB. Indexing of DV/DP in specfun.f:PBDV starts from 0, hence +2 */ - num = ABS((int)v) + 2; - dv = (double *)PyMem_Malloc(sizeof(double)*2*num); - if (dv==NULL) { - printf("Warning: Memory allocation error.\n"); - *pdf = NPY_NAN; - *pdd = NPY_NAN; - return -1; - } - dp = dv + num; - F_FUNC(pbdv,PBDV)(&v, &x, dv, dp, pdf, pdd); - PyMem_Free(dv); - return 0; -} - -int pbvv_wrap(double v, double x, double *pvf, double *pvd) { - double *vv; - double *vp; - int num; - - /* NB. 
Indexing of DV/DP in specfun.f:PBVV starts from 0, hence +2 */ - num = ABS((int)v) + 2; - vv = (double *)PyMem_Malloc(sizeof(double)*2*num); - if (vv==NULL) { - printf("Warning: Memory allocation error.\n"); - *pvf = NPY_NAN; - *pvd = NPY_NAN; - return -1; - } - vp = vv + num; - F_FUNC(pbvv,PBVV)(&v, &x, vv, vp, pvf, pvd); - PyMem_Free(vv); - return 0; -} - -double prolate_segv_wrap(double m, double n, double c) -{ - int kd=1; - int int_m, int_n; - double cv, *eg; - - if ((m<0) || (n198)) { - return NPY_NAN; - } - int_m = (int) m; - int_n = (int) n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - PyMem_Free(eg); - return cv; -} - -double oblate_segv_wrap(double m, double n, double c) -{ - int kd=-1; - int int_m, int_n; - double cv, *eg; - - if ((m<0) || (n198)) { - return NPY_NAN; - } - int_m = (int) m; - int_n = (int) n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - PyMem_Free(eg); - return cv; -} - - -double prolate_aswfa_nocv_wrap(double m, double n, double c, double x, double *s1d) -{ - int kd = 1; - int int_m, int_n; - double cv, s1f, *eg; - - if ((x >=1) || (x <=-1) || (m<0) || (n198)) { - *s1d = NPY_NAN; - return NPY_NAN; - } - int_m = (int )m; - int_n = (int )n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - *s1d = NPY_NAN; - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - F_FUNC(aswfa,ASWFA)(&int_m,&int_n,&c,&x,&kd,&cv,&s1f,s1d); - PyMem_Free(eg); - return s1f; -} - - -double oblate_aswfa_nocv_wrap(double m, double n, double c, double x, double *s1d) -{ - int kd = -1; - int int_m, int_n; - double cv, s1f, *eg; - - if ((x >=1) || (x <=-1) || (m<0) || (n198)) { - 
*s1d = NPY_NAN; - return NPY_NAN; - } - int_m = (int )m; - int_n = (int )n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - *s1d = NPY_NAN; - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - F_FUNC(aswfa,ASWFA)(&int_m,&int_n,&c,&x,&kd,&cv,&s1f,s1d); - PyMem_Free(eg); - return s1f; -} - - -int prolate_aswfa_wrap(double m, double n, double c, double cv, double x, double *s1f, double *s1d) -{ - int kd = 1; - int int_m, int_n; - - if ((x >=1) || (x <=-1) || (m<0) || (n=1) || (x <=-1) || (m<0) || (n198)) { - *r1d = NPY_NAN; - return NPY_NAN; - } - int_m = (int )m; - int_n = (int )n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - *r1d = NPY_NAN; - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - F_FUNC(rswfp,RSWFP)(&int_m,&int_n,&c,&x,&cv,&kf,&r1f,r1d,&r2f,&r2d); - PyMem_Free(eg); - return r1f; -} - -double prolate_radial2_nocv_wrap(double m, double n, double c, double x, double *r2d) -{ - int kf=2, kd=1; - double r1f, r1d, r2f, cv, *eg; - int int_m, int_n; - - if ((x <=1.0) || (m<0) || (n198)) { - *r2d = NPY_NAN; - return NPY_NAN; - } - int_m = (int )m; - int_n = (int )n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - *r2d = NPY_NAN; - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - F_FUNC(rswfp,RSWFP)(&int_m,&int_n,&c,&x,&cv,&kf,&r1f,&r1d,&r2f,r2d); - PyMem_Free(eg); - return r2f; -} - -int prolate_radial1_wrap(double m, double n, double c, double cv, double x, double *r1f, double *r1d) -{ - int kf=1; - double r2f, r2d; - int int_m, int_n; - - if ((x <= 1.0) || (m<0) || (n198)) { - *r1d = NPY_NAN; - return NPY_NAN; - } - int_m = (int )m; - int_n = (int )n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation 
error.\n"); - *r1d = NPY_NAN; - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - F_FUNC(rswfo,RSWFO)(&int_m,&int_n,&c,&x,&cv,&kf,&r1f,r1d,&r2f,&r2d); - PyMem_Free(eg); - return r1f; -} - -double oblate_radial2_nocv_wrap(double m, double n, double c, double x, double *r2d) -{ - int kf=2, kd=-1; - double r1f, r1d, r2f, cv, *eg; - int int_m, int_n; - - if ((x < 0.0) || (m<0) || (n198)) { - *r2d = NPY_NAN; - return NPY_NAN; - } - int_m = (int )m; - int_n = (int )n; - eg = (double *)PyMem_Malloc(sizeof(double)*(n-m+2)); - if (eg==NULL) { - printf("Warning: Memory allocation error.\n"); - *r2d = NPY_NAN; - return NPY_NAN; - } - F_FUNC(segv,SEGV)(&int_m,&int_n,&c,&kd,&cv,eg); - F_FUNC(rswfo,RSWFO)(&int_m,&int_n,&c,&x,&cv,&kf,&r1f,&r1d,&r2f,r2d); - PyMem_Free(eg); - return r2f; -} - -int oblate_radial1_wrap(double m, double n, double c, double cv, double x, double *r1f, double *r1d) -{ - int kf=1; - double r2f, r2d; - int int_m, int_n; - - if ((x <0.0) || (m<0) || (n - -extern double PI; - -#define REAL(z) (z).real -#define IMAG(z) (z).imag -#define ABSQ(z) (z).real*(z).real + (z).imag*(z).imag; -#define ZCONVINF(z) if (REAL((z))==1.0e300) REAL((z))=NPY_INFINITY; if (REAL((z))==-1.0e300) REAL((z))=-NPY_INFINITY -#define CONVINF(x) if ((x)==1.0e300) (x)=NPY_INFINITY; if ((x)==-1.0e300) (x)=-NPY_INFINITY -#define ABS(x) ((x)<0 ? 
-(x) : (x)) - -Py_complex cgamma_wrap( Py_complex z); -Py_complex clngamma_wrap( Py_complex z); -Py_complex cpsi_wrap( Py_complex z); -Py_complex crgamma_wrap( Py_complex z); -Py_complex chyp2f1_wrap( double a, double b, double c, Py_complex z); -Py_complex chyp1f1_wrap( double a, double b, Py_complex z); -double hyp1f1_wrap( double a, double b, double x); -double hypU_wrap(double a, double b, double x); -double exp1_wrap(double x); -double expi_wrap(double x); -Py_complex cexp1_wrap(Py_complex z); -Py_complex cexpi_wrap(Py_complex z); -Py_complex cerf_wrap(Py_complex z); -int itairy_wrap(double x, double *apt, double *bpt, double *ant, double *bnt); - -double struve_wrap(double v, double x); -double itstruve0_wrap(double x); -double it2struve0_wrap(double x); - -double modstruve_wrap(double v, double x); -double itmodstruve0_wrap(double x); - -double ber_wrap(double x); -double bei_wrap(double x); -double ker_wrap(double x); -double kei_wrap(double x); -double berp_wrap(double x); -double beip_wrap(double x); -double kerp_wrap(double x); -double keip_wrap(double x); - -int kelvin_wrap(double x, Py_complex *Be, Py_complex *Ke, Py_complex *Bep, Py_complex *Kep); - -int it1j0y0_wrap(double x, double *, double *); -int it2j0y0_wrap(double x, double *, double *); -int it1i0k0_wrap(double x, double *, double *); -int it2i0k0_wrap(double x, double *, double *); - -int cfresnl_wrap(Py_complex x, Py_complex *sf, Py_complex *cf); -double cem_cva_wrap(double m, double q); -double sem_cva_wrap(double m, double q); -int cem_wrap(double m, double q, double x, double *csf, double *csd); -int sem_wrap(double m, double q, double x, double *csf, double *csd); -int mcm1_wrap(double m, double q, double x, double *f1r, double *d1r); -int msm1_wrap(double m, double q, double x, double *f1r, double *d1r); -int mcm2_wrap(double m, double q, double x, double *f2r, double *d2r); -int msm2_wrap(double m, double q, double x, double *f2r, double *d2r); -double pmv_wrap(double, double, 
double); -int pbwa_wrap(double, double, double *, double *); -int pbdv_wrap(double, double, double *, double *); -int pbvv_wrap(double, double, double *, double *); - -int prolate_aswfa_wrap(double, double, double, double, double, double *, double *); -int prolate_radial1_wrap(double, double, double, double, double, double *, double *); -int prolate_radial2_wrap(double, double, double, double, double, double *, double *); -int oblate_aswfa_wrap(double, double, double, double, double, double *, double *); -int oblate_radial1_wrap(double, double, double, double, double, double *, double *); -int oblate_radial2_wrap(double, double, double, double, double, double *, double *); -double prolate_aswfa_nocv_wrap(double, double, double, double, double *); -double prolate_radial1_nocv_wrap(double, double, double, double, double *); -double prolate_radial2_nocv_wrap(double, double, double, double, double *); -double oblate_aswfa_nocv_wrap(double, double, double, double, double *); -double oblate_radial1_nocv_wrap(double, double, double, double, double *); -double oblate_radial2_nocv_wrap(double, double, double, double, double *); -double prolate_segv_wrap(double, double, double); -double oblate_segv_wrap(double, double, double); - - - -int modified_fresnel_plus_wrap(double x, Py_complex *F, Py_complex *K); -int modified_fresnel_minus_wrap(double x, Py_complex *F, Py_complex *K); -#endif - - - - - - - - - - - - diff --git a/scipy-0.10.1/scipy/special/special_version.py b/scipy-0.10.1/scipy/special/special_version.py deleted file mode 100644 index 2ada67b9bb..0000000000 --- a/scipy-0.10.1/scipy/special/special_version.py +++ /dev/null @@ -1,7 +0,0 @@ -major = 0 -minor = 4 -micro = 9 -release_level = 'beta' - - -special_version = '%(major)d.%(minor)d.%(micro)d' % (locals ()) diff --git a/scipy-0.10.1/scipy/special/spfun_stats.py b/scipy-0.10.1/scipy/special/spfun_stats.py deleted file mode 100644 index fde8f5f5d9..0000000000 --- a/scipy-0.10.1/scipy/special/spfun_stats.py +++ 
/dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python -# Last Change: Sat Mar 21 02:00 PM 2009 J - -# Copyright (c) 2001, 2002 Enthought, Inc. -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# a. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# b. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# c. Neither the name of the Enthought nor the names of its contributors -# may be used to endorse or promote products derived from this software -# without specific prior written permission. -# -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -# DAMAGE. - -"""Some more special functions which may be useful for multivariate statistical -analysis.""" - -import numpy as np -from scipy.special import gammaln as loggam - -__all__ = ['multigammln'] - - -def multigammaln(a, d): - """returns the log of multivariate gamma, also sometimes called the - generalized gamma. 
- - Parameters - ---------- - a : ndarray - the multivariate gamma is computed for each item of a - d : int - the dimension of the space of integration. - - Returns - ------- - res : ndarray - the values of the log multivariate gamma at the given points a. - - Note - ---- - The formal definition of the multivariate gamma of dimension d for a real a - is : - - \Gamma_d(a) = \int_{A>0}{e^{-tr(A)\cdot{|A|}^{a - (m+1)/2}dA}} - - with the condition a > (d-1)/2, and A>0 being the set of all the positive - definite matrices of dimension s. Note that a is a scalar: the integrand - only is multivariate, the argument is not (the function is defined over a - subset of the real set). - - This can be proven to be equal to the much friendler equation: - - \Gamma_d(a) = \pi^{d(d-1)/4}\prod_{i=1}^{d}{\Gamma(a - (i-1)/2)}. - - Notes - ----- - Reference: - - R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in - probability and mathematical statistics). """ - a = np.asarray(a) - if not np.isscalar(d) or (np.floor(d) != d): - raise ValueError("d should be a positive integer (dimension)") - if np.any(a <= 0.5 * (d - 1)): - raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met" \ - % (a, 0.5 * (d-1))) - - res = (d * (d-1) * 0.25) * np.log(np.pi) - if a.size == 1: - axis = -1 - else: - axis = 0 - res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis) - return res diff --git a/scipy-0.10.1/scipy/special/tests/data/README b/scipy-0.10.1/scipy/special/tests/data/README deleted file mode 100644 index c4fad6c7e8..0000000000 --- a/scipy-0.10.1/scipy/special/tests/data/README +++ /dev/null @@ -1,6 +0,0 @@ -This directory contains numerical data for testing special functions. -The data is in version control as text files, but it is distributed as -compressed NPZ files which are also checked in. - -To rebuild the npz files, use ../../utils/makenpz.py on the directories. 
- diff --git a/scipy-0.10.1/scipy/special/tests/data/boost.npz b/scipy-0.10.1/scipy/special/tests/data/boost.npz deleted file mode 100644 index 0958d2304d..0000000000 Binary files a/scipy-0.10.1/scipy/special/tests/data/boost.npz and /dev/null differ diff --git a/scipy-0.10.1/scipy/special/tests/test_basic.py b/scipy-0.10.1/scipy/special/tests/test_basic.py deleted file mode 100644 index 4553ecb9b0..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_basic.py +++ /dev/null @@ -1,2201 +0,0 @@ -#this program corresponds to special.py - -### Means test is not done yet -#E Means test is giving error (E) -#F Means test is failing (F) -#EF Means test is giving error and Failing -#! Means test is segfaulting -#8 Means test runs forever - -### test_besselpoly -### test_mathieu_a -### test_mathieu_even_coef -### test_mathieu_odd_coef -### test_modfresnelp -### test_modfresnelm -# test_pbdv_seq -### test_pbvv_seq -### test_sph_harm -# test_sph_in -# test_sph_jn -# test_sph_kn - -import numpy as np -from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, log, zeros, \ - sqrt, asarray, inf, nan_to_num, real, arctan, float_ - -from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, \ - assert_array_almost_equal, assert_approx_equal, assert_, \ - rand, dec, TestCase, run_module_suite, assert_allclose -from scipy import special -import scipy.special._cephes as cephes -from scipy.special import ellipk - -from scipy.special._testutils import assert_tol_equal, with_special_errors - -class TestCephes(TestCase): - def test_airy(self): - cephes.airy(0) - def test_airye(self): - cephes.airye(0) - def test_bdtr(self): - assert_equal(cephes.bdtr(1,1,0.5),1.0) - def test_bdtri(self): - assert_equal(cephes.bdtri(1,3,0.5),0.5) - def test_bdtrc(self): - assert_equal(cephes.bdtrc(1,3,0.5),0.5) - def test_bdtrin(self): - assert_equal(cephes.bdtrin(1,0,1),5.0) - def test_bdtrik(self): - cephes.bdtrik(1,3,0.5) - - def test_bei(self): - 
assert_equal(cephes.bei(0),0.0) - def test_beip(self): - assert_equal(cephes.beip(0),0.0) - def test_ber(self): - assert_equal(cephes.ber(0),1.0) - def test_berp(self): - assert_equal(cephes.berp(0),0.0) - - def test_besselpoly(self): - assert_equal(cephes.besselpoly(0,0,0),1.0) - - def test_beta(self): - assert_equal(cephes.beta(1,1),1.0) - def test_betainc(self): - assert_equal(cephes.betainc(1,1,1),1.0) - def test_betaln(self): - assert_equal(cephes.betaln(1,1),0.0) - def test_betaincinv(self): - assert_equal(cephes.betaincinv(1,1,1),1.0) - - def test_btdtr(self): - assert_equal(cephes.btdtr(1,1,1),1.0) - def test_btdtri(self): - assert_equal(cephes.btdtri(1,1,1),1.0) - def test_btdtria(self): - assert_equal(cephes.btdtria(1,1,1),5.0) - def test_btdtrib(self): - assert_equal(cephes.btdtrib(1,1,1),5.0) - - def test_cbrt(self): - assert_approx_equal(cephes.cbrt(1),1.0) - - def test_chdtr(self): - assert_equal(cephes.chdtr(1,0),0.0) - def test_chdtrc(self): - assert_equal(cephes.chdtrc(1,0),1.0) - def test_chdtri(self): - assert_equal(cephes.chdtri(1,1),0.0) - def test_chdtriv(self): - assert_equal(cephes.chdtriv(0,0),5.0) - - def test_chndtr(self): - assert_equal(cephes.chndtr(0,1,0),0.0) - def test_chndtridf(self): - assert_equal(cephes.chndtridf(0,0,1),5.0) - def test_chndtrinc(self): - assert_equal(cephes.chndtrinc(0,1,0),5.0) - def test_chndtrix(self): - assert_equal(cephes.chndtrix(0,1,0),0.0) - - def test_cosdg(self): - assert_equal(cephes.cosdg(0),1.0) - def test_cosm1(self): - assert_equal(cephes.cosm1(0),0.0) - def test_cotdg(self): - assert_almost_equal(cephes.cotdg(45),1.0) - - def test_dawsn(self): - assert_equal(cephes.dawsn(0),0.0) - - def test_ellipe(self): - assert_equal(cephes.ellipe(1),1.0) - def test_ellipeinc(self): - assert_equal(cephes.ellipeinc(0,1),0.0) - def test_ellipj(self): - cephes.ellipj(0,1) - def test_ellipk(self): - assert_allclose(ellipk(0), pi/2) - def test_ellipkinc(self): - assert_equal(cephes.ellipkinc(0,0),0.0) - - def 
test_erf(self): - assert_equal(cephes.erf(0),0.0) - def test_erfc(self): - assert_equal(cephes.erfc(0),1.0) - - def test_exp1(self): - cephes.exp1(1) - def test_expi(self): - cephes.expi(1) - def test_expn(self): - cephes.expn(1,1) - def test_exp1_reg(self): - # Regression for #834 - a = cephes.exp1(-complex(19.9999990)) - b = cephes.exp1(-complex(19.9999991)) - assert_array_almost_equal(a.imag, b.imag) - - def test_exp10(self): - assert_approx_equal(cephes.exp10(2),100.0) - def test_exp2(self): - assert_equal(cephes.exp2(2),4.0) - def test_expm1(self): - assert_equal(cephes.expm1(0),0.0) - - def test_fdtr(self): - assert_equal(cephes.fdtr(1,1,0),0.0) - def test_fdtrc(self): - assert_equal(cephes.fdtrc(1,1,0),1.0) - def test_fdtri(self): - cephes.fdtri(1,1,0.5) - def test_fdtridfd(self): - assert_equal(cephes.fdtridfd(1,0,0),5.0) - - def test_fresnel(self): - assert_equal(cephes.fresnel(0),(0.0,0.0)) - - def test_gamma(self): - assert_equal(cephes.gamma(5),24.0) - def test_gammainc(self): - assert_equal(cephes.gammainc(5,0),0.0) - def test_gammaincc(self): - assert_equal(cephes.gammaincc(5,0),1.0) - def test_gammainccinv(self): - assert_equal(cephes.gammainccinv(5,1),0.0) - def test_gammaln(self): - cephes.gammaln(10) - - def test_gdtr(self): - assert_equal(cephes.gdtr(1,1,0),0.0) - def test_gdtrc(self): - assert_equal(cephes.gdtrc(1,1,0),1.0) - def test_gdtria(self): - assert_equal(cephes.gdtria(0,1,1),0.0) - def test_gdtrib(self): - cephes.gdtrib(1,0,1) - #assert_equal(cephes.gdtrib(1,0,1),5.0) - def test_gdtrix(self): - cephes.gdtrix(1,1,.1) - - def test_hankel1(self): - cephes.hankel1(1,1) - def test_hankel1e(self): - cephes.hankel1e(1,1) - def test_hankel2(self): - cephes.hankel2(1,1) - def test_hankel2e(self): - cephes.hankel2e(1,1) - - def test_hyp1f1(self): - assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0)) - assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095) - cephes.hyp1f1(1,1,1) - def test_hyp1f2(self): - cephes.hyp1f2(1,1,1,1) - def 
test_hyp2f0(self): - cephes.hyp2f0(1,1,1,1) - def test_hyp2f1(self): - assert_equal(cephes.hyp2f1(1,1,1,0),1.0) - def test_hyp3f0(self): - assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0)) - def test_hyperu(self): - assert_equal(cephes.hyperu(0,1,1),1.0) - - def test_i0(self): - assert_equal(cephes.i0(0),1.0) - def test_i0e(self): - assert_equal(cephes.i0e(0),1.0) - def test_i1(self): - assert_equal(cephes.i1(0),0.0) - def test_i1e(self): - assert_equal(cephes.i1e(0),0.0) - - def test_it2i0k0(self): - cephes.it2i0k0(1) - def test_it2j0y0(self): - cephes.it2j0y0(1) - def test_it2struve0(self): - cephes.it2struve0(1) - def test_itairy(self): - cephes.itairy(1) - def test_iti0k0(self): - assert_equal(cephes.iti0k0(0),(0.0,0.0)) - def test_itj0y0(self): - assert_equal(cephes.itj0y0(0),(0.0,0.0)) - def test_itmodstruve0(self): - assert_equal(cephes.itmodstruve0(0),0.0) - def test_itstruve0(self): - assert_equal(cephes.itstruve0(0),0.0) - def test_iv(self): - assert_equal(cephes.iv(1,0),0.0) - def _check_ive(self): - assert_equal(cephes.ive(1,0),0.0) - - def test_j0(self): - assert_equal(cephes.j0(0),1.0) - def test_j1(self): - assert_equal(cephes.j1(0),0.0) - def test_jn(self): - assert_equal(cephes.jn(0,0),1.0) - def test_jv(self): - assert_equal(cephes.jv(0,0),1.0) - def _check_jve(self): - assert_equal(cephes.jve(0,0),1.0) - - def test_k0(self): - cephes.k0(2) - def test_k0e(self): - cephes.k0e(2) - def test_k1(self): - cephes.k1(2) - def test_k1e(self): - cephes.k1e(2) - def test_kei(self): - cephes.kei(2) - def test_keip(self): - assert_equal(cephes.keip(0),0.0) - def test_ker(self): - cephes.ker(2) - def test_kerp(self): - cephes.kerp(2) - def _check_kelvin(self): - cephes.kelvin(2) - def test_kn(self): - cephes.kn(1,1) - - def test_kolmogi(self): - assert_equal(cephes.kolmogi(1),0.0) - def test_kolmogorov(self): - assert_equal(cephes.kolmogorov(0),1.0) - - def _check_kv(self): - cephes.kv(1,1) - def _check_kve(self): - cephes.kve(1,1) - def test_log1p(self): - 
assert_equal(cephes.log1p(0),0.0) - def test_lpmv(self): - assert_equal(cephes.lpmv(0,0,1),1.0) - - def test_mathieu_a(self): - assert_equal(cephes.mathieu_a(1,0),1.0) - def test_mathieu_b(self): - assert_equal(cephes.mathieu_b(1,0),1.0) - def test_mathieu_cem(self): - assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0)) - def test_mathieu_modcem1(self): - assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0)) - def test_mathieu_modcem2(self): - cephes.mathieu_modcem2(1,1,1) - def test_mathieu_sem(self): - assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0)) - def test_mathieu_modsem1(self): - assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0)) - def test_mathieu_modsem2(self): - cephes.mathieu_modsem2(1,1,1) - - def test_modfresnelm(self): - cephes.modfresnelm(0) - def test_modfresnelp(self): - cephes.modfresnelp(0) - def _check_modstruve(self): - assert_equal(cephes.modstruve(1,0),0.0) - - def test_nbdtr(self): - assert_equal(cephes.nbdtr(1,1,1),1.0) - def test_nbdtrc(self): - assert_equal(cephes.nbdtrc(1,1,1),0.0) - def test_nbdtri(self): - assert_equal(cephes.nbdtri(1,1,1),1.0) - def __check_nbdtrik(self): - cephes.nbdtrik(1,.4,.5) - def test_nbdtrin(self): - assert_equal(cephes.nbdtrin(1,0,0),5.0) - - def test_ncfdtr(self): - assert_equal(cephes.ncfdtr(1,1,1,0),0.0) - def test_ncfdtri(self): - assert_equal(cephes.ncfdtri(1,1,1,0),0.0) - def test_ncfdtridfd(self): - cephes.ncfdtridfd(1,0.5,0,1) - def __check_ncfdtridfn(self): - cephes.ncfdtridfn(1,0.5,0,1) - def __check_ncfdtrinc(self): - cephes.ncfdtrinc(1,0.5,0,1) - - def test_nctdtr(self): - assert_equal(cephes.nctdtr(1,0,0),0.5) - def __check_nctdtridf(self): - cephes.nctdtridf(1,0.5,0) - def test_nctdtrinc(self): - cephes.nctdtrinc(1,0,0) - def test_nctdtrit(self): - cephes.nctdtrit(.1,0.2,.5) - - def test_ndtr(self): - assert_equal(cephes.ndtr(0), 0.5) - assert_almost_equal(cephes.ndtr(1), 0.84134474606) - - def test_ndtri(self): - assert_equal(cephes.ndtri(0.5),0.0) - def test_nrdtrimn(self): - 
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0) - def test_nrdtrisd(self): - assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0, - atol=0, rtol=0) - - def test_obl_ang1(self): - cephes.obl_ang1(1,1,1,0) - def test_obl_ang1_cv(self): - result = cephes.obl_ang1_cv(1,1,1,1,0) - assert_almost_equal(result[0],1.0) - assert_almost_equal(result[1],0.0) - - def _check_obl_cv(self): - assert_equal(cephes.obl_cv(1,1,0),2.0) - def test_obl_rad1(self): - cephes.obl_rad1(1,1,1,0) - def test_obl_rad1_cv(self): - cephes.obl_rad1_cv(1,1,1,1,0) - def test_obl_rad2(self): - cephes.obl_rad2(1,1,1,0) - def test_obl_rad2_cv(self): - cephes.obl_rad2_cv(1,1,1,1,0) - - def test_pbdv(self): - assert_equal(cephes.pbdv(1,0),(0.0,1.0)) - def test_pbvv(self): - cephes.pbvv(1,0) - def test_pbwa(self): - cephes.pbwa(1,0) - def test_pdtr(self): - cephes.pdtr(0,1) - def test_pdtrc(self): - cephes.pdtrc(0,1) - def test_pdtri(self): - cephes.pdtri(0.5,0.5) - def test_pdtrik(self): - cephes.pdtrik(0.5,1) - - def test_pro_ang1(self): - cephes.pro_ang1(1,1,1,0) - def test_pro_ang1_cv(self): - assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0), - array((1.0,0.0))) - def _check_pro_cv(self): - assert_equal(cephes.pro_cv(1,1,0),2.0) - def test_pro_rad1(self): - cephes.pro_rad1(1,1,1,0.1) - def test_pro_rad1_cv(self): - cephes.pro_rad1_cv(1,1,1,1,0) - def test_pro_rad2(self): - cephes.pro_rad2(1,1,1,0) - def test_pro_rad2_cv(self): - cephes.pro_rad2_cv(1,1,1,1,0) - - def test_psi(self): - cephes.psi(1) - - def test_radian(self): - assert_equal(cephes.radian(0,0,0),0) - def test_rgamma(self): - assert_equal(cephes.rgamma(1),1.0) - def test_round(self): - assert_equal(cephes.round(3.4),3.0) - assert_equal(cephes.round(-3.4),-3.0) - assert_equal(cephes.round(3.6),4.0) - assert_equal(cephes.round(-3.6),-4.0) - assert_equal(cephes.round(3.5),4.0) - assert_equal(cephes.round(-3.5),-4.0) - - def test_shichi(self): - cephes.shichi(1) - def test_sici(self): - cephes.sici(1) - - s, c = cephes.sici(np.inf) - 
assert_almost_equal(s, np.pi * 0.5) - assert_almost_equal(c, 0) - - s, c = cephes.sici(-np.inf) - assert_almost_equal(s, -np.pi * 0.5) - assert_(np.isnan(c), "cosine integral(-inf) is not nan") - - def test_sindg(self): - assert_equal(cephes.sindg(90),1.0) - def test_smirnov(self): - assert_equal(cephes.smirnov(1,.1),0.9) - def test_smirnovi(self): - assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4) - assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6) - - def test_spence(self): - assert_equal(cephes.spence(1),0.0) - def test_stdtr(self): - assert_equal(cephes.stdtr(1,0),0.5) - def test_stdtridf(self): - cephes.stdtridf(0.7,1) - def test_stdtrit(self): - cephes.stdtrit(1,0.7) - def test_struve(self): - assert_equal(cephes.struve(0,0),0.0) - - def test_tandg(self): - assert_equal(cephes.tandg(45),1.0) - def test_tklmbda(self): - assert_almost_equal(cephes.tklmbda(1,1),1.0) - - def test_y0(self): - cephes.y0(1) - def test_y1(self): - cephes.y1(1) - def test_yn(self): - cephes.yn(1,1) - def test_yv(self): - cephes.yv(1,1) - def _check_yve(self): - cephes.yve(1,1) - - def test_zeta(self): - cephes.zeta(2,2) - def test_zetac(self): - assert_equal(cephes.zetac(0),-1.5) - def test_wofz(self): - cephes.wofz(0) - -class TestAiry(TestCase): - def test_airy(self): - #This tests the airy function to ensure 8 place accuracy in computation - - x = special.airy(.99) - assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8) - x = special.airy(.41) - assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8) - x = special.airy(-.36) - assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8) - - def test_airye(self): - a = special.airye(0.01) - b = special.airy(0.01) - b1 = [None]*4 - for n in range(2): - b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01)) - for n in range(2,4): - b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01)))) - assert_array_almost_equal(a,b1,6) - - def 
test_bi_zeros(self): - bi = special.bi_zeros(2) - bia = (array([-1.17371322, -3.2710930]), - array([-2.29443968, -4.07315509]), - array([-0.45494438, 0.39652284]), - array([ 0.60195789 , -0.76031014])) - assert_array_almost_equal(bi,bia,4) - - def test_ai_zeros(self): - ai = special.ai_zeros(1) - assert_array_almost_equal(ai,(array([-2.33810741]), - array([-1.01879297]), - array([ 0.5357]), - array([ 0.7012])),4) - -class TestAssocLaguerre(TestCase): - def test_assoc_laguerre(self): - a1 = special.genlaguerre(11,1) - a2 = special.assoc_laguerre(.2,11,1) - assert_array_almost_equal(a2,a1(.2),8) - a2 = special.assoc_laguerre(1,11,1) - assert_array_almost_equal(a2,a1(1),8) - -class TestBesselpoly(TestCase): - def test_besselpoly(self): - pass - -class TestKelvin(TestCase): - def test_bei(self): - mbei = special.bei(2) - assert_almost_equal(mbei, 0.9722916273066613,5)#this may not be exact - - def test_beip(self): - mbeip = special.beip(2) - assert_almost_equal(mbeip,0.91701361338403631,5)#this may not be exact - - def test_ber(self): - mber = special.ber(2) - assert_almost_equal(mber,0.75173418271380821,5)#this may not be exact - - def test_berp(self): - mberp = special.berp(2) - assert_almost_equal(mberp,-0.49306712470943909,5)#this may not be exact - - def test_bei_zeros(self): - bi = special.bi_zeros(5) - assert_array_almost_equal(bi[0],array([-1.173713222709127, - -3.271093302836352, - -4.830737841662016, - -6.169852128310251, - -7.376762079367764]),11) - - assert_array_almost_equal(bi[1],array([-2.294439682614122, - -4.073155089071828, - -5.512395729663599, - -6.781294445990305, - -7.940178689168587]),10) - - assert_array_almost_equal(bi[2],array([-0.454944383639657, - 0.396522836094465, - -0.367969161486959, - 0.349499116831805, - -0.336026240133662]),11) - - assert_array_almost_equal(bi[3],array([0.601957887976239, - -0.760310141492801, - 0.836991012619261, - -0.88947990142654, - 0.929983638568022]),11) - - - def test_beip_zeros(self): - bip = 
special.beip_zeros(5) - assert_array_almost_equal(bip,array([ 3.772673304934953, - 8.280987849760042, - 12.742147523633703, - 17.193431752512542, - 21.641143941167325]),4) - - def test_ber_zeros(self): - ber = special.ber_zeros(5) - assert_array_almost_equal(ber,array([2.84892, - 7.23883, - 11.67396, - 16.11356, - 20.55463]),4) - - def test_berp_zeros(self): - brp = special.berp_zeros(5) - assert_array_almost_equal(brp,array([6.03871, - 10.51364, - 14.96844, - 19.41758, - 23.86430]),4) - - def test_kelvin(self): - mkelv = special.kelvin(2) - assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j, - special.ker(2) + special.kei(2)*1j, - special.berp(2) + special.beip(2)*1j, - special.kerp(2) + special.keip(2)*1j),8) - - def test_kei(self): - mkei = special.kei(2) - assert_almost_equal(mkei,-0.20240006776470432,5) - - def test_keip(self): - mkeip = special.keip(2) - assert_almost_equal(mkeip,0.21980790991960536,5) - - def test_ker(self): - mker = special.ker(2) - assert_almost_equal(mker,-0.041664513991509472,5) - - def test_kerp(self): - mkerp = special.kerp(2) - assert_almost_equal(mkerp,-0.10660096588105264,5) - - def test_kei_zeros(self): - kei = special.kei_zeros(5) - assert_array_almost_equal(kei,array([ 3.91467, - 8.34422, - 12.78256, - 17.22314, - 21.66464]),4) - - def test_keip_zeros(self): - keip = special.keip_zeros(5) - assert_array_almost_equal(keip,array([ 4.93181, - 9.40405, - 13.85827, - 18.30717, - 22.75379]),4) - - - - # numbers come from 9.9 of A&S pg. 
381 - def test_kelvin_zeros(self): - tmp = special.kelvin_zeros(5) - berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp - assert_array_almost_equal(berz,array([ 2.84892, - 7.23883, - 11.67396, - 16.11356, - 20.55463]),4) - assert_array_almost_equal(beiz,array([ 5.02622, - 9.45541, - 13.89349, - 18.33398, - 22.77544]),4) - assert_array_almost_equal(kerz,array([ 1.71854, - 6.12728, - 10.56294, - 15.00269, - 19.44382]),4) - assert_array_almost_equal(keiz,array([ 3.91467, - 8.34422, - 12.78256, - 17.22314, - 21.66464]),4) - assert_array_almost_equal(berpz,array([ 6.03871, - 10.51364, - 14.96844, - 19.41758, - 23.86430]),4) - assert_array_almost_equal(beipz,array([ 3.77267, - # table from 1927 had 3.77320 - # but this is more accurate - 8.28099, - 12.74215, - 17.19343, - 21.64114]),4) - assert_array_almost_equal(kerpz,array([ 2.66584, - 7.17212, - 11.63218, - 16.08312, - 20.53068]),4) - assert_array_almost_equal(keipz,array([ 4.93181, - 9.40405, - 13.85827, - 18.30717, - 22.75379]),4) - - def test_ker_zeros(self): - ker = special.ker_zeros(5) - assert_array_almost_equal(ker,array([ 1.71854, - 6.12728, - 10.56294, - 15.00269, - 19.44381]),4) - - def test_kerp_zeros(self): - kerp = special.kerp_zeros(5) - assert_array_almost_equal(kerp,array([ 2.66584, - 7.17212, - 11.63218, - 16.08312, - 20.53068]),4) - -class TestBernoulli(TestCase): - def test_bernoulli(self): - brn = special.bernoulli(5) - assert_array_almost_equal(brn,array([1.0000, - -0.5000, - 0.1667, - 0.0000, - -0.0333, - 0.0000]),4) - -class TestBeta(TestCase): - def test_beta(self): - bet = special.beta(2,4) - betg = (special.gamma(2)*special.gamma(4))/special.gamma(6) - assert_almost_equal(bet,betg,8) - - def test_betaln(self): - betln = special.betaln(2,4) - bet = log(abs(special.beta(2,4))) - assert_almost_equal(betln,bet,8) - - def test_betainc(self): - btinc = special.betainc(1,1,.2) - assert_almost_equal(btinc,0.2,8) - - def test_betaincinv(self): - y = special.betaincinv(2,4,.5) - comp = 
special.betainc(2,4,y) - assert_almost_equal(comp,.5,5) - -class TestTrigonometric(TestCase): - def test_cbrt(self): - cb = special.cbrt(27) - cbrl = 27**(1.0/3.0) - assert_approx_equal(cb,cbrl) - - def test_cbrtmore(self): - cb1 = special.cbrt(27.9) - cbrl1 = 27.9**(1.0/3.0) - assert_almost_equal(cb1,cbrl1,8) - - def test_cosdg(self): - cdg = special.cosdg(90) - cdgrl = cos(pi/2.0) - assert_almost_equal(cdg,cdgrl,8) - - def test_cosdgmore(self): - cdgm = special.cosdg(30) - cdgmrl = cos(pi/6.0) - assert_almost_equal(cdgm,cdgmrl,8) - - def test_cosm1(self): - cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10)) - csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1) - assert_array_almost_equal(cs,csrl,8) - - def test_cotdg(self): - ct = special.cotdg(30) - ctrl = tan(pi/6.0)**(-1) - assert_almost_equal(ct,ctrl,8) - - def test_cotdgmore(self): - ct1 = special.cotdg(45) - ctrl1 = tan(pi/4.0)**(-1) - assert_almost_equal(ct1,ctrl1,8) - - def test_specialpoints(self): - assert_almost_equal(special.cotdg(45), 1.0, 14) - assert_almost_equal(special.cotdg(-45), -1.0, 14) - assert_almost_equal(special.cotdg(90), 0.0, 14) - assert_almost_equal(special.cotdg(-90), 0.0, 14) - assert_almost_equal(special.cotdg(135), -1.0, 14) - assert_almost_equal(special.cotdg(-135), 1.0, 14) - assert_almost_equal(special.cotdg(225), 1.0, 14) - assert_almost_equal(special.cotdg(-225), -1.0, 14) - assert_almost_equal(special.cotdg(270), 0.0, 14) - assert_almost_equal(special.cotdg(-270), 0.0, 14) - assert_almost_equal(special.cotdg(315), -1.0, 14) - assert_almost_equal(special.cotdg(-315), 1.0, 14) - assert_almost_equal(special.cotdg(765), 1.0, 14) - - def test_sinc(self): - c = arange(-2,2,.1) - y = special.sinc(c) - yre = sin(pi*c)/(pi*c) - yre[20] = 1.0 - assert_array_almost_equal(y, yre, 4) - - def test_0(self): - x = 0.0 - assert_equal(special.sinc(x),1.0) - - def test_sindg(self): - sn = special.sindg(90) - assert_equal(sn,1.0) - - def test_sindgmore(self): - snm = special.sindg(30) - snmrl 
= sin(pi/6.0) - assert_almost_equal(snm,snmrl,8) - snm1 = special.sindg(45) - snmrl1 = sin(pi/4.0) - assert_almost_equal(snm1,snmrl1,8) - -class TestTandg(TestCase): - - def test_tandg(self): - tn = special.tandg(30) - tnrl = tan(pi/6.0) - assert_almost_equal(tn,tnrl,8) - - def test_tandgmore(self): - tnm = special.tandg(45) - tnmrl = tan(pi/4.0) - assert_almost_equal(tnm,tnmrl,8) - tnm1 = special.tandg(60) - tnmrl1 = tan(pi/3.0) - assert_almost_equal(tnm1,tnmrl1,8) - - def test_specialpoints(self): - assert_almost_equal(special.tandg(0), 0.0, 14) - assert_almost_equal(special.tandg(45), 1.0, 14) - assert_almost_equal(special.tandg(-45), -1.0, 14) - assert_almost_equal(special.tandg(135), -1.0, 14) - assert_almost_equal(special.tandg(-135), 1.0, 14) - assert_almost_equal(special.tandg(180), 0.0, 14) - assert_almost_equal(special.tandg(-180), 0.0, 14) - assert_almost_equal(special.tandg(225), 1.0, 14) - assert_almost_equal(special.tandg(-225), -1.0, 14) - assert_almost_equal(special.tandg(315), -1.0, 14) - assert_almost_equal(special.tandg(-315), 1.0, 14) - -class TestEllip(TestCase): - def test_ellipj_nan(self): - """Regression test for #912.""" - special.ellipj(0.5, np.nan) - - def test_ellipj(self): - el = special.ellipj(0.2,0) - rel = [sin(0.2),cos(0.2),1.0,0.20] - assert_array_almost_equal(el,rel,13) - - def test_ellipk(self): - elk = special.ellipk(.2) - assert_almost_equal(elk,1.659623598610528,11) - - def test_ellipkinc(self): - elkinc = special.ellipkinc(pi/2,.2) - elk = special.ellipk(0.2) - assert_almost_equal(elkinc,elk,15) - alpha = 20*pi/180 - phi = 45*pi/180 - m = sin(alpha)**2 - elkinc = special.ellipkinc(phi,m) - assert_almost_equal(elkinc,0.79398143,8) - # From pg. 
614 of A & S - - def test_ellipe(self): - ele = special.ellipe(.2) - assert_almost_equal(ele,1.4890350580958529,8) - - def test_ellipeinc(self): - eleinc = special.ellipeinc(pi/2,.2) - ele = special.ellipe(0.2) - assert_almost_equal(eleinc,ele,14) - # pg 617 of A & S - alpha, phi = 52*pi/180,35*pi/180 - m = sin(alpha)**2 - eleinc = special.ellipeinc(phi,m) - assert_almost_equal(eleinc, 0.58823065, 8) - - -class TestErf(TestCase): - - def test_erf(self): - er = special.erf(.25) - assert_almost_equal(er,0.2763263902,8) - - def test_erf_zeros(self): - erz = special.erf_zeros(5) - erzr= array([1.45061616+1.88094300j, - 2.24465928+2.61657514j, - 2.83974105+3.17562810j, - 3.33546074+3.64617438j, - 3.76900557+4.06069723j]) - assert_array_almost_equal(erz,erzr,4) - - def test_erfcinv(self): - i = special.erfcinv(1) - assert_equal(i,0) - - def test_erfinv(self): - i = special.erfinv(0) - assert_equal(i,0) - - def test_errprint(self): - a = special.errprint() - b = 1-a #a is the state 1-a inverts state - c = special.errprint(b) #returns last state 'a' - assert_equal(a,c) - d = special.errprint(a) #returns to original state - assert_equal(d,b) #makes sure state was returned - #assert_equal(d,1-a) - -class TestEuler(TestCase): - def test_euler(self): - eu0 = special.euler(0) - eu1 = special.euler(1) - eu2 = special.euler(2) # just checking segfaults - assert_almost_equal(eu0[0],1,8) - assert_almost_equal(eu2[2],-1,8) - eu24 = special.euler(24) - mathworld = [1,1,5,61,1385,50521,2702765,199360981, - 19391512145l,2404879675441l, - 370371188237525l,69348874393137901l, - 15514534163557086905l] - correct = zeros((25,),'d') - for k in range(0,13): - if (k % 2): - correct[2*k] = -float(mathworld[k]) - else: - correct[2*k] = float(mathworld[k]) - olderr = np.seterr(all='ignore') - try: - err = nan_to_num((eu24-correct)/correct) - errmax = max(err) - finally: - np.seterr(**olderr) - assert_almost_equal(errmax, 0.0, 14) - -class TestExp(TestCase): - def test_exp2(self): - ex = 
special.exp2(2) - exrl = 2**2 - assert_equal(ex,exrl) - - def test_exp2more(self): - exm = special.exp2(2.5) - exmrl = 2**(2.5) - assert_almost_equal(exm,exmrl,8) - - def test_exp10(self): - ex = special.exp10(2) - exrl = 10**2 - assert_approx_equal(ex,exrl) - - def test_exp10more(self): - exm = special.exp10(2.5) - exmrl = 10**(2.5) - assert_almost_equal(exm,exmrl,8) - - def test_expm1(self): - ex = (special.expm1(2),special.expm1(3),special.expm1(4)) - exrl = (exp(2)-1,exp(3)-1,exp(4)-1) - assert_array_almost_equal(ex,exrl,8) - - def test_expm1more(self): - ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2)) - exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1) - assert_array_almost_equal(ex1,exrl1,8) - -class TestFresnel(TestCase): - def test_fresnel(self): - frs = array(special.fresnel(.5)) - assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8) - - # values from pg 329 Table 7.11 of A & S - # slightly corrected in 4th decimal place - def test_fresnel_zeros(self): - szo, czo = special.fresnel_zeros(5) - assert_array_almost_equal(szo, - array([ 2.0093+0.2885j, - 2.8335+0.2443j, - 3.4675+0.2185j, - 4.0026+0.2009j, - 4.4742+0.1877j]),3) - assert_array_almost_equal(czo, - array([ 1.7437+0.3057j, - 2.6515+0.2529j, - 3.3204+0.2240j, - 3.8757+0.2047j, - 4.3611+0.1907j]),3) - vals1 = special.fresnel(szo)[0] - vals2 = special.fresnel(czo)[1] - assert_array_almost_equal(vals1,0,14) - assert_array_almost_equal(vals2,0,14) - - def test_fresnelc_zeros(self): - szo, czo = special.fresnel_zeros(6) - frc = special.fresnelc_zeros(6) - assert_array_almost_equal(frc,czo,12) - - def test_fresnels_zeros(self): - szo, czo = special.fresnel_zeros(5) - frs = special.fresnels_zeros(5) - assert_array_almost_equal(frs,szo,12) - - -class TestGamma(TestCase): - def test_gamma(self): - gam = special.gamma(5) - assert_equal(gam,24.0) - - def test_gammaln(self): - gamln = special.gammaln(3) - lngam = log(special.gamma(3)) - assert_almost_equal(gamln,lngam,8) - - 
def test_gammainc(self): - gama = special.gammainc(.5,.5) - assert_almost_equal(gama,.7,1) - - def test_gammaincnan(self): - gama = special.gammainc(-1,1) - assert_(isnan(gama)) - - def test_gammainczero(self): - # bad arg but zero integration limit - gama = special.gammainc(-1,0) - assert_equal(gama,0.0) - - def test_gammaincc(self): - gicc = special.gammaincc(.5,.5) - greal = 1 - special.gammainc(.5,.5) - assert_almost_equal(gicc,greal,8) - - def test_gammainccnan(self): - gama = special.gammaincc(-1,1) - assert_(isnan(gama)) - - def test_gammainccinv(self): - gccinv = special.gammainccinv(.5,.5) - gcinv = special.gammaincinv(.5,.5) - assert_almost_equal(gccinv,gcinv,8) - - @with_special_errors - def test_gammaincinv(self): - y = special.gammaincinv(.4,.4) - x = special.gammainc(.4,y) - assert_almost_equal(x,0.4,1) - y = special.gammainc(10, 0.05) - x = special.gammaincinv(10, 2.5715803516000736e-20) - assert_almost_equal(0.05, x, decimal=10) - assert_almost_equal(y, 2.5715803516000736e-20, decimal=10) - x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18) - assert_almost_equal(11.0, x, decimal=10) - - @with_special_errors - def test_975(self): - # Regression test for ticket #975 -- switch point in algorithm - # check that things work OK at the point, immediately next floats - # around it, and a bit further away - pts = [0.25, - np.nextafter(0.25, 0), 0.25 - 1e-12, - np.nextafter(0.25, 1), 0.25 + 1e-12] - for xp in pts: - y = special.gammaincinv(.4, xp) - x = special.gammainc(0.4, y) - assert_tol_equal(x, xp, rtol=1e-12) - - def test_rgamma(self): - rgam = special.rgamma(8) - rlgam = 1/special.gamma(8) - assert_almost_equal(rgam,rlgam,8) - -class TestHankel(TestCase): - - def test_negv1(self): - assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14) - - def test_hankel1(self): - hank1 = special.hankel1(1,.1) - hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j) - assert_almost_equal(hank1,hankrl,8) - - def test_negv1e(self): - 
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14) - - def test_hankel1e(self): - hank1e = special.hankel1e(1,.1) - hankrle = special.hankel1(1,.1)*exp(-.1j) - assert_almost_equal(hank1e,hankrle,8) - - def test_negv2(self): - assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14) - - def test_hankel2(self): - hank2 = special.hankel2(1,.1) - hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j) - assert_almost_equal(hank2,hankrl2,8) - - def test_neg2e(self): - assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14) - - def test_hankl2e(self): - hank2e = special.hankel2e(1,.1) - hankrl2e = special.hankel2e(1,.1) - assert_almost_equal(hank2e,hankrl2e,8) - -class TestHyper(TestCase): - def test_h1vp(self): - h1 = special.h1vp(1,.1) - h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j) - assert_almost_equal(h1,h1real,8) - - def test_h2vp(self): - h2 = special.h2vp(1,.1) - h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j) - assert_almost_equal(h2,h2real,8) - - def test_hyp0f1(self): - pass - - def test_hyp1f1(self): - hyp1 = special.hyp1f1(.1,.1,.3) - assert_almost_equal(hyp1, 1.3498588075760032,7) - - # test contributed by Moritz Deger (2008-05-29) - # http://projects.scipy.org/scipy/scipy/ticket/659 - - # reference data obtained from mathematica [ a, b, x, m(a,b,x)]: - # produced with test_hyp1f1.nb - ref_data = array([[ -8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04], - [ 2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00], - [ -1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05], - [ 5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08], - [ -2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24], - [ 4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21], - [ 1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13], - [ 2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13], - [ 1.50310939e+01, -2.91198675e+01, 
-1.53581080e+01, -3.79166033e+02], - [ 1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10], - [ -4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01], - [ 8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21], - [ 1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20], - [ -2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07], - [ 2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03], - [ 2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02], - [ 6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11], - [ -1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03], - [ 2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17], - [ 8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01], - [ 1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00], - [ -4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00], - [ 2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23], - [ -2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01], - [ 3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04], - [ -1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08], - [ 2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01], - [ -9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07], - [ 1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03], - [ -2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09], - [ -8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06], - [ -1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00], - [ -3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01], - [ 3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02], - [ 6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02], - [ -2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02], - [ 2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 
9.56934814e+00], - [ 1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09], - [ 1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01], - [ 1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00], - [ 1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02], - [ -1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05], - [ -1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05], - [ 7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02], - [ 2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02], - [ -2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13], - [ -2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05], - [ -1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12], - [ -5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01], - [ -1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16], - [ 2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37], - [ 5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06], - [ -1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02], - [ -1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12], - [ 5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27], - [ -2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04], - [ 1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06], - [ 2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07], - [ 5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03], - [ -2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07], - [ 1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27], - [ 6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12], - [ 1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32], - [ -2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04], - [ -4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01], - [ 
-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02], - [ -2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19], - [ 1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09], - [ 2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31], - [ -2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01], - [ 2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02], - [ -2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08], - [ 2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09], - [ 1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33], - [ -3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01], - [ 7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29], - [ 2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01], - [ 8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29], - [ -1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02], - [ -8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00], - [ -1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08], - [ -5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01], - [ -5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01], - [ -2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01], - [ 6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13], - [ -2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11], - [ -1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02], - [ 6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02], - [ -1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01], - [ 7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31], - [ -1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04], - [ 5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25], - [ 3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01], - [ 
-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00], - [ 2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02], - [ 2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05], - [ -9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02], - [ -5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01], - [ -1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01], - [ -5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]]) - - for a,b,c,expected in ref_data: - result = special.hyp1f1(a,b,c) - assert_(abs(expected - result)/expected < 1e-4) - - def test_hyp1f2(self): - pass - - def test_hyp2f0(self): - pass - - def test_hyp2f1(self): - # a collection of special cases taken from AMS 55 - values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))], - [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)], - [1, 1, 2, 0.2, -1/0.2*log(1-0.2)], - [3, 3.5, 1.5, 0.2**2, - 0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))], - [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)], - [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)], - [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi)* - special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)], - [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi)* - special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)], - [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3)* - special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)], - # and some others - # ticket #424 - [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484], - # negative integer a or b, with c-a-b integer and x > 0.9 - [-2,3,1,0.95,0.715], - [2,-3,1,0.95,-0.007], - [-6,3,1,0.95,0.0000810625], - [2,-5,1,0.95,-0.000029375], - # huge negative integers - (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24), - (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18), - ] - for i, (a, b, c, x, v) in enumerate(values): - cv = special.hyp2f1(a, b, c, x) - assert_almost_equal(cv, v, 8, 
err_msg='test #%d' % i) - - def test_hyp3f0(self): - pass - - def test_hyperu(self): - val1 = special.hyperu(1,0.1,100) - assert_almost_equal(val1,0.0098153,7) - a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2] - a,b = asarray(a), asarray(b) - z = 0.5 - hypu = special.hyperu(a,b,z) - hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z)/ \ - (special.gamma(1+a-b)*special.gamma(b))- \ - z**(1-b)*special.hyp1f1(1+a-b,2-b,z) \ - /(special.gamma(a)*special.gamma(2-b))) - assert_array_almost_equal(hypu,hprl,12) - -class TestBessel(TestCase): - def test_itj0y0(self): - it0 = array(special.itj0y0(.2)) - assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8) - - def test_it2j0y0(self): - it2 = array(special.it2j0y0(.2)) - assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8) - - def test_negv_iv(self): - assert_equal(special.iv(3,2), special.iv(-3,2)) - - def test_j0(self): - oz = special.j0(.1) - ozr = special.jn(0,.1) - assert_almost_equal(oz,ozr,8) - - def test_j1(self): - o1 = special.j1(.1) - o1r = special.jn(1,.1) - assert_almost_equal(o1,o1r,8) - - def test_jn(self): - jnnr = special.jn(1,.2) - assert_almost_equal(jnnr,0.099500832639235995,8) - - def test_negv_jv(self): - assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14) - - def test_jv(self): - values = [[0, 0.1, 0.99750156206604002], - [2./3, 1e-8, 0.3239028506761532e-5], - [2./3, 1e-10, 0.1503423854873779e-6], - [3.1, 1e-10, 0.1711956265409013e-32], - [2./3, 4.0, -0.2325440850267039], - ] - for i, (v, x, y) in enumerate(values): - yc = special.jv(v, x) - assert_almost_equal(yc, y, 8, err_msg='test #%d' % i) - - def test_negv_jve(self): - assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14) - - def test_jve(self): - jvexp = special.jve(1,.2) - assert_almost_equal(jvexp,0.099500832639235995,8) - jvexp1 = special.jve(1,.2+1j) - z = .2+1j - jvexpr = special.jv(1,z)*exp(-abs(z.imag)) - assert_almost_equal(jvexp1,jvexpr,8) - - def test_jn_zeros(self): - 
jn0 = special.jn_zeros(0,5) - jn1 = special.jn_zeros(1,5) - assert_array_almost_equal(jn0,array([ 2.4048255577, - 5.5200781103, - 8.6537279129, - 11.7915344391, - 14.9309177086]),4) - assert_array_almost_equal(jn1,array([ 3.83171, - 7.01559, - 10.17347, - 13.32369, - 16.47063]),4) - - jn102 = special.jn_zeros(102,5) - assert_tol_equal(jn102, array([110.89174935992040343, - 117.83464175788308398, - 123.70194191713507279, - 129.02417238949092824, - 134.00114761868422559]), rtol=1e-13) - - jn301 = special.jn_zeros(301,5) - assert_tol_equal(jn301, array([313.59097866698830153, - 323.21549776096288280, - 331.22338738656748796, - 338.39676338872084500, - 345.03284233056064157]), rtol=1e-13) - - def test_jn_zeros_slow(self): - jn0 = special.jn_zeros(0, 300) - assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13) - assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13) - assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13) - - jn10 = special.jn_zeros(10, 300) - assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13) - assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13) - assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13) - - jn3010 = special.jn_zeros(3010,5) - assert_tol_equal(jn3010, array([3036.86590780927, - 3057.06598526482, - 3073.66360690272, - 3088.37736494778, - 3101.86438139042]), rtol=1e-8) - - def test_jnjnp_zeros(self): - jn = special.jn - def jnp(n, x): - return (jn(n-1,x) - jn(n+1,x))/2 - for nt in range(1, 30): - z, n, m, t = special.jnjnp_zeros(nt) - for zz, nn, tt in zip(z, n, t): - if tt == 0: - assert_allclose(jn(nn, zz), 0, atol=1e-6) - elif tt == 1: - assert_allclose(jnp(nn, zz), 0, atol=1e-6) - else: - raise AssertionError("Invalid t return for nt=%d" % nt) - - def test_jnp_zeros(self): - jnp = special.jnp_zeros(1,5) - assert_array_almost_equal(jnp, array([ 1.84118, - 5.33144, - 8.53632, - 11.70600, - 14.86359]),4) - jnp = special.jnp_zeros(443,5) - assert_tol_equal(special.jvp(443, 
jnp), 0, atol=1e-15) - - def test_jnyn_zeros(self): - jnz = special.jnyn_zeros(1,5) - assert_array_almost_equal(jnz,(array([ 3.83171, - 7.01559, - 10.17347, - 13.32369, - 16.47063]), - array([ 1.84118, - 5.33144, - 8.53632, - 11.70600, - 14.86359]), - array([ 2.19714, - 5.42968, - 8.59601, - 11.74915, - 14.89744]), - array([ 3.68302, - 6.94150, - 10.12340, - 13.28576, - 16.44006])),5) - - def test_jvp(self): - jvprim = special.jvp(2,2) - jv0 = (special.jv(1,2)-special.jv(3,2))/2 - assert_almost_equal(jvprim,jv0,10) - - def test_k0(self): - ozk = special.k0(.1) - ozkr = special.kv(0,.1) - assert_almost_equal(ozk,ozkr,8) - - def test_k0e(self): - ozke = special.k0e(.1) - ozker = special.kve(0,.1) - assert_almost_equal(ozke,ozker,8) - - def test_k1(self): - o1k = special.k1(.1) - o1kr = special.kv(1,.1) - assert_almost_equal(o1k,o1kr,8) - - def test_k1e(self): - o1ke = special.k1e(.1) - o1ker = special.kve(1,.1) - assert_almost_equal(o1ke,o1ker,8) - - def test_jacobi(self): - a = 5*rand() - 1 - b = 5*rand() - 1 - P0 = special.jacobi(0,a,b) - P1 = special.jacobi(1,a,b) - P2 = special.jacobi(2,a,b) - P3 = special.jacobi(3,a,b) - - assert_array_almost_equal(P0.c,[1],13) - assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13) - cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)] - p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]] - assert_array_almost_equal(P2.c,array(p2c)/8.0,13) - cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3), - 12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)] - p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]] - assert_array_almost_equal(P3.c,array(p3c)/48.0,13) - - def test_kn(self): - kn1 = special.kn(0,.2) - assert_almost_equal(kn1,1.7527038555281462,8) - - def test_negv_kv(self): - assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2)) - - def test_kv0(self): - kv0 = special.kv(0,.2) - assert_almost_equal(kv0, 1.7527038555281462, 10) - - def test_kv1(self): - kv1 = special.kv(1,0.2) - assert_almost_equal(kv1, 
4.775972543220472, 10) - - def test_kv2(self): - kv2 = special.kv(2,0.2) - assert_almost_equal(kv2, 49.51242928773287, 10) - - - def test_negv_kve(self): - assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2)) - - def test_kve(self): - kve1 = special.kve(0,.2) - kv1 = special.kv(0,.2)*exp(.2) - assert_almost_equal(kve1,kv1,8) - z = .2+1j - kve2 = special.kve(0,z) - kv2 = special.kv(0,z)*exp(z) - assert_almost_equal(kve2,kv2,8) - - def test_kvp_v0n1(self): - z = 2.2 - assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10) - - def test_kvp_n1(self): - v = 3. - z = 2.2 - xc = -special.kv(v+1,z) + v/z*special.kv(v,z) - x = special.kvp(v,z, n=1) - assert_almost_equal(xc, x, 10) #this function (kvp) is broken - - def test_kvp_n2(self): - v = 3. - z = 2.2 - xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z - x = special.kvp(v, z, n=2) - assert_almost_equal(xc, x, 10) - - def test_y0(self): - oz = special.y0(.1) - ozr = special.yn(0,.1) - assert_almost_equal(oz,ozr,8) - - def test_y1(self): - o1 = special.y1(.1) - o1r = special.yn(1,.1) - assert_almost_equal(o1,o1r,8) - - def test_y0_zeros(self): - yo,ypo = special.y0_zeros(2) - zo,zpo = special.y0_zeros(2,complex=1) - all = r_[yo,zo] - allval = r_[ypo,zpo] - assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11) - assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11) - - - def test_y1_zeros(self): - y1 = special.y1_zeros(1) - assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5) - - def test_y1p_zeros(self): - y1p = special.y1p_zeros(1,complex=1) - assert_array_almost_equal(y1p,(array([ 0.5768+0.904j]), array([-0.7635+0.5892j])),3) - - def test_yn_zeros(self): - an = special.yn_zeros(4,2) - assert_array_almost_equal(an,array([ 5.64515, 9.36162]),5) - an = special.yn_zeros(443,5) - assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542, - 472.80651546418663566, 481.27353184725625838, - 488.98055964441374646], rtol=1e-15) - - def test_ynp_zeros(self): - 
ao = special.ynp_zeros(0,2) - assert_array_almost_equal(ao,array([ 2.19714133, 5.42968104]),6) - ao = special.ynp_zeros(43,5) - assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15) - ao = special.ynp_zeros(443,5) - assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9) - - @dec.knownfailureif(True, - "cephes/yv is not eps accurate for large orders on " - "all platforms, and has nan/inf issues") - def test_ynp_zeros_large_order(self): - ao = special.ynp_zeros(443,5) - assert_tol_equal(special.yvp(443, ao), 0, atol=1e-15) - - def test_yn(self): - yn2n = special.yn(1,.2) - assert_almost_equal(yn2n,-3.3238249881118471,8) - - def test_negv_yv(self): - assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14) - - def test_yv(self): - yv2 = special.yv(1,.2) - assert_almost_equal(yv2,-3.3238249881118471,8) - - def test_negv_yve(self): - assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14) - - def test_yve(self): - yve2 = special.yve(1,.2) - assert_almost_equal(yve2,-3.3238249881118471,8) - yve2r = special.yv(1,.2+1j)*exp(-1) - yve22 = special.yve(1,.2+1j) - assert_almost_equal(yve22,yve2r,8) - - def test_yvp(self): - yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0 - yvp1 = special.yvp(2,.2) - assert_array_almost_equal(yvp1,yvpr,10) - - - def _cephes_vs_amos_points(self): - """Yield points at which to compare Cephes implementation to AMOS""" - # check several points, including large-amplitude ones - for v in [-120, -100.3, -20., -10., -1., -.5, - 0., 1., 12.49, 120., 301]: - for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, - 700.6, 1300, 10003]: - yield v, z - - # check half-integers; these are problematic points at least - # for cephes/iv - for v in 0.5 + arange(-60, 60): - yield v, 3.5 - - def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None): - for v, z in self._cephes_vs_amos_points(): - if skip is not None and skip(v, z): - continue - c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z) - if np.isinf(c1): - assert_(np.abs(c2) >= 1e300, 
(v, z)) - elif np.isnan(c1): - assert_(c2.imag != 0, (v, z)) - else: - assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol) - if v == int(v): - assert_tol_equal(c3, c2, err_msg=(v, z), - rtol=rtol, atol=atol) - - def test_jv_cephes_vs_amos(self): - self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305) - - @dec.knownfailureif(True, - "cephes/yv is not eps accurate for large orders on " - "all platforms, and has nan/inf issues") - def test_yv_cephes_vs_amos(self): - self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305) - - def test_yv_cephes_vs_amos_only_small_orders(self): - skipper = lambda v, z: (abs(v) > 50) - self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper) - - def test_iv_cephes_vs_amos(self): - olderr = np.seterr(all='ignore') - try: - self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305) - finally: - np.seterr(**olderr) - - @dec.slow - def test_iv_cephes_vs_amos_mass_test(self): - N = 1000000 - np.random.seed(1) - v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N) - x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N) - - imsk = (np.random.randint(8, size=N) == 0) - v[imsk] = v.astype(int) - - c1 = special.iv(v, x) - c2 = special.iv(v, x+0j) - - # deal with differences in the inf cutoffs - c1[abs(c1) > 1e300] = np.inf - c2[abs(c2) > 1e300] = np.inf - - dc = abs(c1/c2 - 1) - dc[np.isnan(dc)] = 0 - - k = np.argmax(dc) - - # Most error apparently comes from AMOS and not our implementation; - # there are some problems near integer orders there - assert_(dc[k] < 1e-9, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j))) - - def test_kv_cephes_vs_amos(self): - #self.check_cephes_vs_amos(kv, kn, rtol=1e-9, atol=1e-305) - self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305) - - def test_ticket_623(self): - assert_tol_equal(special.jv(3, 4), 0.43017147387562193) - assert_tol_equal(special.jv(301, 
1300), 0.0183487151115275) - assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048) - - def test_ticket_853(self): - """Negative-order Bessels""" - # cephes - assert_tol_equal(special.jv(-1, 1 ), -0.4400505857449335) - assert_tol_equal(special.jv(-2, 1 ), 0.1149034849319005) - assert_tol_equal(special.yv(-1, 1 ), 0.7812128213002887) - assert_tol_equal(special.yv(-2, 1 ), -1.650682606816255) - assert_tol_equal(special.iv(-1, 1 ), 0.5651591039924851) - assert_tol_equal(special.iv(-2, 1 ), 0.1357476697670383) - assert_tol_equal(special.kv(-1, 1 ), 0.6019072301972347) - assert_tol_equal(special.kv(-2, 1 ), 1.624838898635178) - assert_tol_equal(special.jv(-0.5, 1 ), 0.43109886801837607952) - assert_tol_equal(special.yv(-0.5, 1 ), 0.6713967071418031) - assert_tol_equal(special.iv(-0.5, 1 ), 1.231200214592967) - assert_tol_equal(special.kv(-0.5, 1 ), 0.4610685044478945) - # amos - assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335) - assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005) - assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887) - assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255) - - assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851) - assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383) - assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347) - assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178) - - assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952) - assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j) - assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031) - assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j) - - assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967) - assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j) - assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945) - assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j) - - 
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3)) - assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3)) - assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3)) - assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j)) - - assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j)) - assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j)) - - def test_ticket_854(self): - """Real-valued Bessel domains""" - assert_(isnan(special.jv(0.5, -1))) - assert_(isnan(special.iv(0.5, -1))) - assert_(isnan(special.yv(0.5, -1))) - assert_(isnan(special.yv(1, -1))) - assert_(isnan(special.kv(0.5, -1))) - assert_(isnan(special.kv(1, -1))) - assert_(isnan(special.jve(0.5, -1))) - assert_(isnan(special.ive(0.5, -1))) - assert_(isnan(special.yve(0.5, -1))) - assert_(isnan(special.yve(1, -1))) - assert_(isnan(special.kve(0.5, -1))) - assert_(isnan(special.kve(1, -1))) - assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1)) - assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1)) - - def test_ticket_503(self): - """Real-valued Bessel I overflow""" - assert_tol_equal(special.iv(1, 700), 1.528500390233901e302) - assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301) - - def test_iv_hyperg_poles(self): - assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967) - - def iv_series(self, v, z, n=200): - k = arange(0, n).astype(float_) - r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1) - r[isnan(r)] = inf - r = exp(r) - err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10 - return r.sum(), err - - def test_i0_series(self): - for z in [1., 10., 200.5]: - value, err = self.iv_series(0, z) - assert_tol_equal(special.i0(z), value, atol=err, err_msg=z) - - def test_i1_series(self): - for z in [1., 10., 200.5]: - value, err = self.iv_series(1, z) - 
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z) - - def test_iv_series(self): - for v in [-20., -10., -1., 0., 1., 12.49, 120.]: - for z in [1., 10., 200.5, -1+2j]: - value, err = self.iv_series(v, z) - assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z)) - - def test_i0(self): - values = [[0.0, 1.0], - [1e-10, 1.0], - [0.1, 0.9071009258], - [0.5, 0.6450352706], - [1.0, 0.4657596077], - [2.5, 0.2700464416], - [5.0, 0.1835408126], - [20.0, 0.0897803119], - ] - for i, (x, v) in enumerate(values): - cv = special.i0(x) * exp(-x) - assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) - - def test_i0e(self): - oize = special.i0e(.1) - oizer = special.ive(0,.1) - assert_almost_equal(oize,oizer,8) - - def test_i1(self): - values = [[0.0, 0.0], - [1e-10, 0.4999999999500000e-10], - [0.1, 0.0452984468], - [0.5, 0.1564208032], - [1.0, 0.2079104154], - [5.0, 0.1639722669], - [20.0, 0.0875062222], - ] - for i, (x, v) in enumerate(values): - cv = special.i1(x) * exp(-x) - assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) - - def test_i1e(self): - oi1e = special.i1e(.1) - oi1er = special.ive(1,.1) - assert_almost_equal(oi1e,oi1er,8) - - def test_iti0k0(self): - iti0 = array(special.iti0k0(5)) - assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5) - - def test_it2i0k0(self): - it2k = special.it2i0k0(.1) - assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6) - - def test_iv(self): - iv1 = special.iv(0,.1)*exp(-.1) - assert_almost_equal(iv1,0.90710092578230106,10) - - def test_negv_ive(self): - assert_equal(special.ive(3,2), special.ive(-3,2)) - - def test_ive(self): - ive1 = special.ive(0,.1) - iv1 = special.iv(0,.1)*exp(-.1) - assert_almost_equal(ive1,iv1,10) - - def test_ivp0(self): - assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10) - - def test_ivp(self): - y=(special.iv(0,2) + special.iv(2,2))/2 - x = special.ivp(1,2) - assert_almost_equal(x,y,10) - - -class 
TestLaguerre(TestCase): - def test_laguerre(self): - lag0 = special.laguerre(0) - lag1 = special.laguerre(1) - lag2 = special.laguerre(2) - lag3 = special.laguerre(3) - lag4 = special.laguerre(4) - lag5 = special.laguerre(5) - assert_array_almost_equal(lag0.c,[1],13) - assert_array_almost_equal(lag1.c,[-1,1],13) - assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13) - assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13) - assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13) - assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13) - - def test_genlaguerre(self): - k = 5*rand()-0.9 - lag0 = special.genlaguerre(0,k) - lag1 = special.genlaguerre(1,k) - lag2 = special.genlaguerre(2,k) - lag3 = special.genlaguerre(3,k) - assert_equal(lag0.c,[1]) - assert_equal(lag1.c,[-1,k+1]) - assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0) - assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0) - - -# Base polynomials come from Abrahmowitz and Stegan -class TestLegendre(TestCase): - def test_legendre(self): - leg0 = special.legendre(0) - leg1 = special.legendre(1) - leg2 = special.legendre(2) - leg3 = special.legendre(3) - leg4 = special.legendre(4) - leg5 = special.legendre(5) - assert_equal(leg0.c,[1]) - assert_equal(leg1.c,[1,0]) - assert_equal(leg2.c,array([3,0,-1])/2.0) - assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0) - assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0) - assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0) - - -class TestLambda(TestCase): - def test_lmbda(self): - lam = special.lmbda(1,.1) - lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]), - array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1])) - assert_array_almost_equal(lam,lamr,8) - -class TestLog1p(TestCase): - def test_log1p(self): - l1p = (special.log1p(10), special.log1p(11), special.log1p(12)) - l1prl = (log(11), log(12), log(13)) - 
assert_array_almost_equal(l1p,l1prl,8) - - def test_log1pmore(self): - l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2)) - l1pmrl = (log(2),log(2.1),log(2.2)) - assert_array_almost_equal(l1pm,l1pmrl,8) - -class TestLegendreFunctions(TestCase): - def test_lpmn(self): - lp = special.lpmn(0,2,.5) - assert_array_almost_equal(lp,(array([ [ 1.00000 , - 0.50000, - -0.12500]]), - array([ [ 0.00000 , - 1.00000 , - 1.50000]])),4) - - def test_lpn(self): - lpnf = special.lpn(2,.5) - assert_array_almost_equal(lpnf,(array( [ 1.00000 , - 0.50000, - -0.12500]), - array( [ 0.00000 , - 1.00000 , - 1.50000])),4) - - def test_lpmv(self): - lp = special.lpmv(0,2,.5) - assert_almost_equal(lp,-0.125,7) - lp = special.lpmv(0,40,.001) - assert_almost_equal(lp,0.1252678976534484,7) - - # XXX: this is outside the domain of the current implementation, - # so ensure it returns a NaN rather than a wrong answer. - olderr = np.seterr(all='ignore') - try: - lp = special.lpmv(-1,-1,.001) - finally: - np.seterr(**olderr) - assert_(lp != 0 or np.isnan(lp)) - - def test_lqmn(self): - lqmnf = special.lqmn(0,2,.5) - lqmnf = special.lqmn(0,2,.5) - lqf = special.lqn(2,.5) - assert_array_almost_equal(lqmnf[0][0],lqf[0],4) - assert_array_almost_equal(lqmnf[1][0],lqf[1],4) - - def test_lqmn_shape(self): - a, b = special.lqmn(4, 4, 1.1) - assert_equal(a.shape, (5, 5)) - assert_equal(b.shape, (5, 5)) - - a, b = special.lqmn(4, 0, 1.1) - assert_equal(a.shape, (5, 1)) - assert_equal(b.shape, (5, 1)) - - def test_lqn(self): - lqf = special.lqn(2,.5) - assert_array_almost_equal(lqf,(array([ 0.5493, -0.7253, -0.8187]), - array([ 1.3333, 1.216 , -0.8427])),4) - -class TestMathieu(TestCase): - - def test_mathieu_a(self): - pass - - def test_mathieu_even_coef(self): - mc = special.mathieu_even_coef(2,5) - #Q not defined broken and cannot figure out proper reporting order - - def test_mathieu_odd_coef(self): - pass - #same problem as above - -class TestFresnelIntegral(TestCase): - - def 
test_modfresnelp(self): - pass - - def test_modfresnelm(self): - pass - -class TestOblCvSeq(TestCase): - def test_obl_cv_seq(self): - obl = special.obl_cv_seq(0,3,1) - assert_array_almost_equal(obl,array([ -0.348602, - 1.393206, - 5.486800, - 11.492120]),5) - -class TestParabolicCylinder(TestCase): - def test_pbdn_seq(self): - pb = special.pbdn_seq(1,.1) - assert_array_almost_equal(pb,(array([ 0.9975, - 0.0998]), - array([-0.0499, - 0.9925])),4) - - def test_pbdv(self): - pbv = special.pbdv(1,.2) - derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0] - - def test_pbdv_seq(self): - pbn = special.pbdn_seq(1,.1) - pbv = special.pbdv_seq(1,.1) - assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4) - - def test_pbdv_points(self): - # simple case - eta = np.linspace(-10, 10, 5) - z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta) - assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14) - - # some points - assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12) - assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12) - - def test_pbdv_gradient(self): - x = np.linspace(-4, 4, 8)[:,None] - eta = np.linspace(-10, 10, 5)[None,:] - - p = special.pbdv(eta, x) - eps = 1e-7 + 1e-7*abs(x) - dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2. - assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6) - - def test_pbvv_gradient(self): - x = np.linspace(-4, 4, 8)[:,None] - eta = np.linspace(-10, 10, 5)[None,:] - - p = special.pbvv(eta, x) - eps = 1e-7 + 1e-7*abs(x) - dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2. - assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6) - - -class TestPolygamma(TestCase): - # from Table 6.2 (pg. 
271) of A&S - def test_polygamma(self): - poly2 = special.polygamma(2,1) - poly3 = special.polygamma(3,1) - assert_almost_equal(poly2,-2.4041138063,10) - assert_almost_equal(poly3,6.4939394023,10) - -class TestProCvSeq(TestCase): - def test_pro_cv_seq(self): - prol = special.pro_cv_seq(0,3,1) - assert_array_almost_equal(prol,array([ 0.319000, - 2.593084, - 6.533471, - 12.514462]),5) - -class TestPsi(TestCase): - def test_psi(self): - ps = special.psi(1) - assert_almost_equal(ps,-0.57721566490153287,8) - -class TestRadian(TestCase): - def test_radian(self): - rad = special.radian(90,0,0) - assert_almost_equal(rad,pi/2.0,5) - - def test_radianmore(self): - rad1 = special.radian(90,1,60) - assert_almost_equal(rad1,pi/2+0.0005816135199345904,5) - -class TestRiccati(TestCase): - def test_riccati_jn(self): - jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2) - ricjn = special.riccati_jn(1,.2) - assert_array_almost_equal(ricjn,jnrl,8) - - def test_riccati_yn(self): - ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2) - ricyn = special.riccati_yn(1,.2) - assert_array_almost_equal(ricyn,ynrl,8) - -class TestRound(TestCase): - def test_round(self): - rnd = map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))) - - # Note: According to the documentation, scipy.special.round is - # supposed to round to the nearest even number if the fractional - # part is exactly 0.5. On some platforms, this does not appear - # to work and thus this test may fail. However, this unit test is - # correctly written. 
- rndrl = (10,10,10,11) - assert_array_equal(rnd,rndrl) - - -def test_sph_harm(): - # Tests derived from tables in - # http://en.wikipedia.org/wiki/Table_of_spherical_harmonics - sh = special.sph_harm - pi = np.pi - exp = np.exp - sqrt = np.sqrt - sin = np.sin - cos = np.cos - yield (assert_array_almost_equal, sh(0,0,0,0), - 0.5/sqrt(pi)) - yield (assert_array_almost_equal, sh(-2,2,0.,pi/4), - 0.25*sqrt(15./(2.*pi))* - (sin(pi/4))**2.) - yield (assert_array_almost_equal, sh(-2,2,0.,pi/2), - 0.25*sqrt(15./(2.*pi))) - yield (assert_array_almost_equal, sh(2,2,pi,pi/2), - 0.25*sqrt(15/(2.*pi))* - exp(0+2.*pi*1j)*sin(pi/2.)**2.) - yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.), - (3./8.)*sqrt(5./(2.*pi))* - exp(0+2.*pi/4.*1j)* - sin(pi/3.)**2.* - (7.*cos(pi/3.)**2.-1)) - yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.), - (3./16.)*sqrt(35./(2.*pi))* - exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.) - - -class TestSpherical(TestCase): - def test_sph_harm(self): - # see test_sph_harm function - pass - - def test_sph_in(self): - i1n = special.sph_in(1,.2) - inp0 = (i1n[0][1]) - inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1]) - assert_array_almost_equal(i1n[0],array([1.0066800127054699381, - 0.066933714568029540839]),12) - assert_array_almost_equal(i1n[1],[inp0,inp1],12) - - def test_sph_inkn(self): - spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)] - inkn = r_[special.sph_inkn(1,.2)] - assert_array_almost_equal(inkn,spikn,10) - - def test_sph_jn(self): - s1 = special.sph_jn(2,.2) - s10 = -s1[0][1] - s11 = s1[0][0]-2.0/0.2*s1[0][1] - s12 = s1[0][1]-3.0/0.2*s1[0][2] - assert_array_almost_equal(s1[0],[0.99334665397530607731, - 0.066400380670322230863, - 0.0026590560795273856680],12) - assert_array_almost_equal(s1[1],[s10,s11,s12],12) - - def test_sph_jnyn(self): - jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition - jnyn1 = r_[special.sph_jnyn(1,.2)] - assert_array_almost_equal(jnyn1,jnyn,9) - - def test_sph_kn(self): - kn = special.sph_kn(2,.2) - kn0 = 
-kn[0][1] - kn1 = -kn[0][0]-2.0/0.2*kn[0][1] - kn2 = -kn[0][1]-3.0/0.2*kn[0][2] - assert_array_almost_equal(kn[0],[6.4302962978445670140, - 38.581777787067402086, - 585.15696310385559829],12) - assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9) - - def test_sph_yn(self): - sy1 = special.sph_yn(2,.2)[0][2] - sy2 = special.sph_yn(0,.2)[0][0] - sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 #correct derivative value - assert_almost_equal(sy1,-377.52483,5)#previous values in the system - assert_almost_equal(sy2,-4.9003329,5) - sy3 = special.sph_yn(1,.2)[1][1] - assert_almost_equal(sy3,sphpy,4) #compare correct derivative val. (correct =-system val). - -class TestStruve(object): - def _series(self, v, z, n=100): - """Compute Struve function & error estimate from its power series.""" - k = arange(0, n) - r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5) - err = abs(r).max() * finfo(float_).eps * n - return r.sum(), err - - def test_vs_series(self): - """Check Struve function versus its power series""" - for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]: - for z in [1, 10, 19, 21, 30]: - value, err = self._series(v, z) - assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z) - - def test_some_values(self): - assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7) - assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8) - assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12) - assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11) - assert_equal(special.struve(-12, -41), -special.struve(-12, 41)) - assert_equal(special.struve(+12, -41), -special.struve(+12, 41)) - assert_equal(special.struve(-11, -41), +special.struve(-11, 41)) - assert_equal(special.struve(+11, -41), +special.struve(+11, 41)) - - assert_(isnan(special.struve(-7.1, -1))) - assert_(isnan(special.struve(-10.1, -1))) - - def test_regression_679(self): - """Regression 
test for #679""" - assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8)) - assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8)) - assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8)) - -def test_chi2_smalldf(): - assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110) - -def test_chi2c_smalldf(): - assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110) - -def test_chi2_inv_smalldf(): - assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3) - -def test_agm_simple(): - assert_allclose(special.agm(24, 6), 13.4581714817) - assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/special/tests/test_data.py b/scipy-0.10.1/scipy/special/tests/test_data.py deleted file mode 100644 index 3d51681043..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_data.py +++ /dev/null @@ -1,220 +0,0 @@ -import os - -import numpy as np -from numpy import arccosh, arcsinh, arctanh -from scipy.special import ( - erf, erfc, log1p, expm1, - jn, jv, yn, yv, iv, kv, kn, gamma, gammaln, digamma, beta, cbrt, - ellipe, ellipeinc, ellipk, ellipkm1, ellipj, erfinv, erfcinv, exp1, expi, - expn, zeta, gammaincinv, lpmv -) - -from scipy.special._testutils import FuncData - -DATASETS = np.load(os.path.join(os.path.dirname(__file__), - "data", "boost.npz")) - -def data(func, dataname, *a, **kw): - kw.setdefault('dataname', dataname) - return FuncData(func, DATASETS[dataname], *a, **kw) - -def ellipk_(k): - return ellipk(k*k) -def ellipe_(k): - return ellipe(k*k) -def ellipeinc_(f, k): - return ellipeinc(f, k*k) -def ellipj_(k): - return ellipj(k*k) -def zeta_(x): - return zeta(x, 1.) 
-def assoc_legendre_p_boost_(nu, mu, x): - # the boost test data is for integer orders only - return lpmv(mu, nu.astype(int), x) -def legendre_p_via_assoc_(nu, x): - return lpmv(0, nu, x) - -def test_boost(): - TESTS = [ - data(arccosh, 'acosh_data_ipp-acosh_data', 0, 1, rtol=5e-13), - data(arccosh, 'acosh_data_ipp-acosh_data', 0j, 1, rtol=5e-14), - - data(arcsinh, 'asinh_data_ipp-asinh_data', 0, 1, rtol=1e-11), - data(arcsinh, 'asinh_data_ipp-asinh_data', 0j, 1, rtol=1e-11), - - data(arctanh, 'atanh_data_ipp-atanh_data', 0, 1, rtol=1e-11), - data(arctanh, 'atanh_data_ipp-atanh_data', 0j, 1, rtol=1e-11), - - data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', - (0,1,2), 3, rtol=1e-11), - data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', - (0,1), 2, rtol=1e-11), - - data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13), - data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13), - data(beta, 'beta_small_data_ipp-beta_small_data', (0,1), 2), - - data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0), - - data(digamma, 'digamma_data_ipp-digamma_data', 0, 1), - data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1), - data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=1e-13), - data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13), - data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-11), - data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-11), - data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1), - data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1), - - data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1), - data(ellipkm1, '-ellipkm1', 0, 1), - data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1), - data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14), - - data(erf, 'erf_data_ipp-erf_data', 0, 1), - data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-14), - data(erfc, 'erf_data_ipp-erf_data', 0, 2), - 
data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1), - data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1), - data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2), - data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1), - data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1), - data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2), - - data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1), - data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1), - #data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data', 0, 1), - - data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2), - data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9), - data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13), - data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1), - - data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2), - data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14), - - data(gamma, 'test_gamma_data_ipp-near_0', 0, 1), - data(gamma, 'test_gamma_data_ipp-near_1', 0, 1), - data(gamma, 'test_gamma_data_ipp-near_2', 0, 1), - data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1), - data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1), - data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9), - data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9), - data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9), - data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9), - data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9), - data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11), - data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11), - data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10), - data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11), - data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11), - - data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1), - data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2), - - data(iv, 'bessel_i_data_ipp-bessel_i_data', 
(0,1), 2, rtol=1e-12), - data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1j), 2, rtol=2e-10, atol=1e-306), - data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1), 2, rtol=1e-9), - data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1j), 2, rtol=2e-10), - - data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), - data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), - data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11), - data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11), - - data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), - data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), - data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12), - data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12), - - data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12, - knownfailure="Known bug in Cephes kn implementation"), - - data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), - data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12), - data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12), - data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12), - - data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12), - data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), - - data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), - data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12), - data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-12, - knownfailure="Known bug in Cephes yv implementation"), - data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10), - - data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, param_filter=(lambda s: s > 1)), - data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, param_filter=(lambda s: s > 1)), - data(zeta_, 
'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, param_filter=(lambda s: s > 1)), - data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, param_filter=(lambda s: s > 1)), - - data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, - rtol=1e-12), - data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', - (0,1), 2, rtol=1e-11), - - # XXX: the data file needs reformatting... - #data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', - # (0,1), 2), - - # -- not used yet: - # assoc_legendre_p.txt - # binomial_data.txt - # binomial_large_data.txt - # binomial_quantile_data.txt - # ellint_f_data.txt - # ellint_pi2_data.txt - # ellint_pi3_data.txt - # ellint_pi3_large_data.txt - # ellint_rc_data.txt - # ellint_rd_data.txt - # ellint_rf_data.txt - # ellint_rj_data.txt - # expinti_data_long.txt - # factorials.txt - # gammap1m1_data.txt - # hermite.txt - # ibeta_data.txt - # ibeta_int_data.txt - # ibeta_inv_data.txt - # ibeta_inva_data.txt - # ibeta_large_data.txt - # ibeta_small_data.txt - # igamma_big_data.txt - # igamma_int_data.txt - # igamma_inva_data.txt - # igamma_med_data.txt - # igamma_small_data.txt - # laguerre2.txt - # laguerre3.txt - # legendre_p.txt - # legendre_p_large.txt - # ncbeta.txt - # ncbeta_big.txt - # nccs.txt - # near_0.txt - # near_1.txt - # near_2.txt - # near_m10.txt - # near_m55.txt - # negative_binomial_quantile_data.txt - # poisson_quantile_data.txt - # sph_bessel_data.txt - # sph_neumann_data.txt - # spherical_harmonic.txt - # tgamma_delta_ratio_data.txt - # tgamma_delta_ratio_int.txt - # tgamma_delta_ratio_int2.txt - # tgamma_ratio_data.txt - ] - - for test in TESTS: - yield _test_factory, test - -def _test_factory(test, dtype=np.double): - """Boost test""" - olderr = np.seterr(all='ignore') - try: - test.check(dtype=dtype) - finally: - np.seterr(**olderr) diff --git a/scipy-0.10.1/scipy/special/tests/test_lambertw.py b/scipy-0.10.1/scipy/special/tests/test_lambertw.py deleted file mode 100644 index 
96ff81ca30..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_lambertw.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# Tests for the lambertw function, -# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il -# Distributed under the same license as SciPy itself. -# -# [1] mpmath source code, Subversion revision 992 -# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992 - -import numpy as np -from numpy.testing import assert_, assert_equal, assert_array_almost_equal -from scipy.special import lambertw -from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_ - -from scipy.special._testutils import FuncData - - -def test_values(): - assert_(isnan(lambertw(nan))) - assert_equal(lambertw(inf,1).real, inf) - assert_equal(lambertw(inf,1).imag, 2*pi) - assert_equal(lambertw(-inf,1).real, inf) - assert_equal(lambertw(-inf,1).imag, 3*pi) - - assert_equal(lambertw(1.), lambertw(1., 0)) - - data = [ - (0,0, 0), - (0+0j,0, 0), - (inf,0, inf), - (0,-1, -inf), - (0,1, -inf), - (0,3, -inf), - (e,0, 1), - (1,0, 0.567143290409783873), - (-pi/2,0, 1j*pi/2), - (-log(2)/2,0, -log(2)), - (0.25,0, 0.203888354702240164), - (-0.25,0, -0.357402956181388903), - (-1./10000,0, -0.000100010001500266719), - (-0.25,-1, -2.15329236411034965), - (0.25,-1, -3.00899800997004620-4.07652978899159763j), - (-0.25,-1, -2.15329236411034965), - (0.25,1, -3.00899800997004620+4.07652978899159763j), - (-0.25,1, -3.48973228422959210+7.41405453009603664j), - (-4,0, 0.67881197132094523+1.91195078174339937j), - (-4,1, -0.66743107129800988+7.76827456802783084j), - (-4,-1, 0.67881197132094523-1.91195078174339937j), - (1000,0, 5.24960285240159623), - (1000,1, 4.91492239981054535+5.44652615979447070j), - (1000,-1, 4.91492239981054535-5.44652615979447070j), - (1000,5, 3.5010625305312892+29.9614548941181328j), - (3+4j,0, 1.281561806123775878+0.533095222020971071j), - (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j), - (3+4j,1, 
-0.11691092896595324+5.61888039871282334j), - (3+4j,-1, 0.25856740686699742-3.85211668616143559j), - (-0.5,-1, -0.794023632344689368-0.770111750510379110j), - (-1./10000,1, -11.82350837248724344+6.80546081842002101j), - (-1./10000,-1, -11.6671145325663544), - (-1./10000,-2, -11.82350837248724344-6.80546081842002101j), - (-1./100000,4, -14.9186890769540539+26.1856750178782046j), - (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j), - ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j), - ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j), - ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j), - ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j), - (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j), - (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j), - (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j), - (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j), - (pi,0, 1.073658194796149172092178407024821347547745350410314531), - - # Former bug in generated branch, - (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j), - (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j), - (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j), - (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j), - ] - data = array(data, dtype=complex_) - - def w(x, y): - return lambertw(x, y.real.astype(int)) - olderr = np.seterr(all='ignore') - try: - FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check() - finally: - np.seterr(**olderr) - -def test_ufunc(): - assert_array_almost_equal( - lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873]) diff --git a/scipy-0.10.1/scipy/special/tests/test_logit.py b/scipy-0.10.1/scipy/special/tests/test_logit.py deleted file mode 100644 index b296d7f9c2..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_logit.py +++ /dev/null @@ -1,74 +0,0 @@ -import numpy as np -from numpy.testing import TestCase, assert_equal, 
assert_almost_equal -from scipy.special import logit, expit - - -class TestLogit(TestCase): - def check_logit_out(self, dtype, expected): - a = np.linspace(0,1,10) - a = np.array(a, dtype=dtype) - olderr = np.seterr(divide='ignore') - try: - actual = logit(a) - finally: - np.seterr(**olderr) - - if np.__version__ >= '1.6': - assert_almost_equal(actual, expected) - else: - assert_almost_equal(actual[1:-1], expected[1:-1]) - - assert_equal(actual.dtype, np.dtype(dtype)) - - def test_float32(self): - expected = np.array([-np.inf, -2.07944155, - -1.25276291, -0.69314718, - -0.22314353, 0.22314365, - 0.6931473 , 1.25276303, - 2.07944155, np.inf], dtype=np.float32) - self.check_logit_out('f4', expected) - - def test_float64(self): - expected = np.array([-np.inf, -2.07944154, - -1.25276297, -0.69314718, - -0.22314355, 0.22314355, - 0.69314718, 1.25276297, - 2.07944154, np.inf]) - self.check_logit_out('f8', expected) - - def test_nan(self): - expected = np.array([np.nan]*4) - olderr = np.seterr(invalid='ignore') - try: - actual = logit(np.array([-3., -2., 2., 3.])) - finally: - np.seterr(**olderr) - - assert_equal(expected, actual) - - -class TestExpit(TestCase): - def check_expit_out(self, dtype, expected): - a = np.linspace(-4,4,10) - a = np.array(a, dtype=dtype) - actual = expit(a) - assert_almost_equal(actual, expected) - assert_equal(actual.dtype, np.dtype(dtype)) - - def test_float32(self): - expected = np.array([ 0.01798621, 0.04265125, - 0.09777259, 0.20860852, - 0.39068246, 0.60931754, - 0.79139149, 0.9022274 , - 0.95734876, 0.98201376], dtype=np.float32) - self.check_expit_out('f4',expected) - - def test_float64(self): - expected = np.array([ 0.01798621, 0.04265125, - 0.0977726 , 0.20860853, - 0.39068246, 0.60931754, - 0.79139147, 0.9022274 , - 0.95734875, 0.98201379]) - self.check_expit_out('f8', expected) - - diff --git a/scipy-0.10.1/scipy/special/tests/test_mpmath.py b/scipy-0.10.1/scipy/special/tests/test_mpmath.py deleted file mode 100644 index 
b3f95471a2..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_mpmath.py +++ /dev/null @@ -1,238 +0,0 @@ -""" -Test Scipy functions versus mpmath, if available. - -""" -import re -import numpy as np -from numpy.testing import dec -import scipy.special as sc - -from scipy.special._testutils import FuncData, assert_func_equal - -try: - import mpmath -except ImportError: - try: - import sympy.mpmath as mpmath - except ImportError: - mpmath = None - -def mpmath_check(min_ver): - if mpmath is None: - return dec.skipif(True, "mpmath library is not present") - - def try_int(v): - try: return int(v) - except ValueError: return v - - def get_version(v): - return map(try_int, re.split('[^0-9]', v)) - - return dec.skipif(get_version(min_ver) > get_version(mpmath.__version__), - "mpmath %s required" % min_ver) - - -#------------------------------------------------------------------------------ -# expi -#------------------------------------------------------------------------------ - -@mpmath_check('0.10') -def test_expi_complex(): - dataset = [] - for r in np.logspace(-99, 2, 10): - for p in np.linspace(0, 2*np.pi, 30): - z = r*np.exp(1j*p) - dataset.append((z, complex(mpmath.ei(z)))) - dataset = np.array(dataset, dtype=np.complex_) - - FuncData(sc.expi, dataset, 0, 1).check() - - -#------------------------------------------------------------------------------ -# hyp2f1 -#------------------------------------------------------------------------------ - -@mpmath_check('0.14') -def test_hyp2f1_strange_points(): - pts = [ - (2,-1,-1,0.7), - (2,-2,-2,0.7), - ] - kw = dict(eliminate=True) - dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts] - dataset = np.array(dataset, dtype=np.float_) - - FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() - -@mpmath_check('0.13') -def test_hyp2f1_real_some_points(): - pts = [ - (1,2,3,0), - (1./3, 2./3, 5./6, 27./32), - (1./4, 1./2, 3./4, 80./81), - (2,-2,-3,3), - (2,-3,-2,3), - (2,-1.5,-1.5,3), - (1,2,3,0), - (0.7235, 
-1, -5, 0.3), - (0.25, 1./3, 2, 0.999), - (0.25, 1./3, 2, -1), - (2,3,5,0.99), - (3./2,-0.5,3,0.99), - (2,2.5,-3.25,0.999), - (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001), - (-10,900,-10.5,0.99), - (-10,900,10.5,0.99), - (-1,2,1,1.0), - (-1,2,1,-1.0), - (-3,13,5,1.0), - (-3,13,5,-1.0), - ] - dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts] - dataset = np.array(dataset, dtype=np.float_) - - olderr = np.seterr(invalid='ignore') - try: - FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() - finally: - np.seterr(**olderr) - - -@mpmath_check('0.14') -def test_hyp2f1_some_points_2(): - # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but - # was fixed in their SVN since then - pts = [ - (112, (51,10), (-9,10), -0.99999), - (10,-900,10.5,0.99), - (10,-900,-10.5,0.99), - ] - - def fev(x): - if isinstance(x, tuple): - return float(x[0]) / x[1] - else: - return x - - dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts] - dataset = np.array(dataset, dtype=np.float_) - - FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() - -@mpmath_check('0.13') -def test_hyp2f1_real_some(): - dataset = [] - for a in [-10, -5, -1.8, 1.8, 5, 10]: - for b in [-2.5, -1, 1, 7.4]: - for c in [-9, -1.8, 5, 20.4]: - for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]: - try: - v = float(mpmath.hyp2f1(a, b, c, z)) - except: - continue - dataset.append((a, b, c, z, v)) - dataset = np.array(dataset, dtype=np.float_) - - olderr = np.seterr(invalid='ignore') - try: - FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9).check() - finally: - np.seterr(**olderr) - -@mpmath_check('0.12') -@dec.slow -def test_hyp2f1_real_random(): - dataset = [] - - npoints = 500 - dataset = np.zeros((npoints, 5), np.float_) - - np.random.seed(1234) - dataset[:,0] = np.random.pareto(1.5, npoints) - dataset[:,1] = np.random.pareto(1.5, npoints) - dataset[:,2] = np.random.pareto(1.5, npoints) - dataset[:,3] = 2*np.random.rand(npoints) - 
1 - - dataset[:,0] *= (-1)**np.random.randint(2, npoints) - dataset[:,1] *= (-1)**np.random.randint(2, npoints) - dataset[:,2] *= (-1)**np.random.randint(2, npoints) - - for ds in dataset: - if mpmath.__version__ < '0.14': - # mpmath < 0.14 fails for c too much smaller than a, b - if abs(ds[:2]).max() > abs(ds[2]): - ds[2] = abs(ds[:2]).max() - ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4]))) - - FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9).check() - -#------------------------------------------------------------------------------ -# erf (complex) -#------------------------------------------------------------------------------ - -@mpmath_check('0.14') -def test_erf_complex(): - # need to increase mpmath precision for this test - old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec - try: - mpmath.mp.dps = 70 - x1, y1 = np.meshgrid(np.linspace(-10, 1, 11), np.linspace(-10, 1, 11)) - x2, y2 = np.meshgrid(np.logspace(-10, .8, 11), np.logspace(-10, .8, 11)) - points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(),y2.ravel()] - - # note that the global accuracy of our complex erf algorithm is limited - # roughly to 2e-8 - assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points, - vectorized=False, rtol=2e-8) - finally: - mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec - - - -#------------------------------------------------------------------------------ -# lpmv -#------------------------------------------------------------------------------ - -@mpmath_check('0.15') -def test_lpmv(): - pts = [] - for x in [-0.99, -0.557, 1e-6, 0.132, 1]: - pts.extend([ - (1, 1, x), - (1, -1, x), - (-1, 1, x), - (-1, -2, x), - (1, 1.7, x), - (1, -1.7, x), - (-1, 1.7, x), - (-1, -2.7, x), - (1, 10, x), - (1, 11, x), - (3, 8, x), - (5, 11, x), - (-3, 8, x), - (-5, 11, x), - (3, -8, x), - (5, -11, x), - (-3, -8, x), - (-5, -11, x), - (3, 8.3, x), - (5, 11.3, x), - (-3, 8.3, x), - (-5, 11.3, x), - (3, -8.3, x), - (5, -11.3, x), - (-3, -8.3, x), - (-5, -11.3, x), - ]) - - 
dataset = [p + (mpmath.legenp(p[1], p[0], p[2]),) for p in pts] - dataset = np.array(dataset, dtype=np.float_) - - evf = lambda mu,nu,x: sc.lpmv(mu.astype(int), nu, x) - - olderr = np.seterr(invalid='ignore') - try: - FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check() - finally: - np.seterr(**olderr) diff --git a/scipy-0.10.1/scipy/special/tests/test_orthogonal.py b/scipy-0.10.1/scipy/special/tests/test_orthogonal.py deleted file mode 100644 index 3a6eeddf60..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_orthogonal.py +++ /dev/null @@ -1,260 +0,0 @@ -from numpy.testing import assert_array_almost_equal, assert_almost_equal, \ - rand, TestCase -import numpy as np -from numpy import array, sqrt -import scipy.special.orthogonal as orth -from scipy.special import gamma - - -class TestCheby(TestCase): - def test_chebyc(self): - C0 = orth.chebyc(0) - C1 = orth.chebyc(1) - olderr = np.seterr(all='ignore') - try: - C2 = orth.chebyc(2) - C3 = orth.chebyc(3) - C4 = orth.chebyc(4) - C5 = orth.chebyc(5) - finally: - np.seterr(**olderr) - - assert_array_almost_equal(C0.c,[2],13) - assert_array_almost_equal(C1.c,[1,0],13) - assert_array_almost_equal(C2.c,[1,0,-2],13) - assert_array_almost_equal(C3.c,[1,0,-3,0],13) - assert_array_almost_equal(C4.c,[1,0,-4,0,2],13) - assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13) - - def test_chebys(self): - S0 = orth.chebys(0) - S1 = orth.chebys(1) - S2 = orth.chebys(2) - S3 = orth.chebys(3) - S4 = orth.chebys(4) - S5 = orth.chebys(5) - assert_array_almost_equal(S0.c,[1],13) - assert_array_almost_equal(S1.c,[1,0],13) - assert_array_almost_equal(S2.c,[1,0,-1],13) - assert_array_almost_equal(S3.c,[1,0,-2,0],13) - assert_array_almost_equal(S4.c,[1,0,-3,0,1],13) - assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13) - - def test_chebyt(self): - T0 = orth.chebyt(0) - T1 = orth.chebyt(1) - T2 = orth.chebyt(2) - T3 = orth.chebyt(3) - T4 = orth.chebyt(4) - T5 = orth.chebyt(5) - assert_array_almost_equal(T0.c,[1],13) - 
assert_array_almost_equal(T1.c,[1,0],13) - assert_array_almost_equal(T2.c,[2,0,-1],13) - assert_array_almost_equal(T3.c,[4,0,-3,0],13) - assert_array_almost_equal(T4.c,[8,0,-8,0,1],13) - assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13) - - def test_chebyu(self): - U0 = orth.chebyu(0) - U1 = orth.chebyu(1) - U2 = orth.chebyu(2) - U3 = orth.chebyu(3) - U4 = orth.chebyu(4) - U5 = orth.chebyu(5) - assert_array_almost_equal(U0.c,[1],13) - assert_array_almost_equal(U1.c,[2,0],13) - assert_array_almost_equal(U2.c,[4,0,-1],13) - assert_array_almost_equal(U3.c,[8,0,-4,0],13) - assert_array_almost_equal(U4.c,[16,0,-12,0,1],13) - assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13) - -class TestGegenbauer(TestCase): - - def test_gegenbauer(self): - a = 5*rand()-0.5 - if np.any(a==0): a = -0.2 - Ca0 = orth.gegenbauer(0,a) - Ca1 = orth.gegenbauer(1,a) - Ca2 = orth.gegenbauer(2,a) - Ca3 = orth.gegenbauer(3,a) - Ca4 = orth.gegenbauer(4,a) - Ca5 = orth.gegenbauer(5,a) - - assert_array_almost_equal(Ca0.c,array([1]),13) - assert_array_almost_equal(Ca1.c,array([2*a,0]),13) - assert_array_almost_equal(Ca2.c,array([2*a*(a+1),0,-a]),13) - assert_array_almost_equal(Ca3.c,array([4*orth.poch(a,3),0,-6*a*(a+1), - 0])/3.0,11) - assert_array_almost_equal(Ca4.c,array([4*orth.poch(a,4),0,-12*orth.poch(a,3), - 0,3*a*(a+1)])/6.0,11) - assert_array_almost_equal(Ca5.c,array([4*orth.poch(a,5),0,-20*orth.poch(a,4), - 0,15*orth.poch(a,3),0])/15.0,11) - -class TestHermite(TestCase): - def test_hermite(self): - H0 = orth.hermite(0) - H1 = orth.hermite(1) - H2 = orth.hermite(2) - H3 = orth.hermite(3) - H4 = orth.hermite(4) - H5 = orth.hermite(5) - assert_array_almost_equal(H0.c,[1],13) - assert_array_almost_equal(H1.c,[2,0],13) - assert_array_almost_equal(H2.c,[4,0,-2],13) - assert_array_almost_equal(H3.c,[8,0,-12,0],13) - assert_array_almost_equal(H4.c,[16,0,-48,0,12],12) - assert_array_almost_equal(H5.c,[32,0,-160,0,120,0],12) - - def test_hermitenorm(self): - # He_n(x) = 2**(-n/2) H_n(x/sqrt(2)) - 
psub = np.poly1d([1.0/sqrt(2),0]) - H0 = orth.hermitenorm(0) - H1 = orth.hermitenorm(1) - H2 = orth.hermitenorm(2) - H3 = orth.hermitenorm(3) - H4 = orth.hermitenorm(4) - H5 = orth.hermitenorm(5) - he0 = orth.hermite(0)(psub) - he1 = orth.hermite(1)(psub) / sqrt(2) - he2 = orth.hermite(2)(psub) / 2.0 - he3 = orth.hermite(3)(psub) / (2*sqrt(2)) - he4 = orth.hermite(4)(psub) / 4.0 - he5 = orth.hermite(5)(psub) / (4.0*sqrt(2)) - - assert_array_almost_equal(H0.c,he0.c,13) - assert_array_almost_equal(H1.c,he1.c,13) - assert_array_almost_equal(H2.c,he2.c,13) - assert_array_almost_equal(H3.c,he3.c,13) - assert_array_almost_equal(H4.c,he4.c,13) - assert_array_almost_equal(H5.c,he5.c,13) - -class _test_sh_legendre(TestCase): - - def test_sh_legendre(self): - # P*_n(x) = P_n(2x-1) - psub = np.poly1d([2,-1]) - Ps0 = orth.sh_legendre(0) - Ps1 = orth.sh_legendre(1) - Ps2 = orth.sh_legendre(2) - Ps3 = orth.sh_legendre(3) - Ps4 = orth.sh_legendre(4) - Ps5 = orth.sh_legendre(5) - pse0 = orth.legendre(0)(psub) - pse1 = orth.legendre(1)(psub) - pse2 = orth.legendre(2)(psub) - pse3 = orth.legendre(3)(psub) - pse4 = orth.legendre(4)(psub) - pse5 = orth.legendre(5)(psub) - assert_array_almost_equal(Ps0.c,pse0.c,13) - assert_array_almost_equal(Ps1.c,pse1.c,13) - assert_array_almost_equal(Ps2.c,pse2.c,13) - assert_array_almost_equal(Ps3.c,pse3.c,13) - assert_array_almost_equal(Ps4.c,pse4.c,12) - assert_array_almost_equal(Ps5.c,pse5.c,12) - -class _test_sh_chebyt(TestCase): - - def test_sh_chebyt(self): - # T*_n(x) = T_n(2x-1) - psub = np.poly1d([2,-1]) - Ts0 = orth.sh_chebyt(0) - Ts1 = orth.sh_chebyt(1) - Ts2 = orth.sh_chebyt(2) - Ts3 = orth.sh_chebyt(3) - Ts4 = orth.sh_chebyt(4) - Ts5 = orth.sh_chebyt(5) - tse0 = orth.chebyt(0)(psub) - tse1 = orth.chebyt(1)(psub) - tse2 = orth.chebyt(2)(psub) - tse3 = orth.chebyt(3)(psub) - tse4 = orth.chebyt(4)(psub) - tse5 = orth.chebyt(5)(psub) - assert_array_almost_equal(Ts0.c,tse0.c,13) - assert_array_almost_equal(Ts1.c,tse1.c,13) - 
assert_array_almost_equal(Ts2.c,tse2.c,13) - assert_array_almost_equal(Ts3.c,tse3.c,13) - assert_array_almost_equal(Ts4.c,tse4.c,12) - assert_array_almost_equal(Ts5.c,tse5.c,12) - -class _test_sh_chebyu(TestCase): - - def test_sh_chebyu(self): - # U*_n(x) = U_n(2x-1) - psub = np.poly1d([2,-1]) - Us0 = orth.sh_chebyu(0) - Us1 = orth.sh_chebyu(1) - Us2 = orth.sh_chebyu(2) - Us3 = orth.sh_chebyu(3) - Us4 = orth.sh_chebyu(4) - Us5 = orth.sh_chebyu(5) - use0 = orth.chebyu(0)(psub) - use1 = orth.chebyu(1)(psub) - use2 = orth.chebyu(2)(psub) - use3 = orth.chebyu(3)(psub) - use4 = orth.chebyu(4)(psub) - use5 = orth.chebyu(5)(psub) - assert_array_almost_equal(Us0.c,use0.c,13) - assert_array_almost_equal(Us1.c,use1.c,13) - assert_array_almost_equal(Us2.c,use2.c,13) - assert_array_almost_equal(Us3.c,use3.c,13) - assert_array_almost_equal(Us4.c,use4.c,12) - assert_array_almost_equal(Us5.c,use5.c,11) - -class _test_sh_jacobi(TestCase): - def test_sh_jacobi(self): - # G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1) - conv = lambda n,p: gamma(n+1)*gamma(n+p)/gamma(2*n+p) - psub = np.poly1d([2,-1]) - q = 4*rand() - p = q-1 + 2*rand() - #print "shifted jacobi p,q = ", p, q - G0 = orth.sh_jacobi(0,p,q) - G1 = orth.sh_jacobi(1,p,q) - G2 = orth.sh_jacobi(2,p,q) - G3 = orth.sh_jacobi(3,p,q) - G4 = orth.sh_jacobi(4,p,q) - G5 = orth.sh_jacobi(5,p,q) - ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p) - ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p) - ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p) - ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p) - ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p) - ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p) - - assert_array_almost_equal(G0.c,ge0.c,13) - assert_array_almost_equal(G1.c,ge1.c,13) - assert_array_almost_equal(G2.c,ge2.c,13) - assert_array_almost_equal(G3.c,ge3.c,13) - assert_array_almost_equal(G4.c,ge4.c,13) - assert_array_almost_equal(G5.c,ge5.c,13) - -class TestCall(object): - def test_call(self): - poly = [] - for n in 
xrange(5): - poly.extend([x.strip() for x in - (""" - orth.jacobi(%(n)d,0.3,0.9) - orth.sh_jacobi(%(n)d,0.3,0.9) - orth.genlaguerre(%(n)d,0.3) - orth.laguerre(%(n)d) - orth.hermite(%(n)d) - orth.hermitenorm(%(n)d) - orth.gegenbauer(%(n)d,0.3) - orth.chebyt(%(n)d) - orth.chebyu(%(n)d) - orth.chebyc(%(n)d) - orth.chebys(%(n)d) - orth.sh_chebyt(%(n)d) - orth.sh_chebyu(%(n)d) - orth.legendre(%(n)d) - orth.sh_legendre(%(n)d) - """ % dict(n=n)).split() - ]) - olderr = np.seterr(all='ignore') - try: - for pstr in poly: - p = eval(pstr) - assert_almost_equal(p(0.315), np.poly1d(p)(0.315), err_msg=pstr) - finally: - np.seterr(**olderr) diff --git a/scipy-0.10.1/scipy/special/tests/test_orthogonal_eval.py b/scipy-0.10.1/scipy/special/tests/test_orthogonal_eval.py deleted file mode 100644 index 9760e23cd9..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_orthogonal_eval.py +++ /dev/null @@ -1,139 +0,0 @@ -import numpy as np -from numpy.testing import assert_ -import scipy.special.orthogonal as orth - -from scipy.special._testutils import FuncData - - -def test_eval_chebyt(): - n = np.arange(0, 10000, 7) - x = 2*np.random.rand() - 1 - v1 = np.cos(n*np.arccos(x)) - v2 = orth.eval_chebyt(n, x) - assert_(np.allclose(v1, v2, rtol=1e-15)) - - -def test_warnings(): - # ticket 1334 - olderr = np.seterr(all='raise') - try: - # these should raise no fp warnings - orth.eval_legendre(1, 0) - orth.eval_laguerre(1, 1) - orth.eval_gegenbauer(1, 1, 0) - finally: - np.seterr(**olderr) - - -class TestPolys(object): - """ - Check that the eval_* functions agree with the constructed polynomials - - """ - - def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10, - nparam=10, nx=10, rtol=1e-8): - np.random.seed(1234) - - dataset = [] - for n in np.arange(nn): - params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges] - params = np.asarray(params).T - if not param_ranges: - params = [0] - for p in params: - if param_ranges: - p = (n,) + tuple(p) - else: - p = (n,) - x = 
x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx) - x[0] = x_range[0] # always include domain start point - x[1] = x_range[1] # always include domain end point - poly = np.poly1d(cls(*p)) - z = np.c_[np.tile(p, (nx,1)), x, poly(x)] - dataset.append(z) - - dataset = np.concatenate(dataset, axis=0) - - def polyfunc(*p): - p = (p[0].astype(int),) + p[1:] - return func(*p) - - olderr = np.seterr(all='raise') - try: - ds = FuncData(polyfunc, dataset, range(len(param_ranges)+2), -1, - rtol=rtol) - ds.check() - finally: - np.seterr(**olderr) - - def test_jacobi(self): - self.check_poly(orth.eval_jacobi, orth.jacobi, - param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1], - rtol=1e-5) - - def test_sh_jacobi(self): - self.check_poly(orth.eval_sh_jacobi, orth.sh_jacobi, - param_ranges=[(1, 10), (0, 1)], x_range=[0, 1], - rtol=1e-5) - - def test_gegenbauer(self): - self.check_poly(orth.eval_gegenbauer, orth.gegenbauer, - param_ranges=[(-0.499, 10)], x_range=[-1, 1], - rtol=1e-7) - - def test_chebyt(self): - self.check_poly(orth.eval_chebyt, orth.chebyt, - param_ranges=[], x_range=[-1, 1]) - - def test_chebyu(self): - self.check_poly(orth.eval_chebyu, orth.chebyu, - param_ranges=[], x_range=[-1, 1]) - - def test_chebys(self): - self.check_poly(orth.eval_chebys, orth.chebys, - param_ranges=[], x_range=[-2, 2]) - - def test_chebyc(self): - self.check_poly(orth.eval_chebyc, orth.chebyc, - param_ranges=[], x_range=[-2, 2]) - - def test_sh_chebyt(self): - olderr = np.seterr(all='ignore') - try: - self.check_poly(orth.eval_sh_chebyt, orth.sh_chebyt, - param_ranges=[], x_range=[0, 1]) - finally: - np.seterr(**olderr) - - def test_sh_chebyu(self): - self.check_poly(orth.eval_sh_chebyu, orth.sh_chebyu, - param_ranges=[], x_range=[0, 1]) - - def test_legendre(self): - self.check_poly(orth.eval_legendre, orth.legendre, - param_ranges=[], x_range=[-1, 1]) - - def test_sh_legendre(self): - olderr = np.seterr(all='ignore') - try: - self.check_poly(orth.eval_sh_legendre, 
orth.sh_legendre, - param_ranges=[], x_range=[0, 1]) - finally: - np.seterr(**olderr) - - def test_genlaguerre(self): - self.check_poly(orth.eval_genlaguerre, orth.genlaguerre, - param_ranges=[(-0.99, 10)], x_range=[0, 100]) - - def test_laguerre(self): - self.check_poly(orth.eval_laguerre, orth.laguerre, - param_ranges=[], x_range=[0, 100]) - - def test_hermite(self): - self.check_poly(orth.eval_hermite, orth.hermite, - param_ranges=[], x_range=[-100, 100]) - - def test_hermitenorm(self): - self.check_poly(orth.eval_hermitenorm, orth.hermitenorm, - param_ranges=[], x_range=[-100, 100]) diff --git a/scipy-0.10.1/scipy/special/tests/test_spfun_stats.py b/scipy-0.10.1/scipy/special/tests/test_spfun_stats.py deleted file mode 100644 index b7d7a263ca..0000000000 --- a/scipy-0.10.1/scipy/special/tests/test_spfun_stats.py +++ /dev/null @@ -1,37 +0,0 @@ -import numpy as np -from numpy.testing import assert_array_equal, TestCase, run_module_suite - -from scipy.special import gammaln, multigammaln - - -class TestMultiGammaLn(TestCase): - def test1(self): - a = np.abs(np.random.randn()) - assert_array_equal(multigammaln(a, 1), gammaln(a)) - - def test_ararg(self): - d = 5 - a = np.abs(np.random.randn(3, 2)) + d - - tr = multigammaln(a, d) - assert_array_equal(tr.shape, a.shape) - for i in range(a.size): - assert_array_equal(tr.ravel()[i], multigammaln(a.ravel()[i], d)) - - d = 5 - a = np.abs(np.random.randn(1, 2)) + d - - tr = multigammaln(a, d) - assert_array_equal(tr.shape, a.shape) - for i in range(a.size): - assert_array_equal(tr.ravel()[i], multigammaln(a.ravel()[i], d)) - - def test_bararg(self): - try: - multigammaln(0.5, 1.2) - raise Exception("Expected this call to fail") - except ValueError: - pass - -if __name__ == '__main__': - run_module_suite() diff --git a/scipy-0.10.1/scipy/special/toms/wofz.f b/scipy-0.10.1/scipy/special/toms/wofz.f deleted file mode 100644 index a668df181a..0000000000 --- a/scipy-0.10.1/scipy/special/toms/wofz.f +++ /dev/null @@ -1,214 +0,0 
@@ -C ALGORITHM 680, COLLECTED ALGORITHMS FROM ACM. -C THIS WORK PUBLISHED IN TRANSACTIONS ON MATHEMATICAL SOFTWARE, -C VOL. 16, NO. 1, PP. 47. - SUBROUTINE WOFZ (XI, YI, U, V, FLAG) -C -C GIVEN A COMPLEX NUMBER Z = (XI,YI), THIS SUBROUTINE COMPUTES -C THE VALUE OF THE FADDEEVA-FUNCTION W(Z) = EXP(-Z**2)*ERFC(-I*Z), -C WHERE ERFC IS THE COMPLEX COMPLEMENTARY ERROR-FUNCTION AND I -C MEANS SQRT(-1). -C THE ACCURACY OF THE ALGORITHM FOR Z IN THE 1ST AND 2ND QUADRANT -C IS 14 SIGNIFICANT DIGITS; IN THE 3RD AND 4TH IT IS 13 SIGNIFICANT -C DIGITS OUTSIDE A CIRCULAR REGION WITH RADIUS 0.126 AROUND A ZERO -C OF THE FUNCTION. -C ALL REAL VARIABLES IN THE PROGRAM ARE DOUBLE PRECISION. -C -C -C THE CODE CONTAINS A FEW COMPILER-DEPENDENT PARAMETERS : -C RMAXREAL = THE MAXIMUM VALUE OF RMAXREAL EQUALS THE ROOT OF -C RMAX = THE LARGEST NUMBER WHICH CAN STILL BE -C IMPLEMENTED ON THE COMPUTER IN DOUBLE PRECISION -C FLOATING-POINT ARITHMETIC -C RMAXEXP = LN(RMAX) - LN(2) -C RMAXGONI = THE LARGEST POSSIBLE ARGUMENT OF A DOUBLE PRECISION -C GONIOMETRIC FUNCTION (DCOS, DSIN, ...) -C THE REASON WHY THESE PARAMETERS ARE NEEDED AS THEY ARE DEFINED WILL -C BE EXPLAINED IN THE CODE BY MEANS OF COMMENTS -C -C -C PARAMETER LIST -C XI = REAL PART OF Z -C YI = IMAGINARY PART OF Z -C U = REAL PART OF W(Z) -C V = IMAGINARY PART OF W(Z) -C FLAG = AN ERROR FLAG INDICATING WHETHER OVERFLOW WILL -C OCCUR OR NOT; TYPE LOGICAL; -C THE VALUES OF THIS VARIABLE HAVE THE FOLLOWING -C MEANING : -C FLAG=.FALSE. : NO ERROR CONDITION -C FLAG=.TRUE. : OVERFLOW WILL OCCUR, THE ROUTINE -C BECOMES INACTIVE -C XI, YI ARE THE INPUT-PARAMETERS -C U, V, FLAG ARE THE OUTPUT-PARAMETERS -C -C FURTHERMORE THE PARAMETER FACTOR EQUALS 2/SQRT(PI) -C -C THE ROUTINE IS NOT UNDERFLOW-PROTECTED BUT ANY VARIABLE CAN BE -C PUT TO 0 UPON UNDERFLOW; -C -C REFERENCE - GPM POPPE, CMJ WIJERS; MORE EFFICIENT COMPUTATION OF -C THE COMPLEX ERROR-FUNCTION, ACM TRANS. MATH. SOFTWARE. 
-C -* -* -* -* - IMPLICIT DOUBLE PRECISION (A-H, O-Z) -* - LOGICAL A, B, FLAG - PARAMETER (FACTOR = 1.12837916709551257388D0, - * RMAXREAL = 0.5D+154, - * RMAXEXP = 708.503061461606D0, - * RMAXGONI = 3.53711887601422D+15) -* - FLAG = .FALSE. -* - XABS = DABS(XI) - YABS = DABS(YI) - X = XABS/6.3 - Y = YABS/4.4 -* -C -C THE FOLLOWING IF-STATEMENT PROTECTS -C QRHO = (X**2 + Y**2) AGAINST OVERFLOW -C - IF ((XABS.GT.RMAXREAL).OR.(YABS.GT.RMAXREAL)) GOTO 100 -* - QRHO = X**2 + Y**2 -* - XABSQ = XABS**2 - XQUAD = XABSQ - YABS**2 - YQUAD = 2*XABS*YABS -* - A = QRHO.LT.0.085264D0 -* - IF (A) THEN -C -C IF (QRHO.LT.0.085264D0) THEN THE FADDEEVA-FUNCTION IS EVALUATED -C USING A POWER-SERIES (ABRAMOWITZ/STEGUN, EQUATION (7.1.5), P.297) -C N IS THE MINIMUM NUMBER OF TERMS NEEDED TO OBTAIN THE REQUIRED -C ACCURACY -C - QRHO = (1-0.85*Y)*DSQRT(QRHO) - N = IDNINT(6 + 72*QRHO) - J = 2*N+1 - XSUM = 1.0/J - YSUM = 0.0D0 - DO 10 I=N, 1, -1 - J = J - 2 - XAUX = (XSUM*XQUAD - YSUM*YQUAD)/I - YSUM = (XSUM*YQUAD + YSUM*XQUAD)/I - XSUM = XAUX + 1.0/J - 10 CONTINUE - U1 = -FACTOR*(XSUM*YABS + YSUM*XABS) + 1.0 - V1 = FACTOR*(XSUM*XABS - YSUM*YABS) - DAUX = DEXP(-XQUAD) - U2 = DAUX*DCOS(YQUAD) - V2 = -DAUX*DSIN(YQUAD) -* - U = U1*U2 - V1*V2 - V = U1*V2 + V1*U2 -* - ELSE -C -C IF (QRHO.GT.1.O) THEN W(Z) IS EVALUATED USING THE LAPLACE -C CONTINUED FRACTION -C NU IS THE MINIMUM NUMBER OF TERMS NEEDED TO OBTAIN THE REQUIRED -C ACCURACY -C -C IF ((QRHO.GT.0.085264D0).AND.(QRHO.LT.1.0)) THEN W(Z) IS EVALUATED -C BY A TRUNCATED TAYLOR EXPANSION, WHERE THE LAPLACE CONTINUED FRACTION -C IS USED TO CALCULATE THE DERIVATIVES OF W(Z) -C KAPN IS THE MINIMUM NUMBER OF TERMS IN THE TAYLOR EXPANSION NEEDED -C TO OBTAIN THE REQUIRED ACCURACY -C NU IS THE MINIMUM NUMBER OF TERMS OF THE CONTINUED FRACTION NEEDED -C TO CALCULATE THE DERIVATIVES WITH THE REQUIRED ACCURACY -C -* - IF (QRHO.GT.1.0) THEN - H = 0.0D0 - KAPN = 0 - QRHO = DSQRT(QRHO) - NU = IDINT(3 + (1442/(26*QRHO+77))) - ELSE - QRHO = 
(1-Y)*DSQRT(1-QRHO) - H = 1.88*QRHO - H2 = 2*H - KAPN = IDNINT(7 + 34*QRHO) - NU = IDNINT(16 + 26*QRHO) - ENDIF -* - B = (H.GT.0.0) -* - IF (B) QLAMBDA = H2**KAPN -* - RX = 0.0 - RY = 0.0 - SX = 0.0 - SY = 0.0 -* - DO 11 N=NU, 0, -1 - NP1 = N + 1 - TX = YABS + H + NP1*RX - TY = XABS - NP1*RY - C = 0.5/(TX**2 + TY**2) - RX = C*TX - RY = C*TY - IF ((B).AND.(N.LE.KAPN)) THEN - TX = QLAMBDA + SX - SX = RX*TX - RY*SY - SY = RY*TX + RX*SY - QLAMBDA = QLAMBDA/H2 - ENDIF - 11 CONTINUE -* - IF (H.EQ.0.0) THEN - U = FACTOR*RX - V = FACTOR*RY - ELSE - U = FACTOR*SX - V = FACTOR*SY - END IF -* - IF (YABS.EQ.0.0) U = DEXP(-XABS**2) -* - END IF -* -* -C -C EVALUATION OF W(Z) IN THE OTHER QUADRANTS -C -* - IF (YI.LT.0.0) THEN -* - IF (A) THEN - U2 = 2*U2 - V2 = 2*V2 - ELSE - XQUAD = -XQUAD -* -C -C THE FOLLOWING IF-STATEMENT PROTECTS 2*EXP(-Z**2) -C AGAINST OVERFLOW -C - IF ((YQUAD.GT.RMAXGONI).OR. - * (XQUAD.GT.RMAXEXP)) GOTO 100 -* - W1 = 2*DEXP(XQUAD) - U2 = W1*DCOS(YQUAD) - V2 = -W1*DSIN(YQUAD) - END IF -* - U = U2 - U - V = V2 - V - IF (XI.GT.0.0) V = -V - ELSE - IF (XI.LT.0.0) V = -V - END IF -* - RETURN -* - 100 FLAG = .TRUE. 
- RETURN -* - END diff --git a/scipy-0.10.1/scipy/special/toms_wrappers.c b/scipy-0.10.1/scipy/special/toms_wrappers.c deleted file mode 100644 index c9c9f00a6d..0000000000 --- a/scipy-0.10.1/scipy/special/toms_wrappers.c +++ /dev/null @@ -1,35 +0,0 @@ -/* This file is a collection (more can be added) of wrappers around some - * ToMS Fortran algorithm, so that they can be called from - * cephesmodule.so - */ - -#include "toms_wrappers.h" -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif -/* This must be linked with fortran - */ -extern void F_FUNC(wofz,WOFZ)(double*,double*,double*,double*,int*); - -Py_complex cwofz_wrap( Py_complex z) { - int errflag; - Py_complex cy; - - F_FUNC(wofz,WOFZ)(CADDR(z), CADDR(cy), &errflag); - if (errflag==1) mtherr("wofz:",3); /* wofz returns a single flag both - for real overflows and for domain - errors -- internal overflows from too - large abs(z)*/ - return cy; -} - diff --git a/scipy-0.10.1/scipy/special/toms_wrappers.h b/scipy-0.10.1/scipy/special/toms_wrappers.h deleted file mode 100644 index 6aad6d81f0..0000000000 --- a/scipy-0.10.1/scipy/special/toms_wrappers.h +++ /dev/null @@ -1,18 +0,0 @@ -/* This file is a collection of wrappers around the - * Amos Fortran library of functions that take complex - * variables (see www.netlib.org) so that they can be called from - * the cephes library of corresponding name but work with complex - * arguments. 
- */ - -#ifndef _TOMS_WRAPPERS_H -#define _TOMS_WRAPPERS_H -#ifndef _AMOS_WRAPPERS_H -#include "Python.h" -#include "cephes/mconf.h" - -#define CADDR(z) (double *)&z.real, (double*)&z.imag -#endif /*_AMOS */ -extern Py_complex cwofz_wrap(Py_complex z); - -#endif diff --git a/scipy-0.10.1/scipy/special/ufunc_extras.c b/scipy-0.10.1/scipy/special/ufunc_extras.c deleted file mode 100644 index bf8e611d81..0000000000 --- a/scipy-0.10.1/scipy/special/ufunc_extras.c +++ /dev/null @@ -1,593 +0,0 @@ -#define NO_IMPORT_ARRAY -#include "ufunc_extras.h" - -extern void PyUFunc_f_ff_As_d_dd(char **args, intp *dimensions, intp *steps, void *func) { - int i, is1=steps[0],os1=steps[1],os2=steps[2]; - char *ip1=args[0], *op1=args[1], *op2=args[2]; - intp n=dimensions[0]; - double to1, to2; - - for(i=0; i attached on the 'right'.\n""" - - source = asarray(source) - if len(source.shape)==1: - width = 1 - source = np.resize(source,[source.shape[0],width]) - else: - width = source.shape[1] - for addon in args: - if len(addon.shape)==1: - width = 1 - addon = np.resize(addon,[source.shape[0],width]) - else: - width = source.shape[1] - if len(addon) < len(source): - addon = np.resize(addon,[source.shape[0],addon.shape[1]]) - elif len(source) < len(addon): - source = np.resize(source,[addon.shape[0],source.shape[1]]) - source = np.concatenate((source,addon),1) - return source - - -def unique(inarray): - """Returns unique items in the FIRST dimension of the passed array. Only - works on arrays NOT including string items (e.g., type 'O' or 'c'). 
- """ - inarray = asarray(inarray) - uniques = np.array([inarray[0]]) - if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY - for item in inarray[1:]: - if np.add.reduce(np.equal(uniques,item).flat) == 0: - try: - uniques = np.concatenate([uniques,np.array[np.newaxis,:]]) - except TypeError: - uniques = np.concatenate([uniques,np.array([item])]) - else: # IT MUST BE A 2+D ARRAY - if inarray.dtype.char != 'O': # not an Object array - for item in inarray[1:]: - if not np.sum(np.alltrue(np.equal(uniques,item),1),axis=0): - try: - uniques = np.concatenate( [uniques,item[np.newaxis,:]] ) - except TypeError: # the item to add isn't a list - uniques = np.concatenate([uniques,np.array([item])]) - else: - pass # this item is already in the uniques array - else: # must be an Object array, alltrue/equal functions don't work - for item in inarray[1:]: - newflag = 1 - for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=> - test = np.sum(abs(np.array(map(cmp,item,unq))),axis=0) - if test == 0: # if item identical to any 1 row in uniques - newflag = 0 # then not a novel item to add - break - if newflag == 1: - try: - uniques = np.concatenate( [uniques,item[np.newaxis,:]] ) - except TypeError: # the item to add isn't a list - uniques = np.concatenate([uniques,np.array([item])]) - return uniques - -def colex(a, indices, axis=1): - """\nExtracts specified indices (a list) from passed array, along passed - axis (column extraction is default). BEWARE: A 1D array is presumed to be a - column-array (and that the whole array will be returned as a column). - - Returns: the columns of a specified by indices\n""" - - if type(indices) not in [ListType,TupleType,np.ndarray]: - indices = [indices] - if len(np.shape(a)) == 1: - cols = np.resize(a,[a.shape[0],1]) - else: - cols = np.take(a,indices,axis) - return cols - -def adm(a, criterion): - """\nReturns rows from the passed list of lists that meet the criteria in -the passed criterion expression (a string). 
- -Format: adm (a,criterion) where criterion is like 'x[2]==37'\n""" - - lines = eval('filter(lambda x: '+criterion+',a)') - try: - lines = np.array(lines) - except: - lines = np.array(lines,'O') - return lines - - -def linexand(a, columnlist, valuelist): - """Returns the rows of an array where col (from columnlist) = val - (from valuelist). One value is required for each column in columnlist. - - Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i\n""" - - a = asarray(a) - if type(columnlist) not in [ListType,TupleType,np.ndarray]: - columnlist = [columnlist] - if type(valuelist) not in [ListType,TupleType,np.ndarray]: - valuelist = [valuelist] - criterion = '' - for i in range(len(columnlist)): - if type(valuelist[i])==StringType: - critval = '\'' + valuelist[i] + '\'' - else: - critval = str(valuelist[i]) - criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and' - criterion = criterion[0:-3] # remove the "and" after the last crit - return adm(a,criterion) - - -def collapse(a, keepcols, collapsecols, stderr=0, ns=0, cfcn=None): - """Averages data in collapsecol, keeping all unique items in keepcols - (using unique, which keeps unique LISTS of column numbers), retaining - the unique sets of values in keepcols, the mean for each. If the sterr or - N of the mean are desired, set either or both parameters to 1. - - Returns: unique 'conditions' specified by the contents of columns specified - by keepcols, abutted with the mean(s,axis=0) of column(s) specified by - collapsecols - - Examples - -------- - - import numpy as np - from scipy import stats - - xx = np.array([[ 0., 0., 1.], - [ 1., 1., 1.], - [ 2., 2., 1.], - [ 0., 3., 1.], - [ 1., 4., 1.], - [ 2., 5., 1.], - [ 0., 6., 1.], - [ 1., 7., 1.], - [ 2., 8., 1.], - [ 0., 9., 1.]]) - - >>> stats._support.collapse(xx, (0), (1,2), stderr=0, ns=0, cfcn=None) - array([[ 0. , 4.5, 1. ], - [ 0. , 4.5, 1. ], - [ 1. , 4. , 1. ], - [ 1. , 4. , 1. ], - [ 2. , 5. , 1. ], - [ 2. , 5. , 1. 
]]) - >>> stats._support.collapse(xx, (0), (1,2), stderr=1, ns=1, cfcn=None) - array([[ 0. , 4.5 , 1.93649167, 4. , 1. , - 0. , 4. ], - [ 0. , 4.5 , 1.93649167, 4. , 1. , - 0. , 4. ], - [ 1. , 4. , 1.73205081, 3. , 1. , - 0. , 3. ], - [ 1. , 4. , 1.73205081, 3. , 1. , - 0. , 3. ], - [ 2. , 5. , 1.73205081, 3. , 1. , - 0. , 3. ], - [ 2. , 5. , 1.73205081, 3. , 1. , - 0. , 3. ]]) - - """ - if cfcn is None: - cfcn = lambda(x): np.mean(x, axis=0) - a = asarray(a) - if keepcols == []: - avgcol = colex(a,collapsecols) - means = cfcn(avgcol) - return means - else: - if type(keepcols) not in [ListType,TupleType,np.ndarray]: - keepcols = [keepcols] - values = colex(a,keepcols) # so that "item" can be appended (below) - uniques = unique(values).tolist() # get a LIST, so .sort keeps rows intact - uniques.sort() - newlist = [] - for item in uniques: - if type(item) not in [ListType,TupleType,np.ndarray]: - item =[item] - tmprows = linexand(a,keepcols,item) - for col in collapsecols: - avgcol = colex(tmprows,col) - item.append(cfcn(avgcol)) - if stderr: - if len(avgcol)>1: - item.append(compute_stderr(avgcol)) - else: - item.append('N/A') - if ns: - item.append(len(avgcol)) - newlist.append(item) - try: - new_a = np.array(newlist) - except TypeError: - new_a = np.array(newlist,'O') - return new_a - -def _chk_asarray(a, axis): - if axis is None: - a = np.ravel(a) - outaxis = 0 - else: - a = np.asarray(a) - outaxis = axis - return a, outaxis - -def _chk2_asarray(a, b, axis): - if axis is None: - a = np.ravel(a) - b = np.ravel(b) - outaxis = 0 - else: - a = np.asarray(a) - b = np.asarray(b) - outaxis = axis - return a, b, outaxis - -def compute_stderr(a, axis=0, ddof=1): - a, axis = _chk_asarray(a, axis) - return np.std(a,axis,ddof=1) / float(np.sqrt(a.shape[axis])) diff --git a/scipy-0.10.1/scipy/stats/bento.info b/scipy-0.10.1/scipy/stats/bento.info deleted file mode 100644 index 220e924838..0000000000 --- a/scipy-0.10.1/scipy/stats/bento.info +++ /dev/null @@ -1,16 +0,0 @@ 
-HookFile: bscript - -Library: - CompiledLibrary: statlibimp - Sources: - statlib/ansari.f, - statlib/spearman.f, - statlib/swilk.f - Extension: statlib - Sources: statlib.pyf - Extension: futil - Sources: futil.f - Extension: mvn - Sources: mvn.pyf, mvndst.f - Extension: vonmises_cython - Sources: vonmises_cython.c diff --git a/scipy-0.10.1/scipy/stats/bscript b/scipy-0.10.1/scipy/stats/bscript deleted file mode 100644 index 30ce213465..0000000000 --- a/scipy-0.10.1/scipy/stats/bscript +++ /dev/null @@ -1,27 +0,0 @@ -from bento.commands import hooks - -@hooks.pre_build -def pre_build(context): - default_builder = context.default_builder - - def builder(extension): - return default_builder(extension, features="c cstlib pyext bento") - context.register_compiled_library_builder("statlibimp", builder) - - def builder(extension): - return default_builder(extension, - features="c cshlib pyext bento f2py", - use="statlibimp CLIB") - context.register_builder("statlib", builder) - - def builder(extension): - return default_builder(extension, - features="c cshlib pyext bento f2py", - use="CLIB") - context.register_builder("mvn", builder) - - def builder(extension): - return default_builder(extension, - features="c cshlib pyext bento f2py f2py_fortran", - use="CLIB") - context.register_builder("futil", builder) diff --git a/scipy-0.10.1/scipy/stats/contingency.py b/scipy-0.10.1/scipy/stats/contingency.py deleted file mode 100644 index c06439c9b6..0000000000 --- a/scipy-0.10.1/scipy/stats/contingency.py +++ /dev/null @@ -1,249 +0,0 @@ -"""Some functions for working with contingency tables (i.e. cross tabulations). -""" - -# Author: Warren Weckesser, Enthought, Inc. - -import numpy as np -from scipy import special - - -__all__ = ['margins', 'expected_freq', 'chi2_contingency'] - - -def margins(a): - """Return a list of the marginal sums of the array `a`. - - Parameters - ---------- - a : ndarray - The array for which to compute the marginal sums. 
- - Return Value - ------------ - margsums : list of ndarrays - A list of length `a.ndim`. `margsums[k]` is the result - of summing `a` over all axes except `k`; it has the same - number of dimensions as `a`, but the length of each axis - except axis `k` will be 1. - - Examples - -------- - >>> a = np.arange(12).reshape(2, 6) - >>> a - array([[ 0, 1, 2, 3, 4, 5], - [ 6, 7, 8, 9, 10, 11]]) - >>> m0, m1 = margins(a) - >>> m0 - array([[15], - [51]]) - >>> m1 - array([[ 6, 8, 10, 12, 14, 16]]) - - >>> b = np.arange(24).reshape(2,3,4) - >>> m0, m1, m2 = margins(b) - >>> m0 - array([[[ 66]], - [[210]]]) - >>> m1 - array([[[ 60], - [ 92], - [124]]]) - >>> m2 - array([[[60, 66, 72, 78]]]) - """ - margsums = [] - ranged = range(a.ndim) - for k in ranged: - marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k]) - margsums.append(marg) - return margsums - - -def expected_freq(observed): - """Compute the expected frequencies from a contingency table. - - Given an n-dimensional contingency table of observed frequencies, - compute the expected frequencies for the table based on the marginal - sums under the assumption that the groups associated with each - dimension are independent. - - Parameters - ---------- - observed : array_like - The table of observed frequencies. (While this function can handle - a 1-D array, that case is trivial. Generally `observed` is at - least 2-D.) - - Returns - ------- - expected : ndarray of type numpy.float64, same shape as `observed`. - The expected frequencies, based on the marginal sums of the table. - - Examples - -------- - >>> observed = np.array([[10, 10, 20],[20, 20, 20]]) - >>> expected_freq(observed) - array([[ 12., 12., 16.], - [ 18., 18., 24.]]) - """ - # Typically `observed` is an integer array. If `observed` has a large - # number of dimensions or holds large values, some of the following - # computations may overflow, so we first switch to floating point. 
- observed = np.asarray(observed, dtype=np.float64) - - # Create a list of the marginal sums. - margsums = margins(observed) - - # Create the array of expected frequencies. The shapes of the - # marginal sums returned by apply_over_axes() are just what we - # need for broadcasting in the following product. - d = observed.ndim - expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1) - return expected - - -def chi2_contingency(observed, correction=True): - """Chi-square test of independence of variables in a contingency table. - - This function computes the chi-square statistic and p-value for the - hypothesis test of independence of the observed frequencies in the - contingency table [1]_ `observed`. The expected frequencies are computed - based on the marginal sums under the assumption of independence; - see scipy.stats.expected_freq. The number of degrees of freedom is - (expressed using numpy functions and attributes):: - - dof = observed.size - sum(observed.shape) + observed.ndim - 1 - - - Parameters - ---------- - observed : array_like - The contingency table. The table contains the observed frequencies - (i.e. number of occurrences) in each category. In the two-dimensional - case, the table is often described as an "R x C table". - correction : bool, optional - If True, *and* the degrees of freedom is 1, apply Yates' correction - for continuity. - - Returns - ------- - chi2 : float - The chi-square test statistic. Without the Yates' correction, this - is the sum of the squares of the observed values minus the expected - values, divided by the expected values. With Yates' correction, - 0.5 is subtracted from the squared differences before dividing by - the expected values. - p : float - The p-value of the test - dof : int - Degrees of freedom - expected : ndarray, same shape as `observed` - The expected frequencies, based on the marginal sums of the table. 
- - See Also - -------- - contingency.expected_freq - fisher_exact - chisquare - - Notes - ----- - An often quoted guideline for the validity of this calculation is that - the test should be used only if the observed and expected frequency in - each cell is at least 5. - - This is a test for the independence of different categories of a - population. The test is only meaningful when the dimension of - `observed` is two or more. Applying the test to a one-dimensional - table will always result in `expected` equal to `observed` and a - chi-square statistic equal to 0. - - This function does not handle masked arrays, because the calculation - does not make sense with missing values. - - Like stats.chisquare, this function computes a chi-square statistic; - the convenience this function provides is to figure out the expected - frequencies and degrees of freedom from the given contingency table. - If these were already known, and if the Yates' correction was not - required, one could use stats.chisquare. That is, if one calls:: - - chi2, p, dof, ex = chi2_contingency(obs, correction=False) - - then the following is true:: - - (chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(), - ddof=obs.size - 1 - dof) - - References - ---------- - .. [1] http://en.wikipedia.org/wiki/Contingency_table - - Examples - -------- - A two-way example (2 x 3): - - >>> obs = np.array([[10, 10, 20], [20, 20, 20]]) - >>> chi2_contingency(obs) - (2.7777777777777777, - 0.24935220877729619, - 2, - array([[ 12., 12., 16.], - [ 18., 18., 24.]])) - - A four-way example (2 x 2 x 2 x 2): - - >>> obs = np.array( - ... [[[[12, 17], - ... [11, 16]], - ... [[11, 12], - ... [15, 16]]], - ... [[[23, 15], - ... [30, 22]], - ... [[14, 17], - ... 
[15, 16]]]]) - >>> chi2_contingency(obs) - (8.7584514426741897, - 0.64417725029295503, - 11, - array([[[[ 14.15462386, 14.15462386], - [ 16.49423111, 16.49423111]], - [[ 11.2461395 , 11.2461395 ], - [ 13.10500554, 13.10500554]]], - [[[ 19.5591166 , 19.5591166 ], - [ 22.79202844, 22.79202844]], - [[ 15.54012004, 15.54012004], - [ 18.10873492, 18.10873492]]]])) - """ - observed = np.asarray(observed) - if np.any(observed < 0): - raise ValueError("All values in `observed` must be nonnegative.") - if observed.size == 0: - raise ValueError("No data; `observed` has size 0.") - - expected = expected_freq(observed) - if np.any(expected == 0): - # Include one of the positions where expected is zero in - # the exception message. - zeropos = list(np.where(expected == 0)[0]) - raise ValueError("The internally computed table of expected " - "frequencies has a zero element at %s." % zeropos) - - # The degrees of freedom - dof = expected.size - sum(expected.shape) + expected.ndim - 1 - - if dof == 0: - # Degenerate case; this occurs when `observed` is 1D (or, more - # generally, when it has only one nontrivial dimension). In this - # case, we also have observed == expected, so chi2 is 0. - chi2 = 0.0 - p = 1.0 - else: - if dof == 1 and correction: - # Use Yates' correction for continuity. - chi2 = ((np.abs(observed - expected) - 0.5) ** 2 / expected).sum() - else: - # Regular chi-square--no correction. 
- chi2 = ((observed - expected) ** 2 / expected).sum() - p = special.chdtrc(dof, chi2) - - return chi2, p, dof, expected diff --git a/scipy-0.10.1/scipy/stats/distributions.py b/scipy-0.10.1/scipy/stats/distributions.py deleted file mode 100644 index 7f70474511..0000000000 --- a/scipy-0.10.1/scipy/stats/distributions.py +++ /dev/null @@ -1,7020 +0,0 @@ -# Functions to implement several important functions for -# various Continous and Discrete Probability Distributions -# -# Author: Travis Oliphant 2002-2011 with contributions from -# SciPy Developers 2004-2011 -# - -import math -import warnings -from copy import copy - -from scipy.misc import comb, derivative -from scipy import special -from scipy import optimize -from scipy import integrate -from scipy.special import gammaln as gamln - -import inspect -from numpy import alltrue, where, arange, putmask, \ - ravel, take, ones, sum, shape, product, repeat, reshape, \ - zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \ - arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1 -from numpy import atleast_1d, polyval, ceil, place, extract, \ - any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \ - power, NINF, empty -import numpy -import numpy as np -import numpy.random as mtrand -from numpy import flatnonzero as nonzero -import vonmises_cython - -__all__ = [ - 'rv_continuous', - 'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine', - 'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy', - 'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang', - 'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy', - 'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l', - 'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme', - 'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r', - 'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant', - 'gausshyper', 'invgamma', 'invgauss', 'invweibull', - 'johnsonsb', 'johnsonsu', 
'laplace', 'levy', 'levy_l', - 'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm', - 'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't', - 'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm', - 'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss', - 'semicircular', 'triang', 'truncexpon', 'truncnorm', - 'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy', - 'entropy', 'rv_discrete', - 'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser', - 'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace', - 'skellam' - ] - -floatinfo = numpy.finfo(float) - -errp = special.errprint -arr = asarray -gam = special.gamma - -import types -from scipy.misc import doccer -all = alltrue -sgf = vectorize - -try: - from new import instancemethod -except ImportError: - # Python 3 - def instancemethod(func, obj, cls): - return types.MethodType(func, obj) - - -# These are the docstring parts used for substitution in specific -# distribution docstrings. - -docheaders = {'methods':"""\nMethods\n-------\n""", - 'parameters':"""\nParameters\n---------\n""", - 'notes':"""\nNotes\n-----\n""", - 'examples':"""\nExamples\n--------\n"""} - -_doc_rvs = \ -"""rvs(%(shapes)s, loc=0, scale=1, size=1) - Random variates. -""" -_doc_pdf = \ -"""pdf(x, %(shapes)s, loc=0, scale=1) - Probability density function. -""" -_doc_logpdf = \ -"""logpdf(x, %(shapes)s, loc=0, scale=1) - Log of the probability density function. -""" -_doc_pmf = \ -"""pmf(x, %(shapes)s, loc=0, scale=1) - Probability mass function. -""" -_doc_logpmf = \ -"""logpmf(x, %(shapes)s, loc=0, scale=1) - Log of the probability mass function. -""" -_doc_cdf = \ -"""cdf(x, %(shapes)s, loc=0, scale=1) - Cumulative density function. -""" -_doc_logcdf = \ -"""logcdf(x, %(shapes)s, loc=0, scale=1) - Log of the cumulative density function. -""" -_doc_sf = \ -"""sf(x, %(shapes)s, loc=0, scale=1) - Survival function (1-cdf --- sometimes more accurate). 
-""" -_doc_logsf = \ -"""logsf(x, %(shapes)s, loc=0, scale=1) - Log of the survival function. -""" -_doc_ppf = \ -"""ppf(q, %(shapes)s, loc=0, scale=1) - Percent point function (inverse of cdf --- percentiles). -""" -_doc_isf = \ -"""isf(q, %(shapes)s, loc=0, scale=1) - Inverse survival function (inverse of sf). -""" -_doc_moment = \ -"""moment(n, %(shapes)s, loc=0, scale=1) - Non-central moment of order n -""" -_doc_stats = \ -"""stats(%(shapes)s, loc=0, scale=1, moments='mv') - Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). -""" -_doc_entropy = \ -"""entropy(%(shapes)s, loc=0, scale=1) - (Differential) entropy of the RV. -""" -_doc_fit = \ -"""fit(data, %(shapes)s, loc=0, scale=1) - Parameter estimates for generic data. -""" -_doc_expect = \ -"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) - Expected value of a function (of one argument) with respect to the distribution. -""" -_doc_expect_discrete = \ -"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False) - Expected value of a function (of one argument) with respect to the distribution. -""" -_doc_median = \ -"""median(%(shapes)s, loc=0, scale=1) - Median of the distribution. -""" -_doc_mean = \ -"""mean(%(shapes)s, loc=0, scale=1) - Mean of the distribution. -""" -_doc_var = \ -"""var(%(shapes)s, loc=0, scale=1) - Variance of the distribution. -""" -_doc_std = \ -"""std(%(shapes)s, loc=0, scale=1) - Standard deviation of the distribution. 
-""" -_doc_interval = \ -"""interval(alpha, %(shapes)s, loc=0, scale=1) - Endpoints of the range that contains alpha percent of the distribution -""" -_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, - _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, - _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, - _doc_stats, _doc_entropy, _doc_fit, - _doc_expect, _doc_median, - _doc_mean, _doc_var, _doc_std, _doc_interval]) - -# Note that the two lines for %(shapes) are searched for and replaced in -# rv_continuous and rv_discrete - update there if the exact string changes -_doc_default_callparams = \ -""" -Parameters ----------- -x : array_like - quantiles -q : array_like - lower or upper tail probability -%(shapes)s : array_like - shape parameters -loc : array_like, optional - location parameter (default=0) -scale : array_like, optional - scale parameter (default=1) -size : int or tuple of ints, optional - shape of random variates (default computed from input arguments ) -moments : str, optional - composed of letters ['mvsk'] specifying which moments to compute where - 'm' = mean, 'v' = variance, 's' = (Fisher's) skew and - 'k' = (Fisher's) kurtosis. (default='mv') -""" -_doc_default_longsummary = \ -"""Continuous random variables are defined from a standard form and may -require some shape parameters to complete its specification. Any -optional keyword parameters can be passed to the methods of the RV -object as given below: -""" -_doc_default_frozen_note = \ -""" -Alternatively, the object may be called (as a function) to fix the shape, -location, and scale parameters returning a "frozen" continuous RV object: - -rv = %(name)s(%(shapes)s, loc=0, scale=1) - - Frozen RV object with the same methods but holding the given shape, - location, and scale fixed. 
-""" -_doc_default_example = \ -"""Examples --------- ->>> from scipy.stats import %(name)s ->>> numargs = %(name)s.numargs ->>> [ %(shapes)s ] = [0.9,] * numargs ->>> rv = %(name)s(%(shapes)s) - -Display frozen pdf - ->>> x = np.linspace(0, np.minimum(rv.dist.b, 3)) ->>> h = plt.plot(x, rv.pdf(x)) - -Check accuracy of cdf and ppf - ->>> prb = %(name)s.cdf(x, %(shapes)s) ->>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20) - -Random number generation - ->>> R = %(name)s.rvs(%(shapes)s, size=100) -""" - -_doc_default = ''.join([_doc_default_longsummary, - _doc_allmethods, - _doc_default_callparams, - _doc_default_frozen_note, - _doc_default_example]) - -_doc_default_before_notes = ''.join([_doc_default_longsummary, - _doc_allmethods, - _doc_default_callparams, - _doc_default_frozen_note]) - -docdict = {'rvs':_doc_rvs, - 'pdf':_doc_pdf, - 'logpdf':_doc_logpdf, - 'cdf':_doc_cdf, - 'logcdf':_doc_logcdf, - 'sf':_doc_sf, - 'logsf':_doc_logsf, - 'ppf':_doc_ppf, - 'isf':_doc_isf, - 'stats':_doc_stats, - 'entropy':_doc_entropy, - 'fit':_doc_fit, - 'moment':_doc_moment, - 'expect':_doc_expect, - 'interval':_doc_interval, - 'mean':_doc_mean, - 'std':_doc_std, - 'var':_doc_var, - 'median':_doc_median, - 'allmethods':_doc_allmethods, - 'callparams':_doc_default_callparams, - 'longsummary':_doc_default_longsummary, - 'frozennote':_doc_default_frozen_note, - 'example':_doc_default_example, - 'default':_doc_default, - 'before_notes':_doc_default_before_notes} - -# Reuse common content between continous and discrete docs, change some -# minor bits. 
-docdict_discrete = docdict.copy() - -docdict_discrete['pmf'] = _doc_pmf -docdict_discrete['logpmf'] = _doc_logpmf -docdict_discrete['expect'] = _doc_expect_discrete -_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', - 'ppf', 'isf', 'stats', 'entropy', 'fit', 'expect', 'median', - 'mean', 'var', 'std', 'interval'] -for obj in _doc_disc_methods: - docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') -docdict_discrete.pop('pdf') -docdict_discrete.pop('logpdf') - -_doc_allmethods = ''.join([docdict_discrete[obj] for obj in - _doc_disc_methods]) -docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods - -docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\ - 'Continuous', 'Discrete') -_doc_default_frozen_note = \ -""" -Alternatively, the object may be called (as a function) to fix the shape and -location parameters returning a "frozen" discrete RV object: - -rv = %(name)s(%(shapes)s, loc=0) - - Frozen RV object with the same methods but holding the given shape and - location fixed. 
-""" -docdict_discrete['frozennote'] = _doc_default_frozen_note - -docdict_discrete['example'] = _doc_default_example.replace('[0.9,]', - 'Replace with reasonable value') - -_doc_default_before_notes = ''.join([docdict_discrete['longsummary'], - docdict_discrete['allmethods'], - docdict_discrete['callparams'], - docdict_discrete['frozennote']]) -docdict_discrete['before_notes'] = _doc_default_before_notes - -_doc_default_disc = ''.join([docdict_discrete['longsummary'], - docdict_discrete['allmethods'], - docdict_discrete['frozennote'], - docdict_discrete['example']]) -docdict_discrete['default'] = _doc_default_disc - - -# clean up all the separate docstring elements, we do not need them anymore -for obj in [s for s in dir() if s.startswith('_doc_')]: - exec('del ' + obj) -del obj -try: - del s -except NameError: - # in Python 3, loop variables are not visible after the loop - pass - - -def _moment(data, n, mu=None): - if mu is None: - mu = data.mean() - return ((data - mu)**n).mean() - -def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): - if (n==0): - return 1.0 - elif (n==1): - if mu is None: - val = moment_func(1,*args) - else: - val = mu - elif (n==2): - if mu2 is None or mu is None: - val = moment_func(2,*args) - else: - val = mu2 + mu*mu - elif (n==3): - if g1 is None or mu2 is None or mu is None: - val = moment_func(3,*args) - else: - mu3 = g1*(mu2**1.5) # 3rd central moment - val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment - elif (n==4): - if g1 is None or g2 is None or mu2 is None or mu is None: - val = moment_func(4,*args) - else: - mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment - mu3 = g1*(mu2**1.5) # 3rd central moment - val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4 - else: - val = moment_func(n, *args) - - return val - - -def _skew(data): - data = np.ravel(data) - mu = data.mean() - m2 = ((data - mu)**2).mean() - m3 = ((data - mu)**3).mean() - return m3 / m2**1.5 - -def _kurtosis(data): - data = np.ravel(data) - mu = data.mean() - m2 = ((data - 
mu)**2).mean() - m4 = ((data - mu)**4).mean() - return m4 / m2**2 - 3 - - - -def _build_random_array(fun, args, size=None): -# Build an array by applying function fun to -# the arguments in args, creating an array with -# the specified shape. -# Allows an integer shape n as a shorthand for (n,). - if isinstance(size, types.IntType): - size = [size] - if size is not None and len(size) != 0: - n = numpy.multiply.reduce(size) - s = apply(fun, args + (n,)) - s.shape = size - return s - else: - n = 1 - s = apply(fun, args + (n,)) - return s[0] - -random = mtrand.random_sample -rand = mtrand.rand -random_integers = mtrand.random_integers -permutation = mtrand.permutation - -## Internal class to compute a ppf given a distribution. -## (needs cdf function) and uses brentq from scipy.optimize -## to compute ppf from cdf. -class general_cont_ppf(object): - def __init__(self, dist, xa=-10.0, xb=10.0, xtol=1e-14): - self.dist = dist - self.cdf = eval('%scdf'%dist) - self.xa = xa - self.xb = xb - self.xtol = xtol - self.vecfunc = sgf(self._single_call,otypes='d') - def _tosolve(self, x, q, *args): - return apply(self.cdf, (x, )+args) - q - def _single_call(self, q, *args): - return optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol) - def __call__(self, q, *args): - return self.vecfunc(q, *args) - - -# Frozen RV class -class rv_frozen(object): - - def __init__(self, dist, *args, **kwds): - self.args = args - self.kwds = kwds - self.dist = dist - - def pdf(self, x): #raises AttributeError in frozen discrete distribution - return self.dist.pdf(x, *self.args, **self.kwds) - - def logpdf(self, x): - return self.dist.logpdf(x, *self.args, **self.kwds) - - def cdf(self, x): - return self.dist.cdf(x, *self.args, **self.kwds) - - def logcdf(self, x): - return self.dist.logcdf(x, *self.args, **self.kwds) - - def ppf(self, q): - return self.dist.ppf(q, *self.args, **self.kwds) - - def isf(self, q): - return self.dist.isf(q, *self.args, **self.kwds) - - def 
rvs(self, size=None): - kwds = self.kwds.copy() - kwds.update({'size':size}) - return self.dist.rvs(*self.args, **kwds) - - def sf(self, x): - return self.dist.sf(x, *self.args, **self.kwds) - - def logsf(self, x): - return self.dist.logsf(x, *self.args, **self.kwds) - - def stats(self, moments='mv'): - kwds = self.kwds.copy() - kwds.update({'moments':moments}) - return self.dist.stats(*self.args, **kwds) - - def median(self): - return self.dist.median(*self.args, **self.kwds) - - def mean(self): - return self.dist.mean(*self.args, **self.kwds) - - def var(self): - return self.dist.var(*self.args, **self.kwds) - - def std(self): - return self.dist.std(*self.args, **self.kwds) - - def moment(self, n): - return self.dist.moment(n, *self.args, **self.kwds) - - def entropy(self): - return self.dist.entropy(*self.args, **self.kwds) - - def pmf(self,k): - return self.dist.pmf(k, *self.args, **self.kwds) - - def logpmf(self,k): - return self.dist.logpmf(k, *self.args, **self.kwds) - - def interval(self, alpha): - return self.dist.interval(alpha, *self.args, **self.kwds) - - - -## NANs are returned for unsupported parameters. -## location and scale parameters are optional for each distribution. -## The shape parameters are generally required -## -## The loc and scale parameters must be given as keyword parameters. 
-## These are related to the common symbols in the .lyx file - -## skew is third central moment / variance**(1.5) -## kurtosis is fourth central moment / variance**2 - 3 - - -## References:: - -## Documentation for ranlib, rv2, cdflib and -## -## Eric Wesstein's world of mathematics http://mathworld.wolfram.com/ -## http://mathworld.wolfram.com/topics/StatisticalDistributions.html -## -## Documentation to Regress+ by Michael McLaughlin -## -## Engineering and Statistics Handbook (NIST) -## http://www.itl.nist.gov/div898/handbook/index.htm -## -## Documentation for DATAPLOT from NIST -## http://www.itl.nist.gov/div898/software/dataplot/distribu.htm -## -## Norman Johnson, Samuel Kotz, and N. Balakrishnan "Continuous -## Univariate Distributions", second edition, -## Volumes I and II, Wiley & Sons, 1994. - - -## Each continuous random variable as the following methods -## -## rvs -- Random Variates (alternatively calling the class could produce these) -## pdf -- PDF -## logpdf -- log PDF (more numerically accurate if possible) -## cdf -- CDF -## logcdf -- log of CDF -## sf -- Survival Function (1-CDF) -## logsf --- log of SF -## ppf -- Percent Point Function (Inverse of CDF) -## isf -- Inverse Survival Function (Inverse of SF) -## stats -- Return mean, variance, (Fisher's) skew, or (Fisher's) kurtosis -## nnlf -- negative log likelihood function (to minimize) -## fit -- Model-fitting -## -## Maybe Later -## -## hf --- Hazard Function (PDF / SF) -## chf --- Cumulative hazard function (-log(SF)) -## psf --- Probability sparsity function (reciprocal of the pdf) in -## units of percent-point-function (as a function of q). -## Also, the derivative of the percent-point function. 
- -## To define a new random variable you subclass the rv_continuous class -## and re-define the -## -## _pdf method which will be given clean arguments (in between a and b) -## and passing the argument check method -## -## If postive argument checking is not correct for your RV -## then you will also need to re-define -## _argcheck - -## Correct, but potentially slow defaults exist for the remaining -## methods but for speed and/or accuracy you can over-ride -## -## _cdf, _ppf, _rvs, _isf, _sf -## -## Rarely would you override _isf and _sf but you could for numerical precision. -## -## Statistics are computed using numerical integration by default. -## For speed you can redefine this using -## -## _stats --- take shape parameters and return mu, mu2, g1, g2 -## --- If you can't compute one of these return it as None -## -## --- Can also be defined with a keyword argument moments= -## where is a string composed of 'm', 'v', 's', -## and/or 'k'. Only the components appearing in string -## should be computed and returned in the order 'm', 'v', -## 's', or 'k' with missing values returned as None -## -## OR -## -## You can override -## -## _munp -- takes n and shape parameters and returns -## -- then nth non-central moment of the distribution. -## - -def valarray(shape,value=nan,typecode=None): - """Return an array of all value. - """ - out = reshape(repeat([value],product(shape,axis=0),axis=0),shape) - if typecode is not None: - out = out.astype(typecode) - if not isinstance(out, ndarray): - out = arr(out) - return out - -# This should be rewritten -def argsreduce(cond, *args): - """Return the sequence of ravel(args[i]) where ravel(condition) is - True in 1D. 
- - Examples - -------- - >>> import numpy as np - >>> rand = np.random.random_sample - >>> A = rand((4,5)) - >>> B = 2 - >>> C = rand((1,5)) - >>> cond = np.ones(A.shape) - >>> [A1,B1,C1] = argsreduce(cond,A,B,C) - >>> B1.shape - (20,) - >>> cond[2,:] = 0 - >>> [A2,B2,C2] = argsreduce(cond,A,B,C) - >>> B2.shape - (15,) - - """ - newargs = atleast_1d(*args) - if not isinstance(newargs, list): - newargs = [newargs,] - expand_arr = (cond==cond) - return [extract(cond, arr1 * expand_arr) for arr1 in newargs] - -class rv_generic(object): - """Class which encapsulates common functionality between rv_discrete - and rv_continuous. - - """ - def _fix_loc_scale(self, args, loc, scale=1): - N = len(args) - if N > self.numargs: - if N == self.numargs + 1 and loc is None: - # loc is given without keyword - loc = args[-1] - if N == self.numargs + 2 and scale is None: - # loc and scale given without keyword - loc, scale = args[-2:] - args = args[:self.numargs] - if scale is None: - scale = 1.0 - if loc is None: - loc = 0.0 - return args, loc, scale - - def _fix_loc(self, args, loc): - args, loc, scale = self._fix_loc_scale(args, loc) - return args, loc - - # These are actually called, and should not be overwritten if you - # want to keep error checking. - def rvs(self,*args,**kwds): - """ - Random variates of given type. - - Parameters - ---------- - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - size : int or tuple of ints, optional - defining number of random variates (default=1) - - Returns - ------- - rvs : array_like - random variates of given `size` - - """ - kwd_names = ['loc', 'scale', 'size', 'discrete'] - loc, scale, size, discrete = map(kwds.get, kwd_names, - [None]*len(kwd_names)) - - args, loc, scale = self._fix_loc_scale(args, loc, scale) - cond = logical_and(self._argcheck(*args),(scale >= 0)) - if not all(cond): - raise ValueError("Domain error in arguments.") - - # self._size is total size of all output values - self._size = product(size, axis=0) - if self._size is not None and self._size > 1: - size = numpy.array(size, ndmin=1) - - if np.all(scale == 0): - return loc*ones(size, 'd') - - vals = self._rvs(*args) - if self._size is not None: - vals = reshape(vals, size) - - vals = vals * scale + loc - - # Cast to int if discrete - if discrete: - if numpy.isscalar(vals): - vals = int(vals) - else: - vals = vals.astype(int) - - return vals - - def median(self, *args, **kwds): - """ - Median of the distribution. - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - median : float - the median of the distribution. - - See Also - -------- - self.ppf --- inverse of the CDF - """ - return self.ppf(0.5, *args, **kwds) - - def mean(self, *args, **kwds): - """ - Mean of the distribution - - Parameters - ---------- - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - mean : float - the mean of the distribution - """ - kwds['moments'] = 'm' - res = self.stats(*args, **kwds) - if isinstance(res, ndarray) and res.ndim == 0: - return res[()] - return res - - def var(self, *args, **kwds): - """ - Variance of the distribution - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - var : float - the variance of the distribution - - """ - kwds['moments'] = 'v' - res = self.stats(*args, **kwds) - if isinstance(res, ndarray) and res.ndim == 0: - return res[()] - return res - - def std(self, *args, **kwds): - """ - Standard deviation of the distribution. - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - std : float - standard deviation of the distribution - - """ - kwds['moments'] = 'v' - res = sqrt(self.stats(*args, **kwds)) - return res - - def interval(self, alpha, *args, **kwds): - """Confidence interval with equal areas around the median - - Parameters - ---------- - alpha : array_like float in [0,1] - Probability that an rv will be drawn from the returned range - arg1, arg2, ... 
: array_like - The shape parameter(s) for the distribution (see docstring of the instance - object for more information) - loc: array_like, optioal - location parameter (deafult = 0) - scale : array_like, optional - scale paramter (default = 1) - - Returns - ------- - a, b: array_like (float) - end-points of range that contain alpha % of the rvs - """ - alpha = arr(alpha) - if any((alpha > 1) | (alpha < 0)): - raise ValueError("alpha must be between 0 and 1 inclusive") - q1 = (1.0-alpha)/2 - q2 = (1.0+alpha)/2 - a = self.ppf(q1, *args, **kwds) - b = self.ppf(q2, *args, **kwds) - return a, b - - -class rv_continuous(rv_generic): - """ - A generic continuous random variable class meant for subclassing. - - `rv_continuous` is a base class to construct specific distribution classes - and instances from for continuous random variables. It cannot be used - directly as a distribution. - - Parameters - ---------- - momtype : int, optional - The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf. - a : float, optional - Lower bound of the support of the distribution, default is minus - infinity. - b : float, optional - Upper bound of the support of the distribution, default is plus - infinity. - xa : float, optional - Lower bound for fixed point calculation for generic ppf. - xb : float, optional - Upper bound for fixed point calculation for generic ppf. - xtol : float, optional - The tolerance for fixed point calculation for generic ppf. - badvalue : object, optional - The value in a result arrays that indicates a value that for which - some argument restriction is violated, default is np.nan. - name : str, optional - The name of the instance. This string is used to construct the default - example for distributions. - longname : str, optional - This string is used as part of the first line of the docstring returned - when a subclass has no docstring of its own. Note: `longname` exists - for backwards compatibility, do not use for new subclasses. 
- shapes : str, optional - The shape of the distribution. For example ``"m, n"`` for a - distribution that takes two integers as the two shape arguments for all - its methods. - extradoc : str, optional, deprecated - This string is used as the last part of the docstring returned when a - subclass has no docstring of its own. Note: `extradoc` exists for - backwards compatibility, do not use for new subclasses. - - Methods - ------- - rvs(, loc=0, scale=1, size=1) - random variates - - pdf(x, , loc=0, scale=1) - probability density function - - logpdf(x, , loc=0, scale=1) - log of the probability density function - - cdf(x, , loc=0, scale=1) - cumulative density function - - logcdf(x, , loc=0, scale=1) - log of the cumulative density function - - sf(x, , loc=0, scale=1) - survival function (1-cdf --- sometimes more accurate) - - logsf(x, , loc=0, scale=1) - log of the survival function - - ppf(q, , loc=0, scale=1) - percent point function (inverse of cdf --- quantiles) - - isf(q, , loc=0, scale=1) - inverse survival function (inverse of sf) - - moment(n, , loc=0, scale=1) - non-central n-th moment of the distribution. May not work for array arguments. - - stats(, loc=0, scale=1, moments='mv') - mean('m'), variance('v'), skew('s'), and/or kurtosis('k') - - entropy(, loc=0, scale=1) - (differential) entropy of the RV. - - fit(data, , loc=0, scale=1) - Parameter estimates for generic data - - expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None, - conditional=False, **kwds) - Expected value of a function with respect to the distribution. - Additional kwd arguments passed to integrate.quad - - median(, loc=0, scale=1) - Median of the distribution. - - mean(, loc=0, scale=1) - Mean of the distribution. - - std(, loc=0, scale=1) - Standard deviation of the distribution. - - var(, loc=0, scale=1) - Variance of the distribution. - - interval(alpha, , loc=0, scale=1) - Interval that with `alpha` percent probability contains a random - realization of this distribution. 
- - __call__(, loc=0, scale=1) - Calling a distribution instance creates a frozen RV object with the - same methods but holding the given shape, location, and scale fixed. - See Notes section. - - **Parameters for Methods** - - x : array_like - quantiles - q : array_like - lower or upper tail probability - : array_like - shape parameters - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - size : int or tuple of ints, optional - shape of random variates (default computed from input arguments ) - moments : string, optional - composed of letters ['mvsk'] specifying which moments to compute where - 'm' = mean, 'v' = variance, 's' = (Fisher's) skew and - 'k' = (Fisher's) kurtosis. (default='mv') - n : int - order of moment to calculate in method moments - - - **Methods that can be overwritten by subclasses** - :: - - _rvs - _pdf - _cdf - _sf - _ppf - _isf - _stats - _munp - _entropy - _argcheck - - There are additional (internal and private) generic methods that can - be useful for cross-checking and for debugging, but might work in all - cases when directly called. 
- - - Notes - ----- - - **Frozen Distribution** - - Alternatively, the object may be called (as a function) to fix the shape, - location, and scale parameters returning a "frozen" continuous RV object: - - rv = generic(, loc=0, scale=1) - frozen RV object with the same methods but holding the given shape, - location, and scale fixed - - **Subclassing** - - New random variables can be defined by subclassing rv_continuous class - and re-defining at least the - - _pdf or the _cdf method (normalized to location 0 and scale 1) - which will be given clean arguments (in between a and b) and - passing the argument check method - - If postive argument checking is not correct for your RV - then you will also need to re-define :: - - _argcheck - - Correct, but potentially slow defaults exist for the remaining - methods but for speed and/or accuracy you can over-ride :: - - _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf - - Rarely would you override _isf, _sf, and _logsf but you could. - - Statistics are computed using numerical integration by default. - For speed you can redefine this using - - _stats - - take shape parameters and return mu, mu2, g1, g2 - - If you can't compute one of these, return it as None - - Can also be defined with a keyword argument moments= - where is a string composed of 'm', 'v', 's', - and/or 'k'. Only the components appearing in string - should be computed and returned in the order 'm', 'v', - 's', or 'k' with missing values returned as None - - OR - - You can override - - _munp - takes n and shape parameters and returns - the nth non-central moment of the distribution. - - - Examples - -------- - To create a new Gaussian distribution, we would do the following:: - - class gaussian_gen(rv_continuous): - "Gaussian distribution" - def _pdf: - ... - ... 
- - """ - - def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0, - xtol=1e-14, badvalue=None, name=None, longname=None, - shapes=None, extradoc=None): - - rv_generic.__init__(self) - - if badvalue is None: - badvalue = nan - if name is None: - name = 'Distribution' - self.badvalue = badvalue - self.name = name - self.a = a - self.b = b - if a is None: - self.a = -inf - if b is None: - self.b = inf - self.xa = xa - self.xb = xb - self.xtol = xtol - self._size = 1 - self.m = 0.0 - self.moment_type = momtype - - self.expandarr = 1 - - if not hasattr(self,'numargs'): - #allows more general subclassing with *args - cdf_signature = inspect.getargspec(self._cdf.im_func) - numargs1 = len(cdf_signature[0]) - 2 - pdf_signature = inspect.getargspec(self._pdf.im_func) - numargs2 = len(pdf_signature[0]) - 2 - self.numargs = max(numargs1, numargs2) - #nin correction - self.vecfunc = sgf(self._ppf_single_call,otypes='d') - self.vecfunc.nin = self.numargs + 1 - self.vecentropy = sgf(self._entropy,otypes='d') - self.vecentropy.nin = self.numargs + 1 - self.veccdf = sgf(self._cdf_single_call,otypes='d') - self.veccdf.nin = self.numargs + 1 - self.shapes = shapes - self.extradoc = extradoc - if momtype == 0: - self.generic_moment = sgf(self._mom0_sc,otypes='d') - else: - self.generic_moment = sgf(self._mom1_sc,otypes='d') - self.generic_moment.nin = self.numargs+1 # Because of the *args argument - # of _mom0_sc, vectorize cannot count the number of arguments correctly. - - if longname is None: - if name[0] in ['aeiouAEIOU']: - hstr = "An " - else: - hstr = "A " - longname = hstr + name - - # generate docstring for subclass instances - if self.__doc__ is None: - self._construct_default_doc(longname=longname, extradoc=extradoc) - else: - self._construct_doc() - - ## This only works for old-style classes... 
- # self.__class__.__doc__ = self.__doc__ - - def _construct_default_doc(self, longname=None, extradoc=None): - """Construct instance docstring from the default template.""" - if longname is None: - longname = 'A' - if extradoc is None: - extradoc = '' - if extradoc.startswith('\n\n'): - extradoc = extradoc[2:] - self.__doc__ = ''.join(['%s continuous random variable.'%longname, - '\n\n%(before_notes)s\n', docheaders['notes'], - extradoc, '\n%(example)s']) - self._construct_doc() - - def _construct_doc(self): - """Construct the instance docstring with string substitutions.""" - tempdict = docdict.copy() - tempdict['name'] = self.name or 'distname' - tempdict['shapes'] = self.shapes or '' - - if self.shapes is None: - # remove shapes from call parameters if there are none - for item in ['callparams', 'default', 'before_notes']: - tempdict[item] = tempdict[item].replace(\ - "\n%(shapes)s : array_like\n shape parameters", "") - for i in range(2): - if self.shapes is None: - # necessary because we use %(shapes)s in two forms (w w/o ", ") - self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") - self.__doc__ = doccer.docformat(self.__doc__, tempdict) - - def _ppf_to_solve(self, x, q,*args): - return apply(self.cdf, (x, )+args)-q - - def _ppf_single_call(self, q, *args): - return optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol) - - # moment from definition - def _mom_integ0(self, x,m,*args): - return x**m * self.pdf(x,*args) - def _mom0_sc(self, m,*args): - return integrate.quad(self._mom_integ0, self.a, - self.b, args=(m,)+args)[0] - # moment calculated using ppf - def _mom_integ1(self, q,m,*args): - return (self.ppf(q,*args))**m - def _mom1_sc(self, m,*args): - return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0] - - ## These are the methods you must define (standard form functions) - def _argcheck(self, *args): - # Default check for correct values on args and keywords. 
- # Returns condition array of 1's where arguments are correct and - # 0's where they are not. - cond = 1 - for arg in args: - cond = logical_and(cond,(arr(arg) > 0)) - return cond - - def _pdf(self,x,*args): - return derivative(self._cdf,x,dx=1e-5,args=args,order=5) - - ## Could also define any of these - def _logpdf(self, x, *args): - return log(self._pdf(x, *args)) - - ##(return 1-d using self._size to get number) - def _rvs(self, *args): - ## Use basic inverse cdf algorithm for RV generation as default. - U = mtrand.sample(self._size) - Y = self._ppf(U,*args) - return Y - - def _cdf_single_call(self, x, *args): - return integrate.quad(self._pdf, self.a, x, args=args)[0] - - def _cdf(self, x, *args): - return self.veccdf(x,*args) - - def _logcdf(self, x, *args): - return log(self._cdf(x, *args)) - - def _sf(self, x, *args): - return 1.0-self._cdf(x,*args) - - def _logsf(self, x, *args): - return log(self._sf(x, *args)) - - def _ppf(self, q, *args): - return self.vecfunc(q,*args) - - def _isf(self, q, *args): - return self._ppf(1.0-q,*args) #use correct _ppf for subclasses - - # The actual cacluation functions (no basic checking need be done) - # If these are defined, the others won't be looked at. - # Otherwise, the other set can be defined. - def _stats(self,*args, **kwds): - return None, None, None, None - - # Central moments - def _munp(self,n,*args): - return self.generic_moment(n,*args) - - def pdf(self,x,*args,**kwds): - """ - Probability density function at x of the given RV. - - Parameters - ---------- - x : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - pdf : ndarray - Probability density function evaluated at x - - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - x,loc,scale = map(arr,(x,loc,scale)) - args = tuple(map(arr,args)) - x = arr((x-loc)*1.0/scale) - cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) - cond = cond0 & cond1 - output = zeros(shape(cond),'d') - putmask(output,(1-cond0)+np.isnan(x),self.badvalue) - if any(cond): - goodargs = argsreduce(cond, *((x,)+args+(scale,))) - scale, goodargs = goodargs[-1], goodargs[:-1] - place(output,cond,self._pdf(*goodargs) / scale) - if output.ndim == 0: - return output[()] - return output - - def logpdf(self, x, *args, **kwds): - """ - Log of the probability density function at x of the given RV. - - This uses a more numerically accurate calculation if available. - - Parameters - ---------- - x : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - logpdf : array_like - Log of the probability density function evaluated at x - - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - x,loc,scale = map(arr,(x,loc,scale)) - args = tuple(map(arr,args)) - x = arr((x-loc)*1.0/scale) - cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) - cond = cond0 & cond1 - output = empty(shape(cond),'d') - output.fill(NINF) - putmask(output,(1-cond0)+np.isnan(x),self.badvalue) - if any(cond): - goodargs = argsreduce(cond, *((x,)+args+(scale,))) - scale, goodargs = goodargs[-1], goodargs[:-1] - place(output,cond,self._logpdf(*goodargs) - log(scale)) - if output.ndim == 0: - return output[()] - return output - - - def cdf(self,x,*args,**kwds): - """ - Cumulative distribution function at x of the given RV. - - Parameters - ---------- - x : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - cdf : array_like - Cumulative distribution function evaluated at x - - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - x,loc,scale = map(arr,(x,loc,scale)) - args = tuple(map(arr,args)) - x = (x-loc)*1.0/scale - cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x > self.a) & (x < self.b) - cond2 = (x >= self.b) & cond0 - cond = cond0 & cond1 - output = zeros(shape(cond),'d') - place(output,(1-cond0)+np.isnan(x),self.badvalue) - place(output,cond2,1.0) - if any(cond): #call only if at least 1 entry - goodargs = argsreduce(cond, *((x,)+args)) - place(output,cond,self._cdf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def logcdf(self,x,*args,**kwds): - """ - Log of the cumulative distribution function at x of the given RV. - - Parameters - ---------- - x : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - logcdf : array_like - Log of the cumulative distribution function evaluated at x - - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - x,loc,scale = map(arr,(x,loc,scale)) - args = tuple(map(arr,args)) - x = (x-loc)*1.0/scale - cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x > self.a) & (x < self.b) - cond2 = (x >= self.b) & cond0 - cond = cond0 & cond1 - output = empty(shape(cond),'d') - output.fill(NINF) - place(output,(1-cond0)*(cond1==cond1)+np.isnan(x),self.badvalue) - place(output,cond2,0.0) - if any(cond): #call only if at least 1 entry - goodargs = argsreduce(cond, *((x,)+args)) - place(output,cond,self._logcdf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def sf(self,x,*args,**kwds): - """ - Survival function (1-cdf) at x of the given RV. - - Parameters - ---------- - x : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - sf : array_like - Survival function evaluated at x - - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - x,loc,scale = map(arr,(x,loc,scale)) - args = tuple(map(arr,args)) - x = (x-loc)*1.0/scale - cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x > self.a) & (x < self.b) - cond2 = cond0 & (x <= self.a) - cond = cond0 & cond1 - output = zeros(shape(cond),'d') - place(output,(1-cond0)+np.isnan(x),self.badvalue) - place(output,cond2,1.0) - if any(cond): - goodargs = argsreduce(cond, *((x,)+args)) - place(output,cond,self._sf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def logsf(self,x,*args,**kwds): - """ - Log of the survival function of the given RV. - - Returns the log of the "survival function," defined as (1 - `cdf`), - evaluated at `x`. - - Parameters - ---------- - x : array_like - quantiles - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - logsf : ndarray - Log of the survival function evaluated at `x`. 
- - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - x,loc,scale = map(arr,(x,loc,scale)) - args = tuple(map(arr,args)) - x = (x-loc)*1.0/scale - cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x > self.a) & (x < self.b) - cond2 = cond0 & (x <= self.a) - cond = cond0 & cond1 - output = empty(shape(cond),'d') - output.fill(NINF) - place(output,(1-cond0)+np.isnan(x),self.badvalue) - place(output,cond2,0.0) - if any(cond): - goodargs = argsreduce(cond, *((x,)+args)) - place(output,cond,self._logsf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def ppf(self,q,*args,**kwds): - """ - Percent point function (inverse of cdf) at q of the given RV. - - Parameters - ---------- - q : array_like - lower tail probability - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - x : array_like - quantile corresponding to the lower tail probability q. 
- - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - q,loc,scale = map(arr,(q,loc,scale)) - args = tuple(map(arr,args)) - cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc) - cond1 = (q > 0) & (q < 1) - cond2 = (q==1) & cond0 - cond = cond0 & cond1 - output = valarray(shape(cond),value=self.a*scale + loc) - place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue) - place(output,cond2,self.b*scale + loc) - if any(cond): #call only if at least 1 entry - goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) - scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] - place(output,cond,self._ppf(*goodargs)*scale + loc) - if output.ndim == 0: - return output[()] - return output - - def isf(self,q,*args,**kwds): - """ - Inverse survival function at q of the given RV. - - Parameters - ---------- - q : array_like - upper tail probability - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - Returns - ------- - x : array_like - quantile corresponding to the upper tail probability q. 
- - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - q,loc,scale = map(arr,(q,loc,scale)) - args = tuple(map(arr,args)) - cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc) - cond1 = (q > 0) & (q < 1) - cond2 = (q==1) & cond0 - cond = cond0 & cond1 - output = valarray(shape(cond),value=self.b) - #place(output,(1-cond0)*(cond1==cond1), self.badvalue) - place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue) - place(output,cond2,self.a) - if any(cond): #call only if at least 1 entry - goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q - scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] - place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf - if output.ndim == 0: - return output[()] - return output - - def stats(self,*args,**kwds): - """ - Some statistics of the given RV - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - moments : string, optional - composed of letters ['mvsk'] defining which moments to compute: - 'm' = mean, - 'v' = variance, - 's' = (Fisher's) skew, - 'k' = (Fisher's) kurtosis. - (default='mv') - - Returns - ------- - stats : sequence - of requested moments. 
- - """ - loc,scale,moments=map(kwds.get,['loc','scale','moments']) - - N = len(args) - if N > self.numargs: - if N == self.numargs + 1 and loc is None: - # loc is given without keyword - loc = args[-1] - if N == self.numargs + 2 and scale is None: - # loc and scale given without keyword - loc, scale = args[-2:] - if N == self.numargs + 3 and moments is None: - # loc, scale, and moments - loc, scale, moments = args[-3:] - args = args[:self.numargs] - if scale is None: scale = 1.0 - if loc is None: loc = 0.0 - if moments is None: moments = 'mv' - - loc,scale = map(arr,(loc,scale)) - args = tuple(map(arr,args)) - cond = self._argcheck(*args) & (scale > 0) & (loc==loc) - - signature = inspect.getargspec(self._stats.im_func) - if (signature[2] is not None) or ('moments' in signature[0]): - mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments}) - else: - mu, mu2, g1, g2 = self._stats(*args) - if g1 is None: - mu3 = None - else: - mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf - default = valarray(shape(cond), self.badvalue) - output = [] - - # Use only entries that are valid in calculation - if any(cond): - goodargs = argsreduce(cond, *(args+(scale,loc))) - scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] - if 'm' in moments: - if mu is None: - mu = self._munp(1.0,*goodargs) - out0 = default.copy() - place(out0,cond,mu*scale+loc) - output.append(out0) - - if 'v' in moments: - if mu2 is None: - mu2p = self._munp(2.0,*goodargs) - if mu is None: - mu = self._munp(1.0,*goodargs) - mu2 = mu2p - mu*mu - if np.isinf(mu): - #if mean is inf then var is also inf - mu2 = np.inf - out0 = default.copy() - place(out0,cond,mu2*scale*scale) - output.append(out0) - - if 's' in moments: - if g1 is None: - mu3p = self._munp(3.0,*goodargs) - if mu is None: - mu = self._munp(1.0,*goodargs) - if mu2 is None: - mu2p = self._munp(2.0,*goodargs) - mu2 = mu2p - mu*mu - mu3 = mu3p - 3*mu*mu2 - mu**3 - g1 = mu3 / mu2**1.5 - out0 = default.copy() - 
place(out0,cond,g1) - output.append(out0) - - if 'k' in moments: - if g2 is None: - mu4p = self._munp(4.0,*goodargs) - if mu is None: - mu = self._munp(1.0,*goodargs) - if mu2 is None: - mu2p = self._munp(2.0,*goodargs) - mu2 = mu2p - mu*mu - if mu3 is None: - mu3p = self._munp(3.0,*goodargs) - mu3 = mu3p - 3*mu*mu2 - mu**3 - mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4 - g2 = mu4 / mu2**2.0 - 3.0 - out0 = default.copy() - place(out0,cond,g2) - output.append(out0) - else: #no valid args - output = [] - for _ in moments: - out0 = default.copy() - output.append(out0) - - if len(output) == 1: - return output[0] - else: - return tuple(output) - - def moment(self, n, *args, **kwds): - """ - n'th order non-central moment of distribution - - Parameters - ---------- - n: int, n>=1 - Order of moment. - arg1, arg2, arg3,... : float - The shape parameter(s) for the distribution (see docstring of the - instance object for more information). - kwds : keyword arguments, optional - These can include "loc" and "scale", as well as other keyword - arguments relevant for a given distribution. 
- - """ - loc = kwds.get('loc', 0) - scale = kwds.get('scale', 1) - if not (self._argcheck(*args) and (scale > 0)): - return nan - if (floor(n) != n): - raise ValueError("Moment must be an integer.") - if (n < 0): raise ValueError("Moment must be positive.") - mu, mu2, g1, g2 = None, None, None, None - if (n > 0) and (n < 5): - signature = inspect.getargspec(self._stats.im_func) - if (signature[2] is not None) or ('moments' in signature[0]): - mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} - else: - mdict = {} - mu, mu2, g1, g2 = self._stats(*args,**mdict) - val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) - - # Convert to transformed X = L + S*Y - # so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n) - if loc == 0: - return scale**n * val - else: - result = 0 - fac = float(scale) / float(loc) - for k in range(n): - valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) - result += comb(n,k,exact=True)*(fac**k) * valk - result += fac**n * val - return result * loc**n - - def _nnlf(self, x, *args): - return -sum(self._logpdf(x, *args),axis=0) - - def nnlf(self, theta, x): - # - sum (log pdf(x, theta),axis=0) - # where theta are the parameters (including loc and scale) - # - try: - loc = theta[-2] - scale = theta[-1] - args = tuple(theta[:-2]) - except IndexError: - raise ValueError("Not enough input arguments.") - if not self._argcheck(*args) or scale <= 0: - return inf - x = arr((x-loc) / scale) - cond0 = (x <= self.a) | (x >= self.b) - if (any(cond0)): - return inf - else: - N = len(x) - return self._nnlf(x, *args) + N*log(scale) - - # return starting point for fit (shape arguments + loc + scale) - def _fitstart(self, data, args=None): - if args is None: - args = (1.0,)*self.numargs - return args + self.fit_loc_scale(data, *args) - - # Return the (possibly reduced) function to optimize in order to find MLE - # estimates for the .fit method - def _reduce_func(self, args, kwds): - args = list(args) - Nargs = len(args) - 
fixedn = [] - index = range(Nargs) - names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] - x0 = args[:] - for n, key in zip(index, names): - if kwds.has_key(key): - fixedn.append(n) - args[n] = kwds[key] - del x0[n] - - if len(fixedn) == 0: - func = self.nnlf - restore = None - else: - if len(fixedn) == len(index): - raise ValueError("All parameters fixed. There is nothing to optimize.") - def restore(args, theta): - # Replace with theta for all numbers not in fixedn - # This allows the non-fixed values to vary, but - # we still call self.nnlf with all parameters. - i = 0 - for n in range(Nargs): - if n not in fixedn: - args[n] = theta[i] - i += 1 - return args - - def func(theta, x): - newtheta = restore(args[:], theta) - return self.nnlf(newtheta, x) - - return x0, func, restore, args - - - def fit(self, data, *args, **kwds): - """ - Return MLEs for shape, location, and scale parameters from data. - - MLE stands for Maximum Likelihood Estimate. Starting estimates for - the fit are given by input arguments; for any arguments not provided - with starting estimates, ``self._fitstart(data)`` is called to generate - such. - - One can hold some parameters fixed to specific values by passing in - keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) - and ``floc`` and ``fscale`` (for location and scale parameters, - respectively). - - Parameters - ---------- - data : array_like - Data to use in calculating the MLEs - args : floats, optional - Starting value(s) for any shape-characterizing arguments (those not - provided will be determined by a call to ``_fitstart(data)``). - No default value. - kwds : floats, optional - Starting values for the location and scale parameters; no default. - Special keyword arguments are recognized as holding certain - parameters fixed: - - f0...fn : hold respective shape parameters fixed. - - floc : hold location parameter fixed to specified value. - - fscale : hold scale parameter fixed to specified value. 
- - optimizer : The optimizer to use. The optimizer must take func, - and starting position as the first two arguments, - plus args (for extra arguments to pass to the - function to be optimized) and disp=0 to suppress - output as keyword arguments. - - Returns - ------- - shape, loc, scale : tuple of floats - MLEs for any shape statistics, followed by those for location and - scale. - - """ - Narg = len(args) - if Narg > self.numargs: - raise ValueError("Too many input arguments.") - start = [None]*2 - if (Narg < self.numargs) or not (kwds.has_key('loc') and - kwds.has_key('scale')): - start = self._fitstart(data) # get distribution specific starting locations - args += start[Narg:-2] - loc = kwds.get('loc', start[-2]) - scale = kwds.get('scale', start[-1]) - args += (loc, scale) - x0, func, restore, args = self._reduce_func(args, kwds) - - optimizer = kwds.get('optimizer', optimize.fmin) - # convert string to function in scipy.optimize - if not callable(optimizer) and isinstance(optimizer, (str, unicode)): - if not optimizer.startswith('fmin_'): - optimizer = "fmin_"+optimizer - if optimizer == 'fmin_': - optimizer = 'fmin' - try: - optimizer = getattr(optimize, optimizer) - except AttributeError: - raise ValueError("%s is not a valid optimizer" % optimizer) - vals = optimizer(func,x0,args=(ravel(data),),disp=0) - if restore is not None: - vals = restore(args, vals) - vals = tuple(vals) - return vals - - def fit_loc_scale(self, data, *args): - """ - Estimate loc and scale parameters from data using 1st and 2nd moments - """ - mu, mu2 = self.stats(*args,**{'moments':'mv'}) - muhat = arr(data).mean() - mu2hat = arr(data).var() - Shat = sqrt(mu2hat / mu2) - Lhat = muhat - Shat*mu - return Lhat, Shat - - @np.deprecate - def est_loc_scale(self, data, *args): - """This function is deprecated, use self.fit_loc_scale(data) instead. 
""" - return self.fit_loc_scale(data, *args) - - def freeze(self,*args,**kwds): - return rv_frozen(self,*args,**kwds) - - def __call__(self, *args, **kwds): - return self.freeze(*args, **kwds) - - def _entropy(self, *args): - def integ(x): - val = self._pdf(x, *args) - return val*log(val) - - entr = -integrate.quad(integ,self.a,self.b)[0] - if not np.isnan(entr): - return entr - else: # try with different limits if integration problems - low,upp = self.ppf([0.001,0.999],*args) - if np.isinf(self.b): - upper = upp - else: - upper = self.b - if np.isinf(self.a): - lower = low - else: - lower = self.a - return -integrate.quad(integ,lower,upper)[0] - - - def entropy(self, *args, **kwds): - """ - Differential entropy of the RV. - - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale : array_like, optional - scale parameter (default=1) - - """ - loc,scale=map(kwds.get,['loc','scale']) - args, loc, scale = self._fix_loc_scale(args, loc, scale) - args = tuple(map(arr,args)) - cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc) - output = zeros(shape(cond0),'d') - place(output,(1-cond0),self.badvalue) - goodargs = argsreduce(cond0, *args) - #I don't know when or why vecentropy got broken when numargs == 0 - if self.numargs == 0: - place(output,cond0,self._entropy()+log(scale)) - else: - place(output,cond0,self.vecentropy(*goodargs)+log(scale)) - return output - - def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, - conditional=False, **kwds): - """calculate expected value of a function with respect to the distribution - - location and scale only tested on a few examples - - Parameters - ---------- - all parameters are keyword parameters - func : function (default: identity mapping) - Function for which integral is calculated. Takes only one argument. 
- args : tuple - argument (parameters) of the distribution - lb, ub : numbers - lower and upper bound for integration, default is set to the support - of the distribution - conditional : boolean (False) - If true then the integral is corrected by the conditional probability - of the integration interval. The return value is the expectation - of the function, conditional on being in the given interval. - - Additional keyword arguments are passed to the integration routine. - - - Returns - ------- - expected value : float - - Notes - ----- - This function has not been checked for it's behavior when the integral is - not finite. The integration behavior is inherited from integrate.quad. - """ - lockwds = {'loc': loc, - 'scale':scale} - if func is None: - def fun(x, *args): - return x*self.pdf(x, *args, **lockwds) - else: - def fun(x, *args): - return func(x)*self.pdf(x, *args, **lockwds) - if lb is None: - lb = loc + self.a * scale - if ub is None: - ub = loc + self.b * scale - if conditional: - invfac = (self.sf(lb, *args, **lockwds) - - self.sf(ub, *args, **lockwds)) - else: - invfac = 1.0 - kwds['args'] = args - return integrate.quad(fun, lb, ub, **kwds)[0] / invfac - - -_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1) -_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant - -## Kolmogorov-Smirnov one-sided and two-sided test statistics - -class ksone_gen(rv_continuous): - """General Kolmogorov-Smirnov one-sided test. - - %(default)s - - """ - def _cdf(self,x,n): - return 1.0-special.smirnov(n,x) - def _ppf(self,q,n): - return special.smirnovi(n,1.0-q) -ksone = ksone_gen(a=0.0, name='ksone', shapes="n") - -class kstwobign_gen(rv_continuous): - """Kolmogorov-Smirnov two-sided test for large N. 
- - %(default)s - - """ - def _cdf(self,x): - return 1.0-special.kolmogorov(x) - def _sf(self,x): - return special.kolmogorov(x) - def _ppf(self,q): - return special.kolmogi(1.0-q) -kstwobign = kstwobign_gen(a=0.0, name='kstwobign') - - -## Normal distribution - -# loc = mu, scale = std -# Keep these implementations out of the class definition so they can be reused -# by other distributions. -_norm_pdf_C = math.sqrt(2*pi) -_norm_pdf_logC = math.log(_norm_pdf_C) -def _norm_pdf(x): - return exp(-x**2/2.0) / _norm_pdf_C -def _norm_logpdf(x): - return -x**2 / 2.0 - _norm_pdf_logC -def _norm_cdf(x): - return special.ndtr(x) -def _norm_logcdf(x): - return log(special.ndtr(x)) -def _norm_ppf(q): - return special.ndtri(q) -class norm_gen(rv_continuous): - """A normal continuous random variable. - - The location (loc) keyword specifies the mean. - The scale (scale) keyword specifies the standard deviation. - - %(before_notes)s - - Notes - ----- - The probability density function for `norm` is:: - - norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi) - - %(example)s - - """ - def _rvs(self): - return mtrand.standard_normal(self._size) - def _pdf(self,x): - return _norm_pdf(x) - def _logpdf(self, x): - return _norm_logpdf(x) - def _cdf(self,x): - return _norm_cdf(x) - def _logcdf(self, x): - return _norm_logcdf(x) - def _sf(self, x): - return _norm_cdf(-x) - def _logsf(self, x): - return _norm_logcdf(-x) - def _ppf(self,q): - return _norm_ppf(q) - def _isf(self,q): - return -_norm_ppf(q) - def _stats(self): - return 0.0, 1.0, 0.0, 0.0 - def _entropy(self): - return 0.5*(log(2*pi)+1) -norm = norm_gen(name='norm') - - -## Alpha distribution -## -class alpha_gen(rv_continuous): - """An alpha continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `alpha` is:: - - alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2), - - where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``. 
- - %(example)s - - """ - def _pdf(self, x, a): - return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x) - def _logpdf(self, x, a): - return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a)) - def _cdf(self, x, a): - return special.ndtr(a-1.0/x) / special.ndtr(a) - def _ppf(self, q, a): - return 1.0/arr(a-special.ndtri(q*special.ndtr(a))) - def _stats(self, a): - return [inf]*2 + [nan]*2 -alpha = alpha_gen(a=0.0, name='alpha', shapes='a') - - -## Anglit distribution -## -class anglit_gen(rv_continuous): - """An anglit continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `anglit` is:: - - anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x), - - for ``-pi/4 <= x <= pi/4``. - - %(example)s - - """ - def _pdf(self, x): - return cos(2*x) - def _cdf(self, x): - return sin(x+pi/4)**2.0 - def _ppf(self, q): - return (arcsin(sqrt(q))-pi/4) - def _stats(self): - return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2 - def _entropy(self): - return 1-log(2) -anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit') - - -## Arcsine distribution -## -class arcsine_gen(rv_continuous): - """An arcsine continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `arcsine` is:: - - arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x))) - for 0 < x < 1. - - %(example)s - - """ - def _pdf(self, x): - return 1.0/pi/sqrt(x*(1-x)) - def _cdf(self, x): - return 2.0/pi*arcsin(sqrt(x)) - def _ppf(self, q): - return sin(pi/2.0*q)**2.0 - def _stats(self): - #mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0 - mu = 0.5 - mu2 = 1.0/8 - g1 = 0 - g2 = -3.0/2.0 - return mu, mu2, g1, g2 - def _entropy(self): - return -0.24156447527049044468 -arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine') - - -## Beta distribution -## -class beta_gen(rv_continuous): - """A beta continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `beta` is:: - - beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) * - (1-x)**(b-1), - - for ``0 < x < 1``, ``a > 0``, ``b > 0``. - - %(example)s - - """ - def _rvs(self, a, b): - return mtrand.beta(a,b,self._size) - def _pdf(self, x, a, b): - Px = (1.0-x)**(b-1.0) * x**(a-1.0) - Px /= special.beta(a,b) - return Px - def _logpdf(self, x, a, b): - lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x) - lPx -= log(special.beta(a,b)) - return lPx - def _cdf(self, x, a, b): - return special.btdtr(a,b,x) - def _ppf(self, q, a, b): - return special.btdtri(a,b,q) - def _stats(self, a, b): - mn = a *1.0 / (a + b) - var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0 - g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b) - g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b)) - g2 /= a*b*(a+b+2)*(a+b+3) - return mn, var, g1, g2 - def _fitstart(self, data): - g1 = _skew(data) - g2 = _kurtosis(data) - def func(x): - a, b = x - sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b) - ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2) - ku /= a*b*(a+b+2)*(a+b+3) - ku *= 6 - return [sk-g1, ku-g2] - a, b = optimize.fsolve(func, (1.0, 1.0)) - return super(beta_gen, self)._fitstart(data, args=(a,b)) - def fit(self, data, *args, **kwds): - floc = kwds.get('floc', None) - fscale = kwds.get('fscale', None) - if floc is not None and fscale is not None: - # special case - data = (ravel(data)-floc)/fscale - xbar = data.mean() - v = data.var(ddof=0) - fac = xbar*(1-xbar)/v - 1 - a = xbar * fac - b = (1-xbar) * fac - return a, b, floc, fscale - else: # do general fit - return super(beta_gen, self).fit(data, *args, **kwds) -beta = beta_gen(a=0.0, b=1.0, name='beta', shapes='a, b') - - -## Beta Prime -class betaprime_gen(rv_continuous): - """A beta prima continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `betaprime` is:: - - betaprime.pdf(x, a, b) = - gamma(a+b) / (gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(-a-b) - - for ``x > 0``, ``a > 0``, ``b > 0``. - - %(example)s - - """ - def _rvs(self, a, b): - u1 = gamma.rvs(a,size=self._size) - u2 = gamma.rvs(b,size=self._size) - return (u1 / u2) - def _pdf(self, x, a, b): - return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b) - def _logpdf(self, x, a, b): - return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b)) - def _cdf_skip(self, x, a, b): - # remove for now: special.hyp2f1 is incorrect for large a - x = where(x==1.0, 1.0-1e-6,x) - return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b) - def _munp(self, n, a, b): - if (n == 1.0): - return where(b > 1, a/(b-1.0), inf) - elif (n == 2.0): - return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf) - elif (n == 3.0): - return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)), - inf) - elif (n == 4.0): - return where(b > 4, - a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \ - *(b-2.0)*(b-1.0)), inf) - else: - raise NotImplementedError -betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b') - - -## Bradford -## - -class bradford_gen(rv_continuous): - """A Bradford continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `bradford` is:: - - bradford.pdf(x, c) = c / (k * (1+c*x)), - - for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``. 
- - %(example)s - - """ - def _pdf(self, x, c): - return c / (c*x + 1.0) / log(1.0+c) - def _cdf(self, x, c): - return log(1.0+c*x) / log(c+1.0) - def _ppf(self, q, c): - return ((1.0+c)**q-1)/c - def _stats(self, c, moments='mv'): - k = log(1.0+c) - mu = (c-k)/(c*k) - mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k) - g1 = None - g2 = None - if 's' in moments: - g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3)) - g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k) - if 'k' in moments: - g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \ - + 6*c*k*k*(3*k-14) + 12*k**3 - g2 /= 3*c*(c*(k-2)+2*k)**2 - return mu, mu2, g1, g2 - def _entropy(self, c): - k = log(1+c) - return k/2.0 - log(c/k) -bradford = bradford_gen(a=0.0, b=1.0, name='bradford', shapes='c') - - -## Burr - -# burr with d=1 is called the fisk distribution -class burr_gen(rv_continuous): - """A Burr continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `burr` is:: - - burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1) - - for ``x > 0``. 
- - %(example)s - - """ - def _pdf(self, x, c, d): - return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0)) - def _cdf(self, x, c, d): - return (1+x**(-c*1.0))**(-d**1.0) - def _ppf(self, q, c, d): - return (q**(-1.0/d)-1)**(-1.0/c) - def _stats(self, c, d, moments='mv'): - g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d) - g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d) - gd = gam(d) - k = gd*g2c*g2cd - g1c**2 * g1cd**2 - mu = g1c*g1cd / gd - mu2 = k / gd**2.0 - g1, g2 = None, None - g3c, g3cd = None, None - if 's' in moments: - g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d) - g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd - g1 /= sqrt(k**3) - if 'k' in moments: - if g3c is None: - g3c = gam(1-3.0/c) - if g3cd is None: - g3cd = gam(3.0/c+d) - g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d) - g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd - g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd - return mu, mu2, g1, g2 -burr = burr_gen(a=0.0, name='burr', shapes="c, d") - -# Fisk distribution -# burr is a generalization - -class fisk_gen(burr_gen): - """A Fisk continuous random variable. - - The Fisk distribution is also known as the log-logistic distribution, and - equals the Burr distribution with ``d=1``. - - %(before_notes)s - - See Also - -------- - burr - - %(example)s - - """ - def _pdf(self, x, c): - return burr_gen._pdf(self, x, c, 1.0) - def _cdf(self, x, c): - return burr_gen._cdf(self, x, c, 1.0) - def _ppf(self, x, c): - return burr_gen._ppf(self, x, c, 1.0) - def _stats(self, c): - return burr_gen._stats(self, c, 1.0) - def _entropy(self, c): - return 2 - log(c) -fisk = fisk_gen(a=0.0, name='fisk', shapes='c') - -## Cauchy - -# median = loc - -class cauchy_gen(rv_continuous): - """A Cauchy continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `cauchy` is:: - - cauchy.pdf(x) = 1 / (pi * (1 + x**2)) - - %(example)s - - """ - def _pdf(self, x): - return 1.0/pi/(1.0+x*x) - def _cdf(self, x): - return 0.5 + 1.0/pi*arctan(x) - def _ppf(self, q): - return tan(pi*q-pi/2.0) - def _sf(self, x): - return 0.5 - 1.0/pi*arctan(x) - def _isf(self, q): - return tan(pi/2.0-pi*q) - def _stats(self): - return inf, inf, nan, nan - def _entropy(self): - return log(4*pi) - def _fitstart(data, args=None): - return (0, 1) -cauchy = cauchy_gen(name='cauchy') - - -## Chi -## (positive square-root of chi-square) -## chi(1, loc, scale) = halfnormal -## chi(2, 0, scale) = Rayleigh -## chi(3, 0, scale) = MaxWell - -class chi_gen(rv_continuous): - """A chi continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `chi` is:: - - chi.pdf(x,df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2)) - - for ``x > 0``. - - %(example)s - - """ - def _rvs(self, df): - return sqrt(chi2.rvs(df,size=self._size)) - def _pdf(self, x, df): - return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5) - def _cdf(self, x, df): - return special.gammainc(df*0.5,0.5*x*x) - def _ppf(self, q, df): - return sqrt(2*special.gammaincinv(df*0.5,q)) - def _stats(self, df): - mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0) - mu2 = df - mu*mu - g1 = (2*mu**3.0 + mu*(1-2*df))/arr(mu2**1.5) - g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1) - g2 /= arr(mu2**2.0) - return mu, mu2, g1, g2 -chi = chi_gen(a=0.0, name='chi', shapes='df') - - -## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2) -class chi2_gen(rv_continuous): - """A chi-squared continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `chi2` is:: - - chi2.pdf(x,df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2) - - %(example)s - - """ - def _rvs(self, df): - return mtrand.chisquare(df,self._size) - def _pdf(self, x, df): - return exp(self._logpdf(x, df)) - def _logpdf(self, x, df): - #term1 = (df/2.-1)*log(x) - #term1[(df==2)*(x==0)] = 0 - #avoid 0*log(0)==nan - return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2. -## Px = x**(df/2.0-1)*exp(-x/2.0) -## Px /= special.gamma(df/2.0)* 2**(df/2.0) -## return log(Px) - def _cdf(self, x, df): - return special.chdtr(df, x) - def _sf(self, x, df): - return special.chdtrc(df, x) - def _isf(self, p, df): - return special.chdtri(df, p) - def _ppf(self, p, df): - return self._isf(1.0-p, df) - def _stats(self, df): - mu = df - mu2 = 2*df - g1 = 2*sqrt(2.0/df) - g2 = 12.0/df - return mu, mu2, g1, g2 -chi2 = chi2_gen(a=0.0, name='chi2', shapes='df') - - -## Cosine (Approximation to the Normal) -class cosine_gen(rv_continuous): - """A cosine continuous random variable. - - %(before_notes)s - - Notes - ----- - The cosine distribution is an approximation to the normal distribution. - The probability density function for `cosine` is:: - - cosine.pdf(x) = 1/(2*pi) * (1+cos(x)) - - for ``-pi <= x <= pi``. - - %(example)s - - """ - def _pdf(self, x): - return 1.0/2/pi*(1+cos(x)) - def _cdf(self, x): - return 1.0/2/pi*(pi + x + sin(x)) - def _stats(self): - return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2) - def _entropy(self): - return log(4*pi)-1.0 -cosine = cosine_gen(a=-pi, b=pi, name='cosine') - - -## Double Gamma distribution -class dgamma_gen(rv_continuous): - """A double gamma continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `dgamma` is:: - - dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x)) - - for ``a > 0``. 
- - %(example)s - - """ - def _rvs(self, a): - u = random(size=self._size) - return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1)) - def _pdf(self, x, a): - ax = abs(x) - return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax) - def _logpdf(self, x, a): - ax = abs(x) - return (a-1.0)*log(ax) - ax - log(2) - gamln(a) - def _cdf(self, x, a): - fac = 0.5*special.gammainc(a,abs(x)) - return where(x>0,0.5+fac,0.5-fac) - def _sf(self, x, a): - fac = 0.5*special.gammainc(a,abs(x)) - #return where(x>0,0.5-0.5*fac,0.5+0.5*fac) - return where(x>0,0.5-fac,0.5+fac) - def _ppf(self, q, a): - fac = special.gammainccinv(a,1-abs(2*q-1)) - return where(q>0.5, fac, -fac) - def _stats(self, a): - mu2 = a*(a+1.0) - return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0 -dgamma = dgamma_gen(name='dgamma', shapes='a') - - -## Double Weibull distribution -## -class dweibull_gen(rv_continuous): - """A double Weibull continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `dweibull` is:: - - dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c) - - %(example)s - - """ - def _rvs(self, c): - u = random(size=self._size) - return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1)) - def _pdf(self, x, c): - ax = abs(x) - Px = c/2.0*ax**(c-1.0)*exp(-ax**c) - return Px - def _logpdf(self, x, c): - ax = abs(x) - return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c - def _cdf(self, x, c): - Cx1 = 0.5*exp(-abs(x)**c) - return where(x > 0, 1-Cx1, Cx1) - def _ppf_skip(self, q, c): - fac = where(q<=0.5,2*q,2*q-1) - fac = pow(arr(log(1.0/fac)),1.0/c) - return where(q>0.5,fac,-fac) - def _stats(self, c): - var = gam(1+2.0/c) - return 0.0, var, 0.0, gam(1+4.0/c)/var -dweibull = dweibull_gen(name='dweibull', shapes='c') - - -## ERLANG -## -## Special case of the Gamma distribution with shape parameter an integer. -## -class erlang_gen(rv_continuous): - """An Erlang continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The Erlang distribution is a special case of the Gamma distribution, with - the shape parameter an integer. - - %(example)s - - """ - def _rvs(self, n): - return gamma.rvs(n,size=self._size) - def _arg_check(self, n): - return (n > 0) & (floor(n)==n) - def _pdf(self, x, n): - Px = (x)**(n-1.0)*exp(-x)/special.gamma(n) - return Px - def _logpdf(self, x, n): - return (n-1.0)*log(x) - x - gamln(n) - def _cdf(self, x, n): - return special.gdtr(1.0,n,x) - def _sf(self, x, n): - return special.gdtrc(1.0,n,x) - def _ppf(self, q, n): - return special.gdtrix(1.0, n, q) - def _stats(self, n): - n = n*1.0 - return n, n, 2/sqrt(n), 6/n - def _entropy(self, n): - return special.psi(n)*(1-n) + 1 + gamln(n) -erlang = erlang_gen(a=0.0, name='erlang', shapes='n') - - -## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale) -## scale == 1.0 / lambda - -class expon_gen(rv_continuous): - """An exponential continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `expon` is:: - - expon.pdf(x) = exp(-x) - - for ``x >= 0``. - - The scale parameter is equal to ``scale = 1.0 / lambda``. - - %(example)s - - """ - def _rvs(self): - return mtrand.standard_exponential(self._size) - def _pdf(self, x): - return exp(-x) - def _logpdf(self, x): - return -x - def _cdf(self, x): - return -expm1(-x) - def _ppf(self, q): - return -log1p(-q) - def _sf(self,x): - return exp(-x) - def _logsf(self, x): - return -x - def _isf(self,q): - return -log(q) - def _stats(self): - return 1.0, 1.0, 2.0, 6.0 - def _entropy(self): - return 1.0 -expon = expon_gen(a=0.0, name='expon') - - -## Exponentiated Weibull -class exponweib_gen(rv_continuous): - """An exponentiated Weibull continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `exponweib` is:: - - exponweib.pdf(x, a, c) = - a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1) - - for ``x > 0``, ``a > 0``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, a, c): - exc = exp(-x**c) - return a*c*(1-exc)**arr(a-1) * exc * x**(c-1) - def _logpdf(self, x, a, c): - exc = exp(-x**c) - return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x) - def _cdf(self, x, a, c): - exm1c = -expm1(-x**c) - return arr((exm1c)**a) - def _ppf(self, q, a, c): - return (-log1p(-q**(1.0/a)))**arr(1.0/c) -exponweib = exponweib_gen(a=0.0, name='exponweib', shapes="a, c") - - -## Exponential Power - -class exponpow_gen(rv_continuous): - """An exponential power continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `exponpow` is:: - - exponpow.pdf(x, b) = b * x**(b-1) * exp(1+x**b - exp(x**b)) - - for ``x >= 0``, ``b > 0``. - - %(example)s - - """ - def _pdf(self, x, b): - xbm1 = arr(x**(b-1.0)) - xb = xbm1 * x - return exp(1)*b*xbm1 * exp(xb - exp(xb)) - def _logpdf(self, x, b): - xb = x**(b-1.0)*x - return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb) - def _cdf(self, x, b): - xb = arr(x**b) - return -expm1(-expm1(xb)) - def _sf(self, x, b): - xb = arr(x**b) - return exp(-expm1(xb)) - def _isf(self, x, b): - return (log1p(-log(x)))**(1./b) - def _ppf(self, q, b): - return pow(log1p(-log1p(-q)), 1.0/b) -exponpow = exponpow_gen(a=0.0, name='exponpow', shapes='b') - - -## Fatigue-Life (Birnbaum-Sanders) -class fatiguelife_gen(rv_continuous): - """A fatigue-life (Birnbaum-Sanders) continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `fatiguelife` is:: - - fatiguelife.pdf(x,c) = - (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2)) - - for ``x > 0``. 
- - %(example)s - - """ - def _rvs(self, c): - z = norm.rvs(size=self._size) - x = 0.5*c*z - x2 = x*x - t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2) - return t - def _pdf(self, x, c): - return (x+1)/arr(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/arr((2.0*x*c**2))) - def _logpdf(self, x, c): - return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x)) - def _cdf(self, x, c): - return special.ndtr(1.0/c*(sqrt(x)-1.0/arr(sqrt(x)))) - def _ppf(self, q, c): - tmp = c*special.ndtri(q) - return 0.25*(tmp + sqrt(tmp**2 + 4))**2 - def _stats(self, c): - c2 = c*c - mu = c2 / 2.0 + 1 - den = 5*c2 + 4 - mu2 = c2*den /4.0 - g1 = 4*c*sqrt(11*c2+6.0)/den**1.5 - g2 = 6*c2*(93*c2+41.0) / den**2.0 - return mu, mu2, g1, g2 -fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife', shapes='c') - - -## Folded Cauchy - -class foldcauchy_gen(rv_continuous): - """A folded Cauchy continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `foldcauchy` is:: - - foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2)) - - for ``x >= 0``. - - %(example)s - - """ - def _rvs(self, c): - return abs(cauchy.rvs(loc=c,size=self._size)) - def _pdf(self, x, c): - return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2)) - def _cdf(self, x, c): - return 1.0/pi*(arctan(x-c) + arctan(x+c)) - def _stats(self, c): - return inf, inf, nan, nan -# setting xb=1000 allows to calculate ppf for up to q=0.9993 -foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy', xb=1000, shapes='c') - - -## F - -class f_gen(rv_continuous): - """An F continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `f` is:: - - df2**(df2/2) * df1**(df1/2) * x**(df1/2-1) - F.pdf(x, df1, df2) = -------------------------------------------- - (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2) - - for ``x > 0``. 
- - %(example)s - - """ - def _rvs(self, dfn, dfd): - return mtrand.f(dfn, dfd, self._size) - def _pdf(self, x, dfn, dfd): -# n = arr(1.0*dfn) -# m = arr(1.0*dfd) -# Px = m**(m/2) * n**(n/2) * x**(n/2-1) -# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2) - return exp(self._logpdf(x, dfn, dfd)) - def _logpdf(self, x, dfn, dfd): - n = 1.0*dfn - m = 1.0*dfd - lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x) - lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2) - return lPx - def _cdf(self, x, dfn, dfd): - return special.fdtr(dfn, dfd, x) - def _sf(self, x, dfn, dfd): - return special.fdtrc(dfn, dfd, x) - def _ppf(self, q, dfn, dfd): - return special.fdtri(dfn, dfd, q) - def _stats(self, dfn, dfd): - v2 = arr(dfd*1.0) - v1 = arr(dfn*1.0) - mu = where (v2 > 2, v2 / arr(v2 - 2), inf) - mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4)) - mu2 = where(v2 > 4, mu2, inf) - g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2))) - g1 = where(v2 > 6, g1, nan) - g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6)) - g2 = where(v2 > 8, g2, nan) - return mu, mu2, g1, g2 -f = f_gen(a=0.0, name='f', shapes="dfn, dfd") - - -## Folded Normal -## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S) -## -## note: regress docs have scale parameter correct, but first parameter -## he gives is a shape parameter A = c * scale - -## Half-normal is folded normal with shape-parameter c=0. - -class foldnorm_gen(rv_continuous): - """A folded normal continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `foldnorm` is:: - - foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2) - - for ``c >= 0``. 
- - %(example)s - - """ - def _rvs(self, c): - return abs(norm.rvs(loc=c,size=self._size)) - def _pdf(self, x, c): - return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0) - def _cdf(self, x, c,): - return special.ndtr(x-c) + special.ndtr(x+c) - 1.0 - def _stats(self, c): - fac = special.erf(c/sqrt(2)) - mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac - mu2 = c*c + 1 - mu*mu - c2 = c*c - g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0)) - g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \ - pi*c*(fac*fac-1)) - g1 /= pi*mu2**1.5 - - g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4 - g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac) - g2 /= mu2**2.0 - return mu, mu2, g1, g2 -foldnorm = foldnorm_gen(a=0.0, name='foldnorm', shapes='c') - - -## Extreme Value Type II or Frechet -## (defined in Regress+ documentation as Extreme LB) as -## a limiting value distribution. -## -class frechet_r_gen(rv_continuous): - """A Frechet right (or Weibull minimum) continuous random variable. - - %(before_notes)s - - See Also - -------- - weibull_min : The same distribution as `frechet_r`. - frechet_l, weibull_max - - Notes - ----- - The probability density function for `frechet_r` is:: - - frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c) - - for ``x > 0``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, c): - return c*pow(x,c-1)*exp(-pow(x,c)) - def _logpdf(self, x, c): - return log(c) + (c-1)*log(x) - pow(x,c) - def _cdf(self, x, c): - return -expm1(-pow(x,c)) - def _ppf(self, q, c): - return pow(-log1p(-q),1.0/c) - def _munp(self, n, c): - return special.gamma(1.0+n*1.0/c) - def _entropy(self, c): - return -_EULER / c - log(c) + _EULER + 1 -frechet_r = frechet_r_gen(a=0.0, name='frechet_r', shapes='c') -weibull_min = frechet_r_gen(a=0.0, name='weibull_min', shapes='c') - - - -class frechet_l_gen(rv_continuous): - """A Frechet left (or Weibull maximum) continuous random variable. 
- - %(before_notes)s - - See Also - -------- - weibull_max : The same distribution as `frechet_l`. - frechet_r, weibull_min - - Notes - ----- - The probability density function for `frechet_l` is:: - - frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c) - - for ``x < 0``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, c): - return c*pow(-x,c-1)*exp(-pow(-x,c)) - def _cdf(self, x, c): - return exp(-pow(-x,c)) - def _ppf(self, q, c): - return -pow(-log(q),1.0/c) - def _munp(self, n, c): - val = special.gamma(1.0+n*1.0/c) - if (int(n) % 2): - sgn = -1 - else: - sgn = 1 - return sgn * val - def _entropy(self, c): - return -_EULER / c - log(c) + _EULER + 1 -frechet_l = frechet_l_gen(b=0.0, name='frechet_l', shapes='c') -weibull_max = frechet_l_gen(b=0.0, name='weibull_max', shapes='c') - - -## Generalized Logistic -## -class genlogistic_gen(rv_continuous): - """A generalized logistic continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `genlogistic` is:: - - genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1) - - for ``x > 0``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, c): - Px = c*exp(-x)/(1+exp(-x))**(c+1.0) - return Px - def _logpdf(self, x, c): - return log(c) - x - (c+1.0)*log1p(exp(-x)) - def _cdf(self, x, c): - Cx = (1+exp(-x))**(-c) - return Cx - def _ppf(self, q, c): - vals = -log(pow(q,-1.0/c)-1) - return vals - def _stats(self, c): - zeta = special.zeta - mu = _EULER + special.psi(c) - mu2 = pi*pi/6.0 + zeta(2,c) - g1 = -2*zeta(3,c) + 2*_ZETA3 - g1 /= mu2**1.5 - g2 = pi**4/15.0 + 6*zeta(4,c) - g2 /= mu2**2.0 - return mu, mu2, g1, g2 -genlogistic = genlogistic_gen(name='genlogistic', shapes='c') - - -## Generalized Pareto -class genpareto_gen(rv_continuous): - """A generalized Pareto continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `genpareto` is:: - - genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c) - - for ``c != 0``, and for ``x >= 0`` for all c, - and ``x < 1/abs(c)`` for ``c < 0``. - - %(example)s - - """ - def _argcheck(self, c): - c = arr(c) - self.b = where(c < 0, 1.0/abs(c), inf) - return where(c==0, 0, 1) - def _pdf(self, x, c): - Px = pow(1+c*x,arr(-1.0-1.0/c)) - return Px - def _logpdf(self, x, c): - return (-1.0-1.0/c) * np.log1p(c*x) - def _cdf(self, x, c): - return 1.0 - pow(1+c*x,arr(-1.0/c)) - def _ppf(self, q, c): - vals = 1.0/c * (pow(1-q, -c)-1) - return vals - def _munp(self, n, c): - k = arange(0,n+1) - val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0) - return where(c*n < 1, val, inf) - def _entropy(self, c): - if (c > 0): - return 1+c - else: - self.b = -1.0 / c - return rv_continuous._entropy(self, c) - -genpareto = genpareto_gen(a=0.0, name='genpareto', shapes='c') - - -## Generalized Exponential - -class genexpon_gen(rv_continuous): - """A generalized exponential continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `genexpon` is:: - - genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \ - exp(-a*x - b*x + b/c * (1-exp(-c*x))) - - for ``x >= 0``, ``a,b,c > 0``. - - References - ---------- - "An Extension of Marshall and Olkin's Bivariate Exponential Distribution", - H.K. Ryu, Journal of the American Statistical Association, 1993. - - "The Exponential Distribution: Theory, Methods and Applications", - N. Balakrishnan, Asit P. Basu. 
- - %(example)s - - """ - def _pdf(self, x, a, b, c): - return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c) - def _cdf(self, x, a, b, c): - return -expm1((-a-b)*x + b*(-expm1(-c*x))/c) - def _logpdf(self, x, a, b, c): - return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c -genexpon = genexpon_gen(a=0.0, name='genexpon', shapes='a, b, c') - - -## Generalized Extreme Value -## c=0 is just gumbel distribution. -## This version does now accept c==0 -## Use gumbel_r for c==0 - -# new version by Per Brodtkorb, see ticket:767 -# also works for c==0, special case is gumbel_r -# increased precision for small c - -class genextreme_gen(rv_continuous): - """A generalized extreme value continuous random variable. - - %(before_notes)s - - See Also - -------- - gumbel_r - - Notes - ----- - For ``c=0``, `genextreme` is equal to `gumbel_r`. - The probability density function for `genextreme` is:: - - genextreme.pdf(x, c) = - exp(-exp(-x))*exp(-x), for c==0 - exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0 - - %(example)s - - """ - def _argcheck(self, c): - min = np.minimum - max = np.maximum - sml = floatinfo.machar.xmin - #self.b = where(c > 0, 1.0 / c,inf) - #self.a = where(c < 0, 1.0 / c, -inf) - self.b = where(c > 0, 1.0 / max(c, sml),inf) - self.a = where(c < 0, 1.0 / min(c,-sml), -inf) - return where(abs(c)==inf, 0, 1) #True #(c!=0) - def _pdf(self, x, c): - ## ex2 = 1-c*x - ## pex2 = pow(ex2,1.0/c) - ## p2 = exp(-pex2)*pex2/ex2 - ## return p2 - cx = c*x - - logex2 = where((c==0)*(x==x),0.0,log1p(-cx)) - logpex2 = where((c==0)*(x==x),-x,logex2/c) - pex2 = exp(logpex2) - # % Handle special cases - logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2) - putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation - - return exp(logpdf) - - - def _cdf(self, x, c): - #return exp(-pow(1-c*x,1.0/c)) - loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c) - return exp(-exp(loglogcdf)) - - def _ppf(self, q, c): - #return 
1.0/c*(1.-(-log(q))**c) - x = -log(-log(q)) - return where((c==0)*(x==x),x,-expm1(-c*x)/c) - def _stats(self,c): - - g = lambda n : gam(n*c+1) - g1 = g(1) - g2 = g(2) - g3 = g(3); - g4 = g(4) - g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0) - gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0); - eps = 1e-14 - gamk = where(abs(c) -1, vals, inf) -genextreme = genextreme_gen(name='genextreme', shapes='c') - - -## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition) - -## gamma(a, loc, scale) with a an integer is the Erlang distribution -## gamma(1, loc, scale) is the Exponential distribution -## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom. - -class gamma_gen(rv_continuous): - """A gamma continuous random variable. - - %(before_notes)s - - See Also - -------- - erlang, expon - - Notes - ----- - When ``a`` is an integer, this is the Erlang distribution, and for ``a=1`` - it is the exponential distribution. - - The probability density function for `gamma` is:: - - gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a) - - for ``x >= 0``, ``a > 0``. 
- - %(example)s - - """ - def _rvs(self, a): - return mtrand.standard_gamma(a, self._size) - def _pdf(self, x, a): - return exp(self._logpdf(x, a)) - def _logpdf(self, x, a): - return (a-1)*log(x) - x - gamln(a) - def _cdf(self, x, a): - return special.gammainc(a, x) - def _ppf(self, q, a): - return special.gammaincinv(a,q) - def _stats(self, a): - return a, a, 2.0/sqrt(a), 6.0/a - def _entropy(self, a): - return special.psi(a)*(1-a) + 1 + gamln(a) - def _fitstart(self, data): - a = 4 / _skew(data)**2 - return super(gamma_gen, self)._fitstart(data, args=(a,)) - def fit(self, data, *args, **kwds): - floc = kwds.get('floc', None) - if floc == 0: - xbar = ravel(data).mean() - logx_bar = ravel(log(data)).mean() - s = log(xbar) - logx_bar - def func(a): - return log(a) - special.digamma(a) - s - aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s) - xa = aest*(1-0.4) - xb = aest*(1+0.4) - a = optimize.brentq(func, xa, xb, disp=0) - scale = xbar / a - return a, floc, scale - else: - return super(gamma_gen, self).fit(data, *args, **kwds) -gamma = gamma_gen(a=0.0, name='gamma', shapes='a') - - -# Generalized Gamma -class gengamma_gen(rv_continuous): - """A generalized gamma continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `gengamma` is:: - - gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a) - - for ``x > 0``, ``a > 0``, and ``c != 0``. 
- - %(example)s - - """ - def _argcheck(self, a, c): - return (a > 0) & (c != 0) - def _pdf(self, x, a, c): - return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a)) - def _cdf(self, x, a, c): - val = special.gammainc(a,x**c) - cond = c + 0*val - return where(cond>0,val,1-val) - def _ppf(self, q, a, c): - val1 = special.gammaincinv(a,q) - val2 = special.gammaincinv(a,1.0-q) - ic = 1.0/c - cond = c+0*val1 - return where(cond > 0,val1**ic,val2**ic) - def _munp(self, n, a, c): - return special.gamma(a+n*1.0/c) / special.gamma(a) - def _entropy(self, a,c): - val = special.psi(a) - return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c)) -gengamma = gengamma_gen(a=0.0, name='gengamma', shapes="a, c") - - -## Generalized Half-Logistic -## - -class genhalflogistic_gen(rv_continuous): - """A generalized half-logistic continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `genhalflogistic` is:: - - genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2 - - for ``0 <= x <= 1/c``, and ``c > 0``. - - %(example)s - - """ - def _argcheck(self, c): - self.b = 1.0 / c - return (c > 0) - def _pdf(self, x, c): - limit = 1.0/c - tmp = arr(1-c*x) - tmp0 = tmp**(limit-1) - tmp2 = tmp0*tmp - return 2*tmp0 / (1+tmp2)**2 - def _cdf(self, x, c): - limit = 1.0/c - tmp = arr(1-c*x) - tmp2 = tmp**(limit) - return (1.0-tmp2) / (1+tmp2) - def _ppf(self, q, c): - return 1.0/c*(1-((1.0-q)/(1.0+q))**c) - def _entropy(self,c): - return 2 - (2*c+1)*log(2) -genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic', - shapes='c') - - -## Gompertz (Truncated Gumbel) -## Defined for x>=0 - -class gompertz_gen(rv_continuous): - """A Gompertz (or truncated Gumbel) continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `gompertz` is:: - - gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1)) - - for ``x >= 0``, ``c > 0``. 
- - %(example)s - - """ - def _pdf(self, x, c): - ex = exp(x) - return c*ex*exp(-c*(ex-1)) - def _cdf(self, x, c): - return 1.0-exp(-c*(exp(x)-1)) - def _ppf(self, q, c): - return log(1-1.0/c*log(1-q)) - def _entropy(self, c): - return 1.0 - log(c) - exp(c)*special.expn(1,c) -gompertz = gompertz_gen(a=0.0, name='gompertz', shapes='c') - - -## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz -## The left-skewed gumbel distribution. -## and right-skewed are available as gumbel_l and gumbel_r - -class gumbel_r_gen(rv_continuous): - """A right-skewed Gumbel continuous random variable. - - %(before_notes)s - - See Also - -------- - gumbel_l, gompertz, genextreme - - Notes - ----- - The probability density function for `gumbel_r` is:: - - gumbel_r.pdf(x) = exp(-(x + exp(-x))) - - The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett - distribution. It is also related to the extreme value distribution, - log-Weibull and Gompertz distributions. - - %(example)s - - """ - def _pdf(self, x): - ex = exp(-x) - return ex*exp(-ex) - def _logpdf(self, x): - return -x - exp(-x) - def _cdf(self, x): - return exp(-exp(-x)) - def _logcdf(self, x): - return -exp(-x) - def _ppf(self, q): - return -log(-log(q)) - def _stats(self): - return _EULER, pi*pi/6.0, \ - 12*sqrt(6)/pi**3 * _ZETA3, 12.0/5 - def _entropy(self): - return 1.0608407169541684911 -gumbel_r = gumbel_r_gen(name='gumbel_r') - - -class gumbel_l_gen(rv_continuous): - """A left-skewed Gumbel continuous random variable. - - %(before_notes)s - - See Also - -------- - gumbel_r, gompertz, genextreme - - Notes - ----- - The probability density function for `gumbel_l` is:: - - gumbel_l.pdf(x) = exp(x - exp(x)) - - The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett - distribution. It is also related to the extreme value distribution, - log-Weibull and Gompertz distributions. 
- - %(example)s - - """ - def _pdf(self, x): - ex = exp(x) - return ex*exp(-ex) - def _logpdf(self, x): - return x - exp(x) - def _cdf(self, x): - return 1.0-exp(-exp(x)) - def _ppf(self, q): - return log(-log(1-q)) - def _stats(self): - return -_EULER, pi*pi/6.0, \ - -12*sqrt(6)/pi**3 * _ZETA3, 12.0/5 - def _entropy(self): - return 1.0608407169541684911 -gumbel_l = gumbel_l_gen(name='gumbel_l') - - -# Half-Cauchy - -class halfcauchy_gen(rv_continuous): - """A Half-Cauchy continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `halfcauchy` is:: - - halfcauchy.pdf(x) = 2 / (pi * (1 + x**2)) - - for ``x >= 0``. - - %(example)s - - """ - def _pdf(self, x): - return 2.0/pi/(1.0+x*x) - def _logpdf(self, x): - return np.log(2.0/pi) - np.log1p(x*x) - def _cdf(self, x): - return 2.0/pi*arctan(x) - def _ppf(self, q): - return tan(pi/2*q) - def _stats(self): - return inf, inf, nan, nan - def _entropy(self): - return log(2*pi) -halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy') - - -## Half-Logistic -## - -class halflogistic_gen(rv_continuous): - """A half-logistic continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `halflogistic` is:: - - halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2 - - for ``x >= 0``. - - %(example)s - - """ - def _pdf(self, x): - return 0.5/(cosh(x/2.0))**2.0 - def _cdf(self, x): - return tanh(x/2.0) - def _ppf(self, q): - return 2*arctanh(q) - def _munp(self, n): - if n==1: return 2*log(2) - if n==2: return pi*pi/3.0 - if n==3: return 9*_ZETA3 - if n==4: return 7*pi**4 / 15.0 - return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1) - def _entropy(self): - return 2-log(2) -halflogistic = halflogistic_gen(a=0.0, name='halflogistic') - - -## Half-normal = chi(1, loc, scale) - -class halfnorm_gen(rv_continuous): - """A half-normal continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `halfnorm` is:: - - halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2) - - for ``x > 0``. - - %(example)s - - """ - def _rvs(self): - return abs(norm.rvs(size=self._size)) - def _pdf(self, x): - return sqrt(2.0/pi)*exp(-x*x/2.0) - def _logpdf(self, x): - return 0.5 * np.log(2.0/pi) - x*x/2.0 - def _cdf(self, x): - return special.ndtr(x)*2-1.0 - def _ppf(self, q): - return special.ndtri((1+q)/2.0) - def _stats(self): - return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \ - 8*(pi-3)/(pi-2)**2 - def _entropy(self): - return 0.5*log(pi/2.0)+0.5 -halfnorm = halfnorm_gen(a=0.0, name='halfnorm') - - -## Hyperbolic Secant - -class hypsecant_gen(rv_continuous): - """A hyperbolic secant continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `hypsecant` is:: - - hypsecant.pdf(x) = 1/pi * sech(x) - - %(example)s - - """ - def _pdf(self, x): - return 1.0/(pi*cosh(x)) - def _cdf(self, x): - return 2.0/pi*arctan(exp(x)) - def _ppf(self, q): - return log(tan(pi*q/2.0)) - def _stats(self): - return 0, pi*pi/4, 0, 2 - def _entropy(self): - return log(2*pi) -hypsecant = hypsecant_gen(name='hypsecant') - - -## Gauss Hypergeometric - -class gausshyper_gen(rv_continuous): - """A Gauss hypergeometric continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `gausshyper` is:: - - gausshyper.pdf(x, a, b, c, z) = - C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c) - - for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and - ``C = 1 / (B(a,b) F[2,1](c, a; a+b; -z))`` - - %(example)s - - """ - def _argcheck(self, a, b, c, z): - return (a > 0) & (b > 0) & (c==c) & (z==z) - def _pdf(self, x, a, b, c, z): - Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z) - return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c - def _munp(self, n, a, b, c, z): - fac = special.beta(n+a,b) / special.beta(a,b) - num = special.hyp2f1(c,a+n,a+b+n,-z) - den = special.hyp2f1(c,a,a+b,-z) - return fac*num / den -gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper', - shapes="a, b, c, z") - - -## Inverted Gamma -# special case of generalized gamma with c=-1 -# - -class invgamma_gen(rv_continuous): - """An inverted gamma continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `invgamma` is:: - - invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x) - - for x > 0, a > 0. - - %(example)s - - """ - def _pdf(self, x, a): - return exp(self._logpdf(x,a)) - def _logpdf(self, x, a): - return (-(a+1)*log(x)-gamln(a) - 1.0/x) - def _cdf(self, x, a): - return 1.0-special.gammainc(a, 1.0/x) - def _ppf(self, q, a): - return 1.0/special.gammaincinv(a,1-q) - def _munp(self, n, a): - return exp(gamln(a-n) - gamln(a)) - def _entropy(self, a): - return a - (a+1.0)*special.psi(a) + gamln(a) -invgamma = invgamma_gen(a=0.0, name='invgamma', shapes='a') - - -## Inverse Gaussian Distribution (used to be called 'invnorm' -# scale is gamma from DATAPLOT and B from Regress - -class invgauss_gen(rv_continuous): - """An inverse Gaussian continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `invgauss` is:: - - invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2)) - - for ``x > 0``. - - When `mu` is too small, evaluating the cumulative density function will be - inaccurate due to ``cdf(mu -> 0) = inf * 0``. - NaNs are returned for ``mu <= 0.0028``. - - %(example)s - - """ - def _rvs(self, mu): - return mtrand.wald(mu, 1.0, size=self._size) - def _pdf(self, x, mu): - return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2) - def _logpdf(self, x, mu): - return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x) - def _cdf(self, x, mu): - fac = sqrt(1.0/x) - # Numerical accuracy for small `mu` is bad. See #869. - C1 = norm.cdf(fac*(x-mu)/mu) - C1 += exp(1.0/mu) * norm.cdf(-fac*(x+mu)/mu) * exp(1.0/mu) - return C1 - def _stats(self, mu): - return mu, mu**3.0, 3*sqrt(mu), 15*mu -invgauss = invgauss_gen(a=0.0, name='invgauss', shapes="mu") - - -## Inverted Weibull - -class invweibull_gen(rv_continuous): - """An inverted Weibull continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `invweibull` is:: - - invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c)) - - for ``x > 0``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, c): - xc1 = x**(-c-1.0) - #xc2 = xc1*x - xc2 = x**(-c) - xc2 = exp(-xc2) - return c*xc1*xc2 - def _cdf(self, x, c): - xc1 = x**(-c) - return exp(-xc1) - def _ppf(self, q, c): - return pow(-log(q),arr(-1.0/c)) - def _entropy(self, c): - return 1+_EULER + _EULER / c - log(c) -invweibull = invweibull_gen(a=0, name='invweibull', shapes='c') - - -## Johnson SB - -class johnsonsb_gen(rv_continuous): - """A Johnson SB continuous random variable. 
- - %(before_notes)s - - See Also - -------- - johnsonsu - - Notes - ----- - The probability density function for `johnsonsb` is:: - - johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x))) - - for ``0 < x < 1`` and ``a,b > 0``, and ``phi`` is the normal pdf. - - %(example)s - - """ - def _argcheck(self, a, b): - return (b > 0) & (a==a) - def _pdf(self, x, a, b): - trm = norm.pdf(a+b*log(x/(1.0-x))) - return b*1.0/(x*(1-x))*trm - def _cdf(self, x, a, b): - return norm.cdf(a+b*log(x/(1.0-x))) - def _ppf(self, q, a, b): - return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a))) -johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonb', shapes="a, b") - - -## Johnson SU -class johnsonsu_gen(rv_continuous): - """A Johnson SU continuous random variable. - - %(before_notes)s - - See Also - -------- - johnsonsb - - Notes - ----- - The probability density function for `johnsonsu` is:: - - johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) * - phi(a + b * log(x + sqrt(x**2 + 1))) - - for all ``x, a, b > 0``, and `phi` is the normal pdf. - - %(example)s - - """ - def _argcheck(self, a, b): - return (b > 0) & (a==a) - def _pdf(self, x, a, b): - x2 = x*x - trm = norm.pdf(a+b*log(x+sqrt(x2+1))) - return b*1.0/sqrt(x2+1.0)*trm - def _cdf(self, x, a, b): - return norm.cdf(a+b*log(x+sqrt(x*x+1))) - def _ppf(self, q, a, b): - return sinh((norm.ppf(q)-a)/b) -johnsonsu = johnsonsu_gen(name='johnsonsu', shapes="a, b") - - -## Laplace Distribution - -class laplace_gen(rv_continuous): - """A Laplace continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `laplace` is:: - - laplace.pdf(x) = 1/2 * exp(-abs(x)) - - %(example)s - - """ - def _rvs(self): - return mtrand.laplace(0, 1, size=self._size) - def _pdf(self, x): - return 0.5*exp(-abs(x)) - def _cdf(self, x): - return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x)) - def _ppf(self, q): - return where(q > 0.5, -log(2*(1-q)), log(2*q)) - def _stats(self): - return 0, 2, 0, 3 - def _entropy(self): - return log(2)+1 -laplace = laplace_gen(name='laplace') - - -## Levy Distribution - -class levy_gen(rv_continuous): - """A Levy continuous random variable. - - %(before_notes)s - - See Also - -------- - levy_stable, levy_l - - Notes - ----- - The probability density function for `levy` is:: - - levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x)) - - for ``x > 0``. - - This is the same as the Levy-stable distribution with a=1/2 and b=1. - - %(example)s - - """ - def _pdf(self, x): - return 1/sqrt(2*pi*x)/x*exp(-1/(2*x)) - def _cdf(self, x): - return 2*(1-norm._cdf(1/sqrt(x))) - def _ppf(self, q): - val = norm._ppf(1-q/2.0) - return 1.0/(val*val) - def _stats(self): - return inf, inf, nan, nan -levy = levy_gen(a=0.0,name="levy") - - -## Left-skewed Levy Distribution - -class levy_l_gen(rv_continuous): - """A left-skewed Levy continuous random variable. - - %(before_notes)s - - See Also - -------- - levy, levy_stable - - Notes - ----- - The probability density function for `levy_l` is:: - - levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x))) - - for ``x < 0``. - - This is the same as the Levy-stable distribution with a=1/2 and b=-1. 
- - %(example)s - - """ - def _pdf(self, x): - ax = abs(x) - return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax)) - def _cdf(self, x): - ax = abs(x) - return 2*norm._cdf(1/sqrt(ax))-1 - def _ppf(self, q): - val = norm._ppf((q+1.0)/2) - return -1.0/(val*val) - def _stats(self): - return inf, inf, nan, nan -levy_l = levy_l_gen(b=0.0, name="levy_l") - - -## Levy-stable Distribution (only random variates) - -class levy_stable_gen(rv_continuous): - """A Levy-stable continuous random variable. - - %(before_notes)s - - See Also - -------- - levy, levy_l - - Notes - ----- - Levy-stable distribution (only random variates available -- ignore other - docs) - - %(example)s - - """ - def _rvs(self, alpha, beta): - sz = self._size - TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz) - W = expon.rvs(size=sz) - if alpha==1: - return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH)) - # else - ialpha = 1.0/alpha - aTH = alpha*TH - if beta==0: - return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha - # else - val0 = beta*tan(pi*alpha/2) - th0 = arctan(val0)/alpha - val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH)) - res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha - return res3 - - def _argcheck(self, alpha, beta): - if beta == -1: - self.b = 0.0 - elif beta == 1: - self.a = 0.0 - return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1) - - def _pdf(self, x, alpha, beta): - raise NotImplementedError - -levy_stable = levy_stable_gen(name='levy_stable', shapes="alpha, beta") - - -## Logistic (special case of generalized logistic with c=1) -## Sech-squared - -class logistic_gen(rv_continuous): - """A logistic continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `logistic` is:: - - logistic.pdf(x) = exp(-x) / (1+exp(-x))**2 - - %(example)s - - """ - def _rvs(self): - return mtrand.logistic(size=self._size) - def _pdf(self, x): - ex = exp(-x) - return ex / (1+ex)**2.0 - def _cdf(self, x): - return 1.0/(1+exp(-x)) - def _ppf(self, q): - return -log(1.0/q-1) - def _stats(self): - return 0, pi*pi/3.0, 0, 6.0/5.0 - def _entropy(self): - return 1.0 -logistic = logistic_gen(name='logistic') - - -## Log Gamma -# -class loggamma_gen(rv_continuous): - """A log gamma continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `loggamma` is:: - - loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c) - - for all ``x, c > 0``. - - %(example)s - - """ - def _rvs(self, c): - return log(mtrand.gamma(c, size=self._size)) - def _pdf(self, x, c): - return exp(c*x-exp(x)-gamln(c)) - def _cdf(self, x, c): - return special.gammainc(c, exp(x)) - def _ppf(self, q, c): - return log(special.gammaincinv(c,q)) - def _munp(self,n,*args): - # use generic moment calculation using ppf - return self._mom0_sc(n,*args) -loggamma = loggamma_gen(name='loggamma', shapes='c') - - -## Log-Laplace (Log Double Exponential) -## -class loglaplace_gen(rv_continuous): - """A log-Laplace continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `loglaplace` is:: - - loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1 - = c / 2 * x**(-c-1), for x >= 1 - - for ``c > 0``. 
- - %(example)s - - """ - def _pdf(self, x, c): - cd2 = c/2.0 - c = where(x < 1, c, -c) - return cd2*x**(c-1) - def _cdf(self, x, c): - return where(x < 1, 0.5*x**c, 1-0.5*x**(-c)) - def _ppf(self, q, c): - return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c)) - def _entropy(self, c): - return log(2.0/c) + 1.0 -loglaplace = loglaplace_gen(a=0.0, name='loglaplace', shapes='c') - - -## Lognormal (Cobb-Douglass) -## std is a shape parameter and is the variance of the underlying -## distribution. -## the mean of the underlying distribution is log(scale) - -class lognorm_gen(rv_continuous): - """A lognormal continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `lognorm` is:: - - lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2) - - for ``x > 0``, ``s > 0``. - - If log x is normally distributed with mean mu and variance sigma**2, - then x is log-normally distributed with shape paramter sigma and scale - parameter exp(mu). - - %(example)s - - """ - def _rvs(self, s): - return exp(s * norm.rvs(size=self._size)) - def _pdf(self, x, s): - Px = exp(-log(x)**2 / (2*s**2)) - return Px / (s*x*sqrt(2*pi)) - def _cdf(self, x, s): - return norm.cdf(log(x)/s) - def _ppf(self, q, s): - return exp(s*norm._ppf(q)) - def _stats(self, s): - p = exp(s*s) - mu = sqrt(p) - mu2 = p*(p-1) - g1 = sqrt((p-1))*(2+p) - g2 = numpy.polyval([1,2,3,0,-6.0],p) - return mu, mu2, g1, g2 - def _entropy(self, s): - return 0.5*(1+log(2*pi)+2*log(s)) -lognorm = lognorm_gen(a=0.0, name='lognorm', shapes='s') - - -# Gibrat's distribution is just lognormal with s=1 - -class gilbrat_gen(lognorm_gen): - """A Gilbrat continuous random variable. 
- - %(before_notes)s - - Notes - ----- - The probability density function for `gilbrat` is:: - - gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2) - - %(example)s - - """ - def _rvs(self): - return lognorm_gen._rvs(self, 1.0) - def _pdf(self, x): - return lognorm_gen._pdf(self, x, 1.0) - def _cdf(self, x): - return lognorm_gen._cdf(self, x, 1.0) - def _ppf(self, q): - return lognorm_gen._ppf(self, q, 1.0) - def _stats(self): - return lognorm_gen._stats(self, 1.0) - def _entropy(self): - return 0.5*log(2*pi) + 0.5 -gilbrat = gilbrat_gen(a=0.0, name='gilbrat') - - -# MAXWELL - -class maxwell_gen(rv_continuous): - """A Maxwell continuous random variable. - - %(before_notes)s - - Notes - ----- - A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``, - and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in - the Mathworld description [1]_. - - The probability density function for `maxwell` is:: - - maxwell.pdf(x, a) = sqrt(2/pi)x**2 * exp(-x**2/2) - - for ``x > 0``. - - References - ---------- - .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html - - %(example)s - """ - def _rvs(self): - return chi.rvs(3.0,size=self._size) - def _pdf(self, x): - return sqrt(2.0/pi)*x*x*exp(-x*x/2.0) - def _cdf(self, x): - return special.gammainc(1.5,x*x/2.0) - def _ppf(self, q): - return sqrt(2*special.gammaincinv(1.5,q)) - def _stats(self): - val = 3*pi-8 - return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \ - (-12*pi*pi + 160*pi - 384) / val**2.0 - def _entropy(self): - return _EULER + 0.5*log(2*pi)-0.5 -maxwell = maxwell_gen(a=0.0, name='maxwell') - - -# Mielke's Beta-Kappa - -class mielke_gen(rv_continuous): - """A Mielke's Beta-Kappa continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `mielke` is:: - - mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s) - - for ``x > 0``. 
- - %(example)s - - """ - def _pdf(self, x, k, s): - return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s) - def _cdf(self, x, k, s): - return x**k / (1.0+x**s)**(k*1.0/s) - def _ppf(self, q, k, s): - qsk = pow(q,s*1.0/k) - return pow(qsk/(1.0-qsk),1.0/s) -mielke = mielke_gen(a=0.0, name='mielke', shapes="k, s") - - -# Nakagami (cf Chi) - -class nakagami_gen(rv_continuous): - """A Nakagami continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `nakagami` is:: - - nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) * - x**(2*nu-1) * exp(-nu*x**2) - - for ``x > 0``, ``nu > 0``. - - %(example)s - - """ - def _pdf(self, x, nu): - return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x) - def _cdf(self, x, nu): - return special.gammainc(nu,nu*x*x) - def _ppf(self, q, nu): - return sqrt(1.0/nu*special.gammaincinv(nu,q)) - def _stats(self, nu): - mu = gam(nu+0.5)/gam(nu)/sqrt(nu) - mu2 = 1.0-mu*mu - g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5 - g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1 - g2 /= nu*mu2**2.0 - return mu, mu2, g1, g2 -nakagami = nakagami_gen(a=0.0, name="nakagami", shapes='nu') - - -# Non-central chi-squared -# nc is lambda of definition, df is nu - -class ncx2_gen(rv_continuous): - """A non-central chi-squared continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `ncx2` is:: - - ncx2.pdf(x, df, nc) = exp(-(nc+df)/2) * 1/2 * (x/nc)**((df-2)/4) - * I[(df-2)/2](sqrt(nc*x)) - - for ``x > 0``. 
- - %(example)s - - """ - def _rvs(self, df, nc): - return mtrand.noncentral_chisquare(df,nc,self._size) - def _logpdf(self, x, df, nc): - a = arr(df/2.0) - fac = -nc/2.0 - x/2.0 + (a-1)*np.log(x) - a*np.log(2) - special.gammaln(a) - return fac + np.nan_to_num(np.log(special.hyp0f1(a, nc * x/4.0))) - def _pdf(self, x, df, nc): - return np.exp(self._logpdf(x, df, nc)) - def _cdf(self, x, df, nc): - return special.chndtr(x,df,nc) - def _ppf(self, q, df, nc): - return special.chndtrix(q,df,nc) - def _stats(self, df, nc): - val = df + 2.0*nc - return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \ - 12.0*(val+2*nc)/val**2.0 -ncx2 = ncx2_gen(a=0.0, name='ncx2', shapes="df, nc") - - -# Non-central F - -class ncf_gen(rv_continuous): - """A non-central F distribution continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `ncf` is:: - - ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) - * df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) - * (df2+df1*x)**(-(df1+df2)/2) - * gamma(df1/2)*gamma(1+df2/2) - * L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) - / (B(v1/2, v2/2) * gamma((v1+v2)/2)) - - for ``df1, df2, nc > 0``. - - %(example)s - - """ - def _rvs(self, dfn, dfd, nc): - return mtrand.noncentral_f(dfn,dfd,nc,self._size) - def _pdf_skip(self, x, dfn, dfd, nc): - n1,n2 = dfn, dfd - term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.) 
- term -= gamln((n1+n2)/2.0) - Px = exp(term) - Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1) - Px *= (n2+n1*x)**(-(n1+n2)/2) - Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1) - Px /= special.beta(n1/2,n2/2) - #this function does not have a return - # drop it for now, the generic function seems to work ok - def _cdf(self, x, dfn, dfd, nc): - return special.ncfdtr(dfn,dfd,nc,x) - def _ppf(self, q, dfn, dfd, nc): - return special.ncfdtri(dfn, dfd, nc, q) - def _munp(self, n, dfn, dfd, nc): - val = (dfn *1.0/dfd)**n - term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5) - val *= exp(-nc / 2.0+term) - val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc) - return val - def _stats(self, dfn, dfd, nc): - mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn)) - mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \ - ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \ - ((dfd-2.0)**2.0 * (dfd-4.0))) - return mu, mu2, None, None -ncf = ncf_gen(a=0.0, name='ncf', shapes="dfn, dfd, nc") - - -## Student t distribution - -class t_gen(rv_continuous): - """A Student's T continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `t` is:: - - gamma((df+1)/2) - t.pdf(x, df) = --------------------------------------------------- - sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2) - - for ``df > 0``. 
- - %(example)s - - """ - def _rvs(self, df): - return mtrand.standard_t(df, size=self._size) - #Y = f.rvs(df, df, size=self._size) - #sY = sqrt(Y) - #return 0.5*sqrt(df)*(sY-1.0/sY) - def _pdf(self, x, df): - r = arr(df*1.0) - Px = exp(gamln((r+1)/2)-gamln(r/2)) - Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2) - return Px - def _logpdf(self, x, df): - r = df*1.0 - lPx = gamln((r+1)/2)-gamln(r/2) - lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r) - return lPx - def _cdf(self, x, df): - return special.stdtr(df, x) - def _sf(self, x, df): - return special.stdtr(df, -x) - def _ppf(self, q, df): - return special.stdtrit(df, q) - def _isf(self, q, df): - return -special.stdtrit(df, q) - def _stats(self, df): - mu2 = where(df > 2, df / (df-2.0), inf) - g1 = where(df > 3, 0.0, nan) - g2 = where(df > 4, 6.0/(df-4.0), nan) - return 0, mu2, g1, g2 -t = t_gen(name='t', shapes="df") - - -## Non-central T distribution - -class nct_gen(rv_continuous): - """A non-central Student's T continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `nct` is:: - - df**(df/2) * gamma(df+1) - nct.pdf(x, df, nc) = ---------------------------------------------------- - 2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2) - - for ``df > 0``, ``nc > 0``. - - %(example)s - - """ - def _rvs(self, df, nc): - return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size)) - def _pdf(self, x, df, nc): - n = df*1.0 - nc = nc*1.0 - x2 = x*x - ncx2 = nc*nc*x2 - fac1 = n + x2 - trm1 = n/2.*log(n) + gamln(n+1) - trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.) 
- Px = exp(trm1) - valF = ncx2 / (2*fac1) - trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF) - trm1 /= arr(fac1*special.gamma((n+1)/2)) - trm2 = special.hyp1f1((n+1)/2,0.5,valF) - trm2 /= arr(sqrt(fac1)*special.gamma(n/2+1)) - Px *= trm1+trm2 - return Px - def _cdf(self, x, df, nc): - return special.nctdtr(df, nc, x) - def _ppf(self, q, df, nc): - return special.nctdtrit(df, nc, q) - def _stats(self, df, nc, moments='mv'): - mu, mu2, g1, g2 = None, None, None, None - val1 = gam((df-1.0)/2.0) - val2 = gam(df/2.0) - if 'm' in moments: - mu = nc*sqrt(df/2.0)*val1/val2 - if 'v' in moments: - var = (nc*nc+1.0)*df/(df-2.0) - var -= nc*nc*df* val1**2 / 2.0 / val2**2 - mu2 = var - if 's' in moments: - g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \ - -nc*nc*(df-2)*(df-3)*val1**2) - g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \ - nc*nc*df*(val1/val2)**2) * val2 * \ - (nc*nc*(df-2)*val1**2 - \ - 2*(nc*nc+1)*val2**2) - g1 = g1n/g1d - if 'k' in moments: - g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \ - 2**(6-2*df) * nc*nc*(df-2)*(df-4)* \ - (nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \ - 4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4) - g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \ - 2*(nc*nc+1)*val2)**2 - g2 = g2n / g2d - return mu, mu2, g1, g2 -nct = nct_gen(name="nct", shapes="df, nc") - - -# Pareto - -class pareto_gen(rv_continuous): - """A Pareto continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `pareto` is:: - - pareto.pdf(x, b) = b / x**(b+1) - - for ``x >= 1``, ``b > 0``. 
- - %(example)s - - """ - def _pdf(self, x, b): - return b * x**(-b-1) - def _cdf(self, x, b): - return 1 - x**(-b) - def _ppf(self, q, b): - return pow(1-q, -1.0/b) - def _stats(self, b, moments='mv'): - mu, mu2, g1, g2 = None, None, None, None - if 'm' in moments: - mask = b > 1 - bt = extract(mask,b) - mu = valarray(shape(b),value=inf) - place(mu, mask, bt / (bt-1.0)) - if 'v' in moments: - mask = b > 2 - bt = extract( mask,b) - mu2 = valarray(shape(b), value=inf) - place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2) - if 's' in moments: - mask = b > 3 - bt = extract( mask,b) - g1 = valarray(shape(b), value=nan) - vals = 2*(bt+1.0)*sqrt(b-2.0)/((b-3.0)*sqrt(b)) - place(g1, mask, vals) - if 'k' in moments: - mask = b > 4 - bt = extract( mask,b) - g2 = valarray(shape(b), value=nan) - vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \ - polyval([1.0,-7.0,12.0,0.0],bt) - place(g2, mask, vals) - return mu, mu2, g1, g2 - def _entropy(self, c): - return 1 + 1.0/c - log(c) -pareto = pareto_gen(a=1.0, name="pareto", shapes="b") - - -# LOMAX (Pareto of the second kind.) - -class lomax_gen(rv_continuous): - """A Lomax (Pareto of the second kind) continuous random variable. - - %(before_notes)s - - Notes - ----- - The Lomax distribution is a special case of the Pareto distribution, with - (loc=-1.0). - - The probability density function for `lomax` is:: - - lomax.pdf(x, c) = c / (1+x)**(c+1) - - for ``x >= 0``, ``c > 0``. 
- - %(example)s - - """ - def _pdf(self, x, c): - return c*1.0/(1.0+x)**(c+1.0) - def _logpdf(self, x, c): - return log(c) - (c+1)*log(1+x) - def _cdf(self, x, c): - return 1.0-1.0/(1.0+x)**c - def _sf(self, x, c): - return 1.0/(1.0+x)**c - def _logsf(self, x, c): - return -c*log(1+x) - def _ppf(self, q, c): - return pow(1.0-q,-1.0/c)-1 - def _stats(self, c): - mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk') - return mu, mu2, g1, g2 - def _entropy(self, c): - return 1+1.0/c-log(c) -lomax = lomax_gen(a=0.0, name="lomax", shapes="c") - - -## Power-function distribution -## Special case of beta dist. with d =1.0 - -class powerlaw_gen(rv_continuous): - """A power-function continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `powerlaw` is:: - - powerlaw.pdf(x, a) = a * x**(a-1) - - for ``0 <= x <= 1``, ``a > 0``. - - %(example)s - - """ - def _pdf(self, x, a): - return a*x**(a-1.0) - def _logpdf(self, x, a): - return log(a) + (a-1)*log(x) - def _cdf(self, x, a): - return x**(a*1.0) - def _logcdf(self, x, a): - return a*log(x) - def _ppf(self, q, a): - return pow(q, 1.0/a) - def _stats(self, a): - return a/(a+1.0), a*(a+2.0)/(a+1.0)**2, \ - 2*(1.0-a)*sqrt((a+2.0)/(a*(a+3.0))), \ - 6*polyval([1,-1,-6,2],a)/(a*(a+3.0)*(a+4)) - def _entropy(self, a): - return 1 - 1.0/a - log(a) -powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw", shapes="a") - - -# Power log normal - -class powerlognorm_gen(rv_continuous): - """A power log-normal continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `powerlognorm` is:: - - powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) * - (Phi(-log(x)/s))**(c-1), - - where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf, - and ``x > 0``, ``s, c > 0``. 
- - %(example)s - - """ - def _pdf(self, x, c, s): - return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0) - - def _cdf(self, x, c, s): - return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0) - def _ppf(self, q, c, s): - return exp(-s*norm.ppf(pow(1.0-q,1.0/c))) -powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm", shapes="c, s") - - -# Power Normal - -class powernorm_gen(rv_continuous): - """A power normal continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `powernorm` is:: - - powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1) - - where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf, - and ``x > 0``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, c): - return c*_norm_pdf(x)* \ - (_norm_cdf(-x)**(c-1.0)) - def _logpdf(self, x, c): - return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x) - def _cdf(self, x, c): - return 1.0-_norm_cdf(-x)**(c*1.0) - def _ppf(self, q, c): - return -norm.ppf(pow(1.0-q,1.0/c)) -powernorm = powernorm_gen(name='powernorm', shapes="c") - - -# R-distribution ( a general-purpose distribution with a -# variety of shapes. - -# FIXME: PPF does not work. -class rdist_gen(rv_continuous): - """An R-distributed continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `rdist` is:: - - rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2) - - for ``-1 <= x <= 1``, ``c > 0``. - - %(example)s - - """ - def _pdf(self, x, c): - return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0) - def _cdf_skip(self, x, c): - #error inspecial.hyp2f1 for some values see tickets 758, 759 - return 0.5 + x/special.beta(0.5,c/2.0)* \ - special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x) - def _munp(self, n, c): - return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0) -rdist = rdist_gen(a=-1.0, b=1.0, name="rdist", shapes="c") - - -# Rayleigh distribution (this is chi with df=2 and loc=0.0) -# scale is the mode. 
- -class rayleigh_gen(rv_continuous): - """A Rayleigh continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `rayleigh` is:: - - rayleigh.pdf(r) = r * exp(-r**2/2) - - for ``x >= 0``. - - %(example)s - - """ - def _rvs(self): - return chi.rvs(2,size=self._size) - def _pdf(self, r): - return r*exp(-r*r/2.0) - def _cdf(self, r): - return 1.0-exp(-r*r/2.0) - def _ppf(self, q): - return sqrt(-2*log(1-q)) - def _stats(self): - val = 4-pi - return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \ - 6*pi/val-16/val**2 - def _entropy(self): - return _EULER/2.0 + 1 - 0.5*log(2) -rayleigh = rayleigh_gen(a=0.0, name="rayleigh") - - -# Reciprocal Distribution -class reciprocal_gen(rv_continuous): - """A reciprocal continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `reciprocal` is:: - - reciprocal.pdf(x, a, b) = 1 / (x*log(b/a)) - - for ``a <= x <= b``, ``a, b > 0``. - - %(example)s - - """ - def _argcheck(self, a, b): - self.a = a - self.b = b - self.d = log(b*1.0 / a) - return (a > 0) & (b > 0) & (b > a) - def _pdf(self, x, a, b): - # argcheck should be called before _pdf - return 1.0/(x*self.d) - def _logpdf(self, x, a, b): - return -log(x) - log(self.d) - def _cdf(self, x, a, b): - return (log(x)-log(a)) / self.d - def _ppf(self, q, a, b): - return a*pow(b*1.0/a,q) - def _munp(self, n, a, b): - return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n)) - def _entropy(self,a,b): - return 0.5*log(a*b)+log(log(b/a)) -reciprocal = reciprocal_gen(name="reciprocal", shapes="a, b") - - -# Rice distribution - -# FIXME: PPF does not work. -class rice_gen(rv_continuous): - """A Rice continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `rice` is:: - - rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b) - - for ``x > 0``, ``b > 0``. 
- - %(example)s - - """ - def _pdf(self, x, b): - return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b) - def _logpdf(self, x, b): - return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b)) - def _munp(self, n, b): - nd2 = n/2.0 - n1 = 1+nd2 - b2 = b*b/2.0 - return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \ - special.hyp1f1(n1,1,b2) -rice = rice_gen(a=0.0, name="rice", shapes="b") - - -# Reciprocal Inverse Gaussian - -# FIXME: PPF does not work. -class recipinvgauss_gen(rv_continuous): - """A reciprocal inverse Gaussian continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `recipinvgauss` is:: - - recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2)) - - for ``x >= 0``. - - %(example)s - - """ - def _rvs(self, mu): #added, taken from invgauss - return 1.0/mtrand.wald(mu, 1.0, size=self._size) - def _pdf(self, x, mu): - return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0)) - def _logpdf(self, x, mu): - return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x) - def _cdf(self, x, mu): - trm1 = 1.0/mu - x - trm2 = 1.0/mu + x - isqx = 1.0/sqrt(x) - return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2) - # xb=50 or something large is necessary for stats to converge without exception -recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss', - shapes="mu") - -# Semicircular - -class semicircular_gen(rv_continuous): - """A semicircular continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `semicircular` is:: - - semicircular.pdf(x) = 2/pi * sqrt(1-x**2) - - for ``-1 <= x <= 1``. 
- - %(example)s - - """ - def _pdf(self, x): - return 2.0/pi*sqrt(1-x*x) - def _cdf(self, x): - return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x)) - def _stats(self): - return 0, 0.25, 0, -1.0 - def _entropy(self): - return 0.64472988584940017414 -semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular") - - -# Triangular - -class triang_gen(rv_continuous): - """A triangular continuous random variable. - - %(before_notes)s - - Notes - ----- - The triangular distribution can be represented with an up-sloping line from - ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)`` - to ``(loc+scale)``. - - The standard form is in the range [0, 1] with c the mode. - The location parameter shifts the start to `loc`. - The scale parameter changes the width from 1 to `scale`. - - %(example)s - - """ - def _rvs(self, c): - return mtrand.triangular(0, c, 1, self._size) - def _argcheck(self, c): - return (c >= 0) & (c <= 1) - def _pdf(self, x, c): - return where(x < c, 2*x/c, 2*(1-x)/(1-c)) - def _cdf(self, x, c): - return where(x < c, x*x/c, (x*x-2*x+c)/(c-1)) - def _ppf(self, q, c): - return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q))) - def _stats(self, c): - return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \ - (5*(1.0-c+c*c)**1.5), -3.0/5.0 - def _entropy(self,c): - return 0.5-log(2) -triang = triang_gen(a=0.0, b=1.0, name="triang", shapes="c") - - -# Truncated Exponential - -class truncexpon_gen(rv_continuous): - """A truncated exponential continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `truncexpon` is:: - - truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b)) - - for ``0 < x < b``. 
- - %(example)s - - """ - def _argcheck(self, b): - self.b = b - return (b > 0) - def _pdf(self, x, b): - return exp(-x)/(1-exp(-b)) - def _logpdf(self, x, b): - return -x - log(1-exp(-b)) - def _cdf(self, x, b): - return (1.0-exp(-x))/(1-exp(-b)) - def _ppf(self, q, b): - return -log(1-q+q*exp(-b)) - def _munp(self, n, b): - #wrong answer with formula, same as in continuous.pdf - #return gam(n+1)-special.gammainc(1+n,b) - if n == 1: - return (1-(b+1)*exp(-b))/(-expm1(-b)) - elif n == 2: - return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b)) - else: - #return generic for higher moments - #return rv_continuous._mom1_sc(self,n, b) - return self._mom1_sc(n, b) - def _entropy(self, b): - eB = exp(b) - return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB) -truncexpon = truncexpon_gen(a=0.0, name='truncexpon', shapes="b") - - -# Truncated Normal - -class truncnorm_gen(rv_continuous): - """A truncated normal continuous random variable. - - %(before_notes)s - - Notes - ----- - The standard form of this distribution is a standard normal truncated to - the range [a,b] --- notice that a and b are defined over the domain of the - standard normal. To convert clip values for a specific mean and standard - deviation, use:: - - a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std - - %(example)s - - """ - def _argcheck(self, a, b): - self.a = a - self.b = b - self._nb = _norm_cdf(b) - self._na = _norm_cdf(a) - self._delta = self._nb - self._na - self._logdelta = log(self._delta) - return (a != b) - # All of these assume that _argcheck is called first - # and no other thread calls _pdf before. 
- def _pdf(self, x, a, b): - return _norm_pdf(x) / self._delta - def _logpdf(self, x, a, b): - return _norm_logpdf(x) - self._logdelta - def _cdf(self, x, a, b): - return (_norm_cdf(x) - self._na) / self._delta - def _ppf(self, q, a, b): - return norm._ppf(q*self._nb + self._na*(1.0-q)) - def _stats(self, a, b): - nA, nB = self._na, self._nb - d = nB - nA - pA, pB = _norm_pdf(a), _norm_pdf(b) - mu = (pA - pB) / d #correction sign - mu2 = 1 + (a*pA - b*pB) / d - mu*mu - return mu, mu2, None, None -truncnorm = truncnorm_gen(name='truncnorm', shapes="a, b") - - -# Tukey-Lambda - -# FIXME: RVS does not work. -class tukeylambda_gen(rv_continuous): - """A Tukey-Lamdba continuous random variable. - - %(before_notes)s - - Notes - ----- - A flexible distribution, able to represent and interpolate between the - following distributions: - - - Cauchy (lam=-1) - - logistic (lam=0.0) - - approx Normal (lam=0.14) - - u-shape (lam = 0.5) - - uniform from -1 to 1 (lam = 1) - - %(example)s - - """ - def _argcheck(self, lam): - # lam in RR. 
- return np.ones(np.shape(lam), dtype=bool) - def _pdf(self, x, lam): - Fx = arr(special.tklmbda(x,lam)) - Px = Fx**(lam-1.0) + (arr(1-Fx))**(lam-1.0) - Px = 1.0/arr(Px) - return where((lam <= 0) | (abs(x) < 1.0/arr(lam)), Px, 0.0) - def _cdf(self, x, lam): - return special.tklmbda(x, lam) - def _ppf(self, q, lam): - q = q*1.0 - vals1 = (q**lam - (1-q)**lam)/lam - vals2 = log(q/(1-q)) - return where((lam == 0)&(q==q), vals2, vals1) - def _stats(self, lam): - mu2 = 2*gam(lam+1.5)-lam*pow(4,-lam)*sqrt(pi)*gam(lam)*(1-2*lam) - mu2 /= lam*lam*(1+2*lam)*gam(1+1.5) - mu4 = 3*gam(lam)*gam(lam+0.5)*pow(2,-2*lam) / lam**3 / gam(2*lam+1.5) - mu4 += 2.0/lam**4 / (1+4*lam) - mu4 -= 2*sqrt(3)*gam(lam)*pow(2,-6*lam)*pow(3,3*lam) * \ - gam(lam+1.0/3)*gam(lam+2.0/3) / (lam**3.0 * gam(2*lam+1.5) * \ - gam(lam+0.5)) - g2 = mu4 / mu2 / mu2 - 3.0 - - return 0, mu2, 0, g2 - def _entropy(self, lam): - def integ(p): - return log(pow(p,lam-1)+pow(1-p,lam-1)) - return integrate.quad(integ,0,1)[0] -tukeylambda = tukeylambda_gen(name='tukeylambda', shapes="lam") - - -# Uniform - -class uniform_gen(rv_continuous): - """A uniform continuous random variable. - - This distribution is constant between `loc` and ``loc = scale``. - - %(before_notes)s - - %(example)s - - """ - def _rvs(self): - return mtrand.uniform(0.0,1.0,self._size) - def _pdf(self, x): - return 1.0*(x==x) - def _cdf(self, x): - return x - def _ppf(self, q): - return q - def _stats(self): - return 0.5, 1.0/12, 0, -1.2 - def _entropy(self): - return 0.0 -uniform = uniform_gen(a=0.0, b=1.0, name='uniform') - - -# Von-Mises - -# if x is not in range or loc is not in range it assumes they are angles -# and converts them to [-pi, pi] equivalents. - -eps = numpy.finfo(float).eps - - -class vonmises_gen(rv_continuous): - """A Von Mises continuous random variable. - - %(before_notes)s - - Notes - ----- - If `x` is not in range or `loc` is not in range it assumes they are angles - and converts them to [-pi, pi] equivalents. 
- - The probability density function for `vonmises` is:: - - vonmises.pdf(x, b) = exp(b*cos(x)) / (2*pi*I[0](b)) - - for ``-pi <= x <= pi``, ``b > 0``. - - %(example)s - - """ - def _rvs(self, b): - return mtrand.vonmises(0.0, b, size=self._size) - def _pdf(self, x, b): - return exp(b*cos(x)) / (2*pi*special.i0(b)) - def _cdf(self, x, b): - return vonmises_cython.von_mises_cdf(b,x) - def _stats_skip(self, b): - return 0, None, 0, None -vonmises = vonmises_gen(name='vonmises', shapes="b") - - -## Wald distribution (Inverse Normal with shape parameter mu=1.0) - -class wald_gen(invgauss_gen): - """A Wald continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `wald` is:: - - wald.pdf(x, a) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x)) - - for ``x > 0``. - - %(example)s - """ - def _rvs(self): - return mtrand.wald(1.0, 1.0, size=self._size) - def _pdf(self, x): - return invgauss._pdf(x, 1.0) - def _logpdf(self, x): - return invgauss._logpdf(x, 1.0) - def _cdf(self, x): - return invgauss._cdf(x, 1.0) - def _stats(self): - return 1.0, 1.0, 3.0, 15.0 -wald = wald_gen(a=0.0, name="wald") - - -# Wrapped Cauchy - -class wrapcauchy_gen(rv_continuous): - """A wrapped Cauchy continuous random variable. - - %(before_notes)s - - Notes - ----- - The probability density function for `wrapcauchy` is:: - - wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x))) - - for ``0 <= x <= 2*pi``, ``0 < c < 1``. - - %(example)s - - """ - def _argcheck(self, c): - return (c > 0) & (c < 1) - def _pdf(self, x, c): - return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x))) - def _cdf(self, x, c): - output = 0.0*x - val = (1.0+c)/(1.0-c) - c1 = xxk),axis=-1)-1 - return self.F[self.xk[indx]] - -def _drv_ppf(self, q, *args): - indx = argmax((self.qvals>=q),axis=-1) - return self.Finv[self.qvals[indx]] - -def _drv_nonzero(self, k, *args): - return 1 - -def _drv_moment(self, n, *args): - n = arr(n) - return sum(self.xk**n[newaxis,...] 
* self.pk, axis=0) - -def _drv_moment_gen(self, t, *args): - t = arr(t) - return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0) - -def _drv2_moment(self, n, *args): - '''non-central moment of discrete distribution''' - #many changes, originally not even a return - tot = 0.0 - diff = 1e100 - #pos = self.a - pos = max(0.0, 1.0*self.a) - count = 0 - #handle cases with infinite support - ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 ) - llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 ) - - while (pos <= self.b) and ((pos <= ulimit) or \ - (diff > self.moment_tol)): - diff = np.power(pos, n) * self.pmf(pos,*args) - # use pmf because _pmf does not check support in randint - # and there might be problems ? with correct self.a, self.b at this stage - tot += diff - pos += self.inc - count += 1 - - if self.a < 0: #handle case when self.a = -inf - diff = 1e100 - pos = -self.inc - while (pos >= self.a) and ((pos >= llimit) or \ - (diff > self.moment_tol)): - diff = np.power(pos, n) * self.pmf(pos,*args) - #using pmf instead of _pmf, see above - tot += diff - pos -= self.inc - count += 1 - return tot - -def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm - b = self.invcdf_b - a = self.invcdf_a - if isinf(b): # Be sure ending point is > q - b = max(100*q,10) - while 1: - if b >= self.b: qb = 1.0; break - qb = self._cdf(b,*args) - if (qb < q): b += 10 - else: break - else: - qb = 1.0 - if isinf(a): # be sure starting point < q - a = min(-100*q,-10) - while 1: - if a <= self.a: qb = 0.0; break - qa = self._cdf(a,*args) - if (qa > q): a -= 10 - else: break - else: - qa = self._cdf(a, *args) - - while 1: - if (qa == q): - return a - if (qb == q): - return b - if b == a+1: - #testcase: return wrong number at lower index - #python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong - #python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)" - #python -c "from scipy.stats import logser;print 
logser.ppf([0.1,0.66, 0.86,0.93],0.6)" - if qa > q: - return a - else: - return b - c = int((a+b)/2.0) - qc = self._cdf(c, *args) - if (qc < q): - a = c - qa = qc - elif (qc > q): - b = c - qb = qc - else: - return c - -def reverse_dict(dict): - newdict = {} - sorted_keys = copy(dict.keys()) - sorted_keys.sort() - for key in sorted_keys[::-1]: - newdict[dict[key]] = key - return newdict - -def make_dict(keys, values): - d = {} - for key, value in zip(keys, values): - d[key] = value - return d - -# Must over-ride one of _pmf or _cdf or pass in -# x_k, p(x_k) lists in initialization - -class rv_discrete(rv_generic): - """ - A generic discrete random variable class meant for subclassing. - - `rv_discrete` is a base class to construct specific distribution classes - and instances from for discrete random variables. rv_discrete can be used - to construct an arbitrary distribution with defined by a list of support - points and the corresponding probabilities. - - Parameters - ---------- - a : float, optional - Lower bound of the support of the distribution, default: 0 - b : float, optional - Upper bound of the support of the distribution, default: plus infinity - moment_tol : float, optional - The tolerance for the generic calculation of moments - values : tuple of two array_like - (xk, pk) where xk are points (integers) with positive probability pk - with sum(pk) = 1 - inc : integer - increment for the support of the distribution, default: 1 - other values have not been tested - badvalue : object, optional - The value in (masked) arrays that indicates a value that should be - ignored. - name : str, optional - The name of the instance. This string is used to construct the default - example for distributions. - longname : str, optional - This string is used as part of the first line of the docstring returned - when a subclass has no docstring of its own. Note: `longname` exists - for backwards compatibility, do not use for new subclasses. 
- shapes : str, optional - The shape of the distribution. For example ``"m, n"`` for a - distribution that takes two integers as the first two arguments for all - its methods. - extradoc : str, optional - This string is used as the last part of the docstring returned when a - subclass has no docstring of its own. Note: `extradoc` exists for - backwards compatibility, do not use for new subclasses. - - - Methods - ------- - - generic.rvs(, loc=0, size=1) - random variates - - generic.pmf(x, , loc=0) - probability mass function - - logpmf(x, , loc=0) - log of the probability density function - - generic.cdf(x, , loc=0) - cumulative density function - - generic.logcdf(x, , loc=0) - log of the cumulative density function - - generic.sf(x, , loc=0) - survival function (1-cdf --- sometimes more accurate) - - generic.logsf(x, , loc=0, scale=1) - log of the survival function - - generic.ppf(q, , loc=0) - percent point function (inverse of cdf --- percentiles) - - generic.isf(q, , loc=0) - inverse survival function (inverse of sf) - - generic.moment(n, , loc=0) - non-central n-th moment of the distribution. May not work for array arguments. - - generic.stats(, loc=0, moments='mv') - mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k') - - generic.entropy(, loc=0) - entropy of the RV - - generic.fit(data, , loc=0) - Parameter estimates for generic data - - generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False) - Expected value of a function with respect to the distribution. - Additional kwd arguments passed to integrate.quad - - generic.median(, loc=0) - Median of the distribution. - - generic.mean(, loc=0) - Mean of the distribution. - - generic.std(, loc=0) - Standard deviation of the distribution. - - generic.var(, loc=0) - Variance of the distribution. - - generic.interval(alpha, , loc=0) - Interval that with `alpha` percent probability contains a random - realization of this distribution. 
- - generic(, loc=0) - calling a distribution instance returns a frozen distribution - - Notes - ----- - - Alternatively, the object may be called (as a function) to fix - the shape and location parameters returning a - "frozen" discrete RV object: - - myrv = generic(, loc=0) - - frozen RV object with the same methods but holding the given shape - and location fixed. - - You can construct an aribtrary discrete rv where P{X=xk} = pk - by passing to the rv_discrete initialization method (through the - values=keyword) a tuple of sequences (xk, pk) which describes only those - values of X (xk) that occur with nonzero probability (pk). - - To create a new discrete distribution, we would do the following:: - - class poisson_gen(rv_continuous): - #"Poisson distribution" - def _pmf(self, k, mu): - ... - - and create an instance - - poisson = poisson_gen(name="poisson", shapes="mu", longname='A Poisson') - - The docstring can be created from a template. - - - Examples - -------- - - >>> import matplotlib.pyplot as plt - >>> numargs = generic.numargs - >>> [ ] = ['Replace with resonable value', ]*numargs - - Display frozen pmf: - - >>> rv = generic() - >>> x = np.arange(0, np.min(rv.dist.b, 3)+1) - >>> h = plt.plot(x, rv.pmf(x)) - - Check accuracy of cdf and ppf: - - >>> prb = generic.cdf(x, ) - >>> h = plt.semilogy(np.abs(x-generic.ppf(prb, ))+1e-20) - - Random number generation: - - >>> R = generic.rvs(, size=100) - - Custom made discrete distribution: - - >>> vals = [arange(7), (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)] - >>> custm = rv_discrete(name='custm', values=vals) - >>> h = plt.plot(vals[0], custm.pmf(vals[0])) - - """ - - def __init__(self, a=0, b=inf, name=None, badvalue=None, - moment_tol=1e-8,values=None,inc=1,longname=None, - shapes=None, extradoc=None): - - super(rv_generic,self).__init__() - - if badvalue is None: - badvalue = nan - if name is None: - name = 'Distribution' - self.badvalue = badvalue - self.a = a - self.b = b - self.invcdf_a = a # what's the 
difference to self.a, .b - self.invcdf_b = b - self.name = name - self.moment_tol = moment_tol - self.inc = inc - self._cdfvec = sgf(self._cdfsingle,otypes='d') - self.return_integers = 1 - self.vecentropy = vectorize(self._entropy) - self.shapes = shapes - self.extradoc = extradoc - - if values is not None: - self.xk, self.pk = values - self.return_integers = 0 - indx = argsort(ravel(self.xk)) - self.xk = take(ravel(self.xk),indx, 0) - self.pk = take(ravel(self.pk),indx, 0) - self.a = self.xk[0] - self.b = self.xk[-1] - self.P = make_dict(self.xk, self.pk) - self.qvals = numpy.cumsum(self.pk,axis=0) - self.F = make_dict(self.xk, self.qvals) - self.Finv = reverse_dict(self.F) - self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'), - self, rv_discrete) - self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'), - self, rv_discrete) - self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'), - self, rv_discrete) - self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete) - self.generic_moment = instancemethod(_drv_moment, - self, rv_discrete) - self.moment_gen = instancemethod(_drv_moment_gen, - self, rv_discrete) - self.numargs=0 - else: - cdf_signature = inspect.getargspec(self._cdf.im_func) - numargs1 = len(cdf_signature[0]) - 2 - pmf_signature = inspect.getargspec(self._pmf.im_func) - numargs2 = len(pmf_signature[0]) - 2 - self.numargs = max(numargs1, numargs2) - - #nin correction needs to be after we know numargs - #correct nin for generic moment vectorization - self.vec_generic_moment = sgf(_drv2_moment, otypes='d') - self.vec_generic_moment.nin = self.numargs + 2 - self.generic_moment = instancemethod(self.vec_generic_moment, - self, rv_discrete) - - #correct nin for ppf vectorization - _vppf = sgf(_drv2_ppfsingle,otypes='d') - _vppf.nin = self.numargs + 2 # +1 is for self - self._vecppf = instancemethod(_vppf, - self, rv_discrete) - - #now that self.numargs is defined, we can adjust nin - self._cdfvec.nin = self.numargs + 1 - - # generate docstring for subclass 
instances - if longname is None: - if name[0] in ['aeiouAEIOU']: - hstr = "An " - else: - hstr = "A " - longname = hstr + name - if self.__doc__ is None: - self._construct_default_doc(longname=longname, extradoc=extradoc) - else: - self._construct_doc() - - ## This only works for old-style classes... - # self.__class__.__doc__ = self.__doc__ - - def _construct_default_doc(self, longname=None, extradoc=None): - """Construct instance docstring from the rv_discrete template.""" - if extradoc is None: - extradoc = '' - if extradoc.startswith('\n\n'): - extradoc = extradoc[2:] - self.__doc__ = ''.join(['%s discrete random variable.'%longname, - '\n\n%(before_notes)s\n', docheaders['notes'], - extradoc, '\n%(example)s']) - self._construct_doc() - - def _construct_doc(self): - """Construct the instance docstring with string substitutions.""" - tempdict = docdict_discrete.copy() - tempdict['name'] = self.name or 'distname' - tempdict['shapes'] = self.shapes or '' - - if self.shapes is None: - # remove shapes from call parameters if there are none - for item in ['callparams', 'default', 'before_notes']: - tempdict[item] = tempdict[item].replace(\ - "\n%(shapes)s : array_like\n shape parameters", "") - for i in range(2): - if self.shapes is None: - # necessary because we use %(shapes)s in two forms (w w/o ", ") - self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") - self.__doc__ = doccer.docformat(self.__doc__, tempdict) - - - def _rvs(self, *args): - return self._ppf(mtrand.random_sample(self._size),*args) - - def _nonzero(self, k, *args): - return floor(k)==k - - def _argcheck(self, *args): - cond = 1 - for arg in args: - cond &= (arg > 0) - return cond - - def _pmf(self, k, *args): - return self._cdf(k,*args) - self._cdf(k-1,*args) - - def _logpmf(self, k, *args): - return log(self._pmf(k, *args)) - - def _cdfsingle(self, k, *args): - m = arange(int(self.a),k+1) - return sum(self._pmf(m,*args),axis=0) - - def _cdf(self, x, *args): - k = floor(x) - return 
self._cdfvec(k,*args) - - def _logcdf(self, x, *args): - return log(self._cdf(x, *args)) - - def _sf(self, x, *args): - return 1.0-self._cdf(x,*args) - - def _logsf(self, x, *args): - return log(self._sf(x, *args)) - - def _ppf(self, q, *args): - return self._vecppf(q, *args) - - def _isf(self, q, *args): - return self._ppf(1-q,*args) - - def _stats(self, *args): - return None, None, None, None - - def _munp(self, n, *args): - return self.generic_moment(n, *args) - - - def rvs(self, *args, **kwargs): - """ - Random variates of given type. - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - size : int or tuple of ints, optional - defining number of random variates (default=1) - - Returns - ------- - rvs : array_like - random variates of given `size` - - """ - kwargs['discrete'] = True - return super(rv_discrete, self).rvs(*args, **kwargs) - - def pmf(self, k,*args, **kwds): - """ - Probability mass function at k of the given RV. - - Parameters - ---------- - k : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - - Returns - ------- - pmf : array_like - Probability mass function evaluated at k - - """ - loc = kwds.get('loc') - args, loc = self._fix_loc(args, loc) - k,loc = map(arr,(k,loc)) - args = tuple(map(arr,args)) - k = arr((k-loc)) - cond0 = self._argcheck(*args) - cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args) - cond = cond0 & cond1 - output = zeros(shape(cond),'d') - place(output,(1-cond0) + np.isnan(k),self.badvalue) - if any(cond): - goodargs = argsreduce(cond, *((k,)+args)) - place(output,cond,self._pmf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def logpmf(self, k,*args, **kwds): - """ - Log of the probability mass function at k of the given RV. - - Parameters - ---------- - k : array_like - quantiles - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - Location parameter. Default is 0. - - Returns - ------- - logpmf : array_like - Log of the probability mass function evaluated at k - - """ - loc = kwds.get('loc') - args, loc = self._fix_loc(args, loc) - k,loc = map(arr,(k,loc)) - args = tuple(map(arr,args)) - k = arr((k-loc)) - cond0 = self._argcheck(*args) - cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args) - cond = cond0 & cond1 - output = empty(shape(cond),'d') - output.fill(NINF) - place(output,(1-cond0) + np.isnan(k),self.badvalue) - if any(cond): - goodargs = argsreduce(cond, *((k,)+args)) - place(output,cond,self._logpmf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def cdf(self, k, *args, **kwds): - """ - Cumulative distribution function at k of the given RV - - Parameters - ---------- - k : array_like, int - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - - Returns - ------- - cdf : array_like - Cumulative distribution function evaluated at k - - """ - loc = kwds.get('loc') - args, loc = self._fix_loc(args, loc) - k,loc = map(arr,(k,loc)) - args = tuple(map(arr,args)) - k = arr((k-loc)) - cond0 = self._argcheck(*args) - cond1 = (k >= self.a) & (k < self.b) - cond2 = (k >= self.b) - cond = cond0 & cond1 - output = zeros(shape(cond),'d') - place(output,(1-cond0) + np.isnan(k),self.badvalue) - place(output,cond2*(cond0==cond0), 1.0) - - if any(cond): - goodargs = argsreduce(cond, *((k,)+args)) - place(output,cond,self._cdf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def logcdf(self, k, *args, **kwds): - """ - Log of the cumulative distribution function at k of the given RV - - Parameters - ---------- - k : array_like, int - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - - Returns - ------- - logcdf : array_like - Log of the cumulative distribution function evaluated at k - - """ - loc = kwds.get('loc') - args, loc = self._fix_loc(args, loc) - k,loc = map(arr,(k,loc)) - args = tuple(map(arr,args)) - k = arr((k-loc)) - cond0 = self._argcheck(*args) - cond1 = (k >= self.a) & (k < self.b) - cond2 = (k >= self.b) - cond = cond0 & cond1 - output = empty(shape(cond),'d') - output.fill(NINF) - place(output,(1-cond0) + np.isnan(k),self.badvalue) - place(output,cond2*(cond0==cond0), 0.0) - - if any(cond): - goodargs = argsreduce(cond, *((k,)+args)) - place(output,cond,self._logcdf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def sf(self,k,*args,**kwds): - """ - Survival function (1-cdf) at k of the given RV - - Parameters - ---------- - k : array_like - quantiles - arg1, arg2, arg3,... 
: array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - - Returns - ------- - sf : array_like - Survival function evaluated at k - - """ - loc= kwds.get('loc') - args, loc = self._fix_loc(args, loc) - k,loc = map(arr,(k,loc)) - args = tuple(map(arr,args)) - k = arr(k-loc) - cond0 = self._argcheck(*args) - cond1 = (k >= self.a) & (k <= self.b) - cond2 = (k < self.a) & cond0 - cond = cond0 & cond1 - output = zeros(shape(cond),'d') - place(output,(1-cond0) + np.isnan(k),self.badvalue) - place(output,cond2,1.0) - if any(cond): - goodargs = argsreduce(cond, *((k,)+args)) - place(output,cond,self._sf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def logsf(self,k,*args,**kwds): - """ - Log of the survival function (1-cdf) at k of the given RV - - Parameters - ---------- - k : array_like - quantiles - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - - Returns - ------- - sf : array_like - Survival function evaluated at k - - """ - loc= kwds.get('loc') - args, loc = self._fix_loc(args, loc) - k,loc = map(arr,(k,loc)) - args = tuple(map(arr,args)) - k = arr(k-loc) - cond0 = self._argcheck(*args) - cond1 = (k >= self.a) & (k <= self.b) - cond2 = (k < self.a) & cond0 - cond = cond0 & cond1 - output = empty(shape(cond),'d') - output.fill(NINF) - place(output,(1-cond0) + np.isnan(k),self.badvalue) - place(output,cond2,0.0) - if any(cond): - goodargs = argsreduce(cond, *((k,)+args)) - place(output,cond,self._logsf(*goodargs)) - if output.ndim == 0: - return output[()] - return output - - def ppf(self,q,*args,**kwds): - """ - Percent point function (inverse of cdf) at q of the given RV - - Parameters - ---------- - q : array_like - lower tail probability - arg1, arg2, 
arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - scale: array_like, optional - scale parameter (default=1) - - Returns - ------- - k : array_like - quantile corresponding to the lower tail probability, q. - - """ - loc = kwds.get('loc') - args, loc = self._fix_loc(args, loc) - q,loc = map(arr,(q,loc)) - args = tuple(map(arr,args)) - cond0 = self._argcheck(*args) & (loc == loc) - cond1 = (q > 0) & (q < 1) - cond2 = (q==1) & cond0 - cond = cond0 & cond1 - output = valarray(shape(cond),value=self.badvalue,typecode='d') - #output type 'd' to handle nin and inf - place(output,(q==0)*(cond==cond), self.a-1) - place(output,cond2,self.b) - if any(cond): - goodargs = argsreduce(cond, *((q,)+args+(loc,))) - loc, goodargs = goodargs[-1], goodargs[:-1] - place(output,cond,self._ppf(*goodargs) + loc) - - if output.ndim == 0: - return output[()] - return output - - def isf(self,q,*args,**kwds): - """ - Inverse survival function (1-sf) at q of the given RV - - Parameters - ---------- - q : array_like - upper tail probability - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - - Returns - ------- - k : array_like - quantile corresponding to the upper tail probability, q. 
- - """ - - loc = kwds.get('loc') - args, loc = self._fix_loc(args, loc) - q,loc = map(arr,(q,loc)) - args = tuple(map(arr,args)) - cond0 = self._argcheck(*args) & (loc == loc) - cond1 = (q > 0) & (q < 1) - cond2 = (q==1) & cond0 - cond = cond0 & cond1 - #old: -## output = valarray(shape(cond),value=self.b,typecode='d') -## #typecode 'd' to handle nin and inf -## place(output,(1-cond0)*(cond1==cond1), self.badvalue) -## place(output,cond2,self.a-1) - - #same problem as with ppf - # copied from ppf and changed - output = valarray(shape(cond),value=self.badvalue,typecode='d') - #output type 'd' to handle nin and inf - place(output,(q==0)*(cond==cond), self.b) - place(output,cond2,self.a-1) - - # call place only if at least 1 valid argument - if any(cond): - goodargs = argsreduce(cond, *((q,)+args+(loc,))) - loc, goodargs = goodargs[-1], goodargs[:-1] - place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766 - - if output.ndim == 0: - return output[()] - return output - - def stats(self, *args, **kwds): - """ - Some statistics of the given discrete RV - - Parameters - ---------- - arg1, arg2, arg3,... : array_like - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - loc : array_like, optional - location parameter (default=0) - moments : string, optional - composed of letters ['mvsk'] defining which moments to compute: - 'm' = mean, - 'v' = variance, - 's' = (Fisher's) skew, - 'k' = (Fisher's) kurtosis. - (default='mv') - - Returns - ------- - stats : sequence - of requested moments. 
- - """ - loc,moments=map(kwds.get,['loc','moments']) - N = len(args) - if N > self.numargs: - if N == self.numargs + 1 and loc is None: # loc is given without keyword - loc = args[-1] - if N == self.numargs + 2 and moments is None: # loc, scale, and moments - loc, moments = args[-2:] - args = args[:self.numargs] - if loc is None: loc = 0.0 - if moments is None: moments = 'mv' - - loc = arr(loc) - args = tuple(map(arr,args)) - cond = self._argcheck(*args) & (loc==loc) - - signature = inspect.getargspec(self._stats.im_func) - if (signature[2] is not None) or ('moments' in signature[0]): - mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments}) - else: - mu, mu2, g1, g2 = self._stats(*args) - if g1 is None: - mu3 = None - else: - mu3 = g1*(mu2**1.5) - default = valarray(shape(cond), self.badvalue) - output = [] - - # Use only entries that are valid in calculation - goodargs = argsreduce(cond, *(args+(loc,))) - loc, goodargs = goodargs[-1], goodargs[:-1] - - if 'm' in moments: - if mu is None: - mu = self._munp(1.0,*goodargs) - out0 = default.copy() - place(out0,cond,mu+loc) - output.append(out0) - - if 'v' in moments: - if mu2 is None: - mu2p = self._munp(2.0,*goodargs) - if mu is None: - mu = self._munp(1.0,*goodargs) - mu2 = mu2p - mu*mu - out0 = default.copy() - place(out0,cond,mu2) - output.append(out0) - - if 's' in moments: - if g1 is None: - mu3p = self._munp(3.0,*goodargs) - if mu is None: - mu = self._munp(1.0,*goodargs) - if mu2 is None: - mu2p = self._munp(2.0,*goodargs) - mu2 = mu2p - mu*mu - mu3 = mu3p - 3*mu*mu2 - mu**3 - g1 = mu3 / mu2**1.5 - out0 = default.copy() - place(out0,cond,g1) - output.append(out0) - - if 'k' in moments: - if g2 is None: - mu4p = self._munp(4.0,*goodargs) - if mu is None: - mu = self._munp(1.0,*goodargs) - if mu2 is None: - mu2p = self._munp(2.0,*goodargs) - mu2 = mu2p - mu*mu - if mu3 is None: - mu3p = self._munp(3.0,*goodargs) - mu3 = mu3p - 3*mu*mu2 - mu**3 - mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4 - g2 = mu4 / 
mu2**2.0 - 3.0 - out0 = default.copy() - place(out0,cond,g2) - output.append(out0) - - if len(output) == 1: - return output[0] - else: - return tuple(output) - - def moment(self, n, *args, **kwds): # Non-central moments in standard form. - """ - n'th non-central moment of the distribution - - Parameters - ---------- - n: int, n>=1 - order of moment - arg1, arg2, arg3,...: float - The shape parameter(s) for the distribution (see docstring of the - instance object for more information) - - loc : float, optional - location parameter (default=0) - scale : float, optional - scale parameter (default=1) - - """ - loc = kwds.get('loc', 0) - scale = kwds.get('scale', 1) - if not (self._argcheck(*args) and (scale > 0)): - return nan - if (floor(n) != n): - raise ValueError("Moment must be an integer.") - if (n < 0): raise ValueError("Moment must be positive.") - mu, mu2, g1, g2 = None, None, None, None - if (n > 0) and (n < 5): - signature = inspect.getargspec(self._stats.im_func) - if (signature[2] is not None) or ('moments' in signature[0]): - dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} - else: - dict = {} - mu, mu2, g1, g2 = self._stats(*args,**dict) - val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) - - # Convert to transformed X = L + S*Y - # so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n) - if loc == 0: - return scale**n * val - else: - result = 0 - fac = float(scale) / float(loc) - for k in range(n): - valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) - result += comb(n,k,exact=True)*(fac**k) * valk - result += fac**n * val - return result * loc**n - - - def freeze(self, *args, **kwds): - return rv_frozen(self, *args, **kwds) - - def _entropy(self, *args): - if hasattr(self,'pk'): - return entropy(self.pk) - else: - mu = int(self.stats(*args, **{'moments':'m'})) - val = self.pmf(mu,*args) - if (val==0.0): ent = 0.0 - else: ent = -val*log(val) - k = 1 - term = 1.0 - while (abs(term) > eps): - val = 
self.pmf(mu+k,*args) - if val == 0.0: term = 0.0 - else: term = -val * log(val) - val = self.pmf(mu-k,*args) - if val != 0.0: term -= val*log(val) - k += 1 - ent += term - return ent - - def entropy(self, *args, **kwds): - loc= kwds.get('loc') - args, loc = self._fix_loc(args, loc) - loc = arr(loc) - args = map(arr,args) - cond0 = self._argcheck(*args) & (loc==loc) - output = zeros(shape(cond0),'d') - place(output,(1-cond0),self.badvalue) - goodargs = argsreduce(cond0, *args) - place(output,cond0,self.vecentropy(*goodargs)) - return output - - def __call__(self, *args, **kwds): - return self.freeze(*args,**kwds) - - def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False): - """calculate expected value of a function with respect to the distribution - for discrete distribution - - Parameters - ---------- - fn : function (default: identity mapping) - Function for which sum is calculated. Takes only one argument. - args : tuple - argument (parameters) of the distribution - optional keyword parameters - lb, ub : numbers - lower and upper bound for integration, default is set to the support - of the distribution, lb and ub are inclusive (ul<=k<=ub) - conditional : boolean (False) - If true then the expectation is corrected by the conditional - probability of the integration interval. The return value is the - expectation of the function, conditional on being in the given - interval (k such that ul<=k<=ub). - - Returns - ------- - expected value : float - - Notes - ----- - * function is not vectorized - * accuracy: uses self.moment_tol as stopping criterium - for heavy tailed distribution e.g. 
zipf(4), accuracy for - mean, variance in example is only 1e-5, - increasing precision (moment_tol) makes zipf very slow - * suppnmin=100 internal parameter for minimum number of points to evaluate - could be added as keyword parameter, to evaluate functions with - non-monotonic shapes, points include integers in (-suppnmin, suppnmin) - * uses maxcount=1000 limits the number of points that are evaluated - to break loop for infinite sums - (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers - are evaluated) - - """ - - #moment_tol = 1e-12 # increase compared to self.moment_tol, - # too slow for only small gain in precision for zipf - - #avoid endless loop with unbound integral, eg. var of zipf(2) - maxcount = 1000 - suppnmin = 100 #minimum number of points to evaluate (+ and -) - - if func is None: - def fun(x): - #loc and args from outer scope - return (x+loc)*self._pmf(x, *args) - else: - def fun(x): - #loc and args from outer scope - return func(x+loc)*self._pmf(x, *args) - # used pmf because _pmf does not check support in randint - # and there might be problems(?) 
with correct self.a, self.b at this stage - # maybe not anymore, seems to work now with _pmf - - self._argcheck(*args) # (re)generate scalar self.a and self.b - if lb is None: - lb = (self.a) - else: - lb = lb - loc #convert bound for standardized distribution - if ub is None: - ub = (self.b) - else: - ub = ub - loc #convert bound for standardized distribution - if conditional: - if np.isposinf(ub)[()]: - #work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan - invfac = 1 - self.cdf(lb-1,*args) - else: - invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args) - else: - invfac = 1.0 - - tot = 0.0 - low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args) - low = max(min(-suppnmin, low), lb) - upp = min(max(suppnmin, upp), ub) - supp = np.arange(low, upp+1, self.inc) #check limits - #print 'low, upp', low, upp - tot = np.sum(fun(supp)) - diff = 1e100 - pos = upp + self.inc - count = 0 - - #handle cases with infinite support - - while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount: - diff = fun(pos) - tot += diff - pos += self.inc - count += 1 - - if self.a < 0: #handle case when self.a = -inf - diff = 1e100 - pos = low - self.inc - while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount: - diff = fun(pos) - tot += diff - pos -= self.inc - count += 1 - if count > maxcount: - # fixme: replace with proper warning - print 'sum did not converge' - return tot/invfac - - -# Binomial - -class binom_gen(rv_discrete): - def _rvs(self, n, pr): - return mtrand.binomial(n,pr,self._size) - def _argcheck(self, n, pr): - self.b = n - return (n>=0) & (pr >= 0) & (pr <= 1) - def _logpmf(self, x, n, pr): - k = floor(x) - combiln = (gamln(n+1) - (gamln(k+1) + - gamln(n-k+1))) - return combiln + k*np.log(pr) + (n-k)*np.log(1-pr) - def _pmf(self, x, n, pr): - return exp(self._logpmf(x, n, pr)) - def _cdf(self, x, n, pr): - k = floor(x) - vals = special.bdtr(k,n,pr) - return vals - def _sf(self, x, n, pr): - k = floor(x) - return special.bdtrc(k,n,pr) - 
def _ppf(self, q, n, pr): - vals = ceil(special.bdtrik(q,n,pr)) - vals1 = vals-1 - temp = special.bdtr(vals1,n,pr) - return where(temp >= q, vals1, vals) - def _stats(self, n, pr): - q = 1.0-pr - mu = n * pr - var = n * pr * q - g1 = (q-pr) / sqrt(n*pr*q) - g2 = (1.0-6*pr*q)/(n*pr*q) - return mu, var, g1, g2 - def _entropy(self, n, pr): - k = r_[0:n+1] - vals = self._pmf(k,n,pr) - lvals = where(vals==0,0.0,log(vals)) - return -sum(vals*lvals,axis=0) -binom = binom_gen(name='binom',shapes="n, pr",extradoc=""" - -Binomial distribution - - Counts the number of successes in *n* independent - trials when the probability of success each time is *pr*. - - binom.pmf(k,n,p) = choose(n,k)*p**k*(1-p)**(n-k) - for k in {0,1,...,n} -""") - -# Bernoulli distribution - -class bernoulli_gen(binom_gen): - def _rvs(self, pr): - return binom_gen._rvs(self, 1, pr) - def _argcheck(self, pr): - return (pr >=0 ) & (pr <= 1) - def _logpmf(self, x, pr): - return binom._logpmf(x, 1, pr) - def _pmf(self, x, pr): - return binom._pmf(x, 1, pr) - def _cdf(self, x, pr): - return binom._cdf(x, 1, pr) - def _sf(self, x, pr): - return binom._sf(x, 1, pr) - def _ppf(self, q, pr): - return binom._ppf(q, 1, pr) - def _stats(self, pr): - return binom._stats(1, pr) - def _entropy(self, pr): - return -pr*log(pr)-(1-pr)*log(1-pr) -bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="pr",extradoc=""" - -Bernoulli distribution - - 1 if binary experiment succeeds, 0 otherwise. Experiment - succeeds with probabilty *pr*. - - bernoulli.pmf(k,p) = 1-p if k = 0 - = p if k = 1 - for k = 0,1 -""" -) - -# Negative binomial -class nbinom_gen(rv_discrete): - """A negative binomial discrete random variable. - - %(before_notes)s - - Notes - ----- - Probability mass function, given by - ``np.choose(k+n-1, n-1) * p**n * (1-p)**k`` for ``k >= 0``. 
- - %(example)s - """ - def _rvs(self, n, pr): - return mtrand.negative_binomial(n, pr, self._size) - def _argcheck(self, n, pr): - return (n >= 0) & (pr >= 0) & (pr <= 1) - def _pmf(self, x, n, pr): - coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n)) - return coeff * power(pr,n) * power(1-pr,x) - def _logpmf(self, x, n, pr): - coeff = gamln(n+x) - gamln(x+1) - gamln(n) - return coeff + n*log(pr) + x*log(1-pr) - def _cdf(self, x, n, pr): - k = floor(x) - return special.betainc(n, k+1, pr) - def _sf_skip(self, x, n, pr): - #skip because special.nbdtrc doesn't work for 0= q, vals1, vals) - def _stats(self, n, pr): - Q = 1.0 / pr - P = Q - 1.0 - mu = n*P - var = n*P*Q - g1 = (Q+P)/sqrt(n*P*Q) - g2 = (1.0 + 6*P*Q) / (n*P*Q) - return mu, var, g1, g2 -nbinom = nbinom_gen(name='nbinom', shapes="n, pr", extradoc=""" - -Negative binomial distribution - -nbinom.pmf(k,n,p) = choose(k+n-1,n-1) * p**n * (1-p)**k -for k >= 0. -""" - ) - - -## Geometric distribution - -class geom_gen(rv_discrete): - def _rvs(self, pr): - return mtrand.geometric(pr,size=self._size) - def _argcheck(self, pr): - return (pr<=1) & (pr >= 0) - def _pmf(self, k, pr): - return (1-pr)**(k-1) * pr - def _logpmf(self, k, pr): - return (k-1)*log(1-pr) + pr - def _cdf(self, x, pr): - k = floor(x) - return (1.0-(1.0-pr)**k) - def _sf(self, x, pr): - k = floor(x) - return (1.0-pr)**k - def _ppf(self, q, pr): - vals = ceil(log(1.0-q)/log(1-pr)) - temp = 1.0-(1.0-pr)**(vals-1) - return where((temp >= q) & (vals > 0), vals-1, vals) - def _stats(self, pr): - mu = 1.0/pr - qr = 1.0-pr - var = qr / pr / pr - g1 = (2.0-pr) / sqrt(qr) - g2 = numpy.polyval([1,-6,6],pr)/(1.0-pr) - return mu, var, g1, g2 -geom = geom_gen(a=1,name='geom', longname="A geometric", - shapes="pr", extradoc=""" - -Geometric distribution - -geom.pmf(k,p) = (1-p)**(k-1)*p -for k >= 1 -""" - ) - -## Hypergeometric distribution - -class hypergeom_gen(rv_discrete): - """A hypergeometric discrete random variable. 
- - The hypergeometric distribution models drawing objects from a bin. - M is the total number of objects, n is total number of Type I objects. - The random variate represents the number of Type I objects in N drawn - without replacement from the total population. - - %(before_notes)s - - Notes - ----- - The probability mass function is defined as:: - - pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N), - for N - (M-n) <= k <= min(m,N) - - %(example)s - - """ - - def _rvs(self, M, n, N): - return mtrand.hypergeometric(n,M-n,N,size=self._size) - def _argcheck(self, M, n, N): - cond = rv_discrete._argcheck(self,M,n,N) - cond &= (n <= M) & (N <= M) - self.a = N-(M-n) - self.b = min(n,N) - return cond - def _logpmf(self, k, M, n, N): - tot, good = M, n - bad = tot - good - return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \ - - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \ - + gamln(N+1) - def _pmf(self, k, M, n, N): - #same as the following but numerically more precise - #return comb(good,k) * comb(bad,N-k) / comb(tot,N) - return exp(self._logpmf(k, M, n, N)) - def _stats(self, M, n, N): - tot, good = M, n - n = good*1.0 - m = (tot-good)*1.0 - N = N*1.0 - tot = m+n - p = n/tot - mu = N*p - var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1)) - g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N))) - m2, m3, m4, m5 = m**2, m**3, m**4, m**5 - n2, n3, n4, n5 = n**2, n**2, n**4, n**5 - g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \ - - 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \ - - 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \ - + 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \ - 12*m*n2 - 6*n3) - return mu, var, g1, g2 - def _entropy(self, M, n, N): - k = r_[N-(M-n):min(n,N)+1] - vals = self.pmf(k,M,n,N) - lvals = where(vals==0.0,0.0,log(vals)) - return -sum(vals*lvals,axis=0) - def _sf(self, k, M, n, N): - """More precise calculation, 1 - cdf doesn't cut 
it.""" - # This for loop is needed because `k` can be an array. If that's the - # case, the sf() method makes M, n and N arrays of the same shape. We - # therefore unpack all inputs args, so we can do the manual integration. - res = [] - for quant, tot, good, draw in zip(k, M, n, N): - # Manual integration over probability mass function. More accurate - # than integrate.quad. - k2 = np.arange(quant + 1, draw + 1) - res.append(np.sum(self._pmf(k2, tot, good, draw))) - return np.asarray(res) - -hypergeom = hypergeom_gen(name='hypergeom', shapes="M, n, N") - - -## Logarithmic (Log-Series), (Series) distribution -# FIXME: Fails _cdfvec -class logser_gen(rv_discrete): - def _rvs(self, pr): - # looks wrong for pr>0.5, too few k=1 - # trying to use generic is worse, no k=1 at all - return mtrand.logseries(pr,size=self._size) - def _argcheck(self, pr): - return (pr > 0) & (pr < 1) - def _pmf(self, k, pr): - return -pr**k * 1.0 / k / log(1-pr) - def _stats(self, pr): - r = log(1-pr) - mu = pr / (pr - 1.0) / r - mu2p = -pr / r / (pr-1.0)**2 - var = mu2p - mu*mu - mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3 - mu3 = mu3p - 3*mu*mu2p + 2*mu**3 - g1 = mu3 / var**1.5 - - mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \ - 6*pr*pr / (pr-1)**4) - mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 - g2 = mu4 / var**2 - 3.0 - return mu, var, g1, g2 -logser = logser_gen(a=1,name='logser', longname='A logarithmic', - shapes='pr', extradoc=""" - -Logarithmic (Log-Series, Series) distribution - -logser.pmf(k,p) = - p**k / (k*log(1-p)) -for k >= 1 -""" - ) - -## Poisson distribution - -class poisson_gen(rv_discrete): - def _rvs(self, mu): - return mtrand.poisson(mu, self._size) - def _pmf(self, k, mu): - Pk = k*log(mu)-gamln(k+1) - mu - return exp(Pk) - def _cdf(self, x, mu): - k = floor(x) - return special.pdtr(k,mu) - def _sf(self, x, mu): - k = floor(x) - return special.pdtrc(k,mu) - def _ppf(self, q, mu): - vals = ceil(special.pdtrik(q,mu)) - vals1 = vals-1 - temp = special.pdtr(vals1,mu) 
- return where((temp >= q), vals1, vals) - def _stats(self, mu): - var = mu - g1 = 1.0/arr(sqrt(mu)) - g2 = 1.0 / arr(mu) - return mu, var, g1, g2 -poisson = poisson_gen(name="poisson", longname='A Poisson', - shapes="mu", extradoc=""" - -Poisson distribution - -poisson.pmf(k, mu) = exp(-mu) * mu**k / k! -for k >= 0 -""" - ) - -## (Planck) Discrete Exponential - -class planck_gen(rv_discrete): - def _argcheck(self, lambda_): - if (lambda_ > 0): - self.a = 0 - self.b = inf - return 1 - elif (lambda_ < 0): - self.a = -inf - self.b = 0 - return 1 - return 0 # lambda_ = 0 - def _pmf(self, k, lambda_): - fact = (1-exp(-lambda_)) - return fact*exp(-lambda_*k) - def _cdf(self, x, lambda_): - k = floor(x) - return 1-exp(-lambda_*(k+1)) - def _ppf(self, q, lambda_): - vals = ceil(-1.0/lambda_ * log1p(-q)-1) - vals1 = (vals-1).clip(self.a, np.inf) - temp = self._cdf(vals1, lambda_) - return where(temp >= q, vals1, vals) - def _stats(self, lambda_): - mu = 1/(exp(lambda_)-1) - var = exp(-lambda_)/(expm1(-lambda_))**2 - g1 = 2*cosh(lambda_/2.0) - g2 = 4+2*cosh(lambda_) - return mu, var, g1, g2 - def _entropy(self, lambda_): - l = lambda_ - C = (1-exp(-l)) - return l*exp(-l)/C - log(C) -planck = planck_gen(name='planck',longname='A discrete exponential ', - shapes="lamda", - extradoc=""" - -Planck (Discrete Exponential) - -planck.pmf(k,b) = (1-exp(-b))*exp(-b*k) -for k*b >= 0 -""" - ) - -class boltzmann_gen(rv_discrete): - def _pmf(self, k, lambda_, N): - fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) - return fact*exp(-lambda_*k) - def _cdf(self, x, lambda_, N): - k = floor(x) - return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) - def _ppf(self, q, lambda_, N): - qnew = q*(1-exp(-lambda_*N)) - vals = ceil(-1.0/lambda_ * log(1-qnew)-1) - vals1 = (vals-1).clip(0.0, np.inf) - temp = self._cdf(vals1, lambda_, N) - return where(temp >= q, vals1, vals) - def _stats(self, lambda_, N): - z = exp(-lambda_) - zN = exp(-lambda_*N) - mu = z/(1.0-z)-N*zN/(1-zN) - var = z/(1.0-z)**2 - 
N*N*zN/(1-zN)**2 - trm = (1-zN)/(1-z) - trm2 = (z*trm**2 - N*N*zN) - g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) - g1 = g1 / trm2**(1.5) - g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) - g2 = g2 / trm2 / trm2 - return mu, var, g1, g2 - -boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ', - shapes="lamda, N", - extradoc=""" - -Boltzmann (Truncated Discrete Exponential) - -boltzmann.pmf(k,b,N) = (1-exp(-b))*exp(-b*k)/(1-exp(-b*N)) -for k=0,..,N-1 -""" - ) - - - - -## Discrete Uniform - -class randint_gen(rv_discrete): - def _argcheck(self, min, max): - self.a = min - self.b = max-1 - return (max > min) - def _pmf(self, k, min, max): - fact = 1.0 / (max - min) - return fact - def _cdf(self, x, min, max): - k = floor(x) - return (k-min+1)*1.0/(max-min) - def _ppf(self, q, min, max): - vals = ceil(q*(max-min)+min)-1 - vals1 = (vals-1).clip(min, max) - temp = self._cdf(vals1, min, max) - return where(temp >= q, vals1, vals) - def _stats(self, min, max): - m2, m1 = arr(max), arr(min) - mu = (m2 + m1 - 1.0) / 2 - d = m2 - m1 - var = (d-1)*(d+1.0)/12.0 - g1 = 0.0 - g2 = -6.0/5.0*(d*d+1.0)/(d-1.0)*(d+1.0) - return mu, var, g1, g2 - def _rvs(self, min, max=None): - """An array of *size* random integers >= min and < max. 
- - If max is None, then range is >=0 and < min - """ - return mtrand.randint(min, max, self._size) - - def _entropy(self, min, max): - return log(max-min) -randint = randint_gen(name='randint',longname='A discrete uniform '\ - '(random integer)', shapes="min, max", - extradoc=""" - -Discrete Uniform - - Random integers >=min and 1 - def _pmf(self, k, a): - Pk = 1.0 / arr(special.zeta(a,1) * k**a) - return Pk - def _munp(self, n, a): - return special.zeta(a-n,1) / special.zeta(a,1) - def _stats(self, a): - sv = errp(0) - fac = arr(special.zeta(a,1)) - mu = special.zeta(a-1.0,1)/fac - mu2p = special.zeta(a-2.0,1)/fac - var = mu2p - mu*mu - mu3p = special.zeta(a-3.0,1)/fac - mu3 = mu3p - 3*mu*mu2p + 2*mu**3 - g1 = mu3 / arr(var**1.5) - - mu4p = special.zeta(a-4.0,1)/fac - sv = errp(sv) - mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 - g2 = mu4 / arr(var**2) - 3.0 - return mu, var, g1, g2 -zipf = zipf_gen(a=1,name='zipf', longname='A Zipf', - shapes="a", extradoc=""" - -Zipf distribution - -zipf.pmf(k,a) = 1/(zeta(a)*k**a) -for k >= 1 -""" - ) - - -# Discrete Laplacian - -class dlaplace_gen(rv_discrete): - def _pmf(self, k, a): - return tanh(a/2.0)*exp(-a*abs(k)) - def _cdf(self, x, a): - k = floor(x) - ind = (k >= 0) - const = exp(a)+1 - return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const) - def _ppf(self, q, a): - const = 1.0/(1+exp(-a)) - cons2 = 1+exp(a) - ind = q < const - vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a)) - vals1 = (vals-1) - temp = self._cdf(vals1, a) - return where(temp >= q, vals1, vals) - - def _stats_skip(self, a): - # variance mu2 does not aggree with sample variance, - # nor with direct calculation using pmf - # remove for now because generic calculation works - # except it does not show nice zeros for mean and skew(?) 
- ea = exp(-a) - e2a = exp(-2*a) - e3a = exp(-3*a) - e4a = exp(-4*a) - mu2 = 2* (e2a + ea) / (1-ea)**3.0 - mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0 - return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3 - def _entropy(self, a): - return a / sinh(a) - log(tanh(a/2.0)) -dlaplace = dlaplace_gen(a=-inf, - name='dlaplace', longname='A discrete Laplacian', - shapes="a", extradoc=""" - -Discrete Laplacian distribution. - -dlaplace.pmf(k,a) = tanh(a/2) * exp(-a*abs(k)) -for a > 0. -""" - ) - - -class skellam_gen(rv_discrete): - def _rvs(self, mu1, mu2): - n = self._size - return np.random.poisson(mu1, n)-np.random.poisson(mu2, n) - def _pmf(self, x, mu1, mu2): - px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2, - ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2) - #ncx2.pdf() returns nan's for extremely low probabilities - return px - def _cdf(self, x, mu1, mu2): - x = np.floor(x) - px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1), - 1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2)) - return px - -# enable later -## def _cf(self, w, mu1, mu2): -## # characteristic function -## poisscf = poisson._cf -## return poisscf(w, mu1) * poisscf(-w, mu2) - - def _stats(self, mu1, mu2): - mean = mu1 - mu2 - var = mu1 + mu2 - g1 = mean / np.sqrt((var)**3) - g2 = 1 / var - return mean, var, g1, g2 -skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam', - shapes="mu1,mu2", extradoc=""" - -Skellam distribution - - Probability distribution of the difference of two correlated or - uncorrelated Poisson random variables. - - Let k1 and k2 be two Poisson-distributed r.v. with expected values - lam1 and lam2. Then, k1-k2 follows a Skellam distribution with - parameters mu1 = lam1 - rho*sqrt(lam1*lam2) and - mu2 = lam2 - rho*sqrt(lam1*lam2), where rho is the correlation - coefficient between k1 and k2. If the two Poisson-distributed r.v. - are independent then rho = 0. - - Parameters mu1 and mu2 must be strictly positive. 
- - For details see: http://en.wikipedia.org/wiki/Skellam_distribution - -""" - ) diff --git a/scipy-0.10.1/scipy/stats/futil.f b/scipy-0.10.1/scipy/stats/futil.f deleted file mode 100644 index a82396d7dc..0000000000 --- a/scipy-0.10.1/scipy/stats/futil.f +++ /dev/null @@ -1,130 +0,0 @@ -C Sorts an array arr(1:N) into ascending numerical order -C using the QuickSort algorithm. On output arr is replaced with its -C sorted rearrangement. - SUBROUTINE DQSORT(N,ARR) -CF2PY INTENT(IN,OUT,COPY) ARR -CF2PY INTEGER, INTENT(HIDE), DEPEND(ARR) :: N=len(ARR) - INTEGER N,M,NSTACK - REAL*8 ARR(N) - PARAMETER (M=7, NSTACK=100) - INTEGER I, IR, J, JSTACK, K, L, ISTACK(NSTACK) - REAL*8 A, TEMP - - JSTACK = 0 - L = 1 - IR = N - 1 IF(IR-L.LT.M)THEN - DO J=L+1,IR - A = ARR(J) - DO I = J-1,L,-1 - IF (ARR(I).LE.A) GOTO 2 - ARR(I+1)=ARR(I) - ENDDO - I = L-1 - 2 ARR(I+1) = A - ENDDO - - IF(JSTACK.EQ.0)RETURN - IR=ISTACK(JSTACK) - L=ISTACK(JSTACK-1) - JSTACK = JSTACK - 2 - - ELSE - K = (L+IR)/2 - TEMP = ARR(K) - ARR(K) = ARR(L+1) - ARR(L+1) = TEMP - IF(ARR(L).GT.ARR(IR))THEN - TEMP = ARR(L) - ARR(L) = ARR(IR) - ARR(IR) = TEMP - ENDIF - IF(ARR(L+1).GT.ARR(IR))THEN - TEMP=ARR(L+1) - ARR(L+1)=ARR(IR) - ARR(IR)=TEMP - ENDIF - IF(ARR(L).GT.ARR(L+1))THEN - TEMP=ARR(L) - ARR(L) = ARR(L+1) - ARR(L+1) = TEMP - ENDIF - - I=L+1 - J=IR - A=ARR(L+1) - 3 CONTINUE - I=I+1 - IF(ARR(I).LT.A)GOTO 3 - 4 CONTINUE - J=J-1 - IF(ARR(J).GT.A)GOTO 4 - IF(J.LT.I)GOTO 5 - TEMP = ARR(I) - ARR(I) = ARR(J) - ARR(J) = TEMP - GOTO 3 - 5 ARR(L+1) = ARR(J) - ARR(J) = A - JSTACK = JSTACK + 2 - IF(JSTACK.GT.NSTACK)RETURN - IF(IR-I+1.GE.J-1)THEN - ISTACK(JSTACK)=IR - ISTACK(JSTACK-1)=I - IR=J-1 - ELSE - ISTACK(JSTACK)=J-1 - ISTACK(JSTACK-1)=L - L=I - ENDIF - ENDIF - GOTO 1 - END - -C Finds repeated elements of ARR and their occurrence incidence -C reporting the result in REPLIST and REPNUM respectively. -C NLIST is the number of repeated elements found. 
-C Algorithm first sorts the list and then walks down it -C counting repeats as they are found. - SUBROUTINE DFREPS(ARR,N,REPLIST,REPNUM,NLIST) -CF2PY INTENT(IN) ARR -CF2PY INTENT(OUT) REPLIST -CF2PY INTENT(OUT) REPNUM -CF2PY INTENT(OUT) NLIST -CF2PY INTEGER, INTENT(HIDE), DEPEND(ARR) :: N=len(ARR) - REAL*8 REPLIST(N), ARR(N) - REAL*8 LASTVAL - INTEGER REPNUM(N) - INTEGER HOWMANY, REPEAT, IND, NLIST, NNUM - - CALL DQSORT(N,ARR) - LASTVAL = ARR(1) - HOWMANY = 0 - IND = 2 - NNUM = 1 - NLIST = 1 - REPEAT = 0 - DO WHILE(IND.LE.N) - IF(ARR(IND).NE.LASTVAL)THEN - IF (REPEAT.EQ.1)THEN - REPNUM(NNUM)=HOWMANY+1 - NNUM=NNUM+1 - REPEAT=0 - HOWMANY=0 - ENDIF - ELSE - HOWMANY=HOWMANY+1 - REPEAT=1 - IF(HOWMANY.EQ.1)THEN - REPLIST(NLIST)=ARR(IND) - NLIST=NLIST+1 - ENDIF - ENDIF - LASTVAL=ARR(IND) - IND=IND+1 - ENDDO - IF(REPEAT.EQ.1)THEN - REPNUM(NNUM)=HOWMANY+1 - ENDIF - NLIST = NLIST - 1 - END diff --git a/scipy-0.10.1/scipy/stats/kde.py b/scipy-0.10.1/scipy/stats/kde.py deleted file mode 100644 index 11810f5934..0000000000 --- a/scipy-0.10.1/scipy/stats/kde.py +++ /dev/null @@ -1,340 +0,0 @@ -#------------------------------------------------------------------------------- -# -# Define classes for (uni/multi)-variate kernel density estimation. -# -# Currently, only Gaussian kernels are implemented. -# -# Written by: Robert Kern -# -# Date: 2004-08-09 -# -# Modified: 2005-02-10 by Robert Kern. -# Contributed to Scipy -# 2005-10-07 by Robert Kern. -# Some fixes to match the new scipy_core -# -# Copyright 2004-2005 by Enthought, Inc. -# -#------------------------------------------------------------------------------- - -# Standard library imports. -import warnings - -# Scipy imports. -from scipy import linalg, special -from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \ - ravel, power, atleast_1d, squeeze, sum, transpose -import numpy as np -from numpy.random import randint, multivariate_normal - -# Local imports. 
-import stats -import mvn - -__all__ = ['gaussian_kde'] - - -class gaussian_kde(object): - """ - Representation of a kernel-density estimate using Gaussian kernels. - - - - Attributes - ---------- - d : int - number of dimensions - n : int - number of datapoints - - Methods - ------- - kde.evaluate(points) : array - evaluate the estimated pdf on a provided set of points - kde(points) : array - same as kde.evaluate(points) - kde.integrate_gaussian(mean, cov) : float - multiply pdf with a specified Gaussian and integrate over the whole - domain - kde.integrate_box_1d(low, high) : float - integrate pdf (1D only) between two bounds - kde.integrate_box(low_bounds, high_bounds) : float - integrate pdf over a rectangular space between low_bounds and - high_bounds - kde.integrate_kde(other_kde) : float - integrate two kernel density estimates multiplied together - kde.resample(size=None) : array - randomly sample a dataset from the estimated pdf. - kde.covariance_factor() : float - computes the coefficient that multiplies the data covariance matrix to - obtain the kernel covariance matrix. Set this method to - kde.scotts_factor or kde.silverman_factor (or subclass to provide your - own). The default is scotts_factor. - - Parameters - ---------- - dataset : (# of dims, # of data)-array - datapoints to estimate from - - """ - - def __init__(self, dataset): - self.dataset = atleast_2d(dataset) - - self.d, self.n = self.dataset.shape - - self._compute_covariance() - - - def evaluate(self, points): - """Evaluate the estimated pdf on a set of points. - - Parameters - ---------- - points : (# of dimensions, # of points)-array - Alternatively, a (# of dimensions,) vector can be passed in and - treated as a single point. - - Returns - ------- - values : (# of points,)-array - The values at each point. - - Raises - ------ - ValueError if the dimensionality of the input points is different than - the dimensionality of the KDE. 
- """ - - points = atleast_2d(points).astype(self.dataset.dtype) - - d, m = points.shape - if d != self.d: - if d == 1 and m == self.d: - # points was passed in as a row vector - points = reshape(points, (self.d, 1)) - m = 1 - else: - msg = "points have dimension %s, dataset has dimension %s" % (d, - self.d) - raise ValueError(msg) - - result = zeros((m,), points.dtype) - - if m >= self.n: - # there are more points than data, so loop over data - for i in range(self.n): - diff = self.dataset[:,i,newaxis] - points - tdiff = dot(self.inv_cov, diff) - energy = sum(diff*tdiff,axis=0)/2.0 - result += exp(-energy) - else: - # loop over points - for i in range(m): - diff = self.dataset - points[:,i,newaxis] - tdiff = dot(self.inv_cov, diff) - energy = sum(diff*tdiff,axis=0)/2.0 - result[i] = sum(exp(-energy),axis=0) - - result /= self._norm_factor - - return result - - __call__ = evaluate - - def integrate_gaussian(self, mean, cov): - """Multiply estimated density by a multivariate Gaussian and integrate - over the wholespace. - - Parameters - ---------- - mean : vector - the mean of the Gaussian - cov : matrix - the covariance matrix of the Gaussian - - Returns - ------- - result : scalar - the value of the integral - - Raises - ------ - ValueError if the mean or covariance of the input Gaussian differs from - the KDE's dimensionality. 
- """ - - mean = atleast_1d(squeeze(mean)) - cov = atleast_2d(cov) - - if mean.shape != (self.d,): - raise ValueError("mean does not have dimension %s" % self.d) - if cov.shape != (self.d, self.d): - raise ValueError("covariance does not have dimension %s" % self.d) - - # make mean a column vector - mean = mean[:,newaxis] - - sum_cov = self.covariance + cov - - diff = self.dataset - mean - tdiff = dot(linalg.inv(sum_cov), diff) - - energies = sum(diff*tdiff,axis=0)/2.0 - result = sum(exp(-energies),axis=0)/sqrt(linalg.det(2*pi*sum_cov))/self.n - - return result - - def integrate_box_1d(self, low, high): - """Computes the integral of a 1D pdf between two bounds. - - Parameters - ---------- - low : scalar - lower bound of integration - high : scalar - upper bound of integration - - Returns - ------- - value : scalar - the result of the integral - - Raises - ------ - ValueError if the KDE is over more than one dimension. - """ - if self.d != 1: - raise ValueError("integrate_box_1d() only handles 1D pdfs") - - stdev = ravel(sqrt(self.covariance))[0] - - normalized_low = ravel((low - self.dataset)/stdev) - normalized_high = ravel((high - self.dataset)/stdev) - - value = np.mean(special.ndtr(normalized_high) - - special.ndtr(normalized_low)) - return value - - - def integrate_box(self, low_bounds, high_bounds, maxpts=None): - """Computes the integral of a pdf over a rectangular interval. 
- - Parameters - ---------- - low_bounds : vector - lower bounds of integration - high_bounds : vector - upper bounds of integration - maxpts=None : int - maximum number of points to use for integration - - Returns - ------- - value : scalar - the result of the integral - """ - if maxpts is not None: - extra_kwds = {'maxpts': maxpts} - else: - extra_kwds = {} - - value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset, - self.covariance, **extra_kwds) - if inform: - msg = ('an integral in mvn.mvnun requires more points than %s' % - (self.d*1000)) - warnings.warn(msg) - - return value - - def integrate_kde(self, other): - """Computes the integral of the product of this kernel density estimate - with another. - - Parameters - ---------- - other : gaussian_kde instance - the other kde - - Returns - ------- - value : scalar - the result of the integral - - Raises - ------ - ValueError if the KDEs have different dimensionality. - """ - - if other.d != self.d: - raise ValueError("KDEs are not the same dimensionality") - - # we want to iterate over the smallest number of points - if other.n < self.n: - small = other - large = self - else: - small = self - large = other - - sum_cov = small.covariance + large.covariance - result = 0.0 - for i in range(small.n): - mean = small.dataset[:,i,newaxis] - diff = large.dataset - mean - tdiff = dot(linalg.inv(sum_cov), diff) - - energies = sum(diff*tdiff,axis=0)/2.0 - result += sum(exp(-energies),axis=0) - - result /= sqrt(linalg.det(2*pi*sum_cov))*large.n*small.n - - return result - - def resample(self, size=None): - """Randomly sample a dataset from the estimated pdf. - - Parameters - ---------- - size : int, optional - The number of samples to draw. - If not provided, then the size is the same as the underlying - dataset. 
- - Returns - ------- - dataset : (self.d, size)-array - sampled dataset - """ - - if size is None: - size = self.n - - norm = transpose(multivariate_normal(zeros((self.d,), float), - self.covariance, size=size)) - indices = randint(0, self.n, size=size) - means = self.dataset[:,indices] - - return means + norm - - - def scotts_factor(self): - return power(self.n, -1./(self.d+4)) - - def silverman_factor(self): - return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4)) - - # This can be replaced with silverman_factor if one wants to use Silverman's - # rule for choosing the bandwidth of the kernels. - covariance_factor = scotts_factor - - def _compute_covariance(self): - """Computes the covariance matrix for each Gaussian kernel using - covariance_factor - """ - self.factor = self.covariance_factor() - self.covariance = atleast_2d(np.cov(self.dataset, rowvar=1, bias=False) * - self.factor * self.factor) - self.inv_cov = linalg.inv(self.covariance) - self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n diff --git a/scipy-0.10.1/scipy/stats/morestats.py b/scipy-0.10.1/scipy/stats/morestats.py deleted file mode 100644 index bc347cc0f1..0000000000 --- a/scipy-0.10.1/scipy/stats/morestats.py +++ /dev/null @@ -1,1430 +0,0 @@ -# Author: Travis Oliphant, 2002 -# -# Further updates and enhancements by many SciPy developers. 
-# - -import math -import statlib -import stats -from stats import find_repeats -import distributions -from numpy import isscalar, r_, log, sum, around, unique, asarray -from numpy import zeros, arange, sort, amin, amax, any, where, \ - atleast_1d, sqrt, ceil, floor, array, poly1d, compress, not_equal, \ - pi, exp, ravel, angle -import scipy -import numpy as np -import types -import scipy.optimize as optimize -import scipy.special as special -import futil -from numpy.testing.decorators import setastest -import warnings - -__all__ = ['mvsdist', - 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', - 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', - 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', - 'fligner', 'mood', 'oneway', 'wilcoxon', - 'pdf_fromgamma', 'circmean', 'circvar', 'circstd', - ] - - -def bayes_mvs(data, alpha=0.90): - """Bayesian confidence intervals for the mean, var, and std. - - Parameters - ---------- - data : array_like - Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. - Requires 2 or more data points. - alpha : float, optional - Probability that the returned confidence interval contains - the true parameter. - - Returns - ------- - Returns a 3 output arguments for each of mean, variance, and standard deviation. - Each of the outputs is a pair: - (center, (lower, upper)) - with center the mean of the conditional pdf of the value given the data - and (lower, upper) is a confidence interval centered on the median, - containing the estimate to a probability alpha. - - mctr, (ma, mb) : - Estimates for mean - vctr, (va, vb) : - Estimates for variance - sctr, (sa, sb) : - Estimates for standard deviation - - Notes - ----- - Converts data to 1-D and assumes all data has the same mean and variance. - Uses Jeffrey's prior for variance and std. - - Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat)) - - References - ---------- - T.E. 
Oliphant, "A Bayesian perspective on estimating mean, variance, and - standard-deviation from data", http://hdl.handle.net/1877/438, 2006. - - """ - res = mvsdist(data) - if alpha >= 1 or alpha <= 0: - raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) - return tuple((x.mean(), x.interval(alpha)) for x in res) - -def mvsdist(data): - """ - 'Frozen' distributions for mean, variance, and standard deviation of data. - - Parameters - ---------- - data : array_like - Input array. Converted to 1-D using ravel. - Requires 2 or more data-points. - - Returns - ------- - mdist : "frozen" distribution object - Distribution object representing the mean of the data - vdist : "frozen" distribution object - Distribution object representing the variance of the data - sdist : "frozen" distribution object - Distribution object representing the standard deviation of the data - - Notes - ----- - The return values from bayes_mvs(data) is equivalent to - ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. - - In other words, calling ``.mean()`` and ``.interval(0.90)`` - on the three distribution objects returned from this function will give - the same results that are returned from `bayes_mvs`. 
- - Examples - -------- - >>> from scipy.stats import mvsdist - >>> data = [6, 9, 12, 7, 8, 8, 13] - >>> mean, var, std = mvsdist(data) - - We now have frozen distribution objects "mean", "var" and "std" that we can - examine: - - >>> mean.mean() - 9.0 - >>> mean.interval(0.95) - (6.6120585482655692, 11.387941451734431) - >>> mean.std() - 1.1952286093343936 - - """ - x = ravel(data) - n = len(x) - if (n < 2): - raise ValueError("Need at least 2 data-points.") - xbar = x.mean() - C = x.var() - if (n > 1000): # gaussian approximations for large n - mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n)) - sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n))) - vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C) - else: - nm1 = n-1 - fac = n*C/2. - val = nm1/2. - mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1)) - sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac)) - vdist = distributions.invgamma(val,scale=fac) - return mdist, vdist, sdist - - -def kstat(data,n=2): - """ - Return the nth k-statistic (1<=n<=4 so far). - - The nth k-statistic is the unique symmetric unbiased estimator of the nth - cumulant kappa_n. - - Parameters - ---------- - data : array_like - Input array. - n : int, {1, 2, 3, 4}, optional - Default is equal to 2. - - Returns - ------- - kstat : float - The nth k-statistic. - - See Also - -------- - kstatvar: Returns an unbiased estimator of the variance of the k-statistic. - - Notes - ----- - The cumulants are related to central moments but are specifically defined - using a power series expansion of the logarithm of the characteristic - function (which is the Fourier transform of the PDF). - In particular let phi(t) be the characteristic function, then:: - - ln phi(t) = > kappa_n (it)^n / n! 
(sum from n=0 to inf) - - The first few cumulants (kappa_n) in terms of central moments (mu_n) are:: - - kappa_1 = mu_1 - kappa_2 = mu_2 - kappa_3 = mu_3 - kappa_4 = mu_4 - 3*mu_2**2 - kappa_5 = mu_5 - 10*mu_2 * mu_3 - - References - ---------- - http://mathworld.wolfram.com/k-Statistic.html - - http://mathworld.wolfram.com/Cumulant.html - - """ - if n > 4 or n < 1: - raise ValueError("k-statistics only supported for 1<=n<=4") - n = int(n) - S = zeros(n+1,'d') - data = ravel(data) - N = len(data) - for k in range(1,n+1): - S[k] = sum(data**k,axis=0) - if n==1: - return S[1]*1.0/N - elif n==2: - return (N*S[2]-S[1]**2.0)/(N*(N-1.0)) - elif n==3: - return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0)) - elif n==4: - return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - \ - 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \ - (N*(N-1.0)*(N-2.0)*(N-3.0)) - else: - raise ValueError("Should not be here.") - -def kstatvar(data,n=2): - """ - Returns an unbiased estimator of the variance of the k-statistic. - - See `kstat` for more details of the k-statistic. - - Parameters - ---------- - data : array_like - Input array. - n : int, {1, 2}, optional - Default is equal to 2. - - Returns - ------- - kstatvar : float - The nth k-statistic variance. - - See Also - -------- - kstat - - """ - data = ravel(data) - N = len(data) - if n == 1: - return kstat(data,n=2)*1.0/N - elif n == 2: - k2 = kstat(data,n=2) - k4 = kstat(data,n=4) - return (2*k2*k2*N + (N-1)*k4)/(N*(N+1)) - else: - raise ValueError("Only n=1 or n=2 supported.") - - -def probplot(x, sparams=(), dist='norm', fit=True, plot=None): - """ - Calculate quantiles for a probability plot of sample data against a - specified theoretical distribution. - - `probplot` optionally calculates a best-fit line for the data and plots the - results using Matplotlib or a given plot function. - - Parameters - ---------- - x : array_like - Sample/response data from which `probplot` creates the plot. 
- sparams : tuple, optional - Distribution-specific shape parameters (location(s) and scale(s)). - dist : str, optional - Distribution function name. The default is 'norm' for a normal - probability plot. - fit : bool, optional - Fit a least-squares regression (best-fit) line to the sample data if - True (default). - plot : object, optional - If given, plots the quantiles and least squares fit. - `plot` is an object with methods "plot", "title", "xlabel", "ylabel" - and "text". The matplotlib.pyplot module or a Matplotlib axes object can - be used, or a custom object with the same methods. - By default, no plot is created. - - Notes - ----- - Even if `plot` is given, the figure is not shown or saved by `probplot`; - ``plot.show()`` or ``plot.savefig('figname.png')`` should be used after - calling `probplot`. - - Returns - ------- - (osm, osr) : tuple of ndarrays - Tuple of theoretical quantiles (osm, or order statistic medians) and - ordered responses (osr). - (slope, intercept, r) : tuple of floats, optional - Tuple containing the result of the least-squares fit, if that is - performed by `probplot`. `r` is the square root of the coefficient of - determination. If ``fit=False`` and ``plot=None``, this tuple is not - returned. 
- - Examples - -------- - >>> import scipy.stats as stats - >>> nsample = 100 - >>> np.random.seed(7654321) - - A t distribution with small degrees of freedom: - - >>> ax1 = plt.subplot(221) - >>> x = stats.t.rvs(3, size=nsample) - >>> res = stats.probplot(x, plot=plt) - - A t distribution with larger degrees of freedom: - - >>> ax2 = plt.subplot(222) - >>> x = stats.t.rvs(25, size=nsample) - >>> res = stats.probplot(x, plot=plt) - - A mixture of 2 normal distributions with broadcasting: - - >>> ax3 = plt.subplot(223) - >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], size=(nsample/2.,2)).ravel() - >>> res = stats.probplot(x, plot=plt) - - A standard normal distribution: - - >>> ax4 = plt.subplot(224) - >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) - >>> res = stats.probplot(x, plot=plt) - - """ - N = len(x) - Ui = zeros(N) * 1.0 - Ui[-1] = 0.5**(1.0 /N) - Ui[0] = 1 - Ui[-1] - i = arange(2, N) - Ui[1:-1] = (i - 0.3175) / (N + 0.365) - try: - ppf_func = eval('distributions.%s.ppf' % dist) - except AttributeError: - raise ValueError("%s is not a valid distribution with a ppf." % dist) - if sparams is None: - sparams = () - if isscalar(sparams): - sparams = (sparams,) - if not isinstance(sparams, types.TupleType): - sparams = tuple(sparams) - """ - res = inspect.getargspec(ppf_func) - if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and \ - 0.0==res[-1][-2] and 1.0==res[-1][-1]): - raise ValueError("Function has does not have default location " - "and scale parameters\n that are 0.0 and 1.0 respectively.") - if (len(sparams) < len(res[0])-len(res[-1])-1) or \ - (len(sparams) > len(res[0])-3): - raise ValueError("Incorrect number of shape parameters.") - """ - osm = ppf_func(Ui, *sparams) - osr = sort(x) - if fit or (plot is not None): - # perform a linear fit. 
- slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) - if plot is not None: - plot.plot(osm, osr, 'o', osm, slope*osm + intercept) - plot.title('Probability Plot') - plot.xlabel('Quantiles') - plot.ylabel('Ordered Values') - - xmin = amin(osm) - xmax = amax(osm) - ymin = amin(x) - ymax = amax(x) - posx = xmin + 0.70 * (xmax - xmin) - posy = ymin + 0.01 * (ymax - ymin) - plot.text(posx, posy, "r^2=%1.4f" % r) - if fit: - return (osm, osr), (slope, intercept, r) - else: - return osm, osr - -def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'): - """Returns the shape parameter that maximizes the probability plot - correlation coefficient for the given data to a one-parameter - family of distributions. - - See also ppcc_plot - """ - try: - ppf_func = eval('distributions.%s.ppf'%dist) - except AttributeError: - raise ValueError("%s is not a valid distribution with a ppf." % dist) - """ - res = inspect.getargspec(ppf_func) - if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and \ - 0.0==res[-1][-2] and 1.0==res[-1][-1]): - raise ValueError("Function has does not have default location " - "and scale parameters\n that are 0.0 and 1.0 respectively.") - if (1 < len(res[0])-len(res[-1])-1) or \ - (1 > len(res[0])-3): - raise ValueError("Must be a one-parameter family.") - """ - N = len(x) - # compute uniform median statistics - Ui = zeros(N)*1.0 - Ui[-1] = 0.5**(1.0/N) - Ui[0] = 1-Ui[-1] - i = arange(2,N) - Ui[1:-1] = (i-0.3175)/(N+0.365) - osr = sort(x) - # this function computes the x-axis values of the probability plot - # and computes a linear regression (including the correlation) - # and returns 1-r so that a minimization function maximizes the - # correlation - def tempfunc(shape, mi, yvals, func): - xvals = func(mi, shape) - r, prob = stats.pearsonr(xvals, yvals) - return 1-r - return optimize.brent(tempfunc, brack=brack, args=(Ui, osr, ppf_func)) - -def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80): - """Returns (shape, ppcc), and optionally 
plots shape vs. ppcc - (probability plot correlation coefficient) as a function of shape - parameter for a one-parameter family of distributions from shape - value a to b. - - See also ppcc_max - """ - svals = r_[a:b:complex(N)] - ppcc = svals*0.0 - k=0 - for sval in svals: - r1,r2 = probplot(x,sval,dist=dist,fit=1) - ppcc[k] = r2[-1] - k += 1 - if plot is not None: - plot.plot(svals, ppcc, 'x') - plot.title('(%s) PPCC Plot' % dist) - plot.xlabel('Prob Plot Corr. Coef.')#,deltay=-0.01) - plot.ylabel('Shape Values')#,deltax=-0.01) - return svals, ppcc - -def boxcox_llf(lmb, data): - """The boxcox log-likelihood function. - """ - N = len(data) - y = boxcox(data,lmb) - my = np.mean(y, axis=0) - f = (lmb-1)*sum(log(data),axis=0) - f -= N/2.0*log(sum((y-my)**2.0/N,axis=0)) - return f - -def _boxcox_conf_interval(x, lmax, alpha): - # Need to find the lambda for which - # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 - fac = 0.5*distributions.chi2.ppf(1-alpha,1) - target = boxcox_llf(lmax,x)-fac - def rootfunc(lmbda,data,target): - return boxcox_llf(lmbda,data) - target - # Find positive endpont - newlm = lmax+0.5 - N = 0 - while (rootfunc(newlm,x,target) > 0.0) and (N < 500): - newlm += 0.1 - N +=1 - if N == 500: - raise RuntimeError("Could not find endpoint.") - lmplus = optimize.brentq(rootfunc,lmax,newlm,args=(x,target)) - newlm = lmax-0.5 - N = 0 - while (rootfunc(newlm,x,target) > 0.0) and (N < 500): - newlm += 0.1 - N +=1 - if N == 500: - raise RuntimeError("Could not find endpoint.") - lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x,target)) - return lmminus, lmplus - -def boxcox(x,lmbda=None,alpha=None): - """Return a positive dataset tranformed by a Box-Cox power transformation. - - If lmbda is not None, do the transformation for that value. - - If lmbda is None, find the lambda that maximizes the log-likelihood - function and return it as the second output argument. 
- - If alpha is not None, return the 100(1-alpha)% confidence interval for - lambda as the third output argument. - """ - if any(x < 0): - raise ValueError("Data must be positive.") - if lmbda is not None: # single transformation - lmbda = lmbda*(x==x) - y = where(lmbda == 0, log(x), (x**lmbda - 1)/lmbda) - return y - # Otherwise find the lmbda that maximizes the log-likelihood function. - def tempfunc(lmb, data): # function to minimize - return -boxcox_llf(lmb,data) - lmax = optimize.brent(tempfunc, brack=(-2.0,2.0),args=(x,)) - y = boxcox(x, lmax) - if alpha is None: - return y, lmax - # Otherwise find confidence interval - interval = _boxcox_conf_interval(x, lmax, alpha) - return y, lmax, interval - - -def boxcox_normmax(x,brack=(-1.0,1.0)): - N = len(x) - # compute uniform median statistics - Ui = zeros(N)*1.0 - Ui[-1] = 0.5**(1.0/N) - Ui[0] = 1-Ui[-1] - i = arange(2,N) - Ui[1:-1] = (i-0.3175)/(N+0.365) - # this function computes the x-axis values of the probability plot - # and computes a linear regression (including the correlation) - # and returns 1-r so that a minimization function maximizes the - # correlation - xvals = distributions.norm.ppf(Ui) - def tempfunc(lmbda, xvals, samps): - y = boxcox(samps,lmbda) - yvals = sort(y) - r, prob = stats.pearsonr(xvals, yvals) - return 1-r - return optimize.brent(tempfunc, brack=brack, args=(xvals, x)) - - -def boxcox_normplot(x,la,lb,plot=None,N=80): - svals = r_[la:lb:complex(N)] - ppcc = svals*0.0 - k = 0 - for sval in svals: - #JP: this doesn't use sval, creates constant ppcc, and horizontal line - z = boxcox(x,sval) #JP: this was missing - r1,r2 = probplot(z,dist='norm',fit=1) - ppcc[k] = r2[-1] - k +=1 - if plot is not None: - plot.plot(svals, ppcc, 'x') - plot.title('Box-Cox Normality Plot') - plot.xlabel('Prob Plot Corr. Coef.') - plot.ylabel('Transformation parameter') - return svals, ppcc - -def shapiro(x,a=None,reta=False): - """ - Perform the Shapiro-Wilk test for normality. 
- - The Shapiro-Wilk test tests the null hypothesis that the - data was drawn from a normal distribution. - - Parameters - ---------- - x : array_like - Array of sample data. - a : array_like, optional - Array of internal parameters used in the calculation. If these - are not given, they will be computed internally. If x has length - n, then a must have length n/2. - reta : bool, optional - Whether or not to return the internally computed a values. The - default is False. - - Returns - ------- - W : float - The test statistic. - p-value : float - The p-value for the hypothesis test. - a : array_like, optional - If `reta` is True, then these are the internally computed "a" - values that may be passed into this function on future calls. - - See Also - -------- - anderson : The Anderson-Darling test for normality - - References - ---------- - .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm - - """ - N = len(x) - if N < 3: - raise ValueError("Data must be at least length 3.") - if a is None: - a = zeros(N,'f') - init = 0 - else: - if len(a) != N//2: - raise ValueError("len(a) must equal len(x)/2") - init = 1 - y = sort(x) - a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) - if not ifault in [0,2]: - warnings.warn(str(ifault)) - if N > 5000: - warnings.warn("p-value may not be accurate for N > 5000.") - if reta: - return w, pw, a - else: - return w, pw - -# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and -# Some Comparisons", Journal of he American Statistical -# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 -_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) -_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) -# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", -# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. 
-_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) -# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based -# on the Empirical Distribution Function.", Biometrika, -# Vol. 66, Issue 3, Dec. 1979, pp 591-595. -_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) -def anderson(x,dist='norm'): - """ - Anderson-Darling test for data coming from a particular distribution - - The Anderson-Darling test is a modification of the Kolmogorov- - Smirnov test kstest_ for the null hypothesis that a sample is - drawn from a population that follows a particular distribution. - For the Anderson-Darling test, the critical values depend on - which distribution is being tested against. This function works - for normal, exponential, logistic, or Gumbel (Extreme Value - Type I) distributions. - - Parameters - ---------- - x : array_like - array of sample data - dist : {'norm','expon','logistic','gumbel','extreme1'}, optional - the type of distribution to test against. The default is 'norm' - and 'extreme1' is a synonym for 'gumbel' - - Returns - ------- - A2 : float - The Anderson-Darling test statistic - critical : list - The critical values for this distribution - sig : list - The significance levels for the corresponding critical values - in percents. The function returns critical values for a - differing set of significance levels depending on the - distribution that is being tested against. - - Notes - ----- - Critical values provided are for the following significance levels: - - normal/exponenential - 15%, 10%, 5%, 2.5%, 1% - logistic - 25%, 10%, 5%, 2.5%, 1%, 0.5% - Gumbel - 25%, 10%, 5%, 2.5%, 1% - - If A2 is larger than these critical values then for the corresponding - significance level, the null hypothesis that the data come from the - chosen distribution can be rejected. - - References - ---------- - .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm - .. [2] Stephens, M. A. (1974). 
EDF Statistics for Goodness of Fit and - Some Comparisons, Journal of the American Statistical Association, - Vol. 69, pp. 730-737. - .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit - Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, - pp. 357-369. - .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value - Distribution, Biometrika, Vol. 64, pp. 583-588. - .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference - to Tests for Exponentiality , Technical Report No. 262, - Department of Statistics, Stanford University, Stanford, CA. - .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution - Based on the Empirical Distribution Function, Biometrika, Vol. 66, - pp. 591-595. - - """ - if not dist in ['norm','expon','gumbel','extreme1','logistic']: - raise ValueError("Invalid distribution; dist must be 'norm', " - "'expon', 'gumbel', 'extreme1' or 'logistic'.") - y = sort(x) - xbar = np.mean(x, axis=0) - N = len(y) - if dist == 'norm': - s = np.std(x, ddof=1, axis=0) - w = (y-xbar)/s - z = distributions.norm.cdf(w) - sig = array([15,10,5,2.5,1]) - critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3) - elif dist == 'expon': - w = y / xbar - z = distributions.expon.cdf(w) - sig = array([15,10,5,2.5,1]) - critical = around(_Avals_expon / (1.0 + 0.6/N),3) - elif dist == 'logistic': - def rootfunc(ab,xj,N): - a,b = ab - tmp = (xj-a)/b - tmp2 = exp(tmp) - val = [sum(1.0/(1+tmp2),axis=0)-0.5*N, - sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N] - return array(val) - sol0=array([xbar,np.std(x, ddof=1, axis=0)]) - sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5) - w = (y-sol[0])/sol[1] - z = distributions.logistic.cdf(w) - sig = array([25,10,5,2.5,1,0.5]) - critical = around(_Avals_logistic / (1.0+0.25/N),3) - else: # (dist == 'gumbel') or (dist == 'extreme1'): - #the following is incorrect, see ticket:1097 -## def fixedsolve(th,xj,N): -## val = stats.sum(xj)*1.0/N -## tmp = exp(-xj/th) -## 
term = sum(xj*tmp,axis=0) -## term /= sum(tmp,axis=0) -## return val - term -## s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5) -## xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N) - xbar, s = distributions.gumbel_l.fit(x) - w = (y-xbar)/s - z = distributions.gumbel_l.cdf(w) - sig = array([25,10,5,2.5,1]) - critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3) - - i = arange(1,N+1) - S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0) - A2 = -N-S - return A2, critical, sig - - -def ansari(x,y): - """ - Perform the Ansari-Bradley test for equal scale parameters - - The Ansari-Bradley test is a non-parametric test for the equality - of the scale parameter of the distributions from which two - samples were drawn. - - Parameters - ---------- - x, y : array_like - arrays of sample data - - Returns - ------- - p-value : float - The p-value of the hypothesis test - - See Also - -------- - fligner : A non-parametric test for the equality of k variances - mood : A non-parametric test for the equality of two scale parameters - - Notes - ----- - The p-value given is exact when the sample sizes are both less than - 55 and there are no ties, otherwise a normal approximation for the - p-value is used. - - References - ---------- - .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical - methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. 
- - """ - x,y = asarray(x),asarray(y) - n = len(x) - m = len(y) - if m < 1: - raise ValueError("Not enough other observations.") - if n < 1: - raise ValueError("Not enough test observations.") - N = m+n - xy = r_[x,y] # combine - rank = stats.rankdata(xy) - symrank = amin(array((rank,N-rank+1)),0) - AB = sum(symrank[:n],axis=0) - uxy = unique(xy) - repeats = (len(uxy) != len(xy)) - exact = ((m<55) and (n<55) and not repeats) - if repeats and ((m < 55) or (n < 55)): - warnings.warn("Ties preclude use of exact statistic.") - if exact: - astart, a1, ifault = statlib.gscale(n,m) - ind = AB-astart - total = sum(a1,axis=0) - if ind < len(a1)/2.0: - cind = int(ceil(ind)) - if (ind == cind): - pval = 2.0*sum(a1[:cind+1],axis=0)/total - else: - pval = 2.0*sum(a1[:cind],axis=0)/total - else: - find = int(floor(ind)) - if (ind == floor(ind)): - pval = 2.0*sum(a1[find:],axis=0)/total - else: - pval = 2.0*sum(a1[find+1:],axis=0)/total - return AB, min(1.0,pval) - - # otherwise compute normal approximation - if N % 2: # N odd - mnAB = n*(N+1.0)**2 / 4.0 / N - varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2) - else: - mnAB = n*(N+2.0)/4.0 - varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0) - if repeats: # adjust variance estimates - # compute sum(tj * rj**2,axis=0) - fac = sum(symrank**2,axis=0) - if N % 2: # N odd - varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1)) - else: # N even - varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1)) - z = (AB - mnAB)/sqrt(varAB) - pval = distributions.norm.sf(abs(z)) * 2.0 - return AB, pval - -def bartlett(*args): - """ - Perform Bartlett's test for equal variances - - Bartlett's test tests the null hypothesis that all input samples - are from populations with equal variances. For samples - from significantly non-normal populations, Levene's test - `levene`_ is more robust. - - Parameters - ---------- - sample1, sample2,... : array_like - arrays of sample data. May be different lengths. - - Returns - ------- - T : float - The test statistic. 
- p-value : float - The p-value of the test. - - References - ---------- - .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm - - .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical - Methods, Eighth Edition, Iowa State University Press. - - """ - k = len(args) - if k < 2: - raise ValueError("Must enter at least two input sample vectors.") - Ni = zeros(k) - ssq = zeros(k,'d') - for j in range(k): - Ni[j] = len(args[j]) - ssq[j] = np.var(args[j], ddof=1) - Ntot = sum(Ni,axis=0) - spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k)) - numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0) - denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k)) - T = numer / denom - pval = distributions.chi2.sf(T,k-1) # 1 - cdf - return T, pval - - -def levene(*args,**kwds): - """ - Perform Levene test for equal variances. - - The Levene test tests the null hypothesis that all input samples - are from populations with equal variances. Levene's test is an - alternative to Bartlett's test `bartlett` in the case where - there are significant deviations from normality. - - Parameters - ---------- - sample1, sample2, ... : array_like - The sample data, possibly with different lengths - center : {'mean', 'median', 'trimmed'}, optional - Which function of the data to use in the test. The default - is 'median'. - proportiontocut : float, optional - When `center` is 'trimmed', this gives the proportion of data points - to cut from each end. (See `scipy.stats.trim_mean`.) - Default is 0.05. - - Returns - ------- - W : float - The test statistic. - p-value : float - The p-value for the test. - - Notes - ----- - Three variations of Levene's test are possible. The possibilities - and their recommended usages are: - - * 'median' : Recommended for skewed (non-normal) distributions> - * 'mean' : Recommended for symmetric, moderate-tailed distributions. - * 'trimmed' : Recommended for heavy-tailed distributions. - - References - ---------- - .. 
[1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm - .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: - Essays in Honor of Harold Hotelling, I. Olkin et al. eds., - Stanford University Press, pp. 278-292. - .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American - Statistical Association, 69, 364-367 - - """ - # Handle keyword arguments. - center = 'median' - proportiontocut = 0.05 - for kw, value in kwds.items(): - if kw not in ['center', 'proportiontocut']: - raise TypeError("levene() got an unexpected keyword argument '%s'" % kw) - if kw == 'center': - center = value - else: - proportiontocut = value - - k = len(args) - if k < 2: - raise ValueError("Must enter at least two input sample vectors.") - Ni = zeros(k) - Yci = zeros(k,'d') - - if not center in ['mean','median','trimmed']: - raise ValueError("Keyword argument
    must be 'mean', 'median'" - + "or 'trimmed'.") - - if center == 'median': - func = lambda x: np.median(x, axis=0) - elif center == 'mean': - func = lambda x: np.mean(x, axis=0) - else: # center == 'trimmed' - args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) - func = lambda x: np.mean(x, axis=0) - - for j in range(k): - Ni[j] = len(args[j]) - Yci[j] = func(args[j]) - Ntot = sum(Ni,axis=0) - - # compute Zij's - Zij = [None]*k - for i in range(k): - Zij[i] = abs(asarray(args[i])-Yci[i]) - # compute Zbari - Zbari = zeros(k,'d') - Zbar = 0.0 - for i in range(k): - Zbari[i] = np.mean(Zij[i], axis=0) - Zbar += Zbari[i]*Ni[i] - Zbar /= Ntot - - numer = (Ntot-k)*sum(Ni*(Zbari-Zbar)**2,axis=0) - - # compute denom_variance - dvar = 0.0 - for i in range(k): - dvar += sum((Zij[i]-Zbari[i])**2,axis=0) - - denom = (k-1.0)*dvar - - W = numer / denom - pval = distributions.f.sf(W,k-1,Ntot-k) # 1 - cdf - return W, pval - -@setastest(False) -def binom_test(x,n=None,p=0.5): - """ - Perform a test that the probability of success is p. - - This is an exact, two-sided test of the null hypothesis - that the probability of success in a Bernoulli experiment - is `p`. - - Parameters - ---------- - x : integer or array_like - the number of successes, or if x has length 2, it is the - number of successes and the number of failures. - n : integer - the number of trials. This is ignored if x gives both the - number of successes and failures - p : float, optional - The hypothesized probability of success. 0 <= p <= 1. The - default value is p = 0.5 - - Returns - ------- - p-value : float - The p-value of the hypothesis test - - References - ---------- - .. 
[1] http://en.wikipedia.org/wiki/Binomial_test - - """ - x = atleast_1d(x).astype(np.integer) - if len(x) == 2: - n = x[1]+x[0] - x = x[0] - elif len(x) == 1: - x = x[0] - if n is None or n < x: - raise ValueError("n must be >= x") - n = np.int_(n) - else: - raise ValueError("Incorrect length for x.") - - if (p > 1.0) or (p < 0.0): - raise ValueError("p must be in range [0,1]") - - d = distributions.binom.pmf(x,n,p) - rerr = 1+1e-7 - if (x < p*n): - i = np.arange(np.ceil(p*n),n+1) - y = np.sum(distributions.binom.pmf(i,n,p) <= d*rerr,axis=0) - pval = distributions.binom.cdf(x,n,p) + distributions.binom.sf(n-y,n,p) - else: - i = np.arange(np.floor(p*n)) - y = np.sum(distributions.binom.pmf(i,n,p) <= d*rerr,axis=0) - pval = distributions.binom.cdf(y-1,n,p) + distributions.binom.sf(x-1,n,p) - - return min(1.0,pval) - -def _apply_func(x,g,func): - # g is list of indices into x - # separating x into different groups - # func should be applied over the groups - g = unique(r_[0,g,len(x)]) - output = [] - for k in range(len(g)-1): - output.append(func(x[g[k]:g[k+1]])) - return asarray(output) - -def fligner(*args,**kwds): - """ - Perform Fligner's test for equal variances. - - Fligner's test tests the null hypothesis that all input samples - are from populations with equal variances. Fligner's test is - non-parametric in contrast to Bartlett's test `bartlett` and - Levene's test `levene`. - - Parameters - ---------- - sample1, sample2, ... : array_like - arrays of sample data. Need not be the same length - center : {'mean', 'median', 'trimmed'}, optional - keyword argument controlling which function of the data - is used in computing the test statistic. The default - is 'median'. - proportiontocut : float, optional - When `center` is 'trimmed', this gives the proportion of data points - to cut from each end. (See `scipy.stats.trim_mean`.) - Default is 0.05. 
- - Returns - ------- - Xsq : float - the test statistic - p-value : float - the p-value for the hypothesis test - - Notes - ----- - As with Levene's test there are three variants - of Fligner's test that differ by the measure of central - tendency used in the test. See `levene` for more information. - - References - ---------- - .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps - - .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample - tests for scale. 'Journal of the American Statistical Association.' - 71(353), 210-213. - - """ - # Handle keyword arguments. - center = 'median' - proportiontocut = 0.05 - for kw, value in kwds.items(): - if kw not in ['center', 'proportiontocut']: - raise TypeError("fligner() got an unexpected keyword argument '%s'" % kw) - if kw == 'center': - center = value - else: - proportiontocut = value - - k = len(args) - if k < 2: - raise ValueError("Must enter at least two input sample vectors.") - - if not center in ['mean','median','trimmed']: - raise ValueError("Keyword argument
    must be 'mean', 'median'" - + "or 'trimmed'.") - - if center == 'median': - func = lambda x: np.median(x, axis=0) - elif center == 'mean': - func = lambda x: np.mean(x, axis=0) - else: # center == 'trimmed' - args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) - func = lambda x: np.mean(x, axis=0) - - Ni = asarray([len(args[j]) for j in range(k)]) - Yci = asarray([func(args[j]) for j in range(k)]) - Ntot = sum(Ni,axis=0) - # compute Zij's - Zij = [abs(asarray(args[i])-Yci[i]) for i in range(k)] - allZij = [] - g = [0] - for i in range(k): - allZij.extend(list(Zij[i])) - g.append(len(allZij)) - - ranks = stats.rankdata(allZij) - a = distributions.norm.ppf(ranks/(2*(Ntot+1.0)) + 0.5) - - # compute Aibar - Aibar = _apply_func(a,g,sum) / Ni - anbar = np.mean(a, axis=0) - varsq = np.var(a,axis=0, ddof=1) - Xsq = sum(Ni*(asarray(Aibar)-anbar)**2.0,axis=0)/varsq - pval = distributions.chi2.sf(Xsq,k-1) # 1 - cdf - return Xsq, pval - - -def mood(x,y): - """ - Perform Mood's test for equal scale parameters. - - Mood's two-sample test for scale parameters is a non-parametric - test for the null hypothesis that two samples are drawn from the - same distribution with the same scale parameter. - - Parameters - ---------- - x, y : array_like - Arrays of sample data. - - Returns - ------- - p-value : float - The p-value for the hypothesis test. - - See Also - -------- - fligner : A non-parametric test for the equality of k variances - ansari : A non-parametric test for the equality of 2 variances - bartlett : A parametric test for equality of k variances in normal samples - levene : A parametric test for equality of k variances - - Notes - ----- - The data are assumed to be drawn from probability distributions f(x) and - f(x/s)/s respectively, for some probability density function f. The - null hypothesis is that s = 1. 
- - """ - n = len(x) - m = len(y) - xy = r_[x,y] - N = m+n - if N < 3: - raise ValueError("Not enough observations.") - ranks = stats.rankdata(xy) - Ri = ranks[:n] - M = sum((Ri - (N+1.0)/2)**2,axis=0) - # Approx stat. - mnM = n*(N*N-1.0)/12 - varM = m*n*(N+1.0)*(N+2)*(N-2)/180 - z = (M-mnM)/sqrt(varM) - - # Numerically better than p = norm.cdf(x); p = min(p, 1 - p) - if z > 0: - pval = distributions.norm.sf(z) - else: - pval = distributions.norm.cdf(z) - - # Account for two-sidedness - pval *= 2. - return z, pval - - -def oneway(*args,**kwds): - """Test for equal means in two or more samples from the - normal distribution. - - If the keyword parameter is true then the variances - are assumed to be equal, otherwise they are not assumed to - be equal (default). - - Return test statistic and the p-value giving the probability - of error if the null hypothesis (equal means) is rejected at this value. - """ - k = len(args) - if k < 2: - raise ValueError("Must enter at least two input sample vectors.") - if 'equal_var' in kwds.keys(): - if kwds['equal_var']: evar = 1 - else: evar = 0 - else: - evar = 0 - - Ni = array([len(args[i]) for i in range(k)]) - Mi = array([np.mean(args[i], axis=0) for i in range(k)]) - Vi = array([np.var(args[i]) for i in range(k)]) - Wi = Ni / Vi - swi = sum(Wi,axis=0) - N = sum(Ni,axis=0) - my = sum(Mi*Ni,axis=0)*1.0/N - tmp = sum((1-Wi/swi)**2 / (Ni-1.0),axis=0)/(k*k-1.0) - if evar: - F = ((sum(Ni*(Mi-my)**2,axis=0) / (k-1.0)) / (sum((Ni-1.0)*Vi,axis=0) / (N-k))) - pval = distributions.f.sf(F,k-1,N-k) # 1-cdf - else: - m = sum(Wi*Mi,axis=0)*1.0/swi - F = sum(Wi*(Mi-m)**2,axis=0) / ((k-1.0)*(1+2*(k-2)*tmp)) - pval = distributions.f.sf(F,k-1.0,1.0/(3*tmp)) - - return F, pval - - -def wilcoxon(x,y=None): - """ - Calculate the Wilcoxon signed-rank test. - - The Wilcoxon signed-rank test tests the null hypothesis that two - related samples come from the same distribution. It is a a - non-parametric version of the paired T-test. 
- - Parameters - ---------- - x : array_like - The first set of measurements. - y : array_like, optional - The second set of measurements. If y is not given, then the x array - is considered to be the differences between the two sets of - measurements. - - Returns - ------- - z-statistic : float - The test statistic under the large-sample approximation that the - signed-rank statistic is normally distributed. - p-value : float - The two-sided p-value for the test. - - Notes - ----- - Because the normal approximation is used for the calculations, the - samples used should be large. A typical rule is to require that - n > 20. - - References - ---------- - .. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test - - """ - if y is None: - d = x - else: - x, y = map(asarray, (x, y)) - if len(x) <> len(y): - raise ValueError('Unequal N in wilcoxon. Aborting.') - d = x-y - d = compress(not_equal(d,0),d,axis=-1) # Keep all non-zero differences - count = len(d) - if (count < 10): - warnings.warn("Warning: sample size too small for normal approximation.") - r = stats.rankdata(abs(d)) - r_plus = sum((d > 0)*r,axis=0) - r_minus = sum((d < 0)*r,axis=0) - T = min(r_plus, r_minus) - mn = count*(count+1.0)*0.25 - se = math.sqrt(count*(count+1)*(2*count+1.0)/24) - if (len(r) != len(unique(r))): # handle ties in data - replist, repnum = find_repeats(r) - corr = 0.0 - for i in range(len(replist)): - si = repnum[i] - corr += 0.5*si*(si*si-1.0) - V = se*se - corr - se = sqrt((count*V - T*T)/(count-1.0)) - z = (T - mn)/se - prob = 2 * distributions.norm.sf(abs(z)) - return T, prob - -def _hermnorm(N): - # return the negatively normalized hermite polynomials up to order N-1 - # (inclusive) - # using the recursive relationship - # p_n+1 = p_n(x)' - x*p_n(x) - # and p_0(x) = 1 - plist = [None]*N - plist[0] = poly1d(1) - for n in range(1,N): - plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1] - return plist - -def pdf_fromgamma(g1,g2,g3=0.0,g4=None): - if g4 is None: - g4 = 
3*g2*g2 - sigsq = 1.0/g2 - sig = sqrt(sigsq) - mu = g1*sig**3.0 - p12 = _hermnorm(13) - for k in range(13): - p12[k] = p12[k]/sig**k - - # Add all of the terms to polynomial - totp = p12[0] - (g1/6.0*p12[3]) + \ - (g2/24.0*p12[4] +g1*g1/72.0*p12[6]) - \ - (g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \ - (g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] + - g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12]) - # Final normalization - totp = totp / sqrt(2*pi)/sig - def thefunc(x): - xn = (x-mu)/sig - return totp(xn)*exp(-xn*xn/2.0) - return thefunc - -def circmean(samples, high=2*pi, low=0): - """ - Compute the circular mean for samples assumed to be in the range - [low to high]. - - Parameters - ---------- - samples : array_like - Input array. - low : float or int, optional - Low boundary for circular mean range. Default is 0. - high : float or int, optional - High boundary for circular mean range. Default is 2*pi. - - Returns - ------- - circmean : float - Circular mean. - - """ - ang = (samples - low)*2*pi / (high-low) - res = angle(np.mean(exp(1j*ang), axis=0)) - if (res < 0): - res = res + 2*pi - return res*(high-low)/2.0/pi + low - -def circvar(samples, high=2*pi, low=0): - """ - Compute the circular variance for samples assumed to be in the range - [low to high]. - - Parameters - ---------- - samples : array_like - Input array. - low : float or int, optional - Low boundary for circular variance range. Default is 0. - high : float or int, optional - High boundary for circular variance range. Default is 2*pi. - - Returns - ------- - circvar : float - Circular variance. - - """ - ang = (samples - low)*2*pi / (high-low) - res = np.mean(exp(1j*ang), axis=0) - V = 1-abs(res) - return ((high-low)/2.0/pi)**2 * V - -def circstd(samples, high=2*pi, low=0): - """ - Compute the circular standard deviation for samples assumed to be in the - range [low to high]. - - Parameters - ---------- - samples : array_like - Input array. 
- low : float or int, optional - Low boundary for circular standard deviation range. Default is 0. - high : float or int, optional - High boundary for circular standard deviation range. Default is 2*pi. - - Returns - ------- - circstd : float - Circular standard deviation. - - """ - ang = (samples - low)*2*pi / (high-low) - res = np.mean(exp(1j*ang), axis=0) - V = 1-abs(res) - return ((high-low)/2.0/pi) * sqrt(V) - - - -#Tests to include (from R) -- some of these already in stats. -######## -#X Ansari-Bradley -#X Bartlett (and Levene) -#X Binomial -#Y Pearson's Chi-squared (stats.chisquare) -#Y Association Between Paired samples (stats.pearsonr, stats.spearmanr) -# stats.kendalltau) -- these need work though -# Fisher's exact test -#X Fligner-Killeen Test -#Y Friedman Rank Sum (stats.friedmanchisquare?) -#Y Kruskal-Wallis -#Y Kolmogorov-Smirnov -# Cochran-Mantel-Haenszel Chi-Squared for Count -# McNemar's Chi-squared for Count -#X Mood Two-Sample -#X Test For Equal Means in One-Way Layout (see stats.ttest also) -# Pairwise Comparisons of proportions -# Pairwise t tests -# Tabulate p values for pairwise comparisons -# Pairwise Wilcoxon rank sum tests -# Power calculations two sample test of prop. -# Power calculations for one and two sample t tests -# Equal or Given Proportions -# Trend in Proportions -# Quade Test -#Y Student's T Test -#Y F Test to compare two variances -#XY Wilcoxon Rank Sum and Signed Rank Tests diff --git a/scipy-0.10.1/scipy/stats/mstats.py b/scipy-0.10.1/scipy/stats/mstats.py deleted file mode 100644 index 7d8c58ffa3..0000000000 --- a/scipy-0.10.1/scipy/stats/mstats.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -=================================================================== -Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) -=================================================================== - -.. 
currentmodule:: scipy.stats.mstats - -This module contains a large number of statistical functions that can -be used with masked arrays. - -Most of these functions are similar to those in scipy.stats but might -have small differences in the API or in the algorithm used. Since this -is a relatively new package, some API changes are still possible. - -.. autosummary:: - :toctree: generated/ - - argstoarray - betai - chisquare - count_tied_groups - describe - f_oneway - f_value_wilks_lambda - find_repeats - friedmanchisquare - gmean - hmean - kendalltau - kendalltau_seasonal - kruskalwallis - kruskalwallis - ks_twosamp - ks_twosamp - kurtosis - kurtosistest - linregress - mannwhitneyu - plotting_positions - mode - moment - mquantiles - msign - normaltest - obrientransform - pearsonr - plotting_positions - pointbiserialr - rankdata - scoreatpercentile - sem - signaltonoise - skew - skewtest - spearmanr - theilslopes - threshold - tmax - tmean - tmin - trim - trima - trimboth - trimmed_stde - trimr - trimtail - tsem - ttest_onesamp - ttest_ind - ttest_onesamp - ttest_rel - tvar - variation - winsorize - zmap - zscore - -""" - -from mstats_basic import * -from mstats_extras import * diff --git a/scipy-0.10.1/scipy/stats/mstats_basic.py b/scipy-0.10.1/scipy/stats/mstats_basic.py deleted file mode 100644 index 36a93cad67..0000000000 --- a/scipy-0.10.1/scipy/stats/mstats_basic.py +++ /dev/null @@ -1,1990 +0,0 @@ -""" -An extension of scipy.stats.stats to support masked arrays - -:author: Pierre GF Gerard-Marchant -:contact: pierregm_at_uga_edu -""" -#TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ? -#TODO : ttest_reel looks botched: what are x1,x2,v1,v2 for ? 
-#TODO : reimplement ksonesamp - -__author__ = "Pierre GF Gerard-Marchant" -__docformat__ = "restructuredtext en" - -__all__ = ['argstoarray', - 'betai', - 'chisquare','count_tied_groups', - 'describe', - 'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare', - 'gmean', - 'hmean', - 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', - 'ks_twosamp','ks_2samp','kurtosis','kurtosistest', - 'linregress', - 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', - 'normaltest', - 'obrientransform', - 'pearsonr','plotting_positions','pointbiserialr', - 'rankdata', - 'scoreatpercentile','sem', - 'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr', - 'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth', - 'trimtail','trima','trimr','trimmed_mean','trimmed_std', - 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', - 'ttest_ind','ttest_rel','tvar', - 'variation', - 'winsorize', - 'zmap', 'zscore' - ] - -import numpy as np -from numpy import ndarray -import numpy.ma as ma -from numpy.ma import MaskedArray, masked, nomask - -import itertools -import warnings - - -#import scipy.stats as stats -import stats -import scipy.special as special -import scipy.misc as misc -#import scipy.stats.futil as futil -import futil - -genmissingvaldoc = """ -Notes ------ - Missing values are considered pair-wise: if a value is missing in x, - the corresponding value in y is masked. 
-""" -#------------------------------------------------------------------------------ -def _chk_asarray(a, axis): - if axis is None: - a = ma.ravel(a) - outaxis = 0 - else: - a = ma.asanyarray(a) - outaxis = axis - return a, outaxis - -def _chk2_asarray(a, b, axis): - if axis is None: - a = ma.ravel(a) - b = ma.ravel(b) - outaxis = 0 - else: - a = ma.asanyarray(a) - b = ma.asanyarray(b) - outaxis = axis - return a, b, outaxis - -def _chk_size(a,b): - a = ma.asanyarray(a) - b = ma.asanyarray(b) - (na, nb) = (a.size, b.size) - if na != nb: - raise ValueError("The size of the input array should match!"\ - " (%s <> %s)" % (na,nb)) - return (a,b,na) - -def argstoarray(*args): - """Constructs a 2D array from a sequence of sequences. Sequences are filled - with missing values to match the length of the longest sequence. - - Returns - ------- - output : MaskedArray - a (mxn) masked array, where m is the number of arguments and n the - length of the longest argument. - """ - if len(args) == 1 and not isinstance(args[0], ndarray): - output = ma.asarray(args[0]) - if output.ndim != 2: - raise ValueError("The input should be 2D") - else: - n = len(args) - m = max([len(k) for k in args]) - output = ma.array(np.empty((n,m), dtype=float), mask=True) - for (k,v) in enumerate(args): - output[k,:len(v)] = v - output[np.logical_not(np.isfinite(output._data))] = masked - return output - - - -#####-------------------------------------------------------------------------- -#---- --- Ranking --- -#####-------------------------------------------------------------------------- - -def find_repeats(arr): - """Find repeats in arr and return a tuple (repeats, repeat_count). - Masked values are discarded. - -Parameters ----------- - arr : sequence - Input array. The array is flattened if it is not 1D. - -Returns -------- - repeats : ndarray - Array of repeated values. - counts : ndarray - Array of counts. 
- - """ - marr = ma.compressed(arr) - if not marr.size: - return (np.array(0), np.array(0)) - (v1, v2, n) = futil.dfreps(ma.array(ma.compressed(arr), copy=True)) - return (v1[:n], v2[:n]) - - -def count_tied_groups(x, use_missing=False): - """ - Counts the number of tied values in x, and returns a dictionary - (nb of ties: nb of groups). - - Parameters - ---------- - x : sequence - Sequence of data on which to counts the ties - use_missing : boolean - Whether to consider missing values as tied. - - Examples - -------- - >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6] - >>> count_tied_groups(z) - >>> {2:1, 3:2} - >>> # The ties were 0 (3x), 2 (3x) and 3 (2x) - >>> z = ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6]) - >>> count_tied_groups(z) - >>> {2:2, 3:1} - >>> # The ties were 0 (2x), 2 (3x) and 3 (2x) - >>> z[[1,-1]] = masked - >>> count_tied_groups(z, use_missing=True) - >>> {2:2, 3:1} - >>> # The ties were 2 (3x), 3 (2x) and masked (2x) - - """ - nmasked = ma.getmask(x).sum() - # We need the copy as find_repeats will overwrite the initial data - data = ma.compressed(x).copy() - (ties, counts) = find_repeats(data) - nties = {} - if len(ties): - nties = dict(zip(np.unique(counts), itertools.repeat(1))) - nties.update(dict(zip(*find_repeats(counts)))) - if nmasked and use_missing: - try: - nties[nmasked] += 1 - except KeyError: - nties[nmasked] = 1 - return nties - - -def rankdata(data, axis=None, use_missing=False): - """Returns the rank (also known as order statistics) of each data point - along the given axis. - - If some values are tied, their rank is averaged. - If some values are masked, their rank is set to 0 if use_missing is False, - or set to the average rank of the unmasked values if use_missing is True. - - Parameters - ---------- - data : sequence - Input data. The data is transformed to a masked array - axis : {None,int} optional - Axis along which to perform the ranking. - If None, the array is first flattened. 
An exception is raised if - the axis is specified for arrays with a dimension larger than 2 - use_missing : {boolean} optional - Whether the masked values have a rank of 0 (False) or equal to the - average rank of the unmasked values (True). - """ - # - def _rank1d(data, use_missing=False): - n = data.count() - rk = np.empty(data.size, dtype=float) - idx = data.argsort() - rk[idx[:n]] = np.arange(1,n+1) - # - if use_missing: - rk[idx[n:]] = (n+1)/2. - else: - rk[idx[n:]] = 0 - # - repeats = find_repeats(data.copy()) - for r in repeats[0]: - condition = (data==r).filled(False) - rk[condition] = rk[condition].mean() - return rk - # - data = ma.array(data, copy=False) - if axis is None: - if data.ndim > 1: - return _rank1d(data.ravel(), use_missing).reshape(data.shape) - else: - return _rank1d(data, use_missing) - else: - return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray) - - -#####-------------------------------------------------------------------------- -#---- --- Central tendency --- -#####-------------------------------------------------------------------------- - -def gmean(a, axis=0): - a, axis = _chk_asarray(a, axis) - log_a = ma.log(a) - return ma.exp(log_a.mean(axis=axis)) -gmean.__doc__ = stats.gmean.__doc__ - - -def hmean(a, axis=0): - a, axis = _chk_asarray(a, axis) - if isinstance(a, MaskedArray): - size = a.count(axis) - else: - size = a.shape[axis] - return size / (1.0/a).sum(axis) -hmean.__doc__ = stats.hmean.__doc__ - - -def mode(a, axis=0): - def _mode1D(a): - (rep,cnt) = find_repeats(a) - if not cnt.ndim: - return (0, 0) - elif cnt.size: - return (rep[cnt.argmax()], cnt.max()) - return (a[0], 1) - # - if axis is None: - output = _mode1D(ma.ravel(a)) - output = (ma.array(output[0]), ma.array(output[1])) - else: - output = ma.apply_along_axis(_mode1D, axis, a) - newshape = list(a.shape) - newshape[axis] = 1 - slices = [slice(None)] * output.ndim - slices[axis] = 0 - modes = output[tuple(slices)].reshape(newshape) - slices[axis] = 
1 - counts = output[tuple(slices)].reshape(newshape) - output = (modes, counts) - return output -mode.__doc__ = stats.mode.__doc__ - - -#####-------------------------------------------------------------------------- -#---- --- Probabilities --- -#####-------------------------------------------------------------------------- - -def betai(a, b, x): - x = np.asanyarray(x) - x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 - return special.betainc(a, b, x) -betai.__doc__ = stats.betai.__doc__ - - -#####-------------------------------------------------------------------------- -#---- --- Correlation --- -#####-------------------------------------------------------------------------- - -def msign(x): - """Returns the sign of x, or 0 if x is masked.""" - return ma.filled(np.sign(x), 0) - - - -def pearsonr(x,y): - """Calculates a Pearson correlation coefficient and the p-value for testing - non-correlation. - - The Pearson correlation coefficient measures the linear relationship - between two datasets. Strictly speaking, Pearson's correlation requires - that each dataset be normally distributed. Like other correlation - coefficients, this one varies between -1 and +1 with 0 implying no - correlation. Correlations of -1 or +1 imply an exact linear - relationship. Positive correlations imply that as x increases, so does - y. Negative correlations imply that as x increases, y decreases. - - The p-value roughly indicates the probability of an uncorrelated system - producing datasets that have a Pearson correlation at least as extreme - as the one computed from these datasets. The p-values are not entirely - reliable but are probably reasonable for datasets larger than 500 or so. 
- - Parameters - ---------- - x : 1D array - y : 1D array the same length as x - - Returns - ------- - (Pearson's correlation coefficient, - 2-tailed p-value) - - References - ---------- - http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation - """ - (x, y, n) = _chk_size(x, y) - (x, y) = (x.ravel(), y.ravel()) - # Get the common mask and the total nb of unmasked elements - m = ma.mask_or(ma.getmask(x), ma.getmask(y)) - n -= m.sum() - df = n-2 - if df < 0: - return (masked, masked) - # - (mx, my) = (x.mean(), y.mean()) - (xm, ym) = (x-mx, y-my) - # - r_num = n*(ma.add.reduce(xm*ym)) - r_den = n*ma.sqrt(ma.dot(xm,xm)*ma.dot(ym,ym)) - r = (r_num / r_den) - # Presumably, if r > 1, then it is only some small artifact of floating - # point arithmetic. - r = min(r, 1.0) - r = max(r, -1.0) - df = n-2 - # - t = ma.sqrt(df/((1.0-r)*(1.0+r))) * r - if t is masked: - prob = 0. - else: - prob = betai(0.5*df,0.5,df/(df+t*t)) - return (r,prob) - - -def spearmanr(x, y, use_ties=True): - """Calculates a Spearman rank-order correlation coefficient and the p-value - to test for non-correlation. - - The Spearman correlation is a nonparametric measure of the linear - relationship between two datasets. Unlike the Pearson correlation, the - Spearman correlation does not assume that both datasets are normally - distributed. Like other correlation coefficients, this one varies - between -1 and +1 with 0 implying no correlation. Correlations of -1 or - +1 imply an exact linear relationship. Positive correlations imply that - as x increases, so does y. Negative correlations imply that as x - increases, y decreases. - - Missing values are discarded pair-wise: if a value is missing in x, the - corresponding value in y is masked. - - The p-value roughly indicates the probability of an uncorrelated system - producing datasets that have a Spearman correlation at least as extreme - as the one computed from these datasets. 
The p-values are not entirely - reliable but are probably reasonable for datasets larger than 500 or so. - -Parameters ----------- - x : 1D array - y : 1D array the same length as x - The lengths of both arrays must be > 2. - use_ties : {True, False} optional - Whether the correction for ties should be computed. - -Returns -------- - (Spearman correlation coefficient, - 2-tailed p-value) - - References - ---------- - [CRCProbStat2000] section 14.7 - """ - (x, y, n) = _chk_size(x, y) - (x, y) = (x.ravel(), y.ravel()) - # - m = ma.mask_or(ma.getmask(x), ma.getmask(y)) - n -= m.sum() - if m is not nomask: - x = ma.array(x, mask=m, copy=True) - y = ma.array(y, mask=m, copy=True) - df = n-2 - if df < 0: - raise ValueError("The input must have at least 3 entries!") - # Gets the ranks and rank differences - rankx = rankdata(x) - ranky = rankdata(y) - dsq = np.add.reduce((rankx-ranky)**2) - # Tie correction - if use_ties: - xties = count_tied_groups(x) - yties = count_tied_groups(y) - corr_x = np.sum(v*k*(k**2-1) for (k,v) in xties.iteritems())/12. - corr_y = np.sum(v*k*(k**2-1) for (k,v) in yties.iteritems())/12. - else: - corr_x = corr_y = 0 - denom = n*(n**2 - 1)/6. - if corr_x != 0 or corr_y != 0: - rho = denom - dsq - corr_x - corr_y - rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y)) - else: - rho = 1. - dsq/denom - # - t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho - if t is masked: - prob = 0. - else: - prob = betai(0.5*df,0.5,df/(df+t*t)) - return rho, prob - - -def kendalltau(x, y, use_ties=True, use_missing=False): - """Computes Kendall's rank correlation tau on two variables *x* and *y*. - - Parameters - ---------- - xdata: sequence - First data list (for example, time). - ydata: sequence - Second data list. - use_ties: {True, False} optional - Whether ties correction should be performed. 
- use_missing: {False, True} optional - Whether missing data should be allocated a rank of 0 (False) or the - average rank (True) - - Returns - ------- - tau : float - Kendall tau - prob : float - Approximate 2-side p-value. - """ - (x, y, n) = _chk_size(x, y) - (x, y) = (x.flatten(), y.flatten()) - m = ma.mask_or(ma.getmask(x), ma.getmask(y)) - if m is not nomask: - x = ma.array(x, mask=m, copy=True) - y = ma.array(y, mask=m, copy=True) - n -= m.sum() - # - if n < 2: - return (np.nan, np.nan) - # - rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0) - ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0) - idx = rx.argsort() - (rx, ry) = (rx[idx], ry[idx]) - C = np.sum([((ry[i+1:]>ry[i]) * (rx[i+1:]>rx[i])).filled(0).sum() - for i in range(len(ry)-1)], dtype=float) - D = np.sum([((ry[i+1:]rx[i])).filled(0).sum() - for i in range(len(ry)-1)], dtype=float) - if use_ties: - xties = count_tied_groups(x) - yties = count_tied_groups(y) - corr_x = np.sum([v*k*(k-1) for (k,v) in xties.iteritems()], dtype=float) - corr_y = np.sum([v*k*(k-1) for (k,v) in yties.iteritems()], dtype=float) - denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.) - else: - denom = n*(n-1)/2. - tau = (C-D) / denom - # - var_s = n*(n-1)*(2*n+5) - if use_ties: - var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in xties.iteritems()) - var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in yties.iteritems()) - v1 = np.sum([v*k*(k-1) for (k, v) in xties.iteritems()], dtype=float) *\ - np.sum([v*k*(k-1) for (k, v) in yties.iteritems()], dtype=float) - v1 /= 2.*n*(n-1) - if n > 2: - v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in xties.iteritems()], - dtype=float) * \ - np.sum([v*k*(k-1)*(k-2) for (k,v) in yties.iteritems()], - dtype=float) - v2 /= 9.*n*(n-1)*(n-2) - else: - v2 = 0 - else: - v1 = v2 = 0 - var_s /= 18. 
- var_s += (v1 + v2) - z = (C-D)/np.sqrt(var_s) - prob = special.erfc(abs(z)/np.sqrt(2)) - return (tau, prob) - - -def kendalltau_seasonal(x): - """Computes a multivariate extension Kendall's rank correlation tau, designed - for seasonal data. - -Parameters ----------- - x: 2D array - Array of seasonal data, with seasons in columns. - """ - x = ma.array(x, subok=True, copy=False, ndmin=2) - (n,m) = x.shape - n_p = x.count(0) - # - S_szn = np.sum(msign(x[i:]-x[i]).sum(0) for i in range(n)) - S_tot = S_szn.sum() - # - n_tot = x.count() - ties = count_tied_groups(x.compressed()) - corr_ties = np.sum(v*k*(k-1) for (k,v) in ties.iteritems()) - denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2. - # - R = rankdata(x, axis=0, use_missing=True) - K = ma.empty((m,m), dtype=int) - covmat = ma.empty((m,m), dtype=float) -# cov_jj = ma.empty(m, dtype=float) - denom_szn = ma.empty(m, dtype=float) - for j in range(m): - ties_j = count_tied_groups(x[:,j].compressed()) - corr_j = np.sum(v*k*(k-1) for (k,v) in ties_j.iteritems()) - cmb = n_p[j]*(n_p[j]-1) - for k in range(j,m,1): - K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() - for i in range(n)) - covmat[j,k] = (K[j,k] +4*(R[:,j]*R[:,k]).sum() - \ - n*(n_p[j]+1)*(n_p[k]+1))/3. - K[k,j] = K[j,k] - covmat[k,j] = covmat[j,k] -# cov_jj[j] = (nn_p*(2*n_p[j]+5)) -# cov_jj[j] -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in ties_j.iteritems()) -# cov_jj[j] /= 18. - denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2. 
- var_szn = covmat.diagonal() - # - z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn) - z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum()) - z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum()) - # - prob_szn = special.erfc(abs(z_szn)/np.sqrt(2)) - prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2)) - prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2)) - # - chi2_tot = (z_szn*z_szn).sum() - chi2_trd = m * z_szn.mean()**2 - output = {'seasonal tau': S_szn/denom_szn, - 'global tau': S_tot/denom_tot, - 'global tau (alt)': S_tot/denom_szn.sum(), - 'seasonal p-value': prob_szn, - 'global p-value (indep)': prob_tot_ind, - 'global p-value (dep)': prob_tot_dep, - 'chi2 total': chi2_tot, - 'chi2 trend': chi2_trd, - } - return output - - -def pointbiserialr(x, y): - x = ma.fix_invalid(x, copy=True).astype(bool) - y = ma.fix_invalid(y, copy=True).astype(float) - # Get rid of the missing data .......... - m = ma.mask_or(ma.getmask(x), ma.getmask(y)) - if m is not nomask: - unmask = np.logical_not(m) - x = x[unmask] - y = y[unmask] - # - n = len(x) - # phat is the fraction of x values that are True - phat = x.sum() / float(n) - y0 = y[~x] # y-values where x is False - y1 = y[x] # y-values where x is True - y0m = y0.mean() - y1m = y1.mean() - # - rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std() - # - df = n-2 - t = rpb*ma.sqrt(df/(1.0-rpb**2)) - prob = betai(0.5*df, 0.5, df/(df+t*t)) - return rpb, prob - -if stats.pointbiserialr.__doc__: - pointbiserialr.__doc__ = stats.pointbiserialr.__doc__ + genmissingvaldoc - - -def linregress(*args): - if len(args) == 1: # more than 1D array? 
- args = ma.array(args[0], copy=True) - if len(args) == 2: - x = args[0] - y = args[1] - else: - x = args[:,0] - y = args[:,1] - else: - x = ma.array(args[0]).flatten() - y = ma.array(args[1]).flatten() - m = ma.mask_or(ma.getmask(x), ma.getmask(y)) - if m is not nomask: - x = ma.array(x,mask=m) - y = ma.array(y,mask=m) - n = len(x) - (xmean, ymean) = (x.mean(), y.mean()) - (xm, ym) = (x-xmean, y-ymean) - (Sxx, Syy) = (ma.add.reduce(xm*xm), ma.add.reduce(ym*ym)) - Sxy = ma.add.reduce(xm*ym) - r_den = ma.sqrt(Sxx*Syy) - if r_den == 0.0: - r = 0.0 - else: - r = Sxy / r_den - if (r > 1.0): - r = 1.0 # from numerical error - #z = 0.5*log((1.0+r+TINY)/(1.0-r+TINY)) - df = n-2 - t = r * ma.sqrt(df/(1.0-r*r)) - prob = betai(0.5*df,0.5,df/(df+t*t)) - slope = Sxy / Sxx - intercept = ymean - slope*xmean - sterrest = ma.sqrt(1.-r*r) * y.std() - return slope, intercept, r, prob, sterrest - -if stats.linregress.__doc__: - linregress.__doc__ = stats.linregress.__doc__ + genmissingvaldoc - - -def theilslopes(y, x=None, alpha=0.05): - """Computes the Theil slope over the dataset (x,y), as the median of all slopes - between paired values. - - Parameters - ---------- - y : sequence - Dependent variable. - x : {None, sequence} optional - Independent variable. If None, use arange(len(y)) instead. - alpha : float - Confidence degree. - - Returns - ------- - medslope : float - Theil slope - medintercept : float - Intercept of the Theil line, as median(y)-medslope*median(x) - lo_slope : float - Lower bound of the confidence interval on medslope - up_slope : float - Upper bound of the confidence interval on medslope - - """ - y = ma.asarray(y).flatten() - y[-1] = masked - n = len(y) - if x is None: - x = ma.arange(len(y), dtype=float) - else: - x = ma.asarray(x).flatten() - if len(x) != n: - raise ValueError("Incompatible lengths ! 
(%s<>%s)" % (n,len(x))) - m = ma.mask_or(ma.getmask(x), ma.getmask(y)) - y._mask = x._mask = m - ny = y.count() - # - slopes = ma.hstack([(y[i+1:]-y[i])/(x[i+1:]-x[i]) for i in range(n-1)]) - slopes.sort() - medslope = ma.median(slopes) - medinter = ma.median(y) - medslope*ma.median(x) - # - if alpha > 0.5: - alpha = 1.-alpha - z = stats.distributions.norm.ppf(alpha/2.) - # - (xties, yties) = (count_tied_groups(x), count_tied_groups(y)) - nt = ny*(ny-1)/2. - sigsq = (ny*(ny-1)*(2*ny+5)/18.) - sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in xties.iteritems()) - sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in yties.iteritems()) - sigma = np.sqrt(sigsq) - - Ru = min(np.round((nt - z*sigma)/2. + 1), len(slopes)-1) - Rl = max(np.round((nt + z*sigma)/2.), 0) - delta = slopes[[Rl,Ru]] - return medslope, medinter, delta[0], delta[1] - - -def sen_seasonal_slopes(x): - x = ma.array(x, subok=True, copy=False, ndmin=2) - (n,_) = x.shape - # Get list of slopes per season - szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None] - for i in range(n)]) - szn_medslopes = ma.median(szn_slopes, axis=0) - medslope = ma.median(szn_slopes, axis=None) - return szn_medslopes, medslope - - -#####-------------------------------------------------------------------------- -#---- --- Inferential statistics --- -#####-------------------------------------------------------------------------- - -def ttest_onesamp(a, popmean): - a = ma.asarray(a) - x = a.mean(axis=None) - v = a.var(axis=None,ddof=1) - n = a.count(axis=None) - df = n-1 - svar = ((n-1)*v) / float(df) - t = (x-popmean)/ma.sqrt(svar*(1.0/n)) - prob = betai(0.5*df,0.5,df/(df+t*t)) - return t,prob -ttest_onesamp.__doc__ = stats.ttest_1samp.__doc__ -ttest_1samp = ttest_onesamp - - -def ttest_ind(a, b, axis=0): - a, b, axis = _chk2_asarray(a, b, axis) - (x1, x2) = (a.mean(axis), b.mean(axis)) - (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) - (n1, n2) = (a.count(axis), b.count(axis)) - df = n1+n2-2 - svar = 
((n1-1)*v1+(n2-1)*v2) / float(df) - svar == 0 - t = (x1-x2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!! - t = ma.filled(t, 1) # replace NaN t-values with 1.0 - probs = betai(0.5*df,0.5,float(df)/(df+t*t)).reshape(t.shape) - return t, probs.squeeze() -ttest_ind.__doc__ = stats.ttest_ind.__doc__ - - -def ttest_rel(a,b,axis=None): - a, b, axis = _chk2_asarray(a, b, axis) - if len(a)!=len(b): - raise ValueError('unequal length arrays') - (x1, x2) = (a.mean(axis), b.mean(axis)) - (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) - n = a.count(axis) - df = (n-1.0) - d = (a-b).astype('d') - denom = ma.sqrt((n*ma.add.reduce(d*d,axis) - ma.add.reduce(d,axis)**2) /df) - #zerodivproblem = denom == 0 - t = ma.add.reduce(d, axis) / denom - t = ma.filled(t, 1) - probs = betai(0.5*df,0.5,df/(df+t*t)).reshape(t.shape).squeeze() - return t, probs -ttest_rel.__doc__ = stats.ttest_rel.__doc__ - - -def chisquare(f_obs, f_exp=None): - f_obs = ma.asarray(f_obs) - if f_exp is None: - f_exp = ma.array([f_obs.mean(axis=0)] * len(f_obs)) - f_exp = f_exp.astype(float) - chisq = ma.add.reduce((f_obs-f_exp)**2 / f_exp) - return chisq, stats.chisqprob(chisq, f_obs.count(0)-1) -chisquare.__doc__ = stats.chisquare.__doc__ - - -def mannwhitneyu(x,y, use_continuity=True): - """Computes the Mann-Whitney on samples x and y. - Missing values in x and/or y are discarded. - - Parameters - ---------- - x : sequence - y : sequence - use_continuity : {True, False} optional - Whether a continuity correction (1/2.) should be taken into account. - - Returns - ------- - u : float - The Mann-Whitney statistics - prob : float - Approximate p-value assuming a normal distribution. - - """ - x = ma.asarray(x).compressed().view(ndarray) - y = ma.asarray(y).compressed().view(ndarray) - ranks = rankdata(np.concatenate([x,y])) - (nx, ny) = (len(x), len(y)) - nt = nx + ny - U = ranks[:nx].sum() - nx*(nx+1)/2. - U = max(U, nx*ny - U) - u = nx*ny - U - # - mu = (nx*ny)/2. 
- sigsq = (nt**3 - nt)/12. - ties = count_tied_groups(ranks) - sigsq -= np.sum(v*(k**3-k) for (k,v) in ties.iteritems())/12. - sigsq *= nx*ny/float(nt*(nt-1)) - # - if use_continuity: - z = (U - 1/2. - mu) / ma.sqrt(sigsq) - else: - z = (U - mu) / ma.sqrt(sigsq) - prob = special.erfc(abs(z)/np.sqrt(2)) - return (u, prob) - - -def kruskalwallis(*args): - output = argstoarray(*args) - ranks = ma.masked_equal(rankdata(output, use_missing=False), 0) - sumrk = ranks.sum(-1) - ngrp = ranks.count(-1) - ntot = ranks.count() -# ssbg = (sumrk**2/ranks.count(-1)).sum() - ranks.sum()**2/ntotal -# H = ssbg / (ntotal*(ntotal+1)/12.) - H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1) - # Tie correction - ties = count_tied_groups(ranks) - T = 1. - np.sum(v*(k**3-k) for (k,v) in ties.iteritems())/float(ntot**3-ntot) - if T == 0: - raise ValueError('All numbers are identical in kruskal') - H /= T - # - df = len(output) - 1 - prob = stats.chisqprob(H,df) - return (H, prob) -kruskal = kruskalwallis -kruskalwallis.__doc__ = stats.kruskal.__doc__ - - -_kolmog2 = special.kolmogorov -def _kolmog1(x,n): - if x <= 0: - return 0 - if x >= 1: - return 1 - j = np.arange(np.floor(n*(1-x))+1) - return 1 - x * np.sum(np.exp(np.log(misc.comb(n,j)) - + (n-j) * np.log(1-x-j/float(n)) - + (j-1) * np.log(x+j/float(n)))) - - -def ks_twosamp(data1, data2, alternative="two_sided"): - """Computes the Kolmogorov-Smirnov test on two samples. - Missing values are discarded. - - Parameters - ---------- - data1 : sequence - First data set - data2 : sequence - Second data set - alternative : {'two_sided', 'less', 'greater'} optional - Indicates the alternative hypothesis. - - Returns - ------- - d : float - Value of the Kolmogorov Smirnov test - p : float - Corresponding p-value. 
- - """ - (data1, data2) = (ma.asarray(data1), ma.asarray(data2)) - (n1, n2) = (data1.count(), data2.count()) - n = (n1*n2/float(n1+n2)) - mix = ma.concatenate((data1.compressed(), data2.compressed())) - mixsort = mix.argsort(kind='mergesort') - csum = np.where(mixsort threshmax).filled(False) - a[mask] = newval - return a - - -def trima(a, limits=None, inclusive=(True,True)): - """Trims an array by masking the data outside some given limits. - Returns a masked version of the input array. - - Parameters - ---------- - a : sequence - Input array. - limits : {None, tuple} optional - Tuple of (lower limit, upper limit) in absolute values. - Values of the input array lower (greater) than the lower (upper) limit - will be masked. A limit is None indicates an open interval. - inclusive : {(True,True) tuple} optional - Tuple of (lower flag, upper flag), indicating whether values exactly - equal to the lower (upper) limit are allowed. - - """ - a = ma.asarray(a) - a.unshare_mask() - if limits is None: - return a - (lower_lim, upper_lim) = limits - (lower_in, upper_in) = inclusive - condition = False - if lower_lim is not None: - if lower_in: - condition |= (a < lower_lim) - else: - condition |= (a <= lower_lim) - if upper_lim is not None: - if upper_in: - condition |= (a > upper_lim) - else: - condition |= (a >= upper_lim) - a[condition.filled(True)] = masked - return a - - -def trimr(a, limits=None, inclusive=(True, True), axis=None): - """Trims an array by masking some proportion of the data on each end. - Returns a masked version of the input array. - - Parameters - ---------- - a : sequence - Input array. - limits : {None, tuple} optional - Tuple of the percentages to cut on each side of the array, with respect - to the number of unmasked data, as floats between 0. and 1. 
- Noting n the number of unmasked data before trimming, the (n*limits[0])th - smallest data and the (n*limits[1])th largest data are masked, and the - total number of unmasked data after trimming is n*(1.-sum(limits)) - The value of one limit can be set to None to indicate an open interval. - inclusive : {(True,True) tuple} optional - Tuple of flags indicating whether the number of data being masked on the - left (right) end should be truncated (True) or rounded (False) to integers. - axis : {None,int} optional - Axis along which to trim. If None, the whole array is trimmed, but its - shape is maintained. - - """ - def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive): - n = a.count() - idx = a.argsort() - if low_limit: - if low_inclusive: - lowidx = int(low_limit*n) - else: - lowidx = np.round(low_limit*n) - a[idx[:lowidx]] = masked - if up_limit is not None: - if up_inclusive: - upidx = n - int(n*up_limit) - else: - upidx = n- np.round(n*up_limit) - a[idx[upidx:]] = masked - return a - # - a = ma.asarray(a) - a.unshare_mask() - if limits is None: - return a - # Check the limits - (lolim, uplim) = limits - errmsg = "The proportion to cut from the %s should be between 0. and 1." - if lolim is not None: - if lolim > 1. or lolim < 0: - raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) - if uplim is not None: - if uplim > 1. or uplim < 0: - raise ValueError(errmsg % 'end' + "(got %s)" % uplim) - # - (loinc, upinc) = inclusive - # - if axis is None: - shp = a.shape - return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp) - else: - return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc) - -trimdoc = """ - Parameters - ---------- - a : sequence - Input array - limits : {None, tuple} optional - If relative == False, tuple (lower limit, upper limit) in absolute values. - Values of the input array lower (greater) than the lower (upper) limit are - masked. 
- If relative == True, tuple (lower percentage, upper percentage) to cut - on each side of the array, with respect to the number of unmasked data. - Noting n the number of unmasked data before trimming, the (n*limits[0])th - smallest data and the (n*limits[1])th largest data are masked, and the - total number of unmasked data after trimming is n*(1.-sum(limits)) - In each case, the value of one limit can be set to None to indicate an - open interval. - If limits is None, no trimming is performed - inclusive : {(True, True) tuple} optional - If relative==False, tuple indicating whether values exactly equal to the - absolute limits are allowed. - If relative==True, tuple indicating whether the number of data being masked - on each side should be rounded (True) or truncated (False). - relative : {False, True} optional - Whether to consider the limits as absolute values (False) or proportions - to cut (True). - axis : {None, integer}, optional - Axis along which to trim. -""" - - -def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None): - """Trims an array by masking the data outside some given limits. - - Returns a masked version of the input array. - %s - - Examples - -------- - >>>z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10] - >>>trim(z,(3,8)) - [--,--, 3, 4, 5, 6, 7, 8,--,--] - >>>trim(z,(0.1,0.2),relative=True) - [--, 2, 3, 4, 5, 6, 7, 8,--,--] - - - """ - if relative: - return trimr(a, limits=limits, inclusive=inclusive, axis=axis) - else: - return trima(a, limits=limits, inclusive=inclusive) - -if trim.__doc__: - trim.__doc__ = trim.__doc__ % trimdoc - - -def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None): - """Trims the data by masking the int(proportiontocut*n) smallest and - int(proportiontocut*n) largest values of data along the given axis, where n - is the number of unmasked values before trimming. - -Parameters ----------- - data : ndarray - Data to trim. 
- proportiontocut : {0.2, float} optional - Percentage of trimming (as a float between 0 and 1). - If n is the number of unmasked values before trimming, the number of - values after trimming is: - (1-2*proportiontocut)*n. - inclusive : {(True, True) tuple} optional - Tuple indicating whether the number of data being masked on each side - should be rounded (True) or truncated (False). - axis : {None, integer}, optional - Axis along which to perform the trimming. - If None, the input array is first flattened. - - """ - return trimr(data, limits=(proportiontocut,proportiontocut), - inclusive=inclusive, axis=axis) - -#.............................................................................. -def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True), - axis=None): - """Trims the data by masking int(trim*n) values from ONE tail of the - data along the given axis, where n is the number of unmasked values. - -Parameters ----------- - data : {ndarray} - Data to trim. - proportiontocut : {0.2, float} optional - Percentage of trimming. If n is the number of unmasked values - before trimming, the number of values after trimming is - (1-proportiontocut)*n. - tail : {'left','right'} optional - If left (right), the ``proportiontocut`` lowest (greatest) values will - be masked. - inclusive : {(True, True) tuple} optional - Tuple indicating whether the number of data being masked on each side - should be rounded (True) or truncated (False). - axis : {None, integer}, optional - Axis along which to perform the trimming. - If None, the input array is first flattened. 
- - """ - tail = str(tail).lower()[0] - if tail == 'l': - limits = (proportiontocut,None) - elif tail == 'r': - limits = (None, proportiontocut) - else: - raise TypeError("The tail argument should be in ('left','right')") - return trimr(data, limits=limits, axis=axis, inclusive=inclusive) - -trim1 = trimtail - -def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, - axis=None): - """Returns the trimmed mean of the data along the given axis. - - %s - - """ % trimdoc - if (not isinstance(limits,tuple)) and isinstance(limits,float): - limits = (limits, limits) - if relative: - return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis) - else: - return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis) - - -def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, - axis=None, ddof=0): - """Returns the trimmed variance of the data along the given axis. - - %s - ddof : {0,integer}, optional - Means Delta Degrees of Freedom. The denominator used during computations - is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- - biased estimate of the variance. - - """ % trimdoc - if (not isinstance(limits,tuple)) and isinstance(limits,float): - limits = (limits, limits) - if relative: - out = trimr(a,limits=limits, inclusive=inclusive,axis=axis) - else: - out = trima(a,limits=limits,inclusive=inclusive) - return out.var(axis=axis, ddof=ddof) - - -def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, - axis=None, ddof=0): - """Returns the trimmed standard deviation of the data along the given axis. - - %s - ddof : {0,integer}, optional - Means Delta Degrees of Freedom. The denominator used during computations - is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- - biased estimate of the variance. 
- - """ % trimdoc - if (not isinstance(limits,tuple)) and isinstance(limits,float): - limits = (limits, limits) - if relative: - out = trimr(a,limits=limits,inclusive=inclusive,axis=axis) - else: - out = trima(a,limits=limits,inclusive=inclusive) - return out.std(axis=axis,ddof=ddof) - - -def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None): - """Returns the standard error of the trimmed mean of the data along the given - axis. - Parameters - ---------- - a : sequence - Input array - limits : {(0.1,0.1), tuple of float} optional - tuple (lower percentage, upper percentage) to cut on each side of the - array, with respect to the number of unmasked data. - Noting n the number of unmasked data before trimming, the (n*limits[0])th - smallest data and the (n*limits[1])th largest data are masked, and the - total number of unmasked data after trimming is n*(1.-sum(limits)) - In each case, the value of one limit can be set to None to indicate an - open interval. - If limits is None, no trimming is performed - inclusive : {(True, True) tuple} optional - Tuple indicating whether the number of data being masked on each side - should be rounded (True) or truncated (False). - axis : {None, integer}, optional - Axis along which to trim. - """ - #........................ - def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive): - "Returns the standard error of the trimmed mean for a 1D input data." - n = a.count() - idx = a.argsort() - if low_limit: - if low_inclusive: - lowidx = int(low_limit*n) - else: - lowidx = np.round(low_limit*n) - a[idx[:lowidx]] = masked - if up_limit is not None: - if up_inclusive: - upidx = n - int(n*up_limit) - else: - upidx = n- np.round(n*up_limit) - a[idx[upidx:]] = masked - nsize = a.count() - a[idx[:lowidx]] = a[idx[lowidx]] - a[idx[upidx:]] = a[idx[upidx-1]] - winstd = a.std(ddof=1) - return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a))) - #........................ 
- a = ma.array(a, copy=True, subok=True) - a.unshare_mask() - if limits is None: - return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis)) - if (not isinstance(limits,tuple)) and isinstance(limits,float): - limits = (limits, limits) - # Check the limits - (lolim, uplim) = limits - errmsg = "The proportion to cut from the %s should be between 0. and 1." - if lolim is not None: - if lolim > 1. or lolim < 0: - raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) - if uplim is not None: - if uplim > 1. or uplim < 0: - raise ValueError(errmsg % 'end' + "(got %s)" % uplim) - # - (loinc, upinc) = inclusive - if (axis is None): - shp = a.shape - return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc) - else: - if a.ndim > 2: - raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim) - return ma.apply_along_axis(_trimmed_stde_1D, axis, a, - lolim,uplim,loinc,upinc) - - -def tmean(a, limits=None, inclusive=(True,True)): - return trima(a, limits=limits, inclusive=inclusive).mean() -tmean.__doc__ = stats.tmean.__doc__ - -def tvar(a, limits=None, inclusive=(True,True)): - return trima(a, limits=limits, inclusive=inclusive).var() -tvar.__doc__ = stats.tvar.__doc__ - -def tmin(a, lowerlimit=None, axis=0, inclusive=True): - a, axis = _chk_asarray(a, axis) - am = trima(a, (lowerlimit, None), (inclusive, False)) - return ma.minimum.reduce(am, axis) -tmin.__doc__ = stats.tmin.__doc__ - -def tmax(a, upperlimit, axis=0, inclusive=True): - a, axis = _chk_asarray(a, axis) - am = trima(a, (None, upperlimit), (False, inclusive)) - return ma.maximum.reduce(am, axis) -tmax.__doc__ = stats.tmax.__doc__ - -def tsem(a, limits=None, inclusive=(True,True)): - a = ma.asarray(a).ravel() - if limits is None: - n = float(a.count()) - return a.std()/ma.sqrt(n) - am = trima(a.ravel(), limits, inclusive) - sd = np.sqrt(am.var()) - return sd / am.count() -tsem.__doc__ = stats.tsem.__doc__ - - -def winsorize(a, limits=None, inclusive=(True,True), inplace=False, 
axis=None): - """Returns a Winsorized version of the input array. - - The (limits[0])th lowest values are set to the (limits[0])th percentile, - and the (limits[1])th highest values are set to the (limits[1])th - percentile. - Masked values are skipped. - - - Parameters - ---------- - a : sequence - Input array. - limits : {None, tuple of float} optional - Tuple of the percentages to cut on each side of the array, with respect - to the number of unmasked data, as floats between 0. and 1. - Noting n the number of unmasked data before trimming, the (n*limits[0])th - smallest data and the (n*limits[1])th largest data are masked, and the - total number of unmasked data after trimming is n*(1.-sum(limits)) - The value of one limit can be set to None to indicate an open interval. - inclusive : {(True, True) tuple} optional - Tuple indicating whether the number of data being masked on each side - should be rounded (True) or truncated (False). - inplace : {False, True} optional - Whether to winsorize in place (True) or to use a copy (False) - axis : {None, int} optional - Axis along which to trim. If None, the whole array is trimmed, but its - shape is maintained. - - """ - def _winsorize1D(a, low_limit, up_limit, low_include, up_include): - n = a.count() - idx = a.argsort() - if low_limit: - if low_include: - lowidx = int(low_limit*n) - else: - lowidx = np.round(low_limit*n) - a[idx[:lowidx]] = a[idx[lowidx]] - if up_limit is not None: - if up_include: - upidx = n - int(n*up_limit) - else: - upidx = n- np.round(n*up_limit) - a[idx[upidx:]] = a[idx[upidx-1]] - return a - # We gonna modify a: better make a copy - a = ma.array(a, copy=np.logical_not(inplace)) - # - if limits is None: - return a - if (not isinstance(limits,tuple)) and isinstance(limits,float): - limits = (limits, limits) - # Check the limits - (lolim, uplim) = limits - errmsg = "The proportion to cut from the %s should be between 0. and 1." - if lolim is not None: - if lolim > 1. 
or lolim < 0: - raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) - if uplim is not None: - if uplim > 1. or uplim < 0: - raise ValueError(errmsg % 'end' + "(got %s)" % uplim) - # - (loinc, upinc) = inclusive - # - if axis is None: - shp = a.shape - return _winsorize1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp) - else: - return ma.apply_along_axis(_winsorize1D, axis,a,lolim,uplim,loinc,upinc) - - -#####-------------------------------------------------------------------------- -#---- --- Moments --- -#####-------------------------------------------------------------------------- - -def moment(a, moment=1, axis=0): - a, axis = _chk_asarray(a, axis) - if moment == 1: - # By definition the first moment about the mean is 0. - shape = list(a.shape) - del shape[axis] - if shape: - # return an actual array of the appropriate shape - return np.zeros(shape, dtype=float) - else: - # the input was 1D, so return a scalar instead of a rank-0 array - return np.float64(0.0) - else: - mn = ma.expand_dims(a.mean(axis=axis), axis) - s = ma.power((a-mn), moment) - return s.mean(axis=axis) -moment.__doc__ = stats.moment.__doc__ - - -def variation(a, axis=0): - a, axis = _chk_asarray(a, axis) - return a.std(axis)/a.mean(axis) -variation.__doc__ = stats.variation.__doc__ - - -def skew(a, axis=0, bias=True): - a, axis = _chk_asarray(a,axis) - n = a.count(axis) - m2 = moment(a, 2, axis) - m3 = moment(a, 3, axis) - vals = ma.where(m2 == 0, 0, m3 / m2**1.5) - if not bias: - can_correct = (n > 2) & (m2 > 0) - if can_correct.any(): - m2 = np.extract(can_correct, m2) - m3 = np.extract(can_correct, m3) - nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5 - np.place(vals, can_correct, nval) - return vals -skew.__doc__ = stats.skew.__doc__ - - -def kurtosis(a, axis=0, fisher=True, bias=True): - a, axis = _chk_asarray(a, axis) - n = a.count(axis) - m2 = moment(a,2,axis) - m4 = moment(a,4,axis) - vals = ma.where(m2 == 0, 0, m4/ m2**2.0) - if not bias: - can_correct = (n > 3) & (m2 > 0) - if 
can_correct.any(): - m2 = np.extract(can_correct, m2) - m4 = np.extract(can_correct, m4) - nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0) - np.place(vals, can_correct, nval+3.0) - if fisher: - return vals - 3 - else: - return vals -kurtosis.__doc__ = stats.kurtosis.__doc__ - -def describe(a, axis=0): - """ - Computes several descriptive statistics of the passed array. - - Parameters - ---------- - a : array - - axis : int or None - - Returns - ------- - n : int - (size of the data (discarding missing values) - mm : (int, int) - min, max - - arithmetic mean : float - - unbiased variance : float - - biased skewness : float - - biased kurtosis : float - - Examples - -------- - - >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1]) - >>> describe(ma) - (array(3), - (0, 2), - 1.0, - 1.0, - masked_array(data = 0.0, - mask = False, - fill_value = 1e+20) - , - -1.5) - - """ - a, axis = _chk_asarray(a, axis) - n = a.count(axis) - mm = (ma.minimum.reduce(a), ma.maximum.reduce(a)) - m = a.mean(axis) - v = a.var(axis) - sk = skew(a, axis) - kurt = kurtosis(a, axis) - return n, mm, m, v, sk, kurt - -#............................................................................. -def stde_median(data, axis=None): - """Returns the McKean-Schrader estimate of the standard error of the sample -median along the given axis. masked values are discarded. - - Parameters - ---------- - data : ndarray - Data to trim. - axis : {None,int} optional - Axis along which to perform the trimming. - If None, the input array is first flattened. - - """ - def _stdemed_1D(data): - data = np.sort(data.compressed()) - n = len(data) - z = 2.5758293035489004 - k = int(np.round((n+1)/2. 
- z * np.sqrt(n/4.),0)) - return ((data[n-k] - data[k-1])/(2.*z)) - # - data = ma.array(data, copy=False, subok=True) - if (axis is None): - return _stdemed_1D(data) - else: - if data.ndim > 2: - raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim) - return ma.apply_along_axis(_stdemed_1D, axis, data) - -#####-------------------------------------------------------------------------- -#---- --- Normality Tests --- -#####-------------------------------------------------------------------------- - -def skewtest(a, axis=0): - """ - Tests whether the skew is different from the normal distribution. - - This function tests the null hypothesis that the skewness of - the population that the sample was drawn from is the same - as that of a corresponding normal distribution. - - Parameters - ---------- - a : array_like - The input array. - axis : int or None, optional - The axis along which to perform the skew test. Default is 0. - If `axis` is None, the array is first flattened. - - Returns - ------- - z-score : float - The computed z-score for this test. - p-value : float - a 2-sided p-value for the hypothesis test - - Notes - ----- - The sample size must be at least 8. - - """ - a, axis = _chk_asarray(a, axis) - if axis is None: - a = a.ravel() - axis = 0 - b2 = skew(a,axis) - n = a.count(axis) - if np.min(n) < 8: - warnings.warn( - "skewtest only valid for n>=8 ... continuing anyway, n=%i" % - np.min(n)) - y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) ) - beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) ) - W2 = -1 + ma.sqrt(2*(beta2-1)) - delta = 1/ma.sqrt(0.5*ma.log(W2)) - alpha = ma.sqrt(2.0/(W2-1)) - y = ma.where(y==0, 1, y) - Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1)) - return Z, (1.0 - stats.zprob(Z))*2 -skewtest.__doc__ = stats.skewtest.__doc__ - -def kurtosistest(a, axis=0): - """ - Tests whether a dataset has normal kurtosis. 
- - This function tests the null hypothesis that the kurtosis - of the population from which the sample was drawn is that - of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``. - - Parameters - ---------- - a : array_like - Array of the sample data. - axis : int or None, optional - The axis to operate along, or None to work on the whole array. - The default is 0 (the first axis). - - Returns - ------- - z-score : float - The computed z-score for this test. - p-value : float - The 2-sided p-value for the hypothesis test - - Notes - ----- - Valid only for n>20. The Z-score is set to 0 for bad entries. - - """ - a, axis = _chk_asarray(a, axis) - n = a.count(axis=axis).astype(float) - if np.min(n) < 20: - warnings.warn( - "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" % - np.min(n)) - b2 = kurtosis(a, axis, fisher=False) - E = 3.0*(n-1) /(n+1) - varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5)) - x = (b2-E)/ma.sqrt(varb2) - sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5))/ - (n*(n-2)*(n-3))) - A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) - term1 = 1 - 2./(9.0*A) - denom = 1 + x*ma.sqrt(2/(A-4.0)) - denom[denom < 0] = masked - term2 = ma.power((1-2.0/A)/denom,1/3.0) - Z = ( term1 - term2 ) / np.sqrt(2/(9.0*A)) - return Z, (1.0-stats.zprob(Z))*2 -kurtosistest.__doc__ = stats.kurtosistest.__doc__ - - -def normaltest(a, axis=0): - a, axis = _chk_asarray(a, axis) - s,_ = skewtest(a,axis) - k,_ = kurtosistest(a,axis) - k2 = s*s + k*k - return k2, stats.chisqprob(k2,2) -normaltest.__doc__ = stats.normaltest.__doc__ - -# Martinez-Iglewicz test -# K-S test - - -#####-------------------------------------------------------------------------- -#---- --- Percentiles --- -#####-------------------------------------------------------------------------- - - -def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, - limit=()): - """ - Computes empirical quantiles for a data array. 
- - Samples quantile are defined by ``Q(p) = (1-g).x[i] +g.x[i+1]``, - where ``x[j]`` is the j-th order statistic, ``i = (floor(n*p+m))``, - ``m=alpha+p*(1-alpha-beta)`` and ``g = n*p + m - i``. - - Typical values of (alpha,beta) are: - - (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4) - - (.5,.5) : *p(k) = (k+1/2.)/n* : piecewise linear - function (R, type 5) - - (0,0) : *p(k) = k/(n+1)* : (R type 6) - - (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])]. - That's R default (R type 7) - - (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])]. - The resulting quantile estimates are approximately median-unbiased - regardless of the distribution of x. (R type 8) - - (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*. Blom. - The resulting quantile estimates are approximately unbiased - if x is normally distributed (R type 9) - - (.4,.4) : approximately quantile unbiased (Cunnane) - - (.35,.35): APL, used with PWM - - Parameters - ---------- - a : array_like - Input data, as a sequence or array of dimension at most 2. - prob : array_like, optional - List of quantiles to compute. - alpha : float, optional - Plotting positions parameter, default is 0.4. - beta : float, optional - Plotting positions parameter, default is 0.4. - axis : int, optional - Axis along which to perform the trimming. - If None (default), the input array is first flattened. - limit : tuple - Tuple of (lower, upper) values. - Values of `a` outside this closed interval are ignored. - - Returns - ------- - mquantiles : MaskedArray - An array containing the calculated quantiles. - - Examples - -------- - >>> from scipy.stats.mstats import mquantiles - >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]) - >>> mquantiles(a) - array([ 19.2, 40. , 42.8]) - - Using a 2D array, specifying axis and limit. 
- - >>> data = np.array([[ 6., 7., 1.], - [ 47., 15., 2.], - [ 49., 36., 3.], - [ 15., 39., 4.], - [ 42., 40., -999.], - [ 41., 41., -999.], - [ 7., -999., -999.], - [ 39., -999., -999.], - [ 43., -999., -999.], - [ 40., -999., -999.], - [ 36., -999., -999.]]) - >>> mquantiles(data, axis=0, limit=(0, 50)) - array([[ 19.2 , 14.6 , 1.45], - [ 40. , 37.5 , 2.5 ], - [ 42.8 , 40.05, 3.55]]) - - >>> data[:, 2] = -999. - >>> mquantiles(data, axis=0, limit=(0, 50)) - masked_array(data = - [[19.2 14.6 --] - [40.0 37.5 --] - [42.8 40.05 --]], - mask = - [[False False True] - [False False True] - [False False True]], - fill_value = 1e+20) - - """ - def _quantiles1D(data,m,p): - x = np.sort(data.compressed()) - n = len(x) - if n == 0: - return ma.array(np.empty(len(p), dtype=float), mask=True) - elif n == 1: - return ma.array(np.resize(x, p.shape), mask=nomask) - aleph = (n*p + m) - k = np.floor(aleph.clip(1, n-1)).astype(int) - gamma = (aleph-k).clip(0,1) - return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] - - # Initialization & checks --------- - data = ma.array(a, copy=False) - if data.ndim > 2: - raise TypeError("Array should be 2D at most !") - # - if limit: - condition = (limit[0] 100.): - raise ValueError("The percentile should be between 0. and 100. !"\ - " (got %s)" % per) - return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap, - limit=limit, axis=0).squeeze() - - -def plotting_positions(data, alpha=0.4, beta=0.4): - """ - Returns plotting positions (or empirical percentile points) for the data. - - Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where: - - i is the rank order statistics - - n is the number of unmasked values along the given axis - - alpha and beta are two parameters. 
- - Typical values for alpha and beta are: - - (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4) - - (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function - (R, type 5) - - (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6) - - (1,1) : ``p(k) = (k-1)/(n-1)``, in this case, - ``p(k) = mode[F(x[k])]``. That's R default (R type 7) - - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then - ``p(k) ~ median[F(x[k])]``. - The resulting quantile estimates are approximately median-unbiased - regardless of the distribution of x. (R type 8) - - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. - The resulting quantile estimates are approximately unbiased - if x is normally distributed (R type 9) - - (.4,.4) : approximately quantile unbiased (Cunnane) - - (.35,.35): APL, used with PWM - - (.3175, .3175): used in scipy.stats.probplot - - Parameters - ---------- - data : array_like - Input data, as a sequence or array of dimension at most 2. - alpha : float, optional - Plotting positions parameter. Default is 0.4. - beta : float, optional - Plotting positions parameter. Default is 0.4. - - Returns - ------- - positions : MaskedArray - The calculated plotting positions. - - """ - data = ma.array(data, copy=False).reshape(1,-1) - n = data.count() - plpos = np.empty(data.size, dtype=float) - plpos[n:] = 0 - plpos[data.argsort()[:n]] = (np.arange(1, n+1) - alpha) / \ - (n + 1.0 - alpha - beta) - return ma.array(plpos, mask=data._mask) - -meppf = plotting_positions - -#####-------------------------------------------------------------------------- -#---- --- Variability --- -#####-------------------------------------------------------------------------- - -def obrientransform(*args): - """ -Computes a transform on input data (any number of columns). Used to -test for homogeneity of variance prior to running one-way stats. Each -array in *args is one level of a factor. If an F_oneway() run on the -transformed data and found significant, variances are unequal. 
From -Maxwell and Delaney, p.112. - -Returns: transformed data for use in an ANOVA - """ - data = argstoarray(*args).T - v = data.var(axis=0,ddof=1) - m = data.mean(0) - n = data.count(0).astype(float) - # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2)) - data -= m - data **= 2 - data *= (n-1.5)*n - data -= 0.5*v*(n-1) - data /= (n-1.)*(n-2.) - if not ma.allclose(v,data.mean(0)): - raise ValueError("Lack of convergence in obrientransform.") - return data - - -def signaltonoise(data, axis=0): - """Calculates the signal-to-noise ratio, as the ratio of the mean over - standard deviation along the given axis. - - Parameters - ---------- - data : sequence - Input data - axis : {0, int} optional - Axis along which to compute. If None, the computation is performed - on a flat version of the array. -""" - data = ma.array(data, copy=False) - m = data.mean(axis) - sd = data.std(axis, ddof=0) - return m/sd - - -def sem(a, axis=0): - a, axis = _chk_asarray(a, axis) - n = a.count(axis=axis) - s = a.std(axis=axis,ddof=0) / ma.sqrt(n-1) - return s -sem.__doc__ = stats.sem.__doc__ - -zmap = stats.zmap -zscore = stats.zscore - - -#####-------------------------------------------------------------------------- -#---- --- ANOVA --- -#####-------------------------------------------------------------------------- - - -def f_oneway(*args): - """ -Performs a 1-way ANOVA, returning an F-value and probability given -any number of groups. From Heiman, pp.394-7. 
- -Usage: f_oneway (*args) where *args is 2 or more arrays, one per - treatment group -Returns: f-value, probability -""" - # Construct a single array of arguments: each row is a group - data = argstoarray(*args) - ngroups = len(data) - ntot = data.count() - sstot = (data**2).sum() - (data.sum())**2/float(ntot) - ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum() - sswg = sstot-ssbg - dfbg = ngroups-1 - dfwg = ntot - ngroups - msb = ssbg/float(dfbg) - msw = sswg/float(dfwg) - f = msb/msw - prob = stats.fprob(dfbg,dfwg,f) - return f, prob - - -def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b): - """Calculation of Wilks lambda F-statistic for multivarite data, per - Maxwell & Delaney p.657. - """ - ER = ma.array(ER, copy=False, ndmin=2) - EF = ma.array(EF, copy=False, ndmin=2) - if ma.getmask(ER).any() or ma.getmask(EF).any(): - raise NotImplementedError("Not implemented when the inputs "\ - "have missing data") - lmbda = np.linalg.det(EF) / np.linalg.det(ER) - q = ma.sqrt( ((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 -5) ) - q = ma.filled(q, 1) - n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1) - d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1) - return n_um / d_en - - - -def friedmanchisquare(*args): - """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. - This function calculates the Friedman Chi-square test for repeated measures - and returns the result, along with the associated probability value. - - Each input is considered a given group. Ideally, the number of treatments - among each group should be equal. If this is not the case, only the first - n treatments are taken into account, where n is the number of treatments - of the smallest group. - If a group has some missing values, the corresponding treatments are masked - in the other groups. - The test statistic is corrected for ties. - - Masked values in one group are propagated to the other groups. 
- - Returns: chi-square statistic, associated p-value - """ - data = argstoarray(*args).astype(float) - k = len(data) - if k < 3: - raise ValueError("Less than 3 groups (%i): " % k +\ - "the Friedman test is NOT appropriate.") - ranked = ma.masked_values(rankdata(data, axis=0), 0) - if ranked._mask is not nomask: - ranked = ma.mask_cols(ranked) - ranked = ranked.compressed().reshape(k,-1).view(ndarray) - else: - ranked = ranked._data - (k,n) = ranked.shape - # Ties correction - repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object) - ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int) - tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k)) - # - ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2) - chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction - return chisq, stats.chisqprob(chisq,k-1) - -#-############################################################################-# diff --git a/scipy-0.10.1/scipy/stats/mstats_extras.py b/scipy-0.10.1/scipy/stats/mstats_extras.py deleted file mode 100644 index 3ba1eafbea..0000000000 --- a/scipy-0.10.1/scipy/stats/mstats_extras.py +++ /dev/null @@ -1,392 +0,0 @@ -""" -Additional statistics functions, with support to MA. 
- -:author: Pierre GF Gerard-Marchant -:contact: pierregm_at_uga_edu -:date: $Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $ -:version: $Id: morestats.py 3473 2007-10-29 15:18:13Z jarrod.millman $ -""" -__author__ = "Pierre GF Gerard-Marchant" -__docformat__ = "restructuredtext en" - - -__all__ = ['compare_medians_ms', - 'hdquantiles', 'hdmedian', 'hdquantiles_sd', - 'idealfourths', - 'median_cihs','mjci','mquantiles_cimj', - 'rsh', - 'trimmed_mean_ci',] - -import numpy as np -from numpy import float_, int_, ndarray - -import numpy.ma as ma -from numpy.ma import MaskedArray - -import mstats_basic as mstats - -from scipy.stats.distributions import norm, beta, t, binom - - -#####-------------------------------------------------------------------------- -#---- --- Quantiles --- -#####-------------------------------------------------------------------------- -def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,): - """Computes quantile estimates with the Harrell-Davis method, where the estimates -are calculated as a weighted linear combination of order statistics. - -Parameters ----------- - data: ndarray - Data array. - prob: sequence - Sequence of quantiles to compute. - axis : int - Axis along which to compute the quantiles. If None, use a flattened array. - var : boolean - Whether to return the variance of the estimate. - -Returns -------- - A (p,) array of quantiles (if ``var`` is False), or a (2,p) array of quantiles - and variances (if ``var`` is True), where ``p`` is the number of quantiles. - -Notes ------ - The function is restricted to 2D arrays. - - """ - def _hd_1D(data,prob,var): - "Computes the HD quantiles for a 1D array. Returns nan for invalid data." - xsorted = np.squeeze(np.sort(data.compressed().view(ndarray))) - # Don't use length here, in case we have a numpy scalar - n = xsorted.size - #......... - hd = np.empty((2,len(prob)), float_) - if n < 2: - hd.flat = np.nan - if var: - return hd - return hd[0] - #......... 
- v = np.arange(n+1) / float(n) - betacdf = beta.cdf - for (i,p) in enumerate(prob): - _w = betacdf(v, (n+1)*p, (n+1)*(1-p)) - w = _w[1:] - _w[:-1] - hd_mean = np.dot(w, xsorted) - hd[0,i] = hd_mean - # - hd[1,i] = np.dot(w, (xsorted-hd_mean)**2) - # - hd[0, prob == 0] = xsorted[0] - hd[0, prob == 1] = xsorted[-1] - if var: - hd[1, prob == 0] = hd[1, prob == 1] = np.nan - return hd - return hd[0] - # Initialization & checks --------- - data = ma.array(data, copy=False, dtype=float_) - p = np.array(prob, copy=False, ndmin=1) - # Computes quantiles along axis (or globally) - if (axis is None) or (data.ndim == 1): - result = _hd_1D(data, p, var) - else: - if data.ndim > 2: - raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim) - result = ma.apply_along_axis(_hd_1D, axis, data, p, var) - # - return ma.fix_invalid(result, copy=False) - -#.............................................................................. -def hdmedian(data, axis=-1, var=False): - """Returns the Harrell-Davis estimate of the median along the given axis. - -Parameters ----------- - data: ndarray - Data array. - axis : int - Axis along which to compute the quantiles. If None, use a flattened array. - var : boolean - Whether to return the variance of the estimate. - - """ - result = hdquantiles(data,[0.5], axis=axis, var=var) - return result.squeeze() - - -#.............................................................................. -def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None): - """Computes the standard error of the Harrell-Davis quantile estimates by jackknife. - - -Parameters ----------- - data: ndarray - Data array. - prob: sequence - Sequence of quantiles to compute. - axis : int - Axis along which to compute the quantiles. If None, use a flattened array. - -Notes ------ - The function is restricted to 2D arrays. - - """ - def _hdsd_1D(data,prob): - "Computes the std error for 1D arrays." 
- xsorted = np.sort(data.compressed()) - n = len(xsorted) - #......... - hdsd = np.empty(len(prob), float_) - if n < 2: - hdsd.flat = np.nan - #......... - vv = np.arange(n) / float(n-1) - betacdf = beta.cdf - # - for (i,p) in enumerate(prob): - _w = betacdf(vv, (n+1)*p, (n+1)*(1-p)) - w = _w[1:] - _w[:-1] - mx_ = np.fromiter([np.dot(w,xsorted[np.r_[range(0,k), - range(k+1,n)].astype(int_)]) - for k in range(n)], dtype=float_) - mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1) - hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n)) - return hdsd - # Initialization & checks --------- - data = ma.array(data, copy=False, dtype=float_) - p = np.array(prob, copy=False, ndmin=1) - # Computes quantiles along axis (or globally) - if (axis is None): - result = _hdsd_1D(data, p) - else: - if data.ndim > 2: - raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim) - result = ma.apply_along_axis(_hdsd_1D, axis, data, p) - # - return ma.fix_invalid(result, copy=False).ravel() - - -#####-------------------------------------------------------------------------- -#---- --- Confidence intervals --- -#####-------------------------------------------------------------------------- - -def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True), - alpha=0.05, axis=None): - """Returns the selected confidence interval of the trimmed mean along the -given axis. - -Parameters ----------- - data : sequence - Input data. The data is transformed to a masked array - proportiontocut : float - Proportion of the data to cut from each side of the data . - As a result, (2*proportiontocut*n) values are actually trimmed. - alpha : float - Confidence level of the intervals. - inclusive : tuple of boolean - If relative==False, tuple indicating whether values exactly equal to the - absolute limits are allowed. 
- If relative==True, tuple indicating whether the number of data being masked - on each side should be rounded (True) or truncated (False). - axis : int - Axis along which to cut. If None, uses a flattened version of the input. - - """ - data = ma.array(data, copy=False) - trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis) - tmean = trimmed.mean(axis) - tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis) - df = trimmed.count(axis) - 1 - tppf = t.ppf(1-alpha/2.,df) - return np.array((tmean - tppf*tstde, tmean+tppf*tstde)) - -#.............................................................................. -def mjci(data, prob=[0.25,0.5,0.75], axis=None): - """Returns the Maritz-Jarrett estimators of the standard error of selected -experimental quantiles of the data. - -Parameters ------------ - data: ndarray - Data array. - prob: sequence - Sequence of quantiles to compute. - axis : int - Axis along which to compute the quantiles. If None, use a flattened array. - - """ - def _mjci_1D(data, p): - data = np.sort(data.compressed()) - n = data.size - prob = (np.array(p) * n + 0.5).astype(int_) - betacdf = beta.cdf - # - mj = np.empty(len(prob), float_) - x = np.arange(1,n+1, dtype=float_) / n - y = x - 1./n - for (i,m) in enumerate(prob): - (m1,m2) = (m-1, n-m) - W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m) - C1 = np.dot(W,data) - C2 = np.dot(W,data**2) - mj[i] = np.sqrt(C2 - C1**2) - return mj - # - data = ma.array(data, copy=False) - if data.ndim > 2: - raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim) - p = np.array(prob, copy=False, ndmin=1) - # Computes quantiles along axis (or globally) - if (axis is None): - return _mjci_1D(data, p) - else: - return ma.apply_along_axis(_mjci_1D, axis, data, p) - -#.............................................................................. 
-def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None): - """ - Computes the alpha confidence interval for the selected quantiles of the - data, with Maritz-Jarrett estimators. - - Parameters - ---------- - data: ndarray - Data array. - prob: sequence - Sequence of quantiles to compute. - alpha : float - Confidence level of the intervals. - axis : integer - Axis along which to compute the quantiles. - If None, use a flattened array. - - """ - alpha = min(alpha, 1-alpha) - z = norm.ppf(1-alpha/2.) - xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis) - smj = mjci(data, prob, axis=axis) - return (xq - z * smj, xq + z * smj) - - -#............................................................................. -def median_cihs(data, alpha=0.05, axis=None): - """Computes the alpha-level confidence interval for the median of the data, -following the Hettmasperger-Sheather method. - -Parameters ----------- - data : sequence - Input data. Masked values are discarded. The input should be 1D only, or - axis should be set to None. - alpha : float - Confidence level of the intervals. - axis : integer - Axis along which to compute the quantiles. If None, use a flattened array. 
- """ - def _cihs_1D(data, alpha): - data = np.sort(data.compressed()) - n = len(data) - alpha = min(alpha, 1-alpha) - k = int(binom._ppf(alpha/2., n, 0.5)) - gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5) - if gk < 1-alpha: - k -= 1 - gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5) - gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5) - I = (gk - 1 + alpha)/(gk - gkk) - lambd = (n-k) * I / float(k + (n-2*k)*I) - lims = (lambd*data[k] + (1-lambd)*data[k-1], - lambd*data[n-k-1] + (1-lambd)*data[n-k]) - return lims - data = ma.rray(data, copy=False) - # Computes quantiles along axis (or globally) - if (axis is None): - result = _cihs_1D(data.compressed(), alpha) - else: - if data.ndim > 2: - raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim) - result = ma.apply_along_axis(_cihs_1D, axis, data, alpha) - # - return result - -#.............................................................................. -def compare_medians_ms(group_1, group_2, axis=None): - """Compares the medians from two independent groups along the given axis. - -The comparison is performed using the McKean-Schrader estimate of the standard -error of the medians. - -Parameters ----------- - group_1 : {sequence} - First dataset. - group_2 : {sequence} - Second dataset. - axis : {integer} - Axis along which the medians are estimated. If None, the arrays are flattened. - -Returns -------- - A (p,) array of comparison values. - - """ - (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis)) - (std_1, std_2) = (mstats.stde_median(group_1, axis=axis), - mstats.stde_median(group_2, axis=axis)) - W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2) - return 1 - norm.cdf(W) - - -def idealfourths(data, axis=None): - """Returns an estimate of the lower and upper quartiles of the data along - the given axis, as computed with the ideal fourths. 
- """ - def _idf(data): - x = data.compressed() - n = len(x) - if n < 3: - return [np.nan,np.nan] - (j,h) = divmod(n/4. + 5/12.,1) - qlo = (1-h)*x[j-1] + h*x[j] - k = n - j - qup = (1-h)*x[k] + h*x[k-1] - return [qlo, qup] - data = ma.sort(data, axis=axis).view(MaskedArray) - if (axis is None): - return _idf(data) - else: - return ma.apply_along_axis(_idf, axis, data) - - -def rsh(data, points=None): - """Evaluates Rosenblatt's shifted histogram estimators for each point -on the dataset 'data'. - -Parameters - data : sequence - Input data. Masked values are ignored. - points : sequence - Sequence of points where to evaluate Rosenblatt shifted histogram. - If None, use the data. - """ - data = ma.array(data, copy=False) - if points is None: - points = data - else: - points = np.array(points, copy=False, ndmin=1) - if data.ndim != 1: - raise AttributeError("The input array should be 1D only !") - n = data.count() - r = idealfourths(data, axis=None) - h = 1.2 * (r[-1]-r[0]) / n**(1./5) - nhi = (data[:,None] <= points[None,:] + h).sum(0) - nlo = (data[:,None] < points[None,:] - h).sum(0) - return (nhi-nlo) / (2.*n*h) - - -############################################################################### diff --git a/scipy-0.10.1/scipy/stats/mvn.pyf b/scipy-0.10.1/scipy/stats/mvn.pyf deleted file mode 100644 index 4db7e398c1..0000000000 --- a/scipy-0.10.1/scipy/stats/mvn.pyf +++ /dev/null @@ -1,39 +0,0 @@ -! -*- f90 -*- -! Note: the context of this file is case sensitive. - -python module mvn ! in - interface ! in :mvn - subroutine mvnun(d,n,lower,upper,means,covar,maxpts,abseps,releps,value,inform) ! 
in :mvn:mvndst.f - integer intent(hide) :: d=shape(means,0) - integer intent(hide) :: n=shape(means,1) - double precision dimension(d) :: lower - double precision dimension(d) :: upper - double precision dimension(d,n) :: means - double precision dimension(d,d) :: covar - integer intent(optional) :: maxpts=d*1000 - double precision intent(optional) :: abseps=1e-6 - double precision intent(optional) :: releps=1e-6 - double precision intent(out) :: value - integer intent(out) :: inform - end subroutine mvnun - - subroutine mvndst(n,lower,upper,infin,correl,maxpts,abseps,releps,error,value,inform) ! in :mvn:mvndst.f - integer intent(hide) :: n=len(lower) - double precision dimension(n) :: lower - double precision dimension(n) :: upper - integer dimension(n) :: infin - double precision dimension(n*(n-1)/2) :: correl - integer intent(optional) :: maxpts=2000 - double precision intent(optional) :: abseps=1e-6 - double precision intent(optional) :: releps=1e-6 - double precision intent(out) :: error - double precision intent(out) :: value - integer intent(out) :: inform - integer :: ivls - common /dkblck/ ivls - end subroutine mvndst - end interface -end python module mvn - -! This file was auto-generated with f2py (version:2.39.235_1752). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/stats/mvndst.f b/scipy-0.10.1/scipy/stats/mvndst.f deleted file mode 100644 index a7851e3d2a..0000000000 --- a/scipy-0.10.1/scipy/stats/mvndst.f +++ /dev/null @@ -1,1126 +0,0 @@ -* Note: The test program has been removed and a utlity routine mvnun has been -* added. RTK 2004-08-10 -* -* Copyright 2000 by Alan Genz. -* Copyright 2004-2005 by Enthought, Inc. -* -* The subroutine MVNUN is copyrighted by Enthought, Inc. -* The rest of the file is copyrighted by Alan Genz and has kindly been offered -* to the Scipy project under it's BSD-style license. 
-* -* This file contains a short test program and MVNDST, a subroutine -* for computing multivariate normal distribution function values. -* The file is self contained and should compile without errors on (77) -* standard Fortran compilers. The test program demonstrates the use of -* MVNDST for computing MVN distribution values for a five dimensional -* example problem, with three different integration limit combinations. -* -* Alan Genz -* Department of Mathematics -* Washington State University -* Pullman, WA 99164-3113 -* Email : alangenz@wsu.edu -* - SUBROUTINE mvnun(d, n, lower, upper, means, covar, maxpts, - & abseps, releps, value, inform) -* Parameters -* -* d integer, dimensionality of the data -* n integer, the number of data points -* lower double(2), the lower integration limits -* upper double(2), the upper integration limits -* means double(n), the mean of each kernel -* covar double(2,2), the covariance matrix -* maxpts integer, the maximum number of points to evaluate at -* abseps double, absolute error tolerance -* releps double, relative error tolerance -* value double intent(out), integral value -* inform integer intent(out), -* if inform == 0: error < eps -* elif inform == 1: error > eps, all maxpts used - integer n, d, infin(d), maxpts, inform, tmpinf - double precision lower(d), upper(d), releps, abseps, - & error, value, stdev(d), rho(d*(d-1)/2), - & covar(d,d), - & nlower(d), nupper(d), means(d,n), tmpval - integer i, j - - do i=1,d - stdev(i) = dsqrt(covar(i,i)) - infin(i) = 2 - end do - do i=1,d - do j=1,i-1 - rho(j+(i-2)*(i-1)/2) = covar(i,j)/stdev(i)/stdev(j) - end do - end do - value = 0d0 - - inform = 0 - - do i=1,n - do j=1,d - nlower(j) = (lower(j) - means(j,i))/stdev(j) - nupper(j) = (upper(j) - means(j,i))/stdev(j) - end do - call mvndst(d,nlower,nupper,infin,rho,maxpts,abseps,releps, - & error,tmpval,tmpinf) - value = value + tmpval - if (tmpinf .eq. 
1) then - inform = 1 - end if - end do - - value = value / n - - END - - SUBROUTINE MVNDST( N, LOWER, UPPER, INFIN, CORREL, MAXPTS, - & ABSEPS, RELEPS, ERROR, VALUE, INFORM ) -* -* A subroutine for computing multivariate normal probabilities. -* This subroutine uses an algorithm given in the paper -* "Numerical Computation of Multivariate Normal Probabilities", in -* J. of Computational and Graphical Stat., 1(1992), pp. 141-149, by -* Alan Genz -* Department of Mathematics -* Washington State University -* Pullman, WA 99164-3113 -* Email : AlanGenz@wsu.edu -* -* Parameters -* -* N INTEGER, the number of variables. -* LOWER REAL, array of lower integration limits. -* UPPER REAL, array of upper integration limits. -* INFIN INTEGER, array of integration limits flags: -* if INFIN(I) < 0, Ith limits are (-infinity, infinity); -* if INFIN(I) = 0, Ith limits are (-infinity, UPPER(I)]; -* if INFIN(I) = 1, Ith limits are [LOWER(I), infinity); -* if INFIN(I) = 2, Ith limits are [LOWER(I), UPPER(I)]. -* CORREL REAL, array of correlation coefficients; the correlation -* coefficient in row I column J of the correlation matrix -* should be stored in CORREL( J + ((I-2)*(I-1))/2 ), for J < I. -* THe correlation matrix must be positive semidefinite. -* MAXPTS INTEGER, maximum number of function values allowed. This -* parameter can be used to limit the time. A sensible -* strategy is to start with MAXPTS = 1000*N, and then -* increase MAXPTS if ERROR is too large. -* ABSEPS REAL absolute error tolerance. -* RELEPS REAL relative error tolerance. -* ERROR REAL estimated absolute error, with 99% confidence level. -* VALUE REAL estimated value for the integral -* INFORM INTEGER, termination status parameter: -* if INFORM = 0, normal completion with ERROR < EPS; -* if INFORM = 1, completion with ERROR > EPS and MAXPTS -* function vaules used; increase MAXPTS to -* decrease ERROR; -* if INFORM = 2, N > 500 or N < 1. 
-* - EXTERNAL MVNDFN - INTEGER N, INFIN(*), MAXPTS, INFORM, INFIS, IVLS - DOUBLE PRECISION CORREL(*), LOWER(*), UPPER(*), RELEPS, ABSEPS, - & ERROR, VALUE, E, D, MVNDNT, MVNDFN - COMMON /DKBLCK/IVLS - IF ( N .GT. 500 .OR. N .LT. 1 ) THEN - INFORM = 2 - VALUE = 0 - ERROR = 1 - ELSE - INFORM = MVNDNT(N, CORREL, LOWER, UPPER, INFIN, INFIS, D, E) - IF ( N-INFIS .EQ. 0 ) THEN - VALUE = 1 - ERROR = 0 - ELSE IF ( N-INFIS .EQ. 1 ) THEN - VALUE = E - D - ERROR = 2D-16 - ELSE -* -* Call the lattice rule integration subroutine -* - IVLS = 0 - CALL DKBVRC( N-INFIS-1, IVLS, MAXPTS, MVNDFN, - & ABSEPS, RELEPS, ERROR, VALUE, INFORM ) - ENDIF - ENDIF - END - DOUBLE PRECISION FUNCTION MVNDFN( N, W ) -* -* Integrand subroutine -* - INTEGER N, INFIN(*), INFIS, NL - DOUBLE PRECISION W(*), LOWER(*), UPPER(*), CORREL(*), D, E - PARAMETER ( NL = 500 ) - DOUBLE PRECISION COV(NL*(NL+1)/2), A(NL), B(NL), Y(NL) - INTEGER INFI(NL), I, J, IJ, IK, INFA, INFB - DOUBLE PRECISION SUM, AI, BI, DI, EI, PHINVS, BVNMVN, MVNDNT - SAVE A, B, INFI, COV - MVNDFN = 1 - INFA = 0 - INFB = 0 - IK = 1 - IJ = 0 - DO I = 1, N+1 - SUM = 0 - DO J = 1, I-1 - IJ = IJ + 1 - IF ( J .LT. IK ) SUM = SUM + COV(IJ)*Y(J) - END DO - IF ( INFI(I) .NE. 0 ) THEN - IF ( INFA .EQ. 1 ) THEN - AI = MAX( AI, A(I) - SUM ) - ELSE - AI = A(I) - SUM - INFA = 1 - END IF - END IF - IF ( INFI(I) .NE. 1 ) THEN - IF ( INFB .EQ. 1 ) THEN - BI = MIN( BI, B(I) - SUM ) - ELSE - BI = B(I) - SUM - INFB = 1 - END IF - END IF - IJ = IJ + 1 - IF ( I .EQ. N+1 .OR. COV(IJ+IK+1) .GT. 0 ) THEN - CALL MVNLMS( AI, BI, 2*INFA+INFB-1, DI, EI ) - IF ( DI .GE. EI ) THEN - MVNDFN = 0 - RETURN - ELSE - MVNDFN = MVNDFN*( EI - DI ) - IF ( I .LE. N ) Y(IK) = PHINVS( DI + W(IK)*( EI - DI ) ) - IK = IK + 1 - INFA = 0 - INFB = 0 - END IF - END IF - END DO - RETURN -* -* Entry point for intialization. -* - ENTRY MVNDNT( N, CORREL, LOWER, UPPER, INFIN, INFIS, D, E ) - MVNDNT = 0 -* -* Initialization and computation of covariance Cholesky factor. 
-* - CALL COVSRT( N, LOWER,UPPER,CORREL,INFIN,Y, INFIS,A,B,COV,INFI ) - IF ( N - INFIS .EQ. 1 ) THEN - CALL MVNLMS( A(1), B(1), INFI(1), D, E ) - ELSE IF ( N - INFIS .EQ. 2 ) THEN - IF ( ABS( COV(3) ) .GT. 0 ) THEN - D = SQRT( 1 + COV(2)**2 ) - IF ( INFI(2) .NE. 0 ) A(2) = A(2)/D - IF ( INFI(2) .NE. 1 ) B(2) = B(2)/D - E = BVNMVN( A, B, INFI, COV(2)/D ) - D = 0 - ELSE - IF ( INFI(1) .NE. 0 ) THEN - IF ( INFI(2) .NE. 0 ) A(1) = MAX( A(1), A(2) ) - ELSE - IF ( INFI(2) .NE. 0 ) A(1) = A(2) - END IF - IF ( INFI(1) .NE. 1 ) THEN - IF ( INFI(2) .NE. 1 ) B(1) = MIN( B(1), B(2) ) - ELSE - IF ( INFI(2) .NE. 1 ) B(1) = B(2) - END IF - IF ( INFI(1) .NE. INFI(2) ) INFI(1) = 2 - CALL MVNLMS( A(1), B(1), INFI(1), D, E ) - END IF - INFIS = INFIS + 1 - END IF - END - SUBROUTINE MVNLMS( A, B, INFIN, LOWER, UPPER ) - DOUBLE PRECISION A, B, LOWER, UPPER, MVNPHI - INTEGER INFIN - LOWER = 0 - UPPER = 1 - IF ( INFIN .GE. 0 ) THEN - IF ( INFIN .NE. 0 ) LOWER = MVNPHI(A) - IF ( INFIN .NE. 1 ) UPPER = MVNPHI(B) - ENDIF - UPPER = MAX( UPPER, LOWER ) - END - SUBROUTINE COVSRT( N, LOWER, UPPER, CORREL, INFIN, Y, - & INFIS, A, B, COV, INFI ) -* -* Subroutine to sort integration limits and determine Cholesky factor. -* - INTEGER N, INFI(*), INFIN(*), INFIS - DOUBLE PRECISION - & A(*), B(*), COV(*), LOWER(*), UPPER(*), CORREL(*), Y(*) - INTEGER I, J, K, L, M, II, IJ, IL, JMIN - DOUBLE PRECISION SUMSQ, AJ, BJ, SUM, SQTWPI, EPS, D, E - DOUBLE PRECISION CVDIAG, AMIN, BMIN, DMIN, EMIN, YL, YU - PARAMETER ( SQTWPI = 2.506628274631001D0, EPS = 1D-10 ) - IJ = 0 - II = 0 - INFIS = 0 - DO I = 1, N - A(I) = 0 - B(I) = 0 - INFI(I) = INFIN(I) - IF ( INFI(I) .LT. 0 ) THEN - INFIS = INFIS + 1 - ELSE - IF ( INFI(I) .NE. 0 ) A(I) = LOWER(I) - IF ( INFI(I) .NE. 1 ) B(I) = UPPER(I) - ENDIF - DO J = 1, I-1 - IJ = IJ + 1 - II = II + 1 - COV(IJ) = CORREL(II) - END DO - IJ = IJ + 1 - COV(IJ) = 1 - END DO -* -* First move any doubly infinite limits to innermost positions. -* - IF ( INFIS .LT. 
N ) THEN - DO I = N, N-INFIS+1, -1 - IF ( INFI(I) .GE. 0 ) THEN - DO J = 1,I-1 - IF ( INFI(J) .LT. 0 ) THEN - CALL RCSWP( J, I, A, B, INFI, N, COV ) - GO TO 10 - ENDIF - END DO - ENDIF - 10 END DO -* -* Sort remaining limits and determine Cholesky factor. -* - II = 0 - DO I = 1, N-INFIS -* -* Determine the integration limits for variable with minimum -* expected probability and interchange that variable with Ith. -* - DMIN = 0 - EMIN = 1 - JMIN = I - CVDIAG = 0 - IJ = II - DO J = I, N-INFIS - IF ( COV(IJ+J) .GT. EPS ) THEN - SUMSQ = SQRT( COV(IJ+J) ) - SUM = 0 - DO K = 1, I-1 - SUM = SUM + COV(IJ+K)*Y(K) - END DO - AJ = ( A(J) - SUM )/SUMSQ - BJ = ( B(J) - SUM )/SUMSQ - CALL MVNLMS( AJ, BJ, INFI(J), D, E ) - IF ( EMIN + D .GE. E + DMIN ) THEN - JMIN = J - AMIN = AJ - BMIN = BJ - DMIN = D - EMIN = E - CVDIAG = SUMSQ - ENDIF - ENDIF - IJ = IJ + J - END DO - IF ( JMIN .GT. I ) CALL RCSWP( I, JMIN, A,B, INFI, N, COV ) - COV(II+I) = CVDIAG -* -* Compute Ith column of Cholesky factor. -* Compute expected value for Ith integration variable and -* scale Ith covariance matrix row and limits. -* - IF ( CVDIAG .GT. 0 ) THEN - IL = II + I - DO L = I+1, N-INFIS - COV(IL+I) = COV(IL+I)/CVDIAG - IJ = II + I - DO J = I+1, L - COV(IL+J) = COV(IL+J) - COV(IL+I)*COV(IJ+I) - IJ = IJ + J - END DO - IL = IL + L - END DO - IF ( EMIN .GT. DMIN + EPS ) THEN - YL = 0 - YU = 0 - IF ( INFI(I) .NE. 0 ) YL = -EXP( -AMIN**2/2 )/SQTWPI - IF ( INFI(I) .NE. 1 ) YU = -EXP( -BMIN**2/2 )/SQTWPI - Y(I) = ( YU - YL )/( EMIN - DMIN ) - ELSE - IF ( INFI(I) .EQ. 0 ) Y(I) = BMIN - IF ( INFI(I) .EQ. 1 ) Y(I) = AMIN - IF ( INFI(I) .EQ. 2 ) Y(I) = ( AMIN + BMIN )/2 - END IF - DO J = 1, I - II = II + 1 - COV(II) = COV(II)/CVDIAG - END DO - A(I) = A(I)/CVDIAG - B(I) = B(I)/CVDIAG - ELSE - IL = II + I - DO L = I+1, N-INFIS - COV(IL+I) = 0 - IL = IL + L - END DO -* -* If the covariance matrix diagonal entry is zero, -* permute limits and/or rows, if necessary. -* -* - DO J = I-1, 1, -1 - IF ( ABS( COV(II+J) ) .GT. 
EPS ) THEN - A(I) = A(I)/COV(II+J) - B(I) = B(I)/COV(II+J) - IF ( COV(II+J) .LT. 0 ) THEN - CALL DKSWAP( A(I), B(I) ) - IF ( INFI(I) .NE. 2 ) INFI(I) = 1 - INFI(I) - END IF - DO L = 1, J - COV(II+L) = COV(II+L)/COV(II+J) - END DO - DO L = J+1, I-1 - IF( COV((L-1)*L/2+J+1) .GT. 0 ) THEN - IJ = II - DO K = I-1, L, -1 - DO M = 1, K - CALL DKSWAP( COV(IJ-K+M), COV(IJ+M) ) - END DO - CALL DKSWAP( A(K), A(K+1) ) - CALL DKSWAP( B(K), B(K+1) ) - M = INFI(K) - INFI(K) = INFI(K+1) - INFI(K+1) = M - IJ = IJ - K - END DO - GO TO 20 - END IF - END DO - GO TO 20 - END IF - COV(II+J) = 0 - END DO - 20 II = II + I - Y(I) = 0 - END IF - END DO - ENDIF - END -* - SUBROUTINE DKSWAP( X, Y ) - DOUBLE PRECISION X, Y, T - T = X - X = Y - Y = T - END -* - SUBROUTINE RCSWP( P, Q, A, B, INFIN, N, C ) -* -* Swaps rows and columns P and Q in situ, with P <= Q. -* - DOUBLE PRECISION A(*), B(*), C(*) - INTEGER INFIN(*), P, Q, N, I, J, II, JJ - CALL DKSWAP( A(P), A(Q) ) - CALL DKSWAP( B(P), B(Q) ) - J = INFIN(P) - INFIN(P) = INFIN(Q) - INFIN(Q) = J - JJ = ( P*( P - 1 ) )/2 - II = ( Q*( Q - 1 ) )/2 - CALL DKSWAP( C(JJ+P), C(II+Q) ) - DO J = 1, P-1 - CALL DKSWAP( C(JJ+J), C(II+J) ) - END DO - JJ = JJ + P - DO I = P+1, Q-1 - CALL DKSWAP( C(JJ+P), C(II+I) ) - JJ = JJ + I - END DO - II = II + Q - DO I = Q+1, N - CALL DKSWAP( C(II+P), C(II+Q) ) - II = II + I - END DO - END -* - SUBROUTINE DKBVRC( NDIM, MINVLS, MAXVLS, FUNCTN, ABSEPS, RELEPS, - & ABSERR, FINEST, INFORM ) -* -* Automatic Multidimensional Integration Subroutine -* -* AUTHOR: Alan Genz -* Department of Mathematics -* Washington State University -* Pulman, WA 99164-3113 -* Email: AlanGenz@wsu.edu -* -* Last Change: 1/15/03 -* -* KRBVRC computes an approximation to the integral -* -* 1 1 1 -* I I ... I F(X) dx(NDIM)...dx(2)dx(1) -* 0 0 0 -* -* -* DKBVRC uses randomized Korobov rules for the first 100 variables. -* The primary references are -* "Randomization of Number Theoretic Methods for Multiple Integration" -* R. Cranley and T.N.L. 
Patterson, SIAM J Numer Anal, 13, pp. 904-14, -* and -* "Optimal Parameters for Multidimensional Integration", -* P. Keast, SIAM J Numer Anal, 10, pp.831-838. -* If there are more than 100 variables, the remaining variables are -* integrated using the rules described in the reference -* "On a Number-Theoretical Integration Method" -* H. Niederreiter, Aequationes Mathematicae, 8(1972), pp. 304-11. -* -*************** Parameters ******************************************** -****** Input parameters -* NDIM Number of variables, must exceed 1, but not exceed 40 -* MINVLS Integer minimum number of function evaluations allowed. -* MINVLS must not exceed MAXVLS. If MINVLS < 0 then the -* routine assumes a previous call has been made with -* the same integrand and continues that calculation. -* MAXVLS Integer maximum number of function evaluations allowed. -* FUNCTN EXTERNALly declared user defined function to be integrated. -* It must have parameters (NDIM,Z), where Z is a real array -* of dimension NDIM. -* -* ABSEPS Required absolute accuracy. -* RELEPS Required relative accuracy. -****** Output parameters -* MINVLS Actual number of function evaluations used. -* ABSERR Estimated absolute accuracy of FINEST. -* FINEST Estimated value of integral. -* INFORM INFORM = 0 for normal exit, when -* ABSERR <= MAX(ABSEPS, RELEPS*ABS(FINEST)) -* and -* INTVLS <= MAXCLS. -* INFORM = 1 If MAXVLS was too small to obtain the required -* accuracy. In this case a value FINEST is returned with -* estimated absolute accuracy ABSERR. 
-************************************************************************ - EXTERNAL FUNCTN - INTEGER NDIM, MINVLS, MAXVLS, INFORM, NP, PLIM, NLIM, KLIM, KLIMI, - & SAMPLS, I, INTVLS, MINSMP - PARAMETER ( PLIM = 28, NLIM = 1000, KLIM = 100, MINSMP = 8 ) - INTEGER P(PLIM), C(PLIM,KLIM-1) - DOUBLE PRECISION FUNCTN, ABSEPS, RELEPS, FINEST, ABSERR, DIFINT, - & FINVAL, VARSQR, VAREST, VARPRD, VALUE - DOUBLE PRECISION X(2*NLIM), VK(NLIM), ONE - PARAMETER ( ONE = 1 ) - SAVE P, C, SAMPLS, NP, VAREST - INFORM = 1 - INTVLS = 0 - KLIMI = KLIM - IF ( MINVLS .GE. 0 ) THEN - FINEST = 0 - VAREST = 0 - SAMPLS = MINSMP - DO I = MIN( NDIM, 10), PLIM - NP = I - IF ( MINVLS .LT. 2*SAMPLS*P(I) ) GO TO 10 - END DO - SAMPLS = MAX( MINSMP, MINVLS/( 2*P(NP) ) ) - ENDIF - 10 VK(1) = ONE/P(NP) - DO I = 2, NDIM - IF ( I .LE. KLIM ) THEN - VK(I) = MOD( C(NP, MIN(NDIM-1,KLIM-1))*VK(I-1), ONE ) - ELSE - VK(I) = INT( P(NP)*2**(DBLE(I-KLIM)/(NDIM-KLIM+1)) ) - VK(I) = MOD( VK(I)/P(NP), ONE ) - END IF - END DO - FINVAL = 0 - VARSQR = 0 - DO I = 1, SAMPLS - CALL DKSMRC( NDIM, KLIMI, VALUE, P(NP), VK, FUNCTN, X ) - DIFINT = ( VALUE - FINVAL )/I - FINVAL = FINVAL + DIFINT - VARSQR = ( I - 2 )*VARSQR/I + DIFINT**2 - END DO - INTVLS = INTVLS + 2*SAMPLS*P(NP) - VARPRD = VAREST*VARSQR - FINEST = FINEST + ( FINVAL - FINEST )/( 1 + VARPRD ) - IF ( VARSQR .GT. 0 ) VAREST = ( 1 + VARPRD )/VARSQR - ABSERR = 7*SQRT( VARSQR/( 1 + VARPRD ) )/2 - IF ( ABSERR .GT. MAX( ABSEPS, ABS(FINEST)*RELEPS ) ) THEN - IF ( NP .LT. PLIM ) THEN - NP = NP + 1 - ELSE - SAMPLS = MIN( 3*SAMPLS/2, ( MAXVLS - INTVLS )/( 2*P(NP) ) ) - SAMPLS = MAX( MINSMP, SAMPLS ) - ENDIF - IF ( INTVLS + 2*SAMPLS*P(NP) .LE. 
MAXVLS ) GO TO 10 - ELSE - INFORM = 0 - ENDIF - MINVLS = INTVLS -* -* Optimal Parameters for Lattice Rules -* - DATA P( 1),(C( 1,I),I = 1,99)/ 31, 12, 2*9, 13, 8*12, 3*3, 12, - & 2*7, 9*12, 3*3, 12, 2*7, 9*12, 3*3, 12, 2*7, 9*12, 3*3, 12, 2*7, - & 8*12, 7, 3*3, 3*7, 21*3/ - DATA P( 2),(C( 2,I),I = 1,99)/ 47, 13, 11, 17, 10, 6*15, - & 22, 2*15, 3*6, 2*15, 9, 13, 3*2, 13, 2*11, 10, 9*15, 3*6, 2*15, - & 9, 13, 3*2, 13, 2*11, 10, 9*15, 3*6, 2*15, 9, 13, 3*2, 13, 2*11, - & 2*10, 8*15, 6, 2, 3, 2, 3, 12*2/ - DATA P( 3),(C( 3,I),I = 1,99)/ 73, 27, 28, 10, 2*11, 20, - & 2*11, 28, 2*13, 28, 3*13, 16*14, 2*31, 3*5, 31, 13, 6*11, 7*13, - & 16*14, 2*31, 3*5, 11, 13, 7*11, 2*13, 11, 13, 4*5, 14, 13, 8*5/ - DATA P( 4),(C( 4,I),I = 1,99)/ 113, 35, 2*27, 36, 22, 2*29, - & 20, 45, 3*5, 16*21, 29, 10*17, 12*23, 21, 27, 3*3, 24, 2*27, - & 17, 3*29, 17, 4*5, 16*21, 3*17, 6, 2*17, 6, 3, 2*6, 5*3/ - DATA P( 5),(C( 5,I),I = 1,99)/ 173, 64, 66, 2*28, 2*44, 55, - & 67, 6*10, 2*38, 5*10, 12*49, 2*38, 31, 2*4, 31, 64, 3*4, 64, - & 6*45, 19*66, 11, 9*66, 45, 11, 7, 3, 3*2, 27, 5, 2*3, 2*5, 7*2/ - DATA P( 6),(C( 6,I),I = 1,99)/ 263, 111, 42, 54, 118, 20, - & 2*31, 72, 17, 94, 2*14, 11, 3*14, 94, 4*10, 7*14, 3*11, 7*8, - & 5*18, 113, 2*62, 2*45, 17*113, 2*63, 53, 63, 15*67, 5*51, 12, - & 51, 12, 51, 5, 2*3, 2*2, 5/ - DATA P( 7),(C( 7,I),I = 1,99)/ 397, 163, 154, 83, 43, 82, - & 92, 150, 59, 2*76, 47, 2*11, 100, 131, 6*116, 9*138, 21*101, - & 6*116, 5*100, 5*138, 19*101, 8*38, 5*3/ - DATA P( 8),(C( 8,I),I = 1,99)/ 593, 246, 189, 242, 102, - & 2*250, 102, 250, 280, 118, 196, 118, 191, 215, 2*121, - & 12*49, 34*171, 8*161, 17*14, 6*10, 103, 4*10, 5/ - DATA P( 9),(C( 9,I),I = 1,99)/ 907, 347, 402, 322, 418, - & 215, 220, 3*339, 337, 218, 4*315, 4*167, 361, 201, 11*124, - & 2*231, 14*90, 4*48, 23*90, 10*243, 9*283, 16, 283, 16, 2*283/ - DATA P(10),(C(10,I),I = 1,99)/ 1361, 505, 220, 601, 644, - & 612, 160, 3*206, 422, 134, 518, 2*134, 518, 652, 382, - & 206, 158, 441, 179, 441, 56, 2*559, 14*56, 
2*101, 56, - & 8*101, 7*193, 21*101, 17*122, 4*101/ - DATA P(11),(C(11,I),I = 1,99)/ 2053, 794, 325, 960, 528, - & 2*247, 338, 366, 847, 2*753, 236, 2*334, 461, 711, 652, - & 3*381, 652, 7*381, 226, 7*326, 126, 10*326, 2*195, 19*55, - & 7*195, 11*132, 13*387/ - DATA P(12),(C(12,I),I = 1,99)/ 3079, 1189, 888, 259, 1082, 725, - & 811, 636, 965, 2*497, 2*1490, 392, 1291, 2*508, 2*1291, 508, - & 1291, 2*508, 4*867, 934, 7*867, 9*1284, 4*563, 3*1010, 208, - & 838, 3*563, 2*759, 564, 2*759, 4*801, 5*759, 8*563, 22*226/ - DATA P(13),(C(13,I),I = 1,99)/ 4621, 1763, 1018, 1500, 432, - & 1332, 2203, 126, 2240, 1719, 1284, 878, 1983, 4*266, - & 2*747, 2*127, 2074, 127, 2074, 1400, 10*1383, 1400, 7*1383, - & 507, 4*1073, 5*1990, 9*507, 17*1073, 6*22, 1073, 6*452, 318, - & 4*301, 2*86, 15/ - DATA P(14),(C(14,I),I = 1,99)/ 6947, 2872, 3233, 1534, 2941, - & 2910, 393, 1796, 919, 446, 2*919, 1117, 7*103, 2311, 3117, 1101, - & 2*3117, 5*1101, 8*2503, 7*429, 3*1702, 5*184, 34*105, 13*784/ - DATA P(15),(C(15,I),I = 1,99)/ 10427, 4309, 3758, 4034, 1963, - & 730, 642, 1502, 2246, 3834, 1511, 2*1102, 2*1522, 2*3427, - & 3928, 2*915, 4*3818, 3*4782, 3818, 4782, 2*3818, 7*1327, 9*1387, - & 13*2339, 18*3148, 3*1776, 3*3354, 925, 2*3354, 5*925, 8*2133/ - DATA P(16),(C(16,I),I = 1,99)/ 15641, 6610, 6977, 1686, 3819, - & 2314, 5647, 3953, 3614, 5115, 2*423, 5408, 7426, 2*423, - & 487, 6227, 2660, 6227, 1221, 3811, 197, 4367, 351, - & 1281, 1221, 3*351, 7245, 1984, 6*2999, 3995, 4*2063, 1644, - & 2063, 2077, 3*2512, 4*2077, 19*754, 2*1097, 4*754, 248, 754, - & 4*1097, 4*222, 754,11*1982/ - DATA P(17),(C(17,I),I = 1,99)/ 23473, 9861, 3647, 4073, 2535, - & 3430, 9865, 2830, 9328, 4320, 5913, 10365, 8272, 3706, 6186, - & 3*7806, 8610, 2563, 2*11558, 9421, 1181, 9421, 3*1181, 9421, - & 2*1181, 2*10574, 5*3534, 3*2898, 3450, 7*2141, 15*7055, 2831, - & 24*8204, 3*4688, 8*2831/ - DATA P(18),(C(18,I),I = 1,99)/ 35221, 10327, 7582, 7124, 8214, - & 9600, 10271, 10193, 10800, 9086, 2365, 4409, 13812, - & 
5661, 2*9344, 10362, 2*9344, 8585, 11114, 3*13080, 6949, - & 3*3436, 13213, 2*6130, 2*8159, 11595, 8159, 3436, 18*7096, - & 4377, 7096, 5*4377, 2*5410, 32*4377, 2*440, 3*1199/ - DATA P(19),(C(19,I),I = 1,99)/ 52837, 19540, 19926, 11582, - & 11113, 24585, 8726, 17218, 419, 3*4918, 15701, 17710, - & 2*4037, 15808, 11401, 19398, 2*25950, 4454, 24987, 11719, - & 8697, 5*1452, 2*8697, 6436, 21475, 6436, 22913, 6434, 18497, - & 4*11089, 2*3036, 4*14208, 8*12906, 4*7614, 6*5021, 24*10145, - & 6*4544, 4*8394/ - DATA P(20),(C(20,I),I = 1,99)/ 79259, 34566, 9579, 12654, - & 26856, 37873, 38806, 29501, 17271, 3663, 10763, 18955, - & 1298, 26560, 2*17132, 2*4753, 8713, 18624, 13082, 6791, - & 1122, 19363, 34695, 4*18770, 15628, 4*18770, 33766, 6*20837, - & 5*6545, 14*12138, 5*30483, 19*12138, 9305, 13*11107, 2*9305/ - DATA P(21),(C(21,I),I = 1,99)/118891, 31929, 49367, 10982, 3527, - & 27066, 13226, 56010, 18911, 40574, 2*20767, 9686, 2*47603, - & 2*11736, 41601, 12888, 32948, 30801, 44243, 2*53351, 16016, - & 2*35086, 32581, 2*2464, 49554, 2*2464, 2*49554, 2464, 81, 27260, - & 10681, 7*2185, 5*18086, 2*17631, 3*18086, 37335, 3*37774, - & 13*26401, 12982, 6*40398, 3*3518, 9*37799, 4*4721, 4*7067/ - DATA P(22),(C(22,I),I = 1,99)/178349, 40701, 69087, 77576, 64590, - & 39397, 33179, 10858, 38935, 43129, 2*35468, 5279, 2*61518, 27945, - & 2*70975, 2*86478, 2*20514, 2*73178, 2*43098, 4701, - & 2*59979, 58556, 69916, 2*15170, 2*4832, 43064, 71685, 4832, - & 3*15170, 3*27679, 2*60826, 2*6187, 5*4264, 45567, 4*32269, - & 9*62060, 13*1803, 12*51108, 2*55315, 5*54140, 13134/ - DATA P(23),(C(23,I),I = 1,99)/267523, 103650, 125480, 59978, - & 46875, 77172, 83021, 126904, 14541, 56299, 43636, 11655, - & 52680, 88549, 29804, 101894, 113675, 48040, 113675, - & 34987, 48308, 97926, 5475, 49449, 6850, 2*62545, 9440, - & 33242, 9440, 33242, 9440, 33242, 9440, 62850, 3*9440, - & 3*90308, 9*47904, 7*41143, 5*36114, 24997, 14*65162, 7*47650, - & 7*40586, 4*38725, 5*88329/ - DATA P(24),(C(24,I),I 
= 1,99)/401287, 165843, 90647, 59925, - & 189541, 67647, 74795, 68365, 167485, 143918, 74912, - & 167289, 75517, 8148, 172106, 126159,3*35867, 121694, - & 52171, 95354, 2*113969, 76304, 2*123709, 144615, 123709, - & 2*64958, 32377, 2*193002, 25023, 40017, 141605, 2*189165, - & 141605, 2*189165, 3*141605, 189165, 20*127047, 10*127785, - & 6*80822, 16*131661, 7114, 131661/ - DATA P(25),(C(25,I),I = 1,99)/601942, 130365, 236711, 110235, - & 125699, 56483, 93735, 234469, 60549, 1291, 93937, - & 245291, 196061, 258647, 162489, 176631, 204895, 73353, - & 172319, 28881, 136787,2*122081, 275993, 64673, 3*211587, - & 2*282859, 211587, 242821, 3*256865, 122203, 291915, 122203, - & 2*291915, 122203, 2*25639, 291803, 245397, 284047, - & 7*245397, 94241, 2*66575, 19*217673, 10*210249, 15*94453/ - DATA P(26),(C(26,I),I = 1,99)/902933, 333459, 375354, 102417, - & 383544, 292630, 41147, 374614, 48032, 435453, 281493, 358168, - & 114121, 346892, 238990, 317313, 164158, 35497, 2*70530, 434839, - & 3*24754, 393656, 2*118711, 148227, 271087, 355831, 91034, - & 2*417029, 2*91034, 417029, 91034, 2*299843, 2*413548, 308300, - & 3*413548, 3*308300, 413548, 5*308300, 4*15311, 2*176255, 6*23613, - & 172210, 4* 204328, 5*121626, 5*200187, 2*121551, 12*248492, - & 5*13942/ - DATA P(27), (C(27,I), I = 1,99)/ 1354471, 500884, 566009, 399251, - & 652979, 355008, 430235, 328722, 670680, 2*405585, 424646, - & 2*670180, 641587, 215580, 59048, 633320, 81010, 20789, 2*389250, - & 2*638764, 2*389250, 398094, 80846, 2*147776, 296177, 2*398094, - & 2*147776, 396313, 3*578233, 19482, 620706, 187095, 620706, - & 187095, 126467, 12*241663, 321632, 2*23210, 3*394484, 3*78101, - & 19*542095, 3*277743, 12*457259/ - DATA P(28), (C(28,I), I = 1, 99)/ 2031713, 858339, 918142, 501970, - & 234813, 460565, 31996, 753018, 256150, 199809, 993599, 245149, - & 794183, 121349, 150619, 376952, 2*809123, 804319, 67352, 969594, - & 434796, 969594, 804319, 391368, 761041, 754049, 466264, 2*754049, - & 466264, 2*754049, 
282852, 429907, 390017, 276645, 994856, 250142, - & 144595, 907454, 689648, 4*687580, 978368, 687580, 552742, 105195, - & 942843, 768249, 4*307142, 7*880619, 11*117185, 11*60731, - & 4*178309, 8*74373, 3*214965/ -* - END -* - SUBROUTINE DKSMRC( NDIM, KLIM, SUMKRO, PRIME, VK, FUNCTN, X ) - EXTERNAL FUNCTN - INTEGER NDIM, NK, KLIM, PRIME, K, J, JP - DOUBLE PRECISION SUMKRO, VK(*), FUNCTN, X(*), ONE, XT, MVNUNI - PARAMETER ( ONE = 1 ) - SUMKRO = 0 - NK = MIN( NDIM, KLIM ) - DO J = 1, NK - 1 - JP = J + MVNUNI()*( NK + 1 - J ) - XT = VK(J) - VK(J) = VK(JP) - VK(JP) = XT - END DO - DO J = 1, NDIM - X(NDIM+J) = MVNUNI() - END DO - DO K = 1, PRIME - DO J = 1, NDIM - X(J) = ABS( 2*MOD( K*VK(J) + X(NDIM+J), ONE ) - 1 ) - END DO - SUMKRO = SUMKRO + ( FUNCTN(NDIM,X) - SUMKRO )/( 2*K - 1 ) - DO J = 1, NDIM - X(J) = 1 - X(J) - END DO - SUMKRO = SUMKRO + ( FUNCTN(NDIM,X) - SUMKRO )/( 2*K ) - END DO - END -* - DOUBLE PRECISION FUNCTION MVNPHI( Z ) -* -* Normal distribution probabilities accurate to 1.e-15. -* Z = no. of standard deviations from the mean. -* -* Based upon algorithm 5666 for the error function, from: -* Hart, J.F. 
et al, 'Computer Approximations', Wiley 1968 -* -* Programmer: Alan Miller -* -* Latest revision - 30 March 1986 -* - DOUBLE PRECISION P0, P1, P2, P3, P4, P5, P6, - * Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, - * Z, P, EXPNTL, CUTOFF, ROOTPI, ZABS - PARAMETER( - * P0 = 220.20 68679 12376 1D0, - * P1 = 221.21 35961 69931 1D0, - * P2 = 112.07 92914 97870 9D0, - * P3 = 33.912 86607 83830 0D0, - * P4 = 6.3739 62203 53165 0D0, - * P5 = .70038 30644 43688 1D0, - * P6 = .035262 49659 98910 9D0 ) - PARAMETER( - * Q0 = 440.41 37358 24752 2D0, - * Q1 = 793.82 65125 19948 4D0, - * Q2 = 637.33 36333 78831 1D0, - * Q3 = 296.56 42487 79673 7D0, - * Q4 = 86.780 73220 29460 8D0, - * Q5 = 16.064 17757 92069 5D0, - * Q6 = 1.7556 67163 18264 2D0, - * Q7 = .088388 34764 83184 4D0 ) - PARAMETER( ROOTPI = 2.5066 28274 63100 1D0 ) - PARAMETER( CUTOFF = 7.0710 67811 86547 5D0 ) -* - ZABS = ABS(Z) -* -* |Z| > 37 -* - IF ( ZABS .GT. 37 ) THEN - P = 0 - ELSE -* -* |Z| <= 37 -* - EXPNTL = EXP( -ZABS**2/2 ) -* -* |Z| < CUTOFF = 10/SQRT(2) -* - IF ( ZABS .LT. CUTOFF ) THEN - P = EXPNTL*( (((((P6*ZABS + P5)*ZABS + P4)*ZABS + P3)*ZABS - * + P2)*ZABS + P1)*ZABS + P0)/(((((((Q7*ZABS + Q6)*ZABS - * + Q5)*ZABS + Q4)*ZABS + Q3)*ZABS + Q2)*ZABS + Q1)*ZABS - * + Q0 ) -* -* |Z| >= CUTOFF. -* - ELSE - P = EXPNTL/( ZABS + 1/( ZABS + 2/( ZABS + 3/( ZABS - * + 4/( ZABS + 0.65D0 ) ) ) ) )/ROOTPI - END IF - END IF - IF ( Z .GT. 0 ) P = 1 - P - MVNPHI = P - END - DOUBLE PRECISION FUNCTION PHINVS(P) -* -* ALGORITHM AS241 APPL. STATIST. (1988) VOL. 37, NO. 3 -* -* Produces the normal deviate Z corresponding to a given lower -* tail area of P. -* -* The hash sums below are the sums of the mantissas of the -* coefficients. They are included for use in checking -* transcription. 
-* - DOUBLE PRECISION SPLIT1, SPLIT2, CONST1, CONST2, - * A0, A1, A2, A3, A4, A5, A6, A7, B1, B2, B3, B4, B5, B6, B7, - * C0, C1, C2, C3, C4, C5, C6, C7, D1, D2, D3, D4, D5, D6, D7, - * E0, E1, E2, E3, E4, E5, E6, E7, F1, F2, F3, F4, F5, F6, F7, - * P, Q, R - PARAMETER ( SPLIT1 = 0.425, SPLIT2 = 5, - * CONST1 = 0.180625D0, CONST2 = 1.6D0 ) -* -* Coefficients for P close to 0.5 -* - PARAMETER ( - * A0 = 3.38713 28727 96366 6080D0, - * A1 = 1.33141 66789 17843 7745D+2, - * A2 = 1.97159 09503 06551 4427D+3, - * A3 = 1.37316 93765 50946 1125D+4, - * A4 = 4.59219 53931 54987 1457D+4, - * A5 = 6.72657 70927 00870 0853D+4, - * A6 = 3.34305 75583 58812 8105D+4, - * A7 = 2.50908 09287 30122 6727D+3, - * B1 = 4.23133 30701 60091 1252D+1, - * B2 = 6.87187 00749 20579 0830D+2, - * B3 = 5.39419 60214 24751 1077D+3, - * B4 = 2.12137 94301 58659 5867D+4, - * B5 = 3.93078 95800 09271 0610D+4, - * B6 = 2.87290 85735 72194 2674D+4, - * B7 = 5.22649 52788 52854 5610D+3 ) -* HASH SUM AB 55.88319 28806 14901 4439 -* -* Coefficients for P not close to 0, 0.5 or 1. -* - PARAMETER ( - * C0 = 1.42343 71107 49683 57734D0, - * C1 = 4.63033 78461 56545 29590D0, - * C2 = 5.76949 72214 60691 40550D0, - * C3 = 3.64784 83247 63204 60504D0, - * C4 = 1.27045 82524 52368 38258D0, - * C5 = 2.41780 72517 74506 11770D-1, - * C6 = 2.27238 44989 26918 45833D-2, - * C7 = 7.74545 01427 83414 07640D-4, - * D1 = 2.05319 16266 37758 82187D0, - * D2 = 1.67638 48301 83803 84940D0, - * D3 = 6.89767 33498 51000 04550D-1, - * D4 = 1.48103 97642 74800 74590D-1, - * D5 = 1.51986 66563 61645 71966D-2, - * D6 = 5.47593 80849 95344 94600D-4, - * D7 = 1.05075 00716 44416 84324D-9 ) -* HASH SUM CD 49.33206 50330 16102 89036 -* -* Coefficients for P near 0 or 1. 
-* - PARAMETER ( - * E0 = 6.65790 46435 01103 77720D0, - * E1 = 5.46378 49111 64114 36990D0, - * E2 = 1.78482 65399 17291 33580D0, - * E3 = 2.96560 57182 85048 91230D-1, - * E4 = 2.65321 89526 57612 30930D-2, - * E5 = 1.24266 09473 88078 43860D-3, - * E6 = 2.71155 55687 43487 57815D-5, - * E7 = 2.01033 43992 92288 13265D-7, - * F1 = 5.99832 20655 58879 37690D-1, - * F2 = 1.36929 88092 27358 05310D-1, - * F3 = 1.48753 61290 85061 48525D-2, - * F4 = 7.86869 13114 56132 59100D-4, - * F5 = 1.84631 83175 10054 68180D-5, - * F6 = 1.42151 17583 16445 88870D-7, - * F7 = 2.04426 31033 89939 78564D-15 ) -* HASH SUM EF 47.52583 31754 92896 71629 -* - Q = ( 2*P - 1 )/2 - IF ( ABS(Q) .LE. SPLIT1 ) THEN - R = CONST1 - Q*Q - PHINVS = Q*( ( ( ((((A7*R + A6)*R + A5)*R + A4)*R + A3) - * *R + A2 )*R + A1 )*R + A0 ) - * /( ( ( ((((B7*R + B6)*R + B5)*R + B4)*R + B3) - * *R + B2 )*R + B1 )*R + 1 ) - ELSE - R = MIN( P, 1 - P ) - IF ( R .GT. 0 ) THEN - R = SQRT( -LOG(R) ) - IF ( R .LE. SPLIT2 ) THEN - R = R - CONST2 - PHINVS = ( ( ( ((((C7*R + C6)*R + C5)*R + C4)*R + C3) - * *R + C2 )*R + C1 )*R + C0 ) - * /( ( ( ((((D7*R + D6)*R + D5)*R + D4)*R + D3) - * *R + D2 )*R + D1 )*R + 1 ) - ELSE - R = R - SPLIT2 - PHINVS = ( ( ( ((((E7*R + E6)*R + E5)*R + E4)*R + E3) - * *R + E2 )*R + E1 )*R + E0 ) - * /( ( ( ((((F7*R + F6)*R + F5)*R + F4)*R + F3) - * *R + F2 )*R + F1 )*R + 1 ) - END IF - ELSE - PHINVS = 9 - END IF - IF ( Q .LT. 0 ) PHINVS = - PHINVS - END IF - END - DOUBLE PRECISION FUNCTION BVNMVN( LOWER, UPPER, INFIN, CORREL ) -* -* A function for computing bivariate normal probabilities. -* -* Parameters -* -* LOWER REAL, array of lower integration limits. -* UPPER REAL, array of upper integration limits. -* INFIN INTEGER, array of integration limits flags: -* if INFIN(I) = 0, Ith limits are (-infinity, UPPER(I)]; -* if INFIN(I) = 1, Ith limits are [LOWER(I), infinity); -* if INFIN(I) = 2, Ith limits are [LOWER(I), UPPER(I)]. -* CORREL REAL, correlation coefficient. 
-* - DOUBLE PRECISION LOWER(*), UPPER(*), CORREL, BVU - INTEGER INFIN(*) - IF ( INFIN(1) .EQ. 2 .AND. INFIN(2) .EQ. 2 ) THEN - BVNMVN = BVU ( LOWER(1), LOWER(2), CORREL ) - + - BVU ( UPPER(1), LOWER(2), CORREL ) - + - BVU ( LOWER(1), UPPER(2), CORREL ) - + + BVU ( UPPER(1), UPPER(2), CORREL ) - ELSE IF ( INFIN(1) .EQ. 2 .AND. INFIN(2) .EQ. 1 ) THEN - BVNMVN = BVU ( LOWER(1), LOWER(2), CORREL ) - + - BVU ( UPPER(1), LOWER(2), CORREL ) - ELSE IF ( INFIN(1) .EQ. 1 .AND. INFIN(2) .EQ. 2 ) THEN - BVNMVN = BVU ( LOWER(1), LOWER(2), CORREL ) - + - BVU ( LOWER(1), UPPER(2), CORREL ) - ELSE IF ( INFIN(1) .EQ. 2 .AND. INFIN(2) .EQ. 0 ) THEN - BVNMVN = BVU ( -UPPER(1), -UPPER(2), CORREL ) - + - BVU ( -LOWER(1), -UPPER(2), CORREL ) - ELSE IF ( INFIN(1) .EQ. 0 .AND. INFIN(2) .EQ. 2 ) THEN - BVNMVN = BVU ( -UPPER(1), -UPPER(2), CORREL ) - + - BVU ( -UPPER(1), -LOWER(2), CORREL ) - ELSE IF ( INFIN(1) .EQ. 1 .AND. INFIN(2) .EQ. 0 ) THEN - BVNMVN = BVU ( LOWER(1), -UPPER(2), -CORREL ) - ELSE IF ( INFIN(1) .EQ. 0 .AND. INFIN(2) .EQ. 1 ) THEN - BVNMVN = BVU ( -UPPER(1), LOWER(2), -CORREL ) - ELSE IF ( INFIN(1) .EQ. 1 .AND. INFIN(2) .EQ. 1 ) THEN - BVNMVN = BVU ( LOWER(1), LOWER(2), CORREL ) - ELSE IF ( INFIN(1) .EQ. 0 .AND. INFIN(2) .EQ. 0 ) THEN - BVNMVN = BVU ( -UPPER(1), -UPPER(2), CORREL ) - END IF - END - DOUBLE PRECISION FUNCTION BVU( SH, SK, R ) -* -* A function for computing bivariate normal probabilities. -* -* Yihong Ge -* Department of Computer Science and Electrical Engineering -* Washington State University -* Pullman, WA 99164-2752 -* and -* Alan Genz -* Department of Mathematics -* Washington State University -* Pullman, WA 99164-3113 -* Email : alangenz@wsu.edu -* -* BVN - calculate the probability that X is larger than SH and Y is -* larger than SK. 
-* -* Parameters -* -* SH REAL, integration limit -* SK REAL, integration limit -* R REAL, correlation coefficient -* LG INTEGER, number of Gauss Rule Points and Weights -* - DOUBLE PRECISION BVN, SH, SK, R, ZERO, TWOPI - INTEGER I, LG, NG - PARAMETER ( ZERO = 0, TWOPI = 6.283185307179586D0 ) - DOUBLE PRECISION X(10,3), W(10,3), AS, A, B, C, D, RS, XS - DOUBLE PRECISION MVNPHI, SN, ASR, H, K, BS, HS, HK - SAVE X, W -* Gauss Legendre Points and Weights, N = 6 - DATA ( W(I,1), X(I,1), I = 1,3) / - * 0.1713244923791705D+00,-0.9324695142031522D+00, - * 0.3607615730481384D+00,-0.6612093864662647D+00, - * 0.4679139345726904D+00,-0.2386191860831970D+00/ -* Gauss Legendre Points and Weights, N = 12 - DATA ( W(I,2), X(I,2), I = 1,6) / - * 0.4717533638651177D-01,-0.9815606342467191D+00, - * 0.1069393259953183D+00,-0.9041172563704750D+00, - * 0.1600783285433464D+00,-0.7699026741943050D+00, - * 0.2031674267230659D+00,-0.5873179542866171D+00, - * 0.2334925365383547D+00,-0.3678314989981802D+00, - * 0.2491470458134029D+00,-0.1252334085114692D+00/ -* Gauss Legendre Points and Weights, N = 20 - DATA ( W(I,3), X(I,3), I = 1,10) / - * 0.1761400713915212D-01,-0.9931285991850949D+00, - * 0.4060142980038694D-01,-0.9639719272779138D+00, - * 0.6267204833410906D-01,-0.9122344282513259D+00, - * 0.8327674157670475D-01,-0.8391169718222188D+00, - * 0.1019301198172404D+00,-0.7463319064601508D+00, - * 0.1181945319615184D+00,-0.6360536807265150D+00, - * 0.1316886384491766D+00,-0.5108670019508271D+00, - * 0.1420961093183821D+00,-0.3737060887154196D+00, - * 0.1491729864726037D+00,-0.2277858511416451D+00, - * 0.1527533871307259D+00,-0.7652652113349733D-01/ - IF ( ABS(R) .LT. 0.3 ) THEN - NG = 1 - LG = 3 - ELSE IF ( ABS(R) .LT. 0.75 ) THEN - NG = 2 - LG = 6 - ELSE - NG = 3 - LG = 10 - ENDIF - H = SH - K = SK - HK = H*K - BVN = 0 - IF ( ABS(R) .LT. 
0.925 ) THEN - HS = ( H*H + K*K )/2 - ASR = ASIN(R) - DO I = 1, LG - SN = SIN(ASR*( X(I,NG)+1 )/2) - BVN = BVN + W(I,NG)*EXP( ( SN*HK - HS )/( 1 - SN*SN ) ) - SN = SIN(ASR*(-X(I,NG)+1 )/2) - BVN = BVN + W(I,NG)*EXP( ( SN*HK - HS )/( 1 - SN*SN ) ) - END DO - BVN = BVN*ASR/(2*TWOPI) + MVNPHI(-H)*MVNPHI(-K) - ELSE - IF ( R .LT. 0 ) THEN - K = -K - HK = -HK - ENDIF - IF ( ABS(R) .LT. 1 ) THEN - AS = ( 1 - R )*( 1 + R ) - A = SQRT(AS) - BS = ( H - K )**2 - C = ( 4 - HK )/8 - D = ( 12 - HK )/16 - BVN = A*EXP( -(BS/AS + HK)/2 ) - + *( 1 - C*(BS - AS)*(1 - D*BS/5)/3 + C*D*AS*AS/5 ) - IF ( HK .GT. -160 ) THEN - B = SQRT(BS) - BVN = BVN - EXP(-HK/2)*SQRT(TWOPI)*MVNPHI(-B/A)*B - + *( 1 - C*BS*( 1 - D*BS/5 )/3 ) - ENDIF - A = A/2 - DO I = 1, LG - XS = ( A*(X(I,NG)+1) )**2 - RS = SQRT( 1 - XS ) - BVN = BVN + A*W(I,NG)* - + ( EXP( -BS/(2*XS) - HK/(1+RS) )/RS - + - EXP( -(BS/XS+HK)/2 )*( 1 + C*XS*( 1 + D*XS ) ) ) - XS = AS*(-X(I,NG)+1)**2/4 - RS = SQRT( 1 - XS ) - BVN = BVN + A*W(I,NG)*EXP( -(BS/XS + HK)/2 ) - + *( EXP( -HK*(1-RS)/(2*(1+RS)) )/RS - + - ( 1 + C*XS*( 1 + D*XS ) ) ) - END DO - BVN = -BVN/TWOPI - ENDIF - IF ( R .GT. 0 ) BVN = BVN + MVNPHI( -MAX( H, K ) ) - IF ( R .LT. 0 ) BVN = -BVN + MAX( ZERO, MVNPHI(-H)-MVNPHI(-K) ) - ENDIF - BVU = BVN - END - DOUBLE PRECISION FUNCTION MVNUNI() -* -* Uniform (0,1) random number generator -* -* Reference: -* L'Ecuyer, Pierre (1996), -* "Combined Multiple Recursive Random Number Generators" -* Operations Research 44, pp. 816-822. 
-* -* - INTEGER A12, A13, A21, A23, P12, P13, P21, P23 - INTEGER Q12, Q13, Q21, Q23, R12, R13, R21, R23 - INTEGER X10, X11, X12, X20, X21, X22, Z, M1, M2, H - DOUBLE PRECISION INVMP1 - PARAMETER ( M1 = 2147483647, M2 = 2145483479 ) - PARAMETER ( A12 = 63308, Q12 = 33921, R12 = 12979 ) - PARAMETER ( A13 = -183326, Q13 = 11714, R13 = 2883 ) - PARAMETER ( A21 = 86098, Q21 = 24919, R21 = 7417 ) - PARAMETER ( A23 = -539608, Q23 = 3976, R23 = 2071 ) - PARAMETER ( INVMP1 = 4.656612873077392578125D-10 ) -* INVMP1 = 1/(M1+1) - SAVE X10, X11, X12, X20, X21, X22 - DATA X10, X11, X12, X20, X21, X22 - & / 15485857, 17329489, 36312197, 55911127, 75906931, 96210113 / -* -* Component 1 -* - H = X10/Q13 - P13 = -A13*( X10 - H*Q13 ) - H*R13 - H = X11/Q12 - P12 = A12*( X11 - H*Q12 ) - H*R12 - IF ( P13 .LT. 0 ) P13 = P13 + M1 - IF ( P12 .LT. 0 ) P12 = P12 + M1 - X10 = X11 - X11 = X12 - X12 = P12 - P13 - IF ( X12 .LT. 0 ) X12 = X12 + M1 -* -* Component 2 -* - H = X20/Q23 - P23 = -A23*( X20 - H*Q23 ) - H*R23 - H = X22/Q21 - P21 = A21*( X22 - H*Q21 ) - H*R21 - IF ( P23 .LT. 0 ) P23 = P23 + M2 - IF ( P21 .LT. 0 ) P21 = P21 + M2 - X20 = X21 - X21 = X22 - X22 = P21 - P23 - IF ( X22 .LT. 0 ) X22 = X22 + M2 -* -* Combination -* - Z = X12 - X22 - IF ( Z .LE. 0 ) Z = Z + M1 - MVNUNI = Z*INVMP1 - END diff --git a/scipy-0.10.1/scipy/stats/rv.py b/scipy-0.10.1/scipy/stats/rv.py deleted file mode 100644 index 98c8a6842a..0000000000 --- a/scipy-0.10.1/scipy/stats/rv.py +++ /dev/null @@ -1,46 +0,0 @@ - -from numpy import vectorize -from numpy.random import random_sample - -__all__ = ['randwppf', 'randwcdf'] - -# XXX: Are these needed anymore? - -##################################### -# General purpose continuous -###################################### - -def randwppf(ppf, args=(), size=None): - """ - returns an array of randomly distributed integers of a distribution - whose percent point function (inverse of the CDF or quantile function) - is given. 
- - args is a tuple of extra arguments to the ppf function (i.e. shape, - location, scale), and size is the size of the output. Note the ppf - function must accept an array of q values to compute over. - - """ - U = random_sample(size=size) - return apply(ppf, (U,)+args) - -def randwcdf(cdf, mean=1.0, args=(), size=None): - """returns an array of randomly distributed integers of a distribution - whose cumulative distribution function (CDF) is given. - - mean is the mean of the distribution (helps the solver). - args is a tuple of extra arguments to the cdf function (i.e. shape, - location, scale), and size is the size of the output. Note the - cdf function needs to accept a single value to compute over. - """ - import scipy.optimize as optimize - def _ppfopt(x, q, *nargs): - newargs = (x,)+nargs - return cdf(*newargs) - q - - def _ppf(q, *nargs): - return optimize.fsolve(_ppfopt, mean, args=(q,)+nargs) - - _vppf = vectorize(_ppf) - U = random_sample(size=size) - return apply(_vppf,(U,)+args) diff --git a/scipy-0.10.1/scipy/stats/setup.py b/scipy-0.10.1/scipy/stats/setup.py deleted file mode 100755 index e100f366e8..0000000000 --- a/scipy-0.10.1/scipy/stats/setup.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('stats', parent_package, top_path) - - config.add_data_dir('tests') - - config.add_library('statlib', - sources=[join('statlib', '*.f')]) - - # add statlib module - config.add_extension('statlib', - sources=['statlib.pyf'], - f2py_options=['--no-wrap-functions'], - libraries=['statlib'], - ) - - # add vonmises_cython module - config.add_extension('vonmises_cython', - sources=['vonmises_cython.c'], # FIXME: use cython source - ) - - # add futil module - config.add_extension('futil', - sources=['futil.f'], - ) - - # add mvn module - config.add_extension('mvn', - 
sources=['mvn.pyf','mvndst.f'], - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/stats/setupscons.py b/scipy-0.10.1/scipy/stats/setupscons.py deleted file mode 100755 index 91ca83c11e..0000000000 --- a/scipy-0.10.1/scipy/stats/setupscons.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('stats', parent_package, top_path) - - config.add_sconscript('SConstruct') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/scipy-0.10.1/scipy/stats/statlib.pyf b/scipy-0.10.1/scipy/stats/statlib.pyf deleted file mode 100644 index 6492cfb0f2..0000000000 --- a/scipy-0.10.1/scipy/stats/statlib.pyf +++ /dev/null @@ -1,48 +0,0 @@ -!%f90 -*- f90 -*- -python module statlib ! in - interface ! in :statlib - subroutine swilk(init,x,n,n1,n2,a,w,pw,ifault) ! in :statlib:swilk.f - logical intent(optional), intent(in) :: init=0 - real dimension(n),intent(in) :: x - integer depend(x),intent(hide) :: n = shape(x,0) - integer intent(optional),check(n1<=n),depend(n) :: n1=n - integer intent(hide),depend(n) :: n2=n/2 - real intent(in,out), dimension(n2), depend(n2) :: a - real intent(out) :: w - real intent(out) :: pw - integer intent(out) :: ifault - end subroutine swilk - - subroutine wprob(test,other,astart,a1,l1,a2,a3,ifault) ! 
in ansari.f - integer intent(in) :: test - integer intent(in) :: other - real intent(out) :: astart - real dimension(l1), intent(out), depend(l1) :: a1 - integer intent(hide) :: l1=(1+(test*other)/2) - real dimension(l1), intent(hide), depend(l1) :: a2 - real dimension(l1), intent(hide), depend(l1) :: a3 - integer intent(out) :: ifault - end subroutine wprob - subroutine gscale(test,other,astart,a1,l1,a2,a3,ifault) ! in ansari.f - integer intent(in) :: test - integer intent(in) :: other - real intent(out) :: astart - real dimension(l1), intent(out) :: a1 - integer intent(hide) :: l1=(1+(test*other)/2) - real dimension(l1), intent(hide), depend(l1) :: a2 - real dimension(l1), intent(hide), depend(l1) :: a3 - integer intent(out) :: ifault - end subroutine gscale - - function prho(n,is,ifault) ! in spearman.f - integer intent(in) :: n - integer intent(in) :: is - integer intent(out) :: ifault - double precision intent(out) :: prho - end function prho - - end interface -end python module statlib - -! This file was auto-generated with f2py (version:2.21.184-1308). -! See http://cens.ioc.ee/projects/f2py2e/ diff --git a/scipy-0.10.1/scipy/stats/statlib/ansari.f b/scipy-0.10.1/scipy/stats/statlib/ansari.f deleted file mode 100644 index 9a1564e0d9..0000000000 --- a/scipy-0.10.1/scipy/stats/statlib/ansari.f +++ /dev/null @@ -1,311 +0,0 @@ -c Routine AS 93 returns frequencies. The following short routine calculates -c the distribution function from these frequencies (overwriting them). -c The calling arguments are as for AS 93. The distribution function is -c returned in array A1. The first element in A1 is F(ASTART). N.B. ASTART -c is a real variable. -c - subroutine wprob(test, other, astart, a1, l1, a2, a3, ifault) - integer test, other, l1, ifault - real astart, a1(l1), a2(l1), a3(l1) -c -c Local variables -c - real zero, sum - data zero /0.0/ -c - call gscale(test, other, astart, a1, l1, a2, a3, ifault) - if (ifault .ne. 
0) return -c -c Scale column of F -c - nrows = 1 + (test * other)/2 - sum = zero - do 10 i = 1, nrows - sum = sum + a1(i) - a1(i) = sum - 10 continue - do 20 i = 1, nrows - 20 a1(i) = a1(i) / sum -c - return - end - -c---------------------------------------------------------------------- - - SUBROUTINE GSCALE(TEST, OTHER, ASTART, A1, L1, A2, A3, IFAULT) -C -C ALGORITHM AS 93 APPL. STATIST. (1976) VOL.25, NO.1 -C -C FROM THE SIZES OF TWO SAMPLES THE DISTRIBUTION OF THE -C ANSARI-BRADLEY TEST FOR SCALE IS GENERATED IN ARRAY A1. -C - REAL ASTART, A1(L1), A2(L1), A3(L1), AI, ONE, FPOINT - INTEGER TEST, OTHER - LOGICAL SYMM - DATA ONE /1.0/ -C -C TYPE CONVERSION (EFFECT DEPENDS ON TYPE STATEMENT ABOVE). -C - FPOINT(I) = I -C -C CHECK PROBLEM SIZE AND DEFINE BASE VALUE OF THE DISTRIBUTION. -C - M = MIN0(TEST, OTHER) - IFAULT = 2 - IF (M. LT. 0) RETURN - ASTART = FPOINT((TEST + 1) / 2) * FPOINT(1 + TEST / 2) - N = MAX0(TEST, OTHER) -C -C CHECK SIZE OF RESULT ARRAY. -C - IFAULT = 1 - LRES = 1 + (M * N) / 2 - IF (L1 .LT. LRES) RETURN - SYMM = MOD(M + N, 2) .EQ. 0 -C -C TREAT SMALL SAMPLES SEPARATELY. -C - MM1 = M - 1 - IF (M .GT. 2) GOTO 5 -C -C START-UP PROCEDURES ONLY NEEDED. -C - IF (MM1) 1, 2, 3 -C -C ONE SAMPLE ONLY. -C -1 A1(1) = ONE - GOTO 15 -C -C SMALLER SAMPLE SIZE = 1. -C -2 CALL START1(N, A1, L1, LN1) - GOTO 4 -C -C SMALLER SAMPLE SIZE = 2. -C -3 CALL START2(N, A1, L1, LN1) -C -C RETURN IF A1 IS NOT IN REVERSE ORDER. -C -4 IF (SYMM .OR. (OTHER .GT. TEST)) GOTO 15 - GOTO 13 -C -C FULL GENERATOR NEEDED -C SET UP INITIAL CONDITIONS (DEPENDS ON MOD(N, 2)). -C -5 NM1 = N - 1 - NM2 = N - 2 - MNOW = 3 - NC = 3 - IF (MOD(N, 2) .EQ. 1) GOTO 6 -C SET UP FOR EVEN N. -C - N2B1 = 3 - N2B2 = 2 - CALL START2(N, A1, L1, LN1) - CALL START2(NM2, A3, L1, LN3) - CALL START1(NM1, A2, L1, LN2) - GOTO 8 -C -C SET UP FOR ODD N. 
-C -6 N2B1 = 2 - N2B2 = 3 - CALL START1(N, A1, L1, LN1) - CALL START2(NM1, A2, L1, LN2) -C -C INCREASE ORDER OF DISTRIBUTION IN A1 BY 2 -C (USING A2 AND IMPLYING A3). -C -7 CALL FRQADD(A1, LN1, L1OUT, L1, A2, LN2, N2B1) - LN1 = LN1 + N - CALL IMPLY(A1, L1OUT, LN1, A3, LN3, L1, NC) - NC = NC + 1 - IF (MNOW .EQ. M) GOTO 9 - MNOW = MNOW + 1 -C -C INCREASE ORDER OF DISTRIBUTION IN A2 BY 2 (USING A3). -C -8 CALL FRQADD(A2, LN2, L2OUT, L1, A3, LN3, N2B2) - LN2 = LN2 + NM1 - CALL IMPLY(A2, L2OUT, LN2, A3, J, L1, NC) - NC = NC + 1 - IF (MNOW .EQ. M) GOTO 9 - MNOW = MNOW + 1 - GOTO 7 -C -C IF SYMMETRICAL, RESULTS IN A1 ARE COMPLETE. -C -9 IF (SYMM) GOTO 15 -C -C FOR A SKEW RESULT ADD A2 (OFFSET) INTO A1. -C - KS = (M + 3) / 2 - J = 1 - DO 12 I = KS, LRES - IF (I .GT. LN1) GOTO 10 - A1(I) = A1(I) + A2(J) - GOTO 11 -10 A1(I) = A2(J) -11 J = J + 1 -12 CONTINUE -C -C DISTRIBUTION IN A1 POSSIBLY IN REVERSE ORDER. -C - IF (OTHER .LT. TEST) GOTO 15 -C -C REVERSE THE RESULTS IN A1. -C -13 J = LRES - NDO = LRES / 2 - DO 14 I = 1, NDO - AI = A1(I) - A1(I) =A1(J) - A1(J) = AI - J = J - 1 -14 CONTINUE -C -C FINAL RESULTS NOW IN A1. -C -15 IFAULT = 0 - RETURN - END - - SUBROUTINE START1(N, F, L, LOUT) -C -C ALGORITHM AS 93.1 APPL. STATIST. (1976) VOL.25, NO.1 -C -C GENERATES A 1,N ANSARI-BRADLEY DISTRIBUTION IN F. -C - REAL F(L), ONE, TWO - DATA ONE, TWO /1.0, 2.0/ - LOUT = 1 + N / 2 - DO 1 I = 1, LOUT -1 F(I) = TWO - IF (MOD(N, 2) .EQ. 0) F(LOUT) = ONE - RETURN - END -C - SUBROUTINE START2(N, F, L, LOUT) -C -C ALGORITHM AS 93.2 APPL. STATIST. (1976) VOL.25, NO.1 -C -C GENERATES A 2,N ANSARI-BRADLEY DISTRIBUTION IN F. -C - REAL F(L), ONE, TWO, THREE, FOUR - DATA ONE, TWO, THREE, FOUR /1.0, 2.0, 3.0, 4.0/ -C -C DERIVE F FOR 2, NU, WHERE NU IS HIGHEST EVEN INTEGER -C LESS THAN OR EQUAL TO N. -C DEFINE NU AND ARRAY LIMITS. -C - NU = N - MOD(N, 2) - J = NU + 1 - LOUT = J - LT1 = LOUT + 1 - NDO = LT1 / 2 - A = ONE - B = THREE -C -C GENERATE THE SYMMETRICAL 2,NU DISTRIBUTION. 
-C - DO 1 I = 1, NDO - F(I) = A - F(J) = A - J = J - 1 - A = A + B - B = FOUR - B -1 CONTINUE - IF (NU .EQ. N) RETURN -C -C ADD AN OFFSET 1,N DISTRIBUTION INTO F TO GIVE 2,N RESULT. -C - NU = NDO + 1 - DO 2 I = NU, LOUT -2 F(I) = F(I) + TWO - F(LT1) = TWO - LOUT = LT1 - RETURN - END -C - SUBROUTINE FRQADD(F1, L1IN, L1OUT, L1, F2, L2, NSTART) -C -C ALGORITHM AS 93.3 APPL. STATIST. (1976) VOL.25, NO.1 -C -C ARRAY F1 HAS TWICE THE CONTENTS OF ARRAY F2 ADDED INTO IT -C STARTING WITH ELEMENTS NSTART AND 1 IN F1 AND F2 RESPECTIVELY. -C - REAL F1(L1), F2(L2), MUL2 - DATA MUL2 /2.0/ - I2 = 1 - DO 1 I1 = NSTART, L1IN - F1(I1) = F1(I1) + MUL2 * F2(I2) - I2 = I2 + 1 -1 CONTINUE - NXT = L1IN + 1 - L1OUT = L2 + NSTART - 1 - DO 2 I1 = NXT, L1OUT - F1(I1) = MUL2 * F2(I2) - I2 = I2 + 1 -2 CONTINUE - NSTART = NSTART + 1 - RETURN - END -C - SUBROUTINE IMPLY(F1, L1IN, L1OUT, F2, L2, L2MAX, NOFF) -C -C ALGORITHM AS 93.4 APPL. STATIST. (1976) VOL.25, NO.1 -C -C GIVEN L1IN ELEMENTS OF AN ARRAY F1, A SYMMETRICAL -C ARRAY F2 IS DERIVED AND ADDED ONTO F1, LEAVING THE -C FIRST NOFF ELEMENTS OF F1 UNCHANGED AND GIVING A -C SYMMETRICAL RESULT OF L1OUT ELEMENTS IN F1. -C - REAL F1(L1OUT), F2(L2MAX), SUM, DIFF -C -C SET-UP SUBSCRIPTS AND LOOP COUNTER. -C - I2 = 1 - NOFF - J1 = L1OUT - J2 = L1OUT - NOFF - L2 = J2 - J2MIN = (J2 + 1) / 2 - NDO = (L1OUT + 1) / 2 -C -C DERIVE AND IMPLY NEW VALUES FROM OUTSIDE INWARDS. -C - DO 6 I1 = 1, NDO -C -C GET NEW F1 VALUE FROM SUM OF L/H ELEMENTS OF -C F1 + F2 (IF F2 IS IN RANGE). -C - IF (I2 .GT. 0) GOTO 1 - SUM = F1(I1) - GOTO 2 -1 SUM = F1(I1) + F2(I2) -C -C REVISE LEFT ELEMENT OF F1. -C - F1(I1) = SUM -C -C IF F2 NOT COMPLETE IMPLY AND ASSIGN F2 VALUES -C AND REVISE SUBSCRIPTS. -C -2 I2 = I2 + 1 - IF (J2 .LT. J2MIN) GOTO 5 - IF (J1 .LE. L1IN) GOTO 3 - DIFF = SUM - GOTO 4 -3 DIFF = SUM - F1(J1) -4 F2(I1) = DIFF - F2(J2) = DIFF - J2 = J2 - 1 -C -C ASSIGN R/H ELEMENT OF F1 AND REVISE SUBSCRIPT. 
-C -5 F1(J1) = SUM - J1 = J1 - 1 -6 CONTINUE - RETURN - END diff --git a/scipy-0.10.1/scipy/stats/statlib/spearman.f b/scipy-0.10.1/scipy/stats/statlib/spearman.f deleted file mode 100644 index 5434155f45..0000000000 --- a/scipy-0.10.1/scipy/stats/statlib/spearman.f +++ /dev/null @@ -1,79 +0,0 @@ - double precision function prho(n, is, ifault) -c -c Algorithm AS 89 Appl. Statist. (1975) Vol.24, No. 3, P377. -c -c To evaluate the probability of obtaining a value greater than or -c equal to is, where is=(n**3-n)*(1-r)/6, r=Spearman's rho and n -c must be greater than 1 -c -c Auxiliary function required: ALNORM = algorithm AS66 -c - dimension l(6) - double precision zero, one, two, b, x, y, z, u, six, - $ c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12 - data zero, one, two, six /0.0d0, 1.0d0, 2.0d0, 6.0d0/ - data c1, c2, c3, c4, c5, c6, - $ c7, c8, c9, c10, c11, c12/ - $ 0.2274d0, 0.2531d0, 0.1745d0, 0.0758d0, 0.1033d0, 0.3932d0, - $ 0.0879d0, 0.0151d0, 0.0072d0, 0.0831d0, 0.0131d0, 0.00046d0/ -c -c Test admissibility of arguments and initialize -c - prho = one - ifault = 1 - if (n .le. 1) return - ifault = 0 - if (is .le. 0) return - prho = zero - if (is .gt. n * (n * n -1) / 3) return - js = is - if (js .ne. 2 * (js / 2)) js = js + 1 - if (n .gt. 6) goto 6 -c -c Exact evaluation of probability -c - nfac = 1 - do 1 i = 1, n - nfac = nfac * i - l(i) = i - 1 continue - prho = one / dble(nfac) - if (js .eq. n * (n * n -1) / 3) return - ifr = 0 - do 5 m = 1,nfac - ise = 0 - do 2 i = 1, n - ise = ise + (i - l(i)) ** 2 - 2 continue - if (js .le. ise) ifr = ifr + 1 - n1 = n - 3 mt = l(1) - nn = n1 - 1 - do 4 i = 1, nn - l(i) = l(i + 1) - 4 continue - l(n1) = mt - if (l(n1) .ne. n1 .or. n1 .eq. 2) goto 5 - n1 = n1 - 1 - if (m .ne. 
nfac) goto 3 - 5 continue - prho = dble(ifr) / dble(nfac) - return -c -c Evaluation by Edgeworth series expansion -c - 6 b = one / dble(n) - x = (six * (dble(js) - one) * b / (one / (b * b) -one) - - $ one) * sqrt(one / b - one) - y = x * x - u = x * b * (c1 + b * (c2 + c3 * b) + y * (-c4 - $ + b * (c5 + c6 * b) - y * b * (c7 + c8 * b - $ - y * (c9 - c10 * b + y * b * (c11 - c12 * y))))) -c -c Call to algorithm AS 66 -c - prho = u / exp(y / two) + alnorm(x, .true.) - if (prho .lt. zero) prho = zero - if (prho .gt. one) prho = one - return - end diff --git a/scipy-0.10.1/scipy/stats/statlib/swilk.f b/scipy-0.10.1/scipy/stats/statlib/swilk.f deleted file mode 100644 index 45996ce1a2..0000000000 --- a/scipy-0.10.1/scipy/stats/statlib/swilk.f +++ /dev/null @@ -1,345 +0,0 @@ - SUBROUTINE SWILK (INIT, X, N, N1, N2, A, W, PW, IFAULT) -C -C ALGORITHM AS R94 APPL. STATIST. (1995) VOL.44, NO.4 -C -C Calculates the Shapiro-Wilk W test and its significance level -C - INTEGER N, N1, N2, IFAULT - REAL X(*), A(*), PW, W - REAL C1(6), C2(6), C3(4), C4(4), C5(4), C6(3), C7(2) - REAL C8(2), C9(2), G(2) - REAL Z90, Z95, Z99, ZM, ZSS, BF1, XX90, XX95, ZERO, ONE, TWO - REAL THREE, SQRTH, QTR, TH, SMALL, PI6, STQR - REAL SUMM2, SSUMM2, FAC, RSN, AN, AN25, A1, A2, DELTA, RANGE - REAL SA, SX, SSX, SSA, SAX, ASA, XSX, SSASSX, W1, Y, XX, XI - REAL GAMMA, M, S, LD, BF, Z90F, Z95F, Z99F, ZFM, ZSD, ZBAR -C -C Auxiliary routines -C - REAL PPND, POLY - DOUBLE PRECISION ALNORM -C - INTEGER NCENS, NN2, I, I1, J - LOGICAL INIT, UPPER -C - DATA C1 /0.0E0, 0.221157E0, -0.147981E0, -0.207119E1, - * 0.4434685E1, -0.2706056E1/ - DATA C2 /0.0E0, 0.42981E-1, -0.293762E0, -0.1752461E1, - * 0.5682633E1, -0.3582633E1/ - DATA C3 /0.5440E0, -0.39978E0, 0.25054E-1, -0.6714E-3/ - DATA C4 /0.13822E1, -0.77857E0, 0.62767E-1, -0.20322E-2/ - DATA C5 /-0.15861E1, -0.31082E0, -0.83751E-1, 0.38915E-2/ - DATA C6 /-0.4803E0, -0.82676E-1, 0.30302E-2/ - DATA C7 /0.164E0, 0.533E0/ - DATA C8 /0.1736E0, 0.315E0/ - DATA C9 
/0.256E0, -0.635E-2/ - DATA G /-0.2273E1, 0.459E0/ - DATA Z90, Z95, Z99 /0.12816E1, 0.16449E1, 0.23263E1/ - DATA ZM, ZSS /0.17509E1, 0.56268E0/ - DATA BF1 /0.8378E0/, XX90, XX95 /0.556E0, 0.622E0/ - DATA ZERO /0.0E0/, ONE/1.0E0/, TWO/2.0E0/, THREE/3.0E0/ - DATA SQRTH /0.70711E0/, QTR/0.25E0/, TH/0.375E0/, SMALL/1E-19/ - DATA PI6 /0.1909859E1/, STQR/0.1047198E1/, UPPER/.TRUE./ -C - PW = ONE - IF (W .GE. ZERO) W = ONE - AN = N - IFAULT = 3 - NN2 = N/2 - IF (N2 .LT. NN2) RETURN - IFAULT = 1 - IF (N .LT. 3) RETURN -C -C If INIT is false, calculates coefficients for the test -C - IF (.NOT. INIT) THEN - IF (N .EQ. 3) THEN - A(1) = SQRTH - ELSE - AN25 = AN + QTR - SUMM2 = ZERO - DO 30 I = 1, N2 - A(I) = PPND((REAL(I) - TH)/AN25,IFAULT) - SUMM2 = SUMM2 + A(I) ** 2 -30 CONTINUE - SUMM2 = SUMM2 * TWO - SSUMM2 = SQRT(SUMM2) - RSN = ONE / SQRT(AN) - A1 = POLY(C1, 6, RSN) - A(1) / SSUMM2 -C -C Normalize coefficients -C - IF (N .GT. 5) THEN - I1 = 3 - A2 = -A(2)/SSUMM2 + POLY(C2,6,RSN) - FAC = SQRT((SUMM2 - TWO * A(1) ** 2 - TWO * - * A(2) ** 2)/(ONE - TWO * A1 ** 2 - TWO * A2 ** 2)) - A(1) = A1 - A(2) = A2 - ELSE - I1 = 2 - FAC = SQRT((SUMM2 - TWO * A(1) ** 2)/ - * (ONE - TWO * A1 ** 2)) - A(1) = A1 - END IF - DO 40 I = I1, NN2 - A(I) = -A(I)/FAC - 40 CONTINUE - END IF - INIT = .TRUE. - END IF - IF (N1 .LT. 3) RETURN - NCENS = N - N1 - IFAULT = 4 - IF (NCENS .LT. 0 .OR. (NCENS .GT. 0 .AND. N .LT. 20)) RETURN - IFAULT = 5 - DELTA = FLOAT(NCENS)/AN - IF (DELTA .GT. 0.8) RETURN -C -C If W input as negative, calculate significance level of -W -C - IF (W .LT. ZERO) THEN - W1 = ONE + W - IFAULT = 0 - GOTO 70 - END IF -C -C Check for zero range -C - IFAULT = 6 - RANGE = X(N1) - X(1) - IF (RANGE .LT. SMALL) RETURN -C -C Check for correct sort order on range - scaled X -C - IFAULT = 7 - XX = X(1)/RANGE - SX = XX - SA = -A(1) - J = N - 1 - DO 50 I = 2, N1 - XI = X(I)/RANGE -CCCCC IF (XX-XI .GT. SMALL) PRINT *,' ANYTHING' - SX = SX + XI - IF (I .NE. 
J) SA = SA + SIGN(1, I - J) * A(MIN(I, J)) - XX = XI - J = J - 1 -50 CONTINUE - IFAULT = 0 - IF (N .GT. 5000) IFAULT = 2 -C -C Calculate W statistic as squared correlation -C between data and coefficients -C - SA = SA/N1 - SX = SX/N1 - SSA = ZERO - SSX = ZERO - SAX = ZERO - J = N - DO 60 I = 1, N1 - IF (I .NE. J) THEN - ASA = SIGN(1, I - J) * A(MIN(I, J)) - SA - ELSE - ASA = -SA - END IF - XSX = X(I)/RANGE - SX - SSA = SSA + ASA * ASA - SSX = SSX + XSX * XSX - SAX = SAX + ASA * XSX - J = J - 1 - 60 CONTINUE -C -C W1 equals (1-W) claculated to avoid excessive rounding error -C for W very near 1 (a potential problem in very large samples) -C - SSASSX = SQRT(SSA * SSX) - W1 = (SSASSX - SAX) * (SSASSX + SAX)/(SSA * SSX) - 70 W = ONE - W1 -C -C Calculate significance level for W (exact for N=3) -C - IF (N .EQ. 3) THEN - PW = PI6 * (ASIN(SQRT(W)) - STQR) - RETURN - END IF - Y = LOG(W1) - XX = LOG(AN) - M = ZERO - S = ONE - IF (N .LE. 11) THEN - GAMMA = POLY(G, 2, AN) - IF (Y .GE. GAMMA) THEN - PW = SMALL - RETURN - END IF - Y = -LOG(GAMMA - Y) - M = POLY(C3, 4, AN) - S = EXP(POLY(C4, 4, AN)) - ELSE - M = POLY(C5, 4, XX) - S = EXP(POLY(C6, 3, XX)) - END IF - IF (NCENS .GT. 0) THEN -C -C Censoring by proportion NCENS/N. Calculate mean and sd -C of normal equivalent deviate of W. -C - LD = -LOG(DELTA) - BF = ONE + XX * BF1 - Z90F = Z90 + BF * POLY(C7, 2, XX90 ** XX) ** LD - Z95F = Z95 + BF * POLY(C8, 2, XX95 ** XX) ** LD - Z99F = Z99 + BF * POLY(C9, 2, XX) ** LD -C -C Regress Z90F,...,Z99F on normal deviates Z90,...,Z99 to get -C pseudo-mean and pseudo-sd of z as the slope and intercept -C - ZFM = (Z90F + Z95F + Z99F)/THREE - ZSD = (Z90*(Z90F-ZFM)+Z95*(Z95F-ZFM)+Z99*(Z99F-ZFM))/ZSS - ZBAR = ZFM - ZSD * ZM - M = M + ZBAR * S - S = S * ZSD - END IF - PW = REAL(ALNORM(DBLE((Y - M)/S), UPPER)) -C - RETURN - END - - DOUBLE PRECISION FUNCTION ALNORM(X, UPPER) -C -C EVALUATES THE TAIL AREA OF THE STANDARDIZED NORMAL CURVE FROM -C X TO INFINITY IF UPPER IS .TRUE. 
OR FROM MINUS INFINITY TO X -C IF UPPER IS .FALSE. -C -C NOTE NOVEMBER 2001: MODIFY UTZERO. ALTHOUGH NOT NECESSARY -C WHEN USING ALNORM FOR SIMPLY COMPUTING PERCENT POINTS, -C EXTENDING RANGE IS HELPFUL FOR USE WITH FUNCTIONS THAT -C USE ALNORM IN INTERMEDIATE COMPUTATIONS. -C - DOUBLE PRECISION LTONE,UTZERO,ZERO,HALF,ONE,CON, - $ A1,A2,A3,A4,A5,A6,A7,B1,B2, - $ B3,B4,B5,B6,B7,B8,B9,B10,B11,B12,X,Y,Z,ZEXP - LOGICAL UPPER,UP -C -C LTONE AND UTZERO MUST BE SET TO SUIT THE PARTICULAR COMPUTER -C -CCCCC DATA LTONE, UTZERO /7.0D0, 18.66D0/ - DATA LTONE, UTZERO /7.0D0, 38.00D0/ - DATA ZERO,HALF,ONE,CON /0.0D0,0.5D0,1.0D0,1.28D0/ - DATA A1, A2, A3, - $ A4, A5, A6, - $ A7 - $ /0.398942280444D0, 0.399903438504D0, 5.75885480458D0, - $ 29.8213557808D0, 2.62433121679D0, 48.6959930692D0, - $ 5.92885724438D0/ - DATA B1, B2, B3, - $ B4, B5, B6, - $ B7, B8, B9, - $ B10, B11, B12 - $ /0.398942280385D0, 3.8052D-8, 1.00000615302D0, - $ 3.98064794D-4, 1.98615381364D0, 0.151679116635D0, - $ 5.29330324926D0, 4.8385912808D0, 15.1508972451D0, - $ 0.742380924027D0, 30.789933034D0, 3.99019417011D0/ -C - ZEXP(Z) = DEXP(Z) -C - UP = UPPER - Z = X - IF (Z .GE. ZERO) GOTO 10 - UP = .NOT. UP - Z = -Z - 10 IF (Z .LE. LTONE .OR. UP .AND. Z .LE. UTZERO) GOTO 20 - ALNORM = ZERO - GOTO 40 - 20 Y = HALF * Z * Z - IF (Z .GT. CON) GOTO 30 -C - ALNORM = HALF - Z * (A1- A2 * Y / (Y + A3- A4 / (Y + A5 + A6 / - $ (Y + A7)))) - GOTO 40 -C - 30 ALNORM = B1* ZEXP(-Y)/(Z - B2 + B3/ (Z +B4 +B5/(Z -B6 +B7/ - $ (Z +B8 -B9/ (Z +B10 +B11/ (Z + B12)))))) -C - 40 IF (.NOT. UP) ALNORM = ONE - ALNORM - RETURN - END - - REAL FUNCTION PPND(P, IFAULT) -C -C ALGORITHM AS 111 APPL. STATIST. 
(1977), VOL.26, NO.1 -C -C PRODUCES NORMAL DEVIATE CORRESPONDING TO LOWER TAIL AREA OF P -C REAL VERSION FOR EPS = 2 **(-31) -C THE HASH SUMS ARE THE SUMS OF THE MODULI OF THE COEFFICIENTS -C THEY HAVE NO INHERENT MEANINGS BUT ARE INCLUDED FOR USE IN -C CHECKING TRANSCRIPTIONS -C STANDARD FUNCTIONS ABS, ALOG AND SQRT ARE USED -C -C NOTE: WE COULD USE DATAPLOT NORPPF, BUT VARIOUS APPLIED -C STATISTICS ALGORITHMS USE THIS. SO WE PROVIDE IT TO -C MAKE USE OF APPLIED STATISTICS ALGORITHMS EASIER. -C - REAL ZERO, SPLIT, HALF, ONE - REAL A0, A1, A2, A3, B1, B2, B3, B4, C0, C1, C2, C3, D1, D2 - REAL P, Q, R - INTEGER IFAULT - DATA ZERO /0.0E0/, HALF/0.5E0/, ONE/1.0E0/ - DATA SPLIT /0.42E0/ - DATA A0 / 2.50662823884E0/ - DATA A1 / -18.61500062529E0/ - DATA A2 / 41.39119773534E0/ - DATA A3 / -25.44106049637E0/ - DATA B1 / -8.47351093090E0/ - DATA B2 / 23.08336743743E0/ - DATA B3 / -21.06224101826E0/ - DATA B4 / 3.13082909833E0/ - DATA C0 / -2.78718931138E0/ - DATA C1 / -2.29796479134E0/ - DATA C2 / 4.85014127135E0/ - DATA C3 / 2.32121276858E0/ - DATA D1 / 3.54388924762E0/ - DATA D2 / 1.63706781897E0/ -C - IFAULT = 0 - Q = P - HALF - IF (ABS(Q) .GT. SPLIT) GOTO 1 - R = Q*Q - PPND = Q * (((A3*R + A2)*R + A1) * R + A0) / - * ((((B4*R + B3)*R + B2) * R + B1) * R + ONE) - RETURN -1 R = P - IF (Q .GT. ZERO)R = ONE - P - IF (R .LE. ZERO) GOTO 2 - R = SQRT(-ALOG(R)) - PPND = (((C3 * R + C2) * R + C1) * R + C0)/ - * ((D2*R + D1) * R + ONE) - IF (Q .LT. ZERO) PPND = -PPND - RETURN -2 IFAULT = 1 - PPND = ZERO - RETURN - END - - REAL FUNCTION POLY(C, NORD, X) -C -C -C ALGORITHM AS 181.2 APPL. STATIST. (1982) VOL. 31, NO. 2 -C -C CALCULATES THE ALGEBRAIC POLYNOMIAL OF ORDER NORED-1 WITH -C ARRAY OF COEFFICIENTS C. 
ZERO ORDER COEFFICIENT IS C(1) -C - REAL C(NORD) - POLY = C(1) - IF(NORD.EQ.1) RETURN - P = X*C(NORD) - IF(NORD.EQ.2) GOTO 20 - N2 = NORD-2 - J = N2+1 - DO 10 I = 1,N2 - P = (P+C(J))*X - J = J-1 - 10 CONTINUE - 20 POLY = POLY+P - RETURN - END diff --git a/scipy-0.10.1/scipy/stats/stats.py b/scipy-0.10.1/scipy/stats/stats.py deleted file mode 100644 index e95d6c3223..0000000000 --- a/scipy-0.10.1/scipy/stats/stats.py +++ /dev/null @@ -1,3956 +0,0 @@ -# Copyright (c) Gary Strangman. All rights reserved -# -# Disclaimer -# -# This software is provided "as-is". There are no expressed or implied -# warranties of any kind, including, but not limited to, the warranties -# of merchantability and fitness for a given application. In no event -# shall Gary Strangman be liable for any direct, indirect, incidental, -# special, exemplary or consequential damages (including, but not limited -# to, loss of use, data or profits, or business interruption) however -# caused and on any theory of liability, whether in contract, strict -# liability or tort (including negligence or otherwise) arising in any way -# out of the use of this software, even if advised of the possibility of -# such damage. -# - -# -# Heavily adapted for use by SciPy 2002 by Travis Oliphant -""" -stats.py module - -################################################# -####### Written by: Gary Strangman ########### -################################################# - -A collection of basic statistical functions for python. The function -names appear below. - - Some scalar functions defined here are also available in the scipy.special - package where they work on arbitrary sized arrays. - -Disclaimers: The function list is obviously incomplete and, worse, the -functions are not optimized. All functions have been tested (some more -so than others), but they are far from bulletproof. Thus, as with any -free software, no warranty or guarantee is expressed or implied. 
:-) A -few extra functions that don't appear in the list below can be found by -interested treasure-hunters. These functions don't necessarily have -both list and array versions but were deemed useful - -CENTRAL TENDENCY: gmean (geometric mean) - hmean (harmonic mean) - medianscore - mode - -MOMENTS: moment - variation - skew - kurtosis - normaltest (for arrays only) - -MOMENTS HANDLING NAN: nanmean - nanmedian - nanstd - -ALTERED VERSIONS: tmean - tvar - tstd - tsem - describe - -FREQUENCY STATS: freqtable - itemfreq - scoreatpercentile - percentileofscore - histogram - cumfreq - relfreq - -VARIABILITY: obrientransform - signaltonoise (for arrays only) - sem - -TRIMMING FCNS: threshold (for arrays only) - trimboth - trim1 - around (round all vals to 'n' decimals) - -CORRELATION FCNS: paired - pearsonr - fisher_exact - spearmanr - pointbiserialr - kendalltau - linregress - -INFERENTIAL STATS: ttest_1samp - ttest_ind - ttest_rel - chisquare - ks_2samp - mannwhitneyu - ranksums - wilcoxon - kruskal - friedmanchisquare - -PROBABILITY CALCS: chisqprob - zprob - fprob - betai - -## Note that scipy.stats.distributions has many more statistical probability -## functions defined. - - -ANOVA FUNCTIONS: f_oneway - f_value - -SUPPORT FUNCTIONS: ss - square_of_sums - shellsort - rankdata - -References ----------- -[CRCProbStat2000]_ - -.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard - Probability and Statistics Tables and Formulae. Chapman & Hall: New - York. 2000. - -""" -## CHANGE LOG: -## =========== -## since 2001-06-25 ... see scipy SVN changelog -## 05-11-29 ... fixed default axis to be 0 for consistency with scipy; -## cleanup of redundant imports, dead code, {0,1} -> booleans -## 02-02-10 ... require Numeric, eliminate "list-only" functions -## (only 1 set of functions now and no Dispatch class), -## removed all references to aXXXX functions. -## 00-04-13 ... 
pulled all "global" statements, except from aanova() -## added/fixed lots of documentation, removed io.py dependency -## changed to version 0.5 -## 99-11-13 ... added asign() function -## 99-11-01 ... changed version to 0.4 ... enough incremental changes now -## 99-10-25 ... added acovariance and acorrelation functions -## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors -## added aglm function (crude, but will be improved) -## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, var, etc. to -## all handle lists of 'dimension's and keepdims -## REMOVED ar0, ar2, ar3, ar4 and replaced them with around -## reinserted fixes for abetai to avoid math overflows -## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to -## handle multi-dimensional arrays (whew!) -## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990) -## added anormaltest per same reference -## re-wrote azprob to calc arrays of probs all at once -## 99-08-22 ... edited attest_ind printing section so arrays could be rounded -## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on -## short/byte arrays (mean of #s btw 100-300 = -150??) -## 99-08-09 ... fixed asum so that the None case works for Byte arrays -## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays -## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap) -## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0]) -## 04/11/99 ... added asignaltonoise, athreshold functions, changed all -## max/min in array section to maximum/minimum, -## fixed square_of_sums to prevent integer overflow -## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums -## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions -## 02/28/99 ... Fixed aobrientransform to return an array rather than a list -## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!) -## 01/13/99 ... 
CHANGED TO VERSION 0.3 -## fixed bug in a/lmannwhitneyu p-value calculation -## 12/31/98 ... fixed variable-name bug in ldescribe -## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix) -## 12/16/98 ... changed amedianscore to return float (not array) for 1 score -## 12/14/98 ... added atmin and atmax functions -## removed umath from import line (not needed) -## l/ageometricmean modified to reduce chance of overflows (take -## nth root first, then multiply) -## 12/07/98 ... added __version__variable (now 0.2) -## removed all 'stats.' from anova() fcn -## 12/06/98 ... changed those functions (except shellsort) that altered -## arguments in-place ... cumsum, ranksort, ... -## updated (and fixed some) doc-strings -## 12/01/98 ... added anova() function (requires NumPy) -## incorporated Dispatch class -## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean -## added 'asum' function (added functionality to add.reduce) -## fixed both moment and amoment (two errors) -## changed name of skewness and askewness to skew and askew -## fixed (a)histogram (which sometimes counted points >> from scipy import stats - >>> a = np.linspace(0, 4, 3) - >>> a - array([ 0., 2., 4.]) - >>> a[-1] = np.nan - >>> stats.nanmean(a) - 1.0 - - """ - x, axis = _chk_asarray(x,axis) - x = x.copy() - Norig = x.shape[axis] - factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig - - x[np.isnan(x)] = 0 - return np.mean(x,axis)/factor - -def nanstd(x, axis=0, bias=False): - """ - Compute the standard deviation over the given axis, ignoring nans. - - Parameters - ---------- - x : array_like - Input array. - axis : int or None, optional - Axis along which the standard deviation is computed. Default is 0. - If None, compute over the whole array `x`. - bias : bool, optional - If True, the biased (normalized by N) definition is used. If False - (default), the unbiased definition is used. - - Returns - ------- - s : float - The standard deviation. 
- - See Also - -------- - nanmean, nanmedian - - Examples - -------- - >>> from scipy import stats - >>> a = np.arange(10, dtype=float) - >>> a[1:3] = np.nan - >>> np.std(a) - nan - >>> stats.nanstd(a) - 2.9154759474226504 - >>> stats.nanstd(a.reshape(2, 5), axis=1) - array([ 2.0817, 1.5811]) - >>> stats.nanstd(a.reshape(2, 5), axis=None) - 2.9154759474226504 - - """ - x, axis = _chk_asarray(x,axis) - x = x.copy() - Norig = x.shape[axis] - - Nnan = np.sum(np.isnan(x),axis)*1.0 - n = Norig - Nnan - - x[np.isnan(x)] = 0. - m1 = np.sum(x,axis)/n - - if axis: - d = (x - np.expand_dims(m1, axis))**2.0 - else: - d = (x - m1)**2.0 - - m2 = np.sum(d,axis)-(m1*m1)*Nnan - if bias: - m2c = m2 / n - else: - m2c = m2 / (n - 1.) - return np.sqrt(m2c) - -def _nanmedian(arr1d): # This only works on 1d arrays - """Private function for rank a arrays. Compute the median ignoring Nan. - - Parameters - ---------- - arr1d : ndarray - Input array, of rank 1. - - Results - ------- - m : float - The median. - """ - cond = 1-np.isnan(arr1d) - x = np.sort(np.compress(cond,arr1d,axis=-1)) - if x.size == 0: - return np.nan - return np.median(x) - -def nanmedian(x, axis=0): - """ - Compute the median along the given axis ignoring nan values. - - Parameters - ---------- - x : array_like - Input array. - axis : int, optional - Axis along which the median is computed. Default is 0, i.e. the - first axis. - - Returns - ------- - m : float - The median of `x` along `axis`. 
- - See Also - -------- - nanstd, nanmean - - Examples - -------- - >>> from scipy import stats - >>> a = np.array([0, 3, 1, 5, 5, np.nan]) - >>> stats.nanmedian(a) - array(3.0) - - >>> b = np.array([0, 3, 1, 5, 5, np.nan, 5]) - >>> stats.nanmedian(b) - array(4.0) - - Example with axis: - - >>> c = np.arange(30.).reshape(5,6) - >>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6) - >>> c[idx] = np.nan - >>> c - array([[ 0., 1., 2., nan, 4., 5.], - [ 6., 7., nan, 9., 10., 11.], - [ 12., nan, 14., 15., 16., 17.], - [ nan, 19., 20., 21., 22., nan], - [ 24., 25., 26., 27., nan, 29.]]) - >>> stats.nanmedian(c, axis=1) - array([ 2. , 9. , 15. , 20.5, 26. ]) - - """ - x, axis = _chk_asarray(x, axis) - if x.ndim == 0: - return float(x.item()) - x = x.copy() - x = np.apply_along_axis(_nanmedian, axis, x) - if x.ndim == 0: - x = float(x.item()) - return x - - -##################################### -######## CENTRAL TENDENCY ######## -##################################### - - -def gmean(a, axis=0, dtype=None): - """ - Compute the geometric mean along the specified axis. - - Returns the geometric average of the array elements. - That is: n-th root of (x1 * x2 * ... * xn) - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional, default axis=0 - Axis along which the geometric mean is computed. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If dtype is not specified, it defaults to the - dtype of a, unless a has an integer dtype with a precision less than - that of the default platform integer. In that case, the default - platform integer is used. 
- - Returns - ------- - gmean : ndarray, - see dtype parameter above - - See Also - -------- - numpy.mean : Arithmetic average - numpy.average : Weighted average - hmean: Harmonic mean - - Notes - ----- - The geometric average is computed over a single dimension of the input - array, axis=0 by default, or all values in the array if axis=None. - float64 intermediate and return values are used for integer inputs. - - Use masked arrays to ignore any non-finite values in the input or that - arise in the calculations such as Not a Number and infinity because masked - arrays automatically mask any non-finite values. - - """ - if not isinstance(a, np.ndarray): #if not an ndarray object attempt to convert it - log_a=np.log(np.array(a, dtype=dtype)) - elif dtype: #Must change the default dtype allowing array type - if isinstance(a,np.ma.MaskedArray): - log_a=np.log(np.ma.asarray(a, dtype=dtype)) - else: - log_a=np.log(np.asarray(a, dtype=dtype)) - else: - log_a = np.log(a) - return np.exp(log_a.mean(axis=axis)) - -def hmean(a, axis=0, dtype=None): - """ - Calculates the harmonic mean along the specified axis. - - That is: n / (1/x1 + 1/x2 + ... + 1/xn) - - Parameters - ---------- - a : array_like - Input array, masked array or object that can be converted to an array. - axis : int, optional, default axis=0 - Axis along which the harmonic mean is computed. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults to the - dtype of `a`, unless `a` has an integer `dtype` with a precision less - than that of the default platform integer. In that case, the default - platform integer is used. 
- - Returns - ------- - hmean : ndarray, - see `dtype` parameter above - - See Also - -------- - numpy.mean : Arithmetic average - numpy.average : Weighted average - gmean: Geometric mean - - Notes - ----- - The harmonic mean is computed over a single dimension of the input - array, axis=0 by default, or all values in the array if axis=None. - float64 intermediate and return values are used for integer inputs. - - Use masked arrays to ignore any non-finite values in the input or that - arise in the calculations such as Not a Number and infinity. - - """ - if not isinstance(a, np.ndarray): - a=np.array(a, dtype=dtype) - if np.all(a >0): # Harmonic mean only defined if greater than zero - if isinstance(a, np.ma.MaskedArray): - size = a.count(axis) - else: - if axis == None: - a=a.ravel() - size = a.shape[0] - else: - size = a.shape[axis] - return size / np.sum(1.0/a, axis=axis, dtype=dtype) - else: - raise ValueError("Harmonic mean only defined if all elements greater than zero") - - -def cmedian(a, numbins=1000): - """ - Returns the computed median value of an array. - - All of the values in the input array are used. The input array is first - histogrammed using `numbins` bins. The bin containing the median is - selected by searching for the halfway point in the cumulative histogram. - The median value is then computed by linearly interpolating across that - bin. - - Parameters - ---------- - a : array_like - Input array. - numbins : int - The number of bins used to histogram the data. More bins give greater - accuracy to the approximation of the median. - - Returns - ------- - cmedian : float - An approximation of the median. - - References - ---------- - [CRCProbStat2000]_ Section 2.2.6 - - .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard - Probability and Statistics Tables and Formulae. Chapman & Hall: New - York. 2000. - - """ - # TODO: numpy.median() always seems to be a better choice. 
- # A better version of this function would take already-histogrammed data - # and compute the median from that. - a = np.ravel(a) - n = float(len(a)) - - # We will emulate the (fixed!) bounds selection scheme used by - # scipy.stats.histogram(), but use numpy.histogram() since it is faster. - amin = a.min() - amax = a.max() - estbinwidth = (amax - amin)/float(numbins - 1) - binsize = (amax - amin + estbinwidth) / float(numbins) - (hist, bins) = np.histogram(a, numbins, - range=(amin-binsize*0.5, amax+binsize*0.5)) - binsize = bins[1] - bins[0] - cumhist = np.cumsum(hist) # make cumulative histogram - cfbin = np.searchsorted(cumhist, n/2.0) - LRL = bins[cfbin] # get lower read limit of that bin - if cfbin == 0: - cfbelow = 0.0 - else: - cfbelow = cumhist[cfbin-1] # cum. freq. below bin - freq = hist[cfbin] # frequency IN the 50%ile bin - median = LRL + ((n/2.0-cfbelow)/float(freq))*binsize # MEDIAN - return median - - -def mode(a, axis=0): - """ - Returns an array of the modal (most common) value in the passed array. - - If there is more than one such value, only the first is returned. - The bin-count for the modal bins is also returned. - - Parameters - ---------- - a : array_like - n-dimensional array of which to find mode(s). - axis : int, optional - Axis along which to operate. Default is 0, i.e. the first axis. - - Returns - ------- - vals : ndarray - Array of modal values. - counts : ndarray - Array of counts for each mode. 
- - Examples - -------- - >>> a = np.array([[6, 8, 3, 0], - [3, 2, 1, 7], - [8, 1, 8, 4], - [5, 3, 0, 5], - [4, 7, 5, 9]]) - >>> from scipy import stats - >>> stats.mode(a) - (array([[ 3., 1., 0., 0.]]), array([[ 1., 1., 1., 1.]])) - - To get mode of whole array, specify axis=None: - - >>> stats.mode(a, axis=None) - (array([ 3.]), array([ 3.])) - - """ - a, axis = _chk_asarray(a, axis) - scores = np.unique(np.ravel(a)) # get ALL unique values - testshape = list(a.shape) - testshape[axis] = 1 - oldmostfreq = np.zeros(testshape) - oldcounts = np.zeros(testshape) - for score in scores: - template = (a == score) - counts = np.expand_dims(np.sum(template, axis),axis) - mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) - oldcounts = np.maximum(counts, oldcounts) - oldmostfreq = mostfrequent - return mostfrequent, oldcounts - -def mask_to_limits(a, limits, inclusive): - """Mask an array for values outside of given limits. - - This is primarily a utility function. - - Parameters - ---------- - a : array - limits : (float or None, float or None) - A tuple consisting of the (lower limit, upper limit). Values in the - input array less than the lower limit or greater than the upper limit - will be masked out. None implies no limit. - inclusive : (bool, bool) - A tuple consisting of the (lower flag, upper flag). These flags - determine whether values exactly equal to lower or upper are allowed. - - Returns - ------- - A MaskedArray. - - Raises - ------ - A ValueError if there are no values within the given limits. 
- """ - lower_limit, upper_limit = limits - lower_include, upper_include = inclusive - am = ma.MaskedArray(a) - if lower_limit is not None: - if lower_include: - am = ma.masked_less(am, lower_limit) - else: - am = ma.masked_less_equal(am, lower_limit) - if upper_limit is not None: - if upper_include: - am = ma.masked_greater(am, upper_limit) - else: - am = ma.masked_greater_equal(am, upper_limit) - if am.count() == 0: - raise ValueError("No array values within given limits") - return am - -def tmean(a, limits=None, inclusive=(True, True)): - """ - Compute the trimmed mean - - This function finds the arithmetic mean of given values, ignoring values - outside the given `limits`. - - Parameters - ---------- - a : array_like - array of values - limits : None or (lower limit, upper limit), optional - Values in the input array less than the lower limit or greater than the - upper limit will be ignored. When limits is None, then all values are - used. Either of the limit values in the tuple can also be None - representing a half-open interval. The default value is None. - inclusive : (bool, bool), optional - A tuple consisting of the (lower flag, upper flag). These flags - determine whether values exactly equal to the lower or upper limits - are included. The default value is (True, True). - - Returns - ------- - tmean : float - - """ - a = asarray(a) - - # Cast to a float if this is an integer array. If it is already a float - # array, leave it as is to preserve its precision. - if issubclass(a.dtype.type, np.integer): - a = a.astype(float) - - # No trimming. 
- if limits is None: - return np.mean(a,None) - - am = mask_to_limits(a.ravel(), limits, inclusive) - return am.mean() - -def masked_var(am): - m = am.mean() - s = ma.add.reduce((am - m)**2) - n = am.count() - 1.0 - return s / n - -def tvar(a, limits=None, inclusive=(1,1)): - """ - Compute the trimmed variance - - This function computes the sample variance of an array of values, - while ignoring values which are outside of given `limits`. - - Parameters - ---------- - a : array_like - array of values - limits : None or (lower limit, upper limit), optional - Values in the input array less than the lower limit or greater than the - upper limit will be ignored. When limits is None, then all values are - used. Either of the limit values in the tuple can also be None - representing a half-open interval. The default value is None. - inclusive : (bool, bool), optional - A tuple consisting of the (lower flag, upper flag). These flags - determine whether values exactly equal to the lower or upper limits - are included. The default value is (True, True). - - Returns - ------- - tvar : float - - """ - a = asarray(a) - a = a.astype(float).ravel() - if limits is None: - n = len(a) - return a.var()*(n/(n-1.)) - am = mask_to_limits(a, limits, inclusive) - return masked_var(am) - -def tmin(a, lowerlimit=None, axis=0, inclusive=True): - """ - Compute the trimmed minimum - - This function finds the miminum value of an array `a` along the - specified axis, but only considering values greater than a specified - lower limit. - - Parameters - ---------- - a : array_like - array of values - lowerlimit : None or float, optional - Values in the input array less than the given limit will be ignored. - When lowerlimit is None, then all values are used. The default value - is None. - axis : None or int, optional - Operate along this axis. 
None means to use the flattened array and - the default is zero - inclusive : {True, False}, optional - This flag determines whether values exactly equal to the lower limit - are included. The default value is True. - - Returns - ------- - tmin: float - - """ - a, axis = _chk_asarray(a, axis) - am = mask_to_limits(a, (lowerlimit, None), (inclusive, False)) - return ma.minimum.reduce(am, axis) - -def tmax(a, upperlimit, axis=0, inclusive=True): - """ - Compute the trimmed maximum - - This function computes the maximum value of an array along a given axis, - while ignoring values larger than a specified upper limit. - - Parameters - ---------- - a : array_like - array of values - upperlimit : None or float, optional - Values in the input array greater than the given limit will be ignored. - When upperlimit is None, then all values are used. The default value - is None. - axis : None or int, optional - Operate along this axis. None means to use the flattened array and - the default is zero. - inclusive : {True, False}, optional - This flag determines whether values exactly equal to the upper limit - are included. The default value is True. - - Returns - ------- - tmax : float - - """ - a, axis = _chk_asarray(a, axis) - am = mask_to_limits(a, (None, upperlimit), (False, inclusive)) - return ma.maximum.reduce(am, axis) - -def tstd(a, limits=None, inclusive=(1,1)): - """ - Compute the trimmed sample standard deviation - - This function finds the sample standard deviation of given values, - ignoring values outside the given `limits`. - - Parameters - ---------- - a : array_like - array of values - limits : None or (lower limit, upper limit), optional - Values in the input array less than the lower limit or greater than the - upper limit will be ignored. When limits is None, then all values are - used. Either of the limit values in the tuple can also be None - representing a half-open interval. The default value is None. 
- inclusive : (bool, bool), optional - A tuple consisting of the (lower flag, upper flag). These flags - determine whether values exactly equal to the lower or upper limits - are included. The default value is (True, True). - - Returns - ------- - tstd : float - - """ - return np.sqrt(tvar(a,limits,inclusive)) - - -def tsem(a, limits=None, inclusive=(True,True)): - """ - Compute the trimmed standard error of the mean - - This function finds the standard error of the mean for given - values, ignoring values outside the given `limits`. - - Parameters - ---------- - a : array_like - array of values - limits : None or (lower limit, upper limit), optional - Values in the input array less than the lower limit or greater than the - upper limit will be ignored. When limits is None, then all values are - used. Either of the limit values in the tuple can also be None - representing a half-open interval. The default value is None. - inclusive : (bool, bool), optional - A tuple consisting of the (lower flag, upper flag). These flags - determine whether values exactly equal to the lower or upper limits - are included. The default value is (True, True). - - Returns - ------- - tsem : float - - """ - a = np.asarray(a).ravel() - if limits is None: - n = float(len(a)) - return a.std()/np.sqrt(n) - am = mask_to_limits(a.ravel(), limits, inclusive) - sd = np.sqrt(masked_var(am)) - return sd / am.count() - - -##################################### -############ MOMENTS ############# -##################################### - -def moment(a, moment=1, axis=0): - """ - Calculates the nth moment about the mean for a sample. - - Generally used to calculate coefficients of skewness and - kurtosis. - - Parameters - ---------- - a : array_like - data - moment : int - order of central moment that is returned - axis : int or None - Axis along which the central moment is computed. If None, then the data - array is raveled. The default axis is zero. 
- - Returns - ------- - n-th central moment : ndarray or float - The appropriate moment along the given axis or over all values if axis - is None. The denominator for the moment calculation is the number of - observations, no degrees of freedom correction is done. - - """ - a, axis = _chk_asarray(a, axis) - if moment == 1: - # By definition the first moment about the mean is 0. - shape = list(a.shape) - del shape[axis] - if shape: - # return an actual array of the appropriate shape - return np.zeros(shape, dtype=float) - else: - # the input was 1D, so return a scalar instead of a rank-0 array - return np.float64(0.0) - else: - mn = np.expand_dims(np.mean(a,axis), axis) - s = np.power((a-mn), moment) - return np.mean(s, axis) - - -def variation(a, axis=0): - """ - Computes the coefficient of variation, the ratio of the biased standard - deviation to the mean. - - Parameters - ---------- - a : array_like - Input array. - axis : int or None - Axis along which to calculate the coefficient of variation. - - References - ---------- - [CRCProbStat2000]_ Section 2.2.20 - - .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard - Probability and Statistics Tables and Formulae. Chapman & Hall: New - York. 2000. - - """ - a, axis = _chk_asarray(a, axis) - n = a.shape[axis] - return a.std(axis)/a.mean(axis) - - -def skew(a, axis=0, bias=True): - """ - Computes the skewness of a data set. - - For normally distributed data, the skewness should be about 0. A skewness - value > 0 means that there is more weight in the left tail of the - distribution. The function `skewtest` can be used to determine if the - skewness value is close enough to 0, statistically speaking. - - Parameters - ---------- - a : ndarray - data - axis : int or None - axis along which skewness is calculated - bias : bool - If False, then the calculations are corrected for statistical bias. 
- - Returns - ------- - skewness : ndarray - The skewness of values along an axis, returning 0 where all values are - equal. - - References - ---------- - [CRCProbStat2000]_ Section 2.2.24.1 - - .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard - Probability and Statistics Tables and Formulae. Chapman & Hall: New - York. 2000. - - """ - a, axis = _chk_asarray(a,axis) - n = a.shape[axis] - m2 = moment(a, 2, axis) - m3 = moment(a, 3, axis) - zero = (m2 == 0) - vals = np.where(zero, 0, m3 / m2**1.5) - if not bias: - can_correct = (n > 2) & (m2 > 0) - if can_correct.any(): - m2 = np.extract(can_correct, m2) - m3 = np.extract(can_correct, m3) - nval = np.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5 - np.place(vals, can_correct, nval) - if vals.ndim == 0: - return vals.item() - return vals - -def kurtosis(a, axis=0, fisher=True, bias=True): - """ - Computes the kurtosis (Fisher or Pearson) of a dataset. - - Kurtosis is the fourth central moment divided by the square of the - variance. If Fisher's definition is used, then 3.0 is subtracted from - the result to give 0.0 for a normal distribution. - - If bias is False then the kurtosis is calculated using k statistics to - eliminate bias coming from biased moment estimators - - Use `kurtosistest` to see if result is close enough to normal. - - Parameters - ---------- - a : array - data for which the kurtosis is calculated - axis : int or None - Axis along which the kurtosis is calculated - fisher : bool - If True, Fisher's definition is used (normal ==> 0.0). If False, - Pearson's definition is used (normal ==> 3.0). - bias : bool - If False, then the calculations are corrected for statistical bias. - - Returns - ------- - kurtosis : array - The kurtosis of values along an axis. If all values are equal, - return -3 for Fisher's definition and 0 for Pearson's definition. - - - References - ---------- - [CRCProbStat2000]_ Section 2.2.25 - - .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). 
CRC Standard - Probability and Statistics Tables and Formulae. Chapman & Hall: New - York. 2000. - - """ - a, axis = _chk_asarray(a, axis) - n = a.shape[axis] - m2 = moment(a,2,axis) - m4 = moment(a,4,axis) - zero = (m2 == 0) - vals = np.where(zero, 0, m4/ m2**2.0) - if not bias: - can_correct = (n > 3) & (m2 > 0) - if can_correct.any(): - m2 = np.extract(can_correct, m2) - m4 = np.extract(can_correct, m4) - nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0) - np.place(vals, can_correct, nval+3.0) - - if vals.ndim == 0: - vals = vals.item() # array scalar - - if fisher: - return vals - 3 - else: - return vals - -def describe(a, axis=0): - """ - Computes several descriptive statistics of the passed array. - - Parameters - ---------- - a : array_like - data - axis : int or None - axis along which statistics are calculated. If axis is None, then data - array is raveled. The default axis is zero. - - Returns - ------- - size of the data : int - length of data along axis - (min, max): tuple of ndarrays or floats - minimum and maximum value of data array - arithmetic mean : ndarray or float - mean of data along axis - unbiased variance : ndarray or float - variance of the data along axis, denominator is number of observations - minus one. - biased skewness : ndarray or float - skewness, based on moment calculations with denominator equal to the - number of observations, i.e. no degrees of freedom correction - biased kurtosis : ndarray or float - kurtosis (Fisher), the kurtosis is normalized so that it is zero for the - normal distribution. No degrees of freedom or bias correction is used. 
- - See Also - -------- - skew - kurtosis - - """ - a, axis = _chk_asarray(a, axis) - n = a.shape[axis] - #mm = (np.minimum.reduce(a), np.maximum.reduce(a)) - mm = (np.min(a, axis=axis), np.max(a, axis=axis)) - m = np.mean(a, axis=axis) - v = np.var(a, axis=axis, ddof=1) - sk = skew(a, axis) - kurt = kurtosis(a, axis) - return n, mm, m, v, sk, kurt - -##################################### -######## NORMALITY TESTS ########## -##################################### - -def skewtest(a, axis=0): - """ - Tests whether the skew is different from the normal distribution. - - This function tests the null hypothesis that the skewness of - the population that the sample was drawn from is the same - as that of a corresponding normal distribution. - - Parameters - ---------- - a : array - axis : int or None - - Returns - ------- - z-score : float - The computed z-score for this test. - p-value : float - a 2-sided p-value for the hypothesis test - - Notes - ----- - The sample size must be at least 8. - - """ - a, axis = _chk_asarray(a, axis) - if axis is None: - a = np.ravel(a) - axis = 0 - b2 = skew(a, axis) - n = float(a.shape[axis]) - if n < 8: - raise ValueError( - "skewtest is not valid with less than 8 samples; %i samples" - " were given." % int(n)) - y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2))) - beta2 = (3.0 * (n * n + 27 * n - 70) * (n + 1) * (n + 3) / - ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))) - W2 = -1 + math.sqrt(2 * (beta2 - 1)) - delta = 1 / math.sqrt(0.5 * math.log(W2)) - alpha = math.sqrt(2.0 / (W2 - 1)) - y = np.where(y == 0, 1, y) - Z = delta * np.log(y / alpha + np.sqrt((y / alpha) ** 2 + 1)) - return Z, 2 * distributions.norm.sf(np.abs(Z)) - -def kurtosistest(a, axis=0): - """ - Tests whether a dataset has normal kurtosis - - This function tests the null hypothesis that the kurtosis - of the population from which the sample was drawn is that - of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``. 
- - Parameters - ---------- - a : array - array of the sample data - axis : int or None - the axis to operate along, or None to work on the whole array. - The default is the first axis. - - Returns - ------- - z-score : float - The computed z-score for this test. - p-value : float - The 2-sided p-value for the hypothesis test - - Notes - ----- - Valid only for n>20. The Z-score is set to 0 for bad entries. - - """ - a, axis = _chk_asarray(a, axis) - n = float(a.shape[axis]) - if n < 20: - warnings.warn( - "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" % - int(n)) - b2 = kurtosis(a, axis, fisher=False) - E = 3.0*(n-1) /(n+1) - varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5)) - x = (b2-E)/np.sqrt(varb2) - sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5))/ - (n*(n-2)*(n-3))) - A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) - term1 = 1 -2/(9.0*A) - denom = 1 +x*np.sqrt(2/(A-4.0)) - denom = np.where(denom < 0, 99, denom) - term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom,1/3.0)) - Z = ( term1 - term2 ) / np.sqrt(2/(9.0*A)) - Z = np.where(denom == 99, 0, Z) - if Z.ndim == 0: - Z = Z[()] - #JPNote: p-value sometimes larger than 1 - #zprob uses upper tail, so Z needs to be positive - return Z, 2 * distributions.norm.sf(np.abs(Z)) - - -def normaltest(a, axis=0): - """ - Tests whether a sample differs from a normal distribution. - - This function tests the null hypothesis that a sample comes - from a normal distribution. It is based on D'Agostino and - Pearson's [1]_, [2]_ test that combines skew and kurtosis to - produce an omnibus test of normality. - - - Parameters - ---------- - a : array_like - The array containing the data to be tested. - axis : int or None - If None, the array is treated as a single data set, regardless of - its shape. Otherwise, each 1-d array along axis `axis` is tested. 
- - Returns - ------- - k2 : float or array - `s^2 + k^2`, where `s` is the z-score returned by `skewtest` and - `k` is the z-score returned by `kurtosistest`. - p-value : float or array - A 2-sided chi squared probability for the hypothesis test. - - References - ---------- - .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for - moderate and large sample size," Biometrika, 58, 341-348 - - .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for - departures from normality," Biometrika, 60, 613-622 - - """ - a, axis = _chk_asarray(a, axis) - s,p = skewtest(a,axis) - k,p = kurtosistest(a,axis) - k2 = s*s + k*k - return k2, chisqprob(k2,2) - -# Martinez-Iglewicz test -# K-S test - -##################################### -###### FREQUENCY FUNCTIONS ####### -##################################### - -def itemfreq(a): - """ - Returns a 2D array of item frequencies. - - Parameters - ---------- - a : array_like of rank 1 - Input array. - - Returns - ------- - itemfreq : ndarray of rank 2 - A 2D frequency table (col [0:n-1]=scores, col n=frequencies). - Column 1 contains item values, column 2 contains their respective - counts. - - Notes - ----- - This uses a loop that is only reasonably fast if the number of unique - elements is not large. For integers, numpy.bincount is much faster. - This function currently does not support strings or multi-dimensional - scores. - - Examples - -------- - >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4]) - >>> stats.itemfreq(a) - array([[ 0., 2.], - [ 1., 4.], - [ 2., 2.], - [ 4., 1.], - [ 5., 1.]]) - >>> np.bincount(a) - array([2, 4, 2, 0, 1, 1]) - - >>> stats.itemfreq(a/10.) - array([[ 0. , 2. ], - [ 0.1, 4. ], - [ 0.2, 2. ], - [ 0.4, 1. ], - [ 0.5, 1. ]]) - - """ - # TODO: I'm not sure I understand what this does. The docstring is - # internally inconsistent. 
- # comment: fortunately, this function doesn't appear to be used elsewhere - scores = _support.unique(a) - scores = np.sort(scores) - freq = zeros(len(scores)) - for i in range(len(scores)): - freq[i] = np.add.reduce(np.equal(a,scores[i])) - return array(_support.abut(scores, freq)) - - -def _interpolate(a, b, fraction): - """Returns the point at the given fraction between a and b, where - 'fraction' must be between 0 and 1. - """ - return a + (b - a)*fraction; - -def scoreatpercentile(a, per, limit=()): - """ - Calculate the score at the given `per` percentile of the sequence `a`. - - For example, the score at per=50 is the median. If the desired quantile - lies between two data points, we interpolate between them. If the parameter - `limit` is provided, it should be a tuple (lower, upper) of two values. - Values of `a` outside this (closed) interval will be ignored. - - Parameters - ---------- - a : ndarray - Values from which to extract score. - per : int or float - Percentile at which to extract score. - limit : tuple, optional - Tuple of two scalars, the lower and upper limits within which to - compute the percentile. - - Returns - ------- - score : float - Score at percentile. - - See Also - -------- - percentileofscore - - Examples - -------- - >>> from scipy import stats - >>> a = np.arange(100) - >>> stats.scoreatpercentile(a, 50) - 49.5 - - """ - # TODO: this should be a simple wrapper around a well-written quantile - # function. GNU R provides 9 quantile algorithms (!), with differing - # behaviour at, for example, discontinuities. - values = np.sort(a,axis=0) - if limit: - values = values[(limit[0] <= values) & (values <= limit[1])] - - idx = per /100. * (values.shape[0] - 1) - if (idx % 1 == 0): - return values[idx] - else: - return _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) - - -def percentileofscore(a, score, kind='rank'): - ''' - The percentile rank of a score relative to a list of scores. 
- - A `percentileofscore` of, for example, 80% means that 80% of the - scores in `a` are below the given score. In the case of gaps or - ties, the exact definition depends on the optional keyword, `kind`. - - Parameters - ---------- - a: array like - Array of scores to which `score` is compared. - score: int or float - Score that is compared to the elements in `a`. - kind: {'rank', 'weak', 'strict', 'mean'}, optional - This optional parameter specifies the interpretation of the - resulting score: - - - "rank": Average percentage ranking of score. In case of - multiple matches, average the percentage rankings of - all matching scores. - - "weak": This kind corresponds to the definition of a cumulative - distribution function. A percentileofscore of 80% - means that 80% of values are less than or equal - to the provided score. - - "strict": Similar to "weak", except that only values that are - strictly less than the given score are counted. - - "mean": The average of the "weak" and "strict" scores, often used in - testing. See - - http://en.wikipedia.org/wiki/Percentile_rank - - Returns - ------- - pcos : float - Percentile-position of score (0-100) relative to `a`. 
- - Examples - -------- - Three-quarters of the given values lie below a given score: - - >>> percentileofscore([1, 2, 3, 4], 3) - 75.0 - - With multiple matches, note how the scores of the two matches, 0.6 - and 0.8 respectively, are averaged: - - >>> percentileofscore([1, 2, 3, 3, 4], 3) - 70.0 - - Only 2/5 values are strictly less than 3: - - >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') - 40.0 - - But 4/5 values are less than or equal to 3: - - >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') - 80.0 - - The average between the weak and the strict scores is - - >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') - 60.0 - - ''' - a = np.array(a) - n = len(a) - - if kind == 'rank': - if not(np.any(a == score)): - a = np.append(a, score) - a_len = np.array(range(len(a))) - else: - a_len = np.array(range(len(a))) + 1.0 - - a = np.sort(a) - idx = [a == score] - pct = (np.mean(a_len[idx]) / n) * 100.0 - return pct - - elif kind == 'strict': - return sum(a < score) / float(n) * 100 - elif kind == 'weak': - return sum(a <= score) / float(n) * 100 - elif kind == 'mean': - return (sum(a < score) + sum(a <= score)) * 50 / float(n) - else: - raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'") - - -def histogram2(a, bins): - """ - Compute histogram using divisions in bins. - - Count the number of times values from array `a` fall into - numerical ranges defined by `bins`. Range x is given by - bins[x] <= range_x < bins[x+1] where x =0,N and N is the - length of the `bins` array. The last range is given by - bins[N] <= range_N < infinity. Values less than bins[0] are - not included in the histogram. - - Parameters - ---------- - a : array_like of rank 1 - The array of values to be assigned into bins - bins : array_like of rank 1 - Defines the ranges of values to use during histogramming. - - Returns - ------- - histogram2 : ndarray of rank 1 - Each value represents the occurrences for a given bin (range) of - values. 
- - """ - # comment: probably obsoleted by numpy.histogram() - n = np.searchsorted(np.sort(a), bins) - n = np.concatenate([ n, [len(a)]]) - return n[ 1:]-n[:-1] - - -def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False): - """ - Separates the range into several bins and returns the number of instances - of a in each bin. This histogram is based on numpy's histogram but has a - larger range by default if default limits is not set. - - Parameters - ---------- - a: array_like - Array of scores which will be put into bins. - numbins: int, optional - The number of bins to use for the histogram. Default is 10. - defaultlimits: tuple (lower, upper), optional - The lower and upper values for the range of the histogram. - If no value is given, a range slightly larger then the range of the - values in a is used. Specifically ``(a.min() - s, a.max() + s)``, - where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. - weights: array_like, optional - The weights for each value in `a`. Default is None, which gives each - value a weight of 1.0 - printextras: bool, optional - If True, the number of extra points is printed to standard output. - Default is False. - - Returns - ------- - histogram: ndarray - Number of points (or sum of weights) in each bin. - low_range: float - Lowest value of histogram, the lower limit of the first bin. - binsize: float - The size of the bins (all bins have the same size). - extrapoints: int - The number of points outside the range of the histogram. - - See Also - -------- - numpy.histogram - - """ - a = np.ravel(a) # flatten any >1D arrays - if defaultlimits is None: - # no range given, so use values in a - data_min = a.min() - data_max = a.max() - # Have bins extend past min and max values slightly - s = (data_max - data_min) / (2. 
* (numbins - 1.)) - defaultlimits = (data_min - s, data_max + s) - # use numpy's histogram method to compute bins - hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits, - weights=weights) - # hist are not always floats, convert to keep with old output - hist = np.array(hist, dtype=float) - # fixed width for bins is assumed, as numpy's histogram gives - # fixed width bins for int values for 'bins' - binsize = bin_edges[1] - bin_edges[0] - # calculate number of extra points - extrapoints = len([v for v in a - if defaultlimits[0] > v or v > defaultlimits[1]]) - if extrapoints > 0 and printextras: - warnings.warn("Points outside given histogram range = %s" \ - %extrapoints) - return (hist, defaultlimits[0], binsize, extrapoints) - - -def cumfreq(a, numbins=10, defaultreallimits=None, weights=None): - """ - Returns a cumulative frequency histogram, using the histogram function. - - Parameters - ---------- - a : array_like - Input array. - numbins: int, optional - The number of bins to use for the histogram. Default is 10. - defaultlimits: tuple (lower, upper), optional - The lower and upper values for the range of the histogram. - If no value is given, a range slightly larger then the range of the - values in a is used. Specifically ``(a.min() - s, a.max() + s)``, - where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. - weights: array_like, optional - The weights for each value in `a`. Default is None, which gives each - value a weight of 1.0 - - Returns - ------- - cumfreq : ndarray - Binned values of cumulative frequency. - lowerreallimit : float - Lower real limit - binsize : float - Width of each bin. - extrapoints : int - Extra points. - - Examples - -------- - >>> x = [1, 4, 2, 1, 3, 1] - >>> cumfreqs, lowlim, binsize, extrapoints = sp.stats.cumfreq(x, numbins=4) - >>> cumfreqs - array([ 3., 4., 5., 6.]) - >>> cumfreqs, lowlim, binsize, extrapoints = \ - ... 
sp.stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) - >>> cumfreqs - array([ 1., 2., 3., 3.]) - >>> extrapoints - 3 - - """ - h,l,b,e = histogram(a, numbins, defaultreallimits, weights=weights) - cumhist = np.cumsum(h*1, axis=0) - return cumhist,l,b,e - - -def relfreq(a, numbins=10, defaultreallimits=None, weights=None): - """ - Returns a relative frequency histogram, using the histogram function. - - Parameters - ---------- - a : array_like - Input array. - numbins: int, optional - The number of bins to use for the histogram. Default is 10. - defaultreallimits: tuple (lower, upper), optional - The lower and upper values for the range of the histogram. - If no value is given, a range slightly larger then the range of the - values in a is used. Specifically ``(a.min() - s, a.max() + s)``, - where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. - weights: array_like, optional - The weights for each value in `a`. Default is None, which gives each - value a weight of 1.0 - - Returns - ------- - relfreq : ndarray - Binned values of relative frequency. - lowerreallimit : float - Lower real limit - binsize : float - Width of each bin. - extrapoints : int - Extra points. - - Examples - -------- - >>> a = np.array([1, 4, 2, 1, 3, 1]) - >>> relfreqs, lowlim, binsize, extrapoints = sp.stats.relfreq(a, numbins=4) - >>> relfreqs - array([ 0.5 , 0.16666667, 0.16666667, 0.16666667]) - >>> np.sum(relfreqs) # relative frequencies should add up to 1 - 0.99999999999999989 - - """ - h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights) - h = np.array(h / float(np.array(a).shape[0])) - return h, l, b, e - - -##################################### -###### VARIABILITY FUNCTIONS ##### -##################################### - -def obrientransform(*args): - """ - Computes a transform on input data (any number of columns). - - Used to test for homogeneity of variance prior to running one-way stats. - Each array in *args is one level of a factor. 
If an F_oneway() run on the - transformed data and found significant, variances are unequal. From - Maxwell and Delaney, p.112. - - Returns - ------- - Transformed data for use in an ANOVA - - """ - TINY = 1e-10 - k = len(args) - n = zeros(k) - v = zeros(k) - m = zeros(k) - nargs = [] - for i in range(k): - nargs.append(args[i].astype(float)) - n[i] = float(len(nargs[i])) - v[i] = np.var(nargs[i], ddof=1) - m[i] = np.mean(nargs[i]) - for j in range(k): - for i in range(int(n[j])): - t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2 - t2 = 0.5*v[j]*(n[j]-1.0) - t3 = (n[j]-1.0)*(n[j]-2.0) - nargs[j][i] = (t1-t2) / float(t3) - check = 1 - for j in range(k): - if v[j] - np.mean(nargs[j]) > TINY: - check = 0 - if check != 1: - raise ValueError('Lack of convergence in obrientransform.') - else: - return array(nargs) - - -def signaltonoise(a, axis=0, ddof=0): - """ - The signal-to-noise ratio of the input data. - - Returns the signal-to-noise ratio of `a`, here defined as the mean - divided by the standard deviation. - - Parameters - ---------- - a: array_like - An array_like object containing the sample data. - axis: int or None, optional - If axis is equal to None, the array is first ravel'd. If axis is an - integer, this is the axis over which to operate. Default is 0. - ddof : int, optional - Degrees of freedom correction for standard deviation. Default is 0. - - Returns - ------- - s2n : ndarray - The mean to standard deviation ratio(s) along `axis`, or 0 where the - standard deviation is 0. - - """ - a = np.asanyarray(a) - m = a.mean(axis) - sd = a.std(axis=axis, ddof=ddof) - return np.where(sd == 0, 0, m/sd) - - -def sem(a, axis=0, ddof=1): - """ - Calculates the standard error of the mean (or standard error of - measurement) of the values in the input array. - - Parameters - ---------- - a : array_like - An array containing the values for which the standard error is - returned. - axis : int or None, optional. - If axis is None, ravel `a` first. 
If axis is an integer, this will be - the axis over which to operate. Defaults to 0. - ddof : int, optional - Delta degrees-of-freedom. How many degrees of freedom to adjust - for bias in limited samples relative to the population estimate - of variance. Defaults to 1. - - Returns - ------- - s : ndarray or float - The standard error of the mean in the sample(s), along the input axis. - - Notes - ----- - The default value for `ddof` is different to the default (0) used by other - ddof containing routines, such as np.std nd stats.nanstd. - - Examples - -------- - Find standard error along the first axis: - - >>> from scipy import stats - >>> a = np.arange(20).reshape(5,4) - >>> stats.sem(a) - array([ 2.8284, 2.8284, 2.8284, 2.8284]) - - Find standard error across the whole array, using n degrees of freedom: - - >>> stats.sem(a, axis=None, ddof=0) - 1.2893796958227628 - - """ - a, axis = _chk_asarray(a, axis) - n = a.shape[axis] - s = np.std(a,axis=axis, ddof=ddof) / np.sqrt(n) #JP check normalization - return s - - -def zscore(a, axis=0, ddof=0): - """ - Calculates the z score of each value in the sample, relative to the sample - mean and standard deviation. - - Parameters - ---------- - a : array_like - An array like object containing the sample data. - axis : int or None, optional - If `axis` is equal to None, the array is first raveled. If `axis` is - an integer, this is the axis over which to operate. Default is 0. - ddof : int, optional - Degrees of freedom correction in the calculation of the - standard deviation. Default is 0. - - Returns - ------- - zscore : array_like - The z-scores, standardized by mean and standard deviation of input - array `a`. - - Notes - ----- - This function preserves ndarray subclasses, and works also with - matrices and masked arrays (it uses `asanyarray` instead of `asarray` - for parameters). 
- - Examples - -------- - >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954, - 0.6307, 0.6599, 0.1065, 0.0508]) - >>> from scipy import stats - >>> stats.zscore(a) - array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786, - 0.6748, -1.1488, -1.3324]) - - Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``) - to calculate the standard deviation: - - >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608], - [ 0.7149, 0.0775, 0.6072, 0.9656], - [ 0.6341, 0.1403, 0.9759, 0.4064], - [ 0.5918, 0.6948, 0.904 , 0.3721], - [ 0.0921, 0.2481, 0.1188, 0.1366]]) - >>> stats.zscore(b, axis=1, ddof=1) - array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358], - [ 0.33048416, -1.37380874, 0.04251374, 1.00081084], - [ 0.26796377, -1.12598418, 1.23283094, -0.37481053], - [-0.22095197, 0.24468594, 1.19042819, -1.21416216], - [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]]) - """ - a = np.asanyarray(a) - mns = a.mean(axis=axis) - sstd = a.std(axis=axis, ddof=ddof) - if axis and mns.ndim < a.ndim: - return ((a - np.expand_dims(mns, axis=axis)) / - np.expand_dims(sstd,axis=axis)) - else: - return (a - mns) / sstd - - -def zmap(scores, compare, axis=0, ddof=0): - """ - Calculates the relative z-scores. - - Returns an array of z-scores, i.e., scores that are standardized to zero - mean and unit variance, where mean and variance are calculated from the - comparison array. - - Parameters - ---------- - scores : array_like - The input for which z-scores are calculated. - compare : array_like - The input from which the mean and standard deviation of the - normalization are taken; assumed to have the same dimension as - `scores`. - axis : int or None, optional - Axis over which mean and variance of `compare` are calculated. - Default is 0. - ddof : int, optional - Degrees of freedom correction in the calculation of the - standard deviation. Default is 0. - - Returns - ------- - zscore : array_like - Z-scores, in the same shape as `scores`. 
- - Notes - ----- - This function preserves ndarray subclasses, and works also with - matrices and masked arrays (it uses `asanyarray` instead of `asarray` - for parameters). - - Examples - -------- - >>> a = [0.5, 2.0, 2.5, 3] - >>> b = [0, 1, 2, 3, 4] - >>> zmap(a, b) - array([-1.06066017, 0. , 0.35355339, 0.70710678]) - """ - scores, compare = map(np.asanyarray, [scores, compare]) - mns = compare.mean(axis=axis) - sstd = compare.std(axis=axis, ddof=ddof) - if axis and mns.ndim < compare.ndim: - return ((scores - np.expand_dims(mns, axis=axis)) / - np.expand_dims(sstd,axis=axis)) - else: - return (scores - mns) / sstd - - -##################################### -####### TRIMMING FUNCTIONS ####### -##################################### - -def threshold(a, threshmin=None, threshmax=None, newval=0): - """ - Clip array to a given value. - - Similar to numpy.clip(), except that values less than `threshmin` or - greater than `threshmax` are replaced by `newval`, instead of by - `threshmin` and `threshmax` respectively. - - Parameters - ---------- - a : array_like - Data to threshold. - threshmin : float, int or None, optional - Minimum threshold, defaults to None. - threshmax : float, int or None, optional - Maximum threshold, defaults to None. - newval : float or int, optional - Value to put in place of values in `a` outside of bounds. - Defaults to 0. - - Returns - ------- - out : ndarray - The clipped input array, with values less than `threshmin` or - greater than `threshmax` replaced with `newval`. 
- - Examples - -------- - >>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8]) - >>> from scipy import stats - >>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1) - array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8]) - - """ - a = asarray(a).copy() - mask = zeros(a.shape, dtype=bool) - if threshmin is not None: - mask |= (a < threshmin) - if threshmax is not None: - mask |= (a > threshmax) - a[mask] = newval - return a - - - -def sigmaclip(a, low=4., high=4.): - """ - Iterative sigma-clipping of array elements. - - The output array contains only those elements of the input array `c` - that satisfy the conditions :: - - mean(c) - std(c)*low < c < mean(c) + std(c)*high - - Starting from the full sample, all elements outside the critical range are - removed. The iteration continues with a new critical range until no - elements are outside the range. - - Parameters - ---------- - a : array_like - Data array, will be raveled if not 1-D. - low : float, optional - Lower bound factor of sigma clipping. Default is 4. - high : float, optional - Upper bound factor of sigma clipping. Default is 4. - - Returns - ------- - c : ndarray - Input array with clipped elements removed. - critlower : float - Lower threshold value use for clipping. - critlupper : float - Upper threshold value use for clipping. - - Examples - -------- - >>> a = np.concatenate((np.linspace(9.5,10.5,31), np.linspace(0,20,5))) - >>> fact = 1.5 - >>> c, low, upp = sigmaclip(a, fact, fact) - >>> c - array([ 9.96666667, 10. , 10.03333333, 10. 
]) - >>> c.var(), c.std() - (0.00055555555555555165, 0.023570226039551501) - >>> low, c.mean() - fact*c.std(), c.min() - (9.9646446609406727, 9.9646446609406727, 9.9666666666666668) - >>> upp, c.mean() + fact*c.std(), c.max() - (10.035355339059327, 10.035355339059327, 10.033333333333333) - - >>> a = np.concatenate((np.linspace(9.5,10.5,11), - np.linspace(-100,-50,3))) - >>> c, low, upp = sigmaclip(a, 1.8, 1.8) - >>> (c == np.linspace(9.5,10.5,11)).all() - True - - """ - c = np.asarray(a).ravel() - delta = 1 - while delta: - c_std = c.std() - c_mean = c.mean() - size = c.size - critlower = c_mean - c_std*low - critupper = c_mean + c_std*high - c = c[(c>critlower) & (c>> from scipy import stats - >>> a = np.arange(20) - >>> b = stats.trimboth(a, 0.1) - >>> b.shape - (16,) - - """ - a = asarray(a) - lowercut = int(proportiontocut*len(a)) - uppercut = len(a) - lowercut - if (lowercut >= uppercut): - raise ValueError("Proportion too big.") - return a[lowercut:uppercut] - - -def trim1(a, proportiontocut, tail='right'): - """ - Slices off a proportion of items from ONE end of the passed array - distribution. - - If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost' - 10% of scores. Slices off LESS if proportion results in a non-integer - slice index (i.e., conservatively slices off `proportiontocut` ). - - Parameters - ---------- - a : array_like - Input array - proportiontocut : float - Fraction to cut off of 'left' or 'right' of distribution - tail : string, {'left', 'right'}, optional - Defaults to 'right'. - - Returns - ------- - trim1 : ndarray - Trimmed version of array `a` - - """ - a = asarray(a) - if tail.lower() == 'right': - lowercut = 0 - uppercut = len(a) - int(proportiontocut*len(a)) - elif tail.lower() == 'left': - lowercut = int(proportiontocut*len(a)) - uppercut = len(a) - return a[lowercut:uppercut] - -def trim_mean(a, proportiontocut): - """ - Return mean of array after trimming distribution from both lower and upper - tails. 
- - If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of - scores. Slices off LESS if proportion results in a non-integer slice - index (i.e., conservatively slices off `proportiontocut` ). - - Parameters - ---------- - a : array_like - Input array - proportiontocut : float - Fraction to cut off of both tails of the distribution - - Returns - ------- - trim_mean : ndarray - Mean of trimmed array. - - """ - newa = trimboth(np.sort(a),proportiontocut) - return np.mean(newa,axis=0) - - -def f_oneway(*args): - """ - Performs a 1-way ANOVA. - - The one-way ANOVA tests the null hypothesis that two or more groups have - the same population mean. The test is applied to samples from two or - more groups, possibly with differing sizes. - - Parameters - ---------- - sample1, sample2, ... : array_like - The sample measurements for each group. - - Returns - ------- - F-value : float - The computed F-value of the test. - p-value : float - The associated p-value from the F-distribution. - - Notes - ----- - The ANOVA test has important assumptions that must be satisfied in order - for the associated p-value to be valid. - - 1. The samples are independent. - 2. Each sample is from a normally distributed population. - 3. The population standard deviations of the groups are all equal. This - property is known as homoscedasticity. - - If these assumptions are not true for a given set of data, it may still be - possible to use the Kruskal-Wallis H-test (`stats.kruskal`_) although with - some loss of power. - - The algorithm is from Heiman[2], pp.394-7. - - - References - ---------- - .. [1] Lowry, Richard. "Concepts and Applications of Inferential - Statistics". Chapter 14. http://faculty.vassar.edu/lowry/ch14pt1.html - - .. [2] Heiman, G.W. Research Methods in Statistics. 2002. 
- - """ - na = len(args) # ANOVA on 'na' groups, each in it's own array - tmp = map(np.array,args) - alldata = np.concatenate(args) - bign = len(alldata) - sstot = ss(alldata)-(square_of_sums(alldata)/float(bign)) - ssbn = 0 - for a in args: - ssbn = ssbn + square_of_sums(array(a))/float(len(a)) - ssbn = ssbn - (square_of_sums(alldata)/float(bign)) - sswn = sstot-ssbn - dfbn = na-1 - dfwn = bign - na - msb = ssbn/float(dfbn) - msw = sswn/float(dfwn) - f = msb/msw - prob = fprob(dfbn,dfwn,f) - return f, prob - - - -def pearsonr(x, y): - """Calculates a Pearson correlation coefficient and the p-value for testing - non-correlation. - - The Pearson correlation coefficient measures the linear relationship - between two datasets. Strictly speaking, Pearson's correlation requires - that each dataset be normally distributed. Like other correlation - coefficients, this one varies between -1 and +1 with 0 implying no - correlation. Correlations of -1 or +1 imply an exact linear - relationship. Positive correlations imply that as x increases, so does - y. Negative correlations imply that as x increases, y decreases. - - The p-value roughly indicates the probability of an uncorrelated system - producing datasets that have a Pearson correlation at least as extreme - as the one computed from these datasets. The p-values are not entirely - reliable but are probably reasonable for datasets larger than 500 or so. - - Parameters - ---------- - x : 1D array - y : 1D array the same length as x - - Returns - ------- - (Pearson's correlation coefficient, - 2-tailed p-value) - - References - ---------- - http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation - """ - # x and y should have same length. 
- x = np.asarray(x) - y = np.asarray(y) - n = len(x) - mx = x.mean() - my = y.mean() - xm, ym = x-mx, y-my - r_num = n*(np.add.reduce(xm*ym)) - r_den = n*np.sqrt(ss(xm)*ss(ym)) - r = (r_num / r_den) - - # Presumably, if abs(r) > 1, then it is only some small artifact of floating - # point arithmetic. - r = max(min(r, 1.0), -1.0) - df = n-2 - if abs(r) == 1.0: - prob = 0.0 - else: - t_squared = r*r * (df / ((1.0 - r) * (1.0 + r))) - prob = betai(0.5*df, 0.5, df / (df + t_squared)) - return r, prob - - -def fisher_exact(table, alternative='two-sided'): - """Performs a Fisher exact test on a 2x2 contingency table. - - Parameters - ---------- - table : array_like of ints - A 2x2 contingency table. Elements should be non-negative integers. - alternative : {'two-sided', 'less', 'greater'}, optional - Which alternative hypothesis to the null hypothesis the test uses. - Default is 'two-sided'. - - Returns - ------- - oddsratio : float - This is prior odds ratio and not a posterior estimate. - p_value : float - P-value, the probability of obtaining a distribution at least as - extreme as the one that was actually observed, assuming that the - null hypothesis is true. - - See Also - -------- - chi2_contingency : Chi-square test of independence of variables in a - contingency table. - - Notes - ----- - The calculated odds ratio is different from the one R uses. In R language, - this implementation returns the (more common) "unconditional Maximum - Likelihood Estimate", while R uses the "conditional Maximum Likelihood - Estimate". - - For tables with large numbers the (inexact) chi-square test implemented - in the function `chi2_contingency` can also be used. - - Examples - -------- - Say we spend a few days counting whales and sharks in the Atlantic and - Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the - Indian ocean 2 whales and 5 sharks. 
Then our contingency table is:: - - Atlantic Indian - whales 8 2 - sharks 1 5 - - We use this table to find the p-value: - - >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]]) - >>> pvalue - 0.0349... - - The probability that we would observe this or an even more imbalanced ratio - by chance is about 3.5%. A commonly used significance level is 5%, if we - adopt that we can therefore conclude that our observed imbalance is - statistically significant; whales prefer the Atlantic while sharks prefer - the Indian ocean. - - """ - hypergeom = distributions.hypergeom - c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm - if not c.shape == (2, 2): - raise ValueError("The input `table` must be of shape (2, 2).") - - if np.any(c < 0): - raise ValueError("All values in `table` must be nonnegative.") - - if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): - # If both values in a row or column are zero, the p-value is 1 and - # the odds ratio is NaN. - return np.nan, 1.0 - - if c[1,0] > 0 and c[0,1] > 0: - oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1]) - else: - oddsratio = np.inf - - n1 = c[0,0] + c[0,1] - n2 = c[1,0] + c[1,1] - n = c[0,0] + c[1,0] - - def binary_search(n, n1, n2, side): - """Binary search for where to begin lower/upper halves in two-sided - test. 
- """ - if side == "upper": - minval = mode - maxval = n - else: - minval = 0 - maxval = mode - guess = -1 - while maxval - minval > 1: - if maxval == minval + 1 and guess == minval: - guess = maxval - else: - guess = (maxval + minval) // 2 - pguess = hypergeom.pmf(guess, n1 + n2, n1, n) - if side == "upper": - ng = guess - 1 - else: - ng = guess + 1 - if pguess <= pexact and hypergeom.pmf(ng, n1 + n2, n1, n) > pexact: - break - elif pguess < pexact: - maxval = guess - else: - minval = guess - if guess == -1: - guess = minval - if side == "upper": - while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: - guess -= 1 - while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: - guess += 1 - else: - while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: - guess += 1 - while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: - guess -= 1 - return guess - - if alternative == 'less': - pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n) - elif alternative == 'greater': - # Same formula as the 'less' case, but with the second column. - pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1]) - elif alternative == 'two-sided': - mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2)) - pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n) - pmode = hypergeom.pmf(mode, n1 + n2, n1, n) - - epsilon = 1 - 1e-4 - if float(np.abs(pexact - pmode)) / np.abs(np.max(pexact, pmode)) <= 1 - epsilon: - return oddsratio, 1. 
- - elif c[0,0] < mode: - plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n) - if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon: - return oddsratio, plower - - guess = binary_search(n, n1, n2, "upper") - pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n) - else: - pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n) - if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon: - return oddsratio, pupper - - guess = binary_search(n, n1, n2, "lower") - pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n) - else: - msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}" - raise ValueError(msg) - - if pvalue > 1.0: - pvalue = 1.0 - return oddsratio, pvalue - - -def spearmanr(a, b=None, axis=0): - """ - Calculates a Spearman rank-order correlation coefficient and the p-value - to test for non-correlation. - - The Spearman correlation is a nonparametric measure of the monotonicity - of the relationship between two datasets. Unlike the Pearson correlation, - the Spearman correlation does not assume that both datasets are normally - distributed. Like other correlation coefficients, this one varies - between -1 and +1 with 0 implying no correlation. Correlations of -1 or - +1 imply an exact monotonic relationship. Positive correlations imply that - as x increases, so does y. Negative correlations imply that as x - increases, y decreases. - - The p-value roughly indicates the probability of an uncorrelated system - producing datasets that have a Spearman correlation at least as extreme - as the one computed from these datasets. The p-values are not entirely - reliable but are probably reasonable for datasets larger than 500 or so. - - Parameters - ---------- - a, b : 1D or 2D array_like, b is optional - One or two 1-D or 2-D arrays containing multiple variables and - observations. Each column of `a` and `b` represents a variable, and - each row entry a single observation of those variables. See also - `axis`. 
Both arrays need to have the same length in the `axis` - dimension. - axis : int or None, optional - If axis=0 (default), then each column represents a variable, with - observations in the rows. If axis=0, the relationship is transposed: - each row represents a variable, while the columns contain observations. - If axis=None, then both arrays will be raveled. - - Returns - ------- - rho: float or ndarray (2-D square) - Spearman correlation matrix or correlation coefficient (if only 2 - variables are given as parameters. Correlation matrix is square with - length equal to total number of variables (columns or rows) in a and b - combined. - p-value : float - The two-sided p-value for a hypothesis test whose null hypothesis is - that two sets of data are uncorrelated, has same dimension as rho. - - Notes - ----- - Changes in scipy 0.8.0: rewrite to add tie-handling, and axis. - - References - ---------- - [CRCProbStat2000]_ Section 14.7 - - .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard - Probability and Statistics Tables and Formulae. Chapman & Hall: New - York. 2000. - - Examples - -------- - >>> spearmanr([1,2,3,4,5],[5,6,7,8,7]) - (0.82078268166812329, 0.088587005313543798) - >>> np.random.seed(1234321) - >>> x2n=np.random.randn(100,2) - >>> y2n=np.random.randn(100,2) - >>> spearmanr(x2n) - (0.059969996999699973, 0.55338590803773591) - >>> spearmanr(x2n[:,0], x2n[:,1]) - (0.059969996999699973, 0.55338590803773591) - >>> rho, pval = spearmanr(x2n,y2n) - >>> rho - array([[ 1. , 0.05997 , 0.18569457, 0.06258626], - [ 0.05997 , 1. , 0.110003 , 0.02534653], - [ 0.18569457, 0.110003 , 1. , 0.03488749], - [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) - >>> pval - array([[ 0. , 0.55338591, 0.06435364, 0.53617935], - [ 0.55338591, 0. , 0.27592895, 0.80234077], - [ 0.06435364, 0.27592895, 0. , 0.73039992], - [ 0.53617935, 0.80234077, 0.73039992, 0. ]]) - >>> rho, pval = spearmanr(x2n.T, y2n.T, axis=1) - >>> rho - array([[ 1. 
, 0.05997 , 0.18569457, 0.06258626], - [ 0.05997 , 1. , 0.110003 , 0.02534653], - [ 0.18569457, 0.110003 , 1. , 0.03488749], - [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) - >>> spearmanr(x2n, y2n, axis=None) - (0.10816770419260482, 0.1273562188027364) - >>> spearmanr(x2n.ravel(), y2n.ravel()) - (0.10816770419260482, 0.1273562188027364) - - >>> xint = np.random.randint(10,size=(100,2)) - >>> spearmanr(xint) - (0.052760927029710199, 0.60213045837062351) - - """ - a, axisout = _chk_asarray(a, axis) - ar = np.apply_along_axis(rankdata,axisout,a) - - br = None - if not b is None: - b, axisout = _chk_asarray(b, axis) - br = np.apply_along_axis(rankdata,axisout,b) - n = a.shape[axisout] - rs = np.corrcoef(ar,br,rowvar=axisout) - - olderr = np.seterr(divide='ignore') # rs can have elements equal to 1 - try: - t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs))) - finally: - np.seterr(**olderr) - prob = distributions.t.sf(np.abs(t),n-2)*2 - - if rs.shape == (2,2): - return rs[1,0], prob[1,0] - else: - return rs, prob - - -def pointbiserialr(x, y): - # comment: I am changing the semantics somewhat. The original function is - # fairly general and accepts an x sequence that has any type of thing in it as - # along as there are only two unique items. I am going to restrict this to - # a boolean array for my sanity. - """Calculates a point biserial correlation coefficient and the associated - p-value. - - The point biserial correlation is used to measure the relationship - between a binary variable, x, and a continuous variable, y. Like other - correlation coefficients, this one varies between -1 and +1 with 0 - implying no correlation. Correlations of -1 or +1 imply a determinative - relationship. - - This function uses a shortcut formula but produces the same result as - `pearsonr`. - - Parameters - ---------- - x : array_like of bools - Input array. - y : array_like - Input array. 
- - Returns - ------- - r : float - R value - p-value : float - 2-tailed p-value - - References - ---------- - http://www.childrens-mercy.org/stats/definitions/biserial.htm - - Examples - -------- - >>> from scipy import stats - >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) - >>> b = np.arange(7) - >>> stats.pointbiserialr(a, b) - (0.8660254037844386, 0.011724811003954652) - >>> stats.pearsonr(a, b) - (0.86602540378443871, 0.011724811003954626) - >>> np.corrcoef(a, b) - array([[ 1. , 0.8660254], - [ 0.8660254, 1. ]]) - """ - - ## Test data: http://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output - # x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1] - # y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,2.8,2.8,2.5, - # 2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,0.8,0.7,0.6,0.5,0.2,0.2, - # 0.1] - # rpb = 0.36149 - - x = np.asarray(x, dtype=bool) - y = np.asarray(y, dtype=float) - n = len(x) - - # phat is the fraction of x values that are True - phat = x.sum() / float(len(x)) - y0 = y[~x] # y-values where x is False - y1 = y[x] # y-values where x is True - y0m = y0.mean() - y1m = y1.mean() - - # phat - phat**2 is more stable than phat*(1-phat) - rpb = (y1m - y0m) * np.sqrt(phat - phat**2) / y.std() - - df = n-2 - # fixme: see comment about TINY in pearsonr() - TINY = 1e-20 - t = rpb*np.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY))) - prob = betai(0.5*df, 0.5, df/(df+t*t)) - return rpb, prob - - -def kendalltau(x, y, initial_lexsort=True): - """ - Calculates Kendall's tau, a correlation measure for ordinal data. - - Kendall's tau is a measure of the correspondence between two rankings. - Values close to 1 indicate strong agreement, values close to -1 indicate - strong disagreement. This is the tau-b version of Kendall's tau which - accounts for ties. - - Parameters - ---------- - x, y : array_like - Arrays of rankings, of the same shape. If arrays are not 1-D, they will - be flattened to 1-D. 
- initial_lexsort : bool, optional - Whether to use lexsort or quicksort as the sorting method for the - initial sort of the inputs. Default is lexsort (True), for which - `kendalltau` is of complexity O(n log(n)). If False, the complexity is - O(n^2), but with a smaller pre-factor (so quicksort may be faster for - small arrays). - - Returns - ------- - Kendall's tau : float - The tau statistic. - p-value : float - The two-sided p-value for a hypothesis test whose null hypothesis is - an absence of association, tau = 0. - - Notes - ----- - The definition of Kendall's tau that is used is:: - - tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U)) - - where P is the number of concordant pairs, Q the number of discordant - pairs, T the number of ties only in `x`, and U the number of ties only in - `y`. If a tie occurs for the same pair in both `x` and `y`, it is not - added to either T or U. - - References - ---------- - W.R. Knight, "A Computer Method for Calculating Kendall's Tau with - Ungrouped Data", Journal of the American Statistical Association, Vol. 61, - No. 314, Part 1, pp. 436-439, 1966. 
- - Examples - -------- - >>> x1 = [12, 2, 1, 12, 2] - >>> x2 = [1, 4, 7, 1, 0] - >>> tau, p_value = sp.stats.kendalltau(x1, x2) - >>> tau - -0.47140452079103173 - >>> p_value - 0.24821309157521476 - - """ - - x = np.asarray(x).ravel() - y = np.asarray(y).ravel() - n = np.int64(len(x)) - temp = range(n) # support structure used by mergesort - # this closure recursively sorts sections of perm[] by comparing - # elements of y[perm[]] using temp[] as support - # returns the number of swaps required by an equivalent bubble sort - def mergesort(offs, length): - exchcnt = 0 - if length == 1: - return 0 - if length == 2: - if y[perm[offs]] <= y[perm[offs+1]]: - return 0 - t = perm[offs] - perm[offs] = perm[offs+1] - perm[offs+1] = t - return 1 - length0 = length // 2 - length1 = length - length0 - middle = offs + length0 - exchcnt += mergesort(offs, length0) - exchcnt += mergesort(middle, length1) - if y[perm[middle - 1]] < y[perm[middle]]: - return exchcnt - # merging - i = j = k = 0 - while j < length0 or k < length1: - if k >= length1 or (j < length0 and y[perm[offs + j]] <= - y[perm[middle + k]]): - temp[i] = perm[offs + j] - d = i - j - j += 1 - else: - temp[i] = perm[middle + k] - d = (offs + i) - (middle + k) - k += 1 - if d > 0: - exchcnt += d; - i += 1 - perm[offs:offs+length] = temp[0:length] - return exchcnt - - # initial sort on values of x and, if tied, on values of y - if initial_lexsort: - # sort implemented as mergesort, worst case: O(n log(n)) - perm = np.lexsort((y, x)) - else: - # sort implemented as quicksort, 30% faster but with worst case: O(n^2) - perm = range(n) - perm.sort(key=lambda a: (x[a], y[a])) - - # compute joint ties - first = 0 - t = 0 - for i in xrange(1, n): - if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]: - t += ((i - first) * (i - first - 1)) // 2 - first = i - t += ((n - first) * (n - first - 1)) // 2 - - # compute ties in x - first = 0 - u = 0 - for i in xrange(1,n): - if x[perm[first]] != x[perm[i]]: - u += ((i - 
first) * (i - first - 1)) // 2 - first = i - u += ((n - first) * (n - first - 1)) // 2 - - # count exchanges - exchanges = mergesort(0, n) - # compute ties in y after mergesort with counting - first = 0 - v = 0 - for i in xrange(1,n): - if y[perm[first]] != y[perm[i]]: - v += ((i - first) * (i - first - 1)) // 2 - first = i - v += ((n - first) * (n - first - 1)) // 2 - - tot = (n * (n - 1)) // 2 - if tot == u and tot == v: - return 1 # Special case for all ties in both ranks - - # Prevent overflow; equal to np.sqrt((tot - u) * (tot - v)) - denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v))) - tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom - - # what follows reproduces the ending of Gary Strangman's original - # stats.kendalltau() in SciPy - svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1)) - z = tau / np.sqrt(svar) - prob = special.erfc(np.abs(z) / 1.4142136) - - return tau, prob - - -def linregress(x, y=None): - """ - Calculate a regression line - - This computes a least-squares regression for two sets of measurements. - - Parameters - ---------- - x, y : array_like - two sets of measurements. Both arrays should have the same length. - If only x is given (and y=None), then it must be a two-dimensional - array where one dimension has length 2. The two sets of measurements - are then found by splitting the array along the length-2 dimension. - - Returns - ------- - slope : float - slope of the regression line - intercept : float - intercept of the regression line - r-value : float - correlation coefficient - p-value : float - two-sided p-value for a hypothesis test whose null hypothesis is - that the slope is zero. 
- stderr : float - Standard error of the estimate - - - Examples - -------- - >>> from scipy import stats - >>> import numpy as np - >>> x = np.random.random(10) - >>> y = np.random.random(10) - >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y) - - # To get coefficient of determination (r_squared) - - >>> print "r-squared:", r_value**2 - r-squared: 0.15286643777 - - """ - TINY = 1.0e-20 - if y is None: # x is a (2, N) or (N, 2) shaped array_like - x = asarray(x) - if x.shape[0] == 2: - x, y = x - elif x.shape[1] == 2: - x, y = x.T - else: - msg = "If only `x` is given as input, it has to be of shape (2, N) \ - or (N, 2), provided shape was %s" % str(x.shape) - raise ValueError(msg) - else: - x = asarray(x) - y = asarray(y) - n = len(x) - xmean = np.mean(x,None) - ymean = np.mean(y,None) - - # average sum of squares: - ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat - r_num = ssxym - r_den = np.sqrt(ssxm*ssym) - if r_den == 0.0: - r = 0.0 - else: - r = r_num / r_den - if (r > 1.0): r = 1.0 # from numerical error - #z = 0.5*log((1.0+r+TINY)/(1.0-r+TINY)) - df = n-2 - t = r*np.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY))) - prob = distributions.t.sf(np.abs(t),df)*2 - slope = r_num / ssxm - intercept = ymean - slope*xmean - sterrest = np.sqrt((1-r*r)*ssym / ssxm / df) - return slope, intercept, r, prob, sterrest - - -##################################### -##### INFERENTIAL STATISTICS ##### -##################################### - -def ttest_1samp(a, popmean, axis=0): - """Calculates the T-test for the mean of ONE group of scores `a`. - - This is a two-sided test for the null hypothesis that the expected value - (mean) of a sample of independent observations is equal to the given - population mean, `popmean`. 
- - Parameters - ---------- - a : array_like - sample observation - popmean : float or array_like - expected value in null hypothesis, if array_like than it must have the - same shape as `a` excluding the axis dimension - axis : int, optional, (default axis=0) - Axis can equal None (ravel array first), or an integer (the axis - over which to operate on a). - - Returns - ------- - t : float or array - t-statistic - prob : float or array - two-tailed p-value - - Examples - -------- - - >>> from scipy import stats - >>> import numpy as np - - >>> #fix seed to get the same result - >>> np.random.seed(7654567) - >>> rvs = stats.norm.rvs(loc=5,scale=10,size=(50,2)) - - test if mean of random sample is equal to true mean, and different mean. - We reject the null hypothesis in the second case and don't reject it in - the first case - - >>> stats.ttest_1samp(rvs,5.0) - (array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674])) - >>> stats.ttest_1samp(rvs,0.0) - (array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999])) - - examples using axis and non-scalar dimension for population mean - - >>> stats.ttest_1samp(rvs,[5.0,0.0]) - (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) - >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1) - (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) - >>> stats.ttest_1samp(rvs,[[5.0],[0.0]]) - (array([[-0.68014479, -0.04323899], - [ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01], - [ 7.89094663e-03, 1.49986458e-04]])) - -""" - - - a, axis = _chk_asarray(a, axis) - n = a.shape[axis] - df=n-1 - - d = np.mean(a,axis) - popmean - v = np.var(a, axis, ddof=1) - - t = d / np.sqrt(v/float(n)) - t = np.where((d==0)*(v==0), 1.0, t) #define t=0/0 = 1, identical mean, var - prob = distributions.t.sf(np.abs(t),df)*2 #use np.abs to get upper tail - #distributions.t.sf currently does not propagate nans - #this can be dropped, if distributions.t.sf propagates nans - 
#if this is removed, then prob = prob[()] needs to be removed - prob = np.where(np.isnan(t), np.nan, prob) - - if t.ndim == 0: - t = t[()] - prob = prob[()] - return t,prob - - -def ttest_ind(a, b, axis=0): - """Calculates the T-test for the means of TWO INDEPENDENT samples of scores. - - This is a two-sided test for the null hypothesis that 2 independent samples - have identical average (expected) values. - - Parameters - ---------- - a, b : sequence of ndarrays - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - Axis can equal None (ravel array first), or an integer (the axis - over which to operate on a and b). - - Returns - ------- - t : float or array - t-statistic - prob : float or array - two-tailed p-value - - - Notes - ----- - - We can use this test, if we observe two independent samples from - the same or different population, e.g. exam scores of boys and - girls or of two ethnic groups. The test measures whether the - average (expected) value differs significantly across samples. If - we observe a large p-value, for example larger than 0.05 or 0.1, - then we cannot reject the null hypothesis of identical average scores. - If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%, - then we reject the null hypothesis of equal averages. 
- - References - ---------- - - http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test - - - Examples - -------- - - >>> from scipy import stats - >>> import numpy as np - - >>> #fix seed to get the same result - >>> np.random.seed(12345678) - - test with sample with identical means - - >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500) - >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500) - >>> stats.ttest_ind(rvs1,rvs2) - (0.26833823296239279, 0.78849443369564765) - - - test with sample with different means - - >>> rvs3 = stats.norm.rvs(loc=8,scale=10,size=500) - >>> stats.ttest_ind(rvs1,rvs3) - (-5.0434013458585092, 5.4302979468623391e-007) - - """ - a, b, axis = _chk2_asarray(a, b, axis) - - v1 = np.var(a,axis,ddof = 1) - v2 = np.var(b,axis,ddof = 1) - n1 = a.shape[axis] - n2 = b.shape[axis] - df = n1+n2-2 - - d = np.mean(a,axis) - np.mean(b,axis) - svar = ((n1-1)*v1+(n2-1)*v2) / float(df) - - t = d/np.sqrt(svar*(1.0/n1 + 1.0/n2)) - t = np.where((d==0)*(svar==0), 1.0, t) #define t=0/0 = 0, identical means - prob = distributions.t.sf(np.abs(t),df)*2#use np.abs to get upper tail - - #distributions.t.sf currently does not propagate nans - #this can be dropped, if distributions.t.sf propagates nans - #if this is removed, then prob = prob[()] needs to be removed - prob = np.where(np.isnan(t), np.nan, prob) - - if t.ndim == 0: - t = t[()] - prob = prob[()] - - return t, prob - - -def ttest_rel(a,b,axis=0): - """ - Calculates the T-test on TWO RELATED samples of scores, a and b. - - This is a two-sided test for the null hypothesis that 2 related or - repeated samples have identical average (expected) values. - - Parameters - ---------- - a, b : sequence of ndarrays - The arrays must have the same shape. - axis : int, optional, (default axis=0) - Axis can equal None (ravel array first), or an integer (the axis - over which to operate on a and b). 
- - Returns - ------- - t : float or array - t-statistic - prob : float or array - two-tailed p-value - - Notes - ----- - Examples for the use are scores of the same set of student in - different exams, or repeated sampling from the same units. The - test measures whether the average score differs significantly - across samples (e.g. exams). If we observe a large p-value, for - example greater than 0.05 or 0.1 then we cannot reject the null - hypothesis of identical average scores. If the p-value is smaller - than the threshold, e.g. 1%, 5% or 10%, then we reject the null - hypothesis of equal averages. Small p-values are associated with - large t-statistics. - - References - ---------- - - http://en.wikipedia.org/wiki/T-test#Dependent_t-test - - Examples - -------- - - >>> from scipy import stats - >>> np.random.seed(12345678) # fix random seed to get same numbers - >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500) - >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) + - ... stats.norm.rvs(scale=0.2,size=500)) - >>> stats.ttest_rel(rvs1,rvs2) - (0.24101764965300962, 0.80964043445811562) - >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) + - ... 
stats.norm.rvs(scale=0.2,size=500)) - >>> stats.ttest_rel(rvs1,rvs3) - (-3.9995108708727933, 7.3082402191726459e-005) - - """ - a, b, axis = _chk2_asarray(a, b, axis) - if a.shape[axis] != b.shape[axis]: - raise ValueError('unequal length arrays') - n = a.shape[axis] - df = float(n-1) - - d = (a-b).astype('d') - v = np.var(d,axis,ddof=1) - dm = np.mean(d, axis) - - t = dm / np.sqrt(v/float(n)) - t = np.where((dm==0)*(v==0), 1.0, t) #define t=0/0 = 1, zero mean and var - prob = distributions.t.sf(np.abs(t),df)*2 #use np.abs to get upper tail - #distributions.t.sf currently does not propagate nans - #this can be dropped, if distributions.t.sf propagates nans - #if this is removed, then prob = prob[()] needs to be removed - prob = np.where(np.isnan(t), np.nan, prob) - -## if not np.isscalar(t): -## probs = np.reshape(probs, t.shape) # this should be redundant -## if not np.isscalar(prob) and len(prob) == 1: -## prob = prob[0] - if t.ndim == 0: - t = t[()] - prob = prob[()] - - return t, prob - - -#import scipy.stats -#import distributions -def kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds): - """ - Perform the Kolmogorov-Smirnov test for goodness of fit - - This performs a test of the distribution G(x) of an observed - random variable against a given distribution F(x). Under the null - hypothesis the two distributions are identical, G(x)=F(x). The - alternative hypothesis can be either 'two_sided' (default), 'less' - or 'greater'. The KS test is only valid for continuous distributions. 
- - Parameters - ---------- - rvs : string or array or callable - string: name of a distribution in scipy.stats - - array: 1-D observations of random variables - - callable: function to generate random variables, requires keyword - argument `size` - - cdf : string or callable - string: name of a distribution in scipy.stats, if rvs is a string then - cdf can evaluate to `False` or be the same as rvs - callable: function to evaluate cdf - - args : tuple, sequence - distribution parameters, used if rvs or cdf are strings - N : int - sample size if rvs is string or callable - alternative : 'two_sided' (default), 'less' or 'greater' - defines the alternative hypothesis (see explanation) - - mode : 'approx' (default) or 'asymp' - defines the distribution used for calculating p-value - - 'approx' : use approximation to exact distribution of test statistic - - 'asymp' : use asymptotic distribution of test statistic - - - Returns - ------- - D : float - KS test statistic, either D, D+ or D- - p-value : float - one-tailed or two-tailed p-value - - Notes - ----- - - In the one-sided test, the alternative is that the empirical - cumulative distribution function of the random variable is "less" - or "greater" than the cumulative distribution function F(x) of the - hypothesis, G(x)<=F(x), resp. G(x)>=F(x). 
- - Examples - -------- - - >>> from scipy import stats - >>> import numpy as np - >>> from scipy.stats import kstest - - >>> x = np.linspace(-15,15,9) - >>> kstest(x,'norm') - (0.44435602715924361, 0.038850142705171065) - - >>> np.random.seed(987654321) # set random seed to get the same result - >>> kstest('norm','',N=100) - (0.058352892479417884, 0.88531190944151261) - - is equivalent to this - - >>> np.random.seed(987654321) - >>> kstest(stats.norm.rvs(size=100),'norm') - (0.058352892479417884, 0.88531190944151261) - - Test against one-sided alternative hypothesis: - - >>> np.random.seed(987654321) - - Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x): - - >>> x = stats.norm.rvs(loc=0.2, size=100) - >>> kstest(x,'norm', alternative = 'less') - (0.12464329735846891, 0.040989164077641749) - - Reject equal distribution against alternative hypothesis: less - - >>> kstest(x,'norm', alternative = 'greater') - (0.0072115233216311081, 0.98531158590396395) - - Don't reject equal distribution against alternative hypothesis: greater - - >>> kstest(x,'norm', mode='asymp') - (0.12464329735846891, 0.08944488871182088) - - - Testing t distributed random variables against normal distribution: - - With 100 degrees of freedom the t distribution looks close to the normal - distribution, and the kstest does not reject the hypothesis that the sample - came from the normal distribution - - >>> np.random.seed(987654321) - >>> stats.kstest(stats.t.rvs(100,size=100),'norm') - (0.072018929165471257, 0.67630062862479168) - - With 3 degrees of freedom the t distribution looks sufficiently different - from the normal distribution, that we can reject the hypothesis that the - sample came from the normal distribution at a alpha=10% level - - >>> np.random.seed(987654321) - >>> stats.kstest(stats.t.rvs(3,size=100),'norm') - (0.131016895759829, 0.058826222555312224) - - """ - if isinstance(rvs, basestring): - #cdf = getattr(stats, rvs).cdf - if (not cdf) or (cdf == rvs): - 
cdf = getattr(distributions, rvs).cdf - rvs = getattr(distributions, rvs).rvs - else: - raise AttributeError('if rvs is string, cdf has to be the same distribution') - - - if isinstance(cdf, basestring): - cdf = getattr(distributions, cdf).cdf - if callable(rvs): - kwds = {'size':N} - vals = np.sort(rvs(*args,**kwds)) - else: - vals = np.sort(rvs) - N = len(vals) - cdfvals = cdf(vals, *args) - - if alternative in ['two_sided', 'greater']: - Dplus = (np.arange(1.0, N+1)/N - cdfvals).max() - if alternative == 'greater': - return Dplus, distributions.ksone.sf(Dplus,N) - - if alternative in ['two_sided', 'less']: - Dmin = (cdfvals - np.arange(0.0, N)/N).max() - if alternative == 'less': - return Dmin, distributions.ksone.sf(Dmin,N) - - if alternative == 'two_sided': - D = np.max([Dplus,Dmin]) - if mode == 'asymp': - return D, distributions.kstwobign.sf(D*np.sqrt(N)) - if mode == 'approx': - pval_two = distributions.kstwobign.sf(D*np.sqrt(N)) - if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 : - return D, distributions.kstwobign.sf(D*np.sqrt(N)) - else: - return D, distributions.ksone.sf(D,N)*2 - -def chisquare(f_obs, f_exp=None, ddof=0): - """ - Calculates a one-way chi square test. - - The chi square test tests the null hypothesis that the categorical data - has the given frequencies. - - Parameters - ---------- - f_obs : array - observed frequencies in each category - f_exp : array, optional - expected frequencies in each category. By default the categories are - assumed to be equally likely. - ddof : int, optional - adjustment to the degrees of freedom for the p-value - - Returns - ------- - chisquare statistic : float - The chisquare test statistic - p : float - The p-value of the test. - - Notes - ----- - This test is invalid when the observed or expected frequencies in each - category are too small. A typical rule is that all of the observed - and expected frequencies should be at least 5. 
- The default degrees of freedom, k-1, are for the case when no parameters - of the distribution are estimated. If p parameters are estimated by - efficient maximum likelihood then the correct degrees of freedom are - k-1-p. If the parameters are estimated in a different way, then then - the dof can be between k-1-p and k-1. However, it is also possible that - the asymptotic distributions is not a chisquare, in which case this - test is not appropriate. - - References - ---------- - - .. [1] Lowry, Richard. "Concepts and Applications of Inferential - Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html - - """ - - f_obs = asarray(f_obs) - k = len(f_obs) - if f_exp is None: - f_exp = array([np.sum(f_obs,axis=0)/float(k)] * len(f_obs),float) - f_exp = f_exp.astype(float) - chisq = np.add.reduce((f_obs-f_exp)**2 / f_exp) - return chisq, chisqprob(chisq, k-1-ddof) - - -def ks_2samp(data1, data2): - """ - Computes the Kolmogorov-Smirnof statistic on 2 samples. - - This is a two-sided test for the null hypothesis that 2 independent samples - are drawn from the same continuous distribution. - - Parameters - ---------- - a, b : sequence of 1-D ndarrays - two arrays of sample observations assumed to be drawn from a continuous - distribution, sample sizes can be different - - - Returns - ------- - D : float - KS statistic - p-value : float - two-tailed p-value - - - Notes - ----- - - This tests whether 2 samples are drawn from the same distribution. Note - that, like in the case of the one-sample K-S test, the distribution is - assumed to be continuous. - - This is the two-sided test, one-sided tests are not implemented. - The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution. - - If the K-S statistic is small or the p-value is high, then we cannot - reject the hypothesis that the distributions of the two samples - are the same. 
- - Examples - -------- - - >>> from scipy import stats - >>> import numpy as np - >>> from scipy.stats import ks_2samp - - >>> #fix random seed to get the same result - >>> np.random.seed(12345678); - - >>> n1 = 200 # size of first sample - >>> n2 = 300 # size of second sample - - different distribution - we can reject the null hypothesis since the pvalue is below 1% - - >>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1); - >>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5) - >>> ks_2samp(rvs1,rvs2) - (0.20833333333333337, 4.6674975515806989e-005) - - slightly different distribution - we cannot reject the null hypothesis at a 10% or lower alpha since - the pvalue at 0.144 is higher than 10% - - >>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0) - >>> ks_2samp(rvs1,rvs3) - (0.10333333333333333, 0.14498781825751686) - - identical distribution - we cannot reject the null hypothesis since the pvalue is high, 41% - - >>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0) - >>> ks_2samp(rvs1,rvs4) - (0.07999999999999996, 0.41126949729859719) - - """ - data1, data2 = map(asarray, (data1, data2)) - n1 = data1.shape[0] - n2 = data2.shape[0] - n1 = len(data1) - n2 = len(data2) - data1 = np.sort(data1) - data2 = np.sort(data2) - data_all = np.concatenate([data1,data2]) - cdf1 = np.searchsorted(data1,data_all,side='right')/(1.0*n1) - cdf2 = (np.searchsorted(data2,data_all,side='right'))/(1.0*n2) - d = np.max(np.absolute(cdf1-cdf2)) - #Note: d absolute not signed distance - en = np.sqrt(n1*n2/float(n1+n2)) - try: - prob = ksprob((en+0.12+0.11/en)*d) - except: - prob = 1.0 - return d, prob - - -def mannwhitneyu(x, y, use_continuity=True): - """ - Computes the Mann-Whitney rank test on samples x and y. - - Parameters - ---------- - x, y : array_like - Array of samples, should be one-dimensional. - use_continuity : bool, optional - Whether a continuity correction (1/2.) should be taken into - account. Default is True. 
- - Returns - ------- - u : float - The Mann-Whitney statistics. - prob : float - One-sided p-value assuming a asymptotic normal distribution. - - Notes - ----- - Use only when the number of observation in each sample is > 20 and - you have 2 independent samples of ranks. Mann-Whitney U is - significant if the u-obtained is LESS THAN or equal to the critical - value of U. - - This test corrects for ties and by default uses a continuity correction. - The reported p-value is for a one-sided hypothesis, to get the two-sided - p-value multiply the returned p-value by 2. - - """ - x = asarray(x) - y = asarray(y) - n1 = len(x) - n2 = len(y) - ranked = rankdata(np.concatenate((x,y))) - rankx = ranked[0:n1] # get the x-ranks - #ranky = ranked[n1:] # the rest are y-ranks - u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx,axis=0) # calc U for x - u2 = n1*n2 - u1 # remainder is U for y - bigu = max(u1,u2) - smallu = min(u1,u2) - #T = np.sqrt(tiecorrect(ranked)) # correction factor for tied scores - T = tiecorrect(ranked) - if T == 0: - raise ValueError('All numbers are identical in amannwhitneyu') - sd = np.sqrt(T*n1*n2*(n1+n2+1)/12.0) - - if use_continuity: - # normal approximation for prob calc with continuity correction - z = abs((bigu-0.5-n1*n2/2.0) / sd) - else: - z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc - return smallu, distributions.norm.sf(z) #(1.0 - zprob(z)) - - -def tiecorrect(rankvals): - """Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests. - See Siegel, S. (1956) Nonparametric Statistics for the Behavioral - Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c - code. - - Returns - ------- - T correction factor for U or H - - """ - sorted,posn = fastsort(asarray(rankvals)) - n = len(sorted) - T = 0.0 - i = 0 - while (i 10 and more than - 6 repeated measurements. - - References - ---------- - .. 
[1] http://en.wikipedia.org/wiki/Friedman_test - - """ - k = len(args) - if k < 3: - raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n') - n = len(args[0]) - for i in range(1,k): - if len(args[i]) <> n: - raise ValueError('Unequal N in friedmanchisquare. Aborting.') - - # Rank data - data = apply(_support.abut,args) - data = data.astype(float) - for i in range(len(data)): - data[i] = rankdata(data[i]) - - # Handle ties - ties = 0 - for i in range(len(data)): - replist, repnum = find_repeats(array(data[i])) - for t in repnum: - ties += t*(t*t-1) - c = 1 - ties / float(k*(k*k-1)*n) - - ssbn = pysum(pysum(data)**2) - chisq = ( 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1) ) / c - return chisq, chisqprob(chisq,k-1) - - -##################################### -#### PROBABILITY CALCULATIONS #### -##################################### - -zprob = special.ndtr - -def chisqprob(chisq, df): - """ - Probability value (1-tail) for the Chi^2 probability distribution. - - Broadcasting rules apply. - - Parameters - ---------- - chisq : array_like or float > 0 - - df : array_like or float, probably int >= 1 - - Returns - ------- - chisqprob : ndarray - The area from `chisq` to infinity under the Chi^2 probability - distribution with degrees of freedom `df`. - - """ - return special.chdtrc(df,chisq) - -ksprob = special.kolmogorov -fprob = special.fdtrc - -def betai(a, b, x): - """ - Returns the incomplete beta function. - - I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt) - - where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma - function of a. - - The standard broadcasting rules apply to a, b, and x. - - Parameters - ---------- - a : array_like or float > 0 - - b : array_like or float > 0 - - x : array_like or float - x will be clipped to be no greater than 1.0 . - - Returns - ------- - betai : ndarray - Incomplete beta function. 
- - """ - x = np.asarray(x) - x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 - return special.betainc(a, b, x) - -##################################### -####### ANOVA CALCULATIONS ####### -##################################### - -def glm(data, para): - """Calculates a linear model fit ... - anova/ancova/lin-regress/t-test/etc. Taken from: - - Peterson et al. Statistical limitations in functional neuroimaging - I. Non-inferential methods and statistical models. Phil Trans Royal Soc - Lond B 354: 1239-1260. - - Returns - ------- - statistic, p-value ??? - - """ - if len(para) != len(data): - raise ValueError("data and para must be same length in aglm") - n = len(para) - p = _support.unique(para) - x = zeros((n,len(p))) # design matrix - for l in range(len(p)): - x[:,l] = para == p[l] - # fixme: normal equations are bad. Use linalg.lstsq instead. - b = dot(dot(linalg.inv(dot(np.transpose(x),x)), # i.e., b=inv(X'X)X'Y - np.transpose(x)),data) - diffs = (data - dot(x,b)) - s_sq = 1./(n-len(p)) * dot(np.transpose(diffs), diffs) - - if len(p) == 2: # ttest_ind - c = array([1,-1]) - df = n-2 - fact = np.sum(1.0/np.sum(x,0),axis=0) # i.e., 1/n1 + 1/n2 + 1/n3 ... - t = dot(c,b) / np.sqrt(s_sq*fact) - probs = betai(0.5*df,0.5,float(df)/(df+t*t)) - return t, probs - else: - raise ValueError("only ttest_ind implemented") - - -def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b): - """Calculation of Wilks lambda F-statistic for multivarite data, per - Maxwell & Delaney p.657. - """ - if isinstance(ER, (int, float)): - ER = array([[ER]]) - if isinstance(EF, (int, float)): - EF = array([[EF]]) - lmbda = linalg.det(EF) / linalg.det(ER) - if (a-1)**2 + (b-1)**2 == 5: - q = 1 - else: - q = np.sqrt( ((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 -5) ) - n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1) - d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1) - return n_um / d_en - -def f_value(ER, EF, dfR, dfF): - """ - Returns an F-statistic for a restricted vs. unrestricted model. 
- - Parameters - ---------- - ER : float - `ER` is the sum of squared residuals for the restricted model - or null hypothesis - - EF : float - `EF` is the sum of squared residuals for the unrestricted model - or alternate hypothesis - - dfR : int - `dfR` is the degrees of freedom in the restricted model - - dfF : int - `dfF` is the degrees of freedom in the unrestricted model - - Returns - ------- - F-statistic : float - - """ - return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF))) - - - -def f_value_multivariate(ER, EF, dfnum, dfden): - """ - Returns a multivariate F-statistic. - - Parameters - ---------- - ER : ndarray - Error associated with the null hypothesis (the Restricted model). - From a multivariate F calculation. - EF : ndarray - Error associated with the alternate hypothesis (the Full model) - From a multivariate F calculation. - dfnum : int - Degrees of freedom the Restricted model. - dfden : int - Degrees of freedom associated with the Restricted model. - - Returns - ------- - fstat : float - The computed F-statistic. - - """ - if isinstance(ER, (int, float)): - ER = array([[ER]]) - if isinstance(EF, (int, float)): - EF = array([[EF]]) - n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum) - d_en = linalg.det(EF) / float(dfden) - return n_um / d_en - - -##################################### -####### SUPPORT FUNCTIONS ######## -##################################### - -def ss(a, axis=0): - """ - Squares each element of the input array, and returns the square(s) of that. - - Parameters - ---------- - a : array_like - Input array. - axis : int or None, optional - The axis along which to calculate. If None, use whole array. - Default is 0, i.e. along the first axis. - - Returns - ------- - ss : ndarray - The sum along the given axis for (a**2). - - See also - -------- - square_of_sums : The square(s) of the sum(s) (the opposite of `ss`). 
- - Examples - -------- - >>> from scipy import stats - >>> a = np.array([1., 2., 5.]) - >>> stats.ss(a) - 30.0 - - And calculating along an axis: - - >>> b = np.array([[1., 2., 5.], [2., 5., 6.]]) - >>> stats.ss(b, axis=1) - array([ 30., 65.]) - - """ - a, axis = _chk_asarray(a, axis) - return np.sum(a*a, axis) - - -def square_of_sums(a, axis=0): - """ - Sums elements of the input array, and returns the square(s) of that sum. - - Parameters - ---------- - a : array_like - Input array. - axis : int or None, optional - If axis is None, ravel `a` first. If `axis` is an integer, this will - be the axis over which to operate. Defaults to 0. - - Returns - ------- - ss : float or ndarray - The square of the sum over `axis`. - - See also - -------- - ss : The sum of squares (the opposite of `square_of_sums`). - - Examples - -------- - >>> from scipy import stats - >>> a = np.arange(20).reshape(5,4) - >>> stats.square_of_sums(a) - array([ 1600., 2025., 2500., 3025.]) - >>> stats.square_of_sums(a, axis=None) - 36100.0 - - """ - a, axis = _chk_asarray(a, axis) - s = np.sum(a,axis) - if not np.isscalar(s): - return s.astype(float)*s - else: - return float(s)*s - - -def fastsort(a): - """ - Sort an array and provide the argsort. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - fastsort : ndarray of type int - sorted indices into the original array - - """ - # TODO: the wording in the docstring is nonsense. - it = np.argsort(a) - as_ = a[it] - return as_, it - -def rankdata(a): - """ - Ranks the data, dealing with ties appropriately. - - Equal values are assigned a rank that is the average of the ranks that - would have been otherwise assigned to all of the values within that set. - Ranks begin at 1, not 0. - - Parameters - ---------- - a : array_like - This array is first flattened. - - Returns - ------- - rankdata : ndarray - An array of length equal to the size of `a`, containing rank scores. 
- - Examples - -------- - >>> stats.rankdata([0, 2, 2, 3]) - array([ 1. , 2.5, 2.5, 4. ]) - - """ - a = np.ravel(a) - n = len(a) - svec, ivec = fastsort(a) - sumranks = 0 - dupcount = 0 - newarray = np.zeros(n, float) - for i in xrange(n): - sumranks += i - dupcount += 1 - if i==n-1 or svec[i] != svec[i+1]: - averank = sumranks / float(dupcount) + 1 - for j in xrange(i-dupcount+1,i+1): - newarray[ivec[j]] = averank - sumranks = 0 - dupcount = 0 - return newarray diff --git a/scipy-0.10.1/scipy/stats/tests/test_contingency.py b/scipy-0.10.1/scipy/stats/tests/test_contingency.py deleted file mode 100644 index 94942d0645..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_contingency.py +++ /dev/null @@ -1,181 +0,0 @@ - -import numpy as np -from numpy.testing import run_module_suite, assert_equal, assert_array_equal, \ - assert_array_almost_equal, assert_approx_equal, assert_raises - -from scipy.stats.contingency import margins, expected_freq, chi2_contingency - - -def test_margins(): - a = np.array([1]) - m = margins(a) - assert_equal(len(m), 1) - m0 = m[0] - assert_array_equal(m0, np.array([1])) - - a = np.array([[1]]) - m0, m1 = margins(a) - expected0 = np.array([[1]]) - expected1 = np.array([[1]]) - assert_array_equal(m0, expected0) - assert_array_equal(m1, expected1) - - a = np.arange(12).reshape(2, 6) - m0, m1 = margins(a) - expected0 = np.array([[15], [51]]) - expected1 = np.array([[6, 8, 10, 12, 14, 16]]) - assert_array_equal(m0, expected0) - assert_array_equal(m1, expected1) - - a = np.arange(24).reshape(2, 3, 4) - m0, m1, m2 = margins(a) - expected0 = np.array([[[66]], [[210]]]) - expected1 = np.array([[[60], [92], [124]]]) - expected2 = np.array([[[60, 66, 72, 78]]]) - assert_array_equal(m0, expected0) - assert_array_equal(m1, expected1) - assert_array_equal(m2, expected2) - - -def test_expected_freq(): - assert_array_equal(expected_freq([1]), np.array([1.0])) - - observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]]) - e = 
expected_freq(observed) - assert_array_equal(e, np.ones_like(observed)) - - observed = np.array([[10, 10, 20], [20, 20, 20]]) - e = expected_freq(observed) - correct = np.array([[12., 12., 16.], [18., 18., 24.]]) - assert_array_almost_equal(e, correct) - - -def test_chi2_contingency_trivial(): - """Some very simple tests for chi2_contingency.""" - # A trivial case - obs = np.array([[1, 2], [1, 2]]) - chi2, p, dof, expected = chi2_contingency(obs, correction=False) - assert_equal(chi2, 0.0) - assert_equal(p, 1.0) - assert_equal(dof, 1) - assert_array_equal(obs, expected) - - # A *really* trivial case: 1-D data. - obs = np.array([1, 2, 3]) - chi2, p, dof, expected = chi2_contingency(obs, correction=False) - assert_equal(chi2, 0.0) - assert_equal(p, 1.0) - assert_equal(dof, 0) - assert_array_equal(obs, expected) - - -def test_chi2_contingency_R(): - """Some test cases that were computed independently, using R.""" - - Rcode = \ - """ - # Data vector. - data <- c( - 12, 34, 23, 4, 47, 11, - 35, 31, 11, 34, 10, 18, - 12, 32, 9, 18, 13, 19, - 12, 12, 14, 9, 33, 25 - ) - - # Create factor tags:r=rows, c=columns, t=tiers - r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4"))) - c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3"))) - t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2"))) - - # 3-way Chi squared test of independence - s = summary(xtabs(data~r+c+t)) - print(s) - """ - Routput = \ - """ - Call: xtabs(formula = data ~ r + c + t) - Number of cases in table: 478 - Number of factors: 3 - Test for independence of all factors: - Chisq = 102.17, df = 17, p-value = 3.514e-14 - """ - obs = np.array( - [[[12, 34, 23], - [35, 31, 11], - [12, 32, 9], - [12, 12, 14]], - [[ 4, 47, 11], - [34, 10, 18], - [18, 13, 19], - [ 9, 33, 25]]]) - chi2, p, dof, expected = chi2_contingency(obs) - assert_approx_equal(chi2, 102.17, significant=5) - assert_approx_equal(p, 3.514e-14, significant=4) - assert_equal(dof, 17) - - Rcode = \ - """ - # Data vector. 
- data <- c( - # - 12, 17, - 11, 16, - # - 11, 12, - 15, 16, - # - 23, 15, - 30, 22, - # - 14, 17, - 15, 16 - ) - - # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers - r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2"))) - c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2"))) - d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2"))) - t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2"))) - - # 4-way Chi squared test of independence - s = summary(xtabs(data~r+c+d+t)) - print(s) - """ - Routput = \ - """ - Call: xtabs(formula = data ~ r + c + d + t) - Number of cases in table: 262 - Number of factors: 4 - Test for independence of all factors: - Chisq = 8.758, df = 11, p-value = 0.6442 - """ - obs = np.array( - [[[[12, 17], - [11, 16]], - [[11, 12], - [15, 16]]], - [[[23, 15], - [30, 22]], - [[14, 17], - [15, 16]]]]) - chi2, p, dof, expected = chi2_contingency(obs) - assert_approx_equal(chi2, 8.758, significant=4) - assert_approx_equal(p, 0.6442, significant=4) - assert_equal(dof, 11) - - -def test_chi2_contingency_bad_args(): - # Negative value in the array of observed frequencies. - obs = np.array([[-1, 10], [1, 2]]) - assert_raises(ValueError, chi2_contingency, obs) - # The zeros in this will result in zeros in the array - # of expected frequencies. - obs = np.array([[0, 1], [0, 1]]) - assert_raises(ValueError, chi2_contingency, obs) - # A degenerate case: `observed` has size 0. - obs = np.empty((0, 8)) - assert_raises(ValueError, chi2_contingency, obs) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/stats/tests/test_continuous_basic.py b/scipy-0.10.1/scipy/stats/tests/test_continuous_basic.py deleted file mode 100644 index 00b1f47513..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_continuous_basic.py +++ /dev/null @@ -1,386 +0,0 @@ -import warnings - -import numpy.testing as npt -import numpy as np -import nose - -from scipy import stats - -""" -Test all continuous distributions. 
- -Parameters were chosen for those distributions that pass the -Kolmogorov-Smirnov test. This provides safe parameters for each -distributions so that we can perform further testing of class methods. - -These tests currently check only/mostly for serious errors and exceptions, -not for numerically exact results. - - -TODO: -* make functioning test for skew and kurtosis - still known failures - skip for now - - -""" - -#currently not used -DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5 -DECIMAL_kurt = 0 - -distcont = [ - ['alpha', (3.5704770516650459,)], - ['anglit', ()], - ['arcsine', ()], - ['beta', (2.3098496451481823, 0.62687954300963677)], - ['betaprime', (5, 6)], # avoid unbound error in entropy with (100, 86)], - ['bradford', (0.29891359763170633,)], - ['burr', (10.5, 4.3)], #incorrect mean and var for(0.94839838075366045, 4.3820284068855795)], - ['cauchy', ()], - ['chi', (78,)], - ['chi2', (55,)], - ['cosine', ()], - ['dgamma', (1.1023326088288166,)], - ['dweibull', (2.0685080649914673,)], - ['erlang', (20,)], #correction numargs = 1 - ['expon', ()], - ['exponpow', (2.697119160358469,)], - ['exponweib', (2.8923945291034436, 1.9505288745913174)], - ['f', (29, 18)], - ['fatiguelife', (29,)], #correction numargs = 1 - ['fisk', (3.0857548622253179,)], - ['foldcauchy', (4.7164673455831894,)], - ['foldnorm', (1.9521253373555869,)], - ['frechet_l', (3.6279911255583239,)], - ['frechet_r', (1.8928171603534227,)], - ['gamma', (1.9932305483800778,)], - ['gausshyper', (13.763771604130699, 3.1189636648681431, - 2.5145980350183019, 5.1811649903971615)], #veryslow - ['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)], - ['genextreme', (-0.1,)], # sample mean test fails for (3.3184017469423535,)], - ['gengamma', (4.4162385429431925, 3.1193091679242761)], - ['genhalflogistic', (0.77274727809929322,)], - ['genlogistic', (0.41192440799679475,)], - ['genpareto', (0.1,)], # use case with finite moments - ['gilbrat', ()], - 
['gompertz', (0.94743713075105251,)], - ['gumbel_l', ()], - ['gumbel_r', ()], - ['halfcauchy', ()], - ['halflogistic', ()], - ['halfnorm', ()], - ['hypsecant', ()], - ['invgamma', (2.0668996136993067,)], - ['invgauss', (0.14546264555347513,)], - ['invweibull', (10.58,)], # sample mean test fails at(0.58847112119264788,)] - ['johnsonsb', (4.3172675099141058, 3.1837781130785063)], - ['johnsonsu', (2.554395574161155, 2.2482281679651965)], - ['ksone', (1000,)], #replace 22 by 100 to avoid failing range, ticket 956 - ['kstwobign', ()], - ['laplace', ()], - ['levy', ()], - ['levy_l', ()], -# ['levy_stable', (0.35667405469844993, -# -0.67450531578494011)], #NotImplementedError - # rvs not tested - ['loggamma', (0.41411931826052117,)], - ['logistic', ()], - ['loglaplace', (3.2505926592051435,)], - ['lognorm', (0.95368226960575331,)], - ['lomax', (1.8771398388773268,)], - ['maxwell', ()], - ['mielke', (10.4, 3.6)], # sample mean test fails for (4.6420495492121487, 0.59707419545516938)], - # mielke: good results if 2nd parameter >2, weird mean or var below - ['nakagami', (4.9673794866666237,)], - ['ncf', (27, 27, 0.41578441799226107)], - ['nct', (14, 0.24045031331198066)], - ['ncx2', (21, 1.0560465975116415)], - ['norm', ()], - ['pareto', (2.621716532144454,)], - ['powerlaw', (1.6591133289905851,)], - ['powerlognorm', (2.1413923530064087, 0.44639540782048337)], - ['powernorm', (4.4453652254590779,)], - ['rayleigh', ()], - ['rdist', (0.9,)], # feels also slow -# ['rdist', (3.8266985793976525,)], #veryslow, especially rvs - #['rdist', (541.0,)], # from ticket #758 #veryslow - ['recipinvgauss', (0.63004267809369119,)], - ['reciprocal', (0.0062309367010521255, 1.0062309367010522)], - ['rice', (0.7749725210111873,)], - ['semicircular', ()], - ['t', (2.7433514990818093,)], - ['triang', (0.15785029824528218,)], - ['truncexpon', (4.6907725456810478,)], - ['truncnorm', (-1.0978730080013919, 2.7306754109031979)], - ['tukeylambda', (3.1321477856738267,)], - ['uniform', ()], - 
['vonmises', (3.9939042581071398,)], - ['wald', ()], - ['weibull_max', (2.8687961709100187,)], - ['weibull_min', (1.7866166930421596,)], - ['wrapcauchy', (0.031071279018614728,)]] - -# for testing only specific functions -##distcont = [ -## ['erlang', (20,)], #correction numargs = 1 -## ['fatiguelife', (29,)], #correction numargs = 1 -## ['loggamma', (0.41411931826052117,)]] - -# for testing ticket:767 -##distcont = [ -## ['genextreme', (3.3184017469423535,)], -## ['genextreme', (0.01,)], -## ['genextreme', (0.00001,)], -## ['genextreme', (0.0,)], -## ['genextreme', (-0.01,)] -## ] - -##distcont = [['gumbel_l', ()], -## ['gumbel_r', ()], -## ['norm', ()] -## ] - -##distcont = [['norm', ()]] - -distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous', - 'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone', - 'cosine', 'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss', 'levy', - 'johnsonsu', 'levy_l', 'powernorm', 'wrapcauchy', - 'johnsonsb', 'truncexpon', 'rice', 'invgauss', 'invgamma', - 'powerlognorm'] - -distmiss = [[dist,args] for dist,args in distcont if dist in distmissing] -distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon', - 'vonmises', 'rice', 'mielke', 'semicircular', 'cosine', 'invweibull', - 'powerlognorm', 'johnsonsu', 'kstwobign'] -#distslow are sorted by speed (very slow to slow) - -def _silence_fp_errors(func): - def wrap(*a, **kw): - olderr = np.seterr(all='ignore') - try: - return func(*a, **kw) - finally: - np.seterr(**olderr) - wrap.__name__ = func.__name__ - return wrap - -@_silence_fp_errors -def test_cont_basic(): - # this test skips slow distributions - for distname, arg in distcont[:]: - if distname in distslow: - continue - distfn = getattr(stats, distname) - np.random.seed(765456) - sn = 1000 - rvs = distfn.rvs(size=sn,*arg) - sm = rvs.mean() - sv = rvs.var() - skurt = stats.kurtosis(rvs) - sskew = stats.skew(rvs) - m,v = distfn.stats(*arg) - - yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, 
sn, distname + \ - 'sample mean test' - # the sample skew kurtosis test has known failures, not very good distance measure - #yield check_sample_skew_kurt, distfn, arg, sskew, skurt, distname - yield check_moment, distfn, arg, m, v, distname - yield check_cdf_ppf, distfn, arg, distname - yield check_sf_isf, distfn, arg, distname - yield check_pdf, distfn, arg, distname - if distname in ['wald']: - continue - yield check_pdf_logpdf, distfn, arg, distname - yield check_cdf_logcdf, distfn, arg, distname - yield check_sf_logsf, distfn, arg, distname - if distname in distmissing: - alpha = 0.01 - yield check_distribution_rvs, distname, arg, alpha, rvs - -@npt.dec.slow -def test_cont_basic_slow(): - # same as above for slow distributions - for distname, arg in distcont[:]: - if distname not in distslow: continue - distfn = getattr(stats, distname) - np.random.seed(765456) - sn = 1000 - rvs = distfn.rvs(size=sn,*arg) - sm = rvs.mean() - sv = rvs.var() - skurt = stats.kurtosis(rvs) - sskew = stats.skew(rvs) - m,v = distfn.stats(*arg) - yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, distname + \ - 'sample mean test' - # the sample skew kurtosis test has known failures, not very good distance measure - #yield check_sample_skew_kurt, distfn, arg, sskew, skurt, distname - yield check_moment, distfn, arg, m, v, distname - yield check_cdf_ppf, distfn, arg, distname - yield check_sf_isf, distfn, arg, distname - yield check_pdf, distfn, arg, distname - yield check_pdf_logpdf, distfn, arg, distname - yield check_cdf_logcdf, distfn, arg, distname - yield check_sf_logsf, distfn, arg, distname - #yield check_oth, distfn, arg # is still missing - if distname in distmissing: - alpha = 0.01 - yield check_distribution_rvs, distname, arg, alpha, rvs - -@_silence_fp_errors -def check_moment(distfn, arg, m, v, msg): - m1 = distfn.moment(1,*arg) - m2 = distfn.moment(2,*arg) - if not np.isinf(m): - npt.assert_almost_equal(m1, m, decimal=10, err_msg= msg + \ - ' - 1st moment') - 
else: # or np.isnan(m1), - npt.assert_(np.isinf(m1), - msg + ' - 1st moment -infinite, m1=%s' % str(m1)) - #np.isnan(m1) temporary special treatment for loggamma - if not np.isinf(v): - npt.assert_almost_equal(m2-m1*m1, v, decimal=10, err_msg= msg + \ - ' - 2ndt moment') - else: #or np.isnan(m2), - npt.assert_(np.isinf(m2), - msg + ' - 2nd moment -infinite, m2=%s' % str(m2)) - #np.isnan(m2) temporary special treatment for loggamma - -@_silence_fp_errors -def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg): - #this did not work, skipped silently by nose - #check_sample_meanvar, sm, m, msg + 'sample mean test' - #check_sample_meanvar, sv, v, msg + 'sample var test' - if not np.isinf(m): - check_sample_mean(sm, sv, sn, m) - if not np.isinf(v): - check_sample_var(sv, sn, v) -## check_sample_meanvar( sm, m, msg + 'sample mean test') -## check_sample_meanvar( sv, v, msg + 'sample var test') - -def check_sample_mean(sm,v,n, popmean): - """ -from stats.stats.ttest_1samp(a, popmean): -Calculates the t-obtained for the independent samples T-test on ONE group -of scores a, given a population mean. 
- -Returns: t-value, two-tailed prob -""" -## a = asarray(a) -## x = np.mean(a) -## v = np.var(a, ddof=1) -## n = len(a) - df = n-1 - svar = ((n-1)*v) / float(df) #looks redundant - t = (sm-popmean)/np.sqrt(svar*(1.0/n)) - prob = stats.betai(0.5*df,0.5,df/(df+t*t)) - - #return t,prob - npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m,sm=%f,%f' % (t,prob,popmean,sm)) - -def check_sample_var(sv,n, popvar): - ''' -two-sided chisquare test for sample variance equal to hypothesized variance - ''' - df = n-1 - chi2 = (n-1)*popvar/float(popvar) - pval = stats.chisqprob(chi2,df)*2 - npt.assert_(pval > 0.01, 'var fail, t,pval = %f, %f, v,sv=%f,%f' % (chi2,pval,popvar,sv)) - - -def check_sample_skew_kurt(distfn, arg, ss, sk, msg): - skew,kurt = distfn.stats(moments='sk',*arg) -## skew = distfn.stats(moment='s',*arg)[()] -## kurt = distfn.stats(moment='k',*arg)[()] - check_sample_meanvar( sk, kurt, msg + 'sample kurtosis test') - check_sample_meanvar( ss, skew, msg + 'sample skew test') - -def check_sample_meanvar(sm,m,msg): - if not np.isinf(m) and not np.isnan(m): - npt.assert_almost_equal(sm, m, decimal=DECIMAL, err_msg= msg + \ - ' - finite moment') -## else: -## npt.assert_(abs(sm) > 10000), msg='infinite moment, sm = ' + str(sm)) - -@_silence_fp_errors -def check_cdf_ppf(distfn,arg,msg): - npt.assert_almost_equal(distfn.cdf(distfn.ppf([0.001,0.5,0.999], *arg), *arg), - [0.001,0.5,0.999], decimal=DECIMAL, err_msg= msg + \ - ' - cdf-ppf roundtrip') - -@_silence_fp_errors -def check_sf_isf(distfn,arg,msg): - npt.assert_almost_equal(distfn.sf(distfn.isf([0.1,0.5,0.9], *arg), *arg), - [0.1,0.5,0.9], decimal=DECIMAL, err_msg= msg + \ - ' - sf-isf roundtrip') - npt.assert_almost_equal(distfn.cdf([0.1,0.9], *arg), - 1.0-distfn.sf([0.1,0.9], *arg), - decimal=DECIMAL, err_msg= msg + \ - ' - cdf-sf relationship') - -@_silence_fp_errors -def check_pdf(distfn, arg, msg): - # compares pdf at median with numerical derivative of cdf - median = distfn.ppf(0.5, *arg) - eps = 1e-6 
- pdfv = distfn.pdf(median, *arg) - if (pdfv < 1e-4) or (pdfv > 1e4): - # avoid checking a case where pdf is close to zero or huge (singularity) - median = median + 0.1 - pdfv = distfn.pdf(median, *arg) - cdfdiff = (distfn.cdf(median + eps, *arg) - - distfn.cdf(median - eps, *arg))/eps/2.0 - #replace with better diff and better test (more points), - #actually, this works pretty well - npt.assert_almost_equal(pdfv, cdfdiff, - decimal=DECIMAL, err_msg= msg + ' - cdf-pdf relationship') - -@_silence_fp_errors -def check_pdf_logpdf(distfn, args, msg): - # compares pdf at several points with the log of the pdf - points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) - vals = distfn.ppf(points, *args) - pdf = distfn.pdf(vals, *args) - logpdf = distfn.logpdf(vals, *args) - pdf = pdf[pdf != 0] - logpdf = logpdf[np.isfinite(logpdf)] - npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg + " - logpdf-log(pdf) relationship") - -@_silence_fp_errors -def check_sf_logsf(distfn, args, msg): - # compares sf at several points with the log of the sf - points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) - vals = distfn.ppf(points, *args) - sf = distfn.sf(vals, *args) - logsf = distfn.logsf(vals, *args) - sf = sf[sf != 0] - logsf = logsf[np.isfinite(logsf)] - npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg + " - logsf-log(sf) relationship") - -@_silence_fp_errors -def check_cdf_logcdf(distfn, args, msg): - # compares cdf at several points with the log of the cdf - points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) - vals = distfn.ppf(points, *args) - cdf = distfn.cdf(vals, *args) - logcdf = distfn.logcdf(vals, *args) - cdf = cdf[cdf != 0] - logcdf = logcdf[np.isfinite(logcdf)] - npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg + " - logcdf-log(cdf) relationship") - - -@_silence_fp_errors -def check_distribution_rvs(dist, args, alpha, rvs): - #test from scipy.stats.tests - #this version reuses existing random variables - D,pval 
= stats.kstest(rvs, dist, args=args, N=1000) - if (pval < alpha): - D,pval = stats.kstest(dist,'',args=args, N=1000) - npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) + - "; alpha = " + str(alpha) + "\nargs = " + str(args)) - - -if __name__ == "__main__": - #nose.run(argv=['', __file__]) - nose.runmodule(argv=[__file__,'-s'], exit=False) diff --git a/scipy-0.10.1/scipy/stats/tests/test_continuous_extra.py b/scipy-0.10.1/scipy/stats/tests/test_continuous_extra.py deleted file mode 100644 index 391f7653c5..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_continuous_extra.py +++ /dev/null @@ -1,101 +0,0 @@ -# contains additional tests for continuous distributions -# -# NOTE: one test, _est_cont_skip, that is renamed so that nose doesn't -# run it, -# 6 distributions return nan for entropy -# truncnorm fails by design for private method _ppf test - - -import numpy.testing as npt -import numpy as np - -from scipy import stats - -from test_continuous_basic import distcont - -DECIMAL = 5 - -@npt.dec.slow -def test_cont_extra(): - for distname, arg in distcont[:]: - distfn = getattr(stats, distname) - - yield check_ppf_limits, distfn, arg, distname + \ - ' ppf limit test' - yield check_isf_limits, distfn, arg, distname + \ - ' isf limit test' - yield check_loc_scale, distfn, arg, distname + \ - ' loc, scale test' - -@npt.dec.slow -def _est_cont_skip(): - for distname, arg in distcont: - distfn = getattr(stats, distname) - #entropy test checks only for isnan, currently 6 isnan left - yield check_entropy, distfn, arg, distname + \ - ' entropy nan test' - # _ppf test has 1 failure be design - yield check_ppf_private, distfn, arg, distname + \ - ' _ppf private test' - -def test_540_567(): - # test for nan returned in tickets 540, 567 - npt.assert_almost_equal(stats.norm.cdf(-1.7624320982),0.03899815971089126, - decimal=10, err_msg = 'test_540_567') - npt.assert_almost_equal(stats.norm.cdf(-1.7624320983),0.038998159702449846, - decimal=10, err_msg = 
'test_540_567') - npt.assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309, - scale=0.204423758009),0.98353464004309321, - decimal=10, err_msg = 'test_540_567') - - -def check_ppf_limits(distfn,arg,msg): - below,low,upp,above = distfn.ppf([-1,0,1,2], *arg) - #print distfn.name, distfn.a, low, distfn.b, upp - #print distfn.name,below,low,upp,above - assert_equal_inf_nan(distfn.a,low, msg + 'ppf lower bound') - assert_equal_inf_nan(distfn.b,upp, msg + 'ppf upper bound') - npt.assert_(np.isnan(below), msg + 'ppf out of bounds - below') - npt.assert_(np.isnan(above), msg + 'ppf out of bounds - above') - -def check_ppf_private(distfn,arg,msg): - #fails by design for trunk norm self.nb not defined - ppfs = distfn._ppf(np.array([0.1,0.5,0.9]), *arg) - npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan') - - -def check_isf_limits(distfn,arg,msg): - below,low,upp,above = distfn.isf([-1,0,1,2], *arg) - #print distfn.name, distfn.a, low, distfn.b, upp - #print distfn.name,below,low,upp,above - assert_equal_inf_nan(distfn.a,upp, msg + 'isf lower bound') - assert_equal_inf_nan(distfn.b,low, msg + 'isf upper bound') - npt.assert_(np.isnan(below), msg + 'isf out of bounds - below') - npt.assert_(np.isnan(above), msg + 'isf out of bounds - above') - - -def check_loc_scale(distfn,arg,msg): - m,v = distfn.stats(*arg) - loc, scale = 10.0, 10.0 - mt,vt = distfn.stats(loc=loc, scale=scale, *arg) - assert_equal_inf_nan(m*scale+loc,mt,msg + 'mean') - assert_equal_inf_nan(v*scale*scale,vt,msg + 'var') - -def check_entropy(distfn,arg,msg): - ent = distfn.entropy(*arg) - #print 'Entropy =', ent - npt.assert_(not np.isnan(ent), msg + 'test Entropy is nan') - -def assert_equal_inf_nan(v1,v2,msg): - npt.assert_(not np.isnan(v1)) - if not np.isinf(v1): - npt.assert_almost_equal(v1, v2, decimal=DECIMAL, err_msg = msg + \ - ' - finite') - else: - npt.assert_(np.isinf(v2) or np.isnan(v2), - msg + ' - infinite, v2=%s' % str(v2)) - -if __name__ == "__main__": - import 
nose - #nose.run(argv=['', __file__]) - nose.runmodule(argv=[__file__,'-s'], exit=False) diff --git a/scipy-0.10.1/scipy/stats/tests/test_discrete_basic.py b/scipy-0.10.1/scipy/stats/tests/test_discrete_basic.py deleted file mode 100644 index 66b4b2b3d0..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_discrete_basic.py +++ /dev/null @@ -1,269 +0,0 @@ -import numpy.testing as npt -import numpy as np -import nose - -from scipy import stats - -DECIMAL_meanvar = 0#1 # was 0 - -distdiscrete = [ - ['bernoulli',(0.3,)], - ['binom', (5, 0.4)], - ['boltzmann',(1.4, 19)], - ['dlaplace', (0.8,)], #0.5 - ['geom', (0.5,)], - ['hypergeom',(30, 12, 6)], - ['hypergeom',(21,3,12)], #numpy.random (3,18,12) numpy ticket:921 - ['hypergeom',(21,18,11)], #numpy.random (18,3,11) numpy ticket:921 - ['logser', (0.6,)], # reenabled, numpy ticket:921 - ['nbinom', (5, 0.5)], - ['nbinom', (0.4, 0.4)], #from tickets: 583 - ['planck', (0.51,)], #4.1 - ['poisson', (0.6,)], - ['randint', (7, 31)], - ['skellam', (15, 8)]] -# ['zipf', (4,)] ] # arg=4 is ok, - # Zipf broken for arg = 2, e.g. weird .stats - # looking closer, mean, var should be inf for arg=2 - - -#@npt.dec.slow -def test_discrete_basic(): - for distname, arg in distdiscrete: - distfn = getattr(stats,distname) - #npt.assert_(stats.dlaplace.rvs(0.8) is not None) - np.random.seed(9765456) - rvs = distfn.rvs(size=2000,*arg) - supp = np.unique(rvs) - m,v = distfn.stats(*arg) - #yield npt.assert_almost_equal(rvs.mean(), m, decimal=4,err_msg='mean') - #yield npt.assert_almost_equal, rvs.mean(), m, 2, 'mean' # does not work - yield check_sample_meanvar, rvs.mean(), m, distname + ' sample mean test' - yield check_sample_meanvar, rvs.var(), v, distname + ' sample var test' - yield check_cdf_ppf, distfn, arg, distname + ' cdf_ppf' - yield check_cdf_ppf2, distfn, arg, supp, distname + ' cdf_ppf' - yield check_pmf_cdf, distfn, arg, distname + ' pmf_cdf' - - # zipf doesn't fail, but generates floating point warnings. - # Should be checked. 
- if not distname in ['zipf']: - yield check_oth, distfn, arg, distname + ' oth' - skurt = stats.kurtosis(rvs) - sskew = stats.skew(rvs) - yield check_sample_skew_kurt, distfn, arg, skurt, sskew, \ - distname + ' skew_kurt' - - # dlaplace doesn't fail, but generates lots of floating point warnings. - # Should be checked. - if not distname in ['dlaplace']: #['logser']: #known failure, fixed - alpha = 0.01 - yield check_discrete_chisquare, distfn, arg, rvs, alpha, \ - distname + ' chisquare' - - -@npt.dec.slow -def test_discrete_extra(): - for distname, arg in distdiscrete: - distfn = getattr(stats,distname) - yield check_ppf_limits, distfn, arg, distname + \ - ' ppf limit test' - yield check_isf_limits, distfn, arg, distname + \ - ' isf limit test' - yield check_entropy, distfn, arg, distname + \ - ' entropy nan test' - -@npt.dec.skipif(True) -def test_discrete_private(): - #testing private methods mostly for debugging - # some tests might fail by design, - # e.g. incorrect definition of distfn.a and distfn.b - for distname, arg in distdiscrete: - distfn = getattr(stats,distname) - rvs = distfn.rvs(size=10000,*arg) - m,v = distfn.stats(*arg) - - yield check_ppf_ppf, distfn, arg - yield check_cdf_ppf_private, distfn, arg, distname - yield check_generic_moment, distfn, arg, m, 1, 3 # last is decimal - yield check_generic_moment, distfn, arg, v+m*m, 2, 3 # last is decimal - yield check_moment_frozen, distfn, arg, m, 1, 3 # last is decimal - yield check_moment_frozen, distfn, arg, v+m*m, 2, 3 # last is decimal - - -def check_sample_meanvar(sm,m,msg): - if not np.isinf(m): - npt.assert_almost_equal(sm, m, decimal=DECIMAL_meanvar, err_msg=msg + \ - ' - finite moment') - else: - npt.assert_(sm > 10000, msg='infinite moment, sm = ' + str(sm)) - -def check_sample_var(sm,m,msg): - npt.assert_almost_equal(sm, m, decimal=DECIMAL_meanvar, err_msg= msg + 'var') - -def check_cdf_ppf(distfn,arg,msg): - ppf05 = distfn.ppf(0.5,*arg) - cdf05 = distfn.cdf(ppf05,*arg) - 
npt.assert_almost_equal(distfn.ppf(cdf05-1e-6,*arg),ppf05, - err_msg=msg + 'ppf-cdf-median') - npt.assert_((distfn.ppf(cdf05+1e-4,*arg)>ppf05), msg + 'ppf-cdf-next') - -def check_cdf_ppf2(distfn,arg,supp,msg): - npt.assert_array_equal(distfn.ppf(distfn.cdf(supp,*arg),*arg), - supp, msg + '-roundtrip') - npt.assert_array_equal(distfn.ppf(distfn.cdf(supp,*arg)-1e-8,*arg), - supp, msg + '-roundtrip') - # -1e-8 could cause an error if pmf < 1e-8 - - -def check_cdf_ppf_private(distfn,arg,msg): - ppf05 = distfn._ppf(0.5,*arg) - cdf05 = distfn.cdf(ppf05,*arg) - npt.assert_almost_equal(distfn._ppf(cdf05-1e-6,*arg),ppf05, - err_msg=msg + '_ppf-cdf-median ') - npt.assert_((distfn._ppf(cdf05+1e-4,*arg)>ppf05), msg + '_ppf-cdf-next') - -def check_ppf_ppf(distfn, arg): - npt.assert_(distfn.ppf(0.5,*arg) < np.inf) - ppfs = distfn.ppf([0.5,0.9],*arg) - ppf_s = [distfn._ppf(0.5,*arg), distfn._ppf(0.9,*arg)] - npt.assert_(np.all(ppfs < np.inf)) - npt.assert_(ppf_s[0] == distfn.ppf(0.5,*arg)) - npt.assert_(ppf_s[1] == distfn.ppf(0.9,*arg)) - npt.assert_(ppf_s[0] == ppfs[0]) - npt.assert_(ppf_s[1] == ppfs[1]) - -def check_pmf_cdf(distfn, arg, msg): - startind = np.int(distfn._ppf(0.01,*arg)-1) - index = range(startind,startind+10) - cdfs = distfn.cdf(index,*arg) - npt.assert_almost_equal(cdfs, distfn.pmf(index, *arg).cumsum() + \ - cdfs[0] - distfn.pmf(index[0],*arg), - decimal=4, err_msg= msg + 'pmf-cdf') - -def check_generic_moment(distfn, arg, m, k, decim): - npt.assert_almost_equal(distfn.generic_moment(k,*arg), m, decimal=decim, - err_msg= str(distfn) + ' generic moment test') - -def check_moment_frozen(distfn, arg, m, k, decim): - npt.assert_almost_equal(distfn(*arg).moment(k), m, decimal=decim, - err_msg= str(distfn) + ' frozen moment test') - -def check_oth(distfn, arg, msg): - #checking other methods of distfn - meanint = round(float(distfn.stats(*arg)[0])) # closest integer to mean - npt.assert_almost_equal(distfn.sf(meanint, *arg), 1 - \ - distfn.cdf(meanint, *arg), 
decimal=8) - median_sf = distfn.isf(0.5, *arg) - - npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5) - npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5) - npt.assert_equal(distfn.isf(0.5, *arg), distfn.ppf(0.5, *arg)) - -#next 3 functions copied from test_continous_extra -# adjusted - -def check_ppf_limits(distfn,arg,msg): - below,low,upp,above = distfn.ppf([-1,0,1,2], *arg) - #print distfn.name, distfn.a, low, distfn.b, upp - #print distfn.name,below,low,upp,above - assert_equal_inf_nan(distfn.a-1,low, msg + 'ppf lower bound') - assert_equal_inf_nan(distfn.b,upp, msg + 'ppf upper bound') - npt.assert_(np.isnan(below), msg + 'ppf out of bounds - below') - npt.assert_(np.isnan(above), msg + 'ppf out of bounds - above') - -def check_isf_limits(distfn,arg,msg): - below,low,upp,above = distfn.isf([-1,0,1,2], *arg) - #print distfn.name, distfn.a, low, distfn.b, upp - #print distfn.name,below,low,upp,above - assert_equal_inf_nan(distfn.a-1,upp, msg + 'isf lower bound') - assert_equal_inf_nan(distfn.b,low, msg + 'isf upper bound') - npt.assert_(np.isnan(below), msg + 'isf out of bounds - below') - npt.assert_(np.isnan(above), msg + 'isf out of bounds - above') - -def assert_equal_inf_nan(v1,v2,msg): - npt.assert_(not np.isnan(v1)) - if not np.isinf(v1): - npt.assert_almost_equal(v1, v2, decimal=10, err_msg = msg + \ - ' - finite') - else: - npt.assert_(np.isinf(v2) or np.isnan(v2), - msg + ' - infinite, v2=%s' % str(v2)) - -def check_sample_skew_kurt(distfn, arg, sk, ss, msg): - k,s = distfn.stats(moment='ks',*arg) - check_sample_meanvar, sk, k, msg + 'sample skew test' - check_sample_meanvar, ss, s, msg + 'sample kurtosis test' - - -def check_entropy(distfn,arg,msg): - ent = distfn.entropy(*arg) - #print 'Entropy =', ent - npt.assert_(not np.isnan(ent), msg + 'test Entropy is nan') - - -def check_discrete_chisquare(distfn, arg, rvs, alpha, msg): - '''perform chisquare test for random sample of a discrete distribution - - Parameters - ---------- - distname : string - 
name of distribution function - arg : sequence - parameters of distribution - alpha : float - significance level, threshold for p-value - - Returns - ------- - result : bool - 0 if test passes, 1 if test fails - - uses global variable debug for printing results - ''' - - # define parameters for test -## n=2000 - n = len(rvs) - nsupp = 20 - wsupp = 1.0/nsupp - -## distfn = getattr(stats, distname) -## np.random.seed(9765456) -## rvs = distfn.rvs(size=n,*arg) - - # construct intervals with minimum mass 1/nsupp - # intervalls are left-half-open as in a cdf difference - distsupport = xrange(max(distfn.a, -1000), min(distfn.b, 1000) + 1) - last = 0 - distsupp = [max(distfn.a, -1000)] - distmass = [] - for ii in distsupport: - current = distfn.cdf(ii,*arg) - if current - last >= wsupp-1e-14: - distsupp.append(ii) - distmass.append(current - last) - last = current - if current > (1-wsupp): - break - if distsupp[-1] < distfn.b: - distsupp.append(distfn.b) - distmass.append(1-last) - distsupp = np.array(distsupp) - distmass = np.array(distmass) - - # convert intervals to right-half-open as required by histogram - histsupp = distsupp+1e-8 - histsupp[0] = distfn.a - - # find sample frequencies and perform chisquare test - freq,hsupp = np.histogram(rvs,histsupp) - cdfs = distfn.cdf(distsupp,*arg) - (chis,pval) = stats.chisquare(np.array(freq),n*distmass) - - npt.assert_(pval > alpha, 'chisquare - test for %s' - ' at arg = %s with pval = %s' % (msg,str(arg),str(pval))) - - -if __name__ == "__main__": - #nose.run(argv=['', __file__]) - nose.runmodule(argv=[__file__,'-s'], exit=False) diff --git a/scipy-0.10.1/scipy/stats/tests/test_distributions.py b/scipy-0.10.1/scipy/stats/tests/test_distributions.py deleted file mode 100644 index 0dbc0d914d..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_distributions.py +++ /dev/null @@ -1,796 +0,0 @@ -""" Test functions for stats module - -""" - -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - 
assert_array_equal, assert_almost_equal, assert_array_almost_equal, \ - assert_allclose, assert_, rand, dec - - -import numpy -import numpy as np -from numpy import typecodes, array -import scipy.stats as stats -from scipy.stats.distributions import argsreduce - -def kolmogorov_check(diststr, args=(), N=20, significance=0.01): - qtest = stats.ksoneisf(significance, N) - cdf = eval('stats.'+diststr+'.cdf') - dist = eval('stats.'+diststr) - # Get random numbers - kwds = {'size':N} - vals = numpy.sort(dist.rvs(*args, **kwds)) - cdfvals = cdf(vals, *args) - q = max(abs(cdfvals - np.arange(1.0, N+1)/N)) - assert_(q < qtest, msg="Failed q=%f, bound=%f, alpha=%f" % (q, qtest, significance)) - return - - -# generate test cases to test cdf and distribution consistency -dists = ['uniform','norm','lognorm','expon','beta', - 'powerlaw','bradford','burr','fisk','cauchy','halfcauchy', - 'foldcauchy','gamma','gengamma','loggamma', - 'alpha','anglit','arcsine','betaprime','erlang', - 'dgamma','exponweib','exponpow','frechet_l','frechet_r', - 'gilbrat','f','ncf','chi2','chi','nakagami','genpareto', - 'genextreme','genhalflogistic','pareto','lomax','halfnorm', - 'halflogistic','fatiguelife','foldnorm','ncx2','t','nct', - 'weibull_min','weibull_max','dweibull','maxwell','rayleigh', - 'genlogistic', 'logistic','gumbel_l','gumbel_r','gompertz', - 'hypsecant', 'laplace', 'reciprocal','triang','tukeylambda', - 'vonmises'] - -# check function for test generator -def check_distribution(dist, args, alpha): - D,pval = stats.kstest(dist,'', args=args, N=1000) - if (pval < alpha): - D,pval = stats.kstest(dist,'',args=args, N=1000) - #if (pval < alpha): - # D,pval = stats.kstest(dist,'',args=args, N=1000) - assert_(pval > alpha, msg="D = " + str(D) + "; pval = " + str(pval) + \ - "; alpha = " + str(alpha) + "\nargs = " + str(args)) - -# nose test generator -def test_all_distributions(): - for dist in dists: - distfunc = getattr(stats, dist) - nargs = distfunc.numargs - alpha = 0.01 - if dist == 
'fatiguelife': - alpha = 0.001 - if dist == 'erlang': - args = (4,)+tuple(rand(2)) - elif dist == 'frechet': - args = tuple(2*rand(1))+(0,)+tuple(2*rand(2)) - elif dist == 'triang': - args = tuple(rand(nargs)) - elif dist == 'reciprocal': - vals = rand(nargs) - vals[1] = vals[0] + 1.0 - args = tuple(vals) - elif dist == 'vonmises': - yield check_distribution, dist, (10,), alpha - yield check_distribution, dist, (101,), alpha - args = tuple(1.0+rand(nargs)) - else: - args = tuple(1.0+rand(nargs)) - yield check_distribution, dist, args, alpha - -def check_vonmises_pdf_periodic(k,l,s,x): - vm = stats.vonmises(k,loc=l,scale=s) - assert_almost_equal(vm.pdf(x),vm.pdf(x%(2*numpy.pi*s))) -def check_vonmises_cdf_periodic(k,l,s,x): - vm = stats.vonmises(k,loc=l,scale=s) - assert_almost_equal(vm.cdf(x)%1,vm.cdf(x%(2*numpy.pi*s))%1) - -def test_vonmises_pdf_periodic(): - for k in [0.1, 1, 101]: - for x in [0,1,numpy.pi,10,100]: - yield check_vonmises_pdf_periodic, k, 0, 1, x - yield check_vonmises_pdf_periodic, k, 1, 1, x - yield check_vonmises_pdf_periodic, k, 0, 10, x - - yield check_vonmises_cdf_periodic, k, 0, 1, x - yield check_vonmises_cdf_periodic, k, 1, 1, x - yield check_vonmises_cdf_periodic, k, 0, 10, x - -class TestRandInt(TestCase): - def test_rvs(self): - vals = stats.randint.rvs(5,30,size=100) - assert_(numpy.all(vals < 30) & numpy.all(vals >= 5)) - assert_(len(vals) == 100) - vals = stats.randint.rvs(5,30,size=(2,50)) - assert_(numpy.shape(vals) == (2,50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.randint.rvs(15,46) - assert_((val >= 15) & (val < 46)) - assert_(isinstance(val, numpy.ScalarType), msg=`type(val)`) - val = stats.randint(15,46).rvs(3) - assert_(val.dtype.char in typecodes['AllInteger']) - - def test_pdf(self): - k = numpy.r_[0:36] - out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0) - vals = stats.randint.pmf(k,5,30) - assert_array_almost_equal(vals,out) - - def test_cdf(self): - x = numpy.r_[0:36:100j] - k = 
numpy.floor(x) - out = numpy.select([k>=30,k>=5],[1.0,(k-5.0+1)/(30-5.0)],0) - vals = stats.randint.cdf(x,5,30) - assert_array_almost_equal(vals, out, decimal=12) - -class TestBinom(TestCase): - def test_rvs(self): - vals = stats.binom.rvs(10, 0.75, size=(2, 50)) - assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.binom.rvs(10, 0.75) - assert_(isinstance(val, int)) - val = stats.binom(10, 0.75).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - - -class TestBernoulli(TestCase): - def test_rvs(self): - vals = stats.bernoulli.rvs(0.75, size=(2, 50)) - assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.bernoulli.rvs(0.75) - assert_(isinstance(val, int)) - val = stats.bernoulli(0.75).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - -class TestNBinom(TestCase): - def test_rvs(self): - vals = stats.nbinom.rvs(10, 0.75, size=(2, 50)) - assert_(numpy.all(vals >= 0)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.nbinom.rvs(10, 0.75) - assert_(isinstance(val, int)) - val = stats.nbinom(10, 0.75).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - -class TestGeom(TestCase): - def test_rvs(self): - vals = stats.geom.rvs(0.75, size=(2, 50)) - assert_(numpy.all(vals >= 0)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.geom.rvs(0.75) - assert_(isinstance(val, int)) - val = stats.geom(0.75).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - - def test_pmf(self): - vals = stats.geom.pmf([1,2,3],0.5) - 
assert_array_almost_equal(vals,[0.5,0.25,0.125]) - - def test_cdf_sf(self): - vals = stats.geom.cdf([1,2,3],0.5) - vals_sf = stats.geom.sf([1,2,3],0.5) - expected = array([0.5,0.75,0.875]) - assert_array_almost_equal(vals,expected) - assert_array_almost_equal(vals_sf,1-expected) - - -class TestHypergeom(TestCase): - def test_rvs(self): - vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50)) - assert_(numpy.all(vals >= 0) & - numpy.all(vals <= 3)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.hypergeom.rvs(20, 3, 10) - assert_(isinstance(val, int)) - val = stats.hypergeom(20, 3, 10).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - - def test_precision(self): - # comparison number from mpmath - M = 2500 - n = 50 - N = 500 - tot = M - good = n - hgpmf = stats.hypergeom.pmf(2, tot, good, N) - assert_almost_equal(hgpmf, 0.0010114963068932233, 11) - - def test_precision2(self): - """Test hypergeom precision for large numbers. See #1218.""" - # Results compared with those from R. 
- oranges = 9.9e4 - pears = 1.1e5 - fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4 - quantile = 2e4 - res = [] - for eaten in fruits_eaten: - res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)) - expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32, - 8.265601e-11, 0.1237904, 1]) - assert_allclose(res, expected, atol=0, rtol=5e-7) - - # Test with array_like first argument - quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4] - res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4) - expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69] - assert_allclose(res2, expected2, atol=0, rtol=5e-7) - - -class TestLogser(TestCase): - def test_rvs(self): - vals = stats.logser.rvs(0.75, size=(2, 50)) - assert_(numpy.all(vals >= 1)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.logser.rvs(0.75) - assert_(isinstance(val, int)) - val = stats.logser(0.75).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - -class TestPoisson(TestCase): - def test_rvs(self): - vals = stats.poisson.rvs(0.5, size=(2, 50)) - assert_(numpy.all(vals >= 0)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.poisson.rvs(0.5) - assert_(isinstance(val, int)) - val = stats.poisson(0.5).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - -class TestZipf(TestCase): - def test_rvs(self): - vals = stats.zipf.rvs(1.5, size=(2, 50)) - assert_(numpy.all(vals >= 1)) - assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.zipf.rvs(1.5) - assert_(isinstance(val, int)) - val = stats.zipf(1.5).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - -class TestDLaplace(TestCase): - def test_rvs(self): - vals = stats.dlaplace.rvs(1.5 , size=(2, 50)) - 
assert_(numpy.shape(vals) == (2, 50)) - assert_(vals.dtype.char in typecodes['AllInteger']) - val = stats.dlaplace.rvs(1.5) - assert_(isinstance(val, int)) - val = stats.dlaplace(1.5).rvs(3) - assert_(isinstance(val, numpy.ndarray)) - assert_(val.dtype.char in typecodes['AllInteger']) - -def test_rvgeneric_std(): - """Regression test for #1191""" - assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487]) - -class TestRvDiscrete(TestCase): - def test_rvs(self): - states = [-1,0,1,2,3,4] - probability = [0.0,0.3,0.4,0.0,0.3,0.0] - samples = 1000 - r = stats.rv_discrete(name='sample',values=(states,probability)) - x = r.rvs(size=samples) - assert_(isinstance(x, numpy.ndarray)) - - for s,p in zip(states,probability): - assert_(abs(sum(x == s)/float(samples) - p) < 0.05) - - x = r.rvs() - assert_(isinstance(x, int)) - -class TestExpon(TestCase): - def test_zero(self): - assert_equal(stats.expon.pdf(0),1) - - def test_tail(self): # Regression test for ticket 807 - assert_equal(stats.expon.cdf(1e-18), 1e-18) - assert_equal(stats.expon.isf(stats.expon.sf(40)), 40) - -class TestGenExpon(TestCase): - def test_pdf_unity_area(self): - from scipy.integrate import simps - # PDF should integrate to one - assert_almost_equal(simps(stats.genexpon.pdf(numpy.arange(0,10,0.01), - 0.5, 0.5, 2.0), - dx=0.01), 1, 1) - - def test_cdf_bounds(self): - # CDF should always be positive - cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) - assert_(numpy.all((0 <= cdf) & (cdf <= 1))) - -class TestExponpow(TestCase): - def test_tail(self): - assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20) - assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5) - - -class TestSkellam(TestCase): - def test_pmf(self): - #comparison to R - k = numpy.arange(-10, 15) - mu1, mu2 = 10, 5 - skpmfR = numpy.array( - [4.2254582961926893e-005, 1.1404838449648488e-004, - 2.8979625801752660e-004, 6.9177078182101231e-004, - 1.5480716105844708e-003, 
3.2412274963433889e-003, - 6.3373707175123292e-003, 1.1552351566696643e-002, - 1.9606152375042644e-002, 3.0947164083410337e-002, - 4.5401737566767360e-002, 6.1894328166820688e-002, - 7.8424609500170578e-002, 9.2418812533573133e-002, - 1.0139793148019728e-001, 1.0371927988298846e-001, - 9.9076583077406091e-002, 8.8546660073089561e-002, - 7.4187842052486810e-002, 5.8392772862200251e-002, - 4.3268692953013159e-002, 3.0248159818374226e-002, - 1.9991434305603021e-002, 1.2516877303301180e-002, - 7.4389876226229707e-003]) - - assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15) - - def test_cdf(self): - #comparison to R, only 5 decimals - k = numpy.arange(-10, 15) - mu1, mu2 = 10, 5 - skcdfR = numpy.array( - [6.4061475386192104e-005, 1.7810985988267694e-004, - 4.6790611790020336e-004, 1.1596768997212152e-003, - 2.7077485103056847e-003, 5.9489760066490718e-003, - 1.2286346724161398e-002, 2.3838698290858034e-002, - 4.3444850665900668e-002, 7.4392014749310995e-002, - 1.1979375231607835e-001, 1.8168808048289900e-001, - 2.6011268998306952e-001, 3.5253150251664261e-001, - 4.5392943399683988e-001, 5.5764871387982828e-001, - 6.5672529695723436e-001, 7.4527195703032389e-001, - 8.1945979908281064e-001, 8.7785257194501087e-001, - 9.2112126489802404e-001, 9.5136942471639818e-001, - 9.7136085902200120e-001, 9.8387773632530240e-001, - 9.9131672394792536e-001]) - - assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5) - - -class TestGamma(TestCase): - - def test_pdf(self): - # a few test cases to compare with R - pdf = stats.gamma.pdf(90, 394, scale=1./5) - assert_almost_equal(pdf, 0.002312341) - - pdf = stats.gamma.pdf(3, 10, scale=1./5) - assert_almost_equal(pdf, 0.1620358) - - -class TestChi2(TestCase): - # regression tests after precision improvements, ticket:1041, not verified - def test_precision(self): - assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, 14) - assert_almost_equal(stats.chi2.pdf(100, 100), 
0.028162503162596778, 14) - -class TestArrayArgument(TestCase): #test for ticket:992 - def test_noexception(self): - rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), size=(10,5)) - assert_equal(rvs.shape, (10,5)) - -class TestDocstring(TestCase): - def test_docstrings(self): - """See ticket #761""" - if stats.rayleigh.__doc__ is not None: - self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower()) - if stats.bernoulli.__doc__ is not None: - self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower()) - - def test_no_name_arg(self): - """If name is not given, construction shouldn't fail. See #1508.""" - stats.rv_continuous() - stats.rv_discrete() - - -class TestEntropy(TestCase): - def test_entropy_positive(self): - """See ticket #497""" - pk = [0.5,0.2,0.3] - qk = [0.1,0.25,0.65] - eself = stats.entropy(pk,pk) - edouble = stats.entropy(pk,qk) - assert_(0.0 == eself) - assert_(edouble >= 0.0) - -def TestArgsreduce(): - a = array([1,3,2,1,2,3,3]) - b,c = argsreduce(a > 1, a, 2) - - assert_array_equal(b, [3,2,2,3,3]) - assert_array_equal(c, [2,2,2,2,2]) - - b,c = argsreduce(2 > 1, a, 2) - assert_array_equal(b, a[0]) - assert_array_equal(c, [2]) - - b,c = argsreduce(a > 0, a, 2) - assert_array_equal(b, a) - assert_array_equal(c, [2] * numpy.size(a)) - - -class TestFitMethod(TestCase): - skip = ['ncf'] - - @dec.slow - def test_fit(self): - for func, dist, args, alpha in test_all_distributions(): - if dist in self.skip: - continue - distfunc = getattr(stats, dist) - res = distfunc.rvs(*args, **{'size':200}) - vals = distfunc.fit(res) - vals2 = distfunc.fit(res, optimizer='powell') - # Only check the length of the return - # FIXME: should check the actual results to see if we are 'close' - # to what was created --- but what is 'close' enough - if dist in ['erlang', 'frechet']: - assert_(len(vals)==len(args)) - assert_(len(vals2)==len(args)) - else: - assert_(len(vals) == 2+len(args)) - assert_(len(vals2)==2+len(args)) - - @dec.slow - def 
test_fix_fit(self): - for func, dist, args, alpha in test_all_distributions(): - # Not sure why 'ncf', and 'beta' are failing - # erlang and frechet have different len(args) than distfunc.numargs - if dist in self.skip + ['erlang', 'frechet', 'beta']: - continue - distfunc = getattr(stats, dist) - res = distfunc.rvs(*args, **{'size':200}) - vals = distfunc.fit(res,floc=0) - vals2 = distfunc.fit(res,fscale=1) - assert_(len(vals) == 2+len(args)) - assert_(vals[-2] == 0) - assert_(vals2[-1] == 1) - assert_(len(vals2) == 2+len(args)) - if len(args) > 0: - vals3 = distfunc.fit(res, f0=args[0]) - assert_(len(vals3) == 2+len(args)) - assert_(vals3[0] == args[0]) - if len(args) > 1: - vals4 = distfunc.fit(res, f1=args[1]) - assert_(len(vals4) == 2+len(args)) - assert_(vals4[1] == args[1]) - if len(args) > 2: - vals5 = distfunc.fit(res, f2=args[2]) - assert_(len(vals5) == 2+len(args)) - assert_(vals5[2] == args[2]) - -class TestFrozen(TestCase): - """Test that a frozen distribution gives the same results as the original object. - - Only tested for the normal distribution (with loc and scale specified) and for the - gamma distribution (with a shape parameter specified). 
- """ - def test_norm(self): - dist = stats.norm - frozen = stats.norm(loc=10.0, scale=3.0) - - result_f = frozen.pdf(20.0) - result = dist.pdf(20.0, loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.cdf(20.0) - result = dist.cdf(20.0, loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.ppf(0.25) - result = dist.ppf(0.25, loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.isf(0.25) - result = dist.isf(0.25, loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.sf(10.0) - result = dist.sf(10.0, loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.median() - result = dist.median(loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.mean() - result = dist.mean(loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.var() - result = dist.var(loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.std() - result = dist.std(loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.entropy() - result = dist.entropy(loc=10.0, scale=3.0) - assert_equal(result_f, result) - - result_f = frozen.moment(2) - result = dist.moment(2,loc=10.0, scale=3.0) - assert_equal(result_f, result) - - def test_gamma(self): - a = 2.0 - dist = stats.gamma - frozen = stats.gamma(a) - - result_f = frozen.pdf(20.0) - result = dist.pdf(20.0, a) - assert_equal(result_f, result) - - result_f = frozen.cdf(20.0) - result = dist.cdf(20.0, a) - assert_equal(result_f, result) - - result_f = frozen.ppf(0.25) - result = dist.ppf(0.25, a) - assert_equal(result_f, result) - - result_f = frozen.isf(0.25) - result = dist.isf(0.25, a) - assert_equal(result_f, result) - - result_f = frozen.sf(10.0) - result = dist.sf(10.0, a) - assert_equal(result_f, result) - - result_f = frozen.median() - result = dist.median(a) - assert_equal(result_f, result) - - result_f = frozen.mean() - result = dist.mean(a) - 
assert_equal(result_f, result) - - result_f = frozen.var() - result = dist.var(a) - assert_equal(result_f, result) - - result_f = frozen.std() - result = dist.std(a) - assert_equal(result_f, result) - - result_f = frozen.entropy() - result = dist.entropy(a) - assert_equal(result_f, result) - - result_f = frozen.moment(2) - result = dist.moment(2, a) - assert_equal(result_f, result) - - def test_regression_02(self): - """Regression test for ticket #1293.""" - # Create a frozen distribution. - frozen = stats.lognorm(1) - # Call one of its methods that does not take any keyword arguments. - m1 = frozen.moment(2) - # Now call a method that takes a keyword argument. - s = frozen.stats(moments='mvsk') - # Call moment(2) again. - # After calling stats(), the following was raising an exception. - # So this test passes if the following does not raise an exception. - m2 = frozen.moment(2) - # The following should also be true, of course. But it is not - # the focus of this test. - assert_equal(m1, m2) - -class TestExpect(TestCase): - """Test for expect method. - - Uses normal distribution and beta distribution for finite bounds, and - hypergeom for discrete distribution with finite support - - """ - def test_norm(self): - v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2) - assert_almost_equal(v, 4, decimal=14) - - m = stats.norm.expect(lambda x: (x), loc=5, scale=2) - assert_almost_equal(m, 5, decimal=14) - - lb = stats.norm.ppf(0.05, loc=5, scale=2) - ub = stats.norm.ppf(0.95, loc=5, scale=2) - prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub) - assert_almost_equal(prob90, 0.9, decimal=14) - - prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub, - conditional=True) - assert_almost_equal(prob90c, 1., decimal=14) - - def test_beta(self): - #case with finite support interval -## >>> mtrue, vtrue = stats.beta.stats(10,5, loc=5., scale=2.) 
-## >>> mtrue, vtrue -## (array(6.333333333333333), array(0.055555555555555552)) - v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10,5), - loc=5, scale=2) - assert_almost_equal(v, 1./18., decimal=14) - - m = stats.beta.expect(lambda x: x, args=(10,5), loc=5., scale=2.) - assert_almost_equal(m, 19/3., decimal=14) - - ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2) - lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2) - prob90 = stats.beta.expect(lambda x: 1., args=(10,10), loc=5., - scale=2.,lb=lb, ub=ub, conditional=False) - assert_almost_equal(prob90, 0.9, decimal=14) - - prob90c = stats.beta.expect(lambda x: 1, args=(10,10), loc=5, - scale=2, lb=lb, ub=ub, conditional=True) - assert_almost_equal(prob90c, 1., decimal=14) - - - def test_hypergeom(self): - #test case with finite bounds - - #without specifying bounds - m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.) - m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.) - assert_almost_equal(m, m_true, decimal=13) - - v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8), - loc=5.) 
- assert_almost_equal(v, v_true, decimal=14) - - #with bounds, bounds equal to shifted support - v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8), - loc=5., lb=5, ub=13) - assert_almost_equal(v_bounds, v_true, decimal=14) - - #drop boundary points - prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum() - prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), - loc=5., lb=6, ub=12) - assert_almost_equal(prob_bounds, prob_true, decimal=13) - - #conditional - prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5., - lb=6, ub=12, conditional=True) - assert_almost_equal(prob_bc, 1, decimal=14) - - #check simple integral - prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), - lb=0, ub=8) - assert_almost_equal(prob_b, 1, decimal=13) - - def test_poisson(self): - #poisson, use lower bound only - prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3, - conditional=False) - prob_b_true = 1-stats.poisson.cdf(2,2) - assert_almost_equal(prob_bounds, prob_b_true, decimal=14) - - - prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2, - conditional=True) - assert_almost_equal(prob_lb, 1, decimal=14) - - - - - -def test_regression_ticket_1316(): - """Regression test for ticket #1316.""" - # The following was raising an exception, because _construct_default_doc() - # did not handle the default keyword extradoc=None. See ticket #1316. - g = stats.distributions.gamma_gen(name='gamma') - - -def test_regression_ticket_1326(): - """Regression test for ticket #1326.""" - #adjust to avoid nan with 0*log(0) - assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14) - - -def test_regression_tukey_lambda(): - """ Make sure that Tukey-Lambda distribution correctly handles non-positive lambdas. 
- """ - x = np.linspace(-5.0, 5.0, 101) - - olderr = np.seterr(divide='ignore') - try: - for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]: - p = stats.tukeylambda.pdf(x, lam) - assert_((p != 0.0).all()) - assert_(~np.isnan(p).all()) - - lam = np.array([[-1.0], [0.0], [2.0]]) - p = stats.tukeylambda.pdf(x, lam) - finally: - np.seterr(**olderr) - - assert_(~np.isnan(p).all()) - assert_((p[0] != 0.0).all()) - assert_((p[1] != 0.0).all()) - assert_((p[2] != 0.0).any()) - assert_((p[2] == 0.0).any()) - - -def test_regression_ticket_1421(): - """Regression test for ticket #1421 - correction discrete docs.""" - assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__) - assert_('pmf(x,' in stats.poisson.__doc__) - -def test_nan_arguments_ticket_835(): - assert_(np.isnan(stats.t.logcdf(np.nan))) - assert_(np.isnan(stats.t.cdf(np.nan))) - assert_(np.isnan(stats.t.logsf(np.nan))) - assert_(np.isnan(stats.t.sf(np.nan))) - assert_(np.isnan(stats.t.pdf(np.nan))) - assert_(np.isnan(stats.t.logpdf(np.nan))) - assert_(np.isnan(stats.t.ppf(np.nan))) - assert_(np.isnan(stats.t.isf(np.nan))) - - assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5))) - assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5))) - - - -def test_frozen_fit_ticket_1536(): - np.random.seed(5678) - true = np.array([0.25, 0., 0.5]) - x = stats.lognorm.rvs(true[0], true[1], true[2], size=100) - - olderr = np.seterr(divide='ignore') - try: - params = np.array(stats.lognorm.fit(x, floc=0.)) - finally: - np.seterr(**olderr) - - assert_almost_equal(params, true, decimal=2) - - params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0)) - assert_almost_equal(params, true, 
decimal=2) - - params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0)) - assert_almost_equal(params, true, decimal=2) - - params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0)) - assert_almost_equal(params, true, decimal=2) - - np.random.seed(5678) - loc = 1 - floc = 0.9 - x = stats.norm.rvs(loc, 2., size=100) - params = np.array(stats.norm.fit(x, floc=floc)) - expected = np.array([floc, np.sqrt(((x-floc)**2).mean())]) - assert_almost_equal(params, expected, decimal=4) - -def test_regression_ticket_1530(): - """Check the starting value works for Cauchy distribution fit.""" - np.random.seed(654321) - rvs = stats.cauchy.rvs(size=100) - params = stats.cauchy.fit(rvs) - expected = (0.045, 1.142) - assert_almost_equal(params, expected, decimal=1) - - -if __name__ == "__main__": - run_module_suite() - diff --git a/scipy-0.10.1/scipy/stats/tests/test_fit.py b/scipy-0.10.1/scipy/stats/tests/test_fit.py deleted file mode 100644 index 2f7e8acb6d..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_fit.py +++ /dev/null @@ -1,70 +0,0 @@ -# NOTE: contains only one test, _est_cont_fit, that is renamed so that -# nose doesn't run it -# I put this here for the record and for the case when someone wants to -# verify the quality of fit -# with current parameters: relatively small sample size, default starting values -# Ran 84 tests in 401.797s -# FAILED (failures=15) - - -import numpy.testing as npt -import numpy as np - -from scipy import stats - -from test_continuous_basic import distcont - -# this is not a proper statistical test for convergence, but only -# verifies that the estimate and true values don't differ by too much -n_repl1 = 1000 # sample size for first run -n_repl2 = 5000 # sample size for second run, if first run fails -thresh_percent = 0.25 # percent of true parameters for fail cut-off -thresh_min = 0.75 # minimum difference estimate - true to fail test - -#distcont = [['genextreme', (3.3184017469423535,)]] - -def _est_cont_fit(): - # this tests the closeness of 
the estimated parameters to the true - # parameters with fit method of continuous distributions - # Note: is slow, some distributions don't converge with sample size <= 10000 - - for distname, arg in distcont: - yield check_cont_fit, distname,arg - - -def check_cont_fit(distname,arg): - distfn = getattr(stats, distname) - rvs = distfn.rvs(size=n_repl1,*arg) - est = distfn.fit(rvs) #,*arg) # start with default values - - truearg = np.hstack([arg,[0.0,1.0]]) - diff = est-truearg - - txt = '' - diffthreshold = np.max(np.vstack([truearg*thresh_percent, - np.ones(distfn.numargs+2)*thresh_min]),0) - # threshold for location - diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min]) - - if np.any(np.isnan(est)): - raise AssertionError('nan returned in fit') - else: - if np.any((np.abs(diff) - diffthreshold) > 0.0): -## txt = 'WARNING - diff too large with small sample' -## print 'parameter diff =', diff - diffthreshold, txt - rvs = np.concatenate([rvs,distfn.rvs(size=n_repl2-n_repl1,*arg)]) - est = distfn.fit(rvs) #,*arg) - truearg = np.hstack([arg,[0.0,1.0]]) - diff = est-truearg - if np.any((np.abs(diff) - diffthreshold) > 0.0): - txt = 'parameter: %s\n' % str(truearg) - txt += 'estimated: %s\n' % str(est) - txt += 'diff : %s\n' % str(diff) - raise AssertionError('fit not very good in %s\n' % distfn.name + txt) - - - -if __name__ == "__main__": - import nose - #nose.run(argv=['', __file__]) - nose.runmodule(argv=[__file__,'-s'], exit=False) diff --git a/scipy-0.10.1/scipy/stats/tests/test_kdeoth.py b/scipy-0.10.1/scipy/stats/tests/test_kdeoth.py deleted file mode 100644 index 1d8975d0a9..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_kdeoth.py +++ /dev/null @@ -1,36 +0,0 @@ - - - -from scipy import stats -import numpy as np -from numpy.testing import assert_almost_equal, assert_ - -def test_kde_1d(): - #some basic tests comparing to normal distribution - np.random.seed(8765678) - n_basesample = 500 - xn = np.random.randn(n_basesample) - xnmean = 
xn.mean() - xnstd = xn.std(ddof=1) - - # get kde for original sample - gkde = stats.gaussian_kde(xn) - - # evaluate the density funtion for the kde for some points - xs = np.linspace(-7,7,501) - kdepdf = gkde.evaluate(xs) - normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) - intervall = xs[1] - xs[0] - - assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) - prob1 = gkde.integrate_box_1d(xnmean, np.inf) - prob2 = gkde.integrate_box_1d(-np.inf, xnmean) - assert_almost_equal(prob1, 0.5, decimal=1) - assert_almost_equal(prob2, 0.5, decimal=1) - assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) - assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) - - assert_almost_equal(gkde.integrate_kde(gkde), - (kdepdf**2).sum()*intervall, decimal=2) - assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), - (kdepdf*normpdf).sum()*intervall, decimal=2) diff --git a/scipy-0.10.1/scipy/stats/tests/test_morestats.py b/scipy-0.10.1/scipy/stats/tests/test_morestats.py deleted file mode 100644 index c468302864..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_morestats.py +++ /dev/null @@ -1,305 +0,0 @@ -# Author: Travis Oliphant, 2002 -# -# Further enhancements and tests added by numerous SciPy developers. 
-# - -import warnings - -from numpy.testing import TestCase, run_module_suite, assert_array_equal, \ - assert_almost_equal, assert_array_less, assert_array_almost_equal, \ - assert_raises, assert_ - -import scipy.stats as stats - -import numpy as np -from numpy.random import RandomState - - -g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000] -g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988] -g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996] -g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996] -g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996] -g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996] -g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002] -g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006] -g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] -g10= [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] - - -class TestShapiro(TestCase): - def test_basic(self): - x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46, - 4.43,0.21,4.75,0.71,1.52,3.24, - 0.93,0.42,4.97,9.53,4.55,0.47,6.66] - w,pw = stats.shapiro(x1) - assert_almost_equal(w,0.90047299861907959,6) - assert_almost_equal(pw,0.042089745402336121,6) - x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11, - 3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69, - 0.08,3.67,2.81,3.49] - w,pw = stats.shapiro(x2) - assert_almost_equal(w,0.9590270,6) - assert_almost_equal(pw,0.52460,3) - - def test_bad_arg(self): - # Length of x is less than 3. 
- x = [1] - assert_raises(ValueError, stats.shapiro, x) - - -class TestAnderson(TestCase): - def test_normal(self): - rs = RandomState(1234567890) - x1 = rs.standard_exponential(size=50) - x2 = rs.standard_normal(size=50) - A,crit,sig = stats.anderson(x1) - assert_array_less(crit[:-1], A) - A,crit,sig = stats.anderson(x2) - assert_array_less(A, crit[-2:]) - - def test_expon(self): - rs = RandomState(1234567890) - x1 = rs.standard_exponential(size=50) - x2 = rs.standard_normal(size=50) - A,crit,sig = stats.anderson(x1,'expon') - assert_array_less(A, crit[-2:]) - olderr = np.seterr(all='ignore') - try: - A,crit,sig = stats.anderson(x2,'expon') - finally: - np.seterr(**olderr) - assert_(A > crit[-1]) - - def test_bad_arg(self): - assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp') - - -class TestAnsari(TestCase): - - def test_small(self): - x = [1,2,3,3,4] - y = [3,2,6,1,6,1,4,1] - W, pval = stats.ansari(x,y) - assert_almost_equal(W,23.5,11) - assert_almost_equal(pval,0.13499256881897437,11) - - def test_approx(self): - ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99, - 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)) - parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, - 100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)) - W, pval = stats.ansari(ramsay, parekh) - assert_almost_equal(W,185.5,11) - assert_almost_equal(pval,0.18145819972867083,11) - - def test_exact(self): - W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12]) - assert_almost_equal(W,10.0,11) - assert_almost_equal(pval,0.533333333333333333,7) - - def test_bad_arg(self): - assert_raises(ValueError, stats.ansari, [], [1]) - assert_raises(ValueError, stats.ansari, [1], []) - -warnings.filterwarnings('ignore', - message="Ties preclude use of exact statistic.") - - -class TestBartlett(TestCase): - - def test_data(self): - args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] - T, pval = stats.bartlett(*args) - assert_almost_equal(T,20.78587342806484,7) - 
assert_almost_equal(pval,0.0136358632781,7) - - def test_bad_arg(self): - """Too few args raises ValueError.""" - assert_raises(ValueError, stats.bartlett, [1]) - - -class TestLevene(TestCase): - - def test_data(self): - args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] - W, pval = stats.levene(*args) - assert_almost_equal(W,1.7059176930008939,7) - assert_almost_equal(pval,0.0990829755522,7) - - def test_trimmed1(self): - """Test that center='trimmed' gives the same result as center='mean' when proportiontocut=0.""" - W1, pval1 = stats.levene(g1, g2, g3, center='mean') - W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0) - assert_almost_equal(W1, W2) - assert_almost_equal(pval1, pval2) - - def test_trimmed2(self): - x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] - y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] - # Use center='trimmed' - W1, pval1 = stats.levene(x, y, center='trimmed', proportiontocut=0.125) - # Trim the data here, and use center='mean' - W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean') - # Result should be the same. 
- assert_almost_equal(W1, W2) - assert_almost_equal(pval1, pval2) - - def test_equal_mean_median(self): - x = np.linspace(-1,1,21) - y = x**3 - W1, pval1 = stats.levene(x, y, center='mean') - W2, pval2 = stats.levene(x, y, center='median') - assert_almost_equal(W1, W2) - assert_almost_equal(pval1, pval2) - - def test_bad_keyword(self): - x = np.linspace(-1,1,21) - assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1) - - def test_bad_center_value(self): - x = np.linspace(-1,1,21) - assert_raises(ValueError, stats.levene, x, x, center='trim') - - def test_too_few_args(self): - assert_raises(ValueError, stats.levene, [1]) - - -class TestBinomP(TestCase): - - def test_data(self): - pval = stats.binom_test(100,250) - assert_almost_equal(pval,0.0018833009350757682,11) - pval = stats.binom_test(201,405) - assert_almost_equal(pval,0.92085205962670713,11) - pval = stats.binom_test([682,243],p=3.0/4) - assert_almost_equal(pval,0.38249155957481695,11) - - def test_bad_len_x(self): - """Length of x must be 1 or 2.""" - assert_raises(ValueError, stats.binom_test, [1,2,3]) - - def test_bad_n(self): - """len(x) is 1, but n is invalid.""" - # Missing n - assert_raises(ValueError, stats.binom_test, [100]) - # n less than x[0] - assert_raises(ValueError, stats.binom_test, [100], n=50) - - def test_bad_p(self): - assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0) - - -class TestFindRepeats(TestCase): - def test_basic(self): - a = [1,2,3,4,1,2,3,4,1,2,5] - res,nums = stats.find_repeats(a) - assert_array_equal(res,[1,2,3,4]) - assert_array_equal(nums,[3,3,2,2]) - - -class TestFligner(TestCase): - - def test_data(self): - # numbers from R: fligner.test in package stats - x1 = np.arange(5) - assert_array_almost_equal(stats.fligner(x1,x1**2), - (3.2282229927203536, 0.072379187848207877), 11) - - def test_trimmed1(self): - """Test that center='trimmed' gives the same result as center='mean' when proportiontocut=0.""" - Xsq1, pval1 = stats.fligner(g1, g2, g3, 
center='mean') - Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0) - assert_almost_equal(Xsq1, Xsq2) - assert_almost_equal(pval1, pval2) - - def test_trimmed2(self): - x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] - y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] - # Use center='trimmed' - Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125) - # Trim the data here, and use center='mean' - Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean') - # Result should be the same. - assert_almost_equal(Xsq1, Xsq2) - assert_almost_equal(pval1, pval2) - - # The following test looks reasonable at first, but fligner() uses the - # function stats.rankdata(), and in one of the cases in this test, - # there are ties, while in the other (because of normal rounding - # errors) there are not. This difference leads to differences in the - # third significant digit of W. - # - #def test_equal_mean_median(self): - # x = np.linspace(-1,1,21) - # y = x**3 - # W1, pval1 = stats.fligner(x, y, center='mean') - # W2, pval2 = stats.fligner(x, y, center='median') - # assert_almost_equal(W1, W2) - # assert_almost_equal(pval1, pval2) - - def test_bad_keyword(self): - x = np.linspace(-1,1,21) - assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1) - - def test_bad_center_value(self): - x = np.linspace(-1,1,21) - assert_raises(ValueError, stats.fligner, x, x, center='trim') - - def test_bad_num_args(self): - """Too few args raises ValueError.""" - assert_raises(ValueError, stats.fligner, [1]) - - -def test_mood(): - # numbers from R: mood.test in package stats - x1 = np.arange(5) - assert_array_almost_equal(stats.mood(x1,x1**2), - (-1.3830857299399906, 0.16663858066771478), 11) - -def test_mood_bad_arg(): - """Raise ValueError when the sum of the lengths of the args is less than 3.""" - assert_raises(ValueError, stats.mood, [1], []) - -def test_oneway_bad_arg(): - """Raise ValueError is fewer than two args are given.""" - 
assert_raises(ValueError, stats.oneway, [1]) - -def test_wilcoxon_bad_arg(): - """Raise ValueError when two args of different lengths are given.""" - assert_raises(ValueError, stats.wilcoxon, [1], [1,2]) - -def test_mvsdist_bad_arg(): - """Raise ValueError if fewer than two data points are given.""" - data = [1] - assert_raises(ValueError, stats.mvsdist, data) - -def test_kstat_bad_arg(): - """Raise ValueError if n > 4 or n > 1.""" - data = [1] - n = 10 - assert_raises(ValueError, stats.kstat, data, n=n) - -def test_kstatvar_bad_arg(): - """Raise ValueError is n is not 1 or 2.""" - data = [1] - n = 10 - assert_raises(ValueError, stats.kstatvar, data, n=n) - -def test_probplot_bad_arg(): - """Raise ValueError when given an invalid distribution.""" - data = [1] - assert_raises(ValueError, stats.probplot, data, dist="plate_of_shrimp") - -def test_ppcc_max_bad_arg(): - """Raise ValueError when given an invalid distribution.""" - data = [1] - assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp") - -def test_boxcox_bad_arg(): - """Raise ValueError if any data value is negative.""" - x = np.array([-1]) - assert_raises(ValueError, stats.boxcox, x) - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/stats/tests/test_mstats_basic.py b/scipy-0.10.1/scipy/stats/tests/test_mstats_basic.py deleted file mode 100644 index 3351b27c9b..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_mstats_basic.py +++ /dev/null @@ -1,489 +0,0 @@ -""" -Tests for the stats.mstats module (support for maskd arrays) -""" - - -import numpy as np -from numpy import nan -import numpy.ma as ma -from numpy.ma import masked, nomask - -import scipy.stats.mstats as mstats -from numpy.testing import TestCase, run_module_suite -from numpy.ma.testutils import assert_equal, assert_almost_equal, \ - assert_array_almost_equal, assert_ - - -class TestMquantiles(TestCase): - """Regression tests for mstats module.""" - def test_mquantiles_limit_keyword(self): - 
"""Ticket #867""" - data = np.array([[ 6., 7., 1.], - [ 47., 15., 2.], - [ 49., 36., 3.], - [ 15., 39., 4.], - [ 42., 40., -999.], - [ 41., 41., -999.], - [ 7., -999., -999.], - [ 39., -999., -999.], - [ 43., -999., -999.], - [ 40., -999., -999.], - [ 36., -999., -999.]]) - desired = [[19.2, 14.6, 1.45], - [40.0, 37.5, 2.5 ], - [42.8, 40.05, 3.55]] - quants = mstats.mquantiles(data, axis=0, limit=(0, 50)) - assert_almost_equal(quants, desired) - - - -class TestGMean(TestCase): - def test_1D(self): - a = (1,2,3,4) - actual= mstats.gmean(a) - desired = np.power(1*2*3*4,1./4.) - assert_almost_equal(actual, desired,decimal=14) - - desired1 = mstats.gmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - assert_(not isinstance(desired1, ma.MaskedArray)) - # - a = ma.array((1,2,3,4),mask=(0,0,0,1)) - actual= mstats.gmean(a) - desired = np.power(1*2*3,1./3.) - assert_almost_equal(actual, desired,decimal=14) - - desired1 = mstats.gmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - # - def test_2D(self): - a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)), - mask=((0,0,0,0),(1,0,0,1),(0,1,1,0))) - actual= mstats.gmean(a) - desired = np.array((1,2,3,4)) - assert_array_almost_equal(actual, desired, decimal=14) - # - desired1 = mstats.gmean(a,axis=0) - assert_array_almost_equal(actual, desired1, decimal=14) - # - actual= mstats.gmean(a, -1) - desired = ma.array((np.power(1*2*3*4,1./4.), - np.power(2*3,1./2.), - np.power(1*4,1./2.))) - assert_array_almost_equal(actual, desired, decimal=14) - -class TestHMean(TestCase): - def test_1D(self): - a = (1,2,3,4) - actual= mstats.hmean(a) - desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) - assert_almost_equal(actual, desired, decimal=14) - desired1 = mstats.hmean(ma.array(a),axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - # - a = ma.array((1,2,3,4),mask=(0,0,0,1)) - actual= mstats.hmean(a) - desired = 3. 
/ (1./1 + 1./2 + 1./3) - assert_almost_equal(actual, desired,decimal=14) - desired1 = mstats.hmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - - def test_2D(self): - a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)), - mask=((0,0,0,0),(1,0,0,1),(0,1,1,0))) - actual= mstats.hmean(a) - desired = ma.array((1,2,3,4)) - assert_array_almost_equal(actual, desired, decimal=14) - # - actual1 = mstats.hmean(a,axis=-1) - desired = (4./(1/1.+1/2.+1/3.+1/4.), - 2./(1/2.+1/3.), - 2./(1/1.+1/4.) - ) - assert_array_almost_equal(actual1, desired, decimal=14) - - -class TestRanking(TestCase): - # - def __init__(self, *args, **kwargs): - TestCase.__init__(self, *args, **kwargs) - # - def test_ranking(self): - x = ma.array([0,1,1,1,2,3,4,5,5,6,]) - assert_almost_equal(mstats.rankdata(x),[1,3,3,3,5,6,7,8.5,8.5,10]) - x[[3,4]] = masked - assert_almost_equal(mstats.rankdata(x),[1,2.5,2.5,0,0,4,5,6.5,6.5,8]) - assert_almost_equal(mstats.rankdata(x,use_missing=True), - [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8]) - x = ma.array([0,1,5,1,2,4,3,5,1,6,]) - assert_almost_equal(mstats.rankdata(x),[1,3,8.5,3,5,7,6,8.5,3,10]) - x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]]) - assert_almost_equal(mstats.rankdata(x),[[1,3,3,3,5],[6,7,8.5,8.5,10]]) - assert_almost_equal(mstats.rankdata(x,axis=1),[[1,3,3,3,5],[1,2,3.5,3.5,5]]) - assert_almost_equal(mstats.rankdata(x,axis=0),[[1,1,1,1,1],[2,2,2,2,2,]]) - - -class TestCorr(TestCase): - # - def test_pearsonr(self): - "Tests some computations of Pearson's r" - x = ma.arange(10) - olderr = np.seterr(all='ignore') - try: - assert_almost_equal(mstats.pearsonr(x,x)[0], 1.0) - assert_almost_equal(mstats.pearsonr(x,x[::-1])[0], -1.0) - - x = ma.array(x, mask=True) - pr = mstats.pearsonr(x,x) - finally: - np.seterr(**olderr) - assert_(pr[0] is masked) - assert_(pr[1] is masked) - # - def test_spearmanr(self): - "Tests some computations of Spearman's rho" - (x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95]) - assert_almost_equal(mstats.spearmanr(x,y)[0], 
-0.6324555) - (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan]) - (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) - assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) - # - x = [ 2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, - 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7] - y = [22.6, 08.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, - 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4] - assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) - x = [ 2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, - 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan] - y = [22.6, 08.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, - 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan] - (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) - assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) - # - def test_kendalltau(self): - "Tests some computations of Kendall's tau" - x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan]) - y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan]) - z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan]) - assert_almost_equal(np.asarray(mstats.kendalltau(x,y)), - [+0.3333333,0.4969059]) - assert_almost_equal(np.asarray(mstats.kendalltau(x,z)), - [-0.5477226,0.2785987]) - # - x = ma.fix_invalid([ 0, 0, 0, 0,20,20, 0,60, 0,20, - 10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan]) - y = ma.fix_invalid([ 0,80,80,80,10,33,60, 0,67,27, - 25,80,80,80,80,80,80, 0,10,45, np.nan, 0]) - result = mstats.kendalltau(x,y) - assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009]) - # - def test_kendalltau_seasonal(self): - "Tests the seasonal Kendall tau." 
- x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], - [ 4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], - [ 3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], - [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] - x = ma.fix_invalid(x).T - output = mstats.kendalltau_seasonal(x) - assert_almost_equal(output['global p-value (indep)'], 0.008, 3) - assert_almost_equal(output['seasonal p-value'].round(2), - [0.18,0.53,0.20,0.04]) - # - def test_pointbiserial(self): - "Tests point biserial" - x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, - 0,0,0,0,1,-1] - y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, - 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, - 0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan] - assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5) - - -class TestTrimming(TestCase): - # - def test_trim(self): - "Tests trimming" - a = ma.arange(10) - assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9]) - a = ma.arange(10) - assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None]) - a = ma.arange(10) - assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)), - [None,None,None,3,4,5,6,7,None,None]) - a = ma.arange(10) - assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True), - [None,1,2,3,4,5,6,7,None,None]) - # - a = ma.arange(12) - a[[0,-1]] = a[5] = masked - assert_equal(mstats.trim(a,(2,8)), - [None,None,2,3,4,None,6,7,8,None,None,None]) - # - x = ma.arange(100).reshape(10,10) - trimx = mstats.trim(x,(0.1,0.2),relative=True,axis=None) - assert_equal(trimx._mask.ravel(),[1]*10+[0]*70+[1]*20) - trimx = mstats.trim(x,(0.1,0.2),relative=True,axis=0) - assert_equal(trimx._mask.ravel(),[1]*10+[0]*70+[1]*20) - trimx = mstats.trim(x,(0.1,0.2),relative=True,axis=-1) - assert_equal(trimx._mask.T.ravel(),[1]*10+[0]*70+[1]*20) - # - x = ma.arange(110).reshape(11,10) - x[1] = masked - trimx = mstats.trim(x,(0.1,0.2),relative=True,axis=None) - assert_equal(trimx._mask.ravel(),[1]*20+[0]*70+[1]*20) - trimx = 
mstats.trim(x,(0.1,0.2),relative=True,axis=0) - assert_equal(trimx._mask.ravel(),[1]*20+[0]*70+[1]*20) - trimx = mstats.trim(x.T,(0.1,0.2),relative=True,axis=-1) - assert_equal(trimx.T._mask.ravel(),[1]*20+[0]*70+[1]*20) - # - def test_trim_old(self): - "Tests trimming." - x = ma.arange(100) - assert_equal(mstats.trimboth(x).count(), 60) - assert_equal(mstats.trimtail(x,tail='r').count(), 80) - x[50:70] = masked - trimx = mstats.trimboth(x) - assert_equal(trimx.count(), 48) - assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16) - x._mask = nomask - x.shape = (10,10) - assert_equal(mstats.trimboth(x).count(), 60) - assert_equal(mstats.trimtail(x).count(), 80) - # - def test_trimmedmean(self): - "Tests the trimmed mean." - data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262, - 296,299,306,376,428,515,666,1310,2611]) - assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0) - assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0) - assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0) - # - def test_trimmed_stde(self): - "Tests the trimmed mean standard error." - data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262, - 296,299,306,376,428,515,666,1310,2611]) - assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5) - assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5) - # - def test_winsorization(self): - "Tests the Winsorization of the data." 
- data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262, - 296,299,306,376,428,515,666,1310,2611]) - assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1), - 21551.4, 1) - data[5] = masked - winsorized = mstats.winsorize(data) - assert_equal(winsorized.mask, data.mask) - - -class TestMoments(TestCase): - """ - Comparison numbers are found using R v.1.5.1 - note that length(testcase) = 4 - testmathworks comes from documentation for the - Statistics Toolbox for Matlab and can be found at both - http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml - http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml - Note that both test cases came from here. - """ - testcase = [1,2,3,4] - testmathworks = ma.fix_invalid([1.165 , 0.6268, 0.0751, 0.3516, -0.6965, - np.nan]) - def test_moment(self): - """ - mean((testcase-mean(testcase))**power,axis=0),axis=0))**power))""" - y = mstats.moment(self.testcase,1) - assert_almost_equal(y,0.0,10) - y = mstats.moment(self.testcase,2) - assert_almost_equal(y,1.25) - y = mstats.moment(self.testcase,3) - assert_almost_equal(y,0.0) - y = mstats.moment(self.testcase,4) - assert_almost_equal(y,2.5625) - def test_variation(self): - """variation = samplestd/mean """ -## y = stats.variation(self.shoes[0]) -## assert_almost_equal(y,21.8770668) - y = mstats.variation(self.testcase) - assert_almost_equal(y,0.44721359549996, 10) - - def test_skewness(self): - """ - sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0)/((sqrt(var(testmathworks)*4/5))**3)/5 - """ - y = mstats.skew(self.testmathworks) - assert_almost_equal(y,-0.29322304336607,10) - y = mstats.skew(self.testmathworks,bias=0) - assert_almost_equal(y,-0.437111105023940,10) - y = mstats.skew(self.testcase) - assert_almost_equal(y,0.0,10) - - def test_kurtosis(self): - """ - sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4 - 
sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5 - Set flags for axis = 0 and - fisher=0 (Pearson's definition of kurtosis for compatibility with Matlab) - """ - y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1) - assert_almost_equal(y, 2.1658856802973,10) - # Note that MATLAB has confusing docs for the following case - # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness - # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3) - # The MATLAB docs imply that both should give Fisher's - y = mstats.kurtosis(self.testmathworks,fisher=0,bias=0) - assert_almost_equal(y, 3.663542721189047,10) - y = mstats.kurtosis(self.testcase,0,0) - assert_almost_equal(y,1.64) - # - def test_mode(self): - "Tests the mode" - # - a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7] - a2 = np.reshape(a1, (3,5)) - ma1 = ma.masked_where(ma.array(a1)>2,a1) - ma2 = ma.masked_where(a2>2, a2) - assert_equal(mstats.mode(a1, axis=None), (3,4)) - assert_equal(mstats.mode(ma1, axis=None), (0,3)) - assert_equal(mstats.mode(a2, axis=None), (3,4)) - assert_equal(mstats.mode(ma2, axis=None), (0,3)) - assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]],[[1,1,1,1,1]])) - assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]],[[1,1,1,1,1]])) - assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]])) - assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]])) - - -class TestPercentile(TestCase): - def setUp(self): - self.a1 = [3,4,5,10,-3,-5,6] - self.a2 = [3,-6,-2,8,7,4,2,1] - self.a3 = [3.,4,5,10,-3,-5,-6,7.0] - - def test_percentile(self): - x = np.arange(8) * 0.5 - assert_equal(mstats.scoreatpercentile(x, 0), 0.) 
- assert_equal(mstats.scoreatpercentile(x, 100), 3.5) - assert_equal(mstats.scoreatpercentile(x, 50), 1.75) - - def test_2D(self): - x = ma.array([[1, 1, 1], - [1, 1, 1], - [4, 4, 3], - [1, 1, 1], - [1, 1, 1]]) - assert_equal(mstats.scoreatpercentile(x,50), [1,1,1]) - - -class TestVariability(TestCase): - """ Comparison numbers are found using R v.1.5.1 - note that length(testcase) = 4 - """ - testcase = ma.fix_invalid([1,2,3,4,np.nan]) - - def test_signaltonoise(self): - """ - this is not in R, so used - mean(testcase,axis=0)/(sqrt(var(testcase)*3/4)) """ - #y = stats.signaltonoise(self.shoes[0]) - #assert_approx_equal(y,4.5709967) - y = mstats.signaltonoise(self.testcase) - assert_almost_equal(y,2.236067977) - - def test_sem(self): - """ - this is not in R, so used - sqrt(var(testcase)*3/4)/sqrt(3) - """ - #y = stats.sem(self.shoes[0]) - #assert_approx_equal(y,0.775177399) - y = mstats.sem(self.testcase) - assert_almost_equal(y,0.6454972244) - - def test_zmap(self): - """ - not in R, so tested by using - (testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4) - """ - y = mstats.zmap(self.testcase, self.testcase) - desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996 , - 0.44721359549996 , 1.3416407864999]) - assert_array_almost_equal(desired_unmaskedvals, - y.data[y.mask==False], decimal=12) - - def test_zscore(self): - """ - not in R, so tested by using - (testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4) - """ - y = mstats.zscore(self.testcase) - desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996 , - 0.44721359549996 , 1.3416407864999, np.nan]) - assert_almost_equal(desired, y, decimal=12) - - -class TestMisc(TestCase): - # - def test_obrientransform(self): - "Tests Obrien transform" - args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2, - [6]+[7]*2+[8]*4+[9]*9+[10]*16] - result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538], - [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]] - 
assert_almost_equal(np.round(mstats.obrientransform(*args).T,4), - result,4) - # - def test_kstwosamp(self): - "Tests the Kolmogorov-Smirnov 2 samples test" - x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], - [ 4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], - [ 3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], - [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] - x = ma.fix_invalid(x).T - (winter,spring,summer,fall) = x.T - # - assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4), - (0.1818,0.9892)) - assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4), - (0.1469,0.7734)) - assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4), - (0.1818,0.6744)) - # - def test_friedmanchisq(self): - "Tests the Friedman Chi-square test" - # No missing values - args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0], - [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0], - [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0]) - result = mstats.friedmanchisquare(*args) - assert_almost_equal(result[0], 10.4737, 4) - assert_almost_equal(result[1], 0.005317, 6) - # Missing values - x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], - [ 4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], - [ 3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], - [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] - x = ma.fix_invalid(x) - result = mstats.friedmanchisquare(*x) - assert_almost_equal(result[0], 2.0156, 4) - assert_almost_equal(result[1], 0.5692, 4) - - -def test_regress_simple(): - """Regress a line with sinusoidal noise. 
Test for #1273.""" - x = np.linspace(0, 100, 100) - y = 0.2 * np.linspace(0, 100, 100) + 10 - y += np.sin(np.linspace(0, 20, 100)) - - slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y) - assert_almost_equal(slope, 0.19644990055858422) - assert_almost_equal(intercept, 10.211269918932341) - - -def test_plotting_positions(): - """Regression test for #1256""" - pos = mstats.plotting_positions(np.arange(3), 0, 0) - assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75])) - - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/stats/tests/test_mstats_extras.py b/scipy-0.10.1/scipy/stats/tests/test_mstats_extras.py deleted file mode 100644 index 2fe5d72358..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_mstats_extras.py +++ /dev/null @@ -1,103 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for maskedArray statistics. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -""" -__author__ = "Pierre GF Gerard-Marchant ($Author: backtopop $)" - -import numpy as np - -import numpy.ma as ma - -import scipy.stats.mstats as ms -#import scipy.stats.mmorestats as mms - -from numpy.testing import TestCase, run_module_suite, assert_equal, \ - assert_almost_equal, assert_ - - -class TestMisc(TestCase): - # - def __init__(self, *args, **kwargs): - TestCase.__init__(self, *args, **kwargs) - # - def test_mjci(self): - "Tests the Marits-Jarrett estimator" - data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262, - 296,299,306,376,428,515,666,1310,2611]) - assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5) - # - def test_trimmedmeanci(self): - "Tests the confidence intervals of the trimmed mean." 
- data = ma.array([545,555,558,572,575,576,578,580, - 594,605,635,651,653,661,666]) - assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1) - assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1), - [561.8, 630.6]) - # - def test_idealfourths(self): - "Tests ideal-fourths" - test = np.arange(100) - assert_almost_equal(np.asarray(ms.idealfourths(test)), - [24.416667,74.583333],6) - test_2D = test.repeat(3).reshape(-1,3) - assert_almost_equal(ms.idealfourths(test_2D, axis=0), - [[24.416667,24.416667,24.416667], - [74.583333,74.583333,74.583333]],6) - assert_almost_equal(ms.idealfourths(test_2D, axis=1), - test.repeat(2).reshape(-1,2)) - test = [0,0] - _result = ms.idealfourths(test) - assert_(np.isnan(_result).all()) - -#.............................................................................. -class TestQuantiles(TestCase): - # - def __init__(self, *args, **kwargs): - TestCase.__init__(self, *args, **kwargs) - # - def test_hdquantiles(self): - data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014, - 0.887764025,0.239407086,0.349638551,0.972791145,0.149789972, - 0.936947700,0.132359948,0.046041972,0.641675031,0.945530547, - 0.224218684,0.771450991,0.820257774,0.336458052,0.589113496, - 0.509736129,0.696838829,0.491323573,0.622767425,0.775189248, - 0.641461450,0.118455200,0.773029450,0.319280007,0.752229111, - 0.047841438,0.466295911,0.583850781,0.840581845,0.550086491, - 0.466470062,0.504765074,0.226855960,0.362641207,0.891620942, - 0.127898691,0.490094097,0.044882048,0.041441695,0.317976349, - 0.504135618,0.567353033,0.434617473,0.636243375,0.231803616, - 0.230154113,0.160011327,0.819464108,0.854706985,0.438809221, - 0.487427267,0.786907310,0.408367937,0.405534192,0.250444460, - 0.995309248,0.144389588,0.739947527,0.953543606,0.680051621, - 0.388382017,0.863530727,0.006514031,0.118007779,0.924024803, - 0.384236354,0.893687694,0.626534881,0.473051932,0.750134705, - 0.241843555,0.432947602,0.689538104,0.136934797,0.150206859, - 
0.474335206,0.907775349,0.525869295,0.189184225,0.854284286, - 0.831089744,0.251637345,0.587038213,0.254475554,0.237781276, - 0.827928620,0.480283781,0.594514455,0.213641488,0.024194386, - 0.536668589,0.699497811,0.892804071,0.093835427,0.731107772] - # - assert_almost_equal(ms.hdquantiles(data,[0., 1.]), - [0.006514031, 0.995309248]) - hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75]) - assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,]) - hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75]) - assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4) - # - data = np.array(data).reshape(10,10) - hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0) - assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75])) - assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75])) - hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True) - assert_almost_equal(hdq[...,0], - ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True)) - assert_almost_equal(hdq[...,-1], - ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True)) - - -############################################################################### - -if __name__ == "__main__": - run_module_suite() diff --git a/scipy-0.10.1/scipy/stats/tests/test_stats.py b/scipy-0.10.1/scipy/stats/tests/test_stats.py deleted file mode 100644 index f21994af56..0000000000 --- a/scipy-0.10.1/scipy/stats/tests/test_stats.py +++ /dev/null @@ -1,1964 +0,0 @@ -""" Test functions for stats module - - WRITTEN BY LOUIS LUANGKESORN FOR THE STATS MODULE - BASED ON WILKINSON'S STATISTICS QUIZ - http://www.stanford.edu/~clint/bench/wilk.txt - - Additional tests by a host of SciPy developers. 
-""" - -from numpy.testing import TestCase, rand, assert_, assert_equal, \ - assert_almost_equal, assert_array_almost_equal, assert_array_equal, \ - assert_approx_equal, assert_raises, run_module_suite, \ - assert_allclose, dec -from numpy import array, arange, zeros, ravel, float32, float64, power -import numpy as np -import sys - -import scipy.stats as stats - - -""" Numbers in docstrings begining with 'W' refer to the section numbers - and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are - considered to be essential functionality. True testing and - evaluation of a statistics package requires use of the - NIST Statistical test data. See McCoullough(1999) Assessing The Reliability - of Statistical Software for a test methodology and its - implementation in testing SAS, SPSS, and S-Plus -""" - -## Datasets -## These data sets are from the nasty.dat sets used by Wilkinson -## for MISS, need to be able to represent missing values -## For completeness, I should write the relevant tests and count them as failures -## Somewhat acceptable, since this is still beta software. It would count as a -## good target for 1.0 status -X = array([1,2,3,4,5,6,7,8,9],float) -ZERO= array([0,0,0,0,0,0,0,0,0], float) -#MISS=array([.,.,.,.,.,.,.,.,.], float) -BIG=array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,99999998,99999999],float) -LITTLE=array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,0.99999997,0.99999998,0.99999999],float) -HUGE=array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12],float) -TINY=array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12],float) -ROUND=array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5],float) -X2 = X * X -X3 = X2 * X -X4 = X3 * X -X5 = X4 * X -X6 = X5 * X -X7 = X6 * X -X8 = X7 * X -X9 = X8 * X - -class TestRound(TestCase): - """ W.II. ROUND - - You should get the numbers 1 to 9. 
Many language compilers, - such as Turbo Pascal and Lattice C, fail this test (they round - numbers inconsistently). Needless to say, statical packages - written in these languages may fail the test as well. You can - also check the following expressions: - Y = INT(2.6*7 -0.2) (Y should be 18) - Y = 2-INT(EXP(LOG(SQR(2)*SQR(2)))) (Y should be 0) - Y = INT(3-EXP(LOG(SQR(2)*SQR(2)))) (Y should be 1) - INT is the integer function. It converts decimal numbers to - integers by throwing away numbers after the decimal point. EXP - is exponential, LOG is logarithm, and SQR is suqare root. You may - have to substitute similar names for these functions for different - packages. Since the square of a square root should return the same - number, and the exponential of a log should return the same number, - we should get back a 2 from this function of functions. By taking - the integer result and subtracting from 2, we are exposing the - roundoff errors. These simple functions are at the heart of - statistical calculations. - """ - - def test_rounding0(self): - """ W.II.A.0. Print ROUND with only one digit. - - You should get the numbers 1 to 9. Many language compilers, - such as Turbo Pascal and Lattice C, fail this test (they round - numbers inconsistently). Needless to say, statical packages - written in these languages may fail the test as well. - """ - if sys.version_info[0] >= 3: - # round to even - for i in range(0,9): - y = round(ROUND[i]) - assert_equal(y, 2*((i+1)//2)) - else: - for i in range(0,9): - y = round(ROUND[i]) - assert_equal(y,i+1) - - def test_rounding1(self): - """ W.II.A.1. Y = INT(2.6*7 -0.2) (Y should be 18)""" - y = int(2.6*7 -0.2) - assert_equal(y, 18) - - def test_rounding2(self): - """ W.II.A.2. Y = 2-INT(EXP(LOG(SQR(2)*SQR(2)))) (Y should be 0)""" - y=2-int(np.exp(np.log(np.sqrt(2.)*np.sqrt(2.)))) - assert_equal(y,0) - - def test_rounding3(self): - """ W.II.A.3. 
Y = INT(3-EXP(LOG(SQR(2)*SQR(2)))) (Y should be 1)""" - y=(int(round((3-np.exp(np.log(np.sqrt(2.0)*np.sqrt(2.0))))))) - assert_equal(y,1) - -class TestBasicStats(TestCase): - """ W.II.C. Compute basic statistic on all the variables. - - The means should be the fifth value of all the variables (case FIVE). - The standard deviations should be "undefined" or missing for MISS, - 0 for ZERO, and 2.738612788 (times 10 to a power) for all the other variables. - II. C. Basic Statistics - """ - - dprec = np.finfo(np.float64).precision - - # Really need to write these tests to handle missing values properly - def test_tmeanX(self): - y = stats.tmean(X, (2, 8), (True, True)) - assert_approx_equal(y, 5.0, significant=TestBasicStats.dprec) - - def test_tvarX(self): - y = stats.tvar(X, (2, 8), (True, True)) - assert_approx_equal(y, 4.6666666666666661, - significant=TestBasicStats.dprec) - - def test_tstdX(self): - y = stats.tstd(X, (2, 8), (True, True)) - assert_approx_equal(y, 2.1602468994692865, - significant=TestBasicStats.dprec) - - - -class TestNanFunc(TestCase): - def __init__(self, *args, **kw): - TestCase.__init__(self, *args, **kw) - self.X = X.copy() - - self.Xall = X.copy() - self.Xall[:] = np.nan - - self.Xsome = X.copy() - self.Xsomet = X.copy() - self.Xsome[0] = np.nan - self.Xsomet = self.Xsomet[1:] - - def test_nanmean_none(self): - """Check nanmean when no values are nan.""" - m = stats.nanmean(X) - assert_approx_equal(m, X[4]) - - def test_nanmean_some(self): - """Check nanmean when some values only are nan.""" - m = stats.nanmean(self.Xsome) - assert_approx_equal(m, 5.5) - - def test_nanmean_all(self): - """Check nanmean when all values are nan.""" - olderr = np.seterr(all='ignore') - try: - m = stats.nanmean(self.Xall) - finally: - np.seterr(**olderr) - assert_(np.isnan(m)) - - def test_nanstd_none(self): - """Check nanstd when no values are nan.""" - s = stats.nanstd(self.X) - assert_approx_equal(s, np.std(self.X, ddof=1)) - - def test_nanstd_some(self): - 
"""Check nanstd when some values only are nan.""" - s = stats.nanstd(self.Xsome) - assert_approx_equal(s, np.std(self.Xsomet, ddof=1)) - - def test_nanstd_all(self): - """Check nanstd when all values are nan.""" - olderr = np.seterr(all='ignore') - try: - s = stats.nanstd(self.Xall) - finally: - np.seterr(**olderr) - assert_(np.isnan(s)) - - def test_nanstd_negative_axis(self): - x = np.array([1, 2, 3]) - assert_equal(stats.nanstd(x, -1), 1) - - def test_nanmedian_none(self): - """Check nanmedian when no values are nan.""" - m = stats.nanmedian(self.X) - assert_approx_equal(m, np.median(self.X)) - - def test_nanmedian_some(self): - """Check nanmedian when some values only are nan.""" - m = stats.nanmedian(self.Xsome) - assert_approx_equal(m, np.median(self.Xsomet)) - - def test_nanmedian_all(self): - """Check nanmedian when all values are nan.""" - m = stats.nanmedian(self.Xall) - assert_(np.isnan(m)) - - def test_nanmedian_scalars(self): - """Check nanmedian for scalar inputs. See ticket #1098.""" - assert_equal(stats.nanmedian(1), np.median(1)) - assert_equal(stats.nanmedian(True), np.median(True)) - assert_equal(stats.nanmedian(np.array(1)), np.median(np.array(1))) - assert_equal(stats.nanmedian(np.nan), np.median(np.nan)) - - -class TestCorrPearsonr(TestCase): - """ W.II.D. Compute a correlation matrix on all the variables. - - All the correlations, except for ZERO and MISS, shoud be exactly 1. - ZERO and MISS should have undefined or missing correlations with the - other variables. The same should go for SPEARMAN corelations, if - your program has them. 
- """ - def test_pXX(self): - y = stats.pearsonr(X,X) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pXBIG(self): - y = stats.pearsonr(X,BIG) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pXLITTLE(self): - y = stats.pearsonr(X,LITTLE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pXHUGE(self): - y = stats.pearsonr(X,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pXTINY(self): - y = stats.pearsonr(X,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pXROUND(self): - y = stats.pearsonr(X,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pBIGBIG(self): - y = stats.pearsonr(BIG,BIG) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pBIGLITTLE(self): - y = stats.pearsonr(BIG,LITTLE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pBIGHUGE(self): - y = stats.pearsonr(BIG,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pBIGTINY(self): - y = stats.pearsonr(BIG,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pBIGROUND(self): - y = stats.pearsonr(BIG,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pLITTLELITTLE(self): - y = stats.pearsonr(LITTLE,LITTLE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pLITTLEHUGE(self): - y = stats.pearsonr(LITTLE,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pLITTLETINY(self): - y = stats.pearsonr(LITTLE,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pLITTLEROUND(self): - y = stats.pearsonr(LITTLE,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pHUGEHUGE(self): - y = stats.pearsonr(HUGE,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pHUGETINY(self): - y = stats.pearsonr(HUGE,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pHUGEROUND(self): - y = stats.pearsonr(HUGE,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pTINYTINY(self): - y = stats.pearsonr(TINY,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_pTINYROUND(self): - y = stats.pearsonr(TINY,ROUND) - r = y[0] - 
assert_approx_equal(r,1.0) - - def test_pROUNDROUND(self): - y = stats.pearsonr(ROUND,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_r_exactly_pos1(self): - a = arange(3.0) - b = a - r, prob = stats.pearsonr(a,b) - assert_equal(r, 1.0) - assert_equal(prob, 0.0) - - def test_r_exactly_neg1(self): - a = arange(3.0) - b = -a - r, prob = stats.pearsonr(a,b) - assert_equal(r, -1.0) - assert_equal(prob, 0.0) - -class TestFisherExact(TestCase): - """Some tests to show that fisher_exact() works correctly. - - Note that in SciPy 0.9.0 this was not working well for large numbers due to - inaccuracy of the hypergeom distribution (see #1218). Fixed now. - - Also note that R and Scipy have different argument formats for their - hypergeometric distribution functions. - - R: - > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE) - [1] 1.701815e-09 - """ - def test_basic(self): - fisher_exact = stats.fisher_exact - - res = fisher_exact([[14500, 20000], [30000, 40000]])[1] - assert_approx_equal(res, 0.01106, significant=4) - res = fisher_exact([[100, 2], [1000, 5]])[1] - assert_approx_equal(res, 0.1301, significant=4) - res = fisher_exact([[2, 7], [8, 2]])[1] - assert_approx_equal(res, 0.0230141, significant=6) - res = fisher_exact([[5, 1], [10, 10]])[1] - assert_approx_equal(res, 0.1973244, significant=6) - res = fisher_exact([[5, 15], [20, 20]])[1] - assert_approx_equal(res, 0.0958044, significant=6) - res = fisher_exact([[5, 16], [20, 25]])[1] - assert_approx_equal(res, 0.1725862, significant=6) - res = fisher_exact([[10, 5], [10, 1]])[1] - assert_approx_equal(res, 0.1973244, significant=6) - res = fisher_exact([[5, 0], [1, 4]])[1] - assert_approx_equal(res, 0.04761904, significant=6) - res = fisher_exact([[0, 1], [3, 2]])[1] - assert_approx_equal(res, 1.0) - res = fisher_exact([[0, 2], [6, 4]])[1] - assert_approx_equal(res, 0.4545454545) - res = fisher_exact([[2, 7], [8, 2]]) - assert_approx_equal(res[1], 0.0230141, significant=6) - assert_approx_equal(res[0], 
4.0 / 56) - - def test_precise(self): - fisher_exact = stats.fisher_exact - - # results from R - # - # R defines oddsratio differently (see Notes section of fisher_exact - # docstring), so those will not match. We leave them in anyway, in - # case they will be useful later on. We test only the p-value. - tablist = [ - ([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)), - ([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)), - ([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)), - ([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)), - ([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)), - ([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)), - ([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)), - ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)), - ([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)), - ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)), - ([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000)) - ] - for table, res_r in tablist: - res = stats.fisher_exact(np.asarray(table)) - np.testing.assert_almost_equal(res[1], res_r[1], decimal=11, - verbose=True) - - @dec.slow - def test_large_numbers(self): - # Test with some large numbers. Regression test for #1401 - pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R - for pval, num in zip(pvals, [75, 76, 77]): - res = stats.fisher_exact([[17704, 496], [1065, num]])[1] - assert_approx_equal(res, pval, significant=4) - - res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1] - assert_approx_equal(res, 0.2751, significant=4) - - def test_raises(self): - # test we raise an error for wrong shape of input. 
- assert_raises(ValueError, stats.fisher_exact, - np.arange(6).reshape(2, 3)) - - def test_row_or_col_zero(self): - tables = ([[0, 0], [5, 10]], - [[5, 10], [0, 0]], - [[0, 5], [0, 10]], - [[5, 0], [10, 0]]) - for table in tables: - oddsratio, pval = stats.fisher_exact(table) - assert_equal(pval, 1.0) - assert_equal(oddsratio, np.nan) - - def test_less_greater(self): - tables = ( - # Some tables to compare with R: - [[2, 7], [8, 2]], - [[200, 7], [8, 300]], - [[28, 21], [6, 1957]], - [[190, 800], [200, 900]], - # Some tables with simple exact values - # (includes regression test for ticket #1568): - [[0, 2], [3, 0]], - [[1, 1], [2, 1]], - [[2, 0], [1, 2]], - [[0, 1], [2, 3]], - [[1, 0], [1, 4]], - ) - pvals = ( - # from R: - [0.018521725952066501, 0.9990149169715733], - [1.0, 2.0056578803889148e-122], - [1.0, 5.7284374608319831e-44], - [0.7416227, 0.2959826], - # Exact: - [0.1, 1.0], - [0.7, 0.9], - [1.0, 0.3], - [2./3, 1.0], - [1.0, 1./3], - ) - for table, pval in zip(tables, pvals): - res = [] - res.append(stats.fisher_exact(table, alternative="less")[1]) - res.append(stats.fisher_exact(table, alternative="greater")[1]) - assert_allclose(res, pval, atol=0, rtol=1e-7) - - -class TestCorrSpearmanr(TestCase): - """ W.II.D. Compute a correlation matrix on all the variables. - - All the correlations, except for ZERO and MISS, shoud be exactly 1. - ZERO and MISS should have undefined or missing correlations with the - other variables. The same should go for SPEARMAN corelations, if - your program has them. 
- """ - def test_sXX(self): - y = stats.spearmanr(X,X) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sXBIG(self): - y = stats.spearmanr(X,BIG) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sXLITTLE(self): - y = stats.spearmanr(X,LITTLE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sXHUGE(self): - y = stats.spearmanr(X,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sXTINY(self): - y = stats.spearmanr(X,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sXROUND(self): - y = stats.spearmanr(X,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sBIGBIG(self): - y = stats.spearmanr(BIG,BIG) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sBIGLITTLE(self): - y = stats.spearmanr(BIG,LITTLE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sBIGHUGE(self): - y = stats.spearmanr(BIG,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sBIGTINY(self): - y = stats.spearmanr(BIG,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sBIGROUND(self): - y = stats.spearmanr(BIG,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sLITTLELITTLE(self): - y = stats.spearmanr(LITTLE,LITTLE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sLITTLEHUGE(self): - y = stats.spearmanr(LITTLE,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sLITTLETINY(self): - y = stats.spearmanr(LITTLE,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sLITTLEROUND(self): - y = stats.spearmanr(LITTLE,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sHUGEHUGE(self): - y = stats.spearmanr(HUGE,HUGE) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sHUGETINY(self): - y = stats.spearmanr(HUGE,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sHUGEROUND(self): - y = stats.spearmanr(HUGE,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sTINYTINY(self): - y = stats.spearmanr(TINY,TINY) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sTINYROUND(self): - y = 
stats.spearmanr(TINY,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - - def test_sROUNDROUND(self): - y = stats.spearmanr(ROUND,ROUND) - r = y[0] - assert_approx_equal(r,1.0) - -class TestCorrSpearmanrTies(TestCase): - """Some tests of tie-handling by the spearmanr function.""" - - def test_tie1(self): - # Data - x = [1.0, 2.0, 3.0, 4.0] - y = [1.0, 2.0, 2.0, 3.0] - # Ranks of the data, with tie-handling. - xr = [1.0, 2.0, 3.0, 4.0] - yr = [1.0, 2.5, 2.5, 4.0] - # Result of spearmanr should be the same as applying - # pearsonr to the ranks. - sr = stats.spearmanr(x, y) - pr = stats.pearsonr(xr, yr) - assert_almost_equal(sr, pr) - - -## W.II.E. Tabulate X against X, using BIG as a case weight. The values -## should appear on the diagonal and the total should be 899999955. -## If the table cannot hold these values, forget about working with -## census data. You can also tabulate HUGE against TINY. There is no -## reason a tabulation program should not be able to distinguish -## different values regardless of their magnitude. - -### I need to figure out how to do this one. 
- - -def test_kendalltau(): - """Some tests for kendalltau.""" - - # with some ties - x1 = [12, 2, 1, 12, 2] - x2 = [1, 4, 7, 1, 0] - expected = (-0.47140452079103173, 0.24821309157521476) - res = stats.kendalltau(x1, x2) - assert_approx_equal(res[0], expected[0]) - assert_approx_equal(res[1], expected[1]) - - # check two different sort methods - assert_approx_equal(stats.kendalltau(x1, x2, initial_lexsort=False)[1], - stats.kendalltau(x1, x2, initial_lexsort=True)[1]) - - # and with larger arrays - np.random.seed(7546) - x = np.array([np.random.normal(loc=1, scale=1, size=500), - np.random.normal(loc=1, scale=1, size=500)]) - corr = [[1.0, 0.3], - [0.3, 1.0]] - x = np.dot(np.linalg.cholesky(corr), x) - expected = (0.19291382765531062, 1.1337108207276285e-10) - res = stats.kendalltau(x[0], x[1]) - assert_approx_equal(res[0], expected[0]) - assert_approx_equal(res[1], expected[1]) - - # and do we get a tau of 1 for identical inputs? - assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0) - - -class TestRegression(TestCase): - def test_linregressBIGX(self): - """ W.II.F. Regress BIG on X. - - The constant should be 99999990 and the regression coefficient should be 1. - """ - y = stats.linregress(X,BIG) - intercept = y[1] - r=y[2] - assert_almost_equal(intercept,99999990) - assert_almost_equal(r,1.0) - -## W.IV.A. Take the NASTY dataset above. Use the variable X as a -## basis for computing polynomials. Namely, compute X1=X, X2=X*X, -## X3=X*X*X, and so on up to 9 products. Use the algebraic -## transformation language within the statistical package itself. You -## will end up with 9 variables. Now regress X1 on X2-X9 (a perfect -## fit). If the package balks (singular or roundoff error messages), -## try X1 on X2-X8, and so on. Most packages cannot handle more than -## a few polynomials. -## Scipy's stats.py does not seem to handle multiple linear regression -## The datasets X1 . . X9 are at the top of the file. - - - def test_regressXX(self): - """ W.IV.B. 
Regress X on X. - - The constant should be exactly 0 and the regression coefficient should be 1. - This is a perfectly valid regression. The program should not complain. - """ - y = stats.linregress(X,X) - intercept = y[1] - r=y[2] - assert_almost_equal(intercept,0.0) - assert_almost_equal(r,1.0) -## W.IV.C. Regress X on BIG and LITTLE (two predictors). The program -## should tell you that this model is "singular" because BIG and -## LITTLE are linear combinations of each other. Cryptic error -## messages are unacceptable here. Singularity is the most -## fundamental regression error. -### Need to figure out how to handle multiple linear regression. Not obvious - - def test_regressZEROX(self): - """ W.IV.D. Regress ZERO on X. - - The program should inform you that ZERO has no variance or it should - go ahead and compute the regression and report a correlation and - total sum of squares of exactly 0. - """ - y = stats.linregress(X,ZERO) - intercept = y[1] - r=y[2] - assert_almost_equal(intercept,0.0) - assert_almost_equal(r,0.0) - - def test_regress_simple(self): - """Regress a line with sinusoidal noise.""" - x = np.linspace(0, 100, 100) - y = 0.2 * np.linspace(0, 100, 100) + 10 - y += np.sin(np.linspace(0, 20, 100)) - - res = stats.linregress(x, y) - assert_almost_equal(res[4], 2.3957814497838803e-3) #4.3609875083149268e-3) - - def test_regress_simple_onearg_rows(self): - """Regress a line with sinusoidal noise, with a single input of shape - (2, N). - """ - x = np.linspace(0, 100, 100) - y = 0.2 * np.linspace(0, 100, 100) + 10 - y += np.sin(np.linspace(0, 20, 100)) - rows = np.vstack((x, y)) - - res = stats.linregress(rows) - assert_almost_equal(res[4], 2.3957814497838803e-3) #4.3609875083149268e-3) - - def test_regress_simple_onearg_cols(self): - """Regress a line with sinusoidal noise, with a single input of shape - (N, 2). 
- """ - x = np.linspace(0, 100, 100) - y = 0.2 * np.linspace(0, 100, 100) + 10 - y += np.sin(np.linspace(0, 20, 100)) - cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1))) - - res = stats.linregress(cols) - assert_almost_equal(res[4], 2.3957814497838803e-3) #4.3609875083149268e-3) - - def test_regress_shape_error(self): - """Check that a single input argument to linregress with wrong shape - results in a ValueError.""" - assert_raises(ValueError, stats.linregress, np.ones((3, 3))) - - def test_linregress(self): - '''compared with multivariate ols with pinv''' - x = np.arange(11) - y = np.arange(5,16) - y[[(1),(-2)]] -= 1 - y[[(0),(-1)]] += 1 - - res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733) - assert_array_almost_equal(stats.linregress(x,y),res,decimal=14) - -class TestHistogram(TestCase): - """ Tests that histogram works as it should, and keeps old behaviour - """ - # what is untested: - # - multidimensional arrays (since 'a' is ravel'd as the first line in the method) - # - very large arrays - # - Nans, Infs, empty and otherwise bad inputs - - # sample arrays to test the histogram with - low_values = np.array([0.2, 0.3, 0.4, 0.5, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1, 1.2], - dtype=float) # 11 values - high_range = np.array([2, 3, 4, 2, 21, 32, 78, 95, 65, 66, 66, 66, 66, 4], - dtype=float) # 14 values - low_range = np.array([2, 3, 3, 2, 3, 2.4, 2.1, 3.1, 2.9, 2.6, 2.7, 2.8, 2.2, 2.001], - dtype=float) # 14 values - few_values = np.array([2.0, 3.0, -1.0, 0.0], dtype=float) # 4 values - - def test_simple(self): - """ Tests that each of the tests works as expected with default params - """ - # basic tests, with expected results (no weighting) - # results taken from the previous (slower) version of histogram - basic_tests = ((self.low_values, (np.array([ 1., 1., 1., 2., 2., - 1., 1., 0., 1., 1.]), - 0.14444444444444446, 0.11111111111111112, 0)), - (self.high_range, (np.array([ 5., 0., 1., 1., 0., - 0., 5., 1., 0., 1.]), - 
-3.1666666666666661, 10.333333333333332, 0)), - (self.low_range, (np.array([ 3., 1., 1., 1., 0., 1., - 1., 2., 3., 1.]), - 1.9388888888888889, 0.12222222222222223, 0)), - (self.few_values, (np.array([ 1., 0., 1., 0., 0., 0., - 0., 1., 0., 1.]), - -1.2222222222222223, 0.44444444444444448, 0)), - ) - for inputs, expected_results in basic_tests: - given_results = stats.histogram(inputs) - assert_array_almost_equal(expected_results[0], given_results[0], - decimal=2) - for i in range(1, 4): - assert_almost_equal(expected_results[i], given_results[i], - decimal=2) - - def test_weighting(self): - """ Tests that weights give expected histograms - """ - # basic tests, with expected results, given a set of weights - # weights used (first n are used for each test, where n is len of array) (14 values) - weights = np.array([1., 3., 4.5, 0.1, -1.0, 0.0, 0.3, 7.0, 103.2, 2, 40, 0, 0, 1]) - # results taken from the numpy version of histogram - basic_tests = ((self.low_values, (np.array([ 4.0, 0.0, 4.5, -0.9, 0.0, - 0.3,110.2, 0.0, 0.0, 42.0]), - 0.2, 0.1, 0)), - (self.high_range, (np.array([ 9.6, 0. , -1. , 0. , 0. , - 0. ,145.2, 0. , 0.3, 7. ]), - 2.0, 9.3, 0)), - (self.low_range, (np.array([ 2.4, 0. , 0. , 0. , 0. , - 2. , 40. , 0. , 103.2, 13.5]), - 2.0, 0.11, 0)), - (self.few_values, (np.array([ 4.5, 0. , 0.1, 0. , 0. , 0. , - 0. , 1. , 0. , 3. 
]), - -1., 0.4, 0)), - - ) - for inputs, expected_results in basic_tests: - # use the first lot of weights for test - # default limits given to reproduce output of numpy's test better - given_results = stats.histogram(inputs, defaultlimits=(inputs.min(), - inputs.max()), - weights=weights[:len(inputs)]) - assert_array_almost_equal(expected_results[0], given_results[0], - decimal=2) - for i in range(1, 4): - assert_almost_equal(expected_results[i], given_results[i], - decimal=2) - - def test_reduced_bins(self): - """ Tests that reducing the number of bins produces expected results - """ - # basic tests, with expected results (no weighting), - # except number of bins is halved to 5 - # results taken from the previous (slower) version of histogram - basic_tests = ((self.low_values, (np.array([ 2., 3., 3., 1., 2.]), - 0.075000000000000011, 0.25, 0)), - (self.high_range, (np.array([ 5., 2., 0., 6., 1.]), - -9.625, 23.25, 0)), - (self.low_range, (np.array([ 4., 2., 1., 3., 4.]), - 1.8625, 0.27500000000000002, 0)), - (self.few_values, (np.array([ 1., 1., 0., 1., 1.]), - -1.5, 1.0, 0)), - ) - for inputs, expected_results in basic_tests: - given_results = stats.histogram(inputs, numbins=5) - assert_array_almost_equal(expected_results[0], given_results[0], - decimal=2) - for i in range(1, 4): - assert_almost_equal(expected_results[i], given_results[i], - decimal=2) - - def test_increased_bins(self): - """ Tests that increasing the number of bins produces expected results - """ - # basic tests, with expected results (no weighting), - # except number of bins is double to 20 - # results taken from the previous (slower) version of histogram - basic_tests = ((self.low_values, (np.array([ 1., 0., 1., 0., 1., - 0., 2., 0., 1., 0., - 1., 1., 0., 1., 0., - 0., 0., 1., 0., 1.]), - 0.1736842105263158, 0.052631578947368418, 0)), - (self.high_range, (np.array([ 5., 0., 0., 0., 1., - 0., 1., 0., 0., 0., - 0., 0., 0., 5., 0., - 0., 1., 0., 0., 1.]), - -0.44736842105263142, 
4.8947368421052628, 0)), - (self.low_range, (np.array([ 3., 0., 1., 1., 0., 0., - 0., 1., 0., 0., 1., 0., - 1., 0., 1., 0., 1., 3., - 0., 1.]), - 1.9710526315789474, 0.057894736842105263, 0)), - (self.few_values, (np.array([ 1., 0., 0., 0., 0., 1., - 0., 0., 0., 0., 0., 0., - 0., 0., 1., 0., 0., 0., - 0., 1.]), - -1.1052631578947367, 0.21052631578947367, 0)), - ) - for inputs, expected_results in basic_tests: - given_results = stats.histogram(inputs, numbins=20) - assert_array_almost_equal(expected_results[0], given_results[0], - decimal=2) - for i in range(1, 4): - assert_almost_equal(expected_results[i], given_results[i], - decimal=2) - - -def test_cumfreq(): - x = [1, 4, 2, 1, 3, 1] - cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4) - assert_array_almost_equal(cumfreqs, np.array([ 3., 4., 5., 6.])) - cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4, - defaultreallimits=(1.5, 5)) - assert_(extrapoints==3) - - -def test_relfreq(): - a = np.array([1, 4, 2, 1, 3, 1]) - relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4) - assert_array_almost_equal(relfreqs, array([0.5, 0.16666667, 0.16666667, 0.16666667])) - - # check array_like input is accepted - relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1], numbins=4) - assert_array_almost_equal(relfreqs, relfreqs2) - - -# Utility - -def compare_results(res,desired): - for i in range(len(desired)): - assert_array_equal(res[i],desired[i]) - - -################################################## -### Test for sum - -class TestGMean(TestCase): - - def test_1D_list(self): - a = (1,2,3,4) - actual= stats.gmean(a) - desired = power(1*2*3*4,1./4.) - assert_almost_equal(actual, desired,decimal=14) - - desired1 = stats.gmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - - def test_1D_array(self): - a = array((1,2,3,4), float32) - actual= stats.gmean(a) - desired = power(1*2*3*4,1./4.) 
- assert_almost_equal(actual, desired, decimal=7) - - desired1 = stats.gmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=7) - - def test_2D_array_default(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - actual= stats.gmean(a) - desired = array((1,2,3,4)) - assert_array_almost_equal(actual, desired, decimal=14) - - desired1 = stats.gmean(a,axis=0) - assert_array_almost_equal(actual, desired1, decimal=14) - - def test_2D_array_dim1(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - actual= stats.gmean(a, axis=1) - v = power(1*2*3*4,1./4.) - desired = array((v,v,v)) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_large_values(self): - a = array([1e100, 1e200, 1e300]) - actual = stats.gmean(a) - assert_approx_equal(actual, 1e200, significant=14) - -class TestHMean(TestCase): - def test_1D_list(self): - a = (1,2,3,4) - actual= stats.hmean(a) - desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) - assert_almost_equal(actual, desired, decimal=14) - - desired1 = stats.hmean(array(a),axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - def test_1D_array(self): - a = array((1,2,3,4), float64) - actual= stats.hmean(a) - desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) - assert_almost_equal(actual, desired, decimal=14) - - desired1 = stats.hmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - - def test_2D_array_default(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - actual = stats.hmean(a) - desired = array((1.,2.,3.,4.)) - assert_array_almost_equal(actual, desired, decimal=14) - - actual1 = stats.hmean(a,axis=0) - assert_array_almost_equal(actual1, desired, decimal=14) - - def test_2D_array_dim1(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - - v = 4. 
/ (1./1 + 1./2 + 1./3 + 1./4) - desired1 = array((v,v,v)) - actual1 = stats.hmean(a, axis=1) - assert_array_almost_equal(actual1, desired1, decimal=14) - - -class TestPercentile(TestCase): - def setUp(self): - self.a1 = [3,4,5,10,-3,-5,6] - self.a2 = [3,-6,-2,8,7,4,2,1] - self.a3 = [3.,4,5,10,-3,-5,-6,7.0] - - def test_percentile(self): - x = arange(8) * 0.5 - assert_equal(stats.scoreatpercentile(x, 0), 0.) - assert_equal(stats.scoreatpercentile(x, 100), 3.5) - assert_equal(stats.scoreatpercentile(x, 50), 1.75) - - def test_2D(self): - x = array([[1, 1, 1], - [1, 1, 1], - [4, 4, 3], - [1, 1, 1], - [1, 1, 1]]) - assert_array_equal(stats.scoreatpercentile(x,50), - [1,1,1]) - - -class TestCMedian(TestCase): - def test_basic(self): - data = [1,2,3,1,5,3,6,4,3,2,4,3,5,2.0] - assert_almost_equal(stats.cmedian(data,5),3.2916666666666665) - assert_almost_equal(stats.cmedian(data,3),3.083333333333333) - assert_almost_equal(stats.cmedian(data),3.0020020020020022) - - -class TestMode(TestCase): - def test_basic(self): - data1 = [3,5,1,10,23,3,2,6,8,6,10,6] - vals = stats.mode(data1) - assert_almost_equal(vals[0][0],6) - assert_almost_equal(vals[1][0],3) - - -class TestVariability(TestCase): - - testcase = [1,2,3,4] - - def test_signaltonoise(self): - """ - this is not in R, so used - mean(testcase,axis=0)/(sqrt(var(testcase)*3/4)) """ - #y = stats.signaltonoise(self.shoes[0]) - #assert_approx_equal(y,4.5709967) - y = stats.signaltonoise(self.testcase) - assert_approx_equal(y,2.236067977) - - def test_sem(self): - """ - this is not in R, so used - sqrt(var(testcase)*3/4)/sqrt(3) - """ - #y = stats.sem(self.shoes[0]) - #assert_approx_equal(y,0.775177399) - y = stats.sem(self.testcase) - assert_approx_equal(y,0.6454972244) - - def test_zmap(self): - """ - not in R, so tested by using - (testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4) - """ - y = stats.zmap(self.testcase,self.testcase) - desired = ([-1.3416407864999, -0.44721359549996 , 0.44721359549996 , 
1.3416407864999]) - assert_array_almost_equal(desired,y,decimal=12) - - def test_zmap_axis(self): - """Test use of 'axis' keyword in zmap.""" - x = np.array([[0.0, 0.0, 1.0, 1.0], - [1.0, 1.0, 1.0, 2.0], - [2.0, 0.0, 2.0, 0.0]]) - - t1 = 1.0/np.sqrt(2.0/3) - t2 = np.sqrt(3.)/3 - t3 = np.sqrt(2.) - - z0 = stats.zmap(x, x, axis=0) - z1 = stats.zmap(x, x, axis=1) - - z0_expected = [[-t1, -t3/2, -t3/2, 0.0], - [0.0, t3, -t3/2, t1], - [t1, -t3/2, t3, -t1]] - z1_expected = [[-1.0, -1.0, 1.0, 1.0], - [-t2, -t2, -t2, np.sqrt(3.)], - [1.0, -1.0, 1.0, -1.0]] - - assert_array_almost_equal(z0, z0_expected) - assert_array_almost_equal(z1, z1_expected) - - def test_zmap_ddof(self): - """Test use of 'ddof' keyword in zmap.""" - x = np.array([[0.0, 0.0, 1.0, 1.0], - [0.0, 1.0, 2.0, 3.0]]) - - t1 = 1.0/np.sqrt(2.0/3) - t2 = np.sqrt(3.)/3 - t3 = np.sqrt(2.) - - z = stats.zmap(x, x, axis=1, ddof=1) - - z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) - z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) - assert_array_almost_equal(z[0], z0_expected) - assert_array_almost_equal(z[1], z1_expected) - - def test_zscore(self): - """ - not in R, so tested by using - (testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4) - """ - y = stats.zscore(self.testcase) - desired = ([-1.3416407864999, -0.44721359549996 , 0.44721359549996 , 1.3416407864999]) - assert_array_almost_equal(desired,y,decimal=12) - - def test_zscore_axis(self): - """Test use of 'axis' keyword in zscore.""" - x = np.array([[0.0, 0.0, 1.0, 1.0], - [1.0, 1.0, 1.0, 2.0], - [2.0, 0.0, 2.0, 0.0]]) - - t1 = 1.0/np.sqrt(2.0/3) - t2 = np.sqrt(3.)/3 - t3 = np.sqrt(2.) 
- - z0 = stats.zscore(x, axis=0) - z1 = stats.zscore(x, axis=1) - - z0_expected = [[-t1, -t3/2, -t3/2, 0.0], - [0.0, t3, -t3/2, t1], - [t1, -t3/2, t3, -t1]] - z1_expected = [[-1.0, -1.0, 1.0, 1.0], - [-t2, -t2, -t2, np.sqrt(3.)], - [1.0, -1.0, 1.0, -1.0]] - - assert_array_almost_equal(z0, z0_expected) - assert_array_almost_equal(z1, z1_expected) - - def test_zscore_ddof(self): - """Test use of 'ddof' keyword in zscore.""" - x = np.array([[0.0, 0.0, 1.0, 1.0], - [0.0, 1.0, 2.0, 3.0]]) - - t1 = 1.0/np.sqrt(2.0/3) - t2 = np.sqrt(3.)/3 - t3 = np.sqrt(2.) - - z = stats.zscore(x, axis=1, ddof=1) - - z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) - z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) - assert_array_almost_equal(z[0], z0_expected) - assert_array_almost_equal(z[1], z1_expected) - - -class TestMoments(TestCase): - """ - Comparison numbers are found using R v.1.5.1 - note that length(testcase) = 4 - testmathworks comes from documentation for the - Statistics Toolbox for Matlab and can be found at both - http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml - http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml - Note that both test cases came from here. 
- """ - testcase = [1,2,3,4] - testmathworks = [1.165 , 0.6268, 0.0751, 0.3516, -0.6965] - def test_moment(self): - """ - mean((testcase-mean(testcase))**power,axis=0),axis=0))**power))""" - y = stats.moment(self.testcase,1) - assert_approx_equal(y,0.0,10) - y = stats.moment(self.testcase,2) - assert_approx_equal(y,1.25) - y = stats.moment(self.testcase,3) - assert_approx_equal(y,0.0) - y = stats.moment(self.testcase,4) - assert_approx_equal(y,2.5625) - def test_variation(self): - """ - variation = samplestd/mean """ -## y = stats.variation(self.shoes[0]) -## assert_approx_equal(y,21.8770668) - y = stats.variation(self.testcase) - assert_approx_equal(y,0.44721359549996, 10) - - def test_skewness(self): - """ - sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0)/ - ((sqrt(var(testmathworks)*4/5))**3)/5 - """ - y = stats.skew(self.testmathworks) - assert_approx_equal(y,-0.29322304336607,10) - y = stats.skew(self.testmathworks,bias=0) - assert_approx_equal(y,-0.437111105023940,10) - y = stats.skew(self.testcase) - assert_approx_equal(y,0.0,10) - - def test_skewness_scalar(self): - """ - `skew` must return a scalar for 1-dim input - """ - assert_equal(stats.skew(arange(10)), 0.0) - - def test_kurtosis(self): - """ - sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4 - sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5 - Set flags for axis = 0 and - fisher=0 (Pearson's defn of kurtosis for compatiability with Matlab) - """ - y = stats.kurtosis(self.testmathworks,0,fisher=0,bias=1) - assert_approx_equal(y, 2.1658856802973,10) - - # Note that MATLAB has confusing docs for the following case - # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness - # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3) - # The MATLAB docs imply that both should give Fisher's - y = stats.kurtosis(self.testmathworks,fisher=0,bias=0) - assert_approx_equal(y, 3.663542721189047,10) - y = 
stats.kurtosis(self.testcase,0,0) - assert_approx_equal(y,1.64) - - def test_kurtosis_array_scalar(self): - assert_equal(type(stats.kurtosis([1,2,3])), float) - -class TestThreshold(TestCase): - def test_basic(self): - a = [-1,2,3,4,5,-1,-2] - assert_array_equal(stats.threshold(a),a) - assert_array_equal(stats.threshold(a,3,None,0), - [0,0,3,4,5,0,0]) - assert_array_equal(stats.threshold(a,None,3,0), - [-1,2,3,0,0,-1,-2]) - assert_array_equal(stats.threshold(a,2,4,0), - [0,2,3,4,0,0,0]) - -# Hypothesis test tests -class TestStudentTest(TestCase): - X1 = np.array([-1, 0, 1]) - X2 = np.array([0, 1, 2]) - T1_0 = 0 - P1_0 = 1 - T1_1 = -1.732051 - P1_1 = 0.2254033 - T1_2 = -3.464102 - P1_2 = 0.0741799 - T2_0 = 1.732051 - P2_0 = 0.2254033 - def test_onesample(self): - t, p = stats.ttest_1samp(self.X1, 0) - - assert_array_almost_equal(t, self.T1_0) - assert_array_almost_equal(p, self.P1_0) - - t, p = stats.ttest_1samp(self.X2, 0) - - assert_array_almost_equal(t, self.T2_0) - assert_array_almost_equal(p, self.P2_0) - - t, p = stats.ttest_1samp(self.X1, 1) - - assert_array_almost_equal(t, self.T1_1) - assert_array_almost_equal(p, self.P1_1) - - t, p = stats.ttest_1samp(self.X1, 2) - - assert_array_almost_equal(t, self.T1_2) - assert_array_almost_equal(p, self.P1_2) - -def test_scoreatpercentile(): - assert_equal(stats.scoreatpercentile(range(10), 50), 4.5) - assert_equal(stats.scoreatpercentile(range(10), 50, (2,7)), 4.5) - assert_equal(stats.scoreatpercentile(range(100), 50, (1,8)), 4.5) - - assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]), - 50, (10,100)), - 55) - assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]), - 50, (1,10)), - 5.5) - -def test_percentileofscore(): - pcos = stats.percentileofscore - - assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0) - - for (kind, result) in [('mean', 35.0), - ('strict', 30.0), - ('weak', 40.0)]: - yield assert_equal, pcos(np.arange(10) + 1, - 4, kind=kind), \ - result - - # multiple - 2 - for (kind, result) in 
[('rank', 45.0), - ('strict', 30.0), - ('weak', 50.0), - ('mean', 40.0)]: - yield assert_equal, pcos([1,2,3,4,4,5,6,7,8,9], - 4, kind=kind), \ - result - - # multiple - 3 - assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0) - for (kind, result) in [('rank', 50.0), - ('mean', 45.0), - ('strict', 30.0), - ('weak', 60.0)]: - - yield assert_equal, pcos([1,2,3,4,4,4,5,6,7,8], - 4, kind=kind), \ - result - - # missing - for kind in ('rank', 'mean', 'strict', 'weak'): - yield assert_equal, pcos([1,2,3,5,6,7,8,9,10,11], - 4, kind=kind), \ - 30 - - #larger numbers - for (kind, result) in [('mean', 35.0), - ('strict', 30.0), - ('weak', 40.0)]: - yield assert_equal, \ - pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40, - kind=kind), result - - for (kind, result) in [('mean', 45.0), - ('strict', 30.0), - ('weak', 60.0)]: - yield assert_equal, \ - pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80], - 40, kind=kind), result - - - for kind in ('rank', 'mean', 'strict', 'weak'): - yield assert_equal, \ - pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], - 40, kind=kind), 30.0 - - #boundaries - for (kind, result) in [('rank', 10.0), - ('mean', 5.0), - ('strict', 0.0), - ('weak', 10.0)]: - yield assert_equal, \ - pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], - 10, kind=kind), result - - for (kind, result) in [('rank', 100.0), - ('mean', 95.0), - ('strict', 90.0), - ('weak', 100.0)]: - yield assert_equal, \ - pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], - 110, kind=kind), result - - #out of bounds - for (kind, score, result) in [('rank', 200, 100.0), - ('mean', 200, 100.0), - ('mean', 0, 0.0)]: - yield assert_equal, \ - pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], - score, kind=kind), result - - -def test_friedmanchisquare(): - # see ticket:113 - # verified with matlab and R - #From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets" - #2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28) - x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 
0.583, - 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]), - array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583, - 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]), - array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563, - 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]), - array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625, - 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])] - - #From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001: - x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]), - array([2,2,1,2,3,1,2,3,2,1,1,3]), - array([2,4,3,3,4,3,3,4,4,1,2,1]), - array([3,5,4,3,4,4,3,3,3,4,4,4])] - - #From Jerrorl H. Zar, "Biostatistical Analysis"(example 12.6), Xf=10.68, 0.005 < p < 0.01: - #Probability from this example is inexact using Chisquare aproximation of Friedman Chisquare. - x3 = [array([7.0,9.9,8.5,5.1,10.3]), - array([5.3,5.7,4.7,3.5,7.7]), - array([4.9,7.6,5.5,2.8,8.4]), - array([8.8,8.9,8.1,3.3,9.1])] - - - assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),(10.2283464566929, 0.0167215803284414)) - assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),(18.9428571428571, 0.000280938375189499)) - assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),(10.68, 0.0135882729582176)) - np.testing.assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1]) - - # test using mstats - assert_array_almost_equal(stats.mstats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),(10.2283464566929, 0.0167215803284414)) - # the following fails - #assert_array_almost_equal(stats.mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),(18.9428571428571, 0.000280938375189499)) - assert_array_almost_equal(stats.mstats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),(10.68, 0.0135882729582176)) - np.testing.assert_raises(ValueError,stats.mstats.friedmanchisquare,x3[0],x3[1]) - -def test_kstest(): - #from numpy.testing import assert_almost_equal - - # comparing with values from R - x = np.linspace(-1,1,9) - D,p = 
stats.kstest(x,'norm') - assert_almost_equal( D, 0.15865525393145705, 12) - assert_almost_equal( p, 0.95164069201518386, 1) - - x = np.linspace(-15,15,9) - D,p = stats.kstest(x,'norm') - assert_almost_equal( D, 0.44435602715924361, 15) - assert_almost_equal( p, 0.038850140086788665, 8) - - # the following tests rely on deterministicaly replicated rvs - np.random.seed(987654321) - x = stats.norm.rvs(loc=0.2, size=100) - D,p = stats.kstest(x, 'norm', mode='asymp') - assert_almost_equal( D, 0.12464329735846891, 15) - assert_almost_equal( p, 0.089444888711820769, 15) - assert_almost_equal( np.array(stats.kstest(x, 'norm', mode='asymp')), - np.array((0.12464329735846891, 0.089444888711820769)), 15) - assert_almost_equal( np.array(stats.kstest(x,'norm', alternative = 'less')), - np.array((0.12464329735846891, 0.040989164077641749)), 15) - # this 'greater' test fails with precision of decimal=14 - assert_almost_equal( np.array(stats.kstest(x,'norm', alternative = 'greater')), - np.array((0.0072115233216310994, 0.98531158590396228)), 12) - - #missing: no test that uses *args - - -def test_ks_2samp(): - #exact small sample solution - data1 = np.array([1.0,2.0]) - data2 = np.array([1.0,2.0,3.0]) - assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)), - np.array((0.33333333333333337, 0.99062316386915694))) - assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)), - np.array((0.66666666666666674, 0.42490954988801982))) - #these can also be verified graphically - assert_almost_equal( - np.array(stats.ks_2samp(np.linspace(1,100,100), - np.linspace(1,100,100)+2+0.1)), - np.array((0.030000000000000027, 0.99999999996005062))) - assert_almost_equal( - np.array(stats.ks_2samp(np.linspace(1,100,100), - np.linspace(1,100,100)+2-0.1)), - np.array((0.020000000000000018, 0.99999999999999933))) - #these are just regression tests - assert_almost_equal( - np.array(stats.ks_2samp(np.linspace(1,100,100), - np.linspace(1,100,110)+20.1)), - np.array((0.21090909090909091, 
0.015880386730710221))) - assert_almost_equal( - np.array(stats.ks_2samp(np.linspace(1,100,100), - np.linspace(1,100,110)+20-0.1)), - np.array((0.20818181818181825, 0.017981441789762638))) - -def test_ttest_rel(): - #regression test - tr,pr = 0.81248591389165692, 0.41846234511362157 - tpr = ([tr,-tr],[pr,pr]) - - rvs1 = np.linspace(1,100,100) - rvs2 = np.linspace(1.01,99.989,100) - rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)]) - rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)]) - - t,p = stats.ttest_rel(rvs1, rvs2, axis=0) - assert_array_almost_equal([t,p],(tr,pr)) - t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0) - assert_array_almost_equal([t,p],tpr) - t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1) - assert_array_almost_equal([t,p],tpr) - - #test on 3 dimensions - rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) - rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) - t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1) - assert_array_almost_equal(np.abs(t), tr) - assert_array_almost_equal(np.abs(p), pr) - assert_equal(t.shape, (2, 3)) - - t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2) - assert_array_almost_equal(np.abs(t), tr) - assert_array_almost_equal(np.abs(p), pr) - assert_equal(t.shape, (3, 2)) - - olderr = np.seterr(all='ignore') - try: - #test zero division problem - t,p = stats.ttest_rel([0,0,0],[1,1,1]) - assert_equal((np.abs(t),p), (np.inf, 0)) - assert_almost_equal(stats.ttest_rel([0,0,0], [0,0,0]), (1.0, 0.42264973081037421)) - - #check that nan in input array result in nan output - anan = np.array([[1,np.nan],[-1,1]]) - assert_equal(stats.ttest_ind(anan, np.zeros((2,2))),([0, np.nan], [1,np.nan])) - finally: - np.seterr(**olderr) - - -def test_ttest_ind(): - #regression test - tr = 1.0912746897927283 - pr = 0.27647818616351882 - tpr = ([tr,-tr],[pr,pr]) - - rvs2 = np.linspace(1,100,100) - rvs1 = np.linspace(5,105,100) - rvs1_2D = np.array([rvs1, rvs2]) - rvs2_2D = 
np.array([rvs2, rvs1]) - - t,p = stats.ttest_ind(rvs1, rvs2, axis=0) - assert_array_almost_equal([t,p],(tr,pr)) - t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0) - assert_array_almost_equal([t,p],tpr) - t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1) - assert_array_almost_equal([t,p],tpr) - - #test on 3 dimensions - rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) - rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) - t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1) - assert_almost_equal(np.abs(t), np.abs(tr)) - assert_array_almost_equal(np.abs(p), pr) - assert_equal(t.shape, (2, 3)) - - t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2) - assert_array_almost_equal(np.abs(t), np.abs(tr)) - assert_array_almost_equal(np.abs(p), pr) - assert_equal(t.shape, (3, 2)) - - olderr = np.seterr(all='ignore') - try: - #test zero division problem - t,p = stats.ttest_ind([0,0,0],[1,1,1]) - assert_equal((np.abs(t),p), (np.inf, 0)) - assert_almost_equal(stats.ttest_ind([0,0,0], [0,0,0]), (1.0, 0.37390096630005898)) - - #check that nan in input array result in nan output - anan = np.array([[1,np.nan],[-1,1]]) - assert_equal(stats.ttest_ind(anan, np.zeros((2,2))),([0, np.nan], [1,np.nan])) - finally: - np.seterr(**olderr) - - -def test_ttest_1samp_new(): - n1, n2, n3 = (10,15,20) - rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3)) - - #check multidimensional array and correct axis handling - #deterministic rvn1 and rvn2 would be better as in test_ttest_rel - t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0) - t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0) - t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1) - assert_array_almost_equal(t1,t2, decimal=14) - assert_almost_equal(t1[0,0],t3, decimal=14) - assert_equal(t1.shape, (n2,n3)) - - t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1) - t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1) - t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1) - assert_array_almost_equal(t1,t2, decimal=14) - 
assert_almost_equal(t1[0,0],t3, decimal=14) - assert_equal(t1.shape, (n1,n3)) - - t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2) - t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2) - t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1) - assert_array_almost_equal(t1,t2, decimal=14) - assert_almost_equal(t1[0,0],t3, decimal=14) - assert_equal(t1.shape, (n1,n2)) - - olderr = np.seterr(all='ignore') - try: - #test zero division problem - t,p = stats.ttest_1samp([0,0,0], 1) - assert_equal((np.abs(t),p), (np.inf, 0)) - assert_almost_equal(stats.ttest_1samp([0,0,0], 0), (1.0, 0.42264973081037421)) - - #check that nan in input array result in nan output - anan = np.array([[1,np.nan],[-1,1]]) - assert_equal(stats.ttest_1samp(anan, 0),([0, np.nan], [1,np.nan])) - finally: - np.seterr(**olderr) - - -def test_describe(): - x = np.vstack((np.ones((3,4)),2*np.ones((2,4)))) - nc, mmc = (5, ([ 1., 1., 1., 1.], [ 2., 2., 2., 2.])) - mc = np.array([ 1.4, 1.4, 1.4, 1.4]) - vc = np.array([ 0.3, 0.3, 0.3, 0.3]) - skc = [0.40824829046386357]*4 - kurtc = [-1.833333333333333]*4 - n, mm, m, v, sk, kurt = stats.describe(x) - assert_equal(n, nc) - assert_equal(mm, mmc) - assert_equal(m, mc) - assert_equal(v, vc) - assert_array_almost_equal(sk, skc, decimal=13) #not sure about precision - assert_array_almost_equal(kurt, kurtc, decimal=13) - n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1) - assert_equal(n, nc) - assert_equal(mm, mmc) - assert_equal(m, mc) - assert_equal(v, vc) - assert_array_almost_equal(sk, skc, decimal=13) #not sure about precision - assert_array_almost_equal(kurt, kurtc, decimal=13) - -def test_normalitytests(): - # numbers verified with R: dagoTest in package fBasics - st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734) - pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019) - x = np.array((-2,-1,0,1,2,3)*4)**2 - yield assert_array_almost_equal, stats.normaltest(x), (st_normal, pv_normal) - yield assert_array_almost_equal, 
stats.skewtest(x), (st_skew, pv_skew) - yield assert_array_almost_equal, stats.kurtosistest(x), (st_kurt, pv_kurt) - -def test_skewtest_too_few_samples(): - """Regression test for ticket #1492. - - skewtest requires at least 8 samples; 7 should raise a ValueError. - """ - x = np.arange(7.0) - assert_raises(ValueError, stats.skewtest, x) - -def mannwhitneyu(): - x = np.array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., - 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 1.]) - - y = np.array([ 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., - 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., - 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2., - 1., 2., 1., 1., 1., 1., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., - 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., - 1., 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., - 1., 1., 1., 
1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., - 1.]) - #p-value verified with matlab and R to 5 significant digits - assert_array_almost_equal(stats.stats.mannwhitneyu(x,y), - (16980.5, 2.8214327656317373e-005), decimal=12) - - - -def test_pointbiserial(): - # copied from mstats tests removing nans - x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, - 0,0,0,0,1] - y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, - 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, - 0.8,0.7,0.6,0.5,0.2,0.2,0.1] - assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5) - - -def test_obrientransform(): - #this is a regression test to check np.var replacement - #I didn't separately verigy the numbers - x1 = np.arange(5) - result = np.array( - [[ 5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667], - [ 21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]]) - assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8) - - -class HarMeanTestCase: - def test_1dlist(self): - ''' Test a 1d list''' - a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100] - b = 34.1417152147 - self.do(a, b) - def test_1darray(self): - ''' Test a 1d array''' - a=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 34.1417152147 - self.do(a, b) - def test_1dma(self): - ''' Test a 1d masked array''' - a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 34.1417152147 - self.do(a, b) - def test_1dmavalue(self): - ''' Test a 1d masked array with a masked value''' - a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], - mask=[0,0,0,0,0,0,0,0,0,1]) - b = 31.8137186141 - self.do(a, b) - - # Note the next tests use axis=None as default, not axis=0 - def test_2dlist(self): - ''' Test a 2d list''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - self.do(a, b) - def test_2darray(self): - ''' Test a 2d array''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - 
self.do(np.array(a), b) - def test_2dma(self): - ''' Test a 2d masked array''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - self.do(np.ma.array(a), b) - def test_2daxis0(self): - ''' Test a 2d list with axis=0''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([ 22.88135593, 39.13043478, 52.90076336, 65.45454545]) - self.do(a, b, axis=0) - def test_2daxis1(self): - ''' Test a 2d list with axis=1''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([ 19.2 , 63.03939962, 103.80078637]) - self.do(a, b, axis=1) - def test_2dmatrixdaxis0(self): - ''' Test a 2d list with axis=0''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[ 22.88135593, 39.13043478, 52.90076336, 65.45454545]]) - self.do(np.matrix(a), b, axis=0) - def test_2dmatrixaxis1(self): - ''' Test a 2d list with axis=1''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[ 19.2 , 63.03939962, 103.80078637]]).T - self.do(np.matrix(a), b, axis=1) -## def test_dtype(self): -## ''' Test a 1d list with a new dtype''' -## a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100] -## b = 34.1417152147 -## self.do(a, b, dtype=np.float128) # does not work on Win32 - -class TestHarMean(HarMeanTestCase, TestCase): - def do(self, a, b, axis=None, dtype=None): - x = stats.hmean(a, axis=axis, dtype=dtype) - assert_almost_equal(b, x) - assert_equal(x.dtype, dtype) - -class GeoMeanTestCase: - def test_1dlist(self): - ''' Test a 1d list''' - a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100] - b = 45.2872868812 - self.do(a, b) - def test_1darray(self): - ''' Test a 1d array''' - a=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 45.2872868812 - self.do(a, b) - def test_1dma(self): - ''' Test a 1d masked array''' - a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 45.2872868812 - self.do(a, b) - def test_1dmavalue(self): - ''' Test a 1d masked array with a masked value''' - 
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1]) - b = 41.4716627439 - self.do(a, b) - - # Note the next tests use axis=None as default, not axis=0 - def test_2dlist(self): - ''' Test a 2d list''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.do(a, b) - def test_2darray(self): - ''' Test a 2d array''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.do(np.array(a), b) - def test_2dma(self): - ''' Test a 2d masked array''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.do(np.ma.array(a), b) - def test_2daxis0(self): - ''' Test a 2d list with axis=0''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([35.56893304, 49.32424149, 61.3579244 , 72.68482371]) - self.do(a, b, axis=0) - def test_2daxis1(self): - ''' Test a 2d list with axis=1''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([ 22.13363839, 64.02171746, 104.40086817]) - self.do(a, b, axis=1) - def test_2dmatrixdaxis0(self): - ''' Test a 2d list with axis=0''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[35.56893304, 49.32424149, 61.3579244 , 72.68482371]]) - self.do(np.matrix(a), b, axis=0) - def test_2dmatrixaxis1(self): - ''' Test a 2d list with axis=1''' - a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[ 22.13363839, 64.02171746, 104.40086817]]).T - self.do(np.matrix(a), b, axis=1) -## def test_dtype(self): -## ''' Test a 1d list with a new dtype''' -## a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100] -## b = 45.2872868812 -## self.do(a, b, dtype=np.float128) # does not exist on win32 - def test_1dlist0(self): - ''' Test a 1d list with zero element''' - a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 0] - b = 0.0 # due to exp(-inf)=0 - olderr = np.seterr(all='ignore') - try: - self.do(a, b) - finally: - np.seterr(**olderr) - - def 
test_1darray0(self): - ''' Test a 1d array with zero element''' - a=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) - b = 0.0 # due to exp(-inf)=0 - olderr = np.seterr(all='ignore') - try: - self.do(a, b) - finally: - np.seterr(**olderr) - - def test_1dma0(self): - ''' Test a 1d masked array with zero element''' - a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) - b = 41.4716627439 - olderr = np.seterr(all='ignore') - try: - self.do(a, b) - finally: - np.seterr(**olderr) - - def test_1dmainf(self): - ''' Test a 1d masked array with negative element''' - a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) - b = 41.4716627439 - olderr = np.seterr(all='ignore') - try: - self.do(a, b) - finally: - np.seterr(**olderr) - -class TestGeoMean(GeoMeanTestCase, TestCase): - def do(self, a, b, axis=None, dtype=None): - #Note this doesn't test when axis is not specified - x = stats.gmean(a, axis=axis, dtype=dtype) - assert_almost_equal(b, x) - assert_equal(x.dtype, dtype) - - -def test_binomtest(): - # precision tests compared to R for ticket:986 - pp = np.concatenate(( np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5), - np.linspace(0.85,0.95,5))) - n = 501 - x = 450 - results = [0.0, 0.0, 1.0159969301994141e-304, - 2.9752418572150531e-275, 7.7668382922535275e-250, - 2.3381250925167094e-099, 7.8284591587323951e-081, - 9.9155947819961383e-065, 2.8729390725176308e-050, - 1.7175066298388421e-037, 0.0021070691951093692, - 0.12044570587262322, 0.88154763174802508, 0.027120993063129286, - 2.6102587134694721e-006] - - for p, res in zip(pp,results): - assert_approx_equal(stats.binom_test(x, n, p), res, - significant=12, err_msg='fail forp=%f'%p) - - assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024, - significant=12, err_msg='fail forp=%f'%p) - -class Test_Trim(object): - # test trim functions - def test_trim1(self): - a = np.arange(11) - assert_equal(stats.trim1(a, 0.1), np.arange(10)) - assert_equal(stats.trim1(a, 0.2), np.arange(9)) - 
assert_equal(stats.trim1(a, 0.2, tail='left'), np.arange(2,11)) - assert_equal(stats.trim1(a, 3/11., tail='left'), np.arange(3,11)) - - def test_trimboth(self): - a = np.arange(11) - assert_equal(stats.trimboth(a, 3/11.), np.arange(3,8)) - assert_equal(stats.trimboth(a, 0.2), np.array([2, 3, 4, 5, 6, 7, 8])) - assert_equal(stats.trimboth(np.arange(24).reshape(6,4), 0.2), - np.arange(4,20).reshape(4,4)) - assert_equal(stats.trimboth(np.arange(24).reshape(4,6).T, 2/6.), - np.array([[ 2, 8, 14, 20],[ 3, 9, 15, 21]])) - assert_raises(ValueError, stats.trimboth, - np.arange(24).reshape(4,6).T, 4/6.) - - def test_trim_mean(self): - assert_equal(stats.trim_mean(np.arange(24).reshape(4,6).T, 2/6.), - np.array([ 2.5, 8.5, 14.5, 20.5])) - assert_equal(stats.trim_mean(np.arange(24).reshape(4,6), 2/6.), - np.array([ 9., 10., 11., 12., 13., 14.])) - assert_equal(stats.trim_mean(np.arange(24), 2/6.), 11.5) - assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5) - - -class TestSigamClip(object): - def test_sigmaclip1(self): - a = np.concatenate((np.linspace(9.5,10.5,31),np.linspace(0,20,5))) - fact = 4 #default - c, low, upp = stats.sigmaclip(a) - assert_(c.min()>low) - assert_(c.max()low) - assert_(c.max()low) - assert_(c.max() /* For offsetof */ -#ifndef offsetof -#define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif - -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif - -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif - -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif - -#if PY_VERSION_HEX < 0x02040000 - #define METH_COEXIST 0 - #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) - #define PyDict_Contains(d,o) PySequence_Contains(d,o) -#endif - -#if PY_VERSION_HEX < 0x02050000 - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX 
- #define PY_SSIZE_T_MIN INT_MIN - #define PY_FORMAT_SIZE_T "" - #define PyInt_FromSsize_t(z) PyInt_FromLong(z) - #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) - #define PyNumber_Index(o) PyNumber_Int(o) - #define PyIndex_Check(o) PyNumber_Check(o) - #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) - #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) - #define PyVarObject_HEAD_INIT(type, size) \ - PyObject_HEAD_INIT(type) size, - #define PyType_Modified(t) - - typedef struct { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; - } Py_buffer; - - #define PyBUF_SIMPLE 0 - #define PyBUF_WRITABLE 0x0001 - #define PyBUF_FORMAT 0x0004 - #define PyBUF_ND 0x0008 - #define PyBUF_STRIDES (0x0010 | PyBUF_ND) - #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) - #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) - #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) - #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#endif - -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#endif - -#if PY_MAJOR_VERSION >= 3 - #define Py_TPFLAGS_CHECKTYPES 0 - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif - -#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PyBytesObject PyStringObject - #define PyBytes_Type PyString_Type - #define 
PyBytes_Check PyString_Check - #define PyBytes_CheckExact PyString_CheckExact - #define PyBytes_FromString PyString_FromString - #define PyBytes_FromStringAndSize PyString_FromStringAndSize - #define PyBytes_FromFormat PyString_FromFormat - #define PyBytes_DecodeEscape PyString_DecodeEscape - #define PyBytes_AsString PyString_AsString - #define PyBytes_AsStringAndSize PyString_AsStringAndSize - #define PyBytes_Size PyString_Size - #define PyBytes_AS_STRING PyString_AS_STRING - #define PyBytes_GET_SIZE PyString_GET_SIZE - #define PyBytes_Repr PyString_Repr - #define PyBytes_Concat PyString_Concat - #define PyBytes_ConcatAndDel PyString_ConcatAndDel -#endif - -#if PY_VERSION_HEX < 0x02060000 - #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) - #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif - -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif - -#if PY_VERSION_HEX < 0x03020000 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define 
__Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif - - -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - -#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) - #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) - #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) -#else - #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) - #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) - #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ - (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ - (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ - (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) -#endif - -#if PY_MAJOR_VERSION >= 3 - #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) -#else - #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) - #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) - #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) -#endif - -#if PY_VERSION_HEX < 0x02050000 - #define __Pyx_NAMESTR(n) ((char *)(n)) - #define __Pyx_DOCSTR(n) ((char *)(n)) -#else - #define __Pyx_NAMESTR(n) (n) - #define __Pyx_DOCSTR(n) (n) -#endif - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) -#define _USE_MATH_DEFINES -#endif -#include -#define __PYX_HAVE__scipy__stats__vonmises_cython -#define __PYX_HAVE_API__scipy__stats__vonmises_cython -#include "stdio.h" -#include "stdlib.h" -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" -#include "math.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#ifdef PYREX_WITHOUT_ASSERTIONS -#define CYTHON_WITHOUT_ASSERTIONS -#endif - - -/* inline attribute */ -#ifndef CYTHON_INLINE - #if defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -/* unused attribute */ -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define 
CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif - -typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ - - -/* Type Conversion Predeclarations */ - -#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) -#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) - -#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) -#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); - -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) - - -#ifdef __GNUC__ - /* Test for GCC > 2.95 */ - #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #else /* __GNUC__ > 2 ... */ - #define likely(x) (x) - #define unlikely(x) (x) - #endif /* __GNUC__ > 2 ... 
*/ -#else /* __GNUC__ */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ - -static PyObject *__pyx_m; -static PyObject *__pyx_b; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - -static const char *__pyx_f[] = { - "vonmises_cython.pyx", - "numpy.pxd", -}; - -/* "numpy.pxd":719 - * # in Cython to enable them only on the right systems. - * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "numpy.pxd":720 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "numpy.pxd":721 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "numpy.pxd":722 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "numpy.pxd":726 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ 
-typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "numpy.pxd":727 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "numpy.pxd":728 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* "numpy.pxd":729 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "numpy.pxd":733 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t - */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "numpy.pxd":734 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "numpy.pxd":743 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "numpy.pxd":744 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "numpy.pxd":745 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong 
__pyx_t_5numpy_longlong_t; - -/* "numpy.pxd":747 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "numpy.pxd":748 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "numpy.pxd":749 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "numpy.pxd":751 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "numpy.pxd":752 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "numpy.pxd":754 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "numpy.pxd":755 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* "numpy.pxd":756 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif - -#if 
CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif - -/*--- Type declarations ---*/ - -/* "numpy.pxd":758 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "numpy.pxd":759 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "numpy.pxd":760 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* "numpy.pxd":762 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; - - -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif - -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; - #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) - #define __Pyx_RefNannyFinishContext() 
__Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, PyObject* kw_name); /*proto*/ - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); 
/*proto*/ - -/* Run-time type information about structs used with buffers */ -struct __Pyx_StructField_; - -typedef struct { - const char* name; /* for error messages only */ - struct __Pyx_StructField_* fields; - size_t size; /* sizeof(type) */ - char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */ -} __Pyx_TypeInfo; - -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; - -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; - - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); - -static void __Pyx_RaiseBufferFallbackError(void); /*proto*/ -#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) - - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - - -#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_List_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ - __Pyx_GetItemInt_Tuple_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { - if (likely(o != Py_None)) { - if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - return r; - } - else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { - PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); - Py_INCREF(r); - return r; - } - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - - -#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ - __Pyx_GetItemInt_Fast(o, i) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i))) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { - PyObject *r; - if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) { - r = PyList_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { - r = PyTuple_GET_ITEM(o, i); - Py_INCREF(r); - } - else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) { - r = PySequence_GetItem(o, i); - } - else { - r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); - } - return r; -} - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); -static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else -#define __Pyx_GetBuffer PyObject_GetBuffer -#define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - 
-Py_ssize_t __Pyx_zeros[] = {0}; -Py_ssize_t __Pyx_minusones[] = {-1}; - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eqf(a, b) ((a)==(b)) - #define __Pyx_c_sumf(a, b) ((a)+(b)) - #define __Pyx_c_difff(a, b) ((a)-(b)) - #define __Pyx_c_prodf(a, b) ((a)*(b)) - #define __Pyx_c_quotf(a, b) ((a)/(b)) - #define __Pyx_c_negf(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zerof(z) ((z)==(float)0) - #define __Pyx_c_conjf(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_absf(z) (::std::abs(z)) - #define __Pyx_c_powf(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zerof(z) ((z)==0) - #define __Pyx_c_conjf(z) (conjf(z)) - #if 1 - #define __Pyx_c_absf(z) (cabsf(z)) - #define __Pyx_c_powf(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, 
__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq(a, b) ((a)==(b)) - #define __Pyx_c_sum(a, b) ((a)+(b)) - #define __Pyx_c_diff(a, b) ((a)-(b)) - #define __Pyx_c_prod(a, b) ((a)*(b)) - #define __Pyx_c_quot(a, b) ((a)/(b)) - #define __Pyx_c_neg(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero(z) ((z)==(double)0) - #define __Pyx_c_conj(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs(z) (::std::abs(z)) - #define __Pyx_c_pow(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero(z) ((z)==0) - #define __Pyx_c_conj(z) (conj(z)) - #if 1 - #define __Pyx_c_abs(z) (cabs(z)) - #define __Pyx_c_pow(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double 
__Pyx_c_abs(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename); /*proto*/ - -static int __Pyx_check_binary_version(void); - -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - -static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename); /*proto*/ - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module 
declarations from 'libc.stdio' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'libc.stdlib' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ - -/* Module declarations from 'cython.cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'scipy.stats.vonmises_cython' */ -static double __pyx_f_5scipy_5stats_15vonmises_cython_von_mises_cdf_series(double, double, unsigned int); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), 'R' }; -#define __Pyx_MODULE_NAME "scipy.stats.vonmises_cython" -int __pyx_module_is_main_scipy__stats__vonmises_cython = 0; - -/* Implementation of 'scipy.stats.vonmises_cython' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ValueError; -static 
PyObject *__pyx_builtin_RuntimeError; -static char __pyx_k_1[] = "von_mises_cdf_normalapprox"; -static char __pyx_k_2[] = "ndarray is not C contiguous"; -static char __pyx_k_4[] = "ndarray is not Fortran contiguous"; -static char __pyx_k_6[] = "Non-native byte order not supported"; -static char __pyx_k_8[] = "unknown dtype code in numpy.pxd (%d)"; -static char __pyx_k_9[] = "Format string allocated too short, see comment in numpy.pxd"; -static char __pyx_k_12[] = "Format string allocated too short."; -static char __pyx_k_14[] = "scipy.stats"; -static char __pyx_k_15[] = "scipy.special"; -static char __pyx_k_16[] = "numpy.testing"; -static char __pyx_k_17[] = "scipy.stats.vonmises_cython"; -static char __pyx_k__B[] = "B"; -static char __pyx_k__H[] = "H"; -static char __pyx_k__I[] = "I"; -static char __pyx_k__L[] = "L"; -static char __pyx_k__O[] = "O"; -static char __pyx_k__Q[] = "Q"; -static char __pyx_k__b[] = "b"; -static char __pyx_k__d[] = "d"; -static char __pyx_k__f[] = "f"; -static char __pyx_k__g[] = "g"; -static char __pyx_k__h[] = "h"; -static char __pyx_k__i[] = "i"; -static char __pyx_k__k[] = "k"; -static char __pyx_k__l[] = "l"; -static char __pyx_k__q[] = "q"; -static char __pyx_k__x[] = "x"; -static char __pyx_k__C1[] = "C1"; -static char __pyx_k__Zd[] = "Zd"; -static char __pyx_k__Zf[] = "Zf"; -static char __pyx_k__Zg[] = "Zg"; -static char __pyx_k__i0[] = "i0"; -static char __pyx_k__np[] = "np"; -static char __pyx_k__pi[] = "pi"; -static char __pyx_k__cdf[] = "cdf"; -static char __pyx_k__exp[] = "exp"; -static char __pyx_k__sin[] = "sin"; -static char __pyx_k__ndim[] = "ndim"; -static char __pyx_k__norm[] = "norm"; -static char __pyx_k__sqrt[] = "sqrt"; -static char __pyx_k__dtype[] = "dtype"; -static char __pyx_k__empty[] = "empty"; -static char __pyx_k__float[] = "float"; -static char __pyx_k__numpy[] = "numpy"; -static char __pyx_k__range[] = "range"; -static char __pyx_k__round[] = "round"; -static char __pyx_k__scipy[] = "scipy"; -static char 
__pyx_k__shape[] = "shape"; -static char __pyx_k__stats[] = "stats"; -static char __pyx_k__astype[] = "astype"; -static char __pyx_k__asarray[] = "asarray"; -static char __pyx_k____main__[] = "__main__"; -static char __pyx_k____test__[] = "__test__"; -static char __pyx_k__ValueError[] = "ValueError"; -static char __pyx_k__atleast_1d[] = "atleast_1d"; -static char __pyx_k__RuntimeError[] = "RuntimeError"; -static char __pyx_k__von_mises_cdf[] = "von_mises_cdf"; -static char __pyx_k__broadcast_arrays[] = "broadcast_arrays"; -static PyObject *__pyx_n_s_1; -static PyObject *__pyx_kp_u_12; -static PyObject *__pyx_n_s_14; -static PyObject *__pyx_n_s_15; -static PyObject *__pyx_n_s_16; -static PyObject *__pyx_n_s_17; -static PyObject *__pyx_kp_u_2; -static PyObject *__pyx_kp_u_4; -static PyObject *__pyx_kp_u_6; -static PyObject *__pyx_kp_u_8; -static PyObject *__pyx_kp_u_9; -static PyObject *__pyx_n_s__C1; -static PyObject *__pyx_n_s__RuntimeError; -static PyObject *__pyx_n_s__ValueError; -static PyObject *__pyx_n_s____main__; -static PyObject *__pyx_n_s____test__; -static PyObject *__pyx_n_s__asarray; -static PyObject *__pyx_n_s__astype; -static PyObject *__pyx_n_s__atleast_1d; -static PyObject *__pyx_n_s__broadcast_arrays; -static PyObject *__pyx_n_s__cdf; -static PyObject *__pyx_n_s__dtype; -static PyObject *__pyx_n_s__empty; -static PyObject *__pyx_n_s__exp; -static PyObject *__pyx_n_s__float; -static PyObject *__pyx_n_s__i0; -static PyObject *__pyx_n_s__k; -static PyObject *__pyx_n_s__ndim; -static PyObject *__pyx_n_s__norm; -static PyObject *__pyx_n_s__np; -static PyObject *__pyx_n_s__numpy; -static PyObject *__pyx_n_s__pi; -static PyObject *__pyx_n_s__range; -static PyObject *__pyx_n_s__round; -static PyObject *__pyx_n_s__scipy; -static PyObject *__pyx_n_s__shape; -static PyObject *__pyx_n_s__sin; -static PyObject *__pyx_n_s__sqrt; -static PyObject *__pyx_n_s__stats; -static PyObject *__pyx_n_s__von_mises_cdf; -static PyObject *__pyx_n_s__x; -static PyObject 
*__pyx_int_0; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_3; -static PyObject *__pyx_int_4; -static PyObject *__pyx_int_15; -static PyObject *__pyx_int_16; -static PyObject *__pyx_int_24; -static PyObject *__pyx_k_tuple_3; -static PyObject *__pyx_k_tuple_5; -static PyObject *__pyx_k_tuple_7; -static PyObject *__pyx_k_tuple_10; -static PyObject *__pyx_k_tuple_11; -static PyObject *__pyx_k_tuple_13; - -/* "scipy/stats/vonmises_cython.pyx":12 - * - * - * cdef double von_mises_cdf_series(double k,double x,unsigned int p): # <<<<<<<<<<<<<< - * cdef double s, c, sn, cn, R, V - * cdef unsigned int n - */ - -static double __pyx_f_5scipy_5stats_15vonmises_cython_von_mises_cdf_series(double __pyx_v_k, double __pyx_v_x, unsigned int __pyx_v_p) { - double __pyx_v_s; - double __pyx_v_c; - double __pyx_v_sn; - double __pyx_v_cn; - double __pyx_v_R; - double __pyx_v_V; - unsigned int __pyx_v_n; - double __pyx_r; - __Pyx_RefNannyDeclarations - unsigned int __pyx_t_1; - double __pyx_t_2; - double __pyx_t_3; - long __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("von_mises_cdf_series"); - - /* "scipy/stats/vonmises_cython.pyx":15 - * cdef double s, c, sn, cn, R, V - * cdef unsigned int n - * s = sin(x) # <<<<<<<<<<<<<< - * c = cos(x) - * sn = sin(p*x) - */ - __pyx_v_s = sin(__pyx_v_x); - - /* "scipy/stats/vonmises_cython.pyx":16 - * cdef unsigned int n - * s = sin(x) - * c = cos(x) # <<<<<<<<<<<<<< - * sn = sin(p*x) - * cn = cos(p*x) - */ - __pyx_v_c = cos(__pyx_v_x); - - /* "scipy/stats/vonmises_cython.pyx":17 - * s = sin(x) - * c = cos(x) - * sn = sin(p*x) # <<<<<<<<<<<<<< - * cn = cos(p*x) - * R = 0 - */ - __pyx_v_sn = sin((__pyx_v_p * __pyx_v_x)); - - /* "scipy/stats/vonmises_cython.pyx":18 - * c = cos(x) - * sn = sin(p*x) - * cn = cos(p*x) # <<<<<<<<<<<<<< - * R = 0 - 
* V = 0 - */ - __pyx_v_cn = cos((__pyx_v_p * __pyx_v_x)); - - /* "scipy/stats/vonmises_cython.pyx":19 - * sn = sin(p*x) - * cn = cos(p*x) - * R = 0 # <<<<<<<<<<<<<< - * V = 0 - * for n in range(p-1,0,-1): - */ - __pyx_v_R = 0.0; - - /* "scipy/stats/vonmises_cython.pyx":20 - * cn = cos(p*x) - * R = 0 - * V = 0 # <<<<<<<<<<<<<< - * for n in range(p-1,0,-1): - * sn, cn = sn*c - cn*s, cn*c + sn*s - */ - __pyx_v_V = 0.0; - - /* "scipy/stats/vonmises_cython.pyx":21 - * R = 0 - * V = 0 - * for n in range(p-1,0,-1): # <<<<<<<<<<<<<< - * sn, cn = sn*c - cn*s, cn*c + sn*s - * R = 1./(2*n/k + R) - */ - for (__pyx_t_1 = (__pyx_v_p - 1); __pyx_t_1 > 0; __pyx_t_1-=1) { - __pyx_v_n = __pyx_t_1; - - /* "scipy/stats/vonmises_cython.pyx":22 - * V = 0 - * for n in range(p-1,0,-1): - * sn, cn = sn*c - cn*s, cn*c + sn*s # <<<<<<<<<<<<<< - * R = 1./(2*n/k + R) - * V = R*(sn/n+V) - */ - __pyx_t_2 = ((__pyx_v_sn * __pyx_v_c) - (__pyx_v_cn * __pyx_v_s)); - __pyx_t_3 = ((__pyx_v_cn * __pyx_v_c) + (__pyx_v_sn * __pyx_v_s)); - __pyx_v_sn = __pyx_t_2; - __pyx_v_cn = __pyx_t_3; - - /* "scipy/stats/vonmises_cython.pyx":23 - * for n in range(p-1,0,-1): - * sn, cn = sn*c - cn*s, cn*c + sn*s - * R = 1./(2*n/k + R) # <<<<<<<<<<<<<< - * V = R*(sn/n+V) - * - */ - __pyx_t_4 = (2 * __pyx_v_n); - if (unlikely(__pyx_v_k == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = ((__pyx_t_4 / __pyx_v_k) + __pyx_v_R); - if (unlikely(__pyx_t_3 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_R = (1. 
/ __pyx_t_3); - - /* "scipy/stats/vonmises_cython.pyx":24 - * sn, cn = sn*c - cn*s, cn*c + sn*s - * R = 1./(2*n/k + R) - * V = R*(sn/n+V) # <<<<<<<<<<<<<< - * - * return 0.5+x/(2*np.pi) + V/np.pi - */ - if (unlikely(__pyx_v_n == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_V = (__pyx_v_R * ((__pyx_v_sn / __pyx_v_n) + __pyx_v_V)); - } - - /* "scipy/stats/vonmises_cython.pyx":26 - * V = R*(sn/n+V) - * - * return 0.5+x/(2*np.pi) + V/np.pi # <<<<<<<<<<<<<< - * - * def von_mises_cdf_normalapprox(k,x,C1): - */ - __pyx_t_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyFloat_FromDouble(__pyx_v_x); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__pi); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyNumber_Multiply(__pyx_int_2, __pyx_t_8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyNumber_Divide(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - 
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyNumber_Add(__pyx_t_5, __pyx_t_8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyFloat_FromDouble(__pyx_v_V); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__pi); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_8, __pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyNumber_Add(__pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_6); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_r = __pyx_t_3; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - 
__Pyx_XDECREF(__pyx_t_8); - __Pyx_WriteUnraisable("scipy.stats.vonmises_cython.von_mises_cdf_series", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/stats/vonmises_cython.pyx":28 - * return 0.5+x/(2*np.pi) + V/np.pi - * - * def von_mises_cdf_normalapprox(k,x,C1): # <<<<<<<<<<<<<< - * b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) - * z = b*np.sin(x/2.) - */ - -static PyObject *__pyx_pf_5scipy_5stats_15vonmises_cython_von_mises_cdf_normalapprox(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_5scipy_5stats_15vonmises_cython_von_mises_cdf_normalapprox = {__Pyx_NAMESTR("von_mises_cdf_normalapprox"), (PyCFunction)__pyx_pf_5scipy_5stats_15vonmises_cython_von_mises_cdf_normalapprox, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject *__pyx_pf_5scipy_5stats_15vonmises_cython_von_mises_cdf_normalapprox(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_k = 0; - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_C1 = 0; - PyObject *__pyx_v_b = NULL; - PyObject *__pyx_v_z = NULL; - PyObject *__pyx_v_C = NULL; - PyObject *__pyx_v_chi = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__k,&__pyx_n_s__x,&__pyx_n_s__C1,0}; - __Pyx_RefNannySetupContext("von_mises_cdf_normalapprox"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[3] = {0,0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - 
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("von_mises_cdf_normalapprox", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 2: - values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C1); - if (likely(values[2])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("von_mises_cdf_normalapprox", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "von_mises_cdf_normalapprox") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_k = values[0]; - __pyx_v_x = values[1]; - __pyx_v_C1 = values[2]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_k = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - __pyx_v_C1 = PyTuple_GET_ITEM(__pyx_args, 2); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("von_mises_cdf_normalapprox", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.stats.vonmises_cython.von_mises_cdf_normalapprox", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - - /* 
"scipy/stats/vonmises_cython.pyx":29 - * - * def von_mises_cdf_normalapprox(k,x,C1): - * b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) # <<<<<<<<<<<<<< - * z = b*np.sin(x/2.) - * C = 24*k - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__sqrt); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__pi); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyNumber_Divide(__pyx_int_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetName(__pyx_m, 
__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__exp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_k); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_k); - __Pyx_GIVEREF(__pyx_v_k); - __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__i0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_k); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_k); - __Pyx_GIVEREF(__pyx_v_k); - __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyNumber_Divide(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_b = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":30 - * def von_mises_cdf_normalapprox(k,x,C1): - * b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) - * z = b*np.sin(x/2.) # <<<<<<<<<<<<<< - * C = 24*k - * chi = z - z**3/((C-2*z**2-16)/3.-(z**4+7/4.*z**2+167./2)/(C+C1-z**2+3))**2 - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__sin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyFloat_FromDouble(2.); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_v_x, __pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_v_b, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_z = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":31 - * b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) - * z = b*np.sin(x/2.) - * C = 24*k # <<<<<<<<<<<<<< - * chi = z - z**3/((C-2*z**2-16)/3.-(z**4+7/4.*z**2+167./2)/(C+C1-z**2+3))**2 - * return scipy.stats.norm.cdf(z) - */ - __pyx_t_1 = PyNumber_Multiply(__pyx_int_24, __pyx_v_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_C = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":32 - * z = b*np.sin(x/2.) 
- * C = 24*k - * chi = z - z**3/((C-2*z**2-16)/3.-(z**4+7/4.*z**2+167./2)/(C+C1-z**2+3))**2 # <<<<<<<<<<<<<< - * return scipy.stats.norm.cdf(z) - * - */ - __pyx_t_1 = PyNumber_Power(__pyx_v_z, __pyx_int_3, Py_None); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Power(__pyx_v_z, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_v_C, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Subtract(__pyx_t_3, __pyx_int_16); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyFloat_FromDouble(3.); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Power(__pyx_v_z, __pyx_int_4, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyFloat_FromDouble((7.0 / 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyNumber_Power(__pyx_v_z, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyNumber_Multiply(__pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Add(__pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyFloat_FromDouble((167. 
/ 2.0)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = PyNumber_Add(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyNumber_Add(__pyx_v_C, __pyx_v_C1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = PyNumber_Power(__pyx_v_z, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyNumber_Subtract(__pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Add(__pyx_t_2, __pyx_int_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Subtract(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = 
PyNumber_Power(__pyx_t_5, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Subtract(__pyx_v_z, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_chi = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":33 - * C = 24*k - * chi = z - z**3/((C-2*z**2-16)/3.-(z**4+7/4.*z**2+167./2)/(C+C1-z**2+3))**2 - * return scipy.stats.norm.cdf(z) # <<<<<<<<<<<<<< - * - * cimport cython - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__scipy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__stats); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__norm); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__cdf); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_z); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_z); - __Pyx_GIVEREF(__pyx_v_z); - __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("scipy.stats.vonmises_cython.von_mises_cdf_normalapprox", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_b); - __Pyx_XDECREF(__pyx_v_z); - __Pyx_XDECREF(__pyx_v_C); - __Pyx_XDECREF(__pyx_v_chi); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "scipy/stats/vonmises_cython.pyx":37 - * cimport cython - * @cython.boundscheck(False) - * def von_mises_cdf(k,x): # <<<<<<<<<<<<<< - * cdef np.ndarray[double, ndim=1] temp, temp_xs, temp_ks - * cdef unsigned int i, p - */ - -static PyObject *__pyx_pf_5scipy_5stats_15vonmises_cython_1von_mises_cdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_5scipy_5stats_15vonmises_cython_1von_mises_cdf = {__Pyx_NAMESTR("von_mises_cdf"), (PyCFunction)__pyx_pf_5scipy_5stats_15vonmises_cython_1von_mises_cdf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; -static PyObject 
*__pyx_pf_5scipy_5stats_15vonmises_cython_1von_mises_cdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_k = 0; - PyObject *__pyx_v_x = 0; - PyArrayObject *__pyx_v_temp = 0; - PyArrayObject *__pyx_v_temp_xs = 0; - PyArrayObject *__pyx_v_temp_ks = 0; - unsigned int __pyx_v_i; - unsigned int __pyx_v_p; - double __pyx_v_a1; - double __pyx_v_a2; - double __pyx_v_a3; - double __pyx_v_a4; - double __pyx_v_C1; - double __pyx_v_CK; - PyObject *__pyx_v_zerodim = NULL; - PyObject *__pyx_v_ix = NULL; - PyObject *__pyx_v_bx = NULL; - PyObject *__pyx_v_bk = NULL; - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_v_c_small_k = NULL; - Py_buffer __pyx_bstruct_temp; - Py_ssize_t __pyx_bstride_0_temp = 0; - Py_ssize_t __pyx_bshape_0_temp = 0; - Py_buffer __pyx_bstruct_temp_xs; - Py_ssize_t __pyx_bstride_0_temp_xs = 0; - Py_ssize_t __pyx_bshape_0_temp_xs = 0; - Py_buffer __pyx_bstruct_temp_ks; - Py_ssize_t __pyx_bstride_0_temp_ks = 0; - Py_ssize_t __pyx_bshape_0_temp_ks = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - double __pyx_t_5; - double __pyx_t_6; - double __pyx_t_7; - double __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - PyObject *(*__pyx_t_10)(PyObject *); - PyObject *__pyx_t_11 = NULL; - PyArrayObject *__pyx_t_12 = NULL; - int __pyx_t_13; - PyObject *__pyx_t_14 = NULL; - PyObject *__pyx_t_15 = NULL; - PyObject *__pyx_t_16 = NULL; - Py_ssize_t __pyx_t_17; - unsigned int __pyx_t_18; - unsigned int __pyx_t_19; - unsigned int __pyx_t_20; - unsigned int __pyx_t_21; - unsigned int __pyx_t_22; - unsigned int __pyx_t_23; - unsigned int __pyx_t_24; - unsigned int __pyx_t_25; - unsigned int __pyx_t_26; - unsigned int __pyx_t_27; - int __pyx_t_28; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__k,&__pyx_n_s__x,0}; - 
__Pyx_RefNannySetupContext("von_mises_cdf"); - __pyx_self = __pyx_self; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); - PyObject* values[2] = {0,0}; - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 0: - values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); - if (likely(values[0])) kw_args--; - else goto __pyx_L5_argtuple_error; - case 1: - values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); - if (likely(values[1])) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("von_mises_cdf", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "von_mises_cdf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - __pyx_v_k = values[0]; - __pyx_v_x = values[1]; - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - __pyx_v_k = PyTuple_GET_ITEM(__pyx_args, 0); - __pyx_v_x = PyTuple_GET_ITEM(__pyx_args, 1); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("von_mises_cdf", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - __pyx_L3_error:; - __Pyx_AddTraceback("scipy.stats.vonmises_cython.von_mises_cdf", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __Pyx_INCREF(__pyx_v_k); - __Pyx_INCREF(__pyx_v_x); - __pyx_bstruct_temp.buf = NULL; - __pyx_bstruct_temp_xs.buf = NULL; - __pyx_bstruct_temp_ks.buf = NULL; - - /* 
"scipy/stats/vonmises_cython.pyx":42 - * cdef double a1, a2, a3, a4, C1, CK - * #k,x = np.broadcast_arrays(np.asarray(k),np.asarray(x)) - * k = np.asarray(k) # <<<<<<<<<<<<<< - * x = np.asarray(x) - * zerodim = k.ndim==0 and x.ndim==0 - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__asarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_k); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_k); - __Pyx_GIVEREF(__pyx_v_k); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_k); - __pyx_v_k = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/stats/vonmises_cython.pyx":43 - * #k,x = np.broadcast_arrays(np.asarray(k),np.asarray(x)) - * k = np.asarray(k) - * x = np.asarray(x) # <<<<<<<<<<<<<< - * zerodim = k.ndim==0 and x.ndim==0 - * - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__asarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":44 - * k = np.asarray(k) - * x = np.asarray(x) - * zerodim = k.ndim==0 and x.ndim==0 # <<<<<<<<<<<<<< - * - * k = np.atleast_1d(k) - */ - __pyx_t_2 = PyObject_GetAttr(__pyx_v_k, __pyx_n_s__ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_int_0, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_t_4) { - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_int_0, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __pyx_t_1; - __pyx_t_1 = 0; - } else { - __pyx_t_2 = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_v_zerodim = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":46 - * zerodim = k.ndim==0 and x.ndim==0 - * - * k = np.atleast_1d(k) # <<<<<<<<<<<<<< - * x = np.atleast_1d(x) - * ix = np.round(x/(2*np.pi)) - */ - __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__atleast_1d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __Pyx_INCREF(__pyx_v_k); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_k); - __Pyx_GIVEREF(__pyx_v_k); - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_v_k); - __pyx_v_k = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":47 - * - * k = np.atleast_1d(k) - * x = np.atleast_1d(x) # <<<<<<<<<<<<<< - * ix = np.round(x/(2*np.pi)) - * x = x-ix*2*np.pi - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, 
__pyx_n_s__atleast_1d); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/stats/vonmises_cython.pyx":48 - * k = np.atleast_1d(k) - * x = np.atleast_1d(x) - * ix = np.round(x/(2*np.pi)) # <<<<<<<<<<<<<< - * x = x-ix*2*np.pi - * - */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__round); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__pi); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = 
PyNumber_Multiply(__pyx_int_2, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_v_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_v_ix = __pyx_t_2; - __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":49 - * x = np.atleast_1d(x) - * ix = np.round(x/(2*np.pi)) - * x = x-ix*2*np.pi # <<<<<<<<<<<<<< - * - * # These values should give 12 decimal digits - */ - __pyx_t_2 = PyNumber_Multiply(__pyx_v_ix, __pyx_int_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__pi); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Subtract(__pyx_v_x, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_v_x); - __pyx_v_x = __pyx_t_1; - __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":52 - * - * # These values should give 12 decimal digits - * CK=50 # <<<<<<<<<<<<<< - * a1, a2, a3, a4 = [28., 0.5, 100., 5.0] - * C1 = 50.1 - */ - __pyx_v_CK = 50.0; - - /* "scipy/stats/vonmises_cython.pyx":53 - * # These values should give 12 decimal digits - * CK=50 - * a1, a2, a3, a4 = [28., 0.5, 100., 5.0] # <<<<<<<<<<<<<< - * C1 = 50.1 - * - */ - __pyx_t_5 = 28.; - __pyx_t_6 = 0.5; - __pyx_t_7 = 100.; - __pyx_t_8 = 5.0; - __pyx_v_a1 = __pyx_t_5; - __pyx_v_a2 = __pyx_t_6; - __pyx_v_a3 = __pyx_t_7; - __pyx_v_a4 = __pyx_t_8; - - /* "scipy/stats/vonmises_cython.pyx":54 - * CK=50 - * a1, a2, a3, a4 = [28., 0.5, 100., 5.0] - * C1 = 50.1 # <<<<<<<<<<<<<< - * - * bx, bk = np.broadcast_arrays(x,k) - */ - __pyx_v_C1 = 50.1; - - /* "scipy/stats/vonmises_cython.pyx":56 - * C1 = 50.1 - * - * bx, bk = np.broadcast_arrays(x,k) # <<<<<<<<<<<<<< - * result = np.empty(bx.shape,dtype=np.float) - * - */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__broadcast_arrays); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - __Pyx_INCREF(__pyx_v_k); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_k); - __Pyx_GIVEREF(__pyx_v_k); - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { - PyObject* sequence = __pyx_t_2; - if (likely(PyTuple_CheckExact(sequence))) { - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - } else { - if (unlikely(PyList_GET_SIZE(sequence) != 2)) { - if (PyList_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyList_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_9 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_10 = Py_TYPE(__pyx_t_9)->tp_iternext; - index = 0; __pyx_t_1 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_1)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - index = 1; __pyx_t_3 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_3)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_9), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L7_unpacking_done; - __pyx_L6_unpacking_failed:; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_StopIteration)) PyErr_Clear(); - if (!PyErr_Occurred()) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_L7_unpacking_done:; - } - __pyx_v_bx = __pyx_t_1; - __pyx_t_1 = 0; - __pyx_v_bk = __pyx_t_3; - __pyx_t_3 = 0; - - /* "scipy/stats/vonmises_cython.pyx":57 - * - * bx, bk = np.broadcast_arrays(x,k) - * result = np.empty(bx.shape,dtype=np.float) # <<<<<<<<<<<<<< - * - * c_small_k = bk(1+a1+a2*temp_ks[i]-a3/(temp_ks[i]+a4)) - */ - __pyx_t_1 = PyObject_GetItem(__pyx_v_bk, __pyx_v_c_small_k); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__astype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_11 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__float); if (unlikely(!__pyx_t_11)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_11); - __Pyx_GIVEREF(__pyx_t_11); - __pyx_t_11 = 0; - __pyx_t_11 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_11) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_11, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = ((PyArrayObject *)__pyx_t_11); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp_ks); - __pyx_t_13 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_temp_ks, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_13 < 0)) { - PyErr_Fetch(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_temp_ks, (PyObject*)__pyx_v_temp_ks, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_14, __pyx_t_15, __pyx_t_16); - } - } - __pyx_bstride_0_temp_ks = __pyx_bstruct_temp_ks.strides[0]; - __pyx_bshape_0_temp_ks = __pyx_bstruct_temp_ks.shape[0]; - if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_12 
= 0; - __pyx_v_temp_ks = ((PyArrayObject *)__pyx_t_11); - __pyx_t_11 = 0; - - /* "scipy/stats/vonmises_cython.pyx":63 - * temp_xs = bx[c_small_k].astype(np.float) - * temp_ks = bk[c_small_k].astype(np.float) - * for i in range(len(temp)): # <<<<<<<<<<<<<< - * p = (1+a1+a2*temp_ks[i]-a3/(temp_ks[i]+a4)) - * temp[i] = von_mises_cdf_series(temp_ks[i],temp_xs[i],p) - */ - __pyx_t_17 = PyObject_Length(((PyObject *)__pyx_v_temp)); if (unlikely(__pyx_t_17 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { - __pyx_v_i = __pyx_t_18; - - /* "scipy/stats/vonmises_cython.pyx":64 - * temp_ks = bk[c_small_k].astype(np.float) - * for i in range(len(temp)): - * p = (1+a1+a2*temp_ks[i]-a3/(temp_ks[i]+a4)) # <<<<<<<<<<<<<< - * temp[i] = von_mises_cdf_series(temp_ks[i],temp_xs[i],p) - * if temp[i]<0: - */ - __pyx_t_19 = __pyx_v_i; - __pyx_t_20 = __pyx_v_i; - __pyx_t_8 = ((*__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp_ks.buf, __pyx_t_20, __pyx_bstride_0_temp_ks)) + __pyx_v_a4); - if (unlikely(__pyx_t_8 == 0)) { - PyErr_Format(PyExc_ZeroDivisionError, "float division"); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_v_p = ((int)(((1.0 + __pyx_v_a1) + (__pyx_v_a2 * (*__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp_ks.buf, __pyx_t_19, __pyx_bstride_0_temp_ks)))) - (__pyx_v_a3 / __pyx_t_8))); - - /* "scipy/stats/vonmises_cython.pyx":65 - * for i in range(len(temp)): - * p = (1+a1+a2*temp_ks[i]-a3/(temp_ks[i]+a4)) - * temp[i] = von_mises_cdf_series(temp_ks[i],temp_xs[i],p) # <<<<<<<<<<<<<< - * if temp[i]<0: - * temp[i]=0 - */ - __pyx_t_21 = __pyx_v_i; - __pyx_t_22 = __pyx_v_i; - __pyx_t_23 = __pyx_v_i; - *__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp.buf, __pyx_t_23, __pyx_bstride_0_temp) = __pyx_f_5scipy_5stats_15vonmises_cython_von_mises_cdf_series((*__Pyx_BufPtrStrided1d(double *, 
__pyx_bstruct_temp_ks.buf, __pyx_t_21, __pyx_bstride_0_temp_ks)), (*__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp_xs.buf, __pyx_t_22, __pyx_bstride_0_temp_xs)), __pyx_v_p); - - /* "scipy/stats/vonmises_cython.pyx":66 - * p = (1+a1+a2*temp_ks[i]-a3/(temp_ks[i]+a4)) - * temp[i] = von_mises_cdf_series(temp_ks[i],temp_xs[i],p) - * if temp[i]<0: # <<<<<<<<<<<<<< - * temp[i]=0 - * elif temp[i]>1: - */ - __pyx_t_24 = __pyx_v_i; - __pyx_t_4 = ((*__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp.buf, __pyx_t_24, __pyx_bstride_0_temp)) < 0.0); - if (__pyx_t_4) { - - /* "scipy/stats/vonmises_cython.pyx":67 - * temp[i] = von_mises_cdf_series(temp_ks[i],temp_xs[i],p) - * if temp[i]<0: - * temp[i]=0 # <<<<<<<<<<<<<< - * elif temp[i]>1: - * temp[i]=1 - */ - __pyx_t_25 = __pyx_v_i; - *__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp.buf, __pyx_t_25, __pyx_bstride_0_temp) = 0.0; - goto __pyx_L10; - } - - /* "scipy/stats/vonmises_cython.pyx":68 - * if temp[i]<0: - * temp[i]=0 - * elif temp[i]>1: # <<<<<<<<<<<<<< - * temp[i]=1 - * result[c_small_k] = temp - */ - __pyx_t_26 = __pyx_v_i; - __pyx_t_4 = ((*__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp.buf, __pyx_t_26, __pyx_bstride_0_temp)) > 1.0); - if (__pyx_t_4) { - - /* "scipy/stats/vonmises_cython.pyx":69 - * temp[i]=0 - * elif temp[i]>1: - * temp[i]=1 # <<<<<<<<<<<<<< - * result[c_small_k] = temp - * result[~c_small_k] = von_mises_cdf_normalapprox(bk[~c_small_k],bx[~c_small_k],C1) - */ - __pyx_t_27 = __pyx_v_i; - *__Pyx_BufPtrStrided1d(double *, __pyx_bstruct_temp.buf, __pyx_t_27, __pyx_bstride_0_temp) = 1.0; - goto __pyx_L10; - } - __pyx_L10:; - } - - /* "scipy/stats/vonmises_cython.pyx":70 - * elif temp[i]>1: - * temp[i]=1 - * result[c_small_k] = temp # <<<<<<<<<<<<<< - * result[~c_small_k] = von_mises_cdf_normalapprox(bk[~c_small_k],bx[~c_small_k],C1) - * - */ - if (PyObject_SetItem(__pyx_v_result, __pyx_v_c_small_k, ((PyObject *)__pyx_v_temp)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - - /* "scipy/stats/vonmises_cython.pyx":71 - * temp[i]=1 - * result[c_small_k] = temp - * result[~c_small_k] = von_mises_cdf_normalapprox(bk[~c_small_k],bx[~c_small_k],C1) # <<<<<<<<<<<<<< - * - * if not zerodim: - */ - __pyx_t_11 = __Pyx_GetName(__pyx_m, __pyx_n_s_1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_1 = PyNumber_Invert(__pyx_v_c_small_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetItem(__pyx_v_bk, __pyx_t_1); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Invert(__pyx_v_c_small_k); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_GetItem(__pyx_v_bx, __pyx_t_1); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyFloat_FromDouble(__pyx_v_C1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = PyTuple_New(3); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_9)); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_1 = 0; - 
__pyx_t_1 = PyObject_Call(__pyx_t_11, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0; - __pyx_t_9 = PyNumber_Invert(__pyx_v_c_small_k); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - if (PyObject_SetItem(__pyx_v_result, __pyx_t_9, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":73 - * result[~c_small_k] = von_mises_cdf_normalapprox(bk[~c_small_k],bx[~c_small_k],C1) - * - * if not zerodim: # <<<<<<<<<<<<<< - * return result+ix - * else: - */ - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_zerodim); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_28 = (!__pyx_t_4); - if (__pyx_t_28) { - - /* "scipy/stats/vonmises_cython.pyx":74 - * - * if not zerodim: - * return result+ix # <<<<<<<<<<<<<< - * else: - * return (result+ix)[0] - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyNumber_Add(__pyx_v_result, __pyx_v_ix); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - goto __pyx_L11; - } - /*else*/ { - - /* "scipy/stats/vonmises_cython.pyx":76 - * return result+ix - * else: - * return (result+ix)[0] # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyNumber_Add(__pyx_v_result, __pyx_v_ix); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_9; - __pyx_t_9 = 0; - goto __pyx_L0; - } - __pyx_L11:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_11); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp_xs); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp_ks); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("scipy.stats.vonmises_cython.von_mises_cdf", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp_xs); - __Pyx_SafeReleaseBuffer(&__pyx_bstruct_temp_ks); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_temp); - __Pyx_XDECREF((PyObject *)__pyx_v_temp_xs); - __Pyx_XDECREF((PyObject *)__pyx_v_temp_ks); - __Pyx_XDECREF(__pyx_v_zerodim); - __Pyx_XDECREF(__pyx_v_ix); - __Pyx_XDECREF(__pyx_v_bx); - __Pyx_XDECREF(__pyx_v_bk); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_c_small_k); - __Pyx_XDECREF(__pyx_v_k); - __Pyx_XDECREF(__pyx_v_x); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":190 - * # experimental exception made for __getbuffer__ and __releasebuffer__ - * # -- the details of this may change. 
- * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< - * # This implementation of getbuffer is geared towards Cython - * # requirements, and does not yet fullfill the PEP. - */ - -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_copy_shape; - int __pyx_v_i; - int __pyx_v_ndim; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - int __pyx_v_t; - char *__pyx_v_f; - PyArray_Descr *__pyx_v_descr = 0; - int __pyx_v_offset; - int __pyx_v_hasfields; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getbuffer__"); - if (__pyx_v_info != NULL) { - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - } - - /* "numpy.pxd":196 - * # of flags - * - * if info == NULL: return # <<<<<<<<<<<<<< - * - * cdef int copy_shape, i, ndim - */ - __pyx_t_1 = (__pyx_v_info == NULL); - if (__pyx_t_1) { - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":199 - * - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":200 - * cdef int copy_shape, i, ndim - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * - * ndim = PyArray_NDIM(self) - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":202 - * cdef bint 
little_endian = ((&endian_detector)[0] != 0) - * - * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":204 - * ndim = PyArray_NDIM(self) - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * copy_shape = 1 - * else: - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":205 - * - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * copy_shape = 1 # <<<<<<<<<<<<<< - * else: - * copy_shape = 0 - */ - __pyx_v_copy_shape = 1; - goto __pyx_L6; - } - /*else*/ { - - /* "numpy.pxd":207 - * copy_shape = 1 - * else: - * copy_shape = 0 # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - */ - __pyx_v_copy_shape = 0; - } - __pyx_L6:; - - /* "numpy.pxd":209 - * copy_shape = 0 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); - if (__pyx_t_1) { - - /* "numpy.pxd":210 - * - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not C contiguous") - * - */ - __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); - __pyx_t_3 = __pyx_t_2; - } else { - __pyx_t_3 = __pyx_t_1; - } - if (__pyx_t_3) { - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_3), NULL); 
if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; - - /* "numpy.pxd":213 - * raise ValueError(u"ndarray is not C contiguous") - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") - */ - __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); - if (__pyx_t_3) { - - /* "numpy.pxd":214 - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< - * raise ValueError(u"ndarray is not Fortran contiguous") - * - */ - __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); - __pyx_t_2 = __pyx_t_1; - } else { - __pyx_t_2 = __pyx_t_3; - } - if (__pyx_t_2) { - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L8; - } - __pyx_L8:; - - /* "numpy.pxd":217 - * raise ValueError(u"ndarray is not Fortran contiguous") - * - * info.buf = PyArray_DATA(self) # 
<<<<<<<<<<<<<< - * info.ndim = ndim - * if copy_shape: - */ - __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":218 - * - * info.buf = PyArray_DATA(self) - * info.ndim = ndim # <<<<<<<<<<<<<< - * if copy_shape: - * # Allocate new buffer for strides and shape info. - */ - __pyx_v_info->ndim = __pyx_v_ndim; - - /* "numpy.pxd":219 - * info.buf = PyArray_DATA(self) - * info.ndim = ndim - * if copy_shape: # <<<<<<<<<<<<<< - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - */ - if (__pyx_v_copy_shape) { - - /* "numpy.pxd":222 - * # Allocate new buffer for strides and shape info. - * # This is allocated as one block, strides first. - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< - * info.shape = info.strides + ndim - * for i in range(ndim): - */ - __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); - - /* "numpy.pxd":223 - * # This is allocated as one block, strides first. 
- * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim # <<<<<<<<<<<<<< - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - */ - __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); - - /* "numpy.pxd":224 - * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) - * info.shape = info.strides + ndim - * for i in range(ndim): # <<<<<<<<<<<<<< - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] - */ - __pyx_t_5 = __pyx_v_ndim; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "numpy.pxd":225 - * info.shape = info.strides + ndim - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - */ - (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - - /* "numpy.pxd":226 - * for i in range(ndim): - * info.strides[i] = PyArray_STRIDES(self)[i] - * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< - * else: - * info.strides = PyArray_STRIDES(self) - */ - (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); - } - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":228 - * info.shape[i] = PyArray_DIMS(self)[i] - * else: - * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - */ - __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":229 - * else: - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - */ - __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); - } - __pyx_L9:; - - /* "numpy.pxd":230 - * info.strides = PyArray_STRIDES(self) - * info.shape = PyArray_DIMS(self) - * 
info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) - */ - __pyx_v_info->suboffsets = NULL; - - /* "numpy.pxd":231 - * info.shape = PyArray_DIMS(self) - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< - * info.readonly = not PyArray_ISWRITEABLE(self) - * - */ - __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); - - /* "numpy.pxd":232 - * info.suboffsets = NULL - * info.itemsize = PyArray_ITEMSIZE(self) - * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< - * - * cdef int t - */ - __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); - - /* "numpy.pxd":235 - * - * cdef int t - * cdef char* f = NULL # <<<<<<<<<<<<<< - * cdef dtype descr = self.descr - * cdef list stack - */ - __pyx_v_f = NULL; - - /* "numpy.pxd":236 - * cdef int t - * cdef char* f = NULL - * cdef dtype descr = self.descr # <<<<<<<<<<<<<< - * cdef list stack - * cdef int offset - */ - __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); - __pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr; - - /* "numpy.pxd":240 - * cdef int offset - * - * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< - * - * if not hasfields and not copy_shape: - */ - __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); - - /* "numpy.pxd":242 - * cdef bint hasfields = PyDataType_HASFIELDS(descr) - * - * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< - * # do not call releasebuffer - * info.obj = None - */ - __pyx_t_2 = (!__pyx_v_hasfields); - if (__pyx_t_2) { - __pyx_t_3 = (!__pyx_v_copy_shape); - __pyx_t_1 = __pyx_t_3; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":244 - * if not hasfields and not copy_shape: - * # do not call releasebuffer - * info.obj = None # <<<<<<<<<<<<<< - * else: - * # need to call releasebuffer - */ - __Pyx_INCREF(Py_None); - 
__Pyx_GIVEREF(Py_None); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = Py_None; - goto __pyx_L12; - } - /*else*/ { - - /* "numpy.pxd":247 - * else: - * # need to call releasebuffer - * info.obj = self # <<<<<<<<<<<<<< - * - * if not hasfields: - */ - __Pyx_INCREF(__pyx_v_self); - __Pyx_GIVEREF(__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = __pyx_v_self; - } - __pyx_L12:; - - /* "numpy.pxd":249 - * info.obj = self - * - * if not hasfields: # <<<<<<<<<<<<<< - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - */ - __pyx_t_1 = (!__pyx_v_hasfields); - if (__pyx_t_1) { - - /* "numpy.pxd":250 - * - * if not hasfields: - * t = descr.type_num # <<<<<<<<<<<<<< - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - */ - __pyx_v_t = __pyx_v_descr->type_num; - - /* "numpy.pxd":251 - * if not hasfields: - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); - if (__pyx_t_1) { - __pyx_t_2 = __pyx_v_little_endian; - } else { - __pyx_t_2 = __pyx_t_1; - } - if (!__pyx_t_2) { - - /* "numpy.pxd":252 - * t = descr.type_num - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - */ - __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); - if (__pyx_t_1) { - __pyx_t_3 = (!__pyx_v_little_endian); - __pyx_t_7 = __pyx_t_3; - } else { - __pyx_t_7 = __pyx_t_1; - } - __pyx_t_1 = __pyx_t_7; - } else { - __pyx_t_1 = __pyx_t_2; - } - if (__pyx_t_1) { - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and 
not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L14; - } - __pyx_L14:; - - /* "numpy.pxd":254 - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - */ - __pyx_t_1 = (__pyx_v_t == NPY_BYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__b; - goto __pyx_L15; - } - - /* "numpy.pxd":255 - * raise ValueError(u"Non-native byte order not supported") - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__B; - goto __pyx_L15; - } - - /* "numpy.pxd":256 - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - */ - __pyx_t_1 = (__pyx_v_t == NPY_SHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__h; - goto __pyx_L15; - } - - /* "numpy.pxd":257 - * elif t == NPY_UBYTE: f = "B" - * elif t == NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - */ - __pyx_t_1 = (__pyx_v_t == NPY_USHORT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__H; - goto __pyx_L15; - } - - /* "numpy.pxd":258 - * elif t == 
NPY_SHORT: f = "h" - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - */ - __pyx_t_1 = (__pyx_v_t == NPY_INT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__i; - goto __pyx_L15; - } - - /* "numpy.pxd":259 - * elif t == NPY_USHORT: f = "H" - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - */ - __pyx_t_1 = (__pyx_v_t == NPY_UINT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__I; - goto __pyx_L15; - } - - /* "numpy.pxd":260 - * elif t == NPY_INT: f = "i" - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__l; - goto __pyx_L15; - } - - /* "numpy.pxd":261 - * elif t == NPY_UINT: f = "I" - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__L; - goto __pyx_L15; - } - - /* "numpy.pxd":262 - * elif t == NPY_LONG: f = "l" - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__q; - goto __pyx_L15; - } - - /* "numpy.pxd":263 - * elif t == NPY_ULONG: f = "L" - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - */ - __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Q; - goto __pyx_L15; - } - - /* "numpy.pxd":264 - * elif t == NPY_LONGLONG: f = "q" - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == 
NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - */ - __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__f; - goto __pyx_L15; - } - - /* "numpy.pxd":265 - * elif t == NPY_ULONGLONG: f = "Q" - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - */ - __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__d; - goto __pyx_L15; - } - - /* "numpy.pxd":266 - * elif t == NPY_FLOAT: f = "f" - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - */ - __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__g; - goto __pyx_L15; - } - - /* "numpy.pxd":267 - * elif t == NPY_DOUBLE: f = "d" - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zf; - goto __pyx_L15; - } - - /* "numpy.pxd":268 - * elif t == NPY_LONGDOUBLE: f = "g" - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f = "O" - */ - __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zd; - goto __pyx_L15; - } - - /* "numpy.pxd":269 - * elif t == NPY_CFLOAT: f = "Zf" - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f = "O" - * else: - */ - __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__Zg; - goto __pyx_L15; - } - - /* "numpy.pxd":270 - * elif t == NPY_CDOUBLE: f = "Zd" - * elif t == NPY_CLONGDOUBLE: f = "Zg" - * elif t == NPY_OBJECT: f 
= "O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); - if (__pyx_t_1) { - __pyx_v_f = __pyx_k__O; - goto __pyx_L15; - } - /*else*/ { - - /* "numpy.pxd":272 - * elif t == NPY_OBJECT: f = "O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * info.format = f - * return - */ - __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_8), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_8)); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); - __pyx_t_8 = 0; - __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L15:; - - /* "numpy.pxd":273 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f # <<<<<<<<<<<<<< - * return - * else: - */ - __pyx_v_info->format = __pyx_v_f; - - /* "numpy.pxd":274 - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * info.format = f - * 
return # <<<<<<<<<<<<<< - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - */ - __pyx_r = 0; - goto __pyx_L0; - goto __pyx_L13; - } - /*else*/ { - - /* "numpy.pxd":276 - * return - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 - */ - __pyx_v_info->format = ((char *)malloc(255)); - - /* "numpy.pxd":277 - * else: - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< - * offset = 0 - * f = _util_dtypestring(descr, info.format + 1, - */ - (__pyx_v_info->format[0]) = '^'; - - /* "numpy.pxd":278 - * info.format = stdlib.malloc(_buffer_format_string_len) - * info.format[0] = '^' # Native data types, manual alignment - * offset = 0 # <<<<<<<<<<<<<< - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - */ - __pyx_v_offset = 0; - - /* "numpy.pxd":281 - * f = _util_dtypestring(descr, info.format + 1, - * info.format + _buffer_format_string_len, - * &offset) # <<<<<<<<<<<<<< - * f[0] = 0 # Terminate format string - * - */ - __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_9; - - /* "numpy.pxd":282 - * info.format + _buffer_format_string_len, - * &offset) - * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - */ - (__pyx_v_f[0]) = 0; - } - __pyx_L13:; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info != NULL && __pyx_v_info->obj 
!= NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(Py_None); - __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; - } - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_descr); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":284 - * f[0] = 0 # Terminate format string - * - * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - */ - -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ -static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__releasebuffer__"); - - /* "numpy.pxd":285 - * - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - */ - __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); - if (__pyx_t_1) { - - /* "numpy.pxd":286 - * def __releasebuffer__(ndarray self, Py_buffer* info): - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) # <<<<<<<<<<<<<< - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * stdlib.free(info.strides) - */ - free(__pyx_v_info->format); - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":287 - * if PyArray_HASFIELDS(self): - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< - * stdlib.free(info.strides) - * # info.shape was stored after info.strides in the same block - */ - __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); - if (__pyx_t_1) { - - /* "numpy.pxd":288 - * stdlib.free(info.format) - * if sizeof(npy_intp) != sizeof(Py_ssize_t): - * 
stdlib.free(info.strides) # <<<<<<<<<<<<<< - * # info.shape was stored after info.strides in the same block - * - */ - free(__pyx_v_info->strides); - goto __pyx_L6; - } - __pyx_L6:; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":764 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); - - /* "numpy.pxd":765 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":767 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); - - /* "numpy.pxd":768 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":770 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); - - /* "numpy.pxd":771 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r 
= Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":773 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); - - /* "numpy.pxd":774 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":776 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - 
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); - - /* "numpy.pxd":777 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":779 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< - * # Recursive utility function used in __getbuffer__ to get format - * # string. The new location in the format string is returned. 
- */ - -static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { - PyArray_Descr *__pyx_v_child = 0; - int __pyx_v_endian_detector; - int __pyx_v_little_endian; - PyObject *__pyx_v_fields = 0; - PyObject *__pyx_v_childname = NULL; - PyObject *__pyx_v_new_offset = NULL; - PyObject *__pyx_v_t = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - long __pyx_t_10; - char *__pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_util_dtypestring"); - - /* "numpy.pxd":786 - * cdef int delta_offset - * cdef tuple i - * cdef int endian_detector = 1 # <<<<<<<<<<<<<< - * cdef bint little_endian = ((&endian_detector)[0] != 0) - * cdef tuple fields - */ - __pyx_v_endian_detector = 1; - - /* "numpy.pxd":787 - * cdef tuple i - * cdef int endian_detector = 1 - * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< - * cdef tuple fields - * - */ - __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); - - /* "numpy.pxd":790 - * cdef tuple fields - * - * for childname in descr.names: # <<<<<<<<<<<<<< - * fields = descr.fields[childname] - * child, new_offset = fields - */ - if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; - 
__Pyx_XDECREF(__pyx_v_childname); - __pyx_v_childname = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":791 - * - * for childname in descr.names: - * fields = descr.fields[childname] # <<<<<<<<<<<<<< - * child, new_offset = fields - * - */ - __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); - __pyx_v_fields = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "numpy.pxd":792 - * for childname in descr.names: - * fields = descr.fields[childname] - * child, new_offset = fields # <<<<<<<<<<<<<< - * - * if (end - f) - (new_offset - offset[0]) < 15: - */ - if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { - PyObject* sequence = ((PyObject *)__pyx_v_fields); - if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { - if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); - else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - } else { - __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_XDECREF(((PyObject 
*)__pyx_v_child)); - __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_v_new_offset); - __pyx_v_new_offset = __pyx_t_4; - __pyx_t_4 = 0; - - /* "numpy.pxd":794 - * child, new_offset = fields - * - * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - */ - __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in 
numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L5; - } - __pyx_L5:; - - /* "numpy.pxd":797 - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - * - * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '>'); - if (__pyx_t_6) { - __pyx_t_7 = __pyx_v_little_endian; - } else { - __pyx_t_7 = __pyx_t_6; - } - if (!__pyx_t_7) { - - /* "numpy.pxd":798 - * - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< - * raise ValueError(u"Non-native byte order not supported") - * # One could encode it in the format string and have Cython - */ - __pyx_t_6 = (__pyx_v_child->byteorder == '<'); - if (__pyx_t_6) { - __pyx_t_8 = (!__pyx_v_little_endian); - __pyx_t_9 = __pyx_t_8; - } else { - __pyx_t_9 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_9; - } else { - __pyx_t_6 = __pyx_t_7; - } - if (__pyx_t_6) { - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_11), NULL); if 
(unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L6; - } - __pyx_L6:; - - /* "numpy.pxd":809 - * - * # Output padding bytes - * while offset[0] < new_offset: # <<<<<<<<<<<<<< - * f[0] = 120 # "x"; pad byte - * f += 1 - */ - while (1) { - __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (!__pyx_t_6) break; - - /* "numpy.pxd":810 - * # Output padding bytes - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< - * f += 1 - * offset[0] += 1 - */ - (__pyx_v_f[0]) = 120; - - /* "numpy.pxd":811 - * while offset[0] < new_offset: - * f[0] = 120 # "x"; pad byte - * f += 1 # <<<<<<<<<<<<<< - * offset[0] += 1 - * - */ - __pyx_v_f = (__pyx_v_f + 1); - - /* "numpy.pxd":812 - * f[0] = 120 # "x"; pad byte - * f += 1 - * offset[0] += 1 # <<<<<<<<<<<<<< - * - * offset[0] += child.itemsize - */ - __pyx_t_10 = 0; - (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); - } - - /* "numpy.pxd":814 - * offset[0] += 1 - * - * offset[0] += child.itemsize # <<<<<<<<<<<<<< - * - * if not PyDataType_HASFIELDS(child): - */ - __pyx_t_10 = 0; - 
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); - - /* "numpy.pxd":816 - * offset[0] += child.itemsize - * - * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< - * t = child.type_num - * if end - f < 5: - */ - __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); - if (__pyx_t_6) { - - /* "numpy.pxd":817 - * - * if not PyDataType_HASFIELDS(child): - * t = child.type_num # <<<<<<<<<<<<<< - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") - */ - __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_v_t); - __pyx_v_t = __pyx_t_3; - __pyx_t_3 = 0; - - /* "numpy.pxd":818 - * if not PyDataType_HASFIELDS(child): - * t = child.type_num - * if end - f < 5: # <<<<<<<<<<<<<< - * raise RuntimeError(u"Format string allocated too short.") - * - */ - __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); - if (__pyx_t_6) { - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_13), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L10; - } - __pyx_L10:; - - /* "numpy.pxd":822 - * - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - */ - __pyx_t_3 = 
PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 98; - goto __pyx_L11; - } - - /* "numpy.pxd":823 - * # Until ticket #99 is fixed, use integers to avoid warnings - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 66; - goto __pyx_L11; - } - - /* "numpy.pxd":824 - * if t == NPY_BYTE: f[0] = 98 #"b" - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - */ - __pyx_t_3 = PyInt_FromLong(NPY_SHORT); 
if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 104; - goto __pyx_L11; - } - - /* "numpy.pxd":825 - * elif t == NPY_UBYTE: f[0] = 66 #"B" - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - */ - __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 72; - goto __pyx_L11; - } - - /* "numpy.pxd":826 - * elif t == NPY_SHORT: f[0] = 104 #"h" - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - */ - __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 105; - goto __pyx_L11; - } - - /* "numpy.pxd":827 - * elif t == NPY_USHORT: f[0] = 72 #"H" - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - */ - __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 73; - goto __pyx_L11; - } - - /* "numpy.pxd":828 - * elif t == NPY_INT: f[0] = 105 #"i" - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 108; - goto __pyx_L11; - } - - /* "numpy.pxd":829 - * elif t == NPY_UINT: f[0] = 73 #"I" - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 76; - goto __pyx_L11; - } - - /* "numpy.pxd":830 - * elif t == NPY_LONG: f[0] = 108 #"l" - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 113; - goto __pyx_L11; - } - - /* "numpy.pxd":831 - * elif t == NPY_ULONG: f[0] = 76 #"L" - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - */ - __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 81; - goto __pyx_L11; - } - - /* "numpy.pxd":832 - * elif t == NPY_LONGLONG: f[0] = 113 #"q" - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - */ - __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 102; - goto __pyx_L11; - } - - /* "numpy.pxd":833 - * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - */ - __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 100; - goto __pyx_L11; - } - - /* "numpy.pxd":834 - * elif t == NPY_FLOAT: f[0] = 102 #"f" - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - */ - __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 103; - goto __pyx_L11; - } - - /* "numpy.pxd":835 - * elif t == NPY_DOUBLE: f[0] = 100 #"d" - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - */ - __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 102; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":836 - * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] 
= 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" - */ - __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 100; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":837 - * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - */ - __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 90; - (__pyx_v_f[1]) = 103; - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L11; - } - - /* "numpy.pxd":838 - * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 
100; f += 1 # Zd - * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - */ - __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_6) { - (__pyx_v_f[0]) = 79; - goto __pyx_L11; - } - /*else*/ { - - /* "numpy.pxd":840 - * elif t == NPY_OBJECT: f[0] = 79 #"O" - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< - * f += 1 - * else: - */ - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_8), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); - __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - 
__Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - } - __pyx_L11:; - - /* "numpy.pxd":841 - * else: - * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - * f += 1 # <<<<<<<<<<<<<< - * else: - * # Cython ignores struct boundary information ("T{...}"), - */ - __pyx_v_f = (__pyx_v_f + 1); - goto __pyx_L9; - } - /*else*/ { - - /* "numpy.pxd":845 - * # Cython ignores struct boundary information ("T{...}"), - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< - * return f - * - */ - __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_f = __pyx_t_11; - } - __pyx_L9:; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "numpy.pxd":846 - * # so don't output it - * f = _util_dtypestring(child, f, end, offset) - * return f # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_f; - goto __pyx_L0; - - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_child); - __Pyx_XDECREF(__pyx_v_fields); - __Pyx_XDECREF(__pyx_v_childname); - __Pyx_XDECREF(__pyx_v_new_offset); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "numpy.pxd":961 - * - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * cdef PyObject* baseptr - * if base is None: - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - PyObject *__pyx_v_baseptr; - 
__Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("set_array_base"); - - /* "numpy.pxd":963 - * cdef inline void set_array_base(ndarray arr, object base): - * cdef PyObject* baseptr - * if base is None: # <<<<<<<<<<<<<< - * baseptr = NULL - * else: - */ - __pyx_t_1 = (__pyx_v_base == Py_None); - if (__pyx_t_1) { - - /* "numpy.pxd":964 - * cdef PyObject* baseptr - * if base is None: - * baseptr = NULL # <<<<<<<<<<<<<< - * else: - * Py_INCREF(base) # important to do this before decref below! - */ - __pyx_v_baseptr = NULL; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":966 - * baseptr = NULL - * else: - * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< - * baseptr = base - * Py_XDECREF(arr.base) - */ - Py_INCREF(__pyx_v_base); - - /* "numpy.pxd":967 - * else: - * Py_INCREF(base) # important to do this before decref below! - * baseptr = base # <<<<<<<<<<<<<< - * Py_XDECREF(arr.base) - * arr.base = baseptr - */ - __pyx_v_baseptr = ((PyObject *)__pyx_v_base); - } - __pyx_L3:; - - /* "numpy.pxd":968 - * Py_INCREF(base) # important to do this before decref below! 
- * baseptr = base - * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< - * arr.base = baseptr - * - */ - Py_XDECREF(__pyx_v_arr->base); - - /* "numpy.pxd":969 - * baseptr = base - * Py_XDECREF(arr.base) - * arr.base = baseptr # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - __pyx_v_arr->base = __pyx_v_baseptr; - - __Pyx_RefNannyFinishContext(); -} - -/* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base"); - - /* "numpy.pxd":972 - * - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: # <<<<<<<<<<<<<< - * return None - * else: - */ - __pyx_t_1 = (__pyx_v_arr->base == NULL); - if (__pyx_t_1) { - - /* "numpy.pxd":973 - * cdef inline object get_array_base(ndarray arr): - * if arr.base is NULL: - * return None # <<<<<<<<<<<<<< - * else: - * return arr.base - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_None); - __pyx_r = Py_None; - goto __pyx_L0; - goto __pyx_L3; - } - /*else*/ { - - /* "numpy.pxd":975 - * return None - * else: - * return arr.base # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); - __pyx_r = ((PyObject *)__pyx_v_arr->base); - goto __pyx_L0; - } - __pyx_L3:; - - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - __Pyx_NAMESTR("vonmises_cython"), - 0, /* m_doc */ - -1, /* m_size */ - __pyx_methods /* m_methods */, - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ 
-}; -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 1}, - {&__pyx_kp_u_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 1, 0, 0}, - {&__pyx_n_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 1}, - {&__pyx_n_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 1}, - {&__pyx_n_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 1}, - {&__pyx_n_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 1}, - {&__pyx_kp_u_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 1, 0, 0}, - {&__pyx_kp_u_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 1, 0, 0}, - {&__pyx_kp_u_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 1, 0, 0}, - {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, - {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, - {&__pyx_n_s__C1, __pyx_k__C1, sizeof(__pyx_k__C1), 0, 0, 1, 1}, - {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, - {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, - {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, - {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, - {&__pyx_n_s__asarray, __pyx_k__asarray, sizeof(__pyx_k__asarray), 0, 0, 1, 1}, - {&__pyx_n_s__astype, __pyx_k__astype, sizeof(__pyx_k__astype), 0, 0, 1, 1}, - {&__pyx_n_s__atleast_1d, __pyx_k__atleast_1d, sizeof(__pyx_k__atleast_1d), 0, 0, 1, 1}, - {&__pyx_n_s__broadcast_arrays, __pyx_k__broadcast_arrays, sizeof(__pyx_k__broadcast_arrays), 0, 0, 1, 1}, - {&__pyx_n_s__cdf, __pyx_k__cdf, sizeof(__pyx_k__cdf), 0, 0, 1, 1}, - {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, - {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1}, - {&__pyx_n_s__exp, __pyx_k__exp, sizeof(__pyx_k__exp), 0, 0, 1, 1}, - {&__pyx_n_s__float, __pyx_k__float, sizeof(__pyx_k__float), 0, 0, 1, 1}, - {&__pyx_n_s__i0, __pyx_k__i0, sizeof(__pyx_k__i0), 0, 0, 1, 1}, - {&__pyx_n_s__k, __pyx_k__k, 
sizeof(__pyx_k__k), 0, 0, 1, 1}, - {&__pyx_n_s__ndim, __pyx_k__ndim, sizeof(__pyx_k__ndim), 0, 0, 1, 1}, - {&__pyx_n_s__norm, __pyx_k__norm, sizeof(__pyx_k__norm), 0, 0, 1, 1}, - {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, - {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, - {&__pyx_n_s__pi, __pyx_k__pi, sizeof(__pyx_k__pi), 0, 0, 1, 1}, - {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, - {&__pyx_n_s__round, __pyx_k__round, sizeof(__pyx_k__round), 0, 0, 1, 1}, - {&__pyx_n_s__scipy, __pyx_k__scipy, sizeof(__pyx_k__scipy), 0, 0, 1, 1}, - {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, - {&__pyx_n_s__sin, __pyx_k__sin, sizeof(__pyx_k__sin), 0, 0, 1, 1}, - {&__pyx_n_s__sqrt, __pyx_k__sqrt, sizeof(__pyx_k__sqrt), 0, 0, 1, 1}, - {&__pyx_n_s__stats, __pyx_k__stats, sizeof(__pyx_k__stats), 0, 0, 1, 1}, - {&__pyx_n_s__von_mises_cdf, __pyx_k__von_mises_cdf, sizeof(__pyx_k__von_mises_cdf), 0, 0, 1, 1}, - {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - return 0; - __pyx_L1_error:; - return -1; -} - -static int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); - - /* "numpy.pxd":211 - * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == 
pybuf.PyBUF_C_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< - * - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - */ - __pyx_k_tuple_3 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_3)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_2)); - PyTuple_SET_ITEM(__pyx_k_tuple_3, 0, ((PyObject *)__pyx_kp_u_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_2)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_3)); - - /* "numpy.pxd":215 - * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< - * - * info.buf = PyArray_DATA(self) - */ - __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_5)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_4)); - PyTuple_SET_ITEM(__pyx_k_tuple_5, 0, ((PyObject *)__pyx_kp_u_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_5)); - - /* "numpy.pxd":253 - * if ((descr.byteorder == '>' and little_endian) or - * (descr.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * if t == NPY_BYTE: f = "b" - * elif t == NPY_UBYTE: f = "B" - */ - __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_7)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_6)); - PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, ((PyObject *)__pyx_kp_u_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_6)); - 
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7)); - - /* "numpy.pxd":795 - * - * if (end - f) - (new_offset - offset[0]) < 15: - * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< - * - * if ((child.byteorder == '>' and little_endian) or - */ - __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_9)); - PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_9)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_9)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); - - /* "numpy.pxd":799 - * if ((child.byteorder == '>' and little_endian) or - * (child.byteorder == '<' and not little_endian)): - * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< - * # One could encode it in the format string and have Cython - * # complain instead, BUT: < and > in format strings also imply - */ - __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_6)); - PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_6)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); - - /* "numpy.pxd":819 - * t = child.type_num - * if end - f < 5: - * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< - * - * # Until ticket #99 is fixed, use integers to avoid warnings - */ - __pyx_k_tuple_13 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_13)); - __Pyx_INCREF(((PyObject *)__pyx_kp_u_12)); - PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, 
((PyObject *)__pyx_kp_u_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_12)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13)); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_16 = PyInt_FromLong(16); if (unlikely(!__pyx_int_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - __pyx_int_24 = PyInt_FromLong(24); if (unlikely(!__pyx_int_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - return 0; - __pyx_L1_error:; - return -1; -} - -#if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initvonmises_cython(void); /*proto*/ -PyMODINIT_FUNC initvonmises_cython(void) -#else -PyMODINIT_FUNC PyInit_vonmises_cython(void); /*proto*/ -PyMODINIT_FUNC PyInit_vonmises_cython(void) -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = 
__Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } - #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_vonmises_cython(void)"); - if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #ifdef __pyx_binding_PyCFunctionType_USED - if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? 
*/ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("vonmises_cython"), __pyx_methods, 0, 0, PYTHON_API_VERSION); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - /*--- Initialize various global constants etc. ---*/ - if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__pyx_module_is_main_scipy__stats__vonmises_cython) { - if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - } - /*--- Builtin init code ---*/ - if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Constants init code ---*/ - if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - /*--- Type import code ---*/ - __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_flatiter = 
__Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /*--- Variable import code ---*/ - /*--- Function import code ---*/ - /*--- Execution code ---*/ - - /* "scipy/stats/vonmises_cython.pyx":1 - * import numpy as np # <<<<<<<<<<<<<< - * import scipy.stats - * from scipy.special import i0 - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":2 - * import numpy as np - * import scipy.stats # <<<<<<<<<<<<<< - * from scipy.special import i0 - * import numpy.testing - */ - __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s_14), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if 
(PyObject_SetAttr(__pyx_m, __pyx_n_s__scipy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "scipy/stats/vonmises_cython.pyx":3 - * import numpy as np - * import scipy.stats - * from scipy.special import i0 # <<<<<<<<<<<<<< - * import numpy.testing - * cimport numpy as np - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __Pyx_INCREF(((PyObject *)__pyx_n_s__i0)); - PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__i0)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i0)); - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_15), ((PyObject *)__pyx_t_1), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__i0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__i0, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":4 - * import scipy.stats - * from scipy.special import i0 - * import numpy.testing # <<<<<<<<<<<<<< - * cimport numpy as np - * - */ - __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_16), 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__numpy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":28 - * return 0.5+x/(2*np.pi) + V/np.pi - * - * def von_mises_cdf_normalapprox(k,x,C1): # <<<<<<<<<<<<<< - * b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) - * z = b*np.sin(x/2.) - */ - __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5scipy_5stats_15vonmises_cython_von_mises_cdf_normalapprox, NULL, __pyx_n_s_17); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_1, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":37 - * cimport cython - * @cython.boundscheck(False) - * def von_mises_cdf(k,x): # <<<<<<<<<<<<<< - * cdef np.ndarray[double, ndim=1] temp, temp_xs, temp_ks - * cdef unsigned int i, p - */ - __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5scipy_5stats_15vonmises_cython_1von_mises_cdf, NULL, __pyx_n_s_17); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_2); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__von_mises_cdf, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "scipy/stats/vonmises_cython.pyx":1 - * import numpy as np # <<<<<<<<<<<<<< - * import scipy.stats - * from scipy.special import i0 - */ - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - 
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - - /* "numpy.pxd":971 - * arr.base = baseptr - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * if arr.base is NULL: - * return None - */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - if (__pyx_m) { - __Pyx_AddTraceback("init scipy.stats.vonmises_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); - Py_DECREF(__pyx_m); __pyx_m = 0; - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init scipy.stats.vonmises_cython"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else - return __pyx_m; - #endif -} - -/* Runtime support code */ - -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule((char *)modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif /* CYTHON_REFNANNY */ - -static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { - PyObject *result; - result = PyObject_GetAttr(dict, name); - if (!result) { - if (dict != __pyx_b) { - PyErr_Clear(); - result = PyObject_GetAttr(__pyx_b, name); - } - if (!result) { - PyErr_SetObject(PyExc_NameError, name); - } - } - return result; -} - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s 
(%"PY_FORMAT_SIZE_T"d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { - values[name-argnames] = value; - } else { - /* unexpected keyword found */ - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) 
{ - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - } - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%s() got an unexpected keyword argument '%s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", - index, (index == 1) ? "" : "s"); -} - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); -} - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } else if (PyErr_Occurred()) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -} - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_Format(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(PyObject_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { - unsigned int n = 1; - return *(unsigned char*)(&n) != 0; -} - -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - int 
is_complex; - char enc_type; - char new_packmode; - char enc_packmode; -} __Pyx_BufFmt_Context; - -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} - -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t < '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} - -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} - -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case 'b': return "'char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 0: return "end"; - default: return "unparseable format string"; - } -} - -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif - -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} - -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; - case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; - case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); - case 'O': return 'O'; - case 'P': return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} - -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} - -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset; - if (ctx->enc_type == 0) return 0; - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - } - - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - /* special case -- treat as struct rather than complex number */ - 
size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - - ctx->fmt_offset += size; - - --ctx->enc_count; /* Consume from buffer string */ - - /* Done checking, move to next field, pushing or popping struct stack if needed */ - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; /* breaks both loops as ctx->enc_count == 0 */ - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; /* empty struct */ - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} - -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case 10: - case 13: - ++ts; - break; - case '<': - if (!__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian 
buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_IsLittleEndian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': /* substruct */ - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - } - break; - case '}': /* end of substruct; either repeat or move on */ - ++ts; - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } /* fall through */ - case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': - if (ctx->enc_type == *ts && got_Z == ctx->is_complex && - ctx->enc_packmode == ctx->new_packmode) { - /* Continue pooling same type */ - ctx->enc_count += ctx->new_count; - } else { - /* New type */ - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - } - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - default: 
- { - int number = __Pyx_BufFmt_ParseNumber(&ts); - if (number == -1) { /* First char was not a digit */ - PyErr_Format(PyExc_ValueError, - "Does not understand character buffer dtype format string ('%c')", *ts); - return NULL; - } - ctx->new_count = (size_t)number; - } - } - } -} - -static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} - -static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { - if (obj == Py_None || obj == NULL) { - __Pyx_ZeroBuffer(buf); - return 0; - } - buf->buf = NULL; - if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; - if (buf->ndim != nd) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if ((unsigned)buf->itemsize != dtype->size) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_ZeroBuffer(buf); - return -1; -} - -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (info->buf == NULL) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} - -static void __Pyx_RaiseBufferFallbackError(void) { - PyErr_Format(PyExc_ValueError, - "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); -} - - - -static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyThreadState *tstate = PyThreadState_GET(); - - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} - -static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { - PyThreadState *tstate = PyThreadState_GET(); - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} - - -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - /* cause is unused */ - Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - /* First, check the traceback argument, replacing None with NULL. 
*/ - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - /* Next, replace a missing value with None */ - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) - #else - if (!PyType_Check(type)) - #endif - { - /* Raising an instance. The value should be a dummy. */ - if (value != Py_None) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - /* Normalize to raise , */ - Py_DECREF(value); - value = type; - #if PY_VERSION_HEX < 0x02050000 - if (PyInstance_Check(type)) { - type = (PyObject*) ((PyInstanceObject*)type)->in_class; - Py_INCREF(type); - } - else { - type = 0; - PyErr_SetString(PyExc_TypeError, - "raise: exception must be an old-style class or instance"); - goto raise_error; - } - #else - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - #endif - } - - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} - -#else /* Python 3+ */ - -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if 
(!PyExceptionClass_Check(type)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - - if (cause) { - PyObject *fixed_cause; - if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - if (!value) { - value = PyObject_CallObject(type, NULL); - } - PyException_SetCause(value, fixed_cause); - } - - PyErr_SetObject(type, value); - - if (tb) { - PyThreadState *tstate = PyThreadState_GET(); - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } - } - -bad: - return; -} -#endif - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - #if PY_VERSION_HEX >= 0x02060000 - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); - else { - PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; - } -} - -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject* obj = view->obj; - if (obj) { - #if PY_VERSION_HEX >= 0x02060000 - if 
(PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} - #endif - if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); - Py_DECREF(obj); - view->obj = NULL; - } -} - -#endif - -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { - PyObject *py_import = 0; - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); - if (!py_import) - goto bad; - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - #if PY_VERSION_HEX >= 0x02050000 - { - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - } - #else - if (level>0) { - PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); - goto bad; - } - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, NULL); - #endif -bad: - Py_XDECREF(empty_list); - Py_XDECREF(py_import); - Py_XDECREF(empty_dict); - return module; -} - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - 
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && 
b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(a, a); - case 3: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, a); - case 4: - z = __Pyx_c_prodf(a, a); - return __Pyx_c_prodf(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - theta = 0; - } else { - r = __Pyx_c_absf(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - 
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double denom = b.real * b.real + b.imag * b.imag; - z.real = (a.real * b.real + a.imag * b.imag) / denom; - z.imag = (a.imag * b.real - a.real * b.imag) / denom; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(a, a); - case 3: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, a); - case 4: - z = __Pyx_c_prod(a, a); - return __Pyx_c_prod(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } - r = a.real; - 
theta = 0; - } else { - r = __Pyx_c_abs(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { - const unsigned char neg_one = (unsigned char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned char" : - "value too large to convert to unsigned char"); - } - return (unsigned char)-1; - } - return (unsigned char)val; - } - return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { - const unsigned short neg_one = (unsigned short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to unsigned short" : - "value too large to convert to unsigned short"); - } - return (unsigned short)-1; - } - return (unsigned short)val; - } - return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { - const unsigned int neg_one = (unsigned int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(unsigned int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(unsigned int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to unsigned int" : - "value too large to convert to unsigned int"); - } - return (unsigned int)-1; - } - return (unsigned int)val; - } - return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); -} - -static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { - const char neg_one = (char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to char" : - "value too large to convert to char"); - } - return (char)-1; - } - return (char)val; - } - return (char)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { - const short neg_one = (short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to short" : - "value too large to convert to short"); - } - return (short)-1; - } - return (short)val; - } - return (short)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { - const signed char neg_one = (signed char)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed char) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed char)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed char" : - "value too large to convert to signed char"); - } - return (signed char)-1; - } - return (signed char)val; - } - return (signed char)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { - const signed short neg_one = (signed short)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed short) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed short)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to signed short" : - "value too large to convert to signed short"); - } - return (signed short)-1; - } - return (signed short)val; - } - return (signed short)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { - const signed int neg_one = (signed int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(signed int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(signed int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? - "can't convert negative value to signed int" : - "value too large to convert to signed int"); - } - return (signed int)-1; - } - return (signed int)val; - } - return (signed int)__Pyx_PyInt_AsSignedLong(x); -} - -static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { - const int neg_one = (int)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; - if (sizeof(int) < sizeof(long)) { - long val = __Pyx_PyInt_AsLong(x); - if (unlikely(val != (long)(int)val)) { - if (!unlikely(val == -1 && PyErr_Occurred())) { - PyErr_SetString(PyExc_OverflowError, - (is_unsigned && unlikely(val < 0)) ? 
- "can't convert negative value to int" : - "value too large to convert to int"); - } - return (int)-1; - } - return (int)val; - } - return (int)__Pyx_PyInt_AsLong(x); -} - -static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { - const unsigned long neg_one = (unsigned long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned long"); - return (unsigned long)-1; - } - return (unsigned long)PyLong_AsUnsignedLong(x); - } else { - return (unsigned long)PyLong_AsLong(x); - } - } else { - unsigned long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned long)-1; - val = __Pyx_PyInt_AsUnsignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { - const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned PY_LONG_LONG"); - return (unsigned PY_LONG_LONG)-1; - } - return (unsigned 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - unsigned PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (unsigned PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsUnsignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { - const long neg_one = (long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long)-1; - } - return (long)PyLong_AsUnsignedLong(x); - } else { - return (long)PyLong_AsLong(x); - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (long)-1; - val = __Pyx_PyInt_AsLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { - const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to PY_LONG_LONG"); - return (PY_LONG_LONG)-1; - } - return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return 
(PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { - const signed long neg_one = (signed long)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed long"); - return (signed long)-1; - } - return (signed long)PyLong_AsUnsignedLong(x); - } else { - return (signed long)PyLong_AsLong(x); - } - } else { - signed long val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed long)-1; - val = __Pyx_PyInt_AsSignedLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { - const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; - const int is_unsigned = neg_one > const_zero; -#if PY_VERSION_HEX < 0x03000000 - if (likely(PyInt_Check(x))) { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed PY_LONG_LONG)val; - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { - if (unlikely(Py_SIZE(x) < 0)) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to signed PY_LONG_LONG"); - return (signed PY_LONG_LONG)-1; - } - return (signed 
PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); - } else { - return (signed PY_LONG_LONG)PyLong_AsLongLong(x); - } - } else { - signed PY_LONG_LONG val; - PyObject *tmp = __Pyx_PyNumber_Int(x); - if (!tmp) return (signed PY_LONG_LONG)-1; - val = __Pyx_PyInt_AsSignedLongLong(tmp); - Py_DECREF(tmp); - return val; - } -} - -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename) { - PyObject *old_exc, *old_val, *old_tb; - PyObject *ctx; - __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); - #if PY_MAJOR_VERSION < 3 - ctx = PyString_FromString(name); - #else - ctx = PyUnicode_FromString(name); - #endif - __Pyx_ErrRestore(old_exc, old_val, old_tb); - if (!ctx) { - PyErr_WriteUnraisable(Py_None); - } else { - PyErr_WriteUnraisable(ctx); - Py_DECREF(ctx); - } -} - -static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - #if PY_VERSION_HEX < 0x02050000 - return PyErr_Warn(NULL, message); - #else - return PyErr_WarnEx(NULL, message, 1); - #endif - } - return 0; -} - -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, - size_t size, int strict) -{ - PyObject *py_module = 0; - PyObject *result = 0; - PyObject *py_name = 0; - char warning[200]; - - py_module = __Pyx_ImportModule(module_name); - if (!py_module) - goto bad; - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(class_name); - #else - py_name = PyUnicode_FromString(class_name); - #endif - if (!py_name) - goto bad; - result = PyObject_GetAttr(py_module, py_name); 
- Py_DECREF(py_name); - py_name = 0; - Py_DECREF(py_module); - py_module = 0; - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%s.%s is not a type object", - module_name, class_name); - goto bad; - } - if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility", - module_name, class_name); - #if PY_VERSION_HEX < 0x02050000 - if (PyErr_Warn(NULL, warning) < 0) goto bad; - #else - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - #endif - } - else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { - PyErr_Format(PyExc_ValueError, - "%s.%s has the wrong size, try recompiling", - module_name, class_name); - goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(py_module); - Py_XDECREF(result); - return NULL; -} -#endif - -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - - #if PY_MAJOR_VERSION < 3 - py_name = PyString_FromString(name); - #else - py_name = PyUnicode_FromString(name); - #endif - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" - -static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, - int __pyx_lineno, const char *__pyx_filename) { - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - PyObject *py_globals = 0; - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(__pyx_filename); - #else - py_srcfile = PyUnicode_FromString(__pyx_filename); - #endif - if (!py_srcfile) goto bad; - if (__pyx_clineno) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( 
"%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_globals = PyModule_GetDict(__pyx_m); - if (!py_globals) goto bad; - py_code = PyCode_New( - 0, /*int argcount,*/ - #if PY_MAJOR_VERSION >= 3 - 0, /*int kwonlyargcount,*/ - #endif - 0, /*int nlocals,*/ - 0, /*int stacksize,*/ - 0, /*int flags,*/ - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - __pyx_lineno, /*int firstlineno,*/ - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - if (!py_code) goto bad; - py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - py_frame->f_lineno = __pyx_lineno; - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else /* Python 3+ has unicode identifiers */ - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p 
= PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - ++t; - } - return 0; -} - -/* Type Conversion Functions */ - -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} - -static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { - PyNumberMethods *m; - const char *name = NULL; - PyObject *res = NULL; -#if PY_VERSION_HEX < 0x03000000 - if (PyInt_Check(x) || PyLong_Check(x)) -#else - if (PyLong_Check(x)) -#endif - return Py_INCREF(x), x; - m = Py_TYPE(x)->tp_as_number; -#if PY_VERSION_HEX < 0x03000000 - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = PyNumber_Long(x); - } -#else - if (m && m->nb_int) { - name = "int"; - res = PyNumber_Long(x); - } -#endif - if (res) { -#if PY_VERSION_HEX < 0x03000000 - if (!PyInt_Check(res) && !PyLong_Check(res)) { -#else - if (!PyLong_Check(res)) { -#endif - PyErr_Format(PyExc_TypeError, - "__%s__ returned non-%s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - Py_DECREF(res); - return NULL; - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} - -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject* x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} - -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { -#if PY_VERSION_HEX < 0x02050000 - if (ival <= LONG_MAX) - return PyInt_FromLong((long)ival); - else { - unsigned char *bytes = (unsigned char *) &ival; - int one = 1; int little = (int)*(unsigned char*)&one; - return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); - } -#else - return 
PyInt_FromSize_t(ival); -#endif -} - -static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { - unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); - if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { - return (size_t)-1; - } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to size_t"); - return (size_t)-1; - } - return (size_t)val; -} - - -#endif /* Py_PYTHON_H */ diff --git a/scipy-0.10.1/scipy/version.py b/scipy-0.10.1/scipy/version.py deleted file mode 100644 index 67c5d76911..0000000000 --- a/scipy-0.10.1/scipy/version.py +++ /dev/null @@ -1,10 +0,0 @@ - -# THIS FILE IS GENERATED FROM SCIPY SETUP.PY -short_version = '0.10.1' -version = '0.10.1' -full_version = '0.10.1' -git_revision = '772893ff951c61f15a710b3b773ac12e69254fa4' -release = True - -if not release: - version = full_version diff --git a/scipy-0.10.1/scipy/weave/__init__.py b/scipy-0.10.1/scipy/weave/__init__.py deleted file mode 100644 index 33c40188db..0000000000 --- a/scipy-0.10.1/scipy/weave/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -C/C++ integration -================= - - inline -- a function for including C/C++ code within Python - blitz -- a function for compiling Numeric expressions to C++ - ext_tools -- a module that helps construct C/C++ extension modules. - accelerate -- a module that inline accelerates Python functions - - -.. note:: On Linux one needs to have the Python development headers installed - in order to be able to compile things with the `weave` module. - Since this is a runtime dependency these headers (typically in a - pythonX.Y-dev package) are not always installed when installing - scipy. 
- -""" - -from weave_version import weave_version as __version__ - -try: - from blitz_tools import blitz -except ImportError: - pass # scipy (core) wasn't available - -from inline_tools import inline -import ext_tools -from ext_tools import ext_module, ext_function -try: - from accelerate_tools import accelerate -except: - pass - -from numpy.testing import Tester -test = Tester().test diff --git a/scipy-0.10.1/scipy/weave/accelerate_tools.py b/scipy-0.10.1/scipy/weave/accelerate_tools.py deleted file mode 100644 index dde913e127..0000000000 --- a/scipy-0.10.1/scipy/weave/accelerate_tools.py +++ /dev/null @@ -1,411 +0,0 @@ -#**************************************************************************# -#* FILE ************** accelerate_tools.py ************************# -#**************************************************************************# -#* Author: Patrick Miller February 9 2002 *# -#**************************************************************************# -""" -accelerate_tools contains the interface for on-the-fly building of -C++ equivalents to Python functions. 
-""" -#**************************************************************************# - -from types import InstanceType, XRangeType -import inspect -import scipy.weave.md5_load as md5 -import scipy.weave as weave -from numpy.testing import assert_ - -from bytecodecompiler import CXXCoder,Type_Descriptor,Function_Descriptor - -def CStr(s): - "Hacky way to get legal C string from Python string" - if s is None: - return '""' - assert_(isinstance(s, str), msg="only None and string allowed") - r = repr('"'+s) # Better for embedded quotes - return '"'+r[2:-1]+'"' - - -################################################################## -# CLASS INSTANCE # -################################################################## -class Instance(Type_Descriptor): - cxxtype = 'PyObject*' - - def __init__(self,prototype): - self.prototype = prototype - - def check(self,s): - return "PyInstance_Check(%s)"%s - - def inbound(self,s): - return s - - def outbound(self,s): - return s,0 - - def get_attribute(self,name): - proto = getattr(self.prototype,name) - T = lookup_type(proto) - code = 'tempPY = PyObject_GetAttrString(%%(rhs)s,"%s");\n'%name - convert = T.inbound('tempPY') - code += '%%(lhsType)s %%(lhs)s = %s;\n'%convert - return T,code - - def set_attribute(self,name): - proto = getattr(self.prototype,name) - T = lookup_type(proto) - convert,owned = T.outbound('%(rhs)s') - code = 'tempPY = %s;'%convert - if not owned: - code += ' Py_INCREF(tempPY);' - code += ' PyObject_SetAttrString(%%(lhs)s,"%s",tempPY);'%name - code += ' Py_DECREF(tempPY);\n' - return T,code - -################################################################## -# CLASS BASIC # -################################################################## -class Basic(Type_Descriptor): - owned = 1 - def check(self,s): - return "%s(%s)"%(self.checker,s) - def inbound(self,s): - return "%s(%s)"%(self.inbounder,s) - def outbound(self,s): - return "%s(%s)"%(self.outbounder,s),self.owned - -class Basic_Number(Basic): - def 
literalizer(self,s): - return str(s) - def binop(self,symbol,a,b): - assert_(symbol in ['+','-','*','/'], msg=symbol) - return '%s %s %s'%(a,symbol,b),self - -class Integer(Basic_Number): - cxxtype = "long" - checker = "PyInt_Check" - inbounder = "PyInt_AsLong" - outbounder = "PyInt_FromLong" - -class Double(Basic_Number): - cxxtype = "double" - checker = "PyFloat_Check" - inbounder = "PyFloat_AsDouble" - outbounder = "PyFloat_FromDouble" - -class String(Basic): - cxxtype = "char*" - checker = "PyString_Check" - inbounder = "PyString_AsString" - outbounder = "PyString_FromString" - - def literalizer(self,s): - return CStr(s) - -# ----------------------------------------------- -# Singletonize the type names -# ----------------------------------------------- -Integer = Integer() -Double = Double() -String = String() - -import numpy as np - -class Vector(Type_Descriptor): - cxxtype = 'PyArrayObject*' - refcount = 1 - dims = 1 - module_init_code = 'import_array();\n' - inbounder = "(PyArrayObject*)" - outbounder = "(PyObject*)" - owned = 0 # Convertion is by casting! 
- - prerequisites = Type_Descriptor.prerequisites+\ - ['#include "numpy/arrayobject.h"'] - dims = 1 - def check(self,s): - return "PyArray_Check(%s) && ((PyArrayObject*)%s)->nd == %d && ((PyArrayObject*)%s)->descr->type_num == %s"%( - s,s,self.dims,s,self.typecode) - - def inbound(self,s): - return "%s(%s)"%(self.inbounder,s) - def outbound(self,s): - return "%s(%s)"%(self.outbounder,s),self.owned - - def getitem(self,A,v,t): - assert_(self.dims == len(v), msg='Expect dimension %d'%self.dims) - code = '*((%s*)(%s->data'%(self.cxxbase,A) - for i in range(self.dims): - # assert that ''t[i]'' is an integer - code += '+%s*%s->strides[%d]'%(v[i],A,i) - code += '))' - return code,self.pybase - def setitem(self,A,v,t): - return self.getitem(A,v,t) - -class matrix(Vector): - dims = 2 - -class IntegerVector(Vector): - typecode = 'PyArray_INT' - cxxbase = 'int' - pybase = Integer - -class Integermatrix(matrix): - typecode = 'PyArray_INT' - cxxbase = 'int' - pybase = Integer - -class LongVector(Vector): - typecode = 'PyArray_LONG' - cxxbase = 'long' - pybase = Integer - -class Longmatrix(matrix): - typecode = 'PyArray_LONG' - cxxbase = 'long' - pybase = Integer - -class DoubleVector(Vector): - typecode = 'PyArray_DOUBLE' - cxxbase = 'double' - pybase = Double - -class Doublematrix(matrix): - typecode = 'PyArray_DOUBLE' - cxxbase = 'double' - pybase = Double - - -################################################################## -# CLASS XRANGE # -################################################################## -class XRange(Type_Descriptor): - cxxtype = 'XRange' - prerequisites = [''' - class XRange { - public: - XRange(long aLow, long aHigh, long aStep=1) - : low(aLow),high(aHigh),step(aStep) - { - } - XRange(long aHigh) - : low(0),high(aHigh),step(1) - { - } - long low; - long high; - long step; - };'''] - -# ----------------------------------------------- -# Singletonize the type names -# ----------------------------------------------- -IntegerVector = IntegerVector() 
-Integermatrix = Integermatrix() -LongVector = LongVector() -Longmatrix = Longmatrix() -DoubleVector = DoubleVector() -Doublematrix = Doublematrix() -XRange = XRange() - - -typedefs = { - int : Integer, - float : Double, - str: String, - (np.ndarray,1,int): IntegerVector, - (np.ndarray,2,int): Integermatrix, - (np.ndarray,1,np.long): LongVector, - (np.ndarray,2,np.long): Longmatrix, - (np.ndarray,1,float): DoubleVector, - (np.ndarray,2,float): Doublematrix, - XRangeType : XRange, - } - -import math -functiondefs = { - (len,(String,)): - Function_Descriptor(code='strlen(%s)',return_type=Integer), - - (len,(LongVector,)): - Function_Descriptor(code='PyArray_Size((PyObject*)%s)',return_type=Integer), - - (float,(Integer,)): - Function_Descriptor(code='(double)(%s)',return_type=Double), - - (range,(Integer,Integer)): - Function_Descriptor(code='XRange(%s)',return_type=XRange), - - (range,(Integer)): - Function_Descriptor(code='XRange(%s)',return_type=XRange), - - (math.sin,(Double,)): - Function_Descriptor(code='sin(%s)',return_type=Double), - - (math.cos,(Double,)): - Function_Descriptor(code='cos(%s)',return_type=Double), - - (math.sqrt,(Double,)): - Function_Descriptor(code='sqrt(%s)',return_type=Double), - } - - - -################################################################## -# FUNCTION LOOKUP_TYPE # -################################################################## -def lookup_type(x): - T = type(x) - try: - return typedefs[T] - except: - if isinstance(T,np.ndarray): - return typedefs[(T,len(x.shape),x.dtype.char)] - elif issubclass(T, InstanceType): - return Instance(x) - else: - raise NotImplementedError(T) - -################################################################## -# class ACCELERATE # -################################################################## -class accelerate(object): - - def __init__(self, function, *args, **kw): - assert_(inspect.isfunction(function)) - self.function = function - self.module = inspect.getmodule(function) - if 
self.module is None: - import __main__ - self.module = __main__ - self.__call_map = {} - - def __cache(self,*args): - raise TypeError - - def __call__(self,*args): - try: - return self.__cache(*args) - except TypeError: - # Figure out type info -- Do as tuple so its hashable - signature = tuple( map(lookup_type,args) ) - - # If we know the function, call it - try: - fast = self.__call_map[signature] - except: - fast = self.singleton(signature) - self.__cache = fast - self.__call_map[signature] = fast - return fast(*args) - - def signature(self,*args): - # Figure out type info -- Do as tuple so its hashable - signature = tuple( map(lookup_type,args) ) - return self.singleton(signature) - - - def singleton(self,signature): - identifier = self.identifier(signature) - - # Generate a new function, then call it - f = self.function - - # See if we have an accelerated version of module - try: - print 'lookup',self.module.__name__+'_weave' - accelerated_module = __import__(self.module.__name__+'_weave') - print 'have accelerated',self.module.__name__+'_weave' - fast = getattr(accelerated_module,identifier) - return fast - except ImportError: - accelerated_module = None - except AttributeError: - pass - - P = self.accelerate(signature,identifier) - - E = weave.ext_tools.ext_module(self.module.__name__+'_weave') - E.add_function(P) - E.generate_file() - weave.build_tools.build_extension(self.module.__name__+'_weave.cpp',verbose=2) - - if accelerated_module: - raise NotImplementedError('Reload') - else: - accelerated_module = __import__(self.module.__name__+'_weave') - - fast = getattr(accelerated_module,identifier) - return fast - - def identifier(self,signature): - # Build an MD5 checksum - f = self.function - co = f.func_code - identifier = str(signature)+\ - str(co.co_argcount)+\ - str(co.co_consts)+\ - str(co.co_varnames)+\ - co.co_code - return 'F'+md5.md5(identifier).hexdigest() - - def accelerate(self,signature,identifier): - P = 
Python2CXX(self.function,signature,name=identifier) - return P - - def code(self,*args): - if len(args) != self.function.func_code.co_argcount: - raise TypeError('%s() takes exactly %d arguments (%d given)' % - (self.function.__name__, - self.function.func_code.co_argcount, - len(args))) - signature = tuple( map(lookup_type,args) ) - ident = self.function.__name__ - return self.accelerate(signature,ident).function_code() - - -################################################################## -# CLASS PYTHON2CXX # -################################################################## -class Python2CXX(CXXCoder): - def typedef_by_value(self,v): - T = lookup_type(v) - if T not in self.used: - self.used.append(T) - return T - - def function_by_signature(self,signature): - descriptor = functiondefs[signature] - if descriptor.return_type not in self.used: - self.used.append(descriptor.return_type) - return descriptor - - def __init__(self,f,signature,name=None): - # Make sure function is a function - assert_(inspect.isfunction(f)) - # and check the input type signature - assert_(reduce(lambda x,y: x and y, - map(lambda x: isinstance(x,Type_Descriptor), - signature), - 1), msg='%s not all type objects'%signature) - self.arg_specs = [] - self.customize = weave.base_info.custom_info() - - CXXCoder.__init__(self,f,signature,name) - - return - - def function_code(self): - code = self.wrapped_code() - for T in self.used: - if T is not None and T.module_init_code: - self.customize.add_module_init_code(T.module_init_code) - return code - - def python_function_definition_code(self): - return '{ "%s", wrapper_%s, METH_VARARGS, %s },\n'%( - self.name, - self.name, - CStr(self.function.__doc__)) diff --git a/scipy-0.10.1/scipy/weave/ast_tools.py b/scipy-0.10.1/scipy/weave/ast_tools.py deleted file mode 100644 index 5e727ab80e..0000000000 --- a/scipy-0.10.1/scipy/weave/ast_tools.py +++ /dev/null @@ -1,215 +0,0 @@ -import token -import symbol -import parser - -def issequence(t): - return 
isinstance(t, (list, tuple)) - -def int_to_symbol(i): - """ Convert numeric symbol or token to a desriptive name. - """ - try: - return symbol.sym_name[i] - except KeyError: - return token.tok_name[i] - -def translate_symbols(ast_tuple): - """ Translate numeric grammar symbols in an ast_tuple descriptive names. - - This simply traverses the tree converting any integer value to values - found in symbol.sym_name or token.tok_name. - """ - new_list = [] - for item in ast_tuple: - if isinstance(item, int): - new_list.append(int_to_symbol(item)) - elif issequence(item): - new_list.append(translate_symbols(item)) - else: - new_list.append(item) - if isinstance(ast_tuple, tuple): - return tuple(new_list) - else: - return new_list - -def ast_to_string(ast_seq): - """* Traverse an ast tree sequence, printing out all leaf nodes. - - This effectively rebuilds the expression the tree was built - from. I guess its probably missing whitespace. How bout - indent stuff and new lines? Haven't checked this since we're - currently only dealing with simple expressions. - *""" - output = '' - for item in ast_seq: - if isinstance(item, str): - output = output + item - elif issequence(item): - output = output + ast_to_string(item) - return output - -def build_atom(expr_string): - """ Build an ast for an atom from the given expr string. - - If expr_string is not a string, it is converted to a string - before parsing to an ast_tuple. - """ - # the [1][1] indexing below starts atoms at the third level - # deep in the resulting parse tree. parser.expr will return - # a tree rooted with eval_input -> test_list -> test ... - # I'm considering test to be the root of atom symbols. - # It might be a better idea to move down a little in the - # parse tree. Any benefits? Right now, this works fine. 
- if isinstance(expr_string, str): - ast = parser.expr(expr_string).totuple()[1][1] - else: - ast = parser.expr(`expr_string`).totuple()[1][1] - return ast - -def atom_tuple(expr_string): - return build_atom(expr_string) - -def atom_list(expr_string): - return tuples_to_lists(build_atom(expr_string)) - -def find_first_pattern(ast_tuple,pattern_list): - """* Find the first occurence of a pattern one of a list of patterns - in ast_tuple. - - Used for testing at the moment. - - ast_tuple -- tuple or list created by ast.totuple() or ast.tolist(). - pattern_list -- A single pattern or list of patterns to search - for in the ast_tuple. If a single pattern is - used, it MUST BE A IN A TUPLE format. - Returns: - found -- true/false indicating whether pattern was found - data -- dictionary of data from first matching pattern in tree. - (see match function by Jeremy Hylton). - *""" - found,data = 0,{} - - # convert to a list if input wasn't a list - if not isinstance(pattern_list, list): - pattern_list = [pattern_list] - - # look for any of the patterns in a list of patterns - for pattern in pattern_list: - found,data = match(pattern,ast_tuple) - if found: - break - - # if we didn't find the pattern, search sub-trees of the parse tree - if not found: - for item in ast_tuple: - if issequence(item): - # only search sub items if they are a list or tuple. - found, data = find_first_pattern(item,pattern_list) - if found: - break - return found,data - -name_pattern = (token.NAME, ['var']) - -def remove_duplicates(lst): - output = [] - for item in lst: - if item not in output: - output.append(item) - return output - -reserved_names = ['sin'] - -def remove_reserved_names(lst): - """ These are functions names -- don't create variables for them - There is a more reobust approach, but this ought to work pretty - well. 
- """ - output = [] - for item in lst: - if item not in reserved_names: - output.append(item) - return output - -def harvest_variables(ast_list): - """ Retreive all the variables that need to be defined. - """ - variables = [] - if issequence(ast_list): - found,data = match(name_pattern,ast_list) - if found: - variables.append(data['var']) - for item in ast_list: - if issequence(item): - variables.extend(harvest_variables(item)) - variables = remove_duplicates(variables) - variables = remove_reserved_names(variables) - return variables - -def match(pattern, data, vars=None): - """match `data' to `pattern', with variable extraction. - - pattern - Pattern to match against, possibly containing variables. - - data - Data to be checked and against which variables are extracted. - - vars - Dictionary of variables which have already been found. If not - provided, an empty dictionary is created. - - The `pattern' value may contain variables of the form ['varname'] which - are allowed to match anything. The value that is matched is returned as - part of a dictionary which maps 'varname' to the matched value. 'varname' - is not required to be a string object, but using strings makes patterns - and the code which uses them more readable. - - This function returns two values: a boolean indicating whether a match - was found and a dictionary mapping variable names to their associated - values. - - From the Demo/Parser/example.py file - """ - if vars is None: - vars = {} - if isinstance(pattern, list): # 'variables' are ['varname'] - vars[pattern[0]] = data - return 1, vars - if not isinstance(pattern, tuple): - return (pattern == data), vars - if len(data) != len(pattern): - return 0, vars - for pattern, data in zip(pattern, data): - same, vars = match(pattern, data, vars) - if not same: - break - return same, vars - - -def tuples_to_lists(ast_tuple): - """ Convert an ast object tree in tuple form to list form. 
- """ - if not issequence(ast_tuple): - return ast_tuple - - new_list = [] - for item in ast_tuple: - new_list.append(tuples_to_lists(item)) - return new_list - - -""" -A little tree I built to help me understand the parse trees. - -----------303------------------------------ - | | - 304 -------------------------307------------------------- - | | | | | | - 1 'result' 9 '[' 308 12 ',' 308 10 ']' - | | - ---------309-------- -----309-------- - | | | | - 291|304 291|304 291|304 | - | | | | - 1 'a1' 11 ':' 1 'a2' 2 '10' 11 ':' -""" diff --git a/scipy-0.10.1/scipy/weave/base_info.py b/scipy-0.10.1/scipy/weave/base_info.py deleted file mode 100644 index 50274ec8bc..0000000000 --- a/scipy-0.10.1/scipy/weave/base_info.py +++ /dev/null @@ -1,139 +0,0 @@ -""" - base_info holds classes that define the information - needed for building C++ extension modules for Python that - handle different data types. The information includes - such as include files, libraries, and even code snippets. - - base_info -- base class for cxx_info, blitz_info, etc. - info_list -- a handy list class for working with multiple - info classes at the same time. -""" -import UserList - -class base_info(object): - _warnings =[] - _headers = [] - _include_dirs = [] - _libraries = [] - _library_dirs = [] - _support_code = [] - _module_init_code = [] - _sources = [] - _define_macros = [] - _undefine_macros = [] - _extra_compile_args = [] - _extra_link_args = [] - compiler = '' - def set_compiler(self,compiler): - self.check_compiler(compiler) - self.compiler = compiler - # it would probably be better to specify what the arguments are - # to avoid confusion, but I don't think these classes will get - # very complicated, and I don't really know the variety of things - # that should be passed in at this point. 
- def check_compiler(self,compiler): - pass - def warnings(self): - return self._warnings - def headers(self): - return self._headers - def include_dirs(self): - return self._include_dirs - def libraries(self): - return self._libraries - def library_dirs(self): - return self._library_dirs - def support_code(self): - return self._support_code - def module_init_code(self): - return self._module_init_code - def sources(self): - return self._sources - def define_macros(self): - return self._define_macros - def undefine_macros(self): - return self._undefine_macros - def extra_compile_args(self): - return self._extra_compile_args - def extra_link_args(self): - return self._extra_link_args - -class custom_info(base_info): - def __init__(self): - self._warnings =[] - self._headers = [] - self._include_dirs = [] - self._libraries = [] - self._library_dirs = [] - self._support_code = [] - self._module_init_code = [] - self._sources = [] - self._define_macros = [] - self._undefine_macros = [] - self._extra_compile_args = [] - self._extra_link_args = [] - - def add_warning(self,warning): - self._warnings.append(warning) - def add_header(self,header): - self._headers.append(header) - def add_include_dir(self,include_dir): - self._include_dirs.append(include_dir) - def add_library(self,library): - self._libraries.append(library) - def add_library_dir(self,library_dir): - self._library_dirs.append(library_dir) - def add_support_code(self,support_code): - self._support_code.append(support_code) - def add_module_init_code(self,module_init_code): - self._module_init_code.append(module_init_code) - def add_source(self,source): - self._sources.append(source) - def add_define_macro(self,define_macro): - self._define_macros.append(define_macro) - def add_undefine_macro(self,undefine_macro): - self._undefine_macros.append(undefine_macro) - def add_extra_compile_arg(self,compile_arg): - return self._extra_compile_args.append(compile_arg) - def add_extra_link_arg(self,link_arg): - return 
self._extra_link_args.append(link_arg) - -class info_list(UserList.UserList): - def get_unique_values(self,attribute): - all_values = [] - for info in self: - vals = eval('info.'+attribute+'()') - all_values.extend(vals) - return unique_values(all_values) - - def extra_compile_args(self): - return self.get_unique_values('extra_compile_args') - def extra_link_args(self): - return self.get_unique_values('extra_link_args') - def sources(self): - return self.get_unique_values('sources') - def define_macros(self): - return self.get_unique_values('define_macros') - def sources(self): - return self.get_unique_values('sources') - def warnings(self): - return self.get_unique_values('warnings') - def headers(self): - return self.get_unique_values('headers') - def include_dirs(self): - return self.get_unique_values('include_dirs') - def libraries(self): - return self.get_unique_values('libraries') - def library_dirs(self): - return self.get_unique_values('library_dirs') - def support_code(self): - return self.get_unique_values('support_code') - def module_init_code(self): - return self.get_unique_values('module_init_code') - -def unique_values(lst): - all_values = [] - for value in lst: - if value not in all_values or value == '-framework': - all_values.append(value) - return all_values diff --git a/scipy-0.10.1/scipy/weave/base_spec.py b/scipy-0.10.1/scipy/weave/base_spec.py deleted file mode 100644 index 4a48ec3aea..0000000000 --- a/scipy-0.10.1/scipy/weave/base_spec.py +++ /dev/null @@ -1,97 +0,0 @@ -class base_converter(object): - """ - Properties: - headers -- list of strings that name the header files needed by this - object. - include_dirs -- list of directories where the header files can be found. - libraries -- list of libraries needed to link to when compiling - extension. - library_dirs -- list of directories to search for libraries. - - support_code -- list of strings. Each string is a subroutine needed - by the type. 
Functions that are used in the conversion - between Python and C++ files are examples of these. - - Methods: - - type_match(value) returns 1 if this class is used to represent type - specification for value. - type_spec(name, value) returns a new object (of this class) that is - used to produce C++ code for value. - declaration_code() returns C++ code fragment for type declaration and - conversion of python object to C++ object. - cleanup_code() returns C++ code fragment for cleaning up after the - variable after main C++ code fragment has executed. - - """ - _build_information = [] - compiler = '' - - def set_compiler(self,compiler): - self.compiler = compiler - def type_match(self,value): - raise NotImplementedError("You must override method in derived class") - def build_information(self): - return self._build_information - def type_spec(self,name,value): - pass - def declaration_code(self,templatize = 0): - return "" - def local_dict_code(self): - return "" - def cleanup_code(self): - return "" - def retrieve_py_variable(self,inline=0): - # this needs a little coordination in name choices with the - # ext_inline_function class. 
- if inline: - vn = 'get_variable("%s",raw_locals,raw_globals)' % self.name - else: - vn = 'py_' + self.name - return vn - - def py_reference(self): - return "&py_" + self.name - def py_pointer(self): - return "*py_" + self.name - def py_variable(self): - return "py_" + self.name - def reference(self): - return "&" + self.name - def pointer(self): - return "*" + self.name - def init_flag(self): - return self.name + "_used" - - def variable(self): - return self.name - def variable_as_string(self): - return '"' + self.name + '"' - -import UserList -import base_info - -class arg_spec_list(UserList.UserList): - def build_information(self): - all_info = base_info.info_list() - for i in self: - all_info.extend(i.build_information()) - return all_info - - def py_references(self): - return map(lambda x: x.py_reference(),self) - def py_pointers(self): - return map(lambda x: x.py_pointer(),self) - def py_variables(self): - return map(lambda x: x.py_variable(),self) - - def references(self): - return map(lambda x: x.py_reference(),self) - def pointers(self): - return map(lambda x: x.pointer(),self) - def variables(self): - return map(lambda x: x.variable(),self) - def init_flags(self): - return map(lambda x: x.init_flag(),self) - def variable_as_strings(self): - return map(lambda x: x.variable_as_string(),self) diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/Makefile.am b/scipy-0.10.1/scipy/weave/blitz/blitz/Makefile.am deleted file mode 100644 index cfda130a15..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/Makefile.am +++ /dev/null @@ -1,39 +0,0 @@ -# -# Written by Patrick Guio -# - -SUBDIRS = generate meta array - -blitzdir = $(includedir)/blitz -generatedir = ./generate - -genheaders = matbops.h mathfunc.h matuops.h promote-old.h vecbops.cc vecuops.cc vecwhere.cc - -blitz_HEADERS = applics.h array-impl.h array-old.h array.h bench.cc bench.h \ -benchext.cc benchext.h blitz.h bzconfig.h bzdebug.h compiler.h \ -etbase.h extremum.h funcs.h indexexpr.h limits-hack.h 
listinit.h \ -matdiag.h matexpr.h matgen.h mathf2.h matltri.h matref.h matrix.cc \ -matrix.h matsymm.h mattoep.h matutri.h memblock.cc memblock.h \ -minmax.h mstruct.h numinquire.h numtrait.h ops.h prettyprint.h \ -promote.h rand-dunif.h rand-mt.h rand-normal.h rand-tt800.h rand-uniform.h \ -random.h randref.h range.h reduce.h shapecheck.h tau.h timer.h tiny.h \ -tinymat.h tinymatexpr.h tinymatio.cc tinyvec-et.h tinyvec.cc tinyvec.h \ -tinyvecio.cc tinyveciter.h traversal.cc traversal.h tuning.h tvcross.h \ -tvecglobs.h update.h vecaccum.cc vecall.cc vecany.cc vecbfn.cc \ -veccount.cc vecdelta.cc vecdot.cc vecexpr.h vecexprwrap.h vecglobs.cc \ -vecglobs.h vecio.cc veciter.h vecmax.cc vecmin.cc vecnorm.cc vecnorm1.cc \ -vecpick.cc vecpick.h vecpickio.cc vecpickiter.h vecsum.cc vector-et.h \ -vector.cc vector.h vecwhere.h wrap-climits.h zero.cc zero.h $(genheaders) - -EXTRA_HEADERS = apple/bzconfig.h intel/bzconfig.h ibm/bzconfig.h \ -compaq/bzconfig.h hp/bzconfig.h sgi/bzconfig.h gnu/bzconfig.h \ -pgi/bzconfig.h pathscale/bzconfig.h kai/bzconfig.h fujitsu/bzconfig.h - -nobase_blitz_HEADERS = $(COMPILER_SPECIFIC_HEADER) - -DISTCLEANFILES = apple/bzconfig.h intel/bzconfig.h ibm/bzconfig.h \ -compaq/bzconfig.h hp/bzconfig.h sgi/bzconfig.h gnu/bzconfig.h \ -pgi/bzconfig.h pathscale/bzconfig.h kai/bzconfig.h fujitsu/bzconfig.h - -clean-local: - -rm -rf config.h diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/Makefile.in b/scipy-0.10.1/scipy/weave/blitz/blitz/Makefile.in deleted file mode 100644 index 7cfe21cddf..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/Makefile.in +++ /dev/null @@ -1,625 +0,0 @@ -# Makefile.in generated by automake 1.9.6 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. 
-# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# -# Written by Patrick Guio -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = .. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -target_triplet = @target@ -subdir = blitz -DIST_COMMON = README $(blitz_HEADERS) $(nobase_blitz_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ac_check_cxx_features.m4 \ - $(top_srcdir)/m4/ac_compiler_specific_header.m4 \ - $(top_srcdir)/m4/ac_compilers_64bits.m4 \ - $(top_srcdir)/m4/ac_cxx_bool.m4 \ - $(top_srcdir)/m4/ac_cxx_complex_math_in_namespace_std.m4 \ - $(top_srcdir)/m4/ac_cxx_const_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_default_template_parameters.m4 \ - $(top_srcdir)/m4/ac_cxx_dynamic_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_enable_debug.m4 \ - $(top_srcdir)/m4/ac_cxx_enable_optimize.m4 \ - $(top_srcdir)/m4/ac_cxx_enum_computations.m4 \ - $(top_srcdir)/m4/ac_cxx_enum_computations_with_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_exceptions.m4 \ - 
$(top_srcdir)/m4/ac_cxx_explicit.m4 \ - $(top_srcdir)/m4/ac_cxx_explicit_template_function_qualification.m4 \ - $(top_srcdir)/m4/ac_cxx_flags_preset.m4 \ - $(top_srcdir)/m4/ac_cxx_full_specialization_syntax.m4 \ - $(top_srcdir)/m4/ac_cxx_function_nontype_parameters.m4 \ - $(top_srcdir)/m4/ac_cxx_general.m4 \ - $(top_srcdir)/m4/ac_cxx_have_climits.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex_fcns.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex_math1.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex_math2.m4 \ - $(top_srcdir)/m4/ac_cxx_have_ieee_math.m4 \ - $(top_srcdir)/m4/ac_cxx_have_numeric_limits.m4 \ - $(top_srcdir)/m4/ac_cxx_have_rusage.m4 \ - $(top_srcdir)/m4/ac_cxx_have_std.m4 \ - $(top_srcdir)/m4/ac_cxx_have_stl.m4 \ - $(top_srcdir)/m4/ac_cxx_have_system_v_math.m4 \ - $(top_srcdir)/m4/ac_cxx_have_valarray.m4 \ - $(top_srcdir)/m4/ac_cxx_isnan_in_namespace_std.m4 \ - $(top_srcdir)/m4/ac_cxx_keywords.m4 \ - $(top_srcdir)/m4/ac_cxx_math_fn_in_namespace_std.m4 \ - $(top_srcdir)/m4/ac_cxx_member_constants.m4 \ - $(top_srcdir)/m4/ac_cxx_member_templates.m4 \ - $(top_srcdir)/m4/ac_cxx_member_templates_outside_class.m4 \ - $(top_srcdir)/m4/ac_cxx_mutable.m4 \ - $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ - $(top_srcdir)/m4/ac_cxx_nceg_restrict.m4 \ - $(top_srcdir)/m4/ac_cxx_nceg_restrict_egcs.m4 \ - $(top_srcdir)/m4/ac_cxx_old_for_scoping.m4 \ - $(top_srcdir)/m4/ac_cxx_partial_ordering.m4 \ - $(top_srcdir)/m4/ac_cxx_partial_specialization.m4 \ - $(top_srcdir)/m4/ac_cxx_reinterpret_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_rtti.m4 \ - $(top_srcdir)/m4/ac_cxx_standard_library.m4 \ - $(top_srcdir)/m4/ac_cxx_static_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_template_keyword_qualifier.m4 \ - $(top_srcdir)/m4/ac_cxx_template_qualified_base_class.m4 \ - $(top_srcdir)/m4/ac_cxx_template_qualified_return_type.m4 \ - $(top_srcdir)/m4/ac_cxx_template_scoped_argument_matching.m4 \ - $(top_srcdir)/m4/ac_cxx_templates.m4 \ - 
$(top_srcdir)/m4/ac_cxx_templates_as_template_arguments.m4 \ - $(top_srcdir)/m4/ac_cxx_templates_features.m4 \ - $(top_srcdir)/m4/ac_cxx_type_casts.m4 \ - $(top_srcdir)/m4/ac_cxx_type_promotion.m4 \ - $(top_srcdir)/m4/ac_cxx_typename.m4 \ - $(top_srcdir)/m4/ac_cxx_use_numtrait.m4 \ - $(top_srcdir)/m4/ac_env.m4 $(top_srcdir)/m4/ac_info.m4 \ - $(top_srcdir)/m4/ax_prefix_config_h.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-exec-recursive install-info-recursive \ - install-recursive installcheck-recursive installdirs-recursive \ - pdf-recursive ps-recursive uninstall-info-recursive \ - uninstall-recursive -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(blitzdir)" "$(DESTDIR)$(blitzdir)" -blitzHEADERS_INSTALL = $(INSTALL_HEADER) -nobase_blitzHEADERS_INSTALL = $(install_sh_DATA) -HEADERS = $(blitz_HEADERS) $(nobase_blitz_HEADERS) -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AR = @AR@ -AR_FLAGS = @AR_FLAGS@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -COMPILER_SPECIFIC_HEADER = @COMPILER_SPECIFIC_HEADER@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CXX_DEBUG_FLAGS = @CXX_DEBUG_FLAGS@ -CXX_LIBS = @CXX_LIBS@ -CXX_OPTIMIZE_FLAGS = @CXX_OPTIMIZE_FLAGS@ -CXX_PROFIL_FLAGS = 
@CXX_PROFIL_FLAGS@ -CYGPATH_W = @CYGPATH_W@ -DATE = @DATE@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EXEEXT = @EXEEXT@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -OBJEXT = @OBJEXT@ -OS = @OS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -RANLIB = @RANLIB@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -STRIP = @STRIP@ -VERSION = @VERSION@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target = @target@ -target_alias = @target_alias@ -target_cpu = @target_cpu@ -target_os = @target_os@ -target_vendor = 
@target_vendor@ -SUBDIRS = generate meta array -blitzdir = $(includedir)/blitz -generatedir = ./generate -genheaders = matbops.h mathfunc.h matuops.h promote-old.h vecbops.cc vecuops.cc vecwhere.cc -blitz_HEADERS = applics.h array-impl.h array-old.h array.h bench.cc bench.h \ -benchext.cc benchext.h blitz.h bzconfig.h bzdebug.h compiler.h \ -etbase.h extremum.h funcs.h indexexpr.h limits-hack.h listinit.h \ -matdiag.h matexpr.h matgen.h mathf2.h matltri.h matref.h matrix.cc \ -matrix.h matsymm.h mattoep.h matutri.h memblock.cc memblock.h \ -minmax.h mstruct.h numinquire.h numtrait.h ops.h prettyprint.h \ -promote.h rand-dunif.h rand-mt.h rand-normal.h rand-tt800.h rand-uniform.h \ -random.h randref.h range.h reduce.h shapecheck.h tau.h timer.h tiny.h \ -tinymat.h tinymatexpr.h tinymatio.cc tinyvec-et.h tinyvec.cc tinyvec.h \ -tinyvecio.cc tinyveciter.h traversal.cc traversal.h tuning.h tvcross.h \ -tvecglobs.h update.h vecaccum.cc vecall.cc vecany.cc vecbfn.cc \ -veccount.cc vecdelta.cc vecdot.cc vecexpr.h vecexprwrap.h vecglobs.cc \ -vecglobs.h vecio.cc veciter.h vecmax.cc vecmin.cc vecnorm.cc vecnorm1.cc \ -vecpick.cc vecpick.h vecpickio.cc vecpickiter.h vecsum.cc vector-et.h \ -vector.cc vector.h vecwhere.h wrap-climits.h zero.cc zero.h $(genheaders) - -EXTRA_HEADERS = apple/bzconfig.h intel/bzconfig.h ibm/bzconfig.h \ -compaq/bzconfig.h hp/bzconfig.h sgi/bzconfig.h gnu/bzconfig.h \ -pgi/bzconfig.h pathscale/bzconfig.h kai/bzconfig.h fujitsu/bzconfig.h - -nobase_blitz_HEADERS = $(COMPILER_SPECIFIC_HEADER) -DISTCLEANFILES = apple/bzconfig.h intel/bzconfig.h ibm/bzconfig.h \ -compaq/bzconfig.h hp/bzconfig.h sgi/bzconfig.h gnu/bzconfig.h \ -pgi/bzconfig.h pathscale/bzconfig.h kai/bzconfig.h fujitsu/bzconfig.h - -all: config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && 
$(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign blitz/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign blitz/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -config.h: stamp-h1 - @if test ! 
-f $@; then \ - rm -f stamp-h1; \ - $(MAKE) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status blitz/config.h -$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -uninstall-info-am: -install-blitzHEADERS: $(blitz_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(blitzdir)" || $(mkdir_p) "$(DESTDIR)$(blitzdir)" - @list='$(blitz_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(blitzHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(blitzdir)/$$f'"; \ - $(blitzHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(blitzdir)/$$f"; \ - done - -uninstall-blitzHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(blitz_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(blitzdir)/$$f'"; \ - rm -f "$(DESTDIR)$(blitzdir)/$$f"; \ - done -install-nobase_blitzHEADERS: $(nobase_blitz_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(blitzdir)" || $(mkdir_p) "$(DESTDIR)$(blitzdir)" - @$(am__vpath_adj_setup) \ - list='$(nobase_blitz_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - $(am__vpath_adj) \ - echo " $(nobase_blitzHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(blitzdir)/$$f'"; \ - $(nobase_blitzHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(blitzdir)/$$f"; \ - done - -uninstall-nobase_blitzHEADERS: - @$(NORMAL_UNINSTALL) - @$(am__vpath_adj_setup) \ - list='$(nobase_blitz_HEADERS)'; for p in $$list; do \ - $(am__vpath_adj) \ - echo " rm -f '$(DESTDIR)$(blitzdir)/$$f'"; \ - rm -f "$(DESTDIR)$(blitzdir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -mostlyclean-recursive clean-recursive distclean-recursive \ -maintainer-clean-recursive: - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - 
-GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(mkdir_p) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(HEADERS) config.h -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(blitzdir)" "$(DESTDIR)$(blitzdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) 
install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-recursive - -clean-am: clean-generic clean-local mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-hdr distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-blitzHEADERS install-nobase_blitzHEADERS - -install-exec-am: - -install-info: install-info-recursive - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-blitzHEADERS uninstall-info-am \ - uninstall-nobase_blitzHEADERS - -uninstall-info: uninstall-info-recursive - -.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \ - clean clean-generic clean-local clean-recursive ctags \ - ctags-recursive distclean distclean-generic distclean-hdr \ - distclean-recursive distclean-tags distdir dvi dvi-am html \ - html-am info info-am install install-am install-blitzHEADERS \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-man \ - install-nobase_blitzHEADERS install-strip 
installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic maintainer-clean-recursive \ - mostlyclean mostlyclean-generic mostlyclean-recursive pdf \ - pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ - uninstall-blitzHEADERS uninstall-info-am \ - uninstall-nobase_blitzHEADERS - - -clean-local: - -rm -rf config.h -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/README b/scipy-0.10.1/scipy/weave/blitz/blitz/README deleted file mode 100644 index 4bac82651b..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/README +++ /dev/null @@ -1,16 +0,0 @@ -Blitz header file notes - -1) Some matrix headers are included in this release, but only because -the Benchmark class needs them. Their design is not yet stable, -and they are untested, so use them at your own peril. - -2) A compiler-specific header file with configuration settings is included -from the master header file . The compiler-specific -header file is selected on the basis of preprocessor symbols. Since some -C++ compilers (notably the IBM XL compiler for Darwin and for AIX systems) -do not define a unique preprocessor symbol, we have added a -D option in -the autoconf macro AC_CXX_FLAGS_PRESET (see file m4/ac_cxx_flags_preset.m4). -Thus, we use the option -D__APPLE with xlc++ and -D__IBM with xlC. -Please note that any user code must also be compiled with the same -D option -in order to be compiled successfully with one of these compilers. 
- diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/applics.h b/scipy-0.10.1/scipy/weave/blitz/blitz/applics.h deleted file mode 100644 index 3ec76ca73a..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/applics.h +++ /dev/null @@ -1,401 +0,0 @@ -/*************************************************************************** - * blitz/applics.h Applicative template classes - * - * $Id: applics.h 1414 2005-11-01 22:04:59Z cookedm $ - * - * Copyright (C) 1997-2001 Todd Veldhuizen - * - * This code was relicensed under the modified BSD license for use in SciPy - * by Todd Veldhuizen (see LICENSE.txt in the weave directory). - * - * - * Suggestions: blitz-dev@oonumerics.org - * Bugs: blitz-bugs@oonumerics.org - * - * For more information, please see the Blitz++ Home Page: - * http://oonumerics.org/blitz/ - * - ***************************************************************************/ - -#ifndef BZ_APPLICS_H -#define BZ_APPLICS_H - -#ifndef BZ_BLITZ_H - #include -#endif - -#ifndef BZ_PROMOTE_H - #include -#endif - -#ifndef BZ_NUMTRAIT_H - #include -#endif - -BZ_NAMESPACE(blitz) - -// These base classes are included for no other reason than to keep -// the applicative templates clustered together in a graphical -// class browser. 
-class ApplicativeTemplatesBase { }; -class TwoOperandApplicativeTemplatesBase : public ApplicativeTemplatesBase { }; -class OneOperandApplicativeTemplatesBase : public ApplicativeTemplatesBase { }; - -template -class _bz_Add : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x + y; } -}; - -template -class _bz_Subtract : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x - y; } -}; - -template -class _bz_Multiply : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x * y; } -}; - -template -class _bz_Divide : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x / y; } -}; - -template -class _bz_Mod : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x % y; } -}; - -template -class _bz_BitwiseXOR : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef 
BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x ^ y; } -}; - -template -class _bz_BitwiseAnd : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x & y; } -}; - -template -class _bz_BitwiseOr : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x | y; } -}; - -template -class _bz_ShiftRight : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x >> y; } -}; - -template -class _bz_ShiftLeft : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x << y; } -}; - - -template -class _bz_Min : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return (x < y ? 
x : y); } -}; - -template -class _bz_Max : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef BZ_PROMOTE(T_numtype1,T_numtype2) T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return (x > y ? x : y); } -}; - -template -class _bz_Greater : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x > y; } -}; - -template -class _bz_Less : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x < y; } -}; - -template -class _bz_GreaterOrEqual : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x >= y; } -}; - -template -class _bz_LessOrEqual : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x <= y; } -}; - -template -class _bz_Equal : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x == y; } -}; - -template -class _bz_NotEqual : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool 
T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x != y; } -}; - -template -class _bz_LogicalAnd : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x && y; } -}; - -template -class _bz_LogicalOr : public TwoOperandApplicativeTemplatesBase { -public: - typedef P_numtype1 T_numtype1; - typedef P_numtype2 T_numtype2; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline T_promote apply(P_numtype1 x, P_numtype2 y) - { return x || y; } -}; - - -template -class _bz_Cast : public OneOperandApplicativeTemplatesBase { -public: - typedef P_numtype_in T_numtype1; - typedef P_numtype_out T_promote; - typedef T_promote T_numtype; - - static inline P_numtype_out apply(P_numtype_in x) - { return P_numtype_out(x); } -}; - -template -class _bz_LogicalNot : public OneOperandApplicativeTemplatesBase { -public: - typedef P_numtype T_numtype1; - typedef bool T_promote; - typedef T_promote T_numtype; - - static inline P_numtype apply(P_numtype x) - { return !x; } -}; - -template -class _bz_BitwiseNot : public OneOperandApplicativeTemplatesBase { -public: - typedef P_numtype T_numtype1; - typedef T_numtype1 T_promote; - typedef T_promote T_numtype; - - static inline P_numtype apply(P_numtype x) - { return ~x; } -}; - - - -/***************************************************************************** - * Math Functions - *****************************************************************************/ - -// Applicative templates for these functions are defined in -// , which is included below: -// -// abs(i), labs(l) Absolute value -// acos(d), acols(ld) Inverse cosine -// acosh(d) Inverse hyperbolic cosine -// asin(d), asinl(ld) Inverse sine -// asinh(d) Inverse hyperbolic sine -// atan(d), atanl(ld) Inverse 
tangent -// atan2(d,d), atan2l(ld,ld) Inverse tangent -// atanh(d) Inverse hyperbolic tangent -// cbrt(x) Cube root -// ceil(d), ceill(ld) Smallest f-int not less than x -// int class(d) Classification of x (FP_XXXXX) -// cos(d), cosl(ld) Cosine -// cosh(d), coshl(ld) Hyperbolic cosine -// copysign(d,d) Return 1st arg with same sign as 2nd -// drem(x,x) IEEE remainder -// exp(d), expl(ld) Exponential -// expm1(d) Exp(x)-1 -// erf(d), erfl(ld) Error function -// erfc(d), erfcl(ld) Complementary error function -// fabs(d), fabsl(ld) Floating point absolute value -// int finite(d) Nonzero if finite -// floor(d), floor(ld) Largest f-int not greater than x -// fmod(d,d), fmodl(ld,ld) Floating point remainder -// frexp(d, int* e) Break into mantissa/exponent (*) -// frexpl(ld, int* e) Break into mantissa/exponent (*) -// gammaFunc(d) Gamma function (** needs special -// implementation using lgamma) -// hypot(d,d) Hypotenuse: sqrt(x*x+y*y) -// int ilogb(d) Integer unbiased exponent -// int isnan(d) Nonzero if NaNS or NaNQ -// int itrunc(d) Truncate and convert to integer -// j0(d) Bessel function first kind, order 0 -// j1(d) Bessel function first kind, order 1 -// jn(int, double) Bessel function first kind, order i -// ldexp(d,i), ldexpl(ld,i) Compute d * 2^i -// lgamma(d), lgammald(ld) Log absolute gamma -// log(d), logl(ld) Natural logarithm -// logb(d) Unbiased exponent (IEEE) -// log1p(d) Compute log(1 + x) -// log10(d), log10l(ld) Logarithm base 10 -// modf(d, int* i), modfl(ld, int* i) Break into integral/fractional part -// double nearest(double) Nearest floating point integer -// nextafter(d, d) Next representable neighbor of 1st -// in direction of 2nd -// pow(d,d), pow(ld,ld) Computes x ^ y -// d remainder(d,d) IEEE remainder -// d rint(d) Round to f-integer (depends on mode) -// d rsqrt(d) Reciprocal square root -// d scalb(d,d) Return x * (2^y) -// sin(d), sinl(ld) Sine -// sinh(d), sinhl(ld) Hyperbolic sine -// sqr(x) Return x * x -// sqrt(d), sqrtl(ld) 
Square root -// tan(d), tanl(ld) Tangent -// tanh(d), tanhl(ld) Hyperbolic tangent -// trunc(d) Nearest f-int in the direction of 0 -// unsigned uitrunc(d) Truncate and convert to unsigned -// int unordered(d,d) Nonzero if comparison is unordered -// y0(d) Bessel function 2nd kind, order 0 -// y1(d) Bessel function 2nd kind, order 1 -// yn(i,d) Bessel function 2nd kind, order d - - -BZ_NAMESPACE_END - -#ifndef BZ_MATHFUNC_H - #include -#endif - -#ifndef BZ_MATHF2_H - #include -#endif - -#endif // BZ_APPLICS_H diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/array-impl.h b/scipy-0.10.1/scipy/weave/blitz/blitz/array-impl.h deleted file mode 100644 index 223b86921f..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/array-impl.h +++ /dev/null @@ -1,2521 +0,0 @@ -// -*- C++ -*- -/*************************************************************************** - * blitz/array-impl.h Definition of the Array class - * - * $Id: array-impl.h 1414 2005-11-01 22:04:59Z cookedm $ - * - * Copyright (C) 1997-2001 Todd Veldhuizen - * - * This code was relicensed under the modified BSD license for use in SciPy - * by Todd Veldhuizen (see LICENSE.txt in the weave directory). - * - * - * Suggestions: blitz-dev@oonumerics.org - * Bugs: blitz-bugs@oonumerics.org - * - * For more information, please see the Blitz++ Home Page: - * http://oonumerics.org/blitz/ - * - ***************************************************************************/ - -/* - * Wish list for array classes. - * - Arrays whose dimensions are unknown at compile time. 
- * - where()/elsewhere()/elsewhere() as in Dan Quinlan's implementation - * - block reduction operations - * - conversion to/from matrix & vector - * - apply(T func(T)) - * - apply(T func(const T&)) - * - apply - */ - -#ifndef BZ_ARRAY_H -#define BZ_ARRAY_H - -#include -#include -#include -#include - -#ifdef BZ_ARRAY_SPACE_FILLING_TRAVERSAL -#include -#endif - -#include -#include - -#include // Subarrays and slicing -#include // Tensor index notation -#include // Multicomponent arrays -#include // RectDomain class -#include // GeneralArrayStorage - - -BZ_NAMESPACE(blitz) - -/* - * Forward declarations - */ - -template -class ArrayIterator; - -template -class ConstArrayIterator; - -template -class FastArrayIterator; - -template -class _bz_ArrayExpr; - -template -class IndirectArray; - -template -void swap(Array&,Array&); - -template -void find(Array,1>&,const Array&); - -/* - * Declaration of class Array - */ - -// NEEDS_WORK: Array should inherit protected from MemoryBlockReference. -// To make this work, need to expose MemoryBlockReference::numReferences() -// and make Array a friend of Array for slicing. - -template -class Array : public MemoryBlockReference -#ifdef BZ_NEW_EXPRESSION_TEMPLATES - , public ETBase > -#endif -{ - -private: - typedef MemoryBlockReference T_base; - using T_base::data_; - using T_base::changeToNullBlock; - using T_base::numReferences; - -public: - ////////////////////////////////////////////// - // Public Types - ////////////////////////////////////////////// - - /* - * T_numtype is the numeric type stored in the array. - * T_index is a vector type which can be used to access elements - * of many-dimensional arrays. 
- * T_array is the array type itself -- Array - * T_iterator is a a fast iterator for the array, used for expression - * templates - * iterator is a STL-style iterator - * const_iterator is an STL-style const iterator - */ - - typedef P_numtype T_numtype; - typedef TinyVector T_index; - typedef Array T_array; - typedef FastArrayIterator T_iterator; - - typedef ArrayIterator iterator; - typedef ConstArrayIterator const_iterator; - - static const int _bz_rank = N_rank; - - ////////////////////////////////////////////// - // Constructors // - ////////////////////////////////////////////// - - - /* - * Construct an array from an array expression. - */ - - template - explicit Array(_bz_ArrayExpr expr); - - /* - * Any missing length arguments will have their value taken from the - * last argument. For example, - * Array A(32,64); - * will create a 32x64x64 array. This is handled by setupStorage(). - */ - - Array(GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - length_ = 0; - stride_ = 0; - zeroOffset_ = 0; - } - - explicit Array(int length0, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - length_[0] = length0; - setupStorage(0); - } - - Array(int length0, int length1, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 2); - TAU_TYPE_STRING(p1, "Array::Array() [T=" - + CT(T_numtype) + ",N=" + CT(N_rank) + "]"); - TAU_PROFILE(p1, "void (int,int)", TAU_BLITZ); - - length_[0] = length0; - length_[1] = length1; - setupStorage(1); - } - - Array(int length0, int length1, int length2, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 3); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - setupStorage(2); - } - - Array(int length0, int length1, int length2, int length3, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 
4); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - setupStorage(3); - } - - Array(int length0, int length1, int length2, int length3, int length4, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 5); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - length_[4] = length4; - setupStorage(4); - } - - Array(int length0, int length1, int length2, int length3, int length4, - int length5, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 6); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - length_[4] = length4; - length_[5] = length5; - setupStorage(5); - } - - Array(int length0, int length1, int length2, int length3, int length4, - int length5, int length6, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 7); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - length_[4] = length4; - length_[5] = length5; - length_[6] = length6; - setupStorage(6); - } - - Array(int length0, int length1, int length2, int length3, int length4, - int length5, int length6, int length7, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 8); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - length_[4] = length4; - length_[5] = length5; - length_[6] = length6; - length_[7] = length7; - setupStorage(7); - } - - Array(int length0, int length1, int length2, int length3, int length4, - int length5, int length6, int length7, int length8, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 9); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = 
length3; - length_[4] = length4; - length_[5] = length5; - length_[6] = length6; - length_[7] = length7; - length_[8] = length8; - setupStorage(8); - } - - Array(int length0, int length1, int length2, int length3, int length4, - int length5, int length6, int length7, int length8, int length9, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 10); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - length_[4] = length4; - length_[5] = length5; - length_[6] = length6; - length_[7] = length7; - length_[8] = length8; - length_[9] = length9; - setupStorage(9); - } - - Array(int length0, int length1, int length2, int length3, int length4, - int length5, int length6, int length7, int length8, int length9, - int length10, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(N_rank >= 11); - length_[0] = length0; - length_[1] = length1; - length_[2] = length2; - length_[3] = length3; - length_[4] = length4; - length_[5] = length5; - length_[6] = length6; - length_[7] = length7; - length_[8] = length8; - length_[9] = length9; - length_[10] = length10; - setupStorage(10); - } - - /* - * Construct an array from an existing block of memory. Ownership - * is not acquired (this is provided for backwards compatibility). - */ - Array(T_numtype* restrict dataFirst, TinyVector shape, - GeneralArrayStorage storage = GeneralArrayStorage()) - : MemoryBlockReference(product(shape), dataFirst, - neverDeleteData), - storage_(storage) - { - BZPRECONDITION(dataFirst != 0); - - length_ = shape; - computeStrides(); - data_ += zeroOffset_; - } - - /* - * Construct an array from an existing block of memory, with a - * given set of strides. Ownership is not acquired (i.e. the memory - * block will not be freed by Blitz++). 
- */ - Array(T_numtype* restrict dataFirst, TinyVector shape, - TinyVector stride, - GeneralArrayStorage storage = GeneralArrayStorage()) - : MemoryBlockReference(product(shape), dataFirst, - neverDeleteData), - storage_(storage) - { - BZPRECONDITION(dataFirst != 0); - - length_ = shape; - stride_ = stride; - calculateZeroOffset(); - data_ += zeroOffset_; - } - - /* - * Construct an array from an existing block of memory. - */ - Array(T_numtype* restrict dataFirst, TinyVector shape, - preexistingMemoryPolicy deletionPolicy, - GeneralArrayStorage storage = GeneralArrayStorage()) - : MemoryBlockReference(product(shape), dataFirst, - deletionPolicy), - storage_(storage) - { - BZPRECONDITION(dataFirst != 0); - - length_ = shape; - computeStrides(); - data_ += zeroOffset_; - - if (deletionPolicy == duplicateData) - reference(copy()); - } - - /* - * Construct an array from an existing block of memory, with a - * given set of strides. - */ - Array(T_numtype* restrict dataFirst, TinyVector shape, - TinyVector stride, - preexistingMemoryPolicy deletionPolicy, - GeneralArrayStorage storage = GeneralArrayStorage()) - : MemoryBlockReference(product(shape), dataFirst, - deletionPolicy), - storage_(storage) - { - BZPRECONDITION(dataFirst != 0); - - length_ = shape; - stride_ = stride; - calculateZeroOffset(); - data_ += zeroOffset_; - - if (deletionPolicy == duplicateData) - reference(copy()); - } - - /* - * This constructor takes an extent (length) vector and storage format. - */ - - Array(const TinyVector& extent, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - length_ = extent; - setupStorage(N_rank - 1); - } - - /* - * This construct takes a vector of bases (lbounds) and a vector of - * extents. - */ - - Array(const TinyVector& lbounds, - const TinyVector& extent, - const GeneralArrayStorage& storage - = GeneralArrayStorage()); - - /* - * These constructors allow arbitrary bases (starting indices) to be set. - * e.g. 
Array A(Range(10,20), Range(20,30)) - * will create an 11x11 array whose indices are 10..20 and 20..30 - */ - Array(Range r0, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - setupStorage(0); - } - - Array(Range r0, Range r1, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - - setupStorage(1); - } - - Array(Range r0, Range r1, Range r2, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - - setupStorage(2); - } - - Array(Range r0, Range r1, Range r2, Range r3, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - - setupStorage(3); - } - - Array(Range r0, Range r1, Range r2, Range r3, Range r4, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous()); - - 
length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - - setupStorage(4); - } - - Array(Range r0, Range r1, Range r2, Range r3, Range r4, Range r5, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous() - && r5.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - length_[5] = r5.length(); - storage_.setBase(5, r5.first()); - - setupStorage(5); - } - - Array(Range r0, Range r1, Range r2, Range r3, Range r4, Range r5, - Range r6, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous() - && r5.isAscendingContiguous() && r6.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - length_[5] = r5.length(); - storage_.setBase(5, r5.first()); - length_[6] = r6.length(); - storage_.setBase(6, r6.first()); - - setupStorage(6); - } - - Array(Range r0, Range r1, Range r2, 
Range r3, Range r4, Range r5, - Range r6, Range r7, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous() - && r5.isAscendingContiguous() && r6.isAscendingContiguous() - && r7.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - length_[5] = r5.length(); - storage_.setBase(5, r5.first()); - length_[6] = r6.length(); - storage_.setBase(6, r6.first()); - length_[7] = r7.length(); - storage_.setBase(7, r7.first()); - - setupStorage(7); - } - - Array(Range r0, Range r1, Range r2, Range r3, Range r4, Range r5, - Range r6, Range r7, Range r8, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous() - && r5.isAscendingContiguous() && r6.isAscendingContiguous() - && r7.isAscendingContiguous() && r8.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - length_[5] = r5.length(); - storage_.setBase(5, r5.first()); - length_[6] = r6.length(); - storage_.setBase(6, r6.first()); - length_[7] = r7.length(); - storage_.setBase(7, r7.first()); - length_[8] = r8.length(); - storage_.setBase(8, r8.first()); - - setupStorage(8); - } 
- - Array(Range r0, Range r1, Range r2, Range r3, Range r4, Range r5, - Range r6, Range r7, Range r8, Range r9, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous() - && r5.isAscendingContiguous() && r6.isAscendingContiguous() - && r7.isAscendingContiguous() && r8.isAscendingContiguous() - && r9.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - length_[5] = r5.length(); - storage_.setBase(5, r5.first()); - length_[6] = r6.length(); - storage_.setBase(6, r6.first()); - length_[7] = r7.length(); - storage_.setBase(7, r7.first()); - length_[8] = r8.length(); - storage_.setBase(8, r8.first()); - length_[9] = r9.length(); - storage_.setBase(9, r9.first()); - - setupStorage(9); - } - - Array(Range r0, Range r1, Range r2, Range r3, Range r4, Range r5, - Range r6, Range r7, Range r8, Range r9, Range r10, - GeneralArrayStorage storage = GeneralArrayStorage()) - : storage_(storage) - { - BZPRECONDITION(r0.isAscendingContiguous() && - r1.isAscendingContiguous() && r2.isAscendingContiguous() - && r3.isAscendingContiguous() && r4.isAscendingContiguous() - && r5.isAscendingContiguous() && r6.isAscendingContiguous() - && r7.isAscendingContiguous() && r8.isAscendingContiguous() - && r9.isAscendingContiguous() && r10.isAscendingContiguous()); - - length_[0] = r0.length(); - storage_.setBase(0, r0.first()); - length_[1] = r1.length(); - storage_.setBase(1, r1.first()); - length_[2] = r2.length(); - storage_.setBase(2, r2.first()); - length_[3] = r3.length(); - storage_.setBase(3, r3.first()); - 
length_[4] = r4.length(); - storage_.setBase(4, r4.first()); - length_[5] = r5.length(); - storage_.setBase(5, r5.first()); - length_[6] = r6.length(); - storage_.setBase(6, r6.first()); - length_[7] = r7.length(); - storage_.setBase(7, r7.first()); - length_[8] = r8.length(); - storage_.setBase(8, r8.first()); - length_[9] = r9.length(); - storage_.setBase(9, r9.first()); - length_[10] = r10.length(); - storage_.setBase(10, r10.first()); - - setupStorage(10); - } - - /* - * Create a reference of another array - */ - Array(const Array& array) -#ifdef BZ_NEW_EXPRESSION_TEMPLATES - : MemoryBlockReference(), - ETBase< Array >(array) -#else - : MemoryBlockReference() -#endif - { - // NEEDS_WORK: this const_cast is a tad ugly. - reference(const_cast(array)); - } - - /* - * These constructors are used for creating interlaced arrays (see - * - */ - Array(const TinyVector& shape, - int lastExtent, const GeneralArrayStorage& storage); - //Array(const TinyVector& shape, - // int lastExtent, const GeneralArrayStorage& storage); - - /* - * These constructors make the array a view of a subportion of another - * array. If there fewer than N_rank Range arguments provided, no - * slicing is performed in the unspecified ranks. - * e.g. 
Array A(20,20,20); - * Array B(A, Range(5,15)); - * is equivalent to: - * Array B(A, Range(5,15), Range::all(), Range::all()); - */ - Array(Array& array, Range r0) - { - constructSubarray(array, r0); - } - - Array(Array& array, Range r0, Range r1) - { - constructSubarray(array, r0, r1); - } - - Array(Array& array, Range r0, Range r1, Range r2) - { - constructSubarray(array, r0, r1, r2); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3) - { - constructSubarray(array, r0, r1, r2, r3); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4) - { - constructSubarray(array, r0, r1, r2, r3, r4); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4, Range r5) - { - constructSubarray(array, r0, r1, r2, r3, r4, r5); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4, Range r5, Range r6) - { - constructSubarray(array, r0, r1, r2, r3, r4, r5, r6); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4, Range r5, Range r6, Range r7) - { - constructSubarray(array, r0, r1, r2, r3, r4, r5, r6, r7); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4, Range r5, Range r6, Range r7, Range r8) - { - constructSubarray(array, r0, r1, r2, r3, r4, r5, r6, r7, r8); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4, Range r5, Range r6, Range r7, Range r8, Range r9) - { - constructSubarray(array, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9); - } - - Array(Array& array, Range r0, Range r1, Range r2, - Range r3, Range r4, Range r5, Range r6, Range r7, Range r8, Range r9, - Range r10) - { - constructSubarray(array, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10); - } - - Array(Array& array, - const RectDomain& subdomain) - { - constructSubarray(array, subdomain); - } - - /* Constructor added by Julian Cummings */ - Array(Array& array, - const StridedDomain& subdomain) - { - constructSubarray(array, subdomain); - } - - /* - * 
This constructor is invoked by the operator()'s which take - * a combination of integer and Range arguments. It's not intended - * for end-user use. - */ - template - Array(Array& array, R0 r0, R1 r1, R2 r2, - R3 r3, R4 r4, R5 r5, R6 r6, R7 r7, R8 r8, R9 r9, R10 r10) - { - constructSlice(array, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10); - } - - ////////////////////////////////////////////// - // Member functions - ////////////////////////////////////////////// - - const TinyVector& base() const - { return storage_.base(); } - - int base(int rank) const - { return storage_.base(rank); } - - iterator begin() - { return iterator(*this); } - - const_iterator begin() const - { return const_iterator(*this); } - - T_iterator beginFast() const - { return T_iterator(*this); } - - // Deprecated: now extractComponent(...) - template - Array chopComponent(P_numtype2 a, int compNum, - int numComponents) const - { return extractComponent(a, compNum, numComponents); } - - int cols() const - { return length_[1]; } - - int columns() const - { return length_[1]; } - - T_array copy() const; - - // data_ always refers to the point (0,0,...,0) which may - // not be in the array if the base is not zero in each rank. - // These data() routines return a pointer to the first - // element in the array (but note that it may not be - // stored first in memory if some ranks are stored descending). - - int dataOffset() const - { - return dot(storage_.base(), stride_); - } - - const T_numtype* restrict data() const - { return data_ + dataOffset(); } - - T_numtype* restrict data() - { return data_ + dataOffset(); } - - // These dataZero() routines refer to the point (0,0,...,0) - // which may not be in the array if the bases are nonzero. - - const T_numtype* restrict dataZero() const - { return data_; } - - T_numtype* restrict dataZero() - { return data_; } - - // These dataFirst() routines refer to the element in the - // array which falls first in memory. 
- - int dataFirstOffset() const - { - int pos = 0; - - // Used to use tinyvector expressions: - // return data_ + dot(storage_.base() - // + (1 - storage_.ascendingFlag()) * (length_ - 1), stride_); - - for (int i=0; i < N_rank; ++i) - pos += (storage_.base(i) + (1-storage_.isRankStoredAscending(i)) * - (length_(i)-1)) * stride_(i); - - return pos; - } - - const T_numtype* restrict dataFirst() const - { - return data_ + dataFirstOffset(); - } - - T_numtype* restrict dataFirst() - { - return data_ + dataFirstOffset(); - } - - int depth() const - { return length_[2]; } - - int dimensions() const - { return N_rank; } - - RectDomain domain() const - { - return RectDomain(lbound(), ubound()); - } - - void dumpStructureInformation(ostream& os = cout) const; - - iterator end() - { - return iterator(); - } - - const_iterator end() const - { - return const_iterator(); - } - - int extent(int rank) const - { return length_[rank]; } - - const TinyVector& extent() const - { return length_; } - - template - Array extractComponent(P_numtype2, int compNum, - int numComponents) const; - - void free() - { - changeToNullBlock(); - length_ = 0; - } - - bool isMajorRank(int rank) const { return storage_.ordering(rank) == 0; } - bool isMinorRank(int rank) const { return storage_.ordering(rank) != 0; } - bool isRankStoredAscending(int rank) const { - return storage_.isRankStoredAscending(rank); - } - - bool isStorageContiguous() const; - - int lbound(int rank) const { return base(rank); } - TinyVector lbound() const { return base(); } - - int length(int rank) const { return length_[rank]; } - const TinyVector& length() const { return length_; } - - void makeUnique(); - - int numElements() const { return product(length_); } - - // NEEDS_WORK -- Expose the numReferences() method - // MemoryBlockReference::numReferences; - - // The storage_.ordering_ array is a list of dimensions from - // the most minor (stride 1) to major dimension. 
Generally, - // ordering(0) will return the dimension which has the smallest - // stride, and ordering(N_rank-1) will return the dimension with - // the largest stride. - int ordering(int storageRankIndex) const - { return storage_.ordering(storageRankIndex); } - - const TinyVector& ordering() const - { return storage_.ordering(); } - - void transposeSelf(int r0, int r1, int r2=0, - int r3=0, int r4=0, int r5=0, int r6=0, int r7=0, int r8=0, int - r9=0, int r10=0); - T_array transpose(int r0, int r1, int r2=0, - int r3=0, int r4=0, int r5=0, int r6=0, int r7=0, int r8=0, int - r9=0, int r10=0); - - int rank() const - { return N_rank; } - - void reference(const T_array&); - - // Added by Derrick Bass - T_array reindex(const TinyVector&); - void reindexSelf(const - TinyVector&); - - void resize(int extent); - void resize(int extent1, int extent2); - void resize(int extent1, int extent2, - int extent3); - void resize(int extent1, int extent2, - int extent3, int extent4); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5, - int extent6); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5, - int extent6, int extent7); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5, - int extent6, int extent7, int extent8); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5, - int extent6, int extent7, int extent8, - int extent9); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5, - int extent6, int extent7, int extent8, - int extent9, int extent10); - void resize(int extent1, int extent2, - int extent3, int extent4, int extent5, - int extent6, int extent7, int extent8, - int extent9, int extent10, - int extent11); - - - void resize(Range r1); - void resize(Range r1, Range r2); - void resize(Range r1, Range r2, Range r3); - void resize(Range r1, Range r2, 
Range r3, - Range r4); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5, Range r6); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5, Range r6, - Range r7); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5, Range r6, - Range r7, Range r8); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5, Range r6, - Range r7, Range r8, Range r9); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5, Range r6, - Range r7, Range r8, Range r9, - Range r10); - void resize(Range r1, Range r2, Range r3, - Range r4, Range r5, Range r6, - Range r7, Range r8, Range r9, - Range r10, Range r11); - - void resize(const TinyVector&); - - - void resizeAndPreserve(const TinyVector&); - void resizeAndPreserve(int extent); - void resizeAndPreserve(int extent1, - int extent2); - void resizeAndPreserve(int extent1, - int extent2, int extent3); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5, int extent6); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5, int extent6, int extent7); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5, int extent6, int extent7, - int extent8); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5, int extent6, int extent7, - int extent8, int extent9); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5, int extent6, int extent7, - int extent8, int extent9, - int extent10); - void resizeAndPreserve(int extent1, - int extent2, int extent3, int extent4, - int extent5, int extent6, int extent7, - int extent8, int extent9, int extent10, - int extent11); - - 
// NEEDS_WORK -- resizeAndPreserve(Range,...) - // NEEDS_WORK -- resizeAndPreserve(const Domain&); - - T_array reverse(int rank); - void reverseSelf(int rank); - - int rows() const - { return length_[0]; } - - void setStorage(GeneralArrayStorage); - - void slice(int rank, Range r); - - const TinyVector& shape() const - { return length_; } - - int size() const - { return numElements(); } - - const TinyVector& stride() const - { return stride_; } - - int stride(int rank) const - { return stride_[rank]; } - - int ubound(int rank) const - { return base(rank) + length_(rank) - 1; } - - TinyVector ubound() const - { - TinyVector ub; - for (int i=0; i < N_rank; ++i) - ub(i) = base(i) + extent(i) - 1; - // WAS: ub = base() + extent() - 1; - return ub; - } - - int zeroOffset() const - { return zeroOffset_; } - - ////////////////////////////////////////////// - // Debugging routines - ////////////////////////////////////////////// - - bool isInRangeForDim(int i, int d) const { - return i >= base(d) && (i - base(d)) < length_[d]; - } - - bool isInRange(int i0) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0]; - } - - bool isInRange(int i0, int i1) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1]; - } - - bool isInRange(int i0, int i1, int i2) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2]; - } - - bool isInRange(int i0, int i1, int i2, int i3) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3]; - } - - bool isInRange(int i0, int i1, int i2, int i3, int i4) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= 
base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4]; - } - - bool isInRange(int i0, int i1, int i2, int i3, int i4, int i5) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4] - && i5 >= base(5) && (i5 - base(5)) < length_[5]; - } - - bool isInRange(int i0, int i1, int i2, int i3, int i4, int i5, int i6) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4] - && i5 >= base(5) && (i5 - base(5)) < length_[5] - && i6 >= base(6) && (i6 - base(6)) < length_[6]; - } - - bool isInRange(int i0, int i1, int i2, int i3, int i4, - int i5, int i6, int i7) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4] - && i5 >= base(5) && (i5 - base(5)) < length_[5] - && i6 >= base(6) && (i6 - base(6)) < length_[6] - && i7 >= base(7) && (i7 - base(7)) < length_[7]; - } - - bool isInRange(int i0, int i1, int i2, int i3, int i4, - int i5, int i6, int i7, int i8) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4] - && i5 >= base(5) && (i5 - base(5)) < length_[5] - && i6 >= base(6) && (i6 - base(6)) < length_[6] - && i7 >= base(7) && (i7 - base(7)) < length_[7] - && i8 >= base(8) && (i8 - base(8)) < length_[8]; - } - - bool 
isInRange(int i0, int i1, int i2, int i3, int i4, - int i5, int i6, int i7, int i8, int i9) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4] - && i5 >= base(5) && (i5 - base(5)) < length_[5] - && i6 >= base(6) && (i6 - base(6)) < length_[6] - && i7 >= base(7) && (i7 - base(7)) < length_[7] - && i8 >= base(8) && (i8 - base(8)) < length_[8] - && i9 >= base(9) && (i9 - base(9)) < length_[9]; - } - - bool isInRange(int i0, int i1, int i2, int i3, int i4, - int i5, int i6, int i7, int i8, int i9, int i10) const { - return i0 >= base(0) && (i0 - base(0)) < length_[0] - && i1 >= base(1) && (i1 - base(1)) < length_[1] - && i2 >= base(2) && (i2 - base(2)) < length_[2] - && i3 >= base(3) && (i3 - base(3)) < length_[3] - && i4 >= base(4) && (i4 - base(4)) < length_[4] - && i5 >= base(5) && (i5 - base(5)) < length_[5] - && i6 >= base(6) && (i6 - base(6)) < length_[6] - && i7 >= base(7) && (i7 - base(7)) < length_[7] - && i8 >= base(8) && (i8 - base(8)) < length_[8] - && i9 >= base(9) && (i9 - base(9)) < length_[9] - && i10 >= base(10) && (i10 - base(10)) < length_[10]; - } - - bool isInRange(const T_index& index) const { - for (int i=0; i < N_rank; ++i) - if (index[i] < base(i) || (index[i] - base(i)) >= length_[i]) - return false; - - return true; - } - - bool assertInRange(const T_index& BZ_DEBUG_PARAM(index)) const { - BZPRECHECK(isInRange(index), "Array index out of range: " << index - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0)) const { - BZPRECHECK(isInRange(i0), "Array index out of range: " << i0 - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int 
BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1)) const { - BZPRECHECK(isInRange(i0,i1), "Array index out of range: (" - << i0 << ", " << i1 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2)) const - { - BZPRECHECK(isInRange(i0,i1,i2), "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3), "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), - int BZ_DEBUG_PARAM(i4)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4), "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), int BZ_DEBUG_PARAM(i4), - int BZ_DEBUG_PARAM(i5)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4,i5), "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ", " << i5 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), int BZ_DEBUG_PARAM(i4), - int BZ_DEBUG_PARAM(i5), int 
BZ_DEBUG_PARAM(i6)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4,i5,i6), - "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ", " << i5 << ", " << i6 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), int BZ_DEBUG_PARAM(i4), - int BZ_DEBUG_PARAM(i5), int BZ_DEBUG_PARAM(i6), - int BZ_DEBUG_PARAM(i7)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4,i5,i6,i7), - "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ", " << i5 << ", " << i6 << ", " << i7 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), int BZ_DEBUG_PARAM(i4), - int BZ_DEBUG_PARAM(i5), int BZ_DEBUG_PARAM(i6), int BZ_DEBUG_PARAM(i7), - int BZ_DEBUG_PARAM(i8)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4,i5,i6,i7,i8), - "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ", " << i5 << ", " << i6 << ", " << i7 - << ", " << i8 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), int BZ_DEBUG_PARAM(i4), - int BZ_DEBUG_PARAM(i5), int BZ_DEBUG_PARAM(i6), int BZ_DEBUG_PARAM(i7), - int BZ_DEBUG_PARAM(i8), int BZ_DEBUG_PARAM(i9)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4,i5,i6,i7,i8,i9), - "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ", " << i5 << ", " << i6 << ", " << i7 - << ", " << i8 << ", " << i9 << ")" - << endl << "Lower bounds: " << 
storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - bool assertInRange(int BZ_DEBUG_PARAM(i0), int BZ_DEBUG_PARAM(i1), - int BZ_DEBUG_PARAM(i2), int BZ_DEBUG_PARAM(i3), int BZ_DEBUG_PARAM(i4), - int BZ_DEBUG_PARAM(i5), int BZ_DEBUG_PARAM(i6), int BZ_DEBUG_PARAM(i7), - int BZ_DEBUG_PARAM(i8), int BZ_DEBUG_PARAM(i9), - int BZ_DEBUG_PARAM(i10)) const - { - BZPRECHECK(isInRange(i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10), - "Array index out of range: (" - << i0 << ", " << i1 << ", " << i2 << ", " << i3 - << ", " << i4 << ", " << i5 << ", " << i6 << ", " << i7 - << ", " << i8 << ", " << i9 << ", " << i10 << ")" - << endl << "Lower bounds: " << storage_.base() << endl - << "Length: " << length_ << endl); - return true; - } - - ////////////////////////////////////////////// - // Subscripting operators - ////////////////////////////////////////////// - - template - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index); - return data_[dot(index, stride_)]; - } - - template - T_numtype& restrict operator()(const TinyVector& index) - { - assertInRange(index); - return data_[dot(index, stride_)]; - } - - const T_numtype& restrict operator()(TinyVector index) const - { - assertInRange(index[0]); - return data_[index[0] * stride_[0]]; - } - - T_numtype& operator()(TinyVector index) - { - assertInRange(index[0]); - return data_[index[0] * stride_[0]]; - } - - const T_numtype& restrict operator()(TinyVector index) const - { - assertInRange(index[0], index[1]); - return data_[index[0] * stride_[0] + index[1] * stride_[1]]; - } - - T_numtype& operator()(TinyVector index) - { - assertInRange(index[0], index[1]); - return data_[index[0] * stride_[0] + index[1] * stride_[1]]; - } - - const T_numtype& restrict operator()(TinyVector index) const - { - assertInRange(index[0], index[1], index[2]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2]]; - } - - T_numtype& operator()(TinyVector 
index) - { - assertInRange(index[0], index[1], index[2]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6]); - return data_[index[0] * 
stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7], index[8]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7] - + index[8] * stride_[8]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7], index[8]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + 
index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7] - + index[8] * stride_[8]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7], index[8], index[9]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7] - + index[8] * stride_[8] + index[9] * stride_[9]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7], index[8], index[9]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7] - + index[8] * stride_[8] + index[9] * stride_[9]]; - } - - const T_numtype& restrict operator()(const TinyVector& index) const - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7], index[8], index[9], - index[10]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7] - + index[8] * stride_[8] + index[9] * stride_[9] - + index[10] * stride_[10]]; - } - - T_numtype& operator()(const TinyVector& index) - { - assertInRange(index[0], index[1], index[2], index[3], - index[4], index[5], index[6], index[7], index[8], index[9], - index[10]); - return data_[index[0] * stride_[0] + index[1] * stride_[1] - + index[2] * stride_[2] + index[3] * stride_[3] - + index[4] * stride_[4] + index[5] * stride_[5] - + index[6] * stride_[6] + index[7] * stride_[7] - + index[8] * stride_[8] + index[9] * stride_[9] - + index[10] * stride_[10]]; - } - - 
const T_numtype& restrict operator()(int i0) const - { - assertInRange(i0); - return data_[i0 * stride_[0]]; - } - - T_numtype& restrict operator()(int i0) - { - assertInRange(i0); - return data_[i0 * stride_[0]]; - } - - const T_numtype& restrict operator()(int i0, int i1) const - { - assertInRange(i0, i1); - return data_[i0 * stride_[0] + i1 * stride_[1]]; - } - - T_numtype& restrict operator()(int i0, int i1) - { - assertInRange(i0, i1); - return data_[i0 * stride_[0] + i1 * stride_[1]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2) const - { - assertInRange(i0, i1, i2); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2) - { - assertInRange(i0, i1, i2); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3) const - { - assertInRange(i0, i1, i2, i3); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3) - { - assertInRange(i0, i1, i2, i3); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4) const - { - assertInRange(i0, i1, i2, i3, i4); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4) - { - assertInRange(i0, i1, i2, i3, i4); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5) const - { - assertInRange(i0, i1, i2, i3, i4, i5); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5]]; - } - - 
T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5) - { - assertInRange(i0, i1, i2, i3, i4, i5); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6) const - { - assertInRange(i0, i1, i2, i3, i4, i5, i6); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6) - { - assertInRange(i0, i1, i2, i3, i4, i5, i6); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7) const - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7) - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7, int i8) const - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7, i8); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7] - + i8 * stride_[8]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7, int i8) - { - 
assertInRange(i0, i1, i2, i3, i4, i5, i6, i7, i8); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7] - + i8 * stride_[8]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7, int i8, int i9) const - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7] - + i8 * stride_[8] + i9 * stride_[9]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7, int i8, int i9) - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7] - + i8 * stride_[8] + i9 * stride_[9]]; - } - - const T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7, int i8, int i9, int i10) const - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7, i8, - i9, i10); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7] - + i8 * stride_[8] + i9 * stride_[9] + i10 * stride_[10]]; - } - - T_numtype& restrict operator()(int i0, int i1, int i2, int i3, - int i4, int i5, int i6, int i7, int i8, int i9, int i10) - { - assertInRange(i0, i1, i2, i3, i4, i5, i6, i7, i8, - i9, i10); - return data_[i0 * stride_[0] + i1 * stride_[1] - + i2 * stride_[2] + i3 * stride_[3] + i4 * stride_[4] - + i5 * stride_[5] + i6 * stride_[6] + i7 * stride_[7] - + i8 * stride_[8] + i9 * stride_[9] + i10 * stride_[10]]; - } - - /* - * Slicing to produce subarrays. 
If the number of Range arguments is - * fewer than N_rank, then missing arguments are treated like Range::all(). - */ - - T_array& noConst() const - { return const_cast(*this); } - - T_array operator()(const RectDomain& subdomain) const - { - return T_array(noConst(), subdomain); - } - - /* Operator added by Julian Cummings */ - T_array operator()(const StridedDomain& subdomain) const - { - return T_array(noConst(), subdomain); - } - - T_array operator()(Range r0) const - { - return T_array(noConst(), r0); - } - - T_array operator()(Range r0, Range r1) const - { - return T_array(noConst(), r0, r1); - } - - T_array operator()(Range r0, Range r1, Range r2) const - { - return T_array(noConst(), r0, r1, r2); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3) const - { - return T_array(noConst(), r0, r1, r2, r3); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4) const - { - return T_array(noConst(), r0, r1, r2, r3, r4); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4, - Range r5) const - { - return T_array(noConst(), r0, r1, r2, r3, r4, r5); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4, - Range r5, Range r6) const - { - return T_array(noConst(), r0, r1, r2, r3, r4, r5, r6); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4, - Range r5, Range r6, Range r7) const - { - return T_array(noConst(), r0, r1, r2, r3, r4, r5, r6, r7); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4, - Range r5, Range r6, Range r7, Range r8) const - { - return T_array(noConst(), r0, r1, r2, r3, r4, r5, r6, r7, r8); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4, - Range r5, Range r6, Range r7, Range r8, Range r9) const - { - return T_array(noConst(), r0, r1, r2, r3, r4, r5, r6, r7, r8, r9); - } - - T_array operator()(Range r0, Range r1, Range r2, Range r3, Range r4, - Range r5, Range r6, Range r7, Range r8, Range 
r9, Range r10) const - { - return T_array(noConst(), r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10); - } - - // Allow any mixture of Range, int and Vector objects as - // operands for operator(): A(Range(3,7), 5, Range(2,4)) - - /* - * These versions of operator() allow any combination of int - * and Range operands to be used. Each int operand reduces - * the rank of the resulting array by one. - * - * e.g. Array A(20,20,20,20); - * Array B = A(Range(5,15), 3, 5, Range(8,9)); - * - * SliceInfo is a helper class defined in . - * It counts the number of Range vs. int arguments and does some - * other helpful things. - * - * Once partial specialization becomes widely implemented, these - * operators may be expanded to accept Vector arguments - * and produce ArrayPick objects. - * - * This operator() is not provided with a single argument because - * the appropriate cases exist above. - */ - -#ifdef BZ_HAVE_PARTIAL_ORDERING - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 
r3, T4 r4, T5 r5) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, nilArraySection(), - nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4, T5 r5, T6 r6) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, r6, nilArraySection(), nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4, T5 r5, T6 r6, T7 r7) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, r6, r7, nilArraySection(), nilArraySection(), - nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4, T5 r5, T6 r6, T7 r7, T8 r8) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, r6, r7, r8, - nilArraySection(), nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4, T5 r5, T6 r6, T7 r7, T8 r8, T9 r9) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, r6, r7, r8, r9, nilArraySection(), nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4, T5 r5, T6 r6, T7 r7, T8 r8, T9 r9, T10 r10) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, nilArraySection()); - } - - template - typename SliceInfo::T_slice - operator()(T1 r1, T2 r2, T3 r3, T4 r4, T5 r5, T6 r6, T7 r7, T8 r8, T9 r9, T10 r10, T11 r11) const - { - typedef typename SliceInfo::T_slice slice; - return slice(noConst(), r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11); - } - -#endif // BZ_HAVE_PARTIAL_ORDERING - - /* 
- * These versions of operator() are provided to support tensor-style - * array notation, e.g. - * - * Array A, B; - * firstIndex i; - * secondIndex j; - * thirdIndex k; - * Array C = A(i,j) * B(j,k); - */ - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder) const - { - return _bz_ArrayExpr > - (noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - 
IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - template - _bz_ArrayExpr > - operator()(IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder, IndexPlaceholder, - IndexPlaceholder) const - { - return _bz_ArrayExpr >(noConst()); - } - - ////////////////////////////////////////////// - // Support for multicomponent arrays - ////////////////////////////////////////////// - - /* - * See for an explanation of the traits class - * multicomponent_traits. - */ - - Array::T_element,N_rank> - operator[](const unsigned component) { - typedef typename multicomponent_traits::T_element T_compType; - - return extractComponent(T_compType(),component, - multicomponent_traits::numComponents); - } - - const Array::T_element,N_rank> - operator[](const unsigned component) const { - typedef typename multicomponent_traits::T_element T_compType; - - return extractComponent(T_compType(),component, - multicomponent_traits::numComponents); - } - - Array::T_element,N_rank> - operator[](const int component) { - return operator[](static_cast(component)); - } - - const Array::T_element,N_rank> - operator[](const int component) const { - return operator[](static_cast(component)); - } - - ////////////////////////////////////////////// - // Indirection - ////////////////////////////////////////////// - - template - IndirectArray - operator[](const T_indexContainer& index) - { - return IndirectArray(*this, - const_cast(index)); - } - - ////////////////////////////////////////////// - // Assignment Operators - ////////////////////////////////////////////// - - // Scalar operand - // NEEDS_WORK : need a precondition check on - // isStorageContiguous when operator, is used. 
- ListInitializationSwitch operator=(T_numtype x) - { - return ListInitializationSwitch(*this, x); - } - - T_array& initialize(T_numtype); - - // Was: - // T_array& operator=(T_numtype); - -#ifdef BZ_NEW_EXPRESSION_TEMPLATES - template - T_array& operator=(const ETBase&); - T_array& operator=(const Array&); - - template T_array& operator+=(const T&); - template T_array& operator-=(const T&); - template T_array& operator*=(const T&); - template T_array& operator/=(const T&); - template T_array& operator%=(const T&); - template T_array& operator^=(const T&); - template T_array& operator&=(const T&); - template T_array& operator|=(const T&); - template T_array& operator>>=(const T&); - template T_array& operator<<=(const T&); - -#else - T_array& operator+=(T_numtype); - T_array& operator-=(T_numtype); - T_array& operator*=(T_numtype); - T_array& operator/=(T_numtype); - T_array& operator%=(T_numtype); - T_array& operator^=(T_numtype); - T_array& operator&=(T_numtype); - T_array& operator|=(T_numtype); - T_array& operator>>=(T_numtype); - T_array& operator<<=(T_numtype); - - // Array operands - T_array& operator=(const Array&); - - template - T_array& operator=(const Array&); - template - T_array& operator+=(const Array&); - template - T_array& operator-=(const Array&); - template - T_array& operator*=(const Array&); - template - T_array& operator/=(const Array&); - template - T_array& operator%=(const Array&); - template - T_array& operator^=(const Array&); - template - T_array& operator&=(const Array&); - template - T_array& operator|=(const Array&); - template - T_array& operator>>=(const Array&); - template - T_array& operator<<=(const Array&); - - // Array expression operands - template - inline T_array& operator=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator+=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator-=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator*=(BZ_ETPARM(_bz_ArrayExpr) expr); - 
template - inline T_array& operator/=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator%=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator^=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator&=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator|=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator>>=(BZ_ETPARM(_bz_ArrayExpr) expr); - template - inline T_array& operator<<=(BZ_ETPARM(_bz_ArrayExpr) expr); - - // NEEDS_WORK -- Index placeholder operand - - // NEEDS_WORK -- Random operand -#endif - -public: - // Undocumented implementation routines - - template - inline T_array& evaluate(T_expr expr, T_update); - -#ifdef BZ_HAVE_STD -#ifdef BZ_ARRAY_SPACE_FILLING_TRAVERSAL - template - inline T_array& evaluateWithFastTraversal( - const TraversalOrder& order, - T_expr expr, T_update); -#endif // BZ_ARRAY_SPACE_FILLING_TRAVERSAL -#endif - -#ifdef BZ_ARRAY_2D_STENCIL_TILING - template - inline T_array& evaluateWithTiled2DTraversal( - T_expr expr, T_update); -#endif - - template - inline T_array& evaluateWithIndexTraversal1( - T_expr expr, T_update); - - template - inline T_array& evaluateWithIndexTraversalN( - T_expr expr, T_update); - - template - inline T_array& evaluateWithStackTraversal1( - T_expr expr, T_update); - - template - inline T_array& evaluateWithStackTraversalN( - T_expr expr, T_update); - - - T_numtype* restrict getInitializationIterator() { return dataFirst(); } - - bool canCollapse(int outerRank, int innerRank) const { -#ifdef BZ_DEBUG_TRAVERSE - BZ_DEBUG_MESSAGE("stride(" << innerRank << ")=" << stride(innerRank) - << ", extent()=" << extent(innerRank) << ", stride(outerRank)=" - << stride(outerRank)); -#endif - return (stride(innerRank) * extent(innerRank) == stride(outerRank)); - } - -protected: - ////////////////////////////////////////////// - // Implementation routines - ////////////////////////////////////////////// - - _bz_inline2 void 
computeStrides(); - _bz_inline2 void setupStorage(int rank); - void constructSubarray(Array& array, - const RectDomain&); - void constructSubarray(Array& array, - const StridedDomain&); - void constructSubarray(Array& array, Range r0); - void constructSubarray(Array& array, Range r0, Range r1); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4, Range r5); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4, Range r5, Range r6); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4, Range r5, Range r6, - Range r7); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4, Range r5, Range r6, - Range r7, Range r8); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4, Range r5, Range r6, - Range r7, Range r8, Range r9); - void constructSubarray(Array& array, Range r0, - Range r1, Range r2, Range r3, Range r4, Range r5, Range r6, - Range r7, Range r8, Range r9, Range r10); - - void calculateZeroOffset(); - - template - void constructSlice(Array& array, R0 r0, R1 r1, R2 r2, - R3 r3, R4 r4, R5 r5, R6 r6, R7 r7, R8 r8, R9 r9, R10 r10); - - template - void slice(int& setRank, Range r, Array& array, - TinyVector& rankMap, int sourceRank); - - template - void slice(int& setRank, int i, Array& array, - TinyVector& rankMap, int sourceRank); - - template - void slice(int&, nilArraySection, Array&, - TinyVector&, int) - { } - - void doTranspose(int destRank, int sourceRank, T_array& array); - -protected: - ////////////////////////////////////////////// - // Data members - ////////////////////////////////////////////// - - // NB: adding new data members may 
require changes to ctors, reference() - - /* - * For a description of the storage_ members, see the comments for class - * GeneralArrayStorage above. - * - * length_[] contains the extent of each rank. E.g. a 10x20x30 array - * would have length_ = { 10, 20, 30}. - * stride_[] contains the stride to move to the next element along each - * rank. - * zeroOffset_ is the distance from the first element in the array - * to the point (0,0,...,0). If base_ is zero and all ranks are - * stored ascending, then zeroOffset_ is zero. This value - * is needed because to speed up indexing, the data_ member - * (inherited from MemoryBlockReference) always refers to - * (0,0,...,0). - */ - GeneralArrayStorage storage_; - TinyVector length_; - TinyVector stride_; - int zeroOffset_; -}; - -/* - * Rank numbers start with zero, which may be confusing to users coming - * from Fortran. To make code more readable, the following constants - * may help. Example: instead of - * - * int firstRankExtent = A.extent(0); - * - * One can write: - * - * int firstRankExtent = A.extent(firstRank); - */ - -const int firstRank = 0; -const int secondRank = 1; -const int thirdRank = 2; -const int fourthRank = 3; -const int fifthRank = 4; -const int sixthRank = 5; -const int seventhRank = 6; -const int eighthRank = 7; -const int ninthRank = 8; -const int tenthRank = 9; -const int eleventhRank = 10; - -const int firstDim = 0; -const int secondDim = 1; -const int thirdDim = 2; -const int fourthDim = 3; -const int fifthDim = 4; -const int sixthDim = 5; -const int seventhDim = 6; -const int eighthDim = 7; -const int ninthDim = 8; -const int tenthDim = 9; -const int eleventhDim = 10; - -/* - * Global Functions - */ - -template -ostream& operator<<(ostream&, const Array&); - -template -ostream& operator<<(ostream&, const Array&); - -template -ostream& operator<<(ostream&, const Array&); - -template -istream& operator>>(istream& is, Array& x); - -template -void swap(Array& a,Array& b) { - Array c(a); - 
a.reference(b); - b.reference(c); -} - -template -void find(Array,1>& indices, - const _bz_ArrayExpr& expr) { - find(indices, - static_cast< Array >(expr)); -} - -template -void find(Array,1>& indices, - const Array& exprVals) { - indices.resize(exprVals.size()); - typename Array::const_iterator it, end = exprVals.end(); - int j=0; - for (it = exprVals.begin(); it != end; ++it) - if (*it) - indices(j++) = it.position(); - if (j) - indices.resizeAndPreserve(j); - else - indices.free(); - return; -} - - -BZ_NAMESPACE_END - -/* - * Include implementations of the member functions and some additional - * global functions. - */ - -#include // Array iterators -#include // Fast Array iterators (for et) -#include // Array expression objects -#include // Member functions -#include // Array expression evaluation -#include // Assignment operators -#include // Output formatting -#include // Expression templates -#include // Array reduction expression templates -#include // Allocation of interlaced arrays -#include // Array resize, resizeAndPreserve -#include // Slicing and subarrays -#include // Cycling arrays -#include // Special support for complex arrays -#include // Zipping multicomponent types -#include // where(X,Y,Z) -#include // Indirection -#include // Stencil objects - -#endif // BZ_ARRAY_H diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/array-old.h b/scipy-0.10.1/scipy/weave/blitz/blitz/array-old.h deleted file mode 100644 index 6f75a26963..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/array-old.h +++ /dev/null @@ -1,50 +0,0 @@ -/*************************************************************************** - * blitz/array-old.h Maximal include version of Array - * Note: see for the class def. - * - * $Id: array-old.h 1414 2005-11-01 22:04:59Z cookedm $ - * - * Copyright (C) 1997-2001 Todd Veldhuizen - * - * This code was relicensed under the modified BSD license for use in SciPy - * by Todd Veldhuizen (see LICENSE.txt in the weave directory). 
- * - * - * Suggestions: blitz-dev@oonumerics.org - * Bugs: blitz-bugs@oonumerics.org - * - * For more information, please see the Blitz++ Home Page: - * http://oonumerics.org/blitz/ - * - ***************************************************************************/ - -#ifndef BZ_ARRAY_OLD_H -#define BZ_ARRAY_OLD_H - -/* - * used to include most of the Blitz++ library - * functionality, totally ~ 120000 lines of source code. This - * made for extremely slow compile times; processing #include - * took gcc about 25 seconds on a 500 MHz pentium box. - * - * Much of this compile time was due to the old vector expression templates - * implementation. Since this is not really needed for the Array - * class, the headers were redesigned so that: - * - * #include is the old-style include, pulls in most - * of Blitz++ including vector e.t. - * #include pulls in much less of the library, and - * in particular excludes the vector e.t. code - * - * With , one gets TinyVector expressions automatically. - * With , one must now also include - * to get TinyVector expressions. - * - * The implementation of Array has been moved to . - */ - -#include -#include - -#endif // BZ_ARRAY_OLD_H - diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/array.h b/scipy-0.10.1/scipy/weave/blitz/blitz/array.h deleted file mode 100644 index 1ac4b3f1f6..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/array.h +++ /dev/null @@ -1,29 +0,0 @@ -/*************************************************************************** - * blitz/array.h Minimal include version of Array - * - * $Id: array.h 1414 2005-11-01 22:04:59Z cookedm $ - * - * Copyright (C) 1997-2000 Todd Veldhuizen - * - * This code was relicensed under the modified BSD license for use in SciPy - * by Todd Veldhuizen (see LICENSE.txt in the weave directory). 
- * - * - * Suggestions: blitz-dev@oonumerics.org - * Bugs: blitz-bugs@oonumerics.org - * - * For more information, please see the Blitz++ Home Page: - * http://oonumerics.org/blitz/ - * - ***************************************************************************/ - -#ifndef BZ_ARRAY_ONLY_H -#define BZ_ARRAY_ONLY_H - -// See comments in for an explanation of the new -// headers arrangement. - -#include - -#endif // BZ_ARRAY_ONLY_H - diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/array/Makefile.am b/scipy-0.10.1/scipy/weave/blitz/blitz/array/Makefile.am deleted file mode 100644 index d22e74091f..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/array/Makefile.am +++ /dev/null @@ -1,20 +0,0 @@ -# -# Written by Patrick Guio -# - -arraydir = $(includedir)/blitz/array -generatedir = ../generate - -genheaders = bops.cc uops.cc - -array_HEADERS = asexpr.h cartesian.h cgsolve.h complex.cc \ -convolve.cc convolve.h cycle.cc domain.h et.h eval.cc expr.h fastiter.h \ -funcs.h functorExpr.h geometry.h indirect.h interlace.cc io.cc iter.h map.h \ -methods.cc misc.cc multi.h newet-macros.h \ -newet.h ops.cc ops.h reduce.cc reduce.h resize.cc shape.h slice.h slicing.cc \ -stencil-et.h stencilops.h stencils.cc stencils.h storage.h where.h zip.h \ -$(genheaders) - -$(genheaders): - cd $(generatedir) ; $(MAKE) generate-headers - diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/array/Makefile.in b/scipy-0.10.1/scipy/weave/blitz/blitz/array/Makefile.in deleted file mode 100644 index f91d53aa0c..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/array/Makefile.in +++ /dev/null @@ -1,452 +0,0 @@ -# Makefile.in generated by automake 1.9.6 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. 
-# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# -# Written by Patrick Guio -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -target_triplet = @target@ -subdir = blitz/array -DIST_COMMON = $(array_HEADERS) $(srcdir)/Makefile.am \ - $(srcdir)/Makefile.in -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ac_check_cxx_features.m4 \ - $(top_srcdir)/m4/ac_compiler_specific_header.m4 \ - $(top_srcdir)/m4/ac_compilers_64bits.m4 \ - $(top_srcdir)/m4/ac_cxx_bool.m4 \ - $(top_srcdir)/m4/ac_cxx_complex_math_in_namespace_std.m4 \ - $(top_srcdir)/m4/ac_cxx_const_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_default_template_parameters.m4 \ - $(top_srcdir)/m4/ac_cxx_dynamic_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_enable_debug.m4 \ - $(top_srcdir)/m4/ac_cxx_enable_optimize.m4 \ - $(top_srcdir)/m4/ac_cxx_enum_computations.m4 \ - $(top_srcdir)/m4/ac_cxx_enum_computations_with_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_exceptions.m4 \ - $(top_srcdir)/m4/ac_cxx_explicit.m4 \ - 
$(top_srcdir)/m4/ac_cxx_explicit_template_function_qualification.m4 \ - $(top_srcdir)/m4/ac_cxx_flags_preset.m4 \ - $(top_srcdir)/m4/ac_cxx_full_specialization_syntax.m4 \ - $(top_srcdir)/m4/ac_cxx_function_nontype_parameters.m4 \ - $(top_srcdir)/m4/ac_cxx_general.m4 \ - $(top_srcdir)/m4/ac_cxx_have_climits.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex_fcns.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex_math1.m4 \ - $(top_srcdir)/m4/ac_cxx_have_complex_math2.m4 \ - $(top_srcdir)/m4/ac_cxx_have_ieee_math.m4 \ - $(top_srcdir)/m4/ac_cxx_have_numeric_limits.m4 \ - $(top_srcdir)/m4/ac_cxx_have_rusage.m4 \ - $(top_srcdir)/m4/ac_cxx_have_std.m4 \ - $(top_srcdir)/m4/ac_cxx_have_stl.m4 \ - $(top_srcdir)/m4/ac_cxx_have_system_v_math.m4 \ - $(top_srcdir)/m4/ac_cxx_have_valarray.m4 \ - $(top_srcdir)/m4/ac_cxx_isnan_in_namespace_std.m4 \ - $(top_srcdir)/m4/ac_cxx_keywords.m4 \ - $(top_srcdir)/m4/ac_cxx_math_fn_in_namespace_std.m4 \ - $(top_srcdir)/m4/ac_cxx_member_constants.m4 \ - $(top_srcdir)/m4/ac_cxx_member_templates.m4 \ - $(top_srcdir)/m4/ac_cxx_member_templates_outside_class.m4 \ - $(top_srcdir)/m4/ac_cxx_mutable.m4 \ - $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ - $(top_srcdir)/m4/ac_cxx_nceg_restrict.m4 \ - $(top_srcdir)/m4/ac_cxx_nceg_restrict_egcs.m4 \ - $(top_srcdir)/m4/ac_cxx_old_for_scoping.m4 \ - $(top_srcdir)/m4/ac_cxx_partial_ordering.m4 \ - $(top_srcdir)/m4/ac_cxx_partial_specialization.m4 \ - $(top_srcdir)/m4/ac_cxx_reinterpret_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_rtti.m4 \ - $(top_srcdir)/m4/ac_cxx_standard_library.m4 \ - $(top_srcdir)/m4/ac_cxx_static_cast.m4 \ - $(top_srcdir)/m4/ac_cxx_template_keyword_qualifier.m4 \ - $(top_srcdir)/m4/ac_cxx_template_qualified_base_class.m4 \ - $(top_srcdir)/m4/ac_cxx_template_qualified_return_type.m4 \ - $(top_srcdir)/m4/ac_cxx_template_scoped_argument_matching.m4 \ - $(top_srcdir)/m4/ac_cxx_templates.m4 \ - $(top_srcdir)/m4/ac_cxx_templates_as_template_arguments.m4 \ - 
$(top_srcdir)/m4/ac_cxx_templates_features.m4 \ - $(top_srcdir)/m4/ac_cxx_type_casts.m4 \ - $(top_srcdir)/m4/ac_cxx_type_promotion.m4 \ - $(top_srcdir)/m4/ac_cxx_typename.m4 \ - $(top_srcdir)/m4/ac_cxx_use_numtrait.m4 \ - $(top_srcdir)/m4/ac_env.m4 $(top_srcdir)/m4/ac_info.m4 \ - $(top_srcdir)/m4/ax_prefix_config_h.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/blitz/config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(arraydir)" -arrayHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(array_HEADERS) -ETAGS = etags -CTAGS = ctags -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AR = @AR@ -AR_FLAGS = @AR_FLAGS@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -COMPILER_SPECIFIC_HEADER = @COMPILER_SPECIFIC_HEADER@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CXX_DEBUG_FLAGS = @CXX_DEBUG_FLAGS@ -CXX_LIBS = @CXX_LIBS@ -CXX_OPTIMIZE_FLAGS = @CXX_OPTIMIZE_FLAGS@ -CXX_PROFIL_FLAGS = @CXX_PROFIL_FLAGS@ -CYGPATH_W = @CYGPATH_W@ -DATE = @DATE@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EXEEXT = @EXEEXT@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = 
@MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -OBJEXT = @OBJEXT@ -OS = @OS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -RANLIB = @RANLIB@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -STRIP = @STRIP@ -VERSION = @VERSION@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target = @target@ -target_alias = @target_alias@ -target_cpu = @target_cpu@ -target_os = @target_os@ -target_vendor = @target_vendor@ -arraydir = $(includedir)/blitz/array -generatedir = ../generate -genheaders = bops.cc uops.cc -array_HEADERS = asexpr.h cartesian.h cgsolve.h complex.cc \ -convolve.cc convolve.h cycle.cc domain.h et.h eval.cc expr.h fastiter.h \ -funcs.h functorExpr.h geometry.h indirect.h interlace.cc io.cc iter.h map.h \ -methods.cc misc.cc multi.h newet-macros.h \ -newet.h ops.cc ops.h reduce.cc reduce.h resize.cc shape.h slice.h slicing.cc \ -stencil-et.h stencilops.h stencils.cc 
stencils.h storage.h where.h zip.h \ -$(genheaders) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign blitz/array/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign blitz/array/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -uninstall-info-am: -install-arrayHEADERS: $(array_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(arraydir)" || $(mkdir_p) "$(DESTDIR)$(arraydir)" - @list='$(array_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(arrayHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(arraydir)/$$f'"; \ - $(arrayHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(arraydir)/$$f"; \ - done - -uninstall-arrayHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(array_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(arraydir)/$$f'"; \ - rm -f "$(DESTDIR)$(arraydir)/$$f"; \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) 
$(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ 
- dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(HEADERS) -installdirs: - for dir in "$(DESTDIR)$(arraydir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-arrayHEADERS - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-arrayHEADERS uninstall-info-am - -.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ - ctags distclean distclean-generic distclean-tags distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-arrayHEADERS install-data install-data-am install-exec \ - install-exec-am install-info install-info-am install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ - uninstall-am uninstall-arrayHEADERS uninstall-info-am - - -$(genheaders): - cd $(generatedir) ; $(MAKE) generate-headers -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/scipy-0.10.1/scipy/weave/blitz/blitz/array/asexpr.h b/scipy-0.10.1/scipy/weave/blitz/blitz/array/asexpr.h deleted file mode 100644 index 35fa73f582..0000000000 --- a/scipy-0.10.1/scipy/weave/blitz/blitz/array/asexpr.h +++ /dev/null @@ -1,99 +0,0 @@ -// -*- C++ -*- -/*************************************************************************** - * blitz/array/asexpr.h Declaration of the asExpr helper functions - * - * Copyright (C) 1997-2001 Todd Veldhuizen - * - * This code was relicensed under the modified BSD license for use in SciPy - * by Todd Veldhuizen (see LICENSE.txt in the weave directory). - * - * - * Suggestions: blitz-dev@oonumerics.org - * Bugs: blitz-bugs@oonumerics.org - * - * For more information, please see the Blitz++ Home Page: - * http://oonumerics.org/blitz/ - * - ***************************************************************************/ -#ifndef BZ_ARRAYASEXPR_H -#define BZ_ARRAYASEXPR_H - -#ifndef BZ_ARRAY_H - #error must be included via -#endif - -BZ_NAMESPACE(blitz) - -// The traits class asExpr converts arbitrary things to -// expression templatable operands. - -// Default to scalar. - -template -struct asExpr { - typedef _bz_ArrayExprConstant T_expr; - static T_expr getExpr(const T& x) { return T_expr(x); } -}; - -// Already an expression template term - -template -struct asExpr<_bz_ArrayExpr > { - typedef _bz_ArrayExpr T_expr; - static const T_expr& getExpr(const T_expr& x) { return x; } -}; - -// An array operand - -template -struct asExpr > { - typedef FastArrayIterator T_expr; - static T_expr getExpr(const Array& x) { return x.beginFast(); } -}; - -// Index placeholder - -template -struct asExpr > { - typedef IndexPlaceholder T_expr; - static T_expr getExpr(T_expr x) { return x; } -}; - -#ifdef BZ_HAVE_TEMPLATES_AS_TEMPLATE_ARGUMENTS - -// A traits class that provides the return type of a binary operation. - -template